author     dim <dim@FreeBSD.org>  2014-11-24 17:02:24 +0000
committer  dim <dim@FreeBSD.org>  2014-11-24 17:02:24 +0000
commit     2c8643c6396b0a3db33430cf9380e70bbb9efce0 (patch)
tree       4df130b28021d86e13bf4565ef58c1c5a5e093b4 /contrib/llvm/lib
parent     678318cd20f7db4e6c6b85d83fe00fa327b04fca (diff)
parent     e27feadae0885aa074df58ebfda2e7a7f7a7d590 (diff)
download   FreeBSD-src-2c8643c6396b0a3db33430cf9380e70bbb9efce0.zip
download   FreeBSD-src-2c8643c6396b0a3db33430cf9380e70bbb9efce0.tar.gz
Merge llvm 3.5.0 release from ^/vendor/llvm/dist, resolve conflicts, and
preserve our customizations, where necessary.
Diffstat (limited to 'contrib/llvm/lib')
-rw-r--r--contrib/llvm/lib/Analysis/AliasAnalysis.cpp132
-rw-r--r--contrib/llvm/lib/Analysis/AliasAnalysisCounter.cpp25
-rw-r--r--contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp17
-rw-r--r--contrib/llvm/lib/Analysis/AliasDebugger.cpp20
-rw-r--r--contrib/llvm/lib/Analysis/AliasSetTracker.cpp29
-rw-r--r--contrib/llvm/lib/Analysis/Analysis.cpp33
-rw-r--r--contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp411
-rw-r--r--contrib/llvm/lib/Analysis/BlockFrequencyInfo.cpp43
-rw-r--r--contrib/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp697
-rw-r--r--contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp18
-rw-r--r--contrib/llvm/lib/Analysis/CFG.cpp27
-rw-r--r--contrib/llvm/lib/Analysis/CFGPrinter.cpp35
-rw-r--r--contrib/llvm/lib/Analysis/CGSCCPassManager.cpp167
-rw-r--r--contrib/llvm/lib/Analysis/CaptureTracking.cpp122
-rw-r--r--contrib/llvm/lib/Analysis/CodeMetrics.cpp6
-rw-r--r--contrib/llvm/lib/Analysis/ConstantFolding.cpp426
-rw-r--r--contrib/llvm/lib/Analysis/CostModel.cpp97
-rw-r--r--contrib/llvm/lib/Analysis/Delinearization.cpp48
-rw-r--r--contrib/llvm/lib/Analysis/DependenceAnalysis.cpp150
-rw-r--r--contrib/llvm/lib/Analysis/DomPrinter.cpp46
-rw-r--r--contrib/llvm/lib/Analysis/DominanceFrontier.cpp138
-rw-r--r--contrib/llvm/lib/Analysis/IPA/CallGraph.cpp161
-rw-r--r--contrib/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp65
-rw-r--r--contrib/llvm/lib/Analysis/IPA/CallPrinter.cpp45
-rw-r--r--contrib/llvm/lib/Analysis/IPA/FindUsedTypes.cpp3
-rw-r--r--contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp108
-rw-r--r--contrib/llvm/lib/Analysis/IPA/IPA.cpp4
-rw-r--r--contrib/llvm/lib/Analysis/IPA/InlineCost.cpp168
-rw-r--r--contrib/llvm/lib/Analysis/IVUsers.cpp45
-rw-r--r--contrib/llvm/lib/Analysis/InstCount.cpp13
-rw-r--r--contrib/llvm/lib/Analysis/InstructionSimplify.cpp743
-rw-r--r--contrib/llvm/lib/Analysis/Interval.cpp2
-rw-r--r--contrib/llvm/lib/Analysis/IntervalPartition.cpp2
-rw-r--r--contrib/llvm/lib/Analysis/JumpInstrTableInfo.cpp40
-rw-r--r--contrib/llvm/lib/Analysis/LazyCallGraph.cpp728
-rw-r--r--contrib/llvm/lib/Analysis/LazyValueInfo.cpp42
-rw-r--r--contrib/llvm/lib/Analysis/LibCallAliasAnalysis.cpp2
-rw-r--r--contrib/llvm/lib/Analysis/LibCallSemantics.cpp4
-rw-r--r--contrib/llvm/lib/Analysis/Lint.cpp117
-rw-r--r--contrib/llvm/lib/Analysis/Loads.cpp10
-rw-r--r--contrib/llvm/lib/Analysis/LoopInfo.cpp62
-rw-r--r--contrib/llvm/lib/Analysis/LoopPass.cpp34
-rw-r--r--contrib/llvm/lib/Analysis/MemDepPrinter.cpp29
-rw-r--r--contrib/llvm/lib/Analysis/MemoryBuiltins.cpp79
-rw-r--r--contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp60
-rw-r--r--contrib/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp29
-rw-r--r--contrib/llvm/lib/Analysis/NoAliasAnalysis.cpp43
-rw-r--r--contrib/llvm/lib/Analysis/PHITransAddr.cpp69
-rw-r--r--contrib/llvm/lib/Analysis/PostDominators.cpp9
-rw-r--r--contrib/llvm/lib/Analysis/PtrUseVisitor.cpp9
-rw-r--r--contrib/llvm/lib/Analysis/RegionInfo.cpp836
-rw-r--r--contrib/llvm/lib/Analysis/RegionPass.cpp35
-rw-r--r--contrib/llvm/lib/Analysis/RegionPrinter.cpp66
-rw-r--r--contrib/llvm/lib/Analysis/ScalarEvolution.cpp1522
-rw-r--r--contrib/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp16
-rw-r--r--contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp262
-rw-r--r--contrib/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp35
-rw-r--r--contrib/llvm/lib/Analysis/SparsePropagation.cpp16
-rw-r--r--contrib/llvm/lib/Analysis/TargetTransformInfo.cpp165
-rw-r--r--contrib/llvm/lib/Analysis/Trace.cpp3
-rw-r--r--contrib/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp52
-rw-r--r--contrib/llvm/lib/Analysis/ValueTracking.cpp317
-rw-r--r--contrib/llvm/lib/AsmParser/LLLexer.cpp87
-rw-r--r--contrib/llvm/lib/AsmParser/LLLexer.h5
-rw-r--r--contrib/llvm/lib/AsmParser/LLParser.cpp627
-rw-r--r--contrib/llvm/lib/AsmParser/LLParser.h42
-rw-r--r--contrib/llvm/lib/AsmParser/LLToken.h23
-rw-r--r--contrib/llvm/lib/AsmParser/Parser.cpp29
-rw-r--r--contrib/llvm/lib/AsmParser/module.modulemap1
-rw-r--r--contrib/llvm/lib/Bitcode/Reader/BitReader.cpp23
-rw-r--r--contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp586
-rw-r--r--contrib/llvm/lib/Bitcode/Reader/BitcodeReader.h116
-rw-r--r--contrib/llvm/lib/Bitcode/Reader/BitstreamReader.cpp6
-rw-r--r--contrib/llvm/lib/Bitcode/Writer/BitWriter.cpp3
-rw-r--r--contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp207
-rw-r--r--contrib/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp16
-rw-r--r--contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp80
-rw-r--r--contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h9
-rw-r--r--contrib/llvm/lib/Bitcode/module.modulemap1
-rw-r--r--contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp68
-rw-r--r--contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.h12
-rw-r--r--contrib/llvm/lib/CodeGen/AllocationOrder.cpp3
-rw-r--r--contrib/llvm/lib/CodeGen/AllocationOrder.h6
-rw-r--r--contrib/llvm/lib/CodeGen/Analysis.cpp21
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp88
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/AddressPool.cpp45
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/AddressPool.h52
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp720
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp253
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterHandler.h57
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp94
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/ByteStreamer.h71
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp185
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DIE.h840
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DIEHash.cpp240
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DIEHash.h26
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp206
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h54
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h123
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DebugLocList.h23
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp43
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.h41
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp52
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h419
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp3055
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h605
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.h171
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfFile.cpp156
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfFile.h84
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfStringPool.cpp74
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfStringPool.h55
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp (renamed from contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp)1468
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h588
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp (renamed from contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp)168
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/EHStreamer.h138
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp6
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp6
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/Win64Exception.cpp39
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp335
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.h144
-rw-r--r--contrib/llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp380
-rw-r--r--contrib/llvm/lib/CodeGen/BasicTargetTransformInfo.cpp225
-rw-r--r--contrib/llvm/lib/CodeGen/BranchFolding.cpp138
-rw-r--r--contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp24
-rw-r--r--contrib/llvm/lib/CodeGen/CallingConvLower.cpp12
-rw-r--r--contrib/llvm/lib/CodeGen/CodeGen.cpp6
-rw-r--r--contrib/llvm/lib/CodeGen/CodeGenPrepare.cpp (renamed from contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp)1612
-rw-r--r--contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp87
-rw-r--r--contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h15
-rw-r--r--contrib/llvm/lib/CodeGen/DFAPacketizer.cpp6
-rw-r--r--contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp22
-rw-r--r--contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp27
-rw-r--r--contrib/llvm/lib/CodeGen/EarlyIfConversion.cpp29
-rw-r--r--contrib/llvm/lib/CodeGen/EdgeBundles.cpp32
-rw-r--r--contrib/llvm/lib/CodeGen/ErlangGC.cpp2
-rw-r--r--contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp43
-rw-r--r--contrib/llvm/lib/CodeGen/ExpandISelPseudos.cpp7
-rw-r--r--contrib/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp7
-rw-r--r--contrib/llvm/lib/CodeGen/GCMetadata.cpp29
-rw-r--r--contrib/llvm/lib/CodeGen/GCStrategy.cpp36
-rw-r--r--contrib/llvm/lib/CodeGen/GlobalMerge.cpp (renamed from contrib/llvm/lib/Transforms/Scalar/GlobalMerge.cpp)131
-rw-r--r--contrib/llvm/lib/CodeGen/IfConversion.cpp142
-rw-r--r--contrib/llvm/lib/CodeGen/InlineSpiller.cpp102
-rw-r--r--contrib/llvm/lib/CodeGen/InterferenceCache.cpp25
-rw-r--r--contrib/llvm/lib/CodeGen/InterferenceCache.h28
-rw-r--r--contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp14
-rw-r--r--contrib/llvm/lib/CodeGen/JumpInstrTables.cpp297
-rw-r--r--contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp113
-rw-r--r--contrib/llvm/lib/CodeGen/LatencyPriorityQueue.cpp17
-rw-r--r--contrib/llvm/lib/CodeGen/LexicalScopes.cpp208
-rw-r--r--contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp54
-rw-r--r--contrib/llvm/lib/CodeGen/LiveDebugVariables.h6
-rw-r--r--contrib/llvm/lib/CodeGen/LiveInterval.cpp38
-rw-r--r--contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp90
-rw-r--r--contrib/llvm/lib/CodeGen/LiveIntervalUnion.cpp7
-rw-r--r--contrib/llvm/lib/CodeGen/LivePhysRegs.cpp114
-rw-r--r--contrib/llvm/lib/CodeGen/LiveRangeCalc.cpp37
-rw-r--r--contrib/llvm/lib/CodeGen/LiveRangeCalc.h7
-rw-r--r--contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp15
-rw-r--r--contrib/llvm/lib/CodeGen/LiveRegMatrix.cpp7
-rw-r--r--contrib/llvm/lib/CodeGen/LiveRegUnits.cpp111
-rw-r--r--contrib/llvm/lib/CodeGen/LiveStackAnalysis.cpp3
-rw-r--r--contrib/llvm/lib/CodeGen/LiveVariables.cpp43
-rw-r--r--contrib/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp105
-rw-r--r--contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp90
-rw-r--r--contrib/llvm/lib/CodeGen/MachineBlockFrequencyInfo.cpp159
-rw-r--r--contrib/llvm/lib/CodeGen/MachineBlockPlacement.cpp134
-rw-r--r--contrib/llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp22
-rw-r--r--contrib/llvm/lib/CodeGen/MachineCSE.cpp67
-rw-r--r--contrib/llvm/lib/CodeGen/MachineCopyPropagation.cpp33
-rw-r--r--contrib/llvm/lib/CodeGen/MachineDominanceFrontier.cpp54
-rw-r--r--contrib/llvm/lib/CodeGen/MachineFunction.cpp151
-rw-r--r--contrib/llvm/lib/CodeGen/MachineFunctionAnalysis.cpp4
-rw-r--r--contrib/llvm/lib/CodeGen/MachineFunctionPass.cpp1
-rw-r--r--contrib/llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp6
-rw-r--r--contrib/llvm/lib/CodeGen/MachineInstr.cpp169
-rw-r--r--contrib/llvm/lib/CodeGen/MachineInstrBundle.cpp8
-rw-r--r--contrib/llvm/lib/CodeGen/MachineLICM.cpp79
-rw-r--r--contrib/llvm/lib/CodeGen/MachineLoopInfo.cpp12
-rw-r--r--contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp46
-rw-r--r--contrib/llvm/lib/CodeGen/MachinePassRegistry.cpp2
-rw-r--r--contrib/llvm/lib/CodeGen/MachineRegionInfo.cpp138
-rw-r--r--contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp76
-rw-r--r--contrib/llvm/lib/CodeGen/MachineSSAUpdater.cpp20
-rw-r--r--contrib/llvm/lib/CodeGen/MachineScheduler.cpp2130
-rw-r--r--contrib/llvm/lib/CodeGen/MachineSink.cpp90
-rw-r--r--contrib/llvm/lib/CodeGen/MachineTraceMetrics.cpp88
-rw-r--r--contrib/llvm/lib/CodeGen/MachineVerifier.cpp135
-rw-r--r--contrib/llvm/lib/CodeGen/OptimizePHIs.cpp16
-rw-r--r--contrib/llvm/lib/CodeGen/PHIElimination.cpp47
-rw-r--r--contrib/llvm/lib/CodeGen/PHIEliminationUtils.cpp8
-rw-r--r--contrib/llvm/lib/CodeGen/Passes.cpp71
-rw-r--r--contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp519
-rw-r--r--contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp283
-rw-r--r--contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp13
-rw-r--r--contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp185
-rw-r--r--contrib/llvm/lib/CodeGen/PrologEpilogInserter.h4
-rw-r--r--contrib/llvm/lib/CodeGen/PseudoSourceValue.cpp10
-rw-r--r--contrib/llvm/lib/CodeGen/RegAllocBase.cpp22
-rw-r--r--contrib/llvm/lib/CodeGen/RegAllocBase.h4
-rw-r--r--contrib/llvm/lib/CodeGen/RegAllocBasic.cpp29
-rw-r--r--contrib/llvm/lib/CodeGen/RegAllocFast.cpp23
-rw-r--r--contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp626
-rw-r--r--contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp150
-rw-r--r--contrib/llvm/lib/CodeGen/RegisterClassInfo.cpp15
-rw-r--r--contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp149
-rw-r--r--contrib/llvm/lib/CodeGen/RegisterCoalescer.h4
-rw-r--r--contrib/llvm/lib/CodeGen/RegisterPressure.cpp23
-rw-r--r--contrib/llvm/lib/CodeGen/RegisterScavenging.cpp23
-rw-r--r--contrib/llvm/lib/CodeGen/ScheduleDAG.cpp11
-rw-r--r--contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp374
-rw-r--r--contrib/llvm/lib/CodeGen/ScheduleDAGPrinter.cpp1
-rw-r--r--contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp5
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp2465
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp847
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp37
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp56
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp631
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp80
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp271
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp48
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h18
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp39
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp349
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp277
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp23
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h18
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp42
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp124
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp27
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h8
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp16
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp1342
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp1201
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h53
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp37
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp242
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp13
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp416
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp4
-rw-r--r--contrib/llvm/lib/CodeGen/ShadowStackGC.cpp19
-rw-r--r--contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp74
-rw-r--r--contrib/llvm/lib/CodeGen/SlotIndexes.cpp14
-rw-r--r--contrib/llvm/lib/CodeGen/SpillPlacement.cpp47
-rw-r--r--contrib/llvm/lib/CodeGen/SpillPlacement.h10
-rw-r--r--contrib/llvm/lib/CodeGen/Spiller.cpp19
-rw-r--r--contrib/llvm/lib/CodeGen/SplitKit.cpp41
-rw-r--r--contrib/llvm/lib/CodeGen/SplitKit.h4
-rw-r--r--contrib/llvm/lib/CodeGen/StackColoring.cpp254
-rw-r--r--contrib/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp127
-rw-r--r--contrib/llvm/lib/CodeGen/StackMaps.cpp505
-rw-r--r--contrib/llvm/lib/CodeGen/StackProtector.cpp63
-rw-r--r--contrib/llvm/lib/CodeGen/StackSlotColoring.cpp31
-rw-r--r--contrib/llvm/lib/CodeGen/TailDuplication.cpp71
-rw-r--r--contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp133
-rw-r--r--contrib/llvm/lib/CodeGen/TargetLoweringBase.cpp257
-rw-r--r--contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp415
-rw-r--r--contrib/llvm/lib/CodeGen/TargetOptionsImpl.cpp2
-rw-r--r--contrib/llvm/lib/CodeGen/TargetRegisterInfo.cpp14
-rw-r--r--contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp110
-rw-r--r--contrib/llvm/lib/CodeGen/UnreachableBlockElim.cpp14
-rw-r--r--contrib/llvm/lib/CodeGen/VirtRegMap.cpp60
-rw-r--r--contrib/llvm/lib/CodeGen/module.modulemap1
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFAbbreviationDeclaration.cpp18
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFAbbreviationDeclaration.h15
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFCompileUnit.h8
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFContext.cpp530
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFContext.h182
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugAbbrev.cpp127
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugAbbrev.h50
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugArangeSet.cpp12
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugArangeSet.h12
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugAranges.cpp131
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugAranges.h47
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugFrame.cpp90
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugFrame.h9
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.cpp116
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.h62
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugLine.cpp380
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugLine.h114
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugLoc.cpp74
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugLoc.h21
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugRangeList.cpp24
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFDebugRangeList.h15
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFFormValue.cpp10
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFTypeUnit.h12
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFUnit.cpp165
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARFUnit.h31
-rw-r--r--contrib/llvm/lib/DebugInfo/module.modulemap1
-rw-r--r--contrib/llvm/lib/ExecutionEngine/EventListenerCommon.h4
-rw-r--r--contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp251
-rw-r--r--contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp43
-rw-r--r--contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp21
-rw-r--r--contrib/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h14
-rw-r--r--contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp71
-rw-r--r--contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp6
-rw-r--r--contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp7
-rw-r--r--contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h24
-rw-r--r--contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp85
-rw-r--r--contrib/llvm/lib/ExecutionEngine/JIT/JIT.h49
-rw-r--r--contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp124
-rw-r--r--contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp144
-rw-r--r--contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp125
-rw-r--r--contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h96
-rw-r--r--contrib/llvm/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp27
-rw-r--r--contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp23
-rw-r--r--contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp7
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RTDyldMemoryManager.cpp1
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp51
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImageCommon.h62
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp623
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp656
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp993
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h133
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h164
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp517
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h194
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h255
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h154
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h315
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h132
-rw-r--r--contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp8
-rw-r--r--contrib/llvm/lib/IR/AsmWriter.cpp324
-rw-r--r--contrib/llvm/lib/IR/AsmWriter.h10
-rw-r--r--contrib/llvm/lib/IR/AttributeImpl.h24
-rw-r--r--contrib/llvm/lib/IR/Attributes.cpp143
-rw-r--r--contrib/llvm/lib/IR/AutoUpgrade.cpp115
-rw-r--r--contrib/llvm/lib/IR/BasicBlock.cpp32
-rw-r--r--contrib/llvm/lib/IR/Comdat.cpp25
-rw-r--r--contrib/llvm/lib/IR/ConstantFold.cpp165
-rw-r--r--contrib/llvm/lib/IR/ConstantRange.cpp (renamed from contrib/llvm/lib/Support/ConstantRange.cpp)6
-rw-r--r--contrib/llvm/lib/IR/Constants.cpp215
-rw-r--r--contrib/llvm/lib/IR/ConstantsContext.h54
-rw-r--r--contrib/llvm/lib/IR/Core.cpp295
-rw-r--r--contrib/llvm/lib/IR/DIBuilder.cpp289
-rw-r--r--contrib/llvm/lib/IR/DataLayout.cpp327
-rw-r--r--contrib/llvm/lib/IR/DebugInfo.cpp226
-rw-r--r--contrib/llvm/lib/IR/DebugLoc.cpp85
-rw-r--r--contrib/llvm/lib/IR/DiagnosticInfo.cpp208
-rw-r--r--contrib/llvm/lib/IR/DiagnosticPrinter.cpp107
-rw-r--r--contrib/llvm/lib/IR/Dominators.cpp94
-rw-r--r--contrib/llvm/lib/IR/Function.cpp131
-rw-r--r--contrib/llvm/lib/IR/GCOV.cpp774
-rw-r--r--contrib/llvm/lib/IR/GVMaterializer.cpp2
-rw-r--r--contrib/llvm/lib/IR/Globals.cpp186
-rw-r--r--contrib/llvm/lib/IR/IRPrintingPasses.cpp127
-rw-r--r--contrib/llvm/lib/IR/InlineAsm.cpp12
-rw-r--r--contrib/llvm/lib/IR/Instruction.cpp192
-rw-r--r--contrib/llvm/lib/IR/Instructions.cpp295
-rw-r--r--contrib/llvm/lib/IR/IntrinsicInst.cpp6
-rw-r--r--contrib/llvm/lib/IR/LLVMContext.cpp96
-rw-r--r--contrib/llvm/lib/IR/LLVMContextImpl.cpp25
-rw-r--r--contrib/llvm/lib/IR/LLVMContextImpl.h38
-rw-r--r--contrib/llvm/lib/IR/LeakDetector.cpp2
-rw-r--r--contrib/llvm/lib/IR/LeaksContext.h16
-rw-r--r--contrib/llvm/lib/IR/LegacyPassManager.cpp155
-rw-r--r--contrib/llvm/lib/IR/MDBuilder.cpp139
-rw-r--r--contrib/llvm/lib/IR/Mangler.cpp144
-rw-r--r--contrib/llvm/lib/IR/Metadata.cpp122
-rw-r--r--contrib/llvm/lib/IR/Module.cpp173
-rw-r--r--contrib/llvm/lib/IR/Pass.cpp76
-rw-r--r--contrib/llvm/lib/IR/PassManager.cpp255
-rw-r--r--contrib/llvm/lib/IR/PassRegistry.cpp126
-rw-r--r--contrib/llvm/lib/IR/PrintModulePass.cpp136
-rw-r--r--contrib/llvm/lib/IR/SymbolTableListTraitsImpl.h4
-rw-r--r--contrib/llvm/lib/IR/Type.cpp51
-rw-r--r--contrib/llvm/lib/IR/Use.cpp164
-rw-r--r--contrib/llvm/lib/IR/Value.cpp164
-rw-r--r--contrib/llvm/lib/IR/ValueSymbolTable.cpp9
-rw-r--r--contrib/llvm/lib/IR/Verifier.cpp1256
-rw-r--r--contrib/llvm/lib/IR/module.modulemap1
-rw-r--r--contrib/llvm/lib/IRReader/IRReader.cpp71
-rw-r--r--contrib/llvm/lib/LTO/LTOCodeGenerator.cpp179
-rw-r--r--contrib/llvm/lib/LTO/LTOModule.cpp570
-rw-r--r--contrib/llvm/lib/LineEditor/CMakeLists.txt11
-rw-r--r--contrib/llvm/lib/LineEditor/LLVMBuild.txt22
-rw-r--r--contrib/llvm/lib/LineEditor/LineEditor.cpp319
-rw-r--r--contrib/llvm/lib/LineEditor/Makefile15
-rw-r--r--contrib/llvm/lib/Linker/LinkModules.cpp619
-rw-r--r--contrib/llvm/lib/MC/ConstantPools.cpp98
-rw-r--r--contrib/llvm/lib/MC/ELFObjectWriter.cpp1230
-rw-r--r--contrib/llvm/lib/MC/MCAnalysis/CMakeLists.txt8
-rw-r--r--contrib/llvm/lib/MC/MCAnalysis/LLVMBuild.txt5
-rw-r--r--contrib/llvm/lib/MC/MCAnalysis/MCAtom.cpp (renamed from contrib/llvm/lib/MC/MCAtom.cpp)4
-rw-r--r--contrib/llvm/lib/MC/MCAnalysis/MCFunction.cpp (renamed from contrib/llvm/lib/MC/MCFunction.cpp)21
-rw-r--r--contrib/llvm/lib/MC/MCAnalysis/MCModule.cpp (renamed from contrib/llvm/lib/MC/MCModule.cpp)22
-rw-r--r--contrib/llvm/lib/MC/MCAnalysis/MCModuleYAML.cpp (renamed from contrib/llvm/lib/MC/MCModuleYAML.cpp)39
-rw-r--r--contrib/llvm/lib/MC/MCAnalysis/MCObjectDisassembler.cpp (renamed from contrib/llvm/lib/MC/MCObjectDisassembler.cpp)80
-rw-r--r--contrib/llvm/lib/MC/MCAnalysis/MCObjectSymbolizer.cpp (renamed from contrib/llvm/lib/MC/MCObjectSymbolizer.cpp)162
-rw-r--r--contrib/llvm/lib/MC/MCAnalysis/Makefile14
-rw-r--r--contrib/llvm/lib/MC/MCAsmBackend.cpp3
-rw-r--r--contrib/llvm/lib/MC/MCAsmInfo.cpp54
-rw-r--r--contrib/llvm/lib/MC/MCAsmInfoCOFF.cpp5
-rw-r--r--contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp4
-rw-r--r--contrib/llvm/lib/MC/MCAsmInfoELF.cpp1
-rw-r--r--contrib/llvm/lib/MC/MCAsmStreamer.cpp690
-rw-r--r--contrib/llvm/lib/MC/MCAssembler.cpp220
-rw-r--r--contrib/llvm/lib/MC/MCContext.cpp279
-rw-r--r--contrib/llvm/lib/MC/MCDisassembler.cpp21
-rw-r--r--contrib/llvm/lib/MC/MCDisassembler/Disassembler.cpp50
-rw-r--r--contrib/llvm/lib/MC/MCDisassembler/Disassembler.h17
-rw-r--r--contrib/llvm/lib/MC/MCDwarf.cpp647
-rw-r--r--contrib/llvm/lib/MC/MCELF.cpp10
-rw-r--r--contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp37
-rw-r--r--contrib/llvm/lib/MC/MCELFStreamer.cpp96
-rw-r--r--contrib/llvm/lib/MC/MCExpr.cpp164
-rw-r--r--contrib/llvm/lib/MC/MCExternalSymbolizer.cpp43
-rw-r--r--contrib/llvm/lib/MC/MCInst.cpp4
-rw-r--r--contrib/llvm/lib/MC/MCLinkerOptimizationHint.cpp35
-rw-r--r--contrib/llvm/lib/MC/MCMachOStreamer.cpp146
-rw-r--r--contrib/llvm/lib/MC/MCNullStreamer.cpp92
-rw-r--r--contrib/llvm/lib/MC/MCObjectFileInfo.cpp349
-rw-r--r--contrib/llvm/lib/MC/MCObjectStreamer.cpp126
-rw-r--r--contrib/llvm/lib/MC/MCParser/AsmLexer.cpp96
-rw-r--r--contrib/llvm/lib/MC/MCParser/AsmParser.cpp994
-rw-r--r--contrib/llvm/lib/MC/MCParser/COFFAsmParser.cpp113
-rw-r--r--contrib/llvm/lib/MC/MCParser/DarwinAsmParser.cpp546
-rw-r--r--contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp200
-rw-r--r--contrib/llvm/lib/MC/MCParser/MCAsmLexer.cpp2
-rw-r--r--contrib/llvm/lib/MC/MCParser/MCAsmParser.cpp2
-rw-r--r--contrib/llvm/lib/MC/MCPureStreamer.cpp235
-rw-r--r--contrib/llvm/lib/MC/MCRelocationInfo.cpp6
-rw-r--r--contrib/llvm/lib/MC/MCSectionCOFF.cpp33
-rw-r--r--contrib/llvm/lib/MC/MCSectionMachO.cpp160
-rw-r--r--contrib/llvm/lib/MC/MCStreamer.cpp468
-rw-r--r--contrib/llvm/lib/MC/MCSubtargetInfo.cpp17
-rw-r--r--contrib/llvm/lib/MC/MCSymbolizer.cpp5
-rw-r--r--contrib/llvm/lib/MC/MCTargetOptions.cpp20
-rw-r--r--contrib/llvm/lib/MC/MCValue.cpp25
-rw-r--r--contrib/llvm/lib/MC/MCWin64EH.cpp133
-rw-r--r--contrib/llvm/lib/MC/MachObjectWriter.cpp161
-rw-r--r--contrib/llvm/lib/MC/StringTableBuilder.cpp51
-rw-r--r--contrib/llvm/lib/MC/SubtargetFeature.cpp154
-rw-r--r--contrib/llvm/lib/MC/WinCOFFObjectWriter.cpp433
-rw-r--r--contrib/llvm/lib/MC/WinCOFFStreamer.cpp379
-rw-r--r--contrib/llvm/lib/MC/YAML.cpp (renamed from contrib/llvm/lib/Object/YAML.cpp)21
-rw-r--r--contrib/llvm/lib/Object/Archive.cpp214
-rw-r--r--contrib/llvm/lib/Object/Binary.cpp85
-rw-r--r--contrib/llvm/lib/Object/COFFObjectFile.cpp1039
-rw-r--r--contrib/llvm/lib/Object/COFFYAML.cpp138
-rw-r--r--contrib/llvm/lib/Object/ELF.cpp12
-rw-r--r--contrib/llvm/lib/Object/ELFObjectFile.cpp51
-rw-r--r--contrib/llvm/lib/Object/ELFYAML.cpp427
-rw-r--r--contrib/llvm/lib/Object/Error.cpp19
-rw-r--r--contrib/llvm/lib/Object/IRObjectFile.cpp279
-rw-r--r--contrib/llvm/lib/Object/MachOObjectFile.cpp966
-rw-r--r--contrib/llvm/lib/Object/MachOUniversal.cpp84
-rw-r--r--contrib/llvm/lib/Object/Object.cpp68
-rw-r--r--contrib/llvm/lib/Object/ObjectFile.cpp47
-rw-r--r--contrib/llvm/lib/Object/RecordStreamer.cpp100
-rw-r--r--contrib/llvm/lib/Object/RecordStreamer.h42
-rw-r--r--contrib/llvm/lib/Object/SymbolicFile.cpp65
-rw-r--r--contrib/llvm/lib/Option/ArgList.cpp116
-rw-r--r--contrib/llvm/lib/Option/OptTable.cpp10
-rw-r--r--contrib/llvm/lib/Option/Option.cpp20
-rw-r--r--contrib/llvm/lib/ProfileData/CMakeLists.txt5
-rw-r--r--contrib/llvm/lib/ProfileData/InstrProf.cpp61
-rw-r--r--contrib/llvm/lib/ProfileData/InstrProfIndexed.h55
-rw-r--r--contrib/llvm/lib/ProfileData/InstrProfReader.cpp360
-rw-r--r--contrib/llvm/lib/ProfileData/InstrProfWriter.cpp127
-rw-r--r--contrib/llvm/lib/ProfileData/LLVMBuild.txt22
-rw-r--r--contrib/llvm/lib/ProfileData/Makefile14
-rw-r--r--contrib/llvm/lib/Support/APFloat.cpp44
-rw-r--r--contrib/llvm/lib/Support/APInt.cpp17
-rw-r--r--contrib/llvm/lib/Support/ARMBuildAttrs.cpp95
-rw-r--r--contrib/llvm/lib/Support/ARMWinEH.cpp38
-rw-r--r--contrib/llvm/lib/Support/Allocator.cpp160
-rw-r--r--contrib/llvm/lib/Support/Atomic.cpp3
-rw-r--r--contrib/llvm/lib/Support/BlockFrequency.cpp120
-rw-r--r--contrib/llvm/lib/Support/BranchProbability.cpp55
-rw-r--r--contrib/llvm/lib/Support/CommandLine.cpp166
-rw-r--r--contrib/llvm/lib/Support/Compression.cpp36
-rw-r--r--contrib/llvm/lib/Support/ConvertUTF.c153
-rw-r--r--contrib/llvm/lib/Support/CrashRecoveryContext.cpp54
-rw-r--r--contrib/llvm/lib/Support/DAGDeltaAlgorithm.cpp8
-rw-r--r--contrib/llvm/lib/Support/DataExtractor.cpp6
-rw-r--r--contrib/llvm/lib/Support/DataStream.cpp15
-rw-r--r--contrib/llvm/lib/Support/Debug.cpp2
-rw-r--r--contrib/llvm/lib/Support/Disassembler.cpp74
-rw-r--r--contrib/llvm/lib/Support/Dwarf.cpp65
-rw-r--r--contrib/llvm/lib/Support/DynamicLibrary.cpp18
-rw-r--r--contrib/llvm/lib/Support/ErrorHandling.cpp100
-rw-r--r--contrib/llvm/lib/Support/FileOutputBuffer.cpp32
-rw-r--r--contrib/llvm/lib/Support/FileUtilities.cpp18
-rw-r--r--contrib/llvm/lib/Support/FoldingSet.cpp22
-rw-r--r--contrib/llvm/lib/Support/FormattedStream.cpp2
-rw-r--r--contrib/llvm/lib/Support/GraphWriter.cpp260
-rw-r--r--contrib/llvm/lib/Support/Host.cpp77
-rw-r--r--contrib/llvm/lib/Support/IntervalMap.cpp2
-rw-r--r--contrib/llvm/lib/Support/LEB128.cpp44
-rw-r--r--contrib/llvm/lib/Support/LineIterator.cpp69
-rw-r--r--contrib/llvm/lib/Support/LockFileManager.cpp115
-rw-r--r--contrib/llvm/lib/Support/ManagedStatic.cpp35
-rw-r--r--contrib/llvm/lib/Support/MemoryBuffer.cpp187
-rw-r--r--contrib/llvm/lib/Support/Mutex.cpp17
-rw-r--r--contrib/llvm/lib/Support/Path.cpp328
-rw-r--r--contrib/llvm/lib/Support/PrettyStackTrace.cpp6
-rw-r--r--contrib/llvm/lib/Support/Process.cpp41
-rw-r--r--contrib/llvm/lib/Support/Program.cpp7
-rw-r--r--contrib/llvm/lib/Support/RWMutex.cpp14
-rw-r--r--contrib/llvm/lib/Support/RandomNumberGenerator.cpp61
-rw-r--r--contrib/llvm/lib/Support/Regex.cpp24
-rw-r--r--contrib/llvm/lib/Support/ScaledNumber.cpp319
-rw-r--r--contrib/llvm/lib/Support/SearchForAddressOfSpecialSymbol.cpp2
-rw-r--r--contrib/llvm/lib/Support/SmallPtrSet.cpp80
-rw-r--r--contrib/llvm/lib/Support/SourceMgr.cpp92
-rw-r--r--contrib/llvm/lib/Support/SpecialCaseList.cpp (renamed from contrib/llvm/lib/Transforms/Utils/SpecialCaseList.cpp)92
-rw-r--r--contrib/llvm/lib/Support/Statistic.cpp23
-rw-r--r--contrib/llvm/lib/Support/StreamableMemoryObject.cpp23
-rw-r--r--contrib/llvm/lib/Support/StringMap.cpp20
-rw-r--r--contrib/llvm/lib/Support/StringPool.cpp2
-rw-r--r--contrib/llvm/lib/Support/StringRef.cpp5
-rw-r--r--contrib/llvm/lib/Support/TargetRegistry.cpp61
-rw-r--r--contrib/llvm/lib/Support/ThreadLocal.cpp9
-rw-r--r--contrib/llvm/lib/Support/Threading.cpp47
-rw-r--r--contrib/llvm/lib/Support/TimeValue.cpp2
-rw-r--r--contrib/llvm/lib/Support/Timer.cpp30
-rw-r--r--contrib/llvm/lib/Support/ToolOutputFile.cpp6
-rw-r--r--contrib/llvm/lib/Support/Triple.cpp357
-rw-r--r--contrib/llvm/lib/Support/Twine.cpp4
-rw-r--r--contrib/llvm/lib/Support/Unix/Host.inc2
-rw-r--r--contrib/llvm/lib/Support/Unix/Memory.inc57
-rw-r--r--contrib/llvm/lib/Support/Unix/Path.inc395
-rw-r--r--contrib/llvm/lib/Support/Unix/Process.inc20
-rw-r--r--contrib/llvm/lib/Support/Unix/Program.inc44
-rw-r--r--contrib/llvm/lib/Support/Unix/RWMutex.inc20
-rw-r--r--contrib/llvm/lib/Support/Unix/Signals.inc32
-rw-r--r--contrib/llvm/lib/Support/Unix/TimeValue.inc10
-rw-r--r--contrib/llvm/lib/Support/Unix/system_error.inc34
-rw-r--r--contrib/llvm/lib/Support/Valgrind.cpp14
-rw-r--r--contrib/llvm/lib/Support/Windows/DynamicLibrary.inc6
-rw-r--r--contrib/llvm/lib/Support/Windows/Host.inc4
-rw-r--r--contrib/llvm/lib/Support/Windows/Memory.inc33
-rw-r--r--contrib/llvm/lib/Support/Windows/Mutex.inc2
-rw-r--r--contrib/llvm/lib/Support/Windows/Path.inc612
-rw-r--r--contrib/llvm/lib/Support/Windows/Process.inc142
-rw-r--r--contrib/llvm/lib/Support/Windows/Program.inc30
-rw-r--r--contrib/llvm/lib/Support/Windows/RWMutex.inc2
-rw-r--r--contrib/llvm/lib/Support/Windows/Signals.inc8
-rw-r--r--contrib/llvm/lib/Support/Windows/ThreadLocal.inc4
-rw-r--r--contrib/llvm/lib/Support/Windows/TimeValue.inc21
-rw-r--r--contrib/llvm/lib/Support/Windows/WindowsSupport.h (renamed from contrib/llvm/lib/Support/Windows/Windows.h)17
-rw-r--r--contrib/llvm/lib/Support/Windows/system_error.inc142
-rw-r--r--contrib/llvm/lib/Support/YAMLParser.cpp103
-rw-r--r--contrib/llvm/lib/Support/YAMLTraits.cpp84
-rw-r--r--contrib/llvm/lib/Support/raw_ostream.cpp46
-rw-r--r--contrib/llvm/lib/Support/regcclass.h5
-rw-r--r--contrib/llvm/lib/Support/regcname.h5
-rw-r--r--contrib/llvm/lib/Support/regcomp.c6
-rw-r--r--contrib/llvm/lib/Support/regengine.inc2
-rw-r--r--contrib/llvm/lib/Support/regex2.h5
-rw-r--r--contrib/llvm/lib/Support/regutils.h5
-rw-r--r--contrib/llvm/lib/Support/system_error.cpp130
-rw-r--r--contrib/llvm/lib/TableGen/Error.cpp6
-rw-r--r--contrib/llvm/lib/TableGen/Main.cpp20
-rw-r--r--contrib/llvm/lib/TableGen/Record.cpp242
-rw-r--r--contrib/llvm/lib/TableGen/SetTheory.cpp323
-rw-r--r--contrib/llvm/lib/TableGen/TGLexer.cpp33
-rw-r--r--contrib/llvm/lib/TableGen/TGLexer.h8
-rw-r--r--contrib/llvm/lib/TableGen/TGParser.cpp500
-rw-r--r--contrib/llvm/lib/TableGen/TGParser.h23
-rw-r--r--contrib/llvm/lib/TableGen/module.modulemap1
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64.h41
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64.td94
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp491
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp387
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp655
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64AsmPrinter.h76
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64BranchFixupPass.cpp600
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64BranchRelaxation.cpp510
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64CallingConv.td197
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64CallingConvention.td242
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp147
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp1117
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp919
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp134
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp736
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64FastISel.cpp1994
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp1309
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.h122
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp4075
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp10642
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.h702
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64InstrAtomics.td364
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64InstrFormats.td9502
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp2504
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.h235
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.td9894
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64InstrNEON.td8671
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp951
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64MCInstLower.cpp245
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64MCInstLower.h52
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp18
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h188
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64PerfectShuffle.h6586
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp578
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp440
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64RegisterInfo.h104
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64RegisterInfo.td734
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64SchedA53.td291
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64SchedA57.td304
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64SchedA57WriteRes.td512
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64SchedCyclone.td865
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64Schedule.td98
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp49
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.h15
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp168
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64Subtarget.cpp131
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64Subtarget.h108
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp190
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64TargetMachine.h80
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64TargetObjectFile.cpp49
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64TargetObjectFile.h37
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp500
-rw-r--r--contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp5333
-rw-r--r--contrib/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp2553
-rw-r--r--contrib/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.h40
-rw-r--r--contrib/llvm/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp220
-rw-r--r--contrib/llvm/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.h38
-rw-r--r--contrib/llvm/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp1565
-rw-r--r--contrib/llvm/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h214
-rw-r--r--contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h738
-rw-r--r--contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp1007
-rw-r--r--contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp437
-rw-r--r--contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp53
-rw-r--r--contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h7
-rw-r--r--contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h161
-rw-r--r--contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp71
-rw-r--r--contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h23
-rw-r--r--contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp908
-rw-r--r--contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp187
-rw-r--r--contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h237
-rw-r--r--contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp220
-rw-r--r--contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h38
-rw-r--r--contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp396
-rw-r--r--contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp41
-rw-r--r--contrib/llvm/lib/Target/AArch64/README.txt2
-rw-r--r--contrib/llvm/lib/Target/AArch64/TargetInfo/AArch64TargetInfo.cpp27
-rw-r--r--contrib/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp424
-rw-r--r--contrib/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h580
-rw-r--r--contrib/llvm/lib/Target/ARM/A15SDOptimizer.cpp43
-rw-r--r--contrib/llvm/lib/Target/ARM/ARM.h9
-rw-r--r--contrib/llvm/lib/Target/ARM/ARM.td82
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp620
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h66
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp197
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h236
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp91
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h84
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMBuildAttrs.h170
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMCallingConv.h113
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMCallingConv.td19
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp31
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp50
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h40
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp190
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMFPUName.def1
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMFastISel.cpp435
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMFeatures.h46
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp713
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMFrameLowering.h44
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp6
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.h12
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp324
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp2170
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMISelLowering.h215
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMInstrFormats.td41
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp6
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMInstrInfo.h8
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMInstrInfo.td642
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMInstrNEON.td1626
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMInstrThumb.td52
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td95
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMInstrVFP.td63
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp18
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMJITInfo.h32
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp366
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMMCInstLower.cpp18
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.cpp10
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h36
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMOptimizeBarriersPass.cpp101
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMRegisterInfo.cpp2
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMRegisterInfo.h2
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td10
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMScheduleA9.td17
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMScheduleSwift.td4
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMScheduleV6.td4
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp63
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.h12
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp200
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMSubtarget.h130
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp153
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMTargetMachine.h153
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp28
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.h18
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp147
-rw-r--r--contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp2360
-rw-r--r--contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp48
-rw-r--r--contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp40
-rw-r--r--contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h4
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMArchName.def50
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMArchName.h27
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp451
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h56
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp171
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp652
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h9
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp80
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h23
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp538
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.cpp34
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h10
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp316
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h44
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachORelocationInfo.cpp4
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp43
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp73
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOp.h125
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp49
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h12
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMWinCOFFObjectWriter.cpp82
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp46
-rw-r--r--contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp28
-rw-r--r--contrib/llvm/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp13
-rw-r--r--contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp144
-rw-r--r--contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.h32
-rw-r--r--contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp1
-rw-r--r--contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.h13
-rw-r--r--contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp10
-rw-r--r--contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h30
-rw-r--r--contrib/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp9
-rw-r--r--contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp1
-rw-r--r--contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h17
-rw-r--r--contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp3
-rw-r--r--contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.h14
-rw-r--r--contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp44
-rw-r--r--contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp122
-rw-r--r--contrib/llvm/lib/Target/CppBackend/CPPTargetMachine.h14
-rw-r--r--contrib/llvm/lib/Target/CppBackend/TargetInfo/CppBackendTargetInfo.cpp7
-rw-r--r--contrib/llvm/lib/Target/Hexagon/Hexagon.td12
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp94
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.h122
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonCFGOptimizer.cpp15
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.h3
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp45
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp6
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp12
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp16
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.h38
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp134
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp23
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp799
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.h48
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td167
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV4.td31
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp36
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.h203
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.td14
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV4.td43
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp2
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonMachineFunctionInfo.h2
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp43
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.h45
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp45
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonPeephole.cpp9
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp19
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.h17
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp20
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td51
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td165
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp9
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonSelectionDAGInfo.h7
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp34
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp10
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp17
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.h27
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp52
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.h50
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.cpp7
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.h9
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp77
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonVarargsCallingConvention.h2
-rw-r--r--contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp5
-rw-r--r--contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h2
-rw-r--r--contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h72
-rw-r--r--contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp3
-rw-r--r--contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h2
-rw-r--r--contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInst.h2
-rw-r--r--contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp8
-rw-r--r--contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.cpp5
-rw-r--r--contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.h6
-rw-r--r--contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp1
-rw-r--r--contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h2
-rw-r--r--contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp6
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430.td6
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430AsmPrinter.cpp24
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430BranchSelector.cpp7
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp6
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.h31
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp39
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp43
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h64
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp18
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h43
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.cpp10
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp32
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h23
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430SelectionDAGInfo.cpp8
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430SelectionDAGInfo.h2
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430Subtarget.cpp25
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430Subtarget.h25
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp18
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.h44
-rw-r--r--contrib/llvm/lib/Target/Mangler.cpp143
-rw-r--r--contrib/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp2715
-rw-r--r--contrib/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp626
-rw-r--r--contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp44
-rw-r--r--contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.h7
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.cpp66
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h238
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp469
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h93
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h28
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp183
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp43
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h58
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h24
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp2
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h2
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp591
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.h154
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp89
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCExpr.h66
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h33
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp101
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h20
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp272
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp92
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsReginfo.cpp80
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsReginfo.h31
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp629
-rw-r--r--contrib/llvm/lib/Target/Mips/MSA.txt7
-rw-r--r--contrib/llvm/lib/Target/Mips/MicroMipsInstrFPU.td148
-rw-r--r--contrib/llvm/lib/Target/Mips/MicroMipsInstrFormats.td321
-rw-r--r--contrib/llvm/lib/Target/Mips/MicroMipsInstrInfo.td151
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips.h1
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips.td125
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16FrameLowering.cpp62
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16FrameLowering.h23
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16HardFloat.cpp49
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16HardFloat.h4
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16HardFloatInfo.cpp50
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16HardFloatInfo.h50
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.cpp26
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.h10
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16ISelLowering.cpp101
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16ISelLowering.h27
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16InstrInfo.cpp239
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16InstrInfo.h57
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16InstrInfo.td102
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.cpp6
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.h16
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips32r6InstrFormats.td543
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips32r6InstrInfo.td824
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td418
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips64r6InstrInfo.td217
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp572
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsAsmPrinter.h81
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsCallingConv.td42
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp124
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsCondMov.td250
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp318
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsDSPInstrFormats.td4
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp137
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsFastISel.cpp400
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp7
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsFrameLowering.h6
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp27
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.h20
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp699
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsISelLowering.h142
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsInstrFPU.td618
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsInstrFormats.td194
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp58
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsInstrInfo.h60
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsInstrInfo.td890
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsJITInfo.cpp3
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsJITInfo.h14
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsLongBranch.cpp149
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp70
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsMCInstLower.h8
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsMSAInstrFormats.td63
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsMSAInstrInfo.td351
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp15
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsMachineFunction.h37
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsModuleISelDAGToDAG.cpp4
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsModuleISelDAGToDAG.h16
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsOptimizePICCall.cpp301
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsOptionRecord.h80
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsOs16.cpp3
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsOs16.h4
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp41
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h21
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td150
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp206
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.h17
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp149
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.h68
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSEISelLowering.cpp463
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSEISelLowering.h48
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp160
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.h53
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp57
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.h12
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSchedule.td329
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSelectionDAGInfo.cpp8
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSelectionDAGInfo.h2
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp219
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSubtarget.h173
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp152
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsTargetMachine.h98
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp17
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.h11
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsTargetStreamer.h177
-rw-r--r--contrib/llvm/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp11
-rw-r--r--contrib/llvm/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.h14
-rw-r--r--contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h17
-rw-r--r--contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp3
-rw-r--r--contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp8
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTX.h4
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTX.td13
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.h9
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp345
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.h70
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp84
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp195
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXFrameLowering.cpp24
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXFrameLowering.h18
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp33
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp2940
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h31
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp2564
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.h418
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXImageOptimizer.cpp178
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXInstrFormats.td24
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp11
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.h19
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td393
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td5449
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp12
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.h9
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXMCExpr.cpp3
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXMCExpr.h12
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXMachineFunctionInfo.h46
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp12
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.cpp25
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.h23
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td11
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp189
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXSection.h16
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXSplitBBatBar.cpp73
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXSplitBBatBar.h41
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp65
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.h62
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp111
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h60
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXTargetObjectFile.h49
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp69
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXUtilities.h4
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVVMReflect.cpp97
-rw-r--r--contrib/llvm/lib/Target/NVPTX/cl_common_defines.h6
-rw-r--r--contrib/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp471
-rw-r--r--contrib/llvm/lib/Target/PowerPC/Disassembler/CMakeLists.txt3
-rw-r--r--contrib/llvm/lib/Target/PowerPC/Disassembler/LLVMBuild.txt23
-rw-r--r--contrib/llvm/lib/Target/PowerPC/Disassembler/Makefile16
-rw-r--r--contrib/llvm/lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp348
-rw-r--r--contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp21
-rw-r--r--contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h8
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp95
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp116
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp37
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h9
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp175
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp42
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.h12
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp121
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h1
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp8
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp8
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h7
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPC.h7
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPC.td59
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp379
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp18
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp55
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCCallingConv.td39
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCCodeEmitter.cpp13
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCFastISel.cpp270
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp323
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.h227
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp218
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.h43
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp1022
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp2649
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h188
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td438
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstrAltivec.td297
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td219
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp976
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h178
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td1290
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstrVSX.td816
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCJITInfo.cpp17
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCJITInfo.h45
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp59
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.cpp6
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp195
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h49
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.td86
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCSchedule.td981
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCSchedule440.td1118
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleA2.td246
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleE500mc.td493
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleE5500.td592
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td119
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td147
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td168
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td180
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleP7.td385
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp11
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.h2
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp106
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCSubtarget.h64
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp90
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.h51
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCTargetObjectFile.cpp30
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCTargetObjectFile.h10
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCTargetStreamer.h4
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp241
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPU.h34
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPU.td103
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUAsmPrinter.cpp237
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUAsmPrinter.h52
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUCallingConv.td6
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUConvertToISA.cpp62
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUFrameLowering.cpp20
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUFrameLowering.h13
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUISelDAGToDAG.cpp525
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUISelLowering.cpp1788
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUISelLowering.h192
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUInstrInfo.cpp71
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUInstrInfo.h77
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUInstrInfo.td93
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUInstructions.td187
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUIntrinsicInfo.cpp (renamed from contrib/llvm/lib/Target/R600/AMDILIntrinsicInfo.cpp)42
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUIntrinsicInfo.h (renamed from contrib/llvm/lib/Target/R600/AMDILIntrinsicInfo.h)23
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUIntrinsics.td41
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUMCInstLower.cpp57
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUMCInstLower.h20
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUMachineFunction.cpp6
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUMachineFunction.h7
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUPromoteAlloca.cpp387
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPURegisterInfo.cpp14
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPURegisterInfo.h28
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUSubtarget.cpp145
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUSubtarget.h151
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUTargetMachine.cpp68
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUTargetMachine.h32
-rw-r--r--contrib/llvm/lib/Target/R600/AMDGPUTargetTransformInfo.cpp83
-rw-r--r--contrib/llvm/lib/Target/R600/AMDILBase.td25
-rw-r--r--contrib/llvm/lib/Target/R600/AMDILCFGStructurizer.cpp119
-rw-r--r--contrib/llvm/lib/Target/R600/AMDILISelLowering.cpp642
-rw-r--r--contrib/llvm/lib/Target/R600/AMDILInstrInfo.td150
-rw-r--r--contrib/llvm/lib/Target/R600/AMDILIntrinsics.td232
-rw-r--r--contrib/llvm/lib/Target/R600/AMDILRegisterInfo.td107
-rw-r--r--contrib/llvm/lib/Target/R600/CaymanInstructions.td224
-rw-r--r--contrib/llvm/lib/Target/R600/EvergreenInstructions.td609
-rw-r--r--contrib/llvm/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp158
-rw-r--r--contrib/llvm/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h47
-rw-r--r--contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp82
-rw-r--r--contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUELFObjectWriter.cpp8
-rw-r--r--contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUFixupKinds.h34
-rw-r--r--contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp10
-rw-r--r--contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h2
-rw-r--r--contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h15
-rw-r--r--contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp7
-rw-r--r--contrib/llvm/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp44
-rw-r--r--contrib/llvm/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp76
-rw-r--r--contrib/llvm/lib/Target/R600/Processors.td84
-rw-r--r--contrib/llvm/lib/Target/R600/R600ClauseMergePass.cpp9
-rw-r--r--contrib/llvm/lib/Target/R600/R600ControlFlowFinalizer.cpp234
-rw-r--r--contrib/llvm/lib/Target/R600/R600Defines.h2
-rw-r--r--contrib/llvm/lib/Target/R600/R600EmitClauseMarkers.cpp28
-rw-r--r--contrib/llvm/lib/Target/R600/R600ExpandSpecialInstrs.cpp27
-rw-r--r--contrib/llvm/lib/Target/R600/R600ISelLowering.cpp537
-rw-r--r--contrib/llvm/lib/Target/R600/R600ISelLowering.h41
-rw-r--r--contrib/llvm/lib/Target/R600/R600InstrInfo.cpp124
-rw-r--r--contrib/llvm/lib/Target/R600/R600InstrInfo.h95
-rw-r--r--contrib/llvm/lib/Target/R600/R600Instructions.td948
-rw-r--r--contrib/llvm/lib/Target/R600/R600MachineFunctionInfo.h2
-rw-r--r--contrib/llvm/lib/Target/R600/R600MachineScheduler.cpp22
-rw-r--r--contrib/llvm/lib/Target/R600/R600MachineScheduler.h17
-rw-r--r--contrib/llvm/lib/Target/R600/R600OptimizeVectorRegisters.cpp34
-rw-r--r--contrib/llvm/lib/Target/R600/R600Packetizer.cpp37
-rw-r--r--contrib/llvm/lib/Target/R600/R600RegisterInfo.cpp17
-rw-r--r--contrib/llvm/lib/Target/R600/R600RegisterInfo.h23
-rw-r--r--contrib/llvm/lib/Target/R600/R600RegisterInfo.td48
-rw-r--r--contrib/llvm/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp10
-rw-r--r--contrib/llvm/lib/Target/R600/R700Instructions.td21
-rw-r--r--contrib/llvm/lib/Target/R600/SIAnnotateControlFlow.cpp84
-rw-r--r--contrib/llvm/lib/Target/R600/SIDefines.h54
-rw-r--r--contrib/llvm/lib/Target/R600/SIFixSGPRCopies.cpp27
-rw-r--r--contrib/llvm/lib/Target/R600/SIFixSGPRLiveRanges.cpp110
-rw-r--r--contrib/llvm/lib/Target/R600/SIISelLowering.cpp960
-rw-r--r--contrib/llvm/lib/Target/R600/SIISelLowering.h53
-rw-r--r--contrib/llvm/lib/Target/R600/SIInsertWaits.cpp16
-rw-r--r--contrib/llvm/lib/Target/R600/SIInstrFormats.td351
-rw-r--r--contrib/llvm/lib/Target/R600/SIInstrInfo.cpp1037
-rw-r--r--contrib/llvm/lib/Target/R600/SIInstrInfo.h132
-rw-r--r--contrib/llvm/lib/Target/R600/SIInstrInfo.td520
-rw-r--r--contrib/llvm/lib/Target/R600/SIInstructions.td2335
-rw-r--r--contrib/llvm/lib/Target/R600/SIIntrinsics.td121
-rw-r--r--contrib/llvm/lib/Target/R600/SILowerControlFlow.cpp173
-rw-r--r--contrib/llvm/lib/Target/R600/SILowerI1Copies.cpp154
-rw-r--r--contrib/llvm/lib/Target/R600/SIMachineFunctionInfo.cpp77
-rw-r--r--contrib/llvm/lib/Target/R600/SIMachineFunctionInfo.h36
-rw-r--r--contrib/llvm/lib/Target/R600/SIRegisterInfo.cpp97
-rw-r--r--contrib/llvm/lib/Target/R600/SIRegisterInfo.h52
-rw-r--r--contrib/llvm/lib/Target/R600/SIRegisterInfo.td27
-rw-r--r--contrib/llvm/lib/Target/R600/SIShrinkInstructions.cpp194
-rw-r--r--contrib/llvm/lib/Target/R600/SITypeRewriter.cpp36
-rw-r--r--contrib/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp337
-rw-r--r--contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp17
-rw-r--r--contrib/llvm/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp284
-rw-r--r--contrib/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.cpp67
-rw-r--r--contrib/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.h16
-rw-r--r--contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp60
-rw-r--r--contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp33
-rw-r--r--contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcFixupKinds.h4
-rw-r--r--contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp6
-rw-r--r--contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h14
-rw-r--r--contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp84
-rw-r--r--contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.cpp45
-rw-r--r--contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.h10
-rw-r--r--contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp41
-rw-r--r--contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcTargetStreamer.cpp12
-rw-r--r--contrib/llvm/lib/Target/Sparc/Sparc.h12
-rw-r--r--contrib/llvm/lib/Target/Sparc/Sparc.td31
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp100
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcCodeEmitter.cpp32
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp32
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h26
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp12
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp122
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcISelLowering.h52
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcInstr64Bit.td108
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcInstrAliases.td217
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcInstrFormats.td115
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp23
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h68
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td320
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcInstrVIS.td263
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcJITInfo.cpp6
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcJITInfo.h14
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcMCInstLower.cpp8
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp8
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h15
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.td13
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcSelectionDAGInfo.cpp9
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcSelectionDAGInfo.h2
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcSubtarget.cpp58
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcSubtarget.h39
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp10
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h44
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcTargetObjectFile.cpp25
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcTargetObjectFile.h7
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcTargetStreamer.h12
-rw-r--r--contrib/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp219
-rw-r--r--contrib/llvm/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp21
-rw-r--r--contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp4
-rw-r--r--contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h6
-rw-r--r--contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp35
-rw-r--r--contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp2
-rw-r--r--contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h5
-rw-r--r--contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp87
-rw-r--r--contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCFixups.h22
-rw-r--r--contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp35
-rw-r--r--contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp10
-rw-r--r--contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h78
-rw-r--r--contrib/llvm/lib/Target/SystemZ/README.txt7
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZ.h187
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZ.td10
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp18
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.h22
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.h14
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.td10
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.cpp2
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.h20
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp122
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp136
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h66
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp110
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp872
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h425
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h2
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td69
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td128
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp60
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h284
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td201
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp210
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.cpp2
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.h2
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h2
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZOperands.td29
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZOperators.td27
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZPatterns.td15
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZProcessors.td20
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp50
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h50
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td25
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp21
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h43
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp67
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp35
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h36
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp25
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h43
-rw-r--r--contrib/llvm/lib/Target/Target.cpp16
-rw-r--r--contrib/llvm/lib/Target/TargetLibraryInfo.cpp74
-rw-r--r--contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp60
-rw-r--r--contrib/llvm/lib/Target/TargetMachine.cpp98
-rw-r--r--contrib/llvm/lib/Target/TargetMachineC.cpp33
-rw-r--r--contrib/llvm/lib/Target/TargetSubtargetInfo.cpp31
-rw-r--r--contrib/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp507
-rw-r--r--contrib/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.h54
-rw-r--r--contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp1574
-rw-r--r--contrib/llvm/lib/Target/X86/AsmParser/X86AsmParserCommon.h43
-rw-r--r--contrib/llvm/lib/Target/X86/AsmParser/X86Operand.h488
-rw-r--r--contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp207
-rw-r--r--contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h27
-rw-r--r--contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.cpp (renamed from contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c)554
-rw-r--r--contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h404
-rw-r--r--contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h366
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp66
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h37
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.cpp123
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp59
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h46
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp147
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h296
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp41
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp6
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86FixupKinds.h1
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp36
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h18
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp715
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp75
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h12
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp14
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp32
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp74
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp51
-rw-r--r--contrib/llvm/lib/Target/X86/TargetInfo/X86TargetInfo.cpp3
-rw-r--r--contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp1
-rw-r--r--contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.h3
-rw-r--r--contrib/llvm/lib/Target/X86/X86.h14
-rw-r--r--contrib/llvm/lib/Target/X86/X86.td106
-rw-r--r--contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp469
-rw-r--r--contrib/llvm/lib/Target/X86/X86AsmPrinter.h62
-rw-r--r--contrib/llvm/lib/Target/X86/X86AtomicExpandPass.cpp283
-rw-r--r--contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp19
-rw-r--r--contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h46
-rw-r--r--contrib/llvm/lib/Target/X86/X86CallingConv.td63
-rw-r--r--contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp414
-rw-r--r--contrib/llvm/lib/Target/X86/X86FastISel.cpp1947
-rw-r--r--contrib/llvm/lib/Target/X86/X86FixupLEAs.cpp269
-rw-r--r--contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp25
-rw-r--r--contrib/llvm/lib/Target/X86/X86FrameLowering.cpp583
-rw-r--r--contrib/llvm/lib/Target/X86/X86FrameLowering.h52
-rw-r--r--contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp150
-rw-r--r--contrib/llvm/lib/Target/X86/X86ISelLowering.cpp7168
-rw-r--r--contrib/llvm/lib/Target/X86/X86ISelLowering.h281
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrAVX512.td2865
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrArithmetic.td196
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrBuilder.h3
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrCMovSetCC.td13
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrCompiler.td344
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrControl.td119
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrExtension.td32
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrFMA.td91
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrFPStack.td232
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrFormats.td525
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td75
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrInfo.cpp800
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrInfo.h281
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrInfo.td1046
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrMMX.td20
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrSSE.td1855
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrSVM.td8
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrShiftRotate.td219
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrSystem.td312
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrTSX.td3
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrVMX.td32
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrXOP.td46
-rw-r--r--contrib/llvm/lib/Target/X86/X86JITInfo.cpp16
-rw-r--r--contrib/llvm/lib/Target/X86/X86JITInfo.h30
-rw-r--r--contrib/llvm/lib/Target/X86/X86MCInstLower.cpp354
-rw-r--r--contrib/llvm/lib/Target/X86/X86PadShortFunction.cpp13
-rw-r--r--contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp146
-rw-r--r--contrib/llvm/lib/Target/X86/X86RegisterInfo.h48
-rw-r--r--contrib/llvm/lib/Target/X86/X86RegisterInfo.td24
-rw-r--r--contrib/llvm/lib/Target/X86/X86SchedHaswell.td140
-rw-r--r--contrib/llvm/lib/Target/X86/X86SchedSandyBridge.td118
-rw-r--r--contrib/llvm/lib/Target/X86/X86Schedule.td40
-rw-r--r--contrib/llvm/lib/Target/X86/X86ScheduleAtom.td5
-rw-r--r--contrib/llvm/lib/Target/X86/X86ScheduleSLM.td850
-rw-r--r--contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp87
-rw-r--r--contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.h14
-rw-r--r--contrib/llvm/lib/Target/X86/X86Subtarget.cpp420
-rw-r--r--contrib/llvm/lib/Target/X86/X86Subtarget.h140
-rw-r--r--contrib/llvm/lib/Target/X86/X86TargetMachine.cpp116
-rw-r--r--contrib/llvm/lib/Target/X86/X86TargetMachine.h109
-rw-r--r--contrib/llvm/lib/Target/X86/X86TargetObjectFile.cpp142
-rw-r--r--contrib/llvm/lib/Target/X86/X86TargetObjectFile.h32
-rw-r--r--contrib/llvm/lib/Target/X86/X86TargetTransformInfo.cpp504
-rw-r--r--contrib/llvm/lib/Target/X86/X86VZeroUpper.cpp350
-rw-r--r--contrib/llvm/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp19
-rw-r--r--contrib/llvm/lib/Target/XCore/InstPrinter/XCoreInstPrinter.cpp3
-rw-r--r--contrib/llvm/lib/Target/XCore/InstPrinter/XCoreInstPrinter.h4
-rw-r--r--contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp5
-rw-r--r--contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.h2
-rw-r--r--contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp63
-rw-r--r--contrib/llvm/lib/Target/XCore/XCore.h1
-rw-r--r--contrib/llvm/lib/Target/XCore/XCore.td6
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp94
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreCallingConv.td6
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp556
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreFrameLowering.h28
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreFrameToArgsOffsetElim.cpp62
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp44
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp564
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreISelLowering.h76
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreInstrInfo.cpp76
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreInstrInfo.h82
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td45
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp30
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreMCInstLower.cpp2
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.cpp58
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.h79
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp292
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h28
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreSelectionDAGInfo.cpp43
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreSelectionDAGInfo.h11
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreSubtarget.cpp16
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreSubtarget.h21
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp20
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreTargetMachine.h42
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreTargetObjectFile.cpp149
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreTargetObjectFile.h21
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreTargetStreamer.h27
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreTargetTransformInfo.cpp21
-rw-r--r--contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp127
-rw-r--r--contrib/llvm/lib/Transforms/IPO/BarrierNoopPass.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp24
-rw-r--r--contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp116
-rw-r--r--contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp11
-rw-r--r--contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp86
-rw-r--r--contrib/llvm/lib/Transforms/IPO/GlobalDCE.cpp66
-rw-r--r--contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp619
-rw-r--r--contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp42
-rw-r--r--contrib/llvm/lib/Transforms/IPO/IPO.cpp1
-rw-r--r--contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp23
-rw-r--r--contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp34
-rw-r--r--contrib/llvm/lib/Transforms/IPO/Inliner.cpp104
-rw-r--r--contrib/llvm/lib/Transforms/IPO/Internalize.cpp32
-rw-r--r--contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp24
-rw-r--r--contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp1424
-rw-r--r--contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp36
-rw-r--r--contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp100
-rw-r--r--contrib/llvm/lib/Transforms/IPO/PruneEH.cpp15
-rw-r--r--contrib/llvm/lib/Transforms/IPO/StripDeadPrototypes.cpp5
-rw-r--r--contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp34
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombine.h136
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp496
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp276
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp451
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp364
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp686
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp274
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp362
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp118
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp231
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp109
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp91
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp249
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineWorklist.h12
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp740
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp1146
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp33
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp296
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/DebugIR.cpp87
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/DebugIR.h11
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp224
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp879
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp139
-rw-r--r--contrib/llvm/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h40
-rw-r--r--contrib/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp7
-rw-r--r--contrib/llvm/lib/Transforms/ObjCARC/ObjCARC.h5
-rw-r--r--contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp19
-rw-r--r--contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.cpp3
-rw-r--r--contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.h22
-rw-r--r--contrib/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp39
-rw-r--r--contrib/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp12
-rw-r--r--contrib/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp113
-rw-r--r--contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.cpp7
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/ADCE.cpp14
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp602
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp19
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp14
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/DCE.cpp18
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp124
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp55
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/FlattenCFGPass.cpp9
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/GVN.cpp313
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp290
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp93
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LICM.cpp365
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoadCombine.cpp268
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp20
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp133
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp40
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp95
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp108
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp854
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp404
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp106
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LowerAtomic.cpp14
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp163
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp632
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp7
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp130
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/Reg2Mem.cpp20
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/SCCP.cpp105
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/SROA.cpp892
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/SampleProfile.cpp1098
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/Scalar.cpp17
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp395
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/Scalarizer.cpp663
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp776
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp31
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/Sink.cpp54
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp89
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp395
-rw-r--r--contrib/llvm/lib/Transforms/Utils/ASanStackFrameLayout.cpp114
-rw-r--r--contrib/llvm/lib/Transforms/Utils/AddDiscriminators.cpp222
-rw-r--r--contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp221
-rw-r--r--contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp103
-rw-r--r--contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp77
-rw-r--r--contrib/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp7
-rw-r--r--contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp106
-rw-r--r--contrib/llvm/lib/Transforms/Utils/CloneModule.cpp12
-rw-r--r--contrib/llvm/lib/Transforms/Utils/CmpInstAnalysis.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp48
-rw-r--r--contrib/llvm/lib/Transforms/Utils/CtorUtils.cpp183
-rw-r--r--contrib/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp18
-rw-r--r--contrib/llvm/lib/Transforms/Utils/FlattenCFG.cpp16
-rw-r--r--contrib/llvm/lib/Transforms/Utils/GlobalStatus.cpp24
-rw-r--r--contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp342
-rw-r--r--contrib/llvm/lib/Transforms/Utils/InstructionNamer.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/Utils/IntegerDivision.cpp247
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LCSSA.cpp368
-rw-r--r--contrib/llvm/lib/Transforms/Utils/Local.cpp156
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp829
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp102
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp36
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LowerExpectIntrinsic.cpp28
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LowerInvoke.cpp521
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp170
-rw-r--r--contrib/llvm/lib/Transforms/Utils/Mem2Reg.cpp15
-rw-r--r--contrib/llvm/lib/Transforms/Utils/MetaRenamer.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/Utils/ModuleUtils.cpp29
-rw-r--r--contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp71
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp25
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp563
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp118
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp40
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp712
-rw-r--r--contrib/llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp8
-rw-r--r--contrib/llvm/lib/Transforms/Utils/Utils.cpp3
-rw-r--r--contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp20
-rw-r--r--contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp255
-rw-r--r--contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp2251
-rw-r--r--contrib/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp788
-rw-r--r--contrib/llvm/lib/Transforms/Vectorize/Vectorize.cpp2
1496 files changed, 212539 insertions, 107484 deletions
diff --git a/contrib/llvm/lib/Analysis/AliasAnalysis.cpp b/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
index b8b6d37..8b8106b 100644
--- a/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -25,12 +25,12 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/CFG.h"
-#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
@@ -60,6 +60,13 @@ bool AliasAnalysis::pointsToConstantMemory(const Location &Loc,
return AA->pointsToConstantMemory(Loc, OrLocal);
}
+AliasAnalysis::Location
+AliasAnalysis::getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
+ AliasAnalysis::ModRefResult &Mask) {
+ assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
+ return AA->getArgLocation(CS, ArgIdx, Mask);
+}
+
void AliasAnalysis::deleteValue(Value *V) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
AA->deleteValue(V);
@@ -91,22 +98,26 @@ AliasAnalysis::getModRefInfo(ImmutableCallSite CS,
if (onlyAccessesArgPointees(MRB)) {
bool doesAlias = false;
+ ModRefResult AllArgsMask = NoModRef;
if (doesAccessArgPointees(MRB)) {
- MDNode *CSTag = CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa);
for (ImmutableCallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
AI != AE; ++AI) {
const Value *Arg = *AI;
if (!Arg->getType()->isPointerTy())
continue;
- Location CSLoc(Arg, UnknownSize, CSTag);
+ ModRefResult ArgMask;
+ Location CSLoc =
+ getArgLocation(CS, (unsigned) std::distance(CS.arg_begin(), AI),
+ ArgMask);
if (!isNoAlias(CSLoc, Loc)) {
doesAlias = true;
- break;
+ AllArgsMask = ModRefResult(AllArgsMask | ArgMask);
}
}
}
if (!doesAlias)
return NoModRef;
+ Mask = ModRefResult(Mask & AllArgsMask);
}
// If Loc is a constant memory location, the call definitely could not
@@ -150,14 +161,23 @@ AliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
if (onlyAccessesArgPointees(CS2B)) {
AliasAnalysis::ModRefResult R = NoModRef;
if (doesAccessArgPointees(CS2B)) {
- MDNode *CS2Tag = CS2.getInstruction()->getMetadata(LLVMContext::MD_tbaa);
for (ImmutableCallSite::arg_iterator
I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
const Value *Arg = *I;
if (!Arg->getType()->isPointerTy())
continue;
- Location CS2Loc(Arg, UnknownSize, CS2Tag);
- R = ModRefResult((R | getModRefInfo(CS1, CS2Loc)) & Mask);
+ ModRefResult ArgMask;
+ Location CS2Loc =
+ getArgLocation(CS2, (unsigned) std::distance(CS2.arg_begin(), I),
+ ArgMask);
+ // ArgMask indicates what CS2 might do to CS2Loc, and the dependence of
+ // CS1 on that location is the inverse.
+ if (ArgMask == Mod)
+ ArgMask = ModRef;
+ else if (ArgMask == Ref)
+ ArgMask = Mod;
+
+ R = ModRefResult((R | (getModRefInfo(CS1, CS2Loc) & ArgMask)) & Mask);
if (R == Mask)
break;
}
@@ -170,21 +190,28 @@ AliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
if (onlyAccessesArgPointees(CS1B)) {
AliasAnalysis::ModRefResult R = NoModRef;
if (doesAccessArgPointees(CS1B)) {
- MDNode *CS1Tag = CS1.getInstruction()->getMetadata(LLVMContext::MD_tbaa);
for (ImmutableCallSite::arg_iterator
I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) {
const Value *Arg = *I;
if (!Arg->getType()->isPointerTy())
continue;
- Location CS1Loc(Arg, UnknownSize, CS1Tag);
- if (getModRefInfo(CS2, CS1Loc) != NoModRef) {
- R = Mask;
+ ModRefResult ArgMask;
+ Location CS1Loc =
+ getArgLocation(CS1, (unsigned) std::distance(CS1.arg_begin(), I),
+ ArgMask);
+ // ArgMask indicates what CS1 might do to CS1Loc; if CS1 might Mod
+ // CS1Loc, then we care about either a Mod or a Ref by CS2. If CS1
+ // might Ref, then we care only about a Mod by CS2.
+ ModRefResult ArgR = getModRefInfo(CS2, CS1Loc);
+ if (((ArgMask & Mod) != NoModRef && (ArgR & ModRef) != NoModRef) ||
+ ((ArgMask & Ref) != NoModRef && (ArgR & Mod) != NoModRef))
+ R = ModRefResult((R | ArgMask) & Mask);
+
+ if (R == Mask)
break;
- }
}
}
- if (R == NoModRef)
- return R;
+ return R;
}
// If this is the end of the chain, don't forward.
@@ -338,7 +365,7 @@ AliasAnalysis::getModRefInfo(const VAArgInst *V, const Location &Loc) {
AliasAnalysis::ModRefResult
AliasAnalysis::getModRefInfo(const AtomicCmpXchgInst *CX, const Location &Loc) {
// Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
- if (CX->getOrdering() > Monotonic)
+ if (CX->getSuccessOrdering() > Monotonic)
return ModRef;
// If the cmpxchg address does not alias the location, it does not access it.
@@ -361,53 +388,6 @@ AliasAnalysis::getModRefInfo(const AtomicRMWInst *RMW, const Location &Loc) {
return ModRef;
}
-namespace {
- /// Only find pointer captures which happen before the given instruction. Uses
- /// the dominator tree to determine whether one instruction is before another.
- /// Only support the case where the Value is defined in the same basic block
- /// as the given instruction and the use.
- struct CapturesBefore : public CaptureTracker {
- CapturesBefore(const Instruction *I, DominatorTree *DT)
- : BeforeHere(I), DT(DT), Captured(false) {}
-
- void tooManyUses() { Captured = true; }
-
- bool shouldExplore(Use *U) {
- Instruction *I = cast<Instruction>(U->getUser());
- BasicBlock *BB = I->getParent();
- // We explore this usage only if the usage can reach "BeforeHere".
- // If use is not reachable from entry, there is no need to explore.
- if (BeforeHere != I && !DT->isReachableFromEntry(BB))
- return false;
- // If the value is defined in the same basic block as use and BeforeHere,
- // there is no need to explore the use if BeforeHere dominates use.
- // Check whether there is a path from I to BeforeHere.
- if (BeforeHere != I && DT->dominates(BeforeHere, I) &&
- !isPotentiallyReachable(I, BeforeHere, DT))
- return false;
- return true;
- }
-
- bool captured(Use *U) {
- Instruction *I = cast<Instruction>(U->getUser());
- BasicBlock *BB = I->getParent();
- // Same logic as in shouldExplore.
- if (BeforeHere != I && !DT->isReachableFromEntry(BB))
- return false;
- if (BeforeHere != I && DT->dominates(BeforeHere, I) &&
- !isPotentiallyReachable(I, BeforeHere, DT))
- return false;
- Captured = true;
- return true;
- }
-
- const Instruction *BeforeHere;
- DominatorTree *DT;
-
- bool Captured;
- };
-}
-
// FIXME: this is really just shoring-up a deficiency in alias analysis.
// BasicAA isn't willing to spend linear time determining whether an alloca
// was captured before or after this particular call, while we are. However,
@@ -416,9 +396,9 @@ AliasAnalysis::ModRefResult
AliasAnalysis::callCapturesBefore(const Instruction *I,
const AliasAnalysis::Location &MemLoc,
DominatorTree *DT) {
- if (!DT || !TD) return AliasAnalysis::ModRef;
+ if (!DT || !DL) return AliasAnalysis::ModRef;
- const Value *Object = GetUnderlyingObject(MemLoc.Ptr, TD);
+ const Value *Object = GetUnderlyingObject(MemLoc.Ptr, DL);
if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
isa<Constant>(Object))
return AliasAnalysis::ModRef;
@@ -427,9 +407,9 @@ AliasAnalysis::callCapturesBefore(const Instruction *I,
if (!CS.getInstruction() || CS.getInstruction() == Object)
return AliasAnalysis::ModRef;
- CapturesBefore CB(I, DT);
- llvm::PointerMayBeCaptured(Object, &CB);
- if (CB.Captured)
+ if (llvm::PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true,
+ /* StoreCaptures */ true, I, DT,
+ /* include Object */ true))
return AliasAnalysis::ModRef;
unsigned ArgNo = 0;
@@ -472,7 +452,8 @@ AliasAnalysis::~AliasAnalysis() {}
/// AliasAnalysis interface before any other methods are called.
///
void AliasAnalysis::InitializeAliasAnalysis(Pass *P) {
- TD = P->getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = P->getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = P->getAnalysisIfAvailable<TargetLibraryInfo>();
AA = &P->getAnalysis<AliasAnalysis>();
}
@@ -487,7 +468,7 @@ void AliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
/// if known, or a conservative value otherwise.
///
uint64_t AliasAnalysis::getTypeStoreSize(Type *Ty) {
- return TD ? TD->getTypeStoreSize(Ty) : UnknownSize;
+ return DL ? DL->getTypeStoreSize(Ty) : UnknownSize;
}
/// canBasicBlockModify - Return true if it is possible for execution of the
@@ -554,3 +535,14 @@ bool llvm::isIdentifiedObject(const Value *V) {
return A->hasNoAliasAttr() || A->hasByValAttr();
return false;
}
+
+/// isIdentifiedFunctionLocal - Return true if V is unambiguously identified
+/// at the function-level. Different IdentifiedFunctionLocals can't alias.
+/// Further, an IdentifiedFunctionLocal can not alias with any function
+/// arguments other than itself, which is not necessarily true for
+/// IdentifiedObjects.
+bool llvm::isIdentifiedFunctionLocal(const Value *V)
+{
+ return isa<AllocaInst>(V) || isNoAliasCall(V) || isNoAliasArgument(V);
+}
+
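
The hunks above replace the old TBAA-tagged UnknownSize locations with per-argument locations from getArgLocation() and combine the masks it returns. The standalone sketch below is an editor's illustration, not part of the patch: the enum mirrors the ModRefResult values used above, and the helper name invertArgMask is invented here for the inline step in the new getModRefInfo(CS1, CS2).

// Illustration only -- not from the patch. Mirrors the ModRefResult values
// used in the hunks above; invertArgMask is a made-up name for the inline
// inversion step in the new getModRefInfo(CS1, CS2).
#include <cstdio>

enum ModRefResult { NoModRef = 0, Ref = 1, Mod = 2, ModRef = Ref | Mod };

// ArgMask says what CS2 may do to one of its argument locations. The
// dependence of CS1 on that location is the inverse: a location CS2 may
// write matters whether CS1 reads or writes it (ModRef), while a location
// CS2 only reads matters only if CS1 may write it (Mod).
static ModRefResult invertArgMask(ModRefResult ArgMask) {
  if (ArgMask == Mod)
    return ModRef;
  if (ArgMask == Ref)
    return Mod;
  return ArgMask;
}

int main() {
  std::printf("Mod -> %d, Ref -> %d\n", (int)invertArgMask(Mod),
              (int)invertArgMask(Ref));
  return 0;
}
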
diff --git a/contrib/llvm/lib/Analysis/AliasAnalysisCounter.cpp b/contrib/llvm/lib/Analysis/AliasAnalysisCounter.cpp
index 9f4a47c..b860914 100644
--- a/contrib/llvm/lib/Analysis/AliasAnalysisCounter.cpp
+++ b/contrib/llvm/lib/Analysis/AliasAnalysisCounter.cpp
@@ -14,7 +14,6 @@
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -75,13 +74,13 @@ namespace {
}
}
- bool runOnModule(Module &M) {
+ bool runOnModule(Module &M) override {
this->M = &M;
InitializeAliasAnalysis(this);
return false;
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AliasAnalysis::getAnalysisUsage(AU);
AU.addRequired<AliasAnalysis>();
AU.setPreservesAll();
@@ -91,25 +90,25 @@ namespace {
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ void *getAdjustedAnalysisPointer(AnalysisID PI) override {
if (PI == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
}
// FIXME: We could count these too...
- bool pointsToConstantMemory(const Location &Loc, bool OrLocal) {
+ bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override {
return getAnalysis<AliasAnalysis>().pointsToConstantMemory(Loc, OrLocal);
}
// Forwarding functions: just delegate to a real AA implementation, counting
// the number of responses...
- AliasResult alias(const Location &LocA, const Location &LocB);
+ AliasResult alias(const Location &LocA, const Location &LocB) override;
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc);
+ const Location &Loc) override;
ModRefResult getModRefInfo(ImmutableCallSite CS1,
- ImmutableCallSite CS2) {
+ ImmutableCallSite CS2) override {
return AliasAnalysis::getModRefInfo(CS1,CS2);
}
};
@@ -127,7 +126,7 @@ AliasAnalysis::AliasResult
AliasAnalysisCounter::alias(const Location &LocA, const Location &LocB) {
AliasResult R = getAnalysis<AliasAnalysis>().alias(LocA, LocB);
- const char *AliasString = 0;
+ const char *AliasString = nullptr;
switch (R) {
case NoAlias: No++; AliasString = "No alias"; break;
case MayAlias: May++; AliasString = "May alias"; break;
@@ -138,10 +137,10 @@ AliasAnalysisCounter::alias(const Location &LocA, const Location &LocB) {
if (PrintAll || (PrintAllFailures && R == MayAlias)) {
errs() << AliasString << ":\t";
errs() << "[" << LocA.Size << "B] ";
- WriteAsOperand(errs(), LocA.Ptr, true, M);
+ LocA.Ptr->printAsOperand(errs(), true, M);
errs() << ", ";
errs() << "[" << LocB.Size << "B] ";
- WriteAsOperand(errs(), LocB.Ptr, true, M);
+ LocB.Ptr->printAsOperand(errs(), true, M);
errs() << "\n";
}
@@ -153,7 +152,7 @@ AliasAnalysisCounter::getModRefInfo(ImmutableCallSite CS,
const Location &Loc) {
ModRefResult R = getAnalysis<AliasAnalysis>().getModRefInfo(CS, Loc);
- const char *MRString = 0;
+ const char *MRString = nullptr;
switch (R) {
case NoModRef: NoMR++; MRString = "NoModRef"; break;
case Ref: JustRef++; MRString = "JustRef"; break;
@@ -164,7 +163,7 @@ AliasAnalysisCounter::getModRefInfo(ImmutableCallSite CS,
if (PrintAll || (PrintAllFailures && R == ModRef)) {
errs() << MRString << ": Ptr: ";
errs() << "[" << Loc.Size << "B] ";
- WriteAsOperand(errs(), Loc.Ptr, true, M);
+ Loc.Ptr->printAsOperand(errs(), true, M);
errs() << "\t<->" << *CS.getInstruction() << '\n';
}
return R;
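
As elsewhere in this merge, the counter's value printing moves from the removed llvm/Assembly/Writer.h helper to a Value member function. A minimal sketch of the replacement call, assuming an llvm::Value *V and llvm::Module *M are already in scope (the wrapper name printOperand is invented for illustration):

#include "llvm/IR/Module.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/raw_ostream.h"

// Illustration only. Pre-3.5 code called the free function:
//   WriteAsOperand(llvm::errs(), V, /*PrintType=*/true, M);
// The merged code uses the member function instead:
static void printOperand(const llvm::Value *V, const llvm::Module *M) {
  V->printAsOperand(llvm::errs(), /*PrintType=*/true, M);
}
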
diff --git a/contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp b/contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
index a571463..d9fa5a5 100644
--- a/contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
+++ b/contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
@@ -20,15 +20,14 @@
#include "llvm/Analysis/Passes.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/InstIterator.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -57,12 +56,12 @@ namespace {
initializeAAEvalPass(*PassRegistry::getPassRegistry());
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<AliasAnalysis>();
AU.setPreservesAll();
}
- bool doInitialization(Module &M) {
+ bool doInitialization(Module &M) override {
NoAlias = MayAlias = PartialAlias = MustAlias = 0;
NoModRef = Mod = Ref = ModRef = 0;
@@ -74,8 +73,8 @@ namespace {
return false;
}
- bool runOnFunction(Function &F);
- bool doFinalization(Module &M);
+ bool runOnFunction(Function &F) override;
+ bool doFinalization(Module &M) override;
};
}
@@ -94,8 +93,8 @@ static void PrintResults(const char *Msg, bool P, const Value *V1,
std::string o1, o2;
{
raw_string_ostream os1(o1), os2(o2);
- WriteAsOperand(os1, V1, true, M);
- WriteAsOperand(os2, V2, true, M);
+ V1->printAsOperand(os1, true, M);
+ V2->printAsOperand(os2, true, M);
}
if (o2 < o1)
@@ -111,7 +110,7 @@ PrintModRefResults(const char *Msg, bool P, Instruction *I, Value *Ptr,
Module *M) {
if (P) {
errs() << " " << Msg << ": Ptr: ";
- WriteAsOperand(errs(), Ptr, true, M);
+ Ptr->printAsOperand(errs(), true, M);
errs() << "\t<->" << *I << '\n';
}
}
diff --git a/contrib/llvm/lib/Analysis/AliasDebugger.cpp b/contrib/llvm/lib/Analysis/AliasDebugger.cpp
index f6178e3..5d61cf9 100644
--- a/contrib/llvm/lib/Analysis/AliasDebugger.cpp
+++ b/contrib/llvm/lib/Analysis/AliasDebugger.cpp
@@ -43,7 +43,7 @@ namespace {
initializeAliasDebuggerPass(*PassRegistry::getPassRegistry());
}
- bool runOnModule(Module &M) {
+ bool runOnModule(Module &M) override {
InitializeAliasAnalysis(this); // set up super class
for(Module::global_iterator I = M.global_begin(),
@@ -76,7 +76,7 @@ namespace {
return false;
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AliasAnalysis::getAnalysisUsage(AU);
AU.setPreservesAll(); // Does not transform code
}
@@ -85,7 +85,7 @@ namespace {
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ void *getAdjustedAnalysisPointer(AnalysisID PI) override {
if (PI == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
@@ -94,7 +94,7 @@ namespace {
//------------------------------------------------
// Implement the AliasAnalysis API
//
- AliasResult alias(const Location &LocA, const Location &LocB) {
+ AliasResult alias(const Location &LocA, const Location &LocB) override {
assert(Vals.find(LocA.Ptr) != Vals.end() &&
"Never seen value in AA before");
assert(Vals.find(LocB.Ptr) != Vals.end() &&
@@ -103,26 +103,26 @@ namespace {
}
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) {
+ const Location &Loc) override {
assert(Vals.find(Loc.Ptr) != Vals.end() && "Never seen value in AA before");
return AliasAnalysis::getModRefInfo(CS, Loc);
}
ModRefResult getModRefInfo(ImmutableCallSite CS1,
- ImmutableCallSite CS2) {
+ ImmutableCallSite CS2) override {
return AliasAnalysis::getModRefInfo(CS1,CS2);
}
-
- bool pointsToConstantMemory(const Location &Loc, bool OrLocal) {
+
+ bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override {
assert(Vals.find(Loc.Ptr) != Vals.end() && "Never seen value in AA before");
return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
}
- virtual void deleteValue(Value *V) {
+ void deleteValue(Value *V) override {
assert(Vals.find(V) != Vals.end() && "Never seen value in AA before");
AliasAnalysis::deleteValue(V);
}
- virtual void copyValue(Value *From, Value *To) {
+ void copyValue(Value *From, Value *To) override {
Vals.insert(To);
AliasAnalysis::copyValue(From, To);
}
diff --git a/contrib/llvm/lib/Analysis/AliasSetTracker.cpp b/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
index 2289c12..a45fe23 100644
--- a/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
+++ b/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
@@ -13,8 +13,8 @@
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
@@ -22,7 +22,6 @@
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/InstIterator.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -73,16 +72,16 @@ void AliasSet::mergeSetIn(AliasSet &AS, AliasSetTracker &AST) {
AS.PtrList->setPrevInList(PtrListEnd);
PtrListEnd = AS.PtrListEnd;
- AS.PtrList = 0;
+ AS.PtrList = nullptr;
AS.PtrListEnd = &AS.PtrList;
- assert(*AS.PtrListEnd == 0 && "End of list is not null?");
+ assert(*AS.PtrListEnd == nullptr && "End of list is not null?");
}
}
void AliasSetTracker::removeAliasSet(AliasSet *AS) {
if (AliasSet *Fwd = AS->Forward) {
Fwd->dropRef(*this);
- AS->Forward = 0;
+ AS->Forward = nullptr;
}
AliasSets.erase(AS);
}
@@ -116,10 +115,10 @@ void AliasSet::addPointer(AliasSetTracker &AST, PointerRec &Entry,
Entry.updateSizeAndTBAAInfo(Size, TBAAInfo);
// Add it to the end of the list...
- assert(*PtrListEnd == 0 && "End of list is not null?");
+ assert(*PtrListEnd == nullptr && "End of list is not null?");
*PtrListEnd = &Entry;
PtrListEnd = Entry.setPrevInList(PtrListEnd);
- assert(*PtrListEnd == 0 && "End of list is not null?");
+ assert(*PtrListEnd == nullptr && "End of list is not null?");
addRef(); // Entry points to alias set.
}
@@ -218,11 +217,11 @@ void AliasSetTracker::clear() {
AliasSet *AliasSetTracker::findAliasSetForPointer(const Value *Ptr,
uint64_t Size,
const MDNode *TBAAInfo) {
- AliasSet *FoundSet = 0;
+ AliasSet *FoundSet = nullptr;
for (iterator I = begin(), E = end(); I != E; ++I) {
if (I->Forward || !I->aliasesPointer(Ptr, Size, TBAAInfo, AA)) continue;
- if (FoundSet == 0) { // If this is the first alias set ptr can go into.
+ if (!FoundSet) { // If this is the first alias set ptr can go into.
FoundSet = I; // Remember it.
} else { // Otherwise, we must merge the sets.
FoundSet->mergeSetIn(*I, *this); // Merge in contents.
@@ -246,12 +245,12 @@ bool AliasSetTracker::containsPointer(Value *Ptr, uint64_t Size,
AliasSet *AliasSetTracker::findAliasSetForUnknownInst(Instruction *Inst) {
- AliasSet *FoundSet = 0;
+ AliasSet *FoundSet = nullptr;
for (iterator I = begin(), E = end(); I != E; ++I) {
if (I->Forward || !I->aliasesUnknownInst(Inst, AA))
continue;
- if (FoundSet == 0) // If this is the first alias set ptr can go into.
+ if (!FoundSet) // If this is the first alias set ptr can go into.
FoundSet = I; // Remember it.
else if (!I->Forward) // Otherwise, we must merge the sets.
FoundSet->mergeSetIn(*I, *this); // Merge in contents.
@@ -566,7 +565,7 @@ void AliasSet::print(raw_ostream &OS) const {
OS << "Pointers: ";
for (iterator I = begin(), E = end(); I != E; ++I) {
if (I != begin()) OS << ", ";
- WriteAsOperand(OS << "(", I.getPointer());
+ I.getPointer()->printAsOperand(OS << "(");
OS << ", " << I.getSize() << ")";
}
}
@@ -574,7 +573,7 @@ void AliasSet::print(raw_ostream &OS) const {
OS << "\n " << UnknownInsts.size() << " Unknown instructions: ";
for (unsigned i = 0, e = UnknownInsts.size(); i != e; ++i) {
if (i) OS << ", ";
- WriteAsOperand(OS, UnknownInsts[i]);
+ UnknownInsts[i]->printAsOperand(OS);
}
}
OS << "\n";
@@ -628,12 +627,12 @@ namespace {
initializeAliasSetPrinterPass(*PassRegistry::getPassRegistry());
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
AU.addRequired<AliasAnalysis>();
}
- virtual bool runOnFunction(Function &F) {
+ bool runOnFunction(Function &F) override {
Tracker = new AliasSetTracker(getAnalysis<AliasAnalysis>());
for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
diff --git a/contrib/llvm/lib/Analysis/Analysis.cpp b/contrib/llvm/lib/Analysis/Analysis.cpp
index 98f2a55..c58012f 100644
--- a/contrib/llvm/lib/Analysis/Analysis.cpp
+++ b/contrib/llvm/lib/Analysis/Analysis.cpp
@@ -9,10 +9,11 @@
#include "llvm-c/Analysis.h"
#include "llvm-c/Initialization.h"
-#include "llvm/Analysis/Verifier.h"
-#include "llvm/InitializePasses.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/InitializePasses.h"
#include "llvm/PassRegistry.h"
+#include "llvm/Support/raw_ostream.h"
#include <cstring>
using namespace llvm;
@@ -47,6 +48,7 @@ void llvm::initializeAnalysis(PassRegistry &Registry) {
initializeIVUsersPass(Registry);
initializeInstCountPass(Registry);
initializeIntervalPartitionPass(Registry);
+ initializeJumpInstrTableInfoPass(Registry);
initializeLazyValueInfoPass(Registry);
initializeLibCallAliasAnalysisPass(Registry);
initializeLintPass(Registry);
@@ -55,7 +57,7 @@ void llvm::initializeAnalysis(PassRegistry &Registry) {
initializeMemoryDependenceAnalysisPass(Registry);
initializeModuleDebugInfoPrinterPass(Registry);
initializePostDominatorTreePass(Registry);
- initializeRegionInfoPass(Registry);
+ initializeRegionInfoPassPass(Registry);
initializeRegionViewerPass(Registry);
initializeRegionPrinterPass(Registry);
initializeRegionOnlyViewerPass(Registry);
@@ -72,21 +74,34 @@ void LLVMInitializeAnalysis(LLVMPassRegistryRef R) {
LLVMBool LLVMVerifyModule(LLVMModuleRef M, LLVMVerifierFailureAction Action,
char **OutMessages) {
+ raw_ostream *DebugOS = Action != LLVMReturnStatusAction ? &errs() : nullptr;
std::string Messages;
+ raw_string_ostream MsgsOS(Messages);
+
+ LLVMBool Result = verifyModule(*unwrap(M), OutMessages ? &MsgsOS : DebugOS);
+
+ // Duplicate the output to stderr.
+ if (DebugOS && OutMessages)
+ *DebugOS << MsgsOS.str();
- LLVMBool Result = verifyModule(*unwrap(M),
- static_cast<VerifierFailureAction>(Action),
- OutMessages? &Messages : 0);
+ if (Action == LLVMAbortProcessAction && Result)
+ report_fatal_error("Broken module found, compilation aborted!");
if (OutMessages)
- *OutMessages = strdup(Messages.c_str());
+ *OutMessages = strdup(MsgsOS.str().c_str());
return Result;
}
LLVMBool LLVMVerifyFunction(LLVMValueRef Fn, LLVMVerifierFailureAction Action) {
- return verifyFunction(*unwrap<Function>(Fn),
- static_cast<VerifierFailureAction>(Action));
+ LLVMBool Result = verifyFunction(
+ *unwrap<Function>(Fn), Action != LLVMReturnStatusAction ? &errs()
+ : nullptr);
+
+ if (Action == LLVMAbortProcessAction && Result)
+ report_fatal_error("Broken function found, compilation aborted!");
+
+ return Result;
}
void LLVMViewFunctionCFG(LLVMValueRef Fn) {
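Illustrative only, not part of the patch: a minimal sketch of how the rewritten C API entry point above behaves when called with LLVMReturnStatusAction, assuming the llvm-c headers are available. The module name "demo" is arbitrary.

#include <llvm-c/Analysis.h>
#include <llvm-c/Core.h>
#include <cstdio>

int main() {
  LLVMModuleRef M = LLVMModuleCreateWithName("demo");
  char *Msg = nullptr;
  // With LLVMReturnStatusAction the verifier neither prints to stderr nor
  // aborts; any diagnostics come back through Msg (empty here, since an
  // empty module verifies cleanly).
  LLVMBool Broken = LLVMVerifyModule(M, LLVMReturnStatusAction, &Msg);
  std::printf("broken: %d\n", (int)Broken);
  LLVMDisposeMessage(Msg);
  LLVMDisposeModule(M);
  return 0;
}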
diff --git a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index d0e186c..38ec52d 100644
--- a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -17,9 +17,8 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/CFG.h"
-#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
@@ -27,7 +26,9 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
@@ -36,7 +37,6 @@
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include <algorithm>
using namespace llvm;
@@ -47,6 +47,11 @@ using namespace llvm;
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;
+// The maximum search depth for DecomposeGEPExpression() and
+// GetUnderlyingObject(). Both functions must use the same search depth;
+// otherwise the algorithm in aliasGEP will assert.
+static const unsigned MaxLookupSearchDepth = 6;
+
//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//
@@ -93,11 +98,11 @@ static bool isEscapeSource(const Value *V) {
/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
-static uint64_t getObjectSize(const Value *V, const DataLayout &TD,
+static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
const TargetLibraryInfo &TLI,
bool RoundToAlign = false) {
uint64_t Size;
- if (getObjectSize(V, Size, &TD, &TLI, RoundToAlign))
+ if (getObjectSize(V, Size, &DL, &TLI, RoundToAlign))
return Size;
return AliasAnalysis::UnknownSize;
}
@@ -105,7 +110,7 @@ static uint64_t getObjectSize(const Value *V, const DataLayout &TD,
/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
- const DataLayout &TD,
+ const DataLayout &DL,
const TargetLibraryInfo &TLI) {
// Note that the meanings of the "object" are slightly different in the
// following contexts:
@@ -138,7 +143,7 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size,
// This function needs to use the aligned object size because we allow
// reads a bit past the end given sufficient alignment.
- uint64_t ObjectSize = getObjectSize(V, TD, TLI, /*RoundToAlign*/true);
+ uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/true);
return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
}
@@ -146,22 +151,11 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size,
/// isObjectSize - Return true if we can prove that the object specified
/// by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size,
- const DataLayout &TD, const TargetLibraryInfo &TLI) {
- uint64_t ObjectSize = getObjectSize(V, TD, TLI);
+ const DataLayout &DL, const TargetLibraryInfo &TLI) {
+ uint64_t ObjectSize = getObjectSize(V, DL, TLI);
return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size;
}
-/// isIdentifiedFunctionLocal - Return true if V is umabigously identified
-/// at the function-level. Different IdentifiedFunctionLocals can't alias.
-/// Further, an IdentifiedFunctionLocal can not alias with any function
-/// arguments other than itself, which is not neccessarily true for
-/// IdentifiedObjects.
-static bool isIdentifiedFunctionLocal(const Value *V)
-{
- return isa<AllocaInst>(V) || isNoAliasCall(V) || isNoAliasArgument(V);
-}
-
-
//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//
@@ -200,7 +194,7 @@ namespace {
/// represented in the result.
static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
ExtensionKind &Extension,
- const DataLayout &TD, unsigned Depth) {
+ const DataLayout &DL, unsigned Depth) {
assert(V->getType()->isIntegerTy() && "Not an integer value");
// Limit our recursion depth.
@@ -217,23 +211,23 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
case Instruction::Or:
// X|C == X+C if all the bits in C are unset in X. Otherwise we can't
// analyze it.
- if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), &TD))
+ if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), &DL))
break;
// FALL THROUGH.
case Instruction::Add:
V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
- TD, Depth+1);
+ DL, Depth+1);
Offset += RHSC->getValue();
return V;
case Instruction::Mul:
V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
- TD, Depth+1);
+ DL, Depth+1);
Offset *= RHSC->getValue();
Scale *= RHSC->getValue();
return V;
case Instruction::Shl:
V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
- TD, Depth+1);
+ DL, Depth+1);
Offset <<= RHSC->getValue().getLimitedValue();
Scale <<= RHSC->getValue().getLimitedValue();
return V;
@@ -254,7 +248,7 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
Extension = isa<SExtInst>(V) ? EK_SignExt : EK_ZeroExt;
Value *Result = GetLinearExpression(CastOp, Scale, Offset, Extension,
- TD, Depth+1);
+ DL, Depth+1);
Scale = Scale.zext(OldWidth);
Offset = Offset.zext(OldWidth);
@@ -276,21 +270,24 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
/// the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
-/// that GetUnderlyingObject can look through. When not, it just looks
-/// through pointer casts.
+/// that GetUnderlyingObject can look through. To be able to do that,
+/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
+/// depth (MaxLookupSearchDepth).
+/// When DataLayout is not around, it just looks through pointer casts.
///
static const Value *
DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
SmallVectorImpl<VariableGEPIndex> &VarIndices,
- const DataLayout *TD) {
+ bool &MaxLookupReached, const DataLayout *DL) {
// Limit recursion depth to limit compile time in crazy cases.
- unsigned MaxLookup = 6;
+ unsigned MaxLookup = MaxLookupSearchDepth;
+ MaxLookupReached = false;
BaseOffs = 0;
do {
// See if this is a bitcast or GEP.
const Operator *Op = dyn_cast<Operator>(V);
- if (Op == 0) {
+ if (!Op) {
// The only non-operator case we can handle are GlobalAliases.
if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
if (!GA->mayBeOverridden()) {
@@ -301,19 +298,20 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
return V;
}
- if (Op->getOpcode() == Instruction::BitCast) {
+ if (Op->getOpcode() == Instruction::BitCast ||
+ Op->getOpcode() == Instruction::AddrSpaceCast) {
V = Op->getOperand(0);
continue;
}
const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
- if (GEPOp == 0) {
+ if (!GEPOp) {
// If it's not a GEP, hand it off to SimplifyInstruction to see if it
// can come up with something. This matches what GetUnderlyingObject does.
if (const Instruction *I = dyn_cast<Instruction>(V))
// TODO: Get a DominatorTree and use it here.
if (const Value *Simplified =
- SimplifyInstruction(const_cast<Instruction *>(I), TD)) {
+ SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
V = Simplified;
continue;
}
@@ -328,7 +326,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
// If we are lacking DataLayout information, we can't compute the offsets of
// elements computed by GEPs. However, we can handle bitcast equivalent
// GEPs.
- if (TD == 0) {
+ if (!DL) {
if (!GEPOp->hasAllZeroIndices())
return V;
V = GEPOp->getOperand(0);
@@ -347,30 +345,30 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
if (FieldNo == 0) continue;
- BaseOffs += TD->getStructLayout(STy)->getElementOffset(FieldNo);
+ BaseOffs += DL->getStructLayout(STy)->getElementOffset(FieldNo);
continue;
}
// For an array/pointer, add the element offset, explicitly scaled.
if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
if (CIdx->isZero()) continue;
- BaseOffs += TD->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
+ BaseOffs += DL->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
continue;
}
- uint64_t Scale = TD->getTypeAllocSize(*GTI);
+ uint64_t Scale = DL->getTypeAllocSize(*GTI);
ExtensionKind Extension = EK_NotExtended;
// If the integer type is smaller than the pointer size, it is implicitly
// sign extended to pointer size.
unsigned Width = Index->getType()->getIntegerBitWidth();
- if (TD->getPointerSizeInBits(AS) > Width)
+ if (DL->getPointerSizeInBits(AS) > Width)
Extension = EK_SignExt;
// Use GetLinearExpression to decompose the index into a C1*V+C2 form.
APInt IndexScale(Width, 0), IndexOffset(Width, 0);
Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension,
- *TD, 0);
+ *DL, 0);
// The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
// This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
@@ -392,7 +390,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
// Make sure that we have a scale that makes sense for this target's
// pointer size.
- if (unsigned ShiftBits = 64 - TD->getPointerSizeInBits(AS)) {
+ if (unsigned ShiftBits = 64 - DL->getPointerSizeInBits(AS)) {
Scale <<= ShiftBits;
Scale = (int64_t)Scale >> ShiftBits;
}
@@ -409,6 +407,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
} while (--MaxLookup);
// If the chain of expressions is too deep, just return early.
+ MaxLookupReached = true;
return V;
}
@@ -424,7 +423,7 @@ static const Function *getParent(const Value *V) {
if (const Argument *arg = dyn_cast<Argument>(V))
return arg->getParent();
- return NULL;
+ return nullptr;
}
static bool notDifferentParent(const Value *O1, const Value *O2) {
@@ -444,17 +443,16 @@ namespace {
initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry());
}
- virtual void initializePass() {
+ void initializePass() override {
InitializeAliasAnalysis(this);
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<AliasAnalysis>();
AU.addRequired<TargetLibraryInfo>();
}
- virtual AliasResult alias(const Location &LocA,
- const Location &LocB) {
+ AliasResult alias(const Location &LocA, const Location &LocB) override {
assert(AliasCache.empty() && "AliasCache must be cleared after use!");
assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
"BasicAliasAnalysis doesn't support interprocedural queries.");
@@ -469,32 +467,36 @@ namespace {
return Alias;
}
- virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc);
+ ModRefResult getModRefInfo(ImmutableCallSite CS,
+ const Location &Loc) override;
- virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
- ImmutableCallSite CS2) {
+ ModRefResult getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) override {
+ // The AliasAnalysis base class has some smarts, let's use them.
return AliasAnalysis::getModRefInfo(CS1, CS2);
}
/// pointsToConstantMemory - Chase pointers until we find a (constant
/// global) or not.
- virtual bool pointsToConstantMemory(const Location &Loc, bool OrLocal);
+ bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override;
+
+ /// Get the location associated with a pointer argument of a callsite.
+ Location getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
+ ModRefResult &Mask) override;
/// getModRefBehavior - Return the behavior when calling the given
/// call site.
- virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+ ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override;
/// getModRefBehavior - Return the behavior when calling the given function.
/// For use when the call site is not known.
- virtual ModRefBehavior getModRefBehavior(const Function *F);
+ ModRefBehavior getModRefBehavior(const Function *F) override;
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(const void *ID) {
+ void *getAdjustedAnalysisPointer(const void *ID) override {
if (ID == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
@@ -593,7 +595,7 @@ BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc, bool OrLocal) {
SmallVector<const Value *, 16> Worklist;
Worklist.push_back(Loc.Ptr);
do {
- const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), TD);
+ const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
if (!Visited.insert(V)) {
Visited.clear();
return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
@@ -645,6 +647,21 @@ BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc, bool OrLocal) {
return Worklist.empty();
}
+static bool isMemsetPattern16(const Function *MS,
+ const TargetLibraryInfo &TLI) {
+ if (TLI.has(LibFunc::memset_pattern16) &&
+ MS->getName() == "memset_pattern16") {
+ FunctionType *MemsetType = MS->getFunctionType();
+ if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
+ isa<PointerType>(MemsetType->getParamType(0)) &&
+ isa<PointerType>(MemsetType->getParamType(1)) &&
+ isa<IntegerType>(MemsetType->getParamType(2)))
+ return true;
+ }
+
+ return false;
+}
+
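Illustrative only, not part of the patch: a minimal sketch of the same name-plus-prototype gate as isMemsetPattern16 above, using a hypothetical FuncDesc record in place of llvm::Function and FunctionType.

#include <iostream>
#include <string>
#include <vector>

struct FuncDesc {
  std::string Name;
  bool IsVarArg;
  std::vector<std::string> ParamTypes; // e.g. "ptr", "int"
};

// Only special-case a call when the name *and* the expected (ptr, ptr, int)
// prototype both match, mirroring the check performed above.
static bool looksLikeMemsetPattern16(const FuncDesc &F) {
  return F.Name == "memset_pattern16" && !F.IsVarArg &&
         F.ParamTypes.size() == 3 && F.ParamTypes[0] == "ptr" &&
         F.ParamTypes[1] == "ptr" && F.ParamTypes[2] == "int";
}

int main() {
  FuncDesc Good{"memset_pattern16", false, {"ptr", "ptr", "int"}};
  FuncDesc Bad{"memset_pattern16", false, {"ptr", "int"}};
  std::cout << looksLikeMemsetPattern16(Good) << ' '
            << looksLikeMemsetPattern16(Bad) << '\n'; // prints "1 0"
}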
/// getModRefBehavior - Return the behavior when calling the given call site.
AliasAnalysis::ModRefBehavior
BasicAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
@@ -684,10 +701,93 @@ BasicAliasAnalysis::getModRefBehavior(const Function *F) {
if (F->onlyReadsMemory())
Min = OnlyReadsMemory;
+ const TargetLibraryInfo &TLI = getAnalysis<TargetLibraryInfo>();
+ if (isMemsetPattern16(F, TLI))
+ Min = OnlyAccessesArgumentPointees;
+
// Otherwise be conservative.
return ModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min);
}
+AliasAnalysis::Location
+BasicAliasAnalysis::getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
+ ModRefResult &Mask) {
+ Location Loc = AliasAnalysis::getArgLocation(CS, ArgIdx, Mask);
+ const TargetLibraryInfo &TLI = getAnalysis<TargetLibraryInfo>();
+ const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
+ if (II != nullptr)
+ switch (II->getIntrinsicID()) {
+ default: break;
+ case Intrinsic::memset:
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove: {
+ assert((ArgIdx == 0 || ArgIdx == 1) &&
+ "Invalid argument index for memory intrinsic");
+ if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
+ Loc.Size = LenCI->getZExtValue();
+ assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
+ "Memory intrinsic location pointer not argument?");
+ Mask = ArgIdx ? Ref : Mod;
+ break;
+ }
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ case Intrinsic::invariant_start: {
+ assert(ArgIdx == 1 && "Invalid argument index");
+ assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
+ "Intrinsic location pointer not argument?");
+ Loc.Size = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
+ break;
+ }
+ case Intrinsic::invariant_end: {
+ assert(ArgIdx == 2 && "Invalid argument index");
+ assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
+ "Intrinsic location pointer not argument?");
+ Loc.Size = cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
+ break;
+ }
+ case Intrinsic::arm_neon_vld1: {
+ assert(ArgIdx == 0 && "Invalid argument index");
+ assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
+ "Intrinsic location pointer not argument?");
+ // LLVM's vld1 and vst1 intrinsics currently only support a single
+ // vector register.
+ if (DL)
+ Loc.Size = DL->getTypeStoreSize(II->getType());
+ break;
+ }
+ case Intrinsic::arm_neon_vst1: {
+ assert(ArgIdx == 0 && "Invalid argument index");
+ assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
+ "Intrinsic location pointer not argument?");
+ if (DL)
+ Loc.Size = DL->getTypeStoreSize(II->getArgOperand(1)->getType());
+ break;
+ }
+ }
+
+ // We can bound the aliasing properties of memset_pattern16 just as we can
+ // for memcpy/memset. This is particularly important because the
+ // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
+ // whenever possible.
+ else if (CS.getCalledFunction() &&
+ isMemsetPattern16(CS.getCalledFunction(), TLI)) {
+ assert((ArgIdx == 0 || ArgIdx == 1) &&
+ "Invalid argument index for memset_pattern16");
+ if (ArgIdx == 1)
+ Loc.Size = 16;
+ else if (const ConstantInt *LenCI =
+ dyn_cast<ConstantInt>(CS.getArgument(2)))
+ Loc.Size = LenCI->getZExtValue();
+ assert(Loc.Ptr == CS.getArgument(ArgIdx) &&
+ "memset_pattern16 location pointer not argument?");
+ Mask = ArgIdx ? Ref : Mod;
+ }
+ // FIXME: Handle memset_pattern4 and memset_pattern8 also.
+
+ return Loc;
+}
+
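Illustrative only, not part of the patch: a standalone sketch of the per-argument classification done above for memcpy-style transfers. The destination operand is written (Mod), the source operand is read (Ref), and a constant length bounds both accesses; ModRefKind and the pair are simplified stand-ins, not LLVM's types.

#include <cstdint>
#include <iostream>
#include <utility>

enum ModRefKind { Ref, Mod };

// memcpy(dst, src, len): argument 0 is only written, argument 1 is only read.
static std::pair<uint64_t, ModRefKind>
memTransferArgLocation(unsigned ArgIdx, uint64_t ConstLen) {
  return {ConstLen, ArgIdx == 0 ? Mod : Ref};
}

int main() {
  auto Dst = memTransferArgLocation(0, 32);
  auto Src = memTransferArgLocation(1, 32);
  std::cout << "dst: size=" << Dst.first
            << " kind=" << (Dst.second == Mod ? "Mod" : "Ref") << '\n'
            << "src: size=" << Src.first
            << " kind=" << (Src.second == Mod ? "Mod" : "Ref") << '\n';
}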
/// getModRefInfo - Check to see if the specified callsite can clobber the
/// specified memory object. Since we only look at local properties of this
/// function, we really can't say much about this query. We do, however, use
@@ -698,7 +798,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
"AliasAnalysis query involving multiple functions!");
- const Value *Object = GetUnderlyingObject(Loc.Ptr, TD);
+ const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);
// If this is a tail call and Loc.Ptr points to a stack location, we know that
// the tail call cannot access or modify the local stack.
@@ -740,144 +840,13 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
return NoModRef;
}
- const TargetLibraryInfo &TLI = getAnalysis<TargetLibraryInfo>();
- ModRefResult Min = ModRef;
-
- // Finally, handle specific knowledge of intrinsics.
- const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
- if (II != 0)
- switch (II->getIntrinsicID()) {
- default: break;
- case Intrinsic::memcpy:
- case Intrinsic::memmove: {
- uint64_t Len = UnknownSize;
- if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
- Len = LenCI->getZExtValue();
- Value *Dest = II->getArgOperand(0);
- Value *Src = II->getArgOperand(1);
- // If it can't overlap the source dest, then it doesn't modref the loc.
- if (isNoAlias(Location(Dest, Len), Loc)) {
- if (isNoAlias(Location(Src, Len), Loc))
- return NoModRef;
- // If it can't overlap the dest, then worst case it reads the loc.
- Min = Ref;
- } else if (isNoAlias(Location(Src, Len), Loc)) {
- // If it can't overlap the source, then worst case it mutates the loc.
- Min = Mod;
- }
- break;
- }
- case Intrinsic::memset:
- // Since memset is 'accesses arguments' only, the AliasAnalysis base class
- // will handle it for the variable length case.
- if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
- uint64_t Len = LenCI->getZExtValue();
- Value *Dest = II->getArgOperand(0);
- if (isNoAlias(Location(Dest, Len), Loc))
- return NoModRef;
- }
- // We know that memset doesn't load anything.
- Min = Mod;
- break;
- case Intrinsic::lifetime_start:
- case Intrinsic::lifetime_end:
- case Intrinsic::invariant_start: {
- uint64_t PtrSize =
- cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
- if (isNoAlias(Location(II->getArgOperand(1),
- PtrSize,
- II->getMetadata(LLVMContext::MD_tbaa)),
- Loc))
- return NoModRef;
- break;
- }
- case Intrinsic::invariant_end: {
- uint64_t PtrSize =
- cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
- if (isNoAlias(Location(II->getArgOperand(2),
- PtrSize,
- II->getMetadata(LLVMContext::MD_tbaa)),
- Loc))
- return NoModRef;
- break;
- }
- case Intrinsic::arm_neon_vld1: {
- // LLVM's vld1 and vst1 intrinsics currently only support a single
- // vector register.
- uint64_t Size =
- TD ? TD->getTypeStoreSize(II->getType()) : UnknownSize;
- if (isNoAlias(Location(II->getArgOperand(0), Size,
- II->getMetadata(LLVMContext::MD_tbaa)),
- Loc))
- return NoModRef;
- break;
- }
- case Intrinsic::arm_neon_vst1: {
- uint64_t Size =
- TD ? TD->getTypeStoreSize(II->getArgOperand(1)->getType()) : UnknownSize;
- if (isNoAlias(Location(II->getArgOperand(0), Size,
- II->getMetadata(LLVMContext::MD_tbaa)),
- Loc))
- return NoModRef;
- break;
- }
- }
-
- // We can bound the aliasing properties of memset_pattern16 just as we can
- // for memcpy/memset. This is particularly important because the
- // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
- // whenever possible.
- else if (TLI.has(LibFunc::memset_pattern16) &&
- CS.getCalledFunction() &&
- CS.getCalledFunction()->getName() == "memset_pattern16") {
- const Function *MS = CS.getCalledFunction();
- FunctionType *MemsetType = MS->getFunctionType();
- if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
- isa<PointerType>(MemsetType->getParamType(0)) &&
- isa<PointerType>(MemsetType->getParamType(1)) &&
- isa<IntegerType>(MemsetType->getParamType(2))) {
- uint64_t Len = UnknownSize;
- if (const ConstantInt *LenCI = dyn_cast<ConstantInt>(CS.getArgument(2)))
- Len = LenCI->getZExtValue();
- const Value *Dest = CS.getArgument(0);
- const Value *Src = CS.getArgument(1);
- // If it can't overlap the source dest, then it doesn't modref the loc.
- if (isNoAlias(Location(Dest, Len), Loc)) {
- // Always reads 16 bytes of the source.
- if (isNoAlias(Location(Src, 16), Loc))
- return NoModRef;
- // If it can't overlap the dest, then worst case it reads the loc.
- Min = Ref;
- // Always reads 16 bytes of the source.
- } else if (isNoAlias(Location(Src, 16), Loc)) {
- // If it can't overlap the source, then worst case it mutates the loc.
- Min = Mod;
- }
- }
- }
-
// The AliasAnalysis base class has some smarts, let's use them.
- return ModRefResult(AliasAnalysis::getModRefInfo(CS, Loc) & Min);
-}
-
-static bool areVarIndicesEqual(SmallVectorImpl<VariableGEPIndex> &Indices1,
- SmallVectorImpl<VariableGEPIndex> &Indices2) {
- unsigned Size1 = Indices1.size();
- unsigned Size2 = Indices2.size();
-
- if (Size1 != Size2)
- return false;
-
- for (unsigned I = 0; I != Size1; ++I)
- if (Indices1[I] != Indices2[I])
- return false;
-
- return true;
+ return AliasAnalysis::getModRefInfo(CS, Loc);
}
/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
/// against another pointer. We know that V1 is a GEP, but we don't know
-/// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, TD),
+/// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, DL),
/// UnderlyingV2 is the same for V2.
///
AliasAnalysis::AliasResult
@@ -888,6 +857,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
const Value *UnderlyingV1,
const Value *UnderlyingV2) {
int64_t GEP1BaseOffset;
+ bool GEP1MaxLookupReached;
SmallVector<VariableGEPIndex, 4> GEP1VariableIndices;
// If we have two gep instructions with must-alias or not-alias'ing base
@@ -895,8 +865,8 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// derived pointer.
if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
// Do the base pointers alias?
- AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, 0,
- UnderlyingV2, UnknownSize, 0);
+ AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, nullptr,
+ UnderlyingV2, UnknownSize, nullptr);
// Check for geps of non-aliasing underlying pointers where the offsets are
// identical.
@@ -909,21 +879,28 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// See if the computed offset from the common pointer tells us about the
// relation of the resulting pointer.
int64_t GEP2BaseOffset;
+ bool GEP2MaxLookupReached;
SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
const Value *GEP2BasePtr =
- DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
+ DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
+ GEP2MaxLookupReached, DL);
const Value *GEP1BasePtr =
- DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
+ DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
+ GEP1MaxLookupReached, DL);
// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
- assert(TD == 0 &&
- "DecomposeGEPExpression and GetUnderlyingObject disagree!");
+ assert(!DL &&
+ "DecomposeGEPExpression and GetUnderlyingObject disagree!");
return MayAlias;
}
+ // If the max search depth is reached, the result is undefined.
+ if (GEP2MaxLookupReached || GEP1MaxLookupReached)
+ return MayAlias;
+
// Same offsets.
if (GEP1BaseOffset == GEP2BaseOffset &&
- areVarIndicesEqual(GEP1VariableIndices, GEP2VariableIndices))
+ GEP1VariableIndices == GEP2VariableIndices)
return NoAlias;
GEP1VariableIndices.clear();
}
@@ -937,20 +914,26 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// exactly, see if the computed offset from the common pointer tells us
// about the relation of the resulting pointer.
const Value *GEP1BasePtr =
- DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
+ DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
+ GEP1MaxLookupReached, DL);
int64_t GEP2BaseOffset;
+ bool GEP2MaxLookupReached;
SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
const Value *GEP2BasePtr =
- DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
+ DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
+ GEP2MaxLookupReached, DL);
// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
- assert(TD == 0 &&
+ assert(!DL &&
"DecomposeGEPExpression and GetUnderlyingObject disagree!");
return MayAlias;
}
+ // If the max search depth is reached, the result is undefined.
+ if (GEP2MaxLookupReached || GEP1MaxLookupReached)
+ return MayAlias;
// Subtract the GEP2 pointer from the GEP1 pointer to find out their
// symbolic difference.
@@ -966,7 +949,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
if (V1Size == UnknownSize && V2Size == UnknownSize)
return MayAlias;
- AliasResult R = aliasCheck(UnderlyingV1, UnknownSize, 0,
+ AliasResult R = aliasCheck(UnderlyingV1, UnknownSize, nullptr,
V2, V2Size, V2TBAAInfo);
if (R != MustAlias)
// If V2 may alias GEP base pointer, conservatively returns MayAlias.
@@ -977,15 +960,19 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
return R;
const Value *GEP1BasePtr =
- DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
+ DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
+ GEP1MaxLookupReached, DL);
// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1) {
- assert(TD == 0 &&
+ assert(!DL &&
"DecomposeGEPExpression and GetUnderlyingObject disagree!");
return MayAlias;
}
+ // If the max search depth is reached, the result is undefined.
+ if (GEP1MaxLookupReached)
+ return MayAlias;
}
// In the two GEP Case, if there is no difference in the offsets of the
@@ -1215,8 +1202,8 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
return NoAlias; // Scalars cannot alias each other
// Figure out what objects these things are pointing to if we can.
- const Value *O1 = GetUnderlyingObject(V1, TD);
- const Value *O2 = GetUnderlyingObject(V2, TD);
+ const Value *O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);
+ const Value *O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);
// Null values in the default address space don't point to any object, so they
// don't alias any other pointer.
@@ -1265,9 +1252,9 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
// If the size of one access is larger than the entire object on the other
// side, then we know such behavior is undefined and can assume no alias.
- if (TD)
- if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD, *TLI)) ||
- (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD, *TLI)))
+ if (DL)
+ if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *DL, *TLI)) ||
+ (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *DL, *TLI)))
return NoAlias;
// Check the cache before climbing up use-def chains. This also terminates
@@ -1319,9 +1306,9 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
// If both pointers are pointing into the same object and one of them
// accesses is accessing the entire object, then the accesses must
// overlap in some way.
- if (TD && O1 == O2)
- if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD, *TLI)) ||
- (V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD, *TLI)))
+ if (DL && O1 == O2)
+ if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *DL, *TLI)) ||
+ (V2Size != UnknownSize && isObjectSize(O2, V2Size, *DL, *TLI)))
return AliasCache[Locs] = PartialAlias;
AliasResult Result =
@@ -1343,7 +1330,9 @@ bool BasicAliasAnalysis::isValueEqualInPotentialCycles(const Value *V,
return false;
// Use dominance or loop info if available.
- DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>();
+ DominatorTreeWrapperPass *DTWP =
+ getAnalysisIfAvailable<DominatorTreeWrapperPass>();
+ DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
LoopInfo *LI = getAnalysisIfAvailable<LoopInfo>();
// Make sure that the visited phis cannot reach the Value. This ensures that
diff --git a/contrib/llvm/lib/Analysis/BlockFrequencyInfo.cpp b/contrib/llvm/lib/Analysis/BlockFrequencyInfo.cpp
index 62f3ab1..8ed8e3e 100644
--- a/contrib/llvm/lib/Analysis/BlockFrequencyInfo.cpp
+++ b/contrib/llvm/lib/Analysis/BlockFrequencyInfo.cpp
@@ -1,4 +1,4 @@
-//=======-------- BlockFrequencyInfo.cpp - Block Frequency Analysis -------===//
+//===- BlockFrequencyInfo.cpp - Block Frequency Analysis ------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,18 +12,20 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/BlockFrequencyInfo.h"
-#include "llvm/Analysis/BlockFrequencyImpl.h"
+#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/Passes.h"
+#include "llvm/IR/CFG.h"
#include "llvm/InitializePasses.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GraphWriter.h"
using namespace llvm;
+#define DEBUG_TYPE "block-freq"
+
#ifndef NDEBUG
enum GVDAGType {
GVDT_None,
@@ -86,7 +88,7 @@ struct DOTGraphTraits<BlockFrequencyInfo*> : public DefaultDOTGraphTraits {
OS << Node->getName().str() << ":";
switch (ViewBlockFreqPropagationDAG) {
case GVDT_Fraction:
- Graph->getBlockFreq(Node).print(OS);
+ Graph->printBlockFreq(OS, Node);
break;
case GVDT_Integer:
OS << Graph->getBlockFreq(Node).getFrequency();
@@ -106,6 +108,7 @@ struct DOTGraphTraits<BlockFrequencyInfo*> : public DefaultDOTGraphTraits {
INITIALIZE_PASS_BEGIN(BlockFrequencyInfo, "block-freq",
"Block Frequency Analysis", true, true)
INITIALIZE_PASS_DEPENDENCY(BranchProbabilityInfo)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_END(BlockFrequencyInfo, "block-freq",
"Block Frequency Analysis", true, true)
@@ -114,21 +117,22 @@ char BlockFrequencyInfo::ID = 0;
BlockFrequencyInfo::BlockFrequencyInfo() : FunctionPass(ID) {
initializeBlockFrequencyInfoPass(*PassRegistry::getPassRegistry());
- BFI = new BlockFrequencyImpl<BasicBlock, Function, BranchProbabilityInfo>();
}
-BlockFrequencyInfo::~BlockFrequencyInfo() {
- delete BFI;
-}
+BlockFrequencyInfo::~BlockFrequencyInfo() {}
void BlockFrequencyInfo::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<BranchProbabilityInfo>();
+ AU.addRequired<LoopInfo>();
AU.setPreservesAll();
}
bool BlockFrequencyInfo::runOnFunction(Function &F) {
BranchProbabilityInfo &BPI = getAnalysis<BranchProbabilityInfo>();
- BFI->doFunction(&F, &BPI);
+ LoopInfo &LI = getAnalysis<LoopInfo>();
+ if (!BFI)
+ BFI.reset(new ImplType);
+ BFI->doFunction(&F, &BPI, &LI);
#ifndef NDEBUG
if (ViewBlockFreqPropagationDAG != GVDT_None)
view();
@@ -136,12 +140,14 @@ bool BlockFrequencyInfo::runOnFunction(Function &F) {
return false;
}
+void BlockFrequencyInfo::releaseMemory() { BFI.reset(); }
+
void BlockFrequencyInfo::print(raw_ostream &O, const Module *) const {
if (BFI) BFI->print(O);
}
BlockFrequency BlockFrequencyInfo::getBlockFreq(const BasicBlock *BB) const {
- return BFI->getBlockFreq(BB);
+ return BFI ? BFI->getBlockFreq(BB) : 0;
}
/// Pop up a ghostview window with the current block frequency propagation
@@ -157,5 +163,20 @@ void BlockFrequencyInfo::view() const {
}
const Function *BlockFrequencyInfo::getFunction() const {
- return BFI->Fn;
+ return BFI ? BFI->getFunction() : nullptr;
+}
+
+raw_ostream &BlockFrequencyInfo::
+printBlockFreq(raw_ostream &OS, const BlockFrequency Freq) const {
+ return BFI ? BFI->printBlockFreq(OS, Freq) : OS;
+}
+
+raw_ostream &
+BlockFrequencyInfo::printBlockFreq(raw_ostream &OS,
+ const BasicBlock *BB) const {
+ return BFI ? BFI->printBlockFreq(OS, BB) : OS;
+}
+
+uint64_t BlockFrequencyInfo::getEntryFreq() const {
+ return BFI ? BFI->getEntryFreq() : 0;
}
diff --git a/contrib/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp b/contrib/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp
new file mode 100644
index 0000000..3203c37
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp
@@ -0,0 +1,697 @@
+//===- BlockFrequencyInfoImpl.cpp - Block Frequency Info Implementation ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Loops should be simplified before this analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
+#include "llvm/ADT/SCCIterator.h"
+#include "llvm/Support/raw_ostream.h"
+#include <deque>
+
+using namespace llvm;
+using namespace llvm::bfi_detail;
+
+#define DEBUG_TYPE "block-freq"
+
+ScaledNumber<uint64_t> BlockMass::toScaled() const {
+ if (isFull())
+ return ScaledNumber<uint64_t>(1, 0);
+ return ScaledNumber<uint64_t>(getMass() + 1, -64);
+}
+
+void BlockMass::dump() const { print(dbgs()); }
+
+static char getHexDigit(int N) {
+ assert(N < 16);
+ if (N < 10)
+ return '0' + N;
+ return 'a' + N - 10;
+}
+raw_ostream &BlockMass::print(raw_ostream &OS) const {
+ for (int Digits = 0; Digits < 16; ++Digits)
+ OS << getHexDigit(Mass >> (60 - Digits * 4) & 0xf);
+ return OS;
+}
+
+namespace {
+
+typedef BlockFrequencyInfoImplBase::BlockNode BlockNode;
+typedef BlockFrequencyInfoImplBase::Distribution Distribution;
+typedef BlockFrequencyInfoImplBase::Distribution::WeightList WeightList;
+typedef BlockFrequencyInfoImplBase::Scaled64 Scaled64;
+typedef BlockFrequencyInfoImplBase::LoopData LoopData;
+typedef BlockFrequencyInfoImplBase::Weight Weight;
+typedef BlockFrequencyInfoImplBase::FrequencyData FrequencyData;
+
+/// \brief Dithering mass distributer.
+///
+/// This class splits up a single mass into portions by weight, dithering to
+/// spread out error. No mass is lost. The dithering precision depends on the
+/// precision of the product of \a BlockMass and \a BranchProbability.
+///
+/// The distribution algorithm follows.
+///
+/// 1. Initialize by saving the sum of the weights in \a RemWeight and the
+/// mass to distribute in \a RemMass.
+///
+/// 2. For each portion:
+///
+/// 1. Construct a branch probability, P, as the portion's weight divided
+/// by the current value of \a RemWeight.
+/// 2. Calculate the portion's mass as \a RemMass times P.
+/// 3. Update \a RemWeight and \a RemMass at each portion by subtracting
+/// the current portion's weight and mass.
+struct DitheringDistributer {
+ uint32_t RemWeight;
+ BlockMass RemMass;
+
+ DitheringDistributer(Distribution &Dist, const BlockMass &Mass);
+
+ BlockMass takeMass(uint32_t Weight);
+};
+
+} // end namespace
+
+DitheringDistributer::DitheringDistributer(Distribution &Dist,
+ const BlockMass &Mass) {
+ Dist.normalize();
+ RemWeight = Dist.Total;
+ RemMass = Mass;
+}
+
+BlockMass DitheringDistributer::takeMass(uint32_t Weight) {
+ assert(Weight && "invalid weight");
+ assert(Weight <= RemWeight);
+ BlockMass Mass = RemMass * BranchProbability(Weight, RemWeight);
+
+ // Decrement totals (dither).
+ RemWeight -= Weight;
+ RemMass -= Mass;
+ return Mass;
+}
+
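Illustrative only, not part of the patch: a self-contained integer version of the dithering described above. Each portion takes RemMass * Weight / RemWeight, the integer analogue of RemMass * P, and the remainders shrink with it, so the portions always sum back to the original mass.

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  uint64_t RemMass = 1000;                   // mass to distribute
  std::vector<uint32_t> Weights = {3, 3, 3}; // does not divide 1000 evenly
  uint32_t RemWeight = 9;                    // sum of Weights

  uint64_t Assigned = 0;
  for (uint32_t W : Weights) {
    uint64_t Taken = RemMass * W / RemWeight; // analogous to RemMass * P
    RemWeight -= W;                           // decrement totals (dither)
    RemMass -= Taken;
    Assigned += Taken;
    std::cout << "portion gets " << Taken << '\n'; // 333, 333, 334
  }
  std::cout << "total = " << Assigned << '\n';     // 1000: nothing lost
}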
+void Distribution::add(const BlockNode &Node, uint64_t Amount,
+ Weight::DistType Type) {
+ assert(Amount && "invalid weight of 0");
+ uint64_t NewTotal = Total + Amount;
+
+ // Check for overflow. It should be impossible to overflow twice.
+ bool IsOverflow = NewTotal < Total;
+ assert(!(DidOverflow && IsOverflow) && "unexpected repeated overflow");
+ DidOverflow |= IsOverflow;
+
+ // Update the total.
+ Total = NewTotal;
+
+ // Save the weight.
+ Weights.push_back(Weight(Type, Node, Amount));
+}
+
+static void combineWeight(Weight &W, const Weight &OtherW) {
+ assert(OtherW.TargetNode.isValid());
+ if (!W.Amount) {
+ W = OtherW;
+ return;
+ }
+ assert(W.Type == OtherW.Type);
+ assert(W.TargetNode == OtherW.TargetNode);
+ assert(W.Amount < W.Amount + OtherW.Amount && "Unexpected overflow");
+ W.Amount += OtherW.Amount;
+}
+static void combineWeightsBySorting(WeightList &Weights) {
+ // Sort so edges to the same node are adjacent.
+ std::sort(Weights.begin(), Weights.end(),
+ [](const Weight &L,
+ const Weight &R) { return L.TargetNode < R.TargetNode; });
+
+ // Combine adjacent edges.
+ WeightList::iterator O = Weights.begin();
+ for (WeightList::const_iterator I = O, L = O, E = Weights.end(); I != E;
+ ++O, (I = L)) {
+ *O = *I;
+
+ // Find the adjacent weights to the same node.
+ for (++L; L != E && I->TargetNode == L->TargetNode; ++L)
+ combineWeight(*O, *L);
+ }
+
+ // Erase extra entries.
+ Weights.erase(O, Weights.end());
+ return;
+}
+static void combineWeightsByHashing(WeightList &Weights) {
+ // Collect weights into a DenseMap.
+ typedef DenseMap<BlockNode::IndexType, Weight> HashTable;
+ HashTable Combined(NextPowerOf2(2 * Weights.size()));
+ for (const Weight &W : Weights)
+ combineWeight(Combined[W.TargetNode.Index], W);
+
+ // Check whether anything changed.
+ if (Weights.size() == Combined.size())
+ return;
+
+ // Fill in the new weights.
+ Weights.clear();
+ Weights.reserve(Combined.size());
+ for (const auto &I : Combined)
+ Weights.push_back(I.second);
+}
+static void combineWeights(WeightList &Weights) {
+ // Use a hash table for many successors to keep this linear.
+ if (Weights.size() > 128) {
+ combineWeightsByHashing(Weights);
+ return;
+ }
+
+ combineWeightsBySorting(Weights);
+}
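Illustrative only, not part of the patch: the sort-then-merge idea behind combineWeightsBySorting, sketched on plain (node, weight) pairs. After sorting by target node, duplicates are adjacent and collapse by summing.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<unsigned, uint64_t>> Weights = {
      {2, 10}, {1, 5}, {2, 7}, {1, 1}};

  // Pairs compare by node index first, so edges to the same node end up
  // adjacent after sorting.
  std::sort(Weights.begin(), Weights.end());

  std::vector<std::pair<unsigned, uint64_t>> Combined;
  for (const auto &W : Weights) {
    if (!Combined.empty() && Combined.back().first == W.first)
      Combined.back().second += W.second; // merge a duplicate target node
    else
      Combined.push_back(W);
  }

  for (const auto &W : Combined)
    std::cout << "node " << W.first << " -> weight " << W.second << '\n';
  // node 1 -> weight 6
  // node 2 -> weight 17
}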
+static uint64_t shiftRightAndRound(uint64_t N, int Shift) {
+ assert(Shift >= 0);
+ assert(Shift < 64);
+ if (!Shift)
+ return N;
+ return (N >> Shift) + (UINT64_C(1) & N >> (Shift - 1));
+}
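Illustrative only, not part of the patch: the rounding behaviour of the shiftRightAndRound pattern above, checked on small values. The bit shifted out last is added back, so the shift rounds to nearest instead of truncating.

#include <cassert>
#include <cstdint>
#include <iostream>

static uint64_t shr_round(uint64_t N, int Shift) {
  assert(Shift > 0 && Shift < 64);
  // Add the bit that is shifted out last, i.e. round half up.
  return (N >> Shift) + ((N >> (Shift - 1)) & UINT64_C(1));
}

int main() {
  std::cout << shr_round(10, 2) << '\n'; // 10/4 = 2.5  -> 3
  std::cout << shr_round(9, 2) << '\n';  // 9/4  = 2.25 -> 2
  std::cout << shr_round(11, 2) << '\n'; // 11/4 = 2.75 -> 3
}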
+void Distribution::normalize() {
+ // Early exit for termination nodes.
+ if (Weights.empty())
+ return;
+
+ // Only bother if there are multiple successors.
+ if (Weights.size() > 1)
+ combineWeights(Weights);
+
+ // Early exit when combined into a single successor.
+ if (Weights.size() == 1) {
+ Total = 1;
+ Weights.front().Amount = 1;
+ return;
+ }
+
+ // Determine how much to shift right so that the total fits into 32-bits.
+ //
+ // If we shift at all, shift by 1 extra. Otherwise, the lower limit of 1
+ // for each weight can cause a 32-bit overflow.
+ int Shift = 0;
+ if (DidOverflow)
+ Shift = 33;
+ else if (Total > UINT32_MAX)
+ Shift = 33 - countLeadingZeros(Total);
+
+ // Early exit if nothing needs to be scaled.
+ if (!Shift)
+ return;
+
+ // Recompute the total through accumulation (rather than shifting it) so that
+ // it's accurate after shifting.
+ Total = 0;
+
+ // Sum the weights to each node and shift right if necessary.
+ for (Weight &W : Weights) {
+ // Scale down below UINT32_MAX. Since Shift is larger than necessary, we
+ // can round here without concern about overflow.
+ assert(W.TargetNode.isValid());
+ W.Amount = std::max(UINT64_C(1), shiftRightAndRound(W.Amount, Shift));
+ assert(W.Amount <= UINT32_MAX);
+
+ // Update the total.
+ Total += W.Amount;
+ }
+ assert(Total <= UINT32_MAX);
+}
+
+void BlockFrequencyInfoImplBase::clear() {
+ // Swap with a default-constructed std::vector, since std::vector<>::clear()
+ // does not actually clear heap storage.
+ std::vector<FrequencyData>().swap(Freqs);
+ std::vector<WorkingData>().swap(Working);
+ Loops.clear();
+}
+
+/// \brief Clear all memory not needed downstream.
+///
+/// Releases all memory not used downstream. In particular, saves Freqs.
+static void cleanup(BlockFrequencyInfoImplBase &BFI) {
+ std::vector<FrequencyData> SavedFreqs(std::move(BFI.Freqs));
+ BFI.clear();
+ BFI.Freqs = std::move(SavedFreqs);
+}
+
+bool BlockFrequencyInfoImplBase::addToDist(Distribution &Dist,
+ const LoopData *OuterLoop,
+ const BlockNode &Pred,
+ const BlockNode &Succ,
+ uint64_t Weight) {
+ if (!Weight)
+ Weight = 1;
+
+ auto isLoopHeader = [&OuterLoop](const BlockNode &Node) {
+ return OuterLoop && OuterLoop->isHeader(Node);
+ };
+
+ BlockNode Resolved = Working[Succ.Index].getResolvedNode();
+
+#ifndef NDEBUG
+ auto debugSuccessor = [&](const char *Type) {
+ dbgs() << " =>"
+ << " [" << Type << "] weight = " << Weight;
+ if (!isLoopHeader(Resolved))
+ dbgs() << ", succ = " << getBlockName(Succ);
+ if (Resolved != Succ)
+ dbgs() << ", resolved = " << getBlockName(Resolved);
+ dbgs() << "\n";
+ };
+ (void)debugSuccessor;
+#endif
+
+ if (isLoopHeader(Resolved)) {
+ DEBUG(debugSuccessor("backedge"));
+ Dist.addBackedge(OuterLoop->getHeader(), Weight);
+ return true;
+ }
+
+ if (Working[Resolved.Index].getContainingLoop() != OuterLoop) {
+ DEBUG(debugSuccessor(" exit "));
+ Dist.addExit(Resolved, Weight);
+ return true;
+ }
+
+ if (Resolved < Pred) {
+ if (!isLoopHeader(Pred)) {
+ // If OuterLoop is an irreducible loop, we can't actually handle this.
+ assert((!OuterLoop || !OuterLoop->isIrreducible()) &&
+ "unhandled irreducible control flow");
+
+ // Irreducible backedge. Abort.
+ DEBUG(debugSuccessor("abort!!!"));
+ return false;
+ }
+
+ // If "Pred" is a loop header, then this isn't really a backedge; rather,
+ // OuterLoop must be irreducible. These false backedges can come only from
+ // secondary loop headers.
+ assert(OuterLoop && OuterLoop->isIrreducible() && !isLoopHeader(Resolved) &&
+ "unhandled irreducible control flow");
+ }
+
+ DEBUG(debugSuccessor(" local "));
+ Dist.addLocal(Resolved, Weight);
+ return true;
+}
+
+bool BlockFrequencyInfoImplBase::addLoopSuccessorsToDist(
+ const LoopData *OuterLoop, LoopData &Loop, Distribution &Dist) {
+ // Copy the exit map into Dist.
+ for (const auto &I : Loop.Exits)
+ if (!addToDist(Dist, OuterLoop, Loop.getHeader(), I.first,
+ I.second.getMass()))
+ // Irreducible backedge.
+ return false;
+
+ return true;
+}
+
+/// \brief Get the maximum allowed loop scale.
+///
+/// Gives the maximum number of estimated iterations allowed for a loop. Very
+/// large numbers cause problems downstream (even within 64-bits).
+static Scaled64 getMaxLoopScale() { return Scaled64(1, 12); }
+
+/// \brief Compute the loop scale for a loop.
+void BlockFrequencyInfoImplBase::computeLoopScale(LoopData &Loop) {
+ // Compute loop scale.
+ DEBUG(dbgs() << "compute-loop-scale: " << getLoopName(Loop) << "\n");
+
+ // LoopScale == 1 / ExitMass
+ // ExitMass == HeadMass - BackedgeMass
+ BlockMass ExitMass = BlockMass::getFull() - Loop.BackedgeMass;
+
+ // Block scale stores the inverse of the scale.
+ Loop.Scale = ExitMass.toScaled().inverse();
+
+ DEBUG(dbgs() << " - exit-mass = " << ExitMass << " (" << BlockMass::getFull()
+ << " - " << Loop.BackedgeMass << ")\n"
+ << " - scale = " << Loop.Scale << "\n");
+
+ if (Loop.Scale > getMaxLoopScale()) {
+ Loop.Scale = getMaxLoopScale();
+ DEBUG(dbgs() << " - reduced-to-max-scale: " << getMaxLoopScale() << "\n");
+ }
+}
+
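Illustrative only, not part of the patch: the LoopScale == 1 / ExitMass relation above, worked with doubles instead of BlockMass and ScaledNumber. MaxLoopScale here is the decimal value of Scaled64(1, 12).

#include <algorithm>
#include <iostream>

int main() {
  double HeadMass = 1.0;      // full mass entering the loop header
  double BackedgeMass = 0.75; // 75% of iterations take the backedge
  double ExitMass = HeadMass - BackedgeMass;

  double Scale = 1.0 / ExitMass;         // estimated iterations: 4
  double MaxLoopScale = 4096.0;          // 1 * 2^12, as in getMaxLoopScale()
  Scale = std::min(Scale, MaxLoopScale); // clamp very large estimates

  std::cout << "loop scale = " << Scale << '\n'; // prints 4
}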
+/// \brief Package up a loop.
+void BlockFrequencyInfoImplBase::packageLoop(LoopData &Loop) {
+ DEBUG(dbgs() << "packaging-loop: " << getLoopName(Loop) << "\n");
+
+ // Clear the subloop exits to prevent quadratic memory usage.
+ for (const BlockNode &M : Loop.Nodes) {
+ if (auto *Loop = Working[M.Index].getPackagedLoop())
+ Loop->Exits.clear();
+ DEBUG(dbgs() << " - node: " << getBlockName(M.Index) << "\n");
+ }
+ Loop.IsPackaged = true;
+}
+
+void BlockFrequencyInfoImplBase::distributeMass(const BlockNode &Source,
+ LoopData *OuterLoop,
+ Distribution &Dist) {
+ BlockMass Mass = Working[Source.Index].getMass();
+ DEBUG(dbgs() << " => mass: " << Mass << "\n");
+
+ // Distribute mass to successors as laid out in Dist.
+ DitheringDistributer D(Dist, Mass);
+
+#ifndef NDEBUG
+ auto debugAssign = [&](const BlockNode &T, const BlockMass &M,
+ const char *Desc) {
+ dbgs() << " => assign " << M << " (" << D.RemMass << ")";
+ if (Desc)
+ dbgs() << " [" << Desc << "]";
+ if (T.isValid())
+ dbgs() << " to " << getBlockName(T);
+ dbgs() << "\n";
+ };
+ (void)debugAssign;
+#endif
+
+ for (const Weight &W : Dist.Weights) {
+ // Check for a local edge (non-backedge and non-exit).
+ BlockMass Taken = D.takeMass(W.Amount);
+ if (W.Type == Weight::Local) {
+ Working[W.TargetNode.Index].getMass() += Taken;
+ DEBUG(debugAssign(W.TargetNode, Taken, nullptr));
+ continue;
+ }
+
+ // Backedges and exits only make sense if we're processing a loop.
+ assert(OuterLoop && "backedge or exit outside of loop");
+
+ // Check for a backedge.
+ if (W.Type == Weight::Backedge) {
+ OuterLoop->BackedgeMass += Taken;
+ DEBUG(debugAssign(BlockNode(), Taken, "back"));
+ continue;
+ }
+
+ // This must be an exit.
+ assert(W.Type == Weight::Exit);
+ OuterLoop->Exits.push_back(std::make_pair(W.TargetNode, Taken));
+ DEBUG(debugAssign(W.TargetNode, Taken, "exit"));
+ }
+}
+
+static void convertFloatingToInteger(BlockFrequencyInfoImplBase &BFI,
+ const Scaled64 &Min, const Scaled64 &Max) {
+ // Scale the Factor to a size that creates integers. Ideally, integers would
+ // be scaled so that Max == UINT64_MAX so that they can be best
+ // differentiated. However, the register allocator currently deals poorly
+ // with large numbers. Instead, push Min up a little from 1 to give some
+ // room to differentiate small, unequal numbers.
+ //
+ // TODO: fix issues downstream so that ScalingFactor can be
+ // Scaled64(1,64)/Max.
+ Scaled64 ScalingFactor = Min.inverse();
+ if ((Max / Min).lg() < 60)
+ ScalingFactor <<= 3;
+
+ // Translate the floats to integers.
+ DEBUG(dbgs() << "float-to-int: min = " << Min << ", max = " << Max
+ << ", factor = " << ScalingFactor << "\n");
+ for (size_t Index = 0; Index < BFI.Freqs.size(); ++Index) {
+ Scaled64 Scaled = BFI.Freqs[Index].Scaled * ScalingFactor;
+ BFI.Freqs[Index].Integer = std::max(UINT64_C(1), Scaled.toInt<uint64_t>());
+ DEBUG(dbgs() << " - " << BFI.getBlockName(Index) << ": float = "
+ << BFI.Freqs[Index].Scaled << ", scaled = " << Scaled
+ << ", int = " << BFI.Freqs[Index].Integer << "\n");
+ }
+}
+
+/// \brief Unwrap a loop package.
+///
+/// Visits all the members of a loop, adjusting their BlockData according to
+/// the loop's pseudo-node.
+static void unwrapLoop(BlockFrequencyInfoImplBase &BFI, LoopData &Loop) {
+ DEBUG(dbgs() << "unwrap-loop-package: " << BFI.getLoopName(Loop)
+ << ": mass = " << Loop.Mass << ", scale = " << Loop.Scale
+ << "\n");
+ Loop.Scale *= Loop.Mass.toScaled();
+ Loop.IsPackaged = false;
+ DEBUG(dbgs() << " => combined-scale = " << Loop.Scale << "\n");
+
+ // Propagate the head scale through the loop. Since members are visited in
+ // RPO, the head scale will be updated by the loop scale first, and then the
+ // final head scale will be used to update the rest of the members.
+ for (const BlockNode &N : Loop.Nodes) {
+ const auto &Working = BFI.Working[N.Index];
+ Scaled64 &F = Working.isAPackage() ? Working.getPackagedLoop()->Scale
+ : BFI.Freqs[N.Index].Scaled;
+ Scaled64 New = Loop.Scale * F;
+ DEBUG(dbgs() << " - " << BFI.getBlockName(N) << ": " << F << " => " << New
+ << "\n");
+ F = New;
+ }
+}
+
+void BlockFrequencyInfoImplBase::unwrapLoops() {
+ // Set initial frequencies from loop-local masses.
+ for (size_t Index = 0; Index < Working.size(); ++Index)
+ Freqs[Index].Scaled = Working[Index].Mass.toScaled();
+
+ for (LoopData &Loop : Loops)
+ unwrapLoop(*this, Loop);
+}
+
+void BlockFrequencyInfoImplBase::finalizeMetrics() {
+ // Unwrap loop packages in reverse post-order, tracking min and max
+ // frequencies.
+ auto Min = Scaled64::getLargest();
+ auto Max = Scaled64::getZero();
+ for (size_t Index = 0; Index < Working.size(); ++Index) {
+ // Update min/max scale.
+ Min = std::min(Min, Freqs[Index].Scaled);
+ Max = std::max(Max, Freqs[Index].Scaled);
+ }
+
+ // Convert to integers.
+ convertFloatingToInteger(*this, Min, Max);
+
+ // Clean up data structures.
+ cleanup(*this);
+
+ // Print out the final stats.
+ DEBUG(dump());
+}
+
+BlockFrequency
+BlockFrequencyInfoImplBase::getBlockFreq(const BlockNode &Node) const {
+ if (!Node.isValid())
+ return 0;
+ return Freqs[Node.Index].Integer;
+}
+Scaled64
+BlockFrequencyInfoImplBase::getFloatingBlockFreq(const BlockNode &Node) const {
+ if (!Node.isValid())
+ return Scaled64::getZero();
+ return Freqs[Node.Index].Scaled;
+}
+
+std::string
+BlockFrequencyInfoImplBase::getBlockName(const BlockNode &Node) const {
+ return std::string();
+}
+std::string
+BlockFrequencyInfoImplBase::getLoopName(const LoopData &Loop) const {
+ return getBlockName(Loop.getHeader()) + (Loop.isIrreducible() ? "**" : "*");
+}
+
+raw_ostream &
+BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS,
+ const BlockNode &Node) const {
+ return OS << getFloatingBlockFreq(Node);
+}
+
+raw_ostream &
+BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS,
+ const BlockFrequency &Freq) const {
+ Scaled64 Block(Freq.getFrequency(), 0);
+ Scaled64 Entry(getEntryFreq(), 0);
+
+ return OS << Block / Entry;
+}
+
+void IrreducibleGraph::addNodesInLoop(const BFIBase::LoopData &OuterLoop) {
+ Start = OuterLoop.getHeader();
+ Nodes.reserve(OuterLoop.Nodes.size());
+ for (auto N : OuterLoop.Nodes)
+ addNode(N);
+ indexNodes();
+}
+void IrreducibleGraph::addNodesInFunction() {
+ Start = 0;
+ for (uint32_t Index = 0; Index < BFI.Working.size(); ++Index)
+ if (!BFI.Working[Index].isPackaged())
+ addNode(Index);
+ indexNodes();
+}
+void IrreducibleGraph::indexNodes() {
+ for (auto &I : Nodes)
+ Lookup[I.Node.Index] = &I;
+}
+void IrreducibleGraph::addEdge(IrrNode &Irr, const BlockNode &Succ,
+ const BFIBase::LoopData *OuterLoop) {
+ if (OuterLoop && OuterLoop->isHeader(Succ))
+ return;
+ auto L = Lookup.find(Succ.Index);
+ if (L == Lookup.end())
+ return;
+ IrrNode &SuccIrr = *L->second;
+ Irr.Edges.push_back(&SuccIrr);
+ SuccIrr.Edges.push_front(&Irr);
+ ++SuccIrr.NumIn;
+}
+
+namespace llvm {
+template <> struct GraphTraits<IrreducibleGraph> {
+ typedef bfi_detail::IrreducibleGraph GraphT;
+
+ typedef const GraphT::IrrNode NodeType;
+ typedef GraphT::IrrNode::iterator ChildIteratorType;
+
+ static const NodeType *getEntryNode(const GraphT &G) {
+ return G.StartIrr;
+ }
+ static ChildIteratorType child_begin(NodeType *N) { return N->succ_begin(); }
+ static ChildIteratorType child_end(NodeType *N) { return N->succ_end(); }
+};
+}
+
+/// \brief Find extra irreducible headers.
+///
+/// Find entry blocks and other blocks with backedges, which exist when \c G
+/// contains irreducible sub-SCCs.
+static void findIrreducibleHeaders(
+ const BlockFrequencyInfoImplBase &BFI,
+ const IrreducibleGraph &G,
+ const std::vector<const IrreducibleGraph::IrrNode *> &SCC,
+ LoopData::NodeList &Headers, LoopData::NodeList &Others) {
+ // Map from nodes in the SCC to whether each is an entry block.
+ SmallDenseMap<const IrreducibleGraph::IrrNode *, bool, 8> InSCC;
+
+ // InSCC also acts as the set of nodes in the graph. Seed it.
+ for (const auto *I : SCC)
+ InSCC[I] = false;
+
+ for (auto I = InSCC.begin(), E = InSCC.end(); I != E; ++I) {
+ auto &Irr = *I->first;
+ for (const auto *P : make_range(Irr.pred_begin(), Irr.pred_end())) {
+ if (InSCC.count(P))
+ continue;
+
+ // This is an entry block.
+ I->second = true;
+ Headers.push_back(Irr.Node);
+ DEBUG(dbgs() << " => entry = " << BFI.getBlockName(Irr.Node) << "\n");
+ break;
+ }
+ }
+ assert(Headers.size() >= 2 && "Should be irreducible");
+ if (Headers.size() == InSCC.size()) {
+ // Every block is a header.
+ std::sort(Headers.begin(), Headers.end());
+ return;
+ }
+
+ // Look for extra headers from irreducible sub-SCCs.
+ for (const auto &I : InSCC) {
+ // Entry blocks are already headers.
+ if (I.second)
+ continue;
+
+ auto &Irr = *I.first;
+ for (const auto *P : make_range(Irr.pred_begin(), Irr.pred_end())) {
+ // Skip forward edges.
+ if (P->Node < Irr.Node)
+ continue;
+
+ // Skip predecessors from entry blocks. These can have inverted
+ // ordering.
+ if (InSCC.lookup(P))
+ continue;
+
+ // Store the extra header.
+ Headers.push_back(Irr.Node);
+ DEBUG(dbgs() << " => extra = " << BFI.getBlockName(Irr.Node) << "\n");
+ break;
+ }
+ if (Headers.back() == Irr.Node)
+ // Added this as a header.
+ continue;
+
+ // This is not a header.
+ Others.push_back(Irr.Node);
+ DEBUG(dbgs() << " => other = " << BFI.getBlockName(Irr.Node) << "\n");
+ }
+ std::sort(Headers.begin(), Headers.end());
+ std::sort(Others.begin(), Others.end());
+}
+
+static void createIrreducibleLoop(
+ BlockFrequencyInfoImplBase &BFI, const IrreducibleGraph &G,
+ LoopData *OuterLoop, std::list<LoopData>::iterator Insert,
+ const std::vector<const IrreducibleGraph::IrrNode *> &SCC) {
+ // Translate the SCC into RPO.
+ DEBUG(dbgs() << " - found-scc\n");
+
+ LoopData::NodeList Headers;
+ LoopData::NodeList Others;
+ findIrreducibleHeaders(BFI, G, SCC, Headers, Others);
+
+ auto Loop = BFI.Loops.emplace(Insert, OuterLoop, Headers.begin(),
+ Headers.end(), Others.begin(), Others.end());
+
+ // Update loop hierarchy.
+ for (const auto &N : Loop->Nodes)
+ if (BFI.Working[N.Index].isLoopHeader())
+ BFI.Working[N.Index].Loop->Parent = &*Loop;
+ else
+ BFI.Working[N.Index].Loop = &*Loop;
+}
+
+iterator_range<std::list<LoopData>::iterator>
+BlockFrequencyInfoImplBase::analyzeIrreducible(
+ const IrreducibleGraph &G, LoopData *OuterLoop,
+ std::list<LoopData>::iterator Insert) {
+ assert((OuterLoop == nullptr) == (Insert == Loops.begin()));
+ auto Prev = OuterLoop ? std::prev(Insert) : Loops.end();
+
+ for (auto I = scc_begin(G); !I.isAtEnd(); ++I) {
+ if (I->size() < 2)
+ continue;
+
+ // Translate the SCC into RPO.
+ createIrreducibleLoop(*this, G, OuterLoop, Insert, *I);
+ }
+
+ if (OuterLoop)
+ return make_range(std::next(Prev), Insert);
+ return make_range(Loops.begin(), Insert);
+}
+
+void
+BlockFrequencyInfoImplBase::updateLoopWithIrreducible(LoopData &OuterLoop) {
+ OuterLoop.Exits.clear();
+ OuterLoop.BackedgeMass = BlockMass::getEmpty();
+ auto O = OuterLoop.Nodes.begin() + 1;
+ for (auto I = O, E = OuterLoop.Nodes.end(); I != E; ++I)
+ if (!Working[I->Index].isPackaged())
+ *O++ = *I;
+ OuterLoop.Nodes.erase(O, OuterLoop.Nodes.end());
+}
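The conversion above picks one scaling factor for the whole function, multiplies every scaled frequency by it, and clamps each result to at least 1 so no reachable block reports a zero frequency. A minimal sketch of that idea, assuming plain doubles in place of Scaled64 and a hypothetical factor choice (the real selection logic lives earlier in this file and is not shown here):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <vector>

// Sketch only: doubles stand in for LLVM's Scaled64 values.
std::vector<uint64_t> convertToIntegerFreqs(const std::vector<double> &Scaled,
                                            double Min, double Max) {
  // Hypothetical choice: map the smallest frequency to 1, unless that would
  // push the largest frequency past the top of the 64-bit range.
  double Factor = Min > 0 ? 1.0 / Min : 1.0;
  double Top = static_cast<double>(std::numeric_limits<uint64_t>::max());
  if (Max * Factor > Top)
    Factor = Top / Max;

  std::vector<uint64_t> Out(Scaled.size());
  for (std::size_t I = 0; I != Scaled.size(); ++I)
    // Clamp to 1 so every reachable block keeps a nonzero frequency.
    Out[I] = std::max<uint64_t>(1, static_cast<uint64_t>(Scaled[I] * Factor));
  return Out;
}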
diff --git a/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp b/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp
index 86560ca..bbd8750 100644
--- a/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp
+++ b/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp
@@ -14,16 +14,18 @@
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
+#define DEBUG_TYPE "branch-prob"
+
INITIALIZE_PASS_BEGIN(BranchProbabilityInfo, "branch-prob",
"Branch Probability Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
@@ -321,6 +323,9 @@ bool BranchProbabilityInfo::calcLoopBranchHeuristics(BasicBlock *BB) {
InEdges.push_back(I.getSuccessorIndex());
}
+ if (BackEdges.empty() && ExitingEdges.empty())
+ return false;
+
if (uint32_t numBackEdges = BackEdges.size()) {
uint32_t backWeight = LBH_TAKEN_WEIGHT / numBackEdges;
if (backWeight < NORMAL_WEIGHT)
@@ -483,6 +488,8 @@ void BranchProbabilityInfo::getAnalysisUsage(AnalysisUsage &AU) const {
}
bool BranchProbabilityInfo::runOnFunction(Function &F) {
+ DEBUG(dbgs() << "---- Branch Probability Info : " << F.getName()
+ << " ----\n\n");
LastF = &F; // Store the last function we ran on for printing.
LI = &getAnalysis<LoopInfo>();
assert(PostDominatedByUnreachable.empty());
@@ -554,7 +561,7 @@ isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const {
BasicBlock *BranchProbabilityInfo::getHotSucc(BasicBlock *BB) const {
uint32_t Sum = 0;
uint32_t MaxWeight = 0;
- BasicBlock *MaxSucc = 0;
+ BasicBlock *MaxSucc = nullptr;
for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
BasicBlock *Succ = *I;
@@ -574,7 +581,7 @@ BasicBlock *BranchProbabilityInfo::getHotSucc(BasicBlock *BB) const {
if (BranchProbability(MaxWeight, Sum) > BranchProbability(4, 5))
return MaxSucc;
- return 0;
+ return nullptr;
}
/// Get the raw edge weight for the edge. If can't find it, return
@@ -591,6 +598,11 @@ getEdgeWeight(const BasicBlock *Src, unsigned IndexInSuccessors) const {
return DEFAULT_WEIGHT;
}
+uint32_t BranchProbabilityInfo::getEdgeWeight(const BasicBlock *Src,
+ succ_const_iterator Dst) const {
+ return getEdgeWeight(Src, Dst.getSuccessorIndex());
+}
+
/// Get the raw edge weight calculated for the block pair. This returns the sum
/// of all raw edge weights from Src to Dst.
uint32_t BranchProbabilityInfo::
diff --git a/contrib/llvm/lib/Analysis/CFG.cpp b/contrib/llvm/lib/Analysis/CFG.cpp
index c3f32d3..8ef5302 100644
--- a/contrib/llvm/lib/Analysis/CFG.cpp
+++ b/contrib/llvm/lib/Analysis/CFG.cpp
@@ -13,10 +13,9 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/CFG.h"
-
#include "llvm/ADT/SmallSet.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/Dominators.h"
using namespace llvm;
@@ -102,15 +101,9 @@ bool llvm::isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum,
// If AllowIdenticalEdges is true, then we allow this edge to be considered
// non-critical iff all preds come from TI's block.
- while (I != E) {
- const BasicBlock *P = *I;
- if (P != FirstPred)
+ for (; I != E; ++I)
+ if (*I != FirstPred)
return true;
- // Note: leave this as is until no one ever compiles with either gcc 4.0.1
- // or Xcode 2. This seems to work around the pred_iterator assert in PR 2207
- E = pred_end(P);
- ++I;
- }
return false;
}
@@ -130,7 +123,7 @@ static bool loopContainsBoth(const LoopInfo *LI,
const BasicBlock *BB1, const BasicBlock *BB2) {
const Loop *L1 = getOutermostLoop(LI, BB1);
const Loop *L2 = getOutermostLoop(LI, BB2);
- return L1 != NULL && L1 == L2;
+ return L1 != nullptr && L1 == L2;
}
static bool isPotentiallyReachableInner(SmallVectorImpl<BasicBlock *> &Worklist,
@@ -140,7 +133,7 @@ static bool isPotentiallyReachableInner(SmallVectorImpl<BasicBlock *> &Worklist,
// When the stop block is unreachable, it's dominated from everywhere,
// regardless of whether there's a path between the two blocks.
if (DT && !DT->isReachableFromEntry(StopBB))
- DT = 0;
+ DT = nullptr;
// Limit the number of blocks we visit. The goal is to avoid run-away compile
// times on large CFGs without hampering sensible code. Arbitrarily chosen.
@@ -163,14 +156,13 @@ static bool isPotentiallyReachableInner(SmallVectorImpl<BasicBlock *> &Worklist,
return true;
}
- if (const Loop *Outer = LI ? getOutermostLoop(LI, BB) : 0) {
+ if (const Loop *Outer = LI ? getOutermostLoop(LI, BB) : nullptr) {
// All blocks in a single loop are reachable from all other blocks. From
// any of these blocks, we can skip directly to the exits of the loop,
// ignoring any other blocks inside the loop body.
Outer->getExitBlocks(Worklist);
} else {
- for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I)
- Worklist.push_back(*I);
+ Worklist.append(succ_begin(BB), succ_end(BB));
}
} while (!Worklist.empty());
@@ -208,7 +200,7 @@ bool llvm::isPotentiallyReachable(const Instruction *A, const Instruction *B,
// If the block is in a loop then we can reach any instruction in the block
// from any other instruction in the block by going around a backedge.
- if (LI && LI->getLoopFor(BB) != 0)
+ if (LI && LI->getLoopFor(BB) != nullptr)
return true;
// Linear scan, start at 'A', see whether we hit 'B' or the end first.
@@ -223,8 +215,7 @@ bool llvm::isPotentiallyReachable(const Instruction *A, const Instruction *B,
return false;
// Otherwise, continue doing the normal per-BB CFG walk.
- for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I)
- Worklist.push_back(*I);
+ Worklist.append(succ_begin(BB), succ_end(BB));
if (Worklist.empty()) {
// We've proven that there's no path!
diff --git a/contrib/llvm/lib/Analysis/CFGPrinter.cpp b/contrib/llvm/lib/Analysis/CFGPrinter.cpp
index 9b6879a..c2c19d6 100644
--- a/contrib/llvm/lib/Analysis/CFGPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/CFGPrinter.cpp
@@ -19,6 +19,7 @@
#include "llvm/Analysis/CFGPrinter.h"
#include "llvm/Pass.h"
+#include "llvm/Support/FileSystem.h"
using namespace llvm;
namespace {
@@ -28,14 +29,14 @@ namespace {
initializeCFGOnlyViewerPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnFunction(Function &F) {
+ bool runOnFunction(Function &F) override {
F.viewCFG();
return false;
}
- void print(raw_ostream &OS, const Module* = 0) const {}
+ void print(raw_ostream &OS, const Module* = nullptr) const override {}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
};
@@ -51,14 +52,14 @@ namespace {
initializeCFGOnlyViewerPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnFunction(Function &F) {
+ bool runOnFunction(Function &F) override {
F.viewCFGOnly();
return false;
}
- void print(raw_ostream &OS, const Module* = 0) const {}
+ void print(raw_ostream &OS, const Module* = nullptr) const override {}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
};
@@ -75,12 +76,12 @@ namespace {
initializeCFGPrinterPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnFunction(Function &F) {
+ bool runOnFunction(Function &F) override {
std::string Filename = "cfg." + F.getName().str() + ".dot";
errs() << "Writing '" << Filename << "'...";
std::string ErrorInfo;
- raw_fd_ostream File(Filename.c_str(), ErrorInfo);
+ raw_fd_ostream File(Filename.c_str(), ErrorInfo, sys::fs::F_Text);
if (ErrorInfo.empty())
WriteGraph(File, (const Function*)&F);
@@ -90,9 +91,9 @@ namespace {
return false;
}
- void print(raw_ostream &OS, const Module* = 0) const {}
+ void print(raw_ostream &OS, const Module* = nullptr) const override {}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
};
@@ -108,13 +109,13 @@ namespace {
CFGOnlyPrinter() : FunctionPass(ID) {
initializeCFGOnlyPrinterPass(*PassRegistry::getPassRegistry());
}
-
- virtual bool runOnFunction(Function &F) {
+
+ bool runOnFunction(Function &F) override {
std::string Filename = "cfg." + F.getName().str() + ".dot";
errs() << "Writing '" << Filename << "'...";
std::string ErrorInfo;
- raw_fd_ostream File(Filename.c_str(), ErrorInfo);
+ raw_fd_ostream File(Filename.c_str(), ErrorInfo, sys::fs::F_Text);
if (ErrorInfo.empty())
WriteGraph(File, (const Function*)&F, true);
@@ -123,9 +124,9 @@ namespace {
errs() << "\n";
return false;
}
- void print(raw_ostream &OS, const Module* = 0) const {}
+ void print(raw_ostream &OS, const Module* = nullptr) const override {}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
};
@@ -147,8 +148,8 @@ void Function::viewCFG() const {
/// viewCFGOnly - This function is meant for use from the debugger. It works
/// just like viewCFG, but it does not include the contents of basic blocks
-/// into the nodes, just the label. If you are only interested in the CFG t
-/// his can make the graph smaller.
+/// into the nodes, just the label. If you are only interested in the CFG
+/// this can make the graph smaller.
///
void Function::viewCFGOnly() const {
ViewGraph(this, "cfg" + getName(), true);
diff --git a/contrib/llvm/lib/Analysis/CGSCCPassManager.cpp b/contrib/llvm/lib/Analysis/CGSCCPassManager.cpp
new file mode 100644
index 0000000..5d1d8a9
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/CGSCCPassManager.cpp
@@ -0,0 +1,167 @@
+//===- CGSCCPassManager.cpp - Managing & running CGSCC passes -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/CGSCCPassManager.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+static cl::opt<bool>
+DebugPM("debug-cgscc-pass-manager", cl::Hidden,
+ cl::desc("Print CGSCC pass management debugging information"));
+
+PreservedAnalyses CGSCCPassManager::run(LazyCallGraph::SCC *C,
+ CGSCCAnalysisManager *AM) {
+ PreservedAnalyses PA = PreservedAnalyses::all();
+
+ if (DebugPM)
+ dbgs() << "Starting CGSCC pass manager run.\n";
+
+ for (unsigned Idx = 0, Size = Passes.size(); Idx != Size; ++Idx) {
+ if (DebugPM)
+ dbgs() << "Running CGSCC pass: " << Passes[Idx]->name() << "\n";
+
+ PreservedAnalyses PassPA = Passes[Idx]->run(C, AM);
+ if (AM)
+ AM->invalidate(C, PassPA);
+ PA.intersect(std::move(PassPA));
+ }
+
+ if (DebugPM)
+ dbgs() << "Finished CGSCC pass manager run.\n";
+
+ return PA;
+}
+
+bool CGSCCAnalysisManager::empty() const {
+ assert(CGSCCAnalysisResults.empty() == CGSCCAnalysisResultLists.empty() &&
+ "The storage and index of analysis results disagree on how many there "
+ "are!");
+ return CGSCCAnalysisResults.empty();
+}
+
+void CGSCCAnalysisManager::clear() {
+ CGSCCAnalysisResults.clear();
+ CGSCCAnalysisResultLists.clear();
+}
+
+CGSCCAnalysisManager::ResultConceptT &
+CGSCCAnalysisManager::getResultImpl(void *PassID, LazyCallGraph::SCC *C) {
+ CGSCCAnalysisResultMapT::iterator RI;
+ bool Inserted;
+ std::tie(RI, Inserted) = CGSCCAnalysisResults.insert(std::make_pair(
+ std::make_pair(PassID, C), CGSCCAnalysisResultListT::iterator()));
+
+ // If we don't have a cached result for this function, look up the pass and
+ // run it to produce a result, which we then add to the cache.
+ if (Inserted) {
+ CGSCCAnalysisResultListT &ResultList = CGSCCAnalysisResultLists[C];
+ ResultList.emplace_back(PassID, lookupPass(PassID).run(C, this));
+ RI->second = std::prev(ResultList.end());
+ }
+
+ return *RI->second->second;
+}
+
+CGSCCAnalysisManager::ResultConceptT *
+CGSCCAnalysisManager::getCachedResultImpl(void *PassID,
+ LazyCallGraph::SCC *C) const {
+ CGSCCAnalysisResultMapT::const_iterator RI =
+ CGSCCAnalysisResults.find(std::make_pair(PassID, C));
+ return RI == CGSCCAnalysisResults.end() ? nullptr : &*RI->second->second;
+}
+
+void CGSCCAnalysisManager::invalidateImpl(void *PassID, LazyCallGraph::SCC *C) {
+ CGSCCAnalysisResultMapT::iterator RI =
+ CGSCCAnalysisResults.find(std::make_pair(PassID, C));
+ if (RI == CGSCCAnalysisResults.end())
+ return;
+
+ CGSCCAnalysisResultLists[C].erase(RI->second);
+}
+
+void CGSCCAnalysisManager::invalidateImpl(LazyCallGraph::SCC *C,
+ const PreservedAnalyses &PA) {
+ // Clear all the invalidated results associated specifically with this
+ // SCC.
+ SmallVector<void *, 8> InvalidatedPassIDs;
+ CGSCCAnalysisResultListT &ResultsList = CGSCCAnalysisResultLists[C];
+ for (CGSCCAnalysisResultListT::iterator I = ResultsList.begin(),
+ E = ResultsList.end();
+ I != E;)
+ if (I->second->invalidate(C, PA)) {
+ InvalidatedPassIDs.push_back(I->first);
+ I = ResultsList.erase(I);
+ } else {
+ ++I;
+ }
+ while (!InvalidatedPassIDs.empty())
+ CGSCCAnalysisResults.erase(
+ std::make_pair(InvalidatedPassIDs.pop_back_val(), C));
+ CGSCCAnalysisResultLists.erase(C);
+}
+
+char CGSCCAnalysisManagerModuleProxy::PassID;
+
+CGSCCAnalysisManagerModuleProxy::Result
+CGSCCAnalysisManagerModuleProxy::run(Module *M) {
+ assert(CGAM->empty() && "CGSCC analyses ran prior to the module proxy!");
+ return Result(*CGAM);
+}
+
+CGSCCAnalysisManagerModuleProxy::Result::~Result() {
+ // Clear out the analysis manager if we're being destroyed -- it means we
+ // didn't even see an invalidate call when we got invalidated.
+ CGAM->clear();
+}
+
+bool CGSCCAnalysisManagerModuleProxy::Result::invalidate(
+ Module *M, const PreservedAnalyses &PA) {
+ // If this proxy isn't marked as preserved, then we can't even invalidate
+ // individual CGSCC analyses; there may be an invalid set of SCC objects in
+ // the cache, making it impossible to incrementally preserve them.
+ // Just clear the entire manager.
+ if (!PA.preserved(ID()))
+ CGAM->clear();
+
+ // Return false to indicate that this result is still a valid proxy.
+ return false;
+}
+
+char ModuleAnalysisManagerCGSCCProxy::PassID;
+
+char FunctionAnalysisManagerCGSCCProxy::PassID;
+
+FunctionAnalysisManagerCGSCCProxy::Result
+FunctionAnalysisManagerCGSCCProxy::run(LazyCallGraph::SCC *C) {
+ assert(FAM->empty() && "Function analyses ran prior to the CGSCC proxy!");
+ return Result(*FAM);
+}
+
+FunctionAnalysisManagerCGSCCProxy::Result::~Result() {
+ // Clear out the analysis manager if we're being destroyed -- it means we
+ // didn't even see an invalidate call when we got invalidated.
+ FAM->clear();
+}
+
+bool FunctionAnalysisManagerCGSCCProxy::Result::invalidate(
+ LazyCallGraph::SCC *C, const PreservedAnalyses &PA) {
+ // If this proxy isn't marked as preserved, then we can't even invalidate
+ // individual function analyses; there may be an invalid set of Function
+ // objects in the cache, making it impossible to incrementally preserve them.
+ // Just clear the entire manager.
+ if (!PA.preserved(ID()))
+ FAM->clear();
+
+ // Return false to indicate that this result is still a valid proxy.
+ return false;
+}
+
+char CGSCCAnalysisManagerFunctionProxy::PassID;
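The run loop above intersects the PreservedAnalyses reported by each pass, so the pipeline as a whole only claims what every pass preserved; anything else is handed to the analysis manager for invalidation. A hedged illustration of that intersection semantics, using hypothetical string-named analyses rather than the real PreservedAnalyses API:

#include <cstddef>
#include <set>
#include <string>
#include <vector>

using AnalysisSet = std::set<std::string>;

// Sketch only: the real code starts from "everything preserved" and
// intersects in place; here the first pass's set seeds the result.
AnalysisSet preservedByPipeline(const std::vector<AnalysisSet> &PerPass) {
  if (PerPass.empty())
    return {};
  AnalysisSet All = PerPass.front();
  for (std::size_t I = 1; I != PerPass.size(); ++I) {
    AnalysisSet Kept;
    for (const std::string &Name : All)
      if (PerPass[I].count(Name))
        Kept.insert(Name); // an analysis survives only if this pass kept it too
    All = Kept;
  }
  return All;
}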
diff --git a/contrib/llvm/lib/Analysis/CaptureTracking.cpp b/contrib/llvm/lib/Analysis/CaptureTracking.cpp
index 79fab1b..f2f8877 100644
--- a/contrib/llvm/lib/Analysis/CaptureTracking.cpp
+++ b/contrib/llvm/lib/Analysis/CaptureTracking.cpp
@@ -20,24 +20,26 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
+#include "llvm/Analysis/CFG.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
-#include "llvm/Support/CallSite.h"
using namespace llvm;
CaptureTracker::~CaptureTracker() {}
-bool CaptureTracker::shouldExplore(Use *U) { return true; }
+bool CaptureTracker::shouldExplore(const Use *U) { return true; }
namespace {
struct SimpleCaptureTracker : public CaptureTracker {
explicit SimpleCaptureTracker(bool ReturnCaptures)
: ReturnCaptures(ReturnCaptures), Captured(false) {}
- void tooManyUses() { Captured = true; }
+ void tooManyUses() override { Captured = true; }
- bool captured(Use *U) {
+ bool captured(const Use *U) override {
if (isa<ReturnInst>(U->getUser()) && !ReturnCaptures)
return false;
@@ -49,6 +51,65 @@ namespace {
bool Captured;
};
+
+ /// Only finds pointer captures which happen before the given instruction. Uses
+ /// the dominator tree to determine whether one instruction is before another.
+ /// Only supports the case where the Value is defined in the same basic block
+ /// as the given instruction and the use.
+ struct CapturesBefore : public CaptureTracker {
+ CapturesBefore(bool ReturnCaptures, const Instruction *I, DominatorTree *DT,
+ bool IncludeI)
+ : BeforeHere(I), DT(DT), ReturnCaptures(ReturnCaptures),
+ IncludeI(IncludeI), Captured(false) {}
+
+ void tooManyUses() override { Captured = true; }
+
+ bool shouldExplore(const Use *U) override {
+ Instruction *I = cast<Instruction>(U->getUser());
+ if (BeforeHere == I && !IncludeI)
+ return false;
+
+ BasicBlock *BB = I->getParent();
+ // We explore this usage only if the usage can reach "BeforeHere".
+ // If use is not reachable from entry, there is no need to explore.
+ if (BeforeHere != I && !DT->isReachableFromEntry(BB))
+ return false;
+ // If the value is defined in the same basic block as use and BeforeHere,
+ // there is no need to explore the use if BeforeHere dominates use.
+ // Check whether there is a path from I to BeforeHere.
+ if (BeforeHere != I && DT->dominates(BeforeHere, I) &&
+ !isPotentiallyReachable(I, BeforeHere, DT))
+ return false;
+ return true;
+ }
+
+ bool captured(const Use *U) override {
+ if (isa<ReturnInst>(U->getUser()) && !ReturnCaptures)
+ return false;
+
+ Instruction *I = cast<Instruction>(U->getUser());
+ if (BeforeHere == I && !IncludeI)
+ return false;
+
+ BasicBlock *BB = I->getParent();
+ // Same logic as in shouldExplore.
+ if (BeforeHere != I && !DT->isReachableFromEntry(BB))
+ return false;
+ if (BeforeHere != I && DT->dominates(BeforeHere, I) &&
+ !isPotentiallyReachable(I, BeforeHere, DT))
+ return false;
+ Captured = true;
+ return true;
+ }
+
+ const Instruction *BeforeHere;
+ DominatorTree *DT;
+
+ bool ReturnCaptures;
+ bool IncludeI;
+
+ bool Captured;
+ };
}
/// PointerMayBeCaptured - Return true if this pointer value may be captured
@@ -74,6 +135,32 @@ bool llvm::PointerMayBeCaptured(const Value *V,
return SCT.Captured;
}
+/// PointerMayBeCapturedBefore - Return true if this pointer value may be
+/// captured by the enclosing function (which is required to exist). If a
+/// DominatorTree is provided, only captures which happen before the given
+/// instruction are considered. This routine can be expensive, so consider
+/// caching the results. The boolean ReturnCaptures specifies whether
+/// returning the value (or part of it) from the function counts as capturing
+/// it or not. The boolean StoreCaptures specified whether storing the value
+/// (or part of it) into memory anywhere automatically counts as capturing it
+/// or not.
+bool llvm::PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures,
+ bool StoreCaptures, const Instruction *I,
+ DominatorTree *DT, bool IncludeI) {
+ assert(!isa<GlobalValue>(V) &&
+ "It doesn't make sense to ask whether a global is captured.");
+
+ if (!DT)
+ return PointerMayBeCaptured(V, ReturnCaptures, StoreCaptures);
+
+ // TODO: See comment in PointerMayBeCaptured regarding what could be done
+ // with StoreCaptures.
+
+ CapturesBefore CB(ReturnCaptures, I, DT, IncludeI);
+ PointerMayBeCaptured(V, &CB);
+ return CB.Captured;
+}
+
/// TODO: Write a new FunctionPass AliasAnalysis so that it can keep
/// a cache. Then we can move the code from BasicAliasAnalysis into
/// that path, and remove this threshold.
@@ -81,25 +168,23 @@ static int const Threshold = 20;
void llvm::PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker) {
assert(V->getType()->isPointerTy() && "Capture is for pointers only!");
- SmallVector<Use*, Threshold> Worklist;
- SmallSet<Use*, Threshold> Visited;
+ SmallVector<const Use *, Threshold> Worklist;
+ SmallSet<const Use *, Threshold> Visited;
int Count = 0;
- for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
- UI != UE; ++UI) {
+ for (const Use &U : V->uses()) {
// If there are lots of uses, conservatively say that the value
// is captured to avoid taking too much compile time.
if (Count++ >= Threshold)
return Tracker->tooManyUses();
- Use *U = &UI.getUse();
- if (!Tracker->shouldExplore(U)) continue;
- Visited.insert(U);
- Worklist.push_back(U);
+ if (!Tracker->shouldExplore(&U)) continue;
+ Visited.insert(&U);
+ Worklist.push_back(&U);
}
while (!Worklist.empty()) {
- Use *U = Worklist.pop_back_val();
+ const Use *U = Worklist.pop_back_val();
Instruction *I = cast<Instruction>(U->getUser());
V = U->get();
@@ -145,19 +230,18 @@ void llvm::PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker) {
case Instruction::GetElementPtr:
case Instruction::PHI:
case Instruction::Select:
+ case Instruction::AddrSpaceCast:
// The original value is not captured via this if the new value isn't.
Count = 0;
- for (Instruction::use_iterator UI = I->use_begin(), UE = I->use_end();
- UI != UE; ++UI) {
+ for (Use &UU : I->uses()) {
// If there are lots of uses, conservatively say that the value
// is captured to avoid taking too much compile time.
if (Count++ >= Threshold)
return Tracker->tooManyUses();
- Use *U = &UI.getUse();
- if (Visited.insert(U))
- if (Tracker->shouldExplore(U))
- Worklist.push_back(U);
+ if (Visited.insert(&UU))
+ if (Tracker->shouldExplore(&UU))
+ Worklist.push_back(&UU);
}
break;
case Instruction::ICmp:
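A hedged usage sketch of the PointerMayBeCapturedBefore entry point added above; the wrapper function and its name are invented for illustration, but the call matches the signature introduced in this patch:

#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Value.h"

// Does Ptr possibly escape before the given instruction executes?
bool mayEscapeBefore(const llvm::Value *Ptr, const llvm::Instruction *Before,
                     llvm::DominatorTree *DT) {
  // ReturnCaptures=true: returning Ptr from the function counts as a capture.
  // StoreCaptures=true:  storing Ptr anywhere counts as a capture.
  // IncludeI=false:      a capture by `Before` itself is ignored.
  return llvm::PointerMayBeCapturedBefore(Ptr, /*ReturnCaptures=*/true,
                                          /*StoreCaptures=*/true, Before, DT,
                                          /*IncludeI=*/false);
}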
diff --git a/contrib/llvm/lib/Analysis/CodeMetrics.cpp b/contrib/llvm/lib/Analysis/CodeMetrics.cpp
index 8cda01a..4c8a093 100644
--- a/contrib/llvm/lib/Analysis/CodeMetrics.cpp
+++ b/contrib/llvm/lib/Analysis/CodeMetrics.cpp
@@ -13,10 +13,10 @@
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/Support/CallSite.h"
using namespace llvm;
@@ -65,11 +65,11 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
++NumVectorInsts;
if (const CallInst *CI = dyn_cast<CallInst>(II))
- if (CI->hasFnAttr(Attribute::NoDuplicate))
+ if (CI->cannotDuplicate())
notDuplicatable = true;
if (const InvokeInst *InvI = dyn_cast<InvokeInst>(II))
- if (InvI->hasFnAttr(Attribute::NoDuplicate))
+ if (InvI->cannotDuplicate())
notDuplicatable = true;
NumInsts += TTI.getUserCost(&*II);
diff --git a/contrib/llvm/lib/Analysis/ConstantFolding.cpp b/contrib/llvm/lib/Analysis/ConstantFolding.cpp
index 3d32232..8dc9421 100644
--- a/contrib/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/contrib/llvm/lib/Analysis/ConstantFolding.cpp
@@ -21,21 +21,26 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Config/config.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/FEnv.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include <cerrno>
#include <cmath>
+
+#ifdef HAVE_FENV_H
+#include <fenv.h>
+#endif
+
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -56,7 +61,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
// Handle a vector->integer cast.
if (IntegerType *IT = dyn_cast<IntegerType>(DestTy)) {
VectorType *VTy = dyn_cast<VectorType>(C->getType());
- if (VTy == 0)
+ if (!VTy)
return ConstantExpr::getBitCast(C, DestTy);
unsigned NumSrcElts = VTy->getNumElements();
@@ -73,7 +78,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
}
ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C);
- if (CDV == 0)
+ if (!CDV)
return ConstantExpr::getBitCast(C, DestTy);
// Now that we know that the input value is a vector of integers, just shift
@@ -93,7 +98,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
// The code below only handles casts to vectors currently.
VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
- if (DestVTy == 0)
+ if (!DestVTy)
return ConstantExpr::getBitCast(C, DestTy);
// If this is a scalar -> vector cast, convert the input into a <1 x scalar>
@@ -235,7 +240,8 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
// Look through ptr->int and ptr->ptr casts.
if (CE->getOpcode() == Instruction::PtrToInt ||
- CE->getOpcode() == Instruction::BitCast)
+ CE->getOpcode() == Instruction::BitCast ||
+ CE->getOpcode() == Instruction::AddrSpaceCast)
return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD);
// i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
@@ -411,32 +417,32 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
TD.getTypeAllocSizeInBits(LoadTy),
AS);
} else
- return 0;
+ return nullptr;
C = FoldBitCast(C, MapTy, TD);
if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, TD))
return FoldBitCast(Res, LoadTy, TD);
- return 0;
+ return nullptr;
}
unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
if (BytesLoaded > 32 || BytesLoaded == 0)
- return 0;
+ return nullptr;
GlobalValue *GVal;
APInt Offset;
if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD))
- return 0;
+ return nullptr;
GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal);
if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
!GV->getInitializer()->getType()->isSized())
- return 0;
+ return nullptr;
// If we're loading off the beginning of the global, some bytes may be valid,
// but we don't try to handle this.
if (Offset.isNegative())
- return 0;
+ return nullptr;
// If we're not accessing anything in this constant, the result is undefined.
if (Offset.getZExtValue() >=
@@ -446,7 +452,7 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
unsigned char RawBytes[32] = {0};
if (!ReadDataFromGlobal(GV->getInitializer(), Offset.getZExtValue(), RawBytes,
BytesLoaded, TD))
- return 0;
+ return nullptr;
APInt ResultVal = APInt(IntType->getBitWidth(), 0);
if (TD.isLittleEndian()) {
@@ -466,6 +472,52 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
return ConstantInt::get(IntType->getContext(), ResultVal);
}
+static Constant *ConstantFoldLoadThroughBitcast(ConstantExpr *CE,
+ const DataLayout *DL) {
+ if (!DL)
+ return nullptr;
+ auto *DestPtrTy = dyn_cast<PointerType>(CE->getType());
+ if (!DestPtrTy)
+ return nullptr;
+ Type *DestTy = DestPtrTy->getElementType();
+
+ Constant *C = ConstantFoldLoadFromConstPtr(CE->getOperand(0), DL);
+ if (!C)
+ return nullptr;
+
+ do {
+ Type *SrcTy = C->getType();
+
+ // If the type sizes are the same and a cast is legal, just directly
+ // cast the constant.
+ if (DL->getTypeSizeInBits(DestTy) == DL->getTypeSizeInBits(SrcTy)) {
+ Instruction::CastOps Cast = Instruction::BitCast;
+ // If we are going from a pointer to int or vice versa, we spell the cast
+ // differently.
+ if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
+ Cast = Instruction::IntToPtr;
+ else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
+ Cast = Instruction::PtrToInt;
+
+ if (CastInst::castIsValid(Cast, C, DestTy))
+ return ConstantExpr::getCast(Cast, C, DestTy);
+ }
+
+ // If this isn't an aggregate type, there is nothing we can do to drill down
+ // and find a bitcastable constant.
+ if (!SrcTy->isAggregateType())
+ return nullptr;
+
+ // We're simulating a load through a pointer that was bitcast to point to
+ // a different type, so we can try to walk down through the initial
+ // elements of an aggregate to see if some part of the aggregate is
+ // castable to implement the "load" semantic model.
+ C = C->getAggregateElement(0u);
+ } while (C);
+
+ return nullptr;
+}
+
/// ConstantFoldLoadFromConstPtr - Return the value that a load from C would
/// produce if it is constant and determinable. If this is not determinable,
/// return null.
@@ -479,7 +531,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
// If the loaded value isn't a constant expr, we can't handle it.
ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
if (!CE)
- return 0;
+ return nullptr;
if (CE->getOpcode() == Instruction::GetElementPtr) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
@@ -491,6 +543,10 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
}
}
+ if (CE->getOpcode() == Instruction::BitCast)
+ if (Constant *LoadedC = ConstantFoldLoadThroughBitcast(CE, TD))
+ return LoadedC;
+
// Instead of loading constant c string, use corresponding integer value
// directly if string length is small enough.
StringRef Str;
@@ -542,16 +598,16 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
// Try hard to fold loads from bitcasted strange and non-type-safe things.
if (TD)
return FoldReinterpretLoadFromConstPtr(CE, *TD);
- return 0;
+ return nullptr;
}
static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
- if (LI->isVolatile()) return 0;
+ if (LI->isVolatile()) return nullptr;
if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
return ConstantFoldLoadFromConstPtr(C, TD);
- return 0;
+ return nullptr;
}
/// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression.
@@ -571,8 +627,8 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
unsigned BitWidth = DL->getTypeSizeInBits(Op0->getType()->getScalarType());
APInt KnownZero0(BitWidth, 0), KnownOne0(BitWidth, 0);
APInt KnownZero1(BitWidth, 0), KnownOne1(BitWidth, 0);
- ComputeMaskedBits(Op0, KnownZero0, KnownOne0, DL);
- ComputeMaskedBits(Op1, KnownZero1, KnownOne1, DL);
+ computeKnownBits(Op0, KnownZero0, KnownOne0, DL);
+ computeKnownBits(Op1, KnownZero1, KnownOne1, DL);
if ((KnownOne1 | KnownZero0).isAllOnesValue()) {
// All the bits of Op0 that the 'and' could be masking are already zero.
return Op0;
@@ -608,7 +664,7 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
}
}
- return 0;
+ return nullptr;
}
/// CastGEPIndices - If array indices are not pointer-sized integers,
@@ -618,7 +674,7 @@ static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
Type *ResultTy, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TD)
- return 0;
+ return nullptr;
Type *IntPtrTy = TD->getIntPtrType(ResultTy);
@@ -641,7 +697,7 @@ static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
}
if (!Any)
- return 0;
+ return nullptr;
Constant *C = ConstantExpr::getGetElementPtr(Ops[0], NewIdxs);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
@@ -656,7 +712,7 @@ static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
static Constant* StripPtrCastKeepAS(Constant* Ptr) {
assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
PointerType *OldPtrTy = cast<PointerType>(Ptr->getType());
- Ptr = cast<Constant>(Ptr->stripPointerCasts());
+ Ptr = Ptr->stripPointerCasts();
PointerType *NewPtrTy = cast<PointerType>(Ptr->getType());
// Preserve the address space number of the pointer.
@@ -676,7 +732,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
Constant *Ptr = Ops[0];
if (!TD || !Ptr->getType()->getPointerElementType()->isSized() ||
!Ptr->getType()->isPointerTy())
- return 0;
+ return nullptr;
Type *IntPtrTy = TD->getIntPtrType(Ptr->getType());
Type *ResultElementTy = ResultTy->getPointerElementType();
@@ -690,7 +746,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
// "inttoptr (sub (ptrtoint Ptr), V)"
if (Ops.size() == 2 && ResultElementTy->isIntegerTy(8)) {
ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[1]);
- assert((CE == 0 || CE->getType() == IntPtrTy) &&
+ assert((!CE || CE->getType() == IntPtrTy) &&
"CastGEPIndices didn't canonicalize index types!");
if (CE && CE->getOpcode() == Instruction::Sub &&
CE->getOperand(0)->isNullValue()) {
@@ -702,7 +758,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
return Res;
}
}
- return 0;
+ return nullptr;
}
unsigned BitWidth = TD->getTypeSizeInBits(IntPtrTy);
@@ -765,7 +821,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
// Only handle pointers to sized types, not pointers to functions.
if (!ATy->getElementType()->isSized())
- return 0;
+ return nullptr;
}
// Determine which element of the array the offset points into.
@@ -810,7 +866,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
// type, then the offset is pointing into the middle of an indivisible
// member, so we can't simplify it.
if (Offset != 0)
- return 0;
+ return nullptr;
// Create a GEP.
Constant *C = ConstantExpr::getGetElementPtr(Ptr, NewIdxs);
@@ -841,7 +897,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I,
const TargetLibraryInfo *TLI) {
// Handle PHI nodes quickly here...
if (PHINode *PN = dyn_cast<PHINode>(I)) {
- Constant *CommonValue = 0;
+ Constant *CommonValue = nullptr;
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
Value *Incoming = PN->getIncomingValue(i);
@@ -854,14 +910,14 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I,
// If the incoming value is not a constant, then give up.
Constant *C = dyn_cast<Constant>(Incoming);
if (!C)
- return 0;
+ return nullptr;
// Fold the PHI's operands.
if (ConstantExpr *NewC = dyn_cast<ConstantExpr>(C))
C = ConstantFoldConstantExpression(NewC, TD, TLI);
// If the incoming value is a different constant to
// the one we saw previously, then give up.
if (CommonValue && C != CommonValue)
- return 0;
+ return nullptr;
CommonValue = C;
}
@@ -876,7 +932,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I,
for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i) {
Constant *Op = dyn_cast<Constant>(*i);
if (!Op)
- return 0; // All operands not constant!
+ return nullptr; // All operands not constant!
// Fold the Instruction's operands.
if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(Op))
@@ -966,14 +1022,14 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
}
switch (Opcode) {
- default: return 0;
+ default: return nullptr;
case Instruction::ICmp:
case Instruction::FCmp: llvm_unreachable("Invalid for compares");
case Instruction::Call:
if (Function *F = dyn_cast<Function>(Ops.back()))
if (canConstantFoldCallTo(F))
return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1), TLI);
- return 0;
+ return nullptr;
case Instruction::PtrToInt:
// If the input is a inttoptr, eliminate the pair. This requires knowing
// the width of a pointer, so it can't be done in ConstantExpr::getCast.
@@ -1142,14 +1198,14 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
ConstantExpr *CE) {
if (!CE->getOperand(1)->isNullValue())
- return 0; // Do not allow stepping over the value!
+ return nullptr; // Do not allow stepping over the value!
// Loop over all of the operands, tracking down which value we are
// addressing.
for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
C = C->getAggregateElement(CE->getOperand(i));
- if (C == 0)
- return 0;
+ if (!C)
+ return nullptr;
}
return C;
}
@@ -1164,8 +1220,8 @@ Constant *llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
// addressing.
for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
C = C->getAggregateElement(Indices[i]);
- if (C == 0)
- return 0;
+ if (!C)
+ return nullptr;
}
return C;
}
@@ -1186,6 +1242,7 @@ bool llvm::canConstantFoldCallTo(const Function *F) {
case Intrinsic::exp:
case Intrinsic::exp2:
case Intrinsic::floor:
+ case Intrinsic::ceil:
case Intrinsic::sqrt:
case Intrinsic::pow:
case Intrinsic::powi:
@@ -1193,6 +1250,10 @@ bool llvm::canConstantFoldCallTo(const Function *F) {
case Intrinsic::ctpop:
case Intrinsic::ctlz:
case Intrinsic::cttz:
+ case Intrinsic::fma:
+ case Intrinsic::fmuladd:
+ case Intrinsic::copysign:
+ case Intrinsic::round:
case Intrinsic::sadd_with_overflow:
case Intrinsic::uadd_with_overflow:
case Intrinsic::ssub_with_overflow:
@@ -1244,15 +1305,7 @@ bool llvm::canConstantFoldCallTo(const Function *F) {
}
}
-static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
- Type *Ty) {
- sys::llvm_fenv_clearexcept();
- V = NativeFP(V);
- if (sys::llvm_fenv_testexcept()) {
- sys::llvm_fenv_clearexcept();
- return 0;
- }
-
+static Constant *GetConstantFoldFPValue(double V, Type *Ty) {
if (Ty->isHalfTy()) {
APFloat APF(V);
bool unused;
@@ -1264,28 +1317,53 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
if (Ty->isDoubleTy())
return ConstantFP::get(Ty->getContext(), APFloat(V));
llvm_unreachable("Can only constant fold half/float/double");
+
+}
+
+namespace {
+/// llvm_fenv_clearexcept - Clear the floating-point exception state.
+static inline void llvm_fenv_clearexcept() {
+#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
+ feclearexcept(FE_ALL_EXCEPT);
+#endif
+ errno = 0;
+}
+
+/// llvm_fenv_testexcept - Test if a floating-point exception was raised.
+static inline bool llvm_fenv_testexcept() {
+ int errno_val = errno;
+ if (errno_val == ERANGE || errno_val == EDOM)
+ return true;
+#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
+ if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
+ return true;
+#endif
+ return false;
+}
+} // End namespace
+
+static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
+ Type *Ty) {
+ llvm_fenv_clearexcept();
+ V = NativeFP(V);
+ if (llvm_fenv_testexcept()) {
+ llvm_fenv_clearexcept();
+ return nullptr;
+ }
+
+ return GetConstantFoldFPValue(V, Ty);
}
static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
double V, double W, Type *Ty) {
- sys::llvm_fenv_clearexcept();
+ llvm_fenv_clearexcept();
V = NativeFP(V, W);
- if (sys::llvm_fenv_testexcept()) {
- sys::llvm_fenv_clearexcept();
- return 0;
+ if (llvm_fenv_testexcept()) {
+ llvm_fenv_clearexcept();
+ return nullptr;
}
- if (Ty->isHalfTy()) {
- APFloat APF(V);
- bool unused;
- APF.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &unused);
- return ConstantFP::get(Ty->getContext(), APF);
- }
- if (Ty->isFloatTy())
- return ConstantFP::get(Ty->getContext(), APFloat((float)V));
- if (Ty->isDoubleTy())
- return ConstantFP::get(Ty->getContext(), APFloat(V));
- llvm_unreachable("Can only constant fold half/float/double");
+ return GetConstantFoldFPValue(V, Ty);
}
/// ConstantFoldConvertToInt - Attempt to an SSE floating point to integer
@@ -1311,59 +1389,61 @@ static Constant *ConstantFoldConvertToInt(const APFloat &Val,
/*isSigned=*/true, mode,
&isExact);
if (status != APFloat::opOK && status != APFloat::opInexact)
- return 0;
+ return nullptr;
return ConstantInt::get(Ty, UIntVal, /*isSigned=*/true);
}
-/// ConstantFoldCall - Attempt to constant fold a call to the specified function
-/// with the specified arguments, returning null if unsuccessful.
-Constant *
-llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
- const TargetLibraryInfo *TLI) {
- if (!F->hasName())
- return 0;
- StringRef Name = F->getName();
+static double getValueAsDouble(ConstantFP *Op) {
+ Type *Ty = Op->getType();
- Type *Ty = F->getReturnType();
+ if (Ty->isFloatTy())
+ return Op->getValueAPF().convertToFloat();
+
+ if (Ty->isDoubleTy())
+ return Op->getValueAPF().convertToDouble();
+
+ bool unused;
+ APFloat APF = Op->getValueAPF();
+ APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &unused);
+ return APF.convertToDouble();
+}
+
+static Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID,
+ Type *Ty, ArrayRef<Constant *> Operands,
+ const TargetLibraryInfo *TLI) {
if (Operands.size() == 1) {
if (ConstantFP *Op = dyn_cast<ConstantFP>(Operands[0])) {
- if (F->getIntrinsicID() == Intrinsic::convert_to_fp16) {
+ if (IntrinsicID == Intrinsic::convert_to_fp16) {
APFloat Val(Op->getValueAPF());
bool lost = false;
Val.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &lost);
- return ConstantInt::get(F->getContext(), Val.bitcastToAPInt());
+ return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
}
- if (!TLI)
- return 0;
if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
- return 0;
+ return nullptr;
+
+ if (IntrinsicID == Intrinsic::round) {
+ APFloat V = Op->getValueAPF();
+ V.roundToIntegral(APFloat::rmNearestTiesToAway);
+ return ConstantFP::get(Ty->getContext(), V);
+ }
/// We only fold functions with finite arguments. Folding NaN and inf is
/// likely to be aborted with an exception anyway, and some host libms
/// have known errors raising exceptions.
if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
- return 0;
+ return nullptr;
/// Currently APFloat versions of these functions do not exist, so we use
/// the host native double versions. Float versions are not called
/// directly but for all these it is true (float)(f((double)arg)) ==
/// f(arg). Long double not supported yet.
- double V;
- if (Ty->isFloatTy())
- V = Op->getValueAPF().convertToFloat();
- else if (Ty->isDoubleTy())
- V = Op->getValueAPF().convertToDouble();
- else {
- bool unused;
- APFloat APF = Op->getValueAPF();
- APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &unused);
- V = APF.convertToDouble();
- }
+ double V = getValueAsDouble(Op);
- switch (F->getIntrinsicID()) {
+ switch (IntrinsicID) {
default: break;
case Intrinsic::fabs:
return ConstantFoldFP(fabs, V, Ty);
@@ -1389,8 +1469,13 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
#endif
case Intrinsic::floor:
return ConstantFoldFP(floor, V, Ty);
+ case Intrinsic::ceil:
+ return ConstantFoldFP(ceil, V, Ty);
}
+ if (!TLI)
+ return nullptr;
+
switch (Name[0]) {
case 'a':
if (Name == "acos" && TLI->has(LibFunc::acos))
@@ -1431,7 +1516,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
return ConstantFoldFP(log, V, Ty);
else if (Name == "log10" && V > 0 && TLI->has(LibFunc::log10))
return ConstantFoldFP(log10, V, Ty);
- else if (F->getIntrinsicID() == Intrinsic::sqrt &&
+ else if (IntrinsicID == Intrinsic::sqrt &&
(Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())) {
if (V >= -0.0)
return ConstantFoldFP(sqrt, V, Ty);
@@ -1460,13 +1545,13 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
default:
break;
}
- return 0;
+ return nullptr;
}
if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) {
- switch (F->getIntrinsicID()) {
+ switch (IntrinsicID) {
case Intrinsic::bswap:
- return ConstantInt::get(F->getContext(), Op->getValue().byteSwap());
+ return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
case Intrinsic::ctpop:
return ConstantInt::get(Ty, Op->getValue().countPopulation());
case Intrinsic::convert_from_fp16: {
@@ -1481,10 +1566,10 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
assert(status == APFloat::opOK && !lost &&
"Precision lost during fp16 constfolding");
- return ConstantFP::get(F->getContext(), Val);
+ return ConstantFP::get(Ty->getContext(), Val);
}
default:
- return 0;
+ return nullptr;
}
}
@@ -1492,7 +1577,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
if (isa<ConstantVector>(Operands[0]) ||
isa<ConstantDataVector>(Operands[0])) {
Constant *Op = cast<Constant>(Operands[0]);
- switch (F->getIntrinsicID()) {
+ switch (IntrinsicID) {
default: break;
case Intrinsic::x86_sse_cvtss2si:
case Intrinsic::x86_sse_cvtss2si64:
@@ -1514,51 +1599,36 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
}
if (isa<UndefValue>(Operands[0])) {
- if (F->getIntrinsicID() == Intrinsic::bswap)
+ if (IntrinsicID == Intrinsic::bswap)
return Operands[0];
- return 0;
+ return nullptr;
}
- return 0;
+ return nullptr;
}
if (Operands.size() == 2) {
if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
- return 0;
- double Op1V;
- if (Ty->isFloatTy())
- Op1V = Op1->getValueAPF().convertToFloat();
- else if (Ty->isDoubleTy())
- Op1V = Op1->getValueAPF().convertToDouble();
- else {
- bool unused;
- APFloat APF = Op1->getValueAPF();
- APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &unused);
- Op1V = APF.convertToDouble();
- }
+ return nullptr;
+ double Op1V = getValueAsDouble(Op1);
if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
if (Op2->getType() != Op1->getType())
- return 0;
-
- double Op2V;
- if (Ty->isFloatTy())
- Op2V = Op2->getValueAPF().convertToFloat();
- else if (Ty->isDoubleTy())
- Op2V = Op2->getValueAPF().convertToDouble();
- else {
- bool unused;
- APFloat APF = Op2->getValueAPF();
- APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &unused);
- Op2V = APF.convertToDouble();
- }
+ return nullptr;
- if (F->getIntrinsicID() == Intrinsic::pow) {
+ double Op2V = getValueAsDouble(Op2);
+ if (IntrinsicID == Intrinsic::pow) {
return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
}
+ if (IntrinsicID == Intrinsic::copysign) {
+ APFloat V1 = Op1->getValueAPF();
+ APFloat V2 = Op2->getValueAPF();
+ V1.copySign(V2);
+ return ConstantFP::get(Ty->getContext(), V1);
+ }
if (!TLI)
- return 0;
+ return nullptr;
if (Name == "pow" && TLI->has(LibFunc::pow))
return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
if (Name == "fmod" && TLI->has(LibFunc::fmod))
@@ -1566,25 +1636,25 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
if (Name == "atan2" && TLI->has(LibFunc::atan2))
return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
} else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
- if (F->getIntrinsicID() == Intrinsic::powi && Ty->isHalfTy())
- return ConstantFP::get(F->getContext(),
+ if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
+ return ConstantFP::get(Ty->getContext(),
APFloat((float)std::pow((float)Op1V,
(int)Op2C->getZExtValue())));
- if (F->getIntrinsicID() == Intrinsic::powi && Ty->isFloatTy())
- return ConstantFP::get(F->getContext(),
+ if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
+ return ConstantFP::get(Ty->getContext(),
APFloat((float)std::pow((float)Op1V,
(int)Op2C->getZExtValue())));
- if (F->getIntrinsicID() == Intrinsic::powi && Ty->isDoubleTy())
- return ConstantFP::get(F->getContext(),
+ if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
+ return ConstantFP::get(Ty->getContext(),
APFloat((double)std::pow((double)Op1V,
(int)Op2C->getZExtValue())));
}
- return 0;
+ return nullptr;
}
if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
- switch (F->getIntrinsicID()) {
+ switch (IntrinsicID) {
default: break;
case Intrinsic::sadd_with_overflow:
case Intrinsic::uadd_with_overflow:
@@ -1594,7 +1664,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
case Intrinsic::umul_with_overflow: {
APInt Res;
bool Overflow;
- switch (F->getIntrinsicID()) {
+ switch (IntrinsicID) {
default: llvm_unreachable("Invalid case");
case Intrinsic::sadd_with_overflow:
Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow);
@@ -1616,10 +1686,10 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
break;
}
Constant *Ops[] = {
- ConstantInt::get(F->getContext(), Res),
- ConstantInt::get(Type::getInt1Ty(F->getContext()), Overflow)
+ ConstantInt::get(Ty->getContext(), Res),
+ ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
};
- return ConstantStruct::get(cast<StructType>(F->getReturnType()), Ops);
+ return ConstantStruct::get(cast<StructType>(Ty), Ops);
}
case Intrinsic::cttz:
if (Op2->isOne() && Op1->isZero()) // cttz(0, 1) is undef.
@@ -1632,9 +1702,79 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
}
}
- return 0;
+ return nullptr;
}
- return 0;
+ return nullptr;
}
- return 0;
+
+ if (Operands.size() != 3)
+ return nullptr;
+
+ if (const ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
+ if (const ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
+ if (const ConstantFP *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
+ switch (IntrinsicID) {
+ default: break;
+ case Intrinsic::fma:
+ case Intrinsic::fmuladd: {
+ APFloat V = Op1->getValueAPF();
+ APFloat::opStatus s = V.fusedMultiplyAdd(Op2->getValueAPF(),
+ Op3->getValueAPF(),
+ APFloat::rmNearestTiesToEven);
+ if (s != APFloat::opInvalidOp)
+ return ConstantFP::get(Ty->getContext(), V);
+
+ return nullptr;
+ }
+ }
+ }
+ }
+ }
+
+ return nullptr;
+}
+
+static Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID,
+ VectorType *VTy,
+ ArrayRef<Constant *> Operands,
+ const TargetLibraryInfo *TLI) {
+ SmallVector<Constant *, 4> Result(VTy->getNumElements());
+ SmallVector<Constant *, 4> Lane(Operands.size());
+ Type *Ty = VTy->getElementType();
+
+ for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
+ // Gather a column of constants.
+ for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
+ Constant *Agg = Operands[J]->getAggregateElement(I);
+ if (!Agg)
+ return nullptr;
+
+ Lane[J] = Agg;
+ }
+
+ // Use the regular scalar folding to simplify this column.
+ Constant *Folded = ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI);
+ if (!Folded)
+ return nullptr;
+ Result[I] = Folded;
+ }
+
+ return ConstantVector::get(Result);
+}
+
+/// ConstantFoldCall - Attempt to constant fold a call to the specified function
+/// with the specified arguments, returning null if unsuccessful.
+Constant *
+llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
+ const TargetLibraryInfo *TLI) {
+ if (!F->hasName())
+ return nullptr;
+ StringRef Name = F->getName();
+
+ Type *Ty = F->getReturnType();
+
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands, TLI);
+
+ return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI);
}
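ConstantFoldVectorCall above folds a vector intrinsic one lane at a time: it gathers element I of every operand, hands that column to the scalar folder, and gives up if any lane fails. A standalone sketch of the same shape, assuming plain doubles in place of Constants and an empty vector as the "could not fold" result:

#include <cmath>
#include <cstddef>
#include <vector>

// Fold a two-operand vector call lane by lane with the supplied scalar rule.
std::vector<double> foldVectorBinaryCall(double (*ScalarFold)(double, double),
                                         const std::vector<double> &A,
                                         const std::vector<double> &B) {
  if (A.size() != B.size())
    return {};
  std::vector<double> Result(A.size());
  for (std::size_t I = 0; I != A.size(); ++I)
    // Gather lane I of every operand and fold that column.
    Result[I] = ScalarFold(A[I], B[I]);
  return Result;
}

// Example: fold pow(<4.0, 9.0>, <0.5, 0.5>) lane by lane, giving <2.0, 3.0>.
// auto R = foldVectorBinaryCall(
//     [](double X, double Y) { return std::pow(X, Y); }, {4.0, 9.0}, {0.5, 0.5});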
diff --git a/contrib/llvm/lib/Analysis/CostModel.cpp b/contrib/llvm/lib/Analysis/CostModel.cpp
index f943258..1b74f8c 100644
--- a/contrib/llvm/lib/Analysis/CostModel.cpp
+++ b/contrib/llvm/lib/Analysis/CostModel.cpp
@@ -17,8 +17,6 @@
//
//===----------------------------------------------------------------------===//
-#define CM_NAME "cost-model"
-#define DEBUG_TYPE CM_NAME
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/TargetTransformInfo.h"
@@ -32,6 +30,9 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define CM_NAME "cost-model"
+#define DEBUG_TYPE CM_NAME
+
static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false),
cl::Hidden,
cl::desc("Recognize reduction patterns."));
@@ -41,7 +42,7 @@ namespace {
public:
static char ID; // Class identification, replacement for typeinfo
- CostModelAnalysis() : FunctionPass(ID), F(0), TTI(0) {
+ CostModelAnalysis() : FunctionPass(ID), F(nullptr), TTI(nullptr) {
initializeCostModelAnalysisPass(
*PassRegistry::getPassRegistry());
}
@@ -53,9 +54,9 @@ namespace {
unsigned getInstructionCost(const Instruction *I) const;
private:
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- virtual bool runOnFunction(Function &F);
- virtual void print(raw_ostream &OS, const Module*) const;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool runOnFunction(Function &F) override;
+ void print(raw_ostream &OS, const Module*) const override;
/// The function that we analyze.
Function *F;
@@ -94,34 +95,45 @@ static bool isReverseVectorMask(SmallVectorImpl<int> &Mask) {
return true;
}
+static bool isAlternateVectorMask(SmallVectorImpl<int> &Mask) {
+ bool isAlternate = true;
+ unsigned MaskSize = Mask.size();
+
+ // Example: shufflevector A, B, <0,5,2,7>
+ for (unsigned i = 0; i < MaskSize && isAlternate; ++i) {
+ if (Mask[i] < 0)
+ continue;
+ isAlternate = Mask[i] == (int)((i & 1) ? MaskSize + i : i);
+ }
+
+ if (isAlternate)
+ return true;
+
+ isAlternate = true;
+ // Example: shufflevector A, B, <4,1,6,3>
+ for (unsigned i = 0; i < MaskSize && isAlternate; ++i) {
+ if (Mask[i] < 0)
+ continue;
+ isAlternate = Mask[i] == (int)((i & 1) ? i : MaskSize + i);
+ }
+
+ return isAlternate;
+}
+
static TargetTransformInfo::OperandValueKind getOperandInfo(Value *V) {
TargetTransformInfo::OperandValueKind OpInfo =
TargetTransformInfo::OK_AnyValue;
- // Check for a splat of a constant.
- ConstantDataVector *CDV = 0;
- if ((CDV = dyn_cast<ConstantDataVector>(V)))
- if (CDV->getSplatValue() != NULL)
- OpInfo = TargetTransformInfo::OK_UniformConstantValue;
- ConstantVector *CV = 0;
- if ((CV = dyn_cast<ConstantVector>(V)))
- if (CV->getSplatValue() != NULL)
+ // Check for a splat of a constant or for a non-uniform vector of constants.
+ if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
+ OpInfo = TargetTransformInfo::OK_NonUniformConstantValue;
+ if (cast<Constant>(V)->getSplatValue() != nullptr)
OpInfo = TargetTransformInfo::OK_UniformConstantValue;
+ }
return OpInfo;
}
-static bool matchMask(SmallVectorImpl<int> &M1, SmallVectorImpl<int> &M2) {
- if (M1.size() != M2.size())
- return false;
-
- for (unsigned i = 0, e = M1.size(); i != e; ++i)
- if (M1[i] != M2[i])
- return false;
-
- return true;
-}
-
static bool matchPairwiseShuffleMask(ShuffleVectorInst *SI, bool IsLeft,
unsigned Level) {
// We don't need a shuffle if we just want to have element 0 in position 0 of
@@ -139,7 +151,7 @@ static bool matchPairwiseShuffleMask(ShuffleVectorInst *SI, bool IsLeft,
Mask[i] = val;
SmallVector<int, 16> ActualMask = SI->getShuffleMask();
- if (!matchMask(Mask, ActualMask))
+ if (Mask != ActualMask)
return false;
return true;
@@ -153,7 +165,7 @@ static bool matchPairwiseReductionAtLevel(const BinaryOperator *BinOp,
// %rdx.shuf.0.1 = shufflevector <4 x float> %rdx, <4 x float> undef,
// <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
// %bin.rdx.0 = fadd <4 x float> %rdx.shuf.0.0, %rdx.shuf.0.1
- if (BinOp == 0)
+ if (BinOp == nullptr)
return false;
assert(BinOp->getType()->isVectorTy() && "Expecting a vector type");
@@ -174,9 +186,9 @@ static bool matchPairwiseReductionAtLevel(const BinaryOperator *BinOp,
return false;
// Shuffle inputs must match.
- Value *NextLevelOpL = LS ? LS->getOperand(0) : 0;
- Value *NextLevelOpR = RS ? RS->getOperand(0) : 0;
- Value *NextLevelOp = 0;
+ Value *NextLevelOpL = LS ? LS->getOperand(0) : nullptr;
+ Value *NextLevelOpR = RS ? RS->getOperand(0) : nullptr;
+ Value *NextLevelOp = nullptr;
if (NextLevelOpR && NextLevelOpL) {
// If we have two shuffles their operands must match.
if (NextLevelOpL != NextLevelOpR)
@@ -201,7 +213,7 @@ static bool matchPairwiseReductionAtLevel(const BinaryOperator *BinOp,
// Check that the next levels binary operation exists and matches with the
// current one.
- BinaryOperator *NextLevelBinOp = 0;
+ BinaryOperator *NextLevelBinOp = nullptr;
if (Level + 1 != NumLevels) {
if (!(NextLevelBinOp = dyn_cast<BinaryOperator>(NextLevelOp)))
return false;
@@ -280,7 +292,7 @@ getShuffleAndOtherOprd(BinaryOperator *B) {
Value *L = B->getOperand(0);
Value *R = B->getOperand(1);
- ShuffleVectorInst *S = 0;
+ ShuffleVectorInst *S = nullptr;
if ((S = dyn_cast<ShuffleVectorInst>(L)))
return std::make_pair(R, S);
@@ -337,10 +349,10 @@ static bool matchVectorSplittingReduction(const ExtractElementInst *ReduxRoot,
Value *NextRdxOp;
ShuffleVectorInst *Shuffle;
- tie(NextRdxOp, Shuffle) = getShuffleAndOtherOprd(BinOp);
+ std::tie(NextRdxOp, Shuffle) = getShuffleAndOtherOprd(BinOp);
// Check the current reduction operation and the shuffle use the same value.
- if (Shuffle == 0)
+ if (Shuffle == nullptr)
return false;
if (Shuffle->getOperand(0) != NextRdxOp)
return false;
@@ -352,7 +364,7 @@ static bool matchVectorSplittingReduction(const ExtractElementInst *ReduxRoot,
std::fill(&ShuffleMask[MaskStart], ShuffleMask.end(), -1);
SmallVector<int, 16> Mask = Shuffle->getShuffleMask();
- if (!matchMask(ShuffleMask, Mask))
+ if (ShuffleMask != Mask)
return false;
RdxOp = NextRdxOp;
@@ -439,7 +451,8 @@ unsigned CostModelAnalysis::getInstructionCost(const Instruction *I) const {
case Instruction::UIToFP:
case Instruction::Trunc:
case Instruction::FPTrunc:
- case Instruction::BitCast: {
+ case Instruction::BitCast:
+ case Instruction::AddrSpaceCast: {
Type *SrcTy = I->getOperand(0)->getType();
return TTI->getCastInstrCost(I->getOpcode(), I->getType(), SrcTy);
}
@@ -478,9 +491,15 @@ unsigned CostModelAnalysis::getInstructionCost(const Instruction *I) const {
unsigned NumVecElems = VecTypOp0->getVectorNumElements();
SmallVector<int, 16> Mask = Shuffle->getShuffleMask();
- if (NumVecElems == Mask.size() && isReverseVectorMask(Mask))
- return TTI->getShuffleCost(TargetTransformInfo::SK_Reverse, VecTypOp0, 0,
- 0);
+ if (NumVecElems == Mask.size()) {
+ if (isReverseVectorMask(Mask))
+ return TTI->getShuffleCost(TargetTransformInfo::SK_Reverse, VecTypOp0,
+ 0, nullptr);
+ if (isAlternateVectorMask(Mask))
+ return TTI->getShuffleCost(TargetTransformInfo::SK_Alternate,
+ VecTypOp0, 0, nullptr);
+ }
+
return -1;
}
case Instruction::Call:
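
The new isAlternateVectorMask helper lets the cost model recognize shuffles whose mask picks even lanes from one source and odd lanes from the other, in either order, treating negative (undef) entries as wildcards, so such shuffles can be priced as TargetTransformInfo::SK_Alternate. A minimal standalone restatement of that check, using std::vector<int> rather than SmallVectorImpl, is sketched below; the function name is invented for the example.

#include <cassert>
#include <vector>

// Returns true if Mask alternates between the two source vectors:
// either <0, N+1, 2, N+3, ...> or <N+0, 1, N+2, 3, ...>, where N is the
// mask size. Negative entries (undef lanes) match anything.
static bool isAlternateMask(const std::vector<int> &Mask) {
  const int N = static_cast<int>(Mask.size());
  auto Matches = [&](bool OddFromSecond) {
    for (int I = 0; I < N; ++I) {
      if (Mask[I] < 0)
        continue; // undef lane, ignore
      int Expected = (((I & 1) != 0) == OddFromSecond) ? N + I : I;
      if (Mask[I] != Expected)
        return false;
    }
    return true;
  };
  // Example: <0,5,2,7> takes odd lanes from the second vector,
  //          <4,1,6,3> takes even lanes from the second vector.
  return Matches(true) || Matches(false);
}

int main() {
  assert(isAlternateMask({0, 5, 2, 7}));
  assert(isAlternateMask({4, 1, 6, 3}));
  assert(isAlternateMask({0, -1, 2, 7}));  // undef lane still matches
  assert(!isAlternateMask({0, 1, 2, 3}));  // identity mask is not alternate
  return 0;
}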
diff --git a/contrib/llvm/lib/Analysis/Delinearization.cpp b/contrib/llvm/lib/Analysis/Delinearization.cpp
index 3ed0609..9334ceb 100644
--- a/contrib/llvm/lib/Analysis/Delinearization.cpp
+++ b/contrib/llvm/lib/Analysis/Delinearization.cpp
@@ -14,26 +14,27 @@
//
//===----------------------------------------------------------------------===//
-#define DL_NAME "delinearize"
-#define DEBUG_TYPE DL_NAME
#include "llvm/IR/Constants.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
-#include "llvm/Pass.h"
#include "llvm/IR/Type.h"
-#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/Passes.h"
-#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/InstIterator.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DL_NAME "delinearize"
+#define DEBUG_TYPE DL_NAME
+
namespace {
class Delinearization : public FunctionPass {
@@ -49,9 +50,9 @@ public:
Delinearization() : FunctionPass(ID) {
initializeDelinearizationPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnFunction(Function &F);
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- virtual void print(raw_ostream &O, const Module *M = 0) const;
+ bool runOnFunction(Function &F) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ void print(raw_ostream &O, const Module *M = nullptr) const override;
};
} // end anonymous namespace
@@ -76,7 +77,7 @@ static Value *getPointerOperand(Instruction &Inst) {
return Store->getPointerOperand();
else if (GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(&Inst))
return Gep->getPointerOperand();
- return NULL;
+ return nullptr;
}
void Delinearization::print(raw_ostream &O, const Module *) const {
@@ -92,25 +93,38 @@ void Delinearization::print(raw_ostream &O, const Module *) const {
const BasicBlock *BB = Inst->getParent();
// Delinearize the memory access as analyzed in all the surrounding loops.
// Do not analyze memory accesses outside loops.
- for (Loop *L = LI->getLoopFor(BB); L != NULL; L = L->getParentLoop()) {
+ for (Loop *L = LI->getLoopFor(BB); L != nullptr; L = L->getParentLoop()) {
const SCEV *AccessFn = SE->getSCEVAtScope(getPointerOperand(*Inst), L);
+
+ const SCEVUnknown *BasePointer =
+ dyn_cast<SCEVUnknown>(SE->getPointerBase(AccessFn));
+ // Do not delinearize if we cannot find the base pointer.
+ if (!BasePointer)
+ break;
+ AccessFn = SE->getMinusSCEV(AccessFn, BasePointer);
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(AccessFn);
// Do not try to delinearize memory accesses that are not AddRecs.
if (!AR)
break;
+
+ O << "\n";
+ O << "Inst:" << *Inst << "\n";
+ O << "In Loop with Header: " << L->getHeader()->getName() << "\n";
O << "AddRec: " << *AR << "\n";
SmallVector<const SCEV *, 3> Subscripts, Sizes;
- const SCEV *Res = AR->delinearize(*SE, Subscripts, Sizes);
- int Size = Subscripts.size();
- if (Res == AR || Size == 0) {
+ AR->delinearize(*SE, Subscripts, Sizes, SE->getElementSize(Inst));
+ if (Subscripts.size() == 0 || Sizes.size() == 0 ||
+ Subscripts.size() != Sizes.size()) {
O << "failed to delinearize\n";
continue;
}
- O << "Base offset: " << *Res << "\n";
+
+ O << "Base offset: " << *BasePointer << "\n";
O << "ArrayDecl[UnknownSize]";
+ int Size = Subscripts.size();
for (int i = 0; i < Size - 1; i++)
O << "[" << *Sizes[i] << "]";
O << " with elements of " << *Sizes[Size - 1] << " bytes.\n";
diff --git a/contrib/llvm/lib/Analysis/DependenceAnalysis.cpp b/contrib/llvm/lib/Analysis/DependenceAnalysis.cpp
index 3b3e2ef..d0784f1 100644
--- a/contrib/llvm/lib/Analysis/DependenceAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/DependenceAnalysis.cpp
@@ -51,8 +51,6 @@
// //
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "da"
-
#include "llvm/Analysis/DependenceAnalysis.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
@@ -60,15 +58,17 @@
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/InstIterator.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "da"
+
//===----------------------------------------------------------------------===//
// statistics
@@ -234,7 +234,7 @@ FullDependence::FullDependence(Instruction *Source,
Levels(CommonLevels),
LoopIndependent(PossiblyLoopIndependent) {
Consistent = true;
- DV = CommonLevels ? new DVEntry[CommonLevels] : NULL;
+ DV = CommonLevels ? new DVEntry[CommonLevels] : nullptr;
}
// The rest are simple getters that hide the implementation.
@@ -658,7 +658,7 @@ Value *getPointerOperand(Instruction *I) {
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return SI->getPointerOperand();
llvm_unreachable("Value is not load or store instruction");
- return 0;
+ return nullptr;
}
@@ -932,7 +932,7 @@ const SCEV *DependenceAnalysis::collectUpperBound(const Loop *L,
const SCEV *UB = SE->getBackedgeTakenCount(L);
return SE->getNoopOrZeroExtend(UB, T);
}
- return NULL;
+ return nullptr;
}
@@ -943,7 +943,7 @@ const SCEVConstant *DependenceAnalysis::collectConstantUpperBound(const Loop *L,
) const {
if (const SCEV *UB = collectUpperBound(L, T))
return dyn_cast<SCEVConstant>(UB);
- return NULL;
+ return nullptr;
}
@@ -1275,8 +1275,8 @@ bool DependenceAnalysis::weakCrossingSIVtest(const SCEV *Coeff,
//
// Program 2.1, page 29.
// Computes the GCD of AM and BM.
-// Also finds a solution to the equation ax - by = gdc(a, b).
-// Returns true iff the gcd divides Delta.
+// Also finds a solution to the equation ax - by = gcd(a, b).
+// Returns true if dependence disproved; i.e., gcd does not divide Delta.
static
bool findGCD(unsigned Bits, APInt AM, APInt BM, APInt Delta,
APInt &G, APInt &X, APInt &Y) {
@@ -2194,7 +2194,7 @@ const SCEVConstant *getConstantPart(const SCEVMulExpr *Product) {
if (const SCEVConstant *Constant = dyn_cast<SCEVConstant>(Product->getOperand(Op)))
return Constant;
}
- return NULL;
+ return nullptr;
}
@@ -2646,8 +2646,8 @@ void DependenceAnalysis::findBoundsALL(CoefficientInfo *A,
CoefficientInfo *B,
BoundInfo *Bound,
unsigned K) const {
- Bound[K].Lower[Dependence::DVEntry::ALL] = NULL; // Default value = -infinity.
- Bound[K].Upper[Dependence::DVEntry::ALL] = NULL; // Default value = +infinity.
+ Bound[K].Lower[Dependence::DVEntry::ALL] = nullptr; // Default value = -infinity.
+ Bound[K].Upper[Dependence::DVEntry::ALL] = nullptr; // Default value = +infinity.
if (Bound[K].Iterations) {
Bound[K].Lower[Dependence::DVEntry::ALL] =
SE->getMulExpr(SE->getMinusSCEV(A[K].NegPart, B[K].PosPart),
@@ -2687,8 +2687,8 @@ void DependenceAnalysis::findBoundsEQ(CoefficientInfo *A,
CoefficientInfo *B,
BoundInfo *Bound,
unsigned K) const {
- Bound[K].Lower[Dependence::DVEntry::EQ] = NULL; // Default value = -infinity.
- Bound[K].Upper[Dependence::DVEntry::EQ] = NULL; // Default value = +infinity.
+ Bound[K].Lower[Dependence::DVEntry::EQ] = nullptr; // Default value = -infinity.
+ Bound[K].Upper[Dependence::DVEntry::EQ] = nullptr; // Default value = +infinity.
if (Bound[K].Iterations) {
const SCEV *Delta = SE->getMinusSCEV(A[K].Coeff, B[K].Coeff);
const SCEV *NegativePart = getNegativePart(Delta);
@@ -2729,8 +2729,8 @@ void DependenceAnalysis::findBoundsLT(CoefficientInfo *A,
CoefficientInfo *B,
BoundInfo *Bound,
unsigned K) const {
- Bound[K].Lower[Dependence::DVEntry::LT] = NULL; // Default value = -infinity.
- Bound[K].Upper[Dependence::DVEntry::LT] = NULL; // Default value = +infinity.
+ Bound[K].Lower[Dependence::DVEntry::LT] = nullptr; // Default value = -infinity.
+ Bound[K].Upper[Dependence::DVEntry::LT] = nullptr; // Default value = +infinity.
if (Bound[K].Iterations) {
const SCEV *Iter_1 =
SE->getMinusSCEV(Bound[K].Iterations,
@@ -2776,8 +2776,8 @@ void DependenceAnalysis::findBoundsGT(CoefficientInfo *A,
CoefficientInfo *B,
BoundInfo *Bound,
unsigned K) const {
- Bound[K].Lower[Dependence::DVEntry::GT] = NULL; // Default value = -infinity.
- Bound[K].Upper[Dependence::DVEntry::GT] = NULL; // Default value = +infinity.
+ Bound[K].Lower[Dependence::DVEntry::GT] = nullptr; // Default value = -infinity.
+ Bound[K].Upper[Dependence::DVEntry::GT] = nullptr; // Default value = +infinity.
if (Bound[K].Iterations) {
const SCEV *Iter_1 =
SE->getMinusSCEV(Bound[K].Iterations,
@@ -2829,7 +2829,7 @@ DependenceAnalysis::collectCoeffInfo(const SCEV *Subscript,
CI[K].Coeff = Zero;
CI[K].PosPart = Zero;
CI[K].NegPart = Zero;
- CI[K].Iterations = NULL;
+ CI[K].Iterations = nullptr;
}
while (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Subscript)) {
const Loop *L = AddRec->getLoop();
@@ -2872,7 +2872,7 @@ const SCEV *DependenceAnalysis::getLowerBound(BoundInfo *Bound) const {
if (Bound[K].Lower[Bound[K].Direction])
Sum = SE->getAddExpr(Sum, Bound[K].Lower[Bound[K].Direction]);
else
- Sum = NULL;
+ Sum = nullptr;
}
return Sum;
}
@@ -2888,7 +2888,7 @@ const SCEV *DependenceAnalysis::getUpperBound(BoundInfo *Bound) const {
if (Bound[K].Upper[Bound[K].Direction])
Sum = SE->getAddExpr(Sum, Bound[K].Upper[Bound[K].Direction]);
else
- Sum = NULL;
+ Sum = nullptr;
}
return Sum;
}
@@ -3148,12 +3148,12 @@ void DependenceAnalysis::updateDirection(Dependence::DVEntry &Level,
}
else if (CurConstraint.isLine()) {
Level.Scalar = false;
- Level.Distance = NULL;
+ Level.Distance = nullptr;
// direction should be accurate
}
else if (CurConstraint.isPoint()) {
Level.Scalar = false;
- Level.Distance = NULL;
+ Level.Distance = nullptr;
unsigned NewDirection = Dependence::DVEntry::NONE;
if (!isKnownPredicate(CmpInst::ICMP_NE,
CurConstraint.getY(),
@@ -3178,33 +3178,57 @@ void DependenceAnalysis::updateDirection(Dependence::DVEntry &Level,
/// Check if we can delinearize the subscripts. If the SCEVs representing the
/// source and destination array references are recurrences on a nested loop,
-/// this function flattens the nested recurrences into seperate recurrences
+/// this function flattens the nested recurrences into separate recurrences
/// for each loop level.
-bool
-DependenceAnalysis::tryDelinearize(const SCEV *SrcSCEV, const SCEV *DstSCEV,
- SmallVectorImpl<Subscript> &Pair) const {
+bool DependenceAnalysis::tryDelinearize(const SCEV *SrcSCEV,
+ const SCEV *DstSCEV,
+ SmallVectorImpl<Subscript> &Pair,
+ const SCEV *ElementSize) const {
+ const SCEVUnknown *SrcBase =
+ dyn_cast<SCEVUnknown>(SE->getPointerBase(SrcSCEV));
+ const SCEVUnknown *DstBase =
+ dyn_cast<SCEVUnknown>(SE->getPointerBase(DstSCEV));
+
+ if (!SrcBase || !DstBase || SrcBase != DstBase)
+ return false;
+
+ SrcSCEV = SE->getMinusSCEV(SrcSCEV, SrcBase);
+ DstSCEV = SE->getMinusSCEV(DstSCEV, DstBase);
+
const SCEVAddRecExpr *SrcAR = dyn_cast<SCEVAddRecExpr>(SrcSCEV);
const SCEVAddRecExpr *DstAR = dyn_cast<SCEVAddRecExpr>(DstSCEV);
if (!SrcAR || !DstAR || !SrcAR->isAffine() || !DstAR->isAffine())
return false;
- SmallVector<const SCEV *, 4> SrcSubscripts, DstSubscripts, SrcSizes, DstSizes;
- SrcAR->delinearize(*SE, SrcSubscripts, SrcSizes);
- DstAR->delinearize(*SE, DstSubscripts, DstSizes);
+ // First step: collect parametric terms in both array references.
+ SmallVector<const SCEV *, 4> Terms;
+ SrcAR->collectParametricTerms(*SE, Terms);
+ DstAR->collectParametricTerms(*SE, Terms);
- int size = SrcSubscripts.size();
- int dstSize = DstSubscripts.size();
- if (size != dstSize || size < 2)
+ // Second step: find subscript sizes.
+ SmallVector<const SCEV *, 4> Sizes;
+ SE->findArrayDimensions(Terms, Sizes, ElementSize);
+
+ // Third step: compute the access functions for each subscript.
+ SmallVector<const SCEV *, 4> SrcSubscripts, DstSubscripts;
+ SrcAR->computeAccessFunctions(*SE, SrcSubscripts, Sizes);
+ DstAR->computeAccessFunctions(*SE, DstSubscripts, Sizes);
+
+ // Fail when there is only a subscript: that's a linearized access function.
+ if (SrcSubscripts.size() < 2 || DstSubscripts.size() < 2 ||
+ SrcSubscripts.size() != DstSubscripts.size())
return false;
-#ifndef NDEBUG
- DEBUG(errs() << "\nSrcSubscripts: ");
- for (int i = 0; i < size; i++)
- DEBUG(errs() << *SrcSubscripts[i]);
- DEBUG(errs() << "\nDstSubscripts: ");
- for (int i = 0; i < size; i++)
- DEBUG(errs() << *DstSubscripts[i]);
-#endif
+ int size = SrcSubscripts.size();
+
+ DEBUG({
+ dbgs() << "\nSrcSubscripts: ";
+ for (int i = 0; i < size; i++)
+ dbgs() << *SrcSubscripts[i];
+ dbgs() << "\nDstSubscripts: ";
+ for (int i = 0; i < size; i++)
+ dbgs() << *DstSubscripts[i];
+ });
// The delinearization transforms a single-subscript MIV dependence test into
// a multi-subscript SIV dependence test that is easier to compute. So we
@@ -3262,7 +3286,7 @@ Dependence *DependenceAnalysis::depends(Instruction *Src,
if ((!Src->mayReadFromMemory() && !Src->mayWriteToMemory()) ||
(!Dst->mayReadFromMemory() && !Dst->mayWriteToMemory()))
// if both instructions don't reference memory, there's no dependence
- return NULL;
+ return nullptr;
if (!isLoadOrStore(Src) || !isLoadOrStore(Dst)) {
// can only analyze simple loads and stores, i.e., no calls, invokes, etc.
@@ -3282,7 +3306,7 @@ Dependence *DependenceAnalysis::depends(Instruction *Src,
case AliasAnalysis::NoAlias:
// If the objects noalias, they are distinct, accesses are independent.
DEBUG(dbgs() << "no alias\n");
- return NULL;
+ return nullptr;
case AliasAnalysis::MustAlias:
break; // The underlying objects alias; test accesses for dependence.
}
@@ -3335,7 +3359,7 @@ Dependence *DependenceAnalysis::depends(Instruction *Src,
}
if (Delinearize && Pairs == 1 && CommonLevels > 1 &&
- tryDelinearize(Pair[0].Src, Pair[0].Dst, Pair)) {
+ tryDelinearize(Pair[0].Src, Pair[0].Dst, Pair, SE->getElementSize(Src))) {
DEBUG(dbgs() << " delinerized GEP\n");
Pairs = Pair.size();
}
@@ -3477,26 +3501,26 @@ Dependence *DependenceAnalysis::depends(Instruction *Src,
case Subscript::ZIV:
DEBUG(dbgs() << ", ZIV\n");
if (testZIV(Pair[SI].Src, Pair[SI].Dst, Result))
- return NULL;
+ return nullptr;
break;
case Subscript::SIV: {
DEBUG(dbgs() << ", SIV\n");
unsigned Level;
- const SCEV *SplitIter = NULL;
+ const SCEV *SplitIter = nullptr;
if (testSIV(Pair[SI].Src, Pair[SI].Dst, Level,
Result, NewConstraint, SplitIter))
- return NULL;
+ return nullptr;
break;
}
case Subscript::RDIV:
DEBUG(dbgs() << ", RDIV\n");
if (testRDIV(Pair[SI].Src, Pair[SI].Dst, Result))
- return NULL;
+ return nullptr;
break;
case Subscript::MIV:
DEBUG(dbgs() << ", MIV\n");
if (testMIV(Pair[SI].Src, Pair[SI].Dst, Pair[SI].Loops, Result))
- return NULL;
+ return nullptr;
break;
default:
llvm_unreachable("subscript has unexpected classification");
@@ -3530,16 +3554,16 @@ Dependence *DependenceAnalysis::depends(Instruction *Src,
DEBUG(dbgs() << "testing subscript " << SJ << ", SIV\n");
// SJ is an SIV subscript that's part of the current coupled group
unsigned Level;
- const SCEV *SplitIter = NULL;
+ const SCEV *SplitIter = nullptr;
DEBUG(dbgs() << "SIV\n");
if (testSIV(Pair[SJ].Src, Pair[SJ].Dst, Level,
Result, NewConstraint, SplitIter))
- return NULL;
+ return nullptr;
ConstrainedLevels.set(Level);
if (intersectConstraints(&Constraints[Level], &NewConstraint)) {
if (Constraints[Level].isEmpty()) {
++DeltaIndependence;
- return NULL;
+ return nullptr;
}
Changed = true;
}
@@ -3565,7 +3589,7 @@ Dependence *DependenceAnalysis::depends(Instruction *Src,
case Subscript::ZIV:
DEBUG(dbgs() << "ZIV\n");
if (testZIV(Pair[SJ].Src, Pair[SJ].Dst, Result))
- return NULL;
+ return nullptr;
Mivs.reset(SJ);
break;
case Subscript::SIV:
@@ -3588,7 +3612,7 @@ Dependence *DependenceAnalysis::depends(Instruction *Src,
if (Pair[SJ].Classification == Subscript::RDIV) {
DEBUG(dbgs() << "RDIV test\n");
if (testRDIV(Pair[SJ].Src, Pair[SJ].Dst, Result))
- return NULL;
+ return nullptr;
// I don't yet understand how to propagate RDIV results
Mivs.reset(SJ);
}
@@ -3601,7 +3625,7 @@ Dependence *DependenceAnalysis::depends(Instruction *Src,
if (Pair[SJ].Classification == Subscript::MIV) {
DEBUG(dbgs() << "MIV test\n");
if (testMIV(Pair[SJ].Src, Pair[SJ].Dst, Pair[SJ].Loops, Result))
- return NULL;
+ return nullptr;
}
else
llvm_unreachable("expected only MIV subscripts at this point");
@@ -3613,7 +3637,7 @@ Dependence *DependenceAnalysis::depends(Instruction *Src,
SJ >= 0; SJ = ConstrainedLevels.find_next(SJ)) {
updateDirection(Result.DV[SJ - 1], Constraints[SJ]);
if (Result.DV[SJ - 1].Direction == Dependence::DVEntry::NONE)
- return NULL;
+ return nullptr;
}
}
}
@@ -3648,11 +3672,11 @@ Dependence *DependenceAnalysis::depends(Instruction *Src,
}
}
if (AllEqual)
- return NULL;
+ return nullptr;
}
FullDependence *Final = new FullDependence(Result);
- Result.DV = NULL;
+ Result.DV = nullptr;
return Final;
}
@@ -3759,7 +3783,7 @@ const SCEV *DependenceAnalysis::getSplitIteration(const Dependence *Dep,
}
if (Delinearize && Pairs == 1 && CommonLevels > 1 &&
- tryDelinearize(Pair[0].Src, Pair[0].Dst, Pair)) {
+ tryDelinearize(Pair[0].Src, Pair[0].Dst, Pair, SE->getElementSize(Src))) {
DEBUG(dbgs() << " delinerized GEP\n");
Pairs = Pair.size();
}
@@ -3825,11 +3849,11 @@ const SCEV *DependenceAnalysis::getSplitIteration(const Dependence *Dep,
switch (Pair[SI].Classification) {
case Subscript::SIV: {
unsigned Level;
- const SCEV *SplitIter = NULL;
+ const SCEV *SplitIter = nullptr;
(void) testSIV(Pair[SI].Src, Pair[SI].Dst, Level,
Result, NewConstraint, SplitIter);
if (Level == SplitLevel) {
- assert(SplitIter != NULL);
+ assert(SplitIter != nullptr);
return SplitIter;
}
break;
@@ -3864,7 +3888,7 @@ const SCEV *DependenceAnalysis::getSplitIteration(const Dependence *Dep,
for (int SJ = Sivs.find_first(); SJ >= 0; SJ = Sivs.find_next(SJ)) {
// SJ is an SIV subscript that's part of the current coupled group
unsigned Level;
- const SCEV *SplitIter = NULL;
+ const SCEV *SplitIter = nullptr;
(void) testSIV(Pair[SJ].Src, Pair[SJ].Dst, Level,
Result, NewConstraint, SplitIter);
if (Level == SplitLevel && SplitIter)
@@ -3905,5 +3929,5 @@ const SCEV *DependenceAnalysis::getSplitIteration(const Dependence *Dep,
}
}
llvm_unreachable("somehow reached end of routine");
- return NULL;
+ return nullptr;
}
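
The corrected findGCD comment above rests on the classical fact that a*x - b*y = Delta has integer solutions exactly when gcd(a, b) divides Delta, so a non-dividing gcd disproves the dependence. A self-contained integer version of that test, built on the extended Euclidean algorithm, is sketched below for reference; the LLVM routine performs the same arithmetic on fixed-width APInt values, and the names here are illustrative only.

#include <cstdint>
#include <cstdio>

// Extended Euclid: returns g = gcd(a, b) and x, y with a*x + b*y = g.
static int64_t extendedGCD(int64_t A, int64_t B, int64_t &X, int64_t &Y) {
  if (B == 0) {
    X = 1;
    Y = 0;
    return A;
  }
  int64_t X1, Y1;
  int64_t G = extendedGCD(B, A % B, X1, Y1);
  X = Y1;
  Y = X1 - (A / B) * Y1;
  return G;
}

// Returns true if the dependence equation a*x - b*y = Delta is proven to
// have no integer solution, i.e. gcd(a, b) does not divide Delta.
static bool gcdDisprovesDependence(int64_t A, int64_t B, int64_t Delta) {
  int64_t X, Y;
  int64_t G = extendedGCD(A, B, X, Y);
  return Delta % G != 0;
}

int main() {
  // 4*x - 6*y = 3 has no integer solution (gcd(4,6) = 2 does not divide 3).
  std::printf("%d\n", gcdDisprovesDependence(4, 6, 3)); // prints 1
  // 4*x - 6*y = 2 is solvable, so nothing is disproved.
  std::printf("%d\n", gcdDisprovesDependence(4, 6, 2)); // prints 0
  return 0;
}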
diff --git a/contrib/llvm/lib/Analysis/DomPrinter.cpp b/contrib/llvm/lib/Analysis/DomPrinter.cpp
index cde4314..0c880df 100644
--- a/contrib/llvm/lib/Analysis/DomPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/DomPrinter.cpp
@@ -81,18 +81,32 @@ struct DOTGraphTraits<PostDominatorTree*>
}
namespace {
-struct DomViewer
- : public DOTGraphTraitsViewer<DominatorTree, false> {
+struct DominatorTreeWrapperPassAnalysisGraphTraits {
+ static DominatorTree *getGraph(DominatorTreeWrapperPass *DTWP) {
+ return &DTWP->getDomTree();
+ }
+};
+
+struct DomViewer : public DOTGraphTraitsViewer<
+ DominatorTreeWrapperPass, false, DominatorTree *,
+ DominatorTreeWrapperPassAnalysisGraphTraits> {
static char ID;
- DomViewer() : DOTGraphTraitsViewer<DominatorTree, false>("dom", ID){
+ DomViewer()
+ : DOTGraphTraitsViewer<DominatorTreeWrapperPass, false, DominatorTree *,
+ DominatorTreeWrapperPassAnalysisGraphTraits>(
+ "dom", ID) {
initializeDomViewerPass(*PassRegistry::getPassRegistry());
}
};
-struct DomOnlyViewer
- : public DOTGraphTraitsViewer<DominatorTree, true> {
+struct DomOnlyViewer : public DOTGraphTraitsViewer<
+ DominatorTreeWrapperPass, true, DominatorTree *,
+ DominatorTreeWrapperPassAnalysisGraphTraits> {
static char ID;
- DomOnlyViewer() : DOTGraphTraitsViewer<DominatorTree, true>("domonly", ID){
+ DomOnlyViewer()
+ : DOTGraphTraitsViewer<DominatorTreeWrapperPass, true, DominatorTree *,
+ DominatorTreeWrapperPassAnalysisGraphTraits>(
+ "domonly", ID) {
initializeDomOnlyViewerPass(*PassRegistry::getPassRegistry());
}
};
@@ -136,18 +150,26 @@ INITIALIZE_PASS(PostDomOnlyViewer, "view-postdom-only",
false, false)
namespace {
-struct DomPrinter
- : public DOTGraphTraitsPrinter<DominatorTree, false> {
+struct DomPrinter : public DOTGraphTraitsPrinter<
+ DominatorTreeWrapperPass, false, DominatorTree *,
+ DominatorTreeWrapperPassAnalysisGraphTraits> {
static char ID;
- DomPrinter() : DOTGraphTraitsPrinter<DominatorTree, false>("dom", ID) {
+ DomPrinter()
+ : DOTGraphTraitsPrinter<DominatorTreeWrapperPass, false, DominatorTree *,
+ DominatorTreeWrapperPassAnalysisGraphTraits>(
+ "dom", ID) {
initializeDomPrinterPass(*PassRegistry::getPassRegistry());
}
};
-struct DomOnlyPrinter
- : public DOTGraphTraitsPrinter<DominatorTree, true> {
+struct DomOnlyPrinter : public DOTGraphTraitsPrinter<
+ DominatorTreeWrapperPass, true, DominatorTree *,
+ DominatorTreeWrapperPassAnalysisGraphTraits> {
static char ID;
- DomOnlyPrinter() : DOTGraphTraitsPrinter<DominatorTree, true>("domonly", ID) {
+ DomOnlyPrinter()
+ : DOTGraphTraitsPrinter<DominatorTreeWrapperPass, true, DominatorTree *,
+ DominatorTreeWrapperPassAnalysisGraphTraits>(
+ "domonly", ID) {
initializeDomOnlyPrinterPass(*PassRegistry::getPassRegistry());
}
};
diff --git a/contrib/llvm/lib/Analysis/DominanceFrontier.cpp b/contrib/llvm/lib/Analysis/DominanceFrontier.cpp
index 7e4a89f..7ba91bc 100644
--- a/contrib/llvm/lib/Analysis/DominanceFrontier.cpp
+++ b/contrib/llvm/lib/Analysis/DominanceFrontier.cpp
@@ -8,134 +8,50 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/DominanceFrontier.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/Assembly/Writer.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/Analysis/DominanceFrontierImpl.h"
+
using namespace llvm;
+namespace llvm {
+template class DominanceFrontierBase<BasicBlock>;
+template class ForwardDominanceFrontierBase<BasicBlock>;
+}
+
char DominanceFrontier::ID = 0;
+
INITIALIZE_PASS_BEGIN(DominanceFrontier, "domfrontier",
"Dominance Frontier Construction", true, true)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(DominanceFrontier, "domfrontier",
"Dominance Frontier Construction", true, true)
-namespace {
- class DFCalculateWorkObject {
- public:
- DFCalculateWorkObject(BasicBlock *B, BasicBlock *P,
- const DomTreeNode *N,
- const DomTreeNode *PN)
- : currentBB(B), parentBB(P), Node(N), parentNode(PN) {}
- BasicBlock *currentBB;
- BasicBlock *parentBB;
- const DomTreeNode *Node;
- const DomTreeNode *parentNode;
- };
+DominanceFrontier::DominanceFrontier()
+ : FunctionPass(ID),
+ Base() {
+ initializeDominanceFrontierPass(*PassRegistry::getPassRegistry());
}
-void DominanceFrontier::anchor() { }
-
-const DominanceFrontier::DomSetType &
-DominanceFrontier::calculate(const DominatorTree &DT,
- const DomTreeNode *Node) {
- BasicBlock *BB = Node->getBlock();
- DomSetType *Result = NULL;
-
- std::vector<DFCalculateWorkObject> workList;
- SmallPtrSet<BasicBlock *, 32> visited;
-
- workList.push_back(DFCalculateWorkObject(BB, NULL, Node, NULL));
- do {
- DFCalculateWorkObject *currentW = &workList.back();
- assert (currentW && "Missing work object.");
-
- BasicBlock *currentBB = currentW->currentBB;
- BasicBlock *parentBB = currentW->parentBB;
- const DomTreeNode *currentNode = currentW->Node;
- const DomTreeNode *parentNode = currentW->parentNode;
- assert (currentBB && "Invalid work object. Missing current Basic Block");
- assert (currentNode && "Invalid work object. Missing current Node");
- DomSetType &S = Frontiers[currentBB];
-
- // Visit each block only once.
- if (visited.count(currentBB) == 0) {
- visited.insert(currentBB);
-
- // Loop over CFG successors to calculate DFlocal[currentNode]
- for (succ_iterator SI = succ_begin(currentBB), SE = succ_end(currentBB);
- SI != SE; ++SI) {
- // Does Node immediately dominate this successor?
- if (DT[*SI]->getIDom() != currentNode)
- S.insert(*SI);
- }
- }
-
- // At this point, S is DFlocal. Now we union in DFup's of our children...
- // Loop through and visit the nodes that Node immediately dominates (Node's
- // children in the IDomTree)
- bool visitChild = false;
- for (DomTreeNode::const_iterator NI = currentNode->begin(),
- NE = currentNode->end(); NI != NE; ++NI) {
- DomTreeNode *IDominee = *NI;
- BasicBlock *childBB = IDominee->getBlock();
- if (visited.count(childBB) == 0) {
- workList.push_back(DFCalculateWorkObject(childBB, currentBB,
- IDominee, currentNode));
- visitChild = true;
- }
- }
-
- // If all children are visited or there is any child then pop this block
- // from the workList.
- if (!visitChild) {
-
- if (!parentBB) {
- Result = &S;
- break;
- }
-
- DomSetType::const_iterator CDFI = S.begin(), CDFE = S.end();
- DomSetType &parentSet = Frontiers[parentBB];
- for (; CDFI != CDFE; ++CDFI) {
- if (!DT.properlyDominates(parentNode, DT[*CDFI]))
- parentSet.insert(*CDFI);
- }
- workList.pop_back();
- }
+void DominanceFrontier::releaseMemory() {
+ Base.releaseMemory();
+}
- } while (!workList.empty());
+bool DominanceFrontier::runOnFunction(Function &) {
+ releaseMemory();
+ Base.analyze(getAnalysis<DominatorTreeWrapperPass>().getDomTree());
+ return false;
+}
- return *Result;
+void DominanceFrontier::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequired<DominatorTreeWrapperPass>();
}
-void DominanceFrontierBase::print(raw_ostream &OS, const Module* ) const {
- for (const_iterator I = begin(), E = end(); I != E; ++I) {
- OS << " DomFrontier for BB ";
- if (I->first)
- WriteAsOperand(OS, I->first, false);
- else
- OS << " <<exit node>>";
- OS << " is:\t";
-
- const std::set<BasicBlock*> &BBs = I->second;
-
- for (std::set<BasicBlock*>::const_iterator I = BBs.begin(), E = BBs.end();
- I != E; ++I) {
- OS << ' ';
- if (*I)
- WriteAsOperand(OS, *I, false);
- else
- OS << "<<exit node>>";
- }
- OS << "\n";
- }
+void DominanceFrontier::print(raw_ostream &OS, const Module *) const {
+ Base.print(OS);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-void DominanceFrontierBase::dump() const {
+void DominanceFrontier::dump() const {
print(dbgs());
}
#endif
-
diff --git a/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp b/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
index f042964..dfabb0a 100644
--- a/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
@@ -8,17 +8,45 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/CallGraph.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-CallGraph::CallGraph()
- : ModulePass(ID), Root(0), ExternalCallingNode(0), CallsExternalNode(0) {
- initializeCallGraphPass(*PassRegistry::getPassRegistry());
+//===----------------------------------------------------------------------===//
+// Implementations of the CallGraph class methods.
+//
+
+CallGraph::CallGraph(Module &M)
+ : M(M), Root(nullptr), ExternalCallingNode(getOrInsertFunction(nullptr)),
+ CallsExternalNode(new CallGraphNode(nullptr)) {
+ // Add every function to the call graph.
+ for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
+ addToCallGraph(I);
+
+ // If we didn't find a main function, use the external call graph node
+ if (!Root)
+ Root = ExternalCallingNode;
+}
+
+CallGraph::~CallGraph() {
+ // CallsExternalNode is not in the function map, delete it explicitly.
+ CallsExternalNode->allReferencesDropped();
+ delete CallsExternalNode;
+
+// Reset all nodes' use counts to zero before deleting them to prevent an
+// assertion from firing.
+#ifndef NDEBUG
+ for (FunctionMapTy::iterator I = FunctionMap.begin(), E = FunctionMap.end();
+ I != E; ++I)
+ I->second->allReferencesDropped();
+#endif
+ for (FunctionMapTy::iterator I = FunctionMap.begin(), E = FunctionMap.end();
+ I != E; ++I)
+ delete I->second;
}
void CallGraph::addToCallGraph(Function *F) {
@@ -62,59 +90,7 @@ void CallGraph::addToCallGraph(Function *F) {
}
}
-void CallGraph::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
-}
-
-bool CallGraph::runOnModule(Module &M) {
- Mod = &M;
-
- ExternalCallingNode = getOrInsertFunction(0);
- assert(!CallsExternalNode);
- CallsExternalNode = new CallGraphNode(0);
- Root = 0;
-
- // Add every function to the call graph.
- for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
- addToCallGraph(I);
-
- // If we didn't find a main function, use the external call graph node
- if (Root == 0)
- Root = ExternalCallingNode;
-
- return false;
-}
-
-INITIALIZE_PASS(CallGraph, "basiccg", "CallGraph Construction", false, true)
-
-char CallGraph::ID = 0;
-
-void CallGraph::releaseMemory() {
- /// CallsExternalNode is not in the function map, delete it explicitly.
- if (CallsExternalNode) {
- CallsExternalNode->allReferencesDropped();
- delete CallsExternalNode;
- CallsExternalNode = 0;
- }
-
- if (FunctionMap.empty())
- return;
-
-// Reset all node's use counts to zero before deleting them to prevent an
-// assertion from firing.
-#ifndef NDEBUG
- for (FunctionMapTy::iterator I = FunctionMap.begin(), E = FunctionMap.end();
- I != E; ++I)
- I->second->allReferencesDropped();
-#endif
-
- for (FunctionMapTy::iterator I = FunctionMap.begin(), E = FunctionMap.end();
- I != E; ++I)
- delete I->second;
- FunctionMap.clear();
-}
-
-void CallGraph::print(raw_ostream &OS, const Module*) const {
+void CallGraph::print(raw_ostream &OS) const {
OS << "CallGraph Root is: ";
if (Function *F = Root->getFunction())
OS << F->getName() << "\n";
@@ -125,16 +101,11 @@ void CallGraph::print(raw_ostream &OS, const Module*) const {
for (CallGraph::const_iterator I = begin(), E = end(); I != E; ++I)
I->second->print(OS);
}
+
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-void CallGraph::dump() const {
- print(dbgs(), 0);
-}
+void CallGraph::dump() const { print(dbgs()); }
#endif
-//===----------------------------------------------------------------------===//
-// Implementations of public modification methods
-//
-
// removeFunctionFromModule - Unlink the function from this module, returning
// it. Because this removes the function from the module, the call graph node
// is destroyed. This is only valid if the function does not call any other
@@ -148,7 +119,7 @@ Function *CallGraph::removeFunctionFromModule(CallGraphNode *CGN) {
delete CGN; // Delete the call graph node for this func
FunctionMap.erase(F); // Remove the call graph node from the map
- Mod->getFunctionList().remove(F);
+ M.getFunctionList().remove(F);
return F;
}
@@ -172,12 +143,17 @@ void CallGraph::spliceFunction(const Function *From, const Function *To) {
// not already exist.
CallGraphNode *CallGraph::getOrInsertFunction(const Function *F) {
CallGraphNode *&CGN = FunctionMap[F];
- if (CGN) return CGN;
-
- assert((!F || F->getParent() == Mod) && "Function not in current module!");
+ if (CGN)
+ return CGN;
+
+ assert((!F || F->getParent() == &M) && "Function not in current module!");
return CGN = new CallGraphNode(const_cast<Function*>(F));
}
+//===----------------------------------------------------------------------===//
+// Implementations of the CallGraphNode class methods.
+//
+
void CallGraphNode::print(raw_ostream &OS) const {
if (Function *F = getFunction())
OS << "Call graph node for function: '" << F->getName() << "'";
@@ -234,7 +210,7 @@ void CallGraphNode::removeOneAbstractEdgeTo(CallGraphNode *Callee) {
for (CalledFunctionsVector::iterator I = CalledFunctions.begin(); ; ++I) {
assert(I != CalledFunctions.end() && "Cannot find callee to remove!");
CallRecord &CR = *I;
- if (CR.second == Callee && CR.first == 0) {
+ if (CR.second == Callee && CR.first == nullptr) {
Callee->DropRef();
*I = CalledFunctions.back();
CalledFunctions.pop_back();
@@ -260,5 +236,52 @@ void CallGraphNode::replaceCallEdge(CallSite CS,
}
}
+//===----------------------------------------------------------------------===//
+// Out-of-line definitions of CallGraphAnalysis class members.
+//
+
+char CallGraphAnalysis::PassID;
+
+//===----------------------------------------------------------------------===//
+// Implementations of the CallGraphWrapperPass class methods.
+//
+
+CallGraphWrapperPass::CallGraphWrapperPass() : ModulePass(ID) {
+ initializeCallGraphWrapperPassPass(*PassRegistry::getPassRegistry());
+}
+
+CallGraphWrapperPass::~CallGraphWrapperPass() {}
+
+void CallGraphWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+}
+
+bool CallGraphWrapperPass::runOnModule(Module &M) {
+ // All the real work is done in the constructor for the CallGraph.
+ G.reset(new CallGraph(M));
+ return false;
+}
+
+INITIALIZE_PASS(CallGraphWrapperPass, "basiccg", "CallGraph Construction",
+ false, true)
+
+char CallGraphWrapperPass::ID = 0;
+
+void CallGraphWrapperPass::releaseMemory() { G.reset(); }
+
+void CallGraphWrapperPass::print(raw_ostream &OS, const Module *) const {
+ if (!G) {
+ OS << "No call graph has been built!\n";
+ return;
+ }
+
+ // Just delegate.
+ G->print(OS);
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+void CallGraphWrapperPass::dump() const { print(dbgs(), nullptr); }
+#endif
+
// Ensure that users of CallGraph.h also link with this file
DEFINING_FILE_FOR(CallGraph)
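
With this change the graph is built eagerly by the CallGraph(Module &) constructor, which adds a node per function plus the two synthetic external nodes, while CallGraphWrapperPass only owns the object for the legacy pass manager and drops it in releaseMemory(). The toy sketch below shows the same ownership shape, a graph constructed up front and a thin wrapper holding it in a unique_ptr; every name in it is invented for the illustration.

#include <cstdio>
#include <map>
#include <memory>
#include <string>
#include <vector>

// A toy module: each function is a name plus the names it calls.
struct ToyModule {
  std::map<std::string, std::vector<std::string>> Functions;
};

// Graph built eagerly in the constructor, one node per function.
class ToyCallGraph {
  std::map<std::string, std::vector<std::string>> Edges;

public:
  explicit ToyCallGraph(const ToyModule &M) {
    for (const auto &F : M.Functions)
      Edges[F.first] = F.second; // record the callees of every function
  }
  void print() const {
    for (const auto &N : Edges) {
      std::printf("%s ->", N.first.c_str());
      for (const auto &Callee : N.second)
        std::printf(" %s", Callee.c_str());
      std::printf("\n");
    }
  }
};

// Thin wrapper in the style of CallGraphWrapperPass: it only owns the graph.
class ToyCallGraphWrapper {
  std::unique_ptr<ToyCallGraph> G;

public:
  void runOnModule(const ToyModule &M) { G.reset(new ToyCallGraph(M)); }
  void releaseMemory() { G.reset(); }
  const ToyCallGraph *getCallGraph() const { return G.get(); }
};

int main() {
  ToyModule M{{{"main", {"foo", "bar"}}, {"foo", {"bar"}}, {"bar", {}}}};
  ToyCallGraphWrapper W;
  W.runOnModule(M);
  W.getCallGraph()->print();
  return 0;
}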
diff --git a/contrib/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp b/contrib/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp
index 182beca..c27edbf 100644
--- a/contrib/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp
@@ -15,7 +15,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "cgscc-passmgr"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/Statistic.h"
@@ -23,12 +22,15 @@
#include "llvm/IR/Function.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LegacyPassManagers.h"
+#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "cgscc-passmgr"
+
static cl::opt<unsigned>
MaxIterations("max-cg-scc-iterations", cl::ReallyHidden, cl::init(4));
@@ -49,7 +51,7 @@ public:
/// run - Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the module, and if so, return true.
- bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
using ModulePass::doInitialization;
using ModulePass::doFinalization;
@@ -58,21 +60,21 @@ public:
bool doFinalization(CallGraph &CG);
/// Pass Manager itself does not invalidate any analysis info.
- void getAnalysisUsage(AnalysisUsage &Info) const {
+ void getAnalysisUsage(AnalysisUsage &Info) const override {
// CGPassManager walks SCC and it needs CallGraph.
- Info.addRequired<CallGraph>();
+ Info.addRequired<CallGraphWrapperPass>();
Info.setPreservesAll();
}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "CallGraph Pass Manager";
}
- virtual PMDataManager *getAsPMDataManager() { return this; }
- virtual Pass *getAsPass() { return this; }
+ PMDataManager *getAsPMDataManager() override { return this; }
+ Pass *getAsPass() override { return this; }
// Print passes managed by this manager
- void dumpPassStructure(unsigned Offset) {
+ void dumpPassStructure(unsigned Offset) override {
errs().indent(Offset*2) << "Call Graph SCC Pass Manager\n";
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
Pass *P = getContainedPass(Index);
@@ -86,7 +88,7 @@ public:
return static_cast<Pass *>(PassVector[N]);
}
- virtual PassManagerType getPassManagerType() const {
+ PassManagerType getPassManagerType() const override {
return PMT_CallGraphPassManager;
}
@@ -112,7 +114,7 @@ bool CGPassManager::RunPassOnSCC(Pass *P, CallGraphSCC &CurSCC,
bool Changed = false;
PMDataManager *PM = P->getAsPMDataManager();
- if (PM == 0) {
+ if (!PM) {
CallGraphSCCPass *CGSP = (CallGraphSCCPass*)P;
if (!CallGraphUpToDate) {
DevirtualizedCall |= RefreshCallGraph(CurSCC, CG, false);
@@ -144,8 +146,11 @@ bool CGPassManager::RunPassOnSCC(Pass *P, CallGraphSCC &CurSCC,
I != E; ++I) {
if (Function *F = (*I)->getFunction()) {
dumpPassInfo(P, EXECUTION_MSG, ON_FUNCTION_MSG, F->getName());
- TimeRegion PassTimer(getPassTimer(FPP));
- Changed |= FPP->runOnFunction(*F);
+ {
+ TimeRegion PassTimer(getPassTimer(FPP));
+ Changed |= FPP->runOnFunction(*F);
+ }
+ F->getContext().yield();
}
}
@@ -190,7 +195,7 @@ bool CGPassManager::RefreshCallGraph(CallGraphSCC &CurSCC,
SCCIdx != E; ++SCCIdx, ++FunctionNo) {
CallGraphNode *CGN = *SCCIdx;
Function *F = CGN->getFunction();
- if (F == 0 || F->isDeclaration()) continue;
+ if (!F || F->isDeclaration()) continue;
// Walk the function body looking for call sites. Sync up the call sites in
// CGN with those actually in the function.
@@ -203,7 +208,7 @@ bool CGPassManager::RefreshCallGraph(CallGraphSCC &CurSCC,
for (CallGraphNode::iterator I = CGN->begin(), E = CGN->end(); I != E; ) {
// If this call site is null, then the function pass deleted the call
// entirely and the WeakVH nulled it out.
- if (I->first == 0 ||
+ if (!I->first ||
// If we've already seen this call site, then the FunctionPass RAUW'd
// one call with another, which resulted in two "uses" in the edge
// list of the same call.
@@ -217,7 +222,7 @@ bool CGPassManager::RefreshCallGraph(CallGraphSCC &CurSCC,
"CallGraphSCCPass did not update the CallGraph correctly!");
// If this was an indirect call site, count it.
- if (I->second->getFunction() == 0)
+ if (!I->second->getFunction())
++NumIndirectRemoved;
else
++NumDirectRemoved;
@@ -273,7 +278,7 @@ bool CGPassManager::RefreshCallGraph(CallGraphSCC &CurSCC,
// site could be turned direct), don't reject it in checking mode, and
// don't tweak it to be more precise.
if (CheckingMode && CS.getCalledFunction() &&
- ExistingNode->getFunction() == 0)
+ ExistingNode->getFunction() == nullptr)
continue;
assert(!CheckingMode &&
@@ -286,7 +291,7 @@ bool CGPassManager::RefreshCallGraph(CallGraphSCC &CurSCC,
CalleeNode = CG.getOrInsertFunction(Callee);
// Keep track of whether we turned an indirect call into a direct
// one.
- if (ExistingNode->getFunction() == 0) {
+ if (!ExistingNode->getFunction()) {
DevirtualizedCall = true;
DEBUG(dbgs() << " CGSCCPASSMGR: Devirtualized call to '"
<< Callee->getName() << "'\n");
@@ -424,7 +429,7 @@ bool CGPassManager::RunAllPassesOnSCC(CallGraphSCC &CurSCC, CallGraph &CG,
/// run - Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the module, and if so, return true.
bool CGPassManager::runOnModule(Module &M) {
- CallGraph &CG = getAnalysis<CallGraph>();
+ CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
bool Changed = doInitialization(CG);
// Walk the callgraph in bottom-up SCC order.
@@ -434,8 +439,8 @@ bool CGPassManager::runOnModule(Module &M) {
while (!CGI.isAtEnd()) {
// Copy the current SCC and increment past it so that the pass can hack
// on the SCC if it wants to without invalidating our iterator.
- std::vector<CallGraphNode*> &NodeVec = *CGI;
- CurSCC.initialize(&NodeVec[0], &NodeVec[0]+NodeVec.size());
+ const std::vector<CallGraphNode *> &NodeVec = *CGI;
+ CurSCC.initialize(NodeVec.data(), NodeVec.data() + NodeVec.size());
++CGI;
// At the top level, we run all the passes in this pass manager on the
@@ -570,8 +575,8 @@ void CallGraphSCCPass::assignPassManager(PMStack &PMS,
/// the call graph. If the derived class implements this method, it should
/// always explicitly call the implementation here.
void CallGraphSCCPass::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<CallGraph>();
- AU.addPreserved<CallGraph>();
+ AU.addRequired<CallGraphWrapperPass>();
+ AU.addPreserved<CallGraphWrapperPass>();
}
@@ -590,15 +595,19 @@ namespace {
static char ID;
PrintCallGraphPass(const std::string &B, raw_ostream &o)
: CallGraphSCCPass(ID), Banner(B), Out(o) {}
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
-
- bool runOnSCC(CallGraphSCC &SCC) {
+
+ bool runOnSCC(CallGraphSCC &SCC) override {
Out << Banner;
- for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I)
- (*I)->getFunction()->print(Out);
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
+ if ((*I)->getFunction())
+ (*I)->getFunction()->print(Out);
+ else
+ Out << "\nPrinting <null> Function\n";
+ }
return false;
}
};
diff --git a/contrib/llvm/lib/Analysis/IPA/CallPrinter.cpp b/contrib/llvm/lib/Analysis/IPA/CallPrinter.cpp
index 306ae7a..68dcd3c 100644
--- a/contrib/llvm/lib/Analysis/IPA/CallPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/CallPrinter.cpp
@@ -22,13 +22,10 @@ using namespace llvm;
namespace llvm {
-template<>
-struct DOTGraphTraits<CallGraph*> : public DefaultDOTGraphTraits {
- DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
+template <> struct DOTGraphTraits<CallGraph *> : public DefaultDOTGraphTraits {
+ DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
- static std::string getGraphName(CallGraph *Graph) {
- return "Call graph";
- }
+ static std::string getGraphName(CallGraph *Graph) { return "Call graph"; }
std::string getNodeLabel(CallGraphNode *Node, CallGraph *Graph) {
if (Function *Func = Node->getFunction())
@@ -38,49 +35,57 @@ struct DOTGraphTraits<CallGraph*> : public DefaultDOTGraphTraits {
}
};
+struct AnalysisCallGraphWrapperPassTraits {
+ static CallGraph *getGraph(CallGraphWrapperPass *P) {
+ return &P->getCallGraph();
+ }
+};
+
} // end llvm namespace
namespace {
struct CallGraphViewer
- : public DOTGraphTraitsModuleViewer<CallGraph, true> {
+ : public DOTGraphTraitsModuleViewer<CallGraphWrapperPass, true, CallGraph *,
+ AnalysisCallGraphWrapperPassTraits> {
static char ID;
CallGraphViewer()
- : DOTGraphTraitsModuleViewer<CallGraph, true>("callgraph", ID) {
+ : DOTGraphTraitsModuleViewer<CallGraphWrapperPass, true, CallGraph *,
+ AnalysisCallGraphWrapperPassTraits>(
+ "callgraph", ID) {
initializeCallGraphViewerPass(*PassRegistry::getPassRegistry());
}
};
-struct CallGraphPrinter
- : public DOTGraphTraitsModulePrinter<CallGraph, true> {
+struct CallGraphPrinter : public DOTGraphTraitsModulePrinter<
+ CallGraphWrapperPass, true, CallGraph *,
+ AnalysisCallGraphWrapperPassTraits> {
static char ID;
CallGraphPrinter()
- : DOTGraphTraitsModulePrinter<CallGraph, true>("callgraph", ID) {
- initializeCallGraphPrinterPass(*PassRegistry::getPassRegistry());
+ : DOTGraphTraitsModulePrinter<CallGraphWrapperPass, true, CallGraph *,
+ AnalysisCallGraphWrapperPassTraits>(
+ "callgraph", ID) {
+ initializeCallGraphPrinterPass(*PassRegistry::getPassRegistry());
}
};
} // end anonymous namespace
char CallGraphViewer::ID = 0;
-INITIALIZE_PASS(CallGraphViewer, "view-callgraph",
- "View call graph",
- false, false)
+INITIALIZE_PASS(CallGraphViewer, "view-callgraph", "View call graph", false,
+ false)
char CallGraphPrinter::ID = 0;
INITIALIZE_PASS(CallGraphPrinter, "dot-callgraph",
- "Print call graph to 'dot' file",
- false, false)
+ "Print call graph to 'dot' file", false, false)
// Create methods available outside of this file, to use them
// "include/llvm/LinkAllPasses.h". Otherwise the pass would be deleted by
// the link time optimization.
-ModulePass *llvm::createCallGraphViewerPass() {
- return new CallGraphViewer();
-}
+ModulePass *llvm::createCallGraphViewerPass() { return new CallGraphViewer(); }
ModulePass *llvm::createCallGraphPrinterPass() {
return new CallGraphPrinter();
diff --git a/contrib/llvm/lib/Analysis/IPA/FindUsedTypes.cpp b/contrib/llvm/lib/Analysis/IPA/FindUsedTypes.cpp
index 1c4f17d..b37344b 100644
--- a/contrib/llvm/lib/Analysis/IPA/FindUsedTypes.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/FindUsedTypes.cpp
@@ -14,11 +14,10 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/FindUsedTypes.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Module.h"
-#include "llvm/Support/InstIterator.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp b/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
index 7ec4644..607c068 100644
--- a/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
@@ -14,7 +14,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "globalsmodref-aa"
#include "llvm/Analysis/Passes.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/Statistic.h"
@@ -24,15 +23,17 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/InstIterator.h"
#include <set>
using namespace llvm;
+#define DEBUG_TYPE "globalsmodref-aa"
+
STATISTIC(NumNonAddrTakenGlobalVars,
"Number of global vars without address taken");
STATISTIC(NumNonAddrTakenFunctions,"Number of functions without address taken");
@@ -94,34 +95,38 @@ namespace {
initializeGlobalsModRefPass(*PassRegistry::getPassRegistry());
}
- bool runOnModule(Module &M) {
- InitializeAliasAnalysis(this); // set up super class
- AnalyzeGlobals(M); // find non-addr taken globals
- AnalyzeCallGraph(getAnalysis<CallGraph>(), M); // Propagate on CG
+ bool runOnModule(Module &M) override {
+ InitializeAliasAnalysis(this);
+
+ // Find non-addr taken globals.
+ AnalyzeGlobals(M);
+
+ // Propagate on CG.
+ AnalyzeCallGraph(getAnalysis<CallGraphWrapperPass>().getCallGraph(), M);
return false;
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AliasAnalysis::getAnalysisUsage(AU);
- AU.addRequired<CallGraph>();
+ AU.addRequired<CallGraphWrapperPass>();
AU.setPreservesAll(); // Does not transform code
}
//------------------------------------------------
// Implement the AliasAnalysis API
//
- AliasResult alias(const Location &LocA, const Location &LocB);
+ AliasResult alias(const Location &LocA, const Location &LocB) override;
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc);
+ const Location &Loc) override;
ModRefResult getModRefInfo(ImmutableCallSite CS1,
- ImmutableCallSite CS2) {
+ ImmutableCallSite CS2) override {
return AliasAnalysis::getModRefInfo(CS1, CS2);
}
/// getModRefBehavior - Return the behavior of the specified function if
/// called from the specified call site. The call site may be null in which
/// case the most generic behavior of this function should be returned.
- ModRefBehavior getModRefBehavior(const Function *F) {
+ ModRefBehavior getModRefBehavior(const Function *F) override {
ModRefBehavior Min = UnknownModRefBehavior;
if (FunctionRecord *FR = getFunctionInfo(F)) {
@@ -137,7 +142,7 @@ namespace {
/// getModRefBehavior - Return the behavior of the specified function if
/// called from the specified call site. The call site may be null in which
/// case the most generic behavior of this function should be returned.
- ModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
+ ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override {
ModRefBehavior Min = UnknownModRefBehavior;
if (const Function* F = CS.getCalledFunction())
@@ -151,15 +156,15 @@ namespace {
return ModRefBehavior(AliasAnalysis::getModRefBehavior(CS) & Min);
}
- virtual void deleteValue(Value *V);
- virtual void copyValue(Value *From, Value *To);
- virtual void addEscapingUse(Use &U);
+ void deleteValue(Value *V) override;
+ void copyValue(Value *From, Value *To) override;
+ void addEscapingUse(Use &U) override;
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ void *getAdjustedAnalysisPointer(AnalysisID PI) override {
if (PI == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
@@ -173,14 +178,14 @@ namespace {
FunctionInfo.find(F);
if (I != FunctionInfo.end())
return &I->second;
- return 0;
+ return nullptr;
}
void AnalyzeGlobals(Module &M);
void AnalyzeCallGraph(CallGraph &CG, Module &M);
bool AnalyzeUsesOfPointer(Value *V, std::vector<Function*> &Readers,
std::vector<Function*> &Writers,
- GlobalValue *OkayStoreDest = 0);
+ GlobalValue *OkayStoreDest = nullptr);
bool AnalyzeIndirectGlobalMemory(GlobalValue *GV);
};
}
@@ -189,7 +194,7 @@ char GlobalsModRef::ID = 0;
INITIALIZE_AG_PASS_BEGIN(GlobalsModRef, AliasAnalysis,
"globalsmodref-aa", "Simple mod/ref analysis for globals",
false, true, false)
-INITIALIZE_PASS_DEPENDENCY(CallGraph)
+INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_AG_PASS_END(GlobalsModRef, AliasAnalysis,
"globalsmodref-aa", "Simple mod/ref analysis for globals",
false, true, false)
@@ -248,42 +253,33 @@ bool GlobalsModRef::AnalyzeUsesOfPointer(Value *V,
GlobalValue *OkayStoreDest) {
if (!V->getType()->isPointerTy()) return true;
- for (Value::use_iterator UI = V->use_begin(), E=V->use_end(); UI != E; ++UI) {
- User *U = *UI;
- if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
+ for (Use &U : V->uses()) {
+ User *I = U.getUser();
+ if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
Readers.push_back(LI->getParent()->getParent());
- } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
if (V == SI->getOperand(1)) {
Writers.push_back(SI->getParent()->getParent());
} else if (SI->getOperand(1) != OkayStoreDest) {
return true; // Storing the pointer
}
- } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
- if (AnalyzeUsesOfPointer(GEP, Readers, Writers)) return true;
- } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
- if (AnalyzeUsesOfPointer(BCI, Readers, Writers, OkayStoreDest))
+ } else if (Operator::getOpcode(I) == Instruction::GetElementPtr) {
+ if (AnalyzeUsesOfPointer(I, Readers, Writers))
return true;
- } else if (isFreeCall(U, TLI)) {
- Writers.push_back(cast<Instruction>(U)->getParent()->getParent());
- } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
- // Make sure that this is just the function being called, not that it is
- // passing into the function.
- for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i)
- if (CI->getArgOperand(i) == V) return true;
- } else if (InvokeInst *II = dyn_cast<InvokeInst>(U)) {
+ } else if (Operator::getOpcode(I) == Instruction::BitCast) {
+ if (AnalyzeUsesOfPointer(I, Readers, Writers, OkayStoreDest))
+ return true;
+ } else if (CallSite CS = I) {
// Make sure that this is just the function being called, not that it is
// being passed into the function.
- for (unsigned i = 0, e = II->getNumArgOperands(); i != e; ++i)
- if (II->getArgOperand(i) == V) return true;
- } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
- if (CE->getOpcode() == Instruction::GetElementPtr ||
- CE->getOpcode() == Instruction::BitCast) {
- if (AnalyzeUsesOfPointer(CE, Readers, Writers))
- return true;
- } else {
- return true;
+ if (!CS.isCallee(&U)) {
+ // Detect calls to free.
+ if (isFreeCall(I, TLI))
+ Writers.push_back(CS->getParent()->getParent());
+ else
+ return true; // Argument of an unknown call.
}
- } else if (ICmpInst *ICI = dyn_cast<ICmpInst>(U)) {
+ } else if (ICmpInst *ICI = dyn_cast<ICmpInst>(I)) {
if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
return true; // Allow comparison against null.
} else {
@@ -308,8 +304,7 @@ bool GlobalsModRef::AnalyzeIndirectGlobalMemory(GlobalValue *GV) {
// Walk the user list of the global. If we find anything other than a direct
// load or store, bail out.
- for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I){
- User *U = *I;
+ for (User *U : GV->users()) {
if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
// The pointer loaded from the global can only be used in simple ways:
    // we allow addressing of it and loading and storing to it. We do *not* allow
@@ -363,9 +358,8 @@ bool GlobalsModRef::AnalyzeIndirectGlobalMemory(GlobalValue *GV) {
void GlobalsModRef::AnalyzeCallGraph(CallGraph &CG, Module &M) {
// We do a bottom-up SCC traversal of the call graph. In other words, we
// visit all callees before callers (leaf-first).
- for (scc_iterator<CallGraph*> I = scc_begin(&CG), E = scc_end(&CG); I != E;
- ++I) {
- std::vector<CallGraphNode *> &SCC = *I;
+ for (scc_iterator<CallGraph*> I = scc_begin(&CG); !I.isAtEnd(); ++I) {
+ const std::vector<CallGraphNode *> &SCC = *I;
assert(!SCC.empty() && "SCC with no functions?");
if (!SCC[0]->getFunction()) {
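
The traversal above visits strongly connected components leaf-first, so each callee's mod/ref summary already exists when its callers are processed; the caller then ORs the callee's per-global bits into its own record (the GlobalInfo merge in the next hunk). A minimal standalone sketch of that bottom-up merge, using hypothetical Summary and global-id types in place of LLVM's FunctionRecord and GlobalValue:

#include <cstdint>
#include <map>

// Hypothetical stand-in for GlobalsModRef's per-function record.
enum ModRef : uint8_t { NoModRef = 0, Ref = 1, Mod = 2, ModRefBoth = 3 };

struct Summary {
  uint8_t FunctionEffect = NoModRef; // what the function itself does
  std::map<int, uint8_t> GlobalInfo; // global id -> mod/ref bits
};

// Merge a callee's summary into its caller, mirroring
// "FR.GlobalInfo[G.first] |= G.second" in the patch.
static void mergeCalleeIntoCaller(Summary &Caller, const Summary &Callee) {
  Caller.FunctionEffect |= Callee.FunctionEffect;
  for (const auto &G : Callee.GlobalInfo)
    Caller.GlobalInfo[G.first] |= G.second;
}

int main() {
  Summary Callee, Caller;
  Callee.GlobalInfo[42] = Mod; // callee writes global #42
  Caller.GlobalInfo[42] = Ref; // caller only reads it so far
  mergeCalleeIntoCaller(Caller, Callee); // caller now both reads and writes
  return Caller.GlobalInfo[42] == ModRefBoth ? 0 : 1;
}
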
@@ -417,10 +411,8 @@ void GlobalsModRef::AnalyzeCallGraph(CallGraph &CG, Module &M) {
FunctionEffect |= CalleeFR->FunctionEffect;
// Incorporate callee's effects on globals into our info.
- for (std::map<const GlobalValue*, unsigned>::iterator GI =
- CalleeFR->GlobalInfo.begin(), E = CalleeFR->GlobalInfo.end();
- GI != E; ++GI)
- FR.GlobalInfo[GI->first] |= GI->second;
+ for (const auto &G : CalleeFR->GlobalInfo)
+ FR.GlobalInfo[G.first] |= G.second;
FR.MayReadAnyGlobal |= CalleeFR->MayReadAnyGlobal;
} else {
// Can't say anything about it. However, if it is inside our SCC,
@@ -499,8 +491,8 @@ GlobalsModRef::alias(const Location &LocA,
if (GV1 || GV2) {
// If the global's address is taken, pretend we don't know it's a pointer to
// the global.
- if (GV1 && !NonAddressTakenGlobals.count(GV1)) GV1 = 0;
- if (GV2 && !NonAddressTakenGlobals.count(GV2)) GV2 = 0;
+ if (GV1 && !NonAddressTakenGlobals.count(GV1)) GV1 = nullptr;
+ if (GV2 && !NonAddressTakenGlobals.count(GV2)) GV2 = nullptr;
// If the two pointers are derived from two different non-addr-taken
// globals, or if one is and the other isn't, we know these can't alias.
@@ -514,7 +506,7 @@ GlobalsModRef::alias(const Location &LocA,
// These pointers may be based on the memory owned by an indirect global. If
// so, we may be able to handle this. First check to see if the base pointer
// is a direct load from an indirect global.
- GV1 = GV2 = 0;
+ GV1 = GV2 = nullptr;
if (const LoadInst *LI = dyn_cast<LoadInst>(UV1))
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(LI->getOperand(0)))
if (IndirectGlobals.count(GV))
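
The surrounding hunk implements the core GlobalsModRef alias rule: a pointer provably based on a global whose address is never taken cannot alias a pointer with a different base, because nothing else can point into such a global. A small standalone sketch of just that decision, with hypothetical integer ids standing in for GlobalValue pointers:

#include <set>

enum AliasResult { MayAlias, NoAlias };

// GV1/GV2 are the underlying globals of the two pointers (0 = base unknown
// or not a global); NonAddressTaken holds globals whose address never escapes.
static AliasResult aliasGlobals(int GV1, int GV2,
                                const std::set<int> &NonAddressTaken) {
  // If a global's address is taken, pretend we don't know it is the base.
  if (GV1 && !NonAddressTaken.count(GV1)) GV1 = 0;
  if (GV2 && !NonAddressTaken.count(GV2)) GV2 = 0;
  // Distinct bases, at least one of which is a non-address-taken global,
  // cannot alias: nothing else can point into such a global.
  if ((GV1 || GV2) && GV1 != GV2)
    return NoAlias;
  return MayAlias;
}

int main() {
  std::set<int> NonAddressTaken = {1, 2};
  return aliasGlobals(1, 2, NonAddressTaken) == NoAlias ? 0 : 1;
}
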
diff --git a/contrib/llvm/lib/Analysis/IPA/IPA.cpp b/contrib/llvm/lib/Analysis/IPA/IPA.cpp
index 47357cf..b26c052 100644
--- a/contrib/llvm/lib/Analysis/IPA/IPA.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/IPA.cpp
@@ -12,14 +12,14 @@
//===----------------------------------------------------------------------===//
#include "llvm/InitializePasses.h"
-#include "llvm/PassRegistry.h"
#include "llvm-c/Initialization.h"
+#include "llvm/PassRegistry.h"
using namespace llvm;
/// initializeIPA - Initialize all passes linked into the IPA library.
void llvm::initializeIPA(PassRegistry &Registry) {
- initializeCallGraphPass(Registry);
+ initializeCallGraphWrapperPassPass(Registry);
initializeCallGraphPrinterPass(Registry);
initializeCallGraphViewerPass(Registry);
initializeFindUsedTypesPass(Registry);
diff --git a/contrib/llvm/lib/Analysis/IPA/InlineCost.cpp b/contrib/llvm/lib/Analysis/IPA/InlineCost.cpp
index 3bc796e..8807529 100644
--- a/contrib/llvm/lib/Analysis/IPA/InlineCost.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/InlineCost.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "inline-cost"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
@@ -21,19 +20,21 @@
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
-#include "llvm/InstVisitor.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "inline-cost"
+
STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");
namespace {
@@ -43,7 +44,7 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
friend class InstVisitor<CallAnalyzer, bool>;
// DataLayout if available, or null.
- const DataLayout *const TD;
+ const DataLayout *const DL;
/// The TargetTransformInfo available for this compilation.
const TargetTransformInfo &TTI;
@@ -97,9 +98,6 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
void disableSROA(Value *V);
void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
int InstructionCost);
- bool handleSROACandidate(bool IsSROAValid,
- DenseMap<Value *, int>::iterator CostIt,
- int InstructionCost);
bool isGEPOffsetConstant(GetElementPtrInst &GEP);
bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
bool simplifyCallSite(Function *F, CallSite CS);
@@ -142,9 +140,9 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
bool visitUnreachableInst(UnreachableInst &I);
public:
- CallAnalyzer(const DataLayout *TD, const TargetTransformInfo &TTI,
+ CallAnalyzer(const DataLayout *DL, const TargetTransformInfo &TTI,
Function &Callee, int Threshold)
- : TD(TD), TTI(TTI), F(Callee), Threshold(Threshold), Cost(0),
+ : DL(DL), TTI(TTI), F(Callee), Threshold(Threshold), Cost(0),
IsCallerRecursive(false), IsRecursiveCall(false),
ExposesReturnsTwice(false), HasDynamicAlloca(false),
ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false),
@@ -225,21 +223,6 @@ void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
SROACostSavings += InstructionCost;
}
-/// \brief Helper for the common pattern of handling a SROA candidate.
-/// Either accumulates the cost savings if the SROA remains valid, or disables
-/// SROA for the candidate.
-bool CallAnalyzer::handleSROACandidate(bool IsSROAValid,
- DenseMap<Value *, int>::iterator CostIt,
- int InstructionCost) {
- if (IsSROAValid) {
- accumulateSROACost(CostIt, InstructionCost);
- return true;
- }
-
- disableSROA(CostIt);
- return false;
-}
-
/// \brief Check whether a GEP's indices are all constant.
///
/// Respects any simplified values known during the analysis of this callsite.
@@ -256,10 +239,10 @@ bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) {
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
- if (!TD)
+ if (!DL)
return false;
- unsigned IntPtrWidth = TD->getPointerSizeInBits();
+ unsigned IntPtrWidth = DL->getPointerSizeInBits();
assert(IntPtrWidth == Offset.getBitWidth());
for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
@@ -275,25 +258,34 @@ bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
// Handle a struct index, which adds its field offset to the pointer.
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
unsigned ElementIdx = OpC->getZExtValue();
- const StructLayout *SL = TD->getStructLayout(STy);
+ const StructLayout *SL = DL->getStructLayout(STy);
Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
continue;
}
- APInt TypeSize(IntPtrWidth, TD->getTypeAllocSize(GTI.getIndexedType()));
+ APInt TypeSize(IntPtrWidth, DL->getTypeAllocSize(GTI.getIndexedType()));
Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
}
return true;
}
bool CallAnalyzer::visitAlloca(AllocaInst &I) {
- // FIXME: Check whether inlining will turn a dynamic alloca into a static
+ // Check whether inlining will turn a dynamic alloca into a static
// alloca, and handle that case.
+ if (I.isArrayAllocation()) {
+ if (Constant *Size = SimplifiedValues.lookup(I.getArraySize())) {
+ ConstantInt *AllocSize = dyn_cast<ConstantInt>(Size);
+ assert(AllocSize && "Allocation size not a constant int?");
+ Type *Ty = I.getAllocatedType();
+ AllocatedSize += Ty->getPrimitiveSizeInBits() * AllocSize->getZExtValue();
+ return Base::visitAlloca(I);
+ }
+ }
// Accumulate the allocated size.
if (I.isStaticAlloca()) {
Type *Ty = I.getAllocatedType();
- AllocatedSize += (TD ? TD->getTypeAllocSize(Ty) :
+ AllocatedSize += (DL ? DL->getTypeAllocSize(Ty) :
Ty->getPrimitiveSizeInBits());
}
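
In the hunk above the analyzer starts charging array allocas whose element count folds to a constant: the accumulated footprint is the element size times the constant count, while plain static allocas keep contributing their (DataLayout-derived, when available) allocation size. A standalone sketch of that bookkeeping, with made-up sizes instead of DataLayout queries:

#include <cstdint>
#include <optional>

struct AllocaInfo {
  uint64_t ElemSizeInBits;                 // size of the allocated type
  std::optional<uint64_t> ConstArrayCount; // set when the count folds to a constant
  bool IsStatic;                           // fixed-size entry-block alloca
};

// Mirrors the AllocatedSize accumulation in CallAnalyzer::visitAlloca.
static void accumulateAllocaSize(uint64_t &AllocatedSize, const AllocaInfo &AI) {
  if (AI.ConstArrayCount) {
    AllocatedSize += AI.ElemSizeInBits * *AI.ConstArrayCount;
    return;
  }
  if (AI.IsStatic)
    AllocatedSize += AI.ElemSizeInBits;
}

int main() {
  uint64_t AllocatedSize = 0;
  accumulateAllocaSize(AllocatedSize, {32, 8, false}); // i32 x 8 -> 256 bits
  accumulateAllocaSize(AllocatedSize, {64, {}, true}); // one static i64 -> 64 bits
  return AllocatedSize == 320 ? 0 : 1;
}
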
@@ -330,7 +322,7 @@ bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
// Try to fold GEPs of constant-offset call site argument pointers. This
// requires target data and inbounds GEPs.
- if (TD && I.isInBounds()) {
+ if (DL && I.isInBounds()) {
// Check if we have a base + offset for the pointer.
Value *Ptr = I.getPointerOperand();
std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
@@ -399,6 +391,7 @@ bool CallAnalyzer::visitBitCast(BitCastInst &I) {
}
bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
+ const DataLayout *DL = I.getDataLayout();
// Propagate constants through ptrtoint.
Constant *COp = dyn_cast<Constant>(I.getOperand(0));
if (!COp)
@@ -412,7 +405,7 @@ bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
// Track base/offset pairs when converted to a plain integer provided the
// integer is large enough to represent the pointer.
unsigned IntegerSize = I.getType()->getScalarSizeInBits();
- if (TD && IntegerSize >= TD->getPointerSizeInBits()) {
+ if (DL && IntegerSize >= DL->getPointerSizeInBits()) {
std::pair<Value *, APInt> BaseAndOffset
= ConstantOffsetPtrs.lookup(I.getOperand(0));
if (BaseAndOffset.first)
@@ -435,6 +428,7 @@ bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
}
bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
+ const DataLayout *DL = I.getDataLayout();
   // Propagate constants through inttoptr.
Constant *COp = dyn_cast<Constant>(I.getOperand(0));
if (!COp)
@@ -449,7 +443,7 @@ bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
// modifications provided the integer is not too large.
Value *Op = I.getOperand(0);
unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
- if (TD && IntegerSize <= TD->getPointerSizeInBits()) {
+ if (DL && IntegerSize <= DL->getPointerSizeInBits()) {
std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
if (BaseAndOffset.first)
ConstantOffsetPtrs[&I] = BaseAndOffset;
@@ -488,7 +482,7 @@ bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
COp = SimplifiedValues.lookup(Operand);
if (COp)
if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(),
- COp, TD)) {
+ COp, DL)) {
SimplifiedValues[&I] = C;
return true;
}
@@ -523,9 +517,9 @@ bool CallAnalyzer::visitCmpInst(CmpInst &I) {
// a common base.
Value *LHSBase, *RHSBase;
APInt LHSOffset, RHSOffset;
- llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
+ std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
if (LHSBase) {
- llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
+ std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
if (RHSBase && LHSBase == RHSBase) {
// We have common bases, fold the icmp to a constant based on the
// offsets.
@@ -573,9 +567,9 @@ bool CallAnalyzer::visitSub(BinaryOperator &I) {
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
Value *LHSBase, *RHSBase;
APInt LHSOffset, RHSOffset;
- llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
+ std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
if (LHSBase) {
- llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
+ std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
if (RHSBase && LHSBase == RHSBase) {
// We have common bases, fold the subtract to a constant based on the
// offsets.
@@ -602,7 +596,7 @@ bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
if (!isa<Constant>(RHS))
if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
RHS = SimpleRHS;
- Value *SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, TD);
+ Value *SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, DL);
if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
SimplifiedValues[&I] = C;
return true;
@@ -721,7 +715,7 @@ bool CallAnalyzer::visitCallSite(CallSite CS) {
return false;
}
if (CS.isCall() &&
- cast<CallInst>(CS.getInstruction())->hasFnAttr(Attribute::NoDuplicate))
+ cast<CallInst>(CS.getInstruction())->cannotDuplicate())
ContainsNoDuplicateCall = true;
if (Function *F = CS.getCalledFunction()) {
@@ -784,7 +778,7 @@ bool CallAnalyzer::visitCallSite(CallSite CS) {
// during devirtualization and so we want to give it a hefty bonus for
// inlining, but cap that bonus in the event that inlining wouldn't pan
// out. Pretend to inline the function, with a custom threshold.
- CallAnalyzer CA(TD, TTI, *F, InlineConstants::IndirectCallThreshold);
+ CallAnalyzer CA(DL, TTI, *F, InlineConstants::IndirectCallThreshold);
if (CA.analyzeCall(CS)) {
// We were able to inline the indirect call! Subtract the cost from the
// bonus we want to apply, but don't go below zero.
@@ -814,9 +808,29 @@ bool CallAnalyzer::visitBranchInst(BranchInst &BI) {
bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
   // We model unconditional switches as free; see the comments on handling
// branches.
- return isa<ConstantInt>(SI.getCondition()) ||
- dyn_cast_or_null<ConstantInt>(
- SimplifiedValues.lookup(SI.getCondition()));
+ if (isa<ConstantInt>(SI.getCondition()))
+ return true;
+ if (Value *V = SimplifiedValues.lookup(SI.getCondition()))
+ if (isa<ConstantInt>(V))
+ return true;
+
+ // Otherwise, we need to accumulate a cost proportional to the number of
+ // distinct successor blocks. This fan-out in the CFG cannot be represented
+ // for free even if we can represent the core switch as a jumptable that
+ // takes a single instruction.
+ //
+ // NB: We convert large switches which are just used to initialize large phi
+ // nodes to lookup tables instead in simplify-cfg, so this shouldn't prevent
+ // inlining those. It will prevent inlining in cases where the optimization
+ // does not (yet) fire.
+ SmallPtrSet<BasicBlock *, 8> SuccessorBlocks;
+ SuccessorBlocks.insert(SI.getDefaultDest());
+ for (auto I = SI.case_begin(), E = SI.case_end(); I != E; ++I)
+ SuccessorBlocks.insert(I.getCaseSuccessor());
+ // Add cost corresponding to the number of distinct destinations. The first
+ // we model as free because of fallthrough.
+ Cost += (SuccessorBlocks.size() - 1) * InlineConstants::InstrCost;
+ return false;
}
bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
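
The rewritten visitSwitchInst above no longer treats every switch as free: when the condition does not fold to a constant, it collects the distinct successor blocks (the default destination plus every case target) and charges one instruction cost per destination beyond the first, since only the fallthrough edge is modeled as free. A small standalone sketch of that cost computation, with a hypothetical InstrCost constant:

#include <set>
#include <vector>

constexpr int InstrCost = 5; // hypothetical per-instruction cost unit

// Successors are block ids; the first argument is the default destination.
static int switchCost(int DefaultDest, const std::vector<int> &CaseDests) {
  std::set<int> SuccessorBlocks;
  SuccessorBlocks.insert(DefaultDest);
  for (int Dest : CaseDests)
    SuccessorBlocks.insert(Dest);
  // The first destination is modeled as free (fallthrough); each additional
  // distinct destination costs one instruction.
  return static_cast<int>(SuccessorBlocks.size() - 1) * InstrCost;
}

int main() {
  // Three cases but only two distinct targets besides the default:
  // default=0, cases -> {1, 1, 2} gives 2 extra destinations.
  return switchCost(0, {1, 1, 2}) == 2 * InstrCost ? 0 : 1;
}
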
@@ -827,10 +841,7 @@ bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
// original function which is extremely undefined behavior.
// FIXME: This logic isn't really right; we can safely inline functions with
// indirectbr's as long as no other function or global references the
- // blockaddress of a block within the current function. And as a QOI issue,
- // if someone is using a blockaddress without an indirectbr, and that
- // reference somehow ends up in another function or global, we probably don't
- // want to inline this function.
+ // blockaddress of a block within the current function.
HasIndirectBr = true;
return false;
}
@@ -872,6 +883,16 @@ bool CallAnalyzer::visitInstruction(Instruction &I) {
/// viable, and true if inlining remains viable.
bool CallAnalyzer::analyzeBlock(BasicBlock *BB) {
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
+    // FIXME: Currently, the number of instructions in a function, regardless of
+    // our ability to simplify them during inlining to constants or dead code,
+    // is actually used by the vector bonus heuristic. As long as that's true,
+ // we have to special case debug intrinsics here to prevent differences in
+ // inlining due to debug symbols. Eventually, the number of unsimplified
+ // instructions shouldn't factor into the cost computation, but until then,
+ // hack around it here.
+ if (isa<DbgInfoIntrinsic>(I))
+ continue;
+
++NumInstructions;
if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
++NumVectorInstructions;
@@ -921,10 +942,10 @@ bool CallAnalyzer::analyzeBlock(BasicBlock *BB) {
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
- if (!TD || !V->getType()->isPointerTy())
- return 0;
+ if (!DL || !V->getType()->isPointerTy())
+ return nullptr;
- unsigned IntPtrWidth = TD->getPointerSizeInBits();
+ unsigned IntPtrWidth = DL->getPointerSizeInBits();
APInt Offset = APInt::getNullValue(IntPtrWidth);
// Even though we don't look through PHI nodes, we could be called on an
@@ -934,7 +955,7 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
do {
if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
- return 0;
+ return nullptr;
V = GEP->getPointerOperand();
} else if (Operator::getOpcode(V) == Instruction::BitCast) {
V = cast<Operator>(V)->getOperand(0);
@@ -948,7 +969,7 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
assert(V->getType()->isPointerTy() && "Unexpected operand type!");
} while (Visited.insert(V));
- Type *IntPtrTy = TD->getIntPtrType(V->getContext());
+ Type *IntPtrTy = DL->getIntPtrType(V->getContext());
return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}
@@ -983,12 +1004,12 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
// Give out bonuses per argument, as the instructions setting them up will
// be gone after inlining.
for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
- if (TD && CS.isByValArgument(I)) {
+ if (DL && CS.isByValArgument(I)) {
// We approximate the number of loads and stores needed by dividing the
// size of the byval type by the target's pointer size.
PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
- unsigned TypeSize = TD->getTypeSizeInBits(PTy->getElementType());
- unsigned PointerSize = TD->getPointerSizeInBits();
+ unsigned TypeSize = DL->getTypeSizeInBits(PTy->getElementType());
+ unsigned PointerSize = DL->getPointerSizeInBits();
// Ceiling division.
unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
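
The byval bonus above approximates how many loads and stores the argument copy needs by dividing the byval type's size by the pointer size, rounding up. A worked standalone version of that estimate, with made-up sizes rather than DataLayout queries:

#include <cstdio>

// Ceiling division, as in "(TypeSize + PointerSize - 1) / PointerSize".
static unsigned numCopies(unsigned TypeSizeInBits, unsigned PointerSizeInBits) {
  return (TypeSizeInBits + PointerSizeInBits - 1) / PointerSizeInBits;
}

int main() {
  // A 96-bit struct passed byval on a 64-bit target needs two stores
  // (and two loads) in this model: ceil(96 / 64) == 2.
  std::printf("stores needed: %u\n", numCopies(96, 64));
  return 0;
}
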
@@ -1040,9 +1061,8 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
Function *Caller = CS.getInstruction()->getParent()->getParent();
// Check if the caller function is recursive itself.
- for (Value::use_iterator U = Caller->use_begin(), E = Caller->use_end();
- U != E; ++U) {
- CallSite Site(cast<Value>(*U));
+ for (User *U : Caller->users()) {
+ CallSite Site(U);
if (!Site)
continue;
Instruction *I = Site.getInstruction();
@@ -1098,6 +1118,15 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
if (BB->empty())
continue;
+ // Disallow inlining a blockaddress. A blockaddress only has defined
+ // behavior for an indirect branch in the same function, and we do not
+ // currently support inlining indirect branches. But, the inliner may not
+ // see an indirect branch that ends up being dead code at a particular call
+ // site. If the blockaddress escapes the function, e.g., via a global
+ // variable, inlining may lead to an invalid cross-function reference.
+ if (BB->hasAddressTaken())
+ return false;
+
// Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail out.
if (!analyzeBlock(BB)) {
@@ -1154,7 +1183,7 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
}
}
- // If this is a noduplicate call, we can still inline as long as
+ // If this is a noduplicate call, we can still inline as long as
// inlining this would cause the removal of the caller (so the instruction
// is not actually duplicated, just moved).
if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
@@ -1168,7 +1197,7 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// \brief Dump stats about this call's analysis.
void CallAnalyzer::dump() {
-#define DEBUG_PRINT_STAT(x) llvm::dbgs() << " " #x ": " << x << "\n"
+#define DEBUG_PRINT_STAT(x) dbgs() << " " #x ": " << x << "\n"
DEBUG_PRINT_STAT(NumConstantArgs);
DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
DEBUG_PRINT_STAT(NumAllocaArgs);
@@ -1178,6 +1207,9 @@ void CallAnalyzer::dump() {
DEBUG_PRINT_STAT(SROACostSavings);
DEBUG_PRINT_STAT(SROACostSavingsLost);
DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
+ DEBUG_PRINT_STAT(Cost);
+ DEBUG_PRINT_STAT(Threshold);
+ DEBUG_PRINT_STAT(VectorBonus);
#undef DEBUG_PRINT_STAT
}
#endif
@@ -1190,7 +1222,7 @@ INITIALIZE_PASS_END(InlineCostAnalysis, "inline-cost", "Inline Cost Analysis",
char InlineCostAnalysis::ID = 0;
-InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID), TD(0) {}
+InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID) {}
InlineCostAnalysis::~InlineCostAnalysis() {}
@@ -1201,7 +1233,6 @@ void InlineCostAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
}
bool InlineCostAnalysis::runOnSCC(CallGraphSCC &SCC) {
- TD = getAnalysisIfAvailable<DataLayout>();
TTI = &getAnalysis<TargetTransformInfo>();
return false;
}
@@ -1234,7 +1265,7 @@ InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee,
// Calls to functions with always-inline attributes should be inlined
// whenever possible.
- if (Callee->hasFnAttribute(Attribute::AlwaysInline)) {
+ if (CS.hasFnAttr(Attribute::AlwaysInline)) {
if (isInlineViable(*Callee))
return llvm::InlineCost::getAlways();
return llvm::InlineCost::getNever();
@@ -1259,7 +1290,7 @@ InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee,
DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
<< "...\n");
- CallAnalyzer CA(TD, *TTI, *Callee, Threshold);
+ CallAnalyzer CA(Callee->getDataLayout(), *TTI, *Callee, Threshold);
bool ShouldInline = CA.analyzeCall(CS);
DEBUG(CA.dump());
@@ -1278,8 +1309,9 @@ bool InlineCostAnalysis::isInlineViable(Function &F) {
F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
Attribute::ReturnsTwice);
for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
- // Disallow inlining of functions which contain an indirect branch.
- if (isa<IndirectBrInst>(BI->getTerminator()))
+ // Disallow inlining of functions which contain indirect branches or
+ // blockaddresses.
+ if (isa<IndirectBrInst>(BI->getTerminator()) || BI->hasAddressTaken())
return false;
for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
diff --git a/contrib/llvm/lib/Analysis/IVUsers.cpp b/contrib/llvm/lib/Analysis/IVUsers.cpp
index 5a06cdc..24655aa 100644
--- a/contrib/llvm/lib/Analysis/IVUsers.cpp
+++ b/contrib/llvm/lib/Analysis/IVUsers.cpp
@@ -12,17 +12,15 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "iv-users"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
@@ -30,11 +28,13 @@
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "iv-users"
+
char IVUsers::ID = 0;
INITIALIZE_PASS_BEGIN(IVUsers, "iv-users",
"Induction Variable Users", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_END(IVUsers, "iv-users",
"Induction Variable Users", false, true)
@@ -85,7 +85,7 @@ static bool isInteresting(const SCEV *S, const Instruction *I, const Loop *L,
static bool isSimplifiedLoopNest(BasicBlock *BB, const DominatorTree *DT,
const LoopInfo *LI,
SmallPtrSet<Loop*,16> &SimpleLoopNests) {
- Loop *NearestLoop = 0;
+ Loop *NearestLoop = nullptr;
for (DomTreeNode *Rung = DT->getNode(BB);
Rung; Rung = Rung->getIDom()) {
BasicBlock *DomBB = Rung->getBlock();
@@ -124,14 +124,14 @@ bool IVUsers::AddUsersImpl(Instruction *I,
// IVUsers is used by LSR which assumes that all SCEV expressions are safe to
// pass to SCEVExpander. Expressions are not safe to expand if they represent
// operations that are not safe to speculate, namely integer division.
- if (!isa<PHINode>(I) && !isSafeToSpeculativelyExecute(I, TD))
+ if (!isa<PHINode>(I) && !isSafeToSpeculativelyExecute(I, DL))
return false;
// LSR is not APInt clean, do not touch integers bigger than 64-bits.
// Also avoid creating IVs of non-native types. For example, we don't want a
// 64-bit IV in 32-bit code just because the loop has one 64-bit cast.
uint64_t Width = SE->getTypeSizeInBits(I->getType());
- if (Width > 64 || (TD && !TD->isLegalInteger(Width)))
+ if (Width > 64 || (DL && !DL->isLegalInteger(Width)))
return false;
// Get the symbolic expression for this instruction.
@@ -143,9 +143,8 @@ bool IVUsers::AddUsersImpl(Instruction *I,
return false;
SmallPtrSet<Instruction *, 4> UniqueUsers;
- for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
- UI != E; ++UI) {
- Instruction *User = cast<Instruction>(*UI);
+ for (Use &U : I->uses()) {
+ Instruction *User = cast<Instruction>(U.getUser());
if (!UniqueUsers.insert(User))
continue;
@@ -158,7 +157,7 @@ bool IVUsers::AddUsersImpl(Instruction *I,
BasicBlock *UseBB = User->getParent();
// A phi's use is live out of its predecessor block.
if (PHINode *PHI = dyn_cast<PHINode>(User)) {
- unsigned OperandNo = UI.getOperandNo();
+ unsigned OperandNo = U.getOperandNo();
unsigned ValNo = PHINode::getIncomingValueNumForOperand(OperandNo);
UseBB = PHI->getIncomingBlock(ValNo);
}
@@ -243,7 +242,7 @@ IVUsers::IVUsers()
void IVUsers::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<LoopInfo>();
- AU.addRequired<DominatorTree>();
+ AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<ScalarEvolution>();
AU.setPreservesAll();
}
@@ -252,9 +251,10 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
L = l;
LI = &getAnalysis<LoopInfo>();
- DT = &getAnalysis<DominatorTree>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &getAnalysis<ScalarEvolution>();
- TD = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
// Find all uses of induction variables in this loop, and categorize
// them by stride. Start by finding all of the PHI nodes in the header for
@@ -267,7 +267,7 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
void IVUsers::print(raw_ostream &OS, const Module *M) const {
OS << "IV Users for loop ";
- WriteAsOperand(OS, L->getHeader(), false);
+ L->getHeader()->printAsOperand(OS, false);
if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
OS << " with backedge-taken count "
<< *SE->getBackedgeTakenCount(L);
@@ -277,17 +277,20 @@ void IVUsers::print(raw_ostream &OS, const Module *M) const {
for (ilist<IVStrideUse>::const_iterator UI = IVUses.begin(),
E = IVUses.end(); UI != E; ++UI) {
OS << " ";
- WriteAsOperand(OS, UI->getOperandValToReplace(), false);
+ UI->getOperandValToReplace()->printAsOperand(OS, false);
OS << " = " << *getReplacementExpr(*UI);
for (PostIncLoopSet::const_iterator
I = UI->PostIncLoops.begin(),
E = UI->PostIncLoops.end(); I != E; ++I) {
OS << " (post-inc with loop ";
- WriteAsOperand(OS, (*I)->getHeader(), false);
+ (*I)->getHeader()->printAsOperand(OS, false);
OS << ")";
}
OS << " in ";
- UI->getUser()->print(OS);
+ if (UI->getUser())
+ UI->getUser()->print(OS);
+ else
+ OS << "Printing <null> User";
OS << '\n';
}
}
@@ -330,16 +333,16 @@ static const SCEVAddRecExpr *findAddRecForLoop(const SCEV *S, const Loop *L) {
I != E; ++I)
if (const SCEVAddRecExpr *AR = findAddRecForLoop(*I, L))
return AR;
- return 0;
+ return nullptr;
}
- return 0;
+ return nullptr;
}
const SCEV *IVUsers::getStride(const IVStrideUse &IU, const Loop *L) const {
if (const SCEVAddRecExpr *AR = findAddRecForLoop(getExpr(IU), L))
return AR->getStepRecurrence(*SE);
- return 0;
+ return nullptr;
}
void IVStrideUse::transformToPostInc(const Loop *L) {
diff --git a/contrib/llvm/lib/Analysis/InstCount.cpp b/contrib/llvm/lib/Analysis/InstCount.cpp
index 75a49eb..de2b9c0 100644
--- a/contrib/llvm/lib/Analysis/InstCount.cpp
+++ b/contrib/llvm/lib/Analysis/InstCount.cpp
@@ -11,17 +11,18 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "instcount"
#include "llvm/Analysis/Passes.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/Function.h"
-#include "llvm/InstVisitor.h"
+#include "llvm/IR/InstVisitor.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "instcount"
+
STATISTIC(TotalInsts , "Number of instructions (of all types)");
STATISTIC(TotalBlocks, "Number of basic blocks");
STATISTIC(TotalFuncs , "Number of non-external functions");
@@ -47,7 +48,7 @@ namespace {
void visitInstruction(Instruction &I) {
errs() << "Instruction Count does not know about " << I;
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
public:
static char ID; // Pass identification, replacement for typeid
@@ -55,12 +56,12 @@ namespace {
initializeInstCountPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
- virtual void print(raw_ostream &O, const Module *M) const {}
+ void print(raw_ostream &O, const Module *M) const override {}
};
}
diff --git a/contrib/llvm/lib/Analysis/InstructionSimplify.cpp b/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
index b867af1..7a820a5 100644
--- a/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -17,37 +17,37 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "instsimplify"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/Dominators.h"
-#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/Operator.h"
-#include "llvm/Support/ConstantRange.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Support/PatternMatch.h"
-#include "llvm/Support/ValueHandle.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/ValueHandle.h"
using namespace llvm;
using namespace llvm::PatternMatch;
+#define DEBUG_TYPE "instsimplify"
+
enum { RecursionLimit = 3 };
STATISTIC(NumExpand, "Number of expansions");
-STATISTIC(NumFactor , "Number of factorizations");
STATISTIC(NumReassoc, "Number of reassociations");
struct Query {
- const DataLayout *TD;
+ const DataLayout *DL;
const TargetLibraryInfo *TLI;
const DominatorTree *DT;
- Query(const DataLayout *td, const TargetLibraryInfo *tli,
- const DominatorTree *dt) : TD(td), TLI(tli), DT(dt) {}
+ Query(const DataLayout *DL, const TargetLibraryInfo *tli,
+ const DominatorTree *dt) : DL(DL), TLI(tli), DT(dt) {}
};
static Value *SimplifyAndInst(Value *, Value *, const Query &, unsigned);
@@ -131,7 +131,7 @@ static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
Instruction::BinaryOps OpcodeToExpand = (Instruction::BinaryOps)OpcToExpand;
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
- return 0;
+ return nullptr;
// Check whether the expression has the form "(A op' B) op C".
if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
@@ -179,79 +179,7 @@ static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
}
}
- return 0;
-}
-
-/// FactorizeBinOp - Simplify "LHS Opcode RHS" by factorizing out a common term
-/// using the operation OpCodeToExtract. For example, when Opcode is Add and
-/// OpCodeToExtract is Mul then this tries to turn "(A*B)+(A*C)" into "A*(B+C)".
-/// Returns the simplified value, or null if no simplification was performed.
-static Value *FactorizeBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- unsigned OpcToExtract, const Query &Q,
- unsigned MaxRecurse) {
- Instruction::BinaryOps OpcodeToExtract = (Instruction::BinaryOps)OpcToExtract;
- // Recursion is always used, so bail out at once if we already hit the limit.
- if (!MaxRecurse--)
- return 0;
-
- BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
- BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
-
- if (!Op0 || Op0->getOpcode() != OpcodeToExtract ||
- !Op1 || Op1->getOpcode() != OpcodeToExtract)
- return 0;
-
- // The expression has the form "(A op' B) op (C op' D)".
- Value *A = Op0->getOperand(0), *B = Op0->getOperand(1);
- Value *C = Op1->getOperand(0), *D = Op1->getOperand(1);
-
- // Use left distributivity, i.e. "X op' (Y op Z) = (X op' Y) op (X op' Z)".
- // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
- // commutative case, "(A op' B) op (C op' A)"?
- if (A == C || (Instruction::isCommutative(OpcodeToExtract) && A == D)) {
- Value *DD = A == C ? D : C;
- // Form "A op' (B op DD)" if it simplifies completely.
- // Does "B op DD" simplify?
- if (Value *V = SimplifyBinOp(Opcode, B, DD, Q, MaxRecurse)) {
- // It does! Return "A op' V" if it simplifies or is already available.
- // If V equals B then "A op' V" is just the LHS. If V equals DD then
- // "A op' V" is just the RHS.
- if (V == B || V == DD) {
- ++NumFactor;
- return V == B ? LHS : RHS;
- }
- // Otherwise return "A op' V" if it simplifies.
- if (Value *W = SimplifyBinOp(OpcodeToExtract, A, V, Q, MaxRecurse)) {
- ++NumFactor;
- return W;
- }
- }
- }
-
- // Use right distributivity, i.e. "(X op Y) op' Z = (X op' Z) op (Y op' Z)".
- // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
- // commutative case, "(A op' B) op (B op' D)"?
- if (B == D || (Instruction::isCommutative(OpcodeToExtract) && B == C)) {
- Value *CC = B == D ? C : D;
- // Form "(A op CC) op' B" if it simplifies completely..
- // Does "A op CC" simplify?
- if (Value *V = SimplifyBinOp(Opcode, A, CC, Q, MaxRecurse)) {
- // It does! Return "V op' B" if it simplifies or is already available.
- // If V equals A then "V op' B" is just the LHS. If V equals CC then
- // "V op' B" is just the RHS.
- if (V == A || V == CC) {
- ++NumFactor;
- return V == A ? LHS : RHS;
- }
- // Otherwise return "V op' B" if it simplifies.
- if (Value *W = SimplifyBinOp(OpcodeToExtract, V, B, Q, MaxRecurse)) {
- ++NumFactor;
- return W;
- }
- }
- }
-
- return 0;
+ return nullptr;
}
/// SimplifyAssociativeBinOp - Generic simplifications for associative binary
@@ -263,7 +191,7 @@ static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
- return 0;
+ return nullptr;
BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
@@ -308,7 +236,7 @@ static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
// The remaining transforms require commutativity as well as associativity.
if (!Instruction::isCommutative(Opcode))
- return 0;
+ return nullptr;
// Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
if (Op0 && Op0->getOpcode() == Opcode) {
@@ -348,7 +276,7 @@ static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
}
}
- return 0;
+ return nullptr;
}
/// ThreadBinOpOverSelect - In the case of a binary operation with a select
@@ -359,7 +287,7 @@ static Value *ThreadBinOpOverSelect(unsigned Opcode, Value *LHS, Value *RHS,
const Query &Q, unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
- return 0;
+ return nullptr;
SelectInst *SI;
if (isa<SelectInst>(LHS)) {
@@ -420,7 +348,7 @@ static Value *ThreadBinOpOverSelect(unsigned Opcode, Value *LHS, Value *RHS,
}
}
- return 0;
+ return nullptr;
}
/// ThreadCmpOverSelect - In the case of a comparison with a select instruction,
@@ -432,7 +360,7 @@ static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
- return 0;
+ return nullptr;
// Make sure the select is on the LHS.
if (!isa<SelectInst>(LHS)) {
@@ -456,7 +384,7 @@ static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
// It didn't simplify. However if "cmp TV, RHS" is equal to the select
// condition then we can replace it with 'true'. Otherwise give up.
if (!isSameCompare(Cond, Pred, TV, RHS))
- return 0;
+ return nullptr;
TCmp = getTrue(Cond->getType());
}
@@ -470,7 +398,7 @@ static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
// It didn't simplify. However if "cmp FV, RHS" is equal to the select
// condition then we can replace it with 'false'. Otherwise give up.
if (!isSameCompare(Cond, Pred, FV, RHS))
- return 0;
+ return nullptr;
FCmp = getFalse(Cond->getType());
}
@@ -482,7 +410,7 @@ static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
// The remaining cases only make sense if the select condition has the same
// type as the result of the comparison, so bail out if this is not so.
if (Cond->getType()->isVectorTy() != RHS->getType()->isVectorTy())
- return 0;
+ return nullptr;
// If the false value simplified to false, then the result of the compare
// is equal to "Cond && TCmp". This also catches the case when the false
// value simplified to false and the true value to true, returning "Cond".
@@ -502,7 +430,7 @@ static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
/// ThreadBinOpOverPHI - In the case of a binary operation with an operand that
@@ -513,24 +441,24 @@ static Value *ThreadBinOpOverPHI(unsigned Opcode, Value *LHS, Value *RHS,
const Query &Q, unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
- return 0;
+ return nullptr;
PHINode *PI;
if (isa<PHINode>(LHS)) {
PI = cast<PHINode>(LHS);
// Bail out if RHS and the phi may be mutually interdependent due to a loop.
if (!ValueDominatesPHI(RHS, PI, Q.DT))
- return 0;
+ return nullptr;
} else {
assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
PI = cast<PHINode>(RHS);
// Bail out if LHS and the phi may be mutually interdependent due to a loop.
if (!ValueDominatesPHI(LHS, PI, Q.DT))
- return 0;
+ return nullptr;
}
// Evaluate the BinOp on the incoming phi values.
- Value *CommonValue = 0;
+ Value *CommonValue = nullptr;
for (unsigned i = 0, e = PI->getNumIncomingValues(); i != e; ++i) {
Value *Incoming = PI->getIncomingValue(i);
// If the incoming value is the phi node itself, it can safely be skipped.
@@ -541,7 +469,7 @@ static Value *ThreadBinOpOverPHI(unsigned Opcode, Value *LHS, Value *RHS,
// If the operation failed to simplify, or simplified to a different value
// to previously, then give up.
if (!V || (CommonValue && V != CommonValue))
- return 0;
+ return nullptr;
CommonValue = V;
}
@@ -556,7 +484,7 @@ static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
const Query &Q, unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
- return 0;
+ return nullptr;
// Make sure the phi is on the LHS.
if (!isa<PHINode>(LHS)) {
@@ -568,10 +496,10 @@ static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
// Bail out if RHS and the phi may be mutually interdependent due to a loop.
if (!ValueDominatesPHI(RHS, PI, Q.DT))
- return 0;
+ return nullptr;
// Evaluate the BinOp on the incoming phi values.
- Value *CommonValue = 0;
+ Value *CommonValue = nullptr;
for (unsigned i = 0, e = PI->getNumIncomingValues(); i != e; ++i) {
Value *Incoming = PI->getIncomingValue(i);
// If the incoming value is the phi node itself, it can safely be skipped.
@@ -580,7 +508,7 @@ static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
// If the operation failed to simplify, or simplified to a different value
// to previously, then give up.
if (!V || (CommonValue && V != CommonValue))
- return 0;
+ return nullptr;
CommonValue = V;
}
@@ -595,7 +523,7 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Add, CLHS->getType(), Ops,
- Q.TD, Q.TLI);
+ Q.DL, Q.TLI);
}
// Canonicalize the constant to the RHS.
@@ -613,7 +541,7 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
// X + (Y - X) -> Y
// (Y - X) + X -> Y
// Eg: X + -X -> 0
- Value *Y = 0;
+ Value *Y = nullptr;
if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
return Y;
@@ -633,11 +561,6 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
MaxRecurse))
return V;
- // Mul distributes over Add. Try some generic simplifications based on this.
- if (Value *V = FactorizeBinOp(Instruction::Add, Op0, Op1, Instruction::Mul,
- Q, MaxRecurse))
- return V;
-
// Threading Add over selects and phi nodes is pointless, so don't bother.
// Threading over the select in "A + select(cond, B, C)" means evaluating
// "A+B" and "A+C" and seeing if they are equal; but they are equal if and
@@ -647,13 +570,13 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
// "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
// for threading over phi nodes.
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
+ return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query (DL, TLI, DT),
RecursionLimit);
}
@@ -667,17 +590,17 @@ Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
/// This is very similar to GetPointerBaseWithConstantOffset except it doesn't
/// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc.
/// folding.
-static Constant *stripAndComputeConstantOffsets(const DataLayout *TD,
+static Constant *stripAndComputeConstantOffsets(const DataLayout *DL,
Value *&V,
bool AllowNonInbounds = false) {
assert(V->getType()->getScalarType()->isPointerTy());
// Without DataLayout, just be conservative for now. Theoretically, more could
// be done in this case.
- if (!TD)
+ if (!DL)
return ConstantInt::get(IntegerType::get(V->getContext(), 64), 0);
- Type *IntPtrTy = TD->getIntPtrType(V->getType())->getScalarType();
+ Type *IntPtrTy = DL->getIntPtrType(V->getType())->getScalarType();
APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth());
// Even though we don't look through PHI nodes, we could be called on an
@@ -687,7 +610,7 @@ static Constant *stripAndComputeConstantOffsets(const DataLayout *TD,
do {
if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
if ((!AllowNonInbounds && !GEP->isInBounds()) ||
- !GEP->accumulateConstantOffset(*TD, Offset))
+ !GEP->accumulateConstantOffset(*DL, Offset))
break;
V = GEP->getPointerOperand();
} else if (Operator::getOpcode(V) == Instruction::BitCast) {
@@ -712,15 +635,15 @@ static Constant *stripAndComputeConstantOffsets(const DataLayout *TD,
/// \brief Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns zero.
-static Constant *computePointerDifference(const DataLayout *TD,
+static Constant *computePointerDifference(const DataLayout *DL,
Value *LHS, Value *RHS) {
- Constant *LHSOffset = stripAndComputeConstantOffsets(TD, LHS);
- Constant *RHSOffset = stripAndComputeConstantOffsets(TD, RHS);
+ Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
+ Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
// If LHS and RHS are not related via constant offsets to the same base
// value, there is nothing we can do here.
if (LHS != RHS)
- return 0;
+ return nullptr;
// Otherwise, the difference of LHS - RHS can be computed as:
// LHS - RHS
@@ -737,7 +660,7 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Sub, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}
// X - undef -> undef
@@ -753,16 +676,9 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
if (Op0 == Op1)
return Constant::getNullValue(Op0->getType());
- // (X*2) - X -> X
- // (X<<1) - X -> X
- Value *X = 0;
- if (match(Op0, m_Mul(m_Specific(Op1), m_ConstantInt<2>())) ||
- match(Op0, m_Shl(m_Specific(Op1), m_One())))
- return Op1;
-
// (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
// For example, (X + Y) - Y -> X; (Y + X) - Y -> X
- Value *Y = 0, *Z = Op1;
+ Value *X = nullptr, *Y = nullptr, *Z = Op1;
if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
// See if "V === Y - Z" simplifies.
if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1))
@@ -831,14 +747,9 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
// Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
if (match(Op0, m_PtrToInt(m_Value(X))) &&
match(Op1, m_PtrToInt(m_Value(Y))))
- if (Constant *Result = computePointerDifference(Q.TD, X, Y))
+ if (Constant *Result = computePointerDifference(Q.DL, X, Y))
return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);
- // Mul distributes over Sub. Try some generic simplifications based on this.
- if (Value *V = FactorizeBinOp(Instruction::Sub, Op0, Op1, Instruction::Mul,
- Q, MaxRecurse))
- return V;
-
// i1 sub -> xor.
if (MaxRecurse && Op0->getType()->isIntegerTy(1))
if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
@@ -853,13 +764,13 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
// "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
// for threading over phi nodes.
- return 0;
+ return nullptr;
}
Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
+ return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query (DL, TLI, DT),
RecursionLimit);
}
@@ -871,7 +782,7 @@ static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::FAdd, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}
// Canonicalize the constant to the RHS.
@@ -890,7 +801,7 @@ static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
// fadd [nnan ninf] X, (fsub [nnan ninf] 0, X) ==> 0
// where nnan and ninf have to occur at least once somewhere in this
// expression
- Value *SubOp = 0;
+ Value *SubOp = nullptr;
if (match(Op1, m_FSub(m_AnyZero(), m_Specific(Op0))))
SubOp = Op1;
else if (match(Op0, m_FSub(m_AnyZero(), m_Specific(Op1))))
@@ -902,7 +813,7 @@ static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
return Constant::getNullValue(Op0->getType());
}
- return 0;
+ return nullptr;
}
/// Given operands for an FSub, see if we can fold the result. If not, this
@@ -913,7 +824,7 @@ static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::FSub, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}
}
@@ -939,7 +850,7 @@ static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
if (FMF.noNaNs() && FMF.noInfs() && Op0 == Op1)
return Constant::getNullValue(Op0->getType());
- return 0;
+ return nullptr;
}
/// Given the operands for an FMul, see if we can fold the result
@@ -951,7 +862,7 @@ static Value *SimplifyFMulInst(Value *Op0, Value *Op1,
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::FMul, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}
// Canonicalize the constant to the RHS.
@@ -966,7 +877,7 @@ static Value *SimplifyFMulInst(Value *Op0, Value *Op1,
if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZero()))
return Op1;
- return 0;
+ return nullptr;
}
/// SimplifyMulInst - Given operands for a Mul, see if we can
@@ -977,7 +888,7 @@ static Value *SimplifyMulInst(Value *Op0, Value *Op1, const Query &Q,
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Mul, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}
// Canonicalize the constant to the RHS.
@@ -997,7 +908,7 @@ static Value *SimplifyMulInst(Value *Op0, Value *Op1, const Query &Q,
return Op0;
// (X / Y) * Y -> X if the division is exact.
- Value *X = 0;
+ Value *X = nullptr;
if (match(Op0, m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0))))) // Y * (X / Y)
return X;
@@ -1031,33 +942,33 @@ static Value *SimplifyMulInst(Value *Op0, Value *Op1, const Query &Q,
MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFAddInst(Op0, Op1, FMF, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyFAddInst(Op0, Op1, FMF, Query (DL, TLI, DT), RecursionLimit);
}
Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFSubInst(Op0, Op1, FMF, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyFSubInst(Op0, Op1, FMF, Query (DL, TLI, DT), RecursionLimit);
}
Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1,
FastMathFlags FMF,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFMulInst(Op0, Op1, FMF, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyFMulInst(Op0, Op1, FMF, Query (DL, TLI, DT), RecursionLimit);
}
-Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyMulInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyMulInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}
/// SimplifyDiv - Given operands for an SDiv or UDiv, see if we can
@@ -1067,7 +978,7 @@ static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
if (Constant *C0 = dyn_cast<Constant>(Op0)) {
if (Constant *C1 = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { C0, C1 };
- return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.TD, Q.TLI);
+ return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.DL, Q.TLI);
}
}
@@ -1098,7 +1009,7 @@ static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
return ConstantInt::get(Op0->getType(), 1);
// (X * Y) / Y -> X if the multiplication does not overflow.
- Value *X = 0, *Y = 0;
+ Value *X = nullptr, *Y = nullptr;
if (match(Op0, m_Mul(m_Value(X), m_Value(Y))) && (X == Op1 || Y == Op1)) {
if (Y != Op1) std::swap(X, Y); // Ensure expression is (X * Y) / Y, Y = Op1
OverflowingBinaryOperator *Mul = cast<OverflowingBinaryOperator>(Op0);
@@ -1129,7 +1040,7 @@ static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
/// SimplifySDivInst - Given operands for an SDiv, see if we can
@@ -1139,13 +1050,13 @@ static Value *SimplifySDivInst(Value *Op0, Value *Op1, const Query &Q,
if (Value *V = SimplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
-Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifySDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifySDivInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}
/// SimplifyUDivInst - Given operands for a UDiv, see if we can
@@ -1155,13 +1066,13 @@ static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const Query &Q,
if (Value *V = SimplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
-Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyUDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyUDivInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}
static Value *SimplifyFDivInst(Value *Op0, Value *Op1, const Query &Q,
@@ -1174,13 +1085,13 @@ static Value *SimplifyFDivInst(Value *Op0, Value *Op1, const Query &Q,
if (match(Op1, m_Undef()))
return Op1;
- return 0;
+ return nullptr;
}
-Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyFDivInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}
/// SimplifyRem - Given operands for an SRem or URem, see if we can
@@ -1190,7 +1101,7 @@ static Value *SimplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
if (Constant *C0 = dyn_cast<Constant>(Op0)) {
if (Constant *C1 = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { C0, C1 };
- return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.TD, Q.TLI);
+ return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.DL, Q.TLI);
}
}
@@ -1234,7 +1145,7 @@ static Value *SimplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
/// SimplifySRemInst - Given operands for an SRem, see if we can
@@ -1244,13 +1155,13 @@ static Value *SimplifySRemInst(Value *Op0, Value *Op1, const Query &Q,
if (Value *V = SimplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
-Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifySRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifySRemInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}
/// SimplifyURemInst - Given operands for a URem, see if we can
@@ -1260,13 +1171,13 @@ static Value *SimplifyURemInst(Value *Op0, Value *Op1, const Query &Q,
if (Value *V = SimplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
-Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyURemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyURemInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}
static Value *SimplifyFRemInst(Value *Op0, Value *Op1, const Query &,
@@ -1279,13 +1190,40 @@ static Value *SimplifyFRemInst(Value *Op0, Value *Op1, const Query &,
if (match(Op1, m_Undef()))
return Op1;
- return 0;
+ return nullptr;
}
-Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyFRemInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
+}
+
+/// isUndefShift - Returns true if a shift by \c Amount always yields undef.
+static bool isUndefShift(Value *Amount) {
+ Constant *C = dyn_cast<Constant>(Amount);
+ if (!C)
+ return false;
+
+ // X shift by undef -> undef because it may shift by the bitwidth.
+ if (isa<UndefValue>(C))
+ return true;
+
+ // Shifting by the bitwidth or more is undefined.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
+ if (CI->getValue().getLimitedValue() >=
+ CI->getType()->getScalarSizeInBits())
+ return true;
+
+ // If all lanes of a vector shift are undefined the whole shift is.
+ if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
+ for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I)
+ if (!isUndefShift(C->getAggregateElement(I)))
+ return false;
+ return true;
+ }
+
+ return false;
}
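As a quick, hedged illustration of the rule the new isUndefShift helper encodes (a shift amount that is undef, or at least the bit width, makes the whole shift undef), the small driver below builds "%s = shl i32 %x, 40" and asks the simplifier about it. It is a sketch only, not part of this patch; it assumes an LLVM tree matching this revision and uses only entry points visible in this file.

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("shift-demo", Ctx);
  Type *I32 = Type::getInt32Ty(Ctx);
  Type *ArgTys[] = {I32};
  Function *F = Function::Create(FunctionType::get(I32, ArgTys, false),
                                 Function::ExternalLinkage, "f", &M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  IRBuilder<> B(BB);
  Value *X = &*F->arg_begin();
  // The shift amount (40) is >= the 32-bit width, so isUndefShift applies.
  Instruction *S = cast<Instruction>(B.CreateShl(X, B.getInt32(40)));
  B.CreateRet(S);
  if (Value *V = SimplifyInstruction(S))
    if (isa<UndefValue>(V))
      errs() << "shl i32 %x, 40 simplified to undef\n";
  return 0;
}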
/// SimplifyShift - Given operands for an Shl, LShr or AShr, see if we can
@@ -1295,7 +1233,7 @@ static Value *SimplifyShift(unsigned Opcode, Value *Op0, Value *Op1,
if (Constant *C0 = dyn_cast<Constant>(Op0)) {
if (Constant *C1 = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { C0, C1 };
- return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.TD, Q.TLI);
+ return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.DL, Q.TLI);
}
}
@@ -1307,15 +1245,9 @@ static Value *SimplifyShift(unsigned Opcode, Value *Op0, Value *Op1,
if (match(Op1, m_Zero()))
return Op0;
- // X shift by undef -> undef because it may shift by the bitwidth.
- if (match(Op1, m_Undef()))
- return Op1;
-
- // Shifting by the bitwidth or more is undefined.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1))
- if (CI->getValue().getLimitedValue() >=
- Op0->getType()->getScalarSizeInBits())
- return UndefValue::get(Op0->getType());
+ // Fold undefined shifts.
+ if (isUndefShift(Op1))
+ return UndefValue::get(Op0->getType());
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
@@ -1329,7 +1261,7 @@ static Value *SimplifyShift(unsigned Opcode, Value *Op0, Value *Op1,
if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
/// SimplifyShlInst - Given operands for an Shl, see if we can
@@ -1347,13 +1279,13 @@ static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
Value *X;
if (match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
return X;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
+ return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query (DL, TLI, DT),
RecursionLimit);
}
@@ -1378,14 +1310,14 @@ static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
cast<OverflowingBinaryOperator>(Op0)->hasNoUnsignedWrap())
return X;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyLShrInst(Op0, Op1, isExact, Query (TD, TLI, DT),
+ return ::SimplifyLShrInst(Op0, Op1, isExact, Query (DL, TLI, DT),
RecursionLimit);
}
@@ -1414,14 +1346,19 @@ static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap())
return X;
- return 0;
+ // Arithmetic shifting an all-sign-bit value is a no-op.
+ unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL);
+ if (NumSignBits == Op0->getType()->getScalarSizeInBits())
+ return Op0;
+
+ return nullptr;
}
Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyAShrInst(Op0, Op1, isExact, Query (TD, TLI, DT),
+ return ::SimplifyAShrInst(Op0, Op1, isExact, Query (DL, TLI, DT),
RecursionLimit);
}
@@ -1433,7 +1370,7 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q,
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::And, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}
// Canonicalize the constant to the RHS.
@@ -1462,7 +1399,7 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q,
return Constant::getNullValue(Op0->getType());
// (A | ?) & A = A
- Value *A = 0, *B = 0;
+ Value *A = nullptr, *B = nullptr;
if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
(A == Op1 || B == Op1))
return Op1;
@@ -1496,11 +1433,6 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q,
Q, MaxRecurse))
return V;
- // Or distributes over And. Try some generic simplifications based on this.
- if (Value *V = FactorizeBinOp(Instruction::And, Op0, Op1, Instruction::Or,
- Q, MaxRecurse))
- return V;
-
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
@@ -1515,13 +1447,13 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q,
MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
-Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyAndInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyAndInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}
/// SimplifyOrInst - Given operands for an Or, see if we can
@@ -1532,7 +1464,7 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q,
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Or, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}
// Canonicalize the constant to the RHS.
@@ -1561,7 +1493,7 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q,
return Constant::getAllOnesValue(Op0->getType());
// (A & ?) | A = A
- Value *A = 0, *B = 0;
+ Value *A = nullptr, *B = nullptr;
if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
(A == Op1 || B == Op1))
return Op1;
@@ -1591,11 +1523,6 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q,
MaxRecurse))
return V;
- // And distributes over Or. Try some generic simplifications based on this.
- if (Value *V = FactorizeBinOp(Instruction::Or, Op0, Op1, Instruction::And,
- Q, MaxRecurse))
- return V;
-
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
@@ -1603,19 +1530,51 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q,
MaxRecurse))
return V;
+ // (A & C)|(B & D)
+ Value *C = nullptr, *D = nullptr;
+ if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
+ match(Op1, m_And(m_Value(B), m_Value(D)))) {
+ ConstantInt *C1 = dyn_cast<ConstantInt>(C);
+ ConstantInt *C2 = dyn_cast<ConstantInt>(D);
+ if (C1 && C2 && (C1->getValue() == ~C2->getValue())) {
+ // (A & C1)|(B & C2)
+ // If we have: ((V + N) & C1) | (V & C2)
+ // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
+ // replace with V+N.
+ Value *V1, *V2;
+ if ((C2->getValue() & (C2->getValue() + 1)) == 0 && // C2 == 0+1+
+ match(A, m_Add(m_Value(V1), m_Value(V2)))) {
+ // Add commutes, try both ways.
+ if (V1 == B && MaskedValueIsZero(V2, C2->getValue()))
+ return A;
+ if (V2 == B && MaskedValueIsZero(V1, C2->getValue()))
+ return A;
+ }
+ // Or commutes, try both ways.
+ if ((C1->getValue() & (C1->getValue() + 1)) == 0 &&
+ match(B, m_Add(m_Value(V1), m_Value(V2)))) {
+ // Add commutes, try both ways.
+ if (V1 == A && MaskedValueIsZero(V2, C1->getValue()))
+ return B;
+ if (V2 == A && MaskedValueIsZero(V1, C1->getValue()))
+ return B;
+ }
+ }
+ }
+
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
-Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyOrInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyOrInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}
/// SimplifyXorInst - Given operands for a Xor, see if we can
@@ -1626,7 +1585,7 @@ static Value *SimplifyXorInst(Value *Op0, Value *Op1, const Query &Q,
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Xor, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}
// Canonicalize the constant to the RHS.
@@ -1655,11 +1614,6 @@ static Value *SimplifyXorInst(Value *Op0, Value *Op1, const Query &Q,
MaxRecurse))
return V;
- // And distributes over Xor. Try some generic simplifications based on this.
- if (Value *V = FactorizeBinOp(Instruction::Xor, Op0, Op1, Instruction::And,
- Q, MaxRecurse))
- return V;
-
// Threading Xor over selects and phi nodes is pointless, so don't bother.
// Threading over the select in "A ^ select(cond, B, C)" means evaluating
// "A^B" and "A^C" and seeing if they are equal; but they are equal if and
@@ -1669,13 +1623,13 @@ static Value *SimplifyXorInst(Value *Op0, Value *Op1, const Query &Q,
// "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
// for threading over phi nodes.
- return 0;
+ return nullptr;
}
-Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyXorInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyXorInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}
static Type *GetCompareTy(Value *Op) {
@@ -1689,17 +1643,17 @@ static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
Value *LHS, Value *RHS) {
SelectInst *SI = dyn_cast<SelectInst>(V);
if (!SI)
- return 0;
+ return nullptr;
CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
if (!Cmp)
- return 0;
+ return nullptr;
Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
return Cmp;
if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
LHS == CmpRHS && RHS == CmpLHS)
return Cmp;
- return 0;
+ return nullptr;
}
// A significant optimization not implemented here is assuming that alloca
@@ -1730,7 +1684,7 @@ static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
// If the C and C++ standards are ever made sufficiently restrictive in this
// area, it may be possible to update LLVM's semantics accordingly and reinstate
// this optimization.
-static Constant *computePointerICmp(const DataLayout *TD,
+static Constant *computePointerICmp(const DataLayout *DL,
const TargetLibraryInfo *TLI,
CmpInst::Predicate Pred,
Value *LHS, Value *RHS) {
@@ -1747,7 +1701,7 @@ static Constant *computePointerICmp(const DataLayout *TD,
// We can only fold certain predicates on pointer comparisons.
switch (Pred) {
default:
- return 0;
+ return nullptr;
// Equality comparisons are easy to fold.
case CmpInst::ICMP_EQ:
@@ -1772,8 +1726,8 @@ static Constant *computePointerICmp(const DataLayout *TD,
// numerous hazards. AliasAnalysis and its utilities rely on special rules
// governing loads and stores which don't apply to icmps. Also, AliasAnalysis
// doesn't need to guarantee pointer inequality when it says NoAlias.
- Constant *LHSOffset = stripAndComputeConstantOffsets(TD, LHS);
- Constant *RHSOffset = stripAndComputeConstantOffsets(TD, RHS);
+ Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
+ Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
// If LHS and RHS are related via constant offsets to the same base
// value, we can replace it with an icmp which just compares the offsets.
@@ -1817,8 +1771,8 @@ static Constant *computePointerICmp(const DataLayout *TD,
ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
uint64_t LHSSize, RHSSize;
if (LHSOffsetCI && RHSOffsetCI &&
- getObjectSize(LHS, LHSSize, TD, TLI) &&
- getObjectSize(RHS, RHSSize, TD, TLI)) {
+ getObjectSize(LHS, LHSSize, DL, TLI) &&
+ getObjectSize(RHS, RHSSize, DL, TLI)) {
const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
if (!LHSOffsetValue.isNegative() &&
@@ -1844,8 +1798,8 @@ static Constant *computePointerICmp(const DataLayout *TD,
// equality comparisons concerning the result. We avoid walking the whole
// chain again by starting where the last calls to
// stripAndComputeConstantOffsets left off and accumulate the offsets.
- Constant *LHSNoBound = stripAndComputeConstantOffsets(TD, LHS, true);
- Constant *RHSNoBound = stripAndComputeConstantOffsets(TD, RHS, true);
+ Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true);
+ Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true);
if (LHS == RHS)
return ConstantExpr::getICmp(Pred,
ConstantExpr::getAdd(LHSOffset, LHSNoBound),
@@ -1853,7 +1807,7 @@ static Constant *computePointerICmp(const DataLayout *TD,
}
// Otherwise, fail.
- return 0;
+ return nullptr;
}
/// SimplifyICmpInst - Given operands for an ICmpInst, see if we can
@@ -1865,7 +1819,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
if (Constant *CRHS = dyn_cast<Constant>(RHS))
- return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.TD, Q.TLI);
+ return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
// If we have a constant, make sure it is on the RHS.
std::swap(LHS, RHS);
@@ -1929,40 +1883,40 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
return getTrue(ITy);
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_ULE:
- if (isKnownNonZero(LHS, Q.TD))
+ if (isKnownNonZero(LHS, Q.DL))
return getFalse(ITy);
break;
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_UGT:
- if (isKnownNonZero(LHS, Q.TD))
+ if (isKnownNonZero(LHS, Q.DL))
return getTrue(ITy);
break;
case ICmpInst::ICMP_SLT:
- ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.TD);
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL);
if (LHSKnownNegative)
return getTrue(ITy);
if (LHSKnownNonNegative)
return getFalse(ITy);
break;
case ICmpInst::ICMP_SLE:
- ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.TD);
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL);
if (LHSKnownNegative)
return getTrue(ITy);
- if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.TD))
+ if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.DL))
return getFalse(ITy);
break;
case ICmpInst::ICMP_SGE:
- ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.TD);
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL);
if (LHSKnownNegative)
return getFalse(ITy);
if (LHSKnownNonNegative)
return getTrue(ITy);
break;
case ICmpInst::ICMP_SGT:
- ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.TD);
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL);
if (LHSKnownNegative)
return getFalse(ITy);
- if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.TD))
+ if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.DL))
return getTrue(ITy);
break;
}
@@ -1979,7 +1933,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// Many binary operators with constant RHS have easy to compute constant
// range. Use them to check whether the comparison is a tautology.
- uint32_t Width = CI->getBitWidth();
+ unsigned Width = CI->getBitWidth();
APInt Lower = APInt(Width, 0);
APInt Upper = APInt(Width, 0);
ConstantInt *CI2;
@@ -1998,20 +1952,47 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
APInt NegOne = APInt::getAllOnesValue(Width);
if (!CI2->isZero())
Upper = NegOne.udiv(CI2->getValue()) + 1;
+ } else if (match(LHS, m_SDiv(m_ConstantInt(CI2), m_Value()))) {
+ if (CI2->isMinSignedValue()) {
+ // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
+ Lower = CI2->getValue();
+ Upper = Lower.lshr(1) + 1;
+ } else {
+ // 'sdiv CI2, x' produces [-|CI2|, |CI2|].
+ Upper = CI2->getValue().abs() + 1;
+ Lower = (-Upper) + 1;
+ }
} else if (match(LHS, m_SDiv(m_Value(), m_ConstantInt(CI2)))) {
- // 'sdiv x, CI2' produces [INT_MIN / CI2, INT_MAX / CI2].
APInt IntMin = APInt::getSignedMinValue(Width);
APInt IntMax = APInt::getSignedMaxValue(Width);
- APInt Val = CI2->getValue().abs();
- if (!Val.isMinValue()) {
+ APInt Val = CI2->getValue();
+ if (Val.isAllOnesValue()) {
+ // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
+ // (special-cased because INT_MIN / -1 would overflow the general formula)
+ Lower = IntMin + 1;
+ Upper = IntMax + 1;
+ } else if (Val.countLeadingZeros() < Width - 1) {
+ // 'sdiv x, CI2' produces [INT_MIN / CI2, INT_MAX / CI2]
+ // where CI2 != -1 and CI2 != 0 and CI2 != 1
Lower = IntMin.sdiv(Val);
- Upper = IntMax.sdiv(Val) + 1;
+ Upper = IntMax.sdiv(Val);
+ if (Lower.sgt(Upper))
+ std::swap(Lower, Upper);
+ Upper = Upper + 1;
+ assert(Upper != Lower && "Upper part of range has wrapped!");
}
} else if (match(LHS, m_LShr(m_Value(), m_ConstantInt(CI2)))) {
// 'lshr x, CI2' produces [0, UINT_MAX >> CI2].
APInt NegOne = APInt::getAllOnesValue(Width);
if (CI2->getValue().ult(Width))
Upper = NegOne.lshr(CI2->getValue()) + 1;
+ } else if (match(LHS, m_LShr(m_ConstantInt(CI2), m_Value()))) {
+ // 'lshr CI2, x' produces [CI2 >> (Width-1), CI2].
+ unsigned ShiftAmount = Width - 1;
+ if (!CI2->isZero() && cast<BinaryOperator>(LHS)->isExact())
+ ShiftAmount = CI2->getValue().countTrailingZeros();
+ Lower = CI2->getValue().lshr(ShiftAmount);
+ Upper = CI2->getValue() + 1;
} else if (match(LHS, m_AShr(m_Value(), m_ConstantInt(CI2)))) {
// 'ashr x, CI2' produces [INT_MIN >> CI2, INT_MAX >> CI2].
APInt IntMin = APInt::getSignedMinValue(Width);
@@ -2020,6 +2001,19 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
Lower = IntMin.ashr(CI2->getValue());
Upper = IntMax.ashr(CI2->getValue()) + 1;
}
+ } else if (match(LHS, m_AShr(m_ConstantInt(CI2), m_Value()))) {
+ unsigned ShiftAmount = Width - 1;
+ if (!CI2->isZero() && cast<BinaryOperator>(LHS)->isExact())
+ ShiftAmount = CI2->getValue().countTrailingZeros();
+ if (CI2->isNegative()) {
+ // 'ashr CI2, x' produces [CI2, CI2 >> (Width-1)]
+ Lower = CI2->getValue();
+ Upper = CI2->getValue().ashr(ShiftAmount) + 1;
+ } else {
+ // 'ashr CI2, x' produces [CI2 >> (Width-1), CI2]
+ Lower = CI2->getValue().ashr(ShiftAmount);
+ Upper = CI2->getValue() + 1;
+ }
} else if (match(LHS, m_Or(m_Value(), m_ConstantInt(CI2)))) {
// 'or x, CI2' produces [CI2, UINT_MAX].
Lower = CI2->getValue();
@@ -2045,8 +2039,8 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
// if the integer type is the same size as the pointer type.
- if (MaxRecurse && Q.TD && isa<PtrToIntInst>(LI) &&
- Q.TD->getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
+ if (MaxRecurse && Q.DL && isa<PtrToIntInst>(LI) &&
+ Q.DL->getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
// Transfer the cast to the constant.
if (Value *V = SimplifyICmpInst(Pred, SrcOp,
@@ -2195,12 +2189,31 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
}
+ // If a bit is known to be zero for A and known to be one for B,
+ // then A and B cannot be equal.
+ if (ICmpInst::isEquality(Pred)) {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
+ uint32_t BitWidth = CI->getBitWidth();
+ APInt LHSKnownZero(BitWidth, 0);
+ APInt LHSKnownOne(BitWidth, 0);
+ computeKnownBits(LHS, LHSKnownZero, LHSKnownOne);
+ APInt RHSKnownZero(BitWidth, 0);
+ APInt RHSKnownOne(BitWidth, 0);
+ computeKnownBits(RHS, RHSKnownZero, RHSKnownOne);
+ if (((LHSKnownOne & RHSKnownZero) != 0) ||
+ ((LHSKnownZero & RHSKnownOne) != 0))
+ return (Pred == ICmpInst::ICMP_EQ)
+ ? ConstantInt::getFalse(CI->getContext())
+ : ConstantInt::getTrue(CI->getContext());
+ }
+ }
+
// Special logic for binary operators.
BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
if (MaxRecurse && (LBO || RBO)) {
// Analyze the case when either LHS or RHS is an add instruction.
- Value *A = 0, *B = 0, *C = 0, *D = 0;
+ Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
// LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
if (LBO && LBO->getOpcode() == Instruction::Add) {
@@ -2258,6 +2271,28 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
}
+ // 0 - (zext X) pred C
+ if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
+ if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
+ if (RHSC->getValue().isStrictlyPositive()) {
+ if (Pred == ICmpInst::ICMP_SLT)
+ return ConstantInt::getTrue(RHSC->getContext());
+ if (Pred == ICmpInst::ICMP_SGE)
+ return ConstantInt::getFalse(RHSC->getContext());
+ if (Pred == ICmpInst::ICMP_EQ)
+ return ConstantInt::getFalse(RHSC->getContext());
+ if (Pred == ICmpInst::ICMP_NE)
+ return ConstantInt::getTrue(RHSC->getContext());
+ }
+ if (RHSC->getValue().isNonNegative()) {
+ if (Pred == ICmpInst::ICMP_SLE)
+ return ConstantInt::getTrue(RHSC->getContext());
+ if (Pred == ICmpInst::ICMP_SGT)
+ return ConstantInt::getFalse(RHSC->getContext());
+ }
+ }
+ }
+
// icmp pred (urem X, Y), Y
if (LBO && match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
bool KnownNonNegative, KnownNegative;
@@ -2266,7 +2301,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
break;
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
- ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.TD);
+ ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL);
if (!KnownNonNegative)
break;
// fall-through
@@ -2276,7 +2311,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
return getFalse(ITy);
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
- ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.TD);
+ ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL);
if (!KnownNonNegative)
break;
// fall-through
@@ -2295,7 +2330,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
break;
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
- ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.TD);
+ ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL);
if (!KnownNonNegative)
break;
// fall-through
@@ -2305,7 +2340,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
return getTrue(ITy);
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
- ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.TD);
+ ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL);
if (!KnownNonNegative)
break;
// fall-through
@@ -2548,7 +2583,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// Simplify comparisons of related pointers using a powerful, recursive
// GEP-walk when we have target data available.
if (LHS->getType()->isPointerTy())
- if (Constant *C = computePointerICmp(Q.TD, Q.TLI, Pred, LHS, RHS))
+ if (Constant *C = computePointerICmp(Q.DL, Q.TLI, Pred, LHS, RHS))
return C;
if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
@@ -2584,14 +2619,14 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyICmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
+ return ::SimplifyICmpInst(Predicate, LHS, RHS, Query (DL, TLI, DT),
RecursionLimit);
}
@@ -2604,7 +2639,7 @@ static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
if (Constant *CRHS = dyn_cast<Constant>(RHS))
- return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.TD, Q.TLI);
+ return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
// If we have a constant, make sure it is on the RHS.
std::swap(LHS, RHS);
@@ -2681,14 +2716,14 @@ static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
+ return ::SimplifyFCmpInst(Predicate, LHS, RHS, Query (DL, TLI, DT),
RecursionLimit);
}
@@ -2699,8 +2734,12 @@ static Value *SimplifySelectInst(Value *CondVal, Value *TrueVal,
unsigned MaxRecurse) {
// select true, X, Y -> X
// select false, X, Y -> Y
- if (ConstantInt *CB = dyn_cast<ConstantInt>(CondVal))
- return CB->getZExtValue() ? TrueVal : FalseVal;
+ if (Constant *CB = dyn_cast<Constant>(CondVal)) {
+ if (CB->isAllOnesValue())
+ return TrueVal;
+ if (CB->isNullValue())
+ return FalseVal;
+ }
// select C, X, X -> X
if (TrueVal == FalseVal)
@@ -2716,14 +2755,14 @@ static Value *SimplifySelectInst(Value *CondVal, Value *TrueVal,
if (isa<UndefValue>(FalseVal)) // select C, X, undef -> X
return TrueVal;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Query (TD, TLI, DT),
+ return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Query (DL, TLI, DT),
RecursionLimit);
}
@@ -2731,10 +2770,7 @@ Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
/// fold the result. If not, this returns null.
static Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const Query &Q, unsigned) {
// The type of the GEP pointer operand.
- PointerType *PtrTy = dyn_cast<PointerType>(Ops[0]->getType());
- // The GEP pointer operand is not a pointer, it's a vector of pointers.
- if (!PtrTy)
- return 0;
+ PointerType *PtrTy = cast<PointerType>(Ops[0]->getType()->getScalarType());
// getelementptr P -> P.
if (Ops.size() == 1)
@@ -2744,18 +2780,19 @@ static Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const Query &Q, unsigned) {
// Compute the (pointer) type returned by the GEP instruction.
Type *LastType = GetElementPtrInst::getIndexedType(PtrTy, Ops.slice(1));
Type *GEPTy = PointerType::get(LastType, PtrTy->getAddressSpace());
+ if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType()))
+ GEPTy = VectorType::get(GEPTy, VT->getNumElements());
return UndefValue::get(GEPTy);
}
if (Ops.size() == 2) {
// getelementptr P, 0 -> P.
- if (ConstantInt *C = dyn_cast<ConstantInt>(Ops[1]))
- if (C->isZero())
- return Ops[0];
+ if (match(Ops[1], m_Zero()))
+ return Ops[0];
// getelementptr P, N -> P if P points to a type of zero size.
- if (Q.TD) {
+ if (Q.DL) {
Type *Ty = PtrTy->getElementType();
- if (Ty->isSized() && Q.TD->getTypeAllocSize(Ty) == 0)
+ if (Ty->isSized() && Q.DL->getTypeAllocSize(Ty) == 0)
return Ops[0];
}
}
@@ -2763,15 +2800,15 @@ static Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const Query &Q, unsigned) {
// Check to see if this is constant foldable.
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (!isa<Constant>(Ops[i]))
- return 0;
+ return nullptr;
return ConstantExpr::getGetElementPtr(cast<Constant>(Ops[0]), Ops.slice(1));
}
-Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *TD,
+Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyGEPInst(Ops, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyGEPInst(Ops, Query (DL, TLI, DT), RecursionLimit);
}
/// SimplifyInsertValueInst - Given operands for an InsertValueInst, see if we
@@ -2800,15 +2837,15 @@ static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
return Agg;
}
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query (TD, TLI, DT),
+ return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query (DL, TLI, DT),
RecursionLimit);
}
@@ -2816,7 +2853,7 @@ Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
static Value *SimplifyPHINode(PHINode *PN, const Query &Q) {
// If all of the PHI's incoming values are the same then replace the PHI node
// with the common value.
- Value *CommonValue = 0;
+ Value *CommonValue = nullptr;
bool HasUndefInput = false;
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
Value *Incoming = PN->getIncomingValue(i);
@@ -2828,7 +2865,7 @@ static Value *SimplifyPHINode(PHINode *PN, const Query &Q) {
continue;
}
if (CommonValue && Incoming != CommonValue)
- return 0; // Not the same, bail out.
+ return nullptr; // Not the same, bail out.
CommonValue = Incoming;
}
@@ -2841,22 +2878,22 @@ static Value *SimplifyPHINode(PHINode *PN, const Query &Q) {
// instruction, we cannot return X as the result of the PHI node unless it
// dominates the PHI block.
if (HasUndefInput)
- return ValueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : 0;
+ return ValueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr;
return CommonValue;
}
static Value *SimplifyTruncInst(Value *Op, Type *Ty, const Query &Q, unsigned) {
if (Constant *C = dyn_cast<Constant>(Op))
- return ConstantFoldInstOperands(Instruction::Trunc, Ty, C, Q.TD, Q.TLI);
+ return ConstantFoldInstOperands(Instruction::Trunc, Ty, C, Q.DL, Q.TLI);
- return 0;
+ return nullptr;
}
-Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *TD,
+Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyTruncInst(Op, Ty, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyTruncInst(Op, Ty, Query (DL, TLI, DT), RecursionLimit);
}
//=== Helper functions for higher up the class hierarchy.
@@ -2901,7 +2938,7 @@ static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
if (Constant *CLHS = dyn_cast<Constant>(LHS))
if (Constant *CRHS = dyn_cast<Constant>(RHS)) {
Constant *COps[] = {CLHS, CRHS};
- return ConstantFoldInstOperands(Opcode, LHS->getType(), COps, Q.TD,
+ return ConstantFoldInstOperands(Opcode, LHS->getType(), COps, Q.DL,
Q.TLI);
}
@@ -2922,14 +2959,14 @@ static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
if (Value *V = ThreadBinOpOverPHI(Opcode, LHS, RHS, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
}
Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyBinOp(Opcode, LHS, RHS, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyBinOp(Opcode, LHS, RHS, Query (DL, TLI, DT), RecursionLimit);
}
/// SimplifyCmpInst - Given operands for a CmpInst, see if we can
@@ -2942,9 +2979,9 @@ static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
+ return ::SimplifyCmpInst(Predicate, LHS, RHS, Query (DL, TLI, DT),
RecursionLimit);
}
@@ -2969,7 +3006,7 @@ static Value *SimplifyIntrinsic(Intrinsic::ID IID, IterTy ArgBegin, IterTy ArgEn
const Query &Q, unsigned MaxRecurse) {
// Perform idempotent optimizations
if (!IsIdempotent(IID))
- return 0;
+ return nullptr;
// Unary Ops
if (std::distance(ArgBegin, ArgEnd) == 1)
@@ -2977,7 +3014,7 @@ static Value *SimplifyIntrinsic(Intrinsic::ID IID, IterTy ArgBegin, IterTy ArgEn
if (II->getIntrinsicID() == IID)
return II;
- return 0;
+ return nullptr;
}
template <typename IterTy>
@@ -2994,7 +3031,7 @@ static Value *SimplifyCall(Value *V, IterTy ArgBegin, IterTy ArgEnd,
Function *F = dyn_cast<Function>(V);
if (!F)
- return 0;
+ return nullptr;
if (unsigned IID = F->getIntrinsicID())
if (Value *Ret =
@@ -3002,14 +3039,14 @@ static Value *SimplifyCall(Value *V, IterTy ArgBegin, IterTy ArgEnd,
return Ret;
if (!canConstantFoldCallTo(F))
- return 0;
+ return nullptr;
SmallVector<Constant *, 4> ConstantArgs;
ConstantArgs.reserve(ArgEnd - ArgBegin);
for (IterTy I = ArgBegin, E = ArgEnd; I != E; ++I) {
Constant *C = dyn_cast<Constant>(*I);
if (!C)
- return 0;
+ return nullptr;
ConstantArgs.push_back(C);
}
@@ -3017,136 +3054,136 @@ static Value *SimplifyCall(Value *V, IterTy ArgBegin, IterTy ArgEnd,
}
Value *llvm::SimplifyCall(Value *V, User::op_iterator ArgBegin,
- User::op_iterator ArgEnd, const DataLayout *TD,
+ User::op_iterator ArgEnd, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyCall(V, ArgBegin, ArgEnd, Query(TD, TLI, DT),
+ return ::SimplifyCall(V, ArgBegin, ArgEnd, Query(DL, TLI, DT),
RecursionLimit);
}
Value *llvm::SimplifyCall(Value *V, ArrayRef<Value *> Args,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyCall(V, Args.begin(), Args.end(), Query(TD, TLI, DT),
+ return ::SimplifyCall(V, Args.begin(), Args.end(), Query(DL, TLI, DT),
RecursionLimit);
}
/// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null.
-Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout *TD,
+Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
Value *Result;
switch (I->getOpcode()) {
default:
- Result = ConstantFoldInstruction(I, TD, TLI);
+ Result = ConstantFoldInstruction(I, DL, TLI);
break;
case Instruction::FAdd:
Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1),
- I->getFastMathFlags(), TD, TLI, DT);
+ I->getFastMathFlags(), DL, TLI, DT);
break;
case Instruction::Add:
Result = SimplifyAddInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
- TD, TLI, DT);
+ DL, TLI, DT);
break;
case Instruction::FSub:
Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1),
- I->getFastMathFlags(), TD, TLI, DT);
+ I->getFastMathFlags(), DL, TLI, DT);
break;
case Instruction::Sub:
Result = SimplifySubInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
- TD, TLI, DT);
+ DL, TLI, DT);
break;
case Instruction::FMul:
Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1),
- I->getFastMathFlags(), TD, TLI, DT);
+ I->getFastMathFlags(), DL, TLI, DT);
break;
case Instruction::Mul:
- Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::SDiv:
- Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::UDiv:
- Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::FDiv:
- Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::SRem:
- Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::URem:
- Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::FRem:
- Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::Shl:
Result = SimplifyShlInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
- TD, TLI, DT);
+ DL, TLI, DT);
break;
case Instruction::LShr:
Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->isExact(),
- TD, TLI, DT);
+ DL, TLI, DT);
break;
case Instruction::AShr:
Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->isExact(),
- TD, TLI, DT);
+ DL, TLI, DT);
break;
case Instruction::And:
- Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::Or:
- Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::Xor:
- Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::ICmp:
Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(),
- I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::FCmp:
Result = SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(),
- I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::Select:
Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1),
- I->getOperand(2), TD, TLI, DT);
+ I->getOperand(2), DL, TLI, DT);
break;
case Instruction::GetElementPtr: {
SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end());
- Result = SimplifyGEPInst(Ops, TD, TLI, DT);
+ Result = SimplifyGEPInst(Ops, DL, TLI, DT);
break;
}
case Instruction::InsertValue: {
InsertValueInst *IV = cast<InsertValueInst>(I);
Result = SimplifyInsertValueInst(IV->getAggregateOperand(),
IV->getInsertedValueOperand(),
- IV->getIndices(), TD, TLI, DT);
+ IV->getIndices(), DL, TLI, DT);
break;
}
case Instruction::PHI:
- Result = SimplifyPHINode(cast<PHINode>(I), Query (TD, TLI, DT));
+ Result = SimplifyPHINode(cast<PHINode>(I), Query (DL, TLI, DT));
break;
case Instruction::Call: {
CallSite CS(cast<CallInst>(I));
Result = SimplifyCall(CS.getCalledValue(), CS.arg_begin(), CS.arg_end(),
- TD, TLI, DT);
+ DL, TLI, DT);
break;
}
case Instruction::Trunc:
- Result = SimplifyTruncInst(I->getOperand(0), I->getType(), TD, TLI, DT);
+ Result = SimplifyTruncInst(I->getOperand(0), I->getType(), DL, TLI, DT);
break;
}
@@ -3168,7 +3205,7 @@ Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout *TD,
/// This routine returns 'true' only when *it* simplifies something. The passed
/// in simplified value does not count toward this.
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
bool Simplified = false;
@@ -3177,10 +3214,9 @@ static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
// If we have an explicit value to collapse to, do that round of the
// simplification loop by hand initially.
if (SimpleV) {
- for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE;
- ++UI)
- if (*UI != I)
- Worklist.insert(cast<Instruction>(*UI));
+ for (User *U : I->users())
+ if (U != I)
+ Worklist.insert(cast<Instruction>(U));
// Replace the instruction with its simplified value.
I->replaceAllUsesWith(SimpleV);
@@ -3198,7 +3234,7 @@ static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
I = Worklist[Idx];
// See if this instruction simplifies.
- SimpleV = SimplifyInstruction(I, TD, TLI, DT);
+ SimpleV = SimplifyInstruction(I, DL, TLI, DT);
if (!SimpleV)
continue;
@@ -3207,9 +3243,8 @@ static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
// Stash away all the uses of the old instruction so we can check them for
// recursive simplifications after a RAUW. This is cheaper than checking all
// uses of To on the recursive step in most cases.
- for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE;
- ++UI)
- Worklist.insert(cast<Instruction>(*UI));
+ for (User *U : I->users())
+ Worklist.insert(cast<Instruction>(U));
// Replace the instruction with its simplified value.
I->replaceAllUsesWith(SimpleV);
@@ -3223,17 +3258,17 @@ static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
}
bool llvm::recursivelySimplifyInstruction(Instruction *I,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return replaceAndRecursivelySimplifyImpl(I, 0, TD, TLI, DT);
+ return replaceAndRecursivelySimplifyImpl(I, nullptr, DL, TLI, DT);
}
bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
assert(SimpleV && "Must provide a simplified value.");
- return replaceAndRecursivelySimplifyImpl(I, SimpleV, TD, TLI, DT);
+ return replaceAndRecursivelySimplifyImpl(I, SimpleV, DL, TLI, DT);
}
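The wrappers above keep their behavior; only the DataLayout parameter name changes from TD to DL. For orientation, a minimal caller-side sketch follows: a helper that walks a function and folds whatever SimplifyInstruction can prove, replacing the instruction and erasing it. The helper name simplifyAll and the erase-after-RAUW policy are choices made for this example, not something this commit introduces.

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Fold every instruction the simplifier can prove to a simpler value.
static unsigned simplifyAll(Function &F, const DataLayout *DL,
                            const TargetLibraryInfo *TLI,
                            const DominatorTree *DT) {
  unsigned NumSimplified = 0;
  for (Function::iterator BB = F.begin(), BE = F.end(); BB != BE; ++BB)
    for (BasicBlock::iterator I = BB->begin(); I != BB->end();) {
      Instruction *Inst = &*I++;
      // SimplifyInstruction returns null when no fold applies; otherwise the
      // returned value can stand in for the instruction everywhere.
      if (Value *V = SimplifyInstruction(Inst, DL, TLI, DT)) {
        Inst->replaceAllUsesWith(V);
        Inst->eraseFromParent();
        ++NumSimplified;
      }
    }
  return NumSimplified;
}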
diff --git a/contrib/llvm/lib/Analysis/Interval.cpp b/contrib/llvm/lib/Analysis/Interval.cpp
index 26a0322..e3e785f 100644
--- a/contrib/llvm/lib/Analysis/Interval.cpp
+++ b/contrib/llvm/lib/Analysis/Interval.cpp
@@ -14,7 +14,7 @@
#include "llvm/Analysis/Interval.h"
#include "llvm/IR/BasicBlock.h"
-#include "llvm/Support/CFG.h"
+#include "llvm/IR/CFG.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
diff --git a/contrib/llvm/lib/Analysis/IntervalPartition.cpp b/contrib/llvm/lib/Analysis/IntervalPartition.cpp
index 2e259b1..a0583e8 100644
--- a/contrib/llvm/lib/Analysis/IntervalPartition.cpp
+++ b/contrib/llvm/lib/Analysis/IntervalPartition.cpp
@@ -29,7 +29,7 @@ void IntervalPartition::releaseMemory() {
delete Intervals[i];
IntervalMap.clear();
Intervals.clear();
- RootInterval = 0;
+ RootInterval = nullptr;
}
void IntervalPartition::print(raw_ostream &O, const Module*) const {
diff --git a/contrib/llvm/lib/Analysis/JumpInstrTableInfo.cpp b/contrib/llvm/lib/Analysis/JumpInstrTableInfo.cpp
new file mode 100644
index 0000000..b5b4265
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/JumpInstrTableInfo.cpp
@@ -0,0 +1,40 @@
+//===-- JumpInstrTableInfo.cpp: Info for Jump-Instruction Tables ----------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Information about jump-instruction tables that have been created by
+/// JumpInstrTables pass.
+///
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "jiti"
+
+#include "llvm/Analysis/JumpInstrTableInfo.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Type.h"
+
+using namespace llvm;
+
+INITIALIZE_PASS(JumpInstrTableInfo, "jump-instr-table-info",
+ "Jump-Instruction Table Info", true, true)
+char JumpInstrTableInfo::ID = 0;
+
+ImmutablePass *llvm::createJumpInstrTableInfoPass() {
+ return new JumpInstrTableInfo();
+}
+
+JumpInstrTableInfo::JumpInstrTableInfo() : ImmutablePass(ID), Tables() {
+ initializeJumpInstrTableInfoPass(*PassRegistry::getPassRegistry());
+}
+
+JumpInstrTableInfo::~JumpInstrTableInfo() {}
+
+void JumpInstrTableInfo::insertEntry(FunctionType *TableFunTy, Function *Target,
+ Function *Jump) {
+ Tables[TableFunTy].push_back(JumpPair(Target, Jump));
+}
diff --git a/contrib/llvm/lib/Analysis/LazyCallGraph.cpp b/contrib/llvm/lib/Analysis/LazyCallGraph.cpp
new file mode 100644
index 0000000..e073616
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/LazyCallGraph.cpp
@@ -0,0 +1,728 @@
+//===- LazyCallGraph.cpp - Analysis of a Module's call graph --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "lcg"
+
+static void findCallees(
+ SmallVectorImpl<Constant *> &Worklist, SmallPtrSetImpl<Constant *> &Visited,
+ SmallVectorImpl<PointerUnion<Function *, LazyCallGraph::Node *>> &Callees,
+ DenseMap<Function *, size_t> &CalleeIndexMap) {
+ while (!Worklist.empty()) {
+ Constant *C = Worklist.pop_back_val();
+
+ if (Function *F = dyn_cast<Function>(C)) {
+ // Note that we consider *any* function with a definition to be a viable
+ // edge. Even if the function's definition is subject to replacement by
+ // some other module (say, a weak definition) there may still be
+ // optimizations which essentially speculate based on the definition and
+ // a way to check that the specific definition is in fact the one being
+ // used. For example, this could be done by moving the weak definition to
+ // a strong (internal) definition and making the weak definition be an
+ // alias. Then a test of the address of the weak function against the new
+ // strong definition's address would be an effective way to determine the
+ // safety of optimizing a direct call edge.
+ if (!F->isDeclaration() &&
+ CalleeIndexMap.insert(std::make_pair(F, Callees.size())).second) {
+ DEBUG(dbgs() << " Added callable function: " << F->getName()
+ << "\n");
+ Callees.push_back(F);
+ }
+ continue;
+ }
+
+ for (Value *Op : C->operand_values())
+ if (Visited.insert(cast<Constant>(Op)))
+ Worklist.push_back(cast<Constant>(Op));
+ }
+}
+
+LazyCallGraph::Node::Node(LazyCallGraph &G, Function &F)
+ : G(&G), F(F), DFSNumber(0), LowLink(0) {
+ DEBUG(dbgs() << " Adding functions called by '" << F.getName()
+ << "' to the graph.\n");
+
+ SmallVector<Constant *, 16> Worklist;
+ SmallPtrSet<Constant *, 16> Visited;
+ // Find all the potential callees in this function. First walk the
+ // instructions and add every operand which is a constant to the worklist.
+ for (BasicBlock &BB : F)
+ for (Instruction &I : BB)
+ for (Value *Op : I.operand_values())
+ if (Constant *C = dyn_cast<Constant>(Op))
+ if (Visited.insert(C))
+ Worklist.push_back(C);
+
+ // We've collected all the constant (and thus potentially function or
+ // function containing) operands to all of the instructions in the function.
+ // Process them (recursively) collecting every function found.
+ findCallees(Worklist, Visited, Callees, CalleeIndexMap);
+}
+
+void LazyCallGraph::Node::insertEdgeInternal(Function &Callee) {
+ if (Node *N = G->lookup(Callee))
+ return insertEdgeInternal(*N);
+
+ CalleeIndexMap.insert(std::make_pair(&Callee, Callees.size()));
+ Callees.push_back(&Callee);
+}
+
+void LazyCallGraph::Node::insertEdgeInternal(Node &CalleeN) {
+ CalleeIndexMap.insert(std::make_pair(&CalleeN.getFunction(), Callees.size()));
+ Callees.push_back(&CalleeN);
+}
+
+void LazyCallGraph::Node::removeEdgeInternal(Function &Callee) {
+ auto IndexMapI = CalleeIndexMap.find(&Callee);
+ assert(IndexMapI != CalleeIndexMap.end() &&
+ "Callee not in the callee set for this caller?");
+
+ Callees[IndexMapI->second] = nullptr;
+ CalleeIndexMap.erase(IndexMapI);
+}
+
+LazyCallGraph::LazyCallGraph(Module &M) : NextDFSNumber(0) {
+ DEBUG(dbgs() << "Building CG for module: " << M.getModuleIdentifier()
+ << "\n");
+ for (Function &F : M)
+ if (!F.isDeclaration() && !F.hasLocalLinkage())
+ if (EntryIndexMap.insert(std::make_pair(&F, EntryNodes.size())).second) {
+ DEBUG(dbgs() << " Adding '" << F.getName()
+ << "' to entry set of the graph.\n");
+ EntryNodes.push_back(&F);
+ }
+
+ // Now add entry nodes for functions reachable via initializers to globals.
+ SmallVector<Constant *, 16> Worklist;
+ SmallPtrSet<Constant *, 16> Visited;
+ for (GlobalVariable &GV : M.globals())
+ if (GV.hasInitializer())
+ if (Visited.insert(GV.getInitializer()))
+ Worklist.push_back(GV.getInitializer());
+
+ DEBUG(dbgs() << " Adding functions referenced by global initializers to the "
+ "entry set.\n");
+ findCallees(Worklist, Visited, EntryNodes, EntryIndexMap);
+
+ for (auto &Entry : EntryNodes) {
+ assert(!Entry.isNull() &&
+ "We can't have removed edges before we finish the constructor!");
+ if (Function *F = Entry.dyn_cast<Function *>())
+ SCCEntryNodes.push_back(F);
+ else
+ SCCEntryNodes.push_back(&Entry.get<Node *>()->getFunction());
+ }
+}
+
+LazyCallGraph::LazyCallGraph(LazyCallGraph &&G)
+ : BPA(std::move(G.BPA)), NodeMap(std::move(G.NodeMap)),
+ EntryNodes(std::move(G.EntryNodes)),
+ EntryIndexMap(std::move(G.EntryIndexMap)), SCCBPA(std::move(G.SCCBPA)),
+ SCCMap(std::move(G.SCCMap)), LeafSCCs(std::move(G.LeafSCCs)),
+ DFSStack(std::move(G.DFSStack)),
+ SCCEntryNodes(std::move(G.SCCEntryNodes)),
+ NextDFSNumber(G.NextDFSNumber) {
+ updateGraphPtrs();
+}
+
+LazyCallGraph &LazyCallGraph::operator=(LazyCallGraph &&G) {
+ BPA = std::move(G.BPA);
+ NodeMap = std::move(G.NodeMap);
+ EntryNodes = std::move(G.EntryNodes);
+ EntryIndexMap = std::move(G.EntryIndexMap);
+ SCCBPA = std::move(G.SCCBPA);
+ SCCMap = std::move(G.SCCMap);
+ LeafSCCs = std::move(G.LeafSCCs);
+ DFSStack = std::move(G.DFSStack);
+ SCCEntryNodes = std::move(G.SCCEntryNodes);
+ NextDFSNumber = G.NextDFSNumber;
+ updateGraphPtrs();
+ return *this;
+}
+
+void LazyCallGraph::SCC::insert(Node &N) {
+ N.DFSNumber = N.LowLink = -1;
+ Nodes.push_back(&N);
+ G->SCCMap[&N] = this;
+}
+
+bool LazyCallGraph::SCC::isDescendantOf(const SCC &C) const {
+ // Walk up the parents of this SCC and verify that we eventually find C.
+ SmallVector<const SCC *, 4> AncestorWorklist;
+ AncestorWorklist.push_back(this);
+ do {
+ const SCC *AncestorC = AncestorWorklist.pop_back_val();
+ if (AncestorC->isChildOf(C))
+ return true;
+ for (const SCC *ParentC : AncestorC->ParentSCCs)
+ AncestorWorklist.push_back(ParentC);
+ } while (!AncestorWorklist.empty());
+
+ return false;
+}
+
+void LazyCallGraph::SCC::insertIntraSCCEdge(Node &CallerN, Node &CalleeN) {
+ // First insert it into the caller.
+ CallerN.insertEdgeInternal(CalleeN);
+
+ assert(G->SCCMap.lookup(&CallerN) == this && "Caller must be in this SCC.");
+ assert(G->SCCMap.lookup(&CalleeN) == this && "Callee must be in this SCC.");
+
+ // Nothing changes about this SCC or any other.
+}
+
+void LazyCallGraph::SCC::insertOutgoingEdge(Node &CallerN, Node &CalleeN) {
+ // First insert it into the caller.
+ CallerN.insertEdgeInternal(CalleeN);
+
+ assert(G->SCCMap.lookup(&CallerN) == this && "Caller must be in this SCC.");
+
+ SCC &CalleeC = *G->SCCMap.lookup(&CalleeN);
+ assert(&CalleeC != this && "Callee must not be in this SCC.");
+ assert(CalleeC.isDescendantOf(*this) &&
+ "Callee must be a descendant of the Caller.");
+
+ // The only change required is to add this SCC to the parent set of the callee.
+ CalleeC.ParentSCCs.insert(this);
+}
+
+SmallVector<LazyCallGraph::SCC *, 1>
+LazyCallGraph::SCC::insertIncomingEdge(Node &CallerN, Node &CalleeN) {
+ // First insert it into the caller.
+ CallerN.insertEdgeInternal(CalleeN);
+
+ assert(G->SCCMap.lookup(&CalleeN) == this && "Callee must be in this SCC.");
+
+ SCC &CallerC = *G->SCCMap.lookup(&CallerN);
+ assert(&CallerC != this && "Caller must not be in this SCC.");
+ assert(CallerC.isDescendantOf(*this) &&
+ "Caller must be a descendant of the Callee.");
+
+ // The algorithm we use for merging SCCs based on the cycle introduced here
+ // is to walk the SCC inverted DAG formed by the parent SCC sets. The inverse
+ // graph has the same cycle properties as the actual DAG of the SCCs, and
+ // when forming SCCs lazily by a DFS, the bottom of the graph won't exist in
+ // many cases which should prune the search space.
+ //
+ // FIXME: We can get this pruning behavior even after the incremental SCC
+ // formation by leaving behind (conservative) DFS numberings in the nodes,
+ // and pruning the search with them. These would need to be cleverly updated
+ // during the removal of intra-SCC edges, but could be preserved
+ // conservatively.
+
+ // The set of SCCs that are connected to the caller, and thus will
+ // participate in the merged connected component.
+ SmallPtrSet<SCC *, 8> ConnectedSCCs;
+ ConnectedSCCs.insert(this);
+ ConnectedSCCs.insert(&CallerC);
+
+ // We build up a DFS stack of the parents chains.
+ SmallVector<std::pair<SCC *, SCC::parent_iterator>, 8> DFSSCCs;
+ SmallPtrSet<SCC *, 8> VisitedSCCs;
+ int ConnectedDepth = -1;
+ SCC *C = this;
+ parent_iterator I = parent_begin(), E = parent_end();
+ for (;;) {
+ while (I != E) {
+ SCC &ParentSCC = *I++;
+
+ // If we have already processed this parent SCC, skip it, and remember
+ // whether it was connected so we don't have to check the rest of the
+ // stack. This also handles when we reach a child of the 'this' SCC (the
+ // callee) which terminates the search.
+ if (ConnectedSCCs.count(&ParentSCC)) {
+ ConnectedDepth = std::max<int>(ConnectedDepth, DFSSCCs.size());
+ continue;
+ }
+ if (VisitedSCCs.count(&ParentSCC))
+ continue;
+
+ // We fully explore the depth-first space, adding nodes to the connected
+ // set only as we pop them off, so "recurse" by rotating to the parent.
+ DFSSCCs.push_back(std::make_pair(C, I));
+ C = &ParentSCC;
+ I = ParentSCC.parent_begin();
+ E = ParentSCC.parent_end();
+ }
+
+ // If we've found a connection anywhere below this point on the stack (and
+ // thus up the parent graph from the caller), the current node needs to be
+ // added to the connected set now that we've processed all of its parents.
+ if ((int)DFSSCCs.size() == ConnectedDepth) {
+ --ConnectedDepth; // We're finished with this connection.
+ ConnectedSCCs.insert(C);
+ } else {
+ // Otherwise remember that its parents don't ever connect.
+ assert(ConnectedDepth < (int)DFSSCCs.size() &&
+ "Cannot have a connected depth greater than the DFS depth!");
+ VisitedSCCs.insert(C);
+ }
+
+ if (DFSSCCs.empty())
+ break; // We've walked all the parents of the caller transitively.
+
+ // Pop off the prior node and position to unwind the depth first recursion.
+ std::tie(C, I) = DFSSCCs.pop_back_val();
+ E = C->parent_end();
+ }
+
+ // Now that we have identified all of the SCCs which need to be merged into
+ // a connected set with the inserted edge, merge all of them into this SCC.
+ // FIXME: This operation currently creates ordering stability problems
+ // because we don't use stably ordered containers for the parent SCCs or the
+ // connected SCCs.
+ unsigned NewNodeBeginIdx = Nodes.size();
+ for (SCC *C : ConnectedSCCs) {
+ if (C == this)
+ continue;
+ for (SCC *ParentC : C->ParentSCCs)
+ if (!ConnectedSCCs.count(ParentC))
+ ParentSCCs.insert(ParentC);
+ C->ParentSCCs.clear();
+
+ for (Node *N : *C) {
+ for (Node &ChildN : *N) {
+ SCC &ChildC = *G->SCCMap.lookup(&ChildN);
+ if (&ChildC != C)
+ ChildC.ParentSCCs.erase(C);
+ }
+ G->SCCMap[N] = this;
+ Nodes.push_back(N);
+ }
+ C->Nodes.clear();
+ }
+ for (auto I = Nodes.begin() + NewNodeBeginIdx, E = Nodes.end(); I != E; ++I)
+ for (Node &ChildN : **I) {
+ SCC &ChildC = *G->SCCMap.lookup(&ChildN);
+ if (&ChildC != this)
+ ChildC.ParentSCCs.insert(this);
+ }
+
+ // We return the list of SCCs which were merged so that callers can
+ // invalidate any data they have associated with those SCCs. Note that these
+ // SCCs are no longer in an interesting state (they are totally empty) but
+ // the pointers will remain stable for the life of the graph itself.
+ return SmallVector<SCC *, 1>(ConnectedSCCs.begin(), ConnectedSCCs.end());
+}
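
Conceptually, the connectivity walk above is computing the set of SCCs that land on the newly created cycle: once the caller-to-callee back edge exists, every SCC sitting on a call path from the callee's SCC down to the caller's SCC collapses into one. A standalone sketch of that criterion on a toy SCC DAG (invented types; this illustrates the set being computed, not LazyCallGraph's parent-set representation):

    #include <cstddef>
    #include <set>
    #include <vector>

    // Toy SCC DAG: Callees[I] lists the SCCs that SCC I calls into.
    using Dag = std::vector<std::vector<std::size_t>>;

    static void reach(const Dag &Callees, std::size_t From,
                      std::set<std::size_t> &Out) {
      if (!Out.insert(From).second)
        return;
      for (std::size_t Next : Callees[From])
        reach(Callees, Next, Out);
    }

    // SCCs that must merge when a new edge from CallerC back up to CalleeC
    // closes a cycle: everything reachable from CalleeC that can in turn
    // reach CallerC.
    static std::set<std::size_t> mergeSet(const Dag &Callees,
                                          std::size_t CalleeC,
                                          std::size_t CallerC) {
      std::set<std::size_t> FromCallee;
      reach(Callees, CalleeC, FromCallee);

      std::set<std::size_t> Result;
      for (std::size_t C : FromCallee) {
        std::set<std::size_t> R;
        reach(Callees, C, R);
        if (R.count(CallerC))
          Result.insert(C); // C sits on a path CalleeC ->* C ->* CallerC.
      }
      return Result;
    }

The real implementation avoids this quadratic re-walk by exploring the inverted (parent) DAG once and marking SCCs connected as the DFS unwinds, as the comments above describe.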
+
+void LazyCallGraph::SCC::removeInterSCCEdge(Node &CallerN, Node &CalleeN) {
+ // First remove it from the node.
+ CallerN.removeEdgeInternal(CalleeN.getFunction());
+
+ assert(G->SCCMap.lookup(&CallerN) == this &&
+ "The caller must be a member of this SCC.");
+
+ SCC &CalleeC = *G->SCCMap.lookup(&CalleeN);
+ assert(&CalleeC != this &&
+ "This API only supports the removal of inter-SCC edges.");
+
+ assert(std::find(G->LeafSCCs.begin(), G->LeafSCCs.end(), this) ==
+ G->LeafSCCs.end() &&
+ "Cannot have a leaf SCC caller with a different SCC callee.");
+
+ bool HasOtherCallToCalleeC = false;
+ bool HasOtherCallOutsideSCC = false;
+ for (Node *N : *this) {
+ for (Node &OtherCalleeN : *N) {
+ SCC &OtherCalleeC = *G->SCCMap.lookup(&OtherCalleeN);
+ if (&OtherCalleeC == &CalleeC) {
+ HasOtherCallToCalleeC = true;
+ break;
+ }
+ if (&OtherCalleeC != this)
+ HasOtherCallOutsideSCC = true;
+ }
+ if (HasOtherCallToCalleeC)
+ break;
+ }
+ // Because the SCCs form a DAG, deleting such an edge cannot change the set
+ // of SCCs in the graph. However, it may cut an edge of the SCC DAG, making
+ // the caller no longer a parent of the callee. Walk the other call edges
+ // in the caller to tell.
+ if (!HasOtherCallToCalleeC) {
+ bool Removed = CalleeC.ParentSCCs.erase(this);
+ (void)Removed;
+ assert(Removed &&
+ "Did not find the caller SCC in the callee SCC's parent list!");
+
+ // It may orphan an SCC if it is the last edge reaching it, but that does
+ // not violate any invariants of the graph.
+ if (CalleeC.ParentSCCs.empty())
+ DEBUG(dbgs() << "LCG: Update removing " << CallerN.getFunction().getName()
+ << " -> " << CalleeN.getFunction().getName()
+ << " edge orphaned the callee's SCC!\n");
+ }
+
+ // It may make the Caller SCC a leaf SCC.
+ if (!HasOtherCallOutsideSCC)
+ G->LeafSCCs.push_back(this);
+}
+
+void LazyCallGraph::SCC::internalDFS(
+ SmallVectorImpl<std::pair<Node *, Node::iterator>> &DFSStack,
+ SmallVectorImpl<Node *> &PendingSCCStack, Node *N,
+ SmallVectorImpl<SCC *> &ResultSCCs) {
+ Node::iterator I = N->begin();
+ N->LowLink = N->DFSNumber = 1;
+ int NextDFSNumber = 2;
+ for (;;) {
+ assert(N->DFSNumber != 0 && "We should always assign a DFS number "
+ "before processing a node.");
+
+ // We simulate recursion by popping out of the nested loop and continuing.
+ Node::iterator E = N->end();
+ while (I != E) {
+ Node &ChildN = *I;
+ if (SCC *ChildSCC = G->SCCMap.lookup(&ChildN)) {
+ // Check if we have reached a node in the new (known connected) set of
+ // this SCC. If so, the entire stack is necessarily in that set and we
+ // can re-start.
+ if (ChildSCC == this) {
+ insert(*N);
+ while (!PendingSCCStack.empty())
+ insert(*PendingSCCStack.pop_back_val());
+ while (!DFSStack.empty())
+ insert(*DFSStack.pop_back_val().first);
+ return;
+ }
+
+ // If this child isn't currently in this SCC, no need to process it.
+ // However, we do need to remove this SCC from its SCC's parent set.
+ ChildSCC->ParentSCCs.erase(this);
+ ++I;
+ continue;
+ }
+
+ if (ChildN.DFSNumber == 0) {
+ // Mark that we should start at this child when next this node is the
+ // top of the stack. We don't start at the next child to ensure this
+ // child's lowlink is reflected.
+ DFSStack.push_back(std::make_pair(N, I));
+
+ // Continue, resetting to the child node.
+ ChildN.LowLink = ChildN.DFSNumber = NextDFSNumber++;
+ N = &ChildN;
+ I = ChildN.begin();
+ E = ChildN.end();
+ continue;
+ }
+
+ // Track the lowest link of the children, if any are still in the stack.
+ // Any child not on the stack will have a LowLink of -1.
+ assert(ChildN.LowLink != 0 &&
+ "Low-link must not be zero with a non-zero DFS number.");
+ if (ChildN.LowLink >= 0 && ChildN.LowLink < N->LowLink)
+ N->LowLink = ChildN.LowLink;
+ ++I;
+ }
+
+ if (N->LowLink == N->DFSNumber) {
+ ResultSCCs.push_back(G->formSCC(N, PendingSCCStack));
+ if (DFSStack.empty())
+ return;
+ } else {
+ // At this point we know that N cannot ever be an SCC root. Its low-link
+ // is not its dfs-number, and we've processed all of its children. It is
+ // just sitting here waiting until some node further down the stack gets
+ // low-link == dfs-number and pops it off as well. Move it to the pending
+ // stack which is pulled into the next SCC to be formed.
+ PendingSCCStack.push_back(N);
+
+ assert(!DFSStack.empty() && "We shouldn't have an empty stack!");
+ }
+
+ N = DFSStack.back().first;
+ I = DFSStack.back().second;
+ DFSStack.pop_back();
+ }
+}
+
+SmallVector<LazyCallGraph::SCC *, 1>
+LazyCallGraph::SCC::removeIntraSCCEdge(Node &CallerN,
+ Node &CalleeN) {
+ // First remove it from the node.
+ CallerN.removeEdgeInternal(CalleeN.getFunction());
+
+ // We return a list of the resulting *new* SCCs in postorder.
+ SmallVector<SCC *, 1> ResultSCCs;
+
+ // Direct recursion doesn't impact the SCC graph at all.
+ if (&CallerN == &CalleeN)
+ return ResultSCCs;
+
+ // The worklist is every node in the original SCC.
+ SmallVector<Node *, 1> Worklist;
+ Worklist.swap(Nodes);
+ for (Node *N : Worklist) {
+ // The nodes formerly in this SCC are no longer in any SCC.
+ N->DFSNumber = 0;
+ N->LowLink = 0;
+ G->SCCMap.erase(N);
+ }
+ assert(Worklist.size() > 1 && "We have to have at least two nodes to have an "
+ "edge between them that is within the SCC.");
+
+ // The callee can already reach every node in this SCC (by definition). It is
+ // the only node we know will stay inside this SCC. Everything which
+ // transitively reaches Callee will also remain in the SCC. To model this we
+ // incrementally add any chain of nodes which reaches something in the new
+ // node set to the new node set. This short-circuits one side of Tarjan's
+ // walk.
+ insert(CalleeN);
+
+ // We're going to do a full mini-Tarjan's walk using a local stack here.
+ SmallVector<std::pair<Node *, Node::iterator>, 4> DFSStack;
+ SmallVector<Node *, 4> PendingSCCStack;
+ do {
+ Node *N = Worklist.pop_back_val();
+ if (N->DFSNumber == 0)
+ internalDFS(DFSStack, PendingSCCStack, N, ResultSCCs);
+
+ assert(DFSStack.empty() && "Didn't flush the entire DFS stack!");
+ assert(PendingSCCStack.empty() && "Didn't flush all pending SCC nodes!");
+ } while (!Worklist.empty());
+
+ // Now we need to reconnect the current SCC to the graph.
+ bool IsLeafSCC = true;
+ for (Node *N : Nodes) {
+ for (Node &ChildN : *N) {
+ SCC &ChildSCC = *G->SCCMap.lookup(&ChildN);
+ if (&ChildSCC == this)
+ continue;
+ ChildSCC.ParentSCCs.insert(this);
+ IsLeafSCC = false;
+ }
+ }
+#ifndef NDEBUG
+ if (!ResultSCCs.empty())
+ assert(!IsLeafSCC && "This SCC cannot be a leaf as we have split out new "
+ "SCCs by removing this edge.");
+ if (!std::any_of(G->LeafSCCs.begin(), G->LeafSCCs.end(),
+ [&](SCC *C) { return C == this; }))
+ assert(!IsLeafSCC && "This SCC cannot be a leaf as it already had child "
+ "SCCs before we removed this edge.");
+#endif
+ // If this SCC stopped being a leaf through this edge removal, remove it from
+ // the leaf SCC list.
+ if (!IsLeafSCC && !ResultSCCs.empty())
+ G->LeafSCCs.erase(std::remove(G->LeafSCCs.begin(), G->LeafSCCs.end(), this),
+ G->LeafSCCs.end());
+
+ // Return the new list of SCCs.
+ return ResultSCCs;
+}
+
+void LazyCallGraph::insertEdge(Node &CallerN, Function &Callee) {
+ assert(SCCMap.empty() && DFSStack.empty() &&
+ "This method cannot be called after SCCs have been formed!");
+
+ return CallerN.insertEdgeInternal(Callee);
+}
+
+void LazyCallGraph::removeEdge(Node &CallerN, Function &Callee) {
+ assert(SCCMap.empty() && DFSStack.empty() &&
+ "This method cannot be called after SCCs have been formed!");
+
+ return CallerN.removeEdgeInternal(Callee);
+}
+
+LazyCallGraph::Node &LazyCallGraph::insertInto(Function &F, Node *&MappedN) {
+ return *new (MappedN = BPA.Allocate()) Node(*this, F);
+}
+
+void LazyCallGraph::updateGraphPtrs() {
+ // Process all nodes updating the graph pointers.
+ {
+ SmallVector<Node *, 16> Worklist;
+ for (auto &Entry : EntryNodes)
+ if (Node *EntryN = Entry.dyn_cast<Node *>())
+ Worklist.push_back(EntryN);
+
+ while (!Worklist.empty()) {
+ Node *N = Worklist.pop_back_val();
+ N->G = this;
+ for (auto &Callee : N->Callees)
+ if (!Callee.isNull())
+ if (Node *CalleeN = Callee.dyn_cast<Node *>())
+ Worklist.push_back(CalleeN);
+ }
+ }
+
+ // Process all SCCs updating the graph pointers.
+ {
+ SmallVector<SCC *, 16> Worklist(LeafSCCs.begin(), LeafSCCs.end());
+
+ while (!Worklist.empty()) {
+ SCC *C = Worklist.pop_back_val();
+ C->G = this;
+ Worklist.insert(Worklist.end(), C->ParentSCCs.begin(),
+ C->ParentSCCs.end());
+ }
+ }
+}
+
+LazyCallGraph::SCC *LazyCallGraph::formSCC(Node *RootN,
+ SmallVectorImpl<Node *> &NodeStack) {
+ // The tail of the stack is the new SCC. Allocate the SCC and pop the stack
+ // into it.
+ SCC *NewSCC = new (SCCBPA.Allocate()) SCC(*this);
+
+ while (!NodeStack.empty() && NodeStack.back()->DFSNumber > RootN->DFSNumber) {
+ assert(NodeStack.back()->LowLink >= RootN->LowLink &&
+ "We cannot have a low link in an SCC lower than its root on the "
+ "stack!");
+ NewSCC->insert(*NodeStack.pop_back_val());
+ }
+ NewSCC->insert(*RootN);
+
+ // A final pass over all edges in the SCC (this remains linear as we only
+ // do this once when we build the SCC) to connect it to the parent sets of
+ // its children.
+ bool IsLeafSCC = true;
+ for (Node *SCCN : NewSCC->Nodes)
+ for (Node &SCCChildN : *SCCN) {
+ SCC &ChildSCC = *SCCMap.lookup(&SCCChildN);
+ if (&ChildSCC == NewSCC)
+ continue;
+ ChildSCC.ParentSCCs.insert(NewSCC);
+ IsLeafSCC = false;
+ }
+
+ // For the SCCs where we find no child SCCs, add them to the leaf list.
+ if (IsLeafSCC)
+ LeafSCCs.push_back(NewSCC);
+
+ return NewSCC;
+}
+
+LazyCallGraph::SCC *LazyCallGraph::getNextSCCInPostOrder() {
+ Node *N;
+ Node::iterator I;
+ if (!DFSStack.empty()) {
+ N = DFSStack.back().first;
+ I = DFSStack.back().second;
+ DFSStack.pop_back();
+ } else {
+ // If we've handled all candidate entry nodes to the SCC forest, we're done.
+ do {
+ if (SCCEntryNodes.empty())
+ return nullptr;
+
+ N = &get(*SCCEntryNodes.pop_back_val());
+ } while (N->DFSNumber != 0);
+ I = N->begin();
+ N->LowLink = N->DFSNumber = 1;
+ NextDFSNumber = 2;
+ }
+
+ for (;;) {
+ assert(N->DFSNumber != 0 && "We should always assign a DFS number "
+ "before placing a node onto the stack.");
+
+ Node::iterator E = N->end();
+ while (I != E) {
+ Node &ChildN = *I;
+ if (ChildN.DFSNumber == 0) {
+ // Mark that we should start at this child when next this node is the
+ // top of the stack. We don't start at the next child to ensure this
+ // child's lowlink is reflected.
+ DFSStack.push_back(std::make_pair(N, N->begin()));
+
+ // Recurse onto this node via a tail call.
+ assert(!SCCMap.count(&ChildN) &&
+ "Found a node with 0 DFS number but already in an SCC!");
+ ChildN.LowLink = ChildN.DFSNumber = NextDFSNumber++;
+ N = &ChildN;
+ I = ChildN.begin();
+ E = ChildN.end();
+ continue;
+ }
+
+ // Track the lowest link of the children, if any are still in the stack.
+ assert(ChildN.LowLink != 0 &&
+ "Low-link must not be zero with a non-zero DFS number.");
+ if (ChildN.LowLink >= 0 && ChildN.LowLink < N->LowLink)
+ N->LowLink = ChildN.LowLink;
+ ++I;
+ }
+
+ if (N->LowLink == N->DFSNumber)
+ // Form the new SCC out of the top of the DFS stack.
+ return formSCC(N, PendingSCCStack);
+
+ // At this point we know that N cannot ever be an SCC root. Its low-link
+ // is not its dfs-number, and we've processed all of its children. It is
+ // just sitting here waiting until some node further down the stack gets
+ // low-link == dfs-number and pops it off as well. Move it to the pending
+ // stack which is pulled into the next SCC to be formed.
+ PendingSCCStack.push_back(N);
+
+ assert(!DFSStack.empty() && "We never found a viable root!");
+ N = DFSStack.back().first;
+ I = DFSStack.back().second;
+ DFSStack.pop_back();
+ }
+}
+
+char LazyCallGraphAnalysis::PassID;
+
+LazyCallGraphPrinterPass::LazyCallGraphPrinterPass(raw_ostream &OS) : OS(OS) {}
+
+static void printNodes(raw_ostream &OS, LazyCallGraph::Node &N,
+ SmallPtrSetImpl<LazyCallGraph::Node *> &Printed) {
+ // Recurse depth first through the nodes.
+ for (LazyCallGraph::Node &ChildN : N)
+ if (Printed.insert(&ChildN))
+ printNodes(OS, ChildN, Printed);
+
+ OS << " Call edges in function: " << N.getFunction().getName() << "\n";
+ for (LazyCallGraph::iterator I = N.begin(), E = N.end(); I != E; ++I)
+ OS << " -> " << I->getFunction().getName() << "\n";
+
+ OS << "\n";
+}
+
+static void printSCC(raw_ostream &OS, LazyCallGraph::SCC &SCC) {
+ ptrdiff_t SCCSize = std::distance(SCC.begin(), SCC.end());
+ OS << " SCC with " << SCCSize << " functions:\n";
+
+ for (LazyCallGraph::Node *N : SCC)
+ OS << " " << N->getFunction().getName() << "\n";
+
+ OS << "\n";
+}
+
+PreservedAnalyses LazyCallGraphPrinterPass::run(Module *M,
+ ModuleAnalysisManager *AM) {
+ LazyCallGraph &G = AM->getResult<LazyCallGraphAnalysis>(M);
+
+ OS << "Printing the call graph for module: " << M->getModuleIdentifier()
+ << "\n\n";
+
+ SmallPtrSet<LazyCallGraph::Node *, 16> Printed;
+ for (LazyCallGraph::Node &N : G)
+ if (Printed.insert(&N))
+ printNodes(OS, N, Printed);
+
+ for (LazyCallGraph::SCC &SCC : G.postorder_sccs())
+ printSCC(OS, SCC);
+
+ return PreservedAnalyses::all();
+}
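
The SCC machinery added here (internalDFS, formSCC, getNextSCCInPostOrder) is an iterative, incremental take on Tarjan's algorithm; the DFSNumber/LowLink fields encode the classic rule that a node roots an SCC exactly when its low-link equals its DFS number. For reference, a minimal recursive Tarjan over a toy adjacency list -- not LLVM code, and using an explicit on-stack flag where the code above relies on a LowLink of -1 for nodes already assigned to an SCC:

    #include <algorithm>
    #include <vector>

    // Textbook recursive Tarjan.  SCCs come out in post-order (callees before
    // callers), the same order the lazy walk above produces.
    class TinyTarjan {
      const std::vector<std::vector<int>> &Adj; // Adj[V] = successors of V.
      std::vector<int> DFSNum, LowLink;
      std::vector<bool> OnStack;
      std::vector<int> Stack;
      int NextNum = 1;

    public:
      std::vector<std::vector<int>> SCCs;

      explicit TinyTarjan(const std::vector<std::vector<int>> &Adj)
          : Adj(Adj), DFSNum(Adj.size(), 0), LowLink(Adj.size(), 0),
            OnStack(Adj.size(), false) {
        for (int V = 0, N = (int)Adj.size(); V != N; ++V)
          if (DFSNum[V] == 0)
            visit(V);
      }

    private:
      void visit(int V) {
        DFSNum[V] = LowLink[V] = NextNum++;
        Stack.push_back(V);
        OnStack[V] = true;
        for (int W : Adj[V]) {
          if (DFSNum[W] == 0) {
            visit(W);
            LowLink[V] = std::min(LowLink[V], LowLink[W]);
          } else if (OnStack[W]) {
            // W is still pending on the stack, so it shares V's SCC region.
            LowLink[V] = std::min(LowLink[V], DFSNum[W]);
          }
        }
        // Low-link rule: V roots an SCC when nothing above it on the stack
        // reaches an earlier DFS number.
        if (LowLink[V] == DFSNum[V]) {
          SCCs.emplace_back();
          int W;
          do {
            W = Stack.back();
            Stack.pop_back();
            OnStack[W] = false;
            SCCs.back().push_back(W);
          } while (W != V);
        }
      }
    };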
diff --git a/contrib/llvm/lib/Analysis/LazyValueInfo.cpp b/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
index b6970af..9f919f7 100644
--- a/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -12,21 +12,20 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "lazy-value-info"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/Support/CFG.h"
-#include "llvm/Support/ConstantRange.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/PatternMatch.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include <map>
@@ -34,6 +33,8 @@
using namespace llvm;
using namespace PatternMatch;
+#define DEBUG_TYPE "lazy-value-info"
+
char LazyValueInfo::ID = 0;
INITIALIZE_PASS_BEGIN(LazyValueInfo, "lazy-value-info",
"Lazy Value Information Analysis", false, true)
@@ -82,7 +83,7 @@ class LVILatticeVal {
ConstantRange Range;
public:
- LVILatticeVal() : Tag(undefined), Val(0), Range(1, true) {}
+ LVILatticeVal() : Tag(undefined), Val(nullptr), Range(1, true) {}
static LVILatticeVal get(Constant *C) {
LVILatticeVal Res;
@@ -302,9 +303,9 @@ namespace {
LVIValueHandle(Value *V, LazyValueInfoCache *P)
: CallbackVH(V), Parent(P) { }
-
- void deleted();
- void allUsesReplacedWith(Value *V) {
+
+ void deleted() override;
+ void allUsesReplacedWith(Value *V) override {
deleted();
}
};
@@ -516,7 +517,7 @@ bool LazyValueInfoCache::solveBlockValue(Value *Val, BasicBlock *BB) {
BBLV.markOverdefined();
Instruction *BBI = dyn_cast<Instruction>(Val);
- if (BBI == 0 || BBI->getParent() != BB) {
+ if (!BBI || BBI->getParent() != BB) {
return ODCacheUpdater.markResult(solveBlockValueNonLocal(BBLV, Val, BB));
}
@@ -595,7 +596,7 @@ bool LazyValueInfoCache::solveBlockValueNonLocal(LVILatticeVal &BBLV,
Value *UnderlyingVal = GetUnderlyingObject(Val);
// If 'GetUnderlyingObject' didn't converge, skip it. It won't converge
// inside InstructionDereferencesPointer either.
- if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, NULL, 1)) {
+ if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, nullptr, 1)) {
for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
BI != BE; ++BI) {
if (InstructionDereferencesPointer(BI, UnderlyingVal)) {
@@ -813,7 +814,7 @@ static bool getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
// Recognize the range checking idiom that InstCombine produces.
// (X-C1) u< C2 --> [C1, C1+C2)
- ConstantInt *NegOffset = 0;
+ ConstantInt *NegOffset = nullptr;
if (ICI->getPredicate() == ICmpInst::ICMP_ULT)
match(ICI->getOperand(0), m_Add(m_Specific(Val),
m_ConstantInt(NegOffset)));
@@ -1013,7 +1014,8 @@ bool LazyValueInfo::runOnFunction(Function &F) {
if (PImpl)
getCache(PImpl).clear();
- TD = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = &getAnalysis<TargetLibraryInfo>();
// Fully lazy.
@@ -1029,7 +1031,7 @@ void LazyValueInfo::releaseMemory() {
// If the cache was allocated, free it.
if (PImpl) {
delete &getCache(PImpl);
- PImpl = 0;
+ PImpl = nullptr;
}
}
@@ -1043,7 +1045,7 @@ Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB) {
if (const APInt *SingleVal = CR.getSingleElement())
return ConstantInt::get(V->getContext(), *SingleVal);
}
- return 0;
+ return nullptr;
}
/// getConstantOnEdge - Determine whether the specified value is known to be a
@@ -1059,7 +1061,7 @@ Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
if (const APInt *SingleVal = CR.getSingleElement())
return ConstantInt::get(V->getContext(), *SingleVal);
}
- return 0;
+ return nullptr;
}
/// getPredicateOnEdge - Determine whether the specified value comparison
@@ -1071,9 +1073,9 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
LVILatticeVal Result = getCache(PImpl).getValueOnEdge(V, FromBB, ToBB);
// If we know the value is a constant, evaluate the conditional.
- Constant *Res = 0;
+ Constant *Res = nullptr;
if (Result.isConstant()) {
- Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, TD,
+ Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, DL,
TLI);
if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
return ResCI->isZero() ? False : True;
@@ -1115,14 +1117,14 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
if (Pred == ICmpInst::ICMP_EQ) {
// !C1 == C -> false iff C1 == C.
Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
- Result.getNotConstant(), C, TD,
+ Result.getNotConstant(), C, DL,
TLI);
if (Res->isNullValue())
return False;
} else if (Pred == ICmpInst::ICMP_NE) {
// !C1 != C -> true iff C1 == C.
Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
- Result.getNotConstant(), C, TD,
+ Result.getNotConstant(), C, DL,
TLI);
if (Res->isNullValue())
return True;
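
The TD-to-DL change here is the first of several identical migrations in this merge (Lint and MemoryDependenceAnalysis below follow suit): as of LLVM 3.5 the data layout is exposed through DataLayoutPass rather than as a DataLayout immutable analysis. A minimal sketch of the pattern for porting an out-of-tree pass (the pass name is invented for the example):

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Function.h"
    #include "llvm/Pass.h"
    using namespace llvm;

    namespace {
    // Hypothetical pass showing the 3.5-era way to pick up DataLayout.
    struct ExamplePass : FunctionPass {
      static char ID;
      const DataLayout *DL;
      ExamplePass() : FunctionPass(ID), DL(nullptr) {}

      bool runOnFunction(Function &F) override {
        // DataLayout stays optional; keep a null pointer when the module has
        // no layout, exactly as LazyValueInfo does above.
        DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
        DL = DLP ? &DLP->getDataLayout() : nullptr;
        return false;
      }
    };
    }

    char ExamplePass::ID = 0;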
diff --git a/contrib/llvm/lib/Analysis/LibCallAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/LibCallAliasAnalysis.cpp
index fefa516..016f8c5 100644
--- a/contrib/llvm/lib/Analysis/LibCallAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/LibCallAliasAnalysis.cpp
@@ -54,7 +54,7 @@ LibCallAliasAnalysis::AnalyzeLibCallDetails(const LibCallFunctionInfo *FI,
// if we have detailed info and if 'P' is any of the locations we know
// about.
const LibCallFunctionInfo::LocationMRInfo *Details = FI->LocationDetails;
- if (Details == 0)
+ if (Details == nullptr)
return MRInfo;
// If the details array is of the 'DoesNot' kind, we only know something if
diff --git a/contrib/llvm/lib/Analysis/LibCallSemantics.cpp b/contrib/llvm/lib/Analysis/LibCallSemantics.cpp
index 0592ccb..7d4e254 100644
--- a/contrib/llvm/lib/Analysis/LibCallSemantics.cpp
+++ b/contrib/llvm/lib/Analysis/LibCallSemantics.cpp
@@ -46,11 +46,11 @@ LibCallInfo::getFunctionInfo(const Function *F) const {
/// If this is the first time we are querying for this info, lazily construct
/// the StringMap to index it.
- if (Map == 0) {
+ if (!Map) {
Impl = Map = new StringMap<const LibCallFunctionInfo*>();
const LibCallFunctionInfo *Array = getFunctionInfoArray();
- if (Array == 0) return 0;
+ if (!Array) return nullptr;
// We now have the array of entries. Populate the StringMap.
for (unsigned i = 0; Array[i].Name; ++i)
diff --git a/contrib/llvm/lib/Analysis/Lint.cpp b/contrib/llvm/lib/Analysis/Lint.cpp
index ec17f47..b14f329 100644
--- a/contrib/llvm/lib/Analysis/Lint.cpp
+++ b/contrib/llvm/lib/Analysis/Lint.cpp
@@ -16,7 +16,7 @@
// those aren't comprehensive either. Second, many conditions cannot be
// checked statically. This pass does no dynamic instrumentation, so it
// can't check for all possible problems.
-//
+//
// Another limitation is that it assumes all code will be executed. A store
// through a null pointer in a basic block which is never reached is harmless,
// but this pass will warn about it anyway. This is the main reason why most
@@ -26,31 +26,30 @@
// less obvious. If an optimization pass appears to be introducing a warning,
// it may be that the optimization pass is merely exposing an existing
// condition in the code.
-//
+//
// This code may be run before instcombine. In many cases, instcombine checks
// for the same kinds of things and turns instructions with undefined behavior
// into unreachable (or equivalent). Because of this, this pass makes some
// effort to look through bitcasts and so on.
-//
+//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/Lint.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Assembly/Writer.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/InstVisitor.h"
#include "llvm/Pass.h"
#include "llvm/PassManager.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
@@ -103,7 +102,7 @@ namespace {
Module *Mod;
AliasAnalysis *AA;
DominatorTree *DT;
- DataLayout *TD;
+ const DataLayout *DL;
TargetLibraryInfo *TLI;
std::string Messages;
@@ -114,22 +113,22 @@ namespace {
initializeLintPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
AU.addRequired<AliasAnalysis>();
AU.addRequired<TargetLibraryInfo>();
- AU.addRequired<DominatorTree>();
+ AU.addRequired<DominatorTreeWrapperPass>();
}
- virtual void print(raw_ostream &O, const Module *M) const {}
+ void print(raw_ostream &O, const Module *M) const override {}
void WriteValue(const Value *V) {
if (!V) return;
if (isa<Instruction>(V)) {
MessagesStr << *V << '\n';
} else {
- WriteAsOperand(MessagesStr, V, true, Mod);
+ V->printAsOperand(MessagesStr, true, Mod);
MessagesStr << '\n';
}
}
@@ -138,8 +137,8 @@ namespace {
// that failed. This provides a nice place to put a breakpoint if you want
// to see why something is not correct.
void CheckFailed(const Twine &Message,
- const Value *V1 = 0, const Value *V2 = 0,
- const Value *V3 = 0, const Value *V4 = 0) {
+ const Value *V1 = nullptr, const Value *V2 = nullptr,
+ const Value *V3 = nullptr, const Value *V4 = nullptr) {
MessagesStr << Message.str() << "\n";
WriteValue(V1);
WriteValue(V2);
@@ -153,7 +152,7 @@ char Lint::ID = 0;
INITIALIZE_PASS_BEGIN(Lint, "lint", "Statically lint-checks LLVM IR",
false, true)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(Lint, "lint", "Statically lint-checks LLVM IR",
false, true)
@@ -176,8 +175,9 @@ INITIALIZE_PASS_END(Lint, "lint", "Statically lint-checks LLVM IR",
bool Lint::runOnFunction(Function &F) {
Mod = F.getParent();
AA = &getAnalysis<AliasAnalysis>();
- DT = &getAnalysis<DominatorTree>();
- TD = getAnalysisIfAvailable<DataLayout>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = &getAnalysis<TargetLibraryInfo>();
visit(F);
dbgs() << MessagesStr.str();
@@ -199,7 +199,7 @@ void Lint::visitCallSite(CallSite CS) {
Value *Callee = CS.getCalledValue();
visitMemoryReference(I, Callee, AliasAnalysis::UnknownSize,
- 0, 0, MemRef::Callee);
+ 0, nullptr, MemRef::Callee);
if (Function *F = dyn_cast<Function>(findValue(Callee, /*OffsetOk=*/false))) {
Assert1(CS.getCallingConv() == F->getCallingConv(),
@@ -248,7 +248,7 @@ void Lint::visitCallSite(CallSite CS) {
Type *Ty =
cast<PointerType>(Formal->getType())->getElementType();
visitMemoryReference(I, Actual, AA->getTypeStoreSize(Ty),
- TD ? TD->getABITypeAlignment(Ty) : 0,
+ DL ? DL->getABITypeAlignment(Ty) : 0,
Ty, MemRef::Read | MemRef::Write);
}
}
@@ -275,10 +275,10 @@ void Lint::visitCallSite(CallSite CS) {
MemCpyInst *MCI = cast<MemCpyInst>(&I);
// TODO: If the size is known, use it.
visitMemoryReference(I, MCI->getDest(), AliasAnalysis::UnknownSize,
- MCI->getAlignment(), 0,
+ MCI->getAlignment(), nullptr,
MemRef::Write);
visitMemoryReference(I, MCI->getSource(), AliasAnalysis::UnknownSize,
- MCI->getAlignment(), 0,
+ MCI->getAlignment(), nullptr,
MemRef::Read);
// Check that the memcpy arguments don't overlap. The AliasAnalysis API
@@ -299,10 +299,10 @@ void Lint::visitCallSite(CallSite CS) {
MemMoveInst *MMI = cast<MemMoveInst>(&I);
// TODO: If the size is known, use it.
visitMemoryReference(I, MMI->getDest(), AliasAnalysis::UnknownSize,
- MMI->getAlignment(), 0,
+ MMI->getAlignment(), nullptr,
MemRef::Write);
visitMemoryReference(I, MMI->getSource(), AliasAnalysis::UnknownSize,
- MMI->getAlignment(), 0,
+ MMI->getAlignment(), nullptr,
MemRef::Read);
break;
}
@@ -310,7 +310,7 @@ void Lint::visitCallSite(CallSite CS) {
MemSetInst *MSI = cast<MemSetInst>(&I);
// TODO: If the size is known, use it.
visitMemoryReference(I, MSI->getDest(), AliasAnalysis::UnknownSize,
- MSI->getAlignment(), 0,
+ MSI->getAlignment(), nullptr,
MemRef::Write);
break;
}
@@ -321,17 +321,17 @@ void Lint::visitCallSite(CallSite CS) {
&I);
visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
- 0, 0, MemRef::Read | MemRef::Write);
+ 0, nullptr, MemRef::Read | MemRef::Write);
break;
case Intrinsic::vacopy:
visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
- 0, 0, MemRef::Write);
+ 0, nullptr, MemRef::Write);
visitMemoryReference(I, CS.getArgument(1), AliasAnalysis::UnknownSize,
- 0, 0, MemRef::Read);
+ 0, nullptr, MemRef::Read);
break;
case Intrinsic::vaend:
visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
- 0, 0, MemRef::Read | MemRef::Write);
+ 0, nullptr, MemRef::Read | MemRef::Write);
break;
case Intrinsic::stackrestore:
@@ -339,7 +339,7 @@ void Lint::visitCallSite(CallSite CS) {
// stack pointer, which the compiler may read from or write to
// at any time, so check it for both readability and writeability.
visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
- 0, 0, MemRef::Read | MemRef::Write);
+ 0, nullptr, MemRef::Read | MemRef::Write);
break;
}
}
@@ -415,7 +415,7 @@ void Lint::visitMemoryReference(Instruction &I,
// Only handles memory references that read/write something simple like an
// alloca instruction or a global variable.
int64_t Offset = 0;
- if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, TD)) {
+ if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, DL)) {
// OK, so the access is to a constant offset from Ptr. Check that Ptr is
// something we can handle and if so extract the size of this base object
// along with its alignment.
@@ -424,21 +424,21 @@ void Lint::visitMemoryReference(Instruction &I,
if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
Type *ATy = AI->getAllocatedType();
- if (TD && !AI->isArrayAllocation() && ATy->isSized())
- BaseSize = TD->getTypeAllocSize(ATy);
+ if (DL && !AI->isArrayAllocation() && ATy->isSized())
+ BaseSize = DL->getTypeAllocSize(ATy);
BaseAlign = AI->getAlignment();
- if (TD && BaseAlign == 0 && ATy->isSized())
- BaseAlign = TD->getABITypeAlignment(ATy);
+ if (DL && BaseAlign == 0 && ATy->isSized())
+ BaseAlign = DL->getABITypeAlignment(ATy);
} else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
// If the global may be defined differently in another compilation unit
// then don't warn about funky memory accesses.
if (GV->hasDefinitiveInitializer()) {
Type *GTy = GV->getType()->getElementType();
- if (TD && GTy->isSized())
- BaseSize = TD->getTypeAllocSize(GTy);
+ if (DL && GTy->isSized())
+ BaseSize = DL->getTypeAllocSize(GTy);
BaseAlign = GV->getAlignment();
- if (TD && BaseAlign == 0 && GTy->isSized())
- BaseAlign = TD->getABITypeAlignment(GTy);
+ if (DL && BaseAlign == 0 && GTy->isSized())
+ BaseAlign = DL->getABITypeAlignment(GTy);
}
}
@@ -451,8 +451,8 @@ void Lint::visitMemoryReference(Instruction &I,
// Accesses that say that the memory is more aligned than it is are not
// defined.
- if (TD && Align == 0 && Ty && Ty->isSized())
- Align = TD->getABITypeAlignment(Ty);
+ if (DL && Align == 0 && Ty && Ty->isSized())
+ Align = DL->getABITypeAlignment(Ty);
Assert1(!BaseAlign || Align <= MinAlign(BaseAlign, Offset),
"Undefined behavior: Memory reference address is misaligned", &I);
}
@@ -504,7 +504,7 @@ void Lint::visitShl(BinaryOperator &I) {
"Undefined result: Shift count out of range", &I);
}
-static bool isZero(Value *V, DataLayout *DL) {
+static bool isZero(Value *V, const DataLayout *DL) {
// Assume undef could be zero.
if (isa<UndefValue>(V))
return true;
@@ -513,7 +513,7 @@ static bool isZero(Value *V, DataLayout *DL) {
if (!VecTy) {
unsigned BitWidth = V->getType()->getIntegerBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- ComputeMaskedBits(V, KnownZero, KnownOne, DL);
+ computeKnownBits(V, KnownZero, KnownOne, DL);
return KnownZero.isAllOnesValue();
}
@@ -534,7 +534,7 @@ static bool isZero(Value *V, DataLayout *DL) {
return true;
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- ComputeMaskedBits(Elem, KnownZero, KnownOne, DL);
+ computeKnownBits(Elem, KnownZero, KnownOne, DL);
if (KnownZero.isAllOnesValue())
return true;
}
@@ -543,22 +543,22 @@ static bool isZero(Value *V, DataLayout *DL) {
}
void Lint::visitSDiv(BinaryOperator &I) {
- Assert1(!isZero(I.getOperand(1), TD),
+ Assert1(!isZero(I.getOperand(1), DL),
"Undefined behavior: Division by zero", &I);
}
void Lint::visitUDiv(BinaryOperator &I) {
- Assert1(!isZero(I.getOperand(1), TD),
+ Assert1(!isZero(I.getOperand(1), DL),
"Undefined behavior: Division by zero", &I);
}
void Lint::visitSRem(BinaryOperator &I) {
- Assert1(!isZero(I.getOperand(1), TD),
+ Assert1(!isZero(I.getOperand(1), DL),
"Undefined behavior: Division by zero", &I);
}
void Lint::visitURem(BinaryOperator &I) {
- Assert1(!isZero(I.getOperand(1), TD),
+ Assert1(!isZero(I.getOperand(1), DL),
"Undefined behavior: Division by zero", &I);
}
@@ -572,13 +572,13 @@ void Lint::visitAllocaInst(AllocaInst &I) {
}
void Lint::visitVAArgInst(VAArgInst &I) {
- visitMemoryReference(I, I.getOperand(0), AliasAnalysis::UnknownSize, 0, 0,
- MemRef::Read | MemRef::Write);
+ visitMemoryReference(I, I.getOperand(0), AliasAnalysis::UnknownSize, 0,
+ nullptr, MemRef::Read | MemRef::Write);
}
void Lint::visitIndirectBrInst(IndirectBrInst &I) {
- visitMemoryReference(I, I.getAddress(), AliasAnalysis::UnknownSize, 0, 0,
- MemRef::Branchee);
+ visitMemoryReference(I, I.getAddress(), AliasAnalysis::UnknownSize, 0,
+ nullptr, MemRef::Branchee);
Assert1(I.getNumDestinations() != 0,
"Undefined behavior: indirectbr with no destinations", &I);
@@ -603,7 +603,7 @@ void Lint::visitInsertElementInst(InsertElementInst &I) {
void Lint::visitUnreachableInst(UnreachableInst &I) {
// This isn't undefined behavior, it's merely suspicious.
Assert1(&I == I.getParent()->begin() ||
- prior(BasicBlock::iterator(&I))->mayHaveSideEffects(),
+ std::prev(BasicBlock::iterator(&I))->mayHaveSideEffects(),
"Unusual: unreachable immediately preceded by instruction without "
"side effects", &I);
}
@@ -632,7 +632,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
// TODO: Look through eliminable cast pairs.
// TODO: Look through calls with unique return values.
// TODO: Look through vector insert/extract/shuffle.
- V = OffsetOk ? GetUnderlyingObject(V, TD) : V->stripPointerCasts();
+ V = OffsetOk ? GetUnderlyingObject(V, DL) : V->stripPointerCasts();
if (LoadInst *L = dyn_cast<LoadInst>(V)) {
BasicBlock::iterator BBI = L;
BasicBlock *BB = L->getParent();
@@ -652,8 +652,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
if (W != V)
return findValueImpl(W, OffsetOk, Visited);
} else if (CastInst *CI = dyn_cast<CastInst>(V)) {
- if (CI->isNoopCast(TD ? TD->getIntPtrType(V->getContext()) :
- Type::getInt64Ty(V->getContext())))
+ if (CI->isNoopCast(DL))
return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
} else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
if (Value *W = FindInsertedValue(Ex->getAggregateOperand(),
@@ -666,7 +665,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
CE->getOperand(0)->getType(),
CE->getType(),
- TD ? TD->getIntPtrType(V->getContext()) :
+ DL ? DL->getIntPtrType(V->getType()) :
Type::getInt64Ty(V->getContext())))
return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
} else if (CE->getOpcode() == Instruction::ExtractValue) {
@@ -679,10 +678,10 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
// As a last resort, try SimplifyInstruction or constant folding.
if (Instruction *Inst = dyn_cast<Instruction>(V)) {
- if (Value *W = SimplifyInstruction(Inst, TD, TLI, DT))
+ if (Value *W = SimplifyInstruction(Inst, DL, TLI, DT))
return findValueImpl(W, OffsetOk, Visited);
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
- if (Value *W = ConstantFoldConstantExpression(CE, TD, TLI))
+ if (Value *W = ConstantFoldConstantExpression(CE, DL, TLI))
if (W != V)
return findValueImpl(W, OffsetOk, Visited);
}
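
Lint also picks up the ComputeMaskedBits to computeKnownBits rename. The division checks above boil down to the following use of the known-bits interface; this is a hedged sketch with an invented helper name, restricted to scalar integers (Lint's isZero additionally walks vector elements):

    #include "llvm/ADT/APInt.h"
    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Value.h"
    using namespace llvm;

    // Returns true when known-bits analysis proves the scalar integer V can
    // only be zero, mirroring the non-vector path of Lint's isZero().
    static bool provablyZero(Value *V, const DataLayout *DL) {
      unsigned BitWidth = V->getType()->getIntegerBitWidth();
      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
      computeKnownBits(V, KnownZero, KnownOne, DL);
      // Every bit known zero means the only possible value is 0.
      return KnownZero.isAllOnesValue();
    }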
diff --git a/contrib/llvm/lib/Analysis/Loads.cpp b/contrib/llvm/lib/Analysis/Loads.cpp
index 0902a39..005d309 100644
--- a/contrib/llvm/lib/Analysis/Loads.cpp
+++ b/contrib/llvm/lib/Analysis/Loads.cpp
@@ -62,7 +62,7 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
if (ByteOffset < 0) // out of bounds
return false;
- Type *BaseType = 0;
+ Type *BaseType = nullptr;
unsigned BaseAlign = 0;
if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
// An alloca is safe to load from as load as it is suitably aligned.
@@ -161,7 +161,7 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
ScanFrom++;
// Don't scan huge blocks.
- if (MaxInstsToScan-- == 0) return 0;
+ if (MaxInstsToScan-- == 0) return nullptr;
--ScanFrom;
// If this is a load of Ptr, the loaded value is available.
@@ -198,7 +198,7 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
// Otherwise the store that may or may not alias the pointer, bail out.
++ScanFrom;
- return 0;
+ return nullptr;
}
// If this is some other instruction that may clobber Ptr, bail out.
@@ -211,11 +211,11 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
// May modify the pointer, bail out.
++ScanFrom;
- return 0;
+ return nullptr;
}
}
// Got to the start of the block, we didn't find it, but are done for this
// block.
- return 0;
+ return nullptr;
}
diff --git a/contrib/llvm/lib/Analysis/LoopInfo.cpp b/contrib/llvm/lib/Analysis/LoopInfo.cpp
index e369633..46c0eaabe 100644
--- a/contrib/llvm/lib/Analysis/LoopInfo.cpp
+++ b/contrib/llvm/lib/Analysis/LoopInfo.cpp
@@ -17,15 +17,14 @@
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfoImpl.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Assembly/Writer.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
@@ -47,7 +46,7 @@ VerifyLoopInfoX("verify-loop-info", cl::location(VerifyLoopInfo),
char LoopInfo::ID = 0;
INITIALIZE_PASS_BEGIN(LoopInfo, "loops", "Natural Loop Information", true, true)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(LoopInfo, "loops", "Natural Loop Information", true, true)
// Loop identifier metadata name.
@@ -142,21 +141,21 @@ bool Loop::makeLoopInvariant(Instruction *I, bool &Changed,
PHINode *Loop::getCanonicalInductionVariable() const {
BasicBlock *H = getHeader();
- BasicBlock *Incoming = 0, *Backedge = 0;
+ BasicBlock *Incoming = nullptr, *Backedge = nullptr;
pred_iterator PI = pred_begin(H);
assert(PI != pred_end(H) &&
"Loop must have at least one backedge!");
Backedge = *PI++;
- if (PI == pred_end(H)) return 0; // dead loop
+ if (PI == pred_end(H)) return nullptr; // dead loop
Incoming = *PI++;
- if (PI != pred_end(H)) return 0; // multiple backedges?
+ if (PI != pred_end(H)) return nullptr; // multiple backedges?
if (contains(Incoming)) {
if (contains(Backedge))
- return 0;
+ return nullptr;
std::swap(Incoming, Backedge);
} else if (!contains(Backedge))
- return 0;
+ return nullptr;
// Loop over all of the PHI nodes, looking for a canonical indvar.
for (BasicBlock::iterator I = H->begin(); isa<PHINode>(I); ++I) {
@@ -172,7 +171,7 @@ PHINode *Loop::getCanonicalInductionVariable() const {
if (CI->equalsInt(1))
return PN;
}
- return 0;
+ return nullptr;
}
/// isLCSSAForm - Return true if the Loop is in LCSSA form
@@ -180,12 +179,11 @@ bool Loop::isLCSSAForm(DominatorTree &DT) const {
for (block_iterator BI = block_begin(), E = block_end(); BI != E; ++BI) {
BasicBlock *BB = *BI;
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;++I)
- for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;
- ++UI) {
- User *U = *UI;
- BasicBlock *UserBB = cast<Instruction>(U)->getParent();
- if (PHINode *P = dyn_cast<PHINode>(U))
- UserBB = P->getIncomingBlock(UI);
+ for (Use &U : I->uses()) {
+ Instruction *UI = cast<Instruction>(U.getUser());
+ BasicBlock *UserBB = UI->getParent();
+ if (PHINode *P = dyn_cast<PHINode>(UI))
+ UserBB = P->getIncomingBlock(U);
// Check the current block, as a fast-path, before checking whether
// the use is anywhere in the loop. Most values are used in the same
@@ -220,12 +218,12 @@ bool Loop::isSafeToClone() const {
return false;
if (const InvokeInst *II = dyn_cast<InvokeInst>((*I)->getTerminator()))
- if (II->hasFnAttr(Attribute::NoDuplicate))
+ if (II->cannotDuplicate())
return false;
for (BasicBlock::iterator BI = (*I)->begin(), BE = (*I)->end(); BI != BE; ++BI) {
if (const CallInst *CI = dyn_cast<CallInst>(BI)) {
- if (CI->hasFnAttr(Attribute::NoDuplicate))
+ if (CI->cannotDuplicate())
return false;
}
}
@@ -234,7 +232,7 @@ bool Loop::isSafeToClone() const {
}
MDNode *Loop::getLoopID() const {
- MDNode *LoopID = 0;
+ MDNode *LoopID = nullptr;
if (isLoopSimplifyForm()) {
LoopID = getLoopLatch()->getTerminator()->getMetadata(LoopMDName);
} else {
@@ -243,7 +241,7 @@ MDNode *Loop::getLoopID() const {
BasicBlock *H = getHeader();
for (block_iterator I = block_begin(), IE = block_end(); I != IE; ++I) {
TerminatorInst *TI = (*I)->getTerminator();
- MDNode *MD = 0;
+ MDNode *MD = nullptr;
// Check if this terminator branches to the loop header.
for (unsigned i = 0, ie = TI->getNumSuccessors(); i != ie; ++i) {
@@ -253,17 +251,17 @@ MDNode *Loop::getLoopID() const {
}
}
if (!MD)
- return 0;
+ return nullptr;
if (!LoopID)
LoopID = MD;
else if (MD != LoopID)
- return 0;
+ return nullptr;
}
}
if (!LoopID || LoopID->getNumOperands() == 0 ||
LoopID->getOperand(0) != LoopID)
- return 0;
+ return nullptr;
return LoopID;
}
@@ -404,7 +402,7 @@ BasicBlock *Loop::getUniqueExitBlock() const {
getUniqueExitBlocks(UniqueExitBlocks);
if (UniqueExitBlocks.size() == 1)
return UniqueExitBlocks[0];
- return 0;
+ return nullptr;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -528,8 +526,8 @@ void UnloopUpdater::removeBlocksFromAncestors() {
/// nested within unloop.
void UnloopUpdater::updateSubloopParents() {
while (!Unloop->empty()) {
- Loop *Subloop = *llvm::prior(Unloop->end());
- Unloop->removeChildLoop(llvm::prior(Unloop->end()));
+ Loop *Subloop = *std::prev(Unloop->end());
+ Unloop->removeChildLoop(std::prev(Unloop->end()));
assert(SubloopParents.count(Subloop) && "DFS failed to visit subloop");
if (Loop *Parent = SubloopParents[Subloop])
@@ -550,7 +548,7 @@ Loop *UnloopUpdater::getNearestLoop(BasicBlock *BB, Loop *BBLoop) {
// is considered uninitialized.
Loop *NearLoop = BBLoop;
- Loop *Subloop = 0;
+ Loop *Subloop = nullptr;
if (NearLoop != Unloop && Unloop->contains(NearLoop)) {
Subloop = NearLoop;
// Find the subloop ancestor that is directly contained within Unloop.
@@ -566,7 +564,7 @@ Loop *UnloopUpdater::getNearestLoop(BasicBlock *BB, Loop *BBLoop) {
succ_iterator I = succ_begin(BB), E = succ_end(BB);
if (I == E) {
assert(!Subloop && "subloop blocks must have a successor");
- NearLoop = 0; // unloop blocks may now exit the function.
+ NearLoop = nullptr; // unloop blocks may now exit the function.
}
for (; I != E; ++I) {
if (*I == BB)
@@ -614,7 +612,7 @@ Loop *UnloopUpdater::getNearestLoop(BasicBlock *BB, Loop *BBLoop) {
//
bool LoopInfo::runOnFunction(Function &) {
releaseMemory();
- LI.Analyze(getAnalysis<DominatorTree>().getBase());
+ LI.Analyze(getAnalysis<DominatorTreeWrapperPass>().getDomTree());
return false;
}
@@ -639,7 +637,7 @@ void LoopInfo::updateUnloop(Loop *Unloop) {
// Blocks no longer have a parent but are still referenced by Unloop until
// the Unloop object is deleted.
- LI.changeLoopFor(*I, 0);
+ LI.changeLoopFor(*I, nullptr);
}
// Remove the loop from the top-level LoopInfo object.
@@ -653,7 +651,7 @@ void LoopInfo::updateUnloop(Loop *Unloop) {
// Move all of the subloops to the top-level.
while (!Unloop->empty())
- LI.addTopLevelLoop(Unloop->removeChildLoop(llvm::prior(Unloop->end())));
+ LI.addTopLevelLoop(Unloop->removeChildLoop(std::prev(Unloop->end())));
return;
}
@@ -705,7 +703,7 @@ void LoopInfo::verifyAnalysis() const {
void LoopInfo::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
- AU.addRequired<DominatorTree>();
+ AU.addRequired<DominatorTreeWrapperPass>();
}
void LoopInfo::print(raw_ostream &OS, const Module*) const {
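
The LCSSA check above is rewritten from explicit use_iterator loops to the range-based uses() API, with the PHI incoming block recovered directly from the Use. A small sketch of the idiom (the helper name is invented); the PHI special case matters because a PHI operand is logically "used" at the end of its incoming block, not in the PHI's own block:

    #include "llvm/ADT/SmallPtrSet.h"
    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Collect the blocks in which I is effectively used, the way the LCSSA
    // and dominance checks above reason about uses.
    static void collectUseBlocks(Instruction &I,
                                 SmallPtrSetImpl<BasicBlock *> &Blocks) {
      for (Use &U : I.uses()) {
        Instruction *UserI = cast<Instruction>(U.getUser());
        BasicBlock *UseBB = UserI->getParent();
        if (PHINode *PN = dyn_cast<PHINode>(UserI))
          UseBB = PN->getIncomingBlock(U); // The Use decides the incoming block.
        Blocks.insert(UseBB);
      }
    }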
diff --git a/contrib/llvm/lib/Analysis/LoopPass.cpp b/contrib/llvm/lib/Analysis/LoopPass.cpp
index acf2ba6..7bd866e 100644
--- a/contrib/llvm/lib/Analysis/LoopPass.cpp
+++ b/contrib/llvm/lib/Analysis/LoopPass.cpp
@@ -14,11 +14,14 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/LoopPass.h"
-#include "llvm/Assembly/PrintModulePass.h"
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Timer.h"
using namespace llvm;
+#define DEBUG_TYPE "loop-pass-manager"
+
namespace {
/// PrintLoopPass - Print a Function corresponding to a Loop.
@@ -33,16 +36,19 @@ public:
PrintLoopPass(const std::string &B, raw_ostream &o)
: LoopPass(ID), Banner(B), Out(o) {}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
- bool runOnLoop(Loop *L, LPPassManager &) {
+ bool runOnLoop(Loop *L, LPPassManager &) override {
Out << Banner;
for (Loop::block_iterator b = L->block_begin(), be = L->block_end();
b != be;
++b) {
- (*b)->print(Out);
+ if (*b)
+ (*b)->print(Out);
+ else
+ Out << "Printing <null> block";
}
return false;
}
@@ -61,8 +67,8 @@ LPPassManager::LPPassManager()
: FunctionPass(ID), PMDataManager() {
skipThisLoop = false;
redoThisLoop = false;
- LI = NULL;
- CurrentLoop = NULL;
+ LI = nullptr;
+ CurrentLoop = nullptr;
}
/// Delete loop from the loop queue and loop hierarchy (LoopInfo).
@@ -251,6 +257,8 @@ bool LPPassManager::runOnFunction(Function &F) {
// Then call the regular verifyAnalysis functions.
verifyPreservedAnalysis(P);
+
+ F.getContext().yield();
}
removeNotPreservedAnalysis(P);
@@ -365,3 +373,17 @@ void LoopPass::assignPassManager(PMStack &PMS,
LPPM->add(this);
}
+
+// Containing function has Attribute::OptimizeNone and transformation
+// passes should skip it.
+bool LoopPass::skipOptnoneFunction(const Loop *L) const {
+ const Function *F = L->getHeader()->getParent();
+ if (F && F->hasFnAttribute(Attribute::OptimizeNone)) {
+ // FIXME: Report this to dbgs() only once per function.
+ DEBUG(dbgs() << "Skipping pass '" << getPassName()
+ << "' in function " << F->getName() << "\n");
+ // FIXME: Delete loop from pass manager's queue?
+ return true;
+ }
+ return false;
+}
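
The new skipOptnoneFunction hook is meant to be called at the top of a loop transformation's runOnLoop so that optnone functions are left untouched. A minimal sketch of a hypothetical loop pass using it:

    #include "llvm/Analysis/LoopPass.h"
    using namespace llvm;

    namespace {
    // Invented example pass; only the skipOptnoneFunction call pattern is the
    // point here.
    struct ExampleLoopPass : LoopPass {
      static char ID;
      ExampleLoopPass() : LoopPass(ID) {}

      bool runOnLoop(Loop *L, LPPassManager &LPM) override {
        if (skipOptnoneFunction(L))
          return false; // Containing function is optnone; do nothing.
        // ... the actual transformation would go here ...
        return false;
      }
    };
    }

    char ExampleLoopPass::ID = 0;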
diff --git a/contrib/llvm/lib/Analysis/MemDepPrinter.cpp b/contrib/llvm/lib/Analysis/MemDepPrinter.cpp
index d26aaf1..10da3d5 100644
--- a/contrib/llvm/lib/Analysis/MemDepPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/MemDepPrinter.cpp
@@ -13,11 +13,10 @@
#include "llvm/Analysis/Passes.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
-#include "llvm/Assembly/Writer.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/LLVMContext.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/InstIterator.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -45,19 +44,19 @@ namespace {
initializeMemDepPrinterPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
- void print(raw_ostream &OS, const Module * = 0) const;
+ void print(raw_ostream &OS, const Module * = nullptr) const override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequiredTransitive<AliasAnalysis>();
AU.addRequiredTransitive<MemoryDependenceAnalysis>();
AU.setPreservesAll();
}
- virtual void releaseMemory() {
+ void releaseMemory() override {
Deps.clear();
- F = 0;
+ F = nullptr;
}
private:
@@ -68,7 +67,7 @@ namespace {
return InstTypePair(dep.getInst(), Def);
if (dep.isNonFuncLocal())
return InstTypePair(dep.getInst(), NonFuncLocal);
- assert(dep.isUnknown() && "unexptected dependence type");
+ assert(dep.isUnknown() && "unexpected dependence type");
return InstTypePair(dep.getInst(), Unknown);
}
static InstTypePair getInstTypePair(const Instruction* inst, DepType type) {
@@ -107,7 +106,7 @@ bool MemDepPrinter::runOnFunction(Function &F) {
MemDepResult Res = MDA.getDependency(Inst);
if (!Res.isNonLocal()) {
Deps[Inst].insert(std::make_pair(getInstTypePair(Res),
- static_cast<BasicBlock *>(0)));
+ static_cast<BasicBlock *>(nullptr)));
} else if (CallSite CS = cast<Value>(Inst)) {
const MemoryDependenceAnalysis::NonLocalDepInfo &NLDI =
MDA.getNonLocalCallDependency(CS);
@@ -123,8 +122,8 @@ bool MemDepPrinter::runOnFunction(Function &F) {
if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
if (!LI->isUnordered()) {
// FIXME: Handle atomic/volatile loads.
- Deps[Inst].insert(std::make_pair(getInstTypePair(0, Unknown),
- static_cast<BasicBlock *>(0)));
+ Deps[Inst].insert(std::make_pair(getInstTypePair(nullptr, Unknown),
+ static_cast<BasicBlock *>(nullptr)));
continue;
}
AliasAnalysis::Location Loc = AA.getLocation(LI);
@@ -132,8 +131,8 @@ bool MemDepPrinter::runOnFunction(Function &F) {
} else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
if (!SI->isUnordered()) {
// FIXME: Handle atomic/volatile stores.
- Deps[Inst].insert(std::make_pair(getInstTypePair(0, Unknown),
- static_cast<BasicBlock *>(0)));
+ Deps[Inst].insert(std::make_pair(getInstTypePair(nullptr, Unknown),
+ static_cast<BasicBlock *>(nullptr)));
continue;
}
AliasAnalysis::Location Loc = AA.getLocation(SI);
@@ -177,7 +176,7 @@ void MemDepPrinter::print(raw_ostream &OS, const Module *M) const {
OS << DepTypeStr[type];
if (DepBB) {
OS << " in block ";
- WriteAsOperand(OS, DepBB, /*PrintType=*/false, M);
+ DepBB->printAsOperand(OS, /*PrintType=*/false, M);
}
if (DepInst) {
OS << " from: ";
diff --git a/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp b/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
index 1db0f63..64d339f 100644
--- a/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "memory-builtins"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
@@ -30,6 +29,8 @@
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
+#define DEBUG_TYPE "memory-builtins"
+
enum AllocType {
OpNewLike = 1<<0, // allocates; never returns null
MallocLike = 1<<1 | OpNewLike, // allocates; may return null
@@ -76,14 +77,14 @@ static Function *getCalledFunction(const Value *V, bool LookThroughBitCast) {
CallSite CS(const_cast<Value*>(V));
if (!CS.getInstruction())
- return 0;
+ return nullptr;
if (CS.isNoBuiltin())
- return 0;
+ return nullptr;
Function *Callee = CS.getCalledFunction();
if (!Callee || !Callee->isDeclaration())
- return 0;
+ return nullptr;
return Callee;
}
@@ -94,17 +95,17 @@ static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy,
bool LookThroughBitCast = false) {
// Skip intrinsics
if (isa<IntrinsicInst>(V))
- return 0;
+ return nullptr;
Function *Callee = getCalledFunction(V, LookThroughBitCast);
if (!Callee)
- return 0;
+ return nullptr;
// Make sure that the function is available.
StringRef FnName = Callee->getName();
LibFunc::Func TLIFn;
if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn))
- return 0;
+ return nullptr;
unsigned i = 0;
bool found = false;
@@ -115,11 +116,11 @@ static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy,
}
}
if (!found)
- return 0;
+ return nullptr;
const AllocFnsTy *FnData = &AllocationFnData[i];
if ((FnData->AllocTy & AllocTy) != FnData->AllocTy)
- return 0;
+ return nullptr;
// Check function prototype.
int FstParam = FnData->FstParam;
@@ -135,7 +136,7 @@ static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy,
FTy->getParamType(SndParam)->isIntegerTy(32) ||
FTy->getParamType(SndParam)->isIntegerTy(64)))
return FnData;
- return 0;
+ return nullptr;
}
static bool hasNoAliasAttr(const Value *V, bool LookThroughBitCast) {
@@ -202,19 +203,19 @@ bool llvm::isOperatorNewLikeFn(const Value *V, const TargetLibraryInfo *TLI,
/// ignore InvokeInst here.
const CallInst *llvm::extractMallocCall(const Value *I,
const TargetLibraryInfo *TLI) {
- return isMallocLikeFn(I, TLI) ? dyn_cast<CallInst>(I) : 0;
+ return isMallocLikeFn(I, TLI) ? dyn_cast<CallInst>(I) : nullptr;
}
static Value *computeArraySize(const CallInst *CI, const DataLayout *DL,
const TargetLibraryInfo *TLI,
bool LookThroughSExt = false) {
if (!CI)
- return 0;
+ return nullptr;
// The size of the malloc's result type must be known to determine array size.
Type *T = getMallocAllocatedType(CI, TLI);
if (!T || !T->isSized() || !DL)
- return 0;
+ return nullptr;
unsigned ElementSize = DL->getTypeAllocSize(T);
if (StructType *ST = dyn_cast<StructType>(T))
@@ -223,12 +224,12 @@ static Value *computeArraySize(const CallInst *CI, const DataLayout *DL,
// If malloc call's arg can be determined to be a multiple of ElementSize,
// return the multiple. Otherwise, return NULL.
Value *MallocArg = CI->getArgOperand(0);
- Value *Multiple = 0;
+ Value *Multiple = nullptr;
if (ComputeMultiple(MallocArg, ElementSize, Multiple,
LookThroughSExt))
return Multiple;
- return 0;
+ return nullptr;
}
/// isArrayMalloc - Returns the corresponding CallInst if the instruction
@@ -245,7 +246,7 @@ const CallInst *llvm::isArrayMalloc(const Value *I,
return CI;
// CI is a non-array malloc or we can't figure out that it is an array malloc.
- return 0;
+ return nullptr;
}
/// getMallocType - Returns the PointerType resulting from the malloc call.
@@ -257,12 +258,12 @@ PointerType *llvm::getMallocType(const CallInst *CI,
const TargetLibraryInfo *TLI) {
assert(isMallocLikeFn(CI, TLI) && "getMallocType and not malloc call");
- PointerType *MallocType = 0;
+ PointerType *MallocType = nullptr;
unsigned NumOfBitCastUses = 0;
// Determine if CallInst has a bitcast use.
- for (Value::const_use_iterator UI = CI->use_begin(), E = CI->use_end();
- UI != E; )
+ for (Value::const_user_iterator UI = CI->user_begin(), E = CI->user_end();
+ UI != E;)
if (const BitCastInst *BCI = dyn_cast<BitCastInst>(*UI++)) {
MallocType = cast<PointerType>(BCI->getDestTy());
NumOfBitCastUses++;
@@ -277,7 +278,7 @@ PointerType *llvm::getMallocType(const CallInst *CI,
return cast<PointerType>(CI->getType());
// Type could not be determined.
- return 0;
+ return nullptr;
}
/// getMallocAllocatedType - Returns the Type allocated by malloc call.
@@ -288,7 +289,7 @@ PointerType *llvm::getMallocType(const CallInst *CI,
Type *llvm::getMallocAllocatedType(const CallInst *CI,
const TargetLibraryInfo *TLI) {
PointerType *PT = getMallocType(CI, TLI);
- return PT ? PT->getElementType() : 0;
+ return PT ? PT->getElementType() : nullptr;
}
/// getMallocArraySize - Returns the array size of a malloc call. If the
@@ -308,7 +309,7 @@ Value *llvm::getMallocArraySize(CallInst *CI, const DataLayout *DL,
/// is a calloc call.
const CallInst *llvm::extractCallocCall(const Value *I,
const TargetLibraryInfo *TLI) {
- return isCallocLikeFn(I, TLI) ? cast<CallInst>(I) : 0;
+ return isCallocLikeFn(I, TLI) ? cast<CallInst>(I) : nullptr;
}
@@ -316,15 +317,15 @@ const CallInst *llvm::extractCallocCall(const Value *I,
const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
const CallInst *CI = dyn_cast<CallInst>(I);
if (!CI || isa<IntrinsicInst>(CI))
- return 0;
+ return nullptr;
Function *Callee = CI->getCalledFunction();
- if (Callee == 0 || !Callee->isDeclaration())
- return 0;
+ if (Callee == nullptr || !Callee->isDeclaration())
+ return nullptr;
StringRef FnName = Callee->getName();
LibFunc::Func TLIFn;
if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn))
- return 0;
+ return nullptr;
unsigned ExpectedNumParams;
if (TLIFn == LibFunc::free ||
@@ -335,18 +336,18 @@ const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
TLIFn == LibFunc::ZdaPvRKSt9nothrow_t) // delete[](void*, nothrow)
ExpectedNumParams = 2;
else
- return 0;
+ return nullptr;
// Check free prototype.
// FIXME: workaround for PR5130, this will be obsolete when a nobuiltin
// attribute will exist.
FunctionType *FTy = Callee->getFunctionType();
if (!FTy->getReturnType()->isVoidTy())
- return 0;
+ return nullptr;
if (FTy->getNumParams() != ExpectedNumParams)
- return 0;
+ return nullptr;
if (FTy->getParamType(0) != Type::getInt8PtrTy(Callee->getContext()))
- return 0;
+ return nullptr;
return CI;
}
@@ -399,12 +400,14 @@ ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout *DL,
LLVMContext &Context,
bool RoundToAlign)
: DL(DL), TLI(TLI), RoundToAlign(RoundToAlign) {
- IntegerType *IntTy = DL->getIntPtrType(Context);
- IntTyBits = IntTy->getBitWidth();
- Zero = APInt::getNullValue(IntTyBits);
+ // Pointer size must be rechecked for each object visited since it could have
+ // a different address space.
}
SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
+ IntTyBits = DL->getPointerTypeSizeInBits(V->getType());
+ Zero = APInt::getNullValue(IntTyBits);
+
V = V->stripPointerCasts();
if (Instruction *I = dyn_cast<Instruction>(V)) {
// If we have already seen this instruction, bail out. Cycles can happen in
@@ -456,7 +459,7 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
// no interprocedural analysis is done at the moment
- if (!A.hasByValAttr()) {
+ if (!A.hasByValOrInAllocaAttr()) {
++ObjectVisitorArgument;
return unknown();
}
@@ -592,11 +595,15 @@ ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const DataLayout *DL,
bool RoundToAlign)
: DL(DL), TLI(TLI), Context(Context), Builder(Context, TargetFolder(DL)),
RoundToAlign(RoundToAlign) {
- IntTy = DL->getIntPtrType(Context);
- Zero = ConstantInt::get(IntTy, 0);
+ // IntTy and Zero must be set for each compute() since the address space may
+ // be different for later objects.
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
+ // XXX - Are vectors of pointers possible here?
+ IntTy = cast<IntegerType>(DL->getIntPtrType(V->getType()));
+ Zero = ConstantInt::get(IntTy, 0);
+
SizeOffsetEvalType Result = compute_(V);
if (!bothKnown(Result)) {
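
Not part of the patch: the hunks above move the pointer-width setup out of the ObjectSizeOffsetVisitor/Evaluator constructors and into each compute() call, so every queried value is measured with the width of its own address space. A minimal sketch of that pattern, assuming only the LLVM 3.5 headers named in the includes (the helper name is hypothetical):

// Minimal sketch (not from the commit): derive the zero offset from the
// pointer width of the value's own address space, as compute() now does.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"
using namespace llvm;

static APInt zeroOffsetFor(const DataLayout &DL, Value *V) {
  // getPointerTypeSizeInBits() consults the address space of V's type.
  unsigned IntTyBits = DL.getPointerTypeSizeInBits(V->getType());
  return APInt::getNullValue(IntTyBits);
}
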
diff --git a/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index 84ff2ee..9eaf109 100644
--- a/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -14,25 +14,26 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "memdep"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/PredIteratorCache.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/PredIteratorCache.h"
using namespace llvm;
+#define DEBUG_TYPE "memdep"
+
STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");
@@ -59,7 +60,7 @@ INITIALIZE_PASS_END(MemoryDependenceAnalysis, "memdep",
"Memory Dependence Analysis", false, true)
MemoryDependenceAnalysis::MemoryDependenceAnalysis()
-: FunctionPass(ID), PredCache(0) {
+ : FunctionPass(ID), PredCache() {
initializeMemoryDependenceAnalysisPass(*PassRegistry::getPassRegistry());
}
MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {
@@ -87,8 +88,11 @@ void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
bool MemoryDependenceAnalysis::runOnFunction(Function &) {
AA = &getAnalysis<AliasAnalysis>();
- TD = getAnalysisIfAvailable<DataLayout>();
- DT = getAnalysisIfAvailable<DominatorTree>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DominatorTreeWrapperPass *DTWP =
+ getAnalysisIfAvailable<DominatorTreeWrapperPass>();
+ DT = DTWP ? &DTWP->getDomTree() : nullptr;
if (!PredCache)
PredCache.reset(new PredIteratorCache());
return false;
@@ -256,17 +260,17 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
const Value *&MemLocBase,
int64_t &MemLocOffs,
const LoadInst *LI,
- const DataLayout *TD) {
+ const DataLayout *DL) {
// If we have no target data, we can't do this.
- if (TD == 0) return false;
+ if (!DL) return false;
// If we haven't already computed the base/offset of MemLoc, do so now.
- if (MemLocBase == 0)
- MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, TD);
+ if (!MemLocBase)
+ MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, DL);
unsigned Size = MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(MemLocBase, MemLocOffs, MemLoc.Size,
- LI, *TD);
+ LI, *DL);
return Size != 0;
}
@@ -280,7 +284,7 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
unsigned MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
unsigned MemLocSize, const LoadInst *LI,
- const DataLayout &TD) {
+ const DataLayout &DL) {
// We can only extend simple integer loads.
if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;
@@ -293,7 +297,7 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
// Get the base of this load.
int64_t LIOffs = 0;
const Value *LIBase =
- GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, &TD);
+ GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, &DL);
// If the two pointers are not based on the same pointer, we can't tell that
// they are related.
@@ -329,7 +333,7 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
// If this load size is bigger than our known alignment or would not fit
// into a native integer register, then we fail.
if (NewLoadByteSize > LoadAlign ||
- !TD.fitsInLegalInteger(NewLoadByteSize*8))
+ !DL.fitsInLegalInteger(NewLoadByteSize*8))
return 0;
if (LIOffs+NewLoadByteSize > MemLocEnd &&
@@ -359,13 +363,13 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
BasicBlock::iterator ScanIt, BasicBlock *BB,
Instruction *QueryInst) {
- const Value *MemLocBase = 0;
+ const Value *MemLocBase = nullptr;
int64_t MemLocOffset = 0;
unsigned Limit = BlockScanLimit;
bool isInvariantLoad = false;
if (isLoad && QueryInst) {
LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
- if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != 0)
+ if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr)
isInvariantLoad = true;
}
@@ -422,7 +426,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
if (LI->getAlignment()*8 > ITy->getPrimitiveSizeInBits() &&
isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
- MemLocOffset, LI, TD))
+ MemLocOffset, LI, DL))
return MemDepResult::getClobber(Inst);
continue;
@@ -498,7 +502,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
// need to continue scanning until the malloc call.
const TargetLibraryInfo *TLI = AA->getTargetLibraryInfo();
if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, TLI)) {
- const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, TD);
+ const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, DL);
if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))
return MemDepResult::getDef(Inst);
@@ -690,10 +694,10 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
NonLocalDepInfo::iterator Entry =
std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
NonLocalDepEntry(DirtyBB));
- if (Entry != Cache.begin() && prior(Entry)->getBB() == DirtyBB)
+ if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
--Entry;
- NonLocalDepEntry *ExistingResult = 0;
+ NonLocalDepEntry *ExistingResult = nullptr;
if (Entry != Cache.begin()+NumSortedEntries &&
Entry->getBB() == DirtyBB) {
// If we already have an entry, and if it isn't already dirty, the block
@@ -771,7 +775,7 @@ getNonLocalPointerDependency(const AliasAnalysis::Location &Loc, bool isLoad,
"Can't get pointer deps of a non-pointer!");
Result.clear();
- PHITransAddr Address(const_cast<Value *>(Loc.Ptr), TD);
+ PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL);
// This is the set of blocks we've inspected, and the pointer we consider in
// each block. Because of critical edges, we currently bail out if querying
@@ -804,7 +808,7 @@ GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc,
if (Entry != Cache->begin() && (Entry-1)->getBB() == BB)
--Entry;
- NonLocalDepEntry *ExistingResult = 0;
+ NonLocalDepEntry *ExistingResult = nullptr;
if (Entry != Cache->begin()+NumSortedEntries && Entry->getBB() == BB)
ExistingResult = &*Entry;
@@ -957,7 +961,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
if (CacheInfo->TBAATag != Loc.TBAATag) {
if (CacheInfo->TBAATag) {
CacheInfo->Pair = BBSkipFirstBlockPair();
- CacheInfo->TBAATag = 0;
+ CacheInfo->TBAATag = nullptr;
for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
if (Instruction *Inst = DI->getResult().getInst())
@@ -1113,7 +1117,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
NumSortedEntries = Cache->size();
}
- Cache = 0;
+ Cache = nullptr;
PredList.clear();
for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
@@ -1123,7 +1127,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
// Get the PHI translated pointer in this predecessor. This can fail if
// not translatable, in which case the getAddr() returns null.
PHITransAddr &PredPointer = PredList.back().second;
- PredPointer.PHITranslateValue(BB, Pred, 0);
+ PredPointer.PHITranslateValue(BB, Pred, nullptr);
Value *PredPtrVal = PredPointer.getAddr();
@@ -1172,7 +1176,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
// predecessor, then we have to assume that the pointer is clobbered in
// that predecessor. We can still do PRE of the load, which would insert
// a computation of the pointer in this predecessor.
- if (PredPtrVal == 0)
+ if (!PredPtrVal)
CanTranslate = false;
// FIXME: it is entirely possible that PHI translating will end up with
@@ -1221,7 +1225,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
// for the given block. It assumes that we haven't modified any of
// our datastructures while processing the current block.
- if (Cache == 0) {
+ if (!Cache) {
// Refresh the CacheInfo/Cache pointer if it got invalidated.
CacheInfo = &NonLocalPointerDeps[CacheKey];
Cache = &CacheInfo->NonLocalDeps;
@@ -1276,7 +1280,7 @@ RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) {
for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
Instruction *Target = PInfo[i].getResult().getInst();
- if (Target == 0) continue; // Ignore non-local dep results.
+ if (!Target) continue; // Ignore non-local dep results.
assert(Target->getParent() == PInfo[i].getBB());
// Eliminating the dirty entry from 'Cache', so update the reverse info.
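
Not part of the patch: the optional-analysis lookup that MemoryDependenceAnalysis::runOnFunction switches to above (DataLayoutPass, DominatorTreeWrapperPass) recurs throughout this merge, as does moving DEBUG_TYPE after the includes. A self-contained sketch under LLVM 3.5 headers; ExamplePass and its DEBUG_TYPE string are hypothetical and the pass is not registered:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
using namespace llvm;

#define DEBUG_TYPE "example"  // defined after the includes and using-directive

namespace {
struct ExamplePass : FunctionPass {
  static char ID;
  ExamplePass() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override {
    // Both analyses are optional; fall back to null when they are absent.
    DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
    const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
    DominatorTreeWrapperPass *DTWP =
        getAnalysisIfAvailable<DominatorTreeWrapperPass>();
    DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
    (void)DL;
    (void)DT;
    return false;
  }
};
}
char ExamplePass::ID = 0;
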
diff --git a/contrib/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp b/contrib/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp
index 0341537..f645558 100644
--- a/contrib/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp
@@ -17,8 +17,7 @@
#include "llvm/Analysis/Passes.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Assembly/Writer.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
#include "llvm/Support/ErrorHandling.h"
@@ -34,12 +33,12 @@ namespace {
initializeModuleDebugInfoPrinterPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
- virtual void print(raw_ostream &O, const Module *M) const;
+ void print(raw_ostream &O, const Module *M) const override;
};
}
@@ -57,31 +56,27 @@ bool ModuleDebugInfoPrinter::runOnModule(Module &M) {
}
void ModuleDebugInfoPrinter::print(raw_ostream &O, const Module *M) const {
- for (DebugInfoFinder::iterator I = Finder.compile_unit_begin(),
- E = Finder.compile_unit_end(); I != E; ++I) {
+ for (DICompileUnit CU : Finder.compile_units()) {
O << "Compile Unit: ";
- DICompileUnit(*I).print(O);
+ CU.print(O);
O << '\n';
}
- for (DebugInfoFinder::iterator I = Finder.subprogram_begin(),
- E = Finder.subprogram_end(); I != E; ++I) {
+ for (DISubprogram S : Finder.subprograms()) {
O << "Subprogram: ";
- DISubprogram(*I).print(O);
+ S.print(O);
O << '\n';
}
- for (DebugInfoFinder::iterator I = Finder.global_variable_begin(),
- E = Finder.global_variable_end(); I != E; ++I) {
+ for (DIGlobalVariable GV : Finder.global_variables()) {
O << "GlobalVariable: ";
- DIGlobalVariable(*I).print(O);
+ GV.print(O);
O << '\n';
}
- for (DebugInfoFinder::iterator I = Finder.type_begin(),
- E = Finder.type_end(); I != E; ++I) {
+ for (DIType T : Finder.types()) {
O << "Type: ";
- DIType(*I).print(O);
+ T.print(O);
O << '\n';
}
}
diff --git a/contrib/llvm/lib/Analysis/NoAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/NoAliasAnalysis.cpp
index 907e9621..139fa38 100644
--- a/contrib/llvm/lib/Analysis/NoAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/NoAliasAnalysis.cpp
@@ -15,6 +15,7 @@
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/LLVMContext.h"
#include "llvm/Pass.h"
using namespace llvm;
@@ -30,48 +31,54 @@ namespace {
initializeNoAAPass(*PassRegistry::getPassRegistry());
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- }
+ void getAnalysisUsage(AnalysisUsage &AU) const override {}
- virtual void initializePass() {
+ void initializePass() override {
// Note: NoAA does not call InitializeAliasAnalysis because it's
// special and does not support chaining.
- TD = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
}
- virtual AliasResult alias(const Location &LocA, const Location &LocB) {
+ AliasResult alias(const Location &LocA, const Location &LocB) override {
return MayAlias;
}
- virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
+ ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override {
return UnknownModRefBehavior;
}
- virtual ModRefBehavior getModRefBehavior(const Function *F) {
+ ModRefBehavior getModRefBehavior(const Function *F) override {
return UnknownModRefBehavior;
}
- virtual bool pointsToConstantMemory(const Location &Loc,
- bool OrLocal) {
+ bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override {
return false;
}
- virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) {
+ Location getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
+ ModRefResult &Mask) override {
+ Mask = ModRef;
+ return Location(CS.getArgument(ArgIdx), UnknownSize,
+ CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa));
+ }
+
+ ModRefResult getModRefInfo(ImmutableCallSite CS,
+ const Location &Loc) override {
return ModRef;
}
- virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
- ImmutableCallSite CS2) {
+ ModRefResult getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) override {
return ModRef;
}
- virtual void deleteValue(Value *V) {}
- virtual void copyValue(Value *From, Value *To) {}
- virtual void addEscapingUse(Use &U) {}
-
+ void deleteValue(Value *V) override {}
+ void copyValue(Value *From, Value *To) override {}
+ void addEscapingUse(Use &U) override {}
+
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(const void *ID) {
+ void *getAdjustedAnalysisPointer(const void *ID) override {
if (ID == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
diff --git a/contrib/llvm/lib/Analysis/PHITransAddr.cpp b/contrib/llvm/lib/Analysis/PHITransAddr.cpp
index e6af066..bfe8642 100644
--- a/contrib/llvm/lib/Analysis/PHITransAddr.cpp
+++ b/contrib/llvm/lib/Analysis/PHITransAddr.cpp
@@ -12,10 +12,10 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/PHITransAddr.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -43,7 +43,7 @@ static bool CanPHITrans(Instruction *Inst) {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void PHITransAddr::dump() const {
- if (Addr == 0) {
+ if (!Addr) {
dbgs() << "PHITransAddr: null\n";
return;
}
@@ -58,7 +58,7 @@ static bool VerifySubExpr(Value *Expr,
SmallVectorImpl<Instruction*> &InstInputs) {
// If this is a non-instruction value, there is nothing to do.
Instruction *I = dyn_cast<Instruction>(Expr);
- if (I == 0) return true;
+ if (!I) return true;
// If it's an instruction, it is either in Tmp or its operands recursively
// are.
@@ -72,7 +72,7 @@ static bool VerifySubExpr(Value *Expr,
// If it isn't in the InstInputs list it is a subexpr incorporated into the
// address. Sanity check that it is phi translatable.
if (!CanPHITrans(I)) {
- errs() << "Non phi translatable instruction found in PHITransAddr:\n";
+ errs() << "Instruction in PHITransAddr is not phi-translatable:\n";
errs() << *I << '\n';
llvm_unreachable("Either something is missing from InstInputs or "
"CanPHITrans is wrong.");
@@ -90,7 +90,7 @@ static bool VerifySubExpr(Value *Expr,
/// structure is valid, it returns true. If invalid, it prints errors and
/// returns false.
bool PHITransAddr::Verify() const {
- if (Addr == 0) return true;
+ if (!Addr) return true;
SmallVector<Instruction*, 8> Tmp(InstInputs.begin(), InstInputs.end());
@@ -116,14 +116,14 @@ bool PHITransAddr::IsPotentiallyPHITranslatable() const {
// If the input value is not an instruction, or if it is not defined in CurBB,
// then we don't need to phi translate it.
Instruction *Inst = dyn_cast<Instruction>(Addr);
- return Inst == 0 || CanPHITrans(Inst);
+ return !Inst || CanPHITrans(Inst);
}
static void RemoveInstInputs(Value *V,
SmallVectorImpl<Instruction*> &InstInputs) {
Instruction *I = dyn_cast<Instruction>(V);
- if (I == 0) return;
+ if (!I) return;
// If the instruction is in the InstInputs list, remove it.
SmallVectorImpl<Instruction*>::iterator Entry =
@@ -147,7 +147,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
const DominatorTree *DT) {
// If this is a non-instruction value, it can't require PHI translation.
Instruction *Inst = dyn_cast<Instruction>(V);
- if (Inst == 0) return V;
+ if (!Inst) return V;
// Determine whether 'Inst' is an input to our PHI translatable expression.
bool isInput = std::count(InstInputs.begin(), InstInputs.end(), Inst);
@@ -173,7 +173,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
// If this is a non-phi value, and it is analyzable, we can incorporate it
// into the expression by making all instruction operands be inputs.
if (!CanPHITrans(Inst))
- return 0;
+ return nullptr;
// All instruction operands are now inputs (and of course, they may also be
// defined in this block, so they may need to be phi translated themselves.
@@ -187,9 +187,9 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
// operands need to be phi translated, and if so, reconstruct it.
if (CastInst *Cast = dyn_cast<CastInst>(Inst)) {
- if (!isSafeToSpeculativelyExecute(Cast)) return 0;
+ if (!isSafeToSpeculativelyExecute(Cast)) return nullptr;
Value *PHIIn = PHITranslateSubExpr(Cast->getOperand(0), CurBB, PredBB, DT);
- if (PHIIn == 0) return 0;
+ if (!PHIIn) return nullptr;
if (PHIIn == Cast->getOperand(0))
return Cast;
@@ -202,15 +202,14 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
// Otherwise we have to see if a casted version of the incoming pointer
// is available. If so, we can use it, otherwise we have to fail.
- for (Value::use_iterator UI = PHIIn->use_begin(), E = PHIIn->use_end();
- UI != E; ++UI) {
- if (CastInst *CastI = dyn_cast<CastInst>(*UI))
+ for (User *U : PHIIn->users()) {
+ if (CastInst *CastI = dyn_cast<CastInst>(U))
if (CastI->getOpcode() == Cast->getOpcode() &&
CastI->getType() == Cast->getType() &&
(!DT || DT->dominates(CastI->getParent(), PredBB)))
return CastI;
}
- return 0;
+ return nullptr;
}
// Handle getelementptr with at least one PHI translatable operand.
@@ -219,7 +218,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
bool AnyChanged = false;
for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) {
Value *GEPOp = PHITranslateSubExpr(GEP->getOperand(i), CurBB, PredBB, DT);
- if (GEPOp == 0) return 0;
+ if (!GEPOp) return nullptr;
AnyChanged |= GEPOp != GEP->getOperand(i);
GEPOps.push_back(GEPOp);
@@ -229,7 +228,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
return GEP;
// Simplify the GEP to handle 'gep x, 0' -> x etc.
- if (Value *V = SimplifyGEPInst(GEPOps, TD, TLI, DT)) {
+ if (Value *V = SimplifyGEPInst(GEPOps, DL, TLI, DT)) {
for (unsigned i = 0, e = GEPOps.size(); i != e; ++i)
RemoveInstInputs(GEPOps[i], InstInputs);
@@ -238,9 +237,8 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
// Scan to see if we have this GEP available.
Value *APHIOp = GEPOps[0];
- for (Value::use_iterator UI = APHIOp->use_begin(), E = APHIOp->use_end();
- UI != E; ++UI) {
- if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI))
+ for (User *U : APHIOp->users()) {
+ if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U))
if (GEPI->getType() == GEP->getType() &&
GEPI->getNumOperands() == GEPOps.size() &&
GEPI->getParent()->getParent() == CurBB->getParent() &&
@@ -255,7 +253,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
return GEPI;
}
}
- return 0;
+ return nullptr;
}
// Handle add with a constant RHS.
@@ -267,7 +265,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
bool isNUW = cast<BinaryOperator>(Inst)->hasNoUnsignedWrap();
Value *LHS = PHITranslateSubExpr(Inst->getOperand(0), CurBB, PredBB, DT);
- if (LHS == 0) return 0;
+ if (!LHS) return nullptr;
// If the PHI translated LHS is an add of a constant, fold the immediates.
if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(LHS))
@@ -285,7 +283,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
}
// See if the add simplifies away.
- if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, TD, TLI, DT)) {
+ if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, DL, TLI, DT)) {
// If we simplified the operands, the LHS is no longer an input, but Res
// is.
RemoveInstInputs(LHS, InstInputs);
@@ -297,9 +295,8 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
return Inst;
// Otherwise, see if we have this add available somewhere.
- for (Value::use_iterator UI = LHS->use_begin(), E = LHS->use_end();
- UI != E; ++UI) {
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(*UI))
+ for (User *U : LHS->users()) {
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U))
if (BO->getOpcode() == Instruction::Add &&
BO->getOperand(0) == LHS && BO->getOperand(1) == RHS &&
BO->getParent()->getParent() == CurBB->getParent() &&
@@ -307,11 +304,11 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
return BO;
}
- return 0;
+ return nullptr;
}
// Otherwise, we failed.
- return 0;
+ return nullptr;
}
@@ -329,10 +326,10 @@ bool PHITransAddr::PHITranslateValue(BasicBlock *CurBB, BasicBlock *PredBB,
// Make sure the value is live in the predecessor.
if (Instruction *Inst = dyn_cast_or_null<Instruction>(Addr))
if (!DT->dominates(Inst->getParent(), PredBB))
- Addr = 0;
+ Addr = nullptr;
}
- return Addr == 0;
+ return Addr == nullptr;
}
/// PHITranslateWithInsertion - PHI translate this value into the specified
@@ -357,7 +354,7 @@ PHITranslateWithInsertion(BasicBlock *CurBB, BasicBlock *PredBB,
// If not, destroy any intermediate instructions inserted.
while (NewInsts.size() != NISize)
NewInsts.pop_back_val()->eraseFromParent();
- return 0;
+ return nullptr;
}
@@ -372,7 +369,7 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
SmallVectorImpl<Instruction*> &NewInsts) {
// See if we have a version of this value already available and dominating
// PredBB. If so, there is no need to insert a new instance of it.
- PHITransAddr Tmp(InVal, TD);
+ PHITransAddr Tmp(InVal, DL);
if (!Tmp.PHITranslateValue(CurBB, PredBB, &DT))
return Tmp.getAddr();
@@ -382,10 +379,10 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
// Handle cast of PHI translatable value.
if (CastInst *Cast = dyn_cast<CastInst>(Inst)) {
- if (!isSafeToSpeculativelyExecute(Cast)) return 0;
+ if (!isSafeToSpeculativelyExecute(Cast)) return nullptr;
Value *OpVal = InsertPHITranslatedSubExpr(Cast->getOperand(0),
CurBB, PredBB, DT, NewInsts);
- if (OpVal == 0) return 0;
+ if (!OpVal) return nullptr;
// Otherwise insert a cast at the end of PredBB.
CastInst *New = CastInst::Create(Cast->getOpcode(),
@@ -403,7 +400,7 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) {
Value *OpVal = InsertPHITranslatedSubExpr(GEP->getOperand(i),
CurBB, PredBB, DT, NewInsts);
- if (OpVal == 0) return 0;
+ if (!OpVal) return nullptr;
GEPOps.push_back(OpVal);
}
@@ -439,5 +436,5 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
}
#endif
- return 0;
+ return nullptr;
}
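
Not part of the patch: the loops rewritten above in PHITranslateSubExpr use the users() range instead of explicit Value::use_iterator pairs. A small sketch of the same kind of search, assuming LLVM 3.5 headers; findExistingCast is a hypothetical helper, not something added by the commit:

#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// Return an existing cast of V with the given opcode and result type, if any.
static CastInst *findExistingCast(Value *V, Instruction::CastOps Op, Type *Ty) {
  for (User *U : V->users())                 // was: use_begin()/use_end()
    if (CastInst *CI = dyn_cast<CastInst>(U))
      if (CI->getOpcode() == Op && CI->getType() == Ty)
        return CI;
  return nullptr;
}
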
diff --git a/contrib/llvm/lib/Analysis/PostDominators.cpp b/contrib/llvm/lib/Analysis/PostDominators.cpp
index 96804a0..6d92909 100644
--- a/contrib/llvm/lib/Analysis/PostDominators.cpp
+++ b/contrib/llvm/lib/Analysis/PostDominators.cpp
@@ -11,18 +11,17 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "postdomtree"
-
#include "llvm/Analysis/PostDominators.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SetOperations.h"
-#include "llvm/Analysis/DominatorInternals.h"
-#include "llvm/Assembly/Writer.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Instructions.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/GenericDomTreeConstruction.h"
using namespace llvm;
+#define DEBUG_TYPE "postdomtree"
+
//===----------------------------------------------------------------------===//
// PostDominatorTree Implementation
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Analysis/PtrUseVisitor.cpp b/contrib/llvm/lib/Analysis/PtrUseVisitor.cpp
index 0a342b2..1b0f359 100644
--- a/contrib/llvm/lib/Analysis/PtrUseVisitor.cpp
+++ b/contrib/llvm/lib/Analysis/PtrUseVisitor.cpp
@@ -16,14 +16,13 @@
using namespace llvm;
void detail::PtrUseVisitorBase::enqueueUsers(Instruction &I) {
- for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
- UI != UE; ++UI) {
- if (VisitedUses.insert(&UI.getUse())) {
+ for (Use &U : I.uses()) {
+ if (VisitedUses.insert(&U)) {
UseToVisit NewU = {
- UseToVisit::UseAndIsOffsetKnownPair(&UI.getUse(), IsOffsetKnown),
+ UseToVisit::UseAndIsOffsetKnownPair(&U, IsOffsetKnown),
Offset
};
- Worklist.push_back(llvm_move(NewU));
+ Worklist.push_back(std::move(NewU));
}
}
}
diff --git a/contrib/llvm/lib/Analysis/RegionInfo.cpp b/contrib/llvm/lib/Analysis/RegionInfo.cpp
index 5635688..08ebf0d 100644
--- a/contrib/llvm/lib/Analysis/RegionInfo.cpp
+++ b/contrib/llvm/lib/Analysis/RegionInfo.cpp
@@ -9,36 +9,43 @@
// Detects single entry single exit regions in the control flow graph.
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "region"
#include "llvm/Analysis/RegionInfo.h"
+#include "llvm/Analysis/RegionInfoImpl.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/RegionIterator.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
+#include <iterator>
#include <set>
using namespace llvm;
-// Always verify if expensive checking is enabled.
-#ifdef XDEBUG
-static bool VerifyRegionInfo = true;
-#else
-static bool VerifyRegionInfo = false;
-#endif
+#define DEBUG_TYPE "region"
-static cl::opt<bool,true>
-VerifyRegionInfoX("verify-region-info", cl::location(VerifyRegionInfo),
- cl::desc("Verify region info (time consuming)"));
+namespace llvm {
+template class RegionBase<RegionTraits<Function>>;
+template class RegionNodeBase<RegionTraits<Function>>;
+template class RegionInfoBase<RegionTraits<Function>>;
+}
STATISTIC(numRegions, "The # of regions");
STATISTIC(numSimpleRegions, "The # of simple regions");
-static cl::opt<enum Region::PrintStyle> printStyle("print-region-style",
+// Always verify if expensive checking is enabled.
+
+static cl::opt<bool,true>
+VerifyRegionInfoX(
+ "verify-region-info",
+ cl::location(RegionInfoBase<RegionTraits<Function>>::VerifyRegionInfo),
+ cl::desc("Verify region info (time consuming)"));
+
+
+static cl::opt<Region::PrintStyle, true> printStyleX("print-region-style",
+ cl::location(RegionInfo::printStyle),
cl::Hidden,
cl::desc("style of printing regions"),
cl::values(
@@ -48,809 +55,110 @@ static cl::opt<enum Region::PrintStyle> printStyle("print-region-style",
clEnumValN(Region::PrintRN, "rn",
"print regions in detail with element_iterator"),
clEnumValEnd));
-//===----------------------------------------------------------------------===//
-/// Region Implementation
-Region::Region(BasicBlock *Entry, BasicBlock *Exit, RegionInfo* RInfo,
- DominatorTree *dt, Region *Parent)
- : RegionNode(Parent, Entry, 1), RI(RInfo), DT(dt), exit(Exit) {}
-
-Region::~Region() {
- // Free the cached nodes.
- for (BBNodeMapT::iterator it = BBNodeMap.begin(),
- ie = BBNodeMap.end(); it != ie; ++it)
- delete it->second;
-
- // Only clean the cache for this Region. Caches of child Regions will be
- // cleaned when the child Regions are deleted.
- BBNodeMap.clear();
-
- for (iterator I = begin(), E = end(); I != E; ++I)
- delete *I;
-}
-
-void Region::replaceEntry(BasicBlock *BB) {
- entry.setPointer(BB);
-}
-
-void Region::replaceExit(BasicBlock *BB) {
- assert(exit && "No exit to replace!");
- exit = BB;
-}
-
-void Region::replaceEntryRecursive(BasicBlock *NewEntry) {
- std::vector<Region *> RegionQueue;
- BasicBlock *OldEntry = getEntry();
-
- RegionQueue.push_back(this);
- while (!RegionQueue.empty()) {
- Region *R = RegionQueue.back();
- RegionQueue.pop_back();
-
- R->replaceEntry(NewEntry);
- for (Region::const_iterator RI = R->begin(), RE = R->end(); RI != RE; ++RI)
- if ((*RI)->getEntry() == OldEntry)
- RegionQueue.push_back(*RI);
- }
-}
-
-void Region::replaceExitRecursive(BasicBlock *NewExit) {
- std::vector<Region *> RegionQueue;
- BasicBlock *OldExit = getExit();
-
- RegionQueue.push_back(this);
- while (!RegionQueue.empty()) {
- Region *R = RegionQueue.back();
- RegionQueue.pop_back();
-
- R->replaceExit(NewExit);
- for (Region::const_iterator RI = R->begin(), RE = R->end(); RI != RE; ++RI)
- if ((*RI)->getExit() == OldExit)
- RegionQueue.push_back(*RI);
- }
-}
-
-bool Region::contains(const BasicBlock *B) const {
- BasicBlock *BB = const_cast<BasicBlock*>(B);
- if (!DT->getNode(BB))
- return false;
- BasicBlock *entry = getEntry(), *exit = getExit();
-
- // Toplevel region.
- if (!exit)
- return true;
-
- return (DT->dominates(entry, BB)
- && !(DT->dominates(exit, BB) && DT->dominates(entry, exit)));
-}
-
-bool Region::contains(const Loop *L) const {
- // BBs that are not part of any loop are element of the Loop
- // described by the NULL pointer. This loop is not part of any region,
- // except if the region describes the whole function.
- if (L == 0)
- return getExit() == 0;
-
- if (!contains(L->getHeader()))
- return false;
-
- SmallVector<BasicBlock *, 8> ExitingBlocks;
- L->getExitingBlocks(ExitingBlocks);
-
- for (SmallVectorImpl<BasicBlock*>::iterator BI = ExitingBlocks.begin(),
- BE = ExitingBlocks.end(); BI != BE; ++BI)
- if (!contains(*BI))
- return false;
-
- return true;
-}
-
-Loop *Region::outermostLoopInRegion(Loop *L) const {
- if (!contains(L))
- return 0;
-
- while (L && contains(L->getParentLoop())) {
- L = L->getParentLoop();
- }
-
- return L;
-}
-
-Loop *Region::outermostLoopInRegion(LoopInfo *LI, BasicBlock* BB) const {
- assert(LI && BB && "LI and BB cannot be null!");
- Loop *L = LI->getLoopFor(BB);
- return outermostLoopInRegion(L);
-}
-
-BasicBlock *Region::getEnteringBlock() const {
- BasicBlock *entry = getEntry();
- BasicBlock *Pred;
- BasicBlock *enteringBlock = 0;
-
- for (pred_iterator PI = pred_begin(entry), PE = pred_end(entry); PI != PE;
- ++PI) {
- Pred = *PI;
- if (DT->getNode(Pred) && !contains(Pred)) {
- if (enteringBlock)
- return 0;
-
- enteringBlock = Pred;
- }
- }
-
- return enteringBlock;
-}
-
-BasicBlock *Region::getExitingBlock() const {
- BasicBlock *exit = getExit();
- BasicBlock *Pred;
- BasicBlock *exitingBlock = 0;
-
- if (!exit)
- return 0;
-
- for (pred_iterator PI = pred_begin(exit), PE = pred_end(exit); PI != PE;
- ++PI) {
- Pred = *PI;
- if (contains(Pred)) {
- if (exitingBlock)
- return 0;
-
- exitingBlock = Pred;
- }
- }
-
- return exitingBlock;
-}
-
-bool Region::isSimple() const {
- return !isTopLevelRegion() && getEnteringBlock() && getExitingBlock();
-}
-
-std::string Region::getNameStr() const {
- std::string exitName;
- std::string entryName;
-
- if (getEntry()->getName().empty()) {
- raw_string_ostream OS(entryName);
-
- WriteAsOperand(OS, getEntry(), false);
- } else
- entryName = getEntry()->getName();
-
- if (getExit()) {
- if (getExit()->getName().empty()) {
- raw_string_ostream OS(exitName);
-
- WriteAsOperand(OS, getExit(), false);
- } else
- exitName = getExit()->getName();
- } else
- exitName = "<Function Return>";
-
- return entryName + " => " + exitName;
-}
-
-void Region::verifyBBInRegion(BasicBlock *BB) const {
- if (!contains(BB))
- llvm_unreachable("Broken region found!");
-
- BasicBlock *entry = getEntry(), *exit = getExit();
-
- for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
- if (!contains(*SI) && exit != *SI)
- llvm_unreachable("Broken region found!");
-
- if (entry != BB)
- for (pred_iterator SI = pred_begin(BB), SE = pred_end(BB); SI != SE; ++SI)
- if (!contains(*SI))
- llvm_unreachable("Broken region found!");
-}
-
-void Region::verifyWalk(BasicBlock *BB, std::set<BasicBlock*> *visited) const {
- BasicBlock *exit = getExit();
-
- visited->insert(BB);
-
- verifyBBInRegion(BB);
-
- for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
- if (*SI != exit && visited->find(*SI) == visited->end())
- verifyWalk(*SI, visited);
-}
-
-void Region::verifyRegion() const {
- // Only do verification when user wants to, otherwise this expensive
- // check will be invoked by PassManager.
- if (!VerifyRegionInfo) return;
-
- std::set<BasicBlock*> visited;
- verifyWalk(getEntry(), &visited);
-}
-
-void Region::verifyRegionNest() const {
- for (Region::const_iterator RI = begin(), RE = end(); RI != RE; ++RI)
- (*RI)->verifyRegionNest();
-
- verifyRegion();
-}
-
-Region::element_iterator Region::element_begin() {
- return GraphTraits<Region*>::nodes_begin(this);
-}
-
-Region::element_iterator Region::element_end() {
- return GraphTraits<Region*>::nodes_end(this);
-}
-
-Region::const_element_iterator Region::element_begin() const {
- return GraphTraits<const Region*>::nodes_begin(this);
-}
-
-Region::const_element_iterator Region::element_end() const {
- return GraphTraits<const Region*>::nodes_end(this);
-}
-
-Region* Region::getSubRegionNode(BasicBlock *BB) const {
- Region *R = RI->getRegionFor(BB);
-
- if (!R || R == this)
- return 0;
-
- // If we pass the BB out of this region, that means our code is broken.
- assert(contains(R) && "BB not in current region!");
-
- while (contains(R->getParent()) && R->getParent() != this)
- R = R->getParent();
-
- if (R->getEntry() != BB)
- return 0;
-
- return R;
-}
-
-RegionNode* Region::getBBNode(BasicBlock *BB) const {
- assert(contains(BB) && "Can get BB node out of this region!");
-
- BBNodeMapT::const_iterator at = BBNodeMap.find(BB);
-
- if (at != BBNodeMap.end())
- return at->second;
-
- RegionNode *NewNode = new RegionNode(const_cast<Region*>(this), BB);
- BBNodeMap.insert(std::make_pair(BB, NewNode));
- return NewNode;
-}
-
-RegionNode* Region::getNode(BasicBlock *BB) const {
- assert(contains(BB) && "Can get BB node out of this region!");
- if (Region* Child = getSubRegionNode(BB))
- return Child->getNode();
-
- return getBBNode(BB);
-}
-
-void Region::transferChildrenTo(Region *To) {
- for (iterator I = begin(), E = end(); I != E; ++I) {
- (*I)->parent = To;
- To->children.push_back(*I);
- }
- children.clear();
-}
-
-void Region::addSubRegion(Region *SubRegion, bool moveChildren) {
- assert(SubRegion->parent == 0 && "SubRegion already has a parent!");
- assert(std::find(begin(), end(), SubRegion) == children.end()
- && "Subregion already exists!");
-
- SubRegion->parent = this;
- children.push_back(SubRegion);
-
- if (!moveChildren)
- return;
-
- assert(SubRegion->children.size() == 0
- && "SubRegions that contain children are not supported");
-
- for (element_iterator I = element_begin(), E = element_end(); I != E; ++I)
- if (!(*I)->isSubRegion()) {
- BasicBlock *BB = (*I)->getNodeAs<BasicBlock>();
-
- if (SubRegion->contains(BB))
- RI->setRegionFor(BB, SubRegion);
- }
-
- std::vector<Region*> Keep;
- for (iterator I = begin(), E = end(); I != E; ++I)
- if (SubRegion->contains(*I) && *I != SubRegion) {
- SubRegion->children.push_back(*I);
- (*I)->parent = SubRegion;
- } else
- Keep.push_back(*I);
-
- children.clear();
- children.insert(children.begin(), Keep.begin(), Keep.end());
-}
-
-
-Region *Region::removeSubRegion(Region *Child) {
- assert(Child->parent == this && "Child is not a child of this region!");
- Child->parent = 0;
- RegionSet::iterator I = std::find(children.begin(), children.end(), Child);
- assert(I != children.end() && "Region does not exit. Unable to remove.");
- children.erase(children.begin()+(I-begin()));
- return Child;
-}
-
-unsigned Region::getDepth() const {
- unsigned Depth = 0;
-
- for (Region *R = parent; R != 0; R = R->parent)
- ++Depth;
-
- return Depth;
-}
-
-Region *Region::getExpandedRegion() const {
- unsigned NumSuccessors = exit->getTerminator()->getNumSuccessors();
-
- if (NumSuccessors == 0)
- return NULL;
-
- for (pred_iterator PI = pred_begin(getExit()), PE = pred_end(getExit());
- PI != PE; ++PI)
- if (!DT->dominates(getEntry(), *PI))
- return NULL;
-
- Region *R = RI->getRegionFor(exit);
-
- if (R->getEntry() != exit) {
- if (exit->getTerminator()->getNumSuccessors() == 1)
- return new Region(getEntry(), *succ_begin(exit), RI, DT);
- else
- return NULL;
- }
-
- while (R->getParent() && R->getParent()->getEntry() == exit)
- R = R->getParent();
-
- if (!DT->dominates(getEntry(), R->getExit()))
- for (pred_iterator PI = pred_begin(getExit()), PE = pred_end(getExit());
- PI != PE; ++PI)
- if (!DT->dominates(R->getExit(), *PI))
- return NULL;
-
- return new Region(getEntry(), R->getExit(), RI, DT);
-}
-
-void Region::print(raw_ostream &OS, bool print_tree, unsigned level,
- enum PrintStyle Style) const {
- if (print_tree)
- OS.indent(level*2) << "[" << level << "] " << getNameStr();
- else
- OS.indent(level*2) << getNameStr();
-
- OS << "\n";
-
-
- if (Style != PrintNone) {
- OS.indent(level*2) << "{\n";
- OS.indent(level*2 + 2);
-
- if (Style == PrintBB) {
- for (const_block_iterator I = block_begin(), E = block_end(); I != E; ++I)
- OS << (*I)->getName() << ", "; // TODO: remove the last ","
- } else if (Style == PrintRN) {
- for (const_element_iterator I = element_begin(), E = element_end(); I!=E; ++I)
- OS << **I << ", "; // TODO: remove the last ",
- }
-
- OS << "\n";
- }
+//===----------------------------------------------------------------------===//
+// Region implementation
+//
- if (print_tree)
- for (const_iterator RI = begin(), RE = end(); RI != RE; ++RI)
- (*RI)->print(OS, print_tree, level+1, Style);
+Region::Region(BasicBlock *Entry, BasicBlock *Exit,
+ RegionInfo* RI,
+ DominatorTree *DT, Region *Parent) :
+ RegionBase<RegionTraits<Function>>(Entry, Exit, RI, DT, Parent) {
- if (Style != PrintNone)
- OS.indent(level*2) << "} \n";
}
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-void Region::dump() const {
- print(dbgs(), true, getDepth(), printStyle.getValue());
-}
-#endif
-
-void Region::clearNodeCache() {
- // Free the cached nodes.
- for (BBNodeMapT::iterator I = BBNodeMap.begin(),
- IE = BBNodeMap.end(); I != IE; ++I)
- delete I->second;
-
- BBNodeMap.clear();
- for (Region::iterator RI = begin(), RE = end(); RI != RE; ++RI)
- (*RI)->clearNodeCache();
-}
+Region::~Region() { }
//===----------------------------------------------------------------------===//
// RegionInfo implementation
//
-bool RegionInfo::isCommonDomFrontier(BasicBlock *BB, BasicBlock *entry,
- BasicBlock *exit) const {
- for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
- BasicBlock *P = *PI;
- if (DT->dominates(entry, P) && !DT->dominates(exit, P))
- return false;
- }
- return true;
-}
+RegionInfo::RegionInfo() :
+ RegionInfoBase<RegionTraits<Function>>() {
-bool RegionInfo::isRegion(BasicBlock *entry, BasicBlock *exit) const {
- assert(entry && exit && "entry and exit must not be null!");
- typedef DominanceFrontier::DomSetType DST;
-
- DST *entrySuccs = &DF->find(entry)->second;
-
- // Exit is the header of a loop that contains the entry. In this case,
- // the dominance frontier must only contain the exit.
- if (!DT->dominates(entry, exit)) {
- for (DST::iterator SI = entrySuccs->begin(), SE = entrySuccs->end();
- SI != SE; ++SI)
- if (*SI != exit && *SI != entry)
- return false;
-
- return true;
- }
-
- DST *exitSuccs = &DF->find(exit)->second;
-
- // Do not allow edges leaving the region.
- for (DST::iterator SI = entrySuccs->begin(), SE = entrySuccs->end();
- SI != SE; ++SI) {
- if (*SI == exit || *SI == entry)
- continue;
- if (exitSuccs->find(*SI) == exitSuccs->end())
- return false;
- if (!isCommonDomFrontier(*SI, entry, exit))
- return false;
- }
-
- // Do not allow edges pointing into the region.
- for (DST::iterator SI = exitSuccs->begin(), SE = exitSuccs->end();
- SI != SE; ++SI)
- if (DT->properlyDominates(entry, *SI) && *SI != exit)
- return false;
-
-
- return true;
-}
-
-void RegionInfo::insertShortCut(BasicBlock *entry, BasicBlock *exit,
- BBtoBBMap *ShortCut) const {
- assert(entry && exit && "entry and exit must not be null!");
-
- BBtoBBMap::iterator e = ShortCut->find(exit);
-
- if (e == ShortCut->end())
- // No further region at exit available.
- (*ShortCut)[entry] = exit;
- else {
- // We found a region e that starts at exit. Therefore (entry, e->second)
- // is also a region, that is larger than (entry, exit). Insert the
- // larger one.
- BasicBlock *BB = e->second;
- (*ShortCut)[entry] = BB;
- }
-}
-
-DomTreeNode* RegionInfo::getNextPostDom(DomTreeNode* N,
- BBtoBBMap *ShortCut) const {
- BBtoBBMap::iterator e = ShortCut->find(N->getBlock());
-
- if (e == ShortCut->end())
- return N->getIDom();
-
- return PDT->getNode(e->second)->getIDom();
}
-bool RegionInfo::isTrivialRegion(BasicBlock *entry, BasicBlock *exit) const {
- assert(entry && exit && "entry and exit must not be null!");
-
- unsigned num_successors = succ_end(entry) - succ_begin(entry);
-
- if (num_successors <= 1 && exit == *(succ_begin(entry)))
- return true;
+RegionInfo::~RegionInfo() {
- return false;
}
void RegionInfo::updateStatistics(Region *R) {
++numRegions;
// TODO: Slow. Should only be enabled if -stats is used.
- if (R->isSimple()) ++numSimpleRegions;
-}
-
-Region *RegionInfo::createRegion(BasicBlock *entry, BasicBlock *exit) {
- assert(entry && exit && "entry and exit must not be null!");
-
- if (isTrivialRegion(entry, exit))
- return 0;
-
- Region *region = new Region(entry, exit, this, DT);
- BBtoRegion.insert(std::make_pair(entry, region));
-
- #ifdef XDEBUG
- region->verifyRegion();
- #else
- DEBUG(region->verifyRegion());
- #endif
-
- updateStatistics(region);
- return region;
-}
-
-void RegionInfo::findRegionsWithEntry(BasicBlock *entry, BBtoBBMap *ShortCut) {
- assert(entry);
-
- DomTreeNode *N = PDT->getNode(entry);
-
- if (!N)
- return;
-
- Region *lastRegion= 0;
- BasicBlock *lastExit = entry;
-
- // As only a BasicBlock that postdominates entry can finish a region, walk the
- // post dominance tree upwards.
- while ((N = getNextPostDom(N, ShortCut))) {
- BasicBlock *exit = N->getBlock();
-
- if (!exit)
- break;
-
- if (isRegion(entry, exit)) {
- Region *newRegion = createRegion(entry, exit);
-
- if (lastRegion)
- newRegion->addSubRegion(lastRegion);
-
- lastRegion = newRegion;
- lastExit = exit;
- }
-
- // This can never be a region, so stop the search.
- if (!DT->dominates(entry, exit))
- break;
- }
-
- // Tried to create regions from entry to lastExit. Next time take a
- // shortcut from entry to lastExit.
- if (lastExit != entry)
- insertShortCut(entry, lastExit, ShortCut);
-}
-
-void RegionInfo::scanForRegions(Function &F, BBtoBBMap *ShortCut) {
- BasicBlock *entry = &(F.getEntryBlock());
- DomTreeNode *N = DT->getNode(entry);
-
- // Iterate over the dominance tree in post order to start with the small
- // regions from the bottom of the dominance tree. If the small regions are
- // detected first, detection of bigger regions is faster, as we can jump
- // over the small regions.
- for (po_iterator<DomTreeNode*> FI = po_begin(N), FE = po_end(N); FI != FE;
- ++FI) {
- findRegionsWithEntry(FI->getBlock(), ShortCut);
- }
+ if (R->isSimple())
+ ++numSimpleRegions;
}
-Region *RegionInfo::getTopMostParent(Region *region) {
- while (region->parent)
- region = region->getParent();
+void RegionInfo::recalculate(Function &F, DominatorTree *DT_,
+ PostDominatorTree *PDT_, DominanceFrontier *DF_) {
+ DT = DT_;
+ PDT = PDT_;
+ DF = DF_;
- return region;
+ TopLevelRegion = new Region(&F.getEntryBlock(), nullptr,
+ this, DT, nullptr);
+ updateStatistics(TopLevelRegion);
+ calculate(F);
}
-void RegionInfo::buildRegionsTree(DomTreeNode *N, Region *region) {
- BasicBlock *BB = N->getBlock();
-
- // Passed region exit
- while (BB == region->getExit())
- region = region->getParent();
-
- BBtoRegionMap::iterator it = BBtoRegion.find(BB);
-
- // This basic block is a start block of a region. It is already in the
- // BBtoRegion relation. Only the child basic blocks have to be updated.
- if (it != BBtoRegion.end()) {
- Region *newRegion = it->second;
- region->addSubRegion(getTopMostParent(newRegion));
- region = newRegion;
- } else {
- BBtoRegion[BB] = region;
- }
+//===----------------------------------------------------------------------===//
+// RegionInfoPass implementation
+//
- for (DomTreeNode::iterator CI = N->begin(), CE = N->end(); CI != CE; ++CI)
- buildRegionsTree(*CI, region);
+RegionInfoPass::RegionInfoPass() : FunctionPass(ID) {
+ initializeRegionInfoPassPass(*PassRegistry::getPassRegistry());
}
-void RegionInfo::releaseMemory() {
- BBtoRegion.clear();
- if (TopLevelRegion)
- delete TopLevelRegion;
- TopLevelRegion = 0;
-}
+RegionInfoPass::~RegionInfoPass() {
-RegionInfo::RegionInfo() : FunctionPass(ID) {
- initializeRegionInfoPass(*PassRegistry::getPassRegistry());
- TopLevelRegion = 0;
}
-RegionInfo::~RegionInfo() {
+bool RegionInfoPass::runOnFunction(Function &F) {
releaseMemory();
-}
-void RegionInfo::Calculate(Function &F) {
- // ShortCut a function where for every BB the exit of the largest region
- // starting with BB is stored. These regions can be threated as single BBS.
- // This improves performance on linear CFGs.
- BBtoBBMap ShortCut;
+ auto DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ auto PDT = &getAnalysis<PostDominatorTree>();
+ auto DF = &getAnalysis<DominanceFrontier>();
- scanForRegions(F, &ShortCut);
- BasicBlock *BB = &F.getEntryBlock();
- buildRegionsTree(DT->getNode(BB), TopLevelRegion);
+ RI.recalculate(F, DT, PDT, DF);
+ return false;
}
-bool RegionInfo::runOnFunction(Function &F) {
- releaseMemory();
-
- DT = &getAnalysis<DominatorTree>();
- PDT = &getAnalysis<PostDominatorTree>();
- DF = &getAnalysis<DominanceFrontier>();
-
- TopLevelRegion = new Region(&F.getEntryBlock(), 0, this, DT, 0);
- updateStatistics(TopLevelRegion);
-
- Calculate(F);
+void RegionInfoPass::releaseMemory() {
+ RI.releaseMemory();
+}
- return false;
+void RegionInfoPass::verifyAnalysis() const {
+ RI.verifyAnalysis();
}
-void RegionInfo::getAnalysisUsage(AnalysisUsage &AU) const {
+void RegionInfoPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
- AU.addRequiredTransitive<DominatorTree>();
+ AU.addRequiredTransitive<DominatorTreeWrapperPass>();
AU.addRequired<PostDominatorTree>();
AU.addRequired<DominanceFrontier>();
}
-void RegionInfo::print(raw_ostream &OS, const Module *) const {
- OS << "Region tree:\n";
- TopLevelRegion->print(OS, true, 0, printStyle.getValue());
- OS << "End region tree\n";
+void RegionInfoPass::print(raw_ostream &OS, const Module *) const {
+ RI.print(OS);
}
-void RegionInfo::verifyAnalysis() const {
- // Only do verification when user wants to, otherwise this expensive check
- // will be invoked by PMDataManager::verifyPreservedAnalysis when
- // a regionpass (marked PreservedAll) finish.
- if (!VerifyRegionInfo) return;
-
- TopLevelRegion->verifyRegionNest();
-}
-
-// Region pass manager support.
-Region *RegionInfo::getRegionFor(BasicBlock *BB) const {
- BBtoRegionMap::const_iterator I=
- BBtoRegion.find(BB);
- return I != BBtoRegion.end() ? I->second : 0;
-}
-
-void RegionInfo::setRegionFor(BasicBlock *BB, Region *R) {
- BBtoRegion[BB] = R;
-}
-
-Region *RegionInfo::operator[](BasicBlock *BB) const {
- return getRegionFor(BB);
-}
-
-BasicBlock *RegionInfo::getMaxRegionExit(BasicBlock *BB) const {
- BasicBlock *Exit = NULL;
-
- while (true) {
- // Get largest region that starts at BB.
- Region *R = getRegionFor(BB);
- while (R && R->getParent() && R->getParent()->getEntry() == BB)
- R = R->getParent();
-
- // Get the single exit of BB.
- if (R && R->getEntry() == BB)
- Exit = R->getExit();
- else if (++succ_begin(BB) == succ_end(BB))
- Exit = *succ_begin(BB);
- else // No single exit exists.
- return Exit;
-
- // Get largest region that starts at Exit.
- Region *ExitR = getRegionFor(Exit);
- while (ExitR && ExitR->getParent()
- && ExitR->getParent()->getEntry() == Exit)
- ExitR = ExitR->getParent();
-
- for (pred_iterator PI = pred_begin(Exit), PE = pred_end(Exit); PI != PE;
- ++PI)
- if (!R->contains(*PI) && !ExitR->contains(*PI))
- break;
-
- // This stops infinite cycles.
- if (DT->dominates(Exit, BB))
- break;
-
- BB = Exit;
- }
-
- return Exit;
-}
-
-Region*
-RegionInfo::getCommonRegion(Region *A, Region *B) const {
- assert (A && B && "One of the Regions is NULL");
-
- if (A->contains(B)) return A;
-
- while (!B->contains(A))
- B = B->getParent();
-
- return B;
-}
-
-Region*
-RegionInfo::getCommonRegion(SmallVectorImpl<Region*> &Regions) const {
- Region* ret = Regions.back();
- Regions.pop_back();
-
- for (SmallVectorImpl<Region*>::const_iterator I = Regions.begin(),
- E = Regions.end(); I != E; ++I)
- ret = getCommonRegion(ret, *I);
-
- return ret;
-}
-
-Region*
-RegionInfo::getCommonRegion(SmallVectorImpl<BasicBlock*> &BBs) const {
- Region* ret = getRegionFor(BBs.back());
- BBs.pop_back();
-
- for (SmallVectorImpl<BasicBlock*>::const_iterator I = BBs.begin(),
- E = BBs.end(); I != E; ++I)
- ret = getCommonRegion(ret, getRegionFor(*I));
-
- return ret;
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+void RegionInfoPass::dump() const {
+ RI.dump();
}
+#endif
-void RegionInfo::splitBlock(BasicBlock* NewBB, BasicBlock *OldBB)
-{
- Region *R = getRegionFor(OldBB);
-
- setRegionFor(NewBB, R);
-
- while (R->getEntry() == OldBB && !R->isTopLevelRegion()) {
- R->replaceEntry(NewBB);
- R = R->getParent();
- }
-
- setRegionFor(OldBB, R);
-}
+char RegionInfoPass::ID = 0;
-char RegionInfo::ID = 0;
-INITIALIZE_PASS_BEGIN(RegionInfo, "regions",
+INITIALIZE_PASS_BEGIN(RegionInfoPass, "regions",
"Detect single entry single exit regions", true, true)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTree)
INITIALIZE_PASS_DEPENDENCY(DominanceFrontier)
-INITIALIZE_PASS_END(RegionInfo, "regions",
+INITIALIZE_PASS_END(RegionInfoPass, "regions",
"Detect single entry single exit regions", true, true)
// Create methods available outside of this file, to use them
@@ -859,7 +167,7 @@ INITIALIZE_PASS_END(RegionInfo, "regions",
namespace llvm {
FunctionPass *createRegionInfoPass() {
- return new RegionInfo();
+ return new RegionInfoPass();
}
}
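
Not part of the patch: with the region analysis now split into a templated RegionInfo object wrapped by RegionInfoPass, clients reach the region tree through the wrapper, as RGPassManager does in the next file. A sketch under that assumption; ExampleRegionClient is hypothetical and unregistered:

#include "llvm/Analysis/RegionInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
using namespace llvm;

namespace {
struct ExampleRegionClient : FunctionPass {
  static char ID;
  ExampleRegionClient() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<RegionInfoPass>();   // was: AU.addRequired<RegionInfo>()
    AU.setPreservesAll();
  }

  bool runOnFunction(Function &F) override {
    RegionInfo &RI = getAnalysis<RegionInfoPass>().getRegionInfo();
    (void)RI.getTopLevelRegion();       // walk regions from the top level
    return false;
  }
};
}
char ExampleRegionClient::ID = 0;
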
diff --git a/contrib/llvm/lib/Analysis/RegionPass.cpp b/contrib/llvm/lib/Analysis/RegionPass.cpp
index 9208fa2..de34b72 100644
--- a/contrib/llvm/lib/Analysis/RegionPass.cpp
+++ b/contrib/llvm/lib/Analysis/RegionPass.cpp
@@ -17,10 +17,11 @@
#include "llvm/Analysis/RegionIterator.h"
#include "llvm/Support/Timer.h"
-#define DEBUG_TYPE "regionpassmgr"
#include "llvm/Support/Debug.h"
using namespace llvm;
+#define DEBUG_TYPE "regionpassmgr"
+
//===----------------------------------------------------------------------===//
// RGPassManager
//
@@ -31,33 +32,33 @@ RGPassManager::RGPassManager()
: FunctionPass(ID), PMDataManager() {
skipThisRegion = false;
redoThisRegion = false;
- RI = NULL;
- CurrentRegion = NULL;
+ RI = nullptr;
+ CurrentRegion = nullptr;
}
// Recurse through all subregions and all regions into RQ.
-static void addRegionIntoQueue(Region *R, std::deque<Region *> &RQ) {
- RQ.push_back(R);
- for (Region::iterator I = R->begin(), E = R->end(); I != E; ++I)
- addRegionIntoQueue(*I, RQ);
+static void addRegionIntoQueue(Region &R, std::deque<Region *> &RQ) {
+ RQ.push_back(&R);
+ for (const auto &E : R)
+ addRegionIntoQueue(*E, RQ);
}
/// Pass Manager itself does not invalidate any analysis info.
void RGPassManager::getAnalysisUsage(AnalysisUsage &Info) const {
- Info.addRequired<RegionInfo>();
+ Info.addRequired<RegionInfoPass>();
Info.setPreservesAll();
}
/// run - Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the function, and if so, return true.
bool RGPassManager::runOnFunction(Function &F) {
- RI = &getAnalysis<RegionInfo>();
+ RI = &getAnalysis<RegionInfoPass>().getRegionInfo();
bool Changed = false;
// Collect inherited analysis from Module level pass manager.
populateInheritedAnalysis(TPM->activeStack);
- addRegionIntoQueue(RI->getTopLevelRegion(), RQ);
+ addRegionIntoQueue(*RI->getTopLevelRegion(), RQ);
if (RQ.empty()) // No regions, skip calling finalizers
return false;
@@ -185,19 +186,21 @@ private:
public:
static char ID;
- PrintRegionPass() : RegionPass(ID), Out(dbgs()) {}
PrintRegionPass(const std::string &B, raw_ostream &o)
: RegionPass(ID), Banner(B), Out(o) {}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
- virtual bool runOnRegion(Region *R, RGPassManager &RGM) {
+ bool runOnRegion(Region *R, RGPassManager &RGM) override {
Out << Banner;
- for (Region::block_iterator I = R->block_begin(), E = R->block_end();
- I != E; ++I)
- (*I)->print(Out);
+ for (const auto &BB : R->blocks()) {
+ if (BB)
+ BB->print(Out);
+ else
+ Out << "Printing <null> Block";
+ }
return false;
}
diff --git a/contrib/llvm/lib/Analysis/RegionPrinter.cpp b/contrib/llvm/lib/Analysis/RegionPrinter.cpp
index c5f1b92..ad83113 100644
--- a/contrib/llvm/lib/Analysis/RegionPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/RegionPrinter.cpp
@@ -56,23 +56,24 @@ struct DOTGraphTraits<RegionNode*> : public DefaultDOTGraphTraits {
};
template<>
-struct DOTGraphTraits<RegionInfo*> : public DOTGraphTraits<RegionNode*> {
+struct DOTGraphTraits<RegionInfoPass*> : public DOTGraphTraits<RegionNode*> {
- DOTGraphTraits (bool isSimple=false)
+ DOTGraphTraits (bool isSimple = false)
: DOTGraphTraits<RegionNode*>(isSimple) {}
- static std::string getGraphName(RegionInfo *DT) {
+ static std::string getGraphName(RegionInfoPass *DT) {
return "Region Graph";
}
- std::string getNodeLabel(RegionNode *Node, RegionInfo *G) {
+ std::string getNodeLabel(RegionNode *Node, RegionInfoPass *G) {
+ RegionInfo &RI = G->getRegionInfo();
return DOTGraphTraits<RegionNode*>::getNodeLabel(Node,
- G->getTopLevelRegion());
+ reinterpret_cast<RegionNode*>(RI.getTopLevelRegion()));
}
std::string getEdgeAttributes(RegionNode *srcNode,
- GraphTraits<RegionInfo*>::ChildIteratorType CI, RegionInfo *RI) {
-
+ GraphTraits<RegionInfo*>::ChildIteratorType CI, RegionInfoPass *G) {
+ RegionInfo &RI = G->getRegionInfo();
RegionNode *destNode = *CI;
if (srcNode->isSubRegion() || destNode->isSubRegion())
@@ -82,7 +83,7 @@ struct DOTGraphTraits<RegionInfo*> : public DOTGraphTraits<RegionNode*> {
BasicBlock *srcBB = srcNode->getNodeAs<BasicBlock>();
BasicBlock *destBB = destNode->getNodeAs<BasicBlock>();
- Region *R = RI->getRegionFor(destBB);
+ Region *R = RI.getRegionFor(destBB);
while (R && R->getParent())
if (R->getParent()->getEntry() == destBB)
@@ -98,44 +99,45 @@ struct DOTGraphTraits<RegionInfo*> : public DOTGraphTraits<RegionNode*> {
// Print the cluster of the subregions. This groups the single basic blocks
// and adds a different background color for each group.
- static void printRegionCluster(const Region *R, GraphWriter<RegionInfo*> &GW,
+ static void printRegionCluster(const Region &R,
+ GraphWriter<RegionInfoPass*> &GW,
unsigned depth = 0) {
raw_ostream &O = GW.getOStream();
- O.indent(2 * depth) << "subgraph cluster_" << static_cast<const void*>(R)
+ O.indent(2 * depth) << "subgraph cluster_" << static_cast<const void*>(&R)
<< " {\n";
O.indent(2 * (depth + 1)) << "label = \"\";\n";
- if (!onlySimpleRegions || R->isSimple()) {
+ if (!onlySimpleRegions || R.isSimple()) {
O.indent(2 * (depth + 1)) << "style = filled;\n";
O.indent(2 * (depth + 1)) << "color = "
- << ((R->getDepth() * 2 % 12) + 1) << "\n";
+ << ((R.getDepth() * 2 % 12) + 1) << "\n";
} else {
O.indent(2 * (depth + 1)) << "style = solid;\n";
O.indent(2 * (depth + 1)) << "color = "
- << ((R->getDepth() * 2 % 12) + 2) << "\n";
+ << ((R.getDepth() * 2 % 12) + 2) << "\n";
}
- for (Region::const_iterator RI = R->begin(), RE = R->end(); RI != RE; ++RI)
- printRegionCluster(*RI, GW, depth + 1);
+ for (Region::const_iterator RI = R.begin(), RE = R.end(); RI != RE; ++RI)
+ printRegionCluster(**RI, GW, depth + 1);
- RegionInfo *RI = R->getRegionInfo();
+ const RegionInfo &RI = *static_cast<const RegionInfo*>(R.getRegionInfo());
- for (Region::const_block_iterator BI = R->block_begin(),
- BE = R->block_end(); BI != BE; ++BI)
- if (RI->getRegionFor(*BI) == R)
+ for (const auto &BB : R.blocks())
+ if (RI.getRegionFor(BB) == &R)
O.indent(2 * (depth + 1)) << "Node"
- << static_cast<const void*>(RI->getTopLevelRegion()->getBBNode(*BI))
+ << static_cast<const void*>(RI.getTopLevelRegion()->getBBNode(BB))
<< ";\n";
O.indent(2 * depth) << "}\n";
}
- static void addCustomGraphFeatures(const RegionInfo* RI,
- GraphWriter<RegionInfo*> &GW) {
+ static void addCustomGraphFeatures(const RegionInfoPass* RIP,
+ GraphWriter<RegionInfoPass*> &GW) {
+ const RegionInfo &RI = RIP->getRegionInfo();
raw_ostream &O = GW.getOStream();
O << "\tcolorscheme = \"paired12\"\n";
- printRegionCluster(RI->getTopLevelRegion(), GW, 4);
+ printRegionCluster(*RI.getTopLevelRegion(), GW, 4);
}
};
} //end namespace llvm
@@ -143,28 +145,28 @@ struct DOTGraphTraits<RegionInfo*> : public DOTGraphTraits<RegionNode*> {
namespace {
struct RegionViewer
- : public DOTGraphTraitsViewer<RegionInfo, false> {
+ : public DOTGraphTraitsViewer<RegionInfoPass, false> {
static char ID;
- RegionViewer() : DOTGraphTraitsViewer<RegionInfo, false>("reg", ID){
+ RegionViewer() : DOTGraphTraitsViewer<RegionInfoPass, false>("reg", ID){
initializeRegionViewerPass(*PassRegistry::getPassRegistry());
}
};
char RegionViewer::ID = 0;
struct RegionOnlyViewer
- : public DOTGraphTraitsViewer<RegionInfo, true> {
+ : public DOTGraphTraitsViewer<RegionInfoPass, true> {
static char ID;
- RegionOnlyViewer() : DOTGraphTraitsViewer<RegionInfo, true>("regonly", ID) {
+ RegionOnlyViewer() : DOTGraphTraitsViewer<RegionInfoPass, true>("regonly", ID) {
initializeRegionOnlyViewerPass(*PassRegistry::getPassRegistry());
}
};
char RegionOnlyViewer::ID = 0;
struct RegionPrinter
- : public DOTGraphTraitsPrinter<RegionInfo, false> {
+ : public DOTGraphTraitsPrinter<RegionInfoPass, false> {
static char ID;
RegionPrinter() :
- DOTGraphTraitsPrinter<RegionInfo, false>("reg", ID) {
+ DOTGraphTraitsPrinter<RegionInfoPass, false>("reg", ID) {
initializeRegionPrinterPass(*PassRegistry::getPassRegistry());
}
};
@@ -176,7 +178,7 @@ INITIALIZE_PASS(RegionPrinter, "dot-regions",
INITIALIZE_PASS(RegionViewer, "view-regions", "View regions of function",
true, true)
-
+
INITIALIZE_PASS(RegionOnlyViewer, "view-regions-only",
"View regions of function (with no function bodies)",
true, true)
@@ -184,10 +186,10 @@ INITIALIZE_PASS(RegionOnlyViewer, "view-regions-only",
namespace {
struct RegionOnlyPrinter
- : public DOTGraphTraitsPrinter<RegionInfo, true> {
+ : public DOTGraphTraitsPrinter<RegionInfoPass, true> {
static char ID;
RegionOnlyPrinter() :
- DOTGraphTraitsPrinter<RegionInfo, true>("reg", ID) {
+ DOTGraphTraitsPrinter<RegionInfoPass, true>("reg", ID) {
initializeRegionOnlyPrinterPass(*PassRegistry::getPassRegistry());
}
};
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
index d9b696e..06dbde5 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -58,38 +58,38 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Assembly/Writer.h"
+#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "scalar-evolution"
+
STATISTIC(NumArrayLenItCounts,
"Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
@@ -114,7 +114,7 @@ VerifySCEV("verify-scev",
INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
"Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
"Scalar Evolution Analysis", false, true)
@@ -136,9 +136,9 @@ void SCEV::dump() const {
#endif
void SCEV::print(raw_ostream &OS) const {
- switch (getSCEVType()) {
+ switch (static_cast<SCEVTypes>(getSCEVType())) {
case scConstant:
- WriteAsOperand(OS, cast<SCEVConstant>(this)->getValue(), false);
+ cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
return;
case scTruncate: {
const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
@@ -174,7 +174,7 @@ void SCEV::print(raw_ostream &OS) const {
if (AR->getNoWrapFlags(FlagNW) &&
!AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
OS << "nw><";
- WriteAsOperand(OS, AR->getLoop()->getHeader(), /*PrintType=*/false);
+ AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
OS << ">";
return;
}
@@ -183,7 +183,7 @@ void SCEV::print(raw_ostream &OS) const {
case scUMaxExpr:
case scSMaxExpr: {
const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
- const char *OpStr = 0;
+ const char *OpStr = nullptr;
switch (NAry->getSCEVType()) {
case scAddExpr: OpStr = " + "; break;
case scMulExpr: OpStr = " * "; break;
@@ -194,7 +194,7 @@ void SCEV::print(raw_ostream &OS) const {
for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
I != E; ++I) {
OS << **I;
- if (llvm::next(I) != E)
+ if (std::next(I) != E)
OS << OpStr;
}
OS << ")";
@@ -229,25 +229,24 @@ void SCEV::print(raw_ostream &OS) const {
Constant *FieldNo;
if (U->isOffsetOf(CTy, FieldNo)) {
OS << "offsetof(" << *CTy << ", ";
- WriteAsOperand(OS, FieldNo, false);
+ FieldNo->printAsOperand(OS, false);
OS << ")";
return;
}
// Otherwise just print it normally.
- WriteAsOperand(OS, U->getValue(), false);
+ U->getValue()->printAsOperand(OS, false);
return;
}
case scCouldNotCompute:
OS << "***COULDNOTCOMPUTE***";
return;
- default: break;
}
llvm_unreachable("Unknown SCEV kind!");
}
Type *SCEV::getType() const {
- switch (getSCEVType()) {
+ switch (static_cast<SCEVTypes>(getSCEVType())) {
case scConstant:
return cast<SCEVConstant>(this)->getType();
case scTruncate:
@@ -267,9 +266,8 @@ Type *SCEV::getType() const {
return cast<SCEVUnknown>(this)->getType();
case scCouldNotCompute:
llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- default:
- llvm_unreachable("Unknown SCEV kind!");
}
+ llvm_unreachable("Unknown SCEV kind!");
}
bool SCEV::isZero() const {
@@ -315,14 +313,14 @@ const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
FoldingSetNodeID ID;
ID.AddInteger(scConstant);
ID.AddPointer(V);
- void *IP = 0;
+ void *IP = nullptr;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
-const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
+const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
return getConstant(ConstantInt::get(getContext(), Val));
}
@@ -368,7 +366,7 @@ void SCEVUnknown::deleted() {
SE->UniqueSCEVs.RemoveNode(this);
// Release the value.
- setValPtr(0);
+ setValPtr(nullptr);
}
void SCEVUnknown::allUsesReplacedWith(Value *New) {
@@ -482,7 +480,7 @@ namespace {
// Aside from the getSCEVType() ordering, the particular ordering
// isn't very important except that it's beneficial to be consistent,
// so that (a + b) and (b + a) don't end up as different expressions.
- switch (LType) {
+ switch (static_cast<SCEVTypes>(LType)) {
case scUnknown: {
const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);
@@ -619,9 +617,10 @@ namespace {
return compare(LC->getOperand(), RC->getOperand());
}
- default:
- llvm_unreachable("Unknown SCEV kind!");
+ case scCouldNotCompute:
+ llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
}
+ llvm_unreachable("Unknown SCEV kind!");
}
};
}
@@ -831,7 +830,7 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
ID.AddInteger(scTruncate);
ID.AddPointer(Op);
ID.AddPointer(Ty);
- void *IP = 0;
+ void *IP = nullptr;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
// Fold if the operand is constant.
@@ -921,7 +920,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
ID.AddInteger(scZeroExtend);
ID.AddPointer(Op);
ID.AddPointer(Ty);
- void *IP = 0;
+ void *IP = nullptr;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
// zext(trunc(x)) --> zext(x) or x or trunc(x)
@@ -1074,7 +1073,7 @@ static const SCEV *getOverflowLimitForStep(const SCEV *Step,
return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
SE->getSignedRange(Step).getSignedMin());
}
- return 0;
+ return nullptr;
}
// The recurrence AR has been shown to have no signed wrap. Typically, if we can
@@ -1093,19 +1092,18 @@ static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
// Check for a simple looking step prior to loop entry.
const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
if (!SA)
- return 0;
+ return nullptr;
// Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
// subtraction is expensive. For this purpose, perform a quick and dirty
// difference, by checking for Step in the operand list.
SmallVector<const SCEV *, 4> DiffOps;
- for (SCEVAddExpr::op_iterator I = SA->op_begin(), E = SA->op_end();
- I != E; ++I) {
- if (*I != Step)
- DiffOps.push_back(*I);
- }
+ for (const SCEV *Op : SA->operands())
+ if (Op != Step)
+ DiffOps.push_back(Op);
+
if (DiffOps.size() == SA->getNumOperands())
- return 0;
+ return nullptr;
// This is a postinc AR. Check for overflow on the preinc recurrence using the
// same three conditions that getSignExtendedExpr checks.
@@ -1141,7 +1139,7 @@ static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) {
return PreStart;
}
- return 0;
+ return nullptr;
}
// Get the normalized sign-extended expression for this AddRec's Start.
@@ -1183,7 +1181,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
ID.AddInteger(scSignExtend);
ID.AddPointer(Op);
ID.AddPointer(Ty);
- void *IP = 0;
+ void *IP = nullptr;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
// If the input value is provably positive, build a zext instead.
@@ -1203,6 +1201,23 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
return getTruncateOrSignExtend(X, Ty);
}
+ // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if C1 < C2
+ if (auto SA = dyn_cast<SCEVAddExpr>(Op)) {
+ if (SA->getNumOperands() == 2) {
+ auto SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0));
+ auto SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1));
+ if (SMul && SC1) {
+ if (auto SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) {
+ const APInt &C1 = SC1->getValue()->getValue();
+ const APInt &C2 = SC2->getValue()->getValue();
+ if (C1.isStrictlyPositive() && C2.isStrictlyPositive() &&
+ C2.ugt(C1) && C2.isPowerOf2())
+ return getAddExpr(getSignExtendExpr(SC1, Ty),
+ getSignExtendExpr(SMul, Ty));
+ }
+ }
+ }
+ }
// If the input value is a chrec scev, and we can prove that the value
// did not overflow the old, smaller, value, we can sign extend all of the
// operands (often constants). This allows analysis of something like
@@ -1294,6 +1309,22 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
L, AR->getNoWrapFlags());
}
}
+ // If Start and Step are constants, check if we can apply this
+ // transformation:
+ // sext{C1,+,C2} --> C1 + sext{0,+,C2} if C1 < C2
+ auto SC1 = dyn_cast<SCEVConstant>(Start);
+ auto SC2 = dyn_cast<SCEVConstant>(Step);
+ if (SC1 && SC2) {
+ const APInt &C1 = SC1->getValue()->getValue();
+ const APInt &C2 = SC2->getValue()->getValue();
+ if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) &&
+ C2.isPowerOf2()) {
+ Start = getSignExtendExpr(Start, Ty);
+ const SCEV *NewAR = getAddRecExpr(getConstant(AR->getType(), 0), Step,
+ L, AR->getNoWrapFlags());
+ return getAddExpr(Start, getSignExtendExpr(NewAR, Ty));
+ }
+ }
}
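// Illustrative only, not part of this commit: a numeric spot-check of the
// fold added above, sext(C1 + C2 * x) == C1 + sext(C2 * x) for
// 0 < C1 < C2 with C2 a power of two, instantiated with C1 = 1, C2 = 2 on
// i8 -> i32. Because C2 * x is always a multiple of C2, adding C1 < C2
// never carries the sum across the signed wrap point relative to C2 * x.
// The narrowing casts are assumed to wrap as on two's-complement targets.
#include <cassert>
#include <cstdint>

static void checkSignExtendFold() {
  for (int v = -128; v < 128; ++v) {
    int8_t x = static_cast<int8_t>(v);
    int8_t mul = static_cast<int8_t>(2 * x);        // C2 * x, truncated to i8
    int32_t folded = 1 + static_cast<int32_t>(mul); // C1 + sext(C2 * x)
    int32_t direct = static_cast<int8_t>(mul + 1);  // sext(C1 + C2 * x)
    assert(folded == direct);
  }
}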
// The cast wasn't folded; create an explicit cast node.
@@ -1342,9 +1373,8 @@ const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
// Force the cast to be folded into the operands of an addrec.
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
SmallVector<const SCEV *, 4> Ops;
- for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
- I != E; ++I)
- Ops.push_back(getAnyExtendExpr(*I, Ty));
+ for (const SCEV *Op : AR->operands())
+ Ops.push_back(getAnyExtendExpr(Op, Ty));
return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
}
@@ -1362,7 +1392,7 @@ const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
-/// m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
+/// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
@@ -1813,7 +1843,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
ID.AddInteger(scAddExpr);
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
ID.AddPointer(Ops[i]);
- void *IP = 0;
+ void *IP = nullptr;
SCEVAddExpr *S =
static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
if (!S) {
@@ -2107,7 +2137,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
ID.AddInteger(scMulExpr);
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
ID.AddPointer(Ops[i]);
- void *IP = 0;
+ void *IP = nullptr;
SCEVMulExpr *S =
static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
if (!S) {
@@ -2232,7 +2262,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
ID.AddInteger(scUDivExpr);
ID.AddPointer(LHS);
ID.AddPointer(RHS);
- void *IP = 0;
+ void *IP = nullptr;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
LHS, RHS);
@@ -2240,6 +2270,77 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
return S;
}
+static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
+ APInt A = C1->getValue()->getValue().abs();
+ APInt B = C2->getValue()->getValue().abs();
+ uint32_t ABW = A.getBitWidth();
+ uint32_t BBW = B.getBitWidth();
+
+ if (ABW > BBW)
+ B = B.zext(ABW);
+ else if (ABW < BBW)
+ A = A.zext(BBW);
+
+ return APIntOps::GreatestCommonDivisor(A, B);
+}
+
+/// getUDivExactExpr - Get a canonical unsigned division expression, or
+/// something simpler if possible. There is no representation for an exact udiv
+/// in SCEV IR, but we can attempt to remove factors from the LHS and RHS.
+/// We can't do this when it's not exact because the udiv may be clearing bits.
+const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
+ const SCEV *RHS) {
+ // TODO: we could try to find factors in all sorts of things, but for now we
+ // just deal with u/exact (multiply, constant). See SCEVDivision towards the
+ // end of this file for inspiration.
+
+ const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
+ if (!Mul)
+ return getUDivExpr(LHS, RHS);
+
+ if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
+ // If the mulexpr multiplies by a constant, then that constant must be the
+ // first element of the mulexpr.
+ if (const SCEVConstant *LHSCst =
+ dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
+ if (LHSCst == RHSCst) {
+ SmallVector<const SCEV *, 2> Operands;
+ Operands.append(Mul->op_begin() + 1, Mul->op_end());
+ return getMulExpr(Operands);
+ }
+
+ // We can't just assume that LHSCst divides RHSCst cleanly, it could be
+ // that there's a factor provided by one of the other terms. We need to
+ // check.
+ APInt Factor = gcd(LHSCst, RHSCst);
+ if (!Factor.isIntN(1)) {
+ LHSCst = cast<SCEVConstant>(
+ getConstant(LHSCst->getValue()->getValue().udiv(Factor)));
+ RHSCst = cast<SCEVConstant>(
+ getConstant(RHSCst->getValue()->getValue().udiv(Factor)));
+ SmallVector<const SCEV *, 2> Operands;
+ Operands.push_back(LHSCst);
+ Operands.append(Mul->op_begin() + 1, Mul->op_end());
+ LHS = getMulExpr(Operands);
+ RHS = RHSCst;
+ Mul = dyn_cast<SCEVMulExpr>(LHS);
+ if (!Mul)
+ return getUDivExactExpr(LHS, RHS);
+ }
+ }
+ }
+
+ for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
+ if (Mul->getOperand(i) == RHS) {
+ SmallVector<const SCEV *, 2> Operands;
+ Operands.append(Mul->op_begin(), Mul->op_begin() + i);
+ Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
+ return getMulExpr(Operands);
+ }
+ }
+
+ return getUDivExpr(LHS, RHS);
+}
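// Illustrative only, not part of this commit: the constant-factor
// cancellation used by getUDivExactExpr, reduced to plain APInt arithmetic.
// For example (6 * %x) /u 4 becomes (3 * %x) /u 2 after dividing both
// constants by gcd(6, 4) = 2. Both inputs are assumed to already share a
// bit width, as the gcd helper above arranges.
#include "llvm/ADT/APInt.h"
#include <utility>
using namespace llvm;

static std::pair<APInt, APInt> cancelCommonFactor(const APInt &L,
                                                  const APInt &R) {
  APInt G = APIntOps::GreatestCommonDivisor(L.abs(), R.abs());
  if (G.isIntN(1))                      // gcd is 0 or 1: nothing to cancel
    return std::make_pair(L, R);
  return std::make_pair(L.udiv(G), R.udiv(G));
}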
/// getAddRecExpr - Get an add recurrence expression for the specified loop.
/// Simplify the expression as much as possible.
@@ -2356,7 +2457,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
for (unsigned i = 0, e = Operands.size(); i != e; ++i)
ID.AddPointer(Operands[i]);
ID.AddPointer(L);
- void *IP = 0;
+ void *IP = nullptr;
SCEVAddRecExpr *S =
static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
if (!S) {
@@ -2464,7 +2565,7 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
ID.AddInteger(scSMaxExpr);
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
ID.AddPointer(Ops[i]);
- void *IP = 0;
+ void *IP = nullptr;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
std::uninitialized_copy(Ops.begin(), Ops.end(), O);
@@ -2568,7 +2669,7 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
ID.AddInteger(scUMaxExpr);
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
ID.AddPointer(Ops[i]);
- void *IP = 0;
+ void *IP = nullptr;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
std::uninitialized_copy(Ops.begin(), Ops.end(), O);
@@ -2594,12 +2695,12 @@ const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
// If we have DataLayout, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
- if (TD)
- return getConstant(IntTy, TD->getTypeAllocSize(AllocTy));
+ if (DL)
+ return getConstant(IntTy, DL->getTypeAllocSize(AllocTy));
Constant *C = ConstantExpr::getSizeOf(AllocTy);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
C = Folded;
Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
assert(Ty == IntTy && "Effective SCEV type doesn't match");
@@ -2612,14 +2713,14 @@ const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
// If we have DataLayout, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
- if (TD) {
+ if (DL) {
return getConstant(IntTy,
- TD->getStructLayout(STy)->getElementOffset(FieldNo));
+ DL->getStructLayout(STy)->getElementOffset(FieldNo));
}
Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
C = Folded;
Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
@@ -2635,7 +2736,7 @@ const SCEV *ScalarEvolution::getUnknown(Value *V) {
FoldingSetNodeID ID;
ID.AddInteger(scUnknown);
ID.AddPointer(V);
- void *IP = 0;
+ void *IP = nullptr;
if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
assert(cast<SCEVUnknown>(S)->getValue() == V &&
"Stale SCEVUnknown in uniquing map!");
@@ -2667,8 +2768,8 @@ uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
assert(isSCEVable(Ty) && "Type is not SCEVable!");
// If we have a DataLayout, use it!
- if (TD)
- return TD->getTypeSizeInBits(Ty);
+ if (DL)
+ return DL->getTypeSizeInBits(Ty);
// Integer types have fixed sizes.
if (Ty->isIntegerTy())
@@ -2694,8 +2795,8 @@ Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
// The only other support type is pointer.
assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
- if (TD)
- return TD->getIntPtrType(Ty);
+ if (DL)
+ return DL->getIntPtrType(Ty);
// Without DataLayout, conservatively assume pointers are 64-bit.
return Type::getInt64Ty(getContext());
@@ -2714,7 +2815,7 @@ namespace {
bool FindOne;
FindInvalidSCEVUnknown() { FindOne = false; }
bool follow(const SCEV *S) {
- switch (S->getSCEVType()) {
+ switch (static_cast<SCEVTypes>(S->getSCEVType())) {
case scConstant:
return false;
case scUnknown:
@@ -2941,7 +3042,7 @@ const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
return getPointerBase(Cast->getOperand());
}
else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
- const SCEV *PtrOp = 0;
+ const SCEV *PtrOp = nullptr;
for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
I != E; ++I) {
if ((*I)->getType()->isPointerTy()) {
@@ -2964,9 +3065,8 @@ static void
PushDefUseChildren(Instruction *I,
SmallVectorImpl<Instruction *> &Worklist) {
// Push the def-use children onto the Worklist stack.
- for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
- UI != UE; ++UI)
- Worklist.push_back(cast<Instruction>(*UI));
+ for (User *U : I->users())
+ Worklist.push_back(cast<Instruction>(U));
}
/// ForgetSymbolicValue - This looks up computed SCEV values for all
@@ -3022,20 +3122,20 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// The loop may have multiple entrances or multiple exits; we can analyze
// this phi as an addrec if it has a unique entry value and a unique
// backedge value.
- Value *BEValueV = 0, *StartValueV = 0;
+ Value *BEValueV = nullptr, *StartValueV = nullptr;
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
Value *V = PN->getIncomingValue(i);
if (L->contains(PN->getIncomingBlock(i))) {
if (!BEValueV) {
BEValueV = V;
} else if (BEValueV != V) {
- BEValueV = 0;
+ BEValueV = nullptr;
break;
}
} else if (!StartValueV) {
StartValueV = V;
} else if (StartValueV != V) {
- StartValueV = 0;
+ StartValueV = nullptr;
break;
}
}
@@ -3163,7 +3263,7 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// PHI's incoming blocks are in a different loop, in which case doing so
// risks breaking LCSSA form. Instcombine would normally zap these, but
// it doesn't have DominatorTree information, so it may miss cases.
- if (Value *V = SimplifyInstruction(PN, TD, TLI, DT))
+ if (Value *V = SimplifyInstruction(PN, DL, TLI, DT))
if (LI->replacementPreservesLCSSAForm(PN, V))
return getSCEV(V);
@@ -3189,7 +3289,7 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
gep_type_iterator GTI = gep_type_begin(GEP);
- for (GetElementPtrInst::op_iterator I = llvm::next(GEP->op_begin()),
+ for (GetElementPtrInst::op_iterator I = std::next(GEP->op_begin()),
E = GEP->op_end();
I != E; ++I) {
Value *Index = *I;
@@ -3295,7 +3395,7 @@ ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
// For a SCEVUnknown, ask ValueTracking.
unsigned BitWidth = getTypeSizeInBits(U->getType());
APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
- ComputeMaskedBits(U->getValue(), Zeros, Ones);
+ computeKnownBits(U->getValue(), Zeros, Ones);
return Zeros.countTrailingOnes();
}
@@ -3434,7 +3534,7 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
// For a SCEVUnknown, ask ValueTracking.
APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
- ComputeMaskedBits(U->getValue(), Zeros, Ones, TD);
+ computeKnownBits(U->getValue(), Zeros, Ones, DL);
if (Ones == ~Zeros + 1)
return setUnsignedRange(U, ConservativeResult);
return setUnsignedRange(U,
@@ -3584,9 +3684,9 @@ ScalarEvolution::getSignedRange(const SCEV *S) {
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
// For a SCEVUnknown, ask ValueTracking.
- if (!U->getValue()->getType()->isIntegerTy() && !TD)
+ if (!U->getValue()->getType()->isIntegerTy() && !DL)
return setSignedRange(U, ConservativeResult);
- unsigned NS = ComputeNumSignBits(U->getValue(), TD);
+ unsigned NS = ComputeNumSignBits(U->getValue(), DL);
if (NS <= 1)
return setSignedRange(U, ConservativeResult);
return setSignedRange(U, ConservativeResult.intersectWith(
@@ -3687,20 +3787,27 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// Instcombine's ShrinkDemandedConstant may strip bits out of
// constants, obscuring what would otherwise be a low-bits mask.
- // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
+ // Use computeKnownBits to compute what ShrinkDemandedConstant
// knew about to reconstruct a low-bits mask value.
unsigned LZ = A.countLeadingZeros();
+ unsigned TZ = A.countTrailingZeros();
unsigned BitWidth = A.getBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, TD);
-
- APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
-
- if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
- return
- getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
- IntegerType::get(getContext(), BitWidth - LZ)),
- U->getType());
+ computeKnownBits(U->getOperand(0), KnownZero, KnownOne, DL);
+
+ APInt EffectiveMask =
+ APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
+ if ((LZ != 0 || TZ != 0) && !((~A & ~KnownZero) & EffectiveMask)) {
+ const SCEV *MulCount = getConstant(
+ ConstantInt::get(getContext(), APInt::getOneBitSet(BitWidth, TZ)));
+ return getMulExpr(
+ getZeroExtendExpr(
+ getTruncateExpr(
+ getUDivExactExpr(getSCEV(U->getOperand(0)), MulCount),
+ IntegerType::get(getContext(), BitWidth - LZ - TZ)),
+ U->getType()),
+ MulCount);
+ }
}
break;
@@ -4241,9 +4348,9 @@ ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE) const {
if (!ExitNotTaken.ExitingBlock) return SE->getCouldNotCompute();
assert(ExitNotTaken.ExactNotTaken && "uninitialized not-taken info");
- const SCEV *BECount = 0;
+ const SCEV *BECount = nullptr;
for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
- ENT != 0; ENT = ENT->getNextExit()) {
+ ENT != nullptr; ENT = ENT->getNextExit()) {
assert(ENT->ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV");
@@ -4261,7 +4368,7 @@ const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
ScalarEvolution *SE) const {
for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
- ENT != 0; ENT = ENT->getNextExit()) {
+ ENT != nullptr; ENT = ENT->getNextExit()) {
if (ENT->ExitingBlock == ExitingBlock)
return ENT->ExactNotTaken;
@@ -4284,7 +4391,7 @@ bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S,
return false;
for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
- ENT != 0; ENT = ENT->getNextExit()) {
+ ENT != nullptr; ENT = ENT->getNextExit()) {
if (ENT->ExactNotTaken != SE->getCouldNotCompute()
&& SE->hasOperand(ENT->ExactNotTaken, S)) {
@@ -4323,8 +4430,8 @@ ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
/// clear - Invalidate this result and free the ExitNotTakenInfo array.
void ScalarEvolution::BackedgeTakenInfo::clear() {
- ExitNotTaken.ExitingBlock = 0;
- ExitNotTaken.ExactNotTaken = 0;
+ ExitNotTaken.ExitingBlock = nullptr;
+ ExitNotTaken.ExactNotTaken = nullptr;
delete[] ExitNotTaken.getNextExit();
}
@@ -4335,32 +4442,63 @@ ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
SmallVector<BasicBlock *, 8> ExitingBlocks;
L->getExitingBlocks(ExitingBlocks);
- // Examine all exits and pick the most conservative values.
- const SCEV *MaxBECount = getCouldNotCompute();
- bool CouldComputeBECount = true;
SmallVector<std::pair<BasicBlock *, const SCEV *>, 4> ExitCounts;
+ bool CouldComputeBECount = true;
+ BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
+ const SCEV *MustExitMaxBECount = nullptr;
+ const SCEV *MayExitMaxBECount = nullptr;
+
+ // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
+ // and compute maxBECount.
for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
- ExitLimit EL = ComputeExitLimit(L, ExitingBlocks[i]);
+ BasicBlock *ExitBB = ExitingBlocks[i];
+ ExitLimit EL = ComputeExitLimit(L, ExitBB);
+
+ // 1. For each exit that can be computed, add an entry to ExitCounts.
+ // CouldComputeBECount is true only if all exits can be computed.
if (EL.Exact == getCouldNotCompute())
// We couldn't compute an exact value for this exit, so
// we won't be able to compute an exact value for the loop.
CouldComputeBECount = false;
else
- ExitCounts.push_back(std::make_pair(ExitingBlocks[i], EL.Exact));
+ ExitCounts.push_back(std::make_pair(ExitBB, EL.Exact));
- if (MaxBECount == getCouldNotCompute())
- MaxBECount = EL.Max;
- else if (EL.Max != getCouldNotCompute()) {
- // We cannot take the "min" MaxBECount, because non-unit stride loops may
- // skip some loop tests. Taking the max over the exits is sufficiently
- // conservative. TODO: We could do better taking into consideration
- // that (1) the loop has unit stride (2) the last loop test is
- // less-than/greater-than (3) any loop test is less-than/greater-than AND
- // falls-through some constant times less then the other tests.
- MaxBECount = getUMaxFromMismatchedTypes(MaxBECount, EL.Max);
+ // 2. Derive the loop's MaxBECount from each exit's max number of
+ // non-exiting iterations. Partition the loop exits into two kinds:
+ // LoopMustExits and LoopMayExits.
+ //
+ // A LoopMustExit meets two requirements:
+ //
+ // (a) Its ExitLimit.MustExit flag must be set which indicates that the exit
+ // test condition cannot be skipped (the tested variable has unit stride or
+ // the test is less-than or greater-than, rather than a strict inequality).
+ //
+ // (b) It must dominate the loop latch, hence must be tested on every loop
+ // iteration.
+ //
+ // If any computable LoopMustExit is found, then MaxBECount is the minimum
+ // EL.Max of computable LoopMustExits. Otherwise, MaxBECount is
+ // conservatively the maximum EL.Max, where CouldNotCompute is considered
+ // greater than any computable EL.Max.
+ if (EL.MustExit && EL.Max != getCouldNotCompute() && Latch &&
+ DT->dominates(ExitBB, Latch)) {
+ if (!MustExitMaxBECount)
+ MustExitMaxBECount = EL.Max;
+ else {
+ MustExitMaxBECount =
+ getUMinFromMismatchedTypes(MustExitMaxBECount, EL.Max);
+ }
+ } else if (MayExitMaxBECount != getCouldNotCompute()) {
+ if (!MayExitMaxBECount || EL.Max == getCouldNotCompute())
+ MayExitMaxBECount = EL.Max;
+ else {
+ MayExitMaxBECount =
+ getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.Max);
+ }
}
}
-
+ const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
+ (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
return BackedgeTakenInfo(ExitCounts, CouldComputeBECount, MaxBECount);
}
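// Illustrative only, not part of this commit: the MaxBECount selection above
// restated over plain unsigned trip counts, with UINT64_MAX standing in for
// CouldNotCompute. Must-exits (computable, cannot be skipped, dominating the
// latch) combine with min; every other exit combines conservatively with max.
#include <algorithm>
#include <cstdint>
#include <vector>

static uint64_t combineMaxTripCounts(const std::vector<uint64_t> &MustExitMaxes,
                                     const std::vector<uint64_t> &MayExitMaxes) {
  const uint64_t CouldNotCompute = UINT64_MAX;
  if (!MustExitMaxes.empty()) {
    uint64_t Min = CouldNotCompute;
    for (uint64_t M : MustExitMaxes)
      Min = std::min(Min, M);           // tightest bound that must be honored
    return Min;
  }
  if (MayExitMaxes.empty())
    return CouldNotCompute;
  uint64_t Max = 0;
  for (uint64_t M : MayExitMaxes)
    Max = std::max(Max, M);             // any CouldNotCompute poisons the max
  return Max;
}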
@@ -4370,12 +4508,19 @@ ScalarEvolution::ExitLimit
ScalarEvolution::ComputeExitLimit(const Loop *L, BasicBlock *ExitingBlock) {
// Okay, we've chosen an exiting block. See what condition causes us to
- // exit at this block.
- //
- // FIXME: we should be able to handle switch instructions (with a single exit)
- BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
- if (ExitBr == 0) return getCouldNotCompute();
- assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
+ // exit at this block and remember the exit block and whether all other targets
+ // lead to the loop header.
+ bool MustExecuteLoopHeader = true;
+ BasicBlock *Exit = nullptr;
+ for (succ_iterator SI = succ_begin(ExitingBlock), SE = succ_end(ExitingBlock);
+ SI != SE; ++SI)
+ if (!L->contains(*SI)) {
+ if (Exit) // Multiple exit successors.
+ return getCouldNotCompute();
+ Exit = *SI;
+ } else if (*SI != L->getHeader()) {
+ MustExecuteLoopHeader = false;
+ }
// At this point, we know we have a conditional branch that determines whether
// the loop is exited. However, we don't know if the branch is executed each
@@ -4394,13 +4539,11 @@ ScalarEvolution::ComputeExitLimit(const Loop *L, BasicBlock *ExitingBlock) {
//
// More extensive analysis could be done to handle more cases here.
//
- if (ExitBr->getSuccessor(0) != L->getHeader() &&
- ExitBr->getSuccessor(1) != L->getHeader() &&
- ExitBr->getParent() != L->getHeader()) {
+ if (!MustExecuteLoopHeader && ExitingBlock != L->getHeader()) {
// The simple checks failed, try climbing the unique predecessor chain
// up to the header.
bool Ok = false;
- for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
+ for (BasicBlock *BB = ExitingBlock; BB; ) {
BasicBlock *Pred = BB->getUniquePredecessor();
if (!Pred)
return getCouldNotCompute();
@@ -4424,11 +4567,20 @@ ScalarEvolution::ComputeExitLimit(const Loop *L, BasicBlock *ExitingBlock) {
return getCouldNotCompute();
}
- // Proceed to the next level to examine the exit condition expression.
- return ComputeExitLimitFromCond(L, ExitBr->getCondition(),
- ExitBr->getSuccessor(0),
- ExitBr->getSuccessor(1),
- /*IsSubExpr=*/false);
+ TerminatorInst *Term = ExitingBlock->getTerminator();
+ if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
+ assert(BI->isConditional() && "If unconditional, it can't be in loop!");
+ // Proceed to the next level to examine the exit condition expression.
+ return ComputeExitLimitFromCond(L, BI->getCondition(), BI->getSuccessor(0),
+ BI->getSuccessor(1),
+ /*IsSubExpr=*/false);
+ }
+
+ if (SwitchInst *SI = dyn_cast<SwitchInst>(Term))
+ return ComputeExitLimitFromSingleExitSwitch(L, SI, Exit,
+ /*IsSubExpr=*/false);
+
+ return getCouldNotCompute();
}
/// ComputeExitLimitFromCond - Compute the number of times the
@@ -4456,6 +4608,7 @@ ScalarEvolution::ComputeExitLimitFromCond(const Loop *L,
IsSubExpr || EitherMayExit);
const SCEV *BECount = getCouldNotCompute();
const SCEV *MaxBECount = getCouldNotCompute();
+ bool MustExit = false;
if (EitherMayExit) {
// Both conditions must be true for the loop to continue executing.
// Choose the less conservative count.
@@ -4470,6 +4623,7 @@ ScalarEvolution::ComputeExitLimitFromCond(const Loop *L,
MaxBECount = EL0.Max;
else
MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max);
+ MustExit = EL0.MustExit || EL1.MustExit;
} else {
// Both conditions must be true at the same time for the loop to exit.
// For now, be conservative.
@@ -4478,9 +4632,10 @@ ScalarEvolution::ComputeExitLimitFromCond(const Loop *L,
MaxBECount = EL0.Max;
if (EL0.Exact == EL1.Exact)
BECount = EL0.Exact;
+ MustExit = EL0.MustExit && EL1.MustExit;
}
- return ExitLimit(BECount, MaxBECount);
+ return ExitLimit(BECount, MaxBECount, MustExit);
}
if (BO->getOpcode() == Instruction::Or) {
// Recurse on the operands of the or.
@@ -4491,6 +4646,7 @@ ScalarEvolution::ComputeExitLimitFromCond(const Loop *L,
IsSubExpr || EitherMayExit);
const SCEV *BECount = getCouldNotCompute();
const SCEV *MaxBECount = getCouldNotCompute();
+ bool MustExit = false;
if (EitherMayExit) {
// Both conditions must be false for the loop to continue executing.
// Choose the less conservative count.
@@ -4505,6 +4661,7 @@ ScalarEvolution::ComputeExitLimitFromCond(const Loop *L,
MaxBECount = EL0.Max;
else
MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max);
+ MustExit = EL0.MustExit || EL1.MustExit;
} else {
// Both conditions must be false at the same time for the loop to exit.
// For now, be conservative.
@@ -4513,9 +4670,10 @@ ScalarEvolution::ComputeExitLimitFromCond(const Loop *L,
MaxBECount = EL0.Max;
if (EL0.Exact == EL1.Exact)
BECount = EL0.Exact;
+ MustExit = EL0.MustExit && EL1.MustExit;
}
- return ExitLimit(BECount, MaxBECount);
+ return ExitLimit(BECount, MaxBECount, MustExit);
}
}
@@ -4639,6 +4797,30 @@ ScalarEvolution::ComputeExitLimitFromICmp(const Loop *L,
return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB));
}
+ScalarEvolution::ExitLimit
+ScalarEvolution::ComputeExitLimitFromSingleExitSwitch(const Loop *L,
+ SwitchInst *Switch,
+ BasicBlock *ExitingBlock,
+ bool IsSubExpr) {
+ assert(!L->contains(ExitingBlock) && "Not an exiting block!");
+
+ // Give up if the exit is the default dest of a switch.
+ if (Switch->getDefaultDest() == ExitingBlock)
+ return getCouldNotCompute();
+
+ assert(L->contains(Switch->getDefaultDest()) &&
+ "Default case must not exit the loop!");
+ const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
+ const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));
+
+ // while (X != Y) --> while (X-Y != 0)
+ ExitLimit EL = HowFarToZero(getMinusSCEV(LHS, RHS), L, IsSubExpr);
+ if (EL.hasAnyInfo())
+ return EL;
+
+ return getCouldNotCompute();
+}
+
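// Illustrative only, not part of this commit: the shape of loop the new
// switch handling covers. After lowering, the loop below leaves through the
// `case 7` edge of a switch whose default destination stays in the loop, so
// the exit condition is effectively (i != 7) and, for i0 <= 7, HowFarToZero
// on (i - 7) yields a backedge-taken count of 7 - i0.
void singleExitSwitchLoop(int i0) {
  for (int i = i0;; ++i) {
    switch (i) {
    case 7:
      return;    // sole exiting edge of the switch
    default:
      break;     // default destination remains inside the loop
    }
  }
}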
static ConstantInt *
EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
ScalarEvolution &SE) {
@@ -4675,7 +4857,7 @@ ScalarEvolution::ComputeLoadConstantCompareExitLimit(
return getCouldNotCompute();
// Okay, we allow one non-constant index into the GEP instruction.
- Value *VarIdx = 0;
+ Value *VarIdx = nullptr;
std::vector<Constant*> Indexes;
unsigned VarIdxNum = 0;
for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
@@ -4685,7 +4867,7 @@ ScalarEvolution::ComputeLoadConstantCompareExitLimit(
if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
VarIdx = GEP->getOperand(i);
VarIdxNum = i-2;
- Indexes.push_back(0);
+ Indexes.push_back(nullptr);
}
// Loop-invariant loads may be a byproduct of loop optimization. Skip them.
@@ -4716,7 +4898,7 @@ ScalarEvolution::ComputeLoadConstantCompareExitLimit(
Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
Indexes);
- if (Result == 0) break; // Cannot compute!
+ if (!Result) break; // Cannot compute!
// Evaluate the condition for this iteration.
Result = ConstantExpr::getICmp(predicate, Result, RHS);
@@ -4777,14 +4959,14 @@ getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
// Otherwise, we can evaluate this instruction if all of its operands are
// constant or derived from a PHI node themselves.
- PHINode *PHI = 0;
+ PHINode *PHI = nullptr;
for (Instruction::op_iterator OpI = UseInst->op_begin(),
OpE = UseInst->op_end(); OpI != OpE; ++OpI) {
if (isa<Constant>(*OpI)) continue;
Instruction *OpInst = dyn_cast<Instruction>(*OpI);
- if (!OpInst || !canConstantEvolve(OpInst, L)) return 0;
+ if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;
PHINode *P = dyn_cast<PHINode>(OpInst);
if (!P)
@@ -4798,8 +4980,10 @@ getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap);
PHIMap[OpInst] = P;
}
- if (P == 0) return 0; // Not evolving from PHI
- if (PHI && PHI != P) return 0; // Evolving from multiple different PHIs.
+ if (!P)
+ return nullptr; // Not evolving from PHI
+ if (PHI && PHI != P)
+ return nullptr; // Evolving from multiple different PHIs.
PHI = P;
}
  // This is an expression evolving from a constant PHI!
@@ -4813,7 +4997,7 @@ getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
/// constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
Instruction *I = dyn_cast<Instruction>(V);
- if (I == 0 || !canConstantEvolve(I, L)) return 0;
+ if (!I || !canConstantEvolve(I, L)) return nullptr;
if (PHINode *PN = dyn_cast<PHINode>(I)) {
return PN;
@@ -4830,23 +5014,23 @@ static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
/// reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
DenseMap<Instruction *, Constant *> &Vals,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI) {
// Convenient constant check, but redundant for recursive calls.
if (Constant *C = dyn_cast<Constant>(V)) return C;
Instruction *I = dyn_cast<Instruction>(V);
- if (!I) return 0;
+ if (!I) return nullptr;
if (Constant *C = Vals.lookup(I)) return C;
// An instruction inside the loop depends on a value outside the loop that we
// weren't given a mapping for, or a value such as a call inside the loop.
- if (!canConstantEvolve(I, L)) return 0;
+ if (!canConstantEvolve(I, L)) return nullptr;
// An unmapped PHI can be due to a branch or another loop inside this loop,
// or due to this not being the initial iteration through a loop where we
// couldn't compute the evolution of this particular PHI last time.
- if (isa<PHINode>(I)) return 0;
+ if (isa<PHINode>(I)) return nullptr;
std::vector<Constant*> Operands(I->getNumOperands());
@@ -4854,23 +5038,23 @@ static Constant *EvaluateExpression(Value *V, const Loop *L,
Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
if (!Operand) {
Operands[i] = dyn_cast<Constant>(I->getOperand(i));
- if (!Operands[i]) return 0;
+ if (!Operands[i]) return nullptr;
continue;
}
- Constant *C = EvaluateExpression(Operand, L, Vals, TD, TLI);
+ Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
Vals[Operand] = C;
- if (!C) return 0;
+ if (!C) return nullptr;
Operands[i] = C;
}
if (CmpInst *CI = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
- Operands[1], TD, TLI);
+ Operands[1], DL, TLI);
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (!LI->isVolatile())
- return ConstantFoldLoadFromConstPtr(Operands[0], TD);
+ return ConstantFoldLoadFromConstPtr(Operands[0], DL);
}
- return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, TD,
+ return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, DL,
TLI);
}
@@ -4888,7 +5072,7 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
return I->second;
if (BEs.ugt(MaxBruteForceIterations))
- return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it.
+ return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it.
Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
@@ -4900,22 +5084,22 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
// entry must be a constant (coming in from outside of the loop), and the
// second must be derived from the same PHI.
bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
- PHINode *PHI = 0;
+ PHINode *PHI = nullptr;
for (BasicBlock::iterator I = Header->begin();
(PHI = dyn_cast<PHINode>(I)); ++I) {
Constant *StartCST =
dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
- if (StartCST == 0) continue;
+ if (!StartCST) continue;
CurrentIterVals[PHI] = StartCST;
}
if (!CurrentIterVals.count(PN))
- return RetVal = 0;
+ return RetVal = nullptr;
Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
// Execute the loop symbolically to determine the exit value.
if (BEs.getActiveBits() >= 32)
- return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
+ return RetVal = nullptr; // More than 2^32-1 iterations?? Not doing it!
unsigned NumIterations = BEs.getZExtValue(); // must be in range
unsigned IterationNum = 0;
@@ -4926,10 +5110,10 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
// Compute the value of the PHIs for the next iteration.
// EvaluateExpression adds non-phi values to the CurrentIterVals map.
DenseMap<Instruction *, Constant *> NextIterVals;
- Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD,
+ Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL,
TLI);
- if (NextPHI == 0)
- return 0; // Couldn't evaluate!
+ if (!NextPHI)
+ return nullptr; // Couldn't evaluate!
NextIterVals[PN] = NextPHI;
bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
@@ -4952,7 +5136,7 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
Constant *&NextPHI = NextIterVals[PHI];
if (!NextPHI) { // Not already computed.
Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
- NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD, TLI);
+ NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
}
if (NextPHI != I->second)
StoppedEvolving = false;
@@ -4976,7 +5160,7 @@ const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
Value *Cond,
bool ExitWhen) {
PHINode *PN = getConstantEvolvingPHI(Cond, L);
- if (PN == 0) return getCouldNotCompute();
+ if (!PN) return getCouldNotCompute();
// If the loop is canonicalized, the PHI will have exactly two entries.
// That's the only form we support here.
@@ -4989,12 +5173,12 @@ const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
// One entry must be a constant (coming in from outside of the loop), and the
// second must be derived from the same PHI.
bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
- PHINode *PHI = 0;
+ PHINode *PHI = nullptr;
for (BasicBlock::iterator I = Header->begin();
(PHI = dyn_cast<PHINode>(I)); ++I) {
Constant *StartCST =
dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
- if (StartCST == 0) continue;
+ if (!StartCST) continue;
CurrentIterVals[PHI] = StartCST;
}
if (!CurrentIterVals.count(PN))
@@ -5008,7 +5192,7 @@ const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
ConstantInt *CondVal =
dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, L, CurrentIterVals,
- TD, TLI));
+ DL, TLI));
// Couldn't symbolically evaluate.
if (!CondVal) return getCouldNotCompute();
@@ -5038,7 +5222,7 @@ const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
if (NextPHI) continue; // Already computed!
Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
- NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD, TLI);
+ NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
}
CurrentIterVals.swap(NextIterVals);
}
@@ -5064,7 +5248,7 @@ const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
if (Values[u].first == L)
return Values[u].second ? Values[u].second : V;
}
- Values.push_back(std::make_pair(L, static_cast<const SCEV *>(0)));
+ Values.push_back(std::make_pair(L, static_cast<const SCEV *>(nullptr)));
// Otherwise compute it.
const SCEV *C = computeSCEVAtScope(V, L);
SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values2 = ValuesAtScopes[V];
@@ -5082,8 +5266,7 @@ const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
/// Returns NULL if the SCEV isn't representable as a Constant.
static Constant *BuildConstantFromSCEV(const SCEV *V) {
- switch (V->getSCEVType()) {
- default: // TODO: smax, umax.
+ switch (static_cast<SCEVTypes>(V->getSCEVType())) {
case scCouldNotCompute:
case scAddRecExpr:
break;
@@ -5119,7 +5302,7 @@ static Constant *BuildConstantFromSCEV(const SCEV *V) {
}
for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
- if (!C2) return 0;
+ if (!C2) return nullptr;
// First pointer!
if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
@@ -5134,7 +5317,7 @@ static Constant *BuildConstantFromSCEV(const SCEV *V) {
// Don't bother trying to sum two pointers. We probably can't
// statically compute a load that results from it anyway.
if (C2->getType()->isPointerTy())
- return 0;
+ return nullptr;
if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
if (PTy->getElementType()->isStructTy())
@@ -5152,10 +5335,10 @@ static Constant *BuildConstantFromSCEV(const SCEV *V) {
const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
// Don't bother with pointers at all.
- if (C->getType()->isPointerTy()) return 0;
+ if (C->getType()->isPointerTy()) return nullptr;
for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
- if (!C2 || C2->getType()->isPointerTy()) return 0;
+ if (!C2 || C2->getType()->isPointerTy()) return nullptr;
C = ConstantExpr::getMul(C, C2);
}
return C;
@@ -5170,8 +5353,11 @@ static Constant *BuildConstantFromSCEV(const SCEV *V) {
return ConstantExpr::getUDiv(LHS, RHS);
break;
}
+ case scSMaxExpr:
+ case scUMaxExpr:
+ break; // TODO: smax, umax.
}
- return 0;
+ return nullptr;
}
const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
@@ -5238,17 +5424,17 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
// Check to see if getSCEVAtScope actually made an improvement.
if (MadeImprovement) {
- Constant *C = 0;
+ Constant *C = nullptr;
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
C = ConstantFoldCompareInstOperands(CI->getPredicate(),
- Operands[0], Operands[1], TD,
+ Operands[0], Operands[1], DL,
TLI);
else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (!LI->isVolatile())
- C = ConstantFoldLoadFromConstPtr(Operands[0], TD);
+ C = ConstantFoldLoadFromConstPtr(Operands[0], DL);
} else
C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
- Operands, TD, TLI);
+ Operands, DL, TLI);
if (!C) return V;
return getSCEV(C);
}
@@ -5570,7 +5756,7 @@ ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L, bool IsSubExpr) {
// to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
// We have not yet seen any such cases.
const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
- if (StepC == 0 || StepC->getValue()->equalsInt(0))
+ if (!StepC || StepC->getValue()->equalsInt(0))
return getCouldNotCompute();
// For positive steps (counting up until unsigned overflow):
@@ -5595,7 +5781,7 @@ ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L, bool IsSubExpr) {
else
MaxBECount = getConstant(CountDown ? CR.getUnsignedMax()
: -CR.getUnsignedMin());
- return ExitLimit(Distance, MaxBECount);
+ return ExitLimit(Distance, MaxBECount, /*MustExit=*/true);
}
// If the recurrence is known not to wraparound, unsigned divide computes the
@@ -5603,15 +5789,29 @@ ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L, bool IsSubExpr) {
// that the value will either become zero (and thus the loop terminates), that
// the loop will terminate through some other exit condition first, or that
// the loop has undefined behavior. This means we can't "miss" the exit
- // value, even with nonunit stride.
+ // value, even with nonunit stride, and exit later via the same branch. Note
+ // that we can skip this exit if loop later exits via a different
+ // branch. Hence MustExit=false.
//
// This is only valid for expressions that directly compute the loop exit. It
// is invalid for subexpressions in which the loop may exit through this
// branch even if this subexpression is false. In that case, the trip count
// computed by this udiv could be smaller than the number of well-defined
// iterations.
- if (!IsSubExpr && AddRec->getNoWrapFlags(SCEV::FlagNW))
- return getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
+ if (!IsSubExpr && AddRec->getNoWrapFlags(SCEV::FlagNW)) {
+ const SCEV *Exact =
+ getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
+ return ExitLimit(Exact, Exact, /*MustExit=*/false);
+ }
+
+ // If Step is a power of two that evenly divides Start we know that the loop
+ // will always terminate. Start may not be a constant so we just have the
+ // number of trailing zeros available. This is safe even in presence of
+ // overflow as the recurrence will overflow to exactly 0.
+ const APInt &StepV = StepC->getValue()->getValue();
+ if (StepV.isPowerOf2() &&
+ GetMinTrailingZeros(getNegativeSCEV(Start)) >= StepV.countTrailingZeros())
+ return getUDivExactExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
// Then, try to solve the above equation provided that Start is constant.
if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
@@ -5995,18 +6195,30 @@ bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
// If LHS or RHS is an addrec, check to see if the condition is true in
// every iteration of the loop.
- if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
- if (isLoopEntryGuardedByCond(
- AR->getLoop(), Pred, AR->getStart(), RHS) &&
- isLoopBackedgeGuardedByCond(
- AR->getLoop(), Pred, AR->getPostIncExpr(*this), RHS))
- return true;
- if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS))
- if (isLoopEntryGuardedByCond(
- AR->getLoop(), Pred, LHS, AR->getStart()) &&
- isLoopBackedgeGuardedByCond(
- AR->getLoop(), Pred, LHS, AR->getPostIncExpr(*this)))
- return true;
+ // If LHS and RHS are both addrec, both conditions must be true in
+ // every iteration of the loop.
+ const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
+ const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
+ bool LeftGuarded = false;
+ bool RightGuarded = false;
+ if (LAR) {
+ const Loop *L = LAR->getLoop();
+ if (isLoopEntryGuardedByCond(L, Pred, LAR->getStart(), RHS) &&
+ isLoopBackedgeGuardedByCond(L, Pred, LAR->getPostIncExpr(*this), RHS)) {
+ if (!RAR) return true;
+ LeftGuarded = true;
+ }
+ }
+ if (RAR) {
+ const Loop *L = RAR->getLoop();
+ if (isLoopEntryGuardedByCond(L, Pred, LHS, RAR->getStart()) &&
+ isLoopBackedgeGuardedByCond(L, Pred, LHS, RAR->getPostIncExpr(*this))) {
+ if (!LAR) return true;
+ RightGuarded = true;
+ }
+ }
+ if (LeftGuarded && RightGuarded)
+ return true;
// Otherwise see what can be done with known constant ranges.
return isKnownPredicateWithRanges(Pred, LHS, RHS);
@@ -6024,7 +6236,6 @@ ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
default:
llvm_unreachable("Unexpected ICmpInst::Predicate value!");
case ICmpInst::ICMP_SGT:
- Pred = ICmpInst::ICMP_SLT;
std::swap(LHS, RHS);
case ICmpInst::ICMP_SLT: {
ConstantRange LHSRange = getSignedRange(LHS);
@@ -6036,7 +6247,6 @@ ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
break;
}
case ICmpInst::ICMP_SGE:
- Pred = ICmpInst::ICMP_SLE;
std::swap(LHS, RHS);
case ICmpInst::ICMP_SLE: {
ConstantRange LHSRange = getSignedRange(LHS);
@@ -6048,7 +6258,6 @@ ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
break;
}
case ICmpInst::ICMP_UGT:
- Pred = ICmpInst::ICMP_ULT;
std::swap(LHS, RHS);
case ICmpInst::ICMP_ULT: {
ConstantRange LHSRange = getUnsignedRange(LHS);
@@ -6060,7 +6269,6 @@ ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
break;
}
case ICmpInst::ICMP_UGE:
- Pred = ICmpInst::ICMP_ULE;
std::swap(LHS, RHS);
case ICmpInst::ICMP_ULE: {
ConstantRange LHSRange = getUnsignedRange(LHS);
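The ICMP_SGT/SGE/UGT/UGE cases simply swap the operands and fall through, so the removed `Pred = ...` assignments were dead stores. The underlying range test can be sketched with plain closed intervals standing in for ConstantRange (which is actually half-open and may wrap, so this is only an analogy):

#include <cstdint>
#include <iostream>

struct Range { uint64_t Min, Max; };   // closed, non-wrapping interval

// ULT is known true if every LHS value is below every RHS value, and known
// false if no LHS value can be below any RHS value.
static bool knownULT(Range L, Range R) { return L.Max < R.Min; }
static bool knownNotULT(Range L, Range R) { return L.Min >= R.Max; }

int main() {
  Range A{0, 7}, B{16, 31};
  std::cout << knownULT(A, B) << " " << knownNotULT(B, A) << "\n";  // 1 1
  return 0;
}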
@@ -6466,7 +6674,7 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
IsSigned ? APIntOps::smin(getSignedRange(RHS).getSignedMax(), Limit)
: APIntOps::umin(getUnsignedRange(RHS).getUnsignedMax(), Limit);
- const SCEV *MaxBECount = getCouldNotCompute();
+ const SCEV *MaxBECount;
if (isa<SCEVConstant>(BECount))
MaxBECount = BECount;
else
@@ -6476,7 +6684,7 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
if (isa<SCEVCouldNotCompute>(MaxBECount))
MaxBECount = BECount;
- return ExitLimit(BECount, MaxBECount);
+ return ExitLimit(BECount, MaxBECount, /*MustExit=*/true);
}
ScalarEvolution::ExitLimit
@@ -6548,7 +6756,7 @@ ScalarEvolution::HowManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
if (isa<SCEVCouldNotCompute>(MaxBECount))
MaxBECount = BECount;
- return ExitLimit(BECount, MaxBECount);
+ return ExitLimit(BECount, MaxBECount, /*MustExit=*/true);
}
/// getNumIterationsInRange - Return the number of iterations of this loop that
@@ -6677,18 +6885,103 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
return SE.getCouldNotCompute();
}
-static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
- APInt A = C1->getValue()->getValue().abs();
- APInt B = C2->getValue()->getValue().abs();
- uint32_t ABW = A.getBitWidth();
- uint32_t BBW = B.getBitWidth();
+namespace {
+struct FindUndefs {
+ bool Found;
+ FindUndefs() : Found(false) {}
- if (ABW > BBW)
- B = B.zext(ABW);
- else if (ABW < BBW)
- A = A.zext(BBW);
+ bool follow(const SCEV *S) {
+ if (const SCEVUnknown *C = dyn_cast<SCEVUnknown>(S)) {
+ if (isa<UndefValue>(C->getValue()))
+ Found = true;
+ } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
+ if (isa<UndefValue>(C->getValue()))
+ Found = true;
+ }
- return APIntOps::GreatestCommonDivisor(A, B);
+ // Keep looking if we haven't found it yet.
+ return !Found;
+ }
+ bool isDone() const {
+ // Stop recursion if we have found an undef.
+ return Found;
+ }
+};
+}
+
+// Returns true when S contains at least one undef value.
+static inline bool
+containsUndefs(const SCEV *S) {
+ FindUndefs F;
+ SCEVTraversal<FindUndefs> ST(F);
+ ST.visitAll(S);
+
+ return F.Found;
+}
+
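containsUndefs drives SCEVTraversal with the follow()/isDone() protocol: follow() decides whether to descend into a node's operands, and isDone() stops the whole walk early. A self-contained analogue over a toy expression tree (the Expr type is invented for illustration; the real traversal is an iterative worklist):

#include <iostream>
#include <memory>
#include <vector>

// A toy expression node standing in for a SCEV: a leaf flag plus operands.
struct Expr {
  bool IsUndef = false;
  std::vector<std::unique_ptr<Expr>> Ops;
};

// Mirrors the follow()/isDone() protocol of the visitors above.
struct FindUndefs {
  bool Found = false;
  bool follow(const Expr &E) {
    if (E.IsUndef) Found = true;
    return !Found;              // keep descending only while nothing was found
  }
  bool isDone() const { return Found; }
};

template <typename Visitor> void visitAll(const Expr &E, Visitor &V) {
  if (V.isDone() || !V.follow(E)) return;
  for (const auto &Op : E.Ops) {
    visitAll(*Op, V);
    if (V.isDone()) return;
  }
}

int main() {
  Expr Root;
  Root.Ops.push_back(std::make_unique<Expr>());
  Root.Ops.push_back(std::make_unique<Expr>());
  Root.Ops[1]->IsUndef = true;  // one undef leaf somewhere in the tree

  FindUndefs F;
  visitAll(Root, F);
  std::cout << (F.Found ? "contains undef\n" : "no undef\n");
  return 0;
}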
+namespace {
+// Collect all steps of SCEV expressions.
+struct SCEVCollectStrides {
+ ScalarEvolution &SE;
+ SmallVectorImpl<const SCEV *> &Strides;
+
+ SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
+ : SE(SE), Strides(S) {}
+
+ bool follow(const SCEV *S) {
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
+ Strides.push_back(AR->getStepRecurrence(SE));
+ return true;
+ }
+ bool isDone() const { return false; }
+};
+
+// Collect all SCEVUnknown and SCEVMulExpr expressions.
+struct SCEVCollectTerms {
+ SmallVectorImpl<const SCEV *> &Terms;
+
+ SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T)
+ : Terms(T) {}
+
+ bool follow(const SCEV *S) {
+ if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S)) {
+ if (!containsUndefs(S))
+ Terms.push_back(S);
+
+ // Stop recursion: once we collected a term, do not walk its operands.
+ return false;
+ }
+
+ // Keep looking.
+ return true;
+ }
+ bool isDone() const { return false; }
+};
+}
+
+/// Find parametric terms in this SCEVAddRecExpr.
+void SCEVAddRecExpr::collectParametricTerms(
+ ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &Terms) const {
+ SmallVector<const SCEV *, 4> Strides;
+ SCEVCollectStrides StrideCollector(SE, Strides);
+ visitAll(this, StrideCollector);
+
+ DEBUG({
+ dbgs() << "Strides:\n";
+ for (const SCEV *S : Strides)
+ dbgs() << *S << "\n";
+ });
+
+ for (const SCEV *S : Strides) {
+ SCEVCollectTerms TermCollector(Terms);
+ visitAll(S, TermCollector);
+ }
+
+ DEBUG({
+ dbgs() << "Terms:\n";
+ for (const SCEV *T : Terms)
+ dbgs() << *T << "\n";
+ });
}
static const APInt srem(const SCEVConstant *C1, const SCEVConstant *C2) {
@@ -6720,352 +7013,488 @@ static const APInt sdiv(const SCEVConstant *C1, const SCEVConstant *C2) {
}
namespace {
-struct SCEVGCD : public SCEVVisitor<SCEVGCD, const SCEV *> {
-public:
- // Pattern match Step into Start. When Step is a multiply expression, find
- // the largest subexpression of Step that appears in Start. When Start is an
- // add expression, try to match Step in the subexpressions of Start, non
- // matching subexpressions are returned under Remainder.
- static const SCEV *findGCD(ScalarEvolution &SE, const SCEV *Start,
- const SCEV *Step, const SCEV **Remainder) {
- assert(Remainder && "Remainder should not be NULL");
- SCEVGCD R(SE, Step, SE.getConstant(Step->getType(), 0));
- const SCEV *Res = R.visit(Start);
- *Remainder = R.Remainder;
- return Res;
- }
+struct FindSCEVSize {
+ int Size;
+ FindSCEVSize() : Size(0) {}
- SCEVGCD(ScalarEvolution &S, const SCEV *G, const SCEV *R)
- : SE(S), GCD(G), Remainder(R) {
- Zero = SE.getConstant(GCD->getType(), 0);
- One = SE.getConstant(GCD->getType(), 1);
+ bool follow(const SCEV *S) {
+ ++Size;
+ // Keep looking at all operands of S.
+ return true;
+ }
+ bool isDone() const {
+ return false;
}
+};
+}
- const SCEV *visitConstant(const SCEVConstant *Constant) {
- if (GCD == Constant || Constant == Zero)
- return GCD;
+// Returns the size of the SCEV S.
+static inline int sizeOfSCEV(const SCEV *S) {
+ FindSCEVSize F;
+ SCEVTraversal<FindSCEVSize> ST(F);
+ ST.visitAll(S);
+ return F.Size;
+}
- if (const SCEVConstant *CGCD = dyn_cast<SCEVConstant>(GCD)) {
- const SCEV *Res = SE.getConstant(gcd(Constant, CGCD));
- if (Res != One)
- return Res;
+namespace {
- Remainder = SE.getConstant(srem(Constant, CGCD));
- Constant = cast<SCEVConstant>(SE.getMinusSCEV(Constant, Remainder));
- Res = SE.getConstant(gcd(Constant, CGCD));
- return Res;
+struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
+public:
+ // Computes the Quotient and Remainder of the division of Numerator by
+ // Denominator.
+ static void divide(ScalarEvolution &SE, const SCEV *Numerator,
+ const SCEV *Denominator, const SCEV **Quotient,
+ const SCEV **Remainder) {
+ assert(Numerator && Denominator && "Uninitialized SCEV");
+
+ SCEVDivision D(SE, Numerator, Denominator);
+
+ // Check for the trivial case here to avoid having to check for it in the
+ // rest of the code.
+ if (Numerator == Denominator) {
+ *Quotient = D.One;
+ *Remainder = D.Zero;
+ return;
}
- // When GCD is not a constant, it could be that the GCD is an Add, Mul,
- // AddRec, etc., in which case we want to find out how many times the
- // Constant divides the GCD: we then return that as the new GCD.
- const SCEV *Rem = Zero;
- const SCEV *Res = findGCD(SE, GCD, Constant, &Rem);
+ if (Numerator->isZero()) {
+ *Quotient = D.Zero;
+ *Remainder = D.Zero;
+ return;
+ }
- if (Res == One || Rem != Zero) {
- Remainder = Constant;
- return One;
+ // Split the Denominator when it is a product.
+ if (const SCEVMulExpr *T = dyn_cast<const SCEVMulExpr>(Denominator)) {
+ const SCEV *Q, *R;
+ *Quotient = Numerator;
+ for (const SCEV *Op : T->operands()) {
+ divide(SE, *Quotient, Op, &Q, &R);
+ *Quotient = Q;
+
+ // Bail out when the Numerator is not divisible by one of the terms of
+ // the Denominator.
+ if (!R->isZero()) {
+ *Quotient = D.Zero;
+ *Remainder = Numerator;
+ return;
+ }
+ }
+ *Remainder = D.Zero;
+ return;
}
- assert(isa<SCEVConstant>(Res) && "Res should be a constant");
- Remainder = SE.getConstant(srem(Constant, cast<SCEVConstant>(Res)));
- return Res;
+ D.visit(Numerator);
+ *Quotient = D.Quotient;
+ *Remainder = D.Remainder;
+ }
+
+ SCEVDivision(ScalarEvolution &S, const SCEV *Numerator, const SCEV *Denominator)
+ : SE(S), Denominator(Denominator) {
+ Zero = SE.getConstant(Denominator->getType(), 0);
+ One = SE.getConstant(Denominator->getType(), 1);
+
+ // By default, we don't know how to divide Expr by Denominator.
+ // Providing the default here simplifies the rest of the code.
+ Quotient = Zero;
+ Remainder = Numerator;
+ }
+
+ // Except in the trivial case handled above, we do not know how to divide
+ // Expr by Denominator; the following visitors therefore have empty bodies.
+ void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
+ void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
+ void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
+ void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
+ void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
+ void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
+ void visitUnknown(const SCEVUnknown *Numerator) {}
+ void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}
+
+ void visitConstant(const SCEVConstant *Numerator) {
+ if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
+ Quotient = SE.getConstant(sdiv(Numerator, D));
+ Remainder = SE.getConstant(srem(Numerator, D));
+ return;
+ }
}
- const SCEV *visitTruncateExpr(const SCEVTruncateExpr *Expr) {
- if (GCD != Expr)
- Remainder = Expr;
- return GCD;
+ void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
+ const SCEV *StartQ, *StartR, *StepQ, *StepR;
+ assert(Numerator->isAffine() && "Numerator should be affine");
+ divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
+ divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
+ Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
+ Numerator->getNoWrapFlags());
+ Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
+ Numerator->getNoWrapFlags());
}
- const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
- if (GCD != Expr)
- Remainder = Expr;
- return GCD;
- }
+ void visitAddExpr(const SCEVAddExpr *Numerator) {
+ SmallVector<const SCEV *, 2> Qs, Rs;
+ Type *Ty = Denominator->getType();
- const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
- if (GCD != Expr)
- Remainder = Expr;
- return GCD;
- }
+ for (const SCEV *Op : Numerator->operands()) {
+ const SCEV *Q, *R;
+ divide(SE, Op, Denominator, &Q, &R);
- const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
- if (GCD == Expr)
- return GCD;
+ // Bail out if types do not match.
+ if (Ty != Q->getType() || Ty != R->getType()) {
+ Quotient = Zero;
+ Remainder = Numerator;
+ return;
+ }
- for (int i = 0, e = Expr->getNumOperands(); i < e; ++i) {
- const SCEV *Rem = Zero;
- const SCEV *Res = findGCD(SE, Expr->getOperand(e - 1 - i), GCD, &Rem);
+ Qs.push_back(Q);
+ Rs.push_back(R);
+ }
- // FIXME: There may be ambiguous situations: for instance,
- // GCD(-4 + (3 * %m), 2 * %m) where 2 divides -4 and %m divides (3 * %m).
- // The order in which the AddExpr is traversed computes a different GCD
- // and Remainder.
- if (Res != One)
- GCD = Res;
- if (Rem != Zero)
- Remainder = SE.getAddExpr(Remainder, Rem);
+ if (Qs.size() == 1) {
+ Quotient = Qs[0];
+ Remainder = Rs[0];
+ return;
}
- return GCD;
+ Quotient = SE.getAddExpr(Qs);
+ Remainder = SE.getAddExpr(Rs);
}
- const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
- if (GCD == Expr)
- return GCD;
+ void visitMulExpr(const SCEVMulExpr *Numerator) {
+ SmallVector<const SCEV *, 2> Qs;
+ Type *Ty = Denominator->getType();
- for (int i = 0, e = Expr->getNumOperands(); i < e; ++i) {
- if (Expr->getOperand(i) == GCD)
- return GCD;
- }
+ bool FoundDenominatorTerm = false;
+ for (const SCEV *Op : Numerator->operands()) {
+ // Bail out if types do not match.
+ if (Ty != Op->getType()) {
+ Quotient = Zero;
+ Remainder = Numerator;
+ return;
+ }
+
+ if (FoundDenominatorTerm) {
+ Qs.push_back(Op);
+ continue;
+ }
- // If we have not returned yet, it means that GCD is not part of Expr.
- const SCEV *PartialGCD = One;
- for (int i = 0, e = Expr->getNumOperands(); i < e; ++i) {
- const SCEV *Rem = Zero;
- const SCEV *Res = findGCD(SE, Expr->getOperand(i), GCD, &Rem);
- if (Rem != Zero)
- // GCD does not divide Expr->getOperand(i).
+ // Check whether Denominator divides one of the product operands.
+ const SCEV *Q, *R;
+ divide(SE, Op, Denominator, &Q, &R);
+ if (!R->isZero()) {
+ Qs.push_back(Op);
continue;
+ }
- if (Res == GCD)
- return GCD;
- PartialGCD = SE.getMulExpr(PartialGCD, Res);
- if (PartialGCD == GCD)
- return GCD;
- }
-
- if (PartialGCD != One)
- return PartialGCD;
-
- Remainder = Expr;
- const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(GCD);
- if (!Mul)
- return PartialGCD;
-
- // When the GCD is a multiply expression, try to decompose it:
- // this occurs when Step does not divide the Start expression
- // as in: {(-4 + (3 * %m)),+,(2 * %m)}
- for (int i = 0, e = Mul->getNumOperands(); i < e; ++i) {
- const SCEV *Rem = Zero;
- const SCEV *Res = findGCD(SE, Expr, Mul->getOperand(i), &Rem);
- if (Rem == Zero) {
- Remainder = Rem;
- return Res;
+ // Bail out if types do not match.
+ if (Ty != Q->getType()) {
+ Quotient = Zero;
+ Remainder = Numerator;
+ return;
}
+
+ FoundDenominatorTerm = true;
+ Qs.push_back(Q);
}
- return PartialGCD;
- }
+ if (FoundDenominatorTerm) {
+ Remainder = Zero;
+ if (Qs.size() == 1)
+ Quotient = Qs[0];
+ else
+ Quotient = SE.getMulExpr(Qs);
+ return;
+ }
- const SCEV *visitUDivExpr(const SCEVUDivExpr *Expr) {
- if (GCD != Expr)
- Remainder = Expr;
- return GCD;
- }
+ if (!isa<SCEVUnknown>(Denominator)) {
+ Quotient = Zero;
+ Remainder = Numerator;
+ return;
+ }
- const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
- if (GCD == Expr)
- return GCD;
+ // The Remainder is obtained by replacing Denominator by 0 in Numerator.
+ ValueToValueMap RewriteMap;
+ RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
+ cast<SCEVConstant>(Zero)->getValue();
+ Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
+
+ if (Remainder->isZero()) {
+ // The Quotient is obtained by replacing Denominator by 1 in Numerator.
+ RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
+ cast<SCEVConstant>(One)->getValue();
+ Quotient =
+ SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
+ return;
+ }
- if (!Expr->isAffine()) {
- Remainder = Expr;
- return GCD;
+ // Quotient is (Numerator - Remainder) divided by Denominator.
+ const SCEV *Q, *R;
+ const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
+ if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator)) {
+ // This SCEV does not seem to simplify: fail the division here.
+ Quotient = Zero;
+ Remainder = Numerator;
+ return;
}
+ divide(SE, Diff, Denominator, &Q, &R);
+ assert(R == Zero &&
+ "(Numerator - Remainder) should evenly divide Denominator");
+ Quotient = Q;
+ }
+
+private:
+ ScalarEvolution &SE;
+ const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
+};
+}
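In the visitMulExpr case above, exact division of a product by one of its factors amounts to deleting one occurrence of that factor from the product. A toy, string-based stand-in for that step (not the SCEV representation; names are made up):

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// A term is a product of factors; dividing by a factor removes one occurrence.
using Term = std::vector<std::string>;

static bool divideByFactor(Term &T, const std::string &F) {
  auto It = std::find(T.begin(), T.end(), F);
  if (It == T.end()) return false;   // remainder would be non-zero: bail out
  T.erase(It);
  return true;
}

int main() {
  Term Stride = {"n", "m", "4"};     // stride n*m*4 of some outer subscript
  if (divideByFactor(Stride, "m")) { // exact division by the size m
    for (const auto &F : Stride) std::cout << F << " ";
    std::cout << "\n";               // prints "n 4"
  }
  return 0;
}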
- const SCEV *Rem = Zero;
- const SCEV *Res = findGCD(SE, Expr->getOperand(0), GCD, &Rem);
- if (Rem != Zero)
- Remainder = SE.getAddExpr(Remainder, Rem);
+static bool findArrayDimensionsRec(ScalarEvolution &SE,
+ SmallVectorImpl<const SCEV *> &Terms,
+ SmallVectorImpl<const SCEV *> &Sizes) {
+ int Last = Terms.size() - 1;
+ const SCEV *Step = Terms[Last];
- Rem = Zero;
- Res = findGCD(SE, Expr->getOperand(1), Res, &Rem);
- if (Rem != Zero) {
- Remainder = Expr;
- return GCD;
+ // End of recursion.
+ if (Last == 0) {
+ if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
+ SmallVector<const SCEV *, 2> Qs;
+ for (const SCEV *Op : M->operands())
+ if (!isa<SCEVConstant>(Op))
+ Qs.push_back(Op);
+
+ Step = SE.getMulExpr(Qs);
}
- return Res;
+ Sizes.push_back(Step);
+ return true;
}
- const SCEV *visitSMaxExpr(const SCEVSMaxExpr *Expr) {
- if (GCD != Expr)
- Remainder = Expr;
- return GCD;
- }
+ for (const SCEV *&Term : Terms) {
+ // Normalize the terms before the next call to findArrayDimensionsRec.
+ const SCEV *Q, *R;
+ SCEVDivision::divide(SE, Term, Step, &Q, &R);
- const SCEV *visitUMaxExpr(const SCEVUMaxExpr *Expr) {
- if (GCD != Expr)
- Remainder = Expr;
- return GCD;
- }
+ // Bail out when GCD does not evenly divide one of the terms.
+ if (!R->isZero())
+ return false;
- const SCEV *visitUnknown(const SCEVUnknown *Expr) {
- if (GCD != Expr)
- Remainder = Expr;
- return GCD;
+ Term = Q;
}
- const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
- return One;
- }
+ // Remove all SCEVConstants.
+ Terms.erase(std::remove_if(Terms.begin(), Terms.end(), [](const SCEV *E) {
+ return isa<SCEVConstant>(E);
+ }),
+ Terms.end());
-private:
- ScalarEvolution &SE;
- const SCEV *GCD, *Remainder, *Zero, *One;
-};
+ if (Terms.size() > 0)
+ if (!findArrayDimensionsRec(SE, Terms, Sizes))
+ return false;
-struct SCEVDivision : public SCEVVisitor<SCEVDivision, const SCEV *> {
-public:
- // Remove from Start all multiples of Step.
- static const SCEV *divide(ScalarEvolution &SE, const SCEV *Start,
- const SCEV *Step) {
- SCEVDivision D(SE, Step);
- const SCEV *Rem = D.Zero;
- (void)Rem;
- // The division is guaranteed to succeed: Step should divide Start with no
- // remainder.
- assert(Step == SCEVGCD::findGCD(SE, Start, Step, &Rem) && Rem == D.Zero &&
- "Step should divide Start with no remainder.");
- return D.visit(Start);
- }
+ Sizes.push_back(Step);
+ return true;
+}
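findArrayDimensionsRec repeatedly divides every term by the term with the fewest factors, drops the constants, and recurses; each divisor it peels off is one array dimension. A numeric stand-in with hypothetical strides for an access into A[n][5][7] (the outermost extent never appears in a stride, so it cannot be recovered):

#include <algorithm>
#include <iostream>
#include <vector>

int main() {
  // Terms are the parametric strides, here {m*p, p} with m=5, p=7.
  std::vector<long> Terms = {5 * 7, 7};
  std::vector<long> Sizes;

  while (!Terms.empty()) {
    long Step = Terms.back();        // the term with the fewest factors
    std::vector<long> Next;
    for (long T : Terms) {
      if (T % Step != 0) return 1;   // bail out: Step must divide every term
      long Q = T / Step;
      if (Q != 1)                    // drop the constant-1 quotients
        Next.push_back(Q);
    }
    Sizes.push_back(Step);
    Terms = Next;                    // iterate on the quotients
  }

  std::reverse(Sizes.begin(), Sizes.end());
  for (long S : Sizes) std::cout << S << " ";  // prints "5 7"
  std::cout << "\n";
  return 0;
}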
+
+namespace {
+struct FindParameter {
+ bool FoundParameter;
+ FindParameter() : FoundParameter(false) {}
- SCEVDivision(ScalarEvolution &S, const SCEV *G) : SE(S), GCD(G) {
- Zero = SE.getConstant(GCD->getType(), 0);
- One = SE.getConstant(GCD->getType(), 1);
+ bool follow(const SCEV *S) {
+ if (isa<SCEVUnknown>(S)) {
+ FoundParameter = true;
+ // Stop recursion: we found a parameter.
+ return false;
+ }
+ // Keep looking.
+ return true;
+ }
+ bool isDone() const {
+ // Stop recursion if we have found a parameter.
+ return FoundParameter;
}
+};
+}
- const SCEV *visitConstant(const SCEVConstant *Constant) {
- if (GCD == Constant)
- return One;
+// Returns true when S contains at least one SCEVUnknown parameter.
+static inline bool
+containsParameters(const SCEV *S) {
+ FindParameter F;
+ SCEVTraversal<FindParameter> ST(F);
+ ST.visitAll(S);
- if (const SCEVConstant *CGCD = dyn_cast<SCEVConstant>(GCD))
- return SE.getConstant(sdiv(Constant, CGCD));
- return Constant;
- }
+ return F.FoundParameter;
+}
- const SCEV *visitTruncateExpr(const SCEVTruncateExpr *Expr) {
- if (GCD == Expr)
- return One;
- return Expr;
- }
+// Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter.
+static inline bool
+containsParameters(SmallVectorImpl<const SCEV *> &Terms) {
+ for (const SCEV *T : Terms)
+ if (containsParameters(T))
+ return true;
+ return false;
+}
- const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
- if (GCD == Expr)
- return One;
- return Expr;
- }
+// Return the number of product terms in S.
+static inline int numberOfTerms(const SCEV *S) {
+ if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S))
+ return Expr->getNumOperands();
+ return 1;
+}
- const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
- if (GCD == Expr)
- return One;
- return Expr;
- }
+static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) {
+ if (isa<SCEVConstant>(T))
+ return nullptr;
- const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
- if (GCD == Expr)
- return One;
+ if (isa<SCEVUnknown>(T))
+ return T;
- SmallVector<const SCEV *, 2> Operands;
- for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
- Operands.push_back(divide(SE, Expr->getOperand(i), GCD));
+ if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
+ SmallVector<const SCEV *, 2> Factors;
+ for (const SCEV *Op : M->operands())
+ if (!isa<SCEVConstant>(Op))
+ Factors.push_back(Op);
- if (Operands.size() == 1)
- return Operands[0];
- return SE.getAddExpr(Operands);
+ return SE.getMulExpr(Factors);
}
- const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
- if (GCD == Expr)
- return One;
+ return T;
+}
- bool FoundGCDTerm = false;
- for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
- if (Expr->getOperand(i) == GCD)
- FoundGCDTerm = true;
+/// Return the size of an element read or written by Inst.
+const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
+ Type *Ty;
+ if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
+ Ty = Store->getValueOperand()->getType();
+ else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
+ Ty = Load->getType();
+ else
+ return nullptr;
- SmallVector<const SCEV *, 2> Operands;
- if (FoundGCDTerm) {
- FoundGCDTerm = false;
- for (int i = 0, e = Expr->getNumOperands(); i < e; ++i) {
- if (FoundGCDTerm)
- Operands.push_back(Expr->getOperand(i));
- else if (Expr->getOperand(i) == GCD)
- FoundGCDTerm = true;
- else
- Operands.push_back(Expr->getOperand(i));
- }
- } else {
- FoundGCDTerm = false;
- const SCEV *PartialGCD = One;
- for (int i = 0, e = Expr->getNumOperands(); i < e; ++i) {
- if (PartialGCD == GCD) {
- Operands.push_back(Expr->getOperand(i));
- continue;
- }
+ Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
+ return getSizeOfExpr(ETy, Ty);
+}
- const SCEV *Rem = Zero;
- const SCEV *Res = SCEVGCD::findGCD(SE, Expr->getOperand(i), GCD, &Rem);
- if (Rem == Zero) {
- PartialGCD = SE.getMulExpr(PartialGCD, Res);
- Operands.push_back(divide(SE, Expr->getOperand(i), GCD));
- } else {
- Operands.push_back(Expr->getOperand(i));
- }
- }
- }
+/// Second step of delinearization: compute the array dimensions Sizes from the
+/// set of Terms extracted from the memory access function of this SCEVAddRec.
+void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
+ SmallVectorImpl<const SCEV *> &Sizes,
+ const SCEV *ElementSize) const {
- if (Operands.size() == 1)
- return Operands[0];
- return SE.getMulExpr(Operands);
- }
+ if (Terms.size() < 1 || !ElementSize)
+ return;
- const SCEV *visitUDivExpr(const SCEVUDivExpr *Expr) {
- if (GCD == Expr)
- return One;
- return Expr;
- }
+ // Early return when Terms do not contain parameters: we do not delinearize
+ // non-parametric SCEVs.
+ if (!containsParameters(Terms))
+ return;
- const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
- if (GCD == Expr)
- return One;
+ DEBUG({
+ dbgs() << "Terms:\n";
+ for (const SCEV *T : Terms)
+ dbgs() << *T << "\n";
+ });
- assert(Expr->isAffine() && "Expr should be affine");
+ // Remove duplicates.
+ std::sort(Terms.begin(), Terms.end());
+ Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());
- const SCEV *Start = divide(SE, Expr->getStart(), GCD);
- const SCEV *Step = divide(SE, Expr->getStepRecurrence(SE), GCD);
+ // Put larger terms first.
+ std::sort(Terms.begin(), Terms.end(), [](const SCEV *LHS, const SCEV *RHS) {
+ return numberOfTerms(LHS) > numberOfTerms(RHS);
+ });
- return SE.getAddRecExpr(Start, Step, Expr->getLoop(),
- Expr->getNoWrapFlags());
- }
+ ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
- const SCEV *visitSMaxExpr(const SCEVSMaxExpr *Expr) {
- if (GCD == Expr)
- return One;
- return Expr;
+ // Divide all terms by the element size.
+ for (const SCEV *&Term : Terms) {
+ const SCEV *Q, *R;
+ SCEVDivision::divide(SE, Term, ElementSize, &Q, &R);
+ Term = Q;
}
- const SCEV *visitUMaxExpr(const SCEVUMaxExpr *Expr) {
- if (GCD == Expr)
- return One;
- return Expr;
- }
+ SmallVector<const SCEV *, 4> NewTerms;
+
+ // Remove constant factors.
+ for (const SCEV *T : Terms)
+ if (const SCEV *NewT = removeConstantFactors(SE, T))
+ NewTerms.push_back(NewT);
- const SCEV *visitUnknown(const SCEVUnknown *Expr) {
- if (GCD == Expr)
- return One;
- return Expr;
+ DEBUG({
+ dbgs() << "Terms after sorting:\n";
+ for (const SCEV *T : NewTerms)
+ dbgs() << *T << "\n";
+ });
+
+ if (NewTerms.empty() ||
+ !findArrayDimensionsRec(SE, NewTerms, Sizes)) {
+ Sizes.clear();
+ return;
}
- const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
- return Expr;
+ // The last element to be pushed into Sizes is the size of an element.
+ Sizes.push_back(ElementSize);
+
+ DEBUG({
+ dbgs() << "Sizes:\n";
+ for (const SCEV *S : Sizes)
+ dbgs() << *S << "\n";
+ });
+}
+
+/// Third step of delinearization: compute the access functions for the
+/// Subscripts based on the dimensions in Sizes.
+void SCEVAddRecExpr::computeAccessFunctions(
+ ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &Subscripts,
+ SmallVectorImpl<const SCEV *> &Sizes) const {
+
+ // Early exit in case this SCEV is not an affine multivariate function.
+ if (Sizes.empty() || !this->isAffine())
+ return;
+
+ const SCEV *Res = this;
+ int Last = Sizes.size() - 1;
+ for (int i = Last; i >= 0; i--) {
+ const SCEV *Q, *R;
+ SCEVDivision::divide(SE, Res, Sizes[i], &Q, &R);
+
+ DEBUG({
+ dbgs() << "Res: " << *Res << "\n";
+ dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
+ dbgs() << "Res divided by Sizes[i]:\n";
+ dbgs() << "Quotient: " << *Q << "\n";
+ dbgs() << "Remainder: " << *R << "\n";
+ });
+
+ Res = Q;
+
+ // Do not record the last subscript corresponding to the size of elements in
+ // the array.
+ if (i == Last) {
+
+ // Bail out if the remainder is too complex.
+ if (isa<SCEVAddRecExpr>(R)) {
+ Subscripts.clear();
+ Sizes.clear();
+ return;
+ }
+
+ continue;
+ }
+
+ // Record the access function for the current subscript.
+ Subscripts.push_back(R);
}
-private:
- ScalarEvolution &SE;
- const SCEV *GCD, *Zero, *One;
-};
+ // Also push in last position the remainder of the last division: it will be
+ // the access function of the innermost dimension.
+ Subscripts.push_back(Res);
+
+ std::reverse(Subscripts.begin(), Subscripts.end());
+
+ DEBUG({
+ dbgs() << "Subscripts:\n";
+ for (const SCEV *S : Subscripts)
+ dbgs() << *S << "\n";
+ });
}
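computeAccessFunctions recovers the subscripts by dividing the access function by the sizes from the innermost dimension outwards: each remainder is one subscript, and the last quotient is the outermost one. A worked numeric example with hypothetical dimensions and indices (working in elements, so the element-size division the real code performs first is skipped):

#include <cassert>
#include <iostream>

int main() {
  // Hypothetical access A[i][j][k] into an array declared A[n][5][7]; the
  // linearized offset in elements is Off = (i*5 + j)*7 + k.  Peeling one
  // subscript per division never needs the outermost extent n.
  const long m = 5, p = 7;
  const long i = 3, j = 4, k = 6;
  long Off = (i * m + j) * p + k;

  long SubK = Off % p, Q1 = Off / p;   // innermost subscript k
  long SubJ = Q1 % m,  SubI = Q1 / m;  // middle subscript j, outermost i

  assert(SubI == i && SubJ == j && SubK == k);
  std::cout << "A[" << SubI << "][" << SubJ << "][" << SubK << "]\n";
  return 0;
}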
/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
@@ -7117,93 +7546,40 @@ private:
/// asking for the SCEV of the memory access with respect to all enclosing
/// loops, calling SCEV->delinearize on that and printing the results.
-const SCEV *
-SCEVAddRecExpr::delinearize(ScalarEvolution &SE,
- SmallVectorImpl<const SCEV *> &Subscripts,
- SmallVectorImpl<const SCEV *> &Sizes) const {
- // Early exit in case this SCEV is not an affine multivariate function.
- if (!this->isAffine())
- return this;
-
- const SCEV *Start = this->getStart();
- const SCEV *Step = this->getStepRecurrence(SE);
-
- // Build the SCEV representation of the cannonical induction variable in the
- // loop of this SCEV.
- const SCEV *Zero = SE.getConstant(this->getType(), 0);
- const SCEV *One = SE.getConstant(this->getType(), 1);
- const SCEV *IV =
- SE.getAddRecExpr(Zero, One, this->getLoop(), this->getNoWrapFlags());
-
- DEBUG(dbgs() << "(delinearize: " << *this << "\n");
-
- // Currently we fail to delinearize when the stride of this SCEV is 1. We
- // could decide to not fail in this case: we could just return 1 for the size
- // of the subscript, and this same SCEV for the access function.
- if (Step == One) {
- DEBUG(dbgs() << "failed to delinearize " << *this << "\n)\n");
- return this;
- }
-
- // Find the GCD and Remainder of the Start and Step coefficients of this SCEV.
- const SCEV *Remainder = NULL;
- const SCEV *GCD = SCEVGCD::findGCD(SE, Start, Step, &Remainder);
-
- DEBUG(dbgs() << "GCD: " << *GCD << "\n");
- DEBUG(dbgs() << "Remainder: " << *Remainder << "\n");
-
- // Same remark as above: we currently fail the delinearization, although we
- // can very well handle this special case.
- if (GCD == One) {
- DEBUG(dbgs() << "failed to delinearize " << *this << "\n)\n");
- return this;
- }
-
- // As findGCD computed Remainder, GCD divides "Start - Remainder." The
- // Quotient is then this SCEV without Remainder, scaled down by the GCD. The
- // Quotient is what will be used in the next subscript delinearization.
- const SCEV *Quotient =
- SCEVDivision::divide(SE, SE.getMinusSCEV(Start, Remainder), GCD);
- DEBUG(dbgs() << "Quotient: " << *Quotient << "\n");
-
- const SCEV *Rem;
- if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Quotient))
- // Recursively call delinearize on the Quotient until there are no more
- // multiples that can be recognized.
- Rem = AR->delinearize(SE, Subscripts, Sizes);
- else
- Rem = Quotient;
+void SCEVAddRecExpr::delinearize(ScalarEvolution &SE,
+ SmallVectorImpl<const SCEV *> &Subscripts,
+ SmallVectorImpl<const SCEV *> &Sizes,
+ const SCEV *ElementSize) const {
+ // First step: collect parametric terms.
+ SmallVector<const SCEV *, 4> Terms;
+ collectParametricTerms(SE, Terms);
- // Scale up the cannonical induction variable IV by whatever remains from the
- // Step after division by the GCD: the GCD is the size of all the sub-array.
- if (Step != GCD) {
- Step = SCEVDivision::divide(SE, Step, GCD);
- IV = SE.getMulExpr(IV, Step);
- }
- // The access function in the current subscript is computed as the cannonical
- // induction variable IV (potentially scaled up by the step) and offset by
- // Rem, the offset of delinearization in the sub-array.
- const SCEV *Index = SE.getAddExpr(IV, Rem);
+ if (Terms.empty())
+ return;
- // Record the access function and the size of the current subscript.
- Subscripts.push_back(Index);
- Sizes.push_back(GCD);
+ // Second step: find subscript sizes.
+ SE.findArrayDimensions(Terms, Sizes, ElementSize);
-#ifndef NDEBUG
- int Size = Sizes.size();
- DEBUG(dbgs() << "succeeded to delinearize " << *this << "\n");
- DEBUG(dbgs() << "ArrayDecl[UnknownSize]");
- for (int i = 0; i < Size - 1; i++)
- DEBUG(dbgs() << "[" << *Sizes[i] << "]");
- DEBUG(dbgs() << " with elements of " << *Sizes[Size - 1] << " bytes.\n");
-
- DEBUG(dbgs() << "ArrayRef");
- for (int i = 0; i < Size; i++)
- DEBUG(dbgs() << "[" << *Subscripts[i] << "]");
- DEBUG(dbgs() << "\n)\n");
-#endif
+ if (Sizes.empty())
+ return;
+
+ // Third step: compute the access functions for each subscript.
+ computeAccessFunctions(SE, Subscripts, Sizes);
+
+ if (Subscripts.empty())
+ return;
- return Remainder;
+ DEBUG({
+ dbgs() << "succeeded to delinearize " << *this << "\n";
+ dbgs() << "ArrayDecl[UnknownSize]";
+ for (const SCEV *S : Sizes)
+ dbgs() << "[" << *S << "]";
+
+ dbgs() << "\nArrayRef";
+ for (const SCEV *S : Subscripts)
+ dbgs() << "[" << *S << "]";
+ dbgs() << "\n";
+ });
}
//===----------------------------------------------------------------------===//
@@ -7225,11 +7601,8 @@ void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
// so that future queries will recompute the expressions using the new
// value.
Value *Old = getValPtr();
- SmallVector<User *, 16> Worklist;
+ SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end());
SmallPtrSet<User *, 8> Visited;
- for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
- UI != UE; ++UI)
- Worklist.push_back(*UI);
while (!Worklist.empty()) {
User *U = Worklist.pop_back_val();
// Deleting the Old value will cause this to dangle. Postpone
@@ -7241,9 +7614,7 @@ void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
if (PHINode *PN = dyn_cast<PHINode>(U))
SE->ConstantEvolutionLoopExitValue.erase(PN);
SE->ValueExprMap.erase(U);
- for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
- UI != UE; ++UI)
- Worklist.push_back(*UI);
+ Worklist.insert(Worklist.end(), U->user_begin(), U->user_end());
}
// Delete the Old value.
if (PHINode *PN = dyn_cast<PHINode>(Old))
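The SCEVCallbackVH hunk above now seeds the worklist directly from the user range and appends further users with a single insert call; the shape is an ordinary worklist-plus-visited-set walk over transitive users. A generic standalone analogue (Node is an invented stand-in for Value/User):

#include <iostream>
#include <unordered_set>
#include <vector>

struct Node {
  int Id;
  std::vector<Node *> Users;   // who uses this value
};

// Invalidate everything that transitively uses Old.
static void invalidateUsers(Node &Old, std::unordered_set<int> &Invalidated) {
  std::vector<Node *> Worklist(Old.Users.begin(), Old.Users.end());
  std::unordered_set<Node *> Visited;
  while (!Worklist.empty()) {
    Node *U = Worklist.back();
    Worklist.pop_back();
    if (!Visited.insert(U).second)
      continue;                // already processed
    Invalidated.insert(U->Id);
    Worklist.insert(Worklist.end(), U->Users.begin(), U->Users.end());
  }
}

int main() {
  Node A{0, {}}, B{1, {}}, C{2, {}};
  A.Users = {&B};
  B.Users = {&C};
  std::unordered_set<int> Invalidated;
  invalidateUsers(A, Invalidated);
  std::cout << Invalidated.size() << "\n";   // 2: B and C
  return 0;
}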
@@ -7260,16 +7631,18 @@ ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
//===----------------------------------------------------------------------===//
ScalarEvolution::ScalarEvolution()
- : FunctionPass(ID), ValuesAtScopes(64), LoopDispositions(64), BlockDispositions(64), FirstUnknown(0) {
+ : FunctionPass(ID), ValuesAtScopes(64), LoopDispositions(64),
+ BlockDispositions(64), FirstUnknown(nullptr) {
initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
}
bool ScalarEvolution::runOnFunction(Function &F) {
this->F = &F;
LI = &getAnalysis<LoopInfo>();
- TD = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = &getAnalysis<TargetLibraryInfo>();
- DT = &getAnalysis<DominatorTree>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
return false;
}
@@ -7278,7 +7651,7 @@ void ScalarEvolution::releaseMemory() {
// destructors, so that they release their references to their values.
for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
U->~SCEVUnknown();
- FirstUnknown = 0;
+ FirstUnknown = nullptr;
ValueExprMap.clear();
@@ -7306,7 +7679,7 @@ void ScalarEvolution::releaseMemory() {
void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequiredTransitive<LoopInfo>();
- AU.addRequiredTransitive<DominatorTree>();
+ AU.addRequiredTransitive<DominatorTreeWrapperPass>();
AU.addRequired<TargetLibraryInfo>();
}
@@ -7321,7 +7694,7 @@ static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
PrintLoopInfo(OS, SE, *I);
OS << "Loop ";
- WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
+ L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
OS << ": ";
SmallVector<BasicBlock *, 8> ExitBlocks;
@@ -7337,7 +7710,7 @@ static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
OS << "\n"
"Loop ";
- WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
+ L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
OS << ": ";
if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
@@ -7359,7 +7732,7 @@ void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
OS << "Classifying expressions for: ";
- WriteAsOperand(OS, F, /*PrintType=*/false);
+ F->printAsOperand(OS, /*PrintType=*/false);
OS << "\n";
for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
@@ -7390,7 +7763,7 @@ void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
}
OS << "Determining loop execution counts for: ";
- WriteAsOperand(OS, F, /*PrintType=*/false);
+ F->printAsOperand(OS, /*PrintType=*/false);
OS << "\n";
for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
PrintLoopInfo(OS, &SE, *I);
@@ -7417,7 +7790,7 @@ ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
ScalarEvolution::LoopDisposition
ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
- switch (S->getSCEVType()) {
+ switch (static_cast<SCEVTypes>(S->getSCEVType())) {
case scConstant:
return LoopInvariant;
case scTruncate:
@@ -7490,8 +7863,8 @@ ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
return LoopInvariant;
case scCouldNotCompute:
llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- default: llvm_unreachable("Unknown SCEV kind!");
}
+ llvm_unreachable("Unknown SCEV kind!");
}
bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
@@ -7523,7 +7896,7 @@ ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
ScalarEvolution::BlockDisposition
ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
- switch (S->getSCEVType()) {
+ switch (static_cast<SCEVTypes>(S->getSCEVType())) {
case scConstant:
return ProperlyDominatesBlock;
case scTruncate:
@@ -7580,9 +7953,8 @@ ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
return ProperlyDominatesBlock;
case scCouldNotCompute:
llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- default:
- llvm_unreachable("Unknown SCEV kind!");
}
+ llvm_unreachable("Unknown SCEV kind!");
}
bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
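The computeLoopDisposition/computeBlockDisposition hunks cast the raw kind to the SCEVTypes enum, cover every enumerator, and move llvm_unreachable after the switch; with no default case the compiler can warn when a newly added enumerator is left unhandled. A small illustration of the pattern (Kind and name() are made up; std::abort stands in for llvm_unreachable):

#include <cstdio>
#include <cstdlib>

enum Kind { KindA, KindB, KindC };

// Fully covered switch, no default: -Wswitch flags any forgotten case.
static const char *name(int RawKind) {
  switch (static_cast<Kind>(RawKind)) {
  case KindA: return "A";
  case KindB: return "B";
  case KindC: return "C";
  }
  std::abort();   // "impossible value" check kept after the switch
}

int main() { std::printf("%s\n", name(KindB)); return 0; }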
@@ -7637,7 +8009,7 @@ void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
typedef DenseMap<const Loop *, std::string> VerifyMap;
-/// replaceSubString - Replaces all occurences of From in Str with To.
+/// replaceSubString - Replaces all occurrences of From in Str with To.
static void replaceSubString(std::string &Str, StringRef From, StringRef To) {
size_t Pos = 0;
while ((Pos = Str.find(From, Pos)) != std::string::npos) {
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
index 79c5f0d..6933f74 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
@@ -34,7 +34,7 @@ namespace {
public:
static char ID; // Class identification, replacement for typeinfo
- ScalarEvolutionAliasAnalysis() : FunctionPass(ID), SE(0) {
+ ScalarEvolutionAliasAnalysis() : FunctionPass(ID), SE(nullptr) {
initializeScalarEvolutionAliasAnalysisPass(
*PassRegistry::getPassRegistry());
}
@@ -43,16 +43,16 @@ namespace {
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ void *getAdjustedAnalysisPointer(AnalysisID PI) override {
if (PI == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
}
private:
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- virtual bool runOnFunction(Function &F);
- virtual AliasResult alias(const Location &LocA, const Location &LocB);
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool runOnFunction(Function &F) override;
+ AliasResult alias(const Location &LocA, const Location &LocB) override;
Value *GetBaseValue(const SCEV *S);
};
@@ -102,7 +102,7 @@ ScalarEvolutionAliasAnalysis::GetBaseValue(const SCEV *S) {
return U->getValue();
}
// No Identified object found.
- return 0;
+ return nullptr;
}
AliasAnalysis::AliasResult
@@ -162,10 +162,10 @@ ScalarEvolutionAliasAnalysis::alias(const Location &LocA,
if ((AO && AO != LocA.Ptr) || (BO && BO != LocB.Ptr))
if (alias(Location(AO ? AO : LocA.Ptr,
AO ? +UnknownSize : LocA.Size,
- AO ? 0 : LocA.TBAATag),
+ AO ? nullptr : LocA.TBAATag),
Location(BO ? BO : LocB.Ptr,
BO ? +UnknownSize : LocB.Size,
- BO ? 0 : LocB.TBAATag)) == NoAlias)
+ BO ? nullptr : LocB.TBAATag)) == NoAlias)
return NoAlias;
// Forward the query to the next analysis.
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index 86a557b..968c619 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -14,11 +14,13 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ScalarEvolutionExpander.h"
-#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
@@ -43,12 +45,10 @@ Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
// not allowed to move it.
BasicBlock::iterator BIP = Builder.GetInsertPoint();
- Instruction *Ret = NULL;
+ Instruction *Ret = nullptr;
// Check to see if there is already a cast!
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
- UI != E; ++UI) {
- User *U = *UI;
+ for (User *U : V->users())
if (U->getType() == Ty)
if (CastInst *CI = dyn_cast<CastInst>(U))
if (CI->getOpcode() == Op) {
@@ -68,7 +68,6 @@ Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
Ret = CI;
break;
}
- }
// Create a new cast.
if (!Ret)
@@ -209,7 +208,7 @@ static bool FactorOutConstant(const SCEV *&S,
const SCEV *&Remainder,
const SCEV *Factor,
ScalarEvolution &SE,
- const DataLayout *TD) {
+ const DataLayout *DL) {
// Everything is divisible by one.
if (Factor->isOne())
return true;
@@ -249,7 +248,7 @@ static bool FactorOutConstant(const SCEV *&S,
// In a Mul, check if there is a constant operand which is a multiple
// of the given factor.
if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
- if (TD) {
+ if (DL) {
// With DataLayout, the size is known. Check if there is a constant
// operand which is a multiple of the given factor. If so, we can
// factor it.
@@ -269,7 +268,7 @@ static bool FactorOutConstant(const SCEV *&S,
for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
const SCEV *SOp = M->getOperand(i);
const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
- if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
+ if (FactorOutConstant(SOp, Remainder, Factor, SE, DL) &&
Remainder->isZero()) {
SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
NewMulOps[i] = SOp;
@@ -284,12 +283,12 @@ static bool FactorOutConstant(const SCEV *&S,
if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
const SCEV *Step = A->getStepRecurrence(SE);
const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
- if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
+ if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
return false;
if (!StepRem->isZero())
return false;
const SCEV *Start = A->getStart();
- if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
+ if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
return false;
S = SE.getAddRecExpr(Start, Step, A->getLoop(),
A->getNoWrapFlags(SCEV::FlagNW));
@@ -403,8 +402,8 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
// without the other.
SplitAddRecs(Ops, Ty, SE);
- Type *IntPtrTy = SE.TD
- ? SE.TD->getIntPtrType(PTy)
+ Type *IntPtrTy = SE.DL
+ ? SE.DL->getIntPtrType(PTy)
: Type::getInt64Ty(PTy->getContext());
// Descend down the pointer's type and attempt to convert the other
@@ -423,7 +422,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
const SCEV *Op = Ops[i];
const SCEV *Remainder = SE.getConstant(Ty, 0);
- if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
+ if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.DL)) {
// Op now has ElSize factored out.
ScaledOps.push_back(Op);
if (!Remainder->isZero())
@@ -457,13 +456,13 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
bool FoundFieldNo = false;
// An empty struct has no fields.
if (STy->getNumElements() == 0) break;
- if (SE.TD) {
+ if (SE.DL) {
// With DataLayout, field offsets are known. See if a constant offset
// falls within any of the struct fields.
if (Ops.empty()) break;
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
if (SE.getTypeSizeInBits(C->getType()) <= 64) {
- const StructLayout &SL = *SE.TD->getStructLayout(STy);
+ const StructLayout &SL = *SE.DL->getStructLayout(STy);
uint64_t FullOffset = C->getValue()->getZExtValue();
if (FullOffset < SL.getSizeInBytes()) {
unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
@@ -629,21 +628,21 @@ static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
// Test whether we've already computed the most relevant loop for this SCEV.
std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
- RelevantLoops.insert(std::make_pair(S, static_cast<const Loop *>(0)));
+ RelevantLoops.insert(std::make_pair(S, nullptr));
if (!Pair.second)
return Pair.first->second;
if (isa<SCEVConstant>(S))
// A constant has no relevant loops.
- return 0;
+ return nullptr;
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
return Pair.first->second = SE.LI->getLoopFor(I->getParent());
// A non-instruction has no relevant loops.
- return 0;
+ return nullptr;
}
if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
- const Loop *L = 0;
+ const Loop *L = nullptr;
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
L = AR->getLoop();
for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
@@ -718,7 +717,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
// Emit instructions to add all the operands. Hoist as much as possible
// out of loops, and form meaningful getelementptrs where possible.
- Value *Sum = 0;
+ Value *Sum = nullptr;
for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
const Loop *CurLoop = I->first;
@@ -786,7 +785,7 @@ Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
// Emit instructions to mul all the operands. Hoist as much as possible
// out of loops.
- Value *Prod = 0;
+ Value *Prod = nullptr;
for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
const SCEV *Op = I->second;
@@ -894,18 +893,18 @@ Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
Instruction *InsertPos,
bool allowScale) {
if (IncV == InsertPos)
- return NULL;
+ return nullptr;
switch (IncV->getOpcode()) {
default:
- return NULL;
+ return nullptr;
// Check for a simple Add/Sub or GEP of a loop invariant step.
case Instruction::Add:
case Instruction::Sub: {
Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
if (!OInst || SE.DT->dominates(OInst, InsertPos))
return dyn_cast<Instruction>(IncV->getOperand(0));
- return NULL;
+ return nullptr;
}
case Instruction::BitCast:
return dyn_cast<Instruction>(IncV->getOperand(0));
@@ -916,7 +915,7 @@ Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
continue;
if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
if (!SE.DT->dominates(OInst, InsertPos))
- return NULL;
+ return nullptr;
}
if (allowScale) {
// allow any kind of GEP as long as it can be hoisted.
@@ -927,11 +926,11 @@ Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
// have 2 operands. i1* is used by the expander to represent an
// address-size element.
if (IncV->getNumOperands() != 2)
- return NULL;
+ return nullptr;
unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
&& IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
- return NULL;
+ return nullptr;
break;
}
return dyn_cast<Instruction>(IncV->getOperand(0));
@@ -1016,6 +1015,54 @@ Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
return IncV;
}
+/// \brief Hoist the addrec instruction chain rooted in the loop phi above the
+/// position. This routine assumes that this is possible (has been checked).
+static void hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
+ Instruction *Pos, PHINode *LoopPhi) {
+ do {
+ if (DT->dominates(InstToHoist, Pos))
+ break;
+ // Make sure the increment is where we want it. But don't move it
+ // down past a potential existing post-inc user.
+ InstToHoist->moveBefore(Pos);
+ Pos = InstToHoist;
+ InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
+ } while (InstToHoist != LoopPhi);
+}
+
+/// \brief Check whether we can cheaply express the requested SCEV in terms of
+/// the available PHI SCEV by truncation and/or inversion of the step.
+static bool canBeCheaplyTransformed(ScalarEvolution &SE,
+ const SCEVAddRecExpr *Phi,
+ const SCEVAddRecExpr *Requested,
+ bool &InvertStep) {
+ Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
+ Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());
+
+ if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
+ return false;
+
+ // Try truncating it if necessary.
+ Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
+ if (!Phi)
+ return false;
+
+ // Check whether truncation will help.
+ if (Phi == Requested) {
+ InvertStep = false;
+ return true;
+ }
+
+ // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
+ if (SE.getAddExpr(Requested->getStart(),
+ SE.getNegativeSCEV(Requested)) == Phi) {
+ InvertStep = true;
+ return true;
+ }
+
+ return false;
+}
+
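canBeCheaplyTransformed relies on the identity {R,+,-1} == R - {0,+,1}: a recurrence stepping down from R by one takes the same values as R minus a recurrence stepping up from zero. A quick numeric confirmation:

#include <cassert>
#include <cstdint>

int main() {
  const int64_t R = 100;
  int64_t Down = R;   // {R,+,-1}
  int64_t Up = 0;     // {0,+,1}
  for (int i = 0; i < 50; ++i) {
    assert(Down == R - Up);   // the two expressions agree at every iteration
    Down -= 1;
    Up += 1;
  }
  return 0;
}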
/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
@@ -1023,49 +1070,87 @@ PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
const Loop *L,
Type *ExpandTy,
- Type *IntTy) {
+ Type *IntTy,
+ Type *&TruncTy,
+ bool &InvertStep) {
assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");
// Reuse a previously-inserted PHI, if present.
BasicBlock *LatchBlock = L->getLoopLatch();
if (LatchBlock) {
+ PHINode *AddRecPhiMatch = nullptr;
+ Instruction *IncV = nullptr;
+ TruncTy = nullptr;
+ InvertStep = false;
+
+ // Only try partially matching scevs that need truncation and/or
+ // step-inversion if we know this loop is outside the current loop.
+ bool TryNonMatchingSCEV = IVIncInsertLoop &&
+ SE.DT->properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
+
for (BasicBlock::iterator I = L->getHeader()->begin();
PHINode *PN = dyn_cast<PHINode>(I); ++I) {
- if (!SE.isSCEVable(PN->getType()) ||
- (SE.getEffectiveSCEVType(PN->getType()) !=
- SE.getEffectiveSCEVType(Normalized->getType())) ||
- SE.getSCEV(PN) != Normalized)
+ if (!SE.isSCEVable(PN->getType()))
continue;
- Instruction *IncV =
- cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));
+ const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PN));
+ if (!PhiSCEV)
+ continue;
+ bool IsMatchingSCEV = PhiSCEV == Normalized;
+ // We only handle truncation and inversion of phi recurrences for the
+ // expanded expression if the expanded expression's loop dominates the
+ // loop we insert to. Check now, so we can bail out early.
+ if (!IsMatchingSCEV && !TryNonMatchingSCEV)
+ continue;
+
+ Instruction *TempIncV =
+ cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));
+
+ // Check whether we can reuse this PHI node.
if (LSRMode) {
- if (!isExpandedAddRecExprPHI(PN, IncV, L))
+ if (!isExpandedAddRecExprPHI(PN, TempIncV, L))
continue;
- if (L == IVIncInsertLoop && !hoistIVInc(IncV, IVIncInsertPos))
+ if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
continue;
- }
- else {
- if (!isNormalAddRecExprPHI(PN, IncV, L))
+ } else {
+ if (!isNormalAddRecExprPHI(PN, TempIncV, L))
continue;
- if (L == IVIncInsertLoop)
- do {
- if (SE.DT->dominates(IncV, IVIncInsertPos))
- break;
- // Make sure the increment is where we want it. But don't move it
- // down past a potential existing post-inc user.
- IncV->moveBefore(IVIncInsertPos);
- IVIncInsertPos = IncV;
- IncV = cast<Instruction>(IncV->getOperand(0));
- } while (IncV != PN);
}
+
+ // Stop if we have found an exact match SCEV.
+ if (IsMatchingSCEV) {
+ IncV = TempIncV;
+ TruncTy = nullptr;
+ InvertStep = false;
+ AddRecPhiMatch = PN;
+ break;
+ }
+
+ // Try whether the phi can be translated into the requested form
+ // (truncated and/or offset by a constant).
+ if ((!TruncTy || InvertStep) &&
+ canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
+ // Record the phi node. But don't stop; we might find an exact match
+ // later.
+ AddRecPhiMatch = PN;
+ IncV = TempIncV;
+ TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
+ }
+ }
+
+ if (AddRecPhiMatch) {
+ // Potentially, move the increment. We have made sure in
+ // isExpandedAddRecExprPHI or hoistIVInc that this is possible.
+ if (L == IVIncInsertLoop)
+ hoistBeforePos(SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);
+
// Ok, the add recurrence looks usable.
// Remember this PHI, even in post-inc mode.
- InsertedValues.insert(PN);
+ InsertedValues.insert(AddRecPhiMatch);
// Remember the increment.
rememberInstruction(IncV);
- return PN;
+ return AddRecPhiMatch;
}
}
@@ -1159,13 +1244,13 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
PostIncLoopSet Loops;
Loops.insert(L);
Normalized =
- cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, 0, 0,
- Loops, SE, *SE.DT));
+ cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, nullptr,
+ nullptr, Loops, SE, *SE.DT));
}
// Strip off any non-loop-dominating component from the addrec start.
const SCEV *Start = Normalized->getStart();
- const SCEV *PostLoopOffset = 0;
+ const SCEV *PostLoopOffset = nullptr;
if (!SE.properlyDominates(Start, L->getHeader())) {
PostLoopOffset = Start;
Start = SE.getConstant(Normalized->getType(), 0);
@@ -1177,7 +1262,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// Strip off any non-loop-dominating component from the addrec step.
const SCEV *Step = Normalized->getStepRecurrence(SE);
- const SCEV *PostLoopScale = 0;
+ const SCEV *PostLoopScale = nullptr;
if (!SE.dominates(Step, L->getHeader())) {
PostLoopScale = Step;
Step = SE.getConstant(Normalized->getType(), 1);
@@ -1190,7 +1275,12 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// Expand the core addrec. If we need post-loop scaling, force it to
// expand to an integer type to avoid the need for additional casting.
Type *ExpandTy = PostLoopScale ? IntTy : STy;
- PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);
+ // In some cases, we decide to reuse an existing phi node but need to truncate
+ // it and/or invert the step.
+ Type *TruncTy = nullptr;
+ bool InvertStep = false;
+ PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy,
+ TruncTy, InvertStep);
// Accommodate post-inc mode, if necessary.
Value *Result;
@@ -1231,6 +1321,26 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
}
}
+ // We have decided to reuse an induction variable of a dominating loop. Apply
+ // truncation and/or inversion of the step.
+ if (TruncTy) {
+ Type *ResTy = Result->getType();
+ // Normalize the result type.
+ if (ResTy != SE.getEffectiveSCEVType(ResTy))
+ Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
+ // Truncate the result.
+ if (TruncTy != Result->getType()) {
+ Result = Builder.CreateTrunc(Result, TruncTy);
+ rememberInstruction(Result);
+ }
+ // Invert the result.
+ if (InvertStep) {
+ Result = Builder.CreateSub(expandCodeFor(Normalized->getStart(), TruncTy),
+ Result);
+ rememberInstruction(Result);
+ }
+ }
+
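When a wider dominating phi is reused, the block above emits a trunc and, for an inverted step, a sub from the start value. A plain-integer stand-in showing why the truncated and inverted values track the requested recurrences (the concrete start value and widths are made up):

#include <cassert>
#include <cstdint>

int main() {
  const int32_t Start = 1000;
  int64_t WideIV = 0;                                  // existing i64 {0,+,1}
  for (int i = 0; i < 200; ++i) {
    int32_t Truncated = static_cast<int32_t>(WideIV);  // {0,+,1} in i32
    int32_t Inverted = Start - Truncated;              // {Start,+,-1}
    assert(Truncated == i);
    assert(Inverted == Start - i);
    ++WideIV;
  }
  return 0;
}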
// Re-apply any non-loop-dominating scale.
if (PostLoopScale) {
assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
@@ -1263,7 +1373,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
const Loop *L = S->getLoop();
// First check for an existing canonical IV in a suitable type.
- PHINode *CanonicalIV = 0;
+ PHINode *CanonicalIV = nullptr;
if (PHINode *PN = L->getCanonicalInductionVariable())
if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
CanonicalIV = PN;
@@ -1279,12 +1389,12 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
S->getNoWrapFlags(SCEV::FlagNW)));
BasicBlock::iterator NewInsertPt =
- llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
+ std::next(BasicBlock::iterator(cast<Instruction>(V)));
BuilderType::InsertPointGuard Guard(Builder);
while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
isa<LandingPadInst>(NewInsertPt))
++NewInsertPt;
- V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
+ V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
NewInsertPt);
return V;
}
@@ -1333,8 +1443,12 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
Constant *One = ConstantInt::get(Ty, 1);
for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
BasicBlock *HP = *HPI;
- if (!PredSeen.insert(HP))
+ if (!PredSeen.insert(HP)) {
+ // There must be an incoming value for each predecessor, even the
+ // duplicates!
+ CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
continue;
+ }
if (L->contains(HP)) {
// Insert a unit add instruction right before the terminator
@@ -1507,7 +1621,7 @@ Value *SCEVExpander::expand(const SCEV *S) {
while (InsertPt != Builder.GetInsertPoint()
&& (isInsertedInstruction(InsertPt)
|| isa<DbgInfoIntrinsic>(InsertPt))) {
- InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
+ InsertPt = std::next(BasicBlock::iterator(InsertPt));
}
break;
}
@@ -1528,7 +1642,7 @@ Value *SCEVExpander::expand(const SCEV *S) {
//
// This is independent of PostIncLoops. The mapped value simply materializes
// the expression at this insertion point. If the mapped value happened to be
- // a postinc expansion, it could be reused by a non postinc user, but only if
+ // a postinc expansion, it could be reused by a non-postinc user, but only if
// its insertion point was already at the head of the loop.
InsertedExpressions[std::make_pair(S, InsertPt)] = V;
return V;
@@ -1557,20 +1671,12 @@ SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
// Emit code for it.
BuilderType::InsertPointGuard Guard(Builder);
- PHINode *V = cast<PHINode>(expandCodeFor(H, 0, L->getHeader()->begin()));
+ PHINode *V = cast<PHINode>(expandCodeFor(H, nullptr,
+ L->getHeader()->begin()));
return V;
}
-/// Sort values by integer width for replaceCongruentIVs.
-static bool width_descending(Value *lhs, Value *rhs) {
- // Put pointers at the back and make sure pointer < pointer = false.
- if (!lhs->getType()->isIntegerTy() || !rhs->getType()->isIntegerTy())
- return rhs->getType()->isIntegerTy() && !lhs->getType()->isIntegerTy();
- return rhs->getType()->getPrimitiveSizeInBits()
- < lhs->getType()->getPrimitiveSizeInBits();
-}
-
/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
@@ -1587,7 +1693,13 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
Phis.push_back(Phi);
}
if (TTI)
- std::sort(Phis.begin(), Phis.end(), width_descending);
+ std::sort(Phis.begin(), Phis.end(), [](Value *LHS, Value *RHS) {
+ // Put pointers at the back and make sure pointer < pointer = false.
+ if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
+ return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
+ return RHS->getType()->getPrimitiveSizeInBits() <
+ LHS->getType()->getPrimitiveSizeInBits();
+ });
unsigned NumElim = 0;
DenseMap<const SCEV *, PHINode *> ExprToIVMap;
@@ -1599,7 +1711,7 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
// Fold constant phis. They may be congruent to other constant phis and
// would confuse the logic below that expects proper IVs.
- if (Value *V = Phi->hasConstantValue()) {
+ if (Value *V = SimplifyInstruction(Phi, SE.DL, SE.TLI, SE.DT)) {
Phi->replaceAllUsesWith(V);
DeadInsts.push_back(Phi);
++NumElim;
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp b/contrib/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
index f110616..3ccefb0 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Analysis/Dominators.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
@@ -113,7 +113,7 @@ TransformImpl(const SCEV *S, Instruction *User, Value *OperandValToReplace) {
// Transform each operand.
for (SCEVNAryExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
I != E; ++I) {
- Operands.push_back(TransformSubExpr(*I, LUser, 0));
+ Operands.push_back(TransformSubExpr(*I, LUser, nullptr));
}
// Conservatively use AnyWrap until/unless we need FlagNW.
const SCEV *Result = SE.getAddRecExpr(Operands, L, SCEV::FlagAnyWrap);
@@ -131,7 +131,10 @@ TransformImpl(const SCEV *S, Instruction *User, Value *OperandValToReplace) {
// expression: {-2,+,1,+,2} + {1,+,2} => {-1,+,3,+,2}
if (AR->isAffine() &&
IVUseShouldUsePostIncValue(User, OperandValToReplace, L, &DT)) {
- Result = SE.getMinusSCEV(Result, AR->getStepRecurrence(SE));
+ const SCEV *TransformedStep =
+ TransformSubExpr(AR->getStepRecurrence(SE),
+ User, OperandValToReplace);
+ Result = SE.getMinusSCEV(Result, TransformedStep);
Loops.insert(L);
}
#if 0
@@ -144,6 +147,20 @@ TransformImpl(const SCEV *S, Instruction *User, Value *OperandValToReplace) {
#endif
break;
case Normalize:
+ // We want to normalize the step expression, because otherwise we might not
+ // be able to denormalize back to the original expression.
+ //
+ // Here is an example of what will happen if we don't normalize the step:
+ // ORIGINAL ISE:
+ // {(100 /u {1,+,1}<%bb16>),+,(100 /u {1,+,1}<%bb16>)}<%bb25>
+ // NORMALIZED ISE:
+ // {((-1 * (100 /u {1,+,1}<%bb16>)) + (100 /u {0,+,1}<%bb16>)),+,
+ // (100 /u {0,+,1}<%bb16>)}<%bb25>
+ // DENORMALIZED BACK ISE:
+ // {((2 * (100 /u {1,+,1}<%bb16>)) + (-1 * (100 /u {2,+,1}<%bb16>))),+,
+ // (100 /u {1,+,1}<%bb16>)}<%bb25>
+ // Note that the initial value changes after normalization +
+ // denormalization, which isn't correct.
if (Loops.count(L)) {
const SCEV *TransformedStep =
TransformSubExpr(AR->getStepRecurrence(SE),
@@ -157,8 +174,14 @@ TransformImpl(const SCEV *S, Instruction *User, Value *OperandValToReplace) {
#endif
break;
case Denormalize:
- if (Loops.count(L))
- Result = cast<SCEVAddRecExpr>(Result)->getPostIncExpr(SE);
+ // Here we want to normalize the step expression for the same reasons as
+ // stated above.
+ if (Loops.count(L)) {
+ const SCEV *TransformedStep =
+ TransformSubExpr(AR->getStepRecurrence(SE),
+ User, OperandValToReplace);
+ Result = SE.getAddExpr(Result, TransformedStep);
+ }
break;
}
return Result;
@@ -218,7 +241,7 @@ TransformSubExpr(const SCEV *S, Instruction *User, Value *OperandValToReplace) {
}
/// Top level driver for transforming an expression DAG into its requested
-/// post-inc form (either "Normalized" or "Denormalized".
+/// post-inc form (either "Normalized" or "Denormalized").
const SCEV *llvm::TransformForPostIncUse(TransformKind Kind,
const SCEV *S,
Instruction *User,
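The two hunks above make both Normalize and Denormalize transform the step recurrence as well as the start. A rough sketch of why, in the same {Start,+,Step} notation as the comment in the Normalize case (T_N and T_D stand for the Normalize and Denormalize transforms, and it is assumed that T_D undoes T_N on any subexpression):

  Normalize:    {S,+,X}  ->  {T_N(S) - T_N(X), +, T_N(X)}
  Denormalize:  {A,+,B}  ->  {T_D(A) + T_D(B), +, T_D(B)}
  Round trip:   {S,+,X}  ->  {(S - X) + X, +, X}  =  {S,+,X}

With the untransformed step (the old code), the round-tripped start works out to roughly S - T_D(X) + X instead of S, which differs whenever the transforms rewrite the step itself; that is exactly the mismatch shown in the ORIGINAL/NORMALIZED/DENORMALIZED ISE example above.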
diff --git a/contrib/llvm/lib/Analysis/SparsePropagation.cpp b/contrib/llvm/lib/Analysis/SparsePropagation.cpp
index 15b7872..edd82f5 100644
--- a/contrib/llvm/lib/Analysis/SparsePropagation.cpp
+++ b/contrib/llvm/lib/Analysis/SparsePropagation.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "sparseprop"
#include "llvm/Analysis/SparsePropagation.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
@@ -21,6 +20,8 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "sparseprop"
+
//===----------------------------------------------------------------------===//
// AbstractLatticeFunction Implementation
//===----------------------------------------------------------------------===//
@@ -147,7 +148,7 @@ void SparseSolver::getFeasibleSuccessors(TerminatorInst &TI,
return;
Constant *C = LatticeFunc->GetConstant(BCValue, BI->getCondition(), *this);
- if (C == 0 || !isa<ConstantInt>(C)) {
+ if (!C || !isa<ConstantInt>(C)) {
// Non-constant values can go either way.
Succs[0] = Succs[1] = true;
return;
@@ -189,7 +190,7 @@ void SparseSolver::getFeasibleSuccessors(TerminatorInst &TI,
return;
Constant *C = LatticeFunc->GetConstant(SCValue, SI.getCondition(), *this);
- if (C == 0 || !isa<ConstantInt>(C)) {
+ if (!C || !isa<ConstantInt>(C)) {
// All destinations are executable!
Succs.assign(TI.getNumSuccessors(), true);
return;
@@ -303,11 +304,10 @@ void SparseSolver::Solve(Function &F) {
// "I" got into the work list because it made a transition. See if any
// users are both live and in need of updating.
- for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
- UI != E; ++UI) {
- Instruction *U = cast<Instruction>(*UI);
- if (BBExecutable.count(U->getParent())) // Inst is executable?
- visitInst(*U);
+ for (User *U : I->users()) {
+ Instruction *UI = cast<Instruction>(U);
+ if (BBExecutable.count(UI->getParent())) // Inst is executable?
+ visitInst(*UI);
}
}
diff --git a/contrib/llvm/lib/Analysis/TargetTransformInfo.cpp b/contrib/llvm/lib/Analysis/TargetTransformInfo.cpp
index 0353295..cdb0b79 100644
--- a/contrib/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/contrib/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -7,18 +7,19 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "tti"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/Operator.h"
#include "llvm/IR/Instruction.h"
-#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Instructions.h"
-#include "llvm/Support/CallSite.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Operator.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
+#define DEBUG_TYPE "tti"
+
// Setup the analysis group to manage the TargetTransformInfo passes.
INITIALIZE_ANALYSIS_GROUP(TargetTransformInfo, "Target Information", NoTTI)
char TargetTransformInfo::ID = 0;
@@ -35,16 +36,6 @@ void TargetTransformInfo::pushTTIStack(Pass *P) {
PTTI->TopTTI = this;
}
-void TargetTransformInfo::popTTIStack() {
- TopTTI = 0;
-
- // Walk up the chain and update the top TTI pointer.
- for (TargetTransformInfo *PTTI = PrevTTI; PTTI; PTTI = PTTI->PrevTTI)
- PTTI->TopTTI = PrevTTI;
-
- PrevTTI = 0;
-}
-
void TargetTransformInfo::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetTransformInfo>();
}
@@ -158,6 +149,16 @@ unsigned TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty) const {
return PrevTTI->getIntImmCost(Imm, Ty);
}
+unsigned TargetTransformInfo::getIntImmCost(unsigned Opc, unsigned Idx,
+ const APInt &Imm, Type *Ty) const {
+ return PrevTTI->getIntImmCost(Opc, Idx, Imm, Ty);
+}
+
+unsigned TargetTransformInfo::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
+ const APInt &Imm, Type *Ty) const {
+ return PrevTTI->getIntImmCost(IID, Idx, Imm, Ty);
+}
+
unsigned TargetTransformInfo::getNumberOfRegisters(bool Vector) const {
return PrevTTI->getNumberOfRegisters(Vector);
}
@@ -231,22 +232,23 @@ unsigned TargetTransformInfo::getReductionCost(unsigned Opcode, Type *Ty,
namespace {
-struct NoTTI : ImmutablePass, TargetTransformInfo {
+struct NoTTI final : ImmutablePass, TargetTransformInfo {
const DataLayout *DL;
- NoTTI() : ImmutablePass(ID), DL(0) {
+ NoTTI() : ImmutablePass(ID), DL(nullptr) {
initializeNoTTIPass(*PassRegistry::getPassRegistry());
}
- virtual void initializePass() {
+ virtual void initializePass() override {
// Note that this subclass is special, and must *not* call initializeTTI as
// it does not chain.
TopTTI = this;
- PrevTTI = 0;
- DL = getAnalysisIfAvailable<DataLayout>();
+ PrevTTI = nullptr;
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const override {
// Note that this subclass is special, and must *not* call
// TTI::getAnalysisUsage as it breaks the recursion.
}
@@ -255,13 +257,14 @@ struct NoTTI : ImmutablePass, TargetTransformInfo {
static char ID;
/// Provide necessary pointer adjustments for the two base classes.
- virtual void *getAdjustedAnalysisPointer(const void *ID) {
+ virtual void *getAdjustedAnalysisPointer(const void *ID) override {
if (ID == &TargetTransformInfo::ID)
return (TargetTransformInfo*)this;
return this;
}
- unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) const {
+ unsigned getOperationCost(unsigned Opcode, Type *Ty,
+ Type *OpTy) const override {
switch (Opcode) {
default:
// By default, just classify everything as 'basic'.
@@ -318,7 +321,7 @@ struct NoTTI : ImmutablePass, TargetTransformInfo {
}
unsigned getGEPCost(const Value *Ptr,
- ArrayRef<const Value *> Operands) const {
+ ArrayRef<const Value *> Operands) const override {
// In the basic model, we just assume that all-constant GEPs will be folded
// into their uses via addressing modes.
for (unsigned Idx = 0, Size = Operands.size(); Idx != Size; ++Idx)
@@ -328,7 +331,8 @@ struct NoTTI : ImmutablePass, TargetTransformInfo {
return TCC_Free;
}
- unsigned getCallCost(FunctionType *FTy, int NumArgs = -1) const {
+ unsigned getCallCost(FunctionType *FTy, int NumArgs = -1) const override
+ {
assert(FTy && "FunctionType must be provided to this routine.");
// The target-independent implementation just measures the size of the
@@ -343,7 +347,8 @@ struct NoTTI : ImmutablePass, TargetTransformInfo {
return TCC_Basic * (NumArgs + 1);
}
- unsigned getCallCost(const Function *F, int NumArgs = -1) const {
+ unsigned getCallCost(const Function *F, int NumArgs = -1) const override
+ {
assert(F && "A concrete function must be provided to this routine.");
if (NumArgs < 0)
@@ -364,7 +369,7 @@ struct NoTTI : ImmutablePass, TargetTransformInfo {
}
unsigned getCallCost(const Function *F,
- ArrayRef<const Value *> Arguments) const {
+ ArrayRef<const Value *> Arguments) const override {
// Simply delegate to generic handling of the call.
// FIXME: We should use instsimplify or something else to catch calls which
// will constant fold with these arguments.
@@ -372,7 +377,7 @@ struct NoTTI : ImmutablePass, TargetTransformInfo {
}
unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
- ArrayRef<Type *> ParamTys) const {
+ ArrayRef<Type *> ParamTys) const override {
switch (IID) {
default:
// Intrinsics rarely (if ever) have normal argument setup constraints.
@@ -394,8 +399,9 @@ struct NoTTI : ImmutablePass, TargetTransformInfo {
}
}
- unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
- ArrayRef<const Value *> Arguments) const {
+ unsigned
+ getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<const Value *> Arguments) const override {
// Delegate to the generic intrinsic handling code. This mostly provides an
// opportunity for targets to (for example) special case the cost of
// certain intrinsics based on constants used as arguments.
@@ -406,14 +412,14 @@ struct NoTTI : ImmutablePass, TargetTransformInfo {
return TopTTI->getIntrinsicCost(IID, RetTy, ParamTys);
}
- unsigned getUserCost(const User *U) const {
+ unsigned getUserCost(const User *U) const override {
if (isa<PHINode>(U))
return TCC_Free; // Model all PHI nodes as free.
- if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U))
- // In the basic model we just assume that all-constant GEPs will be
- // folded into their uses via addressing modes.
- return GEP->hasAllConstantIndices() ? TCC_Free : TCC_Basic;
+ if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
+ SmallVector<const Value *, 4> Indices(GEP->idx_begin(), GEP->idx_end());
+ return TopTTI->getGEPCost(GEP->getPointerOperand(), Indices);
+ }
if (ImmutableCallSite CS = U) {
const Function *F = CS.getCalledFunction();
@@ -423,12 +429,7 @@ struct NoTTI : ImmutablePass, TargetTransformInfo {
return TopTTI->getCallCost(cast<FunctionType>(FTy), CS.arg_size());
}
- SmallVector<const Value *, 8> Arguments;
- for (ImmutableCallSite::arg_iterator AI = CS.arg_begin(),
- AE = CS.arg_end();
- AI != AE; ++AI)
- Arguments.push_back(*AI);
-
+ SmallVector<const Value *, 8> Arguments(CS.arg_begin(), CS.arg_end());
return TopTTI->getCallCost(F, Arguments);
}
@@ -443,12 +444,12 @@ struct NoTTI : ImmutablePass, TargetTransformInfo {
// Otherwise delegate to the fully generic implementations.
return getOperationCost(Operator::getOpcode(U), U->getType(),
U->getNumOperands() == 1 ?
- U->getOperand(0)->getType() : 0);
+ U->getOperand(0)->getType() : nullptr);
}
- bool hasBranchDivergence() const { return false; }
+ bool hasBranchDivergence() const override { return false; }
- bool isLoweredToCall(const Function *F) const {
+ bool isLoweredToCall(const Function *F) const override {
// FIXME: These should almost certainly not be handled here, and instead
// handled with the help of TLI or the target itself. This was largely
// ported from existing analysis heuristics here so that such refactorings
@@ -479,126 +480,136 @@ struct NoTTI : ImmutablePass, TargetTransformInfo {
return true;
}
- void getUnrollingPreferences(Loop *, UnrollingPreferences &) const { }
+ void getUnrollingPreferences(Loop *, UnrollingPreferences &) const override {
+ }
- bool isLegalAddImmediate(int64_t Imm) const {
+ bool isLegalAddImmediate(int64_t Imm) const override {
return false;
}
- bool isLegalICmpImmediate(int64_t Imm) const {
+ bool isLegalICmpImmediate(int64_t Imm) const override {
return false;
}
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
- bool HasBaseReg, int64_t Scale) const {
+ bool HasBaseReg, int64_t Scale) const override
+ {
// Guess that reg+reg addressing is allowed. This heuristic is taken from
// the implementation of LSR.
return !BaseGV && BaseOffset == 0 && Scale <= 1;
}
int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
- bool HasBaseReg, int64_t Scale) const {
+ bool HasBaseReg, int64_t Scale) const override {
// Guess that all legal addressing mode are free.
if(isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg, Scale))
return 0;
return -1;
}
-
- bool isTruncateFree(Type *Ty1, Type *Ty2) const {
+ bool isTruncateFree(Type *Ty1, Type *Ty2) const override {
return false;
}
- bool isTypeLegal(Type *Ty) const {
+ bool isTypeLegal(Type *Ty) const override {
return false;
}
- unsigned getJumpBufAlignment() const {
+ unsigned getJumpBufAlignment() const override {
return 0;
}
- unsigned getJumpBufSize() const {
+ unsigned getJumpBufSize() const override {
return 0;
}
- bool shouldBuildLookupTables() const {
+ bool shouldBuildLookupTables() const override {
return true;
}
- PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const {
+ PopcntSupportKind
+ getPopcntSupport(unsigned IntTyWidthInBit) const override {
return PSK_Software;
}
- bool haveFastSqrt(Type *Ty) const {
+ bool haveFastSqrt(Type *Ty) const override {
return false;
}
- unsigned getIntImmCost(const APInt &Imm, Type *Ty) const {
- return 1;
+ unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override {
+ return TCC_Basic;
+ }
+
+ unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
+ Type *Ty) const override {
+ return TCC_Free;
+ }
+
+ unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
+ Type *Ty) const override {
+ return TCC_Free;
}
- unsigned getNumberOfRegisters(bool Vector) const {
+ unsigned getNumberOfRegisters(bool Vector) const override {
return 8;
}
- unsigned getRegisterBitWidth(bool Vector) const {
+ unsigned getRegisterBitWidth(bool Vector) const override {
return 32;
}
- unsigned getMaximumUnrollFactor() const {
+ unsigned getMaximumUnrollFactor() const override {
return 1;
}
unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind,
- OperandValueKind) const {
+ OperandValueKind) const override {
return 1;
}
- unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
- int Index = 0, Type *SubTp = 0) const {
+ unsigned getShuffleCost(ShuffleKind Kind, Type *Ty,
+ int Index = 0, Type *SubTp = nullptr) const override {
return 1;
}
unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
- Type *Src) const {
+ Type *Src) const override {
return 1;
}
- unsigned getCFInstrCost(unsigned Opcode) const {
+ unsigned getCFInstrCost(unsigned Opcode) const override {
return 1;
}
unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
- Type *CondTy = 0) const {
+ Type *CondTy = nullptr) const override {
return 1;
}
unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
- unsigned Index = -1) const {
+ unsigned Index = -1) const override {
return 1;
}
- unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
- unsigned Alignment,
- unsigned AddressSpace) const {
+ unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace) const override {
return 1;
}
- unsigned getIntrinsicInstrCost(Intrinsic::ID ID,
- Type *RetTy,
- ArrayRef<Type*> Tys) const {
+ unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+ ArrayRef<Type*> Tys) const override {
return 1;
}
- unsigned getNumberOfParts(Type *Tp) const {
+ unsigned getNumberOfParts(Type *Tp) const override {
return 0;
}
- unsigned getAddressComputationCost(Type *Tp, bool) const {
+ unsigned getAddressComputationCost(Type *Tp, bool) const override {
return 0;
}
- unsigned getReductionCost(unsigned, Type *, bool) const {
+ unsigned getReductionCost(unsigned, Type *, bool) const override {
return 1;
}
};
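For context on how the cost hooks in this interface are typically consumed, here is a minimal sketch (not part of the patch; the helper name and surrounding setup are assumptions) that sums the per-user cost over a function using only getUserCost, which, as in the NoTTI implementation above, falls back to getOperationCost, getCallCost and getGEPCost:

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Function.h"

// Hypothetical helper: estimate a code-size-style cost for a whole function
// by summing the target's per-user cost for every instruction.
static unsigned estimateFunctionCost(const llvm::Function &F,
                                     const llvm::TargetTransformInfo &TTI) {
  unsigned Cost = 0;
  for (const llvm::BasicBlock &BB : F)
    for (const llvm::Instruction &I : BB)
      Cost += TTI.getUserCost(&I); // PHIs come back as TCC_Free, etc.
  return Cost;
}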
diff --git a/contrib/llvm/lib/Analysis/Trace.cpp b/contrib/llvm/lib/Analysis/Trace.cpp
index 4c68322..5a1acc0 100644
--- a/contrib/llvm/lib/Analysis/Trace.cpp
+++ b/contrib/llvm/lib/Analysis/Trace.cpp
@@ -16,7 +16,6 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/Trace.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -37,7 +36,7 @@ void Trace::print(raw_ostream &O) const {
O << "; Trace from function " << F->getName() << ", blocks:\n";
for (const_iterator i = begin(), e = end(); i != e; ++i) {
O << "; ";
- WriteAsOperand(O, *i, true, getModule());
+ (*i)->printAsOperand(O, true, getModule());
O << "\n";
}
O << "; Trace parent function: \n" << *F;
diff --git a/contrib/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
index 6791d4b..f36f6f8 100644
--- a/contrib/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
@@ -144,7 +144,7 @@ namespace {
const MDNode *Node;
public:
- TBAANode() : Node(0) {}
+ TBAANode() : Node(nullptr) {}
explicit TBAANode(const MDNode *N) : Node(N) {}
/// getNode - Get the MDNode for this TBAANode.
@@ -182,7 +182,6 @@ namespace {
const MDNode *Node;
public:
- TBAAStructTagNode() : Node(0) {}
explicit TBAAStructTagNode(const MDNode *N) : Node(N) {}
/// Get the MDNode for this TBAAStructTagNode.
@@ -218,7 +217,7 @@ namespace {
const MDNode *Node;
public:
- TBAAStructTypeNode() : Node(0) {}
+ TBAAStructTypeNode() : Node(nullptr) {}
explicit TBAAStructTypeNode(const MDNode *N) : Node(N) {}
/// Get the MDNode for this TBAAStructTypeNode.
@@ -281,7 +280,7 @@ namespace {
initializeTypeBasedAliasAnalysisPass(*PassRegistry::getPassRegistry());
}
- virtual void initializePass() {
+ void initializePass() override {
InitializeAliasAnalysis(this);
}
@@ -289,7 +288,7 @@ namespace {
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(const void *PI) {
+ void *getAdjustedAnalysisPointer(const void *PI) override {
if (PI == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
@@ -299,15 +298,15 @@ namespace {
bool PathAliases(const MDNode *A, const MDNode *B) const;
private:
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- virtual AliasResult alias(const Location &LocA, const Location &LocB);
- virtual bool pointsToConstantMemory(const Location &Loc, bool OrLocal);
- virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS);
- virtual ModRefBehavior getModRefBehavior(const Function *F);
- virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc);
- virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
- ImmutableCallSite CS2);
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ AliasResult alias(const Location &LocA, const Location &LocB) override;
+ bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override;
+ ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override;
+ ModRefBehavior getModRefBehavior(const Function *F) override;
+ ModRefResult getModRefInfo(ImmutableCallSite CS,
+ const Location &Loc) override;
+ ModRefResult getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) override;
};
} // End of anonymous namespace
@@ -340,7 +339,8 @@ static bool isStructPathTBAA(const MDNode *MD) {
bool
TypeBasedAliasAnalysis::Aliases(const MDNode *A,
const MDNode *B) const {
- if (isStructPathTBAA(A))
+ // Make sure that both MDNodes are struct-path aware.
+ if (isStructPathTBAA(A) && isStructPathTBAA(B))
return PathAliases(A, B);
// Keep track of the root node for A and B.
@@ -386,6 +386,10 @@ TypeBasedAliasAnalysis::Aliases(const MDNode *A,
bool
TypeBasedAliasAnalysis::PathAliases(const MDNode *A,
const MDNode *B) const {
+ // Verify that both input nodes are struct-path aware.
+ assert(isStructPathTBAA(A) && "MDNode A is not struct-path aware.");
+ assert(isStructPathTBAA(B) && "MDNode B is not struct-path aware.");
+
// Keep track of the root node for A and B.
TBAAStructTypeNode RootA, RootB;
TBAAStructTagNode TagA(A), TagB(B);
@@ -555,38 +559,40 @@ bool MDNode::isTBAAVtableAccess() const {
MDNode *MDNode::getMostGenericTBAA(MDNode *A, MDNode *B) {
if (!A || !B)
- return NULL;
+ return nullptr;
if (A == B)
return A;
// For struct-path aware TBAA, we use the access type of the tag.
- bool StructPath = isStructPathTBAA(A);
+ bool StructPath = isStructPathTBAA(A) && isStructPathTBAA(B);
if (StructPath) {
A = cast_or_null<MDNode>(A->getOperand(1));
- if (!A) return 0;
+ if (!A) return nullptr;
B = cast_or_null<MDNode>(B->getOperand(1));
- if (!B) return 0;
+ if (!B) return nullptr;
}
SmallVector<MDNode *, 4> PathA;
MDNode *T = A;
while (T) {
PathA.push_back(T);
- T = T->getNumOperands() >= 2 ? cast_or_null<MDNode>(T->getOperand(1)) : 0;
+ T = T->getNumOperands() >= 2 ? cast_or_null<MDNode>(T->getOperand(1))
+ : nullptr;
}
SmallVector<MDNode *, 4> PathB;
T = B;
while (T) {
PathB.push_back(T);
- T = T->getNumOperands() >= 2 ? cast_or_null<MDNode>(T->getOperand(1)) : 0;
+ T = T->getNumOperands() >= 2 ? cast_or_null<MDNode>(T->getOperand(1))
+ : nullptr;
}
int IA = PathA.size() - 1;
int IB = PathB.size() - 1;
- MDNode *Ret = 0;
+ MDNode *Ret = nullptr;
while (IA >= 0 && IB >=0) {
if (PathA[IA] == PathB[IB])
Ret = PathA[IA];
@@ -599,7 +605,7 @@ MDNode *MDNode::getMostGenericTBAA(MDNode *A, MDNode *B) {
return Ret;
if (!Ret)
- return 0;
+ return nullptr;
// We need to convert from a type node to a tag node.
Type *Int64 = IntegerType::get(A->getContext(), 64);
Value *Ops[3] = { Ret, Ret, ConstantInt::get(Int64, 0) };
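As a small worked example of the common-ancestor walk above (the node names are the usual clang scalar TBAA nodes, used here only for illustration): with A tagging an int access and B a float access, the parent chains collected into PathA and PathB are

  PathA: !"int"   -> !"omnipotent char" -> !"Simple C/C++ TBAA"
  PathB: !"float" -> !"omnipotent char" -> !"Simple C/C++ TBAA"

Walking both paths from the root keeps the root and then the char node as the last match, so getMostGenericTBAA returns the char node; if the two roots differed, Ret would remain null and the function would return nullptr.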
diff --git a/contrib/llvm/lib/Analysis/ValueTracking.cpp b/contrib/llvm/lib/Analysis/ValueTracking.cpp
index e39ee62..e6d09f4 100644
--- a/contrib/llvm/lib/Analysis/ValueTracking.cpp
+++ b/contrib/llvm/lib/Analysis/ValueTracking.cpp
@@ -16,8 +16,11 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
@@ -25,10 +28,9 @@
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
-#include "llvm/Support/ConstantRange.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/PatternMatch.h"
#include <cstring>
using namespace llvm;
using namespace llvm::PatternMatch;
@@ -44,10 +46,10 @@ static unsigned getBitWidth(Type *Ty, const DataLayout *TD) {
return TD ? TD->getPointerTypeSizeInBits(Ty) : 0;
}
-static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
- APInt &KnownZero, APInt &KnownOne,
- APInt &KnownZero2, APInt &KnownOne2,
- const DataLayout *TD, unsigned Depth) {
+static void computeKnownBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
+ APInt &KnownZero, APInt &KnownOne,
+ APInt &KnownZero2, APInt &KnownOne2,
+ const DataLayout *TD, unsigned Depth) {
if (!Add) {
if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
// We know that the top bits of C-X are clear if X contains less bits
@@ -58,7 +60,7 @@ static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
// NLZ can't be BitWidth with no sign bit
APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
- llvm::ComputeMaskedBits(Op1, KnownZero2, KnownOne2, TD, Depth+1);
+ llvm::computeKnownBits(Op1, KnownZero2, KnownOne2, TD, Depth+1);
// If all of the MaskV bits are known to be zero, then we know the
// output top bits are zero, because we now know that the output is
@@ -79,13 +81,10 @@ static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
// result. For an add, this works with either operand. For a subtract,
// this only works if the known zeros are in the right operand.
APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
- llvm::ComputeMaskedBits(Op0, LHSKnownZero, LHSKnownOne, TD, Depth+1);
- assert((LHSKnownZero & LHSKnownOne) == 0 &&
- "Bits known to be one AND zero?");
+ llvm::computeKnownBits(Op0, LHSKnownZero, LHSKnownOne, TD, Depth+1);
unsigned LHSKnownZeroOut = LHSKnownZero.countTrailingOnes();
- llvm::ComputeMaskedBits(Op1, KnownZero2, KnownOne2, TD, Depth+1);
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+ llvm::computeKnownBits(Op1, KnownZero2, KnownOne2, TD, Depth+1);
unsigned RHSKnownZeroOut = KnownZero2.countTrailingOnes();
// Determine which operand has more trailing zeros, and use that
@@ -130,15 +129,13 @@ static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
}
}
-static void ComputeMaskedBitsMul(Value *Op0, Value *Op1, bool NSW,
- APInt &KnownZero, APInt &KnownOne,
- APInt &KnownZero2, APInt &KnownOne2,
- const DataLayout *TD, unsigned Depth) {
+static void computeKnownBitsMul(Value *Op0, Value *Op1, bool NSW,
+ APInt &KnownZero, APInt &KnownOne,
+ APInt &KnownZero2, APInt &KnownOne2,
+ const DataLayout *TD, unsigned Depth) {
unsigned BitWidth = KnownZero.getBitWidth();
- ComputeMaskedBits(Op1, KnownZero, KnownOne, TD, Depth+1);
- ComputeMaskedBits(Op0, KnownZero2, KnownOne2, TD, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(Op1, KnownZero, KnownOne, TD, Depth+1);
+ computeKnownBits(Op0, KnownZero2, KnownOne2, TD, Depth+1);
bool isKnownNegative = false;
bool isKnownNonNegative = false;
@@ -192,7 +189,8 @@ static void ComputeMaskedBitsMul(Value *Op0, Value *Op1, bool NSW,
KnownOne.setBit(BitWidth - 1);
}
-void llvm::computeMaskedBitsLoad(const MDNode &Ranges, APInt &KnownZero) {
+void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
+ APInt &KnownZero) {
unsigned BitWidth = KnownZero.getBitWidth();
unsigned NumRanges = Ranges.getNumOperands() / 2;
assert(NumRanges >= 1);
@@ -211,8 +209,9 @@ void llvm::computeMaskedBitsLoad(const MDNode &Ranges, APInt &KnownZero) {
KnownZero = APInt::getHighBitsSet(BitWidth, MinLeadingZeros);
}
-/// ComputeMaskedBits - Determine which of the bits are known to be either zero
-/// or one and return them in the KnownZero/KnownOne bit sets.
+
+/// Determine which bits of V are known to be either zero or one and return
+/// them in the KnownZero/KnownOne bit sets.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
@@ -226,8 +225,8 @@ void llvm::computeMaskedBitsLoad(const MDNode &Ranges, APInt &KnownZero) {
/// where V is a vector, known zero, and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
-void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
- const DataLayout *TD, unsigned Depth) {
+void llvm::computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
+ const DataLayout *TD, unsigned Depth) {
assert(V && "No Value?");
assert(Depth <= MaxDepth && "Limit Search Depth");
unsigned BitWidth = KnownZero.getBitWidth();
@@ -241,7 +240,7 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
V->getType()->getScalarSizeInBits() == BitWidth) &&
KnownZero.getBitWidth() == BitWidth &&
KnownOne.getBitWidth() == BitWidth &&
- "V, Mask, KnownOne and KnownZero should have same BitWidth");
+ "V, KnownOne and KnownZero should have same BitWidth");
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
// We know all of the bits for a constant!
@@ -303,7 +302,7 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
if (GA->mayBeOverridden()) {
KnownZero.clearAllBits(); KnownOne.clearAllBits();
} else {
- ComputeMaskedBits(GA->getAliasee(), KnownZero, KnownOne, TD, Depth+1);
+ computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, TD, Depth+1);
}
return;
}
@@ -311,8 +310,9 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
if (Argument *A = dyn_cast<Argument>(V)) {
unsigned Align = 0;
- if (A->hasByValAttr()) {
- // Get alignment information off byval arguments if specified in the IR.
+ if (A->hasByValOrInAllocaAttr()) {
+ // Get alignment information off byval/inalloca arguments if specified in
+ // the IR.
Align = A->getParamAlignment();
} else if (TD && A->hasStructRetAttr()) {
// An sret parameter has at least the ABI alignment of the return type.
@@ -340,49 +340,43 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
default: break;
case Instruction::Load:
if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
- computeMaskedBitsLoad(*MD, KnownZero);
- return;
+ computeKnownBitsFromRangeMetadata(*MD, KnownZero);
+ break;
case Instruction::And: {
// If either the LHS or the RHS are Zero, the result is zero.
- ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
- ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
+ computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
// Output known-1 bits are only known if set in both the LHS & RHS.
KnownOne &= KnownOne2;
// Output known-0 are known to be clear if zero in either the LHS | RHS.
KnownZero |= KnownZero2;
- return;
+ break;
}
case Instruction::Or: {
- ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
- ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
+ computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
// Output known-0 bits are only known if clear in both the LHS & RHS.
KnownZero &= KnownZero2;
// Output known-1 are known to be set if set in either the LHS | RHS.
KnownOne |= KnownOne2;
- return;
+ break;
}
case Instruction::Xor: {
- ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
- ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
+ computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
// Output known-0 bits are known if clear or set in both the LHS & RHS.
APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
// Output known-1 are known to be set if set in only one of the LHS, RHS.
KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
KnownZero = KnownZeroOut;
- return;
+ break;
}
case Instruction::Mul: {
bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
- ComputeMaskedBitsMul(I->getOperand(0), I->getOperand(1), NSW,
+ computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW,
KnownZero, KnownOne, KnownZero2, KnownOne2, TD, Depth);
break;
}
@@ -390,42 +384,41 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
// For the purposes of computing leading zeros we can conservatively
// treat a udiv as a logical right shift by the power of 2 known to
// be less than the denominator.
- ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
+ computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
unsigned LeadZ = KnownZero2.countLeadingOnes();
KnownOne2.clearAllBits();
KnownZero2.clearAllBits();
- ComputeMaskedBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1);
+ computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1);
unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
if (RHSUnknownLeadingOnes != BitWidth)
LeadZ = std::min(BitWidth,
LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
- return;
+ break;
}
case Instruction::Select:
- ComputeMaskedBits(I->getOperand(2), KnownZero, KnownOne, TD, Depth+1);
- ComputeMaskedBits(I->getOperand(1), KnownZero2, KnownOne2, TD,
+ computeKnownBits(I->getOperand(2), KnownZero, KnownOne, TD, Depth+1);
+ computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, TD,
Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
// Only known if known in both the LHS and RHS.
KnownOne &= KnownOne2;
KnownZero &= KnownZero2;
- return;
+ break;
case Instruction::FPTrunc:
case Instruction::FPExt:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::SIToFP:
case Instruction::UIToFP:
- return; // Can't work with floating point.
+ break; // Can't work with floating point.
case Instruction::PtrToInt:
case Instruction::IntToPtr:
+ case Instruction::AddrSpaceCast: // Pointers could be different sizes.
// We can't handle these if we don't know the pointer size.
- if (!TD) return;
+ if (!TD) break;
// FALL THROUGH and handle them the same as zext/trunc.
case Instruction::ZExt:
case Instruction::Trunc: {
@@ -438,19 +431,19 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
SrcBitWidth = TD->getTypeSizeInBits(SrcTy->getScalarType());
} else {
SrcBitWidth = SrcTy->getScalarSizeInBits();
- if (!SrcBitWidth) return;
+ if (!SrcBitWidth) break;
}
assert(SrcBitWidth && "SrcBitWidth can't be zero");
KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
- ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
+ computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
KnownZero = KnownZero.zextOrTrunc(BitWidth);
KnownOne = KnownOne.zextOrTrunc(BitWidth);
// Any top bits are known to be zero.
if (BitWidth > SrcBitWidth)
KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
- return;
+ break;
}
case Instruction::BitCast: {
Type *SrcTy = I->getOperand(0)->getType();
@@ -458,8 +451,8 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
// TODO: For now, not handling conversions like:
// (bitcast i64 %x to <2 x i32>)
!I->getType()->isVectorTy()) {
- ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
- return;
+ computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
+ break;
}
break;
}
@@ -469,8 +462,7 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
KnownZero = KnownZero.trunc(SrcBitWidth);
KnownOne = KnownOne.trunc(SrcBitWidth);
- ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
KnownZero = KnownZero.zext(BitWidth);
KnownOne = KnownOne.zext(BitWidth);
@@ -480,18 +472,17 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
else if (KnownOne[SrcBitWidth-1]) // Input sign bit known set
KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
- return;
+ break;
}
case Instruction::Shl:
// (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
- ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
KnownZero <<= ShiftAmt;
KnownOne <<= ShiftAmt;
KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt); // low bits known 0
- return;
+ break;
}
break;
case Instruction::LShr:
@@ -501,13 +492,12 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
// Unsigned shift right.
- ComputeMaskedBits(I->getOperand(0), KnownZero,KnownOne, TD, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(I->getOperand(0), KnownZero,KnownOne, TD, Depth+1);
KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
// high bits known zero.
KnownZero |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
- return;
+ break;
}
break;
case Instruction::AShr:
@@ -517,8 +507,7 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
// Signed shift right.
- ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
@@ -527,19 +516,19 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
KnownZero |= HighBits;
else if (KnownOne[BitWidth-ShiftAmt-1]) // New bits are known one.
KnownOne |= HighBits;
- return;
+ break;
}
break;
case Instruction::Sub: {
bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
- ComputeMaskedBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
+ computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
KnownZero, KnownOne, KnownZero2, KnownOne2, TD,
Depth);
break;
}
case Instruction::Add: {
bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
- ComputeMaskedBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
+ computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
KnownZero, KnownOne, KnownZero2, KnownOne2, TD,
Depth);
break;
@@ -549,7 +538,7 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
APInt RA = Rem->getValue().abs();
if (RA.isPowerOf2()) {
APInt LowBits = RA - 1;
- ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
+ computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
// The low bits of the first operand are unchanged by the srem.
KnownZero = KnownZero2 & LowBits;
@@ -573,8 +562,8 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
// remainder is zero.
if (KnownZero.isNonNegative()) {
APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
- ComputeMaskedBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, TD,
- Depth+1);
+ computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, TD,
+ Depth+1);
// If it's known zero, our sign bit is also zero.
if (LHSKnownZero.isNegative())
KnownZero.setBit(BitWidth - 1);
@@ -586,9 +575,8 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
APInt RA = Rem->getValue();
if (RA.isPowerOf2()) {
APInt LowBits = (RA - 1);
- ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD,
- Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD,
+ Depth+1);
KnownZero |= ~LowBits;
KnownOne &= LowBits;
break;
@@ -597,8 +585,8 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
// Since the result is less than or equal to either operand, any leading
// zero bits in either operand must also exist in the result.
- ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
- ComputeMaskedBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1);
+ computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
+ computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1);
unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
KnownZero2.countLeadingOnes());
@@ -621,8 +609,8 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
// Analyze all of the subscripts of this getelementptr instruction
// to determine if we can prove known low zero bits.
APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
- ComputeMaskedBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, TD,
- Depth+1);
+ computeKnownBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, TD,
+ Depth+1);
unsigned TrailZ = LocalKnownZero.countTrailingOnes();
gep_type_iterator GTI = gep_type_begin(I);
@@ -630,8 +618,10 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
Value *Index = I->getOperand(i);
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
// Handle struct member offset arithmetic.
- if (!TD)
- return;
+ if (!TD) {
+ TrailZ = 0;
+ break;
+ }
// Handle case when index is vector zeroinitializer
Constant *CIndex = cast<Constant>(Index);
@@ -649,11 +639,14 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
} else {
// Handle array index arithmetic.
Type *IndexedTy = GTI.getIndexedType();
- if (!IndexedTy->isSized()) return;
+ if (!IndexedTy->isSized()) {
+ TrailZ = 0;
+ break;
+ }
unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
uint64_t TypeSize = TD ? TD->getTypeAllocSize(IndexedTy) : 1;
LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
- ComputeMaskedBits(Index, LocalKnownZero, LocalKnownOne, TD, Depth+1);
+ computeKnownBits(Index, LocalKnownZero, LocalKnownOne, TD, Depth+1);
TrailZ = std::min(TrailZ,
unsigned(countTrailingZeros(TypeSize) +
LocalKnownZero.countTrailingOnes()));
@@ -695,11 +688,11 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
break;
// Ok, we have a PHI of the form L op= R. Check for low
// zero bits.
- ComputeMaskedBits(R, KnownZero2, KnownOne2, TD, Depth+1);
+ computeKnownBits(R, KnownZero2, KnownOne2, TD, Depth+1);
// We need to take the minimum number of known bits
APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
- ComputeMaskedBits(L, KnownZero3, KnownOne3, TD, Depth+1);
+ computeKnownBits(L, KnownZero3, KnownOne3, TD, Depth+1);
KnownZero = APInt::getLowBitsSet(BitWidth,
std::min(KnownZero2.countTrailingOnes(),
@@ -711,7 +704,7 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
// Unreachable blocks may have zero-operand PHI nodes.
if (P->getNumIncomingValues() == 0)
- return;
+ break;
// Otherwise take the unions of the known bit sets of the operands,
// taking conservative care to avoid excessive recursion.
@@ -730,8 +723,8 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
KnownOne2 = APInt(BitWidth, 0);
// Recurse, but cap the recursion to one level, because we don't
// want to waste time spinning around in loops.
- ComputeMaskedBits(P->getIncomingValue(i), KnownZero2, KnownOne2, TD,
- MaxDepth-1);
+ computeKnownBits(P->getIncomingValue(i), KnownZero2, KnownOne2, TD,
+ MaxDepth-1);
KnownZero &= KnownZero2;
KnownOne &= KnownOne2;
// If all bits have been ruled out, there's no need to check
@@ -743,6 +736,12 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
break;
}
case Instruction::Call:
+ case Instruction::Invoke:
+ if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
+ computeKnownBitsFromRangeMetadata(*MD, KnownZero);
+ // If range metadata is attached to this IntrinsicInst, intersect the
+ // explicit range specified by the metadata with the implicit range of
+ // the intrinsic.
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
switch (II->getIntrinsicID()) {
default: break;
@@ -752,16 +751,16 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
// If this call is undefined for 0, the result will be less than 2^n.
if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
LowBits -= 1;
- KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
+ KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
break;
}
case Intrinsic::ctpop: {
unsigned LowBits = Log2_32(BitWidth)+1;
- KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
+ KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
break;
}
case Intrinsic::x86_sse42_crc32_64_64:
- KnownZero = APInt::getHighBitsSet(64, 32);
+ KnownZero |= APInt::getHighBitsSet(64, 32);
break;
}
}
@@ -775,30 +774,32 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
default: break;
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow:
- ComputeMaskedBitsAddSub(true, II->getArgOperand(0),
- II->getArgOperand(1), false, KnownZero,
- KnownOne, KnownZero2, KnownOne2, TD, Depth);
+ computeKnownBitsAddSub(true, II->getArgOperand(0),
+ II->getArgOperand(1), false, KnownZero,
+ KnownOne, KnownZero2, KnownOne2, TD, Depth);
break;
case Intrinsic::usub_with_overflow:
case Intrinsic::ssub_with_overflow:
- ComputeMaskedBitsAddSub(false, II->getArgOperand(0),
- II->getArgOperand(1), false, KnownZero,
- KnownOne, KnownZero2, KnownOne2, TD, Depth);
+ computeKnownBitsAddSub(false, II->getArgOperand(0),
+ II->getArgOperand(1), false, KnownZero,
+ KnownOne, KnownZero2, KnownOne2, TD, Depth);
break;
case Intrinsic::umul_with_overflow:
case Intrinsic::smul_with_overflow:
- ComputeMaskedBitsMul(II->getArgOperand(0), II->getArgOperand(1),
- false, KnownZero, KnownOne,
- KnownZero2, KnownOne2, TD, Depth);
+ computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1),
+ false, KnownZero, KnownOne,
+ KnownZero2, KnownOne2, TD, Depth);
break;
}
}
}
}
+
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
}
/// ComputeSignBit - Determine whether the sign bit is known to be zero or
-/// one. Convenience wrapper around ComputeMaskedBits.
+/// one. Convenience wrapper around computeKnownBits.
void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
const DataLayout *TD, unsigned Depth) {
unsigned BitWidth = getBitWidth(V->getType(), TD);
@@ -809,7 +810,7 @@ void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
}
APInt ZeroBits(BitWidth, 0);
APInt OneBits(BitWidth, 0);
- ComputeMaskedBits(V, ZeroBits, OneBits, TD, Depth);
+ computeKnownBits(V, ZeroBits, OneBits, TD, Depth);
KnownOne = OneBits[BitWidth - 1];
KnownZero = ZeroBits[BitWidth - 1];
}
@@ -841,7 +842,7 @@ bool llvm::isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth) {
if (Depth++ == MaxDepth)
return false;
- Value *X = 0, *Y = 0;
+ Value *X = nullptr, *Y = nullptr;
// A shift of a power of two is a power of two or zero.
if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
match(V, m_Shr(m_Value(X), m_Value()))))
@@ -881,10 +882,10 @@ bool llvm::isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth) {
unsigned BitWidth = V->getType()->getScalarSizeInBits();
APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0);
- ComputeMaskedBits(X, LHSZeroBits, LHSOneBits, 0, Depth);
+ computeKnownBits(X, LHSZeroBits, LHSOneBits, nullptr, Depth);
APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0);
- ComputeMaskedBits(Y, RHSZeroBits, RHSOneBits, 0, Depth);
+ computeKnownBits(Y, RHSZeroBits, RHSOneBits, nullptr, Depth);
// If i8 V is a power of two or zero:
// ZeroBits: 1 1 1 0 1 1 1 1
// ~ZeroBits: 0 0 0 1 0 0 0 0
@@ -1004,7 +1005,7 @@ bool llvm::isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth) {
unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), TD);
// X | Y != 0 if X != 0 or Y != 0.
- Value *X = 0, *Y = 0;
+ Value *X = nullptr, *Y = nullptr;
if (match(V, m_Or(m_Value(X), m_Value(Y))))
return isKnownNonZero(X, TD, Depth) || isKnownNonZero(Y, TD, Depth);
@@ -1022,7 +1023,7 @@ bool llvm::isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth) {
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
- ComputeMaskedBits(X, KnownZero, KnownOne, TD, Depth);
+ computeKnownBits(X, KnownZero, KnownOne, TD, Depth);
if (KnownOne[0])
return true;
}
@@ -1064,12 +1065,12 @@ bool llvm::isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth) {
APInt Mask = APInt::getSignedMaxValue(BitWidth);
// The sign bit of X is set. If some other bit is set then X is not equal
// to INT_MIN.
- ComputeMaskedBits(X, KnownZero, KnownOne, TD, Depth);
+ computeKnownBits(X, KnownZero, KnownOne, TD, Depth);
if ((KnownOne & Mask) != 0)
return true;
// The sign bit of Y is set. If some other bit is set then Y is not equal
// to INT_MIN.
- ComputeMaskedBits(Y, KnownZero, KnownOne, TD, Depth);
+ computeKnownBits(Y, KnownZero, KnownOne, TD, Depth);
if ((KnownOne & Mask) != 0)
return true;
}
@@ -1099,7 +1100,7 @@ bool llvm::isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth) {
if (!BitWidth) return false;
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
- ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
+ computeKnownBits(V, KnownZero, KnownOne, TD, Depth);
return KnownOne != 0;
}
@@ -1115,8 +1116,7 @@ bool llvm::isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth) {
bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
const DataLayout *TD, unsigned Depth) {
APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
- ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(V, KnownZero, KnownOne, TD, Depth);
return (KnownZero & Mask) == Mask;
}
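A small worked example of the renamed query (the IR value is illustrative, not from the patch): for

  %v = and i8 %y, 12

the And case of computeKnownBits merges the constant operand's bits with the unknown %y, leaving KnownOne = 0 and KnownZero = 0xF3 (every bit outside 0b00001100). MaskedValueIsZero(%v, 0xF0) therefore returns true, while MaskedValueIsZero(%v, 0x0C) does not, since bits 2 and 3 still depend on %y.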
@@ -1141,7 +1141,7 @@ unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout *TD,
unsigned Tmp, Tmp2;
unsigned FirstAnswer = 1;
- // Note that ConstantInt is handled by the general ComputeMaskedBits case
+ // Note that ConstantInt is handled by the general computeKnownBits case
// below.
if (Depth == 6)
@@ -1186,7 +1186,7 @@ unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout *TD,
FirstAnswer = std::min(Tmp, Tmp2);
// We computed what we know about the sign bits as our first
// answer. Now proceed to the generic code that uses
- // ComputeMaskedBits, and pick whichever answer is better.
+ // computeKnownBits, and pick whichever answer is better.
}
break;
@@ -1206,7 +1206,7 @@ unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout *TD,
if (ConstantInt *CRHS = dyn_cast<ConstantInt>(U->getOperand(1)))
if (CRHS->isAllOnesValue()) {
APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
- ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
+ computeKnownBits(U->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
// If the input is known to be 0 or 1, the output is 0/-1, which is all
// sign bits set.
@@ -1231,7 +1231,7 @@ unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout *TD,
if (ConstantInt *CLHS = dyn_cast<ConstantInt>(U->getOperand(0)))
if (CLHS->isNullValue()) {
APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
- ComputeMaskedBits(U->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
+ computeKnownBits(U->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
// If the input is known to be 0 or 1, the output is 0/-1, which is all
// sign bits set.
if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
@@ -1277,7 +1277,7 @@ unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout *TD,
// use this information.
APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
APInt Mask;
- ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
+ computeKnownBits(V, KnownZero, KnownOne, TD, Depth);
if (KnownZero.isNegative()) { // sign bit is 0
Mask = KnownZero;
@@ -1363,7 +1363,7 @@ bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
Op1 = ConstantInt::get(V->getContext(), API);
}
- Value *Mul0 = NULL;
+ Value *Mul0 = nullptr;
if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
if (Constant *Op1C = dyn_cast<Constant>(Op1))
if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
@@ -1387,7 +1387,7 @@ bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
}
}
- Value *Mul1 = NULL;
+ Value *Mul1 = nullptr;
if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
if (Constant *Op0C = dyn_cast<Constant>(Op0))
if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
@@ -1431,7 +1431,7 @@ bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
return 1; // Limit search depth.
const Operator *I = dyn_cast<Operator>(V);
- if (I == 0) return false;
+ if (!I) return false;
// Check if the nsz fast-math flag is set
if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
@@ -1512,7 +1512,7 @@ Value *llvm::isBytewiseValue(Value *V) {
// If the top/bottom halves aren't the same, reject it.
if (Val != Val2)
- return 0;
+ return nullptr;
}
return ConstantInt::get(V->getContext(), Val);
}
@@ -1524,11 +1524,11 @@ Value *llvm::isBytewiseValue(Value *V) {
Value *Elt = CA->getElementAsConstant(0);
Value *Val = isBytewiseValue(Elt);
if (!Val)
- return 0;
+ return nullptr;
for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
if (CA->getElementAsConstant(I) != Elt)
- return 0;
+ return nullptr;
return Val;
}
@@ -1539,7 +1539,7 @@ Value *llvm::isBytewiseValue(Value *V) {
// %c = or i16 %a, %b
// but until there is an example that actually needs this, it doesn't seem
// worth worrying about.
- return 0;
+ return nullptr;
}
@@ -1589,7 +1589,7 @@ static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
Value *V = FindInsertedValue(From, Idxs);
if (!V)
- return NULL;
+ return nullptr;
// Insert the value in the new (sub) aggregrate
return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
@@ -1640,7 +1640,7 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
if (Constant *C = dyn_cast<Constant>(V)) {
C = C->getAggregateElement(idx_range[0]);
- if (C == 0) return 0;
+ if (!C) return nullptr;
return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
}
@@ -1653,7 +1653,7 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
if (req_idx == idx_range.end()) {
// We can't handle this without inserting insertvalues
if (!InsertBefore)
- return 0;
+ return nullptr;
// The requested index identifies a part of a nested aggregate. Handle
// this specially. For example,
@@ -1707,7 +1707,7 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
}
// Otherwise, we don't know (such as, extracting from a function return value
// or load instruction)
- return 0;
+ return nullptr;
}
/// GetPointerBaseWithConstantOffset - Analyze the specified pointer to see if
@@ -1733,7 +1733,8 @@ Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
}
Ptr = GEP->getPointerOperand();
- } else if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
+ } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
+ Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
Ptr = cast<Operator>(Ptr)->getOperand(0);
} else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
if (GA->mayBeOverridden())
@@ -1768,13 +1769,13 @@ bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
// Make sure the index-ee is a pointer to array of i8.
PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
- if (AT == 0 || !AT->getElementType()->isIntegerTy(8))
+ if (!AT || !AT->getElementType()->isIntegerTy(8))
return false;
// Check to make sure that the first operand of the GEP is an integer and
// has value 0 so that we are sure we're indexing into the initializer.
const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
- if (FirstIdx == 0 || !FirstIdx->isZero())
+ if (!FirstIdx || !FirstIdx->isZero())
return false;
// If the second index isn't a ConstantInt, then this is a variable index
@@ -1806,7 +1807,7 @@ bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
// Must be a Constant Array
const ConstantDataArray *Array =
dyn_cast<ConstantDataArray>(GV->getInitializer());
- if (Array == 0 || !Array->isString())
+ if (!Array || !Array->isString())
return false;
// Get the number of elements in the array
@@ -1902,7 +1903,8 @@ llvm::GetUnderlyingObject(Value *V, const DataLayout *TD, unsigned MaxLookup) {
for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
V = GEP->getPointerOperand();
- } else if (Operator::getOpcode(V) == Instruction::BitCast) {
+ } else if (Operator::getOpcode(V) == Instruction::BitCast ||
+ Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
V = cast<Operator>(V)->getOperand(0);
} else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
if (GA->mayBeOverridden())
@@ -1912,7 +1914,7 @@ llvm::GetUnderlyingObject(Value *V, const DataLayout *TD, unsigned MaxLookup) {
// See if InstructionSimplify knows any relevant tricks.
if (Instruction *I = dyn_cast<Instruction>(V))
// TODO: Acquire a DominatorTree and use it.
- if (Value *Simplified = SimplifyInstruction(I, TD, 0)) {
+ if (Value *Simplified = SimplifyInstruction(I, TD, nullptr)) {
V = Simplified;
continue;
}
@@ -1959,9 +1961,8 @@ llvm::GetUnderlyingObjects(Value *V,
/// are lifetime markers.
///
bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
- for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
- UI != UE; ++UI) {
- const IntrinsicInst *II = dyn_cast<IntrinsicInst>(*UI);
+ for (const User *U : V->users()) {
+ const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
if (!II) return false;
if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
@@ -1987,7 +1988,7 @@ bool llvm::isSafeToSpeculativelyExecute(const Value *V,
return true;
case Instruction::UDiv:
case Instruction::URem:
- // x / y is undefined if y == 0, but calcuations like x / 3 are safe.
+ // x / y is undefined if y == 0, but calculations like x / 3 are safe.
return isKnownNonZero(Inst->getOperand(1), TD);
case Instruction::SDiv:
case Instruction::SRem: {
@@ -2001,19 +2002,21 @@ bool llvm::isSafeToSpeculativelyExecute(const Value *V,
return false;
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
- ComputeMaskedBits(Op, KnownZero, KnownOne, TD);
+ computeKnownBits(Op, KnownZero, KnownOne, TD);
return !!KnownZero;
}
case Instruction::Load: {
const LoadInst *LI = cast<LoadInst>(Inst);
- if (!LI->isUnordered())
+ if (!LI->isUnordered() ||
+ // Speculative load may create a race that did not exist in the source.
+ LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
return false;
- return LI->getPointerOperand()->isDereferenceablePointer();
+ return LI->getPointerOperand()->isDereferenceablePointer(TD);
}
case Instruction::Call: {
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
switch (II->getIntrinsicID()) {
- // These synthetic intrinsics have no side-effects, and just mark
+ // These synthetic intrinsics have no side-effects and just mark
// information about their operands.
// FIXME: There are other no-op synthetic instructions that potentially
// should be considered at least *safe* to speculate...
@@ -2033,6 +2036,12 @@ bool llvm::isSafeToSpeculativelyExecute(const Value *V,
case Intrinsic::umul_with_overflow:
case Intrinsic::usub_with_overflow:
return true;
+ // Sqrt should be OK, since the llvm sqrt intrinsic isn't defined to set
+ // errno like libm sqrt would.
+ case Intrinsic::sqrt:
+ case Intrinsic::fma:
+ case Intrinsic::fmuladd:
+ return true;
// TODO: some fp intrinsics are marked as having the same error handling
// as libm. They're safe to speculate when they won't error.
// TODO: are convert_{from,to}_fp16 safe?
@@ -2068,14 +2077,18 @@ bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
// Alloca never returns null, malloc might.
if (isa<AllocaInst>(V)) return true;
- // A byval argument is never null.
+ // A byval, inalloca, or nonnull argument is never null.
if (const Argument *A = dyn_cast<Argument>(V))
- return A->hasByValAttr();
+ return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();
// Global values are not null unless extern weak.
if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
return !GV->hasExternalWeakLinkage();
+ if (ImmutableCallSite CS = V)
+ if (CS.isReturnNonNull())
+ return true;
+
// operator new never returns null.
if (isOperatorNewLikeFn(V, TLI, /*LookThroughBitCast=*/true))
return true;
diff --git a/contrib/llvm/lib/AsmParser/LLLexer.cpp b/contrib/llvm/lib/AsmParser/LLLexer.cpp
index 1e6085b..962298f 100644
--- a/contrib/llvm/lib/AsmParser/LLLexer.cpp
+++ b/contrib/llvm/lib/AsmParser/LLLexer.cpp
@@ -14,7 +14,7 @@
#include "LLLexer.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
-#include "llvm/Assembly/Parser.h"
+#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
@@ -34,6 +34,10 @@ bool LLLexer::Error(LocTy ErrorLoc, const Twine &Msg) const {
return true;
}
+void LLLexer::Warning(LocTy WarningLoc, const Twine &Msg) const {
+ SM.PrintMessage(WarningLoc, SourceMgr::DK_Warning, Msg);
+}
+
//===----------------------------------------------------------------------===//
// Helper functions.
//===----------------------------------------------------------------------===//
@@ -146,7 +150,7 @@ static bool isLabelChar(char C) {
static const char *isLabelTail(const char *CurPtr) {
while (1) {
if (CurPtr[0] == ':') return CurPtr+1;
- if (!isLabelChar(CurPtr[0])) return 0;
+ if (!isLabelChar(CurPtr[0])) return nullptr;
++CurPtr;
}
}
@@ -205,6 +209,7 @@ lltok::Kind LLLexer::LexToken() {
return LexToken();
case '+': return LexPositive();
case '@': return LexAt();
+ case '$': return LexDollar();
case '%': return LexPercent();
case '"': return LexQuote();
case '.':
@@ -218,13 +223,6 @@ lltok::Kind LLLexer::LexToken() {
return lltok::dotdotdot;
}
return lltok::Error;
- case '$':
- if (const char *Ptr = isLabelTail(CurPtr)) {
- CurPtr = Ptr;
- StrVal.assign(TokStart, CurPtr-1);
- return lltok::LabelStr;
- }
- return lltok::Error;
case ';':
SkipLineComment();
return LexToken();
@@ -275,6 +273,10 @@ lltok::Kind LLLexer::LexAt() {
if (CurChar == '"') {
StrVal.assign(TokStart+2, CurPtr-1);
UnEscapeLexed(StrVal);
+ if (StringRef(StrVal).find_first_of(0) != StringRef::npos) {
+ Error("Null bytes are not allowed in names");
+ return lltok::Error;
+ }
return lltok::GlobalVar;
}
}
@@ -299,6 +301,43 @@ lltok::Kind LLLexer::LexAt() {
return lltok::Error;
}
+lltok::Kind LLLexer::LexDollar() {
+ if (const char *Ptr = isLabelTail(TokStart)) {
+ CurPtr = Ptr;
+ StrVal.assign(TokStart, CurPtr - 1);
+ return lltok::LabelStr;
+ }
+
+ // Handle DollarStringConstant: $\"[^\"]*\"
+ if (CurPtr[0] == '"') {
+ ++CurPtr;
+
+ while (1) {
+ int CurChar = getNextChar();
+
+ if (CurChar == EOF) {
+ Error("end of file in COMDAT variable name");
+ return lltok::Error;
+ }
+ if (CurChar == '"') {
+ StrVal.assign(TokStart + 2, CurPtr - 1);
+ UnEscapeLexed(StrVal);
+ if (StringRef(StrVal).find_first_of(0) != StringRef::npos) {
+ Error("Null bytes are not allowed in names");
+ return lltok::Error;
+ }
+ return lltok::ComdatVar;
+ }
+ }
+ }
+
+ // Handle ComdatVarName: $[-a-zA-Z$._][-a-zA-Z$._0-9]*
+ if (ReadVarName())
+ return lltok::ComdatVar;
+
+ return lltok::Error;
+}
+
/// ReadString - Read a string until the closing quote.
lltok::Kind LLLexer::ReadString(lltok::Kind kind) {
const char *Start = CurPtr;
@@ -427,8 +466,8 @@ lltok::Kind LLLexer::LexHash() {
/// HexIntConstant [us]0x[0-9A-Fa-f]+
lltok::Kind LLLexer::LexIdentifier() {
const char *StartChar = CurPtr;
- const char *IntEnd = CurPtr[-1] == 'i' ? 0 : StartChar;
- const char *KeywordEnd = 0;
+ const char *IntEnd = CurPtr[-1] == 'i' ? nullptr : StartChar;
+ const char *KeywordEnd = nullptr;
for (; isLabelChar(*CurPtr); ++CurPtr) {
// If we decide this is an integer, remember the end of the sequence.
@@ -447,7 +486,7 @@ lltok::Kind LLLexer::LexIdentifier() {
// Otherwise, this wasn't a label. If this was valid as an integer type,
// return it.
- if (IntEnd == 0) IntEnd = CurPtr;
+ if (!IntEnd) IntEnd = CurPtr;
if (IntEnd != StartChar) {
CurPtr = IntEnd;
uint64_t NumBits = atoull(StartChar, CurPtr);
@@ -461,7 +500,7 @@ lltok::Kind LLLexer::LexIdentifier() {
}
// Otherwise, this was a letter sequence. See which keyword this is.
- if (KeywordEnd == 0) KeywordEnd = CurPtr;
+ if (!KeywordEnd) KeywordEnd = CurPtr;
CurPtr = KeywordEnd;
--StartChar;
unsigned Len = CurPtr-StartChar;
@@ -476,13 +515,13 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(global); KEYWORD(constant);
KEYWORD(private);
- KEYWORD(linker_private);
- KEYWORD(linker_private_weak);
KEYWORD(internal);
+ KEYWORD(linker_private); // NOTE: deprecated, for parser compatibility
+ KEYWORD(linker_private_weak); // NOTE: deprecated, for parser compatibility
KEYWORD(available_externally);
KEYWORD(linkonce);
KEYWORD(linkonce_odr);
- KEYWORD(weak);
+ KEYWORD(weak); // Use as a linkage, and a modifier for "cmpxchg".
KEYWORD(weak_odr);
KEYWORD(appending);
KEYWORD(dllimport);
@@ -504,6 +543,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(null);
KEYWORD(to);
KEYWORD(tail);
+ KEYWORD(musttail);
KEYWORD(target);
KEYWORD(triple);
KEYWORD(unwind);
@@ -559,6 +599,8 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(x86_64_win64cc);
KEYWORD(webkit_jscc);
KEYWORD(anyregcc);
+ KEYWORD(preserve_mostcc);
+ KEYWORD(preserve_allcc);
KEYWORD(cc);
KEYWORD(c);
@@ -568,9 +610,12 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(alwaysinline);
KEYWORD(builtin);
KEYWORD(byval);
+ KEYWORD(inalloca);
KEYWORD(cold);
+ KEYWORD(dereferenceable);
KEYWORD(inlinehint);
KEYWORD(inreg);
+ KEYWORD(jumptable);
KEYWORD(minsize);
KEYWORD(naked);
KEYWORD(nest);
@@ -581,6 +626,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(noimplicitfloat);
KEYWORD(noinline);
KEYWORD(nonlazybind);
+ KEYWORD(nonnull);
KEYWORD(noredzone);
KEYWORD(noreturn);
KEYWORD(nounwind);
@@ -604,6 +650,15 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(type);
KEYWORD(opaque);
+ KEYWORD(comdat);
+
+ // Comdat types
+ KEYWORD(any);
+ KEYWORD(exactmatch);
+ KEYWORD(largest);
+ KEYWORD(noduplicates);
+ KEYWORD(samesize);
+
KEYWORD(eq); KEYWORD(ne); KEYWORD(slt); KEYWORD(sgt); KEYWORD(sle);
KEYWORD(sge); KEYWORD(ult); KEYWORD(ugt); KEYWORD(ule); KEYWORD(uge);
KEYWORD(oeq); KEYWORD(one); KEYWORD(olt); KEYWORD(ogt); KEYWORD(ole);
diff --git a/contrib/llvm/lib/AsmParser/LLLexer.h b/contrib/llvm/lib/AsmParser/LLLexer.h
index 85703c7..d42de57 100644
--- a/contrib/llvm/lib/AsmParser/LLLexer.h
+++ b/contrib/llvm/lib/AsmParser/LLLexer.h
@@ -63,6 +63,10 @@ namespace llvm {
bool Error(LocTy L, const Twine &Msg) const;
bool Error(const Twine &Msg) const { return Error(getLoc(), Msg); }
+
+ void Warning(LocTy WarningLoc, const Twine &Msg) const;
+ void Warning(const Twine &Msg) const { return Warning(getLoc(), Msg); }
+
std::string getFilename() const;
private:
@@ -77,6 +81,7 @@ namespace llvm {
lltok::Kind LexDigitOrNegative();
lltok::Kind LexPositive();
lltok::Kind LexAt();
+ lltok::Kind LexDollar();
lltok::Kind LexExclaim();
lltok::Kind LexPercent();
lltok::Kind LexQuote();
diff --git a/contrib/llvm/lib/AsmParser/LLParser.cpp b/contrib/llvm/lib/AsmParser/LLParser.cpp
index 3b903cd..ac6e0e5 100644
--- a/contrib/llvm/lib/AsmParser/LLParser.cpp
+++ b/contrib/llvm/lib/AsmParser/LLParser.cpp
@@ -13,7 +13,7 @@
#include "LLParser.h"
#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/AutoUpgrade.h"
+#include "llvm/IR/AutoUpgrade.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
@@ -57,7 +57,8 @@ bool LLParser::ValidateEndOfModule() {
for (unsigned i = 0, e = MDList.size(); i != e; ++i) {
unsigned SlotNo = MDList[i].MDSlot;
- if (SlotNo >= NumberedMetadata.size() || NumberedMetadata[SlotNo] == 0)
+ if (SlotNo >= NumberedMetadata.size() ||
+ NumberedMetadata[SlotNo] == nullptr)
return Error(MDList[i].Loc, "use of undefined metadata '!" +
Twine(SlotNo) + "'");
Inst->setMetadata(MDList[i].MDKind, NumberedMetadata[SlotNo]);
@@ -132,20 +133,20 @@ bool LLParser::ValidateEndOfModule() {
// references after the function was defined. Resolve those now.
while (!ForwardRefBlockAddresses.empty()) {
// Okay, we are referencing an already-parsed function, resolve them now.
- Function *TheFn = 0;
+ Function *TheFn = nullptr;
const ValID &Fn = ForwardRefBlockAddresses.begin()->first;
if (Fn.Kind == ValID::t_GlobalName)
TheFn = M->getFunction(Fn.StrVal);
else if (Fn.UIntVal < NumberedVals.size())
TheFn = dyn_cast<Function>(NumberedVals[Fn.UIntVal]);
- if (TheFn == 0)
+ if (!TheFn)
return Error(Fn.Loc, "unknown function referenced by blockaddress");
// Resolve all these references.
if (ResolveForwardRefBlockAddresses(TheFn,
ForwardRefBlockAddresses.begin()->second,
- 0))
+ nullptr))
return true;
ForwardRefBlockAddresses.erase(ForwardRefBlockAddresses.begin());
@@ -162,6 +163,11 @@ bool LLParser::ValidateEndOfModule() {
return Error(I->second.second,
"use of undefined type named '" + I->getKey() + "'");
+ if (!ForwardRefComdats.empty())
+ return Error(ForwardRefComdats.begin()->second,
+ "use of undefined comdat '$" +
+ ForwardRefComdats.begin()->first + "'");
+
if (!ForwardRefVals.empty())
return Error(ForwardRefVals.begin()->second.second,
"use of undefined value '@" + ForwardRefVals.begin()->first +
@@ -206,7 +212,7 @@ bool LLParser::ResolveForwardRefBlockAddresses(Function *TheFn,
TheFn->getValueSymbolTable().lookup(Refs[i].first.StrVal));
}
- if (Res == 0)
+ if (!Res)
return Error(Refs[i].first.Loc,
"referenced value is not a basic block");
@@ -237,52 +243,51 @@ bool LLParser::ParseTopLevelEntities() {
case lltok::LocalVar: if (ParseNamedType()) return true; break;
case lltok::GlobalID: if (ParseUnnamedGlobal()) return true; break;
case lltok::GlobalVar: if (ParseNamedGlobal()) return true; break;
+ case lltok::ComdatVar: if (parseComdat()) return true; break;
case lltok::exclaim: if (ParseStandaloneMetadata()) return true; break;
case lltok::MetadataVar:if (ParseNamedMetadata()) return true; break;
// The Global variable production with no name can have many different
// optional leading prefixes, the production is:
- // GlobalVar ::= OptionalLinkage OptionalVisibility OptionalThreadLocal
- // OptionalAddrSpace OptionalUnNammedAddr
+ // GlobalVar ::= OptionalLinkage OptionalVisibility OptionalDLLStorageClass
+ // OptionalThreadLocal OptionalAddrSpace OptionalUnNammedAddr
// ('constant'|'global') ...
case lltok::kw_private: // OptionalLinkage
- case lltok::kw_linker_private: // OptionalLinkage
- case lltok::kw_linker_private_weak: // OptionalLinkage
case lltok::kw_internal: // OptionalLinkage
+ case lltok::kw_linker_private: // Obsolete OptionalLinkage
+ case lltok::kw_linker_private_weak: // Obsolete OptionalLinkage
case lltok::kw_weak: // OptionalLinkage
case lltok::kw_weak_odr: // OptionalLinkage
case lltok::kw_linkonce: // OptionalLinkage
case lltok::kw_linkonce_odr: // OptionalLinkage
case lltok::kw_appending: // OptionalLinkage
- case lltok::kw_dllexport: // OptionalLinkage
case lltok::kw_common: // OptionalLinkage
- case lltok::kw_dllimport: // OptionalLinkage
case lltok::kw_extern_weak: // OptionalLinkage
- case lltok::kw_external: { // OptionalLinkage
- unsigned Linkage, Visibility;
- if (ParseOptionalLinkage(Linkage) ||
+ case lltok::kw_external: // OptionalLinkage
+ case lltok::kw_default: // OptionalVisibility
+ case lltok::kw_hidden: // OptionalVisibility
+ case lltok::kw_protected: // OptionalVisibility
+ case lltok::kw_dllimport: // OptionalDLLStorageClass
+ case lltok::kw_dllexport: // OptionalDLLStorageClass
+ case lltok::kw_thread_local: // OptionalThreadLocal
+ case lltok::kw_addrspace: // OptionalAddrSpace
+ case lltok::kw_constant: // GlobalType
+ case lltok::kw_global: { // GlobalType
+ unsigned Linkage, Visibility, DLLStorageClass;
+ bool UnnamedAddr;
+ GlobalVariable::ThreadLocalMode TLM;
+ bool HasLinkage;
+ if (ParseOptionalLinkage(Linkage, HasLinkage) ||
ParseOptionalVisibility(Visibility) ||
- ParseGlobal("", SMLoc(), Linkage, true, Visibility))
- return true;
- break;
- }
- case lltok::kw_default: // OptionalVisibility
- case lltok::kw_hidden: // OptionalVisibility
- case lltok::kw_protected: { // OptionalVisibility
- unsigned Visibility;
- if (ParseOptionalVisibility(Visibility) ||
- ParseGlobal("", SMLoc(), 0, false, Visibility))
+ ParseOptionalDLLStorageClass(DLLStorageClass) ||
+ ParseOptionalThreadLocal(TLM) ||
+ parseOptionalUnnamedAddr(UnnamedAddr) ||
+ ParseGlobal("", SMLoc(), Linkage, HasLinkage, Visibility,
+ DLLStorageClass, TLM, UnnamedAddr))
return true;
break;
}
- case lltok::kw_thread_local: // OptionalThreadLocal
- case lltok::kw_addrspace: // OptionalAddrSpace
- case lltok::kw_constant: // GlobalType
- case lltok::kw_global: // GlobalType
- if (ParseGlobal("", SMLoc(), 0, false, 0)) return true;
- break;
-
case lltok::kw_attributes: if (ParseUnnamedAttrGrp()) return true; break;
}
}
@@ -364,7 +369,7 @@ bool LLParser::ParseUnnamedType() {
if (TypeID >= NumberedTypes.size())
NumberedTypes.resize(TypeID+1);
- Type *Result = 0;
+ Type *Result = nullptr;
if (ParseStructDefinition(TypeLoc, "",
NumberedTypes[TypeID], Result)) return true;
@@ -391,7 +396,7 @@ bool LLParser::ParseNamedType() {
ParseToken(lltok::kw_type, "expected 'type' after name"))
return true;
- Type *Result = 0;
+ Type *Result = nullptr;
if (ParseStructDefinition(NameLoc, Name,
NamedTypes[Name], Result)) return true;
@@ -446,9 +451,11 @@ bool LLParser::ParseGlobalType(bool &IsConstant) {
/// ParseUnnamedGlobal:
/// OptionalVisibility ALIAS ...
-/// OptionalLinkage OptionalVisibility ... -> global variable
+/// OptionalLinkage OptionalVisibility OptionalDLLStorageClass
+/// ... -> global variable
/// GlobalID '=' OptionalVisibility ALIAS ...
-/// GlobalID '=' OptionalLinkage OptionalVisibility ... -> global variable
+/// GlobalID '=' OptionalLinkage OptionalVisibility OptionalDLLStorageClass
+/// ... -> global variable
bool LLParser::ParseUnnamedGlobal() {
unsigned VarID = NumberedVals.size();
std::string Name;
@@ -466,19 +473,27 @@ bool LLParser::ParseUnnamedGlobal() {
}
bool HasLinkage;
- unsigned Linkage, Visibility;
+ unsigned Linkage, Visibility, DLLStorageClass;
+ GlobalVariable::ThreadLocalMode TLM;
+ bool UnnamedAddr;
if (ParseOptionalLinkage(Linkage, HasLinkage) ||
- ParseOptionalVisibility(Visibility))
+ ParseOptionalVisibility(Visibility) ||
+ ParseOptionalDLLStorageClass(DLLStorageClass) ||
+ ParseOptionalThreadLocal(TLM) ||
+ parseOptionalUnnamedAddr(UnnamedAddr))
return true;
if (HasLinkage || Lex.getKind() != lltok::kw_alias)
- return ParseGlobal(Name, NameLoc, Linkage, HasLinkage, Visibility);
- return ParseAlias(Name, NameLoc, Visibility);
+ return ParseGlobal(Name, NameLoc, Linkage, HasLinkage, Visibility,
+ DLLStorageClass, TLM, UnnamedAddr);
+ return ParseAlias(Name, NameLoc, Visibility, DLLStorageClass, TLM,
+ UnnamedAddr);
}
/// ParseNamedGlobal:
/// GlobalVar '=' OptionalVisibility ALIAS ...
-/// GlobalVar '=' OptionalLinkage OptionalVisibility ... -> global variable
+/// GlobalVar '=' OptionalLinkage OptionalVisibility OptionalDLLStorageClass
+/// ... -> global variable
bool LLParser::ParseNamedGlobal() {
assert(Lex.getKind() == lltok::GlobalVar);
LocTy NameLoc = Lex.getLoc();
@@ -486,15 +501,72 @@ bool LLParser::ParseNamedGlobal() {
Lex.Lex();
bool HasLinkage;
- unsigned Linkage, Visibility;
+ unsigned Linkage, Visibility, DLLStorageClass;
+ GlobalVariable::ThreadLocalMode TLM;
+ bool UnnamedAddr;
if (ParseToken(lltok::equal, "expected '=' in global variable") ||
ParseOptionalLinkage(Linkage, HasLinkage) ||
- ParseOptionalVisibility(Visibility))
+ ParseOptionalVisibility(Visibility) ||
+ ParseOptionalDLLStorageClass(DLLStorageClass) ||
+ ParseOptionalThreadLocal(TLM) ||
+ parseOptionalUnnamedAddr(UnnamedAddr))
return true;
if (HasLinkage || Lex.getKind() != lltok::kw_alias)
- return ParseGlobal(Name, NameLoc, Linkage, HasLinkage, Visibility);
- return ParseAlias(Name, NameLoc, Visibility);
+ return ParseGlobal(Name, NameLoc, Linkage, HasLinkage, Visibility,
+ DLLStorageClass, TLM, UnnamedAddr);
+ return ParseAlias(Name, NameLoc, Visibility, DLLStorageClass, TLM,
+ UnnamedAddr);
+}
+
+bool LLParser::parseComdat() {
+ assert(Lex.getKind() == lltok::ComdatVar);
+ std::string Name = Lex.getStrVal();
+ LocTy NameLoc = Lex.getLoc();
+ Lex.Lex();
+
+ if (ParseToken(lltok::equal, "expected '=' here"))
+ return true;
+
+ if (ParseToken(lltok::kw_comdat, "expected comdat keyword"))
+ return TokError("expected comdat type");
+
+ Comdat::SelectionKind SK;
+ switch (Lex.getKind()) {
+ default:
+ return TokError("unknown selection kind");
+ case lltok::kw_any:
+ SK = Comdat::Any;
+ break;
+ case lltok::kw_exactmatch:
+ SK = Comdat::ExactMatch;
+ break;
+ case lltok::kw_largest:
+ SK = Comdat::Largest;
+ break;
+ case lltok::kw_noduplicates:
+ SK = Comdat::NoDuplicates;
+ break;
+ case lltok::kw_samesize:
+ SK = Comdat::SameSize;
+ break;
+ }
+ Lex.Lex();
+
+ // See if the comdat was forward referenced, if so, use the comdat.
+ Module::ComdatSymTabType &ComdatSymTab = M->getComdatSymbolTable();
+ Module::ComdatSymTabType::iterator I = ComdatSymTab.find(Name);
+ if (I != ComdatSymTab.end() && !ForwardRefComdats.erase(Name))
+ return Error(NameLoc, "redefinition of comdat '$" + Name + "'");
+
+ Comdat *C;
+ if (I != ComdatSymTab.end())
+ C = &I->second;
+ else
+ C = M->getOrInsertComdat(Name);
+ C->setSelectionKind(SK);
+
+ return false;
}
// MDString:
@@ -502,6 +574,7 @@ bool LLParser::ParseNamedGlobal() {
bool LLParser::ParseMDString(MDString *&Result) {
std::string Str;
if (ParseStringConstant(Str)) return true;
+ llvm::UpgradeMDStringConstant(Str);
Result = MDString::get(Context, Str);
return false;
}
@@ -516,10 +589,10 @@ bool LLParser::ParseMDNodeID(MDNode *&Result, unsigned &SlotNo) {
if (ParseUInt32(SlotNo)) return true;
// Check existing MDNode.
- if (SlotNo < NumberedMetadata.size() && NumberedMetadata[SlotNo] != 0)
+ if (SlotNo < NumberedMetadata.size() && NumberedMetadata[SlotNo] != nullptr)
Result = NumberedMetadata[SlotNo];
else
- Result = 0;
+ Result = nullptr;
return false;
}
@@ -560,7 +633,7 @@ bool LLParser::ParseNamedMetadata() {
if (ParseToken(lltok::exclaim, "Expected '!' here"))
return true;
- MDNode *N = 0;
+ MDNode *N = nullptr;
if (ParseMDNodeID(N)) return true;
NMD->addOperand(N);
} while (EatIfPresent(lltok::comma));
@@ -579,14 +652,14 @@ bool LLParser::ParseStandaloneMetadata() {
unsigned MetadataID = 0;
LocTy TyLoc;
- Type *Ty = 0;
+ Type *Ty = nullptr;
SmallVector<Value *, 16> Elts;
if (ParseUInt32(MetadataID) ||
ParseToken(lltok::equal, "expected '=' here") ||
ParseType(Ty, TyLoc) ||
ParseToken(lltok::exclaim, "Expected '!' here") ||
ParseToken(lltok::lbrace, "Expected '{' here") ||
- ParseMDNodeVector(Elts, NULL) ||
+ ParseMDNodeVector(Elts, nullptr) ||
ParseToken(lltok::rbrace, "expected end of metadata node"))
return true;
@@ -606,7 +679,7 @@ bool LLParser::ParseStandaloneMetadata() {
if (MetadataID >= NumberedMetadata.size())
NumberedMetadata.resize(MetadataID+1);
- if (NumberedMetadata[MetadataID] != 0)
+ if (NumberedMetadata[MetadataID] != nullptr)
return TokError("Metadata id is already used");
NumberedMetadata[MetadataID] = Init;
}
@@ -614,17 +687,25 @@ bool LLParser::ParseStandaloneMetadata() {
return false;
}
+static bool isValidVisibilityForLinkage(unsigned V, unsigned L) {
+ return !GlobalValue::isLocalLinkage((GlobalValue::LinkageTypes)L) ||
+ (GlobalValue::VisibilityTypes)V == GlobalValue::DefaultVisibility;
+}
+
/// ParseAlias:
-/// ::= GlobalVar '=' OptionalVisibility 'alias' OptionalLinkage Aliasee
+/// ::= GlobalVar '=' OptionalVisibility OptionalDLLStorageClass
+/// OptionalThreadLocal OptionalUnNammedAddr 'alias'
+/// OptionalLinkage Aliasee
+///
/// Aliasee
/// ::= TypeAndValue
-/// ::= 'bitcast' '(' TypeAndValue 'to' Type ')'
-/// ::= 'getelementptr' 'inbounds'? '(' ... ')'
///
-/// Everything through visibility has already been parsed.
+/// Everything through OptionalUnNammedAddr has already been parsed.
///
bool LLParser::ParseAlias(const std::string &Name, LocTy NameLoc,
- unsigned Visibility) {
+ unsigned Visibility, unsigned DLLStorageClass,
+ GlobalVariable::ThreadLocalMode TLM,
+ bool UnnamedAddr) {
assert(Lex.getKind() == lltok::kw_alias);
Lex.Lex();
LocTy LinkageLoc = Lex.getLoc();
@@ -637,28 +718,43 @@ bool LLParser::ParseAlias(const std::string &Name, LocTy NameLoc,
if(!GlobalAlias::isValidLinkage(Linkage))
return Error(LinkageLoc, "invalid linkage type for alias");
+ if (!isValidVisibilityForLinkage(Visibility, L))
+ return Error(LinkageLoc,
+ "symbol with local linkage must have default visibility");
+
Constant *Aliasee;
LocTy AliaseeLoc = Lex.getLoc();
if (Lex.getKind() != lltok::kw_bitcast &&
- Lex.getKind() != lltok::kw_getelementptr) {
- if (ParseGlobalTypeAndValue(Aliasee)) return true;
+ Lex.getKind() != lltok::kw_getelementptr &&
+ Lex.getKind() != lltok::kw_addrspacecast &&
+ Lex.getKind() != lltok::kw_inttoptr) {
+ if (ParseGlobalTypeAndValue(Aliasee))
+ return true;
} else {
// The bitcast dest type is not present, it is implied by the dest type.
ValID ID;
- if (ParseValID(ID)) return true;
+ if (ParseValID(ID))
+ return true;
if (ID.Kind != ValID::t_Constant)
return Error(AliaseeLoc, "invalid aliasee");
Aliasee = ID.ConstantVal;
}
- if (!Aliasee->getType()->isPointerTy())
- return Error(AliaseeLoc, "alias must have pointer type");
+ Type *AliaseeType = Aliasee->getType();
+ auto *PTy = dyn_cast<PointerType>(AliaseeType);
+ if (!PTy)
+ return Error(AliaseeLoc, "An alias must have pointer type");
+ Type *Ty = PTy->getElementType();
+ unsigned AddrSpace = PTy->getAddressSpace();
// Okay, create the alias but do not insert it into the module yet.
- GlobalAlias* GA = new GlobalAlias(Aliasee->getType(),
- (GlobalValue::LinkageTypes)Linkage, Name,
- Aliasee);
+ std::unique_ptr<GlobalAlias> GA(
+ GlobalAlias::create(Ty, AddrSpace, (GlobalValue::LinkageTypes)Linkage,
+ Name, Aliasee, /*Parent*/ nullptr));
+ GA->setThreadLocalMode(TLM);
GA->setVisibility((GlobalValue::VisibilityTypes)Visibility);
+ GA->setDLLStorageClass((GlobalValue::DLLStorageClassTypes)DLLStorageClass);
+ GA->setUnnamedAddr(UnnamedAddr);
// See if this value already exists in the symbol table. If so, it is either
// a redefinition or a definition of a forward reference.
@@ -678,43 +774,48 @@ bool LLParser::ParseAlias(const std::string &Name, LocTy NameLoc,
// If they agree, just RAUW the old value with the alias and remove the
// forward ref info.
- Val->replaceAllUsesWith(GA);
+ Val->replaceAllUsesWith(GA.get());
Val->eraseFromParent();
ForwardRefVals.erase(I);
}
// Insert into the module, we know its name won't collide now.
- M->getAliasList().push_back(GA);
+ M->getAliasList().push_back(GA.get());
assert(GA->getName() == Name && "Should not be a name conflict!");
+ // The module owns this now
+ GA.release();
+
return false;
}
/// ParseGlobal
-/// ::= GlobalVar '=' OptionalLinkage OptionalVisibility OptionalThreadLocal
-/// OptionalAddrSpace OptionalUnNammedAddr
+/// ::= GlobalVar '=' OptionalLinkage OptionalVisibility OptionalDLLStorageClass
+/// OptionalThreadLocal OptionalUnNammedAddr OptionalAddrSpace
/// OptionalExternallyInitialized GlobalType Type Const
-/// ::= OptionalLinkage OptionalVisibility OptionalThreadLocal
-/// OptionalAddrSpace OptionalUnNammedAddr
+/// ::= OptionalLinkage OptionalVisibility OptionalDLLStorageClass
+/// OptionalThreadLocal OptionalUnNammedAddr OptionalAddrSpace
/// OptionalExternallyInitialized GlobalType Type Const
///
-/// Everything through visibility has been parsed already.
+/// Everything up to and including OptionalUnNammedAddr has been parsed
+/// already.
///
bool LLParser::ParseGlobal(const std::string &Name, LocTy NameLoc,
unsigned Linkage, bool HasLinkage,
- unsigned Visibility) {
+ unsigned Visibility, unsigned DLLStorageClass,
+ GlobalVariable::ThreadLocalMode TLM,
+ bool UnnamedAddr) {
+ if (!isValidVisibilityForLinkage(Visibility, Linkage))
+ return Error(NameLoc,
+ "symbol with local linkage must have default visibility");
+
unsigned AddrSpace;
- bool IsConstant, UnnamedAddr, IsExternallyInitialized;
- GlobalVariable::ThreadLocalMode TLM;
- LocTy UnnamedAddrLoc;
+ bool IsConstant, IsExternallyInitialized;
LocTy IsExternallyInitializedLoc;
LocTy TyLoc;
- Type *Ty = 0;
- if (ParseOptionalThreadLocal(TLM) ||
- ParseOptionalAddrSpace(AddrSpace) ||
- ParseOptionalToken(lltok::kw_unnamed_addr, UnnamedAddr,
- &UnnamedAddrLoc) ||
+ Type *Ty = nullptr;
+ if (ParseOptionalAddrSpace(AddrSpace) ||
ParseOptionalToken(lltok::kw_externally_initialized,
IsExternallyInitialized,
&IsExternallyInitializedLoc) ||
@@ -724,9 +825,8 @@ bool LLParser::ParseGlobal(const std::string &Name, LocTy NameLoc,
// If the linkage is specified and is external, then no initializer is
// present.
- Constant *Init = 0;
- if (!HasLinkage || (Linkage != GlobalValue::DLLImportLinkage &&
- Linkage != GlobalValue::ExternalWeakLinkage &&
+ Constant *Init = nullptr;
+ if (!HasLinkage || (Linkage != GlobalValue::ExternalWeakLinkage &&
Linkage != GlobalValue::ExternalLinkage)) {
if (ParseGlobalValue(Ty, Init))
return true;
@@ -735,7 +835,7 @@ bool LLParser::ParseGlobal(const std::string &Name, LocTy NameLoc,
if (Ty->isFunctionTy() || Ty->isLabelTy())
return Error(TyLoc, "invalid type for global variable");
- GlobalVariable *GV = 0;
+ GlobalVariable *GV = nullptr;
// See if the global was forward referenced, if so, use the global.
if (!Name.empty()) {
@@ -753,9 +853,9 @@ bool LLParser::ParseGlobal(const std::string &Name, LocTy NameLoc,
}
}
- if (GV == 0) {
- GV = new GlobalVariable(*M, Ty, false, GlobalValue::ExternalLinkage, 0,
- Name, 0, GlobalVariable::NotThreadLocal,
+ if (!GV) {
+ GV = new GlobalVariable(*M, Ty, false, GlobalValue::ExternalLinkage, nullptr,
+ Name, nullptr, GlobalVariable::NotThreadLocal,
AddrSpace);
} else {
if (GV->getType()->getElementType() != Ty)
@@ -775,6 +875,7 @@ bool LLParser::ParseGlobal(const std::string &Name, LocTy NameLoc,
GV->setConstant(IsConstant);
GV->setLinkage((GlobalValue::LinkageTypes)Linkage);
GV->setVisibility((GlobalValue::VisibilityTypes)Visibility);
+ GV->setDLLStorageClass((GlobalValue::DLLStorageClassTypes)DLLStorageClass);
GV->setExternallyInitialized(IsExternallyInitialized);
GV->setThreadLocalMode(TLM);
GV->setUnnamedAddr(UnnamedAddr);
@@ -793,7 +894,13 @@ bool LLParser::ParseGlobal(const std::string &Name, LocTy NameLoc,
if (ParseOptionalAlignment(Alignment)) return true;
GV->setAlignment(Alignment);
} else {
- TokError("unknown global variable property!");
+ Comdat *C;
+ if (parseOptionalComdat(C))
+ return true;
+ if (C)
+ GV->setComdat(C);
+ else
+ return TokError("unknown global variable property!");
}
}
@@ -912,6 +1019,7 @@ bool LLParser::ParseFnAttributeValuePairs(AttrBuilder &B,
case lltok::kw_builtin: B.addAttribute(Attribute::Builtin); break;
case lltok::kw_cold: B.addAttribute(Attribute::Cold); break;
case lltok::kw_inlinehint: B.addAttribute(Attribute::InlineHint); break;
+ case lltok::kw_jumptable: B.addAttribute(Attribute::JumpTable); break;
case lltok::kw_minsize: B.addAttribute(Attribute::MinSize); break;
case lltok::kw_naked: B.addAttribute(Attribute::Naked); break;
case lltok::kw_nobuiltin: B.addAttribute(Attribute::NoBuiltin); break;
@@ -944,9 +1052,12 @@ bool LLParser::ParseFnAttributeValuePairs(AttrBuilder &B,
"invalid use of attribute on a function");
break;
case lltok::kw_byval:
+ case lltok::kw_dereferenceable:
+ case lltok::kw_inalloca:
case lltok::kw_nest:
case lltok::kw_noalias:
case lltok::kw_nocapture:
+ case lltok::kw_nonnull:
case lltok::kw_returned:
case lltok::kw_sret:
HaveError |=
@@ -969,9 +1080,9 @@ bool LLParser::ParseFnAttributeValuePairs(AttrBuilder &B,
GlobalValue *LLParser::GetGlobalVal(const std::string &Name, Type *Ty,
LocTy Loc) {
PointerType *PTy = dyn_cast<PointerType>(Ty);
- if (PTy == 0) {
+ if (!PTy) {
Error(Loc, "global variable reference must have pointer type");
- return 0;
+ return nullptr;
}
// Look this name up in the normal function symbol table.
@@ -980,7 +1091,7 @@ GlobalValue *LLParser::GetGlobalVal(const std::string &Name, Type *Ty,
// If this is a forward reference for the value, see if we already created a
// forward ref record.
- if (Val == 0) {
+ if (!Val) {
std::map<std::string, std::pair<GlobalValue*, LocTy> >::iterator
I = ForwardRefVals.find(Name);
if (I != ForwardRefVals.end())
@@ -992,7 +1103,7 @@ GlobalValue *LLParser::GetGlobalVal(const std::string &Name, Type *Ty,
if (Val->getType() == Ty) return Val;
Error(Loc, "'@" + Name + "' defined with type '" +
getTypeString(Val->getType()) + "'");
- return 0;
+ return nullptr;
}
// Otherwise, create a new forward reference for this value and remember it.
@@ -1001,8 +1112,8 @@ GlobalValue *LLParser::GetGlobalVal(const std::string &Name, Type *Ty,
FwdVal = Function::Create(FT, GlobalValue::ExternalWeakLinkage, Name, M);
else
FwdVal = new GlobalVariable(*M, PTy->getElementType(), false,
- GlobalValue::ExternalWeakLinkage, 0, Name,
- 0, GlobalVariable::NotThreadLocal,
+ GlobalValue::ExternalWeakLinkage, nullptr, Name,
+ nullptr, GlobalVariable::NotThreadLocal,
PTy->getAddressSpace());
ForwardRefVals[Name] = std::make_pair(FwdVal, Loc);
@@ -1011,16 +1122,16 @@ GlobalValue *LLParser::GetGlobalVal(const std::string &Name, Type *Ty,
GlobalValue *LLParser::GetGlobalVal(unsigned ID, Type *Ty, LocTy Loc) {
PointerType *PTy = dyn_cast<PointerType>(Ty);
- if (PTy == 0) {
+ if (!PTy) {
Error(Loc, "global variable reference must have pointer type");
- return 0;
+ return nullptr;
}
- GlobalValue *Val = ID < NumberedVals.size() ? NumberedVals[ID] : 0;
+ GlobalValue *Val = ID < NumberedVals.size() ? NumberedVals[ID] : nullptr;
// If this is a forward reference for the value, see if we already created a
// forward ref record.
- if (Val == 0) {
+ if (!Val) {
std::map<unsigned, std::pair<GlobalValue*, LocTy> >::iterator
I = ForwardRefValIDs.find(ID);
if (I != ForwardRefValIDs.end())
@@ -1032,7 +1143,7 @@ GlobalValue *LLParser::GetGlobalVal(unsigned ID, Type *Ty, LocTy Loc) {
if (Val->getType() == Ty) return Val;
Error(Loc, "'@" + Twine(ID) + "' defined with type '" +
getTypeString(Val->getType()) + "'");
- return 0;
+ return nullptr;
}
// Otherwise, create a new forward reference for this value and remember it.
@@ -1041,7 +1152,7 @@ GlobalValue *LLParser::GetGlobalVal(unsigned ID, Type *Ty, LocTy Loc) {
FwdVal = Function::Create(FT, GlobalValue::ExternalWeakLinkage, "", M);
else
FwdVal = new GlobalVariable(*M, PTy->getElementType(), false,
- GlobalValue::ExternalWeakLinkage, 0, "");
+ GlobalValue::ExternalWeakLinkage, nullptr, "");
ForwardRefValIDs[ID] = std::make_pair(FwdVal, Loc);
return FwdVal;
@@ -1049,6 +1160,24 @@ GlobalValue *LLParser::GetGlobalVal(unsigned ID, Type *Ty, LocTy Loc) {
//===----------------------------------------------------------------------===//
+// Comdat Reference/Resolution Routines.
+//===----------------------------------------------------------------------===//
+
+Comdat *LLParser::getComdat(const std::string &Name, LocTy Loc) {
+ // Look this name up in the comdat symbol table.
+ Module::ComdatSymTabType &ComdatSymTab = M->getComdatSymbolTable();
+ Module::ComdatSymTabType::iterator I = ComdatSymTab.find(Name);
+ if (I != ComdatSymTab.end())
+ return &I->second;
+
+ // Otherwise, create a new forward reference for this value and remember it.
+ Comdat *C = M->getOrInsertComdat(Name);
+ ForwardRefComdats[Name] = Loc;
+ return C;
+}
+
+
+//===----------------------------------------------------------------------===//
// Helper Routines.
//===----------------------------------------------------------------------===//
@@ -1084,6 +1213,16 @@ bool LLParser::ParseUInt32(unsigned &Val) {
return false;
}
+/// ParseUInt64
+/// ::= uint64
+bool LLParser::ParseUInt64(uint64_t &Val) {
+ if (Lex.getKind() != lltok::APSInt || Lex.getAPSIntVal().isSigned())
+ return TokError("expected integer");
+ Val = Lex.getAPSIntVal().getLimitedValue();
+ Lex.Lex();
+ return false;
+}
+
/// ParseTLSModel
/// := 'localdynamic'
/// := 'initialexec'
@@ -1156,10 +1295,19 @@ bool LLParser::ParseOptionalParamAttrs(AttrBuilder &B) {
continue;
}
case lltok::kw_byval: B.addAttribute(Attribute::ByVal); break;
+ case lltok::kw_dereferenceable: {
+ uint64_t Bytes;
+ if (ParseOptionalDereferenceableBytes(Bytes))
+ return true;
+ B.addDereferenceableAttr(Bytes);
+ continue;
+ }
+ case lltok::kw_inalloca: B.addAttribute(Attribute::InAlloca); break;
case lltok::kw_inreg: B.addAttribute(Attribute::InReg); break;
case lltok::kw_nest: B.addAttribute(Attribute::Nest); break;
case lltok::kw_noalias: B.addAttribute(Attribute::NoAlias); break;
case lltok::kw_nocapture: B.addAttribute(Attribute::NoCapture); break;
+ case lltok::kw_nonnull: B.addAttribute(Attribute::NonNull); break;
case lltok::kw_readnone: B.addAttribute(Attribute::ReadNone); break;
case lltok::kw_readonly: B.addAttribute(Attribute::ReadOnly); break;
case lltok::kw_returned: B.addAttribute(Attribute::Returned); break;
@@ -1171,6 +1319,7 @@ bool LLParser::ParseOptionalParamAttrs(AttrBuilder &B) {
case lltok::kw_alwaysinline:
case lltok::kw_builtin:
case lltok::kw_inlinehint:
+ case lltok::kw_jumptable:
case lltok::kw_minsize:
case lltok::kw_naked:
case lltok::kw_nobuiltin:
@@ -1210,14 +1359,23 @@ bool LLParser::ParseOptionalReturnAttrs(AttrBuilder &B) {
switch (Token) {
default: // End of attributes.
return HaveError;
+ case lltok::kw_dereferenceable: {
+ uint64_t Bytes;
+ if (ParseOptionalDereferenceableBytes(Bytes))
+ return true;
+ B.addDereferenceableAttr(Bytes);
+ continue;
+ }
case lltok::kw_inreg: B.addAttribute(Attribute::InReg); break;
case lltok::kw_noalias: B.addAttribute(Attribute::NoAlias); break;
+ case lltok::kw_nonnull: B.addAttribute(Attribute::NonNull); break;
case lltok::kw_signext: B.addAttribute(Attribute::SExt); break;
case lltok::kw_zeroext: B.addAttribute(Attribute::ZExt); break;
// Error handling.
case lltok::kw_align:
case lltok::kw_byval:
+ case lltok::kw_inalloca:
case lltok::kw_nest:
case lltok::kw_nocapture:
case lltok::kw_returned:
@@ -1230,6 +1388,7 @@ bool LLParser::ParseOptionalReturnAttrs(AttrBuilder &B) {
case lltok::kw_builtin:
case lltok::kw_cold:
case lltok::kw_inlinehint:
+ case lltok::kw_jumptable:
case lltok::kw_minsize:
case lltok::kw_naked:
case lltok::kw_nobuiltin:
@@ -1265,8 +1424,6 @@ bool LLParser::ParseOptionalReturnAttrs(AttrBuilder &B) {
/// ParseOptionalLinkage
/// ::= /*empty*/
/// ::= 'private'
-/// ::= 'linker_private'
-/// ::= 'linker_private_weak'
/// ::= 'internal'
/// ::= 'weak'
/// ::= 'weak_odr'
@@ -1274,20 +1431,18 @@ bool LLParser::ParseOptionalReturnAttrs(AttrBuilder &B) {
/// ::= 'linkonce_odr'
/// ::= 'available_externally'
/// ::= 'appending'
-/// ::= 'dllexport'
/// ::= 'common'
-/// ::= 'dllimport'
/// ::= 'extern_weak'
/// ::= 'external'
+///
+/// Deprecated Values:
+/// ::= 'linker_private'
+/// ::= 'linker_private_weak'
bool LLParser::ParseOptionalLinkage(unsigned &Res, bool &HasLinkage) {
HasLinkage = false;
switch (Lex.getKind()) {
default: Res=GlobalValue::ExternalLinkage; return false;
case lltok::kw_private: Res = GlobalValue::PrivateLinkage; break;
- case lltok::kw_linker_private: Res = GlobalValue::LinkerPrivateLinkage; break;
- case lltok::kw_linker_private_weak:
- Res = GlobalValue::LinkerPrivateWeakLinkage;
- break;
case lltok::kw_internal: Res = GlobalValue::InternalLinkage; break;
case lltok::kw_weak: Res = GlobalValue::WeakAnyLinkage; break;
case lltok::kw_weak_odr: Res = GlobalValue::WeakODRLinkage; break;
@@ -1297,11 +1452,18 @@ bool LLParser::ParseOptionalLinkage(unsigned &Res, bool &HasLinkage) {
Res = GlobalValue::AvailableExternallyLinkage;
break;
case lltok::kw_appending: Res = GlobalValue::AppendingLinkage; break;
- case lltok::kw_dllexport: Res = GlobalValue::DLLExportLinkage; break;
case lltok::kw_common: Res = GlobalValue::CommonLinkage; break;
- case lltok::kw_dllimport: Res = GlobalValue::DLLImportLinkage; break;
case lltok::kw_extern_weak: Res = GlobalValue::ExternalWeakLinkage; break;
case lltok::kw_external: Res = GlobalValue::ExternalLinkage; break;
+
+ case lltok::kw_linker_private:
+ case lltok::kw_linker_private_weak:
+ Lex.Warning("'" + Lex.getStrVal() + "' is deprecated, treating as"
+ " PrivateLinkage");
+ Lex.Lex();
+ // treat linker_private and linker_private_weak as PrivateLinkage
+ Res = GlobalValue::PrivateLinkage;
+ return false;
}
Lex.Lex();
HasLinkage = true;
@@ -1325,6 +1487,21 @@ bool LLParser::ParseOptionalVisibility(unsigned &Res) {
return false;
}
+/// ParseOptionalDLLStorageClass
+/// ::= /*empty*/
+/// ::= 'dllimport'
+/// ::= 'dllexport'
+///
+bool LLParser::ParseOptionalDLLStorageClass(unsigned &Res) {
+ switch (Lex.getKind()) {
+ default: Res = GlobalValue::DefaultStorageClass; return false;
+ case lltok::kw_dllimport: Res = GlobalValue::DLLImportStorageClass; break;
+ case lltok::kw_dllexport: Res = GlobalValue::DLLExportStorageClass; break;
+ }
+ Lex.Lex();
+ return false;
+}
+
/// ParseOptionalCallingConv
/// ::= /*empty*/
/// ::= 'ccc'
@@ -1346,6 +1523,8 @@ bool LLParser::ParseOptionalVisibility(unsigned &Res) {
/// ::= 'x86_64_win64cc'
/// ::= 'webkit_jscc'
/// ::= 'anyregcc'
+/// ::= 'preserve_mostcc'
+/// ::= 'preserve_allcc'
/// ::= 'cc' UINT
///
bool LLParser::ParseOptionalCallingConv(CallingConv::ID &CC) {
@@ -1370,6 +1549,8 @@ bool LLParser::ParseOptionalCallingConv(CallingConv::ID &CC) {
case lltok::kw_x86_64_win64cc: CC = CallingConv::X86_64_Win64; break;
case lltok::kw_webkit_jscc: CC = CallingConv::WebKit_JS; break;
case lltok::kw_anyregcc: CC = CallingConv::AnyReg; break;
+ case lltok::kw_preserve_mostcc:CC = CallingConv::PreserveMost; break;
+ case lltok::kw_preserve_allcc: CC = CallingConv::PreserveAll; break;
case lltok::kw_cc: {
unsigned ArbitraryCC;
Lex.Lex();
@@ -1450,6 +1631,26 @@ bool LLParser::ParseOptionalAlignment(unsigned &Alignment) {
return false;
}
+/// ParseOptionalDereferenceableBytes
+/// ::= /* empty */
+/// ::= 'dereferenceable' '(' 4 ')'
+bool LLParser::ParseOptionalDereferenceableBytes(uint64_t &Bytes) {
+ Bytes = 0;
+ if (!EatIfPresent(lltok::kw_dereferenceable))
+ return false;
+ LocTy ParenLoc = Lex.getLoc();
+ if (!EatIfPresent(lltok::lparen))
+ return Error(ParenLoc, "expected '('");
+ LocTy DerefLoc = Lex.getLoc();
+ if (ParseUInt64(Bytes)) return true;
+ ParenLoc = Lex.getLoc();
+ if (!EatIfPresent(lltok::rparen))
+ return Error(ParenLoc, "expected ')'");
+ if (!Bytes)
+ return Error(DerefLoc, "dereferenceable bytes must be non-zero");
+ return false;
+}
+
/// ParseOptionalCommaAlign
/// ::=
/// ::= ',' align 4
@@ -1488,6 +1689,15 @@ bool LLParser::ParseScopeAndOrdering(bool isAtomic, SynchronizationScope &Scope,
Scope = CrossThread;
if (EatIfPresent(lltok::kw_singlethread))
Scope = SingleThread;
+
+ return ParseOrdering(Ordering);
+}
+
+/// ParseOrdering
+/// ::= AtomicOrdering
+///
+/// This sets Ordering to the parsed value.
+bool LLParser::ParseOrdering(AtomicOrdering &Ordering) {
switch (Lex.getKind()) {
default: return TokError("Expected ordering on atomic instruction");
case lltok::kw_unordered: Ordering = Unordered; break;
@@ -1592,7 +1802,7 @@ bool LLParser::ParseType(Type *&Result, bool AllowVoid) {
// If the type hasn't been defined yet, create a forward definition and
// remember where that forward def'n was seen (in case it never is defined).
- if (Entry.first == 0) {
+ if (!Entry.first) {
Entry.first = StructType::create(Context, Lex.getStrVal());
Entry.second = Lex.getLoc();
}
@@ -1609,7 +1819,7 @@ bool LLParser::ParseType(Type *&Result, bool AllowVoid) {
// If the type hasn't been defined yet, create a forward definition and
// remember where that forward def'n was seen (in case it never is defined).
- if (Entry.first == 0) {
+ if (!Entry.first) {
Entry.first = StructType::create(Context);
Entry.second = Lex.getLoc();
}
@@ -1685,7 +1895,7 @@ bool LLParser::ParseParameterList(SmallVectorImpl<ParamInfo> &ArgList,
// Parse the argument.
LocTy ArgLoc;
- Type *ArgTy = 0;
+ Type *ArgTy = nullptr;
AttrBuilder ArgAttrs;
Value *V;
if (ParseType(ArgTy, ArgLoc))
@@ -1727,7 +1937,7 @@ bool LLParser::ParseArgumentList(SmallVectorImpl<ArgInfo> &ArgList,
Lex.Lex();
} else {
LocTy TypeLoc = Lex.getLoc();
- Type *ArgTy = 0;
+ Type *ArgTy = nullptr;
AttrBuilder Attrs;
std::string Name;
@@ -1839,7 +2049,7 @@ bool LLParser::ParseStructDefinition(SMLoc TypeLoc, StringRef Name,
Entry.second = SMLoc();
// If this type number has never been uttered, create it.
- if (Entry.first == 0)
+ if (!Entry.first)
Entry.first = StructType::create(Context, Name);
ResultTy = Entry.first;
return false;
@@ -1855,7 +2065,7 @@ bool LLParser::ParseStructDefinition(SMLoc TypeLoc, StringRef Name,
if (Entry.first)
return Error(TypeLoc, "forward references to non-struct type");
- ResultTy = 0;
+ ResultTy = nullptr;
if (isPacked)
return ParseArrayVectorType(ResultTy, true);
return ParseType(ResultTy);
@@ -1865,7 +2075,7 @@ bool LLParser::ParseStructDefinition(SMLoc TypeLoc, StringRef Name,
Entry.second = SMLoc();
// If this type number has never been uttered, create it.
- if (Entry.first == 0)
+ if (!Entry.first)
Entry.first = StructType::create(Context, Name);
StructType *STy = cast<StructType>(Entry.first);
@@ -1896,7 +2106,7 @@ bool LLParser::ParseStructBody(SmallVectorImpl<Type*> &Body) {
return false;
LocTy EltTyLoc = Lex.getLoc();
- Type *Ty = 0;
+ Type *Ty = nullptr;
if (ParseType(Ty)) return true;
Body.push_back(Ty);
@@ -1934,7 +2144,7 @@ bool LLParser::ParseArrayVectorType(Type *&Result, bool isVector) {
return true;
LocTy TypeLoc = Lex.getLoc();
- Type *EltTy = 0;
+ Type *EltTy = nullptr;
if (ParseType(EltTy)) return true;
if (ParseToken(isVector ? lltok::greater : lltok::rsquare,
@@ -1980,7 +2190,7 @@ LLParser::PerFunctionState::~PerFunctionState() {
I->second.first->replaceAllUsesWith(
UndefValue::get(I->second.first->getType()));
delete I->second.first;
- I->second.first = 0;
+ I->second.first = nullptr;
}
for (std::map<unsigned, std::pair<Value*, LocTy> >::iterator
@@ -1989,7 +2199,7 @@ LLParser::PerFunctionState::~PerFunctionState() {
I->second.first->replaceAllUsesWith(
UndefValue::get(I->second.first->getType()));
delete I->second.first;
- I->second.first = 0;
+ I->second.first = nullptr;
}
}
@@ -2038,7 +2248,7 @@ Value *LLParser::PerFunctionState::GetVal(const std::string &Name,
// If this is a forward reference for the value, see if we already created a
// forward ref record.
- if (Val == 0) {
+ if (!Val) {
std::map<std::string, std::pair<Value*, LocTy> >::iterator
I = ForwardRefVals.find(Name);
if (I != ForwardRefVals.end())
@@ -2053,13 +2263,13 @@ Value *LLParser::PerFunctionState::GetVal(const std::string &Name,
else
P.Error(Loc, "'%" + Name + "' defined with type '" +
getTypeString(Val->getType()) + "'");
- return 0;
+ return nullptr;
}
// Don't make placeholders with invalid type.
if (!Ty->isFirstClassType() && !Ty->isLabelTy()) {
P.Error(Loc, "invalid use of a non-first-class type");
- return 0;
+ return nullptr;
}
// Otherwise, create a new forward reference for this value and remember it.
@@ -2076,11 +2286,11 @@ Value *LLParser::PerFunctionState::GetVal(const std::string &Name,
Value *LLParser::PerFunctionState::GetVal(unsigned ID, Type *Ty,
LocTy Loc) {
// Look this name up in the normal function symbol table.
- Value *Val = ID < NumberedVals.size() ? NumberedVals[ID] : 0;
+ Value *Val = ID < NumberedVals.size() ? NumberedVals[ID] : nullptr;
// If this is a forward reference for the value, see if we already created a
// forward ref record.
- if (Val == 0) {
+ if (!Val) {
std::map<unsigned, std::pair<Value*, LocTy> >::iterator
I = ForwardRefValIDs.find(ID);
if (I != ForwardRefValIDs.end())
@@ -2095,12 +2305,12 @@ Value *LLParser::PerFunctionState::GetVal(unsigned ID, Type *Ty,
else
P.Error(Loc, "'%" + Twine(ID) + "' defined with type '" +
getTypeString(Val->getType()) + "'");
- return 0;
+ return nullptr;
}
if (!Ty->isFirstClassType() && !Ty->isLabelTy()) {
P.Error(Loc, "invalid use of a non-first-class type");
- return 0;
+ return nullptr;
}
// Otherwise, create a new forward reference for this value and remember it.
@@ -2196,7 +2406,7 @@ BasicBlock *LLParser::PerFunctionState::DefineBB(const std::string &Name,
BB = GetBB(NumberedVals.size(), Loc);
else
BB = GetBB(Name, Loc);
- if (BB == 0) return 0; // Already diagnosed error.
+ if (!BB) return nullptr; // Already diagnosed error.
// Move the block to the end of the function. Forward ref'd blocks are
// inserted wherever they happen to be referenced.
@@ -2404,7 +2614,7 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
// Make a global variable as a placeholder for this reference.
GlobalVariable *FwdRef = new GlobalVariable(*M, Type::getInt8Ty(Context),
false, GlobalValue::InternalLinkage,
- 0, "");
+ nullptr, "");
ForwardRefBlockAddresses[Fn].push_back(std::make_pair(Label, FwdRef));
ID.ConstantVal = FwdRef;
ID.Kind = ValID::t_Constant;
@@ -2425,7 +2635,7 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
case lltok::kw_inttoptr:
case lltok::kw_ptrtoint: {
unsigned Opc = Lex.getUIntVal();
- Type *DestTy = 0;
+ Type *DestTy = nullptr;
Constant *SrcVal;
Lex.Lex();
if (ParseToken(lltok::lparen, "expected '(' after constantexpr cast") ||
@@ -2689,22 +2899,35 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
/// ParseGlobalValue - Parse a global value with the specified type.
bool LLParser::ParseGlobalValue(Type *Ty, Constant *&C) {
- C = 0;
+ C = nullptr;
ValID ID;
- Value *V = NULL;
+ Value *V = nullptr;
bool Parsed = ParseValID(ID) ||
- ConvertValIDToValue(Ty, ID, V, NULL);
+ ConvertValIDToValue(Ty, ID, V, nullptr);
if (V && !(C = dyn_cast<Constant>(V)))
return Error(ID.Loc, "global values must be constants");
return Parsed;
}
bool LLParser::ParseGlobalTypeAndValue(Constant *&V) {
- Type *Ty = 0;
+ Type *Ty = nullptr;
return ParseType(Ty) ||
ParseGlobalValue(Ty, V);
}
+bool LLParser::parseOptionalComdat(Comdat *&C) {
+ C = nullptr;
+ if (!EatIfPresent(lltok::kw_comdat))
+ return false;
+ if (Lex.getKind() != lltok::ComdatVar)
+ return TokError("expected comdat variable");
+ LocTy Loc = Lex.getLoc();
+ StringRef Name = Lex.getStrVal();
+ C = getComdat(Name, Loc);
+ Lex.Lex();
+ return false;
+}
+
/// ParseGlobalValueVector
/// ::= /*empty*/
/// ::= TypeAndValue (',' TypeAndValue)*
@@ -2784,15 +3007,15 @@ bool LLParser::ConvertValIDToValue(Type *Ty, ValID &ID, Value *&V,
case ValID::t_LocalID:
if (!PFS) return Error(ID.Loc, "invalid use of function-local name");
V = PFS->GetVal(ID.UIntVal, Ty, ID.Loc);
- return (V == 0);
+ return V == nullptr;
case ValID::t_LocalName:
if (!PFS) return Error(ID.Loc, "invalid use of function-local name");
V = PFS->GetVal(ID.StrVal, Ty, ID.Loc);
- return (V == 0);
+ return V == nullptr;
case ValID::t_InlineAsm: {
PointerType *PTy = dyn_cast<PointerType>(Ty);
FunctionType *FTy =
- PTy ? dyn_cast<FunctionType>(PTy->getElementType()) : 0;
+ PTy ? dyn_cast<FunctionType>(PTy->getElementType()) : nullptr;
if (!FTy || !InlineAsm::Verify(FTy, ID.StrVal2))
return Error(ID.Loc, "invalid type for inline asm constraint string");
V = InlineAsm::get(FTy, ID.StrVal, ID.StrVal2, ID.UIntVal&1,
@@ -2811,10 +3034,10 @@ bool LLParser::ConvertValIDToValue(Type *Ty, ValID &ID, Value *&V,
return false;
case ValID::t_GlobalName:
V = GetGlobalVal(ID.StrVal, Ty, ID.Loc);
- return V == 0;
+ return V == nullptr;
case ValID::t_GlobalID:
V = GetGlobalVal(ID.UIntVal, Ty, ID.Loc);
- return V == 0;
+ return V == nullptr;
case ValID::t_APSInt:
if (!Ty->isIntegerTy())
return Error(ID.Loc, "integer constant must have integer type");
@@ -2897,14 +3120,14 @@ bool LLParser::ConvertValIDToValue(Type *Ty, ValID &ID, Value *&V,
}
bool LLParser::ParseValue(Type *Ty, Value *&V, PerFunctionState *PFS) {
- V = 0;
+ V = nullptr;
ValID ID;
return ParseValID(ID, PFS) ||
ConvertValIDToValue(Ty, ID, V, PFS);
}
bool LLParser::ParseTypeAndValue(Value *&V, PerFunctionState *PFS) {
- Type *Ty = 0;
+ Type *Ty = nullptr;
return ParseType(Ty) ||
ParseValue(Ty, V, PFS);
}
@@ -2931,12 +3154,14 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
unsigned Linkage;
unsigned Visibility;
+ unsigned DLLStorageClass;
AttrBuilder RetAttrs;
CallingConv::ID CC;
- Type *RetType = 0;
+ Type *RetType = nullptr;
LocTy RetTypeLoc = Lex.getLoc();
if (ParseOptionalLinkage(Linkage) ||
ParseOptionalVisibility(Visibility) ||
+ ParseOptionalDLLStorageClass(DLLStorageClass) ||
ParseOptionalCallingConv(CC) ||
ParseOptionalReturnAttrs(RetAttrs) ||
ParseType(RetType, RetTypeLoc, true /*void allowed*/))
@@ -2946,21 +3171,17 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
switch ((GlobalValue::LinkageTypes)Linkage) {
case GlobalValue::ExternalLinkage:
break; // always ok.
- case GlobalValue::DLLImportLinkage:
case GlobalValue::ExternalWeakLinkage:
if (isDefine)
return Error(LinkageLoc, "invalid linkage for function definition");
break;
case GlobalValue::PrivateLinkage:
- case GlobalValue::LinkerPrivateLinkage:
- case GlobalValue::LinkerPrivateWeakLinkage:
case GlobalValue::InternalLinkage:
case GlobalValue::AvailableExternallyLinkage:
case GlobalValue::LinkOnceAnyLinkage:
case GlobalValue::LinkOnceODRLinkage:
case GlobalValue::WeakAnyLinkage:
case GlobalValue::WeakODRLinkage:
- case GlobalValue::DLLExportLinkage:
if (!isDefine)
return Error(LinkageLoc, "invalid linkage for function declaration");
break;
@@ -2969,6 +3190,10 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
return Error(LinkageLoc, "invalid function linkage type");
}
+ if (!isValidVisibilityForLinkage(Visibility, Linkage))
+ return Error(LinkageLoc,
+ "symbol with local linkage must have default visibility");
+
if (!FunctionType::isValidReturnType(RetType))
return Error(RetTypeLoc, "invalid function return type");
@@ -3002,7 +3227,8 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
std::string GC;
bool UnnamedAddr;
LocTy UnnamedAddrLoc;
- Constant *Prefix = 0;
+ Constant *Prefix = nullptr;
+ Comdat *C;
if (ParseArgumentList(ArgList, isVarArg) ||
ParseOptionalToken(lltok::kw_unnamed_addr, UnnamedAddr,
@@ -3011,6 +3237,7 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
BuiltinLoc) ||
(EatIfPresent(lltok::kw_section) &&
ParseStringConstant(Section)) ||
+ parseOptionalComdat(C) ||
ParseOptionalAlignment(Alignment) ||
(EatIfPresent(lltok::kw_gc) &&
ParseStringConstant(GC)) ||
@@ -3059,7 +3286,7 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
FunctionType::get(RetType, ParamTypeList, isVarArg);
PointerType *PFT = PointerType::getUnqual(FT);
- Fn = 0;
+ Fn = nullptr;
if (!FunctionName.empty()) {
// If this was a definition of a forward reference, remove the definition
// from the forward reference table and fill in the forward ref.
@@ -3097,7 +3324,7 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
}
}
- if (Fn == 0)
+ if (!Fn)
Fn = Function::Create(FT, GlobalValue::ExternalLinkage, FunctionName, M);
else // Move the forward-reference to the correct spot in the module.
M->getFunctionList().splice(M->end(), M->getFunctionList(), Fn);
@@ -3107,11 +3334,13 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
Fn->setLinkage((GlobalValue::LinkageTypes)Linkage);
Fn->setVisibility((GlobalValue::VisibilityTypes)Visibility);
+ Fn->setDLLStorageClass((GlobalValue::DLLStorageClassTypes)DLLStorageClass);
Fn->setCallingConv(CC);
Fn->setAttributes(PAL);
Fn->setUnnamedAddr(UnnamedAddr);
Fn->setAlignment(Alignment);
Fn->setSection(Section);
+ Fn->setComdat(C);
if (!GC.empty()) Fn->setGC(GC.c_str());
Fn->setPrefixData(Prefix);
ForwardRefAttrGroups[Fn] = FwdRefAttrGrps;
@@ -3173,7 +3402,7 @@ bool LLParser::ParseBasicBlock(PerFunctionState &PFS) {
}
BasicBlock *BB = PFS.DefineBB(Name, NameLoc);
- if (BB == 0) return true;
+ if (!BB) return true;
std::string NameStr;
@@ -3321,8 +3550,10 @@ int LLParser::ParseInstruction(Instruction *&Inst, BasicBlock *BB,
case lltok::kw_shufflevector: return ParseShuffleVector(Inst, PFS);
case lltok::kw_phi: return ParsePHI(Inst, PFS);
case lltok::kw_landingpad: return ParseLandingPad(Inst, PFS);
- case lltok::kw_call: return ParseCall(Inst, PFS, false);
- case lltok::kw_tail: return ParseCall(Inst, PFS, true);
+ // Call.
+ case lltok::kw_call: return ParseCall(Inst, PFS, CallInst::TCK_None);
+ case lltok::kw_tail: return ParseCall(Inst, PFS, CallInst::TCK_Tail);
+ case lltok::kw_musttail: return ParseCall(Inst, PFS, CallInst::TCK_MustTail);
// Memory.
case lltok::kw_alloca: return ParseAlloc(Inst, PFS);
case lltok::kw_load: return ParseLoad(Inst, PFS);
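
The three dispatch cases added above correspond to the three textual call forms. A minimal sketch, with callee and types chosen purely for illustration:

    %a = call i32 @callee(i32 %x)             ; CallInst::TCK_None
    %b = tail call i32 @callee(i32 %x)        ; CallInst::TCK_Tail
    %c = musttail call i32 @callee(i32 %x)    ; CallInst::TCK_MustTail; must immediately precede the ret
    ret i32 %c
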
@@ -3387,7 +3618,7 @@ bool LLParser::ParseCmpPredicate(unsigned &P, unsigned Opc) {
bool LLParser::ParseRet(Instruction *&Inst, BasicBlock *BB,
PerFunctionState &PFS) {
SMLoc TypeLoc = Lex.getLoc();
- Type *Ty = 0;
+ Type *Ty = nullptr;
if (ParseType(Ty, true /*void allowed*/)) return true;
Type *ResType = PFS.getFunction().getReturnType();
@@ -3537,7 +3768,7 @@ bool LLParser::ParseInvoke(Instruction *&Inst, PerFunctionState &PFS) {
std::vector<unsigned> FwdRefAttrGrps;
LocTy NoBuiltinLoc;
CallingConv::ID CC;
- Type *RetType = 0;
+ Type *RetType = nullptr;
LocTy RetTypeLoc;
ValID CalleeID;
SmallVector<ParamInfo, 16> ArgList;
@@ -3559,8 +3790,8 @@ bool LLParser::ParseInvoke(Instruction *&Inst, PerFunctionState &PFS) {
// If RetType is a non-function pointer type, then this is the short syntax
// for the call, which means that RetType is just the return type. Infer the
// rest of the function argument types from the arguments that are present.
- PointerType *PFTy = 0;
- FunctionType *Ty = 0;
+ PointerType *PFTy = nullptr;
+ FunctionType *Ty = nullptr;
if (!(PFTy = dyn_cast<PointerType>(RetType)) ||
!(Ty = dyn_cast<FunctionType>(PFTy->getElementType()))) {
// Pull out the types of all of the arguments...
@@ -3593,7 +3824,7 @@ bool LLParser::ParseInvoke(Instruction *&Inst, PerFunctionState &PFS) {
FunctionType::param_iterator I = Ty->param_begin();
FunctionType::param_iterator E = Ty->param_end();
for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
- Type *ExpectedTy = 0;
+ Type *ExpectedTy = nullptr;
if (I != E) {
ExpectedTy = *I++;
} else if (!Ty->isVarArg()) {
@@ -3734,7 +3965,7 @@ bool LLParser::ParseCast(Instruction *&Inst, PerFunctionState &PFS,
unsigned Opc) {
LocTy Loc;
Value *Op;
- Type *DestTy = 0;
+ Type *DestTy = nullptr;
if (ParseTypeAndValue(Op, Loc, PFS) ||
ParseToken(lltok::kw_to, "expected 'to' after cast value") ||
ParseType(DestTy))
@@ -3773,7 +4004,7 @@ bool LLParser::ParseSelect(Instruction *&Inst, PerFunctionState &PFS) {
/// ::= 'va_arg' TypeAndValue ',' Type
bool LLParser::ParseVA_Arg(Instruction *&Inst, PerFunctionState &PFS) {
Value *Op;
- Type *EltTy = 0;
+ Type *EltTy = nullptr;
LocTy TypeLoc;
if (ParseTypeAndValue(Op, PFS) ||
ParseToken(lltok::comma, "expected ',' after vaarg operand") ||
@@ -3845,7 +4076,7 @@ bool LLParser::ParseShuffleVector(Instruction *&Inst, PerFunctionState &PFS) {
/// ParsePHI
/// ::= 'phi' Type '[' Value ',' Value ']' (',' '[' Value ',' Value ']')*
int LLParser::ParsePHI(Instruction *&Inst, PerFunctionState &PFS) {
- Type *Ty = 0; LocTy TypeLoc;
+ Type *Ty = nullptr; LocTy TypeLoc;
Value *Op0, *Op1;
if (ParseType(Ty, TypeLoc) ||
@@ -3894,7 +4125,7 @@ int LLParser::ParsePHI(Instruction *&Inst, PerFunctionState &PFS) {
/// ::= 'filter'
/// ::= 'filter' TypeAndValue ( ',' TypeAndValue )*
bool LLParser::ParseLandingPad(Instruction *&Inst, PerFunctionState &PFS) {
- Type *Ty = 0; LocTy TyLoc;
+ Type *Ty = nullptr; LocTy TyLoc;
Value *PersFn; LocTy PersFnLoc;
if (ParseType(Ty, TyLoc) ||
@@ -3914,7 +4145,8 @@ bool LLParser::ParseLandingPad(Instruction *&Inst, PerFunctionState &PFS) {
else
return TokError("expected 'catch' or 'filter' clause type");
- Value *V; LocTy VLoc;
+ Value *V;
+ LocTy VLoc;
if (ParseTypeAndValue(V, VLoc, PFS)) {
delete LP;
return true;
@@ -3930,7 +4162,7 @@ bool LLParser::ParseLandingPad(Instruction *&Inst, PerFunctionState &PFS) {
Error(VLoc, "'filter' clause has an invalid type");
}
- LP->addClause(V);
+ LP->addClause(cast<Constant>(V));
}
Inst = LP;
@@ -3938,21 +4170,26 @@ bool LLParser::ParseLandingPad(Instruction *&Inst, PerFunctionState &PFS) {
}
/// ParseCall
-/// ::= 'tail'? 'call' OptionalCallingConv OptionalAttrs Type Value
+/// ::= 'call' OptionalCallingConv OptionalAttrs Type Value
+/// ParameterList OptionalAttrs
+/// ::= 'tail' 'call' OptionalCallingConv OptionalAttrs Type Value
+/// ParameterList OptionalAttrs
+/// ::= 'musttail' 'call' OptionalCallingConv OptionalAttrs Type Value
/// ParameterList OptionalAttrs
bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS,
- bool isTail) {
+ CallInst::TailCallKind TCK) {
AttrBuilder RetAttrs, FnAttrs;
std::vector<unsigned> FwdRefAttrGrps;
LocTy BuiltinLoc;
CallingConv::ID CC;
- Type *RetType = 0;
+ Type *RetType = nullptr;
LocTy RetTypeLoc;
ValID CalleeID;
SmallVector<ParamInfo, 16> ArgList;
LocTy CallLoc = Lex.getLoc();
- if ((isTail && ParseToken(lltok::kw_call, "expected 'tail call'")) ||
+ if ((TCK != CallInst::TCK_None &&
+ ParseToken(lltok::kw_call, "expected 'tail call'")) ||
ParseOptionalCallingConv(CC) ||
ParseOptionalReturnAttrs(RetAttrs) ||
ParseType(RetType, RetTypeLoc, true /*void allowed*/) ||
@@ -3965,8 +4202,8 @@ bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS,
// If RetType is a non-function pointer type, then this is the short syntax
// for the call, which means that RetType is just the return type. Infer the
// rest of the function argument types from the arguments that are present.
- PointerType *PFTy = 0;
- FunctionType *Ty = 0;
+ PointerType *PFTy = nullptr;
+ FunctionType *Ty = nullptr;
if (!(PFTy = dyn_cast<PointerType>(RetType)) ||
!(Ty = dyn_cast<FunctionType>(PFTy->getElementType()))) {
// Pull out the types of all of the arguments...
@@ -3999,7 +4236,7 @@ bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS,
FunctionType::param_iterator I = Ty->param_begin();
FunctionType::param_iterator E = Ty->param_end();
for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
- Type *ExpectedTy = 0;
+ Type *ExpectedTy = nullptr;
if (I != E) {
ExpectedTy = *I++;
} else if (!Ty->isVarArg()) {
@@ -4028,7 +4265,7 @@ bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS,
AttributeSet PAL = AttributeSet::get(Context, Attrs);
CallInst *CI = CallInst::Create(Callee, Args);
- CI->setTailCall(isTail);
+ CI->setTailCallKind(TCK);
CI->setCallingConv(CC);
CI->setAttributes(PAL);
ForwardRefAttrGroups[CI] = FwdRefAttrGrps;
@@ -4041,12 +4278,15 @@ bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS,
//===----------------------------------------------------------------------===//
/// ParseAlloc
-/// ::= 'alloca' Type (',' TypeAndValue)? (',' OptionalInfo)?
+/// ::= 'alloca' 'inalloca'? Type (',' TypeAndValue)? (',' 'align' i32)?
int LLParser::ParseAlloc(Instruction *&Inst, PerFunctionState &PFS) {
- Value *Size = 0;
+ Value *Size = nullptr;
LocTy SizeLoc;
unsigned Alignment = 0;
- Type *Ty = 0;
+ Type *Ty = nullptr;
+
+ bool IsInAlloca = EatIfPresent(lltok::kw_inalloca);
+
if (ParseType(Ty)) return true;
bool AteExtraComma = false;
@@ -4065,7 +4305,9 @@ int LLParser::ParseAlloc(Instruction *&Inst, PerFunctionState &PFS) {
if (Size && !Size->getType()->isIntegerTy())
return Error(SizeLoc, "element count must have integer type");
- Inst = new AllocaInst(Ty, Size, Alignment);
+ AllocaInst *AI = new AllocaInst(Ty, Size, Alignment);
+ AI->setUsedWithInAlloca(IsInAlloca);
+ Inst = AI;
return AteExtraComma ? InstExtraComma : InstNormal;
}
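
A minimal sketch of the form ParseAlloc now accepts, per the updated grammar comment (the struct type is purely illustrative):

    %argmem = alloca inalloca <{ i32, i32 }>, align 4
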
@@ -4156,14 +4398,19 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) {
}
/// ParseCmpXchg
-/// ::= 'cmpxchg' 'volatile'? TypeAndValue ',' TypeAndValue ',' TypeAndValue
-/// 'singlethread'? AtomicOrdering
+/// ::= 'cmpxchg' 'weak'? 'volatile'? TypeAndValue ',' TypeAndValue ','
+/// TypeAndValue 'singlethread'? AtomicOrdering AtomicOrdering
int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
Value *Ptr, *Cmp, *New; LocTy PtrLoc, CmpLoc, NewLoc;
bool AteExtraComma = false;
- AtomicOrdering Ordering = NotAtomic;
+ AtomicOrdering SuccessOrdering = NotAtomic;
+ AtomicOrdering FailureOrdering = NotAtomic;
SynchronizationScope Scope = CrossThread;
bool isVolatile = false;
+ bool isWeak = false;
+
+ if (EatIfPresent(lltok::kw_weak))
+ isWeak = true;
if (EatIfPresent(lltok::kw_volatile))
isVolatile = true;
@@ -4173,11 +4420,16 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
ParseTypeAndValue(Cmp, CmpLoc, PFS) ||
ParseToken(lltok::comma, "expected ',' after cmpxchg cmp operand") ||
ParseTypeAndValue(New, NewLoc, PFS) ||
- ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering))
+ ParseScopeAndOrdering(true /*Always atomic*/, Scope, SuccessOrdering) ||
+ ParseOrdering(FailureOrdering))
return true;
- if (Ordering == Unordered)
+ if (SuccessOrdering == Unordered || FailureOrdering == Unordered)
return TokError("cmpxchg cannot be unordered");
+ if (SuccessOrdering < FailureOrdering)
+ return TokError("cmpxchg must be at least as ordered on success as failure");
+ if (FailureOrdering == Release || FailureOrdering == AcquireRelease)
+ return TokError("cmpxchg failure ordering cannot include release semantics");
if (!Ptr->getType()->isPointerTy())
return Error(PtrLoc, "cmpxchg operand must be a pointer");
if (cast<PointerType>(Ptr->getType())->getElementType() != Cmp->getType())
@@ -4191,9 +4443,10 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
return Error(NewLoc, "cmpxchg operand must be power-of-two byte-sized"
" integer");
- AtomicCmpXchgInst *CXI =
- new AtomicCmpXchgInst(Ptr, Cmp, New, Ordering, Scope);
+ AtomicCmpXchgInst *CXI = new AtomicCmpXchgInst(
+ Ptr, Cmp, New, SuccessOrdering, FailureOrdering, Scope);
CXI->setVolatile(isVolatile);
+ CXI->setWeak(isWeak);
Inst = CXI;
return AteExtraComma ? InstExtraComma : InstNormal;
}
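
A hedged example of the extended syntax parsed above: the optional 'weak' marker plus separate success and failure orderings, where the failure ordering may be no stronger than the success ordering and may not carry release semantics (operand names are placeholders):

    %pair = cmpxchg weak i32* %ptr, i32 %expected, i32 %desired seq_cst monotonic
    %loaded = extractvalue { i32, i1 } %pair, 0
    %success = extractvalue { i32, i1 } %pair, 1
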
@@ -4274,8 +4527,8 @@ int LLParser::ParseFence(Instruction *&Inst, PerFunctionState &PFS) {
/// ParseGetElementPtr
/// ::= 'getelementptr' 'inbounds'? TypeAndValue (',' TypeAndValue)*
int LLParser::ParseGetElementPtr(Instruction *&Inst, PerFunctionState &PFS) {
- Value *Ptr = 0;
- Value *Val = 0;
+ Value *Ptr = nullptr;
+ Value *Val = nullptr;
LocTy Loc, EltLoc;
bool InBounds = EatIfPresent(lltok::kw_inbounds);
@@ -4377,11 +4630,11 @@ bool LLParser::ParseMDNodeVector(SmallVectorImpl<Value*> &Elts,
do {
// Null is a special case since it is typeless.
if (EatIfPresent(lltok::kw_null)) {
- Elts.push_back(0);
+ Elts.push_back(nullptr);
continue;
}
- Value *V = 0;
+ Value *V = nullptr;
if (ParseTypeAndValue(V, PFS)) return true;
Elts.push_back(V);
} while (EatIfPresent(lltok::comma));
diff --git a/contrib/llvm/lib/AsmParser/LLParser.h b/contrib/llvm/lib/AsmParser/LLParser.h
index ded776c..7203bb2 100644
--- a/contrib/llvm/lib/AsmParser/LLParser.h
+++ b/contrib/llvm/lib/AsmParser/LLParser.h
@@ -22,7 +22,7 @@
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/ValueHandle.h"
+#include "llvm/IR/ValueHandle.h"
#include <map>
namespace llvm {
@@ -34,6 +34,7 @@ namespace llvm {
class Instruction;
class Constant;
class GlobalValue;
+ class Comdat;
class MDString;
class MDNode;
class StructType;
@@ -122,6 +123,9 @@ namespace llvm {
std::map<unsigned, std::pair<GlobalValue*, LocTy> > ForwardRefValIDs;
std::vector<GlobalValue*> NumberedVals;
+ // Comdat forward reference information.
+ std::map<std::string, LocTy> ForwardRefComdats;
+
// References to blockaddress. The key is the function ValID, the value is
// a list of references to blocks in that function.
std::map<ValID, std::vector<std::pair<ValID, GlobalValue*> > >
@@ -154,6 +158,10 @@ namespace llvm {
GlobalValue *GetGlobalVal(const std::string &N, Type *Ty, LocTy Loc);
GlobalValue *GetGlobalVal(unsigned ID, Type *Ty, LocTy Loc);
+ /// Get a Comdat with the specified name, creating a forward reference
+ /// record if needed.
+ Comdat *getComdat(const std::string &N, LocTy Loc);
+
// Helper Routines.
bool ParseToken(lltok::Kind T, const char *ErrMsg);
bool EatIfPresent(lltok::Kind T) {
@@ -176,7 +184,8 @@ namespace llvm {
return FMF;
}
- bool ParseOptionalToken(lltok::Kind T, bool &Present, LocTy *Loc = 0) {
+ bool ParseOptionalToken(lltok::Kind T, bool &Present,
+ LocTy *Loc = nullptr) {
if (Lex.getKind() != T) {
Present = false;
} else {
@@ -193,9 +202,17 @@ namespace llvm {
Loc = Lex.getLoc();
return ParseUInt32(Val);
}
+ bool ParseUInt64(uint64_t &Val);
+ bool ParseUInt64(uint64_t &Val, LocTy &Loc) {
+ Loc = Lex.getLoc();
+ return ParseUInt64(Val);
+ }
bool ParseTLSModel(GlobalVariable::ThreadLocalMode &TLM);
bool ParseOptionalThreadLocal(GlobalVariable::ThreadLocalMode &TLM);
+ bool parseOptionalUnnamedAddr(bool &UnnamedAddr) {
+ return ParseOptionalToken(lltok::kw_unnamed_addr, UnnamedAddr);
+ }
bool ParseOptionalAddrSpace(unsigned &AddrSpace);
bool ParseOptionalParamAttrs(AttrBuilder &B);
bool ParseOptionalReturnAttrs(AttrBuilder &B);
@@ -204,12 +221,16 @@ namespace llvm {
bool HasLinkage; return ParseOptionalLinkage(Linkage, HasLinkage);
}
bool ParseOptionalVisibility(unsigned &Visibility);
+ bool ParseOptionalDLLStorageClass(unsigned &DLLStorageClass);
bool ParseOptionalCallingConv(CallingConv::ID &CC);
bool ParseOptionalAlignment(unsigned &Alignment);
+ bool ParseOptionalDereferenceableBytes(uint64_t &Bytes);
bool ParseScopeAndOrdering(bool isAtomic, SynchronizationScope &Scope,
AtomicOrdering &Ordering);
+ bool ParseOrdering(AtomicOrdering &Ordering);
bool ParseOptionalStackAlignment(unsigned &Alignment);
bool ParseOptionalCommaAlign(unsigned &Alignment, bool &AteExtraComma);
+ bool ParseOptionalCommaInAlloca(bool &IsInAlloca);
bool ParseIndexList(SmallVectorImpl<unsigned> &Indices,bool &AteExtraComma);
bool ParseIndexList(SmallVectorImpl<unsigned> &Indices) {
bool AteExtraComma;
@@ -234,8 +255,13 @@ namespace llvm {
bool ParseUnnamedGlobal();
bool ParseNamedGlobal();
bool ParseGlobal(const std::string &Name, LocTy Loc, unsigned Linkage,
- bool HasLinkage, unsigned Visibility);
- bool ParseAlias(const std::string &Name, LocTy Loc, unsigned Visibility);
+ bool HasLinkage, unsigned Visibility,
+ unsigned DLLStorageClass,
+ GlobalVariable::ThreadLocalMode TLM, bool UnnamedAddr);
+ bool ParseAlias(const std::string &Name, LocTy Loc, unsigned Visibility,
+ unsigned DLLStorageClass,
+ GlobalVariable::ThreadLocalMode TLM, bool UnnamedAddr);
+ bool parseComdat();
bool ParseStandaloneMetadata();
bool ParseNamedMetadata();
bool ParseMDString(MDString *&Result);
@@ -343,10 +369,11 @@ namespace llvm {
PerFunctionState &PFS);
// Constant Parsing.
- bool ParseValID(ValID &ID, PerFunctionState *PFS = NULL);
+ bool ParseValID(ValID &ID, PerFunctionState *PFS = nullptr);
bool ParseGlobalValue(Type *Ty, Constant *&V);
bool ParseGlobalTypeAndValue(Constant *&V);
bool ParseGlobalValueVector(SmallVectorImpl<Constant*> &Elts);
+ bool parseOptionalComdat(Comdat *&C);
bool ParseMetadataListValue(ValID &ID, PerFunctionState *PFS);
bool ParseMetadataValue(ValID &ID, PerFunctionState *PFS);
bool ParseMDNodeVector(SmallVectorImpl<Value*> &, PerFunctionState *PFS);
@@ -366,6 +393,8 @@ namespace llvm {
bool ParseFunctionBody(Function &Fn);
bool ParseBasicBlock(PerFunctionState &PFS);
+ enum TailCallType { TCT_None, TCT_Tail, TCT_MustTail };
+
// Instruction Parsing. Each instruction parsing routine can return with a
// normal result, an error result, or return having eaten an extra comma.
enum InstResult { InstNormal = 0, InstError = 1, InstExtraComma = 2 };
@@ -392,7 +421,8 @@ namespace llvm {
bool ParseShuffleVector(Instruction *&I, PerFunctionState &PFS);
int ParsePHI(Instruction *&I, PerFunctionState &PFS);
bool ParseLandingPad(Instruction *&I, PerFunctionState &PFS);
- bool ParseCall(Instruction *&I, PerFunctionState &PFS, bool isTail);
+ bool ParseCall(Instruction *&I, PerFunctionState &PFS,
+ CallInst::TailCallKind IsTail);
int ParseAlloc(Instruction *&I, PerFunctionState &PFS);
int ParseLoad(Instruction *&I, PerFunctionState &PFS);
int ParseStore(Instruction *&I, PerFunctionState &PFS);
diff --git a/contrib/llvm/lib/AsmParser/LLToken.h b/contrib/llvm/lib/AsmParser/LLToken.h
index 786d84d..2f02606f 100644
--- a/contrib/llvm/lib/AsmParser/LLToken.h
+++ b/contrib/llvm/lib/AsmParser/LLToken.h
@@ -37,10 +37,13 @@ namespace lltok {
kw_declare, kw_define,
kw_global, kw_constant,
- kw_private, kw_linker_private, kw_linker_private_weak,
+ kw_private,
kw_internal,
+ kw_linker_private, // NOTE: deprecated, for parser compatibility
+ kw_linker_private_weak, // NOTE: deprecated, for parser compatibility
kw_linkonce, kw_linkonce_odr,
- kw_weak, kw_weak_odr, kw_appending,
+ kw_weak, // Used as a linkage, and a modifier for "cmpxchg".
+ kw_weak_odr, kw_appending,
kw_dllimport, kw_dllexport, kw_common, kw_available_externally,
kw_default, kw_hidden, kw_protected,
kw_unnamed_addr,
@@ -52,6 +55,7 @@ namespace lltok {
kw_undef, kw_null,
kw_to,
kw_tail,
+ kw_musttail,
kw_target,
kw_triple,
kw_unwind,
@@ -92,6 +96,7 @@ namespace lltok {
kw_spir_kernel, kw_spir_func,
kw_x86_64_sysvcc, kw_x86_64_win64cc,
kw_webkit_jscc, kw_anyregcc,
+ kw_preserve_mostcc, kw_preserve_allcc,
// Attributes:
kw_attributes,
@@ -99,9 +104,12 @@ namespace lltok {
kw_sanitize_address,
kw_builtin,
kw_byval,
+ kw_inalloca,
kw_cold,
+ kw_dereferenceable,
kw_inlinehint,
kw_inreg,
+ kw_jumptable,
kw_minsize,
kw_naked,
kw_nest,
@@ -112,6 +120,7 @@ namespace lltok {
kw_noimplicitfloat,
kw_noinline,
kw_nonlazybind,
+ kw_nonnull,
kw_noredzone,
kw_noreturn,
kw_nounwind,
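
An illustrative use of the newly tokenized parameter attributes (the declaration itself is hypothetical):

    declare i32 @read_header(i8* nonnull dereferenceable(16) %buf)
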
@@ -134,6 +143,15 @@ namespace lltok {
kw_type,
kw_opaque,
+ kw_comdat,
+
+ // Comdat types
+ kw_any,
+ kw_exactmatch,
+ kw_largest,
+ kw_noduplicates,
+ kw_samesize,
+
kw_eq, kw_ne, kw_slt, kw_sgt, kw_sle, kw_sge, kw_ult, kw_ugt, kw_ule,
kw_uge, kw_oeq, kw_one, kw_olt, kw_ogt, kw_ole, kw_oge, kw_ord, kw_uno,
kw_ueq, kw_une,
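
A sketch of the comdat syntax these keywords and the new '$'-prefixed ComdatVar token enable; the names and selection kind are illustrative:

    $group = comdat any
    @shared = global i32 0, comdat $group
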
@@ -172,6 +190,7 @@ namespace lltok {
// String valued tokens (StrVal).
LabelStr, // foo:
GlobalVar, // @foo @"foo"
+ ComdatVar, // $foo
LocalVar, // %foo %"foo"
MetadataVar, // !foo
StringConstant, // "foo"
diff --git a/contrib/llvm/lib/AsmParser/Parser.cpp b/contrib/llvm/lib/AsmParser/Parser.cpp
index d777ab9..91bb51c 100644
--- a/contrib/llvm/lib/AsmParser/Parser.cpp
+++ b/contrib/llvm/lib/AsmParser/Parser.cpp
@@ -7,19 +7,18 @@
//
//===----------------------------------------------------------------------===//
//
-// This library implements the functionality defined in llvm/Assembly/Parser.h
+// This library implements the functionality defined in llvm/AsmParser/Parser.h
//
//===----------------------------------------------------------------------===//
-#include "llvm/Assembly/Parser.h"
+#include "llvm/AsmParser/Parser.h"
#include "LLParser.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/system_error.h"
#include <cstring>
+#include <system_error>
using namespace llvm;
Module *llvm::ParseAssembly(MemoryBuffer *F,
@@ -31,32 +30,32 @@ Module *llvm::ParseAssembly(MemoryBuffer *F,
// If we are parsing into an existing module, do it.
if (M)
- return LLParser(F, SM, Err, M).Run() ? 0 : M;
+ return LLParser(F, SM, Err, M).Run() ? nullptr : M;
// Otherwise create a new module.
- OwningPtr<Module> M2(new Module(F->getBufferIdentifier(), Context));
+ std::unique_ptr<Module> M2(new Module(F->getBufferIdentifier(), Context));
if (LLParser(F, SM, Err, M2.get()).Run())
- return 0;
- return M2.take();
+ return nullptr;
+ return M2.release();
}
Module *llvm::ParseAssemblyFile(const std::string &Filename, SMDiagnostic &Err,
LLVMContext &Context) {
- OwningPtr<MemoryBuffer> File;
- if (error_code ec = MemoryBuffer::getFileOrSTDIN(Filename, File)) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> FileOrErr =
+ MemoryBuffer::getFileOrSTDIN(Filename);
+ if (std::error_code EC = FileOrErr.getError()) {
Err = SMDiagnostic(Filename, SourceMgr::DK_Error,
- "Could not open input file: " + ec.message());
- return 0;
+ "Could not open input file: " + EC.message());
+ return nullptr;
}
- return ParseAssembly(File.take(), 0, Err, Context);
+ return ParseAssembly(FileOrErr.get().release(), nullptr, Err, Context);
}
Module *llvm::ParseAssemblyString(const char *AsmString, Module *M,
SMDiagnostic &Err, LLVMContext &Context) {
MemoryBuffer *F =
- MemoryBuffer::getMemBuffer(StringRef(AsmString, strlen(AsmString)),
- "<string>");
+ MemoryBuffer::getMemBuffer(StringRef(AsmString), "<string>");
return ParseAssembly(F, M, Err, Context);
}
diff --git a/contrib/llvm/lib/AsmParser/module.modulemap b/contrib/llvm/lib/AsmParser/module.modulemap
new file mode 100644
index 0000000..cc30006
--- /dev/null
+++ b/contrib/llvm/lib/AsmParser/module.modulemap
@@ -0,0 +1 @@
+module AsmParser { requires cplusplus umbrella "." module * { export * } }
diff --git a/contrib/llvm/lib/Bitcode/Reader/BitReader.cpp b/contrib/llvm/lib/Bitcode/Reader/BitReader.cpp
index 23630e5..b5886c1 100644
--- a/contrib/llvm/lib/Bitcode/Reader/BitReader.cpp
+++ b/contrib/llvm/lib/Bitcode/Reader/BitReader.cpp
@@ -30,16 +30,16 @@ LLVMBool LLVMParseBitcodeInContext(LLVMContextRef ContextRef,
LLVMMemoryBufferRef MemBuf,
LLVMModuleRef *OutModule,
char **OutMessage) {
- std::string Message;
-
- *OutModule = wrap(ParseBitcodeFile(unwrap(MemBuf), *unwrap(ContextRef),
- &Message));
- if (!*OutModule) {
+ ErrorOr<Module *> ModuleOrErr =
+ parseBitcodeFile(unwrap(MemBuf), *unwrap(ContextRef));
+ if (std::error_code EC = ModuleOrErr.getError()) {
if (OutMessage)
- *OutMessage = strdup(Message.c_str());
+ *OutMessage = strdup(EC.message().c_str());
+ *OutModule = wrap((Module*)nullptr);
return 1;
}
+ *OutModule = wrap(ModuleOrErr.get());
return 0;
}
@@ -51,15 +51,18 @@ LLVMBool LLVMGetBitcodeModuleInContext(LLVMContextRef ContextRef,
LLVMModuleRef *OutM,
char **OutMessage) {
std::string Message;
+ ErrorOr<Module *> ModuleOrErr =
+ getLazyBitcodeModule(unwrap(MemBuf), *unwrap(ContextRef));
- *OutM = wrap(getLazyBitcodeModule(unwrap(MemBuf), *unwrap(ContextRef),
- &Message));
- if (!*OutM) {
+ if (std::error_code EC = ModuleOrErr.getError()) {
+ *OutM = wrap((Module *)nullptr);
if (OutMessage)
- *OutMessage = strdup(Message.c_str());
+ *OutMessage = strdup(EC.message().c_str());
return 1;
}
+ *OutM = wrap(ModuleOrErr.get());
+
return 0;
}
diff --git a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index ce3b7d1..47a3953 100644
--- a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -11,8 +11,8 @@
#include "BitcodeReader.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/AutoUpgrade.h"
#include "llvm/Bitcode/LLVMBitCodes.h"
+#include "llvm/IR/AutoUpgrade.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InlineAsm.h"
@@ -39,12 +39,11 @@ void BitcodeReader::materializeForwardReferencedFunctions() {
}
void BitcodeReader::FreeState() {
- if (BufferOwned)
- delete Buffer;
- Buffer = 0;
+ Buffer = nullptr;
std::vector<Type*>().swap(TypeList);
ValueList.clear();
MDValueList.clear();
+ std::vector<Comdat *>().swap(ComdatList);
std::vector<AttributeSet>().swap(MAttributes);
std::vector<BasicBlock*>().swap(FunctionBBs);
@@ -80,16 +79,18 @@ static GlobalValue::LinkageTypes GetDecodedLinkage(unsigned Val) {
case 2: return GlobalValue::AppendingLinkage;
case 3: return GlobalValue::InternalLinkage;
case 4: return GlobalValue::LinkOnceAnyLinkage;
- case 5: return GlobalValue::DLLImportLinkage;
- case 6: return GlobalValue::DLLExportLinkage;
+ case 5: return GlobalValue::ExternalLinkage; // Obsolete DLLImportLinkage
+ case 6: return GlobalValue::ExternalLinkage; // Obsolete DLLExportLinkage
case 7: return GlobalValue::ExternalWeakLinkage;
case 8: return GlobalValue::CommonLinkage;
case 9: return GlobalValue::PrivateLinkage;
case 10: return GlobalValue::WeakODRLinkage;
case 11: return GlobalValue::LinkOnceODRLinkage;
case 12: return GlobalValue::AvailableExternallyLinkage;
- case 13: return GlobalValue::LinkerPrivateLinkage;
- case 14: return GlobalValue::LinkerPrivateWeakLinkage;
+ case 13:
+ return GlobalValue::PrivateLinkage; // Obsolete LinkerPrivateLinkage
+ case 14:
+ return GlobalValue::PrivateLinkage; // Obsolete LinkerPrivateWeakLinkage
}
}
@@ -102,6 +103,16 @@ static GlobalValue::VisibilityTypes GetDecodedVisibility(unsigned Val) {
}
}
+static GlobalValue::DLLStorageClassTypes
+GetDecodedDLLStorageClass(unsigned Val) {
+ switch (Val) {
+ default: // Map unknown values to default.
+ case 0: return GlobalValue::DefaultStorageClass;
+ case 1: return GlobalValue::DLLImportStorageClass;
+ case 2: return GlobalValue::DLLExportStorageClass;
+ }
+}
+
static GlobalVariable::ThreadLocalMode GetDecodedThreadLocalMode(unsigned Val) {
switch (Val) {
case 0: return GlobalVariable::NotThreadLocal;
@@ -193,6 +204,29 @@ static SynchronizationScope GetDecodedSynchScope(unsigned Val) {
}
}
+static Comdat::SelectionKind getDecodedComdatSelectionKind(unsigned Val) {
+ switch (Val) {
+ default: // Map unknown selection kinds to any.
+ case bitc::COMDAT_SELECTION_KIND_ANY:
+ return Comdat::Any;
+ case bitc::COMDAT_SELECTION_KIND_EXACT_MATCH:
+ return Comdat::ExactMatch;
+ case bitc::COMDAT_SELECTION_KIND_LARGEST:
+ return Comdat::Largest;
+ case bitc::COMDAT_SELECTION_KIND_NO_DUPLICATES:
+ return Comdat::NoDuplicates;
+ case bitc::COMDAT_SELECTION_KIND_SAME_SIZE:
+ return Comdat::SameSize;
+ }
+}
+
+static void UpgradeDLLImportExportLinkage(llvm::GlobalValue *GV, unsigned Val) {
+ switch (Val) {
+ case 5: GV->setDLLStorageClass(GlobalValue::DLLImportStorageClass); break;
+ case 6: GV->setDLLStorageClass(GlobalValue::DLLExportStorageClass); break;
+ }
+}
+
namespace llvm {
namespace {
/// @brief A class for maintaining the slot number definition
@@ -239,7 +273,7 @@ void BitcodeReaderValueList::AssignValue(Value *V, unsigned Idx) {
resize(Idx+1);
WeakVH &OldV = ValuePtrs[Idx];
- if (OldV == 0) {
+ if (!OldV) {
OldV = V;
return;
}
@@ -279,12 +313,12 @@ Value *BitcodeReaderValueList::getValueFwdRef(unsigned Idx, Type *Ty) {
resize(Idx + 1);
if (Value *V = ValuePtrs[Idx]) {
- assert((Ty == 0 || Ty == V->getType()) && "Type mismatch in value table!");
+ assert((!Ty || Ty == V->getType()) && "Type mismatch in value table!");
return V;
}
// No type specified, must be invalid reference.
- if (Ty == 0) return 0;
+ if (!Ty) return nullptr;
// Create and return a placeholder, which will later be RAUW'd.
Value *V = new Argument(Ty);
@@ -315,7 +349,7 @@ void BitcodeReaderValueList::ResolveConstantForwardRefs() {
// new value. If they reference more than one placeholder, update them all
// at once.
while (!Placeholder->use_empty()) {
- Value::use_iterator UI = Placeholder->use_begin();
+ auto UI = Placeholder->user_begin();
User *U = *UI;
// If the using object isn't uniqued, just update the operands. This
@@ -384,7 +418,7 @@ void BitcodeReaderMDValueList::AssignValue(Value *V, unsigned Idx) {
resize(Idx+1);
WeakVH &OldV = MDValuePtrs[Idx];
- if (OldV == 0) {
+ if (!OldV) {
OldV = V;
return;
}
@@ -416,7 +450,7 @@ Value *BitcodeReaderMDValueList::getValueFwdRef(unsigned Idx) {
Type *BitcodeReader::getTypeByID(unsigned ID) {
// The type table size is always specified correctly.
if (ID >= TypeList.size())
- return 0;
+ return nullptr;
if (Type *Ty = TypeList[ID])
return Ty;
@@ -451,7 +485,7 @@ static void decodeLLVMAttributesForBitcode(AttrBuilder &B,
(EncodedAttrs & 0xffff));
}
-error_code BitcodeReader::ParseAttributeBlock() {
+std::error_code BitcodeReader::ParseAttributeBlock() {
if (Stream.EnterSubBlock(bitc::PARAMATTR_BLOCK_ID))
return Error(InvalidRecord);
@@ -471,7 +505,7 @@ error_code BitcodeReader::ParseAttributeBlock() {
case BitstreamEntry::Error:
return Error(MalformedBlock);
case BitstreamEntry::EndBlock:
- return error_code::success();
+ return std::error_code();
case BitstreamEntry::Record:
// The interesting case.
break;
@@ -522,12 +556,16 @@ static Attribute::AttrKind GetAttrFromCode(uint64_t Code) {
return Attribute::Builtin;
case bitc::ATTR_KIND_BY_VAL:
return Attribute::ByVal;
+ case bitc::ATTR_KIND_IN_ALLOCA:
+ return Attribute::InAlloca;
case bitc::ATTR_KIND_COLD:
return Attribute::Cold;
case bitc::ATTR_KIND_INLINE_HINT:
return Attribute::InlineHint;
case bitc::ATTR_KIND_IN_REG:
return Attribute::InReg;
+ case bitc::ATTR_KIND_JUMP_TABLE:
+ return Attribute::JumpTable;
case bitc::ATTR_KIND_MIN_SIZE:
return Attribute::MinSize;
case bitc::ATTR_KIND_NAKED:
@@ -548,6 +586,10 @@ static Attribute::AttrKind GetAttrFromCode(uint64_t Code) {
return Attribute::NoInline;
case bitc::ATTR_KIND_NON_LAZY_BIND:
return Attribute::NonLazyBind;
+ case bitc::ATTR_KIND_NON_NULL:
+ return Attribute::NonNull;
+ case bitc::ATTR_KIND_DEREFERENCEABLE:
+ return Attribute::Dereferenceable;
case bitc::ATTR_KIND_NO_RED_ZONE:
return Attribute::NoRedZone;
case bitc::ATTR_KIND_NO_RETURN:
@@ -591,15 +633,15 @@ static Attribute::AttrKind GetAttrFromCode(uint64_t Code) {
}
}
-error_code BitcodeReader::ParseAttrKind(uint64_t Code,
- Attribute::AttrKind *Kind) {
+std::error_code BitcodeReader::ParseAttrKind(uint64_t Code,
+ Attribute::AttrKind *Kind) {
*Kind = GetAttrFromCode(Code);
if (*Kind == Attribute::None)
return Error(InvalidValue);
- return error_code::success();
+ return std::error_code();
}
-error_code BitcodeReader::ParseAttributeGroupBlock() {
+std::error_code BitcodeReader::ParseAttributeGroupBlock() {
if (Stream.EnterSubBlock(bitc::PARAMATTR_GROUP_BLOCK_ID))
return Error(InvalidRecord);
@@ -617,7 +659,7 @@ error_code BitcodeReader::ParseAttributeGroupBlock() {
case BitstreamEntry::Error:
return Error(MalformedBlock);
case BitstreamEntry::EndBlock:
- return error_code::success();
+ return std::error_code();
case BitstreamEntry::Record:
// The interesting case.
break;
@@ -639,18 +681,20 @@ error_code BitcodeReader::ParseAttributeGroupBlock() {
for (unsigned i = 2, e = Record.size(); i != e; ++i) {
if (Record[i] == 0) { // Enum attribute
Attribute::AttrKind Kind;
- if (error_code EC = ParseAttrKind(Record[++i], &Kind))
+ if (std::error_code EC = ParseAttrKind(Record[++i], &Kind))
return EC;
B.addAttribute(Kind);
- } else if (Record[i] == 1) { // Align attribute
+ } else if (Record[i] == 1) { // Integer attribute
Attribute::AttrKind Kind;
- if (error_code EC = ParseAttrKind(Record[++i], &Kind))
+ if (std::error_code EC = ParseAttrKind(Record[++i], &Kind))
return EC;
if (Kind == Attribute::Alignment)
B.addAlignmentAttr(Record[++i]);
- else
+ else if (Kind == Attribute::StackAlignment)
B.addStackAlignmentAttr(Record[++i]);
+ else if (Kind == Attribute::Dereferenceable)
+ B.addDereferenceableAttr(Record[++i]);
} else { // String attribute
assert((Record[i] == 3 || Record[i] == 4) &&
"Invalid attribute group entry");
@@ -681,14 +725,14 @@ error_code BitcodeReader::ParseAttributeGroupBlock() {
}
}
-error_code BitcodeReader::ParseTypeTable() {
+std::error_code BitcodeReader::ParseTypeTable() {
if (Stream.EnterSubBlock(bitc::TYPE_BLOCK_ID_NEW))
return Error(InvalidRecord);
return ParseTypeTableBody();
}
-error_code BitcodeReader::ParseTypeTableBody() {
+std::error_code BitcodeReader::ParseTypeTableBody() {
if (!TypeList.empty())
return Error(InvalidMultipleBlocks);
@@ -708,7 +752,7 @@ error_code BitcodeReader::ParseTypeTableBody() {
case BitstreamEntry::EndBlock:
if (NumRecords != TypeList.size())
return Error(MalformedBlock);
- return error_code::success();
+ return std::error_code();
case BitstreamEntry::Record:
// The interesting case.
break;
@@ -716,7 +760,7 @@ error_code BitcodeReader::ParseTypeTableBody() {
// Read a record.
Record.clear();
- Type *ResultTy = 0;
+ Type *ResultTy = nullptr;
switch (Stream.readRecord(Entry.ID, Record)) {
default:
return Error(InvalidValue);
@@ -771,7 +815,7 @@ error_code BitcodeReader::ParseTypeTableBody() {
if (Record.size() == 2)
AddressSpace = Record[1];
ResultTy = getTypeByID(Record[0]);
- if (ResultTy == 0)
+ if (!ResultTy)
return Error(InvalidType);
ResultTy = PointerType::get(ResultTy, AddressSpace);
break;
@@ -790,7 +834,7 @@ error_code BitcodeReader::ParseTypeTableBody() {
}
ResultTy = getTypeByID(Record[2]);
- if (ResultTy == 0 || ArgTys.size() < Record.size()-3)
+ if (!ResultTy || ArgTys.size() < Record.size()-3)
return Error(InvalidType);
ResultTy = FunctionType::get(ResultTy, ArgTys, Record[0]);
@@ -809,7 +853,7 @@ error_code BitcodeReader::ParseTypeTableBody() {
}
ResultTy = getTypeByID(Record[1]);
- if (ResultTy == 0 || ArgTys.size() < Record.size()-2)
+ if (!ResultTy || ArgTys.size() < Record.size()-2)
return Error(InvalidType);
ResultTy = FunctionType::get(ResultTy, ArgTys, Record[0]);
@@ -846,7 +890,7 @@ error_code BitcodeReader::ParseTypeTableBody() {
StructType *Res = cast_or_null<StructType>(TypeList[NumRecords]);
if (Res) {
Res->setName(TypeName);
- TypeList[NumRecords] = 0;
+ TypeList[NumRecords] = nullptr;
} else // Otherwise, create a new struct.
Res = StructType::create(Context, TypeName);
TypeName.clear();
@@ -875,7 +919,7 @@ error_code BitcodeReader::ParseTypeTableBody() {
StructType *Res = cast_or_null<StructType>(TypeList[NumRecords]);
if (Res) {
Res->setName(TypeName);
- TypeList[NumRecords] = 0;
+ TypeList[NumRecords] = nullptr;
} else // Otherwise, create a new struct with no body.
Res = StructType::create(Context, TypeName);
TypeName.clear();
@@ -903,12 +947,12 @@ error_code BitcodeReader::ParseTypeTableBody() {
if (NumRecords >= TypeList.size())
return Error(InvalidTYPETable);
assert(ResultTy && "Didn't read a type?");
- assert(TypeList[NumRecords] == 0 && "Already read type?");
+ assert(!TypeList[NumRecords] && "Already read type?");
TypeList[NumRecords++] = ResultTy;
}
}
-error_code BitcodeReader::ParseValueSymbolTable() {
+std::error_code BitcodeReader::ParseValueSymbolTable() {
if (Stream.EnterSubBlock(bitc::VALUE_SYMTAB_BLOCK_ID))
return Error(InvalidRecord);
@@ -924,7 +968,7 @@ error_code BitcodeReader::ParseValueSymbolTable() {
case BitstreamEntry::Error:
return Error(MalformedBlock);
case BitstreamEntry::EndBlock:
- return error_code::success();
+ return std::error_code();
case BitstreamEntry::Record:
// The interesting case.
break;
@@ -939,7 +983,7 @@ error_code BitcodeReader::ParseValueSymbolTable() {
if (ConvertToString(Record, 1, ValueName))
return Error(InvalidRecord);
unsigned ValueID = Record[0];
- if (ValueID >= ValueList.size())
+ if (ValueID >= ValueList.size() || !ValueList[ValueID])
return Error(InvalidRecord);
Value *V = ValueList[ValueID];
@@ -951,7 +995,7 @@ error_code BitcodeReader::ParseValueSymbolTable() {
if (ConvertToString(Record, 1, ValueName))
return Error(InvalidRecord);
BasicBlock *BB = getBasicBlock(Record[0]);
- if (BB == 0)
+ if (!BB)
return Error(InvalidRecord);
BB->setName(StringRef(ValueName.data(), ValueName.size()));
@@ -962,7 +1006,7 @@ error_code BitcodeReader::ParseValueSymbolTable() {
}
}
-error_code BitcodeReader::ParseMetadata() {
+std::error_code BitcodeReader::ParseMetadata() {
unsigned NextMDValueNo = MDValueList.size();
if (Stream.EnterSubBlock(bitc::METADATA_BLOCK_ID))
@@ -979,7 +1023,7 @@ error_code BitcodeReader::ParseMetadata() {
case BitstreamEntry::Error:
return Error(MalformedBlock);
case BitstreamEntry::EndBlock:
- return error_code::success();
+ return std::error_code();
case BitstreamEntry::Record:
// The interesting case.
break;
@@ -1006,8 +1050,8 @@ error_code BitcodeReader::ParseMetadata() {
unsigned Size = Record.size();
NamedMDNode *NMD = TheModule->getOrInsertNamedMetadata(Name);
for (unsigned i = 0; i != Size; ++i) {
- MDNode *MD = dyn_cast<MDNode>(MDValueList.getValueFwdRef(Record[i]));
- if (MD == 0)
+ MDNode *MD = dyn_cast_or_null<MDNode>(MDValueList.getValueFwdRef(Record[i]));
+ if (!MD)
return Error(InvalidRecord);
NMD->addOperand(MD);
}
@@ -1031,7 +1075,7 @@ error_code BitcodeReader::ParseMetadata() {
else if (!Ty->isVoidTy())
Elts.push_back(ValueList.getValueFwdRef(Record[i+1], Ty));
else
- Elts.push_back(NULL);
+ Elts.push_back(nullptr);
}
Value *V = MDNode::getWhenValsUnresolved(Context, Elts, IsFunctionLocal);
IsFunctionLocal = false;
@@ -1039,7 +1083,8 @@ error_code BitcodeReader::ParseMetadata() {
break;
}
case bitc::METADATA_STRING: {
- SmallString<8> String(Record.begin(), Record.end());
+ std::string String(Record.begin(), Record.end());
+ llvm::UpgradeMDStringConstant(String);
Value *V = MDString::get(Context, String);
MDValueList.AssignValue(V, NextMDValueNo++);
break;
@@ -1073,7 +1118,7 @@ uint64_t BitcodeReader::decodeSignRotatedValue(uint64_t V) {
/// ResolveGlobalAndAliasInits - Resolve all of the initializers for global
/// values and aliases that we can.
-error_code BitcodeReader::ResolveGlobalAndAliasInits() {
+std::error_code BitcodeReader::ResolveGlobalAndAliasInits() {
std::vector<std::pair<GlobalVariable*, unsigned> > GlobalInitWorklist;
std::vector<std::pair<GlobalAlias*, unsigned> > AliasInitWorklist;
std::vector<std::pair<Function*, unsigned> > FunctionPrefixWorklist;
@@ -1088,7 +1133,7 @@ error_code BitcodeReader::ResolveGlobalAndAliasInits() {
// Not ready to resolve this yet, it requires something later in the file.
GlobalInits.push_back(GlobalInitWorklist.back());
} else {
- if (Constant *C = dyn_cast<Constant>(ValueList[ValID]))
+ if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
GlobalInitWorklist.back().first->setInitializer(C);
else
return Error(ExpectedConstant);
@@ -1101,7 +1146,7 @@ error_code BitcodeReader::ResolveGlobalAndAliasInits() {
if (ValID >= ValueList.size()) {
AliasInits.push_back(AliasInitWorklist.back());
} else {
- if (Constant *C = dyn_cast<Constant>(ValueList[ValID]))
+ if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
AliasInitWorklist.back().first->setAliasee(C);
else
return Error(ExpectedConstant);
@@ -1114,7 +1159,7 @@ error_code BitcodeReader::ResolveGlobalAndAliasInits() {
if (ValID >= ValueList.size()) {
FunctionPrefixes.push_back(FunctionPrefixWorklist.back());
} else {
- if (Constant *C = dyn_cast<Constant>(ValueList[ValID]))
+ if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
FunctionPrefixWorklist.back().first->setPrefixData(C);
else
return Error(ExpectedConstant);
@@ -1122,7 +1167,7 @@ error_code BitcodeReader::ResolveGlobalAndAliasInits() {
FunctionPrefixWorklist.pop_back();
}
- return error_code::success();
+ return std::error_code();
}
static APInt ReadWideAPInt(ArrayRef<uint64_t> Vals, unsigned TypeBits) {
@@ -1133,7 +1178,7 @@ static APInt ReadWideAPInt(ArrayRef<uint64_t> Vals, unsigned TypeBits) {
return APInt(TypeBits, Words);
}
-error_code BitcodeReader::ParseConstants() {
+std::error_code BitcodeReader::ParseConstants() {
if (Stream.EnterSubBlock(bitc::CONSTANTS_BLOCK_ID))
return Error(InvalidRecord);
@@ -1156,7 +1201,7 @@ error_code BitcodeReader::ParseConstants() {
// Once all the constants have been read, go through and resolve forward
// references.
ValueList.ResolveConstantForwardRefs();
- return error_code::success();
+ return std::error_code();
case BitstreamEntry::Record:
// The interesting case.
break;
@@ -1164,7 +1209,7 @@ error_code BitcodeReader::ParseConstants() {
// Read a record.
Record.clear();
- Value *V = 0;
+ Value *V = nullptr;
unsigned BitCode = Stream.readRecord(Entry.ID, Record);
switch (BitCode) {
default: // Default behavior: unknown constant
@@ -1174,7 +1219,7 @@ error_code BitcodeReader::ParseConstants() {
case bitc::CST_CODE_SETTYPE: // SETTYPE: [typeid]
if (Record.empty())
return Error(InvalidRecord);
- if (Record[0] >= TypeList.size())
+ if (Record[0] >= TypeList.size() || !TypeList[Record[0]])
return Error(InvalidRecord);
CurTy = TypeList[Record[0]];
continue; // Skip the ValueList manipulation.
@@ -1397,34 +1442,52 @@ error_code BitcodeReader::ParseConstants() {
ValueList.getConstantFwdRef(Record[2],CurTy));
break;
}
- case bitc::CST_CODE_CE_EXTRACTELT: { // CE_EXTRACTELT: [opty, opval, opval]
+ case bitc::CST_CODE_CE_EXTRACTELT
+ : { // CE_EXTRACTELT: [opty, opval, opty, opval]
if (Record.size() < 3)
return Error(InvalidRecord);
VectorType *OpTy =
dyn_cast_or_null<VectorType>(getTypeByID(Record[0]));
- if (OpTy == 0)
+ if (!OpTy)
return Error(InvalidRecord);
Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
- Constant *Op1 = ValueList.getConstantFwdRef(Record[2],
- Type::getInt32Ty(Context));
+ Constant *Op1 = nullptr;
+ if (Record.size() == 4) {
+ Type *IdxTy = getTypeByID(Record[2]);
+ if (!IdxTy)
+ return Error(InvalidRecord);
+ Op1 = ValueList.getConstantFwdRef(Record[3], IdxTy);
+ } else // TODO: Remove with llvm 4.0
+ Op1 = ValueList.getConstantFwdRef(Record[2], Type::getInt32Ty(Context));
+ if (!Op1)
+ return Error(InvalidRecord);
V = ConstantExpr::getExtractElement(Op0, Op1);
break;
}
- case bitc::CST_CODE_CE_INSERTELT: { // CE_INSERTELT: [opval, opval, opval]
+ case bitc::CST_CODE_CE_INSERTELT
+ : { // CE_INSERTELT: [opval, opval, opty, opval]
VectorType *OpTy = dyn_cast<VectorType>(CurTy);
- if (Record.size() < 3 || OpTy == 0)
+ if (Record.size() < 3 || !OpTy)
return Error(InvalidRecord);
Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy);
Constant *Op1 = ValueList.getConstantFwdRef(Record[1],
OpTy->getElementType());
- Constant *Op2 = ValueList.getConstantFwdRef(Record[2],
- Type::getInt32Ty(Context));
+ Constant *Op2 = nullptr;
+ if (Record.size() == 4) {
+ Type *IdxTy = getTypeByID(Record[2]);
+ if (!IdxTy)
+ return Error(InvalidRecord);
+ Op2 = ValueList.getConstantFwdRef(Record[3], IdxTy);
+ } else // TODO: Remove with llvm 4.0
+ Op2 = ValueList.getConstantFwdRef(Record[2], Type::getInt32Ty(Context));
+ if (!Op2)
+ return Error(InvalidRecord);
V = ConstantExpr::getInsertElement(Op0, Op1, Op2);
break;
}
case bitc::CST_CODE_CE_SHUFFLEVEC: { // CE_SHUFFLEVEC: [opval, opval, opval]
VectorType *OpTy = dyn_cast<VectorType>(CurTy);
- if (Record.size() < 3 || OpTy == 0)
+ if (Record.size() < 3 || !OpTy)
return Error(InvalidRecord);
Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy);
Constant *Op1 = ValueList.getConstantFwdRef(Record[1], OpTy);
@@ -1438,7 +1501,7 @@ error_code BitcodeReader::ParseConstants() {
VectorType *RTy = dyn_cast<VectorType>(CurTy);
VectorType *OpTy =
dyn_cast_or_null<VectorType>(getTypeByID(Record[0]));
- if (Record.size() < 4 || RTy == 0 || OpTy == 0)
+ if (Record.size() < 4 || !RTy || !OpTy)
return Error(InvalidRecord);
Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
Constant *Op1 = ValueList.getConstantFwdRef(Record[2], OpTy);
@@ -1452,7 +1515,7 @@ error_code BitcodeReader::ParseConstants() {
if (Record.size() < 4)
return Error(InvalidRecord);
Type *OpTy = getTypeByID(Record[0]);
- if (OpTy == 0)
+ if (!OpTy)
return Error(InvalidRecord);
Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
Constant *Op1 = ValueList.getConstantFwdRef(Record[2], OpTy);
@@ -1517,11 +1580,11 @@ error_code BitcodeReader::ParseConstants() {
if (Record.size() < 3)
return Error(InvalidRecord);
Type *FnTy = getTypeByID(Record[0]);
- if (FnTy == 0)
+ if (!FnTy)
return Error(InvalidRecord);
Function *Fn =
dyn_cast_or_null<Function>(ValueList.getConstantFwdRef(Record[1],FnTy));
- if (Fn == 0)
+ if (!Fn)
return Error(InvalidRecord);
// If the function is already parsed we can insert the block address right
@@ -1540,7 +1603,7 @@ error_code BitcodeReader::ParseConstants() {
GlobalVariable *FwdRef = new GlobalVariable(*Fn->getParent(),
Type::getInt8Ty(Context),
false, GlobalValue::InternalLinkage,
- 0, "");
+ nullptr, "");
BlockAddrFwdRefs[Fn].push_back(std::make_pair(Record[2], FwdRef));
V = FwdRef;
}
@@ -1553,7 +1616,7 @@ error_code BitcodeReader::ParseConstants() {
}
}
-error_code BitcodeReader::ParseUseLists() {
+std::error_code BitcodeReader::ParseUseLists() {
if (Stream.EnterSubBlock(bitc::USELIST_BLOCK_ID))
return Error(InvalidRecord);
@@ -1568,7 +1631,7 @@ error_code BitcodeReader::ParseUseLists() {
case BitstreamEntry::Error:
return Error(MalformedBlock);
case BitstreamEntry::EndBlock:
- return error_code::success();
+ return std::error_code();
case BitstreamEntry::Record:
// The interesting case.
break;
@@ -1593,7 +1656,7 @@ error_code BitcodeReader::ParseUseLists() {
/// RememberAndSkipFunctionBody - When we see the block for a function body,
/// remember where it is and then skip it. This lets us lazily deserialize the
/// functions.
-error_code BitcodeReader::RememberAndSkipFunctionBody() {
+std::error_code BitcodeReader::RememberAndSkipFunctionBody() {
// Get the function we are talking about.
if (FunctionsWithBodies.empty())
return Error(InsufficientFunctionProtos);
@@ -1608,10 +1671,10 @@ error_code BitcodeReader::RememberAndSkipFunctionBody() {
// Skip over the function block for now.
if (Stream.SkipBlock())
return Error(InvalidRecord);
- return error_code::success();
+ return std::error_code();
}
-error_code BitcodeReader::GlobalCleanup() {
+std::error_code BitcodeReader::GlobalCleanup() {
// Patch the initializers for globals and aliases up.
ResolveGlobalAndAliasInits();
if (!GlobalInits.empty() || !AliasInits.empty())
@@ -1628,16 +1691,19 @@ error_code BitcodeReader::GlobalCleanup() {
// Look for global variables which need to be renamed.
for (Module::global_iterator
GI = TheModule->global_begin(), GE = TheModule->global_end();
- GI != GE; ++GI)
- UpgradeGlobalVariable(GI);
+ GI != GE;) {
+ GlobalVariable *GV = GI++;
+ UpgradeGlobalVariable(GV);
+ }
+
// Force deallocation of memory for these vectors to favor the client that
// want lazy deserialization.
std::vector<std::pair<GlobalVariable*, unsigned> >().swap(GlobalInits);
std::vector<std::pair<GlobalAlias*, unsigned> >().swap(AliasInits);
- return error_code::success();
+ return std::error_code();
}
-error_code BitcodeReader::ParseModule(bool Resume) {
+std::error_code BitcodeReader::ParseModule(bool Resume) {
if (Resume)
Stream.JumpToBit(NextUnreadBit);
else if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
@@ -1668,30 +1734,30 @@ error_code BitcodeReader::ParseModule(bool Resume) {
return Error(MalformedBlock);
break;
case bitc::PARAMATTR_BLOCK_ID:
- if (error_code EC = ParseAttributeBlock())
+ if (std::error_code EC = ParseAttributeBlock())
return EC;
break;
case bitc::PARAMATTR_GROUP_BLOCK_ID:
- if (error_code EC = ParseAttributeGroupBlock())
+ if (std::error_code EC = ParseAttributeGroupBlock())
return EC;
break;
case bitc::TYPE_BLOCK_ID_NEW:
- if (error_code EC = ParseTypeTable())
+ if (std::error_code EC = ParseTypeTable())
return EC;
break;
case bitc::VALUE_SYMTAB_BLOCK_ID:
- if (error_code EC = ParseValueSymbolTable())
+ if (std::error_code EC = ParseValueSymbolTable())
return EC;
SeenValueSymbolTable = true;
break;
case bitc::CONSTANTS_BLOCK_ID:
- if (error_code EC = ParseConstants())
+ if (std::error_code EC = ParseConstants())
return EC;
- if (error_code EC = ResolveGlobalAndAliasInits())
+ if (std::error_code EC = ResolveGlobalAndAliasInits())
return EC;
break;
case bitc::METADATA_BLOCK_ID:
- if (error_code EC = ParseMetadata())
+ if (std::error_code EC = ParseMetadata())
return EC;
break;
case bitc::FUNCTION_BLOCK_ID:
@@ -1699,12 +1765,12 @@ error_code BitcodeReader::ParseModule(bool Resume) {
// FunctionsWithBodies list.
if (!SeenFirstFunctionBody) {
std::reverse(FunctionsWithBodies.begin(), FunctionsWithBodies.end());
- if (error_code EC = GlobalCleanup())
+ if (std::error_code EC = GlobalCleanup())
return EC;
SeenFirstFunctionBody = true;
}
- if (error_code EC = RememberAndSkipFunctionBody())
+ if (std::error_code EC = RememberAndSkipFunctionBody())
return EC;
// For streaming bitcode, suspend parsing when we reach the function
// bodies. Subsequent materialization calls will resume it when
@@ -1714,11 +1780,11 @@ error_code BitcodeReader::ParseModule(bool Resume) {
// just finish the parse now.
if (LazyStreamer && SeenValueSymbolTable) {
NextUnreadBit = Stream.GetCurrentBitNo();
- return error_code::success();
+ return std::error_code();
}
break;
case bitc::USELIST_BLOCK_ID:
- if (error_code EC = ParseUseLists())
+ if (std::error_code EC = ParseUseLists())
return EC;
break;
}
@@ -1793,9 +1859,23 @@ error_code BitcodeReader::ParseModule(bool Resume) {
GCTable.push_back(S);
break;
}
+ case bitc::MODULE_CODE_COMDAT: { // COMDAT: [selection_kind, name]
+ if (Record.size() < 2)
+ return Error(InvalidRecord);
+ Comdat::SelectionKind SK = getDecodedComdatSelectionKind(Record[0]);
+ unsigned ComdatNameSize = Record[1];
+ std::string ComdatName;
+ ComdatName.reserve(ComdatNameSize);
+ for (unsigned i = 0; i != ComdatNameSize; ++i)
+ ComdatName += (char)Record[2 + i];
+ Comdat *C = TheModule->getOrInsertComdat(ComdatName);
+ C->setSelectionKind(SK);
+ ComdatList.push_back(C);
+ break;
+ }
// GLOBALVAR: [pointer type, isconst, initid,
// linkage, alignment, section, visibility, threadlocal,
- // unnamed_addr]
+ // unnamed_addr, dllstorageclass]
case bitc::MODULE_CODE_GLOBALVAR: {
if (Record.size() < 6)
return Error(InvalidRecord);
@@ -1817,7 +1897,9 @@ error_code BitcodeReader::ParseModule(bool Resume) {
Section = SectionTable[Record[5]-1];
}
GlobalValue::VisibilityTypes Visibility = GlobalValue::DefaultVisibility;
- if (Record.size() > 6)
+ // Local linkage must have default visibility.
+ if (Record.size() > 6 && !GlobalValue::isLocalLinkage(Linkage))
+ // FIXME: Change to an error if non-default in 4.0.
Visibility = GetDecodedVisibility(Record[6]);
GlobalVariable::ThreadLocalMode TLM = GlobalVariable::NotThreadLocal;
@@ -1833,7 +1915,7 @@ error_code BitcodeReader::ParseModule(bool Resume) {
ExternallyInitialized = Record[9];
GlobalVariable *NewGV =
- new GlobalVariable(*TheModule, Ty, isConstant, Linkage, 0, "", 0,
+ new GlobalVariable(*TheModule, Ty, isConstant, Linkage, nullptr, "", nullptr,
TLM, AddressSpace, ExternallyInitialized);
NewGV->setAlignment(Alignment);
if (!Section.empty())
@@ -1841,15 +1923,27 @@ error_code BitcodeReader::ParseModule(bool Resume) {
NewGV->setVisibility(Visibility);
NewGV->setUnnamedAddr(UnnamedAddr);
+ if (Record.size() > 10)
+ NewGV->setDLLStorageClass(GetDecodedDLLStorageClass(Record[10]));
+ else
+ UpgradeDLLImportExportLinkage(NewGV, Record[3]);
+
ValueList.push_back(NewGV);
// Remember which value to use for the global initializer.
if (unsigned InitID = Record[2])
GlobalInits.push_back(std::make_pair(NewGV, InitID-1));
+
+ if (Record.size() > 11)
+ if (unsigned ComdatID = Record[11]) {
+ assert(ComdatID <= ComdatList.size());
+ NewGV->setComdat(ComdatList[ComdatID - 1]);
+ }
break;
}
// FUNCTION: [type, callingconv, isproto, linkage, paramattr,
- // alignment, section, visibility, gc, unnamed_addr]
+ // alignment, section, visibility, gc, unnamed_addr,
+ // dllstorageclass]
case bitc::MODULE_CODE_FUNCTION: {
if (Record.size() < 8)
return Error(InvalidRecord);
@@ -1877,7 +1971,10 @@ error_code BitcodeReader::ParseModule(bool Resume) {
return Error(InvalidID);
Func->setSection(SectionTable[Record[6]-1]);
}
- Func->setVisibility(GetDecodedVisibility(Record[7]));
+ // Local linkage must have default visibility.
+ if (!Func->hasLocalLinkage())
+ // FIXME: Change to an error if non-default in 4.0.
+ Func->setVisibility(GetDecodedVisibility(Record[7]));
if (Record.size() > 8 && Record[8]) {
if (Record[8]-1 > GCTable.size())
return Error(InvalidID);
@@ -1889,6 +1986,18 @@ error_code BitcodeReader::ParseModule(bool Resume) {
Func->setUnnamedAddr(UnnamedAddr);
if (Record.size() > 10 && Record[10] != 0)
FunctionPrefixes.push_back(std::make_pair(Func, Record[10]-1));
+
+ if (Record.size() > 11)
+ Func->setDLLStorageClass(GetDecodedDLLStorageClass(Record[11]));
+ else
+ UpgradeDLLImportExportLinkage(Func, Record[3]);
+
+ if (Record.size() > 12)
+ if (unsigned ComdatID = Record[12]) {
+ assert(ComdatID <= ComdatList.size());
+ Func->setComdat(ComdatList[ComdatID - 1]);
+ }
+
ValueList.push_back(Func);
// If this is a function with a body, remember the prototype we are
@@ -1900,21 +2009,33 @@ error_code BitcodeReader::ParseModule(bool Resume) {
break;
}
// ALIAS: [alias type, aliasee val#, linkage]
- // ALIAS: [alias type, aliasee val#, linkage, visibility]
+ // ALIAS: [alias type, aliasee val#, linkage, visibility, dllstorageclass]
case bitc::MODULE_CODE_ALIAS: {
if (Record.size() < 3)
return Error(InvalidRecord);
Type *Ty = getTypeByID(Record[0]);
if (!Ty)
return Error(InvalidRecord);
- if (!Ty->isPointerTy())
+ auto *PTy = dyn_cast<PointerType>(Ty);
+ if (!PTy)
return Error(InvalidTypeForValue);
- GlobalAlias *NewGA = new GlobalAlias(Ty, GetDecodedLinkage(Record[2]),
- "", 0, TheModule);
+ auto *NewGA =
+ GlobalAlias::create(PTy->getElementType(), PTy->getAddressSpace(),
+ GetDecodedLinkage(Record[2]), "", TheModule);
// Old bitcode files didn't have visibility field.
- if (Record.size() > 3)
+ // Local linkage must have default visibility.
+ if (Record.size() > 3 && !NewGA->hasLocalLinkage())
+ // FIXME: Change to an error if non-default in 4.0.
NewGA->setVisibility(GetDecodedVisibility(Record[3]));
+ if (Record.size() > 4)
+ NewGA->setDLLStorageClass(GetDecodedDLLStorageClass(Record[4]));
+ else
+ UpgradeDLLImportExportLinkage(NewGA, Record[2]);
+ if (Record.size() > 5)
+ NewGA->setThreadLocalMode(GetDecodedThreadLocalMode(Record[5]));
+ if (Record.size() > 6)
+ NewGA->setUnnamedAddr(Record[6]);
ValueList.push_back(NewGA);
AliasInits.push_back(std::make_pair(NewGA, Record[1]));
break;
@@ -1931,10 +2052,10 @@ error_code BitcodeReader::ParseModule(bool Resume) {
}
}
-error_code BitcodeReader::ParseBitcodeInto(Module *M) {
- TheModule = 0;
+std::error_code BitcodeReader::ParseBitcodeInto(Module *M) {
+ TheModule = nullptr;
- if (error_code EC = InitStream())
+ if (std::error_code EC = InitStream())
return EC;
// Sniff for the signature.
@@ -1950,7 +2071,7 @@ error_code BitcodeReader::ParseBitcodeInto(Module *M) {
// need to understand them all.
while (1) {
if (Stream.AtEndOfStream())
- return error_code::success();
+ return std::error_code();
BitstreamEntry Entry =
Stream.advance(BitstreamCursor::AF_DontAutoprocessAbbrevs);
@@ -1959,7 +2080,7 @@ error_code BitcodeReader::ParseBitcodeInto(Module *M) {
case BitstreamEntry::Error:
return Error(MalformedBlock);
case BitstreamEntry::EndBlock:
- return error_code::success();
+ return std::error_code();
case BitstreamEntry::SubBlock:
switch (Entry.ID) {
@@ -1972,10 +2093,10 @@ error_code BitcodeReader::ParseBitcodeInto(Module *M) {
if (TheModule)
return Error(InvalidMultipleBlocks);
TheModule = M;
- if (error_code EC = ParseModule(false))
+ if (std::error_code EC = ParseModule(false))
return EC;
if (LazyStreamer)
- return error_code::success();
+ return std::error_code();
break;
default:
if (Stream.SkipBlock())
@@ -1992,19 +2113,20 @@ error_code BitcodeReader::ParseBitcodeInto(Module *M) {
if (Stream.getAbbrevIDWidth() == 2 && Entry.ID == 2 &&
Stream.Read(6) == 2 && Stream.Read(24) == 0xa0a0a &&
Stream.AtEndOfStream())
- return error_code::success();
+ return std::error_code();
return Error(InvalidRecord);
}
}
}
-error_code BitcodeReader::ParseModuleTriple(std::string &Triple) {
+ErrorOr<std::string> BitcodeReader::parseModuleTriple() {
if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
return Error(InvalidRecord);
SmallVector<uint64_t, 64> Record;
+ std::string Triple;
// Read all the records for this module.
while (1) {
BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
@@ -2014,7 +2136,7 @@ error_code BitcodeReader::ParseModuleTriple(std::string &Triple) {
case BitstreamEntry::Error:
return Error(MalformedBlock);
case BitstreamEntry::EndBlock:
- return error_code::success();
+ return Triple;
case BitstreamEntry::Record:
// The interesting case.
break;
@@ -2033,10 +2155,11 @@ error_code BitcodeReader::ParseModuleTriple(std::string &Triple) {
}
Record.clear();
}
+ llvm_unreachable("Exit infinite loop");
}
-error_code BitcodeReader::ParseTriple(std::string &Triple) {
- if (error_code EC = InitStream())
+ErrorOr<std::string> BitcodeReader::parseTriple() {
+ if (std::error_code EC = InitStream())
return EC;
// Sniff for the signature.
@@ -2057,11 +2180,11 @@ error_code BitcodeReader::ParseTriple(std::string &Triple) {
case BitstreamEntry::Error:
return Error(MalformedBlock);
case BitstreamEntry::EndBlock:
- return error_code::success();
+ return std::error_code();
case BitstreamEntry::SubBlock:
if (Entry.ID == bitc::MODULE_BLOCK_ID)
- return ParseModuleTriple(Triple);
+ return parseModuleTriple();
// Ignore other sub-blocks.
if (Stream.SkipBlock())
@@ -2076,7 +2199,7 @@ error_code BitcodeReader::ParseTriple(std::string &Triple) {
}
/// ParseMetadataAttachment - Parse metadata attachments.
-error_code BitcodeReader::ParseMetadataAttachment() {
+std::error_code BitcodeReader::ParseMetadataAttachment() {
if (Stream.EnterSubBlock(bitc::METADATA_ATTACHMENT_ID))
return Error(InvalidRecord);
@@ -2089,7 +2212,7 @@ error_code BitcodeReader::ParseMetadataAttachment() {
case BitstreamEntry::Error:
return Error(MalformedBlock);
case BitstreamEntry::EndBlock:
- return error_code::success();
+ return std::error_code();
case BitstreamEntry::Record:
// The interesting case.
break;
@@ -2123,7 +2246,7 @@ error_code BitcodeReader::ParseMetadataAttachment() {
}
/// ParseFunctionBody - Lazily parse the specified function body block.
-error_code BitcodeReader::ParseFunctionBody(Function *F) {
+std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
if (Stream.EnterSubBlock(bitc::FUNCTION_BLOCK_ID))
return Error(InvalidRecord);
@@ -2136,7 +2259,7 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
ValueList.push_back(I);
unsigned NextValueNo = ValueList.size();
- BasicBlock *CurBB = 0;
+ BasicBlock *CurBB = nullptr;
unsigned CurBBNo = 0;
DebugLoc LastLoc;
@@ -2159,20 +2282,20 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
return Error(InvalidRecord);
break;
case bitc::CONSTANTS_BLOCK_ID:
- if (error_code EC = ParseConstants())
+ if (std::error_code EC = ParseConstants())
return EC;
NextValueNo = ValueList.size();
break;
case bitc::VALUE_SYMTAB_BLOCK_ID:
- if (error_code EC = ParseValueSymbolTable())
+ if (std::error_code EC = ParseValueSymbolTable())
return EC;
break;
case bitc::METADATA_ATTACHMENT_ID:
- if (error_code EC = ParseMetadataAttachment())
+ if (std::error_code EC = ParseMetadataAttachment())
return EC;
break;
case bitc::METADATA_BLOCK_ID:
- if (error_code EC = ParseMetadata())
+ if (std::error_code EC = ParseMetadata())
return EC;
break;
}
@@ -2185,7 +2308,7 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
// Read a record.
Record.clear();
- Instruction *I = 0;
+ Instruction *I = nullptr;
unsigned BitCode = Stream.readRecord(Entry.ID, Record);
switch (BitCode) {
default: // Default behavior: reject
@@ -2203,7 +2326,7 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
case bitc::FUNC_CODE_DEBUG_LOC_AGAIN: // DEBUG_LOC_AGAIN
// This record indicates that the last instruction is at the same
// location as the previous instruction with a location.
- I = 0;
+ I = nullptr;
// Get the last instruction emitted.
if (CurBB && !CurBB->empty())
@@ -2212,31 +2335,31 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
!FunctionBBs[CurBBNo-1]->empty())
I = &FunctionBBs[CurBBNo-1]->back();
- if (I == 0)
+ if (!I)
return Error(InvalidRecord);
I->setDebugLoc(LastLoc);
- I = 0;
+ I = nullptr;
continue;
case bitc::FUNC_CODE_DEBUG_LOC: { // DEBUG_LOC: [line, col, scope, ia]
- I = 0; // Get the last instruction emitted.
+ I = nullptr; // Get the last instruction emitted.
if (CurBB && !CurBB->empty())
I = &CurBB->back();
else if (CurBBNo && FunctionBBs[CurBBNo-1] &&
!FunctionBBs[CurBBNo-1]->empty())
I = &FunctionBBs[CurBBNo-1]->back();
- if (I == 0 || Record.size() < 4)
+ if (!I || Record.size() < 4)
return Error(InvalidRecord);
unsigned Line = Record[0], Col = Record[1];
unsigned ScopeID = Record[2], IAID = Record[3];
- MDNode *Scope = 0, *IA = 0;
+ MDNode *Scope = nullptr, *IA = nullptr;
if (ScopeID) Scope = cast<MDNode>(MDValueList.getValueFwdRef(ScopeID-1));
if (IAID) IA = cast<MDNode>(MDValueList.getValueFwdRef(IAID-1));
LastLoc = DebugLoc::get(Line, Col, Scope, IA);
I->setDebugLoc(LastLoc);
- I = 0;
+ I = nullptr;
continue;
}
@@ -2296,9 +2419,9 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
Type *ResTy = getTypeByID(Record[OpNum]);
int Opc = GetDecodedCastOpcode(Record[OpNum+1]);
- if (Opc == -1 || ResTy == 0)
+ if (Opc == -1 || !ResTy)
return Error(InvalidRecord);
- Instruction *Temp = 0;
+ Instruction *Temp = nullptr;
if ((I = UpgradeBitCastInst(Opc, Op, ResTy, Temp))) {
if (Temp) {
InstructionList.push_back(Temp);
@@ -2423,7 +2546,7 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
unsigned OpNum = 0;
Value *Vec, *Idx;
if (getValueTypePair(Record, OpNum, NextValueNo, Vec) ||
- popValue(Record, OpNum, NextValueNo, Type::getInt32Ty(Context), Idx))
+ getValueTypePair(Record, OpNum, NextValueNo, Idx))
return Error(InvalidRecord);
I = ExtractElementInst::Create(Vec, Idx);
InstructionList.push_back(I);
@@ -2436,7 +2559,7 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
if (getValueTypePair(Record, OpNum, NextValueNo, Vec) ||
popValue(Record, OpNum, NextValueNo,
cast<VectorType>(Vec->getType())->getElementType(), Elt) ||
- popValue(Record, OpNum, NextValueNo, Type::getInt32Ty(Context), Idx))
+ getValueTypePair(Record, OpNum, NextValueNo, Idx))
return Error(InvalidRecord);
I = InsertElementInst::Create(Vec, Elt, Idx);
InstructionList.push_back(I);
@@ -2489,7 +2612,7 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
}
unsigned OpNum = 0;
- Value *Op = NULL;
+ Value *Op = nullptr;
if (getValueTypePair(Record, OpNum, NextValueNo, Op))
return Error(InvalidRecord);
if (OpNum != Record.size())
@@ -2503,7 +2626,7 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
if (Record.size() != 1 && Record.size() != 3)
return Error(InvalidRecord);
BasicBlock *TrueDest = getBasicBlock(Record[0]);
- if (TrueDest == 0)
+ if (!TrueDest)
return Error(InvalidRecord);
if (Record.size() == 1) {
@@ -2514,7 +2637,7 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
BasicBlock *FalseDest = getBasicBlock(Record[1]);
Value *Cond = getValue(Record, 2, NextValueNo,
Type::getInt1Ty(Context));
- if (FalseDest == 0 || Cond == 0)
+ if (!FalseDest || !Cond)
return Error(InvalidRecord);
I = BranchInst::Create(TrueDest, FalseDest, Cond);
InstructionList.push_back(I);
@@ -2534,7 +2657,7 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
Value *Cond = getValue(Record, 2, NextValueNo, OpTy);
BasicBlock *Default = getBasicBlock(Record[3]);
- if (OpTy == 0 || Cond == 0 || Default == 0)
+ if (!OpTy || !Cond || !Default)
return Error(InvalidRecord);
unsigned NumCases = Record[4];
@@ -2591,7 +2714,7 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
Type *OpTy = getTypeByID(Record[0]);
Value *Cond = getValue(Record, 1, NextValueNo, OpTy);
BasicBlock *Default = getBasicBlock(Record[2]);
- if (OpTy == 0 || Cond == 0 || Default == 0)
+ if (!OpTy || !Cond || !Default)
return Error(InvalidRecord);
unsigned NumCases = (Record.size()-3)/2;
SwitchInst *SI = SwitchInst::Create(Cond, Default, NumCases);
@@ -2600,7 +2723,7 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
ConstantInt *CaseVal =
dyn_cast_or_null<ConstantInt>(getFnValueByID(Record[3+i*2], OpTy));
BasicBlock *DestBB = getBasicBlock(Record[1+3+i*2]);
- if (CaseVal == 0 || DestBB == 0) {
+ if (!CaseVal || !DestBB) {
delete SI;
return Error(InvalidRecord);
}
@@ -2614,7 +2737,7 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
return Error(InvalidRecord);
Type *OpTy = getTypeByID(Record[0]);
Value *Address = getValue(Record, 1, NextValueNo, OpTy);
- if (OpTy == 0 || Address == 0)
+ if (!OpTy || !Address)
return Error(InvalidRecord);
unsigned NumDests = Record.size()-2;
IndirectBrInst *IBI = IndirectBrInst::Create(Address, NumDests);
@@ -2646,11 +2769,11 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
return Error(InvalidRecord);
PointerType *CalleeTy = dyn_cast<PointerType>(Callee->getType());
- FunctionType *FTy = !CalleeTy ? 0 :
+ FunctionType *FTy = !CalleeTy ? nullptr :
dyn_cast<FunctionType>(CalleeTy->getElementType());
// Check that the right number of fixed parameters are here.
- if (FTy == 0 || NormalBB == 0 || UnwindBB == 0 ||
+ if (!FTy || !NormalBB || !UnwindBB ||
Record.size() < OpNum+FTy->getNumParams())
return Error(InvalidRecord);
@@ -2658,7 +2781,7 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) {
Ops.push_back(getValue(Record, OpNum, NextValueNo,
FTy->getParamType(i)));
- if (Ops.back() == 0)
+ if (!Ops.back())
return Error(InvalidRecord);
}
@@ -2684,7 +2807,7 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
}
case bitc::FUNC_CODE_INST_RESUME: { // RESUME: [opval]
unsigned Idx = 0;
- Value *Val = 0;
+ Value *Val = nullptr;
if (getValueTypePair(Record, Idx, NextValueNo, Val))
return Error(InvalidRecord);
I = ResumeInst::Create(Val);
@@ -2731,7 +2854,7 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
Type *Ty = getTypeByID(Record[Idx++]);
if (!Ty)
return Error(InvalidRecord);
- Value *PersFn = 0;
+ Value *PersFn = nullptr;
if (getValueTypePair(Record, Idx, NextValueNo, PersFn))
return Error(InvalidRecord);
@@ -2755,7 +2878,7 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
assert((CT != LandingPadInst::Filter ||
isa<ArrayType>(Val->getType())) &&
"Filter clause has invalid type!");
- LP->addClause(Val);
+ LP->addClause(cast<Constant>(Val));
}
I = LP;
@@ -2770,10 +2893,14 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
dyn_cast_or_null<PointerType>(getTypeByID(Record[0]));
Type *OpTy = getTypeByID(Record[1]);
Value *Size = getFnValueByID(Record[2], OpTy);
- unsigned Align = Record[3];
+ unsigned AlignRecord = Record[3];
+ bool InAlloca = AlignRecord & (1 << 5);
+ unsigned Align = AlignRecord & ((1 << 5) - 1);
if (!Ty || !Size)
return Error(InvalidRecord);
- I = new AllocaInst(Ty->getElementType(), Size, (1 << Align) >> 1);
+ AllocaInst *AI = new AllocaInst(Ty->getElementType(), Size, (1 << Align) >> 1);
+ AI->setUsedWithInAlloca(InAlloca);
+ I = AI;
InstructionList.push_back(I);
break;
}
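The alloca record now packs two things into one operand: the low five bits keep the existing Log2(alignment)+1 encoding, and bit 5 carries the new inalloca flag. The following standalone round-trip assumes only the layout shown above; the helper names are invented, and the Log2 loop is merely a stand-in for LLVM's Log2_32.

#include <cassert>
#include <cstdint>

static uint64_t encodeAllocaAlign(unsigned Alignment, bool InAlloca) {
  unsigned Log2 = 0;
  while ((1u << Log2) < Alignment)
    ++Log2;                                  // stand-in for LLVM's Log2_32
  uint64_t Record = Log2 + 1;                // low 5 bits: Log2(align) + 1
  assert(Record < (1 << 5) && "alignment field must fit in 5 bits");
  if (InAlloca)
    Record |= 1 << 5;                        // bit 5: the new inalloca flag
  return Record;
}

static void decodeAllocaAlign(uint64_t Record, unsigned &Alignment, bool &InAlloca) {
  InAlloca = (Record & (1 << 5)) != 0;
  unsigned Align = unsigned(Record & ((1 << 5) - 1));
  Alignment = (1u << Align) >> 1;            // same expression the reader uses
}

int main() {
  unsigned Align = 0;
  bool InAlloca = false;
  decodeAllocaAlign(encodeAllocaAlign(16, true), Align, InAlloca);
  assert(Align == 16 && InAlloca);
  return 0;
}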
@@ -2847,7 +2974,8 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
break;
}
case bitc::FUNC_CODE_INST_CMPXCHG: {
- // CMPXCHG:[ptrty, ptr, cmp, new, vol, ordering, synchscope]
+ // CMPXCHG:[ptrty, ptr, cmp, new, vol, successordering, synchscope,
+ // failureordering?, isweak?]
unsigned OpNum = 0;
Value *Ptr, *Cmp, *New;
if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
@@ -2855,14 +2983,34 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
cast<PointerType>(Ptr->getType())->getElementType(), Cmp) ||
popValue(Record, OpNum, NextValueNo,
cast<PointerType>(Ptr->getType())->getElementType(), New) ||
- OpNum+3 != Record.size())
+ (Record.size() < OpNum + 3 || Record.size() > OpNum + 5))
return Error(InvalidRecord);
- AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+1]);
- if (Ordering == NotAtomic || Ordering == Unordered)
+ AtomicOrdering SuccessOrdering = GetDecodedOrdering(Record[OpNum+1]);
+ if (SuccessOrdering == NotAtomic || SuccessOrdering == Unordered)
return Error(InvalidRecord);
SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+2]);
- I = new AtomicCmpXchgInst(Ptr, Cmp, New, Ordering, SynchScope);
+
+ AtomicOrdering FailureOrdering;
+ if (Record.size() < 7)
+ FailureOrdering =
+ AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrdering);
+ else
+ FailureOrdering = GetDecodedOrdering(Record[OpNum+3]);
+
+ I = new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering, FailureOrdering,
+ SynchScope);
cast<AtomicCmpXchgInst>(I)->setVolatile(Record[OpNum]);
+
+ if (Record.size() < 8) {
+ // Before weak cmpxchgs existed, the instruction simply returned the
+ // value loaded from memory, so bitcode files from that era will be
+ // expecting the first component of a modern cmpxchg.
+ CurBB->getInstList().push_back(I);
+ I = ExtractValueInst::Create(I, 0);
+ } else {
+ cast<AtomicCmpXchgInst>(I)->setWeak(Record[OpNum+4]);
+ }
+
InstructionList.push_back(I);
break;
}
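Two upgrade rules are encoded in the size checks above: a record too short to carry a failure ordering gets one derived from the success ordering, and a record too short to carry the weak flag came from the era when cmpxchg returned only the loaded value, so the new instruction's result is rewrapped with an extractvalue of element 0. The sketch below illustrates the size-based defaulting only; the indices are relative to OpNum, the names are invented, and the real reader uses getStrongestFailureOrdering rather than copying the success ordering.

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative decode of the widened CMPXCHG tail:
//   [vol, successordering, synchscope, failureordering?, isweak?]
// starting at OpNum, after ptr/cmp/new have been consumed.
struct CmpXchgFields {
  bool Volatile;
  unsigned SuccessOrdering, SynchScope, FailureOrdering;
  bool Weak;
  bool NeedsExtractValueUpgrade; // old form returned only the loaded value
};

static CmpXchgFields decodeCmpXchgTail(const std::vector<uint64_t> &Record,
                                       unsigned OpNum) {
  CmpXchgFields F;
  F.Volatile = Record[OpNum] != 0;
  F.SuccessOrdering = unsigned(Record[OpNum + 1]);
  F.SynchScope = unsigned(Record[OpNum + 2]);
  // Older records stop here; the real reader derives the failure ordering
  // from the success ordering instead of copying it as this sketch does.
  F.FailureOrdering = Record.size() > OpNum + 3 ? unsigned(Record[OpNum + 3])
                                                : F.SuccessOrdering;
  F.Weak = Record.size() > OpNum + 4 && Record[OpNum + 4] != 0;
  F.NeedsExtractValueUpgrade = Record.size() <= OpNum + 4;
  return F;
}

int main() {
  CmpXchgFields Old = decodeCmpXchgTail({/*vol*/1, /*succ*/7, /*scope*/1}, 0);
  assert(Old.NeedsExtractValueUpgrade && !Old.Weak && Old.FailureOrdering == 7);
  CmpXchgFields New = decodeCmpXchgTail({1, 7, 1, /*fail*/6, /*weak*/1}, 0);
  assert(!New.NeedsExtractValueUpgrade && New.Weak && New.FailureOrdering == 6);
  return 0;
}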
@@ -2914,7 +3062,7 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
return Error(InvalidRecord);
PointerType *OpTy = dyn_cast<PointerType>(Callee->getType());
- FunctionType *FTy = 0;
+ FunctionType *FTy = nullptr;
if (OpTy) FTy = dyn_cast<FunctionType>(OpTy->getElementType());
if (!FTy || Record.size() < FTy->getNumParams()+OpNum)
return Error(InvalidRecord);
@@ -2927,7 +3075,7 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
else
Args.push_back(getValue(Record, OpNum, NextValueNo,
FTy->getParamType(i)));
- if (Args.back() == 0)
+ if (!Args.back())
return Error(InvalidRecord);
}
@@ -2947,8 +3095,13 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
I = CallInst::Create(Callee, Args);
InstructionList.push_back(I);
cast<CallInst>(I)->setCallingConv(
- static_cast<CallingConv::ID>(CCInfo>>1));
- cast<CallInst>(I)->setTailCall(CCInfo & 1);
+ static_cast<CallingConv::ID>((~(1U << 14) & CCInfo) >> 1));
+ CallInst::TailCallKind TCK = CallInst::TCK_None;
+ if (CCInfo & 1)
+ TCK = CallInst::TCK_Tail;
+ if (CCInfo & (1 << 14))
+ TCK = CallInst::TCK_MustTail;
+ cast<CallInst>(I)->setTailCallKind(TCK);
cast<CallInst>(I)->setAttributes(PAL);
break;
}
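The call record's CCInfo operand is now a small bitfield: bit 0 keeps the old tail-call flag, bit 14 marks musttail, and the calling convention still sits shifted left by one, so the reader masks bit 14 out before shifting. A standalone sketch of that layout follows; the enum and function names are invented, not LLVM's CallInst::TailCallKind.

#include <cassert>
#include <cstdint>
#include <cstdio>

enum TailKind { None, Tail, MustTail };

static void decodeCCInfo(uint64_t CCInfo, unsigned &CC, TailKind &TCK) {
  CC = unsigned((~(1ULL << 14) & CCInfo) >> 1);  // mask out bit 14 before shifting
  TCK = None;
  if (CCInfo & 1)
    TCK = Tail;
  if (CCInfo & (1 << 14))
    TCK = MustTail;
}

int main() {
  // A writer would emit (CC << 1) | isTailCall | (isMustTailCall << 14).
  uint64_t CCInfo = (8u << 1) | 1u | (1u << 14);
  unsigned CC = 0;
  TailKind TCK = None;
  decodeCCInfo(CCInfo, CC, TCK);
  assert(CC == 8 && TCK == MustTail);
  std::printf("cc=%u musttail=%d\n", CC, TCK == MustTail);
  return 0;
}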
@@ -2968,7 +3121,7 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
// Add instruction to end of current BB. If there is no current BB, reject
// this file.
- if (CurBB == 0) {
+ if (!CurBB) {
delete I;
return Error(InvalidInstructionWithNoBB);
}
@@ -2977,7 +3130,7 @@ error_code BitcodeReader::ParseFunctionBody(Function *F) {
// If this was a terminator instruction, move to the next block.
if (isa<TerminatorInst>(I)) {
++CurBBNo;
- CurBB = CurBBNo < FunctionBBs.size() ? FunctionBBs[CurBBNo] : 0;
+ CurBB = CurBBNo < FunctionBBs.size() ? FunctionBBs[CurBBNo] : nullptr;
}
// Non-void values get registered in the value table for future use.
@@ -2989,10 +3142,10 @@ OutOfRecordLoop:
// Check the function list for unresolved values.
if (Argument *A = dyn_cast<Argument>(ValueList.back())) {
- if (A->getParent() == 0) {
+ if (!A->getParent()) {
// We found at least one unresolved value. Nuke them all to avoid leaks.
for (unsigned i = ModuleValueListSize, e = ValueList.size(); i != e; ++i){
- if ((A = dyn_cast<Argument>(ValueList[i])) && A->getParent() == 0) {
+ if ((A = dyn_cast_or_null<Argument>(ValueList[i])) && !A->getParent()) {
A->replaceAllUsesWith(UndefValue::get(A->getType()));
delete A;
}
@@ -3027,27 +3180,29 @@ OutOfRecordLoop:
ValueList.shrinkTo(ModuleValueListSize);
MDValueList.shrinkTo(ModuleMDValueListSize);
std::vector<BasicBlock*>().swap(FunctionBBs);
- return error_code::success();
+ return std::error_code();
}
/// Find the function body in the bitcode stream
-error_code BitcodeReader::FindFunctionInStream(Function *F,
- DenseMap<Function*, uint64_t>::iterator DeferredFunctionInfoIterator) {
+std::error_code BitcodeReader::FindFunctionInStream(
+ Function *F,
+ DenseMap<Function *, uint64_t>::iterator DeferredFunctionInfoIterator) {
while (DeferredFunctionInfoIterator->second == 0) {
if (Stream.AtEndOfStream())
return Error(CouldNotFindFunctionInStream);
// ParseModule will parse the next body in the stream and set its
// position in the DeferredFunctionInfo map.
- if (error_code EC = ParseModule(true))
+ if (std::error_code EC = ParseModule(true))
return EC;
}
- return error_code::success();
+ return std::error_code();
}
//===----------------------------------------------------------------------===//
// GVMaterializer implementation
//===----------------------------------------------------------------------===//
+void BitcodeReader::releaseBuffer() { Buffer.release(); }
bool BitcodeReader::isMaterializable(const GlobalValue *GV) const {
if (const Function *F = dyn_cast<Function>(GV)) {
@@ -3057,39 +3212,39 @@ bool BitcodeReader::isMaterializable(const GlobalValue *GV) const {
return false;
}
-error_code BitcodeReader::Materialize(GlobalValue *GV) {
+std::error_code BitcodeReader::Materialize(GlobalValue *GV) {
Function *F = dyn_cast<Function>(GV);
// If it's not a function or is already material, ignore the request.
if (!F || !F->isMaterializable())
- return error_code::success();
+ return std::error_code();
DenseMap<Function*, uint64_t>::iterator DFII = DeferredFunctionInfo.find(F);
assert(DFII != DeferredFunctionInfo.end() && "Deferred function not found!");
// If its position is recorded as 0, its body is somewhere in the stream
// but we haven't seen it yet.
if (DFII->second == 0 && LazyStreamer)
- if (error_code EC = FindFunctionInStream(F, DFII))
+ if (std::error_code EC = FindFunctionInStream(F, DFII))
return EC;
// Move the bit stream to the saved position of the deferred function body.
Stream.JumpToBit(DFII->second);
- if (error_code EC = ParseFunctionBody(F))
+ if (std::error_code EC = ParseFunctionBody(F))
return EC;
// Upgrade any old intrinsic calls in the function.
for (UpgradedIntrinsicMap::iterator I = UpgradedIntrinsics.begin(),
E = UpgradedIntrinsics.end(); I != E; ++I) {
if (I->first != I->second) {
- for (Value::use_iterator UI = I->first->use_begin(),
- UE = I->first->use_end(); UI != UE; ) {
+ for (auto UI = I->first->user_begin(), UE = I->first->user_end();
+ UI != UE;) {
if (CallInst* CI = dyn_cast<CallInst>(*UI++))
UpgradeIntrinsicCall(CI, I->second);
}
}
}
- return error_code::success();
+ return std::error_code();
}
bool BitcodeReader::isDematerializable(const GlobalValue *GV) const {
@@ -3111,8 +3266,7 @@ void BitcodeReader::Dematerialize(GlobalValue *GV) {
F->deleteBody();
}
-
-error_code BitcodeReader::MaterializeModule(Module *M) {
+std::error_code BitcodeReader::MaterializeModule(Module *M) {
assert(M == TheModule &&
"Can only Materialize the Module this BitcodeReader is attached to.");
// Iterate over the module, deserializing any functions that are still on
@@ -3120,7 +3274,7 @@ error_code BitcodeReader::MaterializeModule(Module *M) {
for (Module::iterator F = TheModule->begin(), E = TheModule->end();
F != E; ++F) {
if (F->isMaterializable()) {
- if (error_code EC = Materialize(F))
+ if (std::error_code EC = Materialize(F))
return EC;
}
}
@@ -3137,8 +3291,8 @@ error_code BitcodeReader::MaterializeModule(Module *M) {
for (std::vector<std::pair<Function*, Function*> >::iterator I =
UpgradedIntrinsics.begin(), E = UpgradedIntrinsics.end(); I != E; ++I) {
if (I->first != I->second) {
- for (Value::use_iterator UI = I->first->use_begin(),
- UE = I->first->use_end(); UI != UE; ) {
+ for (auto UI = I->first->user_begin(), UE = I->first->user_end();
+ UI != UE;) {
if (CallInst* CI = dyn_cast<CallInst>(*UI++))
UpgradeIntrinsicCall(CI, I->second);
}
@@ -3153,16 +3307,16 @@ error_code BitcodeReader::MaterializeModule(Module *M) {
UpgradeInstWithTBAATag(InstsWithTBAATag[I]);
UpgradeDebugInfo(*M);
- return error_code::success();
+ return std::error_code();
}
-error_code BitcodeReader::InitStream() {
+std::error_code BitcodeReader::InitStream() {
if (LazyStreamer)
return InitLazyStream();
return InitStreamFromBuffer();
}
-error_code BitcodeReader::InitStreamFromBuffer() {
+std::error_code BitcodeReader::InitStreamFromBuffer() {
const unsigned char *BufPtr = (const unsigned char*)Buffer->getBufferStart();
const unsigned char *BufEnd = BufPtr+Buffer->getBufferSize();
@@ -3182,10 +3336,10 @@ error_code BitcodeReader::InitStreamFromBuffer() {
StreamFile.reset(new BitstreamReader(BufPtr, BufEnd));
Stream.init(*StreamFile);
- return error_code::success();
+ return std::error_code();
}
-error_code BitcodeReader::InitLazyStream() {
+std::error_code BitcodeReader::InitLazyStream() {
// Check and strip off the bitcode wrapper; BitstreamReader expects never to
// see it.
StreamingMemoryObject *Bytes = new StreamingMemoryObject(LazyStreamer);
@@ -3206,15 +3360,15 @@ error_code BitcodeReader::InitLazyStream() {
Bytes->dropLeadingBytes(bitcodeStart - buf);
Bytes->setKnownObjectSize(bitcodeEnd - bitcodeStart);
}
- return error_code::success();
+ return std::error_code();
}
namespace {
-class BitcodeErrorCategoryType : public _do_message {
- const char *name() const LLVM_OVERRIDE {
+class BitcodeErrorCategoryType : public std::error_category {
+ const char *name() const LLVM_NOEXCEPT override {
return "llvm.bitcode";
}
- std::string message(int IE) const LLVM_OVERRIDE {
+ std::string message(int IE) const override {
BitcodeReader::ErrorType E = static_cast<BitcodeReader::ErrorType>(IE);
switch (E) {
case BitcodeReader::BitcodeStreamInvalidSize:
@@ -3261,7 +3415,7 @@ class BitcodeErrorCategoryType : public _do_message {
};
}
-const error_category &BitcodeReader::BitcodeErrorCategory() {
+const std::error_category &BitcodeReader::BitcodeErrorCategory() {
static BitcodeErrorCategoryType O;
return O;
}
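The category object is what lets a plain std::error_code carry BitcodeReader's enum values: the code stores the integer plus a pointer to this singleton, and a default-constructed std::error_code is the success value, which is what the error_code::success() replacements throughout this patch rely on. Below is a minimal standalone sketch of the same pattern, with invented names rather than LLVM's.

#include <iostream>
#include <system_error>

namespace {
enum DemoError { DemoBadMagic = 1, DemoTruncated = 2 };

class DemoErrorCategory : public std::error_category {
  const char *name() const noexcept override { return "demo.bitcode"; }
  std::string message(int IE) const override {
    switch (IE) {
    case DemoBadMagic:  return "Invalid magic number";
    case DemoTruncated: return "Unexpected end of stream";
    }
    return "Unknown error";
  }
};
}

int main() {
  static DemoErrorCategory Cat;
  std::error_code EC(DemoBadMagic, Cat);
  std::cout << EC.category().name() << ": " << EC.message() << "\n";
  // A default-constructed std::error_code compares false: that is the
  // "success" value the patch uses in place of error_code::success().
  std::cout << std::boolalpha << static_cast<bool>(std::error_code()) << "\n";
  return 0;
}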
@@ -3272,21 +3426,16 @@ const error_category &BitcodeReader::BitcodeErrorCategory() {
/// getLazyBitcodeModule - lazy function-at-a-time loading from a file.
///
-Module *llvm::getLazyBitcodeModule(MemoryBuffer *Buffer,
- LLVMContext& Context,
- std::string *ErrMsg) {
+ErrorOr<Module *> llvm::getLazyBitcodeModule(MemoryBuffer *Buffer,
+ LLVMContext &Context) {
Module *M = new Module(Buffer->getBufferIdentifier(), Context);
BitcodeReader *R = new BitcodeReader(Buffer, Context);
M->setMaterializer(R);
- if (error_code EC = R->ParseBitcodeInto(M)) {
- if (ErrMsg)
- *ErrMsg = EC.message();
-
+ if (std::error_code EC = R->ParseBitcodeInto(M)) {
+ R->releaseBuffer(); // Never take ownership on error.
delete M; // Also deletes R.
- return 0;
+ return EC;
}
- // Have the BitcodeReader dtor delete 'Buffer'.
- R->setBufferOwned(true);
R->materializeForwardReferencedFunctions();
@@ -3301,31 +3450,25 @@ Module *llvm::getStreamedBitcodeModule(const std::string &name,
Module *M = new Module(name, Context);
BitcodeReader *R = new BitcodeReader(streamer, Context);
M->setMaterializer(R);
- if (error_code EC = R->ParseBitcodeInto(M)) {
+ if (std::error_code EC = R->ParseBitcodeInto(M)) {
if (ErrMsg)
*ErrMsg = EC.message();
delete M; // Also deletes R.
- return 0;
+ return nullptr;
}
- R->setBufferOwned(false); // no buffer to delete
return M;
}
-/// ParseBitcodeFile - Read the specified bitcode file, returning the module.
-/// If an error occurs, return null and fill in *ErrMsg if non-null.
-Module *llvm::ParseBitcodeFile(MemoryBuffer *Buffer, LLVMContext& Context,
- std::string *ErrMsg){
- Module *M = getLazyBitcodeModule(Buffer, Context, ErrMsg);
- if (!M) return 0;
-
- // Don't let the BitcodeReader dtor delete 'Buffer', regardless of whether
- // there was an error.
- static_cast<BitcodeReader*>(M->getMaterializer())->setBufferOwned(false);
-
+ErrorOr<Module *> llvm::parseBitcodeFile(MemoryBuffer *Buffer,
+ LLVMContext &Context) {
+ ErrorOr<Module *> ModuleOrErr = getLazyBitcodeModule(Buffer, Context);
+ if (!ModuleOrErr)
+ return ModuleOrErr;
+ Module *M = ModuleOrErr.get();
// Read in the entire module, and destroy the BitcodeReader.
- if (M->MaterializeAllPermanently(ErrMsg)) {
+ if (std::error_code EC = M->materializeAllPermanently(true)) {
delete M;
- return 0;
+ return EC;
}
// TODO: Restore the use-lists to the in-memory state when the bitcode was
@@ -3335,17 +3478,12 @@ Module *llvm::ParseBitcodeFile(MemoryBuffer *Buffer, LLVMContext& Context,
}
std::string llvm::getBitcodeTargetTriple(MemoryBuffer *Buffer,
- LLVMContext& Context,
- std::string *ErrMsg) {
+ LLVMContext &Context) {
BitcodeReader *R = new BitcodeReader(Buffer, Context);
- // Don't let the BitcodeReader dtor delete 'Buffer'.
- R->setBufferOwned(false);
-
- std::string Triple("");
- if (error_code EC = R->ParseTriple(Triple))
- if (ErrMsg)
- *ErrMsg = EC.message();
-
+ ErrorOr<std::string> Triple = R->parseTriple();
+ R->releaseBuffer();
delete R;
- return Triple;
+ if (Triple.getError())
+ return "";
+ return Triple.get();
}
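With the ErrMsg out-parameters gone, callers of these entry points branch on the ErrorOr result (or the empty string from getBitcodeTargetTriple) instead. The fragment below is a minimal caller sketch, assuming LLVM 3.5-era headers, a MemoryBuffer* the caller has already created, and with buffer ownership handling deliberately elided; the helper name is hypothetical.

#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <system_error>
using namespace llvm;

// Hypothetical helper: Buf is provided (and owned) by the caller.
Module *loadModuleOrNull(MemoryBuffer *Buf, LLVMContext &Ctx) {
  // The triple helper now swallows errors and returns "" on failure.
  errs() << "target triple: " << getBitcodeTargetTriple(Buf, Ctx) << "\n";

  ErrorOr<Module *> ModuleOrErr = parseBitcodeFile(Buf, Ctx);
  if (std::error_code EC = ModuleOrErr.getError()) {
    errs() << "bitcode error: " << EC.message() << "\n";
    return nullptr;
  }
  return ModuleOrErr.get();
}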
diff --git a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.h b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.h
index c5d345b..1d4869a 100644
--- a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.h
+++ b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.h
@@ -17,15 +17,16 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/Bitcode/BitstreamReader.h"
#include "llvm/Bitcode/LLVMBitCodes.h"
-#include "llvm/GVMaterializer.h"
#include "llvm/IR/Attributes.h"
+#include "llvm/IR/GVMaterializer.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/system_error.h"
-#include "llvm/Support/ValueHandle.h"
+#include "llvm/IR/ValueHandle.h"
+#include <system_error>
#include <vector>
namespace llvm {
+ class Comdat;
class MemoryBuffer;
class LLVMContext;
@@ -125,9 +126,8 @@ public:
class BitcodeReader : public GVMaterializer {
LLVMContext &Context;
Module *TheModule;
- MemoryBuffer *Buffer;
- bool BufferOwned;
- OwningPtr<BitstreamReader> StreamFile;
+ std::unique_ptr<MemoryBuffer> Buffer;
+ std::unique_ptr<BitstreamReader> StreamFile;
BitstreamCursor Stream;
DataStreamer *LazyStreamer;
uint64_t NextUnreadBit;
@@ -136,6 +136,7 @@ class BitcodeReader : public GVMaterializer {
std::vector<Type*> TypeList;
BitcodeReaderValueList ValueList;
BitcodeReaderMDValueList MDValueList;
+ std::vector<Comdat *> ComdatList;
SmallVector<Instruction *, 64> InstructionList;
SmallVector<SmallVector<uint64_t, 64>, 64> UseListRecords;
@@ -193,7 +194,7 @@ class BitcodeReader : public GVMaterializer {
/// not need this flag.
bool UseRelativeIDs;
- static const error_category &BitcodeErrorCategory();
+ static const std::error_category &BitcodeErrorCategory();
public:
enum ErrorType {
@@ -219,47 +220,39 @@ public:
InvalidValue // Invalid version, inst number, attr number, etc
};
- error_code Error(ErrorType E) {
- return error_code(E, BitcodeErrorCategory());
+ std::error_code Error(ErrorType E) {
+ return std::error_code(E, BitcodeErrorCategory());
}
explicit BitcodeReader(MemoryBuffer *buffer, LLVMContext &C)
- : Context(C), TheModule(0), Buffer(buffer), BufferOwned(false),
- LazyStreamer(0), NextUnreadBit(0), SeenValueSymbolTable(false),
- ValueList(C), MDValueList(C),
- SeenFirstFunctionBody(false), UseRelativeIDs(false) {
- }
+ : Context(C), TheModule(nullptr), Buffer(buffer), LazyStreamer(nullptr),
+ NextUnreadBit(0), SeenValueSymbolTable(false), ValueList(C),
+ MDValueList(C), SeenFirstFunctionBody(false), UseRelativeIDs(false) {}
explicit BitcodeReader(DataStreamer *streamer, LLVMContext &C)
- : Context(C), TheModule(0), Buffer(0), BufferOwned(false),
- LazyStreamer(streamer), NextUnreadBit(0), SeenValueSymbolTable(false),
- ValueList(C), MDValueList(C),
- SeenFirstFunctionBody(false), UseRelativeIDs(false) {
- }
- ~BitcodeReader() {
- FreeState();
- }
+ : Context(C), TheModule(nullptr), Buffer(nullptr), LazyStreamer(streamer),
+ NextUnreadBit(0), SeenValueSymbolTable(false), ValueList(C),
+ MDValueList(C), SeenFirstFunctionBody(false), UseRelativeIDs(false) {}
+ ~BitcodeReader() { FreeState(); }
void materializeForwardReferencedFunctions();
void FreeState();
- /// setBufferOwned - If this is true, the reader will destroy the MemoryBuffer
- /// when the reader is destroyed.
- void setBufferOwned(bool Owned) { BufferOwned = Owned; }
+ void releaseBuffer() override;
- virtual bool isMaterializable(const GlobalValue *GV) const;
- virtual bool isDematerializable(const GlobalValue *GV) const;
- virtual error_code Materialize(GlobalValue *GV);
- virtual error_code MaterializeModule(Module *M);
- virtual void Dematerialize(GlobalValue *GV);
+ bool isMaterializable(const GlobalValue *GV) const override;
+ bool isDematerializable(const GlobalValue *GV) const override;
+ std::error_code Materialize(GlobalValue *GV) override;
+ std::error_code MaterializeModule(Module *M) override;
+ void Dematerialize(GlobalValue *GV) override;
/// @brief Main interface to parsing a bitcode buffer.
/// @returns true if an error occurred.
- error_code ParseBitcodeInto(Module *M);
+ std::error_code ParseBitcodeInto(Module *M);
/// @brief Cheap mechanism to just extract module triple
/// @returns true if an error occurred.
- error_code ParseTriple(std::string &Triple);
+ ErrorOr<std::string> parseTriple();
static uint64_t decodeSignRotatedValue(uint64_t V);
@@ -271,7 +264,7 @@ private:
return ValueList.getValueFwdRef(ID, Ty);
}
BasicBlock *getBasicBlock(unsigned ID) const {
- if (ID >= FunctionBBs.size()) return 0; // Invalid ID
+ if (ID >= FunctionBBs.size()) return nullptr; // Invalid ID
return FunctionBBs[ID];
}
AttributeSet getAttributes(unsigned i) const {
@@ -293,15 +286,15 @@ private:
if (ValNo < InstNum) {
// If this is not a forward reference, just return the value we already
// have.
- ResVal = getFnValueByID(ValNo, 0);
- return ResVal == 0;
+ ResVal = getFnValueByID(ValNo, nullptr);
+ return ResVal == nullptr;
} else if (Slot == Record.size()) {
return true;
}
unsigned TypeNo = (unsigned)Record[Slot++];
ResVal = getFnValueByID(ValNo, getTypeByID(TypeNo));
- return ResVal == 0;
+ return ResVal == nullptr;
}
/// popValue - Read a value out of the specified record from slot 'Slot'.
@@ -320,14 +313,14 @@ private:
bool getValue(SmallVectorImpl<uint64_t> &Record, unsigned Slot,
unsigned InstNum, Type *Ty, Value *&ResVal) {
ResVal = getValue(Record, Slot, InstNum, Ty);
- return ResVal == 0;
+ return ResVal == nullptr;
}
/// getValue -- Version of getValue that returns ResVal directly,
/// or 0 if there is an error.
Value *getValue(SmallVectorImpl<uint64_t> &Record, unsigned Slot,
unsigned InstNum, Type *Ty) {
- if (Slot == Record.size()) return 0;
+ if (Slot == Record.size()) return nullptr;
unsigned ValNo = (unsigned)Record[Slot];
// Adjust the ValNo, if it was encoded relative to the InstNum.
if (UseRelativeIDs)
@@ -338,7 +331,7 @@ private:
/// getValueSigned -- Like getValue, but decodes signed VBRs.
Value *getValueSigned(SmallVectorImpl<uint64_t> &Record, unsigned Slot,
unsigned InstNum, Type *Ty) {
- if (Slot == Record.size()) return 0;
+ if (Slot == Record.size()) return nullptr;
unsigned ValNo = (unsigned)decodeSignRotatedValue(Record[Slot]);
// Adjust the ValNo, if it was encoded relative to the InstNum.
if (UseRelativeIDs)
@@ -346,28 +339,29 @@ private:
return getFnValueByID(ValNo, Ty);
}
- error_code ParseAttrKind(uint64_t Code, Attribute::AttrKind *Kind);
- error_code ParseModule(bool Resume);
- error_code ParseAttributeBlock();
- error_code ParseAttributeGroupBlock();
- error_code ParseTypeTable();
- error_code ParseTypeTableBody();
-
- error_code ParseValueSymbolTable();
- error_code ParseConstants();
- error_code RememberAndSkipFunctionBody();
- error_code ParseFunctionBody(Function *F);
- error_code GlobalCleanup();
- error_code ResolveGlobalAndAliasInits();
- error_code ParseMetadata();
- error_code ParseMetadataAttachment();
- error_code ParseModuleTriple(std::string &Triple);
- error_code ParseUseLists();
- error_code InitStream();
- error_code InitStreamFromBuffer();
- error_code InitLazyStream();
- error_code FindFunctionInStream(Function *F,
- DenseMap<Function*, uint64_t>::iterator DeferredFunctionInfoIterator);
+ std::error_code ParseAttrKind(uint64_t Code, Attribute::AttrKind *Kind);
+ std::error_code ParseModule(bool Resume);
+ std::error_code ParseAttributeBlock();
+ std::error_code ParseAttributeGroupBlock();
+ std::error_code ParseTypeTable();
+ std::error_code ParseTypeTableBody();
+
+ std::error_code ParseValueSymbolTable();
+ std::error_code ParseConstants();
+ std::error_code RememberAndSkipFunctionBody();
+ std::error_code ParseFunctionBody(Function *F);
+ std::error_code GlobalCleanup();
+ std::error_code ResolveGlobalAndAliasInits();
+ std::error_code ParseMetadata();
+ std::error_code ParseMetadataAttachment();
+ ErrorOr<std::string> parseModuleTriple();
+ std::error_code ParseUseLists();
+ std::error_code InitStream();
+ std::error_code InitStreamFromBuffer();
+ std::error_code InitLazyStream();
+ std::error_code FindFunctionInStream(
+ Function *F,
+ DenseMap<Function *, uint64_t>::iterator DeferredFunctionInfoIterator);
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Bitcode/Reader/BitstreamReader.cpp b/contrib/llvm/lib/Bitcode/Reader/BitstreamReader.cpp
index 1fd9abd..72451ec 100644
--- a/contrib/llvm/lib/Bitcode/Reader/BitstreamReader.cpp
+++ b/contrib/llvm/lib/Bitcode/Reader/BitstreamReader.cpp
@@ -97,7 +97,7 @@ void BitstreamCursor::readAbbreviatedField(const BitCodeAbbrevOp &Op,
switch (Op.getEncoding()) {
case BitCodeAbbrevOp::Array:
case BitCodeAbbrevOp::Blob:
- assert(0 && "Should not reach here");
+ llvm_unreachable("Should not reach here");
case BitCodeAbbrevOp::Fixed:
Vals.push_back(Read((unsigned)Op.getEncodingData()));
break;
@@ -117,7 +117,7 @@ void BitstreamCursor::skipAbbreviatedField(const BitCodeAbbrevOp &Op) {
switch (Op.getEncoding()) {
case BitCodeAbbrevOp::Array:
case BitCodeAbbrevOp::Blob:
- assert(0 && "Should not reach here");
+ llvm_unreachable("Should not reach here");
case BitCodeAbbrevOp::Fixed:
(void)Read((unsigned)Op.getEncodingData());
break;
@@ -315,7 +315,7 @@ bool BitstreamCursor::ReadBlockInfoBlock() {
if (EnterSubBlock(bitc::BLOCKINFO_BLOCK_ID)) return true;
SmallVector<uint64_t, 64> Record;
- BitstreamReader::BlockInfo *CurBlockInfo = 0;
+ BitstreamReader::BlockInfo *CurBlockInfo = nullptr;
// Read all the records for this module.
while (1) {
diff --git a/contrib/llvm/lib/Bitcode/Writer/BitWriter.cpp b/contrib/llvm/lib/Bitcode/Writer/BitWriter.cpp
index cd1ada2..3747122 100644
--- a/contrib/llvm/lib/Bitcode/Writer/BitWriter.cpp
+++ b/contrib/llvm/lib/Bitcode/Writer/BitWriter.cpp
@@ -10,6 +10,7 @@
#include "llvm-c/BitWriter.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/IR/Module.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -18,7 +19,7 @@ using namespace llvm;
int LLVMWriteBitcodeToFile(LLVMModuleRef M, const char *Path) {
std::string ErrorInfo;
- raw_fd_ostream OS(Path, ErrorInfo, sys::fs::F_Binary);
+ raw_fd_ostream OS(Path, ErrorInfo, sys::fs::F_None);
if (!ErrorInfo.empty())
return -1;
diff --git a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index 4cfc6bd..b2e4948 100644
--- a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -169,12 +169,16 @@ static uint64_t getAttrKindEncoding(Attribute::AttrKind Kind) {
return bitc::ATTR_KIND_BUILTIN;
case Attribute::ByVal:
return bitc::ATTR_KIND_BY_VAL;
+ case Attribute::InAlloca:
+ return bitc::ATTR_KIND_IN_ALLOCA;
case Attribute::Cold:
return bitc::ATTR_KIND_COLD;
case Attribute::InlineHint:
return bitc::ATTR_KIND_INLINE_HINT;
case Attribute::InReg:
return bitc::ATTR_KIND_IN_REG;
+ case Attribute::JumpTable:
+ return bitc::ATTR_KIND_JUMP_TABLE;
case Attribute::MinSize:
return bitc::ATTR_KIND_MIN_SIZE;
case Attribute::Naked:
@@ -195,6 +199,10 @@ static uint64_t getAttrKindEncoding(Attribute::AttrKind Kind) {
return bitc::ATTR_KIND_NO_INLINE;
case Attribute::NonLazyBind:
return bitc::ATTR_KIND_NON_LAZY_BIND;
+ case Attribute::NonNull:
+ return bitc::ATTR_KIND_NON_NULL;
+ case Attribute::Dereferenceable:
+ return bitc::ATTR_KIND_DEREFERENCEABLE;
case Attribute::NoRedZone:
return bitc::ATTR_KIND_NO_RED_ZONE;
case Attribute::NoReturn:
@@ -266,7 +274,7 @@ static void WriteAttributeGroupTable(const ValueEnumerator &VE,
if (Attr.isEnumAttribute()) {
Record.push_back(0);
Record.push_back(getAttrKindEncoding(Attr.getKindAsEnum()));
- } else if (Attr.isAlignAttribute()) {
+ } else if (Attr.isIntAttribute()) {
Record.push_back(1);
Record.push_back(getAttrKindEncoding(Attr.getKindAsEnum()));
Record.push_back(Attr.getValueAsInt());
@@ -382,7 +390,6 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
unsigned Code = 0;
switch (T->getTypeID()) {
- default: llvm_unreachable("Unknown type!");
case Type::VoidTyID: Code = bitc::TYPE_CODE_VOID; break;
case Type::HalfTyID: Code = bitc::TYPE_CODE_HALF; break;
case Type::FloatTyID: Code = bitc::TYPE_CODE_FLOAT; break;
@@ -473,29 +480,25 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
Stream.ExitBlock();
}
-static unsigned getEncodedLinkage(const GlobalValue *GV) {
- switch (GV->getLinkage()) {
+static unsigned getEncodedLinkage(const GlobalValue &GV) {
+ switch (GV.getLinkage()) {
case GlobalValue::ExternalLinkage: return 0;
case GlobalValue::WeakAnyLinkage: return 1;
case GlobalValue::AppendingLinkage: return 2;
case GlobalValue::InternalLinkage: return 3;
case GlobalValue::LinkOnceAnyLinkage: return 4;
- case GlobalValue::DLLImportLinkage: return 5;
- case GlobalValue::DLLExportLinkage: return 6;
case GlobalValue::ExternalWeakLinkage: return 7;
case GlobalValue::CommonLinkage: return 8;
case GlobalValue::PrivateLinkage: return 9;
case GlobalValue::WeakODRLinkage: return 10;
case GlobalValue::LinkOnceODRLinkage: return 11;
case GlobalValue::AvailableExternallyLinkage: return 12;
- case GlobalValue::LinkerPrivateLinkage: return 13;
- case GlobalValue::LinkerPrivateWeakLinkage: return 14;
}
llvm_unreachable("Invalid linkage");
}
-static unsigned getEncodedVisibility(const GlobalValue *GV) {
- switch (GV->getVisibility()) {
+static unsigned getEncodedVisibility(const GlobalValue &GV) {
+ switch (GV.getVisibility()) {
case GlobalValue::DefaultVisibility: return 0;
case GlobalValue::HiddenVisibility: return 1;
case GlobalValue::ProtectedVisibility: return 2;
@@ -503,8 +506,17 @@ static unsigned getEncodedVisibility(const GlobalValue *GV) {
llvm_unreachable("Invalid visibility");
}
-static unsigned getEncodedThreadLocalMode(const GlobalVariable *GV) {
- switch (GV->getThreadLocalMode()) {
+static unsigned getEncodedDLLStorageClass(const GlobalValue &GV) {
+ switch (GV.getDLLStorageClass()) {
+ case GlobalValue::DefaultStorageClass: return 0;
+ case GlobalValue::DLLImportStorageClass: return 1;
+ case GlobalValue::DLLExportStorageClass: return 2;
+ }
+ llvm_unreachable("Invalid DLL storage class");
+}
+
+static unsigned getEncodedThreadLocalMode(const GlobalValue &GV) {
+ switch (GV.getThreadLocalMode()) {
case GlobalVariable::NotThreadLocal: return 0;
case GlobalVariable::GeneralDynamicTLSModel: return 1;
case GlobalVariable::LocalDynamicTLSModel: return 2;
@@ -514,6 +526,35 @@ static unsigned getEncodedThreadLocalMode(const GlobalVariable *GV) {
llvm_unreachable("Invalid TLS model");
}
+static unsigned getEncodedComdatSelectionKind(const Comdat &C) {
+ switch (C.getSelectionKind()) {
+ case Comdat::Any:
+ return bitc::COMDAT_SELECTION_KIND_ANY;
+ case Comdat::ExactMatch:
+ return bitc::COMDAT_SELECTION_KIND_EXACT_MATCH;
+ case Comdat::Largest:
+ return bitc::COMDAT_SELECTION_KIND_LARGEST;
+ case Comdat::NoDuplicates:
+ return bitc::COMDAT_SELECTION_KIND_NO_DUPLICATES;
+ case Comdat::SameSize:
+ return bitc::COMDAT_SELECTION_KIND_SAME_SIZE;
+ }
+ llvm_unreachable("Invalid selection kind");
+}
+
+static void writeComdats(const ValueEnumerator &VE, BitstreamWriter &Stream) {
+ SmallVector<uint8_t, 64> Vals;
+ for (const Comdat *C : VE.getComdats()) {
+ // COMDAT: [selection_kind, name]
+ Vals.push_back(getEncodedComdatSelectionKind(*C));
+ Vals.push_back(C->getName().size());
+ for (char Chr : C->getName())
+ Vals.push_back((unsigned char)Chr);
+ Stream.EmitRecord(bitc::MODULE_CODE_COMDAT, Vals, /*AbbrevToUse=*/0);
+ Vals.clear();
+ }
+}
+
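writeComdats above flattens each comdat into a single record: the selection kind, then the name length, then one value per name byte. Here is a standalone round-trip of that layout, using placeholder kind values rather than the bitc::COMDAT_SELECTION_KIND_* constants.

#include <cassert>
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

// Encode/decode the [selection_kind, name_length, name bytes...] layout.
static std::vector<uint64_t> encodeComdat(unsigned Kind, const std::string &Name) {
  std::vector<uint64_t> Vals{Kind, Name.size()};
  for (char Chr : Name)
    Vals.push_back((unsigned char)Chr);
  return Vals;
}

static std::pair<unsigned, std::string> decodeComdat(const std::vector<uint64_t> &Vals) {
  unsigned Kind = unsigned(Vals[0]);
  std::string Name;
  for (uint64_t I = 0, Len = Vals[1]; I != Len; ++I)
    Name += char(Vals[2 + I]);
  return {Kind, Name};
}

int main() {
  std::pair<unsigned, std::string> RoundTrip = decodeComdat(encodeComdat(1, "foo"));
  assert(RoundTrip.first == 1 && RoundTrip.second == "foo");
  return 0;
}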
// Emit top-level description of module, including target triple, inline asm,
// descriptors for global variables, and function prototype info.
static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE,
@@ -522,9 +563,9 @@ static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE,
if (!M->getTargetTriple().empty())
WriteStringRecord(bitc::MODULE_CODE_TRIPLE, M->getTargetTriple(),
0/*TODO*/, Stream);
- if (!M->getDataLayout().empty())
- WriteStringRecord(bitc::MODULE_CODE_DATALAYOUT, M->getDataLayout(),
- 0/*TODO*/, Stream);
+ const std::string &DL = M->getDataLayoutStr();
+ if (!DL.empty())
+ WriteStringRecord(bitc::MODULE_CODE_DATALAYOUT, DL, 0 /*TODO*/, Stream);
if (!M->getModuleInlineAsm().empty())
WriteStringRecord(bitc::MODULE_CODE_ASM, M->getModuleInlineAsm(),
0/*TODO*/, Stream);
@@ -535,36 +576,35 @@ static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE,
std::map<std::string, unsigned> GCMap;
unsigned MaxAlignment = 0;
unsigned MaxGlobalType = 0;
- for (Module::const_global_iterator GV = M->global_begin(),E = M->global_end();
- GV != E; ++GV) {
- MaxAlignment = std::max(MaxAlignment, GV->getAlignment());
- MaxGlobalType = std::max(MaxGlobalType, VE.getTypeID(GV->getType()));
- if (GV->hasSection()) {
+ for (const GlobalValue &GV : M->globals()) {
+ MaxAlignment = std::max(MaxAlignment, GV.getAlignment());
+ MaxGlobalType = std::max(MaxGlobalType, VE.getTypeID(GV.getType()));
+ if (GV.hasSection()) {
// Give section names unique ID's.
- unsigned &Entry = SectionMap[GV->getSection()];
+ unsigned &Entry = SectionMap[GV.getSection()];
if (!Entry) {
- WriteStringRecord(bitc::MODULE_CODE_SECTIONNAME, GV->getSection(),
+ WriteStringRecord(bitc::MODULE_CODE_SECTIONNAME, GV.getSection(),
0/*TODO*/, Stream);
Entry = SectionMap.size();
}
}
}
- for (Module::const_iterator F = M->begin(), E = M->end(); F != E; ++F) {
- MaxAlignment = std::max(MaxAlignment, F->getAlignment());
- if (F->hasSection()) {
+ for (const Function &F : *M) {
+ MaxAlignment = std::max(MaxAlignment, F.getAlignment());
+ if (F.hasSection()) {
// Give section names unique ID's.
- unsigned &Entry = SectionMap[F->getSection()];
+ unsigned &Entry = SectionMap[F.getSection()];
if (!Entry) {
- WriteStringRecord(bitc::MODULE_CODE_SECTIONNAME, F->getSection(),
+ WriteStringRecord(bitc::MODULE_CODE_SECTIONNAME, F.getSection(),
0/*TODO*/, Stream);
Entry = SectionMap.size();
}
}
- if (F->hasGC()) {
+ if (F.hasGC()) {
// Same for GC names.
- unsigned &Entry = GCMap[F->getGC()];
+ unsigned &Entry = GCMap[F.getGC()];
if (!Entry) {
- WriteStringRecord(bitc::MODULE_CODE_GCNAME, F->getGC(),
+ WriteStringRecord(bitc::MODULE_CODE_GCNAME, F.getGC(),
0/*TODO*/, Stream);
Entry = GCMap.size();
}
@@ -600,27 +640,30 @@ static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE,
// Emit the global variable information.
SmallVector<unsigned, 64> Vals;
- for (Module::const_global_iterator GV = M->global_begin(),E = M->global_end();
- GV != E; ++GV) {
+ for (const GlobalVariable &GV : M->globals()) {
unsigned AbbrevToUse = 0;
// GLOBALVAR: [type, isconst, initid,
// linkage, alignment, section, visibility, threadlocal,
- // unnamed_addr, externally_initialized]
- Vals.push_back(VE.getTypeID(GV->getType()));
- Vals.push_back(GV->isConstant());
- Vals.push_back(GV->isDeclaration() ? 0 :
- (VE.getValueID(GV->getInitializer()) + 1));
+ // unnamed_addr, externally_initialized, dllstorageclass]
+ Vals.push_back(VE.getTypeID(GV.getType()));
+ Vals.push_back(GV.isConstant());
+ Vals.push_back(GV.isDeclaration() ? 0 :
+ (VE.getValueID(GV.getInitializer()) + 1));
Vals.push_back(getEncodedLinkage(GV));
- Vals.push_back(Log2_32(GV->getAlignment())+1);
- Vals.push_back(GV->hasSection() ? SectionMap[GV->getSection()] : 0);
- if (GV->isThreadLocal() ||
- GV->getVisibility() != GlobalValue::DefaultVisibility ||
- GV->hasUnnamedAddr() || GV->isExternallyInitialized()) {
+ Vals.push_back(Log2_32(GV.getAlignment())+1);
+ Vals.push_back(GV.hasSection() ? SectionMap[GV.getSection()] : 0);
+ if (GV.isThreadLocal() ||
+ GV.getVisibility() != GlobalValue::DefaultVisibility ||
+ GV.hasUnnamedAddr() || GV.isExternallyInitialized() ||
+ GV.getDLLStorageClass() != GlobalValue::DefaultStorageClass ||
+ GV.hasComdat()) {
Vals.push_back(getEncodedVisibility(GV));
Vals.push_back(getEncodedThreadLocalMode(GV));
- Vals.push_back(GV->hasUnnamedAddr());
- Vals.push_back(GV->isExternallyInitialized());
+ Vals.push_back(GV.hasUnnamedAddr());
+ Vals.push_back(GV.isExternallyInitialized());
+ Vals.push_back(getEncodedDLLStorageClass(GV));
+ Vals.push_back(GV.hasComdat() ? VE.getComdatID(GV.getComdat()) : 0);
} else {
AbbrevToUse = SimpleGVarAbbrev;
}
@@ -630,21 +673,23 @@ static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE,
}
// Emit the function proto information.
- for (Module::const_iterator F = M->begin(), E = M->end(); F != E; ++F) {
+ for (const Function &F : *M) {
// FUNCTION: [type, callingconv, isproto, linkage, paramattrs, alignment,
// section, visibility, gc, unnamed_addr, prefix]
- Vals.push_back(VE.getTypeID(F->getType()));
- Vals.push_back(F->getCallingConv());
- Vals.push_back(F->isDeclaration());
+ Vals.push_back(VE.getTypeID(F.getType()));
+ Vals.push_back(F.getCallingConv());
+ Vals.push_back(F.isDeclaration());
Vals.push_back(getEncodedLinkage(F));
- Vals.push_back(VE.getAttributeID(F->getAttributes()));
- Vals.push_back(Log2_32(F->getAlignment())+1);
- Vals.push_back(F->hasSection() ? SectionMap[F->getSection()] : 0);
+ Vals.push_back(VE.getAttributeID(F.getAttributes()));
+ Vals.push_back(Log2_32(F.getAlignment())+1);
+ Vals.push_back(F.hasSection() ? SectionMap[F.getSection()] : 0);
Vals.push_back(getEncodedVisibility(F));
- Vals.push_back(F->hasGC() ? GCMap[F->getGC()] : 0);
- Vals.push_back(F->hasUnnamedAddr());
- Vals.push_back(F->hasPrefixData() ? (VE.getValueID(F->getPrefixData()) + 1)
+ Vals.push_back(F.hasGC() ? GCMap[F.getGC()] : 0);
+ Vals.push_back(F.hasUnnamedAddr());
+ Vals.push_back(F.hasPrefixData() ? (VE.getValueID(F.getPrefixData()) + 1)
: 0);
+ Vals.push_back(getEncodedDLLStorageClass(F));
+ Vals.push_back(F.hasComdat() ? VE.getComdatID(F.getComdat()) : 0);
unsigned AbbrevToUse = 0;
Stream.EmitRecord(bitc::MODULE_CODE_FUNCTION, Vals, AbbrevToUse);
@@ -652,13 +697,15 @@ static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE,
}
// Emit the alias information.
- for (Module::const_alias_iterator AI = M->alias_begin(), E = M->alias_end();
- AI != E; ++AI) {
+ for (const GlobalAlias &A : M->aliases()) {
// ALIAS: [alias type, aliasee val#, linkage, visibility]
- Vals.push_back(VE.getTypeID(AI->getType()));
- Vals.push_back(VE.getValueID(AI->getAliasee()));
- Vals.push_back(getEncodedLinkage(AI));
- Vals.push_back(getEncodedVisibility(AI));
+ Vals.push_back(VE.getTypeID(A.getType()));
+ Vals.push_back(VE.getValueID(A.getAliasee()));
+ Vals.push_back(getEncodedLinkage(A));
+ Vals.push_back(getEncodedVisibility(A));
+ Vals.push_back(getEncodedDLLStorageClass(A));
+ Vals.push_back(getEncodedThreadLocalMode(A));
+ Vals.push_back(A.hasUnnamedAddr());
unsigned AbbrevToUse = 0;
Stream.EmitRecord(bitc::MODULE_CODE_ALIAS, Vals, AbbrevToUse);
Vals.clear();
@@ -907,7 +954,7 @@ static void WriteConstants(unsigned FirstVal, unsigned LastVal,
SmallVector<uint64_t, 64> Record;
const ValueEnumerator::ValueList &Vals = VE.getValues();
- Type *LastTy = 0;
+ Type *LastTy = nullptr;
for (unsigned i = FirstVal; i != LastVal; ++i) {
const Value *V = Vals[i].first;
// If we need to switch types, do so now.
@@ -1077,12 +1124,14 @@ static void WriteConstants(unsigned FirstVal, unsigned LastVal,
Code = bitc::CST_CODE_CE_EXTRACTELT;
Record.push_back(VE.getTypeID(C->getOperand(0)->getType()));
Record.push_back(VE.getValueID(C->getOperand(0)));
+ Record.push_back(VE.getTypeID(C->getOperand(1)->getType()));
Record.push_back(VE.getValueID(C->getOperand(1)));
break;
case Instruction::InsertElement:
Code = bitc::CST_CODE_CE_INSERTELT;
Record.push_back(VE.getValueID(C->getOperand(0)));
Record.push_back(VE.getValueID(C->getOperand(1)));
+ Record.push_back(VE.getTypeID(C->getOperand(2)->getType()));
Record.push_back(VE.getValueID(C->getOperand(2)));
break;
case Instruction::ShuffleVector:
@@ -1243,13 +1292,13 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
case Instruction::ExtractElement:
Code = bitc::FUNC_CODE_INST_EXTRACTELT;
PushValueAndType(I.getOperand(0), InstID, Vals, VE);
- pushValue(I.getOperand(1), InstID, Vals, VE);
+ PushValueAndType(I.getOperand(1), InstID, Vals, VE);
break;
case Instruction::InsertElement:
Code = bitc::FUNC_CODE_INST_INSERTELT;
PushValueAndType(I.getOperand(0), InstID, Vals, VE);
pushValue(I.getOperand(1), InstID, Vals, VE);
- pushValue(I.getOperand(2), InstID, Vals, VE);
+ PushValueAndType(I.getOperand(2), InstID, Vals, VE);
break;
case Instruction::ShuffleVector:
Code = bitc::FUNC_CODE_INST_SHUFFLEVEC;
@@ -1384,13 +1433,20 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
break;
}
- case Instruction::Alloca:
+ case Instruction::Alloca: {
Code = bitc::FUNC_CODE_INST_ALLOCA;
Vals.push_back(VE.getTypeID(I.getType()));
Vals.push_back(VE.getTypeID(I.getOperand(0)->getType()));
Vals.push_back(VE.getValueID(I.getOperand(0))); // size.
- Vals.push_back(Log2_32(cast<AllocaInst>(I).getAlignment())+1);
+ const AllocaInst &AI = cast<AllocaInst>(I);
+ unsigned AlignRecord = Log2_32(AI.getAlignment()) + 1;
+ assert(Log2_32(Value::MaximumAlignment) + 1 < 1 << 5 &&
+ "not enough bits for maximum alignment");
+      assert(AlignRecord < 1 << 5 && "alignment record does not fit in 5 bits");
+ AlignRecord |= AI.isUsedWithInAlloca() << 5;
+ Vals.push_back(AlignRecord);
break;
+ }
case Instruction::Load:
if (cast<LoadInst>(I).isAtomic()) {
@@ -1429,9 +1485,12 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
pushValue(I.getOperand(2), InstID, Vals, VE); // newval.
Vals.push_back(cast<AtomicCmpXchgInst>(I).isVolatile());
Vals.push_back(GetEncodedOrdering(
- cast<AtomicCmpXchgInst>(I).getOrdering()));
+ cast<AtomicCmpXchgInst>(I).getSuccessOrdering()));
Vals.push_back(GetEncodedSynchScope(
cast<AtomicCmpXchgInst>(I).getSynchScope()));
+ Vals.push_back(GetEncodedOrdering(
+ cast<AtomicCmpXchgInst>(I).getFailureOrdering()));
+ Vals.push_back(cast<AtomicCmpXchgInst>(I).isWeak());
break;
case Instruction::AtomicRMW:
Code = bitc::FUNC_CODE_INST_ATOMICRMW;
@@ -1457,7 +1516,8 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
Code = bitc::FUNC_CODE_INST_CALL;
Vals.push_back(VE.getAttributeID(CI.getAttributes()));
- Vals.push_back((CI.getCallingConv() << 1) | unsigned(CI.isTailCall()));
+ Vals.push_back((CI.getCallingConv() << 1) | unsigned(CI.isTailCall()) |
+ unsigned(CI.isMustTailCall()) << 14);
PushValueAndType(CI.getCalledValue(), InstID, Vals, VE); // Callee
// Emit value #'s for the fixed parameters.
@@ -1795,17 +1855,10 @@ static void WriteUseList(const Value *V, const ValueEnumerator &VE,
return;
// Make a copy of the in-memory use-list for sorting.
- unsigned UseListSize = std::distance(V->use_begin(), V->use_end());
- SmallVector<const User*, 8> UseList;
- UseList.reserve(UseListSize);
- for (Value::const_use_iterator I = V->use_begin(), E = V->use_end();
- I != E; ++I) {
- const User *U = *I;
- UseList.push_back(U);
- }
+ SmallVector<const User*, 8> UserList(V->user_begin(), V->user_end());
// Sort the copy based on the order read by the BitcodeReader.
- std::sort(UseList.begin(), UseList.end(), bitcodereader_order);
+ std::sort(UserList.begin(), UserList.end(), bitcodereader_order);
// TODO: Generate a diff between the BitcodeWriter in-memory use-list and the
// sorted list (i.e., the expected BitcodeReader in-memory use-list).
@@ -1903,6 +1956,8 @@ static void WriteModule(const Module *M, BitstreamWriter &Stream) {
// Emit information describing all of the types in the module.
WriteTypeTable(VE, Stream);
+ writeComdats(VE, Stream);
+
// Emit top-level description of module, including target triple, inline asm,
// descriptors for global variables, and function prototype info.
WriteModuleInfo(M, VE, Stream);
diff --git a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp
index e5e76e2..4167f6d 100644
--- a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp
+++ b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp
@@ -1,4 +1,4 @@
-//===--- Bitcode/Writer/BitcodeWriterPass.cpp - Bitcode Writer ------------===//
+//===- BitcodeWriterPass.cpp - Bitcode writing pass -----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,10 +11,18 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/Bitcode/BitcodeWriterPass.h"
#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
using namespace llvm;
+PreservedAnalyses BitcodeWriterPass::run(Module *M) {
+ WriteBitcodeToFile(M, OS);
+ return PreservedAnalyses::all();
+}
+
namespace {
class WriteBitcodePass : public ModulePass {
raw_ostream &OS; // raw_ostream to print on
@@ -23,9 +31,9 @@ namespace {
explicit WriteBitcodePass(raw_ostream &o)
: ModulePass(ID), OS(o) {}
- const char *getPassName() const { return "Bitcode Writer"; }
+ const char *getPassName() const override { return "Bitcode Writer"; }
- bool runOnModule(Module &M) {
+ bool runOnModule(Module &M) override {
WriteBitcodeToFile(&M, OS);
return false;
}
@@ -34,8 +42,6 @@ namespace {
char WriteBitcodePass::ID = 0;
-/// createBitcodeWriterPass - Create and return a pass that writes the module
-/// to the specified ostream.
ModulePass *llvm::createBitcodeWriterPass(raw_ostream &Str) {
return new WriteBitcodePass(Str);
}
diff --git a/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
index a164104..15f8034 100644
--- a/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
+++ b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
@@ -73,37 +73,34 @@ ValueEnumerator::ValueEnumerator(const Module *M) {
SmallVector<std::pair<unsigned, MDNode*>, 8> MDs;
// Enumerate types used by function bodies and argument lists.
- for (Module::const_iterator F = M->begin(), E = M->end(); F != E; ++F) {
-
- for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
- I != E; ++I)
- EnumerateType(I->getType());
-
- for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
- for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E;++I){
- for (User::const_op_iterator OI = I->op_begin(), E = I->op_end();
- OI != E; ++OI) {
- if (MDNode *MD = dyn_cast<MDNode>(*OI))
+ for (const Function &F : *M) {
+ for (const Argument &A : F.args())
+ EnumerateType(A.getType());
+
+ for (const BasicBlock &BB : F)
+ for (const Instruction &I : BB) {
+ for (const Use &Op : I.operands()) {
+ if (MDNode *MD = dyn_cast<MDNode>(&Op))
if (MD->isFunctionLocal() && MD->getFunction())
// These will get enumerated during function-incorporation.
continue;
- EnumerateOperandType(*OI);
+ EnumerateOperandType(Op);
}
- EnumerateType(I->getType());
- if (const CallInst *CI = dyn_cast<CallInst>(I))
+ EnumerateType(I.getType());
+ if (const CallInst *CI = dyn_cast<CallInst>(&I))
EnumerateAttributes(CI->getAttributes());
- else if (const InvokeInst *II = dyn_cast<InvokeInst>(I))
+ else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I))
EnumerateAttributes(II->getAttributes());
// Enumerate metadata attached with this instruction.
MDs.clear();
- I->getAllMetadataOtherThanDebugLoc(MDs);
+ I.getAllMetadataOtherThanDebugLoc(MDs);
for (unsigned i = 0, e = MDs.size(); i != e; ++i)
EnumerateMetadata(MDs[i].second);
- if (!I->getDebugLoc().isUnknown()) {
+ if (!I.getDebugLoc().isUnknown()) {
MDNode *Scope, *IA;
- I->getDebugLoc().getScopeAndInlinedAt(Scope, IA, I->getContext());
+ I.getDebugLoc().getScopeAndInlinedAt(Scope, IA, I.getContext());
if (Scope) EnumerateMetadata(Scope);
if (IA) EnumerateMetadata(IA);
}
@@ -120,6 +117,12 @@ unsigned ValueEnumerator::getInstructionID(const Instruction *Inst) const {
return I->second;
}
+unsigned ValueEnumerator::getComdatID(const Comdat *C) const {
+ unsigned ComdatID = Comdats.idFor(C);
+ assert(ComdatID && "Comdat not found!");
+ return ComdatID;
+}
+
void ValueEnumerator::setInstructionID(const Instruction *I) {
InstructionMap[I] = InstructionCount++;
}
@@ -159,12 +162,11 @@ void ValueEnumerator::print(raw_ostream &OS, const ValueMapType &Map,
V->dump();
OS << " Uses(" << std::distance(V->use_begin(),V->use_end()) << "):";
- for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
- UI != UE; ++UI) {
- if (UI != V->use_begin())
+ for (const Use &U : V->uses()) {
+ if (&U != &*V->use_begin())
OS << ",";
- if((*UI)->hasName())
- OS << " " << (*UI)->getName();
+ if(U->hasName())
+ OS << " " << U->getName();
else
OS << " [null]";
@@ -173,29 +175,19 @@ void ValueEnumerator::print(raw_ostream &OS, const ValueMapType &Map,
}
}
-// Optimize constant ordering.
-namespace {
- struct CstSortPredicate {
- ValueEnumerator &VE;
- explicit CstSortPredicate(ValueEnumerator &ve) : VE(ve) {}
- bool operator()(const std::pair<const Value*, unsigned> &LHS,
- const std::pair<const Value*, unsigned> &RHS) {
- // Sort by plane.
- if (LHS.first->getType() != RHS.first->getType())
- return VE.getTypeID(LHS.first->getType()) <
- VE.getTypeID(RHS.first->getType());
- // Then by frequency.
- return LHS.second > RHS.second;
- }
- };
-}
-
/// OptimizeConstants - Reorder constant pool for denser encoding.
void ValueEnumerator::OptimizeConstants(unsigned CstStart, unsigned CstEnd) {
if (CstStart == CstEnd || CstStart+1 == CstEnd) return;
- CstSortPredicate P(*this);
- std::stable_sort(Values.begin()+CstStart, Values.begin()+CstEnd, P);
+ std::stable_sort(Values.begin() + CstStart, Values.begin() + CstEnd,
+ [this](const std::pair<const Value *, unsigned> &LHS,
+ const std::pair<const Value *, unsigned> &RHS) {
+ // Sort by plane.
+ if (LHS.first->getType() != RHS.first->getType())
+ return getTypeID(LHS.first->getType()) < getTypeID(RHS.first->getType());
+ // Then by frequency.
+ return LHS.second > RHS.second;
+ });
// Ensure that integer and vector of integer constants are at the start of the
// constant pool. This is important so that GEP structure indices come before
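
The hunk above folds the old CstSortPredicate functor into a lambda that captures `this`. A minimal standalone sketch of the same two-key ordering, with a plain integer standing in for the type-ID "plane" (names here are illustrative only, not from the tree):

#include <algorithm>
#include <utility>
#include <vector>

// TypeID stands in for getTypeID(getType()); the unsigned is use frequency.
using Entry = std::pair<int, unsigned>;

void optimizeConstantsSketch(std::vector<Entry> &Values, unsigned CstStart,
                             unsigned CstEnd) {
  if (CstStart == CstEnd || CstStart + 1 == CstEnd)
    return;
  std::stable_sort(Values.begin() + CstStart, Values.begin() + CstEnd,
                   [](const Entry &LHS, const Entry &RHS) {
                     // Sort by plane (type) first.
                     if (LHS.first != RHS.first)
                       return LHS.first < RHS.first;
                     // Then by descending use frequency within a plane.
                     return LHS.second > RHS.second;
                   });
}
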
@@ -321,6 +313,10 @@ void ValueEnumerator::EnumerateValue(const Value *V) {
return;
}
+ if (auto *GO = dyn_cast<GlobalObject>(V))
+ if (const Comdat *C = GO->getComdat())
+ Comdats.insert(C);
+
// Enumerate the type of this value.
EnumerateType(V->getType());
diff --git a/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h
index d1ca15f..1c9f38e 100644
--- a/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h
+++ b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/UniqueVector.h"
#include "llvm/IR/Attributes.h"
#include <vector>
@@ -25,6 +26,7 @@ class Type;
class Value;
class Instruction;
class BasicBlock;
+class Comdat;
class Function;
class Module;
class MDNode;
@@ -48,6 +50,10 @@ private:
typedef DenseMap<const Value*, unsigned> ValueMapType;
ValueMapType ValueMap;
ValueList Values;
+
+ typedef UniqueVector<const Comdat *> ComdatSetType;
+ ComdatSetType Comdats;
+
ValueList MDValues;
SmallVector<const MDNode *, 8> FunctionLocalMDs;
ValueMapType MDValueMap;
@@ -139,6 +145,9 @@ public:
return AttributeGroups;
}
+ const ComdatSetType &getComdats() const { return Comdats; }
+ unsigned getComdatID(const Comdat *C) const;
+
/// getGlobalBasicBlockID - This returns the function-specific ID for the
/// specified basic block. This is relatively expensive information, so it
/// should only be used by rare constructs such as address-of-label.
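
The header now keeps enumerated Comdats in a UniqueVector, whose IDs are 1-based so that 0 can mean "not found" in getComdatID(). A rough standalone approximation of that behavior using standard containers (the sketch names are invented here):

#include <cassert>
#include <map>
#include <vector>

// Rough stand-in for llvm::UniqueVector: insertion assigns stable 1-based
// IDs, and idFor() returns 0 for values that were never inserted.
template <typename T> class UniqueVectorSketch {
  std::map<T, unsigned> Map;
  std::vector<T> Vector;

public:
  unsigned insert(const T &V) {
    unsigned &ID = Map[V];
    if (ID == 0) {            // first occurrence: hand out the next ID
      Vector.push_back(V);
      ID = Vector.size();     // IDs start at 1, so 0 can signal "not found"
    }
    return ID;
  }
  unsigned idFor(const T &V) const {
    auto It = Map.find(V);
    return It == Map.end() ? 0 : It->second;
  }
};

// Mirrors the shape of the new ValueEnumerator::getComdatID(): asserting on
// a nonzero ID works only because the container never hands out ID 0.
unsigned getComdatIDSketch(const UniqueVectorSketch<const void *> &Comdats,
                           const void *C) {
  unsigned ComdatID = Comdats.idFor(C);
  assert(ComdatID && "Comdat not found!");
  return ComdatID;
}
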
diff --git a/contrib/llvm/lib/Bitcode/module.modulemap b/contrib/llvm/lib/Bitcode/module.modulemap
new file mode 100644
index 0000000..7df1a0a
--- /dev/null
+++ b/contrib/llvm/lib/Bitcode/module.modulemap
@@ -0,0 +1 @@
+module Bitcode { requires cplusplus umbrella "." module * { export * } }
diff --git a/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
index 2ee7767..44345ad 100644
--- a/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
+++ b/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
@@ -14,7 +14,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "post-RA-sched"
#include "AggressiveAntiDepBreaker.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -29,6 +28,8 @@
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "post-RA-sched"
+
// If DebugDiv > 0 then only break antidep with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("agg-antidep-debugdiv",
@@ -121,7 +122,7 @@ AggressiveAntiDepBreaker(MachineFunction& MFi,
TII(MF.getTarget().getInstrInfo()),
TRI(MF.getTarget().getRegisterInfo()),
RegClassInfo(RCI),
- State(NULL) {
+ State(nullptr) {
/* Collect a bitset of all registers that are only broken if they
are on the critical path. */
for (unsigned i = 0, e = CriticalPathRCs.size(); i < e; ++i) {
@@ -144,7 +145,7 @@ AggressiveAntiDepBreaker::~AggressiveAntiDepBreaker() {
}
void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
- assert(State == NULL);
+ assert(!State);
State = new AggressiveAntiDepState(TRI->getNumRegs(), BB);
bool IsReturnBlock = (!BB->empty() && BB->back().isReturn());
@@ -169,7 +170,7 @@ void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
// callee-saved register that is not saved in the prolog.
const MachineFrameInfo *MFI = MF.getFrameInfo();
BitVector Pristine = MFI->getPristineRegs(BB);
- for (const uint16_t *I = TRI->getCalleeSavedRegs(&MF); *I; ++I) {
+ for (const MCPhysReg *I = TRI->getCalleeSavedRegs(&MF); *I; ++I) {
unsigned Reg = *I;
if (!IsReturnBlock && !Pristine.test(Reg)) continue;
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
@@ -183,7 +184,7 @@ void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
void AggressiveAntiDepBreaker::FinishBlock() {
delete State;
- State = NULL;
+ State = nullptr;
}
void AggressiveAntiDepBreaker::Observe(MachineInstr *MI, unsigned Count,
@@ -230,13 +231,13 @@ bool AggressiveAntiDepBreaker::IsImplicitDefUse(MachineInstr *MI,
if (Reg == 0)
return false;
- MachineOperand *Op = NULL;
+ MachineOperand *Op = nullptr;
if (MO.isDef())
Op = MI->findRegisterUseOperand(Reg, true);
else
Op = MI->findRegisterDefOperand(Reg);
- return((Op != NULL) && Op->isImplicit());
+ return(Op && Op->isImplicit());
}
void AggressiveAntiDepBreaker::GetPassthruRegs(MachineInstr *MI,
@@ -273,10 +274,10 @@ static void AntiDepEdges(const SUnit *SU, std::vector<const SDep*>& Edges) {
/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static const SUnit *CriticalPathStep(const SUnit *SU) {
- const SDep *Next = 0;
+ const SDep *Next = nullptr;
unsigned NextDepth = 0;
// Find the predecessor edge with the greatest depth.
- if (SU != 0) {
+ if (SU) {
for (SUnit::const_pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
P != PE; ++P) {
const SUnit *PredSU = P->getSUnit();
@@ -292,7 +293,7 @@ static const SUnit *CriticalPathStep(const SUnit *SU) {
}
}
- return (Next) ? Next->getSUnit() : 0;
+ return (Next) ? Next->getSUnit() : nullptr;
}
void AggressiveAntiDepBreaker::HandleLastUse(unsigned Reg, unsigned KillIdx,
@@ -309,8 +310,8 @@ void AggressiveAntiDepBreaker::HandleLastUse(unsigned Reg, unsigned KillIdx,
DefIndices[Reg] = ~0u;
RegRefs.erase(Reg);
State->LeaveGroup(Reg);
- DEBUG(if (header != NULL) {
- dbgs() << header << TRI->getName(Reg); header = NULL; });
+ DEBUG(if (header) {
+ dbgs() << header << TRI->getName(Reg); header = nullptr; });
DEBUG(dbgs() << "->g" << State->GetGroup(Reg) << tag);
}
// Repeat for subregisters.
@@ -321,14 +322,14 @@ void AggressiveAntiDepBreaker::HandleLastUse(unsigned Reg, unsigned KillIdx,
DefIndices[SubregReg] = ~0u;
RegRefs.erase(SubregReg);
State->LeaveGroup(SubregReg);
- DEBUG(if (header != NULL) {
- dbgs() << header << TRI->getName(Reg); header = NULL; });
+ DEBUG(if (header) {
+ dbgs() << header << TRI->getName(Reg); header = nullptr; });
DEBUG(dbgs() << " " << TRI->getName(SubregReg) << "->g" <<
State->GetGroup(SubregReg) << tag);
}
}
- DEBUG(if ((header == NULL) && (footer != NULL)) dbgs() << footer);
+ DEBUG(if (!header && footer) dbgs() << footer);
}
void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI,
@@ -382,7 +383,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI,
}
// Note register reference...
- const TargetRegisterClass *RC = NULL;
+ const TargetRegisterClass *RC = nullptr;
if (i < MI->getDesc().getNumOperands())
RC = TII->getRegClass(MI->getDesc(), i, TRI, MF);
AggressiveAntiDepState::RegisterReference RR = { &MO, RC };
@@ -403,8 +404,18 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI,
continue;
// Update def for Reg and aliases.
- for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
+ // We need to be careful here not to define already-live super registers.
+ // If the super register is already live, then this definition is not
+ // a definition of the whole super register (just a partial insertion
+ // into it). Earlier subregister definitions (which we've not yet visited
+ // because we're iterating bottom-up) need to be linked to the same group
+ // as this definition.
+ if (TRI->isSuperRegister(Reg, *AI) && State->IsLive(*AI))
+ continue;
+
DefIndices[*AI] = Count;
+ }
}
}
@@ -456,7 +467,7 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr *MI,
}
// Note register reference...
- const TargetRegisterClass *RC = NULL;
+ const TargetRegisterClass *RC = nullptr;
if (i < MI->getDesc().getNumOperands())
RC = TII->getRegClass(MI->getDesc(), i, TRI, MF);
AggressiveAntiDepState::RegisterReference RR = { &MO, RC };
@@ -506,7 +517,7 @@ BitVector AggressiveAntiDepBreaker::GetRenameRegisters(unsigned Reg) {
AggressiveAntiDepState::RegisterReference>::iterator Q = Range.first,
QE = Range.second; Q != QE; ++Q) {
const TargetRegisterClass *RC = Q->second.RC;
- if (RC == NULL) continue;
+ if (!RC) continue;
BitVector RCBV = TRI->getAllocatableSet(MF, RC);
if (first) {
@@ -571,7 +582,9 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
unsigned Reg = Regs[i];
if (Reg == SuperReg) continue;
bool IsSub = TRI->isSubRegister(SuperReg, Reg);
- assert(IsSub && "Expecting group subregister");
+ // FIXME: remove this once PR18663 has been properly fixed. For now,
+ // return a conservative answer:
+ // assert(IsSub && "Expecting group subregister");
if (!IsSub)
return false;
}
@@ -724,8 +737,8 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
// Track progress along the critical path through the SUnit graph as
// we walk the instructions. This is needed for regclasses that only
// break critical-path anti-dependencies.
- const SUnit *CriticalPathSU = 0;
- MachineInstr *CriticalPathMI = 0;
+ const SUnit *CriticalPathSU = nullptr;
+ MachineInstr *CriticalPathMI = nullptr;
if (CriticalPathSet.any()) {
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
const SUnit *SU = &SUnits[i];
@@ -778,10 +791,10 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
// If MI is not on the critical path, then we don't rename
// registers in the CriticalPathSet.
- BitVector *ExcludeRegs = NULL;
+ BitVector *ExcludeRegs = nullptr;
if (MI == CriticalPathMI) {
CriticalPathSU = CriticalPathStep(CriticalPathSU);
- CriticalPathMI = (CriticalPathSU) ? CriticalPathSU->getInstr() : 0;
+ CriticalPathMI = (CriticalPathSU) ? CriticalPathSU->getInstr() : nullptr;
} else if (CriticalPathSet.any()) {
ExcludeRegs = &CriticalPathSet;
}
@@ -805,7 +818,7 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
// Don't break anti-dependencies on non-allocatable registers.
DEBUG(dbgs() << " (non-allocatable)\n");
continue;
- } else if ((ExcludeRegs != NULL) && ExcludeRegs->test(AntiDepReg)) {
+ } else if (ExcludeRegs && ExcludeRegs->test(AntiDepReg)) {
// Don't break anti-dependencies for critical path registers
// if not on the critical path
DEBUG(dbgs() << " (not critical-path)\n");
@@ -819,9 +832,8 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
} else {
// No anti-dep breaking for implicit deps
MachineOperand *AntiDepOp = MI->findRegisterDefOperand(AntiDepReg);
- assert(AntiDepOp != NULL &&
- "Can't find index for defined register operand");
- if ((AntiDepOp == NULL) || AntiDepOp->isImplicit()) {
+ assert(AntiDepOp && "Can't find index for defined register operand");
+ if (!AntiDepOp || AntiDepOp->isImplicit()) {
DEBUG(dbgs() << " (implicit)\n");
continue;
}
diff --git a/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.h b/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.h
index 6683630..2ab9d89 100644
--- a/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.h
+++ b/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.h
@@ -136,7 +136,7 @@ class RegisterClassInfo;
~AggressiveAntiDepBreaker();
/// Start - Initialize anti-dep breaking for a new basic block.
- void StartBlock(MachineBasicBlock *BB);
+ void StartBlock(MachineBasicBlock *BB) override;
/// BreakAntiDependencies - Identify anti-dependencies along the critical
/// path
@@ -146,15 +146,16 @@ class RegisterClassInfo;
MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End,
unsigned InsertPosIndex,
- DbgValueVector &DbgValues);
+ DbgValueVector &DbgValues) override;
/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
- void Observe(MachineInstr *MI, unsigned Count, unsigned InsertPosIndex);
+ void Observe(MachineInstr *MI, unsigned Count,
+ unsigned InsertPosIndex) override;
/// Finish - Finish anti-dep breaking for a basic block.
- void FinishBlock();
+ void FinishBlock() override;
private:
/// Keep track of a position in the allocation order for each regclass.
@@ -169,7 +170,8 @@ class RegisterClassInfo;
void GetPassthruRegs(MachineInstr *MI, std::set<unsigned>& PassthruRegs);
void HandleLastUse(unsigned Reg, unsigned KillIdx, const char *tag,
- const char *header =NULL, const char *footer =NULL);
+ const char *header = nullptr,
+ const char *footer = nullptr);
void PrescanInstruction(MachineInstr *MI, unsigned Count,
std::set<unsigned>& PassthruRegs);
diff --git a/contrib/llvm/lib/CodeGen/AllocationOrder.cpp b/contrib/llvm/lib/CodeGen/AllocationOrder.cpp
index 3fa1f8f..dc9bcff 100644
--- a/contrib/llvm/lib/CodeGen/AllocationOrder.cpp
+++ b/contrib/llvm/lib/CodeGen/AllocationOrder.cpp
@@ -14,7 +14,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "regalloc"
#include "AllocationOrder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -25,6 +24,8 @@
using namespace llvm;
+#define DEBUG_TYPE "regalloc"
+
// Compare VirtRegMap::getRegAllocPref().
AllocationOrder::AllocationOrder(unsigned VirtReg,
const VirtRegMap &VRM,
diff --git a/contrib/llvm/lib/CodeGen/AllocationOrder.h b/contrib/llvm/lib/CodeGen/AllocationOrder.h
index aed461a..64ff2a7 100644
--- a/contrib/llvm/lib/CodeGen/AllocationOrder.h
+++ b/contrib/llvm/lib/CodeGen/AllocationOrder.h
@@ -45,10 +45,12 @@ public:
/// Return the next physical register in the allocation order, or 0.
/// It is safe to call next() again after it returned 0, it will keep
/// returning 0 until rewind() is called.
- unsigned next() {
+ unsigned next(unsigned Limit = 0) {
if (Pos < 0)
return Hints.end()[Pos++];
- while (Pos < int(Order.size())) {
+ if (!Limit)
+ Limit = Order.size();
+ while (Pos < int(Limit)) {
unsigned Reg = Order[Pos++];
if (!isHint(Reg))
return Reg;
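
AllocationOrder::next() gains an optional Limit argument so callers can restrict the walk to a prefix of the allocation order. A simplified, self-contained model of that control flow (hint handling omitted; names are illustrative):

#include <utility>
#include <vector>

// Simplified model of AllocationOrder::next(Limit): walk the order from the
// current position, but never past Limit (0 means "use the whole order").
class OrderWalkerSketch {
  std::vector<unsigned> Order;
  unsigned Pos = 0;

public:
  explicit OrderWalkerSketch(std::vector<unsigned> O) : Order(std::move(O)) {}

  unsigned next(unsigned Limit = 0) {
    if (!Limit)
      Limit = static_cast<unsigned>(Order.size());
    while (Pos < Limit) {
      unsigned Reg = Order[Pos++];
      if (Reg != 0)           // stand-in for the !isHint(Reg) check
        return Reg;
    }
    return 0;                 // exhausted: keeps returning 0 until rewind()
  }

  void rewind() { Pos = 0; }
};
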
diff --git a/contrib/llvm/lib/CodeGen/Analysis.cpp b/contrib/llvm/lib/CodeGen/Analysis.cpp
index 1600c67..0eabee3 100644
--- a/contrib/llvm/lib/CodeGen/Analysis.cpp
+++ b/contrib/llvm/lib/CodeGen/Analysis.cpp
@@ -7,13 +7,14 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines several CodeGen-specific LLVM IR analysis utilties.
+// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
@@ -46,7 +47,7 @@ unsigned llvm::ComputeLinearIndex(Type *Ty,
EI != EE; ++EI) {
if (Indices && *Indices == unsigned(EI - EB))
return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
- CurIndex = ComputeLinearIndex(*EI, 0, 0, CurIndex);
+ CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
}
return CurIndex;
}
@@ -56,7 +57,7 @@ unsigned llvm::ComputeLinearIndex(Type *Ty,
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
if (Indices && *Indices == i)
return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
- CurIndex = ComputeLinearIndex(EltTy, 0, 0, CurIndex);
+ CurIndex = ComputeLinearIndex(EltTy, nullptr, nullptr, CurIndex);
}
return CurIndex;
}
@@ -228,7 +229,7 @@ static const Value *getNoopInput(const Value *V,
// through.
const Instruction *I = dyn_cast<Instruction>(V);
if (!I || I->getNumOperands() == 0) return V;
- const Value *NoopInput = 0;
+ const Value *NoopInput = nullptr;
Value *Op = I->getOperand(0);
if (isa<BitCastInst>(I)) {
@@ -474,8 +475,7 @@ static bool nextRealType(SmallVectorImpl<CompositeType *> &SubTypes,
/// between it and the return.
///
/// This function only tests target-independent requirements.
-bool llvm::isInTailCallPosition(ImmutableCallSite CS,
- const TargetLowering &TLI) {
+bool llvm::isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM) {
const Instruction *I = CS.getInstruction();
const BasicBlock *ExitBB = I->getParent();
const TerminatorInst *Term = ExitBB->getTerminator();
@@ -490,16 +490,14 @@ bool llvm::isInTailCallPosition(ImmutableCallSite CS,
// longjmp on x86), it can end up causing miscompilation that has not
// been fully understood.
if (!Ret &&
- (!TLI.getTargetMachine().Options.GuaranteedTailCallOpt ||
- !isa<UnreachableInst>(Term)))
+ (!TM.Options.GuaranteedTailCallOpt || !isa<UnreachableInst>(Term)))
return false;
// If I will have a chain, make sure no other instruction that will have a
// chain interposes between I and the return.
if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
!isSafeToSpeculativelyExecute(I))
- for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
- --BBI) {
+ for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
if (&*BBI == I)
break;
// Debug info intrinsics do not get in the way of tail call optimization.
@@ -510,7 +508,8 @@ bool llvm::isInTailCallPosition(ImmutableCallSite CS,
return false;
}
- return returnTypeIsEligibleForTailCall(ExitBB->getParent(), I, Ret, TLI);
+ return returnTypeIsEligibleForTailCall(ExitBB->getParent(), I, Ret,
+ *TM.getTargetLowering());
}
bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp
index 5d82dd9..251f5ef 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp
@@ -20,6 +20,7 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
@@ -30,76 +31,83 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/FormattedStream.h"
-#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
-static cl::opt<bool>
-EnableARMEHABIDescriptors("arm-enable-ehabi-descriptors", cl::Hidden,
- cl::desc("Generate ARM EHABI tables with unwinding descriptors"),
- cl::init(false));
-
-
ARMException::ARMException(AsmPrinter *A)
- : DwarfException(A) {}
+ : EHStreamer(A), shouldEmitCFI(false) {}
ARMException::~ARMException() {}
ARMTargetStreamer &ARMException::getTargetStreamer() {
- MCTargetStreamer &TS = Asm->OutStreamer.getTargetStreamer();
+ MCTargetStreamer &TS = *Asm->OutStreamer.getTargetStreamer();
return static_cast<ARMTargetStreamer &>(TS);
}
-void ARMException::EndModule() {
+/// endModule - Emit all exception information that should come after the
+/// content.
+void ARMException::endModule() {
+ if (shouldEmitCFI)
+ Asm->OutStreamer.EmitCFISections(false, true);
}
-/// BeginFunction - Gather pre-function exception information. Assumes it's
+/// beginFunction - Gather pre-function exception information. Assumes it's
/// being emitted immediately after the function entry point.
-void ARMException::BeginFunction(const MachineFunction *MF) {
- getTargetStreamer().emitFnStart();
- if (Asm->MF->getFunction()->needsUnwindTableEntry())
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_func_begin",
- Asm->getFunctionNumber()));
+void ARMException::beginFunction(const MachineFunction *MF) {
+ if (Asm->MAI->getExceptionHandlingType() == ExceptionHandling::ARM)
+ getTargetStreamer().emitFnStart();
+ Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_func_begin",
+ Asm->getFunctionNumber()));
+ // See if we need call frame info.
+ AsmPrinter::CFIMoveType MoveType = Asm->needsCFIMoves();
+ assert(MoveType != AsmPrinter::CFI_M_EH &&
+ "non-EH CFI not yet supported in prologue with EHABI lowering");
+ if (MoveType == AsmPrinter::CFI_M_Debug) {
+ shouldEmitCFI = true;
+ Asm->OutStreamer.EmitCFIStartProc(false);
+ }
}
-/// EndFunction - Gather and emit post-function exception information.
+/// endFunction - Gather and emit post-function exception information.
///
-void ARMException::EndFunction() {
+void ARMException::endFunction(const MachineFunction *) {
+ if (shouldEmitCFI)
+ Asm->OutStreamer.EmitCFIEndProc();
+
+ // Map all labels and get rid of any dead landing pads.
+ MMI->TidyLandingPads();
+
ARMTargetStreamer &ATS = getTargetStreamer();
- if (!Asm->MF->getFunction()->needsUnwindTableEntry())
+ if (!Asm->MF->getFunction()->needsUnwindTableEntry() &&
+ MMI->getLandingPads().empty())
ATS.emitCantUnwind();
else {
Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_func_end",
Asm->getFunctionNumber()));
+ if (!MMI->getLandingPads().empty()) {
+ // Emit references to personality.
+ if (const Function * Personality =
+ MMI->getPersonalities()[MMI->getPersonalityIndex()]) {
+ MCSymbol *PerSym = Asm->getSymbol(Personality);
+ Asm->OutStreamer.EmitSymbolAttribute(PerSym, MCSA_Global);
+ ATS.emitPersonality(PerSym);
+ }
- if (EnableARMEHABIDescriptors) {
- // Map all labels and get rid of any dead landing pads.
- MMI->TidyLandingPads();
-
- if (!MMI->getLandingPads().empty()) {
- // Emit references to personality.
- if (const Function * Personality =
- MMI->getPersonalities()[MMI->getPersonalityIndex()]) {
- MCSymbol *PerSym = Asm->getSymbol(Personality);
- Asm->OutStreamer.EmitSymbolAttribute(PerSym, MCSA_Global);
- ATS.emitPersonality(PerSym);
- }
-
- // Emit .handlerdata directive.
- ATS.emitHandlerData();
+ // Emit .handlerdata directive.
+ ATS.emitHandlerData();
- // Emit actual exception table
- EmitExceptionTable();
- }
+ // Emit actual exception table
+ emitExceptionTable();
}
}
- ATS.emitFnEnd();
+ if (Asm->MAI->getExceptionHandlingType() == ExceptionHandling::ARM)
+ ATS.emitFnEnd();
}
-void ARMException::EmitTypeInfos(unsigned TTypeEncoding) {
+void ARMException::emitTypeInfos(unsigned TTypeEncoding) {
const std::vector<const GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
@@ -136,7 +144,7 @@ void ARMException::EmitTypeInfos(unsigned TTypeEncoding) {
Asm->OutStreamer.AddComment("FilterInfo " + Twine(Entry));
}
- Asm->EmitTTypeReference((TypeID == 0 ? 0 : TypeInfos[TypeID - 1]),
+ Asm->EmitTTypeReference((TypeID == 0 ? nullptr : TypeInfos[TypeID - 1]),
TTypeEncoding);
}
}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AddressPool.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/AddressPool.cpp
new file mode 100644
index 0000000..8dab5e5
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AddressPool.cpp
@@ -0,0 +1,45 @@
+//===-- llvm/CodeGen/AddressPool.cpp - Dwarf Debug Framework ---*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AddressPool.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+
+using namespace llvm;
+
+class MCExpr;
+
+unsigned AddressPool::getIndex(const MCSymbol *Sym, bool TLS) {
+ HasBeenUsed = true;
+ auto IterBool =
+ Pool.insert(std::make_pair(Sym, AddressPoolEntry(Pool.size(), TLS)));
+ return IterBool.first->second.Number;
+}
+
+// Emit addresses into the section given.
+void AddressPool::emit(AsmPrinter &Asm, const MCSection *AddrSection) {
+ if (Pool.empty())
+ return;
+
+ // Start the dwarf addr section.
+ Asm.OutStreamer.SwitchSection(AddrSection);
+
+ // Order the address pool entries by ID
+ SmallVector<const MCExpr *, 64> Entries(Pool.size());
+
+ for (const auto &I : Pool)
+ Entries[I.second.Number] =
+ I.second.TLS
+ ? Asm.getObjFileLowering().getDebugThreadLocalSymbol(I.first)
+ : MCSymbolRefExpr::Create(I.first, Asm.OutContext);
+
+ for (const MCExpr *Entry : Entries)
+ Asm.OutStreamer.EmitValue(Entry, Asm.getDataLayout().getPointerSize());
+}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AddressPool.h b/contrib/llvm/lib/CodeGen/AsmPrinter/AddressPool.h
new file mode 100644
index 0000000..42757d7
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AddressPool.h
@@ -0,0 +1,52 @@
+//===-- llvm/CodeGen/AddressPool.h - Dwarf Debug Framework -----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CODEGEN_ASMPRINTER_ADDRESSPOOL_H__
+#define CODEGEN_ASMPRINTER_ADDRESSPOOL_H__
+
+#include "llvm/ADT/DenseMap.h"
+
+namespace llvm {
+class MCSection;
+class MCSymbol;
+class AsmPrinter;
+// Collection of addresses for this unit and assorted labels.
+// A Symbol->unsigned mapping of addresses used by indirect
+// references.
+class AddressPool {
+ struct AddressPoolEntry {
+ unsigned Number;
+ bool TLS;
+ AddressPoolEntry(unsigned Number, bool TLS) : Number(Number), TLS(TLS) {}
+ };
+ DenseMap<const MCSymbol *, AddressPoolEntry> Pool;
+
+ /// Record whether the AddressPool has been queried for an address index since
+ /// the last "resetUsedFlag" call. Used to implement type unit fallback - a
+ /// type that references addresses cannot be placed in a type unit when using
+ /// fission.
+ bool HasBeenUsed;
+
+public:
+ AddressPool() : HasBeenUsed(false) {}
+
+ /// \brief Returns the index into the address pool with the given
+ /// label/symbol.
+ unsigned getIndex(const MCSymbol *Sym, bool TLS = false);
+
+ void emit(AsmPrinter &Asm, const MCSection *AddrSection);
+
+ bool isEmpty() { return Pool.empty(); }
+
+ bool hasBeenUsed() const { return HasBeenUsed; }
+
+ void resetUsedFlag() { HasBeenUsed = false; }
+};
+}
+#endif
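
The new AddressPool hands out a dense index the first time a symbol is requested and later emits the pool in index order. A standalone sketch of that index-on-first-use pattern, with strings standing in for MCSymbol pointers (names invented for illustration):

#include <map>
#include <string>
#include <vector>

// Stand-in for AddressPool, with std::string playing the role of MCSymbol*.
class AddressPoolSketch {
  struct Entry {
    unsigned Number;
    bool TLS;
  };
  std::map<std::string, Entry> Pool;
  bool HasBeenUsed = false;

public:
  // The first request for a symbol assigns the next dense index; repeated
  // requests return the same index (insert() is a no-op for existing keys).
  unsigned getIndex(const std::string &Sym, bool TLS = false) {
    HasBeenUsed = true;
    auto IterBool =
        Pool.insert({Sym, Entry{static_cast<unsigned>(Pool.size()), TLS}});
    return IterBool.first->second.Number;
  }

  // Order the entries by index before emission, as AddressPool::emit() does.
  std::vector<std::string> emissionOrder() const {
    std::vector<std::string> Entries(Pool.size());
    for (const auto &I : Pool)
      Entries[I.second.Number] = I.first;
    return Entries;
  }

  bool hasBeenUsed() const { return HasBeenUsed; }
  void resetUsedFlag() { HasBeenUsed = false; }
};
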
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 060e010..424e759 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -11,14 +11,14 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
#include "llvm/CodeGen/AsmPrinter.h"
#include "DwarfDebug.h"
#include "DwarfException.h"
+#include "WinCodeViewLineTables.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Assembly/Writer.h"
+#include "llvm/Analysis/JumpInstrTableInfo.h"
#include "llvm/CodeGen/GCMetadataPrinter.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -27,8 +27,9 @@
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -42,27 +43,29 @@
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Timer.h"
-#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
-#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
using namespace llvm;
+#define DEBUG_TYPE "asm-printer"
+
static const char *const DWARFGroupName = "DWARF Emission";
-static const char *const DbgTimerName = "DWARF Debug Writer";
+static const char *const DbgTimerName = "Debug Info Emission";
static const char *const EHTimerName = "DWARF Exception Writer";
+static const char *const CodeViewLineTablesGroupName = "CodeView Line Tables";
STATISTIC(EmittedInsts, "Number of machine instrs printed");
char AsmPrinter::ID = 0;
-typedef DenseMap<GCStrategy*,GCMetadataPrinter*> gcp_map_type;
+typedef DenseMap<GCStrategy*, std::unique_ptr<GCMetadataPrinter>> gcp_map_type;
static gcp_map_type &getGCMap(void *&P) {
- if (P == 0)
+ if (!P)
P = new gcp_map_type();
return *(gcp_map_type*)P;
}
@@ -99,23 +102,21 @@ AsmPrinter::AsmPrinter(TargetMachine &tm, MCStreamer &Streamer)
TM(tm), MAI(tm.getMCAsmInfo()), MII(tm.getInstrInfo()),
OutContext(Streamer.getContext()),
OutStreamer(Streamer),
- LastMI(0), LastFn(0), Counter(~0U), SetCounter(0) {
- DD = 0; DE = 0; MMI = 0; LI = 0; MF = 0;
- CurrentFnSym = CurrentFnSymForSize = 0;
- GCMetadataPrinters = 0;
+ LastMI(nullptr), LastFn(0), Counter(~0U), SetCounter(0) {
+ DD = nullptr; MMI = nullptr; LI = nullptr; MF = nullptr;
+ CurrentFnSym = CurrentFnSymForSize = nullptr;
+ GCMetadataPrinters = nullptr;
VerboseAsm = Streamer.isVerboseAsm();
}
AsmPrinter::~AsmPrinter() {
- assert(DD == 0 && DE == 0 && "Debug/EH info didn't get finalized");
+ assert(!DD && Handlers.empty() && "Debug/EH info didn't get finalized");
- if (GCMetadataPrinters != 0) {
+ if (GCMetadataPrinters) {
gcp_map_type &GCMap = getGCMap(GCMetadataPrinters);
- for (gcp_map_type::iterator I = GCMap.begin(), E = GCMap.end(); I != E; ++I)
- delete I->second;
delete &GCMap;
- GCMetadataPrinters = 0;
+ GCMetadataPrinters = nullptr;
}
delete &OutStreamer;
@@ -136,6 +137,14 @@ const DataLayout &AsmPrinter::getDataLayout() const {
return *TM.getDataLayout();
}
+const MCSubtargetInfo &AsmPrinter::getSubtargetInfo() const {
+ return TM.getSubtarget<MCSubtargetInfo>();
+}
+
+void AsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) {
+ S.EmitInstruction(Inst, getSubtargetInfo());
+}
+
StringRef AsmPrinter::getTargetTriple() const {
return TM.getTargetTriple();
}
@@ -164,9 +173,28 @@ bool AsmPrinter::doInitialization(Module &M) {
const_cast<TargetLoweringObjectFile&>(getObjFileLowering())
.Initialize(OutContext, TM);
- OutStreamer.InitStreamer();
+ OutStreamer.InitSections();
- Mang = new Mangler(&TM);
+ Mang = new Mangler(TM.getDataLayout());
+
+ // Emit the version-min deployment target directive if needed.
+ //
+ // FIXME: If we end up with a collection of these sorts of Darwin-specific
+ // or ELF-specific things, it may make sense to have a platform helper class
+ // that will work with the target helper class. For now keep it here, as the
+ // alternative is duplicated code in each of the target asm printers that
+ // use the directive, where it would need the same conditionalization
+ // anyway.
+ Triple TT(getTargetTriple());
+ if (TT.isOSDarwin()) {
+ unsigned Major, Minor, Update;
+ TT.getOSVersion(Major, Minor, Update);
+ // If there is a version specified, Major will be non-zero.
+ if (Major)
+ OutStreamer.EmitVersionMin((TT.isMacOSX() ?
+ MCVM_OSXVersionMin : MCVM_IOSVersionMin),
+ Major, Minor, Update);
+ }
// Allow the target to emit any magic that it wants at the start of the file.
EmitStartOfAsmFile(M);
@@ -180,7 +208,7 @@ bool AsmPrinter::doInitialization(Module &M) {
GCModuleInfo *MI = getAnalysisIfAvailable<GCModuleInfo>();
assert(MI && "AsmPrinter didn't require GCModuleInfo?");
- for (GCModuleInfo::iterator I = MI->begin(), E = MI->end(); I != E; ++I)
+ for (auto &I : *MI)
if (GCMetadataPrinter *MP = GetOrCreateGCPrinter(*I))
MP->beginAssembly(*this);
@@ -193,25 +221,65 @@ bool AsmPrinter::doInitialization(Module &M) {
OutStreamer.AddBlankLine();
}
- if (MAI->doesSupportDebugInformation())
- DD = new DwarfDebug(this, &M);
+ if (MAI->doesSupportDebugInformation()) {
+ if (Triple(TM.getTargetTriple()).isKnownWindowsMSVCEnvironment()) {
+ Handlers.push_back(HandlerInfo(new WinCodeViewLineTables(this),
+ DbgTimerName,
+ CodeViewLineTablesGroupName));
+ } else {
+ DD = new DwarfDebug(this, &M);
+ Handlers.push_back(HandlerInfo(DD, DbgTimerName, DWARFGroupName));
+ }
+ }
+ EHStreamer *ES = nullptr;
switch (MAI->getExceptionHandlingType()) {
case ExceptionHandling::None:
- return false;
+ break;
case ExceptionHandling::SjLj:
case ExceptionHandling::DwarfCFI:
- DE = new DwarfCFIException(this);
- return false;
+ ES = new DwarfCFIException(this);
+ break;
case ExceptionHandling::ARM:
- DE = new ARMException(this);
+ ES = new ARMException(this);
+ break;
+ case ExceptionHandling::WinEH:
+ ES = new Win64Exception(this);
+ break;
+ }
+ if (ES)
+ Handlers.push_back(HandlerInfo(ES, EHTimerName, DWARFGroupName));
+ return false;
+}
+
+static bool canBeHidden(const GlobalValue *GV, const MCAsmInfo &MAI) {
+ GlobalValue::LinkageTypes Linkage = GV->getLinkage();
+ if (Linkage != GlobalValue::LinkOnceODRLinkage)
+ return false;
+
+ if (!MAI.hasWeakDefCanBeHiddenDirective())
return false;
- case ExceptionHandling::Win64:
- DE = new Win64Exception(this);
+
+ if (GV->hasUnnamedAddr())
+ return true;
+
+ // This is only used for MachO, so right now it doesn't really matter how
+ // we handle alias. Revisit this once the MachO linker implements aliases.
+ if (isa<GlobalAlias>(GV))
return false;
+
+ // If it is a non constant variable, it needs to be uniqued across shared
+ // objects.
+ if (const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV)) {
+ if (!Var->isConstant())
+ return false;
}
- llvm_unreachable("Unknown exception type.");
+ GlobalStatus GS;
+ if (!GlobalStatus::analyzeGlobal(GV, GS) && !GS.IsCompared)
+ return true;
+
+ return false;
}
void AsmPrinter::EmitLinkage(const GlobalValue *GV, MCSymbol *GVSym) const {
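
In this hunk, doInitialization() registers debug-info and exception emitters in a single Handlers list instead of the dedicated DD/DE members, and the begin/end hooks later iterate that list. A minimal sketch of the pattern under those assumptions (timer wrapping and the concrete handler types are left out; names are placeholders):

#include <memory>
#include <string>
#include <vector>

// Simplified model of the handler list: debug-info and EH emitters share
// one interface and are driven uniformly at function/module boundaries.
struct HandlerSketch {
  virtual ~HandlerSketch() = default;
  virtual void beginFunction() = 0;
  virtual void endFunction() = 0;
  virtual void endModule() = 0;
};

struct HandlerInfoSketch {
  std::unique_ptr<HandlerSketch> Handler;
  std::string TimerName;
  std::string TimerGroupName;
};

void endFunctionAll(std::vector<HandlerInfoSketch> &Handlers) {
  for (auto &HI : Handlers)
    HI.Handler->endFunction();  // real code wraps each call in a NamedRegionTimer
}

void endModuleAll(std::vector<HandlerInfoSketch> &Handlers) {
  for (auto &HI : Handlers)
    HI.Handler->endModule();
  Handlers.clear();             // the real code also deletes the handlers here
}
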
@@ -222,25 +290,11 @@ void AsmPrinter::EmitLinkage(const GlobalValue *GV, MCSymbol *GVSym) const {
case GlobalValue::LinkOnceODRLinkage:
case GlobalValue::WeakAnyLinkage:
case GlobalValue::WeakODRLinkage:
- case GlobalValue::LinkerPrivateWeakLinkage:
if (MAI->hasWeakDefDirective()) {
// .globl _foo
OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Global);
- bool CanBeHidden = false;
-
- if (Linkage == GlobalValue::LinkOnceODRLinkage &&
- MAI->hasWeakDefCanBeHiddenDirective()) {
- if (GV->hasUnnamedAddr()) {
- CanBeHidden = true;
- } else {
- GlobalStatus GS;
- if (!GlobalStatus::analyzeGlobal(GV, GS) && !GS.IsCompared)
- CanBeHidden = true;
- }
- }
-
- if (!CanBeHidden)
+ if (!canBeHidden(GV, *MAI))
// .weak_definition _foo
OutStreamer.EmitSymbolAttribute(GVSym, MCSA_WeakDefinition);
else
@@ -254,7 +308,6 @@ void AsmPrinter::EmitLinkage(const GlobalValue *GV, MCSymbol *GVSym) const {
OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Weak);
}
return;
- case GlobalValue::DLLExportLinkage:
case GlobalValue::AppendingLinkage:
// FIXME: appending linkage variables should go into a section of
// their name or something. For now, just emit them as external.
@@ -265,19 +318,22 @@ void AsmPrinter::EmitLinkage(const GlobalValue *GV, MCSymbol *GVSym) const {
return;
case GlobalValue::PrivateLinkage:
case GlobalValue::InternalLinkage:
- case GlobalValue::LinkerPrivateLinkage:
return;
case GlobalValue::AvailableExternallyLinkage:
llvm_unreachable("Should never emit this");
- case GlobalValue::DLLImportLinkage:
case GlobalValue::ExternalWeakLinkage:
llvm_unreachable("Don't know how to emit these");
}
llvm_unreachable("Unknown linkage type!");
}
+void AsmPrinter::getNameWithPrefix(SmallVectorImpl<char> &Name,
+ const GlobalValue *GV) const {
+ TM.getNameWithPrefix(Name, GV, *Mang);
+}
+
MCSymbol *AsmPrinter::getSymbol(const GlobalValue *GV) const {
- return getObjFileLowering().getSymbol(*Mang, GV);
+ return TM.getSymbol(GV, *Mang);
}
/// EmitGlobalVariable - Emit the specified global variable to the .s file.
@@ -288,7 +344,7 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
return;
if (isVerbose()) {
- WriteAsOperand(OutStreamer.GetCommentOS(), GV,
+ GV->printAsOperand(OutStreamer.GetCommentOS(),
/*PrintType=*/false, GV->getParent());
OutStreamer.GetCommentOS() << '\n';
}
@@ -313,8 +369,10 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
// sections and expected to be contiguous (e.g. ObjC metadata).
unsigned AlignLog = getGVAlignmentLog2(GV, *DL);
- if (DD)
- DD->setSymbolSize(GVSym, Size);
+ for (const HandlerInfo &HI : Handlers) {
+ NamedRegionTimer T(HI.TimerName, HI.TimerGroupName, TimePassesIsEnabled);
+ HI.Handler->setSymbolSize(GVSym, Size);
+ }
// Handle common and BSS local symbols (.lcomm).
if (GVKind.isCommon() || GVKind.isBSSLocal()) {
@@ -334,7 +392,7 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
// Handle local BSS symbols.
if (MAI->hasMachoZeroFillDirective()) {
const MCSection *TheSection =
- getObjFileLowering().SectionForGlobal(GV, GVKind, Mang, TM);
+ getObjFileLowering().SectionForGlobal(GV, GVKind, *Mang, TM);
// .zerofill __DATA, __bss, _foo, 400, 5
OutStreamer.EmitZerofill(TheSection, GVSym, Size, Align);
return;
@@ -363,7 +421,7 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
}
const MCSection *TheSection =
- getObjFileLowering().SectionForGlobal(GV, GVKind, Mang, TM);
+ getObjFileLowering().SectionForGlobal(GV, GVKind, *Mang, TM);
// Handle the zerofill directive on darwin, which is a special form of BSS
// emission.
@@ -454,7 +512,8 @@ void AsmPrinter::EmitFunctionHeader() {
// Print the 'header' of function.
const Function *F = MF->getFunction();
- OutStreamer.SwitchSection(getObjFileLowering().SectionForGlobal(F, Mang, TM));
+ OutStreamer.SwitchSection(
+ getObjFileLowering().SectionForGlobal(F, *Mang, TM));
EmitVisibility(CurrentFnSym, F->getVisibility());
EmitLinkage(F, CurrentFnSym);
@@ -464,7 +523,7 @@ void AsmPrinter::EmitFunctionHeader() {
OutStreamer.EmitSymbolAttribute(CurrentFnSym, MCSA_ELF_TypeFunction);
if (isVerbose()) {
- WriteAsOperand(OutStreamer.GetCommentOS(), F,
+ F->printAsOperand(OutStreamer.GetCommentOS(),
/*PrintType=*/false, F->getParent());
OutStreamer.GetCommentOS() << '\n';
}
@@ -484,13 +543,9 @@ void AsmPrinter::EmitFunctionHeader() {
}
// Emit pre-function debug and/or EH information.
- if (DE) {
- NamedRegionTimer T(EHTimerName, DWARFGroupName, TimePassesIsEnabled);
- DE->BeginFunction(MF);
- }
- if (DD) {
- NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
- DD->beginFunction(MF);
+ for (const HandlerInfo &HI : Handlers) {
+ NamedRegionTimer T(HI.TimerName, HI.TimerGroupName, TimePassesIsEnabled);
+ HI.Handler->beginFunction(MF);
}
// Emit the prefix data.
@@ -578,10 +633,9 @@ static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) {
SmallString<128> Str;
raw_svector_ostream OS(Str);
- OS << '\t' << AP.MAI->getCommentString() << "DEBUG_VALUE: ";
+ OS << "DEBUG_VALUE: ";
- // cast away const; DIetc do not take const operands for some reason.
- DIVariable V(const_cast<MDNode*>(MI->getOperand(2).getMetadata()));
+ DIVariable V(MI->getOperand(2).getMetadata());
if (V.getContext().isSubprogram()) {
StringRef Name = DISubprogram(V.getContext()).getDisplayName();
if (!Name.empty())
@@ -627,7 +681,7 @@ static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) {
// Suppress offset, it is not meaningful here.
OS << "undef";
// NOTE: Want this comment at start of line, don't emit with AddComment.
- AP.OutStreamer.EmitRawText(OS.str());
+ AP.OutStreamer.emitRawComment(OS.str());
return true;
}
if (Deref)
@@ -639,7 +693,7 @@ static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) {
OS << '+' << Offset << ']';
// NOTE: Want this comment at start of line, don't emit with AddComment.
- AP.OutStreamer.EmitRawText(OS.str());
+ AP.OutStreamer.emitRawComment(OS.str());
return true;
}
@@ -655,18 +709,14 @@ AsmPrinter::CFIMoveType AsmPrinter::needsCFIMoves() {
}
bool AsmPrinter::needsSEHMoves() {
- return MAI->getExceptionHandlingType() == ExceptionHandling::Win64 &&
+ return MAI->getExceptionHandlingType() == ExceptionHandling::WinEH &&
MF->getFunction()->needsUnwindTableEntry();
}
-bool AsmPrinter::needsRelocationsForDwarfStringPool() const {
- return MAI->doesDwarfUseRelocationsAcrossSections();
-}
-
-void AsmPrinter::emitPrologLabel(const MachineInstr &MI) {
- const MCSymbol *Label = MI.getOperand(0).getMCSymbol();
-
- if (MAI->getExceptionHandlingType() != ExceptionHandling::DwarfCFI)
+void AsmPrinter::emitCFIInstruction(const MachineInstr &MI) {
+ ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
+ if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
+ ExceptionHandlingType != ExceptionHandling::ARM)
return;
if (needsCFIMoves() == CFI_M_None)
@@ -677,16 +727,9 @@ void AsmPrinter::emitPrologLabel(const MachineInstr &MI) {
const MachineModuleInfo &MMI = MF->getMMI();
const std::vector<MCCFIInstruction> &Instrs = MMI.getFrameInstructions();
- bool FoundOne = false;
- (void)FoundOne;
- for (std::vector<MCCFIInstruction>::const_iterator I = Instrs.begin(),
- E = Instrs.end(); I != E; ++I) {
- if (I->getLabel() == Label) {
- emitCFIInstruction(*I);
- FoundOne = true;
- }
- }
- assert(FoundOne);
+ unsigned CFIIndex = MI.getOperand(0).getCFIIndex();
+ const MCCFIInstruction &CFI = Instrs[CFIIndex];
+ emitCFIInstruction(CFI);
}
/// EmitFunctionBody - This method emits the body and trailer for a
@@ -695,69 +738,70 @@ void AsmPrinter::EmitFunctionBody() {
// Emit target-specific gunk before the function body.
EmitFunctionBodyStart();
- bool ShouldPrintDebugScopes = DD && MMI->hasDebugInfo();
+ bool ShouldPrintDebugScopes = MMI->hasDebugInfo();
// Print out code for the function.
bool HasAnyRealCode = false;
- const MachineInstr *LastMI = 0;
- for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
- I != E; ++I) {
+ const MachineInstr *LastMI = nullptr;
+ for (auto &MBB : *MF) {
// Print a label for the basic block.
- EmitBasicBlockStart(I);
- for (MachineBasicBlock::const_iterator II = I->begin(), IE = I->end();
- II != IE; ++II) {
- LastMI = II;
+ EmitBasicBlockStart(MBB);
+ for (auto &MI : MBB) {
+ LastMI = &MI;
// Print the assembly for the instruction.
- if (!II->isLabel() && !II->isImplicitDef() && !II->isKill() &&
- !II->isDebugValue()) {
+ if (!MI.isPosition() && !MI.isImplicitDef() && !MI.isKill() &&
+ !MI.isDebugValue()) {
HasAnyRealCode = true;
++EmittedInsts;
}
if (ShouldPrintDebugScopes) {
- NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
- DD->beginInstruction(II);
+ for (const HandlerInfo &HI : Handlers) {
+ NamedRegionTimer T(HI.TimerName, HI.TimerGroupName,
+ TimePassesIsEnabled);
+ HI.Handler->beginInstruction(&MI);
+ }
}
if (isVerbose())
- emitComments(*II, OutStreamer.GetCommentOS());
+ emitComments(MI, OutStreamer.GetCommentOS());
- switch (II->getOpcode()) {
- case TargetOpcode::PROLOG_LABEL:
- emitPrologLabel(*II);
+ switch (MI.getOpcode()) {
+ case TargetOpcode::CFI_INSTRUCTION:
+ emitCFIInstruction(MI);
break;
case TargetOpcode::EH_LABEL:
case TargetOpcode::GC_LABEL:
- OutStreamer.EmitLabel(II->getOperand(0).getMCSymbol());
+ OutStreamer.EmitLabel(MI.getOperand(0).getMCSymbol());
break;
case TargetOpcode::INLINEASM:
- EmitInlineAsm(II);
+ EmitInlineAsm(&MI);
break;
case TargetOpcode::DBG_VALUE:
if (isVerbose()) {
- if (!emitDebugValueComment(II, *this))
- EmitInstruction(II);
+ if (!emitDebugValueComment(&MI, *this))
+ EmitInstruction(&MI);
}
break;
case TargetOpcode::IMPLICIT_DEF:
- if (isVerbose()) emitImplicitDef(II);
+ if (isVerbose()) emitImplicitDef(&MI);
break;
case TargetOpcode::KILL:
- if (isVerbose()) emitKill(II, *this);
+ if (isVerbose()) emitKill(&MI, *this);
break;
default:
- if (!TM.hasMCUseLoc())
- MCLineEntry::Make(&OutStreamer, getCurrentSection());
-
- EmitInstruction(II);
+ EmitInstruction(&MI);
break;
}
if (ShouldPrintDebugScopes) {
- NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
- DD->endInstruction(II);
+ for (const HandlerInfo &HI : Handlers) {
+ NamedRegionTimer T(HI.TimerName, HI.TimerGroupName,
+ TimePassesIsEnabled);
+ HI.Handler->endInstruction();
+ }
}
}
}
@@ -767,7 +811,7 @@ void AsmPrinter::EmitFunctionBody() {
// label equaling the end of function label and an invalid "row" in the
// FDE. We need to emit a noop in this situation so that the FDE's rows are
// valid.
- bool RequiresNoop = LastMI && LastMI->isPrologLabel();
+ bool RequiresNoop = LastMI && LastMI->isCFIInstruction();
// If the function is empty and the object file uses .subsections_via_symbols,
// then we need to emit *something* to the function body to prevent the
@@ -777,17 +821,16 @@ void AsmPrinter::EmitFunctionBody() {
TM.getInstrInfo()->getNoopForMachoTarget(Noop);
if (Noop.getOpcode()) {
OutStreamer.AddComment("avoids zero-length function");
- OutStreamer.EmitInstruction(Noop);
+ OutStreamer.EmitInstruction(Noop, getSubtargetInfo());
} else // Target not mc-ized yet.
OutStreamer.EmitRawText(StringRef("\tnop\n"));
}
const Function *F = MF->getFunction();
- for (Function::const_iterator i = F->begin(), e = F->end(); i != e; ++i) {
- const BasicBlock *BB = i;
- if (!BB->hasAddressTaken())
+ for (const auto &BB : *F) {
+ if (!BB.hasAddressTaken())
continue;
- MCSymbol *Sym = GetBlockAddressSymbol(BB);
+ MCSymbol *Sym = GetBlockAddressSymbol(&BB);
if (Sym->isDefined())
continue;
OutStreamer.AddComment("Address of block that was removed by CodeGen");
@@ -813,14 +856,10 @@ void AsmPrinter::EmitFunctionBody() {
OutStreamer.EmitELFSize(CurrentFnSym, SizeExp);
}
- // Emit post-function debug information.
- if (DD) {
- NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
- DD->endFunction(MF);
- }
- if (DE) {
- NamedRegionTimer T(EHTimerName, DWARFGroupName, TimePassesIsEnabled);
- DE->EndFunction();
+ // Emit post-function debug and/or EH information.
+ for (const HandlerInfo &HI : Handlers) {
+ NamedRegionTimer T(HI.TimerName, HI.TimerGroupName, TimePassesIsEnabled);
+ HI.Handler->endFunction(MF);
}
MMI->EndFunction();
@@ -830,65 +869,15 @@ void AsmPrinter::EmitFunctionBody() {
OutStreamer.AddBlankLine();
}
-/// EmitDwarfRegOp - Emit dwarf register operation.
-void AsmPrinter::EmitDwarfRegOp(const MachineLocation &MLoc,
- bool Indirect) const {
- const TargetRegisterInfo *TRI = TM.getRegisterInfo();
- int Reg = TRI->getDwarfRegNum(MLoc.getReg(), false);
-
- for (MCSuperRegIterator SR(MLoc.getReg(), TRI); SR.isValid() && Reg < 0;
- ++SR) {
- Reg = TRI->getDwarfRegNum(*SR, false);
- // FIXME: Get the bit range this register uses of the superregister
- // so that we can produce a DW_OP_bit_piece
- }
-
- // FIXME: Handle cases like a super register being encoded as
- // DW_OP_reg 32 DW_OP_piece 4 DW_OP_reg 33
-
- // FIXME: We have no reasonable way of handling errors in here. The
- // caller might be in the middle of an dwarf expression. We should
- // probably assert that Reg >= 0 once debug info generation is more mature.
-
- if (MLoc.isIndirect() || Indirect) {
- if (Reg < 32) {
- OutStreamer.AddComment(
- dwarf::OperationEncodingString(dwarf::DW_OP_breg0 + Reg));
- EmitInt8(dwarf::DW_OP_breg0 + Reg);
- } else {
- OutStreamer.AddComment("DW_OP_bregx");
- EmitInt8(dwarf::DW_OP_bregx);
- OutStreamer.AddComment(Twine(Reg));
- EmitULEB128(Reg);
- }
- EmitSLEB128(!MLoc.isIndirect() ? 0 : MLoc.getOffset());
- if (MLoc.isIndirect() && Indirect)
- EmitInt8(dwarf::DW_OP_deref);
- } else {
- if (Reg < 32) {
- OutStreamer.AddComment(
- dwarf::OperationEncodingString(dwarf::DW_OP_reg0 + Reg));
- EmitInt8(dwarf::DW_OP_reg0 + Reg);
- } else {
- OutStreamer.AddComment("DW_OP_regx");
- EmitInt8(dwarf::DW_OP_regx);
- OutStreamer.AddComment(Twine(Reg));
- EmitULEB128(Reg);
- }
- }
-
- // FIXME: Produce a DW_OP_bit_piece if we used a superregister
-}
+static const MCExpr *lowerConstant(const Constant *CV, AsmPrinter &AP);
bool AsmPrinter::doFinalization(Module &M) {
// Emit global variables.
- for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I)
- EmitGlobalVariable(I);
+ for (const auto &G : M.globals())
+ EmitGlobalVariable(&G);
// Emit visibility info for declarations
- for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I) {
- const Function &F = *I;
+ for (const Function &F : M) {
if (!F.isDeclaration())
continue;
GlobalValue::VisibilityTypes V = F.getVisibility();
@@ -899,30 +888,72 @@ bool AsmPrinter::doFinalization(Module &M) {
EmitVisibility(Name, V, false);
}
+ // Get information about jump-instruction tables to print.
+ JumpInstrTableInfo *JITI = getAnalysisIfAvailable<JumpInstrTableInfo>();
+
+ if (JITI && !JITI->getTables().empty()) {
+ unsigned Arch = Triple(getTargetTriple()).getArch();
+ bool IsThumb = (Arch == Triple::thumb || Arch == Triple::thumbeb);
+ MCInst TrapInst;
+ TM.getInstrInfo()->getTrap(TrapInst);
+ for (const auto &KV : JITI->getTables()) {
+ uint64_t Count = 0;
+ for (const auto &FunPair : KV.second) {
+ // Emit the function labels to make this be a function entry point.
+ MCSymbol *FunSym =
+ OutContext.GetOrCreateSymbol(FunPair.second->getName());
+ OutStreamer.EmitSymbolAttribute(FunSym, MCSA_Global);
+ // FIXME: JumpTableInstrInfo should store information about the required
+ // alignment of table entries and the size of the padding instruction.
+ EmitAlignment(3);
+ if (IsThumb)
+ OutStreamer.EmitThumbFunc(FunSym);
+ if (MAI->hasDotTypeDotSizeDirective())
+ OutStreamer.EmitSymbolAttribute(FunSym, MCSA_ELF_TypeFunction);
+ OutStreamer.EmitLabel(FunSym);
+
+ // Emit the jump instruction to transfer control to the original
+ // function.
+ MCInst JumpToFun;
+ MCSymbol *TargetSymbol =
+ OutContext.GetOrCreateSymbol(FunPair.first->getName());
+ const MCSymbolRefExpr *TargetSymRef =
+ MCSymbolRefExpr::Create(TargetSymbol, MCSymbolRefExpr::VK_PLT,
+ OutContext);
+ TM.getInstrInfo()->getUnconditionalBranch(JumpToFun, TargetSymRef);
+ OutStreamer.EmitInstruction(JumpToFun, getSubtargetInfo());
+ ++Count;
+ }
+
+ // Emit enough padding instructions to fill up to the next power of two.
+ // This assumes that the trap instruction takes 8 bytes or fewer.
+ uint64_t Remaining = NextPowerOf2(Count) - Count;
+ for (uint64_t C = 0; C < Remaining; ++C) {
+ EmitAlignment(3);
+ OutStreamer.EmitInstruction(TrapInst, getSubtargetInfo());
+ }
+
+ }
+ }
+
// Emit module flags.
SmallVector<Module::ModuleFlagEntry, 8> ModuleFlags;
M.getModuleFlagsMetadata(ModuleFlags);
if (!ModuleFlags.empty())
- getObjFileLowering().emitModuleFlags(OutStreamer, ModuleFlags, Mang, TM);
+ getObjFileLowering().emitModuleFlags(OutStreamer, ModuleFlags, *Mang, TM);
// Make sure we wrote out everything we need.
OutStreamer.Flush();
// Finalize debug and EH information.
- if (DE) {
- {
- NamedRegionTimer T(EHTimerName, DWARFGroupName, TimePassesIsEnabled);
- DE->EndModule();
- }
- delete DE; DE = 0;
- }
- if (DD) {
- {
- NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
- DD->endModule();
- }
- delete DD; DD = 0;
+ for (const HandlerInfo &HI : Handlers) {
+ NamedRegionTimer T(HI.TimerName, HI.TimerGroupName,
+ TimePassesIsEnabled);
+ HI.Handler->endModule();
+ delete HI.Handler;
}
+ Handlers.clear();
+ DD = nullptr;
// If the target wants to know about weak references, print them all.
if (MAI->getWeakRefDirective()) {
@@ -932,51 +963,43 @@ bool AsmPrinter::doFinalization(Module &M) {
// happen with the MC stuff eventually.
// Print out module-level global variables here.
- for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I) {
- if (!I->hasExternalWeakLinkage()) continue;
- OutStreamer.EmitSymbolAttribute(getSymbol(I), MCSA_WeakReference);
+ for (const auto &G : M.globals()) {
+ if (!G.hasExternalWeakLinkage())
+ continue;
+ OutStreamer.EmitSymbolAttribute(getSymbol(&G), MCSA_WeakReference);
}
- for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I) {
- if (!I->hasExternalWeakLinkage()) continue;
- OutStreamer.EmitSymbolAttribute(getSymbol(I), MCSA_WeakReference);
+ for (const auto &F : M) {
+ if (!F.hasExternalWeakLinkage())
+ continue;
+ OutStreamer.EmitSymbolAttribute(getSymbol(&F), MCSA_WeakReference);
}
}
if (MAI->hasSetDirective()) {
OutStreamer.AddBlankLine();
- for (Module::const_alias_iterator I = M.alias_begin(), E = M.alias_end();
- I != E; ++I) {
- MCSymbol *Name = getSymbol(I);
-
- const GlobalValue *GV = I->getAliasedGlobal();
- if (GV->isDeclaration()) {
- report_fatal_error(Name->getName() +
- ": Target doesn't support aliases to declarations");
- }
-
- MCSymbol *Target = getSymbol(GV);
+ for (const auto &Alias : M.aliases()) {
+ MCSymbol *Name = getSymbol(&Alias);
- if (I->hasExternalLinkage() || !MAI->getWeakRefDirective())
+ if (Alias.hasExternalLinkage() || !MAI->getWeakRefDirective())
OutStreamer.EmitSymbolAttribute(Name, MCSA_Global);
- else if (I->hasWeakLinkage() || I->hasLinkOnceLinkage())
+ else if (Alias.hasWeakLinkage() || Alias.hasLinkOnceLinkage())
OutStreamer.EmitSymbolAttribute(Name, MCSA_WeakReference);
else
- assert(I->hasLocalLinkage() && "Invalid alias linkage");
+ assert(Alias.hasLocalLinkage() && "Invalid alias linkage");
- EmitVisibility(Name, I->getVisibility());
+ EmitVisibility(Name, Alias.getVisibility());
// Emit the directives as assignments aka .set:
OutStreamer.EmitAssignment(Name,
- MCSymbolRefExpr::Create(Target, OutContext));
+ lowerConstant(Alias.getAliasee(), *this));
}
}
GCModuleInfo *MI = getAnalysisIfAvailable<GCModuleInfo>();
assert(MI && "AsmPrinter didn't require GCModuleInfo?");
for (GCModuleInfo::iterator I = MI->end(), E = MI->begin(); I != E; )
- if (GCMetadataPrinter *MP = GetOrCreateGCPrinter(*--I))
+ if (GCMetadataPrinter *MP = GetOrCreateGCPrinter(**--I))
MP->finishAssembly(*this);
// Emit llvm.ident metadata in an '.ident' directive.
@@ -993,8 +1016,8 @@ bool AsmPrinter::doFinalization(Module &M) {
// after everything else has gone out.
EmitEndOfAsmFile(M);
- delete Mang; Mang = 0;
- MMI = 0;
+ delete Mang; Mang = nullptr;
+ MMI = nullptr;
OutStreamer.Finish();
OutStreamer.reset();
@@ -1039,23 +1062,13 @@ void AsmPrinter::EmitConstantPool() {
const MachineConstantPoolEntry &CPE = CP[i];
unsigned Align = CPE.getAlignment();
- SectionKind Kind;
- switch (CPE.getRelocationInfo()) {
- default: llvm_unreachable("Unknown section kind");
- case 2: Kind = SectionKind::getReadOnlyWithRel(); break;
- case 1:
- Kind = SectionKind::getReadOnlyWithRelLocal();
- break;
- case 0:
- switch (TM.getDataLayout()->getTypeAllocSize(CPE.getType())) {
- case 4: Kind = SectionKind::getMergeableConst4(); break;
- case 8: Kind = SectionKind::getMergeableConst8(); break;
- case 16: Kind = SectionKind::getMergeableConst16();break;
- default: Kind = SectionKind::getMergeableConst(); break;
- }
- }
+ SectionKind Kind = CPE.getSectionKind(TM.getDataLayout());
- const MCSection *S = getObjFileLowering().getSectionForConstant(Kind);
+ const Constant *C = nullptr;
+ if (!CPE.isMachineConstantPoolEntry())
+ C = CPE.Val.ConstVal;
+
+ const MCSection *S = getObjFileLowering().getSectionForConstant(Kind, C);
// The number of sections are small, just do a linear search from the
// last section to the first.
@@ -1078,13 +1091,22 @@ void AsmPrinter::EmitConstantPool() {
}
// Now print stuff into the calculated sections.
+ const MCSection *CurSection = nullptr;
+ unsigned Offset = 0;
for (unsigned i = 0, e = CPSections.size(); i != e; ++i) {
- OutStreamer.SwitchSection(CPSections[i].S);
- EmitAlignment(Log2_32(CPSections[i].Alignment));
-
- unsigned Offset = 0;
for (unsigned j = 0, ee = CPSections[i].CPEs.size(); j != ee; ++j) {
unsigned CPI = CPSections[i].CPEs[j];
+ MCSymbol *Sym = GetCPISymbol(CPI);
+ if (!Sym->isUndefined())
+ continue;
+
+ if (CurSection != CPSections[i].S) {
+ OutStreamer.SwitchSection(CPSections[i].S);
+ EmitAlignment(Log2_32(CPSections[i].Alignment));
+ CurSection = CPSections[i].S;
+ Offset = 0;
+ }
+
MachineConstantPoolEntry CPE = CP[CPI];
// Emit inter-object padding for alignment.
@@ -1094,8 +1116,8 @@ void AsmPrinter::EmitConstantPool() {
Type *Ty = CPE.getType();
Offset = NewOffset + TM.getDataLayout()->getTypeAllocSize(Ty);
- OutStreamer.EmitLabel(GetCPISymbol(CPI));
+ OutStreamer.EmitLabel(Sym);
if (CPE.isMachineConstantPoolEntry())
EmitMachineConstantPoolValue(CPE.Val.MachineCPVal);
else
@@ -1108,8 +1130,9 @@ void AsmPrinter::EmitConstantPool() {
/// by the current function to the current output stream.
///
void AsmPrinter::EmitJumpTableInfo() {
+ const DataLayout *DL = MF->getTarget().getDataLayout();
const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
- if (MJTI == 0) return;
+ if (!MJTI) return;
if (MJTI->getEntryKind() == MachineJumpTableInfo::EK_Inline) return;
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
if (JT.empty()) return;
@@ -1127,11 +1150,13 @@ void AsmPrinter::EmitJumpTableInfo() {
// FIXME: this isn't the right predicate, should be based on the MCSection
// for the function.
F->isWeakForLinker()) {
- OutStreamer.SwitchSection(getObjFileLowering().SectionForGlobal(F,Mang,TM));
+ OutStreamer.SwitchSection(
+ getObjFileLowering().SectionForGlobal(F, *Mang, TM));
} else {
// Otherwise, drop it in the readonly section.
const MCSection *ReadOnlySection =
- getObjFileLowering().getSectionForConstant(SectionKind::getReadOnly());
+ getObjFileLowering().getSectionForConstant(SectionKind::getReadOnly(),
+ /*C=*/nullptr);
OutStreamer.SwitchSection(ReadOnlySection);
JTInDiffSection = true;
}
@@ -1173,7 +1198,7 @@ void AsmPrinter::EmitJumpTableInfo() {
// before each jump table. The first label is never referenced, but tells
// the assembler and linker the extents of the jump table object. The
// second label is actually referenced by the code.
- if (JTInDiffSection && MAI->getLinkerPrivateGlobalPrefix()[0])
+ if (JTInDiffSection && DL->hasLinkerPrivateGlobalPrefix())
// FIXME: This doesn't have to have any specific name, just any randomly
// named and numbered 'l' label would work. Simplify GetJTISymbol.
OutStreamer.EmitLabel(GetJTISymbol(JTI, true));
@@ -1193,7 +1218,7 @@ void AsmPrinter::EmitJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB,
unsigned UID) const {
assert(MBB && MBB->getNumber() >= 0 && "Invalid basic block");
- const MCExpr *Value = 0;
+ const MCExpr *Value = nullptr;
switch (MJTI->getEntryKind()) {
case MachineJumpTableInfo::EK_Inline:
llvm_unreachable("Cannot emit EK_Inline jump table entry");
@@ -1268,7 +1293,7 @@ bool AsmPrinter::EmitSpecialLLVMGlobal(const GlobalVariable *GV) {
}
// Ignore debug and non-emitted data. This handles llvm.compiler.used.
- if (GV->getSection() == "llvm.metadata" ||
+ if (StringRef(GV->getSection()) == "llvm.metadata" ||
GV->hasAvailableExternallyLinkage())
return true;
@@ -1311,11 +1336,20 @@ void AsmPrinter::EmitLLVMUsedList(const ConstantArray *InitList) {
for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) {
const GlobalValue *GV =
dyn_cast<GlobalValue>(InitList->getOperand(i)->stripPointerCasts());
- if (GV && getObjFileLowering().shouldEmitUsedDirectiveFor(GV, Mang))
+ if (GV)
OutStreamer.EmitSymbolAttribute(getSymbol(GV), MCSA_NoDeadStrip);
}
}
+namespace {
+struct Structor {
+ Structor() : Priority(0), Func(nullptr), ComdatKey(nullptr) {}
+ int Priority;
+ llvm::Constant *Func;
+ llvm::GlobalValue *ComdatKey;
+};
+} // end namespace
+
/// EmitXXStructorList - Emit the ctor or dtor list taking into account the init
/// priority.
void AsmPrinter::EmitXXStructorList(const Constant *List, bool isCtor) {
@@ -1327,37 +1361,55 @@ void AsmPrinter::EmitXXStructorList(const Constant *List, bool isCtor) {
const ConstantArray *InitList = dyn_cast<ConstantArray>(List);
if (!InitList) return; // Not an array!
StructType *ETy = dyn_cast<StructType>(InitList->getType()->getElementType());
- if (!ETy || ETy->getNumElements() != 2) return; // Not an array of pairs!
+ // FIXME: Only allow the 3-field form in LLVM 4.0.
+ if (!ETy || ETy->getNumElements() < 2 || ETy->getNumElements() > 3)
+ return; // Not an array of two or three elements!
if (!isa<IntegerType>(ETy->getTypeAtIndex(0U)) ||
!isa<PointerType>(ETy->getTypeAtIndex(1U))) return; // Not (int, ptr).
+ if (ETy->getNumElements() == 3 && !isa<PointerType>(ETy->getTypeAtIndex(2U)))
+ return; // Not (int, ptr, ptr).
// Gather the structors in a form that's convenient for sorting by priority.
- typedef std::pair<unsigned, Constant *> Structor;
SmallVector<Structor, 8> Structors;
- for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) {
- ConstantStruct *CS = dyn_cast<ConstantStruct>(InitList->getOperand(i));
+ for (Value *O : InitList->operands()) {
+ ConstantStruct *CS = dyn_cast<ConstantStruct>(O);
if (!CS) continue; // Malformed.
if (CS->getOperand(1)->isNullValue())
break; // Found a null terminator, skip the rest.
ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
if (!Priority) continue; // Malformed.
- Structors.push_back(std::make_pair(Priority->getLimitedValue(65535),
- CS->getOperand(1)));
+ Structors.push_back(Structor());
+ Structor &S = Structors.back();
+ S.Priority = Priority->getLimitedValue(65535);
+ S.Func = CS->getOperand(1);
+ if (ETy->getNumElements() == 3 && !CS->getOperand(2)->isNullValue())
+ S.ComdatKey = dyn_cast<GlobalValue>(CS->getOperand(2)->stripPointerCasts());
}
// Emit the function pointers in the target-specific order
const DataLayout *DL = TM.getDataLayout();
unsigned Align = Log2_32(DL->getPointerPrefAlignment());
- std::stable_sort(Structors.begin(), Structors.end(), less_first());
- for (unsigned i = 0, e = Structors.size(); i != e; ++i) {
+ std::stable_sort(Structors.begin(), Structors.end(),
+ [](const Structor &L,
+ const Structor &R) { return L.Priority < R.Priority; });
+ for (Structor &S : Structors) {
+ const TargetLoweringObjectFile &Obj = getObjFileLowering();
+ const MCSymbol *KeySym = nullptr;
+ if (GlobalValue *GV = S.ComdatKey) {
+ if (GV->hasAvailableExternallyLinkage())
+ // If the associated variable is available_externally, some other TU
+ // will provide its dynamic initializer.
+ continue;
+
+ KeySym = getSymbol(GV);
+ }
const MCSection *OutputSection =
- (isCtor ?
- getObjFileLowering().getStaticCtorSection(Structors[i].first) :
- getObjFileLowering().getStaticDtorSection(Structors[i].first));
+ (isCtor ? Obj.getStaticCtorSection(S.Priority, KeySym)
+ : Obj.getStaticDtorSection(S.Priority, KeySym));
OutStreamer.SwitchSection(OutputSection);
if (OutStreamer.getCurrentSection() != OutStreamer.getPreviousSection())
EmitAlignment(Align);
- EmitXXStructor(Structors[i].second);
+ EmitXXStructor(S.Func);
}
}
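
The hunk above replaces the (priority, function) pair with an explicit Structor record so the optional third field (a comdat key, used to drop initializers for available_externally globals) can be carried along, and sorts with std::stable_sort so entries of equal priority keep their source order. A standalone sketch of that ordering, with plain C-string fields instead of llvm::Constant (the names here are illustrative):

#include <algorithm>
#include <cstdio>
#include <vector>

struct Structor {
  int Priority;
  const char *Func;      // function symbol to run
  const char *ComdatKey; // optional associated global (3-field form), may be null
};

int main() {
  std::vector<Structor> Structors = {
      {65535, "late_init", nullptr},
      {101, "early_init_a", nullptr},
      {101, "early_init_b", "key_b"},
  };
  // Stable sort: equal priorities keep their original relative order.
  std::stable_sort(Structors.begin(), Structors.end(),
                   [](const Structor &L, const Structor &R) {
                     return L.Priority < R.Priority;
                   });
  for (const Structor &S : Structors)
    std::printf("%d %s\n", S.Priority, S.Func);
}
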
@@ -1368,7 +1420,7 @@ void AsmPrinter::EmitModuleIdents(Module &M) {
if (const NamedMDNode *NMD = M.getNamedMetadata("llvm.ident")) {
for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
const MDNode *N = NMD->getOperand(i);
- assert(N->getNumOperands() == 1 &&
+ assert(N->getNumOperands() == 1 &&
"llvm.ident metadata entry can have only one operand");
const MDString *S = cast<MDString>(N->getOperand(0));
OutStreamer.EmitIdent(S->getString());
@@ -1424,8 +1476,8 @@ void AsmPrinter::EmitLabelDifference(const MCSymbol *Hi, const MCSymbol *Lo,
/// where the size in bytes of the directive is specified by Size and Hi/Lo
/// specify the labels. This implicitly uses .set if it is available.
void AsmPrinter::EmitLabelOffsetDifference(const MCSymbol *Hi, uint64_t Offset,
- const MCSymbol *Lo, unsigned Size)
- const {
+ const MCSymbol *Lo,
+ unsigned Size) const {
// Emit Hi+Offset - Lo
// Get the Hi+Offset expression.
@@ -1454,8 +1506,8 @@ void AsmPrinter::EmitLabelOffsetDifference(const MCSymbol *Hi, uint64_t Offset,
/// where the size in bytes of the directive is specified by Size and Label
/// specifies the label. This implicitly uses .set if it is available.
void AsmPrinter::EmitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset,
- unsigned Size, bool IsSectionRelative)
- const {
+ unsigned Size,
+ bool IsSectionRelative) const {
if (MAI->needsDwarfSectionOffsetDirective() && IsSectionRelative) {
OutStreamer.EmitCOFFSecRel32(Label);
return;
@@ -1464,14 +1516,12 @@ void AsmPrinter::EmitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset,
// Emit Label+Offset (or just Label if Offset is zero)
const MCExpr *Expr = MCSymbolRefExpr::Create(Label, OutContext);
if (Offset)
- Expr = MCBinaryExpr::CreateAdd(Expr,
- MCConstantExpr::Create(Offset, OutContext),
- OutContext);
+ Expr = MCBinaryExpr::CreateAdd(
+ Expr, MCConstantExpr::Create(Offset, OutContext), OutContext);
OutStreamer.EmitValue(Expr, Size);
}
-
//===----------------------------------------------------------------------===//
// EmitAlignment - Emit an alignment directive to the specified power of
@@ -1480,7 +1530,7 @@ void AsmPrinter::EmitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset,
// an explicit alignment requested, it will override the alignment request
// if required for correctness.
//
-void AsmPrinter::EmitAlignment(unsigned NumBits, const GlobalValue *GV) const {
+void AsmPrinter::EmitAlignment(unsigned NumBits, const GlobalObject *GV) const {
if (GV) NumBits = getGVAlignmentLog2(GV, *TM.getDataLayout(), NumBits);
if (NumBits == 0) return; // 1-byte aligned: no need to emit alignment.
@@ -1488,7 +1538,7 @@ void AsmPrinter::EmitAlignment(unsigned NumBits, const GlobalValue *GV) const {
if (getCurrentSection()->getKind().isText())
OutStreamer.EmitCodeAlignment(1 << NumBits);
else
- OutStreamer.EmitValueToAlignment(1 << NumBits, 0, 1, 0);
+ OutStreamer.EmitValueToAlignment(1 << NumBits);
}
//===----------------------------------------------------------------------===//
@@ -1513,10 +1563,15 @@ static const MCExpr *lowerConstant(const Constant *CV, AsmPrinter &AP) {
return MCSymbolRefExpr::Create(AP.GetBlockAddressSymbol(BA), Ctx);
const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV);
- if (CE == 0) {
+ if (!CE) {
llvm_unreachable("Unknown constant value to lower!");
}
+ if (const MCExpr *RelocExpr =
+ AP.getObjFileLowering().getExecutableRelativeSymbol(CE, *AP.Mang,
+ AP.TM))
+ return RelocExpr;
+
switch (CE->getOpcode()) {
default:
// If the code isn't optimized, there may be outstanding folding
@@ -1532,8 +1587,8 @@ static const MCExpr *lowerConstant(const Constant *CV, AsmPrinter &AP) {
std::string S;
raw_string_ostream OS(S);
OS << "Unsupported expression in static initializer: ";
- WriteAsOperand(OS, CE, /*PrintType=*/false,
- !AP.MF ? 0 : AP.MF->getFunction()->getParent());
+ CE->printAsOperand(OS, /*PrintType=*/false,
+ !AP.MF ? nullptr : AP.MF->getFunction()->getParent());
report_fatal_error(OS.str());
}
case Instruction::GetElementPtr: {
@@ -1810,7 +1865,10 @@ static void emitGlobalConstantFP(const ConstantFP *CFP, AsmPrinter &AP) {
SmallString<8> StrVal;
CFP->getValueAPF().toString(StrVal);
- CFP->getType()->print(AP.OutStreamer.GetCommentOS());
+ if (CFP->getType())
+ CFP->getType()->print(AP.OutStreamer.GetCommentOS());
+ else
+ AP.OutStreamer.GetCommentOS() << "Printing <null> Type";
AP.OutStreamer.GetCommentOS() << ' ' << StrVal << '\n';
}
@@ -1823,7 +1881,8 @@ static void emitGlobalConstantFP(const ConstantFP *CFP, AsmPrinter &AP) {
// PPC's long double has odd notions of endianness compared to how LLVM
// handles it: p[0] goes first for *big* endian on PPC.
- if (AP.TM.getDataLayout()->isBigEndian() != CFP->getType()->isPPC_FP128Ty()) {
+ if (AP.TM.getDataLayout()->isBigEndian() &&
+ !CFP->getType()->isPPC_FP128Ty()) {
int Chunk = API.getNumWords() - 1;
if (TrailingBytes)
@@ -1996,15 +2055,17 @@ void AsmPrinter::printOffset(int64_t Offset, raw_ostream &OS) const {
/// GetTempSymbol - Return the MCSymbol corresponding to the assembler
/// temporary label with the specified stem and unique ID.
-MCSymbol *AsmPrinter::GetTempSymbol(StringRef Name, unsigned ID) const {
- return OutContext.GetOrCreateSymbol(Twine(MAI->getPrivateGlobalPrefix()) +
+MCSymbol *AsmPrinter::GetTempSymbol(Twine Name, unsigned ID) const {
+ const DataLayout *DL = TM.getDataLayout();
+ return OutContext.GetOrCreateSymbol(Twine(DL->getPrivateGlobalPrefix()) +
Name + Twine(ID));
}
/// GetTempSymbol - Return an assembler temporary label with the specified
/// stem.
-MCSymbol *AsmPrinter::GetTempSymbol(StringRef Name) const {
- return OutContext.GetOrCreateSymbol(Twine(MAI->getPrivateGlobalPrefix())+
+MCSymbol *AsmPrinter::GetTempSymbol(Twine Name) const {
+ const DataLayout *DL = TM.getDataLayout();
+ return OutContext.GetOrCreateSymbol(Twine(DL->getPrivateGlobalPrefix())+
Name);
}
@@ -2019,8 +2080,9 @@ MCSymbol *AsmPrinter::GetBlockAddressSymbol(const BasicBlock *BB) const {
/// GetCPISymbol - Return the symbol for the specified constant pool entry.
MCSymbol *AsmPrinter::GetCPISymbol(unsigned CPID) const {
+ const DataLayout *DL = TM.getDataLayout();
return OutContext.GetOrCreateSymbol
- (Twine(MAI->getPrivateGlobalPrefix()) + "CPI" + Twine(getFunctionNumber())
+ (Twine(DL->getPrivateGlobalPrefix()) + "CPI" + Twine(getFunctionNumber())
+ "_" + Twine(CPID));
}
@@ -2032,21 +2094,16 @@ MCSymbol *AsmPrinter::GetJTISymbol(unsigned JTID, bool isLinkerPrivate) const {
/// GetJTSetSymbol - Return the symbol for the specified jump table .set
/// FIXME: privatize to AsmPrinter.
MCSymbol *AsmPrinter::GetJTSetSymbol(unsigned UID, unsigned MBBID) const {
+ const DataLayout *DL = TM.getDataLayout();
return OutContext.GetOrCreateSymbol
- (Twine(MAI->getPrivateGlobalPrefix()) + Twine(getFunctionNumber()) + "_" +
+ (Twine(DL->getPrivateGlobalPrefix()) + Twine(getFunctionNumber()) + "_" +
Twine(UID) + "_set_" + Twine(MBBID));
}
-/// GetSymbolWithGlobalValueBase - Return the MCSymbol for a symbol with
-/// global value name as its base, with the specified suffix, and where the
-/// symbol is forced to have private linkage if ForcePrivate is true.
-MCSymbol *AsmPrinter::GetSymbolWithGlobalValueBase(const GlobalValue *GV,
- StringRef Suffix,
- bool ForcePrivate) const {
- SmallString<60> NameStr;
- Mang->getNameWithPrefix(NameStr, GV, ForcePrivate);
- NameStr.append(Suffix.begin(), Suffix.end());
- return OutContext.GetOrCreateSymbol(NameStr.str());
+MCSymbol *AsmPrinter::getSymbolWithGlobalValueBase(const GlobalValue *GV,
+ StringRef Suffix) const {
+ return getObjFileLowering().getSymbolWithGlobalValueBase(GV, Suffix, *Mang,
+ TM);
}
/// GetExternalSymbolSymbol - Return the MCSymbol for the specified
@@ -2062,7 +2119,7 @@ MCSymbol *AsmPrinter::GetExternalSymbolSymbol(StringRef Sym) const {
/// PrintParentLoopComment - Print comments about parent loops of this one.
static void PrintParentLoopComment(raw_ostream &OS, const MachineLoop *Loop,
unsigned FunctionNumber) {
- if (Loop == 0) return;
+ if (!Loop) return;
PrintParentLoopComment(OS, Loop->getParentLoop(), FunctionNumber);
OS.indent(Loop->getLoopDepth()*2)
<< "Parent Loop BB" << FunctionNumber << "_"
@@ -2076,12 +2133,12 @@ static void PrintParentLoopComment(raw_ostream &OS, const MachineLoop *Loop,
static void PrintChildLoopComment(raw_ostream &OS, const MachineLoop *Loop,
unsigned FunctionNumber) {
// Add child loop information
- for (MachineLoop::iterator CL = Loop->begin(), E = Loop->end();CL != E; ++CL){
- OS.indent((*CL)->getLoopDepth()*2)
+ for (const MachineLoop *CL : *Loop) {
+ OS.indent(CL->getLoopDepth()*2)
<< "Child Loop BB" << FunctionNumber << "_"
- << (*CL)->getHeader()->getNumber() << " Depth " << (*CL)->getLoopDepth()
+ << CL->getHeader()->getNumber() << " Depth " << CL->getLoopDepth()
<< '\n';
- PrintChildLoopComment(OS, *CL, FunctionNumber);
+ PrintChildLoopComment(OS, CL, FunctionNumber);
}
}
@@ -2091,7 +2148,7 @@ static void emitBasicBlockLoopComments(const MachineBasicBlock &MBB,
const AsmPrinter &AP) {
// Add loop depth information
const MachineLoop *Loop = LI->getLoopFor(&MBB);
- if (Loop == 0) return;
+ if (!Loop) return;
MachineBasicBlock *Header = Loop->getHeader();
assert(Header && "No header for loop");
@@ -2127,43 +2184,41 @@ static void emitBasicBlockLoopComments(const MachineBasicBlock &MBB,
/// EmitBasicBlockStart - This method prints the label for the specified
/// MachineBasicBlock, an alignment (if present) and a comment describing
/// it if appropriate.
-void AsmPrinter::EmitBasicBlockStart(const MachineBasicBlock *MBB) const {
+void AsmPrinter::EmitBasicBlockStart(const MachineBasicBlock &MBB) const {
// Emit an alignment directive for this block, if needed.
- if (unsigned Align = MBB->getAlignment())
+ if (unsigned Align = MBB.getAlignment())
EmitAlignment(Align);
// If the block has its address taken, emit any labels that were used to
// reference the block. It is possible that there is more than one label
// here, because multiple LLVM BB's may have been RAUW'd to this block after
// the references were generated.
- if (MBB->hasAddressTaken()) {
- const BasicBlock *BB = MBB->getBasicBlock();
+ if (MBB.hasAddressTaken()) {
+ const BasicBlock *BB = MBB.getBasicBlock();
if (isVerbose())
OutStreamer.AddComment("Block address taken");
- std::vector<MCSymbol*> Syms = MMI->getAddrLabelSymbolToEmit(BB);
-
- for (unsigned i = 0, e = Syms.size(); i != e; ++i)
- OutStreamer.EmitLabel(Syms[i]);
+ std::vector<MCSymbol*> Symbols = MMI->getAddrLabelSymbolToEmit(BB);
+ for (auto *Sym : Symbols)
+ OutStreamer.EmitLabel(Sym);
}
// Print some verbose block comments.
if (isVerbose()) {
- if (const BasicBlock *BB = MBB->getBasicBlock())
+ if (const BasicBlock *BB = MBB.getBasicBlock())
if (BB->hasName())
OutStreamer.AddComment("%" + BB->getName());
- emitBasicBlockLoopComments(*MBB, LI, *this);
+ emitBasicBlockLoopComments(MBB, LI, *this);
}
// Print the main label for the block.
- if (MBB->pred_empty() || isBlockOnlyReachableByFallthrough(MBB)) {
- if (isVerbose() && OutStreamer.hasRawTextSupport()) {
+ if (MBB.pred_empty() || isBlockOnlyReachableByFallthrough(&MBB)) {
+ if (isVerbose()) {
// NOTE: Want this comment at start of line, don't emit with AddComment.
- OutStreamer.EmitRawText(Twine(MAI->getCommentString()) + " BB#" +
- Twine(MBB->getNumber()) + ":");
+ OutStreamer.emitRawComment(" BB#" + Twine(MBB.getNumber()) + ":", false);
}
} else {
- OutStreamer.EmitLabel(MBB->getSymbol());
+ OutStreamer.EmitLabel(MBB.getSymbol());
}
}
@@ -2199,14 +2254,11 @@ isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const {
return false;
// If there isn't exactly one predecessor, it can't be a fall through.
- MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(), PI2 = PI;
- ++PI2;
- if (PI2 != MBB->pred_end())
+ if (MBB->pred_size() > 1)
return false;
// The predecessor has to be immediately before this block.
- MachineBasicBlock *Pred = *PI;
-
+ MachineBasicBlock *Pred = *MBB->pred_begin();
if (!Pred->isLayoutSuccessor(MBB))
return false;
@@ -2215,10 +2267,7 @@ isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const {
return true;
// Check the terminators in the previous blocks
- for (MachineBasicBlock::iterator II = Pred->getFirstTerminator(),
- IE = Pred->end(); II != IE; ++II) {
- MachineInstr &MI = *II;
-
+ for (const auto &MI : Pred->terminators()) {
// If it is not a simple branch, we are in a table somewhere.
if (!MI.isBranch() || MI.isIndirectBranch())
return false;
@@ -2239,26 +2288,29 @@ isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const {
-GCMetadataPrinter *AsmPrinter::GetOrCreateGCPrinter(GCStrategy *S) {
- if (!S->usesMetadata())
- return 0;
+GCMetadataPrinter *AsmPrinter::GetOrCreateGCPrinter(GCStrategy &S) {
+ if (!S.usesMetadata())
+ return nullptr;
gcp_map_type &GCMap = getGCMap(GCMetadataPrinters);
- gcp_map_type::iterator GCPI = GCMap.find(S);
+ gcp_map_type::iterator GCPI = GCMap.find(&S);
if (GCPI != GCMap.end())
- return GCPI->second;
+ return GCPI->second.get();
- const char *Name = S->getName().c_str();
+ const char *Name = S.getName().c_str();
for (GCMetadataPrinterRegistry::iterator
I = GCMetadataPrinterRegistry::begin(),
E = GCMetadataPrinterRegistry::end(); I != E; ++I)
if (strcmp(Name, I->getName()) == 0) {
- GCMetadataPrinter *GMP = I->instantiate();
- GMP->S = S;
- GCMap.insert(std::make_pair(S, GMP));
- return GMP;
+ std::unique_ptr<GCMetadataPrinter> GMP = I->instantiate();
+ GMP->S = &S;
+ auto IterBool = GCMap.insert(std::make_pair(&S, std::move(GMP)));
+ return IterBool.first->second.get();
}
report_fatal_error("no GCMetadataPrinter registered for GC: " + Twine(Name));
}
+
+/// Pin vtable to this file.
+AsmPrinterHandler::~AsmPrinterHandler() {}
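
GetOrCreateGCPrinter above now keeps the metadata printers in a map of std::unique_ptr and hands out raw pointers, so ownership stays with the cache. The same pattern in miniature, with illustrative names (Printer, getOrCreate) standing in for the GC types:

#include <map>
#include <memory>
#include <string>

struct Printer {
  std::string Name;
};

Printer *getOrCreate(std::map<std::string, std::unique_ptr<Printer>> &Cache,
                     const std::string &Name) {
  auto It = Cache.find(Name);
  if (It != Cache.end())
    return It->second.get(); // cached: return a non-owning pointer
  std::unique_ptr<Printer> P(new Printer);
  P->Name = Name;
  auto IterBool = Cache.insert(std::make_pair(Name, std::move(P)));
  return IterBool.first->second.get(); // the map now owns the object
}

int main() {
  std::map<std::string, std::unique_ptr<Printer>> Cache;
  Printer *A = getOrCreate(Cache, "erlang");
  Printer *B = getOrCreate(Cache, "erlang");
  return A == B ? 0 : 1; // second lookup returns the cached object
}
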
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
index b92f49c..02cd12b 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
@@ -11,8 +11,9 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
+#include "ByteStreamer.h"
#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -28,6 +29,8 @@
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "asm-printer"
+
//===----------------------------------------------------------------------===//
// Dwarf Emission Helper Routines
//===----------------------------------------------------------------------===//
@@ -52,9 +55,9 @@ void AsmPrinter::EmitULEB128(uint64_t Value, const char *Desc,
/// EmitCFAByte - Emit a .byte 42 directive for a DW_CFA_xxx value.
void AsmPrinter::EmitCFAByte(unsigned Val) const {
if (isVerbose()) {
- if (Val >= dwarf::DW_CFA_offset && Val < dwarf::DW_CFA_offset+64)
+ if (Val >= dwarf::DW_CFA_offset && Val < dwarf::DW_CFA_offset + 64)
OutStreamer.AddComment("DW_CFA_offset + Reg (" +
- Twine(Val-dwarf::DW_CFA_offset) + ")");
+ Twine(Val - dwarf::DW_CFA_offset) + ")");
else
OutStreamer.AddComment(dwarf::CallFrameString(Val));
}
@@ -63,43 +66,56 @@ void AsmPrinter::EmitCFAByte(unsigned Val) const {
static const char *DecodeDWARFEncoding(unsigned Encoding) {
switch (Encoding) {
- case dwarf::DW_EH_PE_absptr: return "absptr";
- case dwarf::DW_EH_PE_omit: return "omit";
- case dwarf::DW_EH_PE_pcrel: return "pcrel";
- case dwarf::DW_EH_PE_udata4: return "udata4";
- case dwarf::DW_EH_PE_udata8: return "udata8";
- case dwarf::DW_EH_PE_sdata4: return "sdata4";
- case dwarf::DW_EH_PE_sdata8: return "sdata8";
- case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_udata4: return "pcrel udata4";
- case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4: return "pcrel sdata4";
- case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_udata8: return "pcrel udata8";
- case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata8: return "pcrel sdata8";
- case dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |dwarf::DW_EH_PE_udata4:
+ case dwarf::DW_EH_PE_absptr:
+ return "absptr";
+ case dwarf::DW_EH_PE_omit:
+ return "omit";
+ case dwarf::DW_EH_PE_pcrel:
+ return "pcrel";
+ case dwarf::DW_EH_PE_udata4:
+ return "udata4";
+ case dwarf::DW_EH_PE_udata8:
+ return "udata8";
+ case dwarf::DW_EH_PE_sdata4:
+ return "sdata4";
+ case dwarf::DW_EH_PE_sdata8:
+ return "sdata8";
+ case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_udata4:
+ return "pcrel udata4";
+ case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4:
+ return "pcrel sdata4";
+ case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_udata8:
+ return "pcrel udata8";
+ case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata8:
+ return "pcrel sdata8";
+  case dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
+      dwarf::DW_EH_PE_udata4:
return "indirect pcrel udata4";
- case dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |dwarf::DW_EH_PE_sdata4:
+  case dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
+      dwarf::DW_EH_PE_sdata4:
return "indirect pcrel sdata4";
- case dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |dwarf::DW_EH_PE_udata8:
+  case dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
+      dwarf::DW_EH_PE_udata8:
return "indirect pcrel udata8";
- case dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |dwarf::DW_EH_PE_sdata8:
+  case dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
+      dwarf::DW_EH_PE_sdata8:
return "indirect pcrel sdata8";
}
return "<unknown encoding>";
}
-
/// EmitEncodingByte - Emit a .byte 42 directive that corresponds to an
/// encoding. If verbose assembly output is enabled, we output comments
/// describing the encoding. Desc is an optional string saying what the
/// encoding is specifying (e.g. "LSDA").
void AsmPrinter::EmitEncodingByte(unsigned Val, const char *Desc) const {
if (isVerbose()) {
- if (Desc != 0)
- OutStreamer.AddComment(Twine(Desc)+" Encoding = " +
+ if (Desc)
+ OutStreamer.AddComment(Twine(Desc) + " Encoding = " +
Twine(DecodeDWARFEncoding(Val)));
else
- OutStreamer.AddComment(Twine("Encoding = ") +
- DecodeDWARFEncoding(Val));
+ OutStreamer.AddComment(Twine("Encoding = ") + DecodeDWARFEncoding(Val));
}
OutStreamer.EmitIntValue(Val, 1);
@@ -111,11 +127,16 @@ unsigned AsmPrinter::GetSizeOfEncodedValue(unsigned Encoding) const {
return 0;
switch (Encoding & 0x07) {
- default: llvm_unreachable("Invalid encoded value.");
- case dwarf::DW_EH_PE_absptr: return TM.getDataLayout()->getPointerSize();
- case dwarf::DW_EH_PE_udata2: return 2;
- case dwarf::DW_EH_PE_udata4: return 4;
- case dwarf::DW_EH_PE_udata8: return 8;
+ default:
+ llvm_unreachable("Invalid encoded value.");
+ case dwarf::DW_EH_PE_absptr:
+ return TM.getDataLayout()->getPointerSize();
+ case dwarf::DW_EH_PE_udata2:
+ return 2;
+ case dwarf::DW_EH_PE_udata4:
+ return 4;
+ case dwarf::DW_EH_PE_udata8:
+ return 8;
}
}
@@ -125,7 +146,7 @@ void AsmPrinter::EmitTTypeReference(const GlobalValue *GV,
const TargetLoweringObjectFile &TLOF = getObjFileLowering();
const MCExpr *Exp =
- TLOF.getTTypeGlobalReference(GV, Mang, MMI, Encoding, OutStreamer);
+ TLOF.getTTypeGlobalReference(GV, Encoding, *Mang, TM, MMI, OutStreamer);
OutStreamer.EmitValue(Exp, GetSizeOfEncodedValue(Encoding));
} else
OutStreamer.EmitIntValue(0, GetSizeOfEncodedValue(Encoding));
@@ -165,6 +186,177 @@ void AsmPrinter::EmitSectionOffset(const MCSymbol *Label,
EmitLabelDifference(Label, SectionLabel, 4);
}
+/// Emit a dwarf register operation.
+static void emitDwarfRegOp(ByteStreamer &Streamer, int Reg) {
+ assert(Reg >= 0);
+ if (Reg < 32) {
+ Streamer.EmitInt8(dwarf::DW_OP_reg0 + Reg,
+ dwarf::OperationEncodingString(dwarf::DW_OP_reg0 + Reg));
+ } else {
+ Streamer.EmitInt8(dwarf::DW_OP_regx, "DW_OP_regx");
+ Streamer.EmitULEB128(Reg, Twine(Reg));
+ }
+}
+
+/// Emit a (double-)indirect dwarf register operation.
+static void emitDwarfRegOpIndirect(ByteStreamer &Streamer, int Reg, int Offset,
+ bool Deref) {
+ assert(Reg >= 0);
+ if (Reg < 32) {
+ Streamer.EmitInt8(dwarf::DW_OP_breg0 + Reg,
+ dwarf::OperationEncodingString(dwarf::DW_OP_breg0 + Reg));
+ } else {
+ Streamer.EmitInt8(dwarf::DW_OP_bregx, "DW_OP_bregx");
+ Streamer.EmitULEB128(Reg, Twine(Reg));
+ }
+ Streamer.EmitSLEB128(Offset);
+ if (Deref)
+ Streamer.EmitInt8(dwarf::DW_OP_deref, "DW_OP_deref");
+}
+
+/// Emit a dwarf register operation for describing
+/// - a small value occupying only part of a register or
+/// - a small register representing only part of a value.
+static void emitDwarfOpPiece(ByteStreamer &Streamer, unsigned SizeInBits,
+ unsigned OffsetInBits) {
+ assert(SizeInBits > 0 && "zero-sized piece");
+ unsigned SizeOfByte = 8;
+ if (OffsetInBits > 0 || SizeInBits % SizeOfByte) {
+ Streamer.EmitInt8(dwarf::DW_OP_bit_piece, "DW_OP_bit_piece");
+ Streamer.EmitULEB128(SizeInBits, Twine(SizeInBits));
+ Streamer.EmitULEB128(OffsetInBits, Twine(OffsetInBits));
+ } else {
+ Streamer.EmitInt8(dwarf::DW_OP_piece, "DW_OP_piece");
+ unsigned ByteSize = SizeInBits / SizeOfByte;
+ Streamer.EmitULEB128(ByteSize, Twine(ByteSize));
+ }
+}
+
+/// Emit a shift-right dwarf expression.
+static void emitDwarfOpShr(ByteStreamer &Streamer,
+ unsigned ShiftBy) {
+ Streamer.EmitInt8(dwarf::DW_OP_constu, "DW_OP_constu");
+ Streamer.EmitULEB128(ShiftBy);
+ Streamer.EmitInt8(dwarf::DW_OP_shr, "DW_OP_shr");
+}
+
+// Some targets do not provide a DWARF register number for every
+// register. This function attempts to emit a DWARF register by
+// emitting a piece of a super-register or by piecing together
+// multiple subregisters that alias the register.
+void AsmPrinter::EmitDwarfRegOpPiece(ByteStreamer &Streamer,
+ const MachineLocation &MLoc,
+ unsigned PieceSizeInBits,
+ unsigned PieceOffsetInBits) const {
+ assert(MLoc.isReg() && "MLoc must be a register");
+ const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ int Reg = TRI->getDwarfRegNum(MLoc.getReg(), false);
+
+ // If this is a valid register number, emit it.
+ if (Reg >= 0) {
+ emitDwarfRegOp(Streamer, Reg);
+ emitDwarfOpPiece(Streamer, PieceSizeInBits, PieceOffsetInBits);
+ return;
+ }
+
+ // Walk up the super-register chain until we find a valid number.
+ // For example, EAX on x86_64 is a 32-bit piece of RAX with offset 0.
+ for (MCSuperRegIterator SR(MLoc.getReg(), TRI); SR.isValid(); ++SR) {
+ Reg = TRI->getDwarfRegNum(*SR, false);
+ if (Reg >= 0) {
+ unsigned Idx = TRI->getSubRegIndex(*SR, MLoc.getReg());
+ unsigned Size = TRI->getSubRegIdxSize(Idx);
+ unsigned Offset = TRI->getSubRegIdxOffset(Idx);
+ OutStreamer.AddComment("super-register");
+ emitDwarfRegOp(Streamer, Reg);
+ if (PieceOffsetInBits == Offset) {
+ emitDwarfOpPiece(Streamer, Size, Offset);
+ } else {
+ // If this is part of a variable in a sub-register at a
+ // non-zero offset, we need to manually shift the value into
+ // place, since the DW_OP_piece describes the part of the
+ // variable, not the position of the subregister.
+ emitDwarfOpPiece(Streamer, Size, PieceOffsetInBits);
+ if (Offset)
+ emitDwarfOpShr(Streamer, Offset);
+ }
+ return;
+ }
+ }
+
+ // Otherwise, attempt to find a covering set of sub-register numbers.
+ // For example, Q0 on ARM is a composition of D0+D1.
+ //
+ // Keep track of the current position so we can emit the more
+ // efficient DW_OP_piece.
+ unsigned CurPos = PieceOffsetInBits;
+ // The size of the register in bits, assuming 8 bits per byte.
+ unsigned RegSize = TRI->getMinimalPhysRegClass(MLoc.getReg())->getSize() * 8;
+ // Keep track of the bits in the register we already emitted, so we
+ // can avoid emitting redundant aliasing subregs.
+ SmallBitVector Coverage(RegSize, false);
+ for (MCSubRegIterator SR(MLoc.getReg(), TRI); SR.isValid(); ++SR) {
+ unsigned Idx = TRI->getSubRegIndex(MLoc.getReg(), *SR);
+ unsigned Size = TRI->getSubRegIdxSize(Idx);
+ unsigned Offset = TRI->getSubRegIdxOffset(Idx);
+ Reg = TRI->getDwarfRegNum(*SR, false);
+
+ // Intersection between the bits we already emitted and the bits
+ // covered by this subregister.
+ SmallBitVector Intersection(RegSize, false);
+ Intersection.set(Offset, Offset + Size);
+ Intersection ^= Coverage;
+
+ // If this sub-register has a DWARF number and we haven't covered
+ // its range, emit a DWARF piece for it.
+ if (Reg >= 0 && Intersection.any()) {
+ OutStreamer.AddComment("sub-register");
+ emitDwarfRegOp(Streamer, Reg);
+ emitDwarfOpPiece(Streamer, Size, Offset == CurPos ? 0 : Offset);
+ CurPos = Offset + Size;
+
+ // Mark it as emitted.
+ Coverage.set(Offset, Offset + Size);
+ }
+ }
+
+ if (CurPos == PieceOffsetInBits) {
+ // FIXME: We have no reasonable way of handling errors in here.
+ Streamer.EmitInt8(dwarf::DW_OP_nop,
+ "nop (could not find a dwarf register number)");
+ }
+}
+
+/// EmitDwarfRegOp - Emit dwarf register operation.
+void AsmPrinter::EmitDwarfRegOp(ByteStreamer &Streamer,
+ const MachineLocation &MLoc,
+ bool Indirect) const {
+ const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ int Reg = TRI->getDwarfRegNum(MLoc.getReg(), false);
+ if (Reg < 0) {
+ // We assume that pointers are always in an addressable register.
+ if (Indirect || MLoc.isIndirect()) {
+ // FIXME: We have no reasonable way of handling errors in here. The
+ // caller might be in the middle of a dwarf expression. We should
+ // probably assert that Reg >= 0 once debug info generation is more
+ // mature.
+ Streamer.EmitInt8(dwarf::DW_OP_nop,
+ "nop (invalid dwarf register number for indirect loc)");
+ return;
+ }
+
+ // Attempt to find a valid super- or sub-register.
+ return EmitDwarfRegOpPiece(Streamer, MLoc);
+ }
+
+ if (MLoc.isIndirect())
+ emitDwarfRegOpIndirect(Streamer, Reg, MLoc.getOffset(), Indirect);
+ else if (Indirect)
+ emitDwarfRegOpIndirect(Streamer, Reg, 0, false);
+ else
+ emitDwarfRegOp(Streamer, Reg);
+}
+
//===----------------------------------------------------------------------===//
// Dwarf Lowering Routines
//===----------------------------------------------------------------------===//
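
EmitDwarfRegOpPiece above handles registers without their own DWARF number by first trying a super-register (emitting a DW_OP_piece, shifted if the sub-range sits at a non-zero offset) and otherwise stitching the value together from sub-registers, using a SmallBitVector so overlapping aliases are emitted only once. A toy version of that coverage bookkeeping with std::vector<bool> (SubReg and emitPieces are illustrative, not the LLVM API):

#include <cstdio>
#include <vector>

struct SubReg {
  unsigned Offset; // bit offset of this sub-register inside the big register
  unsigned Size;   // size in bits
  int DwarfReg;    // DWARF register number, or -1 if it has none
};

void emitPieces(unsigned RegSizeInBits, const std::vector<SubReg> &Subs) {
  std::vector<bool> Covered(RegSizeInBits, false);
  for (const SubReg &S : Subs) {
    if (S.DwarfReg < 0)
      continue;
    bool AddsBits = false; // does this sub-register cover any new bit?
    for (unsigned i = S.Offset; i < S.Offset + S.Size; ++i)
      if (!Covered[i])
        AddsBits = true;
    if (!AddsBits)
      continue; // redundant alias of bits we already described
    std::printf("DW_OP_regx %d, DW_OP_piece %u\n", S.DwarfReg, S.Size / 8);
    for (unsigned i = S.Offset; i < S.Offset + S.Size; ++i)
      Covered[i] = true;
  }
}

int main() {
  // e.g. a 128-bit register made of two 64-bit halves with DWARF numbers 256/257
  emitPieces(128, {{0, 64, 256}, {64, 64, 257}});
}
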
@@ -191,5 +383,8 @@ void AsmPrinter::emitCFIInstruction(const MCCFIInstruction &Inst) const {
case MCCFIInstruction::OpWindowSave:
OutStreamer.EmitCFIWindowSave();
break;
+ case MCCFIInstruction::OpSameValue:
+ OutStreamer.EmitCFISameValue(Inst.getRegister());
+ break;
}
}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterHandler.h b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterHandler.h
new file mode 100644
index 0000000..2825367
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterHandler.h
@@ -0,0 +1,57 @@
+//===-- lib/CodeGen/AsmPrinter/AsmPrinterHandler.h -------------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a generic interface for AsmPrinter handlers,
+// like debug and EH info emitters.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CODEGEN_ASMPRINTER_ASMPRINTERHANDLER_H__
+#define CODEGEN_ASMPRINTER_ASMPRINTERHANDLER_H__
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class MachineFunction;
+class MachineInstr;
+class MCSymbol;
+
+/// \brief Collects and handles AsmPrinter objects required to build debug
+/// or EH information.
+class AsmPrinterHandler {
+public:
+ virtual ~AsmPrinterHandler();
+
+ /// \brief For symbols that have a size designated (e.g. common symbols),
+ /// this tracks that size.
+ virtual void setSymbolSize(const MCSymbol *Sym, uint64_t Size) = 0;
+
+ /// \brief Emit all sections that should come after the content.
+ virtual void endModule() = 0;
+
+ /// \brief Gather pre-function debug information.
+ /// Every beginFunction(MF) call should be followed by an endFunction(MF)
+ /// call.
+ virtual void beginFunction(const MachineFunction *MF) = 0;
+
+ /// \brief Gather post-function debug information.
+ /// Please note that some AsmPrinter implementations may not call
+ /// beginFunction at all.
+ virtual void endFunction(const MachineFunction *MF) = 0;
+
+ /// \brief Process beginning of an instruction.
+ virtual void beginInstruction(const MachineInstr *MI) = 0;
+
+ /// \brief Process end of an instruction.
+ virtual void endInstruction() = 0;
+};
+} // End of namespace llvm
+
+#endif
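
The new AsmPrinterHandler interface gives debug-info and EH emitters a uniform set of begin/end hooks that the printer drives around modules, functions and instructions. A stripped-down sketch of how such hooks are typically dispatched (Handler, DebugInfoHandler and printFunction are illustrative stand-ins, not LLVM classes):

#include <cstdio>
#include <vector>

struct Handler {
  virtual ~Handler() {}
  virtual void beginFunction(const char *Name) = 0; // gather per-function state
  virtual void endFunction(const char *Name) = 0;   // emit what was gathered
};

struct DebugInfoHandler : Handler {
  void beginFunction(const char *Name) override {
    std::printf("collect debug info for %s\n", Name);
  }
  void endFunction(const char *Name) override {
    std::printf("emit debug info for %s\n", Name);
  }
};

void printFunction(const char *Name, const std::vector<Handler *> &Handlers) {
  for (Handler *H : Handlers)
    H->beginFunction(Name);
  // ... emit the function body here ...
  for (Handler *H : Handlers)
    H->endFunction(Name);
}

int main() {
  DebugInfoHandler DI;
  std::vector<Handler *> Handlers{&DI};
  printFunction("foo", Handlers);
}
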
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
index 4f927f6..46ee0c8 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
@@ -11,14 +11,14 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
@@ -33,8 +33,11 @@
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "asm-printer"
+
namespace {
struct SrcMgrDiagInfo {
const MDNode *LocInfo;
@@ -77,11 +80,17 @@ void AsmPrinter::EmitInlineAsm(StringRef Str, const MDNode *LocMDNode,
if (isNullTerminated)
Str = Str.substr(0, Str.size()-1);
- // If the output streamer is actually a .s file, just emit the blob textually.
+ // If the output streamer does not have mature MC support or the integrated
+ // assembler has been disabled, just emit the blob textually.
+ // Otherwise parse the asm and emit it via MC support.
// This is useful in case the asm parser doesn't handle something but the
// system assembler does.
- if (OutStreamer.hasRawTextSupport()) {
+ const MCAsmInfo *MCAI = TM.getMCAsmInfo();
+ assert(MCAI && "No MCAsmInfo");
+ if (!MCAI->useIntegratedAssembler() &&
+ !OutStreamer.isIntegratedAssemblerRequired()) {
OutStreamer.EmitRawText(Str);
+ emitInlineAsmEnd(TM.getSubtarget<MCSubtargetInfo>(), nullptr);
return;
}
@@ -91,7 +100,7 @@ void AsmPrinter::EmitInlineAsm(StringRef Str, const MDNode *LocMDNode,
// If the current LLVMContext has an inline asm handler, set it in SourceMgr.
LLVMContext &LLVMCtx = MMI->getModule()->getContext();
bool HasDiagHandler = false;
- if (LLVMCtx.getInlineAsmDiagnosticHandler() != 0) {
+ if (LLVMCtx.getInlineAsmDiagnosticHandler() != nullptr) {
// If the source manager has an issue, we arrange for srcMgrDiagHandler
// to be invoked, getting DiagInfo passed into it.
DiagInfo.LocInfo = LocMDNode;
@@ -110,20 +119,28 @@ void AsmPrinter::EmitInlineAsm(StringRef Str, const MDNode *LocMDNode,
// Tell SrcMgr about this buffer, it takes ownership of the buffer.
SrcMgr.AddNewSourceBuffer(Buffer, SMLoc());
- OwningPtr<MCAsmParser> Parser(createMCAsmParser(SrcMgr,
- OutContext, OutStreamer,
- *MAI));
-
- // FIXME: It would be nice if we can avoid createing a new instance of
- // MCSubtargetInfo here given TargetSubtargetInfo is available. However,
- // we have to watch out for asm directives which can change subtarget
- // state. e.g. .code 16, .code 32.
- OwningPtr<MCSubtargetInfo>
- STI(TM.getTarget().createMCSubtargetInfo(TM.getTargetTriple(),
- TM.getTargetCPU(),
- TM.getTargetFeatureString()));
- OwningPtr<MCTargetAsmParser>
- TAP(TM.getTarget().createMCAsmParser(*STI, *Parser, *MII));
+ std::unique_ptr<MCAsmParser> Parser(
+ createMCAsmParser(SrcMgr, OutContext, OutStreamer, *MAI));
+
+ // Initialize the parser with a fresh subtarget info. It is better to use a
+ // new STI here because the parser may modify it and we do not want those
+ // modifications to persist after parsing the inlineasm. The modifications
+ // made by the parser will be seen by the code emitters because it passes
+ // the current STI down to the EncodeInstruction() method.
+ std::unique_ptr<MCSubtargetInfo> STI(TM.getTarget().createMCSubtargetInfo(
+ TM.getTargetTriple(), TM.getTargetCPU(), TM.getTargetFeatureString()));
+
+ // Preserve a copy of the original STI because the parser may modify it. For
+ // example, when switching between arm and thumb mode. If the target needs to
+ // emit code to return to the original state it can do so in
+ // emitInlineAsmEnd().
+ MCSubtargetInfo STIOrig = *STI;
+
+ MCTargetOptions MCOptions;
+ if (MF)
+ MCOptions = MF->getTarget().Options.MCOptions;
+ std::unique_ptr<MCTargetAsmParser> TAP(
+ TM.getTarget().createMCAsmParser(*STI, *Parser, *MII, MCOptions));
if (!TAP)
report_fatal_error("Inline asm not supported by this streamer because"
" we don't have an asm parser for this target\n");
@@ -133,6 +150,7 @@ void AsmPrinter::EmitInlineAsm(StringRef Str, const MDNode *LocMDNode,
// Don't implicitly switch to the text section before the asm.
int Res = Parser->Run(/*NoInitialTextSection*/ true,
/*NoFinalize*/ true);
+ emitInlineAsmEnd(STIOrig, STI.get());
if (Res && !HasDiagHandler)
report_fatal_error("Error parsing inline asm\n");
}
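
The hunk above gives the inline-asm parser its own MCSubtargetInfo and keeps a copy of the original, so that directives inside the blob (for example .code 16 on ARM) can flip parser state without leaking into the surrounding code, and emitInlineAsmEnd can return the target to its original mode. A toy illustration of that copy-then-compare pattern, with made-up names (SubtargetState, runParser) standing in for the MC classes:

#include <cstdio>
#include <string>

struct SubtargetState {
  bool ThumbMode = false;
};

// Stand-in for the asm parser: a directive inside the blob mutates the state.
void runParser(SubtargetState &S, const std::string &Asm) {
  if (Asm.find(".code 16") != std::string::npos)
    S.ThumbMode = true;
}

// Stand-in for emitInlineAsmEnd: restore the original mode if it changed.
void emitInlineAsmEnd(const SubtargetState &Orig, const SubtargetState &End) {
  if (Orig.ThumbMode != End.ThumbMode)
    std::printf("switching back to %s\n", Orig.ThumbMode ? "thumb" : "arm");
}

void emitInlineAsm(const std::string &Asm) {
  SubtargetState STI;           // fresh state used only for this asm blob
  SubtargetState STIOrig = STI; // preserved copy of the original state
  runParser(STI, Asm);
  emitInlineAsmEnd(STIOrig, STI);
}

int main() {
  emitInlineAsm("mov r0, r1\n.code 16\nadds r0, #1\n");
}
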
@@ -216,10 +234,10 @@ static void EmitMSInlineAsmStr(const char *AsmStr, const MachineInstr *MI,
if (InlineAsm::isMemKind(OpFlags)) {
Error = AP->PrintAsmMemoryOperand(MI, OpNo, InlineAsmVariant,
- /*Modifier*/ 0, OS);
+ /*Modifier*/ nullptr, OS);
} else {
Error = AP->PrintAsmOperand(MI, OpNo, InlineAsmVariant,
- /*Modifier*/ 0, OS);
+ /*Modifier*/ nullptr, OS);
}
}
if (Error) {
@@ -311,7 +329,7 @@ static void EmitGCCInlineAsmStr(const char *AsmStr, const MachineInstr *MI,
++LastEmitted;
const char *StrStart = LastEmitted;
const char *StrEnd = strchr(StrStart, '}');
- if (StrEnd == 0)
+ if (!StrEnd)
report_fatal_error("Unterminated ${:foo} operand in inline asm"
" string: '" + Twine(AsmStr) + "'");
@@ -386,11 +404,11 @@ static void EmitGCCInlineAsmStr(const char *AsmStr, const MachineInstr *MI,
else {
if (InlineAsm::isMemKind(OpFlags)) {
Error = AP->PrintAsmMemoryOperand(MI, OpNo, InlineAsmVariant,
- Modifier[0] ? Modifier : 0,
+ Modifier[0] ? Modifier : nullptr,
OS);
} else {
Error = AP->PrintAsmOperand(MI, OpNo, InlineAsmVariant,
- Modifier[0] ? Modifier : 0, OS);
+ Modifier[0] ? Modifier : nullptr, OS);
}
}
}
@@ -427,26 +445,19 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
// If this asmstr is empty, just print the #APP/#NOAPP markers.
// These are useful to see where empty asm's wound up.
if (AsmStr[0] == 0) {
- // Don't emit the comments if writing to a .o file.
- if (!OutStreamer.hasRawTextSupport()) return;
-
- OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+
- MAI->getInlineAsmStart());
- OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+
- MAI->getInlineAsmEnd());
+ OutStreamer.emitRawComment(MAI->getInlineAsmStart());
+ OutStreamer.emitRawComment(MAI->getInlineAsmEnd());
return;
}
// Emit the #APP start marker. This has to happen even if verbose-asm isn't
- // enabled, so we use EmitRawText.
- if (OutStreamer.hasRawTextSupport())
- OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+
- MAI->getInlineAsmStart());
+ // enabled, so we use emitRawComment.
+ OutStreamer.emitRawComment(MAI->getInlineAsmStart());
// Get the !srcloc metadata node if we have it, and decode the loc cookie from
// it.
unsigned LocCookie = 0;
- const MDNode *LocMD = 0;
+ const MDNode *LocMD = nullptr;
for (unsigned i = MI->getNumOperands(); i != 0; --i) {
if (MI->getOperand(i-1).isMetadata() &&
(LocMD = MI->getOperand(i-1).getMetadata()) &&
@@ -476,10 +487,8 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
EmitInlineAsm(OS.str(), LocMD, MI->getInlineAsmDialect());
// Emit the #NOAPP end marker. This has to happen even if verbose-asm isn't
- // enabled, so we use EmitRawText.
- if (OutStreamer.hasRawTextSupport())
- OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+
- MAI->getInlineAsmEnd());
+ // enabled, so we use emitRawComment.
+ OutStreamer.emitRawComment(MAI->getInlineAsmEnd());
}
@@ -491,8 +500,9 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
/// for their own strange codes.
void AsmPrinter::PrintSpecial(const MachineInstr *MI, raw_ostream &OS,
const char *Code) const {
+ const DataLayout *DL = TM.getDataLayout();
if (!strcmp(Code, "private")) {
- OS << MAI->getPrivateGlobalPrefix();
+ OS << DL->getPrivateGlobalPrefix();
} else if (!strcmp(Code, "comment")) {
OS << MAI->getCommentString();
} else if (!strcmp(Code, "uid")) {
@@ -551,3 +561,5 @@ bool AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
return true;
}
+void AsmPrinter::emitInlineAsmEnd(const MCSubtargetInfo &StartInfo,
+ const MCSubtargetInfo *EndInfo) const {}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/ByteStreamer.h b/contrib/llvm/lib/CodeGen/AsmPrinter/ByteStreamer.h
new file mode 100644
index 0000000..6c01d65
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/ByteStreamer.h
@@ -0,0 +1,71 @@
+//===-- llvm/CodeGen/ByteStreamer.h - ByteStreamer class --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a class that can take bytes that would normally be
+// streamed via the AsmPrinter.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_BYTESTREAMER_H
+#define LLVM_CODEGEN_BYTESTREAMER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/MC/MCStreamer.h"
+#include "DIEHash.h"
+
+namespace llvm {
+class ByteStreamer {
+ public:
+ virtual ~ByteStreamer() {}
+
+ // For now we're just handling the calls we need for dwarf emission/hashing.
+ virtual void EmitInt8(uint8_t Byte, const Twine &Comment = "") = 0;
+ virtual void EmitSLEB128(uint64_t DWord, const Twine &Comment = "") = 0;
+ virtual void EmitULEB128(uint64_t DWord, const Twine &Comment = "") = 0;
+};
+
+class APByteStreamer : public ByteStreamer {
+private:
+ AsmPrinter &AP;
+
+public:
+ APByteStreamer(AsmPrinter &Asm) : AP(Asm) {}
+ void EmitInt8(uint8_t Byte, const Twine &Comment) override {
+ AP.OutStreamer.AddComment(Comment);
+ AP.EmitInt8(Byte);
+ }
+ void EmitSLEB128(uint64_t DWord, const Twine &Comment) override {
+ AP.OutStreamer.AddComment(Comment);
+ AP.EmitSLEB128(DWord);
+ }
+ void EmitULEB128(uint64_t DWord, const Twine &Comment) override {
+ AP.OutStreamer.AddComment(Comment);
+ AP.EmitULEB128(DWord);
+ }
+};
+
+class HashingByteStreamer : public ByteStreamer {
+ private:
+ DIEHash &Hash;
+ public:
+ HashingByteStreamer(DIEHash &H) : Hash(H) {}
+ void EmitInt8(uint8_t Byte, const Twine &Comment) override {
+ Hash.update(Byte);
+ }
+ void EmitSLEB128(uint64_t DWord, const Twine &Comment) override {
+ Hash.addSLEB128(DWord);
+ }
+ void EmitULEB128(uint64_t DWord, const Twine &Comment) override {
+ Hash.addULEB128(DWord);
+ }
+};
+}
+
+#endif
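
The EmitULEB128/EmitSLEB128 hooks above forward either to the AsmPrinter or to the DIE hash; both ultimately use the standard DWARF LEB128 encodings. For reference, a self-contained version of those encodings (the textbook algorithm, not the LLVM implementation):

#include <cstdint>
#include <cstdio>
#include <vector>

std::vector<uint8_t> encodeULEB128(uint64_t Value) {
  std::vector<uint8_t> Out;
  do {
    uint8_t Byte = Value & 0x7f;
    Value >>= 7;
    if (Value != 0)
      Byte |= 0x80; // more bytes follow
    Out.push_back(Byte);
  } while (Value != 0);
  return Out;
}

std::vector<uint8_t> encodeSLEB128(int64_t Value) {
  std::vector<uint8_t> Out;
  bool More;
  do {
    uint8_t Byte = Value & 0x7f;
    Value >>= 7; // relies on arithmetic shift for negative values
    More = !((Value == 0 && (Byte & 0x40) == 0) ||
             (Value == -1 && (Byte & 0x40) != 0));
    if (More)
      Byte |= 0x80;
    Out.push_back(Byte);
  } while (More);
  return Out;
}

int main() {
  for (uint8_t B : encodeSLEB128(-129))
    std::printf("%02x ", B); // prints: ff 7e
  std::printf("\n");
}
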
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp
index e39b374..c3dcd9c 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp
@@ -13,17 +13,18 @@
#include "DIE.h"
#include "DwarfDebug.h"
+#include "DwarfUnit.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/LEB128.h"
#include "llvm/Support/MD5.h"
using namespace llvm;
@@ -48,7 +49,7 @@ void DIEAbbrevData::Profile(FoldingSetNodeID &ID) const {
///
void DIEAbbrev::Profile(FoldingSetNodeID &ID) const {
ID.AddInteger(unsigned(Tag));
- ID.AddInteger(ChildrenFlag);
+ ID.AddInteger(unsigned(Children));
// For each attribute description.
for (unsigned i = 0, N = Data.size(); i < N; ++i)
@@ -62,7 +63,7 @@ void DIEAbbrev::Emit(AsmPrinter *AP) const {
AP->EmitULEB128(Tag, dwarf::TagString(Tag));
// Emit whether it has children DIEs.
- AP->EmitULEB128(ChildrenFlag, dwarf::ChildrenString(ChildrenFlag));
+ AP->EmitULEB128((unsigned)Children, dwarf::ChildrenString(Children));
// For each attribute description.
for (unsigned i = 0, N = Data.size(); i < N; ++i) {
@@ -89,7 +90,7 @@ void DIEAbbrev::print(raw_ostream &O) {
<< " "
<< dwarf::TagString(Tag)
<< " "
- << dwarf::ChildrenString(ChildrenFlag)
+ << dwarf::ChildrenString(Children)
<< '\n';
for (unsigned i = 0, N = Data.size(); i < N; ++i) {
@@ -103,36 +104,28 @@ void DIEAbbrev::print(raw_ostream &O) {
void DIEAbbrev::dump() { print(dbgs()); }
#endif
-//===----------------------------------------------------------------------===//
-// DIE Implementation
-//===----------------------------------------------------------------------===//
-
-DIE::~DIE() {
- for (unsigned i = 0, N = Children.size(); i < N; ++i)
- delete Children[i];
-}
-
-/// Climb up the parent chain to get the compile unit DIE to which this DIE
+/// Climb up the parent chain to get the unit DIE to which this DIE
/// belongs.
-const DIE *DIE::getCompileUnit() const {
- const DIE *Cu = getCompileUnitOrNull();
+const DIE *DIE::getUnit() const {
+ const DIE *Cu = getUnitOrNull();
assert(Cu && "We should not have orphaned DIEs.");
return Cu;
}
-/// Climb up the parent chain to get the compile unit DIE this DIE belongs
+/// Climb up the parent chain to get the unit DIE this DIE belongs
/// to. Return NULL if DIE is not added to an owner yet.
-const DIE *DIE::getCompileUnitOrNull() const {
+const DIE *DIE::getUnitOrNull() const {
const DIE *p = this;
while (p) {
- if (p->getTag() == dwarf::DW_TAG_compile_unit)
+ if (p->getTag() == dwarf::DW_TAG_compile_unit ||
+ p->getTag() == dwarf::DW_TAG_type_unit)
return p;
p = p->getParent();
}
- return NULL;
+ return nullptr;
}
-DIEValue *DIE::findAttribute(uint16_t Attribute) {
+DIEValue *DIE::findAttribute(dwarf::Attribute Attribute) const {
const SmallVectorImpl<DIEValue *> &Values = getValues();
const DIEAbbrev &Abbrevs = getAbbrev();
@@ -141,7 +134,7 @@ DIEValue *DIE::findAttribute(uint16_t Attribute) {
for (size_t i = 0; i < Values.size(); ++i)
if (Abbrevs.getData()[i].getAttribute() == Attribute)
return Values[i];
- return NULL;
+ return nullptr;
}
#ifndef NDEBUG
@@ -159,7 +152,7 @@ void DIE::print(raw_ostream &O, unsigned IndentCount) const {
O << Indent
<< dwarf::TagString(Abbrev.getTag())
<< " "
- << dwarf::ChildrenString(Abbrev.getChildrenFlag()) << "\n";
+ << dwarf::ChildrenString(Abbrev.hasChildren()) << "\n";
} else {
O << "Size: " << Size << "\n";
}
@@ -215,8 +208,7 @@ void DIEInteger::EmitValue(AsmPrinter *Asm, dwarf::Form Form) const {
case dwarf::DW_FORM_flag_present:
// Emit something to keep the lines and comments in sync.
// FIXME: Is there a better way to do this?
- if (Asm->OutStreamer.hasRawTextSupport())
- Asm->OutStreamer.EmitRawText("");
+ Asm->OutStreamer.AddBlankLine();
return;
case dwarf::DW_FORM_flag: // Fall thru
case dwarf::DW_FORM_ref1: // Fall thru
@@ -227,6 +219,7 @@ void DIEInteger::EmitValue(AsmPrinter *Asm, dwarf::Form Form) const {
case dwarf::DW_FORM_ref4: // Fall thru
case dwarf::DW_FORM_data4: Size = 4; break;
case dwarf::DW_FORM_ref8: // Fall thru
+ case dwarf::DW_FORM_ref_sig8: // Fall thru
case dwarf::DW_FORM_data8: Size = 8; break;
case dwarf::DW_FORM_GNU_str_index: Asm->EmitULEB128(Integer); return;
case dwarf::DW_FORM_GNU_addr_index: Asm->EmitULEB128(Integer); return;
@@ -253,11 +246,12 @@ unsigned DIEInteger::SizeOf(AsmPrinter *AP, dwarf::Form Form) const {
case dwarf::DW_FORM_ref4: // Fall thru
case dwarf::DW_FORM_data4: return sizeof(int32_t);
case dwarf::DW_FORM_ref8: // Fall thru
+ case dwarf::DW_FORM_ref_sig8: // Fall thru
case dwarf::DW_FORM_data8: return sizeof(int64_t);
- case dwarf::DW_FORM_GNU_str_index: return MCAsmInfo::getULEB128Size(Integer);
- case dwarf::DW_FORM_GNU_addr_index: return MCAsmInfo::getULEB128Size(Integer);
- case dwarf::DW_FORM_udata: return MCAsmInfo::getULEB128Size(Integer);
- case dwarf::DW_FORM_sdata: return MCAsmInfo::getSLEB128Size(Integer);
+ case dwarf::DW_FORM_GNU_str_index: return getULEB128Size(Integer);
+ case dwarf::DW_FORM_GNU_addr_index: return getULEB128Size(Integer);
+ case dwarf::DW_FORM_udata: return getULEB128Size(Integer);
+ case dwarf::DW_FORM_sdata: return getSLEB128Size(Integer);
case dwarf::DW_FORM_addr: return AP->getDataLayout().getPointerSize();
default: llvm_unreachable("DIE Value form not supported yet");
}
@@ -379,7 +373,26 @@ void DIEString::print(raw_ostream &O) const {
/// EmitValue - Emit debug information entry offset.
///
void DIEEntry::EmitValue(AsmPrinter *AP, dwarf::Form Form) const {
- AP->EmitInt32(Entry->getOffset());
+
+ if (Form == dwarf::DW_FORM_ref_addr) {
+ const DwarfDebug *DD = AP->getDwarfDebug();
+ unsigned Addr = Entry.getOffset();
+ assert(!DD->useSplitDwarf() && "TODO: dwo files can't have relocations.");
+ // For DW_FORM_ref_addr, output the offset from beginning of debug info
+ // section. Entry->getOffset() returns the offset from start of the
+ // compile unit.
+ DwarfCompileUnit *CU = DD->lookupUnit(Entry.getUnit());
+ assert(CU && "CUDie should belong to a CU.");
+ Addr += CU->getDebugInfoOffset();
+ if (AP->MAI->doesDwarfUseRelocationsAcrossSections())
+ AP->EmitLabelPlusOffset(CU->getSectionSym(), Addr,
+ DIEEntry::getRefAddrSize(AP));
+ else
+ AP->EmitLabelOffsetDifference(CU->getSectionSym(), Addr,
+ CU->getSectionSym(),
+ DIEEntry::getRefAddrSize(AP));
+ } else
+ AP->EmitInt32(Entry.getOffset());
}
unsigned DIEEntry::getRefAddrSize(AsmPrinter *AP) {
@@ -387,14 +400,87 @@ unsigned DIEEntry::getRefAddrSize(AsmPrinter *AP) {
// specified to be four bytes in the DWARF 32-bit format and eight bytes
// in the DWARF 64-bit format, while DWARF Version 2 specifies that such
// references have the same size as an address on the target system.
- if (AP->getDwarfDebug()->getDwarfVersion() == 2)
+ const DwarfDebug *DD = AP->getDwarfDebug();
+ assert(DD && "Expected Dwarf Debug info to be available");
+ if (DD->getDwarfVersion() == 2)
return AP->getDataLayout().getPointerSize();
return sizeof(int32_t);
}
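
getRefAddrSize above implements the DWARF rule that a DW_FORM_ref_addr reference is address-sized in DWARF version 2 but offset-sized afterwards (4 bytes in the 32-bit DWARF format). The same rule as a tiny illustrative helper (not LLVM API), ignoring the 64-bit DWARF format just as the code above does:

// Illustrative only: size in bytes of a DW_FORM_ref_addr reference.
unsigned refAddrSize(unsigned DwarfVersion, unsigned PointerSizeInBytes) {
  if (DwarfVersion == 2)
    return PointerSizeInBytes; // same size as an address on the target
  return 4;                    // 32-bit DWARF format section offset
}
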
#ifndef NDEBUG
void DIEEntry::print(raw_ostream &O) const {
- O << format("Die: 0x%lx", (long)(intptr_t)Entry);
+ O << format("Die: 0x%lx", (long)(intptr_t)&Entry);
+}
+#endif
+
+//===----------------------------------------------------------------------===//
+// DIETypeSignature Implementation
+//===----------------------------------------------------------------------===//
+void DIETypeSignature::EmitValue(AsmPrinter *Asm, dwarf::Form Form) const {
+ assert(Form == dwarf::DW_FORM_ref_sig8);
+ Asm->OutStreamer.EmitIntValue(Unit.getTypeSignature(), 8);
+}
+
+#ifndef NDEBUG
+void DIETypeSignature::print(raw_ostream &O) const {
+ O << format("Type Unit: 0x%lx", Unit.getTypeSignature());
+}
+
+void DIETypeSignature::dump() const { print(dbgs()); }
+#endif
+
+//===----------------------------------------------------------------------===//
+// DIELoc Implementation
+//===----------------------------------------------------------------------===//
+
+/// ComputeSize - calculate the size of the location expression.
+///
+unsigned DIELoc::ComputeSize(AsmPrinter *AP) const {
+ if (!Size) {
+ const SmallVectorImpl<DIEAbbrevData> &AbbrevData = Abbrev.getData();
+ for (unsigned i = 0, N = Values.size(); i < N; ++i)
+ Size += Values[i]->SizeOf(AP, AbbrevData[i].getForm());
+ }
+
+ return Size;
+}
+
+/// EmitValue - Emit location data.
+///
+void DIELoc::EmitValue(AsmPrinter *Asm, dwarf::Form Form) const {
+ switch (Form) {
+ default: llvm_unreachable("Improper form for block");
+ case dwarf::DW_FORM_block1: Asm->EmitInt8(Size); break;
+ case dwarf::DW_FORM_block2: Asm->EmitInt16(Size); break;
+ case dwarf::DW_FORM_block4: Asm->EmitInt32(Size); break;
+ case dwarf::DW_FORM_block:
+ case dwarf::DW_FORM_exprloc:
+ Asm->EmitULEB128(Size); break;
+ }
+
+ const SmallVectorImpl<DIEAbbrevData> &AbbrevData = Abbrev.getData();
+ for (unsigned i = 0, N = Values.size(); i < N; ++i)
+ Values[i]->EmitValue(Asm, AbbrevData[i].getForm());
+}
+
+/// SizeOf - Determine size of location data in bytes.
+///
+unsigned DIELoc::SizeOf(AsmPrinter *AP, dwarf::Form Form) const {
+ switch (Form) {
+ case dwarf::DW_FORM_block1: return Size + sizeof(int8_t);
+ case dwarf::DW_FORM_block2: return Size + sizeof(int16_t);
+ case dwarf::DW_FORM_block4: return Size + sizeof(int32_t);
+ case dwarf::DW_FORM_block:
+ case dwarf::DW_FORM_exprloc:
+ return Size + getULEB128Size(Size);
+ default: llvm_unreachable("Improper form for block");
+ }
+}
+
+#ifndef NDEBUG
+void DIELoc::print(raw_ostream &O) const {
+ O << "ExprLoc: ";
+ DIE::print(O, 5);
}
#endif
@@ -404,7 +490,7 @@ void DIEEntry::print(raw_ostream &O) const {
/// ComputeSize - calculate the size of the block.
///
-unsigned DIEBlock::ComputeSize(AsmPrinter *AP) {
+unsigned DIEBlock::ComputeSize(AsmPrinter *AP) const {
if (!Size) {
const SmallVectorImpl<DIEAbbrevData> &AbbrevData = Abbrev.getData();
for (unsigned i = 0, N = Values.size(); i < N; ++i)
@@ -437,7 +523,7 @@ unsigned DIEBlock::SizeOf(AsmPrinter *AP, dwarf::Form Form) const {
case dwarf::DW_FORM_block1: return Size + sizeof(int8_t);
case dwarf::DW_FORM_block2: return Size + sizeof(int16_t);
case dwarf::DW_FORM_block4: return Size + sizeof(int32_t);
- case dwarf::DW_FORM_block: return Size + MCAsmInfo::getULEB128Size(Size);
+ case dwarf::DW_FORM_block: return Size + getULEB128Size(Size);
default: llvm_unreachable("Improper form for block");
}
}
@@ -448,3 +534,34 @@ void DIEBlock::print(raw_ostream &O) const {
DIE::print(O, 5);
}
#endif
+
+//===----------------------------------------------------------------------===//
+// DIELocList Implementation
+//===----------------------------------------------------------------------===//
+
+unsigned DIELocList::SizeOf(AsmPrinter *AP, dwarf::Form Form) const {
+ if (Form == dwarf::DW_FORM_data4)
+ return 4;
+ if (Form == dwarf::DW_FORM_sec_offset)
+ return 4;
+ return AP->getDataLayout().getPointerSize();
+}
+
+/// EmitValue - Emit label value.
+///
+void DIELocList::EmitValue(AsmPrinter *AP, dwarf::Form Form) const {
+ DwarfDebug *DD = AP->getDwarfDebug();
+ MCSymbol *Label = DD->getDebugLocEntries()[Index].Label;
+
+ if (AP->MAI->doesDwarfUseRelocationsAcrossSections() && !DD->useSplitDwarf())
+ AP->EmitSectionOffset(Label, DD->getDebugLocSym());
+ else
+ AP->EmitLabelDifference(Label, DD->getDebugLocSym(), 4);
+}
+
+#ifndef NDEBUG
+void DIELocList::print(raw_ostream &O) const {
+ O << "LocList: " << Index;
+
+}
+#endif
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.h
index f4fa326..ef05f17 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.h
@@ -16,437 +16,571 @@
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/Dwarf.h"
-#include "llvm/MC/MCExpr.h"
#include <vector>
namespace llvm {
- class AsmPrinter;
- class MCSymbol;
- class MCSymbolRefExpr;
- class raw_ostream;
-
- //===--------------------------------------------------------------------===//
- /// DIEAbbrevData - Dwarf abbreviation data, describes one attribute of a
- /// Dwarf abbreviation.
- class DIEAbbrevData {
- /// Attribute - Dwarf attribute code.
- ///
- dwarf::Attribute Attribute;
-
- /// Form - Dwarf form code.
- ///
- dwarf::Form Form;
- public:
- DIEAbbrevData(dwarf::Attribute A, dwarf::Form F) : Attribute(A), Form(F) {}
-
- // Accessors.
- dwarf::Attribute getAttribute() const { return Attribute; }
- dwarf::Form getForm() const { return Form; }
-
- /// Profile - Used to gather unique data for the abbreviation folding set.
- ///
- void Profile(FoldingSetNodeID &ID) const;
- };
+class AsmPrinter;
+class MCExpr;
+class MCSymbol;
+class raw_ostream;
+class DwarfTypeUnit;
+
+//===--------------------------------------------------------------------===//
+/// DIEAbbrevData - Dwarf abbreviation data, describes one attribute of a
+/// Dwarf abbreviation.
+class DIEAbbrevData {
+ /// Attribute - Dwarf attribute code.
+ ///
+ dwarf::Attribute Attribute;
- //===--------------------------------------------------------------------===//
- /// DIEAbbrev - Dwarf abbreviation, describes the organization of a debug
- /// information object.
- class DIEAbbrev : public FoldingSetNode {
- /// Tag - Dwarf tag code.
- ///
- dwarf::Tag Tag;
-
- /// ChildrenFlag - Dwarf children flag.
- ///
- uint16_t ChildrenFlag;
-
- /// Unique number for node.
- ///
- unsigned Number;
-
- /// Data - Raw data bytes for abbreviation.
- ///
- SmallVector<DIEAbbrevData, 12> Data;
-
- public:
- DIEAbbrev(dwarf::Tag T, uint16_t C) : Tag(T), ChildrenFlag(C), Data() {}
-
- // Accessors.
- dwarf::Tag getTag() const { return Tag; }
- unsigned getNumber() const { return Number; }
- uint16_t getChildrenFlag() const { return ChildrenFlag; }
- const SmallVectorImpl<DIEAbbrevData> &getData() const { return Data; }
- void setChildrenFlag(uint16_t CF) { ChildrenFlag = CF; }
- void setNumber(unsigned N) { Number = N; }
-
- /// AddAttribute - Adds another set of attribute information to the
- /// abbreviation.
- void AddAttribute(dwarf::Attribute Attribute, dwarf::Form Form) {
- Data.push_back(DIEAbbrevData(Attribute, Form));
- }
+ /// Form - Dwarf form code.
+ ///
+ dwarf::Form Form;
+
+public:
+ DIEAbbrevData(dwarf::Attribute A, dwarf::Form F) : Attribute(A), Form(F) {}
+
+ // Accessors.
+ dwarf::Attribute getAttribute() const { return Attribute; }
+ dwarf::Form getForm() const { return Form; }
+
+ /// Profile - Used to gather unique data for the abbreviation folding set.
+ ///
+ void Profile(FoldingSetNodeID &ID) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// DIEAbbrev - Dwarf abbreviation, describes the organization of a debug
+/// information object.
+class DIEAbbrev : public FoldingSetNode {
+ /// Unique number for node.
+ ///
+ unsigned Number;
- /// Profile - Used to gather unique data for the abbreviation folding set.
- ///
- void Profile(FoldingSetNodeID &ID) const;
+ /// Tag - Dwarf tag code.
+ ///
+ dwarf::Tag Tag;
- /// Emit - Print the abbreviation using the specified asm printer.
- ///
- void Emit(AsmPrinter *AP) const;
+ /// Children - Whether or not this node has children.
+ ///
+ // This cheats a bit in all of the uses since the values in the standard
+ // are 0 and 1 for no children and children respectively.
+ bool Children;
+
+ /// Data - Raw data bytes for abbreviation.
+ ///
+ SmallVector<DIEAbbrevData, 12> Data;
+
+public:
+ DIEAbbrev(dwarf::Tag T, bool C) : Tag(T), Children(C), Data() {}
+
+ // Accessors.
+ dwarf::Tag getTag() const { return Tag; }
+ unsigned getNumber() const { return Number; }
+ bool hasChildren() const { return Children; }
+ const SmallVectorImpl<DIEAbbrevData> &getData() const { return Data; }
+ void setChildrenFlag(bool hasChild) { Children = hasChild; }
+ void setNumber(unsigned N) { Number = N; }
+
+ /// AddAttribute - Adds another set of attribute information to the
+ /// abbreviation.
+ void AddAttribute(dwarf::Attribute Attribute, dwarf::Form Form) {
+ Data.push_back(DIEAbbrevData(Attribute, Form));
+ }
+
+ /// Profile - Used to gather unique data for the abbreviation folding set.
+ ///
+ void Profile(FoldingSetNodeID &ID) const;
+
+ /// Emit - Print the abbreviation using the specified asm printer.
+ ///
+ void Emit(AsmPrinter *AP) const;
#ifndef NDEBUG
- void print(raw_ostream &O);
- void dump();
+ void print(raw_ostream &O);
+ void dump();
#endif
- };
+};
- //===--------------------------------------------------------------------===//
- /// DIE - A structured debug information entry. Has an abbreviation which
- /// describes its organization.
- class DIEValue;
-
- class DIE {
- protected:
- /// Offset - Offset in debug info section.
- ///
- unsigned Offset;
-
- /// Size - Size of instance + children.
- ///
- unsigned Size;
-
- /// Abbrev - Buffer for constructing abbreviation.
- ///
- DIEAbbrev Abbrev;
-
- /// Children DIEs.
- ///
- std::vector<DIE *> Children;
-
- DIE *Parent;
-
- /// Attribute values.
- ///
- SmallVector<DIEValue*, 12> Values;
-
- public:
- explicit DIE(unsigned Tag)
- : Offset(0), Size(0), Abbrev((dwarf::Tag)Tag, dwarf::DW_CHILDREN_no),
- Parent(0) {}
- virtual ~DIE();
-
- // Accessors.
- DIEAbbrev &getAbbrev() { return Abbrev; }
- const DIEAbbrev &getAbbrev() const { return Abbrev; }
- unsigned getAbbrevNumber() const { return Abbrev.getNumber(); }
- dwarf::Tag getTag() const { return Abbrev.getTag(); }
- unsigned getOffset() const { return Offset; }
- unsigned getSize() const { return Size; }
- const std::vector<DIE *> &getChildren() const { return Children; }
- const SmallVectorImpl<DIEValue*> &getValues() const { return Values; }
- DIE *getParent() const { return Parent; }
- /// Climb up the parent chain to get the compile unit DIE this DIE belongs
- /// to.
- const DIE *getCompileUnit() const;
- /// Similar to getCompileUnit, returns null when DIE is not added to an
- /// owner yet.
- const DIE *getCompileUnitOrNull() const;
- void setOffset(unsigned O) { Offset = O; }
- void setSize(unsigned S) { Size = S; }
-
- /// addValue - Add a value and attributes to a DIE.
- ///
- void addValue(dwarf::Attribute Attribute, dwarf::Form Form,
- DIEValue *Value) {
- Abbrev.AddAttribute(Attribute, Form);
- Values.push_back(Value);
- }
+//===--------------------------------------------------------------------===//
+/// DIE - A structured debug information entry. Has an abbreviation which
+/// describes its organization.
+class DIEValue;
- /// addChild - Add a child to the DIE.
- ///
- void addChild(DIE *Child) {
- assert(!Child->getParent());
- Abbrev.setChildrenFlag(dwarf::DW_CHILDREN_yes);
- Children.push_back(Child);
- Child->Parent = this;
- }
+class DIE {
+protected:
+ /// Offset - Offset in debug info section.
+ ///
+ unsigned Offset;
+
+ /// Size - Size of instance + children.
+ ///
+ unsigned Size;
+
+ /// Abbrev - Buffer for constructing abbreviation.
+ ///
+ DIEAbbrev Abbrev;
+
+ /// Children DIEs.
+ ///
+  // This can't be a vector<DIE> because pointer validity is required for the
+  // Parent pointer and DIEEntry.
+ // It can't be a list<DIE> because some clients need pointer validity before
+ // the object has been added to any child list
+ // (eg: DwarfUnit::constructVariableDIE). These aren't insurmountable, but may
+ // be more convoluted than beneficial.
+ std::vector<std::unique_ptr<DIE>> Children;
+
+ DIE *Parent;
- /// findAttribute - Find a value in the DIE with the attribute given, returns NULL
- /// if no such attribute exists.
- DIEValue *findAttribute(uint16_t Attribute);
+ /// Attribute values.
+ ///
+ SmallVector<DIEValue *, 12> Values;
+
+protected:
+ DIE()
+ : Offset(0), Size(0), Abbrev((dwarf::Tag)0, dwarf::DW_CHILDREN_no),
+ Parent(nullptr) {}
+
+public:
+ explicit DIE(dwarf::Tag Tag)
+ : Offset(0), Size(0), Abbrev((dwarf::Tag)Tag, dwarf::DW_CHILDREN_no),
+ Parent(nullptr) {}
+
+ // Accessors.
+ DIEAbbrev &getAbbrev() { return Abbrev; }
+ const DIEAbbrev &getAbbrev() const { return Abbrev; }
+ unsigned getAbbrevNumber() const { return Abbrev.getNumber(); }
+ dwarf::Tag getTag() const { return Abbrev.getTag(); }
+ unsigned getOffset() const { return Offset; }
+ unsigned getSize() const { return Size; }
+ const std::vector<std::unique_ptr<DIE>> &getChildren() const {
+ return Children;
+ }
+ const SmallVectorImpl<DIEValue *> &getValues() const { return Values; }
+ DIE *getParent() const { return Parent; }
+ /// Climb up the parent chain to get the compile or type unit DIE this DIE
+ /// belongs to.
+ const DIE *getUnit() const;
+ /// Similar to getUnit, returns null when DIE is not added to an
+ /// owner yet.
+ const DIE *getUnitOrNull() const;
+ void setOffset(unsigned O) { Offset = O; }
+ void setSize(unsigned S) { Size = S; }
+
+ /// addValue - Add a value and attributes to a DIE.
+ ///
+ void addValue(dwarf::Attribute Attribute, dwarf::Form Form, DIEValue *Value) {
+ Abbrev.AddAttribute(Attribute, Form);
+ Values.push_back(Value);
+ }
+
+ /// addChild - Add a child to the DIE.
+ ///
+ void addChild(std::unique_ptr<DIE> Child) {
+ assert(!Child->getParent());
+ Abbrev.setChildrenFlag(dwarf::DW_CHILDREN_yes);
+ Child->Parent = this;
+ Children.push_back(std::move(Child));
+ }
+
+ /// findAttribute - Find a value in the DIE with the attribute given,
+ /// returns NULL if no such attribute exists.
+ DIEValue *findAttribute(dwarf::Attribute Attribute) const;
#ifndef NDEBUG
- void print(raw_ostream &O, unsigned IndentCount = 0) const;
- void dump();
+ void print(raw_ostream &O, unsigned IndentCount = 0) const;
+ void dump();
#endif
+};
+
+//===--------------------------------------------------------------------===//
+/// DIEValue - A debug information entry value. Some of these roughly correlate
+/// to DWARF attribute classes.
+///
+class DIEValue {
+ virtual void anchor();
+
+public:
+ enum Type {
+ isInteger,
+ isString,
+ isExpr,
+ isLabel,
+ isDelta,
+ isEntry,
+ isTypeSignature,
+ isBlock,
+ isLoc,
+ isLocList,
};
- //===--------------------------------------------------------------------===//
- /// DIEValue - A debug information entry value.
- ///
- class DIEValue {
- virtual void anchor();
- public:
- enum {
- isInteger,
- isString,
- isExpr,
- isLabel,
- isDelta,
- isEntry,
- isBlock
- };
- protected:
- /// Type - Type of data stored in the value.
- ///
- unsigned Type;
- public:
- explicit DIEValue(unsigned T) : Type(T) {}
- virtual ~DIEValue() {}
-
- // Accessors
- unsigned getType() const { return Type; }
-
- /// EmitValue - Emit value via the Dwarf writer.
- ///
- virtual void EmitValue(AsmPrinter *AP, dwarf::Form Form) const = 0;
-
- /// SizeOf - Return the size of a value in bytes.
- ///
- virtual unsigned SizeOf(AsmPrinter *AP, dwarf::Form Form) const = 0;
+protected:
+ /// Ty - Type of data stored in the value.
+ ///
+ Type Ty;
+
+ explicit DIEValue(Type T) : Ty(T) {}
+ virtual ~DIEValue() {}
+
+public:
+ // Accessors
+ Type getType() const { return Ty; }
+
+ /// EmitValue - Emit value via the Dwarf writer.
+ ///
+ virtual void EmitValue(AsmPrinter *AP, dwarf::Form Form) const = 0;
+
+ /// SizeOf - Return the size of a value in bytes.
+ ///
+ virtual unsigned SizeOf(AsmPrinter *AP, dwarf::Form Form) const = 0;
#ifndef NDEBUG
- virtual void print(raw_ostream &O) const = 0;
- void dump() const;
+ virtual void print(raw_ostream &O) const = 0;
+ void dump() const;
#endif
- };
+};
+
+//===--------------------------------------------------------------------===//
+/// DIEInteger - An integer value DIE.
+///
+class DIEInteger : public DIEValue {
+ uint64_t Integer;
- //===--------------------------------------------------------------------===//
- /// DIEInteger - An integer value DIE.
- ///
- class DIEInteger : public DIEValue {
- uint64_t Integer;
- public:
- explicit DIEInteger(uint64_t I) : DIEValue(isInteger), Integer(I) {}
-
- /// BestForm - Choose the best form for integer.
- ///
- static dwarf::Form BestForm(bool IsSigned, uint64_t Int) {
- if (IsSigned) {
- const int64_t SignedInt = Int;
- if ((char)Int == SignedInt) return dwarf::DW_FORM_data1;
- if ((short)Int == SignedInt) return dwarf::DW_FORM_data2;
- if ((int)Int == SignedInt) return dwarf::DW_FORM_data4;
- } else {
- if ((unsigned char)Int == Int) return dwarf::DW_FORM_data1;
- if ((unsigned short)Int == Int) return dwarf::DW_FORM_data2;
- if ((unsigned int)Int == Int) return dwarf::DW_FORM_data4;
- }
- return dwarf::DW_FORM_data8;
+public:
+ explicit DIEInteger(uint64_t I) : DIEValue(isInteger), Integer(I) {}
+
+ /// BestForm - Choose the best form for integer.
+ ///
+ static dwarf::Form BestForm(bool IsSigned, uint64_t Int) {
+ if (IsSigned) {
+ const int64_t SignedInt = Int;
+ if ((char)Int == SignedInt)
+ return dwarf::DW_FORM_data1;
+ if ((short)Int == SignedInt)
+ return dwarf::DW_FORM_data2;
+ if ((int)Int == SignedInt)
+ return dwarf::DW_FORM_data4;
+ } else {
+ if ((unsigned char)Int == Int)
+ return dwarf::DW_FORM_data1;
+ if ((unsigned short)Int == Int)
+ return dwarf::DW_FORM_data2;
+ if ((unsigned int)Int == Int)
+ return dwarf::DW_FORM_data4;
}
+ return dwarf::DW_FORM_data8;
+ }
- /// EmitValue - Emit integer of appropriate size.
- ///
- virtual void EmitValue(AsmPrinter *AP, dwarf::Form Form) const;
+ /// EmitValue - Emit integer of appropriate size.
+ ///
+ void EmitValue(AsmPrinter *AP, dwarf::Form Form) const override;
- uint64_t getValue() const { return Integer; }
+ uint64_t getValue() const { return Integer; }
- /// SizeOf - Determine size of integer value in bytes.
- ///
- virtual unsigned SizeOf(AsmPrinter *AP, dwarf::Form Form) const;
+ /// SizeOf - Determine size of integer value in bytes.
+ ///
+ unsigned SizeOf(AsmPrinter *AP, dwarf::Form Form) const override;
- // Implement isa/cast/dyncast.
- static bool classof(const DIEValue *I) { return I->getType() == isInteger; }
+ // Implement isa/cast/dyncast.
+ static bool classof(const DIEValue *I) { return I->getType() == isInteger; }
#ifndef NDEBUG
- virtual void print(raw_ostream &O) const;
+ void print(raw_ostream &O) const override;
#endif
- };
+};
- //===--------------------------------------------------------------------===//
- /// DIEExpr - An expression DIE.
- //
- class DIEExpr : public DIEValue {
- const MCExpr *Expr;
- public:
- explicit DIEExpr(const MCExpr *E) : DIEValue(isExpr), Expr(E) {}
+//===--------------------------------------------------------------------===//
+/// DIEExpr - An expression DIE.
+//
+class DIEExpr : public DIEValue {
+ const MCExpr *Expr;
- /// EmitValue - Emit expression value.
- ///
- virtual void EmitValue(AsmPrinter *AP, dwarf::Form Form) const;
+public:
+ explicit DIEExpr(const MCExpr *E) : DIEValue(isExpr), Expr(E) {}
- /// getValue - Get MCExpr.
- ///
- const MCExpr *getValue() const { return Expr; }
+ /// EmitValue - Emit expression value.
+ ///
+ void EmitValue(AsmPrinter *AP, dwarf::Form Form) const override;
- /// SizeOf - Determine size of expression value in bytes.
- ///
- virtual unsigned SizeOf(AsmPrinter *AP, dwarf::Form Form) const;
+ /// getValue - Get MCExpr.
+ ///
+ const MCExpr *getValue() const { return Expr; }
+
+ /// SizeOf - Determine size of expression value in bytes.
+ ///
+ unsigned SizeOf(AsmPrinter *AP, dwarf::Form Form) const override;
- // Implement isa/cast/dyncast.
- static bool classof(const DIEValue *E) { return E->getType() == isExpr; }
+ // Implement isa/cast/dyncast.
+ static bool classof(const DIEValue *E) { return E->getType() == isExpr; }
#ifndef NDEBUG
- virtual void print(raw_ostream &O) const;
+ void print(raw_ostream &O) const override;
#endif
- };
+};
+
+//===--------------------------------------------------------------------===//
+/// DIELabel - A label DIE.
+//
+class DIELabel : public DIEValue {
+ const MCSymbol *Label;
- //===--------------------------------------------------------------------===//
- /// DIELabel - A label DIE.
- //
- class DIELabel : public DIEValue {
- const MCSymbol *Label;
- public:
- explicit DIELabel(const MCSymbol *L) : DIEValue(isLabel), Label(L) {}
+public:
+ explicit DIELabel(const MCSymbol *L) : DIEValue(isLabel), Label(L) {}
- /// EmitValue - Emit label value.
- ///
- virtual void EmitValue(AsmPrinter *AP, dwarf::Form Form) const;
+ /// EmitValue - Emit label value.
+ ///
+ void EmitValue(AsmPrinter *AP, dwarf::Form Form) const override;
- /// getValue - Get MCSymbol.
- ///
- const MCSymbol *getValue() const { return Label; }
+ /// getValue - Get MCSymbol.
+ ///
+ const MCSymbol *getValue() const { return Label; }
- /// SizeOf - Determine size of label value in bytes.
- ///
- virtual unsigned SizeOf(AsmPrinter *AP, dwarf::Form Form) const;
+ /// SizeOf - Determine size of label value in bytes.
+ ///
+ unsigned SizeOf(AsmPrinter *AP, dwarf::Form Form) const override;
- // Implement isa/cast/dyncast.
- static bool classof(const DIEValue *L) { return L->getType() == isLabel; }
+ // Implement isa/cast/dyncast.
+ static bool classof(const DIEValue *L) { return L->getType() == isLabel; }
#ifndef NDEBUG
- virtual void print(raw_ostream &O) const;
+ void print(raw_ostream &O) const override;
#endif
- };
+};
- //===--------------------------------------------------------------------===//
- /// DIEDelta - A simple label difference DIE.
- ///
- class DIEDelta : public DIEValue {
- const MCSymbol *LabelHi;
- const MCSymbol *LabelLo;
- public:
- DIEDelta(const MCSymbol *Hi, const MCSymbol *Lo)
+//===--------------------------------------------------------------------===//
+/// DIEDelta - A simple label difference DIE.
+///
+class DIEDelta : public DIEValue {
+ const MCSymbol *LabelHi;
+ const MCSymbol *LabelLo;
+
+public:
+ DIEDelta(const MCSymbol *Hi, const MCSymbol *Lo)
: DIEValue(isDelta), LabelHi(Hi), LabelLo(Lo) {}
- /// EmitValue - Emit delta value.
- ///
- virtual void EmitValue(AsmPrinter *AP, dwarf::Form Form) const;
+ /// EmitValue - Emit delta value.
+ ///
+ void EmitValue(AsmPrinter *AP, dwarf::Form Form) const override;
- /// SizeOf - Determine size of delta value in bytes.
- ///
- virtual unsigned SizeOf(AsmPrinter *AP, dwarf::Form Form) const;
+ /// SizeOf - Determine size of delta value in bytes.
+ ///
+ unsigned SizeOf(AsmPrinter *AP, dwarf::Form Form) const override;
- // Implement isa/cast/dyncast.
- static bool classof(const DIEValue *D) { return D->getType() == isDelta; }
+ // Implement isa/cast/dyncast.
+ static bool classof(const DIEValue *D) { return D->getType() == isDelta; }
#ifndef NDEBUG
- virtual void print(raw_ostream &O) const;
+ void print(raw_ostream &O) const override;
#endif
- };
+};
+
+//===--------------------------------------------------------------------===//
+/// DIEString - A container for string values.
+///
+class DIEString : public DIEValue {
+ const DIEValue *Access;
+ const StringRef Str;
+
+public:
+ DIEString(const DIEValue *Acc, const StringRef S)
+ : DIEValue(isString), Access(Acc), Str(S) {}
- //===--------------------------------------------------------------------===//
- /// DIEString - A container for string values.
+ /// getString - Grab the string out of the object.
+ StringRef getString() const { return Str; }
+
+  /// EmitValue - Emit string value.
///
- class DIEString : public DIEValue {
- const DIEValue *Access;
- const StringRef Str;
+ void EmitValue(AsmPrinter *AP, dwarf::Form Form) const override;
- public:
- DIEString(const DIEValue *Acc, const StringRef S)
- : DIEValue(isString), Access(Acc), Str(S) {}
+  /// SizeOf - Determine size of string value in bytes.
+ ///
+ unsigned SizeOf(AsmPrinter *AP, dwarf::Form Form) const override;
- /// getString - Grab the string out of the object.
- StringRef getString() const { return Str; }
+ // Implement isa/cast/dyncast.
+ static bool classof(const DIEValue *D) { return D->getType() == isString; }
- /// EmitValue - Emit delta value.
- ///
- virtual void EmitValue(AsmPrinter *AP, dwarf::Form Form) const;
+#ifndef NDEBUG
+ void print(raw_ostream &O) const override;
+#endif
+};
- /// SizeOf - Determine size of delta value in bytes.
- ///
- virtual unsigned SizeOf(AsmPrinter *AP, dwarf::Form Form) const;
+//===--------------------------------------------------------------------===//
+/// DIEEntry - A pointer to another debug information entry. An instance of
+/// this class can also be used as a proxy for a debug information entry not
+/// yet defined (ie. types.)
+class DIEEntry : public DIEValue {
+ DIE &Entry;
- // Implement isa/cast/dyncast.
- static bool classof(const DIEValue *D) { return D->getType() == isString; }
+public:
+ explicit DIEEntry(DIE &E) : DIEValue(isEntry), Entry(E) {
+ }
- #ifndef NDEBUG
- virtual void print(raw_ostream &O) const;
- #endif
- };
+ DIE &getEntry() const { return Entry; }
- //===--------------------------------------------------------------------===//
- /// DIEEntry - A pointer to another debug information entry. An instance of
- /// this class can also be used as a proxy for a debug information entry not
- /// yet defined (ie. types.)
- class DIEEntry : public DIEValue {
- DIE *const Entry;
- public:
- explicit DIEEntry(DIE *E) : DIEValue(isEntry), Entry(E) {
- assert(E && "Cannot construct a DIEEntry with a null DIE");
- }
+ /// EmitValue - Emit debug information entry offset.
+ ///
+ void EmitValue(AsmPrinter *AP, dwarf::Form Form) const override;
- DIE *getEntry() const { return Entry; }
+ /// SizeOf - Determine size of debug information entry in bytes.
+ ///
+ unsigned SizeOf(AsmPrinter *AP, dwarf::Form Form) const override {
+ return Form == dwarf::DW_FORM_ref_addr ? getRefAddrSize(AP)
+ : sizeof(int32_t);
+ }
- /// EmitValue - Emit debug information entry offset.
- ///
- virtual void EmitValue(AsmPrinter *AP, dwarf::Form Form) const;
+ /// Returns size of a ref_addr entry.
+ static unsigned getRefAddrSize(AsmPrinter *AP);
- /// SizeOf - Determine size of debug information entry in bytes.
- ///
- virtual unsigned SizeOf(AsmPrinter *AP, dwarf::Form Form) const {
- return Form == dwarf::DW_FORM_ref_addr ? getRefAddrSize(AP)
- : sizeof(int32_t);
- }
+ // Implement isa/cast/dyncast.
+ static bool classof(const DIEValue *E) { return E->getType() == isEntry; }
+
+#ifndef NDEBUG
+ void print(raw_ostream &O) const override;
+#endif
+};
+
+//===--------------------------------------------------------------------===//
+/// \brief A signature reference to a type unit.
+class DIETypeSignature : public DIEValue {
+ const DwarfTypeUnit &Unit;
+
+public:
+ explicit DIETypeSignature(const DwarfTypeUnit &Unit)
+ : DIEValue(isTypeSignature), Unit(Unit) {}
+
+ /// \brief Emit type unit signature.
+ void EmitValue(AsmPrinter *Asm, dwarf::Form Form) const override;
+
+ /// Returns size of a ref_sig8 entry.
+ unsigned SizeOf(AsmPrinter *AP, dwarf::Form Form) const override {
+ assert(Form == dwarf::DW_FORM_ref_sig8);
+ return 8;
+ }
+
+ // \brief Implement isa/cast/dyncast.
+ static bool classof(const DIEValue *E) {
+ return E->getType() == isTypeSignature;
+ }
+#ifndef NDEBUG
+ void print(raw_ostream &O) const override;
+ void dump() const;
+#endif
+};
+
+//===--------------------------------------------------------------------===//
+/// DIELoc - Represents an expression location.
+//
+class DIELoc : public DIEValue, public DIE {
+ mutable unsigned Size; // Size in bytes excluding size header.
+public:
+ DIELoc() : DIEValue(isLoc), Size(0) {}
- /// Returns size of a ref_addr entry.
- static unsigned getRefAddrSize(AsmPrinter *AP);
+ /// ComputeSize - Calculate the size of the location expression.
+ ///
+ unsigned ComputeSize(AsmPrinter *AP) const;
- // Implement isa/cast/dyncast.
- static bool classof(const DIEValue *E) { return E->getType() == isEntry; }
+ /// BestForm - Choose the best form for data.
+ ///
+ dwarf::Form BestForm(unsigned DwarfVersion) const {
+ if (DwarfVersion > 3)
+ return dwarf::DW_FORM_exprloc;
+ // Pre-DWARF4 location expressions were blocks and not exprloc.
+ if ((unsigned char)Size == Size)
+ return dwarf::DW_FORM_block1;
+ if ((unsigned short)Size == Size)
+ return dwarf::DW_FORM_block2;
+ if ((unsigned int)Size == Size)
+ return dwarf::DW_FORM_block4;
+ return dwarf::DW_FORM_block;
+ }
+
+ /// EmitValue - Emit location data.
+ ///
+ void EmitValue(AsmPrinter *AP, dwarf::Form Form) const override;
+
+ /// SizeOf - Determine size of location data in bytes.
+ ///
+ unsigned SizeOf(AsmPrinter *AP, dwarf::Form Form) const override;
+
+ // Implement isa/cast/dyncast.
+ static bool classof(const DIEValue *E) { return E->getType() == isLoc; }
#ifndef NDEBUG
- virtual void print(raw_ostream &O) const;
+ void print(raw_ostream &O) const override;
#endif
- };
+};
- //===--------------------------------------------------------------------===//
- /// DIEBlock - A block of values. Primarily used for location expressions.
- //
- class DIEBlock : public DIEValue, public DIE {
- unsigned Size; // Size in bytes excluding size header.
- public:
- DIEBlock() : DIEValue(isBlock), DIE(0), Size(0) {}
-
- /// ComputeSize - calculate the size of the block.
- ///
- unsigned ComputeSize(AsmPrinter *AP);
-
- /// BestForm - Choose the best form for data.
- ///
- dwarf::Form BestForm() const {
- if ((unsigned char)Size == Size) return dwarf::DW_FORM_block1;
- if ((unsigned short)Size == Size) return dwarf::DW_FORM_block2;
- if ((unsigned int)Size == Size) return dwarf::DW_FORM_block4;
- return dwarf::DW_FORM_block;
- }
+//===--------------------------------------------------------------------===//
+/// DIEBlock - Represents a block of values.
+//
+class DIEBlock : public DIEValue, public DIE {
+ mutable unsigned Size; // Size in bytes excluding size header.
+public:
+ DIEBlock() : DIEValue(isBlock), Size(0) {}
- /// EmitValue - Emit block data.
- ///
- virtual void EmitValue(AsmPrinter *AP, dwarf::Form Form) const;
+  /// ComputeSize - Calculate the size of the block.
+ ///
+ unsigned ComputeSize(AsmPrinter *AP) const;
- /// SizeOf - Determine size of block data in bytes.
- ///
- virtual unsigned SizeOf(AsmPrinter *AP, dwarf::Form Form) const;
+ /// BestForm - Choose the best form for data.
+ ///
+ dwarf::Form BestForm() const {
+ if ((unsigned char)Size == Size)
+ return dwarf::DW_FORM_block1;
+ if ((unsigned short)Size == Size)
+ return dwarf::DW_FORM_block2;
+ if ((unsigned int)Size == Size)
+ return dwarf::DW_FORM_block4;
+ return dwarf::DW_FORM_block;
+ }
+
+  /// EmitValue - Emit block data.
+  ///
+  void EmitValue(AsmPrinter *AP, dwarf::Form Form) const override;
+
+  /// SizeOf - Determine size of block data in bytes.
+ ///
+ unsigned SizeOf(AsmPrinter *AP, dwarf::Form Form) const override;
- // Implement isa/cast/dyncast.
- static bool classof(const DIEValue *E) { return E->getType() == isBlock; }
+ // Implement isa/cast/dyncast.
+ static bool classof(const DIEValue *E) { return E->getType() == isBlock; }
#ifndef NDEBUG
- virtual void print(raw_ostream &O) const;
+ void print(raw_ostream &O) const override;
#endif
- };
+};
+
+//===--------------------------------------------------------------------===//
+/// DIELocList - Represents a pointer to a location list in the debug_loc
+/// section.
+//
+class DIELocList : public DIEValue {
+ // Index into the .debug_loc vector.
+ size_t Index;
+
+public:
+ DIELocList(size_t I) : DIEValue(isLocList), Index(I) {}
+
+ /// getValue - Grab the current index out.
+ size_t getValue() const { return Index; }
+
+ /// EmitValue - Emit location data.
+ ///
+ void EmitValue(AsmPrinter *AP, dwarf::Form Form) const override;
+
+ /// SizeOf - Determine size of location data in bytes.
+ ///
+ unsigned SizeOf(AsmPrinter *AP, dwarf::Form Form) const override;
+
+ // Implement isa/cast/dyncast.
+ static bool classof(const DIEValue *E) { return E->getType() == isLocList; }
+
+#ifndef NDEBUG
+ void print(raw_ostream &O) const override;
+#endif
+};
} // end llvm namespace
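
DIELoc::BestForm above prefers DW_FORM_exprloc whenever the unit targets DWARF 4 or later and otherwise falls back to the smallest blockN form whose length field can hold the expression size. A standalone sketch of that selection, with names local to this example:

#include <cassert>
#include <cstdint>

enum class Form { Block1, Block2, Block4, Block, ExprLoc };

// Mirrors the selection order: exprloc for DWARF 4+, otherwise the smallest
// blockN form whose fixed-size length field can hold Size.
static Form bestLocForm(unsigned DwarfVersion, uint64_t Size) {
  if (DwarfVersion > 3)
    return Form::ExprLoc;
  if (Size <= UINT8_MAX)
    return Form::Block1;
  if (Size <= UINT16_MAX)
    return Form::Block2;
  if (Size <= UINT32_MAX)
    return Form::Block4;
  return Form::Block; // length emitted as a ULEB128
}

int main() {
  assert(bestLocForm(4, 10) == Form::ExprLoc);
  assert(bestLocForm(2, 10) == Form::Block1);
  assert(bestLocForm(3, 300) == Form::Block2);
  assert(bestLocForm(3, 70000) == Form::Block4);
  return 0;
}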
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DIEHash.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DIEHash.cpp
index 95eca90..c2fad59 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DIEHash.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DIEHash.cpp
@@ -11,14 +11,13 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "dwarfdebug"
-
+#include "ByteStreamer.h"
#include "DIEHash.h"
-
#include "DIE.h"
-#include "DwarfCompileUnit.h"
+#include "DwarfDebug.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/Endian.h"
@@ -27,6 +26,8 @@
using namespace llvm;
+#define DEBUG_TYPE "dwarfdebug"
+
/// \brief Grabs the string in whichever attribute is passed in and returns
/// a reference to it.
static StringRef getDIEStringAttr(const DIE &Die, uint16_t Attr) {
@@ -75,7 +76,7 @@ void DIEHash::addSLEB128(int64_t Value) {
do {
uint8_t Byte = Value & 0x7f;
Value >>= 7;
- More = !((((Value == 0 ) && ((Byte & 0x40) == 0)) ||
+ More = !((((Value == 0) && ((Byte & 0x40) == 0)) ||
((Value == -1) && ((Byte & 0x40) != 0))));
if (More)
Byte |= 0x80; // Mark this byte to show that more bytes will follow.
@@ -92,10 +93,12 @@ void DIEHash::addParentContext(const DIE &Parent) {
// outermost such construct...
SmallVector<const DIE *, 1> Parents;
const DIE *Cur = &Parent;
- while (Cur->getTag() != dwarf::DW_TAG_compile_unit) {
+ while (Cur->getParent()) {
Parents.push_back(Cur);
Cur = Cur->getParent();
}
+ assert(Cur->getTag() == dwarf::DW_TAG_compile_unit ||
+ Cur->getTag() == dwarf::DW_TAG_type_unit);
// Reverse iterate over our list to go from the outermost construct to the
// innermost.
@@ -134,55 +137,55 @@ void DIEHash::collectAttributes(const DIE &Die, DIEAttrs &Attrs) {
<< dwarf::AttributeString(Abbrevs.getData()[i].getAttribute())
<< " added.\n");
switch (Abbrevs.getData()[i].getAttribute()) {
- COLLECT_ATTR(DW_AT_name);
- COLLECT_ATTR(DW_AT_accessibility);
- COLLECT_ATTR(DW_AT_address_class);
- COLLECT_ATTR(DW_AT_allocated);
- COLLECT_ATTR(DW_AT_artificial);
- COLLECT_ATTR(DW_AT_associated);
- COLLECT_ATTR(DW_AT_binary_scale);
- COLLECT_ATTR(DW_AT_bit_offset);
- COLLECT_ATTR(DW_AT_bit_size);
- COLLECT_ATTR(DW_AT_bit_stride);
- COLLECT_ATTR(DW_AT_byte_size);
- COLLECT_ATTR(DW_AT_byte_stride);
- COLLECT_ATTR(DW_AT_const_expr);
- COLLECT_ATTR(DW_AT_const_value);
- COLLECT_ATTR(DW_AT_containing_type);
- COLLECT_ATTR(DW_AT_count);
- COLLECT_ATTR(DW_AT_data_bit_offset);
- COLLECT_ATTR(DW_AT_data_location);
- COLLECT_ATTR(DW_AT_data_member_location);
- COLLECT_ATTR(DW_AT_decimal_scale);
- COLLECT_ATTR(DW_AT_decimal_sign);
- COLLECT_ATTR(DW_AT_default_value);
- COLLECT_ATTR(DW_AT_digit_count);
- COLLECT_ATTR(DW_AT_discr);
- COLLECT_ATTR(DW_AT_discr_list);
- COLLECT_ATTR(DW_AT_discr_value);
- COLLECT_ATTR(DW_AT_encoding);
- COLLECT_ATTR(DW_AT_enum_class);
- COLLECT_ATTR(DW_AT_endianity);
- COLLECT_ATTR(DW_AT_explicit);
- COLLECT_ATTR(DW_AT_is_optional);
- COLLECT_ATTR(DW_AT_location);
- COLLECT_ATTR(DW_AT_lower_bound);
- COLLECT_ATTR(DW_AT_mutable);
- COLLECT_ATTR(DW_AT_ordering);
- COLLECT_ATTR(DW_AT_picture_string);
- COLLECT_ATTR(DW_AT_prototyped);
- COLLECT_ATTR(DW_AT_small);
- COLLECT_ATTR(DW_AT_segment);
- COLLECT_ATTR(DW_AT_string_length);
- COLLECT_ATTR(DW_AT_threads_scaled);
- COLLECT_ATTR(DW_AT_upper_bound);
- COLLECT_ATTR(DW_AT_use_location);
- COLLECT_ATTR(DW_AT_use_UTF8);
- COLLECT_ATTR(DW_AT_variable_parameter);
- COLLECT_ATTR(DW_AT_virtuality);
- COLLECT_ATTR(DW_AT_visibility);
- COLLECT_ATTR(DW_AT_vtable_elem_location);
- COLLECT_ATTR(DW_AT_type);
+ COLLECT_ATTR(DW_AT_name);
+ COLLECT_ATTR(DW_AT_accessibility);
+ COLLECT_ATTR(DW_AT_address_class);
+ COLLECT_ATTR(DW_AT_allocated);
+ COLLECT_ATTR(DW_AT_artificial);
+ COLLECT_ATTR(DW_AT_associated);
+ COLLECT_ATTR(DW_AT_binary_scale);
+ COLLECT_ATTR(DW_AT_bit_offset);
+ COLLECT_ATTR(DW_AT_bit_size);
+ COLLECT_ATTR(DW_AT_bit_stride);
+ COLLECT_ATTR(DW_AT_byte_size);
+ COLLECT_ATTR(DW_AT_byte_stride);
+ COLLECT_ATTR(DW_AT_const_expr);
+ COLLECT_ATTR(DW_AT_const_value);
+ COLLECT_ATTR(DW_AT_containing_type);
+ COLLECT_ATTR(DW_AT_count);
+ COLLECT_ATTR(DW_AT_data_bit_offset);
+ COLLECT_ATTR(DW_AT_data_location);
+ COLLECT_ATTR(DW_AT_data_member_location);
+ COLLECT_ATTR(DW_AT_decimal_scale);
+ COLLECT_ATTR(DW_AT_decimal_sign);
+ COLLECT_ATTR(DW_AT_default_value);
+ COLLECT_ATTR(DW_AT_digit_count);
+ COLLECT_ATTR(DW_AT_discr);
+ COLLECT_ATTR(DW_AT_discr_list);
+ COLLECT_ATTR(DW_AT_discr_value);
+ COLLECT_ATTR(DW_AT_encoding);
+ COLLECT_ATTR(DW_AT_enum_class);
+ COLLECT_ATTR(DW_AT_endianity);
+ COLLECT_ATTR(DW_AT_explicit);
+ COLLECT_ATTR(DW_AT_is_optional);
+ COLLECT_ATTR(DW_AT_location);
+ COLLECT_ATTR(DW_AT_lower_bound);
+ COLLECT_ATTR(DW_AT_mutable);
+ COLLECT_ATTR(DW_AT_ordering);
+ COLLECT_ATTR(DW_AT_picture_string);
+ COLLECT_ATTR(DW_AT_prototyped);
+ COLLECT_ATTR(DW_AT_small);
+ COLLECT_ATTR(DW_AT_segment);
+ COLLECT_ATTR(DW_AT_string_length);
+ COLLECT_ATTR(DW_AT_threads_scaled);
+ COLLECT_ATTR(DW_AT_upper_bound);
+ COLLECT_ATTR(DW_AT_use_location);
+ COLLECT_ATTR(DW_AT_use_UTF8);
+ COLLECT_ATTR(DW_AT_variable_parameter);
+ COLLECT_ATTR(DW_AT_virtuality);
+ COLLECT_ATTR(DW_AT_visibility);
+ COLLECT_ATTR(DW_AT_vtable_elem_location);
+ COLLECT_ATTR(DW_AT_type);
default:
break;
}
@@ -269,6 +272,24 @@ void DIEHash::hashDIEEntry(dwarf::Attribute Attribute, dwarf::Tag Tag,
computeHash(Entry);
}
+// Hash all of the values in a block-like set of values. This assumes that
+// all of the data is going to be added as integers.
+void DIEHash::hashBlockData(const SmallVectorImpl<DIEValue *> &Values) {
+ for (SmallVectorImpl<DIEValue *>::const_iterator I = Values.begin(),
+ E = Values.end();
+ I != E; ++I)
+ Hash.update((uint64_t)cast<DIEInteger>(*I)->getValue());
+}
+
+// Hash the contents of a loclistptr class.
+void DIEHash::hashLocList(const DIELocList &LocList) {
+ HashingByteStreamer Streamer(*this);
+ DwarfDebug &DD = *AP->getDwarfDebug();
+ for (const auto &Entry :
+ DD.getDebugLocEntries()[LocList.getValue()].List)
+ DD.emitDebugLocEntry(Streamer, Entry);
+}
+
// Hash an individual attribute \param Attr based on the type of attribute and
// the form.
void DIEHash::hashAttribute(AttrEntry Attr, dwarf::Tag Tag) {
@@ -276,43 +297,76 @@ void DIEHash::hashAttribute(AttrEntry Attr, dwarf::Tag Tag) {
const DIEAbbrevData *Desc = Attr.Desc;
dwarf::Attribute Attribute = Desc->getAttribute();
- // 7.27 Step 3
- // ... An attribute that refers to another type entry T is processed as
- // follows:
- if (const DIEEntry *EntryAttr = dyn_cast<DIEEntry>(Value)) {
- hashDIEEntry(Attribute, Tag, *EntryAttr->getEntry());
- return;
+ // Other attribute values use the letter 'A' as the marker, and the value
+ // consists of the form code (encoded as an unsigned LEB128 value) followed by
+ // the encoding of the value according to the form code. To ensure
+ // reproducibility of the signature, the set of forms used in the signature
+ // computation is limited to the following: DW_FORM_sdata, DW_FORM_flag,
+ // DW_FORM_string, and DW_FORM_block.
+
+ switch (Value->getType()) {
+ // 7.27 Step 3
+ // ... An attribute that refers to another type entry T is processed as
+ // follows:
+ case DIEValue::isEntry:
+ hashDIEEntry(Attribute, Tag, cast<DIEEntry>(Value)->getEntry());
+ break;
+ case DIEValue::isInteger: {
+ addULEB128('A');
+ addULEB128(Attribute);
+ switch (Desc->getForm()) {
+ case dwarf::DW_FORM_data1:
+ case dwarf::DW_FORM_data2:
+ case dwarf::DW_FORM_data4:
+ case dwarf::DW_FORM_data8:
+ case dwarf::DW_FORM_udata:
+ case dwarf::DW_FORM_sdata:
+ addULEB128(dwarf::DW_FORM_sdata);
+ addSLEB128((int64_t)cast<DIEInteger>(Value)->getValue());
+ break;
+ // DW_FORM_flag_present is just flag with a value of one. We still give it a
+ // value so just use the value.
+ case dwarf::DW_FORM_flag_present:
+ case dwarf::DW_FORM_flag:
+ addULEB128(dwarf::DW_FORM_flag);
+ addULEB128((int64_t)cast<DIEInteger>(Value)->getValue());
+ break;
+ default:
+ llvm_unreachable("Unknown integer form!");
+ }
+ break;
}
-
- // Other attribute values use the letter 'A' as the marker, ...
- addULEB128('A');
-
- addULEB128(Attribute);
-
- // ... and the value consists of the form code (encoded as an unsigned LEB128
- // value) followed by the encoding of the value according to the form code. To
- // ensure reproducibility of the signature, the set of forms used in the
- // signature computation is limited to the following: DW_FORM_sdata,
- // DW_FORM_flag, DW_FORM_string, and DW_FORM_block.
- switch (Desc->getForm()) {
- case dwarf::DW_FORM_string:
- llvm_unreachable(
- "Add support for DW_FORM_string if we ever start emitting them again");
- case dwarf::DW_FORM_GNU_str_index:
- case dwarf::DW_FORM_strp:
+ case DIEValue::isString:
+ addULEB128('A');
+ addULEB128(Attribute);
addULEB128(dwarf::DW_FORM_string);
addString(cast<DIEString>(Value)->getString());
break;
- case dwarf::DW_FORM_data1:
- case dwarf::DW_FORM_data2:
- case dwarf::DW_FORM_data4:
- case dwarf::DW_FORM_data8:
- case dwarf::DW_FORM_udata:
- addULEB128(dwarf::DW_FORM_sdata);
- addSLEB128((int64_t)cast<DIEInteger>(Value)->getValue());
+ case DIEValue::isBlock:
+ case DIEValue::isLoc:
+ case DIEValue::isLocList:
+ addULEB128('A');
+ addULEB128(Attribute);
+ addULEB128(dwarf::DW_FORM_block);
+ if (isa<DIEBlock>(Value)) {
+ addULEB128(cast<DIEBlock>(Value)->ComputeSize(AP));
+ hashBlockData(cast<DIEBlock>(Value)->getValues());
+ } else if (isa<DIELoc>(Value)) {
+ addULEB128(cast<DIELoc>(Value)->ComputeSize(AP));
+ hashBlockData(cast<DIELoc>(Value)->getValues());
+ } else {
+ // We could add the block length, but that would take
+ // a bit of work and not add a lot of uniqueness
+ // to the hash in some way we could test.
+ hashLocList(*cast<DIELocList>(Value));
+ }
break;
- default:
- llvm_unreachable("Add support for additional forms");
+ // FIXME: It's uncertain whether or not we should handle this at the moment.
+ case DIEValue::isExpr:
+ case DIEValue::isLabel:
+ case DIEValue::isDelta:
+ case DIEValue::isTypeSignature:
+ llvm_unreachable("Add support for additional value types.");
}
}
@@ -409,20 +463,18 @@ void DIEHash::computeHash(const DIE &Die) {
addAttributes(Die);
// Then hash each of the children of the DIE.
- for (std::vector<DIE *>::const_iterator I = Die.getChildren().begin(),
- E = Die.getChildren().end();
- I != E; ++I) {
+ for (auto &C : Die.getChildren()) {
// 7.27 Step 7
// If C is a nested type entry or a member function entry, ...
- if (isType((*I)->getTag()) || (*I)->getTag() == dwarf::DW_TAG_subprogram) {
- StringRef Name = getDIEStringAttr(**I, dwarf::DW_AT_name);
+ if (isType(C->getTag()) || C->getTag() == dwarf::DW_TAG_subprogram) {
+ StringRef Name = getDIEStringAttr(*C, dwarf::DW_AT_name);
// ... and has a DW_AT_name attribute
if (!Name.empty()) {
- hashNestedType(**I, Name);
+ hashNestedType(*C, Name);
continue;
}
}
- computeHash(**I);
+ computeHash(*C);
}
// Following the last (or if there are no children), append a zero byte.
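
The addSLEB128 helper touched above feeds the hash exactly the byte sequence a DWARF SLEB128 encoder produces. A self-contained sketch of the same encoding loop, collecting bytes into a vector instead of updating an MD5 context (plain C++, no LLVM types):

#include <cassert>
#include <cstdint>
#include <vector>

// Encode a signed value as SLEB128. The termination test matches the one in
// DIEHash::addSLEB128: stop once the remaining value is all sign bits and the
// sign bit of the emitted byte agrees with it.
static std::vector<uint8_t> encodeSLEB128(int64_t Value) {
  std::vector<uint8_t> Out;
  bool More;
  do {
    uint8_t Byte = Value & 0x7f;
    Value >>= 7;
    More = !((Value == 0 && (Byte & 0x40) == 0) ||
             (Value == -1 && (Byte & 0x40) != 0));
    if (More)
      Byte |= 0x80; // mark that more bytes follow
    Out.push_back(Byte);
  } while (More);
  return Out;
}

int main() {
  assert(encodeSLEB128(2) == std::vector<uint8_t>({0x02}));
  assert(encodeSLEB128(-2) == std::vector<uint8_t>({0x7e}));
  assert(encodeSLEB128(127) == std::vector<uint8_t>({0xff, 0x00}));
  assert(encodeSLEB128(-129) == std::vector<uint8_t>({0xff, 0x7e}));
  return 0;
}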
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DIEHash.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DIEHash.h
index f0c4ef9..175d660 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DIEHash.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DIEHash.h
@@ -11,17 +11,22 @@
//
//===----------------------------------------------------------------------===//
+#ifndef CODEGEN_ASMPRINTER_DIEHASH_H__
+#define CODEGEN_ASMPRINTER_DIEHASH_H__
+
#include "DIE.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/MD5.h"
namespace llvm {
+class AsmPrinter;
class CompileUnit;
/// \brief An object containing the capability of hashing and adding hash
/// attributes onto a DIE.
class DIEHash {
+
// The entry for a particular attribute.
struct AttrEntry {
const DIEValue *Val;
@@ -84,6 +89,8 @@ class DIEHash {
};
public:
+ DIEHash(AsmPrinter *A = nullptr) : AP(A) {}
+
/// \brief Computes the ODR signature.
uint64_t computeDIEODRSignature(const DIE &Die);
@@ -105,13 +112,17 @@ private:
void computeHash(const DIE &Die);
// Routines that add DIEValues to the hash.
-private:
+public:
+ /// \brief Adds \param Value to the hash.
+ void update(uint8_t Value) { Hash.update(Value); }
+
/// \brief Encodes and adds \param Value to the hash as a ULEB128.
void addULEB128(uint64_t Value);
/// \brief Encodes and adds \param Value to the hash as a SLEB128.
void addSLEB128(int64_t Value);
+private:
/// \brief Adds \param Str to the hash and includes a NULL byte.
void addString(StringRef Str);
@@ -122,6 +133,13 @@ private:
/// \brief Hashes the attributes in \param Attrs in order.
void hashAttributes(const DIEAttrs &Attrs, dwarf::Tag Tag);
+ /// \brief Hashes the data in a block like DIEValue, e.g. DW_FORM_block or
+ /// DW_FORM_exprloc.
+ void hashBlockData(const SmallVectorImpl<DIEValue *> &Values);
+
+ /// \brief Hashes the contents pointed to in the .debug_loc section.
+ void hashLocList(const DIELocList &LocList);
+
/// \brief Hashes an individual attribute.
void hashAttribute(AttrEntry Attr, dwarf::Tag Tag);
@@ -136,12 +154,16 @@ private:
StringRef Name);
/// \brief Hashes a reference to a previously referenced type DIE.
- void hashRepeatedTypeReference(dwarf::Attribute Attribute, unsigned DieNumber);
+ void hashRepeatedTypeReference(dwarf::Attribute Attribute,
+ unsigned DieNumber);
void hashNestedType(const DIE &Die, StringRef Name);
private:
MD5 Hash;
+ AsmPrinter *AP;
DenseMap<const DIE *, unsigned> Numbering;
};
}
+
+#endif
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp
new file mode 100644
index 0000000..a66d08e
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp
@@ -0,0 +1,206 @@
+//===-- llvm/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "DbgValueHistoryCalculator.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include <algorithm>
+#include <map>
+#include <set>
+
+#define DEBUG_TYPE "dwarfdebug"
+
+namespace llvm {
+
+// \brief If @MI is a DBG_VALUE with debug value described by a
+// defined register, returns the number of this register.
+// Otherwise, returns 0.
+static unsigned isDescribedByReg(const MachineInstr &MI) {
+ assert(MI.isDebugValue());
+ assert(MI.getNumOperands() == 3);
+  // If the location of the variable is described using a register (directly
+  // or indirectly), this register is always the first operand.
+ return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : 0;
+}
+
+void DbgValueHistoryMap::startInstrRange(const MDNode *Var,
+ const MachineInstr &MI) {
+ // Instruction range should start with a DBG_VALUE instruction for the
+ // variable.
+ assert(MI.isDebugValue() && MI.getDebugVariable() == Var);
+ auto &Ranges = VarInstrRanges[Var];
+ if (!Ranges.empty() && Ranges.back().second == nullptr &&
+ Ranges.back().first->isIdenticalTo(&MI)) {
+ DEBUG(dbgs() << "Coalescing identical DBG_VALUE entries:\n"
+ << "\t" << Ranges.back().first << "\t" << MI << "\n");
+ return;
+ }
+ Ranges.push_back(std::make_pair(&MI, nullptr));
+}
+
+void DbgValueHistoryMap::endInstrRange(const MDNode *Var,
+ const MachineInstr &MI) {
+ auto &Ranges = VarInstrRanges[Var];
+ // Verify that the current instruction range is not yet closed.
+ assert(!Ranges.empty() && Ranges.back().second == nullptr);
+ // For now, instruction ranges are not allowed to cross basic block
+ // boundaries.
+ assert(Ranges.back().first->getParent() == MI.getParent());
+ Ranges.back().second = &MI;
+}
+
+unsigned DbgValueHistoryMap::getRegisterForVar(const MDNode *Var) const {
+ const auto &I = VarInstrRanges.find(Var);
+ if (I == VarInstrRanges.end())
+ return 0;
+ const auto &Ranges = I->second;
+ if (Ranges.empty() || Ranges.back().second != nullptr)
+ return 0;
+ return isDescribedByReg(*Ranges.back().first);
+}
+
+namespace {
+// Maps physreg numbers to the variables they describe.
+typedef std::map<unsigned, SmallVector<const MDNode *, 1>> RegDescribedVarsMap;
+}
+
+// \brief Claim that @Var is not described by @RegNo anymore.
+static void dropRegDescribedVar(RegDescribedVarsMap &RegVars,
+ unsigned RegNo, const MDNode *Var) {
+ const auto &I = RegVars.find(RegNo);
+ assert(RegNo != 0U && I != RegVars.end());
+ auto &VarSet = I->second;
+ const auto &VarPos = std::find(VarSet.begin(), VarSet.end(), Var);
+ assert(VarPos != VarSet.end());
+ VarSet.erase(VarPos);
+ // Don't keep empty sets in a map to keep it as small as possible.
+ if (VarSet.empty())
+ RegVars.erase(I);
+}
+
+// \brief Claim that @Var is now described by @RegNo.
+static void addRegDescribedVar(RegDescribedVarsMap &RegVars,
+ unsigned RegNo, const MDNode *Var) {
+ assert(RegNo != 0U);
+ auto &VarSet = RegVars[RegNo];
+ assert(std::find(VarSet.begin(), VarSet.end(), Var) == VarSet.end());
+ VarSet.push_back(Var);
+}
+
+// \brief Terminate the location range for variables described by register
+// @RegNo by inserting @ClobberingInstr to their history.
+static void clobberRegisterUses(RegDescribedVarsMap &RegVars, unsigned RegNo,
+ DbgValueHistoryMap &HistMap,
+ const MachineInstr &ClobberingInstr) {
+ const auto &I = RegVars.find(RegNo);
+ if (I == RegVars.end())
+ return;
+ // Iterate over all variables described by this register and add this
+ // instruction to their history, clobbering it.
+ for (const auto &Var : I->second)
+ HistMap.endInstrRange(Var, ClobberingInstr);
+ RegVars.erase(I);
+}
+
+// \brief Collect all registers clobbered by @MI and insert them to @Regs.
+static void collectClobberedRegisters(const MachineInstr &MI,
+ const TargetRegisterInfo *TRI,
+ std::set<unsigned> &Regs) {
+ for (const MachineOperand &MO : MI.operands()) {
+ if (!MO.isReg() || !MO.isDef() || !MO.getReg())
+ continue;
+ for (MCRegAliasIterator AI(MO.getReg(), TRI, true); AI.isValid(); ++AI)
+ Regs.insert(*AI);
+ }
+}
+
+// \brief Returns the first instruction in @MBB which corresponds to
+// the function epilogue, or nullptr if @MBB doesn't contain an epilogue.
+static const MachineInstr *getFirstEpilogueInst(const MachineBasicBlock &MBB) {
+ auto LastMI = MBB.getLastNonDebugInstr();
+ if (LastMI == MBB.end() || !LastMI->isReturn())
+ return nullptr;
+  // Assume that the epilogue starts with the instruction that has the same
+  // debug location as the return instruction.
+ DebugLoc LastLoc = LastMI->getDebugLoc();
+ auto Res = LastMI;
+ for (MachineBasicBlock::const_reverse_iterator I(std::next(LastMI)); I != MBB.rend();
+ ++I) {
+ if (I->getDebugLoc() != LastLoc)
+ return Res;
+ Res = std::prev(I.base());
+ }
+ // If all instructions have the same debug location, assume whole MBB is
+ // an epilogue.
+ return MBB.begin();
+}
+
+// \brief Collect registers that are modified in the function body, i.e.
+// registers whose contents change outside of the prologue and epilogue.
+static void collectChangingRegs(const MachineFunction *MF,
+ const TargetRegisterInfo *TRI,
+ std::set<unsigned> &Regs) {
+ for (const auto &MBB : *MF) {
+ auto FirstEpilogueInst = getFirstEpilogueInst(MBB);
+ bool IsInEpilogue = false;
+ for (const auto &MI : MBB) {
+ IsInEpilogue |= &MI == FirstEpilogueInst;
+ if (!MI.getFlag(MachineInstr::FrameSetup) && !IsInEpilogue)
+ collectClobberedRegisters(MI, TRI, Regs);
+ }
+ }
+}
+
+void calculateDbgValueHistory(const MachineFunction *MF,
+ const TargetRegisterInfo *TRI,
+ DbgValueHistoryMap &Result) {
+ std::set<unsigned> ChangingRegs;
+ collectChangingRegs(MF, TRI, ChangingRegs);
+
+ RegDescribedVarsMap RegVars;
+ for (const auto &MBB : *MF) {
+ for (const auto &MI : MBB) {
+ if (!MI.isDebugValue()) {
+ // Not a DBG_VALUE instruction. It may clobber registers which describe
+ // some variables.
+ std::set<unsigned> MIClobberedRegs;
+ collectClobberedRegisters(MI, TRI, MIClobberedRegs);
+ for (unsigned RegNo : MIClobberedRegs) {
+ if (ChangingRegs.count(RegNo))
+ clobberRegisterUses(RegVars, RegNo, Result, MI);
+ }
+ continue;
+ }
+
+ assert(MI.getNumOperands() > 1 && "Invalid DBG_VALUE instruction!");
+ const MDNode *Var = MI.getDebugVariable();
+
+ if (unsigned PrevReg = Result.getRegisterForVar(Var))
+ dropRegDescribedVar(RegVars, PrevReg, Var);
+
+ Result.startInstrRange(Var, MI);
+
+ if (unsigned NewReg = isDescribedByReg(MI))
+ addRegDescribedVar(RegVars, NewReg, Var);
+ }
+
+ // Make sure locations for register-described variables are valid only
+ // until the end of the basic block (unless it's the last basic block, in
+ // which case let their liveness run off to the end of the function).
+ if (!MBB.empty() && &MBB != &MF->back()) {
+ for (unsigned RegNo : ChangingRegs)
+ clobberRegisterUses(RegVars, RegNo, Result, MBB.back());
+ }
+ }
+}
+
+}
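
The calculator keeps one list of instruction ranges per variable; a range stays open until a newer DBG_VALUE supersedes it or the describing register is clobbered. A much-reduced standalone model of that bookkeeping, using integer instruction ids and C++17 std::optional instead of MachineInstr pointers (all names here are illustrative):

#include <cassert>
#include <map>
#include <optional>
#include <string>
#include <utility>
#include <vector>

// Per-variable list of [start, end] instruction ranges; an open range has no
// end yet, mirroring DbgValueHistoryMap's (MI, nullptr) pairs.
struct HistoryModel {
  using Range = std::pair<int, std::optional<int>>;
  std::map<std::string, std::vector<Range>> Ranges;

  void startRange(const std::string &Var, int Instr) {
    Ranges[Var].push_back({Instr, std::nullopt});
  }
  void endRange(const std::string &Var, int Instr) {
    auto &R = Ranges[Var];
    assert(!R.empty() && !R.back().second); // range must still be open
    R.back().second = Instr;
  }
};

int main() {
  HistoryModel H;
  H.startRange("x", 3); // DBG_VALUE for x at instruction 3
  H.endRange("x", 7);   // describing register clobbered at instruction 7
  H.startRange("x", 9); // new DBG_VALUE; range stays open to function end
  assert(H.Ranges["x"].size() == 2);
  assert(*H.Ranges["x"][0].second == 7);
  assert(!H.Ranges["x"][1].second);
  return 0;
}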
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h
new file mode 100644
index 0000000..b9177f0
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h
@@ -0,0 +1,54 @@
+//===-- llvm/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h ----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CODEGEN_ASMPRINTER_DBGVALUEHISTORYCALCULATOR_H_
+#define CODEGEN_ASMPRINTER_DBGVALUEHISTORYCALCULATOR_H_
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class MachineFunction;
+class MachineInstr;
+class MDNode;
+class TargetRegisterInfo;
+
+// For each user variable, keep a list of instruction ranges where this variable
+// is accessible. The variables are listed in order of appearance.
+class DbgValueHistoryMap {
+ // Each instruction range starts with a DBG_VALUE instruction, specifying the
+ // location of a variable, which is assumed to be valid until the end of the
+ // range. If end is not specified, location is valid until the start
+ // instruction of the next instruction range, or until the end of the
+ // function.
+ typedef std::pair<const MachineInstr *, const MachineInstr *> InstrRange;
+ typedef SmallVector<InstrRange, 4> InstrRanges;
+ typedef MapVector<const MDNode *, InstrRanges> InstrRangesMap;
+ InstrRangesMap VarInstrRanges;
+
+public:
+ void startInstrRange(const MDNode *Var, const MachineInstr &MI);
+ void endInstrRange(const MDNode *Var, const MachineInstr &MI);
+  // Returns the register currently describing @Var. If @Var is currently
+  // inaccessible or is not described by a register, returns 0.
+ unsigned getRegisterForVar(const MDNode *Var) const;
+
+ bool empty() const { return VarInstrRanges.empty(); }
+ void clear() { VarInstrRanges.clear(); }
+ InstrRangesMap::const_iterator begin() const { return VarInstrRanges.begin(); }
+ InstrRangesMap::const_iterator end() const { return VarInstrRanges.end(); }
+};
+
+void calculateDbgValueHistory(const MachineFunction *MF,
+ const TargetRegisterInfo *TRI,
+ DbgValueHistoryMap &Result);
+}
+
+#endif
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h
new file mode 100644
index 0000000..3beb799
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h
@@ -0,0 +1,123 @@
+//===-- llvm/CodeGen/DebugLocEntry.h - Entry in debug_loc list -*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CODEGEN_ASMPRINTER_DEBUGLOCENTRY_H__
+#define CODEGEN_ASMPRINTER_DEBUGLOCENTRY_H__
+#include "llvm/IR/Constants.h"
+#include "llvm/MC/MachineLocation.h"
+#include "llvm/MC/MCSymbol.h"
+
+namespace llvm {
+class DwarfCompileUnit;
+class MDNode;
+/// \brief This struct describes location entries emitted in the .debug_loc
+/// section.
+class DebugLocEntry {
+  // Begin and end symbols for the address range over which this location is valid.
+ const MCSymbol *Begin;
+ const MCSymbol *End;
+
+public:
+ /// A single location or constant.
+ struct Value {
+ Value(const MDNode *Var, int64_t i)
+ : Variable(Var), EntryKind(E_Integer) {
+ Constant.Int = i;
+ }
+ Value(const MDNode *Var, const ConstantFP *CFP)
+ : Variable(Var), EntryKind(E_ConstantFP) {
+ Constant.CFP = CFP;
+ }
+ Value(const MDNode *Var, const ConstantInt *CIP)
+ : Variable(Var), EntryKind(E_ConstantInt) {
+ Constant.CIP = CIP;
+ }
+ Value(const MDNode *Var, MachineLocation Loc)
+ : Variable(Var), EntryKind(E_Location), Loc(Loc) {
+ }
+
+ // The variable to which this location entry corresponds.
+ const MDNode *Variable;
+
+ // Type of entry that this represents.
+ enum EntryType { E_Location, E_Integer, E_ConstantFP, E_ConstantInt };
+ enum EntryType EntryKind;
+
+ // Either a constant,
+ union {
+ int64_t Int;
+ const ConstantFP *CFP;
+ const ConstantInt *CIP;
+ } Constant;
+
+ // Or a location in the machine frame.
+ MachineLocation Loc;
+
+ bool operator==(const Value &other) const {
+ if (EntryKind != other.EntryKind)
+ return false;
+
+ switch (EntryKind) {
+ case E_Location:
+ return Loc == other.Loc;
+ case E_Integer:
+ return Constant.Int == other.Constant.Int;
+ case E_ConstantFP:
+ return Constant.CFP == other.Constant.CFP;
+ case E_ConstantInt:
+ return Constant.CIP == other.Constant.CIP;
+ }
+ llvm_unreachable("unhandled EntryKind");
+ }
+
+ bool isLocation() const { return EntryKind == E_Location; }
+ bool isInt() const { return EntryKind == E_Integer; }
+ bool isConstantFP() const { return EntryKind == E_ConstantFP; }
+ bool isConstantInt() const { return EntryKind == E_ConstantInt; }
+ int64_t getInt() const { return Constant.Int; }
+ const ConstantFP *getConstantFP() const { return Constant.CFP; }
+ const ConstantInt *getConstantInt() const { return Constant.CIP; }
+ MachineLocation getLoc() const { return Loc; }
+ const MDNode *getVariable() const { return Variable; }
+ };
+private:
+ /// A list of locations/constants belonging to this entry.
+ SmallVector<Value, 1> Values;
+
+ /// The compile unit that this location entry is referenced by.
+ const DwarfCompileUnit *Unit;
+
+public:
+ DebugLocEntry() : Begin(nullptr), End(nullptr), Unit(nullptr) {}
+ DebugLocEntry(const MCSymbol *B, const MCSymbol *E,
+ Value Val, const DwarfCompileUnit *U)
+ : Begin(B), End(E), Unit(U) {
+ Values.push_back(std::move(Val));
+ }
+
+ /// \brief Attempt to merge this DebugLocEntry with Next and return
+ /// true if the merge was successful. Entries can be merged if they
+ /// share the same Loc/Constant and if Next immediately follows this
+ /// Entry.
+ bool Merge(const DebugLocEntry &Next) {
+ if ((End == Next.Begin && Values == Next.Values)) {
+ End = Next.End;
+ return true;
+ }
+ return false;
+ }
+ const MCSymbol *getBeginSym() const { return Begin; }
+ const MCSymbol *getEndSym() const { return End; }
+ const DwarfCompileUnit *getCU() const { return Unit; }
+ const ArrayRef<Value> getValues() const { return Values; }
+ void addValue(Value Val) { Values.push_back(Val); }
+};
+
+}
+#endif
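
DebugLocEntry::Merge only coalesces two entries when the second begins exactly where the first ends and both carry identical values; the surviving entry then spans the union of the two ranges. A small standalone model of that rule, with symbols replaced by integers:

#include <cassert>
#include <vector>

// Simplified stand-in for DebugLocEntry: an address range plus the values
// that hold over it.
struct LocEntry {
  int Begin, End;
  std::vector<int> Values;

  // Merge succeeds only for adjacent ranges carrying identical values.
  bool merge(const LocEntry &Next) {
    if (End == Next.Begin && Values == Next.Values) {
      End = Next.End;
      return true;
    }
    return false;
  }
};

int main() {
  LocEntry A{0, 8, {42}};
  LocEntry B{8, 16, {42}};
  LocEntry C{16, 24, {7}};
  assert(A.merge(B));  // adjacent, same value -> one entry covering [0, 16)
  assert(A.End == 16);
  assert(!A.merge(C)); // adjacent but the value changed -> keep separate
  return 0;
}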
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DebugLocList.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DebugLocList.h
new file mode 100644
index 0000000..7a51c7b
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DebugLocList.h
@@ -0,0 +1,23 @@
+//===--- lib/CodeGen/DebugLocList.h - DWARF debug_loc list ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CODEGEN_ASMPRINTER_DEBUGLOCLIST_H__
+#define CODEGEN_ASMPRINTER_DEBUGLOCLIST_H__
+
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/ADT/SmallVector.h"
+#include "DebugLocEntry.h"
+
+namespace llvm {
+struct DebugLocList {
+ MCSymbol *Label;
+ SmallVector<DebugLocEntry, 4> List;
+};
+}
+#endif
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp
index 689aeda..e9527c4 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp
@@ -29,14 +29,15 @@ DwarfAccelTable::DwarfAccelTable(ArrayRef<DwarfAccelTable::Atom> atomList)
: Header(8 + (atomList.size() * 4)), HeaderData(atomList),
Entries(Allocator) {}
-DwarfAccelTable::~DwarfAccelTable() {}
-
-void DwarfAccelTable::AddName(StringRef Name, DIE *die, char Flags) {
+void DwarfAccelTable::AddName(StringRef Name, MCSymbol *StrSym, const DIE *die,
+ char Flags) {
assert(Data.empty() && "Already finalized!");
// If the string is in the list already then add this die to the list
// otherwise add a new one.
DataArray &DIEs = Entries[Name];
- DIEs.push_back(new (Allocator) HashDataContents(die, Flags));
+ assert(!DIEs.StrSym || DIEs.StrSym == StrSym);
+ DIEs.StrSym = StrSym;
+ DIEs.Values.push_back(new (Allocator) HashDataContents(die, Flags));
}
void DwarfAccelTable::ComputeBucketCount(void) {
@@ -72,9 +73,10 @@ void DwarfAccelTable::FinalizeTable(AsmPrinter *Asm, StringRef Prefix) {
EI != EE; ++EI) {
// Unique the entries.
- std::stable_sort(EI->second.begin(), EI->second.end(), compareDIEs);
- EI->second.erase(std::unique(EI->second.begin(), EI->second.end()),
- EI->second.end());
+ std::stable_sort(EI->second.Values.begin(), EI->second.Values.end(), compareDIEs);
+ EI->second.Values.erase(
+ std::unique(EI->second.Values.begin(), EI->second.Values.end()),
+ EI->second.Values.end());
HashData *Entry = new (Allocator) HashData(EI->getKey(), EI->second);
Data.push_back(Entry);
@@ -172,7 +174,7 @@ void DwarfAccelTable::EmitOffsets(AsmPrinter *Asm, MCSymbol *SecBegin) {
// Walk through the buckets and emit the full data for each element in
// the bucket. For the string case emit the dies and the various offsets.
// Terminate each HashData bucket with 0.
-void DwarfAccelTable::EmitData(AsmPrinter *Asm, DwarfUnits *D) {
+void DwarfAccelTable::EmitData(AsmPrinter *Asm, DwarfFile *D) {
uint64_t PrevHash = UINT64_MAX;
for (size_t i = 0, e = Buckets.size(); i < e; ++i) {
for (HashList::const_iterator HI = Buckets[i].begin(),
@@ -181,21 +183,18 @@ void DwarfAccelTable::EmitData(AsmPrinter *Asm, DwarfUnits *D) {
// Remember to emit the label for our offset.
Asm->OutStreamer.EmitLabel((*HI)->Sym);
Asm->OutStreamer.AddComment((*HI)->Str);
- Asm->EmitSectionOffset(D->getStringPoolEntry((*HI)->Str),
- D->getStringPoolSym());
+ Asm->EmitSectionOffset((*HI)->Data.StrSym,
+ D->getStringPool().getSectionSymbol());
Asm->OutStreamer.AddComment("Num DIEs");
- Asm->EmitInt32((*HI)->Data.size());
- for (ArrayRef<HashDataContents *>::const_iterator
- DI = (*HI)->Data.begin(),
- DE = (*HI)->Data.end();
- DI != DE; ++DI) {
+ Asm->EmitInt32((*HI)->Data.Values.size());
+ for (HashDataContents *HD : (*HI)->Data.Values) {
// Emit the DIE offset
- Asm->EmitInt32((*DI)->Die->getOffset());
+ Asm->EmitInt32(HD->Die->getOffset());
// If we have multiple Atoms emit that info too.
// FIXME: A bit of a hack, we either emit only one atom or all info.
if (HeaderData.Atoms.size() > 1) {
- Asm->EmitInt16((*DI)->Die->getTag());
- Asm->EmitInt8((*DI)->Flags);
+ Asm->EmitInt16(HD->Die->getTag());
+ Asm->EmitInt8(HD->Flags);
}
}
// Emit a 0 to terminate the data unless we have a hash collision.
@@ -207,7 +206,7 @@ void DwarfAccelTable::EmitData(AsmPrinter *Asm, DwarfUnits *D) {
}
// Emit the entire data structure to the output file.
-void DwarfAccelTable::Emit(AsmPrinter *Asm, MCSymbol *SecBegin, DwarfUnits *D) {
+void DwarfAccelTable::Emit(AsmPrinter *Asm, MCSymbol *SecBegin, DwarfFile *D) {
// Emit the header.
EmitHeader(Asm);
@@ -235,10 +234,8 @@ void DwarfAccelTable::print(raw_ostream &O) {
EE = Entries.end();
EI != EE; ++EI) {
O << "Name: " << EI->getKeyData() << "\n";
- for (DataArray::const_iterator DI = EI->second.begin(),
- DE = EI->second.end();
- DI != DE; ++DI)
- (*DI)->print(O);
+ for (HashDataContents *HD : EI->second.Values)
+ HD->print(O);
}
O << "Buckets and Hashes: \n";
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.h
index 7627313..a3cc95f 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.h
@@ -18,13 +18,13 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormattedStream.h"
-#include <map>
#include <vector>
// The dwarf accelerator tables are an indirect hash table optimized
@@ -62,8 +62,7 @@
namespace llvm {
class AsmPrinter;
-class DIE;
-class DwarfUnits;
+class DwarfFile;
class DwarfAccelTable {
@@ -127,7 +126,8 @@ public:
uint16_t type; // enum AtomType
uint16_t form; // DWARF DW_FORM_ defines
- Atom(uint16_t type, uint16_t form) : type(type), form(form) {}
+ LLVM_CONSTEXPR Atom(uint16_t type, uint16_t form)
+ : type(type), form(form) {}
#ifndef NDEBUG
void print(raw_ostream &O) {
O << "Type: " << dwarf::AtomTypeString(type) << "\n"
@@ -165,10 +165,10 @@ private:
// HashData[hash_data_count]
public:
struct HashDataContents {
- DIE *Die; // Offsets
+ const DIE *Die; // Offsets
char Flags; // Specific flags to output
- HashDataContents(DIE *D, char Flags) : Die(D), Flags(Flags) {}
+ HashDataContents(const DIE *D, char Flags) : Die(D), Flags(Flags) {}
#ifndef NDEBUG
void print(raw_ostream &O) const {
O << " Offset: " << Die->getOffset() << "\n";
@@ -179,12 +179,19 @@ public:
};
private:
+ // String Data
+ struct DataArray {
+ MCSymbol *StrSym;
+ std::vector<HashDataContents *> Values;
+ DataArray() : StrSym(nullptr) {}
+ };
+ friend struct HashData;
struct HashData {
StringRef Str;
uint32_t HashValue;
MCSymbol *Sym;
- ArrayRef<HashDataContents *> Data; // offsets
- HashData(StringRef S, ArrayRef<HashDataContents *> Data)
+ DwarfAccelTable::DataArray &Data; // offsets
+ HashData(StringRef S, DwarfAccelTable::DataArray &Data)
: Str(S), Data(Data) {
HashValue = DwarfAccelTable::HashDJB(S);
}
@@ -198,10 +205,10 @@ private:
else
O << "<none>";
O << "\n";
- for (size_t i = 0; i < Data.size(); i++) {
- O << " Offset: " << Data[i]->Die->getOffset() << "\n";
- O << " Tag: " << dwarf::TagString(Data[i]->Die->getTag()) << "\n";
- O << " Flags: " << Data[i]->Flags << "\n";
+ for (HashDataContents *C : Data.Values) {
+ O << " Offset: " << C->Die->getOffset() << "\n";
+ O << " Tag: " << dwarf::TagString(C->Die->getTag()) << "\n";
+ O << " Flags: " << C->Flags << "\n";
}
}
void dump() { print(dbgs()); }
@@ -216,7 +223,7 @@ private:
void EmitBuckets(AsmPrinter *);
void EmitHashes(AsmPrinter *);
void EmitOffsets(AsmPrinter *, MCSymbol *);
- void EmitData(AsmPrinter *, DwarfUnits *D);
+ void EmitData(AsmPrinter *, DwarfFile *D);
// Allocator for HashData and HashDataContents.
BumpPtrAllocator Allocator;
@@ -226,8 +233,6 @@ private:
TableHeaderData HeaderData;
std::vector<HashData *> Data;
- // String Data
- typedef std::vector<HashDataContents *> DataArray;
typedef StringMap<DataArray, BumpPtrAllocator &> StringEntries;
StringEntries Entries;
@@ -240,10 +245,10 @@ private:
// Public Implementation
public:
DwarfAccelTable(ArrayRef<DwarfAccelTable::Atom>);
- ~DwarfAccelTable();
- void AddName(StringRef, DIE *, char = 0);
+ void AddName(StringRef Name, MCSymbol *StrSym, const DIE *Die,
+ char Flags = 0);
void FinalizeTable(AsmPrinter *, StringRef);
- void Emit(AsmPrinter *, MCSymbol *, DwarfUnits *);
+ void Emit(AsmPrinter *, MCSymbol *, DwarfFile *);
#ifndef NDEBUG
void print(raw_ostream &O);
void dump() { print(dbgs()); }
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
index 8918f3d..74215aa 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
@@ -20,6 +20,7 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
@@ -31,7 +32,6 @@
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
-#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
@@ -40,15 +40,14 @@
using namespace llvm;
DwarfCFIException::DwarfCFIException(AsmPrinter *A)
- : DwarfException(A),
- shouldEmitPersonality(false), shouldEmitLSDA(false), shouldEmitMoves(false),
- moveTypeModule(AsmPrinter::CFI_M_None) {}
+ : EHStreamer(A), shouldEmitPersonality(false), shouldEmitLSDA(false),
+ shouldEmitMoves(false), moveTypeModule(AsmPrinter::CFI_M_None) {}
DwarfCFIException::~DwarfCFIException() {}
-/// EndModule - Emit all exception information that should come after the
+/// endModule - Emit all exception information that should come after the
/// content.
-void DwarfCFIException::EndModule() {
+void DwarfCFIException::endModule() {
if (moveTypeModule == AsmPrinter::CFI_M_Debug)
Asm->OutStreamer.EmitCFISections(false, true);
@@ -59,32 +58,22 @@ void DwarfCFIException::EndModule() {
unsigned PerEncoding = TLOF.getPersonalityEncoding();
- if ((PerEncoding & 0x70) != dwarf::DW_EH_PE_pcrel)
+ if ((PerEncoding & 0x80) != dwarf::DW_EH_PE_indirect)
return;
// Emit references to all used personality functions
- bool AtLeastOne = false;
const std::vector<const Function*> &Personalities = MMI->getPersonalities();
for (size_t i = 0, e = Personalities.size(); i != e; ++i) {
if (!Personalities[i])
continue;
MCSymbol *Sym = Asm->getSymbol(Personalities[i]);
TLOF.emitPersonalityValue(Asm->OutStreamer, Asm->TM, Sym);
- AtLeastOne = true;
- }
-
- if (AtLeastOne && !TLOF.isFunctionEHFrameSymbolPrivate()) {
- // This is a temporary hack to keep sections in the same order they
- // were before. This lets us produce bit identical outputs while
- // transitioning to CFI.
- Asm->OutStreamer.SwitchSection(
- const_cast<TargetLoweringObjectFile&>(TLOF).getEHFrameSection());
}
}
-/// BeginFunction - Gather pre-function exception information. Assumes it's
+/// beginFunction - Gather pre-function exception information. Assumes it's
/// being emitted immediately after the function entry point.
-void DwarfCFIException::BeginFunction(const MachineFunction *MF) {
+void DwarfCFIException::beginFunction(const MachineFunction *MF) {
shouldEmitMoves = shouldEmitPersonality = shouldEmitLSDA = false;
// If any landing pads survive, we need an EH table.
@@ -113,18 +102,27 @@ void DwarfCFIException::BeginFunction(const MachineFunction *MF) {
if (!shouldEmitPersonality && !shouldEmitMoves)
return;
- Asm->OutStreamer.EmitCFIStartProc();
+ Asm->OutStreamer.EmitCFIStartProc(/*IsSimple=*/false);
// Indicate personality routine, if any.
if (!shouldEmitPersonality)
return;
- const MCSymbol *Sym = TLOF.getCFIPersonalitySymbol(Per, Asm->Mang, MMI);
+ const MCSymbol *Sym =
+ TLOF.getCFIPersonalitySymbol(Per, *Asm->Mang, Asm->TM, MMI);
Asm->OutStreamer.EmitCFIPersonality(Sym, PerEncoding);
- Asm->OutStreamer.EmitDebugLabel
- (Asm->GetTempSymbol("eh_func_begin",
- Asm->getFunctionNumber()));
+ MCSymbol *EHBegin =
+ Asm->GetTempSymbol("eh_func_begin", Asm->getFunctionNumber());
+ if (Asm->MAI->useAssignmentForEHBegin()) {
+ MCContext &Ctx = Asm->OutContext;
+ MCSymbol *CurPos = Ctx.CreateTempSymbol();
+ Asm->OutStreamer.EmitLabel(CurPos);
+ Asm->OutStreamer.EmitAssignment(EHBegin,
+ MCSymbolRefExpr::Create(CurPos, Ctx));
+ } else {
+ Asm->OutStreamer.EmitLabel(EHBegin);
+ }
// Provide LSDA information.
if (!shouldEmitLSDA)
@@ -135,9 +133,9 @@ void DwarfCFIException::BeginFunction(const MachineFunction *MF) {
LSDAEncoding);
}
-/// EndFunction - Gather and emit post-function exception information.
+/// endFunction - Gather and emit post-function exception information.
///
-void DwarfCFIException::EndFunction() {
+void DwarfCFIException::endFunction(const MachineFunction *) {
if (!shouldEmitPersonality && !shouldEmitMoves)
return;
@@ -152,5 +150,5 @@ void DwarfCFIException::EndFunction() {
// Map all labels and get rid of any dead landing pads.
MMI->TidyLandingPads();
- EmitExceptionTable();
+ emitExceptionTable();
}
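
[Editor's note: a small sketch of the mask change in endModule() above. DWARF EH pointer encodings pack a value format in the low nibble, a relative-to base in bits 0x70, and an "indirect" flag in bit 0x80; the new check asks whether the personality pointer is emitted indirectly rather than whether it is pc-relative. The constants below mirror the usual DW_EH_PE_* values and are shown here only for illustration.]

    #include <cstdio>

    enum : unsigned {
      DW_EH_PE_pcrel    = 0x10,  // pc-relative base (lives in the 0x70 bits)
      DW_EH_PE_indirect = 0x80   // value is a pointer to the real address
    };

    static bool needsPersonalityReference(unsigned PerEncoding) {
      // Emit explicit personality references only for indirect encodings.
      return (PerEncoding & 0x80) == DW_EH_PE_indirect;
    }

    int main() {
      std::printf("pcrel only:     %d\n",
                  needsPersonalityReference(DW_EH_PE_pcrel));
      std::printf("indirect|pcrel: %d\n",
                  needsPersonalityReference(DW_EH_PE_indirect | DW_EH_PE_pcrel));
      return 0;
    }
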
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
deleted file mode 100644
index b9e941e..0000000
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
+++ /dev/null
@@ -1,419 +0,0 @@
-//===-- llvm/CodeGen/DwarfCompileUnit.h - Dwarf Compile Unit ---*- C++ -*--===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains support for writing dwarf compile unit.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CODEGEN_ASMPRINTER_DWARFCOMPILEUNIT_H
-#define CODEGEN_ASMPRINTER_DWARFCOMPILEUNIT_H
-
-#include "DIE.h"
-#include "DwarfDebug.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/ADT/StringMap.h"
-#include "llvm/DebugInfo.h"
-#include "llvm/MC/MCExpr.h"
-
-namespace llvm {
-
-class MachineLocation;
-class MachineOperand;
-class ConstantInt;
-class ConstantFP;
-class DbgVariable;
-
-//===----------------------------------------------------------------------===//
-/// CompileUnit - This dwarf writer support class manages information associated
-/// with a source file.
-class CompileUnit {
- /// UniqueID - a numeric ID unique among all CUs in the module
- ///
- unsigned UniqueID;
-
- /// Node - MDNode for the compile unit.
- DICompileUnit Node;
-
- /// CUDie - Compile unit debug information entry.
- ///
- const OwningPtr<DIE> CUDie;
-
- /// Asm - Target of Dwarf emission.
- AsmPrinter *Asm;
-
- // Holders for some common dwarf information.
- DwarfDebug *DD;
- DwarfUnits *DU;
-
- /// IndexTyDie - An anonymous type for index type. Owned by CUDie.
- DIE *IndexTyDie;
-
- /// MDNodeToDieMap - Tracks the mapping of unit level debug information
- /// variables to debug information entries.
- DenseMap<const MDNode *, DIE *> MDNodeToDieMap;
-
- /// MDNodeToDIEEntryMap - Tracks the mapping of unit level debug information
- /// descriptors to debug information entries using a DIEEntry proxy.
- DenseMap<const MDNode *, DIEEntry *> MDNodeToDIEEntryMap;
-
- /// GlobalNames - A map of globally visible named entities for this unit.
- ///
- StringMap<DIE *> GlobalNames;
-
- /// GlobalTypes - A map of globally visible types for this unit.
- ///
- StringMap<DIE *> GlobalTypes;
-
- /// AccelNames - A map of names for the name accelerator table.
- ///
- StringMap<std::vector<DIE *> > AccelNames;
- StringMap<std::vector<DIE *> > AccelObjC;
- StringMap<std::vector<DIE *> > AccelNamespace;
- StringMap<std::vector<std::pair<DIE *, unsigned> > > AccelTypes;
-
- /// DIEBlocks - A list of all the DIEBlocks in use.
- std::vector<DIEBlock *> DIEBlocks;
-
- /// ContainingTypeMap - This map is used to keep track of subprogram DIEs that
- /// need DW_AT_containing_type attribute. This attribute points to a DIE that
- /// corresponds to the MDNode mapped with the subprogram DIE.
- DenseMap<DIE *, const MDNode *> ContainingTypeMap;
-
- // DIEValueAllocator - All DIEValues are allocated through this allocator.
- BumpPtrAllocator DIEValueAllocator;
-
- // DIEIntegerOne - A preallocated DIEValue because 1 is used frequently.
- DIEInteger *DIEIntegerOne;
-
-public:
- CompileUnit(unsigned UID, DIE *D, DICompileUnit CU, AsmPrinter *A,
- DwarfDebug *DW, DwarfUnits *DWU);
- ~CompileUnit();
-
- // Accessors.
- unsigned getUniqueID() const { return UniqueID; }
- uint16_t getLanguage() const { return Node.getLanguage(); }
- DICompileUnit getNode() const { return Node; }
- DIE *getCUDie() const { return CUDie.get(); }
- const StringMap<DIE *> &getGlobalNames() const { return GlobalNames; }
- const StringMap<DIE *> &getGlobalTypes() const { return GlobalTypes; }
-
- const StringMap<std::vector<DIE *> > &getAccelNames() const {
- return AccelNames;
- }
- const StringMap<std::vector<DIE *> > &getAccelObjC() const {
- return AccelObjC;
- }
- const StringMap<std::vector<DIE *> > &getAccelNamespace() const {
- return AccelNamespace;
- }
- const StringMap<std::vector<std::pair<DIE *, unsigned> > > &
- getAccelTypes() const {
- return AccelTypes;
- }
-
- unsigned getDebugInfoOffset() const { return DebugInfoOffset; }
- void setDebugInfoOffset(unsigned DbgInfoOff) { DebugInfoOffset = DbgInfoOff; }
-
- /// hasContent - Return true if this compile unit has something to write out.
- ///
- bool hasContent() const { return !CUDie->getChildren().empty(); }
-
- /// getParentContextString - Get a string containing the language specific
- /// context for a global name.
- std::string getParentContextString(DIScope Context) const;
-
- /// addGlobalName - Add a new global entity to the compile unit.
- ///
- void addGlobalName(StringRef Name, DIE *Die, DIScope Context);
-
- /// addGlobalType - Add a new global type to the compile unit.
- ///
- void addGlobalType(DIType Ty);
-
- /// addPubTypes - Add a set of types from the subprogram to the global types.
- void addPubTypes(DISubprogram SP);
-
- /// addAccelName - Add a new name to the name accelerator table.
- void addAccelName(StringRef Name, DIE *Die);
-
- /// addAccelObjC - Add a new name to the ObjC accelerator table.
- void addAccelObjC(StringRef Name, DIE *Die);
-
- /// addAccelNamespace - Add a new name to the namespace accelerator table.
- void addAccelNamespace(StringRef Name, DIE *Die);
-
- /// addAccelType - Add a new type to the type accelerator table.
- void addAccelType(StringRef Name, std::pair<DIE *, unsigned> Die);
-
- /// getDIE - Returns the debug information entry map slot for the
- /// specified debug variable. We delegate the request to DwarfDebug
- /// when the MDNode can be part of the type system, since DIEs for
- /// the type system can be shared across CUs and the mappings are
- /// kept in DwarfDebug.
- DIE *getDIE(DIDescriptor D) const;
-
- DIEBlock *getDIEBlock() { return new (DIEValueAllocator) DIEBlock(); }
-
- /// insertDIE - Insert DIE into the map. We delegate the request to DwarfDebug
- /// when the MDNode can be part of the type system, since DIEs for
- /// the type system can be shared across CUs and the mappings are
- /// kept in DwarfDebug.
- void insertDIE(DIDescriptor Desc, DIE *D);
-
- /// addDie - Adds or interns the DIE to the compile unit.
- ///
- void addDie(DIE *Buffer) { CUDie->addChild(Buffer); }
-
- /// addFlag - Add a flag that is true to the DIE.
- void addFlag(DIE *Die, dwarf::Attribute Attribute);
-
- /// addUInt - Add an unsigned integer attribute data and value.
- ///
- void addUInt(DIE *Die, dwarf::Attribute Attribute, Optional<dwarf::Form> Form,
- uint64_t Integer);
-
- void addUInt(DIEBlock *Block, dwarf::Form Form, uint64_t Integer);
-
- /// addSInt - Add an signed integer attribute data and value.
- ///
- void addSInt(DIE *Die, dwarf::Attribute Attribute, Optional<dwarf::Form> Form,
- int64_t Integer);
-
- void addSInt(DIEBlock *Die, Optional<dwarf::Form> Form, int64_t Integer);
-
- /// addString - Add a string attribute data and value.
- ///
- void addString(DIE *Die, dwarf::Attribute Attribute, const StringRef Str);
-
- /// addLocalString - Add a string attribute data and value.
- ///
- void addLocalString(DIE *Die, dwarf::Attribute Attribute, const StringRef Str);
-
- /// addExpr - Add a Dwarf expression attribute data and value.
- ///
- void addExpr(DIEBlock *Die, dwarf::Form Form, const MCExpr *Expr);
-
- /// addLabel - Add a Dwarf label attribute data and value.
- ///
- void addLabel(DIE *Die, dwarf::Attribute Attribute, dwarf::Form Form,
- const MCSymbol *Label);
-
- void addLabel(DIEBlock *Die, dwarf::Form Form, const MCSymbol *Label);
-
- /// addSectionLabel - Add a Dwarf section label attribute data and value.
- ///
- void addSectionLabel(DIE *Die, dwarf::Attribute Attribute, const MCSymbol *Label);
-
- /// addSectionOffset - Add an offset into a section attribute data and value.
- ///
- void addSectionOffset(DIE *Die, dwarf::Attribute Attribute, uint64_t Integer);
-
- /// addLabelAddress - Add a dwarf label attribute data and value using
- /// either DW_FORM_addr or DW_FORM_GNU_addr_index.
- ///
- void addLabelAddress(DIE *Die, dwarf::Attribute Attribute, MCSymbol *Label);
-
- /// addOpAddress - Add a dwarf op address data and value using the
- /// form given and an op of either DW_FORM_addr or DW_FORM_GNU_addr_index.
- ///
- void addOpAddress(DIEBlock *Die, const MCSymbol *Label);
-
- /// addSectionDelta - Add a label delta attribute data and value.
- void addSectionDelta(DIE *Die, dwarf::Attribute Attribute, const MCSymbol *Hi,
- const MCSymbol *Lo);
-
- /// addDIEEntry - Add a DIE attribute data and value.
- ///
- void addDIEEntry(DIE *Die, dwarf::Attribute Attribute, DIE *Entry);
-
- /// addDIEEntry - Add a DIE attribute data and value.
- ///
- void addDIEEntry(DIE *Die, dwarf::Attribute Attribute, DIEEntry *Entry);
-
- /// addBlock - Add block data.
- ///
- void addBlock(DIE *Die, dwarf::Attribute Attribute, DIEBlock *Block);
-
- /// addSourceLine - Add location information to specified debug information
- /// entry.
- void addSourceLine(DIE *Die, DIVariable V);
- void addSourceLine(DIE *Die, DIGlobalVariable G);
- void addSourceLine(DIE *Die, DISubprogram SP);
- void addSourceLine(DIE *Die, DIType Ty);
- void addSourceLine(DIE *Die, DINameSpace NS);
- void addSourceLine(DIE *Die, DIObjCProperty Ty);
-
- /// addAddress - Add an address attribute to a die based on the location
- /// provided.
- void addAddress(DIE *Die, dwarf::Attribute Attribute, const MachineLocation &Location,
- bool Indirect = false);
-
- /// addConstantValue - Add constant value entry in variable DIE.
- void addConstantValue(DIE *Die, const MachineOperand &MO, DIType Ty);
- void addConstantValue(DIE *Die, const ConstantInt *CI, bool Unsigned);
- void addConstantValue(DIE *Die, const APInt &Val, bool Unsigned);
-
- /// addConstantFPValue - Add constant value entry in variable DIE.
- void addConstantFPValue(DIE *Die, const MachineOperand &MO);
- void addConstantFPValue(DIE *Die, const ConstantFP *CFP);
-
- /// addTemplateParams - Add template parameters in buffer.
- void addTemplateParams(DIE &Buffer, DIArray TParams);
-
- /// addRegisterOp - Add register operand.
- void addRegisterOp(DIEBlock *TheDie, unsigned Reg);
-
- /// addRegisterOffset - Add register offset.
- void addRegisterOffset(DIEBlock *TheDie, unsigned Reg, int64_t Offset);
-
- /// addComplexAddress - Start with the address based on the location provided,
- /// and generate the DWARF information necessary to find the actual variable
- /// (navigating the extra location information encoded in the type) based on
- /// the starting location. Add the DWARF information to the die.
- ///
- void addComplexAddress(const DbgVariable &DV, DIE *Die, dwarf::Attribute Attribute,
- const MachineLocation &Location);
-
- // FIXME: Should be reformulated in terms of addComplexAddress.
- /// addBlockByrefAddress - Start with the address based on the location
- /// provided, and generate the DWARF information necessary to find the
- /// actual Block variable (navigating the Block struct) based on the
- /// starting location. Add the DWARF information to the die. Obsolete,
- /// please use addComplexAddress instead.
- ///
- void addBlockByrefAddress(const DbgVariable &DV, DIE *Die, dwarf::Attribute Attribute,
- const MachineLocation &Location);
-
- /// addVariableAddress - Add DW_AT_location attribute for a
- /// DbgVariable based on provided MachineLocation.
- void addVariableAddress(const DbgVariable &DV, DIE *Die,
- MachineLocation Location);
-
- /// addType - Add a new type attribute to the specified entity. This takes
- /// and attribute parameter because DW_AT_friend attributes are also
- /// type references.
- void addType(DIE *Entity, DIType Ty, dwarf::Attribute Attribute = dwarf::DW_AT_type);
-
- /// getOrCreateNameSpace - Create a DIE for DINameSpace.
- DIE *getOrCreateNameSpace(DINameSpace NS);
-
- /// getOrCreateSubprogramDIE - Create new DIE using SP.
- DIE *getOrCreateSubprogramDIE(DISubprogram SP);
-
- /// getOrCreateTypeDIE - Find existing DIE or create new DIE for the
- /// given DIType.
- DIE *getOrCreateTypeDIE(const MDNode *N);
-
- /// getOrCreateContextDIE - Get context owner's DIE.
- DIE *getOrCreateContextDIE(DIScope Context);
-
- /// createGlobalVariableDIE - create global variable DIE.
- void createGlobalVariableDIE(DIGlobalVariable GV);
-
- /// constructContainingTypeDIEs - Construct DIEs for types that contain
- /// vtables.
- void constructContainingTypeDIEs();
-
- /// constructVariableDIE - Construct a DIE for the given DbgVariable.
- DIE *constructVariableDIE(DbgVariable &DV, bool isScopeAbstract);
-
- /// Create a DIE with the given Tag, add the DIE to its parent, and
- /// call insertDIE if MD is not null.
- DIE *createAndAddDIE(unsigned Tag, DIE &Parent, DIDescriptor N = DIDescriptor());
-
- /// Compute the size of a header for this unit, not including the initial
- /// length field.
- unsigned getHeaderSize() const {
- return sizeof(int16_t) + // DWARF version number
- sizeof(int32_t) + // Offset Into Abbrev. Section
- sizeof(int8_t); // Pointer Size (in bytes)
- }
-
- /// Emit the header for this unit, not including the initial length field.
- void emitHeader(const MCSection *ASection, const MCSymbol *ASectionSym);
-
-private:
- /// constructSubprogramArguments - Construct function argument DIEs.
- void constructSubprogramArguments(DIE &Buffer, DIArray Args);
-
- /// constructTypeDIE - Construct basic type die from DIBasicType.
- void constructTypeDIE(DIE &Buffer, DIBasicType BTy);
-
- /// constructTypeDIE - Construct derived type die from DIDerivedType.
- void constructTypeDIE(DIE &Buffer, DIDerivedType DTy);
-
- /// constructTypeDIE - Construct type DIE from DICompositeType.
- void constructTypeDIE(DIE &Buffer, DICompositeType CTy);
-
- /// constructSubrangeDIE - Construct subrange DIE from DISubrange.
- void constructSubrangeDIE(DIE &Buffer, DISubrange SR, DIE *IndexTy);
-
- /// constructArrayTypeDIE - Construct array type DIE from DICompositeType.
- void constructArrayTypeDIE(DIE &Buffer, DICompositeType CTy);
-
- /// constructEnumTypeDIE - Construct enum type DIE from DIEnumerator.
- void constructEnumTypeDIE(DIE &Buffer, DICompositeType CTy);
-
- /// constructMemberDIE - Construct member DIE from DIDerivedType.
- void constructMemberDIE(DIE &Buffer, DIDerivedType DT);
-
- /// constructTemplateTypeParameterDIE - Construct new DIE for the given
- /// DITemplateTypeParameter.
- void constructTemplateTypeParameterDIE(DIE &Buffer,
- DITemplateTypeParameter TP);
-
- /// constructTemplateValueParameterDIE - Construct new DIE for the given
- /// DITemplateValueParameter.
- void constructTemplateValueParameterDIE(DIE &Buffer,
- DITemplateValueParameter TVP);
-
- /// getOrCreateStaticMemberDIE - Create new static data member DIE.
- DIE *getOrCreateStaticMemberDIE(DIDerivedType DT);
-
- /// Offset of the CUDie from beginning of debug info section.
- unsigned DebugInfoOffset;
-
- /// getLowerBoundDefault - Return the default lower bound for an array. If the
- /// DWARF version doesn't handle the language, return -1.
- int64_t getDefaultLowerBound() const;
-
- /// getDIEEntry - Returns the debug information entry for the specified
- /// debug variable.
- DIEEntry *getDIEEntry(const MDNode *N) const {
- return MDNodeToDIEEntryMap.lookup(N);
- }
-
- /// insertDIEEntry - Insert debug information entry into the map.
- void insertDIEEntry(const MDNode *N, DIEEntry *E) {
- MDNodeToDIEEntryMap.insert(std::make_pair(N, E));
- }
-
- // getIndexTyDie - Get an anonymous type for index type.
- DIE *getIndexTyDie() { return IndexTyDie; }
-
- // setIndexTyDie - Set D as anonymous type for index which can be reused
- // later.
- void setIndexTyDie(DIE *D) { IndexTyDie = D; }
-
- /// createDIEEntry - Creates a new DIEEntry to be a proxy for a debug
- /// information entry.
- DIEEntry *createDIEEntry(DIE *Entry);
-
- /// resolve - Look in the DwarfDebug map for the MDNode that
- /// corresponds to the reference.
- template <typename T> T resolve(DIRef<T> Ref) const {
- return DD->resolve(Ref);
- }
-};
-
-} // end llvm namespace
-#endif
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index d922433..ac1c0ff 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -11,24 +11,24 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "dwarfdebug"
+#include "ByteStreamer.h"
#include "DwarfDebug.h"
#include "DIE.h"
#include "DIEHash.h"
-#include "DwarfAccelTable.h"
-#include "DwarfCompileUnit.h"
+#include "DwarfUnit.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/DIBuilder.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
@@ -36,12 +36,13 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/LEB128.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Timer.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
@@ -49,6 +50,8 @@
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "dwarfdebug"
+
static cl::opt<bool>
DisableDebugInfoPrinting("disable-debug-info-print", cl::Hidden,
cl::desc("Disable debug info printing"));
@@ -59,26 +62,17 @@ static cl::opt<bool> UnknownLocations(
cl::init(false));
static cl::opt<bool>
-GenerateODRHash("generate-odr-hash", cl::Hidden,
- cl::desc("Add an ODR hash to external type DIEs."),
- cl::init(false));
-
-static cl::opt<bool>
-GenerateCUHash("generate-cu-hash", cl::Hidden,
- cl::desc("Add the CU hash as the dwo_id."),
- cl::init(false));
-
-static cl::opt<bool>
GenerateGnuPubSections("generate-gnu-dwarf-pub-sections", cl::Hidden,
cl::desc("Generate GNU-style pubnames and pubtypes"),
cl::init(false));
+static cl::opt<bool> GenerateARangeSection("generate-arange-section",
+ cl::Hidden,
+ cl::desc("Generate dwarf aranges"),
+ cl::init(false));
+
namespace {
-enum DefaultOnOff {
- Default,
- Enable,
- Disable
-};
+enum DefaultOnOff { Default, Enable, Disable };
}
static cl::opt<DefaultOnOff>
@@ -91,7 +85,7 @@ DwarfAccelTables("dwarf-accel-tables", cl::Hidden,
static cl::opt<DefaultOnOff>
SplitDwarf("split-dwarf", cl::Hidden,
- cl::desc("Output prototype dwarf split debug info."),
+ cl::desc("Output DWARF5 split debug info."),
cl::values(clEnumVal(Default, "Default for platform"),
clEnumVal(Enable, "Enabled"),
clEnumVal(Disable, "Disabled"), clEnumValEnd),
@@ -105,34 +99,27 @@ DwarfPubSections("generate-dwarf-pub-sections", cl::Hidden,
clEnumVal(Disable, "Disabled"), clEnumValEnd),
cl::init(Default));
-static cl::opt<unsigned>
-DwarfVersionNumber("dwarf-version", cl::Hidden,
- cl::desc("Generate DWARF for dwarf version."),
- cl::init(0));
-
static const char *const DWARFGroupName = "DWARF Emission";
static const char *const DbgTimerName = "DWARF Debug Writer";
//===----------------------------------------------------------------------===//
-// Configuration values for initial hash set sizes (log2).
-//
-static const unsigned InitAbbreviationsSetSize = 9; // log2(512)
-
-namespace llvm {
-
/// resolve - Look in the DwarfDebug map for the MDNode that
/// corresponds to the reference.
-template <typename T>
-T DbgVariable::resolve(DIRef<T> Ref) const {
+template <typename T> T DbgVariable::resolve(DIRef<T> Ref) const {
return DD->resolve(Ref);
}
+bool DbgVariable::isBlockByrefVariable() const {
+ assert(Var.isVariable() && "Invalid complex DbgVariable!");
+ return Var.isBlockByrefVariable(DD->getTypeIdentifierMap());
+}
+
DIType DbgVariable::getType() const {
- DIType Ty = Var.getType();
+ DIType Ty = Var.getType().resolve(DD->getTypeIdentifierMap());
// FIXME: isBlockByrefVariable should be reformulated in terms of complex
// addresses instead.
- if (Var.isBlockByrefVariable()) {
+ if (Var.isBlockByrefVariable(DD->getTypeIdentifierMap())) {
/* Byref variables, in Blocks, are declared by the programmer as
"SomeType VarName;", but the compiler creates a
__Block_byref_x_VarName struct, and gives the variable VarName
@@ -173,33 +160,32 @@ DIType DbgVariable::getType() const {
return Ty;
}
-} // end llvm namespace
-
-/// Return Dwarf Version by checking module flags.
-static unsigned getDwarfVersionFromModule(const Module *M) {
- Value *Val = M->getModuleFlag("Dwarf Version");
- if (!Val)
- return dwarf::DWARF_VERSION;
- return cast<ConstantInt>(Val)->getZExtValue();
-}
+static LLVM_CONSTEXPR DwarfAccelTable::Atom TypeAtoms[] = {
+ DwarfAccelTable::Atom(dwarf::DW_ATOM_die_offset, dwarf::DW_FORM_data4),
+ DwarfAccelTable::Atom(dwarf::DW_ATOM_die_tag, dwarf::DW_FORM_data2),
+ DwarfAccelTable::Atom(dwarf::DW_ATOM_type_flags, dwarf::DW_FORM_data1)};
DwarfDebug::DwarfDebug(AsmPrinter *A, Module *M)
- : Asm(A), MMI(Asm->MMI), FirstCU(0),
- AbbreviationsSet(InitAbbreviationsSetSize),
- SourceIdMap(DIEValueAllocator),
- PrevLabel(NULL), GlobalCUIndexCount(0),
- InfoHolder(A, &AbbreviationsSet, Abbreviations, "info_string",
- DIEValueAllocator),
- SkeletonAbbrevSet(InitAbbreviationsSetSize),
- SkeletonHolder(A, &SkeletonAbbrevSet, SkeletonAbbrevs, "skel_string",
- DIEValueAllocator) {
-
- DwarfInfoSectionSym = DwarfAbbrevSectionSym = 0;
- DwarfStrSectionSym = TextSectionSym = 0;
- DwarfDebugRangeSectionSym = DwarfDebugLocSectionSym = DwarfLineSectionSym = 0;
- DwarfAddrSectionSym = 0;
- DwarfAbbrevDWOSectionSym = DwarfStrDWOSectionSym = 0;
- FunctionBeginSym = FunctionEndSym = 0;
+ : Asm(A), MMI(Asm->MMI), FirstCU(nullptr), PrevLabel(nullptr),
+ GlobalRangeCount(0), InfoHolder(A, "info_string", DIEValueAllocator),
+ UsedNonDefaultText(false),
+ SkeletonHolder(A, "skel_string", DIEValueAllocator),
+ AccelNames(DwarfAccelTable::Atom(dwarf::DW_ATOM_die_offset,
+ dwarf::DW_FORM_data4)),
+ AccelObjC(DwarfAccelTable::Atom(dwarf::DW_ATOM_die_offset,
+ dwarf::DW_FORM_data4)),
+ AccelNamespace(DwarfAccelTable::Atom(dwarf::DW_ATOM_die_offset,
+ dwarf::DW_FORM_data4)),
+ AccelTypes(TypeAtoms) {
+
+ DwarfInfoSectionSym = DwarfAbbrevSectionSym = DwarfStrSectionSym = nullptr;
+ DwarfDebugRangeSectionSym = DwarfDebugLocSectionSym = nullptr;
+ DwarfLineSectionSym = nullptr;
+ DwarfAddrSectionSym = nullptr;
+ DwarfAbbrevDWOSectionSym = DwarfStrDWOSectionSym = nullptr;
+ FunctionBeginSym = FunctionEndSym = nullptr;
+ CurFn = nullptr;
+ CurMI = nullptr;
// Turn on accelerator tables for Darwin by default, pubnames by
// default for non-Darwin, and handle split dwarf.
@@ -220,9 +206,11 @@ DwarfDebug::DwarfDebug(AsmPrinter *A, Module *M)
else
HasDwarfPubSections = DwarfPubSections == Enable;
- DwarfVersion = DwarfVersionNumber
- ? DwarfVersionNumber
- : getDwarfVersionFromModule(MMI->getModule());
+ unsigned DwarfVersionNumber = Asm->TM.Options.MCOptions.DwarfVersion;
+ DwarfVersion = DwarfVersionNumber ? DwarfVersionNumber
+ : MMI->getModule()->getDwarfVersion();
+
+ Asm->OutStreamer.getContext().setDwarfVersion(DwarfVersion);
{
NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
@@ -230,78 +218,29 @@ DwarfDebug::DwarfDebug(AsmPrinter *A, Module *M)
}
}
+// Define out of line so we don't have to include DwarfUnit.h in DwarfDebug.h.
+DwarfDebug::~DwarfDebug() { }
+
// Switch to the specified MCSection and emit an assembler
// temporary label to it if SymbolStem is specified.
static MCSymbol *emitSectionSym(AsmPrinter *Asm, const MCSection *Section,
- const char *SymbolStem = 0) {
+ const char *SymbolStem = nullptr) {
Asm->OutStreamer.SwitchSection(Section);
- if (!SymbolStem) return 0;
+ if (!SymbolStem)
+ return nullptr;
MCSymbol *TmpSym = Asm->GetTempSymbol(SymbolStem);
Asm->OutStreamer.EmitLabel(TmpSym);
return TmpSym;
}
-MCSymbol *DwarfUnits::getStringPoolSym() {
- return Asm->GetTempSymbol(StringPref);
-}
-
-MCSymbol *DwarfUnits::getStringPoolEntry(StringRef Str) {
- std::pair<MCSymbol*, unsigned> &Entry =
- StringPool.GetOrCreateValue(Str).getValue();
- if (Entry.first) return Entry.first;
-
- Entry.second = NextStringPoolNumber++;
- return Entry.first = Asm->GetTempSymbol(StringPref, Entry.second);
-}
-
-unsigned DwarfUnits::getStringPoolIndex(StringRef Str) {
- std::pair<MCSymbol*, unsigned> &Entry =
- StringPool.GetOrCreateValue(Str).getValue();
- if (Entry.first) return Entry.second;
-
- Entry.second = NextStringPoolNumber++;
- Entry.first = Asm->GetTempSymbol(StringPref, Entry.second);
- return Entry.second;
-}
-
-unsigned DwarfUnits::getAddrPoolIndex(const MCSymbol *Sym) {
- return getAddrPoolIndex(MCSymbolRefExpr::Create(Sym, Asm->OutContext));
-}
-
-unsigned DwarfUnits::getAddrPoolIndex(const MCExpr *Sym) {
- std::pair<DenseMap<const MCExpr *, unsigned>::iterator, bool> P =
- AddressPool.insert(std::make_pair(Sym, NextAddrPoolNumber));
- if (P.second)
- ++NextAddrPoolNumber;
- return P.first->second;
-}
-
-// Define a unique number for the abbreviation.
-//
-void DwarfUnits::assignAbbrevNumber(DIEAbbrev &Abbrev) {
- // Check the set for priors.
- DIEAbbrev *InSet = AbbreviationsSet->GetOrInsertNode(&Abbrev);
-
- // If it's newly added.
- if (InSet == &Abbrev) {
- // Add to abbreviation list.
- Abbreviations.push_back(&Abbrev);
-
- // Assign the vector position + 1 as its number.
- Abbrev.setNumber(Abbreviations.size());
- } else {
- // Assign existing abbreviation number.
- Abbrev.setNumber(InSet->getNumber());
- }
-}
-
static bool isObjCClass(StringRef Name) {
return Name.startswith("+") || Name.startswith("-");
}
static bool hasObjCCategory(StringRef Name) {
- if (!isObjCClass(Name)) return false;
+ if (!isObjCClass(Name))
+ return false;
return Name.find(") ") != StringRef::npos;
}
@@ -325,35 +264,35 @@ static StringRef getObjCMethodName(StringRef In) {
// Helper for sorting sections into a stable output order.
static bool SectionSort(const MCSection *A, const MCSection *B) {
- std::string LA = (A ? A->getLabelBeginName() : "");
- std::string LB = (B ? B->getLabelBeginName() : "");
- return LA < LB;
+ std::string LA = (A ? A->getLabelBeginName() : "");
+ std::string LB = (B ? B->getLabelBeginName() : "");
+ return LA < LB;
}
// Add the various names to the Dwarf accelerator table names.
// TODO: Determine whether or not we should add names for programs
// that do not have a DW_AT_name or DW_AT_linkage_name field - this
// is only slightly different than the lookup of non-standard ObjC names.
-static void addSubprogramNames(CompileUnit *TheCU, DISubprogram SP,
- DIE* Die) {
- if (!SP.isDefinition()) return;
- TheCU->addAccelName(SP.getName(), Die);
+void DwarfDebug::addSubprogramNames(DISubprogram SP, DIE &Die) {
+ if (!SP.isDefinition())
+ return;
+ addAccelName(SP.getName(), Die);
// If the linkage name is different than the name, go ahead and output
// that as well into the name table.
if (SP.getLinkageName() != "" && SP.getName() != SP.getLinkageName())
- TheCU->addAccelName(SP.getLinkageName(), Die);
+ addAccelName(SP.getLinkageName(), Die);
// If this is an Objective-C selector name add it to the ObjC accelerator
// too.
if (isObjCClass(SP.getName())) {
StringRef Class, Category;
getObjCClassCategory(SP.getName(), Class, Category);
- TheCU->addAccelObjC(Class, Die);
+ addAccelObjC(Class, Die);
if (Category != "")
- TheCU->addAccelObjC(Category, Die);
+ addAccelObjC(Category, Die);
// Also add the base method name to the name table.
- TheCU->addAccelName(getObjCMethodName(SP.getName()), Die);
+ addAccelName(getObjCMethodName(SP.getName()), Die);
}
}
@@ -373,76 +312,21 @@ bool DwarfDebug::isSubprogramContext(const MDNode *Context) {
// Find DIE for the given subprogram and attach appropriate DW_AT_low_pc
// and DW_AT_high_pc attributes. If there are global variables in this
// scope then create and insert DIEs for these variables.
-DIE *DwarfDebug::updateSubprogramScopeDIE(CompileUnit *SPCU, DISubprogram SP) {
- DIE *SPDie = SPCU->getDIE(SP);
-
- assert(SPDie && "Unable to find subprogram DIE!");
-
- // If we're updating an abstract DIE, then we will be adding the children and
- // object pointer later on. But what we don't want to do is process the
- // concrete DIE twice.
- if (DIE *AbsSPDIE = AbstractSPDies.lookup(SP)) {
- // Pick up abstract subprogram DIE.
- SPDie = SPCU->createAndAddDIE(dwarf::DW_TAG_subprogram, *SPCU->getCUDie());
- SPCU->addDIEEntry(SPDie, dwarf::DW_AT_abstract_origin, AbsSPDIE);
- } else {
- DISubprogram SPDecl = SP.getFunctionDeclaration();
- if (!SPDecl.isSubprogram()) {
- // There is not any need to generate specification DIE for a function
- // defined at compile unit level. If a function is defined inside another
- // function then gdb prefers the definition at top level and but does not
- // expect specification DIE in parent function. So avoid creating
- // specification DIE for a function defined inside a function.
- DIScope SPContext = resolve(SP.getContext());
- if (SP.isDefinition() && !SPContext.isCompileUnit() &&
- !SPContext.isFile() &&
- !isSubprogramContext(SPContext)) {
- SPCU->addFlag(SPDie, dwarf::DW_AT_declaration);
-
- // Add arguments.
- DICompositeType SPTy = SP.getType();
- DIArray Args = SPTy.getTypeArray();
- uint16_t SPTag = SPTy.getTag();
- if (SPTag == dwarf::DW_TAG_subroutine_type)
- // FIXME: Use DwarfUnit::constructSubprogramArguments() here.
- for (unsigned i = 1, N = Args.getNumElements(); i < N; ++i) {
- DIType ATy(Args.getElement(i));
- if (ATy.isUnspecifiedParameter()) {
- assert(i == N-1 && "ellipsis must be the last argument");
- SPCU->createAndAddDIE(dwarf::DW_TAG_unspecified_parameters, *SPDie);
- } else {
- DIE *Arg =
- SPCU->createAndAddDIE(dwarf::DW_TAG_formal_parameter, *SPDie);
- SPCU->addType(Arg, ATy);
- if (ATy.isArtificial())
- SPCU->addFlag(Arg, dwarf::DW_AT_artificial);
- if (ATy.isObjectPointer())
- SPCU->addDIEEntry(SPDie, dwarf::DW_AT_object_pointer, Arg);
- }
- }
- DIE *SPDeclDie = SPDie;
- SPDie =
- SPCU->createAndAddDIE(dwarf::DW_TAG_subprogram, *SPCU->getCUDie());
- SPCU->addDIEEntry(SPDie, dwarf::DW_AT_specification, SPDeclDie);
- }
- }
- }
+DIE &DwarfDebug::updateSubprogramScopeDIE(DwarfCompileUnit &SPCU,
+ DISubprogram SP) {
+ DIE *SPDie = SPCU.getOrCreateSubprogramDIE(SP);
+
+ attachLowHighPC(SPCU, *SPDie, FunctionBeginSym, FunctionEndSym);
- SPCU->addLabelAddress(SPDie, dwarf::DW_AT_low_pc,
- Asm->GetTempSymbol("func_begin",
- Asm->getFunctionNumber()));
- SPCU->addLabelAddress(SPDie, dwarf::DW_AT_high_pc,
- Asm->GetTempSymbol("func_end",
- Asm->getFunctionNumber()));
const TargetRegisterInfo *RI = Asm->TM.getRegisterInfo();
MachineLocation Location(RI->getFrameRegister(*Asm->MF));
- SPCU->addAddress(SPDie, dwarf::DW_AT_frame_base, Location);
+ SPCU.addAddress(*SPDie, dwarf::DW_AT_frame_base, Location);
// Add name to the name table, we do this here because we're guaranteed
// to have concrete versions of our DW_TAG_subprogram nodes.
- addSubprogramNames(SPCU, SP, SPDie);
+ addSubprogramNames(SP, *SPDie);
- return SPDie;
+ return *SPDie;
}
/// Check whether we should create a DIE for the given Scope, return true
@@ -466,450 +350,376 @@ bool DwarfDebug::isLexicalScopeDIENull(LexicalScope *Scope) {
return !End;
}
-// Construct new DW_TAG_lexical_block for this scope and attach
-// DW_AT_low_pc/DW_AT_high_pc labels.
-DIE *DwarfDebug::constructLexicalScopeDIE(CompileUnit *TheCU,
- LexicalScope *Scope) {
- if (isLexicalScopeDIENull(Scope))
- return 0;
+static void addSectionLabel(AsmPrinter &Asm, DwarfUnit &U, DIE &D,
+ dwarf::Attribute A, const MCSymbol *L,
+ const MCSymbol *Sec) {
+ if (Asm.MAI->doesDwarfUseRelocationsAcrossSections())
+ U.addSectionLabel(D, A, L);
+ else
+ U.addSectionDelta(D, A, L, Sec);
+}
- DIE *ScopeDIE = new DIE(dwarf::DW_TAG_lexical_block);
- if (Scope->isAbstractScope())
- return ScopeDIE;
+void DwarfDebug::addScopeRangeList(DwarfCompileUnit &TheCU, DIE &ScopeDIE,
+ const SmallVectorImpl<InsnRange> &Range) {
+ // Emit offset in .debug_range as a relocatable label. emitDIE will handle
+ // emitting it appropriately.
+ MCSymbol *RangeSym = Asm->GetTempSymbol("debug_ranges", GlobalRangeCount++);
- const SmallVectorImpl<InsnRange> &Ranges = Scope->getRanges();
- // If we have multiple ranges, emit them into the range section.
- if (Ranges.size() > 1) {
- // .debug_range section has not been laid out yet. Emit offset in
- // .debug_range as a uint, size 4, for now. emitDIE will handle
- // DW_AT_ranges appropriately.
- TheCU->addSectionOffset(ScopeDIE, dwarf::DW_AT_ranges,
- DebugRangeSymbols.size() *
- Asm->getDataLayout().getPointerSize());
- for (SmallVectorImpl<InsnRange>::const_iterator RI = Ranges.begin(),
- RE = Ranges.end(); RI != RE; ++RI) {
- DebugRangeSymbols.push_back(getLabelBeforeInsn(RI->first));
- DebugRangeSymbols.push_back(getLabelAfterInsn(RI->second));
- }
+ // Under fission, ranges are specified by constant offsets relative to the
+ // CU's DW_AT_GNU_ranges_base.
+ if (useSplitDwarf())
+ TheCU.addSectionDelta(ScopeDIE, dwarf::DW_AT_ranges, RangeSym,
+ DwarfDebugRangeSectionSym);
+ else
+ addSectionLabel(*Asm, TheCU, ScopeDIE, dwarf::DW_AT_ranges, RangeSym,
+ DwarfDebugRangeSectionSym);
- // Terminate the range list.
- DebugRangeSymbols.push_back(NULL);
- DebugRangeSymbols.push_back(NULL);
- return ScopeDIE;
+ RangeSpanList List(RangeSym);
+ for (const InsnRange &R : Range) {
+ RangeSpan Span(getLabelBeforeInsn(R.first), getLabelAfterInsn(R.second));
+ List.addRange(std::move(Span));
}
- // Construct the address range for this DIE.
- SmallVectorImpl<InsnRange>::const_iterator RI = Ranges.begin();
- MCSymbol *Start = getLabelBeforeInsn(RI->first);
- MCSymbol *End = getLabelAfterInsn(RI->second);
- assert(End && "End label should not be null!");
+ // Add the range list to the set of ranges to be emitted.
+ TheCU.addRangeList(std::move(List));
+}
+
+void DwarfDebug::attachRangesOrLowHighPC(DwarfCompileUnit &TheCU, DIE &Die,
+ const SmallVectorImpl<InsnRange> &Ranges) {
+ assert(!Ranges.empty());
+ if (Ranges.size() == 1)
+ attachLowHighPC(TheCU, Die, getLabelBeforeInsn(Ranges.front().first),
+ getLabelAfterInsn(Ranges.front().second));
+ else
+ addScopeRangeList(TheCU, Die, Ranges);
+}
+
+// Construct new DW_TAG_lexical_block for this scope and attach
+// DW_AT_low_pc/DW_AT_high_pc labels.
+std::unique_ptr<DIE>
+DwarfDebug::constructLexicalScopeDIE(DwarfCompileUnit &TheCU,
+ LexicalScope *Scope) {
+ if (isLexicalScopeDIENull(Scope))
+ return nullptr;
- assert(Start->isDefined() && "Invalid starting label for an inlined scope!");
- assert(End->isDefined() && "Invalid end label for an inlined scope!");
+ auto ScopeDIE = make_unique<DIE>(dwarf::DW_TAG_lexical_block);
+ if (Scope->isAbstractScope())
+ return ScopeDIE;
- TheCU->addLabelAddress(ScopeDIE, dwarf::DW_AT_low_pc, Start);
- TheCU->addLabelAddress(ScopeDIE, dwarf::DW_AT_high_pc, End);
+ attachRangesOrLowHighPC(TheCU, *ScopeDIE, Scope->getRanges());
return ScopeDIE;
}
// This scope represents inlined body of a function. Construct DIE to
// represent this concrete inlined copy of the function.
-DIE *DwarfDebug::constructInlinedScopeDIE(CompileUnit *TheCU,
- LexicalScope *Scope) {
- const SmallVectorImpl<InsnRange> &Ranges = Scope->getRanges();
- assert(Ranges.empty() == false &&
- "LexicalScope does not have instruction markers!");
-
- if (!Scope->getScopeNode())
- return NULL;
+std::unique_ptr<DIE>
+DwarfDebug::constructInlinedScopeDIE(DwarfCompileUnit &TheCU,
+ LexicalScope *Scope) {
+ assert(Scope->getScopeNode());
DIScope DS(Scope->getScopeNode());
DISubprogram InlinedSP = getDISubprogram(DS);
- DIE *OriginDIE = TheCU->getDIE(InlinedSP);
- if (!OriginDIE) {
- DEBUG(dbgs() << "Unable to find original DIE for an inlined subprogram.");
- return NULL;
- }
-
- DIE *ScopeDIE = new DIE(dwarf::DW_TAG_inlined_subroutine);
- TheCU->addDIEEntry(ScopeDIE, dwarf::DW_AT_abstract_origin, OriginDIE);
-
- if (Ranges.size() > 1) {
- // .debug_range section has not been laid out yet. Emit offset in
- // .debug_range as a uint, size 4, for now. emitDIE will handle
- // DW_AT_ranges appropriately.
- TheCU->addSectionOffset(ScopeDIE, dwarf::DW_AT_ranges,
- DebugRangeSymbols.size() *
- Asm->getDataLayout().getPointerSize());
- for (SmallVectorImpl<InsnRange>::const_iterator RI = Ranges.begin(),
- RE = Ranges.end(); RI != RE; ++RI) {
- DebugRangeSymbols.push_back(getLabelBeforeInsn(RI->first));
- DebugRangeSymbols.push_back(getLabelAfterInsn(RI->second));
- }
- DebugRangeSymbols.push_back(NULL);
- DebugRangeSymbols.push_back(NULL);
- } else {
- SmallVectorImpl<InsnRange>::const_iterator RI = Ranges.begin();
- MCSymbol *StartLabel = getLabelBeforeInsn(RI->first);
- MCSymbol *EndLabel = getLabelAfterInsn(RI->second);
+ // Find the subprogram's DwarfCompileUnit in the SPMap in case the subprogram
+ // was inlined from another compile unit.
+ DIE *OriginDIE = AbstractSPDies[InlinedSP];
+ assert(OriginDIE && "Unable to find original DIE for an inlined subprogram.");
- if (StartLabel == 0 || EndLabel == 0)
- llvm_unreachable("Unexpected Start and End labels for an inlined scope!");
+ auto ScopeDIE = make_unique<DIE>(dwarf::DW_TAG_inlined_subroutine);
+ TheCU.addDIEEntry(*ScopeDIE, dwarf::DW_AT_abstract_origin, *OriginDIE);
- assert(StartLabel->isDefined() &&
- "Invalid starting label for an inlined scope!");
- assert(EndLabel->isDefined() && "Invalid end label for an inlined scope!");
-
- TheCU->addLabelAddress(ScopeDIE, dwarf::DW_AT_low_pc, StartLabel);
- TheCU->addLabelAddress(ScopeDIE, dwarf::DW_AT_high_pc, EndLabel);
- }
+ attachRangesOrLowHighPC(TheCU, *ScopeDIE, Scope->getRanges());
InlinedSubprogramDIEs.insert(OriginDIE);
// Add the call site information to the DIE.
DILocation DL(Scope->getInlinedAt());
- TheCU->addUInt(ScopeDIE, dwarf::DW_AT_call_file, None,
- getOrCreateSourceID(DL.getFilename(), DL.getDirectory(),
- TheCU->getUniqueID()));
- TheCU->addUInt(ScopeDIE, dwarf::DW_AT_call_line, None, DL.getLineNumber());
+ TheCU.addUInt(*ScopeDIE, dwarf::DW_AT_call_file, None,
+ TheCU.getOrCreateSourceID(DL.getFilename(), DL.getDirectory()));
+ TheCU.addUInt(*ScopeDIE, dwarf::DW_AT_call_line, None, DL.getLineNumber());
// Add name to the name table, we do this here because we're guaranteed
// to have concrete versions of our DW_TAG_inlined_subprogram nodes.
- addSubprogramNames(TheCU, InlinedSP, ScopeDIE);
+ addSubprogramNames(InlinedSP, *ScopeDIE);
return ScopeDIE;
}
-DIE *DwarfDebug::createScopeChildrenDIE(CompileUnit *TheCU, LexicalScope *Scope,
- SmallVectorImpl<DIE*> &Children) {
- DIE *ObjectPointer = NULL;
+static std::unique_ptr<DIE> constructVariableDIE(DwarfCompileUnit &TheCU,
+ DbgVariable &DV,
+ const LexicalScope &Scope,
+ DIE *&ObjectPointer) {
+ auto Var = TheCU.constructVariableDIE(DV, Scope.isAbstractScope());
+ if (DV.isObjectPointer())
+ ObjectPointer = Var.get();
+ return Var;
+}
+
+DIE *DwarfDebug::createScopeChildrenDIE(
+ DwarfCompileUnit &TheCU, LexicalScope *Scope,
+ SmallVectorImpl<std::unique_ptr<DIE>> &Children) {
+ DIE *ObjectPointer = nullptr;
// Collect arguments for current function.
if (LScopes.isCurrentFunctionScope(Scope)) {
- for (unsigned i = 0, N = CurrentFnArguments.size(); i < N; ++i)
- if (DbgVariable *ArgDV = CurrentFnArguments[i])
- if (DIE *Arg =
- TheCU->constructVariableDIE(*ArgDV, Scope->isAbstractScope())) {
- Children.push_back(Arg);
- if (ArgDV->isObjectPointer()) ObjectPointer = Arg;
- }
+ for (DbgVariable *ArgDV : CurrentFnArguments)
+ if (ArgDV)
+ Children.push_back(
+ constructVariableDIE(TheCU, *ArgDV, *Scope, ObjectPointer));
- // Create the unspecified parameter that marks a function as variadic.
+ // If this is a variadic function, add an unspecified parameter.
DISubprogram SP(Scope->getScopeNode());
- assert(SP.Verify());
DIArray FnArgs = SP.getType().getTypeArray();
- if (FnArgs.getElement(FnArgs.getNumElements()-1).isUnspecifiedParameter()) {
- DIE *Ellipsis = new DIE(dwarf::DW_TAG_unspecified_parameters);
- Children.push_back(Ellipsis);
+ if (FnArgs.getElement(FnArgs.getNumElements() - 1)
+ .isUnspecifiedParameter()) {
+ Children.push_back(
+ make_unique<DIE>(dwarf::DW_TAG_unspecified_parameters));
}
}
// Collect lexical scope children first.
- const SmallVectorImpl<DbgVariable *> &Variables =ScopeVariables.lookup(Scope);
- for (unsigned i = 0, N = Variables.size(); i < N; ++i)
- if (DIE *Variable =
- TheCU->constructVariableDIE(*Variables[i], Scope->isAbstractScope())) {
- Children.push_back(Variable);
- if (Variables[i]->isObjectPointer()) ObjectPointer = Variable;
- }
- const SmallVectorImpl<LexicalScope *> &Scopes = Scope->getChildren();
- for (unsigned j = 0, M = Scopes.size(); j < M; ++j)
- if (DIE *Nested = constructScopeDIE(TheCU, Scopes[j]))
- Children.push_back(Nested);
+ for (DbgVariable *DV : ScopeVariables.lookup(Scope))
+ Children.push_back(constructVariableDIE(TheCU, *DV, *Scope, ObjectPointer));
+
+ for (LexicalScope *LS : Scope->getChildren())
+ if (std::unique_ptr<DIE> Nested = constructScopeDIE(TheCU, LS))
+ Children.push_back(std::move(Nested));
return ObjectPointer;
}
+void DwarfDebug::createAndAddScopeChildren(DwarfCompileUnit &TheCU,
+ LexicalScope *Scope, DIE &ScopeDIE) {
+ // We create children when the scope DIE is not null.
+ SmallVector<std::unique_ptr<DIE>, 8> Children;
+ if (DIE *ObjectPointer = createScopeChildrenDIE(TheCU, Scope, Children))
+ TheCU.addDIEEntry(ScopeDIE, dwarf::DW_AT_object_pointer, *ObjectPointer);
+
+ // Add children
+ for (auto &I : Children)
+ ScopeDIE.addChild(std::move(I));
+}
+
+void DwarfDebug::constructAbstractSubprogramScopeDIE(DwarfCompileUnit &TheCU,
+ LexicalScope *Scope) {
+ assert(Scope && Scope->getScopeNode());
+ assert(Scope->isAbstractScope());
+ assert(!Scope->getInlinedAt());
+
+ DISubprogram SP(Scope->getScopeNode());
+
+ ProcessedSPNodes.insert(SP);
+
+ DIE *&AbsDef = AbstractSPDies[SP];
+ if (AbsDef)
+ return;
+
+ // Find the subprogram's DwarfCompileUnit in the SPMap in case the subprogram
+ // was inlined from another compile unit.
+ DwarfCompileUnit &SPCU = *SPMap[SP];
+ DIE *ContextDIE;
+
+ // Some of this is duplicated from DwarfUnit::getOrCreateSubprogramDIE, with
+ // the important distinction that the DIDescriptor is not associated with the
+ // DIE (since the DIDescriptor will be associated with the concrete DIE, if
+ // any). It could be refactored to some common utility function.
+ if (DISubprogram SPDecl = SP.getFunctionDeclaration()) {
+ ContextDIE = &SPCU.getUnitDie();
+ SPCU.getOrCreateSubprogramDIE(SPDecl);
+ } else
+ ContextDIE = SPCU.getOrCreateContextDIE(resolve(SP.getContext()));
+
+ // Passing null as the associated DIDescriptor because the abstract definition
+ // shouldn't be found by lookup.
+ AbsDef = &SPCU.createAndAddDIE(dwarf::DW_TAG_subprogram, *ContextDIE,
+ DIDescriptor());
+ SPCU.applySubprogramAttributesToDefinition(SP, *AbsDef);
+
+ SPCU.addUInt(*AbsDef, dwarf::DW_AT_inline, None, dwarf::DW_INL_inlined);
+ createAndAddScopeChildren(SPCU, Scope, *AbsDef);
+}
+
+DIE &DwarfDebug::constructSubprogramScopeDIE(DwarfCompileUnit &TheCU,
+ LexicalScope *Scope) {
+ assert(Scope && Scope->getScopeNode());
+ assert(!Scope->getInlinedAt());
+ assert(!Scope->isAbstractScope());
+ DISubprogram Sub(Scope->getScopeNode());
+
+ assert(Sub.isSubprogram());
+
+ ProcessedSPNodes.insert(Sub);
+
+ DIE &ScopeDIE = updateSubprogramScopeDIE(TheCU, Sub);
+
+ createAndAddScopeChildren(TheCU, Scope, ScopeDIE);
+
+ return ScopeDIE;
+}
+
// Construct a DIE for this scope.
-DIE *DwarfDebug::constructScopeDIE(CompileUnit *TheCU, LexicalScope *Scope) {
+std::unique_ptr<DIE> DwarfDebug::constructScopeDIE(DwarfCompileUnit &TheCU,
+ LexicalScope *Scope) {
if (!Scope || !Scope->getScopeNode())
- return NULL;
+ return nullptr;
DIScope DS(Scope->getScopeNode());
- SmallVector<DIE *, 8> Children;
- DIE *ObjectPointer = NULL;
- bool ChildrenCreated = false;
+ assert((Scope->getInlinedAt() || !DS.isSubprogram()) &&
+ "Only handle inlined subprograms here, use "
+ "constructSubprogramScopeDIE for non-inlined "
+ "subprograms");
+
+ SmallVector<std::unique_ptr<DIE>, 8> Children;
// We try to create the scope DIE first, then the children DIEs. This will
// avoid creating un-used children then removing them later when we find out
// the scope DIE is null.
- DIE *ScopeDIE = NULL;
- if (Scope->getInlinedAt())
+ std::unique_ptr<DIE> ScopeDIE;
+ if (Scope->getParent() && DS.isSubprogram()) {
ScopeDIE = constructInlinedScopeDIE(TheCU, Scope);
- else if (DS.isSubprogram()) {
- ProcessedSPNodes.insert(DS);
- if (Scope->isAbstractScope()) {
- ScopeDIE = TheCU->getDIE(DS);
- // Note down abstract DIE.
- if (ScopeDIE)
- AbstractSPDies.insert(std::make_pair(DS, ScopeDIE));
- } else
- ScopeDIE = updateSubprogramScopeDIE(TheCU, DISubprogram(DS));
+ if (!ScopeDIE)
+ return nullptr;
+ // We create children when the scope DIE is not null.
+ createScopeChildrenDIE(TheCU, Scope, Children);
} else {
// Early exit when we know the scope DIE is going to be null.
if (isLexicalScopeDIENull(Scope))
- return NULL;
+ return nullptr;
// We create children here when we know the scope DIE is not going to be
// null and the children will be added to the scope DIE.
- ObjectPointer = createScopeChildrenDIE(TheCU, Scope, Children);
- ChildrenCreated = true;
+ createScopeChildrenDIE(TheCU, Scope, Children);
// There is no need to emit empty lexical block DIE.
std::pair<ImportedEntityMap::const_iterator,
- ImportedEntityMap::const_iterator> Range = std::equal_range(
- ScopesWithImportedEntities.begin(), ScopesWithImportedEntities.end(),
- std::pair<const MDNode *, const MDNode *>(DS, (const MDNode*)0),
- less_first());
+ ImportedEntityMap::const_iterator> Range =
+ std::equal_range(ScopesWithImportedEntities.begin(),
+ ScopesWithImportedEntities.end(),
+ std::pair<const MDNode *, const MDNode *>(DS, nullptr),
+ less_first());
if (Children.empty() && Range.first == Range.second)
- return NULL;
+ return nullptr;
ScopeDIE = constructLexicalScopeDIE(TheCU, Scope);
assert(ScopeDIE && "Scope DIE should not be null.");
for (ImportedEntityMap::const_iterator i = Range.first; i != Range.second;
++i)
- constructImportedEntityDIE(TheCU, i->second, ScopeDIE);
- }
-
- if (!ScopeDIE) {
- assert(Children.empty() &&
- "We create children only when the scope DIE is not null.");
- return NULL;
+ constructImportedEntityDIE(TheCU, i->second, *ScopeDIE);
}
- if (!ChildrenCreated)
- // We create children when the scope DIE is not null.
- ObjectPointer = createScopeChildrenDIE(TheCU, Scope, Children);
// Add children
- for (SmallVectorImpl<DIE *>::iterator I = Children.begin(),
- E = Children.end(); I != E; ++I)
- ScopeDIE->addChild(*I);
-
- if (DS.isSubprogram() && ObjectPointer != NULL)
- TheCU->addDIEEntry(ScopeDIE, dwarf::DW_AT_object_pointer, ObjectPointer);
-
- if (DS.isSubprogram())
- TheCU->addPubTypes(DISubprogram(DS));
+ for (auto &I : Children)
+ ScopeDIE->addChild(std::move(I));
return ScopeDIE;
}
-// Look up the source id with the given directory and source file names.
-// If none currently exists, create a new id and insert it in the
-// SourceIds map. This can update DirectoryNames and SourceFileNames maps
-// as well.
-unsigned DwarfDebug::getOrCreateSourceID(StringRef FileName,
- StringRef DirName, unsigned CUID) {
- // If we use .loc in assembly, we can't separate .file entries according to
- // compile units. Thus all files will belong to the default compile unit.
-
- // FIXME: add a better feature test than hasRawTextSupport. Even better,
- // extend .file to support this.
- if (Asm->TM.hasMCUseLoc() && Asm->OutStreamer.hasRawTextSupport())
- CUID = 0;
-
- // If FE did not provide a file name, then assume stdin.
- if (FileName.empty())
- return getOrCreateSourceID("<stdin>", StringRef(), CUID);
-
- // TODO: this might not belong here. See if we can factor this better.
- if (DirName == CompilationDir)
- DirName = "";
-
- // FileIDCUMap stores the current ID for the given compile unit.
- unsigned SrcId = FileIDCUMap[CUID] + 1;
-
- // We look up the CUID/file/dir by concatenating them with a zero byte.
- SmallString<128> NamePair;
- NamePair += utostr(CUID);
- NamePair += '\0';
- NamePair += DirName;
- NamePair += '\0'; // Zero bytes are not allowed in paths.
- NamePair += FileName;
-
- StringMapEntry<unsigned> &Ent = SourceIdMap.GetOrCreateValue(NamePair, SrcId);
- if (Ent.getValue() != SrcId)
- return Ent.getValue();
-
- FileIDCUMap[CUID] = SrcId;
- // Print out a .file directive to specify files for .loc directives.
- Asm->OutStreamer.EmitDwarfFileDirective(SrcId, DirName, FileName, CUID);
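+// Flag a unit DIE with DW_AT_GNU_pubnames when the GNU pubnames/pubtypes
+// sections are being generated.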
+void DwarfDebug::addGnuPubAttributes(DwarfUnit &U, DIE &D) const {
+ if (!GenerateGnuPubSections)
+ return;
- return SrcId;
+ U.addFlag(D, dwarf::DW_AT_GNU_pubnames);
}
-// Create new CompileUnit for the given metadata node with tag
+// Create new DwarfCompileUnit for the given metadata node with tag
// DW_TAG_compile_unit.
-CompileUnit *DwarfDebug::constructCompileUnit(DICompileUnit DIUnit) {
+DwarfCompileUnit &DwarfDebug::constructDwarfCompileUnit(DICompileUnit DIUnit) {
StringRef FN = DIUnit.getFilename();
CompilationDir = DIUnit.getDirectory();
- DIE *Die = new DIE(dwarf::DW_TAG_compile_unit);
- CompileUnit *NewCU = new CompileUnit(GlobalCUIndexCount++, Die, DIUnit, Asm,
- this, &InfoHolder);
-
- FileIDCUMap[NewCU->getUniqueID()] = 0;
- // Call this to emit a .file directive if it wasn't emitted for the source
- // file this CU comes from yet.
- getOrCreateSourceID(FN, CompilationDir, NewCU->getUniqueID());
-
- NewCU->addString(Die, dwarf::DW_AT_producer, DIUnit.getProducer());
- NewCU->addUInt(Die, dwarf::DW_AT_language, dwarf::DW_FORM_data2,
- DIUnit.getLanguage());
- NewCU->addString(Die, dwarf::DW_AT_name, FN);
-
- // 2.17.1 requires that we use DW_AT_low_pc for a single entry point
- // into an entity. We're using 0 (or a NULL label) for this. For
- // split dwarf it's in the skeleton CU so omit it here.
- if (!useSplitDwarf())
- NewCU->addLabelAddress(Die, dwarf::DW_AT_low_pc, NULL);
-
- // Define start line table label for each Compile Unit.
- MCSymbol *LineTableStartSym = Asm->GetTempSymbol("line_table_start",
- NewCU->getUniqueID());
- Asm->OutStreamer.getContext().setMCLineTableSymbol(LineTableStartSym,
- NewCU->getUniqueID());
-
- // Use a single line table if we are using .loc and generating assembly.
- bool UseTheFirstCU =
- (Asm->TM.hasMCUseLoc() && Asm->OutStreamer.hasRawTextSupport()) ||
- (NewCU->getUniqueID() == 0);
+ auto OwnedUnit = make_unique<DwarfCompileUnit>(
+ InfoHolder.getUnits().size(), DIUnit, Asm, this, &InfoHolder);
+ DwarfCompileUnit &NewCU = *OwnedUnit;
+ DIE &Die = NewCU.getUnitDie();
+ InfoHolder.addUnit(std::move(OwnedUnit));
+
+ // LTO with assembly output shares a single line table amongst multiple CUs.
+ // To avoid the compilation directory being ambiguous, let the line table
+ // explicitly describe the directory of all files, never relying on the
+ // compilation directory.
+ if (!Asm->OutStreamer.hasRawTextSupport() || SingleCU)
+ Asm->OutStreamer.getContext().setMCLineTableCompilationDir(
+ NewCU.getUniqueID(), CompilationDir);
+
+ NewCU.addString(Die, dwarf::DW_AT_producer, DIUnit.getProducer());
+ NewCU.addUInt(Die, dwarf::DW_AT_language, dwarf::DW_FORM_data2,
+ DIUnit.getLanguage());
+ NewCU.addString(Die, dwarf::DW_AT_name, FN);
if (!useSplitDwarf()) {
- // DW_AT_stmt_list is a offset of line number information for this
- // compile unit in debug_line section. For split dwarf this is
- // left in the skeleton CU and so not included.
- // The line table entries are not always emitted in assembly, so it
- // is not okay to use line_table_start here.
- if (Asm->MAI->doesDwarfUseRelocationsAcrossSections())
- NewCU->addSectionLabel(
- Die, dwarf::DW_AT_stmt_list,
- UseTheFirstCU ? Asm->GetTempSymbol("section_line")
- : LineTableStartSym);
- else if (UseTheFirstCU)
- NewCU->addSectionOffset(Die, dwarf::DW_AT_stmt_list, 0);
- else
- NewCU->addSectionDelta(Die, dwarf::DW_AT_stmt_list,
- LineTableStartSym, DwarfLineSectionSym);
+ NewCU.initStmtList(DwarfLineSectionSym);
// If we're using split dwarf the compilation dir is going to be in the
// skeleton CU and so we don't need to duplicate it here.
if (!CompilationDir.empty())
- NewCU->addString(Die, dwarf::DW_AT_comp_dir, CompilationDir);
-
- // Flags to let the linker know we have emitted new style pubnames. Only
- // emit it here if we don't have a skeleton CU for split dwarf.
- if (GenerateGnuPubSections) {
- if (Asm->MAI->doesDwarfUseRelocationsAcrossSections())
- NewCU->addSectionLabel(
- Die, dwarf::DW_AT_GNU_pubnames,
- Asm->GetTempSymbol("gnu_pubnames", NewCU->getUniqueID()));
- else
- NewCU->addSectionDelta(
- Die, dwarf::DW_AT_GNU_pubnames,
- Asm->GetTempSymbol("gnu_pubnames", NewCU->getUniqueID()),
- DwarfGnuPubNamesSectionSym);
-
- if (Asm->MAI->doesDwarfUseRelocationsAcrossSections())
- NewCU->addSectionLabel(
- Die, dwarf::DW_AT_GNU_pubtypes,
- Asm->GetTempSymbol("gnu_pubtypes", NewCU->getUniqueID()));
- else
- NewCU->addSectionDelta(
- Die, dwarf::DW_AT_GNU_pubtypes,
- Asm->GetTempSymbol("gnu_pubtypes", NewCU->getUniqueID()),
- DwarfGnuPubTypesSectionSym);
- }
+ NewCU.addString(Die, dwarf::DW_AT_comp_dir, CompilationDir);
+
+ addGnuPubAttributes(NewCU, Die);
}
if (DIUnit.isOptimized())
- NewCU->addFlag(Die, dwarf::DW_AT_APPLE_optimized);
+ NewCU.addFlag(Die, dwarf::DW_AT_APPLE_optimized);
StringRef Flags = DIUnit.getFlags();
if (!Flags.empty())
- NewCU->addString(Die, dwarf::DW_AT_APPLE_flags, Flags);
+ NewCU.addString(Die, dwarf::DW_AT_APPLE_flags, Flags);
if (unsigned RVer = DIUnit.getRunTimeVersion())
- NewCU->addUInt(Die, dwarf::DW_AT_APPLE_major_runtime_vers,
- dwarf::DW_FORM_data1, RVer);
+ NewCU.addUInt(Die, dwarf::DW_AT_APPLE_major_runtime_vers,
+ dwarf::DW_FORM_data1, RVer);
if (!FirstCU)
- FirstCU = NewCU;
-
- InfoHolder.addUnit(NewCU);
+ FirstCU = &NewCU;
- CUMap.insert(std::make_pair(DIUnit, NewCU));
- CUDieMap.insert(std::make_pair(Die, NewCU));
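+ // With split DWARF, the unit goes into the .dwo info section and is paired
+ // with a skeleton CU; otherwise it is emitted into the regular info section.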
+ if (useSplitDwarf()) {
+ NewCU.initSection(Asm->getObjFileLowering().getDwarfInfoDWOSection(),
+ DwarfInfoDWOSectionSym);
+ NewCU.setSkeleton(constructSkeletonCU(NewCU));
+ } else
+ NewCU.initSection(Asm->getObjFileLowering().getDwarfInfoSection(),
+ DwarfInfoSectionSym);
+
+ CUMap.insert(std::make_pair(DIUnit, &NewCU));
+ CUDieMap.insert(std::make_pair(&Die, &NewCU));
return NewCU;
}
-// Construct subprogram DIE.
-void DwarfDebug::constructSubprogramDIE(CompileUnit *TheCU, const MDNode *N) {
- // FIXME: We should only call this routine once, however, during LTO if a
- // program is defined in multiple CUs we could end up calling it out of
- // beginModule as we walk the CUs.
-
- CompileUnit *&CURef = SPMap[N];
- if (CURef)
- return;
- CURef = TheCU;
-
- DISubprogram SP(N);
- if (!SP.isDefinition())
- // This is a method declaration which will be handled while constructing
- // class type.
- return;
-
- DIE *SubprogramDie = TheCU->getOrCreateSubprogramDIE(SP);
-
- // Expose as a global name.
- TheCU->addGlobalName(SP.getName(), SubprogramDie, resolve(SP.getContext()));
-}
-
-void DwarfDebug::constructImportedEntityDIE(CompileUnit *TheCU,
+void DwarfDebug::constructImportedEntityDIE(DwarfCompileUnit &TheCU,
const MDNode *N) {
DIImportedEntity Module(N);
- if (!Module.Verify())
- return;
- if (DIE *D = TheCU->getOrCreateContextDIE(Module.getContext()))
- constructImportedEntityDIE(TheCU, Module, D);
+ assert(Module.Verify());
+ if (DIE *D = TheCU.getOrCreateContextDIE(Module.getContext()))
+ constructImportedEntityDIE(TheCU, Module, *D);
}
-void DwarfDebug::constructImportedEntityDIE(CompileUnit *TheCU, const MDNode *N,
- DIE *Context) {
+void DwarfDebug::constructImportedEntityDIE(DwarfCompileUnit &TheCU,
+ const MDNode *N, DIE &Context) {
DIImportedEntity Module(N);
- if (!Module.Verify())
- return;
+ assert(Module.Verify());
return constructImportedEntityDIE(TheCU, Module, Context);
}
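+// Build the imported-entity DIE itself: resolve the imported namespace,
+// subprogram, or type to its DIE and attach DW_AT_import, the source
+// location, and an optional DW_AT_name.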
-void DwarfDebug::constructImportedEntityDIE(CompileUnit *TheCU,
+void DwarfDebug::constructImportedEntityDIE(DwarfCompileUnit &TheCU,
const DIImportedEntity &Module,
- DIE *Context) {
+ DIE &Context) {
assert(Module.Verify() &&
"Use one of the MDNode * overloads to handle invalid metadata");
- assert(Context && "Should always have a context for an imported_module");
- DIE *IMDie = new DIE(Module.getTag());
- TheCU->insertDIE(Module, IMDie);
+ DIE &IMDie = TheCU.createAndAddDIE(Module.getTag(), Context, Module);
DIE *EntityDie;
- DIDescriptor Entity = Module.getEntity();
+ DIDescriptor Entity = resolve(Module.getEntity());
if (Entity.isNameSpace())
- EntityDie = TheCU->getOrCreateNameSpace(DINameSpace(Entity));
+ EntityDie = TheCU.getOrCreateNameSpace(DINameSpace(Entity));
else if (Entity.isSubprogram())
- EntityDie = TheCU->getOrCreateSubprogramDIE(DISubprogram(Entity));
+ EntityDie = TheCU.getOrCreateSubprogramDIE(DISubprogram(Entity));
else if (Entity.isType())
- EntityDie = TheCU->getOrCreateTypeDIE(DIType(Entity));
+ EntityDie = TheCU.getOrCreateTypeDIE(DIType(Entity));
else
- EntityDie = TheCU->getDIE(Entity);
- unsigned FileID = getOrCreateSourceID(Module.getContext().getFilename(),
- Module.getContext().getDirectory(),
- TheCU->getUniqueID());
- TheCU->addUInt(IMDie, dwarf::DW_AT_decl_file, None, FileID);
- TheCU->addUInt(IMDie, dwarf::DW_AT_decl_line, None, Module.getLineNumber());
- TheCU->addDIEEntry(IMDie, dwarf::DW_AT_import, EntityDie);
+ EntityDie = TheCU.getDIE(Entity);
+ TheCU.addSourceLine(IMDie, Module.getLineNumber(),
+ Module.getContext().getFilename(),
+ Module.getContext().getDirectory());
+ TheCU.addDIEEntry(IMDie, dwarf::DW_AT_import, *EntityDie);
StringRef Name = Module.getName();
if (!Name.empty())
- TheCU->addString(IMDie, dwarf::DW_AT_name, Name);
- Context->addChild(IMDie);
+ TheCU.addString(IMDie, dwarf::DW_AT_name, Name);
}
// Emit all Dwarf sections that should come prior to the content. Create
@@ -931,9 +741,11 @@ void DwarfDebug::beginModule() {
// Emit initial sections so we can reference labels later.
emitSectionLabels();
- for (unsigned i = 0, e = CU_Nodes->getNumOperands(); i != e; ++i) {
- DICompileUnit CUNode(CU_Nodes->getOperand(i));
- CompileUnit *CU = constructCompileUnit(CUNode);
+ SingleCU = CU_Nodes->getNumOperands() == 1;
+
+ for (MDNode *N : CU_Nodes->operands()) {
+ DICompileUnit CUNode(N);
+ DwarfCompileUnit &CU = constructDwarfCompileUnit(CUNode);
DIArray ImportedEntities = CUNode.getImportedEntities();
for (unsigned i = 0, e = ImportedEntities.getNumElements(); i != e; ++i)
ScopesWithImportedEntities.push_back(std::make_pair(
@@ -943,16 +755,21 @@ void DwarfDebug::beginModule() {
ScopesWithImportedEntities.end(), less_first());
DIArray GVs = CUNode.getGlobalVariables();
for (unsigned i = 0, e = GVs.getNumElements(); i != e; ++i)
- CU->createGlobalVariableDIE(DIGlobalVariable(GVs.getElement(i)));
+ CU.createGlobalVariableDIE(DIGlobalVariable(GVs.getElement(i)));
DIArray SPs = CUNode.getSubprograms();
for (unsigned i = 0, e = SPs.getNumElements(); i != e; ++i)
- constructSubprogramDIE(CU, SPs.getElement(i));
+ SPMap.insert(std::make_pair(SPs.getElement(i), &CU));
DIArray EnumTypes = CUNode.getEnumTypes();
for (unsigned i = 0, e = EnumTypes.getNumElements(); i != e; ++i)
- CU->getOrCreateTypeDIE(EnumTypes.getElement(i));
+ CU.getOrCreateTypeDIE(EnumTypes.getElement(i));
DIArray RetainedTypes = CUNode.getRetainedTypes();
- for (unsigned i = 0, e = RetainedTypes.getNumElements(); i != e; ++i)
- CU->getOrCreateTypeDIE(RetainedTypes.getElement(i));
+ for (unsigned i = 0, e = RetainedTypes.getNumElements(); i != e; ++i) {
+ DIType Ty(RetainedTypes.getElement(i));
+ // The retained types array by design contains pointers to
+ // MDNodes rather than DIRefs. Unique them here.
+ DIType UniqueTy(resolve(Ty.getRef()));
+ CU.getOrCreateTypeDIE(UniqueTy);
+ }
// Emit imported_modules last so that the relevant context is already
// available.
for (unsigned i = 0, e = ImportedEntities.getNumElements(); i != e; ++i)
@@ -966,147 +783,171 @@ void DwarfDebug::beginModule() {
SectionMap[Asm->getObjFileLowering().getTextSection()];
}
-// Attach DW_AT_inline attribute with inlined subprogram DIEs.
-void DwarfDebug::computeInlinedDIEs() {
- // Attach DW_AT_inline attribute with inlined subprogram DIEs.
- for (SmallPtrSet<DIE *, 4>::iterator AI = InlinedSubprogramDIEs.begin(),
- AE = InlinedSubprogramDIEs.end(); AI != AE; ++AI) {
- DIE *ISP = *AI;
- FirstCU->addUInt(ISP, dwarf::DW_AT_inline, None, dwarf::DW_INL_inlined);
- }
- for (DenseMap<const MDNode *, DIE *>::iterator AI = AbstractSPDies.begin(),
- AE = AbstractSPDies.end(); AI != AE; ++AI) {
- DIE *ISP = AI->second;
- if (InlinedSubprogramDIEs.count(ISP))
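+// Once all units are built, point each concrete variable DIE at its abstract
+// origin, if one exists, or apply its attributes directly.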
+void DwarfDebug::finishVariableDefinitions() {
+ for (const auto &Var : ConcreteVariables) {
+ DIE *VariableDie = Var->getDIE();
+ // FIXME: There shouldn't be any variables without DIEs.
+ if (!VariableDie)
continue;
- FirstCU->addUInt(ISP, dwarf::DW_AT_inline, None, dwarf::DW_INL_inlined);
+ // FIXME: Consider the time-space tradeoff of just storing the unit pointer
+ // in the ConcreteVariables list, rather than looking it up again here.
+ // DIE::getUnit isn't simple - it walks parent pointers, etc.
+ DwarfCompileUnit *Unit = lookupUnit(VariableDie->getUnit());
+ assert(Unit);
+ DbgVariable *AbsVar = getExistingAbstractVariable(Var->getVariable());
+ if (AbsVar && AbsVar->getDIE()) {
+ Unit->addDIEEntry(*VariableDie, dwarf::DW_AT_abstract_origin,
+ *AbsVar->getDIE());
+ } else
+ Unit->applyVariableAttributes(*Var, *VariableDie);
}
}
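+// For every subprogram in every CU, either link the concrete DIE to its
+// abstract definition or apply the definition attributes directly, creating
+// the DIE lazily if codegen never produced one.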
+void DwarfDebug::finishSubprogramDefinitions() {
+ const Module *M = MMI->getModule();
+
+ NamedMDNode *CU_Nodes = M->getNamedMetadata("llvm.dbg.cu");
+ for (MDNode *N : CU_Nodes->operands()) {
+ DICompileUnit TheCU(N);
+ // Construct subprogram DIE and add variables DIEs.
+ DwarfCompileUnit *SPCU =
+ static_cast<DwarfCompileUnit *>(CUMap.lookup(TheCU));
+ DIArray Subprograms = TheCU.getSubprograms();
+ for (unsigned i = 0, e = Subprograms.getNumElements(); i != e; ++i) {
+ DISubprogram SP(Subprograms.getElement(i));
+ // Perhaps the subprogram is in another CU (such as due to comdat
+ // folding, etc), in which case ignore it here.
+ if (SPMap[SP] != SPCU)
+ continue;
+ DIE *D = SPCU->getDIE(SP);
+ if (DIE *AbsSPDIE = AbstractSPDies.lookup(SP)) {
+ if (D)
+ // If this subprogram has an abstract definition, reference that
+ SPCU->addDIEEntry(*D, dwarf::DW_AT_abstract_origin, *AbsSPDIE);
+ } else {
+ if (!D)
+ // Lazily construct the subprogram if we didn't see either concrete or
+ // inlined versions during codegen.
+ D = SPCU->getOrCreateSubprogramDIE(SP);
+ // And attach the attributes
+ SPCU->applySubprogramAttributesToDefinition(SP, *D);
+ }
+ }
+ }
+}
+
+
// Collect info for variables that were optimized out.
void DwarfDebug::collectDeadVariables() {
const Module *M = MMI->getModule();
if (NamedMDNode *CU_Nodes = M->getNamedMetadata("llvm.dbg.cu")) {
- for (unsigned i = 0, e = CU_Nodes->getNumOperands(); i != e; ++i) {
- DICompileUnit TheCU(CU_Nodes->getOperand(i));
+ for (MDNode *N : CU_Nodes->operands()) {
+ DICompileUnit TheCU(N);
+ // Construct subprogram DIE and add variables DIEs.
+ DwarfCompileUnit *SPCU =
+ static_cast<DwarfCompileUnit *>(CUMap.lookup(TheCU));
+ assert(SPCU && "Unable to find Compile Unit!");
DIArray Subprograms = TheCU.getSubprograms();
for (unsigned i = 0, e = Subprograms.getNumElements(); i != e; ++i) {
DISubprogram SP(Subprograms.getElement(i));
if (ProcessedSPNodes.count(SP) != 0)
continue;
- if (!SP.isSubprogram())
- continue;
- if (!SP.isDefinition())
- continue;
+ assert(SP.isSubprogram() &&
+ "CU's subprogram list contains a non-subprogram");
+ assert(SP.isDefinition() &&
+ "CU's subprogram list contains a subprogram declaration");
DIArray Variables = SP.getVariables();
if (Variables.getNumElements() == 0)
continue;
- // Construct subprogram DIE and add variables DIEs.
- CompileUnit *SPCU = CUMap.lookup(TheCU);
- assert(SPCU && "Unable to find Compile Unit!");
- // FIXME: See the comment in constructSubprogramDIE about duplicate
- // subprogram DIEs.
- constructSubprogramDIE(SPCU, SP);
- DIE *SPDIE = SPCU->getDIE(SP);
+ DIE *SPDIE = AbstractSPDies.lookup(SP);
+ if (!SPDIE)
+ SPDIE = SPCU->getDIE(SP);
+ assert(SPDIE);
for (unsigned vi = 0, ve = Variables.getNumElements(); vi != ve; ++vi) {
DIVariable DV(Variables.getElement(vi));
- if (!DV.isVariable())
- continue;
- DbgVariable NewVar(DV, NULL, this);
- if (DIE *VariableDIE =
- SPCU->constructVariableDIE(NewVar, false))
- SPDIE->addChild(VariableDIE);
+ assert(DV.isVariable());
+ DbgVariable NewVar(DV, this);
+ auto VariableDie = SPCU->constructVariableDIE(NewVar);
+ SPCU->applyVariableAttributes(NewVar, *VariableDie);
+ SPDIE->addChild(std::move(VariableDie));
}
}
}
}
}
-// Type Signature [7.27] and ODR Hash code.
-
-/// \brief Grabs the string in whichever attribute is passed in and returns
-/// a reference to it. Returns "" if the attribute doesn't exist.
-static StringRef getDIEStringAttr(DIE *Die, unsigned Attr) {
- DIEValue *V = Die->findAttribute(Attr);
-
- if (DIEString *S = dyn_cast_or_null<DIEString>(V))
- return S->getString();
-
- return StringRef("");
-}
-
-/// Return true if the current DIE is contained within an anonymous namespace.
-static bool isContainedInAnonNamespace(DIE *Die) {
- DIE *Parent = Die->getParent();
-
- while (Parent) {
- if (Parent->getTag() == dwarf::DW_TAG_namespace &&
- getDIEStringAttr(Parent, dwarf::DW_AT_name) == "")
- return true;
- Parent = Parent->getParent();
- }
-
- return false;
-}
+void DwarfDebug::finalizeModuleInfo() {
+ finishSubprogramDefinitions();
-/// Test if the current CU language is C++ and that we have
-/// a named type that is not contained in an anonymous namespace.
-static bool shouldAddODRHash(CompileUnit *CU, DIE *Die) {
- return CU->getLanguage() == dwarf::DW_LANG_C_plus_plus &&
- getDIEStringAttr(Die, dwarf::DW_AT_name) != "" &&
- !isContainedInAnonNamespace(Die);
-}
+ finishVariableDefinitions();
-void DwarfDebug::finalizeModuleInfo() {
// Collect info for variables that were optimized out.
collectDeadVariables();
- // Attach DW_AT_inline attribute with inlined subprogram DIEs.
- computeInlinedDIEs();
-
- // Split out type units and conditionally add an ODR tag to the split
- // out type.
- // FIXME: Do type splitting.
- for (unsigned i = 0, e = TypeUnits.size(); i != e; ++i) {
- DIE *Die = TypeUnits[i];
- DIEHash Hash;
- // If we've requested ODR hashes and it's applicable for an ODR hash then
- // add the ODR signature now.
- // FIXME: This should be added onto the type unit, not the type, but this
- // works as an intermediate stage.
- if (GenerateODRHash && shouldAddODRHash(CUMap.begin()->second, Die))
- CUMap.begin()->second->addUInt(Die, dwarf::DW_AT_GNU_odr_signature,
- dwarf::DW_FORM_data8,
- Hash.computeDIEODRSignature(*Die));
- }
-
- // Handle anything that needs to be done on a per-cu basis.
- for (DenseMap<const MDNode *, CompileUnit *>::iterator CUI = CUMap.begin(),
- CUE = CUMap.end();
- CUI != CUE; ++CUI) {
- CompileUnit *TheCU = CUI->second;
+ // Handle anything that needs to be done on a per-unit basis after
+ // all other generation.
+ for (const auto &TheU : getUnits()) {
// Emit DW_AT_containing_type attribute to connect types with their
// vtable holding type.
- TheCU->constructContainingTypeDIEs();
-
- // If we're splitting the dwarf out now that we've got the entire
- // CU then construct a skeleton CU based upon it.
- if (useSplitDwarf()) {
- uint64_t ID = 0;
- if (GenerateCUHash) {
- DIEHash CUHash;
- ID = CUHash.computeCUSignature(*TheCU->getCUDie());
+ TheU->constructContainingTypeDIEs();
+
+ // Add CU specific attributes if we need to add any.
+ if (TheU->getUnitDie().getTag() == dwarf::DW_TAG_compile_unit) {
+ // If we're splitting the dwarf out now that we've got the entire
+ // CU then add the dwo id to it.
+ DwarfCompileUnit *SkCU =
+ static_cast<DwarfCompileUnit *>(TheU->getSkeleton());
+ if (useSplitDwarf()) {
+ // Emit a unique identifier for this CU.
+ uint64_t ID = DIEHash(Asm).computeCUSignature(TheU->getUnitDie());
+ TheU->addUInt(TheU->getUnitDie(), dwarf::DW_AT_GNU_dwo_id,
+ dwarf::DW_FORM_data8, ID);
+ SkCU->addUInt(SkCU->getUnitDie(), dwarf::DW_AT_GNU_dwo_id,
+ dwarf::DW_FORM_data8, ID);
+
+ // We don't keep track of which addresses are used in which CU so this
+ // is a bit pessimistic under LTO.
+ if (!AddrPool.isEmpty())
+ addSectionLabel(*Asm, *SkCU, SkCU->getUnitDie(),
+ dwarf::DW_AT_GNU_addr_base, DwarfAddrSectionSym,
+ DwarfAddrSectionSym);
+ if (!TheU->getRangeLists().empty())
+ addSectionLabel(*Asm, *SkCU, SkCU->getUnitDie(),
+ dwarf::DW_AT_GNU_ranges_base,
+ DwarfDebugRangeSectionSym, DwarfDebugRangeSectionSym);
+ }
+
+ // If we have code split among multiple sections or non-contiguous
+ // ranges of code then emit a DW_AT_ranges attribute on the unit that will
+ // remain in the .o file, otherwise add a DW_AT_low_pc.
+ // FIXME: We should use ranges to allow reordering of code a la
+ // .subsections_via_symbols in mach-o. This would mean turning on
+ // ranges for all subprogram DIEs for mach-o.
+ DwarfCompileUnit &U =
+ SkCU ? *SkCU : static_cast<DwarfCompileUnit &>(*TheU);
+ unsigned NumRanges = TheU->getRanges().size();
+ if (NumRanges) {
+ if (NumRanges > 1) {
+ addSectionLabel(*Asm, U, U.getUnitDie(), dwarf::DW_AT_ranges,
+ Asm->GetTempSymbol("cu_ranges", U.getUniqueID()),
+ DwarfDebugRangeSectionSym);
+
+ // A DW_AT_low_pc attribute may also be specified in combination with
+ // DW_AT_ranges to specify the default base address for use in
+ // location lists (see Section 2.6.2) and range lists (see Section
+ // 2.17.3).
+ U.addUInt(U.getUnitDie(), dwarf::DW_AT_low_pc, dwarf::DW_FORM_addr,
+ 0);
+ } else {
+ RangeSpan &Range = TheU->getRanges().back();
+ U.addLocalLabelAddress(U.getUnitDie(), dwarf::DW_AT_low_pc,
+ Range.getStart());
+ U.addLabelDelta(U.getUnitDie(), dwarf::DW_AT_high_pc, Range.getEnd(),
+ Range.getStart());
+ }
}
- // This should be a unique identifier when we want to build .dwp files.
- TheCU->addUInt(TheCU->getCUDie(), dwarf::DW_AT_GNU_dwo_id,
- dwarf::DW_FORM_data8, ID);
- // Now construct the skeleton CU associated.
- CompileUnit *SkCU = constructSkeletonCU(TheCU);
- // This should be a unique identifier when we want to build .dwp files.
- SkCU->addUInt(SkCU->getCUDie(), dwarf::DW_AT_GNU_dwo_id,
- dwarf::DW_FORM_data8, ID);
}
}
@@ -1117,9 +958,8 @@ void DwarfDebug::finalizeModuleInfo() {
}
void DwarfDebug::endSections() {
- // Filter labels by section.
- for (size_t n = 0; n < ArangeLabels.size(); n++) {
- const SymbolCU &SCU = ArangeLabels[n];
+ // Filter labels by section.
+ for (const SymbolCU &SCU : ArangeLabels) {
if (SCU.Sym->isInSection()) {
// Make a note of this symbol and it's section.
const MCSection *Section = &SCU.Sym->getSection();
@@ -1129,15 +969,14 @@ void DwarfDebug::endSections() {
// Some symbols (e.g. common/bss on mach-o) can have no section but still
// appear in the output. This sucks as we rely on sections to build
// arange spans. We can do it without, but it's icky.
- SectionMap[NULL].push_back(SCU);
+ SectionMap[nullptr].push_back(SCU);
}
}
// Build a list of sections used.
std::vector<const MCSection *> Sections;
- for (SectionMapType::iterator it = SectionMap.begin(); it != SectionMap.end();
- it++) {
- const MCSection *Section = it->first;
+ for (const auto &it : SectionMap) {
+ const MCSection *Section = it.first;
Sections.push_back(Section);
}
@@ -1146,29 +985,32 @@ void DwarfDebug::endSections() {
std::sort(Sections.begin(), Sections.end(), SectionSort);
// Add terminating symbols for each section.
- for (unsigned ID=0;ID<Sections.size();ID++) {
+ for (unsigned ID = 0, E = Sections.size(); ID != E; ID++) {
const MCSection *Section = Sections[ID];
- MCSymbol *Sym = NULL;
+ MCSymbol *Sym = nullptr;
if (Section) {
// We can't call MCSection::getLabelEndName, as it's only safe to do so
- // if we know the section name up-front. For user-created sections, the resulting
- // label may not be valid to use as a label. (section names can use a greater
- // set of characters on some systems)
+ // if we know the section name up-front. For user-created sections, the
+ // resulting label may not be valid to use as a label. (section names can
+ // use a greater set of characters on some systems)
Sym = Asm->GetTempSymbol("debug_end", ID);
Asm->OutStreamer.SwitchSection(Section);
Asm->OutStreamer.EmitLabel(Sym);
}
// Insert a final terminator.
- SectionMap[Section].push_back(SymbolCU(NULL, Sym));
+ SectionMap[Section].push_back(SymbolCU(nullptr, Sym));
}
}
// Emit all Dwarf sections that should come after the content.
void DwarfDebug::endModule() {
+ assert(CurFn == nullptr);
+ assert(CurMI == nullptr);
- if (!FirstCU) return;
+ if (!FirstCU)
+ return;
// End any existing sections.
// TODO: Does this need to happen?
@@ -1177,59 +1019,33 @@ void DwarfDebug::endModule() {
// Finalize the debug info for the module.
finalizeModuleInfo();
- if (!useSplitDwarf()) {
- emitDebugStr();
-
- // Emit all the DIEs into a debug info section.
- emitDebugInfo();
+ emitDebugStr();
- // Corresponding abbreviations into a abbrev section.
- emitAbbreviations();
+ // Emit all the DIEs into a debug info section.
+ emitDebugInfo();
- // Emit info into a debug loc section.
- emitDebugLoc();
+ // Emit the corresponding abbreviations into an abbrev section.
+ emitAbbreviations();
- // Emit info into a debug aranges section.
+ // Emit info into a debug aranges section.
+ if (GenerateARangeSection)
emitDebugARanges();
- // Emit info into a debug ranges section.
- emitDebugRanges();
-
- // Emit info into a debug macinfo section.
- emitDebugMacInfo();
+ // Emit info into a debug ranges section.
+ emitDebugRanges();
- } else {
- // TODO: Fill this in for separated debug sections and separate
- // out information into new sections.
- emitDebugStr();
- if (useSplitDwarf())
- emitDebugStrDWO();
-
- // Emit the debug info section and compile units.
- emitDebugInfo();
+ if (useSplitDwarf()) {
+ emitDebugStrDWO();
emitDebugInfoDWO();
-
- // Corresponding abbreviations into a abbrev section.
- emitAbbreviations();
emitDebugAbbrevDWO();
-
+ emitDebugLineDWO();
+ emitDebugLocDWO();
+ // Emit DWO addresses.
+ AddrPool.emit(*Asm, Asm->getObjFileLowering().getDwarfAddrSection());
+ } else
// Emit info into a debug loc section.
emitDebugLoc();
- // Emit info into a debug aranges section.
- emitDebugARanges();
-
- // Emit info into a debug ranges section.
- emitDebugRanges();
-
- // Emit info into a debug macinfo section.
- emitDebugMacInfo();
-
- // Emit DWO addresses.
- InfoHolder.emitAddresses(Asm->getObjFileLowering().getDwarfAddrSection());
-
- }
-
// Emit info into the dwarf accelerator table sections.
if (useDwarfAccelTables()) {
emitAccelNames();
@@ -1246,41 +1062,62 @@ void DwarfDebug::endModule() {
// clean up.
SPMap.clear();
- for (DenseMap<const MDNode *, CompileUnit *>::iterator I = CUMap.begin(),
- E = CUMap.end(); I != E; ++I)
- delete I->second;
-
- for (SmallVectorImpl<CompileUnit *>::iterator I = SkeletonCUs.begin(),
- E = SkeletonCUs.end(); I != E; ++I)
- delete *I;
+ AbstractVariables.clear();
// Reset these for the next Module if we have one.
- FirstCU = NULL;
+ FirstCU = nullptr;
}
// Find abstract variable, if any, associated with Var.
-DbgVariable *DwarfDebug::findAbstractVariable(DIVariable &DV,
- DebugLoc ScopeLoc) {
+DbgVariable *DwarfDebug::getExistingAbstractVariable(const DIVariable &DV,
+ DIVariable &Cleansed) {
LLVMContext &Ctx = DV->getContext();
// More than one inlined variable corresponds to one abstract variable.
- DIVariable Var = cleanseInlinedVariable(DV, Ctx);
- DbgVariable *AbsDbgVariable = AbstractVariables.lookup(Var);
- if (AbsDbgVariable)
- return AbsDbgVariable;
+ // FIXME: This duplication of variables when inlining should probably be
+ // removed. It's done to allow each DIVariable to describe its location
+ // because the DebugLoc on the dbg.value/declare isn't accurate. We should
+ // make it accurate then remove this duplication/cleansing stuff.
+ Cleansed = cleanseInlinedVariable(DV, Ctx);
+ auto I = AbstractVariables.find(Cleansed);
+ if (I != AbstractVariables.end())
+ return I->second.get();
+ return nullptr;
+}
+
+DbgVariable *DwarfDebug::getExistingAbstractVariable(const DIVariable &DV) {
+ DIVariable Cleansed;
+ return getExistingAbstractVariable(DV, Cleansed);
+}
+
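+// Create the abstract DbgVariable for Var in the given abstract scope and
+// remember it so concrete and inlined instances can refer back to it.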
+void DwarfDebug::createAbstractVariable(const DIVariable &Var,
+ LexicalScope *Scope) {
+ auto AbsDbgVariable = make_unique<DbgVariable>(Var, this);
+ addScopeVariable(Scope, AbsDbgVariable.get());
+ AbstractVariables[Var] = std::move(AbsDbgVariable);
+}
+
+void DwarfDebug::ensureAbstractVariableIsCreated(const DIVariable &DV,
+ const MDNode *ScopeNode) {
+ DIVariable Cleansed = DV;
+ if (getExistingAbstractVariable(DV, Cleansed))
+ return;
+
+ createAbstractVariable(Cleansed, LScopes.getOrCreateAbstractScope(ScopeNode));
+}
- LexicalScope *Scope = LScopes.findAbstractScope(ScopeLoc.getScope(Ctx));
- if (!Scope)
- return NULL;
+void
+DwarfDebug::ensureAbstractVariableIsCreatedIfScoped(const DIVariable &DV,
+ const MDNode *ScopeNode) {
+ DIVariable Cleansed = DV;
+ if (getExistingAbstractVariable(DV, Cleansed))
+ return;
- AbsDbgVariable = new DbgVariable(Var, NULL, this);
- addScopeVariable(Scope, AbsDbgVariable);
- AbstractVariables[Var] = AbsDbgVariable;
- return AbsDbgVariable;
+ if (LexicalScope *Scope = LScopes.findAbstractScope(ScopeNode))
+ createAbstractVariable(Cleansed, Scope);
}
// If Var is a current function argument then add it to CurrentFnArguments list.
-bool DwarfDebug::addCurrentFnArgument(const MachineFunction *MF,
- DbgVariable *Var, LexicalScope *Scope) {
+bool DwarfDebug::addCurrentFnArgument(DbgVariable *Var, LexicalScope *Scope) {
if (!LScopes.isCurrentFunctionScope(Scope))
return false;
DIVariable DV = Var->getVariable();
@@ -1292,7 +1129,7 @@ bool DwarfDebug::addCurrentFnArgument(const MachineFunction *MF,
size_t Size = CurrentFnArguments.size();
if (Size == 0)
- CurrentFnArguments.resize(MF->getFunction()->arg_size());
+ CurrentFnArguments.resize(CurFn->getFunction()->arg_size());
// llvm::Function argument size is not a good indicator of how many
// arguments the function has at the source level.
if (ArgNo > Size)
@@ -1302,50 +1139,30 @@ bool DwarfDebug::addCurrentFnArgument(const MachineFunction *MF,
}
// Collect variable information from side table maintained by MMI.
-void
-DwarfDebug::collectVariableInfoFromMMITable(const MachineFunction *MF,
- SmallPtrSet<const MDNode *, 16> &Processed) {
- MachineModuleInfo::VariableDbgInfoMapTy &VMap = MMI->getVariableDbgInfo();
- for (MachineModuleInfo::VariableDbgInfoMapTy::iterator VI = VMap.begin(),
- VE = VMap.end(); VI != VE; ++VI) {
- const MDNode *Var = VI->first;
- if (!Var) continue;
- Processed.insert(Var);
- DIVariable DV(Var);
- const std::pair<unsigned, DebugLoc> &VP = VI->second;
-
- LexicalScope *Scope = LScopes.findLexicalScope(VP.second);
+void DwarfDebug::collectVariableInfoFromMMITable(
+ SmallPtrSet<const MDNode *, 16> &Processed) {
+ for (const auto &VI : MMI->getVariableDbgInfo()) {
+ if (!VI.Var)
+ continue;
+ Processed.insert(VI.Var);
+ DIVariable DV(VI.Var);
+ LexicalScope *Scope = LScopes.findLexicalScope(VI.Loc);
// If variable scope is not found then skip this variable.
- if (Scope == 0)
+ if (!Scope)
continue;
- DbgVariable *AbsDbgVariable = findAbstractVariable(DV, VP.second);
- DbgVariable *RegVar = new DbgVariable(DV, AbsDbgVariable, this);
- RegVar->setFrameIndex(VP.first);
- if (!addCurrentFnArgument(MF, RegVar, Scope))
- addScopeVariable(Scope, RegVar);
- if (AbsDbgVariable)
- AbsDbgVariable->setFrameIndex(VP.first);
+ ensureAbstractVariableIsCreatedIfScoped(DV, Scope->getScopeNode());
+ ConcreteVariables.push_back(make_unique<DbgVariable>(DV, this));
+ DbgVariable *RegVar = ConcreteVariables.back().get();
+ RegVar->setFrameIndex(VI.Slot);
+ addScopeVariable(Scope, RegVar);
}
}
-// Return true if debug value, encoded by DBG_VALUE instruction, is in a
-// defined reg.
-static bool isDbgValueInDefinedReg(const MachineInstr *MI) {
- assert(MI->isDebugValue() && "Invalid DBG_VALUE machine instruction!");
- return MI->getNumOperands() == 3 &&
- MI->getOperand(0).isReg() && MI->getOperand(0).getReg() &&
- (MI->getOperand(1).isImm() ||
- (MI->getOperand(1).isReg() && MI->getOperand(1).getReg() == 0U));
-}
-
// Get .debug_loc entry for the instruction range starting at MI.
-static DotDebugLocEntry getDebugLocEntry(AsmPrinter *Asm,
- const MCSymbol *FLabel,
- const MCSymbol *SLabel,
- const MachineInstr *MI) {
- const MDNode *Var = MI->getOperand(MI->getNumOperands() - 1).getMetadata();
+static DebugLocEntry::Value getDebugLocValue(const MachineInstr *MI) {
+ const MDNode *Var = MI->getDebugVariable();
assert(MI->getNumOperands() == 3);
if (MI->getOperand(0).isReg()) {
@@ -1356,120 +1173,116 @@ static DotDebugLocEntry getDebugLocEntry(AsmPrinter *Asm,
MLoc.set(MI->getOperand(0).getReg());
else
MLoc.set(MI->getOperand(0).getReg(), MI->getOperand(1).getImm());
- return DotDebugLocEntry(FLabel, SLabel, MLoc, Var);
+ return DebugLocEntry::Value(Var, MLoc);
}
if (MI->getOperand(0).isImm())
- return DotDebugLocEntry(FLabel, SLabel, MI->getOperand(0).getImm());
+ return DebugLocEntry::Value(Var, MI->getOperand(0).getImm());
if (MI->getOperand(0).isFPImm())
- return DotDebugLocEntry(FLabel, SLabel, MI->getOperand(0).getFPImm());
+ return DebugLocEntry::Value(Var, MI->getOperand(0).getFPImm());
if (MI->getOperand(0).isCImm())
- return DotDebugLocEntry(FLabel, SLabel, MI->getOperand(0).getCImm());
+ return DebugLocEntry::Value(Var, MI->getOperand(0).getCImm());
llvm_unreachable("Unexpected 3 operand DBG_VALUE instruction!");
}
// Find variables for each lexical scope.
void
-DwarfDebug::collectVariableInfo(const MachineFunction *MF,
- SmallPtrSet<const MDNode *, 16> &Processed) {
+DwarfDebug::collectVariableInfo(SmallPtrSet<const MDNode *, 16> &Processed) {
+ LexicalScope *FnScope = LScopes.getCurrentFunctionScope();
+ DwarfCompileUnit *TheCU = SPMap.lookup(FnScope->getScopeNode());
// Grab the variable info that was squirreled away in the MMI side-table.
- collectVariableInfoFromMMITable(MF, Processed);
+ collectVariableInfoFromMMITable(Processed);
- for (SmallVectorImpl<const MDNode*>::const_iterator
- UVI = UserVariables.begin(), UVE = UserVariables.end(); UVI != UVE;
- ++UVI) {
- const MDNode *Var = *UVI;
- if (Processed.count(Var))
+ for (const auto &I : DbgValues) {
+ DIVariable DV(I.first);
+ if (Processed.count(DV))
continue;
- // History contains relevant DBG_VALUE instructions for Var and instructions
- // clobbering it.
- SmallVectorImpl<const MachineInstr*> &History = DbgValues[Var];
- if (History.empty())
+ // Instruction ranges, specifying where DV is accessible.
+ const auto &Ranges = I.second;
+ if (Ranges.empty())
continue;
- const MachineInstr *MInsn = History.front();
- DIVariable DV(Var);
- LexicalScope *Scope = NULL;
+ LexicalScope *Scope = nullptr;
if (DV.getTag() == dwarf::DW_TAG_arg_variable &&
- DISubprogram(DV.getContext()).describes(MF->getFunction()))
+ DISubprogram(DV.getContext()).describes(CurFn->getFunction()))
Scope = LScopes.getCurrentFunctionScope();
- else if (MDNode *IA = DV.getInlinedAt())
- Scope = LScopes.findInlinedScope(DebugLoc::getFromDILocation(IA));
- else
- Scope = LScopes.findLexicalScope(cast<MDNode>(DV->getOperand(1)));
+ else if (MDNode *IA = DV.getInlinedAt()) {
+ DebugLoc DL = DebugLoc::getFromDILocation(IA);
+ Scope = LScopes.findInlinedScope(DebugLoc::get(
+ DL.getLine(), DL.getCol(), DV.getContext(), IA));
+ } else
+ Scope = LScopes.findLexicalScope(DV.getContext());
// If variable scope is not found then skip this variable.
if (!Scope)
continue;
Processed.insert(DV);
+ const MachineInstr *MInsn = Ranges.front().first;
assert(MInsn->isDebugValue() && "History must begin with debug value");
- DbgVariable *AbsVar = findAbstractVariable(DV, MInsn->getDebugLoc());
- DbgVariable *RegVar = new DbgVariable(DV, AbsVar, this);
- if (!addCurrentFnArgument(MF, RegVar, Scope))
- addScopeVariable(Scope, RegVar);
- if (AbsVar)
- AbsVar->setMInsn(MInsn);
-
- // Simplify ranges that are fully coalesced.
- if (History.size() <= 1 || (History.size() == 2 &&
- MInsn->isIdenticalTo(History.back()))) {
- RegVar->setMInsn(MInsn);
+ ensureAbstractVariableIsCreatedIfScoped(DV, Scope->getScopeNode());
+ ConcreteVariables.push_back(make_unique<DbgVariable>(MInsn, this));
+ DbgVariable *RegVar = ConcreteVariables.back().get();
+ addScopeVariable(Scope, RegVar);
+
+ // Check if the first DBG_VALUE is valid for the rest of the function.
+ if (Ranges.size() == 1 && Ranges.front().second == nullptr)
continue;
- }
// Handle multiple DBG_VALUE instructions describing one variable.
RegVar->setDotDebugLocOffset(DotDebugLocEntries.size());
- for (SmallVectorImpl<const MachineInstr*>::const_iterator
- HI = History.begin(), HE = History.end(); HI != HE; ++HI) {
- const MachineInstr *Begin = *HI;
+ DotDebugLocEntries.resize(DotDebugLocEntries.size() + 1);
+ DebugLocList &LocList = DotDebugLocEntries.back();
+ LocList.Label =
+ Asm->GetTempSymbol("debug_loc", DotDebugLocEntries.size() - 1);
+ SmallVector<DebugLocEntry, 4> &DebugLoc = LocList.List;
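+ // Walk the instruction ranges in which the variable is live and emit one
+ // location-list entry per range, merging entries that can be coalesced.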
+ for (auto I = Ranges.begin(), E = Ranges.end(); I != E; ++I) {
+ const MachineInstr *Begin = I->first;
+ const MachineInstr *End = I->second;
assert(Begin->isDebugValue() && "Invalid History entry");
- // Check if DBG_VALUE is truncating a range.
- if (Begin->getNumOperands() > 1 && Begin->getOperand(0).isReg()
- && !Begin->getOperand(0).getReg())
+ // Check if a variable is unaccessible in this range.
+ if (Begin->getNumOperands() > 1 && Begin->getOperand(0).isReg() &&
+ !Begin->getOperand(0).getReg())
continue;
+ DEBUG(dbgs() << "DotDebugLoc Pair:\n" << "\t" << *Begin);
+ if (End != nullptr)
+ DEBUG(dbgs() << "\t" << *End);
+ else
+ DEBUG(dbgs() << "\tNULL\n");
- // Compute the range for a register location.
- const MCSymbol *FLabel = getLabelBeforeInsn(Begin);
- const MCSymbol *SLabel = 0;
-
- if (HI + 1 == HE)
- // If Begin is the last instruction in History then its value is valid
- // until the end of the function.
- SLabel = FunctionEndSym;
- else {
- const MachineInstr *End = HI[1];
- DEBUG(dbgs() << "DotDebugLoc Pair:\n"
- << "\t" << *Begin << "\t" << *End << "\n");
- if (End->isDebugValue())
- SLabel = getLabelBeforeInsn(End);
- else {
- // End is a normal instruction clobbering the range.
- SLabel = getLabelAfterInsn(End);
- assert(SLabel && "Forgot label after clobber instruction");
- ++HI;
- }
- }
+ const MCSymbol *StartLabel = getLabelBeforeInsn(Begin);
+ assert(StartLabel && "Forgot label before DBG_VALUE starting a range!");
+
+ const MCSymbol *EndLabel;
+ if (End != nullptr)
+ EndLabel = getLabelAfterInsn(End);
+ else if (std::next(I) == Ranges.end())
+ EndLabel = FunctionEndSym;
+ else
+ EndLabel = getLabelBeforeInsn(std::next(I)->first);
+ assert(EndLabel && "Forgot label after instruction ending a range!");
- // The value is valid until the next DBG_VALUE or clobber.
- DotDebugLocEntries.push_back(getDebugLocEntry(Asm, FLabel, SLabel,
- Begin));
+ DebugLocEntry Loc(StartLabel, EndLabel, getDebugLocValue(Begin), TheCU);
+ if (DebugLoc.empty() || !DebugLoc.back().Merge(Loc))
+ DebugLoc.push_back(std::move(Loc));
}
- DotDebugLocEntries.push_back(DotDebugLocEntry());
}
// Collect info for variables that were optimized out.
- LexicalScope *FnScope = LScopes.getCurrentFunctionScope();
DIArray Variables = DISubprogram(FnScope->getScopeNode()).getVariables();
for (unsigned i = 0, e = Variables.getNumElements(); i != e; ++i) {
DIVariable DV(Variables.getElement(i));
- if (!DV || !DV.isVariable() || !Processed.insert(DV))
+ assert(DV.isVariable());
+ if (!Processed.insert(DV))
continue;
- if (LexicalScope *Scope = LScopes.findLexicalScope(DV.getContext()))
- addScopeVariable(Scope, new DbgVariable(DV, NULL, this));
+ if (LexicalScope *Scope = LScopes.findLexicalScope(DV.getContext())) {
+ ensureAbstractVariableIsCreatedIfScoped(DV, Scope->getScopeNode());
+ ConcreteVariables.push_back(make_unique<DbgVariable>(DV, this));
+ addScopeVariable(Scope, ConcreteVariables.back().get());
+ }
}
}
@@ -1487,6 +1300,8 @@ MCSymbol *DwarfDebug::getLabelAfterInsn(const MachineInstr *MI) {
// Process beginning of an instruction.
void DwarfDebug::beginInstruction(const MachineInstr *MI) {
+ assert(CurMI == nullptr);
+ CurMI = MI;
// Check if source location changes, but ignore DBG_VALUE locations.
if (!MI->isDebugValue()) {
DebugLoc DL = MI->getDebugLoc();
@@ -1504,13 +1319,13 @@ void DwarfDebug::beginInstruction(const MachineInstr *MI) {
const MDNode *Scope = DL.getScope(Asm->MF->getFunction()->getContext());
recordSourceLine(DL.getLine(), DL.getCol(), Scope, Flags);
} else
- recordSourceLine(0, 0, 0, 0);
+ recordSourceLine(0, 0, nullptr, 0);
}
}
// Insert labels where requested.
- DenseMap<const MachineInstr*, MCSymbol*>::iterator I =
- LabelsBeforeInsn.find(MI);
+ DenseMap<const MachineInstr *, MCSymbol *>::iterator I =
+ LabelsBeforeInsn.find(MI);
// No label needed.
if (I == LabelsBeforeInsn.end())
@@ -1528,14 +1343,16 @@ void DwarfDebug::beginInstruction(const MachineInstr *MI) {
}
// Process end of an instruction.
-void DwarfDebug::endInstruction(const MachineInstr *MI) {
+void DwarfDebug::endInstruction() {
+ assert(CurMI != nullptr);
// Don't create a new label after DBG_VALUE instructions.
// They don't generate code.
- if (!MI->isDebugValue())
- PrevLabel = 0;
+ if (!CurMI->isDebugValue())
+ PrevLabel = nullptr;
- DenseMap<const MachineInstr*, MCSymbol*>::iterator I =
- LabelsAfterInsn.find(MI);
+ DenseMap<const MachineInstr *, MCSymbol *>::iterator I =
+ LabelsAfterInsn.find(CurMI);
+ CurMI = nullptr;
// No label needed.
if (I == LabelsAfterInsn.end())
@@ -1565,53 +1382,35 @@ void DwarfDebug::identifyScopeMarkers() {
const SmallVectorImpl<LexicalScope *> &Children = S->getChildren();
if (!Children.empty())
- for (SmallVectorImpl<LexicalScope *>::const_iterator SI = Children.begin(),
- SE = Children.end(); SI != SE; ++SI)
- WorkList.push_back(*SI);
+ WorkList.append(Children.begin(), Children.end());
if (S->isAbstractScope())
continue;
- const SmallVectorImpl<InsnRange> &Ranges = S->getRanges();
- if (Ranges.empty())
- continue;
- for (SmallVectorImpl<InsnRange>::const_iterator RI = Ranges.begin(),
- RE = Ranges.end(); RI != RE; ++RI) {
- assert(RI->first && "InsnRange does not have first instruction!");
- assert(RI->second && "InsnRange does not have second instruction!");
- requestLabelBeforeInsn(RI->first);
- requestLabelAfterInsn(RI->second);
+ for (const InsnRange &R : S->getRanges()) {
+ assert(R.first && "InsnRange does not have first instruction!");
+ assert(R.second && "InsnRange does not have second instruction!");
+ requestLabelBeforeInsn(R.first);
+ requestLabelAfterInsn(R.second);
}
}
}
-// Get MDNode for DebugLoc's scope.
-static MDNode *getScopeNode(DebugLoc DL, const LLVMContext &Ctx) {
- if (MDNode *InlinedAt = DL.getInlinedAt(Ctx))
- return getScopeNode(DebugLoc::getFromDILocation(InlinedAt), Ctx);
- return DL.getScope(Ctx);
-}
-
-// Walk up the scope chain of given debug loc and find line number info
-// for the function.
-static DebugLoc getFnDebugLoc(DebugLoc DL, const LLVMContext &Ctx) {
- const MDNode *Scope = getScopeNode(DL, Ctx);
- DISubprogram SP = getDISubprogram(Scope);
- if (SP.isSubprogram()) {
- // Check for number of operands since the compatibility is
- // cheap here.
- if (SP->getNumOperands() > 19)
- return DebugLoc::get(SP.getScopeLineNumber(), 0, SP);
- else
- return DebugLoc::get(SP.getLineNumber(), 0, SP);
- }
-
+static DebugLoc findPrologueEndLoc(const MachineFunction *MF) {
+ // First known non-DBG_VALUE and non-frame setup location marks
+ // the beginning of the function body.
+ for (const auto &MBB : *MF)
+ for (const auto &MI : MBB)
+ if (!MI.isDebugValue() && !MI.getFlag(MachineInstr::FrameSetup) &&
+ !MI.getDebugLoc().isUnknown())
+ return MI.getDebugLoc();
return DebugLoc();
}
// Gather pre-function debug information. Assumes being called immediately
// after the function entry point has been emitted.
void DwarfDebug::beginFunction(const MachineFunction *MF) {
+ CurFn = MF;
// If there's no debug info for the function we're not going to do anything.
if (!MMI->hasDebugInfo())
@@ -1623,19 +1422,19 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
if (LScopes.empty())
return;
- assert(UserVariables.empty() && DbgValues.empty() && "Maps weren't cleaned");
+ assert(DbgValues.empty() && "DbgValues map wasn't cleaned!");
// Make sure that each lexical scope will have a begin/end label.
identifyScopeMarkers();
- // Set DwarfCompileUnitID in MCContext to the Compile Unit this function
+ // Set DwarfCompileUnitID in MCContext to the Compile Unit this function
// belongs to so that we add to the correct per-cu line table in the
// non-asm case.
LexicalScope *FnScope = LScopes.getCurrentFunctionScope();
- CompileUnit *TheCU = SPMap.lookup(FnScope->getScopeNode());
+ DwarfCompileUnit *TheCU = SPMap.lookup(FnScope->getScopeNode());
assert(TheCU && "Unable to find compile unit!");
- if (Asm->TM.hasMCUseLoc() && Asm->OutStreamer.hasRawTextSupport())
- // Use a single line table if we are using .loc and generating assembly.
+ if (Asm->OutStreamer.hasRawTextSupport())
+ // Use a single line table if we are generating assembly.
Asm->OutStreamer.getContext().setDwarfCompileUnitID(0);
else
Asm->OutStreamer.getContext().setDwarfCompileUnitID(TheCU->getUniqueID());
@@ -1645,148 +1444,26 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
// Assumes in correct section after the entry point.
Asm->OutStreamer.EmitLabel(FunctionBeginSym);
- const TargetRegisterInfo *TRI = Asm->TM.getRegisterInfo();
- // LiveUserVar - Map physreg numbers to the MDNode they contain.
- std::vector<const MDNode *> LiveUserVar(TRI->getNumRegs());
-
- for (MachineFunction::const_iterator I = MF->begin(), E = MF->end(); I != E;
- ++I) {
- bool AtBlockEntry = true;
- for (MachineBasicBlock::const_iterator II = I->begin(), IE = I->end();
- II != IE; ++II) {
- const MachineInstr *MI = II;
-
- if (MI->isDebugValue()) {
- assert(MI->getNumOperands() > 1 && "Invalid machine instruction!");
-
- // Keep track of user variables.
- const MDNode *Var =
- MI->getOperand(MI->getNumOperands() - 1).getMetadata();
-
- // Variable is in a register, we need to check for clobbers.
- if (isDbgValueInDefinedReg(MI))
- LiveUserVar[MI->getOperand(0).getReg()] = Var;
-
- // Check the history of this variable.
- SmallVectorImpl<const MachineInstr *> &History = DbgValues[Var];
- if (History.empty()) {
- UserVariables.push_back(Var);
- // The first mention of a function argument gets the FunctionBeginSym
- // label, so arguments are visible when breaking at function entry.
- DIVariable DV(Var);
- if (DV.isVariable() && DV.getTag() == dwarf::DW_TAG_arg_variable &&
- getDISubprogram(DV.getContext()).describes(MF->getFunction()))
- LabelsBeforeInsn[MI] = FunctionBeginSym;
- } else {
- // We have seen this variable before. Try to coalesce DBG_VALUEs.
- const MachineInstr *Prev = History.back();
- if (Prev->isDebugValue()) {
- // Coalesce identical entries at the end of History.
- if (History.size() >= 2 &&
- Prev->isIdenticalTo(History[History.size() - 2])) {
- DEBUG(dbgs() << "Coalescing identical DBG_VALUE entries:\n"
- << "\t" << *Prev << "\t"
- << *History[History.size() - 2] << "\n");
- History.pop_back();
- }
-
- // Terminate old register assignments that don't reach MI;
- MachineFunction::const_iterator PrevMBB = Prev->getParent();
- if (PrevMBB != I && (!AtBlockEntry || llvm::next(PrevMBB) != I) &&
- isDbgValueInDefinedReg(Prev)) {
- // Previous register assignment needs to terminate at the end of
- // its basic block.
- MachineBasicBlock::const_iterator LastMI =
- PrevMBB->getLastNonDebugInstr();
- if (LastMI == PrevMBB->end()) {
- // Drop DBG_VALUE for empty range.
- DEBUG(dbgs() << "Dropping DBG_VALUE for empty range:\n"
- << "\t" << *Prev << "\n");
- History.pop_back();
- } else if (llvm::next(PrevMBB) != PrevMBB->getParent()->end())
- // Terminate after LastMI.
- History.push_back(LastMI);
- }
- }
- }
- History.push_back(MI);
- } else {
- // Not a DBG_VALUE instruction.
- if (!MI->isLabel())
- AtBlockEntry = false;
-
- // First known non-DBG_VALUE and non-frame setup location marks
- // the beginning of the function body.
- if (!MI->getFlag(MachineInstr::FrameSetup) &&
- (PrologEndLoc.isUnknown() && !MI->getDebugLoc().isUnknown()))
- PrologEndLoc = MI->getDebugLoc();
-
- // Check if the instruction clobbers any registers with debug vars.
- for (MachineInstr::const_mop_iterator MOI = MI->operands_begin(),
- MOE = MI->operands_end();
- MOI != MOE; ++MOI) {
- if (!MOI->isReg() || !MOI->isDef() || !MOI->getReg())
- continue;
- for (MCRegAliasIterator AI(MOI->getReg(), TRI, true); AI.isValid();
- ++AI) {
- unsigned Reg = *AI;
- const MDNode *Var = LiveUserVar[Reg];
- if (!Var)
- continue;
- // Reg is now clobbered.
- LiveUserVar[Reg] = 0;
-
- // Was MD last defined by a DBG_VALUE referring to Reg?
- DbgValueHistoryMap::iterator HistI = DbgValues.find(Var);
- if (HistI == DbgValues.end())
- continue;
- SmallVectorImpl<const MachineInstr *> &History = HistI->second;
- if (History.empty())
- continue;
- const MachineInstr *Prev = History.back();
- // Sanity-check: Register assignments are terminated at the end of
- // their block.
- if (!Prev->isDebugValue() || Prev->getParent() != MI->getParent())
- continue;
- // Is the variable still in Reg?
- if (!isDbgValueInDefinedReg(Prev) ||
- Prev->getOperand(0).getReg() != Reg)
- continue;
- // Var is clobbered. Make sure the next instruction gets a label.
- History.push_back(MI);
- }
- }
- }
- }
- }
+ // Calculate history for local variables.
+ calculateDbgValueHistory(MF, Asm->TM.getRegisterInfo(), DbgValues);
- for (DbgValueHistoryMap::iterator I = DbgValues.begin(), E = DbgValues.end();
- I != E; ++I) {
- SmallVectorImpl<const MachineInstr *> &History = I->second;
- if (History.empty())
+ // Request labels for the full history.
+ for (const auto &I : DbgValues) {
+ const auto &Ranges = I.second;
+ if (Ranges.empty())
continue;
- // Make sure the final register assignments are terminated.
- const MachineInstr *Prev = History.back();
- if (Prev->isDebugValue() && isDbgValueInDefinedReg(Prev)) {
- const MachineBasicBlock *PrevMBB = Prev->getParent();
- MachineBasicBlock::const_iterator LastMI =
- PrevMBB->getLastNonDebugInstr();
- if (LastMI == PrevMBB->end())
- // Drop DBG_VALUE for empty range.
- History.pop_back();
- else if (PrevMBB != &PrevMBB->getParent()->back()) {
- // Terminate after LastMI.
- History.push_back(LastMI);
- }
- }
- // Request labels for the full history.
- for (unsigned i = 0, e = History.size(); i != e; ++i) {
- const MachineInstr *MI = History[i];
- if (MI->isDebugValue())
- requestLabelBeforeInsn(MI);
- else
- requestLabelAfterInsn(MI);
+ // The first mention of a function argument gets the FunctionBeginSym
+ // label, so arguments are visible when breaking at function entry.
+ DIVariable DV(I.first);
+ if (DV.isVariable() && DV.getTag() == dwarf::DW_TAG_arg_variable &&
+ getDISubprogram(DV.getContext()).describes(MF->getFunction()))
+ LabelsBeforeInsn[Ranges.front().first] = FunctionBeginSym;
+
+ for (const auto &Range : Ranges) {
+ requestLabelBeforeInsn(Range.first);
+ if (Range.second)
+ requestLabelAfterInsn(Range.second);
}
}
@@ -1794,9 +1471,10 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
PrevLabel = FunctionBeginSym;
// Record beginning of function.
+ PrologEndLoc = findPrologueEndLoc(MF);
if (!PrologEndLoc.isUnknown()) {
DebugLoc FnStartDL =
- getFnDebugLoc(PrologEndLoc, MF->getFunction()->getContext());
+ PrologEndLoc.getFnDebugLoc(MF->getFunction()->getContext());
recordSourceLine(
FnStartDL.getLine(), FnStartDL.getCol(),
FnStartDL.getScope(MF->getFunction()->getContext()),
@@ -1807,6 +1485,8 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
}
void DwarfDebug::addScopeVariable(LexicalScope *LS, DbgVariable *Var) {
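+ // Formal parameters are collected separately in CurrentFnArguments, indexed
+ // by argument number, so they can be emitted in declaration order.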
+ if (addCurrentFnArgument(Var, LS))
+ return;
SmallVectorImpl<DbgVariable *> &Vars = ScopeVariables[LS];
DIVariable DV = Var->getVariable();
// Variables with positive arg numbers are parameters.
@@ -1839,66 +1519,76 @@ void DwarfDebug::addScopeVariable(LexicalScope *LS, DbgVariable *Var) {
// Gather and emit post-function debug information.
void DwarfDebug::endFunction(const MachineFunction *MF) {
- if (!MMI->hasDebugInfo() || LScopes.empty()) return;
+ // Every beginFunction(MF) call should be followed by an endFunction(MF) call,
+ // but endFunction may also be reached without a prior beginFunction, so
+ // handle both cases here.
+ if (!CurFn)
+ CurFn = MF;
+ else
+ assert(CurFn == MF);
+ assert(CurFn != nullptr);
+
+ if (!MMI->hasDebugInfo() || LScopes.empty()) {
+ // If we don't have a lexical scope for this function then there will
+ // be a hole in the range information. Keep note of this by setting the
+ // previously used section to nullptr.
+ PrevSection = nullptr;
+ PrevCU = nullptr;
+ CurFn = nullptr;
+ return;
+ }
// Define end label for subprogram.
- FunctionEndSym = Asm->GetTempSymbol("func_end",
- Asm->getFunctionNumber());
+ FunctionEndSym = Asm->GetTempSymbol("func_end", Asm->getFunctionNumber());
// Assumes in correct section after the entry point.
Asm->OutStreamer.EmitLabel(FunctionEndSym);
- // Set DwarfCompileUnitID in MCContext to default value.
+
+ // Set DwarfCompileUnitID in MCContext to its default value.
Asm->OutStreamer.getContext().setDwarfCompileUnitID(0);
SmallPtrSet<const MDNode *, 16> ProcessedVars;
- collectVariableInfo(MF, ProcessedVars);
+ collectVariableInfo(ProcessedVars);
LexicalScope *FnScope = LScopes.getCurrentFunctionScope();
- CompileUnit *TheCU = SPMap.lookup(FnScope->getScopeNode());
- assert(TheCU && "Unable to find compile unit!");
+ DwarfCompileUnit &TheCU = *SPMap.lookup(FnScope->getScopeNode());
// Construct abstract scopes.
- ArrayRef<LexicalScope *> AList = LScopes.getAbstractScopesList();
- for (unsigned i = 0, e = AList.size(); i != e; ++i) {
- LexicalScope *AScope = AList[i];
+ for (LexicalScope *AScope : LScopes.getAbstractScopesList()) {
DISubprogram SP(AScope->getScopeNode());
- if (SP.isSubprogram()) {
- // Collect info for variables that were optimized out.
- DIArray Variables = SP.getVariables();
- for (unsigned i = 0, e = Variables.getNumElements(); i != e; ++i) {
- DIVariable DV(Variables.getElement(i));
- if (!DV || !DV.isVariable() || !ProcessedVars.insert(DV))
- continue;
- // Check that DbgVariable for DV wasn't created earlier, when
- // findAbstractVariable() was called for inlined instance of DV.
- LLVMContext &Ctx = DV->getContext();
- DIVariable CleanDV = cleanseInlinedVariable(DV, Ctx);
- if (AbstractVariables.lookup(CleanDV))
- continue;
- if (LexicalScope *Scope = LScopes.findAbstractScope(DV.getContext()))
- addScopeVariable(Scope, new DbgVariable(DV, NULL, this));
- }
+ assert(SP.isSubprogram());
+ // Collect info for variables that were optimized out.
+ DIArray Variables = SP.getVariables();
+ for (unsigned i = 0, e = Variables.getNumElements(); i != e; ++i) {
+ DIVariable DV(Variables.getElement(i));
+ assert(DV && DV.isVariable());
+ if (!ProcessedVars.insert(DV))
+ continue;
+ ensureAbstractVariableIsCreated(DV, DV.getContext());
}
- if (ProcessedSPNodes.count(AScope->getScopeNode()) == 0)
- constructScopeDIE(TheCU, AScope);
+ constructAbstractSubprogramScopeDIE(TheCU, AScope);
}
- DIE *CurFnDIE = constructScopeDIE(TheCU, FnScope);
+ DIE &CurFnDIE = constructSubprogramScopeDIE(TheCU, FnScope);
+ if (!CurFn->getTarget().Options.DisableFramePointerElim(*CurFn))
+ TheCU.addFlag(CurFnDIE, dwarf::DW_AT_APPLE_omit_frame_ptr);
- if (!MF->getTarget().Options.DisableFramePointerElim(*MF))
- TheCU->addFlag(CurFnDIE, dwarf::DW_AT_APPLE_omit_frame_ptr);
+ // Add the range of this function to the list of ranges for the CU.
+ RangeSpan Span(FunctionBeginSym, FunctionEndSym);
+ TheCU.addRange(std::move(Span));
+ PrevSection = Asm->getCurrentSection();
+ PrevCU = &TheCU;
// Clear debug info
- for (ScopeVariablesMap::iterator
- I = ScopeVariables.begin(), E = ScopeVariables.end(); I != E; ++I)
- DeleteContainerPointers(I->second);
+ // Ownership of DbgVariables is a bit subtle - ScopeVariables owns all the
+ // DbgVariables except those that are also in AbstractVariables (since they
+ // can be used cross-function)
ScopeVariables.clear();
- DeleteContainerPointers(CurrentFnArguments);
- UserVariables.clear();
+ CurrentFnArguments.clear();
DbgValues.clear();
- AbstractVariables.clear();
LabelsBeforeInsn.clear();
LabelsAfterInsn.clear();
- PrevLabel = NULL;
+ PrevLabel = nullptr;
+ CurFn = nullptr;
}
// Register a source line with debug info. Returns the unique label that was
@@ -1908,129 +1598,46 @@ void DwarfDebug::recordSourceLine(unsigned Line, unsigned Col, const MDNode *S,
StringRef Fn;
StringRef Dir;
unsigned Src = 1;
- if (S) {
- DIDescriptor Scope(S);
-
- if (Scope.isCompileUnit()) {
- DICompileUnit CU(S);
- Fn = CU.getFilename();
- Dir = CU.getDirectory();
- } else if (Scope.isFile()) {
- DIFile F(S);
- Fn = F.getFilename();
- Dir = F.getDirectory();
- } else if (Scope.isSubprogram()) {
- DISubprogram SP(S);
- Fn = SP.getFilename();
- Dir = SP.getDirectory();
- } else if (Scope.isLexicalBlockFile()) {
- DILexicalBlockFile DBF(S);
- Fn = DBF.getFilename();
- Dir = DBF.getDirectory();
- } else if (Scope.isLexicalBlock()) {
- DILexicalBlock DB(S);
- Fn = DB.getFilename();
- Dir = DB.getDirectory();
- } else
- llvm_unreachable("Unexpected scope info");
+ unsigned Discriminator = 0;
+ if (DIScope Scope = DIScope(S)) {
+ assert(Scope.isScope());
+ Fn = Scope.getFilename();
+ Dir = Scope.getDirectory();
+ if (Scope.isLexicalBlock())
+ Discriminator = DILexicalBlock(S).getDiscriminator();
- Src = getOrCreateSourceID(Fn, Dir,
- Asm->OutStreamer.getContext().getDwarfCompileUnitID());
+ unsigned CUID = Asm->OutStreamer.getContext().getDwarfCompileUnitID();
+ Src = static_cast<DwarfCompileUnit &>(*InfoHolder.getUnits()[CUID])
+ .getOrCreateSourceID(Fn, Dir);
}
- Asm->OutStreamer.EmitDwarfLocDirective(Src, Line, Col, Flags, 0, 0, Fn);
+ Asm->OutStreamer.EmitDwarfLocDirective(Src, Line, Col, Flags, 0,
+ Discriminator, Fn);
}
//===----------------------------------------------------------------------===//
// Emit Methods
//===----------------------------------------------------------------------===//
-// Compute the size and offset of a DIE. The offset is relative to start of the
-// CU. It returns the offset after laying out the DIE.
-unsigned
-DwarfUnits::computeSizeAndOffset(DIE *Die, unsigned Offset) {
- // Get the children.
- const std::vector<DIE *> &Children = Die->getChildren();
-
- // Record the abbreviation.
- assignAbbrevNumber(Die->getAbbrev());
-
- // Get the abbreviation for this DIE.
- unsigned AbbrevNumber = Die->getAbbrevNumber();
- const DIEAbbrev *Abbrev = Abbreviations[AbbrevNumber - 1];
-
- // Set DIE offset
- Die->setOffset(Offset);
-
- // Start the size with the size of abbreviation code.
- Offset += MCAsmInfo::getULEB128Size(AbbrevNumber);
-
- const SmallVectorImpl<DIEValue*> &Values = Die->getValues();
- const SmallVectorImpl<DIEAbbrevData> &AbbrevData = Abbrev->getData();
-
- // Size the DIE attribute values.
- for (unsigned i = 0, N = Values.size(); i < N; ++i)
- // Size attribute value.
- Offset += Values[i]->SizeOf(Asm, AbbrevData[i].getForm());
-
- // Size the DIE children if any.
- if (!Children.empty()) {
- assert(Abbrev->getChildrenFlag() == dwarf::DW_CHILDREN_yes &&
- "Children flag not set");
-
- for (unsigned j = 0, M = Children.size(); j < M; ++j)
- Offset = computeSizeAndOffset(Children[j], Offset);
-
- // End of children marker.
- Offset += sizeof(int8_t);
- }
-
- Die->setSize(Offset - Die->getOffset());
- return Offset;
-}
-
-// Compute the size and offset for each DIE.
-void DwarfUnits::computeSizeAndOffsets() {
- // Offset from the first CU in the debug info section is 0 initially.
- unsigned SecOffset = 0;
-
- // Iterate over each compile unit and set the size and offsets for each
- // DIE within each compile unit. All offsets are CU relative.
- for (SmallVectorImpl<CompileUnit *>::iterator I = CUs.begin(),
- E = CUs.end(); I != E; ++I) {
- (*I)->setDebugInfoOffset(SecOffset);
-
- // CU-relative offset is reset to 0 here.
- unsigned Offset = sizeof(int32_t) + // Length of Unit Info
- (*I)->getHeaderSize(); // Unit-specific headers
-
- // EndOffset here is CU-relative, after laying out
- // all of the CU DIE.
- unsigned EndOffset = computeSizeAndOffset((*I)->getCUDie(), Offset);
- SecOffset += EndOffset;
- }
-}
-
// Emit initial Dwarf sections with a label at the start of each one.
void DwarfDebug::emitSectionLabels() {
const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
// Dwarf sections base addresses.
DwarfInfoSectionSym =
- emitSectionSym(Asm, TLOF.getDwarfInfoSection(), "section_info");
+ emitSectionSym(Asm, TLOF.getDwarfInfoSection(), "section_info");
+ if (useSplitDwarf())
+ DwarfInfoDWOSectionSym =
+ emitSectionSym(Asm, TLOF.getDwarfInfoDWOSection(), "section_info_dwo");
DwarfAbbrevSectionSym =
- emitSectionSym(Asm, TLOF.getDwarfAbbrevSection(), "section_abbrev");
+ emitSectionSym(Asm, TLOF.getDwarfAbbrevSection(), "section_abbrev");
if (useSplitDwarf())
- DwarfAbbrevDWOSectionSym =
- emitSectionSym(Asm, TLOF.getDwarfAbbrevDWOSection(),
- "section_abbrev_dwo");
- emitSectionSym(Asm, TLOF.getDwarfARangesSection());
-
- if (const MCSection *MacroInfo = TLOF.getDwarfMacroInfoSection())
- emitSectionSym(Asm, MacroInfo);
+ DwarfAbbrevDWOSectionSym = emitSectionSym(
+ Asm, TLOF.getDwarfAbbrevDWOSection(), "section_abbrev_dwo");
+ if (GenerateARangeSection)
+ emitSectionSym(Asm, TLOF.getDwarfARangesSection());
DwarfLineSectionSym =
- emitSectionSym(Asm, TLOF.getDwarfLineSection(), "section_line");
- emitSectionSym(Asm, TLOF.getDwarfLocSection());
+ emitSectionSym(Asm, TLOF.getDwarfLineSection(), "section_line");
if (GenerateGnuPubSections) {
DwarfGnuPubNamesSectionSym =
emitSectionSym(Asm, TLOF.getDwarfGnuPubNamesSection());
@@ -2042,39 +1649,36 @@ void DwarfDebug::emitSectionLabels() {
}
DwarfStrSectionSym =
- emitSectionSym(Asm, TLOF.getDwarfStrSection(), "info_string");
+ emitSectionSym(Asm, TLOF.getDwarfStrSection(), "info_string");
if (useSplitDwarf()) {
DwarfStrDWOSectionSym =
- emitSectionSym(Asm, TLOF.getDwarfStrDWOSection(), "skel_string");
+ emitSectionSym(Asm, TLOF.getDwarfStrDWOSection(), "skel_string");
DwarfAddrSectionSym =
- emitSectionSym(Asm, TLOF.getDwarfAddrSection(), "addr_sec");
- }
- DwarfDebugRangeSectionSym = emitSectionSym(Asm, TLOF.getDwarfRangesSection(),
- "debug_range");
-
- DwarfDebugLocSectionSym = emitSectionSym(Asm, TLOF.getDwarfLocSection(),
- "section_debug_loc");
-
- TextSectionSym = emitSectionSym(Asm, TLOF.getTextSection(), "text_begin");
- emitSectionSym(Asm, TLOF.getDataSection());
+ emitSectionSym(Asm, TLOF.getDwarfAddrSection(), "addr_sec");
+ DwarfDebugLocSectionSym =
+ emitSectionSym(Asm, TLOF.getDwarfLocDWOSection(), "skel_loc");
+ } else
+ DwarfDebugLocSectionSym =
+ emitSectionSym(Asm, TLOF.getDwarfLocSection(), "section_debug_loc");
+ DwarfDebugRangeSectionSym =
+ emitSectionSym(Asm, TLOF.getDwarfRangesSection(), "debug_range");
}
// Recursively emits a debug information entry.
-void DwarfDebug::emitDIE(DIE *Die, ArrayRef<DIEAbbrev *> Abbrevs) {
+void DwarfDebug::emitDIE(DIE &Die) {
// Get the abbreviation for this DIE.
- unsigned AbbrevNumber = Die->getAbbrevNumber();
- const DIEAbbrev *Abbrev = Abbrevs[AbbrevNumber - 1];
+ const DIEAbbrev &Abbrev = Die.getAbbrev();
// Emit the code (index) for the abbreviation.
if (Asm->isVerbose())
- Asm->OutStreamer.AddComment("Abbrev [" + Twine(AbbrevNumber) + "] 0x" +
- Twine::utohexstr(Die->getOffset()) + ":0x" +
- Twine::utohexstr(Die->getSize()) + " " +
- dwarf::TagString(Abbrev->getTag()));
- Asm->EmitULEB128(AbbrevNumber);
+ Asm->OutStreamer.AddComment("Abbrev [" + Twine(Abbrev.getNumber()) +
+ "] 0x" + Twine::utohexstr(Die.getOffset()) +
+ ":0x" + Twine::utohexstr(Die.getSize()) + " " +
+ dwarf::TagString(Abbrev.getTag()));
+ Asm->EmitULEB128(Abbrev.getNumber());
- const SmallVectorImpl<DIEValue*> &Values = Die->getValues();
- const SmallVectorImpl<DIEAbbrevData> &AbbrevData = Abbrev->getData();
+ const SmallVectorImpl<DIEValue *> &Values = Die.getValues();
+ const SmallVectorImpl<DIEAbbrevData> &AbbrevData = Abbrev.getData();
// Emit the DIE attribute values.
for (unsigned i = 0, N = Values.size(); i < N; ++i) {
@@ -2082,172 +1686,39 @@ void DwarfDebug::emitDIE(DIE *Die, ArrayRef<DIEAbbrev *> Abbrevs) {
dwarf::Form Form = AbbrevData[i].getForm();
assert(Form && "Too many attributes for DIE (check abbreviation)");
- if (Asm->isVerbose())
+ if (Asm->isVerbose()) {
Asm->OutStreamer.AddComment(dwarf::AttributeString(Attr));
-
- switch (Attr) {
- case dwarf::DW_AT_abstract_origin:
- case dwarf::DW_AT_type:
- case dwarf::DW_AT_friend:
- case dwarf::DW_AT_specification:
- case dwarf::DW_AT_import:
- case dwarf::DW_AT_containing_type: {
- DIEEntry *E = cast<DIEEntry>(Values[i]);
- DIE *Origin = E->getEntry();
- unsigned Addr = Origin->getOffset();
- if (Form == dwarf::DW_FORM_ref_addr) {
- assert(!useSplitDwarf() && "TODO: dwo files can't have relocations.");
- // For DW_FORM_ref_addr, output the offset from beginning of debug info
- // section. Origin->getOffset() returns the offset from start of the
- // compile unit.
- CompileUnit *CU = CUDieMap.lookup(Origin->getCompileUnit());
- assert(CU && "CUDie should belong to a CU.");
- Addr += CU->getDebugInfoOffset();
- if (Asm->MAI->doesDwarfUseRelocationsAcrossSections())
- Asm->EmitLabelPlusOffset(DwarfInfoSectionSym, Addr,
- DIEEntry::getRefAddrSize(Asm));
- else
- Asm->EmitLabelOffsetDifference(DwarfInfoSectionSym, Addr,
- DwarfInfoSectionSym,
- DIEEntry::getRefAddrSize(Asm));
- } else {
- // Make sure Origin belong to the same CU.
- assert(Die->getCompileUnit() == Origin->getCompileUnit() &&
- "The referenced DIE should belong to the same CU in ref4");
- Asm->EmitInt32(Addr);
- }
- break;
- }
- case dwarf::DW_AT_ranges: {
- // DW_AT_range Value encodes offset in debug_range section.
- DIEInteger *V = cast<DIEInteger>(Values[i]);
-
- if (Asm->MAI->doesDwarfUseRelocationsAcrossSections()) {
- Asm->EmitLabelPlusOffset(DwarfDebugRangeSectionSym,
- V->getValue(),
- 4);
- } else {
- Asm->EmitLabelOffsetDifference(DwarfDebugRangeSectionSym,
- V->getValue(),
- DwarfDebugRangeSectionSym,
- 4);
- }
- break;
- }
- case dwarf::DW_AT_location: {
- if (DIELabel *L = dyn_cast<DIELabel>(Values[i])) {
- if (Asm->MAI->doesDwarfUseRelocationsAcrossSections())
- Asm->EmitSectionOffset(L->getValue(), DwarfDebugLocSectionSym);
- else
- Asm->EmitLabelDifference(L->getValue(), DwarfDebugLocSectionSym, 4);
- } else {
- Values[i]->EmitValue(Asm, Form);
- }
- break;
- }
- case dwarf::DW_AT_accessibility: {
- if (Asm->isVerbose()) {
- DIEInteger *V = cast<DIEInteger>(Values[i]);
- Asm->OutStreamer.AddComment(dwarf::AccessibilityString(V->getValue()));
- }
- Values[i]->EmitValue(Asm, Form);
- break;
- }
- default:
- // Emit an attribute using the defined form.
- Values[i]->EmitValue(Asm, Form);
- break;
+ if (Attr == dwarf::DW_AT_accessibility)
+ Asm->OutStreamer.AddComment(dwarf::AccessibilityString(
+ cast<DIEInteger>(Values[i])->getValue()));
}
+
+ // Emit an attribute using the defined form.
+ Values[i]->EmitValue(Asm, Form);
}
// Emit the DIE children if any.
- if (Abbrev->getChildrenFlag() == dwarf::DW_CHILDREN_yes) {
- const std::vector<DIE *> &Children = Die->getChildren();
+ if (Abbrev.hasChildren()) {
+ for (auto &Child : Die.getChildren())
+ emitDIE(*Child);
- for (unsigned j = 0, M = Children.size(); j < M; ++j)
- emitDIE(Children[j], Abbrevs);
-
- if (Asm->isVerbose())
- Asm->OutStreamer.AddComment("End Of Children Mark");
+ Asm->OutStreamer.AddComment("End Of Children Mark");
Asm->EmitInt8(0);
}
}
-// Emit the various dwarf units to the unit section USection with
-// the abbreviations going into ASection.
-void DwarfUnits::emitUnits(DwarfDebug *DD,
- const MCSection *USection,
- const MCSection *ASection,
- const MCSymbol *ASectionSym) {
- Asm->OutStreamer.SwitchSection(USection);
- for (SmallVectorImpl<CompileUnit *>::iterator I = CUs.begin(),
- E = CUs.end(); I != E; ++I) {
- CompileUnit *TheCU = *I;
- DIE *Die = TheCU->getCUDie();
-
- // Emit the compile units header.
- Asm->OutStreamer
- .EmitLabel(Asm->GetTempSymbol(USection->getLabelBeginName(),
- TheCU->getUniqueID()));
-
- // Emit size of content not including length itself
- Asm->OutStreamer.AddComment("Length of Unit");
- Asm->EmitInt32(TheCU->getHeaderSize() + Die->getSize());
-
- TheCU->emitHeader(ASection, ASectionSym);
-
- DD->emitDIE(Die, Abbreviations);
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol(USection->getLabelEndName(),
- TheCU->getUniqueID()));
- }
-}
-
// Emit the debug info section.
void DwarfDebug::emitDebugInfo() {
- DwarfUnits &Holder = useSplitDwarf() ? SkeletonHolder : InfoHolder;
+ DwarfFile &Holder = useSplitDwarf() ? SkeletonHolder : InfoHolder;
- Holder.emitUnits(this, Asm->getObjFileLowering().getDwarfInfoSection(),
- Asm->getObjFileLowering().getDwarfAbbrevSection(),
- DwarfAbbrevSectionSym);
+ Holder.emitUnits(this, DwarfAbbrevSectionSym);
}
// Emit the abbreviation section.
void DwarfDebug::emitAbbreviations() {
- if (!useSplitDwarf())
- emitAbbrevs(Asm->getObjFileLowering().getDwarfAbbrevSection(),
- &Abbreviations);
- else
- emitSkeletonAbbrevs(Asm->getObjFileLowering().getDwarfAbbrevSection());
-}
-
-void DwarfDebug::emitAbbrevs(const MCSection *Section,
- std::vector<DIEAbbrev *> *Abbrevs) {
- // Check to see if it is worth the effort.
- if (!Abbrevs->empty()) {
- // Start the debug abbrev section.
- Asm->OutStreamer.SwitchSection(Section);
-
- MCSymbol *Begin = Asm->GetTempSymbol(Section->getLabelBeginName());
- Asm->OutStreamer.EmitLabel(Begin);
-
- // For each abbrevation.
- for (unsigned i = 0, N = Abbrevs->size(); i < N; ++i) {
- // Get abbreviation data
- const DIEAbbrev *Abbrev = Abbrevs->at(i);
-
- // Emit the abbrevations code (base 1 index.)
- Asm->EmitULEB128(Abbrev->getNumber(), "Abbreviation Code");
+ DwarfFile &Holder = useSplitDwarf() ? SkeletonHolder : InfoHolder;
- // Emit the abbreviations data.
- Abbrev->Emit(Asm);
- }
-
- // Mark end of abbreviations.
- Asm->EmitULEB128(0, "EOM(3)");
-
- MCSymbol *End = Asm->GetTempSymbol(Section->getLabelEndName());
- Asm->OutStreamer.EmitLabel(End);
- }
+ Holder.emitAbbrevs(Asm->getObjFileLowering().getDwarfAbbrevSection());
}
// Emit the last address of the section and the end of the line matrix.
@@ -2263,8 +1734,9 @@ void DwarfDebug::emitEndOfLineMatrix(unsigned SectionEnd) {
Asm->OutStreamer.AddComment("Section end label");
- Asm->OutStreamer.EmitSymbolValue(Asm->GetTempSymbol("section_end",SectionEnd),
- Asm->getDataLayout().getPointerSize());
+ Asm->OutStreamer.EmitSymbolValue(
+ Asm->GetTempSymbol("section_end", SectionEnd),
+ Asm->getDataLayout().getPointerSize());
// Mark end of matrix.
Asm->OutStreamer.AddComment("DW_LNE_end_sequence");
@@ -2275,122 +1747,52 @@ void DwarfDebug::emitEndOfLineMatrix(unsigned SectionEnd) {
// Emit visible names into a hashed accelerator table section.
void DwarfDebug::emitAccelNames() {
- DwarfAccelTable AT(DwarfAccelTable::Atom(dwarf::DW_ATOM_die_offset,
- dwarf::DW_FORM_data4));
- for (DenseMap<const MDNode *, CompileUnit *>::iterator I = CUMap.begin(),
- E = CUMap.end(); I != E; ++I) {
- CompileUnit *TheCU = I->second;
- const StringMap<std::vector<DIE*> > &Names = TheCU->getAccelNames();
- for (StringMap<std::vector<DIE*> >::const_iterator
- GI = Names.begin(), GE = Names.end(); GI != GE; ++GI) {
- StringRef Name = GI->getKey();
- const std::vector<DIE *> &Entities = GI->second;
- for (std::vector<DIE *>::const_iterator DI = Entities.begin(),
- DE = Entities.end(); DI != DE; ++DI)
- AT.AddName(Name, (*DI));
- }
- }
-
- AT.FinalizeTable(Asm, "Names");
+ AccelNames.FinalizeTable(Asm, "Names");
Asm->OutStreamer.SwitchSection(
- Asm->getObjFileLowering().getDwarfAccelNamesSection());
+ Asm->getObjFileLowering().getDwarfAccelNamesSection());
MCSymbol *SectionBegin = Asm->GetTempSymbol("names_begin");
Asm->OutStreamer.EmitLabel(SectionBegin);
// Emit the full data.
- AT.Emit(Asm, SectionBegin, &InfoHolder);
+ AccelNames.Emit(Asm, SectionBegin, &InfoHolder);
}
// Emit objective C classes and categories into a hashed accelerator table
// section.
void DwarfDebug::emitAccelObjC() {
- DwarfAccelTable AT(DwarfAccelTable::Atom(dwarf::DW_ATOM_die_offset,
- dwarf::DW_FORM_data4));
- for (DenseMap<const MDNode *, CompileUnit *>::iterator I = CUMap.begin(),
- E = CUMap.end(); I != E; ++I) {
- CompileUnit *TheCU = I->second;
- const StringMap<std::vector<DIE*> > &Names = TheCU->getAccelObjC();
- for (StringMap<std::vector<DIE*> >::const_iterator
- GI = Names.begin(), GE = Names.end(); GI != GE; ++GI) {
- StringRef Name = GI->getKey();
- const std::vector<DIE *> &Entities = GI->second;
- for (std::vector<DIE *>::const_iterator DI = Entities.begin(),
- DE = Entities.end(); DI != DE; ++DI)
- AT.AddName(Name, (*DI));
- }
- }
-
- AT.FinalizeTable(Asm, "ObjC");
- Asm->OutStreamer.SwitchSection(Asm->getObjFileLowering()
- .getDwarfAccelObjCSection());
+ AccelObjC.FinalizeTable(Asm, "ObjC");
+ Asm->OutStreamer.SwitchSection(
+ Asm->getObjFileLowering().getDwarfAccelObjCSection());
MCSymbol *SectionBegin = Asm->GetTempSymbol("objc_begin");
Asm->OutStreamer.EmitLabel(SectionBegin);
// Emit the full data.
- AT.Emit(Asm, SectionBegin, &InfoHolder);
+ AccelObjC.Emit(Asm, SectionBegin, &InfoHolder);
}
// Emit namespace dies into a hashed accelerator table.
void DwarfDebug::emitAccelNamespaces() {
- DwarfAccelTable AT(DwarfAccelTable::Atom(dwarf::DW_ATOM_die_offset,
- dwarf::DW_FORM_data4));
- for (DenseMap<const MDNode *, CompileUnit *>::iterator I = CUMap.begin(),
- E = CUMap.end(); I != E; ++I) {
- CompileUnit *TheCU = I->second;
- const StringMap<std::vector<DIE*> > &Names = TheCU->getAccelNamespace();
- for (StringMap<std::vector<DIE*> >::const_iterator
- GI = Names.begin(), GE = Names.end(); GI != GE; ++GI) {
- StringRef Name = GI->getKey();
- const std::vector<DIE *> &Entities = GI->second;
- for (std::vector<DIE *>::const_iterator DI = Entities.begin(),
- DE = Entities.end(); DI != DE; ++DI)
- AT.AddName(Name, (*DI));
- }
- }
-
- AT.FinalizeTable(Asm, "namespac");
- Asm->OutStreamer.SwitchSection(Asm->getObjFileLowering()
- .getDwarfAccelNamespaceSection());
+ AccelNamespace.FinalizeTable(Asm, "namespac");
+ Asm->OutStreamer.SwitchSection(
+ Asm->getObjFileLowering().getDwarfAccelNamespaceSection());
MCSymbol *SectionBegin = Asm->GetTempSymbol("namespac_begin");
Asm->OutStreamer.EmitLabel(SectionBegin);
// Emit the full data.
- AT.Emit(Asm, SectionBegin, &InfoHolder);
+ AccelNamespace.Emit(Asm, SectionBegin, &InfoHolder);
}
// Emit type dies into a hashed accelerator table.
void DwarfDebug::emitAccelTypes() {
- std::vector<DwarfAccelTable::Atom> Atoms;
- Atoms.push_back(DwarfAccelTable::Atom(dwarf::DW_ATOM_die_offset,
- dwarf::DW_FORM_data4));
- Atoms.push_back(DwarfAccelTable::Atom(dwarf::DW_ATOM_die_tag,
- dwarf::DW_FORM_data2));
- Atoms.push_back(DwarfAccelTable::Atom(dwarf::DW_ATOM_type_flags,
- dwarf::DW_FORM_data1));
- DwarfAccelTable AT(Atoms);
- for (DenseMap<const MDNode *, CompileUnit *>::iterator I = CUMap.begin(),
- E = CUMap.end(); I != E; ++I) {
- CompileUnit *TheCU = I->second;
- const StringMap<std::vector<std::pair<DIE*, unsigned > > > &Names
- = TheCU->getAccelTypes();
- for (StringMap<std::vector<std::pair<DIE*, unsigned> > >::const_iterator
- GI = Names.begin(), GE = Names.end(); GI != GE; ++GI) {
- StringRef Name = GI->getKey();
- const std::vector<std::pair<DIE *, unsigned> > &Entities = GI->second;
- for (std::vector<std::pair<DIE *, unsigned> >::const_iterator DI
- = Entities.begin(), DE = Entities.end(); DI !=DE; ++DI)
- AT.AddName(Name, (*DI).first, (*DI).second);
- }
- }
- AT.FinalizeTable(Asm, "types");
- Asm->OutStreamer.SwitchSection(Asm->getObjFileLowering()
- .getDwarfAccelTypesSection());
+ AccelTypes.FinalizeTable(Asm, "types");
+ Asm->OutStreamer.SwitchSection(
+ Asm->getObjFileLowering().getDwarfAccelTypesSection());
MCSymbol *SectionBegin = Asm->GetTempSymbol("types_begin");
Asm->OutStreamer.EmitLabel(SectionBegin);
// Emit the full data.
- AT.Emit(Asm, SectionBegin, &InfoHolder);
+ AccelTypes.Emit(Asm, SectionBegin, &InfoHolder);
}
// Public name handling.
@@ -2408,16 +1810,16 @@ void DwarfDebug::emitAccelTypes() {
// reference in the pubname header doesn't change.
/// computeIndexValue - Compute the gdb index value for the DIE and CU.
-static dwarf::PubIndexEntryDescriptor computeIndexValue(CompileUnit *CU,
- DIE *Die) {
+static dwarf::PubIndexEntryDescriptor computeIndexValue(DwarfUnit *CU,
+ const DIE *Die) {
dwarf::GDBIndexEntryLinkage Linkage = dwarf::GIEL_STATIC;
// We could have a specification DIE that has most of our knowledge;
// look for that now.
DIEValue *SpecVal = Die->findAttribute(dwarf::DW_AT_specification);
if (SpecVal) {
- DIE *SpecDIE = cast<DIEEntry>(SpecVal)->getEntry();
- if (SpecDIE->findAttribute(dwarf::DW_AT_external))
+ DIE &SpecDIE = cast<DIEEntry>(SpecVal)->getEntry();
+ if (SpecDIE.findAttribute(dwarf::DW_AT_external))
Linkage = dwarf::GIEL_EXTERNAL;
} else if (Die->findAttribute(dwarf::DW_AT_external))
Linkage = dwarf::GIEL_EXTERNAL;
@@ -2453,342 +1855,212 @@ static dwarf::PubIndexEntryDescriptor computeIndexValue(CompileUnit *CU,
/// emitDebugPubNames - Emit visible names into a debug pubnames section.
///
void DwarfDebug::emitDebugPubNames(bool GnuStyle) {
- const MCSection *ISec = Asm->getObjFileLowering().getDwarfInfoSection();
const MCSection *PSec =
GnuStyle ? Asm->getObjFileLowering().getDwarfGnuPubNamesSection()
: Asm->getObjFileLowering().getDwarfPubNamesSection();
- typedef DenseMap<const MDNode*, CompileUnit*> CUMapType;
- for (CUMapType::iterator I = CUMap.begin(), E = CUMap.end(); I != E; ++I) {
- CompileUnit *TheCU = I->second;
- unsigned ID = TheCU->getUniqueID();
+ emitDebugPubSection(GnuStyle, PSec, "Names", &DwarfUnit::getGlobalNames);
+}
+
+void DwarfDebug::emitDebugPubSection(
+ bool GnuStyle, const MCSection *PSec, StringRef Name,
+ const StringMap<const DIE *> &(DwarfUnit::*Accessor)() const) {
+ for (const auto &NU : CUMap) {
+ DwarfCompileUnit *TheU = NU.second;
+
+ const auto &Globals = (TheU->*Accessor)();
+
+ if (Globals.empty())
+ continue;
+
+ if (auto Skeleton = static_cast<DwarfCompileUnit *>(TheU->getSkeleton()))
+ TheU = Skeleton;
+ unsigned ID = TheU->getUniqueID();
// Start the dwarf pubnames section.
Asm->OutStreamer.SwitchSection(PSec);
- // Emit a label so we can reference the beginning of this pubname section.
- if (GnuStyle)
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("gnu_pubnames",
- TheCU->getUniqueID()));
-
// Emit the header.
- Asm->OutStreamer.AddComment("Length of Public Names Info");
- Asm->EmitLabelDifference(Asm->GetTempSymbol("pubnames_end", ID),
- Asm->GetTempSymbol("pubnames_begin", ID), 4);
+ Asm->OutStreamer.AddComment("Length of Public " + Name + " Info");
+ MCSymbol *BeginLabel = Asm->GetTempSymbol("pub" + Name + "_begin", ID);
+ MCSymbol *EndLabel = Asm->GetTempSymbol("pub" + Name + "_end", ID);
+ Asm->EmitLabelDifference(EndLabel, BeginLabel, 4);
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("pubnames_begin", ID));
+ Asm->OutStreamer.EmitLabel(BeginLabel);
Asm->OutStreamer.AddComment("DWARF Version");
Asm->EmitInt16(dwarf::DW_PUBNAMES_VERSION);
Asm->OutStreamer.AddComment("Offset of Compilation Unit Info");
- Asm->EmitSectionOffset(Asm->GetTempSymbol(ISec->getLabelBeginName(), ID),
- DwarfInfoSectionSym);
+ Asm->EmitSectionOffset(TheU->getLabelBegin(), TheU->getSectionSym());
Asm->OutStreamer.AddComment("Compilation Unit Length");
- Asm->EmitLabelDifference(Asm->GetTempSymbol(ISec->getLabelEndName(), ID),
- Asm->GetTempSymbol(ISec->getLabelBeginName(), ID),
- 4);
+ Asm->EmitLabelDifference(TheU->getLabelEnd(), TheU->getLabelBegin(), 4);
// Emit the pubnames for this compilation unit.
- const StringMap<DIE*> &Globals = TheCU->getGlobalNames();
- for (StringMap<DIE*>::const_iterator
- GI = Globals.begin(), GE = Globals.end(); GI != GE; ++GI) {
- const char *Name = GI->getKeyData();
- DIE *Entity = GI->second;
+ for (const auto &GI : Globals) {
+ const char *Name = GI.getKeyData();
+ const DIE *Entity = GI.second;
Asm->OutStreamer.AddComment("DIE offset");
Asm->EmitInt32(Entity->getOffset());
if (GnuStyle) {
- dwarf::PubIndexEntryDescriptor Desc = computeIndexValue(TheCU, Entity);
+ dwarf::PubIndexEntryDescriptor Desc = computeIndexValue(TheU, Entity);
Asm->OutStreamer.AddComment(
Twine("Kind: ") + dwarf::GDBIndexEntryKindString(Desc.Kind) + ", " +
dwarf::GDBIndexEntryLinkageString(Desc.Linkage));
Asm->EmitInt8(Desc.toBits());
}
- if (Asm->isVerbose())
- Asm->OutStreamer.AddComment("External Name");
- Asm->OutStreamer.EmitBytes(StringRef(Name, GI->getKeyLength()+1));
+ Asm->OutStreamer.AddComment("External Name");
+ Asm->OutStreamer.EmitBytes(StringRef(Name, GI.getKeyLength() + 1));
}
Asm->OutStreamer.AddComment("End Mark");
Asm->EmitInt32(0);
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("pubnames_end", ID));
+ Asm->OutStreamer.EmitLabel(EndLabel);
}
}
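// A minimal sketch of how the pointer-to-member accessor above is meant to
// be used (the binding shown is only for illustration):
//   const StringMap<const DIE *> &(DwarfUnit::*Accessor)() const =
//       &DwarfUnit::getGlobalNames;
//   const auto &Globals = (TheU->*Accessor)(); // same as TheU->getGlobalNames()
// which is what lets emitDebugPubNames and emitDebugPubTypes share this body.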
void DwarfDebug::emitDebugPubTypes(bool GnuStyle) {
- const MCSection *ISec = Asm->getObjFileLowering().getDwarfInfoSection();
const MCSection *PSec =
GnuStyle ? Asm->getObjFileLowering().getDwarfGnuPubTypesSection()
: Asm->getObjFileLowering().getDwarfPubTypesSection();
- for (DenseMap<const MDNode *, CompileUnit *>::iterator I = CUMap.begin(),
- E = CUMap.end();
- I != E; ++I) {
- CompileUnit *TheCU = I->second;
- // Start the dwarf pubtypes section.
- Asm->OutStreamer.SwitchSection(PSec);
-
- // Emit a label so we can reference the beginning of this pubtype section.
- if (GnuStyle)
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("gnu_pubtypes",
- TheCU->getUniqueID()));
-
- // Emit the header.
- Asm->OutStreamer.AddComment("Length of Public Types Info");
- Asm->EmitLabelDifference(
- Asm->GetTempSymbol("pubtypes_end", TheCU->getUniqueID()),
- Asm->GetTempSymbol("pubtypes_begin", TheCU->getUniqueID()), 4);
-
- Asm->OutStreamer.EmitLabel(
- Asm->GetTempSymbol("pubtypes_begin", TheCU->getUniqueID()));
-
- if (Asm->isVerbose())
- Asm->OutStreamer.AddComment("DWARF Version");
- Asm->EmitInt16(dwarf::DW_PUBTYPES_VERSION);
-
- Asm->OutStreamer.AddComment("Offset of Compilation Unit Info");
- Asm->EmitSectionOffset(
- Asm->GetTempSymbol(ISec->getLabelBeginName(), TheCU->getUniqueID()),
- DwarfInfoSectionSym);
-
- Asm->OutStreamer.AddComment("Compilation Unit Length");
- Asm->EmitLabelDifference(
- Asm->GetTempSymbol(ISec->getLabelEndName(), TheCU->getUniqueID()),
- Asm->GetTempSymbol(ISec->getLabelBeginName(), TheCU->getUniqueID()), 4);
-
- // Emit the pubtypes.
- const StringMap<DIE *> &Globals = TheCU->getGlobalTypes();
- for (StringMap<DIE *>::const_iterator GI = Globals.begin(),
- GE = Globals.end();
- GI != GE; ++GI) {
- const char *Name = GI->getKeyData();
- DIE *Entity = GI->second;
-
- if (Asm->isVerbose())
- Asm->OutStreamer.AddComment("DIE offset");
- Asm->EmitInt32(Entity->getOffset());
-
- if (GnuStyle) {
- dwarf::PubIndexEntryDescriptor Desc = computeIndexValue(TheCU, Entity);
- Asm->OutStreamer.AddComment(
- Twine("Kind: ") + dwarf::GDBIndexEntryKindString(Desc.Kind) + ", " +
- dwarf::GDBIndexEntryLinkageString(Desc.Linkage));
- Asm->EmitInt8(Desc.toBits());
- }
-
- if (Asm->isVerbose())
- Asm->OutStreamer.AddComment("External Name");
-
- // Emit the name with a terminating null byte.
- Asm->OutStreamer.EmitBytes(StringRef(Name, GI->getKeyLength() + 1));
- }
-
- Asm->OutStreamer.AddComment("End Mark");
- Asm->EmitInt32(0);
- Asm->OutStreamer.EmitLabel(
- Asm->GetTempSymbol("pubtypes_end", TheCU->getUniqueID()));
- }
+ emitDebugPubSection(GnuStyle, PSec, "Types", &DwarfUnit::getGlobalTypes);
}
-// Emit strings into a string section.
-void DwarfUnits::emitStrings(const MCSection *StrSection,
- const MCSection *OffsetSection = NULL,
- const MCSymbol *StrSecSym = NULL) {
-
- if (StringPool.empty()) return;
-
- // Start the dwarf str section.
- Asm->OutStreamer.SwitchSection(StrSection);
-
- // Get all of the string pool entries and put them in an array by their ID so
- // we can sort them.
- SmallVector<std::pair<unsigned,
- StringMapEntry<std::pair<MCSymbol*, unsigned> >*>, 64> Entries;
-
- for (StringMap<std::pair<MCSymbol*, unsigned> >::iterator
- I = StringPool.begin(), E = StringPool.end();
- I != E; ++I)
- Entries.push_back(std::make_pair(I->second.second, &*I));
-
- array_pod_sort(Entries.begin(), Entries.end());
-
- for (unsigned i = 0, e = Entries.size(); i != e; ++i) {
- // Emit a label for reference from debug information entries.
- Asm->OutStreamer.EmitLabel(Entries[i].second->getValue().first);
-
- // Emit the string itself with a terminating null byte.
- Asm->OutStreamer.EmitBytes(StringRef(Entries[i].second->getKeyData(),
- Entries[i].second->getKeyLength()+1));
- }
-
- // If we've got an offset section go ahead and emit that now as well.
- if (OffsetSection) {
- Asm->OutStreamer.SwitchSection(OffsetSection);
- unsigned offset = 0;
- unsigned size = 4; // FIXME: DWARF64 is 8.
- for (unsigned i = 0, e = Entries.size(); i != e; ++i) {
- Asm->OutStreamer.EmitIntValue(offset, size);
- offset += Entries[i].second->getKeyLength() + 1;
- }
- }
+// Emit visible names into a debug str section.
+void DwarfDebug::emitDebugStr() {
+ DwarfFile &Holder = useSplitDwarf() ? SkeletonHolder : InfoHolder;
+ Holder.emitStrings(Asm->getObjFileLowering().getDwarfStrSection());
}
-// Emit strings into a string section.
-void DwarfUnits::emitAddresses(const MCSection *AddrSection) {
-
- if (AddressPool.empty()) return;
-
- // Start the dwarf addr section.
- Asm->OutStreamer.SwitchSection(AddrSection);
-
- // Order the address pool entries by ID
- SmallVector<const MCExpr *, 64> Entries(AddressPool.size());
-
- for (DenseMap<const MCExpr *, unsigned>::iterator I = AddressPool.begin(),
- E = AddressPool.end();
- I != E; ++I)
- Entries[I->second] = I->first;
+void DwarfDebug::emitDebugLocEntry(ByteStreamer &Streamer,
+ const DebugLocEntry &Entry) {
+ assert(Entry.getValues().size() == 1 &&
+ "multi-value entries are not supported yet.");
+ const DebugLocEntry::Value Value = Entry.getValues()[0];
+ DIVariable DV(Value.getVariable());
+ if (Value.isInt()) {
+ DIBasicType BTy(resolve(DV.getType()));
+ if (BTy.Verify() && (BTy.getEncoding() == dwarf::DW_ATE_signed ||
+ BTy.getEncoding() == dwarf::DW_ATE_signed_char)) {
+ Streamer.EmitInt8(dwarf::DW_OP_consts, "DW_OP_consts");
+ Streamer.EmitSLEB128(Value.getInt());
+ } else {
+ Streamer.EmitInt8(dwarf::DW_OP_constu, "DW_OP_constu");
+ Streamer.EmitULEB128(Value.getInt());
+ }
+ } else if (Value.isLocation()) {
+ MachineLocation Loc = Value.getLoc();
+ if (!DV.hasComplexAddress())
+ // Regular entry.
+ Asm->EmitDwarfRegOp(Streamer, Loc, DV.isIndirect());
+ else {
+ // Complex address entry.
+ unsigned N = DV.getNumAddrElements();
+ unsigned i = 0;
+ if (N >= 2 && DV.getAddrElement(0) == DIBuilder::OpPlus) {
+ if (Loc.getOffset()) {
+ i = 2;
+ Asm->EmitDwarfRegOp(Streamer, Loc, DV.isIndirect());
+ Streamer.EmitInt8(dwarf::DW_OP_deref, "DW_OP_deref");
+ Streamer.EmitInt8(dwarf::DW_OP_plus_uconst, "DW_OP_plus_uconst");
+ Streamer.EmitSLEB128(DV.getAddrElement(1));
+ } else {
+ // If first address element is OpPlus then emit
+ // DW_OP_breg + Offset instead of DW_OP_reg + Offset.
+ MachineLocation TLoc(Loc.getReg(), DV.getAddrElement(1));
+ Asm->EmitDwarfRegOp(Streamer, TLoc, DV.isIndirect());
+ i = 2;
+ }
+ } else {
+ Asm->EmitDwarfRegOp(Streamer, Loc, DV.isIndirect());
+ }
- for (unsigned i = 0, e = Entries.size(); i != e; ++i) {
- // Emit an expression for reference from debug information entries.
- if (const MCExpr *Expr = Entries[i])
- Asm->OutStreamer.EmitValue(Expr, Asm->getDataLayout().getPointerSize());
- else
- Asm->OutStreamer.EmitIntValue(0, Asm->getDataLayout().getPointerSize());
+ // Emit remaining complex address elements.
+ for (; i < N; ++i) {
+ uint64_t Element = DV.getAddrElement(i);
+ if (Element == DIBuilder::OpPlus) {
+ Streamer.EmitInt8(dwarf::DW_OP_plus_uconst, "DW_OP_plus_uconst");
+ Streamer.EmitULEB128(DV.getAddrElement(++i));
+ } else if (Element == DIBuilder::OpDeref) {
+ if (!Loc.isReg())
+ Streamer.EmitInt8(dwarf::DW_OP_deref, "DW_OP_deref");
+ } else
+ llvm_unreachable("unknown Opcode found in complex address");
+ }
+ }
}
-
+ // else ... ignore constant fp. There is no good way to
+ // represent them here in dwarf.
+ // FIXME: ^
}
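// Worked example of the constant encoding chosen above (illustrative only):
// a variable of signed type holding -1 becomes DW_OP_consts followed by the
// single SLEB128 byte 0x7f, while an unsigned (or unverified) type holding 1
// becomes DW_OP_constu followed by the ULEB128 byte 0x01.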
-// Emit visible names into a debug str section.
-void DwarfDebug::emitDebugStr() {
- DwarfUnits &Holder = useSplitDwarf() ? SkeletonHolder : InfoHolder;
- Holder.emitStrings(Asm->getObjFileLowering().getDwarfStrSection());
+void DwarfDebug::emitDebugLocEntryLocation(const DebugLocEntry &Entry) {
+ Asm->OutStreamer.AddComment("Loc expr size");
+ MCSymbol *begin = Asm->OutStreamer.getContext().CreateTempSymbol();
+ MCSymbol *end = Asm->OutStreamer.getContext().CreateTempSymbol();
+ Asm->EmitLabelDifference(end, begin, 2);
+ Asm->OutStreamer.EmitLabel(begin);
+ // Emit the entry.
+ APByteStreamer Streamer(*Asm);
+ emitDebugLocEntry(Streamer, Entry);
+ // Close the range.
+ Asm->OutStreamer.EmitLabel(end);
}
// Emit locations into the debug loc section.
void DwarfDebug::emitDebugLoc() {
- if (DotDebugLocEntries.empty())
- return;
-
- for (SmallVectorImpl<DotDebugLocEntry>::iterator
- I = DotDebugLocEntries.begin(), E = DotDebugLocEntries.end();
- I != E; ++I) {
- DotDebugLocEntry &Entry = *I;
- if (I + 1 != DotDebugLocEntries.end())
- Entry.Merge(I+1);
- }
-
// Start the dwarf loc section.
Asm->OutStreamer.SwitchSection(
- Asm->getObjFileLowering().getDwarfLocSection());
+ Asm->getObjFileLowering().getDwarfLocSection());
unsigned char Size = Asm->getDataLayout().getPointerSize();
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("debug_loc", 0));
- unsigned index = 1;
- for (SmallVectorImpl<DotDebugLocEntry>::iterator
- I = DotDebugLocEntries.begin(), E = DotDebugLocEntries.end();
- I != E; ++I, ++index) {
- DotDebugLocEntry &Entry = *I;
- if (Entry.isMerged()) continue;
- if (Entry.isEmpty()) {
- Asm->OutStreamer.EmitIntValue(0, Size);
- Asm->OutStreamer.EmitIntValue(0, Size);
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("debug_loc", index));
- } else {
- Asm->OutStreamer.EmitSymbolValue(Entry.getBeginSym(), Size);
- Asm->OutStreamer.EmitSymbolValue(Entry.getEndSym(), Size);
- DIVariable DV(Entry.getVariable());
- Asm->OutStreamer.AddComment("Loc expr size");
- MCSymbol *begin = Asm->OutStreamer.getContext().CreateTempSymbol();
- MCSymbol *end = Asm->OutStreamer.getContext().CreateTempSymbol();
- Asm->EmitLabelDifference(end, begin, 2);
- Asm->OutStreamer.EmitLabel(begin);
- if (Entry.isInt()) {
- DIBasicType BTy(DV.getType());
- if (BTy.Verify() &&
- (BTy.getEncoding() == dwarf::DW_ATE_signed
- || BTy.getEncoding() == dwarf::DW_ATE_signed_char)) {
- Asm->OutStreamer.AddComment("DW_OP_consts");
- Asm->EmitInt8(dwarf::DW_OP_consts);
- Asm->EmitSLEB128(Entry.getInt());
- } else {
- Asm->OutStreamer.AddComment("DW_OP_constu");
- Asm->EmitInt8(dwarf::DW_OP_constu);
- Asm->EmitULEB128(Entry.getInt());
- }
- } else if (Entry.isLocation()) {
- MachineLocation Loc = Entry.getLoc();
- if (!DV.hasComplexAddress())
- // Regular entry.
- Asm->EmitDwarfRegOp(Loc, DV.isIndirect());
- else {
- // Complex address entry.
- unsigned N = DV.getNumAddrElements();
- unsigned i = 0;
- if (N >= 2 && DV.getAddrElement(0) == DIBuilder::OpPlus) {
- if (Loc.getOffset()) {
- i = 2;
- Asm->EmitDwarfRegOp(Loc, DV.isIndirect());
- Asm->OutStreamer.AddComment("DW_OP_deref");
- Asm->EmitInt8(dwarf::DW_OP_deref);
- Asm->OutStreamer.AddComment("DW_OP_plus_uconst");
- Asm->EmitInt8(dwarf::DW_OP_plus_uconst);
- Asm->EmitSLEB128(DV.getAddrElement(1));
- } else {
- // If first address element is OpPlus then emit
- // DW_OP_breg + Offset instead of DW_OP_reg + Offset.
- MachineLocation TLoc(Loc.getReg(), DV.getAddrElement(1));
- Asm->EmitDwarfRegOp(TLoc, DV.isIndirect());
- i = 2;
- }
- } else {
- Asm->EmitDwarfRegOp(Loc, DV.isIndirect());
- }
-
- // Emit remaining complex address elements.
- for (; i < N; ++i) {
- uint64_t Element = DV.getAddrElement(i);
- if (Element == DIBuilder::OpPlus) {
- Asm->EmitInt8(dwarf::DW_OP_plus_uconst);
- Asm->EmitULEB128(DV.getAddrElement(++i));
- } else if (Element == DIBuilder::OpDeref) {
- if (!Loc.isReg())
- Asm->EmitInt8(dwarf::DW_OP_deref);
- } else
- llvm_unreachable("unknown Opcode found in complex address");
- }
- }
+ for (const auto &DebugLoc : DotDebugLocEntries) {
+ Asm->OutStreamer.EmitLabel(DebugLoc.Label);
+ for (const auto &Entry : DebugLoc.List) {
+ // Set up the range. This range is relative to the entry point of the
+ // compile unit. This is a hard coded 0 for low_pc when we're emitting
+ // ranges, or the DW_AT_low_pc on the compile unit otherwise.
+ const DwarfCompileUnit *CU = Entry.getCU();
+ if (CU->getRanges().size() == 1) {
+ // Grab the begin symbol from the first range as our base.
+ const MCSymbol *Base = CU->getRanges()[0].getStart();
+ Asm->EmitLabelDifference(Entry.getBeginSym(), Base, Size);
+ Asm->EmitLabelDifference(Entry.getEndSym(), Base, Size);
+ } else {
+ Asm->OutStreamer.EmitSymbolValue(Entry.getBeginSym(), Size);
+ Asm->OutStreamer.EmitSymbolValue(Entry.getEndSym(), Size);
}
- // else ... ignore constant fp. There is not any good way to
- // to represent them here in dwarf.
- Asm->OutStreamer.EmitLabel(end);
+
+ emitDebugLocEntryLocation(Entry);
}
+ Asm->OutStreamer.EmitIntValue(0, Size);
+ Asm->OutStreamer.EmitIntValue(0, Size);
}
}
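// Rough shape of one .debug_loc list emitted above, assuming 8-byte pointers,
// a CU with a single range based at .Lfunc_begin0, and invented label names:
//   .quad .Ltmp3-.Lfunc_begin0    # entry begin, relative to the CU base
//   .quad .Ltmp4-.Lfunc_begin0    # entry end, relative to the CU base
//   .short 1                      # Loc expr size
//   .byte 0x55                    # e.g. DW_OP_reg5
//   .quad 0                       # end-of-list terminator
//   .quad 0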
-struct SymbolCUSorter {
- SymbolCUSorter(const MCStreamer &s) : Streamer(s) {}
- const MCStreamer &Streamer;
-
- bool operator() (const SymbolCU &A, const SymbolCU &B) {
- unsigned IA = A.Sym ? Streamer.GetSymbolOrder(A.Sym) : 0;
- unsigned IB = B.Sym ? Streamer.GetSymbolOrder(B.Sym) : 0;
-
- // Symbols with no order assigned should be placed at the end.
- // (e.g. section end labels)
- if (IA == 0)
- IA = (unsigned)(-1);
- if (IB == 0)
- IB = (unsigned)(-1);
- return IA < IB;
+void DwarfDebug::emitDebugLocDWO() {
+ Asm->OutStreamer.SwitchSection(
+ Asm->getObjFileLowering().getDwarfLocDWOSection());
+ for (const auto &DebugLoc : DotDebugLocEntries) {
+ Asm->OutStreamer.EmitLabel(DebugLoc.Label);
+ for (const auto &Entry : DebugLoc.List) {
+ // Just always use start_length for now - at least that's one address
+ // rather than two. We could get fancier and try to, say, reuse an
+ // address we know we've emitted elsewhere (the start of the function?
+ // The start of the CU or CU subrange that encloses this range?)
+ Asm->EmitInt8(dwarf::DW_LLE_start_length_entry);
+ unsigned idx = AddrPool.getIndex(Entry.getBeginSym());
+ Asm->EmitULEB128(idx);
+ Asm->EmitLabelDifference(Entry.getEndSym(), Entry.getBeginSym(), 4);
+
+ emitDebugLocEntryLocation(Entry);
+ }
+ Asm->EmitInt8(dwarf::DW_LLE_end_of_list_entry);
}
-};
-
-static bool CUSort(const CompileUnit *A, const CompileUnit *B) {
- return (A->getUniqueID() < B->getUniqueID());
}
struct ArangeSpan {
@@ -2799,18 +2071,17 @@ struct ArangeSpan {
// address we can tie back to a CU.
void DwarfDebug::emitDebugARanges() {
// Start the dwarf aranges section.
- Asm->OutStreamer
- .SwitchSection(Asm->getObjFileLowering().getDwarfARangesSection());
+ Asm->OutStreamer.SwitchSection(
+ Asm->getObjFileLowering().getDwarfARangesSection());
- typedef DenseMap<CompileUnit *, std::vector<ArangeSpan> > SpansType;
+ typedef DenseMap<DwarfCompileUnit *, std::vector<ArangeSpan>> SpansType;
SpansType Spans;
// Build a list of sections used.
std::vector<const MCSection *> Sections;
- for (SectionMapType::iterator it = SectionMap.begin(); it != SectionMap.end();
- it++) {
- const MCSection *Section = it->first;
+ for (const auto &it : SectionMap) {
+ const MCSection *Section = it.first;
Sections.push_back(Section);
}
@@ -2819,32 +2090,40 @@ void DwarfDebug::emitDebugARanges() {
std::sort(Sections.begin(), Sections.end(), SectionSort);
// Build a set of address spans, sorted by CU.
- for (size_t SecIdx=0;SecIdx<Sections.size();SecIdx++) {
- const MCSection *Section = Sections[SecIdx];
+ for (const MCSection *Section : Sections) {
SmallVector<SymbolCU, 8> &List = SectionMap[Section];
if (List.size() < 2)
continue;
// Sort the symbols by offset within the section.
- SymbolCUSorter sorter(Asm->OutStreamer);
- std::sort(List.begin(), List.end(), sorter);
+ std::sort(List.begin(), List.end(),
+ [&](const SymbolCU &A, const SymbolCU &B) {
+ unsigned IA = A.Sym ? Asm->OutStreamer.GetSymbolOrder(A.Sym) : 0;
+ unsigned IB = B.Sym ? Asm->OutStreamer.GetSymbolOrder(B.Sym) : 0;
+
+ // Symbols with no order assigned should be placed at the end.
+ // (e.g. section end labels)
+ if (IA == 0)
+ return false;
+ if (IB == 0)
+ return true;
+ return IA < IB;
+ });
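// Note on the comparator above: returning false when A has no assigned order
// and true when only B has none sorts the order-less symbols (e.g. section
// end labels) after every ordered one, which is the same effect the removed
// SymbolCUSorter got by mapping an order of 0 to (unsigned)(-1).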
// If we have no section (e.g. common), just write out
// individual spans for each symbol.
- if (Section == NULL) {
- for (size_t n = 0; n < List.size(); n++) {
- const SymbolCU &Cur = List[n];
-
+ if (!Section) {
+ for (const SymbolCU &Cur : List) {
ArangeSpan Span;
Span.Start = Cur.Sym;
- Span.End = NULL;
+ Span.End = nullptr;
if (Cur.CU)
Spans[Cur.CU].push_back(Span);
}
} else {
// Build spans between each label.
const MCSymbol *StartSym = List[0].Sym;
- for (size_t n = 1; n < List.size(); n++) {
+ for (size_t n = 1, e = List.size(); n < e; n++) {
const SymbolCU &Prev = List[n - 1];
const SymbolCU &Cur = List[n];
@@ -2860,37 +2139,36 @@ void DwarfDebug::emitDebugARanges() {
}
}
- const MCSection *ISec = Asm->getObjFileLowering().getDwarfInfoSection();
unsigned PtrSize = Asm->getDataLayout().getPointerSize();
// Build a list of CUs used.
- std::vector<CompileUnit *> CUs;
- for (SpansType::iterator it = Spans.begin(); it != Spans.end(); it++) {
- CompileUnit *CU = it->first;
+ std::vector<DwarfCompileUnit *> CUs;
+ for (const auto &it : Spans) {
+ DwarfCompileUnit *CU = it.first;
CUs.push_back(CU);
}
// Sort the CU list (again, to ensure consistent output order).
- std::sort(CUs.begin(), CUs.end(), CUSort);
+ std::sort(CUs.begin(), CUs.end(), [](const DwarfUnit *A, const DwarfUnit *B) {
+ return A->getUniqueID() < B->getUniqueID();
+ });
// Emit an arange table for each CU we used.
- for (size_t CUIdx=0;CUIdx<CUs.size();CUIdx++) {
- CompileUnit *CU = CUs[CUIdx];
+ for (DwarfCompileUnit *CU : CUs) {
std::vector<ArangeSpan> &List = Spans[CU];
// Emit size of content not including length itself.
- unsigned ContentSize
- = sizeof(int16_t) // DWARF ARange version number
- + sizeof(int32_t) // Offset of CU in the .debug_info section
- + sizeof(int8_t) // Pointer Size (in bytes)
- + sizeof(int8_t); // Segment Size (in bytes)
+ unsigned ContentSize =
+ sizeof(int16_t) + // DWARF ARange version number
+ sizeof(int32_t) + // Offset of CU in the .debug_info section
+ sizeof(int8_t) + // Pointer Size (in bytes)
+ sizeof(int8_t); // Segment Size (in bytes)
unsigned TupleSize = PtrSize * 2;
// 7.20 in the Dwarf specs requires the table to be aligned to a tuple.
- unsigned Padding = 0;
- while (((sizeof(int32_t) + ContentSize + Padding) % TupleSize) != 0)
- Padding++;
+ unsigned Padding =
+ OffsetToAlignment(sizeof(int32_t) + ContentSize, TupleSize);
ContentSize += Padding;
ContentSize += (List.size() + 1) * TupleSize;
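// Worked example of the padding above, assuming PtrSize == 8: TupleSize is
// 16 and the fixed header (4-byte length + 2-byte version + 4-byte CU offset
// + 1-byte address size + 1-byte segment size) is 12 bytes, so
// OffsetToAlignment(12, 16) adds 4 bytes of 0xff padding before the first
// address/length tuple, the same count the removed while-loop produced.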
@@ -2901,19 +2179,15 @@ void DwarfDebug::emitDebugARanges() {
Asm->OutStreamer.AddComment("DWARF Arange version number");
Asm->EmitInt16(dwarf::DW_ARANGES_VERSION);
Asm->OutStreamer.AddComment("Offset Into Debug Info Section");
- Asm->EmitSectionOffset(
- Asm->GetTempSymbol(ISec->getLabelBeginName(), CU->getUniqueID()),
- DwarfInfoSectionSym);
+ Asm->EmitSectionOffset(CU->getLocalLabelBegin(), CU->getLocalSectionSym());
Asm->OutStreamer.AddComment("Address Size (in bytes)");
Asm->EmitInt8(PtrSize);
Asm->OutStreamer.AddComment("Segment Size (in bytes)");
Asm->EmitInt8(0);
- for (unsigned n = 0; n < Padding; n++)
- Asm->EmitInt8(0xff);
+ Asm->OutStreamer.EmitFill(Padding, 0xff);
- for (unsigned n = 0; n < List.size(); n++) {
- const ArangeSpan &Span = List[n];
+ for (const ArangeSpan &Span : List) {
Asm->EmitLabelReference(Span.Start, PtrSize);
// Calculate the size as being from the span start to its end.
@@ -2939,125 +2213,133 @@ void DwarfDebug::emitDebugARanges() {
// Emit visible names into a debug ranges section.
void DwarfDebug::emitDebugRanges() {
// Start the dwarf ranges section.
- Asm->OutStreamer
- .SwitchSection(Asm->getObjFileLowering().getDwarfRangesSection());
+ Asm->OutStreamer.SwitchSection(
+ Asm->getObjFileLowering().getDwarfRangesSection());
+
+ // Size for our labels.
unsigned char Size = Asm->getDataLayout().getPointerSize();
- for (SmallVectorImpl<const MCSymbol *>::iterator
- I = DebugRangeSymbols.begin(), E = DebugRangeSymbols.end();
- I != E; ++I) {
- if (*I)
- Asm->OutStreamer.EmitSymbolValue(const_cast<MCSymbol*>(*I), Size);
- else
+
+ // Grab the specific ranges for the compile units in the module.
+ for (const auto &I : CUMap) {
+ DwarfCompileUnit *TheCU = I.second;
+
+ // Iterate over the misc ranges for the compile units in the module.
+ for (const RangeSpanList &List : TheCU->getRangeLists()) {
+ // Emit our symbol so we can find the beginning of the range.
+ Asm->OutStreamer.EmitLabel(List.getSym());
+
+ for (const RangeSpan &Range : List.getRanges()) {
+ const MCSymbol *Begin = Range.getStart();
+ const MCSymbol *End = Range.getEnd();
+ assert(Begin && "Range without a begin symbol?");
+ assert(End && "Range without an end symbol?");
+ if (TheCU->getRanges().size() == 1) {
+ // Grab the begin symbol from the first range as our base.
+ const MCSymbol *Base = TheCU->getRanges()[0].getStart();
+ Asm->EmitLabelDifference(Begin, Base, Size);
+ Asm->EmitLabelDifference(End, Base, Size);
+ } else {
+ Asm->OutStreamer.EmitSymbolValue(Begin, Size);
+ Asm->OutStreamer.EmitSymbolValue(End, Size);
+ }
+ }
+
+ // And terminate the list with two 0 values.
Asm->OutStreamer.EmitIntValue(0, Size);
- }
-}
+ Asm->OutStreamer.EmitIntValue(0, Size);
+ }
-// Emit visible names into a debug macinfo section.
-void DwarfDebug::emitDebugMacInfo() {
- if (const MCSection *LineInfo =
- Asm->getObjFileLowering().getDwarfMacroInfoSection()) {
- // Start the dwarf macinfo section.
- Asm->OutStreamer.SwitchSection(LineInfo);
+ // Now emit a range for the CU itself.
+ if (TheCU->getRanges().size() > 1) {
+ Asm->OutStreamer.EmitLabel(
+ Asm->GetTempSymbol("cu_ranges", TheCU->getUniqueID()));
+ for (const RangeSpan &Range : TheCU->getRanges()) {
+ const MCSymbol *Begin = Range.getStart();
+ const MCSymbol *End = Range.getEnd();
+ assert(Begin && "Range without a begin symbol?");
+ assert(End && "Range without an end symbol?");
+ Asm->OutStreamer.EmitSymbolValue(Begin, Size);
+ Asm->OutStreamer.EmitSymbolValue(End, Size);
+ }
+ // And terminate the list with two 0 values.
+ Asm->OutStreamer.EmitIntValue(0, Size);
+ Asm->OutStreamer.EmitIntValue(0, Size);
+ }
}
}
// DWARF5 Experimental Separate Dwarf emitters.
-// This DIE has the following attributes: DW_AT_comp_dir, DW_AT_stmt_list,
-// DW_AT_low_pc, DW_AT_high_pc, DW_AT_ranges, DW_AT_dwo_name, DW_AT_dwo_id,
-// DW_AT_ranges_base, DW_AT_addr_base.
-CompileUnit *DwarfDebug::constructSkeletonCU(const CompileUnit *CU) {
+void DwarfDebug::initSkeletonUnit(const DwarfUnit &U, DIE &Die,
+ std::unique_ptr<DwarfUnit> NewU) {
+ NewU->addLocalString(Die, dwarf::DW_AT_GNU_dwo_name,
+ U.getCUNode().getSplitDebugFilename());
- DIE *Die = new DIE(dwarf::DW_TAG_compile_unit);
- CompileUnit *NewCU = new CompileUnit(CU->getUniqueID(), Die, CU->getNode(),
- Asm, this, &SkeletonHolder);
+ if (!CompilationDir.empty())
+ NewU->addLocalString(Die, dwarf::DW_AT_comp_dir, CompilationDir);
- NewCU->addLocalString(Die, dwarf::DW_AT_GNU_dwo_name,
- CU->getNode().getSplitDebugFilename());
+ addGnuPubAttributes(*NewU, Die);
- // Relocate to the beginning of the addr_base section, else 0 for the
- // beginning of the one for this compile unit.
- if (Asm->MAI->doesDwarfUseRelocationsAcrossSections())
- NewCU->addSectionLabel(Die, dwarf::DW_AT_GNU_addr_base,
- DwarfAddrSectionSym);
- else
- NewCU->addSectionOffset(Die, dwarf::DW_AT_GNU_addr_base, 0);
-
- // 2.17.1 requires that we use DW_AT_low_pc for a single entry point
- // into an entity. We're using 0, or a NULL label for this.
- NewCU->addUInt(Die, dwarf::DW_AT_low_pc, dwarf::DW_FORM_addr, 0);
-
- // DW_AT_stmt_list is a offset of line number information for this
- // compile unit in debug_line section.
- // FIXME: Should handle multiple compile units.
- if (Asm->MAI->doesDwarfUseRelocationsAcrossSections())
- NewCU->addSectionLabel(Die, dwarf::DW_AT_stmt_list,
- DwarfLineSectionSym);
- else
- NewCU->addSectionOffset(Die, dwarf::DW_AT_stmt_list, 0);
+ SkeletonHolder.addUnit(std::move(NewU));
+}
- if (!CompilationDir.empty())
- NewCU->addLocalString(Die, dwarf::DW_AT_comp_dir, CompilationDir);
+// This DIE has the following attributes: DW_AT_comp_dir, DW_AT_stmt_list,
+// DW_AT_low_pc, DW_AT_high_pc, DW_AT_ranges, DW_AT_dwo_name, DW_AT_dwo_id,
+// DW_AT_addr_base, DW_AT_ranges_base.
+DwarfCompileUnit &DwarfDebug::constructSkeletonCU(const DwarfCompileUnit &CU) {
- // Flags to let the linker know we have emitted new style pubnames.
- if (GenerateGnuPubSections) {
- if (Asm->MAI->doesDwarfUseRelocationsAcrossSections())
- NewCU->addSectionLabel(
- Die, dwarf::DW_AT_GNU_pubnames,
- Asm->GetTempSymbol("gnu_pubnames", NewCU->getUniqueID()));
- else
- NewCU->addSectionDelta(
- Die, dwarf::DW_AT_GNU_pubnames,
- Asm->GetTempSymbol("gnu_pubnames", NewCU->getUniqueID()),
- DwarfGnuPubNamesSectionSym);
-
- if (Asm->MAI->doesDwarfUseRelocationsAcrossSections())
- NewCU->addSectionLabel(
- Die, dwarf::DW_AT_GNU_pubtypes,
- Asm->GetTempSymbol("gnu_pubtypes", NewCU->getUniqueID()));
- else
- NewCU->addSectionDelta(
- Die, dwarf::DW_AT_GNU_pubtypes,
- Asm->GetTempSymbol("gnu_pubtypes", NewCU->getUniqueID()),
- DwarfGnuPubTypesSectionSym);
- }
+ auto OwnedUnit = make_unique<DwarfCompileUnit>(
+ CU.getUniqueID(), CU.getCUNode(), Asm, this, &SkeletonHolder);
+ DwarfCompileUnit &NewCU = *OwnedUnit;
+ NewCU.initSection(Asm->getObjFileLowering().getDwarfInfoSection(),
+ DwarfInfoSectionSym);
- // Flag if we've emitted any ranges and their location for the compile unit.
- if (DebugRangeSymbols.size()) {
- if (Asm->MAI->doesDwarfUseRelocationsAcrossSections())
- NewCU->addSectionLabel(Die, dwarf::DW_AT_GNU_ranges_base,
- DwarfDebugRangeSectionSym);
- else
- NewCU->addUInt(Die, dwarf::DW_AT_GNU_ranges_base, dwarf::DW_FORM_data4,
- 0);
- }
+ NewCU.initStmtList(DwarfLineSectionSym);
- SkeletonHolder.addUnit(NewCU);
- SkeletonCUs.push_back(NewCU);
+ initSkeletonUnit(CU, NewCU.getUnitDie(), std::move(OwnedUnit));
return NewCU;
}
-void DwarfDebug::emitSkeletonAbbrevs(const MCSection *Section) {
- assert(useSplitDwarf() && "No split dwarf debug info?");
- emitAbbrevs(Section, &SkeletonAbbrevs);
+// This DIE has the following attributes: DW_AT_comp_dir, DW_AT_dwo_name,
+// DW_AT_addr_base.
+DwarfTypeUnit &DwarfDebug::constructSkeletonTU(DwarfTypeUnit &TU) {
+ DwarfCompileUnit &CU = static_cast<DwarfCompileUnit &>(
+ *SkeletonHolder.getUnits()[TU.getCU().getUniqueID()]);
+
+ auto OwnedUnit = make_unique<DwarfTypeUnit>(TU.getUniqueID(), CU, Asm, this,
+ &SkeletonHolder);
+ DwarfTypeUnit &NewTU = *OwnedUnit;
+ NewTU.setTypeSignature(TU.getTypeSignature());
+ NewTU.setType(nullptr);
+ NewTU.initSection(
+ Asm->getObjFileLowering().getDwarfTypesSection(TU.getTypeSignature()));
+
+ initSkeletonUnit(TU, NewTU.getUnitDie(), std::move(OwnedUnit));
+ return NewTU;
}
// Emit the .debug_info.dwo section for separated dwarf. This contains the
// compile units that would normally be in debug_info.
void DwarfDebug::emitDebugInfoDWO() {
assert(useSplitDwarf() && "No split dwarf debug info?");
- InfoHolder.emitUnits(this, Asm->getObjFileLowering().getDwarfInfoDWOSection(),
- Asm->getObjFileLowering().getDwarfAbbrevDWOSection(),
- DwarfAbbrevDWOSectionSym);
+ // Don't pass an abbrev symbol; use a constant zero instead, so that no
+ // relocations are emitted into the dwo file.
+ InfoHolder.emitUnits(this, /* AbbrevSymbol */ nullptr);
}
// Emit the .debug_abbrev.dwo section for separated dwarf. This contains the
// abbreviations for the .debug_info.dwo section.
void DwarfDebug::emitDebugAbbrevDWO() {
assert(useSplitDwarf() && "No split dwarf?");
- emitAbbrevs(Asm->getObjFileLowering().getDwarfAbbrevDWOSection(),
- &Abbreviations);
+ InfoHolder.emitAbbrevs(Asm->getObjFileLowering().getDwarfAbbrevDWOSection());
+}
+
+void DwarfDebug::emitDebugLineDWO() {
+ assert(useSplitDwarf() && "No split dwarf?");
+ Asm->OutStreamer.SwitchSection(
+ Asm->getObjFileLowering().getDwarfLineDWOSection());
+ SplitTypeUnitFileTable.Emit(Asm->OutStreamer);
}
// Emit the .debug_str.dwo section for separated dwarf. This contains the
@@ -3065,9 +2347,152 @@ void DwarfDebug::emitDebugAbbrevDWO() {
// sections.
void DwarfDebug::emitDebugStrDWO() {
assert(useSplitDwarf() && "No split dwarf?");
- const MCSection *OffSec = Asm->getObjFileLowering()
- .getDwarfStrOffDWOSection();
+ const MCSection *OffSec =
+ Asm->getObjFileLowering().getDwarfStrOffDWOSection();
const MCSymbol *StrSym = DwarfStrSectionSym;
InfoHolder.emitStrings(Asm->getObjFileLowering().getDwarfStrDWOSection(),
OffSec, StrSym);
}
+
+MCDwarfDwoLineTable *DwarfDebug::getDwoLineTable(const DwarfCompileUnit &CU) {
+ if (!useSplitDwarf())
+ return nullptr;
+ if (SingleCU)
+ SplitTypeUnitFileTable.setCompilationDir(CU.getCUNode().getDirectory());
+ return &SplitTypeUnitFileTable;
+}
+
+static uint64_t makeTypeSignature(StringRef Identifier) {
+ MD5 Hash;
+ Hash.update(Identifier);
+ // ... take the least significant 8 bytes and return those. Our MD5
+ // implementation always returns its results in little endian, swap bytes
+ // appropriately.
+ MD5::MD5Result Result;
+ Hash.final(Result);
+ return *reinterpret_cast<support::ulittle64_t *>(Result + 8);
+}
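// A small sketch of what the signature read above amounts to, with d[] being
// the 16 MD5 digest bytes (the name d is invented for the example):
//   uint64_t Sig = 0;
//   for (int i = 0; i < 8; ++i)
//     Sig |= uint64_t(d[8 + i]) << (8 * i);
// i.e. the low 8 bytes of the digest interpreted as a little-endian integer,
// which is what reinterpreting Result + 8 as support::ulittle64_t does.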
+
+void DwarfDebug::addDwarfTypeUnitType(DwarfCompileUnit &CU,
+ StringRef Identifier, DIE &RefDie,
+ DICompositeType CTy) {
+ // Fast path if we're building some type units and one has already used the
+ // address pool we know we're going to throw away all this work anyway, so
+ // don't bother building dependent types.
+ if (!TypeUnitsUnderConstruction.empty() && AddrPool.hasBeenUsed())
+ return;
+
+ const DwarfTypeUnit *&TU = DwarfTypeUnits[CTy];
+ if (TU) {
+ CU.addDIETypeSignature(RefDie, *TU);
+ return;
+ }
+
+ bool TopLevelType = TypeUnitsUnderConstruction.empty();
+ AddrPool.resetUsedFlag();
+
+ auto OwnedUnit =
+ make_unique<DwarfTypeUnit>(InfoHolder.getUnits().size(), CU, Asm, this,
+ &InfoHolder, getDwoLineTable(CU));
+ DwarfTypeUnit &NewTU = *OwnedUnit;
+ DIE &UnitDie = NewTU.getUnitDie();
+ TU = &NewTU;
+ TypeUnitsUnderConstruction.push_back(
+ std::make_pair(std::move(OwnedUnit), CTy));
+
+ NewTU.addUInt(UnitDie, dwarf::DW_AT_language, dwarf::DW_FORM_data2,
+ CU.getLanguage());
+
+ uint64_t Signature = makeTypeSignature(Identifier);
+ NewTU.setTypeSignature(Signature);
+
+ if (!useSplitDwarf())
+ CU.applyStmtList(UnitDie);
+
+ // FIXME: Skip using COMDAT groups for type units in the .dwo file once tools
+ // such as DWP ( http://gcc.gnu.org/wiki/DebugFissionDWP ) can cope with it.
+ NewTU.initSection(
+ useSplitDwarf()
+ ? Asm->getObjFileLowering().getDwarfTypesDWOSection(Signature)
+ : Asm->getObjFileLowering().getDwarfTypesSection(Signature));
+
+ NewTU.setType(NewTU.createTypeDIE(CTy));
+
+ if (TopLevelType) {
+ auto TypeUnitsToAdd = std::move(TypeUnitsUnderConstruction);
+ TypeUnitsUnderConstruction.clear();
+
+ // Types referencing entries in the address table cannot be placed in type
+ // units.
+ if (AddrPool.hasBeenUsed()) {
+
+ // Remove all the types built while building this type.
+ // This is pessimistic as some of these types might not be dependent on
+ // the type that used an address.
+ for (const auto &TU : TypeUnitsToAdd)
+ DwarfTypeUnits.erase(TU.second);
+
+ // Construct this type in the CU directly.
+ // This is inefficient because all the dependent types will be rebuilt
+ // from scratch, including building them in type units, discovering that
+ // they depend on addresses, throwing them out and rebuilding them.
+ CU.constructTypeDIE(RefDie, CTy);
+ return;
+ }
+
+ // If the type wasn't dependent on fission addresses, finish adding the type
+ // and all its dependent types.
+ for (auto &TU : TypeUnitsToAdd) {
+ if (useSplitDwarf())
+ TU.first->setSkeleton(constructSkeletonTU(*TU.first));
+ InfoHolder.addUnit(std::move(TU.first));
+ }
+ }
+ CU.addDIETypeSignature(RefDie, NewTU);
+}
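
The flow above is effectively transactional: dependent type units are stacked in TypeUnitsUnderConstruction while the top-level type is built, and if anything in the batch touches the address pool the whole batch is dropped and the type is constructed directly in the CU. A rough sketch of that rollback pattern, using illustrative stand-in types rather than the LLVM classes:

    #include <string>
    #include <utility>
    #include <vector>

    struct TypeUnit { std::string Name; };       // stand-in for DwarfTypeUnit

    struct TypeUnitBuilder {
      std::vector<TypeUnit> UnderConstruction;   // batch being built
      bool AddressPoolUsed = false;

      // Returns true if the batch was committed, false if it was rolled back
      // because some member referenced the address pool.
      bool buildTopLevel(const std::string &Name) {
        AddressPoolUsed = false;
        UnderConstruction.push_back({Name});
        // ... building may recurse, push dependent units onto the batch, and
        // set AddressPoolUsed = true ...
        std::vector<TypeUnit> Batch = std::move(UnderConstruction);
        UnderConstruction.clear();
        if (AddressPoolUsed)
          return false;                          // caller emits into the CU
        for (TypeUnit &TU : Batch)
          commit(std::move(TU));
        return true;
      }

      void commit(TypeUnit TU) { (void)TU; /* hand off to the output file */ }
    };
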
+
+void DwarfDebug::attachLowHighPC(DwarfCompileUnit &Unit, DIE &D,
+ MCSymbol *Begin, MCSymbol *End) {
+ assert(Begin && "Begin label should not be null!");
+ assert(End && "End label should not be null!");
+ assert(Begin->isDefined() && "Invalid starting label");
+ assert(End->isDefined() && "Invalid end label");
+
+ Unit.addLabelAddress(D, dwarf::DW_AT_low_pc, Begin);
+ if (DwarfVersion < 4)
+ Unit.addLabelAddress(D, dwarf::DW_AT_high_pc, End);
+ else
+ Unit.addLabelDelta(D, dwarf::DW_AT_high_pc, End, Begin);
+}
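
The version check above reflects a DWARF encoding difference: before DWARF 4, DW_AT_high_pc is an address, while from DWARF 4 on it may be an unsigned offset from DW_AT_low_pc. A hedged sketch of the same branch against a hypothetical emitter interface (the names below are illustrative, not LLVM's API):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical sink for attribute values; stands in for the DIE machinery.
    struct AttrSink {
      void addAddress(const char *Attr, uint64_t Addr) {
        std::printf("%s = address 0x%llx\n", Attr, (unsigned long long)Addr);
      }
      void addUData(const char *Attr, uint64_t Value) {
        std::printf("%s = constant %llu\n", Attr, (unsigned long long)Value);
      }
    };

    // Emit function bounds either as two addresses (DWARF 2/3) or as an
    // address plus a length (DWARF 4+), matching the branch in attachLowHighPC.
    void emitBounds(AttrSink &S, unsigned DwarfVersion, uint64_t Begin,
                    uint64_t End) {
      S.addAddress("DW_AT_low_pc", Begin);
      if (DwarfVersion < 4)
        S.addAddress("DW_AT_high_pc", End);        // address form
      else
        S.addUData("DW_AT_high_pc", End - Begin);  // offset from DW_AT_low_pc
    }
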
+
+// Accelerator table mutators - add each name along with its companion
+// DIE to the proper table while ensuring that the name that we're going
+// to reference is in the string table. We do this since the names we
+// add may not be identical to the names in the DIE.
+void DwarfDebug::addAccelName(StringRef Name, const DIE &Die) {
+ if (!useDwarfAccelTables())
+ return;
+ AccelNames.AddName(Name, InfoHolder.getStringPool().getSymbol(*Asm, Name),
+ &Die);
+}
+
+void DwarfDebug::addAccelObjC(StringRef Name, const DIE &Die) {
+ if (!useDwarfAccelTables())
+ return;
+ AccelObjC.AddName(Name, InfoHolder.getStringPool().getSymbol(*Asm, Name),
+ &Die);
+}
+
+void DwarfDebug::addAccelNamespace(StringRef Name, const DIE &Die) {
+ if (!useDwarfAccelTables())
+ return;
+ AccelNamespace.AddName(Name, InfoHolder.getStringPool().getSymbol(*Asm, Name),
+ &Die);
+}
+
+void DwarfDebug::addAccelType(StringRef Name, const DIE &Die, char Flags) {
+ if (!useDwarfAccelTables())
+ return;
+ AccelTypes.AddName(Name, InfoHolder.getStringPool().getSymbol(*Asm, Name),
+ &Die);
+}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
index cebac39..f2aa808 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
@@ -14,44 +14,50 @@
#ifndef CODEGEN_ASMPRINTER_DWARFDEBUG_H__
#define CODEGEN_ASMPRINTER_DWARFDEBUG_H__
+#include "DwarfFile.h"
+#include "AsmPrinterHandler.h"
#include "DIE.h"
+#include "DbgValueHistoryCalculator.h"
+#include "DebugLocEntry.h"
+#include "DebugLocList.h"
+#include "DwarfAccelTable.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/ADT/FoldingSet.h"
#include "llvm/CodeGen/LexicalScopes.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MachineLocation.h"
+#include "llvm/MC/MCDwarf.h"
#include "llvm/Support/Allocator.h"
-#include "llvm/Support/DebugLoc.h"
+
+#include <memory>
namespace llvm {
-class CompileUnit;
+class AsmPrinter;
+class ByteStreamer;
class ConstantInt;
class ConstantFP;
-class DbgVariable;
-class MachineFrameInfo;
+class DwarfCompileUnit;
+class DwarfDebug;
+class DwarfTypeUnit;
+class DwarfUnit;
class MachineModuleInfo;
-class MachineOperand;
-class MCAsmInfo;
-class DIEAbbrev;
-class DIE;
-class DIEBlock;
-class DIEEntry;
//===----------------------------------------------------------------------===//
/// \brief This class is used to record source line correspondence.
class SrcLineInfo {
- unsigned Line; // Source line number.
- unsigned Column; // Source column.
- unsigned SourceID; // Source ID number.
- MCSymbol *Label; // Label in code ID number.
+ unsigned Line; // Source line number.
+ unsigned Column; // Source column.
+ unsigned SourceID; // Source ID number.
+ MCSymbol *Label; // Label in code ID number.
public:
SrcLineInfo(unsigned L, unsigned C, unsigned S, MCSymbol *label)
- : Line(L), Column(C), SourceID(S), Label(label) {}
+ : Line(L), Column(C), SourceID(S), Label(label) {}
// Accessors
unsigned getLine() const { return Line; }
@@ -60,124 +66,47 @@ public:
MCSymbol *getLabel() const { return Label; }
};
-/// \brief This struct describes location entries emitted in the .debug_loc
-/// section.
-class DotDebugLocEntry {
- // Begin and end symbols for the address range that this location is valid.
- const MCSymbol *Begin;
- const MCSymbol *End;
-
- // Type of entry that this represents.
- enum EntryType {
- E_Location,
- E_Integer,
- E_ConstantFP,
- E_ConstantInt
- };
- enum EntryType EntryKind;
-
- union {
- int64_t Int;
- const ConstantFP *CFP;
- const ConstantInt *CIP;
- } Constants;
-
- // The location in the machine frame.
- MachineLocation Loc;
-
- // The variable to which this location entry corresponds.
- const MDNode *Variable;
-
- // Whether this location has been merged.
- bool Merged;
-
-public:
- DotDebugLocEntry() : Begin(0), End(0), Variable(0), Merged(false) {
- Constants.Int = 0;
- }
- DotDebugLocEntry(const MCSymbol *B, const MCSymbol *E, MachineLocation &L,
- const MDNode *V)
- : Begin(B), End(E), Loc(L), Variable(V), Merged(false) {
- Constants.Int = 0;
- EntryKind = E_Location;
- }
- DotDebugLocEntry(const MCSymbol *B, const MCSymbol *E, int64_t i)
- : Begin(B), End(E), Variable(0), Merged(false) {
- Constants.Int = i;
- EntryKind = E_Integer;
- }
- DotDebugLocEntry(const MCSymbol *B, const MCSymbol *E, const ConstantFP *FPtr)
- : Begin(B), End(E), Variable(0), Merged(false) {
- Constants.CFP = FPtr;
- EntryKind = E_ConstantFP;
- }
- DotDebugLocEntry(const MCSymbol *B, const MCSymbol *E,
- const ConstantInt *IPtr)
- : Begin(B), End(E), Variable(0), Merged(false) {
- Constants.CIP = IPtr;
- EntryKind = E_ConstantInt;
- }
-
- /// \brief Empty entries are also used as a trigger to emit temp label. Such
- /// labels are referenced is used to find debug_loc offset for a given DIE.
- bool isEmpty() { return Begin == 0 && End == 0; }
- bool isMerged() { return Merged; }
- void Merge(DotDebugLocEntry *Next) {
- if (!(Begin && Loc == Next->Loc && End == Next->Begin))
- return;
- Next->Begin = Begin;
- Merged = true;
- }
- bool isLocation() const { return EntryKind == E_Location; }
- bool isInt() const { return EntryKind == E_Integer; }
- bool isConstantFP() const { return EntryKind == E_ConstantFP; }
- bool isConstantInt() const { return EntryKind == E_ConstantInt; }
- int64_t getInt() const { return Constants.Int; }
- const ConstantFP *getConstantFP() const { return Constants.CFP; }
- const ConstantInt *getConstantInt() const { return Constants.CIP; }
- const MDNode *getVariable() const { return Variable; }
- const MCSymbol *getBeginSym() const { return Begin; }
- const MCSymbol *getEndSym() const { return End; }
- MachineLocation getLoc() const { return Loc; }
-};
-
//===----------------------------------------------------------------------===//
/// \brief This class is used to track local variable information.
class DbgVariable {
- DIVariable Var; // Variable Descriptor.
- DIE *TheDIE; // Variable DIE.
- unsigned DotDebugLocOffset; // Offset in DotDebugLocEntries.
- DbgVariable *AbsVar; // Corresponding Abstract variable, if any.
- const MachineInstr *MInsn; // DBG_VALUE instruction of the variable.
+ DIVariable Var; // Variable Descriptor.
+ DIE *TheDIE; // Variable DIE.
+ unsigned DotDebugLocOffset; // Offset in DotDebugLocEntries.
+ const MachineInstr *MInsn; // DBG_VALUE instruction of the variable.
int FrameIndex;
DwarfDebug *DD;
+
public:
- // AbsVar may be NULL.
- DbgVariable(DIVariable V, DbgVariable *AV, DwarfDebug *DD)
- : Var(V), TheDIE(0), DotDebugLocOffset(~0U), AbsVar(AV), MInsn(0),
- FrameIndex(~0), DD(DD) {}
+ /// Construct a DbgVariable from a DIVariable.
+ DbgVariable(DIVariable V, DwarfDebug *DD)
+ : Var(V), TheDIE(nullptr), DotDebugLocOffset(~0U), MInsn(nullptr),
+ FrameIndex(~0), DD(DD) {}
+
+ /// Construct a DbgVariable from a DEBUG_VALUE instruction.
+ DbgVariable(const MachineInstr *DbgValue, DwarfDebug *DD)
+ : Var(DbgValue->getDebugVariable()), TheDIE(nullptr),
+ DotDebugLocOffset(~0U), MInsn(DbgValue), FrameIndex(~0), DD(DD) {}
// Accessors.
- DIVariable getVariable() const { return Var; }
- void setDIE(DIE *D) { TheDIE = D; }
- DIE *getDIE() const { return TheDIE; }
- void setDotDebugLocOffset(unsigned O) { DotDebugLocOffset = O; }
- unsigned getDotDebugLocOffset() const { return DotDebugLocOffset; }
- StringRef getName() const { return Var.getName(); }
- DbgVariable *getAbstractVariable() const { return AbsVar; }
- const MachineInstr *getMInsn() const { return MInsn; }
- void setMInsn(const MachineInstr *M) { MInsn = M; }
- int getFrameIndex() const { return FrameIndex; }
- void setFrameIndex(int FI) { FrameIndex = FI; }
+ DIVariable getVariable() const { return Var; }
+ void setDIE(DIE &D) { TheDIE = &D; }
+ DIE *getDIE() const { return TheDIE; }
+ void setDotDebugLocOffset(unsigned O) { DotDebugLocOffset = O; }
+ unsigned getDotDebugLocOffset() const { return DotDebugLocOffset; }
+ StringRef getName() const { return Var.getName(); }
+ const MachineInstr *getMInsn() const { return MInsn; }
+ int getFrameIndex() const { return FrameIndex; }
+ void setFrameIndex(int FI) { FrameIndex = FI; }
// Translate tag to proper Dwarf tag.
- uint16_t getTag() const {
+ dwarf::Tag getTag() const {
if (Var.getTag() == dwarf::DW_TAG_arg_variable)
return dwarf::DW_TAG_formal_parameter;
return dwarf::DW_TAG_variable;
}
/// \brief Return true if DbgVariable is artificial.
- bool isArtificial() const {
+ bool isArtificial() const {
if (Var.isArtificial())
return true;
if (getType().isArtificial())
@@ -185,7 +114,7 @@ public:
return false;
}
- bool isObjectPointer() const {
+ bool isObjectPointer() const {
if (Var.isObjectPointer())
return true;
if (getType().isObjectPointer())
@@ -193,21 +122,16 @@ public:
return false;
}
- bool variableHasComplexAddress() const {
+ bool variableHasComplexAddress() const {
assert(Var.isVariable() && "Invalid complex DbgVariable!");
return Var.hasComplexAddress();
}
- bool isBlockByrefVariable() const {
- assert(Var.isVariable() && "Invalid complex DbgVariable!");
- return Var.isBlockByrefVariable();
- }
- unsigned getNumAddrElements() const {
+ bool isBlockByrefVariable() const;
+ unsigned getNumAddrElements() const {
assert(Var.isVariable() && "Invalid complex DbgVariable!");
return Var.getNumAddrElements();
}
- uint64_t getAddrElement(unsigned i) const {
- return Var.getAddrElement(i);
- }
+ uint64_t getAddrElement(unsigned i) const { return Var.getAddrElement(i); }
DIType getType() const;
private:
@@ -216,101 +140,16 @@ private:
template <typename T> T resolve(DIRef<T> Ref) const;
};
-/// \brief Collects and handles information specific to a particular
-/// collection of units.
-class DwarfUnits {
- // Target of Dwarf emission, used for sizing of abbreviations.
- AsmPrinter *Asm;
-
- // Used to uniquely define abbreviations.
- FoldingSet<DIEAbbrev> *AbbreviationsSet;
-
- // A list of all the unique abbreviations in use.
- std::vector<DIEAbbrev *> &Abbreviations;
-
- // A pointer to all units in the section.
- SmallVector<CompileUnit *, 1> CUs;
-
- // Collection of strings for this unit and assorted symbols.
- // A String->Symbol mapping of strings used by indirect
- // references.
- typedef StringMap<std::pair<MCSymbol*, unsigned>,
- BumpPtrAllocator&> StrPool;
- StrPool StringPool;
- unsigned NextStringPoolNumber;
- std::string StringPref;
-
- // Collection of addresses for this unit and assorted labels.
- // A Symbol->unsigned mapping of addresses used by indirect
- // references.
- typedef DenseMap<const MCExpr *, unsigned> AddrPool;
- AddrPool AddressPool;
- unsigned NextAddrPoolNumber;
-
-public:
- DwarfUnits(AsmPrinter *AP, FoldingSet<DIEAbbrev> *AS,
- std::vector<DIEAbbrev *> &A, const char *Pref,
- BumpPtrAllocator &DA)
- : Asm(AP), AbbreviationsSet(AS), Abbreviations(A), StringPool(DA),
- NextStringPoolNumber(0), StringPref(Pref), AddressPool(),
- NextAddrPoolNumber(0) {}
-
- /// \brief Compute the size and offset of a DIE given an incoming Offset.
- unsigned computeSizeAndOffset(DIE *Die, unsigned Offset);
-
- /// \brief Compute the size and offset of all the DIEs.
- void computeSizeAndOffsets();
-
- /// \brief Define a unique number for the abbreviation.
- void assignAbbrevNumber(DIEAbbrev &Abbrev);
-
- /// \brief Add a unit to the list of CUs.
- void addUnit(CompileUnit *CU) { CUs.push_back(CU); }
-
- /// \brief Emit all of the units to the section listed with the given
- /// abbreviation section.
- void emitUnits(DwarfDebug *DD, const MCSection *USection,
- const MCSection *ASection, const MCSymbol *ASectionSym);
-
- /// \brief Emit all of the strings to the section given.
- void emitStrings(const MCSection *StrSection, const MCSection *OffsetSection,
- const MCSymbol *StrSecSym);
-
- /// \brief Emit all of the addresses to the section given.
- void emitAddresses(const MCSection *AddrSection);
-
- /// \brief Returns the entry into the start of the pool.
- MCSymbol *getStringPoolSym();
-
- /// \brief Returns an entry into the string pool with the given
- /// string text.
- MCSymbol *getStringPoolEntry(StringRef Str);
-
- /// \brief Returns the index into the string pool with the given
- /// string text.
- unsigned getStringPoolIndex(StringRef Str);
-
- /// \brief Returns the string pool.
- StrPool *getStringPool() { return &StringPool; }
-
- /// \brief Returns the index into the address pool with the given
- /// label/symbol.
- unsigned getAddrPoolIndex(const MCExpr *Sym);
- unsigned getAddrPoolIndex(const MCSymbol *Sym);
-
- /// \brief Returns the address pool.
- AddrPool *getAddrPool() { return &AddressPool; }
-};
/// \brief Helper used to pair up a symbol and its DWARF compile unit.
struct SymbolCU {
- SymbolCU(CompileUnit *CU, const MCSymbol *Sym) : Sym(Sym), CU(CU) {}
+ SymbolCU(DwarfCompileUnit *CU, const MCSymbol *Sym) : Sym(Sym), CU(CU) {}
const MCSymbol *Sym;
- CompileUnit *CU;
+ DwarfCompileUnit *CU;
};
/// \brief Collects and handles dwarf debug information.
-class DwarfDebug {
+class DwarfDebug : public AsmPrinterHandler {
// Target of Dwarf emission.
AsmPrinter *Asm;
@@ -320,40 +159,31 @@ class DwarfDebug {
// All DIEValues are allocated through this allocator.
BumpPtrAllocator DIEValueAllocator;
- // Handle to the a compile unit used for the inline extension handling.
- CompileUnit *FirstCU;
+ // Handle to the compile unit used for the inline extension handling;
+ // this is just so that the DIEValue allocator has a place to store
+ // the particular elements.
+ // FIXME: Store these off of DwarfDebug instead?
+ DwarfCompileUnit *FirstCU;
- // Maps MDNode with its corresponding CompileUnit.
- DenseMap <const MDNode *, CompileUnit *> CUMap;
+ // Maps MDNode with its corresponding DwarfCompileUnit.
+ MapVector<const MDNode *, DwarfCompileUnit *> CUMap;
- // Maps subprogram MDNode with its corresponding CompileUnit.
- DenseMap <const MDNode *, CompileUnit *> SPMap;
+ // Maps subprogram MDNode with its corresponding DwarfCompileUnit.
+ DenseMap<const MDNode *, DwarfCompileUnit *> SPMap;
- // Maps a CU DIE with its corresponding CompileUnit.
- DenseMap <const DIE *, CompileUnit *> CUDieMap;
+ // Maps a CU DIE with its corresponding DwarfCompileUnit.
+ DenseMap<const DIE *, DwarfCompileUnit *> CUDieMap;
- /// Maps MDNodes for type sysstem with the corresponding DIEs. These DIEs can
+ /// Maps MDNodes for type system with the corresponding DIEs. These DIEs can
/// be shared across CUs, that is why we keep the map here instead
- /// of in CompileUnit.
+ /// of in DwarfCompileUnit.
DenseMap<const MDNode *, DIE *> MDTypeNodeToDieMap;
- // Used to uniquely define abbreviations.
- FoldingSet<DIEAbbrev> AbbreviationsSet;
-
- // A list of all the unique abbreviations in use.
- std::vector<DIEAbbrev *> Abbreviations;
-
- // Stores the current file ID for a given compile unit.
- DenseMap <unsigned, unsigned> FileIDCUMap;
- // Source id map, i.e. CUID, source filename and directory,
- // separated by a zero byte, mapped to a unique id.
- StringMap<unsigned, BumpPtrAllocator&> SourceIdMap;
-
// List of all labels used in aranges generation.
std::vector<SymbolCU> ArangeLabels;
// Size of each symbol emitted (for those symbols that have a specific size).
- DenseMap <const MCSymbol *, uint64_t> SymSize;
+ DenseMap<const MCSymbol *, uint64_t> SymSize;
// Provides a unique id per text section.
typedef DenseMap<const MCSection *, SmallVector<SymbolCU, 8> > SectionMapType;
@@ -368,15 +198,17 @@ class DwarfDebug {
DenseMap<const MDNode *, DIE *> AbstractSPDies;
// Collection of dbg variables of a scope.
- typedef DenseMap<LexicalScope *,
- SmallVector<DbgVariable *, 8> > ScopeVariablesMap;
+ typedef DenseMap<LexicalScope *, SmallVector<DbgVariable *, 8> >
+ ScopeVariablesMap;
ScopeVariablesMap ScopeVariables;
// Collection of abstract variables.
- DenseMap<const MDNode *, DbgVariable *> AbstractVariables;
+ DenseMap<const MDNode *, std::unique_ptr<DbgVariable>> AbstractVariables;
+ SmallVector<std::unique_ptr<DbgVariable>, 64> ConcreteVariables;
- // Collection of DotDebugLocEntry.
- SmallVector<DotDebugLocEntry, 4> DotDebugLocEntries;
+ // Collection of DebugLocEntry. Stored in a linked list so that DIELocLists
+ // can refer to them in spite of insertions into this list.
+ SmallVector<DebugLocList, 4> DotDebugLocEntries;
// Collection of subprogram DIEs that are marked (at the end of the module)
// as DW_AT_inline.
@@ -392,19 +224,10 @@ class DwarfDebug {
// Maps instruction with label emitted after instruction.
DenseMap<const MachineInstr *, MCSymbol *> LabelsAfterInsn;
- // Every user variable mentioned by a DBG_VALUE instruction in order of
- // appearance.
- SmallVector<const MDNode*, 8> UserVariables;
-
- // For each user variable, keep a list of DBG_VALUE instructions in order.
- // The list can also contain normal instructions that clobber the previous
- // DBG_VALUE.
- typedef DenseMap<const MDNode*, SmallVector<const MachineInstr*, 4> >
- DbgValueHistoryMap;
+ // History of DBG_VALUE and clobber instructions for each user variable.
+ // Variables are listed in order of appearance.
DbgValueHistoryMap DbgValues;
- SmallVector<const MCSymbol *, 8> DebugRangeSymbols;
-
// Previous instruction's location information. This is used to determine
// label location to indicate scope boundaries in dwarf debug info.
DebugLoc PrevInstLoc;
@@ -414,6 +237,19 @@ class DwarfDebug {
// body.
DebugLoc PrologEndLoc;
+ // If nonnull, stores the current machine function we're processing.
+ const MachineFunction *CurFn;
+
+ // If nonnull, stores the current machine instruction we're processing.
+ const MachineInstr *CurMI;
+
+ // If nonnull, stores the section into which the previous function was
+ // emitted.
+ const MCSection *PrevSection;
+
+ // If nonnull, stores the CU in which the previous subprogram was contained.
+ const DwarfCompileUnit *PrevCU;
+
// Section Symbols: these are assembler temporary labels that are emitted at
// the beginning of each supported dwarf section. These are used to form
// section offsets and are created by EmitSectionLabels.
@@ -421,36 +257,50 @@ class DwarfDebug {
MCSymbol *DwarfStrSectionSym, *TextSectionSym, *DwarfDebugRangeSectionSym;
MCSymbol *DwarfDebugLocSectionSym, *DwarfLineSectionSym, *DwarfAddrSectionSym;
MCSymbol *FunctionBeginSym, *FunctionEndSym;
- MCSymbol *DwarfAbbrevDWOSectionSym, *DwarfStrDWOSectionSym;
+ MCSymbol *DwarfInfoDWOSectionSym, *DwarfAbbrevDWOSectionSym;
+ MCSymbol *DwarfStrDWOSectionSym;
MCSymbol *DwarfGnuPubNamesSectionSym, *DwarfGnuPubTypesSectionSym;
// As an optimization, there is no need to emit an entry in the directory
// table for the same directory as DW_AT_comp_dir.
StringRef CompilationDir;
- // Counter for assigning globally unique IDs for CUs.
- unsigned GlobalCUIndexCount;
+ // Counter for assigning globally unique IDs for ranges.
+ unsigned GlobalRangeCount;
// Holder for the file specific debug information.
- DwarfUnits InfoHolder;
+ DwarfFile InfoHolder;
// Holders for the various debug information flags that we might need to
// have exposed. See accessor functions below for description.
// Holder for imported entities.
typedef SmallVector<std::pair<const MDNode *, const MDNode *>, 32>
- ImportedEntityMap;
+ ImportedEntityMap;
ImportedEntityMap ScopesWithImportedEntities;
- // Holder for types that are going to be extracted out into a type unit.
- std::vector<DIE *> TypeUnits;
+ // Map from MDNodes for user-defined types to the type units that describe
+ // them.
+ DenseMap<const MDNode *, const DwarfTypeUnit *> DwarfTypeUnits;
+
+ SmallVector<std::pair<std::unique_ptr<DwarfTypeUnit>, DICompositeType>, 1>
+     TypeUnitsUnderConstruction;
// Whether to emit the pubnames/pubtypes sections.
bool HasDwarfPubSections;
+ // Whether or not to use AT_ranges for compilation units.
+ bool HasCURanges;
+
+ // Whether we emitted a function into a section other than the default
+ // text.
+ bool UsedNonDefaultText;
+
// Version of dwarf we're emitting.
unsigned DwarfVersion;
+ // Maps from a type identifier to the actual MDNode.
+ DITypeIdentifierMap TypeIdentifierMap;
+
// DWARF5 Experimental Options
bool HasDwarfAccelTables;
bool HasSplitDwarf;
@@ -460,50 +310,83 @@ class DwarfDebug {
// original object file, rather than things that are meant
// to be in the .dwo sections.
- // The CUs left in the original object file for separated debug info.
- SmallVector<CompileUnit *, 1> SkeletonCUs;
+ // Holder for the skeleton information.
+ DwarfFile SkeletonHolder;
- // Used to uniquely define abbreviations for the skeleton emission.
- FoldingSet<DIEAbbrev> SkeletonAbbrevSet;
+ /// Store file names for type units under fission in a line table header that
+ /// will be emitted into debug_line.dwo.
+ // FIXME: replace this with a map from comp_dir to table so that we can emit
+ // multiple tables during LTO each of which uses directory 0, referencing the
+ // comp_dir of all the type units that use it.
+ MCDwarfDwoLineTable SplitTypeUnitFileTable;
- // A list of all the unique abbreviations in use.
- std::vector<DIEAbbrev *> SkeletonAbbrevs;
+ // True iff there is only a single CU in this module.
+ bool SingleCU;
- // Holder for the skeleton information.
- DwarfUnits SkeletonHolder;
+ AddressPool AddrPool;
- // Maps from a type identifier to the actual MDNode.
- DITypeIdentifierMap TypeIdentifierMap;
+ DwarfAccelTable AccelNames;
+ DwarfAccelTable AccelObjC;
+ DwarfAccelTable AccelNamespace;
+ DwarfAccelTable AccelTypes;
-private:
+ MCDwarfDwoLineTable *getDwoLineTable(const DwarfCompileUnit &);
void addScopeVariable(LexicalScope *LS, DbgVariable *Var);
+ const SmallVectorImpl<std::unique_ptr<DwarfUnit>> &getUnits() {
+ return InfoHolder.getUnits();
+ }
+
/// \brief Find abstract variable associated with Var.
- DbgVariable *findAbstractVariable(DIVariable &Var, DebugLoc Loc);
+ DbgVariable *getExistingAbstractVariable(const DIVariable &DV,
+ DIVariable &Cleansed);
+ DbgVariable *getExistingAbstractVariable(const DIVariable &DV);
+ void createAbstractVariable(const DIVariable &DV, LexicalScope *Scope);
+ void ensureAbstractVariableIsCreated(const DIVariable &Var,
+ const MDNode *Scope);
+ void ensureAbstractVariableIsCreatedIfScoped(const DIVariable &Var,
+ const MDNode *Scope);
/// \brief Find DIE for the given subprogram and attach appropriate
/// DW_AT_low_pc and DW_AT_high_pc attributes. If there are global
/// variables in this scope then create and insert DIEs for these
/// variables.
- DIE *updateSubprogramScopeDIE(CompileUnit *SPCU, DISubprogram SP);
+ DIE &updateSubprogramScopeDIE(DwarfCompileUnit &SPCU, DISubprogram SP);
+
+ /// \brief A helper function to check whether the DIE for a given Scope is
+ /// going to be null.
+ bool isLexicalScopeDIENull(LexicalScope *Scope);
+
+ /// \brief A helper function to construct a RangeSpanList for a given
+ /// lexical scope.
+ void addScopeRangeList(DwarfCompileUnit &TheCU, DIE &ScopeDIE,
+ const SmallVectorImpl<InsnRange> &Range);
/// \brief Construct new DW_TAG_lexical_block for this scope and
/// attach DW_AT_low_pc/DW_AT_high_pc labels.
- DIE *constructLexicalScopeDIE(CompileUnit *TheCU, LexicalScope *Scope);
- /// A helper function to check whether the DIE for a given Scope is going
- /// to be null.
- bool isLexicalScopeDIENull(LexicalScope *Scope);
+ std::unique_ptr<DIE> constructLexicalScopeDIE(DwarfCompileUnit &TheCU,
+ LexicalScope *Scope);
/// \brief This scope represents inlined body of a function. Construct
/// DIE to represent this concrete inlined copy of the function.
- DIE *constructInlinedScopeDIE(CompileUnit *TheCU, LexicalScope *Scope);
+ std::unique_ptr<DIE> constructInlinedScopeDIE(DwarfCompileUnit &TheCU,
+ LexicalScope *Scope);
/// \brief Construct a DIE for this scope.
- DIE *constructScopeDIE(CompileUnit *TheCU, LexicalScope *Scope);
+ std::unique_ptr<DIE> constructScopeDIE(DwarfCompileUnit &TheCU,
+ LexicalScope *Scope);
+ void createAndAddScopeChildren(DwarfCompileUnit &TheCU, LexicalScope *Scope,
+ DIE &ScopeDIE);
+ /// \brief Construct a DIE for this abstract scope.
+ void constructAbstractSubprogramScopeDIE(DwarfCompileUnit &TheCU,
+ LexicalScope *Scope);
+ /// \brief Construct a DIE for this subprogram scope.
+ DIE &constructSubprogramScopeDIE(DwarfCompileUnit &TheCU,
+ LexicalScope *Scope);
/// A helper function to create children of a Scope DIE.
- DIE *createScopeChildrenDIE(CompileUnit *TheCU, LexicalScope *Scope,
- SmallVectorImpl<DIE*> &Children);
+ DIE *createScopeChildrenDIE(DwarfCompileUnit &TheCU, LexicalScope *Scope,
+ SmallVectorImpl<std::unique_ptr<DIE>> &Children);
/// \brief Emit initial Dwarf sections with a label at the start of each one.
void emitSectionLabels();
@@ -514,12 +397,13 @@ private:
/// \brief Compute the size and offset of all the DIEs.
void computeSizeAndOffsets();
- /// \brief Attach DW_AT_inline attribute with inlined subprogram DIEs.
- void computeInlinedDIEs();
-
/// \brief Collect info for variables that were optimized out.
void collectDeadVariables();
+ void finishVariableDefinitions();
+
+ void finishSubprogramDefinitions();
+
/// \brief Finish off debug information after all functions have been
/// processed.
void finalizeModuleInfo();
@@ -528,9 +412,6 @@ private:
/// open.
void endSections();
- /// \brief Emit a set of abbreviations to the specific section.
- void emitAbbrevs(const MCSection *, std::vector<DIEAbbrev*> *);
-
/// \brief Emit the debug info section.
void emitDebugInfo();
@@ -566,32 +447,42 @@ private:
/// index.
void emitDebugPubTypes(bool GnuStyle = false);
+ void
+ emitDebugPubSection(bool GnuStyle, const MCSection *PSec, StringRef Name,
+ const StringMap<const DIE *> &(DwarfUnit::*Accessor)()
+ const);
+
/// \brief Emit visible names into a debug str section.
void emitDebugStr();
/// \brief Emit visible names into a debug loc section.
void emitDebugLoc();
+ /// \brief Emit visible names into a debug loc dwo section.
+ void emitDebugLocDWO();
+
/// \brief Emit visible names into a debug aranges section.
void emitDebugARanges();
/// \brief Emit visible names into a debug ranges section.
void emitDebugRanges();
- /// \brief Emit visible names into a debug macinfo section.
- void emitDebugMacInfo();
-
/// \brief Emit inline info using custom format.
void emitDebugInlineInfo();
/// DWARF 5 Experimental Split Dwarf Emitters
+ /// \brief Initialize common features of skeleton units.
+ void initSkeletonUnit(const DwarfUnit &U, DIE &Die,
+ std::unique_ptr<DwarfUnit> NewU);
+
/// \brief Construct the split debug info compile unit for the debug info
/// section.
- CompileUnit *constructSkeletonCU(const CompileUnit *CU);
+ DwarfCompileUnit &constructSkeletonCU(const DwarfCompileUnit &CU);
- /// \brief Emit the local split abbreviations.
- void emitSkeletonAbbrevs(const MCSection *);
+ /// \brief Construct the skeleton type unit for split debug info, emitted
+ /// into the debug types section.
+ DwarfTypeUnit &constructSkeletonTU(DwarfTypeUnit &TU);
/// \brief Emit the debug info dwo section.
void emitDebugInfoDWO();
@@ -599,27 +490,30 @@ private:
/// \brief Emit the debug abbrev dwo section.
void emitDebugAbbrevDWO();
+ /// \brief Emit the debug line dwo section.
+ void emitDebugLineDWO();
+
/// \brief Emit the debug str dwo section.
void emitDebugStrDWO();
- /// \brief Create new CompileUnit for the given metadata node with tag
- /// DW_TAG_compile_unit.
- CompileUnit *constructCompileUnit(DICompileUnit DIUnit);
+ /// Add flags to let the linker know we have emitted new-style pubnames. Only
+ /// emit them here if we don't have a skeleton CU for split dwarf.
+ void addGnuPubAttributes(DwarfUnit &U, DIE &D) const;
- /// \brief Construct subprogram DIE.
- void constructSubprogramDIE(CompileUnit *TheCU, const MDNode *N);
+ /// \brief Create new DwarfCompileUnit for the given metadata node with tag
+ /// DW_TAG_compile_unit.
+ DwarfCompileUnit &constructDwarfCompileUnit(DICompileUnit DIUnit);
/// \brief Construct imported_module or imported_declaration DIE.
- void constructImportedEntityDIE(CompileUnit *TheCU, const MDNode *N);
+ void constructImportedEntityDIE(DwarfCompileUnit &TheCU, const MDNode *N);
/// \brief Construct import_module DIE.
- void constructImportedEntityDIE(CompileUnit *TheCU, const MDNode *N,
- DIE *Context);
+ void constructImportedEntityDIE(DwarfCompileUnit &TheCU, const MDNode *N,
+ DIE &Context);
/// \brief Construct import_module DIE.
- void constructImportedEntityDIE(CompileUnit *TheCU,
- const DIImportedEntity &Module,
- DIE *Context);
+ void constructImportedEntityDIE(DwarfCompileUnit &TheCU,
+ const DIImportedEntity &Module, DIE &Context);
/// \brief Register a source line with debug info. Returns the unique
/// label that was emitted and which provides correspondence to the
@@ -633,21 +527,18 @@ private:
/// \brief If Var is a current function argument then add it to the
/// CurrentFnArguments list.
- bool addCurrentFnArgument(const MachineFunction *MF,
- DbgVariable *Var, LexicalScope *Scope);
+ bool addCurrentFnArgument(DbgVariable *Var, LexicalScope *Scope);
/// \brief Populate LexicalScope entries with variables' info.
- void collectVariableInfo(const MachineFunction *,
- SmallPtrSet<const MDNode *, 16> &ProcessedVars);
+ void collectVariableInfo(SmallPtrSet<const MDNode *, 16> &ProcessedVars);
/// \brief Collect variable information from the side table maintained
/// by MMI.
- void collectVariableInfoFromMMITable(const MachineFunction * MF,
- SmallPtrSet<const MDNode *, 16> &P);
+ void collectVariableInfoFromMMITable(SmallPtrSet<const MDNode *, 16> &P);
/// \brief Ensure that a label will be emitted before MI.
void requestLabelBeforeInsn(const MachineInstr *MI) {
- LabelsBeforeInsn.insert(std::make_pair(MI, (MCSymbol*)0));
+ LabelsBeforeInsn.insert(std::make_pair(MI, nullptr));
}
/// \brief Return Label preceding the instruction.
@@ -655,18 +546,25 @@ private:
/// \brief Ensure that a label will be emitted after MI.
void requestLabelAfterInsn(const MachineInstr *MI) {
- LabelsAfterInsn.insert(std::make_pair(MI, (MCSymbol*)0));
+ LabelsAfterInsn.insert(std::make_pair(MI, nullptr));
}
/// \brief Return Label immediately following the instruction.
MCSymbol *getLabelAfterInsn(const MachineInstr *MI);
+ void attachRangesOrLowHighPC(DwarfCompileUnit &Unit, DIE &D,
+ const SmallVectorImpl<InsnRange> &Ranges);
+ void attachLowHighPC(DwarfCompileUnit &Unit, DIE &D, MCSymbol *Begin,
+ MCSymbol *End);
+
public:
//===--------------------------------------------------------------------===//
// Main entry points.
//
DwarfDebug(AsmPrinter *A, Module *M);
+ ~DwarfDebug() override;
+
void insertDIE(const MDNode *TypeMD, DIE *Die) {
MDTypeNodeToDieMap.insert(std::make_pair(TypeMD, Die));
}
@@ -679,62 +577,101 @@ public:
void beginModule();
/// \brief Emit all Dwarf sections that should come after the content.
- void endModule();
+ void endModule() override;
/// \brief Gather pre-function debug information.
- void beginFunction(const MachineFunction *MF);
+ void beginFunction(const MachineFunction *MF) override;
/// \brief Gather and emit post-function debug information.
- void endFunction(const MachineFunction *MF);
+ void endFunction(const MachineFunction *MF) override;
/// \brief Process beginning of an instruction.
- void beginInstruction(const MachineInstr *MI);
+ void beginInstruction(const MachineInstr *MI) override;
/// \brief Process end of an instruction.
- void endInstruction(const MachineInstr *MI);
+ void endInstruction() override;
/// \brief Add a DIE to the set of types that we're going to pull into
/// type units.
- void addTypeUnitType(DIE *Die) { TypeUnits.push_back(Die); }
+ void addDwarfTypeUnitType(DwarfCompileUnit &CU, StringRef Identifier,
+ DIE &Die, DICompositeType CTy);
/// \brief Add a label so that arange data can be generated for it.
void addArangeLabel(SymbolCU SCU) { ArangeLabels.push_back(SCU); }
/// \brief For symbols that have a size designated (e.g. common symbols),
/// this tracks that size.
- void setSymbolSize(const MCSymbol *Sym, uint64_t Size) { SymSize[Sym] = Size;}
-
- /// \brief Look up the source id with the given directory and source file
- /// names. If none currently exists, create a new id and insert it in the
- /// SourceIds map.
- unsigned getOrCreateSourceID(StringRef DirName, StringRef FullName,
- unsigned CUID);
+ void setSymbolSize(const MCSymbol *Sym, uint64_t Size) override {
+ SymSize[Sym] = Size;
+ }
/// \brief Recursively emits a debug information entry.
- void emitDIE(DIE *Die, ArrayRef<DIEAbbrev *> Abbrevs);
+ void emitDIE(DIE &Die);
// Experimental DWARF5 features.
/// \brief Returns whether or not to emit tables that dwarf consumers can
/// use to accelerate lookup.
- bool useDwarfAccelTables() { return HasDwarfAccelTables; }
+ bool useDwarfAccelTables() const { return HasDwarfAccelTables; }
/// \brief Returns whether or not to change the current debug info for the
/// split dwarf proposal support.
- bool useSplitDwarf() { return HasSplitDwarf; }
+ bool useSplitDwarf() const { return HasSplitDwarf; }
/// Returns the Dwarf Version.
unsigned getDwarfVersion() const { return DwarfVersion; }
+ /// Returns the section symbol for the .debug_loc section.
+ MCSymbol *getDebugLocSym() const { return DwarfDebugLocSectionSym; }
+
+ /// Returns the previous section that was emitted into.
+ const MCSection *getPrevSection() const { return PrevSection; }
+
+ /// Returns the previous CU that was being updated.
+ const DwarfCompileUnit *getPrevCU() const { return PrevCU; }
+
+ /// Returns the entries for the .debug_loc section.
+ const SmallVectorImpl<DebugLocList> &getDebugLocEntries() const {
+ return DotDebugLocEntries;
+ }
+
+ /// \brief Emit an entry for the debug loc section. This can be used to
+ /// handle an entry that's going to be emitted into the debug loc section.
+ void emitDebugLocEntry(ByteStreamer &Streamer, const DebugLocEntry &Entry);
+
+ /// Emit the location for a debug loc entry, including the size header.
+ void emitDebugLocEntryLocation(const DebugLocEntry &Entry);
+
/// Find the MDNode for the given reference.
template <typename T> T resolve(DIRef<T> Ref) const {
return Ref.resolve(TypeIdentifierMap);
}
+ /// \brief Return the TypeIdentifierMap.
+ const DITypeIdentifierMap &getTypeIdentifierMap() const {
+ return TypeIdentifierMap;
+ }
+
+ /// Find the DwarfCompileUnit for the given CU Die.
+ DwarfCompileUnit *lookupUnit(const DIE *CU) const {
+ return CUDieMap.lookup(CU);
+ }
/// isSubprogramContext - Return true if Context is either a subprogram
/// or another context nested inside a subprogram.
bool isSubprogramContext(const MDNode *Context);
+ void addSubprogramNames(DISubprogram SP, DIE &Die);
+
+ AddressPool &getAddressPool() { return AddrPool; }
+
+ void addAccelName(StringRef Name, const DIE &Die);
+
+ void addAccelObjC(StringRef Name, const DIE &Die);
+
+ void addAccelNamespace(StringRef Name, const DIE &Die);
+
+ void addAccelType(StringRef Name, const DIE &Die, char Flags);
};
} // End of namespace llvm
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.h
index 1575161..0440fce 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.h
@@ -14,135 +14,14 @@
#ifndef LLVM_CODEGEN_ASMPRINTER_DWARFEXCEPTION_H
#define LLVM_CODEGEN_ASMPRINTER_DWARFEXCEPTION_H
-#include "llvm/ADT/DenseMap.h"
+#include "EHStreamer.h"
#include "llvm/CodeGen/AsmPrinter.h"
-#include <vector>
namespace llvm {
-
-template <typename T> class SmallVectorImpl;
-struct LandingPadInfo;
-class MachineModuleInfo;
-class MachineInstr;
class MachineFunction;
-class MCAsmInfo;
-class MCExpr;
-class MCSymbol;
-class Function;
class ARMTargetStreamer;
-class AsmPrinter;
-
-//===----------------------------------------------------------------------===//
-/// DwarfException - Emits Dwarf exception handling directives.
-///
-class DwarfException {
-protected:
- /// Asm - Target of Dwarf emission.
- AsmPrinter *Asm;
-
- /// MMI - Collected machine module information.
- MachineModuleInfo *MMI;
-
- /// SharedTypeIds - How many leading type ids two landing pads have in common.
- static unsigned SharedTypeIds(const LandingPadInfo *L,
- const LandingPadInfo *R);
-
- /// PadLT - Order landing pads lexicographically by type id.
- static bool PadLT(const LandingPadInfo *L, const LandingPadInfo *R);
-
- /// PadRange - Structure holding a try-range and the associated landing pad.
- struct PadRange {
- // The index of the landing pad.
- unsigned PadIndex;
- // The index of the begin and end labels in the landing pad's label lists.
- unsigned RangeIndex;
- };
-
- typedef DenseMap<MCSymbol *, PadRange> RangeMapType;
-
- /// ActionEntry - Structure describing an entry in the actions table.
- struct ActionEntry {
- int ValueForTypeID; // The value to write - may not be equal to the type id.
- int NextAction;
- unsigned Previous;
- };
-
- /// CallSiteEntry - Structure describing an entry in the call-site table.
- struct CallSiteEntry {
- // The 'try-range' is BeginLabel .. EndLabel.
- MCSymbol *BeginLabel; // zero indicates the start of the function.
- MCSymbol *EndLabel; // zero indicates the end of the function.
-
- // The landing pad starts at PadLabel.
- MCSymbol *PadLabel; // zero indicates that there is no landing pad.
- unsigned Action;
- };
-
- /// ComputeActionsTable - Compute the actions table and gather the first
- /// action index for each landing pad site.
- unsigned ComputeActionsTable(const SmallVectorImpl<const LandingPadInfo*>&LPs,
- SmallVectorImpl<ActionEntry> &Actions,
- SmallVectorImpl<unsigned> &FirstActions);
-
- /// CallToNoUnwindFunction - Return `true' if this is a call to a function
- /// marked `nounwind'. Return `false' otherwise.
- bool CallToNoUnwindFunction(const MachineInstr *MI);
-
- /// ComputeCallSiteTable - Compute the call-site table. The entry for an
- /// invoke has a try-range containing the call, a non-zero landing pad and an
- /// appropriate action. The entry for an ordinary call has a try-range
- /// containing the call and zero for the landing pad and the action. Calls
- /// marked 'nounwind' have no entry and must not be contained in the try-range
- /// of any entry - they form gaps in the table. Entries must be ordered by
- /// try-range address.
- void ComputeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
- const RangeMapType &PadMap,
- const SmallVectorImpl<const LandingPadInfo *> &LPs,
- const SmallVectorImpl<unsigned> &FirstActions);
-
- /// EmitExceptionTable - Emit landing pads and actions.
- ///
- /// The general organization of the table is complex, but the basic concepts
- /// are easy. First there is a header which describes the location and
- /// organization of the three components that follow.
- /// 1. The landing pad site information describes the range of code covered
- /// by the try. In our case it's an accumulation of the ranges covered
- /// by the invokes in the try. There is also a reference to the landing
- /// pad that handles the exception once processed. Finally an index into
- /// the actions table.
- /// 2. The action table, in our case, is composed of pairs of type ids
- /// and next action offset. Starting with the action index from the
- /// landing pad site, each type Id is checked for a match to the current
- /// exception. If it matches then the exception and type id are passed
- /// on to the landing pad. Otherwise the next action is looked up. This
- /// chain is terminated with a next action of zero. If no type id is
- /// found the frame is unwound and handling continues.
- /// 3. Type id table contains references to all the C++ typeinfo for all
- /// catches in the function. This tables is reversed indexed base 1.
- void EmitExceptionTable();
-
- virtual void EmitTypeInfos(unsigned TTypeEncoding);
-
-public:
- //===--------------------------------------------------------------------===//
- // Main entry points.
- //
- DwarfException(AsmPrinter *A);
- virtual ~DwarfException();
- /// EndModule - Emit all exception information that should come after the
- /// content.
- virtual void EndModule();
-
- /// BeginFunction - Gather pre-function exception information. Assumes being
- /// emitted immediately after the function entry point.
- virtual void BeginFunction(const MachineFunction *MF);
-
- /// EndFunction - Gather and emit post-function exception information.
- virtual void EndFunction();
-};
-
-class DwarfCFIException : public DwarfException {
+class DwarfCFIException : public EHStreamer {
/// shouldEmitPersonality - Per-function flag to indicate if .cfi_personality
/// should be emitted.
bool shouldEmitPersonality;
@@ -164,22 +43,26 @@ public:
DwarfCFIException(AsmPrinter *A);
virtual ~DwarfCFIException();
- /// EndModule - Emit all exception information that should come after the
+ /// endModule - Emit all exception information that should come after the
/// content.
- virtual void EndModule();
+ void endModule() override;
- /// BeginFunction - Gather pre-function exception information. Assumes being
+ /// beginFunction - Gather pre-function exception information. Assumes being
/// emitted immediately after the function entry point.
- virtual void BeginFunction(const MachineFunction *MF);
+ void beginFunction(const MachineFunction *MF) override;
- /// EndFunction - Gather and emit post-function exception information.
- virtual void EndFunction();
+ /// endFunction - Gather and emit post-function exception information.
+ void endFunction(const MachineFunction *) override;
};
-class ARMException : public DwarfException {
- void EmitTypeInfos(unsigned TTypeEncoding);
+class ARMException : public EHStreamer {
+ void emitTypeInfos(unsigned TTypeEncoding) override;
ARMTargetStreamer &getTargetStreamer();
+ /// shouldEmitCFI - Per-function flag to indicate if frame CFI info
+ /// should be emitted.
+ bool shouldEmitCFI;
+
public:
//===--------------------------------------------------------------------===//
// Main entry points.
@@ -187,19 +70,19 @@ public:
ARMException(AsmPrinter *A);
virtual ~ARMException();
- /// EndModule - Emit all exception information that should come after the
+ /// endModule - Emit all exception information that should come after the
/// content.
- virtual void EndModule();
+ void endModule() override;
- /// BeginFunction - Gather pre-function exception information. Assumes being
+ /// beginFunction - Gather pre-function exception information. Assumes being
/// emitted immediately after the function entry point.
- virtual void BeginFunction(const MachineFunction *MF);
+ void beginFunction(const MachineFunction *MF) override;
- /// EndFunction - Gather and emit post-function exception information.
- virtual void EndFunction();
+ /// endFunction - Gather and emit post-function exception information.
+ void endFunction(const MachineFunction *) override;
};
-class Win64Exception : public DwarfException {
+class Win64Exception : public EHStreamer {
/// shouldEmitPersonality - Per-function flag to indicate if personality
/// info should be emitted.
bool shouldEmitPersonality;
@@ -219,16 +102,16 @@ public:
Win64Exception(AsmPrinter *A);
virtual ~Win64Exception();
- /// EndModule - Emit all exception information that should come after the
+ /// endModule - Emit all exception information that should come after the
/// content.
- virtual void EndModule();
+ void endModule() override;
- /// BeginFunction - Gather pre-function exception information. Assumes being
+ /// beginFunction - Gather pre-function exception information. Assumes being
/// emitted immediately after the function entry point.
- virtual void BeginFunction(const MachineFunction *MF);
+ void beginFunction(const MachineFunction *MF) override;
- /// EndFunction - Gather and emit post-function exception information.
- virtual void EndFunction();
+ /// endFunction - Gather and emit post-function exception information.
+ void endFunction(const MachineFunction *) override;
};
} // End of namespace llvm
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfFile.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfFile.cpp
new file mode 100644
index 0000000..737ee54
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfFile.cpp
@@ -0,0 +1,156 @@
+//===-- llvm/CodeGen/DwarfFile.cpp - Dwarf Debug Framework ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "DwarfFile.h"
+
+#include "DwarfDebug.h"
+#include "DwarfUnit.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/Support/LEB128.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+
+namespace llvm {
+DwarfFile::DwarfFile(AsmPrinter *AP, StringRef Pref, BumpPtrAllocator &DA)
+ : Asm(AP), StrPool(DA, *Asm, Pref) {}
+
+DwarfFile::~DwarfFile() {}
+
+// Define a unique number for the abbreviation.
+//
+void DwarfFile::assignAbbrevNumber(DIEAbbrev &Abbrev) {
+ // Check the set for priors.
+ DIEAbbrev *InSet = AbbreviationsSet.GetOrInsertNode(&Abbrev);
+
+ // If it's newly added.
+ if (InSet == &Abbrev) {
+ // Add to abbreviation list.
+ Abbreviations.push_back(&Abbrev);
+
+ // Assign the vector position + 1 as its number.
+ Abbrev.setNumber(Abbreviations.size());
+ } else {
+ // Assign existing abbreviation number.
+ Abbrev.setNumber(InSet->getNumber());
+ }
+}
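
The FoldingSet lookup above is uniquing with stable numbering: the first abbreviation with a given profile gets the next 1-based index, and later duplicates reuse it. A minimal sketch of the same idea with a plain map keyed on a string profile (illustrative only; the real code folds on the abbreviation's full contents):

    #include <string>
    #include <unordered_map>
    #include <vector>

    struct AbbrevTable {
      std::unordered_map<std::string, unsigned> NumberByProfile;
      std::vector<std::string> Ordered;  // index i holds abbreviation number i + 1

      // Return the 1-based abbreviation number, assigning a new one on first use.
      unsigned assignNumber(const std::string &Profile) {
        auto It = NumberByProfile.find(Profile);
        if (It != NumberByProfile.end())
          return It->second;                       // existing abbreviation number
        Ordered.push_back(Profile);
        unsigned Number = static_cast<unsigned>(Ordered.size());  // position + 1
        NumberByProfile.emplace(Profile, Number);
        return Number;
      }
    };
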
+
+void DwarfFile::addUnit(std::unique_ptr<DwarfUnit> U) {
+ CUs.push_back(std::move(U));
+}
+
+// Emit the various dwarf units to the unit section USection with
+// the abbreviations going into ASection.
+void DwarfFile::emitUnits(DwarfDebug *DD, const MCSymbol *ASectionSym) {
+ for (const auto &TheU : CUs) {
+ DIE &Die = TheU->getUnitDie();
+ const MCSection *USection = TheU->getSection();
+ Asm->OutStreamer.SwitchSection(USection);
+
+ // Emit the compile units header.
+ Asm->OutStreamer.EmitLabel(TheU->getLabelBegin());
+
+ // Emit size of content, not including the length field itself
+ Asm->OutStreamer.AddComment("Length of Unit");
+ Asm->EmitInt32(TheU->getHeaderSize() + Die.getSize());
+
+ TheU->emitHeader(ASectionSym);
+
+ DD->emitDIE(Die);
+ Asm->OutStreamer.EmitLabel(TheU->getLabelEnd());
+ }
+}
+// Compute the size and offset for each DIE.
+void DwarfFile::computeSizeAndOffsets() {
+ // Offset from the first CU in the debug info section is 0 initially.
+ unsigned SecOffset = 0;
+
+ // Iterate over each compile unit and set the size and offsets for each
+ // DIE within each compile unit. All offsets are CU relative.
+ for (const auto &TheU : CUs) {
+ TheU->setDebugInfoOffset(SecOffset);
+
+ // CU-relative offset is reset to 0 here.
+ unsigned Offset = sizeof(int32_t) + // Length of Unit Info
+ TheU->getHeaderSize(); // Unit-specific headers
+
+ // EndOffset here is CU-relative, after laying out
+ // all of the CU DIE.
+ unsigned EndOffset = computeSizeAndOffset(TheU->getUnitDie(), Offset);
+ SecOffset += EndOffset;
+ }
+}
+// Compute the size and offset of a DIE. The offset is relative to the start
+// of the CU. It returns the offset after laying out the DIE.
+unsigned DwarfFile::computeSizeAndOffset(DIE &Die, unsigned Offset) {
+ // Record the abbreviation.
+ assignAbbrevNumber(Die.getAbbrev());
+
+ // Get the abbreviation for this DIE.
+ const DIEAbbrev &Abbrev = Die.getAbbrev();
+
+ // Set DIE offset
+ Die.setOffset(Offset);
+
+ // Start the size with the size of abbreviation code.
+ Offset += getULEB128Size(Die.getAbbrevNumber());
+
+ const SmallVectorImpl<DIEValue *> &Values = Die.getValues();
+ const SmallVectorImpl<DIEAbbrevData> &AbbrevData = Abbrev.getData();
+
+ // Size the DIE attribute values.
+ for (unsigned i = 0, N = Values.size(); i < N; ++i)
+ // Size attribute value.
+ Offset += Values[i]->SizeOf(Asm, AbbrevData[i].getForm());
+
+ // Get the children.
+ const auto &Children = Die.getChildren();
+
+ // Size the DIE children if any.
+ if (!Children.empty()) {
+ assert(Abbrev.hasChildren() && "Children flag not set");
+
+ for (auto &Child : Children)
+ Offset = computeSizeAndOffset(*Child, Offset);
+
+ // End of children marker.
+ Offset += sizeof(int8_t);
+ }
+
+ Die.setSize(Offset - Die.getOffset());
+ return Offset;
+}
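
The per-DIE size above starts with getULEB128Size(AbbrevNumber), i.e. the number of 7-bit groups needed to encode the abbreviation code. A self-contained sketch of that helper, assuming the standard unsigned LEB128 rule (not LLVM's implementation):

    #include <cassert>
    #include <cstdint>

    // Bytes a value occupies as unsigned LEB128: one byte per 7 bits of payload.
    unsigned uleb128Size(uint64_t Value) {
      unsigned Size = 0;
      do {
        Value >>= 7;
        ++Size;
      } while (Value != 0);
      return Size;
    }

    int main() {
      assert(uleb128Size(0) == 1);
      assert(uleb128Size(127) == 1);
      assert(uleb128Size(128) == 2);    // needs a continuation byte
      assert(uleb128Size(16384) == 3);
      return 0;
    }
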
+void DwarfFile::emitAbbrevs(const MCSection *Section) {
+ // Check to see if it is worth the effort.
+ if (!Abbreviations.empty()) {
+ // Start the debug abbrev section.
+ Asm->OutStreamer.SwitchSection(Section);
+
+ // For each abbreviation.
+ for (const DIEAbbrev *Abbrev : Abbreviations) {
+ // Emit the abbreviation code (base 1 index).
+ Asm->EmitULEB128(Abbrev->getNumber(), "Abbreviation Code");
+
+ // Emit the abbreviations data.
+ Abbrev->Emit(Asm);
+ }
+
+ // Mark end of abbreviations.
+ Asm->EmitULEB128(0, "EOM(3)");
+ }
+}
+
+// Emit strings into a string section.
+void DwarfFile::emitStrings(const MCSection *StrSection,
+ const MCSection *OffsetSection,
+ const MCSymbol *StrSecSym) {
+ StrPool.emit(*Asm, StrSection, OffsetSection, StrSecSym);
+}
+}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfFile.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfFile.h
new file mode 100644
index 0000000..3985eb2
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfFile.h
@@ -0,0 +1,84 @@
+//===-- llvm/CodeGen/DwarfFile.h - Dwarf Debug Framework -------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CODEGEN_ASMPRINTER_DWARFFILE_H__
+#define CODEGEN_ASMPRINTER_DWARFFILE_H__
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Allocator.h"
+#include "AddressPool.h"
+#include "DwarfStringPool.h"
+
+#include <vector>
+#include <string>
+#include <memory>
+
+namespace llvm {
+class AsmPrinter;
+class DwarfUnit;
+class DIEAbbrev;
+class MCSymbol;
+class DIE;
+class StringRef;
+class DwarfDebug;
+class MCSection;
+class DwarfFile {
+ // Target of Dwarf emission, used for sizing of abbreviations.
+ AsmPrinter *Asm;
+
+ // Used to uniquely define abbreviations.
+ FoldingSet<DIEAbbrev> AbbreviationsSet;
+
+ // A list of all the unique abbreviations in use.
+ std::vector<DIEAbbrev *> Abbreviations;
+
+ // A pointer to all units in the section.
+ SmallVector<std::unique_ptr<DwarfUnit>, 1> CUs;
+
+ DwarfStringPool StrPool;
+
+public:
+ DwarfFile(AsmPrinter *AP, StringRef Pref, BumpPtrAllocator &DA);
+
+ ~DwarfFile();
+
+ const SmallVectorImpl<std::unique_ptr<DwarfUnit>> &getUnits() { return CUs; }
+
+ /// \brief Compute the size and offset of a DIE given an incoming Offset.
+ unsigned computeSizeAndOffset(DIE &Die, unsigned Offset);
+
+ /// \brief Compute the size and offset of all the DIEs.
+ void computeSizeAndOffsets();
+
+ /// \brief Define a unique number for the abbreviation.
+ void assignAbbrevNumber(DIEAbbrev &Abbrev);
+
+ /// \brief Add a unit to the list of CUs.
+ void addUnit(std::unique_ptr<DwarfUnit> U);
+
+ /// \brief Emit all of the units to the section listed with the given
+ /// abbreviation section.
+ void emitUnits(DwarfDebug *DD, const MCSymbol *ASectionSym);
+
+ /// \brief Emit a set of abbreviations to the specific section.
+ void emitAbbrevs(const MCSection *);
+
+ /// \brief Emit all of the strings to the section given.
+ void emitStrings(const MCSection *StrSection,
+ const MCSection *OffsetSection = nullptr,
+ const MCSymbol *StrSecSym = nullptr);
+
+ /// \brief Returns the string pool.
+ DwarfStringPool &getStringPool() { return StrPool; }
+};
+}
+#endif
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfStringPool.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfStringPool.cpp
new file mode 100644
index 0000000..72cab60
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfStringPool.cpp
@@ -0,0 +1,74 @@
+//===-- llvm/CodeGen/DwarfStringPool.cpp - Dwarf Debug Framework ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "DwarfStringPool.h"
+#include "llvm/MC/MCStreamer.h"
+
+using namespace llvm;
+
+MCSymbol *DwarfStringPool::getSectionSymbol() { return SectionSymbol; }
+
+static std::pair<MCSymbol *, unsigned> &
+getEntry(AsmPrinter &Asm,
+ StringMap<std::pair<MCSymbol *, unsigned>, BumpPtrAllocator &> &Pool,
+ StringRef Prefix, StringRef Str) {
+ std::pair<MCSymbol *, unsigned> &Entry =
+ Pool.GetOrCreateValue(Str).getValue();
+ if (!Entry.first) {
+ Entry.second = Pool.size() - 1;
+ Entry.first = Asm.GetTempSymbol(Prefix, Entry.second);
+ }
+ return Entry;
+}
+
+MCSymbol *DwarfStringPool::getSymbol(AsmPrinter &Asm, StringRef Str) {
+ return getEntry(Asm, Pool, Prefix, Str).first;
+}
+
+unsigned DwarfStringPool::getIndex(AsmPrinter &Asm, StringRef Str) {
+ return getEntry(Asm, Pool, Prefix, Str).second;
+}
+
+void DwarfStringPool::emit(AsmPrinter &Asm, const MCSection *StrSection,
+ const MCSection *OffsetSection,
+ const MCSymbol *StrSecSym) {
+ if (Pool.empty())
+ return;
+
+ // Start the dwarf str section.
+ Asm.OutStreamer.SwitchSection(StrSection);
+
+ // Get all of the string pool entries and put them in an array by their ID so
+ // we can sort them.
+ SmallVector<const StringMapEntry<std::pair<MCSymbol *, unsigned>> *, 64>
+ Entries(Pool.size());
+
+ for (const auto &E : Pool)
+ Entries[E.getValue().second] = &E;
+
+ for (const auto &Entry : Entries) {
+ // Emit a label for reference from debug information entries.
+ Asm.OutStreamer.EmitLabel(Entry->getValue().first);
+
+ // Emit the string itself with a terminating null byte.
+ Asm.OutStreamer.EmitBytes(
+ StringRef(Entry->getKeyData(), Entry->getKeyLength() + 1));
+ }
+
+ // If we've got an offset section go ahead and emit that now as well.
+ if (OffsetSection) {
+ Asm.OutStreamer.SwitchSection(OffsetSection);
+ unsigned offset = 0;
+ unsigned size = 4; // FIXME: DWARF64 is 8.
+ for (const auto &Entry : Entries) {
+ Asm.OutStreamer.EmitIntValue(offset, size);
+ offset += Entry->getKeyLength() + 1;
+ }
+ }
+}
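The pool above hands out a sequential index the first time a string is requested and later replays the entries in index order, so the optional offsets table is just a running sum of each string's length plus its terminating NUL. A minimal stand-alone sketch of that pattern, using std::map and plain strings instead of StringMap and MCSymbol temp labels:

#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct PoolEntry {
  unsigned Index;    // assigned in order of first insertion
  std::string Label; // stand-in for the temporary MCSymbol
};

static std::map<std::string, PoolEntry> Pool;

// Get-or-create, mirroring getEntry(): index and label are fixed on the
// first request for a given string and reused afterwards.
static const PoolEntry &getEntry(const std::string &Str) {
  auto It = Pool.find(Str);
  if (It == Pool.end()) {
    PoolEntry E;
    E.Index = static_cast<unsigned>(Pool.size());
    E.Label = ".Lstr" + std::to_string(E.Index);
    It = Pool.insert(std::make_pair(Str, E)).first;
  }
  return It->second;
}

int main() {
  getEntry("main");
  getEntry("int");
  getEntry("main"); // second lookup reuses index 0

  // Replay in index order and compute the offsets-table values: each string
  // starts where the previous one (including its NUL byte) ended.
  std::vector<const std::string *> ByIndex(Pool.size());
  for (const auto &KV : Pool)
    ByIndex[KV.second.Index] = &KV.first;

  unsigned Offset = 0;
  for (const std::string *S : ByIndex) {
    std::printf("%-6s %s offset=%u\n", S->c_str(), Pool[*S].Label.c_str(),
                Offset);
    Offset += static_cast<unsigned>(S->size()) + 1; // +1 for the NUL
  }
}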
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfStringPool.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfStringPool.h
new file mode 100644
index 0000000..c1615fb
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfStringPool.h
@@ -0,0 +1,55 @@
+//===-- llvm/CodeGen/DwarfStringPool.h - Dwarf Debug Framework -*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CODEGEN_ASMPRINTER_STRINGPOOL_H__
+#define CODEGEN_ASMPRINTER_STRINGPOOL_H__
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/Support/Allocator.h"
+
+#include <utility>
+
+namespace llvm {
+
+class MCSymbol;
+class MCSection;
+class StringRef;
+
+// Collection of strings for this unit and assorted symbols.
+// A String->Symbol mapping of strings used by indirect
+// references.
+class DwarfStringPool {
+ StringMap<std::pair<MCSymbol *, unsigned>, BumpPtrAllocator &> Pool;
+ StringRef Prefix;
+ MCSymbol *SectionSymbol;
+
+public:
+ DwarfStringPool(BumpPtrAllocator &A, AsmPrinter &Asm, StringRef Prefix)
+ : Pool(A), Prefix(Prefix), SectionSymbol(Asm.GetTempSymbol(Prefix)) {}
+
+ void emit(AsmPrinter &Asm, const MCSection *StrSection,
+ const MCSection *OffsetSection = nullptr,
+ const MCSymbol *StrSecSym = nullptr);
+
+ /// \brief Returns the symbol marking the start of the pool's section.
+ MCSymbol *getSectionSymbol();
+
+ /// \brief Returns an entry into the string pool with the given
+ /// string text.
+ MCSymbol *getSymbol(AsmPrinter &Asm, StringRef Str);
+
+ /// \brief Returns the index into the string pool with the given
+ /// string text.
+ unsigned getIndex(AsmPrinter &Asm, StringRef Str);
+
+ bool empty() const { return Pool.empty(); }
+};
+}
+#endif
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
index 50f5cc9..9538bee 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
@@ -1,4 +1,4 @@
-//===-- llvm/CodeGen/DwarfCompileUnit.cpp - Dwarf Compile Unit ------------===//
+//===-- llvm/CodeGen/DwarfUnit.cpp - Dwarf Type and Compile Units ---------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,52 +11,80 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "dwarfdebug"
-
-#include "DwarfCompileUnit.h"
+#include "DwarfUnit.h"
#include "DwarfAccelTable.h"
#include "DwarfDebug.h"
#include "llvm/ADT/APFloat.h"
-#include "llvm/DIBuilder.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
-#include "llvm/Target/Mangler.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetFrameLowering.h"
-#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
+#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
-/// CompileUnit - Compile unit constructor.
-CompileUnit::CompileUnit(unsigned UID, DIE *D, DICompileUnit Node,
- AsmPrinter *A, DwarfDebug *DW, DwarfUnits *DWU)
- : UniqueID(UID), Node(Node), CUDie(D), Asm(A), DD(DW), DU(DWU),
- IndexTyDie(0), DebugInfoOffset(0) {
+#define DEBUG_TYPE "dwarfdebug"
+
+static cl::opt<bool>
+GenerateDwarfTypeUnits("generate-type-units", cl::Hidden,
+ cl::desc("Generate DWARF4 type units."),
+ cl::init(false));
+
+/// Unit - Unit constructor.
+DwarfUnit::DwarfUnit(unsigned UID, dwarf::Tag UnitTag, DICompileUnit Node,
+ AsmPrinter *A, DwarfDebug *DW, DwarfFile *DWU)
+ : UniqueID(UID), CUNode(Node), UnitDie(UnitTag), DebugInfoOffset(0), Asm(A),
+ DD(DW), DU(DWU), IndexTyDie(nullptr), Section(nullptr),
+ Skeleton(nullptr) {
+ assert(UnitTag == dwarf::DW_TAG_compile_unit ||
+ UnitTag == dwarf::DW_TAG_type_unit);
DIEIntegerOne = new (DIEValueAllocator) DIEInteger(1);
- insertDIE(Node, D);
}
-/// ~CompileUnit - Destructor for compile unit.
-CompileUnit::~CompileUnit() {
+DwarfCompileUnit::DwarfCompileUnit(unsigned UID, DICompileUnit Node,
+ AsmPrinter *A, DwarfDebug *DW,
+ DwarfFile *DWU)
+ : DwarfUnit(UID, dwarf::DW_TAG_compile_unit, Node, A, DW, DWU) {
+ insertDIE(Node, &getUnitDie());
+}
+
+DwarfTypeUnit::DwarfTypeUnit(unsigned UID, DwarfCompileUnit &CU, AsmPrinter *A,
+ DwarfDebug *DW, DwarfFile *DWU,
+ MCDwarfDwoLineTable *SplitLineTable)
+ : DwarfUnit(UID, dwarf::DW_TAG_type_unit, CU.getCUNode(), A, DW, DWU),
+ CU(CU), SplitLineTable(SplitLineTable) {
+ if (SplitLineTable)
+ addSectionOffset(UnitDie, dwarf::DW_AT_stmt_list, 0);
+}
+
+/// ~Unit - Destructor for compile unit.
+DwarfUnit::~DwarfUnit() {
for (unsigned j = 0, M = DIEBlocks.size(); j < M; ++j)
DIEBlocks[j]->~DIEBlock();
+ for (unsigned j = 0, M = DIELocs.size(); j < M; ++j)
+ DIELocs[j]->~DIELoc();
}
/// createDIEEntry - Creates a new DIEEntry to be a proxy for a debug
/// information entry.
-DIEEntry *CompileUnit::createDIEEntry(DIE *Entry) {
+DIEEntry *DwarfUnit::createDIEEntry(DIE &Entry) {
DIEEntry *Value = new (DIEValueAllocator) DIEEntry(Entry);
return Value;
}
/// getDefaultLowerBound - Return the default lower bound for an array. If the
/// DWARF version doesn't handle the language, return -1.
-int64_t CompileUnit::getDefaultLowerBound() const {
+int64_t DwarfUnit::getDefaultLowerBound() const {
switch (getLanguage()) {
default:
break;
@@ -100,17 +128,23 @@ int64_t CompileUnit::getDefaultLowerBound() const {
/// Check whether the DIE for this MDNode can be shared across CUs.
static bool isShareableAcrossCUs(DIDescriptor D) {
- // When the MDNode can be part of the type system, the DIE can be
- // shared across CUs.
- return D.isType() ||
- (D.isSubprogram() && !DISubprogram(D).isDefinition());
+ // When the MDNode can be part of the type system, the DIE can be shared
+ // across CUs.
+ // Combining type units and cross-CU DIE sharing is lower value (since
+ // cross-CU DIE sharing is used in LTO and removes type redundancy at that
+ // level already) but may be implementable for some value in projects
+ // building multiple independent libraries with LTO and then linking those
+ // together.
+ return (D.isType() ||
+ (D.isSubprogram() && !DISubprogram(D).isDefinition())) &&
+ !GenerateDwarfTypeUnits;
}
/// getDIE - Returns the debug information entry map slot for the
/// specified debug variable. We delegate the request to DwarfDebug
/// when the DIE for this MDNode can be shared across CUs. The mappings
/// will be kept in DwarfDebug for shareable DIEs.
-DIE *CompileUnit::getDIE(DIDescriptor D) const {
+DIE *DwarfUnit::getDIE(DIDescriptor D) const {
if (isShareableAcrossCUs(D))
return DD->getDIE(D);
return MDNodeToDieMap.lookup(D);
@@ -119,7 +153,7 @@ DIE *CompileUnit::getDIE(DIDescriptor D) const {
/// insertDIE - Insert DIE into the map. We delegate the request to DwarfDebug
/// when the DIE for this MDNode can be shared across CUs. The mappings
/// will be kept in DwarfDebug for shareable DIEs.
-void CompileUnit::insertDIE(DIDescriptor Desc, DIE *D) {
+void DwarfUnit::insertDIE(DIDescriptor Desc, DIE *D) {
if (isShareableAcrossCUs(Desc)) {
DD->insertDIE(Desc, D);
return;
@@ -128,40 +162,40 @@ void CompileUnit::insertDIE(DIDescriptor Desc, DIE *D) {
}
/// addFlag - Add a flag that is true.
-void CompileUnit::addFlag(DIE *Die, dwarf::Attribute Attribute) {
+void DwarfUnit::addFlag(DIE &Die, dwarf::Attribute Attribute) {
if (DD->getDwarfVersion() >= 4)
- Die->addValue(Attribute, dwarf::DW_FORM_flag_present, DIEIntegerOne);
+ Die.addValue(Attribute, dwarf::DW_FORM_flag_present, DIEIntegerOne);
else
- Die->addValue(Attribute, dwarf::DW_FORM_flag, DIEIntegerOne);
+ Die.addValue(Attribute, dwarf::DW_FORM_flag, DIEIntegerOne);
}
/// addUInt - Add an unsigned integer attribute data and value.
///
-void CompileUnit::addUInt(DIE *Die, dwarf::Attribute Attribute,
- Optional<dwarf::Form> Form, uint64_t Integer) {
+void DwarfUnit::addUInt(DIE &Die, dwarf::Attribute Attribute,
+ Optional<dwarf::Form> Form, uint64_t Integer) {
if (!Form)
Form = DIEInteger::BestForm(false, Integer);
DIEValue *Value = Integer == 1 ? DIEIntegerOne : new (DIEValueAllocator)
DIEInteger(Integer);
- Die->addValue(Attribute, *Form, Value);
+ Die.addValue(Attribute, *Form, Value);
}
-void CompileUnit::addUInt(DIEBlock *Block, dwarf::Form Form, uint64_t Integer) {
+void DwarfUnit::addUInt(DIE &Block, dwarf::Form Form, uint64_t Integer) {
addUInt(Block, (dwarf::Attribute)0, Form, Integer);
}
/// addSInt - Add a signed integer attribute data and value.
///
-void CompileUnit::addSInt(DIE *Die, dwarf::Attribute Attribute,
- Optional<dwarf::Form> Form, int64_t Integer) {
+void DwarfUnit::addSInt(DIE &Die, dwarf::Attribute Attribute,
+ Optional<dwarf::Form> Form, int64_t Integer) {
if (!Form)
Form = DIEInteger::BestForm(true, Integer);
DIEValue *Value = new (DIEValueAllocator) DIEInteger(Integer);
- Die->addValue(Attribute, *Form, Value);
+ Die.addValue(Attribute, *Form, Value);
}
-void CompileUnit::addSInt(DIEBlock *Die, Optional<dwarf::Form> Form,
- int64_t Integer) {
+void DwarfUnit::addSInt(DIELoc &Die, Optional<dwarf::Form> Form,
+ int64_t Integer) {
addSInt(Die, (dwarf::Attribute)0, Form, Integer);
}
@@ -170,67 +204,67 @@ void CompileUnit::addSInt(DIEBlock *Die, Optional<dwarf::Form> Form,
/// more predictable sizes. In the case of split dwarf we emit an index
/// into another table which gets us the static offset into the string
/// table.
-void CompileUnit::addString(DIE *Die, dwarf::Attribute Attribute,
- StringRef String) {
- DIEValue *Value;
- dwarf::Form Form;
- if (!DD->useSplitDwarf()) {
- MCSymbol *Symb = DU->getStringPoolEntry(String);
- if (Asm->needsRelocationsForDwarfStringPool())
- Value = new (DIEValueAllocator) DIELabel(Symb);
- else {
- MCSymbol *StringPool = DU->getStringPoolSym();
- Value = new (DIEValueAllocator) DIEDelta(Symb, StringPool);
- }
- Form = dwarf::DW_FORM_strp;
- } else {
- unsigned idx = DU->getStringPoolIndex(String);
- Value = new (DIEValueAllocator) DIEInteger(idx);
- Form = dwarf::DW_FORM_GNU_str_index;
- }
+void DwarfUnit::addString(DIE &Die, dwarf::Attribute Attribute,
+ StringRef String) {
+
+ if (!DD->useSplitDwarf())
+ return addLocalString(Die, Attribute, String);
+
+ unsigned idx = DU->getStringPool().getIndex(*Asm, String);
+ DIEValue *Value = new (DIEValueAllocator) DIEInteger(idx);
DIEValue *Str = new (DIEValueAllocator) DIEString(Value, String);
- Die->addValue(Attribute, Form, Str);
+ Die.addValue(Attribute, dwarf::DW_FORM_GNU_str_index, Str);
}
/// addLocalString - Add a string attribute data and value. This is guaranteed
/// to be in the local string pool instead of indirected.
-void CompileUnit::addLocalString(DIE *Die, dwarf::Attribute Attribute,
- StringRef String) {
- MCSymbol *Symb = DU->getStringPoolEntry(String);
+void DwarfUnit::addLocalString(DIE &Die, dwarf::Attribute Attribute,
+ StringRef String) {
+ MCSymbol *Symb = DU->getStringPool().getSymbol(*Asm, String);
DIEValue *Value;
- if (Asm->needsRelocationsForDwarfStringPool())
+ if (Asm->MAI->doesDwarfUseRelocationsAcrossSections())
Value = new (DIEValueAllocator) DIELabel(Symb);
else {
- MCSymbol *StringPool = DU->getStringPoolSym();
+ MCSymbol *StringPool = DU->getStringPool().getSectionSymbol();
Value = new (DIEValueAllocator) DIEDelta(Symb, StringPool);
}
- Die->addValue(Attribute, dwarf::DW_FORM_strp, Value);
+ DIEValue *Str = new (DIEValueAllocator) DIEString(Value, String);
+ Die.addValue(Attribute, dwarf::DW_FORM_strp, Str);
}
/// addExpr - Add a Dwarf expression attribute data and value.
///
-void CompileUnit::addExpr(DIEBlock *Die, dwarf::Form Form, const MCExpr *Expr) {
+void DwarfUnit::addExpr(DIELoc &Die, dwarf::Form Form, const MCExpr *Expr) {
DIEValue *Value = new (DIEValueAllocator) DIEExpr(Expr);
- Die->addValue((dwarf::Attribute)0, Form, Value);
+ Die.addValue((dwarf::Attribute)0, Form, Value);
+}
+
+/// addLocationList - Add a Dwarf loclistptr attribute data and value.
+///
+void DwarfUnit::addLocationList(DIE &Die, dwarf::Attribute Attribute,
+ unsigned Index) {
+ DIEValue *Value = new (DIEValueAllocator) DIELocList(Index);
+ dwarf::Form Form = DD->getDwarfVersion() >= 4 ? dwarf::DW_FORM_sec_offset
+ : dwarf::DW_FORM_data4;
+ Die.addValue(Attribute, Form, Value);
}
/// addLabel - Add a Dwarf label attribute data and value.
///
-void CompileUnit::addLabel(DIE *Die, dwarf::Attribute Attribute,
- dwarf::Form Form, const MCSymbol *Label) {
+void DwarfUnit::addLabel(DIE &Die, dwarf::Attribute Attribute, dwarf::Form Form,
+ const MCSymbol *Label) {
DIEValue *Value = new (DIEValueAllocator) DIELabel(Label);
- Die->addValue(Attribute, Form, Value);
+ Die.addValue(Attribute, Form, Value);
}
-void CompileUnit::addLabel(DIEBlock *Die, dwarf::Form Form,
- const MCSymbol *Label) {
+void DwarfUnit::addLabel(DIELoc &Die, dwarf::Form Form, const MCSymbol *Label) {
addLabel(Die, (dwarf::Attribute)0, Form, Label);
}
/// addSectionLabel - Add a Dwarf section label attribute data and value.
///
-void CompileUnit::addSectionLabel(DIE *Die, dwarf::Attribute Attribute,
- const MCSymbol *Label) {
+void DwarfUnit::addSectionLabel(DIE &Die, dwarf::Attribute Attribute,
+ const MCSymbol *Label) {
if (DD->getDwarfVersion() >= 4)
addLabel(Die, Attribute, dwarf::DW_FORM_sec_offset, Label);
else
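A check that repeats in addLocationList and addSectionLabel above, and again in addSectionOffset and addSectionDelta below, is the same DWARF-version test: references into other debug sections use DW_FORM_sec_offset from DWARF 4 onward and fall back to DW_FORM_data4 for older consumers. A tiny stand-alone sketch of that selection; the two form encodings below match the DWARF spec, everything else is illustrative and not part of the patch:

#include <cstdio>

enum : unsigned { DW_FORM_data4 = 0x06, DW_FORM_sec_offset = 0x17 };

// Pick the form used for a reference into another debug section.
static unsigned sectionRefForm(unsigned DwarfVersion) {
  // DW_FORM_sec_offset only exists from DWARF 4 onward.
  return DwarfVersion >= 4 ? DW_FORM_sec_offset : DW_FORM_data4;
}

int main() {
  std::printf("DWARF 2 -> form 0x%02x\n", sectionRefForm(2));
  std::printf("DWARF 4 -> form 0x%02x\n", sectionRefForm(4));
}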
@@ -239,8 +273,8 @@ void CompileUnit::addSectionLabel(DIE *Die, dwarf::Attribute Attribute,
/// addSectionOffset - Add an offset into a section attribute data and value.
///
-void CompileUnit::addSectionOffset(DIE *Die, dwarf::Attribute Attribute,
- uint64_t Integer) {
+void DwarfUnit::addSectionOffset(DIE &Die, dwarf::Attribute Attribute,
+ uint64_t Integer) {
if (DD->getDwarfVersion() >= 4)
addUInt(Die, Attribute, dwarf::DW_FORM_sec_offset, Integer);
else
@@ -250,104 +284,143 @@ void CompileUnit::addSectionOffset(DIE *Die, dwarf::Attribute Attribute,
/// addLabelAddress - Add a dwarf label attribute data and value using
/// DW_FORM_addr or DW_FORM_GNU_addr_index.
///
-void CompileUnit::addLabelAddress(DIE *Die, dwarf::Attribute Attribute,
- MCSymbol *Label) {
+void DwarfCompileUnit::addLabelAddress(DIE &Die, dwarf::Attribute Attribute,
+ const MCSymbol *Label) {
+
+ if (!DD->useSplitDwarf())
+ return addLocalLabelAddress(Die, Attribute, Label);
+
if (Label)
DD->addArangeLabel(SymbolCU(this, Label));
- if (!DD->useSplitDwarf()) {
- if (Label != NULL) {
- DIEValue *Value = new (DIEValueAllocator) DIELabel(Label);
- Die->addValue(Attribute, dwarf::DW_FORM_addr, Value);
- } else {
- DIEValue *Value = new (DIEValueAllocator) DIEInteger(0);
- Die->addValue(Attribute, dwarf::DW_FORM_addr, Value);
- }
- } else {
- unsigned idx = DU->getAddrPoolIndex(Label);
- DIEValue *Value = new (DIEValueAllocator) DIEInteger(idx);
- Die->addValue(Attribute, dwarf::DW_FORM_GNU_addr_index, Value);
- }
+ unsigned idx = DD->getAddressPool().getIndex(Label);
+ DIEValue *Value = new (DIEValueAllocator) DIEInteger(idx);
+ Die.addValue(Attribute, dwarf::DW_FORM_GNU_addr_index, Value);
+}
+
+void DwarfCompileUnit::addLocalLabelAddress(DIE &Die,
+ dwarf::Attribute Attribute,
+ const MCSymbol *Label) {
+ if (Label)
+ DD->addArangeLabel(SymbolCU(this, Label));
+
+ Die.addValue(Attribute, dwarf::DW_FORM_addr,
+ Label ? (DIEValue *)new (DIEValueAllocator) DIELabel(Label)
+ : new (DIEValueAllocator) DIEInteger(0));
+}
+
+unsigned DwarfCompileUnit::getOrCreateSourceID(StringRef FileName,
+                                               StringRef DirName) {
+ // If we print assembly, we can't separate .file entries according to
+ // compile units. Thus all files will belong to the default compile unit.
+
+ // FIXME: add a better feature test than hasRawTextSupport. Even better,
+ // extend .file to support this.
+ return Asm->OutStreamer.EmitDwarfFileDirective(
+ 0, DirName, FileName,
+ Asm->OutStreamer.hasRawTextSupport() ? 0 : getUniqueID());
+}
+
+unsigned DwarfTypeUnit::getOrCreateSourceID(StringRef FileName,
+                                            StringRef DirName) {
+ return SplitLineTable ? SplitLineTable->getFile(DirName, FileName)
+ : getCU().getOrCreateSourceID(FileName, DirName);
}
/// addOpAddress - Add a dwarf op address data and value using the
/// form given and an op of either DW_FORM_addr or DW_FORM_GNU_addr_index.
///
-void CompileUnit::addOpAddress(DIEBlock *Die, const MCSymbol *Sym) {
- DD->addArangeLabel(SymbolCU(this, Sym));
+void DwarfUnit::addOpAddress(DIELoc &Die, const MCSymbol *Sym) {
if (!DD->useSplitDwarf()) {
addUInt(Die, dwarf::DW_FORM_data1, dwarf::DW_OP_addr);
addLabel(Die, dwarf::DW_FORM_udata, Sym);
} else {
addUInt(Die, dwarf::DW_FORM_data1, dwarf::DW_OP_GNU_addr_index);
- addUInt(Die, dwarf::DW_FORM_GNU_addr_index, DU->getAddrPoolIndex(Sym));
+ addUInt(Die, dwarf::DW_FORM_GNU_addr_index,
+ DD->getAddressPool().getIndex(Sym));
}
}
/// addSectionDelta - Add a section label delta attribute data and value.
///
-void CompileUnit::addSectionDelta(DIE *Die, dwarf::Attribute Attribute,
- const MCSymbol *Hi, const MCSymbol *Lo) {
+void DwarfUnit::addSectionDelta(DIE &Die, dwarf::Attribute Attribute,
+ const MCSymbol *Hi, const MCSymbol *Lo) {
DIEValue *Value = new (DIEValueAllocator) DIEDelta(Hi, Lo);
- if (DD->getDwarfVersion() >= 4)
- Die->addValue(Attribute, dwarf::DW_FORM_sec_offset, Value);
- else
- Die->addValue(Attribute, dwarf::DW_FORM_data4, Value);
+ Die.addValue(Attribute, DD->getDwarfVersion() >= 4 ? dwarf::DW_FORM_sec_offset
+ : dwarf::DW_FORM_data4,
+ Value);
+}
+
+void DwarfUnit::addLabelDelta(DIE &Die, dwarf::Attribute Attribute,
+ const MCSymbol *Hi, const MCSymbol *Lo) {
+ DIEValue *Value = new (DIEValueAllocator) DIEDelta(Hi, Lo);
+ Die.addValue(Attribute, dwarf::DW_FORM_data4, Value);
}
/// addDIEEntry - Add a DIE attribute data and value.
///
-void CompileUnit::addDIEEntry(DIE *Die, dwarf::Attribute Attribute,
- DIE *Entry) {
+void DwarfUnit::addDIEEntry(DIE &Die, dwarf::Attribute Attribute, DIE &Entry) {
addDIEEntry(Die, Attribute, createDIEEntry(Entry));
}
-void CompileUnit::addDIEEntry(DIE *Die, dwarf::Attribute Attribute,
- DIEEntry *Entry) {
- const DIE *DieCU = Die->getCompileUnitOrNull();
- const DIE *EntryCU = Entry->getEntry()->getCompileUnitOrNull();
+void DwarfUnit::addDIETypeSignature(DIE &Die, const DwarfTypeUnit &Type) {
+ // Flag the type unit reference as a declaration so that if it contains
+ // members (implicit special members, static data member definitions, member
+ // declarations for definitions in this CU, etc) consumers don't get confused
+ // and think this is a full definition.
+ addFlag(Die, dwarf::DW_AT_declaration);
+
+ Die.addValue(dwarf::DW_AT_signature, dwarf::DW_FORM_ref_sig8,
+ new (DIEValueAllocator) DIETypeSignature(Type));
+}
+
+void DwarfUnit::addDIEEntry(DIE &Die, dwarf::Attribute Attribute,
+ DIEEntry *Entry) {
+ const DIE *DieCU = Die.getUnitOrNull();
+ const DIE *EntryCU = Entry->getEntry().getUnitOrNull();
if (!DieCU)
// We assume that Die belongs to this CU, if it is not linked to any CU yet.
- DieCU = getCUDie();
+ DieCU = &getUnitDie();
if (!EntryCU)
- EntryCU = getCUDie();
- Die->addValue(Attribute, EntryCU == DieCU ? dwarf::DW_FORM_ref4
- : dwarf::DW_FORM_ref_addr,
- Entry);
+ EntryCU = &getUnitDie();
+ Die.addValue(Attribute,
+ EntryCU == DieCU ? dwarf::DW_FORM_ref4 : dwarf::DW_FORM_ref_addr,
+ Entry);
}
/// Create a DIE with the given Tag, add the DIE to its parent, and
/// call insertDIE if MD is not null.
-DIE *CompileUnit::createAndAddDIE(unsigned Tag, DIE &Parent, DIDescriptor N) {
- DIE *Die = new DIE(Tag);
- Parent.addChild(Die);
+DIE &DwarfUnit::createAndAddDIE(unsigned Tag, DIE &Parent, DIDescriptor N) {
+ assert(Tag != dwarf::DW_TAG_auto_variable &&
+ Tag != dwarf::DW_TAG_arg_variable);
+ Parent.addChild(make_unique<DIE>((dwarf::Tag)Tag));
+ DIE &Die = *Parent.getChildren().back();
if (N)
- insertDIE(N, Die);
+ insertDIE(N, &Die);
return Die;
}
/// addBlock - Add block data.
///
-void CompileUnit::addBlock(DIE *Die, dwarf::Attribute Attribute,
- DIEBlock *Block) {
+void DwarfUnit::addBlock(DIE &Die, dwarf::Attribute Attribute, DIELoc *Loc) {
+ Loc->ComputeSize(Asm);
+ DIELocs.push_back(Loc); // Memoize so we can call the destructor later on.
+ Die.addValue(Attribute, Loc->BestForm(DD->getDwarfVersion()), Loc);
+}
+
+void DwarfUnit::addBlock(DIE &Die, dwarf::Attribute Attribute,
+ DIEBlock *Block) {
Block->ComputeSize(Asm);
DIEBlocks.push_back(Block); // Memoize so we can call the destructor later on.
- Die->addValue(Attribute, Block->BestForm(), Block);
+ Die.addValue(Attribute, Block->BestForm(), Block);
}
/// addSourceLine - Add location information to specified debug information
/// entry.
-void CompileUnit::addSourceLine(DIE *Die, DIVariable V) {
- // Verify variable.
- if (!V.isVariable())
- return;
-
- unsigned Line = V.getLineNumber();
+void DwarfUnit::addSourceLine(DIE &Die, unsigned Line, StringRef File,
+ StringRef Directory) {
if (Line == 0)
return;
- unsigned FileID =
- DD->getOrCreateSourceID(V.getContext().getFilename(),
- V.getContext().getDirectory(), getUniqueID());
+
+ unsigned FileID = getOrCreateSourceID(File, Directory);
assert(FileID && "Invalid file id");
addUInt(Die, dwarf::DW_AT_decl_file, None, FileID);
addUInt(Die, dwarf::DW_AT_decl_line, None, Line);
@@ -355,98 +428,59 @@ void CompileUnit::addSourceLine(DIE *Die, DIVariable V) {
/// addSourceLine - Add location information to specified debug information
/// entry.
-void CompileUnit::addSourceLine(DIE *Die, DIGlobalVariable G) {
- // Verify global variable.
- if (!G.isGlobalVariable())
- return;
+void DwarfUnit::addSourceLine(DIE &Die, DIVariable V) {
+ assert(V.isVariable());
- unsigned Line = G.getLineNumber();
- if (Line == 0)
- return;
- unsigned FileID =
- DD->getOrCreateSourceID(G.getFilename(), G.getDirectory(), getUniqueID());
- assert(FileID && "Invalid file id");
- addUInt(Die, dwarf::DW_AT_decl_file, None, FileID);
- addUInt(Die, dwarf::DW_AT_decl_line, None, Line);
+ addSourceLine(Die, V.getLineNumber(), V.getContext().getFilename(),
+ V.getContext().getDirectory());
}
/// addSourceLine - Add location information to specified debug information
/// entry.
-void CompileUnit::addSourceLine(DIE *Die, DISubprogram SP) {
- // Verify subprogram.
- if (!SP.isSubprogram())
- return;
+void DwarfUnit::addSourceLine(DIE &Die, DIGlobalVariable G) {
+ assert(G.isGlobalVariable());
- // If the line number is 0, don't add it.
- unsigned Line = SP.getLineNumber();
- if (Line == 0)
- return;
+ addSourceLine(Die, G.getLineNumber(), G.getFilename(), G.getDirectory());
+}
- unsigned FileID = DD->getOrCreateSourceID(SP.getFilename(), SP.getDirectory(),
- getUniqueID());
- assert(FileID && "Invalid file id");
- addUInt(Die, dwarf::DW_AT_decl_file, None, FileID);
- addUInt(Die, dwarf::DW_AT_decl_line, None, Line);
+/// addSourceLine - Add location information to specified debug information
+/// entry.
+void DwarfUnit::addSourceLine(DIE &Die, DISubprogram SP) {
+ assert(SP.isSubprogram());
+
+ addSourceLine(Die, SP.getLineNumber(), SP.getFilename(), SP.getDirectory());
}
/// addSourceLine - Add location information to specified debug information
/// entry.
-void CompileUnit::addSourceLine(DIE *Die, DIType Ty) {
- // Verify type.
- if (!Ty.isType())
- return;
+void DwarfUnit::addSourceLine(DIE &Die, DIType Ty) {
+ assert(Ty.isType());
- unsigned Line = Ty.getLineNumber();
- if (Line == 0)
- return;
- unsigned FileID = DD->getOrCreateSourceID(Ty.getFilename(), Ty.getDirectory(),
- getUniqueID());
- assert(FileID && "Invalid file id");
- addUInt(Die, dwarf::DW_AT_decl_file, None, FileID);
- addUInt(Die, dwarf::DW_AT_decl_line, None, Line);
+ addSourceLine(Die, Ty.getLineNumber(), Ty.getFilename(), Ty.getDirectory());
}
/// addSourceLine - Add location information to specified debug information
/// entry.
-void CompileUnit::addSourceLine(DIE *Die, DIObjCProperty Ty) {
- // Verify type.
- if (!Ty.isObjCProperty())
- return;
+void DwarfUnit::addSourceLine(DIE &Die, DIObjCProperty Ty) {
+ assert(Ty.isObjCProperty());
- unsigned Line = Ty.getLineNumber();
- if (Line == 0)
- return;
DIFile File = Ty.getFile();
- unsigned FileID = DD->getOrCreateSourceID(File.getFilename(),
- File.getDirectory(), getUniqueID());
- assert(FileID && "Invalid file id");
- addUInt(Die, dwarf::DW_AT_decl_file, None, FileID);
- addUInt(Die, dwarf::DW_AT_decl_line, None, Line);
+ addSourceLine(Die, Ty.getLineNumber(), File.getFilename(),
+ File.getDirectory());
}
/// addSourceLine - Add location information to specified debug information
/// entry.
-void CompileUnit::addSourceLine(DIE *Die, DINameSpace NS) {
- // Verify namespace.
- if (!NS.Verify())
- return;
+void DwarfUnit::addSourceLine(DIE &Die, DINameSpace NS) {
+ assert(NS.Verify());
- unsigned Line = NS.getLineNumber();
- if (Line == 0)
- return;
- StringRef FN = NS.getFilename();
-
- unsigned FileID =
- DD->getOrCreateSourceID(FN, NS.getDirectory(), getUniqueID());
- assert(FileID && "Invalid file id");
- addUInt(Die, dwarf::DW_AT_decl_file, None, FileID);
- addUInt(Die, dwarf::DW_AT_decl_line, None, Line);
+ addSourceLine(Die, NS.getLineNumber(), NS.getFilename(), NS.getDirectory());
}
/// addVariableAddress - Add DW_AT_location attribute for a
/// DbgVariable based on provided MachineLocation.
-void CompileUnit::addVariableAddress(const DbgVariable &DV, DIE *Die,
- MachineLocation Location) {
+void DwarfUnit::addVariableAddress(const DbgVariable &DV, DIE &Die,
+ MachineLocation Location) {
if (DV.variableHasComplexAddress())
addComplexAddress(DV, Die, dwarf::DW_AT_location, Location);
else if (DV.isBlockByrefVariable())
@@ -457,20 +491,53 @@ void CompileUnit::addVariableAddress(const DbgVariable &DV, DIE *Die,
}
/// addRegisterOp - Add register operand.
-void CompileUnit::addRegisterOp(DIEBlock *TheDie, unsigned Reg) {
+void DwarfUnit::addRegisterOp(DIELoc &TheDie, unsigned Reg) {
const TargetRegisterInfo *RI = Asm->TM.getRegisterInfo();
- unsigned DWReg = RI->getDwarfRegNum(Reg, false);
+ int DWReg = RI->getDwarfRegNum(Reg, false);
+ bool isSubRegister = DWReg < 0;
+
+ unsigned Idx = 0;
+
+ // Go up the super-register chain until we hit a valid dwarf register number.
+ for (MCSuperRegIterator SR(Reg, RI); SR.isValid() && DWReg < 0; ++SR) {
+ DWReg = RI->getDwarfRegNum(*SR, false);
+ if (DWReg >= 0)
+ Idx = RI->getSubRegIndex(*SR, Reg);
+ }
+
+ if (DWReg < 0) {
+ DEBUG(dbgs() << "Invalid Dwarf register number.\n");
+ addUInt(TheDie, dwarf::DW_FORM_data1, dwarf::DW_OP_nop);
+ return;
+ }
+
+ // Emit register
if (DWReg < 32)
addUInt(TheDie, dwarf::DW_FORM_data1, dwarf::DW_OP_reg0 + DWReg);
else {
addUInt(TheDie, dwarf::DW_FORM_data1, dwarf::DW_OP_regx);
addUInt(TheDie, dwarf::DW_FORM_udata, DWReg);
}
+
+ // Emit Mask
+ if (isSubRegister) {
+ unsigned Size = RI->getSubRegIdxSize(Idx);
+ unsigned Offset = RI->getSubRegIdxOffset(Idx);
+ if (Offset > 0) {
+ addUInt(TheDie, dwarf::DW_FORM_data1, dwarf::DW_OP_bit_piece);
+ addUInt(TheDie, dwarf::DW_FORM_data1, Size);
+ addUInt(TheDie, dwarf::DW_FORM_data1, Offset);
+ } else {
+ unsigned ByteSize = Size / 8; // Assuming 8 bits per byte.
+ addUInt(TheDie, dwarf::DW_FORM_data1, dwarf::DW_OP_piece);
+ addUInt(TheDie, dwarf::DW_FORM_data1, ByteSize);
+ }
+ }
}
/// addRegisterOffset - Add register offset.
-void CompileUnit::addRegisterOffset(DIEBlock *TheDie, unsigned Reg,
- int64_t Offset) {
+void DwarfUnit::addRegisterOffset(DIELoc &TheDie, unsigned Reg,
+ int64_t Offset) {
const TargetRegisterInfo *RI = Asm->TM.getRegisterInfo();
unsigned DWReg = RI->getDwarfRegNum(Reg, false);
const TargetRegisterInfo *TRI = Asm->TM.getRegisterInfo();
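The new addRegisterOp logic above walks up the super-register chain until it finds a register with a DWARF number, then describes the original sub-register with a piece expression. The stand-alone sketch below covers just that last decision (the two opcode values match the DWARF spec; the rest is illustrative and does not use TargetRegisterInfo): a piece at a non-zero bit offset needs DW_OP_bit_piece, while a piece starting at bit 0 can be described with a byte-sized DW_OP_piece.

#include <cstdio>

enum : unsigned { DW_OP_piece = 0x93, DW_OP_bit_piece = 0x9d };

// Print the piece operation appended for a sub-register of the given size
// and bit offset within its super-register.
static void describeSubRegister(unsigned SizeInBits, unsigned OffsetInBits) {
  if (OffsetInBits > 0)
    std::printf("DW_OP_bit_piece (0x%x) size=%u offset=%u\n", DW_OP_bit_piece,
                SizeInBits, OffsetInBits);
  else
    std::printf("DW_OP_piece (0x%x) bytes=%u\n", DW_OP_piece, SizeInBits / 8);
}

int main() {
  describeSubRegister(32, 0);  // low half of a 64-bit register
  describeSubRegister(16, 48); // topmost 16 bits
}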
@@ -488,59 +555,59 @@ void CompileUnit::addRegisterOffset(DIEBlock *TheDie, unsigned Reg,
/// addAddress - Add an address attribute to a die based on the location
/// provided.
-void CompileUnit::addAddress(DIE *Die, dwarf::Attribute Attribute,
- const MachineLocation &Location, bool Indirect) {
- DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
+void DwarfUnit::addAddress(DIE &Die, dwarf::Attribute Attribute,
+ const MachineLocation &Location, bool Indirect) {
+ DIELoc *Loc = new (DIEValueAllocator) DIELoc();
if (Location.isReg() && !Indirect)
- addRegisterOp(Block, Location.getReg());
+ addRegisterOp(*Loc, Location.getReg());
else {
- addRegisterOffset(Block, Location.getReg(), Location.getOffset());
+ addRegisterOffset(*Loc, Location.getReg(), Location.getOffset());
if (Indirect && !Location.isReg()) {
- addUInt(Block, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
+ addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
}
}
// Now attach the location information to the DIE.
- addBlock(Die, Attribute, Block);
+ addBlock(Die, Attribute, Loc);
}
/// addComplexAddress - Start with the address based on the location provided,
/// and generate the DWARF information necessary to find the actual variable
-/// given the extra address information encoded in the DIVariable, starting from
-/// the starting location. Add the DWARF information to the die.
+/// given the extra address information encoded in the DbgVariable, starting
+/// from the starting location. Add the DWARF information to the die.
///
-void CompileUnit::addComplexAddress(const DbgVariable &DV, DIE *Die,
- dwarf::Attribute Attribute,
- const MachineLocation &Location) {
- DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
+void DwarfUnit::addComplexAddress(const DbgVariable &DV, DIE &Die,
+ dwarf::Attribute Attribute,
+ const MachineLocation &Location) {
+ DIELoc *Loc = new (DIEValueAllocator) DIELoc();
unsigned N = DV.getNumAddrElements();
unsigned i = 0;
if (Location.isReg()) {
if (N >= 2 && DV.getAddrElement(0) == DIBuilder::OpPlus) {
// If first address element is OpPlus then emit
// DW_OP_breg + Offset instead of DW_OP_reg + Offset.
- addRegisterOffset(Block, Location.getReg(), DV.getAddrElement(1));
+ addRegisterOffset(*Loc, Location.getReg(), DV.getAddrElement(1));
i = 2;
} else
- addRegisterOp(Block, Location.getReg());
+ addRegisterOp(*Loc, Location.getReg());
} else
- addRegisterOffset(Block, Location.getReg(), Location.getOffset());
+ addRegisterOffset(*Loc, Location.getReg(), Location.getOffset());
for (; i < N; ++i) {
uint64_t Element = DV.getAddrElement(i);
if (Element == DIBuilder::OpPlus) {
- addUInt(Block, dwarf::DW_FORM_data1, dwarf::DW_OP_plus_uconst);
- addUInt(Block, dwarf::DW_FORM_udata, DV.getAddrElement(++i));
+ addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_plus_uconst);
+ addUInt(*Loc, dwarf::DW_FORM_udata, DV.getAddrElement(++i));
} else if (Element == DIBuilder::OpDeref) {
if (!Location.isReg())
- addUInt(Block, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
+ addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
} else
llvm_unreachable("unknown DIBuilder Opcode");
}
// Now attach the location information to the DIE.
- addBlock(Die, Attribute, Block);
+ addBlock(Die, Attribute, Loc);
}
/* Byref variables, in Blocks, are declared by the programmer as "SomeType
@@ -603,9 +670,9 @@ void CompileUnit::addComplexAddress(const DbgVariable &DV, DIE *Die,
/// starting location. Add the DWARF information to the die. For
/// more information, read large comment just above here.
///
-void CompileUnit::addBlockByrefAddress(const DbgVariable &DV, DIE *Die,
- dwarf::Attribute Attribute,
- const MachineLocation &Location) {
+void DwarfUnit::addBlockByrefAddress(const DbgVariable &DV, DIE &Die,
+ dwarf::Attribute Attribute,
+ const MachineLocation &Location) {
DIType Ty = DV.getType();
DIType TmpTy = Ty;
uint16_t Tag = Ty.getTag();
@@ -642,71 +709,81 @@ void CompileUnit::addBlockByrefAddress(const DbgVariable &DV, DIE *Die,
// Decode the original location, and use that as the start of the byref
// variable's location.
- DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
+ DIELoc *Loc = new (DIEValueAllocator) DIELoc();
if (Location.isReg())
- addRegisterOp(Block, Location.getReg());
+ addRegisterOp(*Loc, Location.getReg());
else
- addRegisterOffset(Block, Location.getReg(), Location.getOffset());
+ addRegisterOffset(*Loc, Location.getReg(), Location.getOffset());
// If we started with a pointer to the __Block_byref... struct, then
// the first thing we need to do is dereference the pointer (DW_OP_deref).
if (isPointer)
- addUInt(Block, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
+ addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
// Next add the offset for the '__forwarding' field:
// DW_OP_plus_uconst ForwardingFieldOffset. Note there's no point in
// adding the offset if it's 0.
if (forwardingFieldOffset > 0) {
- addUInt(Block, dwarf::DW_FORM_data1, dwarf::DW_OP_plus_uconst);
- addUInt(Block, dwarf::DW_FORM_udata, forwardingFieldOffset);
+ addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_plus_uconst);
+ addUInt(*Loc, dwarf::DW_FORM_udata, forwardingFieldOffset);
}
// Now dereference the __forwarding field to get to the real __Block_byref
// struct: DW_OP_deref.
- addUInt(Block, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
+ addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
// Now that we've got the real __Block_byref... struct, add the offset
// for the variable's field to get to the location of the actual variable:
// DW_OP_plus_uconst varFieldOffset. Again, don't add if it's 0.
if (varFieldOffset > 0) {
- addUInt(Block, dwarf::DW_FORM_data1, dwarf::DW_OP_plus_uconst);
- addUInt(Block, dwarf::DW_FORM_udata, varFieldOffset);
+ addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_plus_uconst);
+ addUInt(*Loc, dwarf::DW_FORM_udata, varFieldOffset);
}
// Now attach the location information to the DIE.
- addBlock(Die, Attribute, Block);
-}
-
-/// isTypeSigned - Return true if the type is signed.
-static bool isTypeSigned(DwarfDebug *DD, DIType Ty, int *SizeInBits) {
- if (Ty.isDerivedType())
- return isTypeSigned(DD, DD->resolve(DIDerivedType(Ty).getTypeDerivedFrom()),
- SizeInBits);
- if (Ty.isBasicType())
- if (DIBasicType(Ty).getEncoding() == dwarf::DW_ATE_signed ||
- DIBasicType(Ty).getEncoding() == dwarf::DW_ATE_signed_char) {
- *SizeInBits = Ty.getSizeInBits();
- return true;
- }
- return false;
+ addBlock(Die, Attribute, Loc);
}
/// Return true if type encoding is unsigned.
static bool isUnsignedDIType(DwarfDebug *DD, DIType Ty) {
DIDerivedType DTy(Ty);
- if (DTy.isDerivedType())
- return isUnsignedDIType(DD, DD->resolve(DTy.getTypeDerivedFrom()));
-
- DIBasicType BTy(Ty);
- if (BTy.isBasicType()) {
- unsigned Encoding = BTy.getEncoding();
- if (Encoding == dwarf::DW_ATE_unsigned ||
- Encoding == dwarf::DW_ATE_unsigned_char ||
- Encoding == dwarf::DW_ATE_boolean)
+ if (DTy.isDerivedType()) {
+ dwarf::Tag T = (dwarf::Tag)Ty.getTag();
+ // Encode pointer constants as unsigned bytes. This is used at least for
+ // null pointer constant emission.
+ // FIXME: reference and rvalue_reference /probably/ shouldn't be allowed
+ // here, but accept them for now due to a bug in SROA producing bogus
+ // dbg.values.
+ if (T == dwarf::DW_TAG_pointer_type ||
+ T == dwarf::DW_TAG_ptr_to_member_type ||
+ T == dwarf::DW_TAG_reference_type ||
+ T == dwarf::DW_TAG_rvalue_reference_type)
return true;
+ assert(T == dwarf::DW_TAG_typedef || T == dwarf::DW_TAG_const_type ||
+ T == dwarf::DW_TAG_volatile_type ||
+ T == dwarf::DW_TAG_restrict_type ||
+ T == dwarf::DW_TAG_enumeration_type);
+ if (DITypeRef Deriv = DTy.getTypeDerivedFrom())
+ return isUnsignedDIType(DD, DD->resolve(Deriv));
+ // FIXME: Enums without a fixed underlying type have unknown signedness
+ // here, leading to incorrectly emitted constants.
+ assert(DTy.getTag() == dwarf::DW_TAG_enumeration_type);
+ return false;
}
- return false;
+
+ DIBasicType BTy(Ty);
+ assert(BTy.isBasicType());
+ unsigned Encoding = BTy.getEncoding();
+ assert((Encoding == dwarf::DW_ATE_unsigned ||
+ Encoding == dwarf::DW_ATE_unsigned_char ||
+ Encoding == dwarf::DW_ATE_signed ||
+ Encoding == dwarf::DW_ATE_signed_char ||
+ Encoding == dwarf::DW_ATE_UTF || Encoding == dwarf::DW_ATE_boolean) &&
+ "Unsupported encoding");
+ return (Encoding == dwarf::DW_ATE_unsigned ||
+ Encoding == dwarf::DW_ATE_unsigned_char ||
+ Encoding == dwarf::DW_ATE_UTF || Encoding == dwarf::DW_ATE_boolean);
}
/// If this type is derived from a base type then return base type size.
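The reworked isUnsignedDIType above now asserts on unexpected tags and encodings instead of quietly returning false: it treats pointer-like tags as unsigned, unwraps typedefs and qualifiers down to a basic type, and reads signedness from that type's encoding. A simplified stand-alone model of that walk (toy type representation, not the DIType API):

#include <cassert>
#include <cstdio>

// Toy debug-info type: either a basic type with an encoding, or a wrapper
// (typedef/const/pointer) around another type.
struct Ty {
  enum Kind { Basic, Typedef, Const, Pointer } K;
  enum Enc { Signed, Unsigned, Boolean } E; // meaningful for Basic only
  const Ty *Inner;                          // meaningful for wrappers only
};

static bool isUnsigned(const Ty &T) {
  switch (T.K) {
  case Ty::Pointer:
    return true; // pointer constants are emitted as unsigned bytes
  case Ty::Typedef:
  case Ty::Const:
    assert(T.Inner && "wrapper must wrap an inner type");
    return isUnsigned(*T.Inner);
  case Ty::Basic:
    return T.E == Ty::Unsigned || T.E == Ty::Boolean;
  }
  return false;
}

int main() {
  Ty U = {Ty::Basic, Ty::Unsigned, nullptr};
  Ty TD = {Ty::Typedef, Ty::Signed, &U}; // typedef of an unsigned type
  Ty P = {Ty::Pointer, Ty::Signed, nullptr};
  std::printf("%d %d %d\n", isUnsigned(U), isUnsigned(TD), isUnsigned(P));
}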
@@ -720,8 +797,9 @@ static uint64_t getBaseTypeSize(DwarfDebug *DD, DIDerivedType Ty) {
DIType BaseType = DD->resolve(Ty.getTypeDerivedFrom());
- // If this type is not derived from any type then take conservative approach.
- if (!BaseType.isValid())
+ // If this type is not derived from any type or the type is a declaration then
+ // take a conservative approach.
+ if (!BaseType.isValid() || BaseType.isForwardDecl())
return Ty.getSizeInBits();
// If this is a derived type, go ahead and get the base type, unless it's a
@@ -737,47 +815,8 @@ static uint64_t getBaseTypeSize(DwarfDebug *DD, DIDerivedType Ty) {
return BaseType.getSizeInBits();
}
-/// addConstantValue - Add constant value entry in variable DIE.
-void CompileUnit::addConstantValue(DIE *Die, const MachineOperand &MO,
- DIType Ty) {
- // FIXME: This is a bit conservative/simple - it emits negative values at
- // their maximum bit width which is a bit unfortunate (& doesn't prefer
- // udata/sdata over dataN as suggested by the DWARF spec)
- assert(MO.isImm() && "Invalid machine operand!");
- int SizeInBits = -1;
- bool SignedConstant = isTypeSigned(DD, Ty, &SizeInBits);
- dwarf::Form Form;
-
- // If we're a signed constant definitely use sdata.
- if (SignedConstant) {
- addSInt(Die, dwarf::DW_AT_const_value, dwarf::DW_FORM_sdata, MO.getImm());
- return;
- }
-
- // Else use data for now unless it's larger than we can deal with.
- switch (SizeInBits) {
- case 8:
- Form = dwarf::DW_FORM_data1;
- break;
- case 16:
- Form = dwarf::DW_FORM_data2;
- break;
- case 32:
- Form = dwarf::DW_FORM_data4;
- break;
- case 64:
- Form = dwarf::DW_FORM_data8;
- break;
- default:
- Form = dwarf::DW_FORM_udata;
- addUInt(Die, dwarf::DW_AT_const_value, Form, MO.getImm());
- return;
- }
- addUInt(Die, dwarf::DW_AT_const_value, Form, MO.getImm());
-}
-
/// addConstantFPValue - Add constant value entry in variable DIE.
-void CompileUnit::addConstantFPValue(DIE *Die, const MachineOperand &MO) {
+void DwarfUnit::addConstantFPValue(DIE &Die, const MachineOperand &MO) {
assert(MO.isFPImm() && "Invalid machine operand!");
DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
APFloat FPImm = MO.getFPImm()->getValueAPF();
@@ -794,55 +833,47 @@ void CompileUnit::addConstantFPValue(DIE *Die, const MachineOperand &MO) {
// Output the constant to DWARF one byte at a time.
for (; Start != Stop; Start += Incr)
- addUInt(Block, dwarf::DW_FORM_data1, (unsigned char)0xFF & FltPtr[Start]);
+ addUInt(*Block, dwarf::DW_FORM_data1, (unsigned char)0xFF & FltPtr[Start]);
addBlock(Die, dwarf::DW_AT_const_value, Block);
}
/// addConstantFPValue - Add constant value entry in variable DIE.
-void CompileUnit::addConstantFPValue(DIE *Die, const ConstantFP *CFP) {
+void DwarfUnit::addConstantFPValue(DIE &Die, const ConstantFP *CFP) {
// Pass this down to addConstantValue as an unsigned bag of bits.
addConstantValue(Die, CFP->getValueAPF().bitcastToAPInt(), true);
}
/// addConstantValue - Add constant value entry in variable DIE.
-void CompileUnit::addConstantValue(DIE *Die, const ConstantInt *CI,
- bool Unsigned) {
- addConstantValue(Die, CI->getValue(), Unsigned);
+void DwarfUnit::addConstantValue(DIE &Die, const ConstantInt *CI, DIType Ty) {
+ addConstantValue(Die, CI->getValue(), Ty);
+}
+
+/// addConstantValue - Add constant value entry in variable DIE.
+void DwarfUnit::addConstantValue(DIE &Die, const MachineOperand &MO,
+ DIType Ty) {
+ assert(MO.isImm() && "Invalid machine operand!");
+
+ addConstantValue(Die, isUnsignedDIType(DD, Ty), MO.getImm());
+}
+
+void DwarfUnit::addConstantValue(DIE &Die, bool Unsigned, uint64_t Val) {
+ // FIXME: This is a bit conservative/simple - it emits negative values always
+ // sign extended to 64 bits rather than minimizing the number of bytes.
+ addUInt(Die, dwarf::DW_AT_const_value,
+ Unsigned ? dwarf::DW_FORM_udata : dwarf::DW_FORM_sdata, Val);
+}
+
+void DwarfUnit::addConstantValue(DIE &Die, const APInt &Val, DIType Ty) {
+ addConstantValue(Die, Val, isUnsignedDIType(DD, Ty));
}
// addConstantValue - Add constant value entry in variable DIE.
-void CompileUnit::addConstantValue(DIE *Die, const APInt &Val, bool Unsigned) {
+void DwarfUnit::addConstantValue(DIE &Die, const APInt &Val, bool Unsigned) {
unsigned CIBitWidth = Val.getBitWidth();
if (CIBitWidth <= 64) {
- // If we're a signed constant definitely use sdata.
- if (!Unsigned) {
- addSInt(Die, dwarf::DW_AT_const_value, dwarf::DW_FORM_sdata,
- Val.getSExtValue());
- return;
- }
-
- // Else use data for now unless it's larger than we can deal with.
- dwarf::Form Form;
- switch (CIBitWidth) {
- case 8:
- Form = dwarf::DW_FORM_data1;
- break;
- case 16:
- Form = dwarf::DW_FORM_data2;
- break;
- case 32:
- Form = dwarf::DW_FORM_data4;
- break;
- case 64:
- Form = dwarf::DW_FORM_data8;
- break;
- default:
- addUInt(Die, dwarf::DW_AT_const_value, dwarf::DW_FORM_udata,
- Val.getZExtValue());
- return;
- }
- addUInt(Die, dwarf::DW_AT_const_value, Form, Val.getZExtValue());
+ addConstantValue(Die, Unsigned,
+ Unsigned ? Val.getZExtValue() : Val.getSExtValue());
return;
}
@@ -861,14 +892,14 @@ void CompileUnit::addConstantValue(DIE *Die, const APInt &Val, bool Unsigned) {
c = Ptr64[i / 8] >> (8 * (i & 7));
else
c = Ptr64[(NumBytes - 1 - i) / 8] >> (8 * ((NumBytes - 1 - i) & 7));
- addUInt(Block, dwarf::DW_FORM_data1, c);
+ addUInt(*Block, dwarf::DW_FORM_data1, c);
}
addBlock(Die, dwarf::DW_AT_const_value, Block);
}
/// addTemplateParams - Add template parameters into buffer.
-void CompileUnit::addTemplateParams(DIE &Buffer, DIArray TParams) {
+void DwarfUnit::addTemplateParams(DIE &Buffer, DIArray TParams) {
// Add template parameters.
for (unsigned i = 0, e = TParams.getNumElements(); i != e; ++i) {
DIDescriptor Element = TParams.getElement(i);
@@ -882,9 +913,9 @@ void CompileUnit::addTemplateParams(DIE &Buffer, DIArray TParams) {
}
/// getOrCreateContextDIE - Get context owner's DIE.
-DIE *CompileUnit::getOrCreateContextDIE(DIScope Context) {
+DIE *DwarfUnit::getOrCreateContextDIE(DIScope Context) {
if (!Context || Context.isFile())
- return getCUDie();
+ return &getUnitDie();
if (Context.isType())
return getOrCreateTypeDIE(DIType(Context));
if (Context.isNameSpace())
@@ -894,37 +925,72 @@ DIE *CompileUnit::getOrCreateContextDIE(DIScope Context) {
return getDIE(Context);
}
+DIE *DwarfUnit::createTypeDIE(DICompositeType Ty) {
+ DIScope Context = resolve(Ty.getContext());
+ DIE *ContextDIE = getOrCreateContextDIE(Context);
+
+ if (DIE *TyDIE = getDIE(Ty))
+ return TyDIE;
+
+ // Create new type.
+ DIE &TyDIE = createAndAddDIE(Ty.getTag(), *ContextDIE, Ty);
+
+ constructTypeDIE(TyDIE, Ty);
+
+ updateAcceleratorTables(Context, Ty, TyDIE);
+ return &TyDIE;
+}
+
/// getOrCreateTypeDIE - Find existing DIE or create new DIE for the
/// given DIType.
-DIE *CompileUnit::getOrCreateTypeDIE(const MDNode *TyNode) {
+DIE *DwarfUnit::getOrCreateTypeDIE(const MDNode *TyNode) {
if (!TyNode)
- return NULL;
+ return nullptr;
DIType Ty(TyNode);
assert(Ty.isType());
+ assert(Ty == resolve(Ty.getRef()) &&
+ "type was not uniqued, possible ODR violation.");
+
+ // DW_TAG_restrict_type is not supported in DWARF2
+ if (Ty.getTag() == dwarf::DW_TAG_restrict_type && DD->getDwarfVersion() <= 2)
+ return getOrCreateTypeDIE(resolve(DIDerivedType(Ty).getTypeDerivedFrom()));
// Construct the context before querying for the existence of the DIE in case
// such construction creates the DIE.
- DIE *ContextDIE = getOrCreateContextDIE(resolve(Ty.getContext()));
+ DIScope Context = resolve(Ty.getContext());
+ DIE *ContextDIE = getOrCreateContextDIE(Context);
assert(ContextDIE);
- DIE *TyDIE = getDIE(Ty);
- if (TyDIE)
+ if (DIE *TyDIE = getDIE(Ty))
return TyDIE;
// Create new type.
- TyDIE = createAndAddDIE(Ty.getTag(), *ContextDIE, Ty);
+ DIE &TyDIE = createAndAddDIE(Ty.getTag(), *ContextDIE, Ty);
+
+ updateAcceleratorTables(Context, Ty, TyDIE);
if (Ty.isBasicType())
- constructTypeDIE(*TyDIE, DIBasicType(Ty));
- else if (Ty.isCompositeType())
- constructTypeDIE(*TyDIE, DICompositeType(Ty));
- else {
+ constructTypeDIE(TyDIE, DIBasicType(Ty));
+ else if (Ty.isCompositeType()) {
+ DICompositeType CTy(Ty);
+ if (GenerateDwarfTypeUnits && !Ty.isForwardDecl())
+ if (MDString *TypeId = CTy.getIdentifier()) {
+ DD->addDwarfTypeUnitType(getCU(), TypeId->getString(), TyDIE, CTy);
+ // Skip updating the accelerator tables since this is not the full type.
+ return &TyDIE;
+ }
+ constructTypeDIE(TyDIE, CTy);
+ } else {
assert(Ty.isDerivedType() && "Unknown kind of DIType");
- constructTypeDIE(*TyDIE, DIDerivedType(Ty));
+ constructTypeDIE(TyDIE, DIDerivedType(Ty));
}
- // If this is a named finished type then include it in the list of types
- // for the accelerator tables.
+
+ return &TyDIE;
+}
+
+void DwarfUnit::updateAcceleratorTables(DIScope Context, DIType Ty,
+ const DIE &TyDIE) {
if (!Ty.getName().empty() && !Ty.isForwardDecl()) {
bool IsImplementation = 0;
if (Ty.isCompositeType()) {
@@ -934,14 +1000,18 @@ DIE *CompileUnit::getOrCreateTypeDIE(const MDNode *TyNode) {
IsImplementation = (CT.getRunTimeLang() == 0) || CT.isObjcClassComplete();
}
unsigned Flags = IsImplementation ? dwarf::DW_FLAG_type_implementation : 0;
- addAccelType(Ty.getName(), std::make_pair(TyDIE, Flags));
- }
+ DD->addAccelType(Ty.getName(), TyDIE, Flags);
- return TyDIE;
+ if ((!Context || Context.isCompileUnit() || Context.isFile() ||
+ Context.isNameSpace()) &&
+ getCUNode().getEmissionKind() != DIBuilder::LineTablesOnly)
+ GlobalTypes[getParentContextString(Context) + Ty.getName().str()] =
+ &TyDIE;
+ }
}
/// addType - Add a new type attribute to the specified entity.
-void CompileUnit::addType(DIE *Entity, DIType Ty, dwarf::Attribute Attribute) {
+void DwarfUnit::addType(DIE &Entity, DIType Ty, dwarf::Attribute Attribute) {
assert(Ty && "Trying to add a type that doesn't exist?");
// Check for pre-existence.
@@ -956,61 +1026,17 @@ void CompileUnit::addType(DIE *Entity, DIType Ty, dwarf::Attribute Attribute) {
DIE *Buffer = getOrCreateTypeDIE(Ty);
// Set up proxy.
- Entry = createDIEEntry(Buffer);
+ Entry = createDIEEntry(*Buffer);
insertDIEEntry(Ty, Entry);
addDIEEntry(Entity, Attribute, Entry);
-
- // If this is a complete composite type then include it in the
- // list of global types.
- addGlobalType(Ty);
-}
-
-// Accelerator table mutators - add each name along with its companion
-// DIE to the proper table while ensuring that the name that we're going
-// to reference is in the string table. We do this since the names we
-// add may not only be identical to the names in the DIE.
-void CompileUnit::addAccelName(StringRef Name, DIE *Die) {
- DU->getStringPoolEntry(Name);
- std::vector<DIE *> &DIEs = AccelNames[Name];
- DIEs.push_back(Die);
-}
-
-void CompileUnit::addAccelObjC(StringRef Name, DIE *Die) {
- DU->getStringPoolEntry(Name);
- std::vector<DIE *> &DIEs = AccelObjC[Name];
- DIEs.push_back(Die);
-}
-
-void CompileUnit::addAccelNamespace(StringRef Name, DIE *Die) {
- DU->getStringPoolEntry(Name);
- std::vector<DIE *> &DIEs = AccelNamespace[Name];
- DIEs.push_back(Die);
-}
-
-void CompileUnit::addAccelType(StringRef Name, std::pair<DIE *, unsigned> Die) {
- DU->getStringPoolEntry(Name);
- std::vector<std::pair<DIE *, unsigned> > &DIEs = AccelTypes[Name];
- DIEs.push_back(Die);
}
/// addGlobalName - Add a new global name to the compile unit.
-void CompileUnit::addGlobalName(StringRef Name, DIE *Die, DIScope Context) {
+void DwarfUnit::addGlobalName(StringRef Name, DIE &Die, DIScope Context) {
+ if (getCUNode().getEmissionKind() == DIBuilder::LineTablesOnly)
+ return;
std::string FullName = getParentContextString(Context) + Name.str();
- GlobalNames[FullName] = Die;
-}
-
-/// addGlobalType - Add a new global type to the compile unit.
-///
-void CompileUnit::addGlobalType(DIType Ty) {
- DIScope Context = resolve(Ty.getContext());
- if (!Ty.getName().empty() && !Ty.isForwardDecl() &&
- (!Context || Context.isCompileUnit() || Context.isFile() ||
- Context.isNameSpace()))
- if (DIEEntry *Entry = getDIEEntry(Ty)) {
- std::string FullName =
- getParentContextString(Context) + Ty.getName().str();
- GlobalTypes[FullName] = Entry->getEntry();
- }
+ GlobalNames[FullName] = &Die;
}
/// getParentContextString - Walks the metadata parent chain in a language
@@ -1018,7 +1044,7 @@ void CompileUnit::addGlobalType(DIType Ty) {
/// it as a string. This is done at the metadata level because DIEs may
/// not currently have been added to the parent context and walking the
/// DIEs looking for names is more expensive than walking the metadata.
-std::string CompileUnit::getParentContextString(DIScope Context) const {
+std::string DwarfUnit::getParentContextString(DIScope Context) const {
if (!Context)
return "";
@@ -1045,6 +1071,8 @@ std::string CompileUnit::getParentContextString(DIScope Context) const {
I != E; ++I) {
DIScope Ctx = *I;
StringRef Name = Ctx.getName();
+ if (Name.empty() && Ctx.isNameSpace())
+ Name = "(anonymous namespace)";
if (!Name.empty()) {
CS += Name;
CS += "::";
@@ -1053,43 +1081,27 @@ std::string CompileUnit::getParentContextString(DIScope Context) const {
return CS;
}
-/// addPubTypes - Add subprogram argument types for pubtypes section.
-void CompileUnit::addPubTypes(DISubprogram SP) {
- DICompositeType SPTy = SP.getType();
- uint16_t SPTag = SPTy.getTag();
- if (SPTag != dwarf::DW_TAG_subroutine_type)
- return;
-
- DIArray Args = SPTy.getTypeArray();
- for (unsigned i = 0, e = Args.getNumElements(); i != e; ++i) {
- DIType ATy(Args.getElement(i));
- if (!ATy.isType())
- continue;
- addGlobalType(ATy);
- }
-}
-
/// constructTypeDIE - Construct basic type die from DIBasicType.
-void CompileUnit::constructTypeDIE(DIE &Buffer, DIBasicType BTy) {
+void DwarfUnit::constructTypeDIE(DIE &Buffer, DIBasicType BTy) {
// Get core information.
StringRef Name = BTy.getName();
// Add name if not anonymous or intermediate type.
if (!Name.empty())
- addString(&Buffer, dwarf::DW_AT_name, Name);
+ addString(Buffer, dwarf::DW_AT_name, Name);
// An unspecified type only has a name attribute.
if (BTy.getTag() == dwarf::DW_TAG_unspecified_type)
return;
- addUInt(&Buffer, dwarf::DW_AT_encoding, dwarf::DW_FORM_data1,
+ addUInt(Buffer, dwarf::DW_AT_encoding, dwarf::DW_FORM_data1,
BTy.getEncoding());
uint64_t Size = BTy.getSizeInBits() >> 3;
- addUInt(&Buffer, dwarf::DW_AT_byte_size, None, Size);
+ addUInt(Buffer, dwarf::DW_AT_byte_size, None, Size);
}
/// constructTypeDIE - Construct derived type die from DIDerivedType.
-void CompileUnit::constructTypeDIE(DIE &Buffer, DIDerivedType DTy) {
+void DwarfUnit::constructTypeDIE(DIE &Buffer, DIDerivedType DTy) {
// Get core information.
StringRef Name = DTy.getName();
uint64_t Size = DTy.getSizeInBits() >> 3;
@@ -1098,74 +1110,43 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DIDerivedType DTy) {
// Map to main type, void will not have a type.
DIType FromTy = resolve(DTy.getTypeDerivedFrom());
if (FromTy)
- addType(&Buffer, FromTy);
+ addType(Buffer, FromTy);
// Add name if not anonymous or intermediate type.
if (!Name.empty())
- addString(&Buffer, dwarf::DW_AT_name, Name);
+ addString(Buffer, dwarf::DW_AT_name, Name);
// Add size if non-zero (derived types might be zero-sized.)
if (Size && Tag != dwarf::DW_TAG_pointer_type)
- addUInt(&Buffer, dwarf::DW_AT_byte_size, None, Size);
+ addUInt(Buffer, dwarf::DW_AT_byte_size, None, Size);
if (Tag == dwarf::DW_TAG_ptr_to_member_type)
- addDIEEntry(&Buffer, dwarf::DW_AT_containing_type,
- getOrCreateTypeDIE(resolve(DTy.getClassType())));
+ addDIEEntry(Buffer, dwarf::DW_AT_containing_type,
+ *getOrCreateTypeDIE(resolve(DTy.getClassType())));
// Add source line info if available and TyDesc is not a forward declaration.
if (!DTy.isForwardDecl())
- addSourceLine(&Buffer, DTy);
+ addSourceLine(Buffer, DTy);
}
/// constructSubprogramArguments - Construct function argument DIEs.
-void CompileUnit::constructSubprogramArguments(DIE &Buffer, DIArray Args) {
- for (unsigned i = 1, N = Args.getNumElements(); i < N; ++i) {
- DIDescriptor Ty = Args.getElement(i);
- if (Ty.isUnspecifiedParameter()) {
- assert(i == N-1 && "ellipsis must be the last argument");
- createAndAddDIE(dwarf::DW_TAG_unspecified_parameters, Buffer);
- } else {
- DIE *Arg = createAndAddDIE(dwarf::DW_TAG_formal_parameter, Buffer);
- addType(Arg, DIType(Ty));
- if (DIType(Ty).isArtificial())
- addFlag(Arg, dwarf::DW_AT_artificial);
- }
+void DwarfUnit::constructSubprogramArguments(DIE &Buffer, DIArray Args) {
+ for (unsigned i = 1, N = Args.getNumElements(); i < N; ++i) {
+ DIDescriptor Ty = Args.getElement(i);
+ if (Ty.isUnspecifiedParameter()) {
+ assert(i == N-1 && "Unspecified parameter must be the last argument");
+ createAndAddDIE(dwarf::DW_TAG_unspecified_parameters, Buffer);
+ } else {
+ DIE &Arg = createAndAddDIE(dwarf::DW_TAG_formal_parameter, Buffer);
+ addType(Arg, DIType(Ty));
+ if (DIType(Ty).isArtificial())
+ addFlag(Arg, dwarf::DW_AT_artificial);
}
-}
-
-/// Return true if the type is appropriately scoped to be contained inside
-/// its own type unit.
-static bool isTypeUnitScoped(DIType Ty, const DwarfDebug *DD) {
- DIScope Parent = DD->resolve(Ty.getContext());
- while (Parent) {
- // Don't generate a hash for anything scoped inside a function.
- if (Parent.isSubprogram())
- return false;
- Parent = DD->resolve(Parent.getContext());
- }
- return true;
-}
-
-/// Return true if the type should be split out into a type unit.
-static bool shouldCreateTypeUnit(DICompositeType CTy, const DwarfDebug *DD) {
- uint16_t Tag = CTy.getTag();
-
- switch (Tag) {
- case dwarf::DW_TAG_structure_type:
- case dwarf::DW_TAG_union_type:
- case dwarf::DW_TAG_enumeration_type:
- case dwarf::DW_TAG_class_type:
- // If this is a class, structure, union, or enumeration type
- // that is a definition (not a declaration), and not scoped
- // inside a function then separate this out as a type unit.
- return !CTy.isForwardDecl() && isTypeUnitScoped(CTy, DD);
- default:
- return false;
}
}
/// constructTypeDIE - Construct type DIE from DICompositeType.
-void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
- // Get core information.
+void DwarfUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
+ // Add name if not anonymous or intermediate type.
StringRef Name = CTy.getName();
uint64_t Size = CTy.getSizeInBits() >> 3;
@@ -1183,7 +1164,7 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
DIArray Elements = CTy.getTypeArray();
DIType RTy(Elements.getElement(0));
if (RTy)
- addType(&Buffer, RTy);
+ addType(Buffer, RTy);
bool isPrototyped = true;
if (Elements.getNumElements() == 2 &&
@@ -1198,7 +1179,13 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
if (isPrototyped &&
(Language == dwarf::DW_LANG_C89 || Language == dwarf::DW_LANG_C99 ||
Language == dwarf::DW_LANG_ObjC))
- addFlag(&Buffer, dwarf::DW_AT_prototyped);
+ addFlag(Buffer, dwarf::DW_AT_prototyped);
+
+ if (CTy.isLValueReference())
+ addFlag(Buffer, dwarf::DW_AT_reference);
+
+ if (CTy.isRValueReference())
+ addFlag(Buffer, dwarf::DW_AT_rvalue_reference);
} break;
case dwarf::DW_TAG_structure_type:
case dwarf::DW_TAG_union_type:
@@ -1207,25 +1194,12 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
DIArray Elements = CTy.getTypeArray();
for (unsigned i = 0, N = Elements.getNumElements(); i < N; ++i) {
DIDescriptor Element = Elements.getElement(i);
- DIE *ElemDie = NULL;
- if (Element.isSubprogram()) {
- DISubprogram SP(Element);
- ElemDie = getOrCreateSubprogramDIE(SP);
- if (SP.isProtected())
- addUInt(ElemDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
- dwarf::DW_ACCESS_protected);
- else if (SP.isPrivate())
- addUInt(ElemDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
- dwarf::DW_ACCESS_private);
- else
- addUInt(ElemDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
- dwarf::DW_ACCESS_public);
- if (SP.isExplicit())
- addFlag(ElemDie, dwarf::DW_AT_explicit);
- } else if (Element.isDerivedType()) {
+ if (Element.isSubprogram())
+ getOrCreateSubprogramDIE(DISubprogram(Element));
+ else if (Element.isDerivedType()) {
DIDerivedType DDTy(Element);
if (DDTy.getTag() == dwarf::DW_TAG_friend) {
- ElemDie = createAndAddDIE(dwarf::DW_TAG_friend, Buffer);
+ DIE &ElemDie = createAndAddDIE(dwarf::DW_TAG_friend, Buffer);
addType(ElemDie, resolve(DDTy.getTypeDerivedFrom()),
dwarf::DW_AT_friend);
} else if (DDTy.isStaticMember()) {
@@ -1235,10 +1209,11 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
}
} else if (Element.isObjCProperty()) {
DIObjCProperty Property(Element);
- ElemDie = createAndAddDIE(Property.getTag(), Buffer);
+ DIE &ElemDie = createAndAddDIE(Property.getTag(), Buffer);
StringRef PropertyName = Property.getObjCPropertyName();
addString(ElemDie, dwarf::DW_AT_APPLE_property_name, PropertyName);
- addType(ElemDie, Property.getType());
+ if (Property.getType())
+ addType(ElemDie, Property.getType());
addSourceLine(ElemDie, Property);
StringRef GetterName = Property.getObjCPropertyGetterName();
if (!GetterName.empty())
@@ -1273,15 +1248,15 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
}
if (CTy.isAppleBlockExtension())
- addFlag(&Buffer, dwarf::DW_AT_APPLE_block);
+ addFlag(Buffer, dwarf::DW_AT_APPLE_block);
DICompositeType ContainingType(resolve(CTy.getContainingType()));
if (ContainingType)
- addDIEEntry(&Buffer, dwarf::DW_AT_containing_type,
- getOrCreateTypeDIE(ContainingType));
+ addDIEEntry(Buffer, dwarf::DW_AT_containing_type,
+ *getOrCreateTypeDIE(ContainingType));
if (CTy.isObjcClassComplete())
- addFlag(&Buffer, dwarf::DW_AT_APPLE_objc_complete_type);
+ addFlag(Buffer, dwarf::DW_AT_APPLE_objc_complete_type);
// Add template parameters to a class, structure or union types.
// FIXME: The support isn't in the metadata for this yet.
@@ -1297,7 +1272,7 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
// Add name if not anonymous or intermediate type.
if (!Name.empty())
- addString(&Buffer, dwarf::DW_AT_name, Name);
+ addString(Buffer, dwarf::DW_AT_name, Name);
if (Tag == dwarf::DW_TAG_enumeration_type ||
Tag == dwarf::DW_TAG_class_type || Tag == dwarf::DW_TAG_structure_type ||
@@ -1305,37 +1280,32 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
// Add size if non-zero (derived types might be zero-sized.)
// TODO: Do we care about size for enum forward declarations?
if (Size)
- addUInt(&Buffer, dwarf::DW_AT_byte_size, None, Size);
+ addUInt(Buffer, dwarf::DW_AT_byte_size, None, Size);
else if (!CTy.isForwardDecl())
// Add zero size if it is not a forward declaration.
- addUInt(&Buffer, dwarf::DW_AT_byte_size, None, 0);
+ addUInt(Buffer, dwarf::DW_AT_byte_size, None, 0);
// If we're a forward decl, say so.
if (CTy.isForwardDecl())
- addFlag(&Buffer, dwarf::DW_AT_declaration);
+ addFlag(Buffer, dwarf::DW_AT_declaration);
// Add source line info if available.
if (!CTy.isForwardDecl())
- addSourceLine(&Buffer, CTy);
+ addSourceLine(Buffer, CTy);
// No harm in adding the runtime language to the declaration.
unsigned RLang = CTy.getRunTimeLang();
if (RLang)
- addUInt(&Buffer, dwarf::DW_AT_APPLE_runtime_class, dwarf::DW_FORM_data1,
+ addUInt(Buffer, dwarf::DW_AT_APPLE_runtime_class, dwarf::DW_FORM_data1,
RLang);
}
- // If this is a type applicable to a type unit it then add it to the
- // list of types we'll compute a hash for later.
- if (shouldCreateTypeUnit(CTy, DD))
- DD->addTypeUnitType(&Buffer);
}
/// constructTemplateTypeParameterDIE - Construct new DIE for the given
/// DITemplateTypeParameter.
-void
-CompileUnit::constructTemplateTypeParameterDIE(DIE &Buffer,
- DITemplateTypeParameter TP) {
- DIE *ParamDIE =
+void DwarfUnit::constructTemplateTypeParameterDIE(DIE &Buffer,
+ DITemplateTypeParameter TP) {
+ DIE &ParamDIE =
createAndAddDIE(dwarf::DW_TAG_template_type_parameter, Buffer);
// Add the type if it exists, it could be void and therefore no type.
if (TP.getType())
@@ -1347,9 +1317,9 @@ CompileUnit::constructTemplateTypeParameterDIE(DIE &Buffer,
/// constructTemplateValueParameterDIE - Construct new DIE for the given
/// DITemplateValueParameter.
void
-CompileUnit::constructTemplateValueParameterDIE(DIE &Buffer,
- DITemplateValueParameter VP) {
- DIE *ParamDIE = createAndAddDIE(VP.getTag(), Buffer);
+DwarfUnit::constructTemplateValueParameterDIE(DIE &Buffer,
+ DITemplateValueParameter VP) {
+ DIE &ParamDIE = createAndAddDIE(VP.getTag(), Buffer);
// Add the type if there is one, template template and template parameter
// packs will not have a type.
@@ -1359,17 +1329,16 @@ CompileUnit::constructTemplateValueParameterDIE(DIE &Buffer,
addString(ParamDIE, dwarf::DW_AT_name, VP.getName());
if (Value *Val = VP.getValue()) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(Val))
- addConstantValue(ParamDIE, CI,
- isUnsignedDIType(DD, resolve(VP.getType())));
+ addConstantValue(ParamDIE, CI, resolve(VP.getType()));
else if (GlobalValue *GV = dyn_cast<GlobalValue>(Val)) {
// For declaration non-type template parameters (such as global values and
// functions)
- DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
- addOpAddress(Block, Asm->getSymbol(GV));
+ DIELoc *Loc = new (DIEValueAllocator) DIELoc();
+ addOpAddress(*Loc, Asm->getSymbol(GV));
// Emit DW_OP_stack_value to use the address as the immediate value of the
// parameter, rather than a pointer to it.
- addUInt(Block, dwarf::DW_FORM_data1, dwarf::DW_OP_stack_value);
- addBlock(ParamDIE, dwarf::DW_AT_location, Block);
+ addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_stack_value);
+ addBlock(ParamDIE, dwarf::DW_AT_location, Loc);
} else if (VP.getTag() == dwarf::DW_TAG_GNU_template_template_param) {
assert(isa<MDString>(Val));
addString(ParamDIE, dwarf::DW_AT_GNU_template_name,
@@ -1377,73 +1346,98 @@ CompileUnit::constructTemplateValueParameterDIE(DIE &Buffer,
} else if (VP.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack) {
assert(isa<MDNode>(Val));
DIArray A(cast<MDNode>(Val));
- addTemplateParams(*ParamDIE, A);
+ addTemplateParams(ParamDIE, A);
}
}
}
/// getOrCreateNameSpace - Create a DIE for DINameSpace.
-DIE *CompileUnit::getOrCreateNameSpace(DINameSpace NS) {
+DIE *DwarfUnit::getOrCreateNameSpace(DINameSpace NS) {
// Construct the context before querying for the existence of the DIE in case
// such construction creates the DIE.
DIE *ContextDIE = getOrCreateContextDIE(NS.getContext());
- DIE *NDie = getDIE(NS);
- if (NDie)
+ if (DIE *NDie = getDIE(NS))
return NDie;
- NDie = createAndAddDIE(dwarf::DW_TAG_namespace, *ContextDIE, NS);
+ DIE &NDie = createAndAddDIE(dwarf::DW_TAG_namespace, *ContextDIE, NS);
- if (!NS.getName().empty()) {
+ StringRef Name = NS.getName();
+ if (!Name.empty())
addString(NDie, dwarf::DW_AT_name, NS.getName());
- addAccelNamespace(NS.getName(), NDie);
- addGlobalName(NS.getName(), NDie, NS.getContext());
- } else
- addAccelNamespace("(anonymous namespace)", NDie);
+ else
+ Name = "(anonymous namespace)";
+ DD->addAccelNamespace(Name, NDie);
+ addGlobalName(Name, NDie, NS.getContext());
addSourceLine(NDie, NS);
- return NDie;
+ return &NDie;
}
/// getOrCreateSubprogramDIE - Create new DIE using SP.
-DIE *CompileUnit::getOrCreateSubprogramDIE(DISubprogram SP) {
+DIE *DwarfUnit::getOrCreateSubprogramDIE(DISubprogram SP) {
// Construct the context before querying for the existence of the DIE in case
// such construction creates the DIE (as is the case for member function
// declarations).
DIE *ContextDIE = getOrCreateContextDIE(resolve(SP.getContext()));
- DIE *SPDie = getDIE(SP);
- if (SPDie)
+ if (DIE *SPDie = getDIE(SP))
return SPDie;
- DISubprogram SPDecl = SP.getFunctionDeclaration();
- if (SPDecl.isSubprogram())
+ if (DISubprogram SPDecl = SP.getFunctionDeclaration()) {
// Add subprogram definitions to the CU die directly.
- ContextDIE = CUDie.get();
+ ContextDIE = &getUnitDie();
+ // Build the decl now to ensure it precedes the definition.
+ getOrCreateSubprogramDIE(SPDecl);
+ }
// DW_TAG_inlined_subroutine may refer to this DIE.
- SPDie = createAndAddDIE(dwarf::DW_TAG_subprogram, *ContextDIE, SP);
+ DIE &SPDie = createAndAddDIE(dwarf::DW_TAG_subprogram, *ContextDIE, SP);
- DIE *DeclDie = NULL;
- if (SPDecl.isSubprogram())
- DeclDie = getOrCreateSubprogramDIE(SPDecl);
+ // Stop here and fill this in later, depending on whether this
+ // subprogram turns out to have inlined instances or not.
+ if (SP.isDefinition())
+ return &SPDie;
- // Add function template parameters.
- addTemplateParams(*SPDie, SP.getTemplateParams());
-
- // If this DIE is going to refer declaration info using AT_specification
- // then there is no need to add other attributes.
- if (DeclDie) {
- // Refer function declaration directly.
- addDIEEntry(SPDie, dwarf::DW_AT_specification, DeclDie);
+ applySubprogramAttributes(SP, SPDie);
+ return &SPDie;
+}
- return SPDie;
+void DwarfUnit::applySubprogramAttributesToDefinition(DISubprogram SP, DIE &SPDie) {
+ DISubprogram SPDecl = SP.getFunctionDeclaration();
+ DIScope Context = resolve(SPDecl ? SPDecl.getContext() : SP.getContext());
+ applySubprogramAttributes(SP, SPDie);
+ addGlobalName(SP.getName(), SPDie, Context);
+}
+
+void DwarfUnit::applySubprogramAttributes(DISubprogram SP, DIE &SPDie) {
+ DIE *DeclDie = nullptr;
+ StringRef DeclLinkageName;
+ if (DISubprogram SPDecl = SP.getFunctionDeclaration()) {
+ DeclDie = getDIE(SPDecl);
+ assert(DeclDie && "This DIE should've already been constructed when the "
+ "definition DIE was created in "
+ "getOrCreateSubprogramDIE");
+ DeclLinkageName = SPDecl.getLinkageName();
}
- // Add the linkage name if we have one.
+ // Add function template parameters.
+ addTemplateParams(SPDie, SP.getTemplateParams());
+
+ // Add the linkage name if we have one and it isn't in the Decl.
StringRef LinkageName = SP.getLinkageName();
- if (!LinkageName.empty())
+ assert(((LinkageName.empty() || DeclLinkageName.empty()) ||
+ LinkageName == DeclLinkageName) &&
+ "decl has a linkage name and it is different");
+ if (!LinkageName.empty() && DeclLinkageName.empty())
addString(SPDie, dwarf::DW_AT_MIPS_linkage_name,
GlobalValue::getRealLinkageName(LinkageName));
+ if (DeclDie) {
+ // Refer to the function declaration where all the other attributes will be
+ // found.
+ addDIEEntry(SPDie, dwarf::DW_AT_specification, *DeclDie);
+ return;
+ }
+
// Constructors and operators for anonymous aggregates do not have names.
if (!SP.getName().empty())
addString(SPDie, dwarf::DW_AT_name, SP.getName());
@@ -1471,12 +1465,12 @@ DIE *CompileUnit::getOrCreateSubprogramDIE(DISubprogram SP) {
unsigned VK = SP.getVirtuality();
if (VK) {
addUInt(SPDie, dwarf::DW_AT_virtuality, dwarf::DW_FORM_data1, VK);
- DIEBlock *Block = getDIEBlock();
- addUInt(Block, dwarf::DW_FORM_data1, dwarf::DW_OP_constu);
- addUInt(Block, dwarf::DW_FORM_udata, SP.getVirtualIndex());
+ DIELoc *Block = getDIELoc();
+ addUInt(*Block, dwarf::DW_FORM_data1, dwarf::DW_OP_constu);
+ addUInt(*Block, dwarf::DW_FORM_udata, SP.getVirtualIndex());
addBlock(SPDie, dwarf::DW_AT_vtable_elem_location, Block);
ContainingTypeMap.insert(
- std::make_pair(SPDie, resolve(SP.getContainingType())));
+ std::make_pair(&SPDie, resolve(SP.getContainingType())));
}
if (!SP.isDefinition()) {
@@ -1484,7 +1478,7 @@ DIE *CompileUnit::getOrCreateSubprogramDIE(DISubprogram SP) {
// Add arguments. Do not add arguments for subprogram definition. They will
// be handled while processing variables.
- constructSubprogramArguments(*SPDie, Args);
+ constructSubprogramArguments(SPDie, Args);
}
if (SP.isArtificial())
@@ -1500,7 +1494,35 @@ DIE *CompileUnit::getOrCreateSubprogramDIE(DISubprogram SP) {
addUInt(SPDie, dwarf::DW_AT_APPLE_isa, dwarf::DW_FORM_flag, isa);
}
- return SPDie;
+ if (SP.isLValueReference())
+ addFlag(SPDie, dwarf::DW_AT_reference);
+
+ if (SP.isRValueReference())
+ addFlag(SPDie, dwarf::DW_AT_rvalue_reference);
+
+ if (SP.isProtected())
+ addUInt(SPDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
+ dwarf::DW_ACCESS_protected);
+ else if (SP.isPrivate())
+ addUInt(SPDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
+ dwarf::DW_ACCESS_private);
+ else
+ addUInt(SPDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
+ dwarf::DW_ACCESS_public);
+
+ if (SP.isExplicit())
+ addFlag(SPDie, dwarf::DW_AT_explicit);
+}
+
+void DwarfUnit::applyVariableAttributes(const DbgVariable &Var,
+ DIE &VariableDie) {
+ StringRef Name = Var.getName();
+ if (!Name.empty())
+ addString(VariableDie, dwarf::DW_AT_name, Name);
+ addSourceLine(VariableDie, Var.getVariable());
+ addType(VariableDie, Var.getType());
+ if (Var.isArtificial())
+ addFlag(VariableDie, dwarf::DW_AT_artificial);
}
// Return const expression if value is a GEP to access merged global
@@ -1510,42 +1532,40 @@ static const ConstantExpr *getMergedGlobalExpr(const Value *V) {
const ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(V);
if (!CE || CE->getNumOperands() != 3 ||
CE->getOpcode() != Instruction::GetElementPtr)
- return NULL;
+ return nullptr;
// First operand points to a global struct.
Value *Ptr = CE->getOperand(0);
if (!isa<GlobalValue>(Ptr) ||
!isa<StructType>(cast<PointerType>(Ptr->getType())->getElementType()))
- return NULL;
+ return nullptr;
// Second operand is zero.
const ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CE->getOperand(1));
if (!CI || !CI->isZero())
- return NULL;
+ return nullptr;
// Third operand is offset.
if (!isa<ConstantInt>(CE->getOperand(2)))
- return NULL;
+ return nullptr;
return CE;
}
/// createGlobalVariableDIE - create global variable DIE.
-void CompileUnit::createGlobalVariableDIE(DIGlobalVariable GV) {
-
+void DwarfCompileUnit::createGlobalVariableDIE(DIGlobalVariable GV) {
// Check for pre-existence.
if (getDIE(GV))
return;
- if (!GV.isGlobalVariable())
- return;
+ assert(GV.isGlobalVariable());
DIScope GVContext = GV.getContext();
- DIType GTy = GV.getType();
+ DIType GTy = DD->resolve(GV.getType());
// If this is a static data member definition, some attributes belong
// to the declaration DIE.
- DIE *VariableDIE = NULL;
+ DIE *VariableDIE = nullptr;
bool IsStaticMember = false;
DIDerivedType SDMDecl = GV.getStaticDataMemberDeclaration();
if (SDMDecl.Verify()) {
@@ -1563,64 +1583,66 @@ void CompileUnit::createGlobalVariableDIE(DIGlobalVariable GV) {
DIE *ContextDIE = getOrCreateContextDIE(GVContext);
// Add to map.
- VariableDIE = createAndAddDIE(GV.getTag(), *ContextDIE, GV);
+ VariableDIE = &createAndAddDIE(GV.getTag(), *ContextDIE, GV);
// Add name and type.
- addString(VariableDIE, dwarf::DW_AT_name, GV.getDisplayName());
- addType(VariableDIE, GTy);
+ addString(*VariableDIE, dwarf::DW_AT_name, GV.getDisplayName());
+ addType(*VariableDIE, GTy);
// Add scoping info.
if (!GV.isLocalToUnit())
- addFlag(VariableDIE, dwarf::DW_AT_external);
+ addFlag(*VariableDIE, dwarf::DW_AT_external);
// Add line number info.
- addSourceLine(VariableDIE, GV);
+ addSourceLine(*VariableDIE, GV);
}
// Add location.
bool addToAccelTable = false;
- DIE *VariableSpecDIE = NULL;
- bool isGlobalVariable = GV.getGlobal() != NULL;
+ DIE *VariableSpecDIE = nullptr;
+ bool isGlobalVariable = GV.getGlobal() != nullptr;
if (isGlobalVariable) {
addToAccelTable = true;
- DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
+ DIELoc *Loc = new (DIEValueAllocator) DIELoc();
const MCSymbol *Sym = Asm->getSymbol(GV.getGlobal());
if (GV.getGlobal()->isThreadLocal()) {
// FIXME: Make this work with -gsplit-dwarf.
unsigned PointerSize = Asm->getDataLayout().getPointerSize();
assert((PointerSize == 4 || PointerSize == 8) &&
"Add support for other sizes if necessary");
- const MCExpr *Expr =
- Asm->getObjFileLowering().getDebugThreadLocalSymbol(Sym);
// Based on GCC's support for TLS:
if (!DD->useSplitDwarf()) {
// 1) Start with a constNu of the appropriate pointer size
- addUInt(Block, dwarf::DW_FORM_data1,
+ addUInt(*Loc, dwarf::DW_FORM_data1,
PointerSize == 4 ? dwarf::DW_OP_const4u : dwarf::DW_OP_const8u);
// 2) containing the (relocated) offset of the TLS variable
// within the module's TLS block.
- addExpr(Block, dwarf::DW_FORM_udata, Expr);
+ addExpr(*Loc, dwarf::DW_FORM_udata,
+ Asm->getObjFileLowering().getDebugThreadLocalSymbol(Sym));
} else {
- addUInt(Block, dwarf::DW_FORM_data1, dwarf::DW_OP_GNU_const_index);
- addUInt(Block, dwarf::DW_FORM_udata, DU->getAddrPoolIndex(Expr));
+ addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_GNU_const_index);
+ addUInt(*Loc, dwarf::DW_FORM_udata,
+ DD->getAddressPool().getIndex(Sym, /* TLS */ true));
}
// 3) followed by a custom OP to make the debugger do a TLS lookup.
- addUInt(Block, dwarf::DW_FORM_data1, dwarf::DW_OP_GNU_push_tls_address);
- } else
- addOpAddress(Block, Sym);
+ addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_GNU_push_tls_address);
+ } else {
+ DD->addArangeLabel(SymbolCU(this, Sym));
+ addOpAddress(*Loc, Sym);
+ }
// Do not create specification DIE if context is either compile unit
// or a subprogram.
if (GVContext && GV.isDefinition() && !GVContext.isCompileUnit() &&
!GVContext.isFile() && !DD->isSubprogramContext(GVContext)) {
// Create specification DIE.
- VariableSpecDIE = createAndAddDIE(dwarf::DW_TAG_variable, *CUDie);
- addDIEEntry(VariableSpecDIE, dwarf::DW_AT_specification, VariableDIE);
- addBlock(VariableSpecDIE, dwarf::DW_AT_location, Block);
+ VariableSpecDIE = &createAndAddDIE(dwarf::DW_TAG_variable, UnitDie);
+ addDIEEntry(*VariableSpecDIE, dwarf::DW_AT_specification, *VariableDIE);
+ addBlock(*VariableSpecDIE, dwarf::DW_AT_location, Loc);
// A static member's declaration is already flagged as such.
if (!SDMDecl.Verify())
- addFlag(VariableDIE, dwarf::DW_AT_declaration);
+ addFlag(*VariableDIE, dwarf::DW_AT_declaration);
} else {
- addBlock(VariableDIE, dwarf::DW_AT_location, Block);
+ addBlock(*VariableDIE, dwarf::DW_AT_location, Loc);
}
// Add the linkage name.
StringRef LinkageName = GV.getLinkageName();
@@ -1628,9 +1650,10 @@ void CompileUnit::createGlobalVariableDIE(DIGlobalVariable GV) {
// From DWARF4: DIEs to which DW_AT_linkage_name may apply include:
// TAG_common_block, TAG_constant, TAG_entry_point, TAG_subprogram and
// TAG_variable.
- addString(IsStaticMember && VariableSpecDIE ? VariableSpecDIE
- : VariableDIE,
- dwarf::DW_AT_MIPS_linkage_name,
+ addString(IsStaticMember && VariableSpecDIE ? *VariableSpecDIE
+ : *VariableDIE,
+ DD->getDwarfVersion() >= 4 ? dwarf::DW_AT_linkage_name
+ : dwarf::DW_AT_MIPS_linkage_name,
GlobalValue::getRealLinkageName(LinkageName));
} else if (const ConstantInt *CI =
dyn_cast_or_null<ConstantInt>(GV.getConstant())) {
@@ -1638,41 +1661,41 @@ void CompileUnit::createGlobalVariableDIE(DIGlobalVariable GV) {
// emitting AT_const_value multiple times, we only add AT_const_value when
// it is not a static member.
if (!IsStaticMember)
- addConstantValue(VariableDIE, CI, isUnsignedDIType(DD, GTy));
+ addConstantValue(*VariableDIE, CI, GTy);
} else if (const ConstantExpr *CE = getMergedGlobalExpr(GV->getOperand(11))) {
addToAccelTable = true;
// GV is a merged global.
- DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
+ DIELoc *Loc = new (DIEValueAllocator) DIELoc();
Value *Ptr = CE->getOperand(0);
- addOpAddress(Block, Asm->getSymbol(cast<GlobalValue>(Ptr)));
- addUInt(Block, dwarf::DW_FORM_data1, dwarf::DW_OP_constu);
+ MCSymbol *Sym = Asm->getSymbol(cast<GlobalValue>(Ptr));
+ DD->addArangeLabel(SymbolCU(this, Sym));
+ addOpAddress(*Loc, Sym);
+ addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_constu);
SmallVector<Value *, 3> Idx(CE->op_begin() + 1, CE->op_end());
- addUInt(Block, dwarf::DW_FORM_udata,
+ addUInt(*Loc, dwarf::DW_FORM_udata,
Asm->getDataLayout().getIndexedOffset(Ptr->getType(), Idx));
- addUInt(Block, dwarf::DW_FORM_data1, dwarf::DW_OP_plus);
- addBlock(VariableDIE, dwarf::DW_AT_location, Block);
+ addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_plus);
+ addBlock(*VariableDIE, dwarf::DW_AT_location, Loc);
}
if (addToAccelTable) {
- DIE *AddrDIE = VariableSpecDIE ? VariableSpecDIE : VariableDIE;
- addAccelName(GV.getName(), AddrDIE);
+ DIE &AddrDIE = VariableSpecDIE ? *VariableSpecDIE : *VariableDIE;
+ DD->addAccelName(GV.getName(), AddrDIE);
// If the linkage name is different than the name, go ahead and output
// that as well into the name table.
if (GV.getLinkageName() != "" && GV.getName() != GV.getLinkageName())
- addAccelName(GV.getLinkageName(), AddrDIE);
+ DD->addAccelName(GV.getLinkageName(), AddrDIE);
}
- if (!GV.isLocalToUnit())
- addGlobalName(GV.getName(), VariableSpecDIE ? VariableSpecDIE : VariableDIE,
- GV.getContext());
+ addGlobalName(GV.getName(), VariableSpecDIE ? *VariableSpecDIE : *VariableDIE,
+ GV.getContext());
}
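For thread-local globals, the hunk above describes the location as a DWARF expression rather than a plain address. A minimal stand-alone sketch of the byte sequence those comments describe, assuming a little-endian 64-bit target without split DWARF (the helper below is illustrative only, not LLVM API):

#include <cstdint>
#include <vector>

// Sketch only: the expression bytes for "offset within the TLS block,
// then ask the debugger to resolve the thread-local address".
std::vector<uint8_t> tlsLocationSketch(uint64_t OffsetInTLSBlock) {
  std::vector<uint8_t> Expr;
  Expr.push_back(0x0e);                       // DW_OP_const8u: pointer-sized constant
  for (int i = 0; i < 8; ++i)                 // (relocated) offset of the variable
    Expr.push_back(static_cast<uint8_t>((OffsetInTLSBlock >> (8 * i)) & 0xff));
  Expr.push_back(0xe0);                       // DW_OP_GNU_push_tls_address: debugger
                                              // performs the final TLS lookup
  return Expr;
}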
/// constructSubrangeDIE - Construct subrange DIE from DISubrange.
-void CompileUnit::constructSubrangeDIE(DIE &Buffer, DISubrange SR,
- DIE *IndexTy) {
- DIE *DW_Subrange = createAndAddDIE(dwarf::DW_TAG_subrange_type, Buffer);
- addDIEEntry(DW_Subrange, dwarf::DW_AT_type, IndexTy);
+void DwarfUnit::constructSubrangeDIE(DIE &Buffer, DISubrange SR, DIE *IndexTy) {
+ DIE &DW_Subrange = createAndAddDIE(dwarf::DW_TAG_subrange_type, Buffer);
+ addDIEEntry(DW_Subrange, dwarf::DW_AT_type, *IndexTy);
// The LowerBound value defines the lower bounds which is typically zero for
// C/C++. The Count value is the number of elements. Values are 64 bit. If
@@ -1695,24 +1718,24 @@ void CompileUnit::constructSubrangeDIE(DIE &Buffer, DISubrange SR,
}
/// constructArrayTypeDIE - Construct array type DIE from DICompositeType.
-void CompileUnit::constructArrayTypeDIE(DIE &Buffer, DICompositeType CTy) {
+void DwarfUnit::constructArrayTypeDIE(DIE &Buffer, DICompositeType CTy) {
if (CTy.isVector())
- addFlag(&Buffer, dwarf::DW_AT_GNU_vector);
+ addFlag(Buffer, dwarf::DW_AT_GNU_vector);
// Emit the element type.
- addType(&Buffer, resolve(CTy.getTypeDerivedFrom()));
+ addType(Buffer, resolve(CTy.getTypeDerivedFrom()));
// Get an anonymous type for index type.
// FIXME: This type should be passed down from the front end
// as different languages may have different sizes for indexes.
DIE *IdxTy = getIndexTyDie();
if (!IdxTy) {
- // Construct an anonymous type for index type.
- IdxTy = createAndAddDIE(dwarf::DW_TAG_base_type, *CUDie.get());
- addString(IdxTy, dwarf::DW_AT_name, "int");
- addUInt(IdxTy, dwarf::DW_AT_byte_size, None, sizeof(int32_t));
- addUInt(IdxTy, dwarf::DW_AT_encoding, dwarf::DW_FORM_data1,
- dwarf::DW_ATE_signed);
+ // Construct an integer type to use for indexes.
+ IdxTy = &createAndAddDIE(dwarf::DW_TAG_base_type, UnitDie);
+ addString(*IdxTy, dwarf::DW_AT_name, "sizetype");
+ addUInt(*IdxTy, dwarf::DW_AT_byte_size, None, sizeof(int64_t));
+ addUInt(*IdxTy, dwarf::DW_AT_encoding, dwarf::DW_FORM_data1,
+ dwarf::DW_ATE_unsigned);
setIndexTyDie(IdxTy);
}
@@ -1726,66 +1749,60 @@ void CompileUnit::constructArrayTypeDIE(DIE &Buffer, DICompositeType CTy) {
}
/// constructEnumTypeDIE - Construct an enum type DIE from DICompositeType.
-void CompileUnit::constructEnumTypeDIE(DIE &Buffer, DICompositeType CTy) {
+void DwarfUnit::constructEnumTypeDIE(DIE &Buffer, DICompositeType CTy) {
DIArray Elements = CTy.getTypeArray();
// Add enumerators to enumeration type.
for (unsigned i = 0, N = Elements.getNumElements(); i < N; ++i) {
DIEnumerator Enum(Elements.getElement(i));
if (Enum.isEnumerator()) {
- DIE *Enumerator = createAndAddDIE(dwarf::DW_TAG_enumerator, Buffer);
+ DIE &Enumerator = createAndAddDIE(dwarf::DW_TAG_enumerator, Buffer);
StringRef Name = Enum.getName();
addString(Enumerator, dwarf::DW_AT_name, Name);
int64_t Value = Enum.getEnumValue();
- addSInt(Enumerator, dwarf::DW_AT_const_value, dwarf::DW_FORM_sdata, Value);
+ addSInt(Enumerator, dwarf::DW_AT_const_value, dwarf::DW_FORM_sdata,
+ Value);
}
}
DIType DTy = resolve(CTy.getTypeDerivedFrom());
if (DTy) {
- addType(&Buffer, DTy);
- addFlag(&Buffer, dwarf::DW_AT_enum_class);
+ addType(Buffer, DTy);
+ addFlag(Buffer, dwarf::DW_AT_enum_class);
}
}
/// constructContainingTypeDIEs - Construct DIEs for types that contain
/// vtables.
-void CompileUnit::constructContainingTypeDIEs() {
+void DwarfUnit::constructContainingTypeDIEs() {
for (DenseMap<DIE *, const MDNode *>::iterator CI = ContainingTypeMap.begin(),
CE = ContainingTypeMap.end();
CI != CE; ++CI) {
- DIE *SPDie = CI->first;
+ DIE &SPDie = *CI->first;
DIDescriptor D(CI->second);
if (!D)
continue;
DIE *NDie = getDIE(D);
if (!NDie)
continue;
- addDIEEntry(SPDie, dwarf::DW_AT_containing_type, NDie);
+ addDIEEntry(SPDie, dwarf::DW_AT_containing_type, *NDie);
}
}
/// constructVariableDIE - Construct a DIE for the given DbgVariable.
-DIE *CompileUnit::constructVariableDIE(DbgVariable &DV, bool isScopeAbstract) {
- StringRef Name = DV.getName();
+std::unique_ptr<DIE> DwarfUnit::constructVariableDIE(DbgVariable &DV,
+ bool Abstract) {
+ auto D = constructVariableDIEImpl(DV, Abstract);
+ DV.setDIE(*D);
+ return D;
+}
+std::unique_ptr<DIE> DwarfUnit::constructVariableDIEImpl(const DbgVariable &DV,
+ bool Abstract) {
// Define variable debug information entry.
- DIE *VariableDie = new DIE(DV.getTag());
- DbgVariable *AbsVar = DV.getAbstractVariable();
- DIE *AbsDIE = AbsVar ? AbsVar->getDIE() : NULL;
- if (AbsDIE)
- addDIEEntry(VariableDie, dwarf::DW_AT_abstract_origin, AbsDIE);
- else {
- if (!Name.empty())
- addString(VariableDie, dwarf::DW_AT_name, Name);
- addSourceLine(VariableDie, DV.getVariable());
- addType(VariableDie, DV.getType());
- }
-
- if (DV.isArtificial())
- addFlag(VariableDie, dwarf::DW_AT_artificial);
+ auto VariableDie = make_unique<DIE>(DV.getTag());
- if (isScopeAbstract) {
- DV.setDIE(VariableDie);
+ if (Abstract) {
+ applyVariableAttributes(DV, *VariableDie);
return VariableDie;
}
@@ -1793,9 +1810,7 @@ DIE *CompileUnit::constructVariableDIE(DbgVariable &DV, bool isScopeAbstract) {
unsigned Offset = DV.getDotDebugLocOffset();
if (Offset != ~0U) {
- addSectionLabel(VariableDie, dwarf::DW_AT_location,
- Asm->GetTempSymbol("debug_loc", Offset));
- DV.setDIE(VariableDie);
+ addLocationList(*VariableDie, dwarf::DW_AT_location, Offset);
return VariableDie;
}
@@ -1808,38 +1823,36 @@ DIE *CompileUnit::constructVariableDIE(DbgVariable &DV, bool isScopeAbstract) {
if (DVInsn->getOperand(1).isImm()) {
MachineLocation Location(RegOp.getReg(),
DVInsn->getOperand(1).getImm());
- addVariableAddress(DV, VariableDie, Location);
+ addVariableAddress(DV, *VariableDie, Location);
} else if (RegOp.getReg())
- addVariableAddress(DV, VariableDie, MachineLocation(RegOp.getReg()));
+ addVariableAddress(DV, *VariableDie, MachineLocation(RegOp.getReg()));
} else if (DVInsn->getOperand(0).isImm())
- addConstantValue(VariableDie, DVInsn->getOperand(0), DV.getType());
+ addConstantValue(*VariableDie, DVInsn->getOperand(0), DV.getType());
else if (DVInsn->getOperand(0).isFPImm())
- addConstantFPValue(VariableDie, DVInsn->getOperand(0));
+ addConstantFPValue(*VariableDie, DVInsn->getOperand(0));
else if (DVInsn->getOperand(0).isCImm())
- addConstantValue(VariableDie, DVInsn->getOperand(0).getCImm(),
- isUnsignedDIType(DD, DV.getType()));
+ addConstantValue(*VariableDie, DVInsn->getOperand(0).getCImm(),
+ DV.getType());
- DV.setDIE(VariableDie);
return VariableDie;
- } else {
- // .. else use frame index.
- int FI = DV.getFrameIndex();
- if (FI != ~0) {
- unsigned FrameReg = 0;
- const TargetFrameLowering *TFI = Asm->TM.getFrameLowering();
- int Offset = TFI->getFrameIndexReference(*Asm->MF, FI, FrameReg);
- MachineLocation Location(FrameReg, Offset);
- addVariableAddress(DV, VariableDie, Location);
- }
}
- DV.setDIE(VariableDie);
+ // .. else use frame index.
+ int FI = DV.getFrameIndex();
+ if (FI != ~0) {
+ unsigned FrameReg = 0;
+ const TargetFrameLowering *TFI = Asm->TM.getFrameLowering();
+ int Offset = TFI->getFrameIndexReference(*Asm->MF, FI, FrameReg);
+ MachineLocation Location(FrameReg, Offset);
+ addVariableAddress(DV, *VariableDie, Location);
+ }
+
return VariableDie;
}
/// constructMemberDIE - Construct member DIE from DIDerivedType.
-void CompileUnit::constructMemberDIE(DIE &Buffer, DIDerivedType DT) {
- DIE *MemberDie = createAndAddDIE(DT.getTag(), Buffer);
+void DwarfUnit::constructMemberDIE(DIE &Buffer, DIDerivedType DT) {
+ DIE &MemberDie = createAndAddDIE(DT.getTag(), Buffer);
StringRef Name = DT.getName();
if (!Name.empty())
addString(MemberDie, dwarf::DW_AT_name, Name);
@@ -1854,14 +1867,14 @@ void CompileUnit::constructMemberDIE(DIE &Buffer, DIDerivedType DT) {
// expression to extract appropriate offset from vtable.
// BaseAddr = ObAddr + *((*ObAddr) - Offset)
- DIEBlock *VBaseLocationDie = new (DIEValueAllocator) DIEBlock();
- addUInt(VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_dup);
- addUInt(VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
- addUInt(VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_constu);
- addUInt(VBaseLocationDie, dwarf::DW_FORM_udata, DT.getOffsetInBits());
- addUInt(VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_minus);
- addUInt(VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
- addUInt(VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_plus);
+ DIELoc *VBaseLocationDie = new (DIEValueAllocator) DIELoc();
+ addUInt(*VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_dup);
+ addUInt(*VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
+ addUInt(*VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_constu);
+ addUInt(*VBaseLocationDie, dwarf::DW_FORM_udata, DT.getOffsetInBits());
+ addUInt(*VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_minus);
+ addUInt(*VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
+ addUInt(*VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_plus);
addBlock(MemberDie, dwarf::DW_AT_data_member_location, VBaseLocationDie);
} else {
@@ -1870,10 +1883,9 @@ void CompileUnit::constructMemberDIE(DIE &Buffer, DIDerivedType DT) {
uint64_t OffsetInBytes;
if (Size != FieldSize) {
- // Handle bitfield.
- addUInt(MemberDie, dwarf::DW_AT_byte_size, None,
- getBaseTypeSize(DD, DT) >> 3);
- addUInt(MemberDie, dwarf::DW_AT_bit_size, None, DT.getSizeInBits());
+ // Handle bitfield, assume bytes are 8 bits.
+ addUInt(MemberDie, dwarf::DW_AT_byte_size, None, FieldSize/8);
+ addUInt(MemberDie, dwarf::DW_AT_bit_size, None, Size);
uint64_t Offset = DT.getOffsetInBits();
uint64_t AlignMask = ~(DT.getAlignInBits() - 1);
@@ -1886,7 +1898,7 @@ void CompileUnit::constructMemberDIE(DIE &Buffer, DIDerivedType DT) {
Offset = FieldSize - (Offset + Size);
addUInt(MemberDie, dwarf::DW_AT_bit_offset, None, Offset);
- // Here WD_AT_data_member_location points to the anonymous
+ // Here DW_AT_data_member_location points to the anonymous
// field that includes this bit field.
OffsetInBytes = FieldOffset >> 3;
} else
@@ -1894,9 +1906,9 @@ void CompileUnit::constructMemberDIE(DIE &Buffer, DIDerivedType DT) {
OffsetInBytes = DT.getOffsetInBits() >> 3;
if (DD->getDwarfVersion() <= 2) {
- DIEBlock *MemLocationDie = new (DIEValueAllocator) DIEBlock();
- addUInt(MemLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_plus_uconst);
- addUInt(MemLocationDie, dwarf::DW_FORM_udata, OffsetInBytes);
+ DIELoc *MemLocationDie = new (DIEValueAllocator) DIELoc();
+ addUInt(*MemLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_plus_uconst);
+ addUInt(*MemLocationDie, dwarf::DW_FORM_udata, OffsetInBytes);
addBlock(MemberDie, dwarf::DW_AT_data_member_location, MemLocationDie);
} else
addUInt(MemberDie, dwarf::DW_AT_data_member_location, None,
@@ -1920,17 +1932,17 @@ void CompileUnit::constructMemberDIE(DIE &Buffer, DIDerivedType DT) {
// Objective-C properties.
if (MDNode *PNode = DT.getObjCProperty())
if (DIEEntry *PropertyDie = getDIEEntry(PNode))
- MemberDie->addValue(dwarf::DW_AT_APPLE_property, dwarf::DW_FORM_ref4,
- PropertyDie);
+ MemberDie.addValue(dwarf::DW_AT_APPLE_property, dwarf::DW_FORM_ref4,
+ PropertyDie);
if (DT.isArtificial())
addFlag(MemberDie, dwarf::DW_AT_artificial);
}
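The bitfield branch above encodes the member with the pre-DWARF4 byte_size/bit_size/bit_offset triple. A worked sketch of that arithmetic for an unsigned:5 field starting at bit 3 of a 32-bit storage unit on a little-endian target (values chosen for illustration; the lines elided from the hunk also mask the offset with the alignment before this step):

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t Size = 5;       // DT.getSizeInBits(): width of the bitfield
  uint64_t FieldSize = 32; // size in bits of the storage unit it lives in
  uint64_t Offset = 3;     // DT.getOffsetInBits() relative to that unit

  uint64_t ByteSize = FieldSize / 8;  // DW_AT_byte_size  -> 4
  uint64_t BitSize  = Size;           // DW_AT_bit_size   -> 5
  // DW_AT_bit_offset counts from the most significant bit of the storage
  // unit, so the little-endian layout is flipped:
  uint64_t BitOffset = FieldSize - (Offset + Size); // DW_AT_bit_offset -> 24

  std::printf("byte_size=%llu bit_size=%llu bit_offset=%llu\n",
              (unsigned long long)ByteSize, (unsigned long long)BitSize,
              (unsigned long long)BitOffset);
  return 0;
}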
/// getOrCreateStaticMemberDIE - Create new DIE for C++ static member.
-DIE *CompileUnit::getOrCreateStaticMemberDIE(DIDerivedType DT) {
+DIE *DwarfUnit::getOrCreateStaticMemberDIE(DIDerivedType DT) {
if (!DT.Verify())
- return NULL;
+ return nullptr;
// Construct the context before querying for the existence of the DIE in case
// such construction creates the DIE.
@@ -1938,11 +1950,10 @@ DIE *CompileUnit::getOrCreateStaticMemberDIE(DIDerivedType DT) {
assert(dwarf::isType(ContextDIE->getTag()) &&
"Static member should belong to a type.");
- DIE *StaticMemberDIE = getDIE(DT);
- if (StaticMemberDIE)
+ if (DIE *StaticMemberDIE = getDIE(DT))
return StaticMemberDIE;
- StaticMemberDIE = createAndAddDIE(DT.getTag(), *ContextDIE, DT);
+ DIE &StaticMemberDIE = createAndAddDIE(DT.getTag(), *ContextDIE, DT);
DIType Ty = resolve(DT.getTypeDerivedFrom());
@@ -1965,20 +1976,95 @@ DIE *CompileUnit::getOrCreateStaticMemberDIE(DIDerivedType DT) {
dwarf::DW_ACCESS_public);
if (const ConstantInt *CI = dyn_cast_or_null<ConstantInt>(DT.getConstant()))
- addConstantValue(StaticMemberDIE, CI, isUnsignedDIType(DD, Ty));
+ addConstantValue(StaticMemberDIE, CI, Ty);
if (const ConstantFP *CFP = dyn_cast_or_null<ConstantFP>(DT.getConstant()))
addConstantFPValue(StaticMemberDIE, CFP);
- return StaticMemberDIE;
+ return &StaticMemberDIE;
}
-void CompileUnit::emitHeader(const MCSection *ASection,
- const MCSymbol *ASectionSym) {
+void DwarfUnit::emitHeader(const MCSymbol *ASectionSym) const {
Asm->OutStreamer.AddComment("DWARF version number");
Asm->EmitInt16(DD->getDwarfVersion());
Asm->OutStreamer.AddComment("Offset Into Abbrev. Section");
- Asm->EmitSectionOffset(Asm->GetTempSymbol(ASection->getLabelBeginName()),
- ASectionSym);
+ // We share one abbreviations table across all units so it's always at the
+ // start of the section. Use a relocatable offset where needed to ensure
+ // linking doesn't invalidate that offset.
+ if (ASectionSym)
+ Asm->EmitSectionOffset(ASectionSym, ASectionSym);
+ else
+ // Use a constant value when no symbol is provided.
+ Asm->EmitInt32(0);
Asm->OutStreamer.AddComment("Address Size (in bytes)");
Asm->EmitInt8(Asm->getDataLayout().getPointerSize());
}
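The fields emitted here follow the unit's initial length word, which is written elsewhere, and they account for the 2 + 4 + 1 = 7 bytes reported by getHeaderSize() in the new DwarfUnit.h further down. A rough sketch of the layout, packing assumed, for illustration only:

#include <cstdint>

// Illustration only; the real code streams each field individually.
struct CompileUnitHeaderSketch {
  uint16_t Version;      // DWARF version number      (2 bytes)
  uint32_t AbbrevOffset; // offset into .debug_abbrev (4 bytes; 0 when no symbol)
  uint8_t  AddressSize;  // target pointer size       (1 byte)
};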
+
+void DwarfUnit::addRange(RangeSpan Range) {
+ // Only add a range for this unit if we're emitting full debug.
+ if (getCUNode().getEmissionKind() == DIBuilder::FullDebug) {
+ // If we have no current ranges just add the range and return, otherwise,
+ // check the current section and CU against the previous section and CU we
+ // emitted into and the subprogram was contained within. If these are the
+ // same then extend our current range, otherwise add this as a new range.
+ if (CURanges.size() == 0 ||
+ this != DD->getPrevCU() ||
+ Asm->getCurrentSection() != DD->getPrevSection()) {
+ CURanges.push_back(Range);
+ return;
+ }
+
+ assert(&(CURanges.back().getEnd()->getSection()) ==
+ &(Range.getEnd()->getSection()) &&
+ "We can only append to a range in the same section!");
+ CURanges.back().setEnd(Range.getEnd());
+ }
+}
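The coalescing rule above reduces to: start a new range unless the previous emission came from the same CU and the same section, in which case only the end label moves. A simplified stand-alone sketch, with labels reduced to integers and the CU/section comparison reduced to a flag (names are placeholders, not LLVM API):

#include <utility>
#include <vector>

using RangeSketch = std::pair<int, int>; // start/end stand-ins for MCSymbols

void addRangeSketch(std::vector<RangeSketch> &CURanges, RangeSketch R,
                    bool SamePrevCUAndSection) {
  if (CURanges.empty() || !SamePrevCUAndSection) {
    CURanges.push_back(R);           // begin a new, disjoint range
    return;
  }
  CURanges.back().second = R.second; // extend the previous range's end label
}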
+
+void DwarfCompileUnit::initStmtList(MCSymbol *DwarfLineSectionSym) {
+ // Define start line table label for each Compile Unit.
+ MCSymbol *LineTableStartSym =
+ Asm->OutStreamer.getDwarfLineTableSymbol(getUniqueID());
+
+ stmtListIndex = UnitDie.getValues().size();
+
+ // DW_AT_stmt_list is a offset of line number information for this
+ // compile unit in debug_line section. For split dwarf this is
+ // left in the skeleton CU and so not included.
+ // The line table entries are not always emitted in assembly, so it
+ // is not okay to use line_table_start here.
+ if (Asm->MAI->doesDwarfUseRelocationsAcrossSections())
+ addSectionLabel(UnitDie, dwarf::DW_AT_stmt_list, LineTableStartSym);
+ else
+ addSectionDelta(UnitDie, dwarf::DW_AT_stmt_list, LineTableStartSym,
+ DwarfLineSectionSym);
+}
+
+void DwarfCompileUnit::applyStmtList(DIE &D) {
+ D.addValue(dwarf::DW_AT_stmt_list,
+ UnitDie.getAbbrev().getData()[stmtListIndex].getForm(),
+ UnitDie.getValues()[stmtListIndex]);
+}
+
+void DwarfTypeUnit::emitHeader(const MCSymbol *ASectionSym) const {
+ DwarfUnit::emitHeader(ASectionSym);
+ Asm->OutStreamer.AddComment("Type Signature");
+ Asm->OutStreamer.EmitIntValue(TypeSignature, sizeof(TypeSignature));
+ Asm->OutStreamer.AddComment("Type DIE Offset");
+ // In a skeleton type unit there is no type DIE so emit a zero offset.
+ Asm->OutStreamer.EmitIntValue(Ty ? Ty->getOffset() : 0,
+ sizeof(Ty->getOffset()));
+}
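A type unit header carries the common version/abbrev-offset/address-size fields and then the two extra fields emitted here; their sizes line up with the getHeaderSize() override in DwarfUnit.h below. Sketch with packed layout assumed, for illustration only:

#include <cstdint>

struct TypeUnitHeaderTrailerSketch {
  uint64_t TypeSignature; // 8-byte hash identifying the type
  uint32_t TypeDIEOffset; // offset of the type DIE; 0 in a skeleton type unit
};                        // adds sizeof(uint64_t) + sizeof(uint32_t) = 12 bytes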
+
+void DwarfTypeUnit::initSection(const MCSection *Section) {
+ assert(!this->Section);
+ this->Section = Section;
+ // Since each type unit is contained in its own COMDAT section, the begin
+ // label and the section label are the same. Using the begin label emission in
+ // DwarfDebug to emit the section label as well is slightly subtle/sneaky, but
+ // simpler than the only other alternative: lazily constructing start-of-section
+ // labels and storing a mapping in DwarfDebug (or AsmPrinter).
+ this->SectionSym = this->LabelBegin =
+ Asm->GetTempSymbol(Section->getLabelBeginName(), getUniqueID());
+ this->LabelEnd =
+ Asm->GetTempSymbol(Section->getLabelEndName(), getUniqueID());
+}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h
new file mode 100644
index 0000000..b7b83b2
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h
@@ -0,0 +1,588 @@
+//===-- llvm/CodeGen/DwarfUnit.h - Dwarf Compile Unit ---*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for writing dwarf compile unit.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CODEGEN_ASMPRINTER_DWARFCOMPILEUNIT_H
+#define CODEGEN_ASMPRINTER_DWARFCOMPILEUNIT_H
+
+#include "DIE.h"
+#include "DwarfDebug.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/IR/DIBuilder.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCDwarf.h"
+
+namespace llvm {
+
+class MachineLocation;
+class MachineOperand;
+class ConstantInt;
+class ConstantFP;
+class DbgVariable;
+class DwarfCompileUnit;
+
+// Data structure to hold a range for range lists.
+class RangeSpan {
+public:
+ RangeSpan(MCSymbol *S, MCSymbol *E) : Start(S), End(E) {}
+ const MCSymbol *getStart() const { return Start; }
+ const MCSymbol *getEnd() const { return End; }
+ void setEnd(const MCSymbol *E) { End = E; }
+
+private:
+ const MCSymbol *Start, *End;
+};
+
+class RangeSpanList {
+private:
+ // Index for locating within the debug_range section this particular span.
+ MCSymbol *RangeSym;
+ // List of ranges.
+ SmallVector<RangeSpan, 2> Ranges;
+
+public:
+ RangeSpanList(MCSymbol *Sym) : RangeSym(Sym) {}
+ MCSymbol *getSym() const { return RangeSym; }
+ const SmallVectorImpl<RangeSpan> &getRanges() const { return Ranges; }
+ void addRange(RangeSpan Range) { Ranges.push_back(Range); }
+};
+
+//===----------------------------------------------------------------------===//
+/// Unit - This dwarf writer support class manages information associated
+/// with a source file.
+class DwarfUnit {
+protected:
+ /// UniqueID - a numeric ID unique among all CUs in the module
+ unsigned UniqueID;
+
+ /// Node - MDNode for the compile unit.
+ DICompileUnit CUNode;
+
+ /// Unit debug information entry.
+ DIE UnitDie;
+
+ /// Offset of the UnitDie from beginning of debug info section.
+ unsigned DebugInfoOffset;
+
+ /// Asm - Target of Dwarf emission.
+ AsmPrinter *Asm;
+
+ // Holders for some common dwarf information.
+ DwarfDebug *DD;
+ DwarfFile *DU;
+
+ /// IndexTyDie - An anonymous type for index type. Owned by UnitDie.
+ DIE *IndexTyDie;
+
+ /// MDNodeToDieMap - Tracks the mapping of unit level debug information
+ /// variables to debug information entries.
+ DenseMap<const MDNode *, DIE *> MDNodeToDieMap;
+
+ /// MDNodeToDIEEntryMap - Tracks the mapping of unit level debug information
+ /// descriptors to debug information entries using a DIEEntry proxy.
+ DenseMap<const MDNode *, DIEEntry *> MDNodeToDIEEntryMap;
+
+ /// GlobalNames - A map of globally visible named entities for this unit.
+ StringMap<const DIE *> GlobalNames;
+
+ /// GlobalTypes - A map of globally visible types for this unit.
+ StringMap<const DIE *> GlobalTypes;
+
+ /// DIEBlocks - A list of all the DIEBlocks in use.
+ std::vector<DIEBlock *> DIEBlocks;
+
+ /// DIELocs - A list of all the DIELocs in use.
+ std::vector<DIELoc *> DIELocs;
+
+ /// ContainingTypeMap - This map is used to keep track of subprogram DIEs that
+ /// need DW_AT_containing_type attribute. This attribute points to a DIE that
+ /// corresponds to the MDNode mapped with the subprogram DIE.
+ DenseMap<DIE *, const MDNode *> ContainingTypeMap;
+
+ // List of ranges for a given compile unit.
+ SmallVector<RangeSpan, 1> CURanges;
+
+ // List of range lists for a given compile unit, separate from the ranges for
+ // the CU itself.
+ SmallVector<RangeSpanList, 1> CURangeLists;
+
+ // DIEValueAllocator - All DIEValues are allocated through this allocator.
+ BumpPtrAllocator DIEValueAllocator;
+
+ // DIEIntegerOne - A preallocated DIEValue because 1 is used frequently.
+ DIEInteger *DIEIntegerOne;
+
+ /// The section this unit will be emitted in.
+ const MCSection *Section;
+
+ /// A label at the start of the non-dwo section related to this unit.
+ MCSymbol *SectionSym;
+
+ /// The start of the unit within its section.
+ MCSymbol *LabelBegin;
+
+ /// The end of the unit within its section.
+ MCSymbol *LabelEnd;
+
+ /// Skeleton unit associated with this unit.
+ DwarfUnit *Skeleton;
+
+ DwarfUnit(unsigned UID, dwarf::Tag, DICompileUnit CU, AsmPrinter *A,
+ DwarfDebug *DW, DwarfFile *DWU);
+
+public:
+ virtual ~DwarfUnit();
+
+ /// Set the skeleton unit associated with this unit.
+ void setSkeleton(DwarfUnit &Skel) { Skeleton = &Skel; }
+
+ /// Get the skeleton unit associated with this unit.
+ DwarfUnit *getSkeleton() const { return Skeleton; }
+
+ /// Pass in the SectionSym even though we could recreate it in every compile
+ /// unit (type units will have actually distinct symbols once they're in
+ /// comdat sections).
+ void initSection(const MCSection *Section, MCSymbol *SectionSym) {
+ assert(!this->Section);
+ this->Section = Section;
+ this->SectionSym = SectionSym;
+ this->LabelBegin =
+ Asm->GetTempSymbol(Section->getLabelBeginName(), getUniqueID());
+ this->LabelEnd =
+ Asm->GetTempSymbol(Section->getLabelEndName(), getUniqueID());
+ }
+
+ const MCSection *getSection() const {
+ assert(Section);
+ return Section;
+ }
+
+ /// If there's a skeleton then return the section symbol for the skeleton
+ /// unit, otherwise return the section symbol for this unit.
+ MCSymbol *getLocalSectionSym() const {
+ if (Skeleton)
+ return Skeleton->getSectionSym();
+ return getSectionSym();
+ }
+
+ MCSymbol *getSectionSym() const {
+ assert(Section);
+ return SectionSym;
+ }
+
+ /// If there's a skeleton then return the begin label for the skeleton unit,
+ /// otherwise return the local label for this unit.
+ MCSymbol *getLocalLabelBegin() const {
+ if (Skeleton)
+ return Skeleton->getLabelBegin();
+ return getLabelBegin();
+ }
+
+ MCSymbol *getLabelBegin() const {
+ assert(Section);
+ return LabelBegin;
+ }
+
+ MCSymbol *getLabelEnd() const {
+ assert(Section);
+ return LabelEnd;
+ }
+
+ // Accessors.
+ unsigned getUniqueID() const { return UniqueID; }
+ uint16_t getLanguage() const { return CUNode.getLanguage(); }
+ DICompileUnit getCUNode() const { return CUNode; }
+ DIE &getUnitDie() { return UnitDie; }
+ const StringMap<const DIE *> &getGlobalNames() const { return GlobalNames; }
+ const StringMap<const DIE *> &getGlobalTypes() const { return GlobalTypes; }
+
+ unsigned getDebugInfoOffset() const { return DebugInfoOffset; }
+ void setDebugInfoOffset(unsigned DbgInfoOff) { DebugInfoOffset = DbgInfoOff; }
+
+ /// hasContent - Return true if this compile unit has something to write out.
+ bool hasContent() const { return !UnitDie.getChildren().empty(); }
+
+ /// addRange - Add an address range to the list of ranges for this unit.
+ void addRange(RangeSpan Range);
+
+ /// getRanges - Get the list of ranges for this unit.
+ const SmallVectorImpl<RangeSpan> &getRanges() const { return CURanges; }
+ SmallVectorImpl<RangeSpan> &getRanges() { return CURanges; }
+
+ /// addRangeList - Add an address range list to the list of range lists.
+ void addRangeList(RangeSpanList Ranges) { CURangeLists.push_back(Ranges); }
+
+ /// getRangeLists - Get the vector of range lists.
+ const SmallVectorImpl<RangeSpanList> &getRangeLists() const {
+ return CURangeLists;
+ }
+ SmallVectorImpl<RangeSpanList> &getRangeLists() { return CURangeLists; }
+
+ /// getParentContextString - Get a string containing the language specific
+ /// context for a global name.
+ std::string getParentContextString(DIScope Context) const;
+
+ /// addGlobalName - Add a new global entity to the compile unit.
+ ///
+ void addGlobalName(StringRef Name, DIE &Die, DIScope Context);
+
+ /// addAccelNamespace - Add a new name to the namespace accelerator table.
+ void addAccelNamespace(StringRef Name, const DIE &Die);
+
+ /// getDIE - Returns the debug information entry map slot for the
+ /// specified debug variable. We delegate the request to DwarfDebug
+ /// when the MDNode can be part of the type system, since DIEs for
+ /// the type system can be shared across CUs and the mappings are
+ /// kept in DwarfDebug.
+ DIE *getDIE(DIDescriptor D) const;
+
+ /// getDIELoc - Returns a fresh newly allocated DIELoc.
+ DIELoc *getDIELoc() { return new (DIEValueAllocator) DIELoc(); }
+
+ /// insertDIE - Insert DIE into the map. We delegate the request to DwarfDebug
+ /// when the MDNode can be part of the type system, since DIEs for
+ /// the type system can be shared across CUs and the mappings are
+ /// kept in DwarfDebug.
+ void insertDIE(DIDescriptor Desc, DIE *D);
+
+ /// addFlag - Add a flag that is true to the DIE.
+ void addFlag(DIE &Die, dwarf::Attribute Attribute);
+
+ /// addUInt - Add an unsigned integer attribute data and value.
+ void addUInt(DIE &Die, dwarf::Attribute Attribute, Optional<dwarf::Form> Form,
+ uint64_t Integer);
+
+ void addUInt(DIE &Block, dwarf::Form Form, uint64_t Integer);
+
+ /// addSInt - Add a signed integer attribute data and value.
+ void addSInt(DIE &Die, dwarf::Attribute Attribute, Optional<dwarf::Form> Form,
+ int64_t Integer);
+
+ void addSInt(DIELoc &Die, Optional<dwarf::Form> Form, int64_t Integer);
+
+ /// addString - Add a string attribute data and value.
+ void addString(DIE &Die, dwarf::Attribute Attribute, const StringRef Str);
+
+ /// addLocalString - Add a string attribute data and value.
+ void addLocalString(DIE &Die, dwarf::Attribute Attribute,
+ const StringRef Str);
+
+ /// addExpr - Add a Dwarf expression attribute data and value.
+ void addExpr(DIELoc &Die, dwarf::Form Form, const MCExpr *Expr);
+
+ /// addLabel - Add a Dwarf label attribute data and value.
+ void addLabel(DIE &Die, dwarf::Attribute Attribute, dwarf::Form Form,
+ const MCSymbol *Label);
+
+ void addLabel(DIELoc &Die, dwarf::Form Form, const MCSymbol *Label);
+
+ /// addLocationList - Add a Dwarf loclistptr attribute data and value.
+ void addLocationList(DIE &Die, dwarf::Attribute Attribute, unsigned Index);
+
+ /// addSectionLabel - Add a Dwarf section label attribute data and value.
+ ///
+ void addSectionLabel(DIE &Die, dwarf::Attribute Attribute,
+ const MCSymbol *Label);
+
+ /// addSectionOffset - Add an offset into a section attribute data and value.
+ ///
+ void addSectionOffset(DIE &Die, dwarf::Attribute Attribute, uint64_t Integer);
+
+ /// addOpAddress - Add a dwarf op address data and value using the
+ /// form given and an op of either DW_FORM_addr or DW_FORM_GNU_addr_index.
+ void addOpAddress(DIELoc &Die, const MCSymbol *Label);
+
+ /// addSectionDelta - Add a label delta attribute data and value.
+ void addSectionDelta(DIE &Die, dwarf::Attribute Attribute, const MCSymbol *Hi,
+ const MCSymbol *Lo);
+
+ /// addLabelDelta - Add a label delta attribute data and value.
+ void addLabelDelta(DIE &Die, dwarf::Attribute Attribute, const MCSymbol *Hi,
+ const MCSymbol *Lo);
+
+ /// addDIEEntry - Add a DIE attribute data and value.
+ void addDIEEntry(DIE &Die, dwarf::Attribute Attribute, DIE &Entry);
+
+ /// addDIEEntry - Add a DIE attribute data and value.
+ void addDIEEntry(DIE &Die, dwarf::Attribute Attribute, DIEEntry *Entry);
+
+ void addDIETypeSignature(DIE &Die, const DwarfTypeUnit &Type);
+
+ /// addBlock - Add block data.
+ void addBlock(DIE &Die, dwarf::Attribute Attribute, DIELoc *Block);
+
+ /// addBlock - Add block data.
+ void addBlock(DIE &Die, dwarf::Attribute Attribute, DIEBlock *Block);
+
+ /// addSourceLine - Add location information to specified debug information
+ /// entry.
+ void addSourceLine(DIE &Die, unsigned Line, StringRef File,
+ StringRef Directory);
+ void addSourceLine(DIE &Die, DIVariable V);
+ void addSourceLine(DIE &Die, DIGlobalVariable G);
+ void addSourceLine(DIE &Die, DISubprogram SP);
+ void addSourceLine(DIE &Die, DIType Ty);
+ void addSourceLine(DIE &Die, DINameSpace NS);
+ void addSourceLine(DIE &Die, DIObjCProperty Ty);
+
+ /// addAddress - Add an address attribute to a die based on the location
+ /// provided.
+ void addAddress(DIE &Die, dwarf::Attribute Attribute,
+ const MachineLocation &Location, bool Indirect = false);
+
+ /// addConstantValue - Add constant value entry in variable DIE.
+ void addConstantValue(DIE &Die, const MachineOperand &MO, DIType Ty);
+ void addConstantValue(DIE &Die, const ConstantInt *CI, DIType Ty);
+ void addConstantValue(DIE &Die, const APInt &Val, DIType Ty);
+ void addConstantValue(DIE &Die, const APInt &Val, bool Unsigned);
+ void addConstantValue(DIE &Die, bool Unsigned, uint64_t Val);
+
+ /// addConstantFPValue - Add constant value entry in variable DIE.
+ void addConstantFPValue(DIE &Die, const MachineOperand &MO);
+ void addConstantFPValue(DIE &Die, const ConstantFP *CFP);
+
+ /// addTemplateParams - Add template parameters in buffer.
+ void addTemplateParams(DIE &Buffer, DIArray TParams);
+
+ /// addRegisterOp - Add register operand.
+ void addRegisterOp(DIELoc &TheDie, unsigned Reg);
+
+ /// addRegisterOffset - Add register offset.
+ void addRegisterOffset(DIELoc &TheDie, unsigned Reg, int64_t Offset);
+
+ /// addComplexAddress - Start with the address based on the location provided,
+ /// and generate the DWARF information necessary to find the actual variable
+ /// (navigating the extra location information encoded in the type) based on
+ /// the starting location. Add the DWARF information to the die.
+ void addComplexAddress(const DbgVariable &DV, DIE &Die,
+ dwarf::Attribute Attribute,
+ const MachineLocation &Location);
+
+ // FIXME: Should be reformulated in terms of addComplexAddress.
+ /// addBlockByrefAddress - Start with the address based on the location
+ /// provided, and generate the DWARF information necessary to find the
+ /// actual Block variable (navigating the Block struct) based on the
+ /// starting location. Add the DWARF information to the die. Obsolete,
+ /// please use addComplexAddress instead.
+ void addBlockByrefAddress(const DbgVariable &DV, DIE &Die,
+ dwarf::Attribute Attribute,
+ const MachineLocation &Location);
+
+ /// addVariableAddress - Add DW_AT_location attribute for a
+ /// DbgVariable based on provided MachineLocation.
+ void addVariableAddress(const DbgVariable &DV, DIE &Die,
+ MachineLocation Location);
+
+ /// addType - Add a new type attribute to the specified entity. This takes
+ /// an attribute parameter because DW_AT_friend attributes are also
+ /// type references.
+ void addType(DIE &Entity, DIType Ty,
+ dwarf::Attribute Attribute = dwarf::DW_AT_type);
+
+ /// getOrCreateNameSpace - Create a DIE for DINameSpace.
+ DIE *getOrCreateNameSpace(DINameSpace NS);
+
+ /// getOrCreateSubprogramDIE - Create new DIE using SP.
+ DIE *getOrCreateSubprogramDIE(DISubprogram SP);
+
+ void applySubprogramAttributes(DISubprogram SP, DIE &SPDie);
+ void applySubprogramAttributesToDefinition(DISubprogram SP, DIE &SPDie);
+ void applyVariableAttributes(const DbgVariable &Var, DIE &VariableDie);
+
+ /// getOrCreateTypeDIE - Find existing DIE or create new DIE for the
+ /// given DIType.
+ DIE *getOrCreateTypeDIE(const MDNode *N);
+
+ /// createTypeDIE - Create a new DIE for the given DICompositeType.
+ DIE *createTypeDIE(DICompositeType Ty);
+
+ /// getOrCreateContextDIE - Get context owner's DIE.
+ DIE *getOrCreateContextDIE(DIScope Context);
+
+ /// constructContainingTypeDIEs - Construct DIEs for types that contain
+ /// vtables.
+ void constructContainingTypeDIEs();
+
+ /// constructVariableDIE - Construct a DIE for the given DbgVariable.
+ std::unique_ptr<DIE> constructVariableDIE(DbgVariable &DV,
+ bool Abstract = false);
+
+ /// constructSubprogramArguments - Construct function argument DIEs.
+ void constructSubprogramArguments(DIE &Buffer, DIArray Args);
+
+ /// Create a DIE with the given Tag, add the DIE to its parent, and
+ /// call insertDIE if MD is not null.
+ DIE &createAndAddDIE(unsigned Tag, DIE &Parent,
+ DIDescriptor N = DIDescriptor());
+
+ /// Compute the size of a header for this unit, not including the initial
+ /// length field.
+ virtual unsigned getHeaderSize() const {
+ return sizeof(int16_t) + // DWARF version number
+ sizeof(int32_t) + // Offset Into Abbrev. Section
+ sizeof(int8_t); // Pointer Size (in bytes)
+ }
+
+ /// Emit the header for this unit, not including the initial length field.
+ virtual void emitHeader(const MCSymbol *ASectionSym) const;
+
+ virtual DwarfCompileUnit &getCU() = 0;
+
+ /// constructTypeDIE - Construct type DIE from DICompositeType.
+ void constructTypeDIE(DIE &Buffer, DICompositeType CTy);
+
+protected:
+ /// getOrCreateStaticMemberDIE - Create new static data member DIE.
+ DIE *getOrCreateStaticMemberDIE(DIDerivedType DT);
+
+ /// Look up the source ID with the given directory and source file names. If
+ /// none currently exists, create a new ID and insert it in the line table.
+ virtual unsigned getOrCreateSourceID(StringRef File, StringRef Directory) = 0;
+
+private:
+ /// \brief Construct a DIE for the given DbgVariable without initializing the
+ /// DbgVariable's DIE reference.
+ std::unique_ptr<DIE> constructVariableDIEImpl(const DbgVariable &DV,
+ bool Abstract);
+
+ /// constructTypeDIE - Construct basic type die from DIBasicType.
+ void constructTypeDIE(DIE &Buffer, DIBasicType BTy);
+
+ /// constructTypeDIE - Construct derived type die from DIDerivedType.
+ void constructTypeDIE(DIE &Buffer, DIDerivedType DTy);
+
+ /// constructSubrangeDIE - Construct subrange DIE from DISubrange.
+ void constructSubrangeDIE(DIE &Buffer, DISubrange SR, DIE *IndexTy);
+
+ /// constructArrayTypeDIE - Construct array type DIE from DICompositeType.
+ void constructArrayTypeDIE(DIE &Buffer, DICompositeType CTy);
+
+ /// constructEnumTypeDIE - Construct enum type DIE from DIEnumerator.
+ void constructEnumTypeDIE(DIE &Buffer, DICompositeType CTy);
+
+ /// constructMemberDIE - Construct member DIE from DIDerivedType.
+ void constructMemberDIE(DIE &Buffer, DIDerivedType DT);
+
+ /// constructTemplateTypeParameterDIE - Construct new DIE for the given
+ /// DITemplateTypeParameter.
+ void constructTemplateTypeParameterDIE(DIE &Buffer,
+ DITemplateTypeParameter TP);
+
+ /// constructTemplateValueParameterDIE - Construct new DIE for the given
+ /// DITemplateValueParameter.
+ void constructTemplateValueParameterDIE(DIE &Buffer,
+ DITemplateValueParameter TVP);
+
+ /// getDefaultLowerBound - Return the default lower bound for an array. If the
+ /// DWARF version doesn't handle the language, return -1.
+ int64_t getDefaultLowerBound() const;
+
+ /// getDIEEntry - Returns the debug information entry for the specified
+ /// debug variable.
+ DIEEntry *getDIEEntry(const MDNode *N) const {
+ return MDNodeToDIEEntryMap.lookup(N);
+ }
+
+ /// insertDIEEntry - Insert debug information entry into the map.
+ void insertDIEEntry(const MDNode *N, DIEEntry *E) {
+ MDNodeToDIEEntryMap.insert(std::make_pair(N, E));
+ }
+
+ // getIndexTyDie - Get an anonymous type for index type.
+ DIE *getIndexTyDie() { return IndexTyDie; }
+
+ // setIndexTyDie - Set D as anonymous type for index which can be reused
+ // later.
+ void setIndexTyDie(DIE *D) { IndexTyDie = D; }
+
+ /// createDIEEntry - Creates a new DIEEntry to be a proxy for a debug
+ /// information entry.
+ DIEEntry *createDIEEntry(DIE &Entry);
+
+ /// resolve - Look in the DwarfDebug map for the MDNode that
+ /// corresponds to the reference.
+ template <typename T> T resolve(DIRef<T> Ref) const {
+ return DD->resolve(Ref);
+ }
+
+ /// If this is a named finished type then include it in the list of types for
+ /// the accelerator tables.
+ void updateAcceleratorTables(DIScope Context, DIType Ty, const DIE &TyDIE);
+};
+
+class DwarfCompileUnit : public DwarfUnit {
+ /// The attribute index of DW_AT_stmt_list in the compile unit DIE, avoiding
+ /// the need to search for it in applyStmtList.
+ unsigned stmtListIndex;
+
+public:
+ DwarfCompileUnit(unsigned UID, DICompileUnit Node, AsmPrinter *A,
+ DwarfDebug *DW, DwarfFile *DWU);
+
+ void initStmtList(MCSymbol *DwarfLineSectionSym);
+
+ /// Apply the DW_AT_stmt_list from this compile unit to the specified DIE.
+ void applyStmtList(DIE &D);
+
+ /// createGlobalVariableDIE - create global variable DIE.
+ void createGlobalVariableDIE(DIGlobalVariable GV);
+
+ /// addLabelAddress - Add a dwarf label attribute data and value using
+ /// either DW_FORM_addr or DW_FORM_GNU_addr_index.
+ void addLabelAddress(DIE &Die, dwarf::Attribute Attribute,
+ const MCSymbol *Label);
+
+ /// addLocalLabelAddress - Add a dwarf label attribute data and value using
+ /// DW_FORM_addr only.
+ void addLocalLabelAddress(DIE &Die, dwarf::Attribute Attribute,
+ const MCSymbol *Label);
+
+ DwarfCompileUnit &getCU() override { return *this; }
+
+ unsigned getOrCreateSourceID(StringRef FileName, StringRef DirName) override;
+};
+
+class DwarfTypeUnit : public DwarfUnit {
+private:
+ uint64_t TypeSignature;
+ const DIE *Ty;
+ DwarfCompileUnit &CU;
+ MCDwarfDwoLineTable *SplitLineTable;
+
+public:
+ DwarfTypeUnit(unsigned UID, DwarfCompileUnit &CU, AsmPrinter *A,
+ DwarfDebug *DW, DwarfFile *DWU,
+ MCDwarfDwoLineTable *SplitLineTable = nullptr);
+
+ void setTypeSignature(uint64_t Signature) { TypeSignature = Signature; }
+ uint64_t getTypeSignature() const { return TypeSignature; }
+ void setType(const DIE *Ty) { this->Ty = Ty; }
+
+ /// Emit the header for this unit, not including the initial length field.
+ void emitHeader(const MCSymbol *ASectionSym) const override;
+ unsigned getHeaderSize() const override {
+ return DwarfUnit::getHeaderSize() + sizeof(uint64_t) + // Type Signature
+ sizeof(uint32_t); // Type DIE Offset
+ }
+ void initSection(const MCSection *Section);
+ DwarfCompileUnit &getCU() override { return CU; }
+
+protected:
+ unsigned getOrCreateSourceID(StringRef File, StringRef Directory) override;
+};
+} // end llvm namespace
+#endif
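
As a rough check of the getHeaderSize() arithmetic in DwarfTypeUnit above, here is a sketch assuming a DWARF4 unit header of 2 (version) + 4 (abbrev offset) + 1 (address size) = 7 bytes after the initial length field; the type unit then appends an 8-byte type signature and a 4-byte type DIE offset. The names below are illustrative, not LLVM code.

#include <cstdint>

// Hypothetical size check, under the DWARF4 header-layout assumption above.
constexpr unsigned UnitHeaderSize = 2 + 4 + 1;
constexpr unsigned TypeUnitHeaderSize =
    UnitHeaderSize + sizeof(uint64_t) + sizeof(uint32_t); // 7 + 8 + 4
static_assert(TypeUnitHeaderSize == 19, "type unit header, excluding length");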
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp
index 7133458..73f62bf 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp
@@ -1,4 +1,4 @@
-//===-- CodeGen/AsmPrinter/DwarfException.cpp - Dwarf Exception Impl ------===//
+//===-- CodeGen/AsmPrinter/EHStreamer.cpp - Exception Directive Streamer --===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,44 +7,31 @@
//
//===----------------------------------------------------------------------===//
//
-// This file contains support for writing DWARF exception info into asm files.
+// This file contains support for writing exception info into assembly files.
//
//===----------------------------------------------------------------------===//
-#include "DwarfException.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/Twine.h"
+#include "EHStreamer.h"
#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/Module.h"
+#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/Support/Dwarf.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/FormattedStream.h"
-#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Support/LEB128.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/Target/TargetRegisterInfo.h"
+
using namespace llvm;
-DwarfException::DwarfException(AsmPrinter *A)
- : Asm(A), MMI(Asm->MMI) {}
+EHStreamer::EHStreamer(AsmPrinter *A) : Asm(A), MMI(Asm->MMI) {}
-DwarfException::~DwarfException() {}
+EHStreamer::~EHStreamer() {}
-/// SharedTypeIds - How many leading type ids two landing pads have in common.
-unsigned DwarfException::SharedTypeIds(const LandingPadInfo *L,
- const LandingPadInfo *R) {
+/// How many leading type ids two landing pads have in common.
+unsigned EHStreamer::sharedTypeIDs(const LandingPadInfo *L,
+ const LandingPadInfo *R) {
const std::vector<int> &LIds = L->TypeIds, &RIds = R->TypeIds;
unsigned LSize = LIds.size(), RSize = RIds.size();
unsigned MinSize = LSize < RSize ? LSize : RSize;
@@ -57,23 +44,10 @@ unsigned DwarfException::SharedTypeIds(const LandingPadInfo *L,
return Count;
}
-/// PadLT - Order landing pads lexicographically by type id.
-bool DwarfException::PadLT(const LandingPadInfo *L, const LandingPadInfo *R) {
- const std::vector<int> &LIds = L->TypeIds, &RIds = R->TypeIds;
- unsigned LSize = LIds.size(), RSize = RIds.size();
- unsigned MinSize = LSize < RSize ? LSize : RSize;
-
- for (unsigned i = 0; i != MinSize; ++i)
- if (LIds[i] != RIds[i])
- return LIds[i] < RIds[i];
-
- return LSize < RSize;
-}
-
-/// ComputeActionsTable - Compute the actions table and gather the first action
-/// index for each landing pad site.
-unsigned DwarfException::
-ComputeActionsTable(const SmallVectorImpl<const LandingPadInfo*> &LandingPads,
+/// Compute the actions table and gather the first action index for each landing
+/// pad site.
+unsigned EHStreamer::
+computeActionsTable(const SmallVectorImpl<const LandingPadInfo*> &LandingPads,
SmallVectorImpl<ActionEntry> &Actions,
SmallVectorImpl<unsigned> &FirstActions) {
@@ -108,20 +82,20 @@ ComputeActionsTable(const SmallVectorImpl<const LandingPadInfo*> &LandingPads,
for (std::vector<unsigned>::const_iterator
I = FilterIds.begin(), E = FilterIds.end(); I != E; ++I) {
FilterOffsets.push_back(Offset);
- Offset -= MCAsmInfo::getULEB128Size(*I);
+ Offset -= getULEB128Size(*I);
}
FirstActions.reserve(LandingPads.size());
int FirstAction = 0;
unsigned SizeActions = 0;
- const LandingPadInfo *PrevLPI = 0;
+ const LandingPadInfo *PrevLPI = nullptr;
for (SmallVectorImpl<const LandingPadInfo *>::const_iterator
I = LandingPads.begin(), E = LandingPads.end(); I != E; ++I) {
const LandingPadInfo *LPI = *I;
const std::vector<int> &TypeIds = LPI->TypeIds;
- unsigned NumShared = PrevLPI ? SharedTypeIds(LPI, PrevLPI) : 0;
+ unsigned NumShared = PrevLPI ? sharedTypeIDs(LPI, PrevLPI) : 0;
unsigned SizeSiteActions = 0;
if (NumShared < TypeIds.size()) {
@@ -132,14 +106,12 @@ ComputeActionsTable(const SmallVectorImpl<const LandingPadInfo*> &LandingPads,
unsigned SizePrevIds = PrevLPI->TypeIds.size();
assert(Actions.size());
PrevAction = Actions.size() - 1;
- SizeAction =
- MCAsmInfo::getSLEB128Size(Actions[PrevAction].NextAction) +
- MCAsmInfo::getSLEB128Size(Actions[PrevAction].ValueForTypeID);
+ SizeAction = getSLEB128Size(Actions[PrevAction].NextAction) +
+ getSLEB128Size(Actions[PrevAction].ValueForTypeID);
for (unsigned j = NumShared; j != SizePrevIds; ++j) {
assert(PrevAction != (unsigned)-1 && "PrevAction is invalid!");
- SizeAction -=
- MCAsmInfo::getSLEB128Size(Actions[PrevAction].ValueForTypeID);
+ SizeAction -= getSLEB128Size(Actions[PrevAction].ValueForTypeID);
SizeAction += -Actions[PrevAction].NextAction;
PrevAction = Actions[PrevAction].Previous;
}
@@ -150,10 +122,10 @@ ComputeActionsTable(const SmallVectorImpl<const LandingPadInfo*> &LandingPads,
int TypeID = TypeIds[J];
assert(-1 - TypeID < (int)FilterOffsets.size() && "Unknown filter id!");
int ValueForTypeID = TypeID < 0 ? FilterOffsets[-1 - TypeID] : TypeID;
- unsigned SizeTypeID = MCAsmInfo::getSLEB128Size(ValueForTypeID);
+ unsigned SizeTypeID = getSLEB128Size(ValueForTypeID);
int NextAction = SizeAction ? -(SizeAction + SizeTypeID) : 0;
- SizeAction = SizeTypeID + MCAsmInfo::getSLEB128Size(NextAction);
+ SizeAction = SizeTypeID + getSLEB128Size(NextAction);
SizeSiteActions += SizeAction;
ActionEntry Action = { ValueForTypeID, NextAction, PrevAction };
@@ -181,9 +153,9 @@ ComputeActionsTable(const SmallVectorImpl<const LandingPadInfo*> &LandingPads,
return SizeActions;
}
-/// CallToNoUnwindFunction - Return `true' if this is a call to a function
-/// marked `nounwind'. Return `false' otherwise.
-bool DwarfException::CallToNoUnwindFunction(const MachineInstr *MI) {
+/// Return `true' if this is a call to a function marked `nounwind'. Return
+/// `false' otherwise.
+bool EHStreamer::callToNoUnwindFunction(const MachineInstr *MI) {
assert(MI->isCall() && "This should be a call instruction!");
bool MarkedNoUnwind = false;
@@ -195,7 +167,7 @@ bool DwarfException::CallToNoUnwindFunction(const MachineInstr *MI) {
if (!MO.isGlobal()) continue;
const Function *F = dyn_cast<Function>(MO.getGlobal());
- if (F == 0) continue;
+ if (!F) continue;
if (SawFunc) {
// Be conservative. If we have more than one function operand for this
@@ -215,20 +187,19 @@ bool DwarfException::CallToNoUnwindFunction(const MachineInstr *MI) {
return MarkedNoUnwind;
}
-/// ComputeCallSiteTable - Compute the call-site table. The entry for an invoke
-/// has a try-range containing the call, a non-zero landing pad, and an
-/// appropriate action. The entry for an ordinary call has a try-range
-/// containing the call and zero for the landing pad and the action. Calls
-/// marked 'nounwind' have no entry and must not be contained in the try-range
-/// of any entry - they form gaps in the table. Entries must be ordered by
-/// try-range address.
-void DwarfException::
-ComputeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
+/// Compute the call-site table. The entry for an invoke has a try-range
+/// containing the call, a non-zero landing pad, and an appropriate action. The
+/// entry for an ordinary call has a try-range containing the call and zero for
+/// the landing pad and the action. Calls marked 'nounwind' have no entry and
+/// must not be contained in the try-range of any entry - they form gaps in the
+/// table. Entries must be ordered by try-range address.
+void EHStreamer::
+computeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
const RangeMapType &PadMap,
const SmallVectorImpl<const LandingPadInfo *> &LandingPads,
const SmallVectorImpl<unsigned> &FirstActions) {
// The end label of the previous invoke or nounwind try-range.
- MCSymbol *LastLabel = 0;
+ MCSymbol *LastLabel = nullptr;
// Whether there is a potentially throwing instruction (currently this means
// an ordinary call) between the end of the previous try-range and now.
@@ -238,18 +209,16 @@ ComputeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
bool PreviousIsInvoke = false;
// Visit all instructions in order of address.
- for (MachineFunction::const_iterator I = Asm->MF->begin(), E = Asm->MF->end();
- I != E; ++I) {
- for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
- MI != E; ++MI) {
- if (!MI->isLabel()) {
- if (MI->isCall())
- SawPotentiallyThrowing |= !CallToNoUnwindFunction(MI);
+ for (const auto &MBB : *Asm->MF) {
+ for (const auto &MI : MBB) {
+ if (!MI.isEHLabel()) {
+ if (MI.isCall())
+ SawPotentiallyThrowing |= !callToNoUnwindFunction(&MI);
continue;
}
// End of the previous try-range?
- MCSymbol *BeginLabel = MI->getOperand(0).getMCSymbol();
+ MCSymbol *BeginLabel = MI.getOperand(0).getMCSymbol();
if (BeginLabel == LastLabel)
SawPotentiallyThrowing = false;
@@ -269,7 +238,7 @@ ComputeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
// create a call-site entry with no landing pad for the region between the
// try-ranges.
if (SawPotentiallyThrowing && Asm->MAI->isExceptionHandlingDwarf()) {
- CallSiteEntry Site = { LastLabel, BeginLabel, 0, 0 };
+ CallSiteEntry Site = { LastLabel, BeginLabel, nullptr, 0 };
CallSites.push_back(Site);
PreviousIsInvoke = false;
}
@@ -319,12 +288,12 @@ ComputeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
// function may throw, create a call-site entry with no landing pad for the
// region following the try-range.
if (SawPotentiallyThrowing && Asm->MAI->isExceptionHandlingDwarf()) {
- CallSiteEntry Site = { LastLabel, 0, 0, 0 };
+ CallSiteEntry Site = { LastLabel, nullptr, nullptr, 0 };
CallSites.push_back(Site);
}
}
-/// EmitExceptionTable - Emit landing pads and actions.
+/// Emit landing pads and actions.
///
/// The general organization of the table is complex, but the basic concepts are
/// easy. First there is a header which describes the location and organization
@@ -344,7 +313,7 @@ ComputeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
/// unwound and handling continues.
/// 3. Type ID table contains references to all the C++ typeinfo for all
/// catches in the function. This table is reverse indexed base 1.
-void DwarfException::EmitExceptionTable() {
+void EHStreamer::emitExceptionTable() {
const std::vector<const GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
@@ -357,13 +326,17 @@ void DwarfException::EmitExceptionTable() {
for (unsigned i = 0, N = PadInfos.size(); i != N; ++i)
LandingPads.push_back(&PadInfos[i]);
- std::sort(LandingPads.begin(), LandingPads.end(), PadLT);
+ // Order landing pads lexicographically by type id.
+ std::sort(LandingPads.begin(), LandingPads.end(),
+ [](const LandingPadInfo *L,
+ const LandingPadInfo *R) { return L->TypeIds < R->TypeIds; });
// Compute the actions table and gather the first action index for each
// landing pad site.
SmallVector<ActionEntry, 32> Actions;
SmallVector<unsigned, 64> FirstActions;
- unsigned SizeActions=ComputeActionsTable(LandingPads, Actions, FirstActions);
+ unsigned SizeActions =
+ computeActionsTable(LandingPads, Actions, FirstActions);
// Invokes and nounwind calls have entries in PadMap (due to being bracketed
// by try-range labels when lowered). Ordinary calls do not, so appropriate
@@ -381,7 +354,7 @@ void DwarfException::EmitExceptionTable() {
// Compute the call-site table.
SmallVector<CallSiteEntry, 64> CallSites;
- ComputeCallSiteTable(CallSites, PadMap, LandingPads, FirstActions);
+ computeCallSiteTable(CallSites, PadMap, LandingPads, FirstActions);
// Final tallies.
@@ -401,9 +374,9 @@ void DwarfException::EmitExceptionTable() {
}
for (unsigned i = 0, e = CallSites.size(); i < e; ++i) {
- CallSiteTableLength += MCAsmInfo::getULEB128Size(CallSites[i].Action);
+ CallSiteTableLength += getULEB128Size(CallSites[i].Action);
if (IsSJLJ)
- CallSiteTableLength += MCAsmInfo::getULEB128Size(i);
+ CallSiteTableLength += getULEB128Size(i);
}
// Type infos.
@@ -488,15 +461,14 @@ void DwarfException::EmitExceptionTable() {
// We chose another solution: don't output padding inside the table like GCC
// does, instead output it before the table.
unsigned SizeTypes = TypeInfos.size() * TypeFormatSize;
- unsigned CallSiteTableLengthSize =
- MCAsmInfo::getULEB128Size(CallSiteTableLength);
+ unsigned CallSiteTableLengthSize = getULEB128Size(CallSiteTableLength);
unsigned TTypeBaseOffset =
sizeof(int8_t) + // Call site format
CallSiteTableLengthSize + // Call site table length size
CallSiteTableLength + // Call site table length
SizeActions + // Actions size
SizeTypes;
- unsigned TTypeBaseOffsetSize = MCAsmInfo::getULEB128Size(TTypeBaseOffset);
+ unsigned TTypeBaseOffsetSize = getULEB128Size(TTypeBaseOffset);
unsigned TotalSize =
sizeof(int8_t) + // LPStart format
sizeof(int8_t) + // TType format
@@ -583,10 +555,10 @@ void DwarfException::EmitExceptionTable() {
Asm->GetTempSymbol("eh_func_begin", Asm->getFunctionNumber());
MCSymbol *BeginLabel = S.BeginLabel;
- if (BeginLabel == 0)
+ if (!BeginLabel)
BeginLabel = EHFuncBeginSym;
MCSymbol *EndLabel = S.EndLabel;
- if (EndLabel == 0)
+ if (!EndLabel)
EndLabel = Asm->GetTempSymbol("eh_func_end", Asm->getFunctionNumber());
@@ -671,12 +643,12 @@ void DwarfException::EmitExceptionTable() {
Asm->EmitSLEB128(Action.NextAction);
}
- EmitTypeInfos(TTypeEncoding);
+ emitTypeInfos(TTypeEncoding);
Asm->EmitAlignment(2);
}
-void DwarfException::EmitTypeInfos(unsigned TTypeEncoding) {
+void EHStreamer::emitTypeInfos(unsigned TTypeEncoding) {
const std::vector<const GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
@@ -717,20 +689,18 @@ void DwarfException::EmitTypeInfos(unsigned TTypeEncoding) {
}
}
-/// EndModule - Emit all exception information that should come after the
-/// content.
-void DwarfException::EndModule() {
+/// Emit all exception information that should come after the content.
+void EHStreamer::endModule() {
llvm_unreachable("Should be implemented");
}
-/// BeginFunction - Gather pre-function exception information. Assumes it's
-/// being emitted immediately after the function entry point.
-void DwarfException::BeginFunction(const MachineFunction *MF) {
+/// Gather pre-function exception information. Assumes it's being emitted
+/// immediately after the function entry point.
+void EHStreamer::beginFunction(const MachineFunction *MF) {
llvm_unreachable("Should be implemented");
}
-/// EndFunction - Gather and emit post-function exception information.
-///
-void DwarfException::EndFunction() {
+/// Gather and emit post-function exception information.
+void EHStreamer::endFunction(const MachineFunction *) {
llvm_unreachable("Should be implemented");
}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/EHStreamer.h b/contrib/llvm/lib/CodeGen/AsmPrinter/EHStreamer.h
new file mode 100644
index 0000000..2b6ba78
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/EHStreamer.h
@@ -0,0 +1,138 @@
+//===-- EHStreamer.h - Exception Handling Directive Streamer ---*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for writing exception info into assembly files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_ASMPRINTER_EHSTREAMER_H
+#define LLVM_CODEGEN_ASMPRINTER_EHSTREAMER_H
+
+#include "AsmPrinterHandler.h"
+#include "llvm/ADT/DenseMap.h"
+
+namespace llvm {
+struct LandingPadInfo;
+class MachineModuleInfo;
+class MachineInstr;
+class MachineFunction;
+class AsmPrinter;
+
+template <typename T>
+class SmallVectorImpl;
+
+/// Emits exception handling directives.
+class EHStreamer : public AsmPrinterHandler {
+protected:
+ /// Target of directive emission.
+ AsmPrinter *Asm;
+
+ /// Collected machine module information.
+ MachineModuleInfo *MMI;
+
+ /// How many leading type ids two landing pads have in common.
+ static unsigned sharedTypeIDs(const LandingPadInfo *L,
+ const LandingPadInfo *R);
+
+ /// Structure holding a try-range and the associated landing pad.
+ struct PadRange {
+ // The index of the landing pad.
+ unsigned PadIndex;
+ // The index of the begin and end labels in the landing pad's label lists.
+ unsigned RangeIndex;
+ };
+
+ typedef DenseMap<MCSymbol *, PadRange> RangeMapType;
+
+ /// Structure describing an entry in the actions table.
+ struct ActionEntry {
+ int ValueForTypeID; // The value to write - may not be equal to the type id.
+ int NextAction;
+ unsigned Previous;
+ };
+
+ /// Structure describing an entry in the call-site table.
+ struct CallSiteEntry {
+ // The 'try-range' is BeginLabel .. EndLabel.
+ MCSymbol *BeginLabel; // zero indicates the start of the function.
+ MCSymbol *EndLabel; // zero indicates the end of the function.
+
+ // The landing pad starts at PadLabel.
+ MCSymbol *PadLabel; // zero indicates that there is no landing pad.
+ unsigned Action;
+ };
+
+ /// Compute the actions table and gather the first action index for each
+ /// landing pad site.
+ unsigned computeActionsTable(const SmallVectorImpl<const LandingPadInfo*>&LPs,
+ SmallVectorImpl<ActionEntry> &Actions,
+ SmallVectorImpl<unsigned> &FirstActions);
+
+ /// Return `true' if this is a call to a function marked `nounwind'. Return
+ /// `false' otherwise.
+ bool callToNoUnwindFunction(const MachineInstr *MI);
+
+ /// Compute the call-site table. The entry for an invoke has a try-range
+ /// containing the call, a non-zero landing pad and an appropriate action.
+ /// The entry for an ordinary call has a try-range containing the call and
+ /// zero for the landing pad and the action. Calls marked 'nounwind' have
+ /// no entry and must not be contained in the try-range of any entry - they
+ /// form gaps in the table. Entries must be ordered by try-range address.
+
+ void computeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
+ const RangeMapType &PadMap,
+ const SmallVectorImpl<const LandingPadInfo *> &LPs,
+ const SmallVectorImpl<unsigned> &FirstActions);
+
+ /// Emit landing pads and actions.
+ ///
+ /// The general organization of the table is complex, but the basic concepts
+ /// are easy. First there is a header which describes the location and
+ /// organization of the three components that follow.
+ /// 1. The landing pad site information describes the range of code covered
+ /// by the try. In our case it's an accumulation of the ranges covered
+ /// by the invokes in the try. There is also a reference to the landing
+ /// pad that handles the exception once processed. Finally an index into
+ /// the actions table.
+ /// 2. The action table, in our case, is composed of pairs of type ids
+ /// and next action offset. Starting with the action index from the
+ /// landing pad site, each type Id is checked for a match to the current
+ /// exception. If it matches then the exception and type id are passed
+ /// on to the landing pad. Otherwise the next action is looked up. This
+ /// chain is terminated with a next action of zero. If no type id is
+ /// found the frame is unwound and handling continues.
+ /// 3. Type id table contains references to all the C++ typeinfo for all
+ /// catches in the function. This table is reverse indexed base 1.
+ void emitExceptionTable();
+
+ virtual void emitTypeInfos(unsigned TTypeEncoding);
+
+public:
+ EHStreamer(AsmPrinter *A);
+ virtual ~EHStreamer();
+
+ /// Emit all exception information that should come after the content.
+ void endModule() override;
+
+ /// Gather pre-function exception information. Assumes it is being emitted
+ /// immediately after the function entry point.
+ void beginFunction(const MachineFunction *MF) override;
+
+ /// Gather and emit post-function exception information.
+ void endFunction(const MachineFunction *) override;
+
+ // Unused.
+ void setSymbolSize(const MCSymbol *Sym, uint64_t Size) override {}
+ void beginInstruction(const MachineInstr *MI) override {}
+ void endInstruction() override {}
+};
+}
+
+#endif
+
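
The table-sizing logic in computeActionsTable and emitExceptionTable leans on getULEB128Size/getSLEB128Size from llvm/Support/LEB128.h. A minimal sketch of the unsigned variant (illustrative only, not the LLVM implementation) shows why those sizes can be computed before anything is emitted:

#include <cstddef>
#include <cstdint>

// Number of bytes needed to encode Value as unsigned LEB128.
static std::size_t uleb128_size(uint64_t Value) {
  std::size_t Size = 0;
  do {
    Value >>= 7; // each LEB128 byte carries 7 payload bits
    ++Size;
  } while (Value != 0);
  return Size;
}
// uleb128_size(127) == 1, uleb128_size(128) == 2.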
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp
index a8fb66d..bfcbe6b 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp
@@ -14,8 +14,8 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/GCs.h"
#include "llvm/CodeGen/GCMetadataPrinter.h"
+#include "llvm/CodeGen/GCs.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
@@ -35,8 +35,8 @@ namespace {
class ErlangGCPrinter : public GCMetadataPrinter {
public:
- void beginAssembly(AsmPrinter &AP);
- void finishAssembly(AsmPrinter &AP);
+ void beginAssembly(AsmPrinter &AP) override;
+ void finishAssembly(AsmPrinter &AP) override;
};
}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
index 98177c0..5a9ecd7 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
@@ -16,6 +16,7 @@
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/GCMetadataPrinter.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
@@ -23,7 +24,6 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
-#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>
@@ -33,8 +33,8 @@ namespace {
class OcamlGCMetadataPrinter : public GCMetadataPrinter {
public:
- void beginAssembly(AsmPrinter &AP);
- void finishAssembly(AsmPrinter &AP);
+ void beginAssembly(AsmPrinter &AP) override;
+ void finishAssembly(AsmPrinter &AP) override;
};
}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/Win64Exception.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/Win64Exception.cpp
index 1561012..81285d5 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/Win64Exception.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/Win64Exception.cpp
@@ -20,6 +20,7 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
@@ -30,7 +31,6 @@
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
-#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
@@ -38,20 +38,19 @@
using namespace llvm;
Win64Exception::Win64Exception(AsmPrinter *A)
- : DwarfException(A),
- shouldEmitPersonality(false), shouldEmitLSDA(false), shouldEmitMoves(false)
- {}
+ : EHStreamer(A), shouldEmitPersonality(false), shouldEmitLSDA(false),
+ shouldEmitMoves(false) {}
Win64Exception::~Win64Exception() {}
-/// EndModule - Emit all exception information that should come after the
+/// endModule - Emit all exception information that should come after the
/// content.
-void Win64Exception::EndModule() {
+void Win64Exception::endModule() {
}
-/// BeginFunction - Gather pre-function exception information. Assumes it's
+/// beginFunction - Gather pre-function exception information. Assumes it's
/// being emitted immediately after the function entry point.
-void Win64Exception::BeginFunction(const MachineFunction *MF) {
+void Win64Exception::beginFunction(const MachineFunction *MF) {
shouldEmitMoves = shouldEmitPersonality = shouldEmitLSDA = false;
// If any landing pads survive, we need an EH table.
@@ -73,22 +72,22 @@ void Win64Exception::BeginFunction(const MachineFunction *MF) {
if (!shouldEmitPersonality && !shouldEmitMoves)
return;
- Asm->OutStreamer.EmitWin64EHStartProc(Asm->CurrentFnSym);
+ Asm->OutStreamer.EmitWinCFIStartProc(Asm->CurrentFnSym);
if (!shouldEmitPersonality)
return;
- MCSymbol *GCCHandlerSym =
- Asm->GetExternalSymbolSymbol("_GCC_specific_handler");
- Asm->OutStreamer.EmitWin64EHHandler(GCCHandlerSym, true, true);
+ const MCSymbol *PersHandlerSym =
+ TLOF.getCFIPersonalitySymbol(Per, *Asm->Mang, Asm->TM, MMI);
+ Asm->OutStreamer.EmitWinEHHandler(PersHandlerSym, true, true);
Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_func_begin",
Asm->getFunctionNumber()));
}
-/// EndFunction - Gather and emit post-function exception information.
+/// endFunction - Gather and emit post-function exception information.
///
-void Win64Exception::EndFunction() {
+void Win64Exception::endFunction(const MachineFunction *) {
if (!shouldEmitPersonality && !shouldEmitMoves)
return;
@@ -99,16 +98,10 @@ void Win64Exception::EndFunction() {
MMI->TidyLandingPads();
if (shouldEmitPersonality) {
- const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
- const Function *Per = MMI->getPersonalities()[MMI->getPersonalityIndex()];
- const MCSymbol *Sym = TLOF.getCFIPersonalitySymbol(Per, Asm->Mang, MMI);
-
Asm->OutStreamer.PushSection();
- Asm->OutStreamer.EmitWin64EHHandlerData();
- Asm->OutStreamer.EmitValue(MCSymbolRefExpr::Create(Sym, Asm->OutContext),
- 4);
- EmitExceptionTable();
+ Asm->OutStreamer.EmitWinEHHandlerData();
+ emitExceptionTable();
Asm->OutStreamer.PopSection();
}
- Asm->OutStreamer.EmitWin64EHEndProc();
+ Asm->OutStreamer.EmitWinCFIEndProc();
}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp
new file mode 100644
index 0000000..6a5c431
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp
@@ -0,0 +1,335 @@
+//===-- llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp --*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for writing line tables info into COFF files.
+//
+//===----------------------------------------------------------------------===//
+
+#include "WinCodeViewLineTables.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/COFF.h"
+
+namespace llvm {
+
+StringRef WinCodeViewLineTables::getFullFilepath(const MDNode *S) {
+ assert(S);
+ DIDescriptor D(S);
+ assert((D.isCompileUnit() || D.isFile() || D.isSubprogram() ||
+ D.isLexicalBlockFile() || D.isLexicalBlock()) &&
+ "Unexpected scope info");
+
+ DIScope Scope(S);
+ StringRef Dir = Scope.getDirectory(),
+ Filename = Scope.getFilename();
+ char *&Result = DirAndFilenameToFilepathMap[std::make_pair(Dir, Filename)];
+ if (Result)
+ return Result;
+
+ // Clang emits directory and relative filename info into the IR, but CodeView
+ // operates on full paths. We could change Clang to emit full paths too, but
+ // that would increase the IR size and probably not be needed by other users.
+ // For now, just concatenate and canonicalize the path here.
+ std::string Filepath;
+ if (Filename.find(':') == 1)
+ Filepath = Filename;
+ else
+ Filepath = (Dir + Twine("\\") + Filename).str();
+
+ // Canonicalize the path. We have to do it textually because we may no longer
+ // have access to the file in the filesystem.
+ // First, replace all slashes with backslashes.
+ std::replace(Filepath.begin(), Filepath.end(), '/', '\\');
+
+ // Replace all "\.\" with "\".
+ size_t Cursor = 0;
+ while ((Cursor = Filepath.find("\\.\\", Cursor)) != std::string::npos)
+ Filepath.erase(Cursor, 2);
+
+ // Replace all "\XXX\..\" with "\". Don't try too hard though as the original
+ // path should be well-formatted, e.g. start with a drive letter, etc.
+ Cursor = 0;
+ while ((Cursor = Filepath.find("\\..\\", Cursor)) != std::string::npos) {
+ // Something's wrong if the path starts with "\..\", abort.
+ if (Cursor == 0)
+ break;
+
+ size_t PrevSlash = Filepath.rfind('\\', Cursor - 1);
+ if (PrevSlash == std::string::npos)
+ // Something's wrong, abort.
+ break;
+
+ Filepath.erase(PrevSlash, Cursor + 3 - PrevSlash);
+ // The next ".." might be following the one we've just erased.
+ Cursor = PrevSlash;
+ }
+
+ // Remove all duplicate backslashes.
+ Cursor = 0;
+ while ((Cursor = Filepath.find("\\\\", Cursor)) != std::string::npos)
+ Filepath.erase(Cursor, 1);
+
+ Result = strdup(Filepath.c_str());
+ return StringRef(Result);
+}
+
+void WinCodeViewLineTables::maybeRecordLocation(DebugLoc DL,
+ const MachineFunction *MF) {
+ const MDNode *Scope = DL.getScope(MF->getFunction()->getContext());
+ if (!Scope)
+ return;
+ StringRef Filename = getFullFilepath(Scope);
+
+ // Skip this instruction if it has the same file:line as the previous one.
+ assert(CurFn);
+ if (!CurFn->Instrs.empty()) {
+ const InstrInfoTy &LastInstr = InstrInfo[CurFn->Instrs.back()];
+ if (LastInstr.Filename == Filename && LastInstr.LineNumber == DL.getLine())
+ return;
+ }
+ FileNameRegistry.add(Filename);
+
+ MCSymbol *MCL = Asm->MMI->getContext().CreateTempSymbol();
+ Asm->OutStreamer.EmitLabel(MCL);
+ CurFn->Instrs.push_back(MCL);
+ InstrInfo[MCL] = InstrInfoTy(Filename, DL.getLine());
+}
+
+WinCodeViewLineTables::WinCodeViewLineTables(AsmPrinter *AP)
+ : Asm(nullptr), CurFn(nullptr) {
+ MachineModuleInfo *MMI = AP->MMI;
+
+ // If the module doesn't have named metadata anchors or the COFF debug
+ // section is not available, skip any debug info related stuff.
+ if (!MMI->getModule()->getNamedMetadata("llvm.dbg.cu") ||
+ !AP->getObjFileLowering().getCOFFDebugSymbolsSection())
+ return;
+
+ // Tell MMI that we have debug info.
+ MMI->setDebugInfoAvailability(true);
+ Asm = AP;
+}
+
+static void EmitLabelDiff(MCStreamer &Streamer,
+ const MCSymbol *From, const MCSymbol *To) {
+ MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
+ MCContext &Context = Streamer.getContext();
+ const MCExpr *FromRef = MCSymbolRefExpr::Create(From, Variant, Context),
+ *ToRef = MCSymbolRefExpr::Create(To, Variant, Context);
+ const MCExpr *AddrDelta =
+ MCBinaryExpr::Create(MCBinaryExpr::Sub, ToRef, FromRef, Context);
+ Streamer.EmitValue(AddrDelta, 4);
+}
+
+void WinCodeViewLineTables::emitDebugInfoForFunction(const Function *GV) {
+ // For each function there is a separate subsection
+ // which holds the PC to file:line table.
+ const MCSymbol *Fn = Asm->getSymbol(GV);
+ assert(Fn);
+
+ const FunctionInfo &FI = FnDebugInfo[GV];
+ if (FI.Instrs.empty())
+ return;
+ assert(FI.End && "Don't know where the function ends?");
+
+ // PCs/Instructions are grouped into segments sharing the same filename.
+ // Pre-calculate the lengths (in instructions) of these segments and store
+ // them in a map for convenience. Each index in the map is the sequential
+ // number of the respective instruction that starts a new segment.
+ DenseMap<size_t, size_t> FilenameSegmentLengths;
+ size_t LastSegmentEnd = 0;
+ StringRef PrevFilename = InstrInfo[FI.Instrs[0]].Filename;
+ for (size_t J = 1, F = FI.Instrs.size(); J != F; ++J) {
+ if (PrevFilename == InstrInfo[FI.Instrs[J]].Filename)
+ continue;
+ FilenameSegmentLengths[LastSegmentEnd] = J - LastSegmentEnd;
+ LastSegmentEnd = J;
+ PrevFilename = InstrInfo[FI.Instrs[J]].Filename;
+ }
+ FilenameSegmentLengths[LastSegmentEnd] = FI.Instrs.size() - LastSegmentEnd;
+
+ // Emit the control code of the subsection followed by the payload size.
+ Asm->OutStreamer.AddComment(
+ "Linetable subsection for " + Twine(Fn->getName()));
+ Asm->EmitInt32(COFF::DEBUG_LINE_TABLE_SUBSECTION);
+ MCSymbol *SubsectionBegin = Asm->MMI->getContext().CreateTempSymbol(),
+ *SubsectionEnd = Asm->MMI->getContext().CreateTempSymbol();
+ EmitLabelDiff(Asm->OutStreamer, SubsectionBegin, SubsectionEnd);
+ Asm->OutStreamer.EmitLabel(SubsectionBegin);
+
+ // Identify the function this subsection is for.
+ Asm->OutStreamer.EmitCOFFSecRel32(Fn);
+ Asm->OutStreamer.EmitCOFFSectionIndex(Fn);
+
+ // Length of the function's code, in bytes.
+ EmitLabelDiff(Asm->OutStreamer, Fn, FI.End);
+
+ // PC-to-linenumber lookup table:
+ MCSymbol *FileSegmentEnd = nullptr;
+ for (size_t J = 0, F = FI.Instrs.size(); J != F; ++J) {
+ MCSymbol *Instr = FI.Instrs[J];
+ assert(InstrInfo.count(Instr));
+
+ if (FilenameSegmentLengths.count(J)) {
+ // We came to a beginning of a new filename segment.
+ if (FileSegmentEnd)
+ Asm->OutStreamer.EmitLabel(FileSegmentEnd);
+ StringRef CurFilename = InstrInfo[FI.Instrs[J]].Filename;
+ assert(FileNameRegistry.Infos.count(CurFilename));
+ size_t IndexInStringTable =
+ FileNameRegistry.Infos[CurFilename].FilenameID;
+ // Each segment starts with the offset of the filename
+ // in the string table.
+ Asm->OutStreamer.AddComment(
+ "Segment for file '" + Twine(CurFilename) + "' begins");
+ MCSymbol *FileSegmentBegin = Asm->MMI->getContext().CreateTempSymbol();
+ Asm->OutStreamer.EmitLabel(FileSegmentBegin);
+ Asm->EmitInt32(8 * IndexInStringTable);
+
+ // Number of PC records in the lookup table.
+ size_t SegmentLength = FilenameSegmentLengths[J];
+ Asm->EmitInt32(SegmentLength);
+
+ // Full size of the segment for this filename, including the prev two
+ // records.
+ FileSegmentEnd = Asm->MMI->getContext().CreateTempSymbol();
+ EmitLabelDiff(Asm->OutStreamer, FileSegmentBegin, FileSegmentEnd);
+ }
+
+ // The first PC with the given linenumber and the linenumber itself.
+ EmitLabelDiff(Asm->OutStreamer, Fn, Instr);
+ Asm->EmitInt32(InstrInfo[Instr].LineNumber);
+ }
+
+ if (FileSegmentEnd)
+ Asm->OutStreamer.EmitLabel(FileSegmentEnd);
+ Asm->OutStreamer.EmitLabel(SubsectionEnd);
+}
+
+void WinCodeViewLineTables::endModule() {
+ if (FnDebugInfo.empty())
+ return;
+
+ assert(Asm != nullptr);
+ Asm->OutStreamer.SwitchSection(
+ Asm->getObjFileLowering().getCOFFDebugSymbolsSection());
+ Asm->EmitInt32(COFF::DEBUG_SECTION_MAGIC);
+
+ // The COFF .debug$S section consists of several subsections, each starting
+ // with a 4-byte control code (e.g. 0xF1, 0xF2, etc) and then a 4-byte length
+ // of the payload followed by the payload itself. The subsections are 4-byte
+ // aligned.
+
+ for (size_t I = 0, E = VisitedFunctions.size(); I != E; ++I)
+ emitDebugInfoForFunction(VisitedFunctions[I]);
+
+ // This subsection holds a file index to string table offset mapping.
+ Asm->OutStreamer.AddComment("File index to string table offset subsection");
+ Asm->EmitInt32(COFF::DEBUG_INDEX_SUBSECTION);
+ size_t NumFilenames = FileNameRegistry.Infos.size();
+ Asm->EmitInt32(8 * NumFilenames);
+ for (size_t I = 0, E = FileNameRegistry.Filenames.size(); I != E; ++I) {
+ StringRef Filename = FileNameRegistry.Filenames[I];
+ // For each unique filename, just write its offset in the string table.
+ Asm->EmitInt32(FileNameRegistry.Infos[Filename].StartOffset);
+ // The function name offset is not followed by any additional data.
+ Asm->EmitInt32(0);
+ }
+
+ // This subsection holds the string table.
+ Asm->OutStreamer.AddComment("String table");
+ Asm->EmitInt32(COFF::DEBUG_STRING_TABLE_SUBSECTION);
+ Asm->EmitInt32(FileNameRegistry.LastOffset);
+ // The payload starts with a null character.
+ Asm->EmitInt8(0);
+
+ for (size_t I = 0, E = FileNameRegistry.Filenames.size(); I != E; ++I) {
+ // Just emit unique filenames one by one, separated by a null character.
+ Asm->OutStreamer.EmitBytes(FileNameRegistry.Filenames[I]);
+ Asm->EmitInt8(0);
+ }
+
+ // No more subsections. Fill with zeros to align the end of the section by 4.
+ Asm->OutStreamer.EmitFill((-FileNameRegistry.LastOffset) % 4, 0);
+
+ clear();
+}
+
+void WinCodeViewLineTables::beginFunction(const MachineFunction *MF) {
+ assert(!CurFn && "Can't process two functions at once!");
+
+ if (!Asm || !Asm->MMI->hasDebugInfo())
+ return;
+
+ const Function *GV = MF->getFunction();
+ assert(FnDebugInfo.count(GV) == false);
+ VisitedFunctions.push_back(GV);
+ CurFn = &FnDebugInfo[GV];
+
+ // Find the end of the function prolog.
+ // FIXME: is there a simpler way to do this? Can we just search
+ // for the first instruction of the function, not the last of the prolog?
+ DebugLoc PrologEndLoc;
+ bool EmptyPrologue = true;
+ for (const auto &MBB : *MF) {
+ if (!PrologEndLoc.isUnknown())
+ break;
+ for (const auto &MI : MBB) {
+ if (MI.isDebugValue())
+ continue;
+
+ // First known non-DBG_VALUE and non-frame setup location marks
+ // the beginning of the function body.
+ // FIXME: do we need the first subcondition?
+ if (!MI.getFlag(MachineInstr::FrameSetup) &&
+ (!MI.getDebugLoc().isUnknown())) {
+ PrologEndLoc = MI.getDebugLoc();
+ break;
+ }
+ EmptyPrologue = false;
+ }
+ }
+ // Record beginning of function if we have a non-empty prologue.
+ if (!PrologEndLoc.isUnknown() && !EmptyPrologue) {
+ DebugLoc FnStartDL =
+ PrologEndLoc.getFnDebugLoc(MF->getFunction()->getContext());
+ maybeRecordLocation(FnStartDL, MF);
+ }
+}
+
+void WinCodeViewLineTables::endFunction(const MachineFunction *MF) {
+ if (!Asm || !CurFn) // We haven't created any debug info for this function.
+ return;
+
+ const Function *GV = MF->getFunction();
+ assert(FnDebugInfo.count(GV));
+ assert(CurFn == &FnDebugInfo[GV]);
+
+ if (CurFn->Instrs.empty()) {
+ FnDebugInfo.erase(GV);
+ VisitedFunctions.pop_back();
+ } else {
+ // Define end label for subprogram.
+ MCSymbol *FunctionEndSym = Asm->OutStreamer.getContext().CreateTempSymbol();
+ Asm->OutStreamer.EmitLabel(FunctionEndSym);
+ CurFn->End = FunctionEndSym;
+ }
+ CurFn = nullptr;
+}
+
+void WinCodeViewLineTables::beginInstruction(const MachineInstr *MI) {
+ // Ignore DBG_VALUE locations and function prologue.
+ if (!Asm || MI->isDebugValue() || MI->getFlag(MachineInstr::FrameSetup))
+ return;
+ DebugLoc DL = MI->getDebugLoc();
+ if (DL == PrevInstLoc || DL.isUnknown())
+ return;
+ maybeRecordLocation(DL, Asm->MF);
+}
+}
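
A standalone sketch of the textual canonicalization performed by getFullFilepath, keeping only the slash-replacement, "\.\"-removal and duplicate-backslash steps for illustration (the "..\" collapsing is omitted); the function name canonicalize is hypothetical, not LLVM API:

#include <algorithm>
#include <cstddef>
#include <string>

// Apply the same textual transformations to a plain std::string.
static std::string canonicalize(std::string Path) {
  std::replace(Path.begin(), Path.end(), '/', '\\');              // '/' -> '\'
  for (std::size_t C = 0; (C = Path.find("\\.\\", C)) != std::string::npos;)
    Path.erase(C, 2);                                             // drop "\.\"
  for (std::size_t C = 0; (C = Path.find("\\\\", C)) != std::string::npos;)
    Path.erase(C, 1);                                             // collapse "\\"
  return Path;
}
// canonicalize("c:/a/./b//x.cpp") yields "c:\\a\\b\\x.cpp", i.e. c:\a\b\x.cpp.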
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.h b/contrib/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.h
new file mode 100644
index 0000000..0734d97
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.h
@@ -0,0 +1,144 @@
+//===-- llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.h ----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for writing line tables info into COFF files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CODEGEN_ASMPRINTER_WINCODEVIEWLINETABLES_H__
+#define CODEGEN_ASMPRINTER_WINCODEVIEWLINETABLES_H__
+
+#include "AsmPrinterHandler.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/LexicalScopes.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+
+namespace llvm {
+/// \brief Collects and handles line tables information in a CodeView format.
+class WinCodeViewLineTables : public AsmPrinterHandler {
+ AsmPrinter *Asm;
+ DebugLoc PrevInstLoc;
+
+ // For each function, store a vector of labels for its instructions, as well
+ // as a label for the end of the function.
+ struct FunctionInfo {
+ SmallVector<MCSymbol *, 10> Instrs;
+ MCSymbol *End;
+ FunctionInfo() : End(nullptr) {}
+ } *CurFn;
+
+ typedef DenseMap<const Function *, FunctionInfo> FnDebugInfoTy;
+ FnDebugInfoTy FnDebugInfo;
+ // Store the functions we've visited in a vector so we can maintain a stable
+ // order while emitting subsections.
+ SmallVector<const Function *, 10> VisitedFunctions;
+
+ // InstrInfoTy - Holds the Filename:LineNumber information for every
+ // instruction with a unique debug location.
+ struct InstrInfoTy {
+ StringRef Filename;
+ unsigned LineNumber;
+
+ InstrInfoTy() : LineNumber(0) {}
+
+ InstrInfoTy(StringRef Filename, unsigned LineNumber)
+ : Filename(Filename), LineNumber(LineNumber) {}
+ };
+ DenseMap<MCSymbol *, InstrInfoTy> InstrInfo;
+
+ // FileNameRegistry - Manages filenames observed while generating debug info
+ // by filtering out duplicates and keeping track of their offsets in the
+ // string table to be generated.
+ struct FileNameRegistryTy {
+ SmallVector<StringRef, 10> Filenames;
+ struct PerFileInfo {
+ size_t FilenameID, StartOffset;
+ };
+ StringMap<PerFileInfo> Infos;
+
+ // The offset in the string table where we'll write the next unique
+ // filename.
+ size_t LastOffset;
+
+ FileNameRegistryTy() {
+ clear();
+ }
+
+ // Add Filename to the registry, if it was not observed before.
+ void add(StringRef Filename) {
+ if (Infos.count(Filename))
+ return;
+ size_t OldSize = Infos.size();
+ Infos[Filename].FilenameID = OldSize;
+ Infos[Filename].StartOffset = LastOffset;
+ LastOffset += Filename.size() + 1;
+ Filenames.push_back(Filename);
+ }
+
+ void clear() {
+ LastOffset = 1;
+ Infos.clear();
+ Filenames.clear();
+ }
+ } FileNameRegistry;
+
+ typedef std::map<std::pair<StringRef, StringRef>, char *>
+ DirAndFilenameToFilepathMapTy;
+ DirAndFilenameToFilepathMapTy DirAndFilenameToFilepathMap;
+ StringRef getFullFilepath(const MDNode *S);
+
+ void maybeRecordLocation(DebugLoc DL, const MachineFunction *MF);
+
+ void clear() {
+ assert(CurFn == nullptr);
+ FileNameRegistry.clear();
+ InstrInfo.clear();
+ }
+
+ void emitDebugInfoForFunction(const Function *GV);
+
+public:
+ WinCodeViewLineTables(AsmPrinter *Asm);
+
+ ~WinCodeViewLineTables() {
+ for (DirAndFilenameToFilepathMapTy::iterator
+ I = DirAndFilenameToFilepathMap.begin(),
+ E = DirAndFilenameToFilepathMap.end();
+ I != E; ++I)
+ free(I->second);
+ }
+
+ void setSymbolSize(const llvm::MCSymbol *, uint64_t) override {}
+
+ /// \brief Emit the COFF section that holds the line table information.
+ void endModule() override;
+
+ /// \brief Gather pre-function debug information.
+ void beginFunction(const MachineFunction *MF) override;
+
+ /// \brief Gather post-function debug information.
+ void endFunction(const MachineFunction *) override;
+
+ /// \brief Process beginning of an instruction.
+ void beginInstruction(const MachineInstr *MI) override;
+
+ /// \brief Process end of an instruction.
+ void endInstruction() override {}
+};
+} // End of namespace llvm
+
+#endif
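
A small worked example of the offset bookkeeping FileNameRegistryTy::add performs: the string table starts with one NUL byte, and each unique name reserves strlen(name) + 1 bytes. Everything below is an illustrative stand-in rather than LLVM API.

#include <cstddef>
#include <cstdio>
#include <map>
#include <string>

int main() {
  std::size_t LastOffset = 1; // the string table begins with a NUL byte
  std::map<std::string, std::size_t> Start;
  for (std::string F : {"a.cpp", "dir\\b.cpp", "a.cpp"}) {
    if (Start.count(F))
      continue;               // duplicates keep their first offset
    Start[F] = LastOffset;
    LastOffset += F.size() + 1; // name plus NUL terminator
  }
  std::printf("a.cpp at %zu, dir\\b.cpp at %zu\n", Start["a.cpp"],
              Start["dir\\b.cpp"]); // prints: a.cpp at 1, dir\b.cpp at 7
}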
diff --git a/contrib/llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp b/contrib/llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp
new file mode 100644
index 0000000..421946d
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp
@@ -0,0 +1,380 @@
+//===-- AtomicExpandLoadLinkedPass.cpp - Expand atomic instructions -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a pass (at IR level) to replace atomic instructions with
+// appropriate (intrinsic-based) ldrex/strex loops.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "arm-atomic-expand"
+
+namespace {
+ class AtomicExpandLoadLinked : public FunctionPass {
+ const TargetMachine *TM;
+ public:
+ static char ID; // Pass identification, replacement for typeid
+ explicit AtomicExpandLoadLinked(const TargetMachine *TM = nullptr)
+ : FunctionPass(ID), TM(TM) {
+ initializeAtomicExpandLoadLinkedPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F) override;
+ bool expandAtomicInsts(Function &F);
+
+ bool expandAtomicLoad(LoadInst *LI);
+ bool expandAtomicStore(StoreInst *LI);
+ bool expandAtomicRMW(AtomicRMWInst *AI);
+ bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
+
+ AtomicOrdering insertLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord);
+ void insertTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord);
+ };
+}
+
+char AtomicExpandLoadLinked::ID = 0;
+char &llvm::AtomicExpandLoadLinkedID = AtomicExpandLoadLinked::ID;
+INITIALIZE_TM_PASS(AtomicExpandLoadLinked, "atomic-ll-sc",
+ "Expand Atomic calls in terms of load-linked & store-conditional",
+ false, false)
+
+FunctionPass *llvm::createAtomicExpandLoadLinkedPass(const TargetMachine *TM) {
+ return new AtomicExpandLoadLinked(TM);
+}
+
+bool AtomicExpandLoadLinked::runOnFunction(Function &F) {
+ if (!TM || !TM->getSubtargetImpl()->enableAtomicExpandLoadLinked())
+ return false;
+
+ SmallVector<Instruction *, 1> AtomicInsts;
+
+ // Changing control-flow while iterating through it is a bad idea, so gather a
+ // list of all atomic instructions before we start.
+ for (BasicBlock &BB : F)
+ for (Instruction &Inst : BB) {
+ if (isa<AtomicRMWInst>(&Inst) || isa<AtomicCmpXchgInst>(&Inst) ||
+ (isa<LoadInst>(&Inst) && cast<LoadInst>(&Inst)->isAtomic()) ||
+ (isa<StoreInst>(&Inst) && cast<StoreInst>(&Inst)->isAtomic()))
+ AtomicInsts.push_back(&Inst);
+ }
+
+ bool MadeChange = false;
+ for (Instruction *Inst : AtomicInsts) {
+ if (!TM->getTargetLowering()->shouldExpandAtomicInIR(Inst))
+ continue;
+
+ if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(Inst))
+ MadeChange |= expandAtomicRMW(AI);
+ else if (AtomicCmpXchgInst *CI = dyn_cast<AtomicCmpXchgInst>(Inst))
+ MadeChange |= expandAtomicCmpXchg(CI);
+ else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
+ MadeChange |= expandAtomicLoad(LI);
+ else if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
+ MadeChange |= expandAtomicStore(SI);
+ else
+ llvm_unreachable("Unknown atomic instruction");
+ }
+
+ return MadeChange;
+}
+
+bool AtomicExpandLoadLinked::expandAtomicLoad(LoadInst *LI) {
+ // Load instructions don't actually need a leading fence, even in the
+ // SequentiallyConsistent case.
+ AtomicOrdering MemOpOrder =
+ TM->getTargetLowering()->getInsertFencesForAtomic() ? Monotonic
+ : LI->getOrdering();
+
+ // The only 64-bit load guaranteed to be single-copy atomic by the ARM ARM is
+ // an ldrexd (A3.5.3).
+ IRBuilder<> Builder(LI);
+ Value *Val = TM->getTargetLowering()->emitLoadLinked(
+ Builder, LI->getPointerOperand(), MemOpOrder);
+
+ insertTrailingFence(Builder, LI->getOrdering());
+
+ LI->replaceAllUsesWith(Val);
+ LI->eraseFromParent();
+
+ return true;
+}
+
+bool AtomicExpandLoadLinked::expandAtomicStore(StoreInst *SI) {
+ // The only atomic 64-bit store on ARM is an strexd that succeeds, which means
+ // we need a loop and the entire instruction is essentially an "atomicrmw
+ // xchg" that ignores the value loaded.
+ IRBuilder<> Builder(SI);
+ AtomicRMWInst *AI =
+ Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
+ SI->getValueOperand(), SI->getOrdering());
+ SI->eraseFromParent();
+
+ // Now we have an appropriate swap instruction, lower it as usual.
+ return expandAtomicRMW(AI);
+}
+
+bool AtomicExpandLoadLinked::expandAtomicRMW(AtomicRMWInst *AI) {
+ AtomicOrdering Order = AI->getOrdering();
+ Value *Addr = AI->getPointerOperand();
+ BasicBlock *BB = AI->getParent();
+ Function *F = BB->getParent();
+ LLVMContext &Ctx = F->getContext();
+
+ // Given: atomicrmw some_op iN* %addr, iN %incr ordering
+ //
+ // The standard expansion we produce is:
+ // [...]
+ // fence?
+ // atomicrmw.start:
+ // %loaded = @load.linked(%addr)
+ // %new = some_op iN %loaded, %incr
+ // %stored = @store_conditional(%new, %addr)
+ // %try_again = icmp i32 ne %stored, 0
+ // br i1 %try_again, label %loop, label %atomicrmw.end
+ // atomicrmw.end:
+ // fence?
+ // [...]
+ BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
+ BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);
+
+ // This grabs the DebugLoc from AI.
+ IRBuilder<> Builder(AI);
+
+ // The split call above "helpfully" added a branch at the end of BB (to the
+ // wrong place), but we might want a fence too. It's easiest to just remove
+ // the branch entirely.
+ std::prev(BB->end())->eraseFromParent();
+ Builder.SetInsertPoint(BB);
+ AtomicOrdering MemOpOrder = insertLeadingFence(Builder, Order);
+ Builder.CreateBr(LoopBB);
+
+ // Start the main loop block now that we've taken care of the preliminaries.
+ Builder.SetInsertPoint(LoopBB);
+ Value *Loaded =
+ TM->getTargetLowering()->emitLoadLinked(Builder, Addr, MemOpOrder);
+
+ Value *NewVal;
+ switch (AI->getOperation()) {
+ case AtomicRMWInst::Xchg:
+ NewVal = AI->getValOperand();
+ break;
+ case AtomicRMWInst::Add:
+ NewVal = Builder.CreateAdd(Loaded, AI->getValOperand(), "new");
+ break;
+ case AtomicRMWInst::Sub:
+ NewVal = Builder.CreateSub(Loaded, AI->getValOperand(), "new");
+ break;
+ case AtomicRMWInst::And:
+ NewVal = Builder.CreateAnd(Loaded, AI->getValOperand(), "new");
+ break;
+ case AtomicRMWInst::Nand:
+ NewVal = Builder.CreateNot(Builder.CreateAnd(Loaded, AI->getValOperand()),
+ "new");
+ break;
+ case AtomicRMWInst::Or:
+ NewVal = Builder.CreateOr(Loaded, AI->getValOperand(), "new");
+ break;
+ case AtomicRMWInst::Xor:
+ NewVal = Builder.CreateXor(Loaded, AI->getValOperand(), "new");
+ break;
+ case AtomicRMWInst::Max:
+ NewVal = Builder.CreateICmpSGT(Loaded, AI->getValOperand());
+ NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
+ break;
+ case AtomicRMWInst::Min:
+ NewVal = Builder.CreateICmpSLE(Loaded, AI->getValOperand());
+ NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
+ break;
+ case AtomicRMWInst::UMax:
+ NewVal = Builder.CreateICmpUGT(Loaded, AI->getValOperand());
+ NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
+ break;
+ case AtomicRMWInst::UMin:
+ NewVal = Builder.CreateICmpULE(Loaded, AI->getValOperand());
+ NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
+ break;
+ default:
+ llvm_unreachable("Unknown atomic op");
+ }
+
+ Value *StoreSuccess = TM->getTargetLowering()->emitStoreConditional(
+ Builder, NewVal, Addr, MemOpOrder);
+ Value *TryAgain = Builder.CreateICmpNE(
+ StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
+ Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);
+
+ Builder.SetInsertPoint(ExitBB, ExitBB->begin());
+ insertTrailingFence(Builder, Order);
+
+ AI->replaceAllUsesWith(Loaded);
+ AI->eraseFromParent();
+
+ return true;
+}
+
+bool AtomicExpandLoadLinked::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
+ AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
+ AtomicOrdering FailureOrder = CI->getFailureOrdering();
+ Value *Addr = CI->getPointerOperand();
+ BasicBlock *BB = CI->getParent();
+ Function *F = BB->getParent();
+ LLVMContext &Ctx = F->getContext();
+
+ // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
+ //
+ // The full expansion we produce is:
+ // [...]
+ // fence?
+ // cmpxchg.start:
+ // %loaded = @load.linked(%addr)
+ // %should_store = icmp eq %loaded, %desired
+ // br i1 %should_store, label %cmpxchg.trystore,
+ // label %cmpxchg.failure
+ // cmpxchg.trystore:
+ // %stored = @store_conditional(%new, %addr)
+ // %success = icmp eq i32 %stored, 0
+ // br i1 %success, label %cmpxchg.success, label %loop/%cmpxchg.failure
+ // cmpxchg.success:
+ // fence?
+ // br label %cmpxchg.end
+ // cmpxchg.failure:
+ // fence?
+ // br label %cmpxchg.end
+ // cmpxchg.end:
+ // %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
+ // %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
+ // %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
+ // [...]
+ BasicBlock *ExitBB = BB->splitBasicBlock(CI, "cmpxchg.end");
+ auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
+ auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, FailureBB);
+ auto TryStoreBB = BasicBlock::Create(Ctx, "cmpxchg.trystore", F, SuccessBB);
+ auto LoopBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, TryStoreBB);
+
+ // This grabs the DebugLoc from CI
+ IRBuilder<> Builder(CI);
+
+ // The split call above "helpfully" added a branch at the end of BB (to the
+ // wrong place), but we might want a fence too. It's easiest to just remove
+ // the branch entirely.
+ std::prev(BB->end())->eraseFromParent();
+ Builder.SetInsertPoint(BB);
+ AtomicOrdering MemOpOrder = insertLeadingFence(Builder, SuccessOrder);
+ Builder.CreateBr(LoopBB);
+
+ // Start the main loop block now that we've taken care of the preliminaries.
+ Builder.SetInsertPoint(LoopBB);
+ Value *Loaded =
+ TM->getTargetLowering()->emitLoadLinked(Builder, Addr, MemOpOrder);
+ Value *ShouldStore =
+ Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");
+
+ // If the cmpxchg doesn't actually need any ordering when it fails, we can
+ // jump straight past that fence instruction (if it exists).
+ Builder.CreateCondBr(ShouldStore, TryStoreBB, FailureBB);
+
+ Builder.SetInsertPoint(TryStoreBB);
+ Value *StoreSuccess = TM->getTargetLowering()->emitStoreConditional(
+ Builder, CI->getNewValOperand(), Addr, MemOpOrder);
+ StoreSuccess = Builder.CreateICmpEQ(
+ StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
+ Builder.CreateCondBr(StoreSuccess, SuccessBB,
+ CI->isWeak() ? FailureBB : LoopBB);
+
+ // Make sure later instructions don't get reordered with a fence if necessary.
+ Builder.SetInsertPoint(SuccessBB);
+ insertTrailingFence(Builder, SuccessOrder);
+ Builder.CreateBr(ExitBB);
+
+ Builder.SetInsertPoint(FailureBB);
+ insertTrailingFence(Builder, FailureOrder);
+ Builder.CreateBr(ExitBB);
+
+ // Finally, we have control-flow based knowledge of whether the cmpxchg
+ // succeeded or not. We expose this to later passes by converting any
+ // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate PHI.
+
+ // Setup the builder so we can create any PHIs we need.
+ Builder.SetInsertPoint(ExitBB, ExitBB->begin());
+ PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2);
+ Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
+ Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);
+
+ // Look for any users of the cmpxchg that are just comparing the loaded value
+ // against the desired one, and replace them with the CFG-derived version.
+ SmallVector<ExtractValueInst *, 2> PrunedInsts;
+ for (auto User : CI->users()) {
+ ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
+ if (!EV)
+ continue;
+
+ assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
+ "weird extraction from { iN, i1 }");
+
+ if (EV->getIndices()[0] == 0)
+ EV->replaceAllUsesWith(Loaded);
+ else
+ EV->replaceAllUsesWith(Success);
+
+ PrunedInsts.push_back(EV);
+ }
+
+ // We can remove the instructions now we're no longer iterating through them.
+ for (auto EV : PrunedInsts)
+ EV->eraseFromParent();
+
+ if (!CI->use_empty()) {
+ // Some use of the full struct return that we don't understand has happened,
+ // so we've got to reconstruct it properly.
+ Value *Res;
+ Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
+ Res = Builder.CreateInsertValue(Res, Success, 1);
+
+ CI->replaceAllUsesWith(Res);
+ }
+
+ CI->eraseFromParent();
+ return true;
+}
+
+AtomicOrdering AtomicExpandLoadLinked::insertLeadingFence(IRBuilder<> &Builder,
+ AtomicOrdering Ord) {
+ if (!TM->getTargetLowering()->getInsertFencesForAtomic())
+ return Ord;
+
+ if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
+ Builder.CreateFence(Release);
+
+ // The exclusive operations don't need any barrier if we're adding separate
+ // fences.
+ return Monotonic;
+}
+
+void AtomicExpandLoadLinked::insertTrailingFence(IRBuilder<> &Builder,
+ AtomicOrdering Ord) {
+ if (!TM->getTargetLowering()->getInsertFencesForAtomic())
+ return;
+
+ if (Ord == Acquire || Ord == AcquireRelease)
+ Builder.CreateFence(Acquire);
+ else if (Ord == SequentiallyConsistent)
+ Builder.CreateFence(SequentiallyConsistent);
+}
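
For targets that request explicit fences (getInsertFencesForAtomic()), the two helpers above reduce any ordering to a monotonic LL/SC loop bracketed by fences: a release fence before the loop for release-or-stronger orderings, and an acquire or seq_cst fence after it. A minimal standalone sketch of that mapping, assuming a simplified Ordering enum rather than llvm::AtomicOrdering:

#include <cstdio>

// Simplified stand-in for llvm::AtomicOrdering (an assumption, not the real enum).
enum Ordering { Monotonic, Acquire, Release, AcquireRelease, SequentiallyConsistent };

struct FencePlan {
  bool LeadingRelease;   // release fence emitted before the monotonic LL/SC loop
  Ordering Trailing;     // fence emitted after it; Monotonic means "none"
};

static FencePlan planFences(Ordering Ord) {
  FencePlan P = { false, Monotonic };
  if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    P.LeadingRelease = true;
  if (Ord == Acquire || Ord == AcquireRelease)
    P.Trailing = Acquire;
  else if (Ord == SequentiallyConsistent)
    P.Trailing = SequentiallyConsistent;
  return P;
}

int main() {
  FencePlan P = planFences(SequentiallyConsistent);
  std::printf("leading release: %d, trailing: %d\n", P.LeadingRelease, (int)P.Trailing);
  return 0;
}
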
diff --git a/contrib/llvm/lib/CodeGen/BasicTargetTransformInfo.cpp b/contrib/llvm/lib/CodeGen/BasicTargetTransformInfo.cpp
index 24aa1ab..b2737bf 100644
--- a/contrib/llvm/lib/CodeGen/BasicTargetTransformInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/BasicTargetTransformInfo.cpp
@@ -15,27 +15,37 @@
///
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "basictti"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <utility>
-
using namespace llvm;
+static cl::opt<unsigned>
+PartialUnrollingThreshold("partial-unrolling-threshold", cl::init(0),
+ cl::desc("Threshold for partial unrolling"), cl::Hidden);
+
+#define DEBUG_TYPE "basictti"
+
namespace {
-class BasicTTI : public ImmutablePass, public TargetTransformInfo {
+class BasicTTI final : public ImmutablePass, public TargetTransformInfo {
const TargetMachine *TM;
/// Estimate the overhead of scalarizing an instruction. Insert and Extract
/// are set if the result needs to be inserted and/or extracted from vectors.
unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;
+ /// Estimate the cost overhead of SK_Alternate shuffle.
+ unsigned getAltShuffleOverhead(Type *Ty) const;
+
const TargetLoweringBase *getTLI() const { return TM->getTargetLowering(); }
public:
- BasicTTI() : ImmutablePass(ID), TM(0) {
+ BasicTTI() : ImmutablePass(ID), TM(nullptr) {
llvm_unreachable("This pass cannot be directly constructed");
}
@@ -43,15 +53,11 @@ public:
initializeBasicTTIPass(*PassRegistry::getPassRegistry());
}
- virtual void initializePass() {
+ void initializePass() override {
pushTTIStack(this);
}
- virtual void finalizePass() {
- popTTIStack();
- }
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
TargetTransformInfo::getAnalysisUsage(AU);
}
@@ -59,61 +65,61 @@ public:
static char ID;
/// Provide necessary pointer adjustments for the two base classes.
- virtual void *getAdjustedAnalysisPointer(const void *ID) {
+ void *getAdjustedAnalysisPointer(const void *ID) override {
if (ID == &TargetTransformInfo::ID)
return (TargetTransformInfo*)this;
return this;
}
- virtual bool hasBranchDivergence() const;
+ bool hasBranchDivergence() const override;
/// \name Scalar TTI Implementations
/// @{
- virtual bool isLegalAddImmediate(int64_t imm) const;
- virtual bool isLegalICmpImmediate(int64_t imm) const;
- virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
- int64_t BaseOffset, bool HasBaseReg,
- int64_t Scale) const;
- virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
- int64_t BaseOffset, bool HasBaseReg,
- int64_t Scale) const;
- virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
- virtual bool isTypeLegal(Type *Ty) const;
- virtual unsigned getJumpBufAlignment() const;
- virtual unsigned getJumpBufSize() const;
- virtual bool shouldBuildLookupTables() const;
- virtual bool haveFastSqrt(Type *Ty) const;
- virtual void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) const;
+ bool isLegalAddImmediate(int64_t imm) const override;
+ bool isLegalICmpImmediate(int64_t imm) const override;
+ bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
+ int64_t BaseOffset, bool HasBaseReg,
+ int64_t Scale) const override;
+ int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
+ int64_t BaseOffset, bool HasBaseReg,
+ int64_t Scale) const override;
+ bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
+ bool isTypeLegal(Type *Ty) const override;
+ unsigned getJumpBufAlignment() const override;
+ unsigned getJumpBufSize() const override;
+ bool shouldBuildLookupTables() const override;
+ bool haveFastSqrt(Type *Ty) const override;
+ void getUnrollingPreferences(Loop *L,
+ UnrollingPreferences &UP) const override;
/// @}
/// \name Vector TTI Implementations
/// @{
- virtual unsigned getNumberOfRegisters(bool Vector) const;
- virtual unsigned getMaximumUnrollFactor() const;
- virtual unsigned getRegisterBitWidth(bool Vector) const;
- virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
- OperandValueKind,
- OperandValueKind) const;
- virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
- int Index, Type *SubTp) const;
- virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
- Type *Src) const;
- virtual unsigned getCFInstrCost(unsigned Opcode) const;
- virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
- Type *CondTy) const;
- virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
- unsigned Index) const;
- virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
- unsigned Alignment,
- unsigned AddressSpace) const;
- virtual unsigned getIntrinsicInstrCost(Intrinsic::ID, Type *RetTy,
- ArrayRef<Type*> Tys) const;
- virtual unsigned getNumberOfParts(Type *Tp) const;
- virtual unsigned getAddressComputationCost(Type *Ty, bool IsComplex) const;
- virtual unsigned getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwise) const;
+ unsigned getNumberOfRegisters(bool Vector) const override;
+ unsigned getMaximumUnrollFactor() const override;
+ unsigned getRegisterBitWidth(bool Vector) const override;
+ unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind,
+ OperandValueKind) const override;
+ unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
+ int Index, Type *SubTp) const override;
+ unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
+ Type *Src) const override;
+ unsigned getCFInstrCost(unsigned Opcode) const override;
+ unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
+ Type *CondTy) const override;
+ unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
+ unsigned Index) const override;
+ unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace) const override;
+ unsigned getIntrinsicInstrCost(Intrinsic::ID, Type *RetTy,
+ ArrayRef<Type*> Tys) const override;
+ unsigned getNumberOfParts(Type *Tp) const override;
+ unsigned getAddressComputationCost(Type *Ty, bool IsComplex) const override;
+ unsigned getReductionCost(unsigned Opcode, Type *Ty,
+ bool IsPairwise) const override;
/// @}
};
@@ -191,7 +197,61 @@ bool BasicTTI::haveFastSqrt(Type *Ty) const {
return TLI->isTypeLegal(VT) && TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
}
-void BasicTTI::getUnrollingPreferences(Loop *, UnrollingPreferences &) const { }
+void BasicTTI::getUnrollingPreferences(Loop *L,
+ UnrollingPreferences &UP) const {
+ // This unrolling functionality is target independent, but to provide some
+ // motivation for its intended use, consider x86:
+
+ // According to the Intel 64 and IA-32 Architectures Optimization Reference
+ // Manual, Intel Core models and later have a loop stream detector
+ // (and associated uop queue) that can benefit from partial unrolling.
+ // The relevant requirements are:
+ // - The loop must have no more than 4 (8 for Nehalem and later) branches
+ // taken, and none of them may be calls.
+ // - The loop can have no more than 18 (28 for Nehalem and later) uops.
+
+ // According to the Software Optimization Guide for AMD Family 15h Processors,
+ // models 30h-4fh (Steamroller and later) have a loop predictor and loop
+ // buffer which can benefit from partial unrolling.
+ // The relevant requirements are:
+ // - The loop must have fewer than 16 branches
+ // - The loop must have less than 40 uops in all executed loop branches
+
+ // The number of taken branches in a loop is hard to estimate here, and
+ // benchmarking has revealed that it is better not to be conservative when
+ // estimating the branch count. As a result, we'll ignore the branch limits
+ // until someone finds a case where it matters in practice.
+
+ unsigned MaxOps;
+ const TargetSubtargetInfo *ST = &TM->getSubtarget<TargetSubtargetInfo>();
+ if (PartialUnrollingThreshold.getNumOccurrences() > 0)
+ MaxOps = PartialUnrollingThreshold;
+ else if (ST->getSchedModel()->LoopMicroOpBufferSize > 0)
+ MaxOps = ST->getSchedModel()->LoopMicroOpBufferSize;
+ else
+ return;
+
+ // Scan the loop: don't unroll loops with calls.
+ for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
+ I != E; ++I) {
+ BasicBlock *BB = *I;
+
+ for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
+ if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
+ ImmutableCallSite CS(J);
+ if (const Function *F = CS.getCalledFunction()) {
+ if (!TopTTI->isLoweredToCall(F))
+ continue;
+ }
+
+ return;
+ }
+ }
+
+ // Enable runtime and partial unrolling up to the specified size.
+ UP.Partial = UP.Runtime = true;
+ UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
+}
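
In short, the selection above is: an explicit -partial-unrolling-threshold always wins, otherwise the subtarget's modelled loop micro-op buffer size is used, and a loop containing a call that really lowers to a call is never unrolled. A rough sketch of that decision, with plain parameters standing in for the cl::opt and scheduling-model queries (names here are illustrative only):

// Returns 0 when partial/runtime unrolling should not be enabled at all.
unsigned pickPartialUnrollThreshold(unsigned CmdLineThreshold,     // 0 = flag not given
                                    unsigned LoopUopBufferSize,    // 0 = not modelled
                                    bool LoopContainsLoweredCall) {
  if (LoopContainsLoweredCall)
    return 0;                      // don't unroll loops with real calls
  if (CmdLineThreshold != 0)
    return CmdLineThreshold;       // -partial-unrolling-threshold=N
  return LoopUopBufferSize;        // e.g. the loop-buffer uop budget of the core
}
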
//===----------------------------------------------------------------------===//
//
@@ -270,8 +330,28 @@ unsigned BasicTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
return OpCost;
}
+unsigned BasicTTI::getAltShuffleOverhead(Type *Ty) const {
+ assert(Ty->isVectorTy() && "Can only shuffle vectors");
+ unsigned Cost = 0;
+ // Shuffle cost is equal to the cost of extracting elements from its
+ // arguments plus the cost of inserting them into the result vector.
+
+ // e.g. <4 x float> has a mask of <0,5,2,7>, i.e. we need to extract from
+ // index 0 of the first vector, index 1 of the second vector, index 2 of the
+ // first vector and finally index 3 of the second vector, and insert them at
+ // indices <0,1,2,3> of the result vector.
+ for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
+ Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
+ Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
+ }
+ return Cost;
+}
+
unsigned BasicTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) const {
+ if (Kind == SK_Alternate) {
+ return getAltShuffleOverhead(Tp);
+ }
return 1;
}
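
As a worked example of the SK_Alternate costing above: with the unit extract/insert costs this file returns by default, an alternating shuffle of <4 x float> is priced at 4 extracts plus 4 inserts, i.e. 8. A small sketch with the per-lane costs passed in explicitly (illustrative only, not the TTI interface):

unsigned altShuffleCostSketch(unsigned NumElts, unsigned ExtractCost,
                              unsigned InsertCost) {
  unsigned Cost = 0;
  for (unsigned i = 0; i != NumElts; ++i)
    Cost += ExtractCost + InsertCost;   // one lane extracted, one inserted
  return Cost;                          // altShuffleCostSketch(4, 1, 1) == 8
}
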
@@ -302,7 +382,8 @@ unsigned BasicTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
return 0;
// If the cast is marked as legal (or promote) then assume low cost.
- if (TLI->isOperationLegalOrPromote(ISD, DstLT.second))
+ if (SrcLT.first == DstLT.first &&
+ TLI->isOperationLegalOrPromote(ISD, DstLT.second))
return 1;
// Handle scalar conversions.
@@ -409,7 +490,9 @@ unsigned BasicTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
unsigned BasicTTI::getVectorInstrCost(unsigned Opcode, Type *Val,
unsigned Index) const {
- return 1;
+ std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Val->getScalarType());
+
+ return LT.first;
}
unsigned BasicTTI::getMemoryOpCost(unsigned Opcode, Type *Src,
@@ -418,8 +501,32 @@ unsigned BasicTTI::getMemoryOpCost(unsigned Opcode, Type *Src,
assert(!Src->isVoidTy() && "Invalid type");
std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Src);
- // Assume that all loads of legal types cost 1.
- return LT.first;
+ // Assuming that all loads of legal types cost 1.
+ unsigned Cost = LT.first;
+
+ if (Src->isVectorTy() &&
+ Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
+ // This is a vector load that legalizes to a larger type than the vector
+ // itself. Unless the corresponding extending load or truncating store is
+ // legal, this will scalarize.
+ TargetLowering::LegalizeAction LA = TargetLowering::Expand;
+ EVT MemVT = getTLI()->getValueType(Src, true);
+ if (MemVT.isSimple() && MemVT != MVT::Other) {
+ if (Opcode == Instruction::Store)
+ LA = getTLI()->getTruncStoreAction(LT.second, MemVT.getSimpleVT());
+ else
+ LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, MemVT.getSimpleVT());
+ }
+
+ if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
+ // This is a vector load/store for some illegal type that is scalarized.
+ // We must account for the cost of building or decomposing the vector.
+ Cost += getScalarizationOverhead(Src, Opcode != Instruction::Store,
+ Opcode == Instruction::Store);
+ }
+ }
+
+ return Cost;
}
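
The branch added above only fires for a vector access whose legalized register type is wider than its in-memory type; the access stays at the base legalization cost only if the matching extending load (or truncating store) is Legal or Custom, and otherwise the scalarization overhead is added on top. Condensed decision sketch; the enum paraphrases TargetLowering::LegalizeAction and is not the real type:

enum ActionSketch { LegalA, CustomA, PromoteA, ExpandA };

bool memAccessWillScalarize(bool IsVector, unsigned MemBits, unsigned RegBits,
                            ActionSketch ExtLoadOrTruncStoreAction) {
  if (!IsVector || MemBits >= RegBits)
    return false;                        // type doesn't widen: base cost only
  return ExtLoadOrTruncStoreAction != LegalA &&
         ExtLoadOrTruncStoreAction != CustomA;
}
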
unsigned BasicTTI::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
@@ -464,7 +571,7 @@ unsigned BasicTTI::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
case Intrinsic::round: ISD = ISD::FROUND; break;
case Intrinsic::pow: ISD = ISD::FPOW; break;
case Intrinsic::fma: ISD = ISD::FMA; break;
- case Intrinsic::fmuladd: ISD = ISD::FMA; break; // FIXME: mul + add?
+ case Intrinsic::fmuladd: ISD = ISD::FMA; break;
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
return 0;
@@ -489,6 +596,12 @@ unsigned BasicTTI::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
return LT.first * 2;
}
+ // If we can't lower fmuladd into an FMA, estimate the cost as a floating
+ // point mul followed by an add.
+ if (IID == Intrinsic::fmuladd)
+ return TopTTI->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
+ TopTTI->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);
+
// Else, assume that we need to scalarize this intrinsic. For math builtins
// this will emit a costly libcall, adding call overhead and spills. Make it
// very expensive.
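
The fmuladd change above prices the intrinsic as an ordinary multiply followed by an add whenever FMA is not legal for the type. Trivial illustration; the unit costs in the comment are an assumption, real values come from getArithmeticInstrCost:

unsigned fmuladdCostSketch(bool FMALegal, unsigned FMACost,
                           unsigned FMulCost, unsigned FAddCost) {
  return FMALegal ? FMACost : FMulCost + FAddCost;   // e.g. 1 vs. 1 + 1 = 2
}
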
diff --git a/contrib/llvm/lib/CodeGen/BranchFolding.cpp b/contrib/llvm/lib/CodeGen/BranchFolding.cpp
index 9cd4208..7503e57 100644
--- a/contrib/llvm/lib/CodeGen/BranchFolding.cpp
+++ b/contrib/llvm/lib/CodeGen/BranchFolding.cpp
@@ -16,7 +16,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "branchfolding"
#include "BranchFolding.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
@@ -38,6 +37,8 @@
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "branchfolding"
+
STATISTIC(NumDeadBlocks, "Number of dead blocks removed");
STATISTIC(NumBranchOpts, "Number of branches optimized");
STATISTIC(NumTailMerge , "Number of block tails merged");
@@ -66,9 +67,9 @@ namespace {
static char ID;
explicit BranchFolderPass(): MachineFunctionPass(ID) {}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<TargetPassConfig>();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -82,8 +83,15 @@ INITIALIZE_PASS(BranchFolderPass, "branch-folder",
"Control Flow Optimizer", false, false)
bool BranchFolderPass::runOnMachineFunction(MachineFunction &MF) {
+ if (skipOptnoneFunction(*MF.getFunction()))
+ return false;
+
TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
- BranchFolder Folder(PassConfig->getEnableTailMerge(), /*CommonHoist=*/true);
+ // Tail merging can create a jump into the middle of an if branch, making the
+ // CFG irreducible for hardware that requires a structured CFG.
+ bool EnableTailMerge = !MF.getTarget().requiresStructuredCFG() &&
+ PassConfig->getEnableTailMerge();
+ BranchFolder Folder(EnableTailMerge, /*CommonHoist=*/true);
return Folder.OptimizeFunction(MF,
MF.getTarget().getInstrInfo(),
MF.getTarget().getRegisterInfo(),
@@ -182,7 +190,7 @@ bool BranchFolder::OptimizeFunction(MachineFunction &MF,
TII = tii;
TRI = tri;
MMI = mmi;
- RS = NULL;
+ RS = nullptr;
// Use a RegScavenger to help update liveness when required.
MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -194,7 +202,7 @@ bool BranchFolder::OptimizeFunction(MachineFunction &MF,
// Fix CFG. The later algorithms expect it to be right.
bool MadeChange = false;
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; I++) {
- MachineBasicBlock *MBB = I, *TBB = 0, *FBB = 0;
+ MachineBasicBlock *MBB = I, *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
if (!TII->AnalyzeBranch(*MBB, TBB, FBB, Cond, true))
MadeChange |= MBB->CorrectExtraCFGEdges(TBB, FBB, !Cond.empty());
@@ -213,7 +221,7 @@ bool BranchFolder::OptimizeFunction(MachineFunction &MF,
// See if any jump tables have become dead as the code generator
// did its thing.
MachineJumpTableInfo *JTI = MF.getJumpTableInfo();
- if (JTI == 0) {
+ if (!JTI) {
delete RS;
return MadeChange;
}
@@ -379,7 +387,7 @@ void BranchFolder::MaintainLiveIns(MachineBasicBlock *CurMBB,
if (RS) {
RS->enterBasicBlock(CurMBB);
if (!CurMBB->empty())
- RS->forward(prior(CurMBB->end()));
+ RS->forward(std::prev(CurMBB->end()));
BitVector RegsLiveAtExit(TRI->getNumRegs());
RS->getRegsUsed(RegsLiveAtExit, false);
for (unsigned int i = 0, e = TRI->getNumRegs(); i != e; i++)
@@ -409,7 +417,7 @@ MachineBasicBlock *BranchFolder::SplitMBBAt(MachineBasicBlock &CurMBB,
MachineBasicBlock::iterator BBI1,
const BasicBlock *BB) {
if (!TII->isLegalToSplitMBBAt(CurMBB, BBI1))
- return 0;
+ return nullptr;
MachineFunction &MF = *CurMBB.getParent();
@@ -458,8 +466,8 @@ static unsigned EstimateRuntime(MachineBasicBlock::iterator I,
static void FixTail(MachineBasicBlock *CurMBB, MachineBasicBlock *SuccBB,
const TargetInstrInfo *TII) {
MachineFunction *MF = CurMBB->getParent();
- MachineFunction::iterator I = llvm::next(MachineFunction::iterator(CurMBB));
- MachineBasicBlock *TBB = 0, *FBB = 0;
+ MachineFunction::iterator I = std::next(MachineFunction::iterator(CurMBB));
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
DebugLoc dl; // FIXME: this is nowhere
if (I != MF->end() &&
@@ -468,12 +476,12 @@ static void FixTail(MachineBasicBlock *CurMBB, MachineBasicBlock *SuccBB,
if (TBB == NextBB && !Cond.empty() && !FBB) {
if (!TII->ReverseBranchCondition(Cond)) {
TII->RemoveBranch(*CurMBB);
- TII->InsertBranch(*CurMBB, SuccBB, NULL, Cond, dl);
+ TII->InsertBranch(*CurMBB, SuccBB, nullptr, Cond, dl);
return;
}
}
}
- TII->InsertBranch(*CurMBB, SuccBB, NULL,
+ TII->InsertBranch(*CurMBB, SuccBB, nullptr,
SmallVector<MachineOperand, 0>(), dl);
}
@@ -596,12 +604,11 @@ unsigned BranchFolder::ComputeSameTails(unsigned CurHash,
unsigned maxCommonTailLength = 0U;
SameTails.clear();
MachineBasicBlock::iterator TrialBBI1, TrialBBI2;
- MPIterator HighestMPIter = prior(MergePotentials.end());
- for (MPIterator CurMPIter = prior(MergePotentials.end()),
+ MPIterator HighestMPIter = std::prev(MergePotentials.end());
+ for (MPIterator CurMPIter = std::prev(MergePotentials.end()),
B = MergePotentials.begin();
- CurMPIter != B && CurMPIter->getHash() == CurHash;
- --CurMPIter) {
- for (MPIterator I = prior(CurMPIter); I->getHash() == CurHash ; --I) {
+ CurMPIter != B && CurMPIter->getHash() == CurHash; --CurMPIter) {
+ for (MPIterator I = std::prev(CurMPIter); I->getHash() == CurHash; --I) {
unsigned CommonTailLen;
if (ProfitableToMerge(CurMPIter->getBlock(), I->getBlock(),
minCommonTailLength,
@@ -630,9 +637,9 @@ void BranchFolder::RemoveBlocksWithHash(unsigned CurHash,
MachineBasicBlock *SuccBB,
MachineBasicBlock *PredBB) {
MPIterator CurMPIter, B;
- for (CurMPIter = prior(MergePotentials.end()), B = MergePotentials.begin();
- CurMPIter->getHash() == CurHash;
- --CurMPIter) {
+ for (CurMPIter = std::prev(MergePotentials.end()),
+ B = MergePotentials.begin();
+ CurMPIter->getHash() == CurHash; --CurMPIter) {
// Put the unconditional branch back, if we need one.
MachineBasicBlock *CurMBB = CurMPIter->getBlock();
if (SuccBB && CurMBB != PredBB)
@@ -843,7 +850,7 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
// See if we can do any tail merging on those.
if (MergePotentials.size() >= 2)
- MadeChange |= TryTailMergeBlocks(NULL, NULL);
+ MadeChange |= TryTailMergeBlocks(nullptr, nullptr);
// Look at blocks (IBB) with multiple predecessors (PBB).
// We change each predecessor to a canonical form, by
@@ -864,12 +871,12 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
// a compile-time infinite loop repeatedly doing and undoing the same
// transformations.)
- for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
+ for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end();
I != E; ++I) {
if (I->pred_size() < 2) continue;
SmallPtrSet<MachineBasicBlock *, 8> UniquePreds;
MachineBasicBlock *IBB = I;
- MachineBasicBlock *PredBB = prior(I);
+ MachineBasicBlock *PredBB = std::prev(I);
MergePotentials.clear();
for (MachineBasicBlock::pred_iterator P = I->pred_begin(),
E2 = I->pred_end();
@@ -890,7 +897,7 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
if (PBB->getLandingPadSuccessor())
continue;
- MachineBasicBlock *TBB = 0, *FBB = 0;
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
if (!TII->AnalyzeBranch(*PBB, TBB, FBB, Cond, true)) {
// Failing case: IBB is the target of a cbr, and we cannot reverse the
@@ -901,7 +908,7 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
continue;
// This is the QBB case described above
if (!FBB)
- FBB = llvm::next(MachineFunction::iterator(PBB));
+ FBB = std::next(MachineFunction::iterator(PBB));
}
// Failing case: the only way IBB can be reached from PBB is via
@@ -909,10 +916,10 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
// a bit in the edge so we didn't have to do all this.
if (IBB->isLandingPad()) {
MachineFunction::iterator IP = PBB; IP++;
- MachineBasicBlock *PredNextBB = NULL;
+ MachineBasicBlock *PredNextBB = nullptr;
if (IP != MF.end())
PredNextBB = IP;
- if (TBB == NULL) {
+ if (!TBB) {
if (IBB != PredNextBB) // fallthrough
continue;
} else if (FBB) {
@@ -933,7 +940,8 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
TII->RemoveBranch(*PBB);
if (!Cond.empty())
// reinsert conditional branch only, for now
- TII->InsertBranch(*PBB, (TBB == IBB) ? FBB : TBB, 0, NewCond, dl);
+ TII->InsertBranch(*PBB, (TBB == IBB) ? FBB : TBB, nullptr,
+ NewCond, dl);
}
MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(PBB), *P));
@@ -951,7 +959,7 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
// Reinsert an unconditional branch if needed. The 1 below can occur as a
// result of removing blocks in TryTailMergeBlocks.
- PredBB = prior(I); // this may have been changed in TryTailMergeBlocks
+ PredBB = std::prev(I); // this may have been changed in TryTailMergeBlocks
if (MergePotentials.size() == 1 &&
MergePotentials.begin()->getBlock() != PredBB)
FixTail(MergePotentials.begin()->getBlock(), IBB, TII);
@@ -970,7 +978,7 @@ bool BranchFolder::OptimizeBranches(MachineFunction &MF) {
// Make sure blocks are numbered in order
MF.RenumberBlocks();
- for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
+ for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end();
I != E; ) {
MachineBasicBlock *MBB = I++;
MadeChange |= OptimizeBlock(MBB);
@@ -1091,9 +1099,9 @@ ReoptimizeBlock:
// Check to see if we can simplify the terminator of the block before this
// one.
- MachineBasicBlock &PrevBB = *prior(MachineFunction::iterator(MBB));
+ MachineBasicBlock &PrevBB = *std::prev(MachineFunction::iterator(MBB));
- MachineBasicBlock *PriorTBB = 0, *PriorFBB = 0;
+ MachineBasicBlock *PriorTBB = nullptr, *PriorFBB = nullptr;
SmallVector<MachineOperand, 4> PriorCond;
bool PriorUnAnalyzable =
TII->AnalyzeBranch(PrevBB, PriorTBB, PriorFBB, PriorCond, true);
@@ -1110,7 +1118,7 @@ ReoptimizeBlock:
TII->RemoveBranch(PrevBB);
PriorCond.clear();
if (PriorTBB != MBB)
- TII->InsertBranch(PrevBB, PriorTBB, 0, PriorCond, dl);
+ TII->InsertBranch(PrevBB, PriorTBB, nullptr, PriorCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
@@ -1154,7 +1162,7 @@ ReoptimizeBlock:
// If the previous branch *only* branches to *this* block (conditional or
// not) remove the branch.
- if (PriorTBB == MBB && PriorFBB == 0) {
+ if (PriorTBB == MBB && !PriorFBB) {
TII->RemoveBranch(PrevBB);
MadeChange = true;
++NumBranchOpts;
@@ -1166,7 +1174,7 @@ ReoptimizeBlock:
if (PriorFBB == MBB) {
DebugLoc dl = getBranchDebugLoc(PrevBB);
TII->RemoveBranch(PrevBB);
- TII->InsertBranch(PrevBB, PriorTBB, 0, PriorCond, dl);
+ TII->InsertBranch(PrevBB, PriorTBB, nullptr, PriorCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
@@ -1180,7 +1188,7 @@ ReoptimizeBlock:
if (!TII->ReverseBranchCondition(NewPriorCond)) {
DebugLoc dl = getBranchDebugLoc(PrevBB);
TII->RemoveBranch(PrevBB);
- TII->InsertBranch(PrevBB, PriorFBB, 0, NewPriorCond, dl);
+ TII->InsertBranch(PrevBB, PriorFBB, nullptr, NewPriorCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
@@ -1195,7 +1203,7 @@ ReoptimizeBlock:
// We consider it more likely that execution will stay in the function (e.g.
// due to loops) than it is to exit it. This asserts in loops etc, moving
// the assert condition out of the loop body.
- if (MBB->succ_empty() && !PriorCond.empty() && PriorFBB == 0 &&
+ if (MBB->succ_empty() && !PriorCond.empty() && !PriorFBB &&
MachineFunction::iterator(PriorTBB) == FallThrough &&
!MBB->canFallThrough()) {
bool DoTransform = true;
@@ -1218,7 +1226,7 @@ ReoptimizeBlock:
DebugLoc dl = getBranchDebugLoc(PrevBB);
TII->RemoveBranch(PrevBB);
- TII->InsertBranch(PrevBB, MBB, 0, NewPriorCond, dl);
+ TII->InsertBranch(PrevBB, MBB, nullptr, NewPriorCond, dl);
// Move this block to the end of the function.
MBB->moveAfter(--MF.end());
@@ -1231,7 +1239,7 @@ ReoptimizeBlock:
}
// Analyze the branch in the current block.
- MachineBasicBlock *CurTBB = 0, *CurFBB = 0;
+ MachineBasicBlock *CurTBB = nullptr, *CurFBB = nullptr;
SmallVector<MachineOperand, 4> CurCond;
bool CurUnAnalyzable= TII->AnalyzeBranch(*MBB, CurTBB, CurFBB, CurCond, true);
if (!CurUnAnalyzable) {
@@ -1257,7 +1265,7 @@ ReoptimizeBlock:
// If this branch is the only thing in its block, see if we can forward
// other blocks across it.
- if (CurTBB && CurCond.empty() && CurFBB == 0 &&
+ if (CurTBB && CurCond.empty() && !CurFBB &&
IsBranchOnlyBlock(MBB) && CurTBB != MBB &&
!MBB->hasAddressTaken()) {
DebugLoc dl = getBranchDebugLoc(*MBB);
@@ -1295,12 +1303,12 @@ ReoptimizeBlock:
// explicit branch to us to make updates simpler.
if (!PredHasNoFallThrough && PrevBB.isSuccessor(MBB) &&
PriorTBB != MBB && PriorFBB != MBB) {
- if (PriorTBB == 0) {
- assert(PriorCond.empty() && PriorFBB == 0 &&
+ if (!PriorTBB) {
+ assert(PriorCond.empty() && !PriorFBB &&
"Bad branch analysis");
PriorTBB = MBB;
} else {
- assert(PriorFBB == 0 && "Machine CFG out of date!");
+ assert(!PriorFBB && "Machine CFG out of date!");
PriorFBB = MBB;
}
DebugLoc pdl = getBranchDebugLoc(PrevBB);
@@ -1324,7 +1332,7 @@ ReoptimizeBlock:
// If this change resulted in PMBB ending in a conditional
// branch where both conditions go to the same destination,
// change this to an unconditional branch (and fix the CFG).
- MachineBasicBlock *NewCurTBB = 0, *NewCurFBB = 0;
+ MachineBasicBlock *NewCurTBB = nullptr, *NewCurFBB = nullptr;
SmallVector<MachineOperand, 4> NewCurCond;
bool NewCurUnAnalyzable = TII->AnalyzeBranch(*PMBB, NewCurTBB,
NewCurFBB, NewCurCond, true);
@@ -1332,10 +1340,10 @@ ReoptimizeBlock:
DebugLoc pdl = getBranchDebugLoc(*PMBB);
TII->RemoveBranch(*PMBB);
NewCurCond.clear();
- TII->InsertBranch(*PMBB, NewCurTBB, 0, NewCurCond, pdl);
+ TII->InsertBranch(*PMBB, NewCurTBB, nullptr, NewCurCond, pdl);
MadeChange = true;
++NumBranchOpts;
- PMBB->CorrectExtraCFGEdges(NewCurTBB, 0, false);
+ PMBB->CorrectExtraCFGEdges(NewCurTBB, nullptr, false);
}
}
}
@@ -1352,7 +1360,7 @@ ReoptimizeBlock:
}
// Add the branch back if the block is more than just an uncond branch.
- TII->InsertBranch(*MBB, CurTBB, 0, CurCond, dl);
+ TII->InsertBranch(*MBB, CurTBB, nullptr, CurCond, dl);
}
}
@@ -1373,7 +1381,7 @@ ReoptimizeBlock:
// Analyze the branch at the end of the pred.
MachineBasicBlock *PredBB = *PI;
MachineFunction::iterator PredFallthrough = PredBB; ++PredFallthrough;
- MachineBasicBlock *PredTBB = 0, *PredFBB = 0;
+ MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr;
SmallVector<MachineOperand, 4> PredCond;
if (PredBB != MBB && !PredBB->canFallThrough() &&
!TII->AnalyzeBranch(*PredBB, PredTBB, PredFBB, PredCond, true)
@@ -1390,9 +1398,10 @@ ReoptimizeBlock:
// B elsewhere
// next:
if (CurFallsThru) {
- MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
+ MachineBasicBlock *NextBB =
+ std::next(MachineFunction::iterator(MBB));
CurCond.clear();
- TII->InsertBranch(*MBB, NextBB, 0, CurCond, DebugLoc());
+ TII->InsertBranch(*MBB, NextBB, nullptr, CurCond, DebugLoc());
}
MBB->moveAfter(PredBB);
MadeChange = true;
@@ -1425,7 +1434,7 @@ ReoptimizeBlock:
// Okay, there is no really great place to put this block. If, however,
// the block before this one would be a fall-through if this block were
// removed, move this block to the end of the function.
- MachineBasicBlock *PrevTBB = 0, *PrevFBB = 0;
+ MachineBasicBlock *PrevTBB = nullptr, *PrevFBB = nullptr;
SmallVector<MachineOperand, 4> PrevCond;
if (FallThrough != MF.end() &&
!TII->AnalyzeBranch(PrevBB, PrevTBB, PrevFBB, PrevCond, true) &&
@@ -1466,7 +1475,7 @@ static MachineBasicBlock *findFalseBlock(MachineBasicBlock *BB,
if (SuccBB != TrueBB)
return SuccBB;
}
- return NULL;
+ return nullptr;
}
/// findHoistingInsertPosAndDeps - Find the location to move common instructions
@@ -1496,10 +1505,17 @@ MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
if (MO.isUse()) {
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
Uses.insert(*AI);
- } else if (!MO.isDead())
- // Don't try to hoist code in the rare case the terminator defines a
- // register that is later used.
- return MBB->end();
+ } else {
+ if (!MO.isDead())
+ // Don't try to hoist code in the rare case the terminator defines a
+ // register that is later used.
+ return MBB->end();
+
+ // If the terminator defines a register, make sure we don't hoist
+ // the instruction whose def might be clobbered by the terminator.
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ Defs.insert(*AI);
+ }
}
if (Uses.empty())
@@ -1511,7 +1527,7 @@ MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
// branch from condition setting instruction.
MachineBasicBlock::iterator PI = Loc;
--PI;
- while (PI != MBB->begin() && Loc->isDebugValue())
+ while (PI != MBB->begin() && PI->isDebugValue())
--PI;
bool IsDef = false;
@@ -1540,7 +1556,7 @@ MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
// Also avoid moving code above predicated instruction since it's hard to
// reason about register liveness with predicated instruction.
bool DontMoveAcrossStore = true;
- if (!PI->isSafeToMove(TII, 0, DontMoveAcrossStore) ||
+ if (!PI->isSafeToMove(TII, nullptr, DontMoveAcrossStore) ||
TII->isPredicated(PI))
return MBB->end();
@@ -1574,7 +1590,7 @@ MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
/// sequence at the start of the function, move the instructions before MBB
/// terminator if it's legal.
bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
- MachineBasicBlock *TBB = 0, *FBB = 0;
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
if (TII->AnalyzeBranch(*MBB, TBB, FBB, Cond, true) || !TBB || Cond.empty())
return false;
@@ -1679,7 +1695,7 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
break;
bool DontMoveAcrossStore = true;
- if (!TIB->isSafeToMove(TII, 0, DontMoveAcrossStore))
+ if (!TIB->isSafeToMove(TII, nullptr, DontMoveAcrossStore))
break;
// Remove kills from LocalDefsSet, these registers had short live ranges.
diff --git a/contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp b/contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp
index 4925c4d..bc033f9 100644
--- a/contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp
+++ b/contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp
@@ -7,8 +7,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "calcspillweights"
-
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
@@ -22,6 +20,8 @@
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "calcspillweights"
+
void llvm::calculateSpillWeightsAndHints(LiveIntervals &LIS,
MachineFunction &MF,
const MachineLoopInfo &MLI,
@@ -96,8 +96,8 @@ void
VirtRegAuxInfo::calculateSpillWeightAndHint(LiveInterval &li) {
MachineRegisterInfo &mri = MF.getRegInfo();
const TargetRegisterInfo &tri = *MF.getTarget().getRegisterInfo();
- MachineBasicBlock *mbb = 0;
- MachineLoop *loop = 0;
+ MachineBasicBlock *mbb = nullptr;
+ MachineLoop *loop = nullptr;
bool isExiting = false;
float totalWeight = 0;
SmallPtrSet<MachineInstr*, 8> visited;
@@ -112,8 +112,10 @@ VirtRegAuxInfo::calculateSpillWeightAndHint(LiveInterval &li) {
// Don't recompute spill weight for an unspillable register.
bool Spillable = li.isSpillable();
- for (MachineRegisterInfo::reg_iterator I = mri.reg_begin(li.reg);
- MachineInstr *mi = I.skipInstruction();) {
+ for (MachineRegisterInfo::reg_instr_iterator
+ I = mri.reg_instr_begin(li.reg), E = mri.reg_instr_end();
+ I != E; ) {
+ MachineInstr *mi = &*(I++);
if (mi->isIdentityCopy() || mi->isImplicitDef() || mi->isDebugValue())
continue;
if (!visited.insert(mi))
@@ -130,9 +132,9 @@ VirtRegAuxInfo::calculateSpillWeightAndHint(LiveInterval &li) {
// Calculate instr weight.
bool reads, writes;
- tie(reads, writes) = mi->readsWritesVirtualRegister(li.reg);
+ std::tie(reads, writes) = mi->readsWritesVirtualRegister(li.reg);
weight = LiveIntervals::getSpillWeight(
- writes, reads, MBFI.getBlockFreq(mi->getParent()));
+ writes, reads, &MBFI, mi);
// Give extra weight to what looks like a loop induction variable update.
if (writes && isExiting && LIS.isLiveOutOfMBB(li, mbb))
@@ -147,7 +149,11 @@ VirtRegAuxInfo::calculateSpillWeightAndHint(LiveInterval &li) {
unsigned hint = copyHint(mi, li.reg, tri, mri);
if (!hint)
continue;
- float hweight = Hint[hint] += weight;
+ // Force hweight onto the stack so that x86 doesn't add hidden precision,
+ // making the comparison incorrectly pass (i.e., 1 > 1 == true??).
+ //
+ // FIXME: we probably shouldn't use floats at all.
+ volatile float hweight = Hint[hint] += weight;
if (TargetRegisterInfo::isPhysicalRegister(hint)) {
if (hweight > bestPhys && mri.isAllocatable(hint))
bestPhys = hweight, hintPhys = hint;
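
The volatile above guards against x87 excess precision on 32-bit x86: without a forced store, the freshly accumulated hint weight may be compared at 80-bit precision against a value that was already rounded to 32 bits. A hedged sketch of the same accumulate-then-compare pattern (Hint, Reg and Best are illustrative names, not the pass's types):

#include <map>

float accumulateHint(std::map<unsigned, float> &Hint, unsigned Reg,
                     float Weight, float &Best) {
  // Copying the sum through a volatile float forces the 32-bit rounding
  // before the comparison, matching how Best behaves once it is stored.
  volatile float HWeight = Hint[Reg] += Weight;
  if (HWeight > Best)
    Best = HWeight;
  return HWeight;
}
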
diff --git a/contrib/llvm/lib/CodeGen/CallingConvLower.cpp b/contrib/llvm/lib/CodeGen/CallingConvLower.cpp
index fcfc9dc..add861a 100644
--- a/contrib/llvm/lib/CodeGen/CallingConvLower.cpp
+++ b/contrib/llvm/lib/CodeGen/CallingConvLower.cpp
@@ -76,7 +76,7 @@ CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
dbgs() << "Formal argument #" << i << " has unhandled type "
<< EVT(ArgVT).getEVTString() << '\n';
#endif
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
}
@@ -108,7 +108,7 @@ void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
dbgs() << "Return operand #" << i << " has unhandled type "
<< EVT(VT).getEVTString() << '\n';
#endif
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
}
@@ -126,7 +126,7 @@ void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
dbgs() << "Call operand #" << i << " has unhandled type "
<< EVT(ArgVT).getEVTString() << '\n';
#endif
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
}
@@ -145,7 +145,7 @@ void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
dbgs() << "Call operand #" << i << " has unhandled type "
<< EVT(ArgVT).getEVTString() << '\n';
#endif
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
}
@@ -162,7 +162,7 @@ void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
dbgs() << "Call result #" << i << " has unhandled type "
<< EVT(VT).getEVTString() << '\n';
#endif
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
}
@@ -175,6 +175,6 @@ void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
dbgs() << "Call result has unhandled type "
<< EVT(VT).getEVTString() << '\n';
#endif
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
diff --git a/contrib/llvm/lib/CodeGen/CodeGen.cpp b/contrib/llvm/lib/CodeGen/CodeGen.cpp
index 7430c53..b3beac3 100644
--- a/contrib/llvm/lib/CodeGen/CodeGen.cpp
+++ b/contrib/llvm/lib/CodeGen/CodeGen.cpp
@@ -13,15 +13,17 @@
//===----------------------------------------------------------------------===//
#include "llvm/InitializePasses.h"
-#include "llvm/PassRegistry.h"
#include "llvm-c/Initialization.h"
+#include "llvm/PassRegistry.h"
using namespace llvm;
/// initializeCodeGen - Initialize all passes linked into the CodeGen library.
void llvm::initializeCodeGen(PassRegistry &Registry) {
+ initializeAtomicExpandLoadLinkedPass(Registry);
initializeBasicTTIPass(Registry);
initializeBranchFolderPassPass(Registry);
+ initializeCodeGenPreparePass(Registry);
initializeDeadMachineInstructionElimPass(Registry);
initializeEarlyIfConverterPass(Registry);
initializeExpandPostRAPass(Registry);
@@ -51,6 +53,7 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
initializeOptimizePHIsPass(Registry);
initializePHIEliminationPass(Registry);
initializePeepholeOptimizerPass(Registry);
+ initializePostMachineSchedulerPass(Registry);
initializePostRASchedulerPass(Registry);
initializeProcessImplicitDefsPass(Registry);
initializePEIPass(Registry);
@@ -69,6 +72,7 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
initializeVirtRegRewriterPass(Registry);
initializeLowerIntrinsicsPass(Registry);
initializeMachineFunctionPrinterPassPass(Registry);
+ initializeStackMapLivenessPass(Registry);
}
void LLVMInitializeCodeGen(LLVMPassRegistryRef R) {
diff --git a/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp b/contrib/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 007e9b7..d5039b2 100644
--- a/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/contrib/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -13,34 +13,32 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "codegenprepare"
-#include "llvm/Transforms/Scalar.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/ValueMap.h"
-#include "llvm/Analysis/DominatorInternals.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Assembly/Writer.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Support/PatternMatch.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
@@ -48,6 +46,8 @@
using namespace llvm;
using namespace llvm::PatternMatch;
+#define DEBUG_TYPE "codegenprepare"
+
STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
@@ -62,6 +62,7 @@ STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
+STATISTIC(NumAndCmpsMoved, "Number of and/cmp's pushed into branches");
static cl::opt<bool> DisableBranchOpts(
"disable-cgp-branch-opts", cl::Hidden, cl::init(false),
@@ -71,7 +72,18 @@ static cl::opt<bool> DisableSelectToBranch(
"disable-cgp-select2branch", cl::Hidden, cl::init(false),
cl::desc("Disable select to branch conversion."));
+static cl::opt<bool> AddrSinkUsingGEPs(
+ "addr-sink-using-gep", cl::Hidden, cl::init(false),
+ cl::desc("Address sinking in CGP using GEPs."));
+
+static cl::opt<bool> EnableAndCmpSinking(
+ "enable-andcmp-sinking", cl::Hidden, cl::init(true),
+ cl::desc("Enable sinkinig and/cmp into branches."));
+
namespace {
+typedef SmallPtrSet<Instruction *, 16> SetOfInstrs;
+typedef DenseMap<Instruction *, Type *> InstrToOrigTy;
+
class CodeGenPrepare : public FunctionPass {
/// TLI - Keep a pointer of a TargetLowering to consult for determining
/// transformation profitability.
@@ -90,6 +102,12 @@ namespace {
/// multiple load/stores of the same address.
ValueMap<Value*, Value*> SunkAddrs;
+ /// Keeps track of all truncates inserted for the current function.
+ SetOfInstrs InsertedTruncsSet;
+ /// Keeps track of the types of the related instructions before their
+ /// promotion for the current function.
+ InstrToOrigTy PromotedInsts;
+
/// ModifiedDT - If CFG is modified in anyway, dominator tree may need to
/// be updated.
bool ModifiedDT;
@@ -99,16 +117,16 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
- explicit CodeGenPrepare(const TargetMachine *TM = 0)
- : FunctionPass(ID), TM(TM), TLI(0) {
+ explicit CodeGenPrepare(const TargetMachine *TM = nullptr)
+ : FunctionPass(ID), TM(TM), TLI(nullptr) {
initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
}
- bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
- const char *getPassName() const { return "CodeGen Prepare"; }
+ const char *getPassName() const override { return "CodeGen Prepare"; }
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addPreserved<DominatorTree>();
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addPreserved<DominatorTreeWrapperPass>();
AU.addRequired<TargetLibraryInfo>();
}
@@ -125,29 +143,36 @@ namespace {
bool MoveExtToFormExtLoad(Instruction *I);
bool OptimizeExtUses(Instruction *I);
bool OptimizeSelectInst(SelectInst *SI);
+ bool OptimizeShuffleVectorInst(ShuffleVectorInst *SI);
bool DupRetToEnableTailCallOpts(BasicBlock *BB);
bool PlaceDbgValues(Function &F);
+ bool sinkAndCmp(Function &F);
};
}
char CodeGenPrepare::ID = 0;
-INITIALIZE_PASS_BEGIN(CodeGenPrepare, "codegenprepare",
- "Optimize for code generation", false, false)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
-INITIALIZE_PASS_END(CodeGenPrepare, "codegenprepare",
- "Optimize for code generation", false, false)
+INITIALIZE_TM_PASS(CodeGenPrepare, "codegenprepare",
+ "Optimize for code generation", false, false)
FunctionPass *llvm::createCodeGenPreparePass(const TargetMachine *TM) {
return new CodeGenPrepare(TM);
}
bool CodeGenPrepare::runOnFunction(Function &F) {
+ if (skipOptnoneFunction(F))
+ return false;
+
bool EverMadeChange = false;
+ // Clear per function information.
+ InsertedTruncsSet.clear();
+ PromotedInsts.clear();
ModifiedDT = false;
if (TM) TLI = TM->getTargetLowering();
TLInfo = &getAnalysis<TargetLibraryInfo>();
- DT = getAnalysisIfAvailable<DominatorTree>();
+ DominatorTreeWrapperPass *DTWP =
+ getAnalysisIfAvailable<DominatorTreeWrapperPass>();
+ DT = DTWP ? &DTWP->getDomTree() : nullptr;
OptSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
Attribute::OptimizeForSize);
@@ -169,6 +194,13 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
// find a node corresponding to the value.
EverMadeChange |= PlaceDbgValues(F);
+ // If there is a mask, a compare against zero, and a branch that can be
+ // combined into a single target instruction, push the mask and compare into
+ // the branch's users. Do this before OptimizeBlock -> OptimizeInst ->
+ // OptimizeCmpExpression, which perturbs the pattern being searched for.
+ if (!DisableBranchOpts)
+ EverMadeChange |= sinkAndCmp(F);
+
bool MadeChange = true;
while (MadeChange) {
MadeChange = false;
@@ -221,7 +253,7 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
}
if (ModifiedDT && DT)
- DT->DT->recalculate(F);
+ DT->recalculate(F);
return EverMadeChange;
}
@@ -232,7 +264,7 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
bool CodeGenPrepare::EliminateFallThrough(Function &F) {
bool Changed = false;
// Scan all of the blocks in the function, except for the entry block.
- for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ) {
+ for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
BasicBlock *BB = I++;
// If the destination block has a single pred, then this is a trivial
// edge, just collapse it.
@@ -268,7 +300,7 @@ bool CodeGenPrepare::EliminateFallThrough(Function &F) {
bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
bool MadeChange = false;
// Note that this intentionally skips the entry block.
- for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ) {
+ for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
BasicBlock *BB = I++;
// If this block doesn't end with an uncond branch, ignore it.
@@ -314,16 +346,15 @@ bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
// don't mess around with them.
BasicBlock::const_iterator BBI = BB->begin();
while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
- for (Value::const_use_iterator UI = PN->use_begin(), E = PN->use_end();
- UI != E; ++UI) {
- const Instruction *User = cast<Instruction>(*UI);
- if (User->getParent() != DestBB || !isa<PHINode>(User))
+ for (const User *U : PN->users()) {
+ const Instruction *UI = cast<Instruction>(U);
+ if (UI->getParent() != DestBB || !isa<PHINode>(UI))
return false;
// If User is inside DestBB block and it is a PHINode then check
// incoming value. If incoming value is not from BB then this is
// a complex condition (e.g. preheaders) we want to avoid here.
- if (User->getParent() == DestBB) {
- if (const PHINode *UPN = dyn_cast<PHINode>(User))
+ if (UI->getParent() == DestBB) {
+ if (const PHINode *UPN = dyn_cast<PHINode>(UI))
for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
if (Insn && Insn->getParent() == BB &&
@@ -444,47 +475,15 @@ void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}
-/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
-/// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC),
-/// sink it into user blocks to reduce the number of virtual
-/// registers that must be created and coalesced.
-///
-/// Return true if any changes are made.
-///
-static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
- // If this is a noop copy,
- EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
- EVT DstVT = TLI.getValueType(CI->getType());
-
- // This is an fp<->int conversion?
- if (SrcVT.isInteger() != DstVT.isInteger())
- return false;
-
- // If this is an extension, it will be a zero or sign extension, which
- // isn't a noop.
- if (SrcVT.bitsLT(DstVT)) return false;
-
- // If these values will be promoted, find out what they will be promoted
- // to. This helps us consider truncates on PPC as noop copies when they
- // are.
- if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
- TargetLowering::TypePromoteInteger)
- SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
- if (TLI.getTypeAction(CI->getContext(), DstVT) ==
- TargetLowering::TypePromoteInteger)
- DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);
-
- // If, after promotion, these are the same types, this is a noop copy.
- if (SrcVT != DstVT)
- return false;
-
+/// SinkCast - Sink the specified cast instruction into its user blocks
+static bool SinkCast(CastInst *CI) {
BasicBlock *DefBB = CI->getParent();
/// InsertedCasts - Only insert a cast in each block once.
DenseMap<BasicBlock*, CastInst*> InsertedCasts;
bool MadeChange = false;
- for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
+ for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
UI != E; ) {
Use &TheUse = UI.getUse();
Instruction *User = cast<Instruction>(*UI);
@@ -493,7 +492,7 @@ static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
// appropriate predecessor block.
BasicBlock *UserBB = User->getParent();
if (PHINode *PN = dyn_cast<PHINode>(User)) {
- UserBB = PN->getIncomingBlock(UI);
+ UserBB = PN->getIncomingBlock(TheUse);
}
// Preincrement use iterator so we don't invalidate it.
@@ -527,6 +526,43 @@ static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
return MadeChange;
}
+/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
+/// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC),
+/// sink it into user blocks to reduce the number of virtual
+/// registers that must be created and coalesced.
+///
+/// Return true if any changes are made.
+///
+static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
+ // If this is a noop copy,
+ EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
+ EVT DstVT = TLI.getValueType(CI->getType());
+
+ // This is an fp<->int conversion?
+ if (SrcVT.isInteger() != DstVT.isInteger())
+ return false;
+
+ // If this is an extension, it will be a zero or sign extension, which
+ // isn't a noop.
+ if (SrcVT.bitsLT(DstVT)) return false;
+
+ // If these values will be promoted, find out what they will be promoted
+ // to. This helps us consider truncates on PPC as noop copies when they
+ // are.
+ if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
+ TargetLowering::TypePromoteInteger)
+ SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
+ if (TLI.getTypeAction(CI->getContext(), DstVT) ==
+ TargetLowering::TypePromoteInteger)
+ DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);
+
+ // If, after promotion, these are the same types, this is a noop copy.
+ if (SrcVT != DstVT)
+ return false;
+
+ return SinkCast(CI);
+}
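
After this refactoring, OptimizeNoopCopyExpression only decides whether the cast is a noop once integer promotion is taken into account, and SinkCast does the actual sinking. A condensed sketch of that decision, with bit widths standing in for the EVT/TargetLowering queries (illustrative names, not the real API):

bool isNoopCopyAfterPromotion(bool SrcIsInt, bool DstIsInt,
                              unsigned SrcBits, unsigned DstBits,
                              unsigned PromotedSrcBits,
                              unsigned PromotedDstBits) {
  if (SrcIsInt != DstIsInt)
    return false;                 // an fp<->int conversion is real work
  if (SrcBits < DstBits)
    return false;                 // zext/sext are not noops
  // e.g. on PPC an i32 -> i8 trunc is a noop once both types promote to i32.
  return PromotedSrcBits == PromotedDstBits;
}
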
+
/// OptimizeCmpExpression - sink the given CmpInst into user blocks to reduce
/// the number of virtual registers that must be created and coalesced. This is
/// a clear win except on targets with multiple condition code registers
@@ -540,7 +576,7 @@ static bool OptimizeCmpExpression(CmpInst *CI) {
DenseMap<BasicBlock*, CmpInst*> InsertedCmps;
bool MadeChange = false;
- for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
+ for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
UI != E; ) {
Use &TheUse = UI.getUse();
Instruction *User = cast<Instruction>(*UI);
@@ -582,14 +618,195 @@ static bool OptimizeCmpExpression(CmpInst *CI) {
return MadeChange;
}
+/// isExtractBitsCandidateUse - Check if the candidate use could
+/// be combined with the shift instruction. Candidates are:
+/// 1. A truncate instruction
+/// 2. An 'and' instruction whose immediate is a mask of the low bits:
+/// imm & (imm+1) == 0
+static bool isExtractBitsCandidateUse(Instruction *User) {
+ if (!isa<TruncInst>(User)) {
+ if (User->getOpcode() != Instruction::And ||
+ !isa<ConstantInt>(User->getOperand(1)))
+ return false;
+
+ const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();
+
+ if ((Cimm & (Cimm + 1)).getBoolValue())
+ return false;
+ }
+ return true;
+}
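
The 'and' case above accepts only immediates that form a mask of the low bits, tested with imm & (imm+1) == 0. Quick standalone check of that identity:

#include <cstdint>

bool isLowBitMask(uint64_t Imm) {
  // True for 0x0, 0x1, 0x3, 0x7, ..., i.e. 2^k - 1: adding one carries past
  // every set bit. 0xF0 fails because 0xF0 & 0xF1 == 0xF0 != 0.
  return (Imm & (Imm + 1)) == 0;
}
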
+
+/// SinkShiftAndTruncate - sink both shift and truncate instruction
+/// to the use of truncate's BB.
+static bool
+SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
+ DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
+ const TargetLowering &TLI) {
+ BasicBlock *UserBB = User->getParent();
+ DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
+ TruncInst *TruncI = dyn_cast<TruncInst>(User);
+ bool MadeChange = false;
+
+ for (Value::user_iterator TruncUI = TruncI->user_begin(),
+ TruncE = TruncI->user_end();
+ TruncUI != TruncE;) {
+
+ Use &TruncTheUse = TruncUI.getUse();
+ Instruction *TruncUser = cast<Instruction>(*TruncUI);
+ // Preincrement use iterator so we don't invalidate it.
+
+ ++TruncUI;
+
+ int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
+ if (!ISDOpcode)
+ continue;
+
+ // If the use is actually a legal node, there will not be an implicit
+ // truncate.
+ if (TLI.isOperationLegalOrCustom(ISDOpcode,
+ EVT::getEVT(TruncUser->getType())))
+ continue;
+
+ // Don't bother for PHI nodes.
+ if (isa<PHINode>(TruncUser))
+ continue;
+
+ BasicBlock *TruncUserBB = TruncUser->getParent();
+
+ if (UserBB == TruncUserBB)
+ continue;
+
+ BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
+ CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];
+
+ if (!InsertedShift && !InsertedTrunc) {
+ BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
+ // Sink the shift
+ if (ShiftI->getOpcode() == Instruction::AShr)
+ InsertedShift =
+ BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "", InsertPt);
+ else
+ InsertedShift =
+ BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "", InsertPt);
+
+ // Sink the trunc
+ BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
+ TruncInsertPt++;
+
+ InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
+ TruncI->getType(), "", TruncInsertPt);
+
+ MadeChange = true;
+
+ TruncTheUse = InsertedTrunc;
+ }
+ }
+ return MadeChange;
+}
+
+/// OptimizeExtractBits - sink the shift *right* instruction into user blocks if
+/// the uses could potentially be combined with this shift instruction and
+/// generate BitExtract instruction. It will only be applied if the architecture
+/// supports BitExtract instruction. Here is an example:
+/// BB1:
+/// %x.extract.shift = lshr i64 %arg1, 32
+/// BB2:
+/// %x.extract.trunc = trunc i64 %x.extract.shift to i16
+/// ==>
+///
+/// BB2:
+/// %x.extract.shift.1 = lshr i64 %arg1, 32
+/// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
+///
+/// CodeGen will recognize the pattern in BB2 and generate BitExtract
+/// instruction.
+/// Return true if any changes are made.
+static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
+ const TargetLowering &TLI) {
+ BasicBlock *DefBB = ShiftI->getParent();
+
+ /// Only insert instructions in each block once.
+ DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;
+
+ bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(ShiftI->getType()));
+
+ bool MadeChange = false;
+ for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
+ UI != E;) {
+ Use &TheUse = UI.getUse();
+ Instruction *User = cast<Instruction>(*UI);
+ // Preincrement use iterator so we don't invalidate it.
+ ++UI;
+
+ // Don't bother for PHI nodes.
+ if (isa<PHINode>(User))
+ continue;
+
+ if (!isExtractBitsCandidateUse(User))
+ continue;
+
+ BasicBlock *UserBB = User->getParent();
+
+ if (UserBB == DefBB) {
+ // If the shift and truncate instructions are in the same BB, the use of
+ // the truncate (TruncUse) may still introduce another truncate if it is
+ // not legal. In this case, we would like to sink both the shift and the
+ // truncate instruction to the BB of TruncUse.
+ // for example:
+ // BB1:
+ // i64 shift.result = lshr i64 opnd, imm
+ // trunc.result = trunc shift.result to i16
+ //
+ // BB2:
+ // ----> We will have an implicit truncate here if the architecture does
+ // not have i16 compare.
+ // cmp i16 trunc.result, opnd2
+ //
+ if (isa<TruncInst>(User) && shiftIsLegal
+ // If the type of the truncate is legal, no truncate will be
+ // introduced in other basic blocks.
+ && (!TLI.isTypeLegal(TLI.getValueType(User->getType()))))
+ MadeChange =
+ SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI);
+
+ continue;
+ }
+ // If we have already inserted a shift into this block, use it.
+ BinaryOperator *&InsertedShift = InsertedShifts[UserBB];
+
+ if (!InsertedShift) {
+ BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
+
+ if (ShiftI->getOpcode() == Instruction::AShr)
+ InsertedShift =
+ BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "", InsertPt);
+ else
+ InsertedShift =
+ BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "", InsertPt);
+
+ MadeChange = true;
+ }
+
+ // Replace a use of the shift with a use of the new shift.
+ TheUse = InsertedShift;
+ }
+
+ // If we removed all uses, nuke the shift.
+ if (ShiftI->use_empty())
+ ShiftI->eraseFromParent();
+
+ return MadeChange;
+}
+
namespace {
class CodeGenPrepareFortifiedLibCalls : public SimplifyFortifiedLibCalls {
protected:
- void replaceCall(Value *With) {
+ void replaceCall(Value *With) override {
CI->replaceAllUsesWith(With);
CI->eraseFromParent();
}
- bool isFoldable(unsigned SizeCIOp, unsigned, bool) const {
+ bool isFoldable(unsigned SizeCIOp, unsigned, bool) const override {
if (ConstantInt *SizeCI =
dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp)))
return SizeCI->isAllOnesValue();
@@ -630,8 +847,9 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
// happens.
WeakVH IterHandle(CurInstIterator);
- replaceAndRecursivelySimplify(CI, RetVal, TLI ? TLI->getDataLayout() : 0,
- TLInfo, ModifiedDT ? 0 : DT);
+ replaceAndRecursivelySimplify(CI, RetVal,
+ TLI ? TLI->getDataLayout() : nullptr,
+ TLInfo, ModifiedDT ? nullptr : DT);
// If the iterator instruction was recursively deleted, start over at the
// start of the block.
@@ -652,10 +870,10 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
}
// From here on out we're working with named functions.
- if (CI->getCalledFunction() == 0) return false;
+ if (!CI->getCalledFunction()) return false;
// We'll need DataLayout from here on out.
- const DataLayout *TD = TLI ? TLI->getDataLayout() : 0;
+ const DataLayout *TD = TLI ? TLI->getDataLayout() : nullptr;
if (!TD) return false;
// Lower all default uses of _chk calls. This is very similar
@@ -705,8 +923,8 @@ bool CodeGenPrepare::DupRetToEnableTailCallOpts(BasicBlock *BB) {
if (!RI)
return false;
- PHINode *PN = 0;
- BitCastInst *BCI = 0;
+ PHINode *PN = nullptr;
+ BitCastInst *BCI = nullptr;
Value *V = RI->getReturnValue();
if (V) {
BCI = dyn_cast<BitCastInst>(V);
@@ -821,7 +1039,7 @@ namespace {
struct ExtAddrMode : public TargetLowering::AddrMode {
Value *BaseReg;
Value *ScaledReg;
- ExtAddrMode() : BaseReg(0), ScaledReg(0) {}
+ ExtAddrMode() : BaseReg(nullptr), ScaledReg(nullptr) {}
void print(raw_ostream &OS) const;
void dump() const;
@@ -845,23 +1063,26 @@ void ExtAddrMode::print(raw_ostream &OS) const {
if (BaseGV) {
OS << (NeedPlus ? " + " : "")
<< "GV:";
- WriteAsOperand(OS, BaseGV, /*PrintType=*/false);
+ BaseGV->printAsOperand(OS, /*PrintType=*/false);
NeedPlus = true;
}
- if (BaseOffs)
- OS << (NeedPlus ? " + " : "") << BaseOffs, NeedPlus = true;
+ if (BaseOffs) {
+ OS << (NeedPlus ? " + " : "")
+ << BaseOffs;
+ NeedPlus = true;
+ }
if (BaseReg) {
OS << (NeedPlus ? " + " : "")
<< "Base:";
- WriteAsOperand(OS, BaseReg, /*PrintType=*/false);
+ BaseReg->printAsOperand(OS, /*PrintType=*/false);
NeedPlus = true;
}
if (Scale) {
OS << (NeedPlus ? " + " : "")
<< Scale << "*";
- WriteAsOperand(OS, ScaledReg, /*PrintType=*/false);
+ ScaledReg->printAsOperand(OS, /*PrintType=*/false);
}
OS << ']';
@@ -874,6 +1095,404 @@ void ExtAddrMode::dump() const {
}
#endif
+/// \brief This class provides transaction based operation on the IR.
+/// Every change made through this class is recorded in the internal state and
+/// can be undone (rollback) until commit is called.
+class TypePromotionTransaction {
+
+ /// \brief This represents the common interface of the individual transaction.
+ /// Each class implements the logic for doing one specific modification on
+ /// the IR via the TypePromotionTransaction.
+ class TypePromotionAction {
+ protected:
+ /// The Instruction modified.
+ Instruction *Inst;
+
+ public:
+ /// \brief Constructor of the action.
+ /// The constructor performs the related action on the IR.
+ TypePromotionAction(Instruction *Inst) : Inst(Inst) {}
+
+ virtual ~TypePromotionAction() {}
+
+ /// \brief Undo the modification done by this action.
+ /// When this method is called, the IR must be in the same state as it was
+ /// before this action was applied.
+ /// \pre Undoing the action works if and only if the IR is in the exact same
+ /// state as it was directly after this action was applied.
+ virtual void undo() = 0;
+
+ /// \brief Advocate every change made by this action.
+ /// When the action's results on the IR are to be kept, it is important
+ /// to call this function; otherwise hidden information may be kept forever.
+ virtual void commit() {
+ // Nothing to be done, this action is not doing anything.
+ }
+ };
+
+ /// \brief Utility to remember the position of an instruction.
+ class InsertionHandler {
+ /// Position of an instruction.
+ /// Either an instruction:
+ /// - Is the first in a basic block: BB is used.
+ /// - Has a previous instruction: PrevInst is used.
+ union {
+ Instruction *PrevInst;
+ BasicBlock *BB;
+ } Point;
+ /// Remember whether or not the instruction had a previous instruction.
+ bool HasPrevInstruction;
+
+ public:
+ /// \brief Record the position of \p Inst.
+ InsertionHandler(Instruction *Inst) {
+ BasicBlock::iterator It = Inst;
+ HasPrevInstruction = (It != (Inst->getParent()->begin()));
+ if (HasPrevInstruction)
+ Point.PrevInst = --It;
+ else
+ Point.BB = Inst->getParent();
+ }
+
+ /// \brief Insert \p Inst at the recorded position.
+ void insert(Instruction *Inst) {
+ if (HasPrevInstruction) {
+ if (Inst->getParent())
+ Inst->removeFromParent();
+ Inst->insertAfter(Point.PrevInst);
+ } else {
+ Instruction *Position = Point.BB->getFirstInsertionPt();
+ if (Inst->getParent())
+ Inst->moveBefore(Position);
+ else
+ Inst->insertBefore(Position);
+ }
+ }
+ };
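The bookkeeping above is needed because an iterator to the instruction itself dies once the instruction is removed; what survives is either the previous instruction or, when there is none, the enclosing block. A minimal standalone sketch of the same record/restore idea over a std::list, in plain C++ rather than LLVM types (all names below are illustrative, none of this is LLVM API):

#include <cassert>
#include <iterator>
#include <list>

// Remember either the predecessor of an element or the fact that it was first,
// much as InsertionHandler remembers PrevInst or the owning BasicBlock.
struct ListPosition {
  std::list<int>::iterator Prev; // valid only when HasPrev is true
  bool HasPrev;
  ListPosition(std::list<int> &L, std::list<int>::iterator It)
      : HasPrev(It != L.begin()) {
    if (HasPrev)
      Prev = std::prev(It);
  }
  // Reinsert Val at the recorded position.
  void insert(std::list<int> &L, int Val) {
    if (HasPrev)
      L.insert(std::next(Prev), Val);
    else
      L.push_front(Val);
  }
};

int main() {
  std::list<int> L = {1, 2, 3};
  auto It = std::next(L.begin()); // points at 2
  ListPosition Pos(L, It);
  L.erase(It);                    // removal invalidates It, but not Prev
  Pos.insert(L, 2);               // restore the element where it was
  assert((L == std::list<int>{1, 2, 3}));
  return 0;
}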
+
+ /// \brief Move an instruction before another.
+ class InstructionMoveBefore : public TypePromotionAction {
+ /// Original position of the instruction.
+ InsertionHandler Position;
+
+ public:
+ /// \brief Move \p Inst before \p Before.
+ InstructionMoveBefore(Instruction *Inst, Instruction *Before)
+ : TypePromotionAction(Inst), Position(Inst) {
+ DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n");
+ Inst->moveBefore(Before);
+ }
+
+ /// \brief Move the instruction back to its original position.
+ void undo() override {
+ DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
+ Position.insert(Inst);
+ }
+ };
+
+ /// \brief Set the operand of an instruction with a new value.
+ class OperandSetter : public TypePromotionAction {
+ /// Original operand of the instruction.
+ Value *Origin;
+ /// Index of the modified instruction.
+ unsigned Idx;
+
+ public:
+ /// \brief Set \p Idx operand of \p Inst with \p NewVal.
+ OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
+ : TypePromotionAction(Inst), Idx(Idx) {
+ DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
+ << "for:" << *Inst << "\n"
+ << "with:" << *NewVal << "\n");
+ Origin = Inst->getOperand(Idx);
+ Inst->setOperand(Idx, NewVal);
+ }
+
+ /// \brief Restore the original value of the instruction.
+ void undo() override {
+ DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
+ << "for: " << *Inst << "\n"
+ << "with: " << *Origin << "\n");
+ Inst->setOperand(Idx, Origin);
+ }
+ };
+
+ /// \brief Hide the operands of an instruction.
+ /// Act as if this instruction was not using any of its operands.
+ class OperandsHider : public TypePromotionAction {
+ /// The list of original operands.
+ SmallVector<Value *, 4> OriginalValues;
+
+ public:
+ /// \brief Remove \p Inst from the uses of the operands of \p Inst.
+ OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
+ DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
+ unsigned NumOpnds = Inst->getNumOperands();
+ OriginalValues.reserve(NumOpnds);
+ for (unsigned It = 0; It < NumOpnds; ++It) {
+ // Save the current operand.
+ Value *Val = Inst->getOperand(It);
+ OriginalValues.push_back(Val);
+ // Set a dummy one.
+ // We could use OperandSetter here, but that would imply an overhead
+ // that we are not willing to pay.
+ Inst->setOperand(It, UndefValue::get(Val->getType()));
+ }
+ }
+
+ /// \brief Restore the original list of uses.
+ void undo() override {
+ DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
+ for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
+ Inst->setOperand(It, OriginalValues[It]);
+ }
+ };
+
+ /// \brief Build a truncate instruction.
+ class TruncBuilder : public TypePromotionAction {
+ public:
+ /// \brief Build a truncate instruction of \p Opnd producing a \p Ty
+ /// result.
+ /// trunc Opnd to Ty.
+ TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
+ IRBuilder<> Builder(Opnd);
+ Inst = cast<Instruction>(Builder.CreateTrunc(Opnd, Ty, "promoted"));
+ DEBUG(dbgs() << "Do: TruncBuilder: " << *Inst << "\n");
+ }
+
+ /// \brief Get the built instruction.
+ Instruction *getBuiltInstruction() { return Inst; }
+
+ /// \brief Remove the built instruction.
+ void undo() override {
+ DEBUG(dbgs() << "Undo: TruncBuilder: " << *Inst << "\n");
+ Inst->eraseFromParent();
+ }
+ };
+
+ /// \brief Build a sign extension instruction.
+ class SExtBuilder : public TypePromotionAction {
+ public:
+ /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty
+ /// result.
+ /// sext Opnd to Ty.
+ SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
+ : TypePromotionAction(Inst) {
+ IRBuilder<> Builder(InsertPt);
+ Inst = cast<Instruction>(Builder.CreateSExt(Opnd, Ty, "promoted"));
+ DEBUG(dbgs() << "Do: SExtBuilder: " << *Inst << "\n");
+ }
+
+ /// \brief Get the built instruction.
+ Instruction *getBuiltInstruction() { return Inst; }
+
+ /// \brief Remove the built instruction.
+ void undo() override {
+ DEBUG(dbgs() << "Undo: SExtBuilder: " << *Inst << "\n");
+ Inst->eraseFromParent();
+ }
+ };
+
+ /// \brief Mutate an instruction to another type.
+ class TypeMutator : public TypePromotionAction {
+ /// Record the original type.
+ Type *OrigTy;
+
+ public:
+ /// \brief Mutate the type of \p Inst into \p NewTy.
+ TypeMutator(Instruction *Inst, Type *NewTy)
+ : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
+ DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
+ << "\n");
+ Inst->mutateType(NewTy);
+ }
+
+ /// \brief Mutate the instruction back to its original type.
+ void undo() override {
+ DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
+ << "\n");
+ Inst->mutateType(OrigTy);
+ }
+ };
+
+ /// \brief Replace the uses of an instruction by another instruction.
+ class UsesReplacer : public TypePromotionAction {
+ /// Helper structure to keep track of the replaced uses.
+ struct InstructionAndIdx {
+ /// The instruction using the replaced instruction.
+ Instruction *Inst;
+ /// The operand index at which the replaced instruction is used by Inst.
+ unsigned Idx;
+ InstructionAndIdx(Instruction *Inst, unsigned Idx)
+ : Inst(Inst), Idx(Idx) {}
+ };
+
+ /// Keep track of the original uses (pair Instruction, Index).
+ SmallVector<InstructionAndIdx, 4> OriginalUses;
+ typedef SmallVectorImpl<InstructionAndIdx>::iterator use_iterator;
+
+ public:
+ /// \brief Replace all the use of \p Inst by \p New.
+ UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) {
+ DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
+ << "\n");
+ // Record the original uses.
+ for (Use &U : Inst->uses()) {
+ Instruction *UserI = cast<Instruction>(U.getUser());
+ OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
+ }
+ // Now, we can replace the uses.
+ Inst->replaceAllUsesWith(New);
+ }
+
+ /// \brief Reassign the original uses of Inst to Inst.
+ void undo() override {
+ DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
+ for (use_iterator UseIt = OriginalUses.begin(),
+ EndIt = OriginalUses.end();
+ UseIt != EndIt; ++UseIt) {
+ UseIt->Inst->setOperand(UseIt->Idx, Inst);
+ }
+ }
+ };
+
+ /// \brief Remove an instruction from the IR.
+ class InstructionRemover : public TypePromotionAction {
+ /// Original position of the instruction.
+ InsertionHandler Inserter;
+ /// Helper structure to hide all the links to the instruction. In other
+ /// words, this helps to act as if the instruction was removed.
+ OperandsHider Hider;
+ /// Keep track of the uses replaced, if any.
+ UsesReplacer *Replacer;
+
+ public:
+ /// \brief Remove all references of \p Inst and optionally replace all its
+ /// uses with New.
+ /// \pre If !Inst->use_empty(), then New != nullptr
+ InstructionRemover(Instruction *Inst, Value *New = nullptr)
+ : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
+ Replacer(nullptr) {
+ if (New)
+ Replacer = new UsesReplacer(Inst, New);
+ DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
+ Inst->removeFromParent();
+ }
+
+ ~InstructionRemover() { delete Replacer; }
+
+ /// \brief Really remove the instruction.
+ void commit() override { delete Inst; }
+
+ /// \brief Resurrect the instruction and reassign it to the proper uses if
+ /// a new value was provided when this action was built.
+ void undo() override {
+ DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
+ Inserter.insert(Inst);
+ if (Replacer)
+ Replacer->undo();
+ Hider.undo();
+ }
+ };
+
+public:
+ /// Restoration point.
+ /// The restoration point is a pointer to an action instead of an iterator
+ /// because the iterator may be invalidated but not the pointer.
+ typedef const TypePromotionAction *ConstRestorationPt;
+ /// Advocate every change made in that transaction.
+ void commit();
+ /// Undo all the changes made after the given point.
+ void rollback(ConstRestorationPt Point);
+ /// Get the current restoration point.
+ ConstRestorationPt getRestorationPoint() const;
+
+ /// \name API for IR modification with state keeping to support rollback.
+ /// @{
+ /// Same as Instruction::setOperand.
+ void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
+ /// Same as Instruction::eraseFromParent.
+ void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
+ /// Same as Value::replaceAllUsesWith.
+ void replaceAllUsesWith(Instruction *Inst, Value *New);
+ /// Same as Value::mutateType.
+ void mutateType(Instruction *Inst, Type *NewTy);
+ /// Same as IRBuilder::createTrunc.
+ Instruction *createTrunc(Instruction *Opnd, Type *Ty);
+ /// Same as IRBuilder::createSExt.
+ Instruction *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
+ /// Same as Instruction::moveBefore.
+ void moveBefore(Instruction *Inst, Instruction *Before);
+ /// @}
+
+private:
+ /// The ordered list of actions made so far.
+ SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;
+ typedef SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator CommitPt;
+};
+
+void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
+ Value *NewVal) {
+ Actions.push_back(
+ make_unique<TypePromotionTransaction::OperandSetter>(Inst, Idx, NewVal));
+}
+
+void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
+ Value *NewVal) {
+ Actions.push_back(
+ make_unique<TypePromotionTransaction::InstructionRemover>(Inst, NewVal));
+}
+
+void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
+ Value *New) {
+ Actions.push_back(make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
+}
+
+void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
+ Actions.push_back(make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
+}
+
+Instruction *TypePromotionTransaction::createTrunc(Instruction *Opnd,
+ Type *Ty) {
+ std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
+ Instruction *I = Ptr->getBuiltInstruction();
+ Actions.push_back(std::move(Ptr));
+ return I;
+}
+
+Instruction *TypePromotionTransaction::createSExt(Instruction *Inst,
+ Value *Opnd, Type *Ty) {
+ std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
+ Instruction *I = Ptr->getBuiltInstruction();
+ Actions.push_back(std::move(Ptr));
+ return I;
+}
+
+void TypePromotionTransaction::moveBefore(Instruction *Inst,
+ Instruction *Before) {
+ Actions.push_back(
+ make_unique<TypePromotionTransaction::InstructionMoveBefore>(Inst, Before));
+}
+
+TypePromotionTransaction::ConstRestorationPt
+TypePromotionTransaction::getRestorationPoint() const {
+ return !Actions.empty() ? Actions.back().get() : nullptr;
+}
+
+void TypePromotionTransaction::commit() {
+ for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt;
+ ++It)
+ (*It)->commit();
+ Actions.clear();
+}
+
+void TypePromotionTransaction::rollback(
+ TypePromotionTransaction::ConstRestorationPt Point) {
+ while (!Actions.empty() && Point != Actions.back().get()) {
+ std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
+ Curr->undo();
+ }
+}
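The class above is intended to be driven in a snapshot/rollback/commit pattern, which is how the addressing-mode matcher later in this change uses it. A condensed sketch of that pattern, assuming an Instruction *Inst and a Value *NewVal of the desired type are already in scope in this file:

  TypePromotionTransaction TPT;
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();

  // Make speculative changes through the transaction rather than directly on
  // the IR, so that they can be undone as a group.
  TPT.setOperand(Inst, 0, NewVal);
  TPT.mutateType(Inst, NewVal->getType());

  bool KeepChanges = /* profitability check goes here */ false;
  if (KeepChanges)
    TPT.commit();                // changes become permanent
  else
    TPT.rollback(LastKnownGood); // IR is restored to the snapshot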
/// \brief A helper class for matching addressing modes.
///
@@ -891,6 +1510,13 @@ class AddressingModeMatcher {
/// part of the return value of this addressing mode matching stuff.
ExtAddrMode &AddrMode;
+ /// The truncate instructions inserted by other CodeGenPrepare optimizations.
+ const SetOfInstrs &InsertedTruncs;
+ /// A map from the instructions to their type before promotion.
+ InstrToOrigTy &PromotedInsts;
+ /// The ongoing transaction where every action should be registered.
+ TypePromotionTransaction &TPT;
+
/// IgnoreProfitability - This is set to true when we should not do
/// profitability checks. When true, IsProfitableToFoldIntoAddressingMode
/// always returns true.
@@ -898,8 +1524,12 @@ class AddressingModeMatcher {
AddressingModeMatcher(SmallVectorImpl<Instruction*> &AMI,
const TargetLowering &T, Type *AT,
- Instruction *MI, ExtAddrMode &AM)
- : AddrModeInsts(AMI), TLI(T), AccessTy(AT), MemoryInst(MI), AddrMode(AM) {
+ Instruction *MI, ExtAddrMode &AM,
+ const SetOfInstrs &InsertedTruncs,
+ InstrToOrigTy &PromotedInsts,
+ TypePromotionTransaction &TPT)
+ : AddrModeInsts(AMI), TLI(T), AccessTy(AT), MemoryInst(MI), AddrMode(AM),
+ InsertedTruncs(InsertedTruncs), PromotedInsts(PromotedInsts), TPT(TPT) {
IgnoreProfitability = false;
}
public:
@@ -907,26 +1537,37 @@ public:
/// Match - Find the maximal addressing mode that a load/store of V can fold,
/// give an access type of AccessTy. This returns a list of involved
/// instructions in AddrModeInsts.
+ /// \p InsertedTruncs The truncate instructions inserted by other
+ /// CodeGenPrepare optimizations.
+ /// \p PromotedInsts maps the instructions to their type before promotion.
+ /// \p TPT The ongoing transaction where every action should be registered.
static ExtAddrMode Match(Value *V, Type *AccessTy,
Instruction *MemoryInst,
SmallVectorImpl<Instruction*> &AddrModeInsts,
- const TargetLowering &TLI) {
+ const TargetLowering &TLI,
+ const SetOfInstrs &InsertedTruncs,
+ InstrToOrigTy &PromotedInsts,
+ TypePromotionTransaction &TPT) {
ExtAddrMode Result;
- bool Success =
- AddressingModeMatcher(AddrModeInsts, TLI, AccessTy,
- MemoryInst, Result).MatchAddr(V, 0);
+ bool Success = AddressingModeMatcher(AddrModeInsts, TLI, AccessTy,
+ MemoryInst, Result, InsertedTruncs,
+ PromotedInsts, TPT).MatchAddr(V, 0);
(void)Success; assert(Success && "Couldn't select *anything*?");
return Result;
}
private:
bool MatchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
bool MatchAddr(Value *V, unsigned Depth);
- bool MatchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth);
+ bool MatchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth,
+ bool *MovedAway = nullptr);
bool IsProfitableToFoldIntoAddressingMode(Instruction *I,
ExtAddrMode &AMBefore,
ExtAddrMode &AMAfter);
bool ValueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
+ bool IsPromotionProfitable(unsigned MatchedSize, unsigned SizeWithPromotion,
+ Value *PromotedOperand) const;
};
/// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode.
@@ -965,7 +1606,7 @@ bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale,
// Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
// to see if ScaleReg is actually X+C. If so, we can turn this into adding
// X*Scale + C*Scale to addr mode.
- ConstantInt *CI = 0; Value *AddLHS = 0;
+ ConstantInt *CI = nullptr; Value *AddLHS = nullptr;
if (isa<Instruction>(ScaleReg) && // not a constant expr.
match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
TestAddrMode.ScaledReg = AddLHS;
@@ -991,6 +1632,7 @@ bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale,
static bool MightBeFoldableInst(Instruction *I) {
switch (I->getOpcode()) {
case Instruction::BitCast:
+ case Instruction::AddrSpaceCast:
// Don't touch identity bitcasts.
if (I->getType() == I->getOperand(0)->getType())
return false;
@@ -1014,14 +1656,326 @@ static bool MightBeFoldableInst(Instruction *I) {
}
}
+/// \brief Helper class to perform type promotion.
+class TypePromotionHelper {
+ /// \brief Utility function to check whether or not a sign extension of
+ /// \p Inst with \p ConsideredSExtType can be moved through \p Inst by either
+ /// using the operands of \p Inst or promoting \p Inst.
+ /// In other words, check if:
+ /// sext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredSExtType.
+ /// #1 Promotion applies:
+ /// ConsideredSExtType Inst (sext opnd1 to ConsideredSExtType, ...).
+ /// #2 Operand reuses:
+ /// sext opnd1 to ConsideredSExtType.
+ /// \p PromotedInsts maps the instructions to their type before promotion.
+ static bool canGetThrough(const Instruction *Inst, Type *ConsideredSExtType,
+ const InstrToOrigTy &PromotedInsts);
+
+ /// \brief Utility function to determine if \p OpIdx should be promoted when
+ /// promoting \p Inst.
+ static bool shouldSExtOperand(const Instruction *Inst, int OpIdx) {
+ if (isa<SelectInst>(Inst) && OpIdx == 0)
+ return false;
+ return true;
+ }
+
+ /// \brief Utility function to promote the operand of \p SExt when this
+ /// operand is a promotable trunc or sext.
+ /// \p PromotedInsts maps the instructions to their type before promotion.
+ /// \p CreatedInsts[out] contains how many non-free instructions have been
+ /// created to promote the operand of SExt.
+ /// Should never be called directly.
+ /// \return The promoted value which is used instead of SExt.
+ static Value *promoteOperandForTruncAndSExt(Instruction *SExt,
+ TypePromotionTransaction &TPT,
+ InstrToOrigTy &PromotedInsts,
+ unsigned &CreatedInsts);
+
+ /// \brief Utility function to promote the operand of \p SExt when this
+ /// operand is promotable and is not a supported trunc or sext.
+ /// \p PromotedInsts maps the instructions to their type before promotion.
+ /// \p CreatedInsts[out] contains how many non-free instructions have been
+ /// created to promote the operand of SExt.
+ /// Should never be called directly.
+ /// \return The promoted value which is used instead of SExt.
+ static Value *promoteOperandForOther(Instruction *SExt,
+ TypePromotionTransaction &TPT,
+ InstrToOrigTy &PromotedInsts,
+ unsigned &CreatedInsts);
+
+public:
+ /// Type for the utility function that promotes the operand of SExt.
+ typedef Value *(*Action)(Instruction *SExt, TypePromotionTransaction &TPT,
+ InstrToOrigTy &PromotedInsts,
+ unsigned &CreatedInsts);
+ /// \brief Given a sign extend instruction \p SExt, return the appropriate
+ /// action to promote the operand of \p SExt instead of using SExt.
+ /// \return NULL if no promotable action is possible with the current
+ /// sign extension.
+ /// \p InsertedTruncs keeps track of all the truncate instructions inserted by
+ /// the other CodeGenPrepare optimizations. This information is important
+ /// because we do not want to promote these instructions as CodeGenPrepare
+ /// will reinsert them later, thus creating an infinite create/remove loop.
+ /// \p PromotedInsts maps the instructions to their type before promotion.
+ static Action getAction(Instruction *SExt, const SetOfInstrs &InsertedTruncs,
+ const TargetLowering &TLI,
+ const InstrToOrigTy &PromotedInsts);
+};
+
+bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
+ Type *ConsideredSExtType,
+ const InstrToOrigTy &PromotedInsts) {
+ // We can always get through sext.
+ if (isa<SExtInst>(Inst))
+ return true;
+
+ // We can get through a binary operator, if it is legal. In other words, the
+ // binary operator must have a nuw or nsw flag.
+ const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
+ if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
+ (BinOp->hasNoUnsignedWrap() || BinOp->hasNoSignedWrap()))
+ return true;
+
+ // Check if we can do the following simplification.
+ // sext(trunc(sext)) --> sext
+ if (!isa<TruncInst>(Inst))
+ return false;
+
+ Value *OpndVal = Inst->getOperand(0);
+ // Check if we can use this operand in the sext.
+ // If the type is larger than the result type of the sign extension,
+ // we cannot.
+ if (OpndVal->getType()->getIntegerBitWidth() >
+ ConsideredSExtType->getIntegerBitWidth())
+ return false;
+
+ // If the operand of the truncate is not an instruction, we will not have
+ // any information on the dropped bits.
+ // (Actually we could for constants, but it is not worth the extra logic).
+ Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
+ if (!Opnd)
+ return false;
+
+ // Check if the source of the truncate is narrow enough.
+ // I.e., check that trunc just drops sign extended bits.
+ // #1 get the type of the operand.
+ const Type *OpndType;
+ InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
+ if (It != PromotedInsts.end())
+ OpndType = It->second;
+ else if (isa<SExtInst>(Opnd))
+ OpndType = cast<Instruction>(Opnd)->getOperand(0)->getType();
+ else
+ return false;
+
+ // #2 check that the truncate just drops sign extended bits.
+ if (Inst->getType()->getIntegerBitWidth() >= OpndType->getIntegerBitWidth())
+ return true;
+
+ return false;
+}
+
+TypePromotionHelper::Action TypePromotionHelper::getAction(
+ Instruction *SExt, const SetOfInstrs &InsertedTruncs,
+ const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
+ Instruction *SExtOpnd = dyn_cast<Instruction>(SExt->getOperand(0));
+ Type *SExtTy = SExt->getType();
+ // If the operand of the sign extension is not an instruction, we cannot
+ // get through.
+ // If it is, check whether we can get through.
+ if (!SExtOpnd || !canGetThrough(SExtOpnd, SExtTy, PromotedInsts))
+ return nullptr;
+
+ // Do not promote if the operand has been added by codegenprepare.
+ // Otherwise, it means we are undoing an optimization that is likely to be
+ // redone, thus causing a potential infinite loop.
+ if (isa<TruncInst>(SExtOpnd) && InsertedTruncs.count(SExtOpnd))
+ return nullptr;
+
+ // SExt or Trunc instructions.
+ // Return the related handler.
+ if (isa<SExtInst>(SExtOpnd) || isa<TruncInst>(SExtOpnd))
+ return promoteOperandForTruncAndSExt;
+
+ // Regular instruction.
+ // Abort early if we will have to insert non-free instructions.
+ if (!SExtOpnd->hasOneUse() &&
+ !TLI.isTruncateFree(SExtTy, SExtOpnd->getType()))
+ return nullptr;
+ return promoteOperandForOther;
+}
+
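The expected call sequence for these helpers is to ask getAction for a promotion strategy, run it under the transaction, and then keep or roll back the result; the SExt case added to MatchOperationAddr further down follows this shape. A condensed sketch, assuming SExt, InsertedTruncs, TLI, PromotedInsts and TPT are in scope as in that caller:

  TypePromotionHelper::Action TPH =
      TypePromotionHelper::getAction(SExt, InsertedTruncs, TLI, PromotedInsts);
  if (!TPH)
    return false; // no safe way to move the sext out of the way

  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  unsigned CreatedInsts = 0;
  Value *PromotedOperand = TPH(SExt, TPT, PromotedInsts, CreatedInsts);

  bool Profitable = /* e.g. IsPromotionProfitable(...) */ false;
  if (!Profitable) {
    TPT.rollback(LastKnownGood); // undo the speculative promotion
    return false;
  }
  return true;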
+Value *TypePromotionHelper::promoteOperandForTruncAndSExt(
+ llvm::Instruction *SExt, TypePromotionTransaction &TPT,
+ InstrToOrigTy &PromotedInsts, unsigned &CreatedInsts) {
+ // By construction, the operand of SExt is an instruction. Otherwise we cannot
+ // get through it and this method should not be called.
+ Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
+ // Replace sext(trunc(opnd)) or sext(sext(opnd))
+ // => sext(opnd).
+ TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
+ CreatedInsts = 0;
+
+ // Remove dead code.
+ if (SExtOpnd->use_empty())
+ TPT.eraseInstruction(SExtOpnd);
+
+ // Check if the sext is still needed.
+ if (SExt->getType() != SExt->getOperand(0)->getType())
+ return SExt;
+
+ // At this point we have: sext ty opnd to ty.
+ // Reassign the uses of SExt to the opnd and remove SExt.
+ Value *NextVal = SExt->getOperand(0);
+ TPT.eraseInstruction(SExt, NextVal);
+ return NextVal;
+}
+
+Value *
+TypePromotionHelper::promoteOperandForOther(Instruction *SExt,
+ TypePromotionTransaction &TPT,
+ InstrToOrigTy &PromotedInsts,
+ unsigned &CreatedInsts) {
+ // By construction, the operand of SExt is an instruction. Otherwise we cannot
+ // get through it and this method should not be called.
+ Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
+ CreatedInsts = 0;
+ if (!SExtOpnd->hasOneUse()) {
+ // SExtOpnd will be promoted.
+ // All its uses, but SExt, will need to use a truncated value of the
+ // promoted version.
+ // Create the truncate now.
+ Instruction *Trunc = TPT.createTrunc(SExt, SExtOpnd->getType());
+ Trunc->removeFromParent();
+ // Insert it just after the definition.
+ Trunc->insertAfter(SExtOpnd);
+
+ TPT.replaceAllUsesWith(SExtOpnd, Trunc);
+ // Restore the operand of SExt (which has been replaced by the previous call
+ // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
+ TPT.setOperand(SExt, 0, SExtOpnd);
+ }
+
+ // Get through the Instruction:
+ // 1. Update its type.
+ // 2. Replace the uses of SExt by Inst.
+ // 3. Sign extend each operand that needs to be sign extended.
+
+ // Remember the original type of the instruction before promotion.
+ // This is useful to know that the high bits are sign extended bits.
+ PromotedInsts.insert(
+ std::pair<Instruction *, Type *>(SExtOpnd, SExtOpnd->getType()));
+ // Step #1.
+ TPT.mutateType(SExtOpnd, SExt->getType());
+ // Step #2.
+ TPT.replaceAllUsesWith(SExt, SExtOpnd);
+ // Step #3.
+ Instruction *SExtForOpnd = SExt;
+
+ DEBUG(dbgs() << "Propagate SExt to operands\n");
+ for (int OpIdx = 0, EndOpIdx = SExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
+ ++OpIdx) {
+ DEBUG(dbgs() << "Operand:\n" << *(SExtOpnd->getOperand(OpIdx)) << '\n');
+ if (SExtOpnd->getOperand(OpIdx)->getType() == SExt->getType() ||
+ !shouldSExtOperand(SExtOpnd, OpIdx)) {
+ DEBUG(dbgs() << "No need to propagate\n");
+ continue;
+ }
+ // Check if we can statically sign extend the operand.
+ Value *Opnd = SExtOpnd->getOperand(OpIdx);
+ if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
+ DEBUG(dbgs() << "Statically sign extend\n");
+ TPT.setOperand(
+ SExtOpnd, OpIdx,
+ ConstantInt::getSigned(SExt->getType(), Cst->getSExtValue()));
+ continue;
+ }
+ // UndefValues are typed, so we have to statically sign extend them.
+ if (isa<UndefValue>(Opnd)) {
+ DEBUG(dbgs() << "Statically sign extend\n");
+ TPT.setOperand(SExtOpnd, OpIdx, UndefValue::get(SExt->getType()));
+ continue;
+ }
+
+ // Otherwise we have to explicitly sign extend the operand.
+ // Check if SExt was reused to sign extend an operand.
+ if (!SExtForOpnd) {
+ // If yes, create a new one.
+ DEBUG(dbgs() << "More operands to sext\n");
+ SExtForOpnd = TPT.createSExt(SExt, Opnd, SExt->getType());
+ ++CreatedInsts;
+ }
+
+ TPT.setOperand(SExtForOpnd, 0, Opnd);
+
+ // Move the sign extension before the insertion point.
+ TPT.moveBefore(SExtForOpnd, SExtOpnd);
+ TPT.setOperand(SExtOpnd, OpIdx, SExtForOpnd);
+ // If more sext are required, new instructions will have to be created.
+ SExtForOpnd = nullptr;
+ }
+ if (SExtForOpnd == SExt) {
+ DEBUG(dbgs() << "Sign extension is useless now\n");
+ TPT.eraseInstruction(SExt);
+ }
+ return SExtOpnd;
+}
+
+/// IsPromotionProfitable - Check whether or not promoting an instruction
+/// to a wider type was profitable.
+/// \p MatchedSize gives the number of instructions that have been matched
+/// in the addressing mode after the promotion was applied.
+/// \p SizeWithPromotion gives the number of created instructions for
+/// the promotion plus the number of instructions that have been
+/// matched in the addressing mode before the promotion.
+/// \p PromotedOperand is the value that has been promoted.
+/// \return True if the promotion is profitable, false otherwise.
+bool
+AddressingModeMatcher::IsPromotionProfitable(unsigned MatchedSize,
+ unsigned SizeWithPromotion,
+ Value *PromotedOperand) const {
+ // We folded fewer instructions than we created to promote the operand.
+ // This is not profitable.
+ if (MatchedSize < SizeWithPromotion)
+ return false;
+ if (MatchedSize > SizeWithPromotion)
+ return true;
+ // The promotion is neutral, but it may help fold the sign extension into
+ // loads, for instance.
+ // Check that we did not create an illegal instruction.
+ Instruction *PromotedInst = dyn_cast<Instruction>(PromotedOperand);
+ if (!PromotedInst)
+ return false;
+ int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
+ // If the ISDOpcode is undefined, it was undefined before the promotion.
+ if (!ISDOpcode)
+ return true;
+ // Otherwise, check if the promoted instruction is legal or not.
+ return TLI.isOperationLegalOrCustom(ISDOpcode,
+ EVT::getEVT(PromotedInst->getType()));
+}
+
/// MatchOperationAddr - Given an instruction or constant expr, see if we can
/// fold the operation into the addressing mode. If so, update the addressing
/// mode and return true, otherwise return false without modifying AddrMode.
+/// If \p MovedAway is not NULL, it is set to indicate whether or not
+/// AddrInst has to be folded into the addressing mode on success.
+/// If \p MovedAway == true, \p AddrInst will not be part of the addressing mode
+/// because it has been moved away.
+/// Thus AddrInst must not be added in the matched instructions.
+/// This state can happen when AddrInst is a sext, since it may be moved away.
+/// Therefore, AddrInst may not be valid when MovedAway is true and it must
+/// not be referenced anymore.
bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
- unsigned Depth) {
+ unsigned Depth,
+ bool *MovedAway) {
// Avoid exponential behavior on extremely deep expression trees.
if (Depth >= 5) return false;
+ // By default, all matched instructions stay in place.
+ if (MovedAway)
+ *MovedAway = false;
+
switch (Opcode) {
case Instruction::PtrToInt:
// PtrToInt is always a noop, as we know that the int type is pointer sized.
@@ -1033,6 +1987,7 @@ bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
return MatchAddr(AddrInst->getOperand(0), Depth);
return false;
case Instruction::BitCast:
+ case Instruction::AddrSpaceCast:
// BitCast is always a noop, and we can handle it as long as it is
// int->int or pointer->pointer (we don't want int<->fp or something).
if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
@@ -1047,6 +2002,13 @@ bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
// Check to see if we can merge in the RHS then the LHS. If so, we win.
ExtAddrMode BackupAddrMode = AddrMode;
unsigned OldSize = AddrModeInsts.size();
+ // Start a transaction at this point.
+ // The LHS may match but not the RHS.
+ // Therefore, we need a higher level restoration point to undo a partially
+ // matched operation.
+ TypePromotionTransaction::ConstRestorationPt LastKnownGood =
+ TPT.getRestorationPoint();
+
if (MatchAddr(AddrInst->getOperand(1), Depth+1) &&
MatchAddr(AddrInst->getOperand(0), Depth+1))
return true;
@@ -1054,6 +2016,7 @@ bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
// Restore the old addr mode info.
AddrMode = BackupAddrMode;
AddrModeInsts.resize(OldSize);
+ TPT.rollback(LastKnownGood);
// Otherwise this was over-aggressive. Try merging in the LHS then the RHS.
if (MatchAddr(AddrInst->getOperand(0), Depth+1) &&
@@ -1063,6 +2026,7 @@ bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
// Otherwise we definitely can't merge the ADD in.
AddrMode = BackupAddrMode;
AddrModeInsts.resize(OldSize);
+ TPT.rollback(LastKnownGood);
break;
}
//case Instruction::Or:
@@ -1072,7 +2036,8 @@ bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
case Instruction::Shl: {
// Can only handle X*C and X << C.
ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
- if (!RHS) return false;
+ if (!RHS)
+ return false;
int64_t Scale = RHS->getSExtValue();
if (Opcode == Instruction::Shl)
Scale = 1LL << Scale;
@@ -1165,6 +2130,53 @@ bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
return true;
}
+ case Instruction::SExt: {
+ Instruction *SExt = dyn_cast<Instruction>(AddrInst);
+ if (!SExt)
+ return false;
+
+ // Try to move this sext out of the way of the addressing mode.
+ // Ask for a method for doing so.
+ TypePromotionHelper::Action TPH = TypePromotionHelper::getAction(
+ SExt, InsertedTruncs, TLI, PromotedInsts);
+ if (!TPH)
+ return false;
+
+ TypePromotionTransaction::ConstRestorationPt LastKnownGood =
+ TPT.getRestorationPoint();
+ unsigned CreatedInsts = 0;
+ Value *PromotedOperand = TPH(SExt, TPT, PromotedInsts, CreatedInsts);
+ // SExt has been moved away.
+ // Thus either it will be rematched later in the recursive calls or it is
+ // gone. Anyway, we must not fold it into the addressing mode at this point.
+ // E.g.,
+ // op = add opnd, 1
+ // idx = sext op
+ // addr = gep base, idx
+ // is now:
+ // promotedOpnd = sext opnd <- no match here
+ // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls)
+ // addr = gep base, op <- match
+ if (MovedAway)
+ *MovedAway = true;
+
+ assert(PromotedOperand &&
+ "TypePromotionHelper should have filtered out those cases");
+
+ ExtAddrMode BackupAddrMode = AddrMode;
+ unsigned OldSize = AddrModeInsts.size();
+
+ if (!MatchAddr(PromotedOperand, Depth) ||
+ !IsPromotionProfitable(AddrModeInsts.size(), OldSize + CreatedInsts,
+ PromotedOperand)) {
+ AddrMode = BackupAddrMode;
+ AddrModeInsts.resize(OldSize);
+ DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
+ TPT.rollback(LastKnownGood);
+ return false;
+ }
+ return true;
+ }
}
return false;
}
@@ -1175,6 +2187,10 @@ bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
/// or intptr_t for the target.
///
bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
+ // Start a transaction at this point that we will rollback if the matching
+ // fails.
+ TypePromotionTransaction::ConstRestorationPt LastKnownGood =
+ TPT.getRestorationPoint();
if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
// Fold in immediates if legal for the target.
AddrMode.BaseOffs += CI->getSExtValue();
@@ -1183,18 +2199,23 @@ bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
AddrMode.BaseOffs -= CI->getSExtValue();
} else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
// If this is a global variable, try to fold it into the addressing mode.
- if (AddrMode.BaseGV == 0) {
+ if (!AddrMode.BaseGV) {
AddrMode.BaseGV = GV;
if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
return true;
- AddrMode.BaseGV = 0;
+ AddrMode.BaseGV = nullptr;
}
} else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
ExtAddrMode BackupAddrMode = AddrMode;
unsigned OldSize = AddrModeInsts.size();
// Check to see if it is possible to fold this operation.
- if (MatchOperationAddr(I, I->getOpcode(), Depth)) {
+ bool MovedAway = false;
+ if (MatchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
+ // This instruction may have been moved away. If so, there is nothing
+ // to check here.
+ if (MovedAway)
+ return true;
// Okay, it's possible to fold this. Check to see if it is actually
// *profitable* to do so. We use a simple cost model to avoid increasing
// register pressure too much.
@@ -1208,10 +2229,12 @@ bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
//cerr << "NOT FOLDING: " << *I;
AddrMode = BackupAddrMode;
AddrModeInsts.resize(OldSize);
+ TPT.rollback(LastKnownGood);
}
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
if (MatchOperationAddr(CE, CE->getOpcode(), Depth))
return true;
+ TPT.rollback(LastKnownGood);
} else if (isa<ConstantPointerNull>(Addr)) {
// Null pointer gets folded without affecting the addressing mode.
return true;
@@ -1225,7 +2248,7 @@ bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
return true;
AddrMode.HasBaseReg = false;
- AddrMode.BaseReg = 0;
+ AddrMode.BaseReg = nullptr;
}
// If the base register is already taken, see if we can do [r+r].
@@ -1235,9 +2258,10 @@ bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
return true;
AddrMode.Scale = 0;
- AddrMode.ScaledReg = 0;
+ AddrMode.ScaledReg = nullptr;
}
// Couldn't match.
+ TPT.rollback(LastKnownGood);
return false;
}
@@ -1280,23 +2304,22 @@ static bool FindAllMemoryUses(Instruction *I,
return true;
// Loop over all the uses, recursively processing them.
- for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
- UI != E; ++UI) {
- User *U = *UI;
+ for (Use &U : I->uses()) {
+ Instruction *UserI = cast<Instruction>(U.getUser());
- if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
- MemoryUses.push_back(std::make_pair(LI, UI.getOperandNo()));
+ if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
+ MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
continue;
}
- if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
- unsigned opNo = UI.getOperandNo();
+ if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
+ unsigned opNo = U.getOperandNo();
if (opNo == 0) return true; // Storing addr, not into addr.
MemoryUses.push_back(std::make_pair(SI, opNo));
continue;
}
- if (CallInst *CI = dyn_cast<CallInst>(U)) {
+ if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
if (!IA) return true;
@@ -1306,8 +2329,7 @@ static bool FindAllMemoryUses(Instruction *I,
continue;
}
- if (FindAllMemoryUses(cast<Instruction>(U), MemoryUses, ConsideredInsts,
- TLI))
+ if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI))
return true;
}
@@ -1321,7 +2343,7 @@ static bool FindAllMemoryUses(Instruction *I,
bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val,Value *KnownLive1,
Value *KnownLive2) {
// If Val is either of the known-live values, we know it is live!
- if (Val == 0 || Val == KnownLive1 || Val == KnownLive2)
+ if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
return true;
// All values other than instructions and arguments (e.g. constants) are live.
@@ -1380,13 +2402,13 @@ IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
// If the BaseReg or ScaledReg was referenced by the previous addrmode, their
// lifetime wasn't extended by adding this instruction.
if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
- BaseReg = 0;
+ BaseReg = nullptr;
if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
- ScaledReg = 0;
+ ScaledReg = nullptr;
// If folding this instruction (and it's subexprs) didn't extend any live
// ranges, we're ok with it.
- if (BaseReg == 0 && ScaledReg == 0)
+ if (!BaseReg && !ScaledReg)
return true;
// If all uses of this instruction are ultimately load/store/inlineasm's,
@@ -1418,12 +2440,20 @@ IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
// will tell us if the addressing mode for the memory operation will
// *actually* cover the shared instruction.
ExtAddrMode Result;
+ TypePromotionTransaction::ConstRestorationPt LastKnownGood =
+ TPT.getRestorationPoint();
AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, AddressAccessTy,
- MemoryInst, Result);
+ MemoryInst, Result, InsertedTruncs,
+ PromotedInsts, TPT);
Matcher.IgnoreProfitability = true;
bool Success = Matcher.MatchAddr(Address, 0);
(void)Success; assert(Success && "Couldn't select *anything*?");
+ // The match was to check the profitability; the changes made are not
+ // part of the original matcher. Therefore, they should be dropped,
+ // otherwise the original matcher will not present the right state.
+ TPT.rollback(LastKnownGood);
+
// If the match didn't cover I, then it won't be shared by it.
if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
I) == MatchedAddrModeInsts.end())
@@ -1467,18 +2497,21 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
// Use a worklist to iteratively look through PHI nodes, and ensure that
// the addressing mode obtained from the non-PHI roots of the graph
// are equivalent.
- Value *Consensus = 0;
+ Value *Consensus = nullptr;
unsigned NumUsesConsensus = 0;
bool IsNumUsesConsensusValid = false;
SmallVector<Instruction*, 16> AddrModeInsts;
ExtAddrMode AddrMode;
+ TypePromotionTransaction TPT;
+ TypePromotionTransaction::ConstRestorationPt LastKnownGood =
+ TPT.getRestorationPoint();
while (!worklist.empty()) {
Value *V = worklist.back();
worklist.pop_back();
// Break use-def graph loops.
if (!Visited.insert(V)) {
- Consensus = 0;
+ Consensus = nullptr;
break;
}
@@ -1491,9 +2524,9 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
// For non-PHIs, determine the addressing mode being computed.
SmallVector<Instruction*, 16> NewAddrModeInsts;
- ExtAddrMode NewAddrMode =
- AddressingModeMatcher::Match(V, AccessTy, MemoryInst,
- NewAddrModeInsts, *TLI);
+ ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
+ V, AccessTy, MemoryInst, NewAddrModeInsts, *TLI, InsertedTruncsSet,
+ PromotedInsts, TPT);
// This check is broken into two cases with very similar code to avoid using
// getNumUses() as much as possible. Some values have a lot of uses, so
@@ -1524,13 +2557,17 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
continue;
}
- Consensus = 0;
+ Consensus = nullptr;
break;
}
// If the addressing mode couldn't be determined, or if multiple different
// ones were determined, bail out now.
- if (!Consensus) return false;
+ if (!Consensus) {
+ TPT.rollback(LastKnownGood);
+ return false;
+ }
+ TPT.commit();
// Check to see if any of the instructions supersumed by this addr mode are
// non-local to I's BB.
@@ -1560,14 +2597,135 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
Value *&SunkAddr = SunkAddrs[Addr];
if (SunkAddr) {
DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
- << *MemoryInst);
+ << *MemoryInst << "\n");
if (SunkAddr->getType() != Addr->getType())
SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType());
+ } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
+ TM && TM->getSubtarget<TargetSubtargetInfo>().useAA())) {
+ // By default, we use the GEP-based method when AA is used later. This
+ // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
+ DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
+ << *MemoryInst << "\n");
+ Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(Addr->getType());
+ Value *ResultPtr = nullptr, *ResultIndex = nullptr;
+
+ // First, find the pointer.
+ if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
+ ResultPtr = AddrMode.BaseReg;
+ AddrMode.BaseReg = nullptr;
+ }
+
+ if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
+ // We can't add more than one pointer together, nor can we scale a
+ // pointer (both of which seem meaningless).
+ if (ResultPtr || AddrMode.Scale != 1)
+ return false;
+
+ ResultPtr = AddrMode.ScaledReg;
+ AddrMode.Scale = 0;
+ }
+
+ if (AddrMode.BaseGV) {
+ if (ResultPtr)
+ return false;
+
+ ResultPtr = AddrMode.BaseGV;
+ }
+
+ // If the real base value actually came from an inttoptr, then the matcher
+ // will look through it and provide only the integer value. In that case,
+ // use it here.
+ if (!ResultPtr && AddrMode.BaseReg) {
+ ResultPtr =
+ Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), "sunkaddr");
+ AddrMode.BaseReg = nullptr;
+ } else if (!ResultPtr && AddrMode.Scale == 1) {
+ ResultPtr =
+ Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), "sunkaddr");
+ AddrMode.Scale = 0;
+ }
+
+ if (!ResultPtr &&
+ !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) {
+ SunkAddr = Constant::getNullValue(Addr->getType());
+ } else if (!ResultPtr) {
+ return false;
+ } else {
+ Type *I8PtrTy =
+ Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace());
+
+ // Start with the base register. Do this first so that subsequent address
+ // matching finds it last, which will prevent it from trying to match it
+ // as the scaled value in case it happens to be a mul. That would be
+ // problematic if we've sunk a different mul for the scale, because then
+ // we'd end up sinking both muls.
+ if (AddrMode.BaseReg) {
+ Value *V = AddrMode.BaseReg;
+ if (V->getType() != IntPtrTy)
+ V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
+
+ ResultIndex = V;
+ }
+
+ // Add the scale value.
+ if (AddrMode.Scale) {
+ Value *V = AddrMode.ScaledReg;
+ if (V->getType() == IntPtrTy) {
+ // done.
+ } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
+ cast<IntegerType>(V->getType())->getBitWidth()) {
+ V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
+ } else {
+ // It is only safe to sign extend the BaseReg if we know that the math
+ // required to create it did not overflow before we extend it. Since
+ // the original IR value was tossed in favor of a constant back when
+ // the AddrMode was created we need to bail out gracefully if widths
+ // do not match instead of extending it.
+ Instruction *I = dyn_cast_or_null<Instruction>(ResultIndex);
+ if (I && (ResultIndex != AddrMode.BaseReg))
+ I->eraseFromParent();
+ return false;
+ }
+
+ if (AddrMode.Scale != 1)
+ V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
+ "sunkaddr");
+ if (ResultIndex)
+ ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
+ else
+ ResultIndex = V;
+ }
+
+ // Add in the Base Offset if present.
+ if (AddrMode.BaseOffs) {
+ Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
+ if (ResultIndex) {
+ // We need to add this separately from the scale above to help with
+ // SDAG consecutive load/store merging.
+ if (ResultPtr->getType() != I8PtrTy)
+ ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);
+ ResultPtr = Builder.CreateGEP(ResultPtr, ResultIndex, "sunkaddr");
+ }
+
+ ResultIndex = V;
+ }
+
+ if (!ResultIndex) {
+ SunkAddr = ResultPtr;
+ } else {
+ if (ResultPtr->getType() != I8PtrTy)
+ ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);
+ SunkAddr = Builder.CreateGEP(ResultPtr, ResultIndex, "sunkaddr");
+ }
+
+ if (SunkAddr->getType() != Addr->getType())
+ SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType());
+ }
} else {
DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
- << *MemoryInst);
+ << *MemoryInst << "\n");
Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(Addr->getType());
- Value *Result = 0;
+ Value *Result = nullptr;
// Start with the base register. Do this first so that subsequent address
// matching finds it last, which will prevent it from trying to match it
@@ -1594,7 +2752,15 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
cast<IntegerType>(V->getType())->getBitWidth()) {
V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
} else {
- V = Builder.CreateSExt(V, IntPtrTy, "sunkaddr");
+ // It is only safe to sign extend the BaseReg if we know that the math
+ // required to create it did not overflow before we extend it. Since
+ // the original IR value was tossed in favor of a constant back when
+ // the AddrMode was created we need to bail out gracefully if widths
+ // do not match instead of extending it.
+ Instruction *I = dyn_cast_or_null<Instruction>(Result);
+ if (I && (Result != AddrMode.BaseReg))
+ I->eraseFromParent();
+ return false;
}
if (AddrMode.Scale != 1)
V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
@@ -1623,7 +2789,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
Result = V;
}
- if (Result == 0)
+ if (!Result)
SunkAddr = Constant::getNullValue(Addr->getType());
else
SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
@@ -1737,12 +2903,11 @@ bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
return false;
bool DefIsLiveOut = false;
- for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
- UI != E; ++UI) {
- Instruction *User = cast<Instruction>(*UI);
+ for (User *U : I->users()) {
+ Instruction *UI = cast<Instruction>(U);
// Figure out which BB this ext is used in.
- BasicBlock *UserBB = User->getParent();
+ BasicBlock *UserBB = UI->getParent();
if (UserBB == DefBB) continue;
DefIsLiveOut = true;
break;
@@ -1751,14 +2916,13 @@ bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
return false;
// Make sure none of the uses are PHI nodes.
- for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
- UI != E; ++UI) {
- Instruction *User = cast<Instruction>(*UI);
- BasicBlock *UserBB = User->getParent();
+ for (User *U : Src->users()) {
+ Instruction *UI = cast<Instruction>(U);
+ BasicBlock *UserBB = UI->getParent();
if (UserBB == DefBB) continue;
// Be conservative. We don't want this xform to end up introducing
// reloads just before load / store instructions.
- if (isa<PHINode>(User) || isa<LoadInst>(User) || isa<StoreInst>(User))
+ if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
return false;
}
@@ -1766,10 +2930,8 @@ bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
DenseMap<BasicBlock*, Instruction*> InsertedTruncs;
bool MadeChange = false;
- for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
- UI != E; ++UI) {
- Use &TheUse = UI.getUse();
- Instruction *User = cast<Instruction>(*UI);
+ for (Use &U : Src->uses()) {
+ Instruction *User = cast<Instruction>(U.getUser());
// Figure out which BB this ext is used in.
BasicBlock *UserBB = User->getParent();
@@ -1781,10 +2943,11 @@ bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
if (!InsertedTrunc) {
BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt);
+ InsertedTruncsSet.insert(InsertedTrunc);
}
// Replace a use of the {s|z}ext source with a use of the result.
- TheUse = InsertedTrunc;
+ U = InsertedTrunc;
++NumExtUses;
MadeChange = true;
}
@@ -1880,12 +3043,78 @@ bool CodeGenPrepare::OptimizeSelectInst(SelectInst *SI) {
return true;
}
+static bool isBroadcastShuffle(ShuffleVectorInst *SVI) {
+ SmallVector<int, 16> Mask(SVI->getShuffleMask());
+ int SplatElem = -1;
+ for (unsigned i = 0; i < Mask.size(); ++i) {
+ if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem)
+ return false;
+ SplatElem = Mask[i];
+ }
+
+ return true;
+}
+
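isBroadcastShuffle above accepts a shuffle as a broadcast when every defined lane names the same source element, with -1 standing for an undefined lane. The same rule restated over a plain mask vector as a small self-contained sketch (the names are illustrative, not LLVM API):

#include <cassert>
#include <vector>

// Mirror of the isBroadcastShuffle loop: a lane that is not -1 (undef) must
// match the splat element seen so far.
static bool isSplatMask(const std::vector<int> &Mask) {
  int SplatElem = -1;
  for (int Lane : Mask) {
    if (SplatElem != -1 && Lane != -1 && Lane != SplatElem)
      return false;
    SplatElem = Lane;
  }
  return true;
}

int main() {
  assert(isSplatMask({0, 0, 0, 0}));   // broadcast of element 0
  assert(isSplatMask({-1, 2, 2, -1})); // undef lanes are tolerated
  assert(!isSplatMask({0, 1, 2, 3}));  // an identity shuffle is not a splat
  return 0;
}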
+/// Some targets have expensive vector shifts if the lanes aren't all the same
+/// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases
+/// it's often worth sinking a shufflevector splat down to its use so that
+/// codegen can spot all lanes are identical.
+bool CodeGenPrepare::OptimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
+ BasicBlock *DefBB = SVI->getParent();
+
+ // Only do this xform if variable vector shifts are particularly expensive.
+ if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType()))
+ return false;
+
+ // We only expect better codegen by sinking a shuffle if we can recognise a
+ // constant splat.
+ if (!isBroadcastShuffle(SVI))
+ return false;
+
+ // InsertedShuffles - Only insert a shuffle in each block once.
+ DenseMap<BasicBlock*, Instruction*> InsertedShuffles;
+
+ bool MadeChange = false;
+ for (User *U : SVI->users()) {
+ Instruction *UI = cast<Instruction>(U);
+
+ // Figure out which BB this shuffle is used in.
+ BasicBlock *UserBB = UI->getParent();
+ if (UserBB == DefBB) continue;
+
+ // For now only apply this when the splat is used by a shift instruction.
+ if (!UI->isShift()) continue;
+
+ // Everything checks out, sink the shuffle if the user's block doesn't
+ // already have a copy.
+ Instruction *&InsertedShuffle = InsertedShuffles[UserBB];
+
+ if (!InsertedShuffle) {
+ BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
+ InsertedShuffle = new ShuffleVectorInst(SVI->getOperand(0),
+ SVI->getOperand(1),
+ SVI->getOperand(2), "", InsertPt);
+ }
+
+ UI->replaceUsesOfWith(SVI, InsertedShuffle);
+ MadeChange = true;
+ }
+
+ // If we removed all uses, nuke the shuffle.
+ if (SVI->use_empty()) {
+ SVI->eraseFromParent();
+ MadeChange = true;
+ }
+
+ return MadeChange;
+}
+
bool CodeGenPrepare::OptimizeInst(Instruction *I) {
if (PHINode *P = dyn_cast<PHINode>(I)) {
// It is possible for very late stage optimizations (such as SimplifyCFG)
// to introduce PHI nodes too late to be cleaned up. If we detect such a
// trivial PHI, go ahead and zap it here.
- if (Value *V = SimplifyInstruction(P, TLI ? TLI->getDataLayout() : 0,
+ if (Value *V = SimplifyInstruction(P, TLI ? TLI->getDataLayout() : nullptr,
TLInfo, DT)) {
P->replaceAllUsesWith(V);
P->eraseFromParent();
@@ -1909,14 +3138,23 @@ bool CodeGenPrepare::OptimizeInst(Instruction *I) {
return true;
if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
- bool MadeChange = MoveExtToFormExtLoad(I);
- return MadeChange | OptimizeExtUses(I);
+ /// Sink a zext or sext into its user blocks if the target type doesn't
+ /// fit in one register
+ if (TLI && TLI->getTypeAction(CI->getContext(),
+ TLI->getValueType(CI->getType())) ==
+ TargetLowering::TypeExpandInteger) {
+ return SinkCast(CI);
+ } else {
+ bool MadeChange = MoveExtToFormExtLoad(I);
+ return MadeChange | OptimizeExtUses(I);
+ }
}
return false;
}
if (CmpInst *CI = dyn_cast<CmpInst>(I))
- return OptimizeCmpExpression(CI);
+ if (!TLI || !TLI->hasMultipleConditionRegisters())
+ return OptimizeCmpExpression(CI);
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (TLI)
@@ -1931,6 +3169,17 @@ bool CodeGenPrepare::OptimizeInst(Instruction *I) {
return false;
}
+ BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);
+
+ if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
+ BinOp->getOpcode() == Instruction::LShr)) {
+ ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
+ if (TLI && CI && TLI->hasExtractBitsInsn())
+ return OptimizeExtractBits(BinOp, CI, *TLI);
+
+ return false;
+ }
+
if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
if (GEPI->hasAllZeroIndices()) {
/// The GEP operand must be a pointer, so must its result -> BitCast
@@ -1951,6 +3200,9 @@ bool CodeGenPrepare::OptimizeInst(Instruction *I) {
if (SelectInst *SI = dyn_cast<SelectInst>(I))
return OptimizeSelectInst(SI);
+ if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
+ return OptimizeShuffleVectorInst(SVI);
+
return false;
}
@@ -1976,11 +3228,16 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
bool CodeGenPrepare::PlaceDbgValues(Function &F) {
bool MadeChange = false;
for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
- Instruction *PrevNonDbgInst = NULL;
+ Instruction *PrevNonDbgInst = nullptr;
for (BasicBlock::iterator BI = I->begin(), BE = I->end(); BI != BE;) {
Instruction *Insn = BI; ++BI;
DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
- if (!DVI) {
+ // Leave dbg.values that refer to an alloca alone. These
+      // intrinsics describe the address of a variable (= the alloca)
+ // being taken. They should not be moved next to the alloca
+ // (and to the beginning of the scope), but rather stay close to
+ // where said address is used.
+ if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) {
PrevNonDbgInst = Insn;
continue;
}
@@ -2000,3 +3257,70 @@ bool CodeGenPrepare::PlaceDbgValues(Function &F) {
}
return MadeChange;
}
+
+// If there is a sequence that branches based on comparing a single bit
+// against zero that can be combined into a single instruction, and the
+// target supports folding these into a single instruction, sink the
+// mask and compare into the branch uses. Do this before OptimizeBlock ->
+// OptimizeInst -> OptimizeCmpExpression, which perturbs the pattern being
+// searched for.
+bool CodeGenPrepare::sinkAndCmp(Function &F) {
+ if (!EnableAndCmpSinking)
+ return false;
+ if (!TLI || !TLI->isMaskAndBranchFoldingLegal())
+ return false;
+ bool MadeChange = false;
+ for (Function::iterator I = F.begin(), E = F.end(); I != E; ) {
+ BasicBlock *BB = I++;
+
+ // Does this BB end with the following?
+    //   %andVal = and %val, #single-bit-set
+    //   %icmpVal = icmp %andVal, 0
+    //   br i1 %icmpVal, label %dest1, label %dest2
+ BranchInst *Brcc = dyn_cast<BranchInst>(BB->getTerminator());
+ if (!Brcc || !Brcc->isConditional())
+ continue;
+ ICmpInst *Cmp = dyn_cast<ICmpInst>(Brcc->getOperand(0));
+ if (!Cmp || Cmp->getParent() != BB)
+ continue;
+ ConstantInt *Zero = dyn_cast<ConstantInt>(Cmp->getOperand(1));
+ if (!Zero || !Zero->isZero())
+ continue;
+ Instruction *And = dyn_cast<Instruction>(Cmp->getOperand(0));
+ if (!And || And->getOpcode() != Instruction::And || And->getParent() != BB)
+ continue;
+ ConstantInt* Mask = dyn_cast<ConstantInt>(And->getOperand(1));
+ if (!Mask || !Mask->getUniqueInteger().isPowerOf2())
+ continue;
+ DEBUG(dbgs() << "found and; icmp ?,0; brcc\n"); DEBUG(BB->dump());
+
+ // Push the "and; icmp" for any users that are conditional branches.
+ // Since there can only be one branch use per BB, we don't need to keep
+ // track of which BBs we insert into.
+ for (Value::use_iterator UI = Cmp->use_begin(), E = Cmp->use_end();
+ UI != E; ) {
+ Use &TheUse = *UI;
+ // Find brcc use.
+ BranchInst *BrccUser = dyn_cast<BranchInst>(*UI);
+ ++UI;
+ if (!BrccUser || !BrccUser->isConditional())
+ continue;
+ BasicBlock *UserBB = BrccUser->getParent();
+ if (UserBB == BB) continue;
+ DEBUG(dbgs() << "found Brcc use\n");
+
+ // Sink the "and; icmp" to use.
+ MadeChange = true;
+ BinaryOperator *NewAnd =
+ BinaryOperator::CreateAnd(And->getOperand(0), And->getOperand(1), "",
+ BrccUser);
+ CmpInst *NewCmp =
+ CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(), NewAnd, Zero,
+ "", BrccUser);
+ TheUse = NewCmp;
+ ++NumAndCmpsMoved;
+ DEBUG(BrccUser->getParent()->dump());
+ }
+ }
+ return MadeChange;
+}
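
For context, the pattern sinkAndCmp looks for is a branch on a single-bit test: the and-mask must have exactly one bit set, which is what the `getUniqueInteger().isPowerOf2()` check above enforces. A minimal standalone sketch of that predicate (isSingleBitMask and branchOnBitClear are illustrative names, not part of the patch):

// A mask selects a single bit iff it is a non-zero power of two. This is the
// property required of the 'and' operand before the "and; icmp eq 0; br"
// sequence is sunk next to each conditional-branch user.
#include <cassert>
#include <cstdint>

static bool isSingleBitMask(uint64_t Mask) {
  return Mask != 0 && (Mask & (Mask - 1)) == 0;
}

// The branch then reduces to "is bit N of Val clear?".
static bool branchOnBitClear(uint64_t Val, uint64_t Mask) {
  assert(isSingleBitMask(Mask));
  return (Val & Mask) == 0;   // icmp eq (and Val, Mask), 0
}

int main() {
  assert(isSingleBitMask(0x40));
  assert(!isSingleBitMask(0x41));   // two bits set
  assert(!isSingleBitMask(0));
  assert(branchOnBitClear(0xF0, 0x08));
  assert(!branchOnBitClear(0xF0, 0x10));
  return 0;
}
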
diff --git a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
index 18c8e0a..d3ffcc7 100644
--- a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
+++ b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
@@ -13,7 +13,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "post-RA-sched"
#include "CriticalAntiDepBreaker.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -26,6 +25,8 @@
using namespace llvm;
+#define DEBUG_TYPE "post-RA-sched"
+
CriticalAntiDepBreaker::
CriticalAntiDepBreaker(MachineFunction& MFi, const RegisterClassInfo &RCI) :
AntiDepBreaker(), MF(MFi),
@@ -33,7 +34,7 @@ CriticalAntiDepBreaker(MachineFunction& MFi, const RegisterClassInfo &RCI) :
TII(MF.getTarget().getInstrInfo()),
TRI(MF.getTarget().getRegisterInfo()),
RegClassInfo(RCI),
- Classes(TRI->getNumRegs(), static_cast<const TargetRegisterClass *>(0)),
+ Classes(TRI->getNumRegs(), nullptr),
KillIndices(TRI->getNumRegs(), 0),
DefIndices(TRI->getNumRegs(), 0),
KeepRegs(TRI->getNumRegs(), false) {}
@@ -45,7 +46,7 @@ void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
const unsigned BBSize = BB->size();
for (unsigned i = 0, e = TRI->getNumRegs(); i != e; ++i) {
// Clear out the register class data.
- Classes[i] = static_cast<const TargetRegisterClass *>(0);
+ Classes[i] = nullptr;
// Initialize the indices to indicate that no registers are live.
KillIndices[i] = ~0u;
@@ -75,7 +76,7 @@ void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
// callee-saved register that is not saved in the prolog.
const MachineFrameInfo *MFI = MF.getFrameInfo();
BitVector Pristine = MFI->getPristineRegs(BB);
- for (const uint16_t *I = TRI->getCalleeSavedRegs(&MF); *I; ++I) {
+ for (const MCPhysReg *I = TRI->getCalleeSavedRegs(&MF); *I; ++I) {
if (!IsReturnBlock && !Pristine.test(*I)) continue;
for (MCRegAliasIterator AI(*I, TRI, true); AI.isValid(); ++AI) {
unsigned Reg = *AI;
@@ -124,7 +125,7 @@ void CriticalAntiDepBreaker::Observe(MachineInstr *MI, unsigned Count,
/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static const SDep *CriticalPathStep(const SUnit *SU) {
- const SDep *Next = 0;
+ const SDep *Next = nullptr;
unsigned NextDepth = 0;
// Find the predecessor edge with the greatest depth.
for (SUnit::const_pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
@@ -145,8 +146,8 @@ static const SDep *CriticalPathStep(const SUnit *SU) {
void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
// It's not safe to change register allocation for source operands of
- // that have special allocation requirements. Also assume all registers
- // used in a call must not be changed (ABI).
+ // instructions that have special allocation requirements. Also assume all
+ // registers used in a call must not be changed (ABI).
// FIXME: The issue with predicated instruction is more complex. We are being
// conservative here because the kill markers cannot be trusted after
// if-conversion:
@@ -171,7 +172,7 @@ void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
- const TargetRegisterClass *NewRC = 0;
+ const TargetRegisterClass *NewRC = nullptr;
if (i < MI->getDesc().getNumOperands())
NewRC = TII->getRegClass(MI->getDesc(), i, TRI, MF);
@@ -199,6 +200,28 @@ void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
RegRefs.insert(std::make_pair(Reg, &MO));
+ // If this reg is tied and live (Classes[Reg] is set to -1), we can't change
+ // it or any of its sub or super regs. We need to use KeepRegs to mark the
+ // reg because not all uses of the same reg within an instruction are
+ // necessarily tagged as tied.
+ // Example: an x86 "xor %eax, %eax" will have one source operand tied to the
+ // def register but not the second (see PR20020 for details).
+ // FIXME: can this check be relaxed to account for undef uses
+ // of a register? In the above 'xor' example, the uses of %eax are undef, so
+ // earlier instructions could still replace %eax even though the 'xor'
+ // itself can't be changed.
+ if (MI->isRegTiedToUseOperand(i) &&
+ Classes[Reg] == reinterpret_cast<TargetRegisterClass *>(-1)) {
+ for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
+ SubRegs.isValid(); ++SubRegs) {
+ KeepRegs.set(*SubRegs);
+ }
+ for (MCSuperRegIterator SuperRegs(Reg, TRI);
+ SuperRegs.isValid(); ++SuperRegs) {
+ KeepRegs.set(*SuperRegs);
+ }
+ }
+
if (MO.isUse() && Special) {
if (!KeepRegs.test(Reg)) {
for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
@@ -227,7 +250,7 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
DefIndices[i] = Count;
KillIndices[i] = ~0u;
KeepRegs.reset(i);
- Classes[i] = 0;
+ Classes[i] = nullptr;
RegRefs.erase(i);
}
@@ -235,16 +258,22 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
if (!MO.isDef()) continue;
+
+ // If we've already marked this reg as unchangeable, carry on.
+ if (KeepRegs.test(Reg)) continue;
+
// Ignore two-addr defs.
if (MI->isRegTiedToUseOperand(i)) continue;
+ // FIXME: we should use a SubRegIterator that includes self (as above), so
+ // we don't have to repeat all this code for the reg itself.
DefIndices[Reg] = Count;
KillIndices[Reg] = ~0u;
assert(((KillIndices[Reg] == ~0u) !=
(DefIndices[Reg] == ~0u)) &&
"Kill and Def maps aren't consistent for Reg!");
KeepRegs.reset(Reg);
- Classes[Reg] = 0;
+ Classes[Reg] = nullptr;
RegRefs.erase(Reg);
// Repeat, for all subregs.
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
@@ -252,7 +281,7 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
DefIndices[SubregReg] = Count;
KillIndices[SubregReg] = ~0u;
KeepRegs.reset(SubregReg);
- Classes[SubregReg] = 0;
+ Classes[SubregReg] = nullptr;
RegRefs.erase(SubregReg);
}
// Conservatively mark super-registers as unusable.
@@ -267,7 +296,7 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
if (Reg == 0) continue;
if (!MO.isUse()) continue;
- const TargetRegisterClass *NewRC = 0;
+ const TargetRegisterClass *NewRC = nullptr;
if (i < MI->getDesc().getNumOperands())
NewRC = TII->getRegClass(MI->getDesc(), i, TRI, MF);
@@ -280,6 +309,9 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
RegRefs.insert(std::make_pair(Reg, &MO));
+ // FIXME: we should use an MCRegAliasIterator that includes self so we don't
+ // have to repeat all this code for the reg itself.
+
// It wasn't previously live but now it is, this is a kill.
if (KillIndices[Reg] == ~0u) {
KillIndices[Reg] = Count;
@@ -308,7 +340,7 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
// the two-address instruction also defines NewReg, as may happen with
// pre/postincrement loads. In this case, both the use and def operands are in
// RegRefs because the def is inserted by PrescanInstruction and not erased
-// during ScanInstruction. So checking for an instructions with definitions of
+// during ScanInstruction. So checking for an instruction with definitions of
// both NewReg and AntiDepReg covers it.
bool
CriticalAntiDepBreaker::isNewRegClobberedByRefs(RegRefIter RegRefBegin,
@@ -324,7 +356,7 @@ CriticalAntiDepBreaker::isNewRegClobberedByRefs(RegRefIter RegRefBegin,
if (RefOper->isDef() && RefOper->isEarlyClobber())
return true;
- // Handle cases in which this instructions defines NewReg.
+ // Handle cases in which this instruction defines NewReg.
MachineInstr *MI = RefOper->getParent();
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &CheckOper = MI->getOperand(i);
@@ -342,11 +374,11 @@ CriticalAntiDepBreaker::isNewRegClobberedByRefs(RegRefIter RegRefBegin,
return true;
// Don't allow an instruction using AntiDepReg to be earlyclobbered by
- // NewReg
+ // NewReg.
if (CheckOper.isEarlyClobber())
return true;
- // Don't allow inline asm to define NewReg at all. Who know what it's
+ // Don't allow inline asm to define NewReg at all. Who knows what it's
// doing with it.
if (MI->isInlineAsm())
return true;
@@ -419,7 +451,7 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
DenseMap<MachineInstr*,const SUnit*> MISUnitMap;
// Find the node at the bottom of the critical path.
- const SUnit *Max = 0;
+ const SUnit *Max = nullptr;
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
const SUnit *SU = &SUnits[i];
MISUnitMap[SU->getInstr()] = SU;
@@ -493,8 +525,7 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
// as we go to help determine which registers are available.
unsigned Broken = 0;
unsigned Count = InsertPosIndex - 1;
- for (MachineBasicBlock::iterator I = End, E = Begin;
- I != E; --Count) {
+ for (MachineBasicBlock::iterator I = End, E = Begin; I != E; --Count) {
MachineInstr *MI = --I;
if (MI->isDebugValue())
continue;
@@ -525,7 +556,7 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
// Don't break anti-dependencies on non-allocatable registers.
AntiDepReg = 0;
else if (KeepRegs.test(AntiDepReg))
- // Don't break anti-dependencies if an use down below requires
+ // Don't break anti-dependencies if a use down below requires
// this exact register.
AntiDepReg = 0;
else {
@@ -551,8 +582,8 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
CriticalPathMI = CriticalPathSU->getInstr();
} else {
// We've reached the end of the critical path.
- CriticalPathSU = 0;
- CriticalPathMI = 0;
+ CriticalPathSU = nullptr;
+ CriticalPathMI = nullptr;
}
}
@@ -563,8 +594,7 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
// If MI's defs have a special allocation requirement, don't allow
// any def registers to be changed. Also assume all registers
// defined in a call must not be changed (ABI).
- if (MI->isCall() || MI->hasExtraDefRegAllocReq() ||
- TII->isPredicated(MI))
+ if (MI->isCall() || MI->hasExtraDefRegAllocReq() || TII->isPredicated(MI))
// If this instruction's defs have special allocation requirement, don't
// break this anti-dependency.
AntiDepReg = 0;
@@ -589,13 +619,14 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
// Determine AntiDepReg's register class, if it is live and is
// consistently used within a single class.
- const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
- assert((AntiDepReg == 0 || RC != NULL) &&
+ const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg]
+ : nullptr;
+ assert((AntiDepReg == 0 || RC != nullptr) &&
"Register should be live if it's causing an anti-dependence!");
if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
AntiDepReg = 0;
- // Look for a suitable register to use to break the anti-depenence.
+ // Look for a suitable register to use to break the anti-dependence.
//
// TODO: Instead of picking the first free register, consider which might
// be the best.
@@ -638,7 +669,7 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
(DefIndices[NewReg] == ~0u)) &&
"Kill and Def maps aren't consistent for NewReg!");
- Classes[AntiDepReg] = 0;
+ Classes[AntiDepReg] = nullptr;
DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
KillIndices[AntiDepReg] = ~0u;
assert(((KillIndices[AntiDepReg] == ~0u) !=
diff --git a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h
index 565d20b..45e4ff5 100644
--- a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h
+++ b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h
@@ -55,12 +55,12 @@ class TargetRegisterInfo;
typedef std::multimap<unsigned, MachineOperand *>::const_iterator
RegRefIter;
- /// KillIndices - The index of the most recent kill (proceding bottom-up),
+ /// KillIndices - The index of the most recent kill (proceeding bottom-up),
/// or ~0u if the register is not live.
std::vector<unsigned> KillIndices;
- /// DefIndices - The index of the most recent complete def (proceding bottom
- /// up), or ~0u if the register is live.
+ /// DefIndices - The index of the most recent complete def (proceeding
+ /// bottom up), or ~0u if the register is live.
std::vector<unsigned> DefIndices;
/// KeepRegs - A set of registers which are live and cannot be changed to
@@ -72,7 +72,7 @@ class TargetRegisterInfo;
~CriticalAntiDepBreaker();
/// Start - Initialize anti-dep breaking for a new basic block.
- void StartBlock(MachineBasicBlock *BB);
+ void StartBlock(MachineBasicBlock *BB) override;
/// BreakAntiDependencies - Identifiy anti-dependencies along the critical
/// path
@@ -82,15 +82,16 @@ class TargetRegisterInfo;
MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End,
unsigned InsertPosIndex,
- DbgValueVector &DbgValues);
+ DbgValueVector &DbgValues) override;
/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
- void Observe(MachineInstr *MI, unsigned Count, unsigned InsertPosIndex);
+ void Observe(MachineInstr *MI, unsigned Count,
+ unsigned InsertPosIndex) override;
/// Finish - Finish anti-dep breaking for a basic block.
- void FinishBlock();
+ void FinishBlock() override;
private:
void PrescanInstruction(MachineInstr *MI);
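
For readers new to anti-dependence breaking (the purpose of the KillIndices/DefIndices bookkeeping documented above): a write-after-read hazard exists when an instruction redefines a register that an earlier instruction still reads, and renaming the later def to a free register removes the ordering constraint. A toy sketch of the renaming idea, deliberately unrelated to the real LLVM data structures (Instr, pickFreeReg and the register numbers are all illustrative):

// Toy write-after-read (anti-dependence) breaking: rename the second def of
// R1 to a register that is neither read nor written nearby, so a scheduler
// could reorder the two instructions.
#include <cstdio>
#include <set>

struct Instr { int Def; std::set<int> Uses; };

int pickFreeReg(const Instr Seq[], int N, int NumRegs) {
  std::set<int> Busy;
  for (int i = 0; i < N; ++i) {
    Busy.insert(Seq[i].Def);
    Busy.insert(Seq[i].Uses.begin(), Seq[i].Uses.end());
  }
  for (int R = 0; R < NumRegs; ++R)
    if (!Busy.count(R)) return R;
  return -1;   // no register available; keep the anti-dependence
}

int main() {
  // I0: R2 = add R1, R3      (reads R1)
  // I1: R1 = load [R4]       (redefines R1 -> anti-dependence with I0)
  Instr Seq[2] = {{2, {1, 3}}, {1, {4}}};
  int NewReg = pickFreeReg(Seq, 2, 8);
  if (NewReg >= 0)
    Seq[1].Def = NewReg;      // "rename" the later def; I1 may now move up
  std::printf("renamed second def to R%d\n", Seq[1].Def);
  return 0;
}
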
diff --git a/contrib/llvm/lib/CodeGen/DFAPacketizer.cpp b/contrib/llvm/lib/CodeGen/DFAPacketizer.cpp
index 6619bcf..bc6e9dc 100644
--- a/contrib/llvm/lib/CodeGen/DFAPacketizer.cpp
+++ b/contrib/llvm/lib/CodeGen/DFAPacketizer.cpp
@@ -108,7 +108,7 @@ public:
DefaultVLIWScheduler(MachineFunction &MF, MachineLoopInfo &MLI,
MachineDominatorTree &MDT, bool IsPostRA);
// Schedule - Actual scheduling work.
- void schedule();
+ void schedule() override;
};
}
@@ -121,7 +121,7 @@ DefaultVLIWScheduler::DefaultVLIWScheduler(
void DefaultVLIWScheduler::schedule() {
// Build the scheduling graph.
- buildSchedGraph(0);
+ buildSchedGraph(nullptr);
}
// VLIWPacketizerList Ctor
@@ -129,7 +129,7 @@ VLIWPacketizerList::VLIWPacketizerList(
MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
bool IsPostRA) : TM(MF.getTarget()), MF(MF) {
TII = TM.getInstrInfo();
- ResourceTracker = TII->CreateTargetScheduleState(&TM, 0);
+ ResourceTracker = TII->CreateTargetScheduleState(&TM, nullptr);
VLIWScheduler = new DefaultVLIWScheduler(MF, MLI, MDT, IsPostRA);
}
diff --git a/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp b/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
index 5efe1ff..2b144d8 100644
--- a/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
+++ b/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "codegen-dce"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -23,11 +22,13 @@
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
+#define DEBUG_TYPE "codegen-dce"
+
STATISTIC(NumDeletes, "Number of dead instructions deleted");
namespace {
class DeadMachineInstructionElim : public MachineFunctionPass {
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
const TargetRegisterInfo *TRI;
const MachineRegisterInfo *MRI;
@@ -59,7 +60,7 @@ bool DeadMachineInstructionElim::isDead(const MachineInstr *MI) const {
// Don't delete instructions with side effects.
bool SawStore = false;
- if (!MI->isSafeToMove(TII, 0, SawStore) && !MI->isPHI())
+ if (!MI->isSafeToMove(TII, nullptr, SawStore) && !MI->isPHI())
return false;
// Examine each operand.
@@ -84,6 +85,9 @@ bool DeadMachineInstructionElim::isDead(const MachineInstr *MI) const {
}
bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
+ if (skipOptnoneFunction(*MF.getFunction()))
+ return false;
+
bool AnyChanges = false;
MRI = &MF.getRegInfo();
TRI = MF.getTarget().getRegisterInfo();
@@ -127,17 +131,7 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
- MachineRegisterInfo::use_iterator nextI;
- for (MachineRegisterInfo::use_iterator I = MRI->use_begin(Reg),
- E = MRI->use_end(); I!=E; I=nextI) {
- nextI = llvm::next(I); // I is invalidated by the setReg
- MachineOperand& Use = I.getOperand();
- MachineInstr *UseMI = Use.getParent();
- if (UseMI==MI)
- continue;
- assert(Use.isDebug());
- UseMI->getOperand(0).setReg(0U);
- }
+ MRI->markUsesInDebugValueAsUndef(Reg);
}
AnyChanges = true;
MI->eraseFromParent();
diff --git a/contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp b/contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp
index c7c1752..a195586 100644
--- a/contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp
+++ b/contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp
@@ -12,22 +12,23 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "dwarfehprepare"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;
+#define DEBUG_TYPE "dwarfehprepare"
+
STATISTIC(NumResumesLowered, "Number of resume calls lowered");
namespace {
@@ -42,16 +43,16 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid.
- DwarfEHPrepare(const TargetMachine *TM) :
- FunctionPass(ID), TM(TM), RewindFunction(0) {
- initializeDominatorTreePass(*PassRegistry::getPassRegistry());
- }
+ DwarfEHPrepare(const TargetMachine *TM)
+ : FunctionPass(ID), TM(TM), RewindFunction(nullptr) {
+ initializeDominatorTreeWrapperPassPass(*PassRegistry::getPassRegistry());
+ }
- virtual bool runOnFunction(Function &Fn);
+ bool runOnFunction(Function &Fn) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const { }
+ void getAnalysisUsage(AnalysisUsage &AU) const override { }
- const char *getPassName() const {
+ const char *getPassName() const override {
return "Exception handling preparation";
}
};
@@ -68,10 +69,10 @@ FunctionPass *llvm::createDwarfEHPass(const TargetMachine *TM) {
/// instructions, including the 'resume' instruction.
Value *DwarfEHPrepare::GetExceptionObject(ResumeInst *RI) {
Value *V = RI->getOperand(0);
- Value *ExnObj = 0;
+ Value *ExnObj = nullptr;
InsertValueInst *SelIVI = dyn_cast<InsertValueInst>(V);
- LoadInst *SelLoad = 0;
- InsertValueInst *ExcIVI = 0;
+ LoadInst *SelLoad = nullptr;
+ InsertValueInst *ExcIVI = nullptr;
bool EraseIVIs = false;
if (SelIVI) {
diff --git a/contrib/llvm/lib/CodeGen/EarlyIfConversion.cpp b/contrib/llvm/lib/CodeGen/EarlyIfConversion.cpp
index 5447df0..c470632 100644
--- a/contrib/llvm/lib/CodeGen/EarlyIfConversion.cpp
+++ b/contrib/llvm/lib/CodeGen/EarlyIfConversion.cpp
@@ -16,7 +16,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "early-ifcvt"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
@@ -40,6 +39,8 @@
using namespace llvm;
+#define DEBUG_TYPE "early-ifcvt"
+
// Absolute maximum number of instructions allowed per speculated block.
// This bypasses all other heuristics, so it should be set fairly high.
static cl::opt<unsigned>
@@ -219,7 +220,7 @@ bool SSAIfConv::canSpeculateInstrs(MachineBasicBlock *MBB) {
// We never speculate stores, so an AA pointer isn't necessary.
bool DontMoveAcrossStore = true;
- if (!I->isSafeToMove(TII, 0, DontMoveAcrossStore)) {
+ if (!I->isSafeToMove(TII, nullptr, DontMoveAcrossStore)) {
DEBUG(dbgs() << "Can't speculate: " << *I);
return false;
}
@@ -338,7 +339,7 @@ bool SSAIfConv::findInsertionPoint() {
///
bool SSAIfConv::canConvertIf(MachineBasicBlock *MBB) {
Head = MBB;
- TBB = FBB = Tail = 0;
+ TBB = FBB = Tail = nullptr;
if (Head->succ_size() != 2)
return false;
@@ -461,9 +462,9 @@ void SSAIfConv::replacePHIInstrs() {
DEBUG(dbgs() << "If-converting " << *PI.PHI);
unsigned DstReg = PI.PHI->getOperand(0).getReg();
TII->insertSelect(*Head, FirstTerm, HeadDL, DstReg, Cond, PI.TReg, PI.FReg);
- DEBUG(dbgs() << " --> " << *llvm::prior(FirstTerm));
+ DEBUG(dbgs() << " --> " << *std::prev(FirstTerm));
PI.PHI->eraseFromParent();
- PI.PHI = 0;
+ PI.PHI = nullptr;
}
}
@@ -482,7 +483,7 @@ void SSAIfConv::rewritePHIOperands() {
unsigned PHIDst = PI.PHI->getOperand(0).getReg();
unsigned DstReg = MRI->createVirtualRegister(MRI->getRegClass(PHIDst));
TII->insertSelect(*Head, FirstTerm, HeadDL, DstReg, Cond, PI.TReg, PI.FReg);
- DEBUG(dbgs() << " --> " << *llvm::prior(FirstTerm));
+ DEBUG(dbgs() << " --> " << *std::prev(FirstTerm));
// Rewrite PHI operands TPred -> (DstReg, Head), remove FPred.
for (unsigned i = PI.PHI->getNumOperands(); i != 1; i -= 2) {
@@ -564,7 +565,7 @@ void SSAIfConv::convertIf(SmallVectorImpl<MachineBasicBlock*> &RemovedBlocks) {
// We need a branch to Tail, let code placement work it out later.
DEBUG(dbgs() << "Converting to unconditional branch.\n");
SmallVector<MachineOperand, 0> EmptyCond;
- TII->InsertBranch(*Head, Tail, 0, EmptyCond, HeadDL);
+ TII->InsertBranch(*Head, Tail, nullptr, EmptyCond, HeadDL);
Head->addSuccessor(Tail);
}
DEBUG(dbgs() << *Head);
@@ -590,9 +591,9 @@ class EarlyIfConverter : public MachineFunctionPass {
public:
static char ID;
EarlyIfConverter() : MachineFunctionPass(ID) {}
- void getAnalysisUsage(AnalysisUsage &AU) const;
- bool runOnMachineFunction(MachineFunction &MF);
- const char *getPassName() const { return "Early If-Conversion"; }
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool runOnMachineFunction(MachineFunction &MF) override;
+ const char *getPassName() const override { return "Early If-Conversion"; }
private:
bool tryConvertIf(MachineBasicBlock*);
@@ -775,6 +776,12 @@ bool EarlyIfConverter::tryConvertIf(MachineBasicBlock *MBB) {
bool EarlyIfConverter::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** EARLY IF-CONVERSION **********\n"
<< "********** Function: " << MF.getName() << '\n');
+ // Only run if conversion if the target wants it.
+ if (!MF.getTarget()
+ .getSubtarget<TargetSubtargetInfo>()
+ .enableEarlyIfConversion())
+ return false;
+
TII = MF.getTarget().getInstrInfo();
TRI = MF.getTarget().getRegisterInfo();
SchedModel =
@@ -783,7 +790,7 @@ bool EarlyIfConverter::runOnMachineFunction(MachineFunction &MF) {
DomTree = &getAnalysis<MachineDominatorTree>();
Loops = getAnalysisIfAvailable<MachineLoopInfo>();
Traces = &getAnalysis<MachineTraceMetrics>();
- MinInstr = 0;
+ MinInstr = nullptr;
bool Changed = false;
IfConv.runOnMachineFunction(MF);
diff --git a/contrib/llvm/lib/CodeGen/EdgeBundles.cpp b/contrib/llvm/lib/CodeGen/EdgeBundles.cpp
index 3bb0465..aea7c31 100644
--- a/contrib/llvm/lib/CodeGen/EdgeBundles.cpp
+++ b/contrib/llvm/lib/CodeGen/EdgeBundles.cpp
@@ -41,9 +41,7 @@ bool EdgeBundles::runOnMachineFunction(MachineFunction &mf) {
EC.clear();
EC.grow(2 * MF->getNumBlockIDs());
- for (MachineFunction::const_iterator I = MF->begin(), E = MF->end(); I != E;
- ++I) {
- const MachineBasicBlock &MBB = *I;
+ for (const auto &MBB : *MF) {
unsigned OutE = 2 * MBB.getNumber() + 1;
// Join the outgoing bundle with the ingoing bundles of all successors.
for (MachineBasicBlock::const_succ_iterator SI = MBB.succ_begin(),
@@ -69,29 +67,31 @@ bool EdgeBundles::runOnMachineFunction(MachineFunction &mf) {
return false;
}
-/// view - Visualize the annotated bipartite CFG with Graphviz.
-void EdgeBundles::view() const {
- ViewGraph(*this, "EdgeBundles");
-}
-
/// Specialize WriteGraph, the standard implementation won't work.
-raw_ostream &llvm::WriteGraph(raw_ostream &O, const EdgeBundles &G,
- bool ShortNames,
- const Twine &Title) {
+namespace llvm {
+template<>
+raw_ostream &WriteGraph<>(raw_ostream &O, const EdgeBundles &G,
+ bool ShortNames,
+ const Twine &Title) {
const MachineFunction *MF = G.getMachineFunction();
O << "digraph {\n";
- for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
- I != E; ++I) {
- unsigned BB = I->getNumber();
+ for (const auto &MBB : *MF) {
+ unsigned BB = MBB.getNumber();
O << "\t\"BB#" << BB << "\" [ shape=box ]\n"
<< '\t' << G.getBundle(BB, false) << " -> \"BB#" << BB << "\"\n"
<< "\t\"BB#" << BB << "\" -> " << G.getBundle(BB, true) << '\n';
- for (MachineBasicBlock::const_succ_iterator SI = I->succ_begin(),
- SE = I->succ_end(); SI != SE; ++SI)
+ for (MachineBasicBlock::const_succ_iterator SI = MBB.succ_begin(),
+ SE = MBB.succ_end(); SI != SE; ++SI)
O << "\t\"BB#" << BB << "\" -> \"BB#" << (*SI)->getNumber()
<< "\" [ color=lightgray ]\n";
}
O << "}\n";
return O;
}
+}
+
+/// view - Visualize the annotated bipartite CFG with Graphviz.
+void EdgeBundles::view() const {
+ ViewGraph(*this, "EdgeBundles");
+}
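
To make the relocated WriteGraph specialization above a little more concrete: it emits Graphviz text in which each block links its ingoing bundle to the block and the block to its outgoing bundle, plus light-gray CFG edges. A small sketch that prints output of the same shape for a made-up two-block function (block and bundle numbers here are invented for illustration):

// Emits DOT text of the same shape as the WriteGraph specialization above.
#include <cstdio>

int main() {
  const int NumBlocks = 2;
  const int InBundle[NumBlocks]  = {0, 2};
  const int OutBundle[NumBlocks] = {1, 3};
  const int Succ[NumBlocks]      = {1, -1};   // BB#0 -> BB#1; BB#1 has none

  std::printf("digraph {\n");
  for (int BB = 0; BB < NumBlocks; ++BB) {
    std::printf("\t\"BB#%d\" [ shape=box ]\n", BB);
    std::printf("\t%d -> \"BB#%d\"\n", InBundle[BB], BB);
    std::printf("\t\"BB#%d\" -> %d\n", BB, OutBundle[BB]);
    if (Succ[BB] >= 0)
      std::printf("\t\"BB#%d\" -> \"BB#%d\" [ color=lightgray ]\n",
                  BB, Succ[BB]);
  }
  std::printf("}\n");
  return 0;
}
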
diff --git a/contrib/llvm/lib/CodeGen/ErlangGC.cpp b/contrib/llvm/lib/CodeGen/ErlangGC.cpp
index 8a1e2d9..e976d7f 100644
--- a/contrib/llvm/lib/CodeGen/ErlangGC.cpp
+++ b/contrib/llvm/lib/CodeGen/ErlangGC.cpp
@@ -32,7 +32,7 @@ namespace {
DebugLoc DL) const;
public:
ErlangGC();
- bool findCustomSafePoints(GCFunctionInfo &FI, MachineFunction &MF);
+ bool findCustomSafePoints(GCFunctionInfo &FI, MachineFunction &MF) override;
};
}
diff --git a/contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp b/contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp
index 031f19c..cf55b68 100644
--- a/contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp
+++ b/contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp
@@ -20,10 +20,9 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "execution-fix"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/PostOrderIterator.h"
-#include "llvm/CodeGen/LiveRegUnits.h"
+#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Allocator.h"
@@ -33,6 +32,8 @@
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
+#define DEBUG_TYPE "execution-fix"
+
/// A DomainValue is a bit like LiveIntervals' ValNo, but it also keeps track
/// of execution domains.
///
@@ -100,7 +101,7 @@ struct DomainValue {
// Clear this DomainValue and point to next which has all its data.
void clear() {
AvailableDomains = 0;
- Next = 0;
+ Next = nullptr;
Instrs.clear();
}
};
@@ -141,7 +142,7 @@ class ExeDepsFix : public MachineFunctionPass {
std::vector<std::pair<MachineInstr*, unsigned> > UndefReads;
/// Storage for register unit liveness.
- LiveRegUnits LiveUnits;
+ LivePhysRegs LiveRegSet;
/// Current instruction number.
/// The first instruction in each basic block is 0.
@@ -155,14 +156,14 @@ public:
ExeDepsFix(const TargetRegisterClass *rc)
: MachineFunctionPass(ID), RC(rc), NumRegs(RC->getNumRegs()) {}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Execution dependency fix";
}
@@ -275,7 +276,7 @@ void ExeDepsFix::kill(int rx) {
return;
release(LiveRegs[rx].Value);
- LiveRegs[rx].Value = 0;
+ LiveRegs[rx].Value = nullptr;
}
/// Force register rx into domain.
@@ -352,7 +353,7 @@ void ExeDepsFix::enterBasicBlock(MachineBasicBlock *MBB) {
// Set up UndefReads to track undefined register reads.
UndefReads.clear();
- LiveUnits.clear();
+ LiveRegSet.clear();
// Set up LiveRegs to represent registers entering MBB.
if (!LiveRegs)
@@ -360,7 +361,7 @@ void ExeDepsFix::enterBasicBlock(MachineBasicBlock *MBB) {
// Default values are 'nothing happened a long time ago'.
for (unsigned rx = 0; rx != NumRegs; ++rx) {
- LiveRegs[rx].Value = 0;
+ LiveRegs[rx].Value = nullptr;
LiveRegs[rx].Def = -(1 << 20);
}
@@ -404,7 +405,7 @@ void ExeDepsFix::enterBasicBlock(MachineBasicBlock *MBB) {
// We have a live DomainValue from more than one predecessor.
if (LiveRegs[rx].Value->isCollapsed()) {
- // We are already collapsed, but predecessor is not. Force him.
+ // We are already collapsed, but predecessor is not. Force it.
unsigned Domain = LiveRegs[rx].Value->getFirstDomain();
if (!pdv->isCollapsed() && pdv->hasDomain(Domain))
collapse(pdv, Domain);
@@ -440,7 +441,7 @@ void ExeDepsFix::leaveBasicBlock(MachineBasicBlock *MBB) {
release(LiveRegs[i].Value);
delete[] LiveRegs;
}
- LiveRegs = 0;
+ LiveRegs = nullptr;
}
void ExeDepsFix::visitInstr(MachineInstr *MI) {
@@ -547,21 +548,19 @@ void ExeDepsFix::processUndefReads(MachineBasicBlock *MBB) {
return;
// Collect this block's live out register units.
- LiveUnits.init(TRI);
- for (MachineBasicBlock::const_succ_iterator SI = MBB->succ_begin(),
- SE = MBB->succ_end(); SI != SE; ++SI) {
- LiveUnits.addLiveIns(*SI, *TRI);
- }
+ LiveRegSet.init(TRI);
+ LiveRegSet.addLiveOuts(MBB);
+
MachineInstr *UndefMI = UndefReads.back().first;
unsigned OpIdx = UndefReads.back().second;
for (MachineBasicBlock::reverse_iterator I = MBB->rbegin(), E = MBB->rend();
I != E; ++I) {
- // Update liveness, including the current instrucion's defs.
- LiveUnits.stepBackward(*I, *TRI);
+ // Update liveness, including the current instruction's defs.
+ LiveRegSet.stepBackward(*I);
if (UndefMI == &*I) {
- if (!LiveUnits.contains(UndefMI->getOperand(OpIdx).getReg(), *TRI))
+ if (!LiveRegSet.contains(UndefMI->getOperand(OpIdx).getReg()))
TII->breakPartialRegDependency(UndefMI, OpIdx, TRI);
UndefReads.pop_back();
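
The addLiveOuts/stepBackward pair used above is a standard backward liveness walk: start from the block's live-out set, then for each instruction remove its defs and add its uses to obtain the set live before it. A toy sketch of that walk under simplified assumptions (Instr, stepBackward and the register numbers are illustrative, not the LivePhysRegs API):

// Toy backward liveness walk in the spirit of the code above: each step
// removes the instruction's defs and adds its uses, yielding the registers
// live immediately *before* that instruction.
#include <cassert>
#include <set>
#include <vector>

struct Instr { std::set<int> Defs, Uses; };

void stepBackward(std::set<int> &Live, const Instr &I) {
  for (int D : I.Defs) Live.erase(D);
  for (int U : I.Uses) Live.insert(U);
}

int main() {
  // Block:  I0: R1 = R2 + R3;   I1: R4 = R1;   live-out = {R4}
  std::vector<Instr> Block = {{{1}, {2, 3}}, {{4}, {1}}};
  std::set<int> Live = {4};                    // live-outs of the block
  stepBackward(Live, Block[1]);                // before I1: {R1}
  assert(Live == std::set<int>({1}));
  stepBackward(Live, Block[0]);                // before I0: {R2, R3}
  assert(Live == std::set<int>({2, 3}));
  return 0;
}
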
@@ -666,7 +665,7 @@ void ExeDepsFix::visitSoftInstr(MachineInstr *mi, unsigned mask) {
// doms are now sorted in order of appearance. Try to merge them all, giving
// priority to the latest ones.
- DomainValue *dv = 0;
+ DomainValue *dv = nullptr;
while (!Regs.empty()) {
if (!dv) {
dv = Regs.pop_back_val().Value;
@@ -716,7 +715,7 @@ bool ExeDepsFix::runOnMachineFunction(MachineFunction &mf) {
MF = &mf;
TII = MF->getTarget().getInstrInfo();
TRI = MF->getTarget().getRegisterInfo();
- LiveRegs = 0;
+ LiveRegs = nullptr;
assert(NumRegs == RC->getNumRegs() && "Bad regclass");
DEBUG(dbgs() << "********** FIX EXECUTION DEPENDENCIES: "
diff --git a/contrib/llvm/lib/CodeGen/ExpandISelPseudos.cpp b/contrib/llvm/lib/CodeGen/ExpandISelPseudos.cpp
index b2b6882..90b62b5 100644
--- a/contrib/llvm/lib/CodeGen/ExpandISelPseudos.cpp
+++ b/contrib/llvm/lib/CodeGen/ExpandISelPseudos.cpp
@@ -14,7 +14,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "expand-isel-pseudos"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -23,6 +22,8 @@
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
+#define DEBUG_TYPE "expand-isel-pseudos"
+
namespace {
class ExpandISelPseudos : public MachineFunctionPass {
public:
@@ -30,9 +31,9 @@ namespace {
ExpandISelPseudos() : MachineFunctionPass(ID) {}
private:
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
MachineFunctionPass::getAnalysisUsage(AU);
}
};
diff --git a/contrib/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp b/contrib/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp
index 6c73fff..8969bcc 100644
--- a/contrib/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp
+++ b/contrib/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "postrapseudos"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
@@ -25,6 +24,8 @@
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "postrapseudos"
+
namespace {
struct ExpandPostRA : public MachineFunctionPass {
private:
@@ -35,7 +36,7 @@ public:
static char ID; // Pass identification, replacement for typeid
ExpandPostRA() : MachineFunctionPass(ID) {}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addPreservedID(MachineLoopInfoID);
AU.addPreservedID(MachineDominatorsID);
@@ -43,7 +44,7 @@ public:
}
/// runOnMachineFunction - pass entry point
- bool runOnMachineFunction(MachineFunction&);
+ bool runOnMachineFunction(MachineFunction&) override;
private:
bool LowerSubregToReg(MachineInstr *MI);
diff --git a/contrib/llvm/lib/CodeGen/GCMetadata.cpp b/contrib/llvm/lib/CodeGen/GCMetadata.cpp
index ef5247c..c3e4f3e 100644
--- a/contrib/llvm/lib/CodeGen/GCMetadata.cpp
+++ b/contrib/llvm/lib/CodeGen/GCMetadata.cpp
@@ -32,12 +32,12 @@ namespace {
public:
explicit Printer(raw_ostream &OS) : FunctionPass(ID), OS(OS) {}
-
- const char *getPassName() const;
- void getAnalysisUsage(AnalysisUsage &AU) const;
-
- bool runOnFunction(Function &F);
- bool doFinalization(Module &M);
+
+ const char *getPassName() const override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ bool runOnFunction(Function &F) override;
+ bool doFinalization(Module &M) override;
};
}
@@ -61,10 +61,6 @@ GCModuleInfo::GCModuleInfo()
initializeGCModuleInfoPass(*PassRegistry::getPassRegistry());
}
-GCModuleInfo::~GCModuleInfo() {
- clear();
-}
-
GCStrategy *GCModuleInfo::getOrCreateStrategy(const Module *M,
const std::string &Name) {
strategy_map_type::iterator NMI = StrategyMap.find(Name);
@@ -74,17 +70,17 @@ GCStrategy *GCModuleInfo::getOrCreateStrategy(const Module *M,
for (GCRegistry::iterator I = GCRegistry::begin(),
E = GCRegistry::end(); I != E; ++I) {
if (Name == I->getName()) {
- GCStrategy *S = I->instantiate();
+ std::unique_ptr<GCStrategy> S = I->instantiate();
S->M = M;
S->Name = Name;
- StrategyMap.GetOrCreateValue(Name).setValue(S);
- StrategyList.push_back(S);
- return S;
+ StrategyMap.GetOrCreateValue(Name).setValue(S.get());
+ StrategyList.push_back(std::move(S));
+ return StrategyList.back().get();
}
}
dbgs() << "unsupported GC: " << Name << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
GCFunctionInfo &GCModuleInfo::getFunctionInfo(const Function &F) {
@@ -104,9 +100,6 @@ GCFunctionInfo &GCModuleInfo::getFunctionInfo(const Function &F) {
void GCModuleInfo::clear() {
FInfoMap.clear();
StrategyMap.clear();
-
- for (iterator I = begin(), E = end(); I != E; ++I)
- delete *I;
StrategyList.clear();
}
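
The change above (and the matching one in GCStrategy.cpp below) moves to a common ownership pattern: a vector of std::unique_ptr owns the objects while a name-to-raw-pointer map provides non-owning lookup, so clear() needs no manual deletes. A minimal sketch of that pattern under simplified assumptions (Strategy, Registry and getOrCreate are illustrative names, not the GC registry API):

// Owning container + non-owning index, as the GC registry adopts above.
#include <cassert>
#include <map>
#include <memory>
#include <string>
#include <vector>

struct Strategy { std::string Name; };

struct Registry {
  std::map<std::string, Strategy *> ByName;        // non-owning lookup
  std::vector<std::unique_ptr<Strategy>> Owned;    // owns the objects

  Strategy *getOrCreate(const std::string &Name) {
    if (Strategy *S = ByName[Name])
      return S;
    auto S = std::make_unique<Strategy>();
    S->Name = Name;
    ByName[Name] = S.get();                        // index keeps a raw pointer
    Owned.push_back(std::move(S));                 // ownership moves here
    return Owned.back().get();
  }
};

int main() {
  Registry R;
  Strategy *A = R.getOrCreate("shadow-stack");
  assert(A == R.getOrCreate("shadow-stack"));      // second call reuses it
  assert(R.Owned.size() == 1);
  return 0;                                        // unique_ptrs clean up here
}
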
diff --git a/contrib/llvm/lib/CodeGen/GCStrategy.cpp b/contrib/llvm/lib/CodeGen/GCStrategy.cpp
index 1173d11..1fdff6b 100644
--- a/contrib/llvm/lib/CodeGen/GCStrategy.cpp
+++ b/contrib/llvm/lib/CodeGen/GCStrategy.cpp
@@ -16,13 +16,12 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GCStrategy.h"
-#include "llvm/Analysis/DominatorInternals.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
@@ -53,11 +52,11 @@ namespace {
static char ID;
LowerIntrinsics();
- const char *getPassName() const;
- void getAnalysisUsage(AnalysisUsage &AU) const;
+ const char *getPassName() const override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
- bool doInitialization(Module &M);
- bool runOnFunction(Function &F);
+ bool doInitialization(Module &M) override;
+ bool runOnFunction(Function &F) override;
};
@@ -83,9 +82,9 @@ namespace {
static char ID;
GCMachineCodeAnalysis();
- void getAnalysisUsage(AnalysisUsage &AU) const;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
- bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
};
}
@@ -102,13 +101,6 @@ GCStrategy::GCStrategy() :
UsesMetadata(false)
{}
-GCStrategy::~GCStrategy() {
- for (iterator I = begin(), E = end(); I != E; ++I)
- delete *I;
-
- Functions.clear();
-}
-
bool GCStrategy::initializeCustomLowering(Module &M) { return false; }
bool GCStrategy::performCustomLowering(Function &F) {
@@ -119,14 +111,13 @@ bool GCStrategy::performCustomLowering(Function &F) {
bool GCStrategy::findCustomSafePoints(GCFunctionInfo& FI, MachineFunction &F) {
dbgs() << "gc " << getName() << " must override findCustomSafePoints.\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
GCFunctionInfo *GCStrategy::insertFunctionInfo(const Function &F) {
- GCFunctionInfo *FI = new GCFunctionInfo(F, *this);
- Functions.push_back(FI);
- return FI;
+ Functions.push_back(make_unique<GCFunctionInfo>(F, *this));
+ return Functions.back().get();
}
// -----------------------------------------------------------------------------
@@ -154,7 +145,7 @@ const char *LowerIntrinsics::getPassName() const {
void LowerIntrinsics::getAnalysisUsage(AnalysisUsage &AU) const {
FunctionPass::getAnalysisUsage(AU);
AU.addRequired<GCModuleInfo>();
- AU.addPreserved<DominatorTree>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
}
/// doInitialization - If this module uses the GC intrinsics, find them now.
@@ -271,8 +262,9 @@ bool LowerIntrinsics::runOnFunction(Function &F) {
// Custom lowering may modify the CFG, so dominators must be recomputed.
if (UseCustomLoweringPass) {
- if (DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>())
- DT->DT->recalculate(F);
+ if (DominatorTreeWrapperPass *DTWP =
+ getAnalysisIfAvailable<DominatorTreeWrapperPass>())
+ DTWP->getDomTree().recalculate(F);
}
return MadeChange;
diff --git a/contrib/llvm/lib/Transforms/Scalar/GlobalMerge.cpp b/contrib/llvm/lib/CodeGen/GlobalMerge.cpp
index 954e545..5572a06 100644
--- a/contrib/llvm/lib/Transforms/Scalar/GlobalMerge.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalMerge.cpp
@@ -51,7 +51,6 @@
// note that we saved 2 registers here almost "for free".
// ===---------------------------------------------------------------------===//
-#define DEBUG_TYPE "global-merge"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
@@ -65,16 +64,31 @@
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
using namespace llvm;
+#define DEBUG_TYPE "global-merge"
+
+static cl::opt<bool>
+EnableGlobalMerge("enable-global-merge", cl::Hidden,
+ cl::desc("Enable global merge pass"),
+ cl::init(true));
+
static cl::opt<bool>
EnableGlobalMergeOnConst("global-merge-on-const", cl::Hidden,
cl::desc("Enable global merge pass on constants"),
cl::init(false));
+// FIXME: this could be a transitional option, and we probably need to remove
+// it once we are sure this optimization always benefits all targets.
+static cl::opt<bool>
+EnableGlobalMergeOnExternal("global-merge-on-external", cl::Hidden,
+ cl::desc("Enable global merge pass on external linkage"),
+ cl::init(false));
+
STATISTIC(NumMerged , "Number of globals merged");
namespace {
class GlobalMerge : public FunctionPass {
@@ -102,48 +116,34 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid.
- explicit GlobalMerge(const TargetMachine *TM = 0)
+ explicit GlobalMerge(const TargetMachine *TM = nullptr)
: FunctionPass(ID), TM(TM) {
initializeGlobalMergePass(*PassRegistry::getPassRegistry());
}
- virtual bool doInitialization(Module &M);
- virtual bool runOnFunction(Function &F);
- virtual bool doFinalization(Module &M);
+ bool doInitialization(Module &M) override;
+ bool runOnFunction(Function &F) override;
+ bool doFinalization(Module &M) override;
- const char *getPassName() const {
+ const char *getPassName() const override {
return "Merge internal globals";
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
FunctionPass::getAnalysisUsage(AU);
}
-
- struct GlobalCmp {
- const DataLayout *TD;
-
- GlobalCmp(const DataLayout *td) : TD(td) { }
-
- bool operator()(const GlobalVariable *GV1, const GlobalVariable *GV2) {
- Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
- Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();
-
- return (TD->getTypeAllocSize(Ty1) < TD->getTypeAllocSize(Ty2));
- }
- };
};
} // end anonymous namespace
char GlobalMerge::ID = 0;
-INITIALIZE_PASS(GlobalMerge, "global-merge",
- "Global Merge", false, false)
-
+INITIALIZE_TM_PASS(GlobalMerge, "global-merge", "Merge global variables",
+ false, false)
bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
Module &M, bool isConst, unsigned AddrSpace) const {
const TargetLowering *TLI = TM->getTargetLowering();
- const DataLayout *TD = TLI->getDataLayout();
+ const DataLayout *DL = TLI->getDataLayout();
// FIXME: Infer the maximum possible offset depending on the actual users
// (these max offsets are different for the users inside Thumb or ARM
@@ -151,33 +151,70 @@ bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
unsigned MaxOffset = TLI->getMaximalGlobalOffset();
// FIXME: Find better heuristics
- std::stable_sort(Globals.begin(), Globals.end(), GlobalCmp(TD));
+ std::stable_sort(Globals.begin(), Globals.end(),
+ [DL](const GlobalVariable *GV1, const GlobalVariable *GV2) {
+ Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
+ Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();
+
+ return (DL->getTypeAllocSize(Ty1) < DL->getTypeAllocSize(Ty2));
+ });
Type *Int32Ty = Type::getInt32Ty(M.getContext());
+ assert(Globals.size() > 1);
+
+  // FIXME: This simple solution merges as many globals together as possible.
+  // However, it makes it hard to remove dead global symbols at link time. An
+  // alternative would be to examine global symbol references function by
+  // function, merging only the symbols referenced in the same function, which
+  // would probably require a heuristic to resolve merge conflicts between
+  // different functions.
for (size_t i = 0, e = Globals.size(); i != e; ) {
size_t j = 0;
uint64_t MergedSize = 0;
std::vector<Type*> Tys;
std::vector<Constant*> Inits;
+
+ bool HasExternal = false;
+ GlobalVariable *TheFirstExternal = 0;
for (j = i; j != e; ++j) {
Type *Ty = Globals[j]->getType()->getElementType();
- MergedSize += TD->getTypeAllocSize(Ty);
+ MergedSize += DL->getTypeAllocSize(Ty);
if (MergedSize > MaxOffset) {
break;
}
Tys.push_back(Ty);
Inits.push_back(Globals[j]->getInitializer());
+
+ if (Globals[j]->hasExternalLinkage() && !HasExternal) {
+ HasExternal = true;
+ TheFirstExternal = Globals[j];
+ }
}
+    // If the merged variables don't have external linkage, we needn't expose
+    // the symbol after merging.
+ GlobalValue::LinkageTypes Linkage = HasExternal
+ ? GlobalValue::ExternalLinkage
+ : GlobalValue::InternalLinkage;
+
StructType *MergedTy = StructType::get(M.getContext(), Tys);
Constant *MergedInit = ConstantStruct::get(MergedTy, Inits);
- GlobalVariable *MergedGV = new GlobalVariable(M, MergedTy, isConst,
- GlobalValue::InternalLinkage,
- MergedInit, "_MergedGlobals",
- 0, GlobalVariable::NotThreadLocal,
- AddrSpace);
+
+    // If the merged variables have external linkage, we use the symbol name of
+    // the first merged variable as the suffix of the merged global's name.
+    // This avoids link-time naming conflicts for global symbols.
+ GlobalVariable *MergedGV = new GlobalVariable(
+ M, MergedTy, isConst, Linkage, MergedInit,
+ HasExternal ? "_MergedGlobals_" + TheFirstExternal->getName()
+ : "_MergedGlobals",
+ nullptr, GlobalVariable::NotThreadLocal, AddrSpace);
+
for (size_t k = i; k < j; ++k) {
+ GlobalValue::LinkageTypes Linkage = Globals[k]->getLinkage();
+ std::string Name = Globals[k]->getName();
+
Constant *Idx[2] = {
ConstantInt::get(Int32Ty, 0),
ConstantInt::get(Int32Ty, k-i)
@@ -185,6 +222,14 @@ bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
Constant *GEP = ConstantExpr::getInBoundsGetElementPtr(MergedGV, Idx);
Globals[k]->replaceAllUsesWith(GEP);
Globals[k]->eraseFromParent();
+
+ if (Linkage != GlobalValue::InternalLinkage) {
+ // Generate a new alias...
+ auto *PTy = cast<PointerType>(GEP->getType());
+ GlobalAlias::create(PTy->getElementType(), PTy->getAddressSpace(),
+ Linkage, Name, GEP, &M);
+ }
+
NumMerged++;
}
i = j;
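
The loop above packs consecutive (size-sorted) globals into one merged struct as long as the running size stays within MaxOffset; `i = j;` then starts the next group. A toy version of that grouping, under the simplifying assumption that globals are just their allocation sizes and each is individually smaller than MaxOffset (as doInitialization guarantees); MaxOffset and the sizes here are made up:

// Toy version of the grouping loop above: greedily pack size-sorted globals
// into runs whose cumulative size stays within MaxOffset; each run becomes
// one merged struct.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const uint64_t MaxOffset = 8;                    // illustrative target limit
  std::vector<uint64_t> Sizes = {2, 2, 4, 4, 7};   // sorted ascending

  for (size_t i = 0, e = Sizes.size(); i != e; ) {
    size_t j = i;
    uint64_t GroupSize = 0;
    while (j != e && GroupSize + Sizes[j] <= MaxOffset) {
      GroupSize += Sizes[j];
      ++j;
    }
    std::printf("merge globals [%zu, %zu) into one struct of %llu bytes\n",
                i, j, (unsigned long long)GroupSize);
    i = j;                                         // next run starts here
  }
  return 0;
}
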
@@ -214,7 +259,7 @@ void GlobalMerge::setMustKeepGlobalVariables(Module &M) {
++IFn) {
for (Function::iterator IBB = IFn->begin(), IEndBB = IFn->end();
IBB != IEndBB; ++IBB) {
- // Follow the inwoke link to find the landing pad instruction
+ // Follow the invoke link to find the landing pad instruction
const InvokeInst *II = dyn_cast<InvokeInst>(IBB->getTerminator());
if (!II) continue;
@@ -231,10 +276,13 @@ void GlobalMerge::setMustKeepGlobalVariables(Module &M) {
}
bool GlobalMerge::doInitialization(Module &M) {
+ if (!EnableGlobalMerge)
+ return false;
+
DenseMap<unsigned, SmallVector<GlobalVariable*, 16> > Globals, ConstGlobals,
BSSGlobals;
const TargetLowering *TLI = TM->getTargetLowering();
- const DataLayout *TD = TLI->getDataLayout();
+ const DataLayout *DL = TLI->getDataLayout();
unsigned MaxOffset = TLI->getMaximalGlobalOffset();
bool Changed = false;
setMustKeepGlobalVariables(M);
@@ -242,8 +290,12 @@ bool GlobalMerge::doInitialization(Module &M) {
// Grab all non-const globals.
for (Module::global_iterator I = M.global_begin(),
E = M.global_end(); I != E; ++I) {
- // Merge is safe for "normal" internal globals only
- if (!I->hasLocalLinkage() || I->isThreadLocal() || I->hasSection())
+ // Merge is safe for "normal" internal or external globals only
+ if (I->isDeclaration() || I->isThreadLocal() || I->hasSection())
+ continue;
+
+ if (!(EnableGlobalMergeOnExternal && I->hasExternalLinkage()) &&
+ !I->hasInternalLinkage())
continue;
PointerType *PT = dyn_cast<PointerType>(I->getType());
@@ -252,9 +304,9 @@ bool GlobalMerge::doInitialization(Module &M) {
unsigned AddressSpace = PT->getAddressSpace();
// Ignore fancy-aligned globals for now.
- unsigned Alignment = TD->getPreferredAlignment(I);
+ unsigned Alignment = DL->getPreferredAlignment(I);
Type *Ty = I->getType()->getElementType();
- if (Alignment > TD->getABITypeAlignment(Ty))
+ if (Alignment > DL->getABITypeAlignment(Ty))
continue;
// Ignore all 'special' globals.
@@ -266,9 +318,8 @@ bool GlobalMerge::doInitialization(Module &M) {
if (isMustKeepGlobalVariable(I))
continue;
- if (TD->getTypeAllocSize(Ty) < MaxOffset) {
- if (TargetLoweringObjectFile::getKindForGlobal(I, TLI->getTargetMachine())
- .isBSSLocal())
+ if (DL->getTypeAllocSize(Ty) < MaxOffset) {
+ if (TargetLoweringObjectFile::getKindForGlobal(I, *TM).isBSSLocal())
BSSGlobals[AddressSpace].push_back(I);
else if (I->isConstant())
ConstGlobals[AddressSpace].push_back(I);
diff --git a/contrib/llvm/lib/CodeGen/IfConversion.cpp b/contrib/llvm/lib/CodeGen/IfConversion.cpp
index e2d0eb4..1502d5f 100644
--- a/contrib/llvm/lib/CodeGen/IfConversion.cpp
+++ b/contrib/llvm/lib/CodeGen/IfConversion.cpp
@@ -11,19 +11,18 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "ifcvt"
#include "llvm/CodeGen/Passes.h"
#include "BranchFolding.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
-#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -37,6 +36,8 @@
using namespace llvm;
+#define DEBUG_TYPE "ifcvt"
+
// Hidden options for help debugging.
static cl::opt<int> IfCvtFnStart("ifcvt-fn-start", cl::init(-1), cl::Hidden);
static cl::opt<int> IfCvtFnStop("ifcvt-fn-stop", cl::init(-1), cl::Hidden);
@@ -127,7 +128,8 @@ namespace {
IsAnalyzed(false), IsEnqueued(false), IsBrAnalyzable(false),
HasFallThrough(false), IsUnpredicable(false),
CannotBeCopied(false), ClobbersPred(false), NonPredSize(0),
- ExtraCost(0), ExtraCost2(0), BB(0), TrueBB(0), FalseBB(0) {}
+ ExtraCost(0), ExtraCost2(0), BB(nullptr), TrueBB(nullptr),
+ FalseBB(nullptr) {}
};
/// IfcvtToken - Record information about pending if-conversions to attempt:
@@ -162,8 +164,8 @@ namespace {
const MachineBranchProbabilityInfo *MBPI;
MachineRegisterInfo *MRI;
- LiveRegUnits Redefs;
- LiveRegUnits DontKill;
+ LivePhysRegs Redefs;
+ LivePhysRegs DontKill;
bool PreRegAlloc;
bool MadeChange;
@@ -174,12 +176,12 @@ namespace {
initializeIfConverterPass(*PassRegistry::getPassRegistry());
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MachineBranchProbabilityInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
private:
bool ReverseBranchCondition(BBInfo &BBI);
@@ -205,7 +207,7 @@ namespace {
void PredicateBlock(BBInfo &BBI,
MachineBasicBlock::iterator E,
SmallVectorImpl<MachineOperand> &Cond,
- SmallSet<unsigned, 4> *LaterRedefs = 0);
+ SmallSet<unsigned, 4> *LaterRedefs = nullptr);
void CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
SmallVectorImpl<MachineOperand> &Cond,
bool IgnoreBr = false);
@@ -230,7 +232,7 @@ namespace {
// blockAlwaysFallThrough - Block ends without a terminator.
bool blockAlwaysFallThrough(BBInfo &BBI) const {
- return BBI.IsBrAnalyzable && BBI.TrueBB == NULL;
+ return BBI.IsBrAnalyzable && BBI.TrueBB == nullptr;
}
// IfcvtTokenCmp - Used to sort if-conversion candidates.
@@ -438,7 +440,7 @@ static MachineBasicBlock *findFalseBlock(MachineBasicBlock *BB,
if (SuccBB != TrueBB)
return SuccBB;
}
- return NULL;
+ return nullptr;
}
/// ReverseBranchCondition - Reverse the condition of the end of the block
@@ -460,7 +462,7 @@ static inline MachineBasicBlock *getNextBlock(MachineBasicBlock *BB) {
MachineFunction::iterator I = BB;
MachineFunction::iterator E = BB->getParent()->end();
if (++I == E)
- return NULL;
+ return nullptr;
return I;
}
@@ -551,7 +553,7 @@ bool IfConverter::ValidDiamond(BBInfo &TrueBBI, BBInfo &FalseBBI,
FT = getNextBlock(FalseBBI.BB);
if (TT != FT)
return false;
- if (TT == NULL && (TrueBBI.IsBrAnalyzable || FalseBBI.IsBrAnalyzable))
+ if (!TT && (TrueBBI.IsBrAnalyzable || FalseBBI.IsBrAnalyzable))
return false;
if (TrueBBI.BB->pred_size() > 1 || FalseBBI.BB->pred_size() > 1)
return false;
@@ -641,11 +643,11 @@ void IfConverter::ScanInstructions(BBInfo &BBI) {
bool AlreadyPredicated = !BBI.Predicate.empty();
// First analyze the end of BB branches.
- BBI.TrueBB = BBI.FalseBB = NULL;
+ BBI.TrueBB = BBI.FalseBB = nullptr;
BBI.BrCond.clear();
BBI.IsBrAnalyzable =
!TII->AnalyzeBranch(*BBI.BB, BBI.TrueBB, BBI.FalseBB, BBI.BrCond);
- BBI.HasFallThrough = BBI.IsBrAnalyzable && BBI.FalseBB == NULL;
+ BBI.HasFallThrough = BBI.IsBrAnalyzable && BBI.FalseBB == nullptr;
if (BBI.BrCond.size()) {
// No false branch. This BB must end with a conditional branch and a
@@ -921,7 +923,7 @@ void IfConverter::AnalyzeBlocks(MachineFunction &MF,
/// next block).
static bool canFallThroughTo(MachineBasicBlock *BB, MachineBasicBlock *ToBB) {
MachineFunction::iterator PI = BB;
- MachineFunction::iterator I = llvm::next(PI);
+ MachineFunction::iterator I = std::next(PI);
MachineFunction::iterator TI = ToBB;
MachineFunction::iterator E = BB->getParent()->end();
while (I != TI) {
@@ -954,13 +956,13 @@ static void InsertUncondBranch(MachineBasicBlock *BB, MachineBasicBlock *ToBB,
const TargetInstrInfo *TII) {
DebugLoc dl; // FIXME: this is nowhere
SmallVector<MachineOperand, 0> NoCond;
- TII->InsertBranch(*BB, ToBB, NULL, NoCond, dl);
+ TII->InsertBranch(*BB, ToBB, nullptr, NoCond, dl);
}
/// RemoveExtraEdges - Remove true / false edges if either / both are no longer
/// successors.
void IfConverter::RemoveExtraEdges(BBInfo &BBI) {
- MachineBasicBlock *TBB = NULL, *FBB = NULL;
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
if (!TII->AnalyzeBranch(*BBI.BB, TBB, FBB, Cond))
BBI.BB->CorrectExtraCFGEdges(TBB, FBB, !Cond.empty());
@@ -968,23 +970,22 @@ void IfConverter::RemoveExtraEdges(BBInfo &BBI) {
/// Behaves like LiveRegUnits::StepForward() but also adds implicit uses to all
/// values defined in MI which are not live/used by MI.
-static void UpdatePredRedefs(MachineInstr *MI, LiveRegUnits &Redefs,
- const TargetRegisterInfo *TRI) {
+static void UpdatePredRedefs(MachineInstr *MI, LivePhysRegs &Redefs) {
for (ConstMIBundleOperands Ops(MI); Ops.isValid(); ++Ops) {
if (!Ops->isReg() || !Ops->isKill())
continue;
unsigned Reg = Ops->getReg();
if (Reg == 0)
continue;
- Redefs.removeReg(Reg, *TRI);
+ Redefs.removeReg(Reg);
}
for (MIBundleOperands Ops(MI); Ops.isValid(); ++Ops) {
if (!Ops->isReg() || !Ops->isDef())
continue;
unsigned Reg = Ops->getReg();
- if (Reg == 0 || Redefs.contains(Reg, *TRI))
+ if (Reg == 0 || Redefs.contains(Reg))
continue;
- Redefs.addReg(Reg, *TRI);
+ Redefs.addReg(Reg);
MachineOperand &Op = *Ops;
MachineInstr *MI = Op.getParent();
@@ -996,12 +997,11 @@ static void UpdatePredRedefs(MachineInstr *MI, LiveRegUnits &Redefs,
/**
* Remove kill flags from operands with a register in the @p DontKill set.
*/
-static void RemoveKills(MachineInstr &MI, const LiveRegUnits &DontKill,
- const MCRegisterInfo &MCRI) {
+static void RemoveKills(MachineInstr &MI, const LivePhysRegs &DontKill) {
for (MIBundleOperands O(&MI); O.isValid(); ++O) {
if (!O->isReg() || !O->isKill())
continue;
- if (DontKill.contains(O->getReg(), MCRI))
+ if (DontKill.contains(O->getReg()))
O->setIsKill(false);
}
}
@@ -1012,10 +1012,10 @@ static void RemoveKills(MachineInstr &MI, const LiveRegUnits &DontKill,
*/
static void RemoveKills(MachineBasicBlock::iterator I,
MachineBasicBlock::iterator E,
- const LiveRegUnits &DontKill,
+ const LivePhysRegs &DontKill,
const MCRegisterInfo &MCRI) {
for ( ; I != E; ++I)
- RemoveKills(*I, DontKill, MCRI);
+ RemoveKills(*I, DontKill);
}
/// IfConvertSimple - If convert a simple (split, no rejoin) sub-CFG.
@@ -1049,13 +1049,13 @@ bool IfConverter::IfConvertSimple(BBInfo &BBI, IfcvtKind Kind) {
// Initialize liveins to the first BB. These are potentially redefined by
// predicated instructions.
Redefs.init(TRI);
- Redefs.addLiveIns(CvtBBI->BB, *TRI);
- Redefs.addLiveIns(NextBBI->BB, *TRI);
+ Redefs.addLiveIns(CvtBBI->BB);
+ Redefs.addLiveIns(NextBBI->BB);
// Compute a set of registers which must not be killed by instructions in
// BB1: This is everything live-in to BB2.
DontKill.init(TRI);
- DontKill.addLiveIns(NextBBI->BB, *TRI);
+ DontKill.addLiveIns(NextBBI->BB);
if (CvtBBI->BB->pred_size() > 1) {
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
@@ -1104,6 +1104,28 @@ bool IfConverter::IfConvertSimple(BBInfo &BBI, IfcvtKind Kind) {
return true;
}
+/// Scale down weights to fit into uint32_t. NewTrue is the new weight
+/// for successor TrueBB, and NewFalse is the new weight for successor
+/// FalseBB.
+static void ScaleWeights(uint64_t NewTrue, uint64_t NewFalse,
+ MachineBasicBlock *MBB,
+ const MachineBasicBlock *TrueBB,
+ const MachineBasicBlock *FalseBB,
+ const MachineBranchProbabilityInfo *MBPI) {
+ uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
+ uint32_t Scale = (NewMax / UINT32_MAX) + 1;
+ for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
+ SE = MBB->succ_end();
+ SI != SE; ++SI) {
+ if (*SI == TrueBB)
+ MBB->setSuccWeight(SI, (uint32_t)(NewTrue / Scale));
+ else if (*SI == FalseBB)
+ MBB->setSuccWeight(SI, (uint32_t)(NewFalse / Scale));
+ else
+ MBB->setSuccWeight(SI, MBPI->getEdgeWeight(MBB, SI) / Scale);
+ }
+}
+
/// IfConvertTriangle - If convert a triangle sub-CFG.
///
bool IfConverter::IfConvertTriangle(BBInfo &BBI, IfcvtKind Kind) {
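Illustration (not part of the diff): the ScaleWeights helper added above picks one divisor, Scale = NewMax / UINT32_MAX + 1, so that the larger of the two 64-bit weights fits back into uint32_t while the true/false ratio is preserved up to truncation. A minimal standalone sketch with hypothetical weights:

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical 64-bit weights computed by the if-converter.
  uint64_t NewTrue = 6000000000ULL;   // larger than UINT32_MAX
  uint64_t NewFalse = 1500000000ULL;

  uint64_t NewMax = NewTrue > NewFalse ? NewTrue : NewFalse;
  uint32_t Scale = (uint32_t)(NewMax / UINT32_MAX) + 1;  // 6000000000 / 4294967295 + 1 == 2

  // Both scaled weights now fit in uint32_t and keep their 4:1 ratio.
  printf("Scale=%u True=%u False=%u\n", Scale,
         (uint32_t)(NewTrue / Scale), (uint32_t)(NewFalse / Scale));
  return 0;
}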
@@ -1154,12 +1176,22 @@ bool IfConverter::IfConvertTriangle(BBInfo &BBI, IfcvtKind Kind) {
// Initialize liveins to the first BB. These are potentially redefined by
// predicated instructions.
Redefs.init(TRI);
- Redefs.addLiveIns(CvtBBI->BB, *TRI);
- Redefs.addLiveIns(NextBBI->BB, *TRI);
+ Redefs.addLiveIns(CvtBBI->BB);
+ Redefs.addLiveIns(NextBBI->BB);
DontKill.clear();
- bool HasEarlyExit = CvtBBI->FalseBB != NULL;
+ bool HasEarlyExit = CvtBBI->FalseBB != nullptr;
+ uint64_t CvtNext = 0, CvtFalse = 0, BBNext = 0, BBCvt = 0, SumWeight = 0;
+ uint32_t WeightScale = 0;
+ if (HasEarlyExit) {
+ // Get weights before modifying CvtBBI->BB and BBI.BB.
+ CvtNext = MBPI->getEdgeWeight(CvtBBI->BB, NextBBI->BB);
+ CvtFalse = MBPI->getEdgeWeight(CvtBBI->BB, CvtBBI->FalseBB);
+ BBNext = MBPI->getEdgeWeight(BBI.BB, NextBBI->BB);
+ BBCvt = MBPI->getEdgeWeight(BBI.BB, CvtBBI->BB);
+ SumWeight = MBPI->getSumForBlock(CvtBBI->BB, WeightScale);
+ }
if (CvtBBI->BB->pred_size() > 1) {
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
// Copy instructions in the true block, predicate them, and add them to
@@ -1185,8 +1217,22 @@ bool IfConverter::IfConvertTriangle(BBInfo &BBI, IfcvtKind Kind) {
CvtBBI->BrCond.end());
if (TII->ReverseBranchCondition(RevCond))
llvm_unreachable("Unable to reverse branch condition!");
- TII->InsertBranch(*BBI.BB, CvtBBI->FalseBB, NULL, RevCond, dl);
+ TII->InsertBranch(*BBI.BB, CvtBBI->FalseBB, nullptr, RevCond, dl);
BBI.BB->addSuccessor(CvtBBI->FalseBB);
+ // Update the edge weight for both CvtBBI->FalseBB and NextBBI.
+ // New_Weight(BBI.BB, NextBBI->BB) =
+ // Weight(BBI.BB, NextBBI->BB) * getSumForBlock(CvtBBI->BB) +
+ // Weight(BBI.BB, CvtBBI->BB) * Weight(CvtBBI->BB, NextBBI->BB)
+ // New_Weight(BBI.BB, CvtBBI->FalseBB) =
+ // Weight(BBI.BB, CvtBBI->BB) * Weight(CvtBBI->BB, CvtBBI->FalseBB)
+
+ uint64_t NewNext = BBNext * SumWeight + (BBCvt * CvtNext) / WeightScale;
+ uint64_t NewFalse = (BBCvt * CvtFalse) / WeightScale;
+ // We need to scale down all weights of BBI.BB to fit uint32_t.
+ // Here BBI.BB is connected to CvtBBI->FalseBB and will fall through to
+ // the next block.
+ ScaleWeights(NewNext, NewFalse, BBI.BB, getNextBlock(BBI.BB),
+ CvtBBI->FalseBB, MBPI);
}
// Merge in the 'false' block if the 'false' block has no other
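A hypothetical worked instance of the update above (numbers invented for illustration): with Weight(BBI.BB, CvtBBI->BB) = 80, Weight(BBI.BB, NextBBI->BB) = 20, Weight(CvtBBI->BB, NextBBI->BB) = 3, Weight(CvtBBI->BB, CvtBBI->FalseBB) = 1, and therefore SumWeight = 4 (WeightScale = 1), the code computes NewNext = 20 * 4 + (80 * 3) / 1 = 320 and NewFalse = (80 * 1) / 1 = 80. The resulting 4:1 ratio matches the combined original probabilities: the early exit is reached with probability 0.8 * 0.25 = 0.2, and the fall-through path with 0.2 + 0.8 * 0.75 = 0.8.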
@@ -1284,7 +1330,7 @@ bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
// Initialize liveins to the first BB. These are potentially redefined by
// predicated instructions.
Redefs.init(TRI);
- Redefs.addLiveIns(BBI1->BB, *TRI);
+ Redefs.addLiveIns(BBI1->BB);
// Remove the duplicated instructions at the beginnings of both paths.
MachineBasicBlock::iterator DI1 = BBI1->BB->begin();
@@ -1317,12 +1363,12 @@ bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
DontKill.init(TRI);
for (MachineBasicBlock::reverse_iterator I = BBI2->BB->rbegin(),
E = MachineBasicBlock::reverse_iterator(DI2); I != E; ++I) {
- DontKill.stepBackward(*I, *TRI);
+ DontKill.stepBackward(*I);
}
for (MachineBasicBlock::const_iterator I = BBI1->BB->begin(), E = DI1; I != E;
++I) {
- Redefs.stepForward(*I, *TRI);
+ Redefs.stepForward(*I);
}
BBI.BB->splice(BBI.BB->end(), BBI1->BB, BBI1->BB->begin(), DI1);
BBI2->BB->erase(BBI2->BB->begin(), DI2);
@@ -1409,8 +1455,8 @@ bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
PredicateBlock(*BBI2, DI2, *Cond2);
// Merge the true block into the entry of the diamond.
- MergeBlocks(BBI, *BBI1, TailBB == 0);
- MergeBlocks(BBI, *BBI2, TailBB == 0);
+ MergeBlocks(BBI, *BBI1, TailBB == nullptr);
+ MergeBlocks(BBI, *BBI2, TailBB == nullptr);
// If the if-converted block falls through or unconditionally branches into
// the tail block, and the tail block does not have other predecessors, then
@@ -1459,7 +1505,7 @@ static bool MaySpeculate(const MachineInstr *MI,
SmallSet<unsigned, 4> &LaterRedefs,
const TargetInstrInfo *TII) {
bool SawStore = true;
- if (!MI->isSafeToMove(TII, 0, SawStore))
+ if (!MI->isSafeToMove(TII, nullptr, SawStore))
return false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -1483,7 +1529,7 @@ void IfConverter::PredicateBlock(BBInfo &BBI,
SmallVectorImpl<MachineOperand> &Cond,
SmallSet<unsigned, 4> *LaterRedefs) {
bool AnyUnpred = false;
- bool MaySpec = LaterRedefs != 0;
+ bool MaySpec = LaterRedefs != nullptr;
for (MachineBasicBlock::iterator I = BBI.BB->begin(); I != E; ++I) {
if (I->isDebugValue() || TII->isPredicated(I))
continue;
@@ -1501,12 +1547,12 @@ void IfConverter::PredicateBlock(BBInfo &BBI,
#ifndef NDEBUG
dbgs() << "Unable to predicate " << *I << "!\n";
#endif
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
// If the predicated instruction now redefines a register as the result of
// if-conversion, add an implicit kill.
- UpdatePredRedefs(I, Redefs, TRI);
+ UpdatePredRedefs(I, Redefs);
}
std::copy(Cond.begin(), Cond.end(), std::back_inserter(BBI.Predicate));
@@ -1546,24 +1592,24 @@ void IfConverter::CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
#ifndef NDEBUG
dbgs() << "Unable to predicate " << *I << "!\n";
#endif
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
// If the predicated instruction now redefines a register as the result of
// if-conversion, add an implicit kill.
- UpdatePredRedefs(MI, Redefs, TRI);
+ UpdatePredRedefs(MI, Redefs);
// Some kill flags may not be correct anymore.
if (!DontKill.empty())
- RemoveKills(*MI, DontKill, *TRI);
+ RemoveKills(*MI, DontKill);
}
if (!IgnoreBr) {
std::vector<MachineBasicBlock *> Succs(FromBBI.BB->succ_begin(),
FromBBI.BB->succ_end());
MachineBasicBlock *NBB = getNextBlock(FromBBI.BB);
- MachineBasicBlock *FallThrough = FromBBI.HasFallThrough ? NBB : NULL;
+ MachineBasicBlock *FallThrough = FromBBI.HasFallThrough ? NBB : nullptr;
for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
MachineBasicBlock *Succ = Succs[i];
@@ -1599,7 +1645,7 @@ void IfConverter::MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI, bool AddEdges) {
std::vector<MachineBasicBlock *> Succs(FromBBI.BB->succ_begin(),
FromBBI.BB->succ_end());
MachineBasicBlock *NBB = getNextBlock(FromBBI.BB);
- MachineBasicBlock *FallThrough = FromBBI.HasFallThrough ? NBB : NULL;
+ MachineBasicBlock *FallThrough = FromBBI.HasFallThrough ? NBB : nullptr;
for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
MachineBasicBlock *Succ = Succs[i];
diff --git a/contrib/llvm/lib/CodeGen/InlineSpiller.cpp b/contrib/llvm/lib/CodeGen/InlineSpiller.cpp
index bb0e642..f3c8d3d 100644
--- a/contrib/llvm/lib/CodeGen/InlineSpiller.cpp
+++ b/contrib/llvm/lib/CodeGen/InlineSpiller.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "regalloc"
#include "Spiller.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
@@ -21,8 +20,9 @@
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
-#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -38,6 +38,8 @@
using namespace llvm;
+#define DEBUG_TYPE "regalloc"
+
STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
STATISTIC(NumSnippets, "Number of spilled snippets");
STATISTIC(NumSpills, "Number of spills inserted");
@@ -120,7 +122,7 @@ public:
SibValueInfo(unsigned Reg, VNInfo *VNI)
: AllDefsAreReloads(true), DefByOrigPHI(false), KillsSource(false),
- SpillReg(Reg), SpillVNI(VNI), SpillMBB(0), DefMI(0) {}
+ SpillReg(Reg), SpillVNI(VNI), SpillMBB(nullptr), DefMI(nullptr) {}
// Returns true when a def has been found.
bool hasDef() const { return DefByOrigPHI || DefMI; }
@@ -153,7 +155,7 @@ public:
TRI(*mf.getTarget().getRegisterInfo()),
MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()) {}
- void spill(LiveRangeEdit &);
+ void spill(LiveRangeEdit &) override;
private:
bool isSnippet(const LiveInterval &SnipLI);
@@ -166,7 +168,7 @@ private:
bool isSibling(unsigned Reg);
MachineInstr *traceSiblingValue(unsigned, VNInfo*, VNInfo*);
- void propagateSiblingValue(SibValueMap::iterator, VNInfo *VNI = 0);
+ void propagateSiblingValue(SibValueMap::iterator, VNInfo *VNI = nullptr);
void analyzeSiblingValues();
bool hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI);
@@ -178,7 +180,7 @@ private:
bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> >,
- MachineInstr *LoadMI = 0);
+ MachineInstr *LoadMI = nullptr);
void insertReload(unsigned VReg, SlotIndex, MachineBasicBlock::iterator MI);
void insertSpill(unsigned VReg, bool isKill, MachineBasicBlock::iterator MI);
@@ -235,12 +237,13 @@ bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
return false;
- MachineInstr *UseMI = 0;
+ MachineInstr *UseMI = nullptr;
// Check that all uses satisfy our criteria.
- for (MachineRegisterInfo::reg_nodbg_iterator
- RI = MRI.reg_nodbg_begin(SnipLI.reg);
- MachineInstr *MI = RI.skipInstruction();) {
+ for (MachineRegisterInfo::reg_instr_nodbg_iterator
+ RI = MRI.reg_instr_nodbg_begin(SnipLI.reg),
+ E = MRI.reg_instr_nodbg_end(); RI != E; ) {
+ MachineInstr *MI = &*(RI++);
// Allow copies to/from Reg.
if (isFullCopyOf(MI, Reg))
@@ -277,8 +280,9 @@ void InlineSpiller::collectRegsToSpill() {
if (Original == Reg)
return;
- for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
- MachineInstr *MI = RI.skipInstruction();) {
+ for (MachineRegisterInfo::reg_instr_iterator
+ RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
+ MachineInstr *MI = &*(RI++);
unsigned SnipReg = isFullCopyOf(MI, Reg);
if (!isSibling(SnipReg))
continue;
@@ -364,7 +368,7 @@ void InlineSpiller::propagateSiblingValue(SibValueMap::iterator SVIIter,
do {
SVI = WorkList.pop_back_val();
TinyPtrVector<VNInfo*> *Deps = VNI ? &FirstDeps : &SVI->second.Deps;
- VNI = 0;
+ VNI = nullptr;
SibValueInfo &SV = SVI->second;
if (!SV.SpillMBB)
@@ -438,7 +442,20 @@ void InlineSpiller::propagateSiblingValue(SibValueMap::iterator SVIIter,
// Also hoist spills to blocks with smaller loop depth, but make sure
// that the new value dominates. Non-phi dependents are always
// dominated, phis need checking.
+
+ const BranchProbability MarginProb(4, 5); // 80%
+ // Hoist a spill to the outer loop if there are multiple dependents (it
+ // can be beneficial when more than one dependent is hoisted) or
+ // if DepSV (the hoisting source) is hotter than SV (the hoisting
+ // destination) (we add an 80% margin to bias a little towards
+ // loop depth).
+ bool HoistCondition =
+ (MBFI.getBlockFreq(DepSV.SpillMBB) >=
+ (MBFI.getBlockFreq(SV.SpillMBB) * MarginProb)) ||
+ Deps->size() > 1;
+
if ((Loops.getLoopDepth(DepSV.SpillMBB) > SpillDepth) &&
+ HoistCondition &&
(!DepSVI->first->isPHIDef() ||
MDT.dominates(SV.SpillMBB, DepSV.SpillMBB))) {
Changed = true;
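To make the 80% margin above concrete with hypothetical block frequencies: if DepSV.SpillMBB has frequency 90 and SV.SpillMBB has frequency 100, then 90 >= 100 * 4/5 = 80 holds, so even a single dependent may be hoisted although the source is slightly colder than the destination; at frequency 70 the margin test fails and hoisting only proceeds when Deps->size() > 1.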
@@ -476,7 +493,7 @@ MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
// Check if a cached value already exists.
SibValueMap::iterator SVI;
bool Inserted;
- tie(SVI, Inserted) =
+ std::tie(SVI, Inserted) =
SibValues.insert(std::make_pair(UseVNI, SibValueInfo(UseReg, UseVNI)));
if (!Inserted) {
DEBUG(dbgs() << "Cached value " << PrintReg(UseReg) << ':'
@@ -495,7 +512,7 @@ MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
do {
unsigned Reg;
VNInfo *VNI;
- tie(Reg, VNI) = WorkList.pop_back_val();
+ std::tie(Reg, VNI) = WorkList.pop_back_val();
DEBUG(dbgs() << " " << PrintReg(Reg) << ':' << VNI->id << '@' << VNI->def
<< ":\t");
@@ -554,7 +571,7 @@ MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
for (unsigned i = 0, e = NonPHIs.size(); i != e; ++i) {
VNInfo *NonPHI = NonPHIs[i];
// Known value? Try an insertion.
- tie(SVI, Inserted) =
+ std::tie(SVI, Inserted) =
SibValues.insert(std::make_pair(NonPHI, SibValueInfo(Reg, NonPHI)));
// Add all the PHIs as dependents of NonPHI.
for (unsigned pi = 0, pe = PHIs.size(); pi != pe; ++pi)
@@ -587,8 +604,8 @@ MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
<< SrcVNI->id << '@' << SrcVNI->def
<< " kill=" << unsigned(SVI->second.KillsSource) << '\n');
// Known sibling source value? Try an insertion.
- tie(SVI, Inserted) = SibValues.insert(std::make_pair(SrcVNI,
- SibValueInfo(SrcReg, SrcVNI)));
+ std::tie(SVI, Inserted) = SibValues.insert(
+ std::make_pair(SrcVNI, SibValueInfo(SrcReg, SrcVNI)));
// This is the first time we see Src, add it to the worklist.
if (Inserted)
WorkList.push_back(std::make_pair(SrcReg, SrcVNI));
@@ -643,7 +660,7 @@ void InlineSpiller::analyzeSiblingValues() {
VNInfo *VNI = *VI;
if (VNI->isUnused())
continue;
- MachineInstr *DefMI = 0;
+ MachineInstr *DefMI = nullptr;
if (!VNI->isPHIDef()) {
DefMI = LIS.getInstructionFromIndex(VNI->def);
assert(DefMI && "No defining instruction");
@@ -745,7 +762,7 @@ void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
do {
LiveInterval *LI;
- tie(LI, VNI) = WorkList.pop_back_val();
+ std::tie(LI, VNI) = WorkList.pop_back_val();
unsigned Reg = LI->reg;
DEBUG(dbgs() << "Checking redundant spills for "
<< VNI->id << '@' << VNI->def << " in " << *LI << '\n');
@@ -759,8 +776,10 @@ void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
// Find all spills and copies of VNI.
- for (MachineRegisterInfo::use_nodbg_iterator UI = MRI.use_nodbg_begin(Reg);
- MachineInstr *MI = UI.skipInstruction();) {
+ for (MachineRegisterInfo::use_instr_nodbg_iterator
+ UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
+ UI != E; ) {
+ MachineInstr *MI = &*(UI++);
if (!MI->isCopy() && !MI->mayStore())
continue;
SlotIndex Idx = LIS.getInstructionIndex(MI);
@@ -804,7 +823,7 @@ void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
WorkList.push_back(std::make_pair(LI, VNI));
do {
- tie(LI, VNI) = WorkList.pop_back_val();
+ std::tie(LI, VNI) = WorkList.pop_back_val();
if (!UsedValues.insert(VNI))
continue;
@@ -920,10 +939,12 @@ void InlineSpiller::reMaterializeAll() {
for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
unsigned Reg = RegsToSpill[i];
LiveInterval &LI = LIS.getInterval(Reg);
- for (MachineRegisterInfo::use_nodbg_iterator
- RI = MRI.use_nodbg_begin(Reg);
- MachineInstr *MI = RI.skipBundle();)
+ for (MachineRegisterInfo::use_bundle_nodbg_iterator
+ RI = MRI.use_bundle_nodbg_begin(Reg), E = MRI.use_bundle_nodbg_end();
+ RI != E; ) {
+ MachineInstr *MI = &*(RI++);
anyRemat |= reMaterializeFor(LI, MI);
+ }
}
if (!anyRemat)
return;
@@ -1014,7 +1035,7 @@ static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
char NextLine = '\n';
char SlotIndent = '\t';
- if (llvm::next(B) == E) {
+ if (std::next(B) == E) {
NextLine = ' ';
SlotIndent = ' ';
}
@@ -1098,12 +1119,11 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
MRI.isReserved(Reg)) {
continue;
}
+ // Skip non-Defs, including undef uses and internal reads.
+ if (MO->isUse())
+ continue;
MIBundleOperands::PhysRegInfo RI =
MIBundleOperands(FoldMI).analyzePhysReg(Reg, &TRI);
- if (MO->readsReg()) {
- assert(RI.Reads && "Cannot fold physreg reader");
- continue;
- }
if (RI.Defines)
continue;
// FoldMI does not define this physreg. Remove the LI segment.
@@ -1172,12 +1192,12 @@ void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill,
MachineBasicBlock &MBB = *MI->getParent();
MachineInstrSpan MIS(MI);
- TII.storeRegToStackSlot(MBB, llvm::next(MI), NewVReg, isKill, StackSlot,
+ TII.storeRegToStackSlot(MBB, std::next(MI), NewVReg, isKill, StackSlot,
MRI.getRegClass(NewVReg), &TRI);
- LIS.InsertMachineInstrRangeInMaps(llvm::next(MI), MIS.end());
+ LIS.InsertMachineInstrRangeInMaps(std::next(MI), MIS.end());
- DEBUG(dumpMachineInstrRangeWithSlotIndex(llvm::next(MI), MIS.end(), LIS,
+ DEBUG(dumpMachineInstrRangeWithSlotIndex(std::next(MI), MIS.end(), LIS,
"spill"));
++NumSpills;
}
@@ -1188,8 +1208,10 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
LiveInterval &OldLI = LIS.getInterval(Reg);
// Iterate over instructions using Reg.
- for (MachineRegisterInfo::reg_iterator RegI = MRI.reg_begin(Reg);
- MachineInstr *MI = RegI.skipBundle();) {
+ for (MachineRegisterInfo::reg_bundle_iterator
+ RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
+ RegI != E; ) {
+ MachineInstr *MI = &*(RegI++);
// Debug values are not allowed to affect codegen.
if (MI->isDebugValue()) {
@@ -1314,8 +1336,10 @@ void InlineSpiller::spillAll() {
// Finally delete the SnippetCopies.
for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
- for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(RegsToSpill[i]);
- MachineInstr *MI = RI.skipInstruction();) {
+ for (MachineRegisterInfo::reg_instr_iterator
+ RI = MRI.reg_instr_begin(RegsToSpill[i]), E = MRI.reg_instr_end();
+ RI != E; ) {
+ MachineInstr *MI = &*(RI++);
assert(SnippetCopies.count(MI) && "Remaining use wasn't a snippet copy");
// FIXME: Do this with a LiveRangeEdit callback.
LIS.RemoveMachineInstrFromMaps(MI);
@@ -1336,7 +1360,7 @@ void InlineSpiller::spill(LiveRangeEdit &edit) {
// Share a stack slot among all descendants of Original.
Original = VRM.getOriginal(edit.getReg());
StackSlot = VRM.getStackSlot(Original);
- StackInt = 0;
+ StackInt = nullptr;
DEBUG(dbgs() << "Inline spilling "
<< MRI.getRegClass(edit.getReg())->getName()
diff --git a/contrib/llvm/lib/CodeGen/InterferenceCache.cpp b/contrib/llvm/lib/CodeGen/InterferenceCache.cpp
index 427225d..187e015 100644
--- a/contrib/llvm/lib/CodeGen/InterferenceCache.cpp
+++ b/contrib/llvm/lib/CodeGen/InterferenceCache.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "regalloc"
#include "InterferenceCache.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/Support/ErrorHandling.h"
@@ -19,9 +18,27 @@
using namespace llvm;
+#define DEBUG_TYPE "regalloc"
+
// Static member used for null interference cursors.
InterferenceCache::BlockInterference InterferenceCache::Cursor::NoInterference;
+// Initializes PhysRegEntries (instead of a SmallVector, PhysRegEntries is a
+// buffer of size NumPhysRegs to speed up alloc/clear for targets with large
+// reg files). Calloced memory is used for good form, and quiets tools like
+// Valgrind too, but zero initialized memory is not required by the algorithm:
+// this is because PhysRegEntries works like a SparseSet and its entries are
+// only valid when there is a corresponding CacheEntries assignment. There is
+// also support for when pass managers are reused for targets with different
+// numbers of PhysRegs: in this case PhysRegEntries is freed and reinitialized.
+void InterferenceCache::reinitPhysRegEntries() {
+ if (PhysRegEntriesCount == TRI->getNumRegs()) return;
+ free(PhysRegEntries);
+ PhysRegEntriesCount = TRI->getNumRegs();
+ PhysRegEntries = (unsigned char*)
+ calloc(PhysRegEntriesCount, sizeof(unsigned char));
+}
+
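Illustration (assumed names, not part of the diff): the "works like a SparseSet" remark above means a PhysRegEntries slot may hold stale or garbage bytes; it is only trusted when the cache entry it points at refers back to the same physreg. A minimal standalone sketch of that validity check:

#include <cstdio>
#include <cstdlib>

struct SparseCache {
  unsigned Owner[4];        // dense cache slots; each remembers its current key
  unsigned char *Index;     // one byte per key; contents are never assumed valid
  void init(unsigned NumKeys) {
    Index = (unsigned char *)calloc(NumKeys, sizeof(unsigned char));
    for (unsigned i = 0; i != 4; ++i)
      Owner[i] = ~0u;       // no slot owns any key yet
  }
  bool lookup(unsigned Key, unsigned &Slot) {
    unsigned char Idx = Index[Key];
    // A stale or zero Idx is harmless: the back-pointer check rejects it.
    if (Idx < 4 && Owner[Idx] == Key) { Slot = Idx; return true; }
    return false;
  }
  void assign(unsigned Key, unsigned Idx) { Index[Key] = (unsigned char)Idx; Owner[Idx] = Key; }
};

int main() {
  SparseCache C;
  C.init(64);
  unsigned Slot;
  printf("%d\n", C.lookup(10, Slot)); // 0: slot 0 does not own key 10 yet
  C.assign(10, 0);
  printf("%d\n", C.lookup(10, Slot)); // 1: index and back-pointer now agree
  free(C.Index);
  return 0;
}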
void InterferenceCache::init(MachineFunction *mf,
LiveIntervalUnion *liuarray,
SlotIndexes *indexes,
@@ -30,7 +47,7 @@ void InterferenceCache::init(MachineFunction *mf,
MF = mf;
LIUArray = liuarray;
TRI = tri;
- PhysRegEntries.assign(TRI->getNumRegs(), 0);
+ reinitPhysRegEntries();
for (unsigned i = 0; i != CacheEntries; ++i)
Entries[i].clear(mf, indexes, lis);
}
@@ -105,7 +122,7 @@ bool InterferenceCache::Entry::valid(LiveIntervalUnion *LIUArray,
void InterferenceCache::Entry::update(unsigned MBBNum) {
SlotIndex Start, Stop;
- tie(Start, Stop) = Indexes->getMBBRange(MBBNum);
+ std::tie(Start, Stop) = Indexes->getMBBRange(MBBNum);
// Use advanceTo only when possible.
if (PrevPos != Start) {
@@ -182,7 +199,7 @@ void InterferenceCache::Entry::update(unsigned MBBNum) {
BI = &Blocks[MBBNum];
if (BI->Tag == Tag)
return;
- tie(Start, Stop) = Indexes->getMBBRange(MBBNum);
+ std::tie(Start, Stop) = Indexes->getMBBRange(MBBNum);
}
// Check for last interference in block.
diff --git a/contrib/llvm/lib/CodeGen/InterferenceCache.h b/contrib/llvm/lib/CodeGen/InterferenceCache.h
index 800f705..91a1da9 100644
--- a/contrib/llvm/lib/CodeGen/InterferenceCache.h
+++ b/contrib/llvm/lib/CodeGen/InterferenceCache.h
@@ -77,7 +77,8 @@ class InterferenceCache {
/// Iterator pointing into the fixed RegUnit interference.
LiveInterval::iterator FixedI;
- RegUnitInfo(LiveIntervalUnion &LIU) : VirtTag(LIU.getTag()), Fixed(0) {
+ RegUnitInfo(LiveIntervalUnion &LIU)
+ : VirtTag(LIU.getTag()), Fixed(nullptr) {
VirtI.setMap(LIU.getMap());
}
};
@@ -93,7 +94,7 @@ class InterferenceCache {
void update(unsigned MBBNum);
public:
- Entry() : PhysReg(0), Tag(0), RefCount(0), Indexes(0), LIS(0) {}
+ Entry() : PhysReg(0), Tag(0), RefCount(0), Indexes(nullptr), LIS(nullptr) {}
void clear(MachineFunction *mf, SlotIndexes *indexes, LiveIntervals *lis) {
assert(!hasRefs() && "Cannot clear cache entry with references");
@@ -135,7 +136,8 @@ class InterferenceCache {
// Point to an entry for each physreg. The entry pointed to may not be up to
// date, and it may have been reused for a different physreg.
- SmallVector<unsigned char, 2> PhysRegEntries;
+ unsigned char* PhysRegEntries;
+ size_t PhysRegEntriesCount;
// Next round-robin entry to be picked.
unsigned RoundRobin;
@@ -147,7 +149,15 @@ class InterferenceCache {
Entry *get(unsigned PhysReg);
public:
- InterferenceCache() : TRI(0), LIUArray(0), MF(0), RoundRobin(0) {}
+ InterferenceCache()
+ : TRI(nullptr), LIUArray(nullptr), MF(nullptr), PhysRegEntries(nullptr),
+ PhysRegEntriesCount(0), RoundRobin(0) {}
+
+ ~InterferenceCache() {
+ free(PhysRegEntries);
+ }
+
+ void reinitPhysRegEntries();
/// init - Prepare cache for a new function.
void init(MachineFunction*, LiveIntervalUnion*, SlotIndexes*, LiveIntervals*,
@@ -164,7 +174,7 @@ public:
static BlockInterference NoInterference;
void setEntry(Entry *E) {
- Current = 0;
+ Current = nullptr;
// Update reference counts. Nothing happens when RefCount reaches 0, so
// we don't have to check for E == CacheEntry etc.
if (CacheEntry)
@@ -176,10 +186,10 @@ public:
public:
/// Cursor - Create a dangling cursor.
- Cursor() : CacheEntry(0), Current(0) {}
- ~Cursor() { setEntry(0); }
+ Cursor() : CacheEntry(nullptr), Current(nullptr) {}
+ ~Cursor() { setEntry(nullptr); }
- Cursor(const Cursor &O) : CacheEntry(0), Current(0) {
+ Cursor(const Cursor &O) : CacheEntry(nullptr), Current(nullptr) {
setEntry(O.CacheEntry);
}
@@ -192,7 +202,7 @@ public:
void setPhysReg(InterferenceCache &Cache, unsigned PhysReg) {
// Release reference before getting a new one. That guarantees we can
// actually have CacheEntries live cursors.
- setEntry(0);
+ setEntry(nullptr);
if (PhysReg)
setEntry(Cache.get(PhysReg));
}
diff --git a/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp b/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp
index c38d4fb..a8b8600 100644
--- a/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp
+++ b/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp
@@ -13,13 +13,13 @@
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -115,21 +115,21 @@ void IntrinsicLowering::AddPrototypes(Module &M) {
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
- TD.getIntPtrType(Context), (Type *)0);
+ DL.getIntPtrType(Context), nullptr);
break;
case Intrinsic::memmove:
M.getOrInsertFunction("memmove",
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
- TD.getIntPtrType(Context), (Type *)0);
+ DL.getIntPtrType(Context), nullptr);
break;
case Intrinsic::memset:
M.getOrInsertFunction("memset",
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
Type::getInt32Ty(M.getContext()),
- TD.getIntPtrType(Context), (Type *)0);
+ DL.getIntPtrType(Context), nullptr);
break;
case Intrinsic::sqrt:
EnsureFPIntrinsicsExist(M, I, "sqrtf", "sqrt", "sqrtl");
@@ -463,7 +463,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break; // Strip out annotate intrinsic
case Intrinsic::memcpy: {
- Type *IntPtr = TD.getIntPtrType(Context);
+ Type *IntPtr = DL.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
@@ -474,7 +474,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break;
}
case Intrinsic::memmove: {
- Type *IntPtr = TD.getIntPtrType(Context);
+ Type *IntPtr = DL.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
@@ -486,7 +486,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
}
case Intrinsic::memset: {
Value *Op0 = CI->getArgOperand(0);
- Type *IntPtr = TD.getIntPtrType(Op0->getType());
+ Type *IntPtr = DL.getIntPtrType(Op0->getType());
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
diff --git a/contrib/llvm/lib/CodeGen/JumpInstrTables.cpp b/contrib/llvm/lib/CodeGen/JumpInstrTables.cpp
new file mode 100644
index 0000000..750f71f
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/JumpInstrTables.cpp
@@ -0,0 +1,297 @@
+//===-- JumpInstrTables.cpp: Jump-Instruction Tables ----------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief An implementation of jump-instruction tables.
+///
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "jt"
+
+#include "llvm/CodeGen/JumpInstrTables.h"
+
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/JumpInstrTableInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <vector>
+
+using namespace llvm;
+
+char JumpInstrTables::ID = 0;
+
+INITIALIZE_PASS_BEGIN(JumpInstrTables, "jump-instr-tables",
+ "Jump-Instruction Tables", true, true)
+INITIALIZE_PASS_DEPENDENCY(JumpInstrTableInfo);
+INITIALIZE_PASS_END(JumpInstrTables, "jump-instr-tables",
+ "Jump-Instruction Tables", true, true)
+
+STATISTIC(NumJumpTables, "Number of indirect call tables generated");
+STATISTIC(NumFuncsInJumpTables, "Number of functions in the jump tables");
+
+ModulePass *llvm::createJumpInstrTablesPass() {
+ // The default implementation uses a single table for all functions.
+ return new JumpInstrTables(JumpTable::Single);
+}
+
+ModulePass *llvm::createJumpInstrTablesPass(JumpTable::JumpTableType JTT) {
+ return new JumpInstrTables(JTT);
+}
+
+namespace {
+static const char jump_func_prefix[] = "__llvm_jump_instr_table_";
+static const char jump_section_prefix[] = ".jump.instr.table.text.";
+
+// Checks to see if a given CallSite is making an indirect call, including
+// cases where the indirect call is made through a bitcast.
+bool isIndirectCall(CallSite &CS) {
+ if (CS.getCalledFunction())
+ return false;
+
+ // Check the value to see if it is merely a bitcast of a function. In
+ // this case, it will translate to a direct function call in the resulting
+ // assembly, so we won't treat it as an indirect call here.
+ const Value *V = CS.getCalledValue();
+ if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
+ return !(CE->isCast() && isa<Function>(CE->getOperand(0)));
+ }
+
+ // Otherwise, since we know it's a call, it must be an indirect call
+ return true;
+}
+
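For example (hypothetical IR): a plain call of the form "call void @f()" has a called Function and is skipped; "call void bitcast (void (i32)* @g to void ()*)()" is also skipped, because the called value is a constant cast whose operand is a known Function and it lowers to a direct call; only a call through a loaded or otherwise computed function pointer is reported as indirect here.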
+// Replaces Functions and GlobalAliases with a different Value.
+bool replaceGlobalValueIndirectUse(GlobalValue *GV, Value *V, Use *U) {
+ User *Us = U->getUser();
+ if (!Us)
+ return false;
+ if (Instruction *I = dyn_cast<Instruction>(Us)) {
+ CallSite CS(I);
+
+ // Don't do the replacement if this use is a direct call to this function.
+ // If the use is not the called value, then replace it.
+ if (CS && (isIndirectCall(CS) || CS.isCallee(U))) {
+ return false;
+ }
+
+ U->set(V);
+ } else if (Constant *C = dyn_cast<Constant>(Us)) {
+ // Don't replace calls to bitcasts of function symbols, since they get
+ // translated to direct calls.
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Us)) {
+ if (CE->getOpcode() == Instruction::BitCast) {
+ // This bitcast must have exactly one user.
+ if (CE->user_begin() != CE->user_end()) {
+ User *ParentUs = *CE->user_begin();
+ if (CallInst *CI = dyn_cast<CallInst>(ParentUs)) {
+ CallSite CS(CI);
+ Use &CEU = *CE->use_begin();
+ if (CS.isCallee(&CEU)) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+
+ // GlobalAlias doesn't support replaceUsesOfWithOnConstant. And the verifier
+ // requires an alias to point to a defined function. So, GlobalAlias is handled
+ // as a separate case in runOnModule.
+ if (!isa<GlobalAlias>(C))
+ C->replaceUsesOfWithOnConstant(GV, V, U);
+ } else {
+ assert(false && "The Use of a Function symbol is neither an instruction nor"
+ " a constant");
+ }
+
+ return true;
+}
+
+// Replaces all replaceable address-taken uses of GV with a pointer to a
+// jump-instruction table entry.
+void replaceValueWithFunction(GlobalValue *GV, Function *F) {
+ // Go through all uses of this function and replace the uses of GV with the
+ // jump-table version of the function. Get the uses as a vector before
+ // replacing them, since replacing them changes the use list and invalidates
+ // the iterator otherwise.
+ for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E;) {
+ Use &U = *I++;
+
+ // Replacement of constants replaces all instances in the constant. So, some
+ // uses might have already been handled by the time we reach them here.
+ if (U.get() == GV)
+ replaceGlobalValueIndirectUse(GV, F, &U);
+ }
+
+ return;
+}
+} // end anonymous namespace
+
+JumpInstrTables::JumpInstrTables()
+ : ModulePass(ID), Metadata(), JITI(nullptr), TableCount(0),
+ JTType(JumpTable::Single) {
+ initializeJumpInstrTablesPass(*PassRegistry::getPassRegistry());
+}
+
+JumpInstrTables::JumpInstrTables(JumpTable::JumpTableType JTT)
+ : ModulePass(ID), Metadata(), JITI(nullptr), TableCount(0), JTType(JTT) {
+ initializeJumpInstrTablesPass(*PassRegistry::getPassRegistry());
+}
+
+JumpInstrTables::~JumpInstrTables() {}
+
+void JumpInstrTables::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<JumpInstrTableInfo>();
+}
+
+Function *JumpInstrTables::insertEntry(Module &M, Function *Target) {
+ FunctionType *OrigFunTy = Target->getFunctionType();
+ FunctionType *FunTy = transformType(OrigFunTy);
+
+ JumpMap::iterator it = Metadata.find(FunTy);
+ if (Metadata.end() == it) {
+ struct TableMeta Meta;
+ Meta.TableNum = TableCount;
+ Meta.Count = 0;
+ Metadata[FunTy] = Meta;
+ it = Metadata.find(FunTy);
+ ++NumJumpTables;
+ ++TableCount;
+ }
+
+ it->second.Count++;
+
+ std::string NewName(jump_func_prefix);
+ NewName += (Twine(it->second.TableNum) + "_" + Twine(it->second.Count)).str();
+ Function *JumpFun =
+ Function::Create(OrigFunTy, GlobalValue::ExternalLinkage, NewName, &M);
+ // The section for this table
+ JumpFun->setSection((jump_section_prefix + Twine(it->second.TableNum)).str());
+ JITI->insertEntry(FunTy, Target, JumpFun);
+
+ ++NumFuncsInJumpTables;
+ return JumpFun;
+}
+
+bool JumpInstrTables::hasTable(FunctionType *FunTy) {
+ FunctionType *TransTy = transformType(FunTy);
+ return Metadata.end() != Metadata.find(TransTy);
+}
+
+FunctionType *JumpInstrTables::transformType(FunctionType *FunTy) {
+ // Returning nullptr forces all types into the same table, since all types map
+ // to the same type
+ Type *VoidPtrTy = Type::getInt8PtrTy(FunTy->getContext());
+
+ // Ignore the return type.
+ Type *RetTy = VoidPtrTy;
+ bool IsVarArg = FunTy->isVarArg();
+ std::vector<Type *> ParamTys(FunTy->getNumParams());
+ FunctionType::param_iterator PI, PE;
+ int i = 0;
+
+ std::vector<Type *> EmptyParams;
+ Type *Int32Ty = Type::getInt32Ty(FunTy->getContext());
+ FunctionType *VoidFnTy = FunctionType::get(
+ Type::getVoidTy(FunTy->getContext()), EmptyParams, false);
+ switch (JTType) {
+ case JumpTable::Single:
+
+ return FunctionType::get(RetTy, EmptyParams, false);
+ case JumpTable::Arity:
+ // Transform all types to void* so that all functions with the same arity
+ // end up in the same table.
+ for (PI = FunTy->param_begin(), PE = FunTy->param_end(); PI != PE;
+ PI++, i++) {
+ ParamTys[i] = VoidPtrTy;
+ }
+
+ return FunctionType::get(RetTy, ParamTys, IsVarArg);
+ case JumpTable::Simplified:
+ // Project all parameters types to one of 3 types: composite, integer, and
+ // function, matching the three subclasses of Type.
+ for (PI = FunTy->param_begin(), PE = FunTy->param_end(); PI != PE;
+ ++PI, ++i) {
+ assert((isa<IntegerType>(*PI) || isa<FunctionType>(*PI) ||
+ isa<CompositeType>(*PI)) &&
+ "This type is not an Integer or a Composite or a Function");
+ if (isa<CompositeType>(*PI)) {
+ ParamTys[i] = VoidPtrTy;
+ } else if (isa<FunctionType>(*PI)) {
+ ParamTys[i] = VoidFnTy;
+ } else if (isa<IntegerType>(*PI)) {
+ ParamTys[i] = Int32Ty;
+ }
+ }
+
+ return FunctionType::get(RetTy, ParamTys, IsVarArg);
+ case JumpTable::Full:
+ // Don't transform this type at all.
+ return FunTy;
+ }
+
+ return nullptr;
+}
+
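As a hypothetical illustration of the mapping above: a function of type i32 (float*, i32) becomes i8* () under JumpTable::Single (everything shares one table), i8* (i8*, i8*) under JumpTable::Arity (only the parameter count survives), i8* (i8*, i32) under JumpTable::Simplified (the pointer parameter collapses to i8*, the integer parameter to i32), and stays i32 (float*, i32) under JumpTable::Full.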
+bool JumpInstrTables::runOnModule(Module &M) {
+ JITI = &getAnalysis<JumpInstrTableInfo>();
+
+ // Get the set of jumptable-annotated functions.
+ DenseMap<Function *, Function *> Functions;
+ for (Function &F : M) {
+ if (F.hasFnAttribute(Attribute::JumpTable)) {
+ assert(F.hasUnnamedAddr() &&
+ "Attribute 'jumptable' requires 'unnamed_addr'");
+ Functions[&F] = nullptr;
+ }
+ }
+
+ // Create the jump-table functions.
+ for (auto &KV : Functions) {
+ Function *F = KV.first;
+ KV.second = insertEntry(M, F);
+ }
+
+ // GlobalAlias is a special case, because the target of an alias statement
+ // must be a defined function. So, instead of replacing a given function in
+ // the alias, we replace all uses of aliases that target jumptable functions.
+ // Note that there's no need to create these functions, since only aliases
+ // that target known jumptable functions are replaced, and there's no way to
+ // put the jumptable annotation on a global alias.
+ DenseMap<GlobalAlias *, Function *> Aliases;
+ for (GlobalAlias &GA : M.aliases()) {
+ Constant *Aliasee = GA.getAliasee();
+ if (Function *F = dyn_cast<Function>(Aliasee)) {
+ auto it = Functions.find(F);
+ if (it != Functions.end()) {
+ Aliases[&GA] = it->second;
+ }
+ }
+ }
+
+ // Replace each address taken function with its jump-instruction table entry.
+ for (auto &KV : Functions)
+ replaceValueWithFunction(KV.first, KV.second);
+
+ for (auto &KV : Aliases)
+ replaceValueWithFunction(KV.first, KV.second);
+
+ return !Functions.empty();
+}
diff --git a/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp b/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp
index ad2c553..df96b94 100644
--- a/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp
+++ b/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp
@@ -12,12 +12,15 @@
//===----------------------------------------------------------------------===//
#include "llvm/Target/TargetMachine.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/Assembly/PrintModulePass.h"
+
+#include "llvm/Analysis/Passes.h"
#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/JumpInstrTables.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/IR/Verifier.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrInfo.h"
@@ -44,33 +47,24 @@ static cl::opt<cl::boolOrDefault>
EnableFastISelOption("fast-isel", cl::Hidden,
cl::desc("Enable the \"fast\" instruction selector"));
-static cl::opt<bool> ShowMCEncoding("show-mc-encoding", cl::Hidden,
- cl::desc("Show encoding in .s output"));
-static cl::opt<bool> ShowMCInst("show-mc-inst", cl::Hidden,
- cl::desc("Show instruction structure in .s output"));
-
-static cl::opt<cl::boolOrDefault>
-AsmVerbose("asm-verbose", cl::desc("Add comments to directives."),
- cl::init(cl::BOU_UNSET));
-
-static bool getVerboseAsm() {
- switch (AsmVerbose) {
- case cl::BOU_UNSET: return TargetMachine::getAsmVerbosityDefault();
- case cl::BOU_TRUE: return true;
- case cl::BOU_FALSE: return false;
- }
- llvm_unreachable("Invalid verbose asm state");
-}
-
void LLVMTargetMachine::initAsmInfo() {
- AsmInfo = TheTarget.createMCAsmInfo(*getRegisterInfo(), TargetTriple);
+ MCAsmInfo *TmpAsmInfo = TheTarget.createMCAsmInfo(*getRegisterInfo(),
+ TargetTriple);
// TargetSelect.h moved to a different directory between LLVM 2.9 and 3.0,
// and if the old one gets included then MCAsmInfo will be NULL and
// we'll crash later.
// Provide the user with a useful error message about what's wrong.
- assert(AsmInfo && "MCAsmInfo not initialized. "
+ assert(TmpAsmInfo && "MCAsmInfo not initialized. "
"Make sure you include the correct TargetSelect.h"
"and that InitializeAllTargetMCs() is being invoked!");
+
+ if (Options.DisableIntegratedAS)
+ TmpAsmInfo->setUseIntegratedAssembler(false);
+
+ if (Options.CompressDebugSections)
+ TmpAsmInfo->setCompressDebugSections(true);
+
+ AsmInfo = TmpAsmInfo;
}
LLVMTargetMachine::LLVMTargetMachine(const Target &T, StringRef Triple,
@@ -92,7 +86,12 @@ static MCContext *addPassesToGenerateCode(LLVMTargetMachine *TM,
bool DisableVerify,
AnalysisID StartAfter,
AnalysisID StopAfter) {
- // Targets may override createPassConfig to provide a target-specific sublass.
+
+ // Add internal analysis passes from the target machine.
+ TM->addAnalysisPasses(PM);
+
+ // Targets may override createPassConfig to provide a target-specific
+ // subclass.
TargetPassConfig *PassConfig = TM->createPassConfig(PM);
PassConfig->setStartStopPasses(StartAfter, StopAfter);
@@ -127,7 +126,7 @@ static MCContext *addPassesToGenerateCode(LLVMTargetMachine *TM,
// Ask the target for an isel.
if (PassConfig->addInstSelector())
- return NULL;
+ return nullptr;
PassConfig->addMachinePasses();
@@ -142,6 +141,11 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
bool DisableVerify,
AnalysisID StartAfter,
AnalysisID StopAfter) {
+ // Passes to handle jumptable function annotations. These can't be handled at
+ // JIT time, so we don't add them directly to addPassesToGenerateCode.
+ PM.add(createJumpInstrTableInfoPass());
+ PM.add(createJumpInstrTablesPass(Options.JTType));
+
// Add common CodeGen passes.
MCContext *Context = addPassesToGenerateCode(this, PM, DisableVerify,
StartAfter, StopAfter);
@@ -154,18 +158,18 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
// machine-level pass), and whatever other information is needed to
// deserialize the code and resume compilation. For now, just write the
// LLVM IR.
- PM.add(createPrintModulePass(&Out));
+ PM.add(createPrintModulePass(Out));
return false;
}
- if (hasMCSaveTempLabels())
+ if (Options.MCOptions.MCSaveTempLabels)
Context->setAllowTemporaryLabels(false);
const MCAsmInfo &MAI = *getMCAsmInfo();
const MCRegisterInfo &MRI = *getRegisterInfo();
const MCInstrInfo &MII = *getInstrInfo();
const MCSubtargetInfo &STI = getSubtarget<MCSubtargetInfo>();
- OwningPtr<MCStreamer> AsmStreamer;
+ std::unique_ptr<MCStreamer> AsmStreamer;
switch (FileType) {
case CGFT_AssemblyFile: {
@@ -174,20 +178,16 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
MII, MRI, STI);
// Create a code emitter if asked to show the encoding.
- MCCodeEmitter *MCE = 0;
- if (ShowMCEncoding)
+ MCCodeEmitter *MCE = nullptr;
+ if (Options.MCOptions.ShowMCEncoding)
MCE = getTarget().createMCCodeEmitter(MII, MRI, STI, *Context);
MCAsmBackend *MAB = getTarget().createMCAsmBackend(MRI, getTargetTriple(),
TargetCPU);
- MCStreamer *S = getTarget().createAsmStreamer(*Context, Out,
- getVerboseAsm(),
- hasMCUseLoc(),
- hasMCUseCFI(),
- hasMCUseDwarfDirectory(),
- InstPrinter,
- MCE, MAB,
- ShowMCInst);
+ MCStreamer *S = getTarget().createAsmStreamer(
+ *Context, Out, Options.MCOptions.AsmVerbose,
+ Options.MCOptions.MCUseDwarfDirectory, InstPrinter, MCE, MAB,
+ Options.MCOptions.ShowMCInst);
AsmStreamer.reset(S);
break;
}
@@ -198,30 +198,28 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
*Context);
MCAsmBackend *MAB = getTarget().createMCAsmBackend(MRI, getTargetTriple(),
TargetCPU);
- if (MCE == 0 || MAB == 0)
+ if (!MCE || !MAB)
return true;
- AsmStreamer.reset(getTarget().createMCObjectStreamer(getTargetTriple(),
- *Context, *MAB, Out,
- MCE, hasMCRelaxAll(),
- hasMCNoExecStack()));
- AsmStreamer.get()->setAutoInitSections(true);
+ AsmStreamer.reset(getTarget().createMCObjectStreamer(
+ getTargetTriple(), *Context, *MAB, Out, MCE, STI,
+ Options.MCOptions.MCRelaxAll, Options.MCOptions.MCNoExecStack));
break;
}
case CGFT_Null:
// The Null output is intended for use for performance analysis and testing,
// not real users.
- AsmStreamer.reset(createNullStreamer(*Context));
+ AsmStreamer.reset(getTarget().createNullStreamer(*Context));
break;
}
// Create the AsmPrinter, which takes ownership of AsmStreamer if successful.
FunctionPass *Printer = getTarget().createAsmPrinter(*this, *AsmStreamer);
- if (Printer == 0)
+ if (!Printer)
return true;
// If successful, createAsmPrinter took ownership of AsmStreamer.
- AsmStreamer.take();
+ AsmStreamer.release();
PM.add(Printer);
@@ -238,7 +236,8 @@ bool LLVMTargetMachine::addPassesToEmitMachineCode(PassManagerBase &PM,
JITCodeEmitter &JCE,
bool DisableVerify) {
// Add common CodeGen passes.
- MCContext *Context = addPassesToGenerateCode(this, PM, DisableVerify, 0, 0);
+ MCContext *Context = addPassesToGenerateCode(this, PM, DisableVerify, nullptr,
+ nullptr);
if (!Context)
return true;
@@ -257,11 +256,11 @@ bool LLVMTargetMachine::addPassesToEmitMC(PassManagerBase &PM,
raw_ostream &Out,
bool DisableVerify) {
// Add common CodeGen passes.
- Ctx = addPassesToGenerateCode(this, PM, DisableVerify, 0, 0);
+ Ctx = addPassesToGenerateCode(this, PM, DisableVerify, nullptr, nullptr);
if (!Ctx)
return true;
- if (hasMCSaveTempLabels())
+ if (Options.MCOptions.MCSaveTempLabels)
Ctx->setAllowTemporaryLabels(false);
// Create the code emitter for the target if it exists. If not, .o file
@@ -272,23 +271,21 @@ bool LLVMTargetMachine::addPassesToEmitMC(PassManagerBase &PM,
STI, *Ctx);
MCAsmBackend *MAB = getTarget().createMCAsmBackend(MRI, getTargetTriple(),
TargetCPU);
- if (MCE == 0 || MAB == 0)
+ if (!MCE || !MAB)
return true;
- OwningPtr<MCStreamer> AsmStreamer;
- AsmStreamer.reset(getTarget().createMCObjectStreamer(getTargetTriple(), *Ctx,
- *MAB, Out, MCE,
- hasMCRelaxAll(),
- hasMCNoExecStack()));
- AsmStreamer.get()->InitSections();
+ std::unique_ptr<MCStreamer> AsmStreamer;
+ AsmStreamer.reset(getTarget().createMCObjectStreamer(
+ getTargetTriple(), *Ctx, *MAB, Out, MCE, STI,
+ Options.MCOptions.MCRelaxAll, Options.MCOptions.MCNoExecStack));
// Create the AsmPrinter, which takes ownership of AsmStreamer if successful.
FunctionPass *Printer = getTarget().createAsmPrinter(*this, *AsmStreamer);
- if (Printer == 0)
+ if (!Printer)
return true;
// If successful, createAsmPrinter took ownership of AsmStreamer.
- AsmStreamer.take();
+ AsmStreamer.release();
PM.add(Printer);
diff --git a/contrib/llvm/lib/CodeGen/LatencyPriorityQueue.cpp b/contrib/llvm/lib/CodeGen/LatencyPriorityQueue.cpp
index deab05a..cdf505e 100644
--- a/contrib/llvm/lib/CodeGen/LatencyPriorityQueue.cpp
+++ b/contrib/llvm/lib/CodeGen/LatencyPriorityQueue.cpp
@@ -13,12 +13,13 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "scheduler"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "scheduler"
+
bool latency_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
// The isScheduleHigh flag allows nodes with wraparound dependencies that
// cannot easily be modeled as edges with latencies to be scheduled as
@@ -53,7 +54,7 @@ bool latency_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
/// of SU, return it, otherwise return null.
SUnit *LatencyPriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
- SUnit *OnlyAvailablePred = 0;
+ SUnit *OnlyAvailablePred = nullptr;
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
SUnit &Pred = *I->getSUnit();
@@ -61,7 +62,7 @@ SUnit *LatencyPriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
// We found an available, but not scheduled, predecessor. If it's the
// only one we have found, keep track of it... otherwise give up.
if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
- return 0;
+ return nullptr;
OnlyAvailablePred = &Pred;
}
}
@@ -105,7 +106,7 @@ void LatencyPriorityQueue::AdjustPriorityOfUnscheduledPreds(SUnit *SU) {
if (SU->isAvailable) return; // All preds scheduled.
SUnit *OnlyAvailablePred = getSingleUnscheduledPred(SU);
- if (OnlyAvailablePred == 0 || !OnlyAvailablePred->isAvailable) return;
+ if (!OnlyAvailablePred || !OnlyAvailablePred->isAvailable) return;
// Okay, we found a single predecessor that is available, but not scheduled.
// Since it is available, it must be in the priority queue. First remove it.
@@ -117,14 +118,14 @@ void LatencyPriorityQueue::AdjustPriorityOfUnscheduledPreds(SUnit *SU) {
}
SUnit *LatencyPriorityQueue::pop() {
- if (empty()) return NULL;
+ if (empty()) return nullptr;
std::vector<SUnit *>::iterator Best = Queue.begin();
- for (std::vector<SUnit *>::iterator I = llvm::next(Queue.begin()),
+ for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
E = Queue.end(); I != E; ++I)
if (Picker(*Best, *I))
Best = I;
SUnit *V = *Best;
- if (Best != prior(Queue.end()))
+ if (Best != std::prev(Queue.end()))
std::swap(*Best, Queue.back());
Queue.pop_back();
return V;
@@ -133,7 +134,7 @@ SUnit *LatencyPriorityQueue::pop() {
void LatencyPriorityQueue::remove(SUnit *SU) {
assert(!Queue.empty() && "Queue is empty!");
std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(), SU);
- if (I != prior(Queue.end()))
+ if (I != std::prev(Queue.end()))
std::swap(*I, Queue.back());
Queue.pop_back();
}
diff --git a/contrib/llvm/lib/CodeGen/LexicalScopes.cpp b/contrib/llvm/lib/CodeGen/LexicalScopes.cpp
index ffe407a..d12c234 100644
--- a/contrib/llvm/lib/CodeGen/LexicalScopes.cpp
+++ b/contrib/llvm/lib/CodeGen/LexicalScopes.cpp
@@ -14,34 +14,31 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "lexicalscopes"
#include "llvm/CodeGen/LexicalScopes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
using namespace llvm;
-LexicalScopes::~LexicalScopes() {
- releaseMemory();
-}
+#define DEBUG_TYPE "lexicalscopes"
-/// releaseMemory - release memory.
-void LexicalScopes::releaseMemory() {
- MF = NULL;
- CurrentFnLexicalScope = NULL;
- DeleteContainerSeconds(LexicalScopeMap);
- DeleteContainerSeconds(AbstractScopeMap);
+/// reset - Reset the instance so that it's prepared for another function.
+void LexicalScopes::reset() {
+ MF = nullptr;
+ CurrentFnLexicalScope = nullptr;
+ LexicalScopeMap.clear();
+ AbstractScopeMap.clear();
InlinedLexicalScopeMap.clear();
AbstractScopesList.clear();
}
/// initialize - Scan machine function and construct lexical scope nest.
void LexicalScopes::initialize(const MachineFunction &Fn) {
- releaseMemory();
+ reset();
MF = &Fn;
SmallVector<InsnRange, 4> MIRanges;
DenseMap<const MachineInstr *, LexicalScope *> MI2ScopeMap;
@@ -54,35 +51,31 @@ void LexicalScopes::initialize(const MachineFunction &Fn) {
/// extractLexicalScopes - Extract instruction ranges for each lexical scopes
/// for the given machine function.
-void LexicalScopes::
-extractLexicalScopes(SmallVectorImpl<InsnRange> &MIRanges,
- DenseMap<const MachineInstr *, LexicalScope *> &MI2ScopeMap) {
+void LexicalScopes::extractLexicalScopes(
+ SmallVectorImpl<InsnRange> &MIRanges,
+ DenseMap<const MachineInstr *, LexicalScope *> &MI2ScopeMap) {
// Scan each instruction and create scopes. First build working set of scopes.
- for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
- I != E; ++I) {
- const MachineInstr *RangeBeginMI = NULL;
- const MachineInstr *PrevMI = NULL;
+ for (const auto &MBB : *MF) {
+ const MachineInstr *RangeBeginMI = nullptr;
+ const MachineInstr *PrevMI = nullptr;
DebugLoc PrevDL;
- for (MachineBasicBlock::const_iterator II = I->begin(), IE = I->end();
- II != IE; ++II) {
- const MachineInstr *MInsn = II;
-
+ for (const auto &MInsn : MBB) {
// Check if instruction has valid location information.
- const DebugLoc MIDL = MInsn->getDebugLoc();
+ const DebugLoc MIDL = MInsn.getDebugLoc();
if (MIDL.isUnknown()) {
- PrevMI = MInsn;
+ PrevMI = &MInsn;
continue;
}
// If scope has not changed then skip this instruction.
if (MIDL == PrevDL) {
- PrevMI = MInsn;
+ PrevMI = &MInsn;
continue;
}
// Ignore DBG_VALUE. It does not contribute to any instruction in output.
- if (MInsn->isDebugValue())
+ if (MInsn.isDebugValue())
continue;
if (RangeBeginMI) {
@@ -95,10 +88,10 @@ extractLexicalScopes(SmallVectorImpl<InsnRange> &MIRanges,
}
// This is a beginning of a new instruction range.
- RangeBeginMI = MInsn;
+ RangeBeginMI = &MInsn;
// Reset previous markers.
- PrevMI = MInsn;
+ PrevMI = &MInsn;
PrevDL = MIDL;
}
@@ -111,30 +104,41 @@ extractLexicalScopes(SmallVectorImpl<InsnRange> &MIRanges,
}
}
+LexicalScope *LexicalScopes::findInlinedScope(DebugLoc DL) {
+ MDNode *Scope = nullptr;
+ MDNode *IA = nullptr;
+ DL.getScopeAndInlinedAt(Scope, IA, MF->getFunction()->getContext());
+ auto I = InlinedLexicalScopeMap.find(std::make_pair(Scope, IA));
+ return I != InlinedLexicalScopeMap.end() ? &I->second : nullptr;
+}
+
/// findLexicalScope - Find lexical scope, either regular or inlined, for the
/// given DebugLoc. Return NULL if not found.
LexicalScope *LexicalScopes::findLexicalScope(DebugLoc DL) {
- MDNode *Scope = NULL;
- MDNode *IA = NULL;
+ MDNode *Scope = nullptr;
+ MDNode *IA = nullptr;
DL.getScopeAndInlinedAt(Scope, IA, MF->getFunction()->getContext());
- if (!Scope) return NULL;
+ if (!Scope)
+ return nullptr;
// The scope that we were created with could have an extra file - which
// isn't what we care about in this case.
DIDescriptor D = DIDescriptor(Scope);
if (D.isLexicalBlockFile())
Scope = DILexicalBlockFile(Scope).getScope();
-
- if (IA)
- return InlinedLexicalScopeMap.lookup(DebugLoc::getFromDILocation(IA));
- return LexicalScopeMap.lookup(Scope);
+
+ if (IA) {
+ auto I = InlinedLexicalScopeMap.find(std::make_pair(Scope, IA));
+ return I != InlinedLexicalScopeMap.end() ? &I->second : nullptr;
+ }
+ return findLexicalScope(Scope);
}
/// getOrCreateLexicalScope - Find lexical scope for the given DebugLoc. If
/// not available then create new lexical scope.
LexicalScope *LexicalScopes::getOrCreateLexicalScope(DebugLoc DL) {
- MDNode *Scope = NULL;
- MDNode *InlinedAt = NULL;
+ MDNode *Scope = nullptr;
+ MDNode *InlinedAt = nullptr;
DL.getScopeAndInlinedAt(Scope, InlinedAt, MF->getFunction()->getContext());
if (InlinedAt) {
@@ -143,7 +147,7 @@ LexicalScope *LexicalScopes::getOrCreateLexicalScope(DebugLoc DL) {
// Create an inlined scope for inlined function.
return getOrCreateInlinedScope(Scope, InlinedAt);
}
-
+
return getOrCreateRegularScope(Scope);
}
@@ -154,36 +158,49 @@ LexicalScope *LexicalScopes::getOrCreateRegularScope(MDNode *Scope) {
Scope = DILexicalBlockFile(Scope).getScope();
D = DIDescriptor(Scope);
}
-
- LexicalScope *WScope = LexicalScopeMap.lookup(Scope);
- if (WScope)
- return WScope;
- LexicalScope *Parent = NULL;
+ auto I = LexicalScopeMap.find(Scope);
+ if (I != LexicalScopeMap.end())
+ return &I->second;
+
+ LexicalScope *Parent = nullptr;
if (D.isLexicalBlock())
Parent = getOrCreateLexicalScope(DebugLoc::getFromDILexicalBlock(Scope));
- WScope = new LexicalScope(Parent, DIDescriptor(Scope), NULL, false);
- LexicalScopeMap.insert(std::make_pair(Scope, WScope));
- if (!Parent && DIDescriptor(Scope).isSubprogram()
- && DISubprogram(Scope).describes(MF->getFunction()))
- CurrentFnLexicalScope = WScope;
-
- return WScope;
+ // FIXME: Use forward_as_tuple instead of make_tuple, once MSVC2012
+ // compatibility is no longer required.
+ I = LexicalScopeMap.emplace(std::piecewise_construct, std::make_tuple(Scope),
+ std::make_tuple(Parent, DIDescriptor(Scope),
+ nullptr, false)).first;
+
+ if (!Parent && DIDescriptor(Scope).isSubprogram() &&
+ DISubprogram(Scope).describes(MF->getFunction()))
+ CurrentFnLexicalScope = &I->second;
+
+ return &I->second;
}
/// getOrCreateInlinedScope - Find or create an inlined lexical scope.
-LexicalScope *LexicalScopes::getOrCreateInlinedScope(MDNode *Scope,
+LexicalScope *LexicalScopes::getOrCreateInlinedScope(MDNode *ScopeNode,
MDNode *InlinedAt) {
- LexicalScope *InlinedScope = LexicalScopeMap.lookup(InlinedAt);
- if (InlinedScope)
- return InlinedScope;
-
- DebugLoc InlinedLoc = DebugLoc::getFromDILocation(InlinedAt);
- InlinedScope = new LexicalScope(getOrCreateLexicalScope(InlinedLoc),
- DIDescriptor(Scope), InlinedAt, false);
- InlinedLexicalScopeMap[InlinedLoc] = InlinedScope;
- LexicalScopeMap[InlinedAt] = InlinedScope;
- return InlinedScope;
+ std::pair<const MDNode*, const MDNode*> P(ScopeNode, InlinedAt);
+ auto I = InlinedLexicalScopeMap.find(P);
+ if (I != InlinedLexicalScopeMap.end())
+ return &I->second;
+
+ LexicalScope *Parent;
+ DILexicalBlock Scope(ScopeNode);
+ if (Scope.isSubprogram())
+ Parent = getOrCreateLexicalScope(DebugLoc::getFromDILocation(InlinedAt));
+ else
+ Parent = getOrCreateInlinedScope(Scope.getContext(), InlinedAt);
+
+ // FIXME: Use forward_as_tuple instead of make_tuple, once MSVC2012
+ // compatibility is no longer required.
+ I = InlinedLexicalScopeMap.emplace(std::piecewise_construct,
+ std::make_tuple(P),
+ std::make_tuple(Parent, Scope, InlinedAt,
+ false)).first;
+ return &I->second;
}
/// getOrCreateAbstractScope - Find or create an abstract lexical scope.
@@ -193,26 +210,28 @@ LexicalScope *LexicalScopes::getOrCreateAbstractScope(const MDNode *N) {
DIDescriptor Scope(N);
if (Scope.isLexicalBlockFile())
Scope = DILexicalBlockFile(Scope).getScope();
- LexicalScope *AScope = AbstractScopeMap.lookup(N);
- if (AScope)
- return AScope;
+ auto I = AbstractScopeMap.find(Scope);
+ if (I != AbstractScopeMap.end())
+ return &I->second;
- LexicalScope *Parent = NULL;
+ LexicalScope *Parent = nullptr;
if (Scope.isLexicalBlock()) {
- DILexicalBlock DB(N);
+ DILexicalBlock DB(Scope);
DIDescriptor ParentDesc = DB.getContext();
Parent = getOrCreateAbstractScope(ParentDesc);
}
- AScope = new LexicalScope(Parent, DIDescriptor(N), NULL, true);
- AbstractScopeMap[N] = AScope;
- if (DIDescriptor(N).isSubprogram())
- AbstractScopesList.push_back(AScope);
- return AScope;
+ I = AbstractScopeMap.emplace(std::piecewise_construct,
+ std::forward_as_tuple(Scope),
+ std::forward_as_tuple(Parent, Scope,
+ nullptr, true)).first;
+ if (Scope.isSubprogram())
+ AbstractScopesList.push_back(&I->second);
+ return &I->second;
}
/// constructScopeNest
void LexicalScopes::constructScopeNest(LexicalScope *Scope) {
- assert (Scope && "Unable to calculate scope dominance graph!");
+ assert(Scope && "Unable to calculate scope dominance graph!");
SmallVector<LexicalScope *, 4> WorkStack;
WorkStack.push_back(Scope);
unsigned Counter = 0;
@@ -221,7 +240,8 @@ void LexicalScopes::constructScopeNest(LexicalScope *Scope) {
const SmallVectorImpl<LexicalScope *> &Children = WS->getChildren();
bool visitedChildren = false;
for (SmallVectorImpl<LexicalScope *>::const_iterator SI = Children.begin(),
- SE = Children.end(); SI != SE; ++SI) {
+ SE = Children.end();
+ SI != SE; ++SI) {
LexicalScope *ChildScope = *SI;
if (!ChildScope->getDFSOut()) {
WorkStack.push_back(ChildScope);
@@ -239,17 +259,17 @@ void LexicalScopes::constructScopeNest(LexicalScope *Scope) {
/// assignInstructionRanges - Find ranges of instructions covered by each
/// lexical scope.
-void LexicalScopes::
-assignInstructionRanges(SmallVectorImpl<InsnRange> &MIRanges,
- DenseMap<const MachineInstr *, LexicalScope *> &MI2ScopeMap)
-{
-
- LexicalScope *PrevLexicalScope = NULL;
+void LexicalScopes::assignInstructionRanges(
+ SmallVectorImpl<InsnRange> &MIRanges,
+ DenseMap<const MachineInstr *, LexicalScope *> &MI2ScopeMap) {
+
+ LexicalScope *PrevLexicalScope = nullptr;
for (SmallVectorImpl<InsnRange>::const_iterator RI = MIRanges.begin(),
- RE = MIRanges.end(); RI != RE; ++RI) {
+ RE = MIRanges.end();
+ RI != RE; ++RI) {
const InsnRange &R = *RI;
LexicalScope *S = MI2ScopeMap.lookup(R.first);
- assert (S && "Lost LexicalScope for a machine instruction!");
+ assert(S && "Lost LexicalScope for a machine instruction!");
if (PrevLexicalScope && !PrevLexicalScope->dominates(S))
PrevLexicalScope->closeInsnRange(S);
S->openInsnRange(R.first);
@@ -262,26 +282,25 @@ assignInstructionRanges(SmallVectorImpl<InsnRange> &MIRanges,
}
/// getMachineBasicBlocks - Populate given set using machine basic blocks which
-/// have machine instructions that belong to lexical scope identified by
+/// have machine instructions that belong to lexical scope identified by
/// DebugLoc.
-void LexicalScopes::
-getMachineBasicBlocks(DebugLoc DL,
- SmallPtrSet<const MachineBasicBlock*, 4> &MBBs) {
+void LexicalScopes::getMachineBasicBlocks(
+ DebugLoc DL, SmallPtrSet<const MachineBasicBlock *, 4> &MBBs) {
MBBs.clear();
LexicalScope *Scope = getOrCreateLexicalScope(DL);
if (!Scope)
return;
-
+
if (Scope == CurrentFnLexicalScope) {
- for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
- I != E; ++I)
- MBBs.insert(I);
+ for (const auto &MBB : *MF)
+ MBBs.insert(&MBB);
return;
}
SmallVectorImpl<InsnRange> &InsnRanges = Scope->getRanges();
for (SmallVectorImpl<InsnRange>::iterator I = InsnRanges.begin(),
- E = InsnRanges.end(); I != E; ++I) {
+ E = InsnRanges.end();
+ I != E; ++I) {
InsnRange &R = *I;
MBBs.insert(R.first->getParent());
}
@@ -299,8 +318,8 @@ bool LexicalScopes::dominates(DebugLoc DL, MachineBasicBlock *MBB) {
return true;
bool Result = false;
- for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
- I != E; ++I) {
+ for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
+ ++I) {
DebugLoc IDL = I->getDebugLoc();
if (IDL.isUnknown())
continue;
@@ -311,8 +330,6 @@ bool LexicalScopes::dominates(DebugLoc DL, MachineBasicBlock *MBB) {
return Result;
}
-void LexicalScope::anchor() { }
-
/// dump - Print data structures.
void LexicalScope::dump(unsigned Indent) const {
#ifndef NDEBUG
@@ -332,4 +349,3 @@ void LexicalScope::dump(unsigned Indent) const {
Children[i]->dump(Indent + 2);
#endif
}
-
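A note on the map idiom the LexicalScopes changes above adopt: scopes are now stored by value and constructed in place with emplace(std::piecewise_construct, ...) instead of being new-ed and tracked through raw pointers. The following standalone sketch shows that idiom with std::map and an invented Scope type; it only illustrates the container pattern and uses none of the real LLVM types or maps.

#include <iostream>
#include <map>
#include <string>
#include <tuple>

struct Scope {
  Scope(Scope *Parent, std::string Name) : Parent(Parent), Name(std::move(Name)) {}
  Scope(const Scope &) = delete;   // non-copyable, like LexicalScope
  Scope *Parent;
  std::string Name;
};

int main() {
  std::map<int, Scope> Scopes;
  // Construct the value directly inside the map: no separate allocation,
  // and the map owns the object's lifetime (nothing to delete on clear()).
  auto I = Scopes.emplace(std::piecewise_construct,
                          std::forward_as_tuple(1),
                          std::forward_as_tuple(nullptr, "outer")).first;
  // Node-based maps keep &I->second stable, so parent pointers stay valid.
  Scopes.emplace(std::piecewise_construct,
                 std::forward_as_tuple(2),
                 std::forward_as_tuple(&I->second, "inner"));
  std::cout << Scopes.at(2).Parent->Name << "\n";  // prints "outer"
}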
diff --git a/contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp b/contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp
index 25645e0..388f58f 100644
--- a/contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp
@@ -19,7 +19,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "livedebug"
#include "LiveDebugVariables.h"
#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/Statistic.h"
@@ -31,8 +30,8 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/VirtRegMap.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"
@@ -41,8 +40,12 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include <memory>
+
using namespace llvm;
+#define DEBUG_TYPE "livedebug"
+
static cl::opt<bool>
EnableLDV("live-debug-variables", cl::init(true),
cl::desc("Enable the live debug variables pass"), cl::Hidden);
@@ -64,7 +67,7 @@ void LiveDebugVariables::getAnalysisUsage(AnalysisUsage &AU) const {
MachineFunctionPass::getAnalysisUsage(AU);
}
-LiveDebugVariables::LiveDebugVariables() : MachineFunctionPass(ID), pImpl(0) {
+LiveDebugVariables::LiveDebugVariables() : MachineFunctionPass(ID), pImpl(nullptr) {
initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
}
@@ -72,7 +75,7 @@ LiveDebugVariables::LiveDebugVariables() : MachineFunctionPass(ID), pImpl(0) {
typedef IntervalMap<SlotIndex, unsigned, 4> LocMap;
namespace {
-/// UserValueScopes - Keeps track of lexical scopes associated with an
+/// UserValueScopes - Keeps track of lexical scopes associated with a
/// user value's source location.
class UserValueScopes {
DebugLoc DL;
@@ -139,7 +142,7 @@ public:
UserValue(const MDNode *var, unsigned o, bool i, DebugLoc L,
LocMap::Allocator &alloc)
: variable(var), offset(o), IsIndirect(i), dl(L), leader(this),
- next(0), locInts(alloc)
+ next(nullptr), locInts(alloc)
{}
/// getLeader - Get the leader of this value's equivalence class.
@@ -154,8 +157,8 @@ public:
UserValue *getNext() const { return next; }
/// match - Does this UserValue match the parameters?
- bool match(const MDNode *Var, unsigned Offset) const {
- return Var == variable && Offset == offset;
+ bool match(const MDNode *Var, unsigned Offset, bool indirect) const {
+ return Var == variable && Offset == offset && indirect == IsIndirect;
}
/// merge - Merge equivalence classes.
@@ -292,7 +295,7 @@ class LDVImpl {
bool ModifiedMF;
/// userValues - All allocated UserValue instances.
- SmallVector<UserValue*, 8> userValues;
+ SmallVector<std::unique_ptr<UserValue>, 8> userValues;
/// Map virtual register to eq class leader.
typedef DenseMap<unsigned, UserValue*> VRMap;
@@ -332,7 +335,6 @@ public:
/// clear - Release all memory.
void clear() {
- DeleteContainerPointers(userValues);
userValues.clear();
virtRegToEqClass.clear();
userVarMap.clear();
@@ -425,12 +427,13 @@ UserValue *LDVImpl::getUserValue(const MDNode *Var, unsigned Offset,
UserValue *UV = Leader->getLeader();
Leader = UV;
for (; UV; UV = UV->getNext())
- if (UV->match(Var, Offset))
+ if (UV->match(Var, Offset, IsIndirect))
return UV;
}
- UserValue *UV = new UserValue(Var, Offset, IsIndirect, DL, allocator);
- userValues.push_back(UV);
+ userValues.push_back(
+ make_unique<UserValue>(Var, Offset, IsIndirect, DL, allocator));
+ UserValue *UV = userValues.back().get();
Leader = UserValue::merge(Leader, UV);
return UV;
}
@@ -444,7 +447,7 @@ void LDVImpl::mapVirtReg(unsigned VirtReg, UserValue *EC) {
UserValue *LDVImpl::lookupVirtReg(unsigned VirtReg) {
if (UserValue *UV = virtRegToEqClass.lookup(VirtReg))
return UV->getLeader();
- return 0;
+ return nullptr;
}
bool LDVImpl::handleDebugValue(MachineInstr *MI, SlotIndex Idx) {
@@ -480,7 +483,7 @@ bool LDVImpl::collectDebugValues(MachineFunction &mf) {
// DBG_VALUE has no slot index, use the previous instruction instead.
SlotIndex Idx = MBBI == MBB->begin() ?
LIS->getMBBStartIdx(MBB) :
- LIS->getInstructionIndex(llvm::prior(MBBI)).getRegSlot();
+ LIS->getInstructionIndex(std::prev(MBBI)).getRegSlot();
// Handle consecutive DBG_VALUE instructions with the same slot index.
do {
if (handleDebugValue(MBBI, Idx)) {
@@ -568,13 +571,11 @@ UserValue::addDefsFromCopies(LiveInterval *LI, unsigned LocNo,
// Collect all the (vreg, valno) pairs that are copies of LI.
SmallVector<std::pair<LiveInterval*, const VNInfo*>, 8> CopyValues;
- for (MachineRegisterInfo::use_nodbg_iterator
- UI = MRI.use_nodbg_begin(LI->reg),
- UE = MRI.use_nodbg_end(); UI != UE; ++UI) {
+ for (MachineOperand &MO : MRI.use_nodbg_operands(LI->reg)) {
+ MachineInstr *MI = MO.getParent();
// Copies of the full value.
- if (UI.getOperand().getSubReg() || !UI->isCopy())
+ if (MO.getSubReg() || !MI->isCopy())
continue;
- MachineInstr *MI = &*UI;
unsigned DstReg = MI->getOperand(0).getReg();
// Don't follow copies to physregs. These are usually setting up call
@@ -648,14 +649,14 @@ UserValue::computeIntervals(MachineRegisterInfo &MRI,
const MachineOperand &Loc = locations[LocNo];
if (!Loc.isReg()) {
- extendDef(Idx, LocNo, 0, 0, 0, LIS, MDT, UVS);
+ extendDef(Idx, LocNo, nullptr, nullptr, nullptr, LIS, MDT, UVS);
continue;
}
// Register locations are constrained to where the register value is live.
if (TargetRegisterInfo::isVirtualRegister(Loc.getReg())) {
- LiveInterval *LI = 0;
- const VNInfo *VNI = 0;
+ LiveInterval *LI = nullptr;
+ const VNInfo *VNI = nullptr;
if (LIS.hasInterval(Loc.getReg())) {
LI = &LIS.getInterval(Loc.getReg());
VNI = LI->getVNInfoAt(Idx);
@@ -672,7 +673,7 @@ UserValue::computeIntervals(MachineRegisterInfo &MRI,
LiveRange *LR = &LIS.getRegUnit(Unit);
const VNInfo *VNI = LR->getVNInfoAt(Idx);
// Don't track copies from physregs, it is too expensive.
- extendDef(Idx, LocNo, LR, VNI, 0, LIS, MDT, UVS);
+ extendDef(Idx, LocNo, LR, VNI, nullptr, LIS, MDT, UVS);
}
// Finally, erase all the undefs.
@@ -704,7 +705,6 @@ bool LDVImpl::runOnMachineFunction(MachineFunction &mf) {
bool Changed = collectDebugValues(mf);
computeIntervals();
DEBUG(print(dbgs()));
- LS.releaseMemory();
ModifiedMF = Changed;
return Changed;
}
@@ -736,7 +736,7 @@ UserValue::splitLocation(unsigned OldLocNo, ArrayRef<unsigned> NewRegs,
LiveIntervals& LIS) {
DEBUG({
dbgs() << "Splitting Loc" << OldLocNo << '\t';
- print(dbgs(), 0);
+ print(dbgs(), nullptr);
});
bool DidChange = false;
LocMap::iterator LocMapI;
@@ -826,7 +826,7 @@ UserValue::splitLocation(unsigned OldLocNo, ArrayRef<unsigned> NewRegs,
}
}
- DEBUG({dbgs() << "Split result: \t"; print(dbgs(), 0);});
+ DEBUG({dbgs() << "Split result: \t"; print(dbgs(), nullptr);});
return DidChange;
}
@@ -915,7 +915,7 @@ findInsertLocation(MachineBasicBlock *MBB, SlotIndex Idx,
// Don't insert anything after the first terminator, though.
return MI->isTerminator() ? MBB->getFirstTerminator() :
- llvm::next(MachineBasicBlock::iterator(MI));
+ std::next(MachineBasicBlock::iterator(MI));
}
DebugLoc UserValue::findDebugLoc() {
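The LiveDebugVariables changes above replace a container of raw UserValue pointers (freed with DeleteContainerPointers) with a container of std::unique_ptr, so clear() alone releases all memory. Below is a minimal standalone sketch of that ownership pattern; it uses std::vector and std::make_unique instead of LLVM's SmallVector and make_unique helper, and the UserValue stand-in is invented for illustration.

#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct UserValue {                      // stand-in for the real class
  explicit UserValue(std::string Var) : Var(std::move(Var)) {}
  std::string Var;
};

int main() {
  std::vector<std::unique_ptr<UserValue>> UserValues;

  // push_back(make_unique<...>): the vector owns the object.
  UserValues.push_back(std::make_unique<UserValue>("x"));
  UserValue *UV = UserValues.back().get();  // non-owning handle, as in getUserValue()
  std::cout << UV->Var << "\n";

  // clear() now frees everything; no manual delete loop is needed.
  UserValues.clear();
}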
diff --git a/contrib/llvm/lib/CodeGen/LiveDebugVariables.h b/contrib/llvm/lib/CodeGen/LiveDebugVariables.h
index 58a3f0f..bb67435 100644
--- a/contrib/llvm/lib/CodeGen/LiveDebugVariables.h
+++ b/contrib/llvm/lib/CodeGen/LiveDebugVariables.h
@@ -61,9 +61,9 @@ public:
private:
- virtual bool runOnMachineFunction(MachineFunction &);
- virtual void releaseMemory();
- virtual void getAnalysisUsage(AnalysisUsage &) const;
+ bool runOnMachineFunction(MachineFunction &) override;
+ void releaseMemory() override;
+ void getAnalysisUsage(AnalysisUsage &) const override;
};
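The header change above drops redundant 'virtual' and marks the overriding methods with 'override', so the compiler verifies they really override a base-class virtual. A small self-contained sketch of what that buys; the base class here is invented purely for illustration.

#include <iostream>

struct MachineFunctionPassLike {                 // invented base for illustration
  virtual ~MachineFunctionPassLike() = default;
  virtual bool runOnMachineFunction(int &MF) { return false; }
};

struct MyPass : MachineFunctionPassLike {
  // 'override' turns a typo or a changed base signature into a hard error
  // instead of silently introducing an unrelated virtual function.
  bool runOnMachineFunction(int &MF) override { return true; }
};

int main() {
  MyPass P;
  MachineFunctionPassLike &Base = P;
  int MF = 0;
  std::cout << Base.runOnMachineFunction(MF) << "\n";  // prints 1
}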
diff --git a/contrib/llvm/lib/CodeGen/LiveInterval.cpp b/contrib/llvm/lib/CodeGen/LiveInterval.cpp
index 2b8feb8..ce8ce96 100644
--- a/contrib/llvm/lib/CodeGen/LiveInterval.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveInterval.cpp
@@ -222,13 +222,13 @@ void LiveRange::extendSegmentEndTo(iterator I, SlotIndex NewEnd) {
VNInfo *ValNo = I->valno;
// Search for the first segment that we can't merge with.
- iterator MergeTo = llvm::next(I);
+ iterator MergeTo = std::next(I);
for (; MergeTo != end() && NewEnd >= MergeTo->end; ++MergeTo) {
assert(MergeTo->valno == ValNo && "Cannot merge with differing values!");
}
// If NewEnd was in the middle of a segment, make sure to get its endpoint.
- I->end = std::max(NewEnd, prior(MergeTo)->end);
+ I->end = std::max(NewEnd, std::prev(MergeTo)->end);
// If the newly formed segment now touches the segment after it and if they
// have the same value number, merge the two segments into one segment.
@@ -239,7 +239,7 @@ void LiveRange::extendSegmentEndTo(iterator I, SlotIndex NewEnd) {
}
// Erase any dead segments.
- segments.erase(llvm::next(I), MergeTo);
+ segments.erase(std::next(I), MergeTo);
}
@@ -274,7 +274,7 @@ LiveRange::extendSegmentStartTo(iterator I, SlotIndex NewStart) {
MergeTo->end = I->end;
}
- segments.erase(llvm::next(MergeTo), llvm::next(I));
+ segments.erase(std::next(MergeTo), std::next(I));
return MergeTo;
}
@@ -285,7 +285,7 @@ LiveRange::iterator LiveRange::addSegmentFrom(Segment S, iterator From) {
// If the inserted segment starts in the middle or right at the end of
// another segment, just extend that segment to contain the segment of S.
if (it != begin()) {
- iterator B = prior(it);
+ iterator B = std::prev(it);
if (S.valno == B->valno) {
if (B->start <= Start && B->end >= Start) {
extendSegmentEndTo(B, End);
@@ -331,13 +331,13 @@ LiveRange::iterator LiveRange::addSegmentFrom(Segment S, iterator From) {
/// the value. If there is no live range before Kill, return NULL.
VNInfo *LiveRange::extendInBlock(SlotIndex StartIdx, SlotIndex Kill) {
if (empty())
- return 0;
+ return nullptr;
iterator I = std::upper_bound(begin(), end(), Kill.getPrevSlot());
if (I == begin())
- return 0;
+ return nullptr;
--I;
if (I->end <= StartIdx)
- return 0;
+ return nullptr;
if (I->end < Kill)
extendSegmentEndTo(I, Kill);
return I->valno;
@@ -389,7 +389,7 @@ void LiveRange::removeSegment(SlotIndex Start, SlotIndex End,
I->end = Start; // Trim the old segment.
// Insert the new one.
- segments.insert(llvm::next(I), Segment(End, OldEnd, ValNo));
+ segments.insert(std::next(I), Segment(End, OldEnd, ValNo));
}
/// removeValNo - Remove all the segments defined by the specified value#.
@@ -433,9 +433,9 @@ void LiveRange::join(LiveRange &Other,
iterator OutIt = begin();
OutIt->valno = NewVNInfo[LHSValNoAssignments[OutIt->valno->id]];
- for (iterator I = llvm::next(OutIt), E = end(); I != E; ++I) {
+ for (iterator I = std::next(OutIt), E = end(); I != E; ++I) {
VNInfo* nextValNo = NewVNInfo[LHSValNoAssignments[I->valno->id]];
- assert(nextValNo != 0 && "Huh?");
+ assert(nextValNo && "Huh?");
// If this live range has the same value # as its immediate predecessor,
// and if they are neighbors, remove one Segment. This happens when we
@@ -638,13 +638,13 @@ void LiveRange::verify() const {
assert(I->start.isValid());
assert(I->end.isValid());
assert(I->start < I->end);
- assert(I->valno != 0);
+ assert(I->valno != nullptr);
assert(I->valno->id < valnos.size());
assert(I->valno == valnos[I->valno->id]);
- if (llvm::next(I) != E) {
- assert(I->end <= llvm::next(I)->start);
- if (I->end == llvm::next(I)->start)
- assert(I->valno != llvm::next(I)->valno);
+ if (std::next(I) != E) {
+ assert(I->end <= std::next(I)->start);
+ if (I->end == std::next(I)->start)
+ assert(I->valno != std::next(I)->valno);
}
}
}
@@ -857,7 +857,7 @@ unsigned ConnectedVNInfoEqClasses::Classify(const LiveInterval *LI) {
EqClass.clear();
EqClass.grow(LI->getNumValNums());
- const VNInfo *used = 0, *unused = 0;
+ const VNInfo *used = nullptr, *unused = nullptr;
// Determine connections.
for (LiveInterval::const_vni_iterator I = LI->vni_begin(), E = LI->vni_end();
@@ -905,8 +905,8 @@ void ConnectedVNInfoEqClasses::Distribute(LiveInterval *LIV[],
// Rewrite instructions.
for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LI.reg),
RE = MRI.reg_end(); RI != RE;) {
- MachineOperand &MO = RI.getOperand();
- MachineInstr *MI = MO.getParent();
+ MachineOperand &MO = *RI;
+ MachineInstr *MI = RI->getParent();
++RI;
// DBG_VALUE instructions don't have slot indexes, so get the index of the
// instruction before them.
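Throughout the LiveInterval changes above, llvm::next and llvm::prior are replaced by the standard std::next and std::prev from <iterator>. A minimal sketch of those calls on an ordinary container, mirroring how extendSegmentEndTo() trims a half-open range of segments; the integer "segments" are placeholders.

#include <cassert>
#include <iterator>
#include <list>

int main() {
  std::list<int> Segments = {10, 20, 30};
  auto I = std::next(Segments.begin());   // points at 20
  assert(*std::prev(I) == 10);            // neighbour before
  assert(*std::next(I) == 30);            // neighbour after
  // Erase the half-open range after I, the shape of
  // segments.erase(std::next(I), MergeTo) in extendSegmentEndTo().
  Segments.erase(std::next(I), Segments.end());
  assert(Segments.size() == 2);
}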
diff --git a/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp b/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
index e1c3217..1559560 100644
--- a/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -15,13 +15,13 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "regalloc"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "LiveRangeCalc.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -41,6 +41,8 @@
#include <limits>
using namespace llvm;
+#define DEBUG_TYPE "regalloc"
+
char LiveIntervals::ID = 0;
char &llvm::LiveIntervalsID = LiveIntervals::ID;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
@@ -78,7 +80,7 @@ void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
}
LiveIntervals::LiveIntervals() : MachineFunctionPass(ID),
- DomTree(0), LRCalc(0) {
+ DomTree(nullptr), LRCalc(nullptr) {
initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
}
@@ -184,6 +186,7 @@ void LiveIntervals::computeVirtRegInterval(LiveInterval &LI) {
LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
LRCalc->createDeadDefs(LI);
LRCalc->extendToUses(LI);
+ computeDeadValues(&LI, LI, nullptr, nullptr);
}
void LiveIntervals::computeVirtRegs() {
@@ -325,8 +328,10 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
SmallPtrSet<MachineBasicBlock*, 16> LiveOut;
// Visit all instructions reading li->reg.
- for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(li->reg);
- MachineInstr *UseMI = I.skipInstruction();) {
+ for (MachineRegisterInfo::reg_instr_iterator
+ I = MRI->reg_instr_begin(li->reg), E = MRI->reg_instr_end();
+ I != E; ) {
+ MachineInstr *UseMI = &*(I++);
if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
continue;
SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
@@ -408,21 +413,34 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
// Handle dead values.
bool CanSeparate = false;
+ computeDeadValues(li, NewLR, &CanSeparate, dead);
+
+ // Move the trimmed segments back.
+ li->segments.swap(NewLR.segments);
+ DEBUG(dbgs() << "Shrunk: " << *li << '\n');
+ return CanSeparate;
+}
+
+void LiveIntervals::computeDeadValues(LiveInterval *li,
+ LiveRange &LR,
+ bool *CanSeparate,
+ SmallVectorImpl<MachineInstr*> *dead) {
for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
I != E; ++I) {
VNInfo *VNI = *I;
if (VNI->isUnused())
continue;
- LiveRange::iterator LRI = NewLR.FindSegmentContaining(VNI->def);
- assert(LRI != NewLR.end() && "Missing segment for PHI");
+ LiveRange::iterator LRI = LR.FindSegmentContaining(VNI->def);
+ assert(LRI != LR.end() && "Missing segment for PHI");
if (LRI->end != VNI->def.getDeadSlot())
continue;
if (VNI->isPHIDef()) {
// This is a dead PHI. Remove it.
VNI->markUnused();
- NewLR.removeSegment(LRI->start, LRI->end);
+ LR.removeSegment(LRI->start, LRI->end);
DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
- CanSeparate = true;
+ if (CanSeparate)
+ *CanSeparate = true;
} else {
// This is a dead def. Make sure the instruction knows.
MachineInstr *MI = getInstructionFromIndex(VNI->def);
@@ -434,11 +452,6 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
}
}
}
-
- // Move the trimmed segments back.
- li->segments.swap(NewLR.segments);
- DEBUG(dbgs() << "Shrunk: " << *li << '\n');
- return CanSeparate;
}
void LiveIntervals::extendToIndices(LiveRange &LR,
@@ -458,7 +471,7 @@ void LiveIntervals::pruneValue(LiveInterval *LI, SlotIndex Kill,
MachineBasicBlock *KillMBB = Indexes->getMBBFromIndex(Kill);
SlotIndex MBBStart, MBBEnd;
- tie(MBBStart, MBBEnd) = Indexes->getMBBRange(KillMBB);
+ std::tie(MBBStart, MBBEnd) = Indexes->getMBBRange(KillMBB);
// If VNI isn't live out from KillMBB, the value is trivially pruned.
if (LRQ.endPoint() < MBBEnd) {
@@ -485,7 +498,7 @@ void LiveIntervals::pruneValue(LiveInterval *LI, SlotIndex Kill,
MachineBasicBlock *MBB = *I;
// Check if VNI is live in to MBB.
- tie(MBBStart, MBBEnd) = Indexes->getMBBRange(MBB);
+ std::tie(MBBStart, MBBEnd) = Indexes->getMBBRange(MBB);
LiveQueryResult LRQ = LI->Query(MBBStart);
if (LRQ.valueIn() != VNI) {
// This block isn't part of the VNI segment. Prune the search.
@@ -569,9 +582,9 @@ void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
break;
}
if (CancelKill)
- MI->clearRegisterKills(Reg, NULL);
+ MI->clearRegisterKills(Reg, nullptr);
else
- MI->addRegisterKilled(Reg, NULL);
+ MI->addRegisterKilled(Reg, nullptr);
}
}
}
@@ -587,17 +600,17 @@ LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const {
SlotIndex Start = LI.beginIndex();
if (Start.isBlock())
- return NULL;
+ return nullptr;
SlotIndex Stop = LI.endIndex();
if (Stop.isBlock())
- return NULL;
+ return nullptr;
// getMBBFromIndex doesn't need to search the MBB table when both indexes
// belong to proper instructions.
MachineBasicBlock *MBB1 = Indexes->getMBBFromIndex(Start);
MachineBasicBlock *MBB2 = Indexes->getMBBFromIndex(Stop);
- return MBB1 == MBB2 ? MBB1 : NULL;
+ return MBB1 == MBB2 ? MBB1 : nullptr;
}
bool
@@ -620,9 +633,12 @@ LiveIntervals::hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const {
}
float
-LiveIntervals::getSpillWeight(bool isDef, bool isUse, BlockFrequency freq) {
- const float Scale = 1.0f / BlockFrequency::getEntryFrequency();
- return (isDef + isUse) * (freq.getFrequency() * Scale);
+LiveIntervals::getSpillWeight(bool isDef, bool isUse,
+ const MachineBlockFrequencyInfo *MBFI,
+ const MachineInstr *MI) {
+ BlockFrequency Freq = MBFI->getBlockFreq(MI->getParent());
+ const float Scale = 1.0f / MBFI->getEntryFreq();
+ return (isDef + isUse) * (Freq.getFrequency() * Scale);
}
LiveRange::Segment
@@ -870,8 +886,8 @@ private:
// values. The new range should be placed immediately before NewI, move any
// intermediate ranges up.
assert(NewI != I && "Inconsistent iterators");
- std::copy(llvm::next(I), NewI, I);
- *llvm::prior(NewI)
+ std::copy(std::next(I), NewI, I);
+ *std::prev(NewI)
= LiveRange::Segment(DefVNI->def, NewIdx.getDeadSlot(), DefVNI);
}
@@ -916,7 +932,7 @@ private:
if (I == E || !SlotIndex::isSameInstr(I->start, OldIdx)) {
// No def, search for the new kill.
// This can never be an early clobber kill since there is no def.
- llvm::prior(I)->end = findLastUseBefore(Reg).getRegSlot();
+ std::prev(I)->end = findLastUseBefore(Reg).getRegSlot();
return;
}
}
@@ -952,7 +968,7 @@ private:
// DefVNI is a dead def. It may have been moved across other values in LR,
// so move I up to NewI. Slide [NewI;I) down one position.
- std::copy_backward(NewI, I, llvm::next(I));
+ std::copy_backward(NewI, I, std::next(I));
*NewI = LiveRange::Segment(DefVNI->def, NewIdx.getDeadSlot(), DefVNI);
}
@@ -964,11 +980,11 @@ private:
"No RegMask at OldIdx.");
*RI = NewIdx.getRegSlot();
assert((RI == LIS.RegMaskSlots.begin() ||
- SlotIndex::isEarlierInstr(*llvm::prior(RI), *RI)) &&
- "Cannot move regmask instruction above another call");
- assert((llvm::next(RI) == LIS.RegMaskSlots.end() ||
- SlotIndex::isEarlierInstr(*RI, *llvm::next(RI))) &&
- "Cannot move regmask instruction below another call");
+ SlotIndex::isEarlierInstr(*std::prev(RI), *RI)) &&
+ "Cannot move regmask instruction above another call");
+ assert((std::next(RI) == LIS.RegMaskSlots.end() ||
+ SlotIndex::isEarlierInstr(*RI, *std::next(RI))) &&
+ "Cannot move regmask instruction below another call");
}
// Return the last use of reg between NewIdx and OldIdx.
@@ -976,10 +992,10 @@ private:
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
SlotIndex LastUse = NewIdx;
- for (MachineRegisterInfo::use_nodbg_iterator
- UI = MRI.use_nodbg_begin(Reg),
- UE = MRI.use_nodbg_end();
- UI != UE; UI.skipInstruction()) {
+ for (MachineRegisterInfo::use_instr_nodbg_iterator
+ UI = MRI.use_instr_nodbg_begin(Reg),
+ UE = MRI.use_instr_nodbg_end();
+ UI != UE; ++UI) {
const MachineInstr* MI = &*UI;
SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
if (InstSlot > LastUse && InstSlot < OldIdx)
@@ -1121,7 +1137,7 @@ LiveIntervals::repairIntervalsInRange(MachineBasicBlock *MBB,
if (LII->end.isDead()) {
SlotIndex prevStart;
if (LII != LI.begin())
- prevStart = llvm::prior(LII)->start;
+ prevStart = std::prev(LII)->start;
// FIXME: This could be more efficient if there was a
// removeSegment method that returned an iterator.
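The getSpillWeight hunk above swaps a pre-computed BlockFrequency argument for a lookup through MachineBlockFrequencyInfo, but the arithmetic is unchanged: (isDef + isUse) scaled by the containing block's frequency relative to the function entry. A worked sketch of just that normalization, with made-up frequency numbers standing in for MBFI->getBlockFreq() and MBFI->getEntryFreq().

#include <cstdint>
#include <iostream>

// Hypothetical frequencies; only the normalization is illustrated.
float spillWeight(bool IsDef, bool IsUse, uint64_t BlockFreq, uint64_t EntryFreq) {
  const float Scale = 1.0f / EntryFreq;
  return (IsDef + IsUse) * (BlockFreq * Scale);
}

int main() {
  // A use in a block that runs 8x as often as the entry block weighs 8x more
  // than the same use in the entry block itself.
  std::cout << spillWeight(false, true, 8192, 1024) << "\n";  // 8
  std::cout << spillWeight(true, true, 1024, 1024) << "\n";   // 2
}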
diff --git a/contrib/llvm/lib/CodeGen/LiveIntervalUnion.cpp b/contrib/llvm/lib/CodeGen/LiveIntervalUnion.cpp
index d5a81a3..d81221b 100644
--- a/contrib/llvm/lib/CodeGen/LiveIntervalUnion.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveIntervalUnion.cpp
@@ -13,7 +13,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "regalloc"
#include "llvm/CodeGen/LiveIntervalUnion.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/Support/Debug.h"
@@ -23,6 +22,8 @@
using namespace llvm;
+#define DEBUG_TYPE "regalloc"
+
// Merge a LiveInterval's segments. Guarantee no overlaps.
void LiveIntervalUnion::unify(LiveInterval &VirtReg) {
@@ -138,7 +139,7 @@ collectInterferingVRegs(unsigned MaxInterferingRegs) {
}
LiveInterval::iterator VirtRegEnd = VirtReg->end();
- LiveInterval *RecentReg = 0;
+ LiveInterval *RecentReg = nullptr;
while (LiveUnionI.valid()) {
assert(VirtRegI != VirtRegEnd && "Reached end of VirtReg");
@@ -200,5 +201,5 @@ void LiveIntervalUnion::Array::clear() {
LIUs[i].~LiveIntervalUnion();
free(LIUs);
Size = 0;
- LIUs = 0;
+ LIUs = nullptr;
}
diff --git a/contrib/llvm/lib/CodeGen/LivePhysRegs.cpp b/contrib/llvm/lib/CodeGen/LivePhysRegs.cpp
new file mode 100644
index 0000000..7efd941
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/LivePhysRegs.cpp
@@ -0,0 +1,114 @@
+//===--- LivePhysRegs.cpp - Live Physical Register Set --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LivePhysRegs utility for tracking liveness of
+// physical registers across machine instructions in forward or backward order.
+// A more detailed description can be found in the corresponding header file.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/LivePhysRegs.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
+#include "llvm/Support/Debug.h"
+using namespace llvm;
+
+
+/// \brief Remove all registers from the set that get clobbered by the register
+/// mask.
+void LivePhysRegs::removeRegsInMask(const MachineOperand &MO) {
+ SparseSet<unsigned>::iterator LRI = LiveRegs.begin();
+ while (LRI != LiveRegs.end()) {
+ if (MO.clobbersPhysReg(*LRI))
+ LRI = LiveRegs.erase(LRI);
+ else
+ ++LRI;
+ }
+}
+
+/// Simulates liveness when stepping backwards over an instruction (bundle):
+/// Remove Defs, add uses. This is the recommended way of calculating liveness.
+void LivePhysRegs::stepBackward(const MachineInstr &MI) {
+ // Remove defined registers and regmask kills from the set.
+ for (ConstMIBundleOperands O(&MI); O.isValid(); ++O) {
+ if (O->isReg()) {
+ if (!O->isDef())
+ continue;
+ unsigned Reg = O->getReg();
+ if (Reg == 0)
+ continue;
+ removeReg(Reg);
+ } else if (O->isRegMask())
+ removeRegsInMask(*O);
+ }
+
+ // Add uses to the set.
+ for (ConstMIBundleOperands O(&MI); O.isValid(); ++O) {
+ if (!O->isReg() || !O->readsReg() || O->isUndef())
+ continue;
+ unsigned Reg = O->getReg();
+ if (Reg == 0)
+ continue;
+ addReg(Reg);
+ }
+}
+
+/// Simulates liveness when stepping forward over an instruction (bundle):
+/// remove killed uses, add defs. This is not the recommended way, because it
+/// depends on accurate kill flags. If possible, use stepBackward() instead of
+/// this function.
+void LivePhysRegs::stepForward(const MachineInstr &MI) {
+ SmallVector<unsigned, 4> Defs;
+ // Remove killed registers from the set.
+ for (ConstMIBundleOperands O(&MI); O.isValid(); ++O) {
+ if (O->isReg()) {
+ unsigned Reg = O->getReg();
+ if (Reg == 0)
+ continue;
+ if (O->isDef()) {
+ if (!O->isDead())
+ Defs.push_back(Reg);
+ } else {
+ if (!O->isKill())
+ continue;
+ assert(O->isUse());
+ removeReg(Reg);
+ }
+ } else if (O->isRegMask())
+ removeRegsInMask(*O);
+ }
+
+ // Add defs to the set.
+ for (unsigned i = 0, e = Defs.size(); i != e; ++i)
+ addReg(Defs[i]);
+}
+
+/// Print the currently live registers to OS.
+void LivePhysRegs::print(raw_ostream &OS) const {
+ OS << "Live Registers:";
+ if (!TRI) {
+ OS << " (uninitialized)\n";
+ return;
+ }
+
+ if (empty()) {
+ OS << " (empty)\n";
+ return;
+ }
+
+ for (const_iterator I = begin(), E = end(); I != E; ++I)
+ OS << " " << PrintReg(*I, TRI);
+ OS << "\n";
+}
+
+/// Dumps the currently live registers to the debug output.
+void LivePhysRegs::dump() const {
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ dbgs() << " " << *this;
+#endif
+}
diff --git a/contrib/llvm/lib/CodeGen/LiveRangeCalc.cpp b/contrib/llvm/lib/CodeGen/LiveRangeCalc.cpp
index ae086bc..a558e14 100644
--- a/contrib/llvm/lib/CodeGen/LiveRangeCalc.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveRangeCalc.cpp
@@ -11,13 +11,14 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "regalloc"
#include "LiveRangeCalc.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "regalloc"
+
void LiveRangeCalc::reset(const MachineFunction *mf,
SlotIndexes *SI,
MachineDominatorTree *MDT,
@@ -41,9 +42,8 @@ void LiveRangeCalc::createDeadDefs(LiveRange &LR, unsigned Reg) {
// Visit all def operands. If the same instruction has multiple defs of Reg,
// LR.createDeadDef() will deduplicate.
- for (MachineRegisterInfo::def_iterator
- I = MRI->def_begin(Reg), E = MRI->def_end(); I != E; ++I) {
- const MachineInstr *MI = &*I;
+ for (MachineOperand &MO : MRI->def_operands(Reg)) {
+ const MachineInstr *MI = MO.getParent();
// Find the corresponding slot index.
SlotIndex Idx;
if (MI->isPHI())
@@ -52,7 +52,7 @@ void LiveRangeCalc::createDeadDefs(LiveRange &LR, unsigned Reg) {
else
// Instructions are either normal 'r', or early clobber 'e'.
Idx = Indexes->getInstructionIndex(MI)
- .getRegSlot(I.getOperand().isEarlyClobber());
+ .getRegSlot(MO.isEarlyClobber());
// Create the def in LR. This may find an existing def.
LR.createDeadDef(Idx, *Alloc);
@@ -64,9 +64,7 @@ void LiveRangeCalc::extendToUses(LiveRange &LR, unsigned Reg) {
assert(MRI && Indexes && "call reset() first");
// Visit all operands that read Reg. This may include partial defs.
- for (MachineRegisterInfo::reg_nodbg_iterator I = MRI->reg_nodbg_begin(Reg),
- E = MRI->reg_nodbg_end(); I != E; ++I) {
- MachineOperand &MO = I.getOperand();
+ for (MachineOperand &MO : MRI->reg_nodbg_operands(Reg)) {
// Clear all kill flags. They will be reinserted after register allocation
// by LiveIntervalAnalysis::addKillFlags().
if (MO.isUse())
@@ -75,7 +73,8 @@ void LiveRangeCalc::extendToUses(LiveRange &LR, unsigned Reg) {
continue;
// MI is reading Reg. We may have visited MI before if it happens to be
// reading Reg multiple times. That is OK, extend() is idempotent.
- const MachineInstr *MI = &*I;
+ const MachineInstr *MI = MO.getParent();
+ unsigned OpNo = (&MO - &MI->getOperand(0));
// Find the SlotIndex being read.
SlotIndex Idx;
@@ -83,7 +82,7 @@ void LiveRangeCalc::extendToUses(LiveRange &LR, unsigned Reg) {
assert(!MO.isDef() && "Cannot handle PHI def of partial register.");
// PHI operands are paired: (Reg, PredMBB).
// Extend the live range to be live-out from PredMBB.
- Idx = Indexes->getMBBEndIdx(MI->getOperand(I.getOperandNo()+1).getMBB());
+ Idx = Indexes->getMBBEndIdx(MI->getOperand(OpNo+1).getMBB());
} else {
// This is a normal instruction.
Idx = Indexes->getInstructionIndex(MI).getRegSlot();
@@ -92,7 +91,7 @@ void LiveRangeCalc::extendToUses(LiveRange &LR, unsigned Reg) {
if (MO.isDef()) {
if (MO.isEarlyClobber())
Idx = Idx.getRegSlot(true);
- } else if (MI->isRegTiedToDefOperand(I.getOperandNo(), &DefIdx)) {
+ } else if (MI->isRegTiedToDefOperand(OpNo, &DefIdx)) {
// FIXME: This would be a lot easier if tied early-clobber uses also
// had an early-clobber flag.
if (MI->getOperand(DefIdx).isEarlyClobber())
@@ -114,7 +113,7 @@ void LiveRangeCalc::updateLiveIns() {
MachineBasicBlock *MBB = I->DomNode->getBlock();
assert(I->Value && "No live-in value found");
SlotIndex Start, End;
- tie(Start, End) = Indexes->getMBBRange(MBB);
+ std::tie(Start, End) = Indexes->getMBBRange(MBB);
if (I->Kill.isValid())
// Value is killed inside this block.
@@ -123,7 +122,7 @@ void LiveRangeCalc::updateLiveIns() {
// The value is live-through, update LiveOut as well.
// Defer the Domtree lookup until it is needed.
assert(Seen.test(MBB->getNumber()));
- LiveOut[MBB] = LiveOutPair(I->Value, (MachineDomTreeNode *)0);
+ LiveOut[MBB] = LiveOutPair(I->Value, (MachineDomTreeNode *)nullptr);
}
Updater.setDest(&I->LR);
Updater.add(Start, End, I->Value);
@@ -176,7 +175,7 @@ bool LiveRangeCalc::findReachingDefs(LiveRange &LR, MachineBasicBlock &KillMBB,
// Remember if we have seen more than one value.
bool UniqueVNI = true;
- VNInfo *TheVNI = 0;
+ VNInfo *TheVNI = nullptr;
// Using Seen as a visited set, perform a BFS for all reaching defs.
for (unsigned i = 0; i != WorkList.size(); ++i) {
@@ -212,7 +211,7 @@ bool LiveRangeCalc::findReachingDefs(LiveRange &LR, MachineBasicBlock &KillMBB,
}
SlotIndex Start, End;
- tie(Start, End) = Indexes->getMBBRange(Pred);
+ std::tie(Start, End) = Indexes->getMBBRange(Pred);
// First time we see Pred. Try to determine the live-out value, but set
// it as null if Pred is live-through with an unknown value.
@@ -247,13 +246,13 @@ bool LiveRangeCalc::findReachingDefs(LiveRange &LR, MachineBasicBlock &KillMBB,
for (SmallVectorImpl<unsigned>::const_iterator I = WorkList.begin(),
E = WorkList.end(); I != E; ++I) {
SlotIndex Start, End;
- tie(Start, End) = Indexes->getMBBRange(*I);
+ std::tie(Start, End) = Indexes->getMBBRange(*I);
// Trim the live range in KillMBB.
if (*I == KillMBBNum && Kill.isValid())
End = Kill;
else
LiveOut[MF->getBlockNumbered(*I)] =
- LiveOutPair(TheVNI, (MachineDomTreeNode *)0);
+ LiveOutPair(TheVNI, nullptr);
Updater.add(Start, End, TheVNI);
}
return true;
@@ -342,12 +341,12 @@ void LiveRangeCalc::updateSSA() {
++Changes;
assert(Alloc && "Need VNInfo allocator to create PHI-defs");
SlotIndex Start, End;
- tie(Start, End) = Indexes->getMBBRange(MBB);
+ std::tie(Start, End) = Indexes->getMBBRange(MBB);
LiveRange &LR = I->LR;
VNInfo *VNI = LR.getNextValue(Start, *Alloc);
I->Value = VNI;
// This block is done, we know the final value.
- I->DomNode = 0;
+ I->DomNode = nullptr;
// Add liveness since updateLiveIns now skips this node.
if (I->Kill.isValid())
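In the LiveRangeCalc.cpp changes above, the old operand iterators (and their getOperandNo()) give way to range-based loops over MachineOperand, and the operand index is recovered by pointer arithmetic: &MO - &MI->getOperand(0). That works only because an instruction's operands live in one contiguous array. A standalone sketch of the same recovery; the Operand and Instr types here are invented.

#include <cstddef>
#include <iostream>
#include <vector>

struct Operand { int Reg; };

struct Instr {
  std::vector<Operand> Ops;                        // contiguous operand storage
  Operand &getOperand(std::size_t N) { return Ops[N]; }
};

int main() {
  Instr MI{{{10}, {11}, {12}}};
  for (Operand &MO : MI.Ops) {
    // Recover the operand's index from its address, as the new code does
    // with &MO - &MI->getOperand(0).
    std::ptrdiff_t OpNo = &MO - &MI.getOperand(0);
    std::cout << "operand " << OpNo << " holds r" << MO.Reg << "\n";
  }
}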
diff --git a/contrib/llvm/lib/CodeGen/LiveRangeCalc.h b/contrib/llvm/lib/CodeGen/LiveRangeCalc.h
index a3a3fbb..67ab559 100644
--- a/contrib/llvm/lib/CodeGen/LiveRangeCalc.h
+++ b/contrib/llvm/lib/CodeGen/LiveRangeCalc.h
@@ -92,7 +92,7 @@ class LiveRangeCalc {
VNInfo *Value;
LiveInBlock(LiveRange &LR, MachineDomTreeNode *node, SlotIndex kill)
- : LR(LR), DomNode(node), Kill(kill), Value(0) {}
+ : LR(LR), DomNode(node), Kill(kill), Value(nullptr) {}
};
/// LiveIn - Work list of blocks where the live-in value has yet to be
@@ -125,7 +125,8 @@ class LiveRangeCalc {
void updateLiveIns();
public:
- LiveRangeCalc() : MF(0), MRI(0), Indexes(0), DomTree(0), Alloc(0) {}
+ LiveRangeCalc() : MF(nullptr), MRI(nullptr), Indexes(nullptr),
+ DomTree(nullptr), Alloc(nullptr) {}
//===--------------------------------------------------------------------===//
// High-level interface.
@@ -203,7 +204,7 @@ public:
/// addLiveInBlock().
void setLiveOutValue(MachineBasicBlock *MBB, VNInfo *VNI) {
Seen.set(MBB->getNumber());
- LiveOut[MBB] = LiveOutPair(VNI, (MachineDomTreeNode *)0);
+ LiveOut[MBB] = LiveOutPair(VNI, nullptr);
}
/// addLiveInBlock - Add a block with an unknown live-in value. This
diff --git a/contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp b/contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp
index cb70c43..431241f 100644
--- a/contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp
@@ -11,7 +11,6 @@
// is spilled or split.
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "regalloc"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
@@ -24,6 +23,8 @@
using namespace llvm;
+#define DEBUG_TYPE "regalloc"
+
STATISTIC(NumDCEDeleted, "Number of instructions deleted by DCE");
STATISTIC(NumDCEFoldedLoads, "Number of single use loads folded after DCE");
STATISTIC(NumFracRanges, "Number of live ranges fractured by DCE");
@@ -164,12 +165,10 @@ void LiveRangeEdit::eraseVirtReg(unsigned Reg) {
bool LiveRangeEdit::foldAsLoad(LiveInterval *LI,
SmallVectorImpl<MachineInstr*> &Dead) {
- MachineInstr *DefMI = 0, *UseMI = 0;
+ MachineInstr *DefMI = nullptr, *UseMI = nullptr;
// Check that there is a single def and a single use.
- for (MachineRegisterInfo::reg_nodbg_iterator I = MRI.reg_nodbg_begin(LI->reg),
- E = MRI.reg_nodbg_end(); I != E; ++I) {
- MachineOperand &MO = I.getOperand();
+ for (MachineOperand &MO : MRI.reg_nodbg_operands(LI->reg)) {
MachineInstr *MI = MO.getParent();
if (MO.isDef()) {
if (DefMI && DefMI != MI)
@@ -199,7 +198,7 @@ bool LiveRangeEdit::foldAsLoad(LiveInterval *LI,
// We also need to make sure it is safe to move the load.
// Assume there are stores between DefMI and UseMI.
bool SawStore = true;
- if (!DefMI->isSafeToMove(&TII, 0, SawStore))
+ if (!DefMI->isSafeToMove(&TII, nullptr, SawStore))
return false;
DEBUG(dbgs() << "Try to fold single def: " << *DefMI
@@ -215,7 +214,7 @@ bool LiveRangeEdit::foldAsLoad(LiveInterval *LI,
DEBUG(dbgs() << " folded: " << *FoldMI);
LIS.ReplaceMachineInstrInMaps(UseMI, FoldMI);
UseMI->eraseFromParent();
- DefMI->addRegisterDead(LI->reg, 0);
+ DefMI->addRegisterDead(LI->reg, nullptr);
Dead.push_back(DefMI);
++NumDCEFoldedLoads;
return true;
@@ -238,7 +237,7 @@ void LiveRangeEdit::eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink) {
// Use the same criteria as DeadMachineInstructionElim.
bool SawStore = false;
- if (!MI->isSafeToMove(&TII, 0, SawStore)) {
+ if (!MI->isSafeToMove(&TII, nullptr, SawStore)) {
DEBUG(dbgs() << "Can't delete: " << Idx << '\t' << *MI);
return;
}
diff --git a/contrib/llvm/lib/CodeGen/LiveRegMatrix.cpp b/contrib/llvm/lib/CodeGen/LiveRegMatrix.cpp
index 1d801ac..de2ce22 100644
--- a/contrib/llvm/lib/CodeGen/LiveRegMatrix.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveRegMatrix.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "regalloc"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "RegisterCoalescer.h"
#include "llvm/ADT/Statistic.h"
@@ -25,6 +24,8 @@
using namespace llvm;
+#define DEBUG_TYPE "regalloc"
+
STATISTIC(NumAssigned , "Number of registers assigned");
STATISTIC(NumUnassigned , "Number of registers unassigned");
@@ -65,7 +66,9 @@ bool LiveRegMatrix::runOnMachineFunction(MachineFunction &MF) {
void LiveRegMatrix::releaseMemory() {
for (unsigned i = 0, e = Matrix.size(); i != e; ++i) {
Matrix[i].clear();
- Queries[i].clear();
+ // No need to clear Queries here, since LiveIntervalUnion::Query doesn't
+ // have anything important to clear and LiveRegMatrix's runOnFunction()
+    // does a std::unique_ptr::reset anyway.
}
}
diff --git a/contrib/llvm/lib/CodeGen/LiveRegUnits.cpp b/contrib/llvm/lib/CodeGen/LiveRegUnits.cpp
deleted file mode 100644
index 6221ca2..0000000
--- a/contrib/llvm/lib/CodeGen/LiveRegUnits.cpp
+++ /dev/null
@@ -1,111 +0,0 @@
-//===-- LiveInterval.cpp - Live Interval Representation -------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the LiveRegUnits utility for tracking liveness of
-// physical register units across machine instructions in forward or backward
-// order.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/CodeGen/LiveRegUnits.h"
-#include "llvm/CodeGen/MachineInstrBundle.h"
-using namespace llvm;
-
-/// Return true if the given MachineOperand clobbers the given register unit.
-/// A register unit is only clobbered if all its super-registers are clobbered.
-static bool operClobbersUnit(const MachineOperand *MO, unsigned Unit,
- const MCRegisterInfo *MCRI) {
- for (MCRegUnitRootIterator RI(Unit, MCRI); RI.isValid(); ++RI) {
- for (MCSuperRegIterator SI(*RI, MCRI, true); SI.isValid(); ++SI) {
- if (!MO->clobbersPhysReg(*SI))
- return false;
- }
- }
- return true;
-}
-
-/// We assume the high bits of a physical super register are not preserved
-/// unless the instruction has an implicit-use operand reading the
-/// super-register or a register unit for the upper bits is available.
-void LiveRegUnits::removeRegsInMask(const MachineOperand &Op,
- const MCRegisterInfo &MCRI) {
- SparseSet<unsigned>::iterator LUI = LiveUnits.begin();
- while (LUI != LiveUnits.end()) {
- if (operClobbersUnit(&Op, *LUI, &MCRI))
- LUI = LiveUnits.erase(LUI);
- else
- ++LUI;
- }
-}
-
-void LiveRegUnits::stepBackward(const MachineInstr &MI,
- const MCRegisterInfo &MCRI) {
- // Remove defined registers and regmask kills from the set.
- for (ConstMIBundleOperands O(&MI); O.isValid(); ++O) {
- if (O->isReg()) {
- if (!O->isDef())
- continue;
- unsigned Reg = O->getReg();
- if (Reg == 0)
- continue;
- removeReg(Reg, MCRI);
- } else if (O->isRegMask()) {
- removeRegsInMask(*O, MCRI);
- }
- }
- // Add uses to the set.
- for (ConstMIBundleOperands O(&MI); O.isValid(); ++O) {
- if (!O->isReg() || !O->readsReg() || O->isUndef())
- continue;
- unsigned Reg = O->getReg();
- if (Reg == 0)
- continue;
- addReg(Reg, MCRI);
- }
-}
-
-/// Uses with kill flag get removed from the set, defs added. If possible
-/// use StepBackward() instead of this function because some kill flags may
-/// be missing.
-void LiveRegUnits::stepForward(const MachineInstr &MI,
- const MCRegisterInfo &MCRI) {
- SmallVector<unsigned, 4> Defs;
- // Remove killed registers from the set.
- for (ConstMIBundleOperands O(&MI); O.isValid(); ++O) {
- if (O->isReg()) {
- unsigned Reg = O->getReg();
- if (Reg == 0)
- continue;
- if (O->isDef()) {
- if (!O->isDead())
- Defs.push_back(Reg);
- } else {
- if (!O->isKill())
- continue;
- assert(O->isUse());
- removeReg(Reg, MCRI);
- }
- } else if (O->isRegMask()) {
- removeRegsInMask(*O, MCRI);
- }
- }
- // Add defs to the set.
- for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
- addReg(Defs[i], MCRI);
- }
-}
-
-/// Adds all registers in the live-in list of block @p BB.
-void LiveRegUnits::addLiveIns(const MachineBasicBlock *MBB,
- const MCRegisterInfo &MCRI) {
- for (MachineBasicBlock::livein_iterator L = MBB->livein_begin(),
- LE = MBB->livein_end(); L != LE; ++L) {
- addReg(*L, MCRI);
- }
-}
diff --git a/contrib/llvm/lib/CodeGen/LiveStackAnalysis.cpp b/contrib/llvm/lib/CodeGen/LiveStackAnalysis.cpp
index be11a8f..b3161a4 100644
--- a/contrib/llvm/lib/CodeGen/LiveStackAnalysis.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveStackAnalysis.cpp
@@ -13,7 +13,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "livestacks"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
@@ -24,6 +23,8 @@
#include <limits>
using namespace llvm;
+#define DEBUG_TYPE "livestacks"
+
char LiveStacks::ID = 0;
INITIALIZE_PASS_BEGIN(LiveStacks, "livestacks",
"Live Stack Slot Analysis", false, false)
diff --git a/contrib/llvm/lib/CodeGen/LiveVariables.cpp b/contrib/llvm/lib/CodeGen/LiveVariables.cpp
index ed55d7a..758b216 100644
--- a/contrib/llvm/lib/CodeGen/LiveVariables.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveVariables.cpp
@@ -61,7 +61,7 @@ LiveVariables::VarInfo::findKill(const MachineBasicBlock *MBB) const {
for (unsigned i = 0, e = Kills.size(); i != e; ++i)
if (Kills[i]->getParent() == MBB)
return Kills[i];
- return NULL;
+ return nullptr;
}
void LiveVariables::VarInfo::dump() const {
@@ -193,7 +193,7 @@ MachineInstr *LiveVariables::FindLastPartialDef(unsigned Reg,
SmallSet<unsigned,4> &PartDefRegs) {
unsigned LastDefReg = 0;
unsigned LastDefDist = 0;
- MachineInstr *LastDef = NULL;
+ MachineInstr *LastDef = nullptr;
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
unsigned SubReg = *SubRegs;
MachineInstr *Def = PhysRegDef[SubReg];
@@ -208,7 +208,7 @@ MachineInstr *LiveVariables::FindLastPartialDef(unsigned Reg,
}
if (!LastDef)
- return 0;
+ return nullptr;
PartDefRegs.insert(LastDefReg);
for (unsigned i = 0, e = LastDef->getNumOperands(); i != e; ++i) {
@@ -282,7 +282,7 @@ MachineInstr *LiveVariables::FindLastRefOrPartRef(unsigned Reg) {
MachineInstr *LastDef = PhysRegDef[Reg];
MachineInstr *LastUse = PhysRegUse[Reg];
if (!LastDef && !LastUse)
- return 0;
+ return nullptr;
MachineInstr *LastRefOrPartRef = LastUse ? LastUse : LastDef;
unsigned LastRefOrPartRefDist = DistanceMap[LastRefOrPartRef];
@@ -333,7 +333,7 @@ bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) {
// AX<dead> = AL<imp-def>
// = AL<kill>
// AX =
- MachineInstr *LastPartDef = 0;
+ MachineInstr *LastPartDef = nullptr;
unsigned LastPartDefDist = 0;
SmallSet<unsigned, 8> PartUses;
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
@@ -436,7 +436,7 @@ void LiveVariables::HandleRegMask(const MachineOperand &MO) {
for (MCSuperRegIterator SR(Reg, TRI); SR.isValid(); ++SR)
if ((PhysRegDef[*SR] || PhysRegUse[*SR]) && MO.clobbersPhysReg(*SR))
Super = *SR;
- HandlePhysRegKill(Super, 0);
+ HandlePhysRegKill(Super, nullptr);
}
}
@@ -492,7 +492,7 @@ void LiveVariables::UpdatePhysRegDefs(MachineInstr *MI,
SubRegs.isValid(); ++SubRegs) {
unsigned SubReg = *SubRegs;
PhysRegDef[SubReg] = MI;
- PhysRegUse[SubReg] = NULL;
+ PhysRegUse[SubReg] = nullptr;
}
}
}
@@ -506,8 +506,8 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
PhysRegDef = new MachineInstr*[NumRegs];
PhysRegUse = new MachineInstr*[NumRegs];
PHIVarInfo = new SmallVector<unsigned, 4>[MF->getNumBlockIDs()];
- std::fill(PhysRegDef, PhysRegDef + NumRegs, (MachineInstr*)0);
- std::fill(PhysRegUse, PhysRegUse + NumRegs, (MachineInstr*)0);
+ std::fill(PhysRegDef, PhysRegDef + NumRegs, nullptr);
+ std::fill(PhysRegUse, PhysRegUse + NumRegs, nullptr);
PHIJoins.clear();
// FIXME: LiveIntervals will be updated to remove its dependence on
@@ -536,7 +536,7 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
EE = MBB->livein_end(); II != EE; ++II) {
assert(TargetRegisterInfo::isPhysicalRegister(*II) &&
"Cannot have a live-in virtual register!");
- HandlePhysRegDef(*II, 0, Defs);
+ HandlePhysRegDef(*II, nullptr, Defs);
}
// Loop over all of the instructions, processing them.
@@ -639,10 +639,10 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
// available at the end of the basic block.
for (unsigned i = 0; i != NumRegs; ++i)
if ((PhysRegDef[i] || PhysRegUse[i]) && !LiveOuts.count(i))
- HandlePhysRegDef(i, 0, Defs);
+ HandlePhysRegDef(i, nullptr, Defs);
- std::fill(PhysRegDef, PhysRegDef + NumRegs, (MachineInstr*)0);
- std::fill(PhysRegUse, PhysRegUse + NumRegs, (MachineInstr*)0);
+ std::fill(PhysRegDef, PhysRegDef + NumRegs, nullptr);
+ std::fill(PhysRegUse, PhysRegUse + NumRegs, nullptr);
}
// Convert and transfer the dead / killed information we have gathered into
@@ -701,14 +701,15 @@ void LiveVariables::removeVirtualRegistersKilled(MachineInstr *MI) {
/// which is used in a PHI node. We map that to the BB the vreg is coming from.
///
void LiveVariables::analyzePHINodes(const MachineFunction& Fn) {
- for (MachineFunction::const_iterator I = Fn.begin(), E = Fn.end();
- I != E; ++I)
- for (MachineBasicBlock::const_iterator BBI = I->begin(), BBE = I->end();
- BBI != BBE && BBI->isPHI(); ++BBI)
- for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2)
- if (BBI->getOperand(i).readsReg())
- PHIVarInfo[BBI->getOperand(i + 1).getMBB()->getNumber()]
- .push_back(BBI->getOperand(i).getReg());
+ for (const auto &MBB : Fn)
+ for (const auto &BBI : MBB) {
+ if (!BBI.isPHI())
+ break;
+ for (unsigned i = 1, e = BBI.getNumOperands(); i != e; i += 2)
+ if (BBI.getOperand(i).readsReg())
+ PHIVarInfo[BBI.getOperand(i + 1).getMBB()->getNumber()]
+ .push_back(BBI.getOperand(i).getReg());
+ }
}
bool LiveVariables::VarInfo::isLiveIn(const MachineBasicBlock &MBB,
diff --git a/contrib/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp b/contrib/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
index 26a1176..36885e8 100644
--- a/contrib/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
+++ b/contrib/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
@@ -14,15 +14,16 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "localstackalloc"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/StackProtector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
@@ -38,6 +39,8 @@
using namespace llvm;
+#define DEBUG_TYPE "localstackalloc"
+
STATISTIC(NumAllocations, "Number of frame indices allocated into local block");
STATISTIC(NumBaseRegisters, "Number of virtual frame base registers allocated");
STATISTIC(NumReplacements, "Number of frame indices references replaced");
@@ -60,18 +63,27 @@ namespace {
class LocalStackSlotPass: public MachineFunctionPass {
SmallVector<int64_t,16> LocalOffsets;
+ /// StackObjSet - A set of stack object indexes
+ typedef SmallSetVector<int, 8> StackObjSet;
void AdjustStackOffset(MachineFrameInfo *MFI, int FrameIdx, int64_t &Offset,
bool StackGrowsDown, unsigned &MaxAlign);
+ void AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
+ SmallSet<int, 16> &ProtectedObjs,
+ MachineFrameInfo *MFI, bool StackGrowsDown,
+ int64_t &Offset, unsigned &MaxAlign);
void calculateFrameObjectOffsets(MachineFunction &Fn);
bool insertFrameReferenceRegisters(MachineFunction &Fn);
public:
static char ID; // Pass identification, replacement for typeid
- explicit LocalStackSlotPass() : MachineFunctionPass(ID) { }
- bool runOnMachineFunction(MachineFunction &MF);
+ explicit LocalStackSlotPass() : MachineFunctionPass(ID) {
+ initializeLocalStackSlotPassPass(*PassRegistry::getPassRegistry());
+ }
+ bool runOnMachineFunction(MachineFunction &MF) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
+ AU.addRequired<StackProtector>();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -81,8 +93,12 @@ namespace {
char LocalStackSlotPass::ID = 0;
char &llvm::LocalStackSlotAllocationID = LocalStackSlotPass::ID;
-INITIALIZE_PASS(LocalStackSlotPass, "localstackalloc",
- "Local Stack Slot Allocation", false, false)
+INITIALIZE_PASS_BEGIN(LocalStackSlotPass, "localstackalloc",
+ "Local Stack Slot Allocation", false, false)
+INITIALIZE_PASS_DEPENDENCY(StackProtector)
+INITIALIZE_PASS_END(LocalStackSlotPass, "localstackalloc",
+ "Local Stack Slot Allocation", false, false)
+
bool LocalStackSlotPass::runOnMachineFunction(MachineFunction &MF) {
MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -145,6 +161,22 @@ void LocalStackSlotPass::AdjustStackOffset(MachineFrameInfo *MFI,
++NumAllocations;
}
+/// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
+/// those required to be close to the Stack Protector) to stack offsets.
+void LocalStackSlotPass::AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
+ SmallSet<int, 16> &ProtectedObjs,
+ MachineFrameInfo *MFI,
+ bool StackGrowsDown, int64_t &Offset,
+ unsigned &MaxAlign) {
+
+ for (StackObjSet::const_iterator I = UnassignedObjs.begin(),
+ E = UnassignedObjs.end(); I != E; ++I) {
+ int i = *I;
+ AdjustStackOffset(MFI, i, Offset, StackGrowsDown, MaxAlign);
+ ProtectedObjs.insert(i);
+ }
+}
+
/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
///
@@ -156,11 +188,16 @@ void LocalStackSlotPass::calculateFrameObjectOffsets(MachineFunction &Fn) {
TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
int64_t Offset = 0;
unsigned MaxAlign = 0;
+ StackProtector *SP = &getAnalysis<StackProtector>();
// Make sure that the stack protector comes before the local variables on the
// stack.
- SmallSet<int, 16> LargeStackObjs;
+ SmallSet<int, 16> ProtectedObjs;
if (MFI->getStackProtectorIndex() >= 0) {
+ StackObjSet LargeArrayObjs;
+ StackObjSet SmallArrayObjs;
+ StackObjSet AddrOfObjs;
+
AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), Offset,
StackGrowsDown, MaxAlign);
@@ -170,12 +207,29 @@ void LocalStackSlotPass::calculateFrameObjectOffsets(MachineFunction &Fn) {
continue;
if (MFI->getStackProtectorIndex() == (int)i)
continue;
- if (!MFI->MayNeedStackProtector(i))
- continue;
- AdjustStackOffset(MFI, i, Offset, StackGrowsDown, MaxAlign);
- LargeStackObjs.insert(i);
+ switch (SP->getSSPLayout(MFI->getObjectAllocation(i))) {
+ case StackProtector::SSPLK_None:
+ continue;
+ case StackProtector::SSPLK_SmallArray:
+ SmallArrayObjs.insert(i);
+ continue;
+ case StackProtector::SSPLK_AddrOf:
+ AddrOfObjs.insert(i);
+ continue;
+ case StackProtector::SSPLK_LargeArray:
+ LargeArrayObjs.insert(i);
+ continue;
+ }
+ llvm_unreachable("Unexpected SSPLayoutKind.");
}
+
+ AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
+ Offset, MaxAlign);
+ AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
+ Offset, MaxAlign);
+ AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
+ Offset, MaxAlign);
}
// Then assign frame offsets to stack objects that are not used to spill
@@ -185,7 +239,7 @@ void LocalStackSlotPass::calculateFrameObjectOffsets(MachineFunction &Fn) {
continue;
if (MFI->getStackProtectorIndex() == (int)i)
continue;
- if (LargeStackObjs.count(i))
+ if (ProtectedObjs.count(i))
continue;
AdjustStackOffset(MFI, i, Offset, StackGrowsDown, MaxAlign);
@@ -233,9 +287,11 @@ bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) {
for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
MachineInstr *MI = I;
- // Debug value instructions can't be out of range, so they don't need
- // any updates.
- if (MI->isDebugValue())
+ // Debug value, stackmap and patchpoint instructions can't be out of
+ // range, so they don't need any updates.
+ if (MI->isDebugValue() ||
+ MI->getOpcode() == TargetOpcode::STACKMAP ||
+ MI->getOpcode() == TargetOpcode::PATCHPOINT)
continue;
// For now, allocate the base register(s) within the basic block
@@ -322,18 +378,11 @@ bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) {
// processed all FrameRefs before this one, just check whether or not
// the next FrameRef will be able to reuse this new register. If not,
// then don't bother creating it.
- bool CanReuse = false;
- for (int refn = ref + 1; refn < e; ++refn) {
- FrameRef &FRN = FrameReferenceInsns[refn];
- MachineBasicBlock::iterator J = FRN.getMachineInstr();
- MachineInstr *MIN = J;
-
- CanReuse = lookupCandidateBaseReg(BaseOffset, FrameSizeAdjust,
- FRN.getLocalOffset(), MIN, TRI);
- break;
- }
-
- if (!CanReuse) {
+ if (ref + 1 >= e ||
+ !lookupCandidateBaseReg(
+ BaseOffset, FrameSizeAdjust,
+ FrameReferenceInsns[ref + 1].getLocalOffset(),
+ FrameReferenceInsns[ref + 1].getMachineInstr(), TRI)) {
BaseOffset = PrevBaseOffset;
continue;
}
@@ -363,7 +412,7 @@ bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) {
// Modify the instruction to use the new base register rather
// than the frame index operand.
- TRI->resolveFrameIndex(I, BaseReg, Offset);
+ TRI->resolveFrameIndex(*I, BaseReg, Offset);
DEBUG(dbgs() << "Resolved: " << *MI);
++NumReplacements;
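
The LocalStackSlotAllocation change above replaces the single "may need stack protector" bucket with the three SSPLayoutKind categories reported by the StackProtector analysis, and lays them out in a fixed order: large arrays first, then small arrays, then address-taken objects, so the riskiest objects end up adjacent to the guard slot. Below is a minimal standalone sketch of that bucketing and ordering using plain STL containers; the LayoutKind enum and assignOffsets helper are simplified stand-ins, not the pass's real API.

    #include <cstdint>
    #include <iostream>
    #include <set>
    #include <vector>

    // Simplified stand-in for StackProtector::SSPLayoutKind.
    enum class LayoutKind { None, SmallArray, AddrOf, LargeArray };

    struct Obj { int Index; LayoutKind Kind; };

    // Assign offsets to one bucket and remember which objects are protected.
    static void assignOffsets(const std::vector<int> &Objs,
                              std::set<int> &Protected, int64_t &Offset) {
      for (int I : Objs) {
        Offset += 8; // pretend every object is 8 bytes
        Protected.insert(I);
        std::cout << "obj " << I << " -> offset " << Offset << "\n";
      }
    }

    int main() {
      std::vector<Obj> Frame = {{0, LayoutKind::LargeArray},
                                {1, LayoutKind::AddrOf},
                                {2, LayoutKind::SmallArray},
                                {3, LayoutKind::None}};
      std::vector<int> Large, Small, AddrOf;
      for (const Obj &O : Frame) {
        switch (O.Kind) {
        case LayoutKind::LargeArray: Large.push_back(O.Index); break;
        case LayoutKind::SmallArray: Small.push_back(O.Index); break;
        case LayoutKind::AddrOf:     AddrOf.push_back(O.Index); break;
        case LayoutKind::None:       break; // laid out later, away from the guard
        }
      }
      std::set<int> Protected;
      int64_t Offset = 0;
      // Same ordering as the pass: large arrays closest to the protector.
      assignOffsets(Large, Protected, Offset);
      assignOffsets(Small, Protected, Offset);
      assignOffsets(AddrOf, Protected, Offset);
    }
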
diff --git a/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp b/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp
index ca71e3b..08fef5f 100644
--- a/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp
@@ -14,7 +14,6 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineDominators.h"
@@ -25,10 +24,10 @@
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/LeakDetector.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/LeakDetector.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
@@ -36,9 +35,11 @@
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "codegen"
+
MachineBasicBlock::MachineBasicBlock(MachineFunction &mf, const BasicBlock *bb)
: BB(bb), Number(-1), xParent(&mf), Alignment(0), IsLandingPad(false),
- AddressTaken(false), CachedMCSymbol(NULL) {
+ AddressTaken(false), CachedMCSymbol(nullptr) {
Insts.Parent = this;
}
@@ -52,7 +53,8 @@ MCSymbol *MachineBasicBlock::getSymbol() const {
if (!CachedMCSymbol) {
const MachineFunction *MF = getParent();
MCContext &Ctx = MF->getContext();
- const char *Prefix = Ctx.getAsmInfo()->getPrivateGlobalPrefix();
+ const TargetMachine &TM = MF->getTarget();
+ const char *Prefix = TM.getDataLayout()->getPrivateGlobalPrefix();
CachedMCSymbol = Ctx.GetOrCreateSymbol(Twine(Prefix) + "BB" +
Twine(MF->getFunctionNumber()) +
"_" + Twine(getNumber()));
@@ -98,7 +100,7 @@ void ilist_traits<MachineBasicBlock>::removeNodeFromList(MachineBasicBlock *N) {
/// list, we update its parent pointer and add its operands from reg use/def
/// lists if appropriate.
void ilist_traits<MachineInstr>::addNodeToList(MachineInstr *N) {
- assert(N->getParent() == 0 && "machine instruction already in a basic block");
+ assert(!N->getParent() && "machine instruction already in a basic block");
N->setParent(Parent);
// Add the instruction's register operands to their corresponding
@@ -113,13 +115,13 @@ void ilist_traits<MachineInstr>::addNodeToList(MachineInstr *N) {
/// list, we update its parent pointer and remove its operands from reg use/def
/// lists if appropriate.
void ilist_traits<MachineInstr>::removeNodeFromList(MachineInstr *N) {
- assert(N->getParent() != 0 && "machine instruction not in a basic block");
+ assert(N->getParent() && "machine instruction not in a basic block");
// Remove from the use/def lists.
if (MachineFunction *MF = N->getParent()->getParent())
N->RemoveRegOperandsFromUseLists(MF->getRegInfo());
- N->setParent(0);
+ N->setParent(nullptr);
LeakDetector::addGarbageObject(N);
}
@@ -160,7 +162,7 @@ MachineBasicBlock::iterator MachineBasicBlock::getFirstNonPHI() {
MachineBasicBlock::iterator
MachineBasicBlock::SkipPHIsAndLabels(MachineBasicBlock::iterator I) {
iterator E = end();
- while (I != E && (I->isPHI() || I->isLabel() || I->isDebugValue()))
+ while (I != E && (I->isPHI() || I->isPosition() || I->isDebugValue()))
++I;
// FIXME: This needs to change if we wish to bundle labels / dbg_values
// inside the bundle.
@@ -229,11 +231,11 @@ MachineBasicBlock::getLastNonDebugInstr() const {
const MachineBasicBlock *MachineBasicBlock::getLandingPadSuccessor() const {
// A block with a landing pad successor only has one other successor.
if (succ_size() > 2)
- return 0;
+ return nullptr;
for (const_succ_iterator I = succ_begin(), E = succ_end(); I != E; ++I)
if ((*I)->isLandingPad())
return *I;
- return 0;
+ return nullptr;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -277,7 +279,7 @@ void MachineBasicBlock::print(raw_ostream &OS, SlotIndexes *Indexes) const {
const char *Comma = "";
if (const BasicBlock *LBB = getBasicBlock()) {
OS << Comma << "derived from LLVM BB ";
- WriteAsOperand(OS, LBB, /*PrintType=*/false);
+ LBB->printAsOperand(OS, /*PrintType=*/false);
Comma = ", ";
}
if (isLandingPad()) { OS << Comma << "EH LANDING PAD"; Comma = ", "; }
@@ -330,6 +332,10 @@ void MachineBasicBlock::print(raw_ostream &OS, SlotIndexes *Indexes) const {
}
}
+void MachineBasicBlock::printAsOperand(raw_ostream &OS, bool /*PrintType*/) const {
+ OS << "BB#" << getNumber();
+}
+
void MachineBasicBlock::removeLiveIn(unsigned Reg) {
std::vector<unsigned>::iterator I =
std::find(LiveIns.begin(), LiveIns.end(), Reg);
@@ -388,7 +394,7 @@ void MachineBasicBlock::updateTerminator() {
// A block with no successors has no concerns with fall-through edges.
if (this->succ_empty()) return;
- MachineBasicBlock *TBB = 0, *FBB = 0;
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
DebugLoc dl; // FIXME: this is nowhere
bool B = TII->AnalyzeBranch(*this, TBB, FBB, Cond);
@@ -419,7 +425,7 @@ void MachineBasicBlock::updateTerminator() {
// Finally update the unconditional successor to be reached via a branch
// if it would not be reached by fallthrough.
if (!isLayoutSuccessor(TBB))
- TII->InsertBranch(*this, TBB, 0, Cond, dl);
+ TII->InsertBranch(*this, TBB, nullptr, Cond, dl);
}
} else {
if (FBB) {
@@ -430,16 +436,16 @@ void MachineBasicBlock::updateTerminator() {
if (TII->ReverseBranchCondition(Cond))
return;
TII->RemoveBranch(*this);
- TII->InsertBranch(*this, FBB, 0, Cond, dl);
+ TII->InsertBranch(*this, FBB, nullptr, Cond, dl);
} else if (isLayoutSuccessor(FBB)) {
TII->RemoveBranch(*this);
- TII->InsertBranch(*this, TBB, 0, Cond, dl);
+ TII->InsertBranch(*this, TBB, nullptr, Cond, dl);
}
} else {
// Walk through the successors and find the successor which is not
// a landing pad and is not the conditional branch destination (in TBB)
// as the fallthrough successor.
- MachineBasicBlock *FallthroughBB = 0;
+ MachineBasicBlock *FallthroughBB = nullptr;
for (succ_iterator SI = succ_begin(), SE = succ_end(); SI != SE; ++SI) {
if ((*SI)->isLandingPad() || *SI == TBB)
continue;
@@ -457,7 +463,7 @@ void MachineBasicBlock::updateTerminator() {
// Finally update the unconditional successor to be reached via a branch
// if it would not be reached by fallthrough.
if (!isLayoutSuccessor(TBB))
- TII->InsertBranch(*this, TBB, 0, Cond, dl);
+ TII->InsertBranch(*this, TBB, nullptr, Cond, dl);
return;
}
@@ -466,11 +472,11 @@ void MachineBasicBlock::updateTerminator() {
if (TII->ReverseBranchCondition(Cond)) {
// We can't reverse the condition, add an unconditional branch.
Cond.clear();
- TII->InsertBranch(*this, FallthroughBB, 0, Cond, dl);
+ TII->InsertBranch(*this, FallthroughBB, nullptr, Cond, dl);
return;
}
TII->RemoveBranch(*this);
- TII->InsertBranch(*this, FallthroughBB, 0, Cond, dl);
+ TII->InsertBranch(*this, FallthroughBB, nullptr, Cond, dl);
} else if (!isLayoutSuccessor(FallthroughBB)) {
TII->RemoveBranch(*this);
TII->InsertBranch(*this, TBB, FallthroughBB, Cond, dl);
@@ -622,7 +628,7 @@ bool MachineBasicBlock::isSuccessor(const MachineBasicBlock *MBB) const {
bool MachineBasicBlock::isLayoutSuccessor(const MachineBasicBlock *MBB) const {
MachineFunction::const_iterator I(this);
- return llvm::next(I) == MachineFunction::const_iterator(MBB);
+ return std::next(I) == MachineFunction::const_iterator(MBB);
}
bool MachineBasicBlock::canFallThrough() {
@@ -637,7 +643,7 @@ bool MachineBasicBlock::canFallThrough() {
return false;
// Analyze the branches, if any, at the end of the block.
- MachineBasicBlock *TBB = 0, *FBB = 0;
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
const TargetInstrInfo *TII = getParent()->getTarget().getInstrInfo();
if (TII->AnalyzeBranch(*this, TBB, FBB, Cond)) {
@@ -650,7 +656,7 @@ bool MachineBasicBlock::canFallThrough() {
}
// If there is no branch, control always falls through.
- if (TBB == 0) return true;
+ if (!TBB) return true;
// If there is some explicit branch to the fallthrough block, it can obviously
// reach, even though the branch should get folded to fall through implicitly.
@@ -664,7 +670,7 @@ bool MachineBasicBlock::canFallThrough() {
// Otherwise, if it is conditional and has no explicit false block, it falls
// through.
- return FBB == 0;
+ return FBB == nullptr;
}
MachineBasicBlock *
@@ -672,18 +678,23 @@ MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
// Splitting the critical edge to a landing pad block is non-trivial. Don't do
// it in this generic function.
if (Succ->isLandingPad())
- return NULL;
+ return nullptr;
MachineFunction *MF = getParent();
DebugLoc dl; // FIXME: this is nowhere
+ // Performance might be harmed on HW that implements branching using exec mask
+ // where both sides of the branches are always executed.
+ if (MF->getTarget().requiresStructuredCFG())
+ return nullptr;
+
// We may need to update this's terminator, but we can't do that if
// AnalyzeBranch fails. If this uses a jump table, we won't touch it.
const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
- MachineBasicBlock *TBB = 0, *FBB = 0;
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
if (TII->AnalyzeBranch(*this, TBB, FBB, Cond))
- return NULL;
+ return nullptr;
// Avoid bugpoint weirdness: A block may end with a conditional branch but
// jump to the same MBB in either case. We have duplicate CFG edges in that
@@ -692,11 +703,11 @@ MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
if (TBB && TBB == FBB) {
DEBUG(dbgs() << "Won't split critical edge after degenerate BB#"
<< getNumber() << '\n');
- return NULL;
+ return nullptr;
}
MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
- MF->insert(llvm::next(MachineFunction::iterator(this)), NMBB);
+ MF->insert(std::next(MachineFunction::iterator(this)), NMBB);
DEBUG(dbgs() << "Splitting critical edge:"
" BB#" << getNumber()
<< " -- BB#" << NMBB->getNumber()
@@ -784,7 +795,7 @@ MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
NMBB->addSuccessor(Succ);
if (!NMBB->isLayoutSuccessor(Succ)) {
Cond.clear();
- MF->getTarget().getInstrInfo()->InsertBranch(*NMBB, Succ, NULL, Cond, dl);
+ MF->getTarget().getInstrInfo()->InsertBranch(*NMBB, Succ, nullptr, Cond, dl);
if (Indexes) {
for (instr_iterator I = NMBB->instr_begin(), E = NMBB->instr_end();
@@ -839,7 +850,7 @@ MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
// extend to the end of the new split block.
bool isLastMBB =
- llvm::next(MachineFunction::iterator(NMBB)) == getParent()->end();
+ std::next(MachineFunction::iterator(NMBB)) == getParent()->end();
SlotIndex StartIndex = Indexes->getMBBEndIdx(this);
SlotIndex PrevIndex = StartIndex.getPrevSlot();
@@ -1054,13 +1065,13 @@ bool MachineBasicBlock::CorrectExtraCFGEdges(MachineBasicBlock *DestA,
bool Changed = false;
MachineFunction::iterator FallThru =
- llvm::next(MachineFunction::iterator(this));
+ std::next(MachineFunction::iterator(this));
- if (DestA == 0 && DestB == 0) {
+ if (!DestA && !DestB) {
// Block falls through to successor.
DestA = FallThru;
DestB = FallThru;
- } else if (DestA != 0 && DestB == 0) {
+ } else if (DestA && !DestB) {
if (isCond)
// Block ends in conditional jump that falls through to successor.
DestB = FallThru;
@@ -1114,6 +1125,13 @@ uint32_t MachineBasicBlock::getSuccWeight(const_succ_iterator Succ) const {
return *getWeightIterator(Succ);
}
+/// Set successor weight of a given iterator.
+void MachineBasicBlock::setSuccWeight(succ_iterator I, uint32_t weight) {
+ if (Weights.empty())
+ return;
+ *getWeightIterator(I) = weight;
+}
+
/// getWeightIterator - Return weight iterator corresponding to the I successor
/// iterator
MachineBasicBlock::weight_iterator MachineBasicBlock::
@@ -1210,9 +1228,3 @@ MachineBasicBlock::computeRegisterLiveness(const TargetRegisterInfo *TRI,
// At this point we have no idea of the liveness of the register.
return LQR_Unknown;
}
-
-void llvm::WriteAsOperand(raw_ostream &OS, const MachineBasicBlock *MBB,
- bool t) {
- OS << "BB#" << MBB->getNumber();
-}
-
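
The MachineBasicBlock edits above are largely mechanical: 0/NULL becomes nullptr, llvm::next/llvm::prior become std::next/std::prev, and the free WriteAsOperand helper is folded into a printAsOperand member. A tiny sketch of the iterator substitution, with a std::list standing in for the machine function's block list (purely illustrative):

    #include <iostream>
    #include <iterator>
    #include <list>

    int main() {
      std::list<int> Blocks = {0, 1, 2, 3};
      auto I = std::next(Blocks.begin()); // block 1
      // llvm::next/llvm::prior were thin wrappers with the same semantics.
      std::cout << "next: " << *std::next(I) << "\n"; // 2
      std::cout << "prev: " << *std::prev(I) << "\n"; // 0
      // isLayoutSuccessor-style check: is B laid out immediately after A?
      auto A = Blocks.begin();
      auto B = std::next(A);
      std::cout << std::boolalpha << (std::next(A) == B) << "\n"; // true
    }
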
diff --git a/contrib/llvm/lib/CodeGen/MachineBlockFrequencyInfo.cpp b/contrib/llvm/lib/CodeGen/MachineBlockFrequencyInfo.cpp
index e269d24..9151d99 100644
--- a/contrib/llvm/lib/CodeGen/MachineBlockFrequencyInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineBlockFrequencyInfo.cpp
@@ -1,4 +1,4 @@
-//====----- MachineBlockFrequencyInfo.cpp - Machine Block Frequency Analysis ----====//
+//===- MachineBlockFrequencyInfo.cpp - MBB Frequency Analysis -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,45 +12,182 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
-#include "llvm/Analysis/BlockFrequencyImpl.h"
+#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/InitializePasses.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/GraphWriter.h"
using namespace llvm;
+#define DEBUG_TYPE "block-freq"
+
+#ifndef NDEBUG
+enum GVDAGType {
+ GVDT_None,
+ GVDT_Fraction,
+ GVDT_Integer
+};
+
+static cl::opt<GVDAGType>
+ViewMachineBlockFreqPropagationDAG("view-machine-block-freq-propagation-dags",
+ cl::Hidden,
+ cl::desc("Pop up a window to show a dag displaying how machine block "
+ "frequencies propagate through the CFG."),
+ cl::values(
+ clEnumValN(GVDT_None, "none",
+ "do not display graphs."),
+ clEnumValN(GVDT_Fraction, "fraction", "display a graph using the "
+ "fractional block frequency representation."),
+ clEnumValN(GVDT_Integer, "integer", "display a graph using the raw "
+ "integer fractional block frequency representation."),
+ clEnumValEnd));
+
+namespace llvm {
+
+template <>
+struct GraphTraits<MachineBlockFrequencyInfo *> {
+ typedef const MachineBasicBlock NodeType;
+ typedef MachineBasicBlock::const_succ_iterator ChildIteratorType;
+ typedef MachineFunction::const_iterator nodes_iterator;
+
+ static inline
+ const NodeType *getEntryNode(const MachineBlockFrequencyInfo *G) {
+ return G->getFunction()->begin();
+ }
+
+ static ChildIteratorType child_begin(const NodeType *N) {
+ return N->succ_begin();
+ }
+
+ static ChildIteratorType child_end(const NodeType *N) {
+ return N->succ_end();
+ }
+
+ static nodes_iterator nodes_begin(const MachineBlockFrequencyInfo *G) {
+ return G->getFunction()->begin();
+ }
+
+ static nodes_iterator nodes_end(const MachineBlockFrequencyInfo *G) {
+ return G->getFunction()->end();
+ }
+};
+
+template<>
+struct DOTGraphTraits<MachineBlockFrequencyInfo*> :
+ public DefaultDOTGraphTraits {
+ explicit DOTGraphTraits(bool isSimple=false) :
+ DefaultDOTGraphTraits(isSimple) {}
+
+ static std::string getGraphName(const MachineBlockFrequencyInfo *G) {
+ return G->getFunction()->getName();
+ }
+
+ std::string getNodeLabel(const MachineBasicBlock *Node,
+ const MachineBlockFrequencyInfo *Graph) {
+ std::string Result;
+ raw_string_ostream OS(Result);
+
+ OS << Node->getName().str() << ":";
+ switch (ViewMachineBlockFreqPropagationDAG) {
+ case GVDT_Fraction:
+ Graph->printBlockFreq(OS, Node);
+ break;
+ case GVDT_Integer:
+ OS << Graph->getBlockFreq(Node).getFrequency();
+ break;
+ case GVDT_None:
+ llvm_unreachable("If we are not supposed to render a graph we should "
+ "never reach this point.");
+ }
+
+ return Result;
+ }
+};
+
+
+} // end namespace llvm
+#endif
+
INITIALIZE_PASS_BEGIN(MachineBlockFrequencyInfo, "machine-block-freq",
"Machine Block Frequency Analysis", true, true)
INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(MachineBlockFrequencyInfo, "machine-block-freq",
"Machine Block Frequency Analysis", true, true)
char MachineBlockFrequencyInfo::ID = 0;
-MachineBlockFrequencyInfo::MachineBlockFrequencyInfo() : MachineFunctionPass(ID) {
+MachineBlockFrequencyInfo::
+MachineBlockFrequencyInfo() :MachineFunctionPass(ID) {
initializeMachineBlockFrequencyInfoPass(*PassRegistry::getPassRegistry());
- MBFI = new BlockFrequencyImpl<MachineBasicBlock, MachineFunction,
- MachineBranchProbabilityInfo>();
}
-MachineBlockFrequencyInfo::~MachineBlockFrequencyInfo() {
- delete MBFI;
-}
+MachineBlockFrequencyInfo::~MachineBlockFrequencyInfo() {}
void MachineBlockFrequencyInfo::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<MachineBranchProbabilityInfo>();
+ AU.addRequired<MachineLoopInfo>();
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}
bool MachineBlockFrequencyInfo::runOnMachineFunction(MachineFunction &F) {
- MachineBranchProbabilityInfo &MBPI = getAnalysis<MachineBranchProbabilityInfo>();
- MBFI->doFunction(&F, &MBPI);
+ MachineBranchProbabilityInfo &MBPI =
+ getAnalysis<MachineBranchProbabilityInfo>();
+ MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
+ if (!MBFI)
+ MBFI.reset(new ImplType);
+ MBFI->doFunction(&F, &MBPI, &MLI);
+#ifndef NDEBUG
+ if (ViewMachineBlockFreqPropagationDAG != GVDT_None) {
+ view();
+ }
+#endif
return false;
}
+void MachineBlockFrequencyInfo::releaseMemory() { MBFI.reset(); }
+
+/// Pop up a ghostview window with the current block frequency propagation
+/// rendered using dot.
+void MachineBlockFrequencyInfo::view() const {
+// This code is only for debugging.
+#ifndef NDEBUG
+ ViewGraph(const_cast<MachineBlockFrequencyInfo *>(this),
+ "MachineBlockFrequencyDAGs");
+#else
+ errs() << "MachineBlockFrequencyInfo::view is only available in debug builds "
+ "on systems with Graphviz or gv!\n";
+#endif // NDEBUG
+}
+
BlockFrequency MachineBlockFrequencyInfo::
getBlockFreq(const MachineBasicBlock *MBB) const {
- return MBFI->getBlockFreq(MBB);
+ return MBFI ? MBFI->getBlockFreq(MBB) : 0;
+}
+
+const MachineFunction *MachineBlockFrequencyInfo::getFunction() const {
+ return MBFI ? MBFI->getFunction() : nullptr;
+}
+
+raw_ostream &
+MachineBlockFrequencyInfo::printBlockFreq(raw_ostream &OS,
+ const BlockFrequency Freq) const {
+ return MBFI ? MBFI->printBlockFreq(OS, Freq) : OS;
+}
+
+raw_ostream &
+MachineBlockFrequencyInfo::printBlockFreq(raw_ostream &OS,
+ const MachineBasicBlock *MBB) const {
+ return MBFI ? MBFI->printBlockFreq(OS, MBB) : OS;
+}
+
+uint64_t MachineBlockFrequencyInfo::getEntryFreq() const {
+ return MBFI ? MBFI->getEntryFreq() : 0;
}
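
After this rewrite MachineBlockFrequencyInfo no longer owns a raw implementation pointer for its whole lifetime: the impl is allocated lazily on the first runOnMachineFunction, dropped in releaseMemory(), and every accessor falls back to a neutral default when no implementation is live. A compact sketch of that guard pattern follows; Impl and its members are placeholders rather than the real BlockFrequencyInfoImpl interface.

    #include <cstdint>
    #include <iostream>
    #include <memory>

    struct Impl {
      uint64_t entryFreq() const { return 1024; }
      uint64_t blockFreq(int BB) const { return 1024u >> BB; }
    };

    class BlockFreqPass {
      std::unique_ptr<Impl> MBFI; // lazily created per function
    public:
      void runOnFunction() {
        if (!MBFI)
          MBFI.reset(new Impl()); // allocate only when first needed
      }
      void releaseMemory() { MBFI.reset(); }
      // Accessors stay safe even if the analysis never ran on this function.
      uint64_t getEntryFreq() const { return MBFI ? MBFI->entryFreq() : 0; }
      uint64_t getBlockFreq(int BB) const { return MBFI ? MBFI->blockFreq(BB) : 0; }
    };

    int main() {
      BlockFreqPass P;
      std::cout << P.getEntryFreq() << "\n";  // 0: nothing computed yet
      P.runOnFunction();
      std::cout << P.getBlockFreq(2) << "\n"; // 256
      P.releaseMemory();
      std::cout << P.getEntryFreq() << "\n";  // 0 again
    }
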
diff --git a/contrib/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/contrib/llvm/lib/CodeGen/MachineBlockPlacement.cpp
index 4b0f7f3..74af1e2 100644
--- a/contrib/llvm/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineBlockPlacement.cpp
@@ -25,7 +25,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "block-placement2"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -46,6 +45,8 @@
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "block-placement2"
+
STATISTIC(NumCondBranches, "Number of conditional branches");
STATISTIC(NumUncondBranches, "Number of unconditional branches");
STATISTIC(CondBranchTakenFreq,
@@ -58,6 +59,13 @@ static cl::opt<unsigned> AlignAllBlock("align-all-blocks",
"blocks in the function."),
cl::init(0), cl::Hidden);
+// FIXME: Find a good default for this flag and remove the flag.
+static cl::opt<unsigned>
+ExitBlockBias("block-placement-exit-block-bias",
+ cl::desc("Block frequency percentage a loop exit block needs "
+ "over the original exit to be considered the new exit."),
+ cl::init(0), cl::Hidden);
+
namespace {
class BlockChain;
/// \brief Type for our function-wide basic block -> block chain mapping.
@@ -145,7 +153,7 @@ public:
#ifndef NDEBUG
/// \brief Dump the blocks in this chain.
- void dump() LLVM_ATTRIBUTE_USED {
+ LLVM_DUMP_METHOD void dump() {
for (iterator I = begin(), E = end(); I != E; ++I)
(*I)->dump();
}
@@ -199,7 +207,7 @@ class MachineBlockPlacement : public MachineFunctionPass {
void markChainSuccessors(BlockChain &Chain,
MachineBasicBlock *LoopHeaderBB,
SmallVectorImpl<MachineBasicBlock *> &BlockWorkList,
- const BlockFilterSet *BlockFilter = 0);
+ const BlockFilterSet *BlockFilter = nullptr);
MachineBasicBlock *selectBestSuccessor(MachineBasicBlock *BB,
BlockChain &Chain,
const BlockFilterSet *BlockFilter);
@@ -213,7 +221,7 @@ class MachineBlockPlacement : public MachineFunctionPass {
const BlockFilterSet *BlockFilter);
void buildChain(MachineBasicBlock *BB, BlockChain &Chain,
SmallVectorImpl<MachineBasicBlock *> &BlockWorkList,
- const BlockFilterSet *BlockFilter = 0);
+ const BlockFilterSet *BlockFilter = nullptr);
MachineBasicBlock *findBestLoopTop(MachineLoop &L,
const BlockFilterSet &LoopBlockSet);
MachineBasicBlock *findBestLoopExit(MachineFunction &F,
@@ -230,9 +238,9 @@ public:
initializeMachineBlockPlacementPass(*PassRegistry::getPassRegistry());
}
- bool runOnMachineFunction(MachineFunction &F);
+ bool runOnMachineFunction(MachineFunction &F) override;
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MachineBranchProbabilityInfo>();
AU.addRequired<MachineBlockFrequencyInfo>();
AU.addRequired<MachineLoopInfo>();
@@ -327,7 +335,7 @@ MachineBasicBlock *MachineBlockPlacement::selectBestSuccessor(
const BlockFilterSet *BlockFilter) {
const BranchProbability HotProb(4, 5); // 80%
- MachineBasicBlock *BestSucc = 0;
+ MachineBasicBlock *BestSucc = nullptr;
// FIXME: Due to the performance of the probability and weight routines in
// the MBPI analysis, we manually compute probabilities using the edge
// weights. This is suboptimal as it means that the somewhat subtle
@@ -360,7 +368,8 @@ MachineBasicBlock *MachineBlockPlacement::selectBestSuccessor(
// any CFG constraints.
if (SuccChain.LoopPredecessors != 0) {
if (SuccProb < HotProb) {
- DEBUG(dbgs() << " " << getBlockName(*SI) << " -> CFG conflict\n");
+ DEBUG(dbgs() << " " << getBlockName(*SI) << " -> " << SuccProb
+ << " (prob) (CFG conflict)\n");
continue;
}
@@ -383,8 +392,8 @@ MachineBasicBlock *MachineBlockPlacement::selectBestSuccessor(
}
}
if (BadCFGConflict) {
- DEBUG(dbgs() << " " << getBlockName(*SI)
- << " -> non-cold CFG conflict\n");
+ DEBUG(dbgs() << " " << getBlockName(*SI) << " -> " << SuccProb
+ << " (prob) (non-cold CFG conflict)\n");
continue;
}
}
@@ -401,23 +410,6 @@ MachineBasicBlock *MachineBlockPlacement::selectBestSuccessor(
return BestSucc;
}
-namespace {
-/// \brief Predicate struct to detect blocks already placed.
-class IsBlockPlaced {
- const BlockChain &PlacedChain;
- const BlockToChainMapType &BlockToChain;
-
-public:
- IsBlockPlaced(const BlockChain &PlacedChain,
- const BlockToChainMapType &BlockToChain)
- : PlacedChain(PlacedChain), BlockToChain(BlockToChain) {}
-
- bool operator()(MachineBasicBlock *BB) const {
- return BlockToChain.lookup(BB) == &PlacedChain;
- }
-};
-}
-
/// \brief Select the best block from a worklist.
///
/// This looks through the provided worklist as a list of candidate basic
@@ -436,10 +428,12 @@ MachineBasicBlock *MachineBlockPlacement::selectBestCandidateBlock(
// FIXME: If this shows up on profiles, it could be folded (at the cost of
// some code complexity) into the loop below.
WorkList.erase(std::remove_if(WorkList.begin(), WorkList.end(),
- IsBlockPlaced(Chain, BlockToChain)),
+ [&](MachineBasicBlock *BB) {
+ return BlockToChain.lookup(BB) == &Chain;
+ }),
WorkList.end());
- MachineBasicBlock *BestBlock = 0;
+ MachineBasicBlock *BestBlock = nullptr;
BlockFrequency BestFreq;
for (SmallVectorImpl<MachineBasicBlock *>::iterator WBI = WorkList.begin(),
WBE = WorkList.end();
@@ -453,8 +447,8 @@ MachineBasicBlock *MachineBlockPlacement::selectBestCandidateBlock(
assert(SuccChain.LoopPredecessors == 0 && "Found CFG-violating block");
BlockFrequency CandidateFreq = MBFI->getBlockFreq(*WBI);
- DEBUG(dbgs() << " " << getBlockName(*WBI) << " -> " << CandidateFreq
- << " (freq)\n");
+ DEBUG(dbgs() << " " << getBlockName(*WBI) << " -> ";
+ MBFI->printBlockFreq(dbgs(), CandidateFreq) << " (freq)\n");
if (BestBlock && BestFreq >= CandidateFreq)
continue;
BestBlock = *WBI;
@@ -486,7 +480,7 @@ MachineBasicBlock *MachineBlockPlacement::getFirstUnplacedBlock(
return *BlockToChain[I]->begin();
}
}
- return 0;
+ return nullptr;
}
void MachineBlockPlacement::buildChain(
@@ -501,11 +495,11 @@ void MachineBlockPlacement::buildChain(
MachineBasicBlock *LoopHeaderBB = BB;
markChainSuccessors(Chain, LoopHeaderBB, BlockWorkList, BlockFilter);
- BB = *llvm::prior(Chain.end());
+ BB = *std::prev(Chain.end());
for (;;) {
assert(BB);
assert(BlockToChain[BB] == &Chain);
- assert(*llvm::prior(Chain.end()) == BB);
+ assert(*std::prev(Chain.end()) == BB);
// Look for the best viable successor if there is one to place immediately
// after this block.
@@ -536,7 +530,7 @@ void MachineBlockPlacement::buildChain(
<< " to " << getBlockNum(BestSucc) << "\n");
markChainSuccessors(SuccChain, LoopHeaderBB, BlockWorkList, BlockFilter);
Chain.merge(BestSucc, &SuccChain);
- BB = *llvm::prior(Chain.end());
+ BB = *std::prev(Chain.end());
}
DEBUG(dbgs() << "Finished forming chain for header block "
@@ -567,7 +561,7 @@ MachineBlockPlacement::findBestLoopTop(MachineLoop &L,
<< getBlockName(L.getHeader()) << "\n");
BlockFrequency BestPredFreq;
- MachineBasicBlock *BestPred = 0;
+ MachineBasicBlock *BestPred = nullptr;
for (MachineBasicBlock::pred_iterator PI = L.getHeader()->pred_begin(),
PE = L.getHeader()->pred_end();
PI != PE; ++PI) {
@@ -575,8 +569,8 @@ MachineBlockPlacement::findBestLoopTop(MachineLoop &L,
if (!LoopBlockSet.count(Pred))
continue;
DEBUG(dbgs() << " header pred: " << getBlockName(Pred) << ", "
- << Pred->succ_size() << " successors, "
- << MBFI->getBlockFreq(Pred) << " freq\n");
+ << Pred->succ_size() << " successors, ";
+ MBFI->printBlockFreq(dbgs(), Pred) << " freq\n");
if (Pred->succ_size() > 1)
continue;
@@ -623,11 +617,11 @@ MachineBlockPlacement::findBestLoopExit(MachineFunction &F,
// header and only rotate if safe.
BlockChain &HeaderChain = *BlockToChain[L.getHeader()];
if (!LoopBlockSet.count(*HeaderChain.begin()))
- return 0;
+ return nullptr;
BlockFrequency BestExitEdgeFreq;
unsigned BestExitLoopDepth = 0;
- MachineBasicBlock *ExitingBB = 0;
+ MachineBasicBlock *ExitingBB = nullptr;
// If there are exits to outer loops, loop rotation can severely limit
// fallthrough opportunities unless it selects such an exit. Keep a set of
// blocks where rotating to exit with that block will reach an outer loop.
@@ -641,7 +635,7 @@ MachineBlockPlacement::findBestLoopExit(MachineFunction &F,
BlockChain &Chain = *BlockToChain[*I];
// Ensure that this block is at the end of a chain; otherwise it could be
// mid-way through an inner loop or a successor of an analyzable branch.
- if (*I != *llvm::prior(Chain.end()))
+ if (*I != *std::prev(Chain.end()))
continue;
// Now walk the successors. We need to establish whether this has a viable
@@ -690,14 +684,17 @@ MachineBlockPlacement::findBestLoopExit(MachineFunction &F,
BlockFrequency ExitEdgeFreq = MBFI->getBlockFreq(*I) * SuccProb;
DEBUG(dbgs() << " exiting: " << getBlockName(*I) << " -> "
<< getBlockName(*SI) << " [L:" << SuccLoopDepth
- << "] (" << ExitEdgeFreq << ")\n");
- // Note that we slightly bias this toward an existing layout successor to
- // retain incoming order in the absence of better information.
- // FIXME: Should we bias this more strongly? It's pretty weak.
+ << "] (";
+ MBFI->printBlockFreq(dbgs(), ExitEdgeFreq) << ")\n");
+ // Note that we bias this toward an existing layout successor to retain
+ // incoming order in the absence of better information. The exit must have
+ // a frequency higher than the current exit before we consider breaking
+ // the layout.
+ BranchProbability Bias(100 - ExitBlockBias, 100);
if (!ExitingBB || BestExitLoopDepth < SuccLoopDepth ||
ExitEdgeFreq > BestExitEdgeFreq ||
((*I)->isLayoutSuccessor(*SI) &&
- !(ExitEdgeFreq < BestExitEdgeFreq))) {
+ !(ExitEdgeFreq < BestExitEdgeFreq * Bias))) {
BestExitEdgeFreq = ExitEdgeFreq;
ExitingBB = *I;
}
@@ -713,14 +710,14 @@ MachineBlockPlacement::findBestLoopExit(MachineFunction &F,
// Without a candidate exiting block or with only a single block in the
// loop, just use the loop header to layout the loop.
if (!ExitingBB || L.getNumBlocks() == 1)
- return 0;
+ return nullptr;
// Also, if we have exit blocks which lead to outer loops but didn't select
// one of them as the exiting block we are rotating toward, disable loop
// rotation altogether.
if (!BlocksExitingToOuterLoop.empty() &&
!BlocksExitingToOuterLoop.count(ExitingBB))
- return 0;
+ return nullptr;
DEBUG(dbgs() << " Best exiting block: " << getBlockName(ExitingBB) << "\n");
return ExitingBB;
@@ -745,7 +742,7 @@ void MachineBlockPlacement::rotateLoop(BlockChain &LoopChain,
PI != PE; ++PI) {
BlockChain *PredChain = BlockToChain[*PI];
if (!LoopBlockSet.count(*PI) &&
- (!PredChain || *PI == *llvm::prior(PredChain->end()))) {
+ (!PredChain || *PI == *std::prev(PredChain->end()))) {
ViableTopFallthrough = true;
break;
}
@@ -755,7 +752,7 @@ void MachineBlockPlacement::rotateLoop(BlockChain &LoopChain,
// bottom is a viable exiting block. If so, bail out as rotating will
// introduce an unnecessary branch.
if (ViableTopFallthrough) {
- MachineBasicBlock *Bottom = *llvm::prior(LoopChain.end());
+ MachineBasicBlock *Bottom = *std::prev(LoopChain.end());
for (MachineBasicBlock::succ_iterator SI = Bottom->succ_begin(),
SE = Bottom->succ_end();
SI != SE; ++SI) {
@@ -771,7 +768,7 @@ void MachineBlockPlacement::rotateLoop(BlockChain &LoopChain,
if (ExitIt == LoopChain.end())
return;
- std::rotate(LoopChain.begin(), llvm::next(ExitIt), LoopChain.end());
+ std::rotate(LoopChain.begin(), std::next(ExitIt), LoopChain.end());
}
/// \brief Forms basic block chains from the natural loop structures.
@@ -799,7 +796,7 @@ void MachineBlockPlacement::buildLoopChains(MachineFunction &F,
// If we selected just the header for the loop top, look for a potentially
// profitable exit block in the event that rotating the loop can eliminate
// branches by placing an exit edge at the bottom.
- MachineBasicBlock *ExitingBB = 0;
+ MachineBasicBlock *ExitingBB = nullptr;
if (LoopTop == L.getHeader())
ExitingBB = findBestLoopExit(F, L, LoopBlockSet);
@@ -887,11 +884,11 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
// the exact fallthrough behavior for.
for (;;) {
Cond.clear();
- MachineBasicBlock *TBB = 0, *FBB = 0; // For AnalyzeBranch.
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr; // For AnalyzeBranch.
if (!TII->AnalyzeBranch(*BB, TBB, FBB, Cond) || !FI->canFallThrough())
break;
- MachineFunction::iterator NextFI(llvm::next(FI));
+ MachineFunction::iterator NextFI(std::next(FI));
MachineBasicBlock *NextBB = NextFI;
// Ensure that the layout successor is a viable block, as we know that
// fallthrough is a possibility.
@@ -899,7 +896,7 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
DEBUG(dbgs() << "Pre-merging due to unanalyzable fallthrough: "
<< getBlockName(BB) << " -> " << getBlockName(NextBB)
<< "\n");
- Chain->merge(NextBB, 0);
+ Chain->merge(NextBB, nullptr);
FI = NextFI;
BB = NextBB;
}
@@ -939,7 +936,9 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
BlockChain &FunctionChain = *BlockToChain[&F.front()];
buildChain(&F.front(), FunctionChain, BlockWorkList);
+#ifndef NDEBUG
typedef SmallPtrSet<MachineBasicBlock *, 16> FunctionBlockSetType;
+#endif
DEBUG({
// Crash at the end so we get all of the debugging output first.
bool BadFunc = false;
@@ -983,13 +982,13 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
// Update the terminator of the previous block.
if (BI == FunctionChain.begin())
continue;
- MachineBasicBlock *PrevBB = llvm::prior(MachineFunction::iterator(*BI));
+ MachineBasicBlock *PrevBB = std::prev(MachineFunction::iterator(*BI));
// FIXME: It would be awesome of updateTerminator would just return rather
// than assert when the branch cannot be analyzed in order to remove this
// boiler plate.
Cond.clear();
- MachineBasicBlock *TBB = 0, *FBB = 0; // For AnalyzeBranch.
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr; // For AnalyzeBranch.
if (!TII->AnalyzeBranch(*PrevBB, TBB, FBB, Cond)) {
// The "PrevBB" is not yet updated to reflect current code layout, so,
// o. it may fall-through to a block without an explicit "goto" instruction
@@ -1006,10 +1005,10 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
PrevBB->updateTerminator();
needUpdateBr = false;
Cond.clear();
- TBB = FBB = 0;
+ TBB = FBB = nullptr;
if (TII->AnalyzeBranch(*PrevBB, TBB, FBB, Cond)) {
// FIXME: This should never take place.
- TBB = FBB = 0;
+ TBB = FBB = nullptr;
}
}
@@ -1034,7 +1033,7 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
// Fixup the last block.
Cond.clear();
- MachineBasicBlock *TBB = 0, *FBB = 0; // For AnalyzeBranch.
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr; // For AnalyzeBranch.
if (!TII->AnalyzeBranch(F.back(), TBB, FBB, Cond))
F.back().updateTerminator();
@@ -1055,7 +1054,7 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
const BranchProbability ColdProb(1, 5); // 20%
BlockFrequency EntryFreq = MBFI->getBlockFreq(F.begin());
BlockFrequency WeightedEntryFreq = EntryFreq * ColdProb;
- for (BlockChain::iterator BI = llvm::next(FunctionChain.begin()),
+ for (BlockChain::iterator BI = std::next(FunctionChain.begin()),
BE = FunctionChain.end();
BI != BE; ++BI) {
// Don't align non-looping basic blocks. These are unlikely to execute
@@ -1081,7 +1080,7 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
// Check for the existence of a non-layout predecessor which would benefit
// from aligning this block.
- MachineBasicBlock *LayoutPred = *llvm::prior(BI);
+ MachineBasicBlock *LayoutPred = *std::prev(BI);
// Force alignment if all the predecessors are jumps. We already checked
// that the block isn't cold above.
@@ -1103,7 +1102,10 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &F) {
// Check for single-block functions and skip them.
- if (llvm::next(F.begin()) == F.end())
+ if (std::next(F.begin()) == F.end())
+ return false;
+
+ if (skipOptnoneFunction(*F.getFunction()))
return false;
MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
@@ -1149,9 +1151,9 @@ public:
initializeMachineBlockPlacementStatsPass(*PassRegistry::getPassRegistry());
}
- bool runOnMachineFunction(MachineFunction &F);
+ bool runOnMachineFunction(MachineFunction &F) override;
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MachineBranchProbabilityInfo>();
AU.addRequired<MachineBlockFrequencyInfo>();
AU.setPreservesAll();
@@ -1171,7 +1173,7 @@ INITIALIZE_PASS_END(MachineBlockPlacementStats, "block-placement-stats",
bool MachineBlockPlacementStats::runOnMachineFunction(MachineFunction &F) {
// Check for single-block functions and skip them.
- if (llvm::next(F.begin()) == F.end())
+ if (std::next(F.begin()) == F.end())
return false;
MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
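
The block-placement hunks above introduce the hidden -block-placement-exit-block-bias flag and fold it into loop-exit selection: an exit that preserves the incoming layout is kept as long as its edge frequency is at least BestExitEdgeFreq scaled by (100 - bias)/100, so a non-layout exit must be roughly bias percent hotter before the chain is rotated away from it. A small numeric sketch of that comparison, with plain integers in place of the fixed-point BlockFrequency/BranchProbability types:

    #include <cstdint>
    #include <iostream>

    // Keep the layout-preserving exit unless it falls below the biased threshold,
    // mirroring !(ExitEdgeFreq < BestExitEdgeFreq * Bias)
    // with Bias = (100 - BiasPercent) / 100.
    static bool keepLayoutExit(uint64_t LayoutExitFreq, uint64_t BestFreq,
                               unsigned BiasPercent) {
      return LayoutExitFreq * 100 >= BestFreq * (100 - BiasPercent);
    }

    int main() {
      std::cout << std::boolalpha
                << keepLayoutExit(95, 100, 0)  << "\n"  // false: must match the best
                << keepLayoutExit(95, 100, 10) << "\n"  // true: may be up to 10% colder
                << keepLayoutExit(85, 100, 10) << "\n"; // false: too cold even with bias
    }
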
diff --git a/contrib/llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp b/contrib/llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp
index ae70912..6fbc2be 100644
--- a/contrib/llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp
@@ -77,8 +77,9 @@ getEdgeWeight(const MachineBasicBlock *Src,
return getEdgeWeight(Src, std::find(Src->succ_begin(), Src->succ_end(), Dst));
}
-bool MachineBranchProbabilityInfo::isEdgeHot(MachineBasicBlock *Src,
- MachineBasicBlock *Dst) const {
+bool
+MachineBranchProbabilityInfo::isEdgeHot(const MachineBasicBlock *Src,
+ const MachineBasicBlock *Dst) const {
// Hot probability is at least 4/5 = 80%
// FIXME: Compare against a static "hot" BranchProbability.
return getEdgeProbability(Src, Dst) > BranchProbability(4, 5);
@@ -87,7 +88,7 @@ bool MachineBranchProbabilityInfo::isEdgeHot(MachineBasicBlock *Src,
MachineBasicBlock *
MachineBranchProbabilityInfo::getHotSucc(MachineBasicBlock *MBB) const {
uint32_t MaxWeight = 0;
- MachineBasicBlock *MaxSucc = 0;
+ MachineBasicBlock *MaxSucc = nullptr;
for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
E = MBB->succ_end(); I != E; ++I) {
uint32_t Weight = getEdgeWeight(MBB, I);
@@ -100,12 +101,11 @@ MachineBranchProbabilityInfo::getHotSucc(MachineBasicBlock *MBB) const {
if (getEdgeProbability(MBB, MaxSucc) >= BranchProbability(4, 5))
return MaxSucc;
- return 0;
+ return nullptr;
}
-BranchProbability
-MachineBranchProbabilityInfo::getEdgeProbability(MachineBasicBlock *Src,
- MachineBasicBlock *Dst) const {
+BranchProbability MachineBranchProbabilityInfo::getEdgeProbability(
+ const MachineBasicBlock *Src, const MachineBasicBlock *Dst) const {
uint32_t Scale = 1;
uint32_t D = getSumForBlock(Src, Scale);
uint32_t N = getEdgeWeight(Src, Dst) / Scale;
@@ -113,13 +113,13 @@ MachineBranchProbabilityInfo::getEdgeProbability(MachineBasicBlock *Src,
return BranchProbability(N, D);
}
-raw_ostream &MachineBranchProbabilityInfo::
-printEdgeProbability(raw_ostream &OS, MachineBasicBlock *Src,
- MachineBasicBlock *Dst) const {
+raw_ostream &MachineBranchProbabilityInfo::printEdgeProbability(
+ raw_ostream &OS, const MachineBasicBlock *Src,
+ const MachineBasicBlock *Dst) const {
const BranchProbability Prob = getEdgeProbability(Src, Dst);
OS << "edge MBB#" << Src->getNumber() << " -> MBB#" << Dst->getNumber()
- << " probability is " << Prob
+ << " probability is " << Prob
<< (isEdgeHot(Src, Dst) ? " [HOT edge]\n" : "\n");
return OS;
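
The MachineBranchProbabilityInfo changes are const-correctness and nullptr cleanups; getEdgeProbability still divides one edge weight by the per-block weight sum, and the Scale factor returned by getSumForBlock exists only so the sum fits in 32 bits when raw weights are large. A standalone sketch of that scaled division (the scaling rule here is simplified):

    #include <cstdint>
    #include <iostream>
    #include <numeric>
    #include <vector>

    // Return the weight sum, choosing Scale so the scaled sum fits in 32 bits.
    static uint32_t sumForBlock(const std::vector<uint32_t> &Weights,
                                uint32_t &Scale) {
      uint64_t Sum = std::accumulate(Weights.begin(), Weights.end(), uint64_t(0));
      Scale = 1;
      while (Sum / Scale > UINT32_MAX) // simplified: double Scale until it fits
        Scale *= 2;
      return static_cast<uint32_t>(Sum / Scale);
    }

    int main() {
      std::vector<uint32_t> Weights = {3u << 30, 1u << 30, 2u << 30};
      uint32_t Scale = 0;
      uint32_t D = sumForBlock(Weights, Scale);
      uint32_t N = Weights[0] / Scale; // same scaled division as getEdgeProbability
      std::cout << "P = " << N << " / " << D << " (scale " << Scale << ")\n";
    }
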
diff --git a/contrib/llvm/lib/CodeGen/MachineCSE.cpp b/contrib/llvm/lib/CodeGen/MachineCSE.cpp
index d228286..7da439c 100644
--- a/contrib/llvm/lib/CodeGen/MachineCSE.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineCSE.cpp
@@ -13,7 +13,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "machine-cse"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ScopedHashTable.h"
@@ -28,6 +27,8 @@
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "machine-cse"
+
STATISTIC(NumCoalesces, "Number of copies coalesced");
STATISTIC(NumCSEs, "Number of common subexpression eliminated");
STATISTIC(NumPhysCSEs,
@@ -49,9 +50,9 @@ namespace {
initializeMachineCSEPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
AU.addRequired<AliasAnalysis>();
@@ -60,7 +61,7 @@ namespace {
AU.addPreserved<MachineDominatorTree>();
}
- virtual void releaseMemory() {
+ void releaseMemory() override {
ScopeMap.clear();
Exps.clear();
}
@@ -131,9 +132,24 @@ bool MachineCSE::PerformTrivialCoalescing(MachineInstr *MI,
unsigned SrcReg = DefMI->getOperand(1).getReg();
if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
continue;
- if (DefMI->getOperand(0).getSubReg() || DefMI->getOperand(1).getSubReg())
+ if (DefMI->getOperand(0).getSubReg())
+ continue;
+ // FIXME: We should trivially coalesce subregister copies to expose CSE
+ // opportunities on instructions with truncated operands (see
+ // cse-add-with-overflow.ll). This can be done here as follows:
+ // if (SrcSubReg)
+ // RC = TRI->getMatchingSuperRegClass(MRI->getRegClass(SrcReg), RC,
+ // SrcSubReg);
+ // MO.substVirtReg(SrcReg, SrcSubReg, *TRI);
+ //
+ // The 2-addr pass has been updated to handle coalesced subregs. However,
+ // some machine-specific code still can't handle it.
+  // To handle it properly we also need a way to find a constrained subregister
+ // class given a super-reg class and subreg index.
+ if (DefMI->getOperand(1).getSubReg())
continue;
- if (!MRI->constrainRegClass(SrcReg, MRI->getRegClass(Reg)))
+ const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+ if (!MRI->constrainRegClass(SrcReg, RC))
continue;
DEBUG(dbgs() << "Coalescing: " << *DefMI);
DEBUG(dbgs() << "*** to: " << *MI);
@@ -214,7 +230,7 @@ bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
// Next, collect all defs into PhysDefs. If any is already in PhysRefs
// (which currently contains only uses), set the PhysUseDef flag.
PhysUseDef = false;
- MachineBasicBlock::const_iterator I = MI; I = llvm::next(I);
+ MachineBasicBlock::const_iterator I = MI; I = std::next(I);
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isDef())
@@ -265,7 +281,7 @@ bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
}
CrossMBB = true;
}
- MachineBasicBlock::const_iterator I = CSMI; I = llvm::next(I);
+ MachineBasicBlock::const_iterator I = CSMI; I = std::next(I);
MachineBasicBlock::const_iterator E = MI;
MachineBasicBlock::const_iterator EE = CSMBB->end();
unsigned LookAheadLeft = LookAheadLimit;
@@ -310,8 +326,8 @@ bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
}
bool MachineCSE::isCSECandidate(MachineInstr *MI) {
- if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
- MI->isKill() || MI->isInlineAsm() || MI->isDebugValue())
+ if (MI->isPosition() || MI->isPHI() || MI->isImplicitDef() || MI->isKill() ||
+ MI->isInlineAsm() || MI->isDebugValue())
return false;
// Ignore copies.
@@ -349,15 +365,11 @@ bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
TargetRegisterInfo::isVirtualRegister(Reg)) {
MayIncreasePressure = false;
SmallPtrSet<MachineInstr*, 8> CSUses;
- for (MachineRegisterInfo::use_nodbg_iterator I =MRI->use_nodbg_begin(CSReg),
- E = MRI->use_nodbg_end(); I != E; ++I) {
- MachineInstr *Use = &*I;
- CSUses.insert(Use);
+ for (MachineInstr &MI : MRI->use_nodbg_instructions(CSReg)) {
+ CSUses.insert(&MI);
}
- for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
- E = MRI->use_nodbg_end(); I != E; ++I) {
- MachineInstr *Use = &*I;
- if (!CSUses.count(Use)) {
+ for (MachineInstr &MI : MRI->use_nodbg_instructions(Reg)) {
+ if (!CSUses.count(&MI)) {
MayIncreasePressure = true;
break;
}
@@ -388,11 +400,9 @@ bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
}
if (!HasVRegUse) {
bool HasNonCopyUse = false;
- for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
- E = MRI->use_nodbg_end(); I != E; ++I) {
- MachineInstr *Use = &*I;
+ for (MachineInstr &MI : MRI->use_nodbg_instructions(Reg)) {
// Ignore copies.
- if (!Use->isCopyLike()) {
+ if (!MI.isCopyLike()) {
HasNonCopyUse = true;
break;
}
@@ -405,11 +415,9 @@ bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
// it unless the defined value is already used in the BB of the new use.
bool HasPHI = false;
SmallPtrSet<MachineBasicBlock*, 4> CSBBs;
- for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(CSReg),
- E = MRI->use_nodbg_end(); I != E; ++I) {
- MachineInstr *Use = &*I;
- HasPHI |= Use->isPHI();
- CSBBs.insert(Use->getParent());
+ for (MachineInstr &MI : MRI->use_nodbg_instructions(CSReg)) {
+ HasPHI |= MI.isPHI();
+ CSBBs.insert(MI.getParent());
}
if (!HasPHI)
@@ -513,7 +521,7 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
bool DoCSE = true;
unsigned NumDefs = MI->getDesc().getNumDefs() +
MI->getDesc().getNumImplicitDefs();
-
+
for (unsigned i = 0, e = MI->getNumOperands(); NumDefs && i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isDef())
@@ -652,6 +660,9 @@ bool MachineCSE::PerformCSE(MachineDomTreeNode *Node) {
}
bool MachineCSE::runOnMachineFunction(MachineFunction &MF) {
+ if (skipOptnoneFunction(*MF.getFunction()))
+ return false;
+
TII = MF.getTarget().getInstrInfo();
TRI = MF.getTarget().getRegisterInfo();
MRI = &MF.getRegInfo();
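
Most of the MachineCSE diff swaps explicit use_nodbg_iterator loops for range-based iteration over use_nodbg_instructions, but the profitability logic is unchanged: gather every non-debug user of the existing value, then see whether the new value has any user outside that set. A self-contained sketch of that membership check, with ordinary containers standing in for the register use lists:

    #include <iostream>
    #include <set>
    #include <vector>

    // Stand-in for "instructions using a register"; in the pass these come
    // from MRI->use_nodbg_instructions(Reg).
    using UserList = std::vector<int>;

    static bool mayIncreasePressure(const UserList &CSRegUsers,
                                    const UserList &RegUsers) {
      std::set<int> CSUses(CSRegUsers.begin(), CSRegUsers.end());
      for (int User : RegUsers)
        if (!CSUses.count(User)) // a user the existing value does not reach
          return true;
      return false;
    }

    int main() {
      std::cout << std::boolalpha
                << mayIncreasePressure({1, 2, 3}, {2, 3}) << "\n"  // false
                << mayIncreasePressure({1, 2, 3}, {2, 4}) << "\n"; // true
    }
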
diff --git a/contrib/llvm/lib/CodeGen/MachineCopyPropagation.cpp b/contrib/llvm/lib/CodeGen/MachineCopyPropagation.cpp
index 4f48e2c..3119a35 100644
--- a/contrib/llvm/lib/CodeGen/MachineCopyPropagation.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineCopyPropagation.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "codegen-cp"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
@@ -28,6 +27,8 @@
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "codegen-cp"
+
STATISTIC(NumDeletes, "Number of dead copies deleted");
namespace {
@@ -42,7 +43,7 @@ namespace {
initializeMachineCopyPropagationPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
private:
typedef SmallVector<unsigned, 4> DestList;
@@ -127,13 +128,10 @@ static bool isNopCopy(MachineInstr *CopyMI, unsigned Def, unsigned Src,
}
// Remove MI from the function because it has been determined it is dead.
-// Turn it into a noop KILL instruction if it has super-register liveness
-// adjustments.
+// Turn it into a noop KILL instruction as opposed to removing it to
+// maintain imp-use/imp-def chains.
void MachineCopyPropagation::removeCopy(MachineInstr *MI) {
- if (MI->getNumOperands() == 2)
- MI->eraseFromParent();
- else
- MI->setDesc(TII->get(TargetOpcode::KILL));
+ MI->setDesc(TII->get(TargetOpcode::KILL));
}
bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
@@ -142,6 +140,8 @@ bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
DenseMap<unsigned, MachineInstr*> CopyMap; // Def -> copies map
SourceMap SrcMap; // Src -> Def map
+ DEBUG(dbgs() << "MCP: CopyPropagateBlock " << MBB.getName() << "\n");
+
bool Changed = false;
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; ) {
MachineInstr *MI = &*I;
@@ -176,6 +176,8 @@ bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
// CALL
// %RAX<def> = COPY %RSP
+ DEBUG(dbgs() << "MCP: copy is a NOP, removing: "; MI->dump());
+
// Clear any kills of Def between CopyMI and MI. This extends the
// live range.
for (MachineBasicBlock::iterator I = CopyMI, E = MI; I != E; ++I)
@@ -191,10 +193,14 @@ bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
// If Src is defined by a previous copy, it cannot be eliminated.
for (MCRegAliasIterator AI(Src, TRI, true); AI.isValid(); ++AI) {
CI = CopyMap.find(*AI);
- if (CI != CopyMap.end())
+ if (CI != CopyMap.end()) {
+ DEBUG(dbgs() << "MCP: Copy is no longer dead: "; CI->second->dump());
MaybeDeadCopies.remove(CI->second);
+ }
}
+ DEBUG(dbgs() << "MCP: Copy is a deletion candidate: "; MI->dump());
+
// Copy is now a candidate for deletion.
MaybeDeadCopies.insert(MI);
@@ -255,8 +261,10 @@ bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
// for elimination.
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
DenseMap<unsigned, MachineInstr*>::iterator CI = CopyMap.find(*AI);
- if (CI != CopyMap.end())
+ if (CI != CopyMap.end()) {
+ DEBUG(dbgs() << "MCP: Copy is used - not dead: "; CI->second->dump());
MaybeDeadCopies.remove(CI->second);
+ }
}
}
@@ -273,6 +281,8 @@ bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
unsigned Reg = (*DI)->getOperand(0).getReg();
if (MRI->isReserved(Reg) || !MaskMO.clobbersPhysReg(Reg))
continue;
+ DEBUG(dbgs() << "MCP: Removing copy due to regmask clobbering: ";
+ (*DI)->dump());
removeCopy(*DI);
Changed = true;
++NumDeletes;
@@ -320,6 +330,9 @@ bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
}
bool MachineCopyPropagation::runOnMachineFunction(MachineFunction &MF) {
+ if (skipOptnoneFunction(*MF.getFunction()))
+ return false;
+
bool Changed = false;
TRI = MF.getTarget().getRegisterInfo();
diff --git a/contrib/llvm/lib/CodeGen/MachineDominanceFrontier.cpp b/contrib/llvm/lib/CodeGen/MachineDominanceFrontier.cpp
new file mode 100644
index 0000000..0bee846
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/MachineDominanceFrontier.cpp
@@ -0,0 +1,54 @@
+//===- MachineDominanceFrontier.cpp ---------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/MachineDominanceFrontier.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/Analysis/DominanceFrontierImpl.h"
+#include "llvm/CodeGen/Passes.h"
+
+
+using namespace llvm;
+
+namespace llvm {
+template class DominanceFrontierBase<MachineBasicBlock>;
+template class ForwardDominanceFrontierBase<MachineBasicBlock>;
+}
+
+
+char MachineDominanceFrontier::ID = 0;
+
+INITIALIZE_PASS_BEGIN(MachineDominanceFrontier, "machine-domfrontier",
+ "Machine Dominance Frontier Construction", true, true)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_END(MachineDominanceFrontier, "machine-domfrontier",
+ "Machine Dominance Frontier Construction", true, true)
+
+MachineDominanceFrontier::MachineDominanceFrontier()
+ : MachineFunctionPass(ID),
+ Base() {
+ initializeMachineDominanceFrontierPass(*PassRegistry::getPassRegistry());
+}
+
+char &llvm::MachineDominanceFrontierID = MachineDominanceFrontier::ID;
+
+bool MachineDominanceFrontier::runOnMachineFunction(MachineFunction &) {
+ releaseMemory();
+ Base.analyze(getAnalysis<MachineDominatorTree>().getBase());
+ return false;
+}
+
+void MachineDominanceFrontier::releaseMemory() {
+ Base.releaseMemory();
+}
+
+void MachineDominanceFrontier::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequired<MachineDominatorTree>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
diff --git a/contrib/llvm/lib/CodeGen/MachineFunction.cpp b/contrib/llvm/lib/CodeGen/MachineFunction.cpp
index f3be318..7e9b755 100644
--- a/contrib/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineFunction.cpp
@@ -17,7 +17,6 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -26,8 +25,8 @@
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
@@ -39,6 +38,8 @@
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
+#define DEBUG_TYPE "codegen"
+
//===----------------------------------------------------------------------===//
// MachineFunction implementation
//===----------------------------------------------------------------------===//
@@ -57,9 +58,9 @@ MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM,
if (TM.getRegisterInfo())
RegInfo = new (Allocator) MachineRegisterInfo(TM);
else
- RegInfo = 0;
+ RegInfo = nullptr;
- MFInfo = 0;
+ MFInfo = nullptr;
FrameInfo =
new (Allocator) MachineFrameInfo(TM,!F->hasFnAttribute("no-realign-stack"));
@@ -78,7 +79,7 @@ MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM,
TM.getTargetLowering()->getPrefFunctionAlignment());
FunctionNumber = FunctionNum;
- JumpTableInfo = 0;
+ JumpTableInfo = nullptr;
}
MachineFunction::~MachineFunction() {
@@ -124,6 +125,11 @@ getOrCreateJumpTableInfo(unsigned EntryKind) {
return JumpTableInfo;
}
+/// Should we be emitting segmented stack stuff for the function
+bool MachineFunction::shouldSplitStack() {
+ return getFunction()->hasFnAttribute("split-stack");
+}
+
/// RenumberBlocks - This discards all of the MachineBasicBlock numbers and
/// recomputes them. This guarantees that the MBB numbers are sequential,
/// dense, and match the ordering of the blocks within the function. If a
@@ -132,7 +138,7 @@ getOrCreateJumpTableInfo(unsigned EntryKind) {
void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
if (empty()) { MBBNumbering.clear(); return; }
MachineFunction::iterator MBBI, E = end();
- if (MBB == 0)
+ if (MBB == nullptr)
MBBI = begin();
else
MBBI = MBB;
@@ -140,7 +146,7 @@ void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
// Figure out the block number this should have.
unsigned BlockNo = 0;
if (MBBI != begin())
- BlockNo = prior(MBBI)->getNumber()+1;
+ BlockNo = std::prev(MBBI)->getNumber() + 1;
for (; MBBI != E; ++MBBI, ++BlockNo) {
if (MBBI->getNumber() != (int)BlockNo) {
@@ -148,7 +154,7 @@ void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
if (MBBI->getNumber() != -1) {
assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
"MBB number mismatch!");
- MBBNumbering[MBBI->getNumber()] = 0;
+ MBBNumbering[MBBI->getNumber()] = nullptr;
}
// If BlockNo is already taken, set that block's number to -1.
@@ -232,11 +238,17 @@ MachineFunction::getMachineMemOperand(MachinePointerInfo PtrInfo, unsigned f,
MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
int64_t Offset, uint64_t Size) {
+ if (MMO->getValue())
+ return new (Allocator)
+ MachineMemOperand(MachinePointerInfo(MMO->getValue(),
+ MMO->getOffset()+Offset),
+ MMO->getFlags(), Size,
+ MMO->getBaseAlignment(), nullptr);
return new (Allocator)
- MachineMemOperand(MachinePointerInfo(MMO->getValue(),
+ MachineMemOperand(MachinePointerInfo(MMO->getPseudoValue(),
MMO->getOffset()+Offset),
MMO->getFlags(), Size,
- MMO->getBaseAlignment(), 0);
+ MMO->getBaseAlignment(), nullptr);
}
MachineInstr::mmo_iterator
@@ -347,15 +359,15 @@ void MachineFunction::print(raw_ostream &OS, SlotIndexes *Indexes) const {
OS << PrintReg(I->first, TRI);
if (I->second)
OS << " in " << PrintReg(I->second, TRI);
- if (llvm::next(I) != E)
+ if (std::next(I) != E)
OS << ", ";
}
OS << '\n';
}
- for (const_iterator BB = begin(), E = end(); BB != E; ++BB) {
+ for (const auto &BB : *this) {
OS << '\n';
- BB->print(OS, Indexes);
+ BB.print(OS, Indexes);
}
OS << "\n# End machine code for function " << getName() << ".\n\n";
@@ -425,7 +437,16 @@ unsigned MachineFunction::addLiveIn(unsigned PReg,
MachineRegisterInfo &MRI = getRegInfo();
unsigned VReg = MRI.getLiveInVirtReg(PReg);
if (VReg) {
- assert(MRI.getRegClass(VReg) == RC && "Register class mismatch!");
+ const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
+ (void)VRegRC;
+ // A physical register can be added several times.
+ // Between two calls, the register class of the related virtual register
+ // may have been constrained to match some operation constraints.
+ // In that case, check that the current register class includes the
+ // physical register and is a sub class of the specified RC.
+ assert((VRegRC == RC || (VRegRC->contains(PReg) &&
+ RC->hasSubClassEq(VRegRC))) &&
+ "Register class mismatch!");
return VReg;
}
VReg = MRI.createVirtualRegister(RC);
@@ -436,14 +457,14 @@ unsigned MachineFunction::addLiveIn(unsigned PReg,
/// getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
/// normal 'L' label is returned.
-MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
+MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
bool isLinkerPrivate) const {
+ const DataLayout *DL = getTarget().getDataLayout();
assert(JumpTableInfo && "No jump tables");
assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
- const MCAsmInfo &MAI = *getTarget().getMCAsmInfo();
- const char *Prefix = isLinkerPrivate ? MAI.getLinkerPrivateGlobalPrefix() :
- MAI.getPrivateGlobalPrefix();
+ const char *Prefix = isLinkerPrivate ? DL->getLinkerPrivateGlobalPrefix() :
+ DL->getPrivateGlobalPrefix();
SmallString<60> Name;
raw_svector_ostream(Name)
<< Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
@@ -453,8 +474,8 @@ MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
/// getPICBaseSymbol - Return a function-local symbol to represent the PIC
/// base.
MCSymbol *MachineFunction::getPICBaseSymbol() const {
- const MCAsmInfo &MAI = *Target.getMCAsmInfo();
- return Ctx.GetOrCreateSymbol(Twine(MAI.getPrivateGlobalPrefix())+
+ const DataLayout *DL = getTarget().getDataLayout();
+ return Ctx.GetOrCreateSymbol(Twine(DL->getPrivateGlobalPrefix())+
Twine(getFunctionNumber())+"$pb");
}
@@ -490,14 +511,13 @@ static inline unsigned clampStackAlignment(bool ShouldClamp, unsigned Align,
/// a nonnegative identifier to represent it.
///
int MachineFrameInfo::CreateStackObject(uint64_t Size, unsigned Alignment,
- bool isSS, bool MayNeedSP, const AllocaInst *Alloca) {
+ bool isSS, const AllocaInst *Alloca) {
assert(Size != 0 && "Cannot allocate zero size stack objects!");
Alignment =
clampStackAlignment(!getFrameLowering()->isStackRealignable() ||
!RealignOption,
Alignment, getFrameLowering()->getStackAlignment());
- Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, MayNeedSP,
- Alloca));
+ Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, Alloca));
int Index = (int)Objects.size() - NumFixedObjects - 1;
assert(Index >= 0 && "Bad frame index!");
ensureMaxAlignment(Alignment);
@@ -510,11 +530,10 @@ int MachineFrameInfo::CreateStackObject(uint64_t Size, unsigned Alignment,
///
int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
unsigned Alignment) {
- Alignment =
- clampStackAlignment(!getFrameLowering()->isStackRealignable() ||
- !RealignOption,
- Alignment, getFrameLowering()->getStackAlignment());
- CreateStackObject(Size, Alignment, true, false);
+ Alignment = clampStackAlignment(
+ !getFrameLowering()->isStackRealignable() || !RealignOption, Alignment,
+ getFrameLowering()->getStackAlignment());
+ CreateStackObject(Size, Alignment, true);
int Index = (int)Objects.size() - NumFixedObjects - 1;
ensureMaxAlignment(Alignment);
return Index;
@@ -528,11 +547,10 @@ int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
int MachineFrameInfo::CreateVariableSizedObject(unsigned Alignment,
const AllocaInst *Alloca) {
HasVarSizedObjects = true;
- Alignment =
- clampStackAlignment(!getFrameLowering()->isStackRealignable() ||
- !RealignOption,
- Alignment, getFrameLowering()->getStackAlignment());
- Objects.push_back(StackObject(0, Alignment, 0, false, false, true, Alloca));
+ Alignment = clampStackAlignment(
+ !getFrameLowering()->isStackRealignable() || !RealignOption, Alignment,
+ getFrameLowering()->getStackAlignment());
+ Objects.push_back(StackObject(0, Alignment, 0, false, false, Alloca));
ensureMaxAlignment(Alignment);
return (int)Objects.size()-NumFixedObjects-1;
}
@@ -551,17 +569,30 @@ int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
// object is 16-byte aligned.
unsigned StackAlign = getFrameLowering()->getStackAlignment();
unsigned Align = MinAlign(SPOffset, StackAlign);
- Align =
- clampStackAlignment(!getFrameLowering()->isStackRealignable() ||
- !RealignOption,
- Align, getFrameLowering()->getStackAlignment());
+ Align = clampStackAlignment(!getFrameLowering()->isStackRealignable() ||
+ !RealignOption,
+ Align, getFrameLowering()->getStackAlignment());
Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset, Immutable,
/*isSS*/ false,
- /*NeedSP*/ false,
- /*Alloca*/ 0));
+ /*Alloca*/ nullptr));
return -++NumFixedObjects;
}
+/// CreateFixedSpillStackObject - Create a spill slot at a fixed location
+/// on the stack. Returns an index with a negative value.
+int MachineFrameInfo::CreateFixedSpillStackObject(uint64_t Size,
+ int64_t SPOffset) {
+ unsigned StackAlign = getFrameLowering()->getStackAlignment();
+ unsigned Align = MinAlign(SPOffset, StackAlign);
+ Align = clampStackAlignment(!getFrameLowering()->isStackRealignable() ||
+ !RealignOption,
+ Align, getFrameLowering()->getStackAlignment());
+ Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset,
+ /*Immutable*/ true,
+ /*isSS*/ true,
+ /*Alloca*/ nullptr));
+ return -++NumFixedObjects;
+}
BitVector
MachineFrameInfo::getPristineRegs(const MachineBasicBlock *MBB) const {
@@ -577,7 +608,7 @@ MachineFrameInfo::getPristineRegs(const MachineBasicBlock *MBB) const {
if (!isCalleeSavedInfoValid())
return BV;
- for (const uint16_t *CSR = TRI->getCalleeSavedRegs(MF); CSR && *CSR; ++CSR)
+ for (const MCPhysReg *CSR = TRI->getCalleeSavedRegs(MF); CSR && *CSR; ++CSR)
BV.set(*CSR);
// The entry MBB always has all CSRs pristine.
@@ -805,6 +836,37 @@ unsigned MachineConstantPoolEntry::getRelocationInfo() const {
return Val.ConstVal->getRelocationInfo();
}
+SectionKind
+MachineConstantPoolEntry::getSectionKind(const DataLayout *DL) const {
+ SectionKind Kind;
+ switch (getRelocationInfo()) {
+ default:
+ llvm_unreachable("Unknown section kind");
+ case 2:
+ Kind = SectionKind::getReadOnlyWithRel();
+ break;
+ case 1:
+ Kind = SectionKind::getReadOnlyWithRelLocal();
+ break;
+ case 0:
+ switch (DL->getTypeAllocSize(getType())) {
+ case 4:
+ Kind = SectionKind::getMergeableConst4();
+ break;
+ case 8:
+ Kind = SectionKind::getMergeableConst8();
+ break;
+ case 16:
+ Kind = SectionKind::getMergeableConst16();
+ break;
+ default:
+ Kind = SectionKind::getMergeableConst();
+ break;
+ }
+ }
+ return Kind;
+}
+
MachineConstantPool::~MachineConstantPool() {
for (unsigned i = 0, e = Constants.size(); i != e; ++i)
if (Constants[i].isMachineConstantPoolEntry())
@@ -830,11 +892,10 @@ static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
return false;
-
+
// For now, only support constants with the same size.
uint64_t StoreSize = TD->getTypeStoreSize(A->getType());
- if (StoreSize != TD->getTypeStoreSize(B->getType()) ||
- StoreSize > 128)
+ if (StoreSize != TD->getTypeStoreSize(B->getType()) || StoreSize > 128)
return false;
Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);
@@ -863,7 +924,7 @@ static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
/// an existing one. User must specify the log2 of the minimum required
/// alignment for the object.
///
-unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
+unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
unsigned Alignment) {
assert(Alignment && "Alignment must be specified!");
if (Alignment > PoolAlignment) PoolAlignment = Alignment;
@@ -911,7 +972,7 @@ void MachineConstantPool::print(raw_ostream &OS) const {
if (Constants[i].isMachineConstantPoolEntry())
Constants[i].Val.MachineCPVal->print(OS);
else
- WriteAsOperand(OS, Constants[i].Val.ConstVal, /*PrintType=*/false);
+ Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
OS << ", align=" << Constants[i].getAlignment();
OS << "\n";
}
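The MachineFunction.cpp hunks above add MachineFrameInfo::CreateFixedSpillStackObject and move constant-pool section classification into the new MachineConstantPoolEntry::getSectionKind. A minimal sketch of how a caller might use both entry points; the enclosing variables (TM, MCP, MF) are assumptions for illustration, not part of this commit:

  // Hypothetical caller, for illustration only.
  const DataLayout *DL = TM.getDataLayout();
  for (const MachineConstantPoolEntry &CPE : MCP->getConstants()) {
    SectionKind Kind = CPE.getSectionKind(DL); // rel, rel-local, or mergeable-const by size
    (void)Kind; // a real AsmPrinter would switch to the matching section here
  }
  // Reserve an immutable 8-byte spill slot at fixed SP offset -16.
  int FI = MF.getFrameInfo()->CreateFixedSpillStackObject(8, -16);
  (void)FI;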
diff --git a/contrib/llvm/lib/CodeGen/MachineFunctionAnalysis.cpp b/contrib/llvm/lib/CodeGen/MachineFunctionAnalysis.cpp
index 35591e1..46cd60a 100644
--- a/contrib/llvm/lib/CodeGen/MachineFunctionAnalysis.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineFunctionAnalysis.cpp
@@ -20,7 +20,7 @@ using namespace llvm;
char MachineFunctionAnalysis::ID = 0;
MachineFunctionAnalysis::MachineFunctionAnalysis(const TargetMachine &tm) :
- FunctionPass(ID), TM(tm), MF(0) {
+ FunctionPass(ID), TM(tm), MF(nullptr) {
initializeMachineModuleInfoPass(*PassRegistry::getPassRegistry());
}
@@ -53,5 +53,5 @@ bool MachineFunctionAnalysis::runOnFunction(Function &F) {
void MachineFunctionAnalysis::releaseMemory() {
delete MF;
- MF = 0;
+ MF = nullptr;
}
diff --git a/contrib/llvm/lib/CodeGen/MachineFunctionPass.cpp b/contrib/llvm/lib/CodeGen/MachineFunctionPass.cpp
index 674cc80..789f204 100644
--- a/contrib/llvm/lib/CodeGen/MachineFunctionPass.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineFunctionPass.cpp
@@ -51,6 +51,7 @@ void MachineFunctionPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved("domfrontier");
AU.addPreserved("loops");
AU.addPreserved("lda");
+ AU.addPreserved("stack-protector");
FunctionPass::getAnalysisUsage(AU);
}
diff --git a/contrib/llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp b/contrib/llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp
index fa9c821..dee3977 100644
--- a/contrib/llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp
@@ -34,14 +34,14 @@ struct MachineFunctionPrinterPass : public MachineFunctionPass {
MachineFunctionPrinterPass(raw_ostream &os, const std::string &banner)
: MachineFunctionPass(ID), OS(os), Banner(banner) {}
- const char *getPassName() const { return "MachineFunction Printer"; }
+ const char *getPassName() const override { return "MachineFunction Printer"; }
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}
- bool runOnMachineFunction(MachineFunction &MF) {
+ bool runOnMachineFunction(MachineFunction &MF) override {
OS << "# " << Banner << ":\n";
MF.print(OS, getAnalysisIfAvailable<SlotIndexes>());
return false;
diff --git a/contrib/llvm/lib/CodeGen/MachineInstr.cpp b/contrib/llvm/lib/CodeGen/MachineInstr.cpp
index 295b450..5122165 100644
--- a/contrib/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineInstr.cpp
@@ -15,15 +15,14 @@
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
@@ -129,7 +128,7 @@ void MachineOperand::ChangeToImmediate(int64_t ImmVal) {
void MachineOperand::ChangeToRegister(unsigned Reg, bool isDef, bool isImp,
bool isKill, bool isDead, bool isUndef,
bool isDebug) {
- MachineRegisterInfo *RegInfo = 0;
+ MachineRegisterInfo *RegInfo = nullptr;
if (MachineInstr *MI = getParent())
if (MachineBasicBlock *MBB = MI->getParent())
if (MachineFunction *MF = MBB->getParent())
@@ -153,7 +152,7 @@ void MachineOperand::ChangeToRegister(unsigned Reg, bool isDef, bool isImp,
IsEarlyClobber = false;
IsDebug = isDebug;
// Ensure isOnRegUseList() returns false.
- Contents.Reg.Prev = 0;
+ Contents.Reg.Prev = nullptr;
// Preserve the tie when the operand was already a register.
if (!WasReg)
TiedTo = 0;
@@ -199,10 +198,13 @@ bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const {
case MachineOperand::MO_BlockAddress:
return getBlockAddress() == Other.getBlockAddress() &&
getOffset() == Other.getOffset();
- case MO_RegisterMask:
+ case MachineOperand::MO_RegisterMask:
+ case MachineOperand::MO_RegisterLiveOut:
return getRegMask() == Other.getRegMask();
case MachineOperand::MO_MCSymbol:
return getMCSymbol() == Other.getMCSymbol();
+ case MachineOperand::MO_CFIIndex:
+ return getCFIIndex() == Other.getCFIIndex();
case MachineOperand::MO_Metadata:
return getMetadata() == Other.getMetadata();
}
@@ -241,11 +243,14 @@ hash_code llvm::hash_value(const MachineOperand &MO) {
return hash_combine(MO.getType(), MO.getTargetFlags(),
MO.getBlockAddress(), MO.getOffset());
case MachineOperand::MO_RegisterMask:
+ case MachineOperand::MO_RegisterLiveOut:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getRegMask());
case MachineOperand::MO_Metadata:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMetadata());
case MachineOperand::MO_MCSymbol:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMCSymbol());
+ case MachineOperand::MO_CFIIndex:
+ return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getCFIIndex());
}
llvm_unreachable("Invalid machine operand type");
}
@@ -260,7 +265,7 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
if (const MachineBasicBlock *MBB = MI->getParent())
if (const MachineFunction *MF = MBB->getParent())
TM = &MF->getTarget();
- const TargetRegisterInfo *TRI = TM ? TM->getRegisterInfo() : 0;
+ const TargetRegisterInfo *TRI = TM ? TM->getRegisterInfo() : nullptr;
switch (getType()) {
case MachineOperand::MO_Register:
@@ -312,7 +317,6 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
OS << "tied";
if (TiedTo != 15)
OS << unsigned(TiedTo - 1);
- NeedComma = true;
}
OS << '>';
}
@@ -350,7 +354,7 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
break;
case MachineOperand::MO_GlobalAddress:
OS << "<ga:";
- WriteAsOperand(OS, getGlobal(), /*PrintType=*/false);
+ getGlobal()->printAsOperand(OS, /*PrintType=*/false);
if (getOffset()) OS << "+" << getOffset();
OS << '>';
break;
@@ -361,21 +365,27 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
break;
case MachineOperand::MO_BlockAddress:
OS << '<';
- WriteAsOperand(OS, getBlockAddress(), /*PrintType=*/false);
+ getBlockAddress()->printAsOperand(OS, /*PrintType=*/false);
if (getOffset()) OS << "+" << getOffset();
OS << '>';
break;
case MachineOperand::MO_RegisterMask:
OS << "<regmask>";
break;
+ case MachineOperand::MO_RegisterLiveOut:
+ OS << "<regliveout>";
+ break;
case MachineOperand::MO_Metadata:
OS << '<';
- WriteAsOperand(OS, getMetadata(), /*PrintType=*/false);
+ getMetadata()->printAsOperand(OS, /*PrintType=*/false);
OS << '>';
break;
case MachineOperand::MO_MCSymbol:
OS << "<MCSym=" << *getMCSymbol() << '>';
break;
+ case MachineOperand::MO_CFIIndex:
+ OS << "<call frame instruction>";
+ break;
}
if (unsigned TF = getTargetFlags())
@@ -389,8 +399,8 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
/// getAddrSpace - Return the LLVM IR address space number that this pointer
/// points into.
unsigned MachinePointerInfo::getAddrSpace() const {
- if (V == 0) return 0;
- return cast<PointerType>(V->getType())->getAddressSpace();
+ if (V.isNull() || V.is<const PseudoSourceValue*>()) return 0;
+ return cast<PointerType>(V.get<const Value*>()->getType())->getAddressSpace();
}
/// getConstantPool - Return a MachinePointerInfo record that refers to the
@@ -424,7 +434,8 @@ MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, unsigned f,
: PtrInfo(ptrinfo), Size(s),
Flags((f & ((1 << MOMaxBits) - 1)) | ((Log2_32(a) + 1) << MOMaxBits)),
TBAAInfo(TBAAInfo), Ranges(Ranges) {
- assert((PtrInfo.V == 0 || isa<PointerType>(PtrInfo.V->getType())) &&
+ assert((PtrInfo.V.isNull() || PtrInfo.V.is<const PseudoSourceValue*>() ||
+ isa<PointerType>(PtrInfo.V.get<const Value*>()->getType())) &&
"invalid pointer value");
assert(getBaseAlignment() == a && "Alignment is not a power of 2!");
assert((isLoad() || isStore()) && "Not a load/store!");
@@ -435,7 +446,7 @@ MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, unsigned f,
void MachineMemOperand::Profile(FoldingSetNodeID &ID) const {
ID.AddInteger(getOffset());
ID.AddInteger(Size);
- ID.AddPointer(getValue());
+ ID.AddPointer(getOpaqueValue());
ID.AddInteger(Flags);
}
@@ -476,10 +487,16 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, const MachineMemOperand &MMO) {
// Print the address information.
OS << "[";
- if (!MMO.getValue())
- OS << "<unknown>";
+ if (const Value *V = MMO.getValue())
+ V->printAsOperand(OS, /*PrintType=*/false);
+ else if (const PseudoSourceValue *PSV = MMO.getPseudoValue())
+ PSV->printCustom(OS);
else
- WriteAsOperand(OS, MMO.getValue(), /*PrintType=*/false);
+ OS << "<unknown>";
+
+ unsigned AS = MMO.getAddrSpace();
+ if (AS != 0)
+ OS << "(addrspace=" << AS << ')';
// If the alignment of the memory reference itself differs from the alignment
// of the base pointer, print the base alignment explicitly, next to the base
@@ -500,7 +517,7 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, const MachineMemOperand &MMO) {
if (const MDNode *TBAAInfo = MMO.getTBAAInfo()) {
OS << "(tbaa=";
if (TBAAInfo->getNumOperands() > 0)
- WriteAsOperand(OS, TBAAInfo->getOperand(0), /*PrintType=*/false);
+ TBAAInfo->getOperand(0)->printAsOperand(OS, /*PrintType=*/false);
else
OS << "<unknown>";
OS << ")";
@@ -531,9 +548,9 @@ void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
/// the MCInstrDesc.
MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
const DebugLoc dl, bool NoImp)
- : MCID(&tid), Parent(0), Operands(0), NumOperands(0),
+ : MCID(&tid), Parent(nullptr), Operands(nullptr), NumOperands(0),
Flags(0), AsmPrinterFlags(0),
- NumMemRefs(0), MemRefs(0), debugLoc(dl) {
+ NumMemRefs(0), MemRefs(nullptr), debugLoc(dl) {
// Reserve space for the expected number of operands.
if (unsigned NumOps = MCID->getNumOperands() +
MCID->getNumImplicitDefs() + MCID->getNumImplicitUses()) {
@@ -548,7 +565,7 @@ MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
/// MachineInstr ctor - Copies MachineInstr arg exactly
///
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
- : MCID(&MI.getDesc()), Parent(0), Operands(0), NumOperands(0),
+ : MCID(&MI.getDesc()), Parent(nullptr), Operands(nullptr), NumOperands(0),
Flags(0), AsmPrinterFlags(0),
NumMemRefs(MI.NumMemRefs), MemRefs(MI.MemRefs),
debugLoc(MI.getDebugLoc()) {
@@ -569,7 +586,7 @@ MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
MachineRegisterInfo *MachineInstr::getRegInfo() {
if (MachineBasicBlock *MBB = getParent())
return &MBB->getParent()->getRegInfo();
- return 0;
+ return nullptr;
}
/// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
@@ -688,7 +705,7 @@ void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
// When adding a register operand, tell MRI about it.
if (NewMO->isReg()) {
// Ensure isOnRegUseList() returns false, regardless of Op's status.
- NewMO->Contents.Reg.Prev = 0;
+ NewMO->Contents.Reg.Prev = nullptr;
// Ignore existing ties. This is not a property that can be copied.
NewMO->TiedTo = 0;
// Add the new operand to MRI, but only for instructions in an MBB.
@@ -960,7 +977,7 @@ MachineInstr::getRegClassConstraint(unsigned OpIdx,
return TII->getRegClass(getDesc(), OpIdx, TRI, MF);
if (!getOperand(OpIdx).isReg())
- return NULL;
+ return nullptr;
// For tied uses on inline asm, get the constraint from the def.
unsigned DefIdx;
@@ -970,7 +987,7 @@ MachineInstr::getRegClassConstraint(unsigned OpIdx,
// Inline asm stores register class constraints in the flag word.
int FlagIdx = findInlineAsmFlagIdx(OpIdx);
if (FlagIdx < 0)
- return NULL;
+ return nullptr;
unsigned Flag = getOperand(FlagIdx).getImm();
unsigned RCID;
@@ -981,7 +998,55 @@ MachineInstr::getRegClassConstraint(unsigned OpIdx,
if (InlineAsm::getKind(Flag) == InlineAsm::Kind_Mem)
return TRI->getPointerRegClass(MF);
- return NULL;
+ return nullptr;
+}
+
+const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
+ unsigned Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
+ const TargetRegisterInfo *TRI, bool ExploreBundle) const {
+ // Check every operand inside the bundle if we have
+ // been asked to.
+ if (ExploreBundle)
+ for (ConstMIBundleOperands OpndIt(this); OpndIt.isValid() && CurRC;
+ ++OpndIt)
+ CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
+ OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
+ else
+ // Otherwise, just check the current operands.
+ for (ConstMIOperands OpndIt(this); OpndIt.isValid() && CurRC; ++OpndIt)
+ CurRC = getRegClassConstraintEffectForVRegImpl(OpndIt.getOperandNo(), Reg,
+ CurRC, TII, TRI);
+ return CurRC;
+}
+
+const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
+ unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC,
+ const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
+ assert(CurRC && "Invalid initial register class");
+ // Check if Reg is constrained by some of its use/def from MI.
+ const MachineOperand &MO = getOperand(OpIdx);
+ if (!MO.isReg() || MO.getReg() != Reg)
+ return CurRC;
+ // If yes, accumulate the constraints through the operand.
+ return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
+}
+
+const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
+ unsigned OpIdx, const TargetRegisterClass *CurRC,
+ const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
+ const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
+ const MachineOperand &MO = getOperand(OpIdx);
+ assert(MO.isReg() &&
+ "Cannot get register constraints for non-register operand");
+ assert(CurRC && "Invalid initial register class");
+ if (unsigned SubIdx = MO.getSubReg()) {
+ if (OpRC)
+ CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
+ else
+ CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
+ } else if (OpRC)
+ CurRC = TRI->getCommonSubClass(CurRC, OpRC);
+ return CurRC;
}
/// Return the number of instructions inside the MI bundle, not counting the
@@ -1239,8 +1304,8 @@ bool MachineInstr::isSafeToMove(const TargetInstrInfo *TII,
return false;
}
- if (isLabel() || isDebugValue() ||
- isTerminator() || hasUnmodeledSideEffects())
+ if (isPosition() || isDebugValue() || isTerminator() ||
+ hasUnmodeledSideEffects())
return false;
// See if this instruction does a load. If so, we have to guarantee that the
@@ -1304,11 +1369,13 @@ bool MachineInstr::isInvariantLoad(AliasAnalysis *AA) const {
if ((*I)->isStore()) return false;
if ((*I)->isInvariant()) return true;
+
+ // A load from a constant PseudoSourceValue is invariant.
+ if (const PseudoSourceValue *PSV = (*I)->getPseudoValue())
+ if (PSV->isConstant(MFI))
+ continue;
+
if (const Value *V = (*I)->getValue()) {
- // A load from a constant PseudoSourceValue is invariant.
- if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V))
- if (PSV->isConstant(MFI))
- continue;
// If we have an AliasAnalysis, ask it whether the memory is constant.
if (AA && AA->pointsToConstantMemory(
AliasAnalysis::Location(V, (*I)->getSize(),
@@ -1372,7 +1439,7 @@ void MachineInstr::copyImplicitOps(MachineFunction &MF,
for (unsigned i = MI->getDesc().getNumOperands(), e = MI->getNumOperands();
i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg() && MO.isImplicit())
+ if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
addOperand(MF, MO);
}
}
@@ -1386,32 +1453,14 @@ void MachineInstr::dump() const {
static void printDebugLoc(DebugLoc DL, const MachineFunction *MF,
raw_ostream &CommentOS) {
const LLVMContext &Ctx = MF->getFunction()->getContext();
- if (!DL.isUnknown()) { // Print source line info.
- DIScope Scope(DL.getScope(Ctx));
- assert((!Scope || Scope.isScope()) &&
- "Scope of a DebugLoc should be null or a DIScope.");
- // Omit the directory, because it's likely to be long and uninteresting.
- if (Scope)
- CommentOS << Scope.getFilename();
- else
- CommentOS << "<unknown>";
- CommentOS << ':' << DL.getLine();
- if (DL.getCol() != 0)
- CommentOS << ':' << DL.getCol();
- DebugLoc InlinedAtDL = DebugLoc::getFromDILocation(DL.getInlinedAt(Ctx));
- if (!InlinedAtDL.isUnknown()) {
- CommentOS << " @[ ";
- printDebugLoc(InlinedAtDL, MF, CommentOS);
- CommentOS << " ]";
- }
- }
+ DL.print(Ctx, CommentOS);
}
void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM,
bool SkipOpers) const {
// We can be a bit tidier if we know the TargetMachine and/or MachineFunction.
- const MachineFunction *MF = 0;
- const MachineRegisterInfo *MRI = 0;
+ const MachineFunction *MF = nullptr;
+ const MachineRegisterInfo *MRI = nullptr;
if (const MachineBasicBlock *MBB = getParent()) {
MF = MBB->getParent();
if (!TM && MF)
@@ -1587,7 +1636,7 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM,
for (mmo_iterator i = memoperands_begin(), e = memoperands_end();
i != e; ++i) {
OS << **i;
- if (llvm::next(i) != e)
+ if (std::next(i) != e)
OS << " ";
}
}
@@ -1612,19 +1661,19 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM,
// Print debug location information.
if (isDebugValue() && getOperand(e - 1).isMetadata()) {
- if (!HaveSemi) OS << ";"; HaveSemi = true;
+ if (!HaveSemi) OS << ";";
DIVariable DV(getOperand(e - 1).getMetadata());
OS << " line no:" << DV.getLineNumber();
if (MDNode *InlinedAt = DV.getInlinedAt()) {
DebugLoc InlinedAtDL = DebugLoc::getFromDILocation(InlinedAt);
- if (!InlinedAtDL.isUnknown()) {
+ if (!InlinedAtDL.isUnknown() && MF) {
OS << " inlined @[ ";
printDebugLoc(InlinedAtDL, MF, OS);
OS << " ]";
}
}
} else if (!debugLoc.isUnknown() && MF) {
- if (!HaveSemi) OS << ";"; HaveSemi = true;
+ if (!HaveSemi) OS << ";";
OS << " dbg:";
printDebugLoc(debugLoc, MF, OS);
}
@@ -1694,7 +1743,7 @@ bool MachineInstr::addRegisterKilled(unsigned IncomingReg,
void MachineInstr::clearRegisterKills(unsigned Reg,
const TargetRegisterInfo *RegInfo) {
if (!TargetRegisterInfo::isPhysicalRegister(Reg))
- RegInfo = 0;
+ RegInfo = nullptr;
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
MachineOperand &MO = getOperand(i);
if (!MO.isReg() || !MO.isUse() || !MO.isKill())
@@ -1827,7 +1876,7 @@ MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
void MachineInstr::emitError(StringRef Msg) const {
// Find the source location cookie.
unsigned LocCookie = 0;
- const MDNode *LocMD = 0;
+ const MDNode *LocMD = nullptr;
for (unsigned i = getNumOperands(); i != 0; --i) {
if (getOperand(i-1).isMetadata() &&
(LocMD = getOperand(i-1).getMetadata()) &&
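The MachineInstr.cpp hunks above introduce getRegClassConstraintEffectForVReg, which folds every register-class constraint an instruction (and, on request, its whole bundle) places on a virtual register into a starting class. A sketch of the intended call pattern, assuming the usual CodeGen headers and that MI, Reg, MRI, TII and TRI are in scope; the helper name is hypothetical:

  // Narrow Reg's current class by every constraint MI imposes on it.
  // A null result means the operand constraints cannot be satisfied.
  static const TargetRegisterClass *
  constrainByInstr(const MachineInstr *MI, unsigned Reg,
                   const MachineRegisterInfo &MRI, const TargetInstrInfo *TII,
                   const TargetRegisterInfo *TRI) {
    const TargetRegisterClass *RC = MRI.getRegClass(Reg);
    return MI->getRegClassConstraintEffectForVReg(Reg, RC, TII, TRI,
                                                  /*ExploreBundle=*/true);
  }

MachineRegisterInfo::recomputeRegClass, further down in this commit, is rewritten on top of the per-operand variant, getRegClassConstraintEffect.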
diff --git a/contrib/llvm/lib/CodeGen/MachineInstrBundle.cpp b/contrib/llvm/lib/CodeGen/MachineInstrBundle.cpp
index 77bcd1d..962169e 100644
--- a/contrib/llvm/lib/CodeGen/MachineInstrBundle.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineInstrBundle.cpp
@@ -26,7 +26,7 @@ namespace {
initializeUnpackMachineBundlesPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
};
} // end anonymous namespace
@@ -77,7 +77,7 @@ namespace {
initializeFinalizeMachineBundlesPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
};
} // end anonymous namespace
@@ -211,7 +211,7 @@ MachineBasicBlock::instr_iterator
llvm::finalizeBundle(MachineBasicBlock &MBB,
MachineBasicBlock::instr_iterator FirstMI) {
MachineBasicBlock::instr_iterator E = MBB.instr_end();
- MachineBasicBlock::instr_iterator LastMI = llvm::next(FirstMI);
+ MachineBasicBlock::instr_iterator LastMI = std::next(FirstMI);
while (LastMI != E && LastMI->isInsideBundle())
++LastMI;
finalizeBundle(MBB, FirstMI, LastMI);
@@ -235,7 +235,7 @@ bool llvm::finalizeBundles(MachineFunction &MF) {
if (!MII->isInsideBundle())
++MII;
else {
- MII = finalizeBundle(MBB, llvm::prior(MII));
+ MII = finalizeBundle(MBB, std::prev(MII));
Changed = true;
}
}
diff --git a/contrib/llvm/lib/CodeGen/MachineLICM.cpp b/contrib/llvm/lib/CodeGen/MachineLICM.cpp
index 104eacd..68d2efd 100644
--- a/contrib/llvm/lib/CodeGen/MachineLICM.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineLICM.cpp
@@ -20,7 +20,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "machine-licm"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
@@ -42,6 +41,8 @@
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "machine-licm"
+
static cl::opt<bool>
AvoidSpeculation("avoid-speculation",
cl::desc("MachineLICM should avoid speculation"),
@@ -125,9 +126,9 @@ namespace {
initializeMachineLICMPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MachineLoopInfo>();
AU.addRequired<MachineDominatorTree>();
AU.addRequired<AliasAnalysis>();
@@ -136,7 +137,7 @@ namespace {
MachineFunctionPass::getAnalysisUsage(AU);
}
- virtual void releaseMemory() {
+ void releaseMemory() override {
RegSeen.clear();
RegPressure.clear();
RegLimit.clear();
@@ -319,6 +320,9 @@ static bool LoopIsOuterMostWithPredecessor(MachineLoop *CurLoop) {
}
bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
+ if (skipOptnoneFunction(*MF.getFunction()))
+ return false;
+
Changed = FirstInLoop = false;
TM = &MF.getTarget();
TII = TM->getInstrInfo();
@@ -355,7 +359,7 @@ bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
SmallVector<MachineLoop *, 8> Worklist(MLI->begin(), MLI->end());
while (!Worklist.empty()) {
CurLoop = Worklist.pop_back_val();
- CurPreheader = 0;
+ CurPreheader = nullptr;
ExitBlocks.clear();
// If this is done before regalloc, only visit outer-most preheader-sporting
@@ -387,10 +391,10 @@ bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
static bool InstructionStoresToFI(const MachineInstr *MI, int FI) {
for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
oe = MI->memoperands_end(); o != oe; ++o) {
- if (!(*o)->isStore() || !(*o)->getValue())
+ if (!(*o)->isStore() || !(*o)->getPseudoValue())
continue;
if (const FixedStackPseudoSourceValue *Value =
- dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
+ dyn_cast<FixedStackPseudoSourceValue>((*o)->getPseudoValue())) {
if (Value->getFrameIndex() == FI)
return true;
}
@@ -697,7 +701,7 @@ void MachineLICM::HoistOutOfLoop(MachineDomTreeNode *HeaderN) {
WorkList.push_back(HeaderN);
do {
MachineDomTreeNode *Node = WorkList.pop_back_val();
- assert(Node != 0 && "Null dominator tree node?");
+ assert(Node && "Null dominator tree node?");
MachineBasicBlock *BB = Node->getBlock();
// If the header of the loop containing this basic block is a landing pad,
@@ -801,7 +805,7 @@ void MachineLICM::InitRegPressure(MachineBasicBlock *BB) {
// defs as well. This happens whenever the preheader is created by splitting
// the critical edge from the loop predecessor to the loop header.
if (BB->pred_size() == 1) {
- MachineBasicBlock *TBB = 0, *FBB = 0;
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
if (!TII->AnalyzeBranch(*BB, TBB, FBB, Cond, false) && Cond.empty())
InitRegPressure(*BB->pred_begin());
@@ -879,10 +883,9 @@ static bool isLoadFromGOTOrConstantPool(MachineInstr &MI) {
assert (MI.mayLoad() && "Expected MI that loads!");
for (MachineInstr::mmo_iterator I = MI.memoperands_begin(),
E = MI.memoperands_end(); I != E; ++I) {
- if (const Value *V = (*I)->getValue()) {
- if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V))
- if (PSV == PSV->getGOT() || PSV == PSV->getConstantPool())
- return true;
+ if (const PseudoSourceValue *PSV = (*I)->getPseudoValue()) {
+ if (PSV == PSV->getGOT() || PSV == PSV->getConstantPool())
+ return true;
}
}
return false;
@@ -978,25 +981,23 @@ bool MachineLICM::HasLoopPHIUse(const MachineInstr *MI) const {
unsigned Reg = MO->getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
- for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
- UE = MRI->use_end(); UI != UE; ++UI) {
- MachineInstr *UseMI = &*UI;
+ for (MachineInstr &UseMI : MRI->use_instructions(Reg)) {
// A PHI may cause a copy to be inserted.
- if (UseMI->isPHI()) {
+ if (UseMI.isPHI()) {
// A PHI inside the loop causes a copy because the live range of Reg is
// extended across the PHI.
- if (CurLoop->contains(UseMI))
+ if (CurLoop->contains(&UseMI))
return true;
// A PHI in an exit block can cause a copy to be inserted if the PHI
// has multiple predecessors in the loop with different values.
// For now, approximate by rejecting all exit blocks.
- if (isExitBlock(UseMI->getParent()))
+ if (isExitBlock(UseMI.getParent()))
return true;
continue;
}
// Look past copies as well.
- if (UseMI->isCopy() && CurLoop->contains(UseMI))
- Work.push_back(UseMI);
+ if (UseMI.isCopy() && CurLoop->contains(&UseMI))
+ Work.push_back(&UseMI);
}
}
} while (!Work.empty());
@@ -1011,22 +1012,20 @@ bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
if (!InstrItins || InstrItins->isEmpty() || MRI->use_nodbg_empty(Reg))
return false;
- for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
- E = MRI->use_nodbg_end(); I != E; ++I) {
- MachineInstr *UseMI = &*I;
- if (UseMI->isCopyLike())
+ for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg)) {
+ if (UseMI.isCopyLike())
continue;
- if (!CurLoop->contains(UseMI->getParent()))
+ if (!CurLoop->contains(UseMI.getParent()))
continue;
- for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = UseMI->getOperand(i);
+ for (unsigned i = 0, e = UseMI.getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = UseMI.getOperand(i);
if (!MO.isReg() || !MO.isUse())
continue;
unsigned MOReg = MO.getReg();
if (MOReg != Reg)
continue;
- if (TII->hasHighOperandLatency(InstrItins, MRI, &MI, DefIdx, UseMI, i))
+ if (TII->hasHighOperandLatency(InstrItins, MRI, &MI, DefIdx, &UseMI, i))
return true;
}
@@ -1242,13 +1241,13 @@ bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
// Don't unfold simple loads.
if (MI->canFoldAsLoad())
- return 0;
+ return nullptr;
// If not, we may be able to unfold a load and hoist that.
// First test whether the instruction is loading from an amenable
// memory location.
if (!MI->isInvariantLoad(AA))
- return 0;
+ return nullptr;
// Next determine the register class for a temporary register.
unsigned LoadRegIndex;
@@ -1257,9 +1256,9 @@ MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
/*UnfoldLoad=*/true,
/*UnfoldStore=*/false,
&LoadRegIndex);
- if (NewOpc == 0) return 0;
+ if (NewOpc == 0) return nullptr;
const MCInstrDesc &MID = TII->get(NewOpc);
- if (MID.getNumDefs() != 1) return 0;
+ if (MID.getNumDefs() != 1) return nullptr;
MachineFunction &MF = *MI->getParent()->getParent();
const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex, TRI, MF);
// Ok, we're unfolding. Create a temporary register and do the unfold.
@@ -1285,7 +1284,7 @@ MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
if (!IsLoopInvariantInst(*NewMIs[0]) || !IsProfitableToHoist(*NewMIs[0])) {
NewMIs[0]->eraseFromParent();
NewMIs[1]->eraseFromParent();
- return 0;
+ return nullptr;
}
// Update register pressure for the unfolded instruction.
@@ -1317,10 +1316,10 @@ MachineLICM::LookForDuplicate(const MachineInstr *MI,
std::vector<const MachineInstr*> &PrevMIs) {
for (unsigned i = 0, e = PrevMIs.size(); i != e; ++i) {
const MachineInstr *PrevMI = PrevMIs[i];
- if (TII->produceSameValue(MI, PrevMI, (PreRegAlloc ? MRI : 0)))
+ if (TII->produceSameValue(MI, PrevMI, (PreRegAlloc ? MRI : nullptr)))
return PrevMI;
}
- return 0;
+ return nullptr;
}
bool MachineLICM::EliminateCSE(MachineInstr *MI,
@@ -1391,7 +1390,7 @@ bool MachineLICM::MayCSE(MachineInstr *MI) {
if (CI == CSEMap.end() || MI->isImplicitDef())
return false;
- return LookForDuplicate(MI, CI->second) != 0;
+ return LookForDuplicate(MI, CI->second) != nullptr;
}
/// Hoist - When an instruction is found to use only loop invariant operands
@@ -1467,7 +1466,7 @@ MachineBasicBlock *MachineLICM::getCurPreheader() {
// If we've tried to get a preheader and failed, don't try again.
if (CurPreheader == reinterpret_cast<MachineBasicBlock *>(-1))
- return 0;
+ return nullptr;
if (!CurPreheader) {
CurPreheader = CurLoop->getLoopPreheader();
@@ -1475,13 +1474,13 @@ MachineBasicBlock *MachineLICM::getCurPreheader() {
MachineBasicBlock *Pred = CurLoop->getLoopPredecessor();
if (!Pred) {
CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
- return 0;
+ return nullptr;
}
CurPreheader = Pred->SplitCriticalEdge(CurLoop->getHeader(), this);
if (!CurPreheader) {
CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
- return 0;
+ return nullptr;
}
}
}
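Several of the MachineLICM loops above switch from explicit use_iterator walks to the range-based MachineRegisterInfo accessors used throughout this release. The same pattern in isolation, assuming MRI and Reg are in scope and NumPHIUses is a hypothetical counter:

  unsigned NumPHIUses = 0;
  for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg))
    if (UseMI.isPHI()) // debug uses are skipped by the _nodbg_ range
      ++NumPHIUses;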
diff --git a/contrib/llvm/lib/CodeGen/MachineLoopInfo.cpp b/contrib/llvm/lib/CodeGen/MachineLoopInfo.cpp
index 4e2cfdc..89054d4 100644
--- a/contrib/llvm/lib/CodeGen/MachineLoopInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineLoopInfo.cpp
@@ -50,11 +50,11 @@ MachineBasicBlock *MachineLoop::getTopBlock() {
MachineBasicBlock *TopMBB = getHeader();
MachineFunction::iterator Begin = TopMBB->getParent()->begin();
if (TopMBB != Begin) {
- MachineBasicBlock *PriorMBB = prior(MachineFunction::iterator(TopMBB));
+ MachineBasicBlock *PriorMBB = std::prev(MachineFunction::iterator(TopMBB));
while (contains(PriorMBB)) {
TopMBB = PriorMBB;
if (TopMBB == Begin) break;
- PriorMBB = prior(MachineFunction::iterator(TopMBB));
+ PriorMBB = std::prev(MachineFunction::iterator(TopMBB));
}
}
return TopMBB;
@@ -63,12 +63,12 @@ MachineBasicBlock *MachineLoop::getTopBlock() {
MachineBasicBlock *MachineLoop::getBottomBlock() {
MachineBasicBlock *BotMBB = getHeader();
MachineFunction::iterator End = BotMBB->getParent()->end();
- if (BotMBB != prior(End)) {
- MachineBasicBlock *NextMBB = llvm::next(MachineFunction::iterator(BotMBB));
+ if (BotMBB != std::prev(End)) {
+ MachineBasicBlock *NextMBB = std::next(MachineFunction::iterator(BotMBB));
while (contains(NextMBB)) {
BotMBB = NextMBB;
- if (BotMBB == llvm::next(MachineFunction::iterator(BotMBB))) break;
- NextMBB = llvm::next(MachineFunction::iterator(BotMBB));
+ if (BotMBB == std::next(MachineFunction::iterator(BotMBB))) break;
+ NextMBB = std::next(MachineFunction::iterator(BotMBB));
}
}
return BotMBB;
diff --git a/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp b/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp
index bb54284..4976e35 100644
--- a/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp
@@ -36,8 +36,8 @@ namespace llvm {
class MMIAddrLabelMapCallbackPtr : CallbackVH {
MMIAddrLabelMap *Map;
public:
- MMIAddrLabelMapCallbackPtr() : Map(0) {}
- MMIAddrLabelMapCallbackPtr(Value *V) : CallbackVH(V), Map(0) {}
+ MMIAddrLabelMapCallbackPtr() : Map(nullptr) {}
+ MMIAddrLabelMapCallbackPtr(Value *V) : CallbackVH(V), Map(nullptr) {}
void setPtr(BasicBlock *BB) {
ValueHandleBase::operator=(BB);
@@ -45,8 +45,8 @@ public:
void setMap(MMIAddrLabelMap *map) { Map = map; }
- virtual void deleted();
- virtual void allUsesReplacedWith(Value *V2);
+ void deleted() override;
+ void allUsesReplacedWith(Value *V2) override;
};
class MMIAddrLabelMap {
@@ -163,9 +163,9 @@ void MMIAddrLabelMap::UpdateForDeletedBlock(BasicBlock *BB) {
AddrLabelSymEntry Entry = AddrLabelSymbols[BB];
AddrLabelSymbols.erase(BB);
assert(!Entry.Symbols.isNull() && "Didn't have a symbol, why a callback?");
- BBCallbacks[Entry.Index] = 0; // Clear the callback.
+ BBCallbacks[Entry.Index] = nullptr; // Clear the callback.
- assert((BB->getParent() == 0 || BB->getParent() == Entry.Fn) &&
+ assert((BB->getParent() == nullptr || BB->getParent() == Entry.Fn) &&
"Block/parent mismatch");
// Handle both the single and the multiple symbols cases.
@@ -213,7 +213,7 @@ void MMIAddrLabelMap::UpdateForRAUWBlock(BasicBlock *Old, BasicBlock *New) {
return;
}
- BBCallbacks[OldEntry.Index] = 0; // Update the callback.
+ BBCallbacks[OldEntry.Index] = nullptr; // Update the callback.
// Otherwise, we need to add the old symbol to the new block's set. If it is
// just a single entry, upgrade it to a symbol list.
@@ -253,12 +253,12 @@ void MMIAddrLabelMapCallbackPtr::allUsesReplacedWith(Value *V2) {
MachineModuleInfo::MachineModuleInfo(const MCAsmInfo &MAI,
const MCRegisterInfo &MRI,
const MCObjectFileInfo *MOFI)
- : ImmutablePass(ID), Context(&MAI, &MRI, MOFI, 0, false) {
+ : ImmutablePass(ID), Context(&MAI, &MRI, MOFI, nullptr, false) {
initializeMachineModuleInfoPass(*PassRegistry::getPassRegistry());
}
MachineModuleInfo::MachineModuleInfo()
- : ImmutablePass(ID), Context(0, 0, 0) {
+ : ImmutablePass(ID), Context(nullptr, nullptr, nullptr) {
llvm_unreachable("This MachineModuleInfo constructor should never be called, "
"MMI should always be explicitly constructed by "
"LLVMTargetMachine");
@@ -269,16 +269,16 @@ MachineModuleInfo::~MachineModuleInfo() {
bool MachineModuleInfo::doInitialization(Module &M) {
- ObjFileMMI = 0;
+ ObjFileMMI = nullptr;
CompactUnwindEncoding = 0;
CurCallSite = 0;
CallsEHReturn = 0;
CallsUnwindInit = 0;
DbgInfoAvailable = UsesVAFloatArgument = false;
// Always emit some info, by default "no personality" info.
- Personalities.push_back(NULL);
- AddrLabelSymbols = 0;
- TheModule = 0;
+ Personalities.push_back(nullptr);
+ AddrLabelSymbols = nullptr;
+ TheModule = nullptr;
return false;
}
@@ -288,12 +288,12 @@ bool MachineModuleInfo::doFinalization(Module &M) {
Personalities.clear();
delete AddrLabelSymbols;
- AddrLabelSymbols = 0;
+ AddrLabelSymbols = nullptr;
Context.reset();
delete ObjFileMMI;
- ObjFileMMI = 0;
+ ObjFileMMI = nullptr;
return false;
}
@@ -313,7 +313,7 @@ void MachineModuleInfo::EndFunction() {
CallsEHReturn = 0;
CallsUnwindInit = 0;
CompactUnwindEncoding = 0;
- VariableDbgInfo.clear();
+ VariableDbgInfos.clear();
}
/// AnalyzeModule - Scan the module for global debug information.
@@ -341,7 +341,7 @@ void MachineModuleInfo::AnalyzeModule(const Module &M) {
/// because the block may be accessed outside its containing function.
MCSymbol *MachineModuleInfo::getAddrLabelSymbol(const BasicBlock *BB) {
// Lazily create AddrLabelSymbols.
- if (AddrLabelSymbols == 0)
+ if (!AddrLabelSymbols)
AddrLabelSymbols = new MMIAddrLabelMap(Context);
return AddrLabelSymbols->getAddrLabelSymbol(const_cast<BasicBlock*>(BB));
}
@@ -352,7 +352,7 @@ MCSymbol *MachineModuleInfo::getAddrLabelSymbol(const BasicBlock *BB) {
std::vector<MCSymbol*> MachineModuleInfo::
getAddrLabelSymbolToEmit(const BasicBlock *BB) {
// Lazily create AddrLabelSymbols.
- if (AddrLabelSymbols == 0)
+ if (!AddrLabelSymbols)
AddrLabelSymbols = new MMIAddrLabelMap(Context);
return AddrLabelSymbols->getAddrLabelSymbolToEmit(const_cast<BasicBlock*>(BB));
}
@@ -366,7 +366,7 @@ void MachineModuleInfo::
takeDeletedSymbolsForFunction(const Function *F,
std::vector<MCSymbol*> &Result) {
// If no blocks have had their addresses taken, we're done.
- if (AddrLabelSymbols == 0) return;
+ if (!AddrLabelSymbols) return;
return AddrLabelSymbols->
takeDeletedSymbolsForFunction(const_cast<Function*>(F), Result);
}
@@ -419,7 +419,7 @@ void MachineModuleInfo::addPersonality(MachineBasicBlock *LandingPad,
// If this is the first personality we're adding go
// ahead and add it at the beginning.
- if (Personalities[0] == NULL)
+ if (!Personalities[0])
Personalities[0] = Personality;
else
Personalities.push_back(Personality);
@@ -462,7 +462,7 @@ void MachineModuleInfo::TidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap) {
if (LandingPad.LandingPadLabel &&
!LandingPad.LandingPadLabel->isDefined() &&
(!LPMap || (*LPMap)[LandingPad.LandingPadLabel] == 0))
- LandingPad.LandingPadLabel = 0;
+ LandingPad.LandingPadLabel = nullptr;
// Special case: we *should* emit LPs with null LP MBB. This indicates
// "nounwind" case.
@@ -550,13 +550,13 @@ try_next:;
const Function *MachineModuleInfo::getPersonality() const {
// FIXME: Until PR1414 will be fixed, we're using 1 personality function per
// function
- return !LandingPads.empty() ? LandingPads[0].Personality : NULL;
+ return !LandingPads.empty() ? LandingPads[0].Personality : nullptr;
}
/// getPersonalityIndex - Return unique index for current personality
/// function. NULL/first personality function should always get zero index.
unsigned MachineModuleInfo::getPersonalityIndex() const {
- const Function* Personality = NULL;
+ const Function* Personality = nullptr;
// Scan landing pads. If there is at least one non-NULL personality - use it.
for (unsigned i = 0, e = LandingPads.size(); i != e; ++i)
diff --git a/contrib/llvm/lib/CodeGen/MachinePassRegistry.cpp b/contrib/llvm/lib/CodeGen/MachinePassRegistry.cpp
index cb204fd..3ee3e40 100644
--- a/contrib/llvm/lib/CodeGen/MachinePassRegistry.cpp
+++ b/contrib/llvm/lib/CodeGen/MachinePassRegistry.cpp
@@ -20,7 +20,7 @@ void MachinePassRegistryListener::anchor() { }
/// setDefault - Set the default constructor by name.
void MachinePassRegistry::setDefault(StringRef Name) {
- MachinePassCtor Ctor = 0;
+ MachinePassCtor Ctor = nullptr;
for(MachinePassRegistryNode *R = getList(); R; R = R->getNext()) {
if (R->getName() == Name) {
Ctor = R->getCtor();
diff --git a/contrib/llvm/lib/CodeGen/MachineRegionInfo.cpp b/contrib/llvm/lib/CodeGen/MachineRegionInfo.cpp
new file mode 100644
index 0000000..c6b6802
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/MachineRegionInfo.cpp
@@ -0,0 +1,138 @@
+
+#include "llvm/CodeGen/MachineRegionInfo.h"
+#include "llvm/CodeGen/MachinePostDominators.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/RegionInfoImpl.h"
+
+using namespace llvm;
+
+STATISTIC(numMachineRegions, "The # of machine regions");
+STATISTIC(numMachineSimpleRegions, "The # of simple machine regions");
+
+namespace llvm {
+template class RegionBase<RegionTraits<MachineFunction>>;
+template class RegionNodeBase<RegionTraits<MachineFunction>>;
+template class RegionInfoBase<RegionTraits<MachineFunction>>;
+}
+
+//===----------------------------------------------------------------------===//
+// MachineRegion implementation
+//
+
+MachineRegion::MachineRegion(MachineBasicBlock *Entry, MachineBasicBlock *Exit,
+ MachineRegionInfo* RI,
+ MachineDominatorTree *DT, MachineRegion *Parent) :
+ RegionBase<RegionTraits<MachineFunction>>(Entry, Exit, RI, DT, Parent) {
+
+}
+
+MachineRegion::~MachineRegion() { }
+
+//===----------------------------------------------------------------------===//
+// MachineRegionInfo implementation
+//
+
+MachineRegionInfo::MachineRegionInfo() :
+ RegionInfoBase<RegionTraits<MachineFunction>>() {
+
+}
+
+MachineRegionInfo::~MachineRegionInfo() {
+
+}
+
+void MachineRegionInfo::updateStatistics(MachineRegion *R) {
+ ++numMachineRegions;
+
+ // TODO: Slow. Should only be enabled if -stats is used.
+ if (R->isSimple())
+ ++numMachineSimpleRegions;
+}
+
+void MachineRegionInfo::recalculate(MachineFunction &F,
+ MachineDominatorTree *DT_,
+ MachinePostDominatorTree *PDT_,
+ MachineDominanceFrontier *DF_) {
+ DT = DT_;
+ PDT = PDT_;
+ DF = DF_;
+
+ MachineBasicBlock *Entry = GraphTraits<MachineFunction*>::getEntryNode(&F);
+
+ TopLevelRegion = new MachineRegion(Entry, nullptr, this, DT, nullptr);
+ updateStatistics(TopLevelRegion);
+ calculate(F);
+}
+
+//===----------------------------------------------------------------------===//
+// MachineRegionInfoPass implementation
+//
+
+MachineRegionInfoPass::MachineRegionInfoPass() : MachineFunctionPass(ID) {
+ initializeMachineRegionInfoPassPass(*PassRegistry::getPassRegistry());
+}
+
+MachineRegionInfoPass::~MachineRegionInfoPass() {
+
+}
+
+bool MachineRegionInfoPass::runOnMachineFunction(MachineFunction &F) {
+ releaseMemory();
+
+ auto DT = &getAnalysis<MachineDominatorTree>();
+ auto PDT = &getAnalysis<MachinePostDominatorTree>();
+ auto DF = &getAnalysis<MachineDominanceFrontier>();
+
+ RI.recalculate(F, DT, PDT, DF);
+ return false;
+}
+
+void MachineRegionInfoPass::releaseMemory() {
+ RI.releaseMemory();
+}
+
+void MachineRegionInfoPass::verifyAnalysis() const {
+ // Only do verification when the user asks for it; otherwise this expensive
+ // check will be invoked by PMDataManager::verifyPreservedAnalysis when
+ // a region pass (marked PreservedAll) finishes.
+ if (MachineRegionInfo::VerifyRegionInfo)
+ RI.verifyAnalysis();
+}
+
+void MachineRegionInfoPass::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequiredTransitive<DominatorTreeWrapperPass>();
+ AU.addRequired<PostDominatorTree>();
+ AU.addRequired<DominanceFrontier>();
+}
+
+void MachineRegionInfoPass::print(raw_ostream &OS, const Module *) const {
+ RI.print(OS);
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+void MachineRegionInfoPass::dump() const {
+ RI.dump();
+}
+#endif
+
+char MachineRegionInfoPass::ID = 0;
+
+INITIALIZE_PASS_BEGIN(MachineRegionInfoPass, "regions",
+ "Detect single entry single exit regions", true, true)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(MachineDominanceFrontier)
+INITIALIZE_PASS_END(MachineRegionInfoPass, "regions",
+ "Detect single entry single exit regions", true, true)
+
+// Create methods available outside of this file, so they can be used from
+// "include/llvm/LinkAllPasses.h". Otherwise the pass would be deleted by
+// link-time optimization.
+
+namespace llvm {
+ FunctionPass *createMachineRegionInfoPass() {
+ return new MachineRegionInfoPass();
+ }
+}
+
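MachineRegionInfoPass above is a new machine-function analysis, registered with INITIALIZE_PASS_DEPENDENCY on the machine dominance analyses. A sketch of how a client pass might request it; MyMachinePass is a hypothetical name, not part of this commit:

  void MyMachinePass::getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addRequired<MachineRegionInfoPass>();
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

Inside runOnMachineFunction the result would then be fetched with getAnalysis<MachineRegionInfoPass>().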
diff --git a/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp b/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
index f8b8796..f560259 100644
--- a/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
@@ -13,9 +13,9 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Support/raw_os_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Support/raw_os_ostream.h"
using namespace llvm;
@@ -23,7 +23,7 @@ using namespace llvm;
void MachineRegisterInfo::Delegate::anchor() {}
MachineRegisterInfo::MachineRegisterInfo(const TargetMachine &TM)
- : TM(TM), TheDelegate(0), IsSSA(true), TracksLiveness(true) {
+ : TM(TM), TheDelegate(nullptr), IsSSA(true), TracksLiveness(true) {
VRegInfo.reserve(256);
RegAllocHints.reserve(256);
UsedRegUnits.resize(getTargetRegisterInfo()->getNumRegUnits());
@@ -60,7 +60,7 @@ MachineRegisterInfo::constrainRegClass(unsigned Reg,
if (!NewRC || NewRC == OldRC)
return NewRC;
if (NewRC->getNumRegs() < MinNumRegs)
- return 0;
+ return nullptr;
setRegClass(Reg, NewRC);
return NewRC;
}
@@ -77,19 +77,12 @@ MachineRegisterInfo::recomputeRegClass(unsigned Reg, const TargetMachine &TM) {
return false;
// Accumulate constraints from all uses.
- for (reg_nodbg_iterator I = reg_nodbg_begin(Reg), E = reg_nodbg_end(); I != E;
- ++I) {
- const TargetRegisterClass *OpRC =
- I->getRegClassConstraint(I.getOperandNo(), TII,
- getTargetRegisterInfo());
- if (unsigned SubIdx = I.getOperand().getSubReg()) {
- if (OpRC)
- NewRC = getTargetRegisterInfo()->getMatchingSuperRegClass(NewRC, OpRC,
- SubIdx);
- else
- NewRC = getTargetRegisterInfo()->getSubClassWithSubReg(NewRC, SubIdx);
- } else if (OpRC)
- NewRC = getTargetRegisterInfo()->getCommonSubClass(NewRC, OpRC);
+ for (MachineOperand &MO : reg_nodbg_operands(Reg)) {
+ // Apply the effect of the given operand to NewRC.
+ MachineInstr *MI = MO.getParent();
+ unsigned OpNo = &MO - &MI->getOperand(0);
+ NewRC = MI->getRegClassConstraintEffect(OpNo, NewRC, TII,
+ getTargetRegisterInfo());
if (!NewRC || NewRC == OldRC)
return false;
}
@@ -133,8 +126,8 @@ void MachineRegisterInfo::clearVirtRegs() {
void MachineRegisterInfo::verifyUseList(unsigned Reg) const {
#ifndef NDEBUG
bool Valid = true;
- for (reg_iterator I = reg_begin(Reg), E = reg_end(); I != E; ++I) {
- MachineOperand *MO = &I.getOperand();
+ for (MachineOperand &M : reg_operands(Reg)) {
+ MachineOperand *MO = &M;
MachineInstr *MI = MO->getParent();
if (!MI) {
errs() << PrintReg(Reg, getTargetRegisterInfo())
@@ -189,7 +182,7 @@ void MachineRegisterInfo::addRegOperandToUseList(MachineOperand *MO) {
// Head is NULL for an empty list.
if (!Head) {
MO->Contents.Reg.Prev = MO;
- MO->Contents.Reg.Next = 0;
+ MO->Contents.Reg.Next = nullptr;
HeadRef = MO;
return;
}
@@ -210,7 +203,7 @@ void MachineRegisterInfo::addRegOperandToUseList(MachineOperand *MO) {
HeadRef = MO;
} else {
// Insert use at the end.
- MO->Contents.Reg.Next = 0;
+ MO->Contents.Reg.Next = nullptr;
Last->Contents.Reg.Next = MO;
}
}
@@ -234,8 +227,8 @@ void MachineRegisterInfo::removeRegOperandFromUseList(MachineOperand *MO) {
(Next ? Next : Head)->Contents.Reg.Prev = Prev;
- MO->Contents.Reg.Prev = 0;
- MO->Contents.Reg.Next = 0;
+ MO->Contents.Reg.Prev = nullptr;
+ MO->Contents.Reg.Next = nullptr;
}
/// Move NumOps operands from Src to Dst, updating use-def lists as needed.
@@ -295,7 +288,7 @@ void MachineRegisterInfo::replaceRegWith(unsigned FromReg, unsigned ToReg) {
// TODO: This could be more efficient by bulk changing the operands.
for (reg_iterator I = reg_begin(FromReg), E = reg_end(); I != E; ) {
- MachineOperand &O = I.getOperand();
+ MachineOperand &O = *I;
++I;
O.setReg(ToReg);
}
@@ -307,20 +300,20 @@ void MachineRegisterInfo::replaceRegWith(unsigned FromReg, unsigned ToReg) {
/// form, so there should only be one definition.
MachineInstr *MachineRegisterInfo::getVRegDef(unsigned Reg) const {
// Since we are in SSA form, we can use the first definition.
- def_iterator I = def_begin(Reg);
- assert((I.atEnd() || llvm::next(I) == def_end()) &&
+ def_instr_iterator I = def_instr_begin(Reg);
+ assert((I.atEnd() || std::next(I) == def_instr_end()) &&
"getVRegDef assumes a single definition or no definition");
- return !I.atEnd() ? &*I : 0;
+ return !I.atEnd() ? &*I : nullptr;
}
/// getUniqueVRegDef - Return the unique machine instr that defines the
/// specified virtual register or null if none is found. If there are
/// multiple definitions or no definition, return null.
MachineInstr *MachineRegisterInfo::getUniqueVRegDef(unsigned Reg) const {
- if (def_empty(Reg)) return 0;
- def_iterator I = def_begin(Reg);
- if (llvm::next(I) != def_end())
- return 0;
+ if (def_empty(Reg)) return nullptr;
+ def_instr_iterator I = def_instr_begin(Reg);
+ if (std::next(I) != def_instr_end())
+ return nullptr;
return &*I;
}
@@ -336,8 +329,8 @@ bool MachineRegisterInfo::hasOneNonDBGUse(unsigned RegNo) const {
/// optimization passes which extend register lifetimes and need only
/// preserve conservative kill flag information.
void MachineRegisterInfo::clearKillFlags(unsigned Reg) const {
- for (use_iterator UI = use_begin(Reg), UE = use_end(); UI != UE; ++UI)
- UI.getOperand().setIsKill(false);
+ for (MachineOperand &MO : use_operands(Reg))
+ MO.setIsKill(false);
}
bool MachineRegisterInfo::isLiveIn(unsigned Reg) const {
@@ -399,8 +392,8 @@ MachineRegisterInfo::EmitLiveInCopies(MachineBasicBlock *EntryMBB,
#ifndef NDEBUG
void MachineRegisterInfo::dumpUses(unsigned Reg) const {
- for (use_iterator I = use_begin(Reg), E = use_end(); I != E; ++I)
- I.getOperand().getParent()->dump();
+ for (MachineInstr &I : use_instructions(Reg))
+ I.dump();
}
#endif
@@ -422,3 +415,18 @@ bool MachineRegisterInfo::isConstantPhysReg(unsigned PhysReg,
return false;
return true;
}
+
+/// markUsesInDebugValueAsUndef - Mark every DBG_VALUE referencing the
+/// specified register as undefined, which causes the DBG_VALUE to be
+/// deleted during LiveDebugVariables analysis.
+void MachineRegisterInfo::markUsesInDebugValueAsUndef(unsigned Reg) const {
+ // Mark any DBG_VALUE that uses Reg as undef (but don't delete it.)
+ MachineRegisterInfo::use_instr_iterator nextI;
+ for (use_instr_iterator I = use_instr_begin(Reg), E = use_instr_end();
+ I != E; I = nextI) {
+ nextI = std::next(I); // I is invalidated by the setReg
+ MachineInstr *UseMI = &*I;
+ if (UseMI->isDebugValue())
+ UseMI->getOperand(0).setReg(0U);
+ }
+}
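As a usage sketch only (the caller is hypothetical and not part of this change), a pass that is about to erase the last definition of a virtual register would typically neutralize debug users first, so no DBG_VALUE is left pointing at a dead vreg:

void eraseDeadDef(MachineRegisterInfo &MRI, MachineInstr *DefMI, unsigned Reg) {
  // Turn DBG_VALUE operands for Reg into undef (register 0) before the def goes away.
  MRI.markUsesInDebugValueAsUndef(Reg);
  DefMI->eraseFromParent();
}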
diff --git a/contrib/llvm/lib/CodeGen/MachineSSAUpdater.cpp b/contrib/llvm/lib/CodeGen/MachineSSAUpdater.cpp
index 17f0af8..d9173a2 100644
--- a/contrib/llvm/lib/CodeGen/MachineSSAUpdater.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineSSAUpdater.cpp
@@ -29,6 +29,8 @@
#include "llvm/Transforms/Utils/SSAUpdaterImpl.h"
using namespace llvm;
+#define DEBUG_TYPE "machine-ssaupdater"
+
typedef DenseMap<MachineBasicBlock*, unsigned> AvailableValsTy;
static AvailableValsTy &getAvailableVals(void *AV) {
return *static_cast<AvailableValsTy*>(AV);
@@ -36,7 +38,7 @@ static AvailableValsTy &getAvailableVals(void *AV) {
MachineSSAUpdater::MachineSSAUpdater(MachineFunction &MF,
SmallVectorImpl<MachineInstr*> *NewPHI)
- : AV(0), InsertedPHIs(NewPHI) {
+ : AV(nullptr), InsertedPHIs(NewPHI) {
TII = MF.getTarget().getInstrInfo();
MRI = &MF.getRegInfo();
}
@@ -48,7 +50,7 @@ MachineSSAUpdater::~MachineSSAUpdater() {
/// Initialize - Reset this object to get ready for a new set of SSA
/// updates. ProtoValue is the value used to name PHI nodes.
void MachineSSAUpdater::Initialize(unsigned V) {
- if (AV == 0)
+ if (!AV)
AV = new AvailableValsTy();
else
getAvailableVals(AV).clear();
@@ -230,16 +232,6 @@ void MachineSSAUpdater::RewriteUse(MachineOperand &U) {
U.setReg(NewVR);
}
-void MachineSSAUpdater::ReplaceRegWith(unsigned OldReg, unsigned NewReg) {
- MRI->replaceRegWith(OldReg, NewReg);
-
- AvailableValsTy &AvailableVals = getAvailableVals(AV);
- for (DenseMap<MachineBasicBlock*, unsigned>::iterator
- I = AvailableVals.begin(), E = AvailableVals.end(); I != E; ++I)
- if (I->second == OldReg)
- I->second = NewReg;
-}
-
/// SSAUpdaterTraits<MachineSSAUpdater> - Traits for the SSAUpdaterImpl
/// template, specialized for MachineSSAUpdater.
namespace llvm {
@@ -323,7 +315,7 @@ public:
static MachineInstr *InstrIsPHI(MachineInstr *I) {
if (I && I->isPHI())
return I;
- return 0;
+ return nullptr;
}
/// ValueIsPHI - Check if the instruction that defines the specified register
@@ -338,7 +330,7 @@ public:
MachineInstr *PHI = ValueIsPHI(Val, Updater);
if (PHI && PHI->getNumOperands() <= 1)
return PHI;
- return 0;
+ return nullptr;
}
/// GetPHIValue - For the specified PHI instruction, return the register
diff --git a/contrib/llvm/lib/CodeGen/MachineScheduler.cpp b/contrib/llvm/lib/CodeGen/MachineScheduler.cpp
index e71c4df..44191f7 100644
--- a/contrib/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -12,10 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "misched"
-
#include "llvm/CodeGen/MachineScheduler.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
@@ -36,6 +33,8 @@
using namespace llvm;
+#define DEBUG_TYPE "misched"
+
namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
cl::desc("Force top-down list scheduling"));
@@ -49,6 +48,11 @@ static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
+
+static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
+ cl::desc("Only schedule this function"));
+static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
+ cl::desc("Only schedule this MBB#"));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG
@@ -81,7 +85,7 @@ void ScheduleDAGMutation::anchor() {}
//===----------------------------------------------------------------------===//
MachineSchedContext::MachineSchedContext():
- MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {
+ MF(nullptr), MLI(nullptr), MDT(nullptr), PassConfig(nullptr), AA(nullptr), LIS(nullptr) {
RegClassInfo = new RegisterClassInfo();
}
@@ -90,24 +94,46 @@ MachineSchedContext::~MachineSchedContext() {
}
namespace {
+/// Base class for a machine scheduler that can run at any point.
+class MachineSchedulerBase : public MachineSchedContext,
+ public MachineFunctionPass {
+public:
+ MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}
+
+ void print(raw_ostream &O, const Module* = nullptr) const override;
+
+protected:
+ void scheduleRegions(ScheduleDAGInstrs &Scheduler);
+};
+
/// MachineScheduler runs after coalescing and before register allocation.
-class MachineScheduler : public MachineSchedContext,
- public MachineFunctionPass {
+class MachineScheduler : public MachineSchedulerBase {
public:
MachineScheduler();
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ bool runOnMachineFunction(MachineFunction&) override;
+
+ static char ID; // Class identification, replacement for typeinfo
+
+protected:
+ ScheduleDAGInstrs *createMachineScheduler();
+};
- virtual void releaseMemory() {}
+/// PostMachineScheduler runs shortly before code emission.
+class PostMachineScheduler : public MachineSchedulerBase {
+public:
+ PostMachineScheduler();
- virtual bool runOnMachineFunction(MachineFunction&);
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
- virtual void print(raw_ostream &O, const Module* = 0) const;
+ bool runOnMachineFunction(MachineFunction&) override;
static char ID; // Class identification, replacement for typeinfo
protected:
- ScheduleDAGInstrs *createMachineScheduler();
+ ScheduleDAGInstrs *createPostMachineScheduler();
};
} // namespace
@@ -124,7 +150,7 @@ INITIALIZE_PASS_END(MachineScheduler, "misched",
"Machine Instruction Scheduler", false, false)
MachineScheduler::MachineScheduler()
-: MachineFunctionPass(ID) {
+: MachineSchedulerBase(ID) {
initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}
@@ -141,12 +167,32 @@ void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
MachineFunctionPass::getAnalysisUsage(AU);
}
+char PostMachineScheduler::ID = 0;
+
+char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;
+
+INITIALIZE_PASS(PostMachineScheduler, "postmisched",
+ "PostRA Machine Instruction Scheduler", false, false)
+
+PostMachineScheduler::PostMachineScheduler()
+: MachineSchedulerBase(ID) {
+ initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
+}
+
+void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequiredID(MachineDominatorsID);
+ AU.addRequired<MachineLoopInfo>();
+ AU.addRequired<TargetPassConfig>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
MachinePassRegistry MachineSchedRegistry::Registry;
/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
- return 0;
+ return nullptr;
}
/// MachineSchedOpt allows command line selection of the scheduler.
@@ -162,8 +208,8 @@ DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
-static ScheduleDAGInstrs *createGenericSched(MachineSchedContext *C);
-
+static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C);
+static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C);
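For context, new schedulers are made selectable through MachineSchedRegistry; a minimal sketch follows (the name is illustrative, and the ScheduleDAGMILive constructor arguments are assumed since they are not shown in this hunk):

// Sketch: make a custom pre-RA scheduler selectable via -misched=illustrative.
static ScheduleDAGInstrs *createIllustrativeSched(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
}
static MachineSchedRegistry
IllustrativeSchedRegistry("illustrative", "Illustrative custom scheduler",
                          createIllustrativeSched);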
/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
@@ -222,7 +268,20 @@ ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
return Scheduler;
// Default to GenericScheduler.
- return createGenericSched(this);
+ return createGenericSchedLive(this);
+}
+
+/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
+/// the caller. We don't have a command line option to override the postRA
+/// scheduler. The Target must configure it.
+ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
+ // Get the postRA scheduler set by the target for this function.
+ ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
+ if (Scheduler)
+ return Scheduler;
+
+ // Default to GenericScheduler.
+ return createGenericSchedPostRA(this);
}
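On the target side, a hedged sketch of the hook this calls (the class names are hypothetical and the ScheduleDAGMI constructor arguments are assumed, since neither appears in this hunk):

// Sketch: a backend opts in to post-RA MI scheduling by overriding the
// TargetPassConfig hook and returning a DAG driven by a strategy of its
// choosing; MyPostRAStrategy stands in for some MachineSchedStrategy.
ScheduleDAGInstrs *
MyTargetPassConfig::createPostMachineScheduler(MachineSchedContext *C) const {
  return new ScheduleDAGMI(C, make_unique<MyPostRAStrategy>(C),
                           /*IsPostRA=*/true);
}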
/// Top-level MachineScheduler pass driver.
@@ -252,7 +311,6 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
AA = &getAnalysis<AliasAnalysis>();
LIS = &getAnalysis<LiveIntervals>();
- const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
if (VerifyScheduling) {
DEBUG(LIS->dump());
@@ -262,7 +320,66 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
// Instantiate the selected scheduler for this target, function, and
// optimization level.
- OwningPtr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
+ std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
+ scheduleRegions(*Scheduler);
+
+ DEBUG(LIS->dump());
+ if (VerifyScheduling)
+ MF->verify(this, "After machine scheduling.");
+ return true;
+}
+
+bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
+ if (skipOptnoneFunction(*mf.getFunction()))
+ return false;
+
+ const TargetSubtargetInfo &ST =
+ mf.getTarget().getSubtarget<TargetSubtargetInfo>();
+ if (!ST.enablePostMachineScheduler()) {
+ DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
+ return false;
+ }
+ DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));
+
+ // Initialize the context of the pass.
+ MF = &mf;
+ PassConfig = &getAnalysis<TargetPassConfig>();
+
+ if (VerifyScheduling)
+ MF->verify(this, "Before post machine scheduling.");
+
+ // Instantiate the selected scheduler for this target, function, and
+ // optimization level.
+ std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
+ scheduleRegions(*Scheduler);
+
+ if (VerifyScheduling)
+ MF->verify(this, "After post machine scheduling.");
+ return true;
+}
+
+/// Return true if the given instruction should not be included in a scheduling
+/// region.
+///
+/// MachineScheduler does not currently support scheduling across calls. To
+/// handle calls, the DAG builder needs to be modified to create register
+/// anti/output dependencies on the registers clobbered by the call's regmask
+/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
+/// scheduling across calls. In PostRA scheduling, we need the isCall check to
+/// enforce the boundary, but there would be no benefit to postRA scheduling across
+/// calls this late anyway.
+static bool isSchedBoundary(MachineBasicBlock::iterator MI,
+ MachineBasicBlock *MBB,
+ MachineFunction *MF,
+ const TargetInstrInfo *TII,
+ bool IsPostRA) {
+ return MI->isCall() || TII->isSchedulingBoundary(MI, MBB, *MF);
+}
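A concrete illustration of the boundary rule (instruction names are arbitrary):

// Illustrative block:  ldr; add; bl callee; mul; str
// The call is a boundary, so scheduleRegions() below forms two regions,
// [ldr, add] and [mul, str], and schedules each of them independently.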
+
+/// Main driver for both MachineScheduler and PostMachineScheduler.
+void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler) {
+ const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
+ bool IsPostRA = Scheduler.isPostRA();
// Visit all machine basic blocks.
//
@@ -271,7 +388,15 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
MBB != MBBEnd; ++MBB) {
- Scheduler->startBlock(MBB);
+ Scheduler.startBlock(MBB);
+
+#ifndef NDEBUG
+ if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
+ continue;
+ if (SchedOnlyBlock.getNumOccurrences()
+ && (int)SchedOnlyBlock != MBB->getNumber())
+ continue;
+#endif
// Break the block into scheduling regions [I, RegionEnd), and schedule each
// region as soon as it is discovered. RegionEnd points to the scheduling
@@ -283,13 +408,16 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
// The Scheduler may insert instructions during either schedule() or
// exitRegion(), even for empty regions. So the local iterators 'I' and
// 'RegionEnd' are invalid across these calls.
- unsigned RemainingInstrs = MBB->size();
+ //
+ // MBB::size() uses instr_iterator to count. Here we need a bundle to count
+ // as a single instruction.
+ unsigned RemainingInstrs = std::distance(MBB->begin(), MBB->end());
for(MachineBasicBlock::iterator RegionEnd = MBB->end();
- RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {
+ RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {
// Avoid decrementing RegionEnd for blocks with no terminator.
- if (RegionEnd != MBB->end()
- || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
+ if (RegionEnd != MBB->end() ||
+ isSchedBoundary(std::prev(RegionEnd), MBB, MF, TII, IsPostRA)) {
--RegionEnd;
// Count the boundary instruction.
--RemainingInstrs;
@@ -300,21 +428,22 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
unsigned NumRegionInstrs = 0;
MachineBasicBlock::iterator I = RegionEnd;
for(;I != MBB->begin(); --I, --RemainingInstrs, ++NumRegionInstrs) {
- if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
+ if (isSchedBoundary(std::prev(I), MBB, MF, TII, IsPostRA))
break;
}
// Notify the scheduler of the region, even if we may skip scheduling
// it. Perhaps it still needs to be bundled.
- Scheduler->enterRegion(MBB, I, RegionEnd, NumRegionInstrs);
+ Scheduler.enterRegion(MBB, I, RegionEnd, NumRegionInstrs);
// Skip empty scheduling regions (0 or 1 schedulable instructions).
- if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
+ if (I == RegionEnd || I == std::prev(RegionEnd)) {
// Close the current region. Bundle the terminator if needed.
// This invalidates 'RegionEnd' and 'I'.
- Scheduler->exitRegion();
+ Scheduler.exitRegion();
continue;
}
- DEBUG(dbgs() << "********** MI Scheduling **********\n");
+ DEBUG(dbgs() << "********** " << ((Scheduler.isPostRA()) ? "PostRA " : "")
+ << "MI Scheduling **********\n");
DEBUG(dbgs() << MF->getName()
<< ":BB#" << MBB->getNumber() << " " << MBB->getName()
<< "\n From: " << *I << " To: ";
@@ -325,47 +454,46 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
// Schedule a region: possibly reorder instructions.
// This invalidates 'RegionEnd' and 'I'.
- Scheduler->schedule();
+ Scheduler.schedule();
// Close the current region.
- Scheduler->exitRegion();
+ Scheduler.exitRegion();
// Scheduling has invalidated the current iterator 'I'. Ask the
    // scheduler for the top of its scheduled region.
- RegionEnd = Scheduler->begin();
+ RegionEnd = Scheduler.begin();
}
assert(RemainingInstrs == 0 && "Instruction count mismatch!");
- Scheduler->finishBlock();
+ Scheduler.finishBlock();
+ if (Scheduler.isPostRA()) {
+ // FIXME: Ideally, no further passes should rely on kill flags. However,
+ // thumb2 size reduction is currently an exception.
+ Scheduler.fixupKills(MBB);
+ }
}
- Scheduler->finalizeSchedule();
- DEBUG(LIS->dump());
- if (VerifyScheduling)
- MF->verify(this, "After machine scheduling.");
- return true;
+ Scheduler.finalizeSchedule();
}
-void MachineScheduler::print(raw_ostream &O, const Module* m) const {
+void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
// unimplemented
}
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD
void ReadyQueue::dump() {
dbgs() << Name << ": ";
for (unsigned i = 0, e = Queue.size(); i < e; ++i)
dbgs() << Queue[i]->NodeNum << " ";
dbgs() << "\n";
}
-#endif
//===----------------------------------------------------------------------===//
-// ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals
-// preservation.
-//===----------------------------------------------------------------------===//
+// ScheduleDAGMI - Basic machine instruction scheduling. This is
+// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
+// virtual registers.
+//===----------------------------------------------------------------------===//
+// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() {
- delete DFSResult;
- DeleteContainerPointers(Mutations);
- delete SchedImpl;
}
bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
@@ -403,9 +531,14 @@ void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
dbgs() << "*** Scheduling failed! ***\n";
SuccSU->dump(this);
dbgs() << " has been released too many times!\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
#endif
+ // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
+ // CurrCycle may have advanced since then.
+ if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
+ SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();
+
--SuccSU->NumPredsLeft;
if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
SchedImpl->releaseTopNode(SuccSU);
@@ -437,9 +570,14 @@ void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
dbgs() << "*** Scheduling failed! ***\n";
PredSU->dump(this);
dbgs() << " has been released too many times!\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
#endif
+ // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
+ // CurrCycle may have advanced since then.
+ if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
+ PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();
+
--PredSU->NumSuccsLeft;
if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
SchedImpl->releaseBottomNode(PredSU);
@@ -453,10 +591,24 @@ void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
}
}
+/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
+/// crossing a scheduling boundary. [begin, end) includes all instructions in
+/// the region, including the boundary itself and single-instruction regions
+/// that don't get scheduled.
+void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
+ MachineBasicBlock::iterator begin,
+ MachineBasicBlock::iterator end,
+ unsigned regioninstrs)
+{
+ ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);
+
+ SchedImpl->initPolicy(begin, end, regioninstrs);
+}
+
/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
-void ScheduleDAGMI::moveInstruction(MachineInstr *MI,
- MachineBasicBlock::iterator InsertPos) {
+void ScheduleDAGMI::moveInstruction(
+ MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
// Advance RegionBegin if the first instruction moves down.
if (&*RegionBegin == MI)
++RegionBegin;
@@ -465,7 +617,8 @@ void ScheduleDAGMI::moveInstruction(MachineInstr *MI,
BB->splice(InsertPos, BB, MI);
// Update LiveIntervals
- LIS->handleMove(MI, /*UpdateFlags=*/true);
+ if (LIS)
+ LIS->handleMove(MI, /*UpdateFlags=*/true);
// Recede RegionBegin if an instruction moves above the first.
if (RegionBegin == InsertPos)
@@ -483,31 +636,215 @@ bool ScheduleDAGMI::checkSchedLimit() {
return true;
}
+/// Per-region scheduling driver, called back from
+/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
+/// does not consider liveness or register pressure. It is useful for PostRA
+/// scheduling and potentially other custom schedulers.
+void ScheduleDAGMI::schedule() {
+ // Build the DAG.
+ buildSchedGraph(AA);
+
+ Topo.InitDAGTopologicalSorting();
+
+ postprocessDAG();
+
+ SmallVector<SUnit*, 8> TopRoots, BotRoots;
+ findRootsAndBiasEdges(TopRoots, BotRoots);
+
+ // Initialize the strategy before modifying the DAG.
+ // This may initialize a DFSResult to be used for queue priority.
+ SchedImpl->initialize(this);
+
+ DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
+ SUnits[su].dumpAll(this));
+ if (ViewMISchedDAGs) viewGraph();
+
+ // Initialize ready queues now that the DAG and priority data are finalized.
+ initQueues(TopRoots, BotRoots);
+
+ bool IsTopNode = false;
+ while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
+ assert(!SU->isScheduled && "Node already scheduled");
+ if (!checkSchedLimit())
+ break;
+
+ MachineInstr *MI = SU->getInstr();
+ if (IsTopNode) {
+ assert(SU->isTopReady() && "node still has unscheduled dependencies");
+ if (&*CurrentTop == MI)
+ CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
+ else
+ moveInstruction(MI, CurrentTop);
+ }
+ else {
+ assert(SU->isBottomReady() && "node still has unscheduled dependencies");
+ MachineBasicBlock::iterator priorII =
+ priorNonDebug(CurrentBottom, CurrentTop);
+ if (&*priorII == MI)
+ CurrentBottom = priorII;
+ else {
+ if (&*CurrentTop == MI)
+ CurrentTop = nextIfDebug(++CurrentTop, priorII);
+ moveInstruction(MI, CurrentBottom);
+ CurrentBottom = MI;
+ }
+ }
+ // Notify the scheduling strategy before updating the DAG.
+ // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
+ // runs, it can then use the accurate ReadyCycle time to determine whether
+ // newly released nodes can move to the readyQ.
+ SchedImpl->schedNode(SU, IsTopNode);
+
+ updateQueues(SU, IsTopNode);
+ }
+ assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
+
+ placeDebugValues();
+
+ DEBUG({
+ unsigned BBNum = begin()->getParent()->getNumber();
+ dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
+ dumpSchedule();
+ dbgs() << '\n';
+ });
+}
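For orientation, a minimal strategy this driver could run; it is illustrative only and assumes the MachineSchedStrategy overrides exercised above (initialize, releaseTopNode/releaseBottomNode, pickNode, schedNode) are the required ones:

namespace {
// Trivial top-down strategy: issue nodes in the order they become ready.
struct IllustrativeStrategy : public MachineSchedStrategy {
  std::vector<SUnit *> ReadyQ;
  void initialize(ScheduleDAGMI *DAG) override { ReadyQ.clear(); }
  void releaseTopNode(SUnit *SU) override { ReadyQ.push_back(SU); }
  void releaseBottomNode(SUnit *SU) override {}
  SUnit *pickNode(bool &IsTopNode) override {
    IsTopNode = true;
    if (ReadyQ.empty())
      return nullptr;
    SUnit *SU = ReadyQ.front();
    ReadyQ.erase(ReadyQ.begin());
    return SU;
  }
  void schedNode(SUnit *SU, bool IsTopNode) override {}
};
} // anonymous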
+
+/// Apply each ScheduleDAGMutation step in order.
+void ScheduleDAGMI::postprocessDAG() {
+ for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
+ Mutations[i]->apply(this);
+ }
+}
+
+void ScheduleDAGMI::
+findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
+ SmallVectorImpl<SUnit*> &BotRoots) {
+ for (std::vector<SUnit>::iterator
+ I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
+ SUnit *SU = &(*I);
+ assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");
+
+ // Order predecessors so DFSResult follows the critical path.
+ SU->biasCriticalPath();
+
+ // A SUnit is ready to top schedule if it has no predecessors.
+ if (!I->NumPredsLeft)
+ TopRoots.push_back(SU);
+ // A SUnit is ready to bottom schedule if it has no successors.
+ if (!I->NumSuccsLeft)
+ BotRoots.push_back(SU);
+ }
+ ExitSU.biasCriticalPath();
+}
+
+/// Identify DAG roots and set up scheduler queues.
+void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
+ ArrayRef<SUnit*> BotRoots) {
+ NextClusterSucc = nullptr;
+ NextClusterPred = nullptr;
+
+ // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
+ //
+ // Nodes with unreleased weak edges can still be roots.
+ // Release top roots in forward order.
+ for (SmallVectorImpl<SUnit*>::const_iterator
+ I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
+ SchedImpl->releaseTopNode(*I);
+ }
+ // Release bottom roots in reverse order so the higher priority nodes appear
+ // first. This is more natural and slightly more efficient.
+ for (SmallVectorImpl<SUnit*>::const_reverse_iterator
+ I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
+ SchedImpl->releaseBottomNode(*I);
+ }
+
+ releaseSuccessors(&EntrySU);
+ releasePredecessors(&ExitSU);
+
+ SchedImpl->registerRoots();
+
+ // Advance past initial DebugValues.
+ CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
+ CurrentBottom = RegionEnd;
+}
+
+/// Update scheduler queues after scheduling an instruction.
+void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
+ // Release dependent instructions for scheduling.
+ if (IsTopNode)
+ releaseSuccessors(SU);
+ else
+ releasePredecessors(SU);
+
+ SU->isScheduled = true;
+}
+
+/// Reinsert any remaining debug_values, just like the PostRA scheduler.
+void ScheduleDAGMI::placeDebugValues() {
+ // If first instruction was a DBG_VALUE then put it back.
+ if (FirstDbgValue) {
+ BB->splice(RegionBegin, BB, FirstDbgValue);
+ RegionBegin = FirstDbgValue;
+ }
+
+ for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
+ DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
+ std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
+ MachineInstr *DbgValue = P.first;
+ MachineBasicBlock::iterator OrigPrevMI = P.second;
+ if (&*RegionBegin == DbgValue)
+ ++RegionBegin;
+ BB->splice(++OrigPrevMI, BB, DbgValue);
+ if (OrigPrevMI == std::prev(RegionEnd))
+ RegionEnd = DbgValue;
+ }
+ DbgValues.clear();
+ FirstDbgValue = nullptr;
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+void ScheduleDAGMI::dumpSchedule() const {
+ for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
+ if (SUnit *SU = getSUnit(&(*MI)))
+ SU->dump(this);
+ else
+ dbgs() << "Missing SUnit\n";
+ }
+}
+#endif
+
+//===----------------------------------------------------------------------===//
+// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
+// preservation.
+//===----------------------------------------------------------------------===//
+
+ScheduleDAGMILive::~ScheduleDAGMILive() {
+ delete DFSResult;
+}
+
/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
-void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
+void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
MachineBasicBlock::iterator begin,
MachineBasicBlock::iterator end,
unsigned regioninstrs)
{
- ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);
+ // ScheduleDAGMI initializes SchedImpl's per-region policy.
+ ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);
// For convenience remember the end of the liveness region.
- LiveRegionEnd =
- (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd);
+ LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);
SUPressureDiffs.clear();
- SchedImpl->initPolicy(begin, end, regioninstrs);
-
ShouldTrackPressure = SchedImpl->shouldTrackPressure();
}
// Set up the register pressure trackers for the top and bottom scheduled
// regions.
-void ScheduleDAGMI::initRegPressure() {
+void ScheduleDAGMILive::initRegPressure() {
TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);
@@ -567,7 +904,7 @@ void ScheduleDAGMI::initRegPressure() {
dbgs() << "\n");
}
-void ScheduleDAGMI::
+void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
const std::vector<unsigned> &NewMaxPressure) {
const PressureDiff &PDiff = getPressureDiff(SU);
@@ -595,7 +932,7 @@ updateScheduledPressure(const SUnit *SU,
/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
-void ScheduleDAGMI::updatePressureDiffs(ArrayRef<unsigned> LiveUses) {
+void ScheduleDAGMILive::updatePressureDiffs(ArrayRef<unsigned> LiveUses) {
for (unsigned LUIdx = 0, LUEnd = LiveUses.size(); LUIdx != LUEnd; ++LUIdx) {
/// FIXME: Currently assuming single-use physregs.
unsigned Reg = LiveUses[LUIdx];
@@ -644,9 +981,9 @@ void ScheduleDAGMI::updatePressureDiffs(ArrayRef<unsigned> LiveUses) {
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
-/// ScheduleDAGMI then it will want to override this virtual method in order to
-/// update any specialized state.
-void ScheduleDAGMI::schedule() {
+/// ScheduleDAGMILive then it will want to override this virtual method in order
+/// to update any specialized state.
+void ScheduleDAGMILive::schedule() {
buildDAGWithRegPressure();
Topo.InitDAGTopologicalSorting();
@@ -667,6 +1004,11 @@ void ScheduleDAGMI::schedule() {
// Initialize ready queues now that the DAG and priority data are finalized.
initQueues(TopRoots, BotRoots);
+ if (ShouldTrackPressure) {
+ assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
+ TopRPTracker.setPos(CurrentTop);
+ }
+
bool IsTopNode = false;
while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
assert(!SU->isScheduled && "Node already scheduled");
@@ -676,6 +1018,18 @@ void ScheduleDAGMI::schedule() {
scheduleMI(SU, IsTopNode);
updateQueues(SU, IsTopNode);
+
+ if (DFSResult) {
+ unsigned SubtreeID = DFSResult->getSubtreeID(SU);
+ if (!ScheduledTrees.test(SubtreeID)) {
+ ScheduledTrees.set(SubtreeID);
+ DFSResult->scheduleTree(SubtreeID);
+ SchedImpl->scheduleTree(SubtreeID);
+ }
+ }
+
+ // Notify the scheduling strategy after updating the DAG.
+ SchedImpl->schedNode(SU, IsTopNode);
}
assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
@@ -690,7 +1044,7 @@ void ScheduleDAGMI::schedule() {
}
/// Build the DAG and setup three register pressure trackers.
-void ScheduleDAGMI::buildDAGWithRegPressure() {
+void ScheduleDAGMILive::buildDAGWithRegPressure() {
if (!ShouldTrackPressure) {
RPTracker.reset();
RegionCriticalPSets.clear();
@@ -713,14 +1067,7 @@ void ScheduleDAGMI::buildDAGWithRegPressure() {
initRegPressure();
}
-/// Apply each ScheduleDAGMutation step in order.
-void ScheduleDAGMI::postprocessDAG() {
- for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
- Mutations[i]->apply(this);
- }
-}
-
-void ScheduleDAGMI::computeDFSResult() {
+void ScheduleDAGMILive::computeDFSResult() {
if (!DFSResult)
DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
DFSResult->clear();
@@ -730,26 +1077,6 @@ void ScheduleDAGMI::computeDFSResult() {
ScheduledTrees.resize(DFSResult->getNumSubtrees());
}
-void ScheduleDAGMI::findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
- SmallVectorImpl<SUnit*> &BotRoots) {
- for (std::vector<SUnit>::iterator
- I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
- SUnit *SU = &(*I);
- assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");
-
- // Order predecessors so DFSResult follows the critical path.
- SU->biasCriticalPath();
-
- // A SUnit is ready to top schedule if it has no predecessors.
- if (!I->NumPredsLeft)
- TopRoots.push_back(SU);
- // A SUnit is ready to bottom schedule if it has no successors.
- if (!I->NumSuccsLeft)
- BotRoots.push_back(SU);
- }
- ExitSU.biasCriticalPath();
-}
-
/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
@@ -773,7 +1100,10 @@ void ScheduleDAGMI::findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
-unsigned ScheduleDAGMI::computeCyclicCriticalPath() {
+///
+/// This could be relevant to PostRA scheduling, but is currently implemented
+/// assuming LiveIntervals.
+unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
// This only applies to single block loop.
if (!BB->isSuccessor(BB))
return 0;
@@ -835,44 +1165,8 @@ unsigned ScheduleDAGMI::computeCyclicCriticalPath() {
return MaxCyclicLatency;
}
-/// Identify DAG roots and setup scheduler queues.
-void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
- ArrayRef<SUnit*> BotRoots) {
- NextClusterSucc = NULL;
- NextClusterPred = NULL;
-
- // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
- //
- // Nodes with unreleased weak edges can still be roots.
- // Release top roots in forward order.
- for (SmallVectorImpl<SUnit*>::const_iterator
- I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
- SchedImpl->releaseTopNode(*I);
- }
- // Release bottom roots in reverse order so the higher priority nodes appear
- // first. This is more natural and slightly more efficient.
- for (SmallVectorImpl<SUnit*>::const_reverse_iterator
- I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
- SchedImpl->releaseBottomNode(*I);
- }
-
- releaseSuccessors(&EntrySU);
- releasePredecessors(&ExitSU);
-
- SchedImpl->registerRoots();
-
- // Advance past initial DebugValues.
- CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
- CurrentBottom = RegionEnd;
-
- if (ShouldTrackPressure) {
- assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
- TopRPTracker.setPos(CurrentTop);
- }
-}
-
/// Move an instruction and update register pressure.
-void ScheduleDAGMI::scheduleMI(SUnit *SU, bool IsTopNode) {
+void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
// Move the instruction to its new location in the instruction stream.
MachineInstr *MI = SU->getInstr();
@@ -917,63 +1211,6 @@ void ScheduleDAGMI::scheduleMI(SUnit *SU, bool IsTopNode) {
}
}
-/// Update scheduler queues after scheduling an instruction.
-void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
- // Release dependent instructions for scheduling.
- if (IsTopNode)
- releaseSuccessors(SU);
- else
- releasePredecessors(SU);
-
- SU->isScheduled = true;
-
- if (DFSResult) {
- unsigned SubtreeID = DFSResult->getSubtreeID(SU);
- if (!ScheduledTrees.test(SubtreeID)) {
- ScheduledTrees.set(SubtreeID);
- DFSResult->scheduleTree(SubtreeID);
- SchedImpl->scheduleTree(SubtreeID);
- }
- }
-
- // Notify the scheduling strategy after updating the DAG.
- SchedImpl->schedNode(SU, IsTopNode);
-}
-
-/// Reinsert any remaining debug_values, just like the PostRA scheduler.
-void ScheduleDAGMI::placeDebugValues() {
- // If first instruction was a DBG_VALUE then put it back.
- if (FirstDbgValue) {
- BB->splice(RegionBegin, BB, FirstDbgValue);
- RegionBegin = FirstDbgValue;
- }
-
- for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
- DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
- std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
- MachineInstr *DbgValue = P.first;
- MachineBasicBlock::iterator OrigPrevMI = P.second;
- if (&*RegionBegin == DbgValue)
- ++RegionBegin;
- BB->splice(++OrigPrevMI, BB, DbgValue);
- if (OrigPrevMI == llvm::prior(RegionEnd))
- RegionEnd = DbgValue;
- }
- DbgValues.clear();
- FirstDbgValue = NULL;
-}
-
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-void ScheduleDAGMI::dumpSchedule() const {
- for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
- if (SUnit *SU = getSUnit(&(*MI)))
- SU->dump(this);
- else
- dbgs() << "Missing SUnit\n";
- }
-}
-#endif
-
//===----------------------------------------------------------------------===//
// LoadClusterMutation - DAG post-processing to cluster loads.
//===----------------------------------------------------------------------===//
@@ -988,9 +1225,11 @@ class LoadClusterMutation : public ScheduleDAGMutation {
unsigned Offset;
LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
: SU(su), BaseReg(reg), Offset(ofs) {}
+
+ bool operator<(const LoadInfo &RHS) const {
+ return std::tie(BaseReg, Offset) < std::tie(RHS.BaseReg, RHS.Offset);
+ }
};
- static bool LoadInfoLess(const LoadClusterMutation::LoadInfo &LHS,
- const LoadClusterMutation::LoadInfo &RHS);
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
@@ -999,20 +1238,12 @@ public:
const TargetRegisterInfo *tri)
: TII(tii), TRI(tri) {}
- virtual void apply(ScheduleDAGMI *DAG);
+ void apply(ScheduleDAGMI *DAG) override;
protected:
void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
} // anonymous
-bool LoadClusterMutation::LoadInfoLess(
- const LoadClusterMutation::LoadInfo &LHS,
- const LoadClusterMutation::LoadInfo &RHS) {
- if (LHS.BaseReg != RHS.BaseReg)
- return LHS.BaseReg < RHS.BaseReg;
- return LHS.Offset < RHS.Offset;
-}
-
void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
ScheduleDAGMI *DAG) {
SmallVector<LoadClusterMutation::LoadInfo,32> LoadRecords;
@@ -1025,7 +1256,7 @@ void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
}
if (LoadRecords.size() < 2)
return;
- std::sort(LoadRecords.begin(), LoadRecords.end(), LoadInfoLess);
+ std::sort(LoadRecords.begin(), LoadRecords.end());
unsigned ClusterLength = 1;
for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) {
if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) {
@@ -1102,7 +1333,7 @@ class MacroFusion : public ScheduleDAGMutation {
public:
MacroFusion(const TargetInstrInfo *tii): TII(tii) {}
- virtual void apply(ScheduleDAGMI *DAG);
+ void apply(ScheduleDAGMI *DAG) override;
};
} // anonymous
@@ -1151,10 +1382,10 @@ class CopyConstrain : public ScheduleDAGMutation {
public:
CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}
- virtual void apply(ScheduleDAGMI *DAG);
+ void apply(ScheduleDAGMI *DAG) override;
protected:
- void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG);
+ void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};
} // anonymous
@@ -1177,7 +1408,7 @@ protected:
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
-void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG) {
+void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
LiveIntervals *LIS = DAG->getLIS();
MachineInstr *Copy = CopySU->getInstr();
@@ -1227,19 +1458,19 @@ void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG) {
// Check if GlobalLI contains a hole in the vicinity of LocalLI.
if (GlobalSegment != GlobalLI->begin()) {
// Two address defs have no hole.
- if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->end,
+ if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
GlobalSegment->start)) {
return;
}
// If the prior global segment may be defined by the same two-address
// instruction that also defines LocalLI, then can't make a hole here.
- if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->start,
+ if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
LocalLI->beginIndex())) {
return;
}
// If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
// it would be a disconnected component in the live range.
- assert(llvm::prior(GlobalSegment)->start < LocalLI->beginIndex() &&
+ assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
"Disconnected LRG within the scheduling region.");
}
MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
@@ -1302,6 +1533,8 @@ void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG) {
/// \brief Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
void CopyConstrain::apply(ScheduleDAGMI *DAG) {
+ assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");
+
MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
if (FirstPos == DAG->end())
return;
@@ -1314,370 +1547,53 @@ void CopyConstrain::apply(ScheduleDAGMI *DAG) {
if (!SU->getInstr()->isCopy())
continue;
- constrainLocalCopy(SU, DAG);
+ constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG));
}
}
//===----------------------------------------------------------------------===//
-// GenericScheduler - Implementation of the generic MachineSchedStrategy.
+// MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
+// and possibly other custom schedulers.
//===----------------------------------------------------------------------===//
-namespace {
-/// GenericScheduler shrinks the unscheduled zone using heuristics to balance
-/// the schedule.
-class GenericScheduler : public MachineSchedStrategy {
-public:
- /// Represent the type of SchedCandidate found within a single queue.
- /// pickNodeBidirectional depends on these listed by decreasing priority.
- enum CandReason {
- NoCand, PhysRegCopy, RegExcess, RegCritical, Cluster, Weak, RegMax,
- ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
- TopDepthReduce, TopPathReduce, NextDefUse, NodeOrder};
-
-#ifndef NDEBUG
- static const char *getReasonStr(GenericScheduler::CandReason Reason);
-#endif
-
- /// Policy for scheduling the next instruction in the candidate's zone.
- struct CandPolicy {
- bool ReduceLatency;
- unsigned ReduceResIdx;
- unsigned DemandResIdx;
-
- CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {}
- };
-
- /// Status of an instruction's critical resource consumption.
- struct SchedResourceDelta {
- // Count critical resources in the scheduled region required by SU.
- unsigned CritResources;
-
- // Count critical resources from another region consumed by SU.
- unsigned DemandedResources;
-
- SchedResourceDelta(): CritResources(0), DemandedResources(0) {}
-
- bool operator==(const SchedResourceDelta &RHS) const {
- return CritResources == RHS.CritResources
- && DemandedResources == RHS.DemandedResources;
- }
- bool operator!=(const SchedResourceDelta &RHS) const {
- return !operator==(RHS);
- }
- };
-
- /// Store the state used by GenericScheduler heuristics, required for the
- /// lifetime of one invocation of pickNode().
- struct SchedCandidate {
- CandPolicy Policy;
-
- // The best SUnit candidate.
- SUnit *SU;
-
- // The reason for this candidate.
- CandReason Reason;
-
- // Set of reasons that apply to multiple candidates.
- uint32_t RepeatReasonSet;
-
- // Register pressure values for the best candidate.
- RegPressureDelta RPDelta;
-
- // Critical resource consumption of the best candidate.
- SchedResourceDelta ResDelta;
-
- SchedCandidate(const CandPolicy &policy)
- : Policy(policy), SU(NULL), Reason(NoCand), RepeatReasonSet(0) {}
-
- bool isValid() const { return SU; }
-
- // Copy the status of another candidate without changing policy.
- void setBest(SchedCandidate &Best) {
- assert(Best.Reason != NoCand && "uninitialized Sched candidate");
- SU = Best.SU;
- Reason = Best.Reason;
- RPDelta = Best.RPDelta;
- ResDelta = Best.ResDelta;
- }
-
- bool isRepeat(CandReason R) { return RepeatReasonSet & (1 << R); }
- void setRepeat(CandReason R) { RepeatReasonSet |= (1 << R); }
-
- void initResourceDelta(const ScheduleDAGMI *DAG,
- const TargetSchedModel *SchedModel);
- };
-
- /// Summarize the unscheduled region.
- struct SchedRemainder {
- // Critical path through the DAG in expected latency.
- unsigned CriticalPath;
- unsigned CyclicCritPath;
-
- // Scaled count of micro-ops left to schedule.
- unsigned RemIssueCount;
-
- bool IsAcyclicLatencyLimited;
-
- // Unscheduled resources
- SmallVector<unsigned, 16> RemainingCounts;
-
- void reset() {
- CriticalPath = 0;
- CyclicCritPath = 0;
- RemIssueCount = 0;
- IsAcyclicLatencyLimited = false;
- RemainingCounts.clear();
- }
-
- SchedRemainder() { reset(); }
-
- void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
- };
-
- /// Each Scheduling boundary is associated with ready queues. It tracks the
- /// current cycle in the direction of movement, and maintains the state
- /// of "hazards" and other interlocks at the current cycle.
- struct SchedBoundary {
- ScheduleDAGMI *DAG;
- const TargetSchedModel *SchedModel;
- SchedRemainder *Rem;
-
- ReadyQueue Available;
- ReadyQueue Pending;
- bool CheckPending;
-
- // For heuristics, keep a list of the nodes that immediately depend on the
- // most recently scheduled node.
- SmallPtrSet<const SUnit*, 8> NextSUs;
-
- ScheduleHazardRecognizer *HazardRec;
-
- /// Number of cycles it takes to issue the instructions scheduled in this
- /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
- /// See getStalls().
- unsigned CurrCycle;
-
- /// Micro-ops issued in the current cycle
- unsigned CurrMOps;
-
- /// MinReadyCycle - Cycle of the soonest available instruction.
- unsigned MinReadyCycle;
-
- // The expected latency of the critical path in this scheduled zone.
- unsigned ExpectedLatency;
-
- // The latency of dependence chains leading into this zone.
- // For each node scheduled bottom-up: DLat = max DLat, N.Depth.
- // For each cycle scheduled: DLat -= 1.
- unsigned DependentLatency;
-
- /// Count the scheduled (issued) micro-ops that can be retired by
- /// time=CurrCycle assuming the first scheduled instr is retired at time=0.
- unsigned RetiredMOps;
-
- // Count scheduled resources that have been executed. Resources are
- // considered executed if they become ready in the time that it takes to
- // saturate any resource including the one in question. Counts are scaled
- // for direct comparison with other resources. Counts can be compared with
- // MOps * getMicroOpFactor and Latency * getLatencyFactor.
- SmallVector<unsigned, 16> ExecutedResCounts;
-
- /// Cache the max count for a single resource.
- unsigned MaxExecutedResCount;
-
- // Cache the critical resources ID in this scheduled zone.
- unsigned ZoneCritResIdx;
-
- // Is the scheduled region resource limited vs. latency limited.
- bool IsResourceLimited;
-
-#ifndef NDEBUG
- // Remember the greatest operand latency as an upper bound on the number of
- // times we should retry the pending queue because of a hazard.
- unsigned MaxObservedLatency;
-#endif
-
- void reset() {
- // A new HazardRec is created for each DAG and owned by SchedBoundary.
- // Destroying and reconstructing it is very expensive though. So keep
- // invalid, placeholder HazardRecs.
- if (HazardRec && HazardRec->isEnabled()) {
- delete HazardRec;
- HazardRec = 0;
- }
- Available.clear();
- Pending.clear();
- CheckPending = false;
- NextSUs.clear();
- CurrCycle = 0;
- CurrMOps = 0;
- MinReadyCycle = UINT_MAX;
- ExpectedLatency = 0;
- DependentLatency = 0;
- RetiredMOps = 0;
- MaxExecutedResCount = 0;
- ZoneCritResIdx = 0;
- IsResourceLimited = false;
-#ifndef NDEBUG
- MaxObservedLatency = 0;
-#endif
- // Reserve a zero-count for invalid CritResIdx.
- ExecutedResCounts.resize(1);
- assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
- }
-
- /// Pending queues extend the ready queues with the same ID and the
- /// PendingFlag set.
- SchedBoundary(unsigned ID, const Twine &Name):
- DAG(0), SchedModel(0), Rem(0), Available(ID, Name+".A"),
- Pending(ID << GenericScheduler::LogMaxQID, Name+".P"),
- HazardRec(0) {
- reset();
- }
-
- ~SchedBoundary() { delete HazardRec; }
-
- void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
- SchedRemainder *rem);
-
- bool isTop() const {
- return Available.getID() == GenericScheduler::TopQID;
- }
-
-#ifndef NDEBUG
- const char *getResourceName(unsigned PIdx) {
- if (!PIdx)
- return "MOps";
- return SchedModel->getProcResource(PIdx)->Name;
- }
-#endif
-
- /// Get the number of latency cycles "covered" by the scheduled
- /// instructions. This is the larger of the critical path within the zone
- /// and the number of cycles required to issue the instructions.
- unsigned getScheduledLatency() const {
- return std::max(ExpectedLatency, CurrCycle);
- }
-
- unsigned getUnscheduledLatency(SUnit *SU) const {
- return isTop() ? SU->getHeight() : SU->getDepth();
- }
-
- unsigned getResourceCount(unsigned ResIdx) const {
- return ExecutedResCounts[ResIdx];
- }
-
- /// Get the scaled count of scheduled micro-ops and resources, including
- /// executed resources.
- unsigned getCriticalCount() const {
- if (!ZoneCritResIdx)
- return RetiredMOps * SchedModel->getMicroOpFactor();
- return getResourceCount(ZoneCritResIdx);
- }
-
- /// Get a scaled count for the minimum execution time of the scheduled
- /// micro-ops that are ready to execute by getExecutedCount. Notice the
- /// feedback loop.
- unsigned getExecutedCount() const {
- return std::max(CurrCycle * SchedModel->getLatencyFactor(),
- MaxExecutedResCount);
- }
-
- bool checkHazard(SUnit *SU);
-
- unsigned findMaxLatency(ArrayRef<SUnit*> ReadySUs);
-
- unsigned getOtherResourceCount(unsigned &OtherCritIdx);
-
- void setPolicy(CandPolicy &Policy, SchedBoundary &OtherZone);
-
- void releaseNode(SUnit *SU, unsigned ReadyCycle);
-
- void bumpCycle(unsigned NextCycle);
-
- void incExecutedResources(unsigned PIdx, unsigned Count);
-
- unsigned countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle);
-
- void bumpNode(SUnit *SU);
-
- void releasePending();
-
- void removeReady(SUnit *SU);
-
- SUnit *pickOnlyChoice();
+static const unsigned InvalidCycle = ~0U;
-#ifndef NDEBUG
- void dumpScheduledState();
-#endif
- };
-
-private:
- const MachineSchedContext *Context;
- ScheduleDAGMI *DAG;
- const TargetSchedModel *SchedModel;
- const TargetRegisterInfo *TRI;
-
- // State of the top and bottom scheduled instruction boundaries.
- SchedRemainder Rem;
- SchedBoundary Top;
- SchedBoundary Bot;
-
- MachineSchedPolicy RegionPolicy;
-public:
- /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
- enum {
- TopQID = 1,
- BotQID = 2,
- LogMaxQID = 2
- };
-
- GenericScheduler(const MachineSchedContext *C):
- Context(C), DAG(0), SchedModel(0), TRI(0),
- Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}
-
- virtual void initPolicy(MachineBasicBlock::iterator Begin,
- MachineBasicBlock::iterator End,
- unsigned NumRegionInstrs);
-
- bool shouldTrackPressure() const { return RegionPolicy.ShouldTrackPressure; }
-
- virtual void initialize(ScheduleDAGMI *dag);
-
- virtual SUnit *pickNode(bool &IsTopNode);
-
- virtual void schedNode(SUnit *SU, bool IsTopNode);
-
- virtual void releaseTopNode(SUnit *SU);
-
- virtual void releaseBottomNode(SUnit *SU);
-
- virtual void registerRoots();
-
-protected:
- void checkAcyclicLatency();
-
- void tryCandidate(SchedCandidate &Cand,
- SchedCandidate &TryCand,
- SchedBoundary &Zone,
- const RegPressureTracker &RPTracker,
- RegPressureTracker &TempTracker);
-
- SUnit *pickNodeBidirectional(bool &IsTopNode);
-
- void pickNodeFromQueue(SchedBoundary &Zone,
- const RegPressureTracker &RPTracker,
- SchedCandidate &Candidate);
-
- void reschedulePhysRegCopies(SUnit *SU, bool isTop);
+SchedBoundary::~SchedBoundary() { delete HazardRec; }
+void SchedBoundary::reset() {
+ // A new HazardRec is created for each DAG and owned by SchedBoundary.
+ // Destroying and reconstructing it is very expensive though. So keep
+ // invalid, placeholder HazardRecs.
+ if (HazardRec && HazardRec->isEnabled()) {
+ delete HazardRec;
+ HazardRec = nullptr;
+ }
+ Available.clear();
+ Pending.clear();
+ CheckPending = false;
+ NextSUs.clear();
+ CurrCycle = 0;
+ CurrMOps = 0;
+ MinReadyCycle = UINT_MAX;
+ ExpectedLatency = 0;
+ DependentLatency = 0;
+ RetiredMOps = 0;
+ MaxExecutedResCount = 0;
+ ZoneCritResIdx = 0;
+ IsResourceLimited = false;
+ ReservedCycles.clear();
#ifndef NDEBUG
- void traceCandidate(const SchedCandidate &Cand);
+ // Track the maximum number of stall cycles that could arise either from the
+  // Track the maximum number of stall cycles that could arise either from the
+  // latency of a DAG edge or from the number of cycles that a processor
+  // resource is reserved (SchedBoundary::ReservedCycles).
#endif
-};
-} // namespace
+ // Reserve a zero-count for invalid CritResIdx.
+ ExecutedResCounts.resize(1);
+ assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
+}
-void GenericScheduler::SchedRemainder::
+void SchedRemainder::
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
reset();
if (!SchedModel->hasInstrSchedModel())
@@ -1698,175 +1614,47 @@ init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
}
}
-void GenericScheduler::SchedBoundary::
+void SchedBoundary::
init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
reset();
DAG = dag;
SchedModel = smodel;
Rem = rem;
- if (SchedModel->hasInstrSchedModel())
+ if (SchedModel->hasInstrSchedModel()) {
ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
-}
-
-/// Initialize the per-region scheduling policy.
-void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
- MachineBasicBlock::iterator End,
- unsigned NumRegionInstrs) {
- const TargetMachine &TM = Context->MF->getTarget();
-
- // Avoid setting up the register pressure tracker for small regions to save
- // compile time. As a rough heuristic, only track pressure when the number of
- // schedulable instructions exceeds half the integer register file.
- unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
- TM.getTargetLowering()->getRegClassFor(MVT::i32));
-
- RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
-
- // For generic targets, we default to bottom-up, because it's simpler and more
- // compile-time optimizations have been implemented in that direction.
- RegionPolicy.OnlyBottomUp = true;
-
- // Allow the subtarget to override default policy.
- const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
- ST.overrideSchedPolicy(RegionPolicy, Begin, End, NumRegionInstrs);
-
- // After subtarget overrides, apply command line options.
- if (!EnableRegPressure)
- RegionPolicy.ShouldTrackPressure = false;
-
- // Check -misched-topdown/bottomup can force or unforce scheduling direction.
- // e.g. -misched-bottomup=false allows scheduling in both directions.
- assert((!ForceTopDown || !ForceBottomUp) &&
- "-misched-topdown incompatible with -misched-bottomup");
- if (ForceBottomUp.getNumOccurrences() > 0) {
- RegionPolicy.OnlyBottomUp = ForceBottomUp;
- if (RegionPolicy.OnlyBottomUp)
- RegionPolicy.OnlyTopDown = false;
- }
- if (ForceTopDown.getNumOccurrences() > 0) {
- RegionPolicy.OnlyTopDown = ForceTopDown;
- if (RegionPolicy.OnlyTopDown)
- RegionPolicy.OnlyBottomUp = false;
- }
-}
-
-void GenericScheduler::initialize(ScheduleDAGMI *dag) {
- DAG = dag;
- SchedModel = DAG->getSchedModel();
- TRI = DAG->TRI;
-
- Rem.init(DAG, SchedModel);
- Top.init(DAG, SchedModel, &Rem);
- Bot.init(DAG, SchedModel, &Rem);
-
- // Initialize resource counts.
-
- // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
- // are disabled, then these HazardRecs will be disabled.
- const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
- const TargetMachine &TM = DAG->MF.getTarget();
- if (!Top.HazardRec) {
- Top.HazardRec =
- TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
- }
- if (!Bot.HazardRec) {
- Bot.HazardRec =
- TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
- }
-}
-
-void GenericScheduler::releaseTopNode(SUnit *SU) {
- if (SU->isScheduled)
- return;
-
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- if (I->isWeak())
- continue;
- unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
- unsigned Latency = I->getLatency();
-#ifndef NDEBUG
- Top.MaxObservedLatency = std::max(Latency, Top.MaxObservedLatency);
-#endif
- if (SU->TopReadyCycle < PredReadyCycle + Latency)
- SU->TopReadyCycle = PredReadyCycle + Latency;
- }
- Top.releaseNode(SU, SU->TopReadyCycle);
-}
-
-void GenericScheduler::releaseBottomNode(SUnit *SU) {
- if (SU->isScheduled)
- return;
-
- assert(SU->getInstr() && "Scheduled SUnit must have instr");
-
- for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I) {
- if (I->isWeak())
- continue;
- unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
- unsigned Latency = I->getLatency();
-#ifndef NDEBUG
- Bot.MaxObservedLatency = std::max(Latency, Bot.MaxObservedLatency);
-#endif
- if (SU->BotReadyCycle < SuccReadyCycle + Latency)
- SU->BotReadyCycle = SuccReadyCycle + Latency;
+ ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
}
- Bot.releaseNode(SU, SU->BotReadyCycle);
}
-/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
-/// critical path by more cycles than it takes to drain the instruction buffer.
-/// We estimate an upper bounds on in-flight instructions as:
-///
-/// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
-/// InFlightIterations = AcyclicPath / CyclesPerIteration
-/// InFlightResources = InFlightIterations * LoopResources
-///
-/// TODO: Check execution resources in addition to IssueCount.
-void GenericScheduler::checkAcyclicLatency() {
- if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
- return;
-
- // Scaled number of cycles per loop iteration.
- unsigned IterCount =
- std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
- Rem.RemIssueCount);
- // Scaled acyclic critical path.
- unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
- // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
- unsigned InFlightCount =
- (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
- unsigned BufferLimit =
- SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
-
- Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
+/// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
+/// these "soft stalls" differently than the hard stall cycles based on CPU
+/// resources and computed by checkHazard(). A fully in-order model
+/// (MicroOpBufferSize==0) will not make use of this since instructions are not
+/// available for scheduling until they are ready. However, a weaker in-order
+/// model may use this for heuristics. For example, if a processor has in-order
+/// behavior when reading certain resources, this may come into play.
+unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
+ if (!SU->isUnbuffered)
+ return 0;
- DEBUG(dbgs() << "IssueCycles="
- << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
- << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
- << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
- << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
- << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
- if (Rem.IsAcyclicLatencyLimited)
- dbgs() << " ACYCLIC LATENCY LIMIT\n");
+ unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
+ if (ReadyCycle > CurrCycle)
+ return ReadyCycle - CurrCycle;
+ return 0;
}
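
A minimal standalone sketch of the soft-stall computation in SchedBoundary::getLatencyStallCycles above, using a hypothetical FakeUnit stand-in rather than the real SUnit/SchedBoundary types (assumption: only the unbuffered flag and the ready cycle matter here):

// Hypothetical stand-in for the fields getLatencyStallCycles consults.
struct FakeUnit {
  bool IsUnbuffered;   // instruction uses an in-order ("unbuffered") resource
  unsigned ReadyCycle; // cycle at which its operands become ready
};

// Soft stall: cycles the unit would wait past CurrCycle if issued now.
unsigned latencyStallCycles(const FakeUnit &U, unsigned CurrCycle) {
  if (!U.IsUnbuffered)
    return 0; // buffered resources hide the wait
  return U.ReadyCycle > CurrCycle ? U.ReadyCycle - CurrCycle : 0;
}

int main() {
  FakeUnit U{true, 7};
  return latencyStallCycles(U, 5) == 2 ? 0 : 1; // stalls 2 cycles at cycle 5
}
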
-void GenericScheduler::registerRoots() {
- Rem.CriticalPath = DAG->ExitSU.getDepth();
-
- // Some roots may not feed into ExitSU. Check all of them in case.
- for (std::vector<SUnit*>::const_iterator
- I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
- if ((*I)->getDepth() > Rem.CriticalPath)
- Rem.CriticalPath = (*I)->getDepth();
- }
- DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
-
- if (EnableCyclicPath) {
- Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
- checkAcyclicLatency();
- }
+/// Compute the next cycle at which the given processor resource can be
+/// scheduled.
+unsigned SchedBoundary::
+getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
+ unsigned NextUnreserved = ReservedCycles[PIdx];
+ // If this resource has never been used, always return cycle zero.
+ if (NextUnreserved == InvalidCycle)
+ return 0;
+ // For bottom-up scheduling add the cycles needed for the current operation.
+ if (!isTop())
+ NextUnreserved += Cycles;
+ return NextUnreserved;
}
/// Does this SU have a hazard within the current instruction group.
@@ -1882,23 +1670,41 @@ void GenericScheduler::registerRoots() {
/// can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
-bool GenericScheduler::SchedBoundary::checkHazard(SUnit *SU) {
- if (HazardRec->isEnabled())
- return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;
-
+bool SchedBoundary::checkHazard(SUnit *SU) {
+ if (HazardRec->isEnabled()
+ && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
+ return true;
+ }
unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops="
<< SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
return true;
}
+ if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
+ const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
+ for (TargetSchedModel::ProcResIter
+ PI = SchedModel->getWriteProcResBegin(SC),
+ PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
+ unsigned NRCycle = getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles);
+ if (NRCycle > CurrCycle) {
+#ifndef NDEBUG
+ MaxObservedStall = std::max(PI->Cycles, MaxObservedStall);
+#endif
+ DEBUG(dbgs() << " SU(" << SU->NodeNum << ") "
+ << SchedModel->getResourceName(PI->ProcResourceIdx)
+ << "=" << NRCycle << "c\n");
+ return true;
+ }
+ }
+ }
return false;
}
// Find the unscheduled node in ReadySUs with the highest latency.
-unsigned GenericScheduler::SchedBoundary::
+unsigned SchedBoundary::
findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
- SUnit *LateSU = 0;
+ SUnit *LateSU = nullptr;
unsigned RemLatency = 0;
for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
I != E; ++I) {
@@ -1918,7 +1724,7 @@ findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
// Count resources in this zone and the remaining unscheduled
// instruction. Return the max count, scaled. Set OtherCritIdx to the critical
// resource index, or zero if the zone is issue limited.
-unsigned GenericScheduler::SchedBoundary::
+unsigned SchedBoundary::
getOtherResourceCount(unsigned &OtherCritIdx) {
OtherCritIdx = 0;
if (!SchedModel->hasInstrSchedModel())
@@ -1939,74 +1745,22 @@ getOtherResourceCount(unsigned &OtherCritIdx) {
if (OtherCritIdx) {
DEBUG(dbgs() << " " << Available.getName() << " + Remain CritRes: "
<< OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
- << " " << getResourceName(OtherCritIdx) << "\n");
+ << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
}
return OtherCritCount;
}
-/// Set the CandPolicy for this zone given the current resources and latencies
-/// inside and outside the zone.
-void GenericScheduler::SchedBoundary::setPolicy(CandPolicy &Policy,
- SchedBoundary &OtherZone) {
- // Now that potential stalls have been considered, apply preemptive heuristics
- // based on the the total latency and resources inside and outside this
- // zone.
-
- // Compute remaining latency. We need this both to determine whether the
- // overall schedule has become latency-limited and whether the instructions
- // outside this zone are resource or latency limited.
- //
- // The "dependent" latency is updated incrementally during scheduling as the
- // max height/depth of scheduled nodes minus the cycles since it was
- // scheduled:
- // DLat = max (N.depth - (CurrCycle - N.ReadyCycle) for N in Zone
- //
- // The "independent" latency is the max ready queue depth:
- // ILat = max N.depth for N in Available|Pending
- //
- // RemainingLatency is the greater of independent and dependent latency.
- unsigned RemLatency = DependentLatency;
- RemLatency = std::max(RemLatency, findMaxLatency(Available.elements()));
- RemLatency = std::max(RemLatency, findMaxLatency(Pending.elements()));
-
- // Compute the critical resource outside the zone.
- unsigned OtherCritIdx;
- unsigned OtherCount = OtherZone.getOtherResourceCount(OtherCritIdx);
-
- bool OtherResLimited = false;
- if (SchedModel->hasInstrSchedModel()) {
- unsigned LFactor = SchedModel->getLatencyFactor();
- OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
- }
- if (!OtherResLimited && (RemLatency + CurrCycle > Rem->CriticalPath)) {
- Policy.ReduceLatency |= true;
- DEBUG(dbgs() << " " << Available.getName() << " RemainingLatency "
- << RemLatency << " + " << CurrCycle << "c > CritPath "
- << Rem->CriticalPath << "\n");
- }
- // If the same resource is limiting inside and outside the zone, do nothing.
- if (ZoneCritResIdx == OtherCritIdx)
- return;
-
- DEBUG(
- if (IsResourceLimited) {
- dbgs() << " " << Available.getName() << " ResourceLimited: "
- << getResourceName(ZoneCritResIdx) << "\n";
- }
- if (OtherResLimited)
- dbgs() << " RemainingLimit: " << getResourceName(OtherCritIdx) << "\n";
- if (!IsResourceLimited && !OtherResLimited)
- dbgs() << " Latency limited both directions.\n");
-
- if (IsResourceLimited && !Policy.ReduceResIdx)
- Policy.ReduceResIdx = ZoneCritResIdx;
+void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
+ assert(SU->getInstr() && "Scheduled SUnit must have instr");
- if (OtherResLimited)
- Policy.DemandResIdx = OtherCritIdx;
-}
+#ifndef NDEBUG
+  // ReadyCycle has been bumped up to the CurrCycle when this node was
+ // scheduled, but CurrCycle may have been eagerly advanced immediately after
+ // scheduling, so may now be greater than ReadyCycle.
+ if (ReadyCycle > CurrCycle)
+ MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
+#endif
-void GenericScheduler::SchedBoundary::releaseNode(SUnit *SU,
- unsigned ReadyCycle) {
if (ReadyCycle < MinReadyCycle)
MinReadyCycle = ReadyCycle;
@@ -2022,8 +1776,22 @@ void GenericScheduler::SchedBoundary::releaseNode(SUnit *SU,
NextSUs.insert(SU);
}
+void SchedBoundary::releaseTopNode(SUnit *SU) {
+ if (SU->isScheduled)
+ return;
+
+ releaseNode(SU, SU->TopReadyCycle);
+}
+
+void SchedBoundary::releaseBottomNode(SUnit *SU) {
+ if (SU->isScheduled)
+ return;
+
+ releaseNode(SU, SU->BotReadyCycle);
+}
+
/// Move the boundary of scheduled code by one cycle.
-void GenericScheduler::SchedBoundary::bumpCycle(unsigned NextCycle) {
+void SchedBoundary::bumpCycle(unsigned NextCycle) {
if (SchedModel->getMicroOpBufferSize() == 0) {
assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
if (MinReadyCycle > NextCycle)
@@ -2061,8 +1829,7 @@ void GenericScheduler::SchedBoundary::bumpCycle(unsigned NextCycle) {
DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
}
-void GenericScheduler::SchedBoundary::incExecutedResources(unsigned PIdx,
- unsigned Count) {
+void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
ExecutedResCounts[PIdx] += Count;
if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
MaxExecutedResCount = ExecutedResCounts[PIdx];
@@ -2075,11 +1842,11 @@ void GenericScheduler::SchedBoundary::incExecutedResources(unsigned PIdx,
///
/// \return the next cycle at which the instruction may execute without
/// oversubscribing resources.
-unsigned GenericScheduler::SchedBoundary::
-countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle) {
+unsigned SchedBoundary::
+countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
unsigned Factor = SchedModel->getResourceFactor(PIdx);
unsigned Count = Factor * Cycles;
- DEBUG(dbgs() << " " << getResourceName(PIdx)
+ DEBUG(dbgs() << " " << SchedModel->getResourceName(PIdx)
<< " +" << Cycles << "x" << Factor << "u\n");
// Update Executed resources counts.
@@ -2092,15 +1859,21 @@ countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle) {
if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
ZoneCritResIdx = PIdx;
DEBUG(dbgs() << " *** Critical resource "
- << getResourceName(PIdx) << ": "
+ << SchedModel->getResourceName(PIdx) << ": "
<< getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
}
- // TODO: We don't yet model reserved resources. It's not hard though.
- return CurrCycle;
+ // For reserved resources, record the highest cycle using the resource.
+ unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
+ if (NextAvailable > CurrCycle) {
+ DEBUG(dbgs() << " Resource conflict: "
+ << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
+ << NextAvailable << "\n");
+ }
+ return NextAvailable;
}
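
As a rough illustration of the scaled resource counting used by countResource above (Count = Factor * Cycles, and the zone's critical resource is simply the one with the largest accumulated scaled count), here is a small standalone sketch with made-up factors, independent of the TargetSchedModel API:

#include <vector>
#include <cstdio>

int main() {
  // Hypothetical per-resource scaling factors and executed counts.
  std::vector<unsigned> Factor = {1, 2, 4};   // scaling per resource kind
  std::vector<unsigned> Executed = {0, 0, 0}; // scaled counts, like ExecutedResCounts

  // "Issue" a few operations: count = Factor * Cycles, as in countResource.
  auto count = [&](unsigned PIdx, unsigned Cycles) {
    Executed[PIdx] += Factor[PIdx] * Cycles;
  };
  count(0, 3); count(1, 2); count(2, 1);      // -> {3, 4, 4}

  // The zone's critical resource is the one with the largest scaled count.
  unsigned CritIdx = 0;
  for (unsigned i = 1; i < Executed.size(); ++i)
    if (Executed[i] > Executed[CritIdx])
      CritIdx = i;
  std::printf("critical resource kind = %u (count %u)\n", CritIdx,
              Executed[CritIdx]);
  return 0;
}
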
/// Move the boundary of scheduled code by one SUnit.
-void GenericScheduler::SchedBoundary::bumpNode(SUnit *SU) {
+void SchedBoundary::bumpNode(SUnit *SU) {
// Update the reservation table.
if (HazardRec->isEnabled()) {
if (!isTop() && SU->isCall) {
@@ -2110,25 +1883,18 @@ void GenericScheduler::SchedBoundary::bumpNode(SUnit *SU) {
}
HazardRec->EmitInstruction(SU);
}
+ // checkHazard should prevent scheduling multiple instructions per cycle that
+ // exceed the issue width.
const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
- CurrMOps += IncMOps;
- // checkHazard prevents scheduling multiple instructions per cycle that exceed
- // issue width. However, we commonly reach the maximum. In this case
- // opportunistically bump the cycle to avoid uselessly checking everything in
- // the readyQ. Furthermore, a single instruction may produce more than one
- // cycle's worth of micro-ops.
- //
- // TODO: Also check if this SU must end a dispatch group.
- unsigned NextCycle = CurrCycle;
- if (CurrMOps >= SchedModel->getIssueWidth()) {
- ++NextCycle;
- DEBUG(dbgs() << " *** Max MOps " << CurrMOps
- << " at cycle " << CurrCycle << '\n');
- }
+ assert(
+ (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
+ "Cannot schedule this instruction's MicroOps in the current cycle.");
+
unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
DEBUG(dbgs() << " Ready @" << ReadyCycle << "c\n");
+ unsigned NextCycle = CurrCycle;
switch (SchedModel->getMicroOpBufferSize()) {
case 0:
assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
@@ -2141,7 +1907,11 @@ void GenericScheduler::SchedBoundary::bumpNode(SUnit *SU) {
break;
default:
// We don't currently model the OOO reorder buffer, so consider all
- // scheduled MOps to be "retired".
+ // scheduled MOps to be "retired". We do loosely model in-order resource
+ // latency. If this instruction uses an in-order resource, account for any
+ // likely stall cycles.
+ if (SU->isUnbuffered && ReadyCycle > NextCycle)
+ NextCycle = ReadyCycle;
break;
}
RetiredMOps += IncMOps;
@@ -2169,10 +1939,29 @@ void GenericScheduler::SchedBoundary::bumpNode(SUnit *SU) {
PI = SchedModel->getWriteProcResBegin(SC),
PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
unsigned RCycle =
- countResource(PI->ProcResourceIdx, PI->Cycles, ReadyCycle);
+ countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
if (RCycle > NextCycle)
NextCycle = RCycle;
}
+ if (SU->hasReservedResource) {
+ // For reserved resources, record the highest cycle using the resource.
+ // For top-down scheduling, this is the cycle in which we schedule this
+    // instruction plus the number of cycles the operation reserves the
+    // resource. For bottom-up it is simply the instruction's cycle.
+ for (TargetSchedModel::ProcResIter
+ PI = SchedModel->getWriteProcResBegin(SC),
+ PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
+ unsigned PIdx = PI->ProcResourceIdx;
+ if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
+ if (isTop()) {
+ ReservedCycles[PIdx] =
+ std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles);
+ }
+ else
+ ReservedCycles[PIdx] = NextCycle;
+ }
+ }
+ }
}
// Update ExpectedLatency and DependentLatency.
unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
@@ -2193,18 +1982,28 @@ void GenericScheduler::SchedBoundary::bumpNode(SUnit *SU) {
}
else {
// After updating ZoneCritResIdx and ExpectedLatency, check if we're
- // resource limited. If a stall occured, bumpCycle does this.
+ // resource limited. If a stall occurred, bumpCycle does this.
unsigned LFactor = SchedModel->getLatencyFactor();
IsResourceLimited =
(int)(getCriticalCount() - (getScheduledLatency() * LFactor))
> (int)LFactor;
}
+ // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
+ // resets CurrMOps. Loop to handle instructions with more MOps than issue in
+ // one cycle. Since we commonly reach the max MOps here, opportunistically
+ // bump the cycle to avoid uselessly checking everything in the readyQ.
+ CurrMOps += IncMOps;
+ while (CurrMOps >= SchedModel->getIssueWidth()) {
+ DEBUG(dbgs() << " *** Max MOps " << CurrMOps
+ << " at cycle " << CurrCycle << '\n');
+ bumpCycle(++NextCycle);
+ }
DEBUG(dumpScheduledState());
}
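
The hasReservedResource block added to bumpNode above records, per unbuffered resource, when it is next free: top-down scheduling stores the issue cycle plus the reservation length, while bottom-up stores only the issue cycle and getNextResourceCycle adds the occupancy back when the next candidate is checked. A minimal standalone sketch of that bookkeeping, with a plain vector standing in for ReservedCycles and hypothetical resource indices:

#include <vector>
#include <algorithm>
#include <cassert>

static const unsigned InvalidCycle = ~0u;

struct ReservationTable {
  std::vector<unsigned> ReservedCycles; // one slot per processor resource
  bool IsTop;                           // scheduling direction

  ReservationTable(unsigned NumRes, bool Top)
      : ReservedCycles(NumRes, InvalidCycle), IsTop(Top) {}

  // Mirrors SchedBoundary::getNextResourceCycle in spirit.
  unsigned nextResourceCycle(unsigned PIdx, unsigned Cycles) const {
    unsigned Next = ReservedCycles[PIdx];
    if (Next == InvalidCycle)
      return 0;                 // never used: available immediately
    if (!IsTop)
      Next += Cycles;           // bottom-up: add this op's own occupancy
    return Next;
  }

  // Mirrors the bumpNode update for a reserved (BufferSize == 0) resource.
  void reserve(unsigned PIdx, unsigned IssueCycle, unsigned Cycles) {
    if (IsTop)
      ReservedCycles[PIdx] =
          std::max(nextResourceCycle(PIdx, 0), IssueCycle + Cycles);
    else
      ReservedCycles[PIdx] = IssueCycle;
  }
};

int main() {
  ReservationTable T(4, /*Top=*/true);
  T.reserve(/*PIdx=*/2, /*IssueCycle=*/3, /*Cycles=*/5);
  assert(T.nextResourceCycle(2, 5) == 8); // busy until cycle 8 top-down
  return 0;
}
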
/// Release pending ready nodes in to the available queue. This makes them
/// visible to heuristics.
-void GenericScheduler::SchedBoundary::releasePending() {
+void SchedBoundary::releasePending() {
// If the available queue is empty, it is safe to reset MinReadyCycle.
if (Available.empty())
MinReadyCycle = UINT_MAX;
@@ -2234,7 +2033,7 @@ void GenericScheduler::SchedBoundary::releasePending() {
}
/// Remove SU from the ready set for this boundary.
-void GenericScheduler::SchedBoundary::removeReady(SUnit *SU) {
+void SchedBoundary::removeReady(SUnit *SU) {
if (Available.isInQueue(SU))
Available.remove(Available.find(SU));
else {
@@ -2246,7 +2045,7 @@ void GenericScheduler::SchedBoundary::removeReady(SUnit *SU) {
/// If this queue only has one ready candidate, return it. As a side effect,
/// defer any nodes that now hit a hazard, and advance the cycle until at least
/// one node is ready. If multiple instructions are ready, return NULL.
-SUnit *GenericScheduler::SchedBoundary::pickOnlyChoice() {
+SUnit *SchedBoundary::pickOnlyChoice() {
if (CheckPending)
releasePending();
@@ -2262,20 +2061,22 @@ SUnit *GenericScheduler::SchedBoundary::pickOnlyChoice() {
}
}
for (unsigned i = 0; Available.empty(); ++i) {
- assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedLatency) &&
- "permanent hazard"); (void)i;
+// FIXME: Re-enable assert once PR20057 is resolved.
+// assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
+// "permanent hazard");
+ (void)i;
bumpCycle(CurrCycle + 1);
releasePending();
}
if (Available.size() == 1)
return *Available.begin();
- return NULL;
+ return nullptr;
}
#ifndef NDEBUG
// This is useful information to dump after bumpNode.
// Note that the Queue contents are more useful before pickNodeFromQueue.
-void GenericScheduler::SchedBoundary::dumpScheduledState() {
+void SchedBoundary::dumpScheduledState() {
unsigned ResFactor;
unsigned ResCount;
if (ZoneCritResIdx) {
@@ -2291,14 +2092,19 @@ void GenericScheduler::SchedBoundary::dumpScheduledState() {
<< " Retired: " << RetiredMOps;
dbgs() << "\n Executed: " << getExecutedCount() / LFactor << "c";
dbgs() << "\n Critical: " << ResCount / LFactor << "c, "
- << ResCount / ResFactor << " " << getResourceName(ZoneCritResIdx)
+ << ResCount / ResFactor << " "
+ << SchedModel->getResourceName(ZoneCritResIdx)
<< "\n ExpectedLatency: " << ExpectedLatency << "c\n"
<< (IsResourceLimited ? " - Resource" : " - Latency")
<< " limited.\n";
}
#endif
-void GenericScheduler::SchedCandidate::
+//===----------------------------------------------------------------------===//
+// GenericScheduler - Generic implementation of MachineSchedStrategy.
+//===----------------------------------------------------------------------===//
+
+void GenericSchedulerBase::SchedCandidate::
initResourceDelta(const ScheduleDAGMI *DAG,
const TargetSchedModel *SchedModel) {
if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
@@ -2315,12 +2121,162 @@ initResourceDelta(const ScheduleDAGMI *DAG,
}
}
+/// Set the CandPolicy for a scheduling zone given the current resources and
+/// latencies inside and outside the zone.
+void GenericSchedulerBase::setPolicy(CandPolicy &Policy,
+ bool IsPostRA,
+ SchedBoundary &CurrZone,
+ SchedBoundary *OtherZone) {
+  // Apply preemptive heuristics based on the total latency and resources
+ // inside and outside this zone. Potential stalls should be considered before
+ // following this policy.
+
+ // Compute remaining latency. We need this both to determine whether the
+ // overall schedule has become latency-limited and whether the instructions
+ // outside this zone are resource or latency limited.
+ //
+ // The "dependent" latency is updated incrementally during scheduling as the
+ // max height/depth of scheduled nodes minus the cycles since it was
+ // scheduled:
+ // DLat = max (N.depth - (CurrCycle - N.ReadyCycle) for N in Zone
+ //
+ // The "independent" latency is the max ready queue depth:
+ // ILat = max N.depth for N in Available|Pending
+ //
+ // RemainingLatency is the greater of independent and dependent latency.
+ unsigned RemLatency = CurrZone.getDependentLatency();
+ RemLatency = std::max(RemLatency,
+ CurrZone.findMaxLatency(CurrZone.Available.elements()));
+ RemLatency = std::max(RemLatency,
+ CurrZone.findMaxLatency(CurrZone.Pending.elements()));
+
+ // Compute the critical resource outside the zone.
+ unsigned OtherCritIdx = 0;
+ unsigned OtherCount =
+ OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;
+
+ bool OtherResLimited = false;
+ if (SchedModel->hasInstrSchedModel()) {
+ unsigned LFactor = SchedModel->getLatencyFactor();
+ OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
+ }
+ // Schedule aggressively for latency in PostRA mode. We don't check for
+ // acyclic latency during PostRA, and highly out-of-order processors will
+ // skip PostRA scheduling.
+ if (!OtherResLimited) {
+ if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
+ Policy.ReduceLatency |= true;
+ DEBUG(dbgs() << " " << CurrZone.Available.getName()
+ << " RemainingLatency " << RemLatency << " + "
+ << CurrZone.getCurrCycle() << "c > CritPath "
+ << Rem.CriticalPath << "\n");
+ }
+ }
+ // If the same resource is limiting inside and outside the zone, do nothing.
+ if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
+ return;
+
+ DEBUG(
+ if (CurrZone.isResourceLimited()) {
+ dbgs() << " " << CurrZone.Available.getName() << " ResourceLimited: "
+ << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
+ << "\n";
+ }
+ if (OtherResLimited)
+ dbgs() << " RemainingLimit: "
+ << SchedModel->getResourceName(OtherCritIdx) << "\n";
+ if (!CurrZone.isResourceLimited() && !OtherResLimited)
+ dbgs() << " Latency limited both directions.\n");
+
+ if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
+ Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();
+
+ if (OtherResLimited)
+ Policy.DemandResIdx = OtherCritIdx;
+}
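
The setPolicy code above decides whether a zone should chase latency or resources: RemLatency is the larger of the zone's dependent latency and the deepest node still queued, and the other zone counts as resource limited when its scaled critical-resource count exceeds RemLatency * LatencyFactor by more than one factor. A small standalone illustration of those two tests with made-up numbers (plain functions, not the real SchedBoundary API):

#include <algorithm>

// Resource-limited test from the diff: compare the scaled resource count
// against the scaled remaining latency.
bool otherResLimited(unsigned OtherCount, unsigned RemLatency,
                     unsigned LatencyFactor) {
  return (int)(OtherCount - RemLatency * LatencyFactor) > (int)LatencyFactor;
}

int main() {
  unsigned DependentLatency = 12, MaxAvailDepth = 18, MaxPendingDepth = 9;
  unsigned RemLatency =
      std::max(DependentLatency, std::max(MaxAvailDepth, MaxPendingDepth));

  unsigned LFactor = 2;            // hypothetical latency factor
  unsigned OtherCount = 40;        // hypothetical scaled resource count
  // 40 - 18*2 = 4 > 2, so the other zone is resource limited here.
  return otherResLimited(OtherCount, RemLatency, LFactor) ? 0 : 1;
}
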
+
+#ifndef NDEBUG
+const char *GenericSchedulerBase::getReasonStr(
+ GenericSchedulerBase::CandReason Reason) {
+ switch (Reason) {
+ case NoCand: return "NOCAND ";
+ case PhysRegCopy: return "PREG-COPY";
+ case RegExcess: return "REG-EXCESS";
+ case RegCritical: return "REG-CRIT ";
+ case Stall: return "STALL ";
+ case Cluster: return "CLUSTER ";
+ case Weak: return "WEAK ";
+ case RegMax: return "REG-MAX ";
+ case ResourceReduce: return "RES-REDUCE";
+ case ResourceDemand: return "RES-DEMAND";
+ case TopDepthReduce: return "TOP-DEPTH ";
+ case TopPathReduce: return "TOP-PATH ";
+ case BotHeightReduce:return "BOT-HEIGHT";
+ case BotPathReduce: return "BOT-PATH ";
+ case NextDefUse: return "DEF-USE ";
+ case NodeOrder: return "ORDER ";
+ };
+ llvm_unreachable("Unknown reason!");
+}
+
+void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
+ PressureChange P;
+ unsigned ResIdx = 0;
+ unsigned Latency = 0;
+ switch (Cand.Reason) {
+ default:
+ break;
+ case RegExcess:
+ P = Cand.RPDelta.Excess;
+ break;
+ case RegCritical:
+ P = Cand.RPDelta.CriticalMax;
+ break;
+ case RegMax:
+ P = Cand.RPDelta.CurrentMax;
+ break;
+ case ResourceReduce:
+ ResIdx = Cand.Policy.ReduceResIdx;
+ break;
+ case ResourceDemand:
+ ResIdx = Cand.Policy.DemandResIdx;
+ break;
+ case TopDepthReduce:
+ Latency = Cand.SU->getDepth();
+ break;
+ case TopPathReduce:
+ Latency = Cand.SU->getHeight();
+ break;
+ case BotHeightReduce:
+ Latency = Cand.SU->getHeight();
+ break;
+ case BotPathReduce:
+ Latency = Cand.SU->getDepth();
+ break;
+ }
+ dbgs() << " SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
+ if (P.isValid())
+ dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
+ << ":" << P.getUnitInc() << " ";
+ else
+ dbgs() << " ";
+ if (ResIdx)
+ dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
+ else
+ dbgs() << " ";
+ if (Latency)
+ dbgs() << " " << Latency << " cycles ";
+ else
+ dbgs() << " ";
+ dbgs() << '\n';
+}
+#endif
/// Return true if this heuristic determines order.
static bool tryLess(int TryVal, int CandVal,
- GenericScheduler::SchedCandidate &TryCand,
- GenericScheduler::SchedCandidate &Cand,
- GenericScheduler::CandReason Reason) {
+ GenericSchedulerBase::SchedCandidate &TryCand,
+ GenericSchedulerBase::SchedCandidate &Cand,
+ GenericSchedulerBase::CandReason Reason) {
if (TryVal < CandVal) {
TryCand.Reason = Reason;
return true;
@@ -2335,9 +2291,9 @@ static bool tryLess(int TryVal, int CandVal,
}
static bool tryGreater(int TryVal, int CandVal,
- GenericScheduler::SchedCandidate &TryCand,
- GenericScheduler::SchedCandidate &Cand,
- GenericScheduler::CandReason Reason) {
+ GenericSchedulerBase::SchedCandidate &TryCand,
+ GenericSchedulerBase::SchedCandidate &Cand,
+ GenericSchedulerBase::CandReason Reason) {
if (TryVal > CandVal) {
TryCand.Reason = Reason;
return true;
@@ -2351,11 +2307,172 @@ static bool tryGreater(int TryVal, int CandVal,
return false;
}
+static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
+ GenericSchedulerBase::SchedCandidate &Cand,
+ SchedBoundary &Zone) {
+ if (Zone.isTop()) {
+ if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
+ if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
+ TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
+ return true;
+ }
+ if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
+ TryCand, Cand, GenericSchedulerBase::TopPathReduce))
+ return true;
+ }
+ else {
+ if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
+ if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
+ TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
+ return true;
+ }
+ if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
+ TryCand, Cand, GenericSchedulerBase::BotPathReduce))
+ return true;
+ }
+ return false;
+}
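
The tryLess/tryGreater/tryLatency helpers above implement a "first decisive heuristic wins" cascade: each comparison either records a reason on one candidate and stops the walk, or falls through to the next heuristic. A deliberately simplified, hypothetical sketch of that pattern (the real helpers also handle ties and downgrade the incumbent's reason):

#include <iostream>

// Hypothetical, simplified candidate: just a node id and the reason it won.
enum Reason { NoCand, Stall, Cluster, NodeOrder };

struct Cand {
  unsigned Node;
  Reason Why;
};

// Simplified flavor of tryLess: the lower value wins and records Reason.
bool tryLess(int TryVal, int CandVal, Cand &Try, Reason R) {
  if (TryVal < CandVal) {
    Try.Why = R;
    return true;
  }
  return false;
}

int main() {
  Cand Incumbent{7, NodeOrder};
  Cand Try{9, NoCand};
  // Candidate 9 stalls fewer cycles (1 vs 3), so it wins on Stall and the
  // remaining heuristics are never consulted.
  if (tryLess(/*TryStall=*/1, /*CandStall=*/3, Try, Stall))
    Incumbent = Try;
  std::cout << "picked SU(" << Incumbent.Node << ") reason=" << Incumbent.Why
            << "\n";
  return 0;
}
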
+
+static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand,
+ bool IsTop) {
+ DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
+ << GenericSchedulerBase::getReasonStr(Cand.Reason) << '\n');
+}
+
+void GenericScheduler::initialize(ScheduleDAGMI *dag) {
+ assert(dag->hasVRegLiveness() &&
+ "(PreRA)GenericScheduler needs vreg liveness");
+ DAG = static_cast<ScheduleDAGMILive*>(dag);
+ SchedModel = DAG->getSchedModel();
+ TRI = DAG->TRI;
+
+ Rem.init(DAG, SchedModel);
+ Top.init(DAG, SchedModel, &Rem);
+ Bot.init(DAG, SchedModel, &Rem);
+
+ // Initialize resource counts.
+
+ // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
+ // are disabled, then these HazardRecs will be disabled.
+ const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
+ const TargetMachine &TM = DAG->MF.getTarget();
+ if (!Top.HazardRec) {
+ Top.HazardRec =
+ TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
+ }
+ if (!Bot.HazardRec) {
+ Bot.HazardRec =
+ TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
+ }
+}
+
+/// Initialize the per-region scheduling policy.
+void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
+ MachineBasicBlock::iterator End,
+ unsigned NumRegionInstrs) {
+ const TargetMachine &TM = Context->MF->getTarget();
+ const TargetLowering *TLI = TM.getTargetLowering();
+
+ // Avoid setting up the register pressure tracker for small regions to save
+ // compile time. As a rough heuristic, only track pressure when the number of
+ // schedulable instructions exceeds half the integer register file.
+ RegionPolicy.ShouldTrackPressure = true;
+ for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
+ MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
+ if (TLI->isTypeLegal(LegalIntVT)) {
+ unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
+ TLI->getRegClassFor(LegalIntVT));
+ RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
+ }
+ }
+
+ // For generic targets, we default to bottom-up, because it's simpler and more
+ // compile-time optimizations have been implemented in that direction.
+ RegionPolicy.OnlyBottomUp = true;
+
+ // Allow the subtarget to override default policy.
+ const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
+ ST.overrideSchedPolicy(RegionPolicy, Begin, End, NumRegionInstrs);
+
+ // After subtarget overrides, apply command line options.
+ if (!EnableRegPressure)
+ RegionPolicy.ShouldTrackPressure = false;
+
+ // Check -misched-topdown/bottomup can force or unforce scheduling direction.
+ // e.g. -misched-bottomup=false allows scheduling in both directions.
+ assert((!ForceTopDown || !ForceBottomUp) &&
+ "-misched-topdown incompatible with -misched-bottomup");
+ if (ForceBottomUp.getNumOccurrences() > 0) {
+ RegionPolicy.OnlyBottomUp = ForceBottomUp;
+ if (RegionPolicy.OnlyBottomUp)
+ RegionPolicy.OnlyTopDown = false;
+ }
+ if (ForceTopDown.getNumOccurrences() > 0) {
+ RegionPolicy.OnlyTopDown = ForceTopDown;
+ if (RegionPolicy.OnlyTopDown)
+ RegionPolicy.OnlyBottomUp = false;
+ }
+}
+
+/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
+/// critical path by more cycles than it takes to drain the instruction buffer.
+/// We estimate an upper bound on in-flight instructions as:
+///
+/// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
+/// InFlightIterations = AcyclicPath / CyclesPerIteration
+/// InFlightResources = InFlightIterations * LoopResources
+///
+/// TODO: Check execution resources in addition to IssueCount.
+void GenericScheduler::checkAcyclicLatency() {
+ if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
+ return;
+
+ // Scaled number of cycles per loop iteration.
+ unsigned IterCount =
+ std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
+ Rem.RemIssueCount);
+ // Scaled acyclic critical path.
+ unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
+ // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
+ unsigned InFlightCount =
+ (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
+ unsigned BufferLimit =
+ SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
+
+ Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
+
+ DEBUG(dbgs() << "IssueCycles="
+ << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
+ << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
+ << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
+ << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
+ << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
+ if (Rem.IsAcyclicLatencyLimited)
+ dbgs() << " ACYCLIC LATENCY LIMIT\n");
+}
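
A worked standalone example of the buffer-limit estimate described in the checkAcyclicLatency comment above, with hypothetical scaled counts and the same rounding-up division used in the diff:

int main() {
  // Hypothetical values, already scaled the way the scheduler scales them.
  unsigned CyclicCritPath = 4, CriticalPath = 20;
  unsigned LatencyFactor = 2, MicroOpFactor = 1;
  unsigned RemIssueCount = 10;        // remaining micro-ops (scaled)
  unsigned MicroOpBufferSize = 8;

  unsigned IterCount =
      CyclicCritPath * LatencyFactor > RemIssueCount
          ? CyclicCritPath * LatencyFactor
          : RemIssueCount;                                        // max(8, 10) = 10
  unsigned AcyclicCount = CriticalPath * LatencyFactor;           // 40
  unsigned InFlightCount =
      (AcyclicCount * RemIssueCount + IterCount - 1) / IterCount; // 40
  unsigned BufferLimit = MicroOpBufferSize * MicroOpFactor;       // 8

  // 40 > 8, so this loop would be marked acyclic-latency limited.
  return InFlightCount > BufferLimit ? 0 : 1;
}
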
+
+void GenericScheduler::registerRoots() {
+ Rem.CriticalPath = DAG->ExitSU.getDepth();
+
+ // Some roots may not feed into ExitSU. Check all of them in case.
+ for (std::vector<SUnit*>::const_iterator
+ I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
+ if ((*I)->getDepth() > Rem.CriticalPath)
+ Rem.CriticalPath = (*I)->getDepth();
+ }
+ DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
+
+ if (EnableCyclicPath) {
+ Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
+ checkAcyclicLatency();
+ }
+}
+
static bool tryPressure(const PressureChange &TryP,
const PressureChange &CandP,
- GenericScheduler::SchedCandidate &TryCand,
- GenericScheduler::SchedCandidate &Cand,
- GenericScheduler::CandReason Reason) {
+ GenericSchedulerBase::SchedCandidate &TryCand,
+ GenericSchedulerBase::SchedCandidate &Cand,
+ GenericSchedulerBase::CandReason Reason) {
int TryRank = TryP.getPSetOrMax();
int CandRank = CandP.getPSetOrMax();
// If both candidates affect the same set, go with the smallest increase.
@@ -2407,32 +2524,6 @@ static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
return 0;
}
-static bool tryLatency(GenericScheduler::SchedCandidate &TryCand,
- GenericScheduler::SchedCandidate &Cand,
- GenericScheduler::SchedBoundary &Zone) {
- if (Zone.isTop()) {
- if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
- if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
- TryCand, Cand, GenericScheduler::TopDepthReduce))
- return true;
- }
- if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
- TryCand, Cand, GenericScheduler::TopPathReduce))
- return true;
- }
- else {
- if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
- if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
- TryCand, Cand, GenericScheduler::BotHeightReduce))
- return true;
- }
- if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
- TryCand, Cand, GenericScheduler::BotPathReduce))
- return true;
- }
- return false;
-}
-
/// Apply a set of heuristics to a new candidate. Heuristics are currently
/// hierarchical. This may be more efficient than a graduated cost model because
/// we don't need to evaluate all aspects of the model for each node in the
@@ -2445,10 +2536,10 @@ static bool tryLatency(GenericScheduler::SchedCandidate &TryCand,
/// \param RPTracker describes reg pressure within the scheduled zone.
/// \param TempTracker is a scratch pressure tracker to reuse in queries.
void GenericScheduler::tryCandidate(SchedCandidate &Cand,
- SchedCandidate &TryCand,
- SchedBoundary &Zone,
- const RegPressureTracker &RPTracker,
- RegPressureTracker &TempTracker) {
+ SchedCandidate &TryCand,
+ SchedBoundary &Zone,
+ const RegPressureTracker &RPTracker,
+ RegPressureTracker &TempTracker) {
if (DAG->isTrackingPressure()) {
// Always initialize TryCand's RPDelta.
@@ -2510,10 +2601,15 @@ void GenericScheduler::tryCandidate(SchedCandidate &Cand,
// For loops that are acyclic path limited, aggressively schedule for latency.
// This can result in very long dependence chains scheduled in sequence, so
// once every cycle (when CurrMOps == 0), switch to normal heuristics.
- if (Rem.IsAcyclicLatencyLimited && !Zone.CurrMOps
+ if (Rem.IsAcyclicLatencyLimited && !Zone.getCurrMOps()
&& tryLatency(TryCand, Cand, Zone))
return;
+ // Prioritize instructions that read unbuffered resources by stall cycles.
+ if (tryLess(Zone.getLatencyStallCycles(TryCand.SU),
+ Zone.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
+ return;
+
// Keep clustered nodes together to encourage downstream peephole
// optimizations which may reduce resource requirements.
//
@@ -2558,7 +2654,7 @@ void GenericScheduler::tryCandidate(SchedCandidate &Cand,
// Prefer immediate defs/users of the last scheduled instruction. This is a
// local pressure avoidance strategy that also makes the machine code
// readable.
- if (tryGreater(Zone.NextSUs.count(TryCand.SU), Zone.NextSUs.count(Cand.SU),
+ if (tryGreater(Zone.isNextSU(TryCand.SU), Zone.isNextSU(Cand.SU),
TryCand, Cand, NextDefUse))
return;
@@ -2569,90 +2665,14 @@ void GenericScheduler::tryCandidate(SchedCandidate &Cand,
}
}
-#ifndef NDEBUG
-const char *GenericScheduler::getReasonStr(
- GenericScheduler::CandReason Reason) {
- switch (Reason) {
- case NoCand: return "NOCAND ";
- case PhysRegCopy: return "PREG-COPY";
- case RegExcess: return "REG-EXCESS";
- case RegCritical: return "REG-CRIT ";
- case Cluster: return "CLUSTER ";
- case Weak: return "WEAK ";
- case RegMax: return "REG-MAX ";
- case ResourceReduce: return "RES-REDUCE";
- case ResourceDemand: return "RES-DEMAND";
- case TopDepthReduce: return "TOP-DEPTH ";
- case TopPathReduce: return "TOP-PATH ";
- case BotHeightReduce:return "BOT-HEIGHT";
- case BotPathReduce: return "BOT-PATH ";
- case NextDefUse: return "DEF-USE ";
- case NodeOrder: return "ORDER ";
- };
- llvm_unreachable("Unknown reason!");
-}
-
-void GenericScheduler::traceCandidate(const SchedCandidate &Cand) {
- PressureChange P;
- unsigned ResIdx = 0;
- unsigned Latency = 0;
- switch (Cand.Reason) {
- default:
- break;
- case RegExcess:
- P = Cand.RPDelta.Excess;
- break;
- case RegCritical:
- P = Cand.RPDelta.CriticalMax;
- break;
- case RegMax:
- P = Cand.RPDelta.CurrentMax;
- break;
- case ResourceReduce:
- ResIdx = Cand.Policy.ReduceResIdx;
- break;
- case ResourceDemand:
- ResIdx = Cand.Policy.DemandResIdx;
- break;
- case TopDepthReduce:
- Latency = Cand.SU->getDepth();
- break;
- case TopPathReduce:
- Latency = Cand.SU->getHeight();
- break;
- case BotHeightReduce:
- Latency = Cand.SU->getHeight();
- break;
- case BotPathReduce:
- Latency = Cand.SU->getDepth();
- break;
- }
- dbgs() << " SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
- if (P.isValid())
- dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
- << ":" << P.getUnitInc() << " ";
- else
- dbgs() << " ";
- if (ResIdx)
- dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
- else
- dbgs() << " ";
- if (Latency)
- dbgs() << " " << Latency << " cycles ";
- else
- dbgs() << " ";
- dbgs() << '\n';
-}
-#endif
-
/// Pick the best candidate from the queue.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
/// DAG building. To adjust for the current scheduling location we need to
/// maintain the number of vreg uses remaining to be top-scheduled.
void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
- const RegPressureTracker &RPTracker,
- SchedCandidate &Cand) {
+ const RegPressureTracker &RPTracker,
+ SchedCandidate &Cand) {
ReadyQueue &Q = Zone.Available;
DEBUG(Q.dump());
@@ -2675,12 +2695,6 @@ void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
}
}
-static void tracePick(const GenericScheduler::SchedCandidate &Cand,
- bool IsTop) {
- DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
- << GenericScheduler::getReasonStr(Cand.Reason) << '\n');
-}
-
/// Pick the best candidate node from either the top or bottom queue.
SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
// Schedule as far as possible in the direction of no choice. This is most
@@ -2698,8 +2712,12 @@ SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
CandPolicy NoPolicy;
SchedCandidate BotCand(NoPolicy);
SchedCandidate TopCand(NoPolicy);
- Bot.setPolicy(BotCand.Policy, Top);
- Top.setPolicy(TopCand.Policy, Bot);
+ // Set the bottom-up policy based on the state of the current bottom zone and
+ // the instructions outside the zone, including the top zone.
+ setPolicy(BotCand.Policy, /*IsPostRA=*/false, Bot, &Top);
+ // Set the top-down policy based on the state of the current top zone and
+ // the instructions outside the zone, including the bottom zone.
+ setPolicy(TopCand.Policy, /*IsPostRA=*/false, Top, &Bot);
// Prefer bottom scheduling when heuristics are silent.
pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
@@ -2741,7 +2759,7 @@ SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
if (DAG->top() == DAG->bottom()) {
assert(Top.Available.empty() && Top.Pending.empty() &&
Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
- return NULL;
+ return nullptr;
}
SUnit *SU;
do {
@@ -2809,20 +2827,21 @@ void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
}
/// Update the scheduler's state after scheduling a node. This is the same node
-/// that was just returned by pickNode(). However, ScheduleDAGMI needs to update
-/// it's state based on the current cycle before MachineSchedStrategy does.
+/// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
+/// update its state based on the current cycle before MachineSchedStrategy
+/// does.
///
/// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
/// them here. See comments in biasPhysRegCopy.
void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
if (IsTopNode) {
- SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.CurrCycle);
+ SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
Top.bumpNode(SU);
if (SU->hasPhysRegUses)
reschedulePhysRegCopies(SU, true);
}
else {
- SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.CurrCycle);
+ SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
Bot.bumpNode(SU);
if (SU->hasPhysRegDefs)
reschedulePhysRegCopies(SU, false);
@@ -2831,23 +2850,155 @@ void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
-static ScheduleDAGInstrs *createGenericSched(MachineSchedContext *C) {
- ScheduleDAGMI *DAG = new ScheduleDAGMI(C, new GenericScheduler(C));
+static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) {
+ ScheduleDAGMILive *DAG = new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
// Register DAG post-processors.
//
// FIXME: extend the mutation API to allow earlier mutations to instantiate
// data and pass it to later mutations. Have a single mutation that gathers
// the interesting nodes in one pass.
- DAG->addMutation(new CopyConstrain(DAG->TII, DAG->TRI));
+ DAG->addMutation(make_unique<CopyConstrain>(DAG->TII, DAG->TRI));
if (EnableLoadCluster && DAG->TII->enableClusterLoads())
- DAG->addMutation(new LoadClusterMutation(DAG->TII, DAG->TRI));
+ DAG->addMutation(make_unique<LoadClusterMutation>(DAG->TII, DAG->TRI));
if (EnableMacroFusion)
- DAG->addMutation(new MacroFusion(DAG->TII));
+ DAG->addMutation(make_unique<MacroFusion>(DAG->TII));
return DAG;
}
+
static MachineSchedRegistry
GenericSchedRegistry("converge", "Standard converging scheduler.",
- createGenericSched);
+ createGenericSchedLive);
+
+//===----------------------------------------------------------------------===//
+// PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
+//===----------------------------------------------------------------------===//
+
+void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
+ DAG = Dag;
+ SchedModel = DAG->getSchedModel();
+ TRI = DAG->TRI;
+
+ Rem.init(DAG, SchedModel);
+ Top.init(DAG, SchedModel, &Rem);
+ BotRoots.clear();
+
+ // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
+ // or are disabled, then these HazardRecs will be disabled.
+ const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
+ const TargetMachine &TM = DAG->MF.getTarget();
+ if (!Top.HazardRec) {
+ Top.HazardRec =
+ TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
+ }
+}
+
+
+void PostGenericScheduler::registerRoots() {
+ Rem.CriticalPath = DAG->ExitSU.getDepth();
+
+ // Some roots may not feed into ExitSU. Check all of them in case.
+ for (SmallVectorImpl<SUnit*>::const_iterator
+ I = BotRoots.begin(), E = BotRoots.end(); I != E; ++I) {
+ if ((*I)->getDepth() > Rem.CriticalPath)
+ Rem.CriticalPath = (*I)->getDepth();
+ }
+ DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
+}
+
+/// Apply a set of heuristics to a new candidate for PostRA scheduling.
+///
+/// \param Cand provides the policy and current best candidate.
+/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
+void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
+ SchedCandidate &TryCand) {
+
+ // Initialize the candidate if needed.
+ if (!Cand.isValid()) {
+ TryCand.Reason = NodeOrder;
+ return;
+ }
+
+ // Prioritize instructions that read unbuffered resources by stall cycles.
+ if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
+ Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
+ return;
+
+ // Avoid critical resource consumption and balance the schedule.
+ if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
+ TryCand, Cand, ResourceReduce))
+ return;
+ if (tryGreater(TryCand.ResDelta.DemandedResources,
+ Cand.ResDelta.DemandedResources,
+ TryCand, Cand, ResourceDemand))
+ return;
+
+ // Avoid serializing long latency dependence chains.
+ if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
+ return;
+ }
+
+ // Fall through to original instruction order.
+ if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
+ TryCand.Reason = NodeOrder;
+}
+
+void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
+ ReadyQueue &Q = Top.Available;
+
+ DEBUG(Q.dump());
+
+ for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
+ SchedCandidate TryCand(Cand.Policy);
+ TryCand.SU = *I;
+ TryCand.initResourceDelta(DAG, SchedModel);
+ tryCandidate(Cand, TryCand);
+ if (TryCand.Reason != NoCand) {
+ Cand.setBest(TryCand);
+ DEBUG(traceCandidate(Cand));
+ }
+ }
+}
+
+/// Pick the next node to schedule.
+SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
+ if (DAG->top() == DAG->bottom()) {
+ assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
+ return nullptr;
+ }
+ SUnit *SU;
+ do {
+ SU = Top.pickOnlyChoice();
+ if (!SU) {
+ CandPolicy NoPolicy;
+ SchedCandidate TopCand(NoPolicy);
+ // Set the top-down policy based on the state of the current top zone and
+ // the instructions outside the zone, including the bottom zone.
+ setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
+ pickNodeFromQueue(TopCand);
+ assert(TopCand.Reason != NoCand && "failed to find a candidate");
+ tracePick(TopCand, true);
+ SU = TopCand.SU;
+ }
+ } while (SU->isScheduled);
+
+ IsTopNode = true;
+ Top.removeReady(SU);
+
+ DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
+ return SU;
+}
+
+/// Called after ScheduleDAGMI has scheduled an instruction and updated
+/// scheduled/remaining flags in the DAG nodes.
+void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
+ SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
+ Top.bumpNode(SU);
+}
+
+/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
+static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C) {
+ return new ScheduleDAGMI(C, make_unique<PostGenericScheduler>(C), /*IsPostRA=*/true);
+}
//===----------------------------------------------------------------------===//
// ILP Scheduler. Currently for experimental analysis of heuristics.
@@ -2860,7 +3011,8 @@ struct ILPOrder {
const BitVector *ScheduledTrees;
bool MaximizeILP;
- ILPOrder(bool MaxILP): DFSResult(0), ScheduledTrees(0), MaximizeILP(MaxILP) {}
+ ILPOrder(bool MaxILP)
+ : DFSResult(nullptr), ScheduledTrees(nullptr), MaximizeILP(MaxILP) {}
/// \brief Apply a less-than relation on node priority.
///
@@ -2889,22 +3041,23 @@ struct ILPOrder {
/// \brief Schedule based on the ILP metric.
class ILPScheduler : public MachineSchedStrategy {
- ScheduleDAGMI *DAG;
+ ScheduleDAGMILive *DAG;
ILPOrder Cmp;
std::vector<SUnit*> ReadyQ;
public:
- ILPScheduler(bool MaximizeILP): DAG(0), Cmp(MaximizeILP) {}
+ ILPScheduler(bool MaximizeILP): DAG(nullptr), Cmp(MaximizeILP) {}
- virtual void initialize(ScheduleDAGMI *dag) {
- DAG = dag;
+ void initialize(ScheduleDAGMI *dag) override {
+ assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
+ DAG = static_cast<ScheduleDAGMILive*>(dag);
DAG->computeDFSResult();
Cmp.DFSResult = DAG->getDFSResult();
Cmp.ScheduledTrees = &DAG->getScheduledTrees();
ReadyQ.clear();
}
- virtual void registerRoots() {
+ void registerRoots() override {
// Restore the heap in ReadyQ with the updated DFS results.
std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
}
@@ -2913,8 +3066,8 @@ public:
/// -----------------------------------------
/// Callback to select the highest priority node from the ready Q.
- virtual SUnit *pickNode(bool &IsTopNode) {
- if (ReadyQ.empty()) return NULL;
+ SUnit *pickNode(bool &IsTopNode) override {
+ if (ReadyQ.empty()) return nullptr;
std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
SUnit *SU = ReadyQ.back();
ReadyQ.pop_back();
@@ -2929,19 +3082,19 @@ public:
}
/// \brief Scheduler callback to notify that a new subtree is scheduled.
- virtual void scheduleTree(unsigned SubtreeID) {
+ void scheduleTree(unsigned SubtreeID) override {
std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
}
/// Callback after a node is scheduled. Mark a newly scheduled tree, notify
/// DFSResults, and resort the priority Q.
- virtual void schedNode(SUnit *SU, bool IsTopNode) {
+ void schedNode(SUnit *SU, bool IsTopNode) override {
assert(!IsTopNode && "SchedDFSResult needs bottom-up");
}
- virtual void releaseTopNode(SUnit *) { /*only called for top roots*/ }
+ void releaseTopNode(SUnit *) override { /*only called for top roots*/ }
- virtual void releaseBottomNode(SUnit *SU) {
+ void releaseBottomNode(SUnit *SU) override {
ReadyQ.push_back(SU);
std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
}
@@ -2949,10 +3102,10 @@ public:
} // namespace
static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
- return new ScheduleDAGMI(C, new ILPScheduler(true));
+ return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(true));
}
static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
- return new ScheduleDAGMI(C, new ILPScheduler(false));
+ return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(false));
}
static MachineSchedRegistry ILPMaxRegistry(
"ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
@@ -2994,7 +3147,7 @@ public:
InstructionShuffler(bool alternate, bool topdown)
: IsAlternating(alternate), IsTopDown(topdown) {}
- virtual void initialize(ScheduleDAGMI *) {
+ void initialize(ScheduleDAGMI*) override {
TopQ.clear();
BottomQ.clear();
}
@@ -3002,11 +3155,11 @@ public:
/// Implement MachineSchedStrategy interface.
/// -----------------------------------------
- virtual SUnit *pickNode(bool &IsTopNode) {
+ SUnit *pickNode(bool &IsTopNode) override {
SUnit *SU;
if (IsTopDown) {
do {
- if (TopQ.empty()) return NULL;
+ if (TopQ.empty()) return nullptr;
SU = TopQ.top();
TopQ.pop();
} while (SU->isScheduled);
@@ -3014,7 +3167,7 @@ public:
}
else {
do {
- if (BottomQ.empty()) return NULL;
+ if (BottomQ.empty()) return nullptr;
SU = BottomQ.top();
BottomQ.pop();
} while (SU->isScheduled);
@@ -3025,12 +3178,12 @@ public:
return SU;
}
- virtual void schedNode(SUnit *SU, bool IsTopNode) {}
+ void schedNode(SUnit *SU, bool IsTopNode) override {}
- virtual void releaseTopNode(SUnit *SU) {
+ void releaseTopNode(SUnit *SU) override {
TopQ.push(SU);
}
- virtual void releaseBottomNode(SUnit *SU) {
+ void releaseBottomNode(SUnit *SU) override {
BottomQ.push(SU);
}
};
@@ -3041,7 +3194,7 @@ static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
bool TopDown = !ForceBottomUp;
assert((TopDown || !ForceTopDown) &&
"-misched-topdown incompatible with -misched-bottomup");
- return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown));
+ return new ScheduleDAGMILive(C, make_unique<InstructionShuffler>(Alternate, TopDown));
}
static MachineSchedRegistry ShufflerRegistry(
"shuffle", "Shuffle machine instructions alternating directions",
@@ -3049,7 +3202,7 @@ static MachineSchedRegistry ShufflerRegistry(
#endif // !NDEBUG
//===----------------------------------------------------------------------===//
-// GraphWriter support for ScheduleDAGMI.
+// GraphWriter support for ScheduleDAGMILive.
//===----------------------------------------------------------------------===//
#ifndef NDEBUG
@@ -3095,8 +3248,9 @@ struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
std::string Str;
raw_string_ostream SS(Str);
- const SchedDFSResult *DFS =
- static_cast<const ScheduleDAGMI*>(G)->getDFSResult();
+ const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
+ const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
+ static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
SS << "SU:" << SU->NodeNum;
if (DFS)
SS << " I:" << DFS->getNumInstrs(SU);
@@ -3106,11 +3260,11 @@ struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
return G->getGraphNodeLabel(SU);
}
- static std::string getNodeAttributes(const SUnit *N,
- const ScheduleDAG *Graph) {
+ static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
std::string Str("shape=Mrecord");
- const SchedDFSResult *DFS =
- static_cast<const ScheduleDAGMI*>(Graph)->getDFSResult();
+ const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
+ const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
+ static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
if (DFS) {
Str += ",style=filled,fillcolor=\"#";
Str += DOT::getColorString(DFS->getSubtreeID(N));
diff --git a/contrib/llvm/lib/CodeGen/MachineSink.cpp b/contrib/llvm/lib/CodeGen/MachineSink.cpp
index 105d7c2..f44e4d1 100644
--- a/contrib/llvm/lib/CodeGen/MachineSink.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineSink.cpp
@@ -16,7 +16,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "machine-sink"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
@@ -32,6 +31,8 @@
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "machine-sink"
+
static cl::opt<bool>
SplitEdges("machine-sink-split",
cl::desc("Split critical edges during machine sinking"),
@@ -60,9 +61,9 @@ namespace {
initializeMachineSinkingPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
AU.addRequired<AliasAnalysis>();
@@ -72,7 +73,7 @@ namespace {
AU.addPreserved<MachineLoopInfo>();
}
- virtual void releaseMemory() {
+ void releaseMemory() override {
CEBCandidates.clear();
}
@@ -98,16 +99,6 @@ namespace {
bool PerformTrivialForwardCoalescing(MachineInstr *MI,
MachineBasicBlock *MBB);
};
-
- // SuccessorSorter - Sort Successors according to their loop depth.
- struct SuccessorSorter {
- SuccessorSorter(MachineLoopInfo *LoopInfo) : LI(LoopInfo) {}
- bool operator()(const MachineBasicBlock *LHS,
- const MachineBasicBlock *RHS) const {
- return LI->getLoopDepth(LHS) < LI->getLoopDepth(RHS);
- }
- MachineLoopInfo *LI;
- };
} // end anonymous namespace
char MachineSinking::ID = 0;
@@ -181,13 +172,12 @@ MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
// Predecessors according to CFG: BB#0 BB#1
// %reg16386<def> = PHI %reg16434, <BB#0>, %reg16385, <BB#1>
BreakPHIEdge = true;
- for (MachineRegisterInfo::use_nodbg_iterator
- I = MRI->use_nodbg_begin(Reg), E = MRI->use_nodbg_end();
- I != E; ++I) {
- MachineInstr *UseInst = &*I;
+ for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
+ MachineInstr *UseInst = MO.getParent();
+ unsigned OpNo = &MO - &UseInst->getOperand(0);
MachineBasicBlock *UseBlock = UseInst->getParent();
if (!(UseBlock == MBB && UseInst->isPHI() &&
- UseInst->getOperand(I.getOperandNo()+1).getMBB() == DefMBB)) {
+ UseInst->getOperand(OpNo+1).getMBB() == DefMBB)) {
BreakPHIEdge = false;
break;
}
@@ -195,16 +185,15 @@ MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
if (BreakPHIEdge)
return true;
- for (MachineRegisterInfo::use_nodbg_iterator
- I = MRI->use_nodbg_begin(Reg), E = MRI->use_nodbg_end();
- I != E; ++I) {
+ for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
// Determine the block of the use.
- MachineInstr *UseInst = &*I;
+ MachineInstr *UseInst = MO.getParent();
+ unsigned OpNo = &MO - &UseInst->getOperand(0);
MachineBasicBlock *UseBlock = UseInst->getParent();
if (UseInst->isPHI()) {
// PHI nodes use the operand in the predecessor block, not the block with
// the PHI.
- UseBlock = UseInst->getOperand(I.getOperandNo()+1).getMBB();
+ UseBlock = UseInst->getOperand(OpNo+1).getMBB();
} else if (UseBlock == DefMBB) {
LocalUse = true;
return false;
@@ -219,6 +208,9 @@ MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
}
bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
+ if (skipOptnoneFunction(*MF.getFunction()))
+ return false;
+
DEBUG(dbgs() << "******** Machine Sinking ********\n");
const TargetMachine &TM = MF.getTarget();
@@ -341,16 +333,16 @@ MachineBasicBlock *MachineSinking::SplitCriticalEdge(MachineInstr *MI,
MachineBasicBlock *ToBB,
bool BreakPHIEdge) {
if (!isWorthBreakingCriticalEdge(MI, FromBB, ToBB))
- return 0;
+ return nullptr;
// Avoid breaking back edge. From == To means backedge for single BB loop.
if (!SplitEdges || FromBB == ToBB)
- return 0;
+ return nullptr;
// Check for backedges of more "complex" loops.
if (LI->getLoopFor(FromBB) == LI->getLoopFor(ToBB) &&
LI->isLoopHeader(ToBB))
- return 0;
+ return nullptr;
// It's not always legal to break critical edges and sink the computation
// to the edge.
@@ -397,7 +389,7 @@ MachineBasicBlock *MachineSinking::SplitCriticalEdge(MachineInstr *MI,
if (*PI == FromBB)
continue;
if (!DT->dominates(ToBB, *PI))
- return 0;
+ return nullptr;
}
}
@@ -460,12 +452,9 @@ bool MachineSinking::isProfitableToSinkTo(unsigned Reg, MachineInstr *MI,
// Check if only use in post dominated block is PHI instruction.
bool NonPHIUse = false;
- for (MachineRegisterInfo::use_nodbg_iterator
- I = MRI->use_nodbg_begin(Reg), E = MRI->use_nodbg_end();
- I != E; ++I) {
- MachineInstr *UseInst = &*I;
- MachineBasicBlock *UseBlock = UseInst->getParent();
- if (UseBlock == SuccToSinkTo && !UseInst->isPHI())
+ for (MachineInstr &UseInst : MRI->use_nodbg_instructions(Reg)) {
+ MachineBasicBlock *UseBlock = UseInst.getParent();
+ if (UseBlock == SuccToSinkTo && !UseInst.isPHI())
NonPHIUse = true;
}
if (!NonPHIUse)
@@ -496,7 +485,7 @@ MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
// SuccToSinkTo - This is the successor to sink this instruction to, once we
// decide.
- MachineBasicBlock *SuccToSinkTo = 0;
+ MachineBasicBlock *SuccToSinkTo = nullptr;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue; // Ignore non-register operands.
@@ -510,10 +499,10 @@ MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
// and we can freely move its uses. Alternatively, if it's allocatable,
// it could get allocated to something with a def during allocation.
if (!MRI->isConstantPhysReg(Reg, *MBB->getParent()))
- return NULL;
+ return nullptr;
} else if (!MO.isDead()) {
// A def that isn't dead. We can't move it.
- return NULL;
+ return nullptr;
}
} else {
// Virtual register uses are always safe to sink.
@@ -521,7 +510,7 @@ MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
// If it's not safe to move defs of the register class, then abort.
if (!TII->isSafeToMoveRegClassDefs(MRI->getRegClass(Reg)))
- return NULL;
+ return nullptr;
// FIXME: This picks a successor to sink into based on having one
// successor that dominates all the uses. However, there are cases where
@@ -544,7 +533,7 @@ MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
bool LocalUse = false;
if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB,
BreakPHIEdge, LocalUse))
- return NULL;
+ return nullptr;
continue;
}
@@ -553,7 +542,12 @@ MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
// we should sink to.
// We give successors with smaller loop depth higher priority.
SmallVector<MachineBasicBlock*, 4> Succs(MBB->succ_begin(), MBB->succ_end());
- std::stable_sort(Succs.begin(), Succs.end(), SuccessorSorter(LI));
+ // Sort Successors according to their loop depth.
+ std::stable_sort(
+ Succs.begin(), Succs.end(),
+ [this](const MachineBasicBlock *LHS, const MachineBasicBlock *RHS) {
+ return LI->getLoopDepth(LHS) < LI->getLoopDepth(RHS);
+ });
for (SmallVectorImpl<MachineBasicBlock *>::iterator SI = Succs.begin(),
E = Succs.end(); SI != E; ++SI) {
MachineBasicBlock *SuccBlock = *SI;
@@ -565,26 +559,26 @@ MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
}
if (LocalUse)
// Def is used locally, it's never safe to move this def.
- return NULL;
+ return nullptr;
}
// If we couldn't find a block to sink to, ignore this instruction.
- if (SuccToSinkTo == 0)
- return NULL;
- else if (!isProfitableToSinkTo(Reg, MI, MBB, SuccToSinkTo))
- return NULL;
+ if (!SuccToSinkTo)
+ return nullptr;
+ if (!isProfitableToSinkTo(Reg, MI, MBB, SuccToSinkTo))
+ return nullptr;
}
}
// It is not possible to sink an instruction into its own block. This can
// happen with loops.
if (MBB == SuccToSinkTo)
- return NULL;
+ return nullptr;
// It's not safe to sink instructions to an EH landing pad. Control flow into
// a landing pad is implicitly defined.
if (SuccToSinkTo && SuccToSinkTo->isLandingPad())
- return NULL;
+ return nullptr;
return SuccToSinkTo;
}
@@ -614,7 +608,7 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
MachineBasicBlock *SuccToSinkTo = FindSuccToSinkTo(MI, ParentBlock, BreakPHIEdge);
// If there are no outputs, it must have side-effects.
- if (SuccToSinkTo == 0)
+ if (!SuccToSinkTo)
return false;
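
The hunks above replace the explicit use_nodbg_iterator loops with the range-based
MachineRegisterInfo accessors used throughout this merge. A minimal sketch of the
idiom, assuming only a MachineRegisterInfo *MRI and a virtual register Reg (this
snippet is illustrative and not part of the patch):

    // Range-based loop over use operands; the operand index, formerly
    // obtained via I.getOperandNo(), is recovered by pointer arithmetic
    // into the instruction's operand array.
    for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
      MachineInstr *UseInst = MO.getParent();
      unsigned OpNo = &MO - &UseInst->getOperand(0);
      (void)OpNo; // use UseInst/OpNo exactly as the old loop body did
    }

    // When only the using instruction matters, iterate instructions directly.
    for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg)) {
      // ... inspect UseMI ...
    }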
diff --git a/contrib/llvm/lib/CodeGen/MachineTraceMetrics.cpp b/contrib/llvm/lib/CodeGen/MachineTraceMetrics.cpp
index 6aa3f67..1bbf0ad 100644
--- a/contrib/llvm/lib/CodeGen/MachineTraceMetrics.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineTraceMetrics.cpp
@@ -7,7 +7,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "machine-trace-metrics"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SparseSet.h"
@@ -26,6 +25,8 @@
using namespace llvm;
+#define DEBUG_TYPE "machine-trace-metrics"
+
char MachineTraceMetrics::ID = 0;
char &llvm::MachineTraceMetricsID = MachineTraceMetrics::ID;
@@ -37,8 +38,9 @@ INITIALIZE_PASS_END(MachineTraceMetrics,
"machine-trace-metrics", "Machine Trace Metrics", false, true)
MachineTraceMetrics::MachineTraceMetrics()
- : MachineFunctionPass(ID), MF(0), TII(0), TRI(0), MRI(0), Loops(0) {
- std::fill(Ensembles, array_endof(Ensembles), (Ensemble*)0);
+ : MachineFunctionPass(ID), MF(nullptr), TII(nullptr), TRI(nullptr),
+ MRI(nullptr), Loops(nullptr) {
+ std::fill(std::begin(Ensembles), std::end(Ensembles), nullptr);
}
void MachineTraceMetrics::getAnalysisUsage(AnalysisUsage &AU) const {
@@ -64,11 +66,11 @@ bool MachineTraceMetrics::runOnMachineFunction(MachineFunction &Func) {
}
void MachineTraceMetrics::releaseMemory() {
- MF = 0;
+ MF = nullptr;
BlockInfo.clear();
for (unsigned i = 0; i != TS_NumStrategies; ++i) {
delete Ensembles[i];
- Ensembles[i] = 0;
+ Ensembles[i] = nullptr;
}
}
@@ -95,19 +97,17 @@ MachineTraceMetrics::getResources(const MachineBasicBlock *MBB) {
unsigned PRKinds = SchedModel.getNumProcResourceKinds();
SmallVector<unsigned, 32> PRCycles(PRKinds);
- for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
- I != E; ++I) {
- const MachineInstr *MI = I;
- if (MI->isTransient())
+ for (const auto &MI : *MBB) {
+ if (MI.isTransient())
continue;
++InstrCount;
- if (MI->isCall())
+ if (MI.isCall())
FBI->HasCalls = true;
// Count processor resources used.
if (!SchedModel.hasInstrSchedModel())
continue;
- const MCSchedClassDesc *SC = SchedModel.resolveSchedClass(MI);
+ const MCSchedClassDesc *SC = SchedModel.resolveSchedClass(&MI);
if (!SC->isValid())
continue;
@@ -233,7 +233,7 @@ const MachineTraceMetrics::TraceBlockInfo*
MachineTraceMetrics::Ensemble::
getDepthResources(const MachineBasicBlock *MBB) const {
const TraceBlockInfo *TBI = &BlockInfo[MBB->getNumber()];
- return TBI->hasValidDepth() ? TBI : 0;
+ return TBI->hasValidDepth() ? TBI : nullptr;
}
// Check if height resources for MBB are valid and return the TBI.
@@ -242,7 +242,7 @@ const MachineTraceMetrics::TraceBlockInfo*
MachineTraceMetrics::Ensemble::
getHeightResources(const MachineBasicBlock *MBB) const {
const TraceBlockInfo *TBI = &BlockInfo[MBB->getNumber()];
- return TBI->hasValidHeight() ? TBI : 0;
+ return TBI->hasValidHeight() ? TBI : nullptr;
}
/// Get an array of processor resource depths for MBB. Indexed by processor
@@ -302,9 +302,9 @@ static bool isExitingLoop(const MachineLoop *From, const MachineLoop *To) {
// instructions.
namespace {
class MinInstrCountEnsemble : public MachineTraceMetrics::Ensemble {
- const char *getName() const { return "MinInstr"; }
- const MachineBasicBlock *pickTracePred(const MachineBasicBlock*);
- const MachineBasicBlock *pickTraceSucc(const MachineBasicBlock*);
+ const char *getName() const override { return "MinInstr"; }
+ const MachineBasicBlock *pickTracePred(const MachineBasicBlock*) override;
+ const MachineBasicBlock *pickTraceSucc(const MachineBasicBlock*) override;
public:
MinInstrCountEnsemble(MachineTraceMetrics *mtm)
@@ -316,13 +316,13 @@ public:
const MachineBasicBlock*
MinInstrCountEnsemble::pickTracePred(const MachineBasicBlock *MBB) {
if (MBB->pred_empty())
- return 0;
+ return nullptr;
const MachineLoop *CurLoop = getLoopFor(MBB);
// Don't leave loops, and never follow back-edges.
if (CurLoop && MBB == CurLoop->getHeader())
- return 0;
+ return nullptr;
unsigned CurCount = MTM.getResources(MBB)->InstrCount;
- const MachineBasicBlock *Best = 0;
+ const MachineBasicBlock *Best = nullptr;
unsigned BestDepth = 0;
for (MachineBasicBlock::const_pred_iterator
I = MBB->pred_begin(), E = MBB->pred_end(); I != E; ++I) {
@@ -344,9 +344,9 @@ MinInstrCountEnsemble::pickTracePred(const MachineBasicBlock *MBB) {
const MachineBasicBlock*
MinInstrCountEnsemble::pickTraceSucc(const MachineBasicBlock *MBB) {
if (MBB->pred_empty())
- return 0;
+ return nullptr;
const MachineLoop *CurLoop = getLoopFor(MBB);
- const MachineBasicBlock *Best = 0;
+ const MachineBasicBlock *Best = nullptr;
unsigned BestHeight = 0;
for (MachineBasicBlock::const_succ_iterator
I = MBB->succ_begin(), E = MBB->succ_end(); I != E; ++I) {
@@ -568,9 +568,8 @@ MachineTraceMetrics::Ensemble::invalidate(const MachineBasicBlock *BadMBB) {
// invalidated, but their instructions will stay the same, so there is no
// need to erase the Cycle entries. They will be overwritten when we
// recompute.
- for (MachineBasicBlock::const_iterator I = BadMBB->begin(), E = BadMBB->end();
- I != E; ++I)
- Cycles.erase(I);
+ for (const auto &I : *BadMBB)
+ Cycles.erase(&I);
}
void MachineTraceMetrics::Ensemble::verify() const {
@@ -627,7 +626,7 @@ struct DataDep {
assert(TargetRegisterInfo::isVirtualRegister(VirtReg));
MachineRegisterInfo::def_iterator DefI = MRI->def_begin(VirtReg);
assert(!DefI.atEnd() && "Register has no defs");
- DefMI = &*DefI;
+ DefMI = DefI->getParent();
DefOp = DefI.getOperandNo();
assert((++DefI).atEnd() && "Register has multiple defs");
}
@@ -690,7 +689,7 @@ struct LiveRegUnit {
unsigned getSparseSetIndex() const { return RegUnit; }
- LiveRegUnit(unsigned RU) : RegUnit(RU), Cycle(0), MI(0), Op(0) {}
+ LiveRegUnit(unsigned RU) : RegUnit(RU), Cycle(0), MI(nullptr), Op(0) {}
};
}
@@ -828,16 +827,13 @@ computeInstrDepths(const MachineBasicBlock *MBB) {
if (TBI.HasValidInstrHeights)
TBI.CriticalPath = computeCrossBlockCriticalPath(TBI);
- for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
- I != E; ++I) {
- const MachineInstr *UseMI = I;
-
+ for (const auto &UseMI : *MBB) {
// Collect all data dependencies.
Deps.clear();
- if (UseMI->isPHI())
- getPHIDeps(UseMI, Deps, TBI.Pred, MTM.MRI);
- else if (getDataDeps(UseMI, Deps, MTM.MRI))
- updatePhysDepsDownwards(UseMI, Deps, RegUnits, MTM.TRI);
+ if (UseMI.isPHI())
+ getPHIDeps(&UseMI, Deps, TBI.Pred, MTM.MRI);
+ else if (getDataDeps(&UseMI, Deps, MTM.MRI))
+ updatePhysDepsDownwards(&UseMI, Deps, RegUnits, MTM.TRI);
// Filter and process dependencies, computing the earliest issue cycle.
unsigned Cycle = 0;
@@ -853,20 +849,20 @@ computeInstrDepths(const MachineBasicBlock *MBB) {
// Add latency if DefMI is a real instruction. Transients get latency 0.
if (!Dep.DefMI->isTransient())
DepCycle += MTM.SchedModel
- .computeOperandLatency(Dep.DefMI, Dep.DefOp, UseMI, Dep.UseOp);
+ .computeOperandLatency(Dep.DefMI, Dep.DefOp, &UseMI, Dep.UseOp);
Cycle = std::max(Cycle, DepCycle);
}
// Remember the instruction depth.
- InstrCycles &MICycles = Cycles[UseMI];
+ InstrCycles &MICycles = Cycles[&UseMI];
MICycles.Depth = Cycle;
if (!TBI.HasValidInstrHeights) {
- DEBUG(dbgs() << Cycle << '\t' << *UseMI);
+ DEBUG(dbgs() << Cycle << '\t' << UseMI);
continue;
}
// Update critical path length.
TBI.CriticalPath = std::max(TBI.CriticalPath, Cycle + MICycles.Height);
- DEBUG(dbgs() << TBI.CriticalPath << '\t' << Cycle << '\t' << *UseMI);
+ DEBUG(dbgs() << TBI.CriticalPath << '\t' << Cycle << '\t' << UseMI);
}
}
}
@@ -944,7 +940,7 @@ static bool pushDepHeight(const DataDep &Dep,
// Update Heights[DefMI] to be the maximum height seen.
MIHeightMap::iterator I;
bool New;
- tie(I, New) = Heights.insert(std::make_pair(Dep.DefMI, UseHeight));
+ std::tie(I, New) = Heights.insert(std::make_pair(Dep.DefMI, UseHeight));
if (New)
return true;
@@ -1055,16 +1051,16 @@ computeInstrHeights(const MachineBasicBlock *MBB) {
Succ = Loop->getHeader();
if (Succ) {
- for (MachineBasicBlock::const_iterator I = Succ->begin(), E = Succ->end();
- I != E && I->isPHI(); ++I) {
- const MachineInstr *PHI = I;
+ for (const auto &PHI : *Succ) {
+ if (!PHI.isPHI())
+ break;
Deps.clear();
- getPHIDeps(PHI, Deps, MBB, MTM.MRI);
+ getPHIDeps(&PHI, Deps, MBB, MTM.MRI);
if (!Deps.empty()) {
// Loop header PHI heights are all 0.
- unsigned Height = TBI.Succ ? Cycles.lookup(PHI).Height : 0;
- DEBUG(dbgs() << "pred\t" << Height << '\t' << *PHI);
- if (pushDepHeight(Deps.front(), PHI, Height,
+ unsigned Height = TBI.Succ ? Cycles.lookup(&PHI).Height : 0;
+ DEBUG(dbgs() << "pred\t" << Height << '\t' << PHI);
+ if (pushDepHeight(Deps.front(), &PHI, Height,
Heights, MTM.SchedModel, MTM.TII))
addLiveIns(Deps.front().DefMI, Deps.front().DefOp, Stack);
}
diff --git a/contrib/llvm/lib/CodeGen/MachineVerifier.cpp b/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
index d61470c..8515b0f 100644
--- a/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -33,7 +33,6 @@
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/BasicBlock.h"
@@ -42,6 +41,7 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
@@ -241,17 +241,17 @@ namespace {
static char ID; // Pass ID, replacement for typeid
const char *const Banner;
- MachineVerifierPass(const char *b = 0)
+ MachineVerifierPass(const char *b = nullptr)
: MachineFunctionPass(ID), Banner(b) {
initializeMachineVerifierPassPass(*PassRegistry::getPassRegistry());
}
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}
- bool runOnMachineFunction(MachineFunction &MF) {
+ bool runOnMachineFunction(MachineFunction &MF) override {
MF.verify(this, Banner);
return false;
}
@@ -273,10 +273,11 @@ void MachineFunction::verify(Pass *p, const char *Banner) const {
}
bool MachineVerifier::runOnMachineFunction(MachineFunction &MF) {
- raw_ostream *OutFile = 0;
+ raw_ostream *OutFile = nullptr;
if (OutFileName) {
std::string ErrorInfo;
- OutFile = new raw_fd_ostream(OutFileName, ErrorInfo, sys::fs::F_Append);
+ OutFile = new raw_fd_ostream(OutFileName, ErrorInfo,
+ sys::fs::F_Append | sys::fs::F_Text);
if (!ErrorInfo.empty()) {
errs() << "Error opening '" << OutFileName << "': " << ErrorInfo << '\n';
exit(1);
@@ -295,10 +296,10 @@ bool MachineVerifier::runOnMachineFunction(MachineFunction &MF) {
TRI = TM->getRegisterInfo();
MRI = &MF.getRegInfo();
- LiveVars = NULL;
- LiveInts = NULL;
- LiveStks = NULL;
- Indexes = NULL;
+ LiveVars = nullptr;
+ LiveInts = nullptr;
+ LiveStks = nullptr;
+ Indexes = nullptr;
if (PASS) {
LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
// We don't want to verify LiveVariables if LiveIntervals is available.
@@ -313,7 +314,7 @@ bool MachineVerifier::runOnMachineFunction(MachineFunction &MF) {
MFI!=MFE; ++MFI) {
visitMachineBasicBlockBefore(MFI);
// Keep track of the current bundle header.
- const MachineInstr *CurBundle = 0;
+ const MachineInstr *CurBundle = nullptr;
// Do we expect the next instruction to be part of the same bundle?
bool InBundle = false;
@@ -468,18 +469,17 @@ void MachineVerifier::visitMachineFunctionBefore() {
// Build a set of the basic blocks in the function.
FunctionBlocks.clear();
- for (MachineFunction::const_iterator
- I = MF->begin(), E = MF->end(); I != E; ++I) {
- FunctionBlocks.insert(I);
- BBInfo &MInfo = MBBInfoMap[I];
-
- MInfo.Preds.insert(I->pred_begin(), I->pred_end());
- if (MInfo.Preds.size() != I->pred_size())
- report("MBB has duplicate entries in its predecessor list.", I);
-
- MInfo.Succs.insert(I->succ_begin(), I->succ_end());
- if (MInfo.Succs.size() != I->succ_size())
- report("MBB has duplicate entries in its successor list.", I);
+ for (const auto &MBB : *MF) {
+ FunctionBlocks.insert(&MBB);
+ BBInfo &MInfo = MBBInfoMap[&MBB];
+
+ MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
+ if (MInfo.Preds.size() != MBB.pred_size())
+ report("MBB has duplicate entries in its predecessor list.", &MBB);
+
+ MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
+ if (MInfo.Succs.size() != MBB.succ_size())
+ report("MBB has duplicate entries in its successor list.", &MBB);
}
// Check that the register use lists are sane.
@@ -500,7 +500,7 @@ static bool matchPair(MachineBasicBlock::const_succ_iterator i,
void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
- FirstTerminator = 0;
+ FirstTerminator = nullptr;
if (MRI->isSSA()) {
// If this block has allocatable physical registers live-in, check that
@@ -552,7 +552,7 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
report("MBB has more than one landing pad successor", MBB);
// Call AnalyzeBranch. If it succeeds, there are several more conditions to check.
- MachineBasicBlock *TBB = 0, *FBB = 0;
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
if (!TII->AnalyzeBranch(*const_cast<MachineBasicBlock *>(MBB),
TBB, FBB, Cond)) {
@@ -577,8 +577,8 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
report("MBB exits via unconditional fall-through but its successor "
"differs from its CFG successor!", MBB);
}
- if (!MBB->empty() && getBundleStart(&MBB->back())->isBarrier() &&
- !TII->isPredicated(getBundleStart(&MBB->back()))) {
+ if (!MBB->empty() && MBB->back().isBarrier() &&
+ !TII->isPredicated(&MBB->back())) {
report("MBB exits via unconditional fall-through but ends with a "
"barrier instruction!", MBB);
}
@@ -598,10 +598,10 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
if (MBB->empty()) {
report("MBB exits via unconditional branch but doesn't contain "
"any instructions!", MBB);
- } else if (!getBundleStart(&MBB->back())->isBarrier()) {
+ } else if (!MBB->back().isBarrier()) {
report("MBB exits via unconditional branch but doesn't end with a "
"barrier instruction!", MBB);
- } else if (!getBundleStart(&MBB->back())->isTerminator()) {
+ } else if (!MBB->back().isTerminator()) {
report("MBB exits via unconditional branch but the branch isn't a "
"terminator instruction!", MBB);
}
@@ -629,10 +629,10 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
if (MBB->empty()) {
report("MBB exits via conditional branch/fall-through but doesn't "
"contain any instructions!", MBB);
- } else if (getBundleStart(&MBB->back())->isBarrier()) {
+ } else if (MBB->back().isBarrier()) {
report("MBB exits via conditional branch/fall-through but ends with a "
"barrier instruction!", MBB);
- } else if (!getBundleStart(&MBB->back())->isTerminator()) {
+ } else if (!MBB->back().isTerminator()) {
report("MBB exits via conditional branch/fall-through but the branch "
"isn't a terminator instruction!", MBB);
}
@@ -657,10 +657,10 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
if (MBB->empty()) {
report("MBB exits via conditional branch/branch but doesn't "
"contain any instructions!", MBB);
- } else if (!getBundleStart(&MBB->back())->isBarrier()) {
+ } else if (!MBB->back().isBarrier()) {
report("MBB exits via conditional branch/branch but doesn't end with a "
"barrier instruction!", MBB);
- } else if (!getBundleStart(&MBB->back())->isTerminator()) {
+ } else if (!MBB->back().isTerminator()) {
report("MBB exits via conditional branch/branch but the branch "
"isn't a terminator instruction!", MBB);
}
@@ -1075,7 +1075,7 @@ void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
// Verify SSA form.
if (MRI->isSSA() && TargetRegisterInfo::isVirtualRegister(Reg) &&
- llvm::next(MRI->def_begin(Reg)) != MRI->def_end())
+ std::next(MRI->def_begin(Reg)) != MRI->def_end())
report("Multiple virtual register defs in SSA form", MO, MONum);
// Check LiveInts for a live segment, but only for virtual registers.
@@ -1157,9 +1157,7 @@ void MachineVerifier::calcRegsPassed() {
// First push live-out regs to successors' vregsPassed. Remember the MBBs that
// have any vregsPassed.
SmallPtrSet<const MachineBasicBlock*, 8> todo;
- for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end();
- MFI != MFE; ++MFI) {
- const MachineBasicBlock &MBB(*MFI);
+ for (const auto &MBB : *MF) {
BBInfo &MInfo = MBBInfoMap[&MBB];
if (!MInfo.reachable)
continue;
@@ -1194,9 +1192,7 @@ void MachineVerifier::calcRegsPassed() {
void MachineVerifier::calcRegsRequired() {
// First push live-in regs to predecessors' vregsRequired.
SmallPtrSet<const MachineBasicBlock*, 8> todo;
- for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end();
- MFI != MFE; ++MFI) {
- const MachineBasicBlock &MBB(*MFI);
+ for (const auto &MBB : *MF) {
BBInfo &MInfo = MBBInfoMap[&MBB];
for (MachineBasicBlock::const_pred_iterator PrI = MBB.pred_begin(),
PrE = MBB.pred_end(); PrI != PrE; ++PrI) {
@@ -1227,27 +1223,28 @@ void MachineVerifier::calcRegsRequired() {
// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
void MachineVerifier::checkPHIOps(const MachineBasicBlock *MBB) {
SmallPtrSet<const MachineBasicBlock*, 8> seen;
- for (MachineBasicBlock::const_iterator BBI = MBB->begin(), BBE = MBB->end();
- BBI != BBE && BBI->isPHI(); ++BBI) {
+ for (const auto &BBI : *MBB) {
+ if (!BBI.isPHI())
+ break;
seen.clear();
- for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2) {
- unsigned Reg = BBI->getOperand(i).getReg();
- const MachineBasicBlock *Pre = BBI->getOperand(i + 1).getMBB();
+ for (unsigned i = 1, e = BBI.getNumOperands(); i != e; i += 2) {
+ unsigned Reg = BBI.getOperand(i).getReg();
+ const MachineBasicBlock *Pre = BBI.getOperand(i + 1).getMBB();
if (!Pre->isSuccessor(MBB))
continue;
seen.insert(Pre);
BBInfo &PrInfo = MBBInfoMap[Pre];
if (PrInfo.reachable && !PrInfo.isLiveOut(Reg))
report("PHI operand is not live-out from predecessor",
- &BBI->getOperand(i), i);
+ &BBI.getOperand(i), i);
}
// Did we see all predecessors?
for (MachineBasicBlock::const_pred_iterator PrI = MBB->pred_begin(),
PrE = MBB->pred_end(); PrI != PrE; ++PrI) {
if (!seen.count(*PrI)) {
- report("Missing PHI operand", BBI);
+ report("Missing PHI operand", &BBI);
*OS << "BB#" << (*PrI)->getNumber()
<< " is a predecessor according to the CFG.\n";
}
@@ -1258,29 +1255,27 @@ void MachineVerifier::checkPHIOps(const MachineBasicBlock *MBB) {
void MachineVerifier::visitMachineFunctionAfter() {
calcRegsPassed();
- for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end();
- MFI != MFE; ++MFI) {
- BBInfo &MInfo = MBBInfoMap[MFI];
+ for (const auto &MBB : *MF) {
+ BBInfo &MInfo = MBBInfoMap[&MBB];
// Skip unreachable MBBs.
if (!MInfo.reachable)
continue;
- checkPHIOps(MFI);
+ checkPHIOps(&MBB);
}
// Now check liveness info if available
calcRegsRequired();
// Check for killed virtual registers that should be live out.
- for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end();
- MFI != MFE; ++MFI) {
- BBInfo &MInfo = MBBInfoMap[MFI];
+ for (const auto &MBB : *MF) {
+ BBInfo &MInfo = MBBInfoMap[&MBB];
for (RegSet::iterator
I = MInfo.vregsRequired.begin(), E = MInfo.vregsRequired.end(); I != E;
++I)
if (MInfo.regsKilled.count(*I)) {
- report("Virtual register killed in block, but needed live out.", MFI);
+ report("Virtual register killed in block, but needed live out.", &MBB);
*OS << "Virtual register " << PrintReg(*I)
<< " is used after the block.\n";
}
@@ -1306,20 +1301,19 @@ void MachineVerifier::verifyLiveVariables() {
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
- for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end();
- MFI != MFE; ++MFI) {
- BBInfo &MInfo = MBBInfoMap[MFI];
+ for (const auto &MBB : *MF) {
+ BBInfo &MInfo = MBBInfoMap[&MBB];
// Our vregsRequired should be identical to LiveVariables' AliveBlocks
if (MInfo.vregsRequired.count(Reg)) {
- if (!VI.AliveBlocks.test(MFI->getNumber())) {
- report("LiveVariables: Block missing from AliveBlocks", MFI);
+ if (!VI.AliveBlocks.test(MBB.getNumber())) {
+ report("LiveVariables: Block missing from AliveBlocks", &MBB);
*OS << "Virtual register " << PrintReg(Reg)
<< " must be live through the block.\n";
}
} else {
- if (VI.AliveBlocks.test(MFI->getNumber())) {
- report("LiveVariables: Block should not be in AliveBlocks", MFI);
+ if (VI.AliveBlocks.test(MBB.getNumber())) {
+ report("LiveVariables: Block should not be in AliveBlocks", &MBB);
*OS << "Virtual register " << PrintReg(Reg)
<< " is not needed live through the block.\n";
}
@@ -1674,32 +1668,31 @@ void MachineVerifier::verifyStackFrame() {
}
// Update stack state by checking contents of MBB.
- for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
- I != E; ++I) {
- if (I->getOpcode() == FrameSetupOpcode) {
+ for (const auto &I : *MBB) {
+ if (I.getOpcode() == FrameSetupOpcode) {
// The first operand of a FrameOpcode should be i32.
- int Size = I->getOperand(0).getImm();
+ int Size = I.getOperand(0).getImm();
assert(Size >= 0 &&
"Value should be non-negative in FrameSetup and FrameDestroy.\n");
if (BBState.ExitIsSetup)
- report("FrameSetup is after another FrameSetup", I);
+ report("FrameSetup is after another FrameSetup", &I);
BBState.ExitValue -= Size;
BBState.ExitIsSetup = true;
}
- if (I->getOpcode() == FrameDestroyOpcode) {
+ if (I.getOpcode() == FrameDestroyOpcode) {
// The first operand of a FrameOpcode should be i32.
- int Size = I->getOperand(0).getImm();
+ int Size = I.getOperand(0).getImm();
assert(Size >= 0 &&
"Value should be non-negative in FrameSetup and FrameDestroy.\n");
if (!BBState.ExitIsSetup)
- report("FrameDestroy is not after a FrameSetup", I);
+ report("FrameDestroy is not after a FrameSetup", &I);
int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
BBState.ExitValue;
if (BBState.ExitIsSetup && AbsSPAdj != Size) {
- report("FrameDestroy <n> is after FrameSetup <m>", I);
+ report("FrameDestroy <n> is after FrameSetup <m>", &I);
*OS << "FrameDestroy <" << Size << "> is after FrameSetup <"
<< AbsSPAdj << ">.\n";
}
diff --git a/contrib/llvm/lib/CodeGen/OptimizePHIs.cpp b/contrib/llvm/lib/CodeGen/OptimizePHIs.cpp
index 3982612..95a2934 100644
--- a/contrib/llvm/lib/CodeGen/OptimizePHIs.cpp
+++ b/contrib/llvm/lib/CodeGen/OptimizePHIs.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "phi-opt"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
@@ -23,6 +22,8 @@
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "phi-opt"
+
STATISTIC(NumPHICycles, "Number of PHI cycles replaced");
STATISTIC(NumDeadPHICycles, "Number of dead PHI cycles");
@@ -37,9 +38,9 @@ namespace {
initializeOptimizePHIsPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -61,6 +62,9 @@ INITIALIZE_PASS(OptimizePHIs, "opt-phis",
"Optimize machine instruction PHIs", false, false)
bool OptimizePHIs::runOnMachineFunction(MachineFunction &Fn) {
+ if (skipOptnoneFunction(*Fn.getFunction()))
+ return false;
+
MRI = &Fn.getRegInfo();
TII = Fn.getTarget().getInstrInfo();
@@ -139,10 +143,8 @@ bool OptimizePHIs::IsDeadPHICycle(MachineInstr *MI, InstrSet &PHIsInCycle) {
if (PHIsInCycle.size() == 16)
return false;
- for (MachineRegisterInfo::use_iterator I = MRI->use_begin(DstReg),
- E = MRI->use_end(); I != E; ++I) {
- MachineInstr *UseMI = &*I;
- if (!UseMI->isPHI() || !IsDeadPHICycle(UseMI, PHIsInCycle))
+ for (MachineInstr &UseMI : MRI->use_instructions(DstReg)) {
+ if (!UseMI.isPHI() || !IsDeadPHICycle(&UseMI, PHIsInCycle))
return false;
}
diff --git a/contrib/llvm/lib/CodeGen/PHIElimination.cpp b/contrib/llvm/lib/CodeGen/PHIElimination.cpp
index dcd9072..c8d0819 100644
--- a/contrib/llvm/lib/CodeGen/PHIElimination.cpp
+++ b/contrib/llvm/lib/CodeGen/PHIElimination.cpp
@@ -13,7 +13,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "phielim"
#include "llvm/CodeGen/Passes.h"
#include "PHIEliminationUtils.h"
#include "llvm/ADT/STLExtras.h"
@@ -35,6 +34,8 @@
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "phielim"
+
static cl::opt<bool>
DisableEdgeSplitting("disable-phi-elim-edge-splitting", cl::init(false),
cl::Hidden, cl::desc("Disable critical edge splitting "
@@ -57,8 +58,8 @@ namespace {
initializePHIEliminationPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnMachineFunction(MachineFunction &Fn);
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ bool runOnMachineFunction(MachineFunction &Fn) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
private:
/// EliminatePHINodes - Eliminate phi nodes by inserting copy instructions
@@ -186,7 +187,7 @@ bool PHIElimination::EliminatePHINodes(MachineFunction &MF,
// Get an iterator to the first instruction after the last PHI node (this may
// also be the end of the basic block).
MachineBasicBlock::iterator LastPHIIt =
- prior(MBB.SkipPHIsAndLabels(MBB.begin()));
+ std::prev(MBB.SkipPHIsAndLabels(MBB.begin()));
while (MBB.front().isPHI())
LowerPHINode(MBB, LastPHIIt);
@@ -198,9 +199,8 @@ bool PHIElimination::EliminatePHINodes(MachineFunction &MF,
/// This includes registers with no defs.
static bool isImplicitlyDefined(unsigned VirtReg,
const MachineRegisterInfo *MRI) {
- for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(VirtReg),
- DE = MRI->def_end(); DI != DE; ++DI)
- if (!DI->isImplicitDef())
+ for (MachineInstr &DI : MRI->def_instructions(VirtReg))
+ if (!DI.isImplicitDef())
return false;
return true;
}
@@ -222,7 +222,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
MachineBasicBlock::iterator LastPHIIt) {
++NumLowered;
- MachineBasicBlock::iterator AfterPHIsIt = llvm::next(LastPHIIt);
+ MachineBasicBlock::iterator AfterPHIsIt = std::next(LastPHIIt);
// Unlink the PHI node from the basic block, but don't delete the PHI yet.
MachineInstr *MPhi = MBB.remove(MBB.begin());
@@ -267,7 +267,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
// Update live variable information if there is any.
if (LV) {
- MachineInstr *PHICopy = prior(AfterPHIsIt);
+ MachineInstr *PHICopy = std::prev(AfterPHIsIt);
if (IncomingReg) {
LiveVariables::VarInfo &VI = LV->getVarInfo(IncomingReg);
@@ -306,7 +306,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
// Update LiveIntervals for the new copy or implicit def.
if (LIS) {
- MachineInstr *NewInstr = prior(AfterPHIsIt);
+ MachineInstr *NewInstr = std::prev(AfterPHIsIt);
SlotIndex DestCopyIndex = LIS->InsertMachineInstrInMaps(NewInstr);
SlotIndex MBBStartIndex = LIS->getMBBStartIdx(&MBB);
@@ -378,7 +378,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
findPHICopyInsertPoint(&opBlock, &MBB, SrcReg);
// Insert the copy.
- MachineInstr *NewSrcInstr = 0;
+ MachineInstr *NewSrcInstr = nullptr;
if (!reusedIncoming && IncomingReg) {
if (SrcUndef) {
// The source register is undefined, so there is no need for a real
@@ -444,7 +444,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
}
} else {
// We just inserted this copy.
- KillInst = prior(InsertPos);
+ KillInst = std::prev(InsertPos);
}
}
assert(KillInst->readsRegister(SrcReg) && "Cannot find kill instruction");
@@ -504,7 +504,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
}
} else {
// We just inserted this copy.
- KillInst = prior(InsertPos);
+ KillInst = std::prev(InsertPos);
}
}
assert(KillInst->readsRegister(SrcReg) &&
@@ -532,13 +532,14 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
/// used later to determine when the vreg is killed in the BB.
///
void PHIElimination::analyzePHINodes(const MachineFunction& MF) {
- for (MachineFunction::const_iterator I = MF.begin(), E = MF.end();
- I != E; ++I)
- for (MachineBasicBlock::const_iterator BBI = I->begin(), BBE = I->end();
- BBI != BBE && BBI->isPHI(); ++BBI)
- for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2)
- ++VRegPHIUseCount[BBVRegPair(BBI->getOperand(i+1).getMBB()->getNumber(),
- BBI->getOperand(i).getReg())];
+ for (const auto &MBB : MF)
+ for (const auto &BBI : MBB) {
+ if (!BBI.isPHI())
+ break;
+ for (unsigned i = 1, e = BBI.getNumOperands(); i != e; i += 2)
+ ++VRegPHIUseCount[BBVRegPair(BBI.getOperand(i+1).getMBB()->getNumber(),
+ BBI.getOperand(i).getReg())];
+ }
}
bool PHIElimination::SplitPHIEdges(MachineFunction &MF,
@@ -547,7 +548,7 @@ bool PHIElimination::SplitPHIEdges(MachineFunction &MF,
if (MBB.empty() || !MBB.front().isPHI() || MBB.isLandingPad())
return false; // Quick exit for basic blocks without PHIs.
- const MachineLoop *CurLoop = MLI ? MLI->getLoopFor(&MBB) : 0;
+ const MachineLoop *CurLoop = MLI ? MLI->getLoopFor(&MBB) : nullptr;
bool IsLoopHeader = CurLoop && &MBB == CurLoop->getHeader();
bool Changed = false;
@@ -564,7 +565,7 @@ bool PHIElimination::SplitPHIEdges(MachineFunction &MF,
// out-of-line blocks into the loop which is very bad for code placement.
if (PreMBB == &MBB && !SplitAllCriticalEdges)
continue;
- const MachineLoop *PreLoop = MLI ? MLI->getLoopFor(PreMBB) : 0;
+ const MachineLoop *PreLoop = MLI ? MLI->getLoopFor(PreMBB) : nullptr;
if (IsLoopHeader && PreLoop == CurLoop && !SplitAllCriticalEdges)
continue;
@@ -607,7 +608,7 @@ bool PHIElimination::SplitPHIEdges(MachineFunction &MF,
if (!ShouldSplit)
continue;
if (!PreMBB->SplitCriticalEdge(&MBB, this)) {
- DEBUG(dbgs() << "Failed to split ciritcal edge.\n");
+ DEBUG(dbgs() << "Failed to split critical edge.\n");
continue;
}
Changed = true;
diff --git a/contrib/llvm/lib/CodeGen/PHIEliminationUtils.cpp b/contrib/llvm/lib/CodeGen/PHIEliminationUtils.cpp
index e1b56e9..99bbad1 100644
--- a/contrib/llvm/lib/CodeGen/PHIEliminationUtils.cpp
+++ b/contrib/llvm/lib/CodeGen/PHIEliminationUtils.cpp
@@ -34,11 +34,9 @@ llvm::findPHICopyInsertPoint(MachineBasicBlock* MBB, MachineBasicBlock* SuccMBB,
// Discover any defs/uses in this basic block.
SmallPtrSet<MachineInstr*, 8> DefUsesInMBB;
MachineRegisterInfo& MRI = MBB->getParent()->getRegInfo();
- for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(SrcReg),
- RE = MRI.reg_end(); RI != RE; ++RI) {
- MachineInstr* DefUseMI = &*RI;
- if (DefUseMI->getParent() == MBB)
- DefUsesInMBB.insert(DefUseMI);
+ for (MachineInstr &RI : MRI.reg_instructions(SrcReg)) {
+ if (RI.getParent() == MBB)
+ DefUsesInMBB.insert(&RI);
}
MachineBasicBlock::iterator InsertPoint;
diff --git a/contrib/llvm/lib/CodeGen/Passes.cpp b/contrib/llvm/lib/CodeGen/Passes.cpp
index f4ffd03..249b2d0 100644
--- a/contrib/llvm/lib/CodeGen/Passes.cpp
+++ b/contrib/llvm/lib/CodeGen/Passes.cpp
@@ -14,11 +14,11 @@
#include "llvm/CodeGen/Passes.h"
#include "llvm/Analysis/Passes.h"
-#include "llvm/Analysis/Verifier.h"
-#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/IR/Verifier.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/PassManager.h"
#include "llvm/Support/CommandLine.h"
@@ -56,7 +56,7 @@ static cl::opt<cl::boolOrDefault>
OptimizeRegAlloc("optimize-regalloc", cl::Hidden,
cl::desc("Enable optimized register allocation compilation path."));
static cl::opt<cl::boolOrDefault>
-EnableMachineSched("enable-misched", cl::Hidden,
+EnableMachineSched("enable-misched",
cl::desc("Enable the machine instruction scheduling pass."));
static cl::opt<bool> DisablePostRAMachineLICM("disable-postra-machine-licm",
cl::Hidden,
@@ -65,6 +65,8 @@ static cl::opt<bool> DisableMachineSink("disable-machine-sink", cl::Hidden,
cl::desc("Disable Machine Sinking"));
static cl::opt<bool> DisableLSR("disable-lsr", cl::Hidden,
cl::desc("Disable Loop Strength Reduction Pass"));
+static cl::opt<bool> DisableConstantHoisting("disable-constant-hoisting",
+ cl::Hidden, cl::desc("Disable ConstantHoisting"));
static cl::opt<bool> DisableCGP("disable-cgp", cl::Hidden,
cl::desc("Disable Codegen Prepare"));
static cl::opt<bool> DisableCopyProp("disable-copyprop", cl::Hidden,
@@ -77,12 +79,20 @@ static cl::opt<bool> PrintGCInfo("print-gc", cl::Hidden,
cl::desc("Dump garbage collector data"));
static cl::opt<bool> VerifyMachineCode("verify-machineinstrs", cl::Hidden,
cl::desc("Verify generated machine code"),
- cl::init(getenv("LLVM_VERIFY_MACHINEINSTRS")!=NULL));
+ cl::init(getenv("LLVM_VERIFY_MACHINEINSTRS")!=nullptr));
static cl::opt<std::string>
PrintMachineInstrs("print-machineinstrs", cl::ValueOptional,
cl::desc("Print machine instrs"),
cl::value_desc("pass-name"), cl::init("option-unspecified"));
+// Temporary option to allow experimenting with MachineScheduler as a post-RA
+// scheduler. Targets can "properly" enable this with
+// substitutePass(&PostRASchedulerID, &PostMachineSchedulerID); Ideally it
+// wouldn't be part of the standard pass pipeline, and the target would just add
+// a PostRA scheduling pass wherever it wants.
+static cl::opt<bool> MISchedPostRA("misched-postra", cl::Hidden,
+ cl::desc("Run MachineScheduler post regalloc (independent of preRA sched)"));
+
// Experimental option to run live interval analysis early.
static cl::opt<bool> EarlyLiveIntervals("early-live-intervals", cl::Hidden,
cl::desc("Run live interval analysis earlier in the pipeline"));
@@ -111,7 +121,7 @@ static IdentifyingPassPtr applyOverride(IdentifyingPassPtr TargetID,
case cl::BOU_TRUE:
if (TargetID.isValid())
return TargetID;
- if (StandardID == 0)
+ if (StandardID == nullptr)
report_fatal_error("Target cannot enable pass");
return StandardID;
case cl::BOU_FALSE:
@@ -217,8 +227,8 @@ TargetPassConfig::~TargetPassConfig() {
// Out of line constructor provides default values for pass options and
// registers all common codegen passes.
TargetPassConfig::TargetPassConfig(TargetMachine *tm, PassManagerBase &pm)
- : ImmutablePass(ID), PM(&pm), StartAfter(0), StopAfter(0),
- Started(true), Stopped(false), TM(tm), Impl(0), Initialized(false),
+ : ImmutablePass(ID), PM(&pm), StartAfter(nullptr), StopAfter(nullptr),
+ Started(true), Stopped(false), TM(tm), Impl(nullptr), Initialized(false),
DisableVerify(false),
EnableTailMerge(true) {
@@ -259,7 +269,7 @@ TargetPassConfig *LLVMTargetMachine::createPassConfig(PassManagerBase &PM) {
}
TargetPassConfig::TargetPassConfig()
- : ImmutablePass(ID), PM(0) {
+ : ImmutablePass(ID), PM(nullptr) {
llvm_unreachable("TargetPassConfig should not be constructed on-the-fly");
}
@@ -317,7 +327,7 @@ AnalysisID TargetPassConfig::addPass(AnalysisID PassID) {
IdentifyingPassPtr TargetID = getPassSubstitution(PassID);
IdentifyingPassPtr FinalPtr = overridePass(PassID, TargetID);
if (!FinalPtr.isValid())
- return 0;
+ return nullptr;
Pass *P;
if (FinalPtr.isInstance())
@@ -369,20 +379,26 @@ void TargetPassConfig::addIRPasses() {
// Before running any passes, run the verifier to determine if the input
// coming from the front-end and/or optimizer is valid.
- if (!DisableVerify)
+ if (!DisableVerify) {
addPass(createVerifierPass());
+ addPass(createDebugInfoVerifierPass());
+ }
// Run loop strength reduction before anything else.
if (getOptLevel() != CodeGenOpt::None && !DisableLSR) {
addPass(createLoopStrengthReducePass());
if (PrintLSR)
- addPass(createPrintFunctionPass("\n\n*** Code after LSR ***\n", &dbgs()));
+ addPass(createPrintFunctionPass(dbgs(), "\n\n*** Code after LSR ***\n"));
}
addPass(createGCLoweringPass());
// Make sure that no unreachable blocks are instruction selected.
addPass(createUnreachableBlockEliminationPass());
+
+ // Prepare expensive constants for SelectionDAG.
+ if (getOptLevel() != CodeGenOpt::None && !DisableConstantHoisting)
+ addPass(createConstantHoistingPass());
}
/// Turn exception handling constructs into something the code generators can
@@ -400,11 +416,11 @@ void TargetPassConfig::addPassesToHandleExceptions() {
// FALLTHROUGH
case ExceptionHandling::DwarfCFI:
case ExceptionHandling::ARM:
- case ExceptionHandling::Win64:
+ case ExceptionHandling::WinEH:
addPass(createDwarfEHPass(TM));
break;
case ExceptionHandling::None:
- addPass(createLowerInvokePass(TM));
+ addPass(createLowerInvokePass());
// The lower invoke pass may create unreachable code. Remove it.
addPass(createUnreachableBlockEliminationPass());
@@ -422,14 +438,19 @@ void TargetPassConfig::addCodeGenPrepare() {
/// Add common passes that perform LLVM IR to IR transforms in preparation for
/// instruction selection.
void TargetPassConfig::addISelPrepare() {
- addPass(createStackProtectorPass(TM));
-
addPreISel();
+ // Need to verify DebugInfo *before* creating the stack protector analysis.
+ // It's a function pass, and verifying between it and its users causes a
+ // crash.
+ if (!DisableVerify)
+ addPass(createDebugInfoVerifierPass());
+
+ addPass(createStackProtectorPass(TM));
+
if (PrintISelInput)
- addPass(createPrintFunctionPass("\n\n"
- "*** Final LLVM Code input to ISel ***\n",
- &dbgs()));
+ addPass(createPrintFunctionPass(
+ dbgs(), "\n\n*** Final LLVM Code input to ISel ***\n"));
// All passes which modify the LLVM IR are now complete; run the verifier
// to ensure that the IR is valid.
@@ -520,7 +541,10 @@ void TargetPassConfig::addMachinePasses() {
// Second pass scheduler.
if (getOptLevel() != CodeGenOpt::None) {
- addPass(&PostRASchedulerID);
+ if (MISchedPostRA)
+ addPass(&PostMachineSchedulerID);
+ else
+ addPass(&PostRASchedulerID);
printAndVerify("After PostRAScheduler");
}
@@ -536,6 +560,8 @@ void TargetPassConfig::addMachinePasses() {
if (addPreEmitPass())
printAndVerify("After PreEmit passes");
+
+ addPass(&StackMapLivenessID);
}
/// Add passes that optimize machine instructions in SSA form.
@@ -596,7 +622,7 @@ MachinePassRegistry RegisterRegAlloc::Registry;
/// A dummy default pass factory indicates whether the register allocator is
/// overridden on the command line.
-static FunctionPass *useDefaultRegisterAllocator() { return 0; }
+static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }
static RegisterRegAlloc
defaultRegAlloc("default",
"pick register allocator based on -O option",
@@ -725,7 +751,10 @@ void TargetPassConfig::addMachineLateOptimization() {
printAndVerify("After BranchFolding");
// Tail duplication.
- if (addPass(&TailDuplicateID))
+ // Note that tail duplication just increases code size and degrades
+ // performance for targets that require Structured Control Flow.
+ // In addition, it can also make the CFG irreducible. Thus we disable it.
+ if (!TM->requiresStructuredCFG() && addPass(&TailDuplicateID))
printAndVerify("After TailDuplicate");
// Copy propagation.
diff --git a/contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp
index 28f2d2f..716cb1f 100644
--- a/contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp
@@ -66,7 +66,6 @@
// C = copy A <-- same-bank copy
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "peephole-opt"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -81,6 +80,8 @@
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "peephole-opt"
+
// Optimize Extensions
static cl::opt<bool>
Aggressive("aggressive-ext-opt", cl::Hidden,
@@ -90,6 +91,10 @@ static cl::opt<bool>
DisablePeephole("disable-peephole", cl::Hidden, cl::init(false),
cl::desc("Disable the peephole optimizer"));
+static cl::opt<bool>
+DisableAdvCopyOpt("disable-adv-copy-opt", cl::Hidden, cl::init(true),
+ cl::desc("Disable advanced copy optimization"));
+
STATISTIC(NumReuse, "Number of extension results reused");
STATISTIC(NumCmps, "Number of compares eliminated");
STATISTIC(NumImmFold, "Number of move immediate folded");
@@ -110,9 +115,9 @@ namespace {
initializePeepholeOptimizerPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
if (Aggressive) {
@@ -133,7 +138,107 @@ namespace {
bool foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
SmallSet<unsigned, 4> &ImmDefRegs,
DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
- bool isLoadFoldable(MachineInstr *MI, unsigned &FoldAsLoadDefReg);
+ bool isLoadFoldable(MachineInstr *MI,
+ SmallSet<unsigned, 16> &FoldAsLoadDefCandidates);
+ };
+
+ /// \brief Helper class to track the possible sources of a value defined by
+ /// a (chain of) copy related instructions.
+ /// Given a definition (instruction and definition index), this class
+ /// follows the use-def chain to find successive suitable sources.
+ /// The given source can be used to rewrite the definition into
+ /// def = COPY src.
+ ///
+ /// For instance, let us consider the following snippet:
+ /// v0 =
+ /// v2 = INSERT_SUBREG v1, v0, sub0
+ /// def = COPY v2.sub0
+ ///
+ /// Using a ValueTracker for def = COPY v2.sub0 will give the following
+ /// suitable sources:
+ /// v2.sub0 and v0.
+ /// Then, def can be rewritten into def = COPY v0.
+ class ValueTracker {
+ private:
+ /// The current point into the use-def chain.
+ const MachineInstr *Def;
+ /// The index of the definition in Def.
+ unsigned DefIdx;
+ /// The sub register index of the definition.
+ unsigned DefSubReg;
+ /// The register where the value can be found.
+ unsigned Reg;
+ /// Specify whether or not the value tracking looks through
+ /// complex instructions. When this is false, the value tracker
+ /// bails on everything that is not a copy or a bitcast.
+ ///
+ /// Note: This could have been implemented as a specialized version of
+ /// the ValueTracker class but that would have complicated the code of
+ /// the users of this class.
+ bool UseAdvancedTracking;
+ /// Optional MachineRegisterInfo used to perform some complex
+ /// tracking.
+ const MachineRegisterInfo *MRI;
+
+ /// \brief Dispatcher to the right underlying implementation of
+ /// getNextSource.
+ bool getNextSourceImpl(unsigned &SrcIdx, unsigned &SrcSubReg);
+ /// \brief Specialized version of getNextSource for Copy instructions.
+ bool getNextSourceFromCopy(unsigned &SrcIdx, unsigned &SrcSubReg);
+ /// \brief Specialized version of getNextSource for Bitcast instructions.
+ bool getNextSourceFromBitcast(unsigned &SrcIdx, unsigned &SrcSubReg);
+ /// \brief Specialized version of getNextSource for RegSequence
+ /// instructions.
+ bool getNextSourceFromRegSequence(unsigned &SrcIdx, unsigned &SrcSubReg);
+ /// \brief Specialized version of getNextSource for InsertSubreg
+ /// instructions.
+ bool getNextSourceFromInsertSubreg(unsigned &SrcIdx, unsigned &SrcSubReg);
+ /// \brief Specialized version of getNextSource for ExtractSubreg
+ /// instructions.
+ bool getNextSourceFromExtractSubreg(unsigned &SrcIdx, unsigned &SrcSubReg);
+ /// \brief Specialized version of getNextSource for SubregToReg
+ /// instructions.
+ bool getNextSourceFromSubregToReg(unsigned &SrcIdx, unsigned &SrcSubReg);
+
+ public:
+ /// \brief Create a ValueTracker instance for the value defined by \p MI
+ /// at the operand index \p DefIdx.
+ /// \p DefSubReg represents the sub register index the value tracker will
+ /// track. It does not need to match the sub register index used in \p MI.
+ /// \p UseAdvancedTracking specifies whether or not the value tracker looks
+ /// through complex instructions. By default (false), it handles only copy
+ /// and bitcast instructions.
+ /// \p MRI is useful to perform some complex checks.
+ ValueTracker(const MachineInstr &MI, unsigned DefIdx, unsigned DefSubReg,
+ bool UseAdvancedTracking = false,
+ const MachineRegisterInfo *MRI = nullptr)
+ : Def(&MI), DefIdx(DefIdx), DefSubReg(DefSubReg),
+ UseAdvancedTracking(UseAdvancedTracking), MRI(MRI) {
+ assert(Def->getOperand(DefIdx).isDef() &&
+ Def->getOperand(DefIdx).isReg() &&
+ "Definition does not match machine instruction");
+ // Initially the value is in the defined register.
+ Reg = Def->getOperand(DefIdx).getReg();
+ }
+
+ /// \brief Following the use-def chain, get the next available source
+ /// for the tracked value.
+ /// When the returned value is not nullptr, getReg() gives the register
+ /// that contains the tracked value.
+ /// \note The sub register index returned in \p SrcSubReg must be used
+ /// on that getReg() to access the actual value.
+ /// \return Unless the returned value is nullptr (i.e., no source found),
+ /// \p SrcIdx gives the index of the next source in the returned
+ /// instruction and \p SrcSubReg the index to be used on that source to
+ /// get the tracked value. When nullptr is returned, no alternative source
+ /// has been found.
+ const MachineInstr *getNextSource(unsigned &SrcIdx, unsigned &SrcSubReg);
+
+ /// \brief Get the last register where the initial value can be found.
+ /// Initially this is the register of the definition.
+ /// Then, after each successful call to getNextSource, this is the
+ /// register of the last source.
+ unsigned getReg() const { return Reg; }
};
}
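
For reference, the tracker added above is driven as in the rewritten
optimizeCopyOrBitcast later in this file; a condensed sketch (local variable
names are illustrative, only the ValueTracker API is from the patch):

    ValueTracker ValTracker(*MI, DefIdx, DefSubReg,
                            /*UseAdvancedTracking=*/true, MRI);
    unsigned SrcIdx, SrcSubReg;
    while (ValTracker.getNextSource(SrcIdx, SrcSubReg)) {
      unsigned Src = ValTracker.getReg(); // register holding the tracked value
      // Physical registers are not followed further; see the rewritten loop below.
      if (TargetRegisterInfo::isPhysicalRegister(Src))
        break;
      // ... decide whether Src/SrcSubReg is a better source for the copy ...
    }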
@@ -182,15 +287,13 @@ optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
// If UseSrcSubIdx is Set, SubIdx also applies to SrcReg, and only uses of
// SrcReg:SubIdx should be replaced.
bool UseSrcSubIdx = TM->getRegisterInfo()->
- getSubClassWithSubReg(MRI->getRegClass(SrcReg), SubIdx) != 0;
+ getSubClassWithSubReg(MRI->getRegClass(SrcReg), SubIdx) != nullptr;
// The source has other uses. See if we can replace the other uses with use of
// the result of the extension.
SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
- for (MachineRegisterInfo::use_nodbg_iterator
- UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
- UI != UE; ++UI)
- ReachedBBs.insert(UI->getParent());
+ for (MachineInstr &UI : MRI->use_nodbg_instructions(DstReg))
+ ReachedBBs.insert(UI.getParent());
// Uses that are in the same BB of uses of the result of the instruction.
SmallVector<MachineOperand*, 8> Uses;
@@ -199,11 +302,8 @@ optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
SmallVector<MachineOperand*, 8> ExtendedUses;
bool ExtendLife = true;
- for (MachineRegisterInfo::use_nodbg_iterator
- UI = MRI->use_nodbg_begin(SrcReg), UE = MRI->use_nodbg_end();
- UI != UE; ++UI) {
- MachineOperand &UseMO = UI.getOperand();
- MachineInstr *UseMI = &*UI;
+ for (MachineOperand &UseMO : MRI->use_nodbg_operands(SrcReg)) {
+ MachineInstr *UseMI = UseMO.getParent();
if (UseMI == MI)
continue;
@@ -270,11 +370,9 @@ optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
// Look for PHI uses of the extended result, we don't want to extend the
// liveness of a PHI input. It breaks all kinds of assumptions down
// stream. A PHI use is expected to be the kill of its source values.
- for (MachineRegisterInfo::use_nodbg_iterator
- UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
- UI != UE; ++UI)
- if (UI->isPHI())
- PHIBBs.insert(UI->getParent());
+ for (MachineInstr &UI : MRI->use_nodbg_instructions(DstReg))
+ if (UI.isPHI())
+ PHIBBs.insert(UI.getParent());
const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
@@ -364,7 +462,7 @@ static bool shareSameRegisterFile(const TargetRegisterInfo &TRI,
unsigned SrcIdx, DefIdx;
if (SrcSubReg && DefSubReg)
return TRI.getCommonSuperRegClass(SrcRC, SrcSubReg, DefRC, DefSubReg,
- SrcIdx, DefIdx) != NULL;
+ SrcIdx, DefIdx) != nullptr;
// At most one of the register is a sub register, make it Src to avoid
// duplicating the test.
if (!SrcSubReg) {
@@ -374,9 +472,9 @@ static bool shareSameRegisterFile(const TargetRegisterInfo &TRI,
// One of the register is a sub register, check if we can get a superclass.
if (SrcSubReg)
- return TRI.getMatchingSuperRegClass(SrcRC, DefRC, SrcSubReg) != NULL;
+ return TRI.getMatchingSuperRegClass(SrcRC, DefRC, SrcSubReg) != nullptr;
// Plain copy.
- return TRI.getCommonSubClass(DefRC, SrcRC) != NULL;
+ return TRI.getCommonSubClass(DefRC, SrcRC) != nullptr;
}
/// \brief Get the index of the definition and source for \p Copy
@@ -448,31 +546,32 @@ bool PeepholeOptimizer::optimizeCopyOrBitcast(MachineInstr *MI) {
unsigned Src;
unsigned SrcSubReg;
bool ShouldRewrite = false;
- MachineInstr *Copy = MI;
const TargetRegisterInfo &TRI = *TM->getRegisterInfo();
- // Follow the chain of copies until we reach the top or find a
- // more suitable source.
+ // Follow the chain of copies until we reach the top of the use-def chain
+ // or find a more suitable source.
+ ValueTracker ValTracker(*MI, DefIdx, DefSubReg, !DisableAdvCopyOpt, MRI);
do {
- unsigned CopyDefIdx, CopySrcIdx;
- if (!getCopyOrBitcastDefUseIdx(*Copy, CopyDefIdx, CopySrcIdx))
+ unsigned CopySrcIdx, CopySrcSubReg;
+ if (!ValTracker.getNextSource(CopySrcIdx, CopySrcSubReg))
break;
- const MachineOperand &MO = Copy->getOperand(CopySrcIdx);
- assert(MO.isReg() && "Copies must be between registers.");
- Src = MO.getReg();
-
+ Src = ValTracker.getReg();
+ SrcSubReg = CopySrcSubReg;
+
+ // Do not extend the live-ranges of physical registers as they add
+ // constraints to the register allocator.
+ // Moreover, if we want to extend the live-range of a physical register,
+ // unlike an SSA virtual register, we would have to check that it is not
+ // redefined before the related use.
if (TargetRegisterInfo::isPhysicalRegister(Src))
break;
const TargetRegisterClass *SrcRC = MRI->getRegClass(Src);
- SrcSubReg = MO.getSubReg();
// If this source does not incur a cross register bank copy, use it.
ShouldRewrite = shareSameRegisterFile(TRI, DefRC, DefSubReg, SrcRC,
SrcSubReg);
- // Follow the chain of copies: get the definition of Src.
- Copy = MRI->getVRegDef(Src);
- } while (!ShouldRewrite && Copy && (Copy->isCopy() || Copy->isBitcast()));
+ } while (!ShouldRewrite);
// If we did not find a more suitable source, there is nothing to optimize.
if (!ShouldRewrite || Src == MI->getOperand(SrcIdx).getReg())
@@ -488,6 +587,9 @@ bool PeepholeOptimizer::optimizeCopyOrBitcast(MachineInstr *MI) {
MRI->replaceRegWith(Def, NewVR);
MRI->clearKillFlags(NewVR);
+ // We extended the lifetime of Src.
+ // Clear the kill flags to account for that.
+ MRI->clearKillFlags(Src);
MI->eraseFromParent();
++NumCopiesBitcasts;
return true;
@@ -496,8 +598,9 @@ bool PeepholeOptimizer::optimizeCopyOrBitcast(MachineInstr *MI) {
/// isLoadFoldable - Check whether MI is a candidate for folding into a later
/// instruction. We only fold loads to virtual registers and the virtual
/// register defined has a single use.
-bool PeepholeOptimizer::isLoadFoldable(MachineInstr *MI,
- unsigned &FoldAsLoadDefReg) {
+bool PeepholeOptimizer::isLoadFoldable(
+ MachineInstr *MI,
+ SmallSet<unsigned, 16> &FoldAsLoadDefCandidates) {
if (!MI->canFoldAsLoad() || !MI->mayLoad())
return false;
const MCInstrDesc &MCID = MI->getDesc();
@@ -505,13 +608,13 @@ bool PeepholeOptimizer::isLoadFoldable(MachineInstr *MI,
return false;
unsigned Reg = MI->getOperand(0).getReg();
- // To reduce compilation time, we check MRI->hasOneUse when inserting
+ // To reduce compilation time, we check MRI->hasOneNonDBGUse when inserting
// loads. It should be checked when processing uses of the load, since
// uses can be removed during peephole.
if (!MI->getOperand(0).getSubReg() &&
TargetRegisterInfo::isVirtualRegister(Reg) &&
- MRI->hasOneUse(Reg)) {
- FoldAsLoadDefReg = Reg;
+ MRI->hasOneNonDBGUse(Reg)) {
+ FoldAsLoadDefCandidates.insert(Reg);
return true;
}
return false;
@@ -561,6 +664,9 @@ bool PeepholeOptimizer::foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
}
bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
+ if (skipOptnoneFunction(*MF.getFunction()))
+ return false;
+
DEBUG(dbgs() << "********** PEEPHOLE OPTIMIZER **********\n");
DEBUG(dbgs() << "********** Function: " << MF.getName() << '\n');
@@ -570,22 +676,18 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
TM = &MF.getTarget();
TII = TM->getInstrInfo();
MRI = &MF.getRegInfo();
- DT = Aggressive ? &getAnalysis<MachineDominatorTree>() : 0;
+ DT = Aggressive ? &getAnalysis<MachineDominatorTree>() : nullptr;
bool Changed = false;
- SmallPtrSet<MachineInstr*, 8> LocalMIs;
- SmallSet<unsigned, 4> ImmDefRegs;
- DenseMap<unsigned, MachineInstr*> ImmDefMIs;
- unsigned FoldAsLoadDefReg;
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
MachineBasicBlock *MBB = &*I;
bool SeenMoveImm = false;
- LocalMIs.clear();
- ImmDefRegs.clear();
- ImmDefMIs.clear();
- FoldAsLoadDefReg = 0;
+ SmallPtrSet<MachineInstr*, 8> LocalMIs;
+ SmallSet<unsigned, 4> ImmDefRegs;
+ DenseMap<unsigned, MachineInstr*> ImmDefMIs;
+ SmallSet<unsigned, 16> FoldAsLoadDefCandidates;
for (MachineBasicBlock::iterator
MII = I->begin(), MIE = I->end(); MII != MIE; ) {
@@ -594,16 +696,20 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
++MII;
LocalMIs.insert(MI);
+ // Skip debug values. They should not affect this peephole optimization.
+ if (MI->isDebugValue())
+ continue;
+
// If there exists an instruction which belongs to the following
- // categories, we will discard the load candidate.
- if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
- MI->isKill() || MI->isInlineAsm() || MI->isDebugValue() ||
+ // categories, we will discard the load candidates.
+ if (MI->isPosition() || MI->isPHI() || MI->isImplicitDef() ||
+ MI->isKill() || MI->isInlineAsm() ||
MI->hasUnmodeledSideEffects()) {
- FoldAsLoadDefReg = 0;
+ FoldAsLoadDefCandidates.clear();
continue;
}
if (MI->mayStore() || MI->isCall())
- FoldAsLoadDefReg = 0;
+ FoldAsLoadDefCandidates.clear();
if (((MI->isBitcast() || MI->isCopy()) && optimizeCopyOrBitcast(MI)) ||
(MI->isCompare() && optimizeCmpInstr(MI, MBB)) ||
@@ -630,26 +736,43 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
// Check whether MI is a load candidate for folding into a later
// instruction. If MI is not a candidate, check whether we can fold an
// earlier load into MI.
- if (!isLoadFoldable(MI, FoldAsLoadDefReg) && FoldAsLoadDefReg) {
- // We need to fold load after optimizeCmpInstr, since optimizeCmpInstr
- // can enable folding by converting SUB to CMP.
- MachineInstr *DefMI = 0;
- MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI,
- FoldAsLoadDefReg, DefMI);
- if (FoldMI) {
- // Update LocalMIs since we replaced MI with FoldMI and deleted DefMI.
- DEBUG(dbgs() << "Replacing: " << *MI);
- DEBUG(dbgs() << " With: " << *FoldMI);
- LocalMIs.erase(MI);
- LocalMIs.erase(DefMI);
- LocalMIs.insert(FoldMI);
- MI->eraseFromParent();
- DefMI->eraseFromParent();
- ++NumLoadFold;
-
- // MI is replaced with FoldMI.
- Changed = true;
- continue;
+ if (!isLoadFoldable(MI, FoldAsLoadDefCandidates) &&
+ !FoldAsLoadDefCandidates.empty()) {
+ const MCInstrDesc &MIDesc = MI->getDesc();
+ for (unsigned i = MIDesc.getNumDefs(); i != MIDesc.getNumOperands();
+ ++i) {
+ const MachineOperand &MOp = MI->getOperand(i);
+ if (!MOp.isReg())
+ continue;
+ unsigned FoldAsLoadDefReg = MOp.getReg();
+ if (FoldAsLoadDefCandidates.count(FoldAsLoadDefReg)) {
+ // We need to fold load after optimizeCmpInstr, since
+ // optimizeCmpInstr can enable folding by converting SUB to CMP.
+ // Save FoldAsLoadDefReg because optimizeLoadInstr() resets it and
+ // we need it for markUsesInDebugValueAsUndef().
+ unsigned FoldedReg = FoldAsLoadDefReg;
+ MachineInstr *DefMI = nullptr;
+ MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI,
+ FoldAsLoadDefReg,
+ DefMI);
+ if (FoldMI) {
+ // Update LocalMIs since we replaced MI with FoldMI and deleted
+ // DefMI.
+ DEBUG(dbgs() << "Replacing: " << *MI);
+ DEBUG(dbgs() << " With: " << *FoldMI);
+ LocalMIs.erase(MI);
+ LocalMIs.erase(DefMI);
+ LocalMIs.insert(FoldMI);
+ MI->eraseFromParent();
+ DefMI->eraseFromParent();
+ MRI->markUsesInDebugValueAsUndef(FoldedReg);
+ FoldAsLoadDefCandidates.erase(FoldedReg);
+ ++NumLoadFold;
+ // MI is replaced with FoldMI.
+ Changed = true;
+ break;
+ }
+ }
}
}
}
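
The hunk above replaces the single FoldAsLoadDefReg slot with a set of candidate registers: every foldable load in the block registers its defined vreg, barriers (calls, stores, label-like instructions) clear the set, and a later user that reads one of the candidates triggers the actual fold through optimizeLoadInstr. A minimal stand-alone sketch of that collect-then-fold shape, using plain containers and made-up names (Instr, foldLoadsInBlock) rather than the real machine IR, could look like this:

#include <set>
#include <vector>

// Hypothetical, simplified stand-ins for the machine IR types used above.
struct Instr {
  bool IsFoldableLoad;           // canFoldAsLoad() && mayLoad(), single-use def
  unsigned DefReg;               // register defined by the load (0 if none)
  std::vector<unsigned> UseRegs; // registers read by this instruction
  bool IsBarrier;                // call/store/etc. that invalidates candidates
};

// Returns how many loads were (notionally) folded in one block.
unsigned foldLoadsInBlock(const std::vector<Instr> &Block) {
  std::set<unsigned> Candidates; // plays the role of FoldAsLoadDefCandidates
  unsigned NumFolded = 0;
  for (const Instr &I : Block) {
    if (I.IsBarrier) {           // mirrors FoldAsLoadDefCandidates.clear()
      Candidates.clear();
      continue;
    }
    if (I.IsFoldableLoad && I.DefReg) {
      Candidates.insert(I.DefReg); // remember the load's def for later folding
      continue;
    }
    // A non-load user: fold the first candidate it actually reads.
    for (unsigned R : I.UseRegs) {
      if (Candidates.erase(R)) {   // candidate consumed by this instruction
        ++NumFolded;
        break;
      }
    }
  }
  return NumFolded;
}

The real pass additionally erases the folded candidate, deletes the original load, and marks its debug uses as undef, as the diff shows.
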
@@ -657,3 +780,251 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
return Changed;
}
+
+bool ValueTracker::getNextSourceFromCopy(unsigned &SrcIdx,
+ unsigned &SrcSubReg) {
+ assert(Def->isCopy() && "Invalid definition");
+ // Copy instructions are supposed to be: Def = Src.
+ // If someone breaks this assumption, bad things will happen everywhere.
+ assert(Def->getDesc().getNumOperands() == 2 && "Invalid number of operands");
+
+ if (Def->getOperand(DefIdx).getSubReg() != DefSubReg)
+ // If we look for a different subreg, it means we want a subreg of src.
+ // Bail as we do not support composing subregs yet.
+ return false;
+ // Otherwise, we want the whole source.
+ SrcIdx = 1;
+ SrcSubReg = Def->getOperand(SrcIdx).getSubReg();
+ return true;
+}
+
+bool ValueTracker::getNextSourceFromBitcast(unsigned &SrcIdx,
+ unsigned &SrcSubReg) {
+ assert(Def->isBitcast() && "Invalid definition");
+
+ // Bail if there are effects that a plain copy will not expose.
+ if (Def->hasUnmodeledSideEffects())
+ return false;
+
+ // Bitcasts with more than one def are not supported.
+ if (Def->getDesc().getNumDefs() != 1)
+ return false;
+ if (Def->getOperand(DefIdx).getSubReg() != DefSubReg)
+ // If we look for a different subreg, it means we want a subreg of the src.
+ // Bail as we do not support composing subregs yet.
+ return false;
+
+ SrcIdx = Def->getDesc().getNumOperands();
+ for (unsigned OpIdx = DefIdx + 1, EndOpIdx = SrcIdx; OpIdx != EndOpIdx;
+ ++OpIdx) {
+ const MachineOperand &MO = Def->getOperand(OpIdx);
+ if (!MO.isReg() || !MO.getReg())
+ continue;
+ assert(!MO.isDef() && "We should have skipped all the definitions by now");
+ if (SrcIdx != EndOpIdx)
+ // Multiple sources?
+ return false;
+ SrcIdx = OpIdx;
+ }
+ SrcSubReg = Def->getOperand(SrcIdx).getSubReg();
+ return true;
+}
+
+bool ValueTracker::getNextSourceFromRegSequence(unsigned &SrcIdx,
+ unsigned &SrcSubReg) {
+ assert(Def->isRegSequence() && "Invalid definition");
+
+ if (Def->getOperand(DefIdx).getSubReg())
+ // If we are composing subregs, bail out.
+ // The case we are checking is Def.<subreg> = REG_SEQUENCE.
+ // This should almost never happen as the SSA property is tracked at
+ // the register level (as opposed to the subreg level).
+ // I.e.,
+ // Def.sub0 =
+ // Def.sub1 =
+ // is a valid SSA representation for Def.sub0 and Def.sub1, but not for
+ // Def. Thus, it must not be generated.
+ // However, some code could theoretically generate a single
+ // Def.sub0 (i.e., not defining the other subregs) and we would
+ // have this case.
+ // If we can ascertain (or force) that this never happens, we could
+ // turn that into an assertion.
+ return false;
+
+ // We are looking at:
+ // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
+ // Check if one of the operands defines the subreg we are interested in.
+ for (unsigned OpIdx = DefIdx + 1, EndOpIdx = Def->getNumOperands();
+ OpIdx != EndOpIdx; OpIdx += 2) {
+ const MachineOperand &MOSubIdx = Def->getOperand(OpIdx + 1);
+ assert(MOSubIdx.isImm() &&
+ "One of the subindex of the reg_sequence is not an immediate");
+ if (MOSubIdx.getImm() == DefSubReg) {
+ assert(Def->getOperand(OpIdx).isReg() &&
+ "One of the source of the reg_sequence is not a register");
+ SrcIdx = OpIdx;
+ SrcSubReg = Def->getOperand(SrcIdx).getSubReg();
+ return true;
+ }
+ }
+
+ // If the subreg we are tracking is super-defined by another subreg,
+ // we could follow this value. However, this would require composing
+ // the subregs and we do not do that for now.
+ return false;
+}
+
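+
+ As the comment block above describes, a REG_SEQUENCE lists its sources as
+ (register, subregister-index) pairs, and the tracker only looks through it
+ when one pair defines exactly the subindex being tracked; anything that
+ would require composing subregister indices is rejected. A rough,
+ self-contained illustration of that lookup (RegSubPair and
+ lookThroughRegSequence are invented names, not part of the pass):

#include <utility>
#include <vector>

// Hypothetical flattening of "Def = REG_SEQUENCE v0, sub0, v1, sub1, ..."
// into (source register, subregister index) pairs.
using RegSubPair = std::pair<unsigned, unsigned>;

// Returns the source register that fully defines DefSubReg, or 0 if the
// wanted lanes would only be covered by composing subregisters (not handled).
unsigned lookThroughRegSequence(const std::vector<RegSubPair> &Operands,
                                unsigned DefSubReg) {
  for (const RegSubPair &P : Operands)
    if (P.second == DefSubReg) // exact subindex match, no composition needed
      return P.first;
  return 0;
}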
+bool ValueTracker::getNextSourceFromInsertSubreg(unsigned &SrcIdx,
+ unsigned &SrcSubReg) {
+ assert(Def->isInsertSubreg() && "Invalid definition");
+ if (Def->getOperand(DefIdx).getSubReg())
+ // If we are composing subregs, bail out.
+ // Same remark as getNextSourceFromRegSequence.
+ // I.e., this may be turned into an assert.
+ return false;
+
+ // We are looking at:
+ // Def = INSERT_SUBREG v0, v1, sub1
+ // There are two cases:
+ // 1. DefSubReg == sub1, get v1.
+ // 2. DefSubReg != sub1, the value may be available through v0.
+
+ // #1 Check if the inserted register matches the required subindex.
+ unsigned InsertedSubReg = Def->getOperand(3).getImm();
+ if (InsertedSubReg == DefSubReg) {
+ SrcIdx = 2;
+ SrcSubReg = Def->getOperand(SrcIdx).getSubReg();
+ return true;
+ }
+ // #2 Otherwise, if the subregister we are looking for is not partially
+ // defined by the inserted element, we can look through the main
+ // register (v0).
+ // To check for overlap, we need an MRI and a TRI.
+ if (!MRI)
+ return false;
+
+ const MachineOperand &MODef = Def->getOperand(DefIdx);
+ const MachineOperand &MOBase = Def->getOperand(1);
+ // If the result register (Def) and the base register (v0) do not
+ // have the same register class or if we have to compose
+ // subregisters, bail out.
+ if (MRI->getRegClass(MODef.getReg()) != MRI->getRegClass(MOBase.getReg()) ||
+ MOBase.getSubReg())
+ return false;
+
+ // Get the TRI and check if the inserted subregister overlaps with the
+ // subregister we are tracking.
+ const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
+ if (!TRI ||
+ (TRI->getSubRegIndexLaneMask(DefSubReg) &
+ TRI->getSubRegIndexLaneMask(InsertedSubReg)) != 0)
+ return false;
+ // At this point, the value is available in v0 via the same subreg
+ // we used for Def.
+ SrcIdx = 1;
+ SrcSubReg = DefSubReg;
+ return true;
+}
+
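+
+ The second case above relies on lane masks: the inserted element can be
+ ignored only when its lanes do not intersect the lanes of the subregister
+ being tracked, which is exactly the bitwise test against
+ TRI->getSubRegIndexLaneMask. A toy version of that disjointness check, with
+ made-up masks standing in for the target's real lane-mask table:

#include <cstdint>

// Toy lane masks, one bit per 32-bit lane of a 128-bit register; stand-ins
// for what TRI->getSubRegIndexLaneMask() returns in the real pass.
enum : unsigned { SubLo = 0, SubHi = 1 };

static uint64_t laneMaskFor(unsigned Idx) {
  return Idx == SubLo ? 0x3u : 0xCu; // SubLo covers lanes 0-1, SubHi lanes 2-3
}

// The tracked value survives INSERT_SUBREG through the base register (v0)
// only if the inserted lanes are disjoint from the lanes being tracked.
static bool canLookThroughInsert(unsigned TrackedSub, unsigned InsertedSub) {
  return (laneMaskFor(TrackedSub) & laneMaskFor(InsertedSub)) == 0;
}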
+bool ValueTracker::getNextSourceFromExtractSubreg(unsigned &SrcIdx,
+ unsigned &SrcSubReg) {
+ assert(Def->isExtractSubreg() && "Invalid definition");
+ // We are looking at:
+ // Def = EXTRACT_SUBREG v0, sub0
+
+ // Bail if we have to compose subregisters.
+ // Indeed, if DefSubReg != 0, we would have to compose it with sub0.
+ if (DefSubReg)
+ return false;
+
+ // Bail if we have to compose subregisters.
+ // Likewise, if v0.subreg != 0, we would have to compose v0.subreg with sub0.
+ if (Def->getOperand(1).getSubReg())
+ return false;
+ // Otherwise, the value is available in v0.sub0.
+ SrcIdx = 1;
+ SrcSubReg = Def->getOperand(2).getImm();
+ return true;
+}
+
+bool ValueTracker::getNextSourceFromSubregToReg(unsigned &SrcIdx,
+ unsigned &SrcSubReg) {
+ assert(Def->isSubregToReg() && "Invalid definition");
+ // We are looking at:
+ // Def = SUBREG_TO_REG Imm, v0, sub0
+
+ // Bail if we have to compose subregisters.
+ // If DefSubReg != sub0, we would have to check that all the bits
+ // we track are included in sub0 and if yes, we would have to
+ // determine the right subreg in v0.
+ if (DefSubReg != Def->getOperand(3).getImm())
+ return false;
+ // Bail if we have to compose subregisters.
+ // Likewise, if v0.subreg != 0, we would have to compose it with sub0.
+ if (Def->getOperand(2).getSubReg())
+ return false;
+
+ SrcIdx = 2;
+ SrcSubReg = Def->getOperand(3).getImm();
+ return true;
+}
+
+bool ValueTracker::getNextSourceImpl(unsigned &SrcIdx, unsigned &SrcSubReg) {
+ assert(Def && "This method needs a valid definition");
+
+ assert(
+ (DefIdx < Def->getDesc().getNumDefs() || Def->getDesc().isVariadic()) &&
+ Def->getOperand(DefIdx).isDef() && "Invalid DefIdx");
+ if (Def->isCopy())
+ return getNextSourceFromCopy(SrcIdx, SrcSubReg);
+ if (Def->isBitcast())
+ return getNextSourceFromBitcast(SrcIdx, SrcSubReg);
+ // All the remaining cases involve "complex" instructions.
+ // Bail if we did not ask for advanced tracking.
+ if (!UseAdvancedTracking)
+ return false;
+ if (Def->isRegSequence())
+ return getNextSourceFromRegSequence(SrcIdx, SrcSubReg);
+ if (Def->isInsertSubreg())
+ return getNextSourceFromInsertSubreg(SrcIdx, SrcSubReg);
+ if (Def->isExtractSubreg())
+ return getNextSourceFromExtractSubreg(SrcIdx, SrcSubReg);
+ if (Def->isSubregToReg())
+ return getNextSourceFromSubregToReg(SrcIdx, SrcSubReg);
+ return false;
+}
+
+const MachineInstr *ValueTracker::getNextSource(unsigned &SrcIdx,
+ unsigned &SrcSubReg) {
+ // If we reach a point where we cannot move up in the use-def chain,
+ // there is nothing we can get.
+ if (!Def)
+ return nullptr;
+
+ const MachineInstr *PrevDef = nullptr;
+ // Try to find the next source.
+ if (getNextSourceImpl(SrcIdx, SrcSubReg)) {
+ // Update definition, definition index, and subregister for the
+ // next call of getNextSource.
+ const MachineOperand &MO = Def->getOperand(SrcIdx);
+ assert(MO.isReg() && !MO.isDef() && "Source is invalid");
+ // Update the current register.
+ Reg = MO.getReg();
+ // Update the return value before moving up in the use-def chain.
+ PrevDef = Def;
+ // If we can still move up in the use-def chain, move to the next
+ // definition.
+ if (!TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ Def = MRI->getVRegDef(Reg);
+ DefIdx = MRI->def_begin(Reg).getOperandNo();
+ DefSubReg = SrcSubReg;
+ return PrevDef;
+ }
+ }
+ // If we end up here, this means we will not be able to find another source
+ // for the next iteration.
+ // Make sure any new call to getNextSource bails out early by cutting the
+ // use-def chain.
+ Def = nullptr;
+ return PrevDef;
+}
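
Taken together, getNextSource is meant to be called in a loop: each call reports the instruction one step further up the use-def chain (updating Reg, Def, DefIdx and DefSubReg for the next step) and returns null once the chain reaches a physical register or an instruction the tracker cannot look through. A simplified, self-contained model of that walk (Node and walkDefChain are illustrative only, and the chain is assumed acyclic, as SSA def chains are):

#include <cstddef>
#include <vector>

// Each node knows the index of the node that defines its source, or -1 when
// the chain must be cut, mirroring ValueTracker setting Def to nullptr.
struct Node {
  int SourceIdx;
};

// Walks up the def chain from Start and returns how many definitions were
// visited before the chain ended (keep calling until nothing is returned).
std::size_t walkDefChain(const std::vector<Node> &Nodes, std::size_t Start) {
  std::size_t Visited = 0;
  int Cur = static_cast<int>(Start);
  while (Cur >= 0 && static_cast<std::size_t>(Cur) < Nodes.size()) {
    ++Visited;
    Cur = Nodes[static_cast<std::size_t>(Cur)].SourceIdx; // move one step up
  }
  return Visited;
}
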
diff --git a/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp b/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp
index 1afc1ec..a1ab344 100644
--- a/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp
+++ b/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp
@@ -18,7 +18,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "post-RA-sched"
#include "llvm/CodeGen/Passes.h"
#include "AggressiveAntiDepBreaker.h"
#include "AntiDepBreaker.h"
@@ -30,7 +29,6 @@
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
@@ -48,6 +46,8 @@
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "post-RA-sched"
+
STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");
@@ -86,7 +86,7 @@ namespace {
static char ID;
PostRAScheduler() : MachineFunctionPass(ID) {}
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<AliasAnalysis>();
AU.addRequired<TargetPassConfig>();
@@ -97,7 +97,12 @@ namespace {
MachineFunctionPass::getAnalysisUsage(AU);
}
- bool runOnMachineFunction(MachineFunction &Fn);
+ bool runOnMachineFunction(MachineFunction &Fn) override;
+
+ bool enablePostRAScheduler(
+ const TargetSubtargetInfo &ST, CodeGenOpt::Level OptLevel,
+ TargetSubtargetInfo::AntiDepBreakMode &Mode,
+ TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const;
};
char PostRAScheduler::ID = 0;
@@ -121,9 +126,6 @@ namespace {
/// AA - AliasAnalysis for making memory reference queries.
AliasAnalysis *AA;
- /// LiveRegs - true if the register is live.
- BitVector LiveRegs;
-
/// The schedule. Null SUnit*'s represent noop instructions.
std::vector<SUnit*> Sequence;
@@ -145,23 +147,23 @@ namespace {
/// startBlock - Initialize register live-range state for scheduling in
/// this block.
///
- void startBlock(MachineBasicBlock *BB);
+ void startBlock(MachineBasicBlock *BB) override;
// Set the index of RegionEnd within the current BB.
void setEndIndex(unsigned EndIdx) { EndIndex = EndIdx; }
/// Initialize the scheduler state for the next scheduling region.
- virtual void enterRegion(MachineBasicBlock *bb,
- MachineBasicBlock::iterator begin,
- MachineBasicBlock::iterator end,
- unsigned regioninstrs);
+ void enterRegion(MachineBasicBlock *bb,
+ MachineBasicBlock::iterator begin,
+ MachineBasicBlock::iterator end,
+ unsigned regioninstrs) override;
/// Notify that the scheduler has finished scheduling the current region.
- virtual void exitRegion();
+ void exitRegion() override;
/// Schedule - Schedule the instruction range using list scheduling.
///
- void schedule();
+ void schedule() override;
void EmitSchedule();
@@ -172,26 +174,16 @@ namespace {
/// finishBlock - Clean up register live-range state.
///
- void finishBlock();
-
- /// FixupKills - Fix register kill flags that have been made
- /// invalid due to scheduling
- ///
- void FixupKills(MachineBasicBlock *MBB);
+ void finishBlock() override;
private:
void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
void ReleaseSuccessors(SUnit *SU);
void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
void ListScheduleTopDown();
- void StartBlockForKills(MachineBasicBlock *BB);
-
- // ToggleKillFlag - Toggle a register operand kill flag. Other
- // adjustments may be made to the instruction if necessary. Return
- // true if the operand has been deleted, false if not.
- bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
void dumpSchedule() const;
+ void emitNoop(unsigned CurCycle);
};
}
@@ -205,9 +197,8 @@ SchedulePostRATDList::SchedulePostRATDList(
AliasAnalysis *AA, const RegisterClassInfo &RCI,
TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs)
- : ScheduleDAGInstrs(MF, MLI, MDT, /*IsPostRA=*/true), AA(AA),
- LiveRegs(TRI->getNumRegs()), EndIndex(0)
-{
+ : ScheduleDAGInstrs(MF, MLI, MDT, /*IsPostRA=*/true), AA(AA), EndIndex(0) {
+
const TargetMachine &TM = MF.getTarget();
const InstrItineraryData *InstrItins = TM.getInstrItineraryData();
HazardRec =
@@ -220,7 +211,7 @@ SchedulePostRATDList::SchedulePostRATDList(
((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL) ?
(AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL) ?
- (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : NULL));
+ (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : nullptr));
}
SchedulePostRATDList::~SchedulePostRATDList() {
@@ -259,7 +250,21 @@ void SchedulePostRATDList::dumpSchedule() const {
}
#endif
+bool PostRAScheduler::enablePostRAScheduler(
+ const TargetSubtargetInfo &ST,
+ CodeGenOpt::Level OptLevel,
+ TargetSubtargetInfo::AntiDepBreakMode &Mode,
+ TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const {
+ Mode = ST.getAntiDepBreakMode();
+ ST.getCriticalPathRCs(CriticalPathRCs);
+ return ST.enablePostMachineScheduler() &&
+ OptLevel >= ST.getOptLevelToEnablePostRAScheduler();
+}
+
bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
+ if (skipOptnoneFunction(*Fn.getFunction()))
+ return false;
+
TII = Fn.getTarget().getInstrInfo();
MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
@@ -278,9 +283,10 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
} else {
// Check that post-RA scheduling is enabled for this target.
// This may upgrade the AntiDepMode.
- const TargetSubtargetInfo &ST = Fn.getTarget().getSubtarget<TargetSubtargetInfo>();
- if (!ST.enablePostRAScheduler(PassConfig->getOptLevel(), AntiDepMode,
- CriticalPathRCs))
+ const TargetSubtargetInfo &ST =
+ Fn.getTarget().getSubtarget<TargetSubtargetInfo>();
+ if (!enablePostRAScheduler(ST, PassConfig->getOptLevel(),
+ AntiDepMode, CriticalPathRCs))
return false;
}
@@ -320,7 +326,7 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
MachineBasicBlock::iterator Current = MBB->end();
unsigned Count = MBB->size(), CurrentCount = Count;
for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
- MachineInstr *MI = llvm::prior(I);
+ MachineInstr *MI = std::prev(I);
--Count;
// Calls are not scheduling boundaries before register allocation, but
// post-ra we don't gain anything by scheduling across calls since we
@@ -352,7 +358,7 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
Scheduler.finishBlock();
// Update register kills
- Scheduler.FixupKills(MBB);
+ Scheduler.fixupKills(MBB);
}
return true;
@@ -367,7 +373,7 @@ void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
// Reset the hazard recognizer and anti-dep breaker.
HazardRec->Reset();
- if (AntiDepBreak != NULL)
+ if (AntiDepBreak)
AntiDepBreak->StartBlock(BB);
}
@@ -377,7 +383,7 @@ void SchedulePostRATDList::schedule() {
// Build the scheduling graph.
buildSchedGraph(AA);
- if (AntiDepBreak != NULL) {
+ if (AntiDepBreak) {
unsigned Broken =
AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,
EndIndex, DbgValues);
@@ -409,162 +415,20 @@ void SchedulePostRATDList::schedule() {
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
- if (AntiDepBreak != NULL)
+ if (AntiDepBreak)
AntiDepBreak->Observe(MI, Count, EndIndex);
}
/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::finishBlock() {
- if (AntiDepBreak != NULL)
+ if (AntiDepBreak)
AntiDepBreak->FinishBlock();
// Call the superclass.
ScheduleDAGInstrs::finishBlock();
}
-/// StartBlockForKills - Initialize register live-range state for updating kills
-///
-void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
- // Start with no live registers.
- LiveRegs.reset();
-
- // Examine the live-in regs of all successors.
- for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
- SE = BB->succ_end(); SI != SE; ++SI) {
- for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
- E = (*SI)->livein_end(); I != E; ++I) {
- unsigned Reg = *I;
- // Repeat, for reg and all subregs.
- for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
- SubRegs.isValid(); ++SubRegs)
- LiveRegs.set(*SubRegs);
- }
- }
-}
-
-bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
- MachineOperand &MO) {
- // Setting kill flag...
- if (!MO.isKill()) {
- MO.setIsKill(true);
- return false;
- }
-
- // If MO itself is live, clear the kill flag...
- if (LiveRegs.test(MO.getReg())) {
- MO.setIsKill(false);
- return false;
- }
-
- // If any subreg of MO is live, then create an imp-def for that
- // subreg and keep MO marked as killed.
- MO.setIsKill(false);
- bool AllDead = true;
- const unsigned SuperReg = MO.getReg();
- MachineInstrBuilder MIB(MF, MI);
- for (MCSubRegIterator SubRegs(SuperReg, TRI); SubRegs.isValid(); ++SubRegs) {
- if (LiveRegs.test(*SubRegs)) {
- MIB.addReg(*SubRegs, RegState::ImplicitDefine);
- AllDead = false;
- }
- }
-
- if(AllDead)
- MO.setIsKill(true);
- return false;
-}
-
-/// FixupKills - Fix the register kill flags, they may have been made
-/// incorrect by instruction reordering.
-///
-void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
- DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');
-
- BitVector killedRegs(TRI->getNumRegs());
-
- StartBlockForKills(MBB);
-
- // Examine block from end to start...
- unsigned Count = MBB->size();
- for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
- I != E; --Count) {
- MachineInstr *MI = --I;
- if (MI->isDebugValue())
- continue;
-
- // Update liveness. Registers that are defed but not used in this
- // instruction are now dead. Mark register and all subregs as they
- // are completely defined.
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (MO.isRegMask())
- LiveRegs.clearBitsNotInMask(MO.getRegMask());
- if (!MO.isReg()) continue;
- unsigned Reg = MO.getReg();
- if (Reg == 0) continue;
- if (!MO.isDef()) continue;
- // Ignore two-addr defs.
- if (MI->isRegTiedToUseOperand(i)) continue;
-
- // Repeat for reg and all subregs.
- for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
- SubRegs.isValid(); ++SubRegs)
- LiveRegs.reset(*SubRegs);
- }
-
- // Examine all used registers and set/clear kill flag. When a
- // register is used multiple times we only set the kill flag on
- // the first use. Don't set kill flags on undef operands.
- killedRegs.reset();
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
- unsigned Reg = MO.getReg();
- if ((Reg == 0) || MRI.isReserved(Reg)) continue;
-
- bool kill = false;
- if (!killedRegs.test(Reg)) {
- kill = true;
- // A register is not killed if any subregs are live...
- for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
- if (LiveRegs.test(*SubRegs)) {
- kill = false;
- break;
- }
- }
-
- // If subreg is not live, then register is killed if it became
- // live in this instruction
- if (kill)
- kill = !LiveRegs.test(Reg);
- }
-
- if (MO.isKill() != kill) {
- DEBUG(dbgs() << "Fixing " << MO << " in ");
- // Warning: ToggleKillFlag may invalidate MO.
- ToggleKillFlag(MI, MO);
- DEBUG(MI->dump());
- }
-
- killedRegs.set(Reg);
- }
-
- // Mark any used register (that is not using undef) and subregs as
- // now live...
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
- unsigned Reg = MO.getReg();
- if ((Reg == 0) || MRI.isReserved(Reg)) continue;
-
- for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
- SubRegs.isValid(); ++SubRegs)
- LiveRegs.set(*SubRegs);
- }
- }
-}
-
//===----------------------------------------------------------------------===//
// Top-Down Scheduling
//===----------------------------------------------------------------------===//
@@ -583,7 +447,7 @@ void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
dbgs() << "*** Scheduling failed! ***\n";
SuccSU->dump(this);
dbgs() << " has been released too many times!\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
#endif
--SuccSU->NumPredsLeft;
@@ -630,6 +494,14 @@ void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
AvailableQueue.scheduledNode(SU);
}
+/// emitNoop - Add a noop to the current instruction sequence.
+void SchedulePostRATDList::emitNoop(unsigned CurCycle) {
+ DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
+ HazardRec->EmitNoop();
+ Sequence.push_back(nullptr); // NULL here means noop
+ ++NumNoops;
+}
+
/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
@@ -678,7 +550,7 @@ void SchedulePostRATDList::ListScheduleTopDown() {
DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));
- SUnit *FoundSUnit = 0;
+ SUnit *FoundSUnit = nullptr, *NotPreferredSUnit = nullptr;
bool HasNoopHazards = false;
while (!AvailableQueue.empty()) {
SUnit *CurSUnit = AvailableQueue.pop();
@@ -686,8 +558,19 @@ void SchedulePostRATDList::ListScheduleTopDown() {
ScheduleHazardRecognizer::HazardType HT =
HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
if (HT == ScheduleHazardRecognizer::NoHazard) {
- FoundSUnit = CurSUnit;
- break;
+ if (HazardRec->ShouldPreferAnother(CurSUnit)) {
+ if (!NotPreferredSUnit) {
+ // If this is the first non-preferred node for this cycle, then
+ // record it and continue searching for a preferred node. If this
+ // is not the first non-preferred node, then treat it as though
+ // there had been a hazard.
+ NotPreferredSUnit = CurSUnit;
+ continue;
+ }
+ } else {
+ FoundSUnit = CurSUnit;
+ break;
+ }
}
// Remember if this is a noop hazard.
@@ -696,6 +579,20 @@ void SchedulePostRATDList::ListScheduleTopDown() {
NotReady.push_back(CurSUnit);
}
+ // If we have a non-preferred node, push it back onto the available list.
+ // If we did not find a preferred node, then schedule this first
+ // non-preferred node.
+ if (NotPreferredSUnit) {
+ if (!FoundSUnit) {
+ DEBUG(dbgs() << "*** Will schedule a non-preferred instruction...\n");
+ FoundSUnit = NotPreferredSUnit;
+ } else {
+ AvailableQueue.push(NotPreferredSUnit);
+ }
+
+ NotPreferredSUnit = nullptr;
+ }
+
// Add the nodes that aren't ready back onto the available list.
if (!NotReady.empty()) {
AvailableQueue.push_all(NotReady);
@@ -704,6 +601,11 @@ void SchedulePostRATDList::ListScheduleTopDown() {
// If we found a node to schedule...
if (FoundSUnit) {
+ // If we need to emit noops prior to this instruction, then do so.
+ unsigned NumPreNoops = HazardRec->PreEmitNoops(FoundSUnit);
+ for (unsigned i = 0; i != NumPreNoops; ++i)
+ emitNoop(CurCycle);
+
// ... schedule the node...
ScheduleNodeTopDown(FoundSUnit, CurCycle);
HazardRec->EmitInstruction(FoundSUnit);
@@ -728,10 +630,7 @@ void SchedulePostRATDList::ListScheduleTopDown() {
// Otherwise, we have no instructions to issue and we have instructions
// that will fault if we don't do this right. This is the case for
// processors without pipeline interlocks and other cases.
- DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
- HazardRec->EmitNoop();
- Sequence.push_back(0); // NULL here means noop
- ++NumNoops;
+ emitNoop(CurCycle);
}
++CurCycle;
@@ -769,17 +668,17 @@ void SchedulePostRATDList::EmitSchedule() {
// Update the Begin iterator, as the first instruction in the block
// may have been scheduled later.
if (i == 0)
- RegionBegin = prior(RegionEnd);
+ RegionBegin = std::prev(RegionEnd);
}
// Reinsert any remaining debug_values.
for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
- std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
+ std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
MachineInstr *DbgValue = P.first;
MachineBasicBlock::iterator OrigPrivMI = P.second;
BB->splice(++OrigPrivMI, BB, DbgValue);
}
DbgValues.clear();
- FirstDbgValue = NULL;
+ FirstDbgValue = nullptr;
}
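
Besides the NULL-to-nullptr and override cleanups, the scheduler change above teaches ListScheduleTopDown about units the hazard recognizer would rather not issue: the first such unit is remembered as a fallback, the search continues for a preferred one, and the fallback is issued only when nothing better is available in the cycle. A small stand-alone sketch of that selection policy (Unit and pickUnit are invented for illustration):

#include <deque>
#include <vector>

// Hypothetical scheduling unit with a "prefer another if possible" flag,
// standing in for HazardRec->ShouldPreferAnother(SU) in the pass above.
struct Unit {
  int Id;
  bool PreferAnother;
};

// Pick a unit for the current cycle. Preferred units win; the first
// non-preferred unit is remembered as a fallback and used only when no
// preferred unit is available (mirrors the NotPreferredSUnit handling).
int pickUnit(std::deque<Unit> &Available) {
  std::vector<Unit> Skipped;
  int Chosen = -1;
  while (!Available.empty()) {
    Unit U = Available.front();
    Available.pop_front();
    if (!U.PreferAnother) {
      Chosen = U.Id;
      break;
    }
    Skipped.push_back(U); // keep it around in case nothing better shows up
  }
  if (Chosen < 0 && !Skipped.empty()) {
    Chosen = Skipped.front().Id;   // schedule the first non-preferred unit
    Skipped.erase(Skipped.begin());
  }
  for (const Unit &U : Skipped)    // requeue everything we passed over
    Available.push_back(U);
  return Chosen;                   // -1 means nothing could be scheduled
}
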
diff --git a/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp b/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
index 0c5173a..3129927 100644
--- a/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
+++ b/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
@@ -7,8 +7,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "processimplicitdefs"
-
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -21,6 +19,8 @@
using namespace llvm;
+#define DEBUG_TYPE "processimplicitdefs"
+
namespace {
/// Process IMPLICIT_DEF instructions and make sure there is one implicit_def
/// for each use. Add isUndef marker to implicit_def defs and their uses.
@@ -41,9 +41,9 @@ public:
initializeProcessImplicitDefsPass(*PassRegistry::getPassRegistry());
}
- virtual void getAnalysisUsage(AnalysisUsage &au) const;
+ void getAnalysisUsage(AnalysisUsage &au) const override;
- virtual bool runOnMachineFunction(MachineFunction &fn);
+ bool runOnMachineFunction(MachineFunction &fn) override;
};
} // end anonymous namespace
@@ -80,10 +80,7 @@ void ProcessImplicitDefs::processImplicitDef(MachineInstr *MI) {
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
// For virtual registers, mark all uses as <undef>, and convert users to
// implicit-def when possible.
- for (MachineRegisterInfo::use_nodbg_iterator UI =
- MRI->use_nodbg_begin(Reg),
- UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
- MachineOperand &MO = UI.getOperand();
+ for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
MO.setIsUndef();
MachineInstr *UserMI = MO.getParent();
if (!canTurnIntoImplicitDef(UserMI))
diff --git a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
index b0e494f..b98d210 100644
--- a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
@@ -16,10 +16,10 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "pei"
#include "PrologEpilogInserter.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineDominators.h"
@@ -29,7 +29,10 @@
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/CodeGen/StackProtector.h"
+#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
@@ -42,6 +45,8 @@
using namespace llvm;
+#define DEBUG_TYPE "pei"
+
char PEI::ID = 0;
char &llvm::PrologEpilogCodeInserterID = PEI::ID;
@@ -54,6 +59,7 @@ INITIALIZE_PASS_BEGIN(PEI, "prologepilog",
"Prologue/Epilogue Insertion", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(StackProtector)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(PEI, "prologepilog",
"Prologue/Epilogue Insertion & Frame Finalization",
@@ -67,6 +73,7 @@ void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addPreserved<MachineLoopInfo>();
AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<StackProtector>();
AU.addRequired<TargetPassConfig>();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -95,6 +102,9 @@ void PEI::calculateSets(MachineFunction &Fn) {
return;
}
+/// StackObjSet - A set of stack object indexes
+typedef SmallSetVector<int, 8> StackObjSet;
+
/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
/// frame indexes with appropriate references.
///
@@ -105,7 +115,7 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
assert(!Fn.getRegInfo().getNumVirtRegs() && "Regalloc must assign all vregs");
- RS = TRI->requiresRegisterScavenging(Fn) ? new RegScavenger() : NULL;
+ RS = TRI->requiresRegisterScavenging(Fn) ? new RegScavenger() : nullptr;
FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(Fn);
// Calculate the MaxCallFrameSize and AdjustsStack variables for the
@@ -150,7 +160,7 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
replaceFrameIndices(Fn);
// If register scavenging is needed, as we've enabled doing it as a
- // post-pass, scavenge the virtual registers that frame index elimiation
+ // post-pass, scavenge the virtual registers that frame index elimination
// inserted.
if (TRI->requiresRegisterScavenging(Fn) && FrameIndexVirtualScavenging)
scavengeFrameVirtualRegs(Fn);
@@ -160,10 +170,11 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
// Warn on stack size when we exceed the given limit.
MachineFrameInfo *MFI = Fn.getFrameInfo();
- if (WarnStackSize.getNumOccurrences() > 0 &&
- WarnStackSize < MFI->getStackSize())
- errs() << "warning: Stack size limit exceeded (" << MFI->getStackSize()
- << ") in " << Fn.getName() << ".\n";
+ uint64_t StackSize = MFI->getStackSize();
+ if (WarnStackSize.getNumOccurrences() > 0 && WarnStackSize < StackSize) {
+ DiagnosticInfoStackSize DiagStackSize(*F, StackSize);
+ F->getContext().diagnose(DiagStackSize);
+ }
delete RS;
ReturnBlocks.clear();
@@ -233,14 +244,14 @@ void PEI::calculateCalleeSavedRegisters(MachineFunction &F) {
MachineFrameInfo *MFI = F.getFrameInfo();
// Get the callee saved register list...
- const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs(&F);
+ const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&F);
// These are used to keep track the callee-save area. Initialize them.
MinCSFrameIndex = INT_MAX;
MaxCSFrameIndex = 0;
// Early exit for targets which have no callee saved registers.
- if (CSRegs == 0 || CSRegs[0] == 0)
+ if (!CSRegs || CSRegs[0] == 0)
return;
// In Naked functions we aren't going to save any registers.
@@ -257,51 +268,56 @@ void PEI::calculateCalleeSavedRegisters(MachineFunction &F) {
}
}
- if (CSI.empty())
- return; // Early exit if no callee saved registers are modified!
+ if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI)) {
+ // If target doesn't implement this, use generic code.
- unsigned NumFixedSpillSlots;
- const TargetFrameLowering::SpillSlot *FixedSpillSlots =
- TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots);
+ if (CSI.empty())
+ return; // Early exit if no callee saved registers are modified!
- // Now that we know which registers need to be saved and restored, allocate
- // stack slots for them.
- for (std::vector<CalleeSavedInfo>::iterator
- I = CSI.begin(), E = CSI.end(); I != E; ++I) {
- unsigned Reg = I->getReg();
- const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
+ unsigned NumFixedSpillSlots;
+ const TargetFrameLowering::SpillSlot *FixedSpillSlots =
+ TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots);
- int FrameIdx;
- if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) {
- I->setFrameIdx(FrameIdx);
- continue;
- }
+ // Now that we know which registers need to be saved and restored, allocate
+ // stack slots for them.
+ for (std::vector<CalleeSavedInfo>::iterator I = CSI.begin(), E = CSI.end();
+ I != E; ++I) {
+ unsigned Reg = I->getReg();
+ const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
- // Check to see if this physreg must be spilled to a particular stack slot
- // on this target.
- const TargetFrameLowering::SpillSlot *FixedSlot = FixedSpillSlots;
- while (FixedSlot != FixedSpillSlots+NumFixedSpillSlots &&
- FixedSlot->Reg != Reg)
- ++FixedSlot;
-
- if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
- // Nope, just spill it anywhere convenient.
- unsigned Align = RC->getAlignment();
- unsigned StackAlign = TFI->getStackAlignment();
-
- // We may not be able to satisfy the desired alignment specification of
- // the TargetRegisterClass if the stack alignment is smaller. Use the
- // min.
- Align = std::min(Align, StackAlign);
- FrameIdx = MFI->CreateStackObject(RC->getSize(), Align, true);
- if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
- if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
- } else {
- // Spill it to the stack where we must.
- FrameIdx = MFI->CreateFixedObject(RC->getSize(), FixedSlot->Offset, true);
- }
+ int FrameIdx;
+ if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) {
+ I->setFrameIdx(FrameIdx);
+ continue;
+ }
+
+ // Check to see if this physreg must be spilled to a particular stack slot
+ // on this target.
+ const TargetFrameLowering::SpillSlot *FixedSlot = FixedSpillSlots;
+ while (FixedSlot != FixedSpillSlots + NumFixedSpillSlots &&
+ FixedSlot->Reg != Reg)
+ ++FixedSlot;
+
+ if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
+ // Nope, just spill it anywhere convenient.
+ unsigned Align = RC->getAlignment();
+ unsigned StackAlign = TFI->getStackAlignment();
+
+ // We may not be able to satisfy the desired alignment specification of
+ // the TargetRegisterClass if the stack alignment is smaller. Use the
+ // min.
+ Align = std::min(Align, StackAlign);
+ FrameIdx = MFI->CreateStackObject(RC->getSize(), Align, true);
+ if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
+ if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
+ } else {
+ // Spill it to the stack where we must.
+ FrameIdx =
+ MFI->CreateFixedSpillStackObject(RC->getSize(), FixedSlot->Offset);
+ }
- I->setFrameIdx(FrameIdx);
+ I->setFrameIdx(FrameIdx);
+ }
}
MFI->setCalleeSavedInfo(CSI);
@@ -409,11 +425,28 @@ AdjustStackOffset(MachineFrameInfo *MFI, int FrameIdx,
}
}
+/// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
+/// those required to be close to the Stack Protector) to stack offsets.
+static void
+AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
+ SmallSet<int, 16> &ProtectedObjs,
+ MachineFrameInfo *MFI, bool StackGrowsDown,
+ int64_t &Offset, unsigned &MaxAlign) {
+
+ for (StackObjSet::const_iterator I = UnassignedObjs.begin(),
+ E = UnassignedObjs.end(); I != E; ++I) {
+ int i = *I;
+ AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
+ ProtectedObjs.insert(i);
+ }
+}
+
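+
+ With the StackProtector analysis now feeding PEI, stack objects are no
+ longer lumped into a single "large object" bucket: each object is
+ classified by SSPLayoutKind and the buckets are laid out next to the guard
+ in a fixed order, large arrays first, then small arrays, then address-taken
+ objects, with everything else following. A rough sketch of that bucketing
+ (Layout and orderForProtector are illustrative names, not the pass's API):

#include <map>
#include <vector>

// Hypothetical classification mirroring StackProtector::SSPLayoutKind.
enum class Layout { None, LargeArray, SmallArray, AddrOf };

// Order frame objects so that the kinds most likely to be overflowed end up
// closest to the stack protector; unclassified objects keep their order.
std::vector<int> orderForProtector(const std::map<int, Layout> &Objs) {
  std::vector<int> Large, Small, Addr, Rest;
  for (const auto &KV : Objs) {
    switch (KV.second) {
    case Layout::LargeArray: Large.push_back(KV.first); break;
    case Layout::SmallArray: Small.push_back(KV.first); break;
    case Layout::AddrOf:     Addr.push_back(KV.first);  break;
    case Layout::None:       Rest.push_back(KV.first);  break;
    }
  }
  std::vector<int> Order;
  Order.insert(Order.end(), Large.begin(), Large.end());
  Order.insert(Order.end(), Small.begin(), Small.end());
  Order.insert(Order.end(), Addr.begin(), Addr.end());
  Order.insert(Order.end(), Rest.begin(), Rest.end());
  return Order;
}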
/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
///
void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
const TargetFrameLowering &TFI = *Fn.getTarget().getFrameLowering();
+ StackProtector *SP = &getAnalysis<StackProtector>();
bool StackGrowsDown =
TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
@@ -523,8 +556,12 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
// Make sure that the stack protector comes before the local variables on the
// stack.
- SmallSet<int, 16> LargeStackObjs;
+ SmallSet<int, 16> ProtectedObjs;
if (MFI->getStackProtectorIndex() >= 0) {
+ StackObjSet LargeArrayObjs;
+ StackObjSet SmallArrayObjs;
+ StackObjSet AddrOfObjs;
+
AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), StackGrowsDown,
Offset, MaxAlign);
@@ -541,12 +578,29 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
continue;
if (MFI->getStackProtectorIndex() == (int)i)
continue;
- if (!MFI->MayNeedStackProtector(i))
- continue;
- AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
- LargeStackObjs.insert(i);
+ switch (SP->getSSPLayout(MFI->getObjectAllocation(i))) {
+ case StackProtector::SSPLK_None:
+ continue;
+ case StackProtector::SSPLK_SmallArray:
+ SmallArrayObjs.insert(i);
+ continue;
+ case StackProtector::SSPLK_AddrOf:
+ AddrOfObjs.insert(i);
+ continue;
+ case StackProtector::SSPLK_LargeArray:
+ LargeArrayObjs.insert(i);
+ continue;
+ }
+ llvm_unreachable("Unexpected SSPLayoutKind.");
}
+
+ AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
+ Offset, MaxAlign);
+ AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
+ Offset, MaxAlign);
+ AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
+ Offset, MaxAlign);
}
// Then assign frame offsets to stack objects that are not used to spill
@@ -563,7 +617,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
continue;
if (MFI->getStackProtectorIndex() == (int)i)
continue;
- if (LargeStackObjs.count(i))
+ if (ProtectedObjs.count(i))
continue;
AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
@@ -632,7 +686,7 @@ void PEI::insertPrologEpilogCode(MachineFunction &Fn) {
// we've been asked for it. This, when linked with a runtime with support
// for segmented stacks (libgcc is one), will result in allocating stack
// space in small chunks instead of one large contiguous block.
- if (Fn.getTarget().Options.EnableSegmentedStacks)
+ if (Fn.shouldSplitStack())
TFI.adjustForSegmentedStacks(Fn);
// Emit additional code that is required to explicitly handle the stack in
@@ -711,14 +765,14 @@ void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
SPAdj += Size;
MachineBasicBlock::iterator PrevI = BB->end();
- if (I != BB->begin()) PrevI = prior(I);
+ if (I != BB->begin()) PrevI = std::prev(I);
TFI->eliminateCallFramePseudoInstr(Fn, *BB, I);
// Visit the instructions created by eliminateCallFramePseudoInstr().
if (PrevI == BB->end())
I = BB->begin(); // The replaced instr was the first in the block.
else
- I = llvm::next(PrevI);
+ I = std::next(PrevI);
continue;
}
@@ -757,7 +811,7 @@ void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
// use that target machine register info object to eliminate
// it.
TRI.eliminateFrameIndex(MI, SPAdj, i,
- FrameIndexVirtualScavenging ? NULL : RS);
+ FrameIndexVirtualScavenging ? nullptr : RS);
// Reset the iterator if we were at the beginning of the BB.
if (AtBeginning) {
@@ -765,7 +819,7 @@ void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
DoIncr = false;
}
- MI = 0;
+ MI = nullptr;
break;
}
@@ -797,13 +851,14 @@ void PEI::scavengeFrameVirtualRegs(MachineFunction &Fn) {
// We might end up here again with a NULL iterator if we scavenged a
// register for which we inserted spill code for definition by what was
// originally the first instruction in BB.
- if (I == MachineBasicBlock::iterator(NULL))
+ if (I == MachineBasicBlock::iterator(nullptr))
I = BB->begin();
MachineInstr *MI = I;
- MachineBasicBlock::iterator J = llvm::next(I);
- MachineBasicBlock::iterator P = I == BB->begin() ?
- MachineBasicBlock::iterator(NULL) : llvm::prior(I);
+ MachineBasicBlock::iterator J = std::next(I);
+ MachineBasicBlock::iterator P =
+ I == BB->begin() ? MachineBasicBlock::iterator(nullptr)
+ : std::prev(I);
// RS should process this instruction before we might scavenge at this
// location. This is because we might be replacing a virtual register
@@ -846,7 +901,7 @@ void PEI::scavengeFrameVirtualRegs(MachineFunction &Fn) {
// spill code will have been inserted in between I and J. This is a
// problem because we need the spill code before I: Move I to just
// prior to J.
- if (I != llvm::prior(J)) {
+ if (I != std::prev(J)) {
BB->splice(J, BB, I);
// Before we move I, we need to prepare the RS to visit I again.
diff --git a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.h b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.h
index 77cfa2b..5a6d39a 100644
--- a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.h
+++ b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.h
@@ -37,12 +37,12 @@ namespace llvm {
initializePEIPass(*PassRegistry::getPassRegistry());
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
/// frame indexes with appropriate references.
///
- bool runOnMachineFunction(MachineFunction &Fn);
+ bool runOnMachineFunction(MachineFunction &Fn) override;
private:
RegScavenger *RS;
diff --git a/contrib/llvm/lib/CodeGen/PseudoSourceValue.cpp b/contrib/llvm/lib/CodeGen/PseudoSourceValue.cpp
index 8564911..12b2c90 100644
--- a/contrib/llvm/lib/CodeGen/PseudoSourceValue.cpp
+++ b/contrib/llvm/lib/CodeGen/PseudoSourceValue.cpp
@@ -58,13 +58,9 @@ static const char *const PSVNames[] = {
"ConstantPool"
};
-// FIXME: THIS IS A HACK!!!!
-// Eventually these should be uniqued on LLVMContext rather than in a managed
-// static. For now, we can safely use the global context for the time being to
-// squeak by.
-PseudoSourceValue::PseudoSourceValue(enum ValueTy Subclass) :
- Value(Type::getInt8PtrTy(getGlobalContext()),
- Subclass) {}
+PseudoSourceValue::PseudoSourceValue(bool isFixed) : isFixed(isFixed) {}
+
+PseudoSourceValue::~PseudoSourceValue() {}
void PseudoSourceValue::printCustom(raw_ostream &O) const {
O << PSVNames[this - PSVGlobals->PSVs];
diff --git a/contrib/llvm/lib/CodeGen/RegAllocBase.cpp b/contrib/llvm/lib/CodeGen/RegAllocBase.cpp
index 293e306..894aee7 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocBase.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocBase.cpp
@@ -7,12 +7,11 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the RegAllocBase class which provides comon functionality
+// This file defines the RegAllocBase class which provides common functionality
// for LiveIntervalUnion-based register allocators.
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "regalloc"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "llvm/ADT/Statistic.h"
@@ -35,6 +34,8 @@
using namespace llvm;
+#define DEBUG_TYPE "regalloc"
+
STATISTIC(NumNewQueued , "Number of new live ranges queued");
// Temporary verification option until we can put verification inside
@@ -101,8 +102,8 @@ void RegAllocBase::allocatePhysRegs() {
// register if possible and populate a list of new live intervals that
// result from splitting.
DEBUG(dbgs() << "\nselectOrSplit "
- << MRI->getRegClass(VirtReg->reg)->getName()
- << ':' << *VirtReg << '\n');
+ << MRI->getRegClass(VirtReg->reg)->getName()
+ << ':' << *VirtReg << " w=" << VirtReg->weight << '\n');
typedef SmallVector<unsigned, 4> VirtRegVec;
VirtRegVec SplitVRegs;
unsigned AvailablePhysReg = selectOrSplit(*VirtReg, SplitVRegs);
@@ -110,11 +111,16 @@ void RegAllocBase::allocatePhysRegs() {
if (AvailablePhysReg == ~0u) {
// selectOrSplit failed to find a register!
// Probably caused by an inline asm.
- MachineInstr *MI;
- for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(VirtReg->reg);
- (MI = I.skipInstruction());)
- if (MI->isInlineAsm())
+ MachineInstr *MI = nullptr;
+ for (MachineRegisterInfo::reg_instr_iterator
+ I = MRI->reg_instr_begin(VirtReg->reg), E = MRI->reg_instr_end();
+ I != E; ) {
+ MachineInstr *TmpMI = &*(I++);
+ if (TmpMI->isInlineAsm()) {
+ MI = TmpMI;
break;
+ }
+ }
if (MI)
MI->emitError("inline assembly requires more registers than available");
else
diff --git a/contrib/llvm/lib/CodeGen/RegAllocBase.h b/contrib/llvm/lib/CodeGen/RegAllocBase.h
index c17a8d9..b333c36 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocBase.h
+++ b/contrib/llvm/lib/CodeGen/RegAllocBase.h
@@ -37,7 +37,6 @@
#ifndef LLVM_CODEGEN_REGALLOCBASE
#define LLVM_CODEGEN_REGALLOCBASE
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
@@ -66,7 +65,8 @@ protected:
LiveRegMatrix *Matrix;
RegisterClassInfo RegClassInfo;
- RegAllocBase(): TRI(0), MRI(0), VRM(0), LIS(0), Matrix(0) {}
+ RegAllocBase()
+ : TRI(nullptr), MRI(nullptr), VRM(nullptr), LIS(nullptr), Matrix(nullptr) {}
virtual ~RegAllocBase() {}
diff --git a/contrib/llvm/lib/CodeGen/RegAllocBasic.cpp b/contrib/llvm/lib/CodeGen/RegAllocBasic.cpp
index 6768e45..6bc678e8 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocBasic.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocBasic.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "regalloc"
#include "llvm/CodeGen/Passes.h"
#include "AllocationOrder.h"
#include "LiveDebugVariables.h"
@@ -41,6 +40,8 @@
using namespace llvm;
+#define DEBUG_TYPE "regalloc"
+
static RegisterRegAlloc basicRegAlloc("basic", "basic register allocator",
createBasicRegisterAllocator);
@@ -64,7 +65,7 @@ class RABasic : public MachineFunctionPass, public RegAllocBase
MachineFunction *MF;
// state
- OwningPtr<Spiller> SpillerInstance;
+ std::unique_ptr<Spiller> SpillerInstance;
std::priority_queue<LiveInterval*, std::vector<LiveInterval*>,
CompSpillWeight> Queue;
@@ -76,36 +77,34 @@ public:
RABasic();
/// Return the pass name.
- virtual const char* getPassName() const {
+ const char* getPassName() const override {
return "Basic Register Allocator";
}
/// RABasic analysis usage.
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
-
- virtual void releaseMemory();
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
- virtual Spiller &spiller() { return *SpillerInstance; }
+ void releaseMemory() override;
- virtual float getPriority(LiveInterval *LI) { return LI->weight; }
+ Spiller &spiller() override { return *SpillerInstance; }
- virtual void enqueue(LiveInterval *LI) {
+ void enqueue(LiveInterval *LI) override {
Queue.push(LI);
}
- virtual LiveInterval *dequeue() {
+ LiveInterval *dequeue() override {
if (Queue.empty())
- return 0;
+ return nullptr;
LiveInterval *LI = Queue.top();
Queue.pop();
return LI;
}
- virtual unsigned selectOrSplit(LiveInterval &VirtReg,
- SmallVectorImpl<unsigned> &SplitVRegs);
+ unsigned selectOrSplit(LiveInterval &VirtReg,
+ SmallVectorImpl<unsigned> &SplitVRegs) override;
/// Perform register allocation.
- virtual bool runOnMachineFunction(MachineFunction &mf);
+ bool runOnMachineFunction(MachineFunction &mf) override;
// Helper for spilling all live virtual registers currently unified under preg
// that interfere with the most recently queried lvr. Return true if spilling
@@ -158,7 +157,7 @@ void RABasic::getAnalysisUsage(AnalysisUsage &AU) const {
}
void RABasic::releaseMemory() {
- SpillerInstance.reset(0);
+ SpillerInstance.reset();
}
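
The OwningPtr members replaced throughout these allocator files behave the same as std::unique_ptr for this usage; the only visible difference in the diff is that reset(0) becomes a plain reset(). A trivial sketch of the idiom, with a stand-in Spiller type:

#include <memory>

struct Spiller { }; // stand-in for the real Spiller interface

// With std::unique_ptr the old reset(0) idiom simply becomes reset():
// the held object is deleted and the pointer is left null.
void releaseSpiller(std::unique_ptr<Spiller> &SpillerInstance) {
  SpillerInstance.reset(); // equivalent of the former OwningPtr::reset(0)
}
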
diff --git a/contrib/llvm/lib/CodeGen/RegAllocFast.cpp b/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
index e92dbd2..97b9f76 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "regalloc"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
@@ -38,6 +37,8 @@
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "regalloc"
+
STATISTIC(NumStores, "Number of stores added");
STATISTIC(NumLoads , "Number of loads added");
STATISTIC(NumCopies, "Number of copies coalesced");
@@ -75,7 +76,7 @@ namespace {
bool Dirty; // Register needs spill.
explicit LiveReg(unsigned v)
- : LastUse(0), VirtReg(v), PhysReg(0), LastOpNum(0), Dirty(false) {}
+ : LastUse(nullptr), VirtReg(v), PhysReg(0), LastOpNum(0), Dirty(false){}
unsigned getSparseSetIndex() const {
return TargetRegisterInfo::virtReg2Index(VirtReg);
@@ -144,23 +145,23 @@ namespace {
// not be erased.
bool isBulkSpilling;
- enum LLVM_ENUM_INT_TYPE(unsigned) {
+ enum : unsigned {
spillClean = 1,
spillDirty = 100,
spillImpossible = ~0u
};
public:
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Fast Register Allocator";
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
}
private:
- bool runOnMachineFunction(MachineFunction &Fn);
+ bool runOnMachineFunction(MachineFunction &Fn) override;
void AllocateBasicBlock();
void handleThroughOperands(MachineInstr *MI,
SmallVectorImpl<unsigned> &VirtDead);
@@ -224,7 +225,7 @@ bool RAFast::isLastUseOfLocalReg(MachineOperand &MO) {
// Check that the use/def chain has exactly one operand - MO.
MachineRegisterInfo::reg_nodbg_iterator I = MRI->reg_nodbg_begin(MO.getReg());
- if (&I.getOperand() != &MO)
+ if (&*I != &MO)
return false;
return ++I == MRI->reg_nodbg_end();
}
@@ -319,7 +320,7 @@ void RAFast::spillVirtReg(MachineBasicBlock::iterator MI,
// now.
LRIDbgValues.clear();
if (SpillKill)
- LR.LastUse = 0; // Don't kill register again
+ LR.LastUse = nullptr; // Don't kill register again
}
killVirtReg(LRI);
}
@@ -585,12 +586,12 @@ RAFast::defineVirtReg(MachineInstr *MI, unsigned OpNum,
"Not a virtual register");
LiveRegMap::iterator LRI;
bool New;
- tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
+ std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
if (New) {
// If there is no hint, peek at the only use of this register.
if ((!Hint || !TargetRegisterInfo::isPhysicalRegister(Hint)) &&
MRI->hasOneNonDBGUse(VirtReg)) {
- const MachineInstr &UseMI = *MRI->use_nodbg_begin(VirtReg);
+ const MachineInstr &UseMI = *MRI->use_instr_nodbg_begin(VirtReg);
// It's a copy, use the destination register as a hint.
if (UseMI.isCopyLike())
Hint = UseMI.getOperand(0).getReg();
@@ -618,7 +619,7 @@ RAFast::reloadVirtReg(MachineInstr *MI, unsigned OpNum,
"Not a virtual register");
LiveRegMap::iterator LRI;
bool New;
- tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
+ std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
MachineOperand &MO = MI->getOperand(OpNum);
if (New) {
LRI = allocVirtReg(MI, LRI, Hint);
diff --git a/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp b/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp
index c08d955..dee990c 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "regalloc"
#include "llvm/CodeGen/Passes.h"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
@@ -35,17 +34,23 @@
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
+#include "llvm/IR/LLVMContext.h"
#include "llvm/PassAnalysisSupport.h"
+#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <queue>
using namespace llvm;
+#define DEBUG_TYPE "regalloc"
+
STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits, "Number of split local live ranges");
STATISTIC(NumEvicted, "Number of interferences evicted");
@@ -59,6 +64,34 @@ SplitSpillMode("split-spill-mode", cl::Hidden,
clEnumValEnd),
cl::init(SplitEditor::SM_Partition));
+static cl::opt<unsigned>
+LastChanceRecoloringMaxDepth("lcr-max-depth", cl::Hidden,
+ cl::desc("Last chance recoloring max depth"),
+ cl::init(5));
+
+static cl::opt<unsigned> LastChanceRecoloringMaxInterference(
+ "lcr-max-interf", cl::Hidden,
+ cl::desc("Last chance recoloring maximum number of considered"
+ " interference at a time"),
+ cl::init(8));
+
+static cl::opt<bool>
+ExhaustiveSearch("exhaustive-register-search", cl::NotHidden,
+ cl::desc("Exhaustive Search for registers bypassing the depth "
+ "and interference cutoffs of last chance recoloring"));
+
+static cl::opt<bool> EnableLocalReassignment(
+ "enable-local-reassign", cl::Hidden,
+ cl::desc("Local reassignment can yield better allocation decisions, but "
+ "may be compile time intensive"),
+ cl::init(false));
+
+// FIXME: Find a good default for this flag and remove the flag.
+static cl::opt<unsigned>
+CSRFirstTimeCost("regalloc-csr-first-time-cost",
+ cl::desc("Cost for first time use of callee-saved register."),
+ cl::init(0), cl::Hidden);
+
static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
createGreedyRegisterAllocator);
@@ -66,10 +99,19 @@ namespace {
class RAGreedy : public MachineFunctionPass,
public RegAllocBase,
private LiveRangeEdit::Delegate {
+ // Convenient shortcuts.
+ typedef std::priority_queue<std::pair<unsigned, unsigned> > PQueue;
+ typedef SmallPtrSet<LiveInterval *, 4> SmallLISet;
+ typedef SmallSet<unsigned, 16> SmallVirtRegSet;
// context
MachineFunction *MF;
+ // Shortcuts to some useful interface.
+ const TargetInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ RegisterClassInfo RCI;
+
// analyses
SlotIndexes *Indexes;
MachineBlockFrequencyInfo *MBFI;
@@ -80,8 +122,8 @@ class RAGreedy : public MachineFunctionPass,
LiveDebugVariables *DebugVars;
// state
- OwningPtr<Spiller> SpillerInstance;
- std::priority_queue<std::pair<unsigned, unsigned> > Queue;
+ std::unique_ptr<Spiller> SpillerInstance;
+ PQueue Queue;
unsigned NextCascade;
// Live ranges pass through a number of stages as we try to allocate them.
@@ -120,6 +162,22 @@ class RAGreedy : public MachineFunctionPass,
RS_Done
};
+ // Enum CutOffStage to keep track of whether the register allocation failed
+ // because of the cutoffs encountered in last chance recoloring.
+ // Note: This is used as a bitmask; each new value should be the next power of 2.
+ enum CutOffStage {
+ // No cutoffs encountered
+ CO_None = 0,
+
+ // lcr-max-depth cutoff encountered
+ CO_Depth = 1,
+
+ // lcr-max-interf cutoff encountered
+ CO_Interf = 2
+ };
+
+ uint8_t CutOffInfo;
+
#ifndef NDEBUG
static const char *const StageName[];
#endif
@@ -160,20 +218,23 @@ class RAGreedy : public MachineFunctionPass,
unsigned BrokenHints; ///< Total number of broken hints.
float MaxWeight; ///< Maximum spill weight evicted.
- EvictionCost(unsigned B = 0) : BrokenHints(B), MaxWeight(0) {}
+ EvictionCost(): BrokenHints(0), MaxWeight(0) {}
bool isMax() const { return BrokenHints == ~0u; }
+ void setMax() { BrokenHints = ~0u; }
+
+ void setBrokenHints(unsigned NHints) { BrokenHints = NHints; }
+
bool operator<(const EvictionCost &O) const {
- if (BrokenHints != O.BrokenHints)
- return BrokenHints < O.BrokenHints;
- return MaxWeight < O.MaxWeight;
+ return std::tie(BrokenHints, MaxWeight) <
+ std::tie(O.BrokenHints, O.MaxWeight);
}
};
// splitting state.
- OwningPtr<SplitAnalysis> SA;
- OwningPtr<SplitEditor> SE;
+ std::unique_ptr<SplitAnalysis> SA;
+ std::unique_ptr<SplitEditor> SE;
/// Cached per-block interference maps
InterferenceCache IntfCache;
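EvictionCost::operator< above is rewritten in terms of std::tie, which compares the two fields lexicographically: BrokenHints decides first and MaxWeight only breaks ties. A minimal standalone check of that semantics (plain C++, field values made up):

#include <cassert>
#include <tuple>

struct Cost { unsigned BrokenHints; float MaxWeight; };

static bool less(const Cost &A, const Cost &B) {
  return std::tie(A.BrokenHints, A.MaxWeight) <
         std::tie(B.BrokenHints, B.MaxWeight);
}

int main() {
  assert(less({0, 9.0f}, {1, 0.5f}));   // fewer broken hints wins regardless of weight
  assert(less({1, 0.5f}, {1, 2.0f}));   // equal hints: lower max weight wins
  assert(!less({1, 2.0f}, {1, 2.0f}));  // equal costs are not "less"
  return 0;
}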
@@ -217,43 +278,54 @@ class RAGreedy : public MachineFunctionPass,
}
};
- /// Candidate info for for each PhysReg in AllocationOrder.
+ /// Candidate info for each PhysReg in AllocationOrder.
/// This vector never shrinks, but grows to the size of the largest register
/// class.
SmallVector<GlobalSplitCandidate, 32> GlobalCand;
- enum LLVM_ENUM_INT_TYPE(unsigned) { NoCand = ~0u };
+ enum : unsigned { NoCand = ~0u };
/// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
/// NoCand which indicates the stack interval.
SmallVector<unsigned, 32> BundleCand;
+ /// Callee-save register cost, calculated once per machine function.
+ BlockFrequency CSRCost;
+
+ /// Whether to run the local reassignment heuristic. This information is
+ /// obtained from the TargetSubtargetInfo.
+ bool EnableLocalReassign;
+
public:
RAGreedy();
/// Return the pass name.
- virtual const char* getPassName() const {
+ const char* getPassName() const override {
return "Greedy Register Allocator";
}
/// RAGreedy analysis usage.
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- virtual void releaseMemory();
- virtual Spiller &spiller() { return *SpillerInstance; }
- virtual void enqueue(LiveInterval *LI);
- virtual LiveInterval *dequeue();
- virtual unsigned selectOrSplit(LiveInterval&,
- SmallVectorImpl<unsigned>&);
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ void releaseMemory() override;
+ Spiller &spiller() override { return *SpillerInstance; }
+ void enqueue(LiveInterval *LI) override;
+ LiveInterval *dequeue() override;
+ unsigned selectOrSplit(LiveInterval&, SmallVectorImpl<unsigned>&) override;
/// Perform register allocation.
- virtual bool runOnMachineFunction(MachineFunction &mf);
+ bool runOnMachineFunction(MachineFunction &mf) override;
static char ID;
private:
- bool LRE_CanEraseVirtReg(unsigned);
- void LRE_WillShrinkVirtReg(unsigned);
- void LRE_DidCloneVirtReg(unsigned, unsigned);
+ unsigned selectOrSplitImpl(LiveInterval &, SmallVectorImpl<unsigned> &,
+ SmallVirtRegSet &, unsigned = 0);
+
+ bool LRE_CanEraseVirtReg(unsigned) override;
+ void LRE_WillShrinkVirtReg(unsigned) override;
+ void LRE_DidCloneVirtReg(unsigned, unsigned) override;
+ void enqueue(PQueue &CurQueue, LiveInterval *LI);
+ LiveInterval *dequeue(PQueue &CurQueue);
BlockFrequency calcSpillCost();
bool addSplitConstraints(InterferenceCache::Cursor, BlockFrequency&);
@@ -268,6 +340,9 @@ private:
bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
void evictInterference(LiveInterval&, unsigned,
SmallVectorImpl<unsigned>&);
+ bool mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
+ SmallLISet &RecoloringCandidates,
+ const SmallVirtRegSet &FixedRegisters);
unsigned tryAssign(LiveInterval&, AllocationOrder&,
SmallVectorImpl<unsigned>&);
@@ -275,6 +350,21 @@ private:
SmallVectorImpl<unsigned>&, unsigned = ~0u);
unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<unsigned>&);
+ /// Calculate cost of region splitting.
+ unsigned calculateRegionSplitCost(LiveInterval &VirtReg,
+ AllocationOrder &Order,
+ BlockFrequency &BestCost,
+ unsigned &NumCands, bool IgnoreCSR);
+ /// Perform region splitting.
+ unsigned doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
+ bool HasCompact,
+ SmallVectorImpl<unsigned> &NewVRegs);
+ /// Check other options before using a callee-saved register for the first
+ /// time.
+ unsigned tryAssignCSRFirstTime(LiveInterval &VirtReg, AllocationOrder &Order,
+ unsigned PhysReg, unsigned &CostPerUseLimit,
+ SmallVectorImpl<unsigned> &NewVRegs);
+ void initializeCSRCost();
unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<unsigned>&);
unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
@@ -283,6 +373,11 @@ private:
SmallVectorImpl<unsigned>&);
unsigned trySplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<unsigned>&);
+ unsigned tryLastChanceRecoloring(LiveInterval &, AllocationOrder &,
+ SmallVectorImpl<unsigned> &,
+ SmallVirtRegSet &, unsigned);
+ bool tryRecoloringCandidates(PQueue &, SmallVectorImpl<unsigned> &,
+ SmallVirtRegSet &, unsigned);
};
} // end anonymous namespace
@@ -301,7 +396,7 @@ const char *const RAGreedy::StageName[] = {
// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
-const float Hysteresis = 0.98f;
+const float Hysteresis = (2007 / 2048.0f); // 0.97998046875
FunctionPass* llvm::createGreedyRegisterAllocator() {
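The hysteresis constant is rewritten as 2007/2048, a fraction with a power-of-two denominator that a float represents exactly, whereas 0.98 has to be rounded; presumably this keeps the float comparisons that use it fully deterministic. A quick standalone check:

#include <cassert>

int main() {
  const float Hysteresis = (2007 / 2048.0f);
  assert(Hysteresis == 0.97998046875f);        // dyadic rational: represented exactly
  assert(static_cast<double>(0.98f) != 0.98);  // 0.98 rounds differently as float vs double
  return 0;
}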
@@ -391,12 +486,14 @@ void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
}
void RAGreedy::releaseMemory() {
- SpillerInstance.reset(0);
+ SpillerInstance.reset();
ExtraRegInfo.clear();
GlobalCand.clear();
}
-void RAGreedy::enqueue(LiveInterval *LI) {
+void RAGreedy::enqueue(LiveInterval *LI) { enqueue(Queue, LI); }
+
+void RAGreedy::enqueue(PQueue &CurQueue, LiveInterval *LI) {
// Prioritize live ranges by size, assigning larger ranges first.
// The queue holds (size, reg) pairs.
const unsigned Size = LI->getSize();
@@ -414,12 +511,25 @@ void RAGreedy::enqueue(LiveInterval *LI) {
// everything else has been allocated.
Prio = Size;
} else {
- if (ExtraRegInfo[Reg].Stage == RS_Assign && !LI->empty() &&
+ // Giant live ranges fall back to the global assignment heuristic, which
+ // prevents excessive spilling in pathological cases.
+ bool ReverseLocal = TRI->reverseLocalAssignment();
+ bool ForceGlobal = !ReverseLocal && TRI->mayOverrideLocalAssignment() &&
+ (Size / SlotIndex::InstrDist) > (2 * MRI->getRegClass(Reg)->getNumRegs());
+
+ if (ExtraRegInfo[Reg].Stage == RS_Assign && !ForceGlobal && !LI->empty() &&
LIS->intervalIsInOneMBB(*LI)) {
// Allocate original local ranges in linear instruction order. Since they
// are singly defined, this produces optimal coloring in the absence of
// global interference and other constraints.
- Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex());
+ if (!ReverseLocal)
+ Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex());
+ else {
+ // Allocating bottom up may allow many short LRGs to be assigned first
+ // to one of the cheap registers. This could be much faster for very
+ // large blocks on targets with many physical registers.
+ Prio = Indexes->getZeroIndex().getInstrDistance(LI->beginIndex());
+ }
}
else {
// Allocate global and split ranges in long->short order. Long ranges that
@@ -436,14 +546,16 @@ void RAGreedy::enqueue(LiveInterval *LI) {
}
// The virtual register number is a tie breaker for same-sized ranges.
// Give lower vreg numbers higher priority to assign them first.
- Queue.push(std::make_pair(Prio, ~Reg));
+ CurQueue.push(std::make_pair(Prio, ~Reg));
}
-LiveInterval *RAGreedy::dequeue() {
- if (Queue.empty())
- return 0;
- LiveInterval *LI = &LIS->getInterval(~Queue.top().second);
- Queue.pop();
+LiveInterval *RAGreedy::dequeue() { return dequeue(Queue); }
+
+LiveInterval *RAGreedy::dequeue(PQueue &CurQueue) {
+ if (CurQueue.empty())
+ return nullptr;
+ LiveInterval *LI = &LIS->getInterval(~CurQueue.top().second);
+ CurQueue.pop();
return LI;
}
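The queue holds (priority, ~vreg) pairs in a max-heap, so the bitwise complement is what implements the tie-break described in the comment above: for equal priorities the numerically lower virtual register is popped first. A small standalone demonstration (plain C++, register numbers made up):

#include <cassert>
#include <queue>
#include <utility>

int main() {
  std::priority_queue<std::pair<unsigned, unsigned>> Q;
  Q.push({10, ~5u});
  Q.push({10, ~3u});
  Q.push({42, ~9u});
  assert(~Q.top().second == 9u); Q.pop();  // highest priority comes out first
  assert(~Q.top().second == 3u); Q.pop();  // equal priority: lower vreg number wins
  assert(~Q.top().second == 5u);
  return 0;
}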
@@ -471,7 +583,8 @@ unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
if (Order.isHint(Hint)) {
DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
- EvictionCost MaxCost(1);
+ EvictionCost MaxCost;
+ MaxCost.setBrokenHints(1);
if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
evictInterference(VirtReg, Hint, NewVRegs);
return Hint;
@@ -543,11 +656,15 @@ bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
if (CanSplit && IsHint && !BreaksHint)
return true;
- return A.weight > B.weight;
+ if (A.weight > B.weight) {
+ DEBUG(dbgs() << "should evict: " << B << " w= " << B.weight << '\n');
+ return true;
+ }
+ return false;
}
/// canEvictInterference - Return true if all interferences between VirtReg and
-/// PhysReg can be evicted. When OnlyCheap is set, don't do anything
+/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
@@ -618,16 +735,16 @@ bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
return false;
if (Urgent)
continue;
+ // Apply the eviction policy for non-urgent evictions.
+ if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
+ return false;
// If !MaxCost.isMax(), then we're just looking for a cheap register.
// Evicting another local live range in this case could lead to suboptimal
// coloring.
if (!MaxCost.isMax() && IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
- !canReassign(*Intf, PhysReg)) {
+ (!EnableLocalReassign || !canReassign(*Intf, PhysReg))) {
return false;
}
- // Finally, apply the eviction policy for non-urgent evictions.
- if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
- return false;
}
}
MaxCost = Cost;
@@ -685,7 +802,8 @@ unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);
// Keep track of the cheapest interference seen so far.
- EvictionCost BestCost(~0u);
+ EvictionCost BestCost;
+ BestCost.setMax();
unsigned BestPhys = 0;
unsigned OrderLimit = Order.getOrder().size();
@@ -713,7 +831,7 @@ unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
}
Order.rewind();
- while (unsigned PhysReg = Order.nextWithDups(OrderLimit)) {
+ while (unsigned PhysReg = Order.next(OrderLimit)) {
if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
continue;
// The first use of a callee-saved register in a function has cost 1.
@@ -1172,9 +1290,7 @@ void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
SmallVectorImpl<unsigned> &NewVRegs) {
unsigned NumCands = 0;
- unsigned BestCand = NoCand;
BlockFrequency BestCost;
- SmallVector<unsigned, 8> UsedCands;
// Check if we can split this live range around a compact region.
bool HasCompact = calcCompactRegion(GlobalCand.front());
@@ -1186,11 +1302,33 @@ unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
// No benefit from the compact region, our fallback will be per-block
// splitting. Make sure we find a solution that is cheaper than spilling.
BestCost = calcSpillCost();
- DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
+ DEBUG(dbgs() << "Cost of isolating all blocks = ";
+ MBFI->printBlockFreq(dbgs(), BestCost) << '\n');
}
+ unsigned BestCand =
+ calculateRegionSplitCost(VirtReg, Order, BestCost, NumCands,
+ false/*IgnoreCSR*/);
+
+ // No solutions found, fall back to single block splitting.
+ if (!HasCompact && BestCand == NoCand)
+ return 0;
+
+ return doRegionSplit(VirtReg, BestCand, HasCompact, NewVRegs);
+}
+
+unsigned RAGreedy::calculateRegionSplitCost(LiveInterval &VirtReg,
+ AllocationOrder &Order,
+ BlockFrequency &BestCost,
+ unsigned &NumCands,
+ bool IgnoreCSR) {
+ unsigned BestCand = NoCand;
Order.rewind();
while (unsigned PhysReg = Order.next()) {
+ if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
+ if (IgnoreCSR && !MRI->isPhysRegUsed(CSR))
+ continue;
+
// Discard bad candidates before we run out of interference cache cursors.
// This will only affect register classes with a lot of registers (>32).
if (NumCands == IntfCache.getMaxCursors()) {
@@ -1220,7 +1358,8 @@ unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
continue;
}
- DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
+ DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = ";
+ MBFI->printBlockFreq(dbgs(), Cost));
if (Cost >= BestCost) {
DEBUG({
if (BestCand == NoCand)
@@ -1243,7 +1382,8 @@ unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
Cost += calcGlobalSplitCost(Cand);
DEBUG({
- dbgs() << ", total = " << Cost << " with bundles";
+ dbgs() << ", total = "; MBFI->printBlockFreq(dbgs(), Cost)
+ << " with bundles";
for (int i = Cand.LiveBundles.find_first(); i>=0;
i = Cand.LiveBundles.find_next(i))
dbgs() << " EB#" << i;
@@ -1255,11 +1395,13 @@ unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
}
++NumCands;
}
+ return BestCand;
+}
- // No solutions found, fall back to single block splitting.
- if (!HasCompact && BestCand == NoCand)
- return 0;
-
+unsigned RAGreedy::doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
+ bool HasCompact,
+ SmallVectorImpl<unsigned> &NewVRegs) {
+ SmallVector<unsigned, 8> UsedCands;
// Prepare split editor.
LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
SE->reset(LREdit, SplitSpillMode);
@@ -1348,6 +1490,22 @@ unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
// Per-Instruction Splitting
//===----------------------------------------------------------------------===//
+/// Get the number of allocatable registers that match the constraints of \p Reg
+/// on \p MI and that are also in \p SuperRC.
+static unsigned getNumAllocatableRegsForConstraints(
+ const MachineInstr *MI, unsigned Reg, const TargetRegisterClass *SuperRC,
+ const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
+ const RegisterClassInfo &RCI) {
+ assert(SuperRC && "Invalid register class");
+
+ const TargetRegisterClass *ConstrainedRC =
+ MI->getRegClassConstraintEffectForVReg(Reg, SuperRC, TII, TRI,
+ /* ExploreBundle */ true);
+ if (!ConstrainedRC)
+ return 0;
+ return RCI.getNumAllocatableRegs(ConstrainedRC);
+}
+
/// tryInstructionSplit - Split a live range around individual instructions.
/// This is normally not worthwhile since the spiller is doing essentially the
/// same thing. However, when the live range is in a constrained register
@@ -1358,8 +1516,9 @@ unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
unsigned
RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
SmallVectorImpl<unsigned> &NewVRegs) {
+ const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);
// There is no point to this if there are no larger sub-classes.
- if (!RegClassInfo.isProperSubClass(MRI->getRegClass(VirtReg.reg)))
+ if (!RegClassInfo.isProperSubClass(CurRC))
return 0;
// Always enable split spill mode, since we're effectively spilling to a
@@ -1373,10 +1532,18 @@ RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
DEBUG(dbgs() << "Split around " << Uses.size() << " individual instrs.\n");
- // Split around every non-copy instruction.
+ const TargetRegisterClass *SuperRC = TRI->getLargestLegalSuperClass(CurRC);
+ unsigned SuperRCNumAllocatableRegs = RCI.getNumAllocatableRegs(SuperRC);
+ // Split around every non-copy instruction if this split will relax
+ // the constraints on the virtual register.
+ // Otherwise, splitting just inserts uncoalescable copies that do not help
+ // the allocation.
for (unsigned i = 0; i != Uses.size(); ++i) {
if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]))
- if (MI->isFullCopy()) {
+ if (MI->isFullCopy() ||
+ SuperRCNumAllocatableRegs ==
+ getNumAllocatableRegsForConstraints(MI, VirtReg.reg, SuperRC, TII,
+ TRI, RCI)) {
DEBUG(dbgs() << " skip:\t" << Uses[i] << '\t' << *MI);
continue;
}
@@ -1571,7 +1738,7 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
const float blockFreq =
SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
- (1.0f / BlockFrequency::getEntryFrequency());
+ (1.0f / MBFI->getEntryFreq());
SmallVector<float, 8> GapWeight;
Order.rewind();
@@ -1759,6 +1926,222 @@ unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
return tryBlockSplit(VirtReg, Order, NewVRegs);
}
+//===----------------------------------------------------------------------===//
+// Last Chance Recoloring
+//===----------------------------------------------------------------------===//
+
+/// mayRecolorAllInterferences - Check if the virtual registers that
+/// interfere with \p VirtReg on \p PhysReg (or one of its aliases) may be
+/// recolored to free \p PhysReg.
+/// When true is returned, \p RecoloringCandidates has been augmented with all
+/// the live intervals that need to be recolored in order to free \p PhysReg
+/// for \p VirtReg.
+/// \p FixedRegisters contains all the virtual registers that cannot be
+/// recolored.
+bool
+RAGreedy::mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
+ SmallLISet &RecoloringCandidates,
+ const SmallVirtRegSet &FixedRegisters) {
+ const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);
+
+ for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
+ LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
+ // If there are LastChanceRecoloringMaxInterference or more interferences,
+ // chances are one of them would not be recolorable.
+ if (Q.collectInterferingVRegs(LastChanceRecoloringMaxInterference) >=
+ LastChanceRecoloringMaxInterference && !ExhaustiveSearch) {
+ DEBUG(dbgs() << "Early abort: too many interferences.\n");
+ CutOffInfo |= CO_Interf;
+ return false;
+ }
+ for (unsigned i = Q.interferingVRegs().size(); i; --i) {
+ LiveInterval *Intf = Q.interferingVRegs()[i - 1];
+ // If Intf is done and sits on the same register class as VirtReg,
+ // it would not be recolorable as it is in the same state as VirtReg.
+ if ((getStage(*Intf) == RS_Done &&
+ MRI->getRegClass(Intf->reg) == CurRC) ||
+ FixedRegisters.count(Intf->reg)) {
+ DEBUG(dbgs() << "Early abort: the inteference is not recolorable.\n");
+ return false;
+ }
+ RecoloringCandidates.insert(Intf);
+ }
+ }
+ return true;
+}
+
+/// tryLastChanceRecoloring - Try to assign a color to \p VirtReg by recoloring
+/// its interferences.
+/// Last chance recoloring chooses a color for \p VirtReg and recolors every
+/// virtual register that was using it. The recoloring process may recursively
+/// use the last chance recoloring. Therefore, when a virtual register has been
+/// assigned a color by this mechanism, it is marked as Fixed, i.e., it cannot
+/// be last-chance-recolored again during this recoloring "session".
+/// E.g.,
+/// Let
+/// vA can use {R1, R2 }
+/// vB can use { R2, R3}
+/// vC can use {R1 }
+/// Where vA, vB, and vC cannot be split anymore (they are reloads for
+/// instance) and they all interfere.
+///
+/// vA is assigned R1
+/// vB is assigned R2
+/// vC tries to evict vA but vA is already done.
+/// Regular register allocation fails.
+///
+/// Last chance recoloring kicks in:
+/// vC does as if vA was evicted => vC uses R1.
+/// vC is marked as fixed.
+/// vA needs to find a color.
+/// None are available.
+/// vA cannot evict vC: vC is a fixed virtual register now.
+/// vA does as if vB was evicted => vA uses R2.
+/// vB needs to find a color.
+/// R3 is available.
+/// Recoloring => vC = R1, vA = R2, vB = R3
+///
+/// \p Order defines the preferred allocation order for \p VirtReg.
+/// \p NewRegs will contain any new virtual registers that have been created
+/// (split, spill) during the process and that must be assigned.
+/// \p FixedRegisters contains all the virtual registers that cannot be
+/// recolored.
+/// \p Depth gives the current depth of the last chance recoloring.
+/// \return a physical register that can be used for VirtReg or ~0u if none
+/// exists.
+unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
+ AllocationOrder &Order,
+ SmallVectorImpl<unsigned> &NewVRegs,
+ SmallVirtRegSet &FixedRegisters,
+ unsigned Depth) {
+ DEBUG(dbgs() << "Try last chance recoloring for " << VirtReg << '\n');
+ // Ranges must be Done.
+ assert((getStage(VirtReg) >= RS_Done || !VirtReg.isSpillable()) &&
+ "Last chance recoloring should really be last chance");
+ // Set the max depth to LastChanceRecoloringMaxDepth.
+ // We may want to reconsider that if we end up with too large a search space
+ // for targets with hundreds of registers.
+ // Indeed, in that case we may want to cut the search space earlier.
+ if (Depth >= LastChanceRecoloringMaxDepth && !ExhaustiveSearch) {
+ DEBUG(dbgs() << "Abort because max depth has been reached.\n");
+ CutOffInfo |= CO_Depth;
+ return ~0u;
+ }
+
+ // Set of Live intervals that will need to be recolored.
+ SmallLISet RecoloringCandidates;
+ // Record the original mapping from virtual register to physical register in
+ // case the recoloring fails.
+ DenseMap<unsigned, unsigned> VirtRegToPhysReg;
+ // Mark VirtReg as fixed, i.e., it will not be recolored past this point in
+ // this recoloring "session".
+ FixedRegisters.insert(VirtReg.reg);
+
+ Order.rewind();
+ while (unsigned PhysReg = Order.next()) {
+ DEBUG(dbgs() << "Try to assign: " << VirtReg << " to "
+ << PrintReg(PhysReg, TRI) << '\n');
+ RecoloringCandidates.clear();
+ VirtRegToPhysReg.clear();
+
+ // It is only possible to recolor virtual register interference.
+ if (Matrix->checkInterference(VirtReg, PhysReg) >
+ LiveRegMatrix::IK_VirtReg) {
+ DEBUG(dbgs() << "Some inteferences are not with virtual registers.\n");
+
+ continue;
+ }
+
+ // Give up early on this PhysReg if it is obvious we cannot recolor all
+ // the interferences.
+ if (!mayRecolorAllInterferences(PhysReg, VirtReg, RecoloringCandidates,
+ FixedRegisters)) {
+ DEBUG(dbgs() << "Some inteferences cannot be recolored.\n");
+ continue;
+ }
+
+ // RecoloringCandidates contains all the virtual registers that interfere
+ // with VirtReg on PhysReg (or one of its aliases).
+ // Enqueue them for recoloring and perform the actual recoloring.
+ PQueue RecoloringQueue;
+ for (SmallLISet::iterator It = RecoloringCandidates.begin(),
+ EndIt = RecoloringCandidates.end();
+ It != EndIt; ++It) {
+ unsigned ItVirtReg = (*It)->reg;
+ enqueue(RecoloringQueue, *It);
+ assert(VRM->hasPhys(ItVirtReg) &&
+ "Interferences are supposed to be with allocated vairables");
+
+ // Record the current allocation.
+ VirtRegToPhysReg[ItVirtReg] = VRM->getPhys(ItVirtReg);
+ // unset the related struct.
+ Matrix->unassign(**It);
+ }
+
+ // Do as if VirtReg was assigned to PhysReg so that the underlying
+ // recoloring has the right information about the interferences and
+ // available colors.
+ Matrix->assign(VirtReg, PhysReg);
+
+ // Save the current recoloring state.
+ // If we cannot recolor all the interferences, we will have to start again
+ // at this point for the next physical register.
+ SmallVirtRegSet SaveFixedRegisters(FixedRegisters);
+ if (tryRecoloringCandidates(RecoloringQueue, NewVRegs, FixedRegisters,
+ Depth)) {
+ // Do not mess with the global assignment process.
+ // I.e., VirtReg must be unassigned.
+ Matrix->unassign(VirtReg);
+ return PhysReg;
+ }
+
+ DEBUG(dbgs() << "Fail to assign: " << VirtReg << " to "
+ << PrintReg(PhysReg, TRI) << '\n');
+
+ // The recoloring attempt failed, undo the changes.
+ FixedRegisters = SaveFixedRegisters;
+ Matrix->unassign(VirtReg);
+
+ for (SmallLISet::iterator It = RecoloringCandidates.begin(),
+ EndIt = RecoloringCandidates.end();
+ It != EndIt; ++It) {
+ unsigned ItVirtReg = (*It)->reg;
+ if (VRM->hasPhys(ItVirtReg))
+ Matrix->unassign(**It);
+ Matrix->assign(**It, VirtRegToPhysReg[ItVirtReg]);
+ }
+ }
+
+ // Last chance recoloring did not work either, give up.
+ return ~0u;
+}
+
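A compact, self-contained model of the vA/vB/vC walkthrough in the comment documenting tryLastChanceRecoloring above. This is toy code, not LLVM data structures, and it omits the failure-path state restoration that the real implementation performs:

#include <cassert>
#include <map>
#include <set>
#include <string>
#include <vector>

using VReg = std::string;

std::map<VReg, std::vector<int>> Allowed = {
    {"vA", {1, 2}}, {"vB", {2, 3}}, {"vC", {1}}};   // allowed physical registers
std::map<VReg, int> Assignment;                     // current coloring

// Recolor R, recursively displacing any non-fixed holder of the register it takes.
bool recolor(const VReg &R, std::set<VReg> Fixed) {
  Fixed.insert(R);                                  // R may not be moved again below
  for (int Phys : Allowed[R]) {
    std::vector<VReg> Displaced;
    bool Blocked = false;
    for (const auto &KV : Assignment)
      if (KV.first != R && KV.second == Phys) {
        if (Fixed.count(KV.first)) { Blocked = true; break; }
        Displaced.push_back(KV.first);
      }
    if (Blocked)
      continue;                                     // a fixed vreg already owns Phys
    Assignment[R] = Phys;                           // tentatively take Phys
    bool OK = true;
    for (const VReg &D : Displaced)
      OK = OK && recolor(D, Fixed);                 // recolor the evicted vregs
    if (OK)
      return true;
  }
  Assignment.erase(R);
  return false;
}

int main() {
  Assignment = {{"vA", 1}, {"vB", 2}};              // vC arrives last and only allows R1
  assert(recolor("vC", {}));
  assert(Assignment["vC"] == 1 && Assignment["vA"] == 2 && Assignment["vB"] == 3);
  return 0;
}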
+/// tryRecoloringCandidates - Try to assign a new color to every register
+/// in \p RecoloringQueue.
+/// \p NewRegs will contain any new virtual register created during the
+/// recoloring process.
+/// \p FixedRegisters[in/out] contains all the registers that have been
+/// recolored.
+/// \return true if all virtual registers in RecoloringQueue were successfully
+/// recolored, false otherwise.
+bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
+ SmallVectorImpl<unsigned> &NewVRegs,
+ SmallVirtRegSet &FixedRegisters,
+ unsigned Depth) {
+ while (!RecoloringQueue.empty()) {
+ LiveInterval *LI = dequeue(RecoloringQueue);
+ DEBUG(dbgs() << "Try to recolor: " << *LI << '\n');
+ unsigned PhysReg;
+ PhysReg = selectOrSplitImpl(*LI, NewVRegs, FixedRegisters, Depth + 1);
+ if (PhysReg == ~0u || !PhysReg)
+ return false;
+ DEBUG(dbgs() << "Recoloring of " << *LI
+ << " succeeded with: " << PrintReg(PhysReg, TRI) << '\n');
+ Matrix->assign(*LI, PhysReg);
+ FixedRegisters.insert(LI->reg);
+ }
+ return true;
+}
//===----------------------------------------------------------------------===//
// Main Entry Point
@@ -1766,10 +2149,122 @@ unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
SmallVectorImpl<unsigned> &NewVRegs) {
+ CutOffInfo = CO_None;
+ LLVMContext &Ctx = MF->getFunction()->getContext();
+ SmallVirtRegSet FixedRegisters;
+ unsigned Reg = selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters);
+ if (Reg == ~0U && (CutOffInfo != CO_None)) {
+ uint8_t CutOffEncountered = CutOffInfo & (CO_Depth | CO_Interf);
+ if (CutOffEncountered == CO_Depth)
+ Ctx.emitError("register allocation failed: maximum depth for recoloring "
+ "reached. Use -fexhaustive-register-search to skip "
+ "cutoffs");
+ else if (CutOffEncountered == CO_Interf)
+ Ctx.emitError("register allocation failed: maximum interference for "
+ "recoloring reached. Use -fexhaustive-register-search "
+ "to skip cutoffs");
+ else if (CutOffEncountered == (CO_Depth | CO_Interf))
+ Ctx.emitError("register allocation failed: maximum interference and "
+ "depth for recoloring reached. Use "
+ "-fexhaustive-register-search to skip cutoffs");
+ }
+ return Reg;
+}
+
+/// Using a CSR for the first time has a cost because it causes push|pop
+/// to be added to prologue|epilogue. Splitting a cold section of the live
+/// range can have lower cost than using the CSR for the first time;
+/// Spilling a live range in the cold path can have lower cost than using
+/// the CSR for the first time. Returns the physical register if we decide
+/// to use the CSR; otherwise return 0.
+unsigned RAGreedy::tryAssignCSRFirstTime(LiveInterval &VirtReg,
+ AllocationOrder &Order,
+ unsigned PhysReg,
+ unsigned &CostPerUseLimit,
+ SmallVectorImpl<unsigned> &NewVRegs) {
+ if (getStage(VirtReg) == RS_Spill && VirtReg.isSpillable()) {
+ // We choose spill over using the CSR for the first time if the spill cost
+ // is lower than CSRCost.
+ SA->analyze(&VirtReg);
+ if (calcSpillCost() >= CSRCost)
+ return PhysReg;
+
+ // We are going to spill, set CostPerUseLimit to 1 to make sure that
+ // we will not use a callee-saved register in tryEvict.
+ CostPerUseLimit = 1;
+ return 0;
+ }
+ if (getStage(VirtReg) < RS_Split) {
+ // We choose pre-splitting over using the CSR for the first time if
+ // the cost of splitting is lower than CSRCost.
+ SA->analyze(&VirtReg);
+ unsigned NumCands = 0;
+ BlockFrequency BestCost = CSRCost; // Don't modify CSRCost.
+ unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
+ NumCands, true /*IgnoreCSR*/);
+ if (BestCand == NoCand)
+ // Use the CSR if we can't find a region split below CSRCost.
+ return PhysReg;
+
+ // Perform the actual pre-splitting.
+ doRegionSplit(VirtReg, BestCand, false/*HasCompact*/, NewVRegs);
+ return 0;
+ }
+ return PhysReg;
+}
+
+void RAGreedy::initializeCSRCost() {
+ // We use the larger of the command-line option and the value reported
+ // by TRI.
+ CSRCost = BlockFrequency(
+ std::max((unsigned)CSRFirstTimeCost, TRI->getCSRFirstUseCost()));
+ if (!CSRCost.getFrequency())
+ return;
+
+ // Raw cost is relative to Entry == 2^14; scale it appropriately.
+ uint64_t ActualEntry = MBFI->getEntryFreq();
+ if (!ActualEntry) {
+ CSRCost = 0;
+ return;
+ }
+ uint64_t FixedEntry = 1 << 14;
+ if (ActualEntry < FixedEntry)
+ CSRCost *= BranchProbability(ActualEntry, FixedEntry);
+ else if (ActualEntry <= UINT32_MAX)
+ // Invert the fraction and divide.
+ CSRCost /= BranchProbability(FixedEntry, ActualEntry);
+ else
+ // Can't use BranchProbability in general, since it takes 32-bit numbers.
+ CSRCost = CSRCost.getFrequency() * (ActualEntry / FixedEntry);
+}
+
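initializeCSRCost above expresses the raw callee-saved-register cost relative to a fixed entry frequency of 2^14 and then rescales it to the current function's real entry frequency. A worked example with made-up numbers (plain integer arithmetic; the real code goes through BlockFrequency/BranchProbability, whose rounding may differ slightly):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t FixedEntry = 1 << 14;   // 16384, the reference entry frequency
  uint64_t RawCSRCost = 5;               // from -regalloc-csr-first-time-cost or TRI
  uint64_t ActualEntry = 4096;           // this function's entry frequency

  // ActualEntry < FixedEntry, so scale the cost down by 4096/16384 = 1/4.
  uint64_t Scaled = RawCSRCost * ActualEntry / FixedEntry;
  assert(Scaled == 1);                   // 5 * 1/4, truncated
  return 0;
}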
+unsigned RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
+ SmallVectorImpl<unsigned> &NewVRegs,
+ SmallVirtRegSet &FixedRegisters,
+ unsigned Depth) {
+ unsigned CostPerUseLimit = ~0u;
// First try assigning a free register.
AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
- if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
- return PhysReg;
+ if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs)) {
+ // We check other options if we are using a CSR for the first time.
+ bool CSRFirstUse = false;
+ if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
+ if (!MRI->isPhysRegUsed(CSR))
+ CSRFirstUse = true;
+
+ // When NewVRegs is not empty, we may have made decisions such as evicting
+ // a virtual register; go with the earlier decisions and use the physical
+ // register.
+ if (CSRCost.getFrequency() && CSRFirstUse && NewVRegs.empty()) {
+ unsigned CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg,
+ CostPerUseLimit, NewVRegs);
+ if (CSRReg || !NewVRegs.empty())
+ // Return now if we decide to use a CSR or create new vregs due to
+ // pre-splitting.
+ return CSRReg;
+ } else
+ return PhysReg;
+ }
LiveRangeStage Stage = getStage(VirtReg);
DEBUG(dbgs() << StageName[Stage]
@@ -1779,7 +2274,7 @@ unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
// queue. The RS_Split ranges already failed to do this, and they should not
// get a second chance until they have been split.
if (Stage != RS_Split)
- if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
+ if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs, CostPerUseLimit))
return PhysReg;
assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");
@@ -1797,7 +2292,8 @@ unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
// If we couldn't allocate a register from spilling, there is probably some
// invalid inline assembly. The base class will report it.
if (Stage >= RS_Done || !VirtReg.isSpillable())
- return ~0u;
+ return tryLastChanceRecoloring(VirtReg, Order, NewVRegs, FixedRegisters,
+ Depth);
// Try splitting VirtReg or interferences.
unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
@@ -1823,6 +2319,14 @@ bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
<< "********** Function: " << mf.getName() << '\n');
MF = &mf;
+ const TargetMachine &TM = MF->getTarget();
+ TRI = TM.getRegisterInfo();
+ TII = TM.getInstrInfo();
+ RCI.runOnMachineFunction(mf);
+
+ EnableLocalReassign = EnableLocalReassignment ||
+ TM.getSubtargetImpl()->enableRALocalReassignment(TM.getOptLevel());
+
if (VerifyEnabled)
MF->verify(this, "Before greedy register allocator");
@@ -1838,6 +2342,8 @@ bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
SpillPlacer = &getAnalysis<SpillPlacement>();
DebugVars = &getAnalysis<LiveDebugVariables>();
+ initializeCSRCost();
+
calculateSpillWeightsAndHints(*LIS, mf, *Loops, *MBFI);
DEBUG(LIS->dump());
diff --git a/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp b/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp
index 88c8201..8a3b53f 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp
@@ -29,12 +29,9 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "regalloc"
-
#include "llvm/CodeGen/RegAllocPBQP.h"
#include "RegisterCoalescer.h"
#include "Spiller.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
@@ -45,13 +42,11 @@
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/PBQP/Graph.h"
-#include "llvm/CodeGen/PBQP/HeuristicSolver.h"
-#include "llvm/CodeGen/PBQP/Heuristics/Briggs.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
@@ -63,6 +58,8 @@
using namespace llvm;
+#define DEBUG_TYPE "regalloc"
+
static RegisterRegAlloc
registerPBQPRepAlloc("pbqp", "PBQP register allocator",
createDefaultPBQPRegisterAllocator);
@@ -91,8 +88,8 @@ public:
static char ID;
/// Construct a PBQP register allocator.
- RegAllocPBQP(OwningPtr<PBQPBuilder> &b, char *cPassID=0)
- : MachineFunctionPass(ID), builder(b.take()), customPassID(cPassID) {
+ RegAllocPBQP(std::unique_ptr<PBQPBuilder> b, char *cPassID = nullptr)
+ : MachineFunctionPass(ID), builder(std::move(b)), customPassID(cPassID) {
initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
initializeLiveStacksPass(*PassRegistry::getPassRegistry());
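The constructor above now takes the builder as a std::unique_ptr by value and moves it into the member, replacing the OwningPtr-by-reference/take() idiom. A minimal sketch of that ownership handoff (illustrative types only):

#include <memory>
#include <utility>

struct Builder { virtual ~Builder() {} };

struct Allocator {
  explicit Allocator(std::unique_ptr<Builder> B) : builder(std::move(B)) {}
  std::unique_ptr<Builder> builder;
};

int main() {
  std::unique_ptr<Builder> B(new Builder());
  Allocator A(std::move(B));        // the caller must spell out the ownership transfer
  return B == nullptr ? 0 : 1;      // B has been emptied by the move
}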
@@ -100,15 +97,15 @@ public:
}
/// Return the pass name.
- virtual const char* getPassName() const {
+ const char* getPassName() const override {
return "PBQP Register Allocator";
}
/// PBQP analysis usage.
- virtual void getAnalysisUsage(AnalysisUsage &au) const;
+ void getAnalysisUsage(AnalysisUsage &au) const override;
/// Perform register allocation
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
private:
@@ -120,8 +117,7 @@ private:
typedef std::map<RegPair, PBQP::PBQPNum> CoalesceMap;
typedef std::set<unsigned> RegSet;
-
- OwningPtr<PBQPBuilder> builder;
+ std::unique_ptr<PBQPBuilder> builder;
char *customPassID;
@@ -132,7 +128,7 @@ private:
MachineRegisterInfo *mri;
const MachineBlockFrequencyInfo *mbfi;
- OwningPtr<Spiller> spiller;
+ std::unique_ptr<Spiller> spiller;
LiveIntervals *lis;
LiveStacks *lss;
VirtRegMap *vrm;
@@ -157,13 +153,13 @@ char RegAllocPBQP::ID = 0;
} // End anonymous namespace.
-unsigned PBQPRAProblem::getVRegForNode(PBQP::Graph::NodeId node) const {
+unsigned PBQPRAProblem::getVRegForNode(PBQPRAGraph::NodeId node) const {
Node2VReg::const_iterator vregItr = node2VReg.find(node);
assert(vregItr != node2VReg.end() && "No vreg for node.");
return vregItr->second;
}
-PBQP::Graph::NodeId PBQPRAProblem::getNodeForVReg(unsigned vreg) const {
+PBQPRAGraph::NodeId PBQPRAProblem::getNodeForVReg(unsigned vreg) const {
VReg2Node::const_iterator nodeItr = vreg2Node.find(vreg);
assert(nodeItr != vreg2Node.end() && "No node for vreg.");
return nodeItr->second;
@@ -194,8 +190,8 @@ PBQPRAProblem *PBQPBuilder::build(MachineFunction *mf, const LiveIntervals *lis,
MachineRegisterInfo *mri = &mf->getRegInfo();
const TargetRegisterInfo *tri = mf->getTarget().getRegisterInfo();
- OwningPtr<PBQPRAProblem> p(new PBQPRAProblem());
- PBQP::Graph &g = p->getGraph();
+ std::unique_ptr<PBQPRAProblem> p(new PBQPRAProblem());
+ PBQPRAGraph &g = p->getGraph();
RegSet pregs;
// Collect the set of preg intervals, record that they're used in the MF.
@@ -220,7 +216,7 @@ PBQPRAProblem *PBQPBuilder::build(MachineFunction *mf, const LiveIntervals *lis,
// Compute an initial allowed set for the current vreg.
typedef std::vector<unsigned> VRAllowed;
VRAllowed vrAllowed;
- ArrayRef<uint16_t> rawOrder = trc->getRawAllocationOrder(*mf);
+ ArrayRef<MCPhysReg> rawOrder = trc->getRawAllocationOrder(*mf);
for (unsigned i = 0; i != rawOrder.size(); ++i) {
unsigned preg = rawOrder[i];
if (mri->isReserved(preg))
@@ -245,17 +241,19 @@ PBQPRAProblem *PBQPBuilder::build(MachineFunction *mf, const LiveIntervals *lis,
vrAllowed.push_back(preg);
}
- // Construct the node.
- PBQP::Graph::NodeId node =
- g.addNode(PBQP::Vector(vrAllowed.size() + 1, 0));
-
- // Record the mapping and allowed set in the problem.
- p->recordVReg(vreg, node, vrAllowed.begin(), vrAllowed.end());
+ PBQP::Vector nodeCosts(vrAllowed.size() + 1, 0);
PBQP::PBQPNum spillCost = (vregLI->weight != 0.0) ?
vregLI->weight : std::numeric_limits<PBQP::PBQPNum>::min();
- addSpillCosts(g.getNodeCosts(node), spillCost);
+ addSpillCosts(nodeCosts, spillCost);
+
+ // Construct the node.
+ PBQPRAGraph::NodeId nId = g.addNode(std::move(nodeCosts));
+
+ // Record the mapping and allowed set in the problem.
+ p->recordVReg(vreg, nId, vrAllowed.begin(), vrAllowed.end());
+
}
for (RegSet::const_iterator vr1Itr = vregs.begin(), vrEnd = vregs.end();
@@ -264,24 +262,24 @@ PBQPRAProblem *PBQPBuilder::build(MachineFunction *mf, const LiveIntervals *lis,
const LiveInterval &l1 = lis->getInterval(vr1);
const PBQPRAProblem::AllowedSet &vr1Allowed = p->getAllowedSet(vr1);
- for (RegSet::const_iterator vr2Itr = llvm::next(vr1Itr);
- vr2Itr != vrEnd; ++vr2Itr) {
+ for (RegSet::const_iterator vr2Itr = std::next(vr1Itr); vr2Itr != vrEnd;
+ ++vr2Itr) {
unsigned vr2 = *vr2Itr;
const LiveInterval &l2 = lis->getInterval(vr2);
const PBQPRAProblem::AllowedSet &vr2Allowed = p->getAllowedSet(vr2);
assert(!l2.empty() && "Empty interval in vreg set?");
if (l1.overlaps(l2)) {
- PBQP::Graph::EdgeId edge =
- g.addEdge(p->getNodeForVReg(vr1), p->getNodeForVReg(vr2),
- PBQP::Matrix(vr1Allowed.size()+1, vr2Allowed.size()+1, 0));
+ PBQP::Matrix edgeCosts(vr1Allowed.size()+1, vr2Allowed.size()+1, 0);
+ addInterferenceCosts(edgeCosts, vr1Allowed, vr2Allowed, tri);
- addInterferenceCosts(g.getEdgeCosts(edge), vr1Allowed, vr2Allowed, tri);
+ g.addEdge(p->getNodeForVReg(vr1), p->getNodeForVReg(vr2),
+ std::move(edgeCosts));
}
}
}
- return p.take();
+ return p.release();
}
void PBQPBuilder::addSpillCosts(PBQP::Vector &costVec,
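take() on OwningPtr becomes release() on std::unique_ptr at the end of build(): both return the raw pointer and give up ownership, leaving the caller responsible for the object. Standalone illustration:

#include <cassert>
#include <memory>

int main() {
  std::unique_ptr<int> P(new int(42));
  int *Raw = P.release();   // P stops owning the object; no delete happens here
  assert(P == nullptr && *Raw == 42);
  delete Raw;               // freeing it is now the caller's responsibility
  return 0;
}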
@@ -315,25 +313,17 @@ PBQPRAProblem *PBQPBuilderWithCoalescing::build(MachineFunction *mf,
const MachineBlockFrequencyInfo *mbfi,
const RegSet &vregs) {
- OwningPtr<PBQPRAProblem> p(PBQPBuilder::build(mf, lis, mbfi, vregs));
- PBQP::Graph &g = p->getGraph();
+ std::unique_ptr<PBQPRAProblem> p(PBQPBuilder::build(mf, lis, mbfi, vregs));
+ PBQPRAGraph &g = p->getGraph();
const TargetMachine &tm = mf->getTarget();
CoalescerPair cp(*tm.getRegisterInfo());
// Scan the machine function and add a coalescing cost whenever CoalescerPair
// gives the Ok.
- for (MachineFunction::const_iterator mbbItr = mf->begin(),
- mbbEnd = mf->end();
- mbbItr != mbbEnd; ++mbbItr) {
- const MachineBasicBlock *mbb = &*mbbItr;
-
- for (MachineBasicBlock::const_iterator miItr = mbb->begin(),
- miEnd = mbb->end();
- miItr != miEnd; ++miItr) {
- const MachineInstr *mi = &*miItr;
-
- if (!cp.setRegisters(mi)) {
+ for (const auto &mbb : *mf) {
+ for (const auto &mi : mbb) {
+ if (!cp.setRegisters(&mi)) {
continue; // Not coalescable.
}
@@ -348,8 +338,7 @@ PBQPRAProblem *PBQPBuilderWithCoalescing::build(MachineFunction *mf,
// value plucked randomly out of the air.
PBQP::PBQPNum cBenefit =
- copyFactor * LiveIntervals::getSpillWeight(false, true,
- mbfi->getBlockFreq(mbb));
+ copyFactor * LiveIntervals::getSpillWeight(false, true, mbfi, &mi);
if (cp.isPhys()) {
if (!mf->getRegInfo().isAllocatable(dst)) {
@@ -363,33 +352,37 @@ PBQPRAProblem *PBQPBuilderWithCoalescing::build(MachineFunction *mf,
}
if (pregOpt < allowed.size()) {
++pregOpt; // +1 to account for spill option.
- PBQP::Graph::NodeId node = p->getNodeForVReg(src);
- addPhysRegCoalesce(g.getNodeCosts(node), pregOpt, cBenefit);
+ PBQPRAGraph::NodeId node = p->getNodeForVReg(src);
+ llvm::dbgs() << "Reading node costs for node " << node << "\n";
+ llvm::dbgs() << "Source node: " << &g.getNodeCosts(node) << "\n";
+ PBQP::Vector newCosts(g.getNodeCosts(node));
+ addPhysRegCoalesce(newCosts, pregOpt, cBenefit);
+ g.setNodeCosts(node, newCosts);
}
} else {
const PBQPRAProblem::AllowedSet *allowed1 = &p->getAllowedSet(dst);
const PBQPRAProblem::AllowedSet *allowed2 = &p->getAllowedSet(src);
- PBQP::Graph::NodeId node1 = p->getNodeForVReg(dst);
- PBQP::Graph::NodeId node2 = p->getNodeForVReg(src);
- PBQP::Graph::EdgeId edge = g.findEdge(node1, node2);
+ PBQPRAGraph::NodeId node1 = p->getNodeForVReg(dst);
+ PBQPRAGraph::NodeId node2 = p->getNodeForVReg(src);
+ PBQPRAGraph::EdgeId edge = g.findEdge(node1, node2);
if (edge == g.invalidEdgeId()) {
- edge = g.addEdge(node1, node2, PBQP::Matrix(allowed1->size() + 1,
- allowed2->size() + 1,
- 0));
+ PBQP::Matrix costs(allowed1->size() + 1, allowed2->size() + 1, 0);
+ addVirtRegCoalesce(costs, *allowed1, *allowed2, cBenefit);
+ g.addEdge(node1, node2, costs);
} else {
- if (g.getEdgeNode1(edge) == node2) {
+ if (g.getEdgeNode1Id(edge) == node2) {
std::swap(node1, node2);
std::swap(allowed1, allowed2);
}
+ PBQP::Matrix costs(g.getEdgeCosts(edge));
+ addVirtRegCoalesce(costs, *allowed1, *allowed2, cBenefit);
+ g.setEdgeCosts(edge, costs);
}
-
- addVirtRegCoalesce(g.getEdgeCosts(edge), *allowed1, *allowed2,
- cBenefit);
}
}
}
- return p.take();
+ return p.release();
}
void PBQPBuilderWithCoalescing::addPhysRegCoalesce(PBQP::Vector &costVec,
@@ -472,14 +465,12 @@ bool RegAllocPBQP::mapPBQPToRegAlloc(const PBQPRAProblem &problem,
// Clear the existing allocation.
vrm->clearAllVirt();
- const PBQP::Graph &g = problem.getGraph();
+ const PBQPRAGraph &g = problem.getGraph();
// Iterate over the nodes mapping the PBQP solution to a register
// assignment.
- for (PBQP::Graph::NodeItr nodeItr = g.nodesBegin(),
- nodeEnd = g.nodesEnd();
- nodeItr != nodeEnd; ++nodeItr) {
- unsigned vreg = problem.getVRegForNode(*nodeItr);
- unsigned alloc = solution.getSelection(*nodeItr);
+ for (auto NId : g.nodeIds()) {
+ unsigned vreg = problem.getVRegForNode(NId);
+ unsigned alloc = solution.getSelection(NId);
if (problem.isPRegOption(vreg, alloc)) {
unsigned preg = problem.getPRegForOption(vreg, alloc);
@@ -587,8 +578,8 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
while (!pbqpAllocComplete) {
DEBUG(dbgs() << " PBQP Regalloc round " << round << ":\n");
- OwningPtr<PBQPRAProblem> problem(
- builder->build(mf, lis, mbfi, vregsToAlloc));
+ std::unique_ptr<PBQPRAProblem> problem(
+ builder->build(mf, lis, mbfi, vregsToAlloc));
#ifndef NDEBUG
if (pbqpDumpGraphs) {
@@ -596,7 +587,7 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
rs << round;
std::string graphFileName(fqn + "." + rs.str() + ".pbqpgraph");
std::string tmp;
- raw_fd_ostream os(graphFileName.c_str(), tmp);
+ raw_fd_ostream os(graphFileName.c_str(), tmp, sys::fs::F_Text);
DEBUG(dbgs() << "Dumping graph for round " << round << " to \""
<< graphFileName << "\"\n");
problem->getGraph().dump(os);
@@ -604,8 +595,7 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
#endif
PBQP::Solution solution =
- PBQP::HeuristicSolver<PBQP::Heuristics::Briggs>::solve(
- problem->getGraph());
+ PBQP::RegAlloc::solve(problem->getGraph());
pbqpAllocComplete = mapPBQPToRegAlloc(*problem, solution);
@@ -623,19 +613,19 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
return true;
}
-FunctionPass* llvm::createPBQPRegisterAllocator(
- OwningPtr<PBQPBuilder> &builder,
- char *customPassID) {
- return new RegAllocPBQP(builder, customPassID);
+FunctionPass *
+llvm::createPBQPRegisterAllocator(std::unique_ptr<PBQPBuilder> builder,
+ char *customPassID) {
+ return new RegAllocPBQP(std::move(builder), customPassID);
}
FunctionPass* llvm::createDefaultPBQPRegisterAllocator() {
- OwningPtr<PBQPBuilder> Builder;
+ std::unique_ptr<PBQPBuilder> Builder;
if (pbqpCoalescing)
- Builder.reset(new PBQPBuilderWithCoalescing());
+ Builder = llvm::make_unique<PBQPBuilderWithCoalescing>();
else
- Builder.reset(new PBQPBuilder());
- return createPBQPRegisterAllocator(Builder);
+ Builder = llvm::make_unique<PBQPBuilder>();
+ return createPBQPRegisterAllocator(std::move(Builder));
}
#undef DEBUG_TYPE
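createDefaultPBQPRegisterAllocator now builds the default builder with llvm::make_unique rather than reset(new ...). The same selection pattern in plain C++14, where std::make_unique plays the role llvm::make_unique was added to fill before C++14 (the types here are stand-ins):

#include <memory>

struct PBQPBuilder { virtual ~PBQPBuilder() {} };
struct PBQPBuilderWithCoalescing : PBQPBuilder {};

std::unique_ptr<PBQPBuilder> makeBuilder(bool Coalescing) {
  if (Coalescing)
    return std::make_unique<PBQPBuilderWithCoalescing>();
  return std::make_unique<PBQPBuilder>();
}

int main() { return makeBuilder(true) ? 0 : 1; }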
diff --git a/contrib/llvm/lib/CodeGen/RegisterClassInfo.cpp b/contrib/llvm/lib/CodeGen/RegisterClassInfo.cpp
index cacd7de..8b5445c 100644
--- a/contrib/llvm/lib/CodeGen/RegisterClassInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/RegisterClassInfo.cpp
@@ -8,13 +8,12 @@
//===----------------------------------------------------------------------===//
//
// This file implements the RegisterClassInfo class which provides dynamic
-// information about target register classes. Callee saved and reserved
-// registers depends on calling conventions and other dynamic information, so
-// some things cannot be determined statically.
+// information about target register classes. Callee-saved vs. caller-saved and
+// reserved registers depend on calling conventions and other dynamic
+// information, so some things cannot be determined statically.
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "regalloc"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -25,12 +24,14 @@
using namespace llvm;
+#define DEBUG_TYPE "regalloc"
+
static cl::opt<unsigned>
StressRA("stress-regalloc", cl::Hidden, cl::init(0), cl::value_desc("N"),
cl::desc("Limit all regclasses to N registers"));
-RegisterClassInfo::RegisterClassInfo() : Tag(0), MF(0), TRI(0), CalleeSaved(0)
-{}
+RegisterClassInfo::RegisterClassInfo()
+ : Tag(0), MF(nullptr), TRI(nullptr), CalleeSaved(nullptr) {}
void RegisterClassInfo::runOnMachineFunction(const MachineFunction &mf) {
bool Update = false;
@@ -151,7 +152,7 @@ void RegisterClassInfo::compute(const TargetRegisterClass *RC) const {
/// nonoverlapping reserved registers. However, computing the allocation order
/// for all register classes would be too expensive.
unsigned RegisterClassInfo::computePSetLimit(unsigned Idx) const {
- const TargetRegisterClass *RC = 0;
+ const TargetRegisterClass *RC = nullptr;
unsigned NumRCUnits = 0;
for (TargetRegisterInfo::regclass_iterator
RI = TRI->regclass_begin(), RE = TRI->regclass_end(); RI != RE; ++RI) {
diff --git a/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp b/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp
index dd86c1f..e04a3cf 100644
--- a/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -13,9 +13,7 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "regalloc"
#include "RegisterCoalescer.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
@@ -43,6 +41,8 @@
#include <cmath>
using namespace llvm;
+#define DEBUG_TYPE "regalloc"
+
STATISTIC(numJoins , "Number of interval joins performed");
STATISTIC(numCrossRCs , "Number of cross class joins performed");
STATISTIC(numCommutes , "Number of instruction commuting performed");
@@ -112,7 +112,7 @@ namespace {
void eliminateDeadDefs();
/// LiveRangeEdit callback.
- void LRE_WillEraseInstruction(MachineInstr *MI);
+ void LRE_WillEraseInstruction(MachineInstr *MI) override;
/// coalesceLocals - coalesce the LocalWorkList.
void coalesceLocals();
@@ -188,15 +188,15 @@ namespace {
initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
- virtual void releaseMemory();
+ void releaseMemory() override;
/// runOnMachineFunction - pass entry point
- virtual bool runOnMachineFunction(MachineFunction&);
+ bool runOnMachineFunction(MachineFunction&) override;
/// print - Implement the dump method.
- virtual void print(raw_ostream &O, const Module* = 0) const;
+ void print(raw_ostream &O, const Module* = nullptr) const override;
};
} /// end anonymous namespace
@@ -241,9 +241,8 @@ static bool isSplitEdge(const MachineBasicBlock *MBB) {
if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
return false;
- for (MachineBasicBlock::const_iterator MII = MBB->begin(), E = MBB->end();
- MII != E; ++MII) {
- if (!MII->isCopyLike() && !MII->isUnconditionalBranch())
+ for (const auto &MI : *MBB) {
+ if (!MI.isCopyLike() && !MI.isUnconditionalBranch())
return false;
}
return true;
@@ -252,7 +251,7 @@ static bool isSplitEdge(const MachineBasicBlock *MBB) {
bool CoalescerPair::setRegisters(const MachineInstr *MI) {
SrcReg = DstReg = 0;
SrcIdx = DstIdx = 0;
- NewRC = 0;
+ NewRC = nullptr;
Flipped = CrossClass = false;
unsigned Src, Dst, SrcSub, DstSub;
@@ -283,7 +282,6 @@ bool CoalescerPair::setRegisters(const MachineInstr *MI) {
if (SrcSub) {
Dst = TRI.getMatchingSuperReg(Dst, SrcSub, MRI.getRegClass(Src));
if (!Dst) return false;
- SrcSub = 0;
} else if (!MRI.getRegClass(Src)->contains(Dst)) {
return false;
}
@@ -399,7 +397,8 @@ void RegisterCoalescer::getAnalysisUsage(AnalysisUsage &AU) const {
void RegisterCoalescer::eliminateDeadDefs() {
SmallVector<unsigned, 8> NewRegs;
- LiveRangeEdit(0, NewRegs, *MF, *LIS, 0, this).eliminateDeadDefs(DeadDefs);
+ LiveRangeEdit(nullptr, NewRegs, *MF, *LIS,
+ nullptr, this).eliminateDeadDefs(DeadDefs);
}
// Callback from eliminateDeadDefs().
@@ -622,16 +621,15 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
// If some of the uses of IntA.reg are already coalesced away, return false.
// It's not possible to determine whether it's safe to perform the coalescing.
- for (MachineRegisterInfo::use_nodbg_iterator UI =
- MRI->use_nodbg_begin(IntA.reg),
- UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
- MachineInstr *UseMI = &*UI;
+ for (MachineOperand &MO : MRI->use_nodbg_operands(IntA.reg)) {
+ MachineInstr *UseMI = MO.getParent();
+ unsigned OpNo = &MO - &UseMI->getOperand(0);
SlotIndex UseIdx = LIS->getInstructionIndex(UseMI);
LiveInterval::iterator US = IntA.FindSegmentContaining(UseIdx);
if (US == IntA.end() || US->valno != AValNo)
continue;
// If this use is tied to a def, we can't rewrite the register.
- if (UseMI->isRegTiedToDefOperand(UI.getOperandNo()))
+ if (UseMI->isRegTiedToDefOperand(OpNo))
return false;
}
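With the switch to a range-based loop over use operands there is no iterator left to ask for the operand number, so the code recomputes it from the operand's address relative to operand 0; this relies on the operands being stored contiguously. The same trick on a plain contiguous container:

#include <cassert>
#include <vector>

int main() {
  std::vector<int> Ops = {7, 8, 9};
  for (int &Op : Ops) {
    unsigned Idx = static_cast<unsigned>(&Op - &Ops[0]);  // element index from its address
    assert(&Ops[Idx] == &Op);
  }
  return 0;
}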
@@ -669,8 +667,8 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
// Update uses of IntA of the specific Val# with IntB.
for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(IntA.reg),
UE = MRI->use_end(); UI != UE;) {
- MachineOperand &UseMO = UI.getOperand();
- MachineInstr *UseMI = &*UI;
+ MachineOperand &UseMO = *UI;
+ MachineInstr *UseMI = UseMO.getParent();
++UI;
if (UseMI->isDebugValue()) {
// FIXME These don't have an instruction index. Not clear we have enough
@@ -769,6 +767,14 @@ bool RegisterCoalescer::reMaterializeTrivialDef(CoalescerPair &CP,
if (DstOperand.getSubReg() && !DstOperand.isUndef())
return false;
+ // If both SrcIdx and DstIdx are set, correct rematerialization would widen
+ // the register substantially (beyond both source and dest size). This is bad
+ // for performance since it can cascade through a function, introducing many
+ // extra spills and fills (e.g. ARM can easily end up copying QQQQPR registers
+ // around after a few subreg copies).
+ if (SrcIdx && DstIdx)
+ return false;
+
const TargetRegisterClass *DefRC = TII->getRegClass(MCID, 0, TRI, *MF);
if (!DefMI->isImplicitDef()) {
if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
@@ -793,9 +799,9 @@ bool RegisterCoalescer::reMaterializeTrivialDef(CoalescerPair &CP,
MachineBasicBlock *MBB = CopyMI->getParent();
MachineBasicBlock::iterator MII =
- llvm::next(MachineBasicBlock::iterator(CopyMI));
+ std::next(MachineBasicBlock::iterator(CopyMI));
TII->reMaterialize(*MBB, MII, DstReg, SrcIdx, DefMI, *TRI);
- MachineInstr *NewMI = prior(MII);
+ MachineInstr *NewMI = std::prev(MII);
LIS->ReplaceMachineInstrInMaps(CopyMI, NewMI);
CopyMI->eraseFromParent();
@@ -816,31 +822,19 @@ bool RegisterCoalescer::reMaterializeTrivialDef(CoalescerPair &CP,
}
if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
+ const TargetRegisterClass *NewRC = CP.getNewRC();
unsigned NewIdx = NewMI->getOperand(0).getSubReg();
- const TargetRegisterClass *RCForInst;
+
if (NewIdx)
- RCForInst = TRI->getMatchingSuperRegClass(MRI->getRegClass(DstReg), DefRC,
- NewIdx);
-
- if (MRI->constrainRegClass(DstReg, DefRC)) {
- // The materialized instruction is quite capable of setting DstReg
- // directly, but it may still have a now-trivial subregister index which
- // we should clear.
- NewMI->getOperand(0).setSubReg(0);
- } else if (NewIdx && RCForInst) {
- // The subreg index on NewMI is essential; we still have to make sure
- // DstReg:idx is in a class that NewMI can use.
- MRI->constrainRegClass(DstReg, RCForInst);
- } else {
- // DstReg is actually incompatible with NewMI, we have to move to a
- // super-reg's class. This could come from a sequence like:
- // GR32 = MOV32r0
- // GR8 = COPY GR32:sub_8
- MRI->setRegClass(DstReg, CP.getNewRC());
- updateRegDefsUses(DstReg, DstReg, DstIdx);
- NewMI->getOperand(0).setSubReg(
- TRI->composeSubRegIndices(SrcIdx, DefMI->getOperand(0).getSubReg()));
- }
+ NewRC = TRI->getMatchingSuperRegClass(NewRC, DefRC, NewIdx);
+ else
+ NewRC = TRI->getCommonSubClass(NewRC, DefRC);
+
+ assert(NewRC && "subreg chosen for remat incompatible with instruction");
+ MRI->setRegClass(DstReg, NewRC);
+
+ updateRegDefsUses(DstReg, DstReg, DstIdx);
+ NewMI->getOperand(0).setSubReg(NewIdx);
} else if (NewMI->getOperand(0).getReg() != CopyDstReg) {
// The New instruction may be defining a sub-register of what's actually
// been asked for. If so it must implicitly define the whole thing.
@@ -851,6 +845,27 @@ bool RegisterCoalescer::reMaterializeTrivialDef(CoalescerPair &CP,
true /*IsDef*/,
true /*IsImp*/,
false /*IsKill*/));
+ // Record small dead def live-ranges for all the subregisters
+ // of the destination register.
+ // Otherwise, variables that live through may miss some
+ // interferences, thus creating invalid allocation.
+ // E.g., i386 code:
+ // vreg1 = somedef ; vreg1 GR8
+ // vreg2 = remat ; vreg2 GR32
+ // CL = COPY vreg2.sub_8bit
+ // = somedef vreg1 ; vreg1 GR8
+ // =>
+ // vreg1 = somedef ; vreg1 GR8
+ // ECX<def, dead> = remat ; CL<imp-def>
+ // = somedef vreg1 ; vreg1 GR8
+ // vreg1 will see the interferences with CL but not with CH since
+ // no live-ranges would have been created for ECX.
+ // Fix that!
+ SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI);
+ for (MCRegUnitIterator Units(NewMI->getOperand(0).getReg(), TRI);
+ Units.isValid(); ++Units)
+ if (LiveRange *LR = LIS->getCachedRegUnit(*Units))
+ LR->createDeadDef(NewMIIdx.getRegSlot(), LIS->getVNInfoAllocator());
}
if (NewMI->getOperand(0).getSubReg())
@@ -909,17 +924,14 @@ bool RegisterCoalescer::eliminateUndefCopy(MachineInstr *CopyMI,
// No intervals are live-in to CopyMI - it is undef.
if (CP.isFlipped())
DstInt = SrcInt;
- SrcInt = 0;
+ SrcInt = nullptr;
VNInfo *DeadVNI = DstInt->getVNInfoAt(Idx.getRegSlot());
assert(DeadVNI && "No value defined in DstInt");
DstInt->removeValNo(DeadVNI);
// Find new undef uses.
- for (MachineRegisterInfo::reg_nodbg_iterator
- I = MRI->reg_nodbg_begin(DstInt->reg), E = MRI->reg_nodbg_end();
- I != E; ++I) {
- MachineOperand &MO = I.getOperand();
+ for (MachineOperand &MO : MRI->reg_nodbg_operands(DstInt->reg)) {
if (MO.isDef() || MO.isUndef())
continue;
MachineInstr *MI = MO.getParent();
@@ -941,11 +953,14 @@ void RegisterCoalescer::updateRegDefsUses(unsigned SrcReg,
unsigned DstReg,
unsigned SubIdx) {
bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
- LiveInterval *DstInt = DstIsPhys ? 0 : &LIS->getInterval(DstReg);
+ LiveInterval *DstInt = DstIsPhys ? nullptr : &LIS->getInterval(DstReg);
SmallPtrSet<MachineInstr*, 8> Visited;
- for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(SrcReg);
- MachineInstr *UseMI = I.skipInstruction();) {
+ for (MachineRegisterInfo::reg_instr_iterator
+ I = MRI->reg_instr_begin(SrcReg), E = MRI->reg_instr_end();
+ I != E; ) {
+ MachineInstr *UseMI = &*(I++);
+
// Each instruction can only be rewritten once because sub-register
// composition is not always idempotent. When SrcReg != DstReg, rewriting
// the UseMI operands removes them from the SrcReg use-def chain, but when
@@ -956,7 +971,7 @@ void RegisterCoalescer::updateRegDefsUses(unsigned SrcReg,
SmallVector<unsigned,8> Ops;
bool Reads, Writes;
- tie(Reads, Writes) = UseMI->readsWritesVirtualRegister(SrcReg, &Ops);
+ std::tie(Reads, Writes) = UseMI->readsWritesVirtualRegister(SrcReg, &Ops);
// If SrcReg wasn't read, it may still be the case that DstReg is live-in
// because SrcReg is a sub-register.
@@ -1022,6 +1037,22 @@ bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) {
return false;
}
+ if (CP.getNewRC()) {
+ auto SrcRC = MRI->getRegClass(CP.getSrcReg());
+ auto DstRC = MRI->getRegClass(CP.getDstReg());
+ unsigned SrcIdx = CP.getSrcIdx();
+ unsigned DstIdx = CP.getDstIdx();
+ if (CP.isFlipped()) {
+ std::swap(SrcIdx, DstIdx);
+ std::swap(SrcRC, DstRC);
+ }
+ if (!TRI->shouldCoalesce(CopyMI, SrcRC, SrcIdx, DstRC, DstIdx,
+ CP.getNewRC())) {
+ DEBUG(dbgs() << "\tSubtarget bailed on coalescing.\n");
+ return false;
+ }
+ }
+
// Dead code elimination. This really should be handled by MachineDCE, but
// sometimes dead copies slip through, and we can't generate invalid live
// ranges.
@@ -1362,7 +1393,7 @@ class JoinVals {
bool PrunedComputed;
Val() : Resolution(CR_Keep), WriteLanes(0), ValidLanes(0),
- RedefVNI(0), OtherVNI(0), ErasableImplicitDef(false),
+ RedefVNI(nullptr), OtherVNI(nullptr), ErasableImplicitDef(false),
Pruned(false), PrunedComputed(false) {}
bool isAnalyzed() const { return WriteLanes != 0; }
@@ -1468,7 +1499,7 @@ JoinVals::analyzeValue(unsigned ValNo, JoinVals &Other) {
}
// Get the instruction defining this value, compute the lanes written.
- const MachineInstr *DefMI = 0;
+ const MachineInstr *DefMI = nullptr;
if (VNI->isPHIDef()) {
// Conservatively assume that all lanes in a PHI are valid.
V.ValidLanes = V.WriteLanes = TRI->getSubRegIndexLaneMask(SubIdx);
@@ -2092,14 +2123,14 @@ copyCoalesceWorkList(MutableArrayRef<MachineInstr*> CurrList) {
// Skip instruction pointers that have already been erased, for example by
// dead code elimination.
if (ErasedInstrs.erase(CurrList[i])) {
- CurrList[i] = 0;
+ CurrList[i] = nullptr;
continue;
}
bool Again = false;
bool Success = joinCopy(CurrList[i], Again);
Progress |= Success;
if (Success || !Again)
- CurrList[i] = 0;
+ CurrList[i] = nullptr;
}
return Progress;
}
@@ -2139,7 +2170,7 @@ RegisterCoalescer::copyCoalesceInMBB(MachineBasicBlock *MBB) {
CurrList(WorkList.begin() + PrevSize, WorkList.end());
if (copyCoalesceWorkList(CurrList))
WorkList.erase(std::remove(WorkList.begin() + PrevSize, WorkList.end(),
- (MachineInstr*)0), WorkList.end());
+ (MachineInstr*)nullptr), WorkList.end());
}
void RegisterCoalescer::coalesceLocals() {
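
For reference, here is a minimal sketch (not part of this patch) of how a target might implement the TargetRegisterInfo::shouldCoalesce() hook that the new check in joinCopy() consults; the class name and the size heuristic are hypothetical.

bool MyTargetRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                          const TargetRegisterClass *SrcRC,
                                          unsigned SrcSubReg,
                                          const TargetRegisterClass *DstRC,
                                          unsigned DstSubReg,
                                          const TargetRegisterClass *NewRC) const {
  // Bail out on cross-class coalescing that would force allocation into a
  // very small common class; keeping the copy is usually cheaper than the
  // lost allocation freedom.
  if (SrcRC != DstRC && NewRC->getNumRegs() < 8)
    return false;
  return true;
}
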
diff --git a/contrib/llvm/lib/CodeGen/RegisterCoalescer.h b/contrib/llvm/lib/CodeGen/RegisterCoalescer.h
index 47c3df1..e57ceab 100644
--- a/contrib/llvm/lib/CodeGen/RegisterCoalescer.h
+++ b/contrib/llvm/lib/CodeGen/RegisterCoalescer.h
@@ -61,14 +61,14 @@ namespace llvm {
public:
CoalescerPair(const TargetRegisterInfo &tri)
: TRI(tri), DstReg(0), SrcReg(0), DstIdx(0), SrcIdx(0),
- Partial(false), CrossClass(false), Flipped(false), NewRC(0) {}
+ Partial(false), CrossClass(false), Flipped(false), NewRC(nullptr) {}
/// Create a CoalescerPair representing a virtreg-to-physreg copy.
/// No need to call setRegisters().
CoalescerPair(unsigned VirtReg, unsigned PhysReg,
const TargetRegisterInfo &tri)
: TRI(tri), DstReg(PhysReg), SrcReg(VirtReg), DstIdx(0), SrcIdx(0),
- Partial(false), CrossClass(false), Flipped(false), NewRC(0) {}
+ Partial(false), CrossClass(false), Flipped(false), NewRC(nullptr) {}
/// setRegisters - set registers to match the copy instruction MI. Return
/// false if MI is not a coalescable copy instruction.
diff --git a/contrib/llvm/lib/CodeGen/RegisterPressure.cpp b/contrib/llvm/lib/CodeGen/RegisterPressure.cpp
index 092ecdd..617e459 100644
--- a/contrib/llvm/lib/CodeGen/RegisterPressure.cpp
+++ b/contrib/llvm/lib/CodeGen/RegisterPressure.cpp
@@ -41,7 +41,7 @@ static void decreaseSetPressure(std::vector<unsigned> &CurrSetPressure,
}
}
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD
void llvm::dumpRegSetPressure(ArrayRef<unsigned> SetPressure,
const TargetRegisterInfo *TRI) {
bool Empty = true;
@@ -55,6 +55,7 @@ void llvm::dumpRegSetPressure(ArrayRef<unsigned> SetPressure,
dbgs() << "\n";
}
+LLVM_DUMP_METHOD
void RegisterPressure::dump(const TargetRegisterInfo *TRI) const {
dbgs() << "Max Pressure: ";
dumpRegSetPressure(MaxSetPressure, TRI);
@@ -68,6 +69,7 @@ void RegisterPressure::dump(const TargetRegisterInfo *TRI) const {
dbgs() << '\n';
}
+LLVM_DUMP_METHOD
void RegPressureTracker::dump() const {
if (!isTopClosed() || !isBottomClosed()) {
dbgs() << "Curr Pressure: ";
@@ -75,7 +77,6 @@ void RegPressureTracker::dump() const {
}
P.dump(TRI);
}
-#endif
/// Increase the current pressure as impacted by these registers and bump
/// the high water mark if needed.
@@ -154,8 +155,8 @@ const LiveRange *RegPressureTracker::getLiveRange(unsigned Reg) const {
}
void RegPressureTracker::reset() {
- MBB = 0;
- LIS = 0;
+ MBB = nullptr;
+ LIS = nullptr;
CurrSetPressure.clear();
LiveThruPressure.clear();
@@ -506,7 +507,13 @@ bool RegPressureTracker::recede(SmallVectorImpl<unsigned> *LiveUses,
DeadDef = LRQ.isDeadDef();
}
}
- if (!DeadDef) {
+ if (DeadDef) {
+ // LiveIntervals knows this is a dead def even though its MachineOperand is
+ // not flagged as such. Since this register will not be recorded as
+ // live-out, increase its PDiff value to avoid underflowing pressure.
+ if (PDiff)
+ PDiff->addPressureChange(Reg, false, MRI);
+ } else {
if (LiveRegs.erase(Reg))
decreaseRegPressure(Reg);
else
@@ -876,9 +883,9 @@ static bool findUseBetween(unsigned Reg,
SlotIndex PriorUseIdx, SlotIndex NextUseIdx,
const MachineRegisterInfo *MRI,
const LiveIntervals *LIS) {
- for (MachineRegisterInfo::use_nodbg_iterator
- UI = MRI->use_nodbg_begin(Reg), UE = MRI->use_nodbg_end();
- UI != UE; UI.skipInstruction()) {
+ for (MachineRegisterInfo::use_instr_nodbg_iterator
+ UI = MRI->use_instr_nodbg_begin(Reg),
+ UE = MRI->use_instr_nodbg_end(); UI != UE; ++UI) {
const MachineInstr* MI = &*UI;
if (MI->isDebugValue())
continue;
diff --git a/contrib/llvm/lib/CodeGen/RegisterScavenging.cpp b/contrib/llvm/lib/CodeGen/RegisterScavenging.cpp
index 75ebdaa..72b6285 100644
--- a/contrib/llvm/lib/CodeGen/RegisterScavenging.cpp
+++ b/contrib/llvm/lib/CodeGen/RegisterScavenging.cpp
@@ -14,7 +14,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "reg-scavenging"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -29,6 +28,8 @@
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "reg-scavenging"
+
/// setUsed - Set the register and its sub-registers as being used.
void RegScavenger::setUsed(unsigned Reg) {
for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
@@ -47,7 +48,7 @@ void RegScavenger::initRegState() {
for (SmallVectorImpl<ScavengedInfo>::iterator I = Scavenged.begin(),
IE = Scavenged.end(); I != IE; ++I) {
I->Reg = 0;
- I->Restore = NULL;
+ I->Restore = nullptr;
}
// All registers started out unused.
@@ -91,8 +92,8 @@ void RegScavenger::enterBasicBlock(MachineBasicBlock *mbb) {
// Create callee-saved registers bitvector.
CalleeSavedRegs.resize(NumPhysRegs);
- const uint16_t *CSRegs = TRI->getCalleeSavedRegs(&MF);
- if (CSRegs != NULL)
+ const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);
+ if (CSRegs != nullptr)
for (unsigned i = 0; CSRegs[i]; ++i)
CalleeSavedRegs.set(CSRegs[i]);
}
@@ -162,7 +163,7 @@ void RegScavenger::unprocess() {
}
if (MBBI == MBB->begin()) {
- MBBI = MachineBasicBlock::iterator(NULL);
+ MBBI = MachineBasicBlock::iterator(nullptr);
Tracking = false;
} else
--MBBI;
@@ -175,7 +176,7 @@ void RegScavenger::forward() {
Tracking = true;
} else {
assert(MBBI != MBB->end() && "Already past the end of the basic block!");
- MBBI = llvm::next(MBBI);
+ MBBI = std::next(MBBI);
}
assert(MBBI != MBB->end() && "Already at the end of the basic block!");
@@ -187,7 +188,7 @@ void RegScavenger::forward() {
continue;
I->Reg = 0;
- I->Restore = NULL;
+ I->Restore = nullptr;
}
if (MI->isDebugValue())
@@ -223,7 +224,7 @@ void RegScavenger::forward() {
break;
}
if (!SubUsed) {
- MBB->getParent()->verify(NULL, "In Register Scavenger");
+ MBB->getParent()->verify(nullptr, "In Register Scavenger");
llvm_unreachable("Using an undefined register!");
}
(void)SubUsed;
@@ -415,7 +416,7 @@ unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
"Cannot scavenge register without an emergency spill slot!");
TII->storeRegToStackSlot(*MBB, I, SReg, true, Scavenged[SI].FrameIndex,
RC, TRI);
- MachineBasicBlock::iterator II = prior(I);
+ MachineBasicBlock::iterator II = std::prev(I);
unsigned FIOperandNum = getFrameIndexOperandNum(II);
TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this);
@@ -423,13 +424,13 @@ unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
// Restore the scavenged register before its use (or first terminator).
TII->loadRegFromStackSlot(*MBB, UseMI, SReg, Scavenged[SI].FrameIndex,
RC, TRI);
- II = prior(UseMI);
+ II = std::prev(UseMI);
FIOperandNum = getFrameIndexOperandNum(II);
TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this);
}
- Scavenged[SI].Restore = prior(UseMI);
+ Scavenged[SI].Restore = std::prev(UseMI);
// Doing this here leads to infinite regress.
// Scavenged[SI].Reg = SReg;
diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp b/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp
index 75e3790..6a2a080 100644
--- a/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "pre-RA-sched"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
@@ -25,6 +24,8 @@
#include <climits>
using namespace llvm;
+#define DEBUG_TYPE "pre-RA-sched"
+
#ifndef NDEBUG
static cl::opt<bool> StressSchedOpt(
"stress-sched", cl::Hidden, cl::init(false),
@@ -55,7 +56,7 @@ void ScheduleDAG::clearDAG() {
/// getInstrDesc helper to handle SDNodes.
const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
- if (!Node || !Node->isMachineOpcode()) return NULL;
+ if (!Node || !Node->isMachineOpcode()) return nullptr;
return &TII->get(Node->getMachineOpcode());
}
@@ -63,7 +64,7 @@ const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
/// not already. It also adds the current node as a successor of the
/// specified node.
bool SUnit::addPred(const SDep &D, bool Required) {
- // If this node already has this depenence, don't add a redundant one.
+ // If this node already has this dependence, don't add a redundant one.
for (SmallVectorImpl<SDep>::iterator I = Preds.begin(), E = Preds.end();
I != E; ++I) {
// Zero-latency weak edges may be added purely for heuristic ordering. Don't
@@ -301,8 +302,8 @@ void SUnit::biasCriticalPath() {
SUnit::pred_iterator BestI = Preds.begin();
unsigned MaxDepth = BestI->getSUnit()->getDepth();
- for (SUnit::pred_iterator
- I = llvm::next(BestI), E = Preds.end(); I != E; ++I) {
+ for (SUnit::pred_iterator I = std::next(BestI), E = Preds.end(); I != E;
+ ++I) {
if (I->getKind() == SDep::Data && I->getSUnit()->getDepth() > MaxDepth)
BestI = I;
}
diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp b/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
index 7f1f9c4..0f8b21c 100644
--- a/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "misched"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -21,6 +20,7 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
@@ -40,17 +40,24 @@
using namespace llvm;
+#define DEBUG_TYPE "misched"
+
static cl::opt<bool> EnableAASchedMI("enable-aa-sched-mi", cl::Hidden,
cl::ZeroOrMore, cl::init(false),
cl::desc("Enable use of AA during MI GAD construction"));
+static cl::opt<bool> UseTBAA("use-tbaa-in-sched-mi", cl::Hidden,
+ cl::init(true), cl::desc("Enable use of TBAA during MI DAG construction"));
+
ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
const MachineLoopInfo &mli,
const MachineDominatorTree &mdt,
bool IsPostRAFlag,
+ bool RemoveKillFlags,
LiveIntervals *lis)
: ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()), LIS(lis),
- IsPostRA(IsPostRAFlag), CanHandleTerminators(false), FirstDbgValue(0) {
+ IsPostRA(IsPostRAFlag), RemoveKillFlags(RemoveKillFlags),
+ CanHandleTerminators(false), FirstDbgValue(nullptr) {
assert((IsPostRA || LIS) && "PreRA scheduling requires LiveIntervals");
DbgValues.clear();
assert(!(IsPostRA && MRI.getNumVirtRegs()) &&
@@ -92,7 +99,7 @@ static const Value *getUnderlyingObjectFromInt(const Value *V) {
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static void getUnderlyingObjects(const Value *V,
SmallVectorImpl<Value *> &Objects) {
- SmallPtrSet<const Value*, 16> Visited;
+ SmallPtrSet<const Value *, 16> Visited;
SmallVector<const Value *, 4> Working(1, V);
do {
V = Working.pop_back_val();
@@ -118,7 +125,8 @@ static void getUnderlyingObjects(const Value *V,
} while (!Working.empty());
}
-typedef SmallVector<PointerIntPair<const Value *, 1, bool>, 4>
+typedef PointerUnion<const Value *, const PseudoSourceValue *> ValueType;
+typedef SmallVector<PointerIntPair<ValueType, 1, bool>, 4>
UnderlyingObjectsVector;
/// getUnderlyingObjectsForInstr - If this machine instr has memory reference
@@ -128,10 +136,23 @@ static void getUnderlyingObjectsForInstr(const MachineInstr *MI,
const MachineFrameInfo *MFI,
UnderlyingObjectsVector &Objects) {
if (!MI->hasOneMemOperand() ||
- !(*MI->memoperands_begin())->getValue() ||
+ (!(*MI->memoperands_begin())->getValue() &&
+ !(*MI->memoperands_begin())->getPseudoValue()) ||
(*MI->memoperands_begin())->isVolatile())
return;
+ if (const PseudoSourceValue *PSV =
+ (*MI->memoperands_begin())->getPseudoValue()) {
+ // For now, ignore PseudoSourceValues which may alias LLVM IR values
+ // because the code that uses this function has no way to cope with
+ // such aliases.
+ if (!PSV->isAliased(MFI)) {
+ bool MayAlias = PSV->mayAlias(MFI);
+ Objects.push_back(UnderlyingObjectsVector::value_type(PSV, MayAlias));
+ }
+ return;
+ }
+
const Value *V = (*MI->memoperands_begin())->getValue();
if (!V)
return;
@@ -141,26 +162,14 @@ static void getUnderlyingObjectsForInstr(const MachineInstr *MI,
for (SmallVectorImpl<Value *>::iterator I = Objs.begin(), IE = Objs.end();
I != IE; ++I) {
- bool MayAlias = true;
V = *I;
- if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
- // For now, ignore PseudoSourceValues which may alias LLVM IR values
- // because the code that uses this function has no way to cope with
- // such aliases.
-
- if (PSV->isAliased(MFI)) {
- Objects.clear();
- return;
- }
-
- MayAlias = PSV->mayAlias(MFI);
- } else if (!isIdentifiedObject(V)) {
+ if (!isIdentifiedObject(V)) {
Objects.clear();
return;
}
- Objects.push_back(UnderlyingObjectsVector::value_type(V, MayAlias));
+ Objects.push_back(UnderlyingObjectsVector::value_type(V, true));
}
}
@@ -170,7 +179,7 @@ void ScheduleDAGInstrs::startBlock(MachineBasicBlock *bb) {
void ScheduleDAGInstrs::finishBlock() {
// Subclasses should no longer refer to the old block.
- BB = 0;
+ BB = nullptr;
}
/// Initialize the DAG and common scheduler state for the current scheduling
@@ -202,7 +211,7 @@ void ScheduleDAGInstrs::exitRegion() {
/// are too high to be hidden by the branch or when the liveout registers
/// used by instructions in the fallthrough block.
void ScheduleDAGInstrs::addSchedBarrierDeps() {
- MachineInstr *ExitMI = RegionEnd != BB->end() ? &*RegionEnd : 0;
+ MachineInstr *ExitMI = RegionEnd != BB->end() ? &*RegionEnd : nullptr;
ExitSU.setInstr(ExitMI);
bool AllDepKnown = ExitMI &&
(ExitMI->isCall() || ExitMI->isBarrier());
@@ -259,7 +268,7 @@ void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
// Adjust the dependence latency using operand def/use information,
// then allow the target to perform its own adjustments.
int UseOp = I->OpIdx;
- MachineInstr *RegUse = 0;
+ MachineInstr *RegUse = nullptr;
SDep Dep;
if (UseOp < 0)
Dep = SDep(SU, SDep::Artificial);
@@ -284,8 +293,8 @@ void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
/// this SUnit to following instructions in the same scheduling region that
/// depend the physical register referenced at OperIdx.
void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
- const MachineInstr *MI = SU->getInstr();
- const MachineOperand &MO = MI->getOperand(OperIdx);
+ MachineInstr *MI = SU->getInstr();
+ MachineOperand &MO = MI->getOperand(OperIdx);
// Optionally add output and anti dependencies. For anti
// dependencies we use a latency of 0 because for a multi-issue
@@ -323,6 +332,8 @@ void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
// retrieve the existing SUnits list for this register's uses.
// Push this SUnit on the use list.
Uses.insert(PhysRegSUOper(SU, OperIdx, MO.getReg()));
+ if (RemoveKillFlags)
+ MO.setIsKill(false);
}
else {
addPhysRegDataDeps(SU, OperIdx);
@@ -468,6 +479,15 @@ static inline bool isUnsafeMemoryObject(MachineInstr *MI,
if ((*MI->memoperands_begin())->isVolatile() ||
MI->hasUnmodeledSideEffects())
return true;
+
+ if ((*MI->memoperands_begin())->getPseudoValue()) {
+ // Similarly to getUnderlyingObjectsForInstr:
+ // For now, ignore PseudoSourceValues which may alias LLVM IR values
+ // because the code that uses this function has no way to cope with
+ // such aliases.
+ return true;
+ }
+
const Value *V = (*MI->memoperands_begin())->getValue();
if (!V)
return true;
@@ -476,19 +496,8 @@ static inline bool isUnsafeMemoryObject(MachineInstr *MI,
getUnderlyingObjects(V, Objs);
for (SmallVectorImpl<Value *>::iterator I = Objs.begin(),
IE = Objs.end(); I != IE; ++I) {
- V = *I;
-
- if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
- // Similarly to getUnderlyingObjectForInstr:
- // For now, ignore PseudoSourceValues which may alias LLVM IR values
- // because the code that uses this function has no way to cope with
- // such aliases.
- if (PSV->isAliased(MFI))
- return true;
- }
-
// Does this pointer refer to a distinct and identifiable object?
- if (!isIdentifiedObject(V))
+ if (!isIdentifiedObject(*I))
return true;
}
@@ -507,6 +516,10 @@ static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
if (MIa == MIb)
return false;
+ // FIXME: Need to handle multiple memory operands to support all targets.
+ if (!MIa->hasOneMemOperand() || !MIb->hasOneMemOperand())
+ return true;
+
if (isUnsafeMemoryObject(MIa, MFI) || isUnsafeMemoryObject(MIb, MFI))
return true;
@@ -522,9 +535,8 @@ static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
MachineMemOperand *MMOa = *MIa->memoperands_begin();
MachineMemOperand *MMOb = *MIb->memoperands_begin();
- // FIXME: Need to handle multiple memory operands to support all targets.
- if (!MIa->hasOneMemOperand() || !MIb->hasOneMemOperand())
- llvm_unreachable("Multiple memory operands.");
+ if (!MMOa->getValue() || !MMOb->getValue())
+ return true;
// The following interface to AA is fashioned after DAGCombiner::isAlias
// and operates with MachineMemOperand offset with some important
@@ -550,10 +562,10 @@ static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
int64_t Overlapb = MMOb->getSize() + MMOb->getOffset() - MinOffset;
AliasAnalysis::AliasResult AAResult = AA->alias(
- AliasAnalysis::Location(MMOa->getValue(), Overlapa,
- MMOa->getTBAAInfo()),
- AliasAnalysis::Location(MMOb->getValue(), Overlapb,
- MMOb->getTBAAInfo()));
+ AliasAnalysis::Location(MMOa->getValue(), Overlapa,
+ UseTBAA ? MMOa->getTBAAInfo() : nullptr),
+ AliasAnalysis::Location(MMOb->getValue(), Overlapb,
+ UseTBAA ? MMOb->getTBAAInfo() : nullptr));
return (AAResult != AliasAnalysis::NoAlias);
}
@@ -687,10 +699,36 @@ void ScheduleDAGInstrs::initSUnits() {
// Assign the Latency field of SU using target-provided information.
SU->Latency = SchedModel.computeInstrLatency(SU->getInstr());
+
+ // If this SUnit uses a reserved or unbuffered resource, mark it as such.
+ //
+ // Reserved resources block an instruction from issuing and stall the
+ // entire pipeline. These are identified by BufferSize=0.
+ //
+ // Unbuffered resources prevent execution of subsequent instructions that
+ // require the same resources. This is used for in-order execution pipelines
+ // within an out-of-order core. These are identified by BufferSize=1.
+ if (SchedModel.hasInstrSchedModel()) {
+ const MCSchedClassDesc *SC = getSchedClass(SU);
+ for (TargetSchedModel::ProcResIter
+ PI = SchedModel.getWriteProcResBegin(SC),
+ PE = SchedModel.getWriteProcResEnd(SC); PI != PE; ++PI) {
+ switch (SchedModel.getProcResource(PI->ProcResourceIdx)->BufferSize) {
+ case 0:
+ SU->hasReservedResource = true;
+ break;
+ case 1:
+ SU->isUnbuffered = true;
+ break;
+ default:
+ break;
+ }
+ }
+ }
}
}
-/// If RegPressure is non null, compute register pressure as a side effect. The
+/// If RegPressure is non-null, compute register pressure as a side effect. The
/// DAG builder is an efficient place to do it because it already visits
/// operands.
void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
@@ -699,7 +737,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
bool UseAA = EnableAASchedMI.getNumOccurrences() > 0 ? EnableAASchedMI
: ST.useAA();
- AliasAnalysis *AAForDep = UseAA ? AA : 0;
+ AliasAnalysis *AAForDep = UseAA ? AA : nullptr;
MISUnitMap.clear();
ScheduleDAG::clearDAG();
@@ -714,20 +752,20 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
// to top.
// Remember where a generic side-effecting instruction is as we proceed.
- SUnit *BarrierChain = 0, *AliasChain = 0;
+ SUnit *BarrierChain = nullptr, *AliasChain = nullptr;
// Memory references to specific known memory locations are tracked
// so that they can be given more precise dependencies. We track
// separately the known memory locations that may alias and those
// that are known not to alias
- MapVector<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
- MapVector<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;
+ MapVector<ValueType, std::vector<SUnit *> > AliasMemDefs, NonAliasMemDefs;
+ MapVector<ValueType, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;
std::set<SUnit*> RejectMemNodes;
// Remove any stale debug info; sometimes BuildSchedGraph is called again
// without emitting the info from the previous call.
DbgValues.clear();
- FirstDbgValue = NULL;
+ FirstDbgValue = nullptr;
assert(Defs.empty() && Uses.empty() &&
"Only BuildGraph should update Defs/Uses");
@@ -744,13 +782,13 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
addSchedBarrierDeps();
// Walk the list of instructions, from bottom moving up.
- MachineInstr *DbgMI = NULL;
+ MachineInstr *DbgMI = nullptr;
for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin;
MII != MIE; --MII) {
- MachineInstr *MI = prior(MII);
+ MachineInstr *MI = std::prev(MII);
if (MI && DbgMI) {
DbgValues.push_back(std::make_pair(DbgMI, MI));
- DbgMI = NULL;
+ DbgMI = nullptr;
}
if (MI->isDebugValue()) {
@@ -761,13 +799,15 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
assert(SU && "No SUnit mapped to this MI");
if (RPTracker) {
- PressureDiff *PDiff = PDiffs ? &(*PDiffs)[SU->NodeNum] : 0;
- RPTracker->recede(/*LiveUses=*/0, PDiff);
- assert(RPTracker->getPos() == prior(MII) && "RPTracker can't find MI");
+ PressureDiff *PDiff = PDiffs ? &(*PDiffs)[SU->NodeNum] : nullptr;
+ RPTracker->recede(/*LiveUses=*/nullptr, PDiff);
+ assert(RPTracker->getPos() == std::prev(MII) &&
+ "RPTracker can't find MI");
}
- assert((CanHandleTerminators || (!MI->isTerminator() && !MI->isLabel())) &&
- "Cannot schedule terminators or labels!");
+ assert(
+ (CanHandleTerminators || (!MI->isTerminator() && !MI->isPosition())) &&
+ "Cannot schedule terminators or labels!");
// Add register-based dependencies (data, anti, and output).
bool HasVRegDef = false;
@@ -815,11 +855,13 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
if (isGlobalMemoryObject(AA, MI)) {
// Be conservative with these and add dependencies on all memory
// references, even those that are known to not alias.
- for (MapVector<const Value *, SUnit *>::iterator I =
+ for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
- I->second->addPred(SDep(SU, SDep::Barrier));
+ for (unsigned i = 0, e = I->second.size(); i != e; ++i) {
+ I->second[i]->addPred(SDep(SU, SDep::Barrier));
+ }
}
- for (MapVector<const Value *, std::vector<SUnit *> >::iterator I =
+ for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
for (unsigned i = 0, e = I->second.size(); i != e; ++i) {
SDep Dep(SU, SDep::Barrier);
@@ -853,10 +895,12 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
addChainDependency(AAForDep, MFI, SU, PendingLoads[k], RejectMemNodes,
TrueMemOrderLatency);
- for (MapVector<const Value *, SUnit *>::iterator I = AliasMemDefs.begin(),
- E = AliasMemDefs.end(); I != E; ++I)
- addChainDependency(AAForDep, MFI, SU, I->second, RejectMemNodes);
- for (MapVector<const Value *, std::vector<SUnit *> >::iterator I =
+ for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
+ AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I) {
+ for (unsigned i = 0, e = I->second.size(); i != e; ++i)
+ addChainDependency(AAForDep, MFI, SU, I->second[i], RejectMemNodes);
+ }
+ for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
for (unsigned i = 0, e = I->second.size(); i != e; ++i)
addChainDependency(AAForDep, MFI, SU, I->second[i], RejectMemNodes,
@@ -879,7 +923,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
bool MayAlias = false;
for (UnderlyingObjectsVector::iterator K = Objs.begin(), KE = Objs.end();
K != KE; ++K) {
- const Value *V = K->getPointer();
+ ValueType V = K->getPointer();
bool ThisMayAlias = K->getInt();
if (ThisMayAlias)
MayAlias = true;
@@ -887,24 +931,34 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
// A store to a specific PseudoSourceValue. Add precise dependencies.
// Record the def in MemDefs, first adding a dep if there is
// an existing def.
- MapVector<const Value *, SUnit *>::iterator I =
+ MapVector<ValueType, std::vector<SUnit *> >::iterator I =
((ThisMayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
- MapVector<const Value *, SUnit *>::iterator IE =
+ MapVector<ValueType, std::vector<SUnit *> >::iterator IE =
((ThisMayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
if (I != IE) {
- addChainDependency(AAForDep, MFI, SU, I->second, RejectMemNodes,
- 0, true);
- I->second = SU;
+ for (unsigned i = 0, e = I->second.size(); i != e; ++i)
+ addChainDependency(AAForDep, MFI, SU, I->second[i], RejectMemNodes,
+ 0, true);
+
+ // If we're not using AA, then we only need one store per object.
+ if (!AAForDep)
+ I->second.clear();
+ I->second.push_back(SU);
} else {
- if (ThisMayAlias)
- AliasMemDefs[V] = SU;
- else
- NonAliasMemDefs[V] = SU;
+ if (ThisMayAlias) {
+ if (!AAForDep)
+ AliasMemDefs[V].clear();
+ AliasMemDefs[V].push_back(SU);
+ } else {
+ if (!AAForDep)
+ NonAliasMemDefs[V].clear();
+ NonAliasMemDefs[V].push_back(SU);
+ }
}
// Handle the uses in MemUses, if there are any.
- MapVector<const Value *, std::vector<SUnit *> >::iterator J =
+ MapVector<ValueType, std::vector<SUnit *> >::iterator J =
((ThisMayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
- MapVector<const Value *, std::vector<SUnit *> >::iterator JE =
+ MapVector<ValueType, std::vector<SUnit *> >::iterator JE =
((ThisMayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
if (J != JE) {
for (unsigned i = 0, e = J->second.size(); i != e; ++i)
@@ -933,11 +987,6 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
// we have lost all RejectMemNodes below barrier.
if (BarrierChain)
BarrierChain->addPred(SDep(SU, SDep::Barrier));
-
- if (!ExitSU.isPred(SU))
- // Push store's up a bit to avoid them getting in between cmp
- // and branches.
- ExitSU.addPred(SDep(SU, SDep::Artificial));
} else if (MI->mayLoad()) {
bool MayAlias = true;
if (MI->isInvariantLoad(AA)) {
@@ -949,9 +998,11 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
if (Objs.empty()) {
// A load with no underlying object. Depend on all
// potentially aliasing stores.
- for (MapVector<const Value *, SUnit *>::iterator I =
+ for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
- addChainDependency(AAForDep, MFI, SU, I->second, RejectMemNodes);
+ for (unsigned i = 0, e = I->second.size(); i != e; ++i)
+ addChainDependency(AAForDep, MFI, SU, I->second[i],
+ RejectMemNodes);
PendingLoads.push_back(SU);
MayAlias = true;
@@ -961,20 +1012,21 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
for (UnderlyingObjectsVector::iterator
J = Objs.begin(), JE = Objs.end(); J != JE; ++J) {
- const Value *V = J->getPointer();
+ ValueType V = J->getPointer();
bool ThisMayAlias = J->getInt();
if (ThisMayAlias)
MayAlias = true;
// A load from a specific PseudoSourceValue. Add precise dependencies.
- MapVector<const Value *, SUnit *>::iterator I =
+ MapVector<ValueType, std::vector<SUnit *> >::iterator I =
((ThisMayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
- MapVector<const Value *, SUnit *>::iterator IE =
+ MapVector<ValueType, std::vector<SUnit *> >::iterator IE =
((ThisMayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
if (I != IE)
- addChainDependency(AAForDep, MFI, SU, I->second, RejectMemNodes,
- 0, true);
+ for (unsigned i = 0, e = I->second.size(); i != e; ++i)
+ addChainDependency(AAForDep, MFI, SU, I->second[i],
+ RejectMemNodes, 0, true);
if (ThisMayAlias)
AliasMemUses[V].push_back(SU);
else
@@ -999,6 +1051,145 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
PendingLoads.clear();
}
+/// \brief Initialize register live-range state for updating kills.
+void ScheduleDAGInstrs::startBlockForKills(MachineBasicBlock *BB) {
+ // Start with no live registers.
+ LiveRegs.reset();
+
+ // Examine the live-in regs of all successors.
+ for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
+ SE = BB->succ_end(); SI != SE; ++SI) {
+ for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
+ E = (*SI)->livein_end(); I != E; ++I) {
+ unsigned Reg = *I;
+ // Repeat, for reg and all subregs.
+ for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
+ SubRegs.isValid(); ++SubRegs)
+ LiveRegs.set(*SubRegs);
+ }
+ }
+}
+
+bool ScheduleDAGInstrs::toggleKillFlag(MachineInstr *MI, MachineOperand &MO) {
+ // Setting kill flag...
+ if (!MO.isKill()) {
+ MO.setIsKill(true);
+ return false;
+ }
+
+ // If MO itself is live, clear the kill flag...
+ if (LiveRegs.test(MO.getReg())) {
+ MO.setIsKill(false);
+ return false;
+ }
+
+ // If any subreg of MO is live, then create an imp-def for that
+ // subreg and keep MO marked as killed.
+ MO.setIsKill(false);
+ bool AllDead = true;
+ const unsigned SuperReg = MO.getReg();
+ MachineInstrBuilder MIB(MF, MI);
+ for (MCSubRegIterator SubRegs(SuperReg, TRI); SubRegs.isValid(); ++SubRegs) {
+ if (LiveRegs.test(*SubRegs)) {
+ MIB.addReg(*SubRegs, RegState::ImplicitDefine);
+ AllDead = false;
+ }
+ }
+
+ if (AllDead)
+ MO.setIsKill(true);
+ return false;
+}
+
+// FIXME: Reuse the LivePhysRegs utility for this.
+void ScheduleDAGInstrs::fixupKills(MachineBasicBlock *MBB) {
+ DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');
+
+ LiveRegs.resize(TRI->getNumRegs());
+ BitVector killedRegs(TRI->getNumRegs());
+
+ startBlockForKills(MBB);
+
+ // Examine block from end to start...
+ unsigned Count = MBB->size();
+ for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
+ I != E; --Count) {
+ MachineInstr *MI = --I;
+ if (MI->isDebugValue())
+ continue;
+
+ // Update liveness. Registers that are defed but not used in this
+ // instruction are now dead. Mark register and all subregs as they
+ // are completely defined.
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (MO.isRegMask())
+ LiveRegs.clearBitsNotInMask(MO.getRegMask());
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (Reg == 0) continue;
+ if (!MO.isDef()) continue;
+ // Ignore two-addr defs.
+ if (MI->isRegTiedToUseOperand(i)) continue;
+
+ // Repeat for reg and all subregs.
+ for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
+ SubRegs.isValid(); ++SubRegs)
+ LiveRegs.reset(*SubRegs);
+ }
+
+ // Examine all used registers and set/clear kill flag. When a
+ // register is used multiple times we only set the kill flag on
+ // the first use. Don't set kill flags on undef operands.
+ killedRegs.reset();
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
+ unsigned Reg = MO.getReg();
+ if ((Reg == 0) || MRI.isReserved(Reg)) continue;
+
+ bool kill = false;
+ if (!killedRegs.test(Reg)) {
+ kill = true;
+ // A register is not killed if any subregs are live...
+ for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
+ if (LiveRegs.test(*SubRegs)) {
+ kill = false;
+ break;
+ }
+ }
+
+ // If subreg is not live, then register is killed if it became
+ // live in this instruction
+ if (kill)
+ kill = !LiveRegs.test(Reg);
+ }
+
+ if (MO.isKill() != kill) {
+ DEBUG(dbgs() << "Fixing " << MO << " in ");
+ // Warning: toggleKillFlag may invalidate MO.
+ toggleKillFlag(MI, MO);
+ DEBUG(MI->dump());
+ }
+
+ killedRegs.set(Reg);
+ }
+
+ // Mark any used register (that is not using undef) and subregs as
+ // now live...
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
+ unsigned Reg = MO.getReg();
+ if ((Reg == 0) || MRI.isReserved(Reg)) continue;
+
+ for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
+ SubRegs.isValid(); ++SubRegs)
+ LiveRegs.set(*SubRegs);
+ }
+ }
+}
+
void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
SU->getInstr()->dump();
@@ -1234,7 +1425,7 @@ public:
const SDep *backtrack() {
DFSStack.pop_back();
- return DFSStack.empty() ? 0 : llvm::prior(DFSStack.back().second);
+ return DFSStack.empty() ? nullptr : std::prev(DFSStack.back().second);
}
const SUnit *getCurr() const { return DFSStack.back().first; }
@@ -1317,7 +1508,7 @@ void SchedDFSResult::scheduleTree(unsigned SubtreeID) {
}
}
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD
void ILPValue::print(raw_ostream &OS) const {
OS << InstrCount << " / " << Length << " = ";
if (!Length)
@@ -1326,16 +1517,17 @@ void ILPValue::print(raw_ostream &OS) const {
OS << format("%g", ((double)InstrCount / Length));
}
+LLVM_DUMP_METHOD
void ILPValue::dump() const {
dbgs() << *this << '\n';
}
namespace llvm {
+LLVM_DUMP_METHOD
raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val) {
Val.print(OS);
return OS;
}
} // namespace llvm
-#endif // !NDEBUG || LLVM_ENABLE_DUMP
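
For illustration, a minimal sketch (not part of this patch) of how a post-RA scheduler could combine the RemoveKillFlags constructor argument of ScheduleDAGInstrs introduced here with fixupKills(); the class and variable names are hypothetical.

class MyPostRAScheduler : public ScheduleDAGInstrs {
public:
  MyPostRAScheduler(MachineFunction &MF, const MachineLoopInfo &MLI,
                    const MachineDominatorTree &MDT)
      // IsPostRAFlag = true, RemoveKillFlags = true: kill flags are cleared
      // while the DAG is built and recomputed afterwards with fixupKills().
      : ScheduleDAGInstrs(MF, MLI, MDT, /*IsPostRAFlag=*/true,
                          /*RemoveKillFlags=*/true, /*lis=*/nullptr) {}

  void schedule() override {
    // Reorder SUnits here; after emitting the new order for a block MBB,
    // conservatively recompute kill flags with:
    //   fixupKills(MBB);
  }
};
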
diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAGPrinter.cpp b/contrib/llvm/lib/CodeGen/ScheduleDAGPrinter.cpp
index 8ddb3e8..f59c6cf 100644
--- a/contrib/llvm/lib/CodeGen/ScheduleDAGPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/ScheduleDAGPrinter.cpp
@@ -14,7 +14,6 @@
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
diff --git a/contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp b/contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
index 2cd84d6..004c685 100644
--- a/contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
+++ b/contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
@@ -13,7 +13,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE ::llvm::ScoreboardHazardRecognizer::DebugType
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/MC/MCInstrItineraries.h"
@@ -24,6 +23,8 @@
using namespace llvm;
+#define DEBUG_TYPE ::llvm::ScoreboardHazardRecognizer::DebugType
+
#ifndef NDEBUG
const char *ScoreboardHazardRecognizer::DebugType = "";
#endif
@@ -126,7 +127,7 @@ ScoreboardHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
// free FU's in the scoreboard at the appropriate future cycles.
const MCInstrDesc *MCID = DAG->getInstrDesc(SU);
- if (MCID == NULL) {
+ if (!MCID) {
// Don't check hazards for non-machineinstr Nodes.
return NoHazard;
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 69cf8d9..2abcdd5 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -16,7 +16,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "dagcombine"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
@@ -40,6 +39,8 @@
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "dagcombine"
+
STATISTIC(NodesCombined , "Number of dag nodes combined");
STATISTIC(PreIndexedNodes , "Number of pre-indexed nodes created");
STATISTIC(PostIndexedNodes, "Number of post-indexed nodes created");
@@ -50,11 +51,22 @@ STATISTIC(SlicedLoads, "Number of load sliced");
namespace {
static cl::opt<bool>
CombinerAA("combiner-alias-analysis", cl::Hidden,
- cl::desc("Turn on alias analysis during testing"));
+ cl::desc("Enable DAG combiner alias-analysis heuristics"));
static cl::opt<bool>
CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden,
- cl::desc("Include global information in alias analysis"));
+ cl::desc("Enable DAG combiner's use of IR alias analysis"));
+
+ static cl::opt<bool>
+ UseTBAA("combiner-use-tbaa", cl::Hidden, cl::init(true),
+ cl::desc("Enable DAG combiner's use of TBAA"));
+
+#ifndef NDEBUG
+ static cl::opt<std::string>
+ CombinerAAOnlyFunc("combiner-aa-only-func", cl::Hidden,
+ cl::desc("Only use DAG-combiner alias analysis in this"
+ " function"));
+#endif
/// Hidden option to stress test load slicing, i.e., when this option
/// is enabled, load slicing bypasses most of its profitability guards.
@@ -92,20 +104,19 @@ namespace {
// contain duplicate or removed nodes. When choosing a node to
// visit, we pop off the order stack until we find an item that is
// also in the contents set. All operations are O(log N).
- SmallPtrSet<SDNode*, 64> WorkListContents;
- SmallVector<SDNode*, 64> WorkListOrder;
+ SmallPtrSet<SDNode*, 64> WorklistContents;
+ SmallVector<SDNode*, 64> WorklistOrder;
// AA - Used for DAG load/store alias analysis.
AliasAnalysis &AA;
- /// AddUsersToWorkList - When an instruction is simplified, add all users of
+ /// AddUsersToWorklist - When an instruction is simplified, add all users of
/// the instruction to the work lists because they might get more simplified
/// now.
///
- void AddUsersToWorkList(SDNode *N) {
- for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
- UI != UE; ++UI)
- AddToWorkList(*UI);
+ void AddUsersToWorklist(SDNode *N) {
+ for (SDNode *Node : N->uses())
+ AddToWorklist(Node);
}
/// visit - call the node-specific routine that knows how to fold each
@@ -113,17 +124,22 @@ namespace {
SDValue visit(SDNode *N);
public:
- /// AddToWorkList - Add to the work list making sure its instance is at the
+ /// AddToWorklist - Add to the work list making sure its instance is at the
/// back (next to be processed.)
- void AddToWorkList(SDNode *N) {
- WorkListContents.insert(N);
- WorkListOrder.push_back(N);
+ void AddToWorklist(SDNode *N) {
+ // Skip handle nodes as they can't usefully be combined and confuse the
+ // zero-use deletion strategy.
+ if (N->getOpcode() == ISD::HANDLENODE)
+ return;
+
+ WorklistContents.insert(N);
+ WorklistOrder.push_back(N);
}
- /// removeFromWorkList - remove all instances of N from the worklist.
+ /// removeFromWorklist - remove all instances of N from the worklist.
///
- void removeFromWorkList(SDNode *N) {
- WorkListContents.erase(N);
+ void removeFromWorklist(SDNode *N) {
+ WorklistContents.erase(N);
}
SDValue CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
@@ -212,6 +228,7 @@ namespace {
SDValue visitSHL(SDNode *N);
SDValue visitSRA(SDNode *N);
SDValue visitSRL(SDNode *N);
+ SDValue visitRotate(SDNode *N);
SDValue visitCTLZ(SDNode *N);
SDValue visitCTLZ_ZERO_UNDEF(SDNode *N);
SDValue visitCTTZ(SDNode *N);
@@ -257,11 +274,12 @@ namespace {
SDValue visitCONCAT_VECTORS(SDNode *N);
SDValue visitEXTRACT_SUBVECTOR(SDNode *N);
SDValue visitVECTOR_SHUFFLE(SDNode *N);
+ SDValue visitINSERT_SUBVECTOR(SDNode *N);
SDValue XformToShuffleWithZero(SDNode *N);
SDValue ReassociateOps(unsigned Opc, SDLoc DL, SDValue LHS, SDValue RHS);
- SDValue visitShiftByConstant(SDNode *N, unsigned Amt);
+ SDValue visitShiftByConstant(SDNode *N, ConstantSDNode *Amt);
bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS);
SDValue SimplifyBinOpWithSameOpcodeHands(SDNode *N);
@@ -271,6 +289,11 @@ namespace {
bool NotExtCompare = false);
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
SDLoc DL, bool foldBooleans = true);
+
+ bool isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
+ SDValue &CC) const;
+ bool isOneUseSetCC(SDValue N) const;
+
SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
unsigned HiOp);
SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
@@ -280,6 +303,10 @@ namespace {
SDValue MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
bool DemandHighBits = true);
SDValue MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1);
+ SDNode *MatchRotatePosNeg(SDValue Shifted, SDValue Pos, SDValue Neg,
+ SDValue InnerPos, SDValue InnerNeg,
+ unsigned PosOpcode, unsigned NegOpcode,
+ SDLoc DL);
SDNode *MatchRotate(SDValue LHS, SDValue RHS, SDLoc DL);
SDValue ReduceLoadWidth(SDNode *N);
SDValue ReduceLoadOpStoreWidth(SDNode *N);
@@ -296,26 +323,7 @@ namespace {
/// isAlias - Return true if there is any possibility that the two addresses
/// overlap.
- bool isAlias(SDValue Ptr1, int64_t Size1, bool IsVolatile1,
- const Value *SrcValue1, int SrcValueOffset1,
- unsigned SrcValueAlign1,
- const MDNode *TBAAInfo1,
- SDValue Ptr2, int64_t Size2, bool IsVolatile2,
- const Value *SrcValue2, int SrcValueOffset2,
- unsigned SrcValueAlign2,
- const MDNode *TBAAInfo2) const;
-
- /// isAlias - Return true if there is any possibility that the two addresses
- /// overlap.
- bool isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1);
-
- /// FindAliasInfo - Extracts the relevant alias information from the memory
- /// node. Returns true if the operand was a load.
- bool FindAliasInfo(SDNode *N,
- SDValue &Ptr, int64_t &Size, bool &IsVolatile,
- const Value *&SrcValue, int &SrcValueOffset,
- unsigned &SrcValueAlignment,
- const MDNode *&TBAAInfo) const;
+ bool isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const;
/// FindBetterChain - Walk up chain skipping non-aliasing memory nodes,
/// looking for a better chain (aliasing node.)
@@ -326,6 +334,14 @@ namespace {
/// \return True if some memory operations were changed.
bool MergeConsecutiveStores(StoreSDNode *N);
+ /// \brief Try to transform a truncation where C is a constant:
+ /// (trunc (and X, C)) -> (and (trunc X), (trunc C))
+ ///
+ /// \p N needs to be a truncation and its first operand an AND. Other
+ /// requirements are checked by the function (e.g. that trunc is
+ /// single-use), and if they are not met an empty SDValue is returned.
+ SDValue distributeTruncateThroughAnd(SDNode *N);
+
public:
DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL)
: DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
@@ -370,16 +386,16 @@ namespace {
namespace {
-/// WorkListRemover - This class is a DAGUpdateListener that removes any deleted
+/// WorklistRemover - This class is a DAGUpdateListener that removes any deleted
/// nodes from the worklist.
-class WorkListRemover : public SelectionDAG::DAGUpdateListener {
+class WorklistRemover : public SelectionDAG::DAGUpdateListener {
DAGCombiner &DC;
public:
- explicit WorkListRemover(DAGCombiner &dc)
+ explicit WorklistRemover(DAGCombiner &dc)
: SelectionDAG::DAGUpdateListener(dc.getDAG()), DC(dc) {}
- virtual void NodeDeleted(SDNode *N, SDNode *E) {
- DC.removeFromWorkList(N);
+ void NodeDeleted(SDNode *N, SDNode *E) override {
+ DC.removeFromWorklist(N);
}
};
}
@@ -389,11 +405,11 @@ public:
//===----------------------------------------------------------------------===//
void TargetLowering::DAGCombinerInfo::AddToWorklist(SDNode *N) {
- ((DAGCombiner*)DC)->AddToWorkList(N);
+ ((DAGCombiner*)DC)->AddToWorklist(N);
}
void TargetLowering::DAGCombinerInfo::RemoveFromWorklist(SDNode *N) {
- ((DAGCombiner*)DC)->removeFromWorkList(N);
+ ((DAGCombiner*)DC)->removeFromWorklist(N);
}
SDValue TargetLowering::DAGCombinerInfo::
@@ -566,79 +582,130 @@ static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG,
}
}
-
// isSetCCEquivalent - Return true if this node is a setcc, or is a select_cc
-// that selects between the values 1 and 0, making it equivalent to a setcc.
-// Also, set the incoming LHS, RHS, and CC references to the appropriate
-// nodes based on the type of node we are checking. This simplifies life a
-// bit for the callers.
-static bool isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
- SDValue &CC) {
+// that selects between the target values used for true and false, making it
+// equivalent to a setcc. Also, set the incoming LHS, RHS, and CC references to
+// the appropriate nodes based on the type of node we are checking. This
+// simplifies life a bit for the callers.
+bool DAGCombiner::isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
+ SDValue &CC) const {
if (N.getOpcode() == ISD::SETCC) {
LHS = N.getOperand(0);
RHS = N.getOperand(1);
CC = N.getOperand(2);
return true;
}
- if (N.getOpcode() == ISD::SELECT_CC &&
- N.getOperand(2).getOpcode() == ISD::Constant &&
- N.getOperand(3).getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(N.getOperand(2))->getAPIntValue() == 1 &&
- cast<ConstantSDNode>(N.getOperand(3))->isNullValue()) {
- LHS = N.getOperand(0);
- RHS = N.getOperand(1);
- CC = N.getOperand(4);
- return true;
- }
- return false;
+
+ if (N.getOpcode() != ISD::SELECT_CC ||
+ !TLI.isConstTrueVal(N.getOperand(2).getNode()) ||
+ !TLI.isConstFalseVal(N.getOperand(3).getNode()))
+ return false;
+
+ LHS = N.getOperand(0);
+ RHS = N.getOperand(1);
+ CC = N.getOperand(4);
+ return true;
}
// isOneUseSetCC - Return true if this is a SetCC-equivalent operation with only
// one use. If this is true, it allows the users to invert the operation for
// free when it is profitable to do so.
-static bool isOneUseSetCC(SDValue N) {
+bool DAGCombiner::isOneUseSetCC(SDValue N) const {
SDValue N0, N1, N2;
if (isSetCCEquivalent(N, N0, N1, N2) && N.getNode()->hasOneUse())
return true;
return false;
}
+/// isConstantSplatVector - Returns true if N is a BUILD_VECTOR node whose
+/// elements are all the same constant or undefined.
+static bool isConstantSplatVector(SDNode *N, APInt& SplatValue) {
+ BuildVectorSDNode *C = dyn_cast<BuildVectorSDNode>(N);
+ if (!C)
+ return false;
+
+ APInt SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+ EVT EltVT = N->getValueType(0).getVectorElementType();
+ return (C->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
+ HasAnyUndefs) &&
+ EltVT.getSizeInBits() >= SplatBitSize);
+}
+
+// \brief Returns the SDNode if it is a constant BuildVector or constant.
+static SDNode *isConstantBuildVectorOrConstantInt(SDValue N) {
+ if (isa<ConstantSDNode>(N))
+ return N.getNode();
+ BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
+ if(BV && BV->isConstant())
+ return BV;
+ return nullptr;
+}
+
+// \brief Returns the SDNode if it is a constant splat BuildVector or constant
+// int.
+static ConstantSDNode *isConstOrConstSplat(SDValue N) {
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
+ return CN;
+
+ if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
+ BitVector UndefElements;
+ ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);
+
+ // BuildVectors can truncate their operands. Ignore that case here.
+ // FIXME: We blindly ignore splats which include undef which is overly
+ // pessimistic.
+ if (CN && UndefElements.none() &&
+ CN->getValueType(0) == N.getValueType().getScalarType())
+ return CN;
+ }
+
+ return nullptr;
+}
+
SDValue DAGCombiner::ReassociateOps(unsigned Opc, SDLoc DL,
SDValue N0, SDValue N1) {
EVT VT = N0.getValueType();
- if (N0.getOpcode() == Opc && isa<ConstantSDNode>(N0.getOperand(1))) {
- if (isa<ConstantSDNode>(N1)) {
- // reassoc. (op (op x, c1), c2) -> (op x, (op c1, c2))
- SDValue OpNode =
- DAG.FoldConstantArithmetic(Opc, VT,
- cast<ConstantSDNode>(N0.getOperand(1)),
- cast<ConstantSDNode>(N1));
- return DAG.getNode(Opc, DL, VT, N0.getOperand(0), OpNode);
- }
- if (N0.hasOneUse()) {
- // reassoc. (op (op x, c1), y) -> (op (op x, y), c1) iff x+c1 has one use
- SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT,
- N0.getOperand(0), N1);
- AddToWorkList(OpNode.getNode());
- return DAG.getNode(Opc, DL, VT, OpNode, N0.getOperand(1));
- }
- }
-
- if (N1.getOpcode() == Opc && isa<ConstantSDNode>(N1.getOperand(1))) {
- if (isa<ConstantSDNode>(N0)) {
- // reassoc. (op c2, (op x, c1)) -> (op x, (op c1, c2))
- SDValue OpNode =
- DAG.FoldConstantArithmetic(Opc, VT,
- cast<ConstantSDNode>(N1.getOperand(1)),
- cast<ConstantSDNode>(N0));
- return DAG.getNode(Opc, DL, VT, N1.getOperand(0), OpNode);
- }
- if (N1.hasOneUse()) {
- // reassoc. (op y, (op x, c1)) -> (op (op x, y), c1) iff x+c1 has one use
- SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT,
- N1.getOperand(0), N0);
- AddToWorkList(OpNode.getNode());
- return DAG.getNode(Opc, DL, VT, OpNode, N1.getOperand(1));
+ if (N0.getOpcode() == Opc) {
+ if (SDNode *L = isConstantBuildVectorOrConstantInt(N0.getOperand(1))) {
+ if (SDNode *R = isConstantBuildVectorOrConstantInt(N1)) {
+ // reassoc. (op (op x, c1), c2) -> (op x, (op c1, c2))
+ SDValue OpNode = DAG.FoldConstantArithmetic(Opc, VT, L, R);
+ if (!OpNode.getNode())
+ return SDValue();
+ return DAG.getNode(Opc, DL, VT, N0.getOperand(0), OpNode);
+ }
+ if (N0.hasOneUse()) {
+ // reassoc. (op (op x, c1), y) -> (op (op x, y), c1) iff x+c1 has one
+ // use
+ SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N0.getOperand(0), N1);
+ if (!OpNode.getNode())
+ return SDValue();
+ AddToWorklist(OpNode.getNode());
+ return DAG.getNode(Opc, DL, VT, OpNode, N0.getOperand(1));
+ }
+ }
+ }
+
+ if (N1.getOpcode() == Opc) {
+ if (SDNode *R = isConstantBuildVectorOrConstantInt(N1.getOperand(1))) {
+ if (SDNode *L = isConstantBuildVectorOrConstantInt(N0)) {
+ // reassoc. (op c2, (op x, c1)) -> (op x, (op c1, c2))
+ SDValue OpNode = DAG.FoldConstantArithmetic(Opc, VT, R, L);
+ if (!OpNode.getNode())
+ return SDValue();
+ return DAG.getNode(Opc, DL, VT, N1.getOperand(0), OpNode);
+ }
+ if (N1.hasOneUse()) {
+ // reassoc. (op y, (op x, c1)) -> (op (op x, y), c1) iff x+c1 has one
+ // use
+ SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N1.getOperand(0), N0);
+ if (!OpNode.getNode())
+ return SDValue();
+ AddToWorklist(OpNode.getNode());
+ return DAG.getNode(Opc, DL, VT, OpNode, N1.getOperand(1));
+ }
}
}
@@ -658,14 +725,14 @@ SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
assert((!To[i].getNode() ||
N->getValueType(i) == To[i].getValueType()) &&
"Cannot combine value to value of different type!"));
- WorkListRemover DeadNodes(*this);
+ WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesWith(N, To);
if (AddTo) {
// Push the new nodes and any users onto the worklist
for (unsigned i = 0, e = NumTo; i != e; ++i) {
if (To[i].getNode()) {
- AddToWorkList(To[i].getNode());
- AddUsersToWorkList(To[i].getNode());
+ AddToWorklist(To[i].getNode());
+ AddUsersToWorklist(To[i].getNode());
}
}
}
@@ -676,7 +743,7 @@ SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
if (N->use_empty()) {
// Nodes can be reintroduced into the worklist. Make sure we do not
// process a node that has been replaced.
- removeFromWorkList(N);
+ removeFromWorklist(N);
// Finally, since the node is now dead, remove it from the graph.
DAG.DeleteNode(N);
@@ -688,24 +755,24 @@ void DAGCombiner::
CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
// Replace all uses. If any nodes become isomorphic to other nodes and
// are deleted, make sure to remove them from our worklist.
- WorkListRemover DeadNodes(*this);
+ WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New);
// Push the new node and any (possibly new) users onto the worklist.
- AddToWorkList(TLO.New.getNode());
- AddUsersToWorkList(TLO.New.getNode());
+ AddToWorklist(TLO.New.getNode());
+ AddUsersToWorklist(TLO.New.getNode());
// Finally, if the node is now dead, remove it from the graph. The node
// may not be dead if the replacement process recursively simplified to
// something else needing this node.
if (TLO.Old.getNode()->use_empty()) {
- removeFromWorkList(TLO.Old.getNode());
+ removeFromWorklist(TLO.Old.getNode());
// If the operands of this node are only used by the node, they will now
// be dead. Make sure to visit them first to delete dead nodes early.
for (unsigned i = 0, e = TLO.Old.getNode()->getNumOperands(); i != e; ++i)
if (TLO.Old.getNode()->getOperand(i).getNode()->hasOneUse())
- AddToWorkList(TLO.Old.getNode()->getOperand(i).getNode());
+ AddToWorklist(TLO.Old.getNode()->getOperand(i).getNode());
DAG.DeleteNode(TLO.Old.getNode());
}
@@ -721,7 +788,7 @@ bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) {
return false;
// Revisit the node.
- AddToWorkList(Op.getNode());
+ AddToWorklist(Op.getNode());
// Replace the old value with the new one.
++NodesCombined;
@@ -745,12 +812,12 @@ void DAGCombiner::ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad) {
dbgs() << "\nWith: ";
Trunc.getNode()->dump(&DAG);
dbgs() << '\n');
- WorkListRemover DeadNodes(*this);
+ WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), Trunc);
DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), SDValue(ExtLoad, 1));
- removeFromWorkList(Load);
+ removeFromWorklist(Load);
DAG.DeleteNode(Load);
- AddToWorkList(Trunc.getNode());
+ AddToWorklist(Trunc.getNode());
}
SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) {
@@ -798,9 +865,9 @@ SDValue DAGCombiner::SExtPromoteOperand(SDValue Op, EVT PVT) {
SDLoc dl(Op);
bool Replace = false;
SDValue NewOp = PromoteOperand(Op, PVT, Replace);
- if (NewOp.getNode() == 0)
+ if (!NewOp.getNode())
return SDValue();
- AddToWorkList(NewOp.getNode());
+ AddToWorklist(NewOp.getNode());
if (Replace)
ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
@@ -813,9 +880,9 @@ SDValue DAGCombiner::ZExtPromoteOperand(SDValue Op, EVT PVT) {
SDLoc dl(Op);
bool Replace = false;
SDValue NewOp = PromoteOperand(Op, PVT, Replace);
- if (NewOp.getNode() == 0)
+ if (!NewOp.getNode())
return SDValue();
- AddToWorkList(NewOp.getNode());
+ AddToWorklist(NewOp.getNode());
if (Replace)
ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
@@ -848,7 +915,7 @@ SDValue DAGCombiner::PromoteIntBinOp(SDValue Op) {
bool Replace0 = false;
SDValue N0 = Op.getOperand(0);
SDValue NN0 = PromoteOperand(N0, PVT, Replace0);
- if (NN0.getNode() == 0)
+ if (!NN0.getNode())
return SDValue();
bool Replace1 = false;
@@ -858,13 +925,13 @@ SDValue DAGCombiner::PromoteIntBinOp(SDValue Op) {
NN1 = NN0;
else {
NN1 = PromoteOperand(N1, PVT, Replace1);
- if (NN1.getNode() == 0)
+ if (!NN1.getNode())
return SDValue();
}
- AddToWorkList(NN0.getNode());
+ AddToWorklist(NN0.getNode());
if (NN1.getNode())
- AddToWorkList(NN1.getNode());
+ AddToWorklist(NN1.getNode());
if (Replace0)
ReplaceLoadWithPromotedLoad(N0.getNode(), NN0.getNode());
@@ -911,10 +978,10 @@ SDValue DAGCombiner::PromoteIntShiftOp(SDValue Op) {
N0 = ZExtPromoteOperand(Op.getOperand(0), PVT);
else
N0 = PromoteOperand(N0, PVT, Replace);
- if (N0.getNode() == 0)
+ if (!N0.getNode())
return SDValue();
- AddToWorkList(N0.getNode());
+ AddToWorklist(N0.getNode());
if (Replace)
ReplaceLoadWithPromotedLoad(Op.getOperand(0).getNode(), N0.getNode());
@@ -994,12 +1061,12 @@ bool DAGCombiner::PromoteLoad(SDValue Op) {
dbgs() << "\nTo: ";
Result.getNode()->dump(&DAG);
dbgs() << '\n');
- WorkListRemover DeadNodes(*this);
+ WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLD.getValue(1));
- removeFromWorkList(N);
+ removeFromWorklist(N);
DAG.DeleteNode(N);
- AddToWorkList(Result.getNode());
+ AddToWorklist(Result.getNode());
return true;
}
return false;
@@ -1019,7 +1086,7 @@ void DAGCombiner::Run(CombineLevel AtLevel) {
// Add all the dag nodes to the worklist.
for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
E = DAG.allnodes_end(); I != E; ++I)
- AddToWorkList(I);
+ AddToWorklist(I);
// Create a dummy node (which is not added to allnodes), that adds a reference
// to the root node, preventing it from being deleted, and tracking any
@@ -1032,23 +1099,23 @@ void DAGCombiner::Run(CombineLevel AtLevel) {
// while the worklist isn't empty, find a node and
// try and combine it.
- while (!WorkListContents.empty()) {
+ while (!WorklistContents.empty()) {
SDNode *N;
- // The WorkListOrder holds the SDNodes in order, but it may contain
+ // The WorklistOrder holds the SDNodes in order, but it may contain
// duplicates.
// In order to avoid a linear scan, we use a set (O(log N)) to hold what the
// worklist *should* contain, and check that the node we want to visit should
// actually be visited.
do {
- N = WorkListOrder.pop_back_val();
- } while (!WorkListContents.erase(N));
+ N = WorklistOrder.pop_back_val();
+ } while (!WorklistContents.erase(N));
// If N has no uses, it is dead. Make sure to revisit all N's operands once
// N is deleted from the DAG, since they too may now be dead or may have a
// reduced number of uses, allowing other xforms.
- if (N->use_empty() && N != &Dummy) {
+ if (N->use_empty()) {
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- AddToWorkList(N->getOperand(i).getNode());
+ AddToWorklist(N->getOperand(i).getNode());
DAG.DeleteNode(N);
continue;
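
As an aside, a minimal standalone sketch of the worklist scheme used in the loop above (illustrative only, with a hypothetical Node type; not part of the patch): the vector preserves visit order but may hold duplicates, while the set records what is genuinely pending, so stale entries are skipped without a linear scan.

    #include <set>
    #include <vector>

    struct Node { int Id; };

    int main() {
      std::vector<Node *> WorklistOrder;   // visit order, may contain duplicates
      std::set<Node *> WorklistContents;   // what the worklist *should* contain
      Node A{0}, B{1};
      for (Node *N : {&A, &B, &A}) {       // A is queued twice
        WorklistContents.insert(N);
        WorklistOrder.push_back(N);
      }
      while (!WorklistContents.empty()) {
        Node *N;
        do {
          N = WorklistOrder.back();
          WorklistOrder.pop_back();
        } while (!WorklistContents.erase(N));  // skip stale duplicates
        (void)N;  // visit(N) would run here exactly once per pending node
      }
      return 0;
    }
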
@@ -1056,7 +1123,7 @@ void DAGCombiner::Run(CombineLevel AtLevel) {
SDValue RV = combine(N);
- if (RV.getNode() == 0)
+ if (!RV.getNode())
continue;
++NodesCombined;
@@ -1080,7 +1147,7 @@ void DAGCombiner::Run(CombineLevel AtLevel) {
// Transfer debug value.
DAG.TransferDbgValues(SDValue(N, 0), RV);
- WorkListRemover DeadNodes(*this);
+ WorklistRemover DeadNodes(*this);
if (N->getNumValues() == RV.getNode()->getNumValues())
DAG.ReplaceAllUsesWith(N, RV.getNode());
else {
@@ -1091,14 +1158,14 @@ void DAGCombiner::Run(CombineLevel AtLevel) {
}
// Push the new node and any users onto the worklist
- AddToWorkList(RV.getNode());
- AddUsersToWorkList(RV.getNode());
+ AddToWorklist(RV.getNode());
+ AddUsersToWorklist(RV.getNode());
// Add any uses of the old node to the worklist in case this node is the
// last one that uses them. They may become dead after this node is
// deleted.
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- AddToWorkList(N->getOperand(i).getNode());
+ AddToWorklist(N->getOperand(i).getNode());
// Finally, if the node is now dead, remove it from the graph. The node
// may not be dead if the replacement process recursively simplified to
@@ -1106,7 +1173,7 @@ void DAGCombiner::Run(CombineLevel AtLevel) {
if (N->use_empty()) {
// Nodes can be reintroduced into the worklist. Make sure we do not
// process a node that has been replaced.
- removeFromWorkList(N);
+ removeFromWorklist(N);
// Finally, since the node is now dead, remove it from the graph.
DAG.DeleteNode(N);
@@ -1148,6 +1215,8 @@ SDValue DAGCombiner::visit(SDNode *N) {
case ISD::SHL: return visitSHL(N);
case ISD::SRA: return visitSRA(N);
case ISD::SRL: return visitSRL(N);
+ case ISD::ROTR:
+ case ISD::ROTL: return visitRotate(N);
case ISD::CTLZ: return visitCTLZ(N);
case ISD::CTLZ_ZERO_UNDEF: return visitCTLZ_ZERO_UNDEF(N);
case ISD::CTTZ: return visitCTTZ(N);
@@ -1193,6 +1262,7 @@ SDValue DAGCombiner::visit(SDNode *N) {
case ISD::CONCAT_VECTORS: return visitCONCAT_VECTORS(N);
case ISD::EXTRACT_SUBVECTOR: return visitEXTRACT_SUBVECTOR(N);
case ISD::VECTOR_SHUFFLE: return visitVECTOR_SHUFFLE(N);
+ case ISD::INSERT_SUBVECTOR: return visitINSERT_SUBVECTOR(N);
}
return SDValue();
}
@@ -1201,7 +1271,7 @@ SDValue DAGCombiner::combine(SDNode *N) {
SDValue RV = visit(N);
// If nothing happened, try a target-specific DAG combine.
- if (RV.getNode() == 0) {
+ if (!RV.getNode()) {
assert(N->getOpcode() != ISD::DELETED_NODE &&
"Node was deleted but visit returned NULL!");
@@ -1217,7 +1287,7 @@ SDValue DAGCombiner::combine(SDNode *N) {
}
// If nothing happened still, try promoting the operation.
- if (RV.getNode() == 0) {
+ if (!RV.getNode()) {
switch (N->getOpcode()) {
default: break;
case ISD::ADD:
@@ -1247,17 +1317,23 @@ SDValue DAGCombiner::combine(SDNode *N) {
// If N is a commutative binary node, try commuting it to enable more
// sdisel CSE.
- if (RV.getNode() == 0 &&
- SelectionDAG::isCommutativeBinOp(N->getOpcode()) &&
+ if (!RV.getNode() && SelectionDAG::isCommutativeBinOp(N->getOpcode()) &&
N->getNumValues() == 1) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
// Constant operands are canonicalized to RHS.
if (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1)) {
- SDValue Ops[] = { N1, N0 };
- SDNode *CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(),
- Ops, 2);
+ SDValue Ops[] = {N1, N0};
+ SDNode *CSENode;
+ if (const BinaryWithFlagsSDNode *BinNode =
+ dyn_cast<BinaryWithFlagsSDNode>(N)) {
+ CSENode = DAG.getNodeIfExists(
+ N->getOpcode(), N->getVTList(), Ops, BinNode->hasNoUnsignedWrap(),
+ BinNode->hasNoSignedWrap(), BinNode->isExact());
+ } else {
+ CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(), Ops);
+ }
if (CSENode)
return SDValue(CSENode, 0);
}
@@ -1321,7 +1397,7 @@ SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
// Queue up for processing.
TFs.push_back(Op.getNode());
// Clean up in case the token factor is removed.
- AddToWorkList(Op.getNode());
+ AddToWorklist(Op.getNode());
Changed = true;
break;
}
@@ -1347,8 +1423,7 @@ SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
Result = DAG.getEntryNode();
} else {
// New and improved token factor.
- Result = DAG.getNode(ISD::TokenFactor, SDLoc(N),
- MVT::Other, &Ops[0], Ops.size());
+ Result = DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Ops);
}
// Don't add users to work list.
@@ -1360,18 +1435,18 @@ SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
/// MERGE_VALUES can always be eliminated.
SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) {
- WorkListRemover DeadNodes(*this);
+ WorklistRemover DeadNodes(*this);
// Replacing results may cause a different MERGE_VALUES to suddenly
// be CSE'd with N, and carry its uses with it. Iterate until no
// uses remain, to ensure that the node can be safely deleted.
// First add the users of this node to the work list so that they
// can be tried again once they have new operands.
- AddUsersToWorkList(N);
+ AddUsersToWorklist(N);
do {
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
DAG.ReplaceAllUsesOfValueWith(SDValue(N, i), N->getOperand(i));
} while (!N->use_empty());
- removeFromWorkList(N);
+ removeFromWorklist(N);
DAG.DeleteNode(N);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
@@ -1447,7 +1522,7 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
N0.getOperand(1));
// reassociate add
SDValue RADD = ReassociateOps(ISD::ADD, SDLoc(N), N0, N1);
- if (RADD.getNode() != 0)
+ if (RADD.getNode())
return RADD;
// fold ((0-A) + B) -> B-A
if (N0.getOpcode() == ISD::SUB && isa<ConstantSDNode>(N0.getOperand(0)) &&
@@ -1500,15 +1575,17 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
if (VT.isInteger() && !VT.isVector()) {
APInt LHSZero, LHSOne;
APInt RHSZero, RHSOne;
- DAG.ComputeMaskedBits(N0, LHSZero, LHSOne);
+ DAG.computeKnownBits(N0, LHSZero, LHSOne);
if (LHSZero.getBoolValue()) {
- DAG.ComputeMaskedBits(N1, RHSZero, RHSOne);
+ DAG.computeKnownBits(N1, RHSZero, RHSOne);
// If all possibly-set bits on the LHS are clear on the RHS, return an OR.
// If all possibly-set bits on the RHS are clear on the LHS, return an OR.
- if ((RHSZero & ~LHSZero) == ~LHSZero || (LHSZero & ~RHSZero) == ~RHSZero)
- return DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N1);
+ if ((RHSZero & ~LHSZero) == ~LHSZero || (LHSZero & ~RHSZero) == ~RHSZero){
+ if (!LegalOperations || TLI.isOperationLegal(ISD::OR, VT))
+ return DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N1);
+ }
}
}
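
A scalar illustration of the add-to-or fold above (a sketch, not part of the patch): when the known-zero masks show that the two operands share no set bits, the addition cannot produce a carry, so x + y equals x | y. Here x is assumed to use only the low nibble and y only the high nibble.

    #include <cassert>
    #include <cstdint>

    int main() {
      for (unsigned lo = 0; lo < 16; ++lo)
        for (unsigned hi = 0; hi < 16; ++hi) {
          uint8_t x = uint8_t(lo);        // known zero in bits 4..7
          uint8_t y = uint8_t(hi << 4);   // known zero in bits 0..3
          assert(uint8_t(x + y) == uint8_t(x | y));
        }
      return 0;
    }
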
@@ -1593,10 +1670,10 @@ SDValue DAGCombiner::visitADDC(SDNode *N) {
// fold (addc a, b) -> (or a, b), CARRY_FALSE iff a and b share no bits.
APInt LHSZero, LHSOne;
APInt RHSZero, RHSOne;
- DAG.ComputeMaskedBits(N0, LHSZero, LHSOne);
+ DAG.computeKnownBits(N0, LHSZero, LHSOne);
if (LHSZero.getBoolValue()) {
- DAG.ComputeMaskedBits(N1, RHSZero, RHSOne);
+ DAG.computeKnownBits(N1, RHSZero, RHSOne);
// If all possibly-set bits on the LHS are clear on the RHS, return an OR.
// If all possibly-set bits on the RHS are clear on the LHS, return an OR.
@@ -1645,7 +1722,7 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
SDValue N1 = N->getOperand(1);
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode());
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
- ConstantSDNode *N1C1 = N1.getOpcode() != ISD::ADD ? 0 :
+ ConstantSDNode *N1C1 = N1.getOpcode() != ISD::ADD ? nullptr :
dyn_cast<ConstantSDNode>(N1.getOperand(1).getNode());
EVT VT = N0.getValueType();
@@ -1778,22 +1855,6 @@ SDValue DAGCombiner::visitSUBE(SDNode *N) {
return SDValue();
}
-/// isConstantSplatVector - Returns true if N is a BUILD_VECTOR node whose
-/// elements are all the same constant or undefined.
-static bool isConstantSplatVector(SDNode *N, APInt& SplatValue) {
- BuildVectorSDNode *C = dyn_cast<BuildVectorSDNode>(N);
- if (!C)
- return false;
-
- APInt SplatUndef;
- unsigned SplatBitSize;
- bool HasAnyUndefs;
- EVT EltVT = N->getValueType(0).getVectorElementType();
- return (C->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
- HasAnyUndefs) &&
- EltVT.getSizeInBits() >= SplatBitSize);
-}
-
SDValue DAGCombiner::visitMUL(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
@@ -1814,10 +1875,10 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
N0IsConst = isConstantSplatVector(N0.getNode(), ConstValue0);
N1IsConst = isConstantSplatVector(N1.getNode(), ConstValue1);
} else {
- N0IsConst = dyn_cast<ConstantSDNode>(N0) != 0;
+ N0IsConst = dyn_cast<ConstantSDNode>(N0) != nullptr;
ConstValue0 = N0IsConst ? (dyn_cast<ConstantSDNode>(N0))->getAPIntValue()
: APInt();
- N1IsConst = dyn_cast<ConstantSDNode>(N1) != 0;
+ N1IsConst = dyn_cast<ConstantSDNode>(N1) != nullptr;
ConstValue1 = N1IsConst ? (dyn_cast<ConstantSDNode>(N1))->getAPIntValue()
: APInt();
}
@@ -1867,7 +1928,7 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
isa<ConstantSDNode>(N0.getOperand(1)))) {
SDValue C3 = DAG.getNode(ISD::SHL, SDLoc(N), VT,
N1, N0.getOperand(1));
- AddToWorkList(C3.getNode());
+ AddToWorklist(C3.getNode());
return DAG.getNode(ISD::MUL, SDLoc(N), VT,
N0.getOperand(0), C3);
}
@@ -1875,7 +1936,7 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
// Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one
// use.
{
- SDValue Sh(0,0), Y(0,0);
+ SDValue Sh(nullptr,0), Y(nullptr,0);
// Check for both (mul (shl X, C), Y) and (mul Y, (shl X, C)).
if (N0.getOpcode() == ISD::SHL &&
(isConstantSplatVector(N0.getOperand(1).getNode(), Val) ||
@@ -1908,7 +1969,7 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
// reassociate mul
SDValue RMUL = ReassociateOps(ISD::MUL, SDLoc(N), N0, N1);
- if (RMUL.getNode() != 0)
+ if (RMUL.getNode())
return RMUL;
return SDValue();
@@ -1917,8 +1978,8 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
SDValue DAGCombiner::visitSDIV(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
- ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode());
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
+ ConstantSDNode *N0C = isConstOrConstSplat(N0);
+ ConstantSDNode *N1C = isConstOrConstSplat(N1);
EVT VT = N->getValueType(0);
// fold vector ops
@@ -1944,10 +2005,10 @@ SDValue DAGCombiner::visitSDIV(SDNode *N) {
return DAG.getNode(ISD::UDIV, SDLoc(N), N1.getValueType(),
N0, N1);
}
+
// fold (sdiv X, pow2) -> simple ops after legalize
- if (N1C && !N1C->isNullValue() &&
- (N1C->getAPIntValue().isPowerOf2() ||
- (-N1C->getAPIntValue()).isPowerOf2())) {
+ if (N1C && !N1C->isNullValue() && (N1C->getAPIntValue().isPowerOf2() ||
+ (-N1C->getAPIntValue()).isPowerOf2())) {
// If dividing by powers of two is cheap, then don't perform the following
// fold.
if (TLI.isPow2DivCheap())
@@ -1956,18 +2017,20 @@ SDValue DAGCombiner::visitSDIV(SDNode *N) {
unsigned lg2 = N1C->getAPIntValue().countTrailingZeros();
// Splat the sign bit into the register
- SDValue SGN = DAG.getNode(ISD::SRA, SDLoc(N), VT, N0,
- DAG.getConstant(VT.getSizeInBits()-1,
- getShiftAmountTy(N0.getValueType())));
- AddToWorkList(SGN.getNode());
+ SDValue SGN =
+ DAG.getNode(ISD::SRA, SDLoc(N), VT, N0,
+ DAG.getConstant(VT.getScalarSizeInBits() - 1,
+ getShiftAmountTy(N0.getValueType())));
+ AddToWorklist(SGN.getNode());
// Add (N0 < 0) ? abs2 - 1 : 0;
- SDValue SRL = DAG.getNode(ISD::SRL, SDLoc(N), VT, SGN,
- DAG.getConstant(VT.getSizeInBits() - lg2,
- getShiftAmountTy(SGN.getValueType())));
+ SDValue SRL =
+ DAG.getNode(ISD::SRL, SDLoc(N), VT, SGN,
+ DAG.getConstant(VT.getScalarSizeInBits() - lg2,
+ getShiftAmountTy(SGN.getValueType())));
SDValue ADD = DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, SRL);
- AddToWorkList(SRL.getNode());
- AddToWorkList(ADD.getNode()); // Divide by pow2
+ AddToWorklist(SRL.getNode());
+ AddToWorklist(ADD.getNode()); // Divide by pow2
SDValue SRA = DAG.getNode(ISD::SRA, SDLoc(N), VT, ADD,
DAG.getConstant(lg2, getShiftAmountTy(ADD.getValueType())));
@@ -1976,14 +2039,13 @@ SDValue DAGCombiner::visitSDIV(SDNode *N) {
if (N1C->getAPIntValue().isNonNegative())
return SRA;
- AddToWorkList(SRA.getNode());
- return DAG.getNode(ISD::SUB, SDLoc(N), VT,
- DAG.getConstant(0, VT), SRA);
+ AddToWorklist(SRA.getNode());
+ return DAG.getNode(ISD::SUB, SDLoc(N), VT, DAG.getConstant(0, VT), SRA);
}
// if integer divide is expensive and we satisfy the requirements, emit an
// alternate sequence.
- if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap()) {
+ if (N1C && !TLI.isIntDivCheap()) {
SDValue Op = BuildSDIV(N);
if (Op.getNode()) return Op;
}
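
To make the SRA/SRL/ADD/SRA sequence above concrete, here is a scalar sketch for a positive power-of-two divisor (illustrative only; sdivByPow2 is a hypothetical helper, assuming 32-bit values, 1 <= lg2 <= 31, and arithmetic right shift of negative values):

    #include <cassert>
    #include <cstdint>

    int32_t sdivByPow2(int32_t X, unsigned lg2) {
      int32_t SGN = X >> 31;                        // splat the sign bit
      uint32_t SRL = uint32_t(SGN) >> (32 - lg2);   // (X < 0) ? (1 << lg2) - 1 : 0
      int32_t ADD = X + int32_t(SRL);               // bias negative values upward
      return ADD >> lg2;                            // arithmetic shift divides by 2^lg2
    }

    int main() {
      assert(sdivByPow2(-7, 2) == -7 / 4);   // both round toward zero: -1
      assert(sdivByPow2(9, 2) == 9 / 4);     // 2
      assert(sdivByPow2(-8, 3) == -8 / 8);   // exact: -1
      return 0;
    }
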
@@ -2001,8 +2063,8 @@ SDValue DAGCombiner::visitSDIV(SDNode *N) {
SDValue DAGCombiner::visitUDIV(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
- ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode());
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
+ ConstantSDNode *N0C = isConstOrConstSplat(N0);
+ ConstantSDNode *N1C = isConstOrConstSplat(N1);
EVT VT = N->getValueType(0);
// fold vector ops
@@ -2029,13 +2091,13 @@ SDValue DAGCombiner::visitUDIV(SDNode *N) {
DAG.getConstant(SHC->getAPIntValue()
.logBase2(),
ADDVT));
- AddToWorkList(Add.getNode());
+ AddToWorklist(Add.getNode());
return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, Add);
}
}
}
// fold (udiv x, c) -> alternate
- if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap()) {
+ if (N1C && !TLI.isIntDivCheap()) {
SDValue Op = BuildUDIV(N);
if (Op.getNode()) return Op;
}
@@ -2053,8 +2115,8 @@ SDValue DAGCombiner::visitUDIV(SDNode *N) {
SDValue DAGCombiner::visitSREM(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
- ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
+ ConstantSDNode *N0C = isConstOrConstSplat(N0);
+ ConstantSDNode *N1C = isConstOrConstSplat(N1);
EVT VT = N->getValueType(0);
// fold (srem c1, c2) -> c1%c2
@@ -2071,13 +2133,13 @@ SDValue DAGCombiner::visitSREM(SDNode *N) {
// X%C to the equivalent of X-X/C*C.
if (N1C && !N1C->isNullValue()) {
SDValue Div = DAG.getNode(ISD::SDIV, SDLoc(N), VT, N0, N1);
- AddToWorkList(Div.getNode());
+ AddToWorklist(Div.getNode());
SDValue OptimizedDiv = combine(Div.getNode());
if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) {
SDValue Mul = DAG.getNode(ISD::MUL, SDLoc(N), VT,
OptimizedDiv, N1);
SDValue Sub = DAG.getNode(ISD::SUB, SDLoc(N), VT, N0, Mul);
- AddToWorkList(Mul.getNode());
+ AddToWorklist(Mul.getNode());
return Sub;
}
}
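
The rewrite above leans on the identity x % c == x - (x / c) * c; a quick self-contained check (illustrative only, not part of the patch):

    #include <cassert>

    int main() {
      for (int x = -20; x <= 20; ++x)
        for (int c : {1, 3, 7, 16})
          assert(x % c == x - (x / c) * c);
      return 0;
    }
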
@@ -2095,8 +2157,8 @@ SDValue DAGCombiner::visitSREM(SDNode *N) {
SDValue DAGCombiner::visitUREM(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
- ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
+ ConstantSDNode *N0C = isConstOrConstSplat(N0);
+ ConstantSDNode *N1C = isConstOrConstSplat(N1);
EVT VT = N->getValueType(0);
// fold (urem c1, c2) -> c1%c2
@@ -2114,7 +2176,7 @@ SDValue DAGCombiner::visitUREM(SDNode *N) {
DAG.getNode(ISD::ADD, SDLoc(N), VT, N1,
DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()),
VT));
- AddToWorkList(Add.getNode());
+ AddToWorklist(Add.getNode());
return DAG.getNode(ISD::AND, SDLoc(N), VT, N0, Add);
}
}
@@ -2124,13 +2186,13 @@ SDValue DAGCombiner::visitUREM(SDNode *N) {
// X%C to the equivalent of X-X/C*C.
if (N1C && !N1C->isNullValue()) {
SDValue Div = DAG.getNode(ISD::UDIV, SDLoc(N), VT, N0, N1);
- AddToWorkList(Div.getNode());
+ AddToWorklist(Div.getNode());
SDValue OptimizedDiv = combine(Div.getNode());
if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) {
SDValue Mul = DAG.getNode(ISD::MUL, SDLoc(N), VT,
OptimizedDiv, N1);
SDValue Sub = DAG.getNode(ISD::SUB, SDLoc(N), VT, N0, Mul);
- AddToWorkList(Mul.getNode());
+ AddToWorklist(Mul.getNode());
return Sub;
}
}
@@ -2229,9 +2291,9 @@ SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
bool HiExists = N->hasAnyUseOfValue(1);
if (!HiExists &&
(!LegalOperations ||
- TLI.isOperationLegal(LoOp, N->getValueType(0)))) {
+ TLI.isOperationLegalOrCustom(LoOp, N->getValueType(0)))) {
SDValue Res = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0),
- N->op_begin(), N->getNumOperands());
+ ArrayRef<SDUse>(N->op_begin(), N->op_end()));
return CombineTo(N, Res, Res);
}
@@ -2241,7 +2303,7 @@ SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
(!LegalOperations ||
TLI.isOperationLegal(HiOp, N->getValueType(1)))) {
SDValue Res = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1),
- N->op_begin(), N->getNumOperands());
+ ArrayRef<SDUse>(N->op_begin(), N->op_end()));
return CombineTo(N, Res, Res);
}
@@ -2252,8 +2314,8 @@ SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
// If the two computed results can be simplified separately, separate them.
if (LoExists) {
SDValue Lo = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0),
- N->op_begin(), N->getNumOperands());
- AddToWorkList(Lo.getNode());
+ ArrayRef<SDUse>(N->op_begin(), N->op_end()));
+ AddToWorklist(Lo.getNode());
SDValue LoOpt = combine(Lo.getNode());
if (LoOpt.getNode() && LoOpt.getNode() != Lo.getNode() &&
(!LegalOperations ||
@@ -2263,8 +2325,8 @@ SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
if (HiExists) {
SDValue Hi = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1),
- N->op_begin(), N->getNumOperands());
- AddToWorkList(Hi.getNode());
+ ArrayRef<SDUse>(N->op_begin(), N->op_end()));
+ AddToWorklist(Hi.getNode());
SDValue HiOpt = combine(Hi.getNode());
if (HiOpt.getNode() && HiOpt != Hi &&
(!LegalOperations ||
@@ -2403,7 +2465,7 @@ SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
SDValue ORNode = DAG.getNode(N->getOpcode(), SDLoc(N0),
N0.getOperand(0).getValueType(),
N0.getOperand(0), N1.getOperand(0));
- AddToWorkList(ORNode.getNode());
+ AddToWorklist(ORNode.getNode());
return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, ORNode);
}
@@ -2417,7 +2479,7 @@ SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
SDValue ORNode = DAG.getNode(N->getOpcode(), SDLoc(N0),
N0.getOperand(0).getValueType(),
N0.getOperand(0), N1.getOperand(0));
- AddToWorkList(ORNode.getNode());
+ AddToWorklist(ORNode.getNode());
return DAG.getNode(N0.getOpcode(), SDLoc(N), VT,
ORNode, N0.getOperand(1));
}
@@ -2442,7 +2504,7 @@ SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
if (In0Ty.isInteger() && In1Ty.isInteger() && In0Ty == In1Ty) {
SDValue Op = DAG.getNode(N->getOpcode(), DL, In0Ty, In0, In1);
SDValue BC = DAG.getNode(N0.getOpcode(), DL, VT, Op);
- AddToWorkList(Op.getNode());
+ AddToWorklist(Op.getNode());
return BC;
}
}
@@ -2454,35 +2516,66 @@ SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
// The type-legalizer generates this pattern when loading illegal
// vector types from memory. In many cases this allows additional shuffle
// optimizations.
- if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG &&
- N0.getOperand(1).getOpcode() == ISD::UNDEF &&
- N1.getOperand(1).getOpcode() == ISD::UNDEF) {
+ // There are other cases where moving the shuffle after the xor/and/or
+ // is profitable even if shuffles don't perform a swizzle.
+ // If both shuffles use the same mask, and both shuffles have the same first
+ // or second operand, then it might still be profitable to move the shuffle
+ // after the xor/and/or operation.
+ if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG) {
ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(N0);
ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(N1);
- assert(N0.getOperand(0).getValueType() == N1.getOperand(1).getValueType() &&
+ assert(N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType() &&
"Inputs to shuffles are not the same type");
- unsigned NumElts = VT.getVectorNumElements();
-
// Check that both shuffles use the same mask. The masks are known to be of
// the same length because the result vector type is the same.
- bool SameMask = true;
- for (unsigned i = 0; i != NumElts; ++i) {
- int Idx0 = SVN0->getMaskElt(i);
- int Idx1 = SVN1->getMaskElt(i);
- if (Idx0 != Idx1) {
- SameMask = false;
- break;
+ // Check also that shuffles have only one use to avoid introducing extra
+ // instructions.
+ if (SVN0->hasOneUse() && SVN1->hasOneUse() &&
+ SVN0->getMask().equals(SVN1->getMask())) {
+ SDValue ShOp = N0->getOperand(1);
+
+ // Don't try to fold this node if it requires introducing a
+ // build vector of all zeros that might be illegal at this stage.
+ if (N->getOpcode() == ISD::XOR && ShOp.getOpcode() != ISD::UNDEF) {
+ if (!LegalTypes)
+ ShOp = DAG.getConstant(0, VT);
+ else
+ ShOp = SDValue();
}
- }
- if (SameMask) {
- SDValue Op = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
- N0.getOperand(0), N1.getOperand(0));
- AddToWorkList(Op.getNode());
- return DAG.getVectorShuffle(VT, SDLoc(N), Op,
- DAG.getUNDEF(VT), &SVN0->getMask()[0]);
+ // (AND (shuf (A, C), shuf (B, C)) -> shuf (AND (A, B), C)
+ // (OR (shuf (A, C), shuf (B, C)) -> shuf (OR (A, B), C)
+ // (XOR (shuf (A, C), shuf (B, C)) -> shuf (XOR (A, B), V_0)
+ if (N0.getOperand(1) == N1.getOperand(1) && ShOp.getNode()) {
+ SDValue NewNode = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
+ N0->getOperand(0), N1->getOperand(0));
+ AddToWorklist(NewNode.getNode());
+ return DAG.getVectorShuffle(VT, SDLoc(N), NewNode, ShOp,
+ &SVN0->getMask()[0]);
+ }
+
+ // Don't try to fold this node if it requires introducing a
+ // build vector of all zeros that might be illegal at this stage.
+ ShOp = N0->getOperand(0);
+ if (N->getOpcode() == ISD::XOR && ShOp.getOpcode() != ISD::UNDEF) {
+ if (!LegalTypes)
+ ShOp = DAG.getConstant(0, VT);
+ else
+ ShOp = SDValue();
+ }
+
+ // (AND (shuf (C, A), shuf (C, B)) -> shuf (C, AND (A, B))
+ // (OR (shuf (C, A), shuf (C, B)) -> shuf (C, OR (A, B))
+ // (XOR (shuf (C, A), shuf (C, B)) -> shuf (V_0, XOR (A, B))
+ if (N0->getOperand(0) == N1->getOperand(0) && ShOp.getNode()) {
+ SDValue NewNode = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
+ N0->getOperand(1), N1->getOperand(1));
+ AddToWorklist(NewNode.getNode());
+ return DAG.getVectorShuffle(VT, SDLoc(N), ShOp, NewNode,
+ &SVN0->getMask()[0]);
+ }
}
}
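
An elementwise illustration of the transform above (a sketch, not part of the patch), assuming 4-element byte vectors and a hypothetical helper shuf: when both shuffles use the same mask, every output lane combines values drawn from the same source lane, so AND(shuffle(A, C, M), shuffle(B, C, M)) equals shuffle(AND(A, B), C, M).

    #include <array>
    #include <cassert>
    #include <cstdint>

    using Vec = std::array<uint8_t, 4>;

    // Shuffle two 4-element vectors: mask entries 0..3 pick from Lo, 4..7 from Hi.
    static Vec shuf(const Vec &Lo, const Vec &Hi, const std::array<int, 4> &M) {
      Vec R{};
      for (int i = 0; i != 4; ++i)
        R[i] = M[i] < 4 ? Lo[M[i]] : Hi[M[i] - 4];
      return R;
    }

    int main() {
      Vec A{0xf0, 0x0f, 0xaa, 0x55}, B{0xff, 0x33, 0xcc, 0x0f}, C{1, 2, 3, 4};
      std::array<int, 4> M{2, 6, 0, 4};          // the shared shuffle mask
      Vec AB{};
      for (int i = 0; i != 4; ++i)
        AB[i] = A[i] & B[i];                     // AND(A, B), lane by lane
      Vec S0 = shuf(A, C, M), S1 = shuf(B, C, M);
      Vec Lhs{};
      for (int i = 0; i != 4; ++i)
        Lhs[i] = S0[i] & S1[i];                  // AND of the two shuffles
      assert(Lhs == shuf(AB, C, M));             // == shuffle of the AND
      return 0;
    }
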
@@ -2534,7 +2627,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
return DAG.getConstant(0, VT);
// reassociate and
SDValue RAND = ReassociateOps(ISD::AND, SDLoc(N), N0, N1);
- if (RAND.getNode() != 0)
+ if (RAND.getNode())
return RAND;
// fold (and (or x, C), D) -> D if (C & D) == D
if (N1C && N0.getOpcode() == ISD::OR)
@@ -2670,21 +2763,21 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
if (cast<ConstantSDNode>(LR)->isNullValue() && Op1 == ISD::SETEQ) {
SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(N0),
LR.getValueType(), LL, RL);
- AddToWorkList(ORNode.getNode());
+ AddToWorklist(ORNode.getNode());
return DAG.getSetCC(SDLoc(N), VT, ORNode, LR, Op1);
}
// fold (and (seteq X, -1), (seteq Y, -1)) -> (seteq (and X, Y), -1)
if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETEQ) {
SDValue ANDNode = DAG.getNode(ISD::AND, SDLoc(N0),
LR.getValueType(), LL, RL);
- AddToWorkList(ANDNode.getNode());
+ AddToWorklist(ANDNode.getNode());
return DAG.getSetCC(SDLoc(N), VT, ANDNode, LR, Op1);
}
// fold (and (setgt X, -1), (setgt Y, -1)) -> (setgt (or X, Y), -1)
if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETGT) {
SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(N0),
LR.getValueType(), LL, RL);
- AddToWorkList(ORNode.getNode());
+ AddToWorklist(ORNode.getNode());
return DAG.getSetCC(SDLoc(N), VT, ORNode, LR, Op1);
}
}
@@ -2697,7 +2790,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
cast<ConstantSDNode>(RR)->isNullValue()))) {
SDValue ADDNode = DAG.getNode(ISD::ADD, SDLoc(N0), LL.getValueType(),
LL, DAG.getConstant(1, LL.getValueType()));
- AddToWorkList(ADDNode.getNode());
+ AddToWorklist(ADDNode.getNode());
return DAG.getSetCC(SDLoc(N), VT, ADDNode,
DAG.getConstant(2, LL.getValueType()), ISD::SETUGE);
}
@@ -2745,7 +2838,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT,
LN0->getChain(), LN0->getBasePtr(),
MemVT, LN0->getMemOperand());
- AddToWorkList(N);
+ AddToWorklist(N);
CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
@@ -2765,7 +2858,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT,
LN0->getChain(), LN0->getBasePtr(),
MemVT, LN0->getMemOperand());
- AddToWorkList(N);
+ AddToWorklist(N);
CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
@@ -2796,7 +2889,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN0), LoadResultTy,
LN0->getChain(), LN0->getBasePtr(), ExtVT,
LN0->getMemOperand());
- AddToWorkList(N);
+ AddToWorklist(N);
CombineTo(LN0, NewLoad, NewLoad.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
@@ -2823,7 +2916,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
Alignment = MinAlign(Alignment, PtrOff);
}
- AddToWorkList(NewPtr.getNode());
+ AddToWorklist(NewPtr.getNode());
EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT;
SDValue Load =
@@ -2832,7 +2925,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
LN0->getPointerInfo(),
ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
Alignment, LN0->getTBAAInfo());
- AddToWorkList(N);
+ AddToWorklist(N);
CombineTo(LN0, Load, Load.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
@@ -3067,7 +3160,7 @@ SDValue DAGCombiner::MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1) {
if (!TLI.isOperationLegal(ISD::BSWAP, VT))
return SDValue();
- SmallVector<SDNode*,4> Parts(4, (SDNode*)0);
+ SmallVector<SDNode*,4> Parts(4, (SDNode*)nullptr);
// Look for either
// (or (or (and), (and)), (or (and), (and)))
// (or (or (or (and), (and)), (and)), (and))
@@ -3151,6 +3244,62 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
return N0;
if (ISD::isBuildVectorAllOnes(N1.getNode()))
return N1;
+
+ // fold (or (shuf A, V_0, MA), (shuf B, V_0, MB)) -> (shuf A, B, Mask1)
+ // fold (or (shuf A, V_0, MA), (shuf B, V_0, MB)) -> (shuf B, A, Mask2)
+ // Do this only if the resulting shuffle is legal.
+ if (isa<ShuffleVectorSDNode>(N0) &&
+ isa<ShuffleVectorSDNode>(N1) &&
+ // Avoid folding a node with illegal type.
+ TLI.isTypeLegal(VT) &&
+ N0->getOperand(1) == N1->getOperand(1) &&
+ ISD::isBuildVectorAllZeros(N0.getOperand(1).getNode())) {
+ bool CanFold = true;
+ unsigned NumElts = VT.getVectorNumElements();
+ const ShuffleVectorSDNode *SV0 = cast<ShuffleVectorSDNode>(N0);
+ const ShuffleVectorSDNode *SV1 = cast<ShuffleVectorSDNode>(N1);
+ // We construct two shuffle masks:
+ // - Mask1 is a shuffle mask for a shuffle with N0 as the first operand
+ // and N1 as the second operand.
+ // - Mask2 is a shuffle mask for a shuffle with N1 as the first operand
+ // and N0 as the second operand.
+ // We do this because OR is commutable and therefore there might be
+ // two ways to fold this node into a shuffle.
+ SmallVector<int,4> Mask1;
+ SmallVector<int,4> Mask2;
+
+ for (unsigned i = 0; i != NumElts && CanFold; ++i) {
+ int M0 = SV0->getMaskElt(i);
+ int M1 = SV1->getMaskElt(i);
+
+ // Both shuffle indexes are undef. Propagate Undef.
+ if (M0 < 0 && M1 < 0) {
+ Mask1.push_back(M0);
+ Mask2.push_back(M0);
+ continue;
+ }
+
+ if (M0 < 0 || M1 < 0 ||
+ (M0 < (int)NumElts && M1 < (int)NumElts) ||
+ (M0 >= (int)NumElts && M1 >= (int)NumElts)) {
+ CanFold = false;
+ break;
+ }
+
+ Mask1.push_back(M0 < (int)NumElts ? M0 : M1 + NumElts);
+ Mask2.push_back(M1 < (int)NumElts ? M1 : M0 + NumElts);
+ }
+
+ if (CanFold) {
+ // Fold this sequence only if the resulting shuffle is 'legal'.
+ if (TLI.isShuffleMaskLegal(Mask1, VT))
+ return DAG.getVectorShuffle(VT, SDLoc(N), N0->getOperand(0),
+ N1->getOperand(0), &Mask1[0]);
+ if (TLI.isShuffleMaskLegal(Mask2, VT))
+ return DAG.getVectorShuffle(VT, SDLoc(N), N1->getOperand(0),
+ N0->getOperand(0), &Mask2[0]);
+ }
+ }
}
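
A small worked example of the mask construction above (illustrative, undef lanes ignored for brevity; not part of the patch), assuming 4-element vectors: with A = shuffle(X, zero, {0, 5, 2, 7}) and B = shuffle(Y, zero, {4, 1, 6, 3}), every lane is taken from exactly one of X or Y, so (or A, B) folds to shuffle(X, Y, Mask1).

    #include <cassert>
    #include <vector>

    int main() {
      const int NumElts = 4;
      std::vector<int> M0 = {0, 5, 2, 7};   // lanes 0,2 from X; lanes 1,3 are zero
      std::vector<int> M1 = {4, 1, 6, 3};   // lanes 1,3 from Y; lanes 0,2 are zero
      std::vector<int> Mask1;
      bool CanFold = true;
      for (int i = 0; i != NumElts; ++i) {
        // Reject lanes where both sides would be nonzero (or both zero).
        if ((M0[i] < NumElts) == (M1[i] < NumElts)) { CanFold = false; break; }
        Mask1.push_back(M0[i] < NumElts ? M0[i] : M1[i] + NumElts);
      }
      assert(CanFold);
      assert((Mask1 == std::vector<int>{0, 5, 2, 7}));  // picks X[0], Y[1], X[2], Y[3]
      return 0;
    }
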
// fold (or x, undef) -> -1
@@ -3177,26 +3326,29 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
// Recognize halfword bswaps as (bswap + rotl 16) or (bswap + shl 16)
SDValue BSwap = MatchBSwapHWord(N, N0, N1);
- if (BSwap.getNode() != 0)
+ if (BSwap.getNode())
return BSwap;
BSwap = MatchBSwapHWordLow(N, N0, N1);
- if (BSwap.getNode() != 0)
+ if (BSwap.getNode())
return BSwap;
// reassociate or
SDValue ROR = ReassociateOps(ISD::OR, SDLoc(N), N0, N1);
- if (ROR.getNode() != 0)
+ if (ROR.getNode())
return ROR;
// Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2)
// iff (c1 & c2) == 0.
if (N1C && N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
isa<ConstantSDNode>(N0.getOperand(1))) {
ConstantSDNode *C1 = cast<ConstantSDNode>(N0.getOperand(1));
- if ((C1->getAPIntValue() & N1C->getAPIntValue()) != 0)
+ if ((C1->getAPIntValue() & N1C->getAPIntValue()) != 0) {
+ SDValue COR = DAG.FoldConstantArithmetic(ISD::OR, VT, N1C, C1);
+ if (!COR.getNode())
+ return SDValue();
return DAG.getNode(ISD::AND, SDLoc(N), VT,
DAG.getNode(ISD::OR, SDLoc(N0), VT,
- N0.getOperand(0), N1),
- DAG.FoldConstantArithmetic(ISD::OR, VT, N1C, C1));
+ N0.getOperand(0), N1), COR);
+ }
}
// fold (or (setcc x), (setcc y)) -> (setcc (or x, y))
if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){
@@ -3211,7 +3363,7 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
(Op1 == ISD::SETNE || Op1 == ISD::SETLT)) {
SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(LR),
LR.getValueType(), LL, RL);
- AddToWorkList(ORNode.getNode());
+ AddToWorklist(ORNode.getNode());
return DAG.getSetCC(SDLoc(N), VT, ORNode, LR, Op1);
}
// fold (or (setne X, -1), (setne Y, -1)) -> (setne (and X, Y), -1)
@@ -3220,7 +3372,7 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
(Op1 == ISD::SETNE || Op1 == ISD::SETGT)) {
SDValue ANDNode = DAG.getNode(ISD::AND, SDLoc(LR),
LR.getValueType(), LL, RL);
- AddToWorkList(ANDNode.getNode());
+ AddToWorklist(ANDNode.getNode());
return DAG.getSetCC(SDLoc(N), VT, ANDNode, LR, Op1);
}
}
@@ -3302,35 +3454,163 @@ static bool MatchRotateHalf(SDValue Op, SDValue &Shift, SDValue &Mask) {
return false;
}
+// Return true if we can prove that, whenever Neg and Pos are both in the
+// range [0, OpSize), Neg == (Pos == 0 ? 0 : OpSize - Pos). This means that
+// for two opposing shifts shift1 and shift2 and a value X with OpBits bits:
+//
+// (or (shift1 X, Neg), (shift2 X, Pos))
+//
+// reduces to a rotate in direction shift2 by Pos or (equivalently) a rotate
+// in direction shift1 by Neg. The range [0, OpSize) means that we only need
+// to consider shift amounts with defined behavior.
+static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned OpSize) {
+ // If OpSize is a power of 2 then:
+ //
+ // (a) (Pos == 0 ? 0 : OpSize - Pos) == (OpSize - Pos) & (OpSize - 1)
+ // (b) Neg == Neg & (OpSize - 1) whenever Neg is in [0, OpSize).
+ //
+ // So if OpSize is a power of 2 and Neg is (and Neg', OpSize-1), we check
+ // for the stronger condition:
+ //
+ // Neg & (OpSize - 1) == (OpSize - Pos) & (OpSize - 1) [A]
+ //
+ // for all Neg and Pos. Since Neg & (OpSize - 1) == Neg' & (OpSize - 1)
+ // we can just replace Neg with Neg' for the rest of the function.
+ //
+ // In other cases we check for the even stronger condition:
+ //
+ // Neg == OpSize - Pos [B]
+ //
+ // for all Neg and Pos. Note that the (or ...) then invokes undefined
+ // behavior if Pos == 0 (and consequently Neg == OpSize).
+ //
+ // We could actually use [A] whenever OpSize is a power of 2, but the
+ // only extra cases that it would match are those uninteresting ones
+ // where Neg and Pos are never in range at the same time. E.g. for
+ // OpSize == 32, using [A] would allow a Neg of the form (sub 64, Pos)
+ // as well as (sub 32, Pos), but:
+ //
+ // (or (shift1 X, (sub 64, Pos)), (shift2 X, Pos))
+ //
+ // always invokes undefined behavior for 32-bit X.
+ //
+ // Below, Mask == OpSize - 1 when using [A] and is all-ones otherwise.
+ unsigned MaskLoBits = 0;
+ if (Neg.getOpcode() == ISD::AND &&
+ isPowerOf2_64(OpSize) &&
+ Neg.getOperand(1).getOpcode() == ISD::Constant &&
+ cast<ConstantSDNode>(Neg.getOperand(1))->getAPIntValue() == OpSize - 1) {
+ Neg = Neg.getOperand(0);
+ MaskLoBits = Log2_64(OpSize);
+ }
+
+ // Check whether Neg has the form (sub NegC, NegOp1) for some NegC and NegOp1.
+ if (Neg.getOpcode() != ISD::SUB)
+ return 0;
+ ConstantSDNode *NegC = dyn_cast<ConstantSDNode>(Neg.getOperand(0));
+ if (!NegC)
+ return 0;
+ SDValue NegOp1 = Neg.getOperand(1);
+
+ // On the RHS of [A], if Pos is Pos' & (OpSize - 1), just replace Pos with
+ // Pos'. The truncation is redundant for the purpose of the equality.
+ if (MaskLoBits &&
+ Pos.getOpcode() == ISD::AND &&
+ Pos.getOperand(1).getOpcode() == ISD::Constant &&
+ cast<ConstantSDNode>(Pos.getOperand(1))->getAPIntValue() == OpSize - 1)
+ Pos = Pos.getOperand(0);
+
+ // The condition we need is now:
+ //
+ // (NegC - NegOp1) & Mask == (OpSize - Pos) & Mask
+ //
+ // If NegOp1 == Pos then we need:
+ //
+ // OpSize & Mask == NegC & Mask
+ //
+ // (because "x & Mask" is a truncation and distributes through subtraction).
+ APInt Width;
+ if (Pos == NegOp1)
+ Width = NegC->getAPIntValue();
+ // Check for cases where Pos has the form (add NegOp1, PosC) for some PosC.
+ // Then the condition we want to prove becomes:
+ //
+ // (NegC - NegOp1) & Mask == (OpSize - (NegOp1 + PosC)) & Mask
+ //
+ // which, again because "x & Mask" is a truncation, becomes:
+ //
+ // NegC & Mask == (OpSize - PosC) & Mask
+ // OpSize & Mask == (NegC + PosC) & Mask
+ else if (Pos.getOpcode() == ISD::ADD &&
+ Pos.getOperand(0) == NegOp1 &&
+ Pos.getOperand(1).getOpcode() == ISD::Constant)
+ Width = (cast<ConstantSDNode>(Pos.getOperand(1))->getAPIntValue() +
+ NegC->getAPIntValue());
+ else
+ return false;
+
+ // Now we just need to check that OpSize & Mask == Width & Mask.
+ if (MaskLoBits)
+ // OpSize & Mask is 0 since Mask is OpSize - 1.
+ return Width.getLoBits(MaskLoBits) == 0;
+ return Width == OpSize;
+}
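
To see why the power-of-two case is safe, here is a small standalone check (illustrative only, assuming a 32-bit operand size; not part of the patch): for Pos in [0, 32), (32 - Pos) & 31 equals Pos == 0 ? 0 : 32 - Pos, so the shift pair below never uses a shift amount of 32 yet still rotates left by Pos.

    #include <cassert>
    #include <cstdint>

    static uint32_t rotlViaShifts(uint32_t X, unsigned Pos) {
      // Neg = (32 - Pos) & 31 stays in [0, 32), matching condition [A] above.
      return (X << Pos) | (X >> ((32u - Pos) & 31u));
    }

    int main() {
      const uint32_t X = 0x12345678u;
      for (unsigned Pos = 0; Pos < 32; ++Pos) {
        uint32_t Expected = Pos ? (X << Pos) | (X >> (32 - Pos)) : X;
        assert(rotlViaShifts(X, Pos) == Expected);
      }
      return 0;
    }
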
+
+// A subroutine of MatchRotate used once we have found an OR of two opposite
+// shifts of Shifted. If Neg == <operand size> - Pos then the OR reduces
+// to both (PosOpcode Shifted, Pos) and (NegOpcode Shifted, Neg), with the
+// former being preferred if supported. InnerPos and InnerNeg are Pos and
+// Neg with outer conversions stripped away.
+SDNode *DAGCombiner::MatchRotatePosNeg(SDValue Shifted, SDValue Pos,
+ SDValue Neg, SDValue InnerPos,
+ SDValue InnerNeg, unsigned PosOpcode,
+ unsigned NegOpcode, SDLoc DL) {
+ // fold (or (shl x, (*ext y)),
+ // (srl x, (*ext (sub 32, y)))) ->
+ // (rotl x, y) or (rotr x, (sub 32, y))
+ //
+ // fold (or (shl x, (*ext (sub 32, y))),
+ // (srl x, (*ext y))) ->
+ // (rotr x, y) or (rotl x, (sub 32, y))
+ EVT VT = Shifted.getValueType();
+ if (matchRotateSub(InnerPos, InnerNeg, VT.getSizeInBits())) {
+ bool HasPos = TLI.isOperationLegalOrCustom(PosOpcode, VT);
+ return DAG.getNode(HasPos ? PosOpcode : NegOpcode, DL, VT, Shifted,
+ HasPos ? Pos : Neg).getNode();
+ }
+
+ return nullptr;
+}
+
// MatchRotate - Handle an 'or' of two operands. If this is one of the many
// idioms for rotate, and if the target supports rotation instructions, generate
// a rot[lr].
SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, SDLoc DL) {
// Must be a legal type. Expanded 'n promoted things won't work with rotates.
EVT VT = LHS.getValueType();
- if (!TLI.isTypeLegal(VT)) return 0;
+ if (!TLI.isTypeLegal(VT)) return nullptr;
// The target must have at least one rotate flavor.
bool HasROTL = TLI.isOperationLegalOrCustom(ISD::ROTL, VT);
bool HasROTR = TLI.isOperationLegalOrCustom(ISD::ROTR, VT);
- if (!HasROTL && !HasROTR) return 0;
+ if (!HasROTL && !HasROTR) return nullptr;
// Match "(X shl/srl V1) & V2" where V2 may not be present.
SDValue LHSShift; // The shift.
SDValue LHSMask; // AND value if any.
if (!MatchRotateHalf(LHS, LHSShift, LHSMask))
- return 0; // Not part of a rotate.
+ return nullptr; // Not part of a rotate.
SDValue RHSShift; // The shift.
SDValue RHSMask; // AND value if any.
if (!MatchRotateHalf(RHS, RHSShift, RHSMask))
- return 0; // Not part of a rotate.
+ return nullptr; // Not part of a rotate.
if (LHSShift.getOperand(0) != RHSShift.getOperand(0))
- return 0; // Not shifting the same value.
+ return nullptr; // Not shifting the same value.
if (LHSShift.getOpcode() == RHSShift.getOpcode())
- return 0; // Shifts must disagree.
+ return nullptr; // Shifts must disagree.
// Canonicalize shl to left side in a shl/srl pair.
if (RHSShift.getOpcode() == ISD::SHL) {
@@ -3342,6 +3622,7 @@ SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, SDLoc DL) {
unsigned OpSizeInBits = VT.getSizeInBits();
SDValue LHSShiftArg = LHSShift.getOperand(0);
SDValue LHSShiftAmt = LHSShift.getOperand(1);
+ SDValue RHSShiftArg = RHSShift.getOperand(0);
SDValue RHSShiftAmt = RHSShift.getOperand(1);
// fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1)
@@ -3351,7 +3632,7 @@ SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, SDLoc DL) {
uint64_t LShVal = cast<ConstantSDNode>(LHSShiftAmt)->getZExtValue();
uint64_t RShVal = cast<ConstantSDNode>(RHSShiftAmt)->getZExtValue();
if ((LShVal + RShVal) != OpSizeInBits)
- return 0;
+ return nullptr;
SDValue Rot = DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT,
LHSShiftArg, HasROTL ? LHSShiftAmt : RHSShiftAmt);
@@ -3378,7 +3659,7 @@ SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, SDLoc DL) {
// If there is a mask here, and we have a variable shift, we can't be sure
// that we're masking out the right stuff.
if (LHSMask.getNode() || RHSMask.getNode())
- return 0;
+ return nullptr;
// If the shift amount is sign/zext/any-extended just peel it off.
SDValue LExtOp0 = LHSShiftAmt;
@@ -3395,30 +3676,17 @@ SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, SDLoc DL) {
RExtOp0 = RHSShiftAmt.getOperand(0);
}
- if (RExtOp0.getOpcode() == ISD::SUB && RExtOp0.getOperand(1) == LExtOp0) {
- // fold (or (shl x, (*ext y)), (srl x, (*ext (sub 32, y)))) ->
- // (rotl x, y)
- // fold (or (shl x, (*ext y)), (srl x, (*ext (sub 32, y)))) ->
- // (rotr x, (sub 32, y))
- if (ConstantSDNode *SUBC =
- dyn_cast<ConstantSDNode>(RExtOp0.getOperand(0)))
- if (SUBC->getAPIntValue() == OpSizeInBits)
- return DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT, LHSShiftArg,
- HasROTL ? LHSShiftAmt : RHSShiftAmt).getNode();
- } else if (LExtOp0.getOpcode() == ISD::SUB &&
- RExtOp0 == LExtOp0.getOperand(1)) {
- // fold (or (shl x, (*ext (sub 32, y))), (srl x, (*ext y))) ->
- // (rotr x, y)
- // fold (or (shl x, (*ext (sub 32, y))), (srl x, (*ext y))) ->
- // (rotl x, (sub 32, y))
- if (ConstantSDNode *SUBC =
- dyn_cast<ConstantSDNode>(LExtOp0.getOperand(0)))
- if (SUBC->getAPIntValue() == OpSizeInBits)
- return DAG.getNode(HasROTR ? ISD::ROTR : ISD::ROTL, DL, VT, LHSShiftArg,
- HasROTR ? RHSShiftAmt : LHSShiftAmt).getNode();
- }
-
- return 0;
+ SDNode *TryL = MatchRotatePosNeg(LHSShiftArg, LHSShiftAmt, RHSShiftAmt,
+ LExtOp0, RExtOp0, ISD::ROTL, ISD::ROTR, DL);
+ if (TryL)
+ return TryL;
+
+ SDNode *TryR = MatchRotatePosNeg(RHSShiftArg, RHSShiftAmt, LHSShiftAmt,
+ RExtOp0, LExtOp0, ISD::ROTR, ISD::ROTL, DL);
+ if (TryR)
+ return TryR;
+
+ return nullptr;
}
SDValue DAGCombiner::visitXOR(SDNode *N) {
@@ -3460,7 +3728,7 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
return N0;
// reassociate xor
SDValue RXOR = ReassociateOps(ISD::XOR, SDLoc(N), N0, N1);
- if (RXOR.getNode() != 0)
+ if (RXOR.getNode())
return RXOR;
// fold !(x cc y) -> (x !cc y)
@@ -3490,7 +3758,7 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
SDValue V = N0.getOperand(0);
V = DAG.getNode(ISD::XOR, SDLoc(N0), V.getValueType(), V,
DAG.getConstant(1, V.getValueType()));
- AddToWorkList(V.getNode());
+ AddToWorklist(V.getNode());
return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, V);
}
@@ -3502,7 +3770,7 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), VT, LHS, N1); // LHS = ~LHS
RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), VT, RHS, N1); // RHS = ~RHS
- AddToWorkList(LHS.getNode()); AddToWorkList(RHS.getNode());
+ AddToWorklist(LHS.getNode()); AddToWorklist(RHS.getNode());
return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS);
}
}
@@ -3514,7 +3782,7 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), VT, LHS, N1); // LHS = ~LHS
RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), VT, RHS, N1); // RHS = ~RHS
- AddToWorkList(LHS.getNode()); AddToWorkList(RHS.getNode());
+ AddToWorklist(LHS.getNode()); AddToWorklist(RHS.getNode());
return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS);
}
}
@@ -3523,7 +3791,7 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
N0->getOperand(1) == N1) {
SDValue X = N0->getOperand(0);
SDValue NotX = DAG.getNOT(SDLoc(X), X, VT);
- AddToWorkList(NotX.getNode());
+ AddToWorklist(NotX.getNode());
return DAG.getNode(ISD::AND, SDLoc(N), VT, NotX, N1);
}
// fold (xor (xor x, c1), c2) -> (xor x, (xor c1, c2))
@@ -3559,7 +3827,11 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
/// visitShiftByConstant - Handle transforms common to the three shifts, when
/// the shift amount is a constant.
-SDValue DAGCombiner::visitShiftByConstant(SDNode *N, unsigned Amt) {
+SDValue DAGCombiner::visitShiftByConstant(SDNode *N, ConstantSDNode *Amt) {
+ // We can't and shouldn't fold opaque constants.
+ if (Amt->isOpaque())
+ return SDValue();
+
SDNode *LHS = N->getOperand(0).getNode();
if (!LHS->hasOneUse()) return SDValue();
@@ -3585,9 +3857,9 @@ SDValue DAGCombiner::visitShiftByConstant(SDNode *N, unsigned Amt) {
break;
}
- // We require the RHS of the binop to be a constant as well.
+ // We require the RHS of the binop to be a constant and not opaque as well.
ConstantSDNode *BinOpCst = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
- if (!BinOpCst) return SDValue();
+ if (!BinOpCst || BinOpCst->isOpaque()) return SDValue();
// FIXME: disable this unless the input to the binop is a shift by a constant.
// If it is not a shift, it pessimizes some common cases like:
@@ -3613,10 +3885,14 @@ SDValue DAGCombiner::visitShiftByConstant(SDNode *N, unsigned Amt) {
return SDValue();
}
+ if (!TLI.isDesirableToCommuteWithShift(LHS))
+ return SDValue();
+
// Fold the constants, shifting the binop RHS by the shift amount.
SDValue NewRHS = DAG.getNode(N->getOpcode(), SDLoc(LHS->getOperand(1)),
N->getValueType(0),
LHS->getOperand(1), N->getOperand(1));
+ assert(isa<ConstantSDNode>(NewRHS) && "Folding was not successful!");
// Create the new shift.
SDValue NewShift = DAG.getNode(N->getOpcode(),
@@ -3627,18 +3903,74 @@ SDValue DAGCombiner::visitShiftByConstant(SDNode *N, unsigned Amt) {
return DAG.getNode(LHS->getOpcode(), SDLoc(N), VT, NewShift, NewRHS);
}
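
As a scalar sanity check of the fold above (illustrative, not part of the patch): for a left shift, shifting a binop whose RHS is a constant is equivalent to shifting the other operand first and shifting the constant by the same amount, e.g. (x + c1) << c2 == (x << c2) + (c1 << c2), and likewise for the bitwise ops.

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t x = 0x1234, c1 = 0x00f0, c2 = 3;
      assert(((x + c1) << c2) == ((x << c2) + (c1 << c2)));
      assert(((x | c1) << c2) == ((x << c2) | (c1 << c2)));
      assert(((x ^ c1) << c2) == ((x << c2) ^ (c1 << c2)));
      return 0;
    }
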
+SDValue DAGCombiner::distributeTruncateThroughAnd(SDNode *N) {
+ assert(N->getOpcode() == ISD::TRUNCATE);
+ assert(N->getOperand(0).getOpcode() == ISD::AND);
+
+ // (truncate:TruncVT (and N00, N01C)) -> (and (truncate:TruncVT N00), TruncC)
+ if (N->hasOneUse() && N->getOperand(0).hasOneUse()) {
+ SDValue N01 = N->getOperand(0).getOperand(1);
+
+ if (ConstantSDNode *N01C = isConstOrConstSplat(N01)) {
+ EVT TruncVT = N->getValueType(0);
+ SDValue N00 = N->getOperand(0).getOperand(0);
+ APInt TruncC = N01C->getAPIntValue();
+ TruncC = TruncC.trunc(TruncVT.getScalarSizeInBits());
+
+ return DAG.getNode(ISD::AND, SDLoc(N), TruncVT,
+ DAG.getNode(ISD::TRUNCATE, SDLoc(N), TruncVT, N00),
+ DAG.getConstant(TruncC, TruncVT));
+ }
+ }
+
+ return SDValue();
+}
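
A quick scalar check of the distribution above (illustrative, not part of the patch): truncating (x & c) to a narrower type yields the same bits as ANDing the truncated x with the truncated c.

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t x = 0x123456789abcdef0ull;
      uint64_t c = 0x00000000000000ffull;
      uint16_t lhs = uint16_t(x & c);             // truncate(and x, c)
      uint16_t rhs = uint16_t(x) & uint16_t(c);   // and(truncate x, truncate c)
      assert(lhs == rhs);
      return 0;
    }
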
+
+SDValue DAGCombiner::visitRotate(SDNode *N) {
+ // fold (rot* x, (trunc (and y, c))) -> (rot* x, (and (trunc y), (trunc c))).
+ if (N->getOperand(1).getOpcode() == ISD::TRUNCATE &&
+ N->getOperand(1).getOperand(0).getOpcode() == ISD::AND) {
+ SDValue NewOp1 = distributeTruncateThroughAnd(N->getOperand(1).getNode());
+ if (NewOp1.getNode())
+ return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
+ N->getOperand(0), NewOp1);
+ }
+ return SDValue();
+}
+
SDValue DAGCombiner::visitSHL(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
EVT VT = N0.getValueType();
- unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();
+ unsigned OpSizeInBits = VT.getScalarSizeInBits();
// fold vector ops
if (VT.isVector()) {
SDValue FoldedVOp = SimplifyVBinOp(N);
if (FoldedVOp.getNode()) return FoldedVOp;
+
+ BuildVectorSDNode *N1CV = dyn_cast<BuildVectorSDNode>(N1);
+ // If setcc produces all-one true value then:
+ // (shl (and (setcc) N01CV) N1CV) -> (and (setcc) N01CV<<N1CV)
+ if (N1CV && N1CV->isConstant()) {
+ if (N0.getOpcode() == ISD::AND) {
+ SDValue N00 = N0->getOperand(0);
+ SDValue N01 = N0->getOperand(1);
+ BuildVectorSDNode *N01CV = dyn_cast<BuildVectorSDNode>(N01);
+
+ if (N01CV && N01CV->isConstant() && N00.getOpcode() == ISD::SETCC &&
+ TLI.getBooleanContents(N00.getOperand(0).getValueType()) ==
+ TargetLowering::ZeroOrNegativeOneBooleanContent) {
+ SDValue C = DAG.FoldConstantArithmetic(ISD::SHL, VT, N01CV, N1CV);
+ if (C.getNode())
+ return DAG.getNode(ISD::AND, SDLoc(N), VT, N00, C);
+ }
+ } else {
+ N1C = isConstOrConstSplat(N1);
+ }
+ }
}
// fold (shl c1, c2) -> c1<<c2
@@ -3662,35 +3994,25 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
return DAG.getConstant(0, VT);
// fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
if (N1.getOpcode() == ISD::TRUNCATE &&
- N1.getOperand(0).getOpcode() == ISD::AND &&
- N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
- SDValue N101 = N1.getOperand(0).getOperand(1);
- if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
- EVT TruncVT = N1.getValueType();
- SDValue N100 = N1.getOperand(0).getOperand(0);
- APInt TruncC = N101C->getAPIntValue();
- TruncC = TruncC.trunc(TruncVT.getSizeInBits());
- return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0,
- DAG.getNode(ISD::AND, SDLoc(N), TruncVT,
- DAG.getNode(ISD::TRUNCATE,
- SDLoc(N),
- TruncVT, N100),
- DAG.getConstant(TruncC, TruncVT)));
- }
+ N1.getOperand(0).getOpcode() == ISD::AND) {
+ SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode());
+ if (NewOp1.getNode())
+ return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0, NewOp1);
}
if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
// fold (shl (shl x, c1), c2) -> 0 or (shl x, (add c1, c2))
- if (N1C && N0.getOpcode() == ISD::SHL &&
- N0.getOperand(1).getOpcode() == ISD::Constant) {
- uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
- uint64_t c2 = N1C->getZExtValue();
- if (c1 + c2 >= OpSizeInBits)
- return DAG.getConstant(0, VT);
- return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0.getOperand(0),
- DAG.getConstant(c1 + c2, N1.getValueType()));
+ if (N1C && N0.getOpcode() == ISD::SHL) {
+ if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
+ uint64_t c1 = N0C1->getZExtValue();
+ uint64_t c2 = N1C->getZExtValue();
+ if (c1 + c2 >= OpSizeInBits)
+ return DAG.getConstant(0, VT);
+ return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0.getOperand(0),
+ DAG.getConstant(c1 + c2, N1.getValueType()));
+ }
}
// fold (shl (ext (shl x, c1)), c2) -> (ext (shl x, (add c1, c2)))
@@ -3701,20 +4023,21 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
if (N1C && (N0.getOpcode() == ISD::ZERO_EXTEND ||
N0.getOpcode() == ISD::ANY_EXTEND ||
N0.getOpcode() == ISD::SIGN_EXTEND) &&
- N0.getOperand(0).getOpcode() == ISD::SHL &&
- isa<ConstantSDNode>(N0.getOperand(0)->getOperand(1))) {
- uint64_t c1 =
- cast<ConstantSDNode>(N0.getOperand(0)->getOperand(1))->getZExtValue();
- uint64_t c2 = N1C->getZExtValue();
- EVT InnerShiftVT = N0.getOperand(0).getValueType();
- uint64_t InnerShiftSize = InnerShiftVT.getScalarType().getSizeInBits();
- if (c2 >= OpSizeInBits - InnerShiftSize) {
- if (c1 + c2 >= OpSizeInBits)
- return DAG.getConstant(0, VT);
- return DAG.getNode(ISD::SHL, SDLoc(N0), VT,
- DAG.getNode(N0.getOpcode(), SDLoc(N0), VT,
- N0.getOperand(0)->getOperand(0)),
- DAG.getConstant(c1 + c2, N1.getValueType()));
+ N0.getOperand(0).getOpcode() == ISD::SHL) {
+ SDValue N0Op0 = N0.getOperand(0);
+ if (ConstantSDNode *N0Op0C1 = isConstOrConstSplat(N0Op0.getOperand(1))) {
+ uint64_t c1 = N0Op0C1->getZExtValue();
+ uint64_t c2 = N1C->getZExtValue();
+ EVT InnerShiftVT = N0Op0.getValueType();
+ uint64_t InnerShiftSize = InnerShiftVT.getScalarSizeInBits();
+ if (c2 >= OpSizeInBits - InnerShiftSize) {
+ if (c1 + c2 >= OpSizeInBits)
+ return DAG.getConstant(0, VT);
+ return DAG.getNode(ISD::SHL, SDLoc(N0), VT,
+ DAG.getNode(N0.getOpcode(), SDLoc(N0), VT,
+ N0Op0->getOperand(0)),
+ DAG.getConstant(c1 + c2, N1.getValueType()));
+ }
}
}
@@ -3722,19 +4045,20 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
// Only fold this if the inner zext has no other uses to avoid increasing
// the total number of instructions.
if (N1C && N0.getOpcode() == ISD::ZERO_EXTEND && N0.hasOneUse() &&
- N0.getOperand(0).getOpcode() == ISD::SRL &&
- isa<ConstantSDNode>(N0.getOperand(0)->getOperand(1))) {
- uint64_t c1 =
- cast<ConstantSDNode>(N0.getOperand(0)->getOperand(1))->getZExtValue();
- if (c1 < VT.getSizeInBits()) {
- uint64_t c2 = N1C->getZExtValue();
- if (c1 == c2) {
- SDValue NewOp0 = N0.getOperand(0);
- EVT CountVT = NewOp0.getOperand(1).getValueType();
- SDValue NewSHL = DAG.getNode(ISD::SHL, SDLoc(N), NewOp0.getValueType(),
- NewOp0, DAG.getConstant(c2, CountVT));
- AddToWorkList(NewSHL.getNode());
- return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N0), VT, NewSHL);
+ N0.getOperand(0).getOpcode() == ISD::SRL) {
+ SDValue N0Op0 = N0.getOperand(0);
+ if (ConstantSDNode *N0Op0C1 = isConstOrConstSplat(N0Op0.getOperand(1))) {
+ uint64_t c1 = N0Op0C1->getZExtValue();
+ if (c1 < VT.getScalarSizeInBits()) {
+ uint64_t c2 = N1C->getZExtValue();
+ if (c1 == c2) {
+ SDValue NewOp0 = N0.getOperand(0);
+ EVT CountVT = NewOp0.getOperand(1).getValueType();
+ SDValue NewSHL = DAG.getNode(ISD::SHL, SDLoc(N), NewOp0.getValueType(),
+ NewOp0, DAG.getConstant(c2, CountVT));
+ AddToWorklist(NewSHL.getNode());
+ return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N0), VT, NewSHL);
+ }
}
}
}
@@ -3743,40 +4067,39 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
// (and (srl x, (sub c1, c2), MASK)
// Only fold this if the inner shift has no other uses -- if it does, folding
// this will increase the total number of instructions.
- if (N1C && N0.getOpcode() == ISD::SRL && N0.hasOneUse() &&
- N0.getOperand(1).getOpcode() == ISD::Constant) {
- uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
- if (c1 < VT.getSizeInBits()) {
- uint64_t c2 = N1C->getZExtValue();
- APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
- VT.getSizeInBits() - c1);
- SDValue Shift;
- if (c2 > c1) {
- Mask = Mask.shl(c2-c1);
- Shift = DAG.getNode(ISD::SHL, SDLoc(N), VT, N0.getOperand(0),
- DAG.getConstant(c2-c1, N1.getValueType()));
- } else {
- Mask = Mask.lshr(c1-c2);
- Shift = DAG.getNode(ISD::SRL, SDLoc(N), VT, N0.getOperand(0),
- DAG.getConstant(c1-c2, N1.getValueType()));
+ if (N1C && N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
+ if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
+ uint64_t c1 = N0C1->getZExtValue();
+ if (c1 < OpSizeInBits) {
+ uint64_t c2 = N1C->getZExtValue();
+ APInt Mask = APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - c1);
+ SDValue Shift;
+ if (c2 > c1) {
+ Mask = Mask.shl(c2 - c1);
+ Shift = DAG.getNode(ISD::SHL, SDLoc(N), VT, N0.getOperand(0),
+ DAG.getConstant(c2 - c1, N1.getValueType()));
+ } else {
+ Mask = Mask.lshr(c1 - c2);
+ Shift = DAG.getNode(ISD::SRL, SDLoc(N), VT, N0.getOperand(0),
+ DAG.getConstant(c1 - c2, N1.getValueType()));
+ }
+ return DAG.getNode(ISD::AND, SDLoc(N0), VT, Shift,
+ DAG.getConstant(Mask, VT));
}
- return DAG.getNode(ISD::AND, SDLoc(N0), VT, Shift,
- DAG.getConstant(Mask, VT));
}
}
// fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
if (N1C && N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1)) {
+ unsigned BitSize = VT.getScalarSizeInBits();
SDValue HiBitsMask =
- DAG.getConstant(APInt::getHighBitsSet(VT.getSizeInBits(),
- VT.getSizeInBits() -
- N1C->getZExtValue()),
- VT);
+ DAG.getConstant(APInt::getHighBitsSet(BitSize,
+ BitSize - N1C->getZExtValue()), VT);
return DAG.getNode(ISD::AND, SDLoc(N), VT, N0.getOperand(0),
HiBitsMask);
}
if (N1C) {
- SDValue NewSHL = visitShiftByConstant(N, N1C->getZExtValue());
+ SDValue NewSHL = visitShiftByConstant(N, N1C);
if (NewSHL.getNode())
return NewSHL;
}
@@ -3796,6 +4119,8 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
if (VT.isVector()) {
SDValue FoldedVOp = SimplifyVBinOp(N);
if (FoldedVOp.getNode()) return FoldedVOp;
+
+ N1C = isConstOrConstSplat(N1);
}
// fold (sra c1, c2) -> (sra c1, c2)
@@ -3829,11 +4154,12 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
// fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
if (N1C && N0.getOpcode() == ISD::SRA) {
- if (ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
+ if (ConstantSDNode *C1 = isConstOrConstSplat(N0.getOperand(1))) {
unsigned Sum = N1C->getZExtValue() + C1->getZExtValue();
- if (Sum >= OpSizeInBits) Sum = OpSizeInBits-1;
+ if (Sum >= OpSizeInBits)
+ Sum = OpSizeInBits - 1;
return DAG.getNode(ISD::SRA, SDLoc(N), VT, N0.getOperand(0),
- DAG.getConstant(Sum, N1C->getValueType(0)));
+ DAG.getConstant(Sum, N1.getValueType()));
}
}
@@ -3842,14 +4168,17 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
// result_size - n != m.
// If truncate is free for the target, sext(shl) is likely to result in better
// code.
- if (N0.getOpcode() == ISD::SHL) {
+ if (N0.getOpcode() == ISD::SHL && N1C) {
// Get the two constants of the shifts, CN0 = m, CN = n.
- const ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
- if (N01C && N1C) {
+ const ConstantSDNode *N01C = isConstOrConstSplat(N0.getOperand(1));
+ if (N01C) {
+ LLVMContext &Ctx = *DAG.getContext();
// Determine what the truncate's result bitsize and type would be.
- EVT TruncVT =
- EVT::getIntegerVT(*DAG.getContext(),
- OpSizeInBits - N1C->getZExtValue());
+ EVT TruncVT = EVT::getIntegerVT(Ctx, OpSizeInBits - N1C->getZExtValue());
+
+ if (VT.isVector())
+ TruncVT = EVT::getVectorVT(Ctx, TruncVT, VT.getVectorNumElements());
+
// Determine the residual right-shift amount.
signed ShiftAmt = N1C->getZExtValue() - N01C->getZExtValue();
@@ -3876,44 +4205,33 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
// fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
if (N1.getOpcode() == ISD::TRUNCATE &&
- N1.getOperand(0).getOpcode() == ISD::AND &&
- N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
- SDValue N101 = N1.getOperand(0).getOperand(1);
- if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
- EVT TruncVT = N1.getValueType();
- SDValue N100 = N1.getOperand(0).getOperand(0);
- APInt TruncC = N101C->getAPIntValue();
- TruncC = TruncC.trunc(TruncVT.getScalarType().getSizeInBits());
- return DAG.getNode(ISD::SRA, SDLoc(N), VT, N0,
- DAG.getNode(ISD::AND, SDLoc(N),
- TruncVT,
- DAG.getNode(ISD::TRUNCATE,
- SDLoc(N),
- TruncVT, N100),
- DAG.getConstant(TruncC, TruncVT)));
- }
- }
-
- // fold (sra (trunc (sr x, c1)), c2) -> (trunc (sra x, c1+c2))
+ N1.getOperand(0).getOpcode() == ISD::AND) {
+ SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode());
+ if (NewOp1.getNode())
+ return DAG.getNode(ISD::SRA, SDLoc(N), VT, N0, NewOp1);
+ }
+
+ // fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2))
// if c1 is equal to the number of bits the trunc removes
if (N0.getOpcode() == ISD::TRUNCATE &&
(N0.getOperand(0).getOpcode() == ISD::SRL ||
N0.getOperand(0).getOpcode() == ISD::SRA) &&
N0.getOperand(0).hasOneUse() &&
N0.getOperand(0).getOperand(1).hasOneUse() &&
- N1C && isa<ConstantSDNode>(N0.getOperand(0).getOperand(1))) {
- EVT LargeVT = N0.getOperand(0).getValueType();
- ConstantSDNode *LargeShiftAmt =
- cast<ConstantSDNode>(N0.getOperand(0).getOperand(1));
-
- if (LargeVT.getScalarType().getSizeInBits() - OpSizeInBits ==
- LargeShiftAmt->getZExtValue()) {
- SDValue Amt =
- DAG.getConstant(LargeShiftAmt->getZExtValue() + N1C->getZExtValue(),
- getShiftAmountTy(N0.getOperand(0).getOperand(0).getValueType()));
- SDValue SRA = DAG.getNode(ISD::SRA, SDLoc(N), LargeVT,
- N0.getOperand(0).getOperand(0), Amt);
- return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, SRA);
+ N1C) {
+ SDValue N0Op0 = N0.getOperand(0);
+ if (ConstantSDNode *LargeShift = isConstOrConstSplat(N0Op0.getOperand(1))) {
+ unsigned LargeShiftVal = LargeShift->getZExtValue();
+ EVT LargeVT = N0Op0.getValueType();
+
+ if (LargeVT.getScalarSizeInBits() - OpSizeInBits == LargeShiftVal) {
+ SDValue Amt =
+ DAG.getConstant(LargeShiftVal + N1C->getZExtValue(),
+ getShiftAmountTy(N0Op0.getOperand(0).getValueType()));
+ SDValue SRA = DAG.getNode(ISD::SRA, SDLoc(N), LargeVT,
+ N0Op0.getOperand(0), Amt);
+ return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, SRA);
+ }
}
}
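To make the trunc condition concrete: with a 64-bit source truncated to 32 bits, the fold only applies when c1 is exactly 32 (the number of bits the truncate removes). A standalone sketch of that case, assuming ordinary two's-complement conversions and arithmetic right shift for signed values (illustrative only):

// Standalone check of (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2))
// for a 64-bit source truncated to 32 bits, i.e. c1 == 32.
#include <cassert>
#include <cstdint>

int main() {
  for (uint64_t x : {0ull, ~0ull, 0x8000000000000000ull, 0x123456789abcdef0ull})
    for (unsigned c2 = 0; c2 < 32; ++c2) {
      int32_t ref = (int32_t)(uint32_t)(x >> 32) >> c2;             // srl, trunc, sra
      int32_t fold = (int32_t)(uint32_t)((int64_t)x >> (32 + c2));  // sra, trunc
      assert(ref == fold);
    }
  return 0;
}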
@@ -3927,7 +4245,7 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, N1);
if (N1C) {
- SDValue NewSRA = visitShiftByConstant(N, N1C->getZExtValue());
+ SDValue NewSRA = visitShiftByConstant(N, N1C);
if (NewSRA.getNode())
return NewSRA;
}
@@ -3947,6 +4265,8 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
if (VT.isVector()) {
SDValue FoldedVOp = SimplifyVBinOp(N);
if (FoldedVOp.getNode()) return FoldedVOp;
+
+ N1C = isConstOrConstSplat(N1);
}
// fold (srl c1, c2) -> c1 >>u c2
@@ -3967,14 +4287,15 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
return DAG.getConstant(0, VT);
// fold (srl (srl x, c1), c2) -> 0 or (srl x, (add c1, c2))
- if (N1C && N0.getOpcode() == ISD::SRL &&
- N0.getOperand(1).getOpcode() == ISD::Constant) {
- uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
- uint64_t c2 = N1C->getZExtValue();
- if (c1 + c2 >= OpSizeInBits)
- return DAG.getConstant(0, VT);
- return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0.getOperand(0),
- DAG.getConstant(c1 + c2, N1.getValueType()));
+ if (N1C && N0.getOpcode() == ISD::SRL) {
+ if (ConstantSDNode *N01C = isConstOrConstSplat(N0.getOperand(1))) {
+ uint64_t c1 = N01C->getZExtValue();
+ uint64_t c2 = N1C->getZExtValue();
+ if (c1 + c2 >= OpSizeInBits)
+ return DAG.getConstant(0, VT);
+ return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0.getOperand(0),
+ DAG.getConstant(c1 + c2, N1.getValueType()));
+ }
}
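For reference, the scalar identity behind the (srl (srl x, c1), c2) fold, checked standalone with 32-bit unsigned operands (illustrative only, not part of the patch):

// Standalone check of (srl (srl x, c1), c2) -> 0 or (srl x, c1 + c2).
#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t x : {0u, ~0u, 0x80000001u, 0xdeadbeefu})
    for (unsigned c1 = 0; c1 < 32; ++c1)
      for (unsigned c2 = 0; c2 < 32; ++c2) {
        uint32_t ref = (x >> c1) >> c2;
        uint32_t fold = (c1 + c2 >= 32) ? 0 : x >> (c1 + c2);
        assert(ref == fold);
      }
  return 0;
}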
// fold (srl (trunc (srl x, c1)), c2) -> 0 or (trunc (srl x, (add c1, c2)))
@@ -3999,18 +4320,21 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
}
// fold (srl (shl x, c), c) -> (and x, cst2)
- if (N1C && N0.getOpcode() == ISD::SHL && N0.getOperand(1) == N1 &&
- N0.getValueSizeInBits() <= 64) {
- uint64_t ShAmt = N1C->getZExtValue()+64-N0.getValueSizeInBits();
- return DAG.getNode(ISD::AND, SDLoc(N), VT, N0.getOperand(0),
- DAG.getConstant(~0ULL >> ShAmt, VT));
+ if (N1C && N0.getOpcode() == ISD::SHL && N0.getOperand(1) == N1) {
+ unsigned BitSize = N0.getScalarValueSizeInBits();
+ if (BitSize <= 64) {
+ uint64_t ShAmt = N1C->getZExtValue() + 64 - BitSize;
+ return DAG.getNode(ISD::AND, SDLoc(N), VT, N0.getOperand(0),
+ DAG.getConstant(~0ULL >> ShAmt, VT));
+ }
}
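The ~0ULL >> ShAmt constant is just a mask of the low BitSize - c bits. A standalone check of the identity for a 32-bit operand (illustrative only, not part of the patch):

// Standalone check of (srl (shl x, c), c) -> (and x, ~0ULL >> (c + 64 - BitSize)).
#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t x : {0u, ~0u, 0x80000001u, 0xcafebabeu})
    for (unsigned c = 0; c < 32; ++c) {
      uint32_t ref = (x << c) >> c;
      uint32_t fold = x & (uint32_t)(~0ull >> (c + 64 - 32)); // same constant as the combine
      assert(ref == fold);
    }
  return 0;
}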
// fold (srl (anyextend x), c) -> (and (anyextend (srl x, c)), mask)
if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
// Shifting in all undef bits?
EVT SmallVT = N0.getOperand(0).getValueType();
- if (N1C->getZExtValue() >= SmallVT.getSizeInBits())
+ unsigned BitSize = SmallVT.getScalarSizeInBits();
+ if (N1C->getZExtValue() >= BitSize)
return DAG.getUNDEF(VT);
if (!LegalTypes || TLI.isTypeDesirableForOp(ISD::SRL, SmallVT)) {
@@ -4018,8 +4342,8 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
SDValue SmallShift = DAG.getNode(ISD::SRL, SDLoc(N0), SmallVT,
N0.getOperand(0),
DAG.getConstant(ShiftAmt, getShiftAmountTy(SmallVT)));
- AddToWorkList(SmallShift.getNode());
- APInt Mask = APInt::getAllOnesValue(VT.getSizeInBits()).lshr(ShiftAmt);
+ AddToWorklist(SmallShift.getNode());
+ APInt Mask = APInt::getAllOnesValue(OpSizeInBits).lshr(ShiftAmt);
return DAG.getNode(ISD::AND, SDLoc(N), VT,
DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, SmallShift),
DAG.getConstant(Mask, VT));
@@ -4028,16 +4352,16 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
// fold (srl (sra X, Y), 31) -> (srl X, 31). This srl only looks at the sign
// bit, which is unmodified by sra.
- if (N1C && N1C->getZExtValue() + 1 == VT.getSizeInBits()) {
+ if (N1C && N1C->getZExtValue() + 1 == OpSizeInBits) {
if (N0.getOpcode() == ISD::SRA)
return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0.getOperand(0), N1);
}
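A standalone check of the sign-bit observation for 32-bit values, assuming arithmetic right shift for signed operands (illustrative only):

// Standalone check of (srl (sra x, y), 31) -> (srl x, 31): an arithmetic
// shift never changes the sign bit, which is all the srl-by-31 reads.
#include <cassert>
#include <cstdint>

int main() {
  for (int32_t x : {0, -1, 0x7fffffff, (int32_t)0x80000000, -42, 42})
    for (unsigned y = 0; y < 32; ++y)
      assert(((uint32_t)(x >> y) >> 31) == ((uint32_t)x >> 31));
  return 0;
}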
// fold (srl (ctlz x), "5") -> x iff x has one bit set (the low bit).
if (N1C && N0.getOpcode() == ISD::CTLZ &&
- N1C->getAPIntValue() == Log2_32(VT.getSizeInBits())) {
+ N1C->getAPIntValue() == Log2_32(OpSizeInBits)) {
APInt KnownZero, KnownOne;
- DAG.ComputeMaskedBits(N0.getOperand(0), KnownZero, KnownOne);
+ DAG.computeKnownBits(N0.getOperand(0), KnownZero, KnownOne);
// If any of the input bits are KnownOne, then the input couldn't be all
// zeros, thus the result of the srl will always be zero.
@@ -4060,7 +4384,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
if (ShAmt) {
Op = DAG.getNode(ISD::SRL, SDLoc(N0), VT, Op,
DAG.getConstant(ShAmt, getShiftAmountTy(Op.getValueType())));
- AddToWorkList(Op.getNode());
+ AddToWorklist(Op.getNode());
}
return DAG.getNode(ISD::XOR, SDLoc(N), VT,
@@ -4070,22 +4394,10 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
// fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
if (N1.getOpcode() == ISD::TRUNCATE &&
- N1.getOperand(0).getOpcode() == ISD::AND &&
- N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
- SDValue N101 = N1.getOperand(0).getOperand(1);
- if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
- EVT TruncVT = N1.getValueType();
- SDValue N100 = N1.getOperand(0).getOperand(0);
- APInt TruncC = N101C->getAPIntValue();
- TruncC = TruncC.trunc(TruncVT.getSizeInBits());
- return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0,
- DAG.getNode(ISD::AND, SDLoc(N),
- TruncVT,
- DAG.getNode(ISD::TRUNCATE,
- SDLoc(N),
- TruncVT, N100),
- DAG.getConstant(TruncC, TruncVT)));
- }
+ N1.getOperand(0).getOpcode() == ISD::AND) {
+ SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode());
+ if (NewOp1.getNode())
+ return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, NewOp1);
}
// fold operands of srl based on knowledge that the low bits are not
@@ -4094,7 +4406,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
return SDValue(N, 0);
if (N1C) {
- SDValue NewSRL = visitShiftByConstant(N, N1C->getZExtValue());
+ SDValue NewSRL = visitShiftByConstant(N, N1C);
if (NewSRL.getNode())
return NewSRL;
}
@@ -4124,12 +4436,12 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
if (N->hasOneUse()) {
SDNode *Use = *N->use_begin();
if (Use->getOpcode() == ISD::BRCOND)
- AddToWorkList(Use);
+ AddToWorklist(Use);
else if (Use->getOpcode() == ISD::TRUNCATE && Use->hasOneUse()) {
// Also look past the truncate.
Use = *Use->use_begin();
if (Use->getOpcode() == ISD::BRCOND)
- AddToWorkList(Use);
+ AddToWorklist(Use);
}
}
@@ -4209,11 +4521,20 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
if (VT == MVT::i1 && N1C && N1C->getAPIntValue() == 1)
return DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N2);
// fold (select C, 0, 1) -> (xor C, 1)
+ // We can't do this reliably if integer-based booleans have different contents
+ // from floating-point-based booleans. This is because we can't tell whether we
+ // have an integer-based boolean or a floating-point-based boolean unless we
+ // can find the SETCC that produced it and inspect its operands. This is
+ // fairly easy if C is the SETCC node, but it can potentially be
+ // undiscoverable (or not reasonably discoverable). For example, it could be
+ // in another basic block or it could require searching a complicated
+ // expression.
if (VT.isInteger() &&
- (VT0 == MVT::i1 ||
- (VT0.isInteger() &&
- TLI.getBooleanContents(false) ==
- TargetLowering::ZeroOrOneBooleanContent)) &&
+ (VT0 == MVT::i1 || (VT0.isInteger() &&
+ TLI.getBooleanContents(false, false) ==
+ TLI.getBooleanContents(false, true) &&
+ TLI.getBooleanContents(false, false) ==
+ TargetLowering::ZeroOrOneBooleanContent)) &&
N1C && N2C && N1C->isNullValue() && N2C->getAPIntValue() == 1) {
SDValue XORNode;
if (VT == VT0)
@@ -4221,7 +4542,7 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
N0, DAG.getConstant(1, VT0));
XORNode = DAG.getNode(ISD::XOR, SDLoc(N0), VT0,
N0, DAG.getConstant(1, VT0));
- AddToWorkList(XORNode.getNode());
+ AddToWorklist(XORNode.getNode());
if (VT.bitsGT(VT0))
return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, XORNode);
return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, XORNode);
@@ -4229,13 +4550,13 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
// fold (select C, 0, X) -> (and (not C), X)
if (VT == VT0 && VT == MVT::i1 && N1C && N1C->isNullValue()) {
SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
- AddToWorkList(NOTNode.getNode());
+ AddToWorklist(NOTNode.getNode());
return DAG.getNode(ISD::AND, SDLoc(N), VT, NOTNode, N2);
}
// fold (select C, X, 1) -> (or (not C), X)
if (VT == VT0 && VT == MVT::i1 && N2C && N2C->getAPIntValue() == 1) {
SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
- AddToWorkList(NOTNode.getNode());
+ AddToWorklist(NOTNode.getNode());
return DAG.getNode(ISD::OR, SDLoc(N), VT, NOTNode, N1);
}
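For reference, the three i1 select folds above reduce to plain boolean algebra when booleans are modelled as 0/1 integers (the ZeroOrOne contents case the code now requires). A standalone check (illustrative only, not part of the patch):

// Standalone check of:
//   (select c, 0, 1) -> (xor c, 1)
//   (select c, 0, x) -> (and (not c), x)
//   (select c, x, 1) -> (or  (not c), x)
#include <cassert>

int main() {
  for (unsigned c = 0; c <= 1; ++c)
    for (unsigned x = 0; x <= 1; ++x) {
      assert((c ? 0u : 1u) == (c ^ 1u));
      assert((c ? 0u : x)  == ((c ^ 1u) & x));
      assert((c ? x : 1u)  == ((c ^ 1u) | x));
    }
  return 0;
}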
// fold (select C, X, 0) -> (and C, X)
@@ -4256,12 +4577,9 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
// fold selects based on a setcc into other things, such as min/max/abs
if (N0.getOpcode() == ISD::SETCC) {
- // FIXME:
- // Check against MVT::Other for SELECT_CC, which is a workaround for targets
- // having to say they don't support SELECT_CC on every type the DAG knows
- // about, since there is no way to mark an opcode illegal at all value types
- if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, MVT::Other) &&
- TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT))
+ if ((!LegalOperations &&
+ TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT)) ||
+ TLI.isOperationLegal(ISD::SELECT_CC, VT))
return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT,
N0.getOperand(0), N0.getOperand(1),
N1, N2, N0.getOperand(2));
@@ -4275,12 +4593,12 @@ static
std::pair<SDValue, SDValue> SplitVSETCC(const SDNode *N, SelectionDAG &DAG) {
SDLoc DL(N);
EVT LoVT, HiVT;
- llvm::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
+ std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
// Split the inputs.
SDValue Lo, Hi, LL, LH, RL, RH;
- llvm::tie(LL, LH) = DAG.SplitVectorOperand(N, 0);
- llvm::tie(RL, RH) = DAG.SplitVectorOperand(N, 1);
+ std::tie(LL, LH) = DAG.SplitVectorOperand(N, 0);
+ std::tie(RL, RH) = DAG.SplitVectorOperand(N, 1);
Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2));
Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2));
@@ -4288,6 +4606,56 @@ std::pair<SDValue, SDValue> SplitVSETCC(const SDNode *N, SelectionDAG &DAG) {
return std::make_pair(Lo, Hi);
}
+// This function assumes all the vselect's arguments are CONCAT_VECTORS
+// nodes and that the condition is a BV of ConstantSDNodes (or undefs).
+static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG) {
+ SDLoc dl(N);
+ SDValue Cond = N->getOperand(0);
+ SDValue LHS = N->getOperand(1);
+ SDValue RHS = N->getOperand(2);
+ MVT VT = N->getSimpleValueType(0);
+ int NumElems = VT.getVectorNumElements();
+ assert(LHS.getOpcode() == ISD::CONCAT_VECTORS &&
+ RHS.getOpcode() == ISD::CONCAT_VECTORS &&
+ Cond.getOpcode() == ISD::BUILD_VECTOR);
+
+ // We're sure we have an even number of elements due to the
+ // concat_vectors we have as arguments to vselect.
+ // Skip BV elements until we find one that's not an UNDEF.
+ // After we find a non-UNDEF element, keep looping until we get to half the
+ // length of the BV and check that all the non-undef elements are the same.
+ ConstantSDNode *BottomHalf = nullptr;
+ for (int i = 0; i < NumElems / 2; ++i) {
+ if (Cond->getOperand(i)->getOpcode() == ISD::UNDEF)
+ continue;
+
+ if (BottomHalf == nullptr)
+ BottomHalf = cast<ConstantSDNode>(Cond.getOperand(i));
+ else if (Cond->getOperand(i).getNode() != BottomHalf)
+ return SDValue();
+ }
+
+ // Do the same for the second half of the BuildVector
+ ConstantSDNode *TopHalf = nullptr;
+ for (int i = NumElems / 2; i < NumElems; ++i) {
+ if (Cond->getOperand(i)->getOpcode() == ISD::UNDEF)
+ continue;
+
+ if (TopHalf == nullptr)
+ TopHalf = cast<ConstantSDNode>(Cond.getOperand(i));
+ else if (Cond->getOperand(i).getNode() != TopHalf)
+ return SDValue();
+ }
+
+ assert(TopHalf && BottomHalf &&
+ "One half of the selector was all UNDEFs and the other was all the "
+ "same value. This should have been addressed before this function.");
+ return DAG.getNode(
+ ISD::CONCAT_VECTORS, dl, VT,
+ BottomHalf->isNullValue() ? RHS->getOperand(0) : LHS->getOperand(0),
+ TopHalf->isNullValue() ? RHS->getOperand(1) : LHS->getOperand(1));
+}
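A small standalone illustration of what ConvertSelectToConcatVector computes, using plain arrays for a 4-wide select built from two 2-wide halves (illustrative only; the element values are made up for the example):

// When the constant mask is uniform within each half, the vselect collapses
// to a concat of the chosen halves: here LHS = concat(A0, A1), RHS = concat(B0, B1).
#include <array>
#include <cassert>

int main() {
  std::array<int, 2> A0{1, 2}, A1{3, 4}, B0{5, 6}, B1{7, 8};
  // Mask: bottom half all-ones (take LHS), top half all-zeros (take RHS).
  std::array<int, 4> Mask{-1, -1, 0, 0};

  // Element-wise vselect over the concatenated operands.
  std::array<int, 4> LHS{A0[0], A0[1], A1[0], A1[1]};
  std::array<int, 4> RHS{B0[0], B0[1], B1[0], B1[1]};
  std::array<int, 4> Ref;
  for (int i = 0; i < 4; ++i)
    Ref[i] = Mask[i] ? LHS[i] : RHS[i];

  // Folded form: concat of the half picked by each (uniform) mask half.
  std::array<int, 4> Fold{A0[0], A0[1], B1[0], B1[1]};
  assert(Ref == Fold);
  return 0;
}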
+
SDValue DAGCombiner::visitVSELECT(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
@@ -4319,8 +4687,8 @@ SDValue DAGCombiner::visitVSELECT(SDNode *N) {
ISD::SRA, DL, VT, LHS,
DAG.getConstant(VT.getScalarType().getSizeInBits() - 1, VT));
SDValue Add = DAG.getNode(ISD::ADD, DL, VT, LHS, Shift);
- AddToWorkList(Shift.getNode());
- AddToWorkList(Add.getNode());
+ AddToWorklist(Shift.getNode());
+ AddToWorklist(Add.getNode());
return DAG.getNode(ISD::XOR, DL, VT, Add, Shift);
}
}
@@ -4338,21 +4706,39 @@ SDValue DAGCombiner::visitVSELECT(SDNode *N) {
return SDValue();
SDValue Lo, Hi, CCLo, CCHi, LL, LH, RL, RH;
- llvm::tie(CCLo, CCHi) = SplitVSETCC(N0.getNode(), DAG);
- llvm::tie(LL, LH) = DAG.SplitVectorOperand(N, 1);
- llvm::tie(RL, RH) = DAG.SplitVectorOperand(N, 2);
+ std::tie(CCLo, CCHi) = SplitVSETCC(N0.getNode(), DAG);
+ std::tie(LL, LH) = DAG.SplitVectorOperand(N, 1);
+ std::tie(RL, RH) = DAG.SplitVectorOperand(N, 2);
Lo = DAG.getNode(N->getOpcode(), DL, LL.getValueType(), CCLo, LL, RL);
Hi = DAG.getNode(N->getOpcode(), DL, LH.getValueType(), CCHi, LH, RH);
// Add the new VSELECT nodes to the work list in case they need to be split
// again.
- AddToWorkList(Lo.getNode());
- AddToWorkList(Hi.getNode());
+ AddToWorklist(Lo.getNode());
+ AddToWorklist(Hi.getNode());
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
}
+ // Fold (vselect (build_vector all_ones), N1, N2) -> N1
+ if (ISD::isBuildVectorAllOnes(N0.getNode()))
+ return N1;
+ // Fold (vselect (build_vector all_zeros), N1, N2) -> N2
+ if (ISD::isBuildVectorAllZeros(N0.getNode()))
+ return N2;
+
+ // The ConvertSelectToConcatVector function assumes both of the above
+ // checks for (vselect (build_vector all{ones,zeros}) ...) have been made
+ // and addressed.
+ if (N1.getOpcode() == ISD::CONCAT_VECTORS &&
+ N2.getOpcode() == ISD::CONCAT_VECTORS &&
+ ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
+ SDValue CV = ConvertSelectToConcatVector(N, DAG);
+ if (CV.getNode())
+ return CV;
+ }
+
return SDValue();
}
@@ -4372,7 +4758,7 @@ SDValue DAGCombiner::visitSELECT_CC(SDNode *N) {
SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()),
N0, N1, CC, SDLoc(N), false);
if (SCC.getNode()) {
- AddToWorkList(SCC.getNode());
+ AddToWorklist(SCC.getNode());
if (ConstantSDNode *SCCC = dyn_cast<ConstantSDNode>(SCC.getNode())) {
if (!SCCC->isNullValue())
@@ -4402,6 +4788,65 @@ SDValue DAGCombiner::visitSETCC(SDNode *N) {
SDLoc(N));
}
+// tryToFoldExtendOfConstant - Try to fold a sext/zext/aext
+// dag node into a ConstantSDNode or a build_vector of constants.
+// This function is called by the DAGCombiner when visiting sext/zext/aext
+// dag nodes (see, for example, DAGCombiner::visitSIGN_EXTEND).
+// Vector extends are not folded if operations are legal; this is to
+// avoid introducing illegal build_vector dag nodes.
+static SDNode *tryToFoldExtendOfConstant(SDNode *N, const TargetLowering &TLI,
+ SelectionDAG &DAG, bool LegalTypes,
+ bool LegalOperations) {
+ unsigned Opcode = N->getOpcode();
+ SDValue N0 = N->getOperand(0);
+ EVT VT = N->getValueType(0);
+
+ assert((Opcode == ISD::SIGN_EXTEND || Opcode == ISD::ZERO_EXTEND ||
+ Opcode == ISD::ANY_EXTEND) && "Expected EXTEND dag node in input!");
+
+ // fold (sext c1) -> c1
+ // fold (zext c1) -> c1
+ // fold (aext c1) -> c1
+ if (isa<ConstantSDNode>(N0))
+ return DAG.getNode(Opcode, SDLoc(N), VT, N0).getNode();
+
+ // fold (sext (build_vector AllConstants)) -> (build_vector AllConstants)
+ // fold (zext (build_vector AllConstants)) -> (build_vector AllConstants)
+ // fold (aext (build_vector AllConstants)) -> (build_vector AllConstants)
+ EVT SVT = VT.getScalarType();
+ if (!(VT.isVector() &&
+ (!LegalTypes || (!LegalOperations && TLI.isTypeLegal(SVT))) &&
+ ISD::isBuildVectorOfConstantSDNodes(N0.getNode())))
+ return nullptr;
+
+ // We can fold this node into a build_vector.
+ unsigned VTBits = SVT.getSizeInBits();
+ unsigned EVTBits = N0->getValueType(0).getScalarType().getSizeInBits();
+ unsigned ShAmt = VTBits - EVTBits;
+ SmallVector<SDValue, 8> Elts;
+ unsigned NumElts = N0->getNumOperands();
+ SDLoc DL(N);
+
+ for (unsigned i=0; i != NumElts; ++i) {
+ SDValue Op = N0->getOperand(i);
+ if (Op->getOpcode() == ISD::UNDEF) {
+ Elts.push_back(DAG.getUNDEF(SVT));
+ continue;
+ }
+
+ ConstantSDNode *CurrentND = cast<ConstantSDNode>(Op);
+ const APInt &C = APInt(VTBits, CurrentND->getAPIntValue().getZExtValue());
+ if (Opcode == ISD::SIGN_EXTEND)
+ Elts.push_back(DAG.getConstant(C.shl(ShAmt).ashr(ShAmt).getZExtValue(),
+ SVT));
+ else
+ Elts.push_back(DAG.getConstant(C.shl(ShAmt).lshr(ShAmt).getZExtValue(),
+ SVT));
+ }
+
+ return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Elts).getNode();
+}
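The shl followed by ashr (for sext) or lshr (for zext) is the usual trick for widening a narrow constant into a wider integer type. A standalone check for an i8 element widened to 32 bits, assuming two's-complement conversions (illustrative only, not part of the patch):

// Standalone check of the shl/ashr (sext) and shl/lshr (zext) constant-widening
// trick used above, with ShAmt = VTBits - EVTBits = 32 - 8.
#include <cassert>
#include <cstdint>

int main() {
  const unsigned ShAmt = 32 - 8;
  for (uint32_t c = 0; c < 256; ++c) {              // every i8 constant
    int32_t sext = (int32_t)(c << ShAmt) >> ShAmt;  // sign-extend the low 8 bits
    uint32_t zext = (c << ShAmt) >> ShAmt;          // zero-extend the low 8 bits
    assert(sext == (int32_t)(int8_t)c);
    assert(zext == (uint32_t)(uint8_t)c);
  }
  return 0;
}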
+
// ExtendUsesToFormExtLoad - Trying to extend uses of a load to enable this:
// "fold ({s|z|a}ext (load x)) -> ({s|z|a}ext (truncate ({s|z|a}extload x)))"
// transformation. Returns true if extension are possible and the above
@@ -4483,8 +4928,7 @@ void DAGCombiner::ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
}
Ops.push_back(SetCC->getOperand(2));
- CombineTo(SetCC, DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0),
- &Ops[0], Ops.size()));
+ CombineTo(SetCC, DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
}
}
@@ -4492,9 +4936,9 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
- // fold (sext c1) -> c1
- if (isa<ConstantSDNode>(N0))
- return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N0);
+ if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes,
+ LegalOperations))
+ return SDValue(Res, 0);
// fold (sext (sext x)) -> (sext x)
// fold (sext (aext x)) -> (sext x)
@@ -4511,7 +4955,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
// CombineTo deleted the truncate, if needed, but not what's under it.
- AddToWorkList(oye);
+ AddToWorklist(oye);
}
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
@@ -4558,6 +5002,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
// on vectors in one instruction. We only perform this transformation on
// scalars.
if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() &&
+ ISD::isUNINDEXEDLoad(N0.getNode()) &&
((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
TLI.isLoadExtLegal(ISD::SEXTLOAD, N0.getValueType()))) {
bool DoXform = true;
@@ -4610,7 +5055,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
TLI.isLoadExtLegal(ISD::SEXTLOAD, N0.getValueType()) &&
(!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0.getOperand(0));
- if (LN0->getExtensionType() != ISD::ZEXTLOAD) {
+ if (LN0->getExtensionType() != ISD::ZEXTLOAD && LN0->isUnindexed()) {
bool DoXform = true;
SmallVector<SDNode*, 4> SetCCs;
if (!N0.hasOneUse())
@@ -4638,12 +5083,12 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
}
if (N0.getOpcode() == ISD::SETCC) {
+ EVT N0VT = N0.getOperand(0).getValueType();
// sext(setcc) -> sext_in_reg(vsetcc) for vectors.
// Only do this before legalize for now.
if (VT.isVector() && !LegalOperations &&
- TLI.getBooleanContents(true) ==
- TargetLowering::ZeroOrNegativeOneBooleanContent) {
- EVT N0VT = N0.getOperand(0).getValueType();
+ TLI.getBooleanContents(N0VT) ==
+ TargetLowering::ZeroOrNegativeOneBooleanContent) {
// On some architectures (such as SSE/NEON/etc) the SETCC result type is
// of the same size as the compared operands. Only optimize sext(setcc())
// if this is the case.
@@ -4671,7 +5116,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
}
}
- // sext(setcc x, y, cc) -> (select_cc x, y, -1, 0, cc)
+ // sext(setcc x, y, cc) -> (select (setcc x, y, cc), -1, 0)
unsigned ElementWidth = VT.getScalarType().getSizeInBits();
SDValue NegOne =
DAG.getConstant(APInt::getAllOnesValue(ElementWidth), VT);
@@ -4680,15 +5125,21 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
NegOne, DAG.getConstant(0, VT),
cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
if (SCC.getNode()) return SCC;
- if (!VT.isVector() &&
- (!LegalOperations ||
- TLI.isOperationLegal(ISD::SETCC, getSetCCResultType(VT)))) {
- return DAG.getSelect(SDLoc(N), VT,
- DAG.getSetCC(SDLoc(N),
- getSetCCResultType(VT),
- N0.getOperand(0), N0.getOperand(1),
- cast<CondCodeSDNode>(N0.getOperand(2))->get()),
- NegOne, DAG.getConstant(0, VT));
+
+ if (!VT.isVector()) {
+ EVT SetCCVT = getSetCCResultType(N0.getOperand(0).getValueType());
+ if (!LegalOperations || TLI.isOperationLegal(ISD::SETCC, SetCCVT)) {
+ SDLoc DL(N);
+ ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
+ SDValue SetCC = DAG.getSetCC(DL,
+ SetCCVT,
+ N0.getOperand(0), N0.getOperand(1), CC);
+ EVT SelectVT = getSetCCResultType(VT);
+ return DAG.getSelect(DL, VT,
+ DAG.getSExtOrTrunc(SetCC, DL, SelectVT),
+ NegOne, DAG.getConstant(0, VT));
+
+ }
}
}
@@ -4703,13 +5154,13 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
// isTruncateOf - If N is a truncate of some other value, return true and record
// the value being truncated in Op and which of Op's bits are zero in KnownZero.
// This function computes KnownZero to avoid a duplicated call to
-// ComputeMaskedBits in the caller.
+// computeKnownBits in the caller.
static bool isTruncateOf(SelectionDAG &DAG, SDValue N, SDValue &Op,
APInt &KnownZero) {
APInt KnownOne;
if (N->getOpcode() == ISD::TRUNCATE) {
Op = N->getOperand(0);
- DAG.ComputeMaskedBits(Op, KnownZero, KnownOne);
+ DAG.computeKnownBits(Op, KnownZero, KnownOne);
return true;
}
@@ -4730,7 +5181,7 @@ static bool isTruncateOf(SelectionDAG &DAG, SDValue N, SDValue &Op,
else
return false;
- DAG.ComputeMaskedBits(Op, KnownZero, KnownOne);
+ DAG.computeKnownBits(Op, KnownZero, KnownOne);
if (!(KnownZero | APInt(Op.getValueSizeInBits(), 1)).isAllOnesValue())
return false;
@@ -4742,9 +5193,10 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
- // fold (zext c1) -> c1
- if (isa<ConstantSDNode>(N0))
- return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, N0);
+ if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes,
+ LegalOperations))
+ return SDValue(Res, 0);
+
// fold (zext (zext x)) -> (zext x)
// fold (zext (aext x)) -> (zext x)
if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
@@ -4784,7 +5236,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
// CombineTo deleted the truncate, if needed, but not what's under it.
- AddToWorkList(oye);
+ AddToWorklist(oye);
}
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
@@ -4802,7 +5254,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
// CombineTo deleted the truncate, if needed, but not what's under it.
- AddToWorkList(oye);
+ AddToWorklist(oye);
}
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
@@ -4810,10 +5262,10 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
SDValue Op = N0.getOperand(0);
if (Op.getValueType().bitsLT(VT)) {
Op = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, Op);
- AddToWorkList(Op.getNode());
+ AddToWorklist(Op.getNode());
} else if (Op.getValueType().bitsGT(VT)) {
Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Op);
- AddToWorkList(Op.getNode());
+ AddToWorklist(Op.getNode());
}
return DAG.getZeroExtendInReg(Op, SDLoc(N),
N0.getValueType().getScalarType());
@@ -4844,6 +5296,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
// on vectors in one instruction. We only perform this transformation on
// scalars.
if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() &&
+ ISD::isUNINDEXEDLoad(N0.getNode()) &&
((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
TLI.isLoadExtLegal(ISD::ZEXTLOAD, N0.getValueType()))) {
bool DoXform = true;
@@ -4876,7 +5329,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
TLI.isLoadExtLegal(ISD::ZEXTLOAD, N0.getValueType()) &&
(!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0.getOperand(0));
- if (LN0->getExtensionType() != ISD::SEXTLOAD) {
+ if (LN0->getExtensionType() != ISD::SEXTLOAD && LN0->isUnindexed()) {
bool DoXform = true;
SmallVector<SDNode*, 4> SetCCs;
if (!N0.hasOneUse())
@@ -4925,10 +5378,14 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
}
if (N0.getOpcode() == ISD::SETCC) {
- if (!LegalOperations && VT.isVector()) {
+ if (!LegalOperations && VT.isVector() &&
+ N0.getValueType().getVectorElementType() == MVT::i1) {
+ EVT N0VT = N0.getOperand(0).getValueType();
+ if (getSetCCResultType(N0VT) == N0.getValueType())
+ return SDValue();
+
// zext(setcc) -> (and (vsetcc), (1, 1, ...)) for vectors.
// Only do this before legalize for now.
- EVT N0VT = N0.getOperand(0).getValueType();
EVT EltVT = VT.getVectorElementType();
SmallVector<SDValue,8> OneOps(VT.getVectorNumElements(),
DAG.getConstant(1, EltVT));
@@ -4943,7 +5400,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
N0.getOperand(1),
cast<CondCodeSDNode>(N0.getOperand(2))->get()),
DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT,
- &OneOps[0], OneOps.size()));
+ OneOps));
// If the desired elements are smaller or larger than the source
// elements we can use a matching integer vector type and then
@@ -4960,8 +5417,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
cast<CondCodeSDNode>(N0.getOperand(2))->get());
return DAG.getNode(ISD::AND, SDLoc(N), VT,
DAG.getSExtOrTrunc(VsetCC, SDLoc(N), VT),
- DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT,
- &OneOps[0], OneOps.size()));
+ DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, OneOps));
}
// zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
@@ -5007,9 +5463,10 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
- // fold (aext c1) -> c1
- if (isa<ConstantSDNode>(N0))
- return DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, N0);
+ if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes,
+ LegalOperations))
+ return SDValue(Res, 0);
+
// fold (aext (aext x)) -> (aext x)
// fold (aext (zext x)) -> (zext x)
// fold (aext (sext x)) -> (sext x)
@@ -5027,7 +5484,7 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
// CombineTo deleted the truncate, if needed, but not what's under it.
- AddToWorkList(oye);
+ AddToWorklist(oye);
}
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
@@ -5067,8 +5524,8 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
// on vectors in one instruction. We only perform this transformation on
// scalars.
if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() &&
- ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
- TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType()))) {
+ ISD::isUNINDEXEDLoad(N0.getNode()) &&
+ TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType())) {
bool DoXform = true;
SmallVector<SDNode*, 4> SetCCs;
if (!N0.hasOneUse())
@@ -5096,20 +5553,26 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
!ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
N0.hasOneUse()) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
+ ISD::LoadExtType ExtType = LN0->getExtensionType();
EVT MemVT = LN0->getMemoryVT();
- SDValue ExtLoad = DAG.getExtLoad(LN0->getExtensionType(), SDLoc(N),
- VT, LN0->getChain(), LN0->getBasePtr(),
- MemVT, LN0->getMemOperand());
- CombineTo(N, ExtLoad);
- CombineTo(N0.getNode(),
- DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
- N0.getValueType(), ExtLoad),
- ExtLoad.getValue(1));
- return SDValue(N, 0); // Return N so it doesn't get rechecked!
+ if (!LegalOperations || TLI.isLoadExtLegal(ExtType, MemVT)) {
+ SDValue ExtLoad = DAG.getExtLoad(ExtType, SDLoc(N),
+ VT, LN0->getChain(), LN0->getBasePtr(),
+ MemVT, LN0->getMemOperand());
+ CombineTo(N, ExtLoad);
+ CombineTo(N0.getNode(),
+ DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
+ N0.getValueType(), ExtLoad),
+ ExtLoad.getValue(1));
+ return SDValue(N, 0); // Return N so it doesn't get rechecked!
+ }
}
if (N0.getOpcode() == ISD::SETCC) {
- // aext(setcc) -> sext_in_reg(vsetcc) for vectors.
+ // For vectors:
+ // aext(setcc) -> vsetcc
+ // aext(setcc) -> truncate(vsetcc)
+ // aext(setcc) -> aext(vsetcc)
// Only do this before legalize for now.
if (VT.isVector() && !LegalOperations) {
EVT N0VT = N0.getOperand(0).getValueType();
@@ -5124,19 +5587,14 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
cast<CondCodeSDNode>(N0.getOperand(2))->get());
// If the desired elements are smaller or larger than the source
// elements we can use a matching integer vector type and then
- // truncate/sign extend
+ // truncate/any extend
else {
- EVT MatchingElementType =
- EVT::getIntegerVT(*DAG.getContext(),
- N0VT.getScalarType().getSizeInBits());
- EVT MatchingVectorType =
- EVT::getVectorVT(*DAG.getContext(), MatchingElementType,
- N0VT.getVectorNumElements());
+ EVT MatchingVectorType = N0VT.changeVectorElementTypeToInteger();
SDValue VsetCC =
DAG.getSetCC(SDLoc(N), MatchingVectorType, N0.getOperand(0),
N0.getOperand(1),
cast<CondCodeSDNode>(N0.getOperand(2))->get());
- return DAG.getSExtOrTrunc(VsetCC, SDLoc(N), VT);
+ return DAG.getAnyExtOrTrunc(VsetCC, SDLoc(N), VT);
}
}
@@ -5160,7 +5618,7 @@ SDValue DAGCombiner::GetDemandedBits(SDValue V, const APInt &Mask) {
default: break;
case ISD::Constant: {
const ConstantSDNode *CV = cast<ConstantSDNode>(V.getNode());
- assert(CV != 0 && "Const value should be ConstSDNode.");
+ assert(CV && "Const value should be ConstSDNode.");
const APInt &CVal = CV->getAPIntValue();
APInt NewVal = CVal & Mask;
if (NewVal != CVal)
@@ -5324,7 +5782,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(LN0),
PtrType, LN0->getBasePtr(),
DAG.getConstant(PtrOff, PtrType));
- AddToWorkList(NewPtr.getNode());
+ AddToWorklist(NewPtr.getNode());
SDValue Load;
if (ExtType == ISD::NON_EXTLOAD)
@@ -5339,7 +5797,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
NewAlign, LN0->getTBAAInfo());
// Replace the old load's chain with the new load's chain.
- WorkListRemover DeadNodes(*this);
+ WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
// Shift the result left, if we've swallowed a left shift.
@@ -5438,7 +5896,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
LN0->getMemOperand());
CombineTo(N, ExtLoad);
CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
- AddToWorkList(ExtLoad.getNode());
+ AddToWorklist(ExtLoad.getNode());
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
// fold (sext_inreg (zextload x)) -> (sextload x) iff load has one use
@@ -5461,11 +5919,34 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
if (EVTBits <= 16 && N0.getOpcode() == ISD::OR) {
SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
N0.getOperand(1), false);
- if (BSwap.getNode() != 0)
+ if (BSwap.getNode())
return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
BSwap, N1);
}
+ // Fold a sext_inreg of a build_vector of ConstantSDNodes or undefs
+ // into a build_vector.
+ if (ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
+ SmallVector<SDValue, 8> Elts;
+ unsigned NumElts = N0->getNumOperands();
+ unsigned ShAmt = VTBits - EVTBits;
+
+ for (unsigned i = 0; i != NumElts; ++i) {
+ SDValue Op = N0->getOperand(i);
+ if (Op->getOpcode() == ISD::UNDEF) {
+ Elts.push_back(Op);
+ continue;
+ }
+
+ ConstantSDNode *CurrentND = cast<ConstantSDNode>(Op);
+ const APInt &C = APInt(VTBits, CurrentND->getAPIntValue().getZExtValue());
+ Elts.push_back(DAG.getConstant(C.shl(ShAmt).ashr(ShAmt).getZExtValue(),
+ Op.getValueType()));
+ }
+
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, Elts);
+ }
+
return SDValue();
}
@@ -5510,7 +5991,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
// creates this pattern) and before operation legalization after which
// we need to be more careful about the vector instructions that we generate.
if (N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
- LegalTypes && !LegalOperations && N0->hasOneUse()) {
+ LegalTypes && !LegalOperations && N0->hasOneUse() && VT != MVT::i1) {
EVT VecTy = N0.getOperand(0).getValueType();
EVT ExTy = N0.getValueType();
@@ -5537,6 +6018,19 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
}
}
+ // trunc (select c, a, b) -> select c, (trunc a), (trunc b)
+ if (N0.getOpcode() == ISD::SELECT) {
+ EVT SrcVT = N0.getValueType();
+ if ((!LegalOperations || TLI.isOperationLegal(ISD::SELECT, SrcVT)) &&
+ TLI.isTruncateFree(SrcVT, VT)) {
+ SDLoc SL(N0);
+ SDValue Cond = N0.getOperand(0);
+ SDValue TruncOp0 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(1));
+ SDValue TruncOp1 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(2));
+ return DAG.getNode(ISD::SELECT, SDLoc(N), VT, Cond, TruncOp0, TruncOp1);
+ }
+ }
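For reference, truncation distributes over select on a per-operand basis; a standalone check for a 32-bit to 16-bit truncate (illustrative only, not part of the patch):

// Standalone check of trunc(select c, a, b) -> select c, trunc(a), trunc(b).
#include <cassert>
#include <cstdint>

int main() {
  for (unsigned c = 0; c <= 1; ++c)
    for (uint32_t a : {0u, 0x12345678u, ~0u})
      for (uint32_t b : {0u, 0x9abcdef0u, 0x0000ffffu})
        assert((uint16_t)(c ? a : b) == (c ? (uint16_t)a : (uint16_t)b));
  return 0;
}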
+
// Fold a series of buildvector, bitcast, and truncate if possible.
// For example fold
// (2xi32 trunc (bitcast ((4xi32)buildvector x, x, y, y) 2xi64)) to
@@ -5564,8 +6058,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
for (unsigned i = 0, e = BuildVecNumElts; i != e; i += TruncEltOffset)
Opnds.push_back(BuildVect.getOperand(i));
- return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, &Opnds[0],
- Opnds.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, Opnds);
}
}
@@ -5587,6 +6080,20 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
SDValue Reduced = ReduceLoadWidth(N);
if (Reduced.getNode())
return Reduced;
+ // Handle the case where the load remains an extending load even
+ // after truncation.
+ if (N0.hasOneUse() && ISD::isUNINDEXEDLoad(N0.getNode())) {
+ LoadSDNode *LN0 = cast<LoadSDNode>(N0);
+ if (!LN0->isVolatile() &&
+ LN0->getMemoryVT().getStoreSizeInBits() < VT.getSizeInBits()) {
+ SDValue NewLoad = DAG.getExtLoad(LN0->getExtensionType(), SDLoc(LN0),
+ VT, LN0->getChain(), LN0->getBasePtr(),
+ LN0->getMemoryVT(),
+ LN0->getMemOperand());
+ DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLoad.getValue(1));
+ return NewLoad;
+ }
+ }
}
// fold (trunc (concat ... x ...)) -> (concat ..., (trunc x), ...),
// where ... are all 'undef'.
@@ -5623,11 +6130,10 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
continue;
}
SDValue NV = DAG.getNode(ISD::TRUNCATE, SDLoc(V), VTs[i], V);
- AddToWorkList(NV.getNode());
+ AddToWorklist(NV.getNode());
Opnds.push_back(NV);
}
- return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
- &Opnds[0], Opnds.size());
+ return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Opnds);
}
}
@@ -5654,8 +6160,7 @@ SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
LoadSDNode *LD1 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 0));
LoadSDNode *LD2 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 1));
if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse() ||
- LD1->getPointerInfo().getAddrSpace() !=
- LD2->getPointerInfo().getAddrSpace())
+ LD1->getAddressSpace() != LD2->getAddressSpace())
return SDValue();
EVT LD1VT = LD1->getValueType(0);
@@ -5691,14 +6196,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
if (!LegalTypes &&
N0.getOpcode() == ISD::BUILD_VECTOR && N0.getNode()->hasOneUse() &&
VT.isVector()) {
- bool isSimple = true;
- for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i)
- if (N0.getOperand(i).getOpcode() != ISD::UNDEF &&
- N0.getOperand(i).getOpcode() != ISD::Constant &&
- N0.getOperand(i).getOpcode() != ISD::ConstantFP) {
- isSimple = false;
- break;
- }
+ bool isSimple = cast<BuildVectorSDNode>(N0)->isConstant();
EVT DestEltVT = N->getValueType(0).getVectorElementType();
assert(!DestEltVT.isVector() &&
@@ -5734,6 +6232,9 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
// Do not change the width of a volatile load.
!cast<LoadSDNode>(N0)->isVolatile() &&
+ // Do not remove the cast if the types differ in endian layout.
+ TLI.hasBigEndianPartOrdering(N0.getValueType()) ==
+ TLI.hasBigEndianPartOrdering(VT) &&
(!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)) &&
TLI.isLoadBitCastBeneficial(N0.getValueType(), VT)) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
@@ -5747,7 +6248,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
LN0->isVolatile(), LN0->isNonTemporal(),
LN0->isInvariant(), OrigAlign,
LN0->getTBAAInfo());
- AddToWorkList(N);
+ AddToWorklist(N);
CombineTo(N0.getNode(),
DAG.getNode(ISD::BITCAST, SDLoc(N0),
N0.getValueType(), Load),
@@ -5765,7 +6266,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
!VT.isVector() && !N0.getValueType().isVector()) {
SDValue NewConv = DAG.getNode(ISD::BITCAST, SDLoc(N0), VT,
N0.getOperand(0));
- AddToWorkList(NewConv.getNode());
+ AddToWorklist(NewConv.getNode());
APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
if (N0.getOpcode() == ISD::FNEG)
@@ -5788,34 +6289,34 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
if (isTypeLegal(IntXVT)) {
SDValue X = DAG.getNode(ISD::BITCAST, SDLoc(N0),
IntXVT, N0.getOperand(1));
- AddToWorkList(X.getNode());
+ AddToWorklist(X.getNode());
// If X has a different width than the result/lhs, sext it or truncate it.
unsigned VTWidth = VT.getSizeInBits();
if (OrigXWidth < VTWidth) {
X = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, X);
- AddToWorkList(X.getNode());
+ AddToWorklist(X.getNode());
} else if (OrigXWidth > VTWidth) {
// To get the sign bit in the right place, we have to shift it right
// before truncating.
X = DAG.getNode(ISD::SRL, SDLoc(X),
X.getValueType(), X,
DAG.getConstant(OrigXWidth-VTWidth, X.getValueType()));
- AddToWorkList(X.getNode());
+ AddToWorklist(X.getNode());
X = DAG.getNode(ISD::TRUNCATE, SDLoc(X), VT, X);
- AddToWorkList(X.getNode());
+ AddToWorklist(X.getNode());
}
APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
X = DAG.getNode(ISD::AND, SDLoc(X), VT,
X, DAG.getConstant(SignBit, VT));
- AddToWorkList(X.getNode());
+ AddToWorklist(X.getNode());
SDValue Cst = DAG.getNode(ISD::BITCAST, SDLoc(N0),
VT, N0.getOperand(0));
Cst = DAG.getNode(ISD::AND, SDLoc(Cst), VT,
Cst, DAG.getConstant(~SignBit, VT));
- AddToWorkList(Cst.getNode());
+ AddToWorklist(Cst.getNode());
return DAG.getNode(ISD::OR, SDLoc(N), VT, X, Cst);
}
@@ -5871,10 +6372,9 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
Op = DAG.getNode(ISD::TRUNCATE, SDLoc(BV), SrcEltVT, Op);
Ops.push_back(DAG.getNode(ISD::BITCAST, SDLoc(BV),
DstEltVT, Op));
- AddToWorkList(Ops.back().getNode());
+ AddToWorklist(Ops.back().getNode());
}
- return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(BV), VT,
- &Ops[0], Ops.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(BV), VT, Ops);
}
// Otherwise, we're growing or shrinking the elements. To avoid having to
@@ -5930,8 +6430,7 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
}
EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, Ops.size());
- return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(BV), VT,
- &Ops[0], Ops.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(BV), VT, Ops);
}
// Finally, this must be the case where we are shrinking elements: each input
@@ -5967,8 +6466,7 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
std::reverse(Ops.end()-NumOutputsPerInput, Ops.end());
}
- return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(BV), VT,
- &Ops[0], Ops.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(BV), VT, Ops);
}
SDValue DAGCombiner::visitFADD(SDNode *N) {
@@ -6389,7 +6887,7 @@ SDValue DAGCombiner::visitFMA(SDNode *N) {
if (N1CFP->isExactlyValue(-1.0) &&
(!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))) {
SDValue RHSNeg = DAG.getNode(ISD::FNEG, dl, VT, N0);
- AddToWorkList(RHSNeg.getNode());
+ AddToWorklist(RHSNeg.getNode());
return DAG.getNode(ISD::FADD, dl, VT, N2, RHSNeg);
}
}
@@ -6551,12 +7049,8 @@ SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0);
}
- // The next optimizations are desireable only if SELECT_CC can be lowered.
- // Check against MVT::Other for SELECT_CC, which is a workaround for targets
- // having to say they don't support SELECT_CC on every type the DAG knows
- // about, since there is no way to mark an opcode illegal at all value types
- // (See also visitSELECT)
- if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, MVT::Other)) {
+ // The next optimizations are desirable only if SELECT_CC can be lowered.
+ if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT) || !LegalOperations) {
// fold (sint_to_fp (setcc x, y, cc)) -> (select_cc x, y, -1.0, 0.0,, cc)
if (N0.getOpcode() == ISD::SETCC && N0.getValueType() == MVT::i1 &&
!VT.isVector() &&
@@ -6566,7 +7060,7 @@ SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
{ N0.getOperand(0), N0.getOperand(1),
DAG.getConstantFP(-1.0, VT) , DAG.getConstantFP(0.0, VT),
N0.getOperand(2) };
- return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT, Ops, 5);
+ return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT, Ops);
}
// fold (sint_to_fp (zext (setcc x, y, cc))) ->
@@ -6579,7 +7073,7 @@ SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
{ N0.getOperand(0).getOperand(0), N0.getOperand(0).getOperand(1),
DAG.getConstantFP(1.0, VT) , DAG.getConstantFP(0.0, VT),
N0.getOperand(0).getOperand(2) };
- return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT, Ops, 5);
+ return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT, Ops);
}
}
@@ -6608,12 +7102,8 @@ SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0);
}
- // The next optimizations are desireable only if SELECT_CC can be lowered.
- // Check against MVT::Other for SELECT_CC, which is a workaround for targets
- // having to say they don't support SELECT_CC on every type the DAG knows
- // about, since there is no way to mark an opcode illegal at all value types
- // (See also visitSELECT)
- if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, MVT::Other)) {
+ // The next optimizations are desirable only if SELECT_CC can be lowered.
+ if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT) || !LegalOperations) {
// fold (uint_to_fp (setcc x, y, cc)) -> (select_cc x, y, -1.0, 0.0,, cc)
if (N0.getOpcode() == ISD::SETCC && !VT.isVector() &&
@@ -6623,7 +7113,7 @@ SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
{ N0.getOperand(0), N0.getOperand(1),
DAG.getConstantFP(1.0, VT), DAG.getConstantFP(0.0, VT),
N0.getOperand(2) };
- return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT, Ops, 5);
+ return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT, Ops);
}
}
@@ -6681,7 +7171,7 @@ SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse()) {
SDValue Tmp = DAG.getNode(ISD::FP_ROUND, SDLoc(N0), VT,
N0.getOperand(0), N1);
- AddToWorkList(Tmp.getNode());
+ AddToWorklist(Tmp.getNode());
return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT,
Tmp, N0.getOperand(1));
}
@@ -6732,8 +7222,7 @@ SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
// fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
- ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
- TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType()))) {
+ TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType())) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
LN0->getChain(),
@@ -6765,6 +7254,8 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
// Transform fneg(bitconvert(x)) -> bitconvert(x^sign) to avoid loading
// constant pool values.
+ // TODO: We can also optimize for vectors here, but we need to make sure
+ // that the sign mask is created properly for each vector element.
if (!TLI.isFNegFree(VT) && N0.getOpcode() == ISD::BITCAST &&
!VT.isVector() &&
N0.getNode()->hasOneUse() &&
@@ -6774,7 +7265,7 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
if (IntVT.isInteger() && !IntVT.isVector()) {
Int = DAG.getNode(ISD::XOR, SDLoc(N0), IntVT, Int,
DAG.getConstant(APInt::getSignBit(IntVT.getSizeInBits()), IntVT));
- AddToWorkList(Int.getNode());
+ AddToWorklist(Int.getNode());
return DAG.getNode(ISD::BITCAST, SDLoc(N),
VT, Int);
}
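The underlying idea is that negating an IEEE-754 value only flips its sign bit (and fabs only clears it), so the FP operation can be done as integer bit manipulation on the bitcast value. A standalone illustration for a 32-bit float, using memcpy for the bitcasts (illustrative only, not part of the patch):

// Standalone illustration of fneg(bitcast x) -> bitcast(x ^ signbit) and the
// analogous fabs fold with AND ~signbit.
#include <cassert>
#include <cstdint>
#include <cstring>

static float bitsToFloat(uint32_t u) { float f; std::memcpy(&f, &u, 4); return f; }
static uint32_t floatToBits(float f) { uint32_t u; std::memcpy(&u, &f, 4); return u; }

int main() {
  for (float f : {0.0f, 1.5f, -2.25f, 1e-30f, -3e20f}) {
    assert(bitsToFloat(floatToBits(f) ^ 0x80000000u) == -f);               // fneg
    assert(bitsToFloat(floatToBits(f) & ~0x80000000u) == (f < 0 ? -f : f)); // fabs
  }
  return 0;
}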
@@ -6783,11 +7274,16 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
// (fneg (fmul c, x)) -> (fmul -c, x)
if (N0.getOpcode() == ISD::FMUL) {
ConstantFPSDNode *CFP1 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
- if (CFP1)
- return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
- N0.getOperand(0),
- DAG.getNode(ISD::FNEG, SDLoc(N), VT,
- N0.getOperand(1)));
+ if (CFP1) {
+ APFloat CVal = CFP1->getValueAPF();
+ CVal.changeSign();
+ if (Level >= AfterLegalizeDAG &&
+ (TLI.isFPImmLegal(CVal, N->getValueType(0)) ||
+ TLI.isOperationLegal(ISD::ConstantFP, N->getValueType(0))))
+ return DAG.getNode(
+ ISD::FMUL, SDLoc(N), VT, N0.getOperand(0),
+ DAG.getNode(ISD::FNEG, SDLoc(N), VT, N0.getOperand(1)));
+ }
}
return SDValue();
@@ -6852,16 +7348,18 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
// Transform fabs(bitconvert(x)) -> bitconvert(x&~sign) to avoid loading
// constant pool values.
+ // TODO: We can also optimize for vectors here, but we need to make sure
+ // that the sign mask is created properly for each vector element.
if (!TLI.isFAbsFree(VT) &&
N0.getOpcode() == ISD::BITCAST && N0.getNode()->hasOneUse() &&
N0.getOperand(0).getValueType().isInteger() &&
- !N0.getOperand(0).getValueType().isVector()) {
+ !VT.isVector()) {
SDValue Int = N0.getOperand(0);
EVT IntVT = Int.getValueType();
if (IntVT.isInteger() && !IntVT.isVector()) {
Int = DAG.getNode(ISD::AND, SDLoc(N0), IntVT, Int,
DAG.getConstant(~APInt::getSignBit(IntVT.getSizeInBits()), IntVT));
- AddToWorkList(Int.getNode());
+ AddToWorklist(Int.getNode());
return DAG.getNode(ISD::BITCAST, SDLoc(N),
N->getValueType(0), Int);
}
@@ -6895,7 +7393,7 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
((N1.getOpcode() == ISD::TRUNCATE && N1.hasOneUse()) &&
(N1.getOperand(0).hasOneUse() &&
N1.getOperand(0).getOpcode() == ISD::SRL))) {
- SDNode *Trunc = 0;
+ SDNode *Trunc = nullptr;
if (N1.getOpcode() == ISD::TRUNCATE) {
// Look past the truncate.
Trunc = N1.getNode();
@@ -6944,13 +7442,13 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
CombineTo(N, NewBRCond, false);
// Truncate is dead.
if (Trunc) {
- removeFromWorkList(Trunc);
+ removeFromWorklist(Trunc);
DAG.DeleteNode(Trunc);
}
// Replace the uses of SRL with SETCC
- WorkListRemover DeadNodes(*this);
+ WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(N1, SetCC);
- removeFromWorkList(N1.getNode());
+ removeFromWorklist(N1.getNode());
DAG.DeleteNode(N1.getNode());
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
@@ -6978,9 +7476,9 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
dbgs() << "\nWith: ";
Tmp.getNode()->dump(&DAG);
dbgs() << '\n');
- WorkListRemover DeadNodes(*this);
+ WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(N1, Tmp);
- removeFromWorkList(TheXor);
+ removeFromWorklist(TheXor);
DAG.DeleteNode(TheXor);
return DAG.getNode(ISD::BRCOND, SDLoc(N),
MVT::Other, Chain, Tmp, N2);
@@ -7009,9 +7507,9 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
Op0, Op1,
Equal ? ISD::SETEQ : ISD::SETNE);
// Replace the uses of XOR with SETCC
- WorkListRemover DeadNodes(*this);
+ WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(N1, SetCC);
- removeFromWorkList(N1.getNode());
+ removeFromWorklist(N1.getNode());
DAG.DeleteNode(N1.getNode());
return DAG.getNode(ISD::BRCOND, SDLoc(N),
MVT::Other, Chain, SetCC, N2);
@@ -7037,7 +7535,7 @@ SDValue DAGCombiner::visitBR_CC(SDNode *N) {
SDValue Simp = SimplifySetCC(getSetCCResultType(CondLHS.getValueType()),
CondLHS, CondRHS, CC->get(), SDLoc(N),
false);
- if (Simp.getNode()) AddToWorkList(Simp.getNode());
+ if (Simp.getNode()) AddToWorklist(Simp.getNode());
// fold to a simpler setcc
if (Simp.getNode() && Simp.getOpcode() == ISD::SETCC)
@@ -7176,9 +7674,7 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
// a copy of the original base pointer.
SmallVector<SDNode *, 16> OtherUses;
if (isa<ConstantSDNode>(Offset))
- for (SDNode::use_iterator I = BasePtr.getNode()->use_begin(),
- E = BasePtr.getNode()->use_end(); I != E; ++I) {
- SDNode *Use = *I;
+ for (SDNode *Use : BasePtr.getNode()->uses()) {
if (Use == Ptr.getNode())
continue;
@@ -7220,9 +7716,7 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
SmallPtrSet<const SDNode *, 32> Visited;
SmallVector<const SDNode *, 16> Worklist;
- for (SDNode::use_iterator I = Ptr.getNode()->use_begin(),
- E = Ptr.getNode()->use_end(); I != E; ++I) {
- SDNode *Use = *I;
+ for (SDNode *Use : Ptr.getNode()->uses()) {
if (Use == N)
continue;
if (N->hasPredecessorHelper(Use, Visited, Worklist))
@@ -7251,7 +7745,7 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
dbgs() << "\nWith: ";
Result.getNode()->dump(&DAG);
dbgs() << '\n');
- WorkListRemover DeadNodes(*this);
+ WorklistRemover DeadNodes(*this);
if (isLoad) {
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
@@ -7310,13 +7804,13 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
SDLoc(OtherUses[i]),
OtherUses[i]->getValueType(0), NewOp1, NewOp2);
DAG.ReplaceAllUsesOfValueWith(SDValue(OtherUses[i], 0), NewUse);
- removeFromWorkList(OtherUses[i]);
+ removeFromWorklist(OtherUses[i]);
DAG.DeleteNode(OtherUses[i]);
}
// Replace the uses of Ptr with uses of the updated base value.
DAG.ReplaceAllUsesOfValueWith(Ptr, Result.getValue(isLoad ? 1 : 0));
- removeFromWorkList(Ptr.getNode());
+ removeFromWorklist(Ptr.getNode());
DAG.DeleteNode(Ptr.getNode());
return true;
@@ -7358,9 +7852,7 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
if (Ptr.getNode()->hasOneUse())
return false;
- for (SDNode::use_iterator I = Ptr.getNode()->use_begin(),
- E = Ptr.getNode()->use_end(); I != E; ++I) {
- SDNode *Op = *I;
+ for (SDNode *Op : Ptr.getNode()->uses()) {
if (Op == N ||
(Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB))
continue;
@@ -7386,9 +7878,7 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
// Check for #1.
bool TryNext = false;
- for (SDNode::use_iterator II = BasePtr.getNode()->use_begin(),
- EE = BasePtr.getNode()->use_end(); II != EE; ++II) {
- SDNode *Use = *II;
+ for (SDNode *Use : BasePtr.getNode()->uses()) {
if (Use == Ptr.getNode())
continue;
@@ -7396,9 +7886,7 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
// transformation.
if (Use->getOpcode() == ISD::ADD || Use->getOpcode() == ISD::SUB){
bool RealUse = false;
- for (SDNode::use_iterator III = Use->use_begin(),
- EEE = Use->use_end(); III != EEE; ++III) {
- SDNode *UseUse = *III;
+ for (SDNode *UseUse : Use->uses()) {
if (!canFoldInAddressingMode(Use, UseUse, DAG, TLI))
RealUse = true;
}
@@ -7427,7 +7915,7 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
dbgs() << "\nWith: ";
Result.getNode()->dump(&DAG);
dbgs() << '\n');
- WorkListRemover DeadNodes(*this);
+ WorklistRemover DeadNodes(*this);
if (isLoad) {
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
@@ -7441,7 +7929,7 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
// Replace the uses of Use with uses of the updated base value.
DAG.ReplaceAllUsesOfValueWith(SDValue(Op, 0),
Result.getValue(isLoad ? 1 : 0));
- removeFromWorkList(Op);
+ removeFromWorklist(Op);
DAG.DeleteNode(Op);
return true;
}
@@ -7474,11 +7962,11 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
dbgs() << "\nWith chain: ";
Chain.getNode()->dump(&DAG);
dbgs() << "\n");
- WorkListRemover DeadNodes(*this);
+ WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);
if (N->use_empty()) {
- removeFromWorkList(N);
+ removeFromWorklist(N);
DAG.DeleteNode(N);
}
@@ -7494,12 +7982,12 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
dbgs() << "\nWith: ";
Undef.getNode()->dump(&DAG);
dbgs() << " and 2 other values\n");
- WorkListRemover DeadNodes(*this);
+ WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1),
DAG.getUNDEF(N->getValueType(1)));
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 2), Chain);
- removeFromWorkList(N);
+ removeFromWorklist(N);
DAG.DeleteNode(N);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
@@ -7537,7 +8025,12 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA :
TLI.getTargetMachine().getSubtarget<TargetSubtargetInfo>().useAA();
- if (UseAA) {
+#ifndef NDEBUG
+ if (CombinerAAOnlyFunc.getNumOccurrences() &&
+ CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
+ UseAA = false;
+#endif
+ if (UseAA && LD->isUnindexed()) {
// Walk up chain skipping non-aliasing memory nodes.
SDValue BetterChain = FindBetterChain(N, Chain);
@@ -7561,7 +8054,7 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
MVT::Other, Chain, ReplLoad.getValue(1));
// Make sure the new and old chains are cleaned up.
- AddToWorkList(Token.getNode());
+ AddToWorklist(Token.getNode());
// Replace uses with load result and token factor. Don't add users
// to work list.
@@ -7686,8 +8179,8 @@ struct LoadedSlice {
// This is used to get some contextual information about legal types, etc.
SelectionDAG *DAG;
- LoadedSlice(SDNode *Inst = NULL, LoadSDNode *Origin = NULL,
- unsigned Shift = 0, SelectionDAG *DAG = NULL)
+ LoadedSlice(SDNode *Inst = nullptr, LoadSDNode *Origin = nullptr,
+ unsigned Shift = 0, SelectionDAG *DAG = nullptr)
: Inst(Inst), Origin(Origin), Shift(Shift), DAG(DAG) {}
LoadedSlice(const LoadedSlice &LS)
@@ -7783,7 +8276,7 @@ struct LoadedSlice {
/// \brief Get the offset in bytes of this slice in the original chunk of
/// bits.
- /// \pre DAG != NULL.
+ /// \pre DAG != nullptr.
uint64_t getOffsetFromBase() const {
assert(DAG && "Missing context.");
bool IsBigEndian =
@@ -7888,14 +8381,6 @@ struct LoadedSlice {
};
}
-/// \brief Sorts LoadedSlice according to their offset.
-struct LoadedSliceSorter {
- bool operator()(const LoadedSlice &LHS, const LoadedSlice &RHS) {
- assert(LHS.Origin == RHS.Origin && "Different bases not implemented.");
- return LHS.getOffsetFromBase() < RHS.getOffsetFromBase();
- }
-};
-
/// \brief Check that all bits set in \p UsedBits form a dense region, i.e.,
/// \p UsedBits looks like 0..0 1..1 0..0.
static bool areUsedBitsDense(const APInt &UsedBits) {
@@ -7939,12 +8424,16 @@ static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices,
// Sort the slices so that elements that are likely to be next to each
// other in memory are next to each other in the list.
- std::sort(LoadedSlices.begin(), LoadedSlices.end(), LoadedSliceSorter());
+ std::sort(LoadedSlices.begin(), LoadedSlices.end(),
+ [](const LoadedSlice &LHS, const LoadedSlice &RHS) {
+ assert(LHS.Origin == RHS.Origin && "Different bases not implemented.");
+ return LHS.getOffsetFromBase() < RHS.getOffsetFromBase();
+ });
const TargetLowering &TLI = LoadedSlices[0].DAG->getTargetLoweringInfo();
// First (resp. Second) is the first (resp. second) potential candidate
// to be placed in a paired load.
- const LoadedSlice *First = NULL;
- const LoadedSlice *Second = NULL;
+ const LoadedSlice *First = nullptr;
+ const LoadedSlice *Second = nullptr;
for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice,
// Set the beginning of the pair.
First = Second) {
@@ -7966,7 +8455,7 @@ static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices,
unsigned RequiredAlignment = 0;
if (!TLI.hasPairedLoad(LoadedType, RequiredAlignment)) {
// move to the next pair, this type is hopeless.
- Second = NULL;
+ Second = nullptr;
continue;
}
// Check if we meet the alignment requirement.
@@ -7980,7 +8469,7 @@ static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices,
assert(GlobalLSCost.Loads > 0 && "We save more loads than we created!");
--GlobalLSCost.Loads;
// Move to the next pair.
- Second = NULL;
+ Second = nullptr;
}
}
@@ -8075,8 +8564,8 @@ bool DAGCombiner::SliceUpLoad(SDNode *N) {
// The width of the type must be a power of 2 and greater than 8-bits.
// Otherwise the load cannot be represented in LLVM IR.
- // Moreover, if we shifted with a non 8-bits multiple, the slice
- // will be accross several bytes. We do not support that.
+ // Moreover, if we shifted with a non-8-bits multiple, the slice
+ // will be across several bytes. We do not support that.
unsigned Width = User->getValueSizeInBits(0);
if (Width < 8 || !isPowerOf2_32(Width) || (Shift & 0x7))
return 0;
@@ -8124,7 +8613,7 @@ bool DAGCombiner::SliceUpLoad(SDNode *N) {
}
SDValue Chain = DAG.getNode(ISD::TokenFactor, SDLoc(LD), MVT::Other,
- &ArgChains[0], ArgChains.size());
+ ArgChains);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);
return true;
}
@@ -8219,14 +8708,14 @@ ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
// that uses this. If not, this is not a replacement.
APInt Mask = ~APInt::getBitsSet(IVal.getValueSizeInBits(),
ByteShift*8, (ByteShift+NumBytes)*8);
- if (!DAG.MaskedValueIsZero(IVal, Mask)) return 0;
+ if (!DAG.MaskedValueIsZero(IVal, Mask)) return nullptr;
// Check that it is legal on the target to do this. It is legal if the new
// VT we're shrinking to (i8/i16/i32) is legal or we're still before type
// legalization.
MVT VT = MVT::getIntegerVT(NumBytes*8);
if (!DC->isTypeLegal(VT))
- return 0;
+ return nullptr;
// Okay, we can do this! Replace the 'St' store with a store of IVal that is
// shifted by ByteShift and truncated down to NumBytes.
@@ -8372,10 +8861,10 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
ST->getPointerInfo().getWithOffset(PtrOff),
false, false, NewAlign);
- AddToWorkList(NewPtr.getNode());
- AddToWorkList(NewLD.getNode());
- AddToWorkList(NewVal.getNode());
- WorkListRemover DeadNodes(*this);
+ AddToWorklist(NewPtr.getNode());
+ AddToWorklist(NewLD.getNode());
+ AddToWorklist(NewVal.getNode());
+ WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLD.getValue(1));
++OpsNarrowed;
return NewST;
@@ -8430,9 +8919,9 @@ SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {
ST->getPointerInfo(),
false, false, STAlign);
- AddToWorkList(NewLD.getNode());
- AddToWorkList(NewST.getNode());
- WorkListRemover DeadNodes(*this);
+ AddToWorklist(NewLD.getNode());
+ AddToWorklist(NewST.getNode());
+ WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(Value.getValue(1), NewLD.getValue(1));
++LdStFP2Int;
return NewST;
@@ -8543,17 +9032,6 @@ struct MemOpLink {
unsigned SequenceNum;
};
-/// Sorts store nodes in a link according to their offset from a shared
-// base ptr.
-struct ConsecutiveMemoryChainSorter {
- bool operator()(MemOpLink LHS, MemOpLink RHS) {
- return
- LHS.OffsetFromBase < RHS.OffsetFromBase ||
- (LHS.OffsetFromBase == RHS.OffsetFromBase &&
- LHS.SequenceNum > RHS.SequenceNum);
- }
-};
-
bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
EVT MemVT = St->getMemoryVT();
int64_t ElementSizeBytes = MemVT.getSizeInBits()/8;
@@ -8651,7 +9129,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
break;
} else if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(NextInChain)) {
if (Ldn->isVolatile()) {
- Index = NULL;
+ Index = nullptr;
break;
}
@@ -8660,7 +9138,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
NextInChain = Ldn->getChain().getNode();
continue;
} else {
- Index = NULL;
+ Index = nullptr;
break;
}
}
@@ -8672,7 +9150,11 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
// Sort the memory operands according to their distance from the base pointer.
std::sort(StoreNodes.begin(), StoreNodes.end(),
- ConsecutiveMemoryChainSorter());
+ [](MemOpLink LHS, MemOpLink RHS) {
+ return LHS.OffsetFromBase < RHS.OffsetFromBase ||
+ (LHS.OffsetFromBase == RHS.OffsetFromBase &&
+ LHS.SequenceNum > RHS.SequenceNum);
+ });
// Scan the memory operations on the chain and find the first non-consecutive
// store memory address.
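The removed ConsecutiveMemoryChainSorter functor above is replaced by an inline lambda with the same ordering: stores are sorted by offset from the shared base pointer, with ties broken so that the store issued later (higher SequenceNum) sorts first. A minimal standalone sketch of that comparator, using a simplified stand-in for MemOpLink rather than the real SDNode machinery:

  #include <algorithm>
  #include <cassert>
  #include <cstdint>
  #include <vector>

  struct Link { int64_t OffsetFromBase; unsigned SequenceNum; };

  int main() {
    // Two stores at the same offset: the one issued later (SequenceNum 7)
    // should sort first, mirroring the lambda in MergeConsecutiveStores.
    std::vector<Link> Links = {{8, 3}, {0, 2}, {0, 7}};
    std::sort(Links.begin(), Links.end(), [](const Link &L, const Link &R) {
      return L.OffsetFromBase < R.OffsetFromBase ||
             (L.OffsetFromBase == R.OffsetFromBase &&
              L.SequenceNum > R.SequenceNum);
    });
    assert(Links[0].SequenceNum == 7 && Links[1].SequenceNum == 2 &&
           Links[2].OffsetFromBase == 8);
  }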
@@ -8720,7 +9202,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
} else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(StoredVal)) {
NonZero |= !C->getConstantFPValue()->isNullValue();
} else {
- // Non constant.
+ // Non-constant.
break;
}
@@ -8831,7 +9313,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
// Since we know that St is redundant, just iterate.
while (!St->use_empty())
DAG.ReplaceAllUsesWith(SDValue(St, 0), St->getChain());
- removeFromWorkList(St);
+ removeFromWorklist(St);
DAG.DeleteNode(St);
}
@@ -9006,7 +9488,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
continue;
StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
DAG.ReplaceAllUsesOfValueWith(SDValue(St, 0), St->getChain());
- removeFromWorkList(St);
+ removeFromWorklist(St);
DAG.DeleteNode(St);
}
@@ -9128,7 +9610,12 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA :
TLI.getTargetMachine().getSubtarget<TargetSubtargetInfo>().useAA();
- if (UseAA) {
+#ifndef NDEBUG
+ if (CombinerAAOnlyFunc.getNumOccurrences() &&
+ CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
+ UseAA = false;
+#endif
+ if (UseAA && ST->isUnindexed()) {
// Walk up chain skipping non-aliasing memory nodes.
SDValue BetterChain = FindBetterChain(N, Chain);
@@ -9150,7 +9637,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
MVT::Other, Chain, ReplStore);
// Make sure the new and old chains are cleaned up.
- AddToWorkList(Token.getNode());
+ AddToWorklist(Token.getNode());
// Don't add users to work list.
return CombineTo(N, Token, false);
@@ -9172,7 +9659,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
APInt::getLowBitsSet(
Value.getValueType().getScalarType().getSizeInBits(),
ST->getMemoryVT().getScalarType().getSizeInBits()));
- AddToWorkList(Value.getNode());
+ AddToWorklist(Value.getNode());
if (Shorter.getNode())
return DAG.getTruncStore(Chain, SDLoc(N), Shorter,
Ptr, ST->getMemoryVT(), ST->getMemOperand());
@@ -9251,6 +9738,27 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
return SDValue();
unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
+ // Canonicalize insert_vector_elt dag nodes.
+ // Example:
+ // (insert_vector_elt (insert_vector_elt A, Idx0), Idx1)
+ // -> (insert_vector_elt (insert_vector_elt A, Idx1), Idx0)
+ //
+ // Do this only if the child insert_vector node has one use; also
+ // do this only if indices are both constants and Idx1 < Idx0.
+ if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT && InVec.hasOneUse()
+ && isa<ConstantSDNode>(InVec.getOperand(2))) {
+ unsigned OtherElt =
+ cast<ConstantSDNode>(InVec.getOperand(2))->getZExtValue();
+ if (Elt < OtherElt) {
+ // Swap nodes.
+ SDValue NewOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), VT,
+ InVec.getOperand(0), InVal, EltNo);
+ AddToWorklist(NewOp.getNode());
+ return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(InVec.getNode()),
+ VT, NewOp, InVec.getOperand(1), InVec.getOperand(2));
+ }
+ }
+
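As a rough illustration of the canonicalization above (plain arrays standing in for the DAG nodes): two inserts at distinct constant lanes commute, which is what allows the combiner to reorder the insert_vector_elt chain by increasing index.

  #include <array>
  #include <cassert>

  int main() {
    std::array<int, 4> A{0, 0, 0, 0}, B{0, 0, 0, 0};
    A[3] = 7; A[1] = 9;   // original order: Idx0 = 3 first, then Idx1 = 1
    B[1] = 9; B[3] = 7;   // canonical order: smaller index first
    assert(A == B);       // same result either way, since the lanes differ
  }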
// Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially
// be converted to a BUILD_VECTOR). Fill in the Ops vector with the
// vector elements.
@@ -9280,8 +9788,7 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
}
// Return the new vector
- return DAG.getNode(ISD::BUILD_VECTOR, dl,
- VT, &Ops[0], Ops.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
@@ -9309,9 +9816,10 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
// We only perform this optimization before the op legalization phase because
// we may introduce new vector instructions which are not backed by TD
// patterns. For example on AVX, extracting elements from a wide vector
- // without using extract_subvector.
+ // without using extract_subvector. However, if we can find an underlying
+ // scalar value, then we can always use that.
if (InVec.getOpcode() == ISD::VECTOR_SHUFFLE
- && ConstEltNo && !LegalOperations) {
+ && ConstEltNo) {
int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
int NumElem = VT.getVectorNumElements();
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(InVec);
@@ -9323,16 +9831,32 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
return DAG.getUNDEF(NVT);
// Select the right vector half to extract from.
+ SDValue SVInVec;
if (OrigElt < NumElem) {
- InVec = InVec->getOperand(0);
+ SVInVec = InVec->getOperand(0);
} else {
- InVec = InVec->getOperand(1);
+ SVInVec = InVec->getOperand(1);
OrigElt -= NumElem;
}
- EVT IndexTy = TLI.getVectorIdxTy();
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), NVT,
- InVec, DAG.getConstant(OrigElt, IndexTy));
+ if (SVInVec.getOpcode() == ISD::BUILD_VECTOR) {
+ SDValue InOp = SVInVec.getOperand(OrigElt);
+ if (InOp.getValueType() != NVT) {
+ assert(InOp.getValueType().isInteger() && NVT.isInteger());
+ InOp = DAG.getSExtOrTrunc(InOp, SDLoc(SVInVec), NVT);
+ }
+
+ return InOp;
+ }
+
+ // FIXME: We should handle recursing on other vector shuffles and
+ // scalar_to_vector here as well.
+
+ if (!LegalOperations) {
+ EVT IndexTy = TLI.getVectorIdxTy();
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), NVT,
+ SVInVec, DAG.getConstant(OrigElt, IndexTy));
+ }
}
// Perform only after legalization to ensure build_vector / vector_shuffle
@@ -9370,8 +9894,8 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
NewLoad = true;
}
- LoadSDNode *LN0 = NULL;
- const ShuffleVectorSDNode *SVN = NULL;
+ LoadSDNode *LN0 = nullptr;
+ const ShuffleVectorSDNode *SVN = nullptr;
if (ISD::isNormalLoad(InVec.getNode())) {
LN0 = cast<LoadSDNode>(InVec);
} else if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR &&
@@ -9478,16 +10002,16 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
else
Load = DAG.getNode(ISD::BITCAST, SDLoc(N), NVT, Load);
}
- WorkListRemover DeadNodes(*this);
+ WorklistRemover DeadNodes(*this);
SDValue From[] = { SDValue(N, 0), SDValue(LN0,1) };
SDValue To[] = { Load, Chain };
DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
// Since we're explicitly calling ReplaceAllUses, add the new node to the
// worklist explicitly as well.
- AddToWorkList(Load.getNode());
- AddUsersToWorkList(Load.getNode()); // Add users too
+ AddToWorklist(Load.getNode());
+ AddUsersToWorklist(Load.getNode()); // Add users too
// Make sure to revisit this node to clean it up; it will usually be dead.
- AddToWorkList(N);
+ AddToWorklist(N);
return SDValue(N, 0);
}
@@ -9596,10 +10120,10 @@ SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
if (!isTypeLegal(VecVT)) return SDValue();
// Make the new BUILD_VECTOR.
- SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], Ops.size());
+ SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, Ops);
// The new BUILD_VECTOR node has the potential to be further optimized.
- AddToWorkList(BV.getNode());
+ AddToWorklist(BV.getNode());
// Bitcast to the desired type.
return DAG.getNode(ISD::BITCAST, dl, VT, BV);
}
@@ -9664,9 +10188,8 @@ SDValue DAGCombiner::reduceBuildVecConvertToConvertBuildVec(SDNode *N) {
else
Opnds.push_back(In.getOperand(0));
}
- SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT,
- &Opnds[0], Opnds.size());
- AddToWorkList(BV.getNode());
+ SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, Opnds);
+ AddToWorklist(BV.getNode());
return DAG.getNode(Opcode, dl, VT, BV);
}
@@ -9706,7 +10229,7 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
// constant index, bail out.
if (N->getOperand(i).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
!isa<ConstantSDNode>(N->getOperand(i).getOperand(1))) {
- VecIn1 = VecIn2 = SDValue(0, 0);
+ VecIn1 = VecIn2 = SDValue(nullptr, 0);
break;
}
@@ -9715,18 +10238,18 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
if (ExtractedFromVec == VecIn1 || ExtractedFromVec == VecIn2)
continue;
- if (VecIn1.getNode() == 0) {
+ if (!VecIn1.getNode()) {
VecIn1 = ExtractedFromVec;
- } else if (VecIn2.getNode() == 0) {
+ } else if (!VecIn2.getNode()) {
VecIn2 = ExtractedFromVec;
} else {
// Too many inputs.
- VecIn1 = VecIn2 = SDValue(0, 0);
+ VecIn1 = VecIn2 = SDValue(nullptr, 0);
break;
}
}
- // If everything is good, we can make a shuffle operation.
+ // If everything is good, we can make a shuffle operation.
if (VecIn1.getNode()) {
SmallVector<int, 8> Mask;
for (unsigned i = 0; i != NumInScalars; ++i) {
@@ -9756,7 +10279,7 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
// Attempt to transform a single input vector to the correct type.
if ((VT != VecIn1.getValueType())) {
// We don't support shuffling between TWO values of different types.
- if (VecIn2.getNode() != 0)
+ if (VecIn2.getNode())
return SDValue();
// We only support widening of vectors which are half the size of the
@@ -9839,6 +10362,39 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
}
}
+ // fold (concat_vectors (BUILD_VECTOR A, B, ...), (BUILD_VECTOR C, D, ...))
+ // -> (BUILD_VECTOR A, B, ..., C, D, ...)
+ if (N->getNumOperands() == 2 &&
+ N->getOperand(0).getOpcode() == ISD::BUILD_VECTOR &&
+ N->getOperand(1).getOpcode() == ISD::BUILD_VECTOR) {
+ EVT VT = N->getValueType(0);
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ SmallVector<SDValue, 8> Opnds;
+ unsigned BuildVecNumElts = N0.getNumOperands();
+
+ EVT SclTy0 = N0.getOperand(0)->getValueType(0);
+ EVT SclTy1 = N1.getOperand(0)->getValueType(0);
+ if (SclTy0.isFloatingPoint()) {
+ for (unsigned i = 0; i != BuildVecNumElts; ++i)
+ Opnds.push_back(N0.getOperand(i));
+ for (unsigned i = 0; i != BuildVecNumElts; ++i)
+ Opnds.push_back(N1.getOperand(i));
+ } else {
+ // If the BUILD_VECTORs are built from integers, they may have different
+ // operand types. Get the smaller type and truncate all operands to it.
+ EVT MinTy = SclTy0.bitsLE(SclTy1) ? SclTy0 : SclTy1;
+ for (unsigned i = 0; i != BuildVecNumElts; ++i)
+ Opnds.push_back(DAG.getNode(ISD::TRUNCATE, SDLoc(N), MinTy,
+ N0.getOperand(i)));
+ for (unsigned i = 0; i != BuildVecNumElts; ++i)
+ Opnds.push_back(DAG.getNode(ISD::TRUNCATE, SDLoc(N), MinTy,
+ N1.getOperand(i)));
+ }
+
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, Opnds);
+ }
+
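A sketch of the MinTy step above in plain C++ (hypothetical variable names, unsigned integers standing in for the vector elements): when the two constant BUILD_VECTORs carry integer operands of different widths, every operand is truncated to the narrower type before the single combined BUILD_VECTOR is formed.

  #include <cassert>
  #include <cstdint>
  #include <vector>

  int main() {
    std::vector<uint32_t> A = {0x00010002u, 0x00030004u}; // wider operands
    std::vector<uint16_t> B = {5, 6};                      // narrower operands
    std::vector<uint16_t> Out;                             // MinTy is i16 here
    for (uint32_t V : A)
      Out.push_back(static_cast<uint16_t>(V));             // ISD::TRUNCATE analogue
    for (uint16_t V : B)
      Out.push_back(V);
    assert(Out.size() == A.size() + B.size());
    assert(Out[0] == 2 && Out[1] == 4);                    // only the low 16 bits survive
  }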
// Type legalization of vectors and DAG canonicalization of SHUFFLE_VECTOR
// nodes often generate nop CONCAT_VECTOR nodes.
// Scan the CONCAT_VECTOR operands and look for a CONCAT operations that
@@ -9993,8 +10549,7 @@ static SDValue partitionShuffleOfConcats(SDNode *N, SelectionDAG &DAG) {
}
}
- return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Ops.data(),
- Ops.size());
+ return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Ops);
}
SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
@@ -10110,22 +10665,19 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
}
// If this shuffle node is simply a swizzle of another shuffle node,
- // and it reverses the swizzle of the previous shuffle then we can
- // optimize shuffle(shuffle(x, undef), undef) -> x.
+ // then try to simplify it.
if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG &&
N1.getOpcode() == ISD::UNDEF) {
ShuffleVectorSDNode *OtherSV = cast<ShuffleVectorSDNode>(N0);
- // Shuffle nodes can only reverse shuffles with a single non-undef value.
- if (N0.getOperand(1).getOpcode() != ISD::UNDEF)
- return SDValue();
-
// The incoming shuffle must be of the same type as the result of the
// current shuffle.
assert(OtherSV->getOperand(0).getValueType() == VT &&
"Shuffle types don't match");
+ SmallVector<int, 4> Mask;
+ // Compute the combined shuffle mask.
for (unsigned i = 0; i != NumElts; ++i) {
int Idx = SVN->getMaskElt(i);
assert(Idx < (int)NumElts && "Index references undef operand");
@@ -10133,13 +10685,174 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
// shuffle. Adopt the incoming index.
if (Idx >= 0)
Idx = OtherSV->getMaskElt(Idx);
+ Mask.push_back(Idx);
+ }
+
+ bool CommuteOperands = false;
+ if (N0.getOperand(1).getOpcode() != ISD::UNDEF) {
+ // To be valid, the combined shuffle mask should only reference elements
+ // from one of the two input vectors of the inner shufflevector.
+ bool IsValidMask = true;
+ for (unsigned i = 0; i != NumElts && IsValidMask; ++i)
+ // See if the combined mask only references undefs or elements coming
+ // from the first shufflevector operand.
+ IsValidMask = Mask[i] < 0 || (unsigned)Mask[i] < NumElts;
+
+ if (!IsValidMask) {
+ IsValidMask = true;
+ for (unsigned i = 0; i != NumElts && IsValidMask; ++i)
+ // Check that all the elements come from the second shuffle operand.
+ IsValidMask = Mask[i] < 0 || (unsigned)Mask[i] >= NumElts;
+ CommuteOperands = IsValidMask;
+ }
- // The combined shuffle must map each index to itself.
- if (Idx >= 0 && (unsigned)Idx != i)
+ // Early exit if the combined shuffle mask is not valid.
+ if (!IsValidMask)
return SDValue();
}
- return OtherSV->getOperand(0);
+ // See if this pair of shuffles can be safely folded according to either
+ // of the following rules:
+ // shuffle(shuffle(x, y), undef) -> x
+ // shuffle(shuffle(x, undef), undef) -> x
+ // shuffle(shuffle(x, y), undef) -> y
+ bool IsIdentityMask = true;
+ unsigned BaseMaskIndex = CommuteOperands ? NumElts : 0;
+ for (unsigned i = 0; i != NumElts && IsIdentityMask; ++i) {
+ // Skip Undefs.
+ if (Mask[i] < 0)
+ continue;
+
+ // The combined shuffle must map each index to itself.
+ IsIdentityMask = (unsigned)Mask[i] == i + BaseMaskIndex;
+ }
+
+ if (IsIdentityMask) {
+ if (CommuteOperands)
+ // optimize shuffle(shuffle(x, y), undef) -> y.
+ return OtherSV->getOperand(1);
+
+ // optimize shuffle(shuffle(x, undef), undef) -> x
+ // optimize shuffle(shuffle(x, y), undef) -> x
+ return OtherSV->getOperand(0);
+ }
+
+ // It may still be beneficial to combine the two shuffles if the
+ // resulting shuffle is legal.
+ if (TLI.isTypeLegal(VT) && TLI.isShuffleMaskLegal(Mask, VT)) {
+ if (!CommuteOperands)
+ // shuffle(shuffle(x, undef, M1), undef, M2) -> shuffle(x, undef, M3).
+ // shuffle(shuffle(x, y, M1), undef, M2) -> shuffle(x, undef, M3)
+ return DAG.getVectorShuffle(VT, SDLoc(N), N0->getOperand(0), N1,
+ &Mask[0]);
+
+ // shuffle(shuffle(x, y, M1), undef, M2) -> shuffle(undef, y, M3)
+ return DAG.getVectorShuffle(VT, SDLoc(N), N1, N0->getOperand(1),
+ &Mask[0]);
+ }
+ }
+
+ // Canonicalize shuffles according to rules:
+ // shuffle(A, shuffle(A, B)) -> shuffle(shuffle(A,B), A)
+ // shuffle(B, shuffle(A, B)) -> shuffle(shuffle(A,B), B)
+ // shuffle(B, shuffle(A, Undef)) -> shuffle(shuffle(A, Undef), B)
+ if (N1.getOpcode() == ISD::VECTOR_SHUFFLE && N0.getOpcode() != ISD::UNDEF &&
+ N0.getOpcode() != ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG &&
+ TLI.isTypeLegal(VT)) {
+ // The incoming shuffle must be of the same type as the result of the
+ // current shuffle.
+ assert(N1->getOperand(0).getValueType() == VT &&
+ "Shuffle types don't match");
+
+ SDValue SV0 = N1->getOperand(0);
+ SDValue SV1 = N1->getOperand(1);
+ bool HasSameOp0 = N0 == SV0;
+ bool IsSV1Undef = SV1.getOpcode() == ISD::UNDEF;
+ if (HasSameOp0 || IsSV1Undef || N0 == SV1)
+ // Commute the operands of this shuffle so that next rule
+ // will trigger.
+ return DAG.getCommutedVectorShuffle(*SVN);
+ }
+
+ // Try to fold according to rules:
+ // shuffle(shuffle(A, B, M0), B, M1) -> shuffle(A, B, M2)
+ // shuffle(shuffle(A, B, M0), A, M1) -> shuffle(A, B, M2)
+ // shuffle(shuffle(A, Undef, M0), B, M1) -> shuffle(A, B, M2)
+ // shuffle(shuffle(A, Undef, M0), A, M1) -> shuffle(A, Undef, M2)
+ // Don't try to fold shuffles with illegal type.
+ if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG &&
+ N1.getOpcode() != ISD::UNDEF && TLI.isTypeLegal(VT)) {
+ ShuffleVectorSDNode *OtherSV = cast<ShuffleVectorSDNode>(N0);
+
+ // The incoming shuffle must be of the same type as the result of the
+ // current shuffle.
+ assert(OtherSV->getOperand(0).getValueType() == VT &&
+ "Shuffle types don't match");
+
+ SDValue SV0 = OtherSV->getOperand(0);
+ SDValue SV1 = OtherSV->getOperand(1);
+ bool HasSameOp0 = N1 == SV0;
+ bool IsSV1Undef = SV1.getOpcode() == ISD::UNDEF;
+ if (!HasSameOp0 && !IsSV1Undef && N1 != SV1)
+ // Early exit.
+ return SDValue();
+
+ SmallVector<int, 4> Mask;
+ // Compute the combined shuffle mask for a shuffle with SV0 as the first
+ // operand, and SV1 as the second operand.
+ for (unsigned i = 0; i != NumElts; ++i) {
+ int Idx = SVN->getMaskElt(i);
+ if (Idx < 0) {
+ // Propagate Undef.
+ Mask.push_back(Idx);
+ continue;
+ }
+
+ if (Idx < (int)NumElts) {
+ Idx = OtherSV->getMaskElt(Idx);
+ if (IsSV1Undef && Idx >= (int) NumElts)
+ Idx = -1; // Propagate Undef.
+ } else
+ Idx = HasSameOp0 ? Idx - NumElts : Idx;
+
+ Mask.push_back(Idx);
+ }
+
+ // Avoid introducing shuffles with illegal mask.
+ if (TLI.isShuffleMaskLegal(Mask, VT)) {
+ if (IsSV1Undef)
+ // shuffle(shuffle(A, Undef, M0), B, M1) -> shuffle(A, B, M2)
+ // shuffle(shuffle(A, Undef, M0), A, M1) -> shuffle(A, Undef, M2)
+ return DAG.getVectorShuffle(VT, SDLoc(N), SV0, N1, &Mask[0]);
+ return DAG.getVectorShuffle(VT, SDLoc(N), SV0, SV1, &Mask[0]);
+ }
+ }
+
+ return SDValue();
+}
+
+SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N2 = N->getOperand(2);
+
+ // If the input vector is a concatenation, and the insert replaces
+ // one of the halves, we can optimize into a single concat_vectors.
+ if (N0.getOpcode() == ISD::CONCAT_VECTORS &&
+ N0->getNumOperands() == 2 && N2.getOpcode() == ISD::Constant) {
+ APInt InsIdx = cast<ConstantSDNode>(N2)->getAPIntValue();
+ EVT VT = N->getValueType(0);
+
+ // Lower half: fold (insert_subvector (concat_vectors X, Y), Z) ->
+ // (concat_vectors Z, Y)
+ if (InsIdx == 0)
+ return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
+ N->getOperand(1), N0.getOperand(1));
+
+ // Upper half: fold (insert_subvector (concat_vectors X, Y), Z) ->
+ // (concat_vectors X, Z)
+ if (InsIdx == VT.getVectorNumElements()/2)
+ return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
+ N0.getOperand(0), N->getOperand(1));
}
return SDValue();
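The mask bookkeeping in the hunks above is easier to see outside the DAG. A small sketch of the composition used for shuffle(shuffle(X, Y, M1), undef, M2), assuming the simple case where the combined mask only references the first inner operand: each outer index is looked up in the inner mask, undef (-1) propagates, and when the result is the identity the whole pair folds to X.

  #include <cassert>
  #include <vector>

  static std::vector<int> combineMasks(const std::vector<int> &M1,
                                       const std::vector<int> &M2) {
    std::vector<int> Mask;
    for (int Idx : M2)
      Mask.push_back(Idx < 0 ? -1 : M1[Idx]); // adopt the incoming index
    return Mask;
  }

  int main() {
    // A swizzle followed by the same swizzle on a 4-element vector: the
    // combined mask is the identity, so shuffle(shuffle(X, undef), undef) == X.
    std::vector<int> M1 = {2, 3, 0, 1}, M2 = {2, 3, 0, 1};
    assert((combineMasks(M1, M2) == std::vector<int>{0, 1, 2, 3}));
  }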
@@ -10182,8 +10895,7 @@ SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
EVT EltVT = RVT.getVectorElementType();
SmallVector<SDValue,8> ZeroOps(RVT.getVectorNumElements(),
DAG.getConstant(0, EltVT));
- SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N),
- RVT, &ZeroOps[0], ZeroOps.size());
+ SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), RVT, ZeroOps);
LHS = DAG.getNode(ISD::BITCAST, dl, RVT, LHS);
SDValue Shuf = DAG.getVectorShuffle(RVT, dl, LHS, Zero, &Indices[0]);
return DAG.getNode(ISD::BITCAST, dl, VT, Shuf);
@@ -10207,18 +10919,15 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
// this operation.
if (LHS.getOpcode() == ISD::BUILD_VECTOR &&
RHS.getOpcode() == ISD::BUILD_VECTOR) {
+ // Check if both vectors are constants. If not bail out.
+ if (!(cast<BuildVectorSDNode>(LHS)->isConstant() &&
+ cast<BuildVectorSDNode>(RHS)->isConstant()))
+ return SDValue();
+
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
SDValue LHSOp = LHS.getOperand(i);
SDValue RHSOp = RHS.getOperand(i);
- // If these two elements can't be folded, bail out.
- if ((LHSOp.getOpcode() != ISD::UNDEF &&
- LHSOp.getOpcode() != ISD::Constant &&
- LHSOp.getOpcode() != ISD::ConstantFP) ||
- (RHSOp.getOpcode() != ISD::UNDEF &&
- RHSOp.getOpcode() != ISD::Constant &&
- RHSOp.getOpcode() != ISD::ConstantFP))
- break;
// Can't fold divide by zero.
if (N->getOpcode() == ISD::SDIV || N->getOpcode() == ISD::UDIV ||
@@ -10251,12 +10960,32 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
FoldOp.getOpcode() != ISD::ConstantFP)
break;
Ops.push_back(FoldOp);
- AddToWorkList(FoldOp.getNode());
+ AddToWorklist(FoldOp.getNode());
}
if (Ops.size() == LHS.getNumOperands())
- return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N),
- LHS.getValueType(), &Ops[0], Ops.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), LHS.getValueType(), Ops);
+ }
+
+ // Type legalization might introduce new shuffles in the DAG.
+ // Fold (VBinOp (shuffle (A, Undef, Mask)), (shuffle (B, Undef, Mask)))
+ // -> (shuffle (VBinOp (A, B)), Undef, Mask).
+ if (LegalTypes && isa<ShuffleVectorSDNode>(LHS) &&
+ isa<ShuffleVectorSDNode>(RHS) && LHS.hasOneUse() && RHS.hasOneUse() &&
+ LHS.getOperand(1).getOpcode() == ISD::UNDEF &&
+ RHS.getOperand(1).getOpcode() == ISD::UNDEF) {
+ ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(LHS);
+ ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(RHS);
+
+ if (SVN0->getMask().equals(SVN1->getMask())) {
+ EVT VT = N->getValueType(0);
+ SDValue UndefVector = LHS.getOperand(1);
+ SDValue NewBinOp = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
+ LHS.getOperand(0), RHS.getOperand(0));
+ AddUsersToWorklist(N);
+ return DAG.getVectorShuffle(VT, SDLoc(N), NewBinOp, UndefVector,
+ &SVN0->getMask()[0]);
+ }
}
return SDValue();
@@ -10285,14 +11014,13 @@ SDValue DAGCombiner::SimplifyVUnaryOp(SDNode *N) {
FoldOp.getOpcode() != ISD::ConstantFP)
break;
Ops.push_back(FoldOp);
- AddToWorkList(FoldOp.getNode());
+ AddToWorklist(FoldOp.getNode());
}
if (Ops.size() != N0.getNumOperands())
return SDValue();
- return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N),
- N0.getValueType(), &Ops[0], Ops.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), N0.getValueType(), Ops);
}
SDValue DAGCombiner::SimplifySelect(SDLoc DL, SDValue N0,
@@ -10313,7 +11041,7 @@ SDValue DAGCombiner::SimplifySelect(SDLoc DL, SDValue N0,
N0.getValueType(),
SCC.getOperand(0), SCC.getOperand(1),
SCC.getOperand(4));
- AddToWorkList(SETCC.getNode());
+ AddToWorklist(SETCC.getNode());
return DAG.getSelect(SDLoc(SCC), SCC.getValueType(),
SCC.getOperand(2), SCC.getOperand(3), SETCC);
}
@@ -10454,7 +11182,7 @@ SDValue DAGCombiner::SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1,
// Determine if the condition we're dealing with is constant
SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()),
N0, N1, CC, DL, false);
- if (SCC.getNode()) AddToWorkList(SCC.getNode());
+ if (SCC.getNode()) AddToWorklist(SCC.getNode());
ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode());
// fold select_cc true, x, y -> x
@@ -10494,7 +11222,9 @@ SDValue DAGCombiner::SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1,
if (ConstantFPSDNode *FV = dyn_cast<ConstantFPSDNode>(N3)) {
if (TLI.isTypeLegal(N2.getValueType()) &&
(TLI.getOperationAction(ISD::ConstantFP, N2.getValueType()) !=
- TargetLowering::Legal) &&
+ TargetLowering::Legal &&
+ !TLI.isFPImmLegal(TV->getValueAPF(), TV->getValueType(0)) &&
+ !TLI.isFPImmLegal(FV->getValueAPF(), FV->getValueType(0))) &&
// If both constants have multiple uses, then we won't need to do an
// extra load, they are likely around in registers for other users.
(TV->hasOneUse() || FV->hasOneUse())) {
@@ -10520,13 +11250,13 @@ SDValue DAGCombiner::SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1,
SDValue Cond = DAG.getSetCC(DL,
getSetCCResultType(N0.getValueType()),
N0, N1, CC);
- AddToWorkList(Cond.getNode());
+ AddToWorklist(Cond.getNode());
SDValue CstOffset = DAG.getSelect(DL, Zero.getValueType(),
Cond, One, Zero);
- AddToWorkList(CstOffset.getNode());
+ AddToWorklist(CstOffset.getNode());
CPIdx = DAG.getNode(ISD::ADD, DL, CPIdx.getValueType(), CPIdx,
CstOffset);
- AddToWorkList(CPIdx.getNode());
+ AddToWorklist(CPIdx.getNode());
return DAG.getLoad(TV->getValueType(0), DL, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(), false,
false, false, Alignment);
@@ -10551,11 +11281,11 @@ SDValue DAGCombiner::SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1,
getShiftAmountTy(N0.getValueType()));
SDValue Shift = DAG.getNode(ISD::SRL, SDLoc(N0),
XType, N0, ShCt);
- AddToWorkList(Shift.getNode());
+ AddToWorklist(Shift.getNode());
if (XType.bitsGT(AType)) {
Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
- AddToWorkList(Shift.getNode());
+ AddToWorklist(Shift.getNode());
}
return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
@@ -10565,11 +11295,11 @@ SDValue DAGCombiner::SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1,
XType, N0,
DAG.getConstant(XType.getSizeInBits()-1,
getShiftAmountTy(N0.getValueType())));
- AddToWorkList(Shift.getNode());
+ AddToWorklist(Shift.getNode());
if (XType.bitsGT(AType)) {
Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
- AddToWorkList(Shift.getNode());
+ AddToWorklist(Shift.getNode());
}
return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
@@ -10609,8 +11339,8 @@ SDValue DAGCombiner::SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1,
// fold select C, 16, 0 -> shl C, 4
if (N2C && N3C && N3C->isNullValue() && N2C->getAPIntValue().isPowerOf2() &&
- TLI.getBooleanContents(N0.getValueType().isVector()) ==
- TargetLowering::ZeroOrOneBooleanContent) {
+ TLI.getBooleanContents(N0.getValueType()) ==
+ TargetLowering::ZeroOrOneBooleanContent) {
// If the caller doesn't want us to simplify this into a zext of a compare,
// don't do it.
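The guard on ZeroOrOneBooleanContent just above matters because the fold only holds when the boolean is literally 0 or 1; with that representation, select C, 16, 0 really is a left shift. A one-line check of the arithmetic:

  #include <cassert>

  int main() {
    for (int C = 0; C <= 1; ++C)        // ZeroOrOneBooleanContent: C is 0 or 1
      assert((C ? 16 : 0) == (C << 4)); // select C, 16, 0  ==  shl C, 4
  }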
@@ -10639,8 +11369,8 @@ SDValue DAGCombiner::SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1,
N2.getValueType(), SCC);
}
- AddToWorkList(SCC.getNode());
- AddToWorkList(Temp.getNode());
+ AddToWorklist(SCC.getNode());
+ AddToWorklist(Temp.getNode());
if (N2C->getAPIntValue() == 1)
return Temp;
@@ -10701,7 +11431,7 @@ SDValue DAGCombiner::SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1,
// select_cc setlt X, 1, -X, X ->
// Y = sra (X, size(X)-1); xor (add (X, Y), Y)
if (N1C) {
- ConstantSDNode *SubC = NULL;
+ ConstantSDNode *SubC = nullptr;
if (((N1C->isNullValue() && (CC == ISD::SETGT || CC == ISD::SETGE)) ||
(N1C->isAllOnesValue() && CC == ISD::SETGT)) &&
N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1))
@@ -10719,8 +11449,8 @@ SDValue DAGCombiner::SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1,
getShiftAmountTy(N0.getValueType())));
SDValue Add = DAG.getNode(ISD::ADD, SDLoc(N0),
XType, N0, Shift);
- AddToWorkList(Shift.getNode());
- AddToWorkList(Add.getNode());
+ AddToWorklist(Shift.getNode());
+ AddToWorklist(Add.getNode());
return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
}
}
@@ -10742,26 +11472,42 @@ SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0,
/// multiplying by a magic number. See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDValue DAGCombiner::BuildSDIV(SDNode *N) {
+ ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
+ if (!C)
+ return SDValue();
+
+ // Avoid division by zero.
+ if (!C->getAPIntValue())
+ return SDValue();
+
std::vector<SDNode*> Built;
- SDValue S = TLI.BuildSDIV(N, DAG, LegalOperations, &Built);
+ SDValue S =
+ TLI.BuildSDIV(N, C->getAPIntValue(), DAG, LegalOperations, &Built);
- for (std::vector<SDNode*>::iterator ii = Built.begin(), ee = Built.end();
- ii != ee; ++ii)
- AddToWorkList(*ii);
+ for (SDNode *N : Built)
+ AddToWorklist(N);
return S;
}
-/// BuildUDIVSequence - Given an ISD::UDIV node expressing a divide by constant,
+/// BuildUDIV - Given an ISD::UDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number. See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDValue DAGCombiner::BuildUDIV(SDNode *N) {
+ ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
+ if (!C)
+ return SDValue();
+
+ // Avoid division by zero.
+ if (!C->getAPIntValue())
+ return SDValue();
+
std::vector<SDNode*> Built;
- SDValue S = TLI.BuildUDIV(N, DAG, LegalOperations, &Built);
+ SDValue S =
+ TLI.BuildUDIV(N, C->getAPIntValue(), DAG, LegalOperations, &Built);
- for (std::vector<SDNode*>::iterator ii = Built.begin(), ee = Built.end();
- ii != ee; ++ii)
- AddToWorkList(*ii);
+ for (SDNode *N : Built)
+ AddToWorklist(N);
return S;
}
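BuildSDIV/BuildUDIV expand a divide by a (now splat-aware) constant into a multiply by a precomputed magic value plus shifts, as the comments above note. A self-contained sketch of the idea for an unsigned 32-bit divide by 10; the constant 0xCCCCCCCD and the shift of 35 are the standard values for this particular divisor, not something taken from this patch:

  #include <cassert>
  #include <cstdint>

  static uint32_t udiv10(uint32_t X) {
    // (X * ceil(2^35 / 10)) >> 35 == X / 10 for every 32-bit X.
    return (uint32_t)(((uint64_t)X * UINT64_C(0xCCCCCCCD)) >> 35);
  }

  int main() {
    for (uint64_t X = 0; X <= UINT32_MAX; X += 654321)
      assert(udiv10((uint32_t)X) == (uint32_t)(X / 10));
  }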
@@ -10771,7 +11517,7 @@ SDValue DAGCombiner::BuildUDIV(SDNode *N) {
static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
const GlobalValue *&GV, const void *&CV) {
// Assume it is a primitive operation.
- Base = Ptr; Offset = 0; GV = 0; CV = 0;
+ Base = Ptr; Offset = 0; GV = nullptr; CV = nullptr;
// If it's an adding a simple constant then integrate the offset.
if (Base.getOpcode() == ISD::ADD) {
@@ -10805,31 +11551,27 @@ static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
/// isAlias - Return true if there is any possibility that the two addresses
/// overlap.
-bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1, bool IsVolatile1,
- const Value *SrcValue1, int SrcValueOffset1,
- unsigned SrcValueAlign1,
- const MDNode *TBAAInfo1,
- SDValue Ptr2, int64_t Size2, bool IsVolatile2,
- const Value *SrcValue2, int SrcValueOffset2,
- unsigned SrcValueAlign2,
- const MDNode *TBAAInfo2) const {
+bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const {
// If they are the same then they must be aliases.
- if (Ptr1 == Ptr2) return true;
+ if (Op0->getBasePtr() == Op1->getBasePtr()) return true;
// If they are both volatile then they cannot be reordered.
- if (IsVolatile1 && IsVolatile2) return true;
+ if (Op0->isVolatile() && Op1->isVolatile()) return true;
// Gather base node and offset information.
SDValue Base1, Base2;
int64_t Offset1, Offset2;
const GlobalValue *GV1, *GV2;
const void *CV1, *CV2;
- bool isFrameIndex1 = FindBaseOffset(Ptr1, Base1, Offset1, GV1, CV1);
- bool isFrameIndex2 = FindBaseOffset(Ptr2, Base2, Offset2, GV2, CV2);
+ bool isFrameIndex1 = FindBaseOffset(Op0->getBasePtr(),
+ Base1, Offset1, GV1, CV1);
+ bool isFrameIndex2 = FindBaseOffset(Op1->getBasePtr(),
+ Base2, Offset2, GV2, CV2);
// If they have a same base address then check to see if they overlap.
if (Base1 == Base2 || (GV1 && (GV1 == GV2)) || (CV1 && (CV1 == CV2)))
- return !((Offset1 + Size1) <= Offset2 || (Offset2 + Size2) <= Offset1);
+ return !((Offset1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= Offset2 ||
+ (Offset2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= Offset1);
// It is possible for different frame indices to alias each other, mostly
// when tail call optimization reuses return address slots for arguments.
@@ -10839,7 +11581,8 @@ bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1, bool IsVolatile1,
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
Offset1 += MFI->getObjectOffset(cast<FrameIndexSDNode>(Base1)->getIndex());
Offset2 += MFI->getObjectOffset(cast<FrameIndexSDNode>(Base2)->getIndex());
- return !((Offset1 + Size1) <= Offset2 || (Offset2 + Size2) <= Offset1);
+ return !((Offset1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= Offset2 ||
+ (Offset2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= Offset1);
}
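The repeated "getSizeInBits() >> 3" expressions above are just the access sizes in bytes, and the test itself is the usual half-open interval overlap check. A compact sketch with a hypothetical helper name:

  #include <cassert>
  #include <cstdint>

  // Accesses of Size1/Size2 bytes at Offset1/Offset2 from the same base
  // may alias unless one of them ends at or before the other begins.
  static bool mayOverlap(int64_t Offset1, int64_t Size1,
                         int64_t Offset2, int64_t Size2) {
    return !((Offset1 + Size1) <= Offset2 || (Offset2 + Size2) <= Offset1);
  }

  int main() {
    assert(!mayOverlap(0, 4, 4, 4)); // adjacent 4-byte accesses do not overlap
    assert(mayOverlap(0, 8, 4, 4));  // the second access lies inside the first
  }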
// Otherwise, if we know what the bases are, and they aren't identical, then
@@ -10851,28 +11594,44 @@ bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1, bool IsVolatile1,
// compared to the size and offset of the access, we may be able to prove they
// do not alias. This check is conservative for now to catch cases created by
// splitting vector types.
- if ((SrcValueAlign1 == SrcValueAlign2) &&
- (SrcValueOffset1 != SrcValueOffset2) &&
- (Size1 == Size2) && (SrcValueAlign1 > Size1)) {
- int64_t OffAlign1 = SrcValueOffset1 % SrcValueAlign1;
- int64_t OffAlign2 = SrcValueOffset2 % SrcValueAlign1;
+ if ((Op0->getOriginalAlignment() == Op1->getOriginalAlignment()) &&
+ (Op0->getSrcValueOffset() != Op1->getSrcValueOffset()) &&
+ (Op0->getMemoryVT().getSizeInBits() >> 3 ==
+ Op1->getMemoryVT().getSizeInBits() >> 3) &&
+ (Op0->getOriginalAlignment() > Op0->getMemoryVT().getSizeInBits()) >> 3) {
+ int64_t OffAlign1 = Op0->getSrcValueOffset() % Op0->getOriginalAlignment();
+ int64_t OffAlign2 = Op1->getSrcValueOffset() % Op1->getOriginalAlignment();
// There is no overlap between these relatively aligned accesses of similar
// size, return no alias.
- if ((OffAlign1 + Size1) <= OffAlign2 || (OffAlign2 + Size2) <= OffAlign1)
+ if ((OffAlign1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= OffAlign2 ||
+ (OffAlign2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= OffAlign1)
return false;
}
bool UseAA = CombinerGlobalAA.getNumOccurrences() > 0 ? CombinerGlobalAA :
TLI.getTargetMachine().getSubtarget<TargetSubtargetInfo>().useAA();
- if (UseAA && SrcValue1 && SrcValue2) {
+#ifndef NDEBUG
+ if (CombinerAAOnlyFunc.getNumOccurrences() &&
+ CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
+ UseAA = false;
+#endif
+ if (UseAA &&
+ Op0->getMemOperand()->getValue() && Op1->getMemOperand()->getValue()) {
// Use alias analysis information.
- int64_t MinOffset = std::min(SrcValueOffset1, SrcValueOffset2);
- int64_t Overlap1 = Size1 + SrcValueOffset1 - MinOffset;
- int64_t Overlap2 = Size2 + SrcValueOffset2 - MinOffset;
+ int64_t MinOffset = std::min(Op0->getSrcValueOffset(),
+ Op1->getSrcValueOffset());
+ int64_t Overlap1 = (Op0->getMemoryVT().getSizeInBits() >> 3) +
+ Op0->getSrcValueOffset() - MinOffset;
+ int64_t Overlap2 = (Op1->getMemoryVT().getSizeInBits() >> 3) +
+ Op1->getSrcValueOffset() - MinOffset;
AliasAnalysis::AliasResult AAResult =
- AA.alias(AliasAnalysis::Location(SrcValue1, Overlap1, TBAAInfo1),
- AliasAnalysis::Location(SrcValue2, Overlap2, TBAAInfo2));
+ AA.alias(AliasAnalysis::Location(Op0->getMemOperand()->getValue(),
+ Overlap1,
+ UseTBAA ? Op0->getTBAAInfo() : nullptr),
+ AliasAnalysis::Location(Op1->getMemOperand()->getValue(),
+ Overlap2,
+ UseTBAA ? Op1->getTBAAInfo() : nullptr));
if (AAResult == AliasAnalysis::NoAlias)
return false;
}
@@ -10881,44 +11640,6 @@ bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1, bool IsVolatile1,
return true;
}
-bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) {
- SDValue Ptr0, Ptr1;
- int64_t Size0, Size1;
- bool IsVolatile0, IsVolatile1;
- const Value *SrcValue0, *SrcValue1;
- int SrcValueOffset0, SrcValueOffset1;
- unsigned SrcValueAlign0, SrcValueAlign1;
- const MDNode *SrcTBAAInfo0, *SrcTBAAInfo1;
- FindAliasInfo(Op0, Ptr0, Size0, IsVolatile0, SrcValue0, SrcValueOffset0,
- SrcValueAlign0, SrcTBAAInfo0);
- FindAliasInfo(Op1, Ptr1, Size1, IsVolatile1, SrcValue1, SrcValueOffset1,
- SrcValueAlign1, SrcTBAAInfo1);
- return isAlias(Ptr0, Size0, IsVolatile0, SrcValue0, SrcValueOffset0,
- SrcValueAlign0, SrcTBAAInfo0,
- Ptr1, Size1, IsVolatile1, SrcValue1, SrcValueOffset1,
- SrcValueAlign1, SrcTBAAInfo1);
-}
-
-/// FindAliasInfo - Extracts the relevant alias information from the memory
-/// node. Returns true if the operand was a nonvolatile load.
-bool DAGCombiner::FindAliasInfo(SDNode *N,
- SDValue &Ptr, int64_t &Size, bool &IsVolatile,
- const Value *&SrcValue,
- int &SrcValueOffset,
- unsigned &SrcValueAlign,
- const MDNode *&TBAAInfo) const {
- LSBaseSDNode *LS = cast<LSBaseSDNode>(N);
-
- Ptr = LS->getBasePtr();
- Size = LS->getMemoryVT().getSizeInBits() >> 3;
- IsVolatile = LS->isVolatile();
- SrcValue = LS->getSrcValue();
- SrcValueOffset = LS->getSrcValueOffset();
- SrcValueAlign = LS->getOriginalAlignment();
- TBAAInfo = LS->getTBAAInfo();
- return isa<LoadSDNode>(LS) && !IsVolatile;
-}
-
/// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
/// looking for aliasing nodes and adding them to the Aliases vector.
void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
@@ -10927,15 +11648,7 @@ void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
SmallPtrSet<SDNode *, 16> Visited; // Visited node set.
// Get alias information for node.
- SDValue Ptr;
- int64_t Size;
- bool IsVolatile;
- const Value *SrcValue;
- int SrcValueOffset;
- unsigned SrcValueAlign;
- const MDNode *SrcTBAAInfo;
- bool IsLoad = FindAliasInfo(N, Ptr, Size, IsVolatile, SrcValue,
- SrcValueOffset, SrcValueAlign, SrcTBAAInfo);
+ bool IsLoad = isa<LoadSDNode>(N) && !cast<LSBaseSDNode>(N)->isVolatile();
// Starting off.
Chains.push_back(OriginalChain);
@@ -10959,7 +11672,7 @@ void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
if (Depth > 6 || Aliases.size() == 2) {
Aliases.clear();
Aliases.push_back(OriginalChain);
- break;
+ return;
}
// Don't bother if we've been before.
@@ -10974,24 +11687,12 @@ void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
case ISD::LOAD:
case ISD::STORE: {
// Get alias information for Chain.
- SDValue OpPtr;
- int64_t OpSize;
- bool OpIsVolatile;
- const Value *OpSrcValue;
- int OpSrcValueOffset;
- unsigned OpSrcValueAlign;
- const MDNode *OpSrcTBAAInfo;
- bool IsOpLoad = FindAliasInfo(Chain.getNode(), OpPtr, OpSize,
- OpIsVolatile, OpSrcValue, OpSrcValueOffset,
- OpSrcValueAlign,
- OpSrcTBAAInfo);
+ bool IsOpLoad = isa<LoadSDNode>(Chain.getNode()) &&
+ !cast<LSBaseSDNode>(Chain.getNode())->isVolatile();
// If chain is alias then stop here.
if (!(IsLoad && IsOpLoad) &&
- isAlias(Ptr, Size, IsVolatile, SrcValue, SrcValueOffset,
- SrcValueAlign, SrcTBAAInfo,
- OpPtr, OpSize, OpIsVolatile, OpSrcValue, OpSrcValueOffset,
- OpSrcValueAlign, OpSrcTBAAInfo)) {
+ isAlias(cast<LSBaseSDNode>(N), cast<LSBaseSDNode>(Chain.getNode()))) {
Aliases.push_back(Chain);
} else {
// Look further up the chain.
@@ -11021,6 +11722,63 @@ void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
break;
}
}
+
+ // We need to be careful here to also search for aliases through the
+ // value operand of a store, etc. Consider the following situation:
+ // Token1 = ...
+ // L1 = load Token1, %52
+ // S1 = store Token1, L1, %51
+ // L2 = load Token1, %52+8
+ // S2 = store Token1, L2, %51+8
+ // Token2 = Token(S1, S2)
+ // L3 = load Token2, %53
+ // S3 = store Token2, L3, %52
+ // L4 = load Token2, %53+8
+ // S4 = store Token2, L4, %52+8
+ // If we search for aliases of S3 (which loads address %52), and we look
+ // only through the chain, then we'll miss the trivial dependence on L1
+ // (which also loads from %52). We then might change all loads and
+ // stores to use Token1 as their chain operand, which could result in
+ // copying %53 into %52 before copying %52 into %51 (which should
+ // happen first).
+ //
+ // The problem is, however, that searching for such data dependencies
+ // can become expensive, and the cost is not directly related to the
+ // chain depth. Instead, we'll rule out such configurations here by
+ // insisting that we've visited all chain users (except for users
+ // of the original chain, which is not necessary). When doing this,
+ // we need to look through nodes we don't care about (otherwise, things
+ // like register copies will interfere with trivial cases).
+
+ SmallVector<const SDNode *, 16> Worklist;
+ for (SmallPtrSet<SDNode *, 16>::iterator I = Visited.begin(),
+ IE = Visited.end(); I != IE; ++I)
+ if (*I != OriginalChain.getNode())
+ Worklist.push_back(*I);
+
+ while (!Worklist.empty()) {
+ const SDNode *M = Worklist.pop_back_val();
+
+ // We have already visited M, and want to make sure we've visited any uses
+ // of M that we care about. For uses that we've not visited, and don't
+ // care about, queue them to the worklist.
+
+ for (SDNode::use_iterator UI = M->use_begin(),
+ UIE = M->use_end(); UI != UIE; ++UI)
+ if (UI.getUse().getValueType() == MVT::Other && Visited.insert(*UI)) {
+ if (isa<MemIntrinsicSDNode>(*UI) || isa<MemSDNode>(*UI)) {
+ // We've not visited this use, and we care about it (it could have an
+ // ordering dependency with the original node).
+ Aliases.clear();
+ Aliases.push_back(OriginalChain);
+ return;
+ }
+
+ // We've not visited this use, but we don't care about it. Mark it as
+ // visited and enqueue it to the worklist.
+ Worklist.push_back(*UI);
+ }
+ }
}
/// FindBetterChain - Walk up chain skipping non-aliasing memory nodes, looking
@@ -11040,8 +11798,7 @@ SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) {
return Aliases[0];
// Construct a custom tailored token factor.
- return DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other,
- &Aliases[0], Aliases.size());
+ return DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Aliases);
}
// SelectionDAG::Combine - This is the entry point for the file.
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index a6f7461..ad75e91 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -39,18 +39,21 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "isel"
+#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
@@ -64,12 +67,29 @@
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
+#define DEBUG_TYPE "isel"
+
STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
"target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
"target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
+/// \brief Set CallLoweringInfo attribute flags based on a call instruction
+/// and called function attributes.
+void FastISel::ArgListEntry::setAttributes(ImmutableCallSite *CS,
+ unsigned AttrIdx) {
+ isSExt = CS->paramHasAttr(AttrIdx, Attribute::SExt);
+ isZExt = CS->paramHasAttr(AttrIdx, Attribute::ZExt);
+ isInReg = CS->paramHasAttr(AttrIdx, Attribute::InReg);
+ isSRet = CS->paramHasAttr(AttrIdx, Attribute::StructRet);
+ isNest = CS->paramHasAttr(AttrIdx, Attribute::Nest);
+ isByVal = CS->paramHasAttr(AttrIdx, Attribute::ByVal);
+ isInAlloca = CS->paramHasAttr(AttrIdx, Attribute::InAlloca);
+ isReturned = CS->paramHasAttr(AttrIdx, Attribute::Returned);
+ Alignment = CS->getParamAlignment(AttrIdx);
+}
+
/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
@@ -79,7 +99,7 @@ void FastISel::startNewBlock() {
// Instructions are appended to FuncInfo.MBB. If the basic block already
// contains labels or copies, use the last instruction as the last local
// value.
- EmitStartPt = 0;
+ EmitStartPt = nullptr;
if (!FuncInfo.MBB->empty())
EmitStartPt = &FuncInfo.MBB->back();
LastLocalValue = EmitStartPt;
@@ -118,7 +138,7 @@ bool FastISel::hasTrivialKill(const Value *V) const {
// No-op casts are trivially coalesced by fast-isel.
if (const CastInst *Cast = dyn_cast<CastInst>(I))
- if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
+ if (Cast->isNoopCast(DL.getIntPtrType(Cast->getContext())) &&
!hasTrivialKill(Cast->getOperand(0)))
return false;
@@ -133,7 +153,7 @@ bool FastISel::hasTrivialKill(const Value *V) const {
!(I->getOpcode() == Instruction::BitCast ||
I->getOpcode() == Instruction::PtrToInt ||
I->getOpcode() == Instruction::IntToPtr) &&
- cast<Instruction>(*I->use_begin())->getParent() == I->getParent();
+ cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
}
unsigned FastISel::getRegForValue(const Value *V) {
@@ -192,7 +212,7 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
// Translate this as an integer zero so that it can be
// local-CSE'd with actual integer zeros.
Reg =
- getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
+ getRegForValue(Constant::getNullValue(DL.getIntPtrType(V->getContext())));
} else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
if (CF->isNullValue()) {
Reg = TargetMaterializeFloatZero(CF);
@@ -229,7 +249,7 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
Reg = lookUpRegForValue(Op);
} else if (isa<UndefValue>(V)) {
Reg = createResultReg(TLI.getRegClassFor(VT));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
}
@@ -335,20 +355,20 @@ void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
FastISel::SavePoint FastISel::enterLocalValueArea() {
MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
- DebugLoc OldDL = DL;
+ DebugLoc OldDL = DbgLoc;
recomputeInsertPt();
- DL = DebugLoc();
+ DbgLoc = DebugLoc();
SavePoint SP = { OldInsertPt, OldDL };
return SP;
}
void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
- LastLocalValue = llvm::prior(FuncInfo.InsertPt);
+ LastLocalValue = std::prev(FuncInfo.InsertPt);
// Restore the previous insert position.
FuncInfo.InsertPt = OldInsertPt.InsertPt;
- DL = OldInsertPt.DL;
+ DbgLoc = OldInsertPt.DL;
}
/// SelectBinaryOp - Select and emit code for a binary operator instruction,
@@ -484,7 +504,7 @@ bool FastISel::SelectGetElementPtr(const User *I) {
unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
if (Field) {
// N = N + Offset
- TotalOffs += TD.getStructLayout(StTy)->getElementOffset(Field);
+ TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
if (TotalOffs >= MaxOffs) {
N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
if (N == 0)
@@ -503,7 +523,7 @@ bool FastISel::SelectGetElementPtr(const User *I) {
if (CI->isZero()) continue;
// N = N + Offset
TotalOffs +=
- TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
+ DL.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
if (TotalOffs >= MaxOffs) {
N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
if (N == 0)
@@ -524,7 +544,7 @@ bool FastISel::SelectGetElementPtr(const User *I) {
}
// N = N + Idx * ElementSize;
- uint64_t ElementSize = TD.getTypeAllocSize(Ty);
+ uint64_t ElementSize = DL.getTypeAllocSize(Ty);
std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
unsigned IdxN = Pair.first;
bool IdxNIsKill = Pair.second;
@@ -557,11 +577,472 @@ bool FastISel::SelectGetElementPtr(const User *I) {
return true;
}
+/// \brief Add a stackmap or patchpoint intrinsic call's live variable operands
+/// to a stackmap or patchpoint machine instruction.
+bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
+ const CallInst *CI, unsigned StartIdx) {
+ for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
+ Value *Val = CI->getArgOperand(i);
+ // Check for constants and encode them with a StackMaps::ConstantOp prefix.
+ if (auto *C = dyn_cast<ConstantInt>(Val)) {
+ Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
+ Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
+ } else if (isa<ConstantPointerNull>(Val)) {
+ Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
+ Ops.push_back(MachineOperand::CreateImm(0));
+ } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
+ // Values coming from a stack location also require a special encoding,
+ // but that is added later on by the target specific frame index
+ // elimination implementation.
+ auto SI = FuncInfo.StaticAllocaMap.find(AI);
+ if (SI != FuncInfo.StaticAllocaMap.end())
+ Ops.push_back(MachineOperand::CreateFI(SI->second));
+ else
+ return false;
+ } else {
+ unsigned Reg = getRegForValue(Val);
+ if (Reg == 0)
+ return false;
+ Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
+ }
+ }
+
+ return true;
+}
+
+bool FastISel::SelectStackmap(const CallInst *I) {
+ // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
+ // [live variables...])
+ assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
+ "Stackmap cannot return a value.");
+
+ // The stackmap intrinsic only records the live variables (the arguments
+ // passed to it) and emits NOPS (if requested). Unlike the patchpoint
+ // intrinsic, this won't be lowered to a function call. This means we don't
+ // have to worry about calling conventions and target-specific lowering code.
+ // Instead we perform the call lowering right here.
+ //
+ // CALLSEQ_START(0)
+ // STACKMAP(id, nbytes, ...)
+ // CALLSEQ_END(0, 0)
+ //
+ SmallVector<MachineOperand, 32> Ops;
+
+ // Add the <id> and <numBytes> constants.
+ assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
+ "Expected a constant integer.");
+ const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
+ Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
+
+ assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
+ "Expected a constant integer.");
+ const auto *NumBytes =
+ cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
+ Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
+
+ // Push live variables for the stack map (skipping the first two arguments
+ // <id> and <numBytes>).
+ if (!addStackMapLiveVars(Ops, I, 2))
+ return false;
+
+ // We are not adding any register mask info here, because the stackmap doesn't
+ // clobber anything.
+
+ // Add scratch registers as implicit def and early clobber.
+ CallingConv::ID CC = I->getCallingConv();
+ const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
+ for (unsigned i = 0; ScratchRegs[i]; ++i)
+ Ops.push_back(MachineOperand::CreateReg(
+ ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
+ /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
+
+ // Issue CALLSEQ_START
+ unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
+ .addImm(0);
+
+ // Issue STACKMAP.
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::STACKMAP));
+ for (auto const &MO : Ops)
+ MIB.addOperand(MO);
+
+ // Issue CALLSEQ_END
+ unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
+ .addImm(0).addImm(0);
+
+ // Inform the Frame Information that we have a stackmap in this function.
+ FuncInfo.MF->getFrameInfo()->setHasStackMap();
+
+ return true;
+}
+
+/// \brief Lower an argument list according to the target calling convention.
+///
+/// This is a helper for lowering intrinsics that follow a target calling
+/// convention or require stack pointer adjustment. Only a subset of the
+/// intrinsic's operands need to participate in the calling convention.
+bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
+ unsigned NumArgs, const Value *Callee,
+ bool ForceRetVoidTy, CallLoweringInfo &CLI) {
+ ArgListTy Args;
+ Args.reserve(NumArgs);
+
+ // Populate the argument list.
+ // Attributes for args start at offset 1, after the return attribute.
+ ImmutableCallSite CS(CI);
+ for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1;
+ ArgI != ArgE; ++ArgI) {
+ Value *V = CI->getOperand(ArgI);
+
+ assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
+
+ ArgListEntry Entry;
+ Entry.Val = V;
+ Entry.Ty = V->getType();
+ Entry.setAttributes(&CS, AttrI);
+ Args.push_back(Entry);
+ }
+
+ Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
+ : CI->getType();
+ CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);
+
+ return LowerCallTo(CLI);
+}
+
+bool FastISel::SelectPatchpoint(const CallInst *I) {
+ // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
+ // i32 <numBytes>,
+ // i8* <target>,
+ // i32 <numArgs>,
+ // [Args...],
+ // [live variables...])
+ CallingConv::ID CC = I->getCallingConv();
+ bool IsAnyRegCC = CC == CallingConv::AnyReg;
+ bool HasDef = !I->getType()->isVoidTy();
+ Value *Callee = I->getOperand(PatchPointOpers::TargetPos);
+
+ // Get the real number of arguments participating in the call <numArgs>
+ assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
+ "Expected a constant integer.");
+ const auto *NumArgsVal =
+ cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
+ unsigned NumArgs = NumArgsVal->getZExtValue();
+
+ // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
+ // This includes all meta-operands up to but not including CC.
+ unsigned NumMetaOpers = PatchPointOpers::CCPos;
+ assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
+ "Not enough arguments provided to the patchpoint intrinsic");
+
+ // For AnyRegCC the arguments are lowered later on manually.
+ unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
+ CallLoweringInfo CLI;
+ if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
+ return false;
+
+ assert(CLI.Call && "No call instruction specified.");
+
+ SmallVector<MachineOperand, 32> Ops;
+
+ // Add an explicit result reg if we use the anyreg calling convention.
+ if (IsAnyRegCC && HasDef) {
+ assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
+ CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
+ CLI.NumResultRegs = 1;
+ Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
+ }
+
+ // Add the <id> and <numBytes> constants.
+ assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
+ "Expected a constant integer.");
+ const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
+ Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
+
+ assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
+ "Expected a constant integer.");
+ const auto *NumBytes =
+ cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
+ Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
+
+ // Assume that the callee is a constant address or null pointer.
+ // FIXME: handle function symbols in the future.
+ unsigned CalleeAddr;
+ if (const auto *C = dyn_cast<IntToPtrInst>(Callee))
+ CalleeAddr = cast<ConstantInt>(C->getOperand(0))->getZExtValue();
+ else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
+ if (C->getOpcode() == Instruction::IntToPtr)
+ CalleeAddr = cast<ConstantInt>(C->getOperand(0))->getZExtValue();
+ else
+ llvm_unreachable("Unsupported ConstantExpr.");
+ } else if (isa<ConstantPointerNull>(Callee))
+ CalleeAddr = 0;
+ else
+ llvm_unreachable("Unsupported callee address.");
+
+ Ops.push_back(MachineOperand::CreateImm(CalleeAddr));
+
+ // Adjust <numArgs> to account for any arguments that have been passed on
+ // the stack instead.
+ unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
+ Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));
+
+ // Add the calling convention
+ Ops.push_back(MachineOperand::CreateImm((unsigned)CC));
+
+ // Add the arguments we omitted previously. The register allocator should
+ // place these in any free register.
+ if (IsAnyRegCC) {
+ for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
+ unsigned Reg = getRegForValue(I->getArgOperand(i));
+ if (!Reg)
+ return false;
+ Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
+ }
+ }
+
+ // Push the arguments from the call instruction.
+ for (auto Reg : CLI.OutRegs)
+ Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
+
+ // Push live variables for the stack map.
+ if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
+ return false;
+
+ // Push the register mask info.
+ Ops.push_back(MachineOperand::CreateRegMask(TRI.getCallPreservedMask(CC)));
+
+ // Add scratch registers as implicit def and early clobber.
+ const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
+ for (unsigned i = 0; ScratchRegs[i]; ++i)
+ Ops.push_back(MachineOperand::CreateReg(
+ ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
+ /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
+
+ // Add implicit defs (return values).
+ for (auto Reg : CLI.InRegs)
+ Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
+ /*IsImpl=*/true));
+
+ // Insert the patchpoint instruction before the call generated by the target.
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
+ TII.get(TargetOpcode::PATCHPOINT));
+
+ for (auto &MO : Ops)
+ MIB.addOperand(MO);
+
+ MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);
+
+ // Delete the original call instruction.
+ CLI.Call->eraseFromParent();
+
+ // Inform the Frame Information that we have a patchpoint in this function.
+ FuncInfo.MF->getFrameInfo()->setHasPatchPoint();
+
+ if (CLI.NumResultRegs)
+ UpdateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
+ return true;
+}
+
+/// Returns an AttributeSet representing the attributes applied to the return
+/// value of the given call.
+static AttributeSet getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
+ SmallVector<Attribute::AttrKind, 2> Attrs;
+ if (CLI.RetSExt)
+ Attrs.push_back(Attribute::SExt);
+ if (CLI.RetZExt)
+ Attrs.push_back(Attribute::ZExt);
+ if (CLI.IsInReg)
+ Attrs.push_back(Attribute::InReg);
+
+ return AttributeSet::get(CLI.RetTy->getContext(), AttributeSet::ReturnIndex,
+ Attrs);
+}
+
+bool FastISel::LowerCallTo(const CallInst *CI, const char *SymName,
+ unsigned NumArgs) {
+ ImmutableCallSite CS(CI);
+
+ PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
+ FunctionType *FTy = cast<FunctionType>(PT->getElementType());
+ Type *RetTy = FTy->getReturnType();
+
+ ArgListTy Args;
+ Args.reserve(NumArgs);
+
+ // Populate the argument list.
+ // Attributes for args start at offset 1, after the return attribute.
+ for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
+ Value *V = CI->getOperand(ArgI);
+
+ assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
+
+ ArgListEntry Entry;
+ Entry.Val = V;
+ Entry.Ty = V->getType();
+ Entry.setAttributes(&CS, ArgI + 1);
+ Args.push_back(Entry);
+ }
+
+ CallLoweringInfo CLI;
+ CLI.setCallee(RetTy, FTy, SymName, std::move(Args), CS, NumArgs);
+
+ return LowerCallTo(CLI);
+}
+
+bool FastISel::LowerCallTo(CallLoweringInfo &CLI) {
+ // Handle the incoming return values from the call.
+ CLI.clearIns();
+ SmallVector<EVT, 4> RetTys;
+ ComputeValueVTs(TLI, CLI.RetTy, RetTys);
+
+ SmallVector<ISD::OutputArg, 4> Outs;
+ GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI);
+
+ bool CanLowerReturn = TLI.CanLowerReturn(CLI.CallConv, *FuncInfo.MF,
+ CLI.IsVarArg, Outs,
+ CLI.RetTy->getContext());
+
+ // FIXME: sret demotion isn't supported yet - bail out.
+ if (!CanLowerReturn)
+ return false;
+
+ for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
+ EVT VT = RetTys[I];
+ MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
+ unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
+ for (unsigned i = 0; i != NumRegs; ++i) {
+ ISD::InputArg MyFlags;
+ MyFlags.VT = RegisterVT;
+ MyFlags.ArgVT = VT;
+ MyFlags.Used = CLI.IsReturnValueUsed;
+ if (CLI.RetSExt)
+ MyFlags.Flags.setSExt();
+ if (CLI.RetZExt)
+ MyFlags.Flags.setZExt();
+ if (CLI.IsInReg)
+ MyFlags.Flags.setInReg();
+ CLI.Ins.push_back(MyFlags);
+ }
+ }
+
+ // Handle all of the outgoing arguments.
+ CLI.clearOuts();
+ for (auto &Arg : CLI.getArgs()) {
+ Type *FinalType = Arg.Ty;
+ if (Arg.isByVal)
+ FinalType = cast<PointerType>(Arg.Ty)->getElementType();
+ bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
+ FinalType, CLI.CallConv, CLI.IsVarArg);
+
+ ISD::ArgFlagsTy Flags;
+ if (Arg.isZExt)
+ Flags.setZExt();
+ if (Arg.isSExt)
+ Flags.setSExt();
+ if (Arg.isInReg)
+ Flags.setInReg();
+ if (Arg.isSRet)
+ Flags.setSRet();
+ if (Arg.isByVal)
+ Flags.setByVal();
+ if (Arg.isInAlloca) {
+ Flags.setInAlloca();
+ // Set the byval flag for CCAssignFn callbacks that don't know about
+ // inalloca. This way we can know how many bytes we should've allocated
+ // and how many bytes a callee cleanup function will pop. If we port
+ // inalloca to more targets, we'll have to add custom inalloca handling in
+ // the various CC lowering callbacks.
+ Flags.setByVal();
+ }
+ if (Arg.isByVal || Arg.isInAlloca) {
+ PointerType *Ty = cast<PointerType>(Arg.Ty);
+ Type *ElementTy = Ty->getElementType();
+ unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
+ // For ByVal, alignment should come from FE. BE will guess if this info is
+ // not there, but there are cases it cannot get right.
+ unsigned FrameAlign = Arg.Alignment;
+ if (!FrameAlign)
+ FrameAlign = TLI.getByValTypeAlignment(ElementTy);
+ Flags.setByValSize(FrameSize);
+ Flags.setByValAlign(FrameAlign);
+ }
+ if (Arg.isNest)
+ Flags.setNest();
+ if (NeedsRegBlock)
+ Flags.setInConsecutiveRegs();
+ unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
+ Flags.setOrigAlign(OriginalAlignment);
+
+ CLI.OutVals.push_back(Arg.Val);
+ CLI.OutFlags.push_back(Flags);
+ }
+
+ if (!FastLowerCall(CLI))
+ return false;
+
+ // Set all unused physreg defs as dead.
+ assert(CLI.Call && "No call instruction specified.");
+ CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);
+
+ if (CLI.NumResultRegs && CLI.CS)
+ UpdateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);
+
+ return true;
+}
+
+bool FastISel::LowerCall(const CallInst *CI) {
+ ImmutableCallSite CS(CI);
+
+ PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
+ FunctionType *FuncTy = cast<FunctionType>(PT->getElementType());
+ Type *RetTy = FuncTy->getReturnType();
+
+ ArgListTy Args;
+ ArgListEntry Entry;
+ Args.reserve(CS.arg_size());
+
+ for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
+ i != e; ++i) {
+ Value *V = *i;
+
+ // Skip empty types
+ if (V->getType()->isEmptyTy())
+ continue;
+
+ Entry.Val = V;
+ Entry.Ty = V->getType();
+
+ // Skip the first return-type Attribute to get to params.
+ Entry.setAttributes(&CS, i - CS.arg_begin() + 1);
+ Args.push_back(Entry);
+ }
+
+ // Check if target-independent constraints permit a tail call here.
+ // Target-dependent constraints are checked within FastLowerCall.
+ bool IsTailCall = CI->isTailCall();
+ if (IsTailCall && !isInTailCallPosition(CS, TM))
+ IsTailCall = false;
+
+ CallLoweringInfo CLI;
+ CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
+ .setTailCall(IsTailCall);
+
+ return LowerCallTo(CLI);
+}
+
bool FastISel::SelectCall(const User *I) {
const CallInst *Call = cast<CallInst>(I);
// Handle simple inline asms.
if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
+ // If the inline asm has side effects, then make sure that no local value
+ // lives across by flushing the local value map.
+ if (IA->hasSideEffects())
+ flushLocalValueMap();
+
// Don't attempt to handle constraints.
if (!IA->getConstraintString().empty())
return false;
@@ -572,7 +1053,7 @@ bool FastISel::SelectCall(const User *I) {
if (IA->isAlignStack())
ExtraInfo |= InlineAsm::Extra_IsAlignStack;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::INLINEASM))
.addExternalSymbol(IA->getAsmString().c_str())
.addImm(ExtraInfo);
@@ -582,26 +1063,37 @@ bool FastISel::SelectCall(const User *I) {
MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
ComputeUsesVAFloatArgument(*Call, &MMI);
- const Function *F = Call->getCalledFunction();
- if (!F) return false;
+ // Handle intrinsic function calls.
+ if (const auto *II = dyn_cast<IntrinsicInst>(Call))
+ return SelectIntrinsicCall(II);
- // Handle selected intrinsic function calls.
- switch (F->getIntrinsicID()) {
+ // Usually, it does not make sense to initialize a value,
+ // make an unrelated function call and use the value, because
+ // it tends to be spilled on the stack. So, we move the pointer
+ // to the last local value to the beginning of the block, so that
+ // all the values which have already been materialized
+ // appear after the call. It also makes sense to skip intrinsics
+ // since they tend to be inlined.
+ flushLocalValueMap();
+
+ return LowerCall(Call);
+}
+
+bool FastISel::SelectIntrinsicCall(const IntrinsicInst *II) {
+ switch (II->getIntrinsicID()) {
default: break;
- // At -O0 we don't care about the lifetime intrinsics.
+ // At -O0 we don't care about the lifetime intrinsics.
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
- // The donothing intrinsic does, well, nothing.
+ // The donothing intrinsic does, well, nothing.
case Intrinsic::donothing:
return true;
-
case Intrinsic::dbg_declare: {
- const DbgDeclareInst *DI = cast<DbgDeclareInst>(Call);
+ const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
DIVariable DIVar(DI->getVariable());
assert((!DIVar || DIVar.isVariable()) &&
- "Variable in DbgDeclareInst should be either null or a DIVariable.");
- if (!DIVar ||
- !FuncInfo.MF->getMMI().hasDebugInfo()) {
+ "Variable in DbgDeclareInst should be either null or a DIVariable.");
+ if (!DIVar || !FuncInfo.MF->getMMI().hasDebugInfo()) {
DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
return true;
}
@@ -618,7 +1110,7 @@ bool FastISel::SelectCall(const User *I) {
// Some arguments' frame index is recorded during argument lowering.
Offset = FuncInfo.getArgumentFrameIndex(Arg);
if (Offset)
- Op = MachineOperand::CreateFI(Offset);
+ Op = MachineOperand::CreateFI(Offset);
if (!Op)
if (unsigned Reg = lookUpRegForValue(Address))
Op = MachineOperand::CreateReg(Reg, false);
@@ -643,15 +1135,15 @@ bool FastISel::SelectCall(const User *I) {
if (Op) {
if (Op->isReg()) {
Op->setIsDebug(true);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::DBG_VALUE), false, Op->getReg(), 0,
DI->getVariable());
} else
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::DBG_VALUE))
- .addOperand(*Op)
- .addImm(0)
- .addMetadata(DI->getVariable());
+ .addOperand(*Op)
+ .addImm(0)
+ .addMetadata(DI->getVariable());
} else {
// We can't yet handle anything else here because it would require
// generating code, thus altering codegen because of debug info.
@@ -661,32 +1153,32 @@ bool FastISel::SelectCall(const User *I) {
}
case Intrinsic::dbg_value: {
// This form of DBG_VALUE is target-independent.
- const DbgValueInst *DI = cast<DbgValueInst>(Call);
+ const DbgValueInst *DI = cast<DbgValueInst>(II);
const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
const Value *V = DI->getValue();
if (!V) {
// Currently the optimizer can produce this; insert an undef to
// help debugging. Probably the optimizer should not do this.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(0U).addImm(DI->getOffset())
.addMetadata(DI->getVariable());
} else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
if (CI->getBitWidth() > 64)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addCImm(CI).addImm(DI->getOffset())
.addMetadata(DI->getVariable());
else
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addImm(CI->getZExtValue()).addImm(DI->getOffset())
.addMetadata(DI->getVariable());
} else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addFPImm(CF).addImm(DI->getOffset())
.addMetadata(DI->getVariable());
} else if (unsigned Reg = lookUpRegForValue(V)) {
// FIXME: This does not handle register-indirect values at offset 0.
bool IsIndirect = DI->getOffset() != 0;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, IsIndirect,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect,
Reg, DI->getOffset(), DI->getVariable());
} else {
// We can't yet handle anything else here because it would require
@@ -696,36 +1188,30 @@ bool FastISel::SelectCall(const User *I) {
return true;
}
case Intrinsic::objectsize: {
- ConstantInt *CI = cast<ConstantInt>(Call->getArgOperand(1));
+ ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
unsigned long long Res = CI->isZero() ? -1ULL : 0;
- Constant *ResCI = ConstantInt::get(Call->getType(), Res);
+ Constant *ResCI = ConstantInt::get(II->getType(), Res);
unsigned ResultReg = getRegForValue(ResCI);
if (ResultReg == 0)
return false;
- UpdateValueMap(Call, ResultReg);
+ UpdateValueMap(II, ResultReg);
return true;
}
case Intrinsic::expect: {
- unsigned ResultReg = getRegForValue(Call->getArgOperand(0));
+ unsigned ResultReg = getRegForValue(II->getArgOperand(0));
if (ResultReg == 0)
return false;
- UpdateValueMap(Call, ResultReg);
+ UpdateValueMap(II, ResultReg);
return true;
}
+ case Intrinsic::experimental_stackmap:
+ return SelectStackmap(II);
+ case Intrinsic::experimental_patchpoint_void:
+ case Intrinsic::experimental_patchpoint_i64:
+ return SelectPatchpoint(II);
}
- // Usually, it does not make sense to initialize a value,
- // make an unrelated function call and use the value, because
- // it tends to be spilled on the stack. So, we move the pointer
- // to the last local value to the beginning of the block, so that
- // all the values which have already been materialized,
- // appear after the call. It also makes sense to skip intrinsics
- // since they tend to be inlined.
- if (!isa<IntrinsicInst>(Call))
- flushLocalValueMap();
-
- // An arbitrary call. Bail.
- return false;
+ return FastLowerIntrinsicCall(II);
}
bool FastISel::SelectCast(const User *I, unsigned Opcode) {
@@ -798,8 +1284,8 @@ bool FastISel::SelectBitCast(const User *I) {
// Don't attempt a cross-class copy. It will likely fail.
if (SrcClass == DstClass) {
ResultReg = createResultReg(DstClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(Op0);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
}
}
@@ -822,25 +1308,31 @@ FastISel::SelectInstruction(const Instruction *I) {
if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
return false;
- DL = I->getDebugLoc();
+ DbgLoc = I->getDebugLoc();
MachineBasicBlock::iterator SavedInsertPt = FuncInfo.InsertPt;
- // As a special case, don't handle calls to builtin library functions that
- // may be translated directly to target instructions.
if (const CallInst *Call = dyn_cast<CallInst>(I)) {
const Function *F = Call->getCalledFunction();
LibFunc::Func Func;
+
+ // As a special case, don't handle calls to builtin library functions that
+ // may be translated directly to target instructions.
if (F && !F->hasLocalLinkage() && F->hasName() &&
LibInfo->getLibFunc(F->getName(), Func) &&
LibInfo->hasOptimizedCodeGen(Func))
return false;
+
+ // Don't handle Intrinsic::trap if a trap function is specified.
+ if (F && F->getIntrinsicID() == Intrinsic::trap &&
+ !TM.Options.getTrapFunctionName().empty())
+ return false;
}
// First, try doing target-independent selection.
if (SelectOperator(I, I->getOpcode())) {
++NumFastIselSuccessIndependent;
- DL = DebugLoc();
+ DbgLoc = DebugLoc();
return true;
}
// Remove dead code. However, ignore call instructions since we've flushed
@@ -855,7 +1347,7 @@ FastISel::SelectInstruction(const Instruction *I) {
SavedInsertPt = FuncInfo.InsertPt;
if (TargetSelectInstruction(I)) {
++NumFastIselSuccessTarget;
- DL = DebugLoc();
+ DbgLoc = DebugLoc();
return true;
}
// Check for dead code and remove as necessary.
@@ -863,7 +1355,7 @@ FastISel::SelectInstruction(const Instruction *I) {
if (SavedInsertPt != FuncInfo.InsertPt)
removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
- DL = DebugLoc();
+ DbgLoc = DebugLoc();
return false;
}
@@ -871,8 +1363,7 @@ FastISel::SelectInstruction(const Instruction *I) {
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
-FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
-
+FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DbgLoc) {
if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
// For more accurate line information if this is the only instruction
@@ -880,10 +1371,14 @@ FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
// fall-through case, which needs no instructions.
} else {
// The unconditional branch case.
- TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
- SmallVector<MachineOperand, 0>(), DL);
+ TII.InsertBranch(*FuncInfo.MBB, MSucc, nullptr,
+ SmallVector<MachineOperand, 0>(), DbgLoc);
}
- FuncInfo.MBB->addSuccessor(MSucc);
+ uint32_t BranchWeight = 0;
+ if (FuncInfo.BPI)
+ BranchWeight = FuncInfo.BPI->getEdgeWeight(FuncInfo.MBB->getBasicBlock(),
+ MSucc->getBasicBlock());
+ FuncInfo.MBB->addSuccessor(MSucc, BranchWeight);
}
/// SelectFNeg - Emit an FNeg operation.
@@ -1035,8 +1530,10 @@ FastISel::SelectOperator(const User *I, unsigned Opcode) {
}
case Instruction::Unreachable:
- // Nothing to emit.
- return true;
+ if (TM.Options.TrapUnreachable)
+ return FastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
+ else
+ return true;
case Instruction::Alloca:
// FunctionLowering has the static-sized case covered.
@@ -1092,11 +1589,12 @@ FastISel::SelectOperator(const User *I, unsigned Opcode) {
FastISel::FastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo)
: FuncInfo(funcInfo),
+ MF(funcInfo.MF),
MRI(FuncInfo.MF->getRegInfo()),
MFI(*FuncInfo.MF->getFrameInfo()),
MCP(*FuncInfo.MF->getConstantPool()),
TM(FuncInfo.MF->getTarget()),
- TD(*TM.getDataLayout()),
+ DL(*TM.getDataLayout()),
TII(*TM.getInstrInfo()),
TLI(*TM.getTargetLowering()),
TRI(*TM.getRegisterInfo()),
@@ -1109,6 +1607,14 @@ bool FastISel::FastLowerArguments() {
return false;
}
+bool FastISel::FastLowerCall(CallLoweringInfo &/*CLI*/) {
+ return false;
+}
+
+bool FastISel::FastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
+ return false;
+}
+
unsigned FastISel::FastEmit_(MVT, MVT,
unsigned) {
return 0;
@@ -1204,29 +1710,48 @@ unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
return MRI.createVirtualRegister(RC);
}
+unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II,
+ unsigned Op, unsigned OpNum) {
+ if (TargetRegisterInfo::isVirtualRegister(Op)) {
+ const TargetRegisterClass *RegClass =
+ TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
+ if (!MRI.constrainRegClass(Op, RegClass)) {
+ // If it's not legal to COPY between the register classes, something
+ // has gone very wrong before we got here.
+ unsigned NewOp = createResultReg(RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
+ return NewOp;
+ }
+ }
+ return Op;
+}
+
unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
const TargetRegisterClass* RC) {
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
return ResultReg;
}
unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill) {
- unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
+ unsigned ResultReg = createResultReg(RC);
+ Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
+
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(II.ImplicitDefs[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
@@ -1236,19 +1761,22 @@ unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill) {
- unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
+ unsigned ResultReg = createResultReg(RC);
+ Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
+ Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
+
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(II.ImplicitDefs[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1258,21 +1786,25 @@ unsigned FastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill,
unsigned Op2, bool Op2IsKill) {
- unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
+ unsigned ResultReg = createResultReg(RC);
+ Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
+ Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
+ Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
+
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addReg(Op2, Op2IsKill * RegState::Kill);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addReg(Op2, Op2IsKill * RegState::Kill);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(II.ImplicitDefs[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1281,19 +1813,22 @@ unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
uint64_t Imm) {
- unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
+ unsigned ResultReg = createResultReg(RC);
+ RC = TII.getRegClass(II, II.getNumDefs(), &TRI, *FuncInfo.MF);
+ MRI.constrainRegClass(Op0, RC);
+
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Imm);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Imm);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(II.ImplicitDefs[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1302,21 +1837,23 @@ unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
uint64_t Imm1, uint64_t Imm2) {
- unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
+ unsigned ResultReg = createResultReg(RC);
+ Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
+
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Imm1)
.addImm(Imm2);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Imm1)
.addImm(Imm2);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(II.ImplicitDefs[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1325,19 +1862,21 @@ unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
const ConstantFP *FPImm) {
- unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
+ unsigned ResultReg = createResultReg(RC);
+ Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
+
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addFPImm(FPImm);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addFPImm(FPImm);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(II.ImplicitDefs[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1347,21 +1886,24 @@ unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill,
uint64_t Imm) {
- unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
+ unsigned ResultReg = createResultReg(RC);
+ Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
+ Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
+
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addImm(Imm);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addImm(Imm);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(II.ImplicitDefs[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1371,21 +1913,24 @@ unsigned FastISel::FastEmitInst_rrii(unsigned MachineInstOpcode,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill,
uint64_t Imm1, uint64_t Imm2) {
- unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
+ unsigned ResultReg = createResultReg(RC);
+ Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
+ Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
+
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addImm(Imm1).addImm(Imm2);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addImm(Imm1).addImm(Imm2);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(II.ImplicitDefs[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1397,11 +1942,11 @@ unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg).addImm(Imm);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(II.ImplicitDefs[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1413,12 +1958,12 @@ unsigned FastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addImm(Imm1).addImm(Imm2);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm1).addImm(Imm2);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(II.ImplicitDefs[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm1).addImm(Imm2);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1432,7 +1977,7 @@ unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
const TargetRegisterClass *RC = MRI.getRegClass(Op0);
MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(TargetOpcode::COPY), ResultReg)
+ DbgLoc, TII.get(TargetOpcode::COPY), ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill), Idx);
return ResultReg;
}
@@ -1498,9 +2043,9 @@ bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
// Set the DebugLoc for the copy. Prefer the location of the operand
// if there is one; use the location of the PHI otherwise.
- DL = PN->getDebugLoc();
+ DbgLoc = PN->getDebugLoc();
if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
- DL = Inst->getDebugLoc();
+ DbgLoc = Inst->getDebugLoc();
unsigned Reg = getRegForValue(PHIOp);
if (Reg == 0) {
@@ -1508,7 +2053,7 @@ bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
return false;
}
FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
- DL = DebugLoc();
+ DbgLoc = DebugLoc();
}
}
@@ -1523,7 +2068,7 @@ bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
// this by scanning the single-use users of the load until we get to FoldInst.
unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
- const Instruction *TheUser = LI->use_back();
+ const Instruction *TheUser = LI->user_back();
while (TheUser != FoldInst && // Scan up until we find FoldInst.
// Stay in the right block.
TheUser->getParent() == FoldInst->getParent() &&
@@ -1532,7 +2077,7 @@ bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
if (!TheUser->hasOneUse())
return false;
- TheUser = TheUser->use_back();
+ TheUser = TheUser->user_back();
}
// If we didn't find the fold instruction, then we failed to collapse the
@@ -1559,7 +2104,7 @@ bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
return false;
MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
- MachineInstr *User = &*RI;
+ MachineInstr *User = RI->getParent();
// Set the insertion point properly. Folding the load can cause generation of
// other random instructions (like sign extends) for addressing modes; make
@@ -1576,8 +2121,8 @@ bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
if (!isa<AddOperator>(Add))
return false;
// Type size needs to match.
- if (TD.getTypeSizeInBits(GEP->getType()) !=
- TD.getTypeSizeInBits(Add->getType()))
+ if (DL.getTypeSizeInBits(GEP->getType()) !=
+ DL.getTypeSizeInBits(Add->getType()))
return false;
// Must be in the same basic block.
if (isa<Instruction>(Add) &&
@@ -1587,3 +2132,47 @@ bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}
+MachineMemOperand *
+FastISel::createMachineMemOperandFor(const Instruction *I) const {
+ const Value *Ptr;
+ Type *ValTy;
+ unsigned Alignment;
+ unsigned Flags;
+ bool IsVolatile;
+
+ if (const auto *LI = dyn_cast<LoadInst>(I)) {
+ Alignment = LI->getAlignment();
+ IsVolatile = LI->isVolatile();
+ Flags = MachineMemOperand::MOLoad;
+ Ptr = LI->getPointerOperand();
+ ValTy = LI->getType();
+ } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
+ Alignment = SI->getAlignment();
+ IsVolatile = SI->isVolatile();
+ Flags = MachineMemOperand::MOStore;
+ Ptr = SI->getPointerOperand();
+ ValTy = SI->getValueOperand()->getType();
+ } else {
+ return nullptr;
+ }
+
+ bool IsNonTemporal = I->getMetadata("nontemporal") != nullptr;
+ bool IsInvariant = I->getMetadata("invariant.load") != nullptr;
+ const MDNode *TBAAInfo = I->getMetadata(LLVMContext::MD_tbaa);
+ const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
+
+ if (Alignment == 0) // Ensure that codegen never sees alignment 0.
+ Alignment = DL.getABITypeAlignment(ValTy);
+
+ unsigned Size = TM.getDataLayout()->getTypeStoreSize(ValTy);
+
+ if (IsVolatile)
+ Flags |= MachineMemOperand::MOVolatile;
+ if (IsNonTemporal)
+ Flags |= MachineMemOperand::MONonTemporal;
+ if (IsInvariant)
+ Flags |= MachineMemOperand::MOInvariant;
+
+ return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
+ Alignment, TBAAInfo, Ranges);
+}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index e9d2324..ae124e8 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "function-lowering-info"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/CodeGen/Analysis.h"
@@ -21,8 +20,8 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
@@ -32,14 +31,16 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "function-lowering-info"
+
/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch or atomic instruction, which may expand to multiple basic blocks.
@@ -47,12 +48,10 @@ static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
if (I->use_empty()) return false;
if (isa<PHINode>(I)) return true;
const BasicBlock *BB = I->getParent();
- for (Value::const_use_iterator UI = I->use_begin(), E = I->use_end();
- UI != E; ++UI) {
- const User *U = *UI;
+ for (const User *U : I->users())
if (cast<Instruction>(U)->getParent() != BB || isa<PHINode>(U))
return true;
- }
+
return false;
}
@@ -76,7 +75,12 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
// them.
Function::const_iterator BB = Fn->begin(), EB = Fn->end();
for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I)
- if (const AllocaInst *AI = dyn_cast<AllocaInst>(I))
+ if (const AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
+ // Don't fold inalloca allocas or other dynamic allocas into the initial
+ // stack frame allocation, even if they are in the entry block.
+ if (!AI->isStaticAlloca())
+ continue;
+
if (const ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
Type *Ty = AI->getAllocatedType();
uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty);
@@ -87,17 +91,10 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
TySize *= CUI->getZExtValue(); // Get total allocated size.
if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
- // The object may need to be placed onto the stack near the stack
- // protector if one exists. Determine here if this object is a suitable
- // candidate. I.e., it would trigger the creation of a stack protector.
- bool MayNeedSP =
- (AI->isArrayAllocation() ||
- (TySize >= 8 && isa<ArrayType>(Ty) &&
- cast<ArrayType>(Ty)->getElementType()->isIntegerTy(8)));
StaticAllocaMap[AI] =
- MF->getFrameInfo()->CreateStackObject(TySize, Align, false,
- MayNeedSP, AI);
+ MF->getFrameInfo()->CreateStackObject(TySize, Align, false, AI);
}
+ }
for (; BB != EB; ++BB)
for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
@@ -120,7 +117,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
// Look for inline asm that clobbers the SP register.
if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
ImmutableCallSite CS(I);
- if (const InlineAsm *IA = dyn_cast<InlineAsm>(CS.getCalledValue())) {
+ if (isa<InlineAsm>(CS.getCalledValue())) {
unsigned SP = TLI->getStackPointerRegisterToSaveRestore();
std::vector<TargetLowering::AsmOperandInfo> Ops =
TLI->ParseConstraints(CS);
@@ -287,11 +284,11 @@ unsigned FunctionLoweringInfo::CreateRegs(Type *Ty) {
const FunctionLoweringInfo::LiveOutInfo *
FunctionLoweringInfo::GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth) {
if (!LiveOutRegInfo.inBounds(Reg))
- return NULL;
+ return nullptr;
LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
if (!LOI->IsValid)
- return NULL;
+ return nullptr;
if (BitWidth > LOI->KnownZero.getBitWidth()) {
LOI->NumSignBits = 1;
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 856ef34..7c124b8 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -13,7 +13,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "instr-emitter"
#include "InstrEmitter.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/Statistic.h"
@@ -31,6 +30,8 @@
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
+#define DEBUG_TYPE "instr-emitter"
+
/// MinRCSize - Smallest register class we allow when constraining virtual
/// registers. If satisfying all register class constraints would require
/// using a smaller register class, emit a COPY to a new virtual register
@@ -99,7 +100,7 @@ EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
// If the node is only used by a CopyToReg and the dest reg is a vreg, use
// the CopyToReg'd destination register instead of creating a new vreg.
bool MatchReg = true;
- const TargetRegisterClass *UseRC = NULL;
+ const TargetRegisterClass *UseRC = nullptr;
MVT VT = Node->getSimpleValueType(ResNo);
// Stick to the preferred register classes for legal types.
@@ -107,9 +108,7 @@ EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
UseRC = TLI->getRegClassFor(VT);
if (!IsClone && !IsCloned)
- for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
- UI != E; ++UI) {
- SDNode *User = *UI;
+ for (SDNode *User : Node->uses()) {
bool Match = true;
if (User->getOpcode() == ISD::CopyToReg &&
User->getOperand(2).getNode() == Node &&
@@ -131,7 +130,7 @@ EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
Match = false;
if (User->isMachineOpcode()) {
const MCInstrDesc &II = TII->get(User->getMachineOpcode());
- const TargetRegisterClass *RC = 0;
+ const TargetRegisterClass *RC = nullptr;
if (i+II.getNumDefs() < II.getNumOperands()) {
RC = TRI->getAllocatableClass(
TII->getRegClass(II, i+II.getNumDefs(), TRI, *MF));
@@ -154,7 +153,7 @@ EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
break;
}
- const TargetRegisterClass *SrcRC = 0, *DstRC = 0;
+ const TargetRegisterClass *SrcRC = nullptr, *DstRC = nullptr;
SrcRC = TRI->getMinimalPhysRegClass(SrcReg, VT);
// Figure out the register class to create for the destreg.
@@ -242,9 +241,7 @@ void InstrEmitter::CreateVirtualRegisters(SDNode *Node,
}
if (!VRBase && !IsClone && !IsCloned)
- for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
- UI != E; ++UI) {
- SDNode *User = *UI;
+ for (SDNode *User : Node->uses()) {
if (User->getOpcode() == ISD::CopyToReg &&
User->getOperand(2).getNode() == Node &&
User->getOperand(2).getResNo() == i) {
@@ -329,7 +326,7 @@ InstrEmitter::AddRegisterOperand(MachineInstrBuilder &MIB,
// shrink VReg's register class within reason. For example, if VReg == GR32
// and II requires a GR32_NOSP, just constrain VReg to GR32_NOSP.
if (II) {
- const TargetRegisterClass *DstRC = 0;
+ const TargetRegisterClass *DstRC = nullptr;
if (IIOpNum < II->getNumOperands())
DstRC = TRI->getAllocatableClass(TII->getRegClass(*II,IIOpNum,TRI,*MF));
if (DstRC && !MRI->constrainRegClass(VReg, DstRC, MinRCSize)) {
@@ -470,9 +467,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
// If the node is only used by a CopyToReg and the dest reg is a vreg, use
// the CopyToReg'd destination register instead of creating a new vreg.
- for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
- UI != E; ++UI) {
- SDNode *User = *UI;
+ for (SDNode *User : Node->uses()) {
if (User->getOpcode() == ISD::CopyToReg &&
User->getOperand(2).getNode() == Node) {
unsigned DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
@@ -561,10 +556,10 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
const ConstantSDNode *SD = cast<ConstantSDNode>(N0);
MIB.addImm(SD->getZExtValue());
} else
- AddOperand(MIB, N0, 0, 0, VRBaseMap, /*IsDebug=*/false,
+ AddOperand(MIB, N0, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
IsClone, IsCloned);
// Add the subregister being inserted
- AddOperand(MIB, N1, 0, 0, VRBaseMap, /*IsDebug=*/false,
+ AddOperand(MIB, N1, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
IsClone, IsCloned);
MIB.addImm(SubIdx);
MBB->insert(InsertPos, MIB);
@@ -693,10 +688,13 @@ InstrEmitter::EmitDbgValue(SDDbgValue *SD,
MIB.addReg(0U);
}
- if (Offset != 0) // Indirect addressing.
+ // Indirect addressing is indicated by an Imm as the second parameter.
+ if (SD->isIndirect())
MIB.addImm(Offset);
- else
+ else {
+ assert(Offset == 0 && "direct value cannot have an offset");
MIB.addReg(0U, RegState::Debug);
+ }
MIB.addMetadata(MDPtr);
@@ -738,19 +736,25 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
const MCInstrDesc &II = TII->get(Opc);
unsigned NumResults = CountResults(Node);
unsigned NumDefs = II.getNumDefs();
- const uint16_t *ScratchRegs = NULL;
-
- // Handle PATCHPOINT specially and then use the generic code.
- if (Opc == TargetOpcode::PATCHPOINT) {
- unsigned CC = Node->getConstantOperandVal(PatchPointOpers::CCPos);
- NumDefs = NumResults;
+ const MCPhysReg *ScratchRegs = nullptr;
+
+ // Handle STACKMAP and PATCHPOINT specially and then use the generic code.
+ if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
+ // Stackmaps do not have arguments and do not preserve their calling
+ // convention. However, to simplify runtime support, they clobber the same
+ // scratch registers as AnyRegCC.
+ unsigned CC = CallingConv::AnyReg;
+ if (Opc == TargetOpcode::PATCHPOINT) {
+ CC = Node->getConstantOperandVal(PatchPointOpers::CCPos);
+ NumDefs = NumResults;
+ }
ScratchRegs = TLI->getScratchRegisters((CallingConv::ID) CC);
}
unsigned NumImpUses = 0;
unsigned NodeOperands =
countOperands(Node, II.getNumOperands() - NumDefs, NumImpUses);
- bool HasPhysRegOuts = NumResults > NumDefs && II.getImplicitDefs()!=0;
+ bool HasPhysRegOuts = NumResults > NumDefs && II.getImplicitDefs()!=nullptr;
#ifndef NDEBUG
unsigned NumMIOperands = NodeOperands + NumResults;
if (II.isVariadic())
@@ -976,7 +980,7 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
// The addressing mode has been selected, just add all of the
// operands to the machine instruction.
for (unsigned j = 0; j != NumVals; ++j, ++i)
- AddOperand(MIB, Node->getOperand(i), 0, 0, VRBaseMap,
+ AddOperand(MIB, Node->getOperand(i), 0, nullptr, VRBaseMap,
/*IsDebug=*/false, IsClone, IsCloned);
// Manually set isTied bits.
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 9061ae9..16c5b4b 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -13,15 +13,16 @@
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
@@ -152,10 +153,10 @@ private:
public:
// DAGUpdateListener implementation.
- virtual void NodeDeleted(SDNode *N, SDNode *E) {
+ void NodeDeleted(SDNode *N, SDNode *E) override {
ForgetNode(N);
}
- virtual void NodeUpdated(SDNode *N) {}
+ void NodeUpdated(SDNode *N) override {}
// Node replacement helpers
void ReplacedNode(SDNode *N) {
@@ -269,7 +270,7 @@ SelectionDAGLegalize::ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP) {
EVT OrigVT = VT;
EVT SVT = VT;
- while (SVT != MVT::f32) {
+ while (SVT != MVT::f32 && SVT != MVT::f16) {
SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1);
if (ConstantFPSDNode::isValueValidForType(SVT, CFP->getValueAPF()) &&
// Only do this if the target has a native EXTLOAD instruction from
@@ -386,9 +387,7 @@ static void ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
MinAlign(ST->getAlignment(), Offset),
ST->getTBAAInfo()));
// The order of the stores doesn't matter - say it with a TokenFactor.
- SDValue Result =
- DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
- Stores.size());
+ SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
DAGLegalize->ReplaceNode(SDValue(ST, 0), Result);
return;
}
@@ -505,8 +504,7 @@ ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
false, false, 0));
// The order of the stores doesn't matter - say it with a TokenFactor.
- SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
- Stores.size());
+ SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
// Finally, perform the original load only redirected to the stack slot.
Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
@@ -704,7 +702,7 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
}
}
}
- return SDValue(0, 0);
+ return SDValue(nullptr, 0);
}
void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
@@ -729,10 +727,11 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
MVT VT = Value.getSimpleValueType();
switch (TLI.getOperationAction(ISD::STORE, VT)) {
default: llvm_unreachable("This action is not supported yet!");
- case TargetLowering::Legal:
+ case TargetLowering::Legal: {
// If this is an unaligned store and the target doesn't support it,
// expand it.
- if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
+ unsigned AS = ST->getAddressSpace();
+ if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT(), AS)) {
Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment)
@@ -740,6 +739,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
DAG, TLI, this);
}
break;
+ }
case TargetLowering::Custom: {
SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
if (Res.getNode())
@@ -807,7 +807,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
DAG.getConstant(IncrementSize, Ptr.getValueType()));
Hi = DAG.getNode(ISD::SRL, dl, Value.getValueType(), Value,
DAG.getConstant(RoundWidth,
- TLI.getShiftAmountTy(Value.getValueType())));
+ TLI.getShiftAmountTy(Value.getValueType())));
Hi = DAG.getTruncStore(Chain, dl, Hi, Ptr,
ST->getPointerInfo().getWithOffset(IncrementSize),
ExtraVT, isVolatile, isNonTemporal,
@@ -818,7 +818,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
// Store the top RoundWidth bits.
Hi = DAG.getNode(ISD::SRL, dl, Value.getValueType(), Value,
DAG.getConstant(ExtraWidth,
- TLI.getShiftAmountTy(Value.getValueType())));
+ TLI.getShiftAmountTy(Value.getValueType())));
Hi = DAG.getTruncStore(Chain, dl, Hi, Ptr, ST->getPointerInfo(),
RoundVT, isVolatile, isNonTemporal, Alignment,
TBAAInfo);
@@ -826,7 +826,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
// Store the remaining ExtraWidth bits.
IncrementSize = RoundWidth / 8;
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
- DAG.getConstant(IncrementSize, Ptr.getValueType()));
+ DAG.getConstant(IncrementSize, Ptr.getValueType()));
Lo = DAG.getTruncStore(Chain, dl, Value, Ptr,
ST->getPointerInfo().getWithOffset(IncrementSize),
ExtraVT, isVolatile, isNonTemporal,
@@ -840,16 +840,18 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
switch (TLI.getTruncStoreAction(ST->getValue().getSimpleValueType(),
StVT.getSimpleVT())) {
default: llvm_unreachable("This action is not supported yet!");
- case TargetLowering::Legal:
+ case TargetLowering::Legal: {
+ unsigned AS = ST->getAddressSpace();
// If this is an unaligned store and the target doesn't support it,
// expand it.
- if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
+ if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT(), AS)) {
Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment)
ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
}
break;
+ }
case TargetLowering::Custom: {
SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
if (Res.getNode())
@@ -889,10 +891,11 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
default: llvm_unreachable("This action is not supported yet!");
- case TargetLowering::Legal:
+ case TargetLowering::Legal: {
+ unsigned AS = LD->getAddressSpace();
// If this is an unaligned load and the target doesn't support it,
// expand it.
- if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
+ if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT(), AS)) {
Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment =
TLI.getDataLayout()->getABITypeAlignment(Ty);
@@ -901,6 +904,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
}
}
break;
+ }
case TargetLowering::Custom: {
SDValue Res = TLI.LowerOperation(RVal, DAG);
if (Res.getNode()) {
@@ -1017,7 +1021,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
// Move the top bits to the right place.
Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
DAG.getConstant(RoundWidth,
- TLI.getShiftAmountTy(Hi.getValueType())));
+ TLI.getShiftAmountTy(Hi.getValueType())));
// Join the hi and lo parts.
Value = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
@@ -1047,7 +1051,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
// Move the top bits to the right place.
Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
DAG.getConstant(ExtraWidth,
- TLI.getShiftAmountTy(Hi.getValueType())));
+ TLI.getShiftAmountTy(Hi.getValueType())));
// Join the hi and lo parts.
Value = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
@@ -1059,77 +1063,82 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
switch (TLI.getLoadExtAction(ExtType, SrcVT.getSimpleVT())) {
default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Custom:
- isCustom = true;
- // FALLTHROUGH
+ isCustom = true;
+ // FALLTHROUGH
case TargetLowering::Legal: {
- Value = SDValue(Node, 0);
- Chain = SDValue(Node, 1);
-
- if (isCustom) {
- SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
- if (Res.getNode()) {
- Value = Res;
- Chain = Res.getValue(1);
- }
- } else {
- // If this is an unaligned load and the target doesn't support it,
- // expand it.
- if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
- Type *Ty =
- LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment =
- TLI.getDataLayout()->getABITypeAlignment(Ty);
- if (LD->getAlignment() < ABIAlignment){
- ExpandUnalignedLoad(cast<LoadSDNode>(Node),
- DAG, TLI, Value, Chain);
- }
- }
- }
- break;
+ Value = SDValue(Node, 0);
+ Chain = SDValue(Node, 1);
+
+ if (isCustom) {
+ SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
+ if (Res.getNode()) {
+ Value = Res;
+ Chain = Res.getValue(1);
+ }
+ } else {
+ // If this is an unaligned load and the target doesn't support
+ // it, expand it.
+ EVT MemVT = LD->getMemoryVT();
+ unsigned AS = LD->getAddressSpace();
+ if (!TLI.allowsUnalignedMemoryAccesses(MemVT, AS)) {
+ Type *Ty =
+ LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
+ unsigned ABIAlignment =
+ TLI.getDataLayout()->getABITypeAlignment(Ty);
+ if (LD->getAlignment() < ABIAlignment){
+ ExpandUnalignedLoad(cast<LoadSDNode>(Node),
+ DAG, TLI, Value, Chain);
+ }
+ }
+ }
+ break;
}
case TargetLowering::Expand:
- if (!TLI.isLoadExtLegal(ISD::EXTLOAD, SrcVT) && TLI.isTypeLegal(SrcVT)) {
- SDValue Load = DAG.getLoad(SrcVT, dl, Chain, Ptr,
- LD->getMemOperand());
- unsigned ExtendOp;
- switch (ExtType) {
- case ISD::EXTLOAD:
- ExtendOp = (SrcVT.isFloatingPoint() ?
- ISD::FP_EXTEND : ISD::ANY_EXTEND);
- break;
- case ISD::SEXTLOAD: ExtendOp = ISD::SIGN_EXTEND; break;
- case ISD::ZEXTLOAD: ExtendOp = ISD::ZERO_EXTEND; break;
- default: llvm_unreachable("Unexpected extend load type!");
- }
- Value = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load);
- Chain = Load.getValue(1);
- break;
- }
-
- assert(!SrcVT.isVector() &&
- "Vector Loads are handled in LegalizeVectorOps");
-
- // FIXME: This does not work for vectors on most targets. Sign- and
- // zero-extend operations are currently folded into extending loads,
- // whether they are legal or not, and then we end up here without any
- // support for legalizing them.
- assert(ExtType != ISD::EXTLOAD &&
- "EXTLOAD should always be supported!");
- // Turn the unsupported load into an EXTLOAD followed by an explicit
- // zero/sign extend inreg.
- SDValue Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0),
- Chain, Ptr, SrcVT,
- LD->getMemOperand());
- SDValue ValRes;
- if (ExtType == ISD::SEXTLOAD)
- ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
- Result.getValueType(),
- Result, DAG.getValueType(SrcVT));
- else
- ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT.getScalarType());
- Value = ValRes;
- Chain = Result.getValue(1);
- break;
+ if (!TLI.isLoadExtLegal(ISD::EXTLOAD, SrcVT) &&
+ TLI.isTypeLegal(SrcVT)) {
+ SDValue Load = DAG.getLoad(SrcVT, dl, Chain, Ptr,
+ LD->getMemOperand());
+ unsigned ExtendOp;
+ switch (ExtType) {
+ case ISD::EXTLOAD:
+ ExtendOp = (SrcVT.isFloatingPoint() ?
+ ISD::FP_EXTEND : ISD::ANY_EXTEND);
+ break;
+ case ISD::SEXTLOAD: ExtendOp = ISD::SIGN_EXTEND; break;
+ case ISD::ZEXTLOAD: ExtendOp = ISD::ZERO_EXTEND; break;
+ default: llvm_unreachable("Unexpected extend load type!");
+ }
+ Value = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load);
+ Chain = Load.getValue(1);
+ break;
+ }
+
+ assert(!SrcVT.isVector() &&
+ "Vector Loads are handled in LegalizeVectorOps");
+
+ // FIXME: This does not work for vectors on most targets. Sign-
+ // and zero-extend operations are currently folded into extending
+ // loads, whether they are legal or not, and then we end up here
+ // without any support for legalizing them.
+ assert(ExtType != ISD::EXTLOAD &&
+ "EXTLOAD should always be supported!");
+ // Turn the unsupported load into an EXTLOAD followed by an
+ // explicit zero/sign extend inreg.
+ SDValue Result = DAG.getExtLoad(ISD::EXTLOAD, dl,
+ Node->getValueType(0),
+ Chain, Ptr, SrcVT,
+ LD->getMemOperand());
+ SDValue ValRes;
+ if (ExtType == ISD::SEXTLOAD)
+ ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
+ Result.getValueType(),
+ Result, DAG.getValueType(SrcVT));
+ else
+ ValRes = DAG.getZeroExtendInReg(Result, dl,
+ SrcVT.getScalarType());
+ Value = ValRes;
+ Chain = Result.getValue(1);
+ break;
}
}
@@ -1177,6 +1186,7 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
if (Action != TargetLowering::Promote)
Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other);
break;
+ case ISD::FP_TO_FP16:
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
case ISD::EXTRACT_VECTOR_ELT:
@@ -1256,6 +1266,13 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
if (Action == TargetLowering::Legal)
Action = TargetLowering::Custom;
break;
+ case ISD::READ_REGISTER:
+ case ISD::WRITE_REGISTER:
+ // Named registers are legal in the DAG, but blocked by register name
+ // selection if not implemented by the target (to choose the correct
+ // register). They'll be converted to Copy(To/From)Reg.
+ Action = TargetLowering::Legal;
+ break;
case ISD::DEBUGTRAP:
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
if (Action == TargetLowering::Expand) {
@@ -1383,10 +1400,39 @@ SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
SDValue Vec = Op.getOperand(0);
SDValue Idx = Op.getOperand(1);
SDLoc dl(Op);
- // Store the value to a temporary stack slot, then LOAD the returned part.
- SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType());
- SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr,
- MachinePointerInfo(), false, false, 0);
+
+ // Before we generate a new store to a temporary stack slot, see if there is
+ // already one that we can use. There often is because when we scalarize
+ // vector operations (using SelectionDAG::UnrollVectorOp for example) a whole
+ // series of EXTRACT_VECTOR_ELT nodes are generated, one for each element in
+ // the vector. If all are expanded here, we don't want one store per vector
+ // element.
+ SDValue StackPtr, Ch;
+ for (SDNode::use_iterator UI = Vec.getNode()->use_begin(),
+ UE = Vec.getNode()->use_end(); UI != UE; ++UI) {
+ SDNode *User = *UI;
+ if (StoreSDNode *ST = dyn_cast<StoreSDNode>(User)) {
+ if (ST->isIndexed() || ST->isTruncatingStore() ||
+ ST->getValue() != Vec)
+ continue;
+
+ // Make sure that nothing else could have stored into the destination of
+ // this store.
+ if (!ST->getChain().reachesChainWithoutSideEffects(DAG.getEntryNode()))
+ continue;
+
+ StackPtr = ST->getBasePtr();
+ Ch = SDValue(ST, 0);
+ break;
+ }
+ }
+
+ if (!Ch.getNode()) {
+ // Store the value to a temporary stack slot, then LOAD the returned part.
+ StackPtr = DAG.CreateStackTemporary(Vec.getValueType());
+ Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr,
+ MachinePointerInfo(), false, false, 0);
+ }
// Add the offset to the index.
unsigned EltSize =
@@ -1487,8 +1533,7 @@ SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
SDValue StoreChain;
if (!Stores.empty()) // Not all undef elements?
- StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &Stores[0], Stores.size());
+ StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
else
StoreChain = DAG.getEntryNode();
@@ -1530,9 +1575,8 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
// the pointer so that the loaded integer will contain the sign bit.
unsigned Strides = (FloatVT.getSizeInBits()-1)/LoadTy.getSizeInBits();
unsigned ByteOffset = (Strides * LoadTy.getSizeInBits()) / 8;
- LoadPtr = DAG.getNode(ISD::ADD, dl, LoadPtr.getValueType(),
- LoadPtr,
- DAG.getConstant(ByteOffset, LoadPtr.getValueType()));
+ LoadPtr = DAG.getNode(ISD::ADD, dl, LoadPtr.getValueType(), LoadPtr,
+ DAG.getConstant(ByteOffset, LoadPtr.getValueType()));
// Load a legal integer containing the sign bit.
SignBit = DAG.getLoad(LoadTy, dl, Ch, LoadPtr, MachinePointerInfo(),
false, false, false, 0);
@@ -1555,8 +1599,8 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
// Select between the nabs and abs value based on the sign bit of
// the input.
return DAG.getSelect(dl, AbsVal.getValueType(), SignBit,
- DAG.getNode(ISD::FNEG, dl, AbsVal.getValueType(), AbsVal),
- AbsVal);
+ DAG.getNode(ISD::FNEG, dl, AbsVal.getValueType(), AbsVal),
+ AbsVal);
}
void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node,
@@ -1609,8 +1653,8 @@ void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node,
/// If the SETCC has been legalized using the inverse condcode, then LHS and
/// RHS will be unchanged, CC will set to the inverted condcode, and NeedInvert
/// will be set to true. The caller must invert the result of the SETCC with
-/// SelectionDAG::getNOT() or take equivalent action to swap the effect of a
-/// true/false result.
+/// SelectionDAG::getLogicalNOT() or take equivalent action to swap the effect
+/// of a true/false result.
///
/// \returns true if the SetCC has been legalized, false if it hasn't.
bool SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT,
@@ -1776,6 +1820,98 @@ SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) {
false, false, false, 0);
}
+static bool
+ExpandBVWithShuffles(SDNode *Node, SelectionDAG &DAG,
+ const TargetLowering &TLI, SDValue &Res) {
+ unsigned NumElems = Node->getNumOperands();
+ SDLoc dl(Node);
+ EVT VT = Node->getValueType(0);
+
+ // Try to group the scalars into pairs, shuffle the pairs together, then
+ // shuffle the pairs of pairs together, etc. until the vector has
+ // been built. This will work only if all of the necessary shuffle masks
+ // are legal.
+
+ // We do this in two phases; first to check the legality of the shuffles,
+ // and next, assuming that all shuffles are legal, to create the new nodes.
+ for (int Phase = 0; Phase < 2; ++Phase) {
+ SmallVector<std::pair<SDValue, SmallVector<int, 16> >, 16> IntermedVals,
+ NewIntermedVals;
+ for (unsigned i = 0; i < NumElems; ++i) {
+ SDValue V = Node->getOperand(i);
+ if (V.getOpcode() == ISD::UNDEF)
+ continue;
+
+ SDValue Vec;
+ if (Phase)
+ Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, V);
+ IntermedVals.push_back(std::make_pair(Vec, SmallVector<int, 16>(1, i)));
+ }
+
+ while (IntermedVals.size() > 2) {
+ NewIntermedVals.clear();
+ for (unsigned i = 0, e = (IntermedVals.size() & ~1u); i < e; i += 2) {
+ // This vector and the next vector are shuffled together (simply to
+ // append the one to the other).
+ SmallVector<int, 16> ShuffleVec(NumElems, -1);
+
+ SmallVector<int, 16> FinalIndices;
+ FinalIndices.reserve(IntermedVals[i].second.size() +
+ IntermedVals[i+1].second.size());
+
+ int k = 0;
+ for (unsigned j = 0, f = IntermedVals[i].second.size(); j != f;
+ ++j, ++k) {
+ ShuffleVec[k] = j;
+ FinalIndices.push_back(IntermedVals[i].second[j]);
+ }
+ for (unsigned j = 0, f = IntermedVals[i+1].second.size(); j != f;
+ ++j, ++k) {
+ ShuffleVec[k] = NumElems + j;
+ FinalIndices.push_back(IntermedVals[i+1].second[j]);
+ }
+
+ SDValue Shuffle;
+ if (Phase)
+ Shuffle = DAG.getVectorShuffle(VT, dl, IntermedVals[i].first,
+ IntermedVals[i+1].first,
+ ShuffleVec.data());
+ else if (!TLI.isShuffleMaskLegal(ShuffleVec, VT))
+ return false;
+ NewIntermedVals.push_back(std::make_pair(Shuffle, FinalIndices));
+ }
+
+ // If we had an odd number of defined values, then append the last
+ // element to the array of new vectors.
+ if ((IntermedVals.size() & 1) != 0)
+ NewIntermedVals.push_back(IntermedVals.back());
+
+ IntermedVals.swap(NewIntermedVals);
+ }
+
+ assert(IntermedVals.size() <= 2 && IntermedVals.size() > 0 &&
+ "Invalid number of intermediate vectors");
+ SDValue Vec1 = IntermedVals[0].first;
+ SDValue Vec2;
+ if (IntermedVals.size() > 1)
+ Vec2 = IntermedVals[1].first;
+ else if (Phase)
+ Vec2 = DAG.getUNDEF(VT);
+
+ SmallVector<int, 16> ShuffleVec(NumElems, -1);
+ for (unsigned i = 0, e = IntermedVals[0].second.size(); i != e; ++i)
+ ShuffleVec[IntermedVals[0].second[i]] = i;
+ for (unsigned i = 0, e = IntermedVals[1].second.size(); i != e; ++i)
+ ShuffleVec[IntermedVals[1].second[i]] = NumElems + i;
+
+ if (Phase)
+ Res = DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec.data());
+ else if (!TLI.isShuffleMaskLegal(ShuffleVec, VT))
+ return false;
+ }
+
+ return true;
+}
/// ExpandBUILD_VECTOR - Expand a BUILD_VECTOR node on targets that don't
/// support the operation, but do support the resultant vector type.
@@ -1850,25 +1986,38 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
false, false, false, Alignment);
}
- if (!MoreThanTwoValues) {
- SmallVector<int, 8> ShuffleVec(NumElems, -1);
- for (unsigned i = 0; i < NumElems; ++i) {
- SDValue V = Node->getOperand(i);
- if (V.getOpcode() == ISD::UNDEF)
- continue;
- ShuffleVec[i] = V == Value1 ? 0 : NumElems;
- }
- if (TLI.isShuffleMaskLegal(ShuffleVec, Node->getValueType(0))) {
- // Get the splatted value into the low element of a vector register.
- SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1);
- SDValue Vec2;
- if (Value2.getNode())
- Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2);
- else
- Vec2 = DAG.getUNDEF(VT);
+ SmallSet<SDValue, 16> DefinedValues;
+ for (unsigned i = 0; i < NumElems; ++i) {
+ if (Node->getOperand(i).getOpcode() == ISD::UNDEF)
+ continue;
+ DefinedValues.insert(Node->getOperand(i));
+ }
- // Return shuffle(LowValVec, undef, <0,0,0,0>)
- return DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec.data());
+ if (TLI.shouldExpandBuildVectorWithShuffles(VT, DefinedValues.size())) {
+ if (!MoreThanTwoValues) {
+ SmallVector<int, 8> ShuffleVec(NumElems, -1);
+ for (unsigned i = 0; i < NumElems; ++i) {
+ SDValue V = Node->getOperand(i);
+ if (V.getOpcode() == ISD::UNDEF)
+ continue;
+ ShuffleVec[i] = V == Value1 ? 0 : NumElems;
+ }
+ if (TLI.isShuffleMaskLegal(ShuffleVec, Node->getValueType(0))) {
+ // Get the splatted value into the low element of a vector register.
+ SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1);
+ SDValue Vec2;
+ if (Value2.getNode())
+ Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2);
+ else
+ Vec2 = DAG.getUNDEF(VT);
+
+ // Return shuffle(LowValVec, undef, <0,0,0,0>)
+ return DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec.data());
+ }
+ } else {
+ SDValue Res;
+ if (ExpandBVWithShuffles(Node, DAG, TLI, Res))
+ return Res;
}
}
@@ -1910,13 +2059,12 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
if (isTailCall)
InChain = TCChain;
- TargetLowering::
- CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned, false, false,
- 0, TLI.getLibcallCallingConv(LC), isTailCall,
- /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
- Callee, Args, DAG, SDLoc(Node));
- std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(SDLoc(Node)).setChain(InChain)
+ .setCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
+ .setTailCall(isTailCall).setSExtResult(isSigned).setZExtResult(!isSigned);
+ std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
if (!CallInfo.second.getNode())
// It's a tailcall, return the chain (which is the DAG root).
@@ -1945,12 +2093,12 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, EVT RetVT,
TLI.getPointerTy());
Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
- TargetLowering::
- CallLoweringInfo CLI(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
- false, 0, TLI.getLibcallCallingConv(LC),
- /*isTailCall=*/false,
- /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
- Callee, Args, DAG, dl);
+
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
+ .setCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
+ .setSExtResult(isSigned).setZExtResult(!isSigned);
+
std::pair<SDValue,SDValue> CallInfo = TLI.LowerCallTo(CLI);
return CallInfo.first;
@@ -1979,11 +2127,12 @@ SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC,
TLI.getPointerTy());
Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
- TargetLowering::
- CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned, false, false,
- 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
- /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
- Callee, Args, DAG, SDLoc(Node));
+
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(SDLoc(Node)).setChain(InChain)
+ .setCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
+ .setSExtResult(isSigned).setZExtResult(!isSigned);
+
std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
return CallInfo;
@@ -2038,7 +2187,7 @@ static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned,
case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break;
}
- return TLI.getLibcallName(LC) != 0;
+ return TLI.getLibcallName(LC) != nullptr;
}
/// useDivRem - Only issue divrem libcall if both quotient and remainder are
@@ -2116,11 +2265,11 @@ SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node,
TLI.getPointerTy());
SDLoc dl(Node);
- TargetLowering::
- CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned, false, false,
- 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
- /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
- Callee, Args, DAG, dl);
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl).setChain(InChain)
+ .setCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
+ .setSExtResult(isSigned).setZExtResult(!isSigned);
+
std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
// Remainder is loaded back from the stack frame.
@@ -2141,7 +2290,7 @@ static bool isSinCosLibcallAvailable(SDNode *Node, const TargetLowering &TLI) {
case MVT::f128: LC = RTLIB::SINCOS_F128; break;
case MVT::ppcf128: LC = RTLIB::SINCOS_PPCF128; break;
}
- return TLI.getLibcallName(LC) != 0;
+ return TLI.getLibcallName(LC) != nullptr;
}
/// canCombineSinCosLibcall - Return true if sincos libcall is available and
@@ -2230,12 +2379,11 @@ SelectionDAGLegalize::ExpandSinCosLibCall(SDNode *Node,
TLI.getPointerTy());
SDLoc dl(Node);
- TargetLowering::
- CallLoweringInfo CLI(InChain, Type::getVoidTy(*DAG.getContext()),
- false, false, false, false,
- 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
- /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
- Callee, Args, DAG, dl);
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl).setChain(InChain)
+ .setCallee(TLI.getLibcallCallingConv(LC),
+ Type::getVoidTy(*DAG.getContext()), Callee, std::move(Args), 0);
+
std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
Results.push_back(DAG.getLoad(RetVT, dl, CallInfo.second, SinPtr,
@@ -2503,12 +2651,15 @@ SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp,
NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy+1);
assert(NewOutTy.isInteger() && "Ran out of possibilities!");
+ // A larger signed type can hold all unsigned values of the requested type,
+ // so using FP_TO_SINT is valid.
if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewOutTy)) {
OpToUse = ISD::FP_TO_SINT;
break;
}
- if (TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewOutTy)) {
+ // However, if the value may be < 0.0, we *must* use some FP_TO_SINT.
+ if (!isSigned && TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewOutTy)) {
OpToUse = ISD::FP_TO_UINT;
break;
}
@@ -2845,15 +2996,13 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
// If the target didn't lower this, lower it to '__sync_synchronize()' call
// FIXME: handle "fence singlethread" more efficiently.
TargetLowering::ArgListTy Args;
- TargetLowering::
- CallLoweringInfo CLI(Node->getOperand(0),
- Type::getVoidTy(*DAG.getContext()),
- false, false, false, false, 0, CallingConv::C,
- /*isTailCall=*/false,
- /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
- DAG.getExternalSymbol("__sync_synchronize",
- TLI.getPointerTy()),
- Args, DAG, dl);
+
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl).setChain(Node->getOperand(0))
+ .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
+ DAG.getExternalSymbol("__sync_synchronize",
+ TLI.getPointerTy()), std::move(Args), 0);
+
std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
Results.push_back(CallResult.second);
@@ -2862,13 +3011,14 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
case ISD::ATOMIC_LOAD: {
// There is no libcall for atomic load; fake it with ATOMIC_CMP_SWAP.
SDValue Zero = DAG.getConstant(0, Node->getValueType(0));
- SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl,
- cast<AtomicSDNode>(Node)->getMemoryVT(),
- Node->getOperand(0),
- Node->getOperand(1), Zero, Zero,
- cast<AtomicSDNode>(Node)->getMemOperand(),
- cast<AtomicSDNode>(Node)->getOrdering(),
- cast<AtomicSDNode>(Node)->getSynchScope());
+ SDVTList VTs = DAG.getVTList(Node->getValueType(0), MVT::Other);
+ SDValue Swap = DAG.getAtomicCmpSwap(
+ ISD::ATOMIC_CMP_SWAP, dl, cast<AtomicSDNode>(Node)->getMemoryVT(), VTs,
+ Node->getOperand(0), Node->getOperand(1), Zero, Zero,
+ cast<AtomicSDNode>(Node)->getMemOperand(),
+ cast<AtomicSDNode>(Node)->getOrdering(),
+ cast<AtomicSDNode>(Node)->getOrdering(),
+ cast<AtomicSDNode>(Node)->getSynchScope());
Results.push_back(Swap.getValue(0));
Results.push_back(Swap.getValue(1));
break;
@@ -2905,6 +3055,27 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
Results.push_back(Tmp.second);
break;
}
+ case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
+ // Expanding an ATOMIC_CMP_SWAP_WITH_SUCCESS produces an ATOMIC_CMP_SWAP and
+ // splits out the success value as a comparison. Expanding the resulting
+ // ATOMIC_CMP_SWAP will produce a libcall.
+ SDVTList VTs = DAG.getVTList(Node->getValueType(0), MVT::Other);
+ SDValue Res = DAG.getAtomicCmpSwap(
+ ISD::ATOMIC_CMP_SWAP, dl, cast<AtomicSDNode>(Node)->getMemoryVT(), VTs,
+ Node->getOperand(0), Node->getOperand(1), Node->getOperand(2),
+ Node->getOperand(3), cast<MemSDNode>(Node)->getMemOperand(),
+ cast<AtomicSDNode>(Node)->getSuccessOrdering(),
+ cast<AtomicSDNode>(Node)->getFailureOrdering(),
+ cast<AtomicSDNode>(Node)->getSynchScope());
+
+ SDValue Success = DAG.getSetCC(SDLoc(Node), Node->getValueType(1),
+ Res, Node->getOperand(2), ISD::SETEQ);
+
+ Results.push_back(Res.getValue(0));
+ Results.push_back(Success);
+ Results.push_back(Res.getValue(1));
+ break;
+ }
case ISD::DYNAMIC_STACKALLOC:
ExpandDYNAMIC_STACKALLOC(Node, Results);
break;
@@ -2925,14 +3096,11 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
case ISD::TRAP: {
// If this operation is not supported, lower it to 'abort()' call
TargetLowering::ArgListTy Args;
- TargetLowering::
- CallLoweringInfo CLI(Node->getOperand(0),
- Type::getVoidTy(*DAG.getContext()),
- false, false, false, false, 0, CallingConv::C,
- /*isTailCall=*/false,
- /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
- DAG.getExternalSymbol("abort", TLI.getPointerTy()),
- Args, DAG, dl);
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl).setChain(Node->getOperand(0))
+ .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
+ DAG.getExternalSymbol("abort", TLI.getPointerTy()),
+ std::move(Args), 0);
std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
Results.push_back(CallResult.second);
@@ -2986,6 +3154,10 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
Node->getOperand(0), Node->getValueType(0), dl);
Results.push_back(Tmp1);
break;
+ case ISD::FP_TO_SINT:
+ if (TLI.expandFP_TO_SINT(Node, Tmp1, DAG))
+ Results.push_back(Tmp1);
+ break;
case ISD::FP_TO_UINT: {
SDValue True, False;
EVT VT = Node->getOperand(0).getValueType();
@@ -3099,7 +3271,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
EVT NewEltVT = TLI.getTypeToTransformTo(*DAG.getContext(), EltVT);
// BUILD_VECTOR operands are allowed to be wider than the element type.
- // But if NewEltVT is smaller that EltVT the BUILD_VECTOR does not accept it
+ // But if NewEltVT is smaller than EltVT, the BUILD_VECTOR does not accept
+ // it.
if (NewEltVT.bitsLT(EltVT)) {
// Convert shuffle node.
@@ -3107,8 +3280,9 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
// cast operands to v8i32 and re-build the mask.
// Calculate new VT, the size of the new VT should be equal to original.
- EVT NewVT = EVT::getVectorVT(*DAG.getContext(), NewEltVT,
- VT.getSizeInBits()/NewEltVT.getSizeInBits());
+ EVT NewVT =
+ EVT::getVectorVT(*DAG.getContext(), NewEltVT,
+ VT.getSizeInBits() / NewEltVT.getSizeInBits());
assert(NewVT.bitsEq(VT));
// cast operands to new VT
@@ -3116,7 +3290,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
Op1 = DAG.getNode(ISD::BITCAST, dl, NewVT, Op1);
// Convert the shuffle mask
- unsigned int factor = NewVT.getVectorNumElements()/VT.getVectorNumElements();
+ unsigned int factor =
+ NewVT.getVectorNumElements()/VT.getVectorNumElements();
// EltVT gets smaller
assert(factor > 0);
@@ -3155,7 +3330,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
TLI.getVectorIdxTy())));
}
- Tmp1 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
+ Tmp1 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
// We may have changed the BUILD_VECTOR type. Cast it back to the Node type.
Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0), Tmp1);
Results.push_back(Tmp1);
@@ -3339,12 +3514,28 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
RTLIB::FMA_F80, RTLIB::FMA_F128,
RTLIB::FMA_PPCF128));
break;
- case ISD::FP16_TO_FP32:
- Results.push_back(ExpandLibCall(RTLIB::FPEXT_F16_F32, Node, false));
+ case ISD::FP16_TO_FP: {
+ if (Node->getValueType(0) == MVT::f32) {
+ Results.push_back(ExpandLibCall(RTLIB::FPEXT_F16_F32, Node, false));
+ break;
+ }
+
+ // We can extend to types bigger than f32 in two steps without changing the
+ // result. Since "f16 -> f32" is much more commonly available, give CodeGen
+ // the option of emitting that before resorting to a libcall.
+ SDValue Res =
+ DAG.getNode(ISD::FP16_TO_FP, dl, MVT::f32, Node->getOperand(0));
+ Results.push_back(
+ DAG.getNode(ISD::FP_EXTEND, dl, Node->getValueType(0), Res));
break;
- case ISD::FP32_TO_FP16:
- Results.push_back(ExpandLibCall(RTLIB::FPROUND_F32_F16, Node, false));
+ }
+ case ISD::FP_TO_FP16: {
+ RTLIB::Libcall LC =
+ RTLIB::getFPROUND(Node->getOperand(0).getValueType(), MVT::f16);
+ assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unable to expand fp_to_fp16");
+ Results.push_back(ExpandLibCall(LC, Node, false));
break;
+ }
case ISD::ConstantFP: {
ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node);
// Check to see if this FP immediate is already legal.
@@ -3476,6 +3667,23 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
Node->getOperand(1)));
break;
}
+
+ SDValue Lo, Hi;
+ EVT HalfType = VT.getHalfSizedIntegerVT(*DAG.getContext());
+ if (TLI.isOperationLegalOrCustom(ISD::ZERO_EXTEND, VT) &&
+ TLI.isOperationLegalOrCustom(ISD::ANY_EXTEND, VT) &&
+ TLI.isOperationLegalOrCustom(ISD::SHL, VT) &&
+ TLI.isOperationLegalOrCustom(ISD::OR, VT) &&
+ TLI.expandMUL(Node, Lo, Hi, HalfType, DAG)) {
+ Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo);
+ Hi = DAG.getNode(ISD::ANY_EXTEND, dl, VT, Hi);
+ SDValue Shift = DAG.getConstant(HalfType.getSizeInBits(),
+ TLI.getShiftAmountTy(HalfType));
+ Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift);
+ Results.push_back(DAG.getNode(ISD::OR, dl, VT, Lo, Hi));
+ break;
+ }
+
Tmp1 = ExpandIntLibCall(Node, false,
RTLIB::MUL_I8,
RTLIB::MUL_I16, RTLIB::MUL_I32,
@@ -3491,7 +3699,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
LHS, RHS);
Results.push_back(Sum);
- EVT OType = Node->getValueType(1);
+ EVT ResultType = Node->getValueType(1);
+ EVT OType = getSetCCResultType(Node->getValueType(0));
SDValue Zero = DAG.getConstant(0, LHS.getValueType());
@@ -3514,7 +3723,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE);
SDValue Cmp = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE);
- Results.push_back(Cmp);
+ Results.push_back(DAG.getBoolExtOrTrunc(Cmp, dl, ResultType, ResultType));
break;
}
case ISD::UADDO:
@@ -3525,9 +3734,14 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
LHS, RHS);
Results.push_back(Sum);
- Results.push_back(DAG.getSetCC(dl, Node->getValueType(1), Sum, LHS,
- Node->getOpcode () == ISD::UADDO ?
- ISD::SETULT : ISD::SETUGT));
+
+ EVT ResultType = Node->getValueType(1);
+ EVT SetCCType = getSetCCResultType(Node->getValueType(0));
+ ISD::CondCode CC
+ = Node->getOpcode() == ISD::UADDO ? ISD::SETULT : ISD::SETUGT;
+ SDValue SetCC = DAG.getSetCC(dl, SetCCType, Sum, LHS, CC);
+
+ Results.push_back(DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType));
break;
}
case ISD::UMULO:
@@ -3549,8 +3763,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
RHS);
TopHalf = BottomHalf.getValue(1);
- } else if (TLI.isTypeLegal(EVT::getIntegerVT(*DAG.getContext(),
- VT.getSizeInBits() * 2))) {
+ } else if (TLI.isTypeLegal(WideVT)) {
LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
Tmp1 = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
@@ -3708,7 +3921,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
// If we expanded the SETCC by inverting the condition code, then wrap
// the existing SETCC in a NOT to restore the intended condition.
if (NeedInvert)
- Tmp1 = DAG.getNOT(dl, Tmp1, Tmp1->getValueType(0));
+ Tmp1 = DAG.getLogicalNOT(dl, Tmp1, Tmp1->getValueType(0));
Results.push_back(Tmp1);
break;
@@ -3718,7 +3931,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
// illegal; expand it into a SELECT_CC.
EVT VT = Node->getValueType(0);
int TrueValue;
- switch (TLI.getBooleanContents(VT.isVector())) {
+ switch (TLI.getBooleanContents(Tmp1->getValueType(0))) {
case TargetLowering::ZeroOrOneBooleanContent:
case TargetLowering::UndefinedBooleanContent:
TrueValue = 1;
@@ -3738,13 +3951,29 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
Tmp2 = Node->getOperand(1); // RHS
Tmp3 = Node->getOperand(2); // True
Tmp4 = Node->getOperand(3); // False
+ EVT VT = Node->getValueType(0);
SDValue CC = Node->getOperand(4);
+ ISD::CondCode CCOp = cast<CondCodeSDNode>(CC)->get();
+
+ if (TLI.isCondCodeLegal(CCOp, Tmp1.getSimpleValueType())) {
+ // If the condition code is legal, then we need to expand this
+ // node using SETCC and SELECT.
+ EVT CmpVT = Tmp1.getValueType();
+ assert(!TLI.isOperationExpand(ISD::SELECT, VT) &&
+ "Cannot expand ISD::SELECT_CC when ISD::SELECT also needs to be "
+ "expanded.");
+ EVT CCVT = TLI.getSetCCResultType(*DAG.getContext(), CmpVT);
+ SDValue Cond = DAG.getNode(ISD::SETCC, dl, CCVT, Tmp1, Tmp2, CC);
+ Results.push_back(DAG.getSelect(dl, VT, Cond, Tmp3, Tmp4));
+ break;
+ }
+ // SELECT_CC is legal, so the condition code must not be.
bool Legalized = false;
// Try to legalize by inverting the condition. This is for targets that
// might support an ordered version of a condition, but not the unordered
// version (or vice versa).
- ISD::CondCode InvCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
+ ISD::CondCode InvCC = ISD::getSetCCInverse(CCOp,
Tmp1.getValueType().isInteger());
if (TLI.isCondCodeLegal(InvCC, Tmp1.getSimpleValueType())) {
// Use the new condition code and swap true and false
@@ -3782,8 +4011,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
} else {
Tmp2 = DAG.getConstant(0, Tmp1.getValueType());
CC = DAG.getCondCode(ISD::SETNE);
- Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1, Tmp2,
- Tmp3, Tmp4, CC);
+ Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1,
+ Tmp2, Tmp3, Tmp4, CC);
}
}
Results.push_back(Tmp1);
@@ -3813,8 +4042,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
} else {
Tmp3 = DAG.getConstant(0, Tmp2.getValueType());
Tmp4 = DAG.getCondCode(ISD::SETNE);
- Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, Tmp4, Tmp2,
- Tmp3, Node->getOperand(4));
+ Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, Tmp4,
+ Tmp2, Tmp3, Node->getOperand(4));
}
Results.push_back(Tmp1);
break;
@@ -3845,8 +4074,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
VT.getScalarType(), Ex, Sh));
}
SDValue Result =
- DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0),
- &Scalars[0], Scalars.size());
+ DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), Scalars);
ReplaceNode(SDValue(Node, 0), Result);
break;
}
@@ -3976,7 +4204,8 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
}
case ISD::SELECT: {
unsigned ExtOp, TruncOp;
- if (Node->getValueType(0).isVector()) {
+ if (Node->getValueType(0).isVector() ||
+ Node->getValueType(0).getSizeInBits() == NVT.getSizeInBits()) {
ExtOp = ISD::BITCAST;
TruncOp = ISD::BITCAST;
} else if (Node->getValueType(0).isInteger()) {
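
Aside (not part of the patch): a self-contained C++ sketch of the pairwise strategy implemented by the new ExpandBVWithShuffles helper above. Plain std::vector data stands in for SDValues and shuffle masks, so every name below is illustrative rather than LLVM API; only the shape of the algorithm matters: merge adjacent intermediates until at most two remain, then scatter each element to its final BUILD_VECTOR position.

#include <cstdio>
#include <utility>
#include <vector>

int main() {
  const int Scalars[] = {10, 20, 30, 40, 50, 60};       // operands of the BUILD_VECTOR
  const unsigned NumElems = 6;

  // Each intermediate holds (values gathered so far, their original positions).
  using Intermediate = std::pair<std::vector<int>, std::vector<unsigned>>;
  std::vector<Intermediate> Vals;
  for (unsigned i = 0; i < NumElems; ++i)
    Vals.push_back({{Scalars[i]}, {i}});                // SCALAR_TO_VECTOR analogue

  while (Vals.size() > 2) {
    std::vector<Intermediate> Next;
    for (size_t i = 0; i + 1 < Vals.size(); i += 2) {   // concatenate adjacent pairs
      Intermediate M = Vals[i];
      M.first.insert(M.first.end(), Vals[i + 1].first.begin(), Vals[i + 1].first.end());
      M.second.insert(M.second.end(), Vals[i + 1].second.begin(), Vals[i + 1].second.end());
      Next.push_back(std::move(M));
    }
    if (Vals.size() & 1)                                // odd count: carry the last one over
      Next.push_back(Vals.back());
    Vals.swap(Next);
  }

  // Final shuffle: place every gathered element at its original position.
  std::vector<int> Result(NumElems, 0);
  for (const Intermediate &IV : Vals)
    for (size_t j = 0; j < IV.second.size(); ++j)
      Result[IV.second[j]] = IV.first[j];

  for (int V : Result)
    std::printf("%d ", V);                              // prints: 10 20 30 40 50 60
  std::printf("\n");
}

In the real code each merge is a VECTOR_SHUFFLE whose mask must be legal, which is why ExpandBVWithShuffles runs its loop twice: once only to validate every mask, and a second time to actually build the nodes.
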
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index ecf4c5d..649dd7a 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -24,6 +24,8 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "legalize-types"
+
/// GetFPLibCall - Return the right libcall for the given floating point type.
static RTLIB::Libcall GetFPLibCall(EVT VT,
RTLIB::Libcall Call_F32,
@@ -83,7 +85,7 @@ void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
case ISD::FNEG: R = SoftenFloatRes_FNEG(N); break;
case ISD::FP_EXTEND: R = SoftenFloatRes_FP_EXTEND(N); break;
case ISD::FP_ROUND: R = SoftenFloatRes_FP_ROUND(N); break;
- case ISD::FP16_TO_FP32:R = SoftenFloatRes_FP16_TO_FP32(N); break;
+ case ISD::FP16_TO_FP: R = SoftenFloatRes_FP16_TO_FP(N); break;
case ISD::FPOW: R = SoftenFloatRes_FPOW(N); break;
case ISD::FPOWI: R = SoftenFloatRes_FPOWI(N); break;
case ISD::FREM: R = SoftenFloatRes_FREM(N); break;
@@ -371,6 +373,13 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FNEG(SDNode *N) {
SDValue DAGTypeLegalizer::SoftenFloatRes_FP_EXTEND(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = N->getOperand(0);
+
+ // There's only a libcall for f16 -> f32, so proceed in two stages. Also, it's
+ // entirely possible for both f16 and f32 to be legal, so use the fully
+ // hard-float FP_EXTEND rather than FP16_TO_FP.
+ if (Op.getValueType() == MVT::f16 && N->getValueType(0) != MVT::f32)
+ Op = DAG.getNode(ISD::FP_EXTEND, SDLoc(N), MVT::f32, Op);
+
RTLIB::Libcall LC = RTLIB::getFPEXT(Op.getValueType(), N->getValueType(0));
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_EXTEND!");
return TLI.makeLibCall(DAG, LC, NVT, &Op, 1, false, SDLoc(N)).first;
@@ -378,16 +387,29 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FP_EXTEND(SDNode *N) {
// FIXME: Should we just use 'normal' FP_EXTEND / FP_TRUNC instead of special
// nodes?
-SDValue DAGTypeLegalizer::SoftenFloatRes_FP16_TO_FP32(SDNode *N) {
- EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+SDValue DAGTypeLegalizer::SoftenFloatRes_FP16_TO_FP(SDNode *N) {
+ EVT MidVT = TLI.getTypeToTransformTo(*DAG.getContext(), MVT::f32);
SDValue Op = N->getOperand(0);
- return TLI.makeLibCall(DAG, RTLIB::FPEXT_F16_F32, NVT, &Op, 1, false,
- SDLoc(N)).first;
+ SDValue Res32 = TLI.makeLibCall(DAG, RTLIB::FPEXT_F16_F32, MidVT, &Op, 1,
+ false, SDLoc(N)).first;
+ if (N->getValueType(0) == MVT::f32)
+ return Res32;
+
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+ RTLIB::Libcall LC = RTLIB::getFPEXT(MVT::f32, N->getValueType(0));
+ assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_EXTEND!");
+ return TLI.makeLibCall(DAG, LC, NVT, &Res32, 1, false, SDLoc(N)).first;
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FP_ROUND(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = N->getOperand(0);
+ if (N->getValueType(0) == MVT::f16) {
+ // Semi-soften first, to FP_TO_FP16, so that targets which support f16 as a
+ // storage-only type get a chance to select things.
+ return DAG.getNode(ISD::FP_TO_FP16, SDLoc(N), NVT, Op);
+ }
+
RTLIB::Libcall LC = RTLIB::getFPROUND(Op.getValueType(), N->getValueType(0));
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_ROUND!");
return TLI.makeLibCall(DAG, LC, NVT, &Op, 1, false, SDLoc(N)).first;
@@ -496,6 +518,9 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FSUB(SDNode *N) {
SDValue DAGTypeLegalizer::SoftenFloatRes_FTRUNC(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+ if (N->getValueType(0) == MVT::f16)
+ return DAG.getNode(ISD::FP_TO_FP16, SDLoc(N), NVT, N->getOperand(0));
+
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0),
RTLIB::TRUNC_F32,
@@ -623,10 +648,11 @@ bool DAGTypeLegalizer::SoftenFloatOperand(SDNode *N, unsigned OpNo) {
case ISD::BITCAST: Res = SoftenFloatOp_BITCAST(N); break;
case ISD::BR_CC: Res = SoftenFloatOp_BR_CC(N); break;
+ case ISD::FP_EXTEND: Res = SoftenFloatOp_FP_EXTEND(N); break;
+ case ISD::FP_TO_FP16: // Same as FP_ROUND for softening purposes
case ISD::FP_ROUND: Res = SoftenFloatOp_FP_ROUND(N); break;
case ISD::FP_TO_SINT: Res = SoftenFloatOp_FP_TO_SINT(N); break;
case ISD::FP_TO_UINT: Res = SoftenFloatOp_FP_TO_UINT(N); break;
- case ISD::FP32_TO_FP16:Res = SoftenFloatOp_FP32_TO_FP16(N); break;
case ISD::SELECT_CC: Res = SoftenFloatOp_SELECT_CC(N); break;
case ISD::SETCC: Res = SoftenFloatOp_SETCC(N); break;
case ISD::STORE: Res = SoftenFloatOp_STORE(N, OpNo); break;
@@ -652,11 +678,32 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_BITCAST(SDNode *N) {
GetSoftenedFloat(N->getOperand(0)));
}
+SDValue DAGTypeLegalizer::SoftenFloatOp_FP_EXTEND(SDNode *N) {
+ // If we get here, the result must be legal but the source illegal.
+ EVT SVT = N->getOperand(0).getValueType();
+ EVT RVT = N->getValueType(0);
+ SDValue Op = GetSoftenedFloat(N->getOperand(0));
+
+ if (SVT == MVT::f16)
+ return DAG.getNode(ISD::FP16_TO_FP, SDLoc(N), RVT, Op);
+
+ RTLIB::Libcall LC = RTLIB::getFPEXT(SVT, RVT);
+ assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_EXTEND libcall");
+
+ return TLI.makeLibCall(DAG, LC, RVT, &Op, 1, false, SDLoc(N)).first;
+}
+
+
SDValue DAGTypeLegalizer::SoftenFloatOp_FP_ROUND(SDNode *N) {
+ // We actually deal with the partially-softened FP_TO_FP16 node too, which
+ // returns an i16 so doesn't meet the constraints necessary for FP_ROUND.
+ assert(N->getOpcode() == ISD::FP_ROUND || N->getOpcode() == ISD::FP_TO_FP16);
+
EVT SVT = N->getOperand(0).getValueType();
EVT RVT = N->getValueType(0);
+ EVT FloatRVT = N->getOpcode() == ISD::FP_TO_FP16 ? MVT::f16 : RVT;
- RTLIB::Libcall LC = RTLIB::getFPROUND(SVT, RVT);
+ RTLIB::Libcall LC = RTLIB::getFPROUND(SVT, FloatRVT);
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_ROUND libcall");
SDValue Op = GetSoftenedFloat(N->getOperand(0));
@@ -674,7 +721,7 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_BR_CC(SDNode *N) {
// If softenSetCCOperands returned a scalar, we need to compare the result
// against zero to select between true and false values.
- if (NewRHS.getNode() == 0) {
+ if (!NewRHS.getNode()) {
NewRHS = DAG.getConstant(0, NewLHS.getValueType());
CCCode = ISD::SETNE;
}
@@ -702,13 +749,6 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_FP_TO_UINT(SDNode *N) {
return TLI.makeLibCall(DAG, LC, RVT, &Op, 1, false, SDLoc(N)).first;
}
-SDValue DAGTypeLegalizer::SoftenFloatOp_FP32_TO_FP16(SDNode *N) {
- EVT RVT = N->getValueType(0);
- RTLIB::Libcall LC = RTLIB::FPROUND_F32_F16;
- SDValue Op = GetSoftenedFloat(N->getOperand(0));
- return TLI.makeLibCall(DAG, LC, RVT, &Op, 1, false, SDLoc(N)).first;
-}
-
SDValue DAGTypeLegalizer::SoftenFloatOp_SELECT_CC(SDNode *N) {
SDValue NewLHS = N->getOperand(0), NewRHS = N->getOperand(1);
ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(4))->get();
@@ -720,7 +760,7 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_SELECT_CC(SDNode *N) {
// If softenSetCCOperands returned a scalar, we need to compare the result
// against zero to select between true and false values.
- if (NewRHS.getNode() == 0) {
+ if (!NewRHS.getNode()) {
NewRHS = DAG.getConstant(0, NewLHS.getValueType());
CCCode = ISD::SETNE;
}
@@ -742,7 +782,7 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_SETCC(SDNode *N) {
TLI.softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, SDLoc(N));
// If softenSetCCOperands returned a scalar, use it.
- if (NewRHS.getNode() == 0) {
+ if (!NewRHS.getNode()) {
assert(NewLHS.getValueType() == N->getValueType(0) &&
"Unexpected setcc expansion!");
return NewLHS;
@@ -1340,7 +1380,7 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_BR_CC(SDNode *N) {
// If ExpandSetCCOperands returned a scalar, we need to compare the result
// against zero to select between true and false values.
- if (NewRHS.getNode() == 0) {
+ if (!NewRHS.getNode()) {
NewRHS = DAG.getConstant(0, NewLHS.getValueType());
CCCode = ISD::SETNE;
}
@@ -1433,7 +1473,7 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_SELECT_CC(SDNode *N) {
// If ExpandSetCCOperands returned a scalar, we need to compare the result
// against zero to select between true and false values.
- if (NewRHS.getNode() == 0) {
+ if (!NewRHS.getNode()) {
NewRHS = DAG.getConstant(0, NewLHS.getValueType());
CCCode = ISD::SETNE;
}
@@ -1450,7 +1490,7 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_SETCC(SDNode *N) {
FloatExpandSetCCOperands(NewLHS, NewRHS, CCCode, SDLoc(N));
// If ExpandSetCCOperands returned a scalar, use it.
- if (NewRHS.getNode() == 0) {
+ if (!NewRHS.getNode()) {
assert(NewLHS.getValueType() == N->getValueType(0) &&
"Unexpected setcc expansion!");
return NewLHS;
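
Aside (not part of the patch): the f16 softening changes above funnel everything through a single f16 -> f32 conversion (RTLIB::FPEXT_F16_F32, typically a compiler-rt helper such as __gnu_h2f_ieee, though the exact symbol is target-dependent) and then widen further with an ordinary, exact FP_EXTEND. The sketch below is a simplified, hypothetical version of what such a helper computes, decoding an IEEE binary16 bit pattern into a float; it is not LLVM or compiler-rt code.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Decode an IEEE binary16 bit pattern into a binary32 value.
static float halfBitsToFloat(uint16_t h) {
  uint32_t Sign = (uint32_t)(h >> 15) << 31;
  uint32_t Exp  = (h >> 10) & 0x1F;
  uint32_t Mant = h & 0x3FF;
  uint32_t Bits;
  if (Exp == 0) {
    if (Mant == 0) {
      Bits = Sign;                                           // +/- zero
    } else {                                                 // subnormal: renormalize
      int E = -1;
      do { ++E; Mant <<= 1; } while ((Mant & 0x400) == 0);
      Mant &= 0x3FF;
      Bits = Sign | ((uint32_t)(127 - 15 - E) << 23) | (Mant << 13);
    }
  } else if (Exp == 31) {
    Bits = Sign | 0x7F800000u | (Mant << 13);                // infinity / NaN
  } else {
    Bits = Sign | ((Exp - 15 + 127) << 23) | (Mant << 13);   // normal number
  }
  float F;
  std::memcpy(&F, &Bits, sizeof F);
  return F;
}

int main() {
  std::printf("%f %f %f\n",
              halfBitsToFloat(0x3C00),   // 1.0
              halfBitsToFloat(0xC000),   // -2.0
              halfBitsToFloat(0x7BFF));  // 65504, the largest finite half
}

Because every binary16 value is exactly representable in binary32, and every binary32 value in binary64, extending in two stages loses nothing; that is what both the FP16_TO_FP expansion in LegalizeDAG and SoftenFloatRes_FP16_TO_FP above rely on.
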
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 4255948..44d9e38 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -24,6 +24,8 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "legalize-types"
+
//===----------------------------------------------------------------------===//
// Integer Result Promotion
//===----------------------------------------------------------------------===//
@@ -97,7 +99,7 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: Res = PromoteIntRes_FP_TO_XINT(N); break;
- case ISD::FP32_TO_FP16:Res = PromoteIntRes_FP32_TO_FP16(N); break;
+ case ISD::FP_TO_FP16: Res = PromoteIntRes_FP_TO_FP16(N); break;
case ISD::AND:
case ISD::OR:
@@ -136,7 +138,9 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
Res = PromoteIntRes_Atomic1(cast<AtomicSDNode>(N)); break;
case ISD::ATOMIC_CMP_SWAP:
- Res = PromoteIntRes_Atomic2(cast<AtomicSDNode>(N)); break;
+ case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
+ Res = PromoteIntRes_AtomicCmpSwap(cast<AtomicSDNode>(N), ResNo);
+ break;
}
// If the result is null then the sub-method took care of registering it.
@@ -190,16 +194,40 @@ SDValue DAGTypeLegalizer::PromoteIntRes_Atomic1(AtomicSDNode *N) {
return Res;
}
-SDValue DAGTypeLegalizer::PromoteIntRes_Atomic2(AtomicSDNode *N) {
+SDValue DAGTypeLegalizer::PromoteIntRes_AtomicCmpSwap(AtomicSDNode *N,
+ unsigned ResNo) {
+ if (ResNo == 1) {
+ assert(N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
+ EVT SVT = getSetCCResultType(N->getOperand(2).getValueType());
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(1));
+
+ // Only use the result of getSetCCResultType if it is legal,
+ // otherwise just use the promoted result type (NVT).
+ if (!TLI.isTypeLegal(SVT))
+ SVT = NVT;
+
+ SDVTList VTs = DAG.getVTList(N->getValueType(0), SVT, MVT::Other);
+ SDValue Res = DAG.getAtomicCmpSwap(
+ ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, SDLoc(N), N->getMemoryVT(), VTs,
+ N->getChain(), N->getBasePtr(), N->getOperand(2), N->getOperand(3),
+ N->getMemOperand(), N->getSuccessOrdering(), N->getFailureOrdering(),
+ N->getSynchScope());
+ ReplaceValueWith(SDValue(N, 0), Res.getValue(0));
+ ReplaceValueWith(SDValue(N, 2), Res.getValue(2));
+ return Res.getValue(1);
+ }
+
SDValue Op2 = GetPromotedInteger(N->getOperand(2));
SDValue Op3 = GetPromotedInteger(N->getOperand(3));
- SDValue Res = DAG.getAtomic(N->getOpcode(), SDLoc(N),
- N->getMemoryVT(), N->getChain(), N->getBasePtr(),
- Op2, Op3, N->getMemOperand(), N->getOrdering(),
- N->getSynchScope());
- // Legalized the chain result - switch anything that used the old chain to
- // use the new one.
- ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
+ SDVTList VTs =
+ DAG.getVTList(Op2.getValueType(), N->getValueType(1), MVT::Other);
+ SDValue Res = DAG.getAtomicCmpSwap(
+ N->getOpcode(), SDLoc(N), N->getMemoryVT(), VTs, N->getChain(),
+ N->getBasePtr(), Op2, Op3, N->getMemOperand(), N->getSuccessOrdering(),
+ N->getFailureOrdering(), N->getSynchScope());
+ // Update the uses of N with the newly created Res.
+ for (unsigned i = 1, NumResults = N->getNumValues(); i < NumResults; ++i)
+ ReplaceValueWith(SDValue(N, i), Res.getValue(i));
return Res;
}
@@ -266,9 +294,9 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BSWAP(SDNode *N) {
EVT NVT = Op.getValueType();
SDLoc dl(N);
- unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits();
+ unsigned DiffBits = NVT.getScalarSizeInBits() - OVT.getScalarSizeInBits();
return DAG.getNode(ISD::SRL, dl, NVT, DAG.getNode(ISD::BSWAP, dl, NVT, Op),
- DAG.getConstant(DiffBits, TLI.getPointerTy()));
+ DAG.getConstant(DiffBits, TLI.getShiftAmountTy(NVT)));
}
SDValue DAGTypeLegalizer::PromoteIntRes_BUILD_PAIR(SDNode *N) {
@@ -372,7 +400,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_FP_TO_XINT(SDNode *N) {
DAG.getValueType(N->getValueType(0).getScalarType()));
}
-SDValue DAGTypeLegalizer::PromoteIntRes_FP32_TO_FP16(SDNode *N) {
+SDValue DAGTypeLegalizer::PromoteIntRes_FP_TO_FP16(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDLoc dl(N);
@@ -432,7 +460,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_Overflow(SDNode *N) {
EVT ValueVTs[] = { N->getValueType(0), NVT };
SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
SDValue Res = DAG.getNode(N->getOpcode(), SDLoc(N),
- DAG.getVTList(ValueVTs, 2), Ops, 2);
+ DAG.getVTList(ValueVTs), Ops);
// Modified the sum result - switch anything that used the old sum to use
// the new one.
@@ -490,7 +518,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_VSELECT(SDNode *N) {
EVT OpTy = N->getOperand(1).getValueType();
// Promote all the way up to the canonical SetCC type.
- Mask = PromoteTargetBoolean(Mask, getSetCCResultType(OpTy));
+ Mask = PromoteTargetBoolean(Mask, OpTy);
SDValue LHS = GetPromotedInteger(N->getOperand(1));
SDValue RHS = GetPromotedInteger(N->getOperand(2));
return DAG.getNode(ISD::VSELECT, SDLoc(N),
@@ -797,7 +825,7 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
case ISD::STORE: Res = PromoteIntOp_STORE(cast<StoreSDNode>(N),
OpNo); break;
case ISD::TRUNCATE: Res = PromoteIntOp_TRUNCATE(N); break;
- case ISD::FP16_TO_FP32:
+ case ISD::FP16_TO_FP:
case ISD::UINT_TO_FP: Res = PromoteIntOp_UINT_TO_FP(N); break;
case ISD::ZERO_EXTEND: Res = PromoteIntOp_ZERO_EXTEND(N); break;
@@ -890,8 +918,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo) {
assert(OpNo == 1 && "only know how to promote condition");
// Promote all the way up to the canonical SetCC type.
- EVT SVT = getSetCCResultType(MVT::Other);
- SDValue Cond = PromoteTargetBoolean(N->getOperand(1), SVT);
+ SDValue Cond = PromoteTargetBoolean(N->getOperand(1), MVT::Other);
// The chain (Op#0) and basic block destination (Op#2) are always legal types.
return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0), Cond,
@@ -931,7 +958,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_BUILD_VECTOR(SDNode *N) {
for (unsigned i = 0; i < NumElts; ++i)
NewOps.push_back(GetPromotedInteger(N->getOperand(i)));
- return SDValue(DAG.UpdateNodeOperands(N, &NewOps[0], NumElts), 0);
+ return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_CONVERT_RNDSAT(SDNode *N) {
@@ -984,9 +1011,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_SELECT(SDNode *N, unsigned OpNo) {
EVT OpTy = N->getOperand(1).getValueType();
// Promote all the way up to the canonical SetCC type.
- EVT SVT = getSetCCResultType(N->getOpcode() == ISD::SELECT ?
- OpTy.getScalarType() : OpTy);
- Cond = PromoteTargetBoolean(Cond, SVT);
+ EVT OpVT = N->getOpcode() == ISD::SELECT ? OpTy.getScalarType() : OpTy;
+ Cond = PromoteTargetBoolean(Cond, OpVT);
return SDValue(DAG.UpdateNodeOperands(N, Cond, N->getOperand(1),
N->getOperand(2)), 0);
@@ -1141,6 +1167,26 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
ReplaceValueWith(SDValue(N, 1), Tmp.second);
break;
}
+ case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
+ AtomicSDNode *AN = cast<AtomicSDNode>(N);
+ SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::Other);
+ SDValue Tmp = DAG.getAtomicCmpSwap(
+ ISD::ATOMIC_CMP_SWAP, SDLoc(N), AN->getMemoryVT(), VTs,
+ N->getOperand(0), N->getOperand(1), N->getOperand(2), N->getOperand(3),
+ AN->getMemOperand(), AN->getSuccessOrdering(), AN->getFailureOrdering(),
+ AN->getSynchScope());
+
+ // Expanding to the strong ATOMIC_CMP_SWAP node means we can determine
+ // success simply by comparing the loaded value against the ingoing
+ // comparison.
+ SDValue Success = DAG.getSetCC(SDLoc(N), N->getValueType(1), Tmp,
+ N->getOperand(2), ISD::SETEQ);
+
+ SplitInteger(Tmp, Lo, Hi);
+ ReplaceValueWith(SDValue(N, 1), Success);
+ ReplaceValueWith(SDValue(N, 2), Tmp.getValue(1));
+ break;
+ }
case ISD::AND:
case ISD::OR:
@@ -1270,6 +1316,7 @@ std::pair <SDValue, SDValue> DAGTypeLegalizer::ExpandAtomic(SDNode *Node) {
/// and the shift amount is a constant 'Amt'. Expand the operation.
void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt,
SDValue &Lo, SDValue &Hi) {
+ assert(Amt && "Expected zero shifts to be already optimized away.");
SDLoc DL(N);
// Expand the incoming operand to be shifted, so that we have its parts
SDValue InL, InH;
@@ -1296,9 +1343,9 @@ void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt,
// Emit this X << 1 as X+X.
SDVTList VTList = DAG.getVTList(NVT, MVT::Glue);
SDValue LoOps[2] = { InL, InL };
- Lo = DAG.getNode(ISD::ADDC, DL, VTList, LoOps, 2);
+ Lo = DAG.getNode(ISD::ADDC, DL, VTList, LoOps);
SDValue HiOps[3] = { InH, InH, Lo.getValue(1) };
- Hi = DAG.getNode(ISD::ADDE, DL, VTList, HiOps, 3);
+ Hi = DAG.getNode(ISD::ADDE, DL, VTList, HiOps);
} else {
Lo = DAG.getNode(ISD::SHL, DL, NVT, InL, DAG.getConstant(Amt, ShTy));
Hi = DAG.getNode(ISD::OR, DL, NVT,
@@ -1372,7 +1419,7 @@ ExpandShiftWithKnownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi) {
APInt HighBitMask = APInt::getHighBitsSet(ShBits, ShBits - Log2_32(NVTBits));
APInt KnownZero, KnownOne;
- DAG.ComputeMaskedBits(N->getOperand(1), KnownZero, KnownOne);
+ DAG.computeKnownBits(N->getOperand(1), KnownZero, KnownOne);
// If we don't know anything about the high bits, exit.
if (((KnownZero|KnownOne) & HighBitMask) == 0)
@@ -1547,20 +1594,20 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
if (hasCarry) {
SDVTList VTList = DAG.getVTList(NVT, MVT::Glue);
if (N->getOpcode() == ISD::ADD) {
- Lo = DAG.getNode(ISD::ADDC, dl, VTList, LoOps, 2);
+ Lo = DAG.getNode(ISD::ADDC, dl, VTList, LoOps);
HiOps[2] = Lo.getValue(1);
- Hi = DAG.getNode(ISD::ADDE, dl, VTList, HiOps, 3);
+ Hi = DAG.getNode(ISD::ADDE, dl, VTList, HiOps);
} else {
- Lo = DAG.getNode(ISD::SUBC, dl, VTList, LoOps, 2);
+ Lo = DAG.getNode(ISD::SUBC, dl, VTList, LoOps);
HiOps[2] = Lo.getValue(1);
- Hi = DAG.getNode(ISD::SUBE, dl, VTList, HiOps, 3);
+ Hi = DAG.getNode(ISD::SUBE, dl, VTList, HiOps);
}
return;
}
if (N->getOpcode() == ISD::ADD) {
- Lo = DAG.getNode(ISD::ADD, dl, NVT, LoOps, 2);
- Hi = DAG.getNode(ISD::ADD, dl, NVT, HiOps, 2);
+ Lo = DAG.getNode(ISD::ADD, dl, NVT, LoOps);
+ Hi = DAG.getNode(ISD::ADD, dl, NVT, makeArrayRef(HiOps, 2));
SDValue Cmp1 = DAG.getSetCC(dl, getSetCCResultType(NVT), Lo, LoOps[0],
ISD::SETULT);
SDValue Carry1 = DAG.getSelect(dl, NVT, Cmp1,
@@ -1572,8 +1619,8 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
DAG.getConstant(1, NVT), Carry1);
Hi = DAG.getNode(ISD::ADD, dl, NVT, Hi, Carry2);
} else {
- Lo = DAG.getNode(ISD::SUB, dl, NVT, LoOps, 2);
- Hi = DAG.getNode(ISD::SUB, dl, NVT, HiOps, 2);
+ Lo = DAG.getNode(ISD::SUB, dl, NVT, LoOps);
+ Hi = DAG.getNode(ISD::SUB, dl, NVT, makeArrayRef(HiOps, 2));
SDValue Cmp =
DAG.getSetCC(dl, getSetCCResultType(LoOps[0].getValueType()),
LoOps[0], LoOps[1], ISD::SETULT);
@@ -1596,13 +1643,13 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUBC(SDNode *N,
SDValue HiOps[3] = { LHSH, RHSH };
if (N->getOpcode() == ISD::ADDC) {
- Lo = DAG.getNode(ISD::ADDC, dl, VTList, LoOps, 2);
+ Lo = DAG.getNode(ISD::ADDC, dl, VTList, LoOps);
HiOps[2] = Lo.getValue(1);
- Hi = DAG.getNode(ISD::ADDE, dl, VTList, HiOps, 3);
+ Hi = DAG.getNode(ISD::ADDE, dl, VTList, HiOps);
} else {
- Lo = DAG.getNode(ISD::SUBC, dl, VTList, LoOps, 2);
+ Lo = DAG.getNode(ISD::SUBC, dl, VTList, LoOps);
HiOps[2] = Lo.getValue(1);
- Hi = DAG.getNode(ISD::SUBE, dl, VTList, HiOps, 3);
+ Hi = DAG.getNode(ISD::SUBE, dl, VTList, HiOps);
}
// Legalized the flag result - switch anything that used the old flag to
@@ -1621,9 +1668,9 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUBE(SDNode *N,
SDValue LoOps[3] = { LHSL, RHSL, N->getOperand(2) };
SDValue HiOps[3] = { LHSH, RHSH };
- Lo = DAG.getNode(N->getOpcode(), dl, VTList, LoOps, 3);
+ Lo = DAG.getNode(N->getOpcode(), dl, VTList, LoOps);
HiOps[2] = Lo.getValue(1);
- Hi = DAG.getNode(N->getOpcode(), dl, VTList, HiOps, 3);
+ Hi = DAG.getNode(N->getOpcode(), dl, VTList, HiOps);
// Legalized the flag result - switch anything that used the old flag to
// use the new one.
@@ -1712,9 +1759,13 @@ void DAGTypeLegalizer::ExpandIntRes_Constant(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
unsigned NBitWidth = NVT.getSizeInBits();
- const APInt &Cst = cast<ConstantSDNode>(N)->getAPIntValue();
- Lo = DAG.getConstant(Cst.trunc(NBitWidth), NVT);
- Hi = DAG.getConstant(Cst.lshr(NBitWidth).trunc(NBitWidth), NVT);
+ auto Constant = cast<ConstantSDNode>(N);
+ const APInt &Cst = Constant->getAPIntValue();
+ bool IsTarget = Constant->isTargetOpcode();
+ bool IsOpaque = Constant->isOpaque();
+ Lo = DAG.getConstant(Cst.trunc(NBitWidth), NVT, IsTarget, IsOpaque);
+ Hi = DAG.getConstant(Cst.lshr(NBitWidth).trunc(NBitWidth), NVT, IsTarget,
+ IsOpaque);
}
void DAGTypeLegalizer::ExpandIntRes_CTLZ(SDNode *N,
@@ -1923,73 +1974,12 @@ void DAGTypeLegalizer::ExpandIntRes_MUL(SDNode *N,
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
SDLoc dl(N);
- bool HasMULHS = TLI.isOperationLegalOrCustom(ISD::MULHS, NVT);
- bool HasMULHU = TLI.isOperationLegalOrCustom(ISD::MULHU, NVT);
- bool HasSMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::SMUL_LOHI, NVT);
- bool HasUMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::UMUL_LOHI, NVT);
- if (HasMULHU || HasMULHS || HasUMUL_LOHI || HasSMUL_LOHI) {
- SDValue LL, LH, RL, RH;
- GetExpandedInteger(N->getOperand(0), LL, LH);
- GetExpandedInteger(N->getOperand(1), RL, RH);
- unsigned OuterBitSize = VT.getSizeInBits();
- unsigned InnerBitSize = NVT.getSizeInBits();
- unsigned LHSSB = DAG.ComputeNumSignBits(N->getOperand(0));
- unsigned RHSSB = DAG.ComputeNumSignBits(N->getOperand(1));
-
- APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize);
- if (DAG.MaskedValueIsZero(N->getOperand(0), HighMask) &&
- DAG.MaskedValueIsZero(N->getOperand(1), HighMask)) {
- // The inputs are both zero-extended.
- if (HasUMUL_LOHI) {
- // We can emit a umul_lohi.
- Lo = DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(NVT, NVT), LL, RL);
- Hi = SDValue(Lo.getNode(), 1);
- return;
- }
- if (HasMULHU) {
- // We can emit a mulhu+mul.
- Lo = DAG.getNode(ISD::MUL, dl, NVT, LL, RL);
- Hi = DAG.getNode(ISD::MULHU, dl, NVT, LL, RL);
- return;
- }
- }
- if (LHSSB > InnerBitSize && RHSSB > InnerBitSize) {
- // The input values are both sign-extended.
- if (HasSMUL_LOHI) {
- // We can emit a smul_lohi.
- Lo = DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(NVT, NVT), LL, RL);
- Hi = SDValue(Lo.getNode(), 1);
- return;
- }
- if (HasMULHS) {
- // We can emit a mulhs+mul.
- Lo = DAG.getNode(ISD::MUL, dl, NVT, LL, RL);
- Hi = DAG.getNode(ISD::MULHS, dl, NVT, LL, RL);
- return;
- }
- }
- if (HasUMUL_LOHI) {
- // Lo,Hi = umul LHS, RHS.
- SDValue UMulLOHI = DAG.getNode(ISD::UMUL_LOHI, dl,
- DAG.getVTList(NVT, NVT), LL, RL);
- Lo = UMulLOHI;
- Hi = UMulLOHI.getValue(1);
- RH = DAG.getNode(ISD::MUL, dl, NVT, LL, RH);
- LH = DAG.getNode(ISD::MUL, dl, NVT, LH, RL);
- Hi = DAG.getNode(ISD::ADD, dl, NVT, Hi, RH);
- Hi = DAG.getNode(ISD::ADD, dl, NVT, Hi, LH);
- return;
- }
- if (HasMULHU) {
- Lo = DAG.getNode(ISD::MUL, dl, NVT, LL, RL);
- Hi = DAG.getNode(ISD::MULHU, dl, NVT, LL, RL);
- RH = DAG.getNode(ISD::MUL, dl, NVT, LL, RH);
- LH = DAG.getNode(ISD::MUL, dl, NVT, LH, RL);
- Hi = DAG.getNode(ISD::ADD, dl, NVT, Hi, RH);
- Hi = DAG.getNode(ISD::ADD, dl, NVT, Hi, LH);
- return;
- }
- }
+ SDValue LL, LH, RL, RH;
+ GetExpandedInteger(N->getOperand(0), LL, LH);
+ GetExpandedInteger(N->getOperand(1), RL, RH);
+
+ if (TLI.expandMUL(N, Lo, Hi, NVT, DAG, LL, LH, RL, RH))
+ return;
// If nothing else, we can make a libcall.
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
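The hand-rolled expansion deleted above and TLI.expandMUL both rebuild a double-width product from half-width pieces using the same identity: Lo is the low half of LL*RL, and Hi accumulates the high half of LL*RL plus the two cross terms. A minimal standalone sketch of that arithmetic (plain C++ with fixed 32/64-bit widths chosen only for illustration; it does not use any SelectionDAG types):

#include <cassert>
#include <cstdint>

int main() {
  // Split two 64-bit operands into 32-bit halves, multiply, and recombine:
  //   Lo = low 32 bits of LL*RL
  //   Hi = high 32 bits of LL*RL + LL*RH + LH*RL   (everything mod 2^32)
  uint64_t A = 0x123456789abcdef0ULL, B = 0x0fedcba987654321ULL;
  uint32_t LL = (uint32_t)A, LH = (uint32_t)(A >> 32);
  uint32_t RL = (uint32_t)B, RH = (uint32_t)(B >> 32);

  uint64_t LowProd = (uint64_t)LL * RL;   // analogue of UMUL_LOHI on the low halves
  uint32_t Lo = (uint32_t)LowProd;
  uint32_t Hi = (uint32_t)(LowProd >> 32);
  Hi += LL * RH;                          // cross terms wrap mod 2^32
  Hi += LH * RL;

  uint64_t Expected = A * B;              // truncating 64-bit product
  assert(Lo == (uint32_t)Expected && Hi == (uint32_t)(Expected >> 32));
  return 0;
}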
@@ -2120,7 +2110,7 @@ void DAGTypeLegalizer::ExpandIntRes_Shift(SDNode *N,
ShiftOp = DAG.getZExtOrTrunc(ShiftOp, dl, ShiftTy);
SDValue Ops[] = { LHSL, LHSH, ShiftOp };
- Lo = DAG.getNode(PartsOpc, dl, DAG.getVTList(VT, VT), Ops, 3);
+ Lo = DAG.getNode(PartsOpc, dl, DAG.getVTList(VT, VT), Ops);
Hi = Lo.getValue(1);
return;
}
@@ -2352,12 +2342,12 @@ void DAGTypeLegalizer::ExpandIntRes_XMULO(SDNode *N,
Args.push_back(Entry);
SDValue Func = DAG.getExternalSymbol(TLI.getLibcallName(LC), PtrVT);
- TargetLowering::
- CallLoweringInfo CLI(Chain, RetTy, true, false, false, false,
- 0, TLI.getLibcallCallingConv(LC),
- /*isTailCall=*/false,
- /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
- Func, Args, DAG, dl);
+
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl).setChain(Chain)
+ .setCallee(TLI.getLibcallCallingConv(LC), RetTy, Func, std::move(Args), 0)
+ .setSExtResult();
+
std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
SplitInteger(CallInfo.first, Lo, Hi);
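The old CallLoweringInfo constructor took a long positional argument list; the 3.5 interface builds the same information with chained setters. A condensed restatement of the pattern used in this hunk (a fragment, not standalone: DAG, TLI, Chain, RetTy, Func, Args, dl and LC are the variables already in scope in the surrounding code):

// Builder-style call lowering, mirroring the lines added above.
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl)
    .setChain(Chain)
    .setCallee(TLI.getLibcallCallingConv(LC), RetTy, Func, std::move(Args), 0)
    .setSExtResult();                     // the libcall result is sign-extended

std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
// CallInfo.first is the returned value, CallInfo.second the output chain.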
@@ -2442,15 +2432,18 @@ void DAGTypeLegalizer::ExpandIntRes_ATOMIC_LOAD(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDLoc dl(N);
EVT VT = cast<AtomicSDNode>(N)->getMemoryVT();
+ SDVTList VTs = DAG.getVTList(VT, MVT::i1, MVT::Other);
SDValue Zero = DAG.getConstant(0, VT);
- SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, VT,
- N->getOperand(0),
- N->getOperand(1), Zero, Zero,
- cast<AtomicSDNode>(N)->getMemOperand(),
- cast<AtomicSDNode>(N)->getOrdering(),
- cast<AtomicSDNode>(N)->getSynchScope());
+ SDValue Swap = DAG.getAtomicCmpSwap(
+ ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl,
+ cast<AtomicSDNode>(N)->getMemoryVT(), VTs, N->getOperand(0),
+ N->getOperand(1), Zero, Zero, cast<AtomicSDNode>(N)->getMemOperand(),
+ cast<AtomicSDNode>(N)->getOrdering(),
+ cast<AtomicSDNode>(N)->getOrdering(),
+ cast<AtomicSDNode>(N)->getSynchScope());
+
ReplaceValueWith(SDValue(N, 0), Swap.getValue(0));
- ReplaceValueWith(SDValue(N, 1), Swap.getValue(1));
+ ReplaceValueWith(SDValue(N, 1), Swap.getValue(2));
}
//===----------------------------------------------------------------------===//
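The rewritten ExpandIntRes_ATOMIC_LOAD above emits ATOMIC_CMP_SWAP_WITH_SUCCESS, which carries one more result than the old node, so the chain moves from result 1 to result 2. A short annotated fragment spelling out the result numbering that hunk relies on (names taken from the code above, not standalone):

// VTs = DAG.getVTList(VT, MVT::i1, MVT::Other), so the cmpxchg node yields:
//   Swap.getValue(0) -> loaded value
//   Swap.getValue(1) -> compare-success flag (unused by this expansion)
//   Swap.getValue(2) -> output chain
ReplaceValueWith(SDValue(N, 0), Swap.getValue(0));  // value result of the load
ReplaceValueWith(SDValue(N, 1), Swap.getValue(2));  // chain result of the load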
@@ -2575,15 +2568,20 @@ void DAGTypeLegalizer::IntegerExpandSetCCOperands(SDValue &NewLHS,
// NOTE: on targets without efficient SELECT of bools, we can always use
// this identity: (B1 ? B2 : B3) --> (B1 & B2)|(!B1&B3)
- TargetLowering::DAGCombinerInfo DagCombineInfo(DAG, AfterLegalizeTypes, true, NULL);
+ TargetLowering::DAGCombinerInfo DagCombineInfo(DAG, AfterLegalizeTypes, true,
+ nullptr);
SDValue Tmp1, Tmp2;
- Tmp1 = TLI.SimplifySetCC(getSetCCResultType(LHSLo.getValueType()),
- LHSLo, RHSLo, LowCC, false, DagCombineInfo, dl);
+ if (TLI.isTypeLegal(LHSLo.getValueType()) &&
+ TLI.isTypeLegal(RHSLo.getValueType()))
+ Tmp1 = TLI.SimplifySetCC(getSetCCResultType(LHSLo.getValueType()),
+ LHSLo, RHSLo, LowCC, false, DagCombineInfo, dl);
if (!Tmp1.getNode())
Tmp1 = DAG.getSetCC(dl, getSetCCResultType(LHSLo.getValueType()),
LHSLo, RHSLo, LowCC);
- Tmp2 = TLI.SimplifySetCC(getSetCCResultType(LHSHi.getValueType()),
- LHSHi, RHSHi, CCCode, false, DagCombineInfo, dl);
+ if (TLI.isTypeLegal(LHSHi.getValueType()) &&
+ TLI.isTypeLegal(RHSHi.getValueType()))
+ Tmp2 = TLI.SimplifySetCC(getSetCCResultType(LHSHi.getValueType()),
+ LHSHi, RHSHi, CCCode, false, DagCombineInfo, dl);
if (!Tmp2.getNode())
Tmp2 = DAG.getNode(ISD::SETCC, dl,
getSetCCResultType(LHSHi.getValueType()),
@@ -2624,7 +2622,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_BR_CC(SDNode *N) {
// If ExpandSetCCOperands returned a scalar, we need to compare the result
// against zero to select between true and false values.
- if (NewRHS.getNode() == 0) {
+ if (!NewRHS.getNode()) {
NewRHS = DAG.getConstant(0, NewLHS.getValueType());
CCCode = ISD::SETNE;
}
@@ -2642,7 +2640,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_SELECT_CC(SDNode *N) {
// If ExpandSetCCOperands returned a scalar, we need to compare the result
// against zero to select between true and false values.
- if (NewRHS.getNode() == 0) {
+ if (!NewRHS.getNode()) {
NewRHS = DAG.getConstant(0, NewLHS.getValueType());
CCCode = ISD::SETNE;
}
@@ -2659,7 +2657,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_SETCC(SDNode *N) {
IntegerExpandSetCCOperands(NewLHS, NewRHS, CCCode, SDLoc(N));
// If ExpandSetCCOperands returned a scalar, use it.
- if (NewRHS.getNode() == 0) {
+ if (!NewRHS.getNode()) {
assert(NewLHS.getValueType() == N->getValueType(0) &&
"Unexpected setcc expansion!");
return NewLHS;
@@ -2907,7 +2905,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_EXTRACT_SUBVECTOR(SDNode *N) {
Ops.push_back(Op);
}
- return DAG.getNode(ISD::BUILD_VECTOR, dl, NOutVT, &Ops[0], Ops.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, NOutVT, Ops);
}
@@ -2954,7 +2952,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BUILD_VECTOR(SDNode *N) {
Ops.push_back(Op);
}
- return DAG.getNode(ISD::BUILD_VECTOR, dl, NOutVT, &Ops[0], Ops.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, NOutVT, Ops);
}
SDValue DAGTypeLegalizer::PromoteIntRes_SCALAR_TO_VECTOR(SDNode *N) {
@@ -3002,7 +3000,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_CONCAT_VECTORS(SDNode *N) {
}
}
- return DAG.getNode(ISD::BUILD_VECTOR, dl, NOutVT, &Ops[0], Ops.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, NOutVT, Ops);
}
SDValue DAGTypeLegalizer::PromoteIntRes_INSERT_VECTOR_ELT(SDNode *N) {
@@ -3058,6 +3056,5 @@ SDValue DAGTypeLegalizer::PromoteIntOp_CONCAT_VECTORS(SDNode *N) {
}
}
- return DAG.getNode(ISD::BUILD_VECTOR, dl, N->getValueType(0),
- &NewOps[0], NewOps.size());
- }
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, N->getValueType(0), NewOps);
+}
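Much of the mechanical churn in this file, and in the files that follow, is one API change: SelectionDAG node builders now take an ArrayRef of operands instead of a pointer/length pair, and a SmallVector converts to ArrayRef implicitly. A hedged before/after sketch of the idiom (fragment; Ops, dl and NOutVT stand for the locals of the surrounding functions):

SmallVector<SDValue, 8> Ops;
// ... Ops filled in ...

// Old form: explicit pointer and element count.
//   DAG.getNode(ISD::BUILD_VECTOR, dl, NOutVT, &Ops[0], Ops.size());

// New form: the SmallVector converts implicitly to ArrayRef<SDValue>.
SDValue V = DAG.getNode(ISD::BUILD_VECTOR, dl, NOutVT, Ops);

// When only a prefix of a larger buffer is meaningful, the ArrayRef is built
// explicitly, as the widening code later in this patch does:
//   DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT,
//               makeArrayRef(ConcatOps.data(), NumOps));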
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
index eb13230..bd7dacf 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -22,6 +22,8 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "legalize-types"
+
static cl::opt<bool>
EnableExpensiveChecks("enable-legalize-types-checking", cl::Hidden);
@@ -159,7 +161,7 @@ void DAGTypeLegalizer::PerformExpensiveChecks() {
if (Mapped & 128)
dbgs() << " WidenedVectors";
dbgs() << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
}
@@ -433,7 +435,7 @@ NodeDone:
if (Failed) {
I->dump(&DAG); dbgs() << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
#endif
@@ -488,7 +490,7 @@ SDNode *DAGTypeLegalizer::AnalyzeNewNode(SDNode *N) {
// Some operands changed - update the node.
if (!NewOps.empty()) {
- SDNode *M = DAG.UpdateNodeOperands(N, &NewOps[0], NewOps.size());
+ SDNode *M = DAG.UpdateNodeOperands(N, NewOps);
if (M != N) {
// The node morphed into a different node. Normally for this to happen
// the original node would have to be marked NewNode. However this can
@@ -634,7 +636,7 @@ namespace {
: SelectionDAG::DAGUpdateListener(dtl.getDAG()),
DTL(dtl), NodesToAnalyze(nta) {}
- virtual void NodeDeleted(SDNode *N, SDNode *E) {
+ void NodeDeleted(SDNode *N, SDNode *E) override {
assert(N->getNodeId() != DAGTypeLegalizer::ReadyToProcess &&
N->getNodeId() != DAGTypeLegalizer::Processed &&
"Invalid node ID for RAUW deletion!");
@@ -655,7 +657,7 @@ namespace {
NodesToAnalyze.insert(E);
}
- virtual void NodeUpdated(SDNode *N) {
+ void NodeUpdated(SDNode *N) override {
// Node updates can mean pretty much anything. It is possible that an
// operand was set to something already processed (f.e.) in which case
// this node could become ready. Recompute its flags.
@@ -736,7 +738,7 @@ void DAGTypeLegalizer::SetPromotedInteger(SDValue Op, SDValue Result) {
AnalyzeNewValue(Result);
SDValue &OpEntry = PromotedIntegers[Op];
- assert(OpEntry.getNode() == 0 && "Node is already promoted!");
+ assert(!OpEntry.getNode() && "Node is already promoted!");
OpEntry = Result;
}
@@ -747,7 +749,7 @@ void DAGTypeLegalizer::SetSoftenedFloat(SDValue Op, SDValue Result) {
AnalyzeNewValue(Result);
SDValue &OpEntry = SoftenedFloats[Op];
- assert(OpEntry.getNode() == 0 && "Node is already converted to integer!");
+ assert(!OpEntry.getNode() && "Node is already converted to integer!");
OpEntry = Result;
}
@@ -761,7 +763,7 @@ void DAGTypeLegalizer::SetScalarizedVector(SDValue Op, SDValue Result) {
AnalyzeNewValue(Result);
SDValue &OpEntry = ScalarizedVectors[Op];
- assert(OpEntry.getNode() == 0 && "Node is already scalarized!");
+ assert(!OpEntry.getNode() && "Node is already scalarized!");
OpEntry = Result;
}
@@ -787,7 +789,7 @@ void DAGTypeLegalizer::SetExpandedInteger(SDValue Op, SDValue Lo,
// Remember that this is the result of the node.
std::pair<SDValue, SDValue> &Entry = ExpandedIntegers[Op];
- assert(Entry.first.getNode() == 0 && "Node already expanded");
+ assert(!Entry.first.getNode() && "Node already expanded");
Entry.first = Lo;
Entry.second = Hi;
}
@@ -814,7 +816,7 @@ void DAGTypeLegalizer::SetExpandedFloat(SDValue Op, SDValue Lo,
// Remember that this is the result of the node.
std::pair<SDValue, SDValue> &Entry = ExpandedFloats[Op];
- assert(Entry.first.getNode() == 0 && "Node already expanded");
+ assert(!Entry.first.getNode() && "Node already expanded");
Entry.first = Lo;
Entry.second = Hi;
}
@@ -843,7 +845,7 @@ void DAGTypeLegalizer::SetSplitVector(SDValue Op, SDValue Lo,
// Remember that this is the result of the node.
std::pair<SDValue, SDValue> &Entry = SplitVectors[Op];
- assert(Entry.first.getNode() == 0 && "Node already split");
+ assert(!Entry.first.getNode() && "Node already split");
Entry.first = Lo;
Entry.second = Hi;
}
@@ -855,7 +857,7 @@ void DAGTypeLegalizer::SetWidenedVector(SDValue Op, SDValue Result) {
AnalyzeNewValue(Result);
SDValue &OpEntry = WidenedVectors[Op];
- assert(OpEntry.getNode() == 0 && "Node already widened!");
+ assert(!OpEntry.getNode() && "Node already widened!");
OpEntry = Result;
}
@@ -1007,7 +1009,7 @@ SDValue DAGTypeLegalizer::LibCallify(RTLIB::Libcall LC, SDNode *N,
unsigned NumOps = N->getNumOperands();
SDLoc dl(N);
if (NumOps == 0) {
- return TLI.makeLibCall(DAG, LC, N->getValueType(0), 0, 0, isSigned,
+ return TLI.makeLibCall(DAG, LC, N->getValueType(0), nullptr, 0, isSigned,
dl).first;
} else if (NumOps == 1) {
SDValue Op = N->getOperand(0);
@@ -1049,11 +1051,12 @@ DAGTypeLegalizer::ExpandChainLibCall(RTLIB::Libcall LC,
TLI.getPointerTy());
Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
- TargetLowering::
- CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned, false, false,
- 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
- /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
- Callee, Args, DAG, SDLoc(Node));
+
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(SDLoc(Node)).setChain(InChain)
+ .setCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
+ .setSExtResult(isSigned).setZExtResult(!isSigned);
+
std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
return CallInfo;
@@ -1062,11 +1065,14 @@ DAGTypeLegalizer::ExpandChainLibCall(RTLIB::Libcall LC,
/// PromoteTargetBoolean - Promote the given target boolean to a target boolean
/// of the given type. A target boolean is an integer value, not necessarily of
/// type i1, the bits of which conform to getBooleanContents.
-SDValue DAGTypeLegalizer::PromoteTargetBoolean(SDValue Bool, EVT VT) {
+///
+/// ValVT is the type of values that produced the boolean.
+SDValue DAGTypeLegalizer::PromoteTargetBoolean(SDValue Bool, EVT ValVT) {
SDLoc dl(Bool);
+ EVT BoolVT = getSetCCResultType(ValVT);
ISD::NodeType ExtendCode =
- TargetLowering::getExtendForContent(TLI.getBooleanContents(VT.isVector()));
- return DAG.getNode(ExtendCode, dl, VT, Bool);
+ TargetLowering::getExtendForContent(TLI.getBooleanContents(ValVT));
+ return DAG.getNode(ExtendCode, dl, BoolVT, Bool);
}
/// SplitInteger - Return the lower LoVT bits of Op in Lo and the upper HiVT
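PromoteTargetBoolean now derives both the boolean result type and its contents from the type of the values that produced the flag (getSetCCResultType(ValVT) and getBooleanContents(ValVT)) rather than from a single is-vector flag. For reference, the extension opcode returned by getExtendForContent follows the usual mapping between boolean contents and extensions; the switch below is my paraphrase of that mapping for illustration, not the library's implementation:

// Illustrative only: how a BooleanContent value is assumed to map to the
// extension opcode used when widening a target boolean.
static ISD::NodeType extendForContent(TargetLowering::BooleanContent BC) {
  switch (BC) {
  case TargetLowering::ZeroOrOneBooleanContent:
    return ISD::ZERO_EXTEND;   // booleans are stored as 0 / 1
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return ISD::SIGN_EXTEND;   // booleans are stored as 0 / -1 (all ones)
  case TargetLowering::UndefinedBooleanContent:
    return ISD::ANY_EXTEND;    // upper bits are don't-care
  }
  llvm_unreachable("unknown boolean content");
}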
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index ce2ba01..117ff31 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -16,7 +16,6 @@
#ifndef SELECTIONDAG_LEGALIZETYPES_H
#define SELECTIONDAG_LEGALIZETYPES_H
-#define DEBUG_TYPE "legalize-types"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/CodeGen/SelectionDAG.h"
@@ -168,7 +167,7 @@ private:
SDNode *Node, bool isSigned);
std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node);
- SDValue PromoteTargetBoolean(SDValue Bool, EVT VT);
+ SDValue PromoteTargetBoolean(SDValue Bool, EVT ValVT);
void ReplaceValueWith(SDValue From, SDValue To);
void SplitInteger(SDValue Op, SDValue &Lo, SDValue &Hi);
void SplitInteger(SDValue Op, EVT LoVT, EVT HiVT,
@@ -221,7 +220,7 @@ private:
SDValue PromoteIntRes_AssertZext(SDNode *N);
SDValue PromoteIntRes_Atomic0(AtomicSDNode *N);
SDValue PromoteIntRes_Atomic1(AtomicSDNode *N);
- SDValue PromoteIntRes_Atomic2(AtomicSDNode *N);
+ SDValue PromoteIntRes_AtomicCmpSwap(AtomicSDNode *N, unsigned ResNo);
SDValue PromoteIntRes_EXTRACT_SUBVECTOR(SDNode *N);
SDValue PromoteIntRes_VECTOR_SHUFFLE(SDNode *N);
SDValue PromoteIntRes_BUILD_VECTOR(SDNode *N);
@@ -238,7 +237,7 @@ private:
SDValue PromoteIntRes_CTTZ(SDNode *N);
SDValue PromoteIntRes_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue PromoteIntRes_FP_TO_XINT(SDNode *N);
- SDValue PromoteIntRes_FP32_TO_FP16(SDNode *N);
+ SDValue PromoteIntRes_FP_TO_FP16(SDNode *N);
SDValue PromoteIntRes_INT_EXTEND(SDNode *N);
SDValue PromoteIntRes_LOAD(LoadSDNode *N);
SDValue PromoteIntRes_Overflow(SDNode *N);
@@ -404,7 +403,7 @@ private:
SDValue SoftenFloatRes_FNEARBYINT(SDNode *N);
SDValue SoftenFloatRes_FNEG(SDNode *N);
SDValue SoftenFloatRes_FP_EXTEND(SDNode *N);
- SDValue SoftenFloatRes_FP16_TO_FP32(SDNode *N);
+ SDValue SoftenFloatRes_FP16_TO_FP(SDNode *N);
SDValue SoftenFloatRes_FP_ROUND(SDNode *N);
SDValue SoftenFloatRes_FPOW(SDNode *N);
SDValue SoftenFloatRes_FPOWI(SDNode *N);
@@ -426,10 +425,10 @@ private:
bool SoftenFloatOperand(SDNode *N, unsigned OpNo);
SDValue SoftenFloatOp_BITCAST(SDNode *N);
SDValue SoftenFloatOp_BR_CC(SDNode *N);
+ SDValue SoftenFloatOp_FP_EXTEND(SDNode *N);
SDValue SoftenFloatOp_FP_ROUND(SDNode *N);
SDValue SoftenFloatOp_FP_TO_SINT(SDNode *N);
SDValue SoftenFloatOp_FP_TO_UINT(SDNode *N);
- SDValue SoftenFloatOp_FP32_TO_FP16(SDNode *N);
SDValue SoftenFloatOp_SELECT_CC(SDNode *N);
SDValue SoftenFloatOp_SETCC(SDNode *N);
SDValue SoftenFloatOp_STORE(SDNode *N, unsigned OpNo);
@@ -540,7 +539,9 @@ private:
SDValue ScalarizeVecOp_UnaryOp(SDNode *N);
SDValue ScalarizeVecOp_CONCAT_VECTORS(SDNode *N);
SDValue ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
+ SDValue ScalarizeVecOp_VSELECT(SDNode *N);
SDValue ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo);
+ SDValue ScalarizeVecOp_FP_ROUND(SDNode *N, unsigned OpNo);
//===--------------------------------------------------------------------===//
// Vector Splitting Support: LegalizeVectorTypes.cpp
@@ -644,6 +645,7 @@ private:
bool WidenVectorOperand(SDNode *N, unsigned OpNo);
SDValue WidenVecOp_BITCAST(SDNode *N);
SDValue WidenVecOp_CONCAT_VECTORS(SDNode *N);
+ SDValue WidenVecOp_EXTEND(SDNode *N);
SDValue WidenVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue WidenVecOp_EXTRACT_SUBVECTOR(SDNode *N);
SDValue WidenVecOp_STORE(SDNode* N);
@@ -671,13 +673,13 @@ private:
LoadSDNode *LD, ISD::LoadExtType ExtType);
/// Helper genWidenVectorStores - Helper function to generate a set of
- /// stores to store a widen vector into non widen memory
+ /// stores to store a widen vector into non-widen memory
/// StChain: list of chains for the stores we have generated
/// ST: store of a widen value
void GenWidenVectorStores(SmallVectorImpl<SDValue> &StChain, StoreSDNode *ST);
/// Helper genWidenVectorTruncStores - Helper function to generate a set of
- /// stores to store a truncate widen vector into non widen memory
+ /// stores to store a truncate widen vector into non-widen memory
/// StChain: list of chains for the stores we have generated
/// ST: store of a widen value
void GenWidenVectorTruncStores(SmallVectorImpl<SDValue> &StChain,
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
index c749fde..7e2f7b6 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
@@ -23,6 +23,8 @@
#include "llvm/IR/DataLayout.h"
using namespace llvm;
+#define DEBUG_TYPE "legalize-types"
+
//===----------------------------------------------------------------------===//
// Generic Result Expansion.
//===----------------------------------------------------------------------===//
@@ -58,12 +60,15 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
case TargetLowering::TypeExpandFloat:
// Convert the expanded pieces of the input.
GetExpandedOp(InOp, Lo, Hi);
+ if (TLI.hasBigEndianPartOrdering(InVT) !=
+ TLI.hasBigEndianPartOrdering(OutVT))
+ std::swap(Lo, Hi);
Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
return;
case TargetLowering::TypeSplitVector:
GetSplitVector(InOp, Lo, Hi);
- if (TLI.isBigEndian())
+ if (TLI.hasBigEndianPartOrdering(OutVT))
std::swap(Lo, Hi);
Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
@@ -78,9 +83,9 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
assert(!(InVT.getVectorNumElements() & 1) && "Unsupported BITCAST");
InOp = GetWidenedVector(InOp);
EVT LoVT, HiVT;
- llvm::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(InVT);
- llvm::tie(Lo, Hi) = DAG.SplitVector(InOp, dl, LoVT, HiVT);
- if (TLI.isBigEndian())
+ std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(InVT);
+ std::tie(Lo, Hi) = DAG.SplitVector(InOp, dl, LoVT, HiVT);
+ if (TLI.hasBigEndianPartOrdering(OutVT))
std::swap(Lo, Hi);
Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
@@ -174,7 +179,7 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
false, false, MinAlign(Alignment, IncrementSize));
// Handle endianness of the load.
- if (TLI.isBigEndian())
+ if (TLI.hasBigEndianPartOrdering(OutVT))
std::swap(Lo, Hi);
}
@@ -243,7 +248,8 @@ void DAGTypeLegalizer::ExpandRes_NormalLoad(SDNode *N, SDValue &Lo,
SDLoc dl(N);
LoadSDNode *LD = cast<LoadSDNode>(N);
- EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), LD->getValueType(0));
+ EVT ValueVT = LD->getValueType(0);
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), ValueVT);
SDValue Chain = LD->getChain();
SDValue Ptr = LD->getBasePtr();
unsigned Alignment = LD->getAlignment();
@@ -273,7 +279,7 @@ void DAGTypeLegalizer::ExpandRes_NormalLoad(SDNode *N, SDValue &Lo,
Hi.getValue(1));
// Handle endianness of the load.
- if (TLI.isBigEndian())
+ if (TLI.hasBigEndianPartOrdering(ValueVT))
std::swap(Lo, Hi);
// Modified the chain - switch anything that used the old chain to use
@@ -293,7 +299,7 @@ void DAGTypeLegalizer::ExpandRes_VAARG(SDNode *N, SDValue &Lo, SDValue &Hi) {
Hi = DAG.getVAArg(NVT, dl, Lo.getValue(1), Ptr, N->getOperand(2), 0);
// Handle endianness of the load.
- if (TLI.isBigEndian())
+ if (TLI.hasBigEndianPartOrdering(OVT))
std::swap(Lo, Hi);
// Modified the chain - switch anything that used the old chain to use
@@ -352,7 +358,8 @@ SDValue DAGTypeLegalizer::ExpandOp_BITCAST(SDNode *N) {
SmallVector<SDValue, 8> Ops;
IntegerToVector(N->getOperand(0), NumElts, Ops, NVT.getVectorElementType());
- SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, &Ops[0], NumElts);
+ SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT,
+ makeArrayRef(Ops.data(), NumElts));
return DAG.getNode(ISD::BITCAST, dl, N->getValueType(0), Vec);
}
@@ -388,7 +395,7 @@ SDValue DAGTypeLegalizer::ExpandOp_BUILD_VECTOR(SDNode *N) {
SDValue NewVec = DAG.getNode(ISD::BUILD_VECTOR, dl,
EVT::getVectorVT(*DAG.getContext(),
NewVT, NewElts.size()),
- &NewElts[0], NewElts.size());
+ NewElts);
// Convert the new vector to the old vector type.
return DAG.getNode(ISD::BITCAST, dl, VecVT, NewVec);
@@ -447,7 +454,7 @@ SDValue DAGTypeLegalizer::ExpandOp_SCALAR_TO_VECTOR(SDNode *N) {
SDValue UndefVal = DAG.getUNDEF(Ops[0].getValueType());
for (unsigned i = 1; i < NumElts; ++i)
Ops[i] = UndefVal;
- return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], NumElts);
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
SDValue DAGTypeLegalizer::ExpandOp_NormalStore(SDNode *N, unsigned OpNo) {
@@ -456,8 +463,8 @@ SDValue DAGTypeLegalizer::ExpandOp_NormalStore(SDNode *N, unsigned OpNo) {
SDLoc dl(N);
StoreSDNode *St = cast<StoreSDNode>(N);
- EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(),
- St->getValue().getValueType());
+ EVT ValueVT = St->getValue().getValueType();
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), ValueVT);
SDValue Chain = St->getChain();
SDValue Ptr = St->getBasePtr();
unsigned Alignment = St->getAlignment();
@@ -471,7 +478,7 @@ SDValue DAGTypeLegalizer::ExpandOp_NormalStore(SDNode *N, unsigned OpNo) {
SDValue Lo, Hi;
GetExpandedOp(St->getValue(), Lo, Hi);
- if (TLI.isBigEndian())
+ if (TLI.hasBigEndianPartOrdering(ValueVT))
std::swap(Lo, Hi);
Lo = DAG.getStore(Chain, dl, Lo, Ptr, St->getPointerInfo(),
@@ -518,7 +525,7 @@ void DAGTypeLegalizer::SplitRes_SELECT(SDNode *N, SDValue &Lo,
if (getTypeAction(Cond.getValueType()) == TargetLowering::TypeSplitVector)
GetSplitVector(Cond, CL, CH);
else
- llvm::tie(CL, CH) = DAG.SplitVector(Cond, dl);
+ std::tie(CL, CH) = DAG.SplitVector(Cond, dl);
}
Lo = DAG.getNode(N->getOpcode(), dl, LL.getValueType(), CL, LL, RL);
@@ -540,7 +547,7 @@ void DAGTypeLegalizer::SplitRes_SELECT_CC(SDNode *N, SDValue &Lo,
void DAGTypeLegalizer::SplitRes_UNDEF(SDNode *N, SDValue &Lo, SDValue &Hi) {
EVT LoVT, HiVT;
- llvm::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
+ std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
Lo = DAG.getUNDEF(LoVT);
Hi = DAG.getUNDEF(HiVT);
}
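The endianness fixups in this file switch from the global TLI.isBigEndian() to the per-type TLI.hasBigEndianPartOrdering(VT), but the reason for the swap stays the same: expanded values are tracked Lo-first, while a big-endian part ordering places the high part at the lower address. A standalone host-level illustration of that layout effect (runnable C++; memcpy stands in for the loads and stores, no DAG types involved):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  uint64_t V = 0x1122334455667788ULL;   // Hi part 0x11223344, Lo part 0x55667788
  uint32_t Parts[2];
  std::memcpy(Parts, &V, sizeof(V));    // the in-memory part ordering

  // On a little-endian host Parts[0] is the Lo part; on a big-endian host the
  // parts come out Hi-first, which is exactly why the legalizer swaps Lo/Hi.
  std::printf("Parts[0]=0x%08x Parts[1]=0x%08x\n",
              (unsigned)Parts[0], (unsigned)Parts[1]);
  return 0;
}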
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 3fb2d9b..507e7ff 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -37,12 +37,12 @@ class VectorLegalizer {
const TargetLowering &TLI;
bool Changed; // Keep track of whether anything changed
- /// LegalizedNodes - For nodes that are of legal width, and that have more
- /// than one use, this map indicates what regularized operand to use. This
- /// allows us to avoid legalizing the same thing more than once.
+ /// For nodes that are of legal width, and that have more than one use, this
+ /// map indicates what regularized operand to use. This allows us to avoid
+ /// legalizing the same thing more than once.
SmallDenseMap<SDValue, SDValue, 64> LegalizedNodes;
- // Adds a node to the translation cache
+ /// \brief Adds a node to the translation cache.
void AddLegalizedOperand(SDValue From, SDValue To) {
LegalizedNodes.insert(std::make_pair(From, To));
// If someone requests legalization of the new node, return itself.
@@ -50,35 +50,81 @@ class VectorLegalizer {
LegalizedNodes.insert(std::make_pair(To, To));
}
- // Legalizes the given node
+ /// \brief Legalizes the given node.
SDValue LegalizeOp(SDValue Op);
- // Assuming the node is legal, "legalize" the results
+
+ /// \brief Assuming the node is legal, "legalize" the results.
SDValue TranslateLegalizeResults(SDValue Op, SDValue Result);
- // Implements unrolling a VSETCC.
+
+ /// \brief Implements unrolling a VSETCC.
SDValue UnrollVSETCC(SDValue Op);
- // Implements expansion for FNEG; falls back to UnrollVectorOp if FSUB
- // isn't legal.
- // Implements expansion for UINT_TO_FLOAT; falls back to UnrollVectorOp if
- // SINT_TO_FLOAT and SHR on vectors isn't legal.
+
+ /// \brief Implement expand-based legalization of vector operations.
+ ///
+ /// This is just a high-level routine to dispatch to specific code paths for
+ /// operations to legalize them.
+ SDValue Expand(SDValue Op);
+
+ /// \brief Implements expansion for FNEG; falls back to UnrollVectorOp if
+ /// FSUB isn't legal.
+ ///
+ /// Implements expansion for UINT_TO_FLOAT; falls back to UnrollVectorOp if
+ /// SINT_TO_FLOAT and SHR on vectors isn't legal.
SDValue ExpandUINT_TO_FLOAT(SDValue Op);
- // Implement expansion for SIGN_EXTEND_INREG using SRL and SRA.
+
+ /// \brief Implement expansion for SIGN_EXTEND_INREG using SRL and SRA.
SDValue ExpandSEXTINREG(SDValue Op);
- // Implement vselect in terms of XOR, AND, OR when blend is not supported
- // by the target.
+
+ /// \brief Implement expansion for ANY_EXTEND_VECTOR_INREG.
+ ///
+ /// Shuffles the low lanes of the operand into place and bitcasts to the proper
+ /// type. The contents of the bits in the extended part of each element are
+ /// undef.
+ SDValue ExpandANY_EXTEND_VECTOR_INREG(SDValue Op);
+
+ /// \brief Implement expansion for SIGN_EXTEND_VECTOR_INREG.
+ ///
+ /// Shuffles the low lanes of the operand into place, bitcasts to the proper
+ /// type, then shifts left and arithmetic shifts right to introduce a sign
+ /// extension.
+ SDValue ExpandSIGN_EXTEND_VECTOR_INREG(SDValue Op);
+
+ /// \brief Implement expansion for ZERO_EXTEND_VECTOR_INREG.
+ ///
+ /// Shuffles the low lanes of the operand into place and blends zeros into
+ /// the remaining lanes, finally bitcasting to the proper type.
+ SDValue ExpandZERO_EXTEND_VECTOR_INREG(SDValue Op);
+
+ /// \brief Expand bswap of vectors into a shuffle if legal.
+ SDValue ExpandBSWAP(SDValue Op);
+
+ /// \brief Implement vselect in terms of XOR, AND, OR when blend is not
+ /// supported by the target.
SDValue ExpandVSELECT(SDValue Op);
SDValue ExpandSELECT(SDValue Op);
SDValue ExpandLoad(SDValue Op);
SDValue ExpandStore(SDValue Op);
SDValue ExpandFNEG(SDValue Op);
- // Implements vector promotion; this is essentially just bitcasting the
- // operands to a different type and bitcasting the result back to the
- // original type.
- SDValue PromoteVectorOp(SDValue Op);
- // Implements [SU]INT_TO_FP vector promotion; this is a [zs]ext of the input
- // operand to the next size up.
- SDValue PromoteVectorOpINT_TO_FP(SDValue Op);
-
- public:
+
+ /// \brief Implements vector promotion.
+ ///
+ /// This is essentially just bitcasting the operands to a different type and
+ /// bitcasting the result back to the original type.
+ SDValue Promote(SDValue Op);
+
+ /// \brief Implements [SU]INT_TO_FP vector promotion.
+ ///
+ /// This is a [zs]ext of the input operand to the next size up.
+ SDValue PromoteINT_TO_FP(SDValue Op);
+
+ /// \brief Implements FP_TO_[SU]INT vector promotion of the result type.
+ ///
+ /// It is promoted to the next size up integer type. The result is then
+ /// truncated back to the original type.
+ SDValue PromoteFP_TO_INT(SDValue Op, bool isSigned);
+
+public:
+  /// \brief Begin legalizing the vector operations in the DAG.
bool Run();
VectorLegalizer(SelectionDAG& dag) :
DAG(dag), TLI(dag.getTargetLoweringInfo()), Changed(false) {}
@@ -88,7 +134,7 @@ bool VectorLegalizer::Run() {
// Before we start legalizing vector nodes, check if there are any vectors.
bool HasVectors = false;
for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
- E = prior(DAG.allnodes_end()); I != llvm::next(E); ++I) {
+ E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I) {
// Check if the values of the nodes contain vectors. We don't need to check
// the operands because we are going to check their values at some point.
for (SDNode::value_iterator J = I->value_begin(), E = I->value_end();
@@ -112,7 +158,7 @@ bool VectorLegalizer::Run() {
// node is only legalized after all of its operands are legalized.
DAG.AssignTopologicalOrder();
for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
- E = prior(DAG.allnodes_end()); I != llvm::next(E); ++I)
+ E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I)
LegalizeOp(SDValue(I, 0));
// Finally, it's possible the root changed. Get the new root.
@@ -148,8 +194,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
Ops.push_back(LegalizeOp(Node->getOperand(i)));
- SDValue Result =
- SDValue(DAG.UpdateNodeOperands(Op.getNode(), Ops.data(), Ops.size()), 0);
+ SDValue Result = SDValue(DAG.UpdateNodeOperands(Op.getNode(), Ops), 0);
if (Op.getOpcode() == ISD::LOAD) {
LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
@@ -249,6 +294,9 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
case ISD::FP_EXTEND:
case ISD::FMA:
case ISD::SIGN_EXTEND_INREG:
+ case ISD::ANY_EXTEND_VECTOR_INREG:
+ case ISD::SIGN_EXTEND_VECTOR_INREG:
+ case ISD::ZERO_EXTEND_VECTOR_INREG:
QueryType = Node->getValueType(0);
break;
case ISD::FP_ROUND_INREG:
@@ -262,21 +310,11 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
switch (TLI.getOperationAction(Node->getOpcode(), QueryType)) {
case TargetLowering::Promote:
- switch (Op.getOpcode()) {
- default:
- // "Promote" the operation by bitcasting
- Result = PromoteVectorOp(Op);
- Changed = true;
- break;
- case ISD::SINT_TO_FP:
- case ISD::UINT_TO_FP:
- // "Promote" the operation by extending the operand.
- Result = PromoteVectorOpINT_TO_FP(Op);
- Changed = true;
- break;
- }
+ Result = Promote(Op);
+ Changed = true;
+ break;
+ case TargetLowering::Legal:
break;
- case TargetLowering::Legal: break;
case TargetLowering::Custom: {
SDValue Tmp1 = TLI.LowerOperation(Op, DAG);
if (Tmp1.getNode()) {
@@ -286,21 +324,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
// FALL THROUGH
}
case TargetLowering::Expand:
- if (Node->getOpcode() == ISD::SIGN_EXTEND_INREG)
- Result = ExpandSEXTINREG(Op);
- else if (Node->getOpcode() == ISD::VSELECT)
- Result = ExpandVSELECT(Op);
- else if (Node->getOpcode() == ISD::SELECT)
- Result = ExpandSELECT(Op);
- else if (Node->getOpcode() == ISD::UINT_TO_FP)
- Result = ExpandUINT_TO_FLOAT(Op);
- else if (Node->getOpcode() == ISD::FNEG)
- Result = ExpandFNEG(Op);
- else if (Node->getOpcode() == ISD::SETCC)
- Result = UnrollVSETCC(Op);
- else
- Result = DAG.UnrollVectorOp(Op.getNode());
- break;
+ Result = Expand(Op);
}
// Make sure that the generated code is itself legal.
@@ -315,10 +339,23 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
return Result;
}
-SDValue VectorLegalizer::PromoteVectorOp(SDValue Op) {
- // Vector "promotion" is basically just bitcasting and doing the operation
- // in a different type. For example, x86 promotes ISD::AND on v2i32 to
- // v1i64.
+SDValue VectorLegalizer::Promote(SDValue Op) {
+ // For a few operations there is a specific concept for promotion based on
+ // the operand's type.
+ switch (Op.getOpcode()) {
+ case ISD::SINT_TO_FP:
+ case ISD::UINT_TO_FP:
+ // "Promote" the operation by extending the operand.
+ return PromoteINT_TO_FP(Op);
+ case ISD::FP_TO_UINT:
+ case ISD::FP_TO_SINT:
+ // Promote the operation by extending the operand.
+ return PromoteFP_TO_INT(Op, Op->getOpcode() == ISD::FP_TO_SINT);
+ }
+
+ // The rest of the time, vector "promotion" is basically just bitcasting and
+ // doing the operation in a different type. For example, x86 promotes
+ // ISD::AND on v2i32 to v1i64.
MVT VT = Op.getSimpleValueType();
assert(Op.getNode()->getNumValues() == 1 &&
"Can't promote a vector with multiple results!");
@@ -333,12 +370,12 @@ SDValue VectorLegalizer::PromoteVectorOp(SDValue Op) {
Operands[j] = Op.getOperand(j);
}
- Op = DAG.getNode(Op.getOpcode(), dl, NVT, &Operands[0], Operands.size());
+ Op = DAG.getNode(Op.getOpcode(), dl, NVT, Operands);
return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}
-SDValue VectorLegalizer::PromoteVectorOpINT_TO_FP(SDValue Op) {
+SDValue VectorLegalizer::PromoteINT_TO_FP(SDValue Op) {
// INT_TO_FP operations may require the input operand be promoted even
// when the type is otherwise legal.
EVT VT = Op.getOperand(0).getValueType();
@@ -352,14 +389,9 @@ SDValue VectorLegalizer::PromoteVectorOpINT_TO_FP(SDValue Op) {
//
// Increase the bitwidth of the element to the next pow-of-two
// (which is greater than 8 bits).
- unsigned NumElts = VT.getVectorNumElements();
- EVT EltVT = VT.getVectorElementType();
- EltVT = EVT::getIntegerVT(*DAG.getContext(), 2 * EltVT.getSizeInBits());
- assert(EltVT.isSimple() && "Promoting to a non-simple vector type!");
-
- // Build a new vector type and check if it is legal.
- MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
+ EVT NVT = VT.widenIntegerVectorElementType(*DAG.getContext());
+ assert(NVT.isSimple() && "Promoting to a non-simple vector type!");
SDLoc dl(Op);
SmallVector<SDValue, 4> Operands(Op.getNumOperands());
@@ -372,8 +404,36 @@ SDValue VectorLegalizer::PromoteVectorOpINT_TO_FP(SDValue Op) {
Operands[j] = Op.getOperand(j);
}
- return DAG.getNode(Op.getOpcode(), dl, Op.getValueType(), &Operands[0],
- Operands.size());
+ return DAG.getNode(Op.getOpcode(), dl, Op.getValueType(), Operands);
+}
+
+// For FP_TO_INT we promote the result type to a vector type with wider
+// elements and then truncate the result. This is different from the default
+// PromoteVector which uses bitcast to promote, thus assuming that the
+// promoted vector type has the same overall size.
+SDValue VectorLegalizer::PromoteFP_TO_INT(SDValue Op, bool isSigned) {
+ assert(Op.getNode()->getNumValues() == 1 &&
+ "Can't promote a vector with multiple results!");
+ EVT VT = Op.getValueType();
+
+ EVT NewVT;
+ unsigned NewOpc;
+ while (1) {
+ NewVT = VT.widenIntegerVectorElementType(*DAG.getContext());
+ assert(NewVT.isSimple() && "Promoting to a non-simple vector type!");
+ if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewVT)) {
+ NewOpc = ISD::FP_TO_SINT;
+ break;
+ }
+ if (!isSigned && TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewVT)) {
+ NewOpc = ISD::FP_TO_UINT;
+ break;
+ }
+ }
+
+ SDLoc loc(Op);
+ SDValue promoted = DAG.getNode(NewOpc, SDLoc(Op), NewVT, Op.getOperand(0));
+ return DAG.getNode(ISD::TRUNCATE, SDLoc(Op), VT, promoted);
}
@@ -512,10 +572,9 @@ SDValue VectorLegalizer::ExpandLoad(SDValue Op) {
}
}
- SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &LoadChains[0], LoadChains.size());
+ SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, dl,
- Op.getNode()->getValueType(0), &Vals[0], Vals.size());
+ Op.getNode()->getValueType(0), Vals);
AddLegalizedOperand(Op.getValue(0), Value);
AddLegalizedOperand(Op.getValue(1), NewChain);
@@ -569,12 +628,38 @@ SDValue VectorLegalizer::ExpandStore(SDValue Op) {
Stores.push_back(Store);
}
- SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &Stores[0], Stores.size());
+ SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
AddLegalizedOperand(Op, TF);
return TF;
}
+SDValue VectorLegalizer::Expand(SDValue Op) {
+ switch (Op->getOpcode()) {
+ case ISD::SIGN_EXTEND_INREG:
+ return ExpandSEXTINREG(Op);
+ case ISD::ANY_EXTEND_VECTOR_INREG:
+ return ExpandANY_EXTEND_VECTOR_INREG(Op);
+ case ISD::SIGN_EXTEND_VECTOR_INREG:
+ return ExpandSIGN_EXTEND_VECTOR_INREG(Op);
+ case ISD::ZERO_EXTEND_VECTOR_INREG:
+ return ExpandZERO_EXTEND_VECTOR_INREG(Op);
+ case ISD::BSWAP:
+ return ExpandBSWAP(Op);
+ case ISD::VSELECT:
+ return ExpandVSELECT(Op);
+ case ISD::SELECT:
+ return ExpandSELECT(Op);
+ case ISD::UINT_TO_FP:
+ return ExpandUINT_TO_FLOAT(Op);
+ case ISD::FNEG:
+ return ExpandFNEG(Op);
+ case ISD::SETCC:
+ return UnrollVSETCC(Op);
+ default:
+ return DAG.UnrollVectorOp(Op.getNode());
+ }
+}
+
SDValue VectorLegalizer::ExpandSELECT(SDValue Op) {
// Lower a select instruction where the condition is a scalar and the
// operands are vectors. Lower this select to VSELECT and implement it
@@ -614,7 +699,7 @@ SDValue VectorLegalizer::ExpandSELECT(SDValue Op) {
// Broadcast the mask so that the entire vector is all-one or all zero.
SmallVector<SDValue, 8> Ops(NumElem, Mask);
- Mask = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskTy, &Ops[0], Ops.size());
+ Mask = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskTy, Ops);
// Bitcast the operands to be the same type as the mask.
// This is needed when we select between FP types because
@@ -652,6 +737,108 @@ SDValue VectorLegalizer::ExpandSEXTINREG(SDValue Op) {
return DAG.getNode(ISD::SRA, DL, VT, Op, ShiftSz);
}
+// Generically expand a vector anyext in register to a shuffle of the relevant
+// lanes into the appropriate locations, with other lanes left undef.
+SDValue VectorLegalizer::ExpandANY_EXTEND_VECTOR_INREG(SDValue Op) {
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+ int NumElements = VT.getVectorNumElements();
+ SDValue Src = Op.getOperand(0);
+ EVT SrcVT = Src.getValueType();
+ int NumSrcElements = SrcVT.getVectorNumElements();
+
+ // Build a base mask of undef shuffles.
+ SmallVector<int, 16> ShuffleMask;
+ ShuffleMask.resize(NumSrcElements, -1);
+
+ // Place the extended lanes into the correct locations.
+ int ExtLaneScale = NumSrcElements / NumElements;
+ int EndianOffset = TLI.isBigEndian() ? ExtLaneScale - 1 : 0;
+ for (int i = 0; i < NumElements; ++i)
+ ShuffleMask[i * ExtLaneScale + EndianOffset] = i;
+
+ return DAG.getNode(
+ ISD::BITCAST, DL, VT,
+ DAG.getVectorShuffle(SrcVT, DL, Src, DAG.getUNDEF(SrcVT), ShuffleMask));
+}
+
+SDValue VectorLegalizer::ExpandSIGN_EXTEND_VECTOR_INREG(SDValue Op) {
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+ SDValue Src = Op.getOperand(0);
+ EVT SrcVT = Src.getValueType();
+
+ // First build an any-extend node which can be legalized above when we
+ // recurse through it.
+ Op = DAG.getAnyExtendVectorInReg(Src, DL, VT);
+
+ // Now we need sign extend. Do this by shifting the elements. Even if these
+ // aren't legal operations, they have a better chance of being legalized
+ // without full scalarization than the sign extension does.
+ unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
+ unsigned SrcEltWidth = SrcVT.getVectorElementType().getSizeInBits();
+ SDValue ShiftAmount = DAG.getConstant(EltWidth - SrcEltWidth, VT);
+ return DAG.getNode(ISD::SRA, DL, VT,
+ DAG.getNode(ISD::SHL, DL, VT, Op, ShiftAmount),
+ ShiftAmount);
+}
+
+// Generically expand a vector zext in register to a shuffle of the relevant
+// lanes into the appropriate locations, a blend of zero into the high bits,
+// and a bitcast to the wider element type.
+SDValue VectorLegalizer::ExpandZERO_EXTEND_VECTOR_INREG(SDValue Op) {
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+ int NumElements = VT.getVectorNumElements();
+ SDValue Src = Op.getOperand(0);
+ EVT SrcVT = Src.getValueType();
+ int NumSrcElements = SrcVT.getVectorNumElements();
+
+ // Build up a zero vector to blend into this one.
+ EVT SrcScalarVT = SrcVT.getScalarType();
+ SDValue ScalarZero = DAG.getTargetConstant(0, SrcScalarVT);
+ SmallVector<SDValue, 4> BuildVectorOperands(NumSrcElements, ScalarZero);
+ SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, DL, SrcVT, BuildVectorOperands);
+
+ // Shuffle the incoming lanes into the correct position, and pull all other
+ // lanes from the zero vector.
+ SmallVector<int, 16> ShuffleMask;
+ ShuffleMask.reserve(NumSrcElements);
+ for (int i = 0; i < NumSrcElements; ++i)
+ ShuffleMask.push_back(i);
+
+ int ExtLaneScale = NumSrcElements / NumElements;
+ int EndianOffset = TLI.isBigEndian() ? ExtLaneScale - 1 : 0;
+ for (int i = 0; i < NumElements; ++i)
+ ShuffleMask[i * ExtLaneScale + EndianOffset] = NumSrcElements + i;
+
+ return DAG.getNode(ISD::BITCAST, DL, VT,
+ DAG.getVectorShuffle(SrcVT, DL, Zero, Src, ShuffleMask));
+}
+
+SDValue VectorLegalizer::ExpandBSWAP(SDValue Op) {
+ EVT VT = Op.getValueType();
+
+ // Generate a byte wise shuffle mask for the BSWAP.
+ SmallVector<int, 16> ShuffleMask;
+ int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
+ for (int I = 0, E = VT.getVectorNumElements(); I != E; ++I)
+ for (int J = ScalarSizeInBytes - 1; J >= 0; --J)
+ ShuffleMask.push_back((I * ScalarSizeInBytes) + J);
+
+ EVT ByteVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, ShuffleMask.size());
+
+ // Only emit a shuffle if the mask is legal.
+ if (!TLI.isShuffleMaskLegal(ShuffleMask, ByteVT))
+ return DAG.UnrollVectorOp(Op.getNode());
+
+ SDLoc DL(Op);
+ Op = DAG.getNode(ISD::BITCAST, DL, ByteVT, Op.getOperand(0));
+ Op = DAG.getVectorShuffle(ByteVT, DL, Op, DAG.getUNDEF(ByteVT),
+ ShuffleMask.data());
+ return DAG.getNode(ISD::BITCAST, DL, VT, Op);
+}
+
SDValue VectorLegalizer::ExpandVSELECT(SDValue Op) {
// Implement VSELECT in terms of XOR, AND, OR
// on platforms which do not support blend natively.
@@ -672,9 +859,9 @@ SDValue VectorLegalizer::ExpandVSELECT(SDValue Op) {
// FIXME: Sign extend 1 to all ones if thats legal on the target.
if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
- TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand ||
- TLI.getBooleanContents(true) !=
- TargetLowering::ZeroOrNegativeOneBooleanContent)
+ TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand ||
+ TLI.getBooleanContents(Op1.getValueType()) !=
+ TargetLowering::ZeroOrNegativeOneBooleanContent)
return DAG.UnrollVectorOp(Op.getNode());
// If the mask and the type are different sizes, unroll the vector op. This
@@ -769,7 +956,7 @@ SDValue VectorLegalizer::UnrollVSETCC(SDValue Op) {
(EltVT.getSizeInBits()), EltVT),
DAG.getConstant(0, EltVT));
}
- return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], NumElems);
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
}
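ExpandBSWAP above turns a vector byte swap into a byte-level shuffle whose mask simply lists each element's bytes in reverse order. A standalone check of that mask construction and its effect for a v4i32-shaped input (runnable C++; plain byte arrays stand in for the bitcast and shuffle nodes, and __builtin_bswap32 is only used to verify the result):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

int main() {
  const int NumElts = 4, ScalarSizeInBytes = 4;   // v4i32 -> 16 bytes
  std::vector<int> Mask;
  for (int I = 0; I != NumElts; ++I)              // same loop as ExpandBSWAP
    for (int J = ScalarSizeInBytes - 1; J >= 0; --J)
      Mask.push_back(I * ScalarSizeInBytes + J);

  uint32_t In[4] = {0x11223344, 0xaabbccdd, 0x01020304, 0xdeadbeef};
  uint8_t Bytes[16], Out[16];
  std::memcpy(Bytes, In, sizeof(In));
  for (int i = 0; i != 16; ++i)                   // apply the byte shuffle
    Out[i] = Bytes[Mask[i]];

  uint32_t Res[4];
  std::memcpy(Res, Out, sizeof(Res));
  for (int i = 0; i != 4; ++i)
    assert(Res[i] == __builtin_bswap32(In[i]));
  return 0;
}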
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index fb8c602..f77c592 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -26,6 +26,8 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "legalize-types"
+
//===----------------------------------------------------------------------===//
// Result Vector Scalarization: <1 x ty> -> ty.
//===----------------------------------------------------------------------===//
@@ -65,6 +67,7 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
case ISD::UNDEF: R = ScalarizeVecRes_UNDEF(N); break;
case ISD::VECTOR_SHUFFLE: R = ScalarizeVecRes_VECTOR_SHUFFLE(N); break;
case ISD::ANY_EXTEND:
+ case ISD::BSWAP:
case ISD::CTLZ:
case ISD::CTPOP:
case ISD::CTTZ:
@@ -254,8 +257,26 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_SCALAR_TO_VECTOR(SDNode *N) {
SDValue DAGTypeLegalizer::ScalarizeVecRes_VSELECT(SDNode *N) {
SDValue Cond = GetScalarizedVector(N->getOperand(0));
SDValue LHS = GetScalarizedVector(N->getOperand(1));
- TargetLowering::BooleanContent ScalarBool = TLI.getBooleanContents(false);
- TargetLowering::BooleanContent VecBool = TLI.getBooleanContents(true);
+ TargetLowering::BooleanContent ScalarBool =
+ TLI.getBooleanContents(false, false);
+ TargetLowering::BooleanContent VecBool = TLI.getBooleanContents(true, false);
+
+ // If integer and float booleans have different contents then we can't
+ // reliably optimize in all cases. There is a full explanation for this in
+ // DAGCombiner::visitSELECT() where the same issue affects folding
+ // (select C, 0, 1) to (xor C, 1).
+ if (TLI.getBooleanContents(false, false) !=
+ TLI.getBooleanContents(false, true)) {
+ // At least try the common case where the boolean is generated by a
+ // comparison.
+ if (Cond->getOpcode() == ISD::SETCC) {
+ EVT OpVT = Cond->getOperand(0)->getValueType(0);
+ ScalarBool = TLI.getBooleanContents(OpVT.getScalarType());
+ VecBool = TLI.getBooleanContents(OpVT);
+ } else
+ ScalarBool = TargetLowering::UndefinedBooleanContent;
+ }
+
if (ScalarBool != VecBool) {
EVT CondVT = Cond.getValueType();
switch (ScalarBool) {
@@ -330,19 +351,31 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_VSETCC(SDNode *N) {
assert(N->getValueType(0).isVector() &&
N->getOperand(0).getValueType().isVector() &&
"Operand types must be vectors");
-
- SDValue LHS = GetScalarizedVector(N->getOperand(0));
- SDValue RHS = GetScalarizedVector(N->getOperand(1));
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+ EVT OpVT = LHS.getValueType();
EVT NVT = N->getValueType(0).getVectorElementType();
SDLoc DL(N);
+ // The result needs scalarizing, but it's not a given that the source does.
+ if (getTypeAction(OpVT) == TargetLowering::TypeScalarizeVector) {
+ LHS = GetScalarizedVector(LHS);
+ RHS = GetScalarizedVector(RHS);
+ } else {
+ EVT VT = OpVT.getVectorElementType();
+ LHS = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, LHS,
+ DAG.getConstant(0, TLI.getVectorIdxTy()));
+ RHS = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, RHS,
+ DAG.getConstant(0, TLI.getVectorIdxTy()));
+ }
+
// Turn it into a scalar SETCC.
SDValue Res = DAG.getNode(ISD::SETCC, DL, MVT::i1, LHS, RHS,
N->getOperand(2));
// Vectors may have a different boolean contents to scalars. Promote the
// value appropriately.
ISD::NodeType ExtendCode =
- TargetLowering::getExtendForContent(TLI.getBooleanContents(true));
+ TargetLowering::getExtendForContent(TLI.getBooleanContents(OpVT));
return DAG.getNode(ExtendCode, DL, NVT, Res);
}
@@ -357,7 +390,7 @@ bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
dbgs() << "\n");
SDValue Res = SDValue();
- if (Res.getNode() == 0) {
+ if (!Res.getNode()) {
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
@@ -381,9 +414,15 @@ bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
case ISD::EXTRACT_VECTOR_ELT:
Res = ScalarizeVecOp_EXTRACT_VECTOR_ELT(N);
break;
+ case ISD::VSELECT:
+ Res = ScalarizeVecOp_VSELECT(N);
+ break;
case ISD::STORE:
Res = ScalarizeVecOp_STORE(cast<StoreSDNode>(N), OpNo);
break;
+ case ISD::FP_ROUND:
+ Res = ScalarizeVecOp_FP_ROUND(N, OpNo);
+ break;
}
}
@@ -416,13 +455,11 @@ SDValue DAGTypeLegalizer::ScalarizeVecOp_UnaryOp(SDNode *N) {
assert(N->getValueType(0).getVectorNumElements() == 1 &&
"Unexected vector type!");
SDValue Elt = GetScalarizedVector(N->getOperand(0));
- SmallVector<SDValue, 1> Ops(1);
- Ops[0] = DAG.getNode(N->getOpcode(), SDLoc(N),
- N->getValueType(0).getScalarType(), Elt);
+ SDValue Op = DAG.getNode(N->getOpcode(), SDLoc(N),
+ N->getValueType(0).getScalarType(), Elt);
// Revectorize the result so the types line up with what the uses of this
// expression expect.
- return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), N->getValueType(0),
- &Ops[0], 1);
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), N->getValueType(0), Op);
}
/// ScalarizeVecOp_CONCAT_VECTORS - The vectors to concatenate have length one -
@@ -431,8 +468,7 @@ SDValue DAGTypeLegalizer::ScalarizeVecOp_CONCAT_VECTORS(SDNode *N) {
SmallVector<SDValue, 8> Ops(N->getNumOperands());
for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
Ops[i] = GetScalarizedVector(N->getOperand(i));
- return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), N->getValueType(0),
- &Ops[0], Ops.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), N->getValueType(0), Ops);
}
/// ScalarizeVecOp_EXTRACT_VECTOR_ELT - If the input is a vector that needs to
@@ -446,6 +482,18 @@ SDValue DAGTypeLegalizer::ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
return Res;
}
+
+/// ScalarizeVecOp_VSELECT - If the input condition is a vector that needs to be
+/// scalarized, it must be <1 x i1>, so just convert to a normal ISD::SELECT
+/// (still with vector output type since that was acceptable if we got here).
+SDValue DAGTypeLegalizer::ScalarizeVecOp_VSELECT(SDNode *N) {
+ SDValue ScalarCond = GetScalarizedVector(N->getOperand(0));
+ EVT VT = N->getValueType(0);
+
+ return DAG.getNode(ISD::SELECT, SDLoc(N), VT, ScalarCond, N->getOperand(1),
+ N->getOperand(2));
+}
+
/// ScalarizeVecOp_STORE - If the value to store is a vector that needs to be
/// scalarized, it must be <1 x ty>. Just store the element.
SDValue DAGTypeLegalizer::ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo){
@@ -467,6 +515,15 @@ SDValue DAGTypeLegalizer::ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo){
N->getOriginalAlignment(), N->getTBAAInfo());
}
+/// ScalarizeVecOp_FP_ROUND - If the value to round is a vector that needs
+/// to be scalarized, it must be <1 x ty>. Convert the element instead.
+SDValue DAGTypeLegalizer::ScalarizeVecOp_FP_ROUND(SDNode *N, unsigned OpNo) {
+ SDValue Elt = GetScalarizedVector(N->getOperand(0));
+ SDValue Res = DAG.getNode(ISD::FP_ROUND, SDLoc(N),
+ N->getValueType(0).getVectorElementType(), Elt,
+ N->getOperand(1));
+ return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res);
+}
//===----------------------------------------------------------------------===//
// Result Vector Splitting
@@ -522,6 +579,7 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
SplitVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N), Lo, Hi);
break;
+ case ISD::BSWAP:
case ISD::CONVERT_RNDSAT:
case ISD::CTLZ:
case ISD::CTTZ:
@@ -625,7 +683,7 @@ void DAGTypeLegalizer::SplitVecRes_BITCAST(SDNode *N, SDValue &Lo,
// We know the result is a vector. The input may be either a vector or a
// scalar value.
EVT LoVT, HiVT;
- llvm::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
+ std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
SDLoc dl(N);
SDValue InOp = N->getOperand(0);
@@ -680,13 +738,13 @@ void DAGTypeLegalizer::SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo,
SDValue &Hi) {
EVT LoVT, HiVT;
SDLoc dl(N);
- llvm::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
+ std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
unsigned LoNumElts = LoVT.getVectorNumElements();
SmallVector<SDValue, 8> LoOps(N->op_begin(), N->op_begin()+LoNumElts);
- Lo = DAG.getNode(ISD::BUILD_VECTOR, dl, LoVT, &LoOps[0], LoOps.size());
+ Lo = DAG.getNode(ISD::BUILD_VECTOR, dl, LoVT, LoOps);
SmallVector<SDValue, 8> HiOps(N->op_begin()+LoNumElts, N->op_end());
- Hi = DAG.getNode(ISD::BUILD_VECTOR, dl, HiVT, &HiOps[0], HiOps.size());
+ Hi = DAG.getNode(ISD::BUILD_VECTOR, dl, HiVT, HiOps);
}
void DAGTypeLegalizer::SplitVecRes_CONCAT_VECTORS(SDNode *N, SDValue &Lo,
@@ -701,13 +759,13 @@ void DAGTypeLegalizer::SplitVecRes_CONCAT_VECTORS(SDNode *N, SDValue &Lo,
}
EVT LoVT, HiVT;
- llvm::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
+ std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
SmallVector<SDValue, 8> LoOps(N->op_begin(), N->op_begin()+NumSubvectors);
- Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, LoVT, &LoOps[0], LoOps.size());
+ Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, LoVT, LoOps);
SmallVector<SDValue, 8> HiOps(N->op_begin()+NumSubvectors, N->op_end());
- Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HiVT, &HiOps[0], HiOps.size());
+ Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HiVT, HiOps);
}
void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(SDNode *N, SDValue &Lo,
@@ -717,7 +775,7 @@ void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(SDNode *N, SDValue &Lo,
SDLoc dl(N);
EVT LoVT, HiVT;
- llvm::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
+ std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, LoVT, Vec, Idx);
uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
@@ -778,7 +836,7 @@ void DAGTypeLegalizer::SplitVecRes_InregOp(SDNode *N, SDValue &Lo,
SDLoc dl(N);
EVT LoVT, HiVT;
- llvm::tie(LoVT, HiVT) =
+ std::tie(LoVT, HiVT) =
DAG.GetSplitDestVTs(cast<VTSDNode>(N->getOperand(1))->getVT());
Lo = DAG.getNode(N->getOpcode(), dl, LHSLo.getValueType(), LHSLo,
@@ -842,7 +900,7 @@ void DAGTypeLegalizer::SplitVecRes_SCALAR_TO_VECTOR(SDNode *N, SDValue &Lo,
SDValue &Hi) {
EVT LoVT, HiVT;
SDLoc dl(N);
- llvm::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
+ std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
Lo = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoVT, N->getOperand(0));
Hi = DAG.getUNDEF(HiVT);
}
@@ -852,7 +910,7 @@ void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo,
assert(ISD::isUNINDEXEDLoad(LD) && "Indexed load during type legalization!");
EVT LoVT, HiVT;
SDLoc dl(LD);
- llvm::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(LD->getValueType(0));
+ std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(LD->getValueType(0));
ISD::LoadExtType ExtType = LD->getExtensionType();
SDValue Ch = LD->getChain();
@@ -866,7 +924,7 @@ void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo,
const MDNode *TBAAInfo = LD->getTBAAInfo();
EVT LoMemVT, HiMemVT;
- llvm::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
+ std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
Lo = DAG.getLoad(ISD::UNINDEXED, ExtType, LoVT, dl, Ch, Ptr, Offset,
LD->getPointerInfo(), LoMemVT, isVolatile, isNonTemporal,
@@ -897,12 +955,12 @@ void DAGTypeLegalizer::SplitVecRes_SETCC(SDNode *N, SDValue &Lo, SDValue &Hi) {
EVT LoVT, HiVT;
SDLoc DL(N);
- llvm::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
+ std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
// Split the input.
SDValue LL, LH, RL, RH;
- llvm::tie(LL, LH) = DAG.SplitVectorOperand(N, 0);
- llvm::tie(RL, RH) = DAG.SplitVectorOperand(N, 1);
+ std::tie(LL, LH) = DAG.SplitVectorOperand(N, 0);
+ std::tie(RL, RH) = DAG.SplitVectorOperand(N, 1);
Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2));
Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2));
@@ -913,7 +971,7 @@ void DAGTypeLegalizer::SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo,
// Get the dest types - they may not match the input types, e.g. int_to_fp.
EVT LoVT, HiVT;
SDLoc dl(N);
- llvm::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
+ std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
// If the input also splits, handle it directly for a compile time speedup.
// Otherwise split it by hand.
@@ -921,7 +979,7 @@ void DAGTypeLegalizer::SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo,
if (getTypeAction(InVT) == TargetLowering::TypeSplitVector)
GetSplitVector(N->getOperand(0), Lo, Hi);
else
- llvm::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
+ std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
if (N->getOpcode() == ISD::FP_ROUND) {
Lo = DAG.getNode(N->getOpcode(), dl, LoVT, Lo, N->getOperand(1));
@@ -950,7 +1008,7 @@ void DAGTypeLegalizer::SplitVecRes_ExtendOp(SDNode *N, SDValue &Lo,
EVT SrcVT = N->getOperand(0).getValueType();
EVT DestVT = N->getValueType(0);
EVT LoVT, HiVT;
- llvm::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(DestVT);
+ std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(DestVT);
// We can do better than a generic split operation if the extend is doing
// more than just doubling the width of the elements and the following are
@@ -976,7 +1034,7 @@ void DAGTypeLegalizer::SplitVecRes_ExtendOp(SDNode *N, SDValue &Lo,
EVT SplitSrcVT =
EVT::getVectorVT(Ctx, SrcVT.getVectorElementType(), NumElements / 2);
EVT SplitLoVT, SplitHiVT;
- llvm::tie(SplitLoVT, SplitHiVT) = DAG.GetSplitDestVTs(NewSrcVT);
+ std::tie(SplitLoVT, SplitHiVT) = DAG.GetSplitDestVTs(NewSrcVT);
if (TLI.isTypeLegal(SrcVT) && !TLI.isTypeLegal(SplitSrcVT) &&
TLI.isTypeLegal(NewSrcVT) && TLI.isTypeLegal(SplitLoVT)) {
DEBUG(dbgs() << "Split vector extend via incremental extend:";
@@ -985,7 +1043,7 @@ void DAGTypeLegalizer::SplitVecRes_ExtendOp(SDNode *N, SDValue &Lo,
SDValue NewSrc =
DAG.getNode(N->getOpcode(), dl, NewSrcVT, N->getOperand(0));
// Get the low and high halves of the new, extended one step, vector.
- llvm::tie(Lo, Hi) = DAG.SplitVector(NewSrc, dl);
+ std::tie(Lo, Hi) = DAG.SplitVector(NewSrc, dl);
// Extend those vector halves the rest of the way.
Lo = DAG.getNode(N->getOpcode(), dl, LoVT, Lo);
Hi = DAG.getNode(N->getOpcode(), dl, HiVT, Hi);
@@ -1088,7 +1146,7 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
}
// Construct the Lo/Hi output using a BUILD_VECTOR.
- Output = DAG.getNode(ISD::BUILD_VECTOR,dl,NewVT, &SVOps[0], SVOps.size());
+ Output = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, SVOps);
} else if (InputUsed[0] == -1U) {
// No input vectors were used! The result is undefined.
Output = DAG.getUNDEF(NewVT);
@@ -1124,7 +1182,7 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))
return false;
- if (Res.getNode() == 0) {
+ if (!Res.getNode()) {
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
@@ -1198,13 +1256,13 @@ SDValue DAGTypeLegalizer::SplitVecOp_VSELECT(SDNode *N, unsigned OpNo) {
"Lo and Hi have differing types");
EVT LoOpVT, HiOpVT;
- llvm::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(Src0VT);
+ std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(Src0VT);
assert(LoOpVT == HiOpVT && "Asymmetric vector split?");
SDValue LoOp0, HiOp0, LoOp1, HiOp1, LoMask, HiMask;
- llvm::tie(LoOp0, HiOp0) = DAG.SplitVector(Src0, DL);
- llvm::tie(LoOp1, HiOp1) = DAG.SplitVector(Src1, DL);
- llvm::tie(LoMask, HiMask) = DAG.SplitVector(Mask, DL);
+ std::tie(LoOp0, HiOp0) = DAG.SplitVector(Src0, DL);
+ std::tie(LoOp1, HiOp1) = DAG.SplitVector(Src1, DL);
+ std::tie(LoMask, HiMask) = DAG.SplitVector(Mask, DL);
SDValue LoSelect =
DAG.getNode(ISD::VSELECT, DL, LoOpVT, LoMask, LoOp0, LoOp1);
@@ -1319,7 +1377,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
GetSplitVector(N->getOperand(1), Lo, Hi);
EVT LoMemVT, HiMemVT;
- llvm::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
+ std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
unsigned IncrementSize = LoMemVT.getSizeInBits()/8;
@@ -1366,8 +1424,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_CONCAT_VECTORS(SDNode *N) {
}
}
- return DAG.getNode(ISD::BUILD_VECTOR, DL, N->getValueType(0),
- &Elts[0], Elts.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, DL, N->getValueType(0), Elts);
}
SDValue DAGTypeLegalizer::SplitVecOp_TRUNCATE(SDNode *N) {
@@ -1408,7 +1465,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_TRUNCATE(SDNode *N) {
// Extract the halves of the input via extract_subvector.
SDValue InLoVec, InHiVec;
- llvm::tie(InLoVec, InHiVec) = DAG.SplitVector(InVec, DL);
+ std::tie(InLoVec, InHiVec) = DAG.SplitVector(InVec, DL);
// Truncate them to 1/2 the element size.
EVT HalfElementVT = EVT::getIntegerVT(*DAG.getContext(), InElementSize/2);
EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), HalfElementVT,
@@ -1510,7 +1567,6 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::ADD:
case ISD::AND:
- case ISD::BSWAP:
case ISD::MUL:
case ISD::MULHS:
case ISD::MULHU:
@@ -1557,6 +1613,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
Res = WidenVecRes_Convert(N);
break;
+ case ISD::BSWAP:
case ISD::CTLZ:
case ISD::CTPOP:
case ISD::CTTZ:
@@ -1724,8 +1781,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_BinaryCanTrap(SDNode *N) {
while (SubConcatEnd < OpsToConcat)
SubConcatOps[SubConcatEnd++] = undefVec;
ConcatOps[SubConcatIdx] = DAG.getNode(ISD::CONCAT_VECTORS, dl,
- NextVT, &SubConcatOps[0],
- OpsToConcat);
+ NextVT, SubConcatOps);
ConcatEnd = SubConcatIdx + 1;
}
}
@@ -1744,7 +1800,8 @@ SDValue DAGTypeLegalizer::WidenVecRes_BinaryCanTrap(SDNode *N) {
for (unsigned j = ConcatEnd; j < NumOps; ++j)
ConcatOps[j] = UndefVal;
}
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, &ConcatOps[0], NumOps);
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT,
+ makeArrayRef(ConcatOps.data(), NumOps));
}
SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
@@ -1786,8 +1843,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
SDValue UndefVal = DAG.getUNDEF(InVT);
for (unsigned i = 1; i != NumConcat; ++i)
Ops[i] = UndefVal;
- SDValue InVec = DAG.getNode(ISD::CONCAT_VECTORS, DL, InWidenVT,
- &Ops[0], NumConcat);
+ SDValue InVec = DAG.getNode(ISD::CONCAT_VECTORS, DL, InWidenVT, Ops);
if (N->getNumOperands() == 1)
return DAG.getNode(Opcode, DL, WidenVT, InVec);
return DAG.getNode(Opcode, DL, WidenVT, InVec, N->getOperand(1));
@@ -1822,7 +1878,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
for (; i < WidenNumElts; ++i)
Ops[i] = UndefVal;
- return DAG.getNode(ISD::BUILD_VECTOR, DL, WidenVT, &Ops[0], WidenNumElts);
+ return DAG.getNode(ISD::BUILD_VECTOR, DL, WidenVT, Ops);
}
SDValue DAGTypeLegalizer::WidenVecRes_POWI(SDNode *N) {
@@ -1946,11 +2002,9 @@ SDValue DAGTypeLegalizer::WidenVecRes_BITCAST(SDNode *N) {
SDValue NewVec;
if (InVT.isVector())
- NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl,
- NewInVT, &Ops[0], NewNumElts);
+ NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewInVT, Ops);
else
- NewVec = DAG.getNode(ISD::BUILD_VECTOR, dl,
- NewInVT, &Ops[0], NewNumElts);
+ NewVec = DAG.getNode(ISD::BUILD_VECTOR, dl, NewInVT, Ops);
return DAG.getNode(ISD::BITCAST, dl, WidenVT, NewVec);
}
}
@@ -1975,7 +2029,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_BUILD_VECTOR(SDNode *N) {
assert(WidenNumElts >= NumElts && "Shrinking vector instead of widening!");
NewOps.append(WidenNumElts - NumElts, DAG.getUNDEF(EltVT));
- return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, &NewOps[0], NewOps.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, NewOps);
}
SDValue DAGTypeLegalizer::WidenVecRes_CONCAT_VECTORS(SDNode *N) {
@@ -1998,7 +2052,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONCAT_VECTORS(SDNode *N) {
Ops[i] = N->getOperand(i);
for (unsigned i = NumOperands; i != NumConcat; ++i)
Ops[i] = UndefVal;
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, &Ops[0], NumConcat);
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, Ops);
}
} else {
InputWidened = true;
@@ -2044,7 +2098,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONCAT_VECTORS(SDNode *N) {
SDValue UndefVal = DAG.getUNDEF(EltVT);
for (; Idx < WidenNumElts; ++Idx)
Ops[Idx] = UndefVal;
- return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, &Ops[0], WidenNumElts);
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, Ops);
}
SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
@@ -2089,7 +2143,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
for (unsigned i = 1; i != NumConcat; ++i)
Ops[i] = UndefVal;
- InOp = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWidenVT, &Ops[0],NumConcat);
+ InOp = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWidenVT, Ops);
return DAG.getConvertRndSat(WidenVT, dl, InOp, DTyOp, STyOp, RndOp,
SatOp, CvtCode);
}
@@ -2122,7 +2176,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
for (; i < WidenNumElts; ++i)
Ops[i] = UndefVal;
- return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, &Ops[0], WidenNumElts);
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, Ops);
}
SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
@@ -2161,7 +2215,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
SDValue UndefVal = DAG.getUNDEF(EltVT);
for (; i < WidenNumElts; ++i)
Ops[i] = UndefVal;
- return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, &Ops[0], WidenNumElts);
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, Ops);
}
SDValue DAGTypeLegalizer::WidenVecRes_INSERT_VECTOR_ELT(SDNode *N) {
@@ -2189,8 +2243,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_LOAD(SDNode *N) {
if (LdChain.size() == 1)
NewChain = LdChain[0];
else
- NewChain = DAG.getNode(ISD::TokenFactor, SDLoc(LD), MVT::Other,
- &LdChain[0], LdChain.size());
+ NewChain = DAG.getNode(ISD::TokenFactor, SDLoc(LD), MVT::Other, LdChain);
// Modified the chain - switch anything that used the old chain to use
// the new one.
@@ -2300,7 +2353,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_VSETCC(SDNode *N) {
SDValue InOp1 = N->getOperand(0);
EVT InVT = InOp1.getValueType();
- assert(InVT.isVector() && "can not widen non vector type");
+ assert(InVT.isVector() && "can not widen non-vector type");
EVT WidenInVT = EVT::getVectorVT(*DAG.getContext(),
InVT.getVectorElementType(), WidenNumElts);
InOp1 = GetWidenedVector(InOp1);
@@ -2346,15 +2399,18 @@ bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) {
case ISD::STORE: Res = WidenVecOp_STORE(N); break;
case ISD::SETCC: Res = WidenVecOp_SETCC(N); break;
+ case ISD::ANY_EXTEND:
+ case ISD::SIGN_EXTEND:
+ case ISD::ZERO_EXTEND:
+ Res = WidenVecOp_EXTEND(N);
+ break;
+
case ISD::FP_EXTEND:
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
case ISD::TRUNCATE:
- case ISD::SIGN_EXTEND:
- case ISD::ZERO_EXTEND:
- case ISD::ANY_EXTEND:
Res = WidenVecOp_Convert(N);
break;
}
@@ -2375,6 +2431,68 @@ bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) {
return false;
}
+SDValue DAGTypeLegalizer::WidenVecOp_EXTEND(SDNode *N) {
+ SDLoc DL(N);
+ EVT VT = N->getValueType(0);
+
+ SDValue InOp = N->getOperand(0);
+ // If some legalization strategy other than widening is used on the operand,
+ // we can't safely assume that just extending the low lanes is the correct
+ // transformation.
+ if (getTypeAction(InOp.getValueType()) != TargetLowering::TypeWidenVector)
+ return WidenVecOp_Convert(N);
+ InOp = GetWidenedVector(InOp);
+ assert(VT.getVectorNumElements() <
+ InOp.getValueType().getVectorNumElements() &&
+ "Input wasn't widened!");
+
+ // We may need to further widen the operand until it has the same total
+ // vector size as the result.
+ EVT InVT = InOp.getValueType();
+ if (InVT.getSizeInBits() != VT.getSizeInBits()) {
+ EVT InEltVT = InVT.getVectorElementType();
+ for (int i = MVT::FIRST_VECTOR_VALUETYPE, e = MVT::LAST_VECTOR_VALUETYPE; i < e; ++i) {
+ EVT FixedVT = (MVT::SimpleValueType)i;
+ EVT FixedEltVT = FixedVT.getVectorElementType();
+ if (TLI.isTypeLegal(FixedVT) &&
+ FixedVT.getSizeInBits() == VT.getSizeInBits() &&
+ FixedEltVT == InEltVT) {
+ assert(FixedVT.getVectorNumElements() >= VT.getVectorNumElements() &&
+ "Not enough elements in the fixed type for the operand!");
+ assert(FixedVT.getVectorNumElements() != InVT.getVectorNumElements() &&
+ "We can't have the same type as we started with!");
+ if (FixedVT.getVectorNumElements() > InVT.getVectorNumElements())
+ InOp = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, FixedVT,
+ DAG.getUNDEF(FixedVT), InOp,
+ DAG.getConstant(0, TLI.getVectorIdxTy()));
+ else
+ InOp = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, FixedVT, InOp,
+ DAG.getConstant(0, TLI.getVectorIdxTy()));
+ break;
+ }
+ }
+ InVT = InOp.getValueType();
+ if (InVT.getSizeInBits() != VT.getSizeInBits())
+ // We couldn't find a legal vector type that was a widening of the input
+ // and could be extended in-register to the result type, so we have to
+ // scalarize.
+ return WidenVecOp_Convert(N);
+ }
+
+ // Use special DAG nodes to represent the operation of extending the
+ // low lanes.
+ switch (N->getOpcode()) {
+ default:
+ llvm_unreachable("Extend legalization on extend operation!");
+ case ISD::ANY_EXTEND:
+ return DAG.getAnyExtendVectorInReg(InOp, DL, VT);
+ case ISD::SIGN_EXTEND:
+ return DAG.getSignExtendVectorInReg(InOp, DL, VT);
+ case ISD::ZERO_EXTEND:
+ return DAG.getZeroExtendVectorInReg(InOp, DL, VT);
+ }
+}
+
SDValue DAGTypeLegalizer::WidenVecOp_Convert(SDNode *N) {
// Since the result is legal and the input is illegal, it is unlikely
// that we can fix the input to a legal type so unroll the convert
@@ -2396,7 +2514,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_Convert(SDNode *N) {
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, InOp,
DAG.getConstant(i, TLI.getVectorIdxTy())));
- return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], NumElts);
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
SDValue DAGTypeLegalizer::WidenVecOp_BITCAST(SDNode *N) {
@@ -2445,7 +2563,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_CONCAT_VECTORS(SDNode *N) {
Ops[Idx++] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
DAG.getConstant(j, TLI.getVectorIdxTy()));
}
- return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], NumElts);
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
@@ -2474,8 +2592,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_STORE(SDNode *N) {
if (StChain.size() == 1)
return StChain[0];
else
- return DAG.getNode(ISD::TokenFactor, SDLoc(ST),
- MVT::Other,&StChain[0],StChain.size());
+ return DAG.getNode(ISD::TokenFactor, SDLoc(ST), MVT::Other, StChain);
}
SDValue DAGTypeLegalizer::WidenVecOp_SETCC(SDNode *N) {
@@ -2650,8 +2767,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
ConcatOps[0] = LdOp;
for (unsigned i = 1; i != NumConcat; ++i)
ConcatOps[i] = UndefVal;
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, &ConcatOps[0],
- NumConcat);
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, ConcatOps);
}
// Load vector by using multiple loads from largest vector to scalar
@@ -2685,8 +2801,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
Loads.push_back(DAG.getUNDEF(L->getValueType(0)));
size += L->getValueSizeInBits(0);
}
- L = DAG.getNode(ISD::CONCAT_VECTORS, dl, LdOp->getValueType(0),
- &Loads[0], Loads.size());
+ L = DAG.getNode(ISD::CONCAT_VECTORS, dl, LdOp->getValueType(0), Loads);
}
} else {
L = DAG.getLoad(NewVT, dl, Chain, BasePtr,
@@ -2730,7 +2845,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
if (NewLdTy != LdTy) {
// Create a larger vector
ConcatOps[End-1] = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewLdTy,
- &ConcatOps[Idx], End - Idx);
+ makeArrayRef(&ConcatOps[Idx], End - Idx));
Idx = End - 1;
LdTy = NewLdTy;
}
@@ -2739,7 +2854,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
if (WidenWidth == LdTy.getSizeInBits()*(End - Idx))
return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT,
- &ConcatOps[Idx], End - Idx);
+ makeArrayRef(&ConcatOps[Idx], End - Idx));
// We need to fill the rest with undefs to build the vector
unsigned NumOps = WidenWidth / LdTy.getSizeInBits();
@@ -2752,7 +2867,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
for (; i != NumOps; ++i)
WidenOps[i] = UndefVal;
}
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, &WidenOps[0],NumOps);
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, WidenOps);
}
SDValue
@@ -2803,7 +2918,7 @@ DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVectorImpl<SDValue> &LdChain,
for (; i != WidenNumElts; ++i)
Ops[i] = UndefVal;
- return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, &Ops[0], Ops.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, Ops);
}
@@ -2949,7 +3064,7 @@ SDValue DAGTypeLegalizer::ModifyToType(SDValue InOp, EVT NVT) {
for (unsigned i = 1; i != NumConcat; ++i)
Ops[i] = UndefVal;
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, NVT, &Ops[0], NumConcat);
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, NVT, Ops);
}
if (WidenNumElts < InNumElts && InNumElts % WidenNumElts)
@@ -2968,5 +3083,5 @@ SDValue DAGTypeLegalizer::ModifyToType(SDValue InOp, EVT NVT) {
SDValue UndefVal = DAG.getUNDEF(EltVT);
for ( ; Idx < WidenNumElts; ++Idx)
Ops[Idx] = UndefVal;
- return DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, &Ops[0], WidenNumElts);
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, Ops);
}
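
Two mechanical migrations account for most of the LegalizeVectorTypes.cpp hunks above: llvm::tie is replaced by C++11 std::tie, and getNode-style calls stop passing a raw (&Ops[0], Ops.size()) pair in favor of handing over the container, which converts implicitly to ArrayRef<SDValue> (with makeArrayRef used for sub-ranges). A minimal, self-contained sketch of the std::tie idiom follows; the names are illustrative and not taken from the patch.

    #include <iostream>
    #include <tuple>
    #include <utility>

    // Stand-in for DAG.GetSplitDestVTs(): split a count into a low and a
    // high half. Illustrative only; this is not code from the patch.
    static std::pair<int, int> splitInHalf(int NumElts) {
      return std::make_pair(NumElts / 2, NumElts - NumElts / 2);
    }

    int main() {
      int LoElts = 0, HiElts = 0;
      // C++11 std::tie unpacks a pair into existing variables, the pattern
      // the patch adopts in place of the old llvm::tie helper.
      std::tie(LoElts, HiElts) = splitInHalf(7);
      std::cout << LoElts << " + " << HiElts << "\n"; // prints "3 + 4"
      return 0;
    }
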
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp
index 1dd2128..624003f 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp
@@ -19,7 +19,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "scheduler"
#include "llvm/CodeGen/ResourcePriorityQueue.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
@@ -31,6 +30,8 @@
using namespace llvm;
+#define DEBUG_TYPE "scheduler"
+
static cl::opt<bool> DisableDFASched("disable-dfa-sched", cl::Hidden,
cl::ZeroOrMore, cl::init(false),
cl::desc("Disable use of DFA during scheduling"));
@@ -49,7 +50,7 @@ ResourcePriorityQueue::ResourcePriorityQueue(SelectionDAGISel *IS) :
TLI = IS->getTargetLowering();
const TargetMachine &tm = (*IS->MF).getTarget();
- ResourcesModel = tm.getInstrInfo()->CreateTargetScheduleState(&tm,NULL);
+ ResourcesModel = tm.getInstrInfo()->CreateTargetScheduleState(&tm,nullptr);
// This hard requirement could be relaxed, but for now
// do not let it proceed.
assert (ResourcesModel && "Unimplemented CreateTargetScheduleState.");
@@ -214,7 +215,7 @@ bool resource_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
/// of SU, return it, otherwise return null.
SUnit *ResourcePriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
- SUnit *OnlyAvailablePred = 0;
+ SUnit *OnlyAvailablePred = nullptr;
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
SUnit &Pred = *I->getSUnit();
@@ -222,7 +223,7 @@ SUnit *ResourcePriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
// We found an available, but not scheduled, predecessor. If it's the
// only one we have found, keep track of it... otherwise give up.
if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
- return 0;
+ return nullptr;
OnlyAvailablePred = &Pred;
}
}
@@ -441,7 +442,7 @@ signed ResourcePriorityQueue::SUSchedulingCost(SUnit *SU) {
ResCount -= (regPressureDelta(SU) * ScaleTwo);
}
- // These are platform specific things.
+ // These are platform-specific things.
// Will need to go into the back end
// and accessed from here via a hook.
for (SDNode *N = SU->getNode(); N; N = N->getGluedNode()) {
@@ -581,7 +582,7 @@ void ResourcePriorityQueue::adjustPriorityOfUnscheduledPreds(SUnit *SU) {
if (SU->isAvailable) return; // All preds scheduled.
SUnit *OnlyAvailablePred = getSingleUnscheduledPred(SU);
- if (OnlyAvailablePred == 0 || !OnlyAvailablePred->isAvailable)
+ if (!OnlyAvailablePred || !OnlyAvailablePred->isAvailable)
return;
// Okay, we found a single predecessor that is available, but not scheduled.
@@ -598,12 +599,12 @@ void ResourcePriorityQueue::adjustPriorityOfUnscheduledPreds(SUnit *SU) {
/// to be placed in scheduling sequence.
SUnit *ResourcePriorityQueue::pop() {
if (empty())
- return 0;
+ return nullptr;
std::vector<SUnit *>::iterator Best = Queue.begin();
if (!DisableDFASched) {
signed BestCost = SUSchedulingCost(*Best);
- for (std::vector<SUnit *>::iterator I = llvm::next(Queue.begin()),
+ for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
E = Queue.end(); I != E; ++I) {
if (SUSchedulingCost(*I) > BestCost) {
@@ -614,14 +615,14 @@ SUnit *ResourcePriorityQueue::pop() {
}
// Use default TD scheduling mechanism.
else {
- for (std::vector<SUnit *>::iterator I = llvm::next(Queue.begin()),
+ for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
E = Queue.end(); I != E; ++I)
if (Picker(*Best, *I))
Best = I;
}
SUnit *V = *Best;
- if (Best != prior(Queue.end()))
+ if (Best != std::prev(Queue.end()))
std::swap(*Best, Queue.back());
Queue.pop_back();
@@ -633,7 +634,7 @@ SUnit *ResourcePriorityQueue::pop() {
void ResourcePriorityQueue::remove(SUnit *SU) {
assert(!Queue.empty() && "Queue is empty!");
std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(), SU);
- if (I != prior(Queue.end()))
+ if (I != std::prev(Queue.end()))
std::swap(*I, Queue.back());
Queue.pop_back();
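
The ResourcePriorityQueue.cpp changes above are the same modernization applied throughout this merge: llvm::next/llvm::prior become std::next/std::prev, NULL/0 pointer sentinels become nullptr, and the DEBUG_TYPE define moves below the includes. A small self-contained sketch of the iterator and sentinel idioms, with illustrative variable names:

    #include <iostream>
    #include <iterator>
    #include <vector>

    int main() {
      std::vector<int> Queue = {10, 20, 30, 40};

      // C++11 std::next / std::prev replace the old llvm::next / llvm::prior
      // helpers used when scanning and popping from the priority queue.
      std::vector<int>::iterator Second = std::next(Queue.begin());
      std::vector<int>::iterator Last = std::prev(Queue.end());
      std::cout << *Second << " " << *Last << "\n"; // prints "20 40"

      // nullptr replaces 0/NULL for pointer sentinels, so the "no result"
      // value is unambiguously a pointer constant.
      const int *OnlyAvailablePred = nullptr;
      if (!OnlyAvailablePred)
        std::cout << "no single unscheduled predecessor\n";
      return 0;
    }
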
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h b/contrib/llvm/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h
index 4af7172..ee54292 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h
@@ -15,8 +15,8 @@
#define LLVM_CODEGEN_SDNODEDBGVALUE_H
#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/DebugLoc.h"
#include "llvm/Support/DataTypes.h"
-#include "llvm/Support/DebugLoc.h"
namespace llvm {
@@ -45,14 +45,17 @@ private:
unsigned FrameIx; // valid for stack objects
} u;
MDNode *mdPtr;
+ bool IsIndirect;
uint64_t Offset;
DebugLoc DL;
unsigned Order;
bool Invalid;
public:
// Constructor for non-constants.
- SDDbgValue(MDNode *mdP, SDNode *N, unsigned R, uint64_t off, DebugLoc dl,
- unsigned O) : mdPtr(mdP), Offset(off), DL(dl), Order(O),
+ SDDbgValue(MDNode *mdP, SDNode *N, unsigned R,
+ bool indir, uint64_t off, DebugLoc dl,
+ unsigned O) : mdPtr(mdP), IsIndirect(indir),
+ Offset(off), DL(dl), Order(O),
Invalid(false) {
kind = SDNODE;
u.s.Node = N;
@@ -62,14 +65,16 @@ public:
// Constructor for constants.
SDDbgValue(MDNode *mdP, const Value *C, uint64_t off, DebugLoc dl,
unsigned O) :
- mdPtr(mdP), Offset(off), DL(dl), Order(O), Invalid(false) {
+ mdPtr(mdP), IsIndirect(false), Offset(off), DL(dl), Order(O),
+ Invalid(false) {
kind = CONST;
u.Const = C;
}
// Constructor for frame indices.
SDDbgValue(MDNode *mdP, unsigned FI, uint64_t off, DebugLoc dl, unsigned O) :
- mdPtr(mdP), Offset(off), DL(dl), Order(O), Invalid(false) {
+ mdPtr(mdP), IsIndirect(false), Offset(off), DL(dl), Order(O),
+ Invalid(false) {
kind = FRAMEIX;
u.FrameIx = FI;
}
@@ -92,6 +97,9 @@ public:
// Returns the FrameIx for a stack object
unsigned getFrameIx() { assert (kind==FRAMEIX); return u.FrameIx; }
+ // Returns whether this is an indirect value.
+ bool isIndirect() { return IsIndirect; }
+
// Returns the offset.
uint64_t getOffset() { return Offset; }
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
index 6c5e0ab..4d8c2c7 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "pre-RA-sched"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "InstrEmitter.h"
#include "ScheduleDAGSDNodes.h"
@@ -28,6 +27,8 @@
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "pre-RA-sched"
+
STATISTIC(NumUnfolds, "Number of nodes unfolded");
STATISTIC(NumDups, "Number of duplicated nodes");
STATISTIC(NumPRCopies, "Number of physical copies");
@@ -54,7 +55,7 @@ namespace {
}
SUnit *pop() {
- if (empty()) return NULL;
+ if (empty()) return nullptr;
SUnit *V = Queue.back();
Queue.pop_back();
return V;
@@ -80,7 +81,7 @@ public:
ScheduleDAGFast(MachineFunction &mf)
: ScheduleDAGSDNodes(mf) {}
- void Schedule();
+ void Schedule() override;
/// AddPred - adds a predecessor edge to SUnit SU.
/// This returns true if this is a new predecessor.
@@ -107,7 +108,7 @@ private:
void ListScheduleBottomUp();
/// forceUnitLatencies - The fast scheduler doesn't care about real latencies.
- bool forceUnitLatencies() const { return true; }
+ bool forceUnitLatencies() const override { return true; }
};
} // end anonymous namespace
@@ -117,11 +118,11 @@ void ScheduleDAGFast::Schedule() {
DEBUG(dbgs() << "********** List Scheduling **********\n");
NumLiveRegs = 0;
- LiveRegDefs.resize(TRI->getNumRegs(), NULL);
+ LiveRegDefs.resize(TRI->getNumRegs(), nullptr);
LiveRegCycles.resize(TRI->getNumRegs(), 0);
// Build the scheduling graph.
- BuildSchedGraph(NULL);
+ BuildSchedGraph(nullptr);
DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
SUnits[su].dumpAll(this));
@@ -144,7 +145,7 @@ void ScheduleDAGFast::ReleasePred(SUnit *SU, SDep *PredEdge) {
dbgs() << "*** Scheduling failed! ***\n";
PredSU->dump(this);
dbgs() << " has been released too many times!\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
#endif
--PredSU->NumSuccsLeft;
@@ -198,7 +199,7 @@ void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
assert(LiveRegDefs[I->getReg()] == SU &&
"Physical register dependency violated?");
--NumLiveRegs;
- LiveRegDefs[I->getReg()] = NULL;
+ LiveRegDefs[I->getReg()] = nullptr;
LiveRegCycles[I->getReg()] = 0;
}
}
@@ -211,18 +212,18 @@ void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
/// successors to the newly created node.
SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
if (SU->getNode()->getGluedNode())
- return NULL;
+ return nullptr;
SDNode *N = SU->getNode();
if (!N)
- return NULL;
+ return nullptr;
SUnit *NewSU;
bool TryUnfold = false;
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
EVT VT = N->getValueType(i);
if (VT == MVT::Glue)
- return NULL;
+ return nullptr;
else if (VT == MVT::Other)
TryUnfold = true;
}
@@ -230,13 +231,13 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
const SDValue &Op = N->getOperand(i);
EVT VT = Op.getNode()->getValueType(Op.getResNo());
if (VT == MVT::Glue)
- return NULL;
+ return nullptr;
}
if (TryUnfold) {
SmallVector<SDNode*, 2> NewNodes;
if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
- return NULL;
+ return nullptr;
DEBUG(dbgs() << "Unfolding SU # " << SU->NodeNum << "\n");
assert(NewNodes.size() == 2 && "Expected a load folding node!");
@@ -388,11 +389,11 @@ void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC,
SmallVectorImpl<SUnit*> &Copies) {
- SUnit *CopyFromSU = newSUnit(static_cast<SDNode *>(NULL));
+ SUnit *CopyFromSU = newSUnit(static_cast<SDNode *>(nullptr));
CopyFromSU->CopySrcRC = SrcRC;
CopyFromSU->CopyDstRC = DestRC;
- SUnit *CopyToSU = newSUnit(static_cast<SDNode *>(NULL));
+ SUnit *CopyToSU = newSUnit(static_cast<SDNode *>(nullptr));
CopyToSU->CopySrcRC = DestRC;
CopyToSU->CopyDstRC = SrcRC;
@@ -583,7 +584,7 @@ void ScheduleDAGFast::ListScheduleBottomUp() {
// and it is expensive.
// If cross copy register class is null, then it's not possible to copy
// the value at all.
- SUnit *NewDef = 0;
+ SUnit *NewDef = nullptr;
if (DestRC != RC) {
NewDef = CopyAndMoveSuccessors(LRDef);
if (!DestRC && !NewDef)
@@ -646,9 +647,10 @@ class ScheduleDAGLinearize : public ScheduleDAGSDNodes {
public:
ScheduleDAGLinearize(MachineFunction &mf) : ScheduleDAGSDNodes(mf) {}
- void Schedule();
+ void Schedule() override;
- MachineBasicBlock *EmitSchedule(MachineBasicBlock::iterator &InsertPos);
+ MachineBasicBlock *
+ EmitSchedule(MachineBasicBlock::iterator &InsertPos) override;
private:
std::vector<SDNode*> Sequence;
@@ -660,7 +662,7 @@ private:
void ScheduleDAGLinearize::ScheduleNode(SDNode *N) {
if (N->getNodeId() != 0)
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
if (!N->isMachineOpcode() &&
(N->getOpcode() == ISD::EntryToken || isPassiveNode(N)))
@@ -673,7 +675,7 @@ void ScheduleDAGLinearize::ScheduleNode(SDNode *N) {
unsigned NumOps = N->getNumOperands();
if (unsigned NumLeft = NumOps) {
- SDNode *GluedOpN = 0;
+ SDNode *GluedOpN = nullptr;
do {
const SDValue &Op = N->getOperand(NumLeft-1);
SDNode *OpN = Op.getNode();
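
Besides the nullptr cleanup, ScheduleDAGFast.cpp gains explicit override markers on its virtual methods. A minimal sketch of why that annotation helps, assuming a simplified class hierarchy whose names are illustrative, not from the patch:

    #include <iostream>

    struct SchedulerBase {
      virtual void Schedule() { std::cout << "base schedule\n"; }
      virtual bool forceUnitLatencies() const { return false; }
      virtual ~SchedulerBase() {}
    };

    struct FastScheduler : SchedulerBase {
      // 'override' (added throughout the patch to Schedule, forceUnitLatencies,
      // pop, etc.) makes the compiler reject the declaration if the base-class
      // signature ever changes underneath it.
      void Schedule() override { std::cout << "fast schedule\n"; }
      bool forceUnitLatencies() const override { return true; }
    };

    int main() {
      FastScheduler FS;
      SchedulerBase &S = FS;
      S.Schedule();                                // prints "fast schedule"
      std::cout << S.forceUnitLatencies() << "\n"; // prints "1"
      return 0;
    }
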
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index 1a562d7..dedca41 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -15,7 +15,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "pre-RA-sched"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "ScheduleDAGSDNodes.h"
#include "llvm/ADT/STLExtras.h"
@@ -36,6 +35,8 @@
#include <climits>
using namespace llvm;
+#define DEBUG_TYPE "pre-RA-sched"
+
STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds, "Number of nodes unfolded");
STATISTIC(NumDups, "Number of duplicated nodes");
@@ -163,13 +164,14 @@ public:
CodeGenOpt::Level OptLevel)
: ScheduleDAGSDNodes(mf),
NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
- Topo(SUnits, NULL) {
+ Topo(SUnits, nullptr) {
const TargetMachine &tm = mf.getTarget();
if (DisableSchedCycles || !NeedLatency)
HazardRec = new ScheduleHazardRecognizer();
else
- HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(&tm, this);
+ HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(
+ tm.getSubtargetImpl(), this);
}
~ScheduleDAGRRList() {
@@ -177,7 +179,7 @@ public:
delete AvailableQueue;
}
- void Schedule();
+ void Schedule() override;
ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }
@@ -261,7 +263,7 @@ private:
/// forceUnitLatencies - Register-pressure-reducing scheduling doesn't
/// need actual latency information but the hybrid scheduler does.
- bool forceUnitLatencies() const {
+ bool forceUnitLatencies() const override {
return !NeedLatency;
}
};
@@ -327,13 +329,13 @@ void ScheduleDAGRRList::Schedule() {
NumLiveRegs = 0;
// Allocate slots for each physical register, plus one for a special register
// to track the virtual resource of a calling sequence.
- LiveRegDefs.resize(TRI->getNumRegs() + 1, NULL);
- LiveRegGens.resize(TRI->getNumRegs() + 1, NULL);
+ LiveRegDefs.resize(TRI->getNumRegs() + 1, nullptr);
+ LiveRegGens.resize(TRI->getNumRegs() + 1, nullptr);
CallSeqEndForStart.clear();
assert(Interferences.empty() && LRegsMap.empty() && "stale Interferences");
// Build the scheduling graph.
- BuildSchedGraph(NULL);
+ BuildSchedGraph(nullptr);
DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
SUnits[su].dumpAll(this));
@@ -369,7 +371,7 @@ void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
dbgs() << "*** Scheduling failed! ***\n";
PredSU->dump(this);
dbgs() << " has been released too many times!\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
#endif
--PredSU->NumSuccsLeft;
@@ -461,7 +463,7 @@ FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
// to get to the CALLSEQ_BEGIN, but we need to find the path with the
// most nesting in order to ensure that we find the corresponding match.
if (N->getOpcode() == ISD::TokenFactor) {
- SDNode *Best = 0;
+ SDNode *Best = nullptr;
unsigned BestMaxNest = MaxNest;
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
unsigned MyNestLevel = NestLevel;
@@ -497,10 +499,10 @@ FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
N = N->getOperand(i).getNode();
goto found_chain_operand;
}
- return 0;
+ return nullptr;
found_chain_operand:;
if (N->getOpcode() == ISD::EntryToken)
- return 0;
+ return nullptr;
}
}
@@ -742,8 +744,8 @@ void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
--NumLiveRegs;
- LiveRegDefs[I->getReg()] = NULL;
- LiveRegGens[I->getReg()] = NULL;
+ LiveRegDefs[I->getReg()] = nullptr;
+ LiveRegGens[I->getReg()] = nullptr;
releaseInterferences(I->getReg());
}
}
@@ -757,8 +759,8 @@ void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
--NumLiveRegs;
- LiveRegDefs[CallResource] = NULL;
- LiveRegGens[CallResource] = NULL;
+ LiveRegDefs[CallResource] = nullptr;
+ LiveRegGens[CallResource] = nullptr;
releaseInterferences(CallResource);
}
}
@@ -813,8 +815,8 @@ void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
"Physical register dependency violated?");
--NumLiveRegs;
- LiveRegDefs[I->getReg()] = NULL;
- LiveRegGens[I->getReg()] = NULL;
+ LiveRegDefs[I->getReg()] = nullptr;
+ LiveRegGens[I->getReg()] = nullptr;
releaseInterferences(I->getReg());
}
}
@@ -841,8 +843,8 @@ void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
--NumLiveRegs;
- LiveRegDefs[CallResource] = NULL;
- LiveRegGens[CallResource] = NULL;
+ LiveRegDefs[CallResource] = nullptr;
+ LiveRegGens[CallResource] = nullptr;
releaseInterferences(CallResource);
}
}
@@ -855,7 +857,7 @@ void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
// This becomes the nearest def. Note that an earlier def may still be
// pending if this is a two-address node.
LiveRegDefs[I->getReg()] = SU;
- if (LiveRegGens[I->getReg()] == NULL ||
+ if (LiveRegGens[I->getReg()] == nullptr ||
I->getSUnit()->getHeight() < LiveRegGens[I->getReg()]->getHeight())
LiveRegGens[I->getReg()] = I->getSUnit();
}
@@ -936,17 +938,17 @@ static bool isOperandOf(const SUnit *SU, SDNode *N) {
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
SDNode *N = SU->getNode();
if (!N)
- return NULL;
+ return nullptr;
if (SU->getNode()->getGluedNode())
- return NULL;
+ return nullptr;
SUnit *NewSU;
bool TryUnfold = false;
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
EVT VT = N->getValueType(i);
if (VT == MVT::Glue)
- return NULL;
+ return nullptr;
else if (VT == MVT::Other)
TryUnfold = true;
}
@@ -954,18 +956,18 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
const SDValue &Op = N->getOperand(i);
EVT VT = Op.getNode()->getValueType(Op.getResNo());
if (VT == MVT::Glue)
- return NULL;
+ return nullptr;
}
if (TryUnfold) {
SmallVector<SDNode*, 2> NewNodes;
if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
- return NULL;
+ return nullptr;
// unfolding an x86 DEC64m operation results in store, dec, load which
// can't be handled here so quit
if (NewNodes.size() == 3)
- return NULL;
+ return nullptr;
DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
assert(NewNodes.size() == 2 && "Expected a load folding node!");
@@ -1136,11 +1138,11 @@ void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC,
SmallVectorImpl<SUnit*> &Copies) {
- SUnit *CopyFromSU = CreateNewSUnit(NULL);
+ SUnit *CopyFromSU = CreateNewSUnit(nullptr);
CopyFromSU->CopySrcRC = SrcRC;
CopyFromSU->CopyDstRC = DestRC;
- SUnit *CopyToSU = CreateNewSUnit(NULL);
+ SUnit *CopyToSU = CreateNewSUnit(nullptr);
CopyToSU->CopySrcRC = DestRC;
CopyToSU->CopyDstRC = SrcRC;
@@ -1244,7 +1246,7 @@ static const uint32_t *getNodeRegMask(const SDNode *N) {
if (const RegisterMaskSDNode *Op =
dyn_cast<RegisterMaskSDNode>(N->getOperand(i).getNode()))
return Op->getRegMask();
- return NULL;
+ return nullptr;
}
/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
@@ -1355,7 +1357,7 @@ void ScheduleDAGRRList::releaseInterferences(unsigned Reg) {
/// (2) No Hazards: resources are available
/// (3) No Interferences: may unschedule to break register interferences.
SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
- SUnit *CurSU = AvailableQueue->empty() ? 0 : AvailableQueue->pop();
+ SUnit *CurSU = AvailableQueue->empty() ? nullptr : AvailableQueue->pop();
while (CurSU) {
SmallVector<unsigned, 4> LRegs;
if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
@@ -1371,7 +1373,7 @@ SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
Interferences.push_back(CurSU);
}
else {
- assert(CurSU->isPending && "Intereferences are pending");
+ assert(CurSU->isPending && "Interferences are pending");
// Update the interference with current live regs.
LRegsPair.first->second = LRegs;
}
@@ -1389,7 +1391,7 @@ SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
// Try unscheduling up to the point where it's safe to schedule
// this node.
- SUnit *BtSU = NULL;
+ SUnit *BtSU = nullptr;
unsigned LiveCycle = UINT_MAX;
for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
unsigned Reg = LRegs[j];
@@ -1449,7 +1451,7 @@ SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
// expensive.
// If cross copy register class is null, then it's not possible to copy
// the value at all.
- SUnit *NewDef = 0;
+ SUnit *NewDef = nullptr;
if (DestRC != RC) {
NewDef = CopyAndMoveSuccessors(LRDef);
if (!DestRC && !NewDef)
@@ -1539,7 +1541,6 @@ template<class SF>
struct reverse_sort : public queue_sort {
SF &SortFunc;
reverse_sort(SF &sf) : SortFunc(sf) {}
- reverse_sort(const reverse_sort &RHS) : SortFunc(RHS.SortFunc) {}
bool operator()(SUnit* left, SUnit* right) const {
// reverse left/right rather than simply !SortFunc(left, right)
@@ -1559,7 +1560,6 @@ struct bu_ls_rr_sort : public queue_sort {
RegReductionPQBase *SPQ;
bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
- bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
bool operator()(SUnit* left, SUnit* right) const;
};
@@ -1574,8 +1574,6 @@ struct src_ls_rr_sort : public queue_sort {
RegReductionPQBase *SPQ;
src_ls_rr_sort(RegReductionPQBase *spq)
: SPQ(spq) {}
- src_ls_rr_sort(const src_ls_rr_sort &RHS)
- : SPQ(RHS.SPQ) {}
bool operator()(SUnit* left, SUnit* right) const;
};
@@ -1590,8 +1588,6 @@ struct hybrid_ls_rr_sort : public queue_sort {
RegReductionPQBase *SPQ;
hybrid_ls_rr_sort(RegReductionPQBase *spq)
: SPQ(spq) {}
- hybrid_ls_rr_sort(const hybrid_ls_rr_sort &RHS)
- : SPQ(RHS.SPQ) {}
bool isReady(SUnit *SU, unsigned CurCycle) const;
@@ -1609,8 +1605,6 @@ struct ilp_ls_rr_sort : public queue_sort {
RegReductionPQBase *SPQ;
ilp_ls_rr_sort(RegReductionPQBase *spq)
: SPQ(spq) {}
- ilp_ls_rr_sort(const ilp_ls_rr_sort &RHS)
- : SPQ(RHS.SPQ) {}
bool isReady(SUnit *SU, unsigned CurCycle) const;
@@ -1654,7 +1648,7 @@ public:
const TargetLowering *tli)
: SchedulingPriorityQueue(hasReadyFilter),
CurQueueId(0), TracksRegPressure(tracksrp), SrcOrder(srcorder),
- MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
+ MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(nullptr) {
if (TracksRegPressure) {
unsigned NumRC = TRI->getNumRegClasses();
RegLimit.resize(NumRC);
@@ -1675,14 +1669,14 @@ public:
return scheduleDAG->getHazardRec();
}
- void initNodes(std::vector<SUnit> &sunits);
+ void initNodes(std::vector<SUnit> &sunits) override;
- void addNode(const SUnit *SU);
+ void addNode(const SUnit *SU) override;
- void updateNode(const SUnit *SU);
+ void updateNode(const SUnit *SU) override;
- void releaseState() {
- SUnits = 0;
+ void releaseState() override {
+ SUnits = nullptr;
SethiUllmanNumbers.clear();
std::fill(RegPressure.begin(), RegPressure.end(), 0);
}
@@ -1695,26 +1689,26 @@ public:
return SU->getNode()->getIROrder();
}
- bool empty() const { return Queue.empty(); }
+ bool empty() const override { return Queue.empty(); }
- void push(SUnit *U) {
+ void push(SUnit *U) override {
assert(!U->NodeQueueId && "Node in the queue already");
U->NodeQueueId = ++CurQueueId;
Queue.push_back(U);
}
- void remove(SUnit *SU) {
+ void remove(SUnit *SU) override {
assert(!Queue.empty() && "Queue is empty!");
assert(SU->NodeQueueId != 0 && "Not in queue!");
std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
SU);
- if (I != prior(Queue.end()))
+ if (I != std::prev(Queue.end()))
std::swap(*I, Queue.back());
Queue.pop_back();
SU->NodeQueueId = 0;
}
- bool tracksRegPressure() const { return TracksRegPressure; }
+ bool tracksRegPressure() const override { return TracksRegPressure; }
void dumpRegPressure() const;
@@ -1724,9 +1718,9 @@ public:
int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;
- void scheduledNode(SUnit *SU);
+ void scheduledNode(SUnit *SU) override;
- void unscheduledNode(SUnit *SU);
+ void unscheduledNode(SUnit *SU) override;
protected:
bool canClobber(const SUnit *SU, const SUnit *Op);
@@ -1738,12 +1732,12 @@ protected:
template<class SF>
static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
std::vector<SUnit *>::iterator Best = Q.begin();
- for (std::vector<SUnit *>::iterator I = llvm::next(Q.begin()),
+ for (std::vector<SUnit *>::iterator I = std::next(Q.begin()),
E = Q.end(); I != E; ++I)
if (Picker(*Best, *I))
Best = I;
SUnit *V = *Best;
- if (Best != prior(Q.end()))
+ if (Best != std::prev(Q.end()))
std::swap(*Best, Q.back());
Q.pop_back();
return V;
@@ -1776,14 +1770,14 @@ public:
tii, tri, tli),
Picker(this) {}
- bool isBottomUp() const { return SF::IsBottomUp; }
+ bool isBottomUp() const override { return SF::IsBottomUp; }
- bool isReady(SUnit *U) const {
+ bool isReady(SUnit *U) const override {
return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
}
- SUnit *pop() {
- if (Queue.empty()) return NULL;
+ SUnit *pop() override {
+ if (Queue.empty()) return nullptr;
SUnit *V = popFromQueue(Queue, Picker, scheduleDAG);
V->NodeQueueId = 0;
@@ -1791,7 +1785,7 @@ public:
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
- void dump(ScheduleDAG *DAG) const {
+ void dump(ScheduleDAG *DAG) const override {
// Emulate pop() without clobbering NodeQueueIds.
std::vector<SUnit*> DumpQueue = Queue;
SF DumpPicker = Picker;
@@ -2832,7 +2826,7 @@ void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
continue;
// Locate the single data predecessor.
- SUnit *PredSU = 0;
+ SUnit *PredSU = nullptr;
for (SUnit::const_pred_iterator II = SU->Preds.begin(),
EE = SU->Preds.end(); II != EE; ++II)
if (!II->isCtrl()) {
@@ -2988,7 +2982,7 @@ llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
const TargetRegisterInfo *TRI = TM.getRegisterInfo();
BURegReductionPriorityQueue *PQ =
- new BURegReductionPriorityQueue(*IS->MF, false, false, TII, TRI, 0);
+ new BURegReductionPriorityQueue(*IS->MF, false, false, TII, TRI, nullptr);
ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
PQ->setScheduleDAG(SD);
return SD;
@@ -3002,7 +2996,7 @@ llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
const TargetRegisterInfo *TRI = TM.getRegisterInfo();
SrcRegReductionPriorityQueue *PQ =
- new SrcRegReductionPriorityQueue(*IS->MF, false, true, TII, TRI, 0);
+ new SrcRegReductionPriorityQueue(*IS->MF, false, true, TII, TRI, nullptr);
ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
PQ->setScheduleDAG(SD);
return SD;
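
One less obvious cleanup in ScheduleDAGRRList.cpp is dropping the hand-written copy constructors from the queue-sort functors (reverse_sort, bu_ls_rr_sort, src_ls_rr_sort, hybrid_ls_rr_sort, ilp_ls_rr_sort): each only copied its SPQ pointer, which the implicitly generated copy constructor already does. A self-contained sketch with stand-in names:

    #include <cassert>

    struct RegReductionPQBaseStub {};  // opaque stand-in, not the real class

    // Illustrative functor mirroring bu_ls_rr_sort and friends: it only holds
    // a pointer, so the implicitly generated copy constructor already performs
    // the member-wise copy that the deleted hand-written constructors spelled
    // out.
    struct QueueSortStub {
      RegReductionPQBaseStub *SPQ;
      explicit QueueSortStub(RegReductionPQBaseStub *spq) : SPQ(spq) {}
    };

    int main() {
      RegReductionPQBaseStub PQ;
      QueueSortStub A(&PQ);
      QueueSortStub B = A;   // implicit copy, same effect as the removed code
      assert(A.SPQ == B.SPQ);
      return 0;
    }
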
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
index c1893c9..de910b7 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
#include "InstrEmitter.h"
#include "SDNodeDbgValue.h"
@@ -35,6 +34,8 @@
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "pre-RA-sched"
+
STATISTIC(LoadsClustered, "Number of loads clustered together");
// This allows latency based scheduler to notice high latency instructions
@@ -46,7 +47,7 @@ static cl::opt<int> HighLatencyCycles(
"instructions take for targets with no itinerary"));
ScheduleDAGSDNodes::ScheduleDAGSDNodes(MachineFunction &mf)
- : ScheduleDAG(mf), BB(0), DAG(0),
+ : ScheduleDAG(mf), BB(nullptr), DAG(nullptr),
InstrItins(mf.getTarget().getInstrItineraryData()) {}
/// Run - perform scheduling.
@@ -67,12 +68,12 @@ void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb) {
///
SUnit *ScheduleDAGSDNodes::newSUnit(SDNode *N) {
#ifndef NDEBUG
- const SUnit *Addr = 0;
+ const SUnit *Addr = nullptr;
if (!SUnits.empty())
Addr = &SUnits[0];
#endif
SUnits.push_back(SUnit(N, (unsigned)SUnits.size()));
- assert((Addr == 0 || Addr == &SUnits[0]) &&
+ assert((Addr == nullptr || Addr == &SUnits[0]) &&
"SUnits std::vector reallocated on the fly!");
SUnits.back().OrigNode = &SUnits.back();
SUnit *SU = &SUnits.back();
@@ -142,8 +143,8 @@ static void CloneNodeWithValues(SDNode *N, SelectionDAG *DAG,
if (ExtraOper.getNode())
Ops.push_back(ExtraOper);
- SDVTList VTList = DAG->getVTList(&VTs[0], VTs.size());
- MachineSDNode::mmo_iterator Begin = 0, End = 0;
+ SDVTList VTList = DAG->getVTList(VTs);
+ MachineSDNode::mmo_iterator Begin = nullptr, End = nullptr;
MachineSDNode *MN = dyn_cast<MachineSDNode>(N);
// Store memory references.
@@ -152,7 +153,7 @@ static void CloneNodeWithValues(SDNode *N, SelectionDAG *DAG,
End = MN->memoperands_end();
}
- DAG->MorphNodeTo(N, N->getOpcode(), VTList, &Ops[0], Ops.size());
+ DAG->MorphNodeTo(N, N->getOpcode(), VTList, Ops);
// Reset the memory references
if (MN)
@@ -205,7 +206,7 @@ static void RemoveUnusedGlue(SDNode *N, SelectionDAG *DAG) {
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
- SDNode *Chain = 0;
+ SDNode *Chain = nullptr;
unsigned NumOps = Node->getNumOperands();
if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
Chain = Node->getOperand(NumOps-1).getNode();
@@ -271,7 +272,7 @@ void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
// Cluster loads by adding MVT::Glue outputs and inputs. This also
// ensure they are scheduled in order of increasing addresses.
SDNode *Lead = Loads[0];
- SDValue InGlue = SDValue(0, 0);
+ SDValue InGlue = SDValue(nullptr, 0);
if (AddGlue(Lead, InGlue, true, DAG))
InGlue = SDValue(Lead, Lead->getNumValues() - 1);
for (unsigned I = 1, E = Loads.size(); I != E; ++I) {
@@ -572,7 +573,7 @@ void ScheduleDAGSDNodes::RegDefIter::Advance() {
return; // Found a normal regdef.
}
Node = Node->getGluedNode();
- if (Node == NULL) {
+ if (!Node) {
return; // No values left to visit.
}
InitNodeNumDefs();
@@ -743,13 +744,13 @@ ProcessSourceNode(SDNode *N, SelectionDAG *DAG, InstrEmitter &Emitter,
if (Emitter.getInsertPos() == BB->begin() || BB->back().isPHI() ||
// Fast-isel may have inserted some instructions, in which case the
// BB->back().isPHI() test will not fire when we want it to.
- prior(Emitter.getInsertPos())->isPHI()) {
+ std::prev(Emitter.getInsertPos())->isPHI()) {
// Did not insert any instruction.
- Orders.push_back(std::make_pair(Order, (MachineInstr*)0));
+ Orders.push_back(std::make_pair(Order, (MachineInstr*)nullptr));
return;
}
- Orders.push_back(std::make_pair(Order, prior(Emitter.getInsertPos())));
+ Orders.push_back(std::make_pair(Order, std::prev(Emitter.getInsertPos())));
ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, Order);
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
index 2ff37e0..39ebadf 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
@@ -117,13 +117,13 @@ namespace llvm {
virtual MachineBasicBlock*
EmitSchedule(MachineBasicBlock::iterator &InsertPos);
- virtual void dumpNode(const SUnit *SU) const;
+ void dumpNode(const SUnit *SU) const override;
void dumpSchedule() const;
- virtual std::string getGraphNodeLabel(const SUnit *SU) const;
+ std::string getGraphNodeLabel(const SUnit *SU) const override;
- virtual std::string getDAGName() const;
+ std::string getDAGName() const override;
virtual void getCustomGraphFeatures(GraphWriter<ScheduleDAG*> &GW) const;
@@ -139,7 +139,7 @@ namespace llvm {
public:
RegDefIter(const SUnit *SU, const ScheduleDAGSDNodes *SD);
- bool IsValid() const { return Node != NULL; }
+ bool IsValid() const { return Node != nullptr; }
MVT GetValue() const {
assert(IsValid() && "bad iterator");
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp
index 58aa1fe..4589b0c 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp
@@ -18,7 +18,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "pre-RA-sched"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "ScheduleDAGSDNodes.h"
#include "llvm/ADT/Statistic.h"
@@ -35,6 +34,8 @@
#include <climits>
using namespace llvm;
+#define DEBUG_TYPE "pre-RA-sched"
+
STATISTIC(NumNoops , "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
@@ -72,7 +73,8 @@ public:
: ScheduleDAGSDNodes(mf), AvailableQueue(availqueue), AA(aa) {
const TargetMachine &tm = mf.getTarget();
- HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(&tm, this);
+ HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(
+ tm.getSubtargetImpl(), this);
}
~ScheduleDAGVLIW() {
@@ -80,7 +82,7 @@ public:
delete AvailableQueue;
}
- void Schedule();
+ void Schedule() override;
private:
void releaseSucc(SUnit *SU, const SDep &D);
@@ -120,7 +122,7 @@ void ScheduleDAGVLIW::releaseSucc(SUnit *SU, const SDep &D) {
dbgs() << "*** Scheduling failed! ***\n";
SuccSU->dump(this);
dbgs() << " has been released too many times!\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
#endif
assert(!D.isWeak() && "unexpected artificial DAG edge");
@@ -204,12 +206,12 @@ void ScheduleDAGVLIW::listScheduleTopDown() {
// don't advance the hazard recognizer.
if (AvailableQueue->empty()) {
// Reset DFA state.
- AvailableQueue->scheduledNode(0);
+ AvailableQueue->scheduledNode(nullptr);
++CurCycle;
continue;
}
- SUnit *FoundSUnit = 0;
+ SUnit *FoundSUnit = nullptr;
bool HasNoopHazards = false;
while (!AvailableQueue->empty()) {
@@ -256,7 +258,7 @@ void ScheduleDAGVLIW::listScheduleTopDown() {
// processors without pipeline interlocks and other cases.
DEBUG(dbgs() << "*** Emitting noop\n");
HazardRec->EmitNoop();
- Sequence.push_back(0); // NULL here means noop
+ Sequence.push_back(nullptr); // NULL here means noop
++NumNoops;
++CurCycle;
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 00ffe00..16f7349 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -18,17 +18,15 @@
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
@@ -50,6 +48,7 @@
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include <algorithm>
#include <cmath>
+
using namespace llvm;
/// makeVTList - Return an instance of the SDVTList struct initialized with the
@@ -149,33 +148,50 @@ bool ISD::isBuildVectorAllZeros(const SDNode *N) {
if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
- unsigned i = 0, e = N->getNumOperands();
-
- // Skip over all of the undef values.
- while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
- ++i;
+ bool IsAllUndef = true;
+ for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i) {
+ if (N->getOperand(i).getOpcode() == ISD::UNDEF)
+ continue;
+ IsAllUndef = false;
+ // Do not accept build_vectors that aren't all constants or which have non-0
+ // elements. We have to be a bit careful here, as the type of the constant
+ // may not be the same as the type of the vector elements due to type
+ // legalization (the elements are promoted to a legal type for the target
+ // and a vector of a type may be legal when the base element type is not).
+ // We only want to check enough bits to cover the vector elements, because
+ // we care if the resultant vector is all zeros, not whether the individual
+ // constants are.
+ SDValue Zero = N->getOperand(i);
+ unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Zero)) {
+ if (CN->getAPIntValue().countTrailingZeros() < EltSize)
+ return false;
+ } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Zero)) {
+ if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
+ return false;
+ } else
+ return false;
+ }
// Do not accept an all-undef vector.
- if (i == e) return false;
+ if (IsAllUndef)
+ return false;
+ return true;
+}
- // Do not accept build_vectors that aren't all constants or which have non-0
- // elements.
- SDValue Zero = N->getOperand(i);
- if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Zero)) {
- if (!CN->isNullValue())
- return false;
- } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Zero)) {
- if (!CFPN->getValueAPF().isPosZero())
- return false;
- } else
+/// \brief Return true if the specified node is a BUILD_VECTOR node of
+/// all ConstantSDNode or undef.
+bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
+ if (N->getOpcode() != ISD::BUILD_VECTOR)
return false;
- // Okay, we have at least one 0 value, check to see if the rest match or are
- // undefs.
- for (++i; i != e; ++i)
- if (N->getOperand(i) != Zero &&
- N->getOperand(i).getOpcode() != ISD::UNDEF)
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
+ SDValue Op = N->getOperand(i);
+ if (Op.getOpcode() == ISD::UNDEF)
+ continue;
+ if (!isa<ConstantSDNode>(Op))
return false;
+ }
return true;
}
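
The rewritten isBuildVectorAllZeros above no longer compares each operand against a single zero value; it accepts a constant whenever its low element-size bits are zero, because type legalization may have promoted the per-element constants to a wider integer type. A rough, self-contained illustration of that trailing-zeros test, using plain uint64_t instead of APInt; the helper names are invented:

    #include <cstdint>
    #include <iostream>

    // Portable trailing-zero count, standing in for APInt::countTrailingZeros().
    static unsigned countTrailingZeros64(uint64_t V) {
      if (V == 0)
        return 64;
      unsigned N = 0;
      while ((V & 1) == 0) {
        V >>= 1;
        ++N;
      }
      return N;
    }

    // Mirrors the new check: a constant counts as a zero element as long as its
    // low EltSizeBits bits are zero, even if the constant was promoted to a
    // wider integer type during type legalization.
    static bool coversZeroElement(uint64_t C, unsigned EltSizeBits) {
      return countTrailingZeros64(C) >= EltSizeBits;
    }

    int main() {
      // 0x100 has 8 trailing zero bits: acceptable as a promoted i8 zero
      // element, but not as an i16 element.
      std::cout << coversZeroElement(0x100, 8) << "\n";  // prints "1"
      std::cout << coversZeroElement(0x100, 16) << "\n"; // prints "0"
      std::cout << coversZeroElement(0, 64) << "\n";     // prints "1"
      return 0;
    }
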
@@ -217,6 +233,21 @@ bool ISD::allOperandsUndef(const SDNode *N) {
return true;
}
+ISD::NodeType ISD::getExtForLoadExtType(ISD::LoadExtType ExtType) {
+ switch (ExtType) {
+ case ISD::EXTLOAD:
+ return ISD::ANY_EXTEND;
+ case ISD::SEXTLOAD:
+ return ISD::SIGN_EXTEND;
+ case ISD::ZEXTLOAD:
+ return ISD::ZERO_EXTEND;
+ default:
+ break;
+ }
+
+ llvm_unreachable("Invalid LoadExtType");
+}
+
/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
/// when given the operation for (X op Y).
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
@@ -335,29 +366,42 @@ static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
- const SDValue *Ops, unsigned NumOps) {
- for (; NumOps; --NumOps, ++Ops) {
- ID.AddPointer(Ops->getNode());
- ID.AddInteger(Ops->getResNo());
+ ArrayRef<SDValue> Ops) {
+ for (auto& Op : Ops) {
+ ID.AddPointer(Op.getNode());
+ ID.AddInteger(Op.getResNo());
}
}
/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
- const SDUse *Ops, unsigned NumOps) {
- for (; NumOps; --NumOps, ++Ops) {
- ID.AddPointer(Ops->getNode());
- ID.AddInteger(Ops->getResNo());
+ ArrayRef<SDUse> Ops) {
+ for (auto& Op : Ops) {
+ ID.AddPointer(Op.getNode());
+ ID.AddInteger(Op.getResNo());
}
}
-static void AddNodeIDNode(FoldingSetNodeID &ID,
- unsigned short OpC, SDVTList VTList,
- const SDValue *OpList, unsigned N) {
+static void AddBinaryNodeIDCustom(FoldingSetNodeID &ID, bool nuw, bool nsw,
+ bool exact) {
+ ID.AddBoolean(nuw);
+ ID.AddBoolean(nsw);
+ ID.AddBoolean(exact);
+}
+
+/// AddBinaryNodeIDCustom - Add BinarySDNodes special infos
+static void AddBinaryNodeIDCustom(FoldingSetNodeID &ID, unsigned Opcode,
+ bool nuw, bool nsw, bool exact) {
+ if (isBinOpWithFlags(Opcode))
+ AddBinaryNodeIDCustom(ID, nuw, nsw, exact);
+}
+
+static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
+ SDVTList VTList, ArrayRef<SDValue> OpList) {
AddNodeIDOpcode(ID, OpC);
AddNodeIDValueTypes(ID, VTList);
- AddNodeIDOperands(ID, OpList, N);
+ AddNodeIDOperands(ID, OpList);
}
/// AddNodeIDCustom - If this is an SDNode with special info, add this info to
@@ -369,9 +413,12 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
llvm_unreachable("Should only be used on nodes with operands");
default: break; // Normal nodes don't need extra info.
case ISD::TargetConstant:
- case ISD::Constant:
- ID.AddPointer(cast<ConstantSDNode>(N)->getConstantIntValue());
+ case ISD::Constant: {
+ const ConstantSDNode *C = cast<ConstantSDNode>(N);
+ ID.AddPointer(C->getConstantIntValue());
+ ID.AddBoolean(C->isOpaque());
break;
+ }
case ISD::TargetConstantFP:
case ISD::ConstantFP: {
ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
@@ -442,7 +489,21 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
ID.AddInteger(ST->getPointerInfo().getAddrSpace());
break;
}
+ case ISD::SDIV:
+ case ISD::UDIV:
+ case ISD::SRA:
+ case ISD::SRL:
+ case ISD::MUL:
+ case ISD::ADD:
+ case ISD::SUB:
+ case ISD::SHL: {
+ const BinaryWithFlagsSDNode *BinNode = cast<BinaryWithFlagsSDNode>(N);
+ AddBinaryNodeIDCustom(ID, N->getOpcode(), BinNode->hasNoUnsignedWrap(),
+ BinNode->hasNoSignedWrap(), BinNode->isExact());
+ break;
+ }
case ISD::ATOMIC_CMP_SWAP:
+ case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
case ISD::ATOMIC_SWAP:
case ISD::ATOMIC_LOAD_ADD:
case ISD::ATOMIC_LOAD_SUB:
@@ -496,7 +557,7 @@ static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
// Add the return value info.
AddNodeIDValueTypes(ID, N->getVTList());
// Add the operand info.
- AddNodeIDOperands(ID, N->op_begin(), N->getNumOperands());
+ AddNodeIDOperands(ID, N->ops());
// Handle SDNode leafs with special info.
AddNodeIDCustom(ID, N);
@@ -574,7 +635,7 @@ void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {
SDNode *N = DeadNodes.pop_back_val();
for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
- DUL->NodeDeleted(N, 0);
+ DUL->NodeDeleted(N, nullptr);
// Take the node out of the appropriate CSE map.
RemoveNodeFromCSEMaps(N);
@@ -660,8 +721,8 @@ bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
case ISD::CONDCODE:
assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
"Cond code doesn't exist!");
- Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != 0;
- CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = 0;
+ Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
+ CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
break;
case ISD::ExternalSymbol:
Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
@@ -678,8 +739,8 @@ bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
if (VT.isExtended()) {
Erased = ExtendedValueTypeNodes.erase(VT);
} else {
- Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != 0;
- ValueTypeNodes[VT.getSimpleVT().SimpleTy] = 0;
+ Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
+ ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
}
break;
}
@@ -741,11 +802,11 @@ SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
void *&InsertPos) {
if (doNotCSE(N))
- return 0;
+ return nullptr;
SDValue Ops[] = { Op };
FoldingSetNodeID ID;
- AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 1);
+ AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
AddNodeIDCustom(ID, N);
SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
return Node;
@@ -759,11 +820,11 @@ SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
SDValue Op1, SDValue Op2,
void *&InsertPos) {
if (doNotCSE(N))
- return 0;
+ return nullptr;
SDValue Ops[] = { Op1, Op2 };
FoldingSetNodeID ID;
- AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 2);
+ AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
AddNodeIDCustom(ID, N);
SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
return Node;
@@ -774,14 +835,13 @@ SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
-SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
- const SDValue *Ops,unsigned NumOps,
+SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
void *&InsertPos) {
if (doNotCSE(N))
- return 0;
+ return nullptr;
FoldingSetNodeID ID;
- AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, NumOps);
+ AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
AddNodeIDCustom(ID, N);
SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
return Node;
@@ -877,18 +937,16 @@ unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
- : TM(tm), TSI(*tm.getSelectionDAGInfo()), TTI(0), TLI(0), OptLevel(OL),
+ : TM(tm), TSI(*tm.getSelectionDAGInfo()), TLI(nullptr), OptLevel(OL),
EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
- UpdateListeners(0) {
+ UpdateListeners(nullptr) {
AllNodes.push_back(&EntryNode);
DbgInfo = new SDDbgInfo();
}
-void SelectionDAG::init(MachineFunction &mf, const TargetTransformInfo *tti,
- const TargetLowering *tli) {
+void SelectionDAG::init(MachineFunction &mf, const TargetLowering *tli) {
MF = &mf;
- TTI = tti;
TLI = tli;
Context = &mf.getFunction()->getContext();
}
@@ -906,6 +964,25 @@ void SelectionDAG::allnodes_clear() {
DeallocateNode(AllNodes.begin());
}
+BinarySDNode *SelectionDAG::GetBinarySDNode(unsigned Opcode, SDLoc DL,
+ SDVTList VTs, SDValue N1,
+ SDValue N2, bool nuw, bool nsw,
+ bool exact) {
+ if (isBinOpWithFlags(Opcode)) {
+ BinaryWithFlagsSDNode *FN = new (NodeAllocator) BinaryWithFlagsSDNode(
+ Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2);
+ FN->setHasNoUnsignedWrap(nuw);
+ FN->setHasNoSignedWrap(nsw);
+ FN->setIsExact(exact);
+
+ return FN;
+ }
+
+ BinarySDNode *N = new (NodeAllocator)
+ BinarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2);
+ return N;
+}
+
void SelectionDAG::clear() {
allnodes_clear();
OperandAllocator.Reset();
@@ -915,11 +992,11 @@ void SelectionDAG::clear() {
ExternalSymbols.clear();
TargetExternalSymbols.clear();
std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
- static_cast<CondCodeSDNode*>(0));
+ static_cast<CondCodeSDNode*>(nullptr));
std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
- static_cast<SDNode*>(0));
+ static_cast<SDNode*>(nullptr));
- EntryNode.UseList = 0;
+ EntryNode.UseList = nullptr;
AllNodes.push_back(&EntryNode);
Root = getEntryNode();
DbgInfo->clear();
@@ -943,6 +1020,15 @@ SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
getNode(ISD::TRUNCATE, DL, VT, Op);
}
+SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, SDLoc SL, EVT VT,
+ EVT OpVT) {
+ if (VT.bitsLE(Op.getValueType()))
+ return getNode(ISD::TRUNCATE, SL, VT, Op);
+
+ TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
+ return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
+}
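getBoolExtOrTrunc truncates when the destination type is narrower and otherwise widens with whatever extension matches the target's boolean convention (zero-extend for 0/1 booleans, sign-extend for 0/-1). A scalar model of that choice, with the enum a stand-in for TargetLowering::BooleanContent rather than the real API:

    #include <cassert>
    #include <cstdint>

    enum class BoolContent { ZeroOrOne, ZeroOrNegativeOne, Undefined };

    // Widen an i8 boolean to i32 the way getBoolExtOrTrunc would pick the opcode.
    int32_t boolExt(int8_t B, BoolContent C) {
      if (C == BoolContent::ZeroOrNegativeOne)
        return static_cast<int8_t>(B);  // SIGN_EXTEND: -1 stays -1
      return static_cast<uint8_t>(B);   // ZERO_EXTEND (ANY_EXTEND may pick either)
    }

    int main() {
      assert(boolExt(int8_t(-1), BoolContent::ZeroOrNegativeOne) == -1);
      assert(boolExt(int8_t(1), BoolContent::ZeroOrOne) == 1);
      return 0;
    }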
+
SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, SDLoc DL, EVT VT) {
assert(!VT.isVector() &&
"getZeroExtendInReg should use the vector element type instead of "
@@ -955,6 +1041,36 @@ SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, SDLoc DL, EVT VT) {
getConstant(Imm, Op.getValueType()));
}
+SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
+ assert(VT.isVector() && "This DAG node is restricted to vector types.");
+ assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
+ "The sizes of the input and result must match in order to perform the "
+ "extend in-register.");
+ assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
+ "The destination vector type must have fewer lanes than the input.");
+ return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op);
+}
+
+SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
+ assert(VT.isVector() && "This DAG node is restricted to vector types.");
+ assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
+ "The sizes of the input and result must match in order to perform the "
+ "extend in-register.");
+ assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
+ "The destination vector type must have fewer lanes than the input.");
+ return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op);
+}
+
+SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
+ assert(VT.isVector() && "This DAG node is restricted to vector types.");
+ assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
+ "The sizes of the input and result must match in order to perform the "
+ "extend in-register.");
+ assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
+ "The destination vector type must have fewer lanes than the input.");
+ return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op);
+}
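All three *_EXTEND_VECTOR_INREG helpers keep the total register width but widen only the low lanes, which is what the asserts enforce: same bit size, fewer result elements. A scalar-loop model of the sign-extending flavor (v4i32 to v2i64), not the DAG node itself:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // The low 'ResultLanes' input lanes are sign-extended; the high lanes drop out.
    std::vector<int64_t> signExtendVectorInReg(const std::vector<int32_t> &In,
                                               unsigned ResultLanes) {
      assert(ResultLanes < In.size() && "result must have fewer lanes");
      std::vector<int64_t> Out;
      for (unsigned i = 0; i != ResultLanes; ++i)
        Out.push_back(static_cast<int64_t>(In[i])); // implicit sign extension
      return Out;
    }

    int main() {
      std::vector<int64_t> V = signExtendVectorInReg({-1, 7, 123, 456}, 2);
      assert(V[0] == -1 && V[1] == 7);
      return 0;
    }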
+
/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
///
SDValue SelectionDAG::getNOT(SDLoc DL, SDValue Val, EVT VT) {
@@ -964,19 +1080,37 @@ SDValue SelectionDAG::getNOT(SDLoc DL, SDValue Val, EVT VT) {
return getNode(ISD::XOR, DL, VT, Val, NegOne);
}
-SDValue SelectionDAG::getConstant(uint64_t Val, EVT VT, bool isT) {
+SDValue SelectionDAG::getLogicalNOT(SDLoc DL, SDValue Val, EVT VT) {
+ EVT EltVT = VT.getScalarType();
+ SDValue TrueValue;
+ switch (TLI->getBooleanContents(VT)) {
+ case TargetLowering::ZeroOrOneBooleanContent:
+ case TargetLowering::UndefinedBooleanContent:
+ TrueValue = getConstant(1, VT);
+ break;
+ case TargetLowering::ZeroOrNegativeOneBooleanContent:
+ TrueValue = getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()),
+ VT);
+ break;
+ }
+ return getNode(ISD::XOR, DL, VT, Val, TrueValue);
+}
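getLogicalNOT flips a boolean by XOR-ing with the target's notion of "true": 1 for zero-or-one booleans, all-ones for zero-or-negative-one booleans. The arithmetic behind both conventions:

    #include <cassert>
    #include <cstdint>

    int main() {
      // 0/1 booleans: NOT x == x ^ 1.
      assert((0 ^ 1) == 1 && (1 ^ 1) == 0);

      // 0/-1 booleans (8-bit lanes): NOT x == x ^ 0xFF.
      uint8_t F = 0x00, T = 0xFF;
      assert(uint8_t(F ^ 0xFF) == 0xFF && uint8_t(T ^ 0xFF) == 0x00);
      return 0;
    }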
+
+SDValue SelectionDAG::getConstant(uint64_t Val, EVT VT, bool isT, bool isO) {
EVT EltVT = VT.getScalarType();
assert((EltVT.getSizeInBits() >= 64 ||
(uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
"getConstant with a uint64_t value that doesn't fit in the type!");
- return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT);
+ return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT, isO);
}
-SDValue SelectionDAG::getConstant(const APInt &Val, EVT VT, bool isT) {
- return getConstant(*ConstantInt::get(*Context, Val), VT, isT);
+SDValue SelectionDAG::getConstant(const APInt &Val, EVT VT, bool isT, bool isO)
+{
+ return getConstant(*ConstantInt::get(*Context, Val), VT, isT, isO);
}
-SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT) {
+SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT,
+ bool isO) {
assert(VT.isInteger() && "Cannot create FP integer constant!");
EVT EltVT = VT.getScalarType();
@@ -1018,7 +1152,7 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT) {
for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
.trunc(ViaEltSizeInBits),
- ViaEltVT, isT));
+ ViaEltVT, isT, isO));
}
// EltParts is currently in little endian order. If we actually want
@@ -1039,7 +1173,7 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT) {
SDValue Result = getNode(ISD::BITCAST, SDLoc(), VT,
getNode(ISD::BUILD_VECTOR, SDLoc(), ViaVecVT,
- &Ops[0], Ops.size()));
+ Ops));
return Result;
}
@@ -1047,16 +1181,17 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT) {
"APInt size does not match type size!");
unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
FoldingSetNodeID ID;
- AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
+ AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
ID.AddPointer(Elt);
- void *IP = 0;
- SDNode *N = NULL;
+ ID.AddBoolean(isO);
+ void *IP = nullptr;
+ SDNode *N = nullptr;
if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
if (!VT.isVector())
return SDValue(N, 0);
if (!N) {
- N = new (NodeAllocator) ConstantSDNode(isT, Elt, EltVT);
+ N = new (NodeAllocator) ConstantSDNode(isT, isO, Elt, EltVT);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
}
@@ -1065,7 +1200,7 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT) {
if (VT.isVector()) {
SmallVector<SDValue, 8> Ops;
Ops.assign(VT.getVectorNumElements(), Result);
- Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, &Ops[0], Ops.size());
+ Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
}
return Result;
}
@@ -1089,10 +1224,10 @@ SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){
// we don't have issues with SNANs.
unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
FoldingSetNodeID ID;
- AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
+ AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
ID.AddPointer(&V);
- void *IP = 0;
- SDNode *N = NULL;
+ void *IP = nullptr;
+ SDNode *N = nullptr;
if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
if (!VT.isVector())
return SDValue(N, 0);
@@ -1108,7 +1243,7 @@ SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){
SmallVector<SDValue, 8> Ops;
Ops.assign(VT.getVectorNumElements(), Result);
// FIXME SDLoc info might be appropriate here
- Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, &Ops[0], Ops.size());
+ Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
}
return Result;
}
@@ -1143,26 +1278,19 @@ SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
if (BitWidth < 64)
Offset = SignExtend64(Offset, BitWidth);
- const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
- if (!GVar) {
- // If GV is an alias then use the aliasee for determining thread-localness.
- if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
- GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false));
- }
-
unsigned Opc;
- if (GVar && GVar->isThreadLocal())
+ if (GV->isThreadLocal())
Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
else
Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
FoldingSetNodeID ID;
- AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
+ AddNodeIDNode(ID, Opc, getVTList(VT), None);
ID.AddPointer(GV);
ID.AddInteger(Offset);
ID.AddInteger(TargetFlags);
ID.AddInteger(GV->getType()->getAddressSpace());
- void *IP = 0;
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -1177,9 +1305,9 @@ SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
FoldingSetNodeID ID;
- AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
+ AddNodeIDNode(ID, Opc, getVTList(VT), None);
ID.AddInteger(FI);
- void *IP = 0;
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -1195,10 +1323,10 @@ SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
"Cannot set target flags on target-independent jump tables");
unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
FoldingSetNodeID ID;
- AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
+ AddNodeIDNode(ID, Opc, getVTList(VT), None);
ID.AddInteger(JTI);
ID.AddInteger(TargetFlags);
- void *IP = 0;
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -1220,12 +1348,12 @@ SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
FoldingSetNodeID ID;
- AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
+ AddNodeIDNode(ID, Opc, getVTList(VT), None);
ID.AddInteger(Alignment);
ID.AddInteger(Offset);
ID.AddPointer(C);
ID.AddInteger(TargetFlags);
- void *IP = 0;
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -1248,12 +1376,12 @@ SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
FoldingSetNodeID ID;
- AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
+ AddNodeIDNode(ID, Opc, getVTList(VT), None);
ID.AddInteger(Alignment);
ID.AddInteger(Offset);
C->addSelectionDAGCSEId(ID);
ID.AddInteger(TargetFlags);
- void *IP = 0;
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -1267,11 +1395,11 @@ SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
unsigned char TargetFlags) {
FoldingSetNodeID ID;
- AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), 0, 0);
+ AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
ID.AddInteger(Index);
ID.AddInteger(Offset);
ID.AddInteger(TargetFlags);
- void *IP = 0;
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -1284,9 +1412,9 @@ SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
FoldingSetNodeID ID;
- AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), 0, 0);
+ AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
ID.AddPointer(MBB);
- void *IP = 0;
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -1333,7 +1461,7 @@ SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
if ((unsigned)Cond >= CondCodeNodes.size())
CondCodeNodes.resize(Cond+1);
- if (CondCodeNodes[Cond] == 0) {
+ if (!CondCodeNodes[Cond]) {
CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
CondCodeNodes[Cond] = N;
AllNodes.push_back(N);
@@ -1407,6 +1535,11 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1,
N1 = getUNDEF(VT);
commuteShuffle(N1, N2, MaskVec);
}
+ // Reset our undef status after accounting for the mask.
+ N2Undef = N2.getOpcode() == ISD::UNDEF;
+ // Re-check whether both sides ended up undef.
+ if (N1.getOpcode() == ISD::UNDEF && N2Undef)
+ return getUNDEF(VT);
// If Identity shuffle return that node.
bool Identity = true;
@@ -1416,13 +1549,45 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1,
if (Identity && NElts)
return N1;
+ // Shuffling a constant splat doesn't change the result.
+ if (N2Undef) {
+ SDValue V = N1;
+
+ // Look through any bitcasts. We check that these don't change the number
+ // (and size) of elements and only change their type.
+ while (V.getOpcode() == ISD::BITCAST)
+ V = V->getOperand(0);
+
+ // A splat should always show up as a build vector node.
+ if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
+ BitVector UndefElements;
+ SDValue Splat = BV->getSplatValue(&UndefElements);
+ // If this is a splat of an undef, shuffling it is also undef.
+ if (Splat && Splat.getOpcode() == ISD::UNDEF)
+ return getUNDEF(VT);
+
+ // We only have a splat which can skip shuffles if there is a splatted
+ // value and no undef lanes rearranged by the shuffle.
+ if (Splat && UndefElements.none()) {
+ // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
+ // number of elements matches or the splatted value is a zero constant.
+ if (V.getValueType().getVectorNumElements() ==
+ VT.getVectorNumElements())
+ return N1;
+ if (auto *C = dyn_cast<ConstantSDNode>(Splat))
+ if (C->isNullValue())
+ return N1;
+ }
+ }
+ }
+
FoldingSetNodeID ID;
SDValue Ops[2] = { N1, N2 };
- AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops, 2);
+ AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
for (unsigned i = 0; i != NElts; ++i)
ID.AddInteger(MaskVec[i]);
- void* IP = 0;
+ void* IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -1441,6 +1606,27 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1,
return SDValue(N, 0);
}
+SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
+ MVT VT = SV.getSimpleValueType(0);
+ unsigned NumElems = VT.getVectorNumElements();
+ SmallVector<int, 8> MaskVec;
+
+ for (unsigned i = 0; i != NumElems; ++i) {
+ int Idx = SV.getMaskElt(i);
+ if (Idx >= 0) {
+ if (Idx < (int)NumElems)
+ Idx += NumElems;
+ else
+ Idx -= NumElems;
+ }
+ MaskVec.push_back(Idx);
+ }
+
+ SDValue Op0 = SV.getOperand(0);
+ SDValue Op1 = SV.getOperand(1);
+ return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, &MaskVec[0]);
+}
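getCommutedVectorShuffle swaps the two shuffle operands, so every defined mask index has to move to the other half: indices below NumElems gain NumElems and the rest lose it, while negative (undef) entries pass through. A worked 4-lane example of that mask rewrite:

    #include <cassert>
    #include <vector>

    std::vector<int> commuteMask(const std::vector<int> &Mask, int NumElems) {
      std::vector<int> Out;
      for (int Idx : Mask) {
        if (Idx >= 0)
          Idx = Idx < NumElems ? Idx + NumElems : Idx - NumElems;
        Out.push_back(Idx); // -1 (undef) is left alone
      }
      return Out;
    }

    int main() {
      // shuffle(A, B, <0, 5, -1, 2>) == shuffle(B, A, <4, 1, -1, 6>)
      assert((commuteMask({0, 5, -1, 2}, 4) == std::vector<int>{4, 1, -1, 6}));
      return 0;
    }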
+
SDValue SelectionDAG::getConvertRndSat(EVT VT, SDLoc dl,
SDValue Val, SDValue DTy,
SDValue STy, SDValue Rnd, SDValue Sat,
@@ -1453,14 +1639,14 @@ SDValue SelectionDAG::getConvertRndSat(EVT VT, SDLoc dl,
FoldingSetNodeID ID;
SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
- AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), &Ops[0], 5);
- void* IP = 0;
+ AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), Ops);
+ void* IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl.getIROrder(),
dl.getDebugLoc(),
- Ops, 5, Code);
+ Ops, Code);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@@ -1468,9 +1654,9 @@ SDValue SelectionDAG::getConvertRndSat(EVT VT, SDLoc dl,
SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
FoldingSetNodeID ID;
- AddNodeIDNode(ID, ISD::Register, getVTList(VT), 0, 0);
+ AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
ID.AddInteger(RegNo);
- void *IP = 0;
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -1482,9 +1668,9 @@ SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
FoldingSetNodeID ID;
- AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), 0, 0);
+ AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
ID.AddPointer(RegMask);
- void *IP = 0;
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -1497,9 +1683,9 @@ SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
SDValue SelectionDAG::getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label) {
FoldingSetNodeID ID;
SDValue Ops[] = { Root };
- AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), &Ops[0], 1);
+ AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), Ops);
ID.AddPointer(Label);
- void *IP = 0;
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -1518,11 +1704,11 @@ SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
FoldingSetNodeID ID;
- AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
+ AddNodeIDNode(ID, Opc, getVTList(VT), None);
ID.AddPointer(BA);
ID.AddInteger(Offset);
ID.AddInteger(TargetFlags);
- void *IP = 0;
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -1538,10 +1724,10 @@ SDValue SelectionDAG::getSrcValue(const Value *V) {
"SrcValue is not a pointer?");
FoldingSetNodeID ID;
- AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), 0, 0);
+ AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
ID.AddPointer(V);
- void *IP = 0;
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -1554,10 +1740,10 @@ SDValue SelectionDAG::getSrcValue(const Value *V) {
/// getMDNode - Return an MDNodeSDNode which holds an MDNode.
SDValue SelectionDAG::getMDNode(const MDNode *MD) {
FoldingSetNodeID ID;
- AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), 0, 0);
+ AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
ID.AddPointer(MD);
- void *IP = 0;
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -1572,11 +1758,11 @@ SDValue SelectionDAG::getAddrSpaceCast(SDLoc dl, EVT VT, SDValue Ptr,
unsigned SrcAS, unsigned DestAS) {
SDValue Ops[] = {Ptr};
FoldingSetNodeID ID;
- AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), &Ops[0], 1);
+ AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
ID.AddInteger(SrcAS);
ID.AddInteger(DestAS);
- void *IP = 0;
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -1640,7 +1826,8 @@ SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
case ISD::SETTRUE:
case ISD::SETTRUE2: {
const TargetLowering *TLI = TM.getTargetLowering();
- TargetLowering::BooleanContent Cnt = TLI->getBooleanContents(VT.isVector());
+ TargetLowering::BooleanContent Cnt =
+ TLI->getBooleanContents(N1->getValueType(0));
return getConstant(
Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, VT);
}
@@ -1755,17 +1942,14 @@ bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
unsigned Depth) const {
APInt KnownZero, KnownOne;
- ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(Op, KnownZero, KnownOne, Depth);
return (KnownZero & Mask) == Mask;
}
-/// ComputeMaskedBits - Determine which of the bits specified in Mask are
-/// known to be either zero or one and return them in the KnownZero/KnownOne
-/// bitsets. This code only analyzes bits in Mask, in order to short-circuit
-/// processing.
-void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
- APInt &KnownOne, unsigned Depth) const {
+/// Determine which bits of Op are known to be either zero or one and return
+/// them in the KnownZero/KnownOne bitsets.
+void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
+ APInt &KnownOne, unsigned Depth) const {
const TargetLowering *TLI = TM.getTargetLowering();
unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
@@ -1780,48 +1964,40 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
// We know all of the bits for a constant!
KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
KnownZero = ~KnownOne;
- return;
+ break;
case ISD::AND:
// If either the LHS or the RHS are Zero, the result is zero.
- ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
- ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
+ computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
// Output known-1 bits are only known if set in both the LHS & RHS.
KnownOne &= KnownOne2;
// Output known-0 are known to be clear if zero in either the LHS | RHS.
KnownZero |= KnownZero2;
- return;
+ break;
case ISD::OR:
- ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
- ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
+ computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
// Output known-0 bits are only known if clear in both the LHS & RHS.
KnownZero &= KnownZero2;
// Output known-1 are known to be set if set in either the LHS | RHS.
KnownOne |= KnownOne2;
- return;
+ break;
case ISD::XOR: {
- ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
- ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
+ computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
// Output known-0 bits are known if clear or set in both the LHS & RHS.
APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
// Output known-1 are known to be set if set in only one of the LHS, RHS.
KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
KnownZero = KnownZeroOut;
- return;
+ break;
}
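The AND/OR/XOR cases combine the operands' known-bit sets exactly as the comments state: AND keeps ones known in both and zeros known in either, OR is the mirror image, and XOR knows a bit wherever both inputs' bits are known. A quick check with 8-bit masks standing in for the APInt sets:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Operand A: bits 0 and 2 known one, bits 1 and 3 known zero, rest unknown.
      uint8_t KnownOneA = 0x05, KnownZeroA = 0x0A;
      // Operand B: bits 0 and 1 known one, bits 6 and 7 known zero, rest unknown.
      uint8_t KnownOneB = 0x03, KnownZeroB = 0xC0;

      // AND: one only if one in both; zero if zero in either.
      assert((KnownOneA & KnownOneB) == 0x01 && (KnownZeroA | KnownZeroB) == 0xCA);

      // OR: zero only if zero in both; one if one in either.
      assert((KnownZeroA & KnownZeroB) == 0x00 && (KnownOneA | KnownOneB) == 0x07);

      // XOR: known zero where the bits are known equal, known one where unequal.
      uint8_t XorZero = (KnownZeroA & KnownZeroB) | (KnownOneA & KnownOneB);
      uint8_t XorOne  = (KnownZeroA & KnownOneB) | (KnownOneA & KnownZeroB);
      assert(XorZero == 0x01 && XorOne == 0x02);
      return 0;
    }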
case ISD::MUL: {
- ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
- ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
+ computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
// If low bits are zero in either operand, output low known-0 bits.
 // Also compute a conservative estimate for high known-0 bits.
@@ -1838,46 +2014,42 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
LeadZ = std::min(LeadZ, BitWidth);
KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
APInt::getHighBitsSet(BitWidth, LeadZ);
- return;
+ break;
}
case ISD::UDIV: {
// For the purposes of computing leading zeros we can conservatively
// treat a udiv as a logical right shift by the power of 2 known to
// be less than the denominator.
- ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
+ computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
unsigned LeadZ = KnownZero2.countLeadingOnes();
KnownOne2.clearAllBits();
KnownZero2.clearAllBits();
- ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
+ computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
if (RHSUnknownLeadingOnes != BitWidth)
LeadZ = std::min(BitWidth,
LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
- return;
+ break;
}
case ISD::SELECT:
- ComputeMaskedBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
- ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
+ computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
// Only known if known in both the LHS and RHS.
KnownOne &= KnownOne2;
KnownZero &= KnownZero2;
- return;
+ break;
case ISD::SELECT_CC:
- ComputeMaskedBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
- ComputeMaskedBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
+ computeKnownBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
// Only known if known in both the LHS and RHS.
KnownOne &= KnownOne2;
KnownZero &= KnownZero2;
- return;
+ break;
case ISD::SADDO:
case ISD::UADDO:
case ISD::SSUBO:
@@ -1885,14 +2057,23 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
case ISD::SMULO:
case ISD::UMULO:
if (Op.getResNo() != 1)
- return;
- // The boolean result conforms to getBooleanContents. Fall through.
+ break;
+ // The boolean result conforms to getBooleanContents.
+ // If we know the result of a setcc has the top bits zero, use this info.
+ // We know that we have an integer-based boolean since these operations
+ // are only available for integer.
+ if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
+ TargetLowering::ZeroOrOneBooleanContent &&
+ BitWidth > 1)
+ KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
+ break;
case ISD::SETCC:
// If we know the result of a setcc has the top bits zero, use this info.
- if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
- TargetLowering::ZeroOrOneBooleanContent && BitWidth > 1)
+ if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
+ TargetLowering::ZeroOrOneBooleanContent &&
+ BitWidth > 1)
KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
- return;
+ break;
case ISD::SHL:
// (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
@@ -1900,16 +2081,15 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
// If the shift count is an invalid immediate, don't do anything.
if (ShAmt >= BitWidth)
- return;
+ break;
- ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
KnownZero <<= ShAmt;
KnownOne <<= ShAmt;
// low bits known zero.
KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
}
- return;
+ break;
case ISD::SRL:
// (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
@@ -1917,31 +2097,29 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
// If the shift count is an invalid immediate, don't do anything.
if (ShAmt >= BitWidth)
- return;
+ break;
- ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
KnownZero = KnownZero.lshr(ShAmt);
KnownOne = KnownOne.lshr(ShAmt);
APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
KnownZero |= HighBits; // High bits known zero.
}
- return;
+ break;
case ISD::SRA:
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
unsigned ShAmt = SA->getZExtValue();
// If the shift count is an invalid immediate, don't do anything.
if (ShAmt >= BitWidth)
- return;
+ break;
// If any of the demanded bits are produced by the sign extension, we also
// demand the input sign bit.
APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
- ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
KnownZero = KnownZero.lshr(ShAmt);
KnownOne = KnownOne.lshr(ShAmt);
@@ -1955,7 +2133,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
KnownOne |= HighBits; // New bits are known one.
}
}
- return;
+ break;
case ISD::SIGN_EXTEND_INREG: {
EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
unsigned EBits = EVT.getScalarType().getSizeInBits();
@@ -1973,10 +2151,9 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
if (NewBits.getBoolValue())
InputDemandedBits |= InSignBit;
- ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
+ computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
KnownOne &= InputDemandedBits;
KnownZero &= InputDemandedBits;
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
// If the sign bit of the input is known set or clear, then we know the
// top bits of the result.
@@ -1990,7 +2167,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
KnownZero &= ~NewBits;
KnownOne &= ~NewBits;
}
- return;
+ break;
}
case ISD::CTTZ:
case ISD::CTTZ_ZERO_UNDEF:
@@ -2000,7 +2177,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
unsigned LowBits = Log2_32(BitWidth)+1;
KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
KnownOne.clearAllBits();
- return;
+ break;
}
case ISD::LOAD: {
LoadSDNode *LD = cast<LoadSDNode>(Op);
@@ -2010,9 +2187,9 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
unsigned MemBits = VT.getScalarType().getSizeInBits();
KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
} else if (const MDNode *Ranges = LD->getRanges()) {
- computeMaskedBitsLoad(*Ranges, KnownZero);
+ computeKnownBitsFromRangeMetadata(*Ranges, KnownZero);
}
- return;
+ break;
}
case ISD::ZERO_EXTEND: {
EVT InVT = Op.getOperand(0).getValueType();
@@ -2020,11 +2197,11 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
KnownZero = KnownZero.trunc(InBits);
KnownOne = KnownOne.trunc(InBits);
- ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
+ computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
KnownZero = KnownZero.zext(BitWidth);
KnownOne = KnownOne.zext(BitWidth);
KnownZero |= NewBits;
- return;
+ break;
}
case ISD::SIGN_EXTEND: {
EVT InVT = Op.getOperand(0).getValueType();
@@ -2033,13 +2210,11 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
KnownZero = KnownZero.trunc(InBits);
KnownOne = KnownOne.trunc(InBits);
- ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
+ computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
// Note if the sign bit is known to be zero or one.
bool SignBitKnownZero = KnownZero.isNegative();
bool SignBitKnownOne = KnownOne.isNegative();
- assert(!(SignBitKnownZero && SignBitKnownOne) &&
- "Sign bit can't be known to be both zero and one!");
KnownZero = KnownZero.zext(BitWidth);
KnownOne = KnownOne.zext(BitWidth);
@@ -2049,25 +2224,24 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
KnownZero |= NewBits;
else if (SignBitKnownOne)
KnownOne |= NewBits;
- return;
+ break;
}
case ISD::ANY_EXTEND: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarType().getSizeInBits();
KnownZero = KnownZero.trunc(InBits);
KnownOne = KnownOne.trunc(InBits);
- ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
+ computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
KnownZero = KnownZero.zext(BitWidth);
KnownOne = KnownOne.zext(BitWidth);
- return;
+ break;
}
case ISD::TRUNCATE: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarType().getSizeInBits();
KnownZero = KnownZero.zext(InBits);
KnownOne = KnownOne.zext(InBits);
- ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
KnownZero = KnownZero.trunc(BitWidth);
KnownOne = KnownOne.trunc(BitWidth);
break;
@@ -2075,15 +2249,15 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
case ISD::AssertZext: {
EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
- ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
+ computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
KnownZero |= (~InMask);
KnownOne &= (~KnownZero);
- return;
+ break;
}
case ISD::FGETSIGN:
// All bits are zero except the low bit.
KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
- return;
+ break;
case ISD::SUB: {
if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
@@ -2094,7 +2268,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
// NLZ can't be BitWidth with no sign bit
APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
- ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
+ computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
// If all of the MaskV bits are known to be zero, then we know the
// output top bits are zero, because we now know that the output is
@@ -2113,18 +2287,16 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
// Output known-0 bits are known if clear or set in both the low clear bits
// common to both LHS & RHS. For example, 8+(X<<3) is known to have the
// low 3 bits clear.
- ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
unsigned KnownZeroOut = KnownZero2.countTrailingOnes();
- ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+ computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
KnownZeroOut = std::min(KnownZeroOut,
KnownZero2.countTrailingOnes());
if (Op.getOpcode() == ISD::ADD) {
KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
- return;
+ break;
}
// With ADDE, a carry bit may be added in, so we can only use this
@@ -2133,14 +2305,14 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
// are known zero.
if (KnownZeroOut >= 2) // ADDE
KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroOut);
- return;
+ break;
}
case ISD::SREM:
if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
const APInt &RA = Rem->getAPIntValue().abs();
if (RA.isPowerOf2()) {
APInt LowBits = RA - 1;
- ComputeMaskedBits(Op.getOperand(0), KnownZero2,KnownOne2,Depth+1);
+ computeKnownBits(Op.getOperand(0), KnownZero2,KnownOne2,Depth+1);
// The low bits of the first operand are unchanged by the srem.
KnownZero = KnownZero2 & LowBits;
@@ -2158,36 +2330,38 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
}
}
- return;
+ break;
case ISD::UREM: {
if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
const APInt &RA = Rem->getAPIntValue();
if (RA.isPowerOf2()) {
APInt LowBits = (RA - 1);
- KnownZero |= ~LowBits;
- ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne,Depth+1);
- assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
+ computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth + 1);
+
+ // The upper bits are all zero, the lower ones are unchanged.
+ KnownZero = KnownZero2 | ~LowBits;
+ KnownOne = KnownOne2 & LowBits;
break;
}
}
// Since the result is less than or equal to either operand, any leading
// zero bits in either operand must also exist in the result.
- ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
- ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
+ computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
+ computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
KnownZero2.countLeadingOnes());
KnownOne.clearAllBits();
KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
- return;
+ break;
}
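The rewritten UREM case: for a constant power-of-two divisor the result simply keeps the low bits of the first operand (KnownZero = KnownZero2 | ~LowBits, KnownOne = KnownOne2 & LowBits); otherwise only the leading zeros present in either operand survive. A numeric check of the power-of-two path:

    #include <cassert>
    #include <cstdint>

    int main() {
      // x % 8, where x is known to have bit 6 clear and bit 1 set.
      uint8_t KnownZero2 = 0x40, KnownOne2 = 0x02;
      uint8_t LowBits = 8 - 1; // 0x07

      uint8_t KnownZero = KnownZero2 | uint8_t(~LowBits); // upper bits all zero
      uint8_t KnownOne  = KnownOne2 & LowBits;            // low bits unchanged
      assert(KnownZero == 0xF8 && KnownOne == 0x02);

      // Sanity: x % 8 == x & 7, so bit 1 really is preserved.
      for (unsigned x = 0; x < 256; ++x)
        if (x & 0x02)
          assert(((x % 8) & 0x02) != 0);
      return 0;
    }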
case ISD::FrameIndex:
case ISD::TargetFrameIndex:
if (unsigned Align = InferPtrAlignment(Op)) {
// The low bits are known zero if the pointer is aligned.
KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
- return;
+ break;
}
break;
@@ -2199,9 +2373,11 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
case ISD::INTRINSIC_W_CHAIN:
case ISD::INTRINSIC_VOID:
// Allow the target to implement this method for its nodes.
- TLI->computeMaskedBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
- return;
+ TLI->computeKnownBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
+ break;
}
+
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
}
/// ComputeNumSignBits - Return the number of times the sign bit of the
@@ -2275,7 +2451,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
FirstAnswer = std::min(Tmp, Tmp2);
// We computed what we know about the sign bits as our first
// answer. Now proceed to the generic code that uses
- // ComputeMaskedBits, and pick whichever answer is better.
+ // computeKnownBits, and pick whichever answer is better.
}
break;
@@ -2294,9 +2470,16 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
if (Op.getResNo() != 1)
break;
// The boolean result conforms to getBooleanContents. Fall through.
+ // If setcc returns 0/-1, all bits are sign bits.
+ // We know that we have an integer-based boolean since these operations
+ // are only available for integer.
+ if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
+ TargetLowering::ZeroOrNegativeOneBooleanContent)
+ return VTBits;
+ break;
case ISD::SETCC:
// If setcc returns 0/-1, all bits are sign bits.
- if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
+ if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
TargetLowering::ZeroOrNegativeOneBooleanContent)
return VTBits;
break;
@@ -2325,7 +2508,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
if (CRHS->isAllOnesValue()) {
APInt KnownZero, KnownOne;
- ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
+ computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
// If the input is known to be 0 or 1, the output is 0/-1, which is all
// sign bits set.
@@ -2350,7 +2533,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
if (CLHS->isNullValue()) {
APInt KnownZero, KnownOne;
- ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
+ computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
// If the input is known to be 0 or 1, the output is 0/-1, which is all
// sign bits set.
if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
@@ -2397,14 +2580,14 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
Op.getOpcode() == ISD::INTRINSIC_VOID) {
- unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, Depth);
+ unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, *this, Depth);
if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
}
// Finally, if we can prove that the top bits of the result are 0's or 1's,
// use this information.
APInt KnownZero, KnownOne;
- ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
+ computeKnownBits(Op, KnownZero, KnownOne, Depth);
APInt Mask;
if (KnownZero.isNegative()) { // sign bit is 0
@@ -2492,8 +2675,8 @@ bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
///
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT) {
FoldingSetNodeID ID;
- AddNodeIDNode(ID, Opcode, getVTList(VT), 0, 0);
- void *IP = 0;
+ AddNodeIDNode(ID, Opcode, getVTList(VT), None);
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -2510,17 +2693,23 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT) {
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
EVT VT, SDValue Operand) {
- // Constant fold unary operations with an integer constant operand.
+ // Constant fold unary operations with an integer constant operand. Even
+ // opaque constants will be folded, because the folding of unary operations
+ // doesn't create new constants with different values. Nevertheless, the
+ // opaque flag is preserved during folding to prevent future folding with
+ // other constants.
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
const APInt &Val = C->getAPIntValue();
switch (Opcode) {
default: break;
case ISD::SIGN_EXTEND:
- return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), VT);
+ return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), VT,
+ C->isTargetOpcode(), C->isOpaque());
case ISD::ANY_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::TRUNCATE:
- return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT);
+ return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT,
+ C->isTargetOpcode(), C->isOpaque());
case ISD::UINT_TO_FP:
case ISD::SINT_TO_FP: {
APFloat apf(EVTToAPFloatSemantics(VT),
@@ -2537,15 +2726,19 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
return getConstantFP(APFloat(APFloat::IEEEdouble, Val), VT);
break;
case ISD::BSWAP:
- return getConstant(Val.byteSwap(), VT);
+ return getConstant(Val.byteSwap(), VT, C->isTargetOpcode(),
+ C->isOpaque());
case ISD::CTPOP:
- return getConstant(Val.countPopulation(), VT);
+ return getConstant(Val.countPopulation(), VT, C->isTargetOpcode(),
+ C->isOpaque());
case ISD::CTLZ:
case ISD::CTLZ_ZERO_UNDEF:
- return getConstant(Val.countLeadingZeros(), VT);
+ return getConstant(Val.countLeadingZeros(), VT, C->isTargetOpcode(),
+ C->isOpaque());
case ISD::CTTZ:
case ISD::CTTZ_ZERO_UNDEF:
- return getConstant(Val.countTrailingZeros(), VT);
+ return getConstant(Val.countTrailingZeros(), VT, C->isTargetOpcode(),
+ C->isOpaque());
}
}
@@ -2608,6 +2801,31 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
}
}
+ // Constant fold unary operations with a vector integer operand.
+ if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand.getNode())) {
+ if (BV->isConstant()) {
+ switch (Opcode) {
+ default:
+ // FIXME: Entirely reasonable to perform folding of other unary
+ // operations here as the need arises.
+ break;
+ case ISD::UINT_TO_FP:
+ case ISD::SINT_TO_FP: {
+ SmallVector<SDValue, 8> Ops;
+ for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
+ SDValue OpN = BV->getOperand(i);
+ // Let the above scalar folding handle the conversion of each
+ // element.
+ OpN = getNode(ISD::SINT_TO_FP, DL, VT.getVectorElementType(),
+ OpN);
+ Ops.push_back(OpN);
+ }
+ return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
+ }
+ }
+ }
+ }
+
unsigned OpOpcode = Operand.getNode()->getOpcode();
switch (Opcode) {
case ISD::TokenFactor:
@@ -2754,8 +2972,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
if (VT != MVT::Glue) { // Don't CSE flag producing nodes
FoldingSetNodeID ID;
SDValue Ops[1] = { Operand };
- AddNodeIDNode(ID, Opcode, VTs, Ops, 1);
- void *IP = 0;
+ AddNodeIDNode(ID, Opcode, VTs, Ops);
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -2776,16 +2994,25 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, EVT VT,
SDNode *Cst1, SDNode *Cst2) {
+ // If the opcode is a target-specific ISD node, there's nothing we can
+ // do here and the operand rules may not line up with the below, so
+ // bail early.
+ if (Opcode >= ISD::BUILTIN_OP_END)
+ return SDValue();
+
SmallVector<std::pair<ConstantSDNode *, ConstantSDNode *>, 4> Inputs;
SmallVector<SDValue, 4> Outputs;
EVT SVT = VT.getScalarType();
ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1);
ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2);
- if (Scalar1 && Scalar2) {
+ if (Scalar1 && Scalar2 && (Scalar1->isOpaque() || Scalar2->isOpaque()))
+ return SDValue();
+
+ if (Scalar1 && Scalar2)
// Scalar instruction.
Inputs.push_back(std::make_pair(Scalar1, Scalar2));
- } else {
+ else {
// For vectors extract each constant element into Inputs so we can constant
// fold them individually.
BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
@@ -2801,6 +3028,9 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, EVT VT,
if (!V1 || !V2) // Not a constant, bail.
return SDValue();
+ if (V1->isOpaque() || V2->isOpaque())
+ return SDValue();
+
// Avoid BUILD_VECTOR nodes that perform implicit truncation.
// FIXME: This is valid and could be handled by truncating the APInts.
if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
@@ -2874,17 +3104,22 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, EVT VT,
}
}
+ assert((Scalar1 && Scalar2) || (VT.getVectorNumElements() == Outputs.size() &&
+ "Expected a scalar or vector!"));
+
// Handle the scalar case first.
- if (Scalar1 && Scalar2)
+ if (!VT.isVector())
return Outputs.back();
- // Otherwise build a big vector out of the scalar elements we generated.
- return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs.data(),
- Outputs.size());
+ // We may have a vector type but a scalar result. Create a splat.
+ Outputs.resize(VT.getVectorNumElements(), Outputs.back());
+
+ // Build a big vector out of the scalar elements we generated.
+ return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
- SDValue N2) {
+ SDValue N2, bool nuw, bool nsw, bool exact) {
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
switch (Opcode) {
@@ -2910,7 +3145,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
N1.getNode()->op_end());
Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
- return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
+ return getNode(ISD::BUILD_VECTOR, DL, VT, Elts);
}
break;
case ISD::AND:
@@ -3324,22 +3559,25 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
}
// Memoize this node if possible.
- SDNode *N;
+ BinarySDNode *N;
SDVTList VTs = getVTList(VT);
+ const bool BinOpHasFlags = isBinOpWithFlags(Opcode);
if (VT != MVT::Glue) {
- SDValue Ops[] = { N1, N2 };
+ SDValue Ops[] = {N1, N2};
FoldingSetNodeID ID;
- AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
- void *IP = 0;
+ AddNodeIDNode(ID, Opcode, VTs, Ops);
+ if (BinOpHasFlags)
+ AddBinaryNodeIDCustom(ID, Opcode, nuw, nsw, exact);
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
- N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
- DL.getDebugLoc(), VTs, N1, N2);
+ N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, nuw, nsw, exact);
+
CSEMap.InsertNode(N, IP);
} else {
- N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
- DL.getDebugLoc(), VTs, N1, N2);
+
+ N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, nuw, nsw, exact);
}
AllNodes.push_back(N);
@@ -3379,7 +3617,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
N1.getNode()->op_end());
Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
- return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
+ return getNode(ISD::BUILD_VECTOR, DL, VT, Elts);
}
break;
case ISD::SETCC: {
@@ -3436,8 +3674,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
if (VT != MVT::Glue) {
SDValue Ops[] = { N1, N2, N3 };
FoldingSetNodeID ID;
- AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
- void *IP = 0;
+ AddNodeIDNode(ID, Opcode, VTs, Ops);
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -3460,14 +3698,14 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
SDValue N1, SDValue N2, SDValue N3,
SDValue N4) {
SDValue Ops[] = { N1, N2, N3, N4 };
- return getNode(Opcode, DL, VT, Ops, 4);
+ return getNode(Opcode, DL, VT, Ops);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
SDValue N1, SDValue N2, SDValue N3,
SDValue N4, SDValue N5) {
SDValue Ops[] = { N1, N2, N3, N4, N5 };
- return getNode(Opcode, DL, VT, Ops, 5);
+ return getNode(Opcode, DL, VT, Ops);
}
/// getStackArgumentTokenFactor - Compute a TokenFactor to force all
@@ -3489,8 +3727,7 @@ SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
ArgChains.push_back(SDValue(L, 1));
// Build a tokenfactor for all the chains.
- return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other,
- &ArgChains[0], ArgChains.size());
+ return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
}
/// getMemsetValue - Vectorized representation of the memset value
@@ -3528,7 +3765,7 @@ static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG,
if (Str.empty()) {
if (VT.isInteger())
return DAG.getConstant(0, VT);
- else if (VT == MVT::f32 || VT == MVT::f64)
+ else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
return DAG.getConstantFP(0.0, VT);
else if (VT.isVector()) {
unsigned NumElts = VT.getVectorNumElements();
@@ -3554,12 +3791,12 @@ static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG,
Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
}
- // If the "cost" of materializing the integer immediate is 1 or free, then
- // it is cost effective to turn the load into the immediate.
- const TargetTransformInfo *TTI = DAG.getTargetTransformInfo();
- if (TTI->getIntImmCost(Val, VT.getTypeForEVT(*DAG.getContext())) < 2)
+ // If the "cost" of materializing the integer immediate is less than the cost
+ // of a load, then it is cost effective to turn the load into the immediate.
+ Type *Ty = VT.getTypeForEVT(*DAG.getContext());
+ if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
return DAG.getConstant(Val, VT);
- return SDValue(0, 0);
+ return SDValue(nullptr, 0);
}
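getMemsetStringVal packs the first NumVTBytes source characters into one integer, most-significant byte first (that is what the shift by (NumVTBytes-i-1)*8 does), and the change above defers to the target hook shouldConvertConstantLoadToIntImm to decide whether that immediate beats a load. The packing itself, checked on "abcd" for a 32-bit value:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <string>

    // Same packing as the loop above: byte i lands at bit (NumVTBytes - i - 1) * 8.
    uint64_t packStringVal(const std::string &Str, unsigned NumVTBytes) {
      uint64_t Val = 0;
      unsigned NumBytes = std::min<unsigned>(NumVTBytes, (unsigned)Str.size());
      for (unsigned i = 0; i != NumBytes; ++i)
        Val |= uint64_t((unsigned char)Str[i]) << (NumVTBytes - i - 1) * 8;
      return Val;
    }

    int main() {
      // 'a' in the top byte, 'd' in the bottom byte.
      assert(packStringVal("abcd", 4) == 0x61626364ULL);
      return 0;
    }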
/// getMemBasePlusOffset - Returns base and offset node for the
@@ -3575,7 +3812,7 @@ static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, SDLoc dl,
///
static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
unsigned SrcDelta = 0;
- GlobalAddressSDNode *G = NULL;
+ GlobalAddressSDNode *G = nullptr;
if (Src.getOpcode() == ISD::GlobalAddress)
G = cast<GlobalAddressSDNode>(Src);
else if (Src.getOpcode() == ISD::ADD &&
@@ -3617,8 +3854,9 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
DAG.getMachineFunction());
if (VT == MVT::Other) {
- if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment() ||
- TLI.allowsUnalignedMemoryAccesses(VT)) {
+ unsigned AS = 0;
+ if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment(AS) ||
+ TLI.allowsUnalignedMemoryAccesses(VT, AS)) {
VT = TLI.getPointerTy();
} else {
switch (DstAlign & 7) {
@@ -3675,9 +3913,10 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
// FIXME: Only does this for 64-bit or more since we don't have proper
// cost model for unaligned load / store.
bool Fast;
+ unsigned AS = 0;
if (NumMemOps && AllowOverlap &&
VTSize >= 8 && NewVTSize < Size &&
- TLI.allowsUnalignedMemoryAccesses(VT, &Fast) && Fast)
+ TLI.allowsUnalignedMemoryAccesses(VT, AS, &Fast) && Fast)
VTSize = Size;
else {
VT = NewVT;
@@ -3809,8 +4048,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
Size -= VTSize;
}
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &OutChains[0], OutChains.size());
+ return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
@@ -3875,8 +4113,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
LoadChains.push_back(Value.getValue(1));
SrcOff += VTSize;
}
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &LoadChains[0], LoadChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
OutChains.clear();
for (unsigned i = 0; i < NumMemOps; i++) {
EVT VT = MemOps[i];
@@ -3890,8 +4127,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
DstOff += VTSize;
}
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &OutChains[0], OutChains.size());
+ return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
/// \brief Lower the call to 'memset' intrinsic function into a series of store
@@ -3992,8 +4228,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl,
Size -= VTSize;
}
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &OutChains[0], OutChains.size());
+ return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
@@ -4052,15 +4287,13 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
Entry.Node = Src; Args.push_back(Entry);
Entry.Node = Size; Args.push_back(Entry);
// FIXME: pass in SDLoc
- TargetLowering::
- CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
- false, false, false, false, 0,
- TLI->getLibcallCallingConv(RTLIB::MEMCPY),
- /*isTailCall=*/false,
- /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
- getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
- TLI->getPointerTy()),
- Args, *this, dl);
+ TargetLowering::CallLoweringInfo CLI(*this);
+ CLI.setDebugLoc(dl).setChain(Chain)
+ .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
+ Type::getVoidTy(*getContext()),
+ getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
+ TLI->getPointerTy()), std::move(Args), 0)
+ .setDiscardResult();
std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
@@ -4110,15 +4343,13 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
Entry.Node = Src; Args.push_back(Entry);
Entry.Node = Size; Args.push_back(Entry);
// FIXME: pass in SDLoc
- TargetLowering::
- CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
- false, false, false, false, 0,
- TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
- /*isTailCall=*/false,
- /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
- getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
- TLI->getPointerTy()),
- Args, *this, dl);
+ TargetLowering::CallLoweringInfo CLI(*this);
+ CLI.setDebugLoc(dl).setChain(Chain)
+ .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
+ Type::getVoidTy(*getContext()),
+ getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
+ TLI->getPointerTy()), std::move(Args), 0)
+ .setDiscardResult();
std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
@@ -4174,31 +4405,31 @@ SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
Entry.Ty = IntPtrTy;
Entry.isSExt = false;
Args.push_back(Entry);
+
// FIXME: pass in SDLoc
- TargetLowering::
- CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
- false, false, false, false, 0,
- TLI->getLibcallCallingConv(RTLIB::MEMSET),
- /*isTailCall=*/false,
- /*doesNotReturn*/false, /*isReturnValueUsed=*/false,
- getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
- TLI->getPointerTy()),
- Args, *this, dl);
- std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
+ TargetLowering::CallLoweringInfo CLI(*this);
+ CLI.setDebugLoc(dl).setChain(Chain)
+ .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
+ Type::getVoidTy(*getContext()),
+ getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
+ TLI->getPointerTy()), std::move(Args), 0)
+ .setDiscardResult();
+ std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
}
SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
- SDVTList VTList, SDValue* Ops, unsigned NumOps,
+ SDVTList VTList, ArrayRef<SDValue> Ops,
MachineMemOperand *MMO,
- AtomicOrdering Ordering,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope) {
FoldingSetNodeID ID;
ID.AddInteger(MemVT.getRawBits());
- AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
+ AddNodeIDNode(ID, Opcode, VTList, Ops);
ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
- void* IP = 0;
+ void* IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<AtomicSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
@@ -4209,59 +4440,70 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
// the node is deallocated, but recovered when the allocator is released.
// If the number of operands is less than 5 we use AtomicSDNode's internal
// storage.
- SDUse *DynOps = NumOps > 4 ? OperandAllocator.Allocate<SDUse>(NumOps) : 0;
+ unsigned NumOps = Ops.size();
+ SDUse *DynOps = NumOps > 4 ? OperandAllocator.Allocate<SDUse>(NumOps)
+ : nullptr;
SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(),
dl.getDebugLoc(), VTList, MemVT,
- Ops, DynOps, NumOps, MMO,
- Ordering, SynchScope);
+ Ops.data(), DynOps, NumOps, MMO,
+ SuccessOrdering, FailureOrdering,
+ SynchScope);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
}
SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
- SDValue Chain, SDValue Ptr, SDValue Cmp,
- SDValue Swp, MachinePointerInfo PtrInfo,
- unsigned Alignment,
+ SDVTList VTList, ArrayRef<SDValue> Ops,
+ MachineMemOperand *MMO,
AtomicOrdering Ordering,
SynchronizationScope SynchScope) {
+ return getAtomic(Opcode, dl, MemVT, VTList, Ops, MMO, Ordering,
+ Ordering, SynchScope);
+}
+
+SDValue SelectionDAG::getAtomicCmpSwap(
+ unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTs, SDValue Chain,
+ SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
+ unsigned Alignment, AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering, SynchronizationScope SynchScope) {
+ assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
+ Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
+ assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
+
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(MemVT);
MachineFunction &MF = getMachineFunction();
- // All atomics are load and store, except for ATMOIC_LOAD and ATOMIC_STORE.
- // For now, atomics are considered to be volatile always.
// FIXME: Volatile isn't really correct; we should keep track of atomic
// orderings in the memoperand.
unsigned Flags = MachineMemOperand::MOVolatile;
- if (Opcode != ISD::ATOMIC_STORE)
- Flags |= MachineMemOperand::MOLoad;
- if (Opcode != ISD::ATOMIC_LOAD)
- Flags |= MachineMemOperand::MOStore;
+ Flags |= MachineMemOperand::MOLoad;
+ Flags |= MachineMemOperand::MOStore;
MachineMemOperand *MMO =
MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);
- return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Cmp, Swp, MMO,
- Ordering, SynchScope);
+ return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO,
+ SuccessOrdering, FailureOrdering, SynchScope);
}
-SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
- SDValue Chain,
- SDValue Ptr, SDValue Cmp,
- SDValue Swp, MachineMemOperand *MMO,
- AtomicOrdering Ordering,
- SynchronizationScope SynchScope) {
- assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op");
+SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, SDLoc dl, EVT MemVT,
+ SDVTList VTs, SDValue Chain, SDValue Ptr,
+ SDValue Cmp, SDValue Swp,
+ MachineMemOperand *MMO,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope) {
+ assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
+ Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
- EVT VT = Cmp.getValueType();
-
- SDVTList VTs = getVTList(VT, MVT::Other);
SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
- return getAtomic(Opcode, dl, MemVT, VTs, Ops, 4, MMO, Ordering, SynchScope);
+ return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO,
+ SuccessOrdering, FailureOrdering, SynchScope);
}
SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
@@ -4320,38 +4562,7 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
getVTList(VT, MVT::Other);
SDValue Ops[] = {Chain, Ptr, Val};
- return getAtomic(Opcode, dl, MemVT, VTs, Ops, 3, MMO, Ordering, SynchScope);
-}
-
-SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
- EVT VT, SDValue Chain,
- SDValue Ptr,
- const Value* PtrVal,
- unsigned Alignment,
- AtomicOrdering Ordering,
- SynchronizationScope SynchScope) {
- if (Alignment == 0) // Ensure that codegen never sees alignment 0
- Alignment = getEVTAlignment(MemVT);
-
- MachineFunction &MF = getMachineFunction();
- // An atomic store does not load. An atomic load does not store.
- // (An atomicrmw obviously both loads and stores.)
- // For now, atomics are considered to be volatile always, and they are
- // chained as such.
- // FIXME: Volatile isn't really correct; we should keep track of atomic
- // orderings in the memoperand.
- unsigned Flags = MachineMemOperand::MOVolatile;
- if (Opcode != ISD::ATOMIC_STORE)
- Flags |= MachineMemOperand::MOLoad;
- if (Opcode != ISD::ATOMIC_LOAD)
- Flags |= MachineMemOperand::MOStore;
-
- MachineMemOperand *MMO =
- MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
- MemVT.getStoreSize(), Alignment);
-
- return getAtomic(Opcode, dl, MemVT, VT, Chain, Ptr, MMO,
- Ordering, SynchScope);
+ return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO, Ordering, SynchScope);
}
SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
@@ -4364,38 +4575,24 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
SDVTList VTs = getVTList(VT, MVT::Other);
SDValue Ops[] = {Chain, Ptr};
- return getAtomic(Opcode, dl, MemVT, VTs, Ops, 2, MMO, Ordering, SynchScope);
+ return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO, Ordering, SynchScope);
}
/// getMergeValues - Create a MERGE_VALUES node from the given operands.
-SDValue SelectionDAG::getMergeValues(const SDValue *Ops, unsigned NumOps,
- SDLoc dl) {
- if (NumOps == 1)
+SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, SDLoc dl) {
+ if (Ops.size() == 1)
return Ops[0];
SmallVector<EVT, 4> VTs;
- VTs.reserve(NumOps);
- for (unsigned i = 0; i < NumOps; ++i)
+ VTs.reserve(Ops.size());
+ for (unsigned i = 0; i < Ops.size(); ++i)
VTs.push_back(Ops[i].getValueType());
- return getNode(ISD::MERGE_VALUES, dl, getVTList(&VTs[0], NumOps),
- Ops, NumOps);
-}
-
-SDValue
-SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl,
- const EVT *VTs, unsigned NumVTs,
- const SDValue *Ops, unsigned NumOps,
- EVT MemVT, MachinePointerInfo PtrInfo,
- unsigned Align, bool Vol,
- bool ReadMem, bool WriteMem) {
- return getMemIntrinsicNode(Opcode, dl, makeVTList(VTs, NumVTs), Ops, NumOps,
- MemVT, PtrInfo, Align, Vol,
- ReadMem, WriteMem);
+ return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
}
SDValue
SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
- const SDValue *Ops, unsigned NumOps,
+ ArrayRef<SDValue> Ops,
EVT MemVT, MachinePointerInfo PtrInfo,
unsigned Align, bool Vol,
bool ReadMem, bool WriteMem) {
@@ -4413,13 +4610,13 @@ SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
MachineMemOperand *MMO =
MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Align);
- return getMemIntrinsicNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
+ return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
}
SDValue
SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
- const SDValue *Ops, unsigned NumOps,
- EVT MemVT, MachineMemOperand *MMO) {
+ ArrayRef<SDValue> Ops, EVT MemVT,
+ MachineMemOperand *MMO) {
assert((Opcode == ISD::INTRINSIC_VOID ||
Opcode == ISD::INTRINSIC_W_CHAIN ||
Opcode == ISD::PREFETCH ||
@@ -4433,9 +4630,9 @@ SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
MemIntrinsicSDNode *N;
if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
FoldingSetNodeID ID;
- AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
+ AddNodeIDNode(ID, Opcode, VTList, Ops);
ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
- void *IP = 0;
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
@@ -4443,12 +4640,12 @@ SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
dl.getDebugLoc(), VTList, Ops,
- NumOps, MemVT, MMO);
+ MemVT, MMO);
CSEMap.InsertNode(N, IP);
} else {
N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
dl.getDebugLoc(), VTList, Ops,
- NumOps, MemVT, MMO);
+ MemVT, MMO);
}
AllNodes.push_back(N);
return SDValue(N, 0);
@@ -4511,7 +4708,7 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
// If we don't have a PtrInfo, infer the trivial frame index case to simplify
// clients.
- if (PtrInfo.V == 0)
+ if (PtrInfo.V.isNull())
PtrInfo = InferPointerInfo(Ptr, Offset);
MachineFunction &MF = getMachineFunction();
@@ -4551,13 +4748,13 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
SDValue Ops[] = { Chain, Ptr, Offset };
FoldingSetNodeID ID;
- AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
+ AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
ID.AddInteger(MemVT.getRawBits());
ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
MMO->isNonTemporal(),
MMO->isInvariant()));
ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
- void *IP = 0;
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<LoadSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
@@ -4638,7 +4835,7 @@ SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
if (isNonTemporal)
Flags |= MachineMemOperand::MONonTemporal;
- if (PtrInfo.V == 0)
+ if (PtrInfo.V.isNull())
PtrInfo = InferPointerInfo(Ptr);
MachineFunction &MF = getMachineFunction();
@@ -4659,12 +4856,12 @@ SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
SDValue Undef = getUNDEF(Ptr.getValueType());
SDValue Ops[] = { Chain, Val, Ptr, Undef };
FoldingSetNodeID ID;
- AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
+ AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
ID.AddInteger(VT.getRawBits());
ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
MMO->isNonTemporal(), MMO->isInvariant()));
ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
- void *IP = 0;
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<StoreSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
@@ -4693,7 +4890,7 @@ SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
if (isNonTemporal)
Flags |= MachineMemOperand::MONonTemporal;
- if (PtrInfo.V == 0)
+ if (PtrInfo.V.isNull())
PtrInfo = InferPointerInfo(Ptr);
MachineFunction &MF = getMachineFunction();
@@ -4728,12 +4925,12 @@ SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
SDValue Undef = getUNDEF(Ptr.getValueType());
SDValue Ops[] = { Chain, Val, Ptr, Undef };
FoldingSetNodeID ID;
- AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
+ AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
ID.AddInteger(SVT.getRawBits());
ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
MMO->isNonTemporal(), MMO->isInvariant()));
ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
- void *IP = 0;
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<StoreSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
@@ -4755,11 +4952,11 @@ SelectionDAG::getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base,
SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
FoldingSetNodeID ID;
- AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
+ AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
ID.AddInteger(ST->getMemoryVT().getRawBits());
ID.AddInteger(ST->getRawSubclassData());
ID.AddInteger(ST->getPointerInfo().getAddrSpace());
- void *IP = 0;
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -4778,14 +4975,14 @@ SDValue SelectionDAG::getVAArg(EVT VT, SDLoc dl,
SDValue SV,
unsigned Align) {
SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, MVT::i32) };
- return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 4);
+ return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
- const SDUse *Ops, unsigned NumOps) {
- switch (NumOps) {
+ ArrayRef<SDUse> Ops) {
+ switch (Ops.size()) {
case 0: return getNode(Opcode, DL, VT);
- case 1: return getNode(Opcode, DL, VT, Ops[0]);
+ case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
default: break;
@@ -4793,12 +4990,13 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
// Copy from an SDUse array into an SDValue array for use with
// the regular getNode logic.
- SmallVector<SDValue, 8> NewOps(Ops, Ops + NumOps);
- return getNode(Opcode, DL, VT, &NewOps[0], NumOps);
+ SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
+ return getNode(Opcode, DL, VT, NewOps);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
- const SDValue *Ops, unsigned NumOps) {
+ ArrayRef<SDValue> Ops) {
+ unsigned NumOps = Ops.size();
switch (NumOps) {
case 0: return getNode(Opcode, DL, VT);
case 1: return getNode(Opcode, DL, VT, Ops[0]);
@@ -4833,18 +5031,18 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
if (VT != MVT::Glue) {
FoldingSetNodeID ID;
- AddNodeIDNode(ID, Opcode, VTs, Ops, NumOps);
- void *IP = 0;
+ AddNodeIDNode(ID, Opcode, VTs, Ops);
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
- VTs, Ops, NumOps);
+ VTs, Ops);
CSEMap.InsertNode(N, IP);
} else {
N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
- VTs, Ops, NumOps);
+ VTs, Ops);
}
AllNodes.push_back(N);
@@ -4855,24 +5053,14 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
- ArrayRef<EVT> ResultTys,
- const SDValue *Ops, unsigned NumOps) {
- return getNode(Opcode, DL, getVTList(&ResultTys[0], ResultTys.size()),
- Ops, NumOps);
-}
-
-SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
- const EVT *VTs, unsigned NumVTs,
- const SDValue *Ops, unsigned NumOps) {
- if (NumVTs == 1)
- return getNode(Opcode, DL, VTs[0], Ops, NumOps);
- return getNode(Opcode, DL, makeVTList(VTs, NumVTs), Ops, NumOps);
+ ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
+ return getNode(Opcode, DL, getVTList(ResultTys), Ops);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
- const SDValue *Ops, unsigned NumOps) {
+ ArrayRef<SDValue> Ops) {
if (VTList.NumVTs == 1)
- return getNode(Opcode, DL, VTList.VTs[0], Ops, NumOps);
+ return getNode(Opcode, DL, VTList.VTs[0], Ops);
#if 0
switch (Opcode) {
@@ -4899,10 +5087,11 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
// Memoize the node unless it returns a flag.
SDNode *N;
+ unsigned NumOps = Ops.size();
if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
FoldingSetNodeID ID;
- AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
- void *IP = 0;
+ AddNodeIDNode(ID, Opcode, VTList, Ops);
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
@@ -4919,7 +5108,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
Ops[1], Ops[2]);
} else {
N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
- VTList, Ops, NumOps);
+ VTList, Ops);
}
CSEMap.InsertNode(N, IP);
} else {
@@ -4936,7 +5125,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
Ops[1], Ops[2]);
} else {
N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
- VTList, Ops, NumOps);
+ VTList, Ops);
}
}
AllNodes.push_back(N);
@@ -4947,39 +5136,39 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList) {
- return getNode(Opcode, DL, VTList, 0, 0);
+ return getNode(Opcode, DL, VTList, ArrayRef<SDValue>());
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
SDValue N1) {
SDValue Ops[] = { N1 };
- return getNode(Opcode, DL, VTList, Ops, 1);
+ return getNode(Opcode, DL, VTList, Ops);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
SDValue N1, SDValue N2) {
SDValue Ops[] = { N1, N2 };
- return getNode(Opcode, DL, VTList, Ops, 2);
+ return getNode(Opcode, DL, VTList, Ops);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
SDValue N1, SDValue N2, SDValue N3) {
SDValue Ops[] = { N1, N2, N3 };
- return getNode(Opcode, DL, VTList, Ops, 3);
+ return getNode(Opcode, DL, VTList, Ops);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
SDValue N1, SDValue N2, SDValue N3,
SDValue N4) {
SDValue Ops[] = { N1, N2, N3, N4 };
- return getNode(Opcode, DL, VTList, Ops, 4);
+ return getNode(Opcode, DL, VTList, Ops);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
SDValue N1, SDValue N2, SDValue N3,
SDValue N4, SDValue N5) {
SDValue Ops[] = { N1, N2, N3, N4, N5 };
- return getNode(Opcode, DL, VTList, Ops, 5);
+ return getNode(Opcode, DL, VTList, Ops);
}
SDVTList SelectionDAG::getVTList(EVT VT) {
@@ -4992,9 +5181,9 @@ SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
ID.AddInteger(VT1.getRawBits());
ID.AddInteger(VT2.getRawBits());
- void *IP = 0;
+ void *IP = nullptr;
SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
- if (Result == NULL) {
+ if (!Result) {
EVT *Array = Allocator.Allocate<EVT>(2);
Array[0] = VT1;
Array[1] = VT2;
@@ -5011,9 +5200,9 @@ SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
ID.AddInteger(VT2.getRawBits());
ID.AddInteger(VT3.getRawBits());
- void *IP = 0;
+ void *IP = nullptr;
SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
- if (Result == NULL) {
+ if (!Result) {
EVT *Array = Allocator.Allocate<EVT>(3);
Array[0] = VT1;
Array[1] = VT2;
@@ -5032,9 +5221,9 @@ SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
ID.AddInteger(VT3.getRawBits());
ID.AddInteger(VT4.getRawBits());
- void *IP = 0;
+ void *IP = nullptr;
SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
- if (Result == NULL) {
+ if (!Result) {
EVT *Array = Allocator.Allocate<EVT>(4);
Array[0] = VT1;
Array[1] = VT2;
@@ -5046,18 +5235,19 @@ SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
return Result->getSDVTList();
}
-SDVTList SelectionDAG::getVTList(const EVT *VTs, unsigned NumVTs) {
+SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
+ unsigned NumVTs = VTs.size();
FoldingSetNodeID ID;
ID.AddInteger(NumVTs);
for (unsigned index = 0; index < NumVTs; index++) {
ID.AddInteger(VTs[index].getRawBits());
}
- void *IP = 0;
+ void *IP = nullptr;
SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
- if (Result == NULL) {
+ if (!Result) {
EVT *Array = Allocator.Allocate<EVT>(NumVTs);
- std::copy(VTs, VTs + NumVTs, Array);
+ std::copy(VTs.begin(), VTs.end(), Array);
Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
VTListMap.InsertNode(Result, IP);
}
@@ -5078,14 +5268,14 @@ SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
if (Op == N->getOperand(0)) return N;
// See if the modified node already exists.
- void *InsertPos = 0;
+ void *InsertPos = nullptr;
if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
return Existing;
// Nope it doesn't. Remove the node from its current place in the maps.
if (InsertPos)
if (!RemoveNodeFromCSEMaps(N))
- InsertPos = 0;
+ InsertPos = nullptr;
// Now we update the operands.
N->OperandList[0].set(Op);
@@ -5103,14 +5293,14 @@ SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
return N; // No operands changed, just return the input node.
// See if the modified node already exists.
- void *InsertPos = 0;
+ void *InsertPos = nullptr;
if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
return Existing;
// Nope it doesn't. Remove the node from its current place in the maps.
if (InsertPos)
if (!RemoveNodeFromCSEMaps(N))
- InsertPos = 0;
+ InsertPos = nullptr;
// Now we update the operands.
if (N->OperandList[0] != Op1)
@@ -5126,25 +5316,26 @@ SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
SDValue Ops[] = { Op1, Op2, Op3 };
- return UpdateNodeOperands(N, Ops, 3);
+ return UpdateNodeOperands(N, Ops);
}
SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4) {
SDValue Ops[] = { Op1, Op2, Op3, Op4 };
- return UpdateNodeOperands(N, Ops, 4);
+ return UpdateNodeOperands(N, Ops);
}
SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4, SDValue Op5) {
SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
- return UpdateNodeOperands(N, Ops, 5);
+ return UpdateNodeOperands(N, Ops);
}
SDNode *SelectionDAG::
-UpdateNodeOperands(SDNode *N, const SDValue *Ops, unsigned NumOps) {
+UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
+ unsigned NumOps = Ops.size();
assert(N->getNumOperands() == NumOps &&
"Update with wrong number of operands");
@@ -5161,14 +5352,14 @@ UpdateNodeOperands(SDNode *N, const SDValue *Ops, unsigned NumOps) {
if (!AnyChange) return N;
// See if the modified node already exists.
- void *InsertPos = 0;
- if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, NumOps, InsertPos))
+ void *InsertPos = nullptr;
+ if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
return Existing;
// Nope it doesn't. Remove the node from its current place in the maps.
if (InsertPos)
if (!RemoveNodeFromCSEMaps(N))
- InsertPos = 0;
+ InsertPos = nullptr;
// Now we update the operands.
for (unsigned i = 0; i != NumOps; ++i)
@@ -5197,14 +5388,14 @@ void SDNode::DropOperands() {
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
EVT VT) {
SDVTList VTs = getVTList(VT);
- return SelectNodeTo(N, MachineOpc, VTs, 0, 0);
+ return SelectNodeTo(N, MachineOpc, VTs, None);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
EVT VT, SDValue Op1) {
SDVTList VTs = getVTList(VT);
SDValue Ops[] = { Op1 };
- return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
+ return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
@@ -5212,7 +5403,7 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
SDValue Op2) {
SDVTList VTs = getVTList(VT);
SDValue Ops[] = { Op1, Op2 };
- return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
+ return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
@@ -5220,41 +5411,39 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
SDValue Op2, SDValue Op3) {
SDVTList VTs = getVTList(VT);
SDValue Ops[] = { Op1, Op2, Op3 };
- return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
+ return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
- EVT VT, const SDValue *Ops,
- unsigned NumOps) {
+ EVT VT, ArrayRef<SDValue> Ops) {
SDVTList VTs = getVTList(VT);
- return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
+ return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
- EVT VT1, EVT VT2, const SDValue *Ops,
- unsigned NumOps) {
+ EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
SDVTList VTs = getVTList(VT1, VT2);
- return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
+ return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
EVT VT1, EVT VT2) {
SDVTList VTs = getVTList(VT1, VT2);
- return SelectNodeTo(N, MachineOpc, VTs, (SDValue *)0, 0);
+ return SelectNodeTo(N, MachineOpc, VTs, None);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
EVT VT1, EVT VT2, EVT VT3,
- const SDValue *Ops, unsigned NumOps) {
+ ArrayRef<SDValue> Ops) {
SDVTList VTs = getVTList(VT1, VT2, VT3);
- return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
+ return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
EVT VT1, EVT VT2, EVT VT3, EVT VT4,
- const SDValue *Ops, unsigned NumOps) {
+ ArrayRef<SDValue> Ops) {
SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
- return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
+ return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
@@ -5262,7 +5451,7 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
SDValue Op1) {
SDVTList VTs = getVTList(VT1, VT2);
SDValue Ops[] = { Op1 };
- return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
+ return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
@@ -5270,7 +5459,7 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
SDValue Op1, SDValue Op2) {
SDVTList VTs = getVTList(VT1, VT2);
SDValue Ops[] = { Op1, Op2 };
- return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
+ return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
@@ -5279,7 +5468,7 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
SDValue Op3) {
SDVTList VTs = getVTList(VT1, VT2);
SDValue Ops[] = { Op1, Op2, Op3 };
- return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
+ return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
@@ -5288,13 +5477,12 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
SDValue Op3) {
SDVTList VTs = getVTList(VT1, VT2, VT3);
SDValue Ops[] = { Op1, Op2, Op3 };
- return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
+ return SelectNodeTo(N, MachineOpc, VTs, Ops);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
- SDVTList VTs, const SDValue *Ops,
- unsigned NumOps) {
- N = MorphNodeTo(N, ~MachineOpc, VTs, Ops, NumOps);
+ SDVTList VTs, ArrayRef<SDValue> Ops) {
+ N = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
// Reset the NodeID to -1.
N->setNodeId(-1);
return N;
@@ -5331,19 +5519,19 @@ SDNode *SelectionDAG::UpdadeSDLocOnMergedSDNode(SDNode *N, SDLoc OLoc) {
/// the node's users.
///
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
- SDVTList VTs, const SDValue *Ops,
- unsigned NumOps) {
+ SDVTList VTs, ArrayRef<SDValue> Ops) {
+ unsigned NumOps = Ops.size();
// If an identical node already exists, use it.
- void *IP = 0;
+ void *IP = nullptr;
if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
FoldingSetNodeID ID;
- AddNodeIDNode(ID, Opc, VTs, Ops, NumOps);
+ AddNodeIDNode(ID, Opc, VTs, Ops);
if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
return UpdadeSDLocOnMergedSDNode(ON, SDLoc(N));
}
if (!RemoveNodeFromCSEMaps(N))
- IP = 0;
+ IP = nullptr;
// Start the morphing.
N->NodeType = Opc;
@@ -5363,7 +5551,7 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
// Initialize the memory references information.
- MN->setMemRefs(0, 0);
+ MN->setMemRefs(nullptr, nullptr);
// If NumOps is larger than the # of operands we can have in a
// MachineSDNode, reallocate the operand list.
if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
@@ -5374,22 +5562,22 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
// remainder of the current SelectionDAG iteration, so we can allocate
// the operands directly out of a pool with no recycling metadata.
MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
- Ops, NumOps);
+ Ops.data(), NumOps);
else
- MN->InitOperands(MN->LocalOperands, Ops, NumOps);
+ MN->InitOperands(MN->LocalOperands, Ops.data(), NumOps);
MN->OperandsNeedDelete = false;
} else
- MN->InitOperands(MN->OperandList, Ops, NumOps);
+ MN->InitOperands(MN->OperandList, Ops.data(), NumOps);
} else {
// If NumOps is larger than the # of operands we currently have, reallocate
// the operand list.
if (NumOps > N->NumOperands) {
if (N->OperandsNeedDelete)
delete[] N->OperandList;
- N->InitOperands(new SDUse[NumOps], Ops, NumOps);
+ N->InitOperands(new SDUse[NumOps], Ops.data(), NumOps);
N->OperandsNeedDelete = true;
} else
- N->InitOperands(N->OperandList, Ops, NumOps);
+ N->InitOperands(N->OperandList, Ops.data(), NumOps);
}
// Delete any nodes that are still dead after adding the uses for the
@@ -5528,7 +5716,7 @@ MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
ArrayRef<EVT> ResultTys,
ArrayRef<SDValue> Ops) {
- SDVTList VTs = getVTList(&ResultTys[0], ResultTys.size());
+ SDVTList VTs = getVTList(ResultTys);
return getMachineNode(Opcode, dl, VTs, Ops);
}
@@ -5537,14 +5725,14 @@ SelectionDAG::getMachineNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
ArrayRef<SDValue> OpsArray) {
bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
MachineSDNode *N;
- void *IP = 0;
+ void *IP = nullptr;
const SDValue *Ops = OpsArray.data();
unsigned NumOps = OpsArray.size();
if (DoCSE) {
FoldingSetNodeID ID;
- AddNodeIDNode(ID, ~Opcode, VTs, Ops, NumOps);
- IP = 0;
+ AddNodeIDNode(ID, ~Opcode, VTs, OpsArray);
+ IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
return cast<MachineSDNode>(UpdadeSDLocOnMergedSDNode(E, DL));
}
@@ -5600,34 +5788,42 @@ SelectionDAG::getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT,
/// getNodeIfExists - Get the specified node if it's already available, or
/// else return NULL.
SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
- const SDValue *Ops, unsigned NumOps) {
- if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
+ ArrayRef<SDValue> Ops, bool nuw, bool nsw,
+ bool exact) {
+ if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
FoldingSetNodeID ID;
- AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
- void *IP = 0;
+ AddNodeIDNode(ID, Opcode, VTList, Ops);
+ if (isBinOpWithFlags(Opcode))
+ AddBinaryNodeIDCustom(ID, nuw, nsw, exact);
+ void *IP = nullptr;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return E;
}
- return NULL;
+ return nullptr;
}
/// getDbgValue - Creates a SDDbgValue node.
///
+/// SDNode
SDDbgValue *
-SelectionDAG::getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R, uint64_t Off,
+SelectionDAG::getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R,
+ bool IsIndirect, uint64_t Off,
DebugLoc DL, unsigned O) {
- return new (Allocator) SDDbgValue(MDPtr, N, R, Off, DL, O);
+ return new (Allocator) SDDbgValue(MDPtr, N, R, IsIndirect, Off, DL, O);
}
+/// Constant
SDDbgValue *
-SelectionDAG::getDbgValue(MDNode *MDPtr, const Value *C, uint64_t Off,
- DebugLoc DL, unsigned O) {
+SelectionDAG::getConstantDbgValue(MDNode *MDPtr, const Value *C,
+ uint64_t Off,
+ DebugLoc DL, unsigned O) {
return new (Allocator) SDDbgValue(MDPtr, C, Off, DL, O);
}
+/// FrameIndex
SDDbgValue *
-SelectionDAG::getDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
- DebugLoc DL, unsigned O) {
+SelectionDAG::getFrameIndexDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
+ DebugLoc DL, unsigned O) {
return new (Allocator) SDDbgValue(MDPtr, FI, Off, DL, O);
}
@@ -5641,7 +5837,7 @@ class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
SDNode::use_iterator &UI;
SDNode::use_iterator &UE;
- virtual void NodeDeleted(SDNode *N, SDNode *E) {
+ void NodeDeleted(SDNode *N, SDNode *E) override {
// Increment the iterator as needed.
while (UI != UE && N == *UI)
++UI;
@@ -5945,7 +6141,7 @@ unsigned SelectionDAG::AssignTopologicalOrder() {
// count of outstanding operands.
for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
SDNode *N = I++;
- checkForCycles(N);
+ checkForCycles(N, this);
unsigned Degree = N->getNumOperands();
if (Degree == 0) {
// A node with no uses, add it to the result array immediately.
@@ -5965,7 +6161,7 @@ unsigned SelectionDAG::AssignTopologicalOrder() {
// such that by the time the end is reached all nodes will be sorted.
for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ++I) {
SDNode *N = I;
- checkForCycles(N);
+ checkForCycles(N, this);
// N is in sorted position, so all its uses have one less operand
// that needs to be sorted.
for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
@@ -5990,9 +6186,11 @@ unsigned SelectionDAG::AssignTopologicalOrder() {
#ifndef NDEBUG
SDNode *S = ++I;
dbgs() << "Overran sorted position:\n";
- S->dumprFull();
+ S->dumprFull(this); dbgs() << "\n";
+ dbgs() << "Checking if this is due to cycles\n";
+ checkForCycles(this, true);
#endif
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
@@ -6033,6 +6231,7 @@ void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
SDDbgValue *Dbg = *I;
if (Dbg->getKind() == SDDbgValue::SDNODE) {
SDDbgValue *Clone = getDbgValue(Dbg->getMDPtr(), ToNode, To.getResNo(),
+ Dbg->isIndirect(),
Dbg->getOffset(), Dbg->getDebugLoc(),
Dbg->getOrder());
ClonedDVs.push_back(Clone);
@@ -6076,9 +6275,8 @@ MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
}
MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
- const SDValue *Ops, unsigned NumOps, EVT memvt,
- MachineMemOperand *mmo)
- : SDNode(Opc, Order, dl, VTs, Ops, NumOps),
+ ArrayRef<SDValue> Ops, EVT memvt, MachineMemOperand *mmo)
+ : SDNode(Opc, Order, dl, VTs, Ops),
MemoryVT(memvt), MMO(mmo) {
SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
MMO->isNonTemporal(), MMO->isInvariant());
@@ -6297,12 +6495,10 @@ SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
switch (N->getOpcode()) {
default:
- Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
- &Operands[0], Operands.size()));
+ Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands));
break;
case ISD::VSELECT:
- Scalars.push_back(getNode(ISD::SELECT, dl, EltVT,
- &Operands[0], Operands.size()));
+ Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
break;
case ISD::SHL:
case ISD::SRA:
@@ -6327,8 +6523,7 @@ SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
Scalars.push_back(getUNDEF(EltVT));
return getNode(ISD::BUILD_VECTOR, dl,
- EVT::getVectorVT(*getContext(), EltVT, ResNE),
- &Scalars[0], Scalars.size());
+ EVT::getVectorVT(*getContext(), EltVT, ResNE), Scalars);
}
@@ -6362,8 +6557,8 @@ bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
return true;
- const GlobalValue *GV1 = NULL;
- const GlobalValue *GV2 = NULL;
+ const GlobalValue *GV1 = nullptr;
+ const GlobalValue *GV2 = nullptr;
int64_t Offset1 = 0;
int64_t Offset2 = 0;
const TargetLowering *TLI = TM.getTargetLowering();
@@ -6385,8 +6580,8 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
unsigned PtrWidth = TLI->getPointerTypeSizeInBits(GV->getType());
APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
- llvm::ComputeMaskedBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
- TLI->getDataLayout());
+ llvm::computeKnownBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
+ TLI->getDataLayout());
unsigned AlignBits = KnownZero.countTrailingOnes();
unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
if (Align)
@@ -6448,6 +6643,22 @@ SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
return std::make_pair(Lo, Hi);
}
+void SelectionDAG::ExtractVectorElements(SDValue Op,
+ SmallVectorImpl<SDValue> &Args,
+ unsigned Start, unsigned Count) {
+ EVT VT = Op.getValueType();
+ if (Count == 0)
+ Count = VT.getVectorNumElements();
+
+ EVT EltVT = VT.getVectorElementType();
+ EVT IdxTy = TLI->getVectorIdxTy();
+ SDLoc SL(Op);
+ for (unsigned i = Start, e = Start + Count; i != e; ++i) {
+ Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
+ Op, getConstant(i, IdxTy)));
+ }
+}
+
// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
return getGlobal()->getType()->getAddressSpace();
@@ -6465,7 +6676,7 @@ bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
unsigned &SplatBitSize,
bool &HasAnyUndefs,
unsigned MinSplatBits,
- bool isBigEndian) {
+ bool isBigEndian) const {
EVT VT = getValueType(0);
assert(VT.isVector() && "Expected a vector type");
unsigned sz = VT.getSizeInBits();
@@ -6526,6 +6737,54 @@ bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
return true;
}
+SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
+ if (UndefElements) {
+ UndefElements->clear();
+ UndefElements->resize(getNumOperands());
+ }
+ SDValue Splatted;
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ SDValue Op = getOperand(i);
+ if (Op.getOpcode() == ISD::UNDEF) {
+ if (UndefElements)
+ (*UndefElements)[i] = true;
+ } else if (!Splatted) {
+ Splatted = Op;
+ } else if (Splatted != Op) {
+ return SDValue();
+ }
+ }
+
+ if (!Splatted) {
+ assert(getOperand(0).getOpcode() == ISD::UNDEF &&
+ "Can only have a splat without a constant for all undefs.");
+ return getOperand(0);
+ }
+
+ return Splatted;
+}
+
+ConstantSDNode *
+BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
+ return dyn_cast_or_null<ConstantSDNode>(
+ getSplatValue(UndefElements).getNode());
+}
+
+ConstantFPSDNode *
+BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
+ return dyn_cast_or_null<ConstantFPSDNode>(
+ getSplatValue(UndefElements).getNode());
+}
+
+bool BuildVectorSDNode::isConstant() const {
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ unsigned Opc = getOperand(i).getOpcode();
+ if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
+ return false;
+ }
+ return true;
+}
+
bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
// Find the first non-undef value in the shuffle mask.
unsigned i, e;
@@ -6542,10 +6801,11 @@ bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
return true;
}
-#ifdef XDEBUG
+#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
SmallPtrSet<const SDNode*, 32> &Visited,
- SmallPtrSet<const SDNode*, 32> &Checked) {
+ SmallPtrSet<const SDNode*, 32> &Checked,
+ const llvm::SelectionDAG *DAG) {
// If this node has already been checked, don't check it again.
if (Checked.count(N))
return;
@@ -6553,29 +6813,37 @@ static void checkForCyclesHelper(const SDNode *N,
// If a node has already been visited on this depth-first walk, reject it as
// a cycle.
if (!Visited.insert(N)) {
- dbgs() << "Offending node:\n";
- N->dumprFull();
errs() << "Detected cycle in SelectionDAG\n";
+ dbgs() << "Offending node:\n";
+ N->dumprFull(DAG); dbgs() << "\n";
abort();
}
for(unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked);
+ checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked, DAG);
Checked.insert(N);
Visited.erase(N);
}
#endif
-void llvm::checkForCycles(const llvm::SDNode *N) {
+void llvm::checkForCycles(const llvm::SDNode *N,
+ const llvm::SelectionDAG *DAG,
+ bool force) {
+#ifndef NDEBUG
+ bool check = force;
#ifdef XDEBUG
- assert(N && "Checking nonexistent SDNode");
- SmallPtrSet<const SDNode*, 32> visited;
- SmallPtrSet<const SDNode*, 32> checked;
- checkForCyclesHelper(N, visited, checked);
-#endif
+ check = true;
+#endif // XDEBUG
+ if (check) {
+ assert(N && "Checking nonexistent SDNode");
+ SmallPtrSet<const SDNode*, 32> visited;
+ SmallPtrSet<const SDNode*, 32> checked;
+ checkForCyclesHelper(N, visited, checked, DAG);
+ }
+#endif // !NDEBUG
}
-void llvm::checkForCycles(const llvm::SelectionDAG *DAG) {
- checkForCycles(DAG->getRoot().getNode());
+void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
+ checkForCycles(DAG->getRoot().getNode(), DAG, force);
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 41662a9..c07b5e6 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "isel"
#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/BitVector.h"
@@ -34,10 +33,10 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/StackMaps.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
@@ -62,6 +61,8 @@
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "isel"
+
/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;
@@ -168,7 +169,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, SDLoc DL,
SDValue Lo, Hi;
Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
- if (TLI.isBigEndian())
+ if (TLI.hasBigEndianPartOrdering(ValueVT))
std::swap(Lo, Hi);
Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
} else {
@@ -214,6 +215,20 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, SDLoc DL,
llvm_unreachable("Unknown mismatch!");
}
+static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
+ const Twine &ErrMsg) {
+ const Instruction *I = dyn_cast_or_null<Instruction>(V);
+ if (!V)
+ return Ctx.emitError(ErrMsg);
+
+ const char *AsmError = ", possible invalid constraint for vector type";
+ if (const CallInst *CI = dyn_cast<CallInst>(I))
+ if (isa<InlineAsm>(CI->getCalledValue()))
+ return Ctx.emitError(I, ErrMsg + AsmError);
+
+ return Ctx.emitError(I, ErrMsg);
+}
+
/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent. If the parts combine to a
/// type larger then ValueVT then AssertOp can be used to specify whether the
@@ -262,9 +277,9 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
// Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
// intermediate operands.
- Val = DAG.getNode(IntermediateVT.isVector() ?
- ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, DL,
- ValueVT, &Ops[0], NumIntermediates);
+ Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
+ : ISD::BUILD_VECTOR,
+ DL, ValueVT, Ops);
}
// There is now one part, held in Val. Correct it to match ValueVT.
@@ -306,16 +321,8 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
// Handle cases such as i8 -> <1 x i1>
if (ValueVT.getVectorNumElements() != 1) {
- LLVMContext &Ctx = *DAG.getContext();
- Twine ErrMsg("non-trivial scalar-to-vector conversion");
- if (const Instruction *I = dyn_cast_or_null<Instruction>(V)) {
- if (const CallInst *CI = dyn_cast<CallInst>(I))
- if (isa<InlineAsm>(CI->getCalledValue()))
- ErrMsg = ErrMsg + ", possible invalid constraint for vector type";
- Ctx.emitError(I, ErrMsg);
- } else {
- Ctx.emitError(ErrMsg);
- }
+ diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
+ "non-trivial scalar-to-vector conversion");
return DAG.getUNDEF(ValueVT);
}
@@ -397,18 +404,9 @@ static void getCopyToParts(SelectionDAG &DAG, SDLoc DL,
"Failed to tile the value with PartVT!");
if (NumParts == 1) {
- if (PartEVT != ValueVT) {
- LLVMContext &Ctx = *DAG.getContext();
- Twine ErrMsg("scalar-to-vector conversion failed");
- if (const Instruction *I = dyn_cast_or_null<Instruction>(V)) {
- if (const CallInst *CI = dyn_cast<CallInst>(I))
- if (isa<InlineAsm>(CI->getCalledValue()))
- ErrMsg = ErrMsg + ", possible invalid constraint for vector type";
- Ctx.emitError(I, ErrMsg);
- } else {
- Ctx.emitError(ErrMsg);
- }
- }
+ if (PartEVT != ValueVT)
+ diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
+ "scalar-to-vector conversion failed");
Parts[0] = Val;
return;
@@ -498,7 +496,7 @@ static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc DL,
e = PartVT.getVectorNumElements(); i != e; ++i)
Ops.push_back(DAG.getUNDEF(ElementVT));
- Val = DAG.getNode(ISD::BUILD_VECTOR, DL, PartVT, &Ops[0], Ops.size());
+ Val = DAG.getNode(ISD::BUILD_VECTOR, DL, PartVT, Ops);
// FIXME: Use CONCAT for 2x -> 4x.
@@ -627,16 +625,6 @@ namespace {
}
}
- /// areValueTypesLegal - Return true if types of all the values are legal.
- bool areValueTypesLegal(const TargetLowering &TLI) {
- for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
- MVT RegisterVT = RegVTs[Value];
- if (!TLI.isTypeLegal(RegisterVT))
- return false;
- }
- return true;
- }
-
/// append - Add the specified values to this one.
void append(const RegsForValue &RHS) {
ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
@@ -651,7 +639,7 @@ namespace {
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
SDLoc dl,
SDValue &Chain, SDValue *Flag,
- const Value *V = 0) const;
+ const Value *V = nullptr) const;
/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
/// specified value into the registers specified by this object. This uses
@@ -697,7 +685,7 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
Parts.resize(NumRegs);
for (unsigned i = 0; i != NumRegs; ++i) {
SDValue P;
- if (Flag == 0) {
+ if (!Flag) {
P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
} else {
P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
@@ -765,9 +753,7 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
Parts.clear();
}
- return DAG.getNode(ISD::MERGE_VALUES, dl,
- DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
- &Values[0], ValueVTs.size());
+ return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}
/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
@@ -798,7 +784,7 @@ void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, SDLoc dl,
SmallVector<SDValue, 8> Chains(NumRegs);
for (unsigned i = 0; i != NumRegs; ++i) {
SDValue Part;
- if (Flag == 0) {
+ if (!Flag) {
Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
} else {
Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
@@ -821,7 +807,7 @@ void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, SDLoc dl,
// = op c3, ..., f2
Chain = Chains[NumRegs-1];
else
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}
/// AddInlineAsmOperands - Add this value to the specified inlineasm node
@@ -874,7 +860,7 @@ void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa,
AA = &aa;
GFI = gfi;
LibInfo = li;
- TD = DAG.getTarget().getDataLayout();
+ DL = DAG.getTarget().getDataLayout();
Context = DAG.getContext();
LPadToCallSiteMap.clear();
}
@@ -890,8 +876,9 @@ void SelectionDAGBuilder::clear() {
UnusedArgNodeMap.clear();
PendingLoads.clear();
PendingExports.clear();
- CurInst = NULL;
+ CurInst = nullptr;
HasTailCall = false;
+ SDNodeOrder = LowestSDNodeOrder;
}
/// clearDanglingDebugInfo - Clear the dangling debug information
@@ -922,7 +909,7 @@ SDValue SelectionDAGBuilder::getRoot() {
// Otherwise, we have to make a token factor node.
SDValue Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
- &PendingLoads[0], PendingLoads.size());
+ PendingLoads);
PendingLoads.clear();
DAG.setRoot(Root);
return Root;
@@ -952,8 +939,7 @@ SDValue SelectionDAGBuilder::getControlRoot() {
}
Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
- &PendingExports[0],
- PendingExports.size());
+ PendingExports);
PendingExports.clear();
DAG.setRoot(Root);
return Root;
@@ -973,7 +959,7 @@ void SelectionDAGBuilder::visit(const Instruction &I) {
if (!isa<TerminatorInst>(&I) && !HasTailCall)
CopyToExportRegsIfNeeded(&I);
- CurInst = NULL;
+ CurInst = nullptr;
}
void SelectionDAGBuilder::visitPHI(const PHINode &) {
@@ -1003,11 +989,14 @@ void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
MDNode *Variable = DI->getVariable();
uint64_t Offset = DI->getOffset();
+ // A dbg.value for an alloca is always indirect.
+ bool IsIndirect = isa<AllocaInst>(V) || Offset != 0;
SDDbgValue *SDV;
if (Val.getNode()) {
- if (!EmitFuncArgumentDbgValue(V, Variable, Offset, Val)) {
+ if (!EmitFuncArgumentDbgValue(V, Variable, Offset, IsIndirect, Val)) {
SDV = DAG.getDbgValue(Variable, Val.getNode(),
- Val.getResNo(), Offset, dl, DbgSDNodeOrder);
+ Val.getResNo(), IsIndirect,
+ Offset, dl, DbgSDNodeOrder);
DAG.AddDbgValue(SDV, Val.getNode(), false);
}
} else
@@ -1032,7 +1021,7 @@ SDValue SelectionDAGBuilder::getValue(const Value *V) {
RegsForValue RFV(*DAG.getContext(), *TM.getTargetLowering(),
InReg, V->getType());
SDValue Chain = DAG.getEntryNode();
- N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, NULL, V);
+ N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
resolveDanglingDebugInfo(V, N);
return N;
}
@@ -1103,8 +1092,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
Constants.push_back(SDValue(Val, i));
}
- return DAG.getMergeValues(&Constants[0], Constants.size(),
- getCurSDLoc());
+ return DAG.getMergeValues(Constants, getCurSDLoc());
}
if (const ConstantDataSequential *CDS =
@@ -1119,9 +1107,9 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
}
if (isa<ArrayType>(CDS->getType()))
- return DAG.getMergeValues(&Ops[0], Ops.size(), getCurSDLoc());
+ return DAG.getMergeValues(Ops, getCurSDLoc());
return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(),
- VT, &Ops[0], Ops.size());
+ VT, Ops);
}
if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
@@ -1144,8 +1132,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
Constants[i] = DAG.getConstant(0, EltVT);
}
- return DAG.getMergeValues(&Constants[0], NumElts,
- getCurSDLoc());
+ return DAG.getMergeValues(Constants, getCurSDLoc());
}
if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
@@ -1173,8 +1160,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
}
// Create a BUILD_VECTOR node.
- return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(),
- VT, &Ops[0], Ops.size());
+ return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(), VT, Ops);
}
// If this is a static alloca, generate it as the frameindex instead of
@@ -1191,7 +1177,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
RegsForValue RFV(*DAG.getContext(), *TLI, InReg, Inst->getType());
SDValue Chain = DAG.getEntryNode();
- return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, NULL, V);
+ return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
}
llvm_unreachable("Can't get register for value!");
@@ -1235,7 +1221,7 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
}
Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
- MVT::Other, &Chains[0], NumValues);
+ MVT::Other, Chains);
} else if (I.getNumOperands() != 0) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(*TLI, I.getOperand(0)->getType(), ValueVTs);
@@ -1392,7 +1378,9 @@ SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
MachineBasicBlock *CurBB,
- MachineBasicBlock *SwitchBB) {
+ MachineBasicBlock *SwitchBB,
+ uint32_t TWeight,
+ uint32_t FWeight) {
const BasicBlock *BB = CurBB->getBasicBlock();
// If the leaf of the tree is a comparison, merge the condition into
@@ -1416,8 +1404,8 @@ SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
llvm_unreachable("Unknown compare instruction");
}
- CaseBlock CB(Condition, BOp->getOperand(0),
- BOp->getOperand(1), NULL, TBB, FBB, CurBB);
+ CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
+ TBB, FBB, CurBB, TWeight, FWeight);
SwitchCases.push_back(CB);
return;
}
@@ -1425,17 +1413,26 @@ SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
// Create a CaseBlock record representing this branch.
CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()),
- NULL, TBB, FBB, CurBB);
+ nullptr, TBB, FBB, CurBB, TWeight, FWeight);
SwitchCases.push_back(CB);
}
+/// Scale down both weights to fit into uint32_t.
+static void ScaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
+ uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
+ uint32_t Scale = (NewMax / UINT32_MAX) + 1;
+ NewTrue = NewTrue / Scale;
+ NewFalse = NewFalse / Scale;
+}
+
/// FindMergedConditions - If Cond is an expression like
void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
MachineBasicBlock *CurBB,
MachineBasicBlock *SwitchBB,
- unsigned Opc) {
+ unsigned Opc, uint32_t TWeight,
+ uint32_t FWeight) {
// If this node is not part of the or/and tree, emit it as a branch.
const Instruction *BOp = dyn_cast<Instruction>(Cond);
if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
@@ -1443,7 +1440,8 @@ void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
BOp->getParent() != CurBB->getBasicBlock() ||
!InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
!InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
- EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB);
+ EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
+ TWeight, FWeight);
return;
}
@@ -1455,6 +1453,7 @@ void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
if (Opc == Instruction::Or) {
// Codegen X | Y as:
+ // BB1:
// jmp_if_X TBB
// jmp TmpBB
// TmpBB:
@@ -1462,14 +1461,34 @@ void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
// jmp FBB
//
+ // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
+ // The requirement is that
+ // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
+ //     = TrueProb for original BB.
+ // Assuming the original weights are A and B, one choice is to set BB1's
+ // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
+ // assumes that
+ // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
+ // Another choice is to assume TrueProb for BB1 equals TrueProb for
+ // TmpBB, but the math is more complicated.
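+ // As a concrete illustration of the first choice, with original weights
+ // A = 1 and B = 3: BB1 gets weights 1 and 7 (TrueProb 1/8), TmpBB gets
+ // weights 1 and 6 (TrueProb 1/7), and the combined TrueProb is
+ // 1/8 + 7/8 * 1/7 = 1/4, which matches the original A/(A+B) = 1/4.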
+
+ uint64_t NewTrueWeight = TWeight;
+ uint64_t NewFalseWeight = (uint64_t)TWeight + 2 * (uint64_t)FWeight;
+ ScaleWeights(NewTrueWeight, NewFalseWeight);
// Emit the LHS condition.
- FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc);
+ FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
+ NewTrueWeight, NewFalseWeight);
+ NewTrueWeight = TWeight;
+ NewFalseWeight = 2 * (uint64_t)FWeight;
+ ScaleWeights(NewTrueWeight, NewFalseWeight);
// Emit the RHS condition into TmpBB.
- FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc);
+ FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
+ NewTrueWeight, NewFalseWeight);
} else {
assert(Opc == Instruction::And && "Unknown merge op!");
// Codegen X & Y as:
+ // BB1:
// jmp_if_X TmpBB
// jmp FBB
// TmpBB:
@@ -1478,11 +1497,28 @@ void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
//
// This requires creation of TmpBB after CurBB.
+ // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
+ // The requirement is that
+ // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
+ //     = FalseProb for original BB.
+ // Assuming the original weights are A and B, one choice is to set BB1's
+ // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
+ // assumes that
+ // FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
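+ // As a concrete illustration of this choice, with original weights
+ // A = 3 and B = 1: BB1 gets weights 7 and 1 (FalseProb 1/8), TmpBB gets
+ // weights 6 and 1 (FalseProb 1/7), and the combined FalseProb is
+ // 1/8 + 7/8 * 1/7 = 1/4, which matches the original B/(A+B) = 1/4.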
+
+ uint64_t NewTrueWeight = 2 * (uint64_t)TWeight + (uint64_t)FWeight;
+ uint64_t NewFalseWeight = FWeight;
+ ScaleWeights(NewTrueWeight, NewFalseWeight);
// Emit the LHS condition.
- FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc);
+ FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
+ NewTrueWeight, NewFalseWeight);
+ NewTrueWeight = 2 * (uint64_t)TWeight;
+ NewFalseWeight = FWeight;
+ ScaleWeights(NewTrueWeight, NewFalseWeight);
// Emit the RHS condition into TmpBB.
- FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc);
+ FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
+ NewTrueWeight, NewFalseWeight);
}
}
@@ -1524,7 +1560,7 @@ void SelectionDAGBuilder::visitBr(const BranchInst &I) {
MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
// Figure out which block is immediately after the current one.
- MachineBasicBlock *NextBlock = 0;
+ MachineBasicBlock *NextBlock = nullptr;
MachineFunction::iterator BBI = BrMBB;
if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
@@ -1533,8 +1569,9 @@ void SelectionDAGBuilder::visitBr(const BranchInst &I) {
// Update machine-CFG edges.
BrMBB->addSuccessor(Succ0MBB);
- // If this is not a fall-through branch, emit the branch.
- if (Succ0MBB != NextBlock)
+ // If this is not a fall-through branch or optimizations are switched off,
+ // emit the branch.
+ if (Succ0MBB != NextBlock || TM.getOptLevel() == CodeGenOpt::None)
DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
MVT::Other, getControlRoot(),
DAG.getBasicBlock(Succ0MBB)));
@@ -1569,7 +1606,8 @@ void SelectionDAGBuilder::visitBr(const BranchInst &I) {
(BOp->getOpcode() == Instruction::And ||
BOp->getOpcode() == Instruction::Or)) {
FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
- BOp->getOpcode());
+ BOp->getOpcode(), getEdgeWeight(BrMBB, Succ0MBB),
+ getEdgeWeight(BrMBB, Succ1MBB));
// If the compares in later blocks need to use values not currently
// exported from this block, export them now. This block should always
// be the first entry.
@@ -1599,7 +1637,7 @@ void SelectionDAGBuilder::visitBr(const BranchInst &I) {
// Create a CaseBlock record representing this branch.
CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
- NULL, Succ0MBB, Succ1MBB, BrMBB);
+ nullptr, Succ0MBB, Succ1MBB, BrMBB);
// Use visitSwitchCase to actually insert the fast branch sequence for this
// cond branch.
@@ -1615,7 +1653,7 @@ void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
SDLoc dl = getCurSDLoc();
// Build the setcc now.
- if (CB.CmpMHS == NULL) {
+ if (!CB.CmpMHS) {
// Fold "(X == true)" to X and "(X == false)" to !X to
// handle common cases produced by branch lowering.
if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
@@ -1656,7 +1694,7 @@ void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
// Set NextBlock to be the MBB immediately after the current one, if any.
// This is used to avoid emitting unnecessary branches to the next block.
- MachineBasicBlock *NextBlock = 0;
+ MachineBasicBlock *NextBlock = nullptr;
MachineFunction::iterator BBI = SwitchBB;
if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
@@ -1734,7 +1772,7 @@ void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
// Set NextBlock to be the MBB immediately after the current one, if any.
// This is used to avoid emitting unnecessary branches to the next block.
- MachineBasicBlock *NextBlock = 0;
+ MachineBasicBlock *NextBlock = nullptr;
MachineFunction::iterator BBI = SwitchBB;
if (++BBI != FuncInfo.MF->end())
@@ -1817,8 +1855,8 @@ void
SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
const TargetLowering *TLI = TM.getTargetLowering();
SDValue Chain = TLI->makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL,
- MVT::isVoid, 0, 0, false, getCurSDLoc(),
- false, false).second;
+ MVT::isVoid, nullptr, 0, false,
+ getCurSDLoc(), false, false).second;
DAG.setRoot(Chain);
}
@@ -1865,7 +1903,7 @@ void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
// Set NextBlock to be the MBB immediately after the current one, if any.
// This is used to avoid emitting unnecessary branches to the next block.
- MachineBasicBlock *NextBlock = 0;
+ MachineBasicBlock *NextBlock = nullptr;
MachineFunction::iterator BBI = SwitchBB;
if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
@@ -1939,7 +1977,7 @@ void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
// Set NextBlock to be the MBB immediately after the current one, if any.
// This is used to avoid emitting unnecessary branches to the next block.
- MachineBasicBlock *NextBlock = 0;
+ MachineBasicBlock *NextBlock = nullptr;
MachineFunction::iterator BBI = SwitchBB;
if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
@@ -2019,8 +2057,7 @@ void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
// Merge into one.
SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
- DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
- &Ops[0], 2);
+ DAG.getVTList(ValueVTs), Ops);
setValue(&LP, Res);
}
@@ -2041,7 +2078,7 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
MachineFunction *CurMF = FuncInfo.MF;
// Figure out which block is immediately after the current one.
- MachineBasicBlock *NextBlock = 0;
+ MachineBasicBlock *NextBlock = nullptr;
MachineFunction::iterator BBI = CR.CaseBB;
if (++BBI != FuncInfo.MF->end())
@@ -2152,7 +2189,7 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
if (I->High == I->Low) {
// This is just a small case range :) containing exactly 1 case
CC = ISD::SETEQ;
- LHS = SV; RHS = I->High; MHS = NULL;
+ LHS = SV; RHS = I->High; MHS = nullptr;
} else {
CC = ISD::SETLE;
LHS = I->Low; MHS = SV; RHS = I->High;
@@ -2359,7 +2396,7 @@ bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
volatile double RDensity =
(double)RSize.roundToDouble() /
(Last - RBegin + 1ULL).roundToDouble();
- double Metric = Range.logBase2()*(LDensity+RDensity);
+ volatile double Metric = Range.logBase2()*(LDensity+RDensity);
// Should always split in some non-trivial place
DEBUG(dbgs() <<"=>Step\n"
<< "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
@@ -2387,7 +2424,7 @@ bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
CaseRange LHSR(CR.Range.first, Pivot);
CaseRange RHSR(Pivot, CR.Range.second);
const Constant *C = Pivot->Low;
- MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
+ MachineBasicBlock *FalseBB = nullptr, *TrueBB = nullptr;
// We know that we branch to the LHS if the Value being switched on is
// less than the Pivot value, C. We use this to optimize our binary
@@ -2429,7 +2466,7 @@ bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
// Create a CaseBlock record representing a conditional branch to
// the LHS node if the value being switched on SV is less than C.
// Otherwise, branch to RHS.
- CaseBlock CB(ISD::SETLT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
+ CaseBlock CB(ISD::SETLT, SV, C, nullptr, TrueBB, FalseBB, CR.CaseBB);
if (CR.CaseBB == SwitchBB)
visitSwitchCase(CB, SwitchBB);
@@ -2598,7 +2635,7 @@ size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
if (Cases.size() >= 2)
// Must recompute end() each iteration because it may be
// invalidated by erase if we hold on to it
- for (CaseItr I = Cases.begin(), J = llvm::next(Cases.begin());
+ for (CaseItr I = Cases.begin(), J = std::next(Cases.begin());
J != Cases.end(); ) {
const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
@@ -2642,7 +2679,7 @@ void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
// Figure out which block is immediately after the current one.
- MachineBasicBlock *NextBlock = 0;
+ MachineBasicBlock *NextBlock = nullptr;
MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
// If there is only the default destination, branch to it if it is not the
@@ -2676,7 +2713,7 @@ void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
// Push the initial CaseRec onto the worklist
CaseRecVector WorkList;
- WorkList.push_back(CaseRec(SwitchMBB,0,0,
+ WorkList.push_back(CaseRec(SwitchMBB,nullptr,nullptr,
CaseRange(Cases.begin(),Cases.end())));
while (!WorkList.empty()) {
@@ -2725,6 +2762,11 @@ void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
getValue(I.getAddress())));
}
+void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
+ if (DAG.getTarget().Options.TrapUnreachable)
+ DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
+}
+
void SelectionDAGBuilder::visitFSub(const User &I) {
// -0.0 - X --> fneg
Type *Ty = I.getType();
@@ -2742,8 +2784,22 @@ void SelectionDAGBuilder::visitFSub(const User &I) {
void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
- setValue(&I, DAG.getNode(OpCode, getCurSDLoc(),
- Op1.getValueType(), Op1, Op2));
+
+ bool nuw = false;
+ bool nsw = false;
+ bool exact = false;
+ if (const OverflowingBinaryOperator *OFBinOp =
+ dyn_cast<const OverflowingBinaryOperator>(&I)) {
+ nuw = OFBinOp->hasNoUnsignedWrap();
+ nsw = OFBinOp->hasNoSignedWrap();
+ }
+ if (const PossiblyExactOperator *ExactOp =
+ dyn_cast<const PossiblyExactOperator>(&I))
+ exact = ExactOp->isExact();
+
+ SDValue BinNodeValue = DAG.getNode(OpCode, getCurSDLoc(), Op1.getValueType(),
+ Op1, Op2, nuw, nsw, exact);
+ setValue(&I, BinNodeValue);
}
void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
@@ -2774,8 +2830,25 @@ void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
}
- setValue(&I, DAG.getNode(Opcode, getCurSDLoc(),
- Op1.getValueType(), Op1, Op2));
+ bool nuw = false;
+ bool nsw = false;
+ bool exact = false;
+
+ if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
+
+ if (const OverflowingBinaryOperator *OFBinOp =
+ dyn_cast<const OverflowingBinaryOperator>(&I)) {
+ nuw = OFBinOp->hasNoUnsignedWrap();
+ nsw = OFBinOp->hasNoSignedWrap();
+ }
+ if (const PossiblyExactOperator *ExactOp =
+ dyn_cast<const PossiblyExactOperator>(&I))
+ exact = ExactOp->isExact();
+ }
+
+ SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
+ nuw, nsw, exact);
+ setValue(&I, Res);
}
void SelectionDAGBuilder::visitSDiv(const User &I) {
@@ -2847,8 +2920,7 @@ void SelectionDAGBuilder::visitSelect(const User &I) {
FalseVal.getResNo() + i));
setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
- DAG.getVTList(&ValueVTs[0], NumValues),
- &Values[0], NumValues));
+ DAG.getVTList(ValueVTs), Values));
}
void SelectionDAGBuilder::visitTrunc(const User &I) {
@@ -2944,6 +3016,13 @@ void SelectionDAGBuilder::visitBitCast(const User &I) {
if (DestVT != N.getValueType())
setValue(&I, DAG.getNode(ISD::BITCAST, getCurSDLoc(),
DestVT, N)); // convert types.
+ // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
+ // might fold any kind of constant expression to an integer constant and that
+ // is not what we are looking for. Only recognize a bitcast of a genuine
+ // constant integer as an opaque constant.
+ else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
+ setValue(&I, DAG.getConstant(C->getValue(), DestVT, /*isTarget=*/false,
+ /*isOpaque*/true));
else
setValue(&I, N); // noop cast.
}
@@ -3050,11 +3129,9 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) {
MOps2[0] = Src2;
Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
- getCurSDLoc(), VT,
- &MOps1[0], NumConcat);
+ getCurSDLoc(), VT, MOps1);
Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
- getCurSDLoc(), VT,
- &MOps2[0], NumConcat);
+ getCurSDLoc(), VT, MOps2);
// Readjust mask for new input vector length.
SmallVector<int, 8> MappedOps;
@@ -3172,8 +3249,7 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) {
Ops.push_back(Res);
}
- setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(),
- VT, &Ops[0], Ops.size()));
+ setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(), VT, Ops));
}
void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
@@ -3215,8 +3291,7 @@ void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
SDValue(Agg.getNode(), Agg.getResNo() + i);
setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
- DAG.getVTList(&AggValueVTs[0], NumAggValues),
- &Values[0], NumAggValues));
+ DAG.getVTList(AggValueVTs), Values));
}
void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
@@ -3250,8 +3325,7 @@ void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
SDValue(Agg.getNode(), Agg.getResNo() + i);
setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
- DAG.getVTList(&ValValueVTs[0], NumValValues),
- &Values[0], NumValValues));
+ DAG.getVTList(ValValueVTs), Values));
}
void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
@@ -3269,7 +3343,7 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
if (Field) {
// N = N + Offset
- uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
+ uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
N = DAG.getNode(ISD::ADD, getCurSDLoc(), N.getValueType(), N,
DAG.getConstant(Offset, N.getValueType()));
}
@@ -3283,7 +3357,7 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->isZero()) continue;
uint64_t Offs =
- TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
+ DL->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
SDValue OffsVal;
EVT PTy = TLI->getPointerTy(AS);
unsigned PtrBits = PTy.getSizeInBits();
@@ -3300,7 +3374,7 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
// N = N + Idx * ElementSize;
APInt ElementSize = APInt(TLI->getPointerSizeInBits(AS),
- TD->getTypeAllocSize(Ty));
+ DL->getTypeAllocSize(Ty));
SDValue IdxN = getValue(Idx);
// If the index is smaller or larger than intptr_t, truncate or extend
@@ -3373,8 +3447,7 @@ void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
- SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurSDLoc(),
- VTs, Ops, 3);
+ SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurSDLoc(), VTs, Ops);
setValue(&I, DSA);
DAG.setRoot(DSA.getValue(1));
@@ -3391,8 +3464,8 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
Type *Ty = I.getType();
bool isVolatile = I.isVolatile();
- bool isNonTemporal = I.getMetadata("nontemporal") != 0;
- bool isInvariant = I.getMetadata("invariant.load") != 0;
+ bool isNonTemporal = I.getMetadata("nontemporal") != nullptr;
+ bool isInvariant = I.getMetadata("invariant.load") != nullptr;
unsigned Alignment = I.getAlignment();
const MDNode *TBAAInfo = I.getMetadata(LLVMContext::MD_tbaa);
const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
@@ -3406,7 +3479,7 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
SDValue Root;
bool ConstantMemory = false;
- if (I.isVolatile() || NumValues > MaxParallelChains)
+ if (isVolatile || NumValues > MaxParallelChains)
// Serialize volatile loads with other side effects.
Root = getRoot();
else if (AA->pointsToConstantMemory(
@@ -3419,6 +3492,10 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
Root = DAG.getRoot();
}
+ const TargetLowering *TLI = TM.getTargetLowering();
+ if (isVolatile)
+ Root = TLI->prepareVolatileOrAtomicLoad(Root, getCurSDLoc(), DAG);
+
SmallVector<SDValue, 4> Values(NumValues);
SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
NumValues));
@@ -3433,8 +3510,8 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
// (MaxParallelChains should always remain as failsafe).
if (ChainI == MaxParallelChains) {
assert(PendingLoads.empty() && "PendingLoads must be serialized first");
- SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
- MVT::Other, &Chains[0], ChainI);
+ SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
+ makeArrayRef(Chains.data(), ChainI));
Root = Chain;
ChainI = 0;
}
@@ -3451,8 +3528,8 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
}
if (!ConstantMemory) {
- SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
- MVT::Other, &Chains[0], ChainI);
+ SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
+ makeArrayRef(Chains.data(), ChainI));
if (isVolatile)
DAG.setRoot(Chain);
else
@@ -3460,8 +3537,7 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
}
setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
- DAG.getVTList(&ValueVTs[0], NumValues),
- &Values[0], NumValues));
+ DAG.getVTList(ValueVTs), Values));
}
void SelectionDAGBuilder::visitStore(const StoreInst &I) {
@@ -3489,7 +3565,7 @@ void SelectionDAGBuilder::visitStore(const StoreInst &I) {
NumValues));
EVT PtrVT = Ptr.getValueType();
bool isVolatile = I.isVolatile();
- bool isNonTemporal = I.getMetadata("nontemporal") != 0;
+ bool isNonTemporal = I.getMetadata("nontemporal") != nullptr;
unsigned Alignment = I.getAlignment();
const MDNode *TBAAInfo = I.getMetadata(LLVMContext::MD_tbaa);
@@ -3497,8 +3573,8 @@ void SelectionDAGBuilder::visitStore(const StoreInst &I) {
for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
// See visitLoad comments.
if (ChainI == MaxParallelChains) {
- SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
- MVT::Other, &Chains[0], ChainI);
+ SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
+ makeArrayRef(Chains.data(), ChainI));
Root = Chain;
ChainI = 0;
}
@@ -3511,8 +3587,8 @@ void SelectionDAGBuilder::visitStore(const StoreInst &I) {
Chains[ChainI] = St;
}
- SDValue StoreNode = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
- MVT::Other, &Chains[0], ChainI);
+ SDValue StoreNode = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
+ makeArrayRef(Chains.data(), ChainI));
DAG.setRoot(StoreNode);
}
@@ -3525,48 +3601,48 @@ static SDValue InsertFenceForAtomic(SDValue Chain, AtomicOrdering Order,
if (Before) {
if (Order == AcquireRelease || Order == SequentiallyConsistent)
Order = Release;
- else if (Order == Acquire || Order == Monotonic)
+ else if (Order == Acquire || Order == Monotonic || Order == Unordered)
return Chain;
} else {
if (Order == AcquireRelease)
Order = Acquire;
- else if (Order == Release || Order == Monotonic)
+ else if (Order == Release || Order == Monotonic || Order == Unordered)
return Chain;
}
SDValue Ops[3];
Ops[0] = Chain;
Ops[1] = DAG.getConstant(Order, TLI.getPointerTy());
Ops[2] = DAG.getConstant(Scope, TLI.getPointerTy());
- return DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops, 3);
+ return DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops);
}
void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
SDLoc dl = getCurSDLoc();
- AtomicOrdering Order = I.getOrdering();
+ AtomicOrdering SuccessOrder = I.getSuccessOrdering();
+ AtomicOrdering FailureOrder = I.getFailureOrdering();
SynchronizationScope Scope = I.getSynchScope();
SDValue InChain = getRoot();
const TargetLowering *TLI = TM.getTargetLowering();
if (TLI->getInsertFencesForAtomic())
- InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
+ InChain = InsertFenceForAtomic(InChain, SuccessOrder, Scope, true, dl,
DAG, *TLI);
- SDValue L =
- DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl,
- getValue(I.getCompareOperand()).getSimpleValueType(),
- InChain,
- getValue(I.getPointerOperand()),
- getValue(I.getCompareOperand()),
- getValue(I.getNewValOperand()),
- MachinePointerInfo(I.getPointerOperand()), 0 /* Alignment */,
- TLI->getInsertFencesForAtomic() ? Monotonic : Order,
- Scope);
+ MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
+ SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
+ SDValue L = DAG.getAtomicCmpSwap(
+ ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl, MemVT, VTs, InChain,
+ getValue(I.getPointerOperand()), getValue(I.getCompareOperand()),
+ getValue(I.getNewValOperand()), MachinePointerInfo(I.getPointerOperand()),
+ 0 /* Alignment */,
+ TLI->getInsertFencesForAtomic() ? Monotonic : SuccessOrder,
+ TLI->getInsertFencesForAtomic() ? Monotonic : FailureOrder, Scope);
- SDValue OutChain = L.getValue(1);
+ SDValue OutChain = L.getValue(2);
if (TLI->getInsertFencesForAtomic())
- OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
+ OutChain = InsertFenceForAtomic(OutChain, SuccessOrder, Scope, false, dl,
DAG, *TLI);
setValue(&I, L);
@@ -3627,7 +3703,7 @@ void SelectionDAGBuilder::visitFence(const FenceInst &I) {
Ops[0] = getRoot();
Ops[1] = DAG.getConstant(I.getOrdering(), TLI->getPointerTy());
Ops[2] = DAG.getConstant(I.getSynchScope(), TLI->getPointerTy());
- DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops, 3));
+ DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
}
void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
@@ -3643,12 +3719,21 @@ void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
if (I.getAlignment() < VT.getSizeInBits() / 8)
report_fatal_error("Cannot generate unaligned atomic load");
+ MachineMemOperand *MMO =
+ DAG.getMachineFunction().
+ getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
+ MachineMemOperand::MOVolatile |
+ MachineMemOperand::MOLoad,
+ VT.getStoreSize(),
+ I.getAlignment() ? I.getAlignment() :
+ DAG.getEVTAlignment(VT));
+
+ InChain = TLI->prepareVolatileOrAtomicLoad(InChain, dl, DAG);
SDValue L =
- DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
- getValue(I.getPointerOperand()),
- I.getPointerOperand(), I.getAlignment(),
- TLI->getInsertFencesForAtomic() ? Monotonic : Order,
- Scope);
+ DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
+ getValue(I.getPointerOperand()), MMO,
+ TLI->getInsertFencesForAtomic() ? Monotonic : Order,
+ Scope);
SDValue OutChain = L.getValue(1);
@@ -3734,27 +3819,23 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
if (HasChain)
ValueVTs.push_back(MVT::Other);
- SDVTList VTs = DAG.getVTList(ValueVTs.data(), ValueVTs.size());
+ SDVTList VTs = DAG.getVTList(ValueVTs);
// Create the node.
SDValue Result;
if (IsTgtIntrinsic) {
// This is target intrinsic that touches memory
Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(),
- VTs, &Ops[0], Ops.size(),
- Info.memVT,
+ VTs, Ops, Info.memVT,
MachinePointerInfo(Info.ptrVal, Info.offset),
Info.align, Info.vol,
Info.readMem, Info.writeMem);
} else if (!HasChain) {
- Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(),
- VTs, &Ops[0], Ops.size());
+ Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
} else if (!I.getType()->isVoidTy()) {
- Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(),
- VTs, &Ops[0], Ops.size());
+ Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
} else {
- Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(),
- VTs, &Ops[0], Ops.size());
+ Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
}
if (HasChain) {
@@ -4476,7 +4557,7 @@ static unsigned getTruncatedArgReg(const SDValue &N) {
/// At the end of instruction selection, they will be inserted to the entry BB.
bool
SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
- int64_t Offset,
+ int64_t Offset, bool IsIndirect,
const SDValue &N) {
const Argument *Arg = dyn_cast<Argument>(V);
if (!Arg)
@@ -4528,8 +4609,6 @@ SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
if (!Op)
return false;
- // FIXME: This does not handle register-indirect values at offset 0.
- bool IsIndirect = Offset != 0;
if (Op->isReg())
FuncInfo.ArgDbgValues.push_back(BuildMI(MF, getCurDebugLoc(),
TII->get(TargetOpcode::DBG_VALUE),
@@ -4565,18 +4644,34 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
default:
// By default, turn this into a target intrinsic node.
visitTargetIntrinsic(I, Intrinsic);
- return 0;
- case Intrinsic::vastart: visitVAStart(I); return 0;
- case Intrinsic::vaend: visitVAEnd(I); return 0;
- case Intrinsic::vacopy: visitVACopy(I); return 0;
+ return nullptr;
+ case Intrinsic::vastart: visitVAStart(I); return nullptr;
+ case Intrinsic::vaend: visitVAEnd(I); return nullptr;
+ case Intrinsic::vacopy: visitVACopy(I); return nullptr;
case Intrinsic::returnaddress:
setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl, TLI->getPointerTy(),
getValue(I.getArgOperand(0))));
- return 0;
+ return nullptr;
case Intrinsic::frameaddress:
setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl, TLI->getPointerTy(),
getValue(I.getArgOperand(0))));
- return 0;
+ return nullptr;
+ case Intrinsic::read_register: {
+ Value *Reg = I.getArgOperand(0);
+ SDValue RegName = DAG.getMDNode(cast<MDNode>(Reg));
+ EVT VT = TM.getTargetLowering()->getValueType(I.getType());
+ setValue(&I, DAG.getNode(ISD::READ_REGISTER, sdl, VT, RegName));
+ return nullptr;
+ }
+ case Intrinsic::write_register: {
+ Value *Reg = I.getArgOperand(0);
+ Value *RegValue = I.getArgOperand(1);
+ SDValue Chain = getValue(RegValue).getOperand(0);
+ SDValue RegName = DAG.getMDNode(cast<MDNode>(Reg));
+ DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
+ RegName, getValue(RegValue)));
+ return nullptr;
+ }
case Intrinsic::setjmp:
return &"_setjmp"[!TLI->usesUnderscoreSetJmp()];
case Intrinsic::longjmp:
@@ -4599,7 +4694,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
DAG.setRoot(DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol, false,
MachinePointerInfo(I.getArgOperand(0)),
MachinePointerInfo(I.getArgOperand(1))));
- return 0;
+ return nullptr;
}
case Intrinsic::memset: {
// Assert for address < 256 since we support only user defined address
@@ -4616,7 +4711,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
DAG.setRoot(DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
MachinePointerInfo(I.getArgOperand(0))));
- return 0;
+ return nullptr;
}
case Intrinsic::memmove: {
// Assert for address < 256 since we support only user defined address
@@ -4636,7 +4731,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
DAG.setRoot(DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
MachinePointerInfo(I.getArgOperand(0)),
MachinePointerInfo(I.getArgOperand(1))));
- return 0;
+ return nullptr;
}
case Intrinsic::dbg_declare: {
const DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
@@ -4647,14 +4742,14 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
"Variable in DbgDeclareInst should be either null or a DIVariable.");
if (!Address || !DIVar) {
DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
- return 0;
+ return nullptr;
}
// Check if address has undef value.
if (isa<UndefValue>(Address) ||
(Address->use_empty() && !isa<Argument>(Address))) {
DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
- return 0;
+ return nullptr;
}
SDValue &N = NodeMap[Address];
@@ -4676,29 +4771,29 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
if (FINode)
// Byval parameter. We have a frame index at this point.
- SDV = DAG.getDbgValue(Variable, FINode->getIndex(),
- 0, dl, SDNodeOrder);
+ SDV = DAG.getFrameIndexDbgValue(Variable, FINode->getIndex(),
+ 0, dl, SDNodeOrder);
else {
// Address is an argument, so try to emit its dbg value using
// virtual register info from the FuncInfo.ValueMap.
- EmitFuncArgumentDbgValue(Address, Variable, 0, N);
- return 0;
+ EmitFuncArgumentDbgValue(Address, Variable, 0, false, N);
+ return nullptr;
}
} else if (AI)
SDV = DAG.getDbgValue(Variable, N.getNode(), N.getResNo(),
- 0, dl, SDNodeOrder);
+ true, 0, dl, SDNodeOrder);
else {
// Can't do anything with other non-AI cases yet.
DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
DEBUG(dbgs() << "non-AllocaInst issue for Address: \n\t");
DEBUG(Address->dump());
- return 0;
+ return nullptr;
}
DAG.AddDbgValue(SDV, N.getNode(), isParameter);
} else {
// If Address is an argument then try to emit its dbg value using
// virtual register info from the FuncInfo.ValueMap.
- if (!EmitFuncArgumentDbgValue(Address, Variable, 0, N)) {
+ if (!EmitFuncArgumentDbgValue(Address, Variable, 0, false, N)) {
// If the variable is pinned by an alloca in a dominating bb then
// use StaticAllocaMap.
if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) {
@@ -4706,17 +4801,17 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
DenseMap<const AllocaInst*, int>::iterator SI =
FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end()) {
- SDV = DAG.getDbgValue(Variable, SI->second,
- 0, dl, SDNodeOrder);
- DAG.AddDbgValue(SDV, 0, false);
- return 0;
+ SDV = DAG.getFrameIndexDbgValue(Variable, SI->second,
+ 0, dl, SDNodeOrder);
+ DAG.AddDbgValue(SDV, nullptr, false);
+ return nullptr;
}
}
}
DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
}
}
- return 0;
+ return nullptr;
}
case Intrinsic::dbg_value: {
const DbgValueInst &DI = cast<DbgValueInst>(I);
@@ -4724,18 +4819,18 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
assert((!DIVar || DIVar.isVariable()) &&
"Variable in DbgValueInst should be either null or a DIVariable.");
if (!DIVar)
- return 0;
+ return nullptr;
MDNode *Variable = DI.getVariable();
uint64_t Offset = DI.getOffset();
const Value *V = DI.getValue();
if (!V)
- return 0;
+ return nullptr;
SDDbgValue *SDV;
if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V)) {
- SDV = DAG.getDbgValue(Variable, V, Offset, dl, SDNodeOrder);
- DAG.AddDbgValue(SDV, 0, false);
+ SDV = DAG.getConstantDbgValue(Variable, V, Offset, dl, SDNodeOrder);
+ DAG.AddDbgValue(SDV, nullptr, false);
} else {
// Do not use getValue() in here; we don't want to generate code at
// this point if it hasn't been done yet.
@@ -4744,9 +4839,12 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
// Check unused arguments map.
N = UnusedArgNodeMap[V];
if (N.getNode()) {
- if (!EmitFuncArgumentDbgValue(V, Variable, Offset, N)) {
+ // A dbg.value for an alloca is always indirect.
+ bool IsIndirect = isa<AllocaInst>(V) || Offset != 0;
+ if (!EmitFuncArgumentDbgValue(V, Variable, Offset, IsIndirect, N)) {
SDV = DAG.getDbgValue(Variable, N.getNode(),
- N.getResNo(), Offset, dl, SDNodeOrder);
+ N.getResNo(), IsIndirect,
+ Offset, dl, SDNodeOrder);
DAG.AddDbgValue(SDV, N.getNode(), false);
}
} else if (!V->use_empty() ) {
@@ -4769,18 +4867,13 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
if (!AI) {
DEBUG(dbgs() << "Dropping debug location info for:\n " << DI << "\n");
DEBUG(dbgs() << " Last seen at:\n " << *V << "\n");
- return 0;
+ return nullptr;
}
DenseMap<const AllocaInst*, int>::iterator SI =
FuncInfo.StaticAllocaMap.find(AI);
if (SI == FuncInfo.StaticAllocaMap.end())
- return 0; // VLAs.
- int FI = SI->second;
-
- MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
- if (!DI.getDebugLoc().isUnknown() && MMI.hasDebugInfo())
- MMI.setVariableDbgInfo(Variable, FI, DI.getDebugLoc());
- return 0;
+ return nullptr; // VLAs.
+ return nullptr;
}
case Intrinsic::eh_typeid_for: {
@@ -4789,7 +4882,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
unsigned TypeID = DAG.getMachineFunction().getMMI().getTypeIDFor(GV);
Res = DAG.getConstant(TypeID, MVT::i32);
setValue(&I, Res);
- return 0;
+ return nullptr;
}
case Intrinsic::eh_return_i32:
@@ -4800,10 +4893,10 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
getControlRoot(),
getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1))));
- return 0;
+ return nullptr;
case Intrinsic::eh_unwind_init:
DAG.getMachineFunction().getMMI().setCallsUnwindInit(true);
- return 0;
+ return nullptr;
case Intrinsic::eh_dwarf_cfa: {
SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getArgOperand(0)), sdl,
TLI->getPointerTy());
@@ -4817,7 +4910,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
DAG.getConstant(0, TLI->getPointerTy()));
setValue(&I, DAG.getNode(ISD::ADD, sdl, FA.getValueType(),
FA, Offset));
- return 0;
+ return nullptr;
}
case Intrinsic::eh_sjlj_callsite: {
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
@@ -4826,7 +4919,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
MMI.setCurrentCallSite(CI->getZExtValue());
- return 0;
+ return nullptr;
}
case Intrinsic::eh_sjlj_functioncontext: {
// Get and store the index of the function context.
@@ -4835,23 +4928,22 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
int FI = FuncInfo.StaticAllocaMap[FnCtx];
MFI->setFunctionContextIndex(FI);
- return 0;
+ return nullptr;
}
case Intrinsic::eh_sjlj_setjmp: {
SDValue Ops[2];
Ops[0] = getRoot();
Ops[1] = getValue(I.getArgOperand(0));
SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
- DAG.getVTList(MVT::i32, MVT::Other),
- Ops, 2);
+ DAG.getVTList(MVT::i32, MVT::Other), Ops);
setValue(&I, Op.getValue(0));
DAG.setRoot(Op.getValue(1));
- return 0;
+ return nullptr;
}
case Intrinsic::eh_sjlj_longjmp: {
DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
getRoot(), getValue(I.getArgOperand(0))));
- return 0;
+ return nullptr;
}
case Intrinsic::x86_mmx_pslli_w:
@@ -4865,7 +4957,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
SDValue ShAmt = getValue(I.getArgOperand(1));
if (isa<ConstantSDNode>(ShAmt)) {
visitTargetIntrinsic(I, Intrinsic);
- return 0;
+ return nullptr;
}
unsigned NewIntrinsic = 0;
EVT ShAmtVT = MVT::v2i32;
@@ -4904,14 +4996,14 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
SDValue ShOps[2];
ShOps[0] = ShAmt;
ShOps[1] = DAG.getConstant(0, MVT::i32);
- ShAmt = DAG.getNode(ISD::BUILD_VECTOR, sdl, ShAmtVT, &ShOps[0], 2);
+ ShAmt = DAG.getNode(ISD::BUILD_VECTOR, sdl, ShAmtVT, ShOps);
EVT DestVT = TLI->getValueType(I.getType());
ShAmt = DAG.getNode(ISD::BITCAST, sdl, DestVT, ShAmt);
Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, sdl, DestVT,
DAG.getConstant(NewIntrinsic, MVT::i32),
getValue(I.getArgOperand(0)), ShAmt);
setValue(&I, Res);
- return 0;
+ return nullptr;
}
case Intrinsic::x86_avx_vinsertf128_pd_256:
case Intrinsic::x86_avx_vinsertf128_ps_256:
@@ -4926,7 +5018,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
getValue(I.getArgOperand(1)),
DAG.getConstant(Idx, TLI->getVectorIdxTy()));
setValue(&I, Res);
- return 0;
+ return nullptr;
}
case Intrinsic::x86_avx_vextractf128_pd_256:
case Intrinsic::x86_avx_vextractf128_ps_256:
@@ -4939,7 +5031,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
getValue(I.getArgOperand(0)),
DAG.getConstant(Idx, TLI->getVectorIdxTy()));
setValue(&I, Res);
- return 0;
+ return nullptr;
}
case Intrinsic::convertff:
case Intrinsic::convertfsi:
@@ -4972,31 +5064,31 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
getValue(I.getArgOperand(2)),
Code);
setValue(&I, Res);
- return 0;
+ return nullptr;
}
case Intrinsic::powi:
setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1)), DAG));
- return 0;
+ return nullptr;
case Intrinsic::log:
setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
- return 0;
+ return nullptr;
case Intrinsic::log2:
setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
- return 0;
+ return nullptr;
case Intrinsic::log10:
setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
- return 0;
+ return nullptr;
case Intrinsic::exp:
setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
- return 0;
+ return nullptr;
case Intrinsic::exp2:
setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
- return 0;
+ return nullptr;
case Intrinsic::pow:
setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1)), DAG, *TLI));
- return 0;
+ return nullptr;
case Intrinsic::sqrt:
case Intrinsic::fabs:
case Intrinsic::sin:
@@ -5025,21 +5117,21 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
setValue(&I, DAG.getNode(Opcode, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0))));
- return 0;
+ return nullptr;
}
case Intrinsic::copysign:
setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1))));
- return 0;
+ return nullptr;
case Intrinsic::fma:
setValue(&I, DAG.getNode(ISD::FMA, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1)),
getValue(I.getArgOperand(2))));
- return 0;
+ return nullptr;
case Intrinsic::fmuladd: {
EVT VT = TLI->getValueType(I.getType());
if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
@@ -5060,42 +5152,45 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
getValue(I.getArgOperand(2)));
setValue(&I, Add);
}
- return 0;
+ return nullptr;
}
case Intrinsic::convert_to_fp16:
- setValue(&I, DAG.getNode(ISD::FP32_TO_FP16, sdl,
- MVT::i16, getValue(I.getArgOperand(0))));
- return 0;
+ setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
+ DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
+ getValue(I.getArgOperand(0)),
+ DAG.getTargetConstant(0, MVT::i32))));
+ return nullptr;
case Intrinsic::convert_from_fp16:
- setValue(&I, DAG.getNode(ISD::FP16_TO_FP32, sdl,
- MVT::f32, getValue(I.getArgOperand(0))));
- return 0;
+ setValue(&I,
+ DAG.getNode(ISD::FP_EXTEND, sdl, TLI->getValueType(I.getType()),
+ DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
+ getValue(I.getArgOperand(0)))));
+ return nullptr;
case Intrinsic::pcmarker: {
SDValue Tmp = getValue(I.getArgOperand(0));
DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
- return 0;
+ return nullptr;
}
case Intrinsic::readcyclecounter: {
SDValue Op = getRoot();
Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
- DAG.getVTList(MVT::i64, MVT::Other),
- &Op, 1);
+ DAG.getVTList(MVT::i64, MVT::Other), Op);
setValue(&I, Res);
DAG.setRoot(Res.getValue(1));
- return 0;
+ return nullptr;
}
case Intrinsic::bswap:
setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0))));
- return 0;
+ return nullptr;
case Intrinsic::cttz: {
SDValue Arg = getValue(I.getArgOperand(0));
ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
EVT Ty = Arg.getValueType();
setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
sdl, Ty, Arg));
- return 0;
+ return nullptr;
}
case Intrinsic::ctlz: {
SDValue Arg = getValue(I.getArgOperand(0));
@@ -5103,26 +5198,26 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
EVT Ty = Arg.getValueType();
setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
sdl, Ty, Arg));
- return 0;
+ return nullptr;
}
case Intrinsic::ctpop: {
SDValue Arg = getValue(I.getArgOperand(0));
EVT Ty = Arg.getValueType();
setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
- return 0;
+ return nullptr;
}
case Intrinsic::stacksave: {
SDValue Op = getRoot();
Res = DAG.getNode(ISD::STACKSAVE, sdl,
- DAG.getVTList(TLI->getPointerTy(), MVT::Other), &Op, 1);
+ DAG.getVTList(TLI->getPointerTy(), MVT::Other), Op);
setValue(&I, Res);
DAG.setRoot(Res.getValue(1));
- return 0;
+ return nullptr;
}
case Intrinsic::stackrestore: {
Res = getValue(I.getArgOperand(0));
DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
- return 0;
+ return nullptr;
}
case Intrinsic::stackprotector: {
// Emit code into the DAG to store the stack guard onto the stack.
@@ -5144,7 +5239,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
true, false, 0);
setValue(&I, Res);
DAG.setRoot(Res);
- return 0;
+ return nullptr;
}
case Intrinsic::objectsize: {
// If we don't know by now, we're never going to know.
@@ -5161,16 +5256,16 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
Res = DAG.getConstant(0, Ty);
setValue(&I, Res);
- return 0;
+ return nullptr;
}
case Intrinsic::annotation:
case Intrinsic::ptr_annotation:
// Drop the intrinsic, but forward the value
setValue(&I, getValue(I.getOperand(0)));
- return 0;
+ return nullptr;
case Intrinsic::var_annotation:
// Discard annotate attributes
- return 0;
+ return nullptr;
case Intrinsic::init_trampoline: {
const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
@@ -5183,16 +5278,16 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
Ops[5] = DAG.getSrcValue(F);
- Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops, 6);
+ Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
DAG.setRoot(Res);
- return 0;
+ return nullptr;
}
case Intrinsic::adjust_trampoline: {
setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
TLI->getPointerTy(),
getValue(I.getArgOperand(0))));
- return 0;
+ return nullptr;
}
case Intrinsic::gcroot:
if (GFI) {
@@ -5202,18 +5297,18 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
GFI->addStackRoot(FI->getIndex(), TypeMap);
}
- return 0;
+ return nullptr;
case Intrinsic::gcread:
case Intrinsic::gcwrite:
llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
case Intrinsic::flt_rounds:
setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, sdl, MVT::i32));
- return 0;
+ return nullptr;
case Intrinsic::expect: {
// Just replace __builtin_expect(exp, c) with EXP.
setValue(&I, getValue(I.getArgOperand(0)));
- return 0;
+ return nullptr;
}
case Intrinsic::debugtrap:
@@ -5223,20 +5318,19 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
ISD::TRAP : ISD::DEBUGTRAP;
DAG.setRoot(DAG.getNode(Op, sdl,MVT::Other, getRoot()));
- return 0;
+ return nullptr;
}
TargetLowering::ArgListTy Args;
- TargetLowering::
- CallLoweringInfo CLI(getRoot(), I.getType(),
- false, false, false, false, 0, CallingConv::C,
- /*isTailCall=*/false,
- /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
- DAG.getExternalSymbol(TrapFuncName.data(),
- TLI->getPointerTy()),
- Args, DAG, sdl);
+
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(sdl).setChain(getRoot())
+ .setCallee(CallingConv::C, I.getType(),
+ DAG.getExternalSymbol(TrapFuncName.data(), TLI->getPointerTy()),
+ std::move(Args), 0);
+
std::pair<SDValue, SDValue> Result = TLI->LowerCallTo(CLI);
DAG.setRoot(Result.second);
- return 0;
+ return nullptr;
}
case Intrinsic::uadd_with_overflow:
@@ -5260,7 +5354,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
- return 0;
+ return nullptr;
}
case Intrinsic::prefetch: {
SDValue Ops[5];
@@ -5271,25 +5365,24 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
Ops[3] = getValue(I.getArgOperand(2));
Ops[4] = getValue(I.getArgOperand(3));
DAG.setRoot(DAG.getMemIntrinsicNode(ISD::PREFETCH, sdl,
- DAG.getVTList(MVT::Other),
- &Ops[0], 5,
+ DAG.getVTList(MVT::Other), Ops,
EVT::getIntegerVT(*Context, 8),
MachinePointerInfo(I.getArgOperand(0)),
0, /* align */
false, /* volatile */
rw==0, /* read */
rw==1)); /* write */
- return 0;
+ return nullptr;
}
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end: {
bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
// Stack coloring is not enabled in O0, discard region information.
if (TM.getOptLevel() == CodeGenOpt::None)
- return 0;
+ return nullptr;
SmallVector<Value *, 4> Allocas;
- GetUnderlyingObjects(I.getArgOperand(1), Allocas, TD);
+ GetUnderlyingObjects(I.getArgOperand(1), Allocas, DL);
for (SmallVectorImpl<Value*>::iterator Object = Allocas.begin(),
E = Allocas.end(); Object != E; ++Object) {
@@ -5306,18 +5399,18 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
Ops[1] = DAG.getFrameIndex(FI, TLI->getPointerTy(), true);
unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END);
- Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops, 2);
+ Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops);
DAG.setRoot(Res);
}
- return 0;
+ return nullptr;
}
case Intrinsic::invariant_start:
// Discard region information.
setValue(&I, DAG.getUNDEF(TLI->getPointerTy()));
- return 0;
+ return nullptr;
case Intrinsic::invariant_end:
// Discard region information.
- return 0;
+ return nullptr;
case Intrinsic::stackprotectorcheck: {
// Do not actually emit anything for this basic block. Instead we initialize
// the stack protector descriptor and export the guard variable so we can
@@ -5328,19 +5421,21 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
// Flush our exports since we are going to process a terminator.
(void)getControlRoot();
- return 0;
+ return nullptr;
}
+ case Intrinsic::clear_cache:
+ return TLI->getClearCacheBuiltinName();
case Intrinsic::donothing:
// ignore
- return 0;
+ return nullptr;
case Intrinsic::experimental_stackmap: {
visitStackmap(I);
- return 0;
+ return nullptr;
}
case Intrinsic::experimental_patchpoint_void:
case Intrinsic::experimental_patchpoint_i64: {
visitPatchpoint(I);
- return 0;
+ return nullptr;
}
}
}
@@ -5348,53 +5443,17 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
bool isTailCall,
MachineBasicBlock *LandingPad) {
+ const TargetLowering *TLI = TM.getTargetLowering();
PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
FunctionType *FTy = cast<FunctionType>(PT->getElementType());
Type *RetTy = FTy->getReturnType();
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
- MCSymbol *BeginLabel = 0;
+ MCSymbol *BeginLabel = nullptr;
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Args.reserve(CS.arg_size());
- // Check whether the function can return without sret-demotion.
- SmallVector<ISD::OutputArg, 4> Outs;
- const TargetLowering *TLI = TM.getTargetLowering();
- GetReturnInfo(RetTy, CS.getAttributes(), Outs, *TLI);
-
- bool CanLowerReturn = TLI->CanLowerReturn(CS.getCallingConv(),
- DAG.getMachineFunction(),
- FTy->isVarArg(), Outs,
- FTy->getContext());
-
- SDValue DemoteStackSlot;
- int DemoteStackIdx = -100;
-
- if (!CanLowerReturn) {
- uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(
- FTy->getReturnType());
- unsigned Align = TLI->getDataLayout()->getPrefTypeAlignment(
- FTy->getReturnType());
- MachineFunction &MF = DAG.getMachineFunction();
- DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
- Type *StackSlotPtrType = PointerType::getUnqual(FTy->getReturnType());
-
- DemoteStackSlot = DAG.getFrameIndex(DemoteStackIdx, TLI->getPointerTy());
- Entry.Node = DemoteStackSlot;
- Entry.Ty = StackSlotPtrType;
- Entry.isSExt = false;
- Entry.isZExt = false;
- Entry.isInReg = false;
- Entry.isSRet = true;
- Entry.isNest = false;
- Entry.isByVal = false;
- Entry.isReturned = false;
- Entry.Alignment = Align;
- Args.push_back(Entry);
- RetTy = Type::getVoidTy(FTy->getContext());
- }
-
for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
i != e; ++i) {
const Value *V = *i;
@@ -5435,58 +5494,20 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
// Check if target-independent constraints permit a tail call here.
// Target-dependent constraints are checked within TLI->LowerCallTo.
- if (isTailCall && !isInTailCallPosition(CS, *TLI))
+ if (isTailCall && !isInTailCallPosition(CS, DAG.getTarget()))
isTailCall = false;
- TargetLowering::
- CallLoweringInfo CLI(getRoot(), RetTy, FTy, isTailCall, Callee, Args, DAG,
- getCurSDLoc(), CS);
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(getCurSDLoc()).setChain(getRoot())
+ .setCallee(RetTy, FTy, Callee, std::move(Args), CS).setTailCall(isTailCall);
+
std::pair<SDValue,SDValue> Result = TLI->LowerCallTo(CLI);
assert((isTailCall || Result.second.getNode()) &&
"Non-null chain expected with non-tail call!");
assert((Result.second.getNode() || !Result.first.getNode()) &&
"Null value expected with tail call!");
- if (Result.first.getNode()) {
+ if (Result.first.getNode())
setValue(CS.getInstruction(), Result.first);
- } else if (!CanLowerReturn && Result.second.getNode()) {
- // The instruction result is the result of loading from the
- // hidden sret parameter.
- SmallVector<EVT, 1> PVTs;
- Type *PtrRetTy = PointerType::getUnqual(FTy->getReturnType());
-
- ComputeValueVTs(*TLI, PtrRetTy, PVTs);
- assert(PVTs.size() == 1 && "Pointers should fit in one register");
- EVT PtrVT = PVTs[0];
-
- SmallVector<EVT, 4> RetTys;
- SmallVector<uint64_t, 4> Offsets;
- RetTy = FTy->getReturnType();
- ComputeValueVTs(*TLI, RetTy, RetTys, &Offsets);
-
- unsigned NumValues = RetTys.size();
- SmallVector<SDValue, 4> Values(NumValues);
- SmallVector<SDValue, 4> Chains(NumValues);
-
- for (unsigned i = 0; i < NumValues; ++i) {
- SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(), PtrVT,
- DemoteStackSlot,
- DAG.getConstant(Offsets[i], PtrVT));
- SDValue L = DAG.getLoad(RetTys[i], getCurSDLoc(), Result.second, Add,
- MachinePointerInfo::getFixedStack(DemoteStackIdx, Offsets[i]),
- false, false, false, 1);
- Values[i] = L;
- Chains[i] = L.getValue(1);
- }
-
- SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
- MVT::Other, &Chains[0], NumValues);
- PendingLoads.push_back(Chain);
-
- setValue(CS.getInstruction(),
- DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
- DAG.getVTList(&RetTys[0], RetTys.size()),
- &Values[0], Values.size()));
- }
if (!Result.second.getNode()) {
// As a special case, a null chain means that a tail call has been emitted
@@ -5514,9 +5535,8 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
/// IsOnlyUsedInZeroEqualityComparison - Return true if it only matters that the
/// value is equal or not-equal to zero.
static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) {
- for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
- UI != E; ++UI) {
- if (const ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
+ for (const User *U : V->users()) {
+ if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
if (IC->isEquality())
if (const Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
if (C->isNullValue())
@@ -5540,7 +5560,7 @@ static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
if (const Constant *LoadCst =
ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
- Builder.TD))
+ Builder.DL))
return Builder.getValue(LoadCst);
}
@@ -5626,7 +5646,7 @@ bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
switch (CSize->getZExtValue()) {
default:
LoadVT = MVT::Other;
- LoadTy = 0;
+ LoadTy = nullptr;
ActuallyDoIt = false;
break;
case 2:
@@ -5659,9 +5679,13 @@ bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
// bloat the code.
const TargetLowering *TLI = TM.getTargetLowering();
if (ActuallyDoIt && CSize->getZExtValue() > 4) {
+ unsigned DstAS = LHS->getType()->getPointerAddressSpace();
+ unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
// TODO: Handle 5 byte compare as 4-byte + 1 byte.
// TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
- if (!TLI->isTypeLegal(LoadVT) ||!TLI->allowsUnalignedMemoryAccesses(LoadVT))
+ if (!TLI->isTypeLegal(LoadVT) ||
+ !TLI->allowsUnalignedMemoryAccesses(LoadVT, SrcAS) ||
+ !TLI->allowsUnalignedMemoryAccesses(LoadVT, DstAS))
ActuallyDoIt = false;
}
@@ -5849,7 +5873,7 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
ComputeUsesVAFloatArgument(I, &MMI);
- const char *RenameFn = 0;
+ const char *RenameFn = nullptr;
if (Function *F = I.getCalledFunction()) {
if (F->isDeclaration()) {
if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo()) {
@@ -6024,7 +6048,7 @@ public:
RegsForValue AssignedRegs;
explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
- : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
+ : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr,0) {
}
/// getCallOperandValEVT - Return the EVT of the Value* that this operand
@@ -6032,8 +6056,8 @@ public:
/// MVT::Other.
EVT getCallOperandValEVT(LLVMContext &Context,
const TargetLowering &TLI,
- const DataLayout *TD) const {
- if (CallOperandVal == 0) return MVT::Other;
+ const DataLayout *DL) const {
+ if (!CallOperandVal) return MVT::Other;
if (isa<BasicBlock>(CallOperandVal))
return TLI.getPointerTy();
@@ -6058,7 +6082,7 @@ public:
// If OpTy is not a single value, it may be a struct/union that we
// can tile with integers.
if (!OpTy->isSingleValueType() && OpTy->isSized()) {
- unsigned BitSize = TD->getTypeSizeInBits(OpTy);
+ unsigned BitSize = DL->getTypeSizeInBits(OpTy);
switch (BitSize) {
default: break;
case 1:
@@ -6114,7 +6138,7 @@ static void GetRegistersForValue(SelectionDAG &DAG,
// types are identical size, use a bitcast to convert (e.g. two differing
// vector types).
MVT RegVT = *PhysReg.second->vt_begin();
- if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
+ if (RegVT.getSizeInBits() == OpInfo.CallOperand.getValueSizeInBits()) {
OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
RegVT, OpInfo.CallOperand);
OpInfo.ConstraintVT = RegVT;
@@ -6247,7 +6271,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
}
- OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), *TLI, TD).
+ OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), *TLI, DL).
getSimpleVT();
}
@@ -6354,7 +6378,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
}
// There is no longer a Value* corresponding to this operand.
- OpInfo.CallOperandVal = 0;
+ OpInfo.CallOperandVal = nullptr;
// It is now an indirect operand.
OpInfo.isIndirect = true;
@@ -6643,8 +6667,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
Chain = DAG.getNode(ISD::INLINEASM, getCurSDLoc(),
- DAG.getVTList(MVT::Other, MVT::Glue),
- &AsmNodeOperands[0], AsmNodeOperands.size());
+ DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
Flag = Chain.getValue(1);
// If this asm returns a register value, copy the result from that register
@@ -6707,8 +6730,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
}
if (!OutChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
- &OutChains[0], OutChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
DAG.setRoot(Chain);
}
@@ -6722,11 +6744,11 @@ void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
const TargetLowering *TLI = TM.getTargetLowering();
- const DataLayout &TD = *TLI->getDataLayout();
+ const DataLayout &DL = *TLI->getDataLayout();
SDValue V = DAG.getVAArg(TLI->getValueType(I.getType()), getCurSDLoc(),
getRoot(), getValue(I.getOperand(0)),
DAG.getSrcValue(I.getOperand(0)),
- TD.getABITypeAlignment(I.getType()));
+ DL.getABITypeAlignment(I.getType()));
setValue(&I, V);
DAG.setRoot(V.getValue(1));
}
@@ -6778,15 +6800,51 @@ SelectionDAGBuilder::LowerCallOperands(const CallInst &CI, unsigned ArgIdx,
}
Type *retTy = useVoidTy ? Type::getVoidTy(*DAG.getContext()) : CI.getType();
- TargetLowering::CallLoweringInfo CLI(getRoot(), retTy, /*retSExt*/ false,
- /*retZExt*/ false, /*isVarArg*/ false, /*isInReg*/ false, NumArgs,
- CI.getCallingConv(), /*isTailCall*/ false, /*doesNotReturn*/ false,
- /*isReturnValueUsed*/ CI.use_empty(), Callee, Args, DAG, getCurSDLoc());
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(getCurSDLoc()).setChain(getRoot())
+ .setCallee(CI.getCallingConv(), retTy, Callee, std::move(Args), NumArgs)
+ .setDiscardResult(!CI.use_empty());
const TargetLowering *TLI = TM.getTargetLowering();
return TLI->LowerCallTo(CLI);
}
+/// \brief Add a stack map intrinsic call's live variable operands to a stackmap
+/// or patchpoint target node's operand list.
+///
+/// Constants are converted to TargetConstants purely as an optimization to
+/// avoid constant materialization and register allocation.
+///
+/// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
+/// generate address computation nodes, and so ExpandISelPseudo can convert the
+/// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
+/// address materialization and register allocation, but may also be required
+/// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
+/// alloca in the entry block, then the runtime may assume that the alloca's
+/// StackMap location can be read immediately after compilation and that the
+/// location is valid at any point during execution (this is similar to the
+/// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
+/// only available in a register, then the runtime would need to trap when
+/// execution reaches the StackMap in order to read the alloca's location.
+static void addStackMapLiveVars(const CallInst &CI, unsigned StartIdx,
+ SmallVectorImpl<SDValue> &Ops,
+ SelectionDAGBuilder &Builder) {
+ for (unsigned i = StartIdx, e = CI.getNumArgOperands(); i != e; ++i) {
+ SDValue OpVal = Builder.getValue(CI.getArgOperand(i));
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
+ Ops.push_back(
+ Builder.DAG.getTargetConstant(StackMaps::ConstantOp, MVT::i64));
+ Ops.push_back(
+ Builder.DAG.getTargetConstant(C->getSExtValue(), MVT::i64));
+ } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
+ const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
+ Ops.push_back(
+ Builder.DAG.getTargetFrameIndex(FI->getIndex(), TLI.getPointerTy()));
+ } else
+ Ops.push_back(OpVal);
+ }
+}
+
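[Editor's note: illustrative example, not part of the patch. For a hypothetical call

    call void @llvm.experimental.stackmap(i64 7, i32 0, i32 42, i32* %slot, i32 %x)

where %slot is an entry-block alloca, the helper above (StartIdx == 2) would append a
TargetConstant(StackMaps::ConstantOp) / TargetConstant(42) pair for the literal, a
TargetFrameIndex for %slot, and the already-lowered SDValue for %x unchanged.]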
/// \brief Lower llvm.experimental.stackmap directly to its target opcode.
void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
// void @llvm.experimental.stackmap(i32 <id>, i32 <numShadowBytes>,
@@ -6794,61 +6852,64 @@ void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
- SDValue Callee = getValue(CI.getCalledValue());
+ SDValue Chain, InFlag, Callee, NullPtr;
+ SmallVector<SDValue, 32> Ops;
- // Lower into a call sequence with no args and no return value.
- std::pair<SDValue, SDValue> Result = LowerCallOperands(CI, 0, 0, Callee);
- // Set the root to the target-lowered call chain.
- SDValue Chain = Result.second;
- DAG.setRoot(Chain);
+ SDLoc DL = getCurSDLoc();
+ Callee = getValue(CI.getCalledValue());
+ NullPtr = DAG.getIntPtrConstant(0, true);
- /// Get a call instruction from the call sequence chain.
- /// Tail calls are not allowed.
- SDNode *CallEnd = Chain.getNode();
- assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
- "Expected a callseq node.");
- SDNode *Call = CallEnd->getOperand(0).getNode();
- bool hasGlue = Call->getGluedNode();
+ // The stackmap intrinsic only records the live variables (the arguments
+ // passed to it) and emits NOPs (if requested). Unlike the patchpoint
+ // intrinsic, this won't be lowered to a function call. This means we don't
+ // have to worry about calling conventions and target specific lowering code.
+ // Instead we perform the call lowering right here.
+ //
+ // chain, flag = CALLSEQ_START(chain, 0)
+ // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
+ // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
+ //
+ Chain = DAG.getCALLSEQ_START(getRoot(), NullPtr, DL);
+ InFlag = Chain.getValue(1);
- // Replace the target specific call node with the stackmap intrinsic.
- SmallVector<SDValue, 8> Ops;
+ // Add the <id> and <numBytes> constants.
+ SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
+ Ops.push_back(DAG.getTargetConstant(
+ cast<ConstantSDNode>(IDVal)->getZExtValue(), MVT::i64));
+ SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
+ Ops.push_back(DAG.getTargetConstant(
+ cast<ConstantSDNode>(NBytesVal)->getZExtValue(), MVT::i32));
- // Add the <id> and <numShadowBytes> constants.
- for (unsigned i = 0; i < 2; ++i) {
- SDValue tmp = getValue(CI.getOperand(i));
- Ops.push_back(DAG.getTargetConstant(
- cast<ConstantSDNode>(tmp)->getZExtValue(), MVT::i32));
- }
// Push live variables for the stack map.
- for (unsigned i = 2, e = CI.getNumArgOperands(); i != e; ++i)
- Ops.push_back(getValue(CI.getArgOperand(i)));
+ addStackMapLiveVars(CI, 2, Ops, *this);
- // Push the chain (this is originally the first operand of the call, but
- // becomes now the last or second to last operand).
- Ops.push_back(*(Call->op_begin()));
+ // We are not pushing any register mask info here on the operands list,
+ // because the stackmap doesn't clobber anything.
- // Push the glue flag (last operand).
- if (hasGlue)
- Ops.push_back(*(Call->op_end()-1));
+ // Push the chain and the glue flag.
+ Ops.push_back(Chain);
+ Ops.push_back(InFlag);
+ // Create the STACKMAP node.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
+ Chain = SDValue(SM, 0);
+ InFlag = Chain.getValue(1);
- // Replace the target specific call node with a STACKMAP node.
- MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::STACKMAP, getCurSDLoc(),
- NodeTys, Ops);
+ Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);
- // StackMap generates no value, so nothing goes in the NodeMap.
+ // Stackmaps don't generate values, so nothing goes into the NodeMap.
- // Fixup the consumers of the intrinsic. The chain and glue may be used in the
- // call sequence.
- DAG.ReplaceAllUsesWith(Call, MN);
+ // Set the root to the target-lowered call chain.
+ DAG.setRoot(Chain);
- DAG.DeleteNode(Call);
+ // Inform the Frame Information that we have a stackmap in this function.
+ FuncInfo.MF->getFrameInfo()->setHasStackMap();
}
/// \brief Lower llvm.experimental.patchpoint directly to its target opcode.
void SelectionDAGBuilder::visitPatchpoint(const CallInst &CI) {
- // void|i64 @llvm.experimental.patchpoint.void|i64(i32 <id>,
+ // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
// i32 <numBytes>,
// i8* <target>,
// i32 <numArgs>,
@@ -6861,17 +6922,19 @@ void SelectionDAGBuilder::visitPatchpoint(const CallInst &CI) {
SDValue Callee = getValue(CI.getOperand(2)); // <target>
// Get the real number of arguments participating in the call <numArgs>
- unsigned NumArgs =
- cast<ConstantSDNode>(getValue(CI.getArgOperand(3)))->getZExtValue();
+ SDValue NArgVal = getValue(CI.getArgOperand(PatchPointOpers::NArgPos));
+ unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
// Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
- assert(CI.getNumArgOperands() >= NumArgs + 4 &&
+ // Intrinsics include all meta-operands up to but not including CC.
+ unsigned NumMetaOpers = PatchPointOpers::CCPos;
+ assert(CI.getNumArgOperands() >= NumMetaOpers + NumArgs &&
"Not enough arguments provided to the patchpoint intrinsic");
// For AnyRegCC the arguments are lowered later on manually.
unsigned NumCallArgs = isAnyRegCC ? 0 : NumArgs;
std::pair<SDValue, SDValue> Result =
- LowerCallOperands(CI, 4, NumCallArgs, Callee, isAnyRegCC);
+ LowerCallOperands(CI, NumMetaOpers, NumCallArgs, Callee, isAnyRegCC);
// Set the root to the target-lowered call chain.
SDValue Chain = Result.second;
@@ -6891,13 +6954,16 @@ void SelectionDAGBuilder::visitPatchpoint(const CallInst &CI) {
// Replace the target specific call node with the patchable intrinsic.
SmallVector<SDValue, 8> Ops;
- // Add the <id> and <numNopBytes> constants.
- for (unsigned i = 0; i < 2; ++i) {
- SDValue tmp = getValue(CI.getOperand(i));
- Ops.push_back(DAG.getTargetConstant(
- cast<ConstantSDNode>(tmp)->getZExtValue(), MVT::i32));
- }
+ // Add the <id> and <numBytes> constants.
+ SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
+ Ops.push_back(DAG.getTargetConstant(
+ cast<ConstantSDNode>(IDVal)->getZExtValue(), MVT::i64));
+ SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
+ Ops.push_back(DAG.getTargetConstant(
+ cast<ConstantSDNode>(NBytesVal)->getZExtValue(), MVT::i32));
+
// Assume that the Callee is a constant address.
+ // FIXME: handle function symbols in the future.
Ops.push_back(
DAG.getIntPtrConstant(cast<ConstantSDNode>(Callee)->getZExtValue(),
/*isTarget=*/true));
@@ -6915,25 +6981,16 @@ void SelectionDAGBuilder::visitPatchpoint(const CallInst &CI) {
// Add the arguments we omitted previously. The register allocator should
// place these in any free register.
if (isAnyRegCC)
- for (unsigned i = 4, e = NumArgs + 4; i != e; ++i)
+ for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
Ops.push_back(getValue(CI.getArgOperand(i)));
- // Push the arguments from the call instruction.
+ // Push the arguments from the call instruction up to the register mask.
SDNode::op_iterator e = hasGlue ? Call->op_end()-2 : Call->op_end()-1;
for (SDNode::op_iterator i = Call->op_begin()+2; i != e; ++i)
Ops.push_back(*i);
// Push live variables for the stack map.
- for (unsigned i = NumArgs + 4, e = CI.getNumArgOperands(); i != e; ++i) {
- SDValue OpVal = getValue(CI.getArgOperand(i));
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
- Ops.push_back(
- DAG.getTargetConstant(StackMaps::ConstantOp, MVT::i64));
- Ops.push_back(
- DAG.getTargetConstant(C->getSExtValue(), MVT::i64));
- } else
- Ops.push_back(OpVal);
- }
+ addStackMapLiveVars(CI, NumMetaOpers + NumArgs, Ops, *this);
// Push the register mask info.
if (hasGlue)
@@ -6960,7 +7017,7 @@ void SelectionDAGBuilder::visitPatchpoint(const CallInst &CI) {
// There is always a chain and a glue type at the end
ValueVTs.push_back(MVT::Other);
ValueVTs.push_back(MVT::Glue);
- NodeTys = DAG.getVTList(ValueVTs.data(), ValueVTs.size());
+ NodeTys = DAG.getVTList(ValueVTs);
} else
NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
@@ -6987,6 +7044,24 @@ void SelectionDAGBuilder::visitPatchpoint(const CallInst &CI) {
} else
DAG.ReplaceAllUsesWith(Call, MN);
DAG.DeleteNode(Call);
+
+ // Inform the Frame Information that we have a patchpoint in this function.
+ FuncInfo.MF->getFrameInfo()->setHasPatchPoint();
+}
+
+/// Returns an AttributeSet representing the attributes applied to the return
+/// value of the given call.
+static AttributeSet getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
+ SmallVector<Attribute::AttrKind, 2> Attrs;
+ if (CLI.RetSExt)
+ Attrs.push_back(Attribute::SExt);
+ if (CLI.RetZExt)
+ Attrs.push_back(Attribute::ZExt);
+ if (CLI.IsInReg)
+ Attrs.push_back(Attribute::InReg);
+
+ return AttributeSet::get(CLI.RetTy->getContext(), AttributeSet::ReturnIndex,
+ Attrs);
}
/// TargetLowering::LowerCallTo - This is the default LowerCallTo
@@ -6997,43 +7072,85 @@ std::pair<SDValue, SDValue>
TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
// Handle the incoming return values from the call.
CLI.Ins.clear();
+ Type *OrigRetTy = CLI.RetTy;
SmallVector<EVT, 4> RetTys;
- ComputeValueVTs(*this, CLI.RetTy, RetTys);
- for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
- EVT VT = RetTys[I];
- MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
- unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
- for (unsigned i = 0; i != NumRegs; ++i) {
- ISD::InputArg MyFlags;
- MyFlags.VT = RegisterVT;
- MyFlags.ArgVT = VT;
- MyFlags.Used = CLI.IsReturnValueUsed;
- if (CLI.RetSExt)
- MyFlags.Flags.setSExt();
- if (CLI.RetZExt)
- MyFlags.Flags.setZExt();
- if (CLI.IsInReg)
- MyFlags.Flags.setInReg();
- CLI.Ins.push_back(MyFlags);
+ SmallVector<uint64_t, 4> Offsets;
+ ComputeValueVTs(*this, CLI.RetTy, RetTys, &Offsets);
+
+ SmallVector<ISD::OutputArg, 4> Outs;
+ GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, *this);
+
+ bool CanLowerReturn =
+ this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
+ CLI.IsVarArg, Outs, CLI.RetTy->getContext());
+
+ SDValue DemoteStackSlot;
+ int DemoteStackIdx = -100;
+ if (!CanLowerReturn) {
+ // FIXME: equivalent assert?
+ // assert(!CS.hasInAllocaArgument() &&
+ // "sret demotion is incompatible with inalloca");
+ uint64_t TySize = getDataLayout()->getTypeAllocSize(CLI.RetTy);
+ unsigned Align = getDataLayout()->getPrefTypeAlignment(CLI.RetTy);
+ MachineFunction &MF = CLI.DAG.getMachineFunction();
+ DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
+ Type *StackSlotPtrType = PointerType::getUnqual(CLI.RetTy);
+
+ DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getPointerTy());
+ ArgListEntry Entry;
+ Entry.Node = DemoteStackSlot;
+ Entry.Ty = StackSlotPtrType;
+ Entry.isSExt = false;
+ Entry.isZExt = false;
+ Entry.isInReg = false;
+ Entry.isSRet = true;
+ Entry.isNest = false;
+ Entry.isByVal = false;
+ Entry.isReturned = false;
+ Entry.Alignment = Align;
+ CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
+ CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
+ } else {
+ for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
+ EVT VT = RetTys[I];
+ MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
+ unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
+ for (unsigned i = 0; i != NumRegs; ++i) {
+ ISD::InputArg MyFlags;
+ MyFlags.VT = RegisterVT;
+ MyFlags.ArgVT = VT;
+ MyFlags.Used = CLI.IsReturnValueUsed;
+ if (CLI.RetSExt)
+ MyFlags.Flags.setSExt();
+ if (CLI.RetZExt)
+ MyFlags.Flags.setZExt();
+ if (CLI.IsInReg)
+ MyFlags.Flags.setInReg();
+ CLI.Ins.push_back(MyFlags);
+ }
}
}
// Handle all of the outgoing arguments.
CLI.Outs.clear();
CLI.OutVals.clear();
- ArgListTy &Args = CLI.Args;
+ ArgListTy &Args = CLI.getArgs();
for (unsigned i = 0, e = Args.size(); i != e; ++i) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
- for (unsigned Value = 0, NumValues = ValueVTs.size();
- Value != NumValues; ++Value) {
+ Type *FinalType = Args[i].Ty;
+ if (Args[i].isByVal)
+ FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
+ bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
+ FinalType, CLI.CallConv, CLI.IsVarArg);
+ for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
+ ++Value) {
EVT VT = ValueVTs[Value];
Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
SDValue Op = SDValue(Args[i].Node.getNode(),
Args[i].Node.getResNo() + Value);
ISD::ArgFlagsTy Flags;
- unsigned OriginalAlignment =
- getDataLayout()->getABITypeAlignment(ArgTy);
+ unsigned OriginalAlignment = getDataLayout()->getABITypeAlignment(ArgTy);
if (Args[i].isZExt)
Flags.setZExt();
@@ -7043,8 +7160,18 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
Flags.setInReg();
if (Args[i].isSRet)
Flags.setSRet();
- if (Args[i].isByVal) {
+ if (Args[i].isByVal)
+ Flags.setByVal();
+ if (Args[i].isInAlloca) {
+ Flags.setInAlloca();
+ // Set the byval flag for CCAssignFn callbacks that don't know about
+ // inalloca. This way we can know how many bytes we should've allocated
+ // and how many bytes a callee cleanup function will pop. If we port
+ // inalloca to more targets, we'll have to add custom inalloca handling
+ // in the various CC lowering callbacks.
Flags.setByVal();
+ }
+ if (Args[i].isByVal || Args[i].isInAlloca) {
PointerType *Ty = cast<PointerType>(Args[i].Ty);
Type *ElementTy = Ty->getElementType();
Flags.setByValSize(getDataLayout()->getTypeAllocSize(ElementTy));
@@ -7059,6 +7186,8 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
}
if (Args[i].isNest)
Flags.setNest();
+ if (NeedsRegBlock)
+ Flags.setInConsecutiveRegs();
Flags.setOrigAlign(OriginalAlignment);
MVT PartVT = getRegisterType(CLI.RetTy->getContext(), VT);
@@ -7091,8 +7220,8 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
Flags.setReturned();
}
- getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts,
- PartVT, CLI.CS ? CLI.CS->getInstruction() : 0, ExtendKind);
+ getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
+ CLI.CS ? CLI.CS->getInstruction() : nullptr, ExtendKind);
for (unsigned j = 0; j != NumParts; ++j) {
// if it isn't first piece, alignment must be 1
@@ -7104,6 +7233,10 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
else if (j != 0)
MyFlags.Flags.setOrigAlign(1);
+ // Only mark the end at the last register of the last value.
+ if (NeedsRegBlock && Value == NumValues - 1 && j == NumParts - 1)
+ MyFlags.Flags.setInConsecutiveRegsLast();
+
CLI.Outs.push_back(MyFlags);
CLI.OutVals.push_back(Parts[j]);
}
@@ -7137,35 +7270,62 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
"LowerCall emitted a value with the wrong type!");
});
- // Collect the legal value parts into potentially illegal values
- // that correspond to the original function's return values.
- ISD::NodeType AssertOp = ISD::DELETED_NODE;
- if (CLI.RetSExt)
- AssertOp = ISD::AssertSext;
- else if (CLI.RetZExt)
- AssertOp = ISD::AssertZext;
SmallVector<SDValue, 4> ReturnValues;
- unsigned CurReg = 0;
- for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
- EVT VT = RetTys[I];
- MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
- unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
+ if (!CanLowerReturn) {
+ // The instruction result is the result of loading from the
+ // hidden sret parameter.
+ SmallVector<EVT, 1> PVTs;
+ Type *PtrRetTy = PointerType::getUnqual(OrigRetTy);
- ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
- NumRegs, RegisterVT, VT, NULL,
- AssertOp));
- CurReg += NumRegs;
- }
+ ComputeValueVTs(*this, PtrRetTy, PVTs);
+ assert(PVTs.size() == 1 && "Pointers should fit in one register");
+ EVT PtrVT = PVTs[0];
+
+ unsigned NumValues = RetTys.size();
+ ReturnValues.resize(NumValues);
+ SmallVector<SDValue, 4> Chains(NumValues);
+
+ for (unsigned i = 0; i < NumValues; ++i) {
+ SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
+ CLI.DAG.getConstant(Offsets[i], PtrVT));
+ SDValue L = CLI.DAG.getLoad(
+ RetTys[i], CLI.DL, CLI.Chain, Add,
+ MachinePointerInfo::getFixedStack(DemoteStackIdx, Offsets[i]), false,
+ false, false, 1);
+ ReturnValues[i] = L;
+ Chains[i] = L.getValue(1);
+ }
+
+ CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
+ } else {
+ // Collect the legal value parts into potentially illegal values
+ // that correspond to the original function's return values.
+ ISD::NodeType AssertOp = ISD::DELETED_NODE;
+ if (CLI.RetSExt)
+ AssertOp = ISD::AssertSext;
+ else if (CLI.RetZExt)
+ AssertOp = ISD::AssertZext;
+ unsigned CurReg = 0;
+ for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
+ EVT VT = RetTys[I];
+ MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
+ unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
+
+ ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
+ NumRegs, RegisterVT, VT, nullptr,
+ AssertOp));
+ CurReg += NumRegs;
+ }
- // For a function returning void, there is no return value. We can't create
- // such a node, so we just return a null return value in that case. In
- // that case, nothing will actually look at the value.
- if (ReturnValues.empty())
- return std::make_pair(SDValue(), CLI.Chain);
+ // For a function returning void, there is no return value. We can't create
+ // such a node, so we just return a null return value in that case. In
+ // that case, nothing will actually look at the value.
+ if (ReturnValues.empty())
+ return std::make_pair(SDValue(), CLI.Chain);
+ }
SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
- CLI.DAG.getVTList(&RetTys[0], RetTys.size()),
- &ReturnValues[0], ReturnValues.size());
+ CLI.DAG.getVTList(RetTys), ReturnValues);
return std::make_pair(Res, CLI.Chain);
}
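[Editor's note: a minimal source-level sketch, not part of the patch, of the sret
demotion performed by the !CanLowerReturn path above; the function and type names
are illustrative.]

    // When the target cannot return the aggregate in registers, the call is
    // lowered as if the hidden-pointer form had been written by hand: a fresh
    // stack slot (DemoteStackIdx) is passed as an extra first argument flagged
    // isSRet, and the visible result is assembled afterwards by loading each
    // piece back from that slot and joining the loads with a TokenFactor.
    struct Big { long a, b, c, d; };

    void callee_with_sret(Big *Ret) { *Ret = Big{1, 2, 3, 4}; }

    Big call_site() {
      Big Slot;                    // stands in for the demoted stack slot
      callee_with_sret(&Slot);     // hidden sret argument prepended to Args
      return Slot;                 // ReturnValues are loads from the slot
    }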
@@ -7192,7 +7352,7 @@ SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
const TargetLowering *TLI = TM.getTargetLowering();
RegsForValue RFV(V->getContext(), *TLI, Reg, V->getType());
SDValue Chain = DAG.getEntryNode();
- RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, 0, V);
+ RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V);
PendingExports.push_back(Chain);
}
@@ -7208,12 +7368,10 @@ static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
return A->use_empty();
const BasicBlock *Entry = A->getParent()->begin();
- for (Value::const_use_iterator UI = A->use_begin(), E = A->use_end();
- UI != E; ++UI) {
- const User *U = *UI;
+ for (const User *U : A->users())
if (cast<Instruction>(U)->getParent() != Entry || isa<SwitchInst>(U))
return false; // Use not in entry block.
- }
+
return true;
}
@@ -7221,7 +7379,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
SelectionDAG &DAG = SDB->DAG;
SDLoc dl = SDB->getCurSDLoc();
const TargetLowering *TLI = getTargetLowering();
- const DataLayout *TD = TLI->getDataLayout();
+ const DataLayout *DL = TLI->getDataLayout();
SmallVector<ISD::InputArg, 16> Ins;
if (!FuncInfo->CanLowerReturn) {
@@ -7247,13 +7405,17 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
ComputeValueVTs(*TLI, I->getType(), ValueVTs);
bool isArgValueUsed = !I->use_empty();
unsigned PartBase = 0;
+ Type *FinalType = I->getType();
+ if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal))
+ FinalType = cast<PointerType>(FinalType)->getElementType();
+ bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
+ FinalType, F.getCallingConv(), F.isVarArg());
for (unsigned Value = 0, NumValues = ValueVTs.size();
Value != NumValues; ++Value) {
EVT VT = ValueVTs[Value];
Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
ISD::ArgFlagsTy Flags;
- unsigned OriginalAlignment =
- TD->getABITypeAlignment(ArgTy);
+ unsigned OriginalAlignment = DL->getABITypeAlignment(ArgTy);
if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
Flags.setZExt();
@@ -7263,11 +7425,21 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
Flags.setInReg();
if (F.getAttributes().hasAttribute(Idx, Attribute::StructRet))
Flags.setSRet();
- if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal)) {
+ if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal))
Flags.setByVal();
+ if (F.getAttributes().hasAttribute(Idx, Attribute::InAlloca)) {
+ Flags.setInAlloca();
+ // Set the byval flag for CCAssignFn callbacks that don't know about
+ // inalloca. This way we can know how many bytes we should've allocated
+ // and how many bytes a callee cleanup function will pop. If we port
+ // inalloca to more targets, we'll have to add custom inalloca handling
+ // in the various CC lowering callbacks.
+ Flags.setByVal();
+ }
+ if (Flags.isByVal() || Flags.isInAlloca()) {
PointerType *Ty = cast<PointerType>(I->getType());
Type *ElementTy = Ty->getElementType();
- Flags.setByValSize(TD->getTypeAllocSize(ElementTy));
+ Flags.setByValSize(DL->getTypeAllocSize(ElementTy));
// For ByVal, alignment should be passed from FE. BE will guess if
// this info is not there but there are cases it cannot get right.
unsigned FrameAlign;
@@ -7279,6 +7451,8 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
}
if (F.getAttributes().hasAttribute(Idx, Attribute::Nest))
Flags.setNest();
+ if (NeedsRegBlock)
+ Flags.setInConsecutiveRegs();
Flags.setOrigAlign(OriginalAlignment);
MVT RegisterVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
@@ -7291,6 +7465,11 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
// if it isn't first piece, alignment must be 1
else if (i > 0)
MyFlags.Flags.setOrigAlign(1);
+
+ // Only mark the end at the last register of the last value.
+ if (NeedsRegBlock && Value == NumValues - 1 && i == NumRegs - 1)
+ MyFlags.Flags.setInConsecutiveRegsLast();
+
Ins.push_back(MyFlags);
}
PartBase += VT.getStoreSize();
@@ -7332,7 +7511,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
ISD::NodeType AssertOp = ISD::DELETED_NODE;
SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1,
- RegVT, VT, NULL, AssertOp);
+ RegVT, VT, nullptr, AssertOp);
MachineFunction& MF = SDB->DAG.getMachineFunction();
MachineRegisterInfo& RegInfo = MF.getRegInfo();
@@ -7379,7 +7558,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i],
NumParts, PartVT, VT,
- NULL, AssertOp));
+ nullptr, AssertOp));
}
i += NumParts;
@@ -7394,7 +7573,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
FuncInfo->setArgumentFrameIndex(I, FI->getIndex());
- SDValue Res = DAG.getMergeValues(&ArgValues[0], NumValues,
+ SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
SDB->getCurSDLoc());
SDB->setValue(I, Res);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index 835f643..84679f9 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -18,9 +18,8 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
-#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
#include <vector>
@@ -57,6 +56,7 @@ class MachineBasicBlock;
class MachineInstr;
class MachineRegisterInfo;
class MDNode;
+class MVT;
class PHINode;
class PtrToIntInst;
class ReturnInst;
@@ -96,7 +96,7 @@ class SelectionDAGBuilder {
DebugLoc dl;
unsigned SDNodeOrder;
public:
- DanglingDebugInfo() : DI(0), dl(DebugLoc()), SDNodeOrder(0) { }
+ DanglingDebugInfo() : DI(nullptr), dl(DebugLoc()), SDNodeOrder(0) { }
DanglingDebugInfo(const DbgValueInst *di, DebugLoc DL, unsigned SDNO) :
DI(di), dl(DL), SDNodeOrder(SDNO) { }
const DbgValueInst* getDI() { return DI; }
@@ -135,7 +135,7 @@ private:
MachineBasicBlock* BB;
uint32_t ExtraWeight;
- Case() : Low(0), High(0), BB(0), ExtraWeight(0) { }
+ Case() : Low(nullptr), High(nullptr), BB(nullptr), ExtraWeight(0) { }
Case(const Constant *low, const Constant *high, MachineBasicBlock *bb,
uint32_t extraweight) : Low(low), High(high), BB(bb),
ExtraWeight(extraweight) { }
@@ -320,7 +320,7 @@ private:
/// 1. Preserve the architecture independence of stack protector generation.
///
/// 2. Preserve the normal IR level stack protector check for platforms like
- /// OpenBSD for which we support platform specific stack protector
+ /// OpenBSD for which we support platform-specific stack protector
/// generation.
///
/// The main problem that guided the present solution is that one can not
@@ -338,7 +338,7 @@ private:
/// basic block (where the return inst is placed) and then move it back
/// later at SelectionDAG/MI time before the stack protector check if the
/// tail call optimization failed. The MI level option was nixed
- /// immediately since it would require platform specific pattern
+ /// immediately since it would require platform-specific pattern
/// matching. The SelectionDAG level option was nixed because
/// SelectionDAG only processes one IR level basic block at a time
/// implying one could not create a DAG Combine to move the callinst.
@@ -396,8 +396,8 @@ private:
/// the same function, use the same failure basic block).
class StackProtectorDescriptor {
public:
- StackProtectorDescriptor() : ParentMBB(0), SuccessMBB(0), FailureMBB(0),
- Guard(0) { }
+ StackProtectorDescriptor() : ParentMBB(nullptr), SuccessMBB(nullptr),
+ FailureMBB(nullptr), Guard(nullptr) { }
~StackProtectorDescriptor() { }
/// Returns true if all fields of the stack protector descriptor are
@@ -432,8 +432,8 @@ private:
/// parent mbb after we create the stack protector check (SuccessMBB). This
/// BB is visited only on stack protector check success.
void resetPerBBState() {
- ParentMBB = 0;
- SuccessMBB = 0;
+ ParentMBB = nullptr;
+ SuccessMBB = nullptr;
}
/// Reset state that only changes when we switch functions.
@@ -446,8 +446,8 @@ private:
/// 2.The guard variable since the guard variable we are checking against is
/// always the same.
void resetPerFunctionState() {
- FailureMBB = 0;
- Guard = 0;
+ FailureMBB = nullptr;
+ Guard = nullptr;
}
MachineBasicBlock *getParentMBB() { return ParentMBB; }
@@ -482,14 +482,18 @@ private:
/// block will be created.
MachineBasicBlock *AddSuccessorMBB(const BasicBlock *BB,
MachineBasicBlock *ParentMBB,
- MachineBasicBlock *SuccMBB = 0);
+ MachineBasicBlock *SuccMBB = nullptr);
};
private:
const TargetMachine &TM;
public:
+ /// Lowest valid SDNodeOrder. The special case 0 is reserved for scheduling
+ /// nodes without a corresponding SDNode.
+ static const unsigned LowestSDNodeOrder = 1;
+
SelectionDAG &DAG;
- const DataLayout *TD;
+ const DataLayout *DL;
AliasAnalysis *AA;
const TargetLibraryInfo *LibInfo;
@@ -534,7 +538,7 @@ public:
SelectionDAGBuilder(SelectionDAG &dag, FunctionLoweringInfo &funcinfo,
CodeGenOpt::Level ol)
- : CurInst(NULL), SDNodeOrder(0), TM(dag.getTarget()),
+ : CurInst(nullptr), SDNodeOrder(LowestSDNodeOrder), TM(dag.getTarget()),
DAG(dag), FuncInfo(funcinfo), OptLevel(ol),
HasTailCall(false) {
}
@@ -596,29 +600,31 @@ public:
void setValue(const Value *V, SDValue NewN) {
SDValue &N = NodeMap[V];
- assert(N.getNode() == 0 && "Already set a value for this node!");
+ assert(!N.getNode() && "Already set a value for this node!");
N = NewN;
}
void setUnusedArgValue(const Value *V, SDValue NewN) {
SDValue &N = UnusedArgNodeMap[V];
- assert(N.getNode() == 0 && "Already set a value for this node!");
+ assert(!N.getNode() && "Already set a value for this node!");
N = NewN;
}
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
- MachineBasicBlock *SwitchBB, unsigned Opc);
+ MachineBasicBlock *SwitchBB, unsigned Opc,
+ uint32_t TW, uint32_t FW);
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
MachineBasicBlock *CurBB,
- MachineBasicBlock *SwitchBB);
+ MachineBasicBlock *SwitchBB,
+ uint32_t TW, uint32_t FW);
bool ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases);
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB);
void CopyToExportRegsIfNeeded(const Value *V);
void ExportFromCurrentBlock(const Value *V);
void LowerCallTo(ImmutableCallSite CS, SDValue Callee, bool IsTailCall,
- MachineBasicBlock *LandingPad = NULL);
+ MachineBasicBlock *LandingPad = nullptr);
std::pair<SDValue, SDValue> LowerCallOperands(const CallInst &CI,
unsigned ArgIdx,
@@ -627,7 +633,7 @@ public:
bool useVoidTy = false);
/// UpdateSplitBlock - When an MBB was split during scheduling, update the
- /// references that ned to refer to the last resulting block.
+ /// references that need to refer to the last resulting block.
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last);
private:
@@ -636,7 +642,7 @@ private:
void visitBr(const BranchInst &I);
void visitSwitch(const SwitchInst &I);
void visitIndirectBr(const IndirectBrInst &I);
- void visitUnreachable(const UnreachableInst &I) { /* noop */ }
+ void visitUnreachable(const UnreachableInst &I);
// Helpers for visitSwitch
bool handleSmallSwitchRange(CaseRec& CR,
@@ -779,7 +785,8 @@ private:
/// corresponding DBG_VALUE machine instruction for it now. At the end of
/// instruction selection, they will be inserted to the entry BB.
bool EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
- int64_t Offset, const SDValue &N);
+ int64_t Offset, bool IsIndirect,
+ const SDValue &N);
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index c04a08d..a71cc68 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -14,11 +14,10 @@
#include "llvm/CodeGen/SelectionDAG.h"
#include "ScheduleDAGSDNodes.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
@@ -56,6 +55,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::PREFETCH: return "Prefetch";
case ISD::ATOMIC_FENCE: return "AtomicFence";
case ISD::ATOMIC_CMP_SWAP: return "AtomicCmpSwap";
+ case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: return "AtomicCmpSwapWithSuccess";
case ISD::ATOMIC_SWAP: return "AtomicSwap";
case ISD::ATOMIC_LOAD_ADD: return "AtomicLoadAdd";
case ISD::ATOMIC_LOAD_SUB: return "AtomicLoadSub";
@@ -82,7 +82,10 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::VALUETYPE: return "ValueType";
case ISD::Register: return "Register";
case ISD::RegisterMask: return "RegisterMask";
- case ISD::Constant: return "Constant";
+ case ISD::Constant:
+ if (cast<ConstantSDNode>(this)->isOpaque())
+ return "OpaqueConstant";
+ return "Constant";
case ISD::ConstantFP: return "ConstantFP";
case ISD::GlobalAddress: return "GlobalAddress";
case ISD::GlobalTLSAddress: return "GlobalTLSAddress";
@@ -91,6 +94,8 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::GLOBAL_OFFSET_TABLE: return "GLOBAL_OFFSET_TABLE";
case ISD::RETURNADDR: return "RETURNADDR";
case ISD::FRAMEADDR: return "FRAMEADDR";
+ case ISD::READ_REGISTER: return "READ_REGISTER";
+ case ISD::WRITE_REGISTER: return "WRITE_REGISTER";
case ISD::FRAME_TO_ARGS_OFFSET: return "FRAME_TO_ARGS_OFFSET";
case ISD::EH_RETURN: return "EH_RETURN";
case ISD::EH_SJLJ_SETJMP: return "EH_SJLJ_SETJMP";
@@ -112,7 +117,10 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
}
case ISD::BUILD_VECTOR: return "BUILD_VECTOR";
- case ISD::TargetConstant: return "TargetConstant";
+ case ISD::TargetConstant:
+ if (cast<ConstantSDNode>(this)->isOpaque())
+ return "OpaqueTargetConstant";
+ return "TargetConstant";
case ISD::TargetConstantFP: return "TargetConstantFP";
case ISD::TargetGlobalAddress: return "TargetGlobalAddress";
case ISD::TargetGlobalTLSAddress: return "TargetGlobalTLSAddress";
@@ -213,6 +221,9 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::ZERO_EXTEND: return "zero_extend";
case ISD::ANY_EXTEND: return "any_extend";
case ISD::SIGN_EXTEND_INREG: return "sign_extend_inreg";
+ case ISD::ANY_EXTEND_VECTOR_INREG: return "any_extend_vector_inreg";
+ case ISD::SIGN_EXTEND_VECTOR_INREG: return "sign_extend_vector_inreg";
+ case ISD::ZERO_EXTEND_VECTOR_INREG: return "zero_extend_vector_inreg";
case ISD::TRUNCATE: return "truncate";
case ISD::FP_ROUND: return "fp_round";
case ISD::FLT_ROUNDS_: return "flt_rounds";
@@ -225,8 +236,8 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::FP_TO_UINT: return "fp_to_uint";
case ISD::BITCAST: return "bitcast";
case ISD::ADDRSPACECAST: return "addrspacecast";
- case ISD::FP16_TO_FP32: return "fp16_to_fp32";
- case ISD::FP32_TO_FP16: return "fp32_to_fp16";
+ case ISD::FP16_TO_FP: return "fp16_to_fp";
+ case ISD::FP_TO_FP16: return "fp_to_fp16";
case ISD::CONVERT_RNDSAT: {
switch (cast<CvtRndSatSDNode>(this)->getCvtCode()) {
@@ -325,7 +336,7 @@ const char *SDNode::getIndexedModeName(ISD::MemIndexedMode AM) {
}
}
-void SDNode::dump() const { dump(0); }
+void SDNode::dump() const { dump(nullptr); }
void SDNode::dump(const SelectionDAG *G) const {
print(dbgs(), G);
dbgs() << '\n';
@@ -352,7 +363,7 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
for (MachineSDNode::mmo_iterator i = MN->memoperands_begin(),
e = MN->memoperands_end(); i != e; ++i) {
OS << **i;
- if (llvm::next(i) != e)
+ if (std::next(i) != e)
OS << " ";
}
OS << ">";
@@ -385,7 +396,7 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
dyn_cast<GlobalAddressSDNode>(this)) {
int64_t offset = GADN->getOffset();
OS << '<';
- WriteAsOperand(OS, GADN->getGlobal());
+ GADN->getGlobal()->printAsOperand(OS);
OS << '>';
if (offset > 0)
OS << " + " << offset;
@@ -422,7 +433,7 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
OS << LBB->getName() << " ";
OS << (const void*)BBDN->getBasicBlock() << ">";
} else if (const RegisterSDNode *R = dyn_cast<RegisterSDNode>(this)) {
- OS << ' ' << PrintReg(R->getReg(), G ? G->getTarget().getRegisterInfo() :0);
+ OS << ' ' << PrintReg(R->getReg(), G ? G->getTarget().getRegisterInfo() :nullptr);
} else if (const ExternalSymbolSDNode *ES =
dyn_cast<ExternalSymbolSDNode>(this)) {
OS << "'" << ES->getSymbol() << "'";
@@ -476,9 +487,9 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
dyn_cast<BlockAddressSDNode>(this)) {
int64_t offset = BA->getOffset();
OS << "<";
- WriteAsOperand(OS, BA->getBlockAddress()->getFunction(), false);
+ BA->getBlockAddress()->getFunction()->printAsOperand(OS, false);
OS << ", ";
- WriteAsOperand(OS, BA->getBlockAddress()->getBasicBlock(), false);
+ BA->getBlockAddress()->getBasicBlock()->printAsOperand(OS, false);
OS << ">";
if (offset > 0)
OS << " + " << offset;
@@ -590,7 +601,7 @@ static void DumpNodesr(raw_ostream &OS, const SDNode *N, unsigned indent,
void SDNode::dumpr() const {
VisitedSDNodeSet once;
- DumpNodesr(dbgs(), this, 0, 0, once);
+ DumpNodesr(dbgs(), this, 0, nullptr, once);
}
void SDNode::dumpr(const SelectionDAG *G) const {
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index b8b4db4..57e22e2 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "isel"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "ScheduleDAGSDNodes.h"
#include "SelectionDAGBuilder.h"
@@ -20,7 +19,6 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/CFG.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
@@ -33,8 +31,8 @@
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
@@ -59,6 +57,8 @@
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "isel"
+
STATISTIC(NumFastIselFailures, "Number of instructions fast isel failed on");
STATISTIC(NumFastIselSuccess, "Number of instructions fast isel selected");
STATISTIC(NumFastIselBlocks, "Number of blocks selected entirely by fast isel");
@@ -141,6 +141,25 @@ STATISTIC(NumFastIselFailShuffleVector,"Fast isel fails on ShuffleVector");
STATISTIC(NumFastIselFailExtractValue,"Fast isel fails on ExtractValue");
STATISTIC(NumFastIselFailInsertValue,"Fast isel fails on InsertValue");
STATISTIC(NumFastIselFailLandingPad,"Fast isel fails on LandingPad");
+
+// Intrinsic instructions...
+STATISTIC(NumFastIselFailIntrinsicCall, "Fast isel fails on Intrinsic call");
+STATISTIC(NumFastIselFailSAddWithOverflow,
+ "Fast isel fails on sadd.with.overflow");
+STATISTIC(NumFastIselFailUAddWithOverflow,
+ "Fast isel fails on uadd.with.overflow");
+STATISTIC(NumFastIselFailSSubWithOverflow,
+ "Fast isel fails on ssub.with.overflow");
+STATISTIC(NumFastIselFailUSubWithOverflow,
+ "Fast isel fails on usub.with.overflow");
+STATISTIC(NumFastIselFailSMulWithOverflow,
+ "Fast isel fails on smul.with.overflow");
+STATISTIC(NumFastIselFailUMulWithOverflow,
+ "Fast isel fails on umul.with.overflow");
+STATISTIC(NumFastIselFailFrameaddress, "Fast isel fails on Frameaddress");
+STATISTIC(NumFastIselFailSqrt, "Fast isel fails on sqrt call");
+STATISTIC(NumFastIselFailStackMap, "Fast isel fails on StackMap call");
+STATISTIC(NumFastIselFailPatchPoint, "Fast isel fails on PatchPoint call");
#endif
static cl::opt<bool>
@@ -213,7 +232,7 @@ MachinePassRegistry RegisterScheduler::Registry;
static cl::opt<RegisterScheduler::FunctionPassCtor, false,
RegisterPassParser<RegisterScheduler> >
ISHeuristic("pre-RA-sched",
- cl::init(&createDefaultScheduler),
+ cl::init(&createDefaultScheduler), cl::Hidden,
cl::desc("Instruction schedulers available (before register"
" allocation):"));
@@ -300,7 +319,7 @@ TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
"'usesCustomInserter', it must implement "
"TargetLowering::EmitInstrWithCustomInserter!";
#endif
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
void TargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
@@ -357,7 +376,7 @@ static void SplitCriticalSideEffectEdges(Function &Fn, Pass *SDISel) {
// Loop for blocks with phi nodes.
for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
PHINode *PN = dyn_cast<PHINode>(BB->begin());
- if (PN == 0) continue;
+ if (!PN) continue;
ReprocessBlock:
// For each block with a PHI node, check to see if any of the input values
@@ -367,7 +386,7 @@ static void SplitCriticalSideEffectEdges(Function &Fn, Pass *SDISel) {
for (BasicBlock::iterator I = BB->begin(); (PN = dyn_cast<PHINode>(I)); ++I)
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
ConstantExpr *CE = dyn_cast<ConstantExpr>(PN->getIncomingValue(i));
- if (CE == 0 || !CE->canTrap()) continue;
+ if (!CE || !CE->canTrap()) continue;
// The only case we have to worry about is when the edge is critical.
// Since this block has a PHI Node, we assume it has multiple input
@@ -400,8 +419,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
RegInfo = &MF->getRegInfo();
AA = &getAnalysis<AliasAnalysis>();
LibInfo = &getAnalysis<TargetLibraryInfo>();
- TTI = getAnalysisIfAvailable<TargetTransformInfo>();
- GFI = Fn.hasGC() ? &getAnalysis<GCModuleInfo>().getFunctionInfo(Fn) : 0;
+ GFI = Fn.hasGC() ? &getAnalysis<GCModuleInfo>().getFunctionInfo(Fn) : nullptr;
TargetSubtargetInfo &ST =
const_cast<TargetSubtargetInfo&>(TM.getSubtarget<TargetSubtargetInfo>());
@@ -418,13 +436,13 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
SplitCriticalSideEffectEdges(const_cast<Function&>(Fn), this);
- CurDAG->init(*MF, TTI, TLI);
+ CurDAG->init(*MF, TLI);
FuncInfo->set(Fn, *MF, CurDAG);
if (UseMBPI && OptLevel != CodeGenOpt::None)
FuncInfo->BPI = &getAnalysis<BranchProbabilityInfo>();
else
- FuncInfo->BPI = 0;
+ FuncInfo->BPI = nullptr;
SDB->init(GFI, *AA, LibInfo);
@@ -449,7 +467,8 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
MachineInstr *MI = FuncInfo->ArgDbgValues[e-i-1];
bool hasFI = MI->getOperand(0).isFI();
- unsigned Reg = hasFI ? TRI.getFrameRegister(*MF) : MI->getOperand(0).getReg();
+ unsigned Reg =
+ hasFI ? TRI.getFrameRegister(*MF) : MI->getOperand(0).getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg))
EntryMBB->insert(EntryMBB->begin(), MI);
else {
@@ -457,7 +476,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
if (Def) {
MachineBasicBlock::iterator InsertPos = Def;
// FIXME: VR def may not be in entry block.
- Def->getParent()->insert(llvm::next(InsertPos), MI);
+ Def->getParent()->insert(std::next(InsertPos), MI);
} else
DEBUG(dbgs() << "Dropping debug info for dead vreg"
<< TargetRegisterInfo::virtReg2Index(Reg) << "\n");
@@ -483,16 +502,17 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
// If this vreg is directly copied into an exported register then
// that COPY instructions also need DBG_VALUE, if it is the only
// user of LDI->second.
- MachineInstr *CopyUseMI = NULL;
- for (MachineRegisterInfo::use_iterator
- UI = RegInfo->use_begin(LDI->second);
- MachineInstr *UseMI = UI.skipInstruction();) {
+ MachineInstr *CopyUseMI = nullptr;
+ for (MachineRegisterInfo::use_instr_iterator
+ UI = RegInfo->use_instr_begin(LDI->second),
+ E = RegInfo->use_instr_end(); UI != E; ) {
+ MachineInstr *UseMI = &*(UI++);
if (UseMI->isDebugValue()) continue;
if (UseMI->isCopy() && !CopyUseMI && UseMI->getParent() == EntryMBB) {
CopyUseMI = UseMI; continue;
}
// Otherwise this is another use or second copy use.
- CopyUseMI = NULL; break;
+ CopyUseMI = nullptr; break;
}
if (CopyUseMI) {
MachineInstr *NewMI =
@@ -509,21 +529,17 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
// Determine if there are any calls in this machine function.
MachineFrameInfo *MFI = MF->getFrameInfo();
- for (MachineFunction::const_iterator I = MF->begin(), E = MF->end(); I != E;
- ++I) {
-
+ for (const auto &MBB : *MF) {
if (MFI->hasCalls() && MF->hasInlineAsm())
break;
- const MachineBasicBlock *MBB = I;
- for (MachineBasicBlock::const_iterator II = MBB->begin(), IE = MBB->end();
- II != IE; ++II) {
- const MCInstrDesc &MCID = TM.getInstrInfo()->get(II->getOpcode());
+ for (const auto &MI : MBB) {
+ const MCInstrDesc &MCID = TM.getInstrInfo()->get(MI.getOpcode());
if ((MCID.isCall() && !MCID.isReturn()) ||
- II->isStackAligningInlineAsm()) {
+ MI.isStackAligningInlineAsm()) {
MFI->setHasCalls(true);
}
- if (II->isInlineAsm()) {
+ if (MI.isInlineAsm()) {
MF->setHasInlineAsm(true);
}
}
@@ -564,6 +580,9 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
// at this point.
FuncInfo->clear();
+ DEBUG(dbgs() << "*** MachineFunction at end of ISel ***\n");
+ DEBUG(MF->print(dbgs()));
+
return true;
}
@@ -621,7 +640,7 @@ void SelectionDAGISel::ComputeLiveOutVRegInfo() {
continue;
unsigned NumSignBits = CurDAG->ComputeNumSignBits(Src);
- CurDAG->ComputeMaskedBits(Src, KnownZero, KnownOne);
+ CurDAG->computeKnownBits(Src, KnownZero, KnownOne);
FuncInfo->AddLiveOutRegInfo(DestReg, NumSignBits, KnownZero, KnownOne);
} while (!Worklist.empty());
}
@@ -801,7 +820,7 @@ public:
/// NodeDeleted - Handle nodes deleted from the graph. If the node being
/// deleted is the current ISelPosition node, update ISelPosition.
///
- virtual void NodeDeleted(SDNode *N, SDNode *E) {
+ void NodeDeleted(SDNode *N, SDNode *E) override {
if (ISelPosition == SelectionDAG::allnodes_iterator(N))
++ISelPosition;
}
@@ -974,7 +993,37 @@ static void collectFailStats(const Instruction *I) {
case Instruction::FCmp: NumFastIselFailFCmp++; return;
case Instruction::PHI: NumFastIselFailPHI++; return;
case Instruction::Select: NumFastIselFailSelect++; return;
- case Instruction::Call: NumFastIselFailCall++; return;
+ case Instruction::Call: {
+ if (auto const *Intrinsic = dyn_cast<IntrinsicInst>(I)) {
+ switch (Intrinsic->getIntrinsicID()) {
+ default:
+ NumFastIselFailIntrinsicCall++; return;
+ case Intrinsic::sadd_with_overflow:
+ NumFastIselFailSAddWithOverflow++; return;
+ case Intrinsic::uadd_with_overflow:
+ NumFastIselFailUAddWithOverflow++; return;
+ case Intrinsic::ssub_with_overflow:
+ NumFastIselFailSSubWithOverflow++; return;
+ case Intrinsic::usub_with_overflow:
+ NumFastIselFailUSubWithOverflow++; return;
+ case Intrinsic::smul_with_overflow:
+ NumFastIselFailSMulWithOverflow++; return;
+ case Intrinsic::umul_with_overflow:
+ NumFastIselFailUMulWithOverflow++; return;
+ case Intrinsic::frameaddress:
+ NumFastIselFailFrameaddress++; return;
+ case Intrinsic::sqrt:
+ NumFastIselFailSqrt++; return;
+ case Intrinsic::experimental_stackmap:
+ NumFastIselFailStackMap++; return;
+ case Intrinsic::experimental_patchpoint_void: // fall-through
+ case Intrinsic::experimental_patchpoint_i64:
+ NumFastIselFailPatchPoint++; return;
+ }
+ }
+ NumFastIselFailCall++;
+ return;
+ }
case Instruction::Shl: NumFastIselFailShl++; return;
case Instruction::LShr: NumFastIselFailLShr++; return;
case Instruction::AShr: NumFastIselFailAShr++; return;
@@ -991,7 +1040,7 @@ static void collectFailStats(const Instruction *I) {
void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// Initialize the Fast-ISel state, if needed.
- FastISel *FastIS = 0;
+ FastISel *FastIS = nullptr;
if (TM.Options.EnableFastISel)
FastIS = getTargetLowering()->createFastISel(*FuncInfo, LibInfo);
@@ -1064,15 +1113,15 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// where they are, so we can be sure to emit subsequent instructions
// after them.
if (FuncInfo->InsertPt != FuncInfo->MBB->begin())
- FastIS->setLastLocalValue(llvm::prior(FuncInfo->InsertPt));
+ FastIS->setLastLocalValue(std::prev(FuncInfo->InsertPt));
else
- FastIS->setLastLocalValue(0);
+ FastIS->setLastLocalValue(nullptr);
}
unsigned NumFastIselRemaining = std::distance(Begin, End);
// Do FastISel on as many instructions as possible.
for (; BI != Begin; --BI) {
- const Instruction *Inst = llvm::prior(BI);
+ const Instruction *Inst = std::prev(BI);
// If we no longer require this instruction, skip it.
if (isFoldedOrDeadInstruction(Inst, FuncInfo)) {
@@ -1093,7 +1142,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// Try to fold the load if so.
const Instruction *BeforeInst = Inst;
while (BeforeInst != Begin) {
- BeforeInst = llvm::prior(BasicBlock::const_iterator(BeforeInst));
+ BeforeInst = std::prev(BasicBlock::const_iterator(BeforeInst));
if (!isFoldedOrDeadInstruction(BeforeInst, FuncInfo))
break;
}
@@ -1101,7 +1150,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
BeforeInst->hasOneUse() &&
FastIS->tryToFoldLoad(cast<LoadInst>(BeforeInst), Inst)) {
// If we succeeded, don't re-select the load.
- BI = llvm::next(BasicBlock::const_iterator(BeforeInst));
+ BI = std::next(BasicBlock::const_iterator(BeforeInst));
--NumFastIselRemaining;
++NumFastIselSuccess;
}
@@ -1604,7 +1653,7 @@ bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
APInt NeededMask = DesiredMask & ~ActualMask;
APInt KnownZero, KnownOne;
- CurDAG->ComputeMaskedBits(LHS, KnownZero, KnownOne);
+ CurDAG->computeKnownBits(LHS, KnownZero, KnownOne);
// If all the missing bits in the or are already known to be set, match!
if ((NeededMask & KnownOne) == NeededMask)
@@ -1673,7 +1722,7 @@ static SDNode *findGlueUse(SDNode *N) {
if (Use.getResNo() == FlagResNo)
return Use.getUser();
}
- return NULL;
+ return nullptr;
}
/// findNonImmUse - Return true if "Use" is a non-immediate use of "Def".
@@ -1780,7 +1829,7 @@ bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
EVT VT = Root->getValueType(Root->getNumValues()-1);
while (VT == MVT::Glue) {
SDNode *GU = findGlueUse(Root);
- if (GU == NULL)
+ if (!GU)
break;
Root = GU;
VT = Root->getValueType(Root->getNumValues()-1);
@@ -1802,12 +1851,39 @@ SDNode *SelectionDAGISel::Select_INLINEASM(SDNode *N) {
SelectInlineAsmMemoryOperands(Ops);
EVT VTs[] = { MVT::Other, MVT::Glue };
- SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N),
- VTs, &Ops[0], Ops.size());
+ SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N), VTs, Ops);
+ New->setNodeId(-1);
+ return New.getNode();
+}
+
+SDNode
+*SelectionDAGISel::Select_READ_REGISTER(SDNode *Op) {
+ SDLoc dl(Op);
+ MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(Op->getOperand(0));
+ const MDString *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0));
+ unsigned Reg = getTargetLowering()->getRegisterByName(
+ RegStr->getString().data(), Op->getValueType(0));
+ SDValue New = CurDAG->getCopyFromReg(
+ CurDAG->getEntryNode(), dl, Reg, Op->getValueType(0));
New->setNodeId(-1);
return New.getNode();
}
+SDNode
+*SelectionDAGISel::Select_WRITE_REGISTER(SDNode *Op) {
+ SDLoc dl(Op);
+ MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(Op->getOperand(1));
+ const MDString *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0));
+ unsigned Reg = getTargetLowering()->getRegisterByName(
+ RegStr->getString().data(), Op->getOperand(2).getValueType());
+ SDValue New = CurDAG->getCopyToReg(
+ CurDAG->getEntryNode(), dl, Reg, Op->getOperand(2));
+ New->setNodeId(-1);
+ return New.getNode();
+}
+
+
+
SDNode *SelectionDAGISel::Select_UNDEF(SDNode *N) {
return CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF,N->getValueType(0));
}
@@ -1843,7 +1919,7 @@ UpdateChainsAndGlue(SDNode *NodeToMatch, SDValue InputChain,
// Now that all the normal results are replaced, we replace the chain and
// glue results if present.
if (!ChainNodesMatched.empty()) {
- assert(InputChain.getNode() != 0 &&
+ assert(InputChain.getNode() &&
"Matched input chains but didn't produce a chain");
// Loop over all of the nodes we matched that produced a chain result.
// Replace all the chain results with the final chain we ended up with.
@@ -1874,7 +1950,7 @@ UpdateChainsAndGlue(SDNode *NodeToMatch, SDValue InputChain,
// If the result produces glue, update any glue results in the matched
// pattern with the glue result.
- if (InputGlue.getNode() != 0) {
+ if (InputGlue.getNode()) {
// Handle any interior nodes explicitly marked.
for (unsigned i = 0, e = GlueResultNodesMatched.size(); i != e; ++i) {
SDNode *FRN = GlueResultNodesMatched[i];
@@ -2077,13 +2153,13 @@ HandleMergeInputChains(SmallVectorImpl<SDNode*> &ChainNodesMatched,
if (InputChains.size() == 1)
return InputChains[0];
return CurDAG->getNode(ISD::TokenFactor, SDLoc(ChainNodesMatched[0]),
- MVT::Other, &InputChains[0], InputChains.size());
+ MVT::Other, InputChains);
}
/// MorphNode - Handle morphing a node in place for the selector.
SDNode *SelectionDAGISel::
MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
- const SDValue *Ops, unsigned NumOps, unsigned EmitNodeInfo) {
+ ArrayRef<SDValue> Ops, unsigned EmitNodeInfo) {
// It is possible we're using MorphNodeTo to replace a node with no
// normal results with one that has a normal result (or we could be
// adding a chain) and the input could have glue and chains as well.
@@ -2103,7 +2179,7 @@ MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
// Call the underlying SelectionDAG routine to do the transmogrification. Note
// that this deletes operands of the old node that become dead.
- SDNode *Res = CurDAG->MorphNodeTo(Node, ~TargetOpc, VTList, Ops, NumOps);
+ SDNode *Res = CurDAG->MorphNodeTo(Node, ~TargetOpc, VTList, Ops);
// MorphNodeTo can operate in two ways: if an existing node with the
// specified operands exists, it can just return it. Otherwise, it
@@ -2195,8 +2271,7 @@ CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckChildType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
- SDValue N, const TargetLowering *TLI,
- unsigned ChildNo) {
+ SDValue N, const TargetLowering *TLI, unsigned ChildNo) {
if (ChildNo >= N.getNumOperands())
return false; // Match fails if out of range child #.
return ::CheckType(MatcherTable, MatcherIndex, N.getOperand(ChildNo), TLI);
@@ -2228,7 +2303,15 @@ CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
Val = GetVBR(Val, MatcherTable, MatcherIndex);
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
- return C != 0 && C->getSExtValue() == Val;
+ return C && C->getSExtValue() == Val;
+}
+
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
+CheckChildInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
+ SDValue N, unsigned ChildNo) {
+ if (ChildNo >= N.getNumOperands())
+ return false; // Match fails if out of range child #.
+ return ::CheckInteger(MatcherTable, MatcherIndex, N.getOperand(ChildNo));
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
@@ -2241,7 +2324,7 @@ CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
if (N->getOpcode() != ISD::AND) return false;
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
- return C != 0 && SDISel.CheckAndMask(N.getOperand(0), C, Val);
+ return C && SDISel.CheckAndMask(N.getOperand(0), C, Val);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
@@ -2254,7 +2337,7 @@ CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
if (N->getOpcode() != ISD::OR) return false;
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
- return C != 0 && SDISel.CheckOrMask(N.getOperand(0), C, Val);
+ return C && SDISel.CheckOrMask(N.getOperand(0), C, Val);
}
/// IsPredicateKnownToFail - If we know how and can do so without pushing a
@@ -2314,6 +2397,14 @@ static unsigned IsPredicateKnownToFail(const unsigned char *Table,
case SelectionDAGISel::OPC_CheckInteger:
Result = !::CheckInteger(Table, Index, N);
return Index;
+ case SelectionDAGISel::OPC_CheckChild0Integer:
+ case SelectionDAGISel::OPC_CheckChild1Integer:
+ case SelectionDAGISel::OPC_CheckChild2Integer:
+ case SelectionDAGISel::OPC_CheckChild3Integer:
+ case SelectionDAGISel::OPC_CheckChild4Integer:
+ Result = !::CheckChildInteger(Table, Index, N,
+ Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Integer);
+ return Index;
case SelectionDAGISel::OPC_CheckAndImm:
Result = !::CheckAndImm(Table, Index, N, SDISel);
return Index;
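
The OPC_CheckChild0Integer..OPC_CheckChild4Integer cases added above recover the child index from the opcode value itself (Table[Index-1] minus OPC_CheckChild0Integer), which only works if those enumerators are numbered consecutively. A minimal standalone illustration of that assumption, separate from the diff — the value 60 is made up, not the real enumerator value:

// Hypothetical enumerator values; only the consecutive spacing matters.
enum {
  OPC_CheckChild0Integer = 60,
  OPC_CheckChild1Integer,
  OPC_CheckChild2Integer,
  OPC_CheckChild3Integer,
  OPC_CheckChild4Integer
};
static_assert(OPC_CheckChild3Integer - OPC_CheckChild0Integer == 3,
              "the opcode offset encodes the child number");
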
@@ -2378,13 +2469,15 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
case ISD::LIFETIME_START:
case ISD::LIFETIME_END:
NodeToMatch->setNodeId(-1); // Mark selected.
- return 0;
+ return nullptr;
case ISD::AssertSext:
case ISD::AssertZext:
CurDAG->ReplaceAllUsesOfValueWith(SDValue(NodeToMatch, 0),
NodeToMatch->getOperand(0));
- return 0;
+ return nullptr;
case ISD::INLINEASM: return Select_INLINEASM(NodeToMatch);
+ case ISD::READ_REGISTER: return Select_READ_REGISTER(NodeToMatch);
+ case ISD::WRITE_REGISTER: return Select_WRITE_REGISTER(NodeToMatch);
case ISD::UNDEF: return Select_UNDEF(NodeToMatch);
}
@@ -2530,7 +2623,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
}
case OPC_RecordNode: {
// Remember this node, it may end up being an operand in the pattern.
- SDNode *Parent = 0;
+ SDNode *Parent = nullptr;
if (NodeStack.size() > 1)
Parent = NodeStack[NodeStack.size()-2].getNode();
RecordedNodes.push_back(std::make_pair(N, Parent));
@@ -2694,6 +2787,12 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
case OPC_CheckInteger:
if (!::CheckInteger(MatcherTable, MatcherIndex, N)) break;
continue;
+ case OPC_CheckChild0Integer: case OPC_CheckChild1Integer:
+ case OPC_CheckChild2Integer: case OPC_CheckChild3Integer:
+ case OPC_CheckChild4Integer:
+ if (!::CheckChildInteger(MatcherTable, MatcherIndex, N,
+ Opcode-OPC_CheckChild0Integer)) break;
+ continue;
case OPC_CheckAndImm:
if (!::CheckAndImm(MatcherTable, MatcherIndex, N, *this)) break;
continue;
@@ -2731,7 +2830,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
if (Val & 128)
Val = GetVBR(Val, MatcherTable, MatcherIndex);
RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
- CurDAG->getTargetConstant(Val, VT), (SDNode*)0));
+ CurDAG->getTargetConstant(Val, VT), nullptr));
continue;
}
case OPC_EmitRegister: {
@@ -2739,7 +2838,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
(MVT::SimpleValueType)MatcherTable[MatcherIndex++];
unsigned RegNo = MatcherTable[MatcherIndex++];
RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
- CurDAG->getRegister(RegNo, VT), (SDNode*)0));
+ CurDAG->getRegister(RegNo, VT), nullptr));
continue;
}
case OPC_EmitRegister2: {
@@ -2751,7 +2850,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
unsigned RegNo = MatcherTable[MatcherIndex++];
RegNo |= MatcherTable[MatcherIndex++] << 8;
RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
- CurDAG->getRegister(RegNo, VT), (SDNode*)0));
+ CurDAG->getRegister(RegNo, VT), nullptr));
continue;
}
@@ -2776,7 +2875,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
case OPC_EmitMergeInputChains1_0: // OPC_EmitMergeInputChains, 1, 0
case OPC_EmitMergeInputChains1_1: { // OPC_EmitMergeInputChains, 1, 1
// These are space-optimized forms of OPC_EmitMergeInputChains.
- assert(InputChain.getNode() == 0 &&
+ assert(!InputChain.getNode() &&
"EmitMergeInputChains should be the first chain producing node");
assert(ChainNodesMatched.empty() &&
"Should only have one EmitMergeInputChains per match");
@@ -2797,13 +2896,13 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
// Merge the input chains if they are not intra-pattern references.
InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
- if (InputChain.getNode() == 0)
+ if (!InputChain.getNode())
break; // Failed to merge.
continue;
}
case OPC_EmitMergeInputChains: {
- assert(InputChain.getNode() == 0 &&
+ assert(!InputChain.getNode() &&
"EmitMergeInputChains should be the first chain producing node");
// This node gets a list of nodes we matched in the input that have
// chains. We want to token factor all of the input chains to these nodes
@@ -2839,7 +2938,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
// Merge the input chains if they are not intra-pattern references.
InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
- if (InputChain.getNode() == 0)
+ if (!InputChain.getNode())
break; // Failed to merge.
continue;
@@ -2850,7 +2949,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
assert(RecNo < RecordedNodes.size() && "Invalid EmitCopyToReg");
unsigned DestPhysReg = MatcherTable[MatcherIndex++];
- if (InputChain.getNode() == 0)
+ if (!InputChain.getNode())
InputChain = CurDAG->getEntryNode();
InputChain = CurDAG->getCopyToReg(InputChain, SDLoc(NodeToMatch),
@@ -2866,7 +2965,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid EmitNodeXForm");
SDValue Res = RunSDNodeXForm(RecordedNodes[RecNo].first, XFormNo);
- RecordedNodes.push_back(std::pair<SDValue,SDNode*>(Res, (SDNode*) 0));
+ RecordedNodes.push_back(std::pair<SDValue,SDNode*>(Res, nullptr));
continue;
}
@@ -2898,7 +2997,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
else if (VTs.size() == 2)
VTList = CurDAG->getVTList(VTs[0], VTs[1]);
else
- VTList = CurDAG->getVTList(VTs.data(), VTs.size());
+ VTList = CurDAG->getVTList(VTs);
// Get the operand list.
unsigned NumOps = MatcherTable[MatcherIndex++];
@@ -2932,11 +3031,11 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
// If this has chain/glue inputs, add them.
if (EmitNodeInfo & OPFL_Chain)
Ops.push_back(InputChain);
- if ((EmitNodeInfo & OPFL_GlueInput) && InputGlue.getNode() != 0)
+ if ((EmitNodeInfo & OPFL_GlueInput) && InputGlue.getNode() != nullptr)
Ops.push_back(InputGlue);
// Create the node.
- SDNode *Res = 0;
+ SDNode *Res = nullptr;
if (Opcode != OPC_MorphNodeTo) {
// If this is a normal EmitNode command, just create the new node and
// add the results to the RecordedNodes list.
@@ -2947,17 +3046,16 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
if (VTs[i] == MVT::Other || VTs[i] == MVT::Glue) break;
RecordedNodes.push_back(std::pair<SDValue,SDNode*>(SDValue(Res, i),
- (SDNode*) 0));
+ nullptr));
}
} else if (NodeToMatch->getOpcode() != ISD::DELETED_NODE) {
- Res = MorphNode(NodeToMatch, TargetOpc, VTList, Ops.data(), Ops.size(),
- EmitNodeInfo);
+ Res = MorphNode(NodeToMatch, TargetOpc, VTList, Ops, EmitNodeInfo);
} else {
// NodeToMatch was eliminated by CSE when the target changed the DAG.
// We will visit the equivalent node later.
DEBUG(dbgs() << "Node was eliminated by CSE\n");
- return 0;
+ return nullptr;
}
// If the node had chain/glue results, update our notion of the current
@@ -3087,7 +3185,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
// FIXME: We just return here, which interacts correctly with SelectRoot
// above. We should fix this to not return an SDNode* anymore.
- return 0;
+ return nullptr;
}
}
@@ -3099,7 +3197,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
while (1) {
if (MatchScopes.empty()) {
CannotYetSelect(NodeToMatch);
- return 0;
+ return nullptr;
}
// Restore the interpreter state back to the point where the scope was
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
index b752b482..4df5ede 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
@@ -15,12 +15,11 @@
#include "ScheduleDAGSDNodes.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
@@ -28,6 +27,8 @@
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "dag-printer"
+
namespace llvm {
template<>
struct DOTGraphTraits<SelectionDAG*> : public DefaultDOTGraphTraits {
@@ -125,9 +126,9 @@ namespace llvm {
static void addCustomGraphFeatures(SelectionDAG *G,
GraphWriter<SelectionDAG*> &GW) {
- GW.emitSimpleNode(0, "plaintext=circle", "GraphRoot");
+ GW.emitSimpleNode(nullptr, "plaintext=circle", "GraphRoot");
if (G->getRoot().getNode())
- GW.emitEdge(0, -1, G->getRoot().getNode(), G->getRoot().getResNo(),
+ GW.emitEdge(nullptr, -1, G->getRoot().getNode(), G->getRoot().getResNo(),
"color=blue,style=dashed");
}
};
@@ -290,10 +291,10 @@ std::string ScheduleDAGSDNodes::getGraphNodeLabel(const SUnit *SU) const {
void ScheduleDAGSDNodes::getCustomGraphFeatures(GraphWriter<ScheduleDAG*> &GW) const {
if (DAG) {
// Draw a special "GraphRoot" node to indicate the root of the graph.
- GW.emitSimpleNode(0, "plaintext=circle", "GraphRoot");
+ GW.emitSimpleNode(nullptr, "plaintext=circle", "GraphRoot");
const SDNode *N = DAG->getRoot().getNode();
if (N && N->getNodeId() != -1)
- GW.emitEdge(0, -1, &SUnits[N->getNodeId()], -1,
+ GW.emitEdge(nullptr, -1, &SUnits[N->getNodeId()], -1,
"color=blue,style=dashed");
}
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 82b068d..05ace41 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -22,6 +22,7 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/CommandLine.h"
@@ -39,7 +40,7 @@ TargetLowering::TargetLowering(const TargetMachine &tm,
: TargetLoweringBase(tm, tlof) {}
const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
- return NULL;
+ return nullptr;
}
/// Check whether a given call node is in tail position within its function. If
@@ -74,6 +75,7 @@ void TargetLowering::ArgListEntry::setAttributes(ImmutableCallSite *CS,
isSRet = CS->paramHasAttr(AttrIdx, Attribute::StructRet);
isNest = CS->paramHasAttr(AttrIdx, Attribute::Nest);
isByVal = CS->paramHasAttr(AttrIdx, Attribute::ByVal);
+ isInAlloca = CS->paramHasAttr(AttrIdx, Attribute::InAlloca);
isReturned = CS->paramHasAttr(AttrIdx, Attribute::Returned);
Alignment = CS->getParamAlignment(AttrIdx);
}
@@ -101,12 +103,11 @@ TargetLowering::makeLibCall(SelectionDAG &DAG,
SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), getPointerTy());
Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
- TargetLowering::
- CallLoweringInfo CLI(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
- false, 0, getLibcallCallingConv(LC),
- /*isTailCall=*/false,
- doesNotReturn, isReturnValueUsed, Callee, Args,
- DAG, dl);
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
+ .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
+ .setNoReturn(doesNotReturn).setDiscardResult(!isReturnValueUsed)
+ .setSExtResult(isSigned).setZExtResult(!isSigned);
return LowerCallTo(CLI);
}
@@ -224,7 +225,7 @@ unsigned TargetLowering::getJumpTableEncoding() const {
return MachineJumpTableInfo::EK_BlockAddress;
// In PIC mode, if the target supports a GPRel32 directive, use it.
- if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != 0)
+ if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
return MachineJumpTableInfo::EK_GPRel32BlockAddress;
// Otherwise, use a label difference.
@@ -326,6 +327,10 @@ TargetLowering::TargetLoweringOpt::ShrinkDemandedOp(SDValue Op,
assert(Op.getNode()->getNumValues() == 1 &&
"ShrinkDemandedOp only supports nodes with one result!");
+ // Early return, as this function cannot handle vector types.
+ if (Op.getValueType().isVector())
+ return false;
+
// Don't do this if the node has another user, which may require the
// full value.
if (!Op.getNode()->hasOneUse())
@@ -384,7 +389,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (Depth != 0) {
// If not at the root, Just compute the KnownZero/KnownOne bits to
// simplify things downstream.
- TLO.DAG.ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
+ TLO.DAG.computeKnownBits(Op, KnownZero, KnownOne, Depth);
return false;
}
// If this is the root being simplified, allow it to have multiple uses,
@@ -414,7 +419,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
APInt LHSZero, LHSOne;
// Do not increment Depth here; that can cause an infinite loop.
- TLO.DAG.ComputeMaskedBits(Op.getOperand(0), LHSZero, LHSOne, Depth);
+ TLO.DAG.computeKnownBits(Op.getOperand(0), LHSZero, LHSOne, Depth);
// If the LHS already has zeros where RHSC does, this and is dead.
if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask))
return TLO.CombineTo(Op, Op.getOperand(0));
@@ -846,6 +851,31 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
}
break;
}
+ case ISD::BUILD_PAIR: {
+ EVT HalfVT = Op.getOperand(0).getValueType();
+ unsigned HalfBitWidth = HalfVT.getScalarSizeInBits();
+
+ APInt MaskLo = NewMask.getLoBits(HalfBitWidth).trunc(HalfBitWidth);
+ APInt MaskHi = NewMask.getHiBits(HalfBitWidth).trunc(HalfBitWidth);
+
+ APInt KnownZeroLo, KnownOneLo;
+ APInt KnownZeroHi, KnownOneHi;
+
+ if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownZeroLo,
+ KnownOneLo, TLO, Depth + 1))
+ return true;
+
+ if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownZeroHi,
+ KnownOneHi, TLO, Depth + 1))
+ return true;
+
+ KnownZero = KnownZeroLo.zext(BitWidth) |
+ KnownZeroHi.zext(BitWidth).shl(HalfBitWidth);
+
+ KnownOne = KnownOneLo.zext(BitWidth) |
+ KnownOneHi.zext(BitWidth).shl(HalfBitWidth);
+ break;
+ }
case ISD::ZERO_EXTEND: {
unsigned OperandBitWidth =
Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
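
The new ISD::BUILD_PAIR case splits the demanded mask into a low and a high half, simplifies each operand against its half, and then stitches the per-half known bits back together. A rough host-side sketch of that bookkeeping, separate from the diff, with plain 64/32-bit integers standing in for APInt and illustrative helper names:

#include <cstdint>

// Split the demanded mask of a 64-bit pair built from two i32 halves.
inline uint32_t demandedLo(uint64_t NewMask) { return uint32_t(NewMask); }
inline uint32_t demandedHi(uint64_t NewMask) { return uint32_t(NewMask >> 32); }

// Recombine per-half known bits into the 64-bit result, mirroring the
// zext/shl combination in the patch.
inline uint64_t recombineKnown(uint32_t KnownLo, uint32_t KnownHi) {
  return uint64_t(KnownLo) | (uint64_t(KnownHi) << 32);
}
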
@@ -1038,8 +1068,8 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
}
// FALL THROUGH
default:
- // Just use ComputeMaskedBits to compute output bits.
- TLO.DAG.ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
+ // Just use computeKnownBits to compute output bits.
+ TLO.DAG.computeKnownBits(Op, KnownZero, KnownOne, Depth);
break;
}
@@ -1051,14 +1081,14 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
return false;
}
-/// computeMaskedBitsForTargetNode - Determine which of the bits specified
+/// computeKnownBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
-void TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) const {
+void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth) const {
assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
@@ -1072,6 +1102,7 @@ void TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
/// targets that want to expose additional information about sign bits to the
/// DAG Combiner.
unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
+ const SelectionDAG &,
unsigned Depth) const {
assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
@@ -1083,7 +1114,7 @@ unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
}
/// ValueHasExactlyOneBitSet - Test if the given value is known to have exactly
-/// one bit set. This differs from ComputeMaskedBits in that it doesn't need to
+/// one bit set. This differs from computeKnownBits in that it doesn't need to
/// determine which bit is set.
///
static bool ValueHasExactlyOneBitSet(SDValue Val, const SelectionDAG &DAG) {
@@ -1106,15 +1137,69 @@ static bool ValueHasExactlyOneBitSet(SDValue Val, const SelectionDAG &DAG) {
// More could be done here, though the above checks are enough
// to handle some common cases.
- // Fall back to ComputeMaskedBits to catch other known cases.
+ // Fall back to computeKnownBits to catch other known cases.
EVT OpVT = Val.getValueType();
unsigned BitWidth = OpVT.getScalarType().getSizeInBits();
APInt KnownZero, KnownOne;
- DAG.ComputeMaskedBits(Val, KnownZero, KnownOne);
+ DAG.computeKnownBits(Val, KnownZero, KnownOne);
return (KnownZero.countPopulation() == BitWidth - 1) &&
(KnownOne.countPopulation() == 1);
}
+bool TargetLowering::isConstTrueVal(const SDNode *N) const {
+ if (!N)
+ return false;
+
+ const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
+ if (!CN) {
+ const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
+ if (!BV)
+ return false;
+
+ BitVector UndefElements;
+ CN = BV->getConstantSplatNode(&UndefElements);
+ // Only interested in constant splats, and we don't try to handle undef
+ // elements in identifying boolean constants.
+ if (!CN || UndefElements.none())
+ return false;
+ }
+
+ switch (getBooleanContents(N->getValueType(0))) {
+ case UndefinedBooleanContent:
+ return CN->getAPIntValue()[0];
+ case ZeroOrOneBooleanContent:
+ return CN->isOne();
+ case ZeroOrNegativeOneBooleanContent:
+ return CN->isAllOnesValue();
+ }
+
+ llvm_unreachable("Invalid boolean contents");
+}
+
+bool TargetLowering::isConstFalseVal(const SDNode *N) const {
+ if (!N)
+ return false;
+
+ const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
+ if (!CN) {
+ const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
+ if (!BV)
+ return false;
+
+ BitVector UndefElements;
+ CN = BV->getConstantSplatNode(&UndefElements);
+ // Only interested in constant splats, and we don't try to handle undef
+ // elements in identifying boolean constants.
+ if (!CN || UndefElements.none())
+ return false;
+ }
+
+ if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent)
+ return !CN->getAPIntValue()[0];
+
+ return CN->isNullValue();
+}
+
/// SimplifySetCC - Try to simplify a setcc built with the specified operands
/// and cc. If it is unable to simplify it, return a null SDValue.
SDValue
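
The isConstTrueVal() helper added above keys its answer off the target's boolean-contents convention. A rough scalar-only mirror of that switch, separate from the diff, assuming a 32-bit constant and ignoring the build-vector splat path:

#include <cstdint>

enum BooleanContent { UndefinedBooleanContent, ZeroOrOneBooleanContent,
                      ZeroOrNegativeOneBooleanContent };

// Simplified stand-in for the scalar path of isConstTrueVal().
bool isConstTrue(uint32_t C, BooleanContent BC) {
  switch (BC) {
  case UndefinedBooleanContent:         return C & 1;          // only bit 0 matters
  case ZeroOrOneBooleanContent:         return C == 1;         // exactly one
  case ZeroOrNegativeOneBooleanContent: return C == 0xFFFFFFFFu; // all ones
  }
  return false;
}
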
@@ -1130,7 +1215,8 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
case ISD::SETFALSE2: return DAG.getConstant(0, VT);
case ISD::SETTRUE:
case ISD::SETTRUE2: {
- TargetLowering::BooleanContent Cnt = getBooleanContents(VT.isVector());
+ TargetLowering::BooleanContent Cnt =
+ getBooleanContents(N0->getValueType(0));
return DAG.getConstant(
Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, VT);
}
@@ -1331,10 +1417,14 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
EVT newVT = N0.getOperand(0).getValueType();
if (DCI.isBeforeLegalizeOps() ||
(isOperationLegal(ISD::SETCC, newVT) &&
- getCondCodeAction(Cond, newVT.getSimpleVT())==Legal))
- return DAG.getSetCC(dl, VT, N0.getOperand(0),
- DAG.getConstant(C1.trunc(InSize), newVT),
- Cond);
+ getCondCodeAction(Cond, newVT.getSimpleVT()) == Legal)) {
+ EVT NewSetCCVT = getSetCCResultType(*DAG.getContext(), newVT);
+ SDValue NewConst = DAG.getConstant(C1.trunc(InSize), newVT);
+
+ SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0),
+ NewConst, Cond);
+ return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType());
+ }
break;
}
default:
@@ -1417,7 +1507,8 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
}
} else if (N1C->getAPIntValue() == 1 &&
(VT == MVT::i1 ||
- getBooleanContents(false) == ZeroOrOneBooleanContent)) {
+ getBooleanContents(N0->getValueType(0)) ==
+ ZeroOrOneBooleanContent)) {
SDValue Op0 = N0;
if (Op0.getOpcode() == ISD::TRUNCATE)
Op0 = Op0.getOperand(0);
@@ -1468,18 +1559,32 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
// Canonicalize GE/LE comparisons to use GT/LT comparisons.
if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
if (C1 == MinVal) return DAG.getConstant(1, VT); // X >= MIN --> true
- // X >= C0 --> X > (C0-1)
- return DAG.getSetCC(dl, VT, N0,
- DAG.getConstant(C1-1, N1.getValueType()),
- (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT);
+ // X >= C0 --> X > (C0 - 1)
+ APInt C = C1 - 1;
+ ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT;
+ if ((DCI.isBeforeLegalizeOps() ||
+ isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
+ (!N1C->isOpaque() || (N1C->isOpaque() && C.getBitWidth() <= 64 &&
+ isLegalICmpImmediate(C.getSExtValue())))) {
+ return DAG.getSetCC(dl, VT, N0,
+ DAG.getConstant(C, N1.getValueType()),
+ NewCC);
+ }
}
if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
if (C1 == MaxVal) return DAG.getConstant(1, VT); // X <= MAX --> true
- // X <= C0 --> X < (C0+1)
- return DAG.getSetCC(dl, VT, N0,
- DAG.getConstant(C1+1, N1.getValueType()),
- (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT);
+ // X <= C0 --> X < (C0 + 1)
+ APInt C = C1 + 1;
+ ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT;
+ if ((DCI.isBeforeLegalizeOps() ||
+ isCondCodeLegal(NewCC, VT.getSimpleVT())) &&
+ (!N1C->isOpaque() || (N1C->isOpaque() && C.getBitWidth() <= 64 &&
+ isLegalICmpImmediate(C.getSExtValue())))) {
+ return DAG.getSetCC(dl, VT, N0,
+ DAG.getConstant(C, N1.getValueType()),
+ NewCC);
+ }
}
if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal)
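
The guarded rewrite above still relies on the simple identity that, once the C1 == MinVal case has been folded to "true", "X >= C1" and "X > C1 - 1" are the same comparison; the new conditions only ensure the adjusted condition code and immediate are actually legal for the target. A trivial host-side check of the identity for the unsigned case, separate from the diff:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t C = 5; // arbitrary non-zero constant
  for (uint32_t X : {0u, 3u, 4u, 5u, 6u, 0xFFFFFFFFu})
    assert((X >= C) == (X > C - 1));
}
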
@@ -1535,7 +1640,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
N0.getOpcode() == ISD::AND)
if (ConstantSDNode *AndRHS =
dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
- EVT ShiftTy = DCI.isBeforeLegalizeOps() ?
+ EVT ShiftTy = DCI.isBeforeLegalize() ?
getPointerTy() : getShiftAmountTy(N0.getValueType());
if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3
// Perform the xform if the AND RHS is a single bit.
@@ -1565,7 +1670,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
const APInt &AndRHSC = AndRHS->getAPIntValue();
if ((-AndRHSC).isPowerOf2() && (AndRHSC & C1) == C1) {
unsigned ShiftBits = AndRHSC.countTrailingZeros();
- EVT ShiftTy = DCI.isBeforeLegalizeOps() ?
+ EVT ShiftTy = DCI.isBeforeLegalize() ?
getPointerTy() : getShiftAmountTy(N0.getValueType());
EVT CmpTy = N0.getValueType();
SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0.getOperand(0),
@@ -1593,7 +1698,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
}
NewC = NewC.lshr(ShiftBits);
if (ShiftBits && isLegalICmpImmediate(NewC.getSExtValue())) {
- EVT ShiftTy = DCI.isBeforeLegalizeOps() ?
+ EVT ShiftTy = DCI.isBeforeLegalize() ?
getPointerTy() : getShiftAmountTy(N0.getValueType());
EVT CmpTy = N0.getValueType();
SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0,
@@ -1674,7 +1779,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
// The sext(setcc()) => setcc() optimization relies on the appropriate
// constant being emitted.
uint64_t EqVal = 0;
- switch (getBooleanContents(N0.getValueType().isVector())) {
+ switch (getBooleanContents(N0.getValueType())) {
case UndefinedBooleanContent:
case ZeroOrOneBooleanContent:
EqVal = ISD::isTrueWhenEqual(Cond);
@@ -1988,7 +2093,7 @@ const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const{
return "r";
if (ConstraintVT.isFloatingPoint())
return "f"; // works for many targets
- return 0;
+ return nullptr;
}
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
@@ -2022,12 +2127,12 @@ void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
if (Op.getOpcode() == ISD::ADD) {
C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
- if (C == 0 || GA == 0) {
+ if (!C || !GA) {
C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
}
- if (C == 0 || GA == 0)
- C = 0, GA = 0;
+ if (!C || !GA)
+ C = nullptr, GA = nullptr;
}
// If we find a valid operand, map to the TargetXXX version so that the
@@ -2062,14 +2167,14 @@ std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
MVT VT) const {
if (Constraint.empty() || Constraint[0] != '{')
- return std::make_pair(0u, static_cast<TargetRegisterClass*>(0));
+ return std::make_pair(0u, static_cast<TargetRegisterClass*>(nullptr));
assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");
// Remove the braces from around the name.
StringRef RegName(Constraint.data()+1, Constraint.size()-2);
std::pair<unsigned, const TargetRegisterClass*> R =
- std::make_pair(0u, static_cast<const TargetRegisterClass*>(0));
+ std::make_pair(0u, static_cast<const TargetRegisterClass*>(nullptr));
// Figure out which register class contains this reg.
const TargetRegisterInfo *RI = getTargetMachine().getRegisterInfo();
@@ -2364,7 +2469,7 @@ TargetLowering::ConstraintWeight
Value *CallOperandVal = info.CallOperandVal;
// If we don't have a value, we can't do a match,
// but allow it at the lowest weight.
- if (CallOperandVal == NULL)
+ if (!CallOperandVal)
return CW_Default;
// Look at the constraint type.
switch (*constraint) {
@@ -2520,7 +2625,8 @@ SDValue TargetLowering::BuildExactSDIV(SDValue Op1, SDValue Op2, SDLoc dl,
if (ShAmt) {
// TODO: For UDIV use SRL instead of SRA.
SDValue Amt = DAG.getConstant(ShAmt, getShiftAmountTy(Op1.getValueType()));
- Op1 = DAG.getNode(ISD::SRA, dl, Op1.getValueType(), Op1, Amt);
+ Op1 = DAG.getNode(ISD::SRA, dl, Op1.getValueType(), Op1, Amt, false, false,
+ true);
d = d.ashr(ShAmt);
}
@@ -2537,9 +2643,9 @@ SDValue TargetLowering::BuildExactSDIV(SDValue Op1, SDValue Op2, SDLoc dl,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number. See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
-SDValue TargetLowering::
-BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
- std::vector<SDNode*> *Created) const {
+SDValue TargetLowering::BuildSDIV(SDNode *N, const APInt &Divisor,
+ SelectionDAG &DAG, bool IsAfterLegalization,
+ std::vector<SDNode *> *Created) const {
EVT VT = N->getValueType(0);
SDLoc dl(N);
@@ -2548,8 +2654,7 @@ BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
if (!isTypeLegal(VT))
return SDValue();
- APInt d = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();
- APInt::ms magics = d.magic();
+ APInt::ms magics = Divisor.magic();
// Multiply the numerator (operand 0) by the magic value
// FIXME: We should support doing a MUL in a wider type
@@ -2566,13 +2671,13 @@ BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
else
    return SDValue();       // No mulhs or equivalent
// If d > 0 and m < 0, add the numerator
- if (d.isStrictlyPositive() && magics.m.isNegative()) {
+ if (Divisor.isStrictlyPositive() && magics.m.isNegative()) {
Q = DAG.getNode(ISD::ADD, dl, VT, Q, N->getOperand(0));
if (Created)
Created->push_back(Q.getNode());
}
// If d < 0 and m > 0, subtract the numerator.
- if (d.isNegative() && magics.m.isStrictlyPositive()) {
+ if (Divisor.isNegative() && magics.m.isStrictlyPositive()) {
Q = DAG.getNode(ISD::SUB, dl, VT, Q, N->getOperand(0));
if (Created)
Created->push_back(Q.getNode());
@@ -2585,9 +2690,9 @@ BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
Created->push_back(Q.getNode());
}
// Extract the sign bit and add it to the quotient
- SDValue T =
- DAG.getNode(ISD::SRL, dl, VT, Q, DAG.getConstant(VT.getSizeInBits()-1,
- getShiftAmountTy(Q.getValueType())));
+ SDValue T = DAG.getNode(ISD::SRL, dl, VT, Q,
+ DAG.getConstant(VT.getScalarSizeInBits() - 1,
+ getShiftAmountTy(Q.getValueType())));
if (Created)
Created->push_back(T.getNode());
return DAG.getNode(ISD::ADD, dl, VT, Q, T);
@@ -2597,9 +2702,9 @@ BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number. See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
-SDValue TargetLowering::
-BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
- std::vector<SDNode*> *Created) const {
+SDValue TargetLowering::BuildUDIV(SDNode *N, const APInt &Divisor,
+ SelectionDAG &DAG, bool IsAfterLegalization,
+ std::vector<SDNode *> *Created) const {
EVT VT = N->getValueType(0);
SDLoc dl(N);
@@ -2610,22 +2715,21 @@ BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
// FIXME: We should use a narrower constant when the upper
// bits are known to be zero.
- const APInt &N1C = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();
- APInt::mu magics = N1C.magicu();
+ APInt::mu magics = Divisor.magicu();
SDValue Q = N->getOperand(0);
// If the divisor is even, we can avoid using the expensive fixup by shifting
// the divided value upfront.
- if (magics.a != 0 && !N1C[0]) {
- unsigned Shift = N1C.countTrailingZeros();
+ if (magics.a != 0 && !Divisor[0]) {
+ unsigned Shift = Divisor.countTrailingZeros();
Q = DAG.getNode(ISD::SRL, dl, VT, Q,
DAG.getConstant(Shift, getShiftAmountTy(Q.getValueType())));
if (Created)
Created->push_back(Q.getNode());
// Get magic number for the shifted divisor.
- magics = N1C.lshr(Shift).magicu(Shift);
+ magics = Divisor.lshr(Shift).magicu(Shift);
assert(magics.a == 0 && "Should use cheap fixup now");
}
@@ -2644,7 +2748,7 @@ BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
Created->push_back(Q.getNode());
if (magics.a == 0) {
- assert(magics.s < N1C.getBitWidth() &&
+ assert(magics.s < Divisor.getBitWidth() &&
"We shouldn't generate an undefined shift!");
return DAG.getNode(ISD::SRL, dl, VT, Q,
DAG.getConstant(magics.s, getShiftAmountTy(Q.getValueType())));
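
BuildUDIV now takes the divisor as an APInt, but the underlying transformation is unchanged: an unsigned divide by a constant becomes a MULHU by a magic multiplier followed by a shift (plus a fixup when magics.a != 0). A standalone sketch of that identity, separate from the diff, for the divisor 10, where magicu() yields the multiplier 0xCCCCCCCD with post-shift 3 and no fixup:

#include <cassert>
#include <cstdint>

uint32_t udiv10(uint32_t X) {
  uint32_t Hi = uint32_t((uint64_t(X) * 0xCCCCCCCDu) >> 32); // MULHU
  return Hi >> 3;                                            // magics.s == 3
}

int main() {
  for (uint32_t X = 0; X < 1000000; ++X)
    assert(udiv10(X) == X / 10);
}
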
@@ -2663,3 +2767,183 @@ BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
DAG.getConstant(magics.s-1, getShiftAmountTy(NPQ.getValueType())));
}
}
+
+bool TargetLowering::
+verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const {
+ if (!isa<ConstantSDNode>(Op.getOperand(0))) {
+ DAG.getContext()->emitError("argument to '__builtin_return_address' must "
+ "be a constant integer");
+ return true;
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Legalization Utilities
+//===----------------------------------------------------------------------===//
+
+bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
+ SelectionDAG &DAG, SDValue LL, SDValue LH,
+ SDValue RL, SDValue RH) const {
+ EVT VT = N->getValueType(0);
+ SDLoc dl(N);
+
+ bool HasMULHS = isOperationLegalOrCustom(ISD::MULHS, HiLoVT);
+ bool HasMULHU = isOperationLegalOrCustom(ISD::MULHU, HiLoVT);
+ bool HasSMUL_LOHI = isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT);
+ bool HasUMUL_LOHI = isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT);
+ if (HasMULHU || HasMULHS || HasUMUL_LOHI || HasSMUL_LOHI) {
+ unsigned OuterBitSize = VT.getSizeInBits();
+ unsigned InnerBitSize = HiLoVT.getSizeInBits();
+ unsigned LHSSB = DAG.ComputeNumSignBits(N->getOperand(0));
+ unsigned RHSSB = DAG.ComputeNumSignBits(N->getOperand(1));
+
+ // LL, LH, RL, and RH must be either all NULL or all set to a value.
+ assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) ||
+ (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode()));
+
+ if (!LL.getNode() && !RL.getNode() &&
+ isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
+ LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, N->getOperand(0));
+ RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, N->getOperand(1));
+ }
+
+ if (!LL.getNode())
+ return false;
+
+ APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize);
+ if (DAG.MaskedValueIsZero(N->getOperand(0), HighMask) &&
+ DAG.MaskedValueIsZero(N->getOperand(1), HighMask)) {
+ // The inputs are both zero-extended.
+ if (HasUMUL_LOHI) {
+ // We can emit a umul_lohi.
+ Lo = DAG.getNode(ISD::UMUL_LOHI, dl,
+ DAG.getVTList(HiLoVT, HiLoVT), LL, RL);
+ Hi = SDValue(Lo.getNode(), 1);
+ return true;
+ }
+ if (HasMULHU) {
+ // We can emit a mulhu+mul.
+ Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RL);
+ Hi = DAG.getNode(ISD::MULHU, dl, HiLoVT, LL, RL);
+ return true;
+ }
+ }
+ if (LHSSB > InnerBitSize && RHSSB > InnerBitSize) {
+ // The input values are both sign-extended.
+ if (HasSMUL_LOHI) {
+ // We can emit a smul_lohi.
+ Lo = DAG.getNode(ISD::SMUL_LOHI, dl,
+ DAG.getVTList(HiLoVT, HiLoVT), LL, RL);
+ Hi = SDValue(Lo.getNode(), 1);
+ return true;
+ }
+ if (HasMULHS) {
+ // We can emit a mulhs+mul.
+ Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RL);
+ Hi = DAG.getNode(ISD::MULHS, dl, HiLoVT, LL, RL);
+ return true;
+ }
+ }
+
+ if (!LH.getNode() && !RH.getNode() &&
+ isOperationLegalOrCustom(ISD::SRL, VT) &&
+ isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
+ unsigned ShiftAmt = VT.getSizeInBits() - HiLoVT.getSizeInBits();
+ SDValue Shift = DAG.getConstant(ShiftAmt, getShiftAmountTy(VT));
+ LH = DAG.getNode(ISD::SRL, dl, VT, N->getOperand(0), Shift);
+ LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH);
+ RH = DAG.getNode(ISD::SRL, dl, VT, N->getOperand(1), Shift);
+ RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH);
+ }
+
+ if (!LH.getNode())
+ return false;
+
+ if (HasUMUL_LOHI) {
+ // Lo,Hi = umul LHS, RHS.
+ SDValue UMulLOHI = DAG.getNode(ISD::UMUL_LOHI, dl,
+ DAG.getVTList(HiLoVT, HiLoVT), LL, RL);
+ Lo = UMulLOHI;
+ Hi = UMulLOHI.getValue(1);
+ RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH);
+ LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL);
+ Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH);
+ Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH);
+ return true;
+ }
+ if (HasMULHU) {
+ Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RL);
+ Hi = DAG.getNode(ISD::MULHU, dl, HiLoVT, LL, RL);
+ RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH);
+ LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL);
+ Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH);
+ Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH);
+ return true;
+ }
+ }
+ return false;
+}
+
+bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result,
+ SelectionDAG &DAG) const {
+ EVT VT = Node->getOperand(0).getValueType();
+ EVT NVT = Node->getValueType(0);
+ SDLoc dl(SDValue(Node, 0));
+
+ // FIXME: Only f32 to i64 conversions are supported.
+ if (VT != MVT::f32 || NVT != MVT::i64)
+ return false;
+
+ // Expand f32 -> i64 conversion
+ // This algorithm comes from compiler-rt's implementation of fixsfdi:
+ // https://github.com/llvm-mirror/compiler-rt/blob/master/lib/builtins/fixsfdi.c
+ EVT IntVT = EVT::getIntegerVT(*DAG.getContext(),
+ VT.getSizeInBits());
+ SDValue ExponentMask = DAG.getConstant(0x7F800000, IntVT);
+ SDValue ExponentLoBit = DAG.getConstant(23, IntVT);
+ SDValue Bias = DAG.getConstant(127, IntVT);
+ SDValue SignMask = DAG.getConstant(APInt::getSignBit(VT.getSizeInBits()),
+ IntVT);
+ SDValue SignLowBit = DAG.getConstant(VT.getSizeInBits() - 1, IntVT);
+ SDValue MantissaMask = DAG.getConstant(0x007FFFFF, IntVT);
+
+ SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Node->getOperand(0));
+
+ SDValue ExponentBits = DAG.getNode(ISD::SRL, dl, IntVT,
+ DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask),
+ DAG.getZExtOrTrunc(ExponentLoBit, dl, getShiftAmountTy(IntVT)));
+ SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias);
+
+ SDValue Sign = DAG.getNode(ISD::SRA, dl, IntVT,
+ DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask),
+ DAG.getZExtOrTrunc(SignLowBit, dl, getShiftAmountTy(IntVT)));
+ Sign = DAG.getSExtOrTrunc(Sign, dl, NVT);
+
+ SDValue R = DAG.getNode(ISD::OR, dl, IntVT,
+ DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask),
+ DAG.getConstant(0x00800000, IntVT));
+
+ R = DAG.getZExtOrTrunc(R, dl, NVT);
+
+
+ R = DAG.getSelectCC(dl, Exponent, ExponentLoBit,
+ DAG.getNode(ISD::SHL, dl, NVT, R,
+ DAG.getZExtOrTrunc(
+ DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit),
+ dl, getShiftAmountTy(IntVT))),
+ DAG.getNode(ISD::SRL, dl, NVT, R,
+ DAG.getZExtOrTrunc(
+ DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent),
+ dl, getShiftAmountTy(IntVT))),
+ ISD::SETGT);
+
+ SDValue Ret = DAG.getNode(ISD::SUB, dl, NVT,
+ DAG.getNode(ISD::XOR, dl, NVT, R, Sign),
+ Sign);
+
+ Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, IntVT),
+ DAG.getConstant(0, NVT), Ret, ISD::SETLT);
+ return true;
+}
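
expandFP_TO_SINT above builds the compiler-rt fixsfdi algorithm out of DAG nodes; the same computation is easier to read as plain C++. A host-side sketch of the f32 -> i64 case it handles, separate from the diff, with the same simplifications (no NaN/Inf or overflow handling) and an illustrative function name:

#include <cstdint>
#include <cstring>

int64_t fptosi_f32_i64(float F) {
  uint32_t Bits;
  std::memcpy(&Bits, &F, sizeof(Bits));                    // ISD::BITCAST
  int32_t Exponent = int32_t((Bits & 0x7F800000u) >> 23) - 127;
  int64_t Sign = (Bits & 0x80000000u) ? -1 : 0;            // all ones or zero
  uint64_t R = (Bits & 0x007FFFFFu) | 0x00800000u;         // implicit leading 1
  if (Exponent < 0)                                        // |F| < 1 -> 0
    return 0;
  R = Exponent > 23 ? R << (Exponent - 23) : R >> (23 - Exponent);
  return (int64_t(R) ^ Sign) - Sign;                       // reapply the sign
}
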
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp
index f769b44..0e89bad 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp
@@ -15,8 +15,8 @@
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
-TargetSelectionDAGInfo::TargetSelectionDAGInfo(const TargetMachine &TM)
- : TD(TM.getDataLayout()) {
+TargetSelectionDAGInfo::TargetSelectionDAGInfo(const DataLayout *DL)
+ : DL(DL) {
}
TargetSelectionDAGInfo::~TargetSelectionDAGInfo() {
diff --git a/contrib/llvm/lib/CodeGen/ShadowStackGC.cpp b/contrib/llvm/lib/CodeGen/ShadowStackGC.cpp
index 10f64c7..f7c64da 100644
--- a/contrib/llvm/lib/CodeGen/ShadowStackGC.cpp
+++ b/contrib/llvm/lib/CodeGen/ShadowStackGC.cpp
@@ -25,17 +25,18 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "shadowstackgc"
#include "llvm/CodeGen/GCs.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/GCStrategy.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
-#include "llvm/Support/CallSite.h"
using namespace llvm;
+#define DEBUG_TYPE "shadowstackgc"
+
namespace {
class ShadowStackGC : public GCStrategy {
@@ -55,8 +56,8 @@ namespace {
public:
ShadowStackGC();
- bool initializeCustomLowering(Module &M);
- bool performCustomLowering(Function &F);
+ bool initializeCustomLowering(Module &M) override;
+ bool performCustomLowering(Function &F) override;
private:
bool IsNullValue(Value *V);
@@ -101,7 +102,7 @@ namespace {
IRBuilder<> *Next() {
switch (State) {
default:
- return 0;
+ return nullptr;
case 0:
StateBB = F.begin();
@@ -137,7 +138,7 @@ namespace {
Calls.push_back(CI);
if (Calls.empty())
- return 0;
+ return nullptr;
// Create a cleanup block.
LLVMContext &C = F.getContext();
@@ -194,7 +195,7 @@ namespace {
void llvm::linkShadowStackGC() { }
-ShadowStackGC::ShadowStackGC() : Head(0), StackEntryTy(0) {
+ShadowStackGC::ShadowStackGC() : Head(nullptr), StackEntryTy(nullptr) {
InitRoots = true;
CustomRoots = true;
}
@@ -390,8 +391,8 @@ bool ShadowStackGC::performCustomLowering(Function &F) {
BasicBlock::iterator IP = F.getEntryBlock().begin();
IRBuilder<> AtEntry(IP->getParent(), IP);
- Instruction *StackEntry = AtEntry.CreateAlloca(ConcreteStackEntryTy, 0,
- "gc_frame");
+ Instruction *StackEntry = AtEntry.CreateAlloca(ConcreteStackEntryTy, nullptr,
+ "gc_frame");
while (isa<AllocaInst>(IP)) ++IP;
AtEntry.SetInsertPoint(IP->getParent(), IP);
diff --git a/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp b/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
index da2e710..b0950de 100644
--- a/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "sjljehprepare"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
@@ -38,6 +37,8 @@
#include <set>
using namespace llvm;
+#define DEBUG_TYPE "sjljehprepare"
+
STATISTIC(NumInvokes, "Number of invokes replaced");
STATISTIC(NumSpilled, "Number of registers live across unwind edges");
@@ -60,11 +61,11 @@ class SjLjEHPrepare : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
explicit SjLjEHPrepare(const TargetMachine *TM) : FunctionPass(ID), TM(TM) {}
- bool doInitialization(Module &M);
- bool runOnFunction(Function &F);
+ bool doInitialization(Module &M) override;
+ bool runOnFunction(Function &F) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {}
- const char *getPassName() const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {}
+ const char *getPassName() const override {
return "SJLJ Exception Handling preparation";
}
@@ -100,10 +101,10 @@ bool SjLjEHPrepare::doInitialization(Module &M) {
NULL);
RegisterFn = M.getOrInsertFunction(
"_Unwind_SjLj_Register", Type::getVoidTy(M.getContext()),
- PointerType::getUnqual(FunctionContextTy), (Type *)0);
+ PointerType::getUnqual(FunctionContextTy), (Type *)nullptr);
UnregisterFn = M.getOrInsertFunction(
"_Unwind_SjLj_Unregister", Type::getVoidTy(M.getContext()),
- PointerType::getUnqual(FunctionContextTy), (Type *)0);
+ PointerType::getUnqual(FunctionContextTy), (Type *)nullptr);
FrameAddrFn = Intrinsic::getDeclaration(&M, Intrinsic::frameaddress);
StackAddrFn = Intrinsic::getDeclaration(&M, Intrinsic::stacksave);
StackRestoreFn = Intrinsic::getDeclaration(&M, Intrinsic::stackrestore);
@@ -111,7 +112,7 @@ bool SjLjEHPrepare::doInitialization(Module &M) {
LSDAAddrFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_lsda);
CallSiteFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_callsite);
FuncCtxFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_functioncontext);
- PersonalityFn = 0;
+ PersonalityFn = nullptr;
return true;
}
@@ -149,7 +150,7 @@ static void MarkBlocksLiveIn(BasicBlock *BB,
/// instruction with those returned by the personality function.
void SjLjEHPrepare::substituteLPadValues(LandingPadInst *LPI, Value *ExnVal,
Value *SelVal) {
- SmallVector<Value *, 8> UseWorkList(LPI->use_begin(), LPI->use_end());
+ SmallVector<Value *, 8> UseWorkList(LPI->user_begin(), LPI->user_end());
while (!UseWorkList.empty()) {
Value *Val = UseWorkList.pop_back_val();
ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Val);
@@ -173,7 +174,7 @@ void SjLjEHPrepare::substituteLPadValues(LandingPadInst *LPI, Value *ExnVal,
Type *LPadType = LPI->getType();
Value *LPadVal = UndefValue::get(LPadType);
IRBuilder<> Builder(
- llvm::next(BasicBlock::iterator(cast<Instruction>(SelVal))));
+ std::next(BasicBlock::iterator(cast<Instruction>(SelVal))));
LPadVal = Builder.CreateInsertValue(LPadVal, ExnVal, 0, "lpad.val");
LPadVal = Builder.CreateInsertValue(LPadVal, SelVal, 1, "lpad.val");
@@ -192,7 +193,7 @@ Value *SjLjEHPrepare::setupFunctionContext(Function &F,
const TargetLowering *TLI = TM->getTargetLowering();
unsigned Align =
TLI->getDataLayout()->getPrefTypeAlignment(FunctionContextTy);
- FuncCtx = new AllocaInst(FunctionContextTy, 0, Align, "fn_context",
+ FuncCtx = new AllocaInst(FunctionContextTy, nullptr, Align, "fn_context",
EntryBB->begin());
// Fill in the function context structure.
@@ -248,34 +249,16 @@ void SjLjEHPrepare::lowerIncomingArguments(Function &F) {
++AI) {
Type *Ty = AI->getType();
- // Aggregate types can't be cast, but are legal argument types, so we have
- // to handle them differently. We use an extract/insert pair as a
- // lightweight method to achieve the same goal.
- if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
- Instruction *EI = ExtractValueInst::Create(AI, 0, "", AfterAllocaInsPt);
- Instruction *NI = InsertValueInst::Create(AI, EI, 0);
- NI->insertAfter(EI);
- AI->replaceAllUsesWith(NI);
-
- // Set the operand of the instructions back to the AllocaInst.
- EI->setOperand(0, AI);
- NI->setOperand(0, AI);
- } else {
- // This is always a no-op cast because we're casting AI to AI->getType()
- // so src and destination types are identical. BitCast is the only
- // possibility.
- CastInst *NC = new BitCastInst(AI, AI->getType(), AI->getName() + ".tmp",
- AfterAllocaInsPt);
- AI->replaceAllUsesWith(NC);
-
- // Set the operand of the cast instruction back to the AllocaInst.
- // Normally it's forbidden to replace a CastInst's operand because it
- // could cause the opcode to reflect an illegal conversion. However, we're
- // replacing it here with the same value it was constructed with. We do
- // this because the above replaceAllUsesWith() clobbered the operand, but
- // we want this one to remain.
- NC->setOperand(0, AI);
- }
+ // Use 'select i8 true, %arg, undef' to simulate a 'no-op' instruction.
+ Value *TrueValue = ConstantInt::getTrue(F.getContext());
+ Value *UndefValue = UndefValue::get(Ty);
+ Instruction *SI = SelectInst::Create(TrueValue, AI, UndefValue,
+ AI->getName() + ".tmp",
+ AfterAllocaInsPt);
+ AI->replaceAllUsesWith(SI);
+
+ // Reset the operand, because it was clobbered by the RAUW above.
+ SI->setOperand(1, AI);
}
}
@@ -294,8 +277,8 @@ void SjLjEHPrepare::lowerAcrossUnwindEdges(Function &F,
if (Inst->use_empty())
continue;
if (Inst->hasOneUse() &&
- cast<Instruction>(Inst->use_back())->getParent() == BB &&
- !isa<PHINode>(Inst->use_back()))
+ cast<Instruction>(Inst->user_back())->getParent() == BB &&
+ !isa<PHINode>(Inst->user_back()))
continue;
// If this is an alloca in the entry block, it's not a real register
@@ -306,11 +289,10 @@ void SjLjEHPrepare::lowerAcrossUnwindEdges(Function &F,
// Avoid iterator invalidation by copying users to a temporary vector.
SmallVector<Instruction *, 16> Users;
- for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
- UI != E; ++UI) {
- Instruction *User = cast<Instruction>(*UI);
- if (User->getParent() != BB || isa<PHINode>(User))
- Users.push_back(User);
+ for (User *U : Inst->users()) {
+ Instruction *UI = cast<Instruction>(U);
+ if (UI->getParent() != BB || isa<PHINode>(UI))
+ Users.push_back(UI);
}
// Find all of the blocks that this value is live in.
diff --git a/contrib/llvm/lib/CodeGen/SlotIndexes.cpp b/contrib/llvm/lib/CodeGen/SlotIndexes.cpp
index 20049a8..d46621d 100644
--- a/contrib/llvm/lib/CodeGen/SlotIndexes.cpp
+++ b/contrib/llvm/lib/CodeGen/SlotIndexes.cpp
@@ -7,8 +7,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "slotindexes"
-
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -18,6 +16,8 @@
using namespace llvm;
+#define DEBUG_TYPE "slotindexes"
+
char SlotIndexes::ID = 0;
INITIALIZE_PASS(SlotIndexes, "slotindexes",
"Slot index numbering", false, false)
@@ -66,7 +66,7 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {
MBBRanges.resize(mf->getNumBlockIDs());
idx2MBBMap.reserve(mf->size());
- indexList.push_back(createEntry(0, index));
+ indexList.push_back(createEntry(nullptr, index));
// Iterate over the function.
for (MachineFunction::iterator mbbItr = mf->begin(), mbbEnd = mf->end();
@@ -91,7 +91,7 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {
}
// We insert one blank instructions between basic blocks.
- indexList.push_back(createEntry(0, index += SlotIndex::InstrDist));
+ indexList.push_back(createEntry(nullptr, index += SlotIndex::InstrDist));
MBBRanges[mbb->getNumber()].first = blockStartIndex;
MBBRanges[mbb->getNumber()].second = SlotIndex(&indexList.back(),
@@ -129,7 +129,7 @@ void SlotIndexes::renumberIndexes(IndexList::iterator curItr) {
const unsigned Space = SlotIndex::InstrDist/2;
assert((Space & 3) == 0 && "InstrDist must be a multiple of 2*NUM");
- IndexList::iterator startItr = prior(curItr);
+ IndexList::iterator startItr = std::prev(curItr);
unsigned index = startItr->getIndex();
do {
curItr->setIndex(index += Space);
@@ -182,7 +182,7 @@ void SlotIndexes::repairIndexesInRange(MachineBasicBlock *MBB,
"Decremented past the beginning of region to repair.");
MachineInstr *SlotMI = ListI->getInstr();
- MachineInstr *MI = (MBBI != MBB->end() && !pastStart) ? MBBI : 0;
+ MachineInstr *MI = (MBBI != MBB->end() && !pastStart) ? MBBI : nullptr;
bool MBBIAtBegin = MBBI == Begin && (!includeStart || pastStart);
if (SlotMI == MI && !MBBIAtBegin) {
@@ -219,7 +219,7 @@ void SlotIndexes::dump() const {
itr != indexList.end(); ++itr) {
dbgs() << itr->getIndex() << " ";
- if (itr->getInstr() != 0) {
+ if (itr->getInstr()) {
dbgs() << *itr->getInstr();
} else {
dbgs() << "\n";
diff --git a/contrib/llvm/lib/CodeGen/SpillPlacement.cpp b/contrib/llvm/lib/CodeGen/SpillPlacement.cpp
index 10a93b7..24e94d1 100644
--- a/contrib/llvm/lib/CodeGen/SpillPlacement.cpp
+++ b/contrib/llvm/lib/CodeGen/SpillPlacement.cpp
@@ -27,7 +27,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "spillplacement"
#include "SpillPlacement.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/EdgeBundles.h"
@@ -41,6 +40,8 @@
using namespace llvm;
+#define DEBUG_TYPE "spillplacement"
+
char SpillPlacement::ID = 0;
INITIALIZE_PASS_BEGIN(SpillPlacement, "spill-code-placement",
"Spill Code Placement Analysis", true, true)
@@ -59,9 +60,26 @@ void SpillPlacement::getAnalysisUsage(AnalysisUsage &AU) const {
MachineFunctionPass::getAnalysisUsage(AU);
}
+namespace {
+static BlockFrequency Threshold;
+}
+
/// Decision threshold. A node gets the output value 0 if the weighted sum of
/// its inputs falls in the open interval (-Threshold;Threshold).
-static const BlockFrequency Threshold = 2;
+static BlockFrequency getThreshold() { return Threshold; }
+
+/// \brief Set the threshold for a given entry frequency.
+///
+/// Set the threshold relative to \c Entry. Since the threshold is used as a
+/// bound on the open interval (-Threshold;Threshold), 1 is the minimum
+/// threshold.
+static void setThreshold(const BlockFrequency &Entry) {
+ // Apparently 2 is a good threshold when Entry==2^14, but we need to scale
+ // it. Divide by 2^13, rounding as appropriate.
+ uint64_t Freq = Entry.getFrequency();
+ uint64_t Scaled = (Freq >> 13) + bool(Freq & (1 << 12));
+ Threshold = std::max(UINT64_C(1), Scaled);
+}
/// Node - Each edge bundle corresponds to a Hopfield node.
///
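
The setThreshold() helper added above scales the old hard-coded threshold of 2 by the function's entry frequency, dividing by 2^13 with round-to-nearest and clamping at 1. A standalone restatement of that arithmetic with sample values, separate from the diff (the helper name is illustrative):

#include <algorithm>
#include <cstdint>

static uint64_t scaledThreshold(uint64_t EntryFreq) {
  uint64_t Scaled = (EntryFreq >> 13) + bool(EntryFreq & (1 << 12));
  return std::max(UINT64_C(1), Scaled);
}
// scaledThreshold(1u << 14) == 2  (the old default case, Entry == 2^14)
// scaledThreshold(12288)    == 2  (rounds up on the half-way bit)
// scaledThreshold(100)      == 1  (clamped to the minimum)
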
@@ -110,7 +128,7 @@ struct SpillPlacement::Node {
// the CFG.
void clear() {
BiasN = BiasP = Value = 0;
- SumLinkWeights = Threshold;
+ SumLinkWeights = getThreshold();
Links.clear();
}
@@ -168,9 +186,9 @@ struct SpillPlacement::Node {
// 2. It helps tame rounding errors when the links nominally sum to 0.
//
bool Before = preferReg();
- if (SumN >= SumP + Threshold)
+ if (SumN >= SumP + getThreshold())
Value = -1;
- else if (SumP >= SumN + Threshold)
+ else if (SumP >= SumN + getThreshold())
Value = 1;
else
Value = 0;
@@ -188,10 +206,11 @@ bool SpillPlacement::runOnMachineFunction(MachineFunction &mf) {
// Compute total ingoing and outgoing block frequencies for all bundles.
BlockFrequencies.resize(mf.getNumBlockIDs());
- MachineBlockFrequencyInfo &MBFI = getAnalysis<MachineBlockFrequencyInfo>();
+ MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
+ setThreshold(MBFI->getEntryFreq());
for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I) {
unsigned Num = I->getNumber();
- BlockFrequencies[Num] = MBFI.getBlockFreq(I);
+ BlockFrequencies[Num] = MBFI->getBlockFreq(I);
}
// We never change the function.
@@ -200,7 +219,7 @@ bool SpillPlacement::runOnMachineFunction(MachineFunction &mf) {
void SpillPlacement::releaseMemory() {
delete[] nodes;
- nodes = 0;
+ nodes = nullptr;
}
/// activate - mark node n as active if it wasn't already.
@@ -221,7 +240,7 @@ void SpillPlacement::activate(unsigned n) {
// Hopfield network.
if (bundles->getBlocks(n).size() > 100) {
nodes[n].BiasP = 0;
- nodes[n].BiasN = (BlockFrequency::getEntryFrequency() / 16);
+ nodes[n].BiasN = (MBFI->getEntryFreq() / 16);
}
}
@@ -323,10 +342,12 @@ void SpillPlacement::iterate() {
// affect the entire network in a single iteration. That means very fast
// convergence, usually in a single iteration.
for (unsigned iteration = 0; iteration != 10; ++iteration) {
- // Scan backwards, skipping the last node which was just updated.
+ // Scan backwards, skipping the last node when iteration is not zero. When
+ // iteration is not zero, the last node was just updated.
bool Changed = false;
for (SmallVectorImpl<unsigned>::const_reverse_iterator I =
- llvm::next(Linked.rbegin()), E = Linked.rend(); I != E; ++I) {
+ iteration == 0 ? Linked.rbegin() : std::next(Linked.rbegin()),
+ E = Linked.rend(); I != E; ++I) {
unsigned n = *I;
if (nodes[n].update(nodes)) {
Changed = true;
@@ -340,7 +361,7 @@ void SpillPlacement::iterate() {
// Scan forwards, skipping the first node which was just updated.
Changed = false;
for (SmallVectorImpl<unsigned>::const_iterator I =
- llvm::next(Linked.begin()), E = Linked.end(); I != E; ++I) {
+ std::next(Linked.begin()), E = Linked.end(); I != E; ++I) {
unsigned n = *I;
if (nodes[n].update(nodes)) {
Changed = true;
@@ -373,6 +394,6 @@ SpillPlacement::finish() {
ActiveNodes->reset(n);
Perfect = false;
}
- ActiveNodes = 0;
+ ActiveNodes = nullptr;
return Perfect;
}
diff --git a/contrib/llvm/lib/CodeGen/SpillPlacement.h b/contrib/llvm/lib/CodeGen/SpillPlacement.h
index 105516b..43fc7f5 100644
--- a/contrib/llvm/lib/CodeGen/SpillPlacement.h
+++ b/contrib/llvm/lib/CodeGen/SpillPlacement.h
@@ -38,12 +38,14 @@ class BitVector;
class EdgeBundles;
class MachineBasicBlock;
class MachineLoopInfo;
+class MachineBlockFrequencyInfo;
class SpillPlacement : public MachineFunctionPass {
struct Node;
const MachineFunction *MF;
const EdgeBundles *bundles;
const MachineLoopInfo *loops;
+ const MachineBlockFrequencyInfo *MBFI;
Node *nodes;
// Nodes that are active in the current computation. Owned by the prepare()
@@ -63,7 +65,7 @@ class SpillPlacement : public MachineFunctionPass {
public:
static char ID; // Pass identification, replacement for typeid.
- SpillPlacement() : MachineFunctionPass(ID), nodes(0) {}
+ SpillPlacement() : MachineFunctionPass(ID), nodes(nullptr) {}
~SpillPlacement() { releaseMemory(); }
/// BorderConstraint - A basic block has separate constraints for entry and
@@ -145,9 +147,9 @@ public:
}
private:
- virtual bool runOnMachineFunction(MachineFunction&);
- virtual void getAnalysisUsage(AnalysisUsage&) const;
- virtual void releaseMemory();
+ bool runOnMachineFunction(MachineFunction&) override;
+ void getAnalysisUsage(AnalysisUsage&) const override;
+ void releaseMemory() override;
void activate(unsigned);
};
diff --git a/contrib/llvm/lib/CodeGen/Spiller.cpp b/contrib/llvm/lib/CodeGen/Spiller.cpp
index d5b3a4a..0649448 100644
--- a/contrib/llvm/lib/CodeGen/Spiller.cpp
+++ b/contrib/llvm/lib/CodeGen/Spiller.cpp
@@ -7,8 +7,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "spiller"
-
#include "Spiller.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
@@ -28,6 +26,8 @@
using namespace llvm;
+#define DEBUG_TYPE "spiller"
+
namespace {
enum SpillerName { trivial, inline_ };
}
@@ -89,8 +89,9 @@ protected:
unsigned ss = vrm->assignVirt2StackSlot(li->reg);
// Iterate over reg uses/defs.
- for (MachineRegisterInfo::reg_iterator
- regItr = mri->reg_begin(li->reg); regItr != mri->reg_end();) {
+ for (MachineRegisterInfo::reg_instr_iterator
+ regItr = mri->reg_instr_begin(li->reg);
+ regItr != mri->reg_instr_end();) {
// Grab the use/def instr.
MachineInstr *mi = &*regItr;
@@ -98,9 +99,7 @@ protected:
DEBUG(dbgs() << " Processing " << *mi);
// Step regItr to the next use/def instr.
- do {
- ++regItr;
- } while (regItr != mri->reg_end() && (&*regItr == mi));
+ ++regItr;
// Collect uses & defs for this instr.
SmallVector<unsigned, 2> indices;
@@ -143,9 +142,9 @@ protected:
if (hasDef) {
MachineInstrSpan MIS(miItr);
- tii->storeRegToStackSlot(*mi->getParent(), llvm::next(miItr), NewVReg,
+ tii->storeRegToStackSlot(*mi->getParent(), std::next(miItr), NewVReg,
true, ss, trc, tri);
- lis->InsertMachineInstrRangeInMaps(llvm::next(miItr), MIS.end());
+ lis->InsertMachineInstrRangeInMaps(std::next(miItr), MIS.end());
}
}
}
@@ -164,7 +163,7 @@ public:
VirtRegMap &vrm)
: SpillerBase(pass, mf, vrm) {}
- void spill(LiveRangeEdit &LRE) {
+ void spill(LiveRangeEdit &LRE) override {
// Ignore spillIs - we don't use it.
trivialSpillEverywhere(LRE);
}
diff --git a/contrib/llvm/lib/CodeGen/SplitKit.cpp b/contrib/llvm/lib/CodeGen/SplitKit.cpp
index 68a15f7..7d4f568 100644
--- a/contrib/llvm/lib/CodeGen/SplitKit.cpp
+++ b/contrib/llvm/lib/CodeGen/SplitKit.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "regalloc"
#include "SplitKit.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
@@ -29,6 +28,8 @@
using namespace llvm;
+#define DEBUG_TYPE "regalloc"
+
STATISTIC(NumFinished, "Number of splits finished");
STATISTIC(NumSimple, "Number of splits that were simple");
STATISTIC(NumCopies, "Number of copies inserted for splitting");
@@ -47,14 +48,14 @@ SplitAnalysis::SplitAnalysis(const VirtRegMap &vrm,
LIS(lis),
Loops(mli),
TII(*MF.getTarget().getInstrInfo()),
- CurLI(0),
+ CurLI(nullptr),
LastSplitPoint(MF.getNumBlockIDs()) {}
void SplitAnalysis::clear() {
UseSlots.clear();
UseBlocks.clear();
ThroughBlocks.clear();
- CurLI = 0;
+ CurLI = nullptr;
DidRepairRange = false;
}
@@ -131,11 +132,9 @@ void SplitAnalysis::analyzeUses() {
// Get use slots form the use-def chain.
const MachineRegisterInfo &MRI = MF.getRegInfo();
- for (MachineRegisterInfo::use_nodbg_iterator
- I = MRI.use_nodbg_begin(CurLI->reg), E = MRI.use_nodbg_end(); I != E;
- ++I)
- if (!I.getOperand().isUndef())
- UseSlots.push_back(LIS.getInstructionIndex(&*I).getRegSlot());
+ for (MachineOperand &MO : MRI.use_nodbg_operands(CurLI->reg))
+ if (!MO.isUndef())
+ UseSlots.push_back(LIS.getInstructionIndex(MO.getParent()).getRegSlot());
array_pod_sort(UseSlots.begin(), UseSlots.end());
@@ -188,7 +187,7 @@ bool SplitAnalysis::calcLiveBlockInfo() {
BlockInfo BI;
BI.MBB = MFI;
SlotIndex Start, Stop;
- tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB);
+ std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB);
// If the block contains no uses, the range must be live through. At one
// point, RegisterCoalescer could create dangling ranges that ended
@@ -333,7 +332,7 @@ SplitEditor::SplitEditor(SplitAnalysis &sa,
TII(*vrm.getMachineFunction().getTarget().getInstrInfo()),
TRI(*vrm.getMachineFunction().getTarget().getRegisterInfo()),
MBFI(mbfi),
- Edit(0),
+ Edit(nullptr),
OpenIdx(0),
SpillMode(SM_Partition),
RegAssign(Allocator)
@@ -355,7 +354,7 @@ void SplitEditor::reset(LiveRangeEdit &LRE, ComplementSpillMode SM) {
// We don't need an AliasAnalysis since we will only be performing
// cheap-as-a-copy remats anyway.
- Edit->anyRematerializable(0);
+ Edit->anyRematerializable(nullptr);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -425,7 +424,7 @@ void SplitEditor::forceRecompute(unsigned RegIdx, const VNInfo *ParentVNI) {
LiveInterval *LI = &LIS.getInterval(Edit->get(RegIdx));
LI->addSegment(LiveInterval::Segment(Def, Def.getDeadSlot(), VNI));
// Mark as complex mapped, forced.
- VFP = ValueForcePair(0, true);
+ VFP = ValueForcePair(nullptr, true);
}
VNInfo *SplitEditor::defFromParent(unsigned RegIdx,
@@ -433,7 +432,7 @@ VNInfo *SplitEditor::defFromParent(unsigned RegIdx,
SlotIndex UseIdx,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) {
- MachineInstr *CopyMI = 0;
+ MachineInstr *CopyMI = nullptr;
SlotIndex Def;
LiveInterval *LI = &LIS.getInterval(Edit->get(RegIdx));
@@ -509,7 +508,7 @@ SlotIndex SplitEditor::enterIntvAfter(SlotIndex Idx) {
assert(MI && "enterIntvAfter called with invalid index");
VNInfo *VNI = defFromParent(OpenIdx, ParentVNI, Idx, *MI->getParent(),
- llvm::next(MachineBasicBlock::iterator(MI)));
+ std::next(MachineBasicBlock::iterator(MI)));
return VNI->def;
}
@@ -570,7 +569,7 @@ SlotIndex SplitEditor::leaveIntvAfter(SlotIndex Idx) {
}
VNInfo *VNI = defFromParent(0, ParentVNI, Boundary, *MI->getParent(),
- llvm::next(MachineBasicBlock::iterator(MI)));
+ std::next(MachineBasicBlock::iterator(MI)));
return VNI->def;
}
@@ -888,7 +887,7 @@ bool SplitEditor::transferValues() {
// LiveInBlocks.
MachineFunction::iterator MBB = LIS.getMBBFromIndex(Start);
SlotIndex BlockStart, BlockEnd;
- tie(BlockStart, BlockEnd) = LIS.getSlotIndexes()->getMBBRange(MBB);
+ std::tie(BlockStart, BlockEnd) = LIS.getSlotIndexes()->getMBBRange(MBB);
// The first block may be live-in, or it may have its own def.
if (Start != BlockStart) {
@@ -924,7 +923,7 @@ bool SplitEditor::transferValues() {
else {
// Live-through, and we don't know the value.
LRC.addLiveInBlock(LR, MDT[MBB]);
- LRC.setLiveOutValue(MBB, 0);
+ LRC.setLiveOutValue(MBB, nullptr);
}
}
BlockStart = BlockEnd;
@@ -972,7 +971,7 @@ void SplitEditor::extendPHIKillRanges() {
void SplitEditor::rewriteAssigned(bool ExtendRanges) {
for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Edit->getReg()),
RE = MRI.reg_end(); RI != RE;) {
- MachineOperand &MO = RI.getOperand();
+ MachineOperand &MO = *RI;
MachineInstr *MI = MO.getParent();
++RI;
// LiveDebugVariables should have handled all DBG_VALUE instructions.
@@ -1183,7 +1182,7 @@ void SplitEditor::splitLiveThroughBlock(unsigned MBBNum,
unsigned IntvIn, SlotIndex LeaveBefore,
unsigned IntvOut, SlotIndex EnterAfter){
SlotIndex Start, Stop;
- tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(MBBNum);
+ std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(MBBNum);
DEBUG(dbgs() << "BB#" << MBBNum << " [" << Start << ';' << Stop
<< ") intf " << LeaveBefore << '-' << EnterAfter
@@ -1286,7 +1285,7 @@ void SplitEditor::splitLiveThroughBlock(unsigned MBBNum,
void SplitEditor::splitRegInBlock(const SplitAnalysis::BlockInfo &BI,
unsigned IntvIn, SlotIndex LeaveBefore) {
SlotIndex Start, Stop;
- tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB);
+ std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB);
DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " [" << Start << ';' << Stop
<< "), uses " << BI.FirstInstr << '-' << BI.LastInstr
@@ -1378,7 +1377,7 @@ void SplitEditor::splitRegInBlock(const SplitAnalysis::BlockInfo &BI,
void SplitEditor::splitRegOutBlock(const SplitAnalysis::BlockInfo &BI,
unsigned IntvOut, SlotIndex EnterAfter) {
SlotIndex Start, Stop;
- tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB);
+ std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB);
DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " [" << Start << ';' << Stop
<< "), uses " << BI.FirstInstr << '-' << BI.LastInstr
diff --git a/contrib/llvm/lib/CodeGen/SplitKit.h b/contrib/llvm/lib/CodeGen/SplitKit.h
index f029c73..7048ee3 100644
--- a/contrib/llvm/lib/CodeGen/SplitKit.h
+++ b/contrib/llvm/lib/CodeGen/SplitKit.h
@@ -377,7 +377,7 @@ public:
SlotIndex enterIntvAfter(SlotIndex Idx);
/// enterIntvAtEnd - Enter the open interval at the end of MBB.
- /// Use the open interval from he inserted copy to the MBB end.
+ /// Use the open interval from the inserted copy to the MBB end.
/// Return the beginning of the new live range.
SlotIndex enterIntvAtEnd(MachineBasicBlock &MBB);
@@ -417,7 +417,7 @@ public:
/// @param LRMap When not null, this vector will map each live range in Edit
/// back to the indices returned by openIntv.
/// There may be extra indices created by dead code elimination.
- void finish(SmallVectorImpl<unsigned> *LRMap = 0);
+ void finish(SmallVectorImpl<unsigned> *LRMap = nullptr);
/// dump - print the current interval maping to dbgs().
void dump() const;
diff --git a/contrib/llvm/lib/CodeGen/StackColoring.cpp b/contrib/llvm/lib/CodeGen/StackColoring.cpp
index 3dbc050..370430c 100644
--- a/contrib/llvm/lib/CodeGen/StackColoring.cpp
+++ b/contrib/llvm/lib/CodeGen/StackColoring.cpp
@@ -21,7 +21,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "stackcoloring"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DepthFirstIterator.h"
@@ -30,7 +29,6 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SparseSet.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
@@ -44,7 +42,9 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SlotIndexes.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/CodeGen/StackProtector.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
@@ -57,6 +57,8 @@
using namespace llvm;
+#define DEBUG_TYPE "stackcoloring"
+
static cl::opt<bool>
DisableColoring("no-stack-coloring",
cl::init(false), cl::Hidden,
@@ -112,37 +114,25 @@ class StackColoring : public MachineFunctionPass {
SmallVector<const MachineBasicBlock*, 8> BasicBlockNumbering;
/// Maps liveness intervals for each slot.
- SmallVector<LiveInterval*, 16> Intervals;
+ SmallVector<std::unique_ptr<LiveInterval>, 16> Intervals;
/// VNInfo is used for the construction of LiveIntervals.
VNInfo::Allocator VNInfoAllocator;
/// SlotIndex analysis object.
SlotIndexes *Indexes;
+ /// The stack protector object.
+ StackProtector *SP;
/// The list of lifetime markers found. These markers are to be removed
/// once the coloring is done.
SmallVector<MachineInstr*, 8> Markers;
- /// SlotSizeSorter - A Sort utility for arranging stack slots according
- /// to their size.
- struct SlotSizeSorter {
- MachineFrameInfo *MFI;
- SlotSizeSorter(MachineFrameInfo *mfi) : MFI(mfi) { }
- bool operator()(int LHS, int RHS) {
- // We use -1 to denote a uninteresting slot. Place these slots at the end.
- if (LHS == -1) return false;
- if (RHS == -1) return true;
- // Sort according to size.
- return MFI->getObjectSize(LHS) > MFI->getObjectSize(RHS);
- }
-};
-
public:
static char ID;
StackColoring() : MachineFunctionPass(ID) {
initializeStackColoringPass(*PassRegistry::getPassRegistry());
}
- void getAnalysisUsage(AnalysisUsage &AU) const;
- bool runOnMachineFunction(MachineFunction &MF);
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool runOnMachineFunction(MachineFunction &MF) override;
private:
/// Debug.
@@ -191,6 +181,7 @@ INITIALIZE_PASS_BEGIN(StackColoring,
"stack-coloring", "Merge disjoint stack slots", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_DEPENDENCY(StackProtector)
INITIALIZE_PASS_END(StackColoring,
"stack-coloring", "Merge disjoint stack slots", false, false)
@@ -198,16 +189,16 @@ void StackColoring::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<MachineDominatorTree>();
AU.addPreserved<MachineDominatorTree>();
AU.addRequired<SlotIndexes>();
+ AU.addRequired<StackProtector>();
MachineFunctionPass::getAnalysisUsage(AU);
}
void StackColoring::dump() const {
- for (df_iterator<MachineFunction*> FI = df_begin(MF), FE = df_end(MF);
- FI != FE; ++FI) {
- DEBUG(dbgs()<<"Inspecting block #"<<BasicBlocks.lookup(*FI)<<
- " ["<<FI->getName()<<"]\n");
+ for (MachineBasicBlock *MBB : depth_first(MF)) {
+ DEBUG(dbgs() << "Inspecting block #" << BasicBlocks.lookup(MBB) << " ["
+ << MBB->getName() << "]\n");
- LivenessMap::const_iterator BI = BlockLiveness.find(*FI);
+ LivenessMap::const_iterator BI = BlockLiveness.find(MBB);
assert(BI != BlockLiveness.end() && "Block not found");
const BlockLifetimeInfo &BlockInfo = BI->second;
@@ -240,31 +231,28 @@ unsigned StackColoring::collectMarkers(unsigned NumSlot) {
// NOTE: We use the a reverse-post-order iteration to ensure that we obtain a
// deterministic numbering, and because we'll need a post-order iteration
// later for solving the liveness dataflow problem.
- for (df_iterator<MachineFunction*> FI = df_begin(MF), FE = df_end(MF);
- FI != FE; ++FI) {
+ for (MachineBasicBlock *MBB : depth_first(MF)) {
// Assign a serial number to this basic block.
- BasicBlocks[*FI] = BasicBlockNumbering.size();
- BasicBlockNumbering.push_back(*FI);
+ BasicBlocks[MBB] = BasicBlockNumbering.size();
+ BasicBlockNumbering.push_back(MBB);
// Keep a reference to avoid repeated lookups.
- BlockLifetimeInfo &BlockInfo = BlockLiveness[*FI];
+ BlockLifetimeInfo &BlockInfo = BlockLiveness[MBB];
BlockInfo.Begin.resize(NumSlot);
BlockInfo.End.resize(NumSlot);
- for (MachineBasicBlock::iterator BI = (*FI)->begin(), BE = (*FI)->end();
- BI != BE; ++BI) {
-
- if (BI->getOpcode() != TargetOpcode::LIFETIME_START &&
- BI->getOpcode() != TargetOpcode::LIFETIME_END)
+ for (MachineInstr &MI : *MBB) {
+ if (MI.getOpcode() != TargetOpcode::LIFETIME_START &&
+ MI.getOpcode() != TargetOpcode::LIFETIME_END)
continue;
- Markers.push_back(BI);
+ Markers.push_back(&MI);
- bool IsStart = BI->getOpcode() == TargetOpcode::LIFETIME_START;
- const MachineOperand &MI = BI->getOperand(0);
- unsigned Slot = MI.getIndex();
+ bool IsStart = MI.getOpcode() == TargetOpcode::LIFETIME_START;
+ const MachineOperand &MO = MI.getOperand(0);
+ unsigned Slot = MO.getIndex();
MarkersFound++;
@@ -310,11 +298,7 @@ void StackColoring::calculateLocalLiveness() {
SmallPtrSet<const MachineBasicBlock*, 8> NextBBSet;
- for (SmallVectorImpl<const MachineBasicBlock *>::iterator
- PI = BasicBlockNumbering.begin(), PE = BasicBlockNumbering.end();
- PI != PE; ++PI) {
-
- const MachineBasicBlock *BB = *PI;
+ for (const MachineBasicBlock *BB : BasicBlockNumbering) {
if (!BBSet.count(BB)) continue;
// Use an iterator to avoid repeated lookups.
@@ -369,18 +353,14 @@ void StackColoring::calculateLocalLiveness() {
changed = true;
BlockInfo.LiveIn |= LocalLiveIn;
- for (MachineBasicBlock::const_pred_iterator PI = BB->pred_begin(),
- PE = BB->pred_end(); PI != PE; ++PI)
- NextBBSet.insert(*PI);
+ NextBBSet.insert(BB->pred_begin(), BB->pred_end());
}
if (LocalLiveOut.test(BlockInfo.LiveOut)) {
changed = true;
BlockInfo.LiveOut |= LocalLiveOut;
- for (MachineBasicBlock::const_succ_iterator SI = BB->succ_begin(),
- SE = BB->succ_end(); SI != SE; ++SI)
- NextBBSet.insert(*SI);
+ NextBBSet.insert(BB->succ_begin(), BB->succ_end());
}
}
@@ -394,18 +374,15 @@ void StackColoring::calculateLiveIntervals(unsigned NumSlots) {
// For each block, find which slots are active within this block
// and update the live intervals.
- for (MachineFunction::iterator MBB = MF->begin(), MBBe = MF->end();
- MBB != MBBe; ++MBB) {
+ for (const MachineBasicBlock &MBB : *MF) {
Starts.clear();
Starts.resize(NumSlots);
Finishes.clear();
Finishes.resize(NumSlots);
// Create the interval for the basic blocks with lifetime markers in them.
- for (SmallVectorImpl<MachineInstr*>::const_iterator it = Markers.begin(),
- e = Markers.end(); it != e; ++it) {
- const MachineInstr *MI = *it;
- if (MI->getParent() != MBB)
+ for (const MachineInstr *MI : Markers) {
+ if (MI->getParent() != &MBB)
continue;
assert((MI->getOpcode() == TargetOpcode::LIFETIME_START ||
@@ -429,14 +406,14 @@ void StackColoring::calculateLiveIntervals(unsigned NumSlots) {
}
// Create the interval of the blocks that we previously found to be 'alive'.
- BlockLifetimeInfo &MBBLiveness = BlockLiveness[MBB];
+ BlockLifetimeInfo &MBBLiveness = BlockLiveness[&MBB];
for (int pos = MBBLiveness.LiveIn.find_first(); pos != -1;
pos = MBBLiveness.LiveIn.find_next(pos)) {
- Starts[pos] = Indexes->getMBBStartIdx(MBB);
+ Starts[pos] = Indexes->getMBBStartIdx(&MBB);
}
for (int pos = MBBLiveness.LiveOut.find_first(); pos != -1;
pos = MBBLiveness.LiveOut.find_next(pos)) {
- Finishes[pos] = Indexes->getMBBEndIdx(MBB);
+ Finishes[pos] = Indexes->getMBBEndIdx(&MBB);
}
for (unsigned i = 0; i < NumSlots; ++i) {
@@ -452,10 +429,10 @@ void StackColoring::calculateLiveIntervals(unsigned NumSlots) {
// We have a single consecutive region.
Intervals[i]->addSegment(LiveInterval::Segment(S, F, ValNum));
} else {
- // We have two non consecutive regions. This happens when
+ // We have two non-consecutive regions. This happens when
// LIFETIME_START appears after the LIFETIME_END marker.
- SlotIndex NewStart = Indexes->getMBBStartIdx(MBB);
- SlotIndex NewFin = Indexes->getMBBEndIdx(MBB);
+ SlotIndex NewStart = Indexes->getMBBStartIdx(&MBB);
+ SlotIndex NewFin = Indexes->getMBBEndIdx(&MBB);
Intervals[i]->addSegment(LiveInterval::Segment(NewStart, F, ValNum));
Intervals[i]->addSegment(LiveInterval::Segment(S, NewFin, ValNum));
}
@@ -465,8 +442,8 @@ void StackColoring::calculateLiveIntervals(unsigned NumSlots) {
bool StackColoring::removeAllMarkers() {
unsigned Count = 0;
- for (unsigned i = 0; i < Markers.size(); ++i) {
- Markers[i]->eraseFromParent();
+ for (MachineInstr *MI : Markers) {
+ MI->eraseFromParent();
Count++;
}
Markers.clear();
@@ -482,65 +459,70 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) {
MachineModuleInfo *MMI = &MF->getMMI();
// Remap debug information that refers to stack slots.
- MachineModuleInfo::VariableDbgInfoMapTy &VMap = MMI->getVariableDbgInfo();
- for (MachineModuleInfo::VariableDbgInfoMapTy::iterator VI = VMap.begin(),
- VE = VMap.end(); VI != VE; ++VI) {
- const MDNode *Var = VI->first;
- if (!Var) continue;
- std::pair<unsigned, DebugLoc> &VP = VI->second;
- if (SlotRemap.count(VP.first)) {
- DEBUG(dbgs()<<"Remapping debug info for ["<<Var->getName()<<"].\n");
- VP.first = SlotRemap[VP.first];
+ for (auto &VI : MMI->getVariableDbgInfo()) {
+ if (!VI.Var)
+ continue;
+ if (SlotRemap.count(VI.Slot)) {
+ DEBUG(dbgs()<<"Remapping debug info for ["<<VI.Var->getName()<<"].\n");
+ VI.Slot = SlotRemap[VI.Slot];
FixedDbg++;
}
}
// Keep a list of *allocas* which need to be remapped.
DenseMap<const AllocaInst*, const AllocaInst*> Allocas;
- for (DenseMap<int, int>::const_iterator it = SlotRemap.begin(),
- e = SlotRemap.end(); it != e; ++it) {
- const AllocaInst *From = MFI->getObjectAllocation(it->first);
- const AllocaInst *To = MFI->getObjectAllocation(it->second);
+ for (const std::pair<int, int> &SI : SlotRemap) {
+ const AllocaInst *From = MFI->getObjectAllocation(SI.first);
+ const AllocaInst *To = MFI->getObjectAllocation(SI.second);
assert(To && From && "Invalid allocation object");
Allocas[From] = To;
+
+ // AA might be used later for instruction scheduling, and we need it to be
+ // able to deduce the correct aliasing relationships between pointers
+ // derived from the alloca being remapped and the target of that remapping.
+ // The only safe way, without directly informing AA about the remapping
+ // somehow, is to directly update the IR to reflect the change being made
+ // here.
+ Instruction *Inst = const_cast<AllocaInst *>(To);
+ if (From->getType() != To->getType()) {
+ BitCastInst *Cast = new BitCastInst(Inst, From->getType());
+ Cast->insertAfter(Inst);
+ Inst = Cast;
+ }
+
+ // Allow the stack protector to adjust its value map to account for the
+ // upcoming replacement.
+ SP->adjustForColoring(From, To);
+
+ // Note that this will not replace uses in MMOs (which we'll update below),
+ // or anywhere else (which is why we won't delete the original
+ // instruction).
+ const_cast<AllocaInst *>(From)->replaceAllUsesWith(Inst);
}
// Remap all instructions to the new stack slots.
- MachineFunction::iterator BB, BBE;
- MachineBasicBlock::iterator I, IE;
- for (BB = MF->begin(), BBE = MF->end(); BB != BBE; ++BB)
- for (I = BB->begin(), IE = BB->end(); I != IE; ++I) {
-
+ for (MachineBasicBlock &BB : *MF)
+ for (MachineInstr &I : BB) {
// Skip lifetime markers. We'll remove them soon.
- if (I->getOpcode() == TargetOpcode::LIFETIME_START ||
- I->getOpcode() == TargetOpcode::LIFETIME_END)
+ if (I.getOpcode() == TargetOpcode::LIFETIME_START ||
+ I.getOpcode() == TargetOpcode::LIFETIME_END)
continue;
// Update the MachineMemOperand to use the new alloca.
- for (MachineInstr::mmo_iterator MM = I->memoperands_begin(),
- E = I->memoperands_end(); MM != E; ++MM) {
- MachineMemOperand *MMO = *MM;
-
- const Value *V = MMO->getValue();
-
- if (!V)
- continue;
-
- const PseudoSourceValue *PSV = dyn_cast<const PseudoSourceValue>(V);
- if (PSV && PSV->isConstant(MFI))
+ for (MachineMemOperand *MMO : I.memoperands()) {
+ // FIXME: In order to enable the use of TBAA when using AA in CodeGen,
+ // we'll also need to update the TBAA nodes in MMOs with values
+ // derived from the merged allocas. When doing this, we'll need to use
+ // the same variant of GetUnderlyingObjects that is used by the
+ // instruction scheduler (that can look through ptrtoint/inttoptr
+ // pairs).
+
+ // We've replaced IR-level uses of the remapped allocas, so we only
+ // need to replace direct uses here.
+ const AllocaInst *AI = dyn_cast_or_null<AllocaInst>(MMO->getValue());
+ if (!AI)
continue;
- // Climb up and find the original alloca.
- V = GetUnderlyingObject(V);
- // If we did not find one, or if the one that we found is not in our
- // map, then move on.
- if (!V || !isa<AllocaInst>(V)) {
- // Clear mem operand since we don't know for sure that it doesn't
- // alias a merged alloca.
- MMO->setValue(0);
- continue;
- }
- const AllocaInst *AI= cast<AllocaInst>(V);
if (!Allocas.count(AI))
continue;
@@ -549,9 +531,7 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) {
}
// Update all of the machine instruction operands.
- for (unsigned i = 0 ; i < I->getNumOperands(); ++i) {
- MachineOperand &MO = I->getOperand(i);
-
+ for (MachineOperand &MO : I.operands()) {
if (!MO.isFI())
continue;
int FromSlot = MO.getIndex();
@@ -572,12 +552,12 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) {
// zone are are okay, despite the fact that we don't have a good way
// for validating all of the usages of the calculation.
#ifndef NDEBUG
- bool TouchesMemory = I->mayLoad() || I->mayStore();
+ bool TouchesMemory = I.mayLoad() || I.mayStore();
// If we *don't* protect the user from escaped allocas, don't bother
// validating the instructions.
- if (!I->isDebugValue() && TouchesMemory && ProtectFromEscapedAllocas) {
- SlotIndex Index = Indexes->getInstructionIndex(I);
- LiveInterval *Interval = Intervals[FromSlot];
+ if (!I.isDebugValue() && TouchesMemory && ProtectFromEscapedAllocas) {
+ SlotIndex Index = Indexes->getInstructionIndex(&I);
+ const LiveInterval *Interval = &*Intervals[FromSlot];
assert(Interval->find(Index) != Interval->end() &&
"Found instruction usage outside of live range.");
}
@@ -596,13 +576,10 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) {
}
void StackColoring::removeInvalidSlotRanges() {
- MachineFunction::const_iterator BB, BBE;
- MachineBasicBlock::const_iterator I, IE;
- for (BB = MF->begin(), BBE = MF->end(); BB != BBE; ++BB)
- for (I = BB->begin(), IE = BB->end(); I != IE; ++I) {
-
- if (I->getOpcode() == TargetOpcode::LIFETIME_START ||
- I->getOpcode() == TargetOpcode::LIFETIME_END || I->isDebugValue())
+ for (MachineBasicBlock &BB : *MF)
+ for (MachineInstr &I : BB) {
+ if (I.getOpcode() == TargetOpcode::LIFETIME_START ||
+ I.getOpcode() == TargetOpcode::LIFETIME_END || I.isDebugValue())
continue;
// Some intervals are suspicious! In some cases we find address
@@ -611,13 +588,11 @@ void StackColoring::removeInvalidSlotRanges() {
// violation, but address calculations are okay. This can happen when
// GEPs are hoisted outside of the lifetime zone.
// So, in here we only check instructions which can read or write memory.
- if (!I->mayLoad() && !I->mayStore())
+ if (!I.mayLoad() && !I.mayStore())
continue;
// Check all of the machine operands.
- for (unsigned i = 0 ; i < I->getNumOperands(); ++i) {
- const MachineOperand &MO = I->getOperand(i);
-
+ for (const MachineOperand &MO : I.operands()) {
if (!MO.isFI())
continue;
@@ -631,10 +606,10 @@ void StackColoring::removeInvalidSlotRanges() {
// Check that the used slot is inside the calculated lifetime range.
// If it is not, warn about it and invalidate the range.
- LiveInterval *Interval = Intervals[Slot];
- SlotIndex Index = Indexes->getInstructionIndex(I);
+ LiveInterval *Interval = &*Intervals[Slot];
+ SlotIndex Index = Indexes->getInstructionIndex(&I);
if (Interval->find(Index) == Interval->end()) {
- Intervals[Slot]->clear();
+ Interval->clear();
DEBUG(dbgs()<<"Invalidating range #"<<Slot<<"\n");
EscapedAllocas++;
}
@@ -659,12 +634,16 @@ void StackColoring::expungeSlotMap(DenseMap<int, int> &SlotRemap,
}
bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
+ if (skipOptnoneFunction(*Func.getFunction()))
+ return false;
+
DEBUG(dbgs() << "********** Stack Coloring **********\n"
<< "********** Function: "
<< ((const Value*)Func.getFunction())->getName() << '\n');
MF = &Func;
MFI = MF->getFrameInfo();
Indexes = &getAnalysis<SlotIndexes>();
+ SP = &getAnalysis<StackProtector>();
BlockLiveness.clear();
BasicBlocks.clear();
BasicBlockNumbering.clear();
@@ -704,9 +683,9 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
}
for (unsigned i=0; i < NumSlots; ++i) {
- LiveInterval *LI = new LiveInterval(i, 0);
- Intervals.push_back(LI);
+ std::unique_ptr<LiveInterval> LI(new LiveInterval(i, 0));
LI->getNextValue(Indexes->getZeroIndex(), VNInfoAllocator);
+ Intervals.push_back(std::move(LI));
SortedSlots.push_back(i);
}
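With the Intervals container now holding std::unique_ptr<LiveInterval>, the explicit delete loop at the end of runOnMachineFunction() becomes unnecessary and is removed a few hunks below. A standalone sketch of that ownership change, using a plain struct in place of LiveInterval:

#include <cstdio>
#include <memory>
#include <vector>

// Stand-in for LiveInterval: just an id.
struct Interval {
  unsigned id;
  explicit Interval(unsigned i) : id(i) {}
};

int main() {
  // Owning container: destroying the vector destroys every Interval, so no
  // trailing "delete Intervals[I]" loop is needed, mirroring the patch.
  std::vector<std::unique_ptr<Interval>> Intervals;
  for (unsigned i = 0; i != 4; ++i) {
    std::unique_ptr<Interval> LI(new Interval(i));
    Intervals.push_back(std::move(LI));
  }

  // Borrow a raw pointer when an API wants one; ownership stays in the vector.
  Interval *First = &*Intervals[0];
  std::printf("first interval id = %u, total = %zu\n", First->id,
              Intervals.size());
  return 0;
}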
@@ -741,7 +720,13 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
// Sort the slots according to their size. Place unused slots at the end.
// Use stable sort to guarantee deterministic code generation.
std::stable_sort(SortedSlots.begin(), SortedSlots.end(),
- SlotSizeSorter(MFI));
+ [this](int LHS, int RHS) {
+ // We use -1 to denote an uninteresting slot. Place these slots at the end.
+ if (LHS == -1) return false;
+ if (RHS == -1) return true;
+ // Sort according to size.
+ return MFI->getObjectSize(LHS) > MFI->getObjectSize(RHS);
+ });
bool Changed = true;
while (Changed) {
@@ -756,8 +741,8 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
int FirstSlot = SortedSlots[I];
int SecondSlot = SortedSlots[J];
- LiveInterval *First = Intervals[FirstSlot];
- LiveInterval *Second = Intervals[SecondSlot];
+ LiveInterval *First = &*Intervals[FirstSlot];
+ LiveInterval *Second = &*Intervals[SecondSlot];
assert (!First->empty() && !Second->empty() && "Found an empty range");
// Merge disjoint slots.
@@ -795,10 +780,5 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
expungeSlotMap(SlotRemap, NumSlots);
remapInstructions(SlotRemap);
- // Release the intervals.
- for (unsigned I = 0; I < NumSlots; ++I) {
- delete Intervals[I];
- }
-
return removeAllMarkers();
}
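The SlotSizeSorter functor deleted earlier in this file returns above as a capturing lambda passed directly to std::stable_sort. A minimal standalone version of that comparator, with the per-slot object sizes kept in a plain vector instead of MachineFrameInfo:

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  // Hypothetical per-slot object sizes; the value -1 marks an unused slot.
  std::vector<int> ObjectSize = {8, 32, 4, 16};
  std::vector<int> SortedSlots = {2, -1, 0, 3, -1, 1};

  // Same comparator as the StackColoring lambda: -1 slots go last, the rest
  // are ordered by decreasing object size. stable_sort keeps the result
  // deterministic for equal sizes.
  std::stable_sort(SortedSlots.begin(), SortedSlots.end(),
                   [&ObjectSize](int LHS, int RHS) {
                     if (LHS == -1) return false;
                     if (RHS == -1) return true;
                     return ObjectSize[LHS] > ObjectSize[RHS];
                   });

  for (int S : SortedSlots)
    std::printf("%d ", S);
  std::printf("\n");  // expected: 1 3 0 2 -1 -1
  return 0;
}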
diff --git a/contrib/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp b/contrib/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp
new file mode 100644
index 0000000..3ba502f
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp
@@ -0,0 +1,127 @@
+//===-- StackMapLivenessAnalysis.cpp - StackMap live Out Analysis ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the StackMap Liveness analysis pass. The pass calculates
+// the liveness for each basic block in a function and attaches the register
+// live-out information to a stackmap or patchpoint intrinsic if present.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionAnalysis.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/StackMapLivenessAnalysis.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+
+
+using namespace llvm;
+
+#define DEBUG_TYPE "stackmaps"
+
+namespace llvm {
+cl::opt<bool> EnablePatchPointLiveness("enable-patchpoint-liveness",
+ cl::Hidden, cl::init(true),
+ cl::desc("Enable PatchPoint Liveness Analysis Pass"));
+}
+
+STATISTIC(NumStackMapFuncVisited, "Number of functions visited");
+STATISTIC(NumStackMapFuncSkipped, "Number of functions skipped");
+STATISTIC(NumBBsVisited, "Number of basic blocks visited");
+STATISTIC(NumBBsHaveNoStackmap, "Number of basic blocks with no stackmap");
+STATISTIC(NumStackMaps, "Number of StackMaps visited");
+
+char StackMapLiveness::ID = 0;
+char &llvm::StackMapLivenessID = StackMapLiveness::ID;
+INITIALIZE_PASS(StackMapLiveness, "stackmap-liveness",
+ "StackMap Liveness Analysis", false, false)
+
+/// Default construct and initialize the pass.
+StackMapLiveness::StackMapLiveness() : MachineFunctionPass(ID) {
+ initializeStackMapLivenessPass(*PassRegistry::getPassRegistry());
+}
+
+/// Tell the pass manager which passes we depend on and what information we
+/// preserve.
+void StackMapLiveness::getAnalysisUsage(AnalysisUsage &AU) const {
+ // We preserve all information.
+ AU.setPreservesAll();
+ AU.setPreservesCFG();
+ // Default dependencies for all MachineFunction passes.
+ AU.addRequired<MachineFunctionAnalysis>();
+}
+
+/// Calculate the liveness information for the given machine function.
+bool StackMapLiveness::runOnMachineFunction(MachineFunction &_MF) {
+ if (!EnablePatchPointLiveness)
+ return false;
+
+ DEBUG(dbgs() << "********** COMPUTING STACKMAP LIVENESS: "
+ << _MF.getName() << " **********\n");
+ MF = &_MF;
+ TRI = MF->getTarget().getRegisterInfo();
+ ++NumStackMapFuncVisited;
+
+ // Skip this function if there are no patchpoints to process.
+ if (!MF->getFrameInfo()->hasPatchPoint()) {
+ ++NumStackMapFuncSkipped;
+ return false;
+ }
+ return calculateLiveness();
+}
+
+/// Performs the actual liveness calculation for the function.
+bool StackMapLiveness::calculateLiveness() {
+ bool HasChanged = false;
+ // For all basic blocks in the function.
+ for (MachineFunction::iterator MBBI = MF->begin(), MBBE = MF->end();
+ MBBI != MBBE; ++MBBI) {
+ DEBUG(dbgs() << "****** BB " << MBBI->getName() << " ******\n");
+ LiveRegs.init(TRI);
+ LiveRegs.addLiveOuts(MBBI);
+ bool HasStackMap = false;
+ // Reverse iterate over all instructions and add the current live register
+ // set to an instruction if we encounter a patchpoint instruction.
+ for (MachineBasicBlock::reverse_iterator I = MBBI->rbegin(),
+ E = MBBI->rend(); I != E; ++I) {
+ if (I->getOpcode() == TargetOpcode::PATCHPOINT) {
+ addLiveOutSetToMI(*I);
+ HasChanged = true;
+ HasStackMap = true;
+ ++NumStackMaps;
+ }
+ DEBUG(dbgs() << " " << LiveRegs << " " << *I);
+ LiveRegs.stepBackward(*I);
+ }
+ ++NumBBsVisited;
+ if (!HasStackMap)
+ ++NumBBsHaveNoStackmap;
+ }
+ return HasChanged;
+}
+
+/// Add the current register live set to the instruction.
+void StackMapLiveness::addLiveOutSetToMI(MachineInstr &MI) {
+ uint32_t *Mask = createRegisterMask();
+ MachineOperand MO = MachineOperand::CreateRegLiveOut(Mask);
+ MI.addOperand(*MF, MO);
+}
+
+/// Create a register mask and initialize it with the registers from the
+/// register live set.
+uint32_t *StackMapLiveness::createRegisterMask() const {
+ // The mask is owned and cleaned up by the Machine Function.
+ uint32_t *Mask = MF->allocateRegisterMask(TRI->getNumRegs());
+ for (LivePhysRegs::const_iterator RI = LiveRegs.begin(), RE = LiveRegs.end();
+ RI != RE; ++RI)
+ Mask[*RI / 32] |= 1U << (*RI % 32);
+ return Mask;
+}
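createRegisterMask() above packs the live-out set into 32-bit words, one bit per physical register, and parseRegisterLiveOutMask() in StackMaps.cpp (the next file) walks the same encoding. A standalone sketch of the packing and unpacking, with invented register numbers in place of LivePhysRegs:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const unsigned NumRegs = 96;                   // pretend target register count
  std::vector<unsigned> LiveRegs = {3, 31, 32, 70};

  // Encode: one bit per register, 32 registers per word, as in
  // StackMapLiveness::createRegisterMask().
  std::vector<uint32_t> Mask((NumRegs + 31) / 32, 0);
  for (unsigned Reg : LiveRegs)
    Mask[Reg / 32] |= 1U << (Reg % 32);

  // Decode: recover the live registers, as in
  // StackMaps::parseRegisterLiveOutMask().
  for (unsigned Reg = 0; Reg != NumRegs; ++Reg)
    if ((Mask[Reg / 32] >> Reg % 32) & 1)
      std::printf("reg %u is live-out\n", Reg);
  return 0;
}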
diff --git a/contrib/llvm/lib/CodeGen/StackMaps.cpp b/contrib/llvm/lib/CodeGen/StackMaps.cpp
index 40893ea..1473fc1 100644
--- a/contrib/llvm/lib/CodeGen/StackMaps.cpp
+++ b/contrib/llvm/lib/CodeGen/StackMaps.cpp
@@ -7,35 +7,41 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "stackmaps"
-
#include "llvm/CodeGen/StackMaps.h"
-
#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCStreamer.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetOpcodes.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOpcodes.h"
#include "llvm/Target/TargetRegisterInfo.h"
-
#include <iterator>
using namespace llvm;
-PatchPointOpers::PatchPointOpers(const MachineInstr *MI):
- MI(MI),
- HasDef(MI->getOperand(0).isReg() && MI->getOperand(0).isDef() &&
- !MI->getOperand(0).isImplicit()),
- IsAnyReg(MI->getOperand(getMetaIdx(CCPos)).getImm() == CallingConv::AnyReg) {
+#define DEBUG_TYPE "stackmaps"
+
+static cl::opt<int> StackMapVersion("stackmap-version", cl::init(1),
+ cl::desc("Specify the stackmap encoding version (default = 1)"));
+const char *StackMaps::WSMP = "Stack Maps: ";
+
+PatchPointOpers::PatchPointOpers(const MachineInstr *MI)
+ : MI(MI),
+ HasDef(MI->getOperand(0).isReg() && MI->getOperand(0).isDef() &&
+ !MI->getOperand(0).isImplicit()),
+ IsAnyReg(MI->getOperand(getMetaIdx(CCPos)).getImm() == CallingConv::AnyReg)
+{
#ifndef NDEBUG
- {
unsigned CheckStartIdx = 0, e = MI->getNumOperands();
while (CheckStartIdx < e && MI->getOperand(CheckStartIdx).isReg() &&
MI->getOperand(CheckStartIdx).isDef() &&
@@ -43,8 +49,7 @@ PatchPointOpers::PatchPointOpers(const MachineInstr *MI):
++CheckStartIdx;
assert(getMetaIdx() == CheckStartIdx &&
- "Unexpected additonal definition in Patchpoint intrinsic.");
- }
+ "Unexpected additional definition in Patchpoint intrinsic.");
#endif
}
@@ -65,7 +70,126 @@ unsigned PatchPointOpers::getNextScratchIdx(unsigned StartIdx) const {
return ScratchIdx;
}
-void StackMaps::recordStackMapOpers(const MachineInstr &MI, uint32_t ID,
+StackMaps::StackMaps(AsmPrinter &AP) : AP(AP) {
+ if (StackMapVersion != 1)
+ llvm_unreachable("Unsupported stackmap version!");
+}
+
+MachineInstr::const_mop_iterator
+StackMaps::parseOperand(MachineInstr::const_mop_iterator MOI,
+ MachineInstr::const_mop_iterator MOE,
+ LocationVec &Locs, LiveOutVec &LiveOuts) const {
+ if (MOI->isImm()) {
+ switch (MOI->getImm()) {
+ default: llvm_unreachable("Unrecognized operand type.");
+ case StackMaps::DirectMemRefOp: {
+ unsigned Size = AP.TM.getDataLayout()->getPointerSizeInBits();
+ assert((Size % 8) == 0 && "Need pointer size in bytes.");
+ Size /= 8;
+ unsigned Reg = (++MOI)->getReg();
+ int64_t Imm = (++MOI)->getImm();
+ Locs.push_back(Location(StackMaps::Location::Direct, Size, Reg, Imm));
+ break;
+ }
+ case StackMaps::IndirectMemRefOp: {
+ int64_t Size = (++MOI)->getImm();
+ assert(Size > 0 && "Need a valid size for indirect memory locations.");
+ unsigned Reg = (++MOI)->getReg();
+ int64_t Imm = (++MOI)->getImm();
+ Locs.push_back(Location(StackMaps::Location::Indirect, Size, Reg, Imm));
+ break;
+ }
+ case StackMaps::ConstantOp: {
+ ++MOI;
+ assert(MOI->isImm() && "Expected constant operand.");
+ int64_t Imm = MOI->getImm();
+ Locs.push_back(Location(Location::Constant, sizeof(int64_t), 0, Imm));
+ break;
+ }
+ }
+ return ++MOI;
+ }
+
+ // The physical register number will ultimately be encoded as a DWARF regno.
+ // The stack map also records the size of a spill slot that can hold the
+ // register content. (The runtime can track the actual size of the data type
+ // if it needs to.)
+ if (MOI->isReg()) {
+ // Skip implicit registers (this includes our scratch registers)
+ if (MOI->isImplicit())
+ return ++MOI;
+
+ assert(TargetRegisterInfo::isPhysicalRegister(MOI->getReg()) &&
+ "Virtreg operands should have been rewritten before now.");
+ const TargetRegisterClass *RC =
+ AP.TM.getRegisterInfo()->getMinimalPhysRegClass(MOI->getReg());
+ assert(!MOI->getSubReg() && "Physical subreg still around.");
+ Locs.push_back(
+ Location(Location::Register, RC->getSize(), MOI->getReg(), 0));
+ return ++MOI;
+ }
+
+ if (MOI->isRegLiveOut())
+ LiveOuts = parseRegisterLiveOutMask(MOI->getRegLiveOut());
+
+ return ++MOI;
+}
+
+/// Go up the super-register chain until we hit a valid dwarf register number.
+static unsigned getDwarfRegNum(unsigned Reg, const TargetRegisterInfo *TRI) {
+ int RegNo = TRI->getDwarfRegNum(Reg, false);
+ for (MCSuperRegIterator SR(Reg, TRI); SR.isValid() && RegNo < 0; ++SR)
+ RegNo = TRI->getDwarfRegNum(*SR, false);
+
+ assert(RegNo >= 0 && "Invalid Dwarf register number.");
+ return (unsigned) RegNo;
+}
+
+/// Create a live-out register record for the given register Reg.
+StackMaps::LiveOutReg
+StackMaps::createLiveOutReg(unsigned Reg, const TargetRegisterInfo *TRI) const {
+ unsigned RegNo = getDwarfRegNum(Reg, TRI);
+ unsigned Size = TRI->getMinimalPhysRegClass(Reg)->getSize();
+ return LiveOutReg(Reg, RegNo, Size);
+}
+
+/// Parse the register live-out mask and return a vector of live-out registers
+/// that need to be recorded in the stackmap.
+StackMaps::LiveOutVec
+StackMaps::parseRegisterLiveOutMask(const uint32_t *Mask) const {
+ assert(Mask && "No register mask specified");
+ const TargetRegisterInfo *TRI = AP.TM.getRegisterInfo();
+ LiveOutVec LiveOuts;
+
+ // Create a LiveOutReg for each bit that is set in the register mask.
+ for (unsigned Reg = 0, NumRegs = TRI->getNumRegs(); Reg != NumRegs; ++Reg)
+ if ((Mask[Reg / 32] >> Reg % 32) & 1)
+ LiveOuts.push_back(createLiveOutReg(Reg, TRI));
+
+ // We don't need to keep track of a register if its super-register is already
+ // in the list. Merge entries that refer to the same dwarf register and use
+ // the maximum size that needs to be spilled.
+ std::sort(LiveOuts.begin(), LiveOuts.end());
+ for (LiveOutVec::iterator I = LiveOuts.begin(), E = LiveOuts.end();
+ I != E; ++I) {
+ for (LiveOutVec::iterator II = std::next(I); II != E; ++II) {
+ if (I->RegNo != II->RegNo) {
+ // Skip all the now invalid entries.
+ I = --II;
+ break;
+ }
+ I->Size = std::max(I->Size, II->Size);
+ if (TRI->isSuperRegister(I->Reg, II->Reg))
+ I->Reg = II->Reg;
+ II->MarkInvalid();
+ }
+ }
+ LiveOuts.erase(std::remove_if(LiveOuts.begin(), LiveOuts.end(),
+ LiveOutReg::IsInvalid), LiveOuts.end());
+ return LiveOuts;
+}
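The loop above sorts the live-out records, folds entries that share a DWARF register number into one record that keeps the largest spill size, marks the duplicates invalid, and finally drops them with the erase/remove_if idiom. Below is a simplified standalone sketch of that merge; it keeps only the size-merging part, leaves out the super-register preference, and uses invented register numbers:

#include <algorithm>
#include <cstdio>
#include <vector>

// Hypothetical live-out record: target register, DWARF number, spill size.
struct LiveOutReg {
  unsigned Reg;
  unsigned RegNo;   // DWARF register number
  unsigned Size;    // bytes needed to spill it
  bool Invalid;
  LiveOutReg(unsigned R, unsigned DR, unsigned S)
      : Reg(R), RegNo(DR), Size(S), Invalid(false) {}
  bool operator<(const LiveOutReg &RHS) const { return RegNo < RHS.RegNo; }
};

int main() {
  // Two entries share DWARF regno 0 (think AL and EAX): keep one record with
  // the larger size; the duplicate is marked invalid and erased afterwards.
  std::vector<LiveOutReg> LiveOuts = {{101, 0, 1}, {102, 0, 4}, {200, 5, 8}};

  std::sort(LiveOuts.begin(), LiveOuts.end());
  for (auto I = LiveOuts.begin(), E = LiveOuts.end(); I != E; ++I) {
    if (I->Invalid)
      continue;
    for (auto II = std::next(I); II != E && II->RegNo == I->RegNo; ++II) {
      I->Size = std::max(I->Size, II->Size);  // largest spill slot wins
      II->Invalid = true;                     // dropped by remove_if below
    }
  }
  LiveOuts.erase(std::remove_if(LiveOuts.begin(), LiveOuts.end(),
                                [](const LiveOutReg &LO) { return LO.Invalid; }),
                 LiveOuts.end());

  for (const LiveOutReg &LO : LiveOuts)
    std::printf("reg %u -> dwarf %u, size %u\n", LO.Reg, LO.RegNo, LO.Size);
  return 0;
}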
+
+void StackMaps::recordStackMapOpers(const MachineInstr &MI, uint64_t ID,
MachineInstr::const_mop_iterator MOI,
MachineInstr::const_mop_iterator MOE,
bool recordResult) {
@@ -74,71 +198,65 @@ void StackMaps::recordStackMapOpers(const MachineInstr &MI, uint32_t ID,
MCSymbol *MILabel = OutContext.CreateTempSymbol();
AP.OutStreamer.EmitLabel(MILabel);
- LocationVec CallsiteLocs;
+ LocationVec Locations;
+ LiveOutVec LiveOuts;
if (recordResult) {
- std::pair<Location, MachineInstr::const_mop_iterator> ParseResult =
- OpParser(MI.operands_begin(), llvm::next(MI.operands_begin()), AP.TM);
-
- Location &Loc = ParseResult.first;
- assert(Loc.LocType == Location::Register &&
- "Stackmap return location must be a register.");
- CallsiteLocs.push_back(Loc);
+ assert(PatchPointOpers(&MI).hasDef() && "Stackmap has no return value.");
+ parseOperand(MI.operands_begin(), std::next(MI.operands_begin()),
+ Locations, LiveOuts);
}
+ // Parse operands.
while (MOI != MOE) {
- std::pair<Location, MachineInstr::const_mop_iterator> ParseResult =
- OpParser(MOI, MOE, AP.TM);
-
- Location &Loc = ParseResult.first;
+ MOI = parseOperand(MOI, MOE, Locations, LiveOuts);
+ }
- // Move large constants into the constant pool.
- if (Loc.LocType == Location::Constant && (Loc.Offset & ~0xFFFFFFFFULL)) {
- Loc.LocType = Location::ConstantIndex;
- Loc.Offset = ConstPool.getConstantIndex(Loc.Offset);
+ // Move large constants into the constant pool.
+ for (LocationVec::iterator I = Locations.begin(), E = Locations.end();
+ I != E; ++I) {
+ // Constants are encoded as sign-extended integers.
+ // -1 is directly encoded as .long 0xFFFFFFFF with no constant pool.
+ if (I->LocType == Location::Constant &&
+ ((I->Offset + (int64_t(1)<<31)) >> 32) != 0) {
+ I->LocType = Location::ConstantIndex;
+ auto Result = ConstPool.insert(std::make_pair(I->Offset, I->Offset));
+ I->Offset = Result.first - ConstPool.begin();
}
-
- CallsiteLocs.push_back(Loc);
- MOI = ParseResult.second;
}
+ // Create an expression to calculate the offset of the callsite from function
+ // entry.
const MCExpr *CSOffsetExpr = MCBinaryExpr::CreateSub(
MCSymbolRefExpr::Create(MILabel, OutContext),
MCSymbolRefExpr::Create(AP.CurrentFnSym, OutContext),
OutContext);
- CSInfos.push_back(CallsiteInfo(CSOffsetExpr, ID, CallsiteLocs));
-}
+ CSInfos.push_back(CallsiteInfo(CSOffsetExpr, ID, Locations, LiveOuts));
-static MachineInstr::const_mop_iterator
-getStackMapEndMOP(MachineInstr::const_mop_iterator MOI,
- MachineInstr::const_mop_iterator MOE) {
- for (; MOI != MOE; ++MOI)
- if (MOI->isRegMask() || (MOI->isReg() && MOI->isImplicit()))
- break;
-
- return MOI;
+ // Record the stack size of the current function.
+ const MachineFrameInfo *MFI = AP.MF->getFrameInfo();
+ FnStackSize[AP.CurrentFnSym] =
+ MFI->hasVarSizedObjects() ? UINT64_MAX : MFI->getStackSize();
}
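The constant-pool hunk above decides whether a 64-bit stack map constant can stay inline: the record encodes constants as sign-extended 32-bit values, so anything outside [-2^31, 2^31) has to go to the constant pool, and ((Offset + (int64_t(1) << 31)) >> 32) != 0 is a branch-free way of asking exactly that. A standalone check of the expression (it assumes the usual arithmetic right shift for negative values, as the patch does):

#include <cassert>
#include <cstdint>
#include <cstdio>

// True when V cannot be encoded as a sign-extended 32-bit immediate and must
// therefore be moved into the stack map constant pool.
static bool needsConstantPool(int64_t V) {
  return ((V + (int64_t(1) << 31)) >> 32) != 0;
}

int main() {
  assert(!needsConstantPool(0));
  assert(!needsConstantPool(-1));           // emitted directly as 0xFFFFFFFF
  assert(!needsConstantPool(INT32_MIN));
  assert(!needsConstantPool(INT32_MAX));
  assert(needsConstantPool(int64_t(INT32_MAX) + 1));
  assert(needsConstantPool(int64_t(INT32_MIN) - 1));
  assert(needsConstantPool(INT64_MIN));
  std::printf("all constant-pool checks passed\n");
  return 0;
}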
void StackMaps::recordStackMap(const MachineInstr &MI) {
- assert(MI.getOpcode() == TargetOpcode::STACKMAP && "exected stackmap");
+ assert(MI.getOpcode() == TargetOpcode::STACKMAP && "expected stackmap");
int64_t ID = MI.getOperand(0).getImm();
- assert((int32_t)ID == ID && "Stack maps hold 32-bit IDs");
- recordStackMapOpers(MI, ID, llvm::next(MI.operands_begin(), 2),
- getStackMapEndMOP(MI.operands_begin(),
- MI.operands_end()));
+ recordStackMapOpers(MI, ID, std::next(MI.operands_begin(), 2),
+ MI.operands_end());
}
void StackMaps::recordPatchPoint(const MachineInstr &MI) {
- assert(MI.getOpcode() == TargetOpcode::PATCHPOINT && "exected stackmap");
+ assert(MI.getOpcode() == TargetOpcode::PATCHPOINT && "expected patchpoint");
PatchPointOpers opers(&MI);
int64_t ID = opers.getMetaOper(PatchPointOpers::IDPos).getImm();
- assert((int32_t)ID == ID && "Stack maps hold 32-bit IDs");
+
MachineInstr::const_mop_iterator MOI =
- llvm::next(MI.operands_begin(), opers.getStackMapStartIdx());
- recordStackMapOpers(MI, ID, MOI, getStackMapEndMOP(MOI, MI.operands_end()),
+ std::next(MI.operands_begin(), opers.getStackMapStartIdx());
+ recordStackMapOpers(MI, ID, MOI, MI.operands_end(),
opers.isAnyReg() && opers.hasDef());
#ifndef NDEBUG
@@ -153,14 +271,66 @@ void StackMaps::recordPatchPoint(const MachineInstr &MI) {
#endif
}
-/// serializeToStackMapSection conceptually populates the following fields:
+/// Emit the stackmap header.
///
-/// uint32 : Reserved (header)
+/// Header {
+/// uint8 : Stack Map Version (currently 1)
+/// uint8 : Reserved (expected to be 0)
+/// uint16 : Reserved (expected to be 0)
+/// }
+/// uint32 : NumFunctions
/// uint32 : NumConstants
-/// int64 : Constants[NumConstants]
/// uint32 : NumRecords
+void StackMaps::emitStackmapHeader(MCStreamer &OS) {
+ // Header.
+ OS.EmitIntValue(StackMapVersion, 1); // Version.
+ OS.EmitIntValue(0, 1); // Reserved.
+ OS.EmitIntValue(0, 2); // Reserved.
+
+ // Num functions.
+ DEBUG(dbgs() << WSMP << "#functions = " << FnStackSize.size() << '\n');
+ OS.EmitIntValue(FnStackSize.size(), 4);
+ // Num constants.
+ DEBUG(dbgs() << WSMP << "#constants = " << ConstPool.size() << '\n');
+ OS.EmitIntValue(ConstPool.size(), 4);
+ // Num callsites.
+ DEBUG(dbgs() << WSMP << "#callsites = " << CSInfos.size() << '\n');
+ OS.EmitIntValue(CSInfos.size(), 4);
+}
+
+/// Emit the function frame record for each function.
+///
+/// StkSizeRecord[NumFunctions] {
+/// uint64 : Function Address
+/// uint64 : Stack Size
+/// }
+void StackMaps::emitFunctionFrameRecords(MCStreamer &OS) {
+ // Function Frame records.
+ DEBUG(dbgs() << WSMP << "functions:\n");
+ for (auto const &FR : FnStackSize) {
+ DEBUG(dbgs() << WSMP << "function addr: " << FR.first
+ << " frame size: " << FR.second);
+ OS.EmitSymbolValue(FR.first, 8);
+ OS.EmitIntValue(FR.second, 8);
+ }
+}
+
+/// Emit the constant pool.
+///
+/// int64 : Constants[NumConstants]
+void StackMaps::emitConstantPoolEntries(MCStreamer &OS) {
+ // Constant pool entries.
+ DEBUG(dbgs() << WSMP << "constants:\n");
+ for (auto ConstEntry : ConstPool) {
+ DEBUG(dbgs() << WSMP << ConstEntry.second << '\n');
+ OS.EmitIntValue(ConstEntry.second, 8);
+ }
+}
+
+/// Emit the callsite info for each callsite.
+///
/// StkMapRecord[NumRecords] {
-/// uint32 : PatchPoint ID
+/// uint64 : PatchPoint ID
/// uint32 : Instruction Offset
/// uint16 : Reserved (record flags)
/// uint16 : NumLocations
@@ -170,6 +340,14 @@ void StackMaps::recordPatchPoint(const MachineInstr &MI) {
/// uint16 : Dwarf RegNum
/// int32 : Offset
/// }
+/// uint16 : Padding
+/// uint16 : NumLiveOuts
+/// LiveOuts[NumLiveOuts] {
+/// uint16 : Dwarf RegNum
+/// uint8 : Reserved
+/// uint8 : Size in Bytes
+/// }
+/// uint32 : Padding (only if required to align to 8 byte)
/// }
///
/// Location Encoding, Type, Value:
@@ -178,137 +356,154 @@ void StackMaps::recordPatchPoint(const MachineInstr &MI) {
/// 0x3, Indirect, [Reg + Offset] (spilled value)
/// 0x4, Constant, Offset (small constant)
/// 0x5, ConstIndex, Constants[Offset] (large constant)
-///
-void StackMaps::serializeToStackMapSection() {
- // Bail out if there's no stack map data.
- if (CSInfos.empty())
- return;
-
- MCContext &OutContext = AP.OutStreamer.getContext();
- const TargetRegisterInfo *TRI = AP.TM.getRegisterInfo();
-
- // Create the section.
- const MCSection *StackMapSection =
- OutContext.getObjectFileInfo()->getStackMapSection();
- AP.OutStreamer.SwitchSection(StackMapSection);
-
- // Emit a dummy symbol to force section inclusion.
- AP.OutStreamer.EmitLabel(
- OutContext.GetOrCreateSymbol(Twine("__LLVM_StackMaps")));
-
- // Serialize data.
- const char *WSMP = "Stack Maps: ";
- (void)WSMP;
- const MCRegisterInfo &MCRI = *OutContext.getRegisterInfo();
-
- DEBUG(dbgs() << "********** Stack Map Output **********\n");
-
- // Header.
- AP.OutStreamer.EmitIntValue(0, 4);
-
- // Num constants.
- AP.OutStreamer.EmitIntValue(ConstPool.getNumConstants(), 4);
-
- // Constant pool entries.
- for (unsigned i = 0; i < ConstPool.getNumConstants(); ++i)
- AP.OutStreamer.EmitIntValue(ConstPool.getConstant(i), 8);
-
- DEBUG(dbgs() << WSMP << "#callsites = " << CSInfos.size() << "\n");
- AP.OutStreamer.EmitIntValue(CSInfos.size(), 4);
+void StackMaps::emitCallsiteEntries(MCStreamer &OS,
+ const TargetRegisterInfo *TRI) {
+ // Callsite entries.
+ DEBUG(dbgs() << WSMP << "callsites:\n");
+ for (const auto &CSI : CSInfos) {
+ const LocationVec &CSLocs = CSI.Locations;
+ const LiveOutVec &LiveOuts = CSI.LiveOuts;
- for (CallsiteInfoList::const_iterator CSII = CSInfos.begin(),
- CSIE = CSInfos.end();
- CSII != CSIE; ++CSII) {
-
- unsigned CallsiteID = CSII->ID;
- const LocationVec &CSLocs = CSII->Locations;
-
- DEBUG(dbgs() << WSMP << "callsite " << CallsiteID << "\n");
+ DEBUG(dbgs() << WSMP << "callsite " << CSI.ID << "\n");
// Verify stack map entry. It's better to communicate a problem to the
// runtime than crash in case of in-process compilation. Currently, we do
// simple overflow checks, but we may eventually communicate other
// compilation errors this way.
- if (CSLocs.size() > UINT16_MAX) {
- AP.OutStreamer.EmitIntValue(UINT32_MAX, 4); // Invalid ID.
- AP.OutStreamer.EmitValue(CSII->CSOffsetExpr, 4);
- AP.OutStreamer.EmitIntValue(0, 2); // Reserved.
- AP.OutStreamer.EmitIntValue(0, 2); // 0 locations.
+ if (CSLocs.size() > UINT16_MAX || LiveOuts.size() > UINT16_MAX) {
+ OS.EmitIntValue(UINT64_MAX, 8); // Invalid ID.
+ OS.EmitValue(CSI.CSOffsetExpr, 4);
+ OS.EmitIntValue(0, 2); // Reserved.
+ OS.EmitIntValue(0, 2); // 0 locations.
+ OS.EmitIntValue(0, 2); // padding.
+ OS.EmitIntValue(0, 2); // 0 live-out registers.
+ OS.EmitIntValue(0, 4); // padding.
continue;
}
- AP.OutStreamer.EmitIntValue(CallsiteID, 4);
- AP.OutStreamer.EmitValue(CSII->CSOffsetExpr, 4);
+ OS.EmitIntValue(CSI.ID, 8);
+ OS.EmitValue(CSI.CSOffsetExpr, 4);
// Reserved for flags.
- AP.OutStreamer.EmitIntValue(0, 2);
+ OS.EmitIntValue(0, 2);
DEBUG(dbgs() << WSMP << " has " << CSLocs.size() << " locations\n");
- AP.OutStreamer.EmitIntValue(CSLocs.size(), 2);
-
- unsigned operIdx = 0;
- for (LocationVec::const_iterator LocI = CSLocs.begin(), LocE = CSLocs.end();
- LocI != LocE; ++LocI, ++operIdx) {
- const Location &Loc = *LocI;
- DEBUG(
- dbgs() << WSMP << " Loc " << operIdx << ": ";
- switch (Loc.LocType) {
- case Location::Unprocessed:
- dbgs() << "<Unprocessed operand>";
- break;
- case Location::Register:
- dbgs() << "Register " << MCRI.getName(Loc.Reg);
- break;
- case Location::Direct:
- dbgs() << "Direct " << MCRI.getName(Loc.Reg);
- if (Loc.Offset)
- dbgs() << " + " << Loc.Offset;
- break;
- case Location::Indirect:
- dbgs() << "Indirect " << MCRI.getName(Loc.Reg)
- << " + " << Loc.Offset;
- break;
- case Location::Constant:
- dbgs() << "Constant " << Loc.Offset;
- break;
- case Location::ConstantIndex:
- dbgs() << "Constant Index " << Loc.Offset;
- break;
- }
- dbgs() << "\n";
- );
+ OS.EmitIntValue(CSLocs.size(), 2);
+ unsigned OperIdx = 0;
+ for (const auto &Loc : CSLocs) {
unsigned RegNo = 0;
int Offset = Loc.Offset;
if(Loc.Reg) {
- RegNo = MCRI.getDwarfRegNum(Loc.Reg, false);
- for (MCSuperRegIterator SR(Loc.Reg, TRI);
- SR.isValid() && (int)RegNo < 0; ++SR) {
- RegNo = TRI->getDwarfRegNum(*SR, false);
- }
+ RegNo = getDwarfRegNum(Loc.Reg, TRI);
+
// If this is a register location, put the subregister byte offset in
// the location offset.
if (Loc.LocType == Location::Register) {
assert(!Loc.Offset && "Register location should have zero offset");
- unsigned LLVMRegNo = MCRI.getLLVMRegNum(RegNo, false);
- unsigned SubRegIdx = MCRI.getSubRegIndex(LLVMRegNo, Loc.Reg);
+ unsigned LLVMRegNo = TRI->getLLVMRegNum(RegNo, false);
+ unsigned SubRegIdx = TRI->getSubRegIndex(LLVMRegNo, Loc.Reg);
if (SubRegIdx)
- Offset = MCRI.getSubRegIdxOffset(SubRegIdx);
+ Offset = TRI->getSubRegIdxOffset(SubRegIdx);
}
}
else {
assert(Loc.LocType != Location::Register &&
"Missing location register");
}
- AP.OutStreamer.EmitIntValue(Loc.LocType, 1);
- AP.OutStreamer.EmitIntValue(Loc.Size, 1);
- AP.OutStreamer.EmitIntValue(RegNo, 2);
- AP.OutStreamer.EmitIntValue(Offset, 4);
+
+ DEBUG(dbgs() << WSMP << " Loc " << OperIdx << ": ";
+ switch (Loc.LocType) {
+ case Location::Unprocessed:
+ dbgs() << "<Unprocessed operand>";
+ break;
+ case Location::Register:
+ dbgs() << "Register " << TRI->getName(Loc.Reg);
+ break;
+ case Location::Direct:
+ dbgs() << "Direct " << TRI->getName(Loc.Reg);
+ if (Loc.Offset)
+ dbgs() << " + " << Loc.Offset;
+ break;
+ case Location::Indirect:
+ dbgs() << "Indirect " << TRI->getName(Loc.Reg)
+ << " + " << Loc.Offset;
+ break;
+ case Location::Constant:
+ dbgs() << "Constant " << Loc.Offset;
+ break;
+ case Location::ConstantIndex:
+ dbgs() << "Constant Index " << Loc.Offset;
+ break;
+ }
+ dbgs() << " [encoding: .byte " << Loc.LocType
+ << ", .byte " << Loc.Size
+ << ", .short " << RegNo
+ << ", .int " << Offset << "]\n";
+ );
+
+ OS.EmitIntValue(Loc.LocType, 1);
+ OS.EmitIntValue(Loc.Size, 1);
+ OS.EmitIntValue(RegNo, 2);
+ OS.EmitIntValue(Offset, 4);
+ OperIdx++;
+ }
+
+ DEBUG(dbgs() << WSMP << " has " << LiveOuts.size()
+ << " live-out registers\n");
+
+ // Num live-out registers and padding to align to 4 byte.
+ OS.EmitIntValue(0, 2);
+ OS.EmitIntValue(LiveOuts.size(), 2);
+
+ OperIdx = 0;
+ for (const auto &LO : LiveOuts) {
+ DEBUG(dbgs() << WSMP << " LO " << OperIdx << ": "
+ << TRI->getName(LO.Reg)
+ << " [encoding: .short " << LO.RegNo
+ << ", .byte 0, .byte " << LO.Size << "]\n");
+ OS.EmitIntValue(LO.RegNo, 2);
+ OS.EmitIntValue(0, 1);
+ OS.EmitIntValue(LO.Size, 1);
}
+ // Emit alignment to 8 byte.
+ OS.EmitValueToAlignment(8);
}
+}
- AP.OutStreamer.AddBlankLine();
+/// Serialize the stackmap data.
+void StackMaps::serializeToStackMapSection() {
+ (void) WSMP;
+ // Bail out if there's no stack map data.
+ assert((!CSInfos.empty() || (CSInfos.empty() && ConstPool.empty())) &&
+ "Expected empty constant pool too!");
+ assert((!CSInfos.empty() || (CSInfos.empty() && FnStackSize.empty())) &&
+ "Expected empty function record too!");
+ if (CSInfos.empty())
+ return;
+
+ MCContext &OutContext = AP.OutStreamer.getContext();
+ MCStreamer &OS = AP.OutStreamer;
+ const TargetRegisterInfo *TRI = AP.TM.getRegisterInfo();
+
+ // Create the section.
+ const MCSection *StackMapSection =
+ OutContext.getObjectFileInfo()->getStackMapSection();
+ OS.SwitchSection(StackMapSection);
+
+ // Emit a dummy symbol to force section inclusion.
+ OS.EmitLabel(OutContext.GetOrCreateSymbol(Twine("__LLVM_StackMaps")));
+
+ // Serialize data.
+ DEBUG(dbgs() << "********** Stack Map Output **********\n");
+ emitStackmapHeader(OS);
+ emitFunctionFrameRecords(OS);
+ emitConstantPoolEntries(OS);
+ emitCallsiteEntries(OS, TRI);
+ OS.AddBlankLine();
+ // Clean up.
CSInfos.clear();
+ ConstPool.clear();
}
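The rewritten emitter splits serialization into header, function-frame, constant-pool and callsite sections, and the doc comments spell out the version 1 binary layout. The sketch below writes just the header into a byte buffer so the field sizes are easy to see; the counts are invented and little-endian byte order is assumed only for the illustration (the real emitter defers byte order to MCStreamer):

#include <cstdint>
#include <cstdio>
#include <vector>

// Append V to Buf as Size little-endian bytes, playing the role of
// MCStreamer::EmitIntValue() in this sketch.
static void emitIntValue(std::vector<uint8_t> &Buf, uint64_t V, unsigned Size) {
  for (unsigned i = 0; i != Size; ++i)
    Buf.push_back(uint8_t(V >> (8 * i)));
}

int main() {
  std::vector<uint8_t> Buf;
  const unsigned NumFunctions = 2, NumConstants = 1, NumRecords = 3;

  // Version 1 header as documented above emitStackmapHeader():
  emitIntValue(Buf, 1, 1);             // uint8  : Stack Map Version
  emitIntValue(Buf, 0, 1);             // uint8  : Reserved
  emitIntValue(Buf, 0, 2);             // uint16 : Reserved
  emitIntValue(Buf, NumFunctions, 4);  // uint32 : NumFunctions
  emitIntValue(Buf, NumConstants, 4);  // uint32 : NumConstants
  emitIntValue(Buf, NumRecords, 4);    // uint32 : NumRecords

  std::printf("header is %zu bytes:", Buf.size());
  for (uint8_t B : Buf)
    std::printf(" %02x", B);
  std::printf("\n");
  return 0;
}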
diff --git a/contrib/llvm/lib/CodeGen/StackProtector.cpp b/contrib/llvm/lib/CodeGen/StackProtector.cpp
index 9020449..accfe7b 100644
--- a/contrib/llvm/lib/CodeGen/StackProtector.cpp
+++ b/contrib/llvm/lib/CodeGen/StackProtector.cpp
@@ -14,14 +14,12 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "stack-protector"
#include "llvm/CodeGen/StackProtector.h"
-#include "llvm/CodeGen/Analysis.h"
-#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
@@ -38,6 +36,8 @@
#include <cstdlib>
using namespace llvm;
+#define DEBUG_TYPE "stack-protector"
+
STATISTIC(NumFunProtected, "Number of functions protected");
STATISTIC(NumAddrTaken, "Number of local variables that have their address"
" taken.");
@@ -58,19 +58,43 @@ StackProtector::getSSPLayout(const AllocaInst *AI) const {
return AI ? Layout.lookup(AI) : SSPLK_None;
}
+void StackProtector::adjustForColoring(const AllocaInst *From,
+ const AllocaInst *To) {
+ // When coloring replaces one alloca with another, transfer the SSPLayoutKind
+ // tag from the remapped to the target alloca. The remapped alloca should
+ // have a size smaller than or equal to the replacement alloca.
+ SSPLayoutMap::iterator I = Layout.find(From);
+ if (I != Layout.end()) {
+ SSPLayoutKind Kind = I->second;
+ Layout.erase(I);
+
+ // Transfer the tag, but make sure that SSPLK_AddrOf does not overwrite
+ // SSPLK_SmallArray or SSPLK_LargeArray, and make sure that
+ // SSPLK_SmallArray does not overwrite SSPLK_LargeArray.
+ I = Layout.find(To);
+ if (I == Layout.end())
+ Layout.insert(std::make_pair(To, Kind));
+ else if (I->second != SSPLK_LargeArray && Kind != SSPLK_AddrOf)
+ I->second = Kind;
+ }
+}
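adjustForColoring() moves the stack-protector classification from the remapped alloca to its replacement, but never lets a weaker tag clobber a stronger one: SSPLK_AddrOf must not overwrite SSPLK_SmallArray or SSPLK_LargeArray, and SSPLK_SmallArray must not overwrite SSPLK_LargeArray. A standalone sketch of the same rule over a plain std::map; the enum values and their ordering here are this sketch's own convention, not the ones declared in StackProtector.h:

#include <cassert>
#include <map>
#include <string>

// Hypothetical stand-ins for the SSPLayoutKind tags, ordered so that a
// larger value means a stronger protection requirement.
enum SSPLayoutKind {
  SSPLK_None, SSPLK_AddrOf, SSPLK_SmallArray, SSPLK_LargeArray
};

using Layout = std::map<std::string, SSPLayoutKind>;

// Transfer From's tag to To without ever downgrading To, mirroring the rule
// described in StackProtector::adjustForColoring().
static void adjustForColoring(Layout &L, const std::string &From,
                              const std::string &To) {
  auto I = L.find(From);
  if (I == L.end())
    return;
  SSPLayoutKind Kind = I->second;
  L.erase(I);
  auto J = L.find(To);
  if (J == L.end())
    L.insert(std::make_pair(To, Kind));
  else if (J->second < Kind)   // only ever strengthen the existing tag
    J->second = Kind;
}

int main() {
  Layout L = {{"a", SSPLK_LargeArray}, {"b", SSPLK_AddrOf}};
  adjustForColoring(L, "b", "a");   // AddrOf must not clobber LargeArray
  assert(L.at("a") == SSPLK_LargeArray);

  L["c"] = SSPLK_SmallArray;
  adjustForColoring(L, "c", "d");   // plain transfer to an untagged slot
  assert(L.at("d") == SSPLK_SmallArray && L.find("c") == L.end());
  return 0;
}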
+
bool StackProtector::runOnFunction(Function &Fn) {
F = &Fn;
M = F->getParent();
- DT = getAnalysisIfAvailable<DominatorTree>();
+ DominatorTreeWrapperPass *DTWP =
+ getAnalysisIfAvailable<DominatorTreeWrapperPass>();
+ DT = DTWP ? &DTWP->getDomTree() : nullptr;
TLI = TM->getTargetLowering();
- if (!RequiresStackProtector())
- return false;
-
Attribute Attr = Fn.getAttributes().getAttribute(
AttributeSet::FunctionIndex, "stack-protector-buffer-size");
- if (Attr.isStringAttribute())
- Attr.getValueAsString().getAsInteger(10, SSPBufferSize);
+ if (Attr.isStringAttribute() &&
+ Attr.getValueAsString().getAsInteger(10, SSPBufferSize))
+ return false; // Invalid integer string
+
+ if (!RequiresStackProtector())
+ return false;
++NumFunProtected;
return InsertStackProtectors();
@@ -127,9 +151,7 @@ bool StackProtector::ContainsProtectableArray(Type *Ty, bool &IsLarge,
}
bool StackProtector::HasAddressTaken(const Instruction *AI) {
- for (Value::const_use_iterator UI = AI->use_begin(), UE = AI->use_end();
- UI != UE; ++UI) {
- const User *U = *UI;
+ for (const User *U : AI->users()) {
if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
if (AI == SI->getValueOperand())
return true;
@@ -261,8 +283,7 @@ static CallInst *FindPotentialTailCall(BasicBlock *BB, ReturnInst *RI,
const unsigned MaxSearch = 4;
bool NoInterposingChain = true;
- for (BasicBlock::reverse_iterator I = llvm::next(BB->rbegin()),
- E = BB->rend();
+ for (BasicBlock::reverse_iterator I = std::next(BB->rbegin()), E = BB->rend();
I != E && SearchCounter < MaxSearch; ++I) {
Instruction *Inst = &*I;
@@ -299,7 +320,7 @@ static CallInst *FindPotentialTailCall(BasicBlock *BB, ReturnInst *RI,
SearchCounter++;
}
- return 0;
+ return nullptr;
}
/// Insert code into the entry block that stores the __stack_chk_guard
@@ -334,7 +355,7 @@ static bool CreatePrologue(Function *F, Module *M, ReturnInst *RI,
}
IRBuilder<> B(&F->getEntryBlock().front());
- AI = B.CreateAlloca(PtrTy, 0, "StackGuardSlot");
+ AI = B.CreateAlloca(PtrTy, nullptr, "StackGuardSlot");
LoadInst *LI = B.CreateLoad(StackGuardVar, "StackGuard");
B.CreateCall2(Intrinsic::getDeclaration(M, Intrinsic::stackprotector), LI,
AI);
@@ -352,8 +373,8 @@ bool StackProtector::InsertStackProtectors() {
bool HasPrologue = false;
bool SupportsSelectionDAGSP =
EnableSelectionDAGSP && !TM->Options.EnableFastISel;
- AllocaInst *AI = 0; // Place on stack that stores the stack guard.
- Value *StackGuardVar = 0; // The stack guard variable.
+ AllocaInst *AI = nullptr; // Place on stack that stores the stack guard.
+ Value *StackGuardVar = nullptr; // The stack guard variable.
for (Function::iterator I = F->begin(), E = F->end(); I != E;) {
BasicBlock *BB = I++;
@@ -370,14 +391,14 @@ bool StackProtector::InsertStackProtectors() {
if (SupportsSelectionDAGSP) {
// Since we have a potential tail call, insert the special stack check
// intrinsic.
- Instruction *InsertionPt = 0;
+ Instruction *InsertionPt = nullptr;
if (CallInst *CI = FindPotentialTailCall(BB, RI, TLI)) {
InsertionPt = CI;
} else {
InsertionPt = RI;
// At this point we know that BB has a return statement so it *DOES*
// have a terminator.
- assert(InsertionPt != 0 && "BB must have a terminator instruction at "
+ assert(InsertionPt != nullptr && "BB must have a terminator instruction at "
"this point.");
}
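
A note on the adjustForColoring hook that the StackProtector.cpp hunks above introduce: when stack coloring merges one alloca into another, the stronger stack-protector tag has to survive the merge. The standalone C++ sketch below is not part of the diff — the enum, the plain std::map, and main are stand-ins chosen for illustration — but it mirrors the same precedence rule: SSPLK_AddrOf never overwrites an array tag, and SSPLK_SmallArray never overwrites SSPLK_LargeArray.

#include <cassert>
#include <map>

// Stand-ins for the pass's types; names and the plain std::map are
// illustrative only, not the LLVM data structures.
enum SSPLayoutKind { SSPLK_None, SSPLK_AddrOf, SSPLK_SmallArray, SSPLK_LargeArray };
using AllocaId = int;
using SSPLayoutMap = std::map<AllocaId, SSPLayoutKind>;

// When coloring merges alloca From into alloca To, move From's tag to To
// without ever weakening To: AddrOf never overwrites an array tag, and
// SmallArray never overwrites LargeArray.
void adjustForColoring(SSPLayoutMap &Layout, AllocaId From, AllocaId To) {
  SSPLayoutMap::iterator I = Layout.find(From);
  if (I == Layout.end())
    return;
  SSPLayoutKind Kind = I->second;
  Layout.erase(I);

  I = Layout.find(To);
  if (I == Layout.end())
    Layout.insert(std::make_pair(To, Kind));
  else if (I->second != SSPLK_LargeArray && Kind != SSPLK_AddrOf)
    I->second = Kind;
}

int main() {
  SSPLayoutMap Layout = {{1, SSPLK_SmallArray}, {2, SSPLK_LargeArray}};
  adjustForColoring(Layout, 1, 2); // SmallArray must not downgrade LargeArray
  assert(Layout.find(1) == Layout.end() && Layout.at(2) == SSPLK_LargeArray);
  return 0;
}
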
diff --git a/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp b/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp
index 9f44df8..791168f5 100644
--- a/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp
+++ b/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "stackslotcoloring"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
@@ -33,6 +32,8 @@
#include <vector>
using namespace llvm;
+#define DEBUG_TYPE "stackslotcoloring"
+
static cl::opt<bool>
DisableSharing("no-stack-slot-sharing",
cl::init(false), cl::Hidden,
@@ -87,7 +88,7 @@ namespace {
initializeStackSlotColoringPass(*PassRegistry::getPassRegistry());
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<SlotIndexes>();
AU.addPreserved<SlotIndexes>();
@@ -98,7 +99,7 @@ namespace {
MachineFunctionPass::getAnalysisUsage(AU);
}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
private:
void InitializeSlots();
@@ -142,7 +143,6 @@ void StackSlotColoring::ScanForSpillSlotRefs(MachineFunction &MF) {
for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
MBBI != E; ++MBBI) {
MachineBasicBlock *MBB = &*MBBI;
- BlockFrequency Freq = MBFI->getBlockFreq(MBB);
for (MachineBasicBlock::iterator MII = MBB->begin(), EE = MBB->end();
MII != EE; ++MII) {
MachineInstr *MI = &*MII;
@@ -157,18 +157,17 @@ void StackSlotColoring::ScanForSpillSlotRefs(MachineFunction &MF) {
continue;
LiveInterval &li = LS->getInterval(FI);
if (!MI->isDebugValue())
- li.weight += LiveIntervals::getSpillWeight(false, true, Freq);
+ li.weight += LiveIntervals::getSpillWeight(false, true, MBFI, MI);
}
for (MachineInstr::mmo_iterator MMOI = MI->memoperands_begin(),
EE = MI->memoperands_end(); MMOI != EE; ++MMOI) {
MachineMemOperand *MMO = *MMOI;
- if (const Value *V = MMO->getValue()) {
- if (const FixedStackPseudoSourceValue *FSV =
- dyn_cast<FixedStackPseudoSourceValue>(V)) {
- int FI = FSV->getFrameIndex();
- if (FI >= 0)
- SSRefs[FI].push_back(MMO);
- }
+ if (const FixedStackPseudoSourceValue *FSV =
+ dyn_cast_or_null<FixedStackPseudoSourceValue>(
+ MMO->getPseudoValue())) {
+ int FI = FSV->getFrameIndex();
+ if (FI >= 0)
+ SSRefs[FI].push_back(MMO);
}
}
}
@@ -311,7 +310,7 @@ bool StackSlotColoring::ColorSlots(MachineFunction &MF) {
if (NewFI == -1 || (NewFI == (int)SS))
continue;
- const Value *NewSV = PseudoSourceValue::getFixedStack(NewFI);
+ const PseudoSourceValue *NewSV = PseudoSourceValue::getFixedStack(NewFI);
SmallVectorImpl<MachineMemOperand *> &RefMMOs = SSRefs[SS];
for (unsigned i = 0, e = RefMMOs.size(); i != e; ++i)
RefMMOs[i]->setValue(NewSV);
@@ -386,8 +385,8 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
toErase.push_back(I);
continue;
}
-
- MachineBasicBlock::iterator NextMI = llvm::next(I);
+
+ MachineBasicBlock::iterator NextMI = std::next(I);
if (NextMI == MBB->end()) continue;
unsigned LoadReg = 0;
@@ -399,7 +398,7 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
++NumDead;
changed = true;
- if (NextMI->findRegisterUseOperandIdx(LoadReg, true, 0) != -1) {
+ if (NextMI->findRegisterUseOperandIdx(LoadReg, true, nullptr) != -1) {
++NumDead;
toErase.push_back(I);
}
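
The memoperand scan in the StackSlotColoring.cpp hunk above now queries the pseudo source value, which may legitimately be null, so the type test becomes a single dyn_cast_or_null instead of a getValue() null check followed by a dyn_cast. As a generic illustration of that idiom only — a toy class hierarchy, not LLVM's real RTTI or PseudoSourceValue API — a dyn_cast_or_null-style helper folds the null check into the cast:

#include <cassert>

// Toy hierarchy with a kind tag, standing in for LLVM-style RTTI.
struct PseudoSourceValue {
  enum Kind { Plain, FixedStack } K;
  explicit PseudoSourceValue(Kind K) : K(K) {}
};
struct FixedStackPSV : PseudoSourceValue {
  int FrameIndex;
  explicit FixedStackPSV(int FI) : PseudoSourceValue(FixedStack), FrameIndex(FI) {}
  static bool classof(const PseudoSourceValue *V) { return V->K == FixedStack; }
};

// dyn_cast_or_null-style helper: tolerates a null input and returns null
// when the dynamic kind does not match.
template <typename To, typename From>
const To *dyn_cast_or_null(const From *V) {
  return (V && To::classof(V)) ? static_cast<const To *>(V) : nullptr;
}

int main() {
  FixedStackPSV FS(/*FrameIndex=*/3);
  PseudoSourceValue Other(PseudoSourceValue::Plain);
  assert(dyn_cast_or_null<FixedStackPSV>(&FS)->FrameIndex == 3);
  assert(dyn_cast_or_null<FixedStackPSV>(&Other) == nullptr);
  assert(dyn_cast_or_null<FixedStackPSV>(
             static_cast<const PseudoSourceValue *>(nullptr)) == nullptr);
  return 0;
}
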
diff --git a/contrib/llvm/lib/CodeGen/TailDuplication.cpp b/contrib/llvm/lib/CodeGen/TailDuplication.cpp
index ff0181e..723a629 100644
--- a/contrib/llvm/lib/CodeGen/TailDuplication.cpp
+++ b/contrib/llvm/lib/CodeGen/TailDuplication.cpp
@@ -12,13 +12,12 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "tailduplication"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
@@ -34,6 +33,8 @@
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "tailduplication"
+
STATISTIC(NumTails , "Number of tails duplicated");
STATISTIC(NumTailDups , "Number of tail duplicated blocks");
STATISTIC(NumInstrDups , "Additional instructions due to tail duplication");
@@ -61,9 +62,10 @@ namespace {
class TailDuplicatePass : public MachineFunctionPass {
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
+ const MachineBranchProbabilityInfo *MBPI;
MachineModuleInfo *MMI;
MachineRegisterInfo *MRI;
- OwningPtr<RegScavenger> RS;
+ std::unique_ptr<RegScavenger> RS;
bool PreRegAlloc;
// SSAUpdateVRs - A list of virtual registers for which to update SSA form.
@@ -78,7 +80,9 @@ namespace {
explicit TailDuplicatePass() :
MachineFunctionPass(ID), PreRegAlloc(false) {}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
private:
void AddSSAUpdateEntry(unsigned OrigReg, unsigned NewReg,
@@ -128,10 +132,15 @@ INITIALIZE_PASS(TailDuplicatePass, "tailduplication", "Tail Duplication",
false, false)
bool TailDuplicatePass::runOnMachineFunction(MachineFunction &MF) {
+ if (skipOptnoneFunction(*MF.getFunction()))
+ return false;
+
TII = MF.getTarget().getInstrInfo();
TRI = MF.getTarget().getRegisterInfo();
MRI = &MF.getRegInfo();
MMI = getAnalysisIfAvailable<MachineModuleInfo>();
+ MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
+
PreRegAlloc = MRI->isSSA();
RS.reset();
if (MRI->tracksLiveness() && TRI->trackLivenessAfterRegAlloc(MF))
@@ -144,6 +153,11 @@ bool TailDuplicatePass::runOnMachineFunction(MachineFunction &MF) {
return MadeChange;
}
+void TailDuplicatePass::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<MachineBranchProbabilityInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
static void VerifyPHIs(MachineFunction &MF, bool CheckExtra) {
for (MachineFunction::iterator I = ++MF.begin(), E = MF.end(); I != E; ++I) {
MachineBasicBlock *MBB = I;
@@ -168,7 +182,7 @@ static void VerifyPHIs(MachineFunction &MF, bool CheckExtra) {
dbgs() << "Malformed PHI in BB#" << MBB->getNumber() << ": " << *MI;
dbgs() << " missing input from predecessor BB#"
<< PredBB->getNumber() << '\n';
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
@@ -179,12 +193,12 @@ static void VerifyPHIs(MachineFunction &MF, bool CheckExtra) {
<< ": " << *MI;
dbgs() << " extra input from predecessor BB#"
<< PHIBB->getNumber() << '\n';
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
if (PHIBB->getNumber() < 0) {
dbgs() << "Malformed PHI in BB#" << MBB->getNumber() << ": " << *MI;
dbgs() << " non-existing BB#" << PHIBB->getNumber() << '\n';
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
++MI;
@@ -234,7 +248,7 @@ TailDuplicatePass::TailDuplicateAndUpdate(MachineBasicBlock *MBB,
// If the original definition is still around, add it as an available
// value.
MachineInstr *DefMI = MRI->getVRegDef(VReg);
- MachineBasicBlock *DefBB = 0;
+ MachineBasicBlock *DefBB = nullptr;
if (DefMI) {
DefBB = DefMI->getParent();
SSAUpdate.AddAvailableValue(DefBB, VReg);
@@ -252,8 +266,8 @@ TailDuplicatePass::TailDuplicateAndUpdate(MachineBasicBlock *MBB,
// Rewrite uses that are outside of the original def's block.
MachineRegisterInfo::use_iterator UI = MRI->use_begin(VReg);
while (UI != MRI->use_end()) {
- MachineOperand &UseMO = UI.getOperand();
- MachineInstr *UseMI = &*UI;
+ MachineOperand &UseMO = *UI;
+ MachineInstr *UseMI = UseMO.getParent();
++UI;
if (UseMI->isDebugValue()) {
// SSAUpdate can replace the use with an undef. That creates
@@ -328,12 +342,10 @@ bool TailDuplicatePass::TailDuplicateBlocks(MachineFunction &MF) {
static bool isDefLiveOut(unsigned Reg, MachineBasicBlock *BB,
const MachineRegisterInfo *MRI) {
- for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
- UE = MRI->use_end(); UI != UE; ++UI) {
- MachineInstr *UseMI = &*UI;
- if (UseMI->isDebugValue())
+ for (MachineInstr &UseMI : MRI->use_instructions(Reg)) {
+ if (UseMI.isDebugValue())
continue;
- if (UseMI->getParent() != BB)
+ if (UseMI.getParent() != BB)
return true;
}
return false;
@@ -352,9 +364,7 @@ static unsigned getPHISrcRegOpIdx(MachineInstr *MI, MachineBasicBlock *SrcBB) {
// block (which is why we need to copy the information).
static void getRegsUsedByPHIs(const MachineBasicBlock &BB,
DenseSet<unsigned> *UsedByPhi) {
- for(MachineBasicBlock::const_iterator I = BB.begin(), E = BB.end();
- I != E; ++I) {
- const MachineInstr &MI = *I;
+ for (const auto &MI : BB) {
if (!MI.isPHI())
break;
for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
@@ -645,7 +655,7 @@ TailDuplicatePass::canCompletelyDuplicateBB(MachineBasicBlock &BB) {
if (PredBB->succ_size() > 1)
return false;
- MachineBasicBlock *PredTBB = NULL, *PredFBB = NULL;
+ MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr;
SmallVector<MachineOperand, 4> PredCond;
if (TII->AnalyzeBranch(*PredBB, PredTBB, PredFBB, PredCond, true))
return false;
@@ -676,7 +686,7 @@ TailDuplicatePass::duplicateSimpleBB(MachineBasicBlock *TailBB,
if (bothUsedInPHI(*PredBB, Succs))
continue;
- MachineBasicBlock *PredTBB = NULL, *PredFBB = NULL;
+ MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr;
SmallVector<MachineOperand, 4> PredCond;
if (TII->AnalyzeBranch(*PredBB, PredTBB, PredFBB, PredCond, true))
continue;
@@ -686,7 +696,7 @@ TailDuplicatePass::duplicateSimpleBB(MachineBasicBlock *TailBB,
<< "From simple Succ: " << *TailBB);
MachineBasicBlock *NewTarget = *TailBB->succ_begin();
- MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(PredBB));
+ MachineBasicBlock *NextBB = std::next(MachineFunction::iterator(PredBB));
// Make PredFBB explicit.
if (PredCond.empty())
@@ -707,25 +717,26 @@ TailDuplicatePass::duplicateSimpleBB(MachineBasicBlock *TailBB,
// Make the branch unconditional if possible
if (PredTBB == PredFBB) {
PredCond.clear();
- PredFBB = NULL;
+ PredFBB = nullptr;
}
// Avoid adding fall through branches.
if (PredFBB == NextBB)
- PredFBB = NULL;
- if (PredTBB == NextBB && PredFBB == NULL)
- PredTBB = NULL;
+ PredFBB = nullptr;
+ if (PredTBB == NextBB && PredFBB == nullptr)
+ PredTBB = nullptr;
TII->RemoveBranch(*PredBB);
if (PredTBB)
TII->InsertBranch(*PredBB, PredTBB, PredFBB, PredCond, DebugLoc());
+ uint32_t Weight = MBPI->getEdgeWeight(PredBB, TailBB);
PredBB->removeSuccessor(TailBB);
unsigned NumSuccessors = PredBB->succ_size();
assert(NumSuccessors <= 1);
if (NumSuccessors == 0 || *PredBB->succ_begin() != NewTarget)
- PredBB->addSuccessor(NewTarget);
+ PredBB->addSuccessor(NewTarget, Weight);
TDBBs.push_back(PredBB);
}
@@ -786,7 +797,7 @@ TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB,
// Update PredBB livein.
RS->enterBasicBlock(PredBB);
if (!PredBB->empty())
- RS->forward(prior(PredBB->end()));
+ RS->forward(std::prev(PredBB->end()));
BitVector RegsLiveAtExit(TRI->getNumRegs());
RS->getRegsUsed(RegsLiveAtExit, false);
for (MachineBasicBlock::livein_iterator I = TailBB->livein_begin(),
@@ -836,7 +847,7 @@ TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB,
"TailDuplicate called on block with multiple successors!");
for (MachineBasicBlock::succ_iterator I = TailBB->succ_begin(),
E = TailBB->succ_end(); I != E; ++I)
- PredBB->addSuccessor(*I);
+ PredBB->addSuccessor(*I, MBPI->getEdgeWeight(TailBB, I));
Changed = true;
++NumTailDups;
@@ -845,8 +856,8 @@ TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB,
// If TailBB was duplicated into all its predecessors except for the prior
// block, which falls through unconditionally, move the contents of this
// block into the prior block.
- MachineBasicBlock *PrevBB = prior(MachineFunction::iterator(TailBB));
- MachineBasicBlock *PriorTBB = 0, *PriorFBB = 0;
+ MachineBasicBlock *PrevBB = std::prev(MachineFunction::iterator(TailBB));
+ MachineBasicBlock *PriorTBB = nullptr, *PriorFBB = nullptr;
SmallVector<MachineOperand, 4> PriorCond;
// This has to check PrevBB->succ_size() because EH edges are ignored by
// AnalyzeBranch.
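
The TailDuplication.cpp changes above thread MachineBranchProbabilityInfo into the pass so that when a predecessor is redirected away from the tail block, the profiled weight of the old edge is read before removeSuccessor and handed to addSuccessor for the new edge. Below is a minimal standalone sketch of that capture-then-reattach order, using a toy Block type rather than the MachineBasicBlock API:

#include <cassert>
#include <utility>
#include <vector>

// Minimal stand-ins; names are illustrative, not LLVM's.
struct Block;
using Edge = std::pair<Block *, unsigned>; // (successor, weight)

struct Block {
  std::vector<Edge> Succs;
  unsigned weightTo(Block *To) const {
    for (const Edge &E : Succs)
      if (E.first == To)
        return E.second;
    return 0;
  }
  void removeSuccessor(Block *To) {
    for (auto I = Succs.begin(); I != Succs.end(); ++I)
      if (I->first == To) { Succs.erase(I); return; }
  }
  void addSuccessor(Block *To, unsigned Weight) { Succs.push_back({To, Weight}); }
};

// Retarget Pred from Tail to NewTarget, preserving the profile weight of the
// Pred->Tail edge -- the same capture-then-reattach order the pass now uses.
void retarget(Block &Pred, Block *Tail, Block *NewTarget) {
  unsigned Weight = Pred.weightTo(Tail); // capture before the edge disappears
  Pred.removeSuccessor(Tail);
  Pred.addSuccessor(NewTarget, Weight);
}

int main() {
  Block Pred, Tail, Target;
  Pred.addSuccessor(&Tail, 42);
  retarget(Pred, &Tail, &Target);
  assert(Pred.weightTo(&Target) == 42);
  return 0;
}
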
diff --git a/contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp b/contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp
index bf4fd65..83966bd 100644
--- a/contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -13,10 +13,12 @@
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
+#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
@@ -41,7 +43,7 @@ TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
const TargetRegisterInfo *TRI,
const MachineFunction &MF) const {
if (OpNum >= MCID.getNumOperands())
- return 0;
+ return nullptr;
short RegClass = MCID.OpInfo[OpNum].RegClass;
if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
@@ -49,7 +51,7 @@ TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
// Instructions like INSERT_SUBREG do not have fixed register classes.
if (RegClass < 0)
- return 0;
+ return nullptr;
// Otherwise just look it up normally.
return TRI->getRegClass(RegClass);
@@ -109,7 +111,7 @@ TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
// If MBB isn't immediately before MBB, insert a branch to it.
if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
- InsertBranch(*MBB, NewDest, 0, SmallVector<MachineOperand, 0>(),
+ InsertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(),
Tail->getDebugLoc());
MBB->addSuccessor(NewDest);
}
@@ -122,13 +124,11 @@ MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
bool HasDef = MCID.getNumDefs();
if (HasDef && !MI->getOperand(0).isReg())
// No idea how to commute this instruction. Target should implement its own.
- return 0;
+ return nullptr;
unsigned Idx1, Idx2;
if (!findCommutedOpIndices(MI, Idx1, Idx2)) {
- std::string msg;
- raw_string_ostream Msg(msg);
- Msg << "Don't know how to commute: " << *MI;
- report_fatal_error(Msg.str());
+ assert(MI->isCommutable() && "Precondition violation: MI must be commutable.");
+ return nullptr;
}
assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
@@ -248,13 +248,15 @@ bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
oe = MI->memoperands_end();
o != oe;
++o) {
- if ((*o)->isLoad() && (*o)->getValue())
+ if ((*o)->isLoad()) {
if (const FixedStackPseudoSourceValue *Value =
- dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
+ dyn_cast_or_null<FixedStackPseudoSourceValue>(
+ (*o)->getPseudoValue())) {
FrameIndex = Value->getFrameIndex();
MMO = *o;
return true;
}
+ }
}
return false;
}
@@ -266,13 +268,15 @@ bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
oe = MI->memoperands_end();
o != oe;
++o) {
- if ((*o)->isStore() && (*o)->getValue())
+ if ((*o)->isStore()) {
if (const FixedStackPseudoSourceValue *Value =
- dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
+ dyn_cast_or_null<FixedStackPseudoSourceValue>(
+ (*o)->getPseudoValue())) {
FrameIndex = Value->getFrameIndex();
MMO = *o;
return true;
}
+ }
}
return false;
}
@@ -338,14 +342,14 @@ static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
unsigned FoldIdx) {
assert(MI->isCopy() && "MI must be a COPY instruction");
if (MI->getNumOperands() != 2)
- return 0;
+ return nullptr;
assert(FoldIdx<2 && "FoldIdx refers no nonexistent operand");
const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
const MachineOperand &LiveOp = MI->getOperand(1-FoldIdx);
if (FoldOp.getSubReg() || LiveOp.getSubReg())
- return 0;
+ return nullptr;
unsigned FoldReg = FoldOp.getReg();
unsigned LiveReg = LiveOp.getReg();
@@ -357,13 +361,13 @@ static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
- return RC->contains(LiveOp.getReg()) ? RC : 0;
+ return RC->contains(LiveOp.getReg()) ? RC : nullptr;
if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
return RC;
// FIXME: Allow folding when register classes are memory compatible.
- return 0;
+ return nullptr;
}
bool TargetInstrInfo::
@@ -372,6 +376,65 @@ canFoldMemoryOperand(const MachineInstr *MI,
return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}
+static MachineInstr* foldPatchpoint(MachineFunction &MF,
+ MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex,
+ const TargetInstrInfo &TII) {
+ unsigned StartIdx = 0;
+ switch (MI->getOpcode()) {
+ case TargetOpcode::STACKMAP:
+ StartIdx = 2; // Skip ID, nShadowBytes.
+ break;
+ case TargetOpcode::PATCHPOINT: {
+ // For PatchPoint, the call args are not foldable.
+ PatchPointOpers opers(MI);
+ StartIdx = opers.getVarIdx();
+ break;
+ }
+ default:
+ llvm_unreachable("unexpected stackmap opcode");
+ }
+
+  // Return nullptr if any operands requested for folding are not foldable (not
+ // part of the stackmap's live values).
+ for (SmallVectorImpl<unsigned>::const_iterator I = Ops.begin(), E = Ops.end();
+ I != E; ++I) {
+ if (*I < StartIdx)
+ return nullptr;
+ }
+
+ MachineInstr *NewMI =
+ MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
+ MachineInstrBuilder MIB(MF, NewMI);
+
+  // No need to fold the return value, the metadata, or the function arguments.
+ for (unsigned i = 0; i < StartIdx; ++i)
+ MIB.addOperand(MI->getOperand(i));
+
+ for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
+ unsigned SpillSize;
+ unsigned SpillOffset;
+ // Compute the spill slot size and offset.
+ const TargetRegisterClass *RC =
+ MF.getRegInfo().getRegClass(MO.getReg());
+ bool Valid = TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize,
+ SpillOffset, &MF.getTarget());
+ if (!Valid)
+ report_fatal_error("cannot spill patchpoint subregister operand");
+ MIB.addImm(StackMaps::IndirectMemRefOp);
+ MIB.addImm(SpillSize);
+ MIB.addFrameIndex(FrameIndex);
+ MIB.addImm(SpillOffset);
+ }
+ else
+ MIB.addOperand(MO);
+ }
+ return NewMI;
+}
+
/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
@@ -393,8 +456,18 @@ TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
assert(MBB && "foldMemoryOperand needs an inserted instruction");
MachineFunction &MF = *MBB->getParent();
- // Ask the target to do the actual folding.
- if (MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI)) {
+ MachineInstr *NewMI = nullptr;
+
+ if (MI->getOpcode() == TargetOpcode::STACKMAP ||
+ MI->getOpcode() == TargetOpcode::PATCHPOINT) {
+ // Fold stackmap/patchpoint.
+ NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
+ } else {
+ // Ask the target to do the actual folding.
+    NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI);
+ }
+
+ if (NewMI) {
NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
// Add a memory operand, foldMemoryOperandImpl doesn't do that.
assert((!(Flags & MachineMemOperand::MOStore) ||
@@ -417,11 +490,11 @@ TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
// Straight COPY may fold as load/store.
if (!MI->isCopy() || Ops.size() != 1)
- return 0;
+ return nullptr;
const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
if (!RC)
- return 0;
+ return nullptr;
const MachineOperand &MO = MI->getOperand(1-Ops[0]);
MachineBasicBlock::iterator Pos = MI;
@@ -450,8 +523,20 @@ TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
MachineFunction &MF = *MBB.getParent();
// Ask the target to do the actual folding.
- MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
- if (!NewMI) return 0;
+ MachineInstr *NewMI = nullptr;
+ int FrameIndex = 0;
+
+ if ((MI->getOpcode() == TargetOpcode::STACKMAP ||
+ MI->getOpcode() == TargetOpcode::PATCHPOINT) &&
+ isLoadFromStackSlot(LoadMI, FrameIndex)) {
+ // Fold stackmap/patchpoint.
+ NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
+ } else {
+ // Ask the target to do the actual folding.
+ NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
+ }
+
+ if (!NewMI) return nullptr;
NewMI = MBB.insert(MI, NewMI);
@@ -562,7 +647,7 @@ bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const {
// Terminators and labels can't be scheduled around.
- if (MI->isTerminator() || MI->isLabel())
+ if (MI->isTerminator() || MI->isPosition())
return true;
// Don't attempt to schedule around any instruction that defines
@@ -586,7 +671,7 @@ bool TargetInstrInfo::usePreRAHazardRecognizer() const {
// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
-CreateTargetHazardRecognizer(const TargetMachine *TM,
+CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
const ScheduleDAG *DAG) const {
// Dummy hazard recognizer allows all instructions to issue.
return new ScheduleHazardRecognizer();
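
The TargetInstrInfo.cpp changes above make foldMemoryOperand special-case STACKMAP and PATCHPOINT: only the live-value operands may be spilled to a frame index, so any requested operand index below the live-value start (2 for STACKMAP, i.e. past ID and nShadowBytes; the variable-operand index for PATCHPOINT) aborts the fold. The standalone sketch below covers just that gate, under the operand layout described in the diff; the enum and function names are illustrative, not an LLVM API:

#include <cassert>
#include <vector>

// Illustrative only: operand layout assumed from the diff, not a real LLVM API.
enum class Opcode { Stackmap, Patchpoint };

// STACKMAP live values start after <ID, nShadowBytes>; for PATCHPOINT the
// variable operands start at a target-computed index (here a parameter).
unsigned liveValueStartIdx(Opcode Op, unsigned PatchpointVarIdx) {
  return Op == Opcode::Stackmap ? 2u : PatchpointVarIdx;
}

// Folding is only legal if every requested operand is one of the live values.
bool canFoldOperands(Opcode Op, unsigned PatchpointVarIdx,
                     const std::vector<unsigned> &Ops) {
  unsigned StartIdx = liveValueStartIdx(Op, PatchpointVarIdx);
  for (unsigned Idx : Ops)
    if (Idx < StartIdx)
      return false;
  return true;
}

int main() {
  assert(canFoldOperands(Opcode::Stackmap, 0, {2, 5}));  // live values only
  assert(!canFoldOperands(Opcode::Stackmap, 0, {1}));    // nShadowBytes: no
  assert(!canFoldOperands(Opcode::Patchpoint, 4, {3}));  // a call argument: no
  return 0;
}
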
diff --git a/contrib/llvm/lib/CodeGen/TargetLoweringBase.cpp b/contrib/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 30305af..e80ef71 100644
--- a/contrib/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -18,11 +18,15 @@
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
@@ -35,7 +39,7 @@ using namespace llvm;
/// InitLibcallNames - Set default libcall names.
///
-static void InitLibcallNames(const char **Names, const TargetMachine &TM) {
+static void InitLibcallNames(const char **Names, const Triple &TT) {
Names[RTLIB::SHL_I16] = "__ashlhi3";
Names[RTLIB::SHL_I32] = "__ashlsi3";
Names[RTLIB::SHL_I64] = "__ashldi3";
@@ -78,16 +82,16 @@ static void InitLibcallNames(const char **Names, const TargetMachine &TM) {
Names[RTLIB::UREM_I128] = "__umodti3";
// These are generally not available.
- Names[RTLIB::SDIVREM_I8] = 0;
- Names[RTLIB::SDIVREM_I16] = 0;
- Names[RTLIB::SDIVREM_I32] = 0;
- Names[RTLIB::SDIVREM_I64] = 0;
- Names[RTLIB::SDIVREM_I128] = 0;
- Names[RTLIB::UDIVREM_I8] = 0;
- Names[RTLIB::UDIVREM_I16] = 0;
- Names[RTLIB::UDIVREM_I32] = 0;
- Names[RTLIB::UDIVREM_I64] = 0;
- Names[RTLIB::UDIVREM_I128] = 0;
+ Names[RTLIB::SDIVREM_I8] = nullptr;
+ Names[RTLIB::SDIVREM_I16] = nullptr;
+ Names[RTLIB::SDIVREM_I32] = nullptr;
+ Names[RTLIB::SDIVREM_I64] = nullptr;
+ Names[RTLIB::SDIVREM_I128] = nullptr;
+ Names[RTLIB::UDIVREM_I8] = nullptr;
+ Names[RTLIB::UDIVREM_I16] = nullptr;
+ Names[RTLIB::UDIVREM_I32] = nullptr;
+ Names[RTLIB::UDIVREM_I64] = nullptr;
+ Names[RTLIB::UDIVREM_I128] = nullptr;
Names[RTLIB::NEG_I32] = "__negsi2";
Names[RTLIB::NEG_I64] = "__negdi2";
@@ -201,6 +205,11 @@ static void InitLibcallNames(const char **Names, const TargetMachine &TM) {
Names[RTLIB::FLOOR_F80] = "floorl";
Names[RTLIB::FLOOR_F128] = "floorl";
Names[RTLIB::FLOOR_PPCF128] = "floorl";
+ Names[RTLIB::ROUND_F32] = "roundf";
+ Names[RTLIB::ROUND_F64] = "round";
+ Names[RTLIB::ROUND_F80] = "roundl";
+ Names[RTLIB::ROUND_F128] = "roundl";
+ Names[RTLIB::ROUND_PPCF128] = "roundl";
Names[RTLIB::COPYSIGN_F32] = "copysignf";
Names[RTLIB::COPYSIGN_F64] = "copysign";
Names[RTLIB::COPYSIGN_F80] = "copysignl";
@@ -211,6 +220,10 @@ static void InitLibcallNames(const char **Names, const TargetMachine &TM) {
Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
Names[RTLIB::FPEXT_F16_F32] = "__gnu_h2f_ieee";
Names[RTLIB::FPROUND_F32_F16] = "__gnu_f2h_ieee";
+ Names[RTLIB::FPROUND_F64_F16] = "__truncdfhf2";
+ Names[RTLIB::FPROUND_F80_F16] = "__truncxfhf2";
+ Names[RTLIB::FPROUND_F128_F16] = "__trunctfhf2";
+ Names[RTLIB::FPROUND_PPCF128_F16] = "__trunctfhf2";
Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
Names[RTLIB::FPROUND_F80_F32] = "__truncxfsf2";
Names[RTLIB::FPROUND_F128_F32] = "__trunctfsf2";
@@ -375,7 +388,7 @@ static void InitLibcallNames(const char **Names, const TargetMachine &TM) {
Names[RTLIB::SYNC_FETCH_AND_UMIN_8] = "__sync_fetch_and_umin_8";
Names[RTLIB::SYNC_FETCH_AND_UMIN_16] = "__sync_fetch_and_umin_16";
- if (Triple(TM.getTargetTriple()).getEnvironment() == Triple::GNU) {
+ if (TT.getEnvironment() == Triple::GNU) {
Names[RTLIB::SINCOS_F32] = "sincosf";
Names[RTLIB::SINCOS_F64] = "sincos";
Names[RTLIB::SINCOS_F80] = "sincosl";
@@ -383,18 +396,18 @@ static void InitLibcallNames(const char **Names, const TargetMachine &TM) {
Names[RTLIB::SINCOS_PPCF128] = "sincosl";
} else {
// These are generally not available.
- Names[RTLIB::SINCOS_F32] = 0;
- Names[RTLIB::SINCOS_F64] = 0;
- Names[RTLIB::SINCOS_F80] = 0;
- Names[RTLIB::SINCOS_F128] = 0;
- Names[RTLIB::SINCOS_PPCF128] = 0;
+ Names[RTLIB::SINCOS_F32] = nullptr;
+ Names[RTLIB::SINCOS_F64] = nullptr;
+ Names[RTLIB::SINCOS_F80] = nullptr;
+ Names[RTLIB::SINCOS_F128] = nullptr;
+ Names[RTLIB::SINCOS_PPCF128] = nullptr;
}
- if (Triple(TM.getTargetTriple()).getOS() != Triple::OpenBSD) {
+ if (TT.getOS() != Triple::OpenBSD) {
Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = "__stack_chk_fail";
} else {
// These are generally not available.
- Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = 0;
+ Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = nullptr;
}
}
@@ -409,7 +422,10 @@ static void InitLibcallCallingConvs(CallingConv::ID *CCs) {
/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
- if (OpVT == MVT::f32) {
+ if (OpVT == MVT::f16) {
+ if (RetVT == MVT::f32)
+ return FPEXT_F16_F32;
+ } else if (OpVT == MVT::f32) {
if (RetVT == MVT::f64)
return FPEXT_F32_F64;
if (RetVT == MVT::f128)
@@ -425,7 +441,18 @@ RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
- if (RetVT == MVT::f32) {
+ if (RetVT == MVT::f16) {
+ if (OpVT == MVT::f32)
+ return FPROUND_F32_F16;
+ if (OpVT == MVT::f64)
+ return FPROUND_F64_F16;
+ if (OpVT == MVT::f80)
+ return FPROUND_F80_F16;
+ if (OpVT == MVT::f128)
+ return FPROUND_F128_F16;
+ if (OpVT == MVT::ppcf128)
+ return FPROUND_PPCF128_F16;
+ } else if (RetVT == MVT::f32) {
if (OpVT == MVT::f64)
return FPROUND_F64_F32;
if (OpVT == MVT::f80)
@@ -659,25 +686,29 @@ static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
/// NOTE: The constructor takes ownership of TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm,
const TargetLoweringObjectFile *tlof)
- : TM(tm), TD(TM.getDataLayout()), TLOF(*tlof) {
+ : TM(tm), DL(TM.getDataLayout()), TLOF(*tlof) {
initActions();
// Perform these initializations only once.
- IsLittleEndian = TD->isLittleEndian();
+ IsLittleEndian = DL->isLittleEndian();
MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove = 8;
MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize
= MaxStoresPerMemmoveOptSize = 4;
UseUnderscoreSetJmp = false;
UseUnderscoreLongJmp = false;
SelectIsExpensive = false;
+ HasMultipleConditionRegisters = false;
+ HasExtractBitsInsn = false;
IntDivIsCheap = false;
Pow2DivIsCheap = false;
JumpIsExpensive = false;
PredictableSelectIsExpensive = false;
+ MaskAndBranchFoldingIsLegal = false;
StackPointerRegisterToSaveRestore = 0;
ExceptionPointerRegister = 0;
ExceptionSelectorRegister = 0;
BooleanContents = UndefinedBooleanContent;
+ BooleanFloatContents = UndefinedBooleanContent;
BooleanVectorContents = UndefinedBooleanContent;
SchedPreferenceInfo = Sched::ILP;
JumpBufSize = 0;
@@ -690,7 +721,7 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm,
SupportJumpTables = true;
MinimumJumpTableEntries = 4;
- InitLibcallNames(LibcallRoutineNames, TM);
+ InitLibcallNames(LibcallRoutineNames, Triple(TM.getTargetTriple()));
InitCmpLibcallCCs(CmpLibcallCCs);
InitLibcallCallingConvs(LibcallCallingConvs);
}
@@ -718,6 +749,10 @@ void TargetLoweringBase::initActions() {
setIndexedStoreAction(IM, (MVT::SimpleValueType)VT, Expand);
}
+ // Most backends expect to see the node which just returns the value loaded.
+ setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
+ (MVT::SimpleValueType)VT, Expand);
+
// These operations default to expand.
setOperationAction(ISD::FGETSIGN, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::CONCAT_VECTORS, (MVT::SimpleValueType)VT, Expand);
@@ -727,8 +762,15 @@ void TargetLoweringBase::initActions() {
// These operations default to expand for vector types.
if (VT >= MVT::FIRST_VECTOR_VALUETYPE &&
- VT <= MVT::LAST_VECTOR_VALUETYPE)
+ VT <= MVT::LAST_VECTOR_VALUETYPE) {
setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG,
+ (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG,
+ (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG,
+ (MVT::SimpleValueType)VT, Expand);
+ }
}
// Most targets ignore the @llvm.prefetch intrinsic.
@@ -754,6 +796,7 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::FCEIL, MVT::f16, Expand);
setOperationAction(ISD::FRINT, MVT::f16, Expand);
setOperationAction(ISD::FTRUNC, MVT::f16, Expand);
+ setOperationAction(ISD::FROUND, MVT::f16, Expand);
setOperationAction(ISD::FLOG , MVT::f32, Expand);
setOperationAction(ISD::FLOG2, MVT::f32, Expand);
setOperationAction(ISD::FLOG10, MVT::f32, Expand);
@@ -764,6 +807,7 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::FCEIL, MVT::f32, Expand);
setOperationAction(ISD::FRINT, MVT::f32, Expand);
setOperationAction(ISD::FTRUNC, MVT::f32, Expand);
+ setOperationAction(ISD::FROUND, MVT::f32, Expand);
setOperationAction(ISD::FLOG , MVT::f64, Expand);
setOperationAction(ISD::FLOG2, MVT::f64, Expand);
setOperationAction(ISD::FLOG10, MVT::f64, Expand);
@@ -774,6 +818,7 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::FCEIL, MVT::f64, Expand);
setOperationAction(ISD::FRINT, MVT::f64, Expand);
setOperationAction(ISD::FTRUNC, MVT::f64, Expand);
+ setOperationAction(ISD::FROUND, MVT::f64, Expand);
setOperationAction(ISD::FLOG , MVT::f128, Expand);
setOperationAction(ISD::FLOG2, MVT::f128, Expand);
setOperationAction(ISD::FLOG10, MVT::f128, Expand);
@@ -784,6 +829,7 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::FCEIL, MVT::f128, Expand);
setOperationAction(ISD::FRINT, MVT::f128, Expand);
setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
+ setOperationAction(ISD::FROUND, MVT::f128, Expand);
// Default ISD::TRAP to expand (which turns it into abort).
setOperationAction(ISD::TRAP, MVT::Other, Expand);
@@ -799,7 +845,7 @@ MVT TargetLoweringBase::getPointerTy(uint32_t AS) const {
}
unsigned TargetLoweringBase::getPointerSizeInBits(uint32_t AS) const {
- return TD->getPointerSizeInBits(AS);
+ return DL->getPointerSizeInBits(AS);
}
unsigned TargetLoweringBase::getPointerTypeSizeInBits(Type *Ty) const {
@@ -808,7 +854,7 @@ unsigned TargetLoweringBase::getPointerTypeSizeInBits(Type *Ty) const {
}
MVT TargetLoweringBase::getScalarShiftAmountTy(EVT LHSTy) const {
- return MVT::getIntegerVT(8*TD->getPointerSize(0));
+ return MVT::getIntegerVT(8*DL->getPointerSize(0));
}
EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy) const {
@@ -894,6 +940,58 @@ bool TargetLoweringBase::isLegalRC(const TargetRegisterClass *RC) const {
return false;
}
+/// Replace/modify any TargetFrameIndex operands with a target-dependent
+/// sequence of memory operands that is recognized by PrologEpilogInserter.
+MachineBasicBlock*
+TargetLoweringBase::emitPatchPoint(MachineInstr *MI,
+ MachineBasicBlock *MBB) const {
+ MachineFunction &MF = *MI->getParent()->getParent();
+
+ // MI changes inside this loop as we grow operands.
+ for(unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
+ MachineOperand &MO = MI->getOperand(OperIdx);
+ if (!MO.isFI())
+ continue;
+
+ // foldMemoryOperand builds a new MI after replacing a single FI operand
+ // with the canonical set of five x86 addressing-mode operands.
+ int FI = MO.getIndex();
+ MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());
+
+ // Copy operands before the frame-index.
+ for (unsigned i = 0; i < OperIdx; ++i)
+ MIB.addOperand(MI->getOperand(i));
+ // Add frame index operands: direct-mem-ref tag, #FI, offset.
+ MIB.addImm(StackMaps::DirectMemRefOp);
+ MIB.addOperand(MI->getOperand(OperIdx));
+ MIB.addImm(0);
+ // Copy the operands after the frame index.
+ for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i)
+ MIB.addOperand(MI->getOperand(i));
+
+ // Inherit previous memory operands.
+ MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");
+
+ // Add a new memory operand for this FI.
+ const MachineFrameInfo &MFI = *MF.getFrameInfo();
+ assert(MFI.getObjectOffset(FI) != -1);
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
+ MachineMemOperand::MOLoad,
+ TM.getDataLayout()->getPointerSize(),
+ MFI.getObjectAlignment(FI));
+ MIB->addMemOperand(MF, MMO);
+
+ // Replace the instruction and update the operand index.
+ MBB->insert(MachineBasicBlock::iterator(MI), MIB);
+ OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1;
+ MI->eraseFromParent();
+ MI = MIB;
+ }
+ return MBB;
+}
+
/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
std::pair<const TargetRegisterClass*, uint8_t>
@@ -938,7 +1036,7 @@ void TargetLoweringBase::computeRegisterProperties() {
// Find the largest integer register class.
unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
- for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg)
+ for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
// Every integer value type larger than this largest register takes twice as
@@ -1009,27 +1107,35 @@ void TargetLoweringBase::computeRegisterProperties() {
}
}
+ if (!isTypeLegal(MVT::f16)) {
+ NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
+ RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
+ TransformToType[MVT::f16] = MVT::i16;
+ ValueTypeActions.setTypeAction(MVT::f16, TypeSoftenFloat);
+ }
+
// Loop over all of the vector value types to see which need transformations.
for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
- MVT VT = (MVT::SimpleValueType)i;
- if (isTypeLegal(VT)) continue;
+ MVT VT = (MVT::SimpleValueType) i;
+ if (isTypeLegal(VT))
+ continue;
- // Determine if there is a legal wider type. If so, we should promote to
- // that wider vector type.
MVT EltVT = VT.getVectorElementType();
unsigned NElts = VT.getVectorNumElements();
- if (NElts != 1 && !shouldSplitVectorElementType(EltVT)) {
- bool IsLegalWiderType = false;
- // First try to promote the elements of integer vectors. If no legal
- // promotion was found, fallback to the widen-vector method.
- for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
- MVT SVT = (MVT::SimpleValueType)nVT;
+ bool IsLegalWiderType = false;
+ LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
+ switch (PreferredAction) {
+ case TypePromoteInteger: {
+ // Try to promote the elements of integer vectors. If no legal
+ // promotion was found, fall through to the widen-vector method.
+ for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
+ MVT SVT = (MVT::SimpleValueType) nVT;
// Promote vectors of integers to vectors with the same number
// of elements, with a wider element type.
if (SVT.getVectorElementType().getSizeInBits() > EltVT.getSizeInBits()
- && SVT.getVectorNumElements() == NElts &&
- isTypeLegal(SVT) && SVT.getScalarType().isInteger()) {
+ && SVT.getVectorNumElements() == NElts && isTypeLegal(SVT)
+ && SVT.getScalarType().isInteger()) {
TransformToType[i] = SVT;
RegisterTypeForVT[i] = SVT;
NumRegistersForVT[i] = 1;
@@ -1038,15 +1144,15 @@ void TargetLoweringBase::computeRegisterProperties() {
break;
}
}
-
- if (IsLegalWiderType) continue;
-
+ if (IsLegalWiderType)
+ break;
+ }
+ case TypeWidenVector: {
// Try to widen the vector.
- for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
- MVT SVT = (MVT::SimpleValueType)nVT;
- if (SVT.getVectorElementType() == EltVT &&
- SVT.getVectorNumElements() > NElts &&
- isTypeLegal(SVT)) {
+ for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
+ MVT SVT = (MVT::SimpleValueType) nVT;
+ if (SVT.getVectorElementType() == EltVT
+ && SVT.getVectorNumElements() > NElts && isTypeLegal(SVT)) {
TransformToType[i] = SVT;
RegisterTypeForVT[i] = SVT;
NumRegistersForVT[i] = 1;
@@ -1055,27 +1161,34 @@ void TargetLoweringBase::computeRegisterProperties() {
break;
}
}
- if (IsLegalWiderType) continue;
+ if (IsLegalWiderType)
+ break;
}
-
- MVT IntermediateVT;
- MVT RegisterVT;
- unsigned NumIntermediates;
- NumRegistersForVT[i] =
- getVectorTypeBreakdownMVT(VT, IntermediateVT, NumIntermediates,
- RegisterVT, this);
- RegisterTypeForVT[i] = RegisterVT;
-
- MVT NVT = VT.getPow2VectorType();
- if (NVT == VT) {
- // Type is already a power of 2. The default action is to split.
- TransformToType[i] = MVT::Other;
- unsigned NumElts = VT.getVectorNumElements();
- ValueTypeActions.setTypeAction(VT,
- NumElts > 1 ? TypeSplitVector : TypeScalarizeVector);
- } else {
- TransformToType[i] = NVT;
- ValueTypeActions.setTypeAction(VT, TypeWidenVector);
+ case TypeSplitVector:
+ case TypeScalarizeVector: {
+ MVT IntermediateVT;
+ MVT RegisterVT;
+ unsigned NumIntermediates;
+ NumRegistersForVT[i] = getVectorTypeBreakdownMVT(VT, IntermediateVT,
+ NumIntermediates, RegisterVT, this);
+ RegisterTypeForVT[i] = RegisterVT;
+
+ MVT NVT = VT.getPow2VectorType();
+ if (NVT == VT) {
+ // Type is already a power of 2. The default action is to split.
+ TransformToType[i] = MVT::Other;
+ if (PreferredAction == TypeScalarizeVector)
+ ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
+ else
+ ValueTypeActions.setTypeAction(VT, TypeSplitVector);
+ } else {
+ TransformToType[i] = NVT;
+ ValueTypeActions.setTypeAction(VT, TypeWidenVector);
+ }
+ break;
+ }
+ default:
+ llvm_unreachable("Unknown vector legalization action!");
}
}
@@ -1087,7 +1200,7 @@ void TargetLoweringBase::computeRegisterProperties() {
for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
const TargetRegisterClass* RRC;
uint8_t Cost;
- tie(RRC, Cost) = findRepresentativeClass((MVT::SimpleValueType)i);
+ std::tie(RRC, Cost) = findRepresentativeClass((MVT::SimpleValueType)i);
RepRegClassForVT[i] = RRC;
RepRegClassCostForVT[i] = Cost;
}
@@ -1230,7 +1343,7 @@ void llvm::GetReturnInfo(Type* ReturnType, AttributeSet attr,
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty) const {
- return TD->getCallFrameTypeAlignment(Ty);
+ return DL->getABITypeAlignment(Ty);
}
//===----------------------------------------------------------------------===//
@@ -1258,7 +1371,7 @@ int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
case Mul: return ISD::MUL;
case FMul: return ISD::FMUL;
case UDiv: return ISD::UDIV;
- case SDiv: return ISD::UDIV;
+ case SDiv: return ISD::SDIV;
case FDiv: return ISD::FDIV;
case URem: return ISD::UREM;
case SRem: return ISD::SREM;
@@ -1364,6 +1477,8 @@ bool TargetLoweringBase::isLegalAddressingMode(const AddrMode &AM,
return false;
// Allow 2*r as r+r.
break;
+ default: // Don't allow n * r
+ return false;
}
return true;
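
Among the TargetLoweringBase.cpp changes above, getFPROUND learns the f16 destination types and InitLibcallNames gains the matching __trunc*hf2 entries. The standalone sketch below restates that (source type → libcall name) mapping for rounds to f16 as a quick reference; the library names are taken from the diff, while the FT enum and the function are stand-ins for MVT and the RTLIB query:

#include <cassert>
#include <cstring>

// Stand-in for MVT's floating-point simple value types.
enum class FT { f16, f32, f64, f80, f128, ppcf128 };

// Libcall used to round (truncate) the given source type to f16, mirroring
// the FPROUND_*_F16 entries added by the diff; nullptr if there is none.
const char *fproundToF16Name(FT OpVT) {
  switch (OpVT) {
  case FT::f32:     return "__gnu_f2h_ieee";
  case FT::f64:     return "__truncdfhf2";
  case FT::f80:     return "__truncxfhf2";
  case FT::f128:    return "__trunctfhf2";
  case FT::ppcf128: return "__trunctfhf2";
  default:          return nullptr;
  }
}

int main() {
  assert(std::strcmp(fproundToF16Name(FT::f64), "__truncdfhf2") == 0);
  assert(fproundToF16Name(FT::f16) == nullptr); // no round from f16 to f16
  return 0;
}
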
diff --git a/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index 59d7b57..f59efa3 100644
--- a/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -22,6 +22,7 @@
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
@@ -34,7 +35,7 @@
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/Mangler.h"
+#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
using namespace dwarf;
@@ -43,21 +44,16 @@ using namespace dwarf;
// ELF
//===----------------------------------------------------------------------===//
-MCSymbol *
-TargetLoweringObjectFileELF::getCFIPersonalitySymbol(const GlobalValue *GV,
- Mangler *Mang,
- MachineModuleInfo *MMI) const {
+MCSymbol *TargetLoweringObjectFileELF::getCFIPersonalitySymbol(
+ const GlobalValue *GV, Mangler &Mang, const TargetMachine &TM,
+ MachineModuleInfo *MMI) const {
unsigned Encoding = getPersonalityEncoding();
- switch (Encoding & 0x70) {
- default:
- report_fatal_error("We do not support this DWARF encoding yet!");
- case dwarf::DW_EH_PE_absptr:
- return getSymbol(*Mang, GV);
- case dwarf::DW_EH_PE_pcrel: {
+ if ((Encoding & 0x80) == dwarf::DW_EH_PE_indirect)
return getContext().GetOrCreateSymbol(StringRef("DW.ref.") +
- getSymbol(*Mang, GV)->getName());
- }
- }
+ TM.getSymbol(GV, Mang)->getName());
+ if ((Encoding & 0x70) == dwarf::DW_EH_PE_absptr)
+ return TM.getSymbol(GV, Mang);
+ report_fatal_error("We do not support this DWARF encoding yet!");
}
void TargetLoweringObjectFileELF::emitPersonalityValue(MCStreamer &Streamer,
@@ -87,24 +83,21 @@ void TargetLoweringObjectFileELF::emitPersonalityValue(MCStreamer &Streamer,
Streamer.EmitSymbolValue(Sym, Size);
}
-const MCExpr *TargetLoweringObjectFileELF::
-getTTypeGlobalReference(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI, unsigned Encoding,
- MCStreamer &Streamer) const {
+const MCExpr *TargetLoweringObjectFileELF::getTTypeGlobalReference(
+ const GlobalValue *GV, unsigned Encoding, Mangler &Mang,
+ const TargetMachine &TM, MachineModuleInfo *MMI,
+ MCStreamer &Streamer) const {
if (Encoding & dwarf::DW_EH_PE_indirect) {
MachineModuleInfoELF &ELFMMI = MMI->getObjFileInfo<MachineModuleInfoELF>();
- SmallString<128> Name;
- Mang->getNameWithPrefix(Name, GV, true);
- Name += ".DW.stub";
+ MCSymbol *SSym = getSymbolWithGlobalValueBase(GV, ".DW.stub", Mang, TM);
// Add information about the stub reference to ELFMMI so that the stub
// gets emitted by the asmprinter.
- MCSymbol *SSym = getContext().GetOrCreateSymbol(Name.str());
MachineModuleInfoImpl::StubValueTy &StubSym = ELFMMI.getGVStubEntry(SSym);
- if (StubSym.getPointer() == 0) {
- MCSymbol *Sym = getSymbol(*Mang, GV);
+ if (!StubSym.getPointer()) {
+ MCSymbol *Sym = TM.getSymbol(GV, Mang);
StubSym = MachineModuleInfoImpl::StubValueTy(Sym, !GV->hasLocalLinkage());
}
@@ -114,7 +107,7 @@ getTTypeGlobalReference(const GlobalValue *GV, Mangler *Mang,
}
return TargetLoweringObjectFile::
- getTTypeGlobalReference(GV, Mang, MMI, Encoding, Streamer);
+ getTTypeGlobalReference(GV, Encoding, Mang, TM, MMI, Streamer);
}
static SectionKind
@@ -199,23 +192,40 @@ getELFSectionFlags(SectionKind K) {
return Flags;
}
+static const Comdat *getELFComdat(const GlobalValue *GV) {
+ const Comdat *C = GV->getComdat();
+ if (!C)
+ return nullptr;
-const MCSection *TargetLoweringObjectFileELF::
-getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler *Mang, const TargetMachine &TM) const {
+ if (C->getSelectionKind() != Comdat::Any)
+ report_fatal_error("ELF COMDATs only support SelectionKind::Any, '" +
+ C->getName() + "' cannot be lowered.");
+
+ return C;
+}
+
+const MCSection *TargetLoweringObjectFileELF::getExplicitSectionGlobal(
+ const GlobalValue *GV, SectionKind Kind, Mangler &Mang,
+ const TargetMachine &TM) const {
StringRef SectionName = GV->getSection();
// Infer section flags from the section name if we can.
Kind = getELFKindForNamedSection(SectionName, Kind);
+ StringRef Group = "";
+ unsigned Flags = getELFSectionFlags(Kind);
+ if (const Comdat *C = getELFComdat(GV)) {
+ Group = C->getName();
+ Flags |= ELF::SHF_GROUP;
+ }
return getContext().getELFSection(SectionName,
- getELFSectionType(SectionName, Kind),
- getELFSectionFlags(Kind), Kind);
+ getELFSectionType(SectionName, Kind), Flags,
+ Kind, /*EntrySize=*/0, Group);
}
/// getSectionPrefixForGlobal - Return the section prefix name used by options
/// FunctionsSections and DataSections.
-static const char *getSectionPrefixForGlobal(SectionKind Kind) {
+static StringRef getSectionPrefixForGlobal(SectionKind Kind) {
if (Kind.isText()) return ".text.";
if (Kind.isReadOnly()) return ".rodata.";
if (Kind.isBSS()) return ".bss.";
@@ -232,10 +242,9 @@ static const char *getSectionPrefixForGlobal(SectionKind Kind) {
return ".data.rel.ro.";
}
-
const MCSection *TargetLoweringObjectFileELF::
SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler *Mang, const TargetMachine &TM) const {
+ Mangler &Mang, const TargetMachine &TM) const {
// If we have -ffunction-section or -fdata-section then we should emit the
// global value to a uniqued section specifically for it.
bool EmitUniquedSection;
@@ -246,18 +255,20 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
// If this global is linkonce/weak and the target handles this by emitting it
// into a 'uniqued' section name, create and return the section now.
- if ((GV->isWeakForLinker() || EmitUniquedSection) &&
+ if ((GV->isWeakForLinker() || EmitUniquedSection || GV->hasComdat()) &&
!Kind.isCommon()) {
- const char *Prefix;
- Prefix = getSectionPrefixForGlobal(Kind);
+ StringRef Prefix = getSectionPrefixForGlobal(Kind);
+
+ SmallString<128> Name(Prefix);
+ TM.getNameWithPrefix(Name, GV, Mang, true);
- SmallString<128> Name(Prefix, Prefix+strlen(Prefix));
- MCSymbol *Sym = getSymbol(*Mang, GV);
- Name.append(Sym->getName().begin(), Sym->getName().end());
StringRef Group = "";
unsigned Flags = getELFSectionFlags(Kind);
- if (GV->isWeakForLinker()) {
- Group = Sym->getName();
+ if (GV->isWeakForLinker() || GV->hasComdat()) {
+ if (const Comdat *C = getELFComdat(GV))
+ Group = C->getName();
+ else
+ Group = Name.substr(Prefix.size());
Flags |= ELF::SHF_GROUP;
}
@@ -327,8 +338,9 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
/// getSectionForConstant - Given a mergeable constant with the
/// specified size and relocation information, return a section that it
/// should be placed in.
-const MCSection *TargetLoweringObjectFileELF::
-getSectionForConstant(SectionKind Kind) const {
+const MCSection *
+TargetLoweringObjectFileELF::getSectionForConstant(SectionKind Kind,
+ const Constant *C) const {
if (Kind.isMergeableConst4() && MergeableConst4Section)
return MergeableConst4Section;
if (Kind.isMergeableConst8() && MergeableConst8Section)
@@ -343,8 +355,8 @@ getSectionForConstant(SectionKind Kind) const {
return DataRelROSection;
}
-const MCSection *
-TargetLoweringObjectFileELF::getStaticCtorSection(unsigned Priority) const {
+const MCSection *TargetLoweringObjectFileELF::getStaticCtorSection(
+ unsigned Priority, const MCSymbol *KeySym) const {
// The default scheme is .ctor / .dtor, so we have to invert the priority
// numbering.
if (Priority == 65535)
@@ -363,8 +375,8 @@ TargetLoweringObjectFileELF::getStaticCtorSection(unsigned Priority) const {
}
}
-const MCSection *
-TargetLoweringObjectFileELF::getStaticDtorSection(unsigned Priority) const {
+const MCSection *TargetLoweringObjectFileELF::getStaticDtorSection(
+ unsigned Priority, const MCSymbol *KeySym) const {
// The default scheme is .ctor / .dtor, so we have to invert the priority
// numbering.
if (Priority == 65535)
@@ -405,14 +417,24 @@ TargetLoweringObjectFileELF::InitializeELF(bool UseInitArray_) {
// MachO
//===----------------------------------------------------------------------===//
+/// getDepLibFromLinkerOpt - Extract the dependent library name from a linker
+/// option string. Returns StringRef() if the option does not specify a library.
+StringRef TargetLoweringObjectFileMachO::
+getDepLibFromLinkerOpt(StringRef LinkerOption) const {
+ const char *LibCmd = "-l";
+ if (LinkerOption.startswith(LibCmd))
+ return LinkerOption.substr(strlen(LibCmd));
+ return StringRef();
+}
+
/// emitModuleFlags - Perform code emission for module flags.
void TargetLoweringObjectFileMachO::
emitModuleFlags(MCStreamer &Streamer,
ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
- Mangler *Mang, const TargetMachine &TM) const {
+ Mangler &Mang, const TargetMachine &TM) const {
unsigned VersionVal = 0;
unsigned ImageInfoFlags = 0;
- MDNode *LinkerOptions = 0;
+ MDNode *LinkerOptions = nullptr;
StringRef SectionVal;
for (ArrayRef<Module::ModuleFlagEntry>::iterator
@@ -481,13 +503,25 @@ emitModuleFlags(MCStreamer &Streamer,
Streamer.AddBlankLine();
}
-const MCSection *TargetLoweringObjectFileMachO::
-getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler *Mang, const TargetMachine &TM) const {
+static void checkMachOComdat(const GlobalValue *GV) {
+ const Comdat *C = GV->getComdat();
+ if (!C)
+ return;
+
+ report_fatal_error("MachO doesn't support COMDATs, '" + C->getName() +
+ "' cannot be lowered.");
+}
+
+const MCSection *TargetLoweringObjectFileMachO::getExplicitSectionGlobal(
+ const GlobalValue *GV, SectionKind Kind, Mangler &Mang,
+ const TargetMachine &TM) const {
// Parse the section specifier and create it if valid.
StringRef Segment, Section;
unsigned TAA = 0, StubSize = 0;
bool TAAParsed;
+
+ checkMachOComdat(GV);
+
std::string ErrorCode =
MCSectionMachO::ParseSectionSpecifier(GV->getSection(), Segment, Section,
TAA, TAAParsed, StubSize);
@@ -520,9 +554,45 @@ getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
return S;
}
+bool TargetLoweringObjectFileMachO::isSectionAtomizableBySymbols(
+ const MCSection &Section) const {
+ const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
+
+ // Sections holding 1 byte strings are atomized based on the data
+ // they contain.
+ // Sections holding 2 byte strings require symbols in order to be
+ // atomized.
+ // There is no dedicated section for 4 byte strings.
+ if (SMO.getKind().isMergeable1ByteCString())
+ return false;
+
+ if (SMO.getSegmentName() == "__DATA" &&
+ SMO.getSectionName() == "__cfstring")
+ return false;
+
+ switch (SMO.getType()) {
+ default:
+ return true;
+
+ // These sections are atomized at the element boundaries without using
+ // symbols.
+ case MachO::S_4BYTE_LITERALS:
+ case MachO::S_8BYTE_LITERALS:
+ case MachO::S_16BYTE_LITERALS:
+ case MachO::S_LITERAL_POINTERS:
+ case MachO::S_NON_LAZY_SYMBOL_POINTERS:
+ case MachO::S_LAZY_SYMBOL_POINTERS:
+ case MachO::S_MOD_INIT_FUNC_POINTERS:
+ case MachO::S_MOD_TERM_FUNC_POINTERS:
+ case MachO::S_INTERPOSING:
+ return false;
+ }
+}
+
const MCSection *TargetLoweringObjectFileMachO::
SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler *Mang, const TargetMachine &TM) const {
+ Mangler &Mang, const TargetMachine &TM) const {
+ checkMachOComdat(GV);
// Handle thread local data.
if (Kind.isThreadBSS()) return TLSBSSSection;
@@ -556,7 +626,7 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
return FourByteConstantSection;
if (Kind.isMergeableConst8())
return EightByteConstantSection;
- if (Kind.isMergeableConst16() && SixteenByteConstantSection)
+ if (Kind.isMergeableConst16())
return SixteenByteConstantSection;
}
@@ -585,7 +655,8 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
}
const MCSection *
-TargetLoweringObjectFileMachO::getSectionForConstant(SectionKind Kind) const {
+TargetLoweringObjectFileMachO::getSectionForConstant(SectionKind Kind,
+ const Constant *C) const {
// If this constant requires a relocation, we have to put it in the data
// segment, not in the text segment.
if (Kind.isDataRel() || Kind.isReadOnlyWithRel())
@@ -595,55 +666,31 @@ TargetLoweringObjectFileMachO::getSectionForConstant(SectionKind Kind) const {
return FourByteConstantSection;
if (Kind.isMergeableConst8())
return EightByteConstantSection;
- if (Kind.isMergeableConst16() && SixteenByteConstantSection)
+ if (Kind.isMergeableConst16())
return SixteenByteConstantSection;
return ReadOnlySection; // .const
}
-/// shouldEmitUsedDirectiveFor - This hook allows targets to selectively decide
-/// not to emit the UsedDirective for some symbols in llvm.used.
-// FIXME: REMOVE this (rdar://7071300)
-bool TargetLoweringObjectFileMachO::
-shouldEmitUsedDirectiveFor(const GlobalValue *GV, Mangler *Mang) const {
- /// On Darwin, internally linked data beginning with "L" or "l" does not have
- /// the directive emitted (this occurs in ObjC metadata).
- if (!GV) return false;
-
- // Check whether the mangled name has the "Private" or "LinkerPrivate" prefix.
- if (GV->hasLocalLinkage() && !isa<Function>(GV)) {
- // FIXME: ObjC metadata is currently emitted as internal symbols that have
- // \1L and \0l prefixes on them. Fix them to be Private/LinkerPrivate and
- // this horrible hack can go away.
- MCSymbol *Sym = getSymbol(*Mang, GV);
- if (Sym->getName()[0] == 'L' || Sym->getName()[0] == 'l')
- return false;
- }
-
- return true;
-}
-
-const MCExpr *TargetLoweringObjectFileMachO::
-getTTypeGlobalReference(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI, unsigned Encoding,
- MCStreamer &Streamer) const {
+const MCExpr *TargetLoweringObjectFileMachO::getTTypeGlobalReference(
+ const GlobalValue *GV, unsigned Encoding, Mangler &Mang,
+ const TargetMachine &TM, MachineModuleInfo *MMI,
+ MCStreamer &Streamer) const {
// The mach-o version of this method defaults to returning a stub reference.
if (Encoding & DW_EH_PE_indirect) {
MachineModuleInfoMachO &MachOMMI =
MMI->getObjFileInfo<MachineModuleInfoMachO>();
- SmallString<128> Name;
- Mang->getNameWithPrefix(Name, GV, true);
- Name += "$non_lazy_ptr";
+ MCSymbol *SSym =
+ getSymbolWithGlobalValueBase(GV, "$non_lazy_ptr", Mang, TM);
// Add information about the stub reference to MachOMMI so that the stub
// gets emitted by the asmprinter.
- MCSymbol *SSym = getContext().GetOrCreateSymbol(Name.str());
MachineModuleInfoImpl::StubValueTy &StubSym =
GV->hasHiddenVisibility() ? MachOMMI.getHiddenGVStubEntry(SSym) :
MachOMMI.getGVStubEntry(SSym);
- if (StubSym.getPointer() == 0) {
- MCSymbol *Sym = getSymbol(*Mang, GV);
+ if (!StubSym.getPointer()) {
+ MCSymbol *Sym = TM.getSymbol(GV, Mang);
StubSym = MachineModuleInfoImpl::StubValueTy(Sym, !GV->hasLocalLinkage());
}
@@ -652,27 +699,24 @@ getTTypeGlobalReference(const GlobalValue *GV, Mangler *Mang,
Encoding & ~dwarf::DW_EH_PE_indirect, Streamer);
}
- return TargetLoweringObjectFile::
- getTTypeGlobalReference(GV, Mang, MMI, Encoding, Streamer);
+ return TargetLoweringObjectFile::getTTypeGlobalReference(GV, Encoding, Mang,
+ TM, MMI, Streamer);
}
-MCSymbol *TargetLoweringObjectFileMachO::
-getCFIPersonalitySymbol(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI) const {
+MCSymbol *TargetLoweringObjectFileMachO::getCFIPersonalitySymbol(
+ const GlobalValue *GV, Mangler &Mang, const TargetMachine &TM,
+ MachineModuleInfo *MMI) const {
// The mach-o version of this method defaults to returning a stub reference.
MachineModuleInfoMachO &MachOMMI =
MMI->getObjFileInfo<MachineModuleInfoMachO>();
- SmallString<128> Name;
- Mang->getNameWithPrefix(Name, GV, true);
- Name += "$non_lazy_ptr";
+ MCSymbol *SSym = getSymbolWithGlobalValueBase(GV, "$non_lazy_ptr", Mang, TM);
// Add information about the stub reference to MachOMMI so that the stub
// gets emitted by the asmprinter.
- MCSymbol *SSym = getContext().GetOrCreateSymbol(Name.str());
MachineModuleInfoImpl::StubValueTy &StubSym = MachOMMI.getGVStubEntry(SSym);
- if (StubSym.getPointer() == 0) {
- MCSymbol *Sym = getSymbol(*Mang, GV);
+ if (!StubSym.getPointer()) {
+ MCSymbol *Sym = TM.getSymbol(GV, Mang);
StubSym = MachineModuleInfoImpl::StubValueTy(Sym, !GV->hasLocalLinkage());
}
@@ -718,58 +762,129 @@ getCOFFSectionFlags(SectionKind K) {
return Flags;
}
-const MCSection *TargetLoweringObjectFileCOFF::
-getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler *Mang, const TargetMachine &TM) const {
+static const GlobalValue *getComdatGVForCOFF(const GlobalValue *GV) {
+ const Comdat *C = GV->getComdat();
+ assert(C && "expected GV to have a Comdat!");
+
+ StringRef ComdatGVName = C->getName();
+ const GlobalValue *ComdatGV = GV->getParent()->getNamedValue(ComdatGVName);
+ if (!ComdatGV)
+ report_fatal_error("Associative COMDAT symbol '" + ComdatGVName +
+ "' does not exist.");
+
+ if (ComdatGV->getComdat() != C)
+ report_fatal_error("Associative COMDAT symbol '" + ComdatGVName +
+ "' is not a key for its COMDAT.");
+
+ return ComdatGV;
+}
+
+static int getSelectionForCOFF(const GlobalValue *GV) {
+ if (const Comdat *C = GV->getComdat()) {
+ const GlobalValue *ComdatKey = getComdatGVForCOFF(GV);
+ if (const auto *GA = dyn_cast<GlobalAlias>(ComdatKey))
+ ComdatKey = GA->getBaseObject();
+ if (ComdatKey == GV) {
+ switch (C->getSelectionKind()) {
+ case Comdat::Any:
+ return COFF::IMAGE_COMDAT_SELECT_ANY;
+ case Comdat::ExactMatch:
+ return COFF::IMAGE_COMDAT_SELECT_EXACT_MATCH;
+ case Comdat::Largest:
+ return COFF::IMAGE_COMDAT_SELECT_LARGEST;
+ case Comdat::NoDuplicates:
+ return COFF::IMAGE_COMDAT_SELECT_NODUPLICATES;
+ case Comdat::SameSize:
+ return COFF::IMAGE_COMDAT_SELECT_SAME_SIZE;
+ }
+ } else {
+ return COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE;
+ }
+ } else if (GV->isWeakForLinker()) {
+ return COFF::IMAGE_COMDAT_SELECT_ANY;
+ }
+ return 0;
+}
+
+const MCSection *TargetLoweringObjectFileCOFF::getExplicitSectionGlobal(
+ const GlobalValue *GV, SectionKind Kind, Mangler &Mang,
+ const TargetMachine &TM) const {
int Selection = 0;
unsigned Characteristics = getCOFFSectionFlags(Kind);
- SmallString<128> Name(GV->getSection().c_str());
- if (GV->isWeakForLinker()) {
- Selection = COFF::IMAGE_COMDAT_SELECT_ANY;
- Characteristics |= COFF::IMAGE_SCN_LNK_COMDAT;
- Name.append("$");
- Mang->getNameWithPrefix(Name, GV, false, false);
+ StringRef Name = GV->getSection();
+ StringRef COMDATSymName = "";
+ if ((GV->isWeakForLinker() || GV->hasComdat()) && !Kind.isCommon()) {
+ Selection = getSelectionForCOFF(GV);
+ const GlobalValue *ComdatGV;
+ if (Selection == COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE)
+ ComdatGV = getComdatGVForCOFF(GV);
+ else
+ ComdatGV = GV;
+
+ if (!ComdatGV->hasPrivateLinkage()) {
+ MCSymbol *Sym = TM.getSymbol(ComdatGV, Mang);
+ COMDATSymName = Sym->getName();
+ Characteristics |= COFF::IMAGE_SCN_LNK_COMDAT;
+ } else {
+ Selection = 0;
+ }
}
return getContext().getCOFFSection(Name,
Characteristics,
Kind,
- "",
+ COMDATSymName,
Selection);
}
-static const char *getCOFFSectionPrefixForUniqueGlobal(SectionKind Kind) {
+static const char *getCOFFSectionNameForUniqueGlobal(SectionKind Kind) {
if (Kind.isText())
- return ".text$";
- if (Kind.isBSS ())
- return ".bss$";
- if (Kind.isThreadLocal()) {
- // 'LLVM' is just an arbitary string to ensure that the section name gets
- // sorted in between '.tls$AAA' and '.tls$ZZZ' by the linker.
- return ".tls$LLVM";
- }
+ return ".text";
+ if (Kind.isBSS())
+ return ".bss";
+ if (Kind.isThreadLocal())
+ return ".tls$";
if (Kind.isWriteable())
- return ".data$";
- return ".rdata$";
+ return ".data";
+ return ".rdata";
}
const MCSection *TargetLoweringObjectFileCOFF::
SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler *Mang, const TargetMachine &TM) const {
+ Mangler &Mang, const TargetMachine &TM) const {
+ // If we have -ffunction-sections then we should emit the global value to a
+ // uniqued section specifically for it.
+ bool EmitUniquedSection;
+ if (Kind.isText())
+ EmitUniquedSection = TM.getFunctionSections();
+ else
+ EmitUniquedSection = TM.getDataSections();
// If this global is linkonce/weak and the target handles this by emitting it
// into a 'uniqued' section name, create and return the section now.
- if (GV->isWeakForLinker()) {
- const char *Prefix = getCOFFSectionPrefixForUniqueGlobal(Kind);
- SmallString<128> Name(Prefix, Prefix+strlen(Prefix));
- Mang->getNameWithPrefix(Name, GV, false, false);
-
+ // Section names depend on the name of the symbol which is not feasible if the
+ // symbol has private linkage.
+ if ((GV->isWeakForLinker() || EmitUniquedSection || GV->hasComdat()) &&
+ !Kind.isCommon()) {
+ const char *Name = getCOFFSectionNameForUniqueGlobal(Kind);
unsigned Characteristics = getCOFFSectionFlags(Kind);
Characteristics |= COFF::IMAGE_SCN_LNK_COMDAT;
+ int Selection = getSelectionForCOFF(GV);
+ if (!Selection)
+ Selection = COFF::IMAGE_COMDAT_SELECT_NODUPLICATES;
+ const GlobalValue *ComdatGV;
+ if (GV->hasComdat())
+ ComdatGV = getComdatGVForCOFF(GV);
+ else
+ ComdatGV = GV;
- return getContext().getCOFFSection(Name.str(), Characteristics,
- Kind, "", COFF::IMAGE_COMDAT_SELECT_ANY);
+ if (!ComdatGV->hasPrivateLinkage()) {
+ MCSymbol *Sym = TM.getSymbol(ComdatGV, Mang);
+ StringRef COMDATSymName = Sym->getName();
+ return getContext().getCOFFSection(Name, Characteristics, Kind,
+ COMDATSymName, Selection);
+ }
}
if (Kind.isText())
@@ -781,17 +896,28 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
if (Kind.isReadOnly())
return ReadOnlySection;
- if (Kind.isBSS())
+ // Note: we claim that common symbols are put in BSSSection, but they are
+ // really emitted with the magic .comm directive, which creates a symbol table
+ // entry but not a section.
+ if (Kind.isBSS() || Kind.isCommon())
return BSSSection;
return DataSection;
}
+StringRef TargetLoweringObjectFileCOFF::
+getDepLibFromLinkerOpt(StringRef LinkerOption) const {
+ const char *LibCmd = "/DEFAULTLIB:";
+ if (LinkerOption.startswith(LibCmd))
+ return LinkerOption.substr(strlen(LibCmd));
+ return StringRef();
+}
+
void TargetLoweringObjectFileCOFF::
emitModuleFlags(MCStreamer &Streamer,
ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
- Mangler *Mang, const TargetMachine &TM) const {
- MDNode *LinkerOptions = 0;
+ Mangler &Mang, const TargetMachine &TM) const {
+ MDNode *LinkerOptions = nullptr;
// Look for the "Linker Options" flag, since it's the only one we support.
for (ArrayRef<Module::ModuleFlagEntry>::iterator
@@ -833,3 +959,30 @@ emitModuleFlags(MCStreamer &Streamer,
}
}
}
+
+static const MCSection *getAssociativeCOFFSection(MCContext &Ctx,
+ const MCSection *Sec,
+ const MCSymbol *KeySym) {
+ // Return the normal section if we don't have to be associative.
+ if (!KeySym)
+ return Sec;
+
+ // Make an associative section with the same name and kind as the normal
+ // section.
+ const MCSectionCOFF *SecCOFF = cast<MCSectionCOFF>(Sec);
+ unsigned Characteristics =
+ SecCOFF->getCharacteristics() | COFF::IMAGE_SCN_LNK_COMDAT;
+ return Ctx.getCOFFSection(SecCOFF->getSectionName(), Characteristics,
+ SecCOFF->getKind(), KeySym->getName(),
+ COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE);
+}
+
+const MCSection *TargetLoweringObjectFileCOFF::getStaticCtorSection(
+ unsigned Priority, const MCSymbol *KeySym) const {
+ return getAssociativeCOFFSection(getContext(), StaticCtorSection, KeySym);
+}
+
+const MCSection *TargetLoweringObjectFileCOFF::getStaticDtorSection(
+ unsigned Priority, const MCSymbol *KeySym) const {
+ return getAssociativeCOFFSection(getContext(), StaticDtorSection, KeySym);
+}
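
The COFF changes above route weak and comdat globals into per-symbol COMDAT sections. The following standalone sketch, using illustrative types only (GlobalDesc and pickSection are not LLVM APIs, and the Comdat::SelectionKind mapping is collapsed to two common cases), shows the kind of section/key/selection decision the patched SelectSectionForGlobal makes, assuming -ffunction-sections / -fdata-sections:

// Standalone sketch of the COFF section/COMDAT choice; not LLVM code.
#include <cstdio>
#include <string>

enum class Selection { None, Any, NoDuplicates };

struct GlobalDesc {
  std::string Name;
  bool IsText;          // function vs. data
  bool HasComdat;       // carries an IR comdat
  bool WeakForLinker;   // linkonce/weak linkage
  bool PrivateLinkage;  // no symbol the section could be keyed on
};

static void pickSection(const GlobalDesc &GV) {
  const char *Section = GV.IsText ? ".text" : ".data";
  Selection Sel = Selection::None;
  std::string ComdatSym;
  if ((GV.WeakForLinker || GV.HasComdat) && !GV.PrivateLinkage) {
    Sel = GV.WeakForLinker ? Selection::Any : Selection::NoDuplicates;
    ComdatSym = GV.Name; // the key symbol names the COMDAT group
  }
  std::printf("%-4s -> %s  comdat key '%s'  selection %d\n", GV.Name.c_str(),
              Section, ComdatSym.c_str(), static_cast<int>(Sel));
}

int main() {
  pickSection({"foo", true, true, false, false});   // comdat function
  pickSection({"bar", true, false, true, false});   // weak function
  pickSection({"baz", false, false, false, false}); // plain data, no COMDAT
}

A private-linkage global cannot key a COMDAT group, which is why the patched code clears the selection in that case rather than emitting a uniqued section.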
diff --git a/contrib/llvm/lib/CodeGen/TargetOptionsImpl.cpp b/contrib/llvm/lib/CodeGen/TargetOptionsImpl.cpp
index f7bf86b..3ca2017 100644
--- a/contrib/llvm/lib/CodeGen/TargetOptionsImpl.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetOptionsImpl.cpp
@@ -12,8 +12,8 @@
//===----------------------------------------------------------------------===//
#include "llvm/IR/Function.h"
-#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/CodeGen/TargetRegisterInfo.cpp b/contrib/llvm/lib/CodeGen/TargetRegisterInfo.cpp
index 5a15243..a3a4fb3 100644
--- a/contrib/llvm/lib/CodeGen/TargetRegisterInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetRegisterInfo.cpp
@@ -101,7 +101,7 @@ TargetRegisterInfo::getAllocatableClass(const TargetRegisterClass *RC) const {
Idx += Offset + 1;
}
}
- return NULL;
+ return nullptr;
}
/// getMinimalPhysRegClass - Returns the Register Class of a physical
@@ -113,7 +113,7 @@ TargetRegisterInfo::getMinimalPhysRegClass(unsigned reg, EVT VT) const {
// Pick the most sub register class of the right type that contains
// this physreg.
- const TargetRegisterClass* BestRC = 0;
+ const TargetRegisterClass* BestRC = nullptr;
for (regclass_iterator I = regclass_begin(), E = regclass_end(); I != E; ++I){
const TargetRegisterClass* RC = *I;
if ((VT == MVT::Other || RC->hasType(VT)) && RC->contains(reg) &&
@@ -130,7 +130,7 @@ TargetRegisterInfo::getMinimalPhysRegClass(unsigned reg, EVT VT) const {
static void getAllocatableSetForRC(const MachineFunction &MF,
const TargetRegisterClass *RC, BitVector &R){
assert(RC->isAllocatable() && "invalid for nonallocatable sets");
- ArrayRef<uint16_t> Order = RC->getRawAllocationOrder(MF);
+ ArrayRef<MCPhysReg> Order = RC->getRawAllocationOrder(MF);
for (unsigned i = 0; i != Order.size(); ++i)
R.set(Order[i]);
}
@@ -164,7 +164,7 @@ const TargetRegisterClass *firstCommonClass(const uint32_t *A,
for (unsigned I = 0, E = TRI->getNumRegClasses(); I < E; I += 32)
if (unsigned Common = *A++ & *B++)
return TRI->getRegClass(I + countTrailingZeros(Common));
- return 0;
+ return nullptr;
}
const TargetRegisterClass *
@@ -174,7 +174,7 @@ TargetRegisterInfo::getCommonSubClass(const TargetRegisterClass *A,
if (A == B)
return A;
if (!A || !B)
- return 0;
+ return nullptr;
// Register classes are ordered topologically, so the largest common
// sub-class is the common sub-class with the smallest ID.
@@ -194,7 +194,7 @@ TargetRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
// The bit mask contains all register classes that are projected into B
// by Idx. Find a class that is also a sub-class of A.
return firstCommonClass(RCI.getMask(), A->getSubClassMask(), this);
- return 0;
+ return nullptr;
}
const TargetRegisterClass *TargetRegisterInfo::
@@ -215,7 +215,7 @@ getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
// Arrange for RCA to be the larger register so the answer will be found in
// the first iteration. This makes the search linear for the most common
// case.
- const TargetRegisterClass *BestRC = 0;
+ const TargetRegisterClass *BestRC = nullptr;
unsigned *BestPreA = &PreA;
unsigned *BestPreB = &PreB;
if (RCA->getSize() < RCB->getSize()) {
diff --git a/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
index b9a6b47..f42d47b 100644
--- a/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -27,7 +27,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "twoaddrinstr"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
@@ -51,6 +50,8 @@
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "twoaddrinstr"
+
STATISTIC(NumTwoAddressInstrs, "Number of two-address instructions");
STATISTIC(NumCommuted , "Number of instructions commuted to coalesce");
STATISTIC(NumAggrCommuted , "Number of instructions aggressively commuted");
@@ -144,7 +145,7 @@ public:
initializeTwoAddressInstructionPassPass(*PassRegistry::getPassRegistry());
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<AliasAnalysis>();
AU.addPreserved<LiveVariables>();
@@ -156,7 +157,7 @@ public:
}
/// runOnMachineFunction - Pass entry point.
- bool runOnMachineFunction(MachineFunction&);
+ bool runOnMachineFunction(MachineFunction&) override;
};
} // end anonymous namespace
@@ -211,7 +212,7 @@ sink3AddrInstruction(MachineInstr *MI, unsigned SavedReg,
}
// Find the instruction that kills SavedReg.
- MachineInstr *KillMI = NULL;
+ MachineInstr *KillMI = nullptr;
if (LIS) {
LiveInterval &LI = LIS->getInterval(SavedReg);
assert(LI.end() != LI.begin() &&
@@ -229,7 +230,7 @@ sink3AddrInstruction(MachineInstr *MI, unsigned SavedReg,
for (MachineRegisterInfo::use_nodbg_iterator
UI = MRI->use_nodbg_begin(SavedReg),
UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
- MachineOperand &UseMO = UI.getOperand();
+ MachineOperand &UseMO = *UI;
if (!UseMO.isKill())
continue;
KillMI = UseMO.getParent();
@@ -250,12 +251,12 @@ sink3AddrInstruction(MachineInstr *MI, unsigned SavedReg,
// FIXME: This can be sped up if there is an easy way to query whether an
// instruction is before or after another instruction. Then we can use
// MachineRegisterInfo def / use instead.
- MachineOperand *KillMO = NULL;
+ MachineOperand *KillMO = nullptr;
MachineBasicBlock::iterator KillPos = KillMI;
++KillPos;
unsigned NumVisited = 0;
- for (MachineBasicBlock::iterator I = llvm::next(OldPos); I != KillPos; ++I) {
+ for (MachineBasicBlock::iterator I = std::next(OldPos); I != KillPos; ++I) {
MachineInstr *OtherMI = I;
// DBG_VALUE cannot be counted against the limit.
if (OtherMI->isDebugValue())
@@ -315,9 +316,7 @@ bool TwoAddressInstructionPass::noUseAfterLastDef(unsigned Reg, unsigned Dist,
unsigned &LastDef) {
LastDef = 0;
unsigned LastUse = Dist;
- for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(Reg),
- E = MRI->reg_end(); I != E; ++I) {
- MachineOperand &MO = I.getOperand();
+ for (MachineOperand &MO : MRI->reg_operands(Reg)) {
MachineInstr *MI = MO.getParent();
if (MI->getParent() != MBB || MI->isDebugValue())
continue;
@@ -417,9 +416,9 @@ static bool isKilled(MachineInstr &MI, unsigned Reg,
MachineRegisterInfo::def_iterator Begin = MRI->def_begin(Reg);
// If there are multiple defs, we can't do a simple analysis, so just
// go with what the kill flag says.
- if (llvm::next(Begin) != MRI->def_end())
+ if (std::next(Begin) != MRI->def_end())
return true;
- DefMI = &*Begin;
+ DefMI = Begin->getParent();
bool IsSrcPhys, IsDstPhys;
unsigned SrcReg, DstReg;
// If the def is something other than a copy, then it isn't going to
@@ -456,10 +455,10 @@ MachineInstr *findOnlyInterestingUse(unsigned Reg, MachineBasicBlock *MBB,
unsigned &DstReg, bool &IsDstPhys) {
if (!MRI->hasOneNonDBGUse(Reg))
// None or more than one use.
- return 0;
- MachineInstr &UseMI = *MRI->use_nodbg_begin(Reg);
+ return nullptr;
+ MachineInstr &UseMI = *MRI->use_instr_nodbg_begin(Reg);
if (UseMI.getParent() != MBB)
- return 0;
+ return nullptr;
unsigned SrcReg;
bool IsSrcPhys;
if (isCopyToReg(UseMI, TII, SrcReg, DstReg, IsSrcPhys, IsDstPhys)) {
@@ -471,7 +470,7 @@ MachineInstr *findOnlyInterestingUse(unsigned Reg, MachineBasicBlock *MBB,
IsDstPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
return &UseMI;
}
- return 0;
+ return nullptr;
}
/// getMappedReg - Return the physical register the specified virtual register
@@ -578,7 +577,7 @@ commuteInstruction(MachineBasicBlock::iterator &mi,
DEBUG(dbgs() << "2addr: COMMUTING : " << *MI);
MachineInstr *NewMI = TII->commuteInstruction(MI);
- if (NewMI == 0) {
+ if (NewMI == nullptr) {
DEBUG(dbgs() << "2addr: COMMUTING FAILED!\n");
return false;
}
@@ -647,7 +646,7 @@ TwoAddressInstructionPass::convertInstTo3Addr(MachineBasicBlock::iterator &mi,
if (!Sunk) {
DistanceMap.insert(std::make_pair(NewMI, Dist));
mi = NewMI;
- nmi = llvm::next(mi);
+ nmi = std::next(mi);
}
// Update source and destination register maps.
@@ -757,7 +756,7 @@ rescheduleMIBelowKill(MachineBasicBlock::iterator &mi,
// Must be created from unfolded load. Don't waste time trying this.
return false;
- MachineInstr *KillMI = 0;
+ MachineInstr *KillMI = nullptr;
if (LIS) {
LiveInterval &LI = LIS->getInterval(Reg);
assert(LI.end() != LI.begin() &&
@@ -816,7 +815,7 @@ rescheduleMIBelowKill(MachineBasicBlock::iterator &mi,
// Move the copies connected to MI down as well.
MachineBasicBlock::iterator Begin = MI;
- MachineBasicBlock::iterator AfterMI = llvm::next(Begin);
+ MachineBasicBlock::iterator AfterMI = std::next(Begin);
MachineBasicBlock::iterator End = AfterMI;
while (End->isCopy() && Defs.count(End->getOperand(1).getReg())) {
@@ -876,7 +875,7 @@ rescheduleMIBelowKill(MachineBasicBlock::iterator &mi,
}
// Move debug info as well.
- while (Begin != MBB->begin() && llvm::prior(Begin)->isDebugValue())
+ while (Begin != MBB->begin() && std::prev(Begin)->isDebugValue())
--Begin;
nmi = End;
@@ -891,7 +890,7 @@ rescheduleMIBelowKill(MachineBasicBlock::iterator &mi,
LIS->handleMove(CopyMI);
InsertPos = CopyMI;
}
- End = llvm::next(MachineBasicBlock::iterator(MI));
+ End = std::next(MachineBasicBlock::iterator(MI));
}
// Copies following MI may have been moved as well.
@@ -914,19 +913,17 @@ rescheduleMIBelowKill(MachineBasicBlock::iterator &mi,
/// instruction too close to the defs of its register dependencies.
bool TwoAddressInstructionPass::isDefTooClose(unsigned Reg, unsigned Dist,
MachineInstr *MI) {
- for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(Reg),
- DE = MRI->def_end(); DI != DE; ++DI) {
- MachineInstr *DefMI = &*DI;
- if (DefMI->getParent() != MBB || DefMI->isCopy() || DefMI->isCopyLike())
+ for (MachineInstr &DefMI : MRI->def_instructions(Reg)) {
+ if (DefMI.getParent() != MBB || DefMI.isCopy() || DefMI.isCopyLike())
continue;
- if (DefMI == MI)
+ if (&DefMI == MI)
return true; // MI is defining something KillMI uses
- DenseMap<MachineInstr*, unsigned>::iterator DDI = DistanceMap.find(DefMI);
+ DenseMap<MachineInstr*, unsigned>::iterator DDI = DistanceMap.find(&DefMI);
if (DDI == DistanceMap.end())
return true; // Below MI
unsigned DefDist = DDI->second;
assert(Dist > DefDist && "Visited def already?");
- if (TII->getInstrLatency(InstrItins, DefMI) > (Dist - DefDist))
+ if (TII->getInstrLatency(InstrItins, &DefMI) > (Dist - DefDist))
return true;
}
return false;
@@ -951,7 +948,7 @@ rescheduleKillAboveMI(MachineBasicBlock::iterator &mi,
// Must be created from unfolded load. Don't waste time trying this.
return false;
- MachineInstr *KillMI = 0;
+ MachineInstr *KillMI = nullptr;
if (LIS) {
LiveInterval &LI = LIS->getInterval(Reg);
assert(LI.end() != LI.begin() &&
@@ -1060,15 +1057,15 @@ rescheduleKillAboveMI(MachineBasicBlock::iterator &mi,
// Move the old kill above MI, don't forget to move debug info as well.
MachineBasicBlock::iterator InsertPos = mi;
- while (InsertPos != MBB->begin() && llvm::prior(InsertPos)->isDebugValue())
+ while (InsertPos != MBB->begin() && std::prev(InsertPos)->isDebugValue())
--InsertPos;
MachineBasicBlock::iterator From = KillMI;
- MachineBasicBlock::iterator To = llvm::next(From);
- while (llvm::prior(From)->isDebugValue())
+ MachineBasicBlock::iterator To = std::next(From);
+ while (std::prev(From)->isDebugValue())
--From;
MBB->splice(InsertPos, MBB, From, To);
- nmi = llvm::prior(InsertPos); // Backtrack so we process the moved instr.
+ nmi = std::prev(InsertPos); // Backtrack so we process the moved instr.
DistanceMap.erase(DI);
// Update live variables
@@ -1317,13 +1314,14 @@ collectTiedOperands(MachineInstr *MI, TiedOperandMap &TiedOperands) {
assert(SrcReg && SrcMO.isUse() && "two address instruction invalid");
// Deal with <undef> uses immediately - simply rewrite the src operand.
- if (SrcMO.isUndef()) {
+ if (SrcMO.isUndef() && !DstMO.getSubReg()) {
// Constrain the DstReg register class if required.
if (TargetRegisterInfo::isVirtualRegister(DstReg))
if (const TargetRegisterClass *RC = TII->getRegClass(MCID, SrcIdx,
TRI, *MF))
MRI->constrainRegClass(DstReg, RC);
SrcMO.setReg(DstReg);
+ SrcMO.setSubReg(0);
DEBUG(dbgs() << "\t\trewrite undef:\t" << *MI);
continue;
}
@@ -1349,6 +1347,7 @@ TwoAddressInstructionPass::processTiedPairs(MachineInstr *MI,
unsigned LastCopiedReg = 0;
SlotIndex LastCopyIdx;
unsigned RegB = 0;
+ unsigned SubRegB = 0;
for (unsigned tpi = 0, tpe = TiedPairs.size(); tpi != tpe; ++tpi) {
unsigned SrcIdx = TiedPairs[tpi].first;
unsigned DstIdx = TiedPairs[tpi].second;
@@ -1359,6 +1358,7 @@ TwoAddressInstructionPass::processTiedPairs(MachineInstr *MI,
// Grab RegB from the instruction because it may have changed if the
// instruction was commuted.
RegB = MI->getOperand(SrcIdx).getReg();
+ SubRegB = MI->getOperand(SrcIdx).getSubReg();
if (RegA == RegB) {
// The register is tied to multiple destinations (or else we would
@@ -1383,8 +1383,25 @@ TwoAddressInstructionPass::processTiedPairs(MachineInstr *MI,
#endif
// Emit a copy.
- BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
- TII->get(TargetOpcode::COPY), RegA).addReg(RegB);
+ MachineInstrBuilder MIB = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), RegA);
+ // If this operand is folding a truncation, the truncation now moves to the
+ // copy so that the register classes remain valid for the operands.
+ MIB.addReg(RegB, 0, SubRegB);
+ const TargetRegisterClass *RC = MRI->getRegClass(RegB);
+ if (SubRegB) {
+ if (TargetRegisterInfo::isVirtualRegister(RegA)) {
+ assert(TRI->getMatchingSuperRegClass(RC, MRI->getRegClass(RegA),
+ SubRegB) &&
+ "tied subregister must be a truncation");
+ // The superreg class will not be used to constrain the subreg class.
+ RC = nullptr;
+ }
+ else {
+ assert(TRI->getMatchingSuperReg(RegA, SubRegB, MRI->getRegClass(RegB))
+ && "tied subregister must be a truncation");
+ }
+ }
// Update DistanceMap.
MachineBasicBlock::iterator PrevMI = MI;
@@ -1404,7 +1421,7 @@ TwoAddressInstructionPass::processTiedPairs(MachineInstr *MI,
}
}
- DEBUG(dbgs() << "\t\tprepend:\t" << *PrevMI);
+ DEBUG(dbgs() << "\t\tprepend:\t" << *MIB);
MachineOperand &MO = MI->getOperand(SrcIdx);
assert(MO.isReg() && MO.getReg() == RegB && MO.isUse() &&
@@ -1417,9 +1434,12 @@ TwoAddressInstructionPass::processTiedPairs(MachineInstr *MI,
// Make sure regA is a legal regclass for the SrcIdx operand.
if (TargetRegisterInfo::isVirtualRegister(RegA) &&
TargetRegisterInfo::isVirtualRegister(RegB))
- MRI->constrainRegClass(RegA, MRI->getRegClass(RegB));
-
+ MRI->constrainRegClass(RegA, RC);
MO.setReg(RegA);
+ // The getMatchingSuper asserts guarantee that the register class projected
+ // by SubRegB is compatible with RegA with no subregister. So regardless of
+ // whether the dest oper writes a subreg, the source oper should not.
+ MO.setSubReg(0);
// Propagate SrcRegMap.
SrcRegMap[RegA] = RegB;
@@ -1431,12 +1451,14 @@ TwoAddressInstructionPass::processTiedPairs(MachineInstr *MI,
// Replace other (un-tied) uses of regB with LastCopiedReg.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg() && MO.getReg() == RegB && MO.isUse()) {
+ if (MO.isReg() && MO.getReg() == RegB && MO.getSubReg() == SubRegB &&
+ MO.isUse()) {
if (MO.isKill()) {
MO.setIsKill(false);
RemovedKillFlag = true;
}
MO.setReg(LastCopiedReg);
+ MO.setSubReg(0);
}
}
}
@@ -1509,7 +1531,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) {
Processed.clear();
for (MachineBasicBlock::iterator mi = MBB->begin(), me = MBB->end();
mi != me; ) {
- MachineBasicBlock::iterator nmi = llvm::next(mi);
+ MachineBasicBlock::iterator nmi = std::next(mi);
if (mi->isDebugValue()) {
mi = nmi;
continue;
@@ -1610,7 +1632,7 @@ eliminateRegSequence(MachineBasicBlock::iterator &MBBI) {
TargetRegisterInfo::isPhysicalRegister(DstReg) ||
!(MI->getNumOperands() & 1)) {
DEBUG(dbgs() << "Illegal REG_SEQUENCE instruction:" << *MI);
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
SmallVector<unsigned, 4> OrigRegs;
@@ -1664,7 +1686,7 @@ eliminateRegSequence(MachineBasicBlock::iterator &MBBI) {
}
MachineBasicBlock::iterator EndMBBI =
- llvm::next(MachineBasicBlock::iterator(MI));
+ std::next(MachineBasicBlock::iterator(MI));
if (!DefEmitted) {
DEBUG(dbgs() << "Turned: " << *MI << " into an IMPLICIT_DEF");
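
Most of this file's hunks are mechanical C++11 modernization: llvm::next and llvm::prior become std::next and std::prev, and explicit iterator loops become range-based for. A minimal standalone sketch of both rewrites, using standard containers rather than LLVM types:

// Sketch of the iterator-modernization pattern; not LLVM code.
#include <iostream>
#include <iterator>
#include <list>

int main() {
  std::list<int> Ops{10, 20, 30};

  // Old style, as on the removed lines: explicit iterator pair.
  for (std::list<int>::iterator I = Ops.begin(), E = Ops.end(); I != E; ++I)
    std::cout << *I << ' ';
  std::cout << '\n';

  // New style, as on the added lines: range-based for.
  for (int Op : Ops)
    std::cout << Op << ' ';
  std::cout << '\n';

  // std::next/std::prev replace llvm::next/llvm::prior for bidirectional
  // iterators such as MachineBasicBlock::iterator.
  auto Second = std::next(Ops.begin());
  auto Last = std::prev(Ops.end());
  std::cout << "second=" << *Second << " last=" << *Last << '\n';
}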
diff --git a/contrib/llvm/lib/CodeGen/UnreachableBlockElim.cpp b/contrib/llvm/lib/CodeGen/UnreachableBlockElim.cpp
index f735ef2..2e22082 100644
--- a/contrib/llvm/lib/CodeGen/UnreachableBlockElim.cpp
+++ b/contrib/llvm/lib/CodeGen/UnreachableBlockElim.cpp
@@ -23,32 +23,32 @@
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;
namespace {
class UnreachableBlockElim : public FunctionPass {
- virtual bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
public:
static char ID; // Pass identification, replacement for typeid
UnreachableBlockElim() : FunctionPass(ID) {
initializeUnreachableBlockElimPass(*PassRegistry::getPassRegistry());
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addPreserved<DominatorTree>();
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addPreserved<DominatorTreeWrapperPass>();
}
};
}
@@ -95,8 +95,8 @@ bool UnreachableBlockElim::runOnFunction(Function &F) {
namespace {
class UnreachableMachineBlockElim : public MachineFunctionPass {
- virtual bool runOnMachineFunction(MachineFunction &F);
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ bool runOnMachineFunction(MachineFunction &F) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
MachineModuleInfo *MMI;
public:
static char ID; // Pass identification, replacement for typeid
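
As in the previous file, the virtual method declarations here gain the override keyword. A small illustrative sketch of what that buys (FunctionPassLike and MyPass are stand-ins, not LLVM classes): if the base-class signature later changes, the compiler rejects the stale override instead of silently treating it as a new overload.

// Sketch of the override-keyword change; illustrative types only.
#include <iostream>

struct FunctionPassLike {
  virtual ~FunctionPassLike() = default;
  virtual bool runOnFunction(int &) { return false; }
};

struct MyPass : FunctionPassLike {
  // With 'override', a mismatch against the base signature is a compile
  // error rather than an accidental, never-called overload.
  bool runOnFunction(int &F) override {
    F += 1;
    return true;
  }
};

int main() {
  MyPass P;
  FunctionPassLike &Base = P;
  int F = 0;
  std::cout << Base.runOnFunction(F) << " F=" << F << '\n'; // dispatches to MyPass
}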
diff --git a/contrib/llvm/lib/CodeGen/VirtRegMap.cpp b/contrib/llvm/lib/CodeGen/VirtRegMap.cpp
index e0aa405..704736f 100644
--- a/contrib/llvm/lib/CodeGen/VirtRegMap.cpp
+++ b/contrib/llvm/lib/CodeGen/VirtRegMap.cpp
@@ -16,10 +16,10 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "regalloc"
#include "llvm/CodeGen/VirtRegMap.h"
#include "LiveDebugVariables.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SparseSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
@@ -39,6 +39,8 @@
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "regalloc"
+
STATISTIC(NumSpillSlots, "Number of spill slots allocated");
STATISTIC(NumIdCopies, "Number of identity moves eliminated after rewriting");
@@ -160,6 +162,7 @@ class VirtRegRewriter : public MachineFunctionPass {
SlotIndexes *Indexes;
LiveIntervals *LIS;
VirtRegMap *VRM;
+ SparseSet<unsigned> PhysRegs;
void rewrite();
void addMBBLiveIns();
@@ -167,9 +170,9 @@ public:
static char ID;
VirtRegRewriter() : MachineFunctionPass(ID) {}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
- virtual bool runOnMachineFunction(MachineFunction&);
+ bool runOnMachineFunction(MachineFunction&) override;
};
} // end anonymous namespace
@@ -267,6 +270,20 @@ void VirtRegRewriter::rewrite() {
SmallVector<unsigned, 8> SuperKills;
SmallPtrSet<const MachineInstr *, 4> NoReturnInsts;
+ // Here we have a SparseSet to hold which PhysRegs are actually encountered
+ // in the MF we are about to iterate over so that later when we call
+ // setPhysRegUsed, we are only doing it for physRegs that were actually found
+ // in the program and not for all of the possible physRegs for the given
+ // target architecture. If the target has a lot of physRegs, then for a small
+ // program there will be a significant compile time reduction here.
+ PhysRegs.clear();
+ PhysRegs.setUniverse(TRI->getNumRegs());
+
+ // The function with uwtable should guarantee that the stack unwinder
+ // can unwind the stack to the previous frame. Thus, we can't apply the
+ // noreturn optimization if the caller function has uwtable attribute.
+ bool HasUWTable = MF->getFunction()->hasFnAttribute(Attribute::UWTable);
+
for (MachineFunction::iterator MBBI = MF->begin(), MBBE = MF->end();
MBBI != MBBE; ++MBBI) {
DEBUG(MBBI->print(dbgs(), Indexes));
@@ -276,9 +293,12 @@ void VirtRegRewriter::rewrite() {
MachineInstr *MI = MII;
++MII;
- // Check if this instruction is a call to a noreturn function.
- // If so, all the definitions set by this instruction can be ignored.
- if (IsExitBB && MI->isCall())
+ // Check if this instruction is a call to a noreturn function. If this
+ // is a call to noreturn function and we don't need the stack unwinding
+ // functionality (i.e. this function does not have uwtable attribute and
+ // the callee function has the nounwind attribute), then we can ignore
+ // the definitions set by this instruction.
+ if (!HasUWTable && IsExitBB && MI->isCall()) {
for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
MOE = MI->operands_end(); MOI != MOE; ++MOI) {
MachineOperand &MO = *MOI;
@@ -294,6 +314,7 @@ void VirtRegRewriter::rewrite() {
NoReturnInsts.insert(MI);
break;
}
+ }
for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
MOE = MI->operands_end(); MOI != MOE; ++MOI) {
@@ -303,6 +324,15 @@ void VirtRegRewriter::rewrite() {
if (MO.isRegMask())
MRI->addPhysRegsUsedFromRegMask(MO.getRegMask());
+ // If we encounter a VirtReg or PhysReg then get at the PhysReg and add
+ // it to the physreg bitset. Later we use only the PhysRegs that were
+ // actually encountered in the MF to populate the MRI's used physregs.
+ if (MO.isReg() && MO.getReg())
+ PhysRegs.insert(
+ TargetRegisterInfo::isVirtualRegister(MO.getReg()) ?
+ VRM->getPhys(MO.getReg()) :
+ MO.getReg());
+
if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
continue;
unsigned VirtReg = MO.getReg();
@@ -376,20 +406,21 @@ void VirtRegRewriter::rewrite() {
// Tell MRI about physical registers in use.
if (NoReturnInsts.empty()) {
- for (unsigned Reg = 1, RegE = TRI->getNumRegs(); Reg != RegE; ++Reg)
- if (!MRI->reg_nodbg_empty(Reg))
- MRI->setPhysRegUsed(Reg);
+ for (SparseSet<unsigned>::iterator
+ RegI = PhysRegs.begin(), E = PhysRegs.end(); RegI != E; ++RegI)
+ if (!MRI->reg_nodbg_empty(*RegI))
+ MRI->setPhysRegUsed(*RegI);
} else {
- for (unsigned Reg = 1, RegE = TRI->getNumRegs(); Reg != RegE; ++Reg) {
+ for (SparseSet<unsigned>::iterator
+ I = PhysRegs.begin(), E = PhysRegs.end(); I != E; ++I) {
+ unsigned Reg = *I;
if (MRI->reg_nodbg_empty(Reg))
continue;
// Check if this register has a use that will impact the rest of the
// code. Uses in debug and noreturn instructions do not impact the
// generated code.
- for (MachineRegisterInfo::reg_nodbg_iterator It =
- MRI->reg_nodbg_begin(Reg),
- EndIt = MRI->reg_nodbg_end(); It != EndIt; ++It) {
- if (!NoReturnInsts.count(&(*It))) {
+ for (MachineInstr &It : MRI->reg_nodbg_instructions(Reg)) {
+ if (!NoReturnInsts.count(&It)) {
MRI->setPhysRegUsed(Reg);
break;
}
@@ -397,3 +428,4 @@ void VirtRegRewriter::rewrite() {
}
}
}
+
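
The comment added in VirtRegRewriter::rewrite explains the motivation for the new PhysRegs SparseSet: only registers actually encountered in the function are later handed to setPhysRegUsed, instead of scanning every register the target defines. An illustrative sketch of that idea using standard containers (not llvm::SparseSet):

// Sketch of the "record only registers actually seen" optimization.
#include <cstdio>
#include <unordered_set>
#include <vector>

int main() {
  const unsigned NumTargetRegs = 1024;                  // a register-rich target
  std::vector<unsigned> RegsSeenInFunction = {3, 7, 7, 42};

  std::unordered_set<unsigned> PhysRegs;                // stand-in for SparseSet
  for (unsigned R : RegsSeenInFunction)
    PhysRegs.insert(R);                                 // duplicates collapse

  unsigned Marked = 0;
  for (unsigned R : PhysRegs) {
    (void)R;                                            // stand-in for setPhysRegUsed(R)
    ++Marked;
  }
  std::printf("marked %u registers instead of scanning all %u\n", Marked,
              NumTargetRegs);
}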
diff --git a/contrib/llvm/lib/CodeGen/module.modulemap b/contrib/llvm/lib/CodeGen/module.modulemap
new file mode 100644
index 0000000..d4f68bc
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/module.modulemap
@@ -0,0 +1 @@
+module CodeGen { requires cplusplus umbrella "." module * { export * } }
diff --git a/contrib/llvm/lib/DebugInfo/DWARFAbbreviationDeclaration.cpp b/contrib/llvm/lib/DebugInfo/DWARFAbbreviationDeclaration.cpp
index f46fd58..c3e570e 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFAbbreviationDeclaration.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFAbbreviationDeclaration.cpp
@@ -18,7 +18,7 @@ void DWARFAbbreviationDeclaration::clear() {
Code = 0;
Tag = 0;
HasChildren = false;
- Attributes.clear();
+ AttributeSpecs.clear();
}
DWARFAbbreviationDeclaration::DWARFAbbreviationDeclaration() {
@@ -51,7 +51,7 @@ DWARFAbbreviationDeclaration::extract(DataExtractor Data, uint32_t* OffsetPtr) {
}
if (Attr == 0 && Form == 0)
break;
- Attributes.push_back(AttributeSpec(Attr, Form));
+ AttributeSpecs.push_back(AttributeSpec(Attr, Form));
}
if (Tag == 0) {
@@ -69,19 +69,19 @@ void DWARFAbbreviationDeclaration::dump(raw_ostream &OS) const {
else
OS << format("DW_TAG_Unknown_%x", getTag());
OS << "\tDW_CHILDREN_" << (hasChildren() ? "yes" : "no") << '\n';
- for (unsigned i = 0, e = Attributes.size(); i != e; ++i) {
+ for (const AttributeSpec &Spec : AttributeSpecs) {
OS << '\t';
- const char *attrString = AttributeString(Attributes[i].Attr);
+ const char *attrString = AttributeString(Spec.Attr);
if (attrString)
OS << attrString;
else
- OS << format("DW_AT_Unknown_%x", Attributes[i].Attr);
+ OS << format("DW_AT_Unknown_%x", Spec.Attr);
OS << '\t';
- const char *formString = FormEncodingString(Attributes[i].Form);
+ const char *formString = FormEncodingString(Spec.Form);
if (formString)
OS << formString;
else
- OS << format("DW_FORM_Unknown_%x", Attributes[i].Form);
+ OS << format("DW_FORM_Unknown_%x", Spec.Form);
OS << '\n';
}
OS << '\n';
@@ -89,8 +89,8 @@ void DWARFAbbreviationDeclaration::dump(raw_ostream &OS) const {
uint32_t
DWARFAbbreviationDeclaration::findAttributeIndex(uint16_t attr) const {
- for (uint32_t i = 0, e = Attributes.size(); i != e; ++i) {
- if (Attributes[i].Attr == attr)
+ for (uint32_t i = 0, e = AttributeSpecs.size(); i != e; ++i) {
+ if (AttributeSpecs[i].Attr == attr)
return i;
}
return -1U;
diff --git a/contrib/llvm/lib/DebugInfo/DWARFAbbreviationDeclaration.h b/contrib/llvm/lib/DebugInfo/DWARFAbbreviationDeclaration.h
index e9b072e..b86b9ec 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFAbbreviationDeclaration.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFAbbreviationDeclaration.h
@@ -27,19 +27,24 @@ class DWARFAbbreviationDeclaration {
uint16_t Attr;
uint16_t Form;
};
- SmallVector<AttributeSpec, 8> Attributes;
+ typedef SmallVector<AttributeSpec, 8> AttributeSpecVector;
+ AttributeSpecVector AttributeSpecs;
public:
DWARFAbbreviationDeclaration();
uint32_t getCode() const { return Code; }
uint32_t getTag() const { return Tag; }
bool hasChildren() const { return HasChildren; }
- uint32_t getNumAttributes() const { return Attributes.size(); }
- uint16_t getAttrByIndex(uint32_t idx) const {
- return idx < Attributes.size() ? Attributes[idx].Attr : 0;
+
+ typedef iterator_range<AttributeSpecVector::const_iterator>
+ attr_iterator_range;
+
+ attr_iterator_range attributes() const {
+ return attr_iterator_range(AttributeSpecs.begin(), AttributeSpecs.end());
}
+
uint16_t getFormByIndex(uint32_t idx) const {
- return idx < Attributes.size() ? Attributes[idx].Form : 0;
+ return idx < AttributeSpecs.size() ? AttributeSpecs[idx].Form : 0;
}
uint32_t findAttributeIndex(uint16_t attr) const;
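
The header change replaces the index/count accessors for attribute specs with an iterator_range view named attributes(). A simplified standalone sketch of the same accessor pattern (Range, Spec and Decl are stand-ins, not the LLVM classes, and the codes are illustrative):

// Sketch of exposing a member container as an iteration range.
#include <cstdio>
#include <vector>

struct Spec { unsigned Attr, Form; };

template <typename It> struct Range {
  It B, E;
  It begin() const { return B; }
  It end() const { return E; }
};

struct Decl {
  std::vector<Spec> Specs;
  Range<std::vector<Spec>::const_iterator> attributes() const {
    return {Specs.begin(), Specs.end()};
  }
};

int main() {
  Decl D{{{0x03, 0x08}, {0x3e, 0x0b}}};
  for (const Spec &S : D.attributes())  // callers iterate, no index bookkeeping
    std::printf("attr 0x%02x form 0x%02x\n", S.Attr, S.Form);
}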
diff --git a/contrib/llvm/lib/DebugInfo/DWARFCompileUnit.h b/contrib/llvm/lib/DebugInfo/DWARFCompileUnit.h
index 1c9573b..2ed188e 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFCompileUnit.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFCompileUnit.h
@@ -16,13 +16,13 @@ namespace llvm {
class DWARFCompileUnit : public DWARFUnit {
public:
- DWARFCompileUnit(const DWARFDebugAbbrev *DA, StringRef IS, StringRef AS,
- StringRef RS, StringRef SS, StringRef SOS, StringRef AOS,
+ DWARFCompileUnit(const DWARFDebugAbbrev *DA, StringRef IS, StringRef RS,
+ StringRef SS, StringRef SOS, StringRef AOS,
const RelocAddrMap *M, bool LE)
- : DWARFUnit(DA, IS, AS, RS, SS, SOS, AOS, M, LE) {}
+ : DWARFUnit(DA, IS, RS, SS, SOS, AOS, M, LE) {}
void dump(raw_ostream &OS);
// VTable anchor.
- ~DWARFCompileUnit() LLVM_OVERRIDE;
+ ~DWARFCompileUnit() override;
};
}
diff --git a/contrib/llvm/lib/DebugInfo/DWARFContext.cpp b/contrib/llvm/lib/DebugInfo/DWARFContext.cpp
index e477190..3961905 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFContext.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFContext.cpp
@@ -8,9 +8,10 @@
//===----------------------------------------------------------------------===//
#include "DWARFContext.h"
+#include "DWARFDebugArangeSet.h"
+
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Compression.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/Format.h"
@@ -21,40 +22,40 @@ using namespace llvm;
using namespace dwarf;
using namespace object;
-typedef DWARFDebugLine::LineTable DWARFLineTable;
+#define DEBUG_TYPE "dwarf"
-DWARFContext::~DWARFContext() {
- DeleteContainerPointers(CUs);
- DeleteContainerPointers(TUs);
- DeleteContainerPointers(DWOCUs);
-}
+typedef DWARFDebugLine::LineTable DWARFLineTable;
+typedef DILineInfoSpecifier::FileLineInfoKind FileLineInfoKind;
+typedef DILineInfoSpecifier::FunctionNameKind FunctionNameKind;
static void dumpPubSection(raw_ostream &OS, StringRef Name, StringRef Data,
bool LittleEndian, bool GnuStyle) {
OS << "\n." << Name << " contents:\n";
DataExtractor pubNames(Data, LittleEndian, 0);
uint32_t offset = 0;
- OS << "length = " << format("0x%08x", pubNames.getU32(&offset));
- OS << " version = " << format("0x%04x", pubNames.getU16(&offset));
- OS << " unit_offset = " << format("0x%08x", pubNames.getU32(&offset));
- OS << " unit_size = " << format("0x%08x", pubNames.getU32(&offset)) << '\n';
- if (GnuStyle)
- OS << "Offset Linkage Kind Name\n";
- else
- OS << "Offset Name\n";
-
- while (offset < Data.size()) {
- uint32_t dieRef = pubNames.getU32(&offset);
- if (dieRef == 0)
- break;
- OS << format("0x%8.8x ", dieRef);
- if (GnuStyle) {
- PubIndexEntryDescriptor desc(pubNames.getU8(&offset));
- OS << format("%-8s", dwarf::GDBIndexEntryLinkageString(desc.Linkage))
- << ' ' << format("%-8s", dwarf::GDBIndexEntryKindString(desc.Kind))
- << ' ';
+ while (pubNames.isValidOffset(offset)) {
+ OS << "length = " << format("0x%08x", pubNames.getU32(&offset));
+ OS << " version = " << format("0x%04x", pubNames.getU16(&offset));
+ OS << " unit_offset = " << format("0x%08x", pubNames.getU32(&offset));
+ OS << " unit_size = " << format("0x%08x", pubNames.getU32(&offset)) << '\n';
+ if (GnuStyle)
+ OS << "Offset Linkage Kind Name\n";
+ else
+ OS << "Offset Name\n";
+
+ while (offset < Data.size()) {
+ uint32_t dieRef = pubNames.getU32(&offset);
+ if (dieRef == 0)
+ break;
+ OS << format("0x%8.8x ", dieRef);
+ if (GnuStyle) {
+ PubIndexEntryDescriptor desc(pubNames.getU8(&offset));
+ OS << format("%-8s", dwarf::GDBIndexEntryLinkageString(desc.Linkage))
+ << ' ' << format("%-8s", dwarf::GDBIndexEntryKindString(desc.Kind))
+ << ' ';
+ }
+ OS << '\"' << pubNames.getCStr(&offset) << "\"\n";
}
- OS << '\"' << pubNames.getCStr(&offset) << "\"\n";
}
}
@@ -64,16 +65,36 @@ void DWARFContext::dump(raw_ostream &OS, DIDumpType DumpType) {
getDebugAbbrev()->dump(OS);
}
+ if (DumpType == DIDT_All || DumpType == DIDT_AbbrevDwo)
+ if (const DWARFDebugAbbrev *D = getDebugAbbrevDWO()) {
+ OS << "\n.debug_abbrev.dwo contents:\n";
+ D->dump(OS);
+ }
+
if (DumpType == DIDT_All || DumpType == DIDT_Info) {
OS << "\n.debug_info contents:\n";
- for (unsigned i = 0, e = getNumCompileUnits(); i != e; ++i)
- getCompileUnitAtIndex(i)->dump(OS);
+ for (const auto &CU : compile_units())
+ CU->dump(OS);
}
- if (DumpType == DIDT_All || DumpType == DIDT_Types) {
+ if ((DumpType == DIDT_All || DumpType == DIDT_InfoDwo) &&
+ getNumDWOCompileUnits()) {
+ OS << "\n.debug_info.dwo contents:\n";
+ for (const auto &DWOCU : dwo_compile_units())
+ DWOCU->dump(OS);
+ }
+
+ if ((DumpType == DIDT_All || DumpType == DIDT_Types) && getNumTypeUnits()) {
OS << "\n.debug_types contents:\n";
- for (unsigned i = 0, e = getNumTypeUnits(); i != e; ++i)
- getTypeUnitAtIndex(i)->dump(OS);
+ for (const auto &TU : type_units())
+ TU->dump(OS);
+ }
+
+ if ((DumpType == DIDT_All || DumpType == DIDT_TypesDwo) &&
+ getNumDWOTypeUnits()) {
+ OS << "\n.debug_types.dwo contents:\n";
+ for (const auto &DWOTU : dwo_type_units())
+ DWOTU->dump(OS);
}
if (DumpType == DIDT_All || DumpType == DIDT_Loc) {
@@ -81,6 +102,11 @@ void DWARFContext::dump(raw_ostream &OS, DIDumpType DumpType) {
getDebugLoc()->dump(OS);
}
+ if (DumpType == DIDT_All || DumpType == DIDT_LocDwo) {
+ OS << "\n.debug_loc.dwo contents:\n";
+ getDebugLocDWO()->dump(OS);
+ }
+
if (DumpType == DIDT_All || DumpType == DIDT_Frames) {
OS << "\n.debug_frame contents:\n";
getDebugFrame()->dump(OS);
@@ -98,21 +124,33 @@ void DWARFContext::dump(raw_ostream &OS, DIDumpType DumpType) {
uint8_t savedAddressByteSize = 0;
if (DumpType == DIDT_All || DumpType == DIDT_Line) {
OS << "\n.debug_line contents:\n";
- for (unsigned i = 0, e = getNumCompileUnits(); i != e; ++i) {
- DWARFCompileUnit *cu = getCompileUnitAtIndex(i);
- savedAddressByteSize = cu->getAddressByteSize();
+ for (const auto &CU : compile_units()) {
+ savedAddressByteSize = CU->getAddressByteSize();
unsigned stmtOffset =
- cu->getCompileUnitDIE()->getAttributeValueAsSectionOffset(
- cu, DW_AT_stmt_list, -1U);
+ CU->getCompileUnitDIE()->getAttributeValueAsSectionOffset(
+ CU.get(), DW_AT_stmt_list, -1U);
if (stmtOffset != -1U) {
DataExtractor lineData(getLineSection().Data, isLittleEndian(),
savedAddressByteSize);
- DWARFDebugLine::DumpingState state(OS);
- DWARFDebugLine::parseStatementTable(lineData, &getLineSection().Relocs, &stmtOffset, state);
+ DWARFDebugLine::LineTable LineTable;
+ LineTable.parse(lineData, &getLineSection().Relocs, &stmtOffset);
+ LineTable.dump(OS);
}
}
}
+ if (DumpType == DIDT_All || DumpType == DIDT_LineDwo) {
+ OS << "\n.debug_line.dwo contents:\n";
+ unsigned stmtOffset = 0;
+ DataExtractor lineData(getLineDWOSection().Data, isLittleEndian(),
+ savedAddressByteSize);
+ DWARFDebugLine::LineTable LineTable;
+ while (LineTable.Prologue.parse(lineData, &stmtOffset)) {
+ LineTable.dump(OS);
+ LineTable.clear();
+ }
+ }
+
if (DumpType == DIDT_All || DumpType == DIDT_Str) {
OS << "\n.debug_str contents:\n";
DataExtractor strData(getStringSection(), isLittleEndian(), 0);
@@ -124,6 +162,18 @@ void DWARFContext::dump(raw_ostream &OS, DIDumpType DumpType) {
}
}
+ if ((DumpType == DIDT_All || DumpType == DIDT_StrDwo) &&
+ !getStringDWOSection().empty()) {
+ OS << "\n.debug_str.dwo contents:\n";
+ DataExtractor strDWOData(getStringDWOSection(), isLittleEndian(), 0);
+ offset = 0;
+ uint32_t strDWOOffset = 0;
+ while (const char *s = strDWOData.getCStr(&offset)) {
+ OS << format("0x%8.8x: \"%s\"\n", strDWOOffset, s);
+ strDWOOffset = offset;
+ }
+ }
+
if (DumpType == DIDT_All || DumpType == DIDT_Ranges) {
OS << "\n.debug_ranges contents:\n";
// In fact, different compile units may have different address byte
@@ -154,44 +204,18 @@ void DWARFContext::dump(raw_ostream &OS, DIDumpType DumpType) {
dumpPubSection(OS, "debug_gnu_pubtypes", getGnuPubTypesSection(),
isLittleEndian(), true /* GnuStyle */);
- if (DumpType == DIDT_All || DumpType == DIDT_AbbrevDwo) {
- const DWARFDebugAbbrev *D = getDebugAbbrevDWO();
- if (D) {
- OS << "\n.debug_abbrev.dwo contents:\n";
- getDebugAbbrevDWO()->dump(OS);
+ if ((DumpType == DIDT_All || DumpType == DIDT_StrOffsetsDwo) &&
+ !getStringOffsetDWOSection().empty()) {
+ OS << "\n.debug_str_offsets.dwo contents:\n";
+ DataExtractor strOffsetExt(getStringOffsetDWOSection(), isLittleEndian(),
+ 0);
+ offset = 0;
+ uint64_t size = getStringOffsetDWOSection().size();
+ while (offset < size) {
+ OS << format("0x%8.8x: ", offset);
+ OS << format("%8.8x\n", strOffsetExt.getU32(&offset));
}
}
-
- if (DumpType == DIDT_All || DumpType == DIDT_InfoDwo)
- if (getNumDWOCompileUnits()) {
- OS << "\n.debug_info.dwo contents:\n";
- for (unsigned i = 0, e = getNumDWOCompileUnits(); i != e; ++i)
- getDWOCompileUnitAtIndex(i)->dump(OS);
- }
-
- if (DumpType == DIDT_All || DumpType == DIDT_StrDwo)
- if (!getStringDWOSection().empty()) {
- OS << "\n.debug_str.dwo contents:\n";
- DataExtractor strDWOData(getStringDWOSection(), isLittleEndian(), 0);
- offset = 0;
- uint32_t strDWOOffset = 0;
- while (const char *s = strDWOData.getCStr(&offset)) {
- OS << format("0x%8.8x: \"%s\"\n", strDWOOffset, s);
- strDWOOffset = offset;
- }
- }
-
- if (DumpType == DIDT_All || DumpType == DIDT_StrOffsetsDwo)
- if (!getStringOffsetDWOSection().empty()) {
- OS << "\n.debug_str_offsets.dwo contents:\n";
- DataExtractor strOffsetExt(getStringOffsetDWOSection(), isLittleEndian(), 0);
- offset = 0;
- uint64_t size = getStringOffsetDWOSection().size();
- while (offset < size) {
- OS << format("0x%8.8x: ", offset);
- OS << format("%8.8x\n", strOffsetExt.getU32(&offset));
- }
- }
}
const DWARFDebugAbbrev *DWARFContext::getDebugAbbrev() {
@@ -201,7 +225,7 @@ const DWARFDebugAbbrev *DWARFContext::getDebugAbbrev() {
DataExtractor abbrData(getAbbrevSection(), isLittleEndian(), 0);
Abbrev.reset(new DWARFDebugAbbrev());
- Abbrev->parse(abbrData);
+ Abbrev->extract(abbrData);
return Abbrev.get();
}
@@ -211,7 +235,7 @@ const DWARFDebugAbbrev *DWARFContext::getDebugAbbrevDWO() {
DataExtractor abbrData(getAbbrevDWOSection(), isLittleEndian(), 0);
AbbrevDWO.reset(new DWARFDebugAbbrev());
- AbbrevDWO->parse(abbrData);
+ AbbrevDWO->extract(abbrData);
return AbbrevDWO.get();
}
@@ -227,6 +251,16 @@ const DWARFDebugLoc *DWARFContext::getDebugLoc() {
return Loc.get();
}
+const DWARFDebugLocDWO *DWARFContext::getDebugLocDWO() {
+ if (LocDWO)
+ return LocDWO.get();
+
+ DataExtractor LocData(getLocDWOSection().Data, isLittleEndian(), 0);
+ LocDWO.reset(new DWARFDebugLocDWO());
+ LocDWO->parse(LocData);
+ return LocDWO.get();
+}
+
const DWARFDebugAranges *DWARFContext::getDebugAranges() {
if (Aranges)
return Aranges.get();
@@ -265,7 +299,7 @@ DWARFContext::getLineTableForCompileUnit(DWARFCompileUnit *cu) {
cu->getCompileUnitDIE()->getAttributeValueAsSectionOffset(
cu, DW_AT_stmt_list, -1U);
if (stmtOffset == -1U)
- return 0; // No line table for this compile unit.
+ return nullptr; // No line table for this compile unit.
// See if the line table is cached.
if (const DWARFLineTable *lt = Line->getLineTable(stmtOffset))
@@ -278,87 +312,110 @@ DWARFContext::getLineTableForCompileUnit(DWARFCompileUnit *cu) {
}
void DWARFContext::parseCompileUnits() {
+ if (!CUs.empty())
+ return;
uint32_t offset = 0;
const DataExtractor &DIData = DataExtractor(getInfoSection().Data,
isLittleEndian(), 0);
while (DIData.isValidOffset(offset)) {
- OwningPtr<DWARFCompileUnit> CU(new DWARFCompileUnit(
- getDebugAbbrev(), getInfoSection().Data, getAbbrevSection(),
- getRangeSection(), getStringSection(), StringRef(), getAddrSection(),
+ std::unique_ptr<DWARFCompileUnit> CU(new DWARFCompileUnit(
+ getDebugAbbrev(), getInfoSection().Data, getRangeSection(),
+ getStringSection(), StringRef(), getAddrSection(),
&getInfoSection().Relocs, isLittleEndian()));
if (!CU->extract(DIData, &offset)) {
break;
}
- CUs.push_back(CU.take());
+ CUs.push_back(std::move(CU));
offset = CUs.back()->getNextUnitOffset();
}
}
void DWARFContext::parseTypeUnits() {
- const std::map<object::SectionRef, Section> &Sections = getTypesSections();
- for (std::map<object::SectionRef, Section>::const_iterator
- I = Sections.begin(),
- E = Sections.end();
- I != E; ++I) {
+ if (!TUs.empty())
+ return;
+ for (const auto &I : getTypesSections()) {
uint32_t offset = 0;
const DataExtractor &DIData =
- DataExtractor(I->second.Data, isLittleEndian(), 0);
+ DataExtractor(I.second.Data, isLittleEndian(), 0);
while (DIData.isValidOffset(offset)) {
- OwningPtr<DWARFTypeUnit> TU(new DWARFTypeUnit(
- getDebugAbbrev(), I->second.Data, getAbbrevSection(),
- getRangeSection(), getStringSection(), StringRef(), getAddrSection(),
- &I->second.Relocs, isLittleEndian()));
+ std::unique_ptr<DWARFTypeUnit> TU(
+ new DWARFTypeUnit(getDebugAbbrev(), I.second.Data, getRangeSection(),
+ getStringSection(), StringRef(), getAddrSection(),
+ &I.second.Relocs, isLittleEndian()));
if (!TU->extract(DIData, &offset))
break;
- TUs.push_back(TU.take());
+ TUs.push_back(std::move(TU));
offset = TUs.back()->getNextUnitOffset();
}
}
}
void DWARFContext::parseDWOCompileUnits() {
+ if (!DWOCUs.empty())
+ return;
uint32_t offset = 0;
const DataExtractor &DIData =
DataExtractor(getInfoDWOSection().Data, isLittleEndian(), 0);
while (DIData.isValidOffset(offset)) {
- OwningPtr<DWARFCompileUnit> DWOCU(new DWARFCompileUnit(
- getDebugAbbrevDWO(), getInfoDWOSection().Data, getAbbrevDWOSection(),
- getRangeDWOSection(), getStringDWOSection(),
- getStringOffsetDWOSection(), getAddrSection(),
+ std::unique_ptr<DWARFCompileUnit> DWOCU(new DWARFCompileUnit(
+ getDebugAbbrevDWO(), getInfoDWOSection().Data, getRangeDWOSection(),
+ getStringDWOSection(), getStringOffsetDWOSection(), getAddrSection(),
&getInfoDWOSection().Relocs, isLittleEndian()));
if (!DWOCU->extract(DIData, &offset)) {
break;
}
- DWOCUs.push_back(DWOCU.take());
+ DWOCUs.push_back(std::move(DWOCU));
offset = DWOCUs.back()->getNextUnitOffset();
}
}
+void DWARFContext::parseDWOTypeUnits() {
+ if (!DWOTUs.empty())
+ return;
+ for (const auto &I : getTypesDWOSections()) {
+ uint32_t offset = 0;
+ const DataExtractor &DIData =
+ DataExtractor(I.second.Data, isLittleEndian(), 0);
+ while (DIData.isValidOffset(offset)) {
+ std::unique_ptr<DWARFTypeUnit> TU(new DWARFTypeUnit(
+ getDebugAbbrevDWO(), I.second.Data, getRangeDWOSection(),
+ getStringDWOSection(), getStringOffsetDWOSection(), getAddrSection(),
+ &I.second.Relocs, isLittleEndian()));
+ if (!TU->extract(DIData, &offset))
+ break;
+ DWOTUs.push_back(std::move(TU));
+ offset = DWOTUs.back()->getNextUnitOffset();
+ }
+ }
+}
+
namespace {
struct OffsetComparator {
- bool operator()(const DWARFCompileUnit *LHS,
- const DWARFCompileUnit *RHS) const {
+
+ bool operator()(const std::unique_ptr<DWARFCompileUnit> &LHS,
+ const std::unique_ptr<DWARFCompileUnit> &RHS) const {
return LHS->getOffset() < RHS->getOffset();
}
- bool operator()(const DWARFCompileUnit *LHS, uint32_t RHS) const {
+ bool operator()(const std::unique_ptr<DWARFCompileUnit> &LHS,
+ uint32_t RHS) const {
return LHS->getOffset() < RHS;
}
- bool operator()(uint32_t LHS, const DWARFCompileUnit *RHS) const {
+ bool operator()(uint32_t LHS,
+ const std::unique_ptr<DWARFCompileUnit> &RHS) const {
return LHS < RHS->getOffset();
}
};
}
DWARFCompileUnit *DWARFContext::getCompileUnitForOffset(uint32_t Offset) {
- if (CUs.empty())
- parseCompileUnits();
+ parseCompileUnits();
- DWARFCompileUnit **CU =
+ std::unique_ptr<DWARFCompileUnit> *CU =
std::lower_bound(CUs.begin(), CUs.end(), Offset, OffsetComparator());
if (CU != CUs.end()) {
- return *CU;
+ return CU->get();
}
- return 0;
+ return nullptr;
}
DWARFCompileUnit *DWARFContext::getCompileUnitForAddress(uint64_t Address) {
@@ -370,15 +427,13 @@ DWARFCompileUnit *DWARFContext::getCompileUnitForAddress(uint64_t Address) {
static bool getFileNameForCompileUnit(DWARFCompileUnit *CU,
const DWARFLineTable *LineTable,
- uint64_t FileIndex,
- bool NeedsAbsoluteFilePath,
+ uint64_t FileIndex, FileLineInfoKind Kind,
std::string &FileName) {
- if (CU == 0 ||
- LineTable == 0 ||
- !LineTable->getFileNameByIndex(FileIndex, NeedsAbsoluteFilePath,
- FileName))
+ if (!CU || !LineTable || Kind == FileLineInfoKind::None ||
+ !LineTable->getFileNameByIndex(FileIndex, Kind, FileName))
return false;
- if (NeedsAbsoluteFilePath && sys::path::is_relative(FileName)) {
+ if (Kind == FileLineInfoKind::AbsoluteFilePath &&
+ sys::path::is_relative(FileName)) {
// We may still need to append compilation directory of compile unit.
SmallString<16> AbsolutePath;
if (const char *CompilationDir = CU->getCompilationDir()) {
@@ -393,10 +448,9 @@ static bool getFileNameForCompileUnit(DWARFCompileUnit *CU,
static bool getFileLineInfoForCompileUnit(DWARFCompileUnit *CU,
const DWARFLineTable *LineTable,
uint64_t Address,
- bool NeedsAbsoluteFilePath,
- std::string &FileName,
- uint32_t &Line, uint32_t &Column) {
- if (CU == 0 || LineTable == 0)
+ FileLineInfoKind Kind,
+ DILineInfo &Result) {
+ if (!CU || !LineTable)
return false;
// Get the index of row we're looking for in the line table.
uint32_t RowIndex = LineTable->lookupAddress(Address);
@@ -404,144 +458,141 @@ static bool getFileLineInfoForCompileUnit(DWARFCompileUnit *CU,
return false;
// Take file number and line/column from the row.
const DWARFDebugLine::Row &Row = LineTable->Rows[RowIndex];
- if (!getFileNameForCompileUnit(CU, LineTable, Row.File,
- NeedsAbsoluteFilePath, FileName))
+ if (!getFileNameForCompileUnit(CU, LineTable, Row.File, Kind,
+ Result.FileName))
return false;
- Line = Row.Line;
- Column = Row.Column;
+ Result.Line = Row.Line;
+ Result.Column = Row.Column;
return true;
}
+static bool getFunctionNameForAddress(DWARFCompileUnit *CU, uint64_t Address,
+ FunctionNameKind Kind,
+ std::string &FunctionName) {
+ if (Kind == FunctionNameKind::None)
+ return false;
+ // The address may correspond to instruction in some inlined function,
+ // so we have to build the chain of inlined functions and take the
+ // name of the topmost function in it.
+ const DWARFDebugInfoEntryInlinedChain &InlinedChain =
+ CU->getInlinedChainForAddress(Address);
+ if (InlinedChain.DIEs.size() == 0)
+ return false;
+ const DWARFDebugInfoEntryMinimal &TopFunctionDIE = InlinedChain.DIEs[0];
+ if (const char *Name =
+ TopFunctionDIE.getSubroutineName(InlinedChain.U, Kind)) {
+ FunctionName = Name;
+ return true;
+ }
+ return false;
+}
+
DILineInfo DWARFContext::getLineInfoForAddress(uint64_t Address,
- DILineInfoSpecifier Specifier) {
+ DILineInfoSpecifier Spec) {
+ DILineInfo Result;
+
DWARFCompileUnit *CU = getCompileUnitForAddress(Address);
if (!CU)
- return DILineInfo();
- std::string FileName = "<invalid>";
- std::string FunctionName = "<invalid>";
- uint32_t Line = 0;
- uint32_t Column = 0;
- if (Specifier.needs(DILineInfoSpecifier::FunctionName)) {
- // The address may correspond to instruction in some inlined function,
- // so we have to build the chain of inlined functions and take the
- // name of the topmost function in it.
- const DWARFDebugInfoEntryInlinedChain &InlinedChain =
- CU->getInlinedChainForAddress(Address);
- if (InlinedChain.DIEs.size() > 0) {
- const DWARFDebugInfoEntryMinimal &TopFunctionDIE = InlinedChain.DIEs[0];
- if (const char *Name = TopFunctionDIE.getSubroutineName(InlinedChain.U))
- FunctionName = Name;
- }
- }
- if (Specifier.needs(DILineInfoSpecifier::FileLineInfo)) {
+ return Result;
+ getFunctionNameForAddress(CU, Address, Spec.FNKind, Result.FunctionName);
+ if (Spec.FLIKind != FileLineInfoKind::None) {
const DWARFLineTable *LineTable = getLineTableForCompileUnit(CU);
- const bool NeedsAbsoluteFilePath =
- Specifier.needs(DILineInfoSpecifier::AbsoluteFilePath);
- getFileLineInfoForCompileUnit(CU, LineTable, Address,
- NeedsAbsoluteFilePath,
- FileName, Line, Column);
+ getFileLineInfoForCompileUnit(CU, LineTable, Address, Spec.FLIKind, Result);
}
- return DILineInfo(StringRef(FileName), StringRef(FunctionName),
- Line, Column);
+ return Result;
}
-DILineInfoTable DWARFContext::getLineInfoForAddressRange(uint64_t Address,
- uint64_t Size,
- DILineInfoSpecifier Specifier) {
+DILineInfoTable
+DWARFContext::getLineInfoForAddressRange(uint64_t Address, uint64_t Size,
+ DILineInfoSpecifier Spec) {
DILineInfoTable Lines;
DWARFCompileUnit *CU = getCompileUnitForAddress(Address);
if (!CU)
return Lines;
std::string FunctionName = "<invalid>";
- if (Specifier.needs(DILineInfoSpecifier::FunctionName)) {
- // The address may correspond to instruction in some inlined function,
- // so we have to build the chain of inlined functions and take the
- // name of the topmost function in it.
- const DWARFDebugInfoEntryInlinedChain &InlinedChain =
- CU->getInlinedChainForAddress(Address);
- if (InlinedChain.DIEs.size() > 0) {
- const DWARFDebugInfoEntryMinimal &TopFunctionDIE = InlinedChain.DIEs[0];
- if (const char *Name = TopFunctionDIE.getSubroutineName(InlinedChain.U))
- FunctionName = Name;
- }
- }
+ getFunctionNameForAddress(CU, Address, Spec.FNKind, FunctionName);
// If the Specifier says we don't need FileLineInfo, just
// return the top-most function at the starting address.
- if (!Specifier.needs(DILineInfoSpecifier::FileLineInfo)) {
- Lines.push_back(
- std::make_pair(Address, DILineInfo("<invalid>", FunctionName, 0, 0)));
+ if (Spec.FLIKind == FileLineInfoKind::None) {
+ DILineInfo Result;
+ Result.FunctionName = FunctionName;
+ Lines.push_back(std::make_pair(Address, Result));
return Lines;
}
const DWARFLineTable *LineTable = getLineTableForCompileUnit(CU);
- const bool NeedsAbsoluteFilePath =
- Specifier.needs(DILineInfoSpecifier::AbsoluteFilePath);
// Get the index of row we're looking for in the line table.
std::vector<uint32_t> RowVector;
if (!LineTable->lookupAddressRange(Address, Size, RowVector))
return Lines;
- uint32_t NumRows = RowVector.size();
- for (uint32_t i = 0; i < NumRows; ++i) {
- uint32_t RowIndex = RowVector[i];
+ for (uint32_t RowIndex : RowVector) {
// Take file number and line/column from the row.
const DWARFDebugLine::Row &Row = LineTable->Rows[RowIndex];
- std::string FileName = "<invalid>";
- getFileNameForCompileUnit(CU, LineTable, Row.File,
- NeedsAbsoluteFilePath, FileName);
- Lines.push_back(std::make_pair(
- Row.Address, DILineInfo(FileName, FunctionName, Row.Line, Row.Column)));
+ DILineInfo Result;
+ getFileNameForCompileUnit(CU, LineTable, Row.File, Spec.FLIKind,
+ Result.FileName);
+ Result.FunctionName = FunctionName;
+ Result.Line = Row.Line;
+ Result.Column = Row.Column;
+ Lines.push_back(std::make_pair(Row.Address, Result));
}
return Lines;
}
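The table built above pairs each line-table row's start address with a fully populated DILineInfo, which is what the std::make_pair calls imply about the element type. A sketch of walking the result, under the same assumptions as the previous example (Ctx, FuncStart and FuncSize are placeholders for the caller's own values):

DILineInfoTable Table = Ctx.getLineInfoForAddressRange(FuncStart, FuncSize, Spec);
for (const auto &Entry : Table) {
  // Entry.first is the row address, Entry.second the DILineInfo for that row.
  outs() << format("0x%08" PRIx64, Entry.first) << "  "
         << Entry.second.FileName << ":" << Entry.second.Line << "\n";
}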
-DIInliningInfo DWARFContext::getInliningInfoForAddress(uint64_t Address,
- DILineInfoSpecifier Specifier) {
+DIInliningInfo
+DWARFContext::getInliningInfoForAddress(uint64_t Address,
+ DILineInfoSpecifier Spec) {
+ DIInliningInfo InliningInfo;
+
DWARFCompileUnit *CU = getCompileUnitForAddress(Address);
if (!CU)
- return DIInliningInfo();
+ return InliningInfo;
+ const DWARFLineTable *LineTable = nullptr;
const DWARFDebugInfoEntryInlinedChain &InlinedChain =
CU->getInlinedChainForAddress(Address);
- if (InlinedChain.DIEs.size() == 0)
- return DIInliningInfo();
+ if (InlinedChain.DIEs.size() == 0) {
+    // If there is no DIE for the address (e.g. it is in an unavailable .dwo
+    // file), try to at least get file/line info from the line table.
+ if (Spec.FLIKind != FileLineInfoKind::None) {
+ DILineInfo Frame;
+ LineTable = getLineTableForCompileUnit(CU);
+ if (getFileLineInfoForCompileUnit(CU, LineTable, Address, Spec.FLIKind,
+ Frame)) {
+ InliningInfo.addFrame(Frame);
+ }
+ }
+ return InliningInfo;
+ }
- DIInliningInfo InliningInfo;
uint32_t CallFile = 0, CallLine = 0, CallColumn = 0;
- const DWARFLineTable *LineTable = 0;
for (uint32_t i = 0, n = InlinedChain.DIEs.size(); i != n; i++) {
const DWARFDebugInfoEntryMinimal &FunctionDIE = InlinedChain.DIEs[i];
- std::string FileName = "<invalid>";
- std::string FunctionName = "<invalid>";
- uint32_t Line = 0;
- uint32_t Column = 0;
+ DILineInfo Frame;
// Get function name if necessary.
- if (Specifier.needs(DILineInfoSpecifier::FunctionName)) {
- if (const char *Name = FunctionDIE.getSubroutineName(InlinedChain.U))
- FunctionName = Name;
- }
- if (Specifier.needs(DILineInfoSpecifier::FileLineInfo)) {
- const bool NeedsAbsoluteFilePath =
- Specifier.needs(DILineInfoSpecifier::AbsoluteFilePath);
+ if (const char *Name =
+ FunctionDIE.getSubroutineName(InlinedChain.U, Spec.FNKind))
+ Frame.FunctionName = Name;
+ if (Spec.FLIKind != FileLineInfoKind::None) {
if (i == 0) {
// For the topmost frame, initialize the line table of this
// compile unit and fetch file/line info from it.
LineTable = getLineTableForCompileUnit(CU);
// For the topmost routine, get file/line info from line table.
- getFileLineInfoForCompileUnit(CU, LineTable, Address,
- NeedsAbsoluteFilePath,
- FileName, Line, Column);
+ getFileLineInfoForCompileUnit(CU, LineTable, Address, Spec.FLIKind,
+ Frame);
} else {
// Otherwise, use call file, call line and call column from
// previous DIE in inlined chain.
- getFileNameForCompileUnit(CU, LineTable, CallFile,
- NeedsAbsoluteFilePath, FileName);
- Line = CallLine;
- Column = CallColumn;
+ getFileNameForCompileUnit(CU, LineTable, CallFile, Spec.FLIKind,
+ Frame.FileName);
+ Frame.Line = CallLine;
+ Frame.Column = CallColumn;
}
// Get call file/line/column of a current DIE.
if (i + 1 < n) {
@@ -549,8 +600,6 @@ DIInliningInfo DWARFContext::getInliningInfoForAddress(uint64_t Address,
CallColumn);
}
}
- DILineInfo Frame(StringRef(FileName), StringRef(FunctionName),
- Line, Column);
InliningInfo.addFrame(Frame);
}
return InliningInfo;
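The loop above has a deliberate shift-by-one: frame 0 takes its file/line from the line table at the queried address, while every deeper frame takes the call file/line recorded on the previous, more deeply inlined DIE. A self-contained toy that mirrors only that propagation, with plain structs standing in for the DWARF types:

#include <cstdint>
#include <cstdio>
#include <vector>

struct ToyDIE {           // stands in for one entry of the inlined chain
  const char *Name;
  const char *CallFile;   // where this DIE's caller should point
  uint32_t CallLine;
};

int main() {
  // Innermost function first, as in DWARFDebugInfoEntryInlinedChain.
  std::vector<ToyDIE> Chain = {
      {"inlined_leaf", "caller.cc", 42},
      {"outer", nullptr, 0},
  };
  const char *File = "leaf.cc"; // from the line table at the query address
  uint32_t Line = 7;
  for (size_t I = 0; I != Chain.size(); ++I) {
    std::printf("%s at %s:%u\n", Chain[I].Name, File, Line);
    // Call-site info of the current DIE becomes the location of the next frame.
    if (I + 1 < Chain.size()) {
      File = Chain[I].CallFile;
      Line = Chain[I].CallLine;
    }
  }
}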
@@ -572,17 +621,14 @@ static bool consumeCompressedDebugSectionHeader(StringRef &data,
return true;
}
-DWARFContextInMemory::DWARFContextInMemory(object::ObjectFile *Obj) :
- IsLittleEndian(Obj->isLittleEndian()),
- AddressSize(Obj->getBytesInAddress()) {
- error_code ec;
- for (object::section_iterator i = Obj->begin_sections(),
- e = Obj->end_sections();
- i != e; i.increment(ec)) {
+DWARFContextInMemory::DWARFContextInMemory(object::ObjectFile *Obj)
+ : IsLittleEndian(Obj->isLittleEndian()),
+ AddressSize(Obj->getBytesInAddress()) {
+ for (const SectionRef &Section : Obj->sections()) {
StringRef name;
- i->getName(name);
+ Section.getName(name);
StringRef data;
- i->getContents(data);
+ Section.getContents(data);
name = name.substr(name.find_first_not_of("._")); // Skip . and _ prefixes.
@@ -592,17 +638,18 @@ DWARFContextInMemory::DWARFContextInMemory(object::ObjectFile *Obj) :
if (!zlib::isAvailable() ||
!consumeCompressedDebugSectionHeader(data, OriginalSize))
continue;
- OwningPtr<MemoryBuffer> UncompressedSection;
- if (zlib::uncompress(data, UncompressedSection, OriginalSize) !=
- zlib::StatusOK)
+ UncompressedSections.resize(UncompressedSections.size() + 1);
+ if (zlib::uncompress(data, UncompressedSections.back(), OriginalSize) !=
+ zlib::StatusOK) {
+ UncompressedSections.pop_back();
continue;
+ }
// Make data point to uncompressed section contents and save its contents.
name = name.substr(1);
- data = UncompressedSection->getBuffer();
- UncompressedSections.push_back(UncompressedSection.take());
+ data = UncompressedSections.back();
}
- StringRef *Section =
+ StringRef *SectionData =
StringSwitch<StringRef *>(name)
.Case("debug_info", &InfoSection.Data)
.Case("debug_abbrev", &AbbrevSection)
@@ -618,13 +665,15 @@ DWARFContextInMemory::DWARFContextInMemory(object::ObjectFile *Obj) :
.Case("debug_gnu_pubtypes", &GnuPubTypesSection)
.Case("debug_info.dwo", &InfoDWOSection.Data)
.Case("debug_abbrev.dwo", &AbbrevDWOSection)
+ .Case("debug_loc.dwo", &LocDWOSection.Data)
+ .Case("debug_line.dwo", &LineDWOSection.Data)
.Case("debug_str.dwo", &StringDWOSection)
.Case("debug_str_offsets.dwo", &StringOffsetDWOSection)
.Case("debug_addr", &AddrSection)
// Any more debug info sections go here.
- .Default(0);
- if (Section) {
- *Section = data;
+ .Default(nullptr);
+ if (SectionData) {
+ *SectionData = data;
if (name == "debug_ranges") {
// FIXME: Use the other dwo range section when we emit it.
RangeDWOSection = data;
@@ -632,11 +681,13 @@ DWARFContextInMemory::DWARFContextInMemory(object::ObjectFile *Obj) :
} else if (name == "debug_types") {
// Find debug_types data by section rather than name as there are
// multiple, comdat grouped, debug_types sections.
- TypesSections[*i].Data = data;
+ TypesSections[Section].Data = data;
+ } else if (name == "debug_types.dwo") {
+ TypesDWOSections[Section].Data = data;
}
- section_iterator RelocatedSection = i->getRelocatedSection();
- if (RelocatedSection == Obj->end_sections())
+ section_iterator RelocatedSection = Section.getRelocatedSection();
+ if (RelocatedSection == Obj->section_end())
continue;
StringRef RelSecName;
@@ -651,38 +702,39 @@ DWARFContextInMemory::DWARFContextInMemory(object::ObjectFile *Obj) :
.Case("debug_loc", &LocSection.Relocs)
.Case("debug_info.dwo", &InfoDWOSection.Relocs)
.Case("debug_line", &LineSection.Relocs)
- .Default(0);
+ .Default(nullptr);
if (!Map) {
- if (RelSecName != "debug_types")
- continue;
// Find debug_types relocs by section rather than name as there are
// multiple, comdat grouped, debug_types sections.
- Map = &TypesSections[*RelocatedSection].Relocs;
+ if (RelSecName == "debug_types")
+ Map = &TypesSections[*RelocatedSection].Relocs;
+ else if (RelSecName == "debug_types.dwo")
+ Map = &TypesDWOSections[*RelocatedSection].Relocs;
+ else
+ continue;
}
- if (i->begin_relocations() != i->end_relocations()) {
+ if (Section.relocation_begin() != Section.relocation_end()) {
uint64_t SectionSize;
RelocatedSection->getSize(SectionSize);
- for (object::relocation_iterator reloc_i = i->begin_relocations(),
- reloc_e = i->end_relocations();
- reloc_i != reloc_e; reloc_i.increment(ec)) {
+ for (const RelocationRef &Reloc : Section.relocations()) {
uint64_t Address;
- reloc_i->getOffset(Address);
+ Reloc.getOffset(Address);
uint64_t Type;
- reloc_i->getType(Type);
+ Reloc.getType(Type);
uint64_t SymAddr = 0;
// ELF relocations may need the symbol address
if (Obj->isELF()) {
- object::symbol_iterator Sym = reloc_i->getSymbol();
+ object::symbol_iterator Sym = Reloc.getSymbol();
Sym->getAddress(SymAddr);
}
object::RelocVisitor V(Obj->getFileFormatName());
// The section address is always 0 for debug sections.
- object::RelocToApply R(V.visit(Type, *reloc_i, 0, SymAddr));
+ object::RelocToApply R(V.visit(Type, Reloc, 0, SymAddr));
if (V.error()) {
SmallString<32> Name;
- error_code ec(reloc_i->getTypeName(Name));
+ std::error_code ec(Reloc.getTypeName(Name));
if (ec) {
errs() << "Aaaaaa! Nameless relocation! Aaaaaa!\n";
}
@@ -712,8 +764,4 @@ DWARFContextInMemory::DWARFContextInMemory(object::ObjectFile *Obj) :
}
}
-DWARFContextInMemory::~DWARFContextInMemory() {
- DeleteContainerPointers(UncompressedSections);
-}
-
void DWARFContextInMemory::anchor() { }
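The constructor now decompresses straight into storage it owns, which is why the OwningPtr bookkeeping and the explicit destructor disappear in this diff. A standalone sketch of the same append/decompress/pop-back-on-failure pattern; tryDecompress is a hypothetical stand-in for zlib::uncompress so the example compiles on its own.

#include <string>
#include <vector>

// Hypothetical helper: returns true on success and fills Out.
static bool tryDecompress(const std::string &Compressed, std::string &Out) {
  if (Compressed.empty())
    return false; // pretend an empty input fails to decompress
  Out = "decompressed:" + Compressed;
  return true;
}

int main() {
  std::vector<std::string> Inputs = {"abc", ""};
  std::vector<std::string> UncompressedSections;
  for (const std::string &Raw : Inputs) {
    // Reserve a slot, decompress into it, and drop the slot again on failure,
    // mirroring the resize()/back()/pop_back() sequence in the constructor.
    UncompressedSections.resize(UncompressedSections.size() + 1);
    if (!tryDecompress(Raw, UncompressedSections.back())) {
      UncompressedSections.pop_back();
      continue;
    }
    // The real code then points the section's StringRef at this stored buffer.
  }
  return UncompressedSections.size() == 1 ? 0 : 1;
}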
diff --git a/contrib/llvm/lib/DebugInfo/DWARFContext.h b/contrib/llvm/lib/DebugInfo/DWARFContext.h
index 03863ab..6d1ae92 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFContext.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFContext.h
@@ -17,7 +17,7 @@
#include "DWARFDebugLoc.h"
#include "DWARFDebugRangeList.h"
#include "DWARFTypeUnit.h"
-#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/DIContext.h"
@@ -28,30 +28,41 @@ namespace llvm {
/// information parsing. The actual data is supplied through pure virtual
/// methods that a concrete implementation provides.
class DWARFContext : public DIContext {
- SmallVector<DWARFCompileUnit *, 1> CUs;
- SmallVector<DWARFTypeUnit *, 1> TUs;
- OwningPtr<DWARFDebugAbbrev> Abbrev;
- OwningPtr<DWARFDebugLoc> Loc;
- OwningPtr<DWARFDebugAranges> Aranges;
- OwningPtr<DWARFDebugLine> Line;
- OwningPtr<DWARFDebugFrame> DebugFrame;
-
- SmallVector<DWARFCompileUnit *, 1> DWOCUs;
- OwningPtr<DWARFDebugAbbrev> AbbrevDWO;
+ typedef SmallVector<std::unique_ptr<DWARFCompileUnit>, 1> CUVector;
+ typedef SmallVector<std::unique_ptr<DWARFTypeUnit>, 1> TUVector;
+
+ CUVector CUs;
+ TUVector TUs;
+ std::unique_ptr<DWARFDebugAbbrev> Abbrev;
+ std::unique_ptr<DWARFDebugLoc> Loc;
+ std::unique_ptr<DWARFDebugAranges> Aranges;
+ std::unique_ptr<DWARFDebugLine> Line;
+ std::unique_ptr<DWARFDebugFrame> DebugFrame;
+
+ CUVector DWOCUs;
+ TUVector DWOTUs;
+ std::unique_ptr<DWARFDebugAbbrev> AbbrevDWO;
+ std::unique_ptr<DWARFDebugLocDWO> LocDWO;
DWARFContext(DWARFContext &) LLVM_DELETED_FUNCTION;
DWARFContext &operator=(DWARFContext &) LLVM_DELETED_FUNCTION;
- /// Read compile units from the debug_info section and store them in CUs.
+ /// Read compile units from the debug_info section (if necessary)
+ /// and store them in CUs.
void parseCompileUnits();
- /// Read type units from the debug_types sections and store them in CUs.
+ /// Read type units from the debug_types sections (if necessary)
+ /// and store them in TUs.
void parseTypeUnits();
- /// Read compile units from the debug_info.dwo section and store them in
- /// DWOCUs.
+ /// Read compile units from the debug_info.dwo section (if necessary)
+ /// and store them in DWOCUs.
void parseDWOCompileUnits();
+ /// Read type units from the debug_types.dwo section (if necessary)
+ /// and store them in DWOTUs.
+ void parseDWOTypeUnits();
+
public:
struct Section {
StringRef Data;
@@ -59,54 +70,74 @@ public:
};
DWARFContext() : DIContext(CK_DWARF) {}
- virtual ~DWARFContext();
static bool classof(const DIContext *DICtx) {
return DICtx->getKind() == CK_DWARF;
}
- virtual void dump(raw_ostream &OS, DIDumpType DumpType = DIDT_All);
+ void dump(raw_ostream &OS, DIDumpType DumpType = DIDT_All) override;
+
+ typedef iterator_range<CUVector::iterator> cu_iterator_range;
+ typedef iterator_range<TUVector::iterator> tu_iterator_range;
+
+ /// Get compile units in this context.
+ cu_iterator_range compile_units() {
+ parseCompileUnits();
+ return cu_iterator_range(CUs.begin(), CUs.end());
+ }
+
+ /// Get type units in this context.
+ tu_iterator_range type_units() {
+ parseTypeUnits();
+ return tu_iterator_range(TUs.begin(), TUs.end());
+ }
+
+ /// Get compile units in the DWO context.
+ cu_iterator_range dwo_compile_units() {
+ parseDWOCompileUnits();
+ return cu_iterator_range(DWOCUs.begin(), DWOCUs.end());
+ }
+
+ /// Get type units in the DWO context.
+ tu_iterator_range dwo_type_units() {
+ parseDWOTypeUnits();
+ return tu_iterator_range(DWOTUs.begin(), DWOTUs.end());
+ }
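These accessors let callers replace index loops with range-for; parsing is done lazily inside each call, so iterating a file with no units stays cheap. A sketch, with Ctx assumed as before; getOffset() on a compile unit is used elsewhere in this patch, and the units are held as std::unique_ptr so members are reached through ->.

for (const auto &CU : Ctx.compile_units())
  outs() << format("CU at offset 0x%08x\n", CU->getOffset());
for (const auto &DWOCU : Ctx.dwo_compile_units())
  outs() << format("DWO CU at offset 0x%08x\n", DWOCU->getOffset());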
/// Get the number of compile units in this context.
unsigned getNumCompileUnits() {
- if (CUs.empty())
- parseCompileUnits();
+ parseCompileUnits();
return CUs.size();
}
/// Get the number of compile units in this context.
unsigned getNumTypeUnits() {
- if (TUs.empty())
- parseTypeUnits();
+ parseTypeUnits();
return TUs.size();
}
/// Get the number of compile units in the DWO context.
unsigned getNumDWOCompileUnits() {
- if (DWOCUs.empty())
- parseDWOCompileUnits();
+ parseDWOCompileUnits();
return DWOCUs.size();
}
- /// Get the compile unit at the specified index for this compile unit.
- DWARFCompileUnit *getCompileUnitAtIndex(unsigned index) {
- if (CUs.empty())
- parseCompileUnits();
- return CUs[index];
+ /// Get the number of compile units in the DWO context.
+ unsigned getNumDWOTypeUnits() {
+ parseDWOTypeUnits();
+ return DWOTUs.size();
}
- /// Get the type unit at the specified index for this compile unit.
- DWARFTypeUnit *getTypeUnitAtIndex(unsigned index) {
- if (TUs.empty())
- parseTypeUnits();
- return TUs[index];
+ /// Get the compile unit at the specified index for this compile unit.
+ DWARFCompileUnit *getCompileUnitAtIndex(unsigned index) {
+ parseCompileUnits();
+ return CUs[index].get();
}
/// Get the compile unit at the specified index for the DWO compile units.
DWARFCompileUnit *getDWOCompileUnitAtIndex(unsigned index) {
- if (DWOCUs.empty())
- parseDWOCompileUnits();
- return DWOCUs[index];
+ parseDWOCompileUnits();
+ return DWOCUs[index].get();
}
/// Get a pointer to the parsed DebugAbbrev object.
@@ -118,6 +149,9 @@ public:
/// Get a pointer to the parsed dwo abbreviations object.
const DWARFDebugAbbrev *getDebugAbbrevDWO();
+ /// Get a pointer to the parsed DebugLoc object.
+ const DWARFDebugLocDWO *getDebugLocDWO();
+
/// Get a pointer to the parsed DebugAranges object.
const DWARFDebugAranges *getDebugAranges();
@@ -128,22 +162,26 @@ public:
const DWARFDebugLine::LineTable *
getLineTableForCompileUnit(DWARFCompileUnit *cu);
- virtual DILineInfo getLineInfoForAddress(uint64_t Address,
- DILineInfoSpecifier Specifier = DILineInfoSpecifier());
- virtual DILineInfoTable getLineInfoForAddressRange(uint64_t Address,
- uint64_t Size, DILineInfoSpecifier Specifier = DILineInfoSpecifier());
- virtual DIInliningInfo getInliningInfoForAddress(uint64_t Address,
- DILineInfoSpecifier Specifier = DILineInfoSpecifier());
+ DILineInfo getLineInfoForAddress(uint64_t Address,
+ DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
+ DILineInfoTable getLineInfoForAddressRange(uint64_t Address, uint64_t Size,
+ DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
+ DIInliningInfo getInliningInfoForAddress(uint64_t Address,
+ DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
virtual bool isLittleEndian() const = 0;
virtual uint8_t getAddressSize() const = 0;
virtual const Section &getInfoSection() = 0;
- virtual const std::map<object::SectionRef, Section> &getTypesSections() = 0;
+ typedef MapVector<object::SectionRef, Section,
+ std::map<object::SectionRef, unsigned> > TypeSectionMap;
+ virtual const TypeSectionMap &getTypesSections() = 0;
virtual StringRef getAbbrevSection() = 0;
virtual const Section &getLocSection() = 0;
+ virtual const Section &getLocDWOSection() = 0;
virtual StringRef getARangeSection() = 0;
virtual StringRef getDebugFrameSection() = 0;
virtual const Section &getLineSection() = 0;
+ virtual const Section &getLineDWOSection() = 0;
virtual StringRef getStringSection() = 0;
virtual StringRef getRangeSection() = 0;
virtual StringRef getPubNamesSection() = 0;
@@ -153,6 +191,7 @@ public:
// Sections for DWARF5 split dwarf proposal.
virtual const Section &getInfoDWOSection() = 0;
+ virtual const TypeSectionMap &getTypesDWOSections() = 0;
virtual StringRef getAbbrevDWOSection() = 0;
virtual StringRef getStringDWOSection() = 0;
virtual StringRef getStringOffsetDWOSection() = 0;
@@ -179,12 +218,14 @@ class DWARFContextInMemory : public DWARFContext {
bool IsLittleEndian;
uint8_t AddressSize;
Section InfoSection;
- std::map<object::SectionRef, Section> TypesSections;
+ TypeSectionMap TypesSections;
StringRef AbbrevSection;
Section LocSection;
+ Section LocDWOSection;
StringRef ARangeSection;
StringRef DebugFrameSection;
Section LineSection;
+ Section LineDWOSection;
StringRef StringSection;
StringRef RangeSection;
StringRef PubNamesSection;
@@ -194,44 +235,47 @@ class DWARFContextInMemory : public DWARFContext {
// Sections for DWARF5 split dwarf proposal.
Section InfoDWOSection;
+ TypeSectionMap TypesDWOSections;
StringRef AbbrevDWOSection;
StringRef StringDWOSection;
StringRef StringOffsetDWOSection;
StringRef RangeDWOSection;
StringRef AddrSection;
- SmallVector<MemoryBuffer*, 4> UncompressedSections;
+ SmallVector<SmallString<32>, 4> UncompressedSections;
public:
DWARFContextInMemory(object::ObjectFile *);
- ~DWARFContextInMemory();
- virtual bool isLittleEndian() const { return IsLittleEndian; }
- virtual uint8_t getAddressSize() const { return AddressSize; }
- virtual const Section &getInfoSection() { return InfoSection; }
- virtual const std::map<object::SectionRef, Section> &getTypesSections() {
- return TypesSections;
- }
- virtual StringRef getAbbrevSection() { return AbbrevSection; }
- virtual const Section &getLocSection() { return LocSection; }
- virtual StringRef getARangeSection() { return ARangeSection; }
- virtual StringRef getDebugFrameSection() { return DebugFrameSection; }
- virtual const Section &getLineSection() { return LineSection; }
- virtual StringRef getStringSection() { return StringSection; }
- virtual StringRef getRangeSection() { return RangeSection; }
- virtual StringRef getPubNamesSection() { return PubNamesSection; }
- virtual StringRef getPubTypesSection() { return PubTypesSection; }
- virtual StringRef getGnuPubNamesSection() { return GnuPubNamesSection; }
- virtual StringRef getGnuPubTypesSection() { return GnuPubTypesSection; }
+ bool isLittleEndian() const override { return IsLittleEndian; }
+ uint8_t getAddressSize() const override { return AddressSize; }
+ const Section &getInfoSection() override { return InfoSection; }
+ const TypeSectionMap &getTypesSections() override { return TypesSections; }
+ StringRef getAbbrevSection() override { return AbbrevSection; }
+ const Section &getLocSection() override { return LocSection; }
+ const Section &getLocDWOSection() override { return LocDWOSection; }
+ StringRef getARangeSection() override { return ARangeSection; }
+ StringRef getDebugFrameSection() override { return DebugFrameSection; }
+ const Section &getLineSection() override { return LineSection; }
+ const Section &getLineDWOSection() override { return LineDWOSection; }
+ StringRef getStringSection() override { return StringSection; }
+ StringRef getRangeSection() override { return RangeSection; }
+ StringRef getPubNamesSection() override { return PubNamesSection; }
+ StringRef getPubTypesSection() override { return PubTypesSection; }
+ StringRef getGnuPubNamesSection() override { return GnuPubNamesSection; }
+ StringRef getGnuPubTypesSection() override { return GnuPubTypesSection; }
// Sections for DWARF5 split dwarf proposal.
- virtual const Section &getInfoDWOSection() { return InfoDWOSection; }
- virtual StringRef getAbbrevDWOSection() { return AbbrevDWOSection; }
- virtual StringRef getStringDWOSection() { return StringDWOSection; }
- virtual StringRef getStringOffsetDWOSection() {
+ const Section &getInfoDWOSection() override { return InfoDWOSection; }
+ const TypeSectionMap &getTypesDWOSections() override {
+ return TypesDWOSections;
+ }
+ StringRef getAbbrevDWOSection() override { return AbbrevDWOSection; }
+ StringRef getStringDWOSection() override { return StringDWOSection; }
+ StringRef getStringOffsetDWOSection() override {
return StringOffsetDWOSection;
}
- virtual StringRef getRangeDWOSection() { return RangeDWOSection; }
- virtual StringRef getAddrSection() {
+ StringRef getRangeDWOSection() override { return RangeDWOSection; }
+ StringRef getAddrSection() override {
return AddrSection;
}
};
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugAbbrev.cpp b/contrib/llvm/lib/DebugInfo/DWARFDebugAbbrev.cpp
index 6e6c37e..8426bf9 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugAbbrev.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugAbbrev.cpp
@@ -12,95 +12,104 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-bool DWARFAbbreviationDeclarationSet::extract(DataExtractor data,
- uint32_t* offset_ptr) {
- const uint32_t beginOffset = *offset_ptr;
- Offset = beginOffset;
+DWARFAbbreviationDeclarationSet::DWARFAbbreviationDeclarationSet() {
clear();
- DWARFAbbreviationDeclaration abbrevDeclaration;
- uint32_t prevAbbrAode = 0;
- while (abbrevDeclaration.extract(data, offset_ptr)) {
- Decls.push_back(abbrevDeclaration);
- if (IdxOffset == 0) {
- IdxOffset = abbrevDeclaration.getCode();
+}
+
+void DWARFAbbreviationDeclarationSet::clear() {
+ Offset = 0;
+ FirstAbbrCode = 0;
+ Decls.clear();
+}
+
+bool DWARFAbbreviationDeclarationSet::extract(DataExtractor Data,
+ uint32_t *OffsetPtr) {
+ clear();
+ const uint32_t BeginOffset = *OffsetPtr;
+ Offset = BeginOffset;
+ DWARFAbbreviationDeclaration AbbrDecl;
+ uint32_t PrevAbbrCode = 0;
+ while (AbbrDecl.extract(Data, OffsetPtr)) {
+ Decls.push_back(AbbrDecl);
+ if (FirstAbbrCode == 0) {
+ FirstAbbrCode = AbbrDecl.getCode();
} else {
- if (prevAbbrAode + 1 != abbrevDeclaration.getCode())
- IdxOffset = UINT32_MAX;// Out of order indexes, we can't do O(1) lookups
+ if (PrevAbbrCode + 1 != AbbrDecl.getCode()) {
+ // Codes are not consecutive, can't do O(1) lookups.
+ FirstAbbrCode = UINT32_MAX;
+ }
}
- prevAbbrAode = abbrevDeclaration.getCode();
+ PrevAbbrCode = AbbrDecl.getCode();
}
- return beginOffset != *offset_ptr;
+ return BeginOffset != *OffsetPtr;
}
void DWARFAbbreviationDeclarationSet::dump(raw_ostream &OS) const {
- for (unsigned i = 0, e = Decls.size(); i != e; ++i)
- Decls[i].dump(OS);
+ for (const auto &Decl : Decls)
+ Decl.dump(OS);
}
-const DWARFAbbreviationDeclaration*
-DWARFAbbreviationDeclarationSet::getAbbreviationDeclaration(uint32_t abbrCode)
- const {
- if (IdxOffset == UINT32_MAX) {
- DWARFAbbreviationDeclarationCollConstIter pos;
- DWARFAbbreviationDeclarationCollConstIter end = Decls.end();
- for (pos = Decls.begin(); pos != end; ++pos) {
- if (pos->getCode() == abbrCode)
- return &(*pos);
+const DWARFAbbreviationDeclaration *
+DWARFAbbreviationDeclarationSet::getAbbreviationDeclaration(
+ uint32_t AbbrCode) const {
+ if (FirstAbbrCode == UINT32_MAX) {
+ for (const auto &Decl : Decls) {
+ if (Decl.getCode() == AbbrCode)
+ return &Decl;
}
- } else {
- uint32_t idx = abbrCode - IdxOffset;
- if (idx < Decls.size())
- return &Decls[idx];
+ return nullptr;
}
- return NULL;
+ if (AbbrCode < FirstAbbrCode || AbbrCode >= FirstAbbrCode + Decls.size())
+ return nullptr;
+ return &Decls[AbbrCode - FirstAbbrCode];
}
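The renamed FirstAbbrCode makes the fast path explicit: when codes are consecutive, the declaration for code C lives at index C - FirstAbbrCode, and UINT32_MAX marks the "scan linearly" fallback. A standalone illustration of the same indexing and bounds check:

#include <cstdint>
#include <vector>

struct Decl { uint32_t Code; };

static const Decl *find(const std::vector<Decl> &Decls, uint32_t FirstAbbrCode,
                        uint32_t AbbrCode) {
  if (FirstAbbrCode == UINT32_MAX) {        // codes were not consecutive
    for (const Decl &D : Decls)
      if (D.Code == AbbrCode)
        return &D;
    return nullptr;
  }
  if (AbbrCode < FirstAbbrCode || AbbrCode >= FirstAbbrCode + Decls.size())
    return nullptr;                         // outside the consecutive range
  return &Decls[AbbrCode - FirstAbbrCode];  // O(1) lookup
}

int main() {
  std::vector<Decl> Decls = {{5}, {6}, {7}}; // consecutive codes 5..7
  return find(Decls, 5, 6) == &Decls[1] ? 0 : 1;
}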
-DWARFDebugAbbrev::DWARFDebugAbbrev() :
- AbbrevCollMap(),
- PrevAbbrOffsetPos(AbbrevCollMap.end()) {}
-
+DWARFDebugAbbrev::DWARFDebugAbbrev() {
+ clear();
+}
-void DWARFDebugAbbrev::parse(DataExtractor data) {
- uint32_t offset = 0;
+void DWARFDebugAbbrev::clear() {
+ AbbrDeclSets.clear();
+ PrevAbbrOffsetPos = AbbrDeclSets.end();
+}
- while (data.isValidOffset(offset)) {
- uint32_t initial_cu_offset = offset;
- DWARFAbbreviationDeclarationSet abbrevDeclSet;
+void DWARFDebugAbbrev::extract(DataExtractor Data) {
+ clear();
- if (abbrevDeclSet.extract(data, &offset))
- AbbrevCollMap[initial_cu_offset] = abbrevDeclSet;
- else
+ uint32_t Offset = 0;
+ DWARFAbbreviationDeclarationSet AbbrDecls;
+ while (Data.isValidOffset(Offset)) {
+ uint32_t CUAbbrOffset = Offset;
+ if (!AbbrDecls.extract(Data, &Offset))
break;
+ AbbrDeclSets[CUAbbrOffset] = AbbrDecls;
}
- PrevAbbrOffsetPos = AbbrevCollMap.end();
}
void DWARFDebugAbbrev::dump(raw_ostream &OS) const {
- if (AbbrevCollMap.empty()) {
+ if (AbbrDeclSets.empty()) {
OS << "< EMPTY >\n";
return;
}
- DWARFAbbreviationDeclarationCollMapConstIter pos;
- for (pos = AbbrevCollMap.begin(); pos != AbbrevCollMap.end(); ++pos) {
- OS << format("Abbrev table for offset: 0x%8.8" PRIx64 "\n", pos->first);
- pos->second.dump(OS);
+ for (const auto &I : AbbrDeclSets) {
+ OS << format("Abbrev table for offset: 0x%8.8" PRIx64 "\n", I.first);
+ I.second.dump(OS);
}
}
const DWARFAbbreviationDeclarationSet*
-DWARFDebugAbbrev::getAbbreviationDeclarationSet(uint64_t cu_abbr_offset) const {
- DWARFAbbreviationDeclarationCollMapConstIter end = AbbrevCollMap.end();
- DWARFAbbreviationDeclarationCollMapConstIter pos;
- if (PrevAbbrOffsetPos != end &&
- PrevAbbrOffsetPos->first == cu_abbr_offset) {
+DWARFDebugAbbrev::getAbbreviationDeclarationSet(uint64_t CUAbbrOffset) const {
+ const auto End = AbbrDeclSets.end();
+ if (PrevAbbrOffsetPos != End && PrevAbbrOffsetPos->first == CUAbbrOffset) {
return &(PrevAbbrOffsetPos->second);
- } else {
- pos = AbbrevCollMap.find(cu_abbr_offset);
- PrevAbbrOffsetPos = pos;
}
- if (pos != AbbrevCollMap.end())
- return &(pos->second);
- return NULL;
+ const auto Pos = AbbrDeclSets.find(CUAbbrOffset);
+ if (Pos != End) {
+ PrevAbbrOffsetPos = Pos;
+ return &(Pos->second);
+ }
+
+ return nullptr;
}
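getAbbreviationDeclarationSet keeps a one-element cache: the iterator of the last successful lookup, reused when consecutive DIEs come from the same compile unit. After this change a miss no longer clobbers the cache with a fresh find result. A minimal standalone version of that pattern, assuming the map is not mutated between queries (clear() must reset the cached iterator, exactly as the code above does):

#include <cstdint>
#include <map>

struct Cache {
  std::map<uint64_t, int> Sets;
  mutable std::map<uint64_t, int>::const_iterator Prev = Sets.end();

  const int *lookup(uint64_t Offset) const {
    if (Prev != Sets.end() && Prev->first == Offset)
      return &Prev->second;     // fast path: same offset as last time
    auto Pos = Sets.find(Offset);
    if (Pos == Sets.end())
      return nullptr;           // miss: leave the cache untouched
    Prev = Pos;                 // remember the hit for next time
    return &Pos->second;
  }
};

int main() {
  Cache C;
  C.Sets[0x10] = 1;
  return (C.lookup(0x10) && C.lookup(0x10) && !C.lookup(0x20)) ? 0 : 1;
}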
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugAbbrev.h b/contrib/llvm/lib/DebugInfo/DWARFDebugAbbrev.h
index c7c0436..3a9adba 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugAbbrev.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugAbbrev.h
@@ -17,55 +17,45 @@
namespace llvm {
-typedef std::vector<DWARFAbbreviationDeclaration>
- DWARFAbbreviationDeclarationColl;
-typedef DWARFAbbreviationDeclarationColl::iterator
- DWARFAbbreviationDeclarationCollIter;
-typedef DWARFAbbreviationDeclarationColl::const_iterator
- DWARFAbbreviationDeclarationCollConstIter;
-
class DWARFAbbreviationDeclarationSet {
uint32_t Offset;
- uint32_t IdxOffset;
+ /// Code of the first abbreviation, if all abbreviations in the set have
+ /// consecutive codes. UINT32_MAX otherwise.
+ uint32_t FirstAbbrCode;
std::vector<DWARFAbbreviationDeclaration> Decls;
- public:
- DWARFAbbreviationDeclarationSet()
- : Offset(0), IdxOffset(0) {}
- DWARFAbbreviationDeclarationSet(uint32_t offset, uint32_t idxOffset)
- : Offset(offset), IdxOffset(idxOffset) {}
+public:
+ DWARFAbbreviationDeclarationSet();
- void clear() {
- IdxOffset = 0;
- Decls.clear();
- }
uint32_t getOffset() const { return Offset; }
void dump(raw_ostream &OS) const;
- bool extract(DataExtractor data, uint32_t* offset_ptr);
+ bool extract(DataExtractor Data, uint32_t *OffsetPtr);
const DWARFAbbreviationDeclaration *
- getAbbreviationDeclaration(uint32_t abbrCode) const;
+ getAbbreviationDeclaration(uint32_t AbbrCode) const;
+
+private:
+ void clear();
};
class DWARFDebugAbbrev {
-public:
typedef std::map<uint64_t, DWARFAbbreviationDeclarationSet>
- DWARFAbbreviationDeclarationCollMap;
- typedef DWARFAbbreviationDeclarationCollMap::iterator
- DWARFAbbreviationDeclarationCollMapIter;
- typedef DWARFAbbreviationDeclarationCollMap::const_iterator
- DWARFAbbreviationDeclarationCollMapConstIter;
+ DWARFAbbreviationDeclarationSetMap;
-private:
- DWARFAbbreviationDeclarationCollMap AbbrevCollMap;
- mutable DWARFAbbreviationDeclarationCollMapConstIter PrevAbbrOffsetPos;
+ DWARFAbbreviationDeclarationSetMap AbbrDeclSets;
+ mutable DWARFAbbreviationDeclarationSetMap::const_iterator PrevAbbrOffsetPos;
public:
DWARFDebugAbbrev();
+
const DWARFAbbreviationDeclarationSet *
- getAbbreviationDeclarationSet(uint64_t cu_abbr_offset) const;
+ getAbbreviationDeclarationSet(uint64_t CUAbbrOffset) const;
+
void dump(raw_ostream &OS) const;
- void parse(DataExtractor data);
+ void extract(DataExtractor Data);
+
+private:
+ void clear();
};
}
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugArangeSet.cpp b/contrib/llvm/lib/DebugInfo/DWARFDebugArangeSet.cpp
index 229376e..c0a33ce 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugArangeSet.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugArangeSet.cpp
@@ -67,7 +67,9 @@ DWARFDebugArangeSet::extract(DataExtractor data, uint32_t *offset_ptr) {
Descriptor arangeDescriptor;
- assert(sizeof(arangeDescriptor.Address) == sizeof(arangeDescriptor.Length));
+ static_assert(sizeof(arangeDescriptor.Address) ==
+ sizeof(arangeDescriptor.Length),
+ "Different datatypes for addresses and sizes!");
assert(sizeof(arangeDescriptor.Address) >= HeaderData.AddrSize);
while (data.isValidOffset(*offset_ptr)) {
@@ -94,9 +96,9 @@ void DWARFDebugArangeSet::dump(raw_ostream &OS) const {
HeaderData.CuOffset, HeaderData.AddrSize, HeaderData.SegSize);
const uint32_t hex_width = HeaderData.AddrSize * 2;
- for (DescriptorConstIter pos = ArangeDescriptors.begin(),
- end = ArangeDescriptors.end(); pos != end; ++pos)
- OS << format("[0x%*.*" PRIx64 " -", hex_width, hex_width, pos->Address)
+ for (const auto &Desc : ArangeDescriptors) {
+ OS << format("[0x%*.*" PRIx64 " -", hex_width, hex_width, Desc.Address)
<< format(" 0x%*.*" PRIx64 ")\n",
- hex_width, hex_width, pos->getEndAddress());
+ hex_width, hex_width, Desc.getEndAddress());
+ }
}
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugArangeSet.h b/contrib/llvm/lib/DebugInfo/DWARFDebugArangeSet.h
index 49a7132..d6c2d8b 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugArangeSet.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugArangeSet.h
@@ -10,6 +10,7 @@
#ifndef LLVM_DEBUGINFO_DWARFDEBUGARANGESET_H
#define LLVM_DEBUGINFO_DWARFDEBUGARANGESET_H
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/DataExtractor.h"
#include <vector>
@@ -44,7 +45,7 @@ public:
private:
typedef std::vector<Descriptor> DescriptorColl;
- typedef DescriptorColl::const_iterator DescriptorConstIter;
+ typedef iterator_range<DescriptorColl::const_iterator> desc_iterator_range;
uint32_t Offset;
Header HeaderData;
@@ -57,11 +58,10 @@ public:
void dump(raw_ostream &OS) const;
uint32_t getCompileUnitDIEOffset() const { return HeaderData.CuOffset; }
- uint32_t getNumDescriptors() const { return ArangeDescriptors.size(); }
- const Descriptor *getDescriptor(uint32_t i) const {
- if (i < ArangeDescriptors.size())
- return &ArangeDescriptors[i];
- return NULL;
+
+ desc_iterator_range descriptors() const {
+ return desc_iterator_range(ArangeDescriptors.begin(),
+ ArangeDescriptors.end());
}
};
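With getDescriptor()/getNumDescriptors() removed, consumers iterate the set directly, as the dump() hunk earlier in this patch already does. A short sketch of a caller summing covered bytes; Set is assumed to be an already-extracted DWARFDebugArangeSet, and the Address/Length members are the ones shown in this file's diff.

uint64_t Covered = 0;
for (const auto &Desc : Set.descriptors())
  Covered += Desc.Length;   // each Descriptor carries Address, Length, getEndAddress()
outs() << "arange set covers " << Covered << " bytes\n";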
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugAranges.cpp b/contrib/llvm/lib/DebugInfo/DWARFDebugAranges.cpp
index 591d4bd..fe7e46d 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugAranges.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugAranges.cpp
@@ -10,40 +10,28 @@
#include "DWARFDebugAranges.h"
#include "DWARFCompileUnit.h"
#include "DWARFContext.h"
+#include "DWARFDebugArangeSet.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
+#include <set>
using namespace llvm;
void DWARFDebugAranges::extract(DataExtractor DebugArangesData) {
if (!DebugArangesData.isValidOffset(0))
return;
uint32_t Offset = 0;
- typedef std::vector<DWARFDebugArangeSet> RangeSetColl;
- RangeSetColl Sets;
DWARFDebugArangeSet Set;
- uint32_t TotalRanges = 0;
while (Set.extract(DebugArangesData, &Offset)) {
- Sets.push_back(Set);
- TotalRanges += Set.getNumDescriptors();
- }
- if (TotalRanges == 0)
- return;
-
- Aranges.reserve(TotalRanges);
- for (RangeSetColl::const_iterator I = Sets.begin(), E = Sets.end(); I != E;
- ++I) {
- uint32_t CUOffset = I->getCompileUnitDIEOffset();
-
- for (uint32_t i = 0, n = I->getNumDescriptors(); i < n; ++i) {
- const DWARFDebugArangeSet::Descriptor *ArangeDescPtr =
- I->getDescriptor(i);
- uint64_t LowPC = ArangeDescPtr->Address;
- uint64_t HighPC = LowPC + ArangeDescPtr->Length;
+ uint32_t CUOffset = Set.getCompileUnitDIEOffset();
+ for (const auto &Desc : Set.descriptors()) {
+ uint64_t LowPC = Desc.Address;
+ uint64_t HighPC = Desc.getEndAddress();
appendRange(CUOffset, LowPC, HighPC);
}
+ ParsedCUOffsets.insert(CUOffset);
}
}
@@ -59,77 +47,66 @@ void DWARFDebugAranges::generate(DWARFContext *CTX) {
// Generate aranges from DIEs: even if .debug_aranges section is present,
// it may describe only a small subset of compilation units, so we need to
// manually build aranges for the rest of them.
- for (uint32_t i = 0, n = CTX->getNumCompileUnits(); i < n; ++i) {
- if (DWARFCompileUnit *CU = CTX->getCompileUnitAtIndex(i)) {
- uint32_t CUOffset = CU->getOffset();
- if (ParsedCUOffsets.insert(CUOffset).second)
- CU->buildAddressRangeTable(this, true, CUOffset);
+ for (const auto &CU : CTX->compile_units()) {
+ uint32_t CUOffset = CU->getOffset();
+ if (ParsedCUOffsets.insert(CUOffset).second) {
+ DWARFAddressRangesVector CURanges;
+ CU->collectAddressRanges(CURanges);
+ for (const auto &R : CURanges) {
+ appendRange(CUOffset, R.first, R.second);
+ }
}
}
- sortAndMinimize();
+ construct();
}
-void DWARFDebugAranges::appendRange(uint32_t CUOffset, uint64_t LowPC,
- uint64_t HighPC) {
- if (!Aranges.empty()) {
- if (Aranges.back().CUOffset == CUOffset &&
- Aranges.back().HighPC() == LowPC) {
- Aranges.back().setHighPC(HighPC);
- return;
- }
- }
- Aranges.push_back(Range(LowPC, HighPC, CUOffset));
+void DWARFDebugAranges::clear() {
+ Endpoints.clear();
+ Aranges.clear();
+ ParsedCUOffsets.clear();
}
-void DWARFDebugAranges::sortAndMinimize() {
- const size_t orig_arange_size = Aranges.size();
- // Size of one? If so, no sorting is needed
- if (orig_arange_size <= 1)
- return;
- // Sort our address range entries
- std::stable_sort(Aranges.begin(), Aranges.end());
-
- // Most address ranges are contiguous from function to function
- // so our new ranges will likely be smaller. We calculate the size
- // of the new ranges since although std::vector objects can be resized,
- // the will never reduce their allocated block size and free any excesss
- // memory, so we might as well start a brand new collection so it is as
- // small as possible.
-
- // First calculate the size of the new minimal arange vector
- // so we don't have to do a bunch of re-allocations as we
- // copy the new minimal stuff over to the new collection.
- size_t minimal_size = 1;
- for (size_t i = 1; i < orig_arange_size; ++i) {
- if (!Range::SortedOverlapCheck(Aranges[i-1], Aranges[i]))
- ++minimal_size;
- }
-
- // If the sizes are the same, then no consecutive aranges can be
- // combined, we are done.
- if (minimal_size == orig_arange_size)
+void DWARFDebugAranges::appendRange(uint32_t CUOffset, uint64_t LowPC,
+ uint64_t HighPC) {
+ if (LowPC >= HighPC)
return;
+ Endpoints.emplace_back(LowPC, CUOffset, true);
+ Endpoints.emplace_back(HighPC, CUOffset, false);
+}
- // Else, make a new RangeColl that _only_ contains what we need.
- RangeColl minimal_aranges;
- minimal_aranges.resize(minimal_size);
- uint32_t j = 0;
- minimal_aranges[j] = Aranges[0];
- for (size_t i = 1; i < orig_arange_size; ++i) {
- if (Range::SortedOverlapCheck(minimal_aranges[j], Aranges[i])) {
- minimal_aranges[j].setHighPC(Aranges[i].HighPC());
+void DWARFDebugAranges::construct() {
+ std::multiset<uint32_t> ValidCUs; // Maintain the set of CUs describing
+ // a current address range.
+ std::sort(Endpoints.begin(), Endpoints.end());
+ uint64_t PrevAddress = -1ULL;
+ for (const auto &E : Endpoints) {
+ if (PrevAddress < E.Address && ValidCUs.size() > 0) {
+ // If the address range between two endpoints is described by some
+ // CU, first try to extend the last range in Aranges. If we can't
+ // do it, start a new range.
+ if (!Aranges.empty() && Aranges.back().HighPC() == PrevAddress &&
+ ValidCUs.find(Aranges.back().CUOffset) != ValidCUs.end()) {
+ Aranges.back().setHighPC(E.Address);
+ } else {
+ Aranges.emplace_back(PrevAddress, E.Address, *ValidCUs.begin());
+ }
+ }
+ // Update the set of valid CUs.
+ if (E.IsRangeStart) {
+ ValidCUs.insert(E.CUOffset);
} else {
- // Only increment j if we aren't merging
- minimal_aranges[++j] = Aranges[i];
+ auto CUPos = ValidCUs.find(E.CUOffset);
+ assert(CUPos != ValidCUs.end());
+ ValidCUs.erase(CUPos);
}
+ PrevAddress = E.Address;
}
- assert(j+1 == minimal_size);
+ assert(ValidCUs.empty());
- // Now swap our new minimal aranges into place. The local
- // minimal_aranges will then contian the old big collection
- // which will get freed.
- minimal_aranges.swap(Aranges);
+ // Endpoints are not needed now.
+ std::vector<RangeEndpoint> EmptyEndpoints;
+ EmptyEndpoints.swap(Endpoints);
}
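construct() is a classic sweep over interval endpoints: sort the starts and ends, keep a multiset of the CUs currently covering the address, and emit or extend an output range whenever some CU covers the gap between two consecutive endpoints. A self-contained trace of the same idea on two overlapping CU ranges, which also shows why the result comes out sorted and minimal:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <set>
#include <vector>

struct Endpoint {
  uint64_t Address;
  uint32_t CUOffset;
  bool IsStart;
  bool operator<(const Endpoint &O) const { return Address < O.Address; }
};
struct Range { uint64_t Low, High; uint32_t CUOffset; };

int main() {
  // CU 1 covers [0x10, 0x20), CU 2 covers [0x18, 0x30).
  std::vector<Endpoint> Endpoints = {
      {0x10, 1, true}, {0x20, 1, false}, {0x18, 2, true}, {0x30, 2, false}};
  std::sort(Endpoints.begin(), Endpoints.end());

  std::multiset<uint32_t> ValidCUs; // CUs covering the current gap
  std::vector<Range> Aranges;
  uint64_t Prev = UINT64_MAX;
  for (const Endpoint &E : Endpoints) {
    if (Prev < E.Address && !ValidCUs.empty()) {
      if (!Aranges.empty() && Aranges.back().High == Prev &&
          ValidCUs.count(Aranges.back().CUOffset))
        Aranges.back().High = E.Address;             // extend the last range
      else
        Aranges.push_back({Prev, E.Address, *ValidCUs.begin()});
    }
    if (E.IsStart)
      ValidCUs.insert(E.CUOffset);
    else
      ValidCUs.erase(ValidCUs.find(E.CUOffset));
    Prev = E.Address;
  }
  // Prints [0x10, 0x20) -> CU 1 and [0x20, 0x30) -> CU 2: the overlap is merged
  // into the first range, and every address maps to exactly one CU.
  for (const Range &R : Aranges)
    std::printf("[0x%llx, 0x%llx) -> CU %u\n", (unsigned long long)R.Low,
                (unsigned long long)R.High, R.CUOffset);
}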
uint32_t DWARFDebugAranges::findAddress(uint64_t Address) const {
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugAranges.h b/contrib/llvm/lib/DebugInfo/DWARFDebugAranges.h
index 35ad8e5..a9f37fe 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugAranges.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugAranges.h
@@ -10,9 +10,9 @@
#ifndef LLVM_DEBUGINFO_DWARFDEBUGARANGES_H
#define LLVM_DEBUGINFO_DWARFDEBUGARANGES_H
-#include "DWARFDebugArangeSet.h"
#include "llvm/ADT/DenseSet.h"
-#include <list>
+#include "llvm/Support/DataExtractor.h"
+#include <vector>
namespace llvm {
@@ -20,21 +20,16 @@ class DWARFContext;
class DWARFDebugAranges {
public:
- void clear() {
- Aranges.clear();
- ParsedCUOffsets.clear();
- }
-
void generate(DWARFContext *CTX);
-
- // Use appendRange multiple times and then call sortAndMinimize.
- void appendRange(uint32_t CUOffset, uint64_t LowPC, uint64_t HighPC);
-
uint32_t findAddress(uint64_t Address) const;
private:
+ void clear();
void extract(DataExtractor DebugArangesData);
- void sortAndMinimize();
+
+ // Call appendRange multiple times and then call construct.
+ void appendRange(uint32_t CUOffset, uint64_t LowPC, uint64_t HighPC);
+ void construct();
struct Range {
explicit Range(uint64_t LowPC = -1ULL, uint64_t HighPC = -1ULL,
@@ -52,31 +47,39 @@ private:
return LowPC + Length;
return -1ULL;
}
+
bool containsAddress(uint64_t Address) const {
return LowPC <= Address && Address < HighPC();
}
-
- bool operator <(const Range &other) const {
+ bool operator<(const Range &other) const {
return LowPC < other.LowPC;
}
- static bool SortedOverlapCheck(const Range &Left, const Range &Right) {
- if (Left.CUOffset != Right.CUOffset)
- return false;
- return Left.HighPC() >= Right.LowPC;
- }
-
uint64_t LowPC; // Start of address range.
uint32_t Length; // End of address range (not including this address).
uint32_t CUOffset; // Offset of the compile unit or die.
};
+ struct RangeEndpoint {
+ uint64_t Address;
+ uint32_t CUOffset;
+ bool IsRangeStart;
+
+ RangeEndpoint(uint64_t Address, uint32_t CUOffset, bool IsRangeStart)
+ : Address(Address), CUOffset(CUOffset), IsRangeStart(IsRangeStart) {}
+
+ bool operator<(const RangeEndpoint &Other) const {
+ return Address < Other.Address;
+ }
+ };
+
+
typedef std::vector<Range> RangeColl;
typedef RangeColl::const_iterator RangeCollIterator;
- typedef DenseSet<uint32_t> ParsedCUOffsetColl;
+ std::vector<RangeEndpoint> Endpoints;
RangeColl Aranges;
- ParsedCUOffsetColl ParsedCUOffsets;
+ DenseSet<uint32_t> ParsedCUOffsets;
};
}
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugFrame.cpp b/contrib/llvm/lib/DebugInfo/DWARFDebugFrame.cpp
index 3efe6a1..a33548e 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugFrame.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugFrame.cpp
@@ -10,8 +10,8 @@
#include "DWARFDebugFrame.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/DataTypes.h"
-#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <string>
@@ -26,8 +26,8 @@ using namespace dwarf;
class llvm::FrameEntry {
public:
enum FrameKind {FK_CIE, FK_FDE};
- FrameEntry(FrameKind K, DataExtractor D, uint64_t Offset, uint64_t Length)
- : Kind(K), Data(D), Offset(Offset), Length(Length) {}
+ FrameEntry(FrameKind K, uint64_t Offset, uint64_t Length)
+ : Kind(K), Offset(Offset), Length(Length) {}
virtual ~FrameEntry() {
}
@@ -35,11 +35,12 @@ public:
FrameKind getKind() const { return Kind; }
virtual uint64_t getOffset() const { return Offset; }
- /// \brief Parse and store a sequence of CFI instructions from our data
- /// stream, starting at *Offset and ending at EndOffset. If everything
+ /// \brief Parse and store a sequence of CFI instructions from Data,
+ /// starting at *Offset and ending at EndOffset. If everything
/// goes well, *Offset should be equal to EndOffset when this method
/// returns. Otherwise, an error occurred.
- virtual void parseInstructions(uint32_t *Offset, uint32_t EndOffset);
+ virtual void parseInstructions(DataExtractor Data, uint32_t *Offset,
+ uint32_t EndOffset);
/// \brief Dump the entry header to the given output stream.
virtual void dumpHeader(raw_ostream &OS) const = 0;
@@ -50,10 +51,6 @@ public:
protected:
const FrameKind Kind;
- /// \brief The data stream holding the section from which the entry was
- /// parsed.
- DataExtractor Data;
-
/// \brief Offset of this entry in the section.
uint64_t Offset;
@@ -97,8 +94,8 @@ protected:
const uint8_t DWARF_CFI_PRIMARY_OPCODE_MASK = 0xc0;
const uint8_t DWARF_CFI_PRIMARY_OPERAND_MASK = 0x3f;
-
-void FrameEntry::parseInstructions(uint32_t *Offset, uint32_t EndOffset) {
+void FrameEntry::parseInstructions(DataExtractor Data, uint32_t *Offset,
+ uint32_t EndOffset) {
while (*Offset < EndOffset) {
uint8_t Opcode = Data.getU8(Offset);
// Some instructions have a primary opcode encoded in the top bits.
@@ -125,6 +122,7 @@ void FrameEntry::parseInstructions(uint32_t *Offset, uint32_t EndOffset) {
case DW_CFA_nop:
case DW_CFA_remember_state:
case DW_CFA_restore_state:
+ case DW_CFA_GNU_window_save:
// No operands
addInstruction(Opcode);
break;
@@ -185,10 +183,8 @@ void FrameEntry::parseInstructions(uint32_t *Offset, uint32_t EndOffset) {
void FrameEntry::dumpInstructions(raw_ostream &OS) const {
// TODO: at the moment only instruction names are dumped. Expand this to
// dump operands as well.
- for (std::vector<Instruction>::const_iterator I = Instructions.begin(),
- E = Instructions.end();
- I != E; ++I) {
- uint8_t Opcode = I->Opcode;
+ for (const auto &Instr : Instructions) {
+ uint8_t Opcode = Instr.Opcode;
if (Opcode & DWARF_CFI_PRIMARY_OPCODE_MASK)
Opcode &= DWARF_CFI_PRIMARY_OPCODE_MASK;
OS << " " << CallFrameString(Opcode) << ":\n";
@@ -202,18 +198,18 @@ class CIE : public FrameEntry {
public:
// CIEs (and FDEs) are simply container classes, so the only sensible way to
// create them is by providing the full parsed contents in the constructor.
- CIE(DataExtractor D, uint64_t Offset, uint64_t Length, uint8_t Version,
+ CIE(uint64_t Offset, uint64_t Length, uint8_t Version,
SmallString<8> Augmentation, uint64_t CodeAlignmentFactor,
int64_t DataAlignmentFactor, uint64_t ReturnAddressRegister)
- : FrameEntry(FK_CIE, D, Offset, Length), Version(Version),
- Augmentation(Augmentation), CodeAlignmentFactor(CodeAlignmentFactor),
- DataAlignmentFactor(DataAlignmentFactor),
- ReturnAddressRegister(ReturnAddressRegister) {}
+ : FrameEntry(FK_CIE, Offset, Length), Version(Version),
+ Augmentation(Augmentation), CodeAlignmentFactor(CodeAlignmentFactor),
+ DataAlignmentFactor(DataAlignmentFactor),
+ ReturnAddressRegister(ReturnAddressRegister) {}
~CIE() {
}
- void dumpHeader(raw_ostream &OS) const {
+ void dumpHeader(raw_ostream &OS) const override {
OS << format("%08x %08x %08x CIE",
(uint32_t)Offset, (uint32_t)Length, DW_CIE_ID)
<< "\n";
@@ -230,7 +226,7 @@ public:
static bool classof(const FrameEntry *FE) {
return FE->getKind() == FK_CIE;
- }
+ }
private:
/// The following fields are defined in section 6.4.1 of the DWARF standard v3
@@ -248,16 +244,16 @@ public:
// Each FDE has a CIE it's "linked to". Our FDE contains is constructed with
// an offset to the CIE (provided by parsing the FDE header). The CIE itself
// is obtained lazily once it's actually required.
- FDE(DataExtractor D, uint64_t Offset, uint64_t Length,
- int64_t LinkedCIEOffset, uint64_t InitialLocation, uint64_t AddressRange)
- : FrameEntry(FK_FDE, D, Offset, Length), LinkedCIEOffset(LinkedCIEOffset),
- InitialLocation(InitialLocation), AddressRange(AddressRange),
- LinkedCIE(NULL) {}
+ FDE(uint64_t Offset, uint64_t Length, int64_t LinkedCIEOffset,
+ uint64_t InitialLocation, uint64_t AddressRange)
+ : FrameEntry(FK_FDE, Offset, Length), LinkedCIEOffset(LinkedCIEOffset),
+ InitialLocation(InitialLocation), AddressRange(AddressRange),
+ LinkedCIE(nullptr) {}
~FDE() {
}
- void dumpHeader(raw_ostream &OS) const {
+ void dumpHeader(raw_ostream &OS) const override {
OS << format("%08x %08x %08x FDE ",
(uint32_t)Offset, (uint32_t)Length, (int32_t)LinkedCIEOffset);
OS << format("cie=%08x pc=%08x...%08x\n",
@@ -271,9 +267,9 @@ public:
static bool classof(const FrameEntry *FE) {
return FE->getKind() == FK_FDE;
- }
-private:
+ }
+private:
/// The following fields are defined in section 6.4.1 of the DWARF standard v3
uint64_t LinkedCIEOffset;
uint64_t InitialLocation;
@@ -286,15 +282,9 @@ private:
DWARFDebugFrame::DWARFDebugFrame() {
}
-
DWARFDebugFrame::~DWARFDebugFrame() {
- for (EntryVector::iterator I = Entries.begin(), E = Entries.end();
- I != E; ++I) {
- delete *I;
- }
}
-
static void LLVM_ATTRIBUTE_UNUSED dumpDataAux(DataExtractor Data,
uint32_t Offset, int Length) {
errs() << "DUMP: ";
@@ -336,7 +326,6 @@ void DWARFDebugFrame::parse(DataExtractor Data) {
Id = Data.getUnsigned(&Offset, IsDWARF64 ? 8 : 4);
bool IsCIE = ((IsDWARF64 && Id == DW64_CIE_ID) || Id == DW_CIE_ID);
- FrameEntry *Entry = 0;
if (IsCIE) {
// Note: this is specifically DWARFv3 CIE header structure. It was
// changed in DWARFv4. We currently don't support reading DWARFv4
@@ -348,30 +337,25 @@ void DWARFDebugFrame::parse(DataExtractor Data) {
int64_t DataAlignmentFactor = Data.getSLEB128(&Offset);
uint64_t ReturnAddressRegister = Data.getULEB128(&Offset);
- Entry = new CIE(Data, StartOffset, Length, Version,
- StringRef(Augmentation), CodeAlignmentFactor,
- DataAlignmentFactor, ReturnAddressRegister);
+ Entries.emplace_back(new CIE(StartOffset, Length, Version,
+ StringRef(Augmentation), CodeAlignmentFactor,
+ DataAlignmentFactor, ReturnAddressRegister));
} else {
// FDE
uint64_t CIEPointer = Id;
uint64_t InitialLocation = Data.getAddress(&Offset);
uint64_t AddressRange = Data.getAddress(&Offset);
- Entry = new FDE(Data, StartOffset, Length, CIEPointer,
- InitialLocation, AddressRange);
+ Entries.emplace_back(new FDE(StartOffset, Length, CIEPointer,
+ InitialLocation, AddressRange));
}
- assert(Entry && "Expected Entry to be populated with CIE or FDE");
- Entry->parseInstructions(&Offset, EndStructureOffset);
+ Entries.back()->parseInstructions(Data, &Offset, EndStructureOffset);
- if (Offset == EndStructureOffset) {
- // Entry instrucitons parsed successfully.
- Entries.push_back(Entry);
- } else {
+ if (Offset != EndStructureOffset) {
std::string Str;
raw_string_ostream OS(Str);
- OS << format("Parsing entry instructions at %lx failed",
- Entry->getOffset());
+ OS << format("Parsing entry instructions at %lx failed", StartOffset);
report_fatal_error(Str);
}
}
@@ -380,9 +364,7 @@ void DWARFDebugFrame::parse(DataExtractor Data) {
void DWARFDebugFrame::dump(raw_ostream &OS) const {
OS << "\n";
- for (EntryVector::const_iterator I = Entries.begin(), E = Entries.end();
- I != E; ++I) {
- FrameEntry *Entry = *I;
+ for (const auto &Entry : Entries) {
Entry->dumpHeader(OS);
Entry->dumpInstructions(OS);
OS << "\n";
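Entries now owns its CIE/FDE objects through std::unique_ptr, which is why the hand-written destructor disappears in this file and why a parse failure no longer leaks the partially built entry. A standalone sketch of the same ownership shape for a small polymorphic hierarchy:

#include <memory>
#include <vector>

struct Entry {
  virtual ~Entry() {}
  virtual int kind() const = 0;
};
struct CIELike : Entry { int kind() const override { return 0; } };
struct FDELike : Entry { int kind() const override { return 1; } };

int main() {
  std::vector<std::unique_ptr<Entry>> Entries;
  // emplace_back can call unique_ptr's explicit raw-pointer constructor,
  // so the parser keeps its `new CIE(...)` / `new FDE(...)` expressions as-is.
  Entries.emplace_back(new CIELike());
  Entries.emplace_back(new FDELike());
  int LastKind = Entries.back()->kind();
  // No explicit delete loop is needed: the vector's destructor releases them all.
  return LastKind == 1 ? 0 : 1;
}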
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugFrame.h b/contrib/llvm/lib/DebugInfo/DWARFDebugFrame.h
index 48b8d63..bd4ef45 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugFrame.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugFrame.h
@@ -12,14 +12,13 @@
#include "llvm/Support/DataExtractor.h"
#include "llvm/Support/raw_ostream.h"
+#include <memory>
#include <vector>
-
namespace llvm {
class FrameEntry;
-
/// \brief A parsed .debug_frame section
///
class DWARFDebugFrame {
@@ -35,12 +34,10 @@ public:
void parse(DataExtractor Data);
private:
- typedef std::vector<FrameEntry *> EntryVector;
- EntryVector Entries;
+ std::vector<std::unique_ptr<FrameEntry>> Entries;
};
} // namespace llvm
-#endif
-
+#endif
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.cpp b/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.cpp
index babfd2e..2e7a54a 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.cpp
@@ -18,6 +18,7 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace dwarf;
+typedef DILineInfoSpecifier::FunctionNameKind FunctionNameKind;
void DWARFDebugInfoEntryMinimal::dump(raw_ostream &OS, const DWARFUnit *u,
unsigned recurseDepth,
@@ -40,11 +41,8 @@ void DWARFDebugInfoEntryMinimal::dump(raw_ostream &OS, const DWARFUnit *u,
AbbrevDecl->hasChildren() ? '*' : ' ');
// Dump all data in the DIE for the attributes.
- const uint32_t numAttributes = AbbrevDecl->getNumAttributes();
- for (uint32_t i = 0; i != numAttributes; ++i) {
- uint16_t attr = AbbrevDecl->getAttrByIndex(i);
- uint16_t form = AbbrevDecl->getFormByIndex(i);
- dumpAttribute(OS, u, &offset, attr, form, indent);
+ for (const auto &AttrSpec : AbbrevDecl->attributes()) {
+ dumpAttribute(OS, u, &offset, AttrSpec.Attr, AttrSpec.Form, indent);
}
const DWARFDebugInfoEntryMinimal *child = getFirstChild();
@@ -102,11 +100,11 @@ bool DWARFDebugInfoEntryMinimal::extractFast(const DWARFUnit *U,
uint64_t AbbrCode = DebugInfoData.getULEB128(OffsetPtr);
if (0 == AbbrCode) {
// NULL debug tag entry.
- AbbrevDecl = NULL;
+ AbbrevDecl = nullptr;
return true;
}
AbbrevDecl = U->getAbbreviations()->getAbbreviationDeclaration(AbbrCode);
- if (0 == AbbrevDecl) {
+ if (nullptr == AbbrevDecl) {
// Restore the original offset.
*OffsetPtr = Offset;
return false;
@@ -116,8 +114,8 @@ bool DWARFDebugInfoEntryMinimal::extractFast(const DWARFUnit *U,
assert(FixedFormSizes.size() > 0);
// Skip all data in the .debug_info for the attributes
- for (uint32_t i = 0, n = AbbrevDecl->getNumAttributes(); i < n; ++i) {
- uint16_t Form = AbbrevDecl->getFormByIndex(i);
+ for (const auto &AttrSpec : AbbrevDecl->attributes()) {
+ uint16_t Form = AttrSpec.Form;
uint8_t FixedFormSize =
(Form < FixedFormSizes.size()) ? FixedFormSizes[Form] : 0;
@@ -212,6 +210,16 @@ uint64_t DWARFDebugInfoEntryMinimal::getAttributeValueAsSectionOffset(
return Result.hasValue() ? Result.getValue() : FailValue;
}
+uint64_t
+DWARFDebugInfoEntryMinimal::getRangesBaseAttribute(const DWARFUnit *U,
+ uint64_t FailValue) const {
+ uint64_t Result =
+ getAttributeValueAsSectionOffset(U, DW_AT_ranges_base, -1ULL);
+ if (Result != -1ULL)
+ return Result;
+ return getAttributeValueAsSectionOffset(U, DW_AT_GNU_ranges_base, FailValue);
+}
+
bool DWARFDebugInfoEntryMinimal::getLowAndHighPC(const DWARFUnit *U,
uint64_t &LowPC,
uint64_t &HighPC) const {
@@ -229,54 +237,66 @@ bool DWARFDebugInfoEntryMinimal::getLowAndHighPC(const DWARFUnit *U,
return (HighPC != -1ULL);
}
-void DWARFDebugInfoEntryMinimal::buildAddressRangeTable(
- const DWARFUnit *U, DWARFDebugAranges *DebugAranges,
- uint32_t UOffsetInAranges) const {
- if (AbbrevDecl) {
- if (isSubprogramDIE()) {
- uint64_t LowPC, HighPC;
- if (getLowAndHighPC(U, LowPC, HighPC))
- DebugAranges->appendRange(UOffsetInAranges, LowPC, HighPC);
- // FIXME: try to append ranges from .debug_ranges section.
- }
-
- const DWARFDebugInfoEntryMinimal *Child = getFirstChild();
- while (Child) {
- Child->buildAddressRangeTable(U, DebugAranges, UOffsetInAranges);
- Child = Child->getSibling();
- }
- }
-}
-
-bool DWARFDebugInfoEntryMinimal::addressRangeContainsAddress(
- const DWARFUnit *U, const uint64_t Address) const {
+DWARFAddressRangesVector
+DWARFDebugInfoEntryMinimal::getAddressRanges(const DWARFUnit *U) const {
if (isNULL())
- return false;
+ return DWARFAddressRangesVector();
+ // Single range specified by low/high PC.
uint64_t LowPC, HighPC;
- if (getLowAndHighPC(U, LowPC, HighPC))
- return (LowPC <= Address && Address <= HighPC);
- // Try to get address ranges from .debug_ranges section.
+ if (getLowAndHighPC(U, LowPC, HighPC)) {
+ return DWARFAddressRangesVector(1, std::make_pair(LowPC, HighPC));
+ }
+ // Multiple ranges from .debug_ranges section.
uint32_t RangesOffset =
getAttributeValueAsSectionOffset(U, DW_AT_ranges, -1U);
if (RangesOffset != -1U) {
DWARFDebugRangeList RangeList;
if (U->extractRangeList(RangesOffset, RangeList))
- return RangeList.containsAddress(U->getBaseAddress(), Address);
+ return RangeList.getAbsoluteRanges(U->getBaseAddress());
+ }
+ return DWARFAddressRangesVector();
+}
+
+void DWARFDebugInfoEntryMinimal::collectChildrenAddressRanges(
+ const DWARFUnit *U, DWARFAddressRangesVector& Ranges) const {
+ if (isNULL())
+ return;
+ if (isSubprogramDIE()) {
+ const auto &DIERanges = getAddressRanges(U);
+ Ranges.insert(Ranges.end(), DIERanges.begin(), DIERanges.end());
+ }
+
+ const DWARFDebugInfoEntryMinimal *Child = getFirstChild();
+ while (Child) {
+ Child->collectChildrenAddressRanges(U, Ranges);
+ Child = Child->getSibling();
+ }
+}
+
+bool DWARFDebugInfoEntryMinimal::addressRangeContainsAddress(
+ const DWARFUnit *U, const uint64_t Address) const {
+ for (const auto& R : getAddressRanges(U)) {
+ if (R.first <= Address && Address < R.second)
+ return true;
}
return false;
}
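getAddressRanges() normalizes both encodings (low/high PC and DW_AT_ranges) into the same vector-of-pairs shape, so containment becomes one half-open comparison per range, as the loop above shows. A standalone sketch of that check, with the pair representation inferred from the std::make_pair call in this hunk:

#include <cstdint>
#include <utility>
#include <vector>

// Mirrors the (LowPC, HighPC) pairs built by getAddressRanges();
// ranges are half-open: LowPC is included, HighPC is not.
typedef std::vector<std::pair<uint64_t, uint64_t>> AddressRanges;

static bool containsAddress(const AddressRanges &Ranges, uint64_t Address) {
  for (const auto &R : Ranges)
    if (R.first <= Address && Address < R.second)
      return true;
  return false;
}

int main() {
  AddressRanges Ranges = {{0x1000, 0x1040}, {0x2000, 0x2010}};
  return (containsAddress(Ranges, 0x103f) && !containsAddress(Ranges, 0x1040))
             ? 0 : 1;
}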
const char *
-DWARFDebugInfoEntryMinimal::getSubroutineName(const DWARFUnit *U) const {
- if (!isSubroutineDIE())
- return 0;
- // Try to get mangled name if possible.
- if (const char *name =
- getAttributeValueAsString(U, DW_AT_MIPS_linkage_name, 0))
- return name;
- if (const char *name = getAttributeValueAsString(U, DW_AT_linkage_name, 0))
- return name;
- if (const char *name = getAttributeValueAsString(U, DW_AT_name, 0))
+DWARFDebugInfoEntryMinimal::getSubroutineName(const DWARFUnit *U,
+ FunctionNameKind Kind) const {
+ if (!isSubroutineDIE() || Kind == FunctionNameKind::None)
+ return nullptr;
+ // Try to get mangled name only if it was asked for.
+ if (Kind == FunctionNameKind::LinkageName) {
+ if (const char *name =
+ getAttributeValueAsString(U, DW_AT_MIPS_linkage_name, nullptr))
+ return name;
+ if (const char *name =
+ getAttributeValueAsString(U, DW_AT_linkage_name, nullptr))
+ return name;
+ }
+ if (const char *name = getAttributeValueAsString(U, DW_AT_name, nullptr))
return name;
// Try to get name from specification DIE.
uint32_t spec_ref =
@@ -284,7 +304,7 @@ DWARFDebugInfoEntryMinimal::getSubroutineName(const DWARFUnit *U) const {
if (spec_ref != -1U) {
DWARFDebugInfoEntryMinimal spec_die;
if (spec_die.extractFast(U, &spec_ref)) {
- if (const char *name = spec_die.getSubroutineName(U))
+ if (const char *name = spec_die.getSubroutineName(U, Kind))
return name;
}
}
@@ -294,11 +314,11 @@ DWARFDebugInfoEntryMinimal::getSubroutineName(const DWARFUnit *U) const {
if (abs_origin_ref != -1U) {
DWARFDebugInfoEntryMinimal abs_origin_die;
if (abs_origin_die.extractFast(U, &abs_origin_ref)) {
- if (const char *name = abs_origin_die.getSubroutineName(U))
+ if (const char *name = abs_origin_die.getSubroutineName(U, Kind))
return name;
}
}
- return 0;
+ return nullptr;
}
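The name lookup is now kind-aware: a LinkageName request consults the mangled-name attributes first and falls back to DW_AT_name, while a plain short-name request skips the mangled forms entirely (and both still fall back to the specification or abstract-origin DIE, as above). A standalone sketch of just that preference order, using a toy string-keyed attribute map rather than real DWARF constants:

#include <map>
#include <string>

enum class NameKind { None, ShortName, LinkageName };

// Toy stand-ins for DW_AT_MIPS_linkage_name / DW_AT_linkage_name / DW_AT_name.
typedef std::map<std::string, std::string> AttrMap;

static const std::string *subroutineName(const AttrMap &Attrs, NameKind Kind) {
  if (Kind == NameKind::None)
    return nullptr;
  if (Kind == NameKind::LinkageName) {
    auto It = Attrs.find("mips_linkage_name");
    if (It == Attrs.end())
      It = Attrs.find("linkage_name");
    if (It != Attrs.end())
      return &It->second;
  }
  auto It = Attrs.find("name");
  return It != Attrs.end() ? &It->second : nullptr;
}

int main() {
  AttrMap Attrs = {{"linkage_name", "_Z3foov"}, {"name", "foo"}};
  const std::string *Mangled = subroutineName(Attrs, NameKind::LinkageName);
  const std::string *Short = subroutineName(Attrs, NameKind::ShortName);
  return (Mangled && *Mangled == "_Z3foov" && Short && *Short == "foo") ? 0 : 1;
}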
void DWARFDebugInfoEntryMinimal::getCallerFrame(const DWARFUnit *U,
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.h b/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.h
index aa61056..cc58eb6 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.h
@@ -11,7 +11,9 @@
#define LLVM_DEBUGINFO_DWARFDEBUGINFOENTRY_H
#include "DWARFAbbreviationDeclaration.h"
+#include "DWARFDebugRangeList.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/DebugInfo/DIContext.h"
#include "llvm/Support/DataTypes.h"
namespace llvm {
@@ -28,17 +30,13 @@ class DWARFDebugInfoEntryMinimal {
/// Offset within the .debug_info of the start of this entry.
uint32_t Offset;
- /// How many to subtract from "this" to get the parent.
- /// If zero this die has no parent.
- uint32_t ParentIdx;
-
/// How many to add to "this" to get the sibling.
uint32_t SiblingIdx;
const DWARFAbbreviationDeclaration *AbbrevDecl;
public:
DWARFDebugInfoEntryMinimal()
- : Offset(0), ParentIdx(0), SiblingIdx(0), AbbrevDecl(0) {}
+ : Offset(0), SiblingIdx(0), AbbrevDecl(nullptr) {}
void dump(raw_ostream &OS, const DWARFUnit *u, unsigned recurseDepth,
unsigned indent = 0) const;
@@ -51,7 +49,7 @@ public:
bool extractFast(const DWARFUnit *U, uint32_t *OffsetPtr);
uint32_t getTag() const { return AbbrevDecl ? AbbrevDecl->getTag() : 0; }
- bool isNULL() const { return AbbrevDecl == 0; }
+ bool isNULL() const { return AbbrevDecl == nullptr; }
/// Returns true if DIE represents a subprogram (not inlined).
bool isSubprogramDIE() const;
@@ -60,51 +58,26 @@ public:
bool isSubroutineDIE() const;
uint32_t getOffset() const { return Offset; }
- uint32_t getNumAttributes() const {
- return !isNULL() ? AbbrevDecl->getNumAttributes() : 0;
- }
bool hasChildren() const { return !isNULL() && AbbrevDecl->hasChildren(); }
// We know we are kept in a vector of contiguous entries, so we know
- // our parent will be some index behind "this".
- DWARFDebugInfoEntryMinimal *getParent() {
- return ParentIdx > 0 ? this - ParentIdx : 0;
- }
- const DWARFDebugInfoEntryMinimal *getParent() const {
- return ParentIdx > 0 ? this - ParentIdx : 0;
- }
- // We know we are kept in a vector of contiguous entries, so we know
// our sibling will be some index after "this".
- DWARFDebugInfoEntryMinimal *getSibling() {
- return SiblingIdx > 0 ? this + SiblingIdx : 0;
- }
const DWARFDebugInfoEntryMinimal *getSibling() const {
- return SiblingIdx > 0 ? this + SiblingIdx : 0;
+ return SiblingIdx > 0 ? this + SiblingIdx : nullptr;
}
+
// We know we are kept in a vector of contiguous entries, so we know
// we don't need to store our child pointer, if we have a child it will
// be the next entry in the list...
- DWARFDebugInfoEntryMinimal *getFirstChild() {
- return hasChildren() ? this + 1 : 0;
- }
const DWARFDebugInfoEntryMinimal *getFirstChild() const {
- return hasChildren() ? this + 1 : 0;
+ return hasChildren() ? this + 1 : nullptr;
}
- void setParent(DWARFDebugInfoEntryMinimal *parent) {
- if (parent) {
- // We know we are kept in a vector of contiguous entries, so we know
- // our parent will be some index behind "this".
- ParentIdx = this - parent;
- } else
- ParentIdx = 0;
- }
- void setSibling(DWARFDebugInfoEntryMinimal *sibling) {
- if (sibling) {
+ void setSibling(const DWARFDebugInfoEntryMinimal *Sibling) {
+ if (Sibling) {
// We know we are kept in a vector of contiguous entries, so we know
// our sibling will be some index after "this".
- SiblingIdx = sibling - this;
- sibling->setParent(getParent());
+ SiblingIdx = Sibling - this;
} else
SiblingIdx = 0;
}
@@ -133,14 +106,17 @@ public:
const uint16_t Attr,
uint64_t FailValue) const;
+ uint64_t getRangesBaseAttribute(const DWARFUnit *U, uint64_t FailValue) const;
+
/// Retrieves DW_AT_low_pc and DW_AT_high_pc from CU.
/// Returns true if both attributes are present.
bool getLowAndHighPC(const DWARFUnit *U, uint64_t &LowPC,
uint64_t &HighPC) const;
- void buildAddressRangeTable(const DWARFUnit *U,
- DWARFDebugAranges *DebugAranges,
- uint32_t CUOffsetInAranges) const;
+ DWARFAddressRangesVector getAddressRanges(const DWARFUnit *U) const;
+
+ void collectChildrenAddressRanges(const DWARFUnit *U,
+ DWARFAddressRangesVector &Ranges) const;
bool addressRangeContainsAddress(const DWARFUnit *U,
const uint64_t Address) const;
@@ -149,7 +125,9 @@ public:
/// returns its mangled name (or short name, if mangled is missing).
/// This name may be fetched from specification or abstract origin
/// for this subprogram. Returns null if no name is found.
- const char *getSubroutineName(const DWARFUnit *U) const;
+ const char *
+ getSubroutineName(const DWARFUnit *U,
+ DILineInfoSpecifier::FunctionNameKind Kind) const;
/// Retrieves values of DW_AT_call_file, DW_AT_call_line and
/// DW_AT_call_column from DIE (or zeroes if they are missing).
@@ -169,7 +147,7 @@ public:
/// (except the last DIE) in this chain is contained in address
/// range for next DIE in the chain.
struct DWARFDebugInfoEntryInlinedChain {
- DWARFDebugInfoEntryInlinedChain() : U(0) {}
+ DWARFDebugInfoEntryInlinedChain() : U(nullptr) {}
SmallVector<DWARFDebugInfoEntryMinimal, 4> DIEs;
const DWARFUnit *U;
};
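With ParentIdx gone, the header relies entirely on the DIEs being stored in one contiguous vector: a sibling lives SiblingIdx entries after "this", and the first child (when the abbreviation says the DIE has children) is simply the next entry. A hedged sketch of that pointer-arithmetic scheme with a simplified node type, not the real DWARFDebugInfoEntryMinimal:

    #include <cstdint>

    // Simplified flat-vector DIE: no parent index is stored.
    struct MiniDIE {
      uint32_t SiblingIdx = 0;  // 0 means "no sibling recorded".
      bool HasChildren = false; // From the abbreviation declaration.

      // The sibling is found by offsetting from "this" within the vector.
      const MiniDIE *getSibling() const {
        return SiblingIdx > 0 ? this + SiblingIdx : nullptr;
      }
      // A DIE with children is immediately followed by its first child.
      const MiniDIE *getFirstChild() const {
        return HasChildren ? this + 1 : nullptr;
      }
      void setSibling(const MiniDIE *Sibling) {
        SiblingIdx = Sibling ? static_cast<uint32_t>(Sibling - this) : 0;
      }
    };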
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugLine.cpp b/contrib/llvm/lib/DebugInfo/DWARFDebugLine.cpp
index 13d09dd..ce87635 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugLine.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugLine.cpp
@@ -15,17 +15,32 @@
#include <algorithm>
using namespace llvm;
using namespace dwarf;
+typedef DILineInfoSpecifier::FileLineInfoKind FileLineInfoKind;
+
+DWARFDebugLine::Prologue::Prologue() {
+ clear();
+}
+
+void DWARFDebugLine::Prologue::clear() {
+ TotalLength = Version = PrologueLength = 0;
+ MinInstLength = MaxOpsPerInst = DefaultIsStmt = LineBase = LineRange = 0;
+ OpcodeBase = 0;
+ StandardOpcodeLengths.clear();
+ IncludeDirectories.clear();
+ FileNames.clear();
+}
void DWARFDebugLine::Prologue::dump(raw_ostream &OS) const {
OS << "Line table prologue:\n"
- << format(" total_length: 0x%8.8x\n", TotalLength)
- << format(" version: %u\n", Version)
- << format("prologue_length: 0x%8.8x\n", PrologueLength)
- << format("min_inst_length: %u\n", MinInstLength)
- << format("default_is_stmt: %u\n", DefaultIsStmt)
- << format(" line_base: %i\n", LineBase)
- << format(" line_range: %u\n", LineRange)
- << format(" opcode_base: %u\n", OpcodeBase);
+ << format(" total_length: 0x%8.8x\n", TotalLength)
+ << format(" version: %u\n", Version)
+ << format(" prologue_length: 0x%8.8x\n", PrologueLength)
+ << format(" min_inst_length: %u\n", MinInstLength)
+ << format(Version >= 4 ? "max_ops_per_inst: %u\n" : "", MaxOpsPerInst)
+ << format(" default_is_stmt: %u\n", DefaultIsStmt)
+ << format(" line_base: %i\n", LineBase)
+ << format(" line_range: %u\n", LineRange)
+ << format(" opcode_base: %u\n", OpcodeBase);
for (uint32_t i = 0; i < StandardOpcodeLengths.size(); ++i)
OS << format("standard_opcode_lengths[%s] = %u\n", LNStandardString(i+1),
@@ -50,6 +65,67 @@ void DWARFDebugLine::Prologue::dump(raw_ostream &OS) const {
}
}
+bool DWARFDebugLine::Prologue::parse(DataExtractor debug_line_data,
+ uint32_t *offset_ptr) {
+ const uint32_t prologue_offset = *offset_ptr;
+
+ clear();
+ TotalLength = debug_line_data.getU32(offset_ptr);
+ Version = debug_line_data.getU16(offset_ptr);
+ if (Version < 2)
+ return false;
+
+ PrologueLength = debug_line_data.getU32(offset_ptr);
+ const uint32_t end_prologue_offset = PrologueLength + *offset_ptr;
+ MinInstLength = debug_line_data.getU8(offset_ptr);
+ if (Version >= 4)
+ MaxOpsPerInst = debug_line_data.getU8(offset_ptr);
+ DefaultIsStmt = debug_line_data.getU8(offset_ptr);
+ LineBase = debug_line_data.getU8(offset_ptr);
+ LineRange = debug_line_data.getU8(offset_ptr);
+ OpcodeBase = debug_line_data.getU8(offset_ptr);
+
+ StandardOpcodeLengths.reserve(OpcodeBase - 1);
+ for (uint32_t i = 1; i < OpcodeBase; ++i) {
+ uint8_t op_len = debug_line_data.getU8(offset_ptr);
+ StandardOpcodeLengths.push_back(op_len);
+ }
+
+ while (*offset_ptr < end_prologue_offset) {
+ const char *s = debug_line_data.getCStr(offset_ptr);
+ if (s && s[0])
+ IncludeDirectories.push_back(s);
+ else
+ break;
+ }
+
+ while (*offset_ptr < end_prologue_offset) {
+ const char *name = debug_line_data.getCStr(offset_ptr);
+ if (name && name[0]) {
+ FileNameEntry fileEntry;
+ fileEntry.Name = name;
+ fileEntry.DirIdx = debug_line_data.getULEB128(offset_ptr);
+ fileEntry.ModTime = debug_line_data.getULEB128(offset_ptr);
+ fileEntry.Length = debug_line_data.getULEB128(offset_ptr);
+ FileNames.push_back(fileEntry);
+ } else {
+ break;
+ }
+ }
+
+ if (*offset_ptr != end_prologue_offset) {
+ fprintf(stderr, "warning: parsing line table prologue at 0x%8.8x should"
+ " have ended at 0x%8.8x but it ended at 0x%8.8x\n",
+ prologue_offset, end_prologue_offset, *offset_ptr);
+ return false;
+ }
+ return true;
+}
+
+DWARFDebugLine::Row::Row(bool default_is_stmt) {
+ reset(default_is_stmt);
+}
+
void DWARFDebugLine::Row::postAppend() {
BasicBlock = false;
PrologueEnd = false;
@@ -62,6 +138,7 @@ void DWARFDebugLine::Row::reset(bool default_is_stmt) {
Column = 0;
File = 1;
Isa = 0;
+ Discriminator = 0;
IsStmt = default_is_stmt;
BasicBlock = false;
EndSequence = false;
@@ -71,7 +148,7 @@ void DWARFDebugLine::Row::reset(bool default_is_stmt) {
void DWARFDebugLine::Row::dump(raw_ostream &OS) const {
OS << format("0x%16.16" PRIx64 " %6u %6u", Address, Line, Column)
- << format(" %6u %3u ", File, Isa)
+ << format(" %6u %3u %13u ", File, Isa, Discriminator)
<< (IsStmt ? " is_stmt" : "")
<< (BasicBlock ? " basic_block" : "")
<< (PrologueEnd ? " prologue_end" : "")
@@ -80,63 +157,70 @@ void DWARFDebugLine::Row::dump(raw_ostream &OS) const {
<< '\n';
}
+DWARFDebugLine::Sequence::Sequence() {
+ reset();
+}
+
+void DWARFDebugLine::Sequence::reset() {
+ LowPC = 0;
+ HighPC = 0;
+ FirstRowIndex = 0;
+ LastRowIndex = 0;
+ Empty = true;
+}
+
+DWARFDebugLine::LineTable::LineTable() {
+ clear();
+}
+
void DWARFDebugLine::LineTable::dump(raw_ostream &OS) const {
Prologue.dump(OS);
OS << '\n';
if (!Rows.empty()) {
- OS << "Address Line Column File ISA Flags\n"
- << "------------------ ------ ------ ------ --- -------------\n";
- for (std::vector<Row>::const_iterator pos = Rows.begin(),
- end = Rows.end(); pos != end; ++pos)
- pos->dump(OS);
+ OS << "Address Line Column File ISA Discriminator Flags\n"
+ << "------------------ ------ ------ ------ --- ------------- "
+ "-------------\n";
+ for (const Row &R : Rows) {
+ R.dump(OS);
+ }
}
}
-DWARFDebugLine::State::~State() {}
-
-void DWARFDebugLine::State::appendRowToMatrix(uint32_t offset) {
- if (Sequence::Empty) {
- // Record the beginning of instruction sequence.
- Sequence::Empty = false;
- Sequence::LowPC = Address;
- Sequence::FirstRowIndex = row;
- }
- ++row; // Increase the row number.
- LineTable::appendRow(*this);
- if (EndSequence) {
- // Record the end of instruction sequence.
- Sequence::HighPC = Address;
- Sequence::LastRowIndex = row;
- if (Sequence::isValid())
- LineTable::appendSequence(*this);
- Sequence::reset();
- }
- Row::postAppend();
+void DWARFDebugLine::LineTable::clear() {
+ Prologue.clear();
+ Rows.clear();
+ Sequences.clear();
}
-void DWARFDebugLine::State::finalize() {
- row = DoneParsingLineTable;
- if (!Sequence::Empty) {
- fprintf(stderr, "warning: last sequence in debug line table is not"
- "terminated!\n");
- }
- // Sort all sequences so that address lookup will work faster.
- if (!Sequences.empty()) {
- std::sort(Sequences.begin(), Sequences.end(), Sequence::orderByLowPC);
- // Note: actually, instruction address ranges of sequences should not
- // overlap (in shared objects and executables). If they do, the address
- // lookup would still work, though, but result would be ambiguous.
- // We don't report warning in this case. For example,
- // sometimes .so compiled from multiple object files contains a few
- // rudimentary sequences for address ranges [0x0, 0xsomething).
- }
+DWARFDebugLine::ParsingState::ParsingState(struct LineTable *LT)
+ : LineTable(LT), RowNumber(0) {
+ resetRowAndSequence();
}
-DWARFDebugLine::DumpingState::~DumpingState() {}
+void DWARFDebugLine::ParsingState::resetRowAndSequence() {
+ Row.reset(LineTable->Prologue.DefaultIsStmt);
+ Sequence.reset();
+}
-void DWARFDebugLine::DumpingState::finalize() {
- LineTable::dump(OS);
+void DWARFDebugLine::ParsingState::appendRowToMatrix(uint32_t offset) {
+ if (Sequence.Empty) {
+ // Record the beginning of instruction sequence.
+ Sequence.Empty = false;
+ Sequence.LowPC = Row.Address;
+ Sequence.FirstRowIndex = RowNumber;
+ }
+ ++RowNumber;
+ LineTable->appendRow(Row);
+ if (Row.EndSequence) {
+ // Record the end of instruction sequence.
+ Sequence.HighPC = Row.Address;
+ Sequence.LastRowIndex = RowNumber;
+ if (Sequence.isValid())
+ LineTable->appendSequence(Sequence);
+ Sequence.reset();
+ }
+ Row.postAppend();
}
const DWARFDebugLine::LineTable *
@@ -144,7 +228,7 @@ DWARFDebugLine::getLineTable(uint32_t offset) const {
LineTableConstIter pos = LineTableMap.find(offset);
if (pos != LineTableMap.end())
return &pos->second;
- return 0;
+ return nullptr;
}
const DWARFDebugLine::LineTable *
@@ -152,90 +236,31 @@ DWARFDebugLine::getOrParseLineTable(DataExtractor debug_line_data,
uint32_t offset) {
std::pair<LineTableIter, bool> pos =
LineTableMap.insert(LineTableMapTy::value_type(offset, LineTable()));
+ LineTable *LT = &pos.first->second;
if (pos.second) {
- // Parse and cache the line table for at this offset.
- State state;
- if (!parseStatementTable(debug_line_data, RelocMap, &offset, state))
- return 0;
- pos.first->second = state;
- }
- return &pos.first->second;
-}
-
-bool
-DWARFDebugLine::parsePrologue(DataExtractor debug_line_data,
- uint32_t *offset_ptr, Prologue *prologue) {
- const uint32_t prologue_offset = *offset_ptr;
-
- prologue->clear();
- prologue->TotalLength = debug_line_data.getU32(offset_ptr);
- prologue->Version = debug_line_data.getU16(offset_ptr);
- if (prologue->Version != 2)
- return false;
-
- prologue->PrologueLength = debug_line_data.getU32(offset_ptr);
- const uint32_t end_prologue_offset = prologue->PrologueLength + *offset_ptr;
- prologue->MinInstLength = debug_line_data.getU8(offset_ptr);
- prologue->DefaultIsStmt = debug_line_data.getU8(offset_ptr);
- prologue->LineBase = debug_line_data.getU8(offset_ptr);
- prologue->LineRange = debug_line_data.getU8(offset_ptr);
- prologue->OpcodeBase = debug_line_data.getU8(offset_ptr);
-
- prologue->StandardOpcodeLengths.reserve(prologue->OpcodeBase-1);
- for (uint32_t i = 1; i < prologue->OpcodeBase; ++i) {
- uint8_t op_len = debug_line_data.getU8(offset_ptr);
- prologue->StandardOpcodeLengths.push_back(op_len);
- }
-
- while (*offset_ptr < end_prologue_offset) {
- const char *s = debug_line_data.getCStr(offset_ptr);
- if (s && s[0])
- prologue->IncludeDirectories.push_back(s);
- else
- break;
- }
-
- while (*offset_ptr < end_prologue_offset) {
- const char *name = debug_line_data.getCStr(offset_ptr);
- if (name && name[0]) {
- FileNameEntry fileEntry;
- fileEntry.Name = name;
- fileEntry.DirIdx = debug_line_data.getULEB128(offset_ptr);
- fileEntry.ModTime = debug_line_data.getULEB128(offset_ptr);
- fileEntry.Length = debug_line_data.getULEB128(offset_ptr);
- prologue->FileNames.push_back(fileEntry);
- } else {
- break;
- }
- }
-
- if (*offset_ptr != end_prologue_offset) {
- fprintf(stderr, "warning: parsing line table prologue at 0x%8.8x should"
- " have ended at 0x%8.8x but it ended at 0x%8.8x\n",
- prologue_offset, end_prologue_offset, *offset_ptr);
- return false;
+ if (!LT->parse(debug_line_data, RelocMap, &offset))
+ return nullptr;
}
- return true;
+ return LT;
}
-bool
-DWARFDebugLine::parseStatementTable(DataExtractor debug_line_data,
- const RelocAddrMap *RMap,
- uint32_t *offset_ptr, State &state) {
+bool DWARFDebugLine::LineTable::parse(DataExtractor debug_line_data,
+ const RelocAddrMap *RMap,
+ uint32_t *offset_ptr) {
const uint32_t debug_line_offset = *offset_ptr;
- Prologue *prologue = &state.Prologue;
+ clear();
- if (!parsePrologue(debug_line_data, offset_ptr, prologue)) {
+ if (!Prologue.parse(debug_line_data, offset_ptr)) {
// Restore our offset and return false to indicate failure!
*offset_ptr = debug_line_offset;
return false;
}
- const uint32_t end_offset = debug_line_offset + prologue->TotalLength +
- sizeof(prologue->TotalLength);
+ const uint32_t end_offset = debug_line_offset + Prologue.TotalLength +
+ sizeof(Prologue.TotalLength);
- state.reset();
+ ParsingState State(this);
while (*offset_ptr < end_offset) {
uint8_t opcode = debug_line_data.getU8(offset_ptr);
@@ -257,9 +282,9 @@ DWARFDebugLine::parseStatementTable(DataExtractor debug_line_data,
// with a DW_LNE_end_sequence instruction which creates a row whose
// address is that of the byte after the last target machine instruction
// of the sequence.
- state.EndSequence = true;
- state.appendRowToMatrix(*offset_ptr);
- state.reset();
+ State.Row.EndSequence = true;
+ State.appendRowToMatrix(*offset_ptr);
+ State.resetRowAndSequence();
break;
case DW_LNE_set_address:
@@ -274,9 +299,10 @@ DWARFDebugLine::parseStatementTable(DataExtractor debug_line_data,
RelocAddrMap::const_iterator AI = RMap->find(*offset_ptr);
if (AI != RMap->end()) {
const std::pair<uint8_t, int64_t> &R = AI->second;
- state.Address = debug_line_data.getAddress(offset_ptr) + R.second;
+ State.Row.Address =
+ debug_line_data.getAddress(offset_ptr) + R.second;
} else
- state.Address = debug_line_data.getAddress(offset_ptr);
+ State.Row.Address = debug_line_data.getAddress(offset_ptr);
}
break;
@@ -307,62 +333,66 @@ DWARFDebugLine::parseStatementTable(DataExtractor debug_line_data,
fileEntry.DirIdx = debug_line_data.getULEB128(offset_ptr);
fileEntry.ModTime = debug_line_data.getULEB128(offset_ptr);
fileEntry.Length = debug_line_data.getULEB128(offset_ptr);
- prologue->FileNames.push_back(fileEntry);
+ Prologue.FileNames.push_back(fileEntry);
}
break;
+ case DW_LNE_set_discriminator:
+ State.Row.Discriminator = debug_line_data.getULEB128(offset_ptr);
+ break;
+
default:
// Length doesn't include the zero opcode byte or the length itself, but
// it does include the sub_opcode, so we have to adjust for that below
(*offset_ptr) += arg_size;
break;
}
- } else if (opcode < prologue->OpcodeBase) {
+ } else if (opcode < Prologue.OpcodeBase) {
switch (opcode) {
// Standard Opcodes
case DW_LNS_copy:
// Takes no arguments. Append a row to the matrix using the
// current values of the state-machine registers. Then set
// the basic_block register to false.
- state.appendRowToMatrix(*offset_ptr);
+ State.appendRowToMatrix(*offset_ptr);
break;
case DW_LNS_advance_pc:
// Takes a single unsigned LEB128 operand, multiplies it by the
// min_inst_length field of the prologue, and adds the
// result to the address register of the state machine.
- state.Address += debug_line_data.getULEB128(offset_ptr) *
- prologue->MinInstLength;
+ State.Row.Address +=
+ debug_line_data.getULEB128(offset_ptr) * Prologue.MinInstLength;
break;
case DW_LNS_advance_line:
// Takes a single signed LEB128 operand and adds that value to
// the line register of the state machine.
- state.Line += debug_line_data.getSLEB128(offset_ptr);
+ State.Row.Line += debug_line_data.getSLEB128(offset_ptr);
break;
case DW_LNS_set_file:
// Takes a single unsigned LEB128 operand and stores it in the file
// register of the state machine.
- state.File = debug_line_data.getULEB128(offset_ptr);
+ State.Row.File = debug_line_data.getULEB128(offset_ptr);
break;
case DW_LNS_set_column:
// Takes a single unsigned LEB128 operand and stores it in the
// column register of the state machine.
- state.Column = debug_line_data.getULEB128(offset_ptr);
+ State.Row.Column = debug_line_data.getULEB128(offset_ptr);
break;
case DW_LNS_negate_stmt:
// Takes no arguments. Set the is_stmt register of the state
// machine to the logical negation of its current value.
- state.IsStmt = !state.IsStmt;
+ State.Row.IsStmt = !State.Row.IsStmt;
break;
case DW_LNS_set_basic_block:
// Takes no arguments. Set the basic_block register of the
// state machine to true
- state.BasicBlock = true;
+ State.Row.BasicBlock = true;
break;
case DW_LNS_const_add_pc:
@@ -378,10 +408,10 @@ DWARFDebugLine::parseStatementTable(DataExtractor debug_line_data,
// than twice that range will it need to use both DW_LNS_advance_pc
// and a special opcode, requiring three or more bytes.
{
- uint8_t adjust_opcode = 255 - prologue->OpcodeBase;
- uint64_t addr_offset = (adjust_opcode / prologue->LineRange) *
- prologue->MinInstLength;
- state.Address += addr_offset;
+ uint8_t adjust_opcode = 255 - Prologue.OpcodeBase;
+ uint64_t addr_offset =
+ (adjust_opcode / Prologue.LineRange) * Prologue.MinInstLength;
+ State.Row.Address += addr_offset;
}
break;
@@ -395,25 +425,25 @@ DWARFDebugLine::parseStatementTable(DataExtractor debug_line_data,
// judge when the computation of a special opcode overflows and
// requires the use of DW_LNS_advance_pc. Such assemblers, however,
// can use DW_LNS_fixed_advance_pc instead, sacrificing compression.
- state.Address += debug_line_data.getU16(offset_ptr);
+ State.Row.Address += debug_line_data.getU16(offset_ptr);
break;
case DW_LNS_set_prologue_end:
// Takes no arguments. Set the prologue_end register of the
// state machine to true
- state.PrologueEnd = true;
+ State.Row.PrologueEnd = true;
break;
case DW_LNS_set_epilogue_begin:
// Takes no arguments. Set the basic_block register of the
// state machine to true
- state.EpilogueBegin = true;
+ State.Row.EpilogueBegin = true;
break;
case DW_LNS_set_isa:
// Takes a single unsigned LEB128 operand and stores it in the
// column register of the state machine.
- state.Isa = debug_line_data.getULEB128(offset_ptr);
+ State.Row.Isa = debug_line_data.getULEB128(offset_ptr);
break;
default:
@@ -421,9 +451,9 @@ DWARFDebugLine::parseStatementTable(DataExtractor debug_line_data,
// of such opcodes because they are specified in the prologue
// as a multiple of LEB128 operands for each opcode.
{
- assert(opcode - 1U < prologue->StandardOpcodeLengths.size());
- uint8_t opcode_length = prologue->StandardOpcodeLengths[opcode - 1];
- for (uint8_t i=0; i<opcode_length; ++i)
+ assert(opcode - 1U < Prologue.StandardOpcodeLengths.size());
+ uint8_t opcode_length = Prologue.StandardOpcodeLengths[opcode - 1];
+ for (uint8_t i = 0; i < opcode_length; ++i)
debug_line_data.getULEB128(offset_ptr);
}
break;
@@ -462,24 +492,37 @@ DWARFDebugLine::parseStatementTable(DataExtractor debug_line_data,
//
// line increment = line_base + (adjusted opcode % line_range)
- uint8_t adjust_opcode = opcode - prologue->OpcodeBase;
- uint64_t addr_offset = (adjust_opcode / prologue->LineRange) *
- prologue->MinInstLength;
- int32_t line_offset = prologue->LineBase +
- (adjust_opcode % prologue->LineRange);
- state.Line += line_offset;
- state.Address += addr_offset;
- state.appendRowToMatrix(*offset_ptr);
+ uint8_t adjust_opcode = opcode - Prologue.OpcodeBase;
+ uint64_t addr_offset =
+ (adjust_opcode / Prologue.LineRange) * Prologue.MinInstLength;
+ int32_t line_offset =
+ Prologue.LineBase + (adjust_opcode % Prologue.LineRange);
+ State.Row.Line += line_offset;
+ State.Row.Address += addr_offset;
+ State.appendRowToMatrix(*offset_ptr);
}
}
- state.finalize();
+ if (!State.Sequence.Empty) {
+ fprintf(stderr, "warning: last sequence in debug line table is not"
+ "terminated!\n");
+ }
+
+ // Sort all sequences so that address lookup will work faster.
+ if (!Sequences.empty()) {
+ std::sort(Sequences.begin(), Sequences.end(), Sequence::orderByLowPC);
+ // Note: actually, instruction address ranges of sequences should not
+ // overlap (in shared objects and executables). If they do, the address
+ // lookup would still work, though, but result would be ambiguous.
+ // We don't report warning in this case. For example,
+ // sometimes .so compiled from multiple object files contains a few
+ // rudimentary sequences for address ranges [0x0, 0xsomething).
+ }
return end_offset;
}
-uint32_t
-DWARFDebugLine::LineTable::lookupAddress(uint64_t address) const {
+uint32_t DWARFDebugLine::LineTable::lookupAddress(uint64_t address) const {
uint32_t unknown_index = UINT32_MAX;
if (Sequences.empty())
return unknown_index;
@@ -524,10 +567,8 @@ DWARFDebugLine::LineTable::lookupAddress(uint64_t address) const {
return index;
}
-bool
-DWARFDebugLine::LineTable::lookupAddressRange(uint64_t address,
- uint64_t size,
- std::vector<uint32_t>& result) const {
+bool DWARFDebugLine::LineTable::lookupAddressRange(
+ uint64_t address, uint64_t size, std::vector<uint32_t> &result) const {
if (Sequences.empty())
return false;
uint64_t end_addr = address + size;
@@ -603,13 +644,14 @@ DWARFDebugLine::LineTable::lookupAddressRange(uint64_t address,
bool
DWARFDebugLine::LineTable::getFileNameByIndex(uint64_t FileIndex,
- bool NeedsAbsoluteFilePath,
+ FileLineInfoKind Kind,
std::string &Result) const {
- if (FileIndex == 0 || FileIndex > Prologue.FileNames.size())
+ if (FileIndex == 0 || FileIndex > Prologue.FileNames.size() ||
+ Kind == FileLineInfoKind::None)
return false;
const FileNameEntry &Entry = Prologue.FileNames[FileIndex - 1];
const char *FileName = Entry.Name;
- if (!NeedsAbsoluteFilePath ||
+ if (Kind != FileLineInfoKind::AbsoluteFilePath ||
sys::path::is_absolute(FileName)) {
Result = FileName;
return true;
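The special-opcode branch in LineTable::parse() applies the standard DWARF line-program formulas: the address advances by (adjusted opcode / line_range) * min_inst_length and the line by line_base + (adjusted opcode % line_range). A small standalone sketch of that decoding step; the struct is illustrative, not the real Prologue:

    #include <cstdint>

    struct MiniPrologue {
      uint8_t OpcodeBase;
      uint8_t LineRange;
      uint8_t MinInstLength;
      int8_t LineBase;
    };

    // Applies one special opcode to the (Address, Line) registers, using the
    // same arithmetic the parser performs before appending a row.
    void applySpecialOpcode(const MiniPrologue &P, uint8_t Opcode,
                            uint64_t &Address, uint32_t &Line) {
      uint8_t AdjustedOpcode = Opcode - P.OpcodeBase;
      uint64_t AddrOffset = (AdjustedOpcode / P.LineRange) * P.MinInstLength;
      int32_t LineOffset = P.LineBase + (AdjustedOpcode % P.LineRange);
      Address += AddrOffset;
      Line += LineOffset;
    }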
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugLine.h b/contrib/llvm/lib/DebugInfo/DWARFDebugLine.h
index 2990756..c7b7ec2 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugLine.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugLine.h
@@ -11,6 +11,7 @@
#define LLVM_DEBUGINFO_DWARFDEBUGLINE_H
#include "DWARFRelocMap.h"
+#include "llvm/DebugInfo/DIContext.h"
#include "llvm/Support/DataExtractor.h"
#include <map>
#include <string>
@@ -24,7 +25,7 @@ class DWARFDebugLine {
public:
DWARFDebugLine(const RelocAddrMap* LineInfoRelocMap) : RelocMap(LineInfoRelocMap) {}
struct FileNameEntry {
- FileNameEntry() : Name(0), DirIdx(0), ModTime(0), Length(0) {}
+ FileNameEntry() : Name(nullptr), DirIdx(0), ModTime(0), Length(0) {}
const char *Name;
uint64_t DirIdx;
@@ -33,9 +34,7 @@ public:
};
struct Prologue {
- Prologue()
- : TotalLength(0), Version(0), PrologueLength(0), MinInstLength(0),
- DefaultIsStmt(0), LineBase(0), LineRange(0), OpcodeBase(0) {}
+ Prologue();
// The size in bytes of the statement information for this compilation unit
// (not including the total_length field itself).
@@ -49,6 +48,9 @@ public:
// program opcodes that alter the address register first multiply their
// operands by this value.
uint8_t MinInstLength;
+ // The maximum number of individual operations that may be encoded in an
+ // instruction.
+ uint8_t MaxOpsPerInst;
  // The initial value of the is_stmt register.
uint8_t DefaultIsStmt;
// This parameter affects the meaning of the special opcodes. See below.
@@ -73,19 +75,16 @@ public:
int32_t getMaxLineIncrementForSpecialOpcode() const {
return LineBase + (int8_t)LineRange - 1;
}
+
+ void clear();
void dump(raw_ostream &OS) const;
- void clear() {
- TotalLength = Version = PrologueLength = 0;
- MinInstLength = LineBase = LineRange = OpcodeBase = 0;
- StandardOpcodeLengths.clear();
- IncludeDirectories.clear();
- FileNames.clear();
- }
+ bool parse(DataExtractor debug_line_data, uint32_t *offset_ptr);
};
// Standard .debug_line state machine structure.
struct Row {
- Row(bool default_is_stmt = false) { reset(default_is_stmt); }
+ explicit Row(bool default_is_stmt = false);
+
/// Called after a row is appended to the matrix.
void postAppend();
void reset(bool default_is_stmt);
@@ -112,6 +111,9 @@ public:
// An unsigned integer whose value encodes the applicable instruction set
// architecture for the current instruction.
uint8_t Isa;
+ // An unsigned integer representing the DWARF path discriminator value
+ // for this location.
+ uint32_t Discriminator;
// A boolean indicating that the current instruction is the beginning of a
// statement.
uint8_t IsStmt:1,
@@ -144,14 +146,9 @@ public:
unsigned LastRowIndex;
bool Empty;
- Sequence() { reset(); }
- void reset() {
- LowPC = 0;
- HighPC = 0;
- FirstRowIndex = 0;
- LastRowIndex = 0;
- Empty = true;
- }
+ Sequence();
+ void reset();
+
static bool orderByLowPC(const Sequence& LHS, const Sequence& RHS) {
return LHS.LowPC < RHS.LowPC;
}
@@ -164,31 +161,34 @@ public:
};
struct LineTable {
- void appendRow(const DWARFDebugLine::Row &state) { Rows.push_back(state); }
- void appendSequence(const DWARFDebugLine::Sequence &sequence) {
- Sequences.push_back(sequence);
+ LineTable();
+
+ void appendRow(const DWARFDebugLine::Row &R) {
+ Rows.push_back(R);
}
- void clear() {
- Prologue.clear();
- Rows.clear();
- Sequences.clear();
+ void appendSequence(const DWARFDebugLine::Sequence &S) {
+ Sequences.push_back(S);
}
// Returns the index of the row with file/line info for a given address,
// or -1 if there is no such row.
uint32_t lookupAddress(uint64_t address) const;
- bool lookupAddressRange(uint64_t address,
- uint64_t size,
- std::vector<uint32_t>& result) const;
+ bool lookupAddressRange(uint64_t address, uint64_t size,
+ std::vector<uint32_t> &result) const;
// Extracts filename by its index in filename table in prologue.
// Returns true on success.
bool getFileNameByIndex(uint64_t FileIndex,
- bool NeedsAbsoluteFilePath,
+ DILineInfoSpecifier::FileLineInfoKind Kind,
std::string &Result) const;
void dump(raw_ostream &OS) const;
+ void clear();
+
+ /// Parse prologue and all rows.
+ bool parse(DataExtractor debug_line_data, const RelocAddrMap *RMap,
+ uint32_t *offset_ptr);
struct Prologue Prologue;
typedef std::vector<Row> RowVector;
@@ -199,48 +199,26 @@ public:
SequenceVector Sequences;
};
- struct State : public Row, public Sequence, public LineTable {
- // Special row codes.
- enum {
- StartParsingLineTable = 0,
- DoneParsingLineTable = -1
- };
-
- State() : row(StartParsingLineTable) {}
- virtual ~State();
-
- virtual void appendRowToMatrix(uint32_t offset);
- virtual void finalize();
- virtual void reset() {
- Row::reset(Prologue.DefaultIsStmt);
- Sequence::reset();
- }
-
- // The row number that starts at zero for the prologue, and increases for
- // each row added to the matrix.
- unsigned row;
- };
-
- struct DumpingState : public State {
- DumpingState(raw_ostream &OS) : OS(OS) {}
- virtual ~DumpingState();
- virtual void finalize();
- private:
- raw_ostream &OS;
- };
-
- static bool parsePrologue(DataExtractor debug_line_data, uint32_t *offset_ptr,
- Prologue *prologue);
- /// Parse a single line table (prologue and all rows).
- static bool parseStatementTable(DataExtractor debug_line_data,
- const RelocAddrMap *RMap,
- uint32_t *offset_ptr, State &state);
-
const LineTable *getLineTable(uint32_t offset) const;
const LineTable *getOrParseLineTable(DataExtractor debug_line_data,
uint32_t offset);
private:
+ struct ParsingState {
+ ParsingState(struct LineTable *LT);
+
+ void resetRowAndSequence();
+ void appendRowToMatrix(uint32_t offset);
+
+ // Line table we're currently parsing.
+ struct LineTable *LineTable;
+ // The row number that starts at zero for the prologue, and increases for
+ // each row added to the matrix.
+ unsigned RowNumber;
+ struct Row Row;
+ struct Sequence Sequence;
+ };
+
typedef std::map<uint32_t, LineTable> LineTableMapTy;
typedef LineTableMapTy::iterator LineTableIter;
typedef LineTableMapTy::const_iterator LineTableConstIter;
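getFileNameByIndex() now takes a DILineInfoSpecifier::FileLineInfoKind rather than a bool, so callers state whether they want nothing, the raw table entry, or an absolute path. A hedged, standard-library-only sketch of the early cases shown in the hunk above (the enum values and the path check are stand-ins; the real code uses llvm::sys::path::is_absolute, and the directory-prepending branch for relative names is elided here just as it is cut off above):

    #include <string>

    enum class FileLineInfoKind { None, Default, AbsoluteFilePath };

    // Simplified stand-in for llvm::sys::path::is_absolute().
    static bool isAbsolutePath(const char *Name) {
      return Name && Name[0] == '/';
    }

    bool fileNameFor(const char *FileName, FileLineInfoKind Kind,
                     std::string &Result) {
      if (Kind == FileLineInfoKind::None)
        return false;
      // The raw entry is enough unless an absolute path was asked for and
      // the stored name is relative.
      if (Kind != FileLineInfoKind::AbsoluteFilePath ||
          isAbsolutePath(FileName)) {
        Result = FileName;
        return true;
      }
      return false; // Include-directory prepending omitted in this sketch.
    }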
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugLoc.cpp b/contrib/llvm/lib/DebugInfo/DWARFDebugLoc.cpp
index 3895ffa..e4aa5dc 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugLoc.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugLoc.cpp
@@ -11,23 +11,24 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Dwarf.h"
using namespace llvm;
void DWARFDebugLoc::dump(raw_ostream &OS) const {
- for (LocationLists::const_iterator I = Locations.begin(), E = Locations.end(); I != E; ++I) {
- OS << format("0x%8.8x: ", I->Offset);
+ for (const LocationList &L : Locations) {
+ OS << format("0x%8.8x: ", L.Offset);
const unsigned Indent = 12;
- for (SmallVectorImpl<Entry>::const_iterator I2 = I->Entries.begin(), E2 = I->Entries.end(); I2 != E2; ++I2) {
- if (I2 != I->Entries.begin())
+ for (const Entry &E : L.Entries) {
+ if (&E != L.Entries.begin())
OS.indent(Indent);
- OS << "Beginning address offset: " << format("0x%016" PRIx64, I2->Begin)
+ OS << "Beginning address offset: " << format("0x%016" PRIx64, E.Begin)
<< '\n';
OS.indent(Indent) << " Ending address offset: "
- << format("0x%016" PRIx64, I2->End) << '\n';
+ << format("0x%016" PRIx64, E.End) << '\n';
OS.indent(Indent) << " Location description: ";
- for (SmallVectorImpl<unsigned char>::const_iterator I3 = I2->Loc.begin(), E3 = I2->Loc.end(); I3 != E3; ++I3) {
- OS << format("%2.2x ", *I3);
+ for (unsigned char Loc : E.Loc) {
+ OS << format("%2.2x ", Loc);
}
OS << "\n\n";
}
@@ -36,7 +37,7 @@ void DWARFDebugLoc::dump(raw_ostream &OS) const {
void DWARFDebugLoc::parse(DataExtractor data, unsigned AddressSize) {
uint32_t Offset = 0;
- while (data.isValidOffset(Offset)) {
+ while (data.isValidOffset(Offset+AddressSize-1)) {
Locations.resize(Locations.size() + 1);
LocationList &Loc = Locations.back();
Loc.Offset = Offset;
@@ -68,7 +69,60 @@ void DWARFDebugLoc::parse(DataExtractor data, unsigned AddressSize) {
Offset += Bytes;
E.Loc.reserve(str.size());
std::copy(str.begin(), str.end(), std::back_inserter(E.Loc));
- Loc.Entries.push_back(llvm_move(E));
+ Loc.Entries.push_back(std::move(E));
}
}
+ if (data.isValidOffset(Offset))
+ llvm::errs() << "error: failed to consume entire .debug_loc section\n";
}
+
+void DWARFDebugLocDWO::parse(DataExtractor data) {
+ uint32_t Offset = 0;
+ while (data.isValidOffset(Offset)) {
+ Locations.resize(Locations.size() + 1);
+ LocationList &Loc = Locations.back();
+ Loc.Offset = Offset;
+ dwarf::LocationListEntry Kind;
+ while ((Kind = static_cast<dwarf::LocationListEntry>(
+ data.getU8(&Offset))) != dwarf::DW_LLE_end_of_list_entry) {
+
+ if (Kind != dwarf::DW_LLE_start_length_entry) {
+ llvm::errs() << "error: dumping support for LLE of kind " << (int)Kind
+ << " not implemented\n";
+ return;
+ }
+
+ Entry E;
+
+ E.Start = data.getULEB128(&Offset);
+ E.Length = data.getU32(&Offset);
+
+ unsigned Bytes = data.getU16(&Offset);
+ // A single location description describing the location of the object...
+ StringRef str = data.getData().substr(Offset, Bytes);
+ Offset += Bytes;
+ E.Loc.resize(str.size());
+ std::copy(str.begin(), str.end(), E.Loc.begin());
+
+ Loc.Entries.push_back(std::move(E));
+ }
+ }
+}
+
+void DWARFDebugLocDWO::dump(raw_ostream &OS) const {
+ for (const LocationList &L : Locations) {
+ OS << format("0x%8.8x: ", L.Offset);
+ const unsigned Indent = 12;
+ for (const Entry &E : L.Entries) {
+ if (&E != L.Entries.begin())
+ OS.indent(Indent);
+ OS << "Beginning address index: " << E.Start << '\n';
+ OS.indent(Indent) << " Length: " << E.Length << '\n';
+ OS.indent(Indent) << " Location description: ";
+ for (unsigned char Loc : E.Loc)
+ OS << format("%2.2x ", Loc);
+ OS << "\n\n";
+ }
+ }
+}
+
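DWARFDebugLocDWO::parse() reads the split-DWARF encoding of a location list: a one-byte entry kind, and for DW_LLE_start_length_entry a ULEB128 start index into .debug_addr, a 32-bit length, then a 16-bit expression size followed by that many expression bytes, until DW_LLE_end_of_list_entry terminates the list. A byte-level sketch of one such entry on a raw buffer (a little-endian host is assumed here; the real code goes through DataExtractor, which handles endianness):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct LocEntryDWO {
      uint64_t StartIndex;       // ULEB128 index into .debug_addr.
      uint32_t Length;           // 32-bit length of the covered range.
      std::vector<uint8_t> Expr; // DWARF expression bytes.
    };

    static uint64_t readULEB128(const uint8_t *&P) {
      uint64_t Value = 0;
      unsigned Shift = 0;
      uint8_t Byte;
      do {
        Byte = *P++;
        Value |= uint64_t(Byte & 0x7f) << Shift;
        Shift += 7;
      } while (Byte & 0x80);
      return Value;
    }

    // Decodes one DW_LLE_start_length_entry, kind byte already consumed,
    // in the same field order as the parser above.
    LocEntryDWO readStartLengthEntry(const uint8_t *&P) {
      LocEntryDWO E;
      E.StartIndex = readULEB128(P);
      std::memcpy(&E.Length, P, sizeof(E.Length));
      P += sizeof(E.Length);
      uint16_t ExprBytes;
      std::memcpy(&ExprBytes, P, sizeof(ExprBytes));
      P += sizeof(ExprBytes);
      E.Expr.assign(P, P + ExprBytes);
      P += ExprBytes;
      return E;
    }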
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugLoc.h b/contrib/llvm/lib/DebugInfo/DWARFDebugLoc.h
index d31aaaa..663acbb 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugLoc.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugLoc.h
@@ -55,6 +55,27 @@ public:
/// specified address size to interpret the address ranges.
void parse(DataExtractor data, unsigned AddressSize);
};
+
+class DWARFDebugLocDWO {
+ struct Entry {
+ uint64_t Start;
+ uint32_t Length;
+ SmallVector<unsigned char, 4> Loc;
+ };
+
+ struct LocationList {
+ unsigned Offset;
+ SmallVector<Entry, 2> Entries;
+ };
+
+ typedef SmallVector<LocationList, 4> LocationLists;
+
+ LocationLists Locations;
+
+public:
+ void parse(DataExtractor data);
+ void dump(raw_ostream &OS) const;
+};
}
#endif
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugRangeList.cpp b/contrib/llvm/lib/DebugInfo/DWARFDebugRangeList.cpp
index 1806bee..07b23b3 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugRangeList.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugRangeList.cpp
@@ -45,23 +45,25 @@ bool DWARFDebugRangeList::extract(DataExtractor data, uint32_t *offset_ptr) {
}
void DWARFDebugRangeList::dump(raw_ostream &OS) const {
- for (int i = 0, n = Entries.size(); i != n; ++i) {
+ for (const RangeListEntry &RLE : Entries) {
const char *format_str = (AddressSize == 4
? "%08x %08" PRIx64 " %08" PRIx64 "\n"
: "%08x %016" PRIx64 " %016" PRIx64 "\n");
- OS << format(format_str, Offset, Entries[i].StartAddress,
- Entries[i].EndAddress);
+ OS << format(format_str, Offset, RLE.StartAddress, RLE.EndAddress);
}
OS << format("%08x <End of list>\n", Offset);
}
-bool DWARFDebugRangeList::containsAddress(uint64_t BaseAddress,
- uint64_t Address) const {
- for (int i = 0, n = Entries.size(); i != n; ++i) {
- if (Entries[i].isBaseAddressSelectionEntry(AddressSize))
- BaseAddress = Entries[i].EndAddress;
- else if (Entries[i].containsAddress(BaseAddress, Address))
- return true;
+DWARFAddressRangesVector
+DWARFDebugRangeList::getAbsoluteRanges(uint64_t BaseAddress) const {
+ DWARFAddressRangesVector Res;
+ for (const RangeListEntry &RLE : Entries) {
+ if (RLE.isBaseAddressSelectionEntry(AddressSize)) {
+ BaseAddress = RLE.EndAddress;
+ } else {
+ Res.push_back(std::make_pair(BaseAddress + RLE.StartAddress,
+ BaseAddress + RLE.EndAddress));
+ }
}
- return false;
+ return Res;
}
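containsAddress() is replaced by getAbsoluteRanges(), which rebases every range entry against the compile unit's base address and lets a base-address-selection entry update that base as it goes. A minimal standalone sketch of the rebasing loop, assuming an 8-byte address size (so a start of all ones marks a selection entry, per the header change that follows):

    #include <cstdint>
    #include <utility>
    #include <vector>

    using AddressRange = std::pair<uint64_t, uint64_t>;

    struct RangeEntry {
      uint64_t Start;
      uint64_t End;
      // With a 64-bit address size, Start == 0xffffffffffffffff selects a
      // new base address (carried in End).
      bool isBaseAddressSelection() const { return Start == UINT64_MAX; }
    };

    std::vector<AddressRange>
    absoluteRanges(const std::vector<RangeEntry> &Entries,
                   uint64_t BaseAddress) {
      std::vector<AddressRange> Res;
      for (const RangeEntry &E : Entries) {
        if (E.isBaseAddressSelection())
          BaseAddress = E.End;
        else
          Res.emplace_back(BaseAddress + E.Start, BaseAddress + E.End);
      }
      return Res;
    }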
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugRangeList.h b/contrib/llvm/lib/DebugInfo/DWARFDebugRangeList.h
index 4e34a91..587b550 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugRangeList.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugRangeList.h
@@ -17,6 +17,9 @@ namespace llvm {
class raw_ostream;
+/// DWARFAddressRangesVector - represents a set of absolute address ranges.
+typedef std::vector<std::pair<uint64_t, uint64_t>> DWARFAddressRangesVector;
+
class DWARFDebugRangeList {
public:
struct RangeListEntry {
@@ -50,10 +53,6 @@ public:
else
return StartAddress == -1ULL;
}
- bool containsAddress(uint64_t BaseAddress, uint64_t Address) const {
- return (BaseAddress + StartAddress <= Address) &&
- (Address < BaseAddress + EndAddress);
- }
};
private:
@@ -67,10 +66,10 @@ public:
void clear();
void dump(raw_ostream &OS) const;
bool extract(DataExtractor data, uint32_t *offset_ptr);
- /// containsAddress - Returns true if range list contains the given
- /// address. Has to be passed base address of the compile unit that
- /// references this range list.
- bool containsAddress(uint64_t BaseAddress, uint64_t Address) const;
+ /// getAbsoluteRanges - Returns absolute address ranges defined by this range
+ /// list. Has to be passed base address of the compile unit referencing this
+ /// range list.
+ DWARFAddressRangesVector getAbsoluteRanges(uint64_t BaseAddress) const;
};
} // namespace llvm
diff --git a/contrib/llvm/lib/DebugInfo/DWARFFormValue.cpp b/contrib/llvm/lib/DebugInfo/DWARFFormValue.cpp
index da71fb3..8d0f966 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFFormValue.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFFormValue.cpp
@@ -131,7 +131,7 @@ bool DWARFFormValue::extractValue(DataExtractor data, uint32_t *offset_ptr,
const DWARFUnit *cu) {
bool indirect = false;
bool is_block = false;
- Value.data = NULL;
+ Value.data = nullptr;
// Read the value for the form into value and follow and DW_FORM_indirect
// instances we run into
do {
@@ -241,7 +241,7 @@ bool DWARFFormValue::extractValue(DataExtractor data, uint32_t *offset_ptr,
if (is_block) {
StringRef str = data.getData().substr(*offset_ptr, Value.uval);
- Value.data = NULL;
+ Value.data = nullptr;
if (!str.empty()) {
Value.data = reinterpret_cast<const uint8_t *>(str.data());
*offset_ptr += Value.uval;
@@ -488,7 +488,7 @@ Optional<const char *> DWARFFormValue::getAsCString(const DWARFUnit *U) const {
return None;
if (Form == DW_FORM_string)
return Value.cstr;
- if (U == 0)
+ if (!U)
return None;
uint32_t Offset = Value.uval;
if (Form == DW_FORM_GNU_str_index) {
@@ -509,7 +509,7 @@ Optional<uint64_t> DWARFFormValue::getAsAddress(const DWARFUnit *U) const {
if (Form == DW_FORM_GNU_addr_index) {
uint32_t Index = Value.uval;
uint64_t Result;
- if (U == 0 || !U->getAddrOffsetSectionItem(Index, Result))
+ if (!U || !U->getAddrOffsetSectionItem(Index, Result))
return None;
return Result;
}
@@ -525,7 +525,7 @@ Optional<uint64_t> DWARFFormValue::getAsReference(const DWARFUnit *U) const {
case DW_FORM_ref4:
case DW_FORM_ref8:
case DW_FORM_ref_udata:
- if (U == 0)
+ if (!U)
return None;
return Value.uval + U->getOffset();
case DW_FORM_ref_addr:
diff --git a/contrib/llvm/lib/DebugInfo/DWARFTypeUnit.h b/contrib/llvm/lib/DebugInfo/DWARFTypeUnit.h
index 7a0dab2..cf773b8 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFTypeUnit.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFTypeUnit.h
@@ -19,14 +19,16 @@ private:
uint64_t TypeHash;
uint32_t TypeOffset;
public:
- DWARFTypeUnit(const DWARFDebugAbbrev *DA, StringRef IS, StringRef AS,
- StringRef RS, StringRef SS, StringRef SOS, StringRef AOS,
+ DWARFTypeUnit(const DWARFDebugAbbrev *DA, StringRef IS, StringRef RS,
+ StringRef SS, StringRef SOS, StringRef AOS,
const RelocAddrMap *M, bool LE)
- : DWARFUnit(DA, IS, AS, RS, SS, SOS, AOS, M, LE) {}
- uint32_t getSize() const LLVM_OVERRIDE { return DWARFUnit::getSize() + 12; }
+ : DWARFUnit(DA, IS, RS, SS, SOS, AOS, M, LE) {}
+ uint32_t getHeaderSize() const override {
+ return DWARFUnit::getHeaderSize() + 12;
+ }
void dump(raw_ostream &OS);
protected:
- bool extractImpl(DataExtractor debug_info, uint32_t *offset_ptr) LLVM_OVERRIDE;
+ bool extractImpl(DataExtractor debug_info, uint32_t *offset_ptr) override;
};
}
diff --git a/contrib/llvm/lib/DebugInfo/DWARFUnit.cpp b/contrib/llvm/lib/DebugInfo/DWARFUnit.cpp
index 5167eb9..39d0a0f 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFUnit.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFUnit.cpp
@@ -17,12 +17,12 @@
using namespace llvm;
using namespace dwarf;
-DWARFUnit::DWARFUnit(const DWARFDebugAbbrev *DA, StringRef IS, StringRef AS,
- StringRef RS, StringRef SS, StringRef SOS, StringRef AOS,
+DWARFUnit::DWARFUnit(const DWARFDebugAbbrev *DA, StringRef IS, StringRef RS,
+ StringRef SS, StringRef SOS, StringRef AOS,
const RelocAddrMap *M, bool LE)
- : Abbrev(DA), InfoSection(IS), AbbrevSection(AS), RangeSection(RS),
- StringSection(SS), StringOffsetSection(SOS), AddrOffsetSection(AOS),
- RelocMap(M), isLittleEndian(LE) {
+ : Abbrev(DA), InfoSection(IS), RangeSection(RS), StringSection(SS),
+ StringOffsetSection(SOS), AddrOffsetSection(AOS), RelocMap(M),
+ isLittleEndian(LE) {
clear();
}
@@ -54,18 +54,20 @@ bool DWARFUnit::getStringOffsetSectionItem(uint32_t Index,
bool DWARFUnit::extractImpl(DataExtractor debug_info, uint32_t *offset_ptr) {
Length = debug_info.getU32(offset_ptr);
Version = debug_info.getU16(offset_ptr);
- uint64_t abbrOffset = debug_info.getU32(offset_ptr);
+ uint64_t AbbrOffset = debug_info.getU32(offset_ptr);
AddrSize = debug_info.getU8(offset_ptr);
- bool lengthOK = debug_info.isValidOffset(getNextUnitOffset() - 1);
- bool versionOK = DWARFContext::isSupportedVersion(Version);
- bool abbrOffsetOK = AbbrevSection.size() > abbrOffset;
- bool addrSizeOK = AddrSize == 4 || AddrSize == 8;
+ bool LengthOK = debug_info.isValidOffset(getNextUnitOffset() - 1);
+ bool VersionOK = DWARFContext::isSupportedVersion(Version);
+ bool AddrSizeOK = AddrSize == 4 || AddrSize == 8;
- if (!lengthOK || !versionOK || !addrSizeOK || !abbrOffsetOK)
+ if (!LengthOK || !VersionOK || !AddrSizeOK)
+ return false;
+
+ Abbrevs = Abbrev->getAbbreviationDeclarationSet(AbbrOffset);
+ if (Abbrevs == nullptr)
return false;
- Abbrevs = Abbrev->getAbbreviationDeclarationSet(abbrOffset);
return true;
}
@@ -98,7 +100,7 @@ void DWARFUnit::clear() {
Offset = 0;
Length = 0;
Version = 0;
- Abbrevs = 0;
+ Abbrevs = nullptr;
AddrSize = 0;
BaseAddr = 0;
RangeSectionBase = 0;
@@ -110,8 +112,8 @@ void DWARFUnit::clear() {
const char *DWARFUnit::getCompilationDir() {
extractDIEsIfNeeded(true);
if (DieArray.empty())
- return 0;
- return DieArray[0].getAttributeValueAsString(this, DW_AT_comp_dir, 0);
+ return nullptr;
+ return DieArray[0].getAttributeValueAsString(this, DW_AT_comp_dir, nullptr);
}
uint64_t DWARFUnit::getDWOId() {
@@ -124,38 +126,32 @@ uint64_t DWARFUnit::getDWOId() {
}
void DWARFUnit::setDIERelations() {
- if (DieArray.empty())
+ if (DieArray.size() <= 1)
return;
- DWARFDebugInfoEntryMinimal *die_array_begin = &DieArray.front();
- DWARFDebugInfoEntryMinimal *die_array_end = &DieArray.back();
- DWARFDebugInfoEntryMinimal *curr_die;
- // We purposely are skipping the last element in the array in the loop below
- // so that we can always have a valid next item
- for (curr_die = die_array_begin; curr_die < die_array_end; ++curr_die) {
- // Since our loop doesn't include the last element, we can always
- // safely access the next die in the array.
- DWARFDebugInfoEntryMinimal *next_die = curr_die + 1;
-
- const DWARFAbbreviationDeclaration *curr_die_abbrev =
- curr_die->getAbbreviationDeclarationPtr();
-
- if (curr_die_abbrev) {
- // Normal DIE
- if (curr_die_abbrev->hasChildren())
- next_die->setParent(curr_die);
- else
- curr_die->setSibling(next_die);
+
+ std::vector<DWARFDebugInfoEntryMinimal *> ParentChain;
+ DWARFDebugInfoEntryMinimal *SiblingChain = nullptr;
+ for (auto &DIE : DieArray) {
+ if (SiblingChain) {
+ SiblingChain->setSibling(&DIE);
+ }
+ if (const DWARFAbbreviationDeclaration *AbbrDecl =
+ DIE.getAbbreviationDeclarationPtr()) {
+ // Normal DIE.
+ if (AbbrDecl->hasChildren()) {
+ ParentChain.push_back(&DIE);
+ SiblingChain = nullptr;
+ } else {
+ SiblingChain = &DIE;
+ }
} else {
- // NULL DIE that terminates a sibling chain
- DWARFDebugInfoEntryMinimal *parent = curr_die->getParent();
- if (parent)
- parent->setSibling(next_die);
+ // NULL entry terminates the sibling chain.
+ SiblingChain = ParentChain.back();
+ ParentChain.pop_back();
}
}
-
- // Since we skipped the last element, we need to fix it up!
- if (die_array_begin < die_array_end)
- curr_die->setParent(die_array_begin);
+ assert(SiblingChain == nullptr || SiblingChain == &DieArray[0]);
+ assert(ParentChain.empty());
}
void DWARFUnit::extractDIEsToVector(
@@ -166,13 +162,13 @@ void DWARFUnit::extractDIEsToVector(
// Set the offset to that of the first DIE and calculate the start of the
// next compilation unit header.
- uint32_t Offset = getFirstDIEOffset();
+ uint32_t DIEOffset = Offset + getHeaderSize();
uint32_t NextCUOffset = getNextUnitOffset();
DWARFDebugInfoEntryMinimal DIE;
uint32_t Depth = 0;
bool IsCUDie = true;
- while (Offset < NextCUOffset && DIE.extractFast(this, &Offset)) {
+ while (DIEOffset < NextCUOffset && DIE.extractFast(this, &DIEOffset)) {
if (IsCUDie) {
if (AppendCUDie)
Dies.push_back(DIE);
@@ -187,9 +183,8 @@ void DWARFUnit::extractDIEsToVector(
Dies.push_back(DIE);
}
- const DWARFAbbreviationDeclaration *AbbrDecl =
- DIE.getAbbreviationDeclarationPtr();
- if (AbbrDecl) {
+ if (const DWARFAbbreviationDeclaration *AbbrDecl =
+ DIE.getAbbreviationDeclarationPtr()) {
// Normal DIE
if (AbbrDecl->hasChildren())
++Depth;
@@ -205,9 +200,9 @@ void DWARFUnit::extractDIEsToVector(
// Give a little bit of info if we encounter corrupt DWARF (our offset
// should always terminate at or before the start of the next compilation
// unit header).
- if (Offset > NextCUOffset)
+ if (DIEOffset > NextCUOffset)
fprintf(stderr, "warning: DWARF compile unit extends beyond its "
- "bounds cu 0x%8.8x at 0x%8.8x'\n", getOffset(), Offset);
+ "bounds cu 0x%8.8x at 0x%8.8x'\n", getOffset(), DIEOffset);
}
size_t DWARFUnit::extractDIEsIfNeeded(bool CUDieOnly) {
@@ -231,7 +226,9 @@ size_t DWARFUnit::extractDIEsIfNeeded(bool CUDieOnly) {
AddrOffsetSectionBase = DieArray[0].getAttributeValueAsSectionOffset(
this, DW_AT_GNU_addr_base, 0);
RangeSectionBase = DieArray[0].getAttributeValueAsSectionOffset(
- this, DW_AT_GNU_ranges_base, 0);
+ this, DW_AT_ranges_base, 0);
+ // Don't fall back to DW_AT_GNU_ranges_base: it should be ignored for
+ // skeleton CU DIE, so that DWARF users not aware of it are not broken.
}
setDIERelations();
@@ -241,43 +238,44 @@ size_t DWARFUnit::extractDIEsIfNeeded(bool CUDieOnly) {
DWARFUnit::DWOHolder::DWOHolder(object::ObjectFile *DWOFile)
: DWOFile(DWOFile),
DWOContext(cast<DWARFContext>(DIContext::getDWARFContext(DWOFile))),
- DWOU(0) {
+ DWOU(nullptr) {
if (DWOContext->getNumDWOCompileUnits() > 0)
DWOU = DWOContext->getDWOCompileUnitAtIndex(0);
}
bool DWARFUnit::parseDWO() {
- if (DWO.get() != 0)
+ if (DWO.get())
return false;
extractDIEsIfNeeded(true);
if (DieArray.empty())
return false;
const char *DWOFileName =
- DieArray[0].getAttributeValueAsString(this, DW_AT_GNU_dwo_name, 0);
- if (DWOFileName == 0)
+ DieArray[0].getAttributeValueAsString(this, DW_AT_GNU_dwo_name, nullptr);
+ if (!DWOFileName)
return false;
const char *CompilationDir =
- DieArray[0].getAttributeValueAsString(this, DW_AT_comp_dir, 0);
+ DieArray[0].getAttributeValueAsString(this, DW_AT_comp_dir, nullptr);
SmallString<16> AbsolutePath;
- if (sys::path::is_relative(DWOFileName) && CompilationDir != 0) {
+ if (sys::path::is_relative(DWOFileName) && CompilationDir != nullptr) {
sys::path::append(AbsolutePath, CompilationDir);
}
sys::path::append(AbsolutePath, DWOFileName);
- object::ObjectFile *DWOFile =
+ ErrorOr<object::ObjectFile *> DWOFile =
object::ObjectFile::createObjectFile(AbsolutePath);
if (!DWOFile)
return false;
// Reset DWOHolder.
- DWO.reset(new DWOHolder(DWOFile));
+ DWO.reset(new DWOHolder(DWOFile.get()));
DWARFUnit *DWOCU = DWO->getUnit();
// Verify that compile unit in .dwo file is valid.
- if (DWOCU == 0 || DWOCU->getDWOId() != getDWOId()) {
+ if (!DWOCU || DWOCU->getDWOId() != getDWOId()) {
DWO.reset();
return false;
}
// Share .debug_addr and .debug_ranges section with compile unit in .dwo
DWOCU->setAddrOffsetSection(AddrOffsetSection, AddrOffsetSectionBase);
- DWOCU->setRangesSection(RangeSection, RangeSectionBase);
+ uint32_t DWORangesBase = DieArray[0].getRangesBaseAttribute(this, 0);
+ DWOCU->setRangesSection(RangeSection, DWORangesBase);
return true;
}
@@ -298,52 +296,53 @@ void DWARFUnit::clearDIEs(bool KeepCUDie) {
}
}
-void
-DWARFUnit::buildAddressRangeTable(DWARFDebugAranges *debug_aranges,
- bool clear_dies_if_already_not_parsed,
- uint32_t CUOffsetInAranges) {
+void DWARFUnit::collectAddressRanges(DWARFAddressRangesVector &CURanges) {
+ // First, check if CU DIE describes address ranges for the unit.
+ const auto &CUDIERanges = getCompileUnitDIE()->getAddressRanges(this);
+ if (!CUDIERanges.empty()) {
+ CURanges.insert(CURanges.end(), CUDIERanges.begin(), CUDIERanges.end());
+ return;
+ }
+
// This function is usually called if there in no .debug_aranges section
// in order to produce a compile unit level set of address ranges that
// is accurate. If the DIEs weren't parsed, then we don't want all dies for
// all compile units to stay loaded when they weren't needed. So we can end
// up parsing the DWARF and then throwing them all away to keep memory usage
// down.
- const bool clear_dies = extractDIEsIfNeeded(false) > 1 &&
- clear_dies_if_already_not_parsed;
- DieArray[0].buildAddressRangeTable(this, debug_aranges, CUOffsetInAranges);
+ const bool ClearDIEs = extractDIEsIfNeeded(false) > 1;
+ DieArray[0].collectChildrenAddressRanges(this, CURanges);
+
+ // Collect address ranges from DIEs in .dwo if necessary.
bool DWOCreated = parseDWO();
- if (DWO.get()) {
- // If there is a .dwo file for this compile unit, then skeleton CU DIE
- // doesn't have children, and we should instead build address range table
- // from DIEs in the .debug_info.dwo section of .dwo file.
- DWO->getUnit()->buildAddressRangeTable(
- debug_aranges, clear_dies_if_already_not_parsed, CUOffsetInAranges);
- }
- if (DWOCreated && clear_dies_if_already_not_parsed)
+ if (DWO.get())
+ DWO->getUnit()->collectAddressRanges(CURanges);
+ if (DWOCreated)
DWO.reset();
// Keep memory down by clearing DIEs if this generate function
// caused them to be parsed.
- if (clear_dies)
+ if (ClearDIEs)
clearDIEs(true);
}
const DWARFDebugInfoEntryMinimal *
DWARFUnit::getSubprogramForAddress(uint64_t Address) {
extractDIEsIfNeeded(false);
- for (size_t i = 0, n = DieArray.size(); i != n; i++)
- if (DieArray[i].isSubprogramDIE() &&
- DieArray[i].addressRangeContainsAddress(this, Address)) {
- return &DieArray[i];
+ for (const DWARFDebugInfoEntryMinimal &DIE : DieArray) {
+ if (DIE.isSubprogramDIE() &&
+ DIE.addressRangeContainsAddress(this, Address)) {
+ return &DIE;
}
- return 0;
+ }
+ return nullptr;
}
DWARFDebugInfoEntryInlinedChain
DWARFUnit::getInlinedChainForAddress(uint64_t Address) {
// First, find a subprogram that contains the given address (the root
// of inlined chain).
- const DWARFUnit *ChainCU = 0;
+ const DWARFUnit *ChainCU = nullptr;
const DWARFDebugInfoEntryMinimal *SubprogramDIE =
getSubprogramForAddress(Address);
if (SubprogramDIE) {
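The new setDIERelations() links sibling chains in a single pass over the flat DIE array: a stack of pending parents replaces the stored parent index, a DIE whose abbreviation declares children is pushed, and a NULL entry pops the stack so that parent can continue the enclosing sibling chain. A standalone sketch of the same pass over a simplified flattened tree (well-formed input is assumed, as in the original):

    #include <cstdint>
    #include <vector>

    // Flattened DIE: IsNull marks the terminator that closes a child list.
    struct FlatDIE {
      bool IsNull = false;
      bool HasChildren = false;
      uint32_t SiblingIdx = 0;

      void setSibling(const FlatDIE *Sibling) {
        SiblingIdx = static_cast<uint32_t>(Sibling - this);
      }
    };

    void linkSiblings(std::vector<FlatDIE> &Dies) {
      if (Dies.size() <= 1)
        return;
      std::vector<FlatDIE *> ParentChain;
      FlatDIE *SiblingChain = nullptr;
      for (FlatDIE &DIE : Dies) {
        if (SiblingChain)
          SiblingChain->setSibling(&DIE);
        if (!DIE.IsNull) {
          if (DIE.HasChildren) {
            ParentChain.push_back(&DIE); // Children follow immediately.
            SiblingChain = nullptr;      // First child starts a new chain.
          } else {
            SiblingChain = &DIE;
          }
        } else if (!ParentChain.empty()) {
          // NULL entry: the parent that opened this child list becomes the
          // tail of the enclosing sibling chain again.
          SiblingChain = ParentChain.back();
          ParentChain.pop_back();
        } else {
          SiblingChain = nullptr; // Malformed input; nothing left to link.
        }
      }
    }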
diff --git a/contrib/llvm/lib/DebugInfo/DWARFUnit.h b/contrib/llvm/lib/DebugInfo/DWARFUnit.h
index bd768a6..471da36 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFUnit.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFUnit.h
@@ -10,7 +10,6 @@
#ifndef LLVM_DEBUGINFO_DWARFUNIT_H
#define LLVM_DEBUGINFO_DWARFUNIT_H
-#include "llvm/ADT/OwningPtr.h"
#include "DWARFDebugAbbrev.h"
#include "DWARFDebugInfoEntry.h"
#include "DWARFDebugRangeList.h"
@@ -30,7 +29,6 @@ class raw_ostream;
class DWARFUnit {
const DWARFDebugAbbrev *Abbrev;
StringRef InfoSection;
- StringRef AbbrevSection;
StringRef RangeSection;
uint32_t RangeSectionBase;
StringRef StringSection;
@@ -50,23 +48,24 @@ class DWARFUnit {
std::vector<DWARFDebugInfoEntryMinimal> DieArray;
class DWOHolder {
- OwningPtr<object::ObjectFile> DWOFile;
- OwningPtr<DWARFContext> DWOContext;
+ std::unique_ptr<object::ObjectFile> DWOFile;
+ std::unique_ptr<DWARFContext> DWOContext;
DWARFUnit *DWOU;
public:
DWOHolder(object::ObjectFile *DWOFile);
DWARFUnit *getUnit() const { return DWOU; }
};
- OwningPtr<DWOHolder> DWO;
+ std::unique_ptr<DWOHolder> DWO;
protected:
virtual bool extractImpl(DataExtractor debug_info, uint32_t *offset_ptr);
+ /// Size in bytes of the unit header.
+ virtual uint32_t getHeaderSize() const { return 11; }
public:
-
- DWARFUnit(const DWARFDebugAbbrev *DA, StringRef IS, StringRef AS,
- StringRef RS, StringRef SS, StringRef SOS, StringRef AOS,
- const RelocAddrMap *M, bool LE);
+ DWARFUnit(const DWARFDebugAbbrev *DA, StringRef IS, StringRef RS,
+ StringRef SS, StringRef SOS, StringRef AOS, const RelocAddrMap *M,
+ bool LE);
virtual ~DWARFUnit();
@@ -103,12 +102,7 @@ public:
DWARFDebugRangeList &RangeList) const;
void clear();
uint32_t getOffset() const { return Offset; }
- /// Size in bytes of the compile unit header.
- virtual uint32_t getSize() const { return 11; }
- uint32_t getFirstDIEOffset() const { return Offset + getSize(); }
uint32_t getNextUnitOffset() const { return Offset + Length + 4; }
- /// Size in bytes of the .debug_info data associated with this compile unit.
- size_t getDebugInfoSize() const { return Length + 4 - getSize(); }
uint32_t getLength() const { return Length; }
uint16_t getVersion() const { return Version; }
const DWARFAbbreviationDeclarationSet *getAbbreviations() const {
@@ -124,15 +118,13 @@ public:
const DWARFDebugInfoEntryMinimal *
getCompileUnitDIE(bool extract_cu_die_only = true) {
extractDIEsIfNeeded(extract_cu_die_only);
- return DieArray.empty() ? NULL : &DieArray[0];
+ return DieArray.empty() ? nullptr : &DieArray[0];
}
const char *getCompilationDir();
uint64_t getDWOId();
- void buildAddressRangeTable(DWARFDebugAranges *debug_aranges,
- bool clear_dies_if_already_not_parsed,
- uint32_t CUOffsetInAranges);
+ void collectAddressRanges(DWARFAddressRangesVector &CURanges);
/// getInlinedChainForAddress - fetches inlined chain for a given address.
/// Returns empty chain if there is no subprogram containing address. The
@@ -140,6 +132,9 @@ public:
DWARFDebugInfoEntryInlinedChain getInlinedChainForAddress(uint64_t Address);
private:
+ /// Size in bytes of the .debug_info data associated with this compile unit.
+ size_t getDebugInfoSize() const { return Length + 4 - getHeaderSize(); }
+
/// extractDIEsIfNeeded - Parses a compile unit and indexes its DIEs if it
/// hasn't already been done. Returns the number of DIEs parsed at this call.
size_t extractDIEsIfNeeded(bool CUDieOnly);
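The public getSize()/getFirstDIEOffset()/getDebugInfoSize() trio is folded into a protected virtual getHeaderSize(): 11 bytes for a DWARF32 compile unit header, plus 12 more for a type unit's signature and type offset (matching the DWARFTypeUnit override earlier in this diff). A quick arithmetic sketch of how the offsets used above fit together; constants mirror the diff, DWARF32 assumed:

    #include <cstdint>

    // 4-byte length + 2-byte version + 4-byte abbrev offset + 1-byte addr size.
    constexpr uint32_t CUHeaderSize = 4 + 2 + 4 + 1;
    // Type units add an 8-byte type signature and a 4-byte type offset.
    constexpr uint32_t TUHeaderSize = CUHeaderSize + 8 + 4;
    static_assert(CUHeaderSize == 11, "DWARFUnit::getHeaderSize()");
    static_assert(TUHeaderSize == 23, "DWARFTypeUnit::getHeaderSize()");

    // First DIE offset and .debug_info payload size, as computed in the
    // patch; Length excludes its own 4-byte field.
    uint32_t firstDIEOffset(uint32_t Offset, uint32_t HeaderSize) {
      return Offset + HeaderSize;
    }
    uint32_t debugInfoSize(uint32_t Length, uint32_t HeaderSize) {
      return Length + 4 - HeaderSize;
    }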
diff --git a/contrib/llvm/lib/DebugInfo/module.modulemap b/contrib/llvm/lib/DebugInfo/module.modulemap
new file mode 100644
index 0000000..1fe5ab1
--- /dev/null
+++ b/contrib/llvm/lib/DebugInfo/module.modulemap
@@ -0,0 +1 @@
+module DebugInfo { requires cplusplus umbrella "." module * { export * } }
diff --git a/contrib/llvm/lib/ExecutionEngine/EventListenerCommon.h b/contrib/llvm/lib/ExecutionEngine/EventListenerCommon.h
index 314db8b..66645d7 100644
--- a/contrib/llvm/lib/ExecutionEngine/EventListenerCommon.h
+++ b/contrib/llvm/lib/ExecutionEngine/EventListenerCommon.h
@@ -15,10 +15,10 @@
#define EVENT_LISTENER_COMMON_H
#include "llvm/ADT/DenseMap.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Metadata.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Path.h"
-#include "llvm/Support/ValueHandle.h"
namespace llvm {
diff --git a/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp b/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
index 2a610d5..b0e985d 100644
--- a/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -12,31 +12,33 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "jit"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
-#include "llvm/ExecutionEngine/JITMemoryManager.h"
-#include "llvm/ExecutionEngine/ObjectCache.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/ExecutionEngine/JITMemoryManager.h"
+#include "llvm/ExecutionEngine/ObjectCache.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/MutexGuard.h"
#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cmath>
#include <cstring>
using namespace llvm;
+#define DEBUG_TYPE "jit"
+
STATISTIC(NumInitBytes, "Number of bytes of global vars initialized");
STATISTIC(NumGlobals , "Number of global vars initialized");
@@ -50,22 +52,31 @@ ExecutionEngine *(*ExecutionEngine::JITCtor)(
std::string *ErrorStr,
JITMemoryManager *JMM,
bool GVsWithCode,
- TargetMachine *TM) = 0;
+ TargetMachine *TM) = nullptr;
ExecutionEngine *(*ExecutionEngine::MCJITCtor)(
Module *M,
std::string *ErrorStr,
RTDyldMemoryManager *MCJMM,
bool GVsWithCode,
- TargetMachine *TM) = 0;
+ TargetMachine *TM) = nullptr;
ExecutionEngine *(*ExecutionEngine::InterpCtor)(Module *M,
- std::string *ErrorStr) = 0;
+ std::string *ErrorStr) =nullptr;
ExecutionEngine::ExecutionEngine(Module *M)
: EEState(*this),
- LazyFunctionCreator(0) {
+ LazyFunctionCreator(nullptr) {
CompilingLazily = false;
GVCompilationDisabled = false;
SymbolSearchingDisabled = false;
+
+ // IR module verification is enabled by default in debug builds, and disabled
+ // by default in release builds.
+#ifndef NDEBUG
+ VerifyModules = true;
+#else
+ VerifyModules = false;
+#endif
+
Modules.push_back(M);
assert(M && "Module is null?");
}
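
The constructor hunk above seeds a new VerifyModules flag whose default depends on the build mode. A small, self-contained sketch of that NDEBUG-driven default (ConfigSketch is a stand-in, not an LLVM type):

#include <cstdio>

struct ConfigSketch {
  bool VerifyModules;
  ConfigSketch() {
#ifndef NDEBUG
    VerifyModules = true;    // assert-enabled builds verify IR by default
#else
    VerifyModules = false;   // release builds skip verification
#endif
  }
};

int main() {
  std::printf("VerifyModules=%d\n", ConfigSketch().VerifyModules);
  return 0;
}
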
@@ -97,7 +108,7 @@ public:
return static_cast<char*>(RawMemory) + sizeof(GVMemoryBlock);
}
- virtual void deleted() {
+ void deleted() override {
// We allocated with operator new and with some extra memory hanging off the
// end, so don't just delete this. I'm not sure if this is actually
// required.
@@ -111,6 +122,10 @@ char *ExecutionEngine::getMemoryForGV(const GlobalVariable *GV) {
return GVMemoryBlock::Create(GV, *getDataLayout());
}
+void ExecutionEngine::addObjectFile(std::unique_ptr<object::ObjectFile> O) {
+ llvm_unreachable("ExecutionEngine subclass doesn't implement addObjectFile.");
+}
+
bool ExecutionEngine::removeModule(Module *M) {
for(SmallVectorImpl<Module *>::iterator I = Modules.begin(),
E = Modules.end(); I != E; ++I) {
@@ -129,19 +144,18 @@ Function *ExecutionEngine::FindFunctionNamed(const char *FnName) {
if (Function *F = Modules[i]->getFunction(FnName))
return F;
}
- return 0;
+ return nullptr;
}
-void *ExecutionEngineState::RemoveMapping(const MutexGuard &,
- const GlobalValue *ToUnmap) {
+void *ExecutionEngineState::RemoveMapping(const GlobalValue *ToUnmap) {
GlobalAddressMapTy::iterator I = GlobalAddressMap.find(ToUnmap);
void *OldVal;
// FIXME: This is silly, we shouldn't end up with a mapping -> 0 in the
// GlobalAddressMap.
if (I == GlobalAddressMap.end())
- OldVal = 0;
+ OldVal = nullptr;
else {
OldVal = I->second;
GlobalAddressMap.erase(I);
@@ -156,15 +170,15 @@ void ExecutionEngine::addGlobalMapping(const GlobalValue *GV, void *Addr) {
DEBUG(dbgs() << "JIT: Map \'" << GV->getName()
<< "\' to [" << Addr << "]\n";);
- void *&CurVal = EEState.getGlobalAddressMap(locked)[GV];
- assert((CurVal == 0 || Addr == 0) && "GlobalMapping already established!");
+ void *&CurVal = EEState.getGlobalAddressMap()[GV];
+ assert((!CurVal || !Addr) && "GlobalMapping already established!");
CurVal = Addr;
// If we are using the reverse mapping, add it too.
- if (!EEState.getGlobalAddressReverseMap(locked).empty()) {
+ if (!EEState.getGlobalAddressReverseMap().empty()) {
AssertingVH<const GlobalValue> &V =
- EEState.getGlobalAddressReverseMap(locked)[Addr];
- assert((V == 0 || GV == 0) && "GlobalMapping already established!");
+ EEState.getGlobalAddressReverseMap()[Addr];
+ assert((!V || !GV) && "GlobalMapping already established!");
V = GV;
}
}
@@ -172,42 +186,42 @@ void ExecutionEngine::addGlobalMapping(const GlobalValue *GV, void *Addr) {
void ExecutionEngine::clearAllGlobalMappings() {
MutexGuard locked(lock);
- EEState.getGlobalAddressMap(locked).clear();
- EEState.getGlobalAddressReverseMap(locked).clear();
+ EEState.getGlobalAddressMap().clear();
+ EEState.getGlobalAddressReverseMap().clear();
}
void ExecutionEngine::clearGlobalMappingsFromModule(Module *M) {
MutexGuard locked(lock);
for (Module::iterator FI = M->begin(), FE = M->end(); FI != FE; ++FI)
- EEState.RemoveMapping(locked, FI);
+ EEState.RemoveMapping(FI);
for (Module::global_iterator GI = M->global_begin(), GE = M->global_end();
GI != GE; ++GI)
- EEState.RemoveMapping(locked, GI);
+ EEState.RemoveMapping(GI);
}
void *ExecutionEngine::updateGlobalMapping(const GlobalValue *GV, void *Addr) {
MutexGuard locked(lock);
ExecutionEngineState::GlobalAddressMapTy &Map =
- EEState.getGlobalAddressMap(locked);
+ EEState.getGlobalAddressMap();
// Deleting from the mapping?
- if (Addr == 0)
- return EEState.RemoveMapping(locked, GV);
+ if (!Addr)
+ return EEState.RemoveMapping(GV);
void *&CurVal = Map[GV];
void *OldVal = CurVal;
- if (CurVal && !EEState.getGlobalAddressReverseMap(locked).empty())
- EEState.getGlobalAddressReverseMap(locked).erase(CurVal);
+ if (CurVal && !EEState.getGlobalAddressReverseMap().empty())
+ EEState.getGlobalAddressReverseMap().erase(CurVal);
CurVal = Addr;
// If we are using the reverse mapping, add it too.
- if (!EEState.getGlobalAddressReverseMap(locked).empty()) {
+ if (!EEState.getGlobalAddressReverseMap().empty()) {
AssertingVH<const GlobalValue> &V =
- EEState.getGlobalAddressReverseMap(locked)[Addr];
- assert((V == 0 || GV == 0) && "GlobalMapping already established!");
+ EEState.getGlobalAddressReverseMap()[Addr];
+ assert((!V || !GV) && "GlobalMapping already established!");
V = GV;
}
return OldVal;
@@ -217,25 +231,25 @@ void *ExecutionEngine::getPointerToGlobalIfAvailable(const GlobalValue *GV) {
MutexGuard locked(lock);
ExecutionEngineState::GlobalAddressMapTy::iterator I =
- EEState.getGlobalAddressMap(locked).find(GV);
- return I != EEState.getGlobalAddressMap(locked).end() ? I->second : 0;
+ EEState.getGlobalAddressMap().find(GV);
+ return I != EEState.getGlobalAddressMap().end() ? I->second : nullptr;
}
const GlobalValue *ExecutionEngine::getGlobalValueAtAddress(void *Addr) {
MutexGuard locked(lock);
// If we haven't computed the reverse mapping yet, do so first.
- if (EEState.getGlobalAddressReverseMap(locked).empty()) {
+ if (EEState.getGlobalAddressReverseMap().empty()) {
for (ExecutionEngineState::GlobalAddressMapTy::iterator
- I = EEState.getGlobalAddressMap(locked).begin(),
- E = EEState.getGlobalAddressMap(locked).end(); I != E; ++I)
- EEState.getGlobalAddressReverseMap(locked).insert(std::make_pair(
+ I = EEState.getGlobalAddressMap().begin(),
+ E = EEState.getGlobalAddressMap().end(); I != E; ++I)
+ EEState.getGlobalAddressReverseMap().insert(std::make_pair(
I->second, I->first));
}
std::map<void *, AssertingVH<const GlobalValue> >::iterator I =
- EEState.getGlobalAddressReverseMap(locked).find(Addr);
- return I != EEState.getGlobalAddressReverseMap(locked).end() ? I->second : 0;
+ EEState.getGlobalAddressReverseMap().find(Addr);
+ return I != EEState.getGlobalAddressReverseMap().end() ? I->second : nullptr;
}
namespace {
@@ -243,11 +257,11 @@ class ArgvArray {
char *Array;
std::vector<char*> Values;
public:
- ArgvArray() : Array(NULL) {}
+ ArgvArray() : Array(nullptr) {}
~ArgvArray() { clear(); }
void clear() {
delete[] Array;
- Array = NULL;
+ Array = nullptr;
for (size_t I = 0, E = Values.size(); I != E; ++I) {
delete[] Values[I];
}
@@ -283,7 +297,7 @@ void *ArgvArray::reset(LLVMContext &C, ExecutionEngine *EE,
}
// Null terminate it
- EE->StoreValueToMemory(PTOGV(0),
+ EE->StoreValueToMemory(PTOGV(nullptr),
(GenericValue*)(Array+InputArgv.size()*PtrSize),
SBytePtr);
return Array;
@@ -303,11 +317,11 @@ void ExecutionEngine::runStaticConstructorsDestructors(Module *module,
// Should be an array of '{ i32, void ()* }' structs. The first value is
// the init priority, which we ignore.
ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
- if (InitList == 0)
+ if (!InitList)
return;
for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) {
ConstantStruct *CS = dyn_cast<ConstantStruct>(InitList->getOperand(i));
- if (CS == 0) continue;
+ if (!CS) continue;
Constant *FP = CS->getOperand(1);
if (FP->isNullValue())
@@ -397,13 +411,14 @@ ExecutionEngine *ExecutionEngine::create(Module *M,
std::string *ErrorStr,
CodeGenOpt::Level OptLevel,
bool GVsWithCode) {
- EngineBuilder EB = EngineBuilder(M)
- .setEngineKind(ForceInterpreter
- ? EngineKind::Interpreter
- : EngineKind::JIT)
- .setErrorStr(ErrorStr)
- .setOptLevel(OptLevel)
- .setAllocateGVsWithCode(GVsWithCode);
+
+ EngineBuilder EB =
+ EngineBuilder(M)
+ .setEngineKind(ForceInterpreter ? EngineKind::Interpreter
+ : EngineKind::Either)
+ .setErrorStr(ErrorStr)
+ .setOptLevel(OptLevel)
+ .setAllocateGVsWithCode(GVsWithCode);
return EB.create();
}
@@ -418,10 +433,10 @@ ExecutionEngine *ExecutionEngine::createJIT(Module *M,
bool GVsWithCode,
Reloc::Model RM,
CodeModel::Model CMM) {
- if (ExecutionEngine::JITCtor == 0) {
+ if (!ExecutionEngine::JITCtor) {
if (ErrorStr)
*ErrorStr = "JIT has not been linked in.";
- return 0;
+ return nullptr;
}
// Use the defaults for extra parameters. Users can use EngineBuilder to
@@ -437,18 +452,39 @@ ExecutionEngine *ExecutionEngine::createJIT(Module *M,
// TODO: permit custom TargetOptions here
TargetMachine *TM = EB.selectTarget();
- if (!TM || (ErrorStr && ErrorStr->length() > 0)) return 0;
+ if (!TM || (ErrorStr && ErrorStr->length() > 0)) return nullptr;
return ExecutionEngine::JITCtor(M, ErrorStr, JMM, GVsWithCode, TM);
}
+void EngineBuilder::InitEngine() {
+ WhichEngine = EngineKind::Either;
+ ErrorStr = nullptr;
+ OptLevel = CodeGenOpt::Default;
+ MCJMM = nullptr;
+ JMM = nullptr;
+ Options = TargetOptions();
+ AllocateGVsWithCode = false;
+ RelocModel = Reloc::Default;
+ CMModel = CodeModel::JITDefault;
+ UseMCJIT = false;
+
+// IR module verification is enabled by default in debug builds, and disabled
+// by default in release builds.
+#ifndef NDEBUG
+ VerifyModules = true;
+#else
+ VerifyModules = false;
+#endif
+}
+
ExecutionEngine *EngineBuilder::create(TargetMachine *TM) {
- OwningPtr<TargetMachine> TheTM(TM); // Take ownership.
+ std::unique_ptr<TargetMachine> TheTM(TM); // Take ownership.
// Make sure we can resolve symbols in the program as well. The zero arg
// to the function tells DynamicLibrary to load the program, not a library.
- if (sys::DynamicLibrary::LoadLibraryPermanently(0, ErrorStr))
- return 0;
+ if (sys::DynamicLibrary::LoadLibraryPermanently(nullptr, ErrorStr))
+ return nullptr;
assert(!(JMM && MCJMM));
@@ -461,7 +497,7 @@ ExecutionEngine *EngineBuilder::create(TargetMachine *TM) {
else {
if (ErrorStr)
*ErrorStr = "Cannot create an interpreter with a memory manager.";
- return 0;
+ return nullptr;
}
}
@@ -470,7 +506,7 @@ ExecutionEngine *EngineBuilder::create(TargetMachine *TM) {
*ErrorStr =
"Cannot create a legacy JIT with a runtime dyld memory "
"manager.";
- return 0;
+ return nullptr;
}
// Unless the interpreter was explicitly selected or the JIT is not linked,
@@ -483,16 +519,17 @@ ExecutionEngine *EngineBuilder::create(TargetMachine *TM) {
<< " a different -march switch.\n";
}
- if (UseMCJIT && ExecutionEngine::MCJITCtor) {
- ExecutionEngine *EE =
- ExecutionEngine::MCJITCtor(M, ErrorStr, MCJMM ? MCJMM : JMM,
- AllocateGVsWithCode, TheTM.take());
- if (EE) return EE;
- } else if (ExecutionEngine::JITCtor) {
- ExecutionEngine *EE =
- ExecutionEngine::JITCtor(M, ErrorStr, JMM,
- AllocateGVsWithCode, TheTM.take());
- if (EE) return EE;
+ ExecutionEngine *EE = nullptr;
+ if (UseMCJIT && ExecutionEngine::MCJITCtor)
+ EE = ExecutionEngine::MCJITCtor(M, ErrorStr, MCJMM ? MCJMM : JMM,
+ AllocateGVsWithCode, TheTM.release());
+ else if (ExecutionEngine::JITCtor)
+ EE = ExecutionEngine::JITCtor(M, ErrorStr, JMM,
+ AllocateGVsWithCode, TheTM.release());
+
+ if (EE) {
+ EE->setVerifyModules(VerifyModules);
+ return EE;
}
}
@@ -503,16 +540,16 @@ ExecutionEngine *EngineBuilder::create(TargetMachine *TM) {
return ExecutionEngine::InterpCtor(M, ErrorStr);
if (ErrorStr)
*ErrorStr = "Interpreter has not been linked in.";
- return 0;
+ return nullptr;
}
- if ((WhichEngine & EngineKind::JIT) && ExecutionEngine::JITCtor == 0 &&
- ExecutionEngine::MCJITCtor == 0) {
+ if ((WhichEngine & EngineKind::JIT) && !ExecutionEngine::JITCtor &&
+ !ExecutionEngine::MCJITCtor) {
if (ErrorStr)
*ErrorStr = "JIT has not been linked in.";
}
- return 0;
+ return nullptr;
}
void *ExecutionEngine::getPointerToGlobal(const GlobalValue *GV) {
@@ -520,7 +557,7 @@ void *ExecutionEngine::getPointerToGlobal(const GlobalValue *GV) {
return getPointerToFunction(F);
MutexGuard locked(lock);
- if (void *P = EEState.getGlobalAddressMap(locked)[GV])
+ if (void *P = EEState.getGlobalAddressMap()[GV])
return P;
// Global variable might have been added since interpreter started.
@@ -530,7 +567,7 @@ void *ExecutionEngine::getPointerToGlobal(const GlobalValue *GV) {
else
llvm_unreachable("Global hasn't had an address allocated yet!");
- return EEState.getGlobalAddressMap(locked)[GV];
+ return EEState.getGlobalAddressMap()[GV];
}
/// \brief Converts a Constant* into a GenericValue, including handling of
@@ -590,8 +627,8 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
case Instruction::GetElementPtr: {
// Compute the index
GenericValue Result = getConstantValue(Op0);
- APInt Offset(TD->getPointerSizeInBits(), 0);
- cast<GEPOperator>(CE)->accumulateConstantOffset(*TD, Offset);
+ APInt Offset(DL->getPointerSizeInBits(), 0);
+ cast<GEPOperator>(CE)->accumulateConstantOffset(*DL, Offset);
char* tmp = (char*) Result.PointerVal;
Result = PTOGV(tmp + Offset.getSExtValue());
@@ -678,16 +715,16 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
}
case Instruction::PtrToInt: {
GenericValue GV = getConstantValue(Op0);
- uint32_t PtrWidth = TD->getTypeSizeInBits(Op0->getType());
+ uint32_t PtrWidth = DL->getTypeSizeInBits(Op0->getType());
assert(PtrWidth <= 64 && "Bad pointer width");
GV.IntVal = APInt(PtrWidth, uintptr_t(GV.PointerVal));
- uint32_t IntWidth = TD->getTypeSizeInBits(CE->getType());
+ uint32_t IntWidth = DL->getTypeSizeInBits(CE->getType());
GV.IntVal = GV.IntVal.zextOrTrunc(IntWidth);
return GV;
}
case Instruction::IntToPtr: {
GenericValue GV = getConstantValue(Op0);
- uint32_t PtrWidth = TD->getTypeSizeInBits(CE->getType());
+ uint32_t PtrWidth = DL->getTypeSizeInBits(CE->getType());
GV.IntVal = GV.IntVal.zextOrTrunc(PtrWidth);
assert(GV.IntVal.getBitWidth() <= 64 && "Bad pointer width");
GV.PointerVal = PointerTy(uintptr_t(GV.IntVal.getZExtValue()));
@@ -848,7 +885,7 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
break;
case Type::PointerTyID:
if (isa<ConstantPointerNull>(C))
- Result.PointerVal = 0;
+ Result.PointerVal = nullptr;
else if (const Function *F = dyn_cast<Function>(C))
Result = PTOGV(getPointerToFunctionOrStub(const_cast<Function*>(F)));
else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
@@ -1193,33 +1230,29 @@ void ExecutionEngine::emitGlobals() {
if (Modules.size() != 1) {
for (unsigned m = 0, e = Modules.size(); m != e; ++m) {
Module &M = *Modules[m];
- for (Module::const_global_iterator I = M.global_begin(),
- E = M.global_end(); I != E; ++I) {
- const GlobalValue *GV = I;
- if (GV->hasLocalLinkage() || GV->isDeclaration() ||
- GV->hasAppendingLinkage() || !GV->hasName())
+ for (const auto &GV : M.globals()) {
+ if (GV.hasLocalLinkage() || GV.isDeclaration() ||
+ GV.hasAppendingLinkage() || !GV.hasName())
continue;// Ignore external globals and globals with internal linkage.
const GlobalValue *&GVEntry =
- LinkedGlobalsMap[std::make_pair(GV->getName(), GV->getType())];
+ LinkedGlobalsMap[std::make_pair(GV.getName(), GV.getType())];
// If this is the first time we've seen this global, it is the canonical
// version.
if (!GVEntry) {
- GVEntry = GV;
+ GVEntry = &GV;
continue;
}
// If the existing global is strong, never replace it.
- if (GVEntry->hasExternalLinkage() ||
- GVEntry->hasDLLImportLinkage() ||
- GVEntry->hasDLLExportLinkage())
+ if (GVEntry->hasExternalLinkage())
continue;
// Otherwise, we know it's linkonce/weak, replace it if this is a strong
// symbol. FIXME is this right for common?
- if (GV->hasExternalLinkage() || GVEntry->hasExternalWeakLinkage())
- GVEntry = GV;
+ if (GV.hasExternalLinkage() || GVEntry->hasExternalWeakLinkage())
+ GVEntry = &GV;
}
}
}
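
The emitGlobals hunks switch from explicit const_global_iterator loops to range-based for over Module::globals(), which is why the code now takes &GV where a pointer is stored. A tiny sketch of the pattern with stand-in types:

#include <string>
#include <vector>

struct GlobalStub { std::string Name; bool IsDeclaration = false; };

struct ModuleStub {
  std::vector<GlobalStub> Globals;
  const std::vector<GlobalStub> &globals() const { return Globals; }
};

int countDefinitions(const ModuleStub &M) {
  int N = 0;
  for (const auto &GV : M.globals())     // replaces begin()/end() iterator pairs
    if (!GV.IsDeclaration)
      ++N;                               // take &GV when a pointer is needed
  return N;
}
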
@@ -1227,31 +1260,30 @@ void ExecutionEngine::emitGlobals() {
std::vector<const GlobalValue*> NonCanonicalGlobals;
for (unsigned m = 0, e = Modules.size(); m != e; ++m) {
Module &M = *Modules[m];
- for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I) {
+ for (const auto &GV : M.globals()) {
// In the multi-module case, see what this global maps to.
if (!LinkedGlobalsMap.empty()) {
if (const GlobalValue *GVEntry =
- LinkedGlobalsMap[std::make_pair(I->getName(), I->getType())]) {
+ LinkedGlobalsMap[std::make_pair(GV.getName(), GV.getType())]) {
// If something else is the canonical global, ignore this one.
- if (GVEntry != &*I) {
- NonCanonicalGlobals.push_back(I);
+ if (GVEntry != &GV) {
+ NonCanonicalGlobals.push_back(&GV);
continue;
}
}
}
- if (!I->isDeclaration()) {
- addGlobalMapping(I, getMemoryForGV(I));
+ if (!GV.isDeclaration()) {
+ addGlobalMapping(&GV, getMemoryForGV(&GV));
} else {
// External variable reference. Try to use the dynamic loader to
// get a pointer to it.
if (void *SymAddr =
- sys::DynamicLibrary::SearchForAddressOfSymbol(I->getName()))
- addGlobalMapping(I, SymAddr);
+ sys::DynamicLibrary::SearchForAddressOfSymbol(GV.getName()))
+ addGlobalMapping(&GV, SymAddr);
else {
report_fatal_error("Could not resolve external global address: "
- +I->getName());
+ +GV.getName());
}
}
}
@@ -1271,16 +1303,15 @@ void ExecutionEngine::emitGlobals() {
// Now that all of the globals are set up in memory, loop through them all
// and initialize their contents.
- for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I) {
- if (!I->isDeclaration()) {
+ for (const auto &GV : M.globals()) {
+ if (!GV.isDeclaration()) {
if (!LinkedGlobalsMap.empty()) {
if (const GlobalValue *GVEntry =
- LinkedGlobalsMap[std::make_pair(I->getName(), I->getType())])
- if (GVEntry != &*I) // Not the canonical variable.
+ LinkedGlobalsMap[std::make_pair(GV.getName(), GV.getType())])
+ if (GVEntry != &GV) // Not the canonical variable.
continue;
}
- EmitGlobalVariable(I);
+ EmitGlobalVariable(&GV);
}
}
}
@@ -1292,12 +1323,12 @@ void ExecutionEngine::emitGlobals() {
void ExecutionEngine::EmitGlobalVariable(const GlobalVariable *GV) {
void *GA = getPointerToGlobalIfAvailable(GV);
- if (GA == 0) {
+ if (!GA) {
// If it's not already specified, allocate memory for the global.
GA = getMemoryForGV(GV);
// If we failed to allocate memory for this global, return.
- if (GA == 0) return;
+ if (!GA) return;
addGlobalMapping(GV, GA);
}
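
EngineBuilder::create above now holds the TargetMachine in a std::unique_ptr and passes ownership on with release() where the old code used OwningPtr::take(). A sketch of that hand-off with stand-in types; the point is that ownership is transferred exactly once and nothing double-frees:

#include <memory>

struct TargetMachineStub { };

struct EngineStub {
  explicit EngineStub(TargetMachineStub *T) : Owned(T) {}  // constructor takes ownership
  std::unique_ptr<TargetMachineStub> Owned;
};

EngineStub *createEngineSketch(TargetMachineStub *Raw) {
  std::unique_ptr<TargetMachineStub> TheTM(Raw);  // take ownership up front
  return new EngineStub(TheTM.release());         // relinquish it to the engine
}
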
diff --git a/contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp b/contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
index 2d34eea..6ff1e7a 100644
--- a/contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "jit"
#include "llvm-c/ExecutionEngine.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/GenericValue.h"
@@ -23,17 +22,11 @@
using namespace llvm;
+#define DEBUG_TYPE "jit"
+
// Wrapping the C bindings types.
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(GenericValue, LLVMGenericValueRef)
-inline DataLayout *unwrap(LLVMTargetDataRef P) {
- return reinterpret_cast<DataLayout*>(P);
-}
-
-inline LLVMTargetDataRef wrap(const DataLayout *P) {
- return reinterpret_cast<LLVMTargetDataRef>(const_cast<DataLayout*>(P));
-}
-
inline TargetLibraryInfo *unwrap(LLVMTargetLibraryInfoRef P) {
return reinterpret_cast<TargetLibraryInfo*>(P);
}
@@ -43,6 +36,11 @@ inline LLVMTargetLibraryInfoRef wrap(const TargetLibraryInfo *P) {
return reinterpret_cast<LLVMTargetLibraryInfoRef>(X);
}
+inline LLVMTargetMachineRef wrap(const TargetMachine *P) {
+ return
+ reinterpret_cast<LLVMTargetMachineRef>(const_cast<TargetMachine*>(P));
+}
+
/*===-- Operations on generic values --------------------------------------===*/
LLVMGenericValueRef LLVMCreateGenericValueOfInt(LLVMTypeRef Ty,
@@ -323,6 +321,11 @@ LLVMTargetDataRef LLVMGetExecutionEngineTargetData(LLVMExecutionEngineRef EE) {
return wrap(unwrap(EE)->getDataLayout());
}
+LLVMTargetMachineRef
+LLVMGetExecutionEngineTargetMachine(LLVMExecutionEngineRef EE) {
+ return wrap(unwrap(EE)->getTargetMachine());
+}
+
void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global,
void* Addr) {
unwrap(EE)->addGlobalMapping(unwrap<GlobalValue>(Global), Addr);
@@ -350,17 +353,17 @@ public:
SimpleBindingMemoryManager(const SimpleBindingMMFunctions& Functions,
void *Opaque);
virtual ~SimpleBindingMemoryManager();
-
- virtual uint8_t *allocateCodeSection(
- uintptr_t Size, unsigned Alignment, unsigned SectionID,
- StringRef SectionName);
- virtual uint8_t *allocateDataSection(
- uintptr_t Size, unsigned Alignment, unsigned SectionID,
- StringRef SectionName, bool isReadOnly);
+ uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName) override;
+
+ uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID, StringRef SectionName,
+ bool isReadOnly) override;
+
+ bool finalizeMemory(std::string *ErrMsg) override;
- virtual bool finalizeMemory(std::string *ErrMsg);
-
private:
SimpleBindingMMFunctions Functions;
void *Opaque;
@@ -400,7 +403,7 @@ uint8_t *SimpleBindingMemoryManager::allocateDataSection(
}
bool SimpleBindingMemoryManager::finalizeMemory(std::string *ErrMsg) {
- char *errMsgCString = 0;
+ char *errMsgCString = nullptr;
bool result = Functions.FinalizeMemory(Opaque, &errMsgCString);
assert((result || !errMsgCString) &&
"Did not expect an error message if FinalizeMemory succeeded");
@@ -423,7 +426,7 @@ LLVMMCJITMemoryManagerRef LLVMCreateSimpleMCJITMemoryManager(
if (!AllocateCodeSection || !AllocateDataSection || !FinalizeMemory ||
!Destroy)
- return NULL;
+ return nullptr;
SimpleBindingMMFunctions functions;
functions.AllocateCodeSection = AllocateCodeSection;
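
SimpleBindingMemoryManager's reimplemented virtuals are rewritten above to drop the leading virtual and add override, so a mismatched signature now fails to compile instead of silently declaring a new function. A reduced sketch of the pattern (the base class here is a stand-in, not RTDyldMemoryManager):

#include <cstdint>
#include <string>

struct MemoryManagerBase {
  virtual ~MemoryManagerBase() {}
  virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment) = 0;
  virtual bool finalizeMemory(std::string *ErrMsg) = 0;
};

struct BindingMemoryManagerSketch : MemoryManagerBase {
  uint8_t *allocateCodeSection(uintptr_t, unsigned) override { return nullptr; }
  bool finalizeMemory(std::string *ErrMsg) override {
    if (ErrMsg)
      ErrMsg->clear();
    return false;                        // false = success in this sketch
  }
};
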
diff --git a/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp
index 7dc295f..4e22a8b 100644
--- a/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp
@@ -15,12 +15,10 @@
#include "llvm/Config/config.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
-#define DEBUG_TYPE "amplifier-jit-event-listener"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Metadata.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/ExecutionEngine/ObjectImage.h"
@@ -28,19 +26,21 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Errno.h"
-#include "llvm/Support/ValueHandle.h"
+#include "llvm/IR/ValueHandle.h"
#include "EventListenerCommon.h"
#include "IntelJITEventsWrapper.h"
using namespace llvm;
using namespace llvm::jitprofiling;
+#define DEBUG_TYPE "amplifier-jit-event-listener"
+
namespace {
class IntelJITEventListener : public JITEventListener {
typedef DenseMap<void*, unsigned int> MethodIDMap;
- OwningPtr<IntelJITEventsWrapper> Wrapper;
+ std::unique_ptr<IntelJITEventsWrapper> Wrapper;
MethodIDMap MethodIDs;
FilenameCache Filenames;
@@ -86,7 +86,7 @@ static LineNumberInfo DILineInfoToIntelJITFormat(uintptr_t StartAddress,
LineNumberInfo Result;
Result.Offset = Address - StartAddress;
- Result.LineNumber = Line.getLine();
+ Result.LineNumber = Line.Line;
return Result;
}
@@ -194,11 +194,10 @@ void IntelJITEventListener::NotifyObjectEmitted(const ObjectImage &Obj) {
MethodAddressVector Functions;
// Use symbol info to iterate functions in the object.
- error_code ec;
for (object::symbol_iterator I = Obj.begin_symbols(),
E = Obj.end_symbols();
- I != E && !ec;
- I.increment(ec)) {
+ I != E;
+ ++I) {
std::vector<LineNumberInfo> LineInfo;
std::string SourceFileName;
@@ -234,8 +233,8 @@ void IntelJITEventListener::NotifyObjectEmitted(const ObjectImage &Obj) {
FunctionMessage.line_number_size = 0;
FunctionMessage.line_number_table = 0;
} else {
- SourceFileName = Lines.front().second.getFileName();
- FunctionMessage.source_file_name = (char *)SourceFileName.c_str();
+ SourceFileName = Lines.front().second.FileName;
+ FunctionMessage.source_file_name = const_cast<char *>(SourceFileName.c_str());
FunctionMessage.line_number_size = LineInfo.size();
FunctionMessage.line_number_table = &*LineInfo.begin();
}
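
The listener hunks above track the change of DILineInfo from getter calls (getLine(), getFileName()) to plain public fields (Line, FileName), and the switch from error_code-driven symbol_iterator increments to ordinary ++I. A stand-in sketch of the field-access side only; LineInfoStub is illustrative, not LLVM's type:

#include <cstdint>
#include <string>

struct LineInfoStub {          // stand-in for the 3.5-era DILineInfo shape
  std::string FileName;
  uint32_t Line = 0;
};

uint32_t lineForJITFormat(const LineInfoStub &LI) {
  return LI.Line;              // previously LI.getLine()
}
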
diff --git a/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h
index f08e287..8d16ee8 100644
--- a/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h
+++ b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h
@@ -164,10 +164,10 @@ typedef struct _iJIT_Method_NIDS
typedef struct _LineNumberInfo
{
- /* x86 Offset from the begining of the method*/
- unsigned int Offset;
-
- /* source line number from the begining of the source file */
+ /* x86 Offset from the beginning of the method*/
+ unsigned int Offset;
+
+ /* source line number from the beginning of the source file */
unsigned int LineNumber;
} *pLineNumberInfo, LineNumberInfo;
@@ -191,9 +191,9 @@ typedef struct _iJIT_Method_Load
unsigned int method_size;
/* Line Table size in number of entries - Zero if none */
- unsigned int line_number_size;
-
- /* Pointer to the begining of the line numbers info array */
+ unsigned int line_number_size;
+
+ /* Pointer to the beginning of the line numbers info array */
pLineNumberInfo line_number_table;
/* unique class ID */
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
index 5de0659..93bb2d1 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -11,23 +11,24 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "interpreter"
#include "Interpreter.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
+#define DEBUG_TYPE "interpreter"
+
STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed");
static cl::opt<bool> PrintVolatile("interpreter-print-volatile", cl::Hidden,
@@ -57,7 +58,7 @@ static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
IMPLEMENT_BINARY_OPERATOR(+, Double);
default:
dbgs() << "Unhandled type for FAdd instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
@@ -68,7 +69,7 @@ static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
IMPLEMENT_BINARY_OPERATOR(-, Double);
default:
dbgs() << "Unhandled type for FSub instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
@@ -79,7 +80,7 @@ static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
IMPLEMENT_BINARY_OPERATOR(*, Double);
default:
dbgs() << "Unhandled type for FMul instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
@@ -90,7 +91,7 @@ static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
IMPLEMENT_BINARY_OPERATOR(/, Double);
default:
dbgs() << "Unhandled type for FDiv instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
@@ -105,7 +106,7 @@ static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
break;
default:
dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
@@ -142,7 +143,7 @@ static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
IMPLEMENT_POINTER_ICMP(==);
default:
dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -156,7 +157,7 @@ static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
IMPLEMENT_POINTER_ICMP(!=);
default:
dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -170,7 +171,7 @@ static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
IMPLEMENT_POINTER_ICMP(<);
default:
dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -184,7 +185,7 @@ static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
IMPLEMENT_POINTER_ICMP(<);
default:
dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -198,7 +199,7 @@ static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
IMPLEMENT_POINTER_ICMP(>);
default:
dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -212,7 +213,7 @@ static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
IMPLEMENT_POINTER_ICMP(>);
default:
dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -226,7 +227,7 @@ static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
IMPLEMENT_POINTER_ICMP(<=);
default:
dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -240,7 +241,7 @@ static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
IMPLEMENT_POINTER_ICMP(<=);
default:
dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -254,7 +255,7 @@ static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
IMPLEMENT_POINTER_ICMP(>=);
default:
dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -268,7 +269,7 @@ static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
IMPLEMENT_POINTER_ICMP(>=);
default:
dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -293,7 +294,7 @@ void Interpreter::visitICmpInst(ICmpInst &I) {
case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break;
default:
dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
SetValue(&I, R, SF);
@@ -329,7 +330,7 @@ static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
IMPLEMENT_VECTOR_FCMP(==);
default:
dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -385,7 +386,7 @@ static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
IMPLEMENT_VECTOR_FCMP(!=);
default:
dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
// in vector case mask out NaN elements
if (Ty->isVectorTy())
@@ -405,7 +406,7 @@ static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
IMPLEMENT_VECTOR_FCMP(<=);
default:
dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -419,7 +420,7 @@ static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
IMPLEMENT_VECTOR_FCMP(>=);
default:
dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -433,7 +434,7 @@ static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
IMPLEMENT_VECTOR_FCMP(<);
default:
dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -447,7 +448,7 @@ static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
IMPLEMENT_VECTOR_FCMP(>);
default:
dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -615,7 +616,7 @@ void Interpreter::visitFCmpInst(FCmpInst &I) {
switch (I.getPredicate()) {
default:
dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
break;
case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false);
break;
@@ -672,7 +673,7 @@ static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
case FCmpInst::FCMP_TRUE: return executeFCMP_BOOL(Src1, Src2, Ty, true);
default:
dbgs() << "Unhandled Cmp predicate\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
@@ -726,7 +727,7 @@ void Interpreter::visitBinaryOperator(BinaryOperator &I) {
switch(I.getOpcode()){
default:
dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
break;
case Instruction::Add: INTEGER_VECTOR_OPERATION(+) break;
case Instruction::Sub: INTEGER_VECTOR_OPERATION(-) break;
@@ -754,7 +755,7 @@ void Interpreter::visitBinaryOperator(BinaryOperator &I) {
fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
else {
dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
break;
@@ -763,7 +764,7 @@ void Interpreter::visitBinaryOperator(BinaryOperator &I) {
switch (I.getOpcode()) {
default:
dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
break;
case Instruction::Add: R.IntVal = Src1.IntVal + Src2.IntVal; break;
case Instruction::Sub: R.IntVal = Src1.IntVal - Src2.IntVal; break;
@@ -896,7 +897,7 @@ void Interpreter::visitSwitchInst(SwitchInst &I) {
GenericValue CondVal = getOperandValue(Cond, SF);
// Check to see if any of the cases match...
- BasicBlock *Dest = 0;
+ BasicBlock *Dest = nullptr;
for (SwitchInst::CaseIt i = I.case_begin(), e = I.case_end(); i != e; ++i) {
GenericValue CaseVal = getOperandValue(i.getCaseValue(), SF);
if (executeICMP_EQ(CondVal, CaseVal, ElTy).IntVal != 0) {
@@ -979,7 +980,7 @@ void Interpreter::visitAllocaInst(AllocaInst &I) {
<< uintptr_t(Memory) << '\n');
GenericValue Result = PTOGV(Memory);
- assert(Result.PointerVal != 0 && "Null pointer returned by malloc!");
+ assert(Result.PointerVal && "Null pointer returned by malloc!");
SetValue(&I, Result, SF);
if (I.getOpcode() == Instruction::Alloca)
@@ -1120,7 +1121,7 @@ void Interpreter::visitCallSite(CallSite CS) {
callFunction((Function*)GVTOP(SRC), ArgVals);
}
-// auxilary function for shift operations
+// auxiliary function for shift operations
static unsigned getShiftAmount(uint64_t orgShiftAmount,
llvm::APInt valueToShift) {
unsigned valueWidth = valueToShift.getBitWidth();
@@ -1732,7 +1733,7 @@ void Interpreter::visitVAArgInst(VAArgInst &I) {
IMPLEMENT_VAARG(Double);
default:
dbgs() << "Unhandled dest type for vaarg instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
// Set the Value of this Instruction.
@@ -1756,7 +1757,7 @@ void Interpreter::visitExtractElementInst(ExtractElementInst &I) {
default:
dbgs() << "Unhandled destination type for extractelement instruction: "
<< *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
break;
case Type::IntegerTyID:
Dest.IntVal = Src1.AggregateVal[indx].IntVal;
@@ -2073,7 +2074,7 @@ GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
//
void Interpreter::callFunction(Function *F,
const std::vector<GenericValue> &ArgVals) {
- assert((ECStack.empty() || ECStack.back().Caller.getInstruction() == 0 ||
+ assert((ECStack.empty() || !ECStack.back().Caller.getInstruction() ||
ECStack.back().Caller.arg_size() == ArgVals.size()) &&
"Incorrect number of arguments passed into function call!");
// Make a new stack frame... and fill it in.
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp b/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
index a03c7f5..671bbee 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
@@ -98,13 +98,13 @@ static ExFunc lookupFunction(const Function *F) {
sys::ScopedLock Writer(*FunctionsLock);
ExFunc FnPtr = FuncNames[ExtName];
- if (FnPtr == 0)
+ if (!FnPtr)
FnPtr = FuncNames["lle_X_" + F->getName().str()];
- if (FnPtr == 0) // Try calling a generic function... if it exists...
+ if (!FnPtr) // Try calling a generic function... if it exists...
FnPtr = (ExFunc)(intptr_t)
sys::DynamicLibrary::SearchForAddressOfSymbol("lle_X_" +
F->getName().str());
- if (FnPtr != 0)
+ if (FnPtr)
ExportedFunctions->insert(std::make_pair(F, FnPtr)); // Cache for later
return FnPtr;
}
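
The lookupFunction hunk above only swaps explicit zero comparisons for boolean tests, but it shows the resolution order: the explicit table first, then the lle_X_-prefixed name, then the process's own symbols, with any hit cached for later calls. A self-contained sketch of that order; the map names and the SearchProcess callback are stand-ins:

#include <map>
#include <string>

using ExFunc = void (*)();

static std::map<std::string, ExFunc> FuncNames;      // explicit registrations
static std::map<std::string, ExFunc> ExportedFuncs;  // cache of resolved functions

ExFunc lookupFunctionSketch(const std::string &Name,
                            ExFunc (*SearchProcess)(const std::string &)) {
  ExFunc FnPtr = FuncNames[Name];
  if (!FnPtr)
    FnPtr = FuncNames["lle_X_" + Name];
  if (!FnPtr)                                // last resort: ask the process
    FnPtr = SearchProcess("lle_X_" + Name);
  if (FnPtr)
    ExportedFuncs[Name] = FnPtr;             // cache for later lookups
  return FnPtr;
}
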
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp b/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
index 9ee9d94..814efcc 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
@@ -34,9 +34,12 @@ extern "C" void LLVMLinkInInterpreter() { }
///
ExecutionEngine *Interpreter::create(Module *M, std::string* ErrStr) {
// Tell this Module to materialize everything and release the GVMaterializer.
- if (M->MaterializeAllPermanently(ErrStr))
+ if (std::error_code EC = M->materializeAllPermanently()) {
+ if (ErrStr)
+ *ErrStr = EC.message();
// We got an error, just return 0
- return 0;
+ return nullptr;
+ }
return new Interpreter(M);
}
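
Interpreter::create above moves from a bool-returning MaterializeAllPermanently(ErrStr) to a std::error_code result whose message is copied into the caller-supplied string. A small sketch of that error-propagation pattern; materializeSketch stands in for the module call:

#include <string>
#include <system_error>

std::error_code materializeSketch(bool Fail) {   // stand-in for materializeAllPermanently()
  return Fail ? std::make_error_code(std::errc::io_error) : std::error_code();
}

void *createSketch(bool Fail, std::string *ErrStr) {
  if (std::error_code EC = materializeSketch(Fail)) {
    if (ErrStr)
      *ErrStr = EC.message();    // surface the failure reason to the caller
    return nullptr;              // creation failed
  }
  static int Token;
  return &Token;                 // placeholder for a real engine object
}
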
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h b/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
index 98269ef..2145cde 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
@@ -16,10 +16,10 @@
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
-#include "llvm/InstVisitor.h"
-#include "llvm/Support/CallSite.h"
+#include "llvm/IR/InstVisitor.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -108,29 +108,29 @@ public:
/// create - Create an interpreter ExecutionEngine. This can never fail.
///
- static ExecutionEngine *create(Module *M, std::string *ErrorStr = 0);
+ static ExecutionEngine *create(Module *M, std::string *ErrorStr = nullptr);
/// run - Start execution with the specified function and arguments.
///
- virtual GenericValue runFunction(Function *F,
- const std::vector<GenericValue> &ArgValues);
+ GenericValue runFunction(Function *F,
+ const std::vector<GenericValue> &ArgValues) override;
- virtual void *getPointerToNamedFunction(const std::string &Name,
- bool AbortOnFailure = true) {
+ void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true) override {
// FIXME: not implemented.
- return 0;
+ return nullptr;
}
/// recompileAndRelinkFunction - For the interpreter, functions are always
/// up-to-date.
///
- virtual void *recompileAndRelinkFunction(Function *F) {
+ void *recompileAndRelinkFunction(Function *F) override {
return getPointerToFunction(F);
}
/// freeMachineCodeForFunction - The interpreter does not generate any code.
///
- void freeMachineCodeForFunction(Function *F) { }
+ void freeMachineCodeForFunction(Function *F) override { }
// Methods used to execute code:
// Place a call on the stack
@@ -212,8 +212,8 @@ private: // Helper functions
//
void SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF);
- void *getPointerToFunction(Function *F) { return (void*)F; }
- void *getPointerToBasicBlock(BasicBlock *BB) { return (void*)BB; }
+ void *getPointerToFunction(Function *F) override { return (void*)F; }
+ void *getPointerToBasicBlock(BasicBlock *BB) override { return (void*)BB; }
void initializeExecutionEngine() { }
void initializeExternalFunctions();
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
index 246a675..83ec978 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
@@ -26,6 +26,7 @@
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/ErrorHandling.h"
@@ -78,7 +79,7 @@ ExecutionEngine *JIT::createJIT(Module *M,
// Try to register the program as a source of symbols to resolve against.
//
// FIXME: Don't do this here.
- sys::DynamicLibrary::LoadLibraryPermanently(0, NULL);
+ sys::DynamicLibrary::LoadLibraryPermanently(nullptr, nullptr);
// If the target supports JIT code generation, create the JIT.
if (TargetJITInfo *TJ = TM->getJITInfo()) {
@@ -86,7 +87,7 @@ ExecutionEngine *JIT::createJIT(Module *M,
} else {
if (ErrorStr)
*ErrorStr = "target does not support JIT code generation";
- return 0;
+ return nullptr;
}
}
@@ -150,12 +151,13 @@ JIT::JIT(Module *M, TargetMachine &tm, TargetJITInfo &tji,
// Add target data
MutexGuard locked(lock);
- FunctionPassManager &PM = jitstate->getPM(locked);
- PM.add(new DataLayout(*TM.getDataLayout()));
+ FunctionPassManager &PM = jitstate->getPM();
+ M->setDataLayout(TM.getDataLayout());
+ PM.add(new DataLayoutPass(M));
// Turn the machine code intermediate representation into bytes in memory that
// may be executed.
- if (TM.addPassesToEmitMachineCode(PM, *JCE)) {
+ if (TM.addPassesToEmitMachineCode(PM, *JCE, !getVerifyModules())) {
report_fatal_error("Target does not support machine code emission!");
}
@@ -182,12 +184,13 @@ void JIT::addModule(Module *M) {
jitstate = new JITState(M);
- FunctionPassManager &PM = jitstate->getPM(locked);
- PM.add(new DataLayout(*TM.getDataLayout()));
+ FunctionPassManager &PM = jitstate->getPM();
+ M->setDataLayout(TM.getDataLayout());
+ PM.add(new DataLayoutPass(M));
// Turn the machine code intermediate representation into bytes in memory
// that may be executed.
- if (TM.addPassesToEmitMachineCode(PM, *JCE)) {
+ if (TM.addPassesToEmitMachineCode(PM, *JCE, !getVerifyModules())) {
report_fatal_error("Target does not support machine code emission!");
}
@@ -207,18 +210,19 @@ bool JIT::removeModule(Module *M) {
if (jitstate && jitstate->getModule() == M) {
delete jitstate;
- jitstate = 0;
+ jitstate = nullptr;
}
if (!jitstate && !Modules.empty()) {
jitstate = new JITState(Modules[0]);
- FunctionPassManager &PM = jitstate->getPM(locked);
- PM.add(new DataLayout(*TM.getDataLayout()));
+ FunctionPassManager &PM = jitstate->getPM();
+ M->setDataLayout(TM.getDataLayout());
+ PM.add(new DataLayoutPass(M));
// Turn the machine code intermediate representation into bytes in memory
// that may be executed.
- if (TM.addPassesToEmitMachineCode(PM, *JCE)) {
+ if (TM.addPassesToEmitMachineCode(PM, *JCE, !getVerifyModules())) {
report_fatal_error("Target does not support machine code emission!");
}
@@ -349,7 +353,7 @@ GenericValue JIT::runFunction(Function *F,
// currently don't support varargs.
SmallVector<Value*, 8> Args;
for (unsigned i = 0, e = ArgValues.size(); i != e; ++i) {
- Constant *C = 0;
+ Constant *C = nullptr;
Type *ArgTy = FTy->getParamType(i);
const GenericValue &AV = ArgValues[i];
switch (ArgTy->getTypeID()) {
@@ -402,13 +406,13 @@ GenericValue JIT::runFunction(Function *F,
}
void JIT::RegisterJITEventListener(JITEventListener *L) {
- if (L == NULL)
+ if (!L)
return;
MutexGuard locked(lock);
EventListeners.push_back(L);
}
void JIT::UnregisterJITEventListener(JITEventListener *L) {
- if (L == NULL)
+ if (!L)
return;
MutexGuard locked(lock);
std::vector<JITEventListener*>::reverse_iterator I=
@@ -446,9 +450,8 @@ void JIT::runJITOnFunction(Function *F, MachineCodeInfo *MCI) {
MachineCodeInfo *const MCI;
public:
MCIListener(MachineCodeInfo *mci) : MCI(mci) {}
- virtual void NotifyFunctionEmitted(const Function &,
- void *Code, size_t Size,
- const EmittedFunctionDetails &) {
+ void NotifyFunctionEmitted(const Function &, void *Code, size_t Size,
+ const EmittedFunctionDetails &) override {
MCI->setAddress(Code);
MCI->setSize(Size);
}
@@ -457,41 +460,41 @@ void JIT::runJITOnFunction(Function *F, MachineCodeInfo *MCI) {
if (MCI)
RegisterJITEventListener(&MCIL);
- runJITOnFunctionUnlocked(F, locked);
+ runJITOnFunctionUnlocked(F);
if (MCI)
UnregisterJITEventListener(&MCIL);
}
-void JIT::runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked) {
+void JIT::runJITOnFunctionUnlocked(Function *F) {
assert(!isAlreadyCodeGenerating && "Error: Recursive compilation detected!");
- jitTheFunction(F, locked);
+ jitTheFunctionUnlocked(F);
// If the function referred to another function that had not yet been
// read from bitcode, and we are jitting non-lazily, emit it now.
- while (!jitstate->getPendingFunctions(locked).empty()) {
- Function *PF = jitstate->getPendingFunctions(locked).back();
- jitstate->getPendingFunctions(locked).pop_back();
+ while (!jitstate->getPendingFunctions().empty()) {
+ Function *PF = jitstate->getPendingFunctions().back();
+ jitstate->getPendingFunctions().pop_back();
assert(!PF->hasAvailableExternallyLinkage() &&
"Externally-defined function should not be in pending list.");
- jitTheFunction(PF, locked);
+ jitTheFunctionUnlocked(PF);
// Now that the function has been jitted, ask the JITEmitter to rewrite
// the stub with real address of the function.
- updateFunctionStub(PF);
+ updateFunctionStubUnlocked(PF);
}
}
-void JIT::jitTheFunction(Function *F, const MutexGuard &locked) {
+void JIT::jitTheFunctionUnlocked(Function *F) {
isAlreadyCodeGenerating = true;
- jitstate->getPM(locked).run(*F);
+ jitstate->getPM().run(*F);
isAlreadyCodeGenerating = false;
// clear basic block addresses after this function is done
- getBasicBlockAddressMap(locked).clear();
+ getBasicBlockAddressMap().clear();
}
/// getPointerToFunction - This method is used to get the address of the
@@ -523,7 +526,7 @@ void *JIT::getPointerToFunction(Function *F) {
return Addr;
}
- runJITOnFunctionUnlocked(F, locked);
+ runJITOnFunctionUnlocked(F);
void *Addr = getPointerToGlobalIfAvailable(F);
assert(Addr && "Code generation didn't add function to GlobalAddress table!");
@@ -534,9 +537,9 @@ void JIT::addPointerToBasicBlock(const BasicBlock *BB, void *Addr) {
MutexGuard locked(lock);
BasicBlockAddressMapTy::iterator I =
- getBasicBlockAddressMap(locked).find(BB);
- if (I == getBasicBlockAddressMap(locked).end()) {
- getBasicBlockAddressMap(locked)[BB] = Addr;
+ getBasicBlockAddressMap().find(BB);
+ if (I == getBasicBlockAddressMap().end()) {
+ getBasicBlockAddressMap()[BB] = Addr;
} else {
// ignore repeats: some BBs can be split into few MBBs?
}
@@ -544,7 +547,7 @@ void JIT::addPointerToBasicBlock(const BasicBlock *BB, void *Addr) {
void JIT::clearPointerToBasicBlock(const BasicBlock *BB) {
MutexGuard locked(lock);
- getBasicBlockAddressMap(locked).erase(BB);
+ getBasicBlockAddressMap().erase(BB);
}
void *JIT::getPointerToBasicBlock(BasicBlock *BB) {
@@ -555,8 +558,8 @@ void *JIT::getPointerToBasicBlock(BasicBlock *BB) {
MutexGuard locked(lock);
BasicBlockAddressMapTy::iterator I =
- getBasicBlockAddressMap(locked).find(BB);
- if (I != getBasicBlockAddressMap(locked).end()) {
+ getBasicBlockAddressMap().find(BB);
+ if (I != getBasicBlockAddressMap().end()) {
return I->second;
} else {
llvm_unreachable("JIT does not have BB address for address-of-label, was"
@@ -581,7 +584,7 @@ void *JIT::getPointerToNamedFunction(const std::string &Name,
report_fatal_error("Program used external function '"+Name+
"' which could not be resolved!");
}
- return 0;
+ return nullptr;
}
@@ -601,7 +604,7 @@ void *JIT::getOrEmitGlobalVariable(const GlobalVariable *GV) {
return (void*)&__dso_handle;
#endif
Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(GV->getName());
- if (Ptr == 0) {
+ if (!Ptr) {
report_fatal_error("Could not resolve external global address: "
+GV->getName());
}
@@ -626,10 +629,10 @@ void *JIT::recompileAndRelinkFunction(Function *F) {
void *OldAddr = getPointerToGlobalIfAvailable(F);
// If it's not already compiled there is no reason to patch it up.
- if (OldAddr == 0) { return getPointerToFunction(F); }
+ if (!OldAddr) return getPointerToFunction(F);
// Delete the old function mapping.
- addGlobalMapping(F, 0);
+ addGlobalMapping(F, nullptr);
// Recodegen the function
runJITOnFunction(F);
@@ -685,7 +688,7 @@ char* JIT::getMemoryForGV(const GlobalVariable* GV) {
void JIT::addPendingFunction(Function *F) {
MutexGuard locked(lock);
- jitstate->getPendingFunctions(locked).push_back(F);
+ jitstate->getPendingFunctions().push_back(F);
}
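
The JIT.cpp hunks above thread the new verification flag into code generation: each call to addPassesToEmitMachineCode now receives !getVerifyModules() as its last argument, and the module's DataLayout is installed via DataLayoutPass. A sketch of the flag inversion only; the stub below assumes the extra parameter means "disable verification", which is how the hunk reads:

#include <cassert>

struct PassManagerStub { };

// Stand-in for TargetMachine::addPassesToEmitMachineCode; returns true on failure.
bool addPassesToEmitMachineCodeSketch(PassManagerStub &, bool DisableVerify) {
  (void)DisableVerify;
  return false;
}

void configureCodegenSketch(PassManagerStub &PM, bool VerifyModules) {
  bool Failed =
      addPassesToEmitMachineCodeSketch(PM, /*DisableVerify=*/!VerifyModules);
  assert(!Failed && "Target does not support machine code emission!");
  (void)Failed;                  // keep release builds warning-free
}
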
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.h b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.h
index 2ae155b..69a7c36 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.h
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.h
@@ -15,8 +15,8 @@
#define JIT_H
#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/PassManager.h"
-#include "llvm/Support/ValueHandle.h"
namespace llvm {
@@ -39,12 +39,12 @@ private:
public:
explicit JITState(Module *M) : PM(M), M(M) {}
- FunctionPassManager &getPM(const MutexGuard &L) {
+ FunctionPassManager &getPM() {
return PM;
}
Module *getModule() const { return M; }
- std::vector<AssertingVH<Function> > &getPendingFunctions(const MutexGuard &L){
+ std::vector<AssertingVH<Function> > &getPendingFunctions() {
return PendingFunctions;
}
};
@@ -106,16 +106,16 @@ public:
RM, CMM);
}
- virtual void addModule(Module *M);
+ void addModule(Module *M) override;
/// removeModule - Remove a Module from the list of modules. Returns true if
/// M is found.
- virtual bool removeModule(Module *M);
+ bool removeModule(Module *M) override;
/// runFunction - Start execution with the specified function and arguments.
///
- virtual GenericValue runFunction(Function *F,
- const std::vector<GenericValue> &ArgValues);
+ GenericValue runFunction(Function *F,
+ const std::vector<GenericValue> &ArgValues) override;
/// getPointerToNamedFunction - This method returns the address of the
/// specified function by using the MemoryManager. As such it is only
@@ -125,8 +125,8 @@ public:
/// found, this function silently returns a null pointer. Otherwise,
/// it prints a message to stderr and aborts.
///
- virtual void *getPointerToNamedFunction(const std::string &Name,
- bool AbortOnFailure = true);
+ void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true) override;
// CompilationCallback - Invoked the first time that a call site is found,
// which causes lazy compilation of the target function.
@@ -136,7 +136,7 @@ public:
/// getPointerToFunction - This returns the address of the specified function,
/// compiling it if necessary.
///
- void *getPointerToFunction(Function *F);
+ void *getPointerToFunction(Function *F) override;
/// addPointerToBasicBlock - Adds address of the specific basic block.
void addPointerToBasicBlock(const BasicBlock *BB, void *Addr);
@@ -146,18 +146,18 @@ public:
/// getPointerToBasicBlock - This returns the address of the specified basic
/// block, assuming function is compiled.
- void *getPointerToBasicBlock(BasicBlock *BB);
+ void *getPointerToBasicBlock(BasicBlock *BB) override;
/// getOrEmitGlobalVariable - Return the address of the specified global
/// variable, possibly emitting it to memory if needed. This is used by the
/// Emitter.
- void *getOrEmitGlobalVariable(const GlobalVariable *GV);
+ void *getOrEmitGlobalVariable(const GlobalVariable *GV) override;
/// getPointerToFunctionOrStub - If the specified function has been
/// code-gen'd, return a pointer to the function. If not, compile it, or use
/// a stub to implement lazy compilation if available.
///
- void *getPointerToFunctionOrStub(Function *F);
+ void *getPointerToFunctionOrStub(Function *F) override;
/// recompileAndRelinkFunction - This method is used to force a function
/// which has already been compiled, to be compiled again, possibly
@@ -165,12 +165,12 @@ public:
/// with a branch to the new copy. If there was no old copy, this acts
/// just like JIT::getPointerToFunction().
///
- void *recompileAndRelinkFunction(Function *F);
+ void *recompileAndRelinkFunction(Function *F) override;
/// freeMachineCodeForFunction - deallocate memory used to code-generate this
/// Function.
///
- void freeMachineCodeForFunction(Function *F);
+ void freeMachineCodeForFunction(Function *F) override;
/// addPendingFunction - while jitting non-lazily, a called but non-codegen'd
/// function was encountered. Add it to a pending list to be processed after
@@ -189,10 +189,13 @@ public:
TargetMachine *TM);
// Run the JIT on F and return information about the generated code
- void runJITOnFunction(Function *F, MachineCodeInfo *MCI = 0);
+ void runJITOnFunction(Function *F, MachineCodeInfo *MCI = nullptr) override;
+
+ void RegisterJITEventListener(JITEventListener *L) override;
+ void UnregisterJITEventListener(JITEventListener *L) override;
+
+ TargetMachine *getTargetMachine() override { return &TM; }
- virtual void RegisterJITEventListener(JITEventListener *L);
- virtual void UnregisterJITEventListener(JITEventListener *L);
/// These functions correspond to the methods on JITEventListener. They
/// iterate over the registered listeners and call the corresponding method on
/// each.
@@ -202,7 +205,7 @@ public:
void NotifyFreeingMachineCode(void *OldPtr);
BasicBlockAddressMapTy &
- getBasicBlockAddressMap(const MutexGuard &) {
+ getBasicBlockAddressMap() {
return BasicBlockAddressMap;
}
@@ -210,14 +213,14 @@ public:
private:
static JITCodeEmitter *createEmitter(JIT &J, JITMemoryManager *JMM,
TargetMachine &tm);
- void runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked);
- void updateFunctionStub(Function *F);
- void jitTheFunction(Function *F, const MutexGuard &locked);
+ void runJITOnFunctionUnlocked(Function *F);
+ void updateFunctionStubUnlocked(Function *F);
+ void jitTheFunctionUnlocked(Function *F);
protected:
/// getMemoryforGV - Allocate memory for a global variable.
- virtual char* getMemoryForGV(const GlobalVariable* GV);
+ char* getMemoryForGV(const GlobalVariable* GV) override;
};
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
index acbbfa1..2ba1f86 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
@@ -12,14 +12,11 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "jit"
#include "JIT.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/ValueMap.h"
#include "llvm/CodeGen/JITCodeEmitter.h"
#include "llvm/CodeGen/MachineCodeInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
@@ -27,21 +24,22 @@
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRelocation.h"
-#include "llvm/DebugInfo.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
#include "llvm/ExecutionEngine/JITMemoryManager.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/IR/ValueMap.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/Disassembler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Memory.h"
#include "llvm/Support/MutexGuard.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetJITInfo.h"
@@ -53,6 +51,8 @@
#endif
using namespace llvm;
+#define DEBUG_TYPE "jit"
+
STATISTIC(NumBytes, "Number of bytes of machine code compiled");
STATISTIC(NumRelos, "Number of relocations applied");
STATISTIC(NumRetries, "Number of retries with more memory");
@@ -120,21 +120,16 @@ namespace {
#endif
}
- FunctionToLazyStubMapTy& getFunctionToLazyStubMap(
- const MutexGuard& locked) {
- assert(locked.holds(TheJIT->lock));
+ FunctionToLazyStubMapTy& getFunctionToLazyStubMap() {
return FunctionToLazyStubMap;
}
- GlobalToIndirectSymMapTy& getGlobalToIndirectSymMap(const MutexGuard& lck) {
- assert(lck.holds(TheJIT->lock));
+ GlobalToIndirectSymMapTy& getGlobalToIndirectSymMap() {
return GlobalToIndirectSymMap;
}
std::pair<void *, Function *> LookupFunctionFromCallSite(
- const MutexGuard &locked, void *CallSite) const {
- assert(locked.holds(TheJIT->lock));
-
+ void *CallSite) const {
// The address given to us for the stub may not be exactly right, it
// might be a little bit after the stub. As such, use upper_bound to
// find it.
@@ -146,9 +141,7 @@ namespace {
return *I;
}
- void AddCallSite(const MutexGuard &locked, void *CallSite, Function *F) {
- assert(locked.holds(TheJIT->lock));
-
+ void AddCallSite(void *CallSite, Function *F) {
bool Inserted = CallSiteToFunctionMap.insert(
std::make_pair(CallSite, F)).second;
(void)Inserted;
@@ -344,7 +337,8 @@ namespace {
void *FunctionBody; // Beginning of the function's allocation.
void *Code; // The address the function's code actually starts at.
void *ExceptionTable;
- EmittedCode() : FunctionBody(0), Code(0), ExceptionTable(0) {}
+ EmittedCode() : FunctionBody(nullptr), Code(nullptr),
+ ExceptionTable(nullptr) {}
};
struct EmittedFunctionConfig : public ValueMapConfig<const Function*> {
typedef JITEmitter *ExtraData;
@@ -361,7 +355,7 @@ namespace {
public:
JITEmitter(JIT &jit, JITMemoryManager *JMM, TargetMachine &TM)
- : SizeEstimate(0), Resolver(jit, *this), MMI(0), CurFn(0),
+ : SizeEstimate(0), Resolver(jit, *this), MMI(nullptr), CurFn(nullptr),
EmittedFunctions(this), TheJIT(&jit) {
MemMgr = JMM ? JMM : JITMemoryManager::CreateDefaultMemManager();
if (jit.getJITInfo().needsGOT()) {
@@ -376,8 +370,8 @@ namespace {
JITResolver &getJITResolver() { return Resolver; }
- virtual void startFunction(MachineFunction &F);
- virtual bool finishFunction(MachineFunction &F);
+ void startFunction(MachineFunction &F) override;
+ bool finishFunction(MachineFunction &F) override;
void emitConstantPool(MachineConstantPool *MCP);
void initJumpTableInfo(MachineJumpTableInfo *MJTI);
@@ -387,24 +381,23 @@ namespace {
unsigned StubSize, unsigned Alignment = 1);
void startGVStub(void *Buffer, unsigned StubSize);
void finishGVStub();
- virtual void *allocIndirectGV(const GlobalValue *GV,
- const uint8_t *Buffer, size_t Size,
- unsigned Alignment);
+ void *allocIndirectGV(const GlobalValue *GV, const uint8_t *Buffer,
+ size_t Size, unsigned Alignment) override;
/// allocateSpace - Reserves space in the current block if any, or
/// allocate a new one of the given size.
- virtual void *allocateSpace(uintptr_t Size, unsigned Alignment);
+ void *allocateSpace(uintptr_t Size, unsigned Alignment) override;
/// allocateGlobal - Allocate memory for a global. Unlike allocateSpace,
/// this method does not allocate memory in the current output buffer,
/// because a global may live longer than the current function.
- virtual void *allocateGlobal(uintptr_t Size, unsigned Alignment);
+ void *allocateGlobal(uintptr_t Size, unsigned Alignment) override;
- virtual void addRelocation(const MachineRelocation &MR) {
+ void addRelocation(const MachineRelocation &MR) override {
Relocations.push_back(MR);
}
- virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) {
+ void StartMachineBasicBlock(MachineBasicBlock *MBB) override {
if (MBBLocations.size() <= (unsigned)MBB->getNumber())
MBBLocations.resize((MBB->getNumber()+1)*2);
MBBLocations[MBB->getNumber()] = getCurrentPCValue();
@@ -415,10 +408,11 @@ namespace {
<< (void*) getCurrentPCValue() << "]\n");
}
- virtual uintptr_t getConstantPoolEntryAddress(unsigned Entry) const;
- virtual uintptr_t getJumpTableEntryAddress(unsigned Entry) const;
+ uintptr_t getConstantPoolEntryAddress(unsigned Entry) const override;
+ uintptr_t getJumpTableEntryAddress(unsigned Entry) const override;
- virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const{
+ uintptr_t
+ getMachineBasicBlockAddress(MachineBasicBlock *MBB) const override {
assert(MBBLocations.size() > (unsigned)MBB->getNumber() &&
MBBLocations[MBB->getNumber()] && "MBB not emitted!");
return MBBLocations[MBB->getNumber()];
@@ -433,22 +427,22 @@ namespace {
/// function body.
void deallocateMemForFunction(const Function *F);
- virtual void processDebugLoc(DebugLoc DL, bool BeforePrintingInsn);
+ void processDebugLoc(DebugLoc DL, bool BeforePrintingInsn) override;
- virtual void emitLabel(MCSymbol *Label) {
+ void emitLabel(MCSymbol *Label) override {
LabelLocations[Label] = getCurrentPCValue();
}
- virtual DenseMap<MCSymbol*, uintptr_t> *getLabelLocations() {
+ DenseMap<MCSymbol*, uintptr_t> *getLabelLocations() override {
return &LabelLocations;
}
- virtual uintptr_t getLabelAddress(MCSymbol *Label) const {
+ uintptr_t getLabelAddress(MCSymbol *Label) const override {
assert(LabelLocations.count(Label) && "Label not emitted!");
return LabelLocations.find(Label)->second;
}
- virtual void setModuleInfo(MachineModuleInfo* Info) {
+ void setModuleInfo(MachineModuleInfo* Info) override {
MMI = Info;
}
@@ -502,7 +496,7 @@ void *JITResolver::getLazyFunctionStubIfAvailable(Function *F) {
MutexGuard locked(TheJIT->lock);
// If we already have a stub for this function, recycle it.
- return state.getFunctionToLazyStubMap(locked).lookup(F);
+ return state.getFunctionToLazyStubMap().lookup(F);
}
/// getFunctionStub - This returns a pointer to a function stub, creating
@@ -511,13 +505,13 @@ void *JITResolver::getLazyFunctionStub(Function *F) {
MutexGuard locked(TheJIT->lock);
// If we already have a lazy stub for this function, recycle it.
- void *&Stub = state.getFunctionToLazyStubMap(locked)[F];
+ void *&Stub = state.getFunctionToLazyStubMap()[F];
if (Stub) return Stub;
// Call the lazy resolver function if we are JIT'ing lazily. Otherwise we
// must resolve the symbol now.
void *Actual = TheJIT->isCompilingLazily()
- ? (void *)(intptr_t)LazyResolverFn : (void *)0;
+ ? (void *)(intptr_t)LazyResolverFn : (void *)nullptr;
// If this is an external declaration, attempt to resolve the address now
// to place in the stub.
@@ -526,7 +520,7 @@ void *JITResolver::getLazyFunctionStub(Function *F) {
// If we resolved the symbol to a null address (eg. a weak external)
// don't emit a stub. Return a null pointer to the application.
- if (!Actual) return 0;
+ if (!Actual) return nullptr;
}
TargetJITInfo::StubLayout SL = TheJIT->getJITInfo().getStubLayout();
@@ -553,7 +547,7 @@ void *JITResolver::getLazyFunctionStub(Function *F) {
// Finally, keep track of the stub-to-Function mapping so that the
// JITCompilerFn knows which function to compile!
- state.AddCallSite(locked, Stub, F);
+ state.AddCallSite(Stub, F);
} else if (!Actual) {
// If we are JIT'ing non-lazily but need to call a function that does not
// exist yet, add it to the JIT's work list so that we can fill in the
@@ -572,7 +566,7 @@ void *JITResolver::getGlobalValueIndirectSym(GlobalValue *GV, void *GVAddress) {
MutexGuard locked(TheJIT->lock);
// If we already have a stub for this global variable, recycle it.
- void *&IndirectSym = state.getGlobalToIndirectSymMap(locked)[GV];
+ void *&IndirectSym = state.getGlobalToIndirectSymMap()[GV];
if (IndirectSym) return IndirectSym;
// Otherwise, codegen a new indirect symbol.
@@ -593,8 +587,8 @@ void *JITResolver::getExternalFunctionStub(void *FnAddr) {
if (Stub) return Stub;
TargetJITInfo::StubLayout SL = TheJIT->getJITInfo().getStubLayout();
- JE.startGVStub(0, SL.Size, SL.Alignment);
- Stub = TheJIT->getJITInfo().emitFunctionStub(0, FnAddr, JE);
+ JE.startGVStub(nullptr, SL.Size, SL.Alignment);
+ Stub = TheJIT->getJITInfo().emitFunctionStub(nullptr, FnAddr, JE);
JE.finishGVStub();
DEBUG(dbgs() << "JIT: Stub emitted at [" << Stub
@@ -620,8 +614,8 @@ void *JITResolver::JITCompilerFn(void *Stub) {
JITResolver *JR = StubToResolverMap->getResolverFromStub(Stub);
assert(JR && "Unable to find the corresponding JITResolver to the call site");
- Function* F = 0;
- void* ActualPtr = 0;
+ Function* F = nullptr;
+ void* ActualPtr = nullptr;
{
// Only lock for getting the Function. The call getPointerToFunction made
@@ -632,7 +626,7 @@ void *JITResolver::JITCompilerFn(void *Stub) {
// The address given to us for the stub may not be exactly right, it might
// be a little bit after the stub. As such, use upper_bound to find it.
std::pair<void*, Function*> I =
- JR->state.LookupFunctionFromCallSite(locked, Stub);
+ JR->state.LookupFunctionFromCallSite(Stub);
F = I.second;
ActualPtr = I.first;
}
@@ -683,13 +677,23 @@ void *JITResolver::JITCompilerFn(void *Stub) {
//===----------------------------------------------------------------------===//
// JITEmitter code.
//
+
+static GlobalObject *getSimpleAliasee(Constant *C) {
+ C = C->stripPointerCasts();
+ return dyn_cast<GlobalObject>(C);
+}
+
void *JITEmitter::getPointerToGlobal(GlobalValue *V, void *Reference,
bool MayNeedFarStub) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
return TheJIT->getOrEmitGlobalVariable(GV);
- if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
- return TheJIT->getPointerToGlobal(GA->resolveAliasedGlobal(false));
+ if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+ // We can only handle simple cases.
+ if (GlobalValue *GV = getSimpleAliasee(GA->getAliasee()))
+ return TheJIT->getPointerToGlobal(GV);
+ return nullptr;
+ }
// If we have already compiled the function, return a pointer to its body.
Function *F = cast<Function>(V);
@@ -736,7 +740,7 @@ void JITEmitter::processDebugLoc(DebugLoc DL, bool BeforePrintingInsn) {
const LLVMContext &Context = EmissionDetails.MF->getFunction()->getContext();
- if (DL.getScope(Context) != 0 && PrevDL != DL) {
+ if (DL.getScope(Context) != nullptr && PrevDL != DL) {
JITEvent_EmittedFunctionDetails::LineStart NextLine;
NextLine.Address = getCurrentPCValue();
NextLine.Loc = DL;
@@ -825,7 +829,7 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
// Resolve the relocations to concrete pointers.
for (unsigned i = 0, e = Relocations.size(); i != e; ++i) {
MachineRelocation &MR = Relocations[i];
- void *ResultPtr = 0;
+ void *ResultPtr = nullptr;
if (!MR.letTargetResolve()) {
if (MR.isExternalSymbol()) {
ResultPtr = TheJIT->getPointerToNamedFunction(MR.getExternalSymbol(),
@@ -871,7 +875,7 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
}
}
- CurFn = 0;
+ CurFn = nullptr;
TheJIT->getJITInfo().relocate(BufferBegin, &Relocations[0],
Relocations.size(), MemMgr->getGOTBase());
}
@@ -900,7 +904,7 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
SizeEstimate = 0;
}
- BufferBegin = CurBufferPtr = 0;
+ BufferBegin = CurBufferPtr = nullptr;
NumBytes += FnEnd-FnStart;
// Invalidate the icache if necessary.
@@ -924,11 +928,6 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
MemMgr->setMemoryExecutable();
DEBUG({
- if (sys::hasDisassembler()) {
- dbgs() << "JIT: Disassembled code:\n";
- dbgs() << sys::disassembleBuffer(FnStart, FnEnd-FnStart,
- (uintptr_t)FnStart);
- } else {
dbgs() << "JIT: Binary code:\n";
uint8_t* q = FnStart;
for (int i = 0; q < FnEnd; q += 4, ++i) {
@@ -950,7 +949,6 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
dbgs() << '\n';
}
dbgs()<< '\n';
- }
});
if (MMI)
@@ -1018,7 +1016,7 @@ void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
ConstantPoolBase = allocateSpace(Size, Align);
ConstantPool = MCP;
- if (ConstantPoolBase == 0) return; // Buffer overflow.
+ if (!ConstantPoolBase) return; // Buffer overflow.
DEBUG(dbgs() << "JIT: Emitted constant pool at [" << ConstantPoolBase
<< "] (size: " << Size << ", alignment: " << Align << ")\n");
@@ -1074,7 +1072,7 @@ void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
return;
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
- if (JT.empty() || JumpTableBase == 0) return;
+ if (JT.empty() || !JumpTableBase) return;
switch (MJTI->getEntryKind()) {
@@ -1224,7 +1222,7 @@ void *JIT::getPointerToFunctionOrStub(Function *F) {
return JE->getJITResolver().getLazyFunctionStub(F);
}
-void JIT::updateFunctionStub(Function *F) {
+void JIT::updateFunctionStubUnlocked(Function *F) {
// Get the empty stub we generated earlier.
JITEmitter *JE = static_cast<JITEmitter*>(getCodeEmitter());
void *Stub = JE->getJITResolver().getLazyFunctionStub(F);
@@ -1244,7 +1242,7 @@ void JIT::updateFunctionStub(Function *F) {
void JIT::freeMachineCodeForFunction(Function *F) {
// Delete translation for this from the ExecutionEngine, so it will get
// retranslated next time it is used.
- updateGlobalMapping(F, 0);
+ updateGlobalMapping(F, nullptr);
// Free the actual memory for the function body and related stuff.
static_cast<JITEmitter*>(JCE)->deallocateMemForFunction(F);
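
Several JITEmitter.cpp hunks above stop threading a MutexGuard token through private helpers (and drop the locked.holds(...) asserts); helpers that require the lock are instead renamed with an "Unlocked" suffix, e.g. updateFunctionStubUnlocked. A minimal sketch of the same convention in portable C++, with illustrative names and std::mutex standing in for the JIT lock:

#include <map>
#include <mutex>
#include <string>

class StubTable {
  std::mutex Lock;
  std::map<std::string, void *> Stubs;

  // Private helper: the "Unlocked" suffix documents that the caller already
  // holds Lock, replacing an assert on a passed-in guard object.
  void *lookupStubUnlocked(const std::string &Name) {
    auto It = Stubs.find(Name);
    return It == Stubs.end() ? nullptr : It->second;
  }

public:
  void *getStub(const std::string &Name) {
    std::lock_guard<std::mutex> Guard(Lock); // lock once at the public entry
    return lookupStubUnlocked(Name);         // helpers assume the lock is held
  }
};

int main() {
  StubTable Table;
  return Table.getStub("main") == nullptr ? 0 : 1;
}
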
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
index f58d31b..584b93f 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "jit"
#include "llvm/ExecutionEngine/JITMemoryManager.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
@@ -40,6 +39,8 @@
using namespace llvm;
+#define DEBUG_TYPE "jit"
+
STATISTIC(NumSlabs, "Number of slabs of memory allocated by the JIT");
JITMemoryManager::~JITMemoryManager() {}
@@ -80,7 +81,7 @@ namespace {
/// getFreeBlockBefore - If the block before this one is free, return it,
/// otherwise return null.
FreeRangeHeader *getFreeBlockBefore() const {
- if (PrevAllocated) return 0;
+ if (PrevAllocated) return nullptr;
intptr_t PrevSize = reinterpret_cast<intptr_t *>(
const_cast<MemoryRangeHeader *>(this))[-1];
return reinterpret_cast<FreeRangeHeader *>(
@@ -174,7 +175,7 @@ FreeRangeHeader *MemoryRangeHeader::FreeBlock(FreeRangeHeader *FreeList) {
// coalesce with it, update our notion of what the free list is.
if (&FollowingFreeBlock == FreeList) {
FreeList = FollowingFreeBlock.Next;
- FreeListToReturn = 0;
+ FreeListToReturn = nullptr;
assert(&FollowingFreeBlock != FreeList && "No tombstone block?");
}
FollowingFreeBlock.RemoveFromFreeList();
@@ -269,13 +270,12 @@ namespace {
class DefaultJITMemoryManager;
- class JITSlabAllocator : public SlabAllocator {
+ class JITAllocator {
DefaultJITMemoryManager &JMM;
public:
- JITSlabAllocator(DefaultJITMemoryManager &jmm) : JMM(jmm) { }
- virtual ~JITSlabAllocator() { }
- virtual MemSlab *Allocate(size_t Size);
- virtual void Deallocate(MemSlab *Slab);
+ JITAllocator(DefaultJITMemoryManager &jmm) : JMM(jmm) { }
+ void *Allocate(size_t Size, size_t /*Alignment*/);
+ void Deallocate(void *Slab, size_t Size);
};
/// DefaultJITMemoryManager - Manage memory for the JIT code generation.
@@ -285,7 +285,21 @@ namespace {
/// middle of emitting a function, and we don't know how large the function we
/// are emitting is.
class DefaultJITMemoryManager : public JITMemoryManager {
+ public:
+ /// DefaultCodeSlabSize - When we have to go map more memory, we allocate at
+ /// least this much unless more is requested. Currently, in 512k slabs.
+ static const size_t DefaultCodeSlabSize = 512 * 1024;
+
+ /// DefaultSlabSize - Allocate globals and stubs into slabs of 64K (probably
+ /// 16 pages) unless we get an allocation above SizeThreshold.
+ static const size_t DefaultSlabSize = 64 * 1024;
+
+ /// DefaultSizeThreshold - For any allocation larger than 16K (probably
+ /// 4 pages), we should allocate a separate slab to avoid wasted space at
+ /// the end of a normal slab.
+ static const size_t DefaultSizeThreshold = 16 * 1024;
+ private:
// Whether to poison freed memory.
bool PoisonMemory;
@@ -299,9 +313,10 @@ namespace {
// Memory slabs allocated by the JIT. We refer to them as slabs so we don't
// confuse them with the blocks of memory described above.
std::vector<sys::MemoryBlock> CodeSlabs;
- JITSlabAllocator BumpSlabAllocator;
- BumpPtrAllocator StubAllocator;
- BumpPtrAllocator DataAllocator;
+ BumpPtrAllocatorImpl<JITAllocator, DefaultSlabSize,
+ DefaultSizeThreshold> StubAllocator;
+ BumpPtrAllocatorImpl<JITAllocator, DefaultSlabSize,
+ DefaultSizeThreshold> DataAllocator;
// Circular list of free blocks.
FreeRangeHeader *FreeMemoryList;
@@ -318,37 +333,26 @@ namespace {
/// last slab it allocated, so that subsequent allocations follow it.
sys::MemoryBlock allocateNewSlab(size_t size);
- /// DefaultCodeSlabSize - When we have to go map more memory, we allocate at
- /// least this much unless more is requested.
- static const size_t DefaultCodeSlabSize;
-
- /// DefaultSlabSize - Allocate data into slabs of this size unless we get
- /// an allocation above SizeThreshold.
- static const size_t DefaultSlabSize;
-
- /// DefaultSizeThreshold - For any allocation larger than this threshold, we
- /// should allocate a separate slab.
- static const size_t DefaultSizeThreshold;
-
/// getPointerToNamedFunction - This method returns the address of the
/// specified function by using the dlsym function call.
- virtual void *getPointerToNamedFunction(const std::string &Name,
- bool AbortOnFailure = true);
+ void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true) override;
- void AllocateGOT();
+ void AllocateGOT() override;
// Testing methods.
- virtual bool CheckInvariants(std::string &ErrorStr);
- size_t GetDefaultCodeSlabSize() { return DefaultCodeSlabSize; }
- size_t GetDefaultDataSlabSize() { return DefaultSlabSize; }
- size_t GetDefaultStubSlabSize() { return DefaultSlabSize; }
- unsigned GetNumCodeSlabs() { return CodeSlabs.size(); }
- unsigned GetNumDataSlabs() { return DataAllocator.GetNumSlabs(); }
- unsigned GetNumStubSlabs() { return StubAllocator.GetNumSlabs(); }
+ bool CheckInvariants(std::string &ErrorStr) override;
+ size_t GetDefaultCodeSlabSize() override { return DefaultCodeSlabSize; }
+ size_t GetDefaultDataSlabSize() override { return DefaultSlabSize; }
+ size_t GetDefaultStubSlabSize() override { return DefaultSlabSize; }
+ unsigned GetNumCodeSlabs() override { return CodeSlabs.size(); }
+ unsigned GetNumDataSlabs() override { return DataAllocator.GetNumSlabs(); }
+ unsigned GetNumStubSlabs() override { return StubAllocator.GetNumSlabs(); }
/// startFunctionBody - When a function starts, allocate a block of free
/// executable memory, returning a pointer to it and its actual size.
- uint8_t *startFunctionBody(const Function *F, uintptr_t &ActualSize) {
+ uint8_t *startFunctionBody(const Function *F,
+ uintptr_t &ActualSize) override {
FreeRangeHeader* candidateBlock = FreeMemoryList;
FreeRangeHeader* head = FreeMemoryList;
@@ -422,7 +426,7 @@ namespace {
/// endFunctionBody - The function F is now allocated, and takes the memory
/// in the range [FunctionStart,FunctionEnd).
void endFunctionBody(const Function *F, uint8_t *FunctionStart,
- uint8_t *FunctionEnd) {
+ uint8_t *FunctionEnd) override {
assert(FunctionEnd > FunctionStart);
assert(FunctionStart == (uint8_t *)(CurBlock+1) &&
"Mismatched function start/end!");
@@ -435,7 +439,7 @@ namespace {
/// allocateSpace - Allocate a memory block of the given size. This method
/// cannot be called between calls to startFunctionBody and endFunctionBody.
- uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) {
+ uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) override {
CurBlock = FreeMemoryList;
FreeMemoryList = FreeMemoryList->AllocateBlock();
@@ -453,18 +457,19 @@ namespace {
/// allocateStub - Allocate memory for a function stub.
uint8_t *allocateStub(const GlobalValue* F, unsigned StubSize,
- unsigned Alignment) {
+ unsigned Alignment) override {
return (uint8_t*)StubAllocator.Allocate(StubSize, Alignment);
}
/// allocateGlobal - Allocate memory for a global.
- uint8_t *allocateGlobal(uintptr_t Size, unsigned Alignment) {
+ uint8_t *allocateGlobal(uintptr_t Size, unsigned Alignment) override {
return (uint8_t*)DataAllocator.Allocate(Size, Alignment);
}
/// allocateCodeSection - Allocate memory for a code section.
uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
- unsigned SectionID, StringRef SectionName) {
+ unsigned SectionID,
+ StringRef SectionName) override {
// Grow the required block size to account for the block header
Size += sizeof(*CurBlock);
@@ -511,15 +516,15 @@ namespace {
/// allocateDataSection - Allocate memory for a data section.
uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID, StringRef SectionName,
- bool IsReadOnly) {
+ bool IsReadOnly) override {
return (uint8_t*)DataAllocator.Allocate(Size, Alignment);
}
- bool finalizeMemory(std::string *ErrMsg) {
+ bool finalizeMemory(std::string *ErrMsg) override {
return false;
}
- uint8_t *getGOTBase() const {
+ uint8_t *getGOTBase() const override {
return GOTBase;
}
@@ -539,57 +544,49 @@ namespace {
/// deallocateFunctionBody - Deallocate all memory for the specified
/// function body.
- void deallocateFunctionBody(void *Body) {
+ void deallocateFunctionBody(void *Body) override {
if (Body) deallocateBlock(Body);
}
/// setMemoryWritable - When code generation is in progress,
/// the code pages may need permissions changed.
- void setMemoryWritable()
- {
+ void setMemoryWritable() override {
for (unsigned i = 0, e = CodeSlabs.size(); i != e; ++i)
sys::Memory::setWritable(CodeSlabs[i]);
}
/// setMemoryExecutable - When code generation is done and we're ready to
/// start execution, the code pages may need permissions changed.
- void setMemoryExecutable()
- {
+ void setMemoryExecutable() override {
for (unsigned i = 0, e = CodeSlabs.size(); i != e; ++i)
sys::Memory::setExecutable(CodeSlabs[i]);
}
/// setPoisonMemory - Controls whether we write garbage over freed memory.
///
- void setPoisonMemory(bool poison) {
+ void setPoisonMemory(bool poison) override {
PoisonMemory = poison;
}
};
}
-MemSlab *JITSlabAllocator::Allocate(size_t Size) {
+void *JITAllocator::Allocate(size_t Size, size_t /*Alignment*/) {
sys::MemoryBlock B = JMM.allocateNewSlab(Size);
- MemSlab *Slab = (MemSlab*)B.base();
- Slab->Size = B.size();
- Slab->NextPtr = 0;
- return Slab;
+ return B.base();
}
-void JITSlabAllocator::Deallocate(MemSlab *Slab) {
- sys::MemoryBlock B(Slab, Slab->Size);
+void JITAllocator::Deallocate(void *Slab, size_t Size) {
+ sys::MemoryBlock B(Slab, Size);
sys::Memory::ReleaseRWX(B);
}
DefaultJITMemoryManager::DefaultJITMemoryManager()
- :
+ :
#ifdef NDEBUG
- PoisonMemory(false),
+ PoisonMemory(false),
#else
- PoisonMemory(true),
+ PoisonMemory(true),
#endif
- LastSlab(0, 0),
- BumpSlabAllocator(*this),
- StubAllocator(DefaultSlabSize, DefaultSizeThreshold, BumpSlabAllocator),
- DataAllocator(DefaultSlabSize, DefaultSizeThreshold, BumpSlabAllocator) {
+ LastSlab(nullptr, 0), StubAllocator(*this), DataAllocator(*this) {
// Allocate space for code.
sys::MemoryBlock MemBlock = allocateNewSlab(DefaultCodeSlabSize);
@@ -642,11 +639,11 @@ DefaultJITMemoryManager::DefaultJITMemoryManager()
// Start out with the freelist pointing to Mem0.
FreeMemoryList = Mem0;
- GOTBase = NULL;
+ GOTBase = nullptr;
}
void DefaultJITMemoryManager::AllocateGOT() {
- assert(GOTBase == 0 && "Cannot allocate the got multiple times");
+ assert(!GOTBase && "Cannot allocate the got multiple times");
GOTBase = new uint8_t[sizeof(void*) * 8192];
HasGOT = true;
}
@@ -661,9 +658,9 @@ DefaultJITMemoryManager::~DefaultJITMemoryManager() {
sys::MemoryBlock DefaultJITMemoryManager::allocateNewSlab(size_t size) {
// Allocate a new block close to the last one.
std::string ErrMsg;
- sys::MemoryBlock *LastSlabPtr = LastSlab.base() ? &LastSlab : 0;
+ sys::MemoryBlock *LastSlabPtr = LastSlab.base() ? &LastSlab : nullptr;
sys::MemoryBlock B = sys::Memory::AllocateRWX(size, LastSlabPtr, &ErrMsg);
- if (B.base() == 0) {
+ if (!B.base()) {
report_fatal_error("Allocation failed when allocating new memory in the"
" JIT\n" + Twine(ErrMsg));
}
@@ -724,7 +721,7 @@ bool DefaultJITMemoryManager::CheckInvariants(std::string &ErrorStr) {
char *End = Start + I->size();
// Check each memory range.
- for (MemoryRangeHeader *Hdr = (MemoryRangeHeader*)Start, *LastHdr = NULL;
+ for (MemoryRangeHeader *Hdr = (MemoryRangeHeader*)Start, *LastHdr = nullptr;
Start <= (char*)Hdr && (char*)Hdr < End;
Hdr = &Hdr->getBlockAfter()) {
if (Hdr->ThisAllocated == 0) {
@@ -893,7 +890,7 @@ void *DefaultJITMemoryManager::getPointerToNamedFunction(const std::string &Name
report_fatal_error("Program used external function '"+Name+
"' which could not be resolved!");
}
- return 0;
+ return nullptr;
}
@@ -902,11 +899,6 @@ JITMemoryManager *JITMemoryManager::CreateDefaultMemManager() {
return new DefaultJITMemoryManager();
}
-// Allocate memory for code in 512K slabs.
-const size_t DefaultJITMemoryManager::DefaultCodeSlabSize = 512 * 1024;
-
-// Allocate globals and stubs in slabs of 64K. (probably 16 pages)
-const size_t DefaultJITMemoryManager::DefaultSlabSize = 64 * 1024;
-
-// Waste at most 16K at the end of each bump slab. (probably 4 pages)
-const size_t DefaultJITMemoryManager::DefaultSizeThreshold = 16 * 1024;
+const size_t DefaultJITMemoryManager::DefaultCodeSlabSize;
+const size_t DefaultJITMemoryManager::DefaultSlabSize;
+const size_t DefaultJITMemoryManager::DefaultSizeThreshold;
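
The final JITMemoryManager.cpp hunk moves the slab-size constants' values into the class body and keeps empty out-of-line definitions. A minimal sketch of the underlying C++ rule, using illustrative names: pre-C++17, a static const integral member may be initialized in-class, but it still needs one definition (without an initializer) in some translation unit if it is odr-used, for example bound to a const reference.

#include <algorithm>
#include <cstddef>
#include <iostream>

struct SlabSizes {
  // Values live in the class body, as in the hunk above.
  static const std::size_t DefaultSlabSize = 64 * 1024;
  static const std::size_t DefaultSizeThreshold = 16 * 1024;
};

// Out-of-line definitions, no initializers: these satisfy the odr-use below.
const std::size_t SlabSizes::DefaultSlabSize;
const std::size_t SlabSizes::DefaultSizeThreshold;

int main() {
  // std::max takes its arguments by const reference, which odr-uses them.
  std::cout << std::max(SlabSizes::DefaultSlabSize,
                        SlabSizes::DefaultSizeThreshold)
            << "\n";
  return 0;
}
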
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
index 195c458..e9ba96a 100644
--- a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
@@ -14,17 +14,20 @@
#include "llvm/ExecutionEngine/MCJIT.h"
#include "llvm/ExecutionEngine/ObjectBuffer.h"
#include "llvm/ExecutionEngine/ObjectImage.h"
-#include "llvm/PassManager.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/PassManager.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/MutexGuard.h"
+#include "llvm/Target/TargetLowering.h"
using namespace llvm;
@@ -47,7 +50,7 @@ ExecutionEngine *MCJIT::createJIT(Module *M,
// Try to register the program as a source of symbols to resolve against.
//
// FIXME: Don't do this here.
- sys::DynamicLibrary::LoadLibraryPermanently(0, NULL);
+ sys::DynamicLibrary::LoadLibraryPermanently(nullptr, nullptr);
return new MCJIT(M, TM, MemMgr ? MemMgr : new SectionMemoryManager(),
GVsWithCode);
@@ -55,8 +58,8 @@ ExecutionEngine *MCJIT::createJIT(Module *M,
MCJIT::MCJIT(Module *m, TargetMachine *tm, RTDyldMemoryManager *MM,
bool AllocateGVsWithCode)
- : ExecutionEngine(m), TM(tm), Ctx(0), MemMgr(this, MM), Dyld(&MemMgr),
- ObjCache(0) {
+ : ExecutionEngine(m), TM(tm), Ctx(nullptr), MemMgr(this, MM), Dyld(&MemMgr),
+ ObjCache(nullptr) {
OwnedModules.addModule(m);
setDataLayout(TM->getDataLayout());
@@ -77,15 +80,24 @@ MCJIT::~MCJIT() {
Modules.clear();
Dyld.deregisterEHFrames();
- LoadedObjectMap::iterator it, end = LoadedObjects.end();
- for (it = LoadedObjects.begin(); it != end; ++it) {
- ObjectImage *Obj = it->second;
+ LoadedObjectList::iterator it, end;
+ for (it = LoadedObjects.begin(), end = LoadedObjects.end(); it != end; ++it) {
+ ObjectImage *Obj = *it;
if (Obj) {
NotifyFreeingObject(*Obj);
delete Obj;
}
}
LoadedObjects.clear();
+
+
+ SmallVector<object::Archive *, 2>::iterator ArIt, ArEnd;
+ for (ArIt = Archives.begin(), ArEnd = Archives.end(); ArIt != ArEnd; ++ArIt) {
+ object::Archive *A = *ArIt;
+ delete A;
+ }
+ Archives.clear();
+
delete TM;
}
@@ -101,6 +113,21 @@ bool MCJIT::removeModule(Module *M) {
+void MCJIT::addObjectFile(std::unique_ptr<object::ObjectFile> Obj) {
+ ObjectImage *LoadedObject = Dyld.loadObject(std::move(Obj));
+ if (!LoadedObject || Dyld.hasError())
+ report_fatal_error(Dyld.getErrorString());
+
+ LoadedObjects.push_back(LoadedObject);
+
+ NotifyObjectEmitted(*LoadedObject);
+}
+
+void MCJIT::addArchive(object::Archive *A) {
+ Archives.push_back(A);
+}
+
+
void MCJIT::setObjectCache(ObjectCache* NewCache) {
MutexGuard locked(lock);
ObjCache = NewCache;
@@ -115,14 +142,16 @@ ObjectBufferStream* MCJIT::emitObject(Module *M) {
PassManager PM;
- PM.add(new DataLayout(*TM->getDataLayout()));
+ M->setDataLayout(TM->getDataLayout());
+ PM.add(new DataLayoutPass(M));
// The RuntimeDyld will take ownership of this shortly
- OwningPtr<ObjectBufferStream> CompiledObject(new ObjectBufferStream());
+ std::unique_ptr<ObjectBufferStream> CompiledObject(new ObjectBufferStream());
// Turn the machine code intermediate representation into bytes in memory
// that may be executed.
- if (TM->addPassesToEmitMC(PM, Ctx, CompiledObject->getOStream(), false)) {
+ if (TM->addPassesToEmitMC(PM, Ctx, CompiledObject->getOStream(),
+ !getVerifyModules())) {
report_fatal_error("Target does not support MC emission!");
}
@@ -136,11 +165,11 @@ ObjectBufferStream* MCJIT::emitObject(Module *M) {
if (ObjCache) {
// MemoryBuffer is a thin wrapper around the actual memory, so it's OK
// to create a temporary object here and delete it after the call.
- OwningPtr<MemoryBuffer> MB(CompiledObject->getMemBuffer());
+ std::unique_ptr<MemoryBuffer> MB(CompiledObject->getMemBuffer());
ObjCache->notifyObjectCompiled(M, MB.get());
}
- return CompiledObject.take();
+ return CompiledObject.release();
}
void MCJIT::generateCodeForModule(Module *M) {
@@ -155,12 +184,12 @@ void MCJIT::generateCodeForModule(Module *M) {
if (OwnedModules.hasModuleBeenLoaded(M))
return;
- OwningPtr<ObjectBuffer> ObjectToLoad;
+ std::unique_ptr<ObjectBuffer> ObjectToLoad;
// Try to load the pre-compiled object from cache if possible
- if (0 != ObjCache) {
- OwningPtr<MemoryBuffer> PreCompiledObject(ObjCache->getObject(M));
- if (0 != PreCompiledObject.get())
- ObjectToLoad.reset(new ObjectBuffer(PreCompiledObject.take()));
+ if (ObjCache) {
+ std::unique_ptr<MemoryBuffer> PreCompiledObject(ObjCache->getObject(M));
+ if (PreCompiledObject.get())
+ ObjectToLoad.reset(new ObjectBuffer(PreCompiledObject.release()));
}
// If the cache did not contain a suitable object, compile the object
@@ -170,9 +199,9 @@ void MCJIT::generateCodeForModule(Module *M) {
}
// Load the object into the dynamic linker.
- // MCJIT now owns the ObjectImage pointer (via its LoadedObjects map).
- ObjectImage *LoadedObject = Dyld.loadObject(ObjectToLoad.take());
- LoadedObjects[M] = LoadedObject;
+ // MCJIT now owns the ObjectImage pointer (via its LoadedObjects list).
+ ObjectImage *LoadedObject = Dyld.loadObject(ObjectToLoad.release());
+ LoadedObjects.push_back(LoadedObject);
if (!LoadedObject)
report_fatal_error(Dyld.getErrorString());
@@ -231,11 +260,10 @@ void *MCJIT::getPointerToBasicBlock(BasicBlock *BB) {
}
uint64_t MCJIT::getExistingSymbolAddress(const std::string &Name) {
- // Check with the RuntimeDyld to see if we already have this symbol.
- if (Name[0] == '\1')
- return Dyld.getSymbolLoadAddress(Name.substr(1));
- return Dyld.getSymbolLoadAddress((TM->getMCAsmInfo()->getGlobalPrefix()
- + Name));
+ Mangler Mang(TM->getDataLayout());
+ SmallString<128> FullName;
+ Mang.getNameWithPrefix(FullName, Name);
+ return Dyld.getSymbolLoadAddress(FullName);
}
Module *MCJIT::findModuleForSymbol(const std::string &Name,
@@ -258,7 +286,7 @@ Module *MCJIT::findModuleForSymbol(const std::string &Name,
}
}
// We didn't find the symbol in any of our modules.
- return NULL;
+ return nullptr;
}
uint64_t MCJIT::getSymbolAddress(const std::string &Name,
@@ -271,6 +299,31 @@ uint64_t MCJIT::getSymbolAddress(const std::string &Name,
if (Addr)
return Addr;
+ SmallVector<object::Archive*, 2>::iterator I, E;
+ for (I = Archives.begin(), E = Archives.end(); I != E; ++I) {
+ object::Archive *A = *I;
+ // Look for our symbols in each Archive
+ object::Archive::child_iterator ChildIt = A->findSym(Name);
+ if (ChildIt != A->child_end()) {
+ // FIXME: Support nested archives?
+ ErrorOr<std::unique_ptr<object::Binary>> ChildBinOrErr =
+ ChildIt->getAsBinary();
+ if (ChildBinOrErr.getError())
+ continue;
+ std::unique_ptr<object::Binary> ChildBin = std::move(ChildBinOrErr.get());
+ if (ChildBin->isObject()) {
+ std::unique_ptr<object::ObjectFile> OF(
+ static_cast<object::ObjectFile *>(ChildBin.release()));
+ // This causes the object file to be loaded.
+ addObjectFile(std::move(OF));
+ // The address should be here now.
+ Addr = getExistingSymbolAddress(Name);
+ if (Addr)
+ return Addr;
+ }
+ }
+ }
+
// If it hasn't already been generated, see if it's in one of our modules.
Module *M = findModuleForSymbol(Name, CheckFunctionsOnly);
if (!M)
@@ -317,18 +370,16 @@ void *MCJIT::getPointerToFunction(Function *F) {
generateCodeForModule(M);
else if (!OwnedModules.hasModuleBeenLoaded(M))
// If this function doesn't belong to one of our modules, we're done.
- return NULL;
+ return nullptr;
// FIXME: Should the Dyld be retaining module information? Probably not.
- // FIXME: Should we be using the mangler for this? Probably.
//
// This is the accessor for the target address, so make sure to check the
// load address of the symbol, not the local address.
- StringRef BaseName = F->getName();
- if (BaseName[0] == '\1')
- return (void*)Dyld.getSymbolLoadAddress(BaseName.substr(1));
- return (void*)Dyld.getSymbolLoadAddress((TM->getMCAsmInfo()->getGlobalPrefix()
- + BaseName).str());
+ Mangler Mang(TM->getDataLayout());
+ SmallString<128> Name;
+ TM->getNameWithPrefix(Name, F, Mang);
+ return (void*)Dyld.getSymbolLoadAddress(Name);
}
void *MCJIT::recompileAndRelinkFunction(Function *F) {
@@ -363,7 +414,7 @@ Function *MCJIT::FindFunctionNamedInModulePtrSet(const char *FnName,
if (Function *F = (*I)->getFunction(FnName))
return F;
}
- return 0;
+ return nullptr;
}
Function *MCJIT::FindFunctionNamed(const char *FnName) {
@@ -495,17 +546,17 @@ void *MCJIT::getPointerToNamedFunction(const std::string &Name,
report_fatal_error("Program used external function '"+Name+
"' which could not be resolved!");
}
- return 0;
+ return nullptr;
}
void MCJIT::RegisterJITEventListener(JITEventListener *L) {
- if (L == NULL)
+ if (!L)
return;
MutexGuard locked(lock);
EventListeners.push_back(L);
}
void MCJIT::UnregisterJITEventListener(JITEventListener *L) {
- if (L == NULL)
+ if (!L)
return;
MutexGuard locked(lock);
SmallVector<JITEventListener*, 2>::reverse_iterator I=
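
The MCJIT.cpp hunks above migrate OwningPtr to std::unique_ptr, replacing .take() with .release() and passing ownership with std::move. A minimal standalone sketch of the ownership-transfer pattern with the standard smart pointer (ObjectBuffer here is a stand-in type, not the LLVM one):

#include <memory>
#include <utility>
#include <vector>

struct ObjectBuffer {
  std::vector<char> Bytes;
};

// Takes ownership of the buffer; the caller's pointer is empty afterwards.
void loadObject(std::unique_ptr<ObjectBuffer> Obj) {
  (void)Obj; // e.g. hand the bytes to a dynamic linker, then free them
}

int main() {
  std::unique_ptr<ObjectBuffer> Compiled(new ObjectBuffer{{'E', 'L', 'F'}});
  loadObject(std::move(Compiled));     // explicit transfer replaces .take()
  return Compiled == nullptr ? 0 : 1;  // moved-from unique_ptr is now empty
}
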
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
index 86b478b..100e9a2 100644
--- a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
@@ -31,43 +31,53 @@ public:
LinkingMemoryManager(MCJIT *Parent, RTDyldMemoryManager *MM)
: ParentEngine(Parent), ClientMM(MM) {}
- virtual uint64_t getSymbolAddress(const std::string &Name);
+ uint64_t getSymbolAddress(const std::string &Name) override;
// Functions deferred to client memory manager
- virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
- unsigned SectionID, StringRef SectionName) {
+ uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName) override {
return ClientMM->allocateCodeSection(Size, Alignment, SectionID, SectionName);
}
- virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
- unsigned SectionID, StringRef SectionName,
- bool IsReadOnly) {
+ uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID, StringRef SectionName,
+ bool IsReadOnly) override {
return ClientMM->allocateDataSection(Size, Alignment,
SectionID, SectionName, IsReadOnly);
}
- virtual void notifyObjectLoaded(ExecutionEngine *EE,
- const ObjectImage *Obj) {
+ void reserveAllocationSpace(uintptr_t CodeSize, uintptr_t DataSizeRO,
+ uintptr_t DataSizeRW) override {
+ return ClientMM->reserveAllocationSpace(CodeSize, DataSizeRO, DataSizeRW);
+ }
+
+ bool needsToReserveAllocationSpace() override {
+ return ClientMM->needsToReserveAllocationSpace();
+ }
+
+ void notifyObjectLoaded(ExecutionEngine *EE,
+ const ObjectImage *Obj) override {
ClientMM->notifyObjectLoaded(EE, Obj);
}
- virtual void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr, size_t Size) {
+ void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr,
+ size_t Size) override {
ClientMM->registerEHFrames(Addr, LoadAddr, Size);
}
- virtual void deregisterEHFrames(uint8_t *Addr,
- uint64_t LoadAddr,
- size_t Size) {
+ void deregisterEHFrames(uint8_t *Addr, uint64_t LoadAddr,
+ size_t Size) override {
ClientMM->deregisterEHFrames(Addr, LoadAddr, Size);
}
- virtual bool finalizeMemory(std::string *ErrMsg = 0) {
+ bool finalizeMemory(std::string *ErrMsg = nullptr) override {
return ClientMM->finalizeMemory(ErrMsg);
}
private:
MCJIT *ParentEngine;
- OwningPtr<RTDyldMemoryManager> ClientMM;
+ std::unique_ptr<RTDyldMemoryManager> ClientMM;
};
// About Module states: added->loaded->finalized.
@@ -206,8 +216,10 @@ class MCJIT : public ExecutionEngine {
OwningModuleContainer OwnedModules;
- typedef DenseMap<Module *, ObjectImage *> LoadedObjectMap;
- LoadedObjectMap LoadedObjects;
+ SmallVector<object::Archive*, 2> Archives;
+
+ typedef SmallVector<ObjectImage *, 2> LoadedObjectList;
+ LoadedObjectList LoadedObjects;
// An optional ObjectCache to be notified of compiled objects and used to
// perform lookup of pre-compiled code to avoid re-compilation.
@@ -226,18 +238,24 @@ public:
/// @name ExecutionEngine interface implementation
/// @{
- virtual void addModule(Module *M);
- virtual bool removeModule(Module *M);
+ void addModule(Module *M) override;
+ void addObjectFile(std::unique_ptr<object::ObjectFile> O) override;
+ void addArchive(object::Archive *O) override;
+ bool removeModule(Module *M) override;
/// FindFunctionNamed - Search all of the active modules to find the one that
/// defines FnName. This is very slow operation and shouldn't be used for
/// general code.
- virtual Function *FindFunctionNamed(const char *FnName);
+ Function *FindFunctionNamed(const char *FnName) override;
/// Sets the object manager that MCJIT should use to avoid compilation.
- virtual void setObjectCache(ObjectCache *manager);
+ void setObjectCache(ObjectCache *manager) override;
+
+ void setProcessAllSections(bool ProcessAllSections) override {
+ Dyld.setProcessAllSections(ProcessAllSections);
+ }
- virtual void generateCodeForModule(Module *M);
+ void generateCodeForModule(Module *M) override;
/// finalizeObject - ensure the module is fully processed and is usable.
///
@@ -248,7 +266,7 @@ public:
/// object.
/// Is it OK to finalize a set of modules, add modules and finalize again.
// FIXME: Do we really need both of these?
- virtual void finalizeObject();
+ void finalizeObject() override;
virtual void finalizeModule(Module *);
void finalizeLoadedModules();
@@ -256,18 +274,18 @@ public:
/// the static constructors or destructors for a program.
///
/// \param isDtors - Run the destructors instead of constructors.
- void runStaticConstructorsDestructors(bool isDtors);
+ void runStaticConstructorsDestructors(bool isDtors) override;
- virtual void *getPointerToBasicBlock(BasicBlock *BB);
+ void *getPointerToBasicBlock(BasicBlock *BB) override;
- virtual void *getPointerToFunction(Function *F);
+ void *getPointerToFunction(Function *F) override;
- virtual void *recompileAndRelinkFunction(Function *F);
+ void *recompileAndRelinkFunction(Function *F) override;
- virtual void freeMachineCodeForFunction(Function *F);
+ void freeMachineCodeForFunction(Function *F) override;
- virtual GenericValue runFunction(Function *F,
- const std::vector<GenericValue> &ArgValues);
+ GenericValue runFunction(Function *F,
+ const std::vector<GenericValue> &ArgValues) override;
/// getPointerToNamedFunction - This method returns the address of the
/// specified function by using the dlsym function call. As such it is only
@@ -277,25 +295,27 @@ public:
/// found, this function silently returns a null pointer. Otherwise,
/// it prints a message to stderr and aborts.
///
- virtual void *getPointerToNamedFunction(const std::string &Name,
- bool AbortOnFailure = true);
+ void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true) override;
/// mapSectionAddress - map a section to its target address space value.
/// Map the address of a JIT section as returned from the memory manager
/// to the address in the target process as the running code will see it.
/// This is the address which will be used for relocation resolution.
- virtual void mapSectionAddress(const void *LocalAddress,
- uint64_t TargetAddress) {
+ void mapSectionAddress(const void *LocalAddress,
+ uint64_t TargetAddress) override {
Dyld.mapSectionAddress(LocalAddress, TargetAddress);
}
- virtual void RegisterJITEventListener(JITEventListener *L);
- virtual void UnregisterJITEventListener(JITEventListener *L);
+ void RegisterJITEventListener(JITEventListener *L) override;
+ void UnregisterJITEventListener(JITEventListener *L) override;
// If successful, these function will implicitly finalize all loaded objects.
// To get a function address within MCJIT without causing a finalize, use
// getSymbolAddress.
- virtual uint64_t getGlobalValueAddress(const std::string &Name);
- virtual uint64_t getFunctionAddress(const std::string &Name);
+ uint64_t getGlobalValueAddress(const std::string &Name) override;
+ uint64_t getFunctionAddress(const std::string &Name) override;
+
+ TargetMachine *getTargetMachine() override { return TM; }
/// @}
/// @name (Private) Registration Interfaces
@@ -322,7 +342,7 @@ protected:
/// emitObject -- Generate a JITed object in memory from the specified module
/// Currently, MCJIT only supports a single module and the module passed to
/// this function call is expected to be the contained module. The module
- /// is passed as a parameter here to prepare for multiple module support in
+ /// is passed as a parameter here to prepare for multiple module support in
/// the future.
ObjectBufferStream* emitObject(Module *M);
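
The LinkingMemoryManager hunks in MCJIT.h above follow a forwarding (decorator) shape: the wrapper owns a client memory manager, overrides the same interface, forwards most calls unchanged, and intercepts symbol lookup so the owning engine can be consulted first. A rough sketch of that shape in plain C++ with invented names; it is not the MCJIT interface itself.

#include <cstdint>
#include <memory>
#include <string>
#include <utility>

struct MemoryManagerAPI {
  virtual ~MemoryManagerAPI() = default;
  virtual std::uint64_t getSymbolAddress(const std::string &Name) = 0;
  virtual bool finalizeMemory(std::string *ErrMsg) = 0;
};

struct NullClientMM : MemoryManagerAPI {
  std::uint64_t getSymbolAddress(const std::string &) override { return 0; }
  bool finalizeMemory(std::string *) override { return false; }
};

class ForwardingMM : public MemoryManagerAPI {
  std::unique_ptr<MemoryManagerAPI> Client; // owned client, forwarded to

public:
  explicit ForwardingMM(std::unique_ptr<MemoryManagerAPI> C)
      : Client(std::move(C)) {}

  // Intercepted: consult an engine-specific table before asking the client.
  std::uint64_t getSymbolAddress(const std::string &Name) override {
    if (Name == "__engine_internal")
      return 0x1000; // pretend the owning engine resolved it
    return Client->getSymbolAddress(Name);
  }

  // Forwarded unchanged.
  bool finalizeMemory(std::string *ErrMsg) override {
    return Client->finalizeMemory(ErrMsg);
  }
};

int main() {
  ForwardingMM MM(std::unique_ptr<MemoryManagerAPI>(new NullClientMM));
  return MM.getSymbolAddress("__engine_internal") == 0x1000 ? 0 : 1;
}
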
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp b/contrib/llvm/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp
index cf90e77..5986084 100644
--- a/contrib/llvm/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp
@@ -71,15 +71,15 @@ uint8_t *SectionMemoryManager::allocateSection(MemoryGroup &MemGroup,
//
// FIXME: Initialize the Near member for each memory group to avoid
// interleaving.
- error_code ec;
+ std::error_code ec;
sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(RequiredSize,
&MemGroup.Near,
sys::Memory::MF_READ |
sys::Memory::MF_WRITE,
ec);
if (ec) {
- // FIXME: Add error propogation to the interface.
- return NULL;
+ // FIXME: Add error propagation to the interface.
+ return nullptr;
}
// Save this address as the basis for our next request
@@ -105,7 +105,7 @@ uint8_t *SectionMemoryManager::allocateSection(MemoryGroup &MemGroup,
bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg)
{
// FIXME: Should in-progress permissions be reverted if an error occurs?
- error_code ec;
+ std::error_code ec;
// Don't allow free memory blocks to be used after setting protection flags.
CodeMem.FreeMem.clear();
@@ -143,19 +143,20 @@ bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg)
return false;
}
-error_code SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
- unsigned Permissions) {
+std::error_code
+SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
+ unsigned Permissions) {
for (int i = 0, e = MemGroup.AllocatedMem.size(); i != e; ++i) {
- error_code ec;
- ec = sys::Memory::protectMappedMemory(MemGroup.AllocatedMem[i],
- Permissions);
- if (ec) {
- return ec;
- }
+ std::error_code ec;
+ ec =
+ sys::Memory::protectMappedMemory(MemGroup.AllocatedMem[i], Permissions);
+ if (ec) {
+ return ec;
+ }
}
- return error_code::success();
+ return std::error_code();
}
void SectionMemoryManager::invalidateInstructionCache() {
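
The SectionMemoryManager.cpp hunks above switch from llvm's error_code to std::error_code, where a default-constructed value means success and replaces error_code::success(). A minimal sketch using only the standard <system_error> facilities (the function name is illustrative):

#include <cerrno>
#include <iostream>
#include <system_error>

std::error_code protectPages(bool SimulateFailure) {
  if (SimulateFailure)
    return std::error_code(EPERM, std::generic_category()); // failure
  return std::error_code();                                 // success
}

int main() {
  std::error_code EC = protectPages(false);
  if (EC) { // contextually true only when an error is set
    std::cerr << "protect failed: " << EC.message() << "\n";
    return 1;
  }
  std::cout << "memory permissions applied\n";
  return 0;
}
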
diff --git a/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp b/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp
index f11df82..fd37a13 100644
--- a/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp
@@ -15,10 +15,8 @@
#include "llvm/Config/config.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
-#define DEBUG_TYPE "oprofile-jit-event-listener"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/ExecutionEngine/ObjectImage.h"
#include "llvm/ExecutionEngine/OProfileWrapper.h"
@@ -34,6 +32,8 @@
using namespace llvm;
using namespace llvm::jitprofiling;
+#define DEBUG_TYPE "oprofile-jit-event-listener"
+
namespace {
class OProfileJITEventListener : public JITEventListener {
@@ -171,11 +171,8 @@ void OProfileJITEventListener::NotifyObjectEmitted(const ObjectImage &Obj) {
}
// Use symbol info to iterate functions in the object.
- error_code ec;
- for (object::symbol_iterator I = Obj.begin_symbols(),
- E = Obj.end_symbols();
- I != E && !ec;
- I.increment(ec)) {
+ for (object::symbol_iterator I = Obj.begin_symbols(), E = Obj.end_symbols();
+ I != E; ++I) {
object::SymbolRef::Type SymType;
if (I->getType(SymType)) continue;
if (SymType == object::SymbolRef::ST_Function) {
@@ -204,11 +201,8 @@ void OProfileJITEventListener::NotifyFreeingObject(const ObjectImage &Obj) {
}
// Use symbol info to iterate functions in the object.
- error_code ec;
- for (object::symbol_iterator I = Obj.begin_symbols(),
- E = Obj.end_symbols();
- I != E && !ec;
- I.increment(ec)) {
+ for (object::symbol_iterator I = Obj.begin_symbols(), E = Obj.end_symbols();
+ I != E; ++I) {
object::SymbolRef::Type SymType;
if (I->getType(SymType)) continue;
if (SymType == object::SymbolRef::ST_Function) {
@@ -229,7 +223,8 @@ void OProfileJITEventListener::NotifyFreeingObject(const ObjectImage &Obj) {
namespace llvm {
JITEventListener *JITEventListener::createOProfileJITEventListener() {
- static OwningPtr<OProfileWrapper> JITProfilingWrapper(new OProfileWrapper);
+ static std::unique_ptr<OProfileWrapper> JITProfilingWrapper(
+ new OProfileWrapper);
return new OProfileJITEventListener(*JITProfilingWrapper);
}
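
Running through this whole merge, and visible in the hunks above and below, literal 0 and NULL pointer constants become nullptr. A standalone illustration in plain C++ of why the keyword is preferred: nullptr has its own pointer-like type, so it cannot be mistaken for an integer during overload resolution and it reads unambiguously as a pointer.

#include <iostream>

void emitStub(int Value)    { std::cout << "integer overload: " << Value << "\n"; }
void emitStub(void *Target) { std::cout << "pointer overload: " << Target << "\n"; }

int main() {
  emitStub(0);       // literal 0 is an int: picks the integer overload
  emitStub(nullptr); // nullptr converts only to pointer types: pointer overload
  void *Unresolved = nullptr; // reads as a pointer, not a numeric literal
  emitStub(Unresolved);
  return 0;
}
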
diff --git a/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp b/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp
index 61d8dc2..04edbd2 100644
--- a/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp
@@ -13,14 +13,13 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "oprofile-wrapper"
#include "llvm/ExecutionEngine/OProfileWrapper.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/MutexGuard.h"
-#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
#include <cstring>
#include <dirent.h>
#include <fcntl.h>
@@ -29,6 +28,8 @@
#include <sys/stat.h>
#include <unistd.h>
+#define DEBUG_TYPE "oprofile-wrapper"
+
namespace {
// Global mutex to ensure a single thread initializes oprofile agent.
diff --git a/contrib/llvm/lib/ExecutionEngine/RTDyldMemoryManager.cpp b/contrib/llvm/lib/ExecutionEngine/RTDyldMemoryManager.cpp
index 26e1fdd..1646937 100644
--- a/contrib/llvm/lib/ExecutionEngine/RTDyldMemoryManager.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RTDyldMemoryManager.cpp
@@ -15,7 +15,6 @@
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/ErrorHandling.h"
-
#include <cstdlib>
#ifdef __linux__
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp
index 603c526..8546571 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp
@@ -13,6 +13,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/MutexGuard.h"
+#include "llvm/Support/ManagedStatic.h"
using namespace llvm;
@@ -44,10 +45,16 @@ extern "C" {
// We put information about the JITed function in this global, which the
// debugger reads. Make sure to specify the version statically, because the
// debugger checks the version before we can set it during runtime.
- struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
+ struct jit_descriptor __jit_debug_descriptor = { 1, 0, nullptr, nullptr };
// Debuggers puts a breakpoint in this function.
- LLVM_ATTRIBUTE_NOINLINE void __jit_debug_register_code() { }
+ LLVM_ATTRIBUTE_NOINLINE void __jit_debug_register_code() {
+ // The noinline and the asm prevent calls to this function from being
+ // optimized out.
+#if !defined(_MSC_VER)
+ asm volatile("":::"memory");
+#endif
+ }
}
@@ -78,12 +85,12 @@ public:
/// Creates an entry in the JIT registry for the buffer @p Object,
/// which must contain an object file in executable memory with any
/// debug information for the debugger.
- void registerObject(const ObjectBuffer &Object);
+ void registerObject(const ObjectBuffer &Object) override;
/// Removes the internal registration of @p Object, and
/// frees associated resources.
/// Returns true if @p Object was found in ObjectBufferMap.
- bool deregisterObject(const ObjectBuffer &Object);
+ bool deregisterObject(const ObjectBuffer &Object) override;
private:
/// Deregister the debug info for the given object file from the debugger
@@ -96,16 +103,15 @@ private:
/// modify global variables.
llvm::sys::Mutex JITDebugLock;
-/// Acquire the lock and do the registration.
+/// Do the registration.
void NotifyDebugger(jit_code_entry* JITCodeEntry) {
- llvm::MutexGuard locked(JITDebugLock);
__jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
// Insert this entry at the head of the list.
- JITCodeEntry->prev_entry = NULL;
+ JITCodeEntry->prev_entry = nullptr;
jit_code_entry* NextEntry = __jit_debug_descriptor.first_entry;
JITCodeEntry->next_entry = NextEntry;
- if (NextEntry != NULL) {
+ if (NextEntry) {
NextEntry->prev_entry = JITCodeEntry;
}
__jit_debug_descriptor.first_entry = JITCodeEntry;
@@ -115,7 +121,8 @@ void NotifyDebugger(jit_code_entry* JITCodeEntry) {
GDBJITRegistrar::~GDBJITRegistrar() {
// Free all registered object files.
- for (RegisteredObjectBufferMap::iterator I = ObjectBufferMap.begin(), E = ObjectBufferMap.end();
+ llvm::MutexGuard locked(JITDebugLock);
+ for (RegisteredObjectBufferMap::iterator I = ObjectBufferMap.begin(), E = ObjectBufferMap.end();
I != E; ++I) {
// Call the private method that doesn't update the map so our iterator
// doesn't break.
@@ -130,15 +137,15 @@ void GDBJITRegistrar::registerObject(const ObjectBuffer &Object) {
size_t Size = Object.getBufferSize();
assert(Buffer && "Attempt to register a null object with a debugger.");
+ llvm::MutexGuard locked(JITDebugLock);
assert(ObjectBufferMap.find(Buffer) == ObjectBufferMap.end() &&
"Second attempt to perform debug registration.");
jit_code_entry* JITCodeEntry = new jit_code_entry();
- if (JITCodeEntry == 0) {
+ if (!JITCodeEntry) {
llvm::report_fatal_error(
"Allocation failed when registering a JIT entry!\n");
- }
- else {
+ } else {
JITCodeEntry->symfile_addr = Buffer;
JITCodeEntry->symfile_size = Size;
@@ -149,6 +156,7 @@ void GDBJITRegistrar::registerObject(const ObjectBuffer &Object) {
bool GDBJITRegistrar::deregisterObject(const ObjectBuffer& Object) {
const char *Buffer = Object.getBufferStart();
+ llvm::MutexGuard locked(JITDebugLock);
RegisteredObjectBufferMap::iterator I = ObjectBufferMap.find(Buffer);
if (I != ObjectBufferMap.end()) {
@@ -164,9 +172,8 @@ void GDBJITRegistrar::deregisterObjectInternal(
jit_code_entry*& JITCodeEntry = I->second.second;
- // Acquire the lock and do the unregistration.
+ // Do the unregistration.
{
- llvm::MutexGuard locked(JITDebugLock);
__jit_debug_descriptor.action_flag = JIT_UNREGISTER_FN;
// Remove the jit_code_entry from the linked list.
@@ -190,25 +197,17 @@ void GDBJITRegistrar::deregisterObjectInternal(
}
delete JITCodeEntry;
- JITCodeEntry = NULL;
+ JITCodeEntry = nullptr;
}
+llvm::ManagedStatic<GDBJITRegistrar> TheRegistrar;
+
} // end namespace
namespace llvm {
JITRegistrar& JITRegistrar::getGDBRegistrar() {
- static GDBJITRegistrar* sRegistrar = NULL;
- if (sRegistrar == NULL) {
- // The mutex is here so that it won't slow down access once the registrar
- // is instantiated
- llvm::MutexGuard locked(JITDebugLock);
- // Check again to be sure another thread didn't create this while we waited
- if (sRegistrar == NULL) {
- sRegistrar = new GDBJITRegistrar;
- }
- }
- return *sRegistrar;
+ return *TheRegistrar;
}
} // namespace llvm
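
The last GDBRegistrar.cpp hunk replaces a hand-rolled, mutex-guarded lazy singleton with a ManagedStatic (which, in addition, ties destruction into llvm_shutdown()). A minimal sketch of the lazy-construction part in portable C++11, where a function-local static is already initialized exactly once and thread-safely; the class name is illustrative, not the LLVM registrar.

#include <iostream>

class JITRegistry {
  JITRegistry() { std::cout << "registry constructed exactly once\n"; }

public:
  static JITRegistry &get() {
    static JITRegistry Instance; // initialization is serialized by the language
    return Instance;
  }
  void registerObject(const void *Buffer) {
    std::cout << "registered object at " << Buffer << "\n";
  }
};

int main() {
  int Dummy = 0;
  JITRegistry::get().registerObject(&Dummy);
  JITRegistry::get().registerObject(&Dummy); // constructor does not run again
  return 0;
}
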
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImageCommon.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImageCommon.h
index 9cbde5d..c3a2182 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImageCommon.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImageCommon.h
@@ -1,6 +1,6 @@
//===-- ObjectImageCommon.h - Format independent executuable object image -===//
//
-// The LLVM Compiler Infrastructure
+// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
@@ -18,21 +18,27 @@
#include "llvm/ExecutionEngine/ObjectImage.h"
#include "llvm/Object/ObjectFile.h"
+#include <memory>
+
namespace llvm {
+namespace object {
+ class ObjectFile;
+}
+
class ObjectImageCommon : public ObjectImage {
ObjectImageCommon(); // = delete
ObjectImageCommon(const ObjectImageCommon &other); // = delete
- virtual void anchor();
+ void anchor() override;
protected:
- object::ObjectFile *ObjFile;
+ std::unique_ptr<object::ObjectFile> ObjFile;
// This form of the constructor allows subclasses to use
// format-specific subclasses of ObjectFile directly
- ObjectImageCommon(ObjectBuffer *Input, object::ObjectFile *Obj)
+ ObjectImageCommon(ObjectBuffer *Input, std::unique_ptr<object::ObjectFile> Obj)
: ObjectImage(Input), // saves Input as Buffer and takes ownership
- ObjFile(Obj)
+ ObjFile(std::move(Obj))
{
}
@@ -40,40 +46,44 @@ public:
ObjectImageCommon(ObjectBuffer* Input)
: ObjectImage(Input) // saves Input as Buffer and takes ownership
{
- ObjFile = object::ObjectFile::createObjectFile(Buffer->getMemBuffer());
+ // FIXME: error checking? createObjectFile returns an ErrorOr<ObjectFile*>
+ // and should probably be checked for failure.
+ std::unique_ptr<MemoryBuffer> Buf(Buffer->getMemBuffer());
+ ObjFile.reset(object::ObjectFile::createObjectFile(Buf).get());
}
- virtual ~ObjectImageCommon() { delete ObjFile; }
+ ObjectImageCommon(std::unique_ptr<object::ObjectFile> Input)
+ : ObjectImage(nullptr), ObjFile(std::move(Input)) {}
+ virtual ~ObjectImageCommon() { }
- virtual object::symbol_iterator begin_symbols() const
- { return ObjFile->begin_symbols(); }
- virtual object::symbol_iterator end_symbols() const
- { return ObjFile->end_symbols(); }
+ object::symbol_iterator begin_symbols() const override
+ { return ObjFile->symbol_begin(); }
+ object::symbol_iterator end_symbols() const override
+ { return ObjFile->symbol_end(); }
- virtual object::section_iterator begin_sections() const
- { return ObjFile->begin_sections(); }
- virtual object::section_iterator end_sections() const
- { return ObjFile->end_sections(); }
+ object::section_iterator begin_sections() const override
+ { return ObjFile->section_begin(); }
+ object::section_iterator end_sections() const override
+ { return ObjFile->section_end(); }
- virtual /* Triple::ArchType */ unsigned getArch() const
- { return ObjFile->getArch(); }
+ /* Triple::ArchType */ unsigned getArch() const override
+ { return ObjFile->getArch(); }
- virtual StringRef getData() const { return ObjFile->getData(); }
+ StringRef getData() const override { return ObjFile->getData(); }
- virtual object::ObjectFile* getObjectFile() const { return ObjFile; }
+ object::ObjectFile* getObjectFile() const override { return ObjFile.get(); }
// Subclasses can override these methods to update the image with loaded
// addresses for sections and common symbols
- virtual void updateSectionAddress(const object::SectionRef &Sec,
- uint64_t Addr) {}
- virtual void updateSymbolAddress(const object::SymbolRef &Sym, uint64_t Addr)
- {}
+ void updateSectionAddress(const object::SectionRef &Sec,
+ uint64_t Addr) override {}
+ void updateSymbolAddress(const object::SymbolRef &Sym,
+ uint64_t Addr) override {}
// Subclasses can override these methods to provide JIT debugging support
- virtual void registerWithDebugger() {}
- virtual void deregisterWithDebugger() {}
+ void registerWithDebugger() override {}
+ void deregisterWithDebugger() override {}
};
} // end namespace llvm
#endif // LLVM_RUNTIMEDYLD_OBJECT_IMAGE_H
-
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
index 161135a..d86a751 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
@@ -11,21 +11,21 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "dyld"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "JITRegistrar.h"
#include "ObjectImageCommon.h"
#include "RuntimeDyldELF.h"
#include "RuntimeDyldImpl.h"
#include "RuntimeDyldMachO.h"
-#include "llvm/Support/FileSystem.h"
+#include "llvm/Object/ELF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MutexGuard.h"
-#include "llvm/Object/ELF.h"
using namespace llvm;
using namespace llvm::object;
+#define DEBUG_TYPE "dyld"
+
// Empty out-of-line virtual destructor as the key function.
RuntimeDyldImpl::~RuntimeDyldImpl() {}
@@ -36,11 +36,9 @@ void ObjectImageCommon::anchor() {}
namespace llvm {
-void RuntimeDyldImpl::registerEHFrames() {
-}
+void RuntimeDyldImpl::registerEHFrames() {}
-void RuntimeDyldImpl::deregisterEHFrames() {
-}
+void RuntimeDyldImpl::deregisterEHFrames() {}
// Resolve the relocations for all symbols we currently know about.
void RuntimeDyldImpl::resolveRelocations() {
@@ -56,9 +54,8 @@ void RuntimeDyldImpl::resolveRelocations() {
// symbol for the relocation is located. The SectionID in the relocation
// entry provides the section to which the relocation will be applied.
uint64_t Addr = Sections[i].LoadAddress;
- DEBUG(dbgs() << "Resolving relocations Section #" << i
- << "\t" << format("%p", (uint8_t *)Addr)
- << "\n");
+ DEBUG(dbgs() << "Resolving relocations Section #" << i << "\t"
+ << format("%p", (uint8_t *)Addr) << "\n");
resolveRelocationList(Relocations[i], Addr);
Relocations.erase(i);
}
@@ -76,22 +73,52 @@ void RuntimeDyldImpl::mapSectionAddress(const void *LocalAddress,
llvm_unreachable("Attempting to remap address of unknown section!");
}
-// Subclasses can implement this method to create specialized image instances.
-// The caller owns the pointer that is returned.
-ObjectImage *RuntimeDyldImpl::createObjectImage(ObjectBuffer *InputBuffer) {
- return new ObjectImageCommon(InputBuffer);
+static std::error_code getOffset(const SymbolRef &Sym, uint64_t &Result) {
+ uint64_t Address;
+ if (std::error_code EC = Sym.getAddress(Address))
+ return EC;
+
+ if (Address == UnknownAddressOrSize) {
+ Result = UnknownAddressOrSize;
+ return object_error::success;
+ }
+
+ const ObjectFile *Obj = Sym.getObject();
+ section_iterator SecI(Obj->section_begin());
+ if (std::error_code EC = Sym.getSection(SecI))
+ return EC;
+
+ if (SecI == Obj->section_end()) {
+ Result = UnknownAddressOrSize;
+ return object_error::success;
+ }
+
+ uint64_t SectionAddress;
+ if (std::error_code EC = SecI->getAddress(SectionAddress))
+ return EC;
+
+ Result = Address - SectionAddress;
+ return object_error::success;
}
-ObjectImage *RuntimeDyldImpl::loadObject(ObjectBuffer *InputBuffer) {
+ObjectImage *RuntimeDyldImpl::loadObject(ObjectImage *InputObject) {
MutexGuard locked(lock);
- OwningPtr<ObjectImage> obj(createObjectImage(InputBuffer));
- if (!obj)
- report_fatal_error("Unable to create object image from memory buffer!");
+ std::unique_ptr<ObjectImage> Obj(InputObject);
+ if (!Obj)
+ return nullptr;
// Save information about our target
- Arch = (Triple::ArchType)obj->getArch();
- IsTargetLittleEndian = obj->getObjectFile()->isLittleEndian();
+ Arch = (Triple::ArchType)Obj->getArch();
+ IsTargetLittleEndian = Obj->getObjectFile()->isLittleEndian();
+
+ // Compute the memory size required to load all sections to be loaded
+ // and pass this information to the memory manager
+ if (MemMgr->needsToReserveAllocationSpace()) {
+ uint64_t CodeSize = 0, DataSizeRO = 0, DataSizeRW = 0;
+ computeTotalAllocSize(*Obj, CodeSize, DataSizeRO, DataSizeRW);
+ MemMgr->reserveAllocationSpace(CodeSize, DataSizeRO, DataSizeRW);
+ }
// Symbols found in this object
StringMap<SymbolLoc> LocalSymbols;
@@ -103,52 +130,47 @@ ObjectImage *RuntimeDyldImpl::loadObject(ObjectBuffer *InputBuffer) {
// Maximum required total memory to allocate all common symbols
uint64_t CommonSize = 0;
- error_code err;
// Parse symbols
DEBUG(dbgs() << "Parse symbols:\n");
- for (symbol_iterator i = obj->begin_symbols(), e = obj->end_symbols();
- i != e; i.increment(err)) {
- Check(err);
+ for (symbol_iterator I = Obj->begin_symbols(), E = Obj->end_symbols(); I != E;
+ ++I) {
object::SymbolRef::Type SymType;
StringRef Name;
- Check(i->getType(SymType));
- Check(i->getName(Name));
+ Check(I->getType(SymType));
+ Check(I->getName(Name));
- uint32_t flags;
- Check(i->getFlags(flags));
+ uint32_t Flags = I->getFlags();
- bool isCommon = flags & SymbolRef::SF_Common;
- if (isCommon) {
+ bool IsCommon = Flags & SymbolRef::SF_Common;
+ if (IsCommon) {
// Add the common symbols to a list. We'll allocate them all below.
- uint32_t Align;
- Check(i->getAlignment(Align));
- uint64_t Size = 0;
- Check(i->getSize(Size));
- CommonSize += Size + Align;
- CommonSymbols[*i] = CommonSymbolInfo(Size, Align);
+ if (!GlobalSymbolTable.count(Name)) {
+ uint32_t Align;
+ Check(I->getAlignment(Align));
+ uint64_t Size = 0;
+ Check(I->getSize(Size));
+ CommonSize += Size + Align;
+ CommonSymbols[*I] = CommonSymbolInfo(Size, Align);
+ }
} else {
if (SymType == object::SymbolRef::ST_Function ||
SymType == object::SymbolRef::ST_Data ||
SymType == object::SymbolRef::ST_Unknown) {
- uint64_t FileOffset;
+ uint64_t SectOffset;
StringRef SectionData;
bool IsCode;
- section_iterator si = obj->end_sections();
- Check(i->getFileOffset(FileOffset));
- Check(i->getSection(si));
- if (si == obj->end_sections()) continue;
- Check(si->getContents(SectionData));
- Check(si->isText(IsCode));
- const uint8_t* SymPtr = (const uint8_t*)InputBuffer->getBufferStart() +
- (uintptr_t)FileOffset;
- uintptr_t SectOffset = (uintptr_t)(SymPtr -
- (const uint8_t*)SectionData.begin());
- unsigned SectionID = findOrEmitSection(*obj, *si, IsCode, LocalSections);
+ section_iterator SI = Obj->end_sections();
+ Check(getOffset(*I, SectOffset));
+ Check(I->getSection(SI));
+ if (SI == Obj->end_sections())
+ continue;
+ Check(SI->getContents(SectionData));
+ Check(SI->isText(IsCode));
+ unsigned SectionID =
+ findOrEmitSection(*Obj, *SI, IsCode, LocalSections);
LocalSymbols[Name.data()] = SymbolLoc(SectionID, SectOffset);
- DEBUG(dbgs() << "\tFileOffset: " << format("%p", (uintptr_t)FileOffset)
- << " flags: " << flags
- << " SID: " << SectionID
- << " Offset: " << format("%p", SectOffset));
+ DEBUG(dbgs() << "\tOffset: " << format("%p", (uintptr_t)SectOffset)
+ << " flags: " << Flags << " SID: " << SectionID);
GlobalSymbolTable[Name] = SymbolLoc(SectionID, SectOffset);
}
}
@@ -157,39 +179,179 @@ ObjectImage *RuntimeDyldImpl::loadObject(ObjectBuffer *InputBuffer) {
// Allocate common symbols
if (CommonSize != 0)
- emitCommonSymbols(*obj, CommonSymbols, CommonSize, LocalSymbols);
+ emitCommonSymbols(*Obj, CommonSymbols, CommonSize, GlobalSymbolTable);
// Parse and process relocations
DEBUG(dbgs() << "Parse relocations:\n");
- for (section_iterator si = obj->begin_sections(),
- se = obj->end_sections(); si != se; si.increment(err)) {
- Check(err);
- bool isFirstRelocation = true;
+ for (section_iterator SI = Obj->begin_sections(), SE = Obj->end_sections();
+ SI != SE; ++SI) {
unsigned SectionID = 0;
StubMap Stubs;
- section_iterator RelocatedSection = si->getRelocatedSection();
-
- for (relocation_iterator i = si->begin_relocations(),
- e = si->end_relocations(); i != e; i.increment(err)) {
- Check(err);
-
- // If it's the first relocation in this section, find its SectionID
- if (isFirstRelocation) {
- SectionID =
- findOrEmitSection(*obj, *RelocatedSection, true, LocalSections);
- DEBUG(dbgs() << "\tSectionID: " << SectionID << "\n");
- isFirstRelocation = false;
+ section_iterator RelocatedSection = SI->getRelocatedSection();
+
+ relocation_iterator I = SI->relocation_begin();
+ relocation_iterator E = SI->relocation_end();
+
+ if (I == E && !ProcessAllSections)
+ continue;
+
+ bool IsCode = false;
+ Check(RelocatedSection->isText(IsCode));
+ SectionID =
+ findOrEmitSection(*Obj, *RelocatedSection, IsCode, LocalSections);
+ DEBUG(dbgs() << "\tSectionID: " << SectionID << "\n");
+
+ for (; I != E;)
+ I = processRelocationRef(SectionID, I, *Obj, LocalSections, LocalSymbols,
+ Stubs);
+ }
+
+ // Give the subclasses a chance to tie-up any loose ends.
+ finalizeLoad(*Obj, LocalSections);
+
+ return Obj.release();
+}
+
+// A helper method for computeTotalAllocSize.
+// Computes the memory size required to allocate sections with the given sizes,
+// assuming that all sections are allocated with the given alignment
+static uint64_t
+computeAllocationSizeForSections(std::vector<uint64_t> &SectionSizes,
+ uint64_t Alignment) {
+ uint64_t TotalSize = 0;
+ for (size_t Idx = 0, Cnt = SectionSizes.size(); Idx < Cnt; Idx++) {
+ uint64_t AlignedSize =
+ (SectionSizes[Idx] + Alignment - 1) / Alignment * Alignment;
+ TotalSize += AlignedSize;
+ }
+ return TotalSize;
+}
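
computeAllocationSizeForSections rounds each section size up with the usual idiom (Size + Alignment - 1) / Alignment * Alignment. A small self-contained illustration of that arithmetic, with made-up sizes:

    #include <cassert>
    #include <cstdint>

    // Round Size up to the next multiple of Alignment (Alignment > 0).
    static uint64_t alignTo(uint64_t Size, uint64_t Alignment) {
      return (Size + Alignment - 1) / Alignment * Alignment;
    }

    int main() {
      assert(alignTo(10, 16) == 16); // a partial slot rounds up
      assert(alignTo(32, 16) == 32); // already aligned, unchanged
      assert(alignTo(33, 16) == 48); // one byte over spills into the next slot
    }
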
+
+// Compute an upper bound of the memory size that is required to load all
+// sections
+void RuntimeDyldImpl::computeTotalAllocSize(ObjectImage &Obj,
+ uint64_t &CodeSize,
+ uint64_t &DataSizeRO,
+ uint64_t &DataSizeRW) {
+ // Compute the size of all sections required for execution
+ std::vector<uint64_t> CodeSectionSizes;
+ std::vector<uint64_t> ROSectionSizes;
+ std::vector<uint64_t> RWSectionSizes;
+ uint64_t MaxAlignment = sizeof(void *);
+
+ // Collect sizes of all sections to be loaded;
+ // also determine the max alignment of all sections
+ for (section_iterator SI = Obj.begin_sections(), SE = Obj.end_sections();
+ SI != SE; ++SI) {
+ const SectionRef &Section = *SI;
+
+ bool IsRequired;
+ Check(Section.isRequiredForExecution(IsRequired));
+
+ // Consider only the sections that are required to be loaded for execution
+ if (IsRequired) {
+ uint64_t DataSize = 0;
+ uint64_t Alignment64 = 0;
+ bool IsCode = false;
+ bool IsReadOnly = false;
+ StringRef Name;
+ Check(Section.getSize(DataSize));
+ Check(Section.getAlignment(Alignment64));
+ Check(Section.isText(IsCode));
+ Check(Section.isReadOnlyData(IsReadOnly));
+ Check(Section.getName(Name));
+ unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL;
+
+ uint64_t StubBufSize = computeSectionStubBufSize(Obj, Section);
+ uint64_t SectionSize = DataSize + StubBufSize;
+
+ // The .eh_frame section (at least on Linux) needs an extra four bytes padded
+ // with zeroes added at the end. For MachO objects, this section has a
+ // slightly different name, so this won't have any effect for MachO objects.
+ if (Name == ".eh_frame")
+ SectionSize += 4;
+
+ if (SectionSize > 0) {
+ // save the total size of the section
+ if (IsCode) {
+ CodeSectionSizes.push_back(SectionSize);
+ } else if (IsReadOnly) {
+ ROSectionSizes.push_back(SectionSize);
+ } else {
+ RWSectionSizes.push_back(SectionSize);
+ }
+ // update the max alignment
+ if (Alignment > MaxAlignment) {
+ MaxAlignment = Alignment;
+ }
}
+ }
+ }
- processRelocationRef(SectionID, *i, *obj, LocalSections, LocalSymbols,
- Stubs);
+ // Compute the size of all common symbols
+ uint64_t CommonSize = 0;
+ for (symbol_iterator I = Obj.begin_symbols(), E = Obj.end_symbols(); I != E;
+ ++I) {
+ uint32_t Flags = I->getFlags();
+ if (Flags & SymbolRef::SF_Common) {
+ // Sum the sizes of all common symbols; they count toward read-write data.
+ uint64_t Size = 0;
+ Check(I->getSize(Size));
+ CommonSize += Size;
}
}
+ if (CommonSize != 0) {
+ RWSectionSizes.push_back(CommonSize);
+ }
- // Give the subclasses a chance to tie-up any loose ends.
- finalizeLoad(LocalSections);
+ // Compute the required allocation space for each different type of sections
+ // (code, read-only data, read-write data) assuming that all sections are
+ // allocated with the max alignment. Note that we cannot compute with the
+ // individual alignments of the sections, because then the required size
+ // depends on the order, in which the sections are allocated.
+ CodeSize = computeAllocationSizeForSections(CodeSectionSizes, MaxAlignment);
+ DataSizeRO = computeAllocationSizeForSections(ROSectionSizes, MaxAlignment);
+ DataSizeRW = computeAllocationSizeForSections(RWSectionSizes, MaxAlignment);
+}
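
The comment above on using MaxAlignment rather than per-section alignments deserves a concrete illustration: with individual alignments the total depends on the order in which the memory manager places the sections, while rounding everything to the maximum gives an order-independent upper bound. Hypothetical numbers:

    // Two sections: A = 8 bytes with align 4, B = 4 bytes with align 16.
    //   A then B: A occupies [0,8), B must start at 16  -> 20 bytes total.
    //   B then A: B occupies [0,4), A can start at 4    -> 12 bytes total.
    // Rounding both to MaxAlignment (16) gives 16 + 16 = 32 bytes, which is
    // enough no matter which order the sections end up being allocated in.
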
- return obj.take();
+// compute stub buffer size for the given section
+unsigned RuntimeDyldImpl::computeSectionStubBufSize(ObjectImage &Obj,
+ const SectionRef &Section) {
+ unsigned StubSize = getMaxStubSize();
+ if (StubSize == 0) {
+ return 0;
+ }
+ // FIXME: this is an inefficient way to handle this. We should compute the
+ // necessary section allocation size in loadObject by walking all the sections
+ // once.
+ unsigned StubBufSize = 0;
+ for (section_iterator SI = Obj.begin_sections(), SE = Obj.end_sections();
+ SI != SE; ++SI) {
+ section_iterator RelSecI = SI->getRelocatedSection();
+ if (!(RelSecI == Section))
+ continue;
+
+ for (const RelocationRef &Reloc : SI->relocations()) {
+ (void)Reloc;
+ StubBufSize += StubSize;
+ }
+ }
+
+ // Get section data size and alignment
+ uint64_t Alignment64;
+ uint64_t DataSize;
+ Check(Section.getSize(DataSize));
+ Check(Section.getAlignment(Alignment64));
+
+ // Add stubbuf size alignment
+ unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL;
+ unsigned StubAlignment = getStubAlignment();
+ unsigned EndAlignment = (DataSize | Alignment) & -(DataSize | Alignment);
+ if (StubAlignment > EndAlignment)
+ StubBufSize += StubAlignment - EndAlignment;
+ return StubBufSize;
}
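
The EndAlignment expression above is the lowest-set-bit trick: x & -x isolates the least significant set bit, so (DataSize | Alignment) & -(DataSize | Alignment) is the largest power of two guaranteed to divide the address where the stub area begins (section base, which is Alignment-aligned, plus DataSize). A quick check with made-up numbers:

    #include <cassert>
    #include <cstdint>

    static uint64_t lowestSetBit(uint64_t X) { return X & (~X + 1); } // same as X & -X

    int main() {
      // 16-byte-aligned section holding 20 bytes of data: the stub area starts
      // at base + 20, which is only guaranteed to be 4-byte aligned.
      assert(lowestSetBit(20 | 16) == 4);
      // With 32 bytes of data the end of the section stays 16-byte aligned.
      assert(lowestSetBit(32 | 16) == 16);
    }
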
void RuntimeDyldImpl::emitCommonSymbols(ObjectImage &Obj,
@@ -198,22 +360,20 @@ void RuntimeDyldImpl::emitCommonSymbols(ObjectImage &Obj,
SymbolTableMap &SymbolTable) {
// Allocate memory for the section
unsigned SectionID = Sections.size();
- uint8_t *Addr = MemMgr->allocateDataSection(
- TotalSize, sizeof(void*), SectionID, StringRef(), false);
+ uint8_t *Addr = MemMgr->allocateDataSection(TotalSize, sizeof(void *),
+ SectionID, StringRef(), false);
if (!Addr)
report_fatal_error("Unable to allocate memory for common symbols!");
uint64_t Offset = 0;
Sections.push_back(SectionEntry(StringRef(), Addr, TotalSize, 0));
memset(Addr, 0, TotalSize);
- DEBUG(dbgs() << "emitCommonSection SectionID: " << SectionID
- << " new addr: " << format("%p", Addr)
- << " DataSize: " << TotalSize
- << "\n");
+ DEBUG(dbgs() << "emitCommonSection SectionID: " << SectionID << " new addr: "
+ << format("%p", Addr) << " DataSize: " << TotalSize << "\n");
// Assign the address of each symbol
for (CommonSymbolMap::const_iterator it = CommonSymbols.begin(),
- itEnd = CommonSymbols.end(); it != itEnd; it++) {
+ itEnd = CommonSymbols.end(); it != itEnd; ++it) {
uint64_t Size = it->second.first;
uint64_t Align = it->second.second;
StringRef Name;
@@ -223,8 +383,8 @@ void RuntimeDyldImpl::emitCommonSymbols(ObjectImage &Obj,
uint64_t AlignOffset = OffsetToAlignment((uint64_t)Addr, Align);
Addr += AlignOffset;
Offset += AlignOffset;
- DEBUG(dbgs() << "Allocating common symbol " << Name << " address " <<
- format("%p\n", Addr));
+ DEBUG(dbgs() << "Allocating common symbol " << Name << " address "
+ << format("%p\n", Addr));
}
Obj.updateSymbolAddress(it->first, (uint64_t)Addr);
SymbolTable[Name.data()] = SymbolLoc(SectionID, Offset);
@@ -234,30 +394,7 @@ void RuntimeDyldImpl::emitCommonSymbols(ObjectImage &Obj,
}
unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj,
- const SectionRef &Section,
- bool IsCode) {
-
- unsigned StubBufSize = 0,
- StubSize = getMaxStubSize();
- error_code err;
- const ObjectFile *ObjFile = Obj.getObjectFile();
- // FIXME: this is an inefficient way to handle this. We should computed the
- // necessary section allocation size in loadObject by walking all the sections
- // once.
- if (StubSize > 0) {
- for (section_iterator SI = ObjFile->begin_sections(),
- SE = ObjFile->end_sections();
- SI != SE; SI.increment(err), Check(err)) {
- section_iterator RelSecI = SI->getRelocatedSection();
- if (!(RelSecI == Section))
- continue;
-
- for (relocation_iterator I = SI->begin_relocations(),
- E = SI->end_relocations(); I != E; I.increment(err), Check(err)) {
- StubBufSize += StubSize;
- }
- }
- }
+ const SectionRef &Section, bool IsCode) {
StringRef data;
uint64_t Alignment64;
@@ -271,6 +408,7 @@ unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj,
bool IsReadOnly;
uint64_t DataSize;
unsigned PaddingSize = 0;
+ unsigned StubBufSize = 0;
StringRef Name;
Check(Section.isRequiredForExecution(IsRequired));
Check(Section.isVirtual(IsVirtual));
@@ -278,12 +416,8 @@ unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj,
Check(Section.isReadOnlyData(IsReadOnly));
Check(Section.getSize(DataSize));
Check(Section.getName(Name));
- if (StubSize > 0) {
- unsigned StubAlignment = getStubAlignment();
- unsigned EndAlignment = (DataSize | Alignment) & -(DataSize | Alignment);
- if (StubAlignment > EndAlignment)
- StubBufSize += StubAlignment - EndAlignment;
- }
+
+ StubBufSize = computeSectionStubBufSize(Obj, Section);
// The .eh_frame section (at least on Linux) needs an extra four bytes padded
// with zeroes added at the end. For MachO objects, this section has a
@@ -291,19 +425,19 @@ unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj,
if (Name == ".eh_frame")
PaddingSize = 4;
- unsigned Allocate;
+ uintptr_t Allocate;
unsigned SectionID = Sections.size();
uint8_t *Addr;
- const char *pData = 0;
+ const char *pData = nullptr;
// Some sections, such as debug info, don't need to be loaded for execution.
// Leave those where they are.
if (IsRequired) {
Allocate = DataSize + PaddingSize + StubBufSize;
- Addr = IsCode
- ? MemMgr->allocateCodeSection(Allocate, Alignment, SectionID, Name)
- : MemMgr->allocateDataSection(Allocate, Alignment, SectionID, Name,
- IsReadOnly);
+ Addr = IsCode ? MemMgr->allocateCodeSection(Allocate, Alignment, SectionID,
+ Name)
+ : MemMgr->allocateDataSection(Allocate, Alignment, SectionID,
+ Name, IsReadOnly);
if (!Addr)
report_fatal_error("Unable to allocate section memory!");
@@ -324,30 +458,22 @@ unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj,
DataSize += PaddingSize;
}
- DEBUG(dbgs() << "emitSection SectionID: " << SectionID
- << " Name: " << Name
+ DEBUG(dbgs() << "emitSection SectionID: " << SectionID << " Name: " << Name
<< " obj addr: " << format("%p", pData)
<< " new addr: " << format("%p", Addr)
- << " DataSize: " << DataSize
- << " StubBufSize: " << StubBufSize
- << " Allocate: " << Allocate
- << "\n");
+ << " DataSize: " << DataSize << " StubBufSize: " << StubBufSize
+ << " Allocate: " << Allocate << "\n");
Obj.updateSectionAddress(Section, (uint64_t)Addr);
- }
- else {
+ } else {
// Even if we didn't load the section, we need to record an entry for it
// to handle later processing (and by 'handle' I mean don't do anything
// with these sections).
Allocate = 0;
- Addr = 0;
- DEBUG(dbgs() << "emitSection SectionID: " << SectionID
- << " Name: " << Name
- << " obj addr: " << format("%p", data.data())
- << " new addr: 0"
- << " DataSize: " << DataSize
- << " StubBufSize: " << StubBufSize
- << " Allocate: " << Allocate
- << "\n");
+ Addr = nullptr;
+ DEBUG(dbgs() << "emitSection SectionID: " << SectionID << " Name: " << Name
+ << " obj addr: " << format("%p", data.data()) << " new addr: 0"
+ << " DataSize: " << DataSize << " StubBufSize: " << StubBufSize
+ << " Allocate: " << Allocate << "\n");
}
Sections.push_back(SectionEntry(Name, Addr, DataSize, (uintptr_t)pData));
@@ -380,8 +506,7 @@ void RuntimeDyldImpl::addRelocationForSymbol(const RelocationEntry &RE,
// Relocation by symbol. If the symbol is found in the global symbol table,
// create an appropriate section relocation. Otherwise, add it to
// ExternalSymbolRelocations.
- SymbolTableMap::const_iterator Loc =
- GlobalSymbolTable.find(SymbolName);
+ SymbolTableMap::const_iterator Loc = GlobalSymbolTable.find(SymbolName);
if (Loc == GlobalSymbolTable.end()) {
ExternalSymbolRelocations[SymbolName].push_back(RE);
} else {
@@ -392,12 +517,14 @@ void RuntimeDyldImpl::addRelocationForSymbol(const RelocationEntry &RE,
}
}
-uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr) {
- if (Arch == Triple::aarch64) {
+uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr,
+ unsigned AbiVariant) {
+ if (Arch == Triple::aarch64 || Arch == Triple::aarch64_be ||
+ Arch == Triple::arm64 || Arch == Triple::arm64_be) {
// This stub has to be able to access the full address space,
// since symbol lookup won't necessarily find a handy, in-range,
// PLT stub for functions which could be anywhere.
- uint32_t *StubAddr = (uint32_t*)Addr;
+ uint32_t *StubAddr = (uint32_t *)Addr;
// Stub can use ip0 (== x16) to calculate address
*StubAddr = 0xd2e00010; // movz ip0, #:abs_g3:<addr>
@@ -411,14 +538,14 @@ uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr) {
*StubAddr = 0xd61f0200; // br ip0
return Addr;
- } else if (Arch == Triple::arm) {
+ } else if (Arch == Triple::arm || Arch == Triple::armeb) {
// TODO: There is only ARM far stub now. We should add the Thumb stub,
// and stubs for branches Thumb - ARM and ARM - Thumb.
- uint32_t *StubAddr = (uint32_t*)Addr;
+ uint32_t *StubAddr = (uint32_t *)Addr;
*StubAddr = 0xe51ff004; // ldr pc,<label>
- return (uint8_t*)++StubAddr;
+ return (uint8_t *)++StubAddr;
} else if (Arch == Triple::mipsel || Arch == Triple::mips) {
- uint32_t *StubAddr = (uint32_t*)Addr;
+ uint32_t *StubAddr = (uint32_t *)Addr;
// 0: 3c190000 lui t9,%hi(addr).
// 4: 27390000 addiu t9,t9,%lo(addr).
// 8: 03200008 jr t9.
@@ -435,22 +562,31 @@ uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr) {
*StubAddr = NopInstr;
return Addr;
} else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
- // PowerPC64 stub: the address points to a function descriptor
- // instead of the function itself. Load the function address
- // on r11 and sets it to control register. Also loads the function
- // TOC in r2 and environment pointer to r11.
+ // Depending on which version of the ELF ABI is in use, we need to
+ // generate one of two variants of the stub. They both start with
+ // the same sequence to load the target address into r12.
writeInt32BE(Addr, 0x3D800000); // lis r12, highest(addr)
writeInt32BE(Addr+4, 0x618C0000); // ori r12, higher(addr)
writeInt32BE(Addr+8, 0x798C07C6); // sldi r12, r12, 32
writeInt32BE(Addr+12, 0x658C0000); // oris r12, r12, h(addr)
writeInt32BE(Addr+16, 0x618C0000); // ori r12, r12, l(addr)
- writeInt32BE(Addr+20, 0xF8410028); // std r2, 40(r1)
- writeInt32BE(Addr+24, 0xE96C0000); // ld r11, 0(r12)
- writeInt32BE(Addr+28, 0xE84C0008); // ld r2, 0(r12)
- writeInt32BE(Addr+32, 0x7D6903A6); // mtctr r11
- writeInt32BE(Addr+36, 0xE96C0010); // ld r11, 16(r2)
- writeInt32BE(Addr+40, 0x4E800420); // bctr
-
+ if (AbiVariant == 2) {
+ // PowerPC64 stub ELFv2 ABI: The address points to the function itself.
+ // The address is already in r12 as required by the ABI. Branch to it.
+ writeInt32BE(Addr+20, 0xF8410018); // std r2, 24(r1)
+ writeInt32BE(Addr+24, 0x7D8903A6); // mtctr r12
+ writeInt32BE(Addr+28, 0x4E800420); // bctr
+ } else {
+ // PowerPC64 stub ELFv1 ABI: The address points to a function descriptor.
+ // Load the function address into r11 and move it to the count register.
+ // Also load the function TOC into r2 and the environment pointer into r11.
+ writeInt32BE(Addr+20, 0xF8410028); // std r2, 40(r1)
+ writeInt32BE(Addr+24, 0xE96C0000); // ld r11, 0(r12)
+ writeInt32BE(Addr+28, 0xE84C0008); // ld r2, 0(r12)
+ writeInt32BE(Addr+32, 0x7D6903A6); // mtctr r11
+ writeInt32BE(Addr+36, 0xE96C0010); // ld r11, 16(r2)
+ writeInt32BE(Addr+40, 0x4E800420); // bctr
+ }
return Addr;
} else if (Arch == Triple::systemz) {
writeInt16BE(Addr, 0xC418); // lgrl %r1,.+8
@@ -463,6 +599,8 @@ uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr) {
*Addr = 0xFF; // jmp
*(Addr+1) = 0x25; // rip
// 32-bit PC-relative address of the GOT entry will be stored at Addr+2
+ } else if (Arch == Triple::x86) {
+ *Addr = 0xE9; // 32-bit pc-relative jump.
}
return Addr;
}
@@ -489,36 +627,37 @@ void RuntimeDyldImpl::resolveRelocationList(const RelocationList &Relocs,
for (unsigned i = 0, e = Relocs.size(); i != e; ++i) {
const RelocationEntry &RE = Relocs[i];
// Ignore relocations for sections that were not loaded
- if (Sections[RE.SectionID].Address == 0)
+ if (Sections[RE.SectionID].Address == nullptr)
continue;
resolveRelocation(RE, Value);
}
}
void RuntimeDyldImpl::resolveExternalSymbols() {
- while(!ExternalSymbolRelocations.empty()) {
+ while (!ExternalSymbolRelocations.empty()) {
StringMap<RelocationList>::iterator i = ExternalSymbolRelocations.begin();
StringRef Name = i->first();
if (Name.size() == 0) {
// This is an absolute symbol, use an address of zero.
- DEBUG(dbgs() << "Resolving absolute relocations." << "\n");
+ DEBUG(dbgs() << "Resolving absolute relocations."
+ << "\n");
RelocationList &Relocs = i->second;
resolveRelocationList(Relocs, 0);
} else {
uint64_t Addr = 0;
SymbolTableMap::const_iterator Loc = GlobalSymbolTable.find(Name);
if (Loc == GlobalSymbolTable.end()) {
- // This is an external symbol, try to get its address from
- // MemoryManager.
- Addr = MemMgr->getSymbolAddress(Name.data());
- // The call to getSymbolAddress may have caused additional modules to
- // be loaded, which may have added new entries to the
- // ExternalSymbolRelocations map. Consquently, we need to update our
- // iterator. This is also why retrieval of the relocation list
- // associated with this symbol is deferred until below this point.
- // New entries may have been added to the relocation list.
- i = ExternalSymbolRelocations.find(Name);
+ // This is an external symbol, try to get its address from
+ // MemoryManager.
+ Addr = MemMgr->getSymbolAddress(Name.data());
+ // The call to getSymbolAddress may have caused additional modules to
+ // be loaded, which may have added new entries to the
+ // ExternalSymbolRelocations map. Consequently, we need to update our
+ // iterator. This is also why retrieval of the relocation list
+ // associated with this symbol is deferred until below this point.
+ // New entries may have been added to the relocation list.
+ i = ExternalSymbolRelocations.find(Name);
} else {
// We found the symbol in our global table. It was probably in a
// Module that we loaded previously.
@@ -529,12 +668,11 @@ void RuntimeDyldImpl::resolveExternalSymbols() {
// FIXME: Implement error handling that doesn't kill the host program!
if (!Addr)
report_fatal_error("Program used external function '" + Name +
- "' which could not be resolved!");
+ "' which could not be resolved!");
updateGOTEntries(Name, Addr);
- DEBUG(dbgs() << "Resolving relocations Name: " << Name
- << "\t" << format("0x%lx", Addr)
- << "\n");
+ DEBUG(dbgs() << "Resolving relocations Name: " << Name << "\t"
+ << format("0x%lx", Addr) << "\n");
// This list may have been updated when we called getSymbolAddress, so
// don't change this code to get the list earlier.
RelocationList &Relocs = i->second;
@@ -545,7 +683,6 @@ void RuntimeDyldImpl::resolveExternalSymbols() {
}
}
-
//===----------------------------------------------------------------------===//
// RuntimeDyld class implementation
RuntimeDyld::RuntimeDyld(RTDyldMemoryManager *mm) {
@@ -555,58 +692,103 @@ RuntimeDyld::RuntimeDyld(RTDyldMemoryManager *mm) {
// though the public class spawns a new 'impl' instance for each load,
// they share a single memory manager. This can become a problem when page
// permissions are applied.
- Dyld = 0;
+ Dyld = nullptr;
MM = mm;
+ ProcessAllSections = false;
+}
+
+RuntimeDyld::~RuntimeDyld() { delete Dyld; }
+
+static std::unique_ptr<RuntimeDyldELF>
+createRuntimeDyldELF(RTDyldMemoryManager *MM, bool ProcessAllSections) {
+ std::unique_ptr<RuntimeDyldELF> Dyld(new RuntimeDyldELF(MM));
+ Dyld->setProcessAllSections(ProcessAllSections);
+ return Dyld;
}
-RuntimeDyld::~RuntimeDyld() {
- delete Dyld;
+static std::unique_ptr<RuntimeDyldMachO>
+createRuntimeDyldMachO(Triple::ArchType Arch, RTDyldMemoryManager *MM,
+ bool ProcessAllSections) {
+ std::unique_ptr<RuntimeDyldMachO> Dyld(RuntimeDyldMachO::create(Arch, MM));
+ Dyld->setProcessAllSections(ProcessAllSections);
+ return Dyld;
+}
+
+ObjectImage *RuntimeDyld::loadObject(std::unique_ptr<ObjectFile> InputObject) {
+ std::unique_ptr<ObjectImage> InputImage;
+
+ ObjectFile &Obj = *InputObject;
+
+ if (InputObject->isELF()) {
+ InputImage.reset(RuntimeDyldELF::createObjectImageFromFile(std::move(InputObject)));
+ if (!Dyld)
+ Dyld = createRuntimeDyldELF(MM, ProcessAllSections).release();
+ } else if (InputObject->isMachO()) {
+ InputImage.reset(RuntimeDyldMachO::createObjectImageFromFile(std::move(InputObject)));
+ if (!Dyld)
+ Dyld = createRuntimeDyldMachO(
+ static_cast<Triple::ArchType>(InputImage->getArch()),
+ MM, ProcessAllSections).release();
+ } else
+ report_fatal_error("Incompatible object format!");
+
+ if (!Dyld->isCompatibleFile(&Obj))
+ report_fatal_error("Incompatible object format!");
+
+ Dyld->loadObject(InputImage.get());
+ return InputImage.release();
}
ObjectImage *RuntimeDyld::loadObject(ObjectBuffer *InputBuffer) {
- if (!Dyld) {
- sys::fs::file_magic Type =
- sys::fs::identify_magic(InputBuffer->getBuffer());
- switch (Type) {
- case sys::fs::file_magic::elf_relocatable:
- case sys::fs::file_magic::elf_executable:
- case sys::fs::file_magic::elf_shared_object:
- case sys::fs::file_magic::elf_core:
- Dyld = new RuntimeDyldELF(MM);
- break;
- case sys::fs::file_magic::macho_object:
- case sys::fs::file_magic::macho_executable:
- case sys::fs::file_magic::macho_fixed_virtual_memory_shared_lib:
- case sys::fs::file_magic::macho_core:
- case sys::fs::file_magic::macho_preload_executable:
- case sys::fs::file_magic::macho_dynamically_linked_shared_lib:
- case sys::fs::file_magic::macho_dynamic_linker:
- case sys::fs::file_magic::macho_bundle:
- case sys::fs::file_magic::macho_dynamically_linked_shared_lib_stub:
- case sys::fs::file_magic::macho_dsym_companion:
- Dyld = new RuntimeDyldMachO(MM);
- break;
- case sys::fs::file_magic::unknown:
- case sys::fs::file_magic::bitcode:
- case sys::fs::file_magic::archive:
- case sys::fs::file_magic::coff_object:
- case sys::fs::file_magic::coff_import_library:
- case sys::fs::file_magic::pecoff_executable:
- case sys::fs::file_magic::macho_universal_binary:
- case sys::fs::file_magic::windows_resource:
- report_fatal_error("Incompatible object format!");
- }
- } else {
- if (!Dyld->isCompatibleFormat(InputBuffer))
- report_fatal_error("Incompatible object format!");
+ std::unique_ptr<ObjectImage> InputImage;
+ sys::fs::file_magic Type = sys::fs::identify_magic(InputBuffer->getBuffer());
+
+ switch (Type) {
+ case sys::fs::file_magic::elf_relocatable:
+ case sys::fs::file_magic::elf_executable:
+ case sys::fs::file_magic::elf_shared_object:
+ case sys::fs::file_magic::elf_core:
+ InputImage.reset(RuntimeDyldELF::createObjectImage(InputBuffer));
+ if (!Dyld)
+ Dyld = createRuntimeDyldELF(MM, ProcessAllSections).release();
+ break;
+ case sys::fs::file_magic::macho_object:
+ case sys::fs::file_magic::macho_executable:
+ case sys::fs::file_magic::macho_fixed_virtual_memory_shared_lib:
+ case sys::fs::file_magic::macho_core:
+ case sys::fs::file_magic::macho_preload_executable:
+ case sys::fs::file_magic::macho_dynamically_linked_shared_lib:
+ case sys::fs::file_magic::macho_dynamic_linker:
+ case sys::fs::file_magic::macho_bundle:
+ case sys::fs::file_magic::macho_dynamically_linked_shared_lib_stub:
+ case sys::fs::file_magic::macho_dsym_companion:
+ InputImage.reset(RuntimeDyldMachO::createObjectImage(InputBuffer));
+ if (!Dyld)
+ Dyld = createRuntimeDyldMachO(
+ static_cast<Triple::ArchType>(InputImage->getArch()),
+ MM, ProcessAllSections).release();
+ break;
+ case sys::fs::file_magic::unknown:
+ case sys::fs::file_magic::bitcode:
+ case sys::fs::file_magic::archive:
+ case sys::fs::file_magic::coff_object:
+ case sys::fs::file_magic::coff_import_library:
+ case sys::fs::file_magic::pecoff_executable:
+ case sys::fs::file_magic::macho_universal_binary:
+ case sys::fs::file_magic::windows_resource:
+ report_fatal_error("Incompatible object format!");
}
- return Dyld->loadObject(InputBuffer);
+ if (!Dyld->isCompatibleFormat(InputBuffer))
+ report_fatal_error("Incompatible object format!");
+
+ Dyld->loadObject(InputImage.get());
+ return InputImage.release();
}
void *RuntimeDyld::getSymbolAddress(StringRef Name) {
if (!Dyld)
- return NULL;
+ return nullptr;
return Dyld->getSymbolAddress(Name);
}
@@ -616,12 +798,9 @@ uint64_t RuntimeDyld::getSymbolLoadAddress(StringRef Name) {
return Dyld->getSymbolLoadAddress(Name);
}
-void RuntimeDyld::resolveRelocations() {
- Dyld->resolveRelocations();
-}
+void RuntimeDyld::resolveRelocations() { Dyld->resolveRelocations(); }
-void RuntimeDyld::reassignSectionAddress(unsigned SectionID,
- uint64_t Addr) {
+void RuntimeDyld::reassignSectionAddress(unsigned SectionID, uint64_t Addr) {
Dyld->reassignSectionAddress(SectionID, Addr);
}
@@ -630,9 +809,9 @@ void RuntimeDyld::mapSectionAddress(const void *LocalAddress,
Dyld->mapSectionAddress(LocalAddress, TargetAddress);
}
-StringRef RuntimeDyld::getErrorString() {
- return Dyld->getErrorString();
-}
+bool RuntimeDyld::hasError() { return Dyld->hasError(); }
+
+StringRef RuntimeDyld::getErrorString() { return Dyld->getErrorString(); }
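
For orientation, the public flow these methods implement is: construct a RuntimeDyld over a memory manager, load one or more objects, resolve relocations, then query symbols. A rough usage sketch; SectionMemoryManager and the way the ObjectBuffer is obtained are assumptions that vary across LLVM versions:

    #include "llvm/ExecutionEngine/ObjectImage.h"
    #include "llvm/ExecutionEngine/RuntimeDyld.h"
    #include "llvm/ExecutionEngine/SectionMemoryManager.h"
    #include "llvm/Support/ErrorHandling.h"
    #include <memory>

    void linkInMemory(llvm::ObjectBuffer *Buf) {    // Buf wraps a relocatable object
      llvm::SectionMemoryManager MemMgr;            // allocates code/data sections
      llvm::RuntimeDyld Dyld(&MemMgr);

      // The caller takes ownership of the returned image (which owns the buffer).
      std::unique_ptr<llvm::ObjectImage> Image(Dyld.loadObject(Buf));
      if (!Image || Dyld.hasError())
        llvm::report_fatal_error(Dyld.getErrorString());

      Dyld.resolveRelocations();                    // apply all pending relocations
      Dyld.registerEHFrames();                      // enable C++ EH for the JITed code

      void *Addr = Dyld.getSymbolAddress("main");   // look up a linked symbol
      (void)Addr;
    }
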
void RuntimeDyld::registerEHFrames() {
if (Dyld)
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
new file mode 100644
index 0000000..1e63d92
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
@@ -0,0 +1,656 @@
+//===--- RuntimeDyldChecker.cpp - RuntimeDyld tester framework --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/RuntimeDyldChecker.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/Support/StringRefMemoryObject.h"
+#include "RuntimeDyldImpl.h"
+#include <cctype>
+#include <memory>
+
+#define DEBUG_TYPE "rtdyld"
+
+using namespace llvm;
+
+namespace llvm {
+
+ // Helper class that implements the language evaluated by RuntimeDyldChecker.
+ class RuntimeDyldCheckerExprEval {
+ public:
+
+ RuntimeDyldCheckerExprEval(const RuntimeDyldChecker &Checker,
+ llvm::raw_ostream &ErrStream)
+ : Checker(Checker), ErrStream(ErrStream) {}
+
+ bool evaluate(StringRef Expr) const {
+ // Expect equality expression of the form 'LHS = RHS'.
+ Expr = Expr.trim();
+ size_t EQIdx = Expr.find('=');
+
+ // Evaluate LHS.
+ StringRef LHSExpr = Expr.substr(0, EQIdx).rtrim();
+ StringRef RemainingExpr;
+ EvalResult LHSResult;
+ std::tie(LHSResult, RemainingExpr) =
+ evalComplexExpr(evalSimpleExpr(LHSExpr));
+ if (LHSResult.hasError())
+ return handleError(Expr, LHSResult);
+ if (RemainingExpr != "")
+ return handleError(Expr, unexpectedToken(RemainingExpr, LHSExpr, ""));
+
+ // Evaluate RHS.
+ StringRef RHSExpr = Expr.substr(EQIdx + 1).ltrim();
+ EvalResult RHSResult;
+ std::tie(RHSResult, RemainingExpr) =
+ evalComplexExpr(evalSimpleExpr(RHSExpr));
+ if (RHSResult.hasError())
+ return handleError(Expr, RHSResult);
+ if (RemainingExpr != "")
+ return handleError(Expr, unexpectedToken(RemainingExpr, RHSExpr, ""));
+
+ if (LHSResult.getValue() != RHSResult.getValue()) {
+ ErrStream << "Expression '" << Expr << "' is false: "
+ << format("0x%lx", LHSResult.getValue()) << " != "
+ << format("0x%lx", RHSResult.getValue()) << "\n";
+ return false;
+ }
+ return true;
+ }
+
+ private:
+ const RuntimeDyldChecker &Checker;
+ llvm::raw_ostream &ErrStream;
+
+ enum class BinOpToken : unsigned { Invalid, Add, Sub, BitwiseAnd,
+ BitwiseOr, ShiftLeft, ShiftRight };
+
+ class EvalResult {
+ public:
+ EvalResult()
+ : Value(0), ErrorMsg("") {}
+ EvalResult(uint64_t Value)
+ : Value(Value), ErrorMsg("") {}
+ EvalResult(std::string ErrorMsg)
+ : Value(0), ErrorMsg(ErrorMsg) {}
+ uint64_t getValue() const { return Value; }
+ bool hasError() const { return ErrorMsg != ""; }
+ const std::string& getErrorMsg() const { return ErrorMsg; }
+ private:
+ uint64_t Value;
+ std::string ErrorMsg;
+ };
+
+ StringRef getTokenForError(StringRef Expr) const {
+ if (Expr.empty())
+ return "";
+
+ StringRef Token, Remaining;
+ if (isalpha(Expr[0]))
+ std::tie(Token, Remaining) = parseSymbol(Expr);
+ else if (isdigit(Expr[0]))
+ std::tie(Token, Remaining) = parseNumberString(Expr);
+ else {
+ unsigned TokLen = 1;
+ if (Expr.startswith("<<") || Expr.startswith(">>"))
+ TokLen = 2;
+ Token = Expr.substr(0, TokLen);
+ }
+ return Token;
+ }
+
+ EvalResult unexpectedToken(StringRef TokenStart,
+ StringRef SubExpr,
+ StringRef ErrText) const {
+ std::string ErrorMsg("Encountered unexpected token '");
+ ErrorMsg += getTokenForError(TokenStart);
+ if (SubExpr != "") {
+ ErrorMsg += "' while parsing subexpression '";
+ ErrorMsg += SubExpr;
+ }
+ ErrorMsg += "'";
+ if (ErrText != "") {
+ ErrorMsg += " ";
+ ErrorMsg += ErrText;
+ }
+ return EvalResult(std::move(ErrorMsg));
+ }
+
+ bool handleError(StringRef Expr, const EvalResult &R) const {
+ assert(R.hasError() && "Not an error result.");
+ ErrStream << "Error evaluating expression '" << Expr << "': "
+ << R.getErrorMsg() << "\n";
+ return false;
+ }
+
+ std::pair<BinOpToken, StringRef> parseBinOpToken(StringRef Expr) const {
+ if (Expr.empty())
+ return std::make_pair(BinOpToken::Invalid, "");
+
+ // Handle the two 2-character tokens.
+ if (Expr.startswith("<<"))
+ return std::make_pair(BinOpToken::ShiftLeft,
+ Expr.substr(2).ltrim());
+ if (Expr.startswith(">>"))
+ return std::make_pair(BinOpToken::ShiftRight,
+ Expr.substr(2).ltrim());
+
+ // Handle one-character tokens.
+ BinOpToken Op;
+ switch (Expr[0]) {
+ default: return std::make_pair(BinOpToken::Invalid, Expr);
+ case '+': Op = BinOpToken::Add; break;
+ case '-': Op = BinOpToken::Sub; break;
+ case '&': Op = BinOpToken::BitwiseAnd; break;
+ case '|': Op = BinOpToken::BitwiseOr; break;
+ }
+
+ return std::make_pair(Op, Expr.substr(1).ltrim());
+ }
+
+ EvalResult computeBinOpResult(BinOpToken Op, const EvalResult &LHSResult,
+ const EvalResult &RHSResult) const {
+ switch (Op) {
+ default: llvm_unreachable("Tried to evaluate unrecognized operation.");
+ case BinOpToken::Add:
+ return EvalResult(LHSResult.getValue() + RHSResult.getValue());
+ case BinOpToken::Sub:
+ return EvalResult(LHSResult.getValue() - RHSResult.getValue());
+ case BinOpToken::BitwiseAnd:
+ return EvalResult(LHSResult.getValue() & RHSResult.getValue());
+ case BinOpToken::BitwiseOr:
+ return EvalResult(LHSResult.getValue() | RHSResult.getValue());
+ case BinOpToken::ShiftLeft:
+ return EvalResult(LHSResult.getValue() << RHSResult.getValue());
+ case BinOpToken::ShiftRight:
+ return EvalResult(LHSResult.getValue() >> RHSResult.getValue());
+ }
+ }
+
+ // Parse a symbol and return a (string, string) pair representing the symbol
+ // name and expression remaining to be parsed.
+ std::pair<StringRef, StringRef> parseSymbol(StringRef Expr) const {
+ size_t FirstNonSymbol =
+ Expr.find_first_not_of("0123456789"
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ ":_");
+ return std::make_pair(Expr.substr(0, FirstNonSymbol),
+ Expr.substr(FirstNonSymbol).ltrim());
+ }
+
+ // Evaluate a call to decode_operand. Decode the instruction operand at the
+ // given symbol and get the value of the requested operand.
+ // Returns an error if the instruction cannot be decoded, or the requested
+ // operand is not an immediate.
+ // On success, returns a pair containing the value of the operand, plus
+ // the expression remaining to be evaluated.
+ std::pair<EvalResult, StringRef> evalDecodeOperand(StringRef Expr) const {
+ if (!Expr.startswith("("))
+ return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+ StringRef Symbol;
+ std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);
+
+ if (!Checker.isSymbolValid(Symbol))
+ return std::make_pair(EvalResult(("Cannot decode unknown symbol '" +
+ Symbol + "'").str()),
+ "");
+
+ if (!RemainingExpr.startswith(","))
+ return std::make_pair(unexpectedToken(RemainingExpr, RemainingExpr,
+ "expected ','"),
+ "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ EvalResult OpIdxExpr;
+ std::tie(OpIdxExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+ if (OpIdxExpr.hasError())
+ return std::make_pair(OpIdxExpr, "");
+
+ if (!RemainingExpr.startswith(")"))
+ return std::make_pair(unexpectedToken(RemainingExpr, RemainingExpr,
+ "expected ')'"),
+ "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ MCInst Inst;
+ uint64_t Size;
+ if (!decodeInst(Symbol, Inst, Size))
+ return std::make_pair(EvalResult(("Couldn't decode instruction at '" +
+ Symbol + "'").str()),
+ "");
+
+ unsigned OpIdx = OpIdxExpr.getValue();
+ if (OpIdx >= Inst.getNumOperands()) {
+ std::string ErrMsg;
+ raw_string_ostream ErrMsgStream(ErrMsg);
+ ErrMsgStream << "Invalid operand index '" << format("%i", OpIdx)
+ << "' for instruction '" << Symbol
+ << "'. Instruction has only "
+ << format("%i", Inst.getNumOperands())
+ << " operands.\nInstruction is:\n ";
+ Inst.dump_pretty(ErrMsgStream,
+ Checker.Disassembler->getContext().getAsmInfo(),
+ Checker.InstPrinter);
+ return std::make_pair(EvalResult(ErrMsgStream.str()), "");
+ }
+
+ const MCOperand &Op = Inst.getOperand(OpIdx);
+ if (!Op.isImm()) {
+ std::string ErrMsg;
+ raw_string_ostream ErrMsgStream(ErrMsg);
+ ErrMsgStream << "Operand '" << format("%i", OpIdx)
+ << "' of instruction '" << Symbol
+ << "' is not an immediate.\nInstruction is:\n ";
+ Inst.dump_pretty(ErrMsgStream,
+ Checker.Disassembler->getContext().getAsmInfo(),
+ Checker.InstPrinter);
+
+ return std::make_pair(EvalResult(ErrMsgStream.str()), "");
+ }
+
+ return std::make_pair(EvalResult(Op.getImm()), RemainingExpr);
+ }
+
+ // Evaluate a call to next_pc. Decode the instruction at the given
+ // symbol and return the program counter of the following instruction.
+ // Returns an error if the instruction cannot be decoded.
+ // On success, returns a pair containing the next PC, plus the length of the
+ // expression remaining to be evaluated.
+ std::pair<EvalResult, StringRef> evalNextPC(StringRef Expr) const {
+ if (!Expr.startswith("("))
+ return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+ StringRef Symbol;
+ std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);
+
+ if (!Checker.isSymbolValid(Symbol))
+ return std::make_pair(EvalResult(("Cannot decode unknown symbol '"
+ + Symbol + "'").str()),
+ "");
+
+ if (!RemainingExpr.startswith(")"))
+ return std::make_pair(unexpectedToken(RemainingExpr, RemainingExpr,
+ "expected ')'"),
+ "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ MCInst Inst;
+ uint64_t Size;
+ if (!decodeInst(Symbol, Inst, Size))
+ return std::make_pair(EvalResult(("Couldn't decode instruction at '" +
+ Symbol + "'").str()),
+ "");
+ uint64_t NextPC = Checker.getSymbolAddress(Symbol) + Size;
+
+ return std::make_pair(EvalResult(NextPC), RemainingExpr);
+ }
+
+ // Evaluate an identifier expr, which may be a symbol, or a call to
+ // one of the builtin functions: decode_operand or next_pc.
+ // Return the result, plus the expression remaining to be parsed.
+ std::pair<EvalResult, StringRef> evalIdentifierExpr(StringRef Expr) const {
+ StringRef Symbol;
+ StringRef RemainingExpr;
+ std::tie(Symbol, RemainingExpr) = parseSymbol(Expr);
+
+ // Check for builtin function calls.
+ if (Symbol == "decode_operand")
+ return evalDecodeOperand(RemainingExpr);
+ else if (Symbol == "next_pc")
+ return evalNextPC(RemainingExpr);
+
+ if (!Checker.isSymbolValid(Symbol)) {
+ std::string ErrMsg("No known address for symbol '");
+ ErrMsg += Symbol;
+ ErrMsg += "'";
+ if (Symbol.startswith("L"))
+ ErrMsg += " (this appears to be an assembler local label - "
+ " perhaps drop the 'L'?)";
+
+ return std::make_pair(EvalResult(ErrMsg), "");
+ }
+
+ // Looks like a plain symbol reference.
+ return std::make_pair(EvalResult(Checker.getSymbolAddress(Symbol)),
+ RemainingExpr);
+ }
+
+ // Parse a number (hexadecimal or decimal) and return a (string, string)
+ // pair representing the number and the expression remaining to be parsed.
+ std::pair<StringRef, StringRef> parseNumberString(StringRef Expr) const {
+ size_t FirstNonDigit = StringRef::npos;
+ if (Expr.startswith("0x")) {
+ FirstNonDigit = Expr.find_first_not_of("0123456789abcdefABCDEF", 2);
+ if (FirstNonDigit == StringRef::npos)
+ FirstNonDigit = Expr.size();
+ } else {
+ FirstNonDigit = Expr.find_first_not_of("0123456789");
+ if (FirstNonDigit == StringRef::npos)
+ FirstNonDigit = Expr.size();
+ }
+ return std::make_pair(Expr.substr(0, FirstNonDigit),
+ Expr.substr(FirstNonDigit));
+ }
+
+ // Evaluate a constant numeric expression (hexadecimal or decimal) and
+ // return a pair containing the result, and the expression remaining to be
+ // evaluated.
+ std::pair<EvalResult, StringRef> evalNumberExpr(StringRef Expr) const {
+ StringRef ValueStr;
+ StringRef RemainingExpr;
+ std::tie(ValueStr, RemainingExpr) = parseNumberString(Expr);
+
+ if (ValueStr.empty() || !isdigit(ValueStr[0]))
+ return std::make_pair(unexpectedToken(RemainingExpr, RemainingExpr,
+ "expected number"),
+ "");
+ uint64_t Value;
+ ValueStr.getAsInteger(0, Value);
+ return std::make_pair(EvalResult(Value), RemainingExpr);
+ }
+
+ // Evaluate an expression of the form "(<expr>)" and return a pair
+ // containing the result of evaluating <expr>, plus the expression
+ // remaining to be parsed.
+ std::pair<EvalResult, StringRef> evalParensExpr(StringRef Expr) const {
+ assert(Expr.startswith("(") && "Not a parenthesized expression");
+ EvalResult SubExprResult;
+ StringRef RemainingExpr;
+ std::tie(SubExprResult, RemainingExpr) =
+ evalComplexExpr(evalSimpleExpr(Expr.substr(1).ltrim()));
+ if (SubExprResult.hasError())
+ return std::make_pair(SubExprResult, "");
+ if (!RemainingExpr.startswith(")"))
+ return std::make_pair(unexpectedToken(RemainingExpr, Expr,
+ "expected ')'"),
+ "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+ return std::make_pair(SubExprResult, RemainingExpr);
+ }
+
+ // Evaluate an expression in one of the following forms:
+ // *{<number>}<symbol>
+ // *{<number>}(<symbol> + <number>)
+ // *{<number>}(<symbol> - <number>)
+ // Return a pair containing the result, plus the expression remaining to be
+ // parsed.
+ std::pair<EvalResult, StringRef> evalLoadExpr(StringRef Expr) const {
+ assert(Expr.startswith("*") && "Not a load expression");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+ // Parse read size.
+ if (!RemainingExpr.startswith("{"))
+ return std::make_pair(EvalResult("Expected '{' following '*'."), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+ EvalResult ReadSizeExpr;
+ std::tie(ReadSizeExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+ if (ReadSizeExpr.hasError())
+ return std::make_pair(ReadSizeExpr, RemainingExpr);
+ uint64_t ReadSize = ReadSizeExpr.getValue();
+ if (ReadSize < 1 || ReadSize > 8)
+ return std::make_pair(EvalResult("Invalid size for dereference."), "");
+ if (!RemainingExpr.startswith("}"))
+ return std::make_pair(EvalResult("Missing '}' for dereference."), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ // Check for '(symbol +/- constant)' form.
+ bool SymbolPlusConstant = false;
+ if (RemainingExpr.startswith("(")) {
+ SymbolPlusConstant = true;
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+ }
+
+ // Read symbol.
+ StringRef Symbol;
+ std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);
+
+ if (!Checker.isSymbolValid(Symbol))
+ return std::make_pair(EvalResult(("Cannot dereference unknown symbol '"
+ + Symbol + "'").str()),
+ "");
+
+ // Set up default offset.
+ int64_t Offset = 0;
+
+ // Handle "+/- constant)" portion if necessary.
+ if (SymbolPlusConstant) {
+ char OpChar = RemainingExpr[0];
+ if (OpChar != '+' && OpChar != '-')
+ return std::make_pair(EvalResult("Invalid operator in load address."),
+ "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ EvalResult OffsetExpr;
+ std::tie(OffsetExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+
+ Offset = (OpChar == '+') ?
+ OffsetExpr.getValue() : -1 * OffsetExpr.getValue();
+
+ if (!RemainingExpr.startswith(")"))
+ return std::make_pair(EvalResult("Missing ')' in load address."),
+ "");
+
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+ }
+
+ return std::make_pair(
+ EvalResult(Checker.readMemoryAtSymbol(Symbol, Offset, ReadSize)),
+ RemainingExpr);
+ }
+
+ // Evaluate a "simple" expression. This is any expression that _isn't_ an
+ // un-parenthesized binary expression.
+ //
+ // "Simple" expressions can be optionally bit-sliced. See evalSlicedExpr.
+ //
+ // Returns a pair containing the result of the evaluation, plus the
+ // expression remaining to be parsed.
+ std::pair<EvalResult, StringRef> evalSimpleExpr(StringRef Expr) const {
+ EvalResult SubExprResult;
+ StringRef RemainingExpr;
+
+ if (Expr.empty())
+ return std::make_pair(EvalResult("Unexpected end of expression"), "");
+
+ if (Expr[0] == '(')
+ std::tie(SubExprResult, RemainingExpr) = evalParensExpr(Expr);
+ else if (Expr[0] == '*')
+ std::tie(SubExprResult, RemainingExpr) = evalLoadExpr(Expr);
+ else if (isalpha(Expr[0]))
+ std::tie(SubExprResult, RemainingExpr) = evalIdentifierExpr(Expr);
+ else if (isdigit(Expr[0]))
+ std::tie(SubExprResult, RemainingExpr) = evalNumberExpr(Expr);
+
+ if (SubExprResult.hasError())
+ return std::make_pair(SubExprResult, RemainingExpr);
+
+ // Evaluate bit-slice if present.
+ if (RemainingExpr.startswith("["))
+ std::tie(SubExprResult, RemainingExpr) =
+ evalSliceExpr(std::make_pair(SubExprResult, RemainingExpr));
+
+ return std::make_pair(SubExprResult, RemainingExpr);
+ }
+
+ // Evaluate a bit-slice of an expression.
+ // A bit-slice has the form "<expr>[high:low]". The result of evaluating a
+ // slice is the bits between high and low (inclusive) in the original
+ // expression, right shifted so that the "low" bit is in position 0 in the
+ // result.
+ // Returns a pair containing the result of the slice operation, plus the
+ // expression remaining to be parsed.
+ std::pair<EvalResult, StringRef> evalSliceExpr(
+ std::pair<EvalResult, StringRef> Ctx) const {
+ EvalResult SubExprResult;
+ StringRef RemainingExpr;
+ std::tie(SubExprResult, RemainingExpr) = Ctx;
+
+ assert(RemainingExpr.startswith("[") && "Not a slice expr.");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ EvalResult HighBitExpr;
+ std::tie(HighBitExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+
+ if (HighBitExpr.hasError())
+ return std::make_pair(HighBitExpr, RemainingExpr);
+
+ if (!RemainingExpr.startswith(":"))
+ return std::make_pair(unexpectedToken(RemainingExpr, RemainingExpr,
+ "expected ':'"),
+ "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ EvalResult LowBitExpr;
+ std::tie(LowBitExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+
+ if (LowBitExpr.hasError())
+ return std::make_pair(LowBitExpr, RemainingExpr);
+
+ if (!RemainingExpr.startswith("]"))
+ return std::make_pair(unexpectedToken(RemainingExpr, RemainingExpr,
+ "expected ']'"),
+ "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ unsigned HighBit = HighBitExpr.getValue();
+ unsigned LowBit = LowBitExpr.getValue();
+ uint64_t Mask = ((uint64_t)1 << (HighBit - LowBit + 1)) - 1;
+ uint64_t SlicedValue = (SubExprResult.getValue() >> LowBit) & Mask;
+ return std::make_pair(EvalResult(SlicedValue), RemainingExpr);
+ }
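
Concretely, a slice keeps bits high through low inclusive and right-justifies them, exactly as the mask and shift above compute. For example:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t Value = 0xDEADBEEF;
      unsigned High = 15, Low = 8;
      uint64_t Mask = ((uint64_t)1 << (High - Low + 1)) - 1; // 0xFF
      assert(((Value >> Low) & Mask) == 0xBE);               // "0xDEADBEEF[15:8]"
    }
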
+
+ // Evaluate a "complex" expression.
+ // Takes an already evaluated subexpression and checks for the presence of a
+ // binary operator, computing the result of the binary operation if one is
+ // found. Used to make arithmetic expressions left-associative.
+ // Returns a pair containing the ultimate result of evaluating the
+ // expression, plus the expression remaining to be evaluated.
+ std::pair<EvalResult, StringRef> evalComplexExpr(
+ std::pair<EvalResult, StringRef> Ctx) const {
+ EvalResult LHSResult;
+ StringRef RemainingExpr;
+ std::tie(LHSResult, RemainingExpr) = Ctx;
+
+ // If there was an error, or there's nothing left to evaluate, return the
+ // result.
+ if (LHSResult.hasError() || RemainingExpr == "")
+ return std::make_pair(LHSResult, RemainingExpr);
+
+ // Otherwise check if this is a binary expression.
+ BinOpToken BinOp;
+ std::tie(BinOp, RemainingExpr) = parseBinOpToken(RemainingExpr);
+
+ // If this isn't a recognized expression just return.
+ if (BinOp == BinOpToken::Invalid)
+ return std::make_pair(LHSResult, RemainingExpr);
+
+ // This is a recognized bin-op. Evaluate the RHS, then evaluate the binop.
+ EvalResult RHSResult;
+ std::tie(RHSResult, RemainingExpr) = evalSimpleExpr(RemainingExpr);
+
+ // If there was an error evaluating the RHS, return it.
+ if (RHSResult.hasError())
+ return std::make_pair(RHSResult, RemainingExpr);
+
+ // This is a binary expression - evaluate and try to continue as a
+ // complex expr.
+ EvalResult ThisResult(computeBinOpResult(BinOp, LHSResult, RHSResult));
+
+ return evalComplexExpr(std::make_pair(ThisResult, RemainingExpr));
+ }
+
+ bool decodeInst(StringRef Symbol, MCInst &Inst, uint64_t &Size) const {
+ MCDisassembler *Dis = Checker.Disassembler;
+ StringRef SectionMem = Checker.getSubsectionStartingAt(Symbol);
+ StringRefMemoryObject SectionBytes(SectionMem, 0);
+
+ MCDisassembler::DecodeStatus S =
+ Dis->getInstruction(Inst, Size, SectionBytes, 0, nulls(), nulls());
+
+ return (S == MCDisassembler::Success);
+ }
+
+ };
+
+}
+
+bool RuntimeDyldChecker::check(StringRef CheckExpr) const {
+ CheckExpr = CheckExpr.trim();
+ DEBUG(llvm::dbgs() << "RuntimeDyldChecker: Checking '" << CheckExpr
+ << "'...\n");
+ RuntimeDyldCheckerExprEval P(*this, ErrStream);
+ bool Result = P.evaluate(CheckExpr);
+ (void)Result;
+ DEBUG(llvm::dbgs() << "RuntimeDyldChecker: '" << CheckExpr << "' "
+ << (Result ? "passed" : "FAILED") << ".\n");
+ return Result;
+}
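
check() expects a single 'LHS = RHS' equality written in the small expression language defined above (symbols, numbers, memory loads, bit-slices, and the decode_operand/next_pc builtins). A few illustrative rules, with made-up symbol names, run through an already-constructed checker:

    #include "llvm/ExecutionEngine/RuntimeDyldChecker.h"

    bool runExampleChecks(llvm::RuntimeDyldChecker &Checker) {
      bool OK = true;
      OK &= Checker.check("foo = 0x400000");             // symbol address equality
      OK &= Checker.check("*{8}(got_entry + 16) = bar"); // 8-byte load at got_entry+16
      OK &= Checker.check("decode_operand(callsite, 0)[23:0] = next_pc(callsite)[23:0]");
      return OK;
    }
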
+
+bool RuntimeDyldChecker::checkAllRulesInBuffer(StringRef RulePrefix,
+ MemoryBuffer* MemBuf) const {
+ bool DidAllTestsPass = true;
+ unsigned NumRules = 0;
+
+ const char *LineStart = MemBuf->getBufferStart();
+
+ // Eat whitespace.
+ while (LineStart != MemBuf->getBufferEnd() &&
+ std::isspace(*LineStart))
+ ++LineStart;
+
+ while (LineStart != MemBuf->getBufferEnd() && *LineStart != '\0') {
+ const char *LineEnd = LineStart;
+ while (LineEnd != MemBuf->getBufferEnd() &&
+ *LineEnd != '\r' && *LineEnd != '\n')
+ ++LineEnd;
+
+ StringRef Line(LineStart, LineEnd - LineStart);
+ if (Line.startswith(RulePrefix)) {
+ DidAllTestsPass &= check(Line.substr(RulePrefix.size()));
+ ++NumRules;
+ }
+
+ // Eat whitespace.
+ LineStart = LineEnd;
+ while (LineStart != MemBuf->getBufferEnd() &&
+ std::isspace(*LineStart))
+ ++LineStart;
+ }
+ return DidAllTestsPass && (NumRules != 0);
+}
+
+bool RuntimeDyldChecker::isSymbolValid(StringRef Symbol) const {
+ return RTDyld.getSymbolAddress(Symbol) != nullptr;
+}
+
+uint64_t RuntimeDyldChecker::getSymbolAddress(StringRef Symbol) const {
+ return RTDyld.getAnySymbolRemoteAddress(Symbol);
+}
+
+uint64_t RuntimeDyldChecker::readMemoryAtSymbol(StringRef Symbol,
+ int64_t Offset,
+ unsigned Size) const {
+ uint8_t *Src = RTDyld.getSymbolAddress(Symbol);
+ uint64_t Result = 0;
+ memcpy(&Result, Src + Offset, Size);
+ return Result;
+}
+
+StringRef RuntimeDyldChecker::getSubsectionStartingAt(StringRef Name) const {
+ RuntimeDyldImpl::SymbolTableMap::const_iterator pos =
+ RTDyld.GlobalSymbolTable.find(Name);
+ if (pos == RTDyld.GlobalSymbolTable.end())
+ return StringRef();
+ RuntimeDyldImpl::SymbolLoc Loc = pos->second;
+ uint8_t *SectionAddr = RTDyld.getSectionAddress(Loc.first);
+ return StringRef(reinterpret_cast<const char*>(SectionAddr) + Loc.second,
+ RTDyld.Sections[Loc.first].Size - Loc.second);
+}
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
index f2c69fc..728138e 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
@@ -11,12 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "dyld"
#include "RuntimeDyldELF.h"
#include "JITRegistrar.h"
#include "ObjectImageCommon.h"
#include "llvm/ADT/IntervalMap.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
@@ -25,122 +23,125 @@
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/ELF.h"
+#include "llvm/Support/MemoryBuffer.h"
+
using namespace llvm;
using namespace llvm::object;
+#define DEBUG_TYPE "dyld"
+
namespace {
-static inline
-error_code check(error_code Err) {
+static inline std::error_code check(std::error_code Err) {
if (Err) {
report_fatal_error(Err.message());
}
return Err;
}
-template<class ELFT>
-class DyldELFObject
- : public ELFObjectFile<ELFT> {
+template <class ELFT> class DyldELFObject : public ELFObjectFile<ELFT> {
LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
typedef Elf_Shdr_Impl<ELFT> Elf_Shdr;
typedef Elf_Sym_Impl<ELFT> Elf_Sym;
- typedef
- Elf_Rel_Impl<ELFT, false> Elf_Rel;
- typedef
- Elf_Rel_Impl<ELFT, true> Elf_Rela;
+ typedef Elf_Rel_Impl<ELFT, false> Elf_Rel;
+ typedef Elf_Rel_Impl<ELFT, true> Elf_Rela;
typedef Elf_Ehdr_Impl<ELFT> Elf_Ehdr;
- typedef typename ELFDataTypeTypedefHelper<
- ELFT>::value_type addr_type;
+ typedef typename ELFDataTypeTypedefHelper<ELFT>::value_type addr_type;
+
+ std::unique_ptr<ObjectFile> UnderlyingFile;
public:
- DyldELFObject(MemoryBuffer *Wrapper, error_code &ec);
+ DyldELFObject(std::unique_ptr<ObjectFile> UnderlyingFile,
+ std::unique_ptr<MemoryBuffer> Wrapper, std::error_code &ec);
+
+ DyldELFObject(std::unique_ptr<MemoryBuffer> Wrapper, std::error_code &ec);
void updateSectionAddress(const SectionRef &Sec, uint64_t Addr);
void updateSymbolAddress(const SymbolRef &Sym, uint64_t Addr);
// Methods for type inquiry through isa, cast and dyn_cast
static inline bool classof(const Binary *v) {
- return (isa<ELFObjectFile<ELFT> >(v)
- && classof(cast<ELFObjectFile
- <ELFT> >(v)));
+ return (isa<ELFObjectFile<ELFT>>(v) &&
+ classof(cast<ELFObjectFile<ELFT>>(v)));
}
- static inline bool classof(
- const ELFObjectFile<ELFT> *v) {
+ static inline bool classof(const ELFObjectFile<ELFT> *v) {
return v->isDyldType();
}
};
-template<class ELFT>
-class ELFObjectImage : public ObjectImageCommon {
- protected:
- DyldELFObject<ELFT> *DyldObj;
- bool Registered;
-
- public:
- ELFObjectImage(ObjectBuffer *Input,
- DyldELFObject<ELFT> *Obj)
- : ObjectImageCommon(Input, Obj),
- DyldObj(Obj),
- Registered(false) {}
-
- virtual ~ELFObjectImage() {
- if (Registered)
- deregisterWithDebugger();
- }
+template <class ELFT> class ELFObjectImage : public ObjectImageCommon {
+ bool Registered;
- // Subclasses can override these methods to update the image with loaded
- // addresses for sections and common symbols
- virtual void updateSectionAddress(const SectionRef &Sec, uint64_t Addr)
- {
- DyldObj->updateSectionAddress(Sec, Addr);
- }
+public:
+ ELFObjectImage(ObjectBuffer *Input, std::unique_ptr<DyldELFObject<ELFT>> Obj)
+ : ObjectImageCommon(Input, std::move(Obj)), Registered(false) {}
- virtual void updateSymbolAddress(const SymbolRef &Sym, uint64_t Addr)
- {
- DyldObj->updateSymbolAddress(Sym, Addr);
- }
+ virtual ~ELFObjectImage() {
+ if (Registered)
+ deregisterWithDebugger();
+ }
- virtual void registerWithDebugger()
- {
- JITRegistrar::getGDBRegistrar().registerObject(*Buffer);
- Registered = true;
- }
- virtual void deregisterWithDebugger()
- {
- JITRegistrar::getGDBRegistrar().deregisterObject(*Buffer);
- }
+ // Subclasses can override these methods to update the image with loaded
+ // addresses for sections and common symbols
+ void updateSectionAddress(const SectionRef &Sec, uint64_t Addr) override {
+ static_cast<DyldELFObject<ELFT>*>(getObjectFile())
+ ->updateSectionAddress(Sec, Addr);
+ }
+
+ void updateSymbolAddress(const SymbolRef &Sym, uint64_t Addr) override {
+ static_cast<DyldELFObject<ELFT>*>(getObjectFile())
+ ->updateSymbolAddress(Sym, Addr);
+ }
+
+ void registerWithDebugger() override {
+ JITRegistrar::getGDBRegistrar().registerObject(*Buffer);
+ Registered = true;
+ }
+ void deregisterWithDebugger() override {
+ JITRegistrar::getGDBRegistrar().deregisterObject(*Buffer);
+ }
};
// The MemoryBuffer passed into this constructor is just a wrapper around the
// actual memory. Ultimately, the Binary parent class will take ownership of
// this MemoryBuffer object but not the underlying memory.
-template<class ELFT>
-DyldELFObject<ELFT>::DyldELFObject(MemoryBuffer *Wrapper, error_code &ec)
- : ELFObjectFile<ELFT>(Wrapper, ec) {
+template <class ELFT>
+DyldELFObject<ELFT>::DyldELFObject(std::unique_ptr<MemoryBuffer> Wrapper,
+ std::error_code &EC)
+ : ELFObjectFile<ELFT>(std::move(Wrapper), EC) {
+ this->isDyldELFObject = true;
+}
+
+template <class ELFT>
+DyldELFObject<ELFT>::DyldELFObject(std::unique_ptr<ObjectFile> UnderlyingFile,
+ std::unique_ptr<MemoryBuffer> Wrapper,
+ std::error_code &EC)
+ : ELFObjectFile<ELFT>(std::move(Wrapper), EC),
+ UnderlyingFile(std::move(UnderlyingFile)) {
this->isDyldELFObject = true;
}
-template<class ELFT>
+template <class ELFT>
void DyldELFObject<ELFT>::updateSectionAddress(const SectionRef &Sec,
uint64_t Addr) {
DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
- Elf_Shdr *shdr = const_cast<Elf_Shdr*>(
- reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
+ Elf_Shdr *shdr =
+ const_cast<Elf_Shdr *>(reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
// This assumes the address passed in matches the target address bitness
// The template-based type cast handles everything else.
shdr->sh_addr = static_cast<addr_type>(Addr);
}
-template<class ELFT>
+template <class ELFT>
void DyldELFObject<ELFT>::updateSymbolAddress(const SymbolRef &SymRef,
uint64_t Addr) {
- Elf_Sym *sym = const_cast<Elf_Sym*>(
- ELFObjectFile<ELFT>::getSymbol(SymRef.getRawDataRefImpl()));
+ Elf_Sym *sym = const_cast<Elf_Sym *>(
+ ELFObjectFile<ELFT>::getSymbol(SymRef.getRawDataRefImpl()));
// This assumes the address passed in matches the target address bitness
// The template-based type cast handles everything else.
@@ -178,60 +179,94 @@ void RuntimeDyldELF::deregisterEHFrames() {
RegisteredEHFrameSections.clear();
}
+ObjectImage *
+RuntimeDyldELF::createObjectImageFromFile(std::unique_ptr<object::ObjectFile> ObjFile) {
+ if (!ObjFile)
+ return nullptr;
+
+ std::error_code ec;
+ std::unique_ptr<MemoryBuffer> Buffer(
+ MemoryBuffer::getMemBuffer(ObjFile->getData(), "", false));
+
+ if (ObjFile->getBytesInAddress() == 4 && ObjFile->isLittleEndian()) {
+ auto Obj =
+ llvm::make_unique<DyldELFObject<ELFType<support::little, 2, false>>>(
+ std::move(ObjFile), std::move(Buffer), ec);
+ return new ELFObjectImage<ELFType<support::little, 2, false>>(
+ nullptr, std::move(Obj));
+ } else if (ObjFile->getBytesInAddress() == 4 && !ObjFile->isLittleEndian()) {
+ auto Obj =
+ llvm::make_unique<DyldELFObject<ELFType<support::big, 2, false>>>(
+ std::move(ObjFile), std::move(Buffer), ec);
+ return new ELFObjectImage<ELFType<support::big, 2, false>>(nullptr, std::move(Obj));
+ } else if (ObjFile->getBytesInAddress() == 8 && !ObjFile->isLittleEndian()) {
+ auto Obj = llvm::make_unique<DyldELFObject<ELFType<support::big, 2, true>>>(
+ std::move(ObjFile), std::move(Buffer), ec);
+ return new ELFObjectImage<ELFType<support::big, 2, true>>(nullptr,
+ std::move(Obj));
+ } else if (ObjFile->getBytesInAddress() == 8 && ObjFile->isLittleEndian()) {
+ auto Obj =
+ llvm::make_unique<DyldELFObject<ELFType<support::little, 2, true>>>(
+ std::move(ObjFile), std::move(Buffer), ec);
+ return new ELFObjectImage<ELFType<support::little, 2, true>>(
+ nullptr, std::move(Obj));
+ } else
+ llvm_unreachable("Unexpected ELF format");
+}
+
ObjectImage *RuntimeDyldELF::createObjectImage(ObjectBuffer *Buffer) {
if (Buffer->getBufferSize() < ELF::EI_NIDENT)
llvm_unreachable("Unexpected ELF object size");
- std::pair<unsigned char, unsigned char> Ident = std::make_pair(
- (uint8_t)Buffer->getBufferStart()[ELF::EI_CLASS],
- (uint8_t)Buffer->getBufferStart()[ELF::EI_DATA]);
- error_code ec;
+ std::pair<unsigned char, unsigned char> Ident =
+ std::make_pair((uint8_t)Buffer->getBufferStart()[ELF::EI_CLASS],
+ (uint8_t)Buffer->getBufferStart()[ELF::EI_DATA]);
+ std::error_code ec;
+
+ std::unique_ptr<MemoryBuffer> Buf(Buffer->getMemBuffer());
if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2LSB) {
- DyldELFObject<ELFType<support::little, 4, false> > *Obj =
- new DyldELFObject<ELFType<support::little, 4, false> >(
- Buffer->getMemBuffer(), ec);
- return new ELFObjectImage<ELFType<support::little, 4, false> >(Buffer, Obj);
- }
- else if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2MSB) {
- DyldELFObject<ELFType<support::big, 4, false> > *Obj =
- new DyldELFObject<ELFType<support::big, 4, false> >(
- Buffer->getMemBuffer(), ec);
- return new ELFObjectImage<ELFType<support::big, 4, false> >(Buffer, Obj);
- }
- else if (Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2MSB) {
- DyldELFObject<ELFType<support::big, 8, true> > *Obj =
- new DyldELFObject<ELFType<support::big, 8, true> >(
- Buffer->getMemBuffer(), ec);
- return new ELFObjectImage<ELFType<support::big, 8, true> >(Buffer, Obj);
- }
- else if (Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2LSB) {
- DyldELFObject<ELFType<support::little, 8, true> > *Obj =
- new DyldELFObject<ELFType<support::little, 8, true> >(
- Buffer->getMemBuffer(), ec);
- return new ELFObjectImage<ELFType<support::little, 8, true> >(Buffer, Obj);
- }
- else
+ auto Obj =
+ llvm::make_unique<DyldELFObject<ELFType<support::little, 4, false>>>(
+ std::move(Buf), ec);
+ return new ELFObjectImage<ELFType<support::little, 4, false>>(
+ Buffer, std::move(Obj));
+ } else if (Ident.first == ELF::ELFCLASS32 &&
+ Ident.second == ELF::ELFDATA2MSB) {
+ auto Obj =
+ llvm::make_unique<DyldELFObject<ELFType<support::big, 4, false>>>(
+ std::move(Buf), ec);
+ return new ELFObjectImage<ELFType<support::big, 4, false>>(Buffer,
+ std::move(Obj));
+ } else if (Ident.first == ELF::ELFCLASS64 &&
+ Ident.second == ELF::ELFDATA2MSB) {
+ auto Obj = llvm::make_unique<DyldELFObject<ELFType<support::big, 8, true>>>(
+ std::move(Buf), ec);
+ return new ELFObjectImage<ELFType<support::big, 8, true>>(Buffer, std::move(Obj));
+ } else if (Ident.first == ELF::ELFCLASS64 &&
+ Ident.second == ELF::ELFDATA2LSB) {
+ auto Obj =
+ llvm::make_unique<DyldELFObject<ELFType<support::little, 8, true>>>(
+ std::move(Buf), ec);
+ return new ELFObjectImage<ELFType<support::little, 8, true>>(Buffer, std::move(Obj));
+ } else
llvm_unreachable("Unexpected ELF format");
}
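createObjectImage above dispatches on the two e_ident bytes EI_CLASS and EI_DATA to pick one of four ELFType instantiations (32/64-bit crossed with little/big endian). Below is a hedged sketch of the same four-way dispatch using only the standard library and the well-known e_ident byte offsets (4 for class, 5 for data) instead of LLVM's ELF constants.

#include <cstdint>
#include <cstdio>

// Classify an ELF header by its ident bytes, mirroring the dispatch in
// createObjectImage. Offsets 4 and 5 are EI_CLASS and EI_DATA; the values
// 1/2 mean 32-/64-bit and little-/big-endian respectively.
static const char *classifyELF(const uint8_t *Ident) {
  uint8_t Class = Ident[4], Data = Ident[5];
  if (Class == 1 && Data == 1) return "ELF32, little-endian";
  if (Class == 1 && Data == 2) return "ELF32, big-endian";
  if (Class == 2 && Data == 2) return "ELF64, big-endian";
  if (Class == 2 && Data == 1) return "ELF64, little-endian";
  return "unexpected ELF format";
}

int main() {
  // 0x7f 'E' 'L' 'F', ELFCLASS64, ELFDATA2LSB, EV_CURRENT, padding...
  const uint8_t Ident[16] = {0x7f, 'E', 'L', 'F', 2, 1, 1};
  std::printf("%s\n", classifyELF(Ident));   // prints "ELF64, little-endian"
}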
-RuntimeDyldELF::~RuntimeDyldELF() {
-}
+RuntimeDyldELF::~RuntimeDyldELF() {}
void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend,
uint64_t SymOffset) {
switch (Type) {
default:
llvm_unreachable("Relocation type not implemented yet!");
- break;
+ break;
case ELF::R_X86_64_64: {
- uint64_t *Target = reinterpret_cast<uint64_t*>(Section.Address + Offset);
+ uint64_t *Target = reinterpret_cast<uint64_t *>(Section.Address + Offset);
*Target = Value + Addend;
- DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend))
- << " at " << format("%p\n",Target));
+ DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
+ << format("%p\n", Target));
break;
}
case ELF::R_X86_64_32:
@@ -239,20 +274,20 @@ void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
Value += Addend;
assert((Type == ELF::R_X86_64_32 && (Value <= UINT32_MAX)) ||
(Type == ELF::R_X86_64_32S &&
- ((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN)));
+ ((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN)));
uint32_t TruncatedAddr = (Value & 0xFFFFFFFF);
- uint32_t *Target = reinterpret_cast<uint32_t*>(Section.Address + Offset);
+ uint32_t *Target = reinterpret_cast<uint32_t *>(Section.Address + Offset);
*Target = TruncatedAddr;
- DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr)
- << " at " << format("%p\n",Target));
+ DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
+ << format("%p\n", Target));
break;
}
case ELF::R_X86_64_GOTPCREL: {
// findGOTEntry returns the 'G + GOT' part of the relocation calculation
// based on the load/target address of the GOT (not the current/local addr).
uint64_t GOTAddr = findGOTEntry(Value, SymOffset);
- uint32_t *Target = reinterpret_cast<uint32_t*>(Section.Address + Offset);
- uint64_t FinalAddress = Section.LoadAddress + Offset;
+ uint32_t *Target = reinterpret_cast<uint32_t *>(Section.Address + Offset);
+ uint64_t FinalAddress = Section.LoadAddress + Offset;
// The processRelocationRef method combines the symbol offset and the addend
// and in most cases that's what we want. For this relocation type, we need
// the raw addend, so we subtract the symbol offset to get it.
@@ -265,10 +300,10 @@ void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
case ELF::R_X86_64_PC32: {
// Get the placeholder value from the generated object since
// a previous relocation attempt may have overwritten the loaded version
- uint32_t *Placeholder = reinterpret_cast<uint32_t*>(Section.ObjAddress
- + Offset);
- uint32_t *Target = reinterpret_cast<uint32_t*>(Section.Address + Offset);
- uint64_t FinalAddress = Section.LoadAddress + Offset;
+ uint32_t *Placeholder =
+ reinterpret_cast<uint32_t *>(Section.ObjAddress + Offset);
+ uint32_t *Target = reinterpret_cast<uint32_t *>(Section.Address + Offset);
+ uint64_t FinalAddress = Section.LoadAddress + Offset;
int64_t RealOffset = *Placeholder + Value + Addend - FinalAddress;
assert(RealOffset <= INT32_MAX && RealOffset >= INT32_MIN);
int32_t TruncOffset = (RealOffset & 0xFFFFFFFF);
@@ -278,10 +313,10 @@ void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
case ELF::R_X86_64_PC64: {
// Get the placeholder value from the generated object since
// a previous relocation attempt may have overwritten the loaded version
- uint64_t *Placeholder = reinterpret_cast<uint64_t*>(Section.ObjAddress
- + Offset);
- uint64_t *Target = reinterpret_cast<uint64_t*>(Section.Address + Offset);
- uint64_t FinalAddress = Section.LoadAddress + Offset;
+ uint64_t *Placeholder =
+ reinterpret_cast<uint64_t *>(Section.ObjAddress + Offset);
+ uint64_t *Target = reinterpret_cast<uint64_t *>(Section.Address + Offset);
+ uint64_t FinalAddress = Section.LoadAddress + Offset;
*Target = *Placeholder + Value + Addend - FinalAddress;
break;
}
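The R_X86_64_PC32/PC64 cases above all follow the same recipe: read the implicit addend still sitting in the object's copy of the section (the "placeholder"), add the symbol value and the explicit addend, subtract the load address of the fixup site, and store the truncated result. A minimal standalone sketch of that arithmetic over a plain byte buffer (this is not RuntimeDyld's actual interface, just the math):

#include <cassert>
#include <cstdint>
#include <cstring>

// Apply an R_X86_64_PC32-style fixup: the four bytes at FixupOffset already
// hold an implicit addend; replace them with S + A + implicit - P, where P is
// the run-time address of the fixup site itself.
static void applyPC32(uint8_t *Section, uint64_t SectionLoadAddr,
                      uint64_t FixupOffset, uint64_t SymbolAddr,
                      int64_t Addend) {
  uint32_t Implicit;
  std::memcpy(&Implicit, Section + FixupOffset, 4);
  uint64_t P = SectionLoadAddr + FixupOffset;
  int64_t Rel = (int64_t)(SymbolAddr + Addend + Implicit - P);
  assert(Rel >= INT32_MIN && Rel <= INT32_MAX && "PC32 overflow");
  int32_t Trunc = (int32_t)Rel;
  std::memcpy(Section + FixupOffset, &Trunc, 4);
}

int main() {
  uint8_t Section[16] = {};                  // pretend this is loaded code
  applyPC32(Section, /*LoadAddr=*/0x400000, /*Offset=*/4,
            /*Symbol=*/0x400100, /*Addend=*/-4);
  int32_t Out;
  std::memcpy(&Out, Section + 4, 4);
  assert(Out == 0xF8);                       // 0x400100 + (-4) + 0 - 0x400004
}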
@@ -289,53 +324,48 @@ void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
}
void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section,
- uint64_t Offset,
- uint32_t Value,
- uint32_t Type,
- int32_t Addend) {
+ uint64_t Offset, uint32_t Value,
+ uint32_t Type, int32_t Addend) {
switch (Type) {
case ELF::R_386_32: {
// Get the placeholder value from the generated object since
// a previous relocation attempt may have overwritten the loaded version
- uint32_t *Placeholder = reinterpret_cast<uint32_t*>(Section.ObjAddress
- + Offset);
- uint32_t *Target = reinterpret_cast<uint32_t*>(Section.Address + Offset);
+ uint32_t *Placeholder =
+ reinterpret_cast<uint32_t *>(Section.ObjAddress + Offset);
+ uint32_t *Target = reinterpret_cast<uint32_t *>(Section.Address + Offset);
*Target = *Placeholder + Value + Addend;
break;
}
case ELF::R_386_PC32: {
// Get the placeholder value from the generated object since
// a previous relocation attempt may have overwritten the loaded version
- uint32_t *Placeholder = reinterpret_cast<uint32_t*>(Section.ObjAddress
- + Offset);
- uint32_t *Target = reinterpret_cast<uint32_t*>(Section.Address + Offset);
- uint32_t FinalAddress = ((Section.LoadAddress + Offset) & 0xFFFFFFFF);
+ uint32_t *Placeholder =
+ reinterpret_cast<uint32_t *>(Section.ObjAddress + Offset);
+ uint32_t *Target = reinterpret_cast<uint32_t *>(Section.Address + Offset);
+ uint32_t FinalAddress = ((Section.LoadAddress + Offset) & 0xFFFFFFFF);
uint32_t RealOffset = *Placeholder + Value + Addend - FinalAddress;
*Target = RealOffset;
break;
- }
- default:
- // There are other relocation types, but it appears these are the
- // only ones currently used by the LLVM ELF object writer
- llvm_unreachable("Relocation type not implemented yet!");
- break;
+ }
+ default:
+ // There are other relocation types, but it appears these are the
+ // only ones currently used by the LLVM ELF object writer
+ llvm_unreachable("Relocation type not implemented yet!");
+ break;
}
}
void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend) {
- uint32_t *TargetPtr = reinterpret_cast<uint32_t*>(Section.Address + Offset);
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend) {
+ uint32_t *TargetPtr = reinterpret_cast<uint32_t *>(Section.Address + Offset);
uint64_t FinalAddress = Section.LoadAddress + Offset;
DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
<< format("%llx", Section.Address + Offset)
- << " FinalAddress: 0x" << format("%llx",FinalAddress)
- << " Value: 0x" << format("%llx",Value)
- << " Type: 0x" << format("%x",Type)
- << " Addend: 0x" << format("%llx",Addend)
+ << " FinalAddress: 0x" << format("%llx", FinalAddress)
+ << " Value: 0x" << format("%llx", Value) << " Type: 0x"
+ << format("%x", Type) << " Addend: 0x" << format("%llx", Addend)
<< "\n");
switch (Type) {
@@ -343,7 +373,8 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
llvm_unreachable("Relocation type not implemented yet!");
break;
case ELF::R_AARCH64_ABS64: {
- uint64_t *TargetPtr = reinterpret_cast<uint64_t*>(Section.Address + Offset);
+ uint64_t *TargetPtr =
+ reinterpret_cast<uint64_t *>(Section.Address + Offset);
*TargetPtr = Value + Addend;
break;
}
@@ -419,35 +450,77 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
assert((*TargetPtr >> 21 & 0x3) == 0 && "invalid shift for relocation");
break;
}
+ case ELF::R_AARCH64_ADR_PREL_PG_HI21: {
+ // Operation: Page(S+A) - Page(P)
+ uint64_t Result =
+ ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL);
+
+ // Check that -2^32 <= X < 2^32
+ assert(static_cast<int64_t>(Result) >= (-1LL << 32) &&
+ static_cast<int64_t>(Result) < (1LL << 32) &&
+ "overflow check failed for relocation");
+
+ // AArch64 code is emitted with .rela relocations. The data already in any
+ // bits affected by the relocation on entry is garbage.
+ *TargetPtr &= 0x9f00001fU;
+ // Immediate goes in bits 30:29 + 5:23 of ADRP instruction, taken
+ // from bits 32:12 of X.
+ *TargetPtr |= ((Result & 0x3000U) << (29 - 12));
+ *TargetPtr |= ((Result & 0x1ffffc000ULL) >> (14 - 5));
+ break;
+ }
+ case ELF::R_AARCH64_LDST32_ABS_LO12_NC: {
+ // Operation: S + A
+ uint64_t Result = Value + Addend;
+
+ // AArch64 code is emitted with .rela relocations. The data already in any
+ // bits affected by the relocation on entry is garbage.
+ *TargetPtr &= 0xffc003ffU;
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:2 of X
+ *TargetPtr |= ((Result & 0xffc) << (10 - 2));
+ break;
+ }
+ case ELF::R_AARCH64_LDST64_ABS_LO12_NC: {
+ // Operation: S + A
+ uint64_t Result = Value + Addend;
+
+ // AArch64 code is emitted with .rela relocations. The data already in any
+ // bits affected by the relocation on entry is garbage.
+ *TargetPtr &= 0xffc003ffU;
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:3 of X
+ *TargetPtr |= ((Result & 0xff8) << (10 - 3));
+ break;
+ }
}
}
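The R_AARCH64_ADR_PREL_PG_HI21 case above computes the page delta Page(S+A) - Page(P) and scatters bits 32:12 of it into the ADRP immediate fields (bits 30:29 and 23:5 of the instruction word). The round-trip check below reuses the same masks and shifts on the encode side; the decode side is a sketch based on the usual ADRP immlo/immhi layout, not something quoted from this file.

#include <cassert>
#include <cstdint>

// Pack a page-aligned PC-relative delta into an ADRP instruction word the
// same way the relocation case above does.
static uint32_t packAdrpImm(uint32_t Insn, uint64_t Result) {
  Insn &= 0x9f00001fU;                                       // clear imm fields
  Insn |= (uint32_t)((Result & 0x3000U) << (29 - 12));       // bits 13:12 -> 30:29
  Insn |= (uint32_t)((Result & 0x1ffffc000ULL) >> (14 - 5)); // bits 32:14 -> 23:5
  return Insn;
}

// Recover the page delta from the instruction word (assumed immlo/immhi
// placement) and check that encoding round-trips.
static int64_t unpackAdrpImm(uint32_t Insn) {
  uint64_t ImmLo = (Insn >> 29) & 0x3;
  uint64_t ImmHi = (Insn >> 5) & 0x7ffff;
  uint64_t Imm21 = (ImmHi << 2) | ImmLo;          // 21-bit page count
  int64_t Pages = (int64_t)(Imm21 << 43) >> 43;   // sign-extend 21 bits
  return Pages << 12;                             // pages are 4 KiB
}

int main() {
  uint64_t Delta = 0x12345000ULL;                 // some page-aligned delta
  uint32_t Insn = 0x90000000;                     // bare "adrp x0, ."
  assert(unpackAdrpImm(packAdrpImm(Insn, Delta)) == (int64_t)Delta);
}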
void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
- uint64_t Offset,
- uint32_t Value,
- uint32_t Type,
- int32_t Addend) {
+ uint64_t Offset, uint32_t Value,
+ uint32_t Type, int32_t Addend) {
// TODO: Add Thumb relocations.
- uint32_t *Placeholder = reinterpret_cast<uint32_t*>(Section.ObjAddress +
- Offset);
- uint32_t* TargetPtr = (uint32_t*)(Section.Address + Offset);
+ uint32_t *Placeholder =
+ reinterpret_cast<uint32_t *>(Section.ObjAddress + Offset);
+ uint32_t *TargetPtr = (uint32_t *)(Section.Address + Offset);
uint32_t FinalAddress = ((Section.LoadAddress + Offset) & 0xFFFFFFFF);
Value += Addend;
DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: "
<< Section.Address + Offset
- << " FinalAddress: " << format("%p",FinalAddress)
- << " Value: " << format("%x",Value)
- << " Type: " << format("%x",Type)
- << " Addend: " << format("%x",Addend)
- << "\n");
+ << " FinalAddress: " << format("%p", FinalAddress) << " Value: "
+ << format("%x", Value) << " Type: " << format("%x", Type)
+ << " Addend: " << format("%x", Addend) << "\n");
- switch(Type) {
+ switch (Type) {
default:
llvm_unreachable("Not implemented relocation type!");
+ case ELF::R_ARM_NONE:
+ break;
// Write a 32bit value to relocation address, taking into account the
// implicit addend encoded in the target.
+ case ELF::R_ARM_PREL31:
case ELF::R_ARM_TARGET1:
case ELF::R_ARM_ABS32:
*TargetPtr = *Placeholder + Value;
@@ -475,8 +548,8 @@ void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
*TargetPtr |= ((Value >> 12) & 0xF) << 16;
break;
// Write 24 bit relative value to the branch instruction.
- case ELF::R_ARM_PC24 : // Fall through.
- case ELF::R_ARM_CALL : // Fall through.
+ case ELF::R_ARM_PC24: // Fall through.
+ case ELF::R_ARM_CALL: // Fall through.
case ELF::R_ARM_JUMP24: {
int32_t RelValue = static_cast<int32_t>(Value - FinalAddress - 8);
RelValue = (RelValue & 0x03FFFFFC) >> 2;
@@ -496,25 +569,20 @@ void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
}
void RuntimeDyldELF::resolveMIPSRelocation(const SectionEntry &Section,
- uint64_t Offset,
- uint32_t Value,
- uint32_t Type,
- int32_t Addend) {
- uint32_t *Placeholder = reinterpret_cast<uint32_t*>(Section.ObjAddress +
- Offset);
- uint32_t* TargetPtr = (uint32_t*)(Section.Address + Offset);
+ uint64_t Offset, uint32_t Value,
+ uint32_t Type, int32_t Addend) {
+ uint32_t *Placeholder =
+ reinterpret_cast<uint32_t *>(Section.ObjAddress + Offset);
+ uint32_t *TargetPtr = (uint32_t *)(Section.Address + Offset);
Value += Addend;
DEBUG(dbgs() << "resolveMipselocation, LocalAddress: "
- << Section.Address + Offset
- << " FinalAddress: "
- << format("%p",Section.LoadAddress + Offset)
- << " Value: " << format("%x",Value)
- << " Type: " << format("%x",Type)
- << " Addend: " << format("%x",Addend)
- << "\n");
+ << Section.Address + Offset << " FinalAddress: "
+ << format("%p", Section.LoadAddress + Offset) << " Value: "
+ << format("%x", Value) << " Type: " << format("%x", Type)
+ << " Addend: " << format("%x", Addend) << "\n");
- switch(Type) {
+ switch (Type) {
default:
llvm_unreachable("Not implemented relocation type!");
break;
@@ -522,13 +590,13 @@ void RuntimeDyldELF::resolveMIPSRelocation(const SectionEntry &Section,
*TargetPtr = Value + (*Placeholder);
break;
case ELF::R_MIPS_26:
- *TargetPtr = ((*Placeholder) & 0xfc000000) | (( Value & 0x0fffffff) >> 2);
+ *TargetPtr = ((*Placeholder) & 0xfc000000) | ((Value & 0x0fffffff) >> 2);
break;
case ELF::R_MIPS_HI16:
// Get the higher 16-bits. Also add 1 if bit 15 is 1.
Value += ((*Placeholder) & 0x0000ffff) << 16;
- *TargetPtr = ((*Placeholder) & 0xffff0000) |
- (((Value + 0x8000) >> 16) & 0xffff);
+ *TargetPtr =
+ ((*Placeholder) & 0xffff0000) | (((Value + 0x8000) >> 16) & 0xffff);
break;
case ELF::R_MIPS_LO16:
Value += ((*Placeholder) & 0x0000ffff);
@@ -539,41 +607,47 @@ void RuntimeDyldELF::resolveMIPSRelocation(const SectionEntry &Section,
// are used for internal JIT purpose. These relocations are similar to
// R_MIPS_HI16 and R_MIPS_LO16, but they do not take any addend into
// account.
- *TargetPtr = ((*TargetPtr) & 0xffff0000) |
- (((Value + 0x8000) >> 16) & 0xffff);
+ *TargetPtr =
+ ((*TargetPtr) & 0xffff0000) | (((Value + 0x8000) >> 16) & 0xffff);
break;
case ELF::R_MIPS_UNUSED2:
*TargetPtr = ((*TargetPtr) & 0xffff0000) | (Value & 0xffff);
break;
- }
+ }
}
-// Return the .TOC. section address to R_PPC64_TOC relocations.
-uint64_t RuntimeDyldELF::findPPC64TOC() const {
+// Return the .TOC. section and offset.
+void RuntimeDyldELF::findPPC64TOCSection(ObjectImage &Obj,
+ ObjSectionToIDMap &LocalSections,
+ RelocationValueRef &Rel) {
+ // Set a default SectionID in case we do not find a TOC section below.
+ // This may happen for references to TOC base base (sym@toc, .odp
+ // relocation) without a .toc directive. In this case just use the
+ // first section (which is usually the .odp) since the code won't
+ // reference the .toc base directly.
+ Rel.SymbolName = NULL;
+ Rel.SectionID = 0;
+
// The TOC consists of sections .got, .toc, .tocbss, .plt in that
// order. The TOC starts where the first of these sections starts.
- SectionList::const_iterator it = Sections.begin();
- SectionList::const_iterator ite = Sections.end();
- for (; it != ite; ++it) {
- if (it->Name == ".got" ||
- it->Name == ".toc" ||
- it->Name == ".tocbss" ||
- it->Name == ".plt")
+ for (section_iterator si = Obj.begin_sections(), se = Obj.end_sections();
+ si != se; ++si) {
+
+ StringRef SectionName;
+ check(si->getName(SectionName));
+
+ if (SectionName == ".got"
+ || SectionName == ".toc"
+ || SectionName == ".tocbss"
+ || SectionName == ".plt") {
+ Rel.SectionID = findOrEmitSection(Obj, *si, false, LocalSections);
break;
+ }
}
- if (it == ite) {
- // This may happen for
- // * references to TOC base base (sym@toc, .odp relocation) without
- // a .toc directive.
- // In this case just use the first section (which is usually
- // the .odp) since the code won't reference the .toc base
- // directly.
- it = Sections.begin();
- }
- assert (it != ite);
+
// Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000
// thus permitting a full 64 Kbytes segment.
- return it->LoadAddress + 0x8000;
+ Rel.Addend = 0x8000;
}
// Returns the sections and offset associated with the ODP entry referenced
@@ -583,10 +657,8 @@ void RuntimeDyldELF::findOPDEntrySection(ObjectImage &Obj,
RelocationValueRef &Rel) {
// Get the ELF symbol value (st_value) to compare with Relocation offset in
// .opd entries
-
- error_code err;
- for (section_iterator si = Obj.begin_sections(),
- se = Obj.end_sections(); si != se; si.increment(err)) {
+ for (section_iterator si = Obj.begin_sections(), se = Obj.end_sections();
+ si != se; ++si) {
section_iterator RelSecI = si->getRelocatedSection();
if (RelSecI == Obj.end_sections())
continue;
@@ -596,16 +668,15 @@ void RuntimeDyldELF::findOPDEntrySection(ObjectImage &Obj,
if (RelSectionName != ".opd")
continue;
- for (relocation_iterator i = si->begin_relocations(),
- e = si->end_relocations(); i != e;) {
- check(err);
-
+ for (relocation_iterator i = si->relocation_begin(),
+ e = si->relocation_end();
+ i != e;) {
// The R_PPC64_ADDR64 relocation indicates the first field
// of a .opd entry
uint64_t TypeFunc;
check(i->getType(TypeFunc));
if (TypeFunc != ELF::R_PPC64_ADDR64) {
- i.increment(err);
+ ++i;
continue;
}
@@ -615,10 +686,9 @@ void RuntimeDyldELF::findOPDEntrySection(ObjectImage &Obj,
int64_t Addend;
check(getELFRelocationAddend(*i, Addend));
- i = i.increment(err);
+ ++i;
if (i == e)
break;
- check(err);
// Just check if following relocation is a R_PPC64_TOC
uint64_t TypeTOC;
@@ -634,7 +704,9 @@ void RuntimeDyldELF::findOPDEntrySection(ObjectImage &Obj,
section_iterator tsi(Obj.end_sections());
check(TargetSymbol->getSection(tsi));
- Rel.SectionID = findOrEmitSection(Obj, (*tsi), true, LocalSections);
+ bool IsCode = false;
+ tsi->isText(IsCode);
+ Rel.SectionID = findOrEmitSection(Obj, (*tsi), IsCode, LocalSections);
Rel.Addend = (intptr_t)Addend;
return;
}
@@ -642,69 +714,103 @@ void RuntimeDyldELF::findOPDEntrySection(ObjectImage &Obj,
llvm_unreachable("Attempting to get address of ODP entry!");
}
-// Relocation masks following the #lo(value), #hi(value), #higher(value),
-// and #highest(value) macros defined in section 4.5.1. Relocation Types
-// in PPC-elf64abi document.
-//
-static inline
-uint16_t applyPPClo (uint64_t value)
-{
- return value & 0xffff;
-}
+// Relocation masks following the #lo(value), #hi(value), #ha(value),
+// #higher(value), #highera(value), #highest(value), and #highesta(value)
+// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
+// document.
-static inline
-uint16_t applyPPChi (uint64_t value)
-{
+static inline uint16_t applyPPClo(uint64_t value) { return value & 0xffff; }
+
+static inline uint16_t applyPPChi(uint64_t value) {
return (value >> 16) & 0xffff;
}
-static inline
-uint16_t applyPPChigher (uint64_t value)
-{
+static inline uint16_t applyPPCha (uint64_t value) {
+ return ((value + 0x8000) >> 16) & 0xffff;
+}
+
+static inline uint16_t applyPPChigher(uint64_t value) {
return (value >> 32) & 0xffff;
}
-static inline
-uint16_t applyPPChighest (uint64_t value)
-{
+static inline uint16_t applyPPChighera (uint64_t value) {
+ return ((value + 0x8000) >> 32) & 0xffff;
+}
+
+static inline uint16_t applyPPChighest(uint64_t value) {
return (value >> 48) & 0xffff;
}
+static inline uint16_t applyPPChighesta (uint64_t value) {
+ return ((value + 0x8000) >> 48) & 0xffff;
+}
+
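The new @ha-style helpers bias the value by 0x8000 before taking a halfword, so the +1 carry needed when the paired @lo part is negative (bit 15 set) is already baked into the high half. A quick standalone check of that identity (plain C++, not tied to any LLVM API):

#include <cassert>
#include <cstdint>

static uint16_t lo(uint64_t V) { return V & 0xffff; }
static uint16_t ha(uint64_t V) { return ((V + 0x8000) >> 16) & 0xffff; }

int main() {
  uint32_t Value = 0x12348765;              // low half has bit 15 set
  // addis-style use: (ha << 16) plus the *sign-extended* lo reconstructs Value.
  int32_t Reconstructed = ((int32_t)ha(Value) << 16) + (int16_t)lo(Value);
  assert((uint32_t)Reconstructed == Value); // 0x12350000 - 0x789b == 0x12348765
}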
void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend) {
- uint8_t* LocalAddress = Section.Address + Offset;
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend) {
+ uint8_t *LocalAddress = Section.Address + Offset;
switch (Type) {
default:
llvm_unreachable("Relocation type not implemented yet!");
- break;
- case ELF::R_PPC64_ADDR16_LO :
- writeInt16BE(LocalAddress, applyPPClo (Value + Addend));
break;
- case ELF::R_PPC64_ADDR16_HI :
- writeInt16BE(LocalAddress, applyPPChi (Value + Addend));
+ case ELF::R_PPC64_ADDR16:
+ writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
break;
- case ELF::R_PPC64_ADDR16_HIGHER :
- writeInt16BE(LocalAddress, applyPPChigher (Value + Addend));
+ case ELF::R_PPC64_ADDR16_DS:
+ writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
break;
- case ELF::R_PPC64_ADDR16_HIGHEST :
- writeInt16BE(LocalAddress, applyPPChighest (Value + Addend));
+ case ELF::R_PPC64_ADDR16_LO:
+ writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
break;
- case ELF::R_PPC64_ADDR14 : {
+ case ELF::R_PPC64_ADDR16_LO_DS:
+ writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
+ break;
+ case ELF::R_PPC64_ADDR16_HI:
+ writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HA:
+ writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHER:
+ writeInt16BE(LocalAddress, applyPPChigher(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHERA:
+ writeInt16BE(LocalAddress, applyPPChighera(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHEST:
+ writeInt16BE(LocalAddress, applyPPChighest(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHESTA:
+ writeInt16BE(LocalAddress, applyPPChighesta(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR14: {
assert(((Value + Addend) & 3) == 0);
// Preserve the AA/LK bits in the branch instruction
- uint8_t aalk = *(LocalAddress+3);
+ uint8_t aalk = *(LocalAddress + 3);
writeInt16BE(LocalAddress + 2, (aalk & 3) | ((Value + Addend) & 0xfffc));
} break;
- case ELF::R_PPC64_ADDR32 : {
+ case ELF::R_PPC64_REL16_LO: {
+ uint64_t FinalAddress = (Section.LoadAddress + Offset);
+ uint64_t Delta = Value - FinalAddress + Addend;
+ writeInt16BE(LocalAddress, applyPPClo(Delta));
+ } break;
+ case ELF::R_PPC64_REL16_HI: {
+ uint64_t FinalAddress = (Section.LoadAddress + Offset);
+ uint64_t Delta = Value - FinalAddress + Addend;
+ writeInt16BE(LocalAddress, applyPPChi(Delta));
+ } break;
+ case ELF::R_PPC64_REL16_HA: {
+ uint64_t FinalAddress = (Section.LoadAddress + Offset);
+ uint64_t Delta = Value - FinalAddress + Addend;
+ writeInt16BE(LocalAddress, applyPPCha(Delta));
+ } break;
+ case ELF::R_PPC64_ADDR32: {
int32_t Result = static_cast<int32_t>(Value + Addend);
if (SignExtend32<32>(Result) != Result)
llvm_unreachable("Relocation R_PPC64_ADDR32 overflow");
writeInt32BE(LocalAddress, Result);
} break;
- case ELF::R_PPC64_REL24 : {
+ case ELF::R_PPC64_REL24: {
uint64_t FinalAddress = (Section.LoadAddress + Offset);
int32_t delta = static_cast<int32_t>(Value - FinalAddress + Addend);
if (SignExtend32<24>(delta) != delta)
@@ -712,7 +818,7 @@ void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
// Generates a 'bl <address>' instruction
writeInt32BE(LocalAddress, 0x48000001 | (delta & 0x03FFFFFC));
} break;
- case ELF::R_PPC64_REL32 : {
+ case ELF::R_PPC64_REL32: {
uint64_t FinalAddress = (Section.LoadAddress + Offset);
int32_t delta = static_cast<int32_t>(Value - FinalAddress + Addend);
if (SignExtend32<32>(delta) != delta)
@@ -724,30 +830,15 @@ void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
uint64_t Delta = Value - FinalAddress + Addend;
writeInt64BE(LocalAddress, Delta);
} break;
- case ELF::R_PPC64_ADDR64 :
+ case ELF::R_PPC64_ADDR64:
writeInt64BE(LocalAddress, Value + Addend);
break;
- case ELF::R_PPC64_TOC :
- writeInt64BE(LocalAddress, findPPC64TOC());
- break;
- case ELF::R_PPC64_TOC16 : {
- uint64_t TOCStart = findPPC64TOC();
- Value = applyPPClo((Value + Addend) - TOCStart);
- writeInt16BE(LocalAddress, applyPPClo(Value));
- } break;
- case ELF::R_PPC64_TOC16_DS : {
- uint64_t TOCStart = findPPC64TOC();
- Value = ((Value + Addend) - TOCStart);
- writeInt16BE(LocalAddress, applyPPClo(Value));
- } break;
}
}
void RuntimeDyldELF::resolveSystemZRelocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend) {
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend) {
uint8_t *LocalAddress = Section.Address + Offset;
switch (Type) {
default:
@@ -807,66 +898,63 @@ void RuntimeDyldELF::resolveRelocation(const RelocationEntry &RE,
}
void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend,
uint64_t SymOffset) {
switch (Arch) {
case Triple::x86_64:
resolveX86_64Relocation(Section, Offset, Value, Type, Addend, SymOffset);
break;
case Triple::x86:
- resolveX86Relocation(Section, Offset,
- (uint32_t)(Value & 0xffffffffL), Type,
+ resolveX86Relocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
(uint32_t)(Addend & 0xffffffffL));
break;
case Triple::aarch64:
+ case Triple::aarch64_be:
+ case Triple::arm64:
+ case Triple::arm64_be:
resolveAArch64Relocation(Section, Offset, Value, Type, Addend);
break;
- case Triple::arm: // Fall through.
+ case Triple::arm: // Fall through.
+ case Triple::armeb:
case Triple::thumb:
- resolveARMRelocation(Section, Offset,
- (uint32_t)(Value & 0xffffffffL), Type,
+ case Triple::thumbeb:
+ resolveARMRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
(uint32_t)(Addend & 0xffffffffL));
break;
- case Triple::mips: // Fall through.
+ case Triple::mips: // Fall through.
case Triple::mipsel:
- resolveMIPSRelocation(Section, Offset,
- (uint32_t)(Value & 0xffffffffL), Type,
- (uint32_t)(Addend & 0xffffffffL));
+ resolveMIPSRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL),
+ Type, (uint32_t)(Addend & 0xffffffffL));
break;
- case Triple::ppc64: // Fall through.
+ case Triple::ppc64: // Fall through.
case Triple::ppc64le:
resolvePPC64Relocation(Section, Offset, Value, Type, Addend);
break;
case Triple::systemz:
resolveSystemZRelocation(Section, Offset, Value, Type, Addend);
break;
- default: llvm_unreachable("Unsupported CPU type!");
+ default:
+ llvm_unreachable("Unsupported CPU type!");
}
}
-void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
- RelocationRef RelI,
- ObjectImage &Obj,
- ObjSectionToIDMap &ObjSectionToID,
- const SymbolTableMap &Symbols,
- StubMap &Stubs) {
+relocation_iterator RuntimeDyldELF::processRelocationRef(
+ unsigned SectionID, relocation_iterator RelI, ObjectImage &Obj,
+ ObjSectionToIDMap &ObjSectionToID, const SymbolTableMap &Symbols,
+ StubMap &Stubs) {
uint64_t RelType;
- Check(RelI.getType(RelType));
+ Check(RelI->getType(RelType));
int64_t Addend;
- Check(getELFRelocationAddend(RelI, Addend));
- symbol_iterator Symbol = RelI.getSymbol();
+ Check(getELFRelocationAddend(*RelI, Addend));
+ symbol_iterator Symbol = RelI->getSymbol();
// Obtain the symbol name which is referenced in the relocation
StringRef TargetName;
if (Symbol != Obj.end_symbols())
Symbol->getName(TargetName);
- DEBUG(dbgs() << "\t\tRelType: " << RelType
- << " Addend: " << Addend
- << " TargetName: " << TargetName
- << "\n");
+ DEBUG(dbgs() << "\t\tRelType: " << RelType << " Addend: " << Addend
+ << " TargetName: " << TargetName << "\n");
RelocationValueRef Value;
// First search for the symbol in the local symbol table
SymbolTableMap::const_iterator lsi = Symbols.end();
@@ -890,53 +978,49 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
Value.Addend = gsi->second.second + Addend;
} else {
switch (SymType) {
- case SymbolRef::ST_Debug: {
- // TODO: Now ELF SymbolRef::ST_Debug = STT_SECTION, it's not obviously
- // and can be changed by another developers. Maybe best way is add
- // a new symbol type ST_Section to SymbolRef and use it.
- section_iterator si(Obj.end_sections());
- Symbol->getSection(si);
- if (si == Obj.end_sections())
- llvm_unreachable("Symbol section not found, bad object file format!");
- DEBUG(dbgs() << "\t\tThis is section symbol\n");
- // Default to 'true' in case isText fails (though it never does).
- bool isCode = true;
- si->isText(isCode);
- Value.SectionID = findOrEmitSection(Obj,
- (*si),
- isCode,
- ObjSectionToID);
- Value.Addend = Addend;
- break;
- }
- case SymbolRef::ST_Data:
- case SymbolRef::ST_Unknown: {
- Value.SymbolName = TargetName.data();
- Value.Addend = Addend;
-
- // Absolute relocations will have a zero symbol ID (STN_UNDEF), which
- // will manifest here as a NULL symbol name.
- // We can set this as a valid (but empty) symbol name, and rely
- // on addRelocationForSymbol to handle this.
- if (!Value.SymbolName)
- Value.SymbolName = "";
- break;
- }
- default:
- llvm_unreachable("Unresolved symbol type!");
- break;
+ case SymbolRef::ST_Debug: {
+      // TODO: Now ELF SymbolRef::ST_Debug = STT_SECTION; it's not obvious
+      // and can be changed by other developers. Maybe the best way is to add
+      // a new symbol type ST_Section to SymbolRef and use it.
+ section_iterator si(Obj.end_sections());
+ Symbol->getSection(si);
+ if (si == Obj.end_sections())
+ llvm_unreachable("Symbol section not found, bad object file format!");
+ DEBUG(dbgs() << "\t\tThis is section symbol\n");
+ // Default to 'true' in case isText fails (though it never does).
+ bool isCode = true;
+ si->isText(isCode);
+ Value.SectionID = findOrEmitSection(Obj, (*si), isCode, ObjSectionToID);
+ Value.Addend = Addend;
+ break;
+ }
+ case SymbolRef::ST_Data:
+ case SymbolRef::ST_Unknown: {
+ Value.SymbolName = TargetName.data();
+ Value.Addend = Addend;
+
+ // Absolute relocations will have a zero symbol ID (STN_UNDEF), which
+ // will manifest here as a NULL symbol name.
+ // We can set this as a valid (but empty) symbol name, and rely
+ // on addRelocationForSymbol to handle this.
+ if (!Value.SymbolName)
+ Value.SymbolName = "";
+ break;
+ }
+ default:
+ llvm_unreachable("Unresolved symbol type!");
+ break;
}
}
}
uint64_t Offset;
- Check(RelI.getOffset(Offset));
+ Check(RelI->getOffset(Offset));
- DEBUG(dbgs() << "\t\tSectionID: " << SectionID
- << " Offset: " << Offset
+ DEBUG(dbgs() << "\t\tSectionID: " << SectionID << " Offset: " << Offset
<< "\n");
- if (Arch == Triple::aarch64 &&
- (RelType == ELF::R_AARCH64_CALL26 ||
- RelType == ELF::R_AARCH64_JUMP26)) {
+ if ((Arch == Triple::aarch64 || Arch == Triple::aarch64_be ||
+ Arch == Triple::arm64 || Arch == Triple::arm64_be) &&
+ (RelType == ELF::R_AARCH64_CALL26 || RelType == ELF::R_AARCH64_JUMP26)) {
// This is an AArch64 branch relocation, need to use a stub function.
DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation.");
SectionEntry &Section = Sections[SectionID];
@@ -944,24 +1028,21 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
// Look for an existing stub.
StubMap::const_iterator i = Stubs.find(Value);
if (i != Stubs.end()) {
- resolveRelocation(Section, Offset,
- (uint64_t)Section.Address + i->second, RelType, 0);
+ resolveRelocation(Section, Offset, (uint64_t)Section.Address + i->second,
+ RelType, 0);
DEBUG(dbgs() << " Stub function found\n");
} else {
// Create a new stub function.
DEBUG(dbgs() << " Create a new stub function\n");
Stubs[Value] = Section.StubOffset;
- uint8_t *StubTargetAddr = createStubFunction(Section.Address +
- Section.StubOffset);
+ uint8_t *StubTargetAddr =
+ createStubFunction(Section.Address + Section.StubOffset);
- RelocationEntry REmovz_g3(SectionID,
- StubTargetAddr - Section.Address,
+ RelocationEntry REmovz_g3(SectionID, StubTargetAddr - Section.Address,
ELF::R_AARCH64_MOVW_UABS_G3, Value.Addend);
- RelocationEntry REmovk_g2(SectionID,
- StubTargetAddr - Section.Address + 4,
+ RelocationEntry REmovk_g2(SectionID, StubTargetAddr - Section.Address + 4,
ELF::R_AARCH64_MOVW_UABS_G2_NC, Value.Addend);
- RelocationEntry REmovk_g1(SectionID,
- StubTargetAddr - Section.Address + 8,
+ RelocationEntry REmovk_g1(SectionID, StubTargetAddr - Section.Address + 8,
ELF::R_AARCH64_MOVW_UABS_G1_NC, Value.Addend);
RelocationEntry REmovk_g0(SectionID,
StubTargetAddr - Section.Address + 12,
@@ -979,14 +1060,13 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
addRelocationForSection(REmovk_g0, Value.SectionID);
}
resolveRelocation(Section, Offset,
- (uint64_t)Section.Address + Section.StubOffset,
- RelType, 0);
+ (uint64_t)Section.Address + Section.StubOffset, RelType,
+ 0);
Section.StubOffset += getMaxStubSize();
}
} else if (Arch == Triple::arm &&
- (RelType == ELF::R_ARM_PC24 ||
- RelType == ELF::R_ARM_CALL ||
- RelType == ELF::R_ARM_JUMP24)) {
+ (RelType == ELF::R_ARM_PC24 || RelType == ELF::R_ARM_CALL ||
+ RelType == ELF::R_ARM_JUMP24)) {
// This is an ARM branch relocation, need to use a stub function.
DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.");
SectionEntry &Section = Sections[SectionID];
@@ -994,15 +1074,15 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
// Look for an existing stub.
StubMap::const_iterator i = Stubs.find(Value);
if (i != Stubs.end()) {
- resolveRelocation(Section, Offset,
- (uint64_t)Section.Address + i->second, RelType, 0);
+ resolveRelocation(Section, Offset, (uint64_t)Section.Address + i->second,
+ RelType, 0);
DEBUG(dbgs() << " Stub function found\n");
} else {
// Create a new stub function.
DEBUG(dbgs() << " Create a new stub function\n");
Stubs[Value] = Section.StubOffset;
- uint8_t *StubTargetAddr = createStubFunction(Section.Address +
- Section.StubOffset);
+ uint8_t *StubTargetAddr =
+ createStubFunction(Section.Address + Section.StubOffset);
RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
ELF::R_ARM_PRIVATE_0, Value.Addend);
if (Value.SymbolName)
@@ -1011,8 +1091,8 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
addRelocationForSection(RE, Value.SectionID);
resolveRelocation(Section, Offset,
- (uint64_t)Section.Address + Section.StubOffset,
- RelType, 0);
+ (uint64_t)Section.Address + Section.StubOffset, RelType,
+ 0);
Section.StubOffset += getMaxStubSize();
}
} else if ((Arch == Triple::mipsel || Arch == Triple::mips) &&
@@ -1038,15 +1118,13 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
// Create a new stub function.
DEBUG(dbgs() << " Create a new stub function\n");
Stubs[Value] = Section.StubOffset;
- uint8_t *StubTargetAddr = createStubFunction(Section.Address +
- Section.StubOffset);
+ uint8_t *StubTargetAddr =
+ createStubFunction(Section.Address + Section.StubOffset);
// Creating Hi and Lo relocations for the filled stub instructions.
- RelocationEntry REHi(SectionID,
- StubTargetAddr - Section.Address,
+ RelocationEntry REHi(SectionID, StubTargetAddr - Section.Address,
ELF::R_MIPS_UNUSED1, Value.Addend);
- RelocationEntry RELo(SectionID,
- StubTargetAddr - Section.Address + 4,
+ RelocationEntry RELo(SectionID, StubTargetAddr - Section.Address + 4,
ELF::R_MIPS_UNUSED2, Value.Addend);
if (Value.SymbolName) {
@@ -1063,6 +1141,10 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
}
} else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
if (RelType == ELF::R_PPC64_REL24) {
+ // Determine ABI variant in use for this object.
+ unsigned AbiVariant;
+ Obj.getObjectFile()->getPlatformFlags(AbiVariant);
+ AbiVariant &= ELF::EF_PPC64_ABI;
// A PPC branch relocation will need a stub function if the target is
// an external symbol (Symbol::ST_Unknown) or if the target address
// is not within the signed 24-bits branch address.
@@ -1070,9 +1152,18 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
uint8_t *Target = Section.Address + Offset;
bool RangeOverflow = false;
if (SymType != SymbolRef::ST_Unknown) {
- // A function call may points to the .opd entry, so the final symbol value
- // in calculated based in the relocation values in .opd section.
- findOPDEntrySection(Obj, ObjSectionToID, Value);
+ if (AbiVariant != 2) {
+ // In the ELFv1 ABI, a function call may point to the .opd entry,
+ // so the final symbol value is calculated based on the relocation
+ // values in the .opd section.
+ findOPDEntrySection(Obj, ObjSectionToID, Value);
+ } else {
+ // In the ELFv2 ABI, a function symbol may provide a local entry
+ // point, which must be used for direct calls.
+ uint8_t SymOther;
+ Symbol->getOther(SymOther);
+ Value.Addend += ELF::decodePPC64LocalEntryOffset(SymOther);
+ }
uint8_t *RelocTarget = Sections[Value.SectionID].Address + Value.Addend;
int32_t delta = static_cast<int32_t>(Target - RelocTarget);
// If it is within 24-bits branch range, just set the branch target
@@ -1099,53 +1190,100 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
// Create a new stub function.
DEBUG(dbgs() << " Create a new stub function\n");
Stubs[Value] = Section.StubOffset;
- uint8_t *StubTargetAddr = createStubFunction(Section.Address +
- Section.StubOffset);
+ uint8_t *StubTargetAddr =
+ createStubFunction(Section.Address + Section.StubOffset,
+ AbiVariant);
RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
ELF::R_PPC64_ADDR64, Value.Addend);
// Generates the 64-bits address loads as exemplified in section
- // 4.5.1 in PPC64 ELF ABI.
- RelocationEntry REhst(SectionID,
- StubTargetAddr - Section.Address + 2,
+ // 4.5.1 in PPC64 ELF ABI. Note that the relocations need to
+ // apply to the low part of the instructions, so we have to update
+ // the offset according to the target endianness.
+ uint64_t StubRelocOffset = StubTargetAddr - Section.Address;
+ if (!IsTargetLittleEndian)
+ StubRelocOffset += 2;
+
+ RelocationEntry REhst(SectionID, StubRelocOffset + 0,
ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend);
- RelocationEntry REhr(SectionID,
- StubTargetAddr - Section.Address + 6,
+ RelocationEntry REhr(SectionID, StubRelocOffset + 4,
ELF::R_PPC64_ADDR16_HIGHER, Value.Addend);
- RelocationEntry REh(SectionID,
- StubTargetAddr - Section.Address + 14,
+ RelocationEntry REh(SectionID, StubRelocOffset + 12,
ELF::R_PPC64_ADDR16_HI, Value.Addend);
- RelocationEntry REl(SectionID,
- StubTargetAddr - Section.Address + 18,
+ RelocationEntry REl(SectionID, StubRelocOffset + 16,
ELF::R_PPC64_ADDR16_LO, Value.Addend);
if (Value.SymbolName) {
addRelocationForSymbol(REhst, Value.SymbolName);
- addRelocationForSymbol(REhr, Value.SymbolName);
- addRelocationForSymbol(REh, Value.SymbolName);
- addRelocationForSymbol(REl, Value.SymbolName);
+ addRelocationForSymbol(REhr, Value.SymbolName);
+ addRelocationForSymbol(REh, Value.SymbolName);
+ addRelocationForSymbol(REl, Value.SymbolName);
} else {
addRelocationForSection(REhst, Value.SectionID);
- addRelocationForSection(REhr, Value.SectionID);
- addRelocationForSection(REh, Value.SectionID);
- addRelocationForSection(REl, Value.SectionID);
+ addRelocationForSection(REhr, Value.SectionID);
+ addRelocationForSection(REh, Value.SectionID);
+ addRelocationForSection(REl, Value.SectionID);
}
resolveRelocation(Section, Offset,
(uint64_t)Section.Address + Section.StubOffset,
RelType, 0);
- if (SymType == SymbolRef::ST_Unknown)
- // Restore the TOC for external calls
- writeInt32BE(Target+4, 0xE8410028); // ld r2,40(r1)
Section.StubOffset += getMaxStubSize();
}
+ if (SymType == SymbolRef::ST_Unknown) {
+ // Restore the TOC for external calls
+ if (AbiVariant == 2)
+ writeInt32BE(Target + 4, 0xE8410018); // ld r2,28(r1)
+ else
+ writeInt32BE(Target + 4, 0xE8410028); // ld r2,40(r1)
+ }
}
+ } else if (RelType == ELF::R_PPC64_TOC16 ||
+ RelType == ELF::R_PPC64_TOC16_DS ||
+ RelType == ELF::R_PPC64_TOC16_LO ||
+ RelType == ELF::R_PPC64_TOC16_LO_DS ||
+ RelType == ELF::R_PPC64_TOC16_HI ||
+ RelType == ELF::R_PPC64_TOC16_HA) {
+ // These relocations are supposed to subtract the TOC address from
+ // the final value. This does not fit cleanly into the RuntimeDyld
+ // scheme, since there may be *two* sections involved in determining
+    // the relocation value (the section of the symbol referred to by the
+ // relocation, and the TOC section associated with the current module).
+ //
+ // Fortunately, these relocations are currently only ever generated
+    // referring to symbols that themselves reside in the TOC, which means
+ // that the two sections are actually the same. Thus they cancel out
+ // and we can immediately resolve the relocation right now.
+ switch (RelType) {
+ case ELF::R_PPC64_TOC16: RelType = ELF::R_PPC64_ADDR16; break;
+ case ELF::R_PPC64_TOC16_DS: RelType = ELF::R_PPC64_ADDR16_DS; break;
+ case ELF::R_PPC64_TOC16_LO: RelType = ELF::R_PPC64_ADDR16_LO; break;
+ case ELF::R_PPC64_TOC16_LO_DS: RelType = ELF::R_PPC64_ADDR16_LO_DS; break;
+ case ELF::R_PPC64_TOC16_HI: RelType = ELF::R_PPC64_ADDR16_HI; break;
+ case ELF::R_PPC64_TOC16_HA: RelType = ELF::R_PPC64_ADDR16_HA; break;
+ default: llvm_unreachable("Wrong relocation type.");
+ }
+
+ RelocationValueRef TOCValue;
+ findPPC64TOCSection(Obj, ObjSectionToID, TOCValue);
+ if (Value.SymbolName || Value.SectionID != TOCValue.SectionID)
+ llvm_unreachable("Unsupported TOC relocation.");
+ Value.Addend -= TOCValue.Addend;
+ resolveRelocation(Sections[SectionID], Offset, Value.Addend, RelType, 0);
} else {
+ // There are two ways to refer to the TOC address directly: either
+ // via a ELF::R_PPC64_TOC relocation (where both symbol and addend are
+ // ignored), or via any relocation that refers to the magic ".TOC."
+ // symbols (in which case the addend is respected).
+ if (RelType == ELF::R_PPC64_TOC) {
+ RelType = ELF::R_PPC64_ADDR64;
+ findPPC64TOCSection(Obj, ObjSectionToID, Value);
+ } else if (TargetName == ".TOC.") {
+ findPPC64TOCSection(Obj, ObjSectionToID, Value);
+ Value.Addend += Addend;
+ }
+
RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
- // Extra check to avoid relocation againt empty symbols (usually
- // the R_PPC64_TOC).
- if (SymType != SymbolRef::ST_Unknown && TargetName.empty())
- Value.SymbolName = NULL;
if (Value.SymbolName)
addRelocationForSymbol(RE, Value.SymbolName);
@@ -1153,8 +1291,7 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
addRelocationForSection(RE, Value.SectionID);
}
} else if (Arch == Triple::systemz &&
- (RelType == ELF::R_390_PLT32DBL ||
- RelType == ELF::R_390_GOTENT)) {
+ (RelType == ELF::R_390_PLT32DBL || RelType == ELF::R_390_GOTENT)) {
// Create function stubs for both PLT and GOT references, regardless of
// whether the GOT reference is to data or code. The stub contains the
// full address of the symbol, as needed by GOT references, and the
@@ -1179,14 +1316,14 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
uintptr_t BaseAddress = uintptr_t(Section.Address);
uintptr_t StubAlignment = getStubAlignment();
- StubAddress = (BaseAddress + Section.StubOffset +
- StubAlignment - 1) & -StubAlignment;
+ StubAddress = (BaseAddress + Section.StubOffset + StubAlignment - 1) &
+ -StubAlignment;
unsigned StubOffset = StubAddress - BaseAddress;
Stubs[Value] = StubOffset;
createStubFunction((uint8_t *)StubAddress);
- RelocationEntry RE(SectionID, StubOffset + 8,
- ELF::R_390_64, Value.Addend - Addend);
+ RelocationEntry RE(SectionID, StubOffset + 8, ELF::R_390_64,
+ Value.Addend - Addend);
if (Value.SymbolName)
addRelocationForSymbol(RE, Value.SymbolName);
else
@@ -1195,15 +1332,17 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
}
if (RelType == ELF::R_390_GOTENT)
- resolveRelocation(Section, Offset, StubAddress + 8,
- ELF::R_390_PC32DBL, Addend);
+ resolveRelocation(Section, Offset, StubAddress + 8, ELF::R_390_PC32DBL,
+ Addend);
else
resolveRelocation(Section, Offset, StubAddress, RelType, Addend);
} else if (Arch == Triple::x86_64 && RelType == ELF::R_X86_64_PLT32) {
- // The way the PLT relocations normally work is that the linker allocates the
+ // The way the PLT relocations normally work is that the linker allocates
+ // the
// PLT and this relocation makes a PC-relative call into the PLT. The PLT
- // entry will then jump to an address provided by the GOT. On first call, the
- // GOT address will point back into PLT code that resolves the symbol. After
+ // entry will then jump to an address provided by the GOT. On first call,
+ // the
+ // GOT address will point back into PLT code that resolves the symbol. After
// the first call, the GOT entry points to the actual function.
//
// For local functions we're ignoring all of that here and just replacing
@@ -1229,8 +1368,8 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
uintptr_t BaseAddress = uintptr_t(Section.Address);
uintptr_t StubAlignment = getStubAlignment();
- StubAddress = (BaseAddress + Section.StubOffset +
- StubAlignment - 1) & -StubAlignment;
+ StubAddress = (BaseAddress + Section.StubOffset + StubAlignment - 1) &
+ -StubAlignment;
unsigned StubOffset = StubAddress - BaseAddress;
Stubs[Value] = StubOffset;
createStubFunction((uint8_t *)StubAddress);
@@ -1239,8 +1378,8 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
GOTEntries.push_back(Value);
// Make our stub function a relative call to the GOT entry.
- RelocationEntry RE(SectionID, StubOffset + 2,
- ELF::R_X86_64_GOTPCREL, -4);
+ RelocationEntry RE(SectionID, StubOffset + 2, ELF::R_X86_64_GOTPCREL,
+ -4);
addRelocationForSymbol(RE, Value.SymbolName);
// Bump our stub offset counter
@@ -1248,8 +1387,8 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
}
// Make the target call a call into the stub table.
- resolveRelocation(Section, Offset, StubAddress,
- ELF::R_X86_64_PC32, Addend);
+ resolveRelocation(Section, Offset, StubAddress, ELF::R_X86_64_PC32,
+ Addend);
} else {
RelocationEntry RE(SectionID, Offset, ELF::R_X86_64_PC32, Value.Addend,
Value.Offset);
@@ -1265,17 +1404,19 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
else
addRelocationForSection(RE, Value.SectionID);
}
+ return ++RelI;
}
void RuntimeDyldELF::updateGOTEntries(StringRef Name, uint64_t Addr) {
- SmallVectorImpl<std::pair<SID, GOTRelocations> >::iterator it;
- SmallVectorImpl<std::pair<SID, GOTRelocations> >::iterator end = GOTs.end();
+ SmallVectorImpl<std::pair<SID, GOTRelocations>>::iterator it;
+ SmallVectorImpl<std::pair<SID, GOTRelocations>>::iterator end = GOTs.end();
for (it = GOTs.begin(); it != end; ++it) {
GOTRelocations &GOTEntries = it->second;
for (int i = 0, e = GOTEntries.size(); i != e; ++i) {
- if (GOTEntries[i].SymbolName != 0 && GOTEntries[i].SymbolName == Name) {
+ if (GOTEntries[i].SymbolName != nullptr &&
+ GOTEntries[i].SymbolName == Name) {
GOTEntries[i].Offset = Addr;
}
}
@@ -1289,6 +1430,9 @@ size_t RuntimeDyldELF::getGOTEntrySize() {
switch (Arch) {
case Triple::x86_64:
case Triple::aarch64:
+ case Triple::aarch64_be:
+ case Triple::arm64:
+ case Triple::arm64_be:
case Triple::ppc64:
case Triple::ppc64le:
case Triple::systemz:
@@ -1301,18 +1445,19 @@ size_t RuntimeDyldELF::getGOTEntrySize() {
case Triple::mipsel:
Result = sizeof(uint32_t);
break;
- default: llvm_unreachable("Unsupported CPU type!");
+ default:
+ llvm_unreachable("Unsupported CPU type!");
}
return Result;
}
-uint64_t RuntimeDyldELF::findGOTEntry(uint64_t LoadAddress,
- uint64_t Offset) {
+uint64_t RuntimeDyldELF::findGOTEntry(uint64_t LoadAddress, uint64_t Offset) {
const size_t GOTEntrySize = getGOTEntrySize();
- SmallVectorImpl<std::pair<SID, GOTRelocations> >::const_iterator it;
- SmallVectorImpl<std::pair<SID, GOTRelocations> >::const_iterator end = GOTs.end();
+ SmallVectorImpl<std::pair<SID, GOTRelocations>>::const_iterator it;
+ SmallVectorImpl<std::pair<SID, GOTRelocations>>::const_iterator end =
+ GOTs.end();
int GOTIndex = -1;
for (it = GOTs.begin(); it != end; ++it) {
@@ -1322,7 +1467,7 @@ uint64_t RuntimeDyldELF::findGOTEntry(uint64_t LoadAddress,
// Find the matching entry in our vector.
uint64_t SymbolOffset = 0;
for (int i = 0, e = GOTEntries.size(); i != e; ++i) {
- if (GOTEntries[i].SymbolName == 0) {
+ if (!GOTEntries[i].SymbolName) {
if (getSectionLoadAddress(GOTEntries[i].SectionID) == LoadAddress &&
GOTEntries[i].Offset == Offset) {
GOTIndex = i;
@@ -1342,11 +1487,11 @@ uint64_t RuntimeDyldELF::findGOTEntry(uint64_t LoadAddress,
if (GOTIndex != -1) {
if (GOTEntrySize == sizeof(uint64_t)) {
- uint64_t *LocalGOTAddr = (uint64_t*)getSectionAddress(GOTSectionID);
+ uint64_t *LocalGOTAddr = (uint64_t *)getSectionAddress(GOTSectionID);
// Fill in this entry with the address of the symbol being referenced.
LocalGOTAddr[GOTIndex] = LoadAddress + SymbolOffset;
} else {
- uint32_t *LocalGOTAddr = (uint32_t*)getSectionAddress(GOTSectionID);
+ uint32_t *LocalGOTAddr = (uint32_t *)getSectionAddress(GOTSectionID);
// Fill in this entry with the address of the symbol being referenced.
LocalGOTAddr[GOTIndex] = (uint32_t)(LoadAddress + SymbolOffset);
}
@@ -1360,7 +1505,8 @@ uint64_t RuntimeDyldELF::findGOTEntry(uint64_t LoadAddress,
return 0;
}
-void RuntimeDyldELF::finalizeLoad(ObjSectionToIDMap &SectionMap) {
+void RuntimeDyldELF::finalizeLoad(ObjectImage &ObjImg,
+ ObjSectionToIDMap &SectionMap) {
// If necessary, allocate the global offset table
if (MemMgr) {
// Allocate the GOT if necessary
@@ -1380,8 +1526,7 @@ void RuntimeDyldELF::finalizeLoad(ObjSectionToIDMap &SectionMap) {
// needed when GOT-based relocations are applied.
memset(Addr, 0, TotalSize);
}
- }
- else {
+ } else {
report_fatal_error("Unable to allocate memory for GOT!");
}
@@ -1401,6 +1546,12 @@ void RuntimeDyldELF::finalizeLoad(ObjSectionToIDMap &SectionMap) {
bool RuntimeDyldELF::isCompatibleFormat(const ObjectBuffer *Buffer) const {
if (Buffer->getBufferSize() < strlen(ELF::ElfMagic))
return false;
- return (memcmp(Buffer->getBufferStart(), ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0;
+ return (memcmp(Buffer->getBufferStart(), ELF::ElfMagic,
+ strlen(ELF::ElfMagic))) == 0;
+}
+
+bool RuntimeDyldELF::isCompatibleFile(const object::ObjectFile *Obj) const {
+ return Obj->isELF();
}
+
} // namespace llvm
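Several hunks above compute a stub address with (BaseAddress + Section.StubOffset + StubAlignment - 1) & -StubAlignment. A minimal sketch of that align-up idiom on its own, assuming the alignment is a power of two (getStubAlignment returns 8 on SystemZ and 1 elsewhere); alignStubAddress is an invented name:

#include <cassert>
#include <cstdint>

// Round Base + Offset up to the next multiple of Align. -Align is an
// all-ones mask with the low log2(Align) bits cleared, so the AND snaps the
// biased value back down to an aligned boundary.
static uintptr_t alignStubAddress(uintptr_t Base, uintptr_t Offset, uintptr_t Align) {
  assert(Align != 0 && (Align & (Align - 1)) == 0 && "alignment must be a power of two");
  return (Base + Offset + Align - 1) & -Align;
}

int main() {
  assert(alignStubAddress(0x1000, 3, 8) == 0x1008); // SystemZ-style 8-byte stubs
  assert(alignStubAddress(0x1000, 3, 1) == 0x1003); // no extra alignment
  return 0;
}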
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
index 3adf827..59fdfbe 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
@@ -20,70 +20,46 @@
using namespace llvm;
namespace llvm {
-
namespace {
- // Helper for extensive error checking in debug builds.
- error_code Check(error_code Err) {
- if (Err) {
- report_fatal_error(Err.message());
- }
- return Err;
+// Helper for extensive error checking in debug builds.
+std::error_code Check(std::error_code Err) {
+ if (Err) {
+ report_fatal_error(Err.message());
}
+ return Err;
+}
} // end anonymous namespace
class RuntimeDyldELF : public RuntimeDyldImpl {
- void resolveRelocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend,
- uint64_t SymOffset=0);
-
- void resolveX86_64Relocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend,
+ void resolveRelocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend,
+ uint64_t SymOffset = 0);
+
+ void resolveX86_64Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend,
uint64_t SymOffset);
- void resolveX86Relocation(const SectionEntry &Section,
- uint64_t Offset,
- uint32_t Value,
- uint32_t Type,
- int32_t Addend);
-
- void resolveAArch64Relocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend);
-
- void resolveARMRelocation(const SectionEntry &Section,
- uint64_t Offset,
- uint32_t Value,
- uint32_t Type,
- int32_t Addend);
-
- void resolveMIPSRelocation(const SectionEntry &Section,
- uint64_t Offset,
- uint32_t Value,
- uint32_t Type,
- int32_t Addend);
-
- void resolvePPC64Relocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend);
-
- void resolveSystemZRelocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend);
-
- unsigned getMaxStubSize() {
- if (Arch == Triple::aarch64)
+ void resolveX86Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint32_t Value, uint32_t Type, int32_t Addend);
+
+ void resolveAArch64Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ void resolveARMRelocation(const SectionEntry &Section, uint64_t Offset,
+ uint32_t Value, uint32_t Type, int32_t Addend);
+
+ void resolveMIPSRelocation(const SectionEntry &Section, uint64_t Offset,
+ uint32_t Value, uint32_t Type, int32_t Addend);
+
+ void resolvePPC64Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ void resolveSystemZRelocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ unsigned getMaxStubSize() override {
+ if (Arch == Triple::aarch64 || Arch == Triple::arm64 ||
+ Arch == Triple::aarch64_be || Arch == Triple::arm64_be)
return 20; // movz; movk; movk; movk; br
if (Arch == Triple::arm || Arch == Triple::thumb)
return 8; // 32-bit instruction and 32-bit address
@@ -99,24 +75,24 @@ class RuntimeDyldELF : public RuntimeDyldImpl {
return 0;
}
- unsigned getStubAlignment() {
+ unsigned getStubAlignment() override {
if (Arch == Triple::systemz)
return 8;
else
return 1;
}
- uint64_t findPPC64TOC() const;
- void findOPDEntrySection(ObjectImage &Obj,
- ObjSectionToIDMap &LocalSections,
+ void findPPC64TOCSection(ObjectImage &Obj, ObjSectionToIDMap &LocalSections,
+ RelocationValueRef &Rel);
+ void findOPDEntrySection(ObjectImage &Obj, ObjSectionToIDMap &LocalSections,
RelocationValueRef &Rel);
uint64_t findGOTEntry(uint64_t LoadAddr, uint64_t Offset);
size_t getGOTEntrySize();
- virtual void updateGOTEntries(StringRef Name, uint64_t Addr);
+ void updateGOTEntries(StringRef Name, uint64_t Addr) override;
- // Relocation entries for symbols whose position-independant offset is
+ // Relocation entries for symbols whose position-independent offset is
// updated in a global offset table.
typedef SmallVector<RelocationValueRef, 2> GOTRelocations;
GOTRelocations GOTEntries; // List of entries requiring finalization.
@@ -129,22 +105,23 @@ class RuntimeDyldELF : public RuntimeDyldImpl {
SmallVector<SID, 2> RegisteredEHFrameSections;
public:
- RuntimeDyldELF(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm)
- {}
-
- virtual void resolveRelocation(const RelocationEntry &RE, uint64_t Value);
- virtual void processRelocationRef(unsigned SectionID,
- RelocationRef RelI,
- ObjectImage &Obj,
- ObjSectionToIDMap &ObjSectionToID,
- const SymbolTableMap &Symbols,
- StubMap &Stubs);
- virtual bool isCompatibleFormat(const ObjectBuffer *Buffer) const;
- virtual ObjectImage *createObjectImage(ObjectBuffer *InputBuffer);
- virtual void registerEHFrames();
- virtual void deregisterEHFrames();
- virtual void finalizeLoad(ObjSectionToIDMap &SectionMap);
+ RuntimeDyldELF(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm) {}
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override;
+ relocation_iterator
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ ObjectImage &Obj, ObjSectionToIDMap &ObjSectionToID,
+ const SymbolTableMap &Symbols, StubMap &Stubs) override;
+ bool isCompatibleFormat(const ObjectBuffer *Buffer) const override;
+ bool isCompatibleFile(const object::ObjectFile *Buffer) const override;
+ void registerEHFrames() override;
+ void deregisterEHFrames() override;
+ void finalizeLoad(ObjectImage &ObjImg,
+ ObjSectionToIDMap &SectionMap) override;
virtual ~RuntimeDyldELF();
+
+ static ObjectImage *createObjectImage(ObjectBuffer *InputBuffer);
+ static ObjectImage *createObjectImageFromFile(std::unique_ptr<object::ObjectFile> Obj);
};
} // end namespace llvm
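getMaxStubSize above returns 20 bytes for the AArch64 variants because the far-branch stub is five 4-byte instructions (movz; movk; movk; movk; br), and 8 bytes for ARM/Thumb (a 32-bit instruction plus a 32-bit address). A sketch that simply spells out that arithmetic; the constant names are invented:

// Each AArch64/ARM64 instruction is 4 bytes; the stub materialises a 64-bit
// target with movz + three movk and then branches with br.
constexpr unsigned AArch64StubInstructions = 5;
constexpr unsigned AArch64InstructionSize = 4;
constexpr unsigned AArch64StubSize =
    AArch64StubInstructions * AArch64InstructionSize;
static_assert(AArch64StubSize == 20, "movz; movk; movk; movk; br");

// ARM/Thumb stubs above are 8 bytes: one 32-bit instruction plus a 32-bit address.
constexpr unsigned ARMStubSize = 4 + 4;
static_assert(ARMStubSize == 8, "32-bit instruction and 32-bit address");

int main() { return 0; }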
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
index 3014b30..0211d2b 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
@@ -20,6 +20,7 @@
#include "llvm/ADT/Triple.h"
#include "llvm/ExecutionEngine/ObjectImage.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/ExecutionEngine/RuntimeDyldChecker.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -28,8 +29,8 @@
#include "llvm/Support/Mutex.h"
#include "llvm/Support/SwapByteOrder.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/system_error.h"
#include <map>
+#include <system_error>
using namespace llvm;
using namespace llvm::object;
@@ -39,7 +40,6 @@ namespace llvm {
class ObjectBuffer;
class Twine;
-
/// SectionEntry - represents a section emitted into memory by the dynamic
/// linker.
class SectionEntry {
@@ -69,8 +69,9 @@ public:
SectionEntry(StringRef name, uint8_t *address, size_t size,
uintptr_t objAddress)
- : Name(name), Address(address), Size(size), LoadAddress((uintptr_t)address),
- StubOffset(size), ObjAddress(objAddress) {}
+ : Name(name), Address(address), Size(size),
+ LoadAddress((uintptr_t)address), StubOffset(size),
+ ObjAddress(objAddress) {}
};
/// RelocationEntry - used to represent relocations internally in the dynamic
@@ -90,9 +91,17 @@ public:
/// used to make a relocation section relative instead of symbol relative.
int64_t Addend;
+ struct SectionPair {
+ uint32_t SectionA;
+ uint32_t SectionB;
+ };
+
/// SymOffset - Section offset of the relocation entry's symbol (used for GOT
/// lookup).
- uint64_t SymOffset;
+ union {
+ uint64_t SymOffset;
+ SectionPair Sections;
+ };
/// True if this is a PCRel relocation (MachO specific).
bool IsPCRel;
@@ -101,33 +110,44 @@ public:
unsigned Size;
RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend)
- : SectionID(id), Offset(offset), RelType(type), Addend(addend),
- SymOffset(0), IsPCRel(false), Size(0) {}
+ : SectionID(id), Offset(offset), RelType(type), Addend(addend),
+ SymOffset(0), IsPCRel(false), Size(0) {}
RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
uint64_t symoffset)
- : SectionID(id), Offset(offset), RelType(type), Addend(addend),
- SymOffset(symoffset), IsPCRel(false), Size(0) {}
+ : SectionID(id), Offset(offset), RelType(type), Addend(addend),
+ SymOffset(symoffset), IsPCRel(false), Size(0) {}
RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
bool IsPCRel, unsigned Size)
- : SectionID(id), Offset(offset), RelType(type), Addend(addend),
- SymOffset(0), IsPCRel(IsPCRel), Size(Size) {}
+ : SectionID(id), Offset(offset), RelType(type), Addend(addend),
+ SymOffset(0), IsPCRel(IsPCRel), Size(Size) {}
+
+ RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
+ unsigned SectionA, uint64_t SectionAOffset, unsigned SectionB,
+ uint64_t SectionBOffset, bool IsPCRel, unsigned Size)
+ : SectionID(id), Offset(offset), RelType(type),
+ Addend(SectionAOffset - SectionBOffset + addend), IsPCRel(IsPCRel),
+ Size(Size) {
+ Sections.SectionA = SectionA;
+ Sections.SectionB = SectionB;
+ }
};
class RelocationValueRef {
public:
- unsigned SectionID;
- uint64_t Offset;
- int64_t Addend;
+ unsigned SectionID;
+ uint64_t Offset;
+ int64_t Addend;
const char *SymbolName;
- RelocationValueRef(): SectionID(0), Offset(0), Addend(0), SymbolName(0) {}
+ RelocationValueRef() : SectionID(0), Offset(0), Addend(0),
+ SymbolName(nullptr) {}
inline bool operator==(const RelocationValueRef &Other) const {
return SectionID == Other.SectionID && Offset == Other.Offset &&
Addend == Other.Addend && SymbolName == Other.SymbolName;
}
- inline bool operator <(const RelocationValueRef &Other) const {
+ inline bool operator<(const RelocationValueRef &Other) const {
if (SectionID != Other.SectionID)
return SectionID < Other.SectionID;
if (Offset != Other.Offset)
@@ -139,6 +159,15 @@ public:
};
class RuntimeDyldImpl {
+ friend class RuntimeDyldChecker;
+private:
+
+ uint64_t getAnySymbolRemoteAddress(StringRef Symbol) {
+ if (uint64_t InternalSymbolAddr = getSymbolLoadAddress(Symbol))
+ return InternalSymbolAddr;
+ return MemMgr->getSymbolAddress(Symbol);
+ }
+
protected:
// The MemoryManager to load objects into.
RTDyldMemoryManager *MemMgr;
@@ -149,7 +178,7 @@ protected:
SectionList Sections;
typedef unsigned SID; // Type for SectionIDs
- #define RTDYLD_INVALID_SECTION_ID ((SID)(-1))
+#define RTDYLD_INVALID_SECTION_ID ((SID)(-1))
// Keep a map of sections from object file to the SectionID which
// references it.
@@ -187,6 +216,10 @@ protected:
Triple::ArchType Arch;
bool IsTargetLittleEndian;
+ // True if all sections should be passed to the memory manager, false if only
+ // sections containing relocations should be. Defaults to 'false'.
+ bool ProcessAllSections;
+
// This mutex prevents simultaneously loading objects from two different
// threads. This keeps us from having to protect individual data structures
// and guarantees that section allocation requests to the memory manager
@@ -217,52 +250,49 @@ protected:
}
uint8_t *getSectionAddress(unsigned SectionID) {
- return (uint8_t*)Sections[SectionID].Address;
+ return (uint8_t *)Sections[SectionID].Address;
}
void writeInt16BE(uint8_t *Addr, uint16_t Value) {
if (IsTargetLittleEndian)
- Value = sys::SwapByteOrder(Value);
- *Addr = (Value >> 8) & 0xFF;
- *(Addr+1) = Value & 0xFF;
+ sys::swapByteOrder(Value);
+ *Addr = (Value >> 8) & 0xFF;
+ *(Addr + 1) = Value & 0xFF;
}
void writeInt32BE(uint8_t *Addr, uint32_t Value) {
if (IsTargetLittleEndian)
- Value = sys::SwapByteOrder(Value);
- *Addr = (Value >> 24) & 0xFF;
- *(Addr+1) = (Value >> 16) & 0xFF;
- *(Addr+2) = (Value >> 8) & 0xFF;
- *(Addr+3) = Value & 0xFF;
+ sys::swapByteOrder(Value);
+ *Addr = (Value >> 24) & 0xFF;
+ *(Addr + 1) = (Value >> 16) & 0xFF;
+ *(Addr + 2) = (Value >> 8) & 0xFF;
+ *(Addr + 3) = Value & 0xFF;
}
void writeInt64BE(uint8_t *Addr, uint64_t Value) {
if (IsTargetLittleEndian)
- Value = sys::SwapByteOrder(Value);
- *Addr = (Value >> 56) & 0xFF;
- *(Addr+1) = (Value >> 48) & 0xFF;
- *(Addr+2) = (Value >> 40) & 0xFF;
- *(Addr+3) = (Value >> 32) & 0xFF;
- *(Addr+4) = (Value >> 24) & 0xFF;
- *(Addr+5) = (Value >> 16) & 0xFF;
- *(Addr+6) = (Value >> 8) & 0xFF;
- *(Addr+7) = Value & 0xFF;
+ sys::swapByteOrder(Value);
+ *Addr = (Value >> 56) & 0xFF;
+ *(Addr + 1) = (Value >> 48) & 0xFF;
+ *(Addr + 2) = (Value >> 40) & 0xFF;
+ *(Addr + 3) = (Value >> 32) & 0xFF;
+ *(Addr + 4) = (Value >> 24) & 0xFF;
+ *(Addr + 5) = (Value >> 16) & 0xFF;
+ *(Addr + 6) = (Value >> 8) & 0xFF;
+ *(Addr + 7) = Value & 0xFF;
}
/// \brief Given the common symbols discovered in the object file, emit a
/// new section for them and update the symbol mappings in the object and
/// symbol table.
- void emitCommonSymbols(ObjectImage &Obj,
- const CommonSymbolMap &CommonSymbols,
- uint64_t TotalSize,
- SymbolTableMap &SymbolTable);
+ void emitCommonSymbols(ObjectImage &Obj, const CommonSymbolMap &CommonSymbols,
+ uint64_t TotalSize, SymbolTableMap &SymbolTable);
/// \brief Emits section data from the object file to the MemoryManager.
/// \param IsCode if it's true then allocateCodeSection() will be
/// used for emits, else allocateDataSection() will be used.
/// \return SectionID.
- unsigned emitSection(ObjectImage &Obj,
- const SectionRef &Section,
+ unsigned emitSection(ObjectImage &Obj, const SectionRef &Section,
bool IsCode);
  /// \brief Find Section in LocalSections. If the section is not found - emit
@@ -270,10 +300,8 @@ protected:
/// \param IsCode if it's true then allocateCodeSection() will be
  /// used for emits, else allocateDataSection() will be used.
/// \return SectionID.
- unsigned findOrEmitSection(ObjectImage &Obj,
- const SectionRef &Section,
- bool IsCode,
- ObjSectionToIDMap &LocalSections);
+ unsigned findOrEmitSection(ObjectImage &Obj, const SectionRef &Section,
+ bool IsCode, ObjSectionToIDMap &LocalSections);
// \brief Add a relocation entry that uses the given section.
void addRelocationForSection(const RelocationEntry &RE, unsigned SectionID);
@@ -284,7 +312,7 @@ protected:
/// \brief Emits long jump instruction to Addr.
/// \return Pointer to the memory area for emitting target address.
- uint8_t* createStubFunction(uint8_t *Addr);
+ uint8_t *createStubFunction(uint8_t *Addr, unsigned AbiVariant = 0);
/// \brief Resolves relocations from Relocs list with address from Value.
void resolveRelocationList(const RelocationList &Relocs, uint64_t Value);
@@ -294,14 +322,14 @@ protected:
/// \param Value Target symbol address to apply the relocation action
virtual void resolveRelocation(const RelocationEntry &RE, uint64_t Value) = 0;
- /// \brief Parses the object file relocation and stores it to Relocations
- /// or SymbolRelocations (this depends on the object file type).
- virtual void processRelocationRef(unsigned SectionID,
- RelocationRef RelI,
- ObjectImage &Obj,
- ObjSectionToIDMap &ObjSectionToID,
- const SymbolTableMap &Symbols,
- StubMap &Stubs) = 0;
+ /// \brief Parses one or more object file relocations (some object files use
+ /// relocation pairs) and stores it to Relocations or SymbolRelocations
+ /// (this depends on the object file type).
+ /// \return Iterator to the next relocation that needs to be parsed.
+ virtual relocation_iterator
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ ObjectImage &Obj, ObjSectionToIDMap &ObjSectionToID,
+ const SymbolTableMap &Symbols, StubMap &Stubs) = 0;
/// \brief Resolve relocations to external symbols.
void resolveExternalSymbols();
@@ -310,20 +338,34 @@ protected:
// The base class does nothing. ELF overrides this.
virtual void updateGOTEntries(StringRef Name, uint64_t Addr) {}
- virtual ObjectImage *createObjectImage(ObjectBuffer *InputBuffer);
+ // \brief Compute an upper bound of the memory that is required to load all
+ // sections
+ void computeTotalAllocSize(ObjectImage &Obj, uint64_t &CodeSize,
+ uint64_t &DataSizeRO, uint64_t &DataSizeRW);
+
+ // \brief Compute the stub buffer size required for a section
+ unsigned computeSectionStubBufSize(ObjectImage &Obj,
+ const SectionRef &Section);
+
public:
- RuntimeDyldImpl(RTDyldMemoryManager *mm) : MemMgr(mm), HasError(false) {}
+ RuntimeDyldImpl(RTDyldMemoryManager *mm)
+ : MemMgr(mm), ProcessAllSections(false), HasError(false) {
+ }
virtual ~RuntimeDyldImpl();
- ObjectImage *loadObject(ObjectBuffer *InputBuffer);
+ void setProcessAllSections(bool ProcessAllSections) {
+ this->ProcessAllSections = ProcessAllSections;
+ }
+
+ ObjectImage *loadObject(ObjectImage *InputObject);
- void *getSymbolAddress(StringRef Name) {
+ uint8_t* getSymbolAddress(StringRef Name) {
// FIXME: Just look up as a function for now. Overly simple of course.
// Work in progress.
SymbolTableMap::const_iterator pos = GlobalSymbolTable.find(Name);
if (pos == GlobalSymbolTable.end())
- return 0;
+ return nullptr;
SymbolLoc Loc = pos->second;
return getSectionAddress(Loc.first) + Loc.second;
}
@@ -354,15 +396,15 @@ public:
StringRef getErrorString() { return ErrorStr; }
virtual bool isCompatibleFormat(const ObjectBuffer *Buffer) const = 0;
+ virtual bool isCompatibleFile(const ObjectFile *Obj) const = 0;
virtual void registerEHFrames();
virtual void deregisterEHFrames();
- virtual void finalizeLoad(ObjSectionToIDMap &SectionMap) {}
+ virtual void finalizeLoad(ObjectImage &ObjImg, ObjSectionToIDMap &SectionMap) {}
};
} // end namespace llvm
-
#endif
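The writeInt16BE/32BE/64BE helpers earlier in this header swap the value when the target is little-endian and then store it one byte at a time, so the write needs no alignment guarantee. A stripped-down sketch of the 32-bit case that leaves out the endianness swap; storeInt32BE is an invented name:

#include <cassert>
#include <cstdint>

// Store Value at Addr in big-endian byte order, one byte at a time, so Addr
// does not have to be 4-byte aligned.
static void storeInt32BE(uint8_t *Addr, uint32_t Value) {
  Addr[0] = (Value >> 24) & 0xFF;
  Addr[1] = (Value >> 16) & 0xFF;
  Addr[2] = (Value >> 8) & 0xFF;
  Addr[3] = Value & 0xFF;
}

int main() {
  uint8_t Buf[4];
  storeInt32BE(Buf, 0x11223344);
  assert(Buf[0] == 0x11 && Buf[1] == 0x22 && Buf[2] == 0x33 && Buf[3] == 0x44);
  return 0;
}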
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
index 5b92867..58fb515 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
@@ -11,28 +11,148 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "dyld"
#include "RuntimeDyldMachO.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
+
+#include "Targets/RuntimeDyldMachOARM.h"
+#include "Targets/RuntimeDyldMachOAArch64.h"
+#include "Targets/RuntimeDyldMachOI386.h"
+#include "Targets/RuntimeDyldMachOX86_64.h"
+
using namespace llvm;
using namespace llvm::object;
+#define DEBUG_TYPE "dyld"
+
namespace llvm {
-static unsigned char *processFDE(unsigned char *P, intptr_t DeltaForText, intptr_t DeltaForEH) {
- uint32_t Length = *((uint32_t*)P);
+uint64_t RuntimeDyldMachO::decodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
+ uint32_t RelType) const {
+ uint64_t Addend = 0;
+ memcpy(&Addend, LocalAddress, NumBytes);
+ return Addend;
+}
+
+RelocationValueRef RuntimeDyldMachO::getRelocationValueRef(
+ ObjectImage &ObjImg, const relocation_iterator &RI,
+ const RelocationEntry &RE, ObjSectionToIDMap &ObjSectionToID,
+ const SymbolTableMap &Symbols) {
+
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RI->getRawDataRefImpl());
+ RelocationValueRef Value;
+
+ bool IsExternal = Obj.getPlainRelocationExternal(RelInfo);
+ if (IsExternal) {
+ symbol_iterator Symbol = RI->getSymbol();
+ StringRef TargetName;
+ Symbol->getName(TargetName);
+ SymbolTableMap::const_iterator SI = Symbols.find(TargetName.data());
+ if (SI != Symbols.end()) {
+ Value.SectionID = SI->second.first;
+ Value.Addend = SI->second.second + RE.Addend;
+ } else {
+ SI = GlobalSymbolTable.find(TargetName.data());
+ if (SI != GlobalSymbolTable.end()) {
+ Value.SectionID = SI->second.first;
+ Value.Addend = SI->second.second + RE.Addend;
+ } else {
+ Value.SymbolName = TargetName.data();
+ Value.Addend = RE.Addend;
+ }
+ }
+ } else {
+ SectionRef Sec = Obj.getRelocationSection(RelInfo);
+ bool IsCode = false;
+ Sec.isText(IsCode);
+ Value.SectionID = findOrEmitSection(ObjImg, Sec, IsCode, ObjSectionToID);
+ uint64_t Addr;
+ Sec.getAddress(Addr);
+ Value.Addend = RE.Addend - Addr;
+ }
+
+ return Value;
+}
+
+void RuntimeDyldMachO::makeValueAddendPCRel(RelocationValueRef &Value,
+ ObjectImage &ObjImg,
+ const relocation_iterator &RI) {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RI->getRawDataRefImpl());
+
+ bool IsPCRel = Obj.getAnyRelocationPCRel(RelInfo);
+ if (IsPCRel) {
+ uint64_t RelocAddr = 0;
+ RI->getAddress(RelocAddr);
+ unsigned RelocSize = Obj.getAnyRelocationLength(RelInfo);
+ Value.Addend += RelocAddr + (1ULL << RelocSize);
+ }
+}
+
+void RuntimeDyldMachO::dumpRelocationToResolve(const RelocationEntry &RE,
+ uint64_t Value) const {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.Address + RE.Offset;
+ uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
+
+ dbgs() << "resolveRelocation Section: " << RE.SectionID
+ << " LocalAddress: " << format("%p", LocalAddress)
+ << " FinalAddress: " << format("%p", FinalAddress)
+ << " Value: " << format("%p", Value) << " Addend: " << RE.Addend
+ << " isPCRel: " << RE.IsPCRel << " MachoType: " << RE.RelType
+ << " Size: " << (1 << RE.Size) << "\n";
+}
+
+bool RuntimeDyldMachO::writeBytesUnaligned(uint8_t *Addr, uint64_t Value,
+ unsigned Size) {
+ for (unsigned i = 0; i < Size; ++i) {
+ *Addr++ = (uint8_t)Value;
+ Value >>= 8;
+ }
+
+ return false;
+}
+
+bool
+RuntimeDyldMachO::isCompatibleFormat(const ObjectBuffer *InputBuffer) const {
+ if (InputBuffer->getBufferSize() < 4)
+ return false;
+ StringRef Magic(InputBuffer->getBufferStart(), 4);
+ if (Magic == "\xFE\xED\xFA\xCE")
+ return true;
+ if (Magic == "\xCE\xFA\xED\xFE")
+ return true;
+ if (Magic == "\xFE\xED\xFA\xCF")
+ return true;
+ if (Magic == "\xCF\xFA\xED\xFE")
+ return true;
+ return false;
+}
+
+bool RuntimeDyldMachO::isCompatibleFile(const object::ObjectFile *Obj) const {
+ return Obj->isMachO();
+}
+
+static unsigned char *processFDE(unsigned char *P, intptr_t DeltaForText,
+ intptr_t DeltaForEH) {
+ DEBUG(dbgs() << "Processing FDE: Delta for text: " << DeltaForText
+ << ", Delta for EH: " << DeltaForEH << "\n");
+ uint32_t Length = *((uint32_t *)P);
P += 4;
unsigned char *Ret = P + Length;
- uint32_t Offset = *((uint32_t*)P);
+ uint32_t Offset = *((uint32_t *)P);
if (Offset == 0) // is a CIE
return Ret;
P += 4;
- intptr_t FDELocation = *((intptr_t*)P);
+ intptr_t FDELocation = *((intptr_t *)P);
intptr_t NewLocation = FDELocation - DeltaForText;
- *((intptr_t*)P) = NewLocation;
+ *((intptr_t *)P) = NewLocation;
P += sizeof(intptr_t);
// Skip the FDE address range
@@ -41,16 +161,16 @@ static unsigned char *processFDE(unsigned char *P, intptr_t DeltaForText, intptr
uint8_t Augmentationsize = *P;
P += 1;
if (Augmentationsize != 0) {
- intptr_t LSDA = *((intptr_t*)P);
+ intptr_t LSDA = *((intptr_t *)P);
intptr_t NewLSDA = LSDA - DeltaForEH;
- *((intptr_t*)P) = NewLSDA;
+ *((intptr_t *)P) = NewLSDA;
}
return Ret;
}
static intptr_t computeDelta(SectionEntry *A, SectionEntry *B) {
- intptr_t ObjDistance = A->ObjAddress - B->ObjAddress;
+ intptr_t ObjDistance = A->ObjAddress - B->ObjAddress;
intptr_t MemDistance = A->LoadAddress - B->LoadAddress;
return ObjDistance - MemDistance;
}
@@ -66,7 +186,7 @@ void RuntimeDyldMachO::registerEHFrames() {
continue;
SectionEntry *Text = &Sections[SectionInfo.TextSID];
SectionEntry *EHFrame = &Sections[SectionInfo.EHFrameSID];
- SectionEntry *ExceptTab = NULL;
+ SectionEntry *ExceptTab = nullptr;
if (SectionInfo.ExceptTabSID != RTDYLD_INVALID_SECTION_ID)
ExceptTab = &Sections[SectionInfo.ExceptTabSID];
@@ -77,382 +197,27 @@ void RuntimeDyldMachO::registerEHFrames() {
unsigned char *P = EHFrame->Address;
unsigned char *End = P + EHFrame->Size;
- do {
+ do {
P = processFDE(P, DeltaForText, DeltaForEH);
- } while(P != End);
+ } while (P != End);
- MemMgr->registerEHFrames(EHFrame->Address,
- EHFrame->LoadAddress,
+ MemMgr->registerEHFrames(EHFrame->Address, EHFrame->LoadAddress,
EHFrame->Size);
}
UnregisteredEHFrameSections.clear();
}
-void RuntimeDyldMachO::finalizeLoad(ObjSectionToIDMap &SectionMap) {
- unsigned EHFrameSID = RTDYLD_INVALID_SECTION_ID;
- unsigned TextSID = RTDYLD_INVALID_SECTION_ID;
- unsigned ExceptTabSID = RTDYLD_INVALID_SECTION_ID;
- ObjSectionToIDMap::iterator i, e;
- for (i = SectionMap.begin(), e = SectionMap.end(); i != e; ++i) {
- const SectionRef &Section = i->first;
- StringRef Name;
- Section.getName(Name);
- if (Name == "__eh_frame")
- EHFrameSID = i->second;
- else if (Name == "__text")
- TextSID = i->second;
- else if (Name == "__gcc_except_tab")
- ExceptTabSID = i->second;
- }
- UnregisteredEHFrameSections.push_back(EHFrameRelatedSections(EHFrameSID,
- TextSID,
- ExceptTabSID));
-}
-
-// The target location for the relocation is described by RE.SectionID and
-// RE.Offset. RE.SectionID can be used to find the SectionEntry. Each
-// SectionEntry has three members describing its location.
-// SectionEntry::Address is the address at which the section has been loaded
-// into memory in the current (host) process. SectionEntry::LoadAddress is the
-// address that the section will have in the target process.
-// SectionEntry::ObjAddress is the address of the bits for this section in the
-// original emitted object image (also in the current address space).
-//
-// Relocations will be applied as if the section were loaded at
-// SectionEntry::LoadAddress, but they will be applied at an address based
-// on SectionEntry::Address. SectionEntry::ObjAddress will be used to refer to
-// Target memory contents if they are required for value calculations.
-//
-// The Value parameter here is the load address of the symbol for the
-// relocation to be applied. For relocations which refer to symbols in the
-// current object Value will be the LoadAddress of the section in which
-// the symbol resides (RE.Addend provides additional information about the
-// symbol location). For external symbols, Value will be the address of the
-// symbol in the target address space.
-void RuntimeDyldMachO::resolveRelocation(const RelocationEntry &RE,
- uint64_t Value) {
- const SectionEntry &Section = Sections[RE.SectionID];
- return resolveRelocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
- RE.IsPCRel, RE.Size);
-}
-
-void RuntimeDyldMachO::resolveRelocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend,
- bool isPCRel,
- unsigned LogSize) {
- uint8_t *LocalAddress = Section.Address + Offset;
- uint64_t FinalAddress = Section.LoadAddress + Offset;
- unsigned MachoType = Type;
- unsigned Size = 1 << LogSize;
-
- DEBUG(dbgs() << "resolveRelocation LocalAddress: "
- << format("%p", LocalAddress)
- << " FinalAddress: " << format("%p", FinalAddress)
- << " Value: " << format("%p", Value)
- << " Addend: " << Addend
- << " isPCRel: " << isPCRel
- << " MachoType: " << MachoType
- << " Size: " << Size
- << "\n");
-
- // This just dispatches to the proper target specific routine.
+std::unique_ptr<RuntimeDyldMachO>
+llvm::RuntimeDyldMachO::create(Triple::ArchType Arch, RTDyldMemoryManager *MM) {
switch (Arch) {
- default: llvm_unreachable("Unsupported CPU type!");
- case Triple::x86_64:
- resolveX86_64Relocation(LocalAddress,
- FinalAddress,
- (uintptr_t)Value,
- isPCRel,
- MachoType,
- Size,
- Addend);
- break;
- case Triple::x86:
- resolveI386Relocation(LocalAddress,
- FinalAddress,
- (uintptr_t)Value,
- isPCRel,
- MachoType,
- Size,
- Addend);
- break;
- case Triple::arm: // Fall through.
- case Triple::thumb:
- resolveARMRelocation(LocalAddress,
- FinalAddress,
- (uintptr_t)Value,
- isPCRel,
- MachoType,
- Size,
- Addend);
- break;
- }
-}
-
-bool RuntimeDyldMachO::resolveI386Relocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
- uint64_t Value,
- bool isPCRel,
- unsigned Type,
- unsigned Size,
- int64_t Addend) {
- if (isPCRel)
- Value -= FinalAddress + 4; // see resolveX86_64Relocation
-
- switch (Type) {
- default:
- llvm_unreachable("Invalid relocation type!");
- case MachO::GENERIC_RELOC_VANILLA: {
- uint8_t *p = LocalAddress;
- uint64_t ValueToWrite = Value + Addend;
- for (unsigned i = 0; i < Size; ++i) {
- *p++ = (uint8_t)(ValueToWrite & 0xff);
- ValueToWrite >>= 8;
- }
- return false;
- }
- case MachO::GENERIC_RELOC_SECTDIFF:
- case MachO::GENERIC_RELOC_LOCAL_SECTDIFF:
- case MachO::GENERIC_RELOC_PB_LA_PTR:
- return Error("Relocation type not implemented yet!");
- }
-}
-
-bool RuntimeDyldMachO::resolveX86_64Relocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
- uint64_t Value,
- bool isPCRel,
- unsigned Type,
- unsigned Size,
- int64_t Addend) {
- // If the relocation is PC-relative, the value to be encoded is the
- // pointer difference.
- if (isPCRel)
- // FIXME: It seems this value needs to be adjusted by 4 for an effective PC
- // address. Is that expected? Only for branches, perhaps?
- Value -= FinalAddress + 4;
-
- switch(Type) {
- default:
- llvm_unreachable("Invalid relocation type!");
- case MachO::X86_64_RELOC_SIGNED_1:
- case MachO::X86_64_RELOC_SIGNED_2:
- case MachO::X86_64_RELOC_SIGNED_4:
- case MachO::X86_64_RELOC_SIGNED:
- case MachO::X86_64_RELOC_UNSIGNED:
- case MachO::X86_64_RELOC_BRANCH: {
- Value += Addend;
- // Mask in the target value a byte at a time (we don't have an alignment
- // guarantee for the target address, so this is safest).
- uint8_t *p = (uint8_t*)LocalAddress;
- for (unsigned i = 0; i < Size; ++i) {
- *p++ = (uint8_t)Value;
- Value >>= 8;
- }
- return false;
- }
- case MachO::X86_64_RELOC_GOT_LOAD:
- case MachO::X86_64_RELOC_GOT:
- case MachO::X86_64_RELOC_SUBTRACTOR:
- case MachO::X86_64_RELOC_TLV:
- return Error("Relocation type not implemented yet!");
- }
-}
-
-bool RuntimeDyldMachO::resolveARMRelocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
- uint64_t Value,
- bool isPCRel,
- unsigned Type,
- unsigned Size,
- int64_t Addend) {
- // If the relocation is PC-relative, the value to be encoded is the
- // pointer difference.
- if (isPCRel) {
- Value -= FinalAddress;
- // ARM PCRel relocations have an effective-PC offset of two instructions
- // (four bytes in Thumb mode, 8 bytes in ARM mode).
- // FIXME: For now, assume ARM mode.
- Value -= 8;
- }
-
- switch(Type) {
default:
- llvm_unreachable("Invalid relocation type!");
- case MachO::ARM_RELOC_VANILLA: {
- // Mask in the target value a byte at a time (we don't have an alignment
- // guarantee for the target address, so this is safest).
- uint8_t *p = (uint8_t*)LocalAddress;
- for (unsigned i = 0; i < Size; ++i) {
- *p++ = (uint8_t)Value;
- Value >>= 8;
- }
+ llvm_unreachable("Unsupported target for RuntimeDyldMachO.");
break;
+ case Triple::arm: return make_unique<RuntimeDyldMachOARM>(MM);
+ case Triple::arm64: return make_unique<RuntimeDyldMachOAArch64>(MM);
+ case Triple::x86: return make_unique<RuntimeDyldMachOI386>(MM);
+ case Triple::x86_64: return make_unique<RuntimeDyldMachOX86_64>(MM);
}
- case MachO::ARM_RELOC_BR24: {
- // Mask the value into the target address. We know instructions are
- // 32-bit aligned, so we can do it all at once.
- uint32_t *p = (uint32_t*)LocalAddress;
- // The low two bits of the value are not encoded.
- Value >>= 2;
- // Mask the value to 24 bits.
- Value &= 0xffffff;
- // FIXME: If the destination is a Thumb function (and the instruction
- // is a non-predicated BL instruction), we need to change it to a BLX
- // instruction instead.
-
- // Insert the value into the instruction.
- *p = (*p & ~0xffffff) | Value;
- break;
- }
- case MachO::ARM_THUMB_RELOC_BR22:
- case MachO::ARM_THUMB_32BIT_BRANCH:
- case MachO::ARM_RELOC_HALF:
- case MachO::ARM_RELOC_HALF_SECTDIFF:
- case MachO::ARM_RELOC_PAIR:
- case MachO::ARM_RELOC_SECTDIFF:
- case MachO::ARM_RELOC_LOCAL_SECTDIFF:
- case MachO::ARM_RELOC_PB_LA_PTR:
- return Error("Relocation type not implemented yet!");
- }
- return false;
-}
-
-void RuntimeDyldMachO::processRelocationRef(unsigned SectionID,
- RelocationRef RelI,
- ObjectImage &Obj,
- ObjSectionToIDMap &ObjSectionToID,
- const SymbolTableMap &Symbols,
- StubMap &Stubs) {
- const ObjectFile *OF = Obj.getObjectFile();
- const MachOObjectFile *MachO = static_cast<const MachOObjectFile*>(OF);
- MachO::any_relocation_info RE= MachO->getRelocation(RelI.getRawDataRefImpl());
-
- uint32_t RelType = MachO->getAnyRelocationType(RE);
-
- // FIXME: Properly handle scattered relocations.
- // For now, optimistically skip these: they can often be ignored, as
- // the static linker will already have applied the relocation, and it
- // only needs to be reapplied if symbols move relative to one another.
- // Note: This will fail horribly where the relocations *do* need to be
- // applied, but that was already the case.
- if (MachO->isRelocationScattered(RE))
- return;
-
- RelocationValueRef Value;
- SectionEntry &Section = Sections[SectionID];
-
- bool isExtern = MachO->getPlainRelocationExternal(RE);
- bool IsPCRel = MachO->getAnyRelocationPCRel(RE);
- unsigned Size = MachO->getAnyRelocationLength(RE);
- uint64_t Offset;
- RelI.getOffset(Offset);
- uint8_t *LocalAddress = Section.Address + Offset;
- unsigned NumBytes = 1 << Size;
- uint64_t Addend = 0;
- memcpy(&Addend, LocalAddress, NumBytes);
-
- if (isExtern) {
- // Obtain the symbol name which is referenced in the relocation
- symbol_iterator Symbol = RelI.getSymbol();
- StringRef TargetName;
- Symbol->getName(TargetName);
- // First search for the symbol in the local symbol table
- SymbolTableMap::const_iterator lsi = Symbols.find(TargetName.data());
- if (lsi != Symbols.end()) {
- Value.SectionID = lsi->second.first;
- Value.Addend = lsi->second.second + Addend;
- } else {
- // Search for the symbol in the global symbol table
- SymbolTableMap::const_iterator gsi = GlobalSymbolTable.find(TargetName.data());
- if (gsi != GlobalSymbolTable.end()) {
- Value.SectionID = gsi->second.first;
- Value.Addend = gsi->second.second + Addend;
- } else {
- Value.SymbolName = TargetName.data();
- Value.Addend = Addend;
- }
- }
- } else {
- SectionRef Sec = MachO->getRelocationSection(RE);
- Value.SectionID = findOrEmitSection(Obj, Sec, true, ObjSectionToID);
- uint64_t Addr;
- Sec.getAddress(Addr);
- Value.Addend = Addend - Addr;
- }
-
- if (Arch == Triple::x86_64 && (RelType == MachO::X86_64_RELOC_GOT ||
- RelType == MachO::X86_64_RELOC_GOT_LOAD)) {
- assert(IsPCRel);
- assert(Size == 2);
- StubMap::const_iterator i = Stubs.find(Value);
- uint8_t *Addr;
- if (i != Stubs.end()) {
- Addr = Section.Address + i->second;
- } else {
- Stubs[Value] = Section.StubOffset;
- uint8_t *GOTEntry = Section.Address + Section.StubOffset;
- RelocationEntry RE(SectionID, Section.StubOffset,
- MachO::X86_64_RELOC_UNSIGNED, 0, false, 3);
- if (Value.SymbolName)
- addRelocationForSymbol(RE, Value.SymbolName);
- else
- addRelocationForSection(RE, Value.SectionID);
- Section.StubOffset += 8;
- Addr = GOTEntry;
- }
- resolveRelocation(Section, Offset, (uint64_t)Addr,
- MachO::X86_64_RELOC_UNSIGNED, Value.Addend, true, 2);
- } else if (Arch == Triple::arm &&
- (RelType & 0xf) == MachO::ARM_RELOC_BR24) {
- // This is an ARM branch relocation, need to use a stub function.
-
- // Look up for existing stub.
- StubMap::const_iterator i = Stubs.find(Value);
- if (i != Stubs.end())
- resolveRelocation(Section, Offset,
- (uint64_t)Section.Address + i->second,
- RelType, 0, IsPCRel, Size);
- else {
- // Create a new stub function.
- Stubs[Value] = Section.StubOffset;
- uint8_t *StubTargetAddr = createStubFunction(Section.Address +
- Section.StubOffset);
- RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
- MachO::GENERIC_RELOC_VANILLA, Value.Addend);
- if (Value.SymbolName)
- addRelocationForSymbol(RE, Value.SymbolName);
- else
- addRelocationForSection(RE, Value.SectionID);
- resolveRelocation(Section, Offset,
- (uint64_t)Section.Address + Section.StubOffset,
- RelType, 0, IsPCRel, Size);
- Section.StubOffset += getMaxStubSize();
- }
- } else {
- RelocationEntry RE(SectionID, Offset, RelType, Value.Addend,
- IsPCRel, Size);
- if (Value.SymbolName)
- addRelocationForSymbol(RE, Value.SymbolName);
- else
- addRelocationForSection(RE, Value.SectionID);
- }
-}
-
-
-bool RuntimeDyldMachO::isCompatibleFormat(
- const ObjectBuffer *InputBuffer) const {
- if (InputBuffer->getBufferSize() < 4)
- return false;
- StringRef Magic(InputBuffer->getBufferStart(), 4);
- if (Magic == "\xFE\xED\xFA\xCE") return true;
- if (Magic == "\xCE\xFA\xED\xFE") return true;
- if (Magic == "\xFE\xED\xFA\xCF") return true;
- if (Magic == "\xCF\xFA\xED\xFE") return true;
- return false;
}
} // end namespace llvm
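writeBytesUnaligned above stores the least-significant Size bytes of Value one byte at a time, giving a little-endian result without requiring Addr to be aligned. The same loop as a self-contained sketch:

#include <cassert>
#include <cstdint>

// Write the low 'Size' bytes of Value to Addr, least-significant byte first.
static void writeBytesUnaligned(uint8_t *Addr, uint64_t Value, unsigned Size) {
  for (unsigned i = 0; i < Size; ++i) {
    *Addr++ = (uint8_t)Value;
    Value >>= 8;
  }
}

int main() {
  uint8_t Buf[8] = {};
  writeBytesUnaligned(Buf, 0x0102030405060708ULL, 4);
  assert(Buf[0] == 0x08 && Buf[1] == 0x07 && Buf[2] == 0x06 && Buf[3] == 0x05);
  return 0;
}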
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
index bbf6aa9..7d1dc02 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
@@ -14,66 +14,31 @@
#ifndef LLVM_RUNTIME_DYLD_MACHO_H
#define LLVM_RUNTIME_DYLD_MACHO_H
+#include "ObjectImageCommon.h"
#include "RuntimeDyldImpl.h"
-#include "llvm/ADT/IndexedMap.h"
#include "llvm/Object/MachO.h"
#include "llvm/Support/Format.h"
+#define DEBUG_TYPE "dyld"
+
using namespace llvm;
using namespace llvm::object;
-
namespace llvm {
class RuntimeDyldMachO : public RuntimeDyldImpl {
- bool resolveI386Relocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
- uint64_t Value,
- bool isPCRel,
- unsigned Type,
- unsigned Size,
- int64_t Addend);
- bool resolveX86_64Relocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
- uint64_t Value,
- bool isPCRel,
- unsigned Type,
- unsigned Size,
- int64_t Addend);
- bool resolveARMRelocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
- uint64_t Value,
- bool isPCRel,
- unsigned Type,
- unsigned Size,
- int64_t Addend);
-
- void resolveRelocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend,
- bool isPCRel,
- unsigned Size);
-
- unsigned getMaxStubSize() {
- if (Arch == Triple::arm || Arch == Triple::thumb)
- return 8; // 32-bit instruction and 32-bit address
- else if (Arch == Triple::x86_64)
- return 8; // GOT entry
- else
- return 0;
- }
-
- unsigned getStubAlignment() {
- return 1;
- }
+protected:
+ struct SectionOffsetPair {
+ unsigned SectionID;
+ uint64_t Offset;
+ };
struct EHFrameRelatedSections {
- EHFrameRelatedSections() : EHFrameSID(RTDYLD_INVALID_SECTION_ID),
- TextSID(RTDYLD_INVALID_SECTION_ID),
- ExceptTabSID(RTDYLD_INVALID_SECTION_ID) {}
- EHFrameRelatedSections(SID EH, SID T, SID Ex)
- : EHFrameSID(EH), TextSID(T), ExceptTabSID(Ex) {}
+ EHFrameRelatedSections()
+ : EHFrameSID(RTDYLD_INVALID_SECTION_ID),
+ TextSID(RTDYLD_INVALID_SECTION_ID),
+ ExceptTabSID(RTDYLD_INVALID_SECTION_ID) {}
+ EHFrameRelatedSections(SID EH, SID T, SID Ex)
+ : EHFrameSID(EH), TextSID(T), ExceptTabSID(Ex) {}
SID EHFrameSID;
SID TextSID;
SID ExceptTabSID;
@@ -83,21 +48,130 @@ class RuntimeDyldMachO : public RuntimeDyldImpl {
// in a table until we receive a request to register all unregistered
// EH frame sections with the memory manager.
SmallVector<EHFrameRelatedSections, 2> UnregisteredEHFrameSections;
-public:
+
RuntimeDyldMachO(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm) {}
- virtual void resolveRelocation(const RelocationEntry &RE, uint64_t Value);
- virtual void processRelocationRef(unsigned SectionID,
- RelocationRef RelI,
- ObjectImage &Obj,
- ObjSectionToIDMap &ObjSectionToID,
- const SymbolTableMap &Symbols,
- StubMap &Stubs);
- virtual bool isCompatibleFormat(const ObjectBuffer *Buffer) const;
- virtual void registerEHFrames();
- virtual void finalizeLoad(ObjSectionToIDMap &SectionMap);
+ /// Extract the addend encoded in the instruction.
+ uint64_t decodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
+ uint32_t RelType) const;
+
+ /// Construct a RelocationValueRef representing the relocation target.
+ /// For Symbols in known sections, this will return a RelocationValueRef
+ /// representing a (SectionID, Offset) pair.
+ /// For Symbols whose section is not known, this will return a
+ /// (SymbolName, Offset) pair, where the Offset is taken from the instruction
+ /// immediate (held in RE.Addend).
+ /// In both cases the Addend field is *NOT* fixed up to be PC-relative. That
+ /// should be done by the caller where appropriate by calling makePCRel on
+ /// the RelocationValueRef.
+ RelocationValueRef getRelocationValueRef(ObjectImage &ObjImg,
+ const relocation_iterator &RI,
+ const RelocationEntry &RE,
+ ObjSectionToIDMap &ObjSectionToID,
+ const SymbolTableMap &Symbols);
+
+ /// Make the RelocationValueRef addend PC-relative.
+ void makeValueAddendPCRel(RelocationValueRef &Value, ObjectImage &ObjImg,
+ const relocation_iterator &RI);
+
+ /// Dump information about the relocation entry (RE) and resolved value.
+ void dumpRelocationToResolve(const RelocationEntry &RE, uint64_t Value) const;
+
+public:
+ /// Create an ObjectImage from the given ObjectBuffer.
+ static ObjectImage *createObjectImage(ObjectBuffer *InputBuffer) {
+ return new ObjectImageCommon(InputBuffer);
+ }
+
+ /// Create an ObjectImage from the given ObjectFile.
+ static ObjectImage *
+ createObjectImageFromFile(std::unique_ptr<object::ObjectFile> InputObject) {
+ return new ObjectImageCommon(std::move(InputObject));
+ }
+
+ /// Create a RuntimeDyldMachO instance for the given target architecture.
+ static std::unique_ptr<RuntimeDyldMachO> create(Triple::ArchType Arch,
+ RTDyldMemoryManager *mm);
+
+ /// Write the least significant 'Size' bytes in 'Value' out at the address
+ /// pointed to by Addr. Check for overflow.
+ bool writeBytesUnaligned(uint8_t *Addr, uint64_t Value, unsigned Size);
+
+ SectionEntry &getSection(unsigned SectionID) { return Sections[SectionID]; }
+
+ bool isCompatibleFormat(const ObjectBuffer *Buffer) const override;
+ bool isCompatibleFile(const object::ObjectFile *Obj) const override;
+ void registerEHFrames() override;
+};
+
+/// RuntimeDyldMachOTarget - Templated base class for generic MachO linker
+/// algorithms and data structures.
+///
+/// Concrete, target specific sub-classes can be accessed via the impl()
+/// methods. (i.e. the RuntimeDyldMachO hierarchy uses the Curiously
+/// Recurring Template Idiom). Concrete subclasses for each target
+/// can be found in ./Targets.
+template <typename Impl>
+class RuntimeDyldMachOCRTPBase : public RuntimeDyldMachO {
+private:
+ Impl &impl() { return static_cast<Impl &>(*this); }
+ const Impl &impl() const { return static_cast<const Impl &>(*this); }
+
+protected:
+
+ /// Parse the given relocation, which must be a non-scattered, and
+ /// return a RelocationEntry representing the information. The 'Addend' field
+ /// will contain the unmodified instruction immediate.
+ RelocationEntry getBasicRelocationEntry(unsigned SectionID,
+ ObjectImage &ObjImg,
+ const relocation_iterator &RI) const {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RI->getRawDataRefImpl());
+
+ const SectionEntry &Section = Sections[SectionID];
+ bool IsPCRel = Obj.getAnyRelocationPCRel(RelInfo);
+ unsigned Size = Obj.getAnyRelocationLength(RelInfo);
+ uint64_t Offset;
+ RI->getOffset(Offset);
+ uint8_t *LocalAddress = Section.Address + Offset;
+ unsigned NumBytes = 1 << Size;
+ uint32_t RelType = Obj.getAnyRelocationType(RelInfo);
+ uint64_t Addend = impl().decodeAddend(LocalAddress, NumBytes, RelType);
+
+ return RelocationEntry(SectionID, Offset, RelType, Addend, IsPCRel, Size);
+ }
+
+public:
+ RuntimeDyldMachOCRTPBase(RTDyldMemoryManager *mm) : RuntimeDyldMachO(mm) {}
+
+ void finalizeLoad(ObjectImage &ObjImg, ObjSectionToIDMap &SectionMap) {
+ unsigned EHFrameSID = RTDYLD_INVALID_SECTION_ID;
+ unsigned TextSID = RTDYLD_INVALID_SECTION_ID;
+ unsigned ExceptTabSID = RTDYLD_INVALID_SECTION_ID;
+ ObjSectionToIDMap::iterator i, e;
+
+ for (i = SectionMap.begin(), e = SectionMap.end(); i != e; ++i) {
+ const SectionRef &Section = i->first;
+ StringRef Name;
+ Section.getName(Name);
+ if (Name == "__eh_frame")
+ EHFrameSID = i->second;
+ else if (Name == "__text")
+ TextSID = i->second;
+ else if (Name == "__gcc_except_tab")
+ ExceptTabSID = i->second;
+ else
+ impl().finalizeSection(ObjImg, i->second, Section);
+ }
+ UnregisteredEHFrameSections.push_back(
+ EHFrameRelatedSections(EHFrameSID, TextSID, ExceptTabSID));
+ }
};
} // end namespace llvm
+#undef DEBUG_TYPE
+
#endif
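RuntimeDyldMachOCRTPBase reaches its target-specific subclass through impl() — a static_cast rather than a virtual call, i.e. the Curiously Recurring Template Idiom mentioned above. A toy illustration of that mechanism with invented names (DyldCRTPBase and DyldToyTarget are not part of RuntimeDyld):

#include <cassert>
#include <cstdint>

template <typename Impl> class DyldCRTPBase {
  Impl &impl() { return static_cast<Impl &>(*this); }

public:
  // Resolved statically against the derived class; no vtable is involved.
  uint64_t decode(const uint8_t *P) { return impl().decodeAddend(P); }
};

class DyldToyTarget : public DyldCRTPBase<DyldToyTarget> {
public:
  uint64_t decodeAddend(const uint8_t *P) { return *P; } // toy decoder
};

int main() {
  DyldToyTarget D;
  uint8_t Byte = 42;
  assert(D.decode(&Byte) == 42);
  return 0;
}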
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
new file mode 100644
index 0000000..775ed9e
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
@@ -0,0 +1,255 @@
+//===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_RUNTIMEDYLDMACHOAARCH64_H
+#define LLVM_RUNTIMEDYLDMACHOAARCH64_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOAArch64
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
+public:
+ RuntimeDyldMachOAArch64(RTDyldMemoryManager *MM)
+ : RuntimeDyldMachOCRTPBase(MM) {}
+
+ unsigned getMaxStubSize() override { return 8; }
+
+ unsigned getStubAlignment() override { return 8; }
+
+ relocation_iterator
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ ObjectImage &ObjImg, ObjSectionToIDMap &ObjSectionToID,
+ const SymbolTableMap &Symbols, StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ assert(!Obj.isRelocationScattered(RelInfo) && "");
+
+ // ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit
+ // addend for the following relocation. If found: (1) store the associated
+ // addend, (2) consume the next relocation, and (3) use the stored addend to
+ // override the addend.
+ bool HasExplicitAddend = false;
+ int64_t ExplicitAddend = 0;
+ if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) {
+ assert(!Obj.getPlainRelocationExternal(RelInfo));
+ assert(!Obj.getAnyRelocationPCRel(RelInfo));
+ assert(Obj.getAnyRelocationLength(RelInfo) == 2);
+ HasExplicitAddend = true;
+ int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo);
+ // Sign-extend the 24-bit to 64-bit.
+ ExplicitAddend = (RawAddend << 40) >> 40;
+ ++RelI;
+ RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
+ }
+
+ RelocationEntry RE(getBasicRelocationEntry(SectionID, ObjImg, RelI));
+ RelocationValueRef Value(
+ getRelocationValueRef(ObjImg, RelI, RE, ObjSectionToID, Symbols));
+
+ if (HasExplicitAddend) {
+ RE.Addend = ExplicitAddend;
+ Value.Addend = ExplicitAddend;
+ }
+
+ bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ if (!IsExtern && RE.IsPCRel)
+ makeValueAddendPCRel(Value, ObjImg, RelI);
+
+ RE.Addend = Value.Addend;
+
+ if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
+ RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12)
+ processGOTRelocation(RE, Value, Stubs);
+ else {
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) {
+ DEBUG(dumpRelocationToResolve(RE, Value));
+
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.Address + RE.Offset;
+
+ switch (RE.RelType) {
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ case MachO::ARM64_RELOC_UNSIGNED: {
+ assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_UNSIGNED not supported");
+ // Mask in the target value a byte at a time (we don't have an alignment
+ // guarantee for the target address, so this is safest).
+ if (RE.Size < 2)
+ llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");
+
+ writeBytesUnaligned(LocalAddress, Value + RE.Addend, 1 << RE.Size);
+ break;
+ }
+ case MachO::ARM64_RELOC_BRANCH26: {
+ assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported");
+ // Mask the value into the target address. We know instructions are
+ // 32-bit aligned, so we can do it all at once.
+ uint32_t *p = (uint32_t *)LocalAddress;
+ // Check if the addend is encoded in the instruction.
+ uint32_t EncodedAddend = *p & 0x03FFFFFF;
+ if (EncodedAddend != 0) {
+ if (RE.Addend == 0)
+ llvm_unreachable("branch26 instruction has embedded addend.");
+ else
+          llvm_unreachable("branch26 instruction has embedded addend and "
+ "ARM64_RELOC_ADDEND.");
+ }
+ // Check if branch is in range.
+ uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
+ uint64_t PCRelVal = Value - FinalAddress + RE.Addend;
+ assert(isInt<26>(PCRelVal) && "Branch target out of range!");
+ // Insert the value into the instruction.
+ *p = (*p & 0xFC000000) | ((uint32_t)(PCRelVal >> 2) & 0x03FFFFFF);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_PAGE21: {
+ assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported");
+ // Mask the value into the target address. We know instructions are
+ // 32-bit aligned, so we can do it all at once.
+ uint32_t *p = (uint32_t *)LocalAddress;
+ // Check if the addend is encoded in the instruction.
+ uint32_t EncodedAddend =
+ ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3);
+ if (EncodedAddend != 0) {
+ if (RE.Addend == 0)
+ llvm_unreachable("adrp instruction has embedded addend.");
+ else
+          llvm_unreachable("adrp instruction has embedded addend and "
+ "ARM64_RELOC_ADDEND.");
+ }
+ // Adjust for PC-relative relocation and offset.
+ uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
+ uint64_t PCRelVal =
+ ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
+ // Check that the value fits into 21 bits (+ 12 lower bits).
+ assert(isInt<33>(PCRelVal) && "Invalid page reloc value!");
+ // Insert the value into the instruction.
+ uint32_t ImmLoValue = (uint32_t)(PCRelVal << 17) & 0x60000000;
+ uint32_t ImmHiValue = (uint32_t)(PCRelVal >> 9) & 0x00FFFFE0;
+ *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ case MachO::ARM64_RELOC_PAGEOFF12: {
+ assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF21 not supported");
+ // Mask the value into the target address. We know instructions are
+ // 32-bit aligned, so we can do it all at once.
+ uint32_t *p = (uint32_t *)LocalAddress;
+ // Check if the addend is encoded in the instruction.
+ uint32_t EncodedAddend = *p & 0x003FFC00;
+ if (EncodedAddend != 0) {
+ if (RE.Addend == 0)
+ llvm_unreachable("adrp instruction has embedded addend.");
+ else
+          llvm_unreachable("adrp instruction has embedded addend and "
+ "ARM64_RELOC_ADDEND.");
+ }
+ // Add the offset from the symbol.
+ Value += RE.Addend;
+ // Mask out the page address and only use the lower 12 bits.
+ Value &= 0xFFF;
+ // Check which instruction we are updating to obtain the implicit shift
+ // factor from LDR/STR instructions.
+ if (*p & 0x08000000) {
+ uint32_t ImplicitShift = ((*p >> 30) & 0x3);
+ switch (ImplicitShift) {
+ case 0:
+ // Check if this a vector op.
+ if ((*p & 0x04800000) == 0x04800000) {
+ ImplicitShift = 4;
+ assert(((Value & 0xF) == 0) &&
+ "128-bit LDR/STR not 16-byte aligned.");
+ }
+ break;
+ case 1:
+ assert(((Value & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
+ case 2:
+ assert(((Value & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
+ case 3:
+ assert(((Value & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
+ }
+ // Compensate for implicit shift.
+ Value >>= ImplicitShift;
+ }
+ // Insert the value into the instruction.
+ *p = (*p & 0xFFC003FF) | ((uint32_t)(Value << 10) & 0x003FFC00);
+ break;
+ }
+ case MachO::ARM64_RELOC_SUBTRACTOR:
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
+ llvm_unreachable("Relocation type not implemented yet!");
+ case MachO::ARM64_RELOC_ADDEND:
+      llvm_unreachable("ARM64_RELOC_ADDEND should have been handled by "
+ "processRelocationRef!");
+ }
+ }
+
+ void finalizeSection(ObjectImage &ObjImg, unsigned SectionID,
+ const SectionRef &Section) {}
+
+private:
+ void processGOTRelocation(const RelocationEntry &RE,
+ RelocationValueRef &Value, StubMap &Stubs) {
+ assert(RE.Size == 2);
+ SectionEntry &Section = Sections[RE.SectionID];
+ StubMap::const_iterator i = Stubs.find(Value);
+ uint8_t *Addr;
+ if (i != Stubs.end())
+ Addr = Section.Address + i->second;
+ else {
+      // FIXME: There must be a better way to do this than to check and fix the
+ // alignment every time!!!
+ uintptr_t BaseAddress = uintptr_t(Section.Address);
+ uintptr_t StubAlignment = getStubAlignment();
+ uintptr_t StubAddress =
+ (BaseAddress + Section.StubOffset + StubAlignment - 1) &
+ -StubAlignment;
+ unsigned StubOffset = StubAddress - BaseAddress;
+ Stubs[Value] = StubOffset;
+ assert(((StubAddress % getStubAlignment()) == 0) &&
+ "GOT entry not aligned");
+ RelocationEntry GOTRE(RE.SectionID, StubOffset,
+ MachO::ARM64_RELOC_UNSIGNED, Value.Addend,
+ /*IsPCRel=*/false, /*Size=*/3);
+ if (Value.SymbolName)
+ addRelocationForSymbol(GOTRE, Value.SymbolName);
+ else
+ addRelocationForSection(GOTRE, Value.SectionID);
+ Section.StubOffset = StubOffset + getMaxStubSize();
+ Addr = (uint8_t *)StubAddress;
+ }
+ RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, /*Addend=*/0,
+ RE.IsPCRel, RE.Size);
+ resolveRelocation(TargetRE, (uint64_t)Addr);
+ }
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_RUNTIMEDYLDMACHOAARCH64_H
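For readers following the ARM64_RELOC_PAGE21 case above, here is a minimal standalone sketch of the same ADRP page-immediate patching. The function and parameter names are invented for illustration and are not part of the patch; Target is assumed to already include any addend.

#include <cassert>
#include <cstdint>

// Patch the 21-bit page immediate of an ADRP instruction (illustrative only).
static uint32_t patchAdrpPage21(uint32_t Insn, uint64_t Target, uint64_t PC) {
  // Delta between the 4 KiB page of the target and the page of the adrp.
  uint64_t PageDelta = (Target & ~0xFFFULL) - (PC & ~0xFFFULL);
  // ADRP reaches +/-4 GiB, so the signed delta must fit in 33 bits.
  assert((int64_t)PageDelta >= -(1LL << 32) &&
         (int64_t)PageDelta < (1LL << 32) && "invalid page reloc value");
  // immlo (delta bits 13:12) lives in instruction bits 30:29,
  // immhi (delta bits 32:14) lives in instruction bits 23:5.
  uint32_t ImmLo = (uint32_t)(PageDelta << 17) & 0x60000000;
  uint32_t ImmHi = (uint32_t)(PageDelta >> 9) & 0x00FFFFE0;
  // Keep the opcode and Rd fields (bits 31, 28:24, 4:0); replace the immediate.
  return (Insn & 0x9F00001F) | ImmHi | ImmLo;
}

The split into ImmLo and ImmHi mirrors the A64 encoding, where the two low immediate bits sit apart from the remaining nineteen.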
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
new file mode 100644
index 0000000..1de9942
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
@@ -0,0 +1,154 @@
+//===----- RuntimeDyldMachOARM.h ---- MachO/ARM specific code. ----*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_RUNTIMEDYLDMACHOARM_H
+#define LLVM_RUNTIMEDYLDMACHOARM_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOARM
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOARM> {
+public:
+ RuntimeDyldMachOARM(RTDyldMemoryManager *MM) : RuntimeDyldMachOCRTPBase(MM) {}
+
+ unsigned getMaxStubSize() override { return 8; }
+
+ unsigned getStubAlignment() override { return 4; }
+
+ relocation_iterator
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ ObjectImage &ObjImg, ObjSectionToIDMap &ObjSectionToID,
+ const SymbolTableMap &Symbols, StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ if (Obj.isRelocationScattered(RelInfo))
+ return ++++RelI;
+
+ RelocationEntry RE(getBasicRelocationEntry(SectionID, ObjImg, RelI));
+ RelocationValueRef Value(
+ getRelocationValueRef(ObjImg, RelI, RE, ObjSectionToID, Symbols));
+
+ bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ if (!IsExtern && RE.IsPCRel)
+ makeValueAddendPCRel(Value, ObjImg, RelI);
+
+ if ((RE.RelType & 0xf) == MachO::ARM_RELOC_BR24)
+ processBranchRelocation(RE, Value, Stubs);
+ else {
+ RE.Addend = Value.Addend;
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) {
+ DEBUG(dumpRelocationToResolve(RE, Value));
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.Address + RE.Offset;
+
+ // If the relocation is PC-relative, the value to be encoded is the
+ // pointer difference.
+ if (RE.IsPCRel) {
+ uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
+ Value -= FinalAddress;
+ // ARM PCRel relocations have an effective-PC offset of two instructions
+ // (4 bytes in Thumb mode, 8 bytes in ARM mode).
+ // FIXME: For now, assume ARM mode.
+ Value -= 8;
+ }
+
+ switch (RE.RelType) {
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ case MachO::ARM_RELOC_VANILLA:
+ writeBytesUnaligned(LocalAddress, Value, 1 << RE.Size);
+ break;
+ case MachO::ARM_RELOC_BR24: {
+ // Mask the value into the target address. We know instructions are
+ // 32-bit aligned, so we can do it all at once.
+ uint32_t *p = (uint32_t *)LocalAddress;
+ // The low two bits of the value are not encoded.
+ Value >>= 2;
+ // Mask the value to 24 bits.
+ uint64_t FinalValue = Value & 0xffffff;
+ // Check for overflow.
+ if (Value != FinalValue) {
+ Error("ARM BR24 relocation out of range.");
+ return;
+ }
+ // FIXME: If the destination is a Thumb function (and the instruction
+ // is a non-predicated BL instruction), we need to change it to a BLX
+ // instruction instead.
+
+ // Insert the value into the instruction.
+ *p = (*p & ~0xffffff) | FinalValue;
+ break;
+ }
+ case MachO::ARM_THUMB_RELOC_BR22:
+ case MachO::ARM_THUMB_32BIT_BRANCH:
+ case MachO::ARM_RELOC_HALF:
+ case MachO::ARM_RELOC_HALF_SECTDIFF:
+ case MachO::ARM_RELOC_PAIR:
+ case MachO::ARM_RELOC_SECTDIFF:
+ case MachO::ARM_RELOC_LOCAL_SECTDIFF:
+ case MachO::ARM_RELOC_PB_LA_PTR:
+ Error("Relocation type not implemented yet!");
+ return;
+ }
+ }
+
+ void finalizeSection(ObjectImage &ObjImg, unsigned SectionID,
+ const SectionRef &Section) {}
+
+private:
+ void processBranchRelocation(const RelocationEntry &RE,
+ const RelocationValueRef &Value,
+ StubMap &Stubs) {
+ // This is an ARM branch relocation, so it needs to go through a stub
+ // function. Look for an existing stub first.
+ SectionEntry &Section = Sections[RE.SectionID];
+ RuntimeDyldMachO::StubMap::const_iterator i = Stubs.find(Value);
+ uint8_t *Addr;
+ if (i != Stubs.end()) {
+ Addr = Section.Address + i->second;
+ } else {
+ // Create a new stub function.
+ Stubs[Value] = Section.StubOffset;
+ uint8_t *StubTargetAddr =
+ createStubFunction(Section.Address + Section.StubOffset);
+ RelocationEntry StubRE(RE.SectionID, StubTargetAddr - Section.Address,
+ MachO::GENERIC_RELOC_VANILLA, Value.Addend);
+ if (Value.SymbolName)
+ addRelocationForSymbol(StubRE, Value.SymbolName);
+ else
+ addRelocationForSection(StubRE, Value.SectionID);
+ Addr = Section.Address + Section.StubOffset;
+ Section.StubOffset += getMaxStubSize();
+ }
+ RelocationEntry TargetRE(Value.SectionID, RE.Offset, RE.RelType, 0,
+ RE.IsPCRel, RE.Size);
+ resolveRelocation(TargetRE, (uint64_t)Addr);
+ }
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_RUNTIMEDYLDMACHOARM_H
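The ARM_RELOC_BR24 case above shifts and masks a branch offset into a B/BL instruction. A minimal standalone sketch of that encoding follows; the names are invented for the example, and unlike the code above the range check here is on the signed +/-32 MiB reach.

#include <cassert>
#include <cstdint>

// Patch the 24-bit signed word offset of an ARM B/BL instruction (sketch).
static uint32_t patchArmBr24(uint32_t Insn, uint64_t Target, uint64_t PC) {
  // The offset is relative to PC + 8 (two instructions ahead in ARM mode).
  int64_t Offset = (int64_t)(Target - (PC + 8));
  assert((Offset & 0x3) == 0 && "branch target not 4-byte aligned");
  Offset >>= 2; // the low two bits are implicit in the encoding
  assert(Offset >= -(1 << 23) && Offset < (1 << 23) && "BR24 out of range");
  // Keep the condition and opcode bits, replace the 24-bit immediate.
  return (Insn & ~0x00FFFFFFu) | ((uint32_t)Offset & 0x00FFFFFF);
}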
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
new file mode 100644
index 0000000..856c6ca
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
@@ -0,0 +1,315 @@
+//===---- RuntimeDyldMachOI386.h ---- MachO/I386 specific code. ---*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_RUNTIMEDYLDMACHOI386_H
+#define LLVM_RUNTIMEDYLDMACHOI386_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOI386
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOI386> {
+public:
+ RuntimeDyldMachOI386(RTDyldMemoryManager *MM)
+ : RuntimeDyldMachOCRTPBase(MM) {}
+
+ unsigned getMaxStubSize() override { return 0; }
+
+ unsigned getStubAlignment() override { return 1; }
+
+ relocation_iterator
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ ObjectImage &ObjImg, ObjSectionToIDMap &ObjSectionToID,
+ const SymbolTableMap &Symbols, StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+ uint32_t RelType = Obj.getAnyRelocationType(RelInfo);
+
+ if (Obj.isRelocationScattered(RelInfo)) {
+ if (RelType == MachO::GENERIC_RELOC_SECTDIFF ||
+ RelType == MachO::GENERIC_RELOC_LOCAL_SECTDIFF)
+ return processSECTDIFFRelocation(SectionID, RelI, ObjImg,
+ ObjSectionToID);
+ else if (Arch == Triple::x86 && RelType == MachO::GENERIC_RELOC_VANILLA)
+ return processI386ScatteredVANILLA(SectionID, RelI, ObjImg,
+ ObjSectionToID);
+ llvm_unreachable("Unhandled scattered relocation.");
+ }
+
+ RelocationEntry RE(getBasicRelocationEntry(SectionID, ObjImg, RelI));
+ RelocationValueRef Value(
+ getRelocationValueRef(ObjImg, RelI, RE, ObjSectionToID, Symbols));
+
+ // Addends for external, PC-rel relocations on i386 point back to the zero
+ // offset. Calculate the final offset from the relocation target instead.
+ // This allows us to use the same logic for both external and internal
+ // relocations in resolveI386RelocationRef.
+ // bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ // if (IsExtern && RE.IsPCRel) {
+ // uint64_t RelocAddr = 0;
+ // RelI->getAddress(RelocAddr);
+ // Value.Addend += RelocAddr + 4;
+ // }
+ if (RE.IsPCRel)
+ makeValueAddendPCRel(Value, ObjImg, RelI);
+
+ RE.Addend = Value.Addend;
+
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) {
+ DEBUG(dumpRelocationToResolve(RE, Value));
+
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.Address + RE.Offset;
+
+ if (RE.IsPCRel) {
+ uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
+ Value -= FinalAddress + 4; // see MachOX86_64::resolveRelocation.
+ }
+
+ switch (RE.RelType) {
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ case MachO::GENERIC_RELOC_VANILLA:
+ writeBytesUnaligned(LocalAddress, Value + RE.Addend, 1 << RE.Size);
+ break;
+ case MachO::GENERIC_RELOC_SECTDIFF:
+ case MachO::GENERIC_RELOC_LOCAL_SECTDIFF: {
+ uint64_t SectionABase = Sections[RE.Sections.SectionA].LoadAddress;
+ uint64_t SectionBBase = Sections[RE.Sections.SectionB].LoadAddress;
+ assert((Value == SectionABase || Value == SectionBBase) &&
+ "Unexpected SECTDIFF relocation value.");
+ Value = SectionABase - SectionBBase + RE.Addend;
+ writeBytesUnaligned(LocalAddress, Value, 1 << RE.Size);
+ break;
+ }
+ case MachO::GENERIC_RELOC_PB_LA_PTR:
+ Error("Relocation type not implemented yet!");
+ }
+ }
+
+ void finalizeSection(ObjectImage &ObjImg, unsigned SectionID,
+ const SectionRef &Section) {
+ StringRef Name;
+ Section.getName(Name);
+
+ if (Name == "__jump_table")
+ populateJumpTable(cast<MachOObjectFile>(*ObjImg.getObjectFile()), Section,
+ SectionID);
+ else if (Name == "__pointers")
+ populatePointersSection(cast<MachOObjectFile>(*ObjImg.getObjectFile()),
+ Section, SectionID);
+ }
+
+private:
+ relocation_iterator
+ processSECTDIFFRelocation(unsigned SectionID, relocation_iterator RelI,
+ ObjectImage &Obj,
+ ObjSectionToIDMap &ObjSectionToID) {
+ const MachOObjectFile *MachO =
+ static_cast<const MachOObjectFile *>(Obj.getObjectFile());
+ MachO::any_relocation_info RE =
+ MachO->getRelocation(RelI->getRawDataRefImpl());
+
+ SectionEntry &Section = Sections[SectionID];
+ uint32_t RelocType = MachO->getAnyRelocationType(RE);
+ bool IsPCRel = MachO->getAnyRelocationPCRel(RE);
+ unsigned Size = MachO->getAnyRelocationLength(RE);
+ uint64_t Offset;
+ RelI->getOffset(Offset);
+ uint8_t *LocalAddress = Section.Address + Offset;
+ unsigned NumBytes = 1 << Size;
+ int64_t Addend = 0;
+ memcpy(&Addend, LocalAddress, NumBytes);
+
+ ++RelI;
+ MachO::any_relocation_info RE2 =
+ MachO->getRelocation(RelI->getRawDataRefImpl());
+
+ uint32_t AddrA = MachO->getScatteredRelocationValue(RE);
+ section_iterator SAI = getSectionByAddress(*MachO, AddrA);
+ assert(SAI != MachO->section_end() && "Can't find section for address A");
+ uint64_t SectionABase;
+ SAI->getAddress(SectionABase);
+ uint64_t SectionAOffset = AddrA - SectionABase;
+ SectionRef SectionA = *SAI;
+ bool IsCode;
+ SectionA.isText(IsCode);
+ uint32_t SectionAID =
+ findOrEmitSection(Obj, SectionA, IsCode, ObjSectionToID);
+
+ uint32_t AddrB = MachO->getScatteredRelocationValue(RE2);
+ section_iterator SBI = getSectionByAddress(*MachO, AddrB);
+ assert(SBI != MachO->section_end() && "Can't find section for address B");
+ uint64_t SectionBBase;
+ SBI->getAddress(SectionBBase);
+ uint64_t SectionBOffset = AddrB - SectionBBase;
+ SectionRef SectionB = *SBI;
+ uint32_t SectionBID =
+ findOrEmitSection(Obj, SectionB, IsCode, ObjSectionToID);
+
+ if (Addend != AddrA - AddrB)
+ Error("Unexpected SECTDIFF relocation addend.");
+
+ DEBUG(dbgs() << "Found SECTDIFF: AddrA: " << AddrA << ", AddrB: " << AddrB
+ << ", Addend: " << Addend << ", SectionA ID: " << SectionAID
+ << ", SectionAOffset: " << SectionAOffset
+ << ", SectionB ID: " << SectionBID
+ << ", SectionBOffset: " << SectionBOffset << "\n");
+ RelocationEntry R(SectionID, Offset, RelocType, 0, SectionAID,
+ SectionAOffset, SectionBID, SectionBOffset, IsPCRel,
+ Size);
+
+ addRelocationForSection(R, SectionAID);
+ addRelocationForSection(R, SectionBID);
+
+ return ++RelI;
+ }
+
+ relocation_iterator processI386ScatteredVANILLA(
+ unsigned SectionID, relocation_iterator RelI, ObjectImage &Obj,
+ RuntimeDyldMachO::ObjSectionToIDMap &ObjSectionToID) {
+ const MachOObjectFile *MachO =
+ static_cast<const MachOObjectFile *>(Obj.getObjectFile());
+ MachO::any_relocation_info RE =
+ MachO->getRelocation(RelI->getRawDataRefImpl());
+
+ SectionEntry &Section = Sections[SectionID];
+ uint32_t RelocType = MachO->getAnyRelocationType(RE);
+ bool IsPCRel = MachO->getAnyRelocationPCRel(RE);
+ unsigned Size = MachO->getAnyRelocationLength(RE);
+ uint64_t Offset;
+ RelI->getOffset(Offset);
+ uint8_t *LocalAddress = Section.Address + Offset;
+ unsigned NumBytes = 1 << Size;
+ int64_t Addend = 0;
+ memcpy(&Addend, LocalAddress, NumBytes);
+
+ unsigned SymbolBaseAddr = MachO->getScatteredRelocationValue(RE);
+ section_iterator TargetSI = getSectionByAddress(*MachO, SymbolBaseAddr);
+ assert(TargetSI != MachO->section_end() && "Can't find section for symbol");
+ uint64_t SectionBaseAddr;
+ TargetSI->getAddress(SectionBaseAddr);
+ SectionRef TargetSection = *TargetSI;
+ bool IsCode;
+ TargetSection.isText(IsCode);
+ uint32_t TargetSectionID =
+ findOrEmitSection(Obj, TargetSection, IsCode, ObjSectionToID);
+
+ Addend -= SectionBaseAddr;
+ RelocationEntry R(SectionID, Offset, RelocType, Addend, IsPCRel, Size);
+
+ addRelocationForSection(R, TargetSectionID);
+
+ return ++RelI;
+ }
+
+ // Populate stubs in __jump_table section.
+ void populateJumpTable(MachOObjectFile &Obj, const SectionRef &JTSection,
+ unsigned JTSectionID) {
+ assert(!Obj.is64Bit() &&
+ "__jump_table section not supported in 64-bit MachO.");
+
+ MachO::dysymtab_command DySymTabCmd = Obj.getDysymtabLoadCommand();
+ MachO::section Sec32 = Obj.getSection(JTSection.getRawDataRefImpl());
+ uint32_t JTSectionSize = Sec32.size;
+ unsigned FirstIndirectSymbol = Sec32.reserved1;
+ unsigned JTEntrySize = Sec32.reserved2;
+ unsigned NumJTEntries = JTSectionSize / JTEntrySize;
+ uint8_t *JTSectionAddr = getSectionAddress(JTSectionID);
+ unsigned JTEntryOffset = 0;
+
+ assert((JTSectionSize % JTEntrySize) == 0 &&
+ "Jump-table section does not contain a whole number of stubs?");
+
+ for (unsigned i = 0; i < NumJTEntries; ++i) {
+ unsigned SymbolIndex =
+ Obj.getIndirectSymbolTableEntry(DySymTabCmd, FirstIndirectSymbol + i);
+ symbol_iterator SI = Obj.getSymbolByIndex(SymbolIndex);
+ StringRef IndirectSymbolName;
+ SI->getName(IndirectSymbolName);
+ uint8_t *JTEntryAddr = JTSectionAddr + JTEntryOffset;
+ createStubFunction(JTEntryAddr);
+ RelocationEntry RE(JTSectionID, JTEntryOffset + 1,
+ MachO::GENERIC_RELOC_VANILLA, 0, true, 2);
+ addRelocationForSymbol(RE, IndirectSymbolName);
+ JTEntryOffset += JTEntrySize;
+ }
+ }
+
+ // Populate __pointers section.
+ void populatePointersSection(MachOObjectFile &Obj,
+ const SectionRef &PTSection,
+ unsigned PTSectionID) {
+ assert(!Obj.is64Bit() &&
+ "__pointers section not supported in 64-bit MachO.");
+
+ MachO::dysymtab_command DySymTabCmd = Obj.getDysymtabLoadCommand();
+ MachO::section Sec32 = Obj.getSection(PTSection.getRawDataRefImpl());
+ uint32_t PTSectionSize = Sec32.size;
+ unsigned FirstIndirectSymbol = Sec32.reserved1;
+ const unsigned PTEntrySize = 4;
+ unsigned NumPTEntries = PTSectionSize / PTEntrySize;
+ unsigned PTEntryOffset = 0;
+
+ assert((PTSectionSize % PTEntrySize) == 0 &&
+ "Pointers section does not contain a whole number of stubs?");
+
+ DEBUG(dbgs() << "Populating __pointers, Section ID " << PTSectionID << ", "
+ << NumPTEntries << " entries, " << PTEntrySize
+ << " bytes each:\n");
+
+ for (unsigned i = 0; i < NumPTEntries; ++i) {
+ unsigned SymbolIndex =
+ Obj.getIndirectSymbolTableEntry(DySymTabCmd, FirstIndirectSymbol + i);
+ symbol_iterator SI = Obj.getSymbolByIndex(SymbolIndex);
+ StringRef IndirectSymbolName;
+ SI->getName(IndirectSymbolName);
+ DEBUG(dbgs() << " " << IndirectSymbolName << ": index " << SymbolIndex
+ << ", PT offset: " << PTEntryOffset << "\n");
+ RelocationEntry RE(PTSectionID, PTEntryOffset,
+ MachO::GENERIC_RELOC_VANILLA, 0, false, 2);
+ addRelocationForSymbol(RE, IndirectSymbolName);
+ PTEntryOffset += PTEntrySize;
+ }
+ }
+
+ static section_iterator getSectionByAddress(const MachOObjectFile &Obj,
+ uint64_t Addr) {
+ section_iterator SI = Obj.section_begin();
+ section_iterator SE = Obj.section_end();
+
+ for (; SI != SE; ++SI) {
+ uint64_t SAddr, SSize;
+ SI->getAddress(SAddr);
+ SI->getSize(SSize);
+ if ((Addr >= SAddr) && (Addr < SAddr + SSize))
+ return SI;
+ }
+
+ return SE;
+ }
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_RUNTIMEDYLDMACHOI386_H
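The SECTDIFF cases above boil down to rewriting a stored difference of two section addresses using the load addresses chosen at JIT time. A minimal sketch of that final write follows; the names and the plain little-endian memcpy are assumptions of the example, whereas the real code goes through writeBytesUnaligned.

#include <cstdint>
#include <cstring>

// Re-materialize a SECTDIFF value: (base of section A) - (base of section B)
// plus the addend recorded for the relocation (sketch, little-endian host).
static void resolveSectDiff(uint8_t *Place, unsigned NumBytes,
                            uint64_t SectionALoadAddr,
                            uint64_t SectionBLoadAddr, int64_t Addend) {
  uint64_t Value = SectionALoadAddr - SectionBLoadAddr + (uint64_t)Addend;
  std::memcpy(Place, &Value, NumBytes); // NumBytes is 1 << RE.Size, at most 8
}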
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
new file mode 100644
index 0000000..99efe9d
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
@@ -0,0 +1,132 @@
+//===-- RuntimeDyldMachOX86_64.h ---- MachO/X86_64 specific code. -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_RUNTIMEDYLDMACHOX86_64_H
+#define LLVM_RUNTIMEDYLDMACHOX86_64_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOX86_64
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOX86_64> {
+public:
+ RuntimeDyldMachOX86_64(RTDyldMemoryManager *MM)
+ : RuntimeDyldMachOCRTPBase(MM) {}
+
+ unsigned getMaxStubSize() override { return 8; }
+
+ unsigned getStubAlignment() override { return 1; }
+
+ relocation_iterator
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ ObjectImage &ObjImg, ObjSectionToIDMap &ObjSectionToID,
+ const SymbolTableMap &Symbols, StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ assert(!Obj.isRelocationScattered(RelInfo) &&
+ "Scattered relocations not supported on X86_64");
+
+ RelocationEntry RE(getBasicRelocationEntry(SectionID, ObjImg, RelI));
+ RelocationValueRef Value(
+ getRelocationValueRef(ObjImg, RelI, RE, ObjSectionToID, Symbols));
+
+ bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ if (!IsExtern && RE.IsPCRel)
+ makeValueAddendPCRel(Value, ObjImg, RelI);
+
+ if (RE.RelType == MachO::X86_64_RELOC_GOT ||
+ RE.RelType == MachO::X86_64_RELOC_GOT_LOAD)
+ processGOTRelocation(RE, Value, Stubs);
+ else {
+ RE.Addend = Value.Addend;
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) {
+ DEBUG(dumpRelocationToResolve(RE, Value));
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.Address + RE.Offset;
+
+ // If the relocation is PC-relative, the value to be encoded is the
+ // pointer difference.
+ if (RE.IsPCRel) {
+ // FIXME: It seems this value needs to be adjusted by 4 for an effective
+ // PC address. Is that expected? Only for branches, perhaps?
+ uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
+ Value -= FinalAddress + 4;
+ }
+
+ switch (RE.RelType) {
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ case MachO::X86_64_RELOC_SIGNED_1:
+ case MachO::X86_64_RELOC_SIGNED_2:
+ case MachO::X86_64_RELOC_SIGNED_4:
+ case MachO::X86_64_RELOC_SIGNED:
+ case MachO::X86_64_RELOC_UNSIGNED:
+ case MachO::X86_64_RELOC_BRANCH:
+ writeBytesUnaligned(LocalAddress, Value + RE.Addend, 1 << RE.Size);
+ break;
+ case MachO::X86_64_RELOC_GOT_LOAD:
+ case MachO::X86_64_RELOC_GOT:
+ case MachO::X86_64_RELOC_SUBTRACTOR:
+ case MachO::X86_64_RELOC_TLV:
+ Error("Relocation type not implemented yet!");
+ }
+ }
+
+ void finalizeSection(ObjectImage &ObjImg, unsigned SectionID,
+ const SectionRef &Section) {}
+
+private:
+ void processGOTRelocation(const RelocationEntry &RE,
+ RelocationValueRef &Value, StubMap &Stubs) {
+ SectionEntry &Section = Sections[RE.SectionID];
+ assert(RE.IsPCRel);
+ assert(RE.Size == 2);
+ Value.Addend -= RE.Addend;
+ RuntimeDyldMachO::StubMap::const_iterator i = Stubs.find(Value);
+ uint8_t *Addr;
+ if (i != Stubs.end()) {
+ Addr = Section.Address + i->second;
+ } else {
+ Stubs[Value] = Section.StubOffset;
+ uint8_t *GOTEntry = Section.Address + Section.StubOffset;
+ RelocationEntry GOTRE(RE.SectionID, Section.StubOffset,
+ MachO::X86_64_RELOC_UNSIGNED, Value.Addend, false,
+ 3);
+ if (Value.SymbolName)
+ addRelocationForSymbol(GOTRE, Value.SymbolName);
+ else
+ addRelocationForSection(GOTRE, Value.SectionID);
+ Section.StubOffset += 8;
+ Addr = GOTEntry;
+ }
+ RelocationEntry TargetRE(RE.SectionID, RE.Offset,
+ MachO::X86_64_RELOC_UNSIGNED, RE.Addend, true, 2);
+ resolveRelocation(TargetRE, (uint64_t)Addr);
+ }
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_RUNTIMEDYLDMACHOX86_64_H
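The x86-64 resolver above subtracts FinalAddress + 4 for PC-relative fixups because the displacement is measured from the end of the 4-byte field. A minimal standalone sketch of that computation, with names invented for the example:

#include <cstdint>
#include <cstring>

// Resolve a 32-bit PC-relative fixup the way the branch/signed cases above do
// (sketch; assumes a 4-byte field, i.e. RE.Size == 2).
static void resolvePCRel32(uint8_t *Place, uint64_t PlaceLoadAddr,
                           uint64_t Target, int64_t Addend) {
  int64_t Delta = (int64_t)(Target + Addend) - (int64_t)(PlaceLoadAddr + 4);
  int32_t Field = (int32_t)Delta; // truncate to the 32-bit displacement
  std::memcpy(Place, &Field, sizeof(Field));
}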
diff --git a/contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp b/contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp
index 9b7d348..b10d51f 100644
--- a/contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp
@@ -47,7 +47,7 @@ TargetMachine *EngineBuilder::selectTarget(const Triple &TargetTriple,
TheTriple.setTriple(sys::getProcessTriple());
// Adjust the triple to match what the user requested.
- const Target *TheTarget = 0;
+ const Target *TheTarget = nullptr;
if (!MArch.empty()) {
for (TargetRegistry::iterator it = TargetRegistry::begin(),
ie = TargetRegistry::end(); it != ie; ++it) {
@@ -61,7 +61,7 @@ TargetMachine *EngineBuilder::selectTarget(const Triple &TargetTriple,
if (ErrorStr)
*ErrorStr = "No available targets are compatible with this -march, "
"see -version for the available targets.\n";
- return 0;
+ return nullptr;
}
// Adjust the triple to match (if known), otherwise stick with the
@@ -72,10 +72,10 @@ TargetMachine *EngineBuilder::selectTarget(const Triple &TargetTriple,
} else {
std::string Error;
TheTarget = TargetRegistry::lookupTarget(TheTriple.getTriple(), Error);
- if (TheTarget == 0) {
+ if (!TheTarget) {
if (ErrorStr)
*ErrorStr = Error;
- return 0;
+ return nullptr;
}
}
diff --git a/contrib/llvm/lib/IR/AsmWriter.cpp b/contrib/llvm/lib/IR/AsmWriter.cpp
index 7decffd..a7499bc 100644
--- a/contrib/llvm/lib/IR/AsmWriter.cpp
+++ b/contrib/llvm/lib/IR/AsmWriter.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This library implements the functionality defined in llvm/Assembly/Writer.h
+// This library implements the functionality defined in llvm/IR/Writer.h
//
// Note that these routines must be extremely tolerant of various errors in the
// LLVM code, because it can be used for debugging transformations.
@@ -15,18 +15,17 @@
//===----------------------------------------------------------------------===//
#include "AsmWriter.h"
-
-#include "llvm/Assembly/Writer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/Assembly/AssemblyAnnotationWriter.h"
-#include "llvm/Assembly/PrintModulePass.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/AssemblyAnnotationWriter.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
@@ -34,13 +33,11 @@
#include "llvm/IR/Operator.h"
#include "llvm/IR/TypeFinder.h"
#include "llvm/IR/ValueSymbolTable.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/MathExtras.h"
-
#include <algorithm>
#include <cctype>
using namespace llvm;
@@ -54,19 +51,19 @@ AssemblyAnnotationWriter::~AssemblyAnnotationWriter() {}
static const Module *getModuleFromVal(const Value *V) {
if (const Argument *MA = dyn_cast<Argument>(V))
- return MA->getParent() ? MA->getParent()->getParent() : 0;
+ return MA->getParent() ? MA->getParent()->getParent() : nullptr;
if (const BasicBlock *BB = dyn_cast<BasicBlock>(V))
- return BB->getParent() ? BB->getParent()->getParent() : 0;
+ return BB->getParent() ? BB->getParent()->getParent() : nullptr;
if (const Instruction *I = dyn_cast<Instruction>(V)) {
- const Function *M = I->getParent() ? I->getParent()->getParent() : 0;
- return M ? M->getParent() : 0;
+ const Function *M = I->getParent() ? I->getParent()->getParent() : nullptr;
+ return M ? M->getParent() : nullptr;
}
if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
return GV->getParent();
- return 0;
+ return nullptr;
}
static void PrintCallingConv(unsigned cc, raw_ostream &Out) {
@@ -76,6 +73,8 @@ static void PrintCallingConv(unsigned cc, raw_ostream &Out) {
case CallingConv::Cold: Out << "coldcc"; break;
case CallingConv::WebKit_JS: Out << "webkit_jscc"; break;
case CallingConv::AnyReg: Out << "anyregcc"; break;
+ case CallingConv::PreserveMost: Out << "preserve_mostcc"; break;
+ case CallingConv::PreserveAll: Out << "preserve_allcc"; break;
case CallingConv::X86_StdCall: Out << "x86_stdcallcc"; break;
case CallingConv::X86_FastCall: Out << "x86_fastcallcc"; break;
case CallingConv::X86_ThisCall: Out << "x86_thiscallcc"; break;
@@ -88,6 +87,8 @@ static void PrintCallingConv(unsigned cc, raw_ostream &Out) {
case CallingConv::PTX_Device: Out << "ptx_device"; break;
case CallingConv::X86_64_SysV: Out << "x86_64_sysvcc"; break;
case CallingConv::X86_64_Win64: Out << "x86_64_win64cc"; break;
+ case CallingConv::SPIR_FUNC: Out << "spir_func"; break;
+ case CallingConv::SPIR_KERNEL: Out << "spir_kernel"; break;
}
}
@@ -105,6 +106,7 @@ static void PrintEscapedString(StringRef Name, raw_ostream &Out) {
enum PrefixType {
GlobalPrefix,
+ ComdatPrefix,
LabelPrefix,
LocalPrefix,
NoPrefix
@@ -118,6 +120,7 @@ static void PrintLLVMName(raw_ostream &OS, StringRef Name, PrefixType Prefix) {
switch (Prefix) {
case NoPrefix: break;
case GlobalPrefix: OS << '@'; break;
+ case ComdatPrefix: OS << '$'; break;
case LabelPrefix: break;
case LocalPrefix: OS << '%'; break;
}
@@ -192,16 +195,16 @@ void TypePrinting::incorporateTypes(const Module &M) {
/// use of type names or up references to shorten the type name where possible.
void TypePrinting::print(Type *Ty, raw_ostream &OS) {
switch (Ty->getTypeID()) {
- case Type::VoidTyID: OS << "void"; break;
- case Type::HalfTyID: OS << "half"; break;
- case Type::FloatTyID: OS << "float"; break;
- case Type::DoubleTyID: OS << "double"; break;
- case Type::X86_FP80TyID: OS << "x86_fp80"; break;
- case Type::FP128TyID: OS << "fp128"; break;
- case Type::PPC_FP128TyID: OS << "ppc_fp128"; break;
- case Type::LabelTyID: OS << "label"; break;
- case Type::MetadataTyID: OS << "metadata"; break;
- case Type::X86_MMXTyID: OS << "x86_mmx"; break;
+ case Type::VoidTyID: OS << "void"; return;
+ case Type::HalfTyID: OS << "half"; return;
+ case Type::FloatTyID: OS << "float"; return;
+ case Type::DoubleTyID: OS << "double"; return;
+ case Type::X86_FP80TyID: OS << "x86_fp80"; return;
+ case Type::FP128TyID: OS << "fp128"; return;
+ case Type::PPC_FP128TyID: OS << "ppc_fp128"; return;
+ case Type::LabelTyID: OS << "label"; return;
+ case Type::MetadataTyID: OS << "metadata"; return;
+ case Type::X86_MMXTyID: OS << "x86_mmx"; return;
case Type::IntegerTyID:
OS << 'i' << cast<IntegerType>(Ty)->getBitWidth();
return;
@@ -261,10 +264,8 @@ void TypePrinting::print(Type *Ty, raw_ostream &OS) {
OS << '>';
return;
}
- default:
- OS << "<unrecognized-type>";
- return;
}
+ llvm_unreachable("Invalid TypeID");
}
void TypePrinting::printStructBody(StructType *STy, raw_ostream &OS) {
@@ -421,10 +422,10 @@ static SlotTracker *createSlotTracker(const Value *V) {
if (!MD->isFunctionLocal())
return new SlotTracker(MD->getFunction());
- return new SlotTracker((Function *)0);
+ return new SlotTracker((Function *)nullptr);
}
- return 0;
+ return nullptr;
}
#if 0
@@ -436,21 +437,21 @@ static SlotTracker *createSlotTracker(const Value *V) {
// Module level constructor. Causes the contents of the Module (sans functions)
// to be added to the slot table.
SlotTracker::SlotTracker(const Module *M)
- : TheModule(M), TheFunction(0), FunctionProcessed(false),
+ : TheModule(M), TheFunction(nullptr), FunctionProcessed(false),
mNext(0), fNext(0), mdnNext(0), asNext(0) {
}
// Function level constructor. Causes the contents of the Module and the one
// function provided to be added to the slot table.
SlotTracker::SlotTracker(const Function *F)
- : TheModule(F ? F->getParent() : 0), TheFunction(F), FunctionProcessed(false),
- mNext(0), fNext(0), mdnNext(0), asNext(0) {
+ : TheModule(F ? F->getParent() : nullptr), TheFunction(F),
+ FunctionProcessed(false), mNext(0), fNext(0), mdnNext(0), asNext(0) {
}
inline void SlotTracker::initialize() {
if (TheModule) {
processModule();
- TheModule = 0; ///< Prevent re-processing next time we're called.
+ TheModule = nullptr; ///< Prevent re-processing next time we're called.
}
if (TheFunction && !FunctionProcessed)
@@ -525,7 +526,7 @@ void SlotTracker::processFunction() {
// optimizer.
if (const CallInst *CI = dyn_cast<CallInst>(I)) {
if (Function *F = CI->getCalledFunction())
- if (F->getName().startswith("llvm."))
+ if (F->isIntrinsic())
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
if (MDNode *N = dyn_cast_or_null<MDNode>(I->getOperand(i)))
CreateMetadataSlot(N);
@@ -560,7 +561,7 @@ void SlotTracker::processFunction() {
void SlotTracker::purgeFunction() {
ST_DEBUG("begin purgeFunction!\n");
fMap.clear(); // Simply discard the function level map
- TheFunction = 0;
+ TheFunction = nullptr;
FunctionProcessed = false;
ST_DEBUG("end purgeFunction!\n");
}
@@ -677,8 +678,6 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
SlotTracker *Machine,
const Module *Context);
-
-
static const char *getPredicateText(unsigned predicate) {
const char * pred = "unknown";
switch (predicate) {
@@ -813,8 +812,8 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
// output the string in hexadecimal format! Note that loading and storing
// floating point types changes the bits of NaNs on some hosts, notably
// x86, so we must not use these types.
- assert(sizeof(double) == sizeof(uint64_t) &&
- "assuming that double is 64 bits!");
+ static_assert(sizeof(double) == sizeof(uint64_t),
+ "assuming that double is 64 bits!");
char Buffer[40];
APFloat apf = CFP->getValueAPF();
// Halves and floats are represented in ASCII IR as double, convert.
@@ -1050,7 +1049,7 @@ static void WriteMDNodeBodyInternal(raw_ostream &Out, const MDNode *Node,
Out << "!{";
for (unsigned mi = 0, me = Node->getNumOperands(); mi != me; ++mi) {
const Value *V = Node->getOperand(mi);
- if (V == 0)
+ if (!V)
Out << "null";
else {
TypePrinter->print(V->getType(), Out);
@@ -1065,11 +1064,8 @@ static void WriteMDNodeBodyInternal(raw_ostream &Out, const MDNode *Node,
Out << "}";
}
-
-/// WriteAsOperand - Write the name of the specified value out to the specified
-/// ostream. This can be useful when you just want to print int %reg126, not
-/// the whole instruction that generated it.
-///
+// Full implementation of printing a Value as an operand with support for
+// TypePrinting, etc.
static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
TypePrinting *TypePrinter,
SlotTracker *Machine,
@@ -1131,12 +1127,6 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
return;
}
- if (V->getValueID() == Value::PseudoSourceValueVal ||
- V->getValueID() == Value::FixedStackPseudoSourceValueVal) {
- V->print(Out);
- return;
- }
-
char Prefix = '%';
int Slot;
// If we have a SlotTracker, use it.
@@ -1165,7 +1155,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
Slot = Machine->getLocalSlot(V);
}
delete Machine;
- Machine = 0;
+ Machine = nullptr;
} else {
Slot = -1;
}
@@ -1176,34 +1166,16 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
Out << "<badref>";
}
-void WriteAsOperand(raw_ostream &Out, const Value *V,
- bool PrintType, const Module *Context) {
-
- // Fast path: Don't construct and populate a TypePrinting object if we
- // won't be needing any types printed.
- if (!PrintType &&
- ((!isa<Constant>(V) && !isa<MDNode>(V)) ||
- V->hasName() || isa<GlobalValue>(V))) {
- WriteAsOperandInternal(Out, V, 0, 0, Context);
- return;
- }
-
- if (Context == 0) Context = getModuleFromVal(V);
-
- TypePrinting TypePrinter;
- if (Context)
- TypePrinter.incorporateTypes(*Context);
- if (PrintType) {
- TypePrinter.print(V->getType(), Out);
- Out << ' ';
- }
-
- WriteAsOperandInternal(Out, V, &TypePrinter, 0, Context);
-}
-
void AssemblyWriter::init() {
- if (TheModule)
- TypePrinter.incorporateTypes(*TheModule);
+ if (!TheModule)
+ return;
+ TypePrinter.incorporateTypes(*TheModule);
+ for (const Function &F : *TheModule)
+ if (const Comdat *C = F.getComdat())
+ Comdats.insert(C);
+ for (const GlobalVariable &GV : TheModule->globals())
+ if (const Comdat *C = GV.getComdat())
+ Comdats.insert(C);
}
@@ -1224,7 +1196,7 @@ AssemblyWriter::AssemblyWriter(formatted_raw_ostream &o, const Module *M,
AssemblyWriter::~AssemblyWriter() { }
void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType) {
- if (Operand == 0) {
+ if (!Operand) {
Out << "<null operand!>";
return;
}
@@ -1256,9 +1228,40 @@ void AssemblyWriter::writeAtomic(AtomicOrdering Ordering,
}
}
+void AssemblyWriter::writeAtomicCmpXchg(AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope) {
+ assert(SuccessOrdering != NotAtomic && FailureOrdering != NotAtomic);
+
+ switch (SynchScope) {
+ case SingleThread: Out << " singlethread"; break;
+ case CrossThread: break;
+ }
+
+ switch (SuccessOrdering) {
+ default: Out << " <bad ordering " << int(SuccessOrdering) << ">"; break;
+ case Unordered: Out << " unordered"; break;
+ case Monotonic: Out << " monotonic"; break;
+ case Acquire: Out << " acquire"; break;
+ case Release: Out << " release"; break;
+ case AcquireRelease: Out << " acq_rel"; break;
+ case SequentiallyConsistent: Out << " seq_cst"; break;
+ }
+
+ switch (FailureOrdering) {
+ default: Out << " <bad ordering " << int(FailureOrdering) << ">"; break;
+ case Unordered: Out << " unordered"; break;
+ case Monotonic: Out << " monotonic"; break;
+ case Acquire: Out << " acquire"; break;
+ case Release: Out << " release"; break;
+ case AcquireRelease: Out << " acq_rel"; break;
+ case SequentiallyConsistent: Out << " seq_cst"; break;
+ }
+}
+
void AssemblyWriter::writeParamOperand(const Value *Operand,
AttributeSet Attrs, unsigned Idx) {
- if (Operand == 0) {
+ if (!Operand) {
Out << "<null operand!>";
return;
}
@@ -1282,8 +1285,9 @@ void AssemblyWriter::printModule(const Module *M) {
M->getModuleIdentifier().find('\n') == std::string::npos)
Out << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
- if (!M->getDataLayout().empty())
- Out << "target datalayout = \"" << M->getDataLayout() << "\"\n";
+ const std::string &DL = M->getDataLayoutStr();
+ if (!DL.empty())
+ Out << "target datalayout = \"" << DL << "\"\n";
if (!M->getTargetTriple().empty())
Out << "target triple = \"" << M->getTargetTriple() << "\"\n";
@@ -1313,6 +1317,15 @@ void AssemblyWriter::printModule(const Module *M) {
printTypeIdentities();
+ // Output all comdats.
+ if (!Comdats.empty())
+ Out << '\n';
+ for (const Comdat *C : Comdats) {
+ printComdat(C);
+ if (C != Comdats.back())
+ Out << '\n';
+ }
+
// Output all globals.
if (!M->global_empty()) Out << '\n';
for (Module::const_global_iterator I = M->global_begin(), E = M->global_end();
@@ -1389,10 +1402,6 @@ static void PrintLinkage(GlobalValue::LinkageTypes LT,
switch (LT) {
case GlobalValue::ExternalLinkage: break;
case GlobalValue::PrivateLinkage: Out << "private "; break;
- case GlobalValue::LinkerPrivateLinkage: Out << "linker_private "; break;
- case GlobalValue::LinkerPrivateWeakLinkage:
- Out << "linker_private_weak ";
- break;
case GlobalValue::InternalLinkage: Out << "internal "; break;
case GlobalValue::LinkOnceAnyLinkage: Out << "linkonce "; break;
case GlobalValue::LinkOnceODRLinkage: Out << "linkonce_odr "; break;
@@ -1400,8 +1409,6 @@ static void PrintLinkage(GlobalValue::LinkageTypes LT,
case GlobalValue::WeakODRLinkage: Out << "weak_odr "; break;
case GlobalValue::CommonLinkage: Out << "common "; break;
case GlobalValue::AppendingLinkage: Out << "appending "; break;
- case GlobalValue::DLLImportLinkage: Out << "dllimport "; break;
- case GlobalValue::DLLExportLinkage: Out << "dllexport "; break;
case GlobalValue::ExternalWeakLinkage: Out << "extern_weak "; break;
case GlobalValue::AvailableExternallyLinkage:
Out << "available_externally ";
@@ -1419,6 +1426,15 @@ static void PrintVisibility(GlobalValue::VisibilityTypes Vis,
}
}
+static void PrintDLLStorageClass(GlobalValue::DLLStorageClassTypes SCT,
+ formatted_raw_ostream &Out) {
+ switch (SCT) {
+ case GlobalValue::DefaultStorageClass: break;
+ case GlobalValue::DLLImportStorageClass: Out << "dllimport "; break;
+ case GlobalValue::DLLExportStorageClass: Out << "dllexport "; break;
+ }
+}
+
static void PrintThreadLocalModel(GlobalVariable::ThreadLocalMode TLM,
formatted_raw_ostream &Out) {
switch (TLM) {
@@ -1451,11 +1467,13 @@ void AssemblyWriter::printGlobal(const GlobalVariable *GV) {
PrintLinkage(GV->getLinkage(), Out);
PrintVisibility(GV->getVisibility(), Out);
+ PrintDLLStorageClass(GV->getDLLStorageClass(), Out);
PrintThreadLocalModel(GV->getThreadLocalMode(), Out);
+ if (GV->hasUnnamedAddr())
+ Out << "unnamed_addr ";
if (unsigned AddressSpace = GV->getType()->getAddressSpace())
Out << "addrspace(" << AddressSpace << ") ";
- if (GV->hasUnnamedAddr()) Out << "unnamed_addr ";
if (GV->isExternallyInitialized()) Out << "externally_initialized ";
Out << (GV->isConstant() ? "constant " : "global ");
TypePrinter.print(GV->getType()->getElementType(), Out);
@@ -1470,6 +1488,10 @@ void AssemblyWriter::printGlobal(const GlobalVariable *GV) {
PrintEscapedString(GV->getSection(), Out);
Out << '"';
}
+ if (GV->hasComdat()) {
+ Out << ", comdat ";
+ PrintLLVMName(Out, GV->getComdat()->getName(), ComdatPrefix);
+ }
if (GV->getAlignment())
Out << ", align " << GV->getAlignment();
@@ -1488,6 +1510,10 @@ void AssemblyWriter::printAlias(const GlobalAlias *GA) {
Out << " = ";
}
PrintVisibility(GA->getVisibility(), Out);
+ PrintDLLStorageClass(GA->getDLLStorageClass(), Out);
+ PrintThreadLocalModel(GA->getThreadLocalMode(), Out);
+ if (GA->hasUnnamedAddr())
+ Out << "unnamed_addr ";
Out << "alias ";
@@ -1495,7 +1521,7 @@ void AssemblyWriter::printAlias(const GlobalAlias *GA) {
const Constant *Aliasee = GA->getAliasee();
- if (Aliasee == 0) {
+ if (!Aliasee) {
TypePrinter.print(GA->getType(), Out);
Out << " <<NULL ALIASEE>>";
} else {
@@ -1506,6 +1532,10 @@ void AssemblyWriter::printAlias(const GlobalAlias *GA) {
Out << '\n';
}
+void AssemblyWriter::printComdat(const Comdat *C) {
+ C->print(Out);
+}
+
void AssemblyWriter::printTypeIdentities() {
if (TypePrinter.NumberedTypes.empty() &&
TypePrinter.NamedTypes.empty())
@@ -1585,6 +1615,7 @@ void AssemblyWriter::printFunction(const Function *F) {
PrintLinkage(F->getLinkage(), Out);
PrintVisibility(F->getVisibility(), Out);
+ PrintDLLStorageClass(F->getDLLStorageClass(), Out);
// Print the calling convention.
if (F->getCallingConv() != CallingConv::C) {
@@ -1642,6 +1673,10 @@ void AssemblyWriter::printFunction(const Function *F) {
PrintEscapedString(F->getSection(), Out);
Out << '"';
}
+ if (F->hasComdat()) {
+ Out << " comdat ";
+ PrintLLVMName(Out, F->getComdat()->getName(), ComdatPrefix);
+ }
if (F->getAlignment())
Out << " align " << F->getAlignment();
if (F->hasGC())
@@ -1699,7 +1734,7 @@ void AssemblyWriter::printBasicBlock(const BasicBlock *BB) {
Out << "<badref>";
}
- if (BB->getParent() == 0) {
+ if (!BB->getParent()) {
Out.PadToColumn(50);
Out << "; Error: Block without parent!";
} else if (BB != &BB->getParent()->getEntryBlock()) { // Not the entry block?
@@ -1766,8 +1801,12 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
Out << '%' << SlotNum << " = ";
}
- if (isa<CallInst>(I) && cast<CallInst>(I).isTailCall())
- Out << "tail ";
+ if (const CallInst *CI = dyn_cast<CallInst>(&I)) {
+ if (CI->isMustTailCall())
+ Out << "musttail ";
+ else if (CI->isTailCall())
+ Out << "tail ";
+ }
// Print out the opcode...
Out << I.getOpcodeName();
@@ -1777,6 +1816,9 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
(isa<StoreInst>(I) && cast<StoreInst>(I).isAtomic()))
Out << " atomic";
+ if (isa<AtomicCmpXchgInst>(I) && cast<AtomicCmpXchgInst>(I).isWeak())
+ Out << " weak";
+
// If this is a volatile operation, print out the volatile marker.
if ((isa<LoadInst>(I) && cast<LoadInst>(I).isVolatile()) ||
(isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile()) ||
@@ -1796,7 +1838,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
writeAtomicRMWOperation(Out, RMWI->getOperation());
// Print out the type of the operands...
- const Value *Operand = I.getNumOperands() ? I.getOperand(0) : 0;
+ const Value *Operand = I.getNumOperands() ? I.getOperand(0) : nullptr;
// Special case conditional branches to swizzle the condition out to the front
if (isa<BranchInst>(I) && cast<BranchInst>(I).isConditional()) {
@@ -1965,6 +2007,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
} else if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
Out << ' ';
+ if (AI->isUsedWithInAlloca())
+ Out << "inalloca ";
TypePrinter.print(AI->getAllocatedType(), Out);
if (!AI->getArraySize() || AI->isArrayAllocation()) {
Out << ", ";
@@ -2035,7 +2079,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
if (SI->getAlignment())
Out << ", align " << SI->getAlignment();
} else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
- writeAtomic(CXI->getOrdering(), CXI->getSynchScope());
+ writeAtomicCmpXchg(CXI->getSuccessOrdering(), CXI->getFailureOrdering(),
+ CXI->getSynchScope());
} else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
writeAtomic(RMWI->getOrdering(), RMWI->getSynchScope());
} else if (const FenceInst *FI = dyn_cast<FenceInst>(&I)) {
@@ -2136,18 +2181,39 @@ void Module::print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW) const {
W.printModule(this);
}
-void NamedMDNode::print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW) const {
+void NamedMDNode::print(raw_ostream &ROS) const {
SlotTracker SlotTable(getParent());
formatted_raw_ostream OS(ROS);
- AssemblyWriter W(OS, SlotTable, getParent(), AAW);
+ AssemblyWriter W(OS, SlotTable, getParent(), nullptr);
W.printNamedMDNode(this);
}
-void Type::print(raw_ostream &OS) const {
- if (this == 0) {
- OS << "<null Type>";
- return;
+void Comdat::print(raw_ostream &ROS) const {
+ PrintLLVMName(ROS, getName(), ComdatPrefix);
+ ROS << " = comdat ";
+
+ switch (getSelectionKind()) {
+ case Comdat::Any:
+ ROS << "any";
+ break;
+ case Comdat::ExactMatch:
+ ROS << "exactmatch";
+ break;
+ case Comdat::Largest:
+ ROS << "largest";
+ break;
+ case Comdat::NoDuplicates:
+ ROS << "noduplicates";
+ break;
+ case Comdat::SameSize:
+ ROS << "samesize";
+ break;
}
+
+ ROS << '\n';
+}
+
+void Type::print(raw_ostream &OS) const {
TypePrinting TP;
TP.print(const_cast<Type*>(this), OS);
@@ -2159,24 +2225,20 @@ void Type::print(raw_ostream &OS) const {
}
}
-void Value::print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW) const {
- if (this == 0) {
- ROS << "printing a <null> value\n";
- return;
- }
+void Value::print(raw_ostream &ROS) const {
formatted_raw_ostream OS(ROS);
if (const Instruction *I = dyn_cast<Instruction>(this)) {
- const Function *F = I->getParent() ? I->getParent()->getParent() : 0;
+ const Function *F = I->getParent() ? I->getParent()->getParent() : nullptr;
SlotTracker SlotTable(F);
- AssemblyWriter W(OS, SlotTable, getModuleFromVal(I), AAW);
+ AssemblyWriter W(OS, SlotTable, getModuleFromVal(I), nullptr);
W.printInstruction(*I);
} else if (const BasicBlock *BB = dyn_cast<BasicBlock>(this)) {
SlotTracker SlotTable(BB->getParent());
- AssemblyWriter W(OS, SlotTable, getModuleFromVal(BB), AAW);
+ AssemblyWriter W(OS, SlotTable, getModuleFromVal(BB), nullptr);
W.printBasicBlock(BB);
} else if (const GlobalValue *GV = dyn_cast<GlobalValue>(this)) {
SlotTracker SlotTable(GV->getParent());
- AssemblyWriter W(OS, SlotTable, GV->getParent(), AAW);
+ AssemblyWriter W(OS, SlotTable, GV->getParent(), nullptr);
if (const GlobalVariable *V = dyn_cast<GlobalVariable>(GV))
W.printGlobal(V);
else if (const Function *F = dyn_cast<Function>(GV))
@@ -2186,26 +2248,43 @@ void Value::print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW) const {
} else if (const MDNode *N = dyn_cast<MDNode>(this)) {
const Function *F = N->getFunction();
SlotTracker SlotTable(F);
- AssemblyWriter W(OS, SlotTable, F ? F->getParent() : 0, AAW);
+ AssemblyWriter W(OS, SlotTable, F ? F->getParent() : nullptr, nullptr);
W.printMDNodeBody(N);
} else if (const Constant *C = dyn_cast<Constant>(this)) {
TypePrinting TypePrinter;
TypePrinter.print(C->getType(), OS);
OS << ' ';
- WriteConstantInternal(OS, C, TypePrinter, 0, 0);
+ WriteConstantInternal(OS, C, TypePrinter, nullptr, nullptr);
} else if (isa<InlineAsm>(this) || isa<MDString>(this) ||
isa<Argument>(this)) {
- WriteAsOperand(OS, this, true, 0);
+ this->printAsOperand(OS);
} else {
- // Otherwise we don't know what it is. Call the virtual function to
- // allow a subclass to print itself.
- printCustom(OS);
+ llvm_unreachable("Unknown value to print out!");
}
}
-// Value::printCustom - subclasses should override this to implement printing.
-void Value::printCustom(raw_ostream &OS) const {
- llvm_unreachable("Unknown value to print out!");
+void Value::printAsOperand(raw_ostream &O, bool PrintType, const Module *M) const {
+ // Fast path: Don't construct and populate a TypePrinting object if we
+ // won't be needing any types printed.
+ if (!PrintType &&
+ ((!isa<Constant>(this) && !isa<MDNode>(this)) ||
+ hasName() || isa<GlobalValue>(this))) {
+ WriteAsOperandInternal(O, this, nullptr, nullptr, M);
+ return;
+ }
+
+ if (!M)
+ M = getModuleFromVal(this);
+
+ TypePrinting TypePrinter;
+ if (M)
+ TypePrinter.incorporateTypes(*M);
+ if (PrintType) {
+ TypePrinter.print(getType(), O);
+ O << ' ';
+ }
+
+ WriteAsOperandInternal(O, this, &TypePrinter, nullptr, M);
}
// Value::dump - allow easy printing of Values from the debugger.
@@ -2215,7 +2294,10 @@ void Value::dump() const { print(dbgs()); dbgs() << '\n'; }
void Type::dump() const { print(dbgs()); }
// Module::dump() - Allow printing of Modules from the debugger.
-void Module::dump() const { print(dbgs(), 0); }
+void Module::dump() const { print(dbgs(), nullptr); }
+
+// \brief Allow printing of Comdats from the debugger.
+void Comdat::dump() const { print(dbgs()); }
// NamedMDNode::dump() - Allow printing of NamedMDNodes from the debugger.
-void NamedMDNode::dump() const { print(dbgs(), 0); }
+void NamedMDNode::dump() const { print(dbgs()); }
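The AsmWriter changes above fold the old free function WriteAsOperand into the new Value::printAsOperand member. A small usage sketch of the migration; whether the trailing parameters carry default arguments is not visible in this hunk, so they are passed explicitly here.

#include "llvm/IR/Module.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/raw_ostream.h"

// Old call site: WriteAsOperand(OS, V, /*PrintType=*/true, M);
static void printOperandExample(llvm::raw_ostream &OS, const llvm::Value *V,
                                const llvm::Module *M) {
  V->printAsOperand(OS, /*PrintType=*/true, M); // prints e.g. "i32 %x"
}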
diff --git a/contrib/llvm/lib/IR/AsmWriter.h b/contrib/llvm/lib/IR/AsmWriter.h
index 8f4a377..aef9c8a 100644
--- a/contrib/llvm/lib/IR/AsmWriter.h
+++ b/contrib/llvm/lib/IR/AsmWriter.h
@@ -16,7 +16,7 @@
#define LLVM_IR_ASSEMBLYWRITER_H
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/TypeFinder.h"
@@ -27,6 +27,7 @@ namespace llvm {
class BasicBlock;
class Function;
class GlobalValue;
+class Comdat;
class Module;
class NamedMDNode;
class Value;
@@ -67,10 +68,11 @@ protected:
const Module *TheModule;
private:
- OwningPtr<SlotTracker> ModuleSlotTracker;
+ std::unique_ptr<SlotTracker> ModuleSlotTracker;
SlotTracker &Machine;
TypePrinting TypePrinter;
AssemblyAnnotationWriter *AnnotationWriter;
+ SetVector<const Comdat *> Comdats;
public:
/// Construct an AssemblyWriter with an external SlotTracker
@@ -91,6 +93,9 @@ public:
void writeOperand(const Value *Op, bool PrintType);
void writeParamOperand(const Value *Operand, AttributeSet Attrs,unsigned Idx);
void writeAtomic(AtomicOrdering Ordering, SynchronizationScope SynchScope);
+ void writeAtomicCmpXchg(AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope);
void writeAllMDNodes();
void writeMDNode(unsigned Slot, const MDNode *Node);
@@ -99,6 +104,7 @@ public:
void printTypeIdentities();
void printGlobal(const GlobalVariable *GV);
void printAlias(const GlobalAlias *GV);
+ void printComdat(const Comdat *C);
void printFunction(const Function *F);
void printArgument(const Argument *FA, AttributeSet Attrs, unsigned Idx);
void printBasicBlock(const BasicBlock *BB);
diff --git a/contrib/llvm/lib/IR/AttributeImpl.h b/contrib/llvm/lib/IR/AttributeImpl.h
index ea954ac..cc6d557 100644
--- a/contrib/llvm/lib/IR/AttributeImpl.h
+++ b/contrib/llvm/lib/IR/AttributeImpl.h
@@ -39,7 +39,7 @@ class AttributeImpl : public FoldingSetNode {
protected:
enum AttrEntryKind {
EnumAttrEntry,
- AlignAttrEntry,
+ IntAttrEntry,
StringAttrEntry
};
@@ -49,7 +49,7 @@ public:
virtual ~AttributeImpl();
bool isEnumAttribute() const { return KindID == EnumAttrEntry; }
- bool isAlignAttribute() const { return KindID == AlignAttrEntry; }
+ bool isIntAttribute() const { return KindID == IntAttrEntry; }
bool isStringAttribute() const { return KindID == StringAttrEntry; }
bool hasAttribute(Attribute::AttrKind A) const;
@@ -67,7 +67,7 @@ public:
void Profile(FoldingSetNodeID &ID) const {
if (isEnumAttribute())
Profile(ID, getKindAsEnum(), 0);
- else if (isAlignAttribute())
+ else if (isIntAttribute())
Profile(ID, getKindAsEnum(), getValueAsInt());
else
Profile(ID, getKindAsString(), getValueAsString());
@@ -108,19 +108,20 @@ public:
Attribute::AttrKind getEnumKind() const { return Kind; }
};
-class AlignAttributeImpl : public EnumAttributeImpl {
- virtual void anchor();
- unsigned Align;
+class IntAttributeImpl : public EnumAttributeImpl {
+ void anchor() override;
+ uint64_t Val;
public:
- AlignAttributeImpl(Attribute::AttrKind Kind, unsigned Align)
- : EnumAttributeImpl(AlignAttrEntry, Kind), Align(Align) {
+ IntAttributeImpl(Attribute::AttrKind Kind, uint64_t Val)
+ : EnumAttributeImpl(IntAttrEntry, Kind), Val(Val) {
assert(
- (Kind == Attribute::Alignment || Kind == Attribute::StackAlignment) &&
- "Wrong kind for alignment attribute!");
+ (Kind == Attribute::Alignment || Kind == Attribute::StackAlignment ||
+ Kind == Attribute::Dereferenceable) &&
+ "Wrong kind for int attribute!");
}
- unsigned getAlignment() const { return Align; }
+ uint64_t getValue() const { return Val; }
};
class StringAttributeImpl : public AttributeImpl {
@@ -164,6 +165,7 @@ public:
unsigned getAlignment() const;
unsigned getStackAlignment() const;
+ uint64_t getDereferenceableBytes() const;
std::string getAsString(bool InAttrGrp) const;
typedef const Attribute *iterator;
diff --git a/contrib/llvm/lib/IR/Attributes.cpp b/contrib/llvm/lib/IR/Attributes.cpp
index 0f2b7a0..04545ea 100644
--- a/contrib/llvm/lib/IR/Attributes.cpp
+++ b/contrib/llvm/lib/IR/Attributes.cpp
@@ -16,6 +16,7 @@
#include "llvm/IR/Attributes.h"
#include "AttributeImpl.h"
#include "LLVMContextImpl.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Atomic.h"
@@ -46,7 +47,7 @@ Attribute Attribute::get(LLVMContext &Context, Attribute::AttrKind Kind,
if (!Val)
PA = new EnumAttributeImpl(Kind);
else
- PA = new AlignAttributeImpl(Kind, Val);
+ PA = new IntAttributeImpl(Kind, Val);
pImpl->AttrsSet.InsertNode(PA, InsertPoint);
}
@@ -87,6 +88,12 @@ Attribute Attribute::getWithStackAlignment(LLVMContext &Context,
return get(Context, StackAlignment, Align);
}
+Attribute Attribute::getWithDereferenceableBytes(LLVMContext &Context,
+ uint64_t Bytes) {
+ assert(Bytes && "Bytes must be non-zero.");
+ return get(Context, Dereferenceable, Bytes);
+}
+
//===----------------------------------------------------------------------===//
// Attribute Accessor Methods
//===----------------------------------------------------------------------===//
@@ -95,8 +102,8 @@ bool Attribute::isEnumAttribute() const {
return pImpl && pImpl->isEnumAttribute();
}
-bool Attribute::isAlignAttribute() const {
- return pImpl && pImpl->isAlignAttribute();
+bool Attribute::isIntAttribute() const {
+ return pImpl && pImpl->isIntAttribute();
}
bool Attribute::isStringAttribute() const {
@@ -105,15 +112,15 @@ bool Attribute::isStringAttribute() const {
Attribute::AttrKind Attribute::getKindAsEnum() const {
if (!pImpl) return None;
- assert((isEnumAttribute() || isAlignAttribute()) &&
+ assert((isEnumAttribute() || isIntAttribute()) &&
"Invalid attribute type to get the kind as an enum!");
return pImpl ? pImpl->getKindAsEnum() : None;
}
uint64_t Attribute::getValueAsInt() const {
if (!pImpl) return 0;
- assert(isAlignAttribute() &&
- "Expected the attribute to be an alignment attribute!");
+ assert(isIntAttribute() &&
+ "Expected the attribute to be an integer attribute!");
return pImpl ? pImpl->getValueAsInt() : 0;
}
@@ -155,6 +162,14 @@ unsigned Attribute::getStackAlignment() const {
return pImpl->getValueAsInt();
}
+/// This returns the number of dereferenceable bytes.
+uint64_t Attribute::getDereferenceableBytes() const {
+ assert(hasAttribute(Attribute::Dereferenceable) &&
+ "Trying to get dereferenceable bytes from "
+ "non-dereferenceable attribute!");
+ return pImpl->getValueAsInt();
+}
+
std::string Attribute::getAsString(bool InAttrGrp) const {
if (!pImpl) return "";
@@ -166,10 +181,14 @@ std::string Attribute::getAsString(bool InAttrGrp) const {
return "builtin";
if (hasAttribute(Attribute::ByVal))
return "byval";
+ if (hasAttribute(Attribute::InAlloca))
+ return "inalloca";
if (hasAttribute(Attribute::InlineHint))
return "inlinehint";
if (hasAttribute(Attribute::InReg))
return "inreg";
+ if (hasAttribute(Attribute::JumpTable))
+ return "jumptable";
if (hasAttribute(Attribute::MinSize))
return "minsize";
if (hasAttribute(Attribute::Naked))
@@ -190,6 +209,8 @@ std::string Attribute::getAsString(bool InAttrGrp) const {
return "noinline";
if (hasAttribute(Attribute::NonLazyBind))
return "nonlazybind";
+ if (hasAttribute(Attribute::NonNull))
+ return "nonnull";
if (hasAttribute(Attribute::NoRedZone))
return "noredzone";
if (hasAttribute(Attribute::NoReturn))
@@ -256,6 +277,20 @@ std::string Attribute::getAsString(bool InAttrGrp) const {
return Result;
}
+ if (hasAttribute(Attribute::Dereferenceable)) {
+ std::string Result;
+ Result += "dereferenceable";
+ if (InAttrGrp) {
+ Result += "=";
+ Result += utostr(getValueAsInt());
+ } else {
+ Result += "(";
+ Result += utostr(getValueAsInt());
+ Result += ")";
+ }
+ return Result;
+ }
+
// Convert target-dependent attributes to strings of the form:
//
// "kind"
@@ -286,10 +321,10 @@ bool Attribute::operator<(Attribute A) const {
// AttributeImpl Definition
//===----------------------------------------------------------------------===//
-// Pin the vtabels to this file.
+// Pin the vtables to this file.
AttributeImpl::~AttributeImpl() {}
void EnumAttributeImpl::anchor() {}
-void AlignAttributeImpl::anchor() {}
+void IntAttributeImpl::anchor() {}
void StringAttributeImpl::anchor() {}
bool AttributeImpl::hasAttribute(Attribute::AttrKind A) const {
@@ -303,13 +338,13 @@ bool AttributeImpl::hasAttribute(StringRef Kind) const {
}
Attribute::AttrKind AttributeImpl::getKindAsEnum() const {
- assert(isEnumAttribute() || isAlignAttribute());
+ assert(isEnumAttribute() || isIntAttribute());
return static_cast<const EnumAttributeImpl *>(this)->getEnumKind();
}
uint64_t AttributeImpl::getValueAsInt() const {
- assert(isAlignAttribute());
- return static_cast<const AlignAttributeImpl *>(this)->getAlignment();
+ assert(isIntAttribute());
+ return static_cast<const IntAttributeImpl *>(this)->getValue();
}
StringRef AttributeImpl::getKindAsString() const {
@@ -327,18 +362,18 @@ bool AttributeImpl::operator<(const AttributeImpl &AI) const {
// relative to their enum value) and then strings.
if (isEnumAttribute()) {
if (AI.isEnumAttribute()) return getKindAsEnum() < AI.getKindAsEnum();
- if (AI.isAlignAttribute()) return true;
+ if (AI.isIntAttribute()) return true;
if (AI.isStringAttribute()) return true;
}
- if (isAlignAttribute()) {
+ if (isIntAttribute()) {
if (AI.isEnumAttribute()) return false;
- if (AI.isAlignAttribute()) return getValueAsInt() < AI.getValueAsInt();
+ if (AI.isIntAttribute()) return getValueAsInt() < AI.getValueAsInt();
if (AI.isStringAttribute()) return true;
}
if (AI.isEnumAttribute()) return false;
- if (AI.isAlignAttribute()) return false;
+ if (AI.isIntAttribute()) return false;
if (getKindAsString() == AI.getKindAsString())
return getValueAsString() < AI.getValueAsString();
return getKindAsString() < AI.getKindAsString();
@@ -388,6 +423,11 @@ uint64_t AttributeImpl::getAttrMask(Attribute::AttrKind Val) {
case Attribute::Cold: return 1ULL << 40;
case Attribute::Builtin: return 1ULL << 41;
case Attribute::OptimizeNone: return 1ULL << 42;
+ case Attribute::InAlloca: return 1ULL << 43;
+ case Attribute::NonNull: return 1ULL << 44;
+ case Attribute::JumpTable: return 1ULL << 45;
+ case Attribute::Dereferenceable:
+ llvm_unreachable("dereferenceable attribute not supported in raw format");
}
llvm_unreachable("Unsupported attribute type");
}
@@ -399,7 +439,7 @@ uint64_t AttributeImpl::getAttrMask(Attribute::AttrKind Val) {
AttributeSetNode *AttributeSetNode::get(LLVMContext &C,
ArrayRef<Attribute> Attrs) {
if (Attrs.empty())
- return 0;
+ return nullptr;
// Otherwise, build a key to look up the existing attributes.
LLVMContextImpl *pImpl = C.pImpl;
@@ -472,6 +512,13 @@ unsigned AttributeSetNode::getStackAlignment() const {
return 0;
}
+uint64_t AttributeSetNode::getDereferenceableBytes() const {
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ if (I->hasAttribute(Attribute::Dereferenceable))
+ return I->getDereferenceableBytes();
+ return 0;
+}
+
std::string AttributeSetNode::getAsString(bool InAttrGrp) const {
std::string Str;
for (iterator I = begin(), E = end(); I != E; ++I) {
@@ -505,6 +552,8 @@ uint64_t AttributeSetImpl::Raw(unsigned Index) const {
Mask |= (Log2_32(ASN->getAlignment()) + 1) << 16;
else if (Kind == Attribute::StackAlignment)
Mask |= (Log2_32(ASN->getStackAlignment()) + 1) << 26;
+ else if (Kind == Attribute::Dereferenceable)
+ llvm_unreachable("dereferenceable not supported in bit mask");
else
Mask |= AttributeImpl::getAttrMask(Kind);
}
@@ -592,7 +641,8 @@ AttributeSet AttributeSet::get(LLVMContext &C,
return getImpl(C, Attrs);
}
-AttributeSet AttributeSet::get(LLVMContext &C, unsigned Index, AttrBuilder &B) {
+AttributeSet AttributeSet::get(LLVMContext &C, unsigned Index,
+ const AttrBuilder &B) {
if (!B.hasAttributes())
return AttributeSet();
@@ -609,14 +659,18 @@ AttributeSet AttributeSet::get(LLVMContext &C, unsigned Index, AttrBuilder &B) {
else if (Kind == Attribute::StackAlignment)
Attrs.push_back(std::make_pair(Index, Attribute::
getWithStackAlignment(C, B.getStackAlignment())));
+ else if (Kind == Attribute::Dereferenceable)
+ Attrs.push_back(std::make_pair(Index,
+ Attribute::getWithDereferenceableBytes(C,
+ B.getDereferenceableBytes())));
else
Attrs.push_back(std::make_pair(Index, Attribute::get(C, Kind)));
}
// Add target-dependent (string) attributes.
- for (AttrBuilder::td_iterator I = B.td_begin(), E = B.td_end();
- I != E; ++I)
- Attrs.push_back(std::make_pair(Index, Attribute::get(C, I->first,I->second)));
+ for (const AttrBuilder::td_type &TDA : B.td_attrs())
+ Attrs.push_back(
+ std::make_pair(Index, Attribute::get(C, TDA.first, TDA.second)));
return get(C, Attrs);
}
@@ -833,7 +887,7 @@ bool AttributeSet::hasAttributes(unsigned Index) const {
/// \brief Return true if the specified attribute is set for at least one
/// parameter or for the return value.
bool AttributeSet::hasAttrSomewhere(Attribute::AttrKind Attr) const {
- if (pImpl == 0) return false;
+ if (!pImpl) return false;
for (unsigned I = 0, E = pImpl->getNumAttributes(); I != E; ++I)
for (AttributeSetImpl::iterator II = pImpl->begin(I),
@@ -866,6 +920,11 @@ unsigned AttributeSet::getStackAlignment(unsigned Index) const {
return ASN ? ASN->getStackAlignment() : 0;
}
+uint64_t AttributeSet::getDereferenceableBytes(unsigned Index) const {
+ AttributeSetNode *ASN = getAttributes(Index);
+ return ASN ? ASN->getDereferenceableBytes() : 0;
+}
+
std::string AttributeSet::getAsString(unsigned Index,
bool InAttrGrp) const {
AttributeSetNode *ASN = getAttributes(Index);
@@ -874,14 +933,14 @@ std::string AttributeSet::getAsString(unsigned Index,
/// \brief The attributes for the specified index are returned.
AttributeSetNode *AttributeSet::getAttributes(unsigned Index) const {
- if (!pImpl) return 0;
+ if (!pImpl) return nullptr;
// Loop through to find the attribute node we want.
for (unsigned I = 0, E = pImpl->getNumAttributes(); I != E; ++I)
if (pImpl->getSlotIndex(I) == Index)
return pImpl->getSlotNode(I);
- return 0;
+ return nullptr;
}
AttributeSet::iterator AttributeSet::begin(unsigned Slot) const {
@@ -945,7 +1004,7 @@ void AttributeSet::dump() const {
//===----------------------------------------------------------------------===//
AttrBuilder::AttrBuilder(AttributeSet AS, unsigned Index)
- : Attrs(0), Alignment(0), StackAlignment(0) {
+ : Attrs(0), Alignment(0), StackAlignment(0), DerefBytes(0) {
AttributeSetImpl *pImpl = AS.pImpl;
if (!pImpl) return;
@@ -962,13 +1021,14 @@ AttrBuilder::AttrBuilder(AttributeSet AS, unsigned Index)
void AttrBuilder::clear() {
Attrs.reset();
- Alignment = StackAlignment = 0;
+ Alignment = StackAlignment = DerefBytes = 0;
}
AttrBuilder &AttrBuilder::addAttribute(Attribute::AttrKind Val) {
assert((unsigned)Val < Attribute::EndAttrKinds && "Attribute out of range!");
assert(Val != Attribute::Alignment && Val != Attribute::StackAlignment &&
- "Adding alignment attribute without adding alignment value!");
+ Val != Attribute::Dereferenceable &&
+ "Adding integer attribute without adding a value!");
Attrs[Val] = true;
return *this;
}
@@ -986,6 +1046,8 @@ AttrBuilder &AttrBuilder::addAttribute(Attribute Attr) {
Alignment = Attr.getAlignment();
else if (Kind == Attribute::StackAlignment)
StackAlignment = Attr.getStackAlignment();
+ else if (Kind == Attribute::Dereferenceable)
+ DerefBytes = Attr.getDereferenceableBytes();
return *this;
}
@@ -1002,6 +1064,8 @@ AttrBuilder &AttrBuilder::removeAttribute(Attribute::AttrKind Val) {
Alignment = 0;
else if (Val == Attribute::StackAlignment)
StackAlignment = 0;
+ else if (Val == Attribute::Dereferenceable)
+ DerefBytes = 0;
return *this;
}
@@ -1018,7 +1082,7 @@ AttrBuilder &AttrBuilder::removeAttributes(AttributeSet A, uint64_t Index) {
for (AttributeSet::iterator I = A.begin(Slot), E = A.end(Slot); I != E; ++I) {
Attribute Attr = *I;
- if (Attr.isEnumAttribute() || Attr.isAlignAttribute()) {
+ if (Attr.isEnumAttribute() || Attr.isIntAttribute()) {
Attribute::AttrKind Kind = I->getKindAsEnum();
Attrs[Kind] = false;
@@ -1026,6 +1090,8 @@ AttrBuilder &AttrBuilder::removeAttributes(AttributeSet A, uint64_t Index) {
Alignment = 0;
else if (Kind == Attribute::StackAlignment)
StackAlignment = 0;
+ else if (Kind == Attribute::Dereferenceable)
+ DerefBytes = 0;
} else {
assert(Attr.isStringAttribute() && "Invalid attribute type!");
std::map<std::string, std::string>::iterator
@@ -1068,6 +1134,14 @@ AttrBuilder &AttrBuilder::addStackAlignmentAttr(unsigned Align) {
return *this;
}
+AttrBuilder &AttrBuilder::addDereferenceableAttr(uint64_t Bytes) {
+ if (Bytes == 0) return *this;
+
+ Attrs[Attribute::Dereferenceable] = true;
+ DerefBytes = Bytes;
+ return *this;
+}
+
AttrBuilder &AttrBuilder::merge(const AttrBuilder &B) {
// FIXME: What if both have alignments, but they don't match?!
if (!Alignment)
@@ -1076,6 +1150,9 @@ AttrBuilder &AttrBuilder::merge(const AttrBuilder &B) {
if (!StackAlignment)
StackAlignment = B.StackAlignment;
+ if (!DerefBytes)
+ DerefBytes = B.DerefBytes;
+
Attrs |= B.Attrs;
for (td_const_iterator I = B.TargetDepAttrs.begin(),
@@ -1106,7 +1183,7 @@ bool AttrBuilder::hasAttributes(AttributeSet A, uint64_t Index) const {
for (AttributeSet::iterator I = A.begin(Slot), E = A.end(Slot);
I != E; ++I) {
Attribute Attr = *I;
- if (Attr.isEnumAttribute() || Attr.isAlignAttribute()) {
+ if (Attr.isEnumAttribute() || Attr.isIntAttribute()) {
if (Attrs[I->getKindAsEnum()])
return true;
} else {
@@ -1131,7 +1208,8 @@ bool AttrBuilder::operator==(const AttrBuilder &B) {
if (B.TargetDepAttrs.find(I->first) == B.TargetDepAttrs.end())
return false;
- return Alignment == B.Alignment && StackAlignment == B.StackAlignment;
+ return Alignment == B.Alignment && StackAlignment == B.StackAlignment &&
+ DerefBytes == B.DerefBytes;
}
AttrBuilder &AttrBuilder::addRawValue(uint64_t Val) {
@@ -1140,6 +1218,8 @@ AttrBuilder &AttrBuilder::addRawValue(uint64_t Val) {
for (Attribute::AttrKind I = Attribute::None; I != Attribute::EndAttrKinds;
I = Attribute::AttrKind(I + 1)) {
+ if (I == Attribute::Dereferenceable)
+ continue;
if (uint64_t A = (Val & AttributeImpl::getAttrMask(I))) {
Attrs[I] = true;
@@ -1172,9 +1252,12 @@ AttributeSet AttributeFuncs::typeIncompatible(Type *Ty, uint64_t Index) {
.addAttribute(Attribute::Nest)
.addAttribute(Attribute::NoAlias)
.addAttribute(Attribute::NoCapture)
+ .addAttribute(Attribute::NonNull)
+ .addDereferenceableAttr(1) // the int here is ignored
.addAttribute(Attribute::ReadNone)
.addAttribute(Attribute::ReadOnly)
- .addAttribute(Attribute::StructRet);
+ .addAttribute(Attribute::StructRet)
+ .addAttribute(Attribute::InAlloca);
return AttributeSet::get(Ty->getContext(), Index, Incompatible);
}
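An illustrative sketch, not from the patch itself: attaching the new dereferenceable(N) argument attribute through the AttrBuilder support added above, assuming LLVM 3.5's Function::addAttributes(unsigned, AttributeSet); markArgDereferenceable is a hypothetical helper name.

    // Sketch only: mark argument ArgNo of F as dereferenceable for Bytes bytes.
    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/LLVMContext.h"
    using namespace llvm;

    void markArgDereferenceable(Function &F, unsigned ArgNo, uint64_t Bytes) {
      LLVMContext &Ctx = F.getContext();
      AttrBuilder B;
      B.addDereferenceableAttr(Bytes);   // records Bytes and sets the attribute bit
      // Attribute indices are 1-based for parameters; index 0 is the return value.
      F.addAttributes(ArgNo + 1, AttributeSet::get(Ctx, ArgNo + 1, B));
    }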
diff --git a/contrib/llvm/lib/IR/AutoUpgrade.cpp b/contrib/llvm/lib/IR/AutoUpgrade.cpp
index d12bf7b..459bd88 100644
--- a/contrib/llvm/lib/IR/AutoUpgrade.cpp
+++ b/contrib/llvm/lib/IR/AutoUpgrade.cpp
@@ -11,17 +11,18 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/AutoUpgrade.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/AutoUpgrade.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
-#include "llvm/Support/CFG.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
#include <cstring>
using namespace llvm;
@@ -113,8 +114,11 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
Name == "x86.avx.movnt.pd.256" ||
Name == "x86.avx.movnt.ps.256" ||
Name == "x86.sse42.crc32.64.8" ||
+ Name == "x86.avx.vbroadcast.ss" ||
+ Name == "x86.avx.vbroadcast.ss.256" ||
+ Name == "x86.avx.vbroadcast.sd.256" ||
(Name.startswith("x86.xop.vpcom") && F->arg_size() == 2)) {
- NewFn = 0;
+ NewFn = nullptr;
return true;
}
// SSE4.1 ptest functions may have an old signature.
@@ -157,7 +161,7 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
}
bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
- NewFn = 0;
+ NewFn = nullptr;
bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
// Upgrade intrinsic attributes. This does not change the function.
@@ -169,7 +173,62 @@ bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
return Upgraded;
}
+static bool UpgradeGlobalStructors(GlobalVariable *GV) {
+ ArrayType *ATy = dyn_cast<ArrayType>(GV->getType()->getElementType());
+ StructType *OldTy =
+ ATy ? dyn_cast<StructType>(ATy->getElementType()) : nullptr;
+
+ // Only upgrade an array of a two field struct with the appropriate field
+ // types.
+ if (!OldTy || OldTy->getNumElements() != 2)
+ return false;
+
+ // Get the upgraded 3 element type.
+ PointerType *VoidPtrTy = Type::getInt8Ty(GV->getContext())->getPointerTo();
+ Type *Tys[3] = {
+ OldTy->getElementType(0),
+ OldTy->getElementType(1),
+ VoidPtrTy
+ };
+ StructType *NewTy =
+ StructType::get(GV->getContext(), Tys, /*isPacked=*/false);
+
+ // Build new constants with a null third field filled in.
+ Constant *OldInitC = GV->getInitializer();
+ ConstantArray *OldInit = dyn_cast<ConstantArray>(OldInitC);
+ if (!OldInit && !isa<ConstantAggregateZero>(OldInitC))
+ return false;
+ std::vector<Constant *> Initializers;
+ if (OldInit) {
+ for (Use &U : OldInit->operands()) {
+ ConstantStruct *Init = cast<ConstantStruct>(&U);
+ Constant *NewInit =
+ ConstantStruct::get(NewTy, Init->getOperand(0), Init->getOperand(1),
+ Constant::getNullValue(VoidPtrTy), nullptr);
+ Initializers.push_back(NewInit);
+ }
+ }
+ assert(Initializers.size() == ATy->getNumElements());
+
+ // Replace the old GV with a new one.
+ ATy = ArrayType::get(NewTy, Initializers.size());
+ Constant *NewInit = ConstantArray::get(ATy, Initializers);
+ GlobalVariable *NewGV = new GlobalVariable(
+ *GV->getParent(), ATy, GV->isConstant(), GV->getLinkage(), NewInit, "",
+ GV, GV->getThreadLocalMode(), GV->getType()->getAddressSpace(),
+ GV->isExternallyInitialized());
+ NewGV->copyAttributesFrom(GV);
+ NewGV->takeName(GV);
+ assert(GV->use_empty() && "program cannot use initializer list");
+ GV->eraseFromParent();
+ return true;
+}
+
bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
+ if (GV->getName() == "llvm.global_ctors" ||
+ GV->getName() == "llvm.global_dtors")
+ return UpgradeGlobalStructors(GV);
+
// Nothing to do yet.
return false;
}
@@ -279,6 +338,19 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
Rep = Builder.CreateCall2(CRC32, Trunc0, CI->getArgOperand(1));
Rep = Builder.CreateZExt(Rep, CI->getType(), "");
+ } else if (Name.startswith("llvm.x86.avx.vbroadcast")) {
+ // Replace broadcasts with a series of insertelements.
+ Type *VecTy = CI->getType();
+ Type *EltTy = VecTy->getVectorElementType();
+ unsigned EltNum = VecTy->getVectorNumElements();
+ Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0),
+ EltTy->getPointerTo());
+ Value *Load = Builder.CreateLoad(Cast);
+ Type *I32Ty = Type::getInt32Ty(C);
+ Rep = UndefValue::get(VecTy);
+ for (unsigned I = 0; I < EltNum; ++I)
+ Rep = Builder.CreateInsertElement(Rep, Load,
+ ConstantInt::get(I32Ty, I));
} else {
bool PD128 = false, PD256 = false, PS128 = false, PS256 = false;
if (Name == "llvm.x86.avx.vpermil.pd.256")
@@ -410,7 +482,7 @@ void llvm::UpgradeCallsToIntrinsic(Function* F) {
if (UpgradeIntrinsicFunction(F, NewFn)) {
if (NewFn != F) {
// Replace all uses of the old function with the new one if necessary.
- for (Value::use_iterator UI = F->use_begin(), UE = F->use_end();
+ for (Value::user_iterator UI = F->user_begin(), UE = F->user_end();
UI != UE; ) {
if (CallInst *CI = dyn_cast<CallInst>(*UI++))
UpgradeIntrinsicCall(CI, NewFn);
@@ -452,9 +524,9 @@ void llvm::UpgradeInstWithTBAATag(Instruction *I) {
Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
Instruction *&Temp) {
if (Opc != Instruction::BitCast)
- return 0;
+ return nullptr;
- Temp = 0;
+ Temp = nullptr;
Type *SrcTy = V->getType();
if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
@@ -468,12 +540,12 @@ Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
return CastInst::Create(Instruction::IntToPtr, Temp, DestTy);
}
- return 0;
+ return nullptr;
}
Value *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) {
if (Opc != Instruction::BitCast)
- return 0;
+ return nullptr;
Type *SrcTy = C->getType();
if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
@@ -488,14 +560,29 @@ Value *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) {
DestTy);
}
- return 0;
+ return nullptr;
}
/// Check the debug info version number; if it is out-dated, drop the debug
/// info. Return true if the module is modified.
bool llvm::UpgradeDebugInfo(Module &M) {
- if (getDebugMetadataVersionFromModule(M) == DEBUG_METADATA_VERSION)
+ unsigned Version = getDebugMetadataVersionFromModule(M);
+ if (Version == DEBUG_METADATA_VERSION)
return false;
- return StripDebugInfo(M);
+ bool RetCode = StripDebugInfo(M);
+ if (RetCode) {
+ DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
+ M.getContext().diagnose(DiagVersion);
+ }
+ return RetCode;
+}
+
+void llvm::UpgradeMDStringConstant(std::string &String) {
+ const std::string OldPrefix = "llvm.vectorizer.";
+ if (String == "llvm.vectorizer.unroll") {
+ String = "llvm.loop.interleave.count";
+ } else if (String.find(OldPrefix) == 0) {
+ String.replace(0, OldPrefix.size(), "llvm.loop.vectorize.");
+ }
}
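An illustrative sketch, not from the patch itself, of how a loader might drive the upgrade hooks touched above (UpgradeCallsToIntrinsic, UpgradeGlobalVariable, UpgradeDebugInfo), using the LLVM 3.5 signatures shown in this diff; upgradeModuleInPlace is a hypothetical helper name.

    // Sketch only: apply the auto-upgrade hooks to an old module in place.
    #include "llvm/IR/AutoUpgrade.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    void upgradeModuleInPlace(Module &M) {
      // Rewrite calls to renamed/removed intrinsics (e.g. x86.avx.vbroadcast.*).
      // Increment before the call: the hook may erase the old function.
      for (Module::iterator F = M.begin(), E = M.end(); F != E; )
        UpgradeCallsToIntrinsic(&*F++);

      // Convert two-field llvm.global_ctors/dtors entries to the three-field form.
      // Same iterator discipline: the old global may be erased and replaced.
      for (Module::global_iterator GV = M.global_begin(), GE = M.global_end(); GV != GE; )
        UpgradeGlobalVariable(&*GV++);

      // Strip out-of-date debug info and emit a diagnostic if anything was dropped.
      UpgradeDebugInfo(M);
    }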
diff --git a/contrib/llvm/lib/IR/BasicBlock.cpp b/contrib/llvm/lib/IR/BasicBlock.cpp
index 41e58ec..ba07433 100644
--- a/contrib/llvm/lib/IR/BasicBlock.cpp
+++ b/contrib/llvm/lib/IR/BasicBlock.cpp
@@ -14,20 +14,24 @@
#include "llvm/IR/BasicBlock.h"
#include "SymbolTableListTraitsImpl.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LeakDetector.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/CFG.h"
-#include "llvm/Support/LeakDetector.h"
#include <algorithm>
using namespace llvm;
ValueSymbolTable *BasicBlock::getValueSymbolTable() {
if (Function *F = getParent())
return &F->getValueSymbolTable();
- return 0;
+ return nullptr;
+}
+
+const DataLayout *BasicBlock::getDataLayout() const {
+ return getParent()->getDataLayout();
}
LLVMContext &BasicBlock::getContext() const {
@@ -41,7 +45,7 @@ template class llvm::SymbolTableListTraits<Instruction, BasicBlock>;
BasicBlock::BasicBlock(LLVMContext &C, const Twine &Name, Function *NewParent,
BasicBlock *InsertBefore)
- : Value(Type::getLabelTy(C), Value::BasicBlockVal), Parent(0) {
+ : Value(Type::getLabelTy(C), Value::BasicBlockVal), Parent(nullptr) {
// Make sure that we get added to a function
LeakDetector::addGarbageObject(this);
@@ -70,14 +74,14 @@ BasicBlock::~BasicBlock() {
Constant *Replacement =
ConstantInt::get(llvm::Type::getInt32Ty(getContext()), 1);
while (!use_empty()) {
- BlockAddress *BA = cast<BlockAddress>(use_back());
+ BlockAddress *BA = cast<BlockAddress>(user_back());
BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
BA->getType()));
BA->destroyConstant();
}
}
- assert(getParent() == 0 && "BasicBlock still linked into the program!");
+ assert(getParent() == nullptr && "BasicBlock still linked into the program!");
dropAllReferences();
InstList.clear();
}
@@ -118,12 +122,12 @@ void BasicBlock::moveAfter(BasicBlock *MovePos) {
TerminatorInst *BasicBlock::getTerminator() {
- if (InstList.empty()) return 0;
+ if (InstList.empty()) return nullptr;
return dyn_cast<TerminatorInst>(&InstList.back());
}
const TerminatorInst *BasicBlock::getTerminator() const {
- if (InstList.empty()) return 0;
+ if (InstList.empty()) return nullptr;
return dyn_cast<TerminatorInst>(&InstList.back());
}
@@ -182,10 +186,10 @@ void BasicBlock::dropAllReferences() {
/// return the block, otherwise return a null pointer.
BasicBlock *BasicBlock::getSinglePredecessor() {
pred_iterator PI = pred_begin(this), E = pred_end(this);
- if (PI == E) return 0; // No preds.
+ if (PI == E) return nullptr; // No preds.
BasicBlock *ThePred = *PI;
++PI;
- return (PI == E) ? ThePred : 0 /*multiple preds*/;
+ return (PI == E) ? ThePred : nullptr /*multiple preds*/;
}
/// getUniquePredecessor - If this basic block has a unique predecessor block,
@@ -195,12 +199,12 @@ BasicBlock *BasicBlock::getSinglePredecessor() {
/// a switch statement with multiple cases having the same destination).
BasicBlock *BasicBlock::getUniquePredecessor() {
pred_iterator PI = pred_begin(this), E = pred_end(this);
- if (PI == E) return 0; // No preds.
+ if (PI == E) return nullptr; // No preds.
BasicBlock *PredBB = *PI;
++PI;
for (;PI != E; ++PI) {
if (*PI != PredBB)
- return 0;
+ return nullptr;
// The same predecessor appears multiple times in the predecessor list.
// This is OK.
}
@@ -273,7 +277,7 @@ void BasicBlock::removePredecessor(BasicBlock *Pred,
PN->removeIncomingValue(Pred, false);
// If all incoming values to the Phi are the same, we can replace the Phi
// with that value.
- Value* PNV = 0;
+ Value* PNV = nullptr;
if (!DontDeleteUselessPHIs && (PNV = PN->hasConstantValue()))
if (PNV != PN) {
PN->replaceAllUsesWith(PNV);
@@ -300,7 +304,7 @@ BasicBlock *BasicBlock::splitBasicBlock(iterator I, const Twine &BBName) {
assert(I != InstList.end() &&
"Trying to get me to create degenerate basic block!");
- BasicBlock *InsertBefore = llvm::next(Function::iterator(this))
+ BasicBlock *InsertBefore = std::next(Function::iterator(this))
.getNodePtrUnchecked();
BasicBlock *New = BasicBlock::Create(getContext(), BBName,
getParent(), InsertBefore);
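An illustrative sketch, not from the patch itself, of the distinction between the two predecessor helpers seen in the BasicBlock.cpp context above; mergeCandidate is a hypothetical helper name.

    // Sketch only: pick a predecessor to merge into, or nullptr if there is
    // no single distinct predecessor.
    #include "llvm/IR/BasicBlock.h"
    using namespace llvm;

    BasicBlock *mergeCandidate(BasicBlock *BB) {
      // getSinglePredecessor() gives up when the same block reaches BB over two
      // edges (e.g. a switch with duplicate destinations); getUniquePredecessor()
      // accepts that case as long as every edge comes from the same block.
      return BB->getUniquePredecessor();
    }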
diff --git a/contrib/llvm/lib/IR/Comdat.cpp b/contrib/llvm/lib/IR/Comdat.cpp
new file mode 100644
index 0000000..80715ff
--- /dev/null
+++ b/contrib/llvm/lib/IR/Comdat.cpp
@@ -0,0 +1,25 @@
+//===-- Comdat.cpp - Implement the Comdat class --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Comdat class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Comdat.h"
+#include "llvm/ADT/StringMap.h"
+using namespace llvm;
+
+Comdat::Comdat(SelectionKind SK, StringMapEntry<Comdat> *Name)
+ : Name(Name), SK(SK) {}
+
+Comdat::Comdat(Comdat &&C) : Name(C.Name), SK(C.SK) {}
+
+Comdat::Comdat() : Name(nullptr), SK(Comdat::Any) {}
+
+StringRef Comdat::getName() const { return Name->first(); }
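An illustrative sketch, not from the patch itself, of how the new Comdat class is typically reached from client code, assuming LLVM 3.5's Module::getOrInsertComdat and GlobalObject::setComdat; putInComdat is a hypothetical helper name.

    // Sketch only: place a function into a COMDAT group named after itself.
    #include "llvm/IR/Comdat.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    void putInComdat(Module &M, Function &F) {
      Comdat *C = M.getOrInsertComdat(F.getName()); // owned by the Module's StringMap
      C->setSelectionKind(Comdat::Any);             // matches the default kind above
      F.setComdat(C);
    }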
diff --git a/contrib/llvm/lib/IR/ConstantFold.cpp b/contrib/llvm/lib/IR/ConstantFold.cpp
index f5e225c..395ac39 100644
--- a/contrib/llvm/lib/IR/ConstantFold.cpp
+++ b/contrib/llvm/lib/IR/ConstantFold.cpp
@@ -22,13 +22,13 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include <limits>
@@ -51,7 +51,7 @@ static Constant *BitCastConstantVector(Constant *CV, VectorType *DstTy) {
// Analysis/ConstantFolding.cpp
unsigned NumElts = DstTy->getNumElements();
if (NumElts != CV->getType()->getVectorNumElements())
- return 0;
+ return nullptr;
Type *DstEltTy = DstTy->getElementType();
@@ -94,7 +94,7 @@ foldConstantCastPair(
// Let CastInst::isEliminableCastPair do the heavy lifting.
return CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy, DstTy,
- 0, FakeIntPtrTy, 0);
+ nullptr, FakeIntPtrTy, nullptr);
}
static Constant *FoldBitCast(Constant *V, Type *DestTy) {
@@ -139,7 +139,7 @@ static Constant *FoldBitCast(Constant *V, Type *DestTy) {
if (VectorType *SrcTy = dyn_cast<VectorType>(V->getType())) {
assert(DestPTy->getBitWidth() == SrcTy->getBitWidth() &&
"Not cast between same sized vectors!");
- SrcTy = NULL;
+ SrcTy = nullptr;
// First, check for null. Undef is already handled.
if (isa<ConstantAggregateZero>(V))
return Constant::getNullValue(DestTy);
@@ -173,7 +173,7 @@ static Constant *FoldBitCast(Constant *V, Type *DestTy) {
CI->getValue()));
// Otherwise, can't fold this (vector?)
- return 0;
+ return nullptr;
}
// Handle ConstantFP input: FP -> Integral.
@@ -181,7 +181,7 @@ static Constant *FoldBitCast(Constant *V, Type *DestTy) {
return ConstantInt::get(FP->getContext(),
FP->getValueAPF().bitcastToAPInt());
- return 0;
+ return nullptr;
}
@@ -216,14 +216,14 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
// If the input is a constant expr, we might be able to recursively simplify.
// If not, we definitely can't do anything.
ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
- if (CE == 0) return 0;
-
+ if (!CE) return nullptr;
+
switch (CE->getOpcode()) {
- default: return 0;
+ default: return nullptr;
case Instruction::Or: {
Constant *RHS = ExtractConstantBytes(CE->getOperand(1), ByteStart,ByteSize);
- if (RHS == 0)
- return 0;
+ if (!RHS)
+ return nullptr;
// X | -1 -> -1.
if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS))
@@ -231,32 +231,32 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
return RHSC;
Constant *LHS = ExtractConstantBytes(CE->getOperand(0), ByteStart,ByteSize);
- if (LHS == 0)
- return 0;
+ if (!LHS)
+ return nullptr;
return ConstantExpr::getOr(LHS, RHS);
}
case Instruction::And: {
Constant *RHS = ExtractConstantBytes(CE->getOperand(1), ByteStart,ByteSize);
- if (RHS == 0)
- return 0;
+ if (!RHS)
+ return nullptr;
// X & 0 -> 0.
if (RHS->isNullValue())
return RHS;
Constant *LHS = ExtractConstantBytes(CE->getOperand(0), ByteStart,ByteSize);
- if (LHS == 0)
- return 0;
+ if (!LHS)
+ return nullptr;
return ConstantExpr::getAnd(LHS, RHS);
}
case Instruction::LShr: {
ConstantInt *Amt = dyn_cast<ConstantInt>(CE->getOperand(1));
- if (Amt == 0)
- return 0;
+ if (!Amt)
+ return nullptr;
unsigned ShAmt = Amt->getZExtValue();
// Cannot analyze non-byte shifts.
if ((ShAmt & 7) != 0)
- return 0;
+ return nullptr;
ShAmt >>= 3;
// If the extract is known to be all zeros, return zero.
@@ -268,17 +268,17 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
return ExtractConstantBytes(CE->getOperand(0), ByteStart+ShAmt, ByteSize);
// TODO: Handle the 'partially zero' case.
- return 0;
+ return nullptr;
}
case Instruction::Shl: {
ConstantInt *Amt = dyn_cast<ConstantInt>(CE->getOperand(1));
- if (Amt == 0)
- return 0;
+ if (!Amt)
+ return nullptr;
unsigned ShAmt = Amt->getZExtValue();
// Cannot analyze non-byte shifts.
if ((ShAmt & 7) != 0)
- return 0;
+ return nullptr;
ShAmt >>= 3;
// If the extract is known to be all zeros, return zero.
@@ -290,7 +290,7 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
return ExtractConstantBytes(CE->getOperand(0), ByteStart-ShAmt, ByteSize);
// TODO: Handle the 'partially zero' case.
- return 0;
+ return nullptr;
}
case Instruction::ZExt: {
@@ -324,7 +324,7 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
}
// TODO: Handle the 'partially zero' case.
- return 0;
+ return nullptr;
}
}
}
@@ -376,7 +376,7 @@ static Constant *getFoldedSizeOf(Type *Ty, Type *DestTy,
// If there's no interesting folding happening, bail so that we don't create
// a constant that looks like it needs folding but really doesn't.
if (!Folded)
- return 0;
+ return nullptr;
// Base case: Get a regular sizeof expression.
Constant *C = ConstantExpr::getSizeOf(Ty);
@@ -442,7 +442,7 @@ static Constant *getFoldedAlignOf(Type *Ty, Type *DestTy,
// If there's no interesting folding happening, bail so that we don't create
// a constant that looks like it needs folding but really doesn't.
if (!Folded)
- return 0;
+ return nullptr;
// Base case: Get a regular alignof expression.
Constant *C = ConstantExpr::getAlignOf(Ty);
@@ -473,7 +473,7 @@ static Constant *getFoldedOffsetOf(Type *Ty, Constant *FieldNo,
unsigned NumElems = STy->getNumElements();
// An empty struct has no members.
if (NumElems == 0)
- return 0;
+ return nullptr;
// Check for a struct with all members having the same size.
Constant *MemberSize =
getFoldedSizeOf(STy->getElementType(0), DestTy, true);
@@ -497,7 +497,7 @@ static Constant *getFoldedOffsetOf(Type *Ty, Constant *FieldNo,
// If there's no interesting folding happening, bail so that we don't create
// a constant that looks like it needs folding but really doesn't.
if (!Folded)
- return 0;
+ return nullptr;
// Base case: Get a regular offsetof expression.
Constant *C = ConstantExpr::getOffsetOf(Ty, FieldNo);
@@ -529,7 +529,10 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
// Try hard to fold cast of cast because they are often eliminable.
if (unsigned newOpc = foldConstantCastPair(opc, CE, DestTy))
return ConstantExpr::getCast(newOpc, CE->getOperand(0), DestTy);
- } else if (CE->getOpcode() == Instruction::GetElementPtr) {
+ } else if (CE->getOpcode() == Instruction::GetElementPtr &&
+ // Do not fold addrspacecast (gep 0, .., 0). It might make the
+ // addrspacecast uncanonicalized.
+ opc != Instruction::AddrSpaceCast) {
// If all of the indexes in the GEP are null values, there is no pointer
// adjustment going on. We might as well cast the source pointer.
bool isAllNull = true;
@@ -582,7 +585,7 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
APFloat::rmNearestTiesToEven, &ignored);
return ConstantFP::get(V->getContext(), Val);
}
- return 0; // Can't fold.
+ return nullptr; // Can't fold.
case Instruction::FPToUI:
case Instruction::FPToSI:
if (ConstantFP *FPC = dyn_cast<ConstantFP>(V)) {
@@ -595,11 +598,11 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
APInt Val(DestBitWidth, x);
return ConstantInt::get(FPC->getContext(), Val);
}
- return 0; // Can't fold.
+ return nullptr; // Can't fold.
case Instruction::IntToPtr: //always treated as unsigned
if (V->isNullValue()) // Is it an integral null value?
return ConstantPointerNull::get(cast<PointerType>(DestTy));
- return 0; // Other pointer types cannot be casted
+ return nullptr; // Other pointer types cannot be casted
case Instruction::PtrToInt: // always treated as unsigned
// Is it a null pointer value?
if (V->isNullValue())
@@ -643,7 +646,7 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
}
}
// Other pointer types cannot be casted
- return 0;
+ return nullptr;
case Instruction::UIToFP:
case Instruction::SIToFP:
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
@@ -655,21 +658,21 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
APFloat::rmNearestTiesToEven);
return ConstantFP::get(V->getContext(), apf);
}
- return 0;
+ return nullptr;
case Instruction::ZExt:
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
uint32_t BitWidth = cast<IntegerType>(DestTy)->getBitWidth();
return ConstantInt::get(V->getContext(),
CI->getValue().zext(BitWidth));
}
- return 0;
+ return nullptr;
case Instruction::SExt:
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
uint32_t BitWidth = cast<IntegerType>(DestTy)->getBitWidth();
return ConstantInt::get(V->getContext(),
CI->getValue().sext(BitWidth));
}
- return 0;
+ return nullptr;
case Instruction::Trunc: {
uint32_t DestBitWidth = cast<IntegerType>(DestTy)->getBitWidth();
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
@@ -685,12 +688,12 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
if (Constant *Res = ExtractConstantBytes(V, 0, DestBitWidth / 8))
return Res;
- return 0;
+ return nullptr;
}
case Instruction::BitCast:
return FoldBitCast(V, DestTy);
case Instruction::AddrSpaceCast:
- return 0;
+ return nullptr;
}
}
@@ -705,12 +708,21 @@ Constant *llvm::ConstantFoldSelectInstruction(Constant *Cond,
SmallVector<Constant*, 16> Result;
Type *Ty = IntegerType::get(CondV->getContext(), 32);
for (unsigned i = 0, e = V1->getType()->getVectorNumElements(); i != e;++i){
- ConstantInt *Cond = dyn_cast<ConstantInt>(CondV->getOperand(i));
- if (Cond == 0) break;
-
- Constant *V = Cond->isNullValue() ? V2 : V1;
- Constant *Res = ConstantExpr::getExtractElement(V, ConstantInt::get(Ty, i));
- Result.push_back(Res);
+ Constant *V;
+ Constant *V1Element = ConstantExpr::getExtractElement(V1,
+ ConstantInt::get(Ty, i));
+ Constant *V2Element = ConstantExpr::getExtractElement(V2,
+ ConstantInt::get(Ty, i));
+ Constant *Cond = dyn_cast<Constant>(CondV->getOperand(i));
+ if (V1Element == V2Element) {
+ V = V1Element;
+ } else if (isa<UndefValue>(Cond)) {
+ V = isa<UndefValue>(V1Element) ? V1Element : V2Element;
+ } else {
+ if (!isa<ConstantInt>(Cond)) break;
+ V = Cond->isNullValue() ? V2Element : V1Element;
+ }
+ Result.push_back(V);
}
// If we were able to build the vector, return it.
@@ -737,7 +749,7 @@ Constant *llvm::ConstantFoldSelectInstruction(Constant *Cond,
return ConstantExpr::getSelect(Cond, V1, FalseVal->getOperand(2));
}
- return 0;
+ return nullptr;
}
Constant *llvm::ConstantFoldExtractElementInstruction(Constant *Val,
@@ -757,14 +769,14 @@ Constant *llvm::ConstantFoldExtractElementInstruction(Constant *Val,
return UndefValue::get(Val->getType()->getVectorElementType());
return Val->getAggregateElement(Index);
}
- return 0;
+ return nullptr;
}
Constant *llvm::ConstantFoldInsertElementInstruction(Constant *Val,
Constant *Elt,
Constant *Idx) {
ConstantInt *CIdx = dyn_cast<ConstantInt>(Idx);
- if (!CIdx) return 0;
+ if (!CIdx) return nullptr;
const APInt &IdxVal = CIdx->getValue();
SmallVector<Constant*, 16> Result;
@@ -794,7 +806,7 @@ Constant *llvm::ConstantFoldShuffleVectorInstruction(Constant *V1,
return UndefValue::get(VectorType::get(EltTy, MaskNumElts));
// Don't break the bitcode reader hack.
- if (isa<ConstantExpr>(Mask)) return 0;
+ if (isa<ConstantExpr>(Mask)) return nullptr;
unsigned SrcNumElts = V1->getType()->getVectorNumElements();
@@ -833,7 +845,7 @@ Constant *llvm::ConstantFoldExtractValueInstruction(Constant *Agg,
if (Constant *C = Agg->getAggregateElement(Idxs[0]))
return ConstantFoldExtractValueInstruction(C, Idxs.slice(1));
- return 0;
+ return nullptr;
}
Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg,
@@ -854,8 +866,8 @@ Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg,
SmallVector<Constant*, 32> Result;
for (unsigned i = 0; i != NumElts; ++i) {
Constant *C = Agg->getAggregateElement(i);
- if (C == 0) return 0;
-
+ if (!C) return nullptr;
+
if (Idxs[0] == i)
C = ConstantFoldInsertValueInstruction(C, Val, Idxs.slice(1));
@@ -1200,7 +1212,7 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
}
// We don't know how to fold this.
- return 0;
+ return nullptr;
}
/// isZeroSizedType - This type is zero sized if it's an array or structure of
@@ -1280,7 +1292,7 @@ static FCmpInst::Predicate evaluateFCmpRelation(Constant *V1, Constant *V2) {
if (!isa<ConstantExpr>(V1)) {
if (!isa<ConstantExpr>(V2)) {
// We distilled this down to a simple case, use the standard constant folder.
- ConstantInt *R = 0;
+ ConstantInt *R = nullptr;
R = dyn_cast<ConstantInt>(
ConstantExpr::getFCmp(FCmpInst::FCMP_OEQ, V1, V2));
if (R && !R->isZero())
@@ -1322,6 +1334,15 @@ static FCmpInst::Predicate evaluateFCmpRelation(Constant *V1, Constant *V2) {
return FCmpInst::BAD_FCMP_PREDICATE;
}
+static ICmpInst::Predicate areGlobalsPotentiallyEqual(const GlobalValue *GV1,
+ const GlobalValue *GV2) {
+ // Don't try to decide equality of aliases.
+ if (!isa<GlobalAlias>(GV1) && !isa<GlobalAlias>(GV2))
+ if (!GV1->hasExternalWeakLinkage() || !GV2->hasExternalWeakLinkage())
+ return ICmpInst::ICMP_NE;
+ return ICmpInst::BAD_ICMP_PREDICATE;
+}
+
/// evaluateICmpRelation - This function determines if there is anything we can
/// decide about the two constants provided. This doesn't need to handle simple
/// things like integer comparisons, but should instead handle ConstantExprs
@@ -1346,7 +1367,7 @@ static ICmpInst::Predicate evaluateICmpRelation(Constant *V1, Constant *V2,
!isa<BlockAddress>(V2)) {
// We distilled this down to a simple case, use the standard constant
// folder.
- ConstantInt *R = 0;
+ ConstantInt *R = nullptr;
ICmpInst::Predicate pred = ICmpInst::ICMP_EQ;
R = dyn_cast<ConstantInt>(ConstantExpr::getICmp(pred, V1, V2));
if (R && !R->isZero())
@@ -1383,10 +1404,7 @@ static ICmpInst::Predicate evaluateICmpRelation(Constant *V1, Constant *V2,
// constant (which, since the types must match, means that it's a
// ConstantPointerNull).
if (const GlobalValue *GV2 = dyn_cast<GlobalValue>(V2)) {
- // Don't try to decide equality of aliases.
- if (!isa<GlobalAlias>(GV) && !isa<GlobalAlias>(GV2))
- if (!GV->hasExternalWeakLinkage() || !GV2->hasExternalWeakLinkage())
- return ICmpInst::ICMP_NE;
+ return areGlobalsPotentiallyEqual(GV, GV2);
} else if (isa<BlockAddress>(V2)) {
return ICmpInst::ICMP_NE; // Globals never equal labels.
} else {
@@ -1451,7 +1469,8 @@ static ICmpInst::Predicate evaluateICmpRelation(Constant *V1, Constant *V2,
}
break;
- case Instruction::GetElementPtr:
+ case Instruction::GetElementPtr: {
+ GEPOperator *CE1GEP = cast<GEPOperator>(CE1);
// Ok, since this is a getelementptr, we know that the constant has a
// pointer type. Check the various cases.
if (isa<ConstantPointerNull>(V2)) {
@@ -1498,7 +1517,8 @@ static ICmpInst::Predicate evaluateICmpRelation(Constant *V1, Constant *V2,
"Surprising getelementptr!");
return isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
} else {
- // If they are different globals, we don't know what the value is.
+ if (CE1GEP->hasAllZeroIndices())
+ return areGlobalsPotentiallyEqual(GV, GV2);
return ICmpInst::BAD_ICMP_PREDICATE;
}
}
@@ -1514,8 +1534,14 @@ static ICmpInst::Predicate evaluateICmpRelation(Constant *V1, Constant *V2,
// By far the most common case to handle is when the base pointers are
// obviously to the same global.
if (isa<GlobalValue>(CE1Op0) && isa<GlobalValue>(CE2Op0)) {
- if (CE1Op0 != CE2Op0) // Don't know relative ordering.
+ // Don't know relative ordering, but check for inequality.
+ if (CE1Op0 != CE2Op0) {
+ GEPOperator *CE2GEP = cast<GEPOperator>(CE2);
+ if (CE1GEP->hasAllZeroIndices() && CE2GEP->hasAllZeroIndices())
+ return areGlobalsPotentiallyEqual(cast<GlobalValue>(CE1Op0),
+ cast<GlobalValue>(CE2Op0));
return ICmpInst::BAD_ICMP_PREDICATE;
+ }
// Ok, we know that both getelementptr instructions are based on the
// same global. From this, we can precisely determine the relative
// ordering of the resultant pointers.
@@ -1561,6 +1587,7 @@ static ICmpInst::Predicate evaluateICmpRelation(Constant *V1, Constant *V2,
}
}
}
+ }
default:
break;
}
@@ -1876,7 +1903,7 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
return ConstantExpr::getICmp(pred, C2, C1);
}
}
- return 0;
+ return nullptr;
}
/// isInBoundsIndices - Test whether the given sequence of *normalized* indices
@@ -1942,7 +1969,7 @@ static Constant *ConstantFoldGetElementPtrImpl(Constant *C,
if (isa<UndefValue>(C)) {
PointerType *Ptr = cast<PointerType>(C->getType());
Type *Ty = GetElementPtrInst::getIndexedType(Ptr, Idxs);
- assert(Ty != 0 && "Invalid indices for GEP!");
+ assert(Ty && "Invalid indices for GEP!");
return UndefValue::get(PointerType::get(Ty, Ptr->getAddressSpace()));
}
@@ -1956,7 +1983,7 @@ static Constant *ConstantFoldGetElementPtrImpl(Constant *C,
if (isNull) {
PointerType *Ptr = cast<PointerType>(C->getType());
Type *Ty = GetElementPtrInst::getIndexedType(Ptr, Idxs);
- assert(Ty != 0 && "Invalid indices for GEP!");
+ assert(Ty && "Invalid indices for GEP!");
return ConstantPointerNull::get(PointerType::get(Ty,
Ptr->getAddressSpace()));
}
@@ -1968,7 +1995,7 @@ static Constant *ConstantFoldGetElementPtrImpl(Constant *C,
// getelementptr instructions into a single instruction.
//
if (CE->getOpcode() == Instruction::GetElementPtr) {
- Type *LastTy = 0;
+ Type *LastTy = nullptr;
for (gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE);
I != E; ++I)
LastTy = *I;
@@ -2063,7 +2090,7 @@ static Constant *ConstantFoldGetElementPtrImpl(Constant *C,
bool Unknown = false;
SmallVector<Constant *, 8> NewIdxs;
Type *Ty = C->getType();
- Type *Prev = 0;
+ Type *Prev = nullptr;
for (unsigned i = 0, e = Idxs.size(); i != e;
Prev = Ty, Ty = cast<CompositeType>(Ty)->getTypeAtIndex(Idxs[i]), ++i) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idxs[i])) {
@@ -2121,7 +2148,7 @@ static Constant *ConstantFoldGetElementPtrImpl(Constant *C,
isa<GlobalVariable>(C) && isInBoundsIndices(Idxs))
return ConstantExpr::getInBoundsGetElementPtr(C, Idxs);
- return 0;
+ return nullptr;
}
Constant *llvm::ConstantFoldGetElementPtr(Constant *C,
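An illustrative sketch, not from the patch itself, exercising the per-element vector select folding changed in the ConstantFoldSelectInstruction hunk above, assuming the LLVM 3.5 ConstantExpr/ConstantVector APIs; foldExample is a hypothetical helper name.

    // Sketch only: a vector select whose condition has an undef lane.
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"
    using namespace llvm;

    Constant *foldExample(LLVMContext &Ctx) {
      Type *I32 = Type::getInt32Ty(Ctx);
      Type *I1 = Type::getInt1Ty(Ctx);
      Constant *CondElts[2] = { ConstantInt::getTrue(Ctx), UndefValue::get(I1) };
      Constant *Cond = ConstantVector::get(CondElts);
      Constant *A = ConstantVector::getSplat(2, ConstantInt::get(I32, 1));
      Constant *B = ConstantVector::getSplat(2, ConstantInt::get(I32, 2));
      // With the change above, the undef lane no longer aborts folding; that lane
      // simply takes one of the (defined) operand elements.
      return ConstantExpr::getSelect(Cond, A, B);
    }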
diff --git a/contrib/llvm/lib/Support/ConstantRange.cpp b/contrib/llvm/lib/IR/ConstantRange.cpp
index 265b6e9..f8e9ba4 100644
--- a/contrib/llvm/lib/Support/ConstantRange.cpp
+++ b/contrib/llvm/lib/IR/ConstantRange.cpp
@@ -22,7 +22,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/IR/InstrTypes.h"
-#include "llvm/Support/ConstantRange.h"
+#include "llvm/IR/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -39,10 +39,10 @@ ConstantRange::ConstantRange(uint32_t BitWidth, bool Full) {
/// Initialize a range to hold the single specified value.
///
ConstantRange::ConstantRange(APIntMoveTy V)
- : Lower(llvm_move(V)), Upper(Lower + 1) {}
+ : Lower(std::move(V)), Upper(Lower + 1) {}
ConstantRange::ConstantRange(APIntMoveTy L, APIntMoveTy U)
- : Lower(llvm_move(L)), Upper(llvm_move(U)) {
+ : Lower(std::move(L)), Upper(std::move(U)) {
assert(Lower.getBitWidth() == Upper.getBitWidth() &&
"ConstantRange with unequal bit widths");
assert((Lower != Upper || (Lower.isMaxValue() || Lower.isMinValue())) &&
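An illustrative sketch, not from the patch itself: now that the header lives under llvm/IR and the constructors take movable APInts (as above), a range can be built like this; nonNegative32 is a hypothetical helper name.

    // Sketch only: the half-open range [0, 0x80000000), i.e. all non-negative i32.
    #include "llvm/ADT/APInt.h"
    #include "llvm/IR/ConstantRange.h"
    using namespace llvm;

    ConstantRange nonNegative32() {
      APInt Lo(32, 0);
      APInt Hi = APInt::getSignedMinValue(32); // exclusive upper bound
      return ConstantRange(std::move(Lo), std::move(Hi));
    }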
diff --git a/contrib/llvm/lib/IR/Constants.cpp b/contrib/llvm/lib/IR/Constants.cpp
index 690ac59..b815936a 100644
--- a/contrib/llvm/lib/IR/Constants.cpp
+++ b/contrib/llvm/lib/IR/Constants.cpp
@@ -21,6 +21,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
@@ -28,7 +29,6 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
@@ -107,6 +107,28 @@ bool Constant::isAllOnesValue() const {
return false;
}
+bool Constant::isMinSignedValue() const {
+ // Check for INT_MIN integers
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(this))
+ return CI->isMinValue(/*isSigned=*/true);
+
+ // Check for FP which are bitcasted from INT_MIN integers
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
+ return CFP->getValueAPF().bitcastToAPInt().isMinSignedValue();
+
+ // Check for constant vectors which are splats of INT_MIN values.
+ if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
+ if (Constant *Splat = CV->getSplatValue())
+ return Splat->isMinSignedValue();
+
+ // Check for constant vectors which are splats of INT_MIN values.
+ if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this))
+ if (Constant *Splat = CV->getSplatValue())
+ return Splat->isMinSignedValue();
+
+ return false;
+}
+
// Constructor to create a '0' constant of arbitrary type...
Constant *Constant::getNullValue(Type *Ty) {
switch (Ty->getTypeID()) {
@@ -182,13 +204,13 @@ Constant *Constant::getAllOnesValue(Type *Ty) {
/// 'this' is a constant expr.
Constant *Constant::getAggregateElement(unsigned Elt) const {
if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(this))
- return Elt < CS->getNumOperands() ? CS->getOperand(Elt) : 0;
+ return Elt < CS->getNumOperands() ? CS->getOperand(Elt) : nullptr;
if (const ConstantArray *CA = dyn_cast<ConstantArray>(this))
- return Elt < CA->getNumOperands() ? CA->getOperand(Elt) : 0;
+ return Elt < CA->getNumOperands() ? CA->getOperand(Elt) : nullptr;
if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
- return Elt < CV->getNumOperands() ? CV->getOperand(Elt) : 0;
+ return Elt < CV->getNumOperands() ? CV->getOperand(Elt) : nullptr;
if (const ConstantAggregateZero *CAZ =dyn_cast<ConstantAggregateZero>(this))
return CAZ->getElementValue(Elt);
@@ -197,15 +219,16 @@ Constant *Constant::getAggregateElement(unsigned Elt) const {
return UV->getElementValue(Elt);
if (const ConstantDataSequential *CDS =dyn_cast<ConstantDataSequential>(this))
- return Elt < CDS->getNumElements() ? CDS->getElementAsConstant(Elt) : 0;
- return 0;
+ return Elt < CDS->getNumElements() ? CDS->getElementAsConstant(Elt)
+ : nullptr;
+ return nullptr;
}
Constant *Constant::getAggregateElement(Constant *Elt) const {
assert(isa<IntegerType>(Elt->getType()) && "Index must be an integer");
if (ConstantInt *CI = dyn_cast<ConstantInt>(Elt))
return getAggregateElement(CI->getZExtValue());
- return 0;
+ return nullptr;
}
@@ -218,7 +241,7 @@ void Constant::destroyConstantImpl() {
// Constants) that they are, in fact, invalid now and should be deleted.
//
while (!use_empty()) {
- Value *V = use_back();
+ Value *V = user_back();
#ifndef NDEBUG // Only in -g mode...
if (!isa<Constant>(V)) {
dbgs() << "While deleting: " << *this
@@ -230,7 +253,7 @@ void Constant::destroyConstantImpl() {
cast<Constant>(V)->destroyConstant();
// The constant should remove itself from our use list...
- assert((use_empty() || use_back() != V) && "Constant not removed!");
+ assert((use_empty() || user_back() != V) && "Constant not removed!");
}
// Value has no outstanding references it is safe to delete it now...
@@ -277,39 +300,52 @@ bool Constant::canTrap() const {
return canTrapImpl(this, NonTrappingOps);
}
-/// isThreadDependent - Return true if the value can vary between threads.
-bool Constant::isThreadDependent() const {
- SmallPtrSet<const Constant*, 64> Visited;
- SmallVector<const Constant*, 64> WorkList;
- WorkList.push_back(this);
- Visited.insert(this);
+/// Check if C contains a GlobalValue for which Predicate is true.
+static bool
+ConstHasGlobalValuePredicate(const Constant *C,
+ bool (*Predicate)(const GlobalValue *)) {
+ SmallPtrSet<const Constant *, 8> Visited;
+ SmallVector<const Constant *, 8> WorkList;
+ WorkList.push_back(C);
+ Visited.insert(C);
while (!WorkList.empty()) {
- const Constant *C = WorkList.pop_back_val();
-
- if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) {
- if (GV->isThreadLocal())
+ const Constant *WorkItem = WorkList.pop_back_val();
+ if (const auto *GV = dyn_cast<GlobalValue>(WorkItem))
+ if (Predicate(GV))
return true;
- }
-
- for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I) {
- const Constant *D = dyn_cast<Constant>(C->getOperand(I));
- if (!D)
+ for (const Value *Op : WorkItem->operands()) {
+ const Constant *ConstOp = dyn_cast<Constant>(Op);
+ if (!ConstOp)
continue;
- if (Visited.insert(D))
- WorkList.push_back(D);
+ if (Visited.insert(ConstOp))
+ WorkList.push_back(ConstOp);
}
}
-
return false;
}
-/// isConstantUsed - Return true if the constant has users other than constant
-/// exprs and other dangling things.
+/// Return true if the value can vary between threads.
+bool Constant::isThreadDependent() const {
+ auto ThreadLocalPredicate = [](const GlobalValue *GV) {
+ return GV->isThreadLocal();
+ };
+ return ConstHasGlobalValuePredicate(this, ThreadLocalPredicate);
+}
+
+bool Constant::isDLLImportDependent() const {
+ auto DLLImportPredicate = [](const GlobalValue *GV) {
+ return GV->hasDLLImportStorageClass();
+ };
+ return ConstHasGlobalValuePredicate(this, DLLImportPredicate);
+}
+
+/// Return true if the constant has users other than constant exprs and other
+/// dangling things.
bool Constant::isConstantUsed() const {
- for (const_use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
- const Constant *UC = dyn_cast<Constant>(*UI);
- if (UC == 0 || isa<GlobalValue>(UC))
+ for (const User *U : users()) {
+ const Constant *UC = dyn_cast<Constant>(U);
+ if (!UC || isa<GlobalValue>(UC))
return true;
if (UC->isConstantUsed())
@@ -377,7 +413,7 @@ static bool removeDeadUsersOfConstant(const Constant *C) {
if (isa<GlobalValue>(C)) return false; // Cannot remove this
while (!C->use_empty()) {
- const Constant *User = dyn_cast<Constant>(C->use_back());
+ const Constant *User = dyn_cast<Constant>(C->user_back());
if (!User) return false; // Non-constant usage;
if (!removeDeadUsersOfConstant(User))
return false; // Constant wasn't dead
@@ -393,11 +429,11 @@ static bool removeDeadUsersOfConstant(const Constant *C) {
/// that want to check to see if a global is unused, but don't want to deal
/// with potentially dead constants hanging off of the globals.
void Constant::removeDeadConstantUsers() const {
- Value::const_use_iterator I = use_begin(), E = use_end();
- Value::const_use_iterator LastNonDeadUser = E;
+ Value::const_user_iterator I = user_begin(), E = user_end();
+ Value::const_user_iterator LastNonDeadUser = E;
while (I != E) {
const Constant *User = dyn_cast<Constant>(*I);
- if (User == 0) {
+ if (!User) {
LastNonDeadUser = I;
++I;
continue;
@@ -413,7 +449,7 @@ void Constant::removeDeadConstantUsers() const {
// If the constant was dead, then the iterator is invalidated.
if (LastNonDeadUser == E) {
- I = use_begin();
+ I = user_begin();
if (I == E) break;
} else {
I = LastNonDeadUser;
@@ -431,7 +467,7 @@ void Constant::removeDeadConstantUsers() const {
void ConstantInt::anchor() { }
ConstantInt::ConstantInt(IntegerType *Ty, const APInt& V)
- : Constant(Ty, ConstantIntVal, 0, 0), Val(V) {
+ : Constant(Ty, ConstantIntVal, nullptr, 0), Val(V) {
assert(V.getBitWidth() == Ty->getBitWidth() && "Invalid constant for type");
}
@@ -584,23 +620,21 @@ Constant *ConstantFP::get(Type *Ty, StringRef Str) {
return C;
}
+Constant *ConstantFP::getNegativeZero(Type *Ty) {
+ const fltSemantics &Semantics = *TypeToFloatSemantics(Ty->getScalarType());
+ APFloat NegZero = APFloat::getZero(Semantics, /*Negative=*/true);
+ Constant *C = get(Ty->getContext(), NegZero);
-ConstantFP *ConstantFP::getNegativeZero(Type *Ty) {
- LLVMContext &Context = Ty->getContext();
- APFloat apf = cast<ConstantFP>(Constant::getNullValue(Ty))->getValueAPF();
- apf.changeSign();
- return get(Context, apf);
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getNumElements(), C);
+
+ return C;
}
Constant *ConstantFP::getZeroValueForNegation(Type *Ty) {
- Type *ScalarTy = Ty->getScalarType();
- if (ScalarTy->isFloatingPointTy()) {
- Constant *C = getNegativeZero(ScalarTy);
- if (VectorType *VTy = dyn_cast<VectorType>(Ty))
- return ConstantVector::getSplat(VTy->getNumElements(), C);
- return C;
- }
+ if (Ty->isFPOrFPVectorTy())
+ return getNegativeZero(Ty);
return Constant::getNullValue(Ty);
}
@@ -635,14 +669,18 @@ ConstantFP* ConstantFP::get(LLVMContext &Context, const APFloat& V) {
return Slot;
}
-ConstantFP *ConstantFP::getInfinity(Type *Ty, bool Negative) {
- const fltSemantics &Semantics = *TypeToFloatSemantics(Ty);
- return ConstantFP::get(Ty->getContext(),
- APFloat::getInf(Semantics, Negative));
+Constant *ConstantFP::getInfinity(Type *Ty, bool Negative) {
+ const fltSemantics &Semantics = *TypeToFloatSemantics(Ty->getScalarType());
+ Constant *C = get(Ty->getContext(), APFloat::getInf(Semantics, Negative));
+
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getNumElements(), C);
+
+ return C;
}
ConstantFP::ConstantFP(Type *Ty, const APFloat& V)
- : Constant(Ty, ConstantFPVal, 0, 0), Val(V) {
+ : Constant(Ty, ConstantFPVal, nullptr, 0), Val(V) {
assert(&V.getSemantics() == TypeToFloatSemantics(Ty) &&
"FP type Mismatch");
}
@@ -1045,7 +1083,7 @@ bool ConstantExpr::isGEPWithNoNotionalOverIndexing() const {
if (getOpcode() != Instruction::GetElementPtr) return false;
gep_type_iterator GEPI = gep_type_begin(this), E = gep_type_end(this);
- User::const_op_iterator OI = llvm::next(this->op_begin());
+ User::const_op_iterator OI = std::next(this->op_begin());
// Skip the first index, as it has no static limit.
++GEPI;
@@ -1233,7 +1271,7 @@ ConstantAggregateZero *ConstantAggregateZero::get(Type *Ty) {
"Cannot create an aggregate zero of non-aggregate type!");
ConstantAggregateZero *&Entry = Ty->getContext().pImpl->CAZConstants[Ty];
- if (Entry == 0)
+ if (!Entry)
Entry = new ConstantAggregateZero(Ty);
return Entry;
@@ -1281,7 +1319,7 @@ Constant *Constant::getSplatValue() const {
return CV->getSplatValue();
if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
return CV->getSplatValue();
- return 0;
+ return nullptr;
}
/// getSplatValue - If this is a splat constant, where all of the
@@ -1292,7 +1330,7 @@ Constant *ConstantVector::getSplatValue() const {
// Then make sure all remaining elements point to the same value.
for (unsigned I = 1, E = getNumOperands(); I < E; ++I)
if (getOperand(I) != Elt)
- return 0;
+ return nullptr;
return Elt;
}
@@ -1313,7 +1351,7 @@ const APInt &Constant::getUniqueInteger() const {
ConstantPointerNull *ConstantPointerNull::get(PointerType *Ty) {
ConstantPointerNull *&Entry = Ty->getContext().pImpl->CPNConstants[Ty];
- if (Entry == 0)
+ if (!Entry)
Entry = new ConstantPointerNull(Ty);
return Entry;
@@ -1333,7 +1371,7 @@ void ConstantPointerNull::destroyConstant() {
UndefValue *UndefValue::get(Type *Ty) {
UndefValue *&Entry = Ty->getContext().pImpl->UVConstants[Ty];
- if (Entry == 0)
+ if (!Entry)
Entry = new UndefValue(Ty);
return Entry;
@@ -1351,14 +1389,14 @@ void UndefValue::destroyConstant() {
//
BlockAddress *BlockAddress::get(BasicBlock *BB) {
- assert(BB->getParent() != 0 && "Block must have a parent");
+ assert(BB->getParent() && "Block must have a parent");
return get(BB->getParent(), BB);
}
BlockAddress *BlockAddress::get(Function *F, BasicBlock *BB) {
BlockAddress *&BA =
F->getContext().pImpl->BlockAddresses[std::make_pair(F, BB)];
- if (BA == 0)
+ if (!BA)
BA = new BlockAddress(F, BB);
assert(BA->getFunction() == F && "Basic block moved between functions");
@@ -1373,6 +1411,17 @@ BlockAddress::BlockAddress(Function *F, BasicBlock *BB)
BB->AdjustBlockAddressRefCount(1);
}
+BlockAddress *BlockAddress::lookup(const BasicBlock *BB) {
+ if (!BB->hasAddressTaken())
+ return nullptr;
+
+ const Function *F = BB->getParent();
+ assert(F && "Block must have a parent");
+ BlockAddress *BA =
+ F->getContext().pImpl->BlockAddresses.lookup(std::make_pair(F, BB));
+ assert(BA && "Refcount and block address map disagree!");
+ return BA;
+}
// destroyConstant - Remove the constant from the constant table.
//
@@ -1398,7 +1447,7 @@ void BlockAddress::replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U) {
// and return early.
BlockAddress *&NewBA =
getContext().pImpl->BlockAddresses[std::make_pair(NewF, NewBB)];
- if (NewBA == 0) {
+ if (!NewBA) {
getBasicBlock()->AdjustBlockAddressRefCount(-1);
// Remove the old entry, this can't cause the map to rehash (just a
@@ -1684,6 +1733,19 @@ Constant *ConstantExpr::getAddrSpaceCast(Constant *C, Type *DstTy) {
assert(CastInst::castIsValid(Instruction::AddrSpaceCast, C, DstTy) &&
"Invalid constantexpr addrspacecast!");
+ // Canonicalize addrspacecasts between different pointer types by first
+ // bitcasting the pointer type and then converting the address space.
+ PointerType *SrcScalarTy = cast<PointerType>(C->getType()->getScalarType());
+ PointerType *DstScalarTy = cast<PointerType>(DstTy->getScalarType());
+ Type *DstElemTy = DstScalarTy->getElementType();
+ if (SrcScalarTy->getElementType() != DstElemTy) {
+ Type *MidTy = PointerType::get(DstElemTy, SrcScalarTy->getAddressSpace());
+ if (VectorType *VT = dyn_cast<VectorType>(DstTy)) {
+ // Handle vectors of pointers.
+ MidTy = VectorType::get(MidTy, VT->getNumElements());
+ }
+ C = getBitCast(C, MidTy);
+ }
return getFoldedCast(Instruction::AddrSpaceCast, C, DstTy);
}
@@ -1779,7 +1841,7 @@ Constant *ConstantExpr::getAlignOf(Type* Ty) {
// Note that a non-inbounds gep is used, as null isn't within any object.
Type *AligningTy =
StructType::get(Type::getInt1Ty(Ty->getContext()), Ty, NULL);
- Constant *NullPtr = Constant::getNullValue(AligningTy->getPointerTo());
+ Constant *NullPtr = Constant::getNullValue(AligningTy->getPointerTo(0));
Constant *Zero = ConstantInt::get(Type::getInt64Ty(Ty->getContext()), 0);
Constant *One = ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 1);
Constant *Indices[2] = { Zero, One };
@@ -1923,8 +1985,8 @@ ConstantExpr::getFCmp(unsigned short pred, Constant *LHS, Constant *RHS) {
Constant *ConstantExpr::getExtractElement(Constant *Val, Constant *Idx) {
assert(Val->getType()->isVectorTy() &&
"Tried to create extractelement operation on non-vector type!");
- assert(Idx->getType()->isIntegerTy(32) &&
- "Extractelement index must be i32 type!");
+ assert(Idx->getType()->isIntegerTy() &&
+ "Extractelement index must be an integer type!");
if (Constant *FC = ConstantFoldExtractElementInstruction(Val, Idx))
return FC; // Fold a few common cases.
@@ -1944,7 +2006,7 @@ Constant *ConstantExpr::getInsertElement(Constant *Val, Constant *Elt,
"Tried to create insertelement operation on non-vector type!");
assert(Elt->getType() == Val->getType()->getVectorElementType() &&
"Insertelement types must match!");
- assert(Idx->getType()->isIntegerTy(32) &&
+ assert(Idx->getType()->isIntegerTy() &&
"Insertelement index must be i32 type!");
if (Constant *FC = ConstantFoldInsertElementInstruction(Val, Elt, Idx))
@@ -2132,7 +2194,7 @@ Constant *ConstantExpr::getBinOpIdentity(unsigned Opcode, Type *Ty) {
switch (Opcode) {
default:
// Doesn't have an identity.
- return 0;
+ return nullptr;
case Instruction::Add:
case Instruction::Or:
@@ -2155,7 +2217,7 @@ Constant *ConstantExpr::getBinOpAbsorber(unsigned Opcode, Type *Ty) {
switch (Opcode) {
default:
// Doesn't have an absorber.
- return 0;
+ return nullptr;
case Instruction::Or:
return Constant::getAllOnesValue(Ty);
@@ -2272,7 +2334,7 @@ Constant *ConstantDataSequential::getImpl(StringRef Elements, Type *Ty) {
// of i8, or a 1-element array of i32. They'll both end up in the same
  // StringMap bucket, linked up by their Next pointers. Walk the list.
ConstantDataSequential **Entry = &Slot.getValue();
- for (ConstantDataSequential *Node = *Entry; Node != 0;
+ for (ConstantDataSequential *Node = *Entry; Node;
Entry = &Node->Next, Node = *Entry)
if (Node->getType() == Ty)
return Node;
@@ -2299,7 +2361,7 @@ void ConstantDataSequential::destroyConstant() {
ConstantDataSequential **Entry = &Slot->getValue();
// Remove the entry from the hash table.
- if ((*Entry)->Next == 0) {
+ if (!(*Entry)->Next) {
// If there is only one value in the bucket (common case) it must be this
// entry, and removing the entry should remove the bucket completely.
assert((*Entry) == this && "Hash mismatch in ConstantDataSequential");
@@ -2320,7 +2382,7 @@ void ConstantDataSequential::destroyConstant() {
// If we were part of a list, make sure that we don't delete the list that is
// still owned by the uniquing map.
- Next = 0;
+ Next = nullptr;
// Finally, actually delete it.
destroyConstantImpl();
@@ -2548,7 +2610,7 @@ Constant *ConstantDataVector::getSplatValue() const {
unsigned EltSize = getElementByteSize();
for (unsigned i = 1, e = getNumElements(); i != e; ++i)
if (memcmp(Base, Base+i*EltSize, EltSize))
- return 0;
+ return nullptr;
// If they're all the same, return the 0th one as a representative.
return getElementAsConstant(0);
@@ -2596,7 +2658,7 @@ void ConstantArray::replaceUsesOfWithOnConstant(Value *From, Value *To,
AllSame &= Val == ToC;
}
- Constant *Replacement = 0;
+ Constant *Replacement = nullptr;
if (AllSame && ToC->isNullValue()) {
Replacement = ConstantAggregateZero::get(getType());
} else if (AllSame && isa<UndefValue>(ToC)) {
@@ -2682,7 +2744,7 @@ void ConstantStruct::replaceUsesOfWithOnConstant(Value *From, Value *To,
LLVMContextImpl *pImpl = getContext().pImpl;
- Constant *Replacement = 0;
+ Constant *Replacement = nullptr;
if (isAllZeros) {
Replacement = ConstantAggregateZero::get(getType());
} else if (isAllUndef) {
@@ -2781,6 +2843,7 @@ Instruction *ConstantExpr::getAsInstruction() {
case Instruction::PtrToInt:
case Instruction::IntToPtr:
case Instruction::BitCast:
+ case Instruction::AddrSpaceCast:
return CastInst::Create((Instruction::CastOps)getOpcode(),
Ops[0], getType());
case Instruction::Select:
diff --git a/contrib/llvm/lib/IR/ConstantsContext.h b/contrib/llvm/lib/IR/ConstantsContext.h
index 32bed95..f06509f 100644
--- a/contrib/llvm/lib/IR/ConstantsContext.h
+++ b/contrib/llvm/lib/IR/ConstantsContext.h
@@ -24,6 +24,9 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
+#include <tuple>
+
+#define DEBUG_TYPE "ir"
namespace llvm {
template<class ValType>
@@ -32,7 +35,7 @@ struct ConstantTraits;
/// UnaryConstantExpr - This class is private to Constants.cpp, and is used
/// behind the scenes to implement unary constant exprs.
class UnaryConstantExpr : public ConstantExpr {
- virtual void anchor();
+ void anchor() override;
void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
public:
// allocate space for exactly one operand
@@ -49,7 +52,7 @@ public:
/// BinaryConstantExpr - This class is private to Constants.cpp, and is used
/// behind the scenes to implement binary constant exprs.
class BinaryConstantExpr : public ConstantExpr {
- virtual void anchor();
+ void anchor() override;
void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
public:
// allocate space for exactly two operands
@@ -70,7 +73,7 @@ public:
/// SelectConstantExpr - This class is private to Constants.cpp, and is used
/// behind the scenes to implement select constant exprs.
class SelectConstantExpr : public ConstantExpr {
- virtual void anchor();
+ void anchor() override;
void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
public:
// allocate space for exactly three operands
@@ -91,7 +94,7 @@ public:
/// Constants.cpp, and is used behind the scenes to implement
/// extractelement constant exprs.
class ExtractElementConstantExpr : public ConstantExpr {
- virtual void anchor();
+ void anchor() override;
void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
public:
// allocate space for exactly two operands
@@ -112,7 +115,7 @@ public:
/// Constants.cpp, and is used behind the scenes to implement
/// insertelement constant exprs.
class InsertElementConstantExpr : public ConstantExpr {
- virtual void anchor();
+ void anchor() override;
void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
public:
// allocate space for exactly three operands
@@ -134,7 +137,7 @@ public:
/// Constants.cpp, and is used behind the scenes to implement
/// shufflevector constant exprs.
class ShuffleVectorConstantExpr : public ConstantExpr {
- virtual void anchor();
+ void anchor() override;
void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
public:
// allocate space for exactly three operands
@@ -159,7 +162,7 @@ public:
/// Constants.cpp, and is used behind the scenes to implement
/// extractvalue constant exprs.
class ExtractValueConstantExpr : public ConstantExpr {
- virtual void anchor();
+ void anchor() override;
void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
public:
// allocate space for exactly one operand
@@ -185,7 +188,7 @@ public:
/// Constants.cpp, and is used behind the scenes to implement
/// insertvalue constant exprs.
class InsertValueConstantExpr : public ConstantExpr {
- virtual void anchor();
+ void anchor() override;
void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
public:
// allocate space for exactly one operand
@@ -212,7 +215,7 @@ public:
/// GetElementPtrConstantExpr - This class is private to Constants.cpp, and is
/// used behind the scenes to implement getelementptr constant exprs.
class GetElementPtrConstantExpr : public ConstantExpr {
- virtual void anchor();
+ void anchor() override;
GetElementPtrConstantExpr(Constant *C, ArrayRef<Constant*> IdxList,
Type *DestTy);
public:
@@ -233,7 +236,7 @@ public:
// behind the scenes to implement ICmp and FCmp constant expressions. This is
// needed in order to store the predicate value for these instructions.
class CompareConstantExpr : public ConstantExpr {
- virtual void anchor();
+ void anchor() override;
void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
public:
// allocate space for exactly two operands
@@ -334,14 +337,10 @@ struct ExprMapKeyType {
this->indices == that.indices;
}
bool operator<(const ExprMapKeyType & that) const {
- if (this->opcode != that.opcode) return this->opcode < that.opcode;
- if (this->operands != that.operands) return this->operands < that.operands;
- if (this->subclassdata != that.subclassdata)
- return this->subclassdata < that.subclassdata;
- if (this->subclassoptionaldata != that.subclassoptionaldata)
- return this->subclassoptionaldata < that.subclassoptionaldata;
- if (this->indices != that.indices) return this->indices < that.indices;
- return false;
+ return std::tie(opcode, operands, subclassdata, subclassoptionaldata,
+ indices) <
+ std::tie(that.opcode, that.operands, that.subclassdata,
+ that.subclassoptionaldata, that.indices);
}
bool operator!=(const ExprMapKeyType& that) const {
@@ -369,17 +368,10 @@ struct InlineAsmKeyType {
this->asm_dialect == that.asm_dialect;
}
bool operator<(const InlineAsmKeyType& that) const {
- if (this->asm_string != that.asm_string)
- return this->asm_string < that.asm_string;
- if (this->constraints != that.constraints)
- return this->constraints < that.constraints;
- if (this->has_side_effects != that.has_side_effects)
- return this->has_side_effects < that.has_side_effects;
- if (this->is_align_stack != that.is_align_stack)
- return this->is_align_stack < that.is_align_stack;
- if (this->asm_dialect != that.asm_dialect)
- return this->asm_dialect < that.asm_dialect;
- return false;
+ return std::tie(asm_string, constraints, has_side_effects, is_align_stack,
+ asm_dialect) <
+ std::tie(that.asm_string, that.constraints, that.has_side_effects,
+ that.is_align_stack, that.asm_dialect);
}
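Both operator< rewrites in this header lean on std::tie, which builds a tuple of references that compares lexicographically, so the ordering is unchanged while the field-by-field if-chains disappear. A standalone illustration with made-up fields:

    #include <tuple>

    struct Key {
      unsigned opcode;
      unsigned subclassdata;
      bool operator<(const Key &that) const {
        // Compares opcode first, then subclassdata, like the old if-chain.
        return std::tie(opcode, subclassdata) <
               std::tie(that.opcode, that.subclassdata);
      }
    };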
bool operator!=(const InlineAsmKeyType& that) const {
@@ -595,7 +587,7 @@ public:
/// necessary.
ConstantClass *getOrCreate(TypeClass *Ty, ValRefType V) {
MapKey Lookup(Ty, V);
- ConstantClass* Result = 0;
+ ConstantClass* Result = nullptr;
typename MapTy::iterator I = Map.find(Lookup);
// Is it in the map?
@@ -731,7 +723,7 @@ public:
/// necessary.
ConstantClass *getOrCreate(TypeClass *Ty, Operands V) {
LookupKey Lookup(Ty, V);
- ConstantClass* Result = 0;
+ ConstantClass* Result = nullptr;
typename MapTy::iterator I = Map.find_as(Lookup);
// Is it in the map?
diff --git a/contrib/llvm/lib/IR/Core.cpp b/contrib/llvm/lib/IR/Core.cpp
index c70f459..87099a6 100644
--- a/contrib/llvm/lib/IR/Core.cpp
+++ b/contrib/llvm/lib/IR/Core.cpp
@@ -15,37 +15,41 @@
#include "llvm-c/Core.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/PassManager.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/system_error.h"
#include "llvm/Support/Threading.h"
+#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdlib>
#include <cstring>
+#include <system_error>
using namespace llvm;
+#define DEBUG_TYPE "ir"
+
void llvm::initializeCore(PassRegistry &Registry) {
- initializeDominatorTreePass(Registry);
- initializePrintModulePassPass(Registry);
- initializePrintFunctionPassPass(Registry);
+ initializeDominatorTreeWrapperPassPass(Registry);
+ initializePrintModulePassWrapperPass(Registry);
+ initializePrintFunctionPassWrapperPass(Registry);
initializePrintBasicBlockPassPass(Registry);
- initializeVerifierPass(Registry);
- initializePreVerifierPass(Registry);
+ initializeVerifierLegacyPassPass(Registry);
}
void LLVMInitializeCore(LLVMPassRegistryRef R) {
@@ -77,6 +81,21 @@ LLVMContextRef LLVMGetGlobalContext() {
return wrap(&getGlobalContext());
}
+void LLVMContextSetDiagnosticHandler(LLVMContextRef C,
+ LLVMDiagnosticHandler Handler,
+ void *DiagnosticContext) {
+ unwrap(C)->setDiagnosticHandler(
+ LLVM_EXTENSION reinterpret_cast<LLVMContext::DiagnosticHandlerTy>(Handler),
+ DiagnosticContext);
+}
+
+void LLVMContextSetYieldCallback(LLVMContextRef C, LLVMYieldCallback Callback,
+ void *OpaqueHandle) {
+ auto YieldCallback =
+ LLVM_EXTENSION reinterpret_cast<LLVMContext::YieldCallbackTy>(Callback);
+ unwrap(C)->setYieldCallback(YieldCallback, OpaqueHandle);
+}
+
void LLVMContextDispose(LLVMContextRef C) {
delete unwrap(C);
}
@@ -90,6 +109,40 @@ unsigned LLVMGetMDKindID(const char* Name, unsigned SLen) {
return LLVMGetMDKindIDInContext(LLVMGetGlobalContext(), Name, SLen);
}
+char *LLVMGetDiagInfoDescription(LLVMDiagnosticInfoRef DI) {
+ std::string MsgStorage;
+ raw_string_ostream Stream(MsgStorage);
+ DiagnosticPrinterRawOStream DP(Stream);
+
+ unwrap(DI)->print(DP);
+ Stream.flush();
+
+ return LLVMCreateMessage(MsgStorage.c_str());
+}
+
+LLVMDiagnosticSeverity LLVMGetDiagInfoSeverity(LLVMDiagnosticInfoRef DI){
+ LLVMDiagnosticSeverity severity;
+
+ switch(unwrap(DI)->getSeverity()) {
+ default:
+ severity = LLVMDSError;
+ break;
+ case DS_Warning:
+ severity = LLVMDSWarning;
+ break;
+ case DS_Remark:
+ severity = LLVMDSRemark;
+ break;
+ case DS_Note:
+ severity = LLVMDSNote;
+ break;
+ }
+
+ return severity;
+}
+
+
+
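The new C-API hooks above expose LLVMContext's diagnostic machinery to clients. A hedged usage sketch; the handler body and the surrounding program are illustrative only, while the entry points and the message ownership rule (dispose with LLVMDisposeMessage) follow the C header:

    #include "llvm-c/Core.h"
    #include <stdio.h>

    static void handleDiag(LLVMDiagnosticInfoRef DI, void *Ctx) {
      (void)Ctx;
      char *Msg = LLVMGetDiagInfoDescription(DI);
      LLVMDiagnosticSeverity Sev = LLVMGetDiagInfoSeverity(DI);
      fprintf(stderr, "%s: %s\n", Sev == LLVMDSError ? "error" : "diag", Msg);
      LLVMDisposeMessage(Msg);
    }

    int main(void) {
      LLVMContextRef C = LLVMContextCreate();
      LLVMContextSetDiagnosticHandler(C, handleDiag, NULL);
      /* ... build modules, run the verifier, etc. ... */
      LLVMContextDispose(C);
      return 0;
    }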
/*===-- Operations on modules ---------------------------------------------===*/
@@ -108,7 +161,7 @@ void LLVMDisposeModule(LLVMModuleRef M) {
/*--.. Data layout .........................................................--*/
const char * LLVMGetDataLayout(LLVMModuleRef M) {
- return unwrap(M)->getDataLayout().c_str();
+ return unwrap(M)->getDataLayoutStr().c_str();
}
void LLVMSetDataLayout(LLVMModuleRef M, const char *Triple) {
@@ -131,13 +184,13 @@ void LLVMDumpModule(LLVMModuleRef M) {
LLVMBool LLVMPrintModuleToFile(LLVMModuleRef M, const char *Filename,
char **ErrorMessage) {
std::string error;
- raw_fd_ostream dest(Filename, error);
+ raw_fd_ostream dest(Filename, error, sys::fs::F_Text);
if (!error.empty()) {
*ErrorMessage = strdup(error.c_str());
return true;
}
- unwrap(M)->print(dest, NULL);
+ unwrap(M)->print(dest, nullptr);
if (!error.empty()) {
*ErrorMessage = strdup(error.c_str());
@@ -151,7 +204,7 @@ char *LLVMPrintModuleToString(LLVMModuleRef M) {
std::string buf;
raw_string_ostream os(buf);
- unwrap(M)->print(os, NULL);
+ unwrap(M)->print(os, nullptr);
os.flush();
return strdup(buf.c_str());
@@ -175,7 +228,6 @@ LLVMContextRef LLVMGetModuleContext(LLVMModuleRef M) {
LLVMTypeKind LLVMGetTypeKind(LLVMTypeRef Ty) {
switch (unwrap(Ty)->getTypeID()) {
- default: llvm_unreachable("Unhandled TypeID.");
case Type::VoidTyID:
return LLVMVoidTypeKind;
case Type::HalfTyID:
@@ -209,6 +261,7 @@ LLVMTypeKind LLVMGetTypeKind(LLVMTypeRef Ty) {
case Type::X86_MMXTyID:
return LLVMX86_MMXTypeKind;
}
+ llvm_unreachable("Unhandled TypeID.");
}
LLVMBool LLVMTypeIsSized(LLVMTypeRef Ty)
@@ -228,7 +281,11 @@ char *LLVMPrintTypeToString(LLVMTypeRef Ty) {
std::string buf;
raw_string_ostream os(buf);
- unwrap(Ty)->print(os);
+ if (unwrap(Ty))
+ unwrap(Ty)->print(os);
+ else
+ os << "Printing <null> Type";
+
os.flush();
return strdup(buf.c_str());
@@ -375,7 +432,7 @@ const char *LLVMGetStructName(LLVMTypeRef Ty)
{
StructType *Type = unwrap<StructType>(Ty);
if (!Type->hasName())
- return 0;
+ return nullptr;
return Type->getName().data();
}
@@ -478,7 +535,11 @@ char* LLVMPrintValueToString(LLVMValueRef Val) {
std::string buf;
raw_string_ostream os(buf);
- unwrap(Val)->print(os);
+ if (unwrap(Val))
+ unwrap(Val)->print(os);
+ else
+ os << "Printing <null> Value";
+
os.flush();
return strdup(buf.c_str());
@@ -497,7 +558,8 @@ LLVMValueRef LLVMGetMetadata(LLVMValueRef Inst, unsigned KindID) {
}
void LLVMSetMetadata(LLVMValueRef Inst, unsigned KindID, LLVMValueRef MD) {
- unwrap<Instruction>(Inst)->setMetadata(KindID, MD? unwrap<MDNode>(MD) : NULL);
+ unwrap<Instruction>(Inst)->setMetadata(KindID,
+ MD ? unwrap<MDNode>(MD) : nullptr);
}
/*--.. Conversion functions ................................................--*/
@@ -514,15 +576,15 @@ LLVMUseRef LLVMGetFirstUse(LLVMValueRef Val) {
Value *V = unwrap(Val);
Value::use_iterator I = V->use_begin();
if (I == V->use_end())
- return 0;
- return wrap(&(I.getUse()));
+ return nullptr;
+ return wrap(&*I);
}
LLVMUseRef LLVMGetNextUse(LLVMUseRef U) {
Use *Next = unwrap(U)->getNext();
if (Next)
return wrap(Next);
- return 0;
+ return nullptr;
}
LLVMValueRef LLVMGetUser(LLVMUseRef U) {
@@ -612,7 +674,7 @@ const char *LLVMGetMDString(LLVMValueRef V, unsigned* Len) {
return S->getString().data();
}
*Len = 0;
- return 0;
+ return nullptr;
}
unsigned LLVMGetMDNodeNumOperands(LLVMValueRef V)
@@ -651,7 +713,7 @@ void LLVMAddNamedMetadataOperand(LLVMModuleRef M, const char* name,
NamedMDNode *N = unwrap(M)->getOrInsertNamedMetadata(name);
if (!N)
return;
- MDNode *Op = Val ? unwrap<MDNode>(Val) : NULL;
+ MDNode *Op = Val ? unwrap<MDNode>(Val) : nullptr;
if (Op)
N->addOperand(Op);
}
@@ -1160,14 +1222,6 @@ LLVMLinkage LLVMGetLinkage(LLVMValueRef Global) {
return LLVMInternalLinkage;
case GlobalValue::PrivateLinkage:
return LLVMPrivateLinkage;
- case GlobalValue::LinkerPrivateLinkage:
- return LLVMLinkerPrivateLinkage;
- case GlobalValue::LinkerPrivateWeakLinkage:
- return LLVMLinkerPrivateWeakLinkage;
- case GlobalValue::DLLImportLinkage:
- return LLVMDLLImportLinkage;
- case GlobalValue::DLLExportLinkage:
- return LLVMDLLExportLinkage;
case GlobalValue::ExternalWeakLinkage:
return LLVMExternalWeakLinkage;
case GlobalValue::CommonLinkage:
@@ -1213,16 +1267,18 @@ void LLVMSetLinkage(LLVMValueRef Global, LLVMLinkage Linkage) {
GV->setLinkage(GlobalValue::PrivateLinkage);
break;
case LLVMLinkerPrivateLinkage:
- GV->setLinkage(GlobalValue::LinkerPrivateLinkage);
+ GV->setLinkage(GlobalValue::PrivateLinkage);
break;
case LLVMLinkerPrivateWeakLinkage:
- GV->setLinkage(GlobalValue::LinkerPrivateWeakLinkage);
+ GV->setLinkage(GlobalValue::PrivateLinkage);
break;
case LLVMDLLImportLinkage:
- GV->setLinkage(GlobalValue::DLLImportLinkage);
+ DEBUG(errs()
+ << "LLVMSetLinkage(): LLVMDLLImportLinkage is no longer supported.");
break;
case LLVMDLLExportLinkage:
- GV->setLinkage(GlobalValue::DLLExportLinkage);
+ DEBUG(errs()
+ << "LLVMSetLinkage(): LLVMDLLExportLinkage is no longer supported.");
break;
case LLVMExternalWeakLinkage:
GV->setLinkage(GlobalValue::ExternalWeakLinkage);
@@ -1238,11 +1294,11 @@ void LLVMSetLinkage(LLVMValueRef Global, LLVMLinkage Linkage) {
}
const char *LLVMGetSection(LLVMValueRef Global) {
- return unwrap<GlobalValue>(Global)->getSection().c_str();
+ return unwrap<GlobalValue>(Global)->getSection();
}
void LLVMSetSection(LLVMValueRef Global, const char *Section) {
- unwrap<GlobalValue>(Global)->setSection(Section);
+ unwrap<GlobalObject>(Global)->setSection(Section);
}
LLVMVisibility LLVMGetVisibility(LLVMValueRef Global) {
@@ -1255,45 +1311,70 @@ void LLVMSetVisibility(LLVMValueRef Global, LLVMVisibility Viz) {
->setVisibility(static_cast<GlobalValue::VisibilityTypes>(Viz));
}
+LLVMDLLStorageClass LLVMGetDLLStorageClass(LLVMValueRef Global) {
+ return static_cast<LLVMDLLStorageClass>(
+ unwrap<GlobalValue>(Global)->getDLLStorageClass());
+}
+
+void LLVMSetDLLStorageClass(LLVMValueRef Global, LLVMDLLStorageClass Class) {
+ unwrap<GlobalValue>(Global)->setDLLStorageClass(
+ static_cast<GlobalValue::DLLStorageClassTypes>(Class));
+}
+
+LLVMBool LLVMHasUnnamedAddr(LLVMValueRef Global) {
+ return unwrap<GlobalValue>(Global)->hasUnnamedAddr();
+}
+
+void LLVMSetUnnamedAddr(LLVMValueRef Global, LLVMBool HasUnnamedAddr) {
+ unwrap<GlobalValue>(Global)->setUnnamedAddr(HasUnnamedAddr);
+}
+
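With the DLLImport/DLLExport linkages gone (see the LLVMSetLinkage hunk above), dllimport/dllexport are now expressed through the storage-class accessors added here. A short sketch of the replacement pattern, where G is an LLVMValueRef for a global obtained elsewhere and the enum names follow llvm-c/Core.h:

    LLVMSetLinkage(G, LLVMExternalLinkage);
    LLVMSetDLLStorageClass(G, LLVMDLLImportStorageClass);
    /* later readers can query it back */
    if (LLVMGetDLLStorageClass(G) == LLVMDLLImportStorageClass) {
      /* treat G as dllimport */
    }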
/*--.. Operations on global variables, load and store instructions .........--*/
unsigned LLVMGetAlignment(LLVMValueRef V) {
Value *P = unwrap<Value>(V);
if (GlobalValue *GV = dyn_cast<GlobalValue>(P))
return GV->getAlignment();
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(P))
+ return AI->getAlignment();
if (LoadInst *LI = dyn_cast<LoadInst>(P))
return LI->getAlignment();
if (StoreInst *SI = dyn_cast<StoreInst>(P))
return SI->getAlignment();
- llvm_unreachable("only GlobalValue, LoadInst and StoreInst have alignment");
+ llvm_unreachable(
+ "only GlobalValue, AllocaInst, LoadInst and StoreInst have alignment");
}
void LLVMSetAlignment(LLVMValueRef V, unsigned Bytes) {
Value *P = unwrap<Value>(V);
- if (GlobalValue *GV = dyn_cast<GlobalValue>(P))
+ if (GlobalObject *GV = dyn_cast<GlobalObject>(P))
GV->setAlignment(Bytes);
+ else if (AllocaInst *AI = dyn_cast<AllocaInst>(P))
+ AI->setAlignment(Bytes);
else if (LoadInst *LI = dyn_cast<LoadInst>(P))
LI->setAlignment(Bytes);
else if (StoreInst *SI = dyn_cast<StoreInst>(P))
SI->setAlignment(Bytes);
else
- llvm_unreachable("only GlobalValue, LoadInst and StoreInst have alignment");
+ llvm_unreachable(
+ "only GlobalValue, AllocaInst, LoadInst and StoreInst have alignment");
}
/*--.. Operations on global variables ......................................--*/
LLVMValueRef LLVMAddGlobal(LLVMModuleRef M, LLVMTypeRef Ty, const char *Name) {
return wrap(new GlobalVariable(*unwrap(M), unwrap(Ty), false,
- GlobalValue::ExternalLinkage, 0, Name));
+ GlobalValue::ExternalLinkage, nullptr, Name));
}
LLVMValueRef LLVMAddGlobalInAddressSpace(LLVMModuleRef M, LLVMTypeRef Ty,
const char *Name,
unsigned AddressSpace) {
return wrap(new GlobalVariable(*unwrap(M), unwrap(Ty), false,
- GlobalValue::ExternalLinkage, 0, Name, 0,
- GlobalVariable::NotThreadLocal, AddressSpace));
+ GlobalValue::ExternalLinkage, nullptr, Name,
+ nullptr, GlobalVariable::NotThreadLocal,
+ AddressSpace));
}
LLVMValueRef LLVMGetNamedGlobal(LLVMModuleRef M, const char *Name) {
@@ -1304,7 +1385,7 @@ LLVMValueRef LLVMGetFirstGlobal(LLVMModuleRef M) {
Module *Mod = unwrap(M);
Module::global_iterator I = Mod->global_begin();
if (I == Mod->global_end())
- return 0;
+ return nullptr;
return wrap(I);
}
@@ -1312,7 +1393,7 @@ LLVMValueRef LLVMGetLastGlobal(LLVMModuleRef M) {
Module *Mod = unwrap(M);
Module::global_iterator I = Mod->global_end();
if (I == Mod->global_begin())
- return 0;
+ return nullptr;
return wrap(--I);
}
@@ -1320,7 +1401,7 @@ LLVMValueRef LLVMGetNextGlobal(LLVMValueRef GlobalVar) {
GlobalVariable *GV = unwrap<GlobalVariable>(GlobalVar);
Module::global_iterator I = GV;
if (++I == GV->getParent()->global_end())
- return 0;
+ return nullptr;
return wrap(I);
}
@@ -1328,7 +1409,7 @@ LLVMValueRef LLVMGetPreviousGlobal(LLVMValueRef GlobalVar) {
GlobalVariable *GV = unwrap<GlobalVariable>(GlobalVar);
Module::global_iterator I = GV;
if (I == GV->getParent()->global_begin())
- return 0;
+ return nullptr;
return wrap(--I);
}
@@ -1339,7 +1420,7 @@ void LLVMDeleteGlobal(LLVMValueRef GlobalVar) {
LLVMValueRef LLVMGetInitializer(LLVMValueRef GlobalVar) {
GlobalVariable* GV = unwrap<GlobalVariable>(GlobalVar);
if ( !GV->hasInitializer() )
- return 0;
+ return nullptr;
return wrap(GV->getInitializer());
}
@@ -1415,8 +1496,10 @@ void LLVMSetExternallyInitialized(LLVMValueRef GlobalVar, LLVMBool IsExtInit) {
LLVMValueRef LLVMAddAlias(LLVMModuleRef M, LLVMTypeRef Ty, LLVMValueRef Aliasee,
const char *Name) {
- return wrap(new GlobalAlias(unwrap(Ty), GlobalValue::ExternalLinkage, Name,
- unwrap<Constant>(Aliasee), unwrap (M)));
+ auto *PTy = cast<PointerType>(unwrap(Ty));
+ return wrap(GlobalAlias::create(PTy->getElementType(), PTy->getAddressSpace(),
+ GlobalValue::ExternalLinkage, Name,
+ unwrap<GlobalObject>(Aliasee), unwrap(M)));
}
/*--.. Operations on functions .............................................--*/
@@ -1435,7 +1518,7 @@ LLVMValueRef LLVMGetFirstFunction(LLVMModuleRef M) {
Module *Mod = unwrap(M);
Module::iterator I = Mod->begin();
if (I == Mod->end())
- return 0;
+ return nullptr;
return wrap(I);
}
@@ -1443,7 +1526,7 @@ LLVMValueRef LLVMGetLastFunction(LLVMModuleRef M) {
Module *Mod = unwrap(M);
Module::iterator I = Mod->end();
if (I == Mod->begin())
- return 0;
+ return nullptr;
return wrap(--I);
}
@@ -1451,7 +1534,7 @@ LLVMValueRef LLVMGetNextFunction(LLVMValueRef Fn) {
Function *Func = unwrap<Function>(Fn);
Module::iterator I = Func;
if (++I == Func->getParent()->end())
- return 0;
+ return nullptr;
return wrap(I);
}
@@ -1459,7 +1542,7 @@ LLVMValueRef LLVMGetPreviousFunction(LLVMValueRef Fn) {
Function *Func = unwrap<Function>(Fn);
Module::iterator I = Func;
if (I == Func->getParent()->begin())
- return 0;
+ return nullptr;
return wrap(--I);
}
@@ -1484,7 +1567,7 @@ void LLVMSetFunctionCallConv(LLVMValueRef Fn, unsigned CC) {
const char *LLVMGetGC(LLVMValueRef Fn) {
Function *F = unwrap<Function>(Fn);
- return F->hasGC()? F->getGC() : 0;
+ return F->hasGC()? F->getGC() : nullptr;
}
void LLVMSetGC(LLVMValueRef Fn, const char *GC) {
@@ -1565,7 +1648,7 @@ LLVMValueRef LLVMGetFirstParam(LLVMValueRef Fn) {
Function *Func = unwrap<Function>(Fn);
Function::arg_iterator I = Func->arg_begin();
if (I == Func->arg_end())
- return 0;
+ return nullptr;
return wrap(I);
}
@@ -1573,7 +1656,7 @@ LLVMValueRef LLVMGetLastParam(LLVMValueRef Fn) {
Function *Func = unwrap<Function>(Fn);
Function::arg_iterator I = Func->arg_end();
if (I == Func->arg_begin())
- return 0;
+ return nullptr;
return wrap(--I);
}
@@ -1581,7 +1664,7 @@ LLVMValueRef LLVMGetNextParam(LLVMValueRef Arg) {
Argument *A = unwrap<Argument>(Arg);
Function::arg_iterator I = A;
if (++I == A->getParent()->arg_end())
- return 0;
+ return nullptr;
return wrap(I);
}
@@ -1589,7 +1672,7 @@ LLVMValueRef LLVMGetPreviousParam(LLVMValueRef Arg) {
Argument *A = unwrap<Argument>(Arg);
Function::arg_iterator I = A;
if (I == A->getParent()->arg_begin())
- return 0;
+ return nullptr;
return wrap(--I);
}
@@ -1659,7 +1742,7 @@ LLVMBasicBlockRef LLVMGetFirstBasicBlock(LLVMValueRef Fn) {
Function *Func = unwrap<Function>(Fn);
Function::iterator I = Func->begin();
if (I == Func->end())
- return 0;
+ return nullptr;
return wrap(I);
}
@@ -1667,7 +1750,7 @@ LLVMBasicBlockRef LLVMGetLastBasicBlock(LLVMValueRef Fn) {
Function *Func = unwrap<Function>(Fn);
Function::iterator I = Func->end();
if (I == Func->begin())
- return 0;
+ return nullptr;
return wrap(--I);
}
@@ -1675,7 +1758,7 @@ LLVMBasicBlockRef LLVMGetNextBasicBlock(LLVMBasicBlockRef BB) {
BasicBlock *Block = unwrap(BB);
Function::iterator I = Block;
if (++I == Block->getParent()->end())
- return 0;
+ return nullptr;
return wrap(I);
}
@@ -1683,7 +1766,7 @@ LLVMBasicBlockRef LLVMGetPreviousBasicBlock(LLVMBasicBlockRef BB) {
BasicBlock *Block = unwrap(BB);
Function::iterator I = Block;
if (I == Block->getParent()->begin())
- return 0;
+ return nullptr;
return wrap(--I);
}
@@ -1735,7 +1818,7 @@ LLVMValueRef LLVMGetFirstInstruction(LLVMBasicBlockRef BB) {
BasicBlock *Block = unwrap(BB);
BasicBlock::iterator I = Block->begin();
if (I == Block->end())
- return 0;
+ return nullptr;
return wrap(I);
}
@@ -1743,7 +1826,7 @@ LLVMValueRef LLVMGetLastInstruction(LLVMBasicBlockRef BB) {
BasicBlock *Block = unwrap(BB);
BasicBlock::iterator I = Block->end();
if (I == Block->begin())
- return 0;
+ return nullptr;
return wrap(--I);
}
@@ -1751,7 +1834,7 @@ LLVMValueRef LLVMGetNextInstruction(LLVMValueRef Inst) {
Instruction *Instr = unwrap<Instruction>(Inst);
BasicBlock::iterator I = Instr;
if (++I == Instr->getParent()->end())
- return 0;
+ return nullptr;
return wrap(I);
}
@@ -1759,7 +1842,7 @@ LLVMValueRef LLVMGetPreviousInstruction(LLVMValueRef Inst) {
Instruction *Instr = unwrap<Instruction>(Inst);
BasicBlock::iterator I = Instr;
if (I == Instr->getParent()->begin())
- return 0;
+ return nullptr;
return wrap(--I);
}
@@ -1922,7 +2005,7 @@ void LLVMDisposeBuilder(LLVMBuilderRef Builder) {
/*--.. Metadata builders ...................................................--*/
void LLVMSetCurrentDebugLocation(LLVMBuilderRef Builder, LLVMValueRef L) {
- MDNode *Loc = L ? unwrap<MDNode>(L) : NULL;
+ MDNode *Loc = L ? unwrap<MDNode>(L) : nullptr;
unwrap(Builder)->SetCurrentDebugLocation(DebugLoc::getFromDILocation(Loc));
}
@@ -2178,7 +2261,7 @@ LLVMValueRef LLVMBuildMalloc(LLVMBuilderRef B, LLVMTypeRef Ty,
AllocSize = ConstantExpr::getTruncOrBitCast(AllocSize, ITy);
Instruction* Malloc = CallInst::CreateMalloc(unwrap(B)->GetInsertBlock(),
ITy, unwrap(Ty), AllocSize,
- 0, 0, "");
+ nullptr, nullptr, "");
return wrap(unwrap(B)->Insert(Malloc, Twine(Name)));
}
@@ -2189,13 +2272,13 @@ LLVMValueRef LLVMBuildArrayMalloc(LLVMBuilderRef B, LLVMTypeRef Ty,
AllocSize = ConstantExpr::getTruncOrBitCast(AllocSize, ITy);
Instruction* Malloc = CallInst::CreateMalloc(unwrap(B)->GetInsertBlock(),
ITy, unwrap(Ty), AllocSize,
- unwrap(Val), 0, "");
+ unwrap(Val), nullptr, "");
return wrap(unwrap(B)->Insert(Malloc, Twine(Name)));
}
LLVMValueRef LLVMBuildAlloca(LLVMBuilderRef B, LLVMTypeRef Ty,
const char *Name) {
- return wrap(unwrap(B)->CreateAlloca(unwrap(Ty), 0, Name));
+ return wrap(unwrap(B)->CreateAlloca(unwrap(Ty), nullptr, Name));
}
LLVMValueRef LLVMBuildArrayAlloca(LLVMBuilderRef B, LLVMTypeRef Ty,
@@ -2219,6 +2302,29 @@ LLVMValueRef LLVMBuildStore(LLVMBuilderRef B, LLVMValueRef Val,
return wrap(unwrap(B)->CreateStore(unwrap(Val), unwrap(PointerVal)));
}
+static AtomicOrdering mapFromLLVMOrdering(LLVMAtomicOrdering Ordering) {
+ switch (Ordering) {
+ case LLVMAtomicOrderingNotAtomic: return NotAtomic;
+ case LLVMAtomicOrderingUnordered: return Unordered;
+ case LLVMAtomicOrderingMonotonic: return Monotonic;
+ case LLVMAtomicOrderingAcquire: return Acquire;
+ case LLVMAtomicOrderingRelease: return Release;
+ case LLVMAtomicOrderingAcquireRelease: return AcquireRelease;
+ case LLVMAtomicOrderingSequentiallyConsistent:
+ return SequentiallyConsistent;
+ }
+
+ llvm_unreachable("Invalid LLVMAtomicOrdering value!");
+}
+
+LLVMValueRef LLVMBuildFence(LLVMBuilderRef B, LLVMAtomicOrdering Ordering,
+ LLVMBool isSingleThread, const char *Name) {
+ return wrap(
+ unwrap(B)->CreateFence(mapFromLLVMOrdering(Ordering),
+ isSingleThread ? SingleThread : CrossThread,
+ Name));
+}
+
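LLVMBuildFence is new here; it maps the C ordering enum through mapFromLLVMOrdering above and inserts a fence instruction at the builder's current position. A minimal call, assuming B is an LLVMBuilderRef already positioned inside a basic block:

    LLVMValueRef Fence =
        LLVMBuildFence(B, LLVMAtomicOrderingSequentiallyConsistent,
                       /*isSingleThread=*/0, "");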
LLVMValueRef LLVMBuildGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
LLVMValueRef *Indices, unsigned NumIndices,
const char *Name) {
@@ -2476,22 +2582,8 @@ LLVMValueRef LLVMBuildAtomicRMW(LLVMBuilderRef B,LLVMAtomicRMWBinOp op,
case LLVMAtomicRMWBinOpUMax: intop = AtomicRMWInst::UMax; break;
case LLVMAtomicRMWBinOpUMin: intop = AtomicRMWInst::UMin; break;
}
- AtomicOrdering intordering;
- switch (ordering) {
- case LLVMAtomicOrderingNotAtomic: intordering = NotAtomic; break;
- case LLVMAtomicOrderingUnordered: intordering = Unordered; break;
- case LLVMAtomicOrderingMonotonic: intordering = Monotonic; break;
- case LLVMAtomicOrderingAcquire: intordering = Acquire; break;
- case LLVMAtomicOrderingRelease: intordering = Release; break;
- case LLVMAtomicOrderingAcquireRelease:
- intordering = AcquireRelease;
- break;
- case LLVMAtomicOrderingSequentiallyConsistent:
- intordering = SequentiallyConsistent;
- break;
- }
return wrap(unwrap(B)->CreateAtomicRMW(intop, unwrap(PTR), unwrap(Val),
- intordering, singleThread ? SingleThread : CrossThread));
+ mapFromLLVMOrdering(ordering), singleThread ? SingleThread : CrossThread));
}
@@ -2514,28 +2606,24 @@ LLVMBool LLVMCreateMemoryBufferWithContentsOfFile(
LLVMMemoryBufferRef *OutMemBuf,
char **OutMessage) {
- OwningPtr<MemoryBuffer> MB;
- error_code ec;
- if (!(ec = MemoryBuffer::getFile(Path, MB))) {
- *OutMemBuf = wrap(MB.take());
- return 0;
+ ErrorOr<std::unique_ptr<MemoryBuffer>> MBOrErr = MemoryBuffer::getFile(Path);
+ if (std::error_code EC = MBOrErr.getError()) {
+ *OutMessage = strdup(EC.message().c_str());
+ return 1;
}
-
- *OutMessage = strdup(ec.message().c_str());
- return 1;
+ *OutMemBuf = wrap(MBOrErr.get().release());
+ return 0;
}
LLVMBool LLVMCreateMemoryBufferWithSTDIN(LLVMMemoryBufferRef *OutMemBuf,
char **OutMessage) {
- OwningPtr<MemoryBuffer> MB;
- error_code ec;
- if (!(ec = MemoryBuffer::getSTDIN(MB))) {
- *OutMemBuf = wrap(MB.take());
- return 0;
+ ErrorOr<std::unique_ptr<MemoryBuffer>> MBOrErr = MemoryBuffer::getSTDIN();
+ if (std::error_code EC = MBOrErr.getError()) {
+ *OutMessage = strdup(EC.message().c_str());
+ return 1;
}
-
- *OutMessage = strdup(ec.message().c_str());
- return 1;
+ *OutMemBuf = wrap(MBOrErr.get().release());
+ return 0;
}
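Both memory-buffer helpers now follow the ErrorOr<std::unique_ptr<...>> idiom that replaced OwningPtr/error_code in this release. The same pattern on the C++ side looks roughly like this sketch (the path handling and the helper name are illustrative):

    #include "llvm/Support/ErrorOr.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include "llvm/Support/raw_ostream.h"
    #include <memory>
    #include <system_error>
    using namespace llvm;

    static std::unique_ptr<MemoryBuffer> openOrNull(const char *Path) {
      ErrorOr<std::unique_ptr<MemoryBuffer>> BufOrErr =
          MemoryBuffer::getFile(Path);
      if (std::error_code EC = BufOrErr.getError()) {
        errs() << Path << ": " << EC.message() << "\n";
        return nullptr;
      }
      return std::move(BufOrErr.get());
    }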
LLVMMemoryBufferRef LLVMCreateMemoryBufferWithMemoryRange(
@@ -2616,11 +2704,10 @@ void LLVMDisposePassManager(LLVMPassManagerRef PM) {
/*===-- Threading ------------------------------------------------------===*/
LLVMBool LLVMStartMultithreaded() {
- return llvm_start_multithreaded();
+ return LLVMIsMultithreaded();
}
void LLVMStopMultithreaded() {
- llvm_stop_multithreaded();
}
LLVMBool LLVMIsMultithreaded() {
diff --git a/contrib/llvm/lib/IR/DIBuilder.cpp b/contrib/llvm/lib/IR/DIBuilder.cpp
index c4a9f41..218787c 100644
--- a/contrib/llvm/lib/IR/DIBuilder.cpp
+++ b/contrib/llvm/lib/IR/DIBuilder.cpp
@@ -11,10 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/DIBuilder.h"
+#include "llvm/IR/DIBuilder.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
@@ -30,8 +30,9 @@ static Constant *GetTagConstant(LLVMContext &VMContext, unsigned Tag) {
}
DIBuilder::DIBuilder(Module &m)
- : M(m), VMContext(M.getContext()), TempEnumTypes(0), TempRetainTypes(0),
- TempSubprograms(0), TempGVs(0), DeclareFn(0), ValueFn(0) {}
+ : M(m), VMContext(M.getContext()), TempEnumTypes(nullptr),
+ TempRetainTypes(nullptr), TempSubprograms(nullptr), TempGVs(nullptr),
+ DeclareFn(nullptr), ValueFn(nullptr) {}
/// finalize - Construct any deferred debug info descriptors.
void DIBuilder::finalize() {
@@ -69,7 +70,10 @@ void DIBuilder::finalize() {
DIArray GVs = getOrCreateArray(AllGVs);
DIType(TempGVs).replaceAllUsesWith(GVs);
- DIArray IMs = getOrCreateArray(AllImportedModules);
+ SmallVector<Value *, 16> RetainValuesI;
+ for (unsigned I = 0, E = AllImportedModules.size(); I < E; I++)
+ RetainValuesI.push_back(AllImportedModules[I]);
+ DIArray IMs = getOrCreateArray(RetainValuesI);
DIType(TempImportedModules).replaceAllUsesWith(IMs);
}
@@ -77,7 +81,7 @@ void DIBuilder::finalize() {
/// N.
static MDNode *getNonCompileUnitScope(MDNode *N) {
if (DIDescriptor(N).isCompileUnit())
- return NULL;
+ return nullptr;
return N;
}
@@ -97,8 +101,11 @@ DICompileUnit DIBuilder::createCompileUnit(unsigned Lang, StringRef Filename,
StringRef Directory,
StringRef Producer, bool isOptimized,
StringRef Flags, unsigned RunTimeVer,
- StringRef SplitName) {
- assert(((Lang <= dwarf::DW_LANG_Python && Lang >= dwarf::DW_LANG_C89) ||
+ StringRef SplitName,
+ DebugEmissionKind Kind,
+ bool EmitDebugInfo) {
+
+ assert(((Lang <= dwarf::DW_LANG_OCaml && Lang >= dwarf::DW_LANG_C89) ||
(Lang <= dwarf::DW_LANG_hi_user && Lang >= dwarf::DW_LANG_lo_user)) &&
"Invalid Language tag");
assert(!Filename.empty() &&
@@ -127,26 +134,33 @@ DICompileUnit DIBuilder::createCompileUnit(unsigned Lang, StringRef Filename,
TempSubprograms,
TempGVs,
TempImportedModules,
- MDString::get(VMContext, SplitName)
+ MDString::get(VMContext, SplitName),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Kind)
};
MDNode *CUNode = MDNode::get(VMContext, Elts);
// Create a named metadata so that it is easier to find cu in a module.
- NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.cu");
- NMD->addOperand(CUNode);
+ // Note that we only generate this when the caller wants to actually
+ // emit debug information. When we are only interested in tracking
+ // source line locations throughout the backend, we prevent codegen from
+ // emitting debug info in the final output by not generating llvm.dbg.cu.
+ if (EmitDebugInfo) {
+ NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.cu");
+ NMD->addOperand(CUNode);
+ }
return DICompileUnit(CUNode);
}
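createCompileUnit now takes an emission kind plus an EmitDebugInfo flag, and only registers llvm.dbg.cu when the caller actually wants debug output. A hedged sketch of a call with the new trailing parameters (the file, directory and producer strings are placeholders, and the enum value assumes DIBuilder's DebugEmissionKind as extended by this change):

    DIBuilder DIB(M);   // M is an existing llvm::Module
    DICompileUnit CU = DIB.createCompileUnit(
        dwarf::DW_LANG_C99, "t.c", "/tmp", "my-frontend",
        /*isOptimized=*/false, /*Flags=*/"", /*RunTimeVer=*/0,
        /*SplitName=*/"", DIBuilder::FullDebug, /*EmitDebugInfo=*/true);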
static DIImportedEntity
-createImportedModule(LLVMContext &C, DIScope Context, DIDescriptor NS,
- unsigned Line, StringRef Name,
- SmallVectorImpl<Value *> &AllImportedModules) {
+createImportedModule(LLVMContext &C, dwarf::Tag Tag, DIScope Context,
+ Value *NS, unsigned Line, StringRef Name,
+ SmallVectorImpl<TrackingVH<MDNode>> &AllImportedModules) {
const MDNode *R;
if (Name.empty()) {
Value *Elts[] = {
- GetTagConstant(C, dwarf::DW_TAG_imported_module),
+ GetTagConstant(C, Tag),
Context,
NS,
ConstantInt::get(Type::getInt32Ty(C), Line),
@@ -154,7 +168,7 @@ createImportedModule(LLVMContext &C, DIScope Context, DIDescriptor NS,
R = MDNode::get(C, Elts);
} else {
Value *Elts[] = {
- GetTagConstant(C, dwarf::DW_TAG_imported_module),
+ GetTagConstant(C, Tag),
Context,
NS,
ConstantInt::get(Type::getInt32Ty(C), Line),
@@ -164,38 +178,37 @@ createImportedModule(LLVMContext &C, DIScope Context, DIDescriptor NS,
}
DIImportedEntity M(R);
assert(M.Verify() && "Imported module should be valid");
- AllImportedModules.push_back(M);
+ AllImportedModules.push_back(TrackingVH<MDNode>(M));
return M;
}
DIImportedEntity DIBuilder::createImportedModule(DIScope Context,
- DINameSpace NS, unsigned Line,
- StringRef Name) {
- return ::createImportedModule(VMContext, Context, NS, Line, Name,
- AllImportedModules);
+ DINameSpace NS,
+ unsigned Line) {
+ return ::createImportedModule(VMContext, dwarf::DW_TAG_imported_module,
+ Context, NS, Line, StringRef(), AllImportedModules);
}
DIImportedEntity DIBuilder::createImportedModule(DIScope Context,
DIImportedEntity NS,
- unsigned Line,
- StringRef Name) {
- return ::createImportedModule(VMContext, Context, NS, Line, Name,
+ unsigned Line) {
+ return ::createImportedModule(VMContext, dwarf::DW_TAG_imported_module,
+ Context, NS, Line, StringRef(), AllImportedModules);
+}
+
+DIImportedEntity DIBuilder::createImportedDeclaration(DIScope Context,
+ DIScope Decl,
+ unsigned Line, StringRef Name) {
+ return ::createImportedModule(VMContext, dwarf::DW_TAG_imported_declaration,
+ Context, Decl.getRef(), Line, Name,
AllImportedModules);
}
DIImportedEntity DIBuilder::createImportedDeclaration(DIScope Context,
- DIDescriptor Decl,
- unsigned Line) {
- Value *Elts[] = {
- GetTagConstant(VMContext, dwarf::DW_TAG_imported_declaration),
- Context,
- Decl,
- ConstantInt::get(Type::getInt32Ty(VMContext), Line),
- };
- DIImportedEntity M(MDNode::get(VMContext, Elts));
- assert(M.Verify() && "Imported module should be valid");
- AllImportedModules.push_back(M);
- return M;
+ DIImportedEntity Imp,
+ unsigned Line, StringRef Name) {
+ return ::createImportedModule(VMContext, dwarf::DW_TAG_imported_declaration,
+ Context, Imp, Line, Name, AllImportedModules);
}
/// createFile - Create a file descriptor to hold debugging information
@@ -226,8 +239,8 @@ DIBasicType DIBuilder::createUnspecifiedType(StringRef Name) {
// size, alignment, offset and flags are always empty here.
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_unspecified_type),
- NULL, // Filename
- NULL, // Unused
+ nullptr, // Filename
+ nullptr, // Unused
MDString::get(VMContext, Name),
ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line
ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size
@@ -254,8 +267,8 @@ DIBuilder::createBasicType(StringRef Name, uint64_t SizeInBits,
// offset and flags are always empty here.
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_base_type),
- NULL, // File/directory name
- NULL, // Unused
+ nullptr, // File/directory name
+ nullptr, // Unused
MDString::get(VMContext, Name),
ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line
ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
@@ -273,8 +286,8 @@ DIDerivedType DIBuilder::createQualifiedType(unsigned Tag, DIType FromTy) {
// Qualified types are encoded in DIDerivedType format.
Value *Elts[] = {
GetTagConstant(VMContext, Tag),
- NULL, // Filename
- NULL, // Unused
+ nullptr, // Filename
+ nullptr, // Unused
MDString::get(VMContext, StringRef()), // Empty name.
ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line
ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size
@@ -293,8 +306,8 @@ DIBuilder::createPointerType(DIType PointeeTy, uint64_t SizeInBits,
// Pointer types are encoded in DIDerivedType format.
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_pointer_type),
- NULL, // Filename
- NULL, // Unused
+ nullptr, // Filename
+ nullptr, // Unused
MDString::get(VMContext, Name),
ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line
ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
@@ -311,9 +324,9 @@ DIDerivedType DIBuilder::createMemberPointerType(DIType PointeeTy,
// Pointer types are encoded in DIDerivedType format.
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_ptr_to_member_type),
- NULL, // Filename
- NULL, // Unused
- NULL,
+ nullptr, // Filename
+ nullptr, // Unused
+ nullptr,
ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line
ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size
ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align
@@ -332,9 +345,9 @@ DIDerivedType DIBuilder::createReferenceType(unsigned Tag, DIType RTy) {
// References are encoded in DIDerivedType format.
Value *Elts[] = {
GetTagConstant(VMContext, Tag),
- NULL, // Filename
- NULL, // TheCU,
- NULL, // Name
+ nullptr, // Filename
+ nullptr, // TheCU,
+ nullptr, // Name
ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line
ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size
ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align
@@ -349,7 +362,6 @@ DIDerivedType DIBuilder::createReferenceType(unsigned Tag, DIType RTy) {
DIDerivedType DIBuilder::createTypedef(DIType Ty, StringRef Name, DIFile File,
unsigned LineNo, DIDescriptor Context) {
// typedefs are encoded in DIDerivedType format.
- assert(Ty.isType() && "Invalid typedef type!");
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_typedef),
File.getFileNode(),
@@ -372,9 +384,9 @@ DIDerivedType DIBuilder::createFriend(DIType Ty, DIType FriendTy) {
assert(FriendTy.isType() && "Invalid friend type!");
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_friend),
- NULL,
+ nullptr,
Ty.getRef(),
- NULL, // Name
+ nullptr, // Name
ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line
ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size
ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align
@@ -394,9 +406,9 @@ DIDerivedType DIBuilder::createInheritance(DIType Ty, DIType BaseTy,
// TAG_inheritance is encoded in DIDerivedType format.
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_inheritance),
- NULL,
+ nullptr,
Ty.getRef(),
- NULL, // Name
+ nullptr, // Name
ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line
ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size
ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align
@@ -625,7 +637,8 @@ DICompositeType DIBuilder::createClassType(DIDescriptor Context, StringRef Name,
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
VTableHolder.getRef(),
TemplateParams,
- UniqueIdentifier.empty() ? NULL : MDString::get(VMContext, UniqueIdentifier)
+ UniqueIdentifier.empty() ? nullptr
+ : MDString::get(VMContext, UniqueIdentifier)
};
DICompositeType R(MDNode::get(VMContext, Elts));
assert(R.isCompositeType() &&
@@ -661,8 +674,9 @@ DICompositeType DIBuilder::createStructType(DIDescriptor Context,
Elements,
ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeLang),
VTableHolder.getRef(),
- NULL,
- UniqueIdentifier.empty() ? NULL : MDString::get(VMContext, UniqueIdentifier)
+ nullptr,
+ UniqueIdentifier.empty() ? nullptr
+ : MDString::get(VMContext, UniqueIdentifier)
};
DICompositeType R(MDNode::get(VMContext, Elts));
assert(R.isCompositeType() &&
@@ -691,12 +705,13 @@ DICompositeType DIBuilder::createUnionType(DIDescriptor Scope, StringRef Name,
ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Offset
ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
- NULL,
+ nullptr,
Elements,
ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeLang),
- NULL,
- NULL,
- UniqueIdentifier.empty() ? NULL : MDString::get(VMContext, UniqueIdentifier)
+ nullptr,
+ nullptr,
+ UniqueIdentifier.empty() ? nullptr
+ : MDString::get(VMContext, UniqueIdentifier)
};
DICompositeType R(MDNode::get(VMContext, Elts));
if (!UniqueIdentifier.empty())
@@ -706,24 +721,25 @@ DICompositeType DIBuilder::createUnionType(DIDescriptor Scope, StringRef Name,
/// createSubroutineType - Create subroutine type.
DICompositeType DIBuilder::createSubroutineType(DIFile File,
- DIArray ParameterTypes) {
+ DIArray ParameterTypes,
+ unsigned Flags) {
// TAG_subroutine_type is encoded in DICompositeType format.
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_subroutine_type),
Constant::getNullValue(Type::getInt32Ty(VMContext)),
- NULL,
+ nullptr,
MDString::get(VMContext, ""),
ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line
ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size
ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align
ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Offset
- ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Flags
- NULL,
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags), // Flags
+ nullptr,
ParameterTypes,
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- NULL,
- NULL,
- NULL // Type Identifer
+ nullptr,
+ nullptr,
+    nullptr                                  // Type Identifier
};
return DICompositeType(MDNode::get(VMContext, Elts));
}
@@ -748,9 +764,10 @@ DICompositeType DIBuilder::createEnumerationType(
UnderlyingType.getRef(),
Elements,
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- NULL,
- NULL,
- UniqueIdentifier.empty() ? NULL : MDString::get(VMContext, UniqueIdentifier)
+ nullptr,
+ nullptr,
+ UniqueIdentifier.empty() ? nullptr
+ : MDString::get(VMContext, UniqueIdentifier)
};
DICompositeType CTy(MDNode::get(VMContext, Elts));
AllEnumTypes.push_back(CTy);
@@ -765,8 +782,8 @@ DICompositeType DIBuilder::createArrayType(uint64_t Size, uint64_t AlignInBits,
// TAG_array_type is encoded in DICompositeType format.
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_array_type),
- NULL, // Filename/Directory,
- NULL, // Unused
+ nullptr, // Filename/Directory,
+ nullptr, // Unused
MDString::get(VMContext, ""),
ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line
ConstantInt::get(Type::getInt64Ty(VMContext), Size),
@@ -776,9 +793,9 @@ DICompositeType DIBuilder::createArrayType(uint64_t Size, uint64_t AlignInBits,
Ty.getRef(),
Subscripts,
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- NULL,
- NULL,
- NULL // Type Identifer
+ nullptr,
+ nullptr,
+    nullptr                                  // Type Identifier
};
return DICompositeType(MDNode::get(VMContext, Elts));
}
@@ -789,8 +806,8 @@ DICompositeType DIBuilder::createVectorType(uint64_t Size, uint64_t AlignInBits,
// A vector is an array type with the FlagVector flag applied.
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_array_type),
- NULL, // Filename/Directory,
- NULL, // Unused
+ nullptr, // Filename/Directory,
+ nullptr, // Unused
MDString::get(VMContext, ""),
ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line
ConstantInt::get(Type::getInt64Ty(VMContext), Size),
@@ -800,9 +817,9 @@ DICompositeType DIBuilder::createVectorType(uint64_t Size, uint64_t AlignInBits,
Ty.getRef(),
Subscripts,
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- NULL,
- NULL,
- NULL // Type Identifer
+ nullptr,
+ nullptr,
+    nullptr                                  // Type Identifier
};
return DICompositeType(MDNode::get(VMContext, Elts));
}
@@ -883,12 +900,47 @@ DIBuilder::createForwardDecl(unsigned Tag, StringRef Name, DIDescriptor Scope,
ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Offset
ConstantInt::get(Type::getInt32Ty(VMContext), DIDescriptor::FlagFwdDecl),
- NULL,
+ nullptr,
DIArray(),
ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang),
- NULL,
- NULL, //TemplateParams
- UniqueIdentifier.empty() ? NULL : MDString::get(VMContext, UniqueIdentifier)
+ nullptr,
+ nullptr, //TemplateParams
+ UniqueIdentifier.empty() ? nullptr
+ : MDString::get(VMContext, UniqueIdentifier)
+ };
+ MDNode *Node = MDNode::get(VMContext, Elts);
+ DICompositeType RetTy(Node);
+ assert(RetTy.isCompositeType() &&
+ "createForwardDecl result should be a DIType");
+ if (!UniqueIdentifier.empty())
+ retainType(RetTy);
+ return RetTy;
+}
+
+/// createForwardDecl - Create a temporary forward-declared type that
+/// can be RAUW'd if the full type is seen.
+DICompositeType DIBuilder::createReplaceableForwardDecl(
+ unsigned Tag, StringRef Name, DIDescriptor Scope, DIFile F, unsigned Line,
+ unsigned RuntimeLang, uint64_t SizeInBits, uint64_t AlignInBits,
+ StringRef UniqueIdentifier) {
+ // Create a temporary MDNode.
+ Value *Elts[] = {
+ GetTagConstant(VMContext, Tag),
+ F.getFileNode(),
+ DIScope(getNonCompileUnitScope(Scope)).getRef(),
+ MDString::get(VMContext, Name),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Line),
+ ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Offset
+ ConstantInt::get(Type::getInt32Ty(VMContext), DIDescriptor::FlagFwdDecl),
+ nullptr,
+ DIArray(),
+ ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang),
+ nullptr,
+ nullptr, //TemplateParams
+ UniqueIdentifier.empty() ? nullptr
+ : MDString::get(VMContext, UniqueIdentifier)
};
MDNode *Node = MDNode::getTemporary(VMContext, Elts);
DICompositeType RetTy(Node);
@@ -901,10 +953,6 @@ DIBuilder::createForwardDecl(unsigned Tag, StringRef Name, DIDescriptor Scope,
/// getOrCreateArray - Get a DIArray, create one if required.
DIArray DIBuilder::getOrCreateArray(ArrayRef<Value *> Elements) {
- if (Elements.empty()) {
- Value *Null = Constant::getNullValue(Type::getInt32Ty(VMContext));
- return DIArray(MDNode::get(VMContext, Null));
- }
return DIArray(MDNode::get(VMContext, Elements));
}
@@ -924,12 +972,12 @@ DISubrange DIBuilder::getOrCreateSubrange(int64_t Lo, int64_t Count) {
DIGlobalVariable DIBuilder::createGlobalVariable(StringRef Name,
StringRef LinkageName,
DIFile F, unsigned LineNumber,
- DIType Ty, bool isLocalToUnit,
+ DITypeRef Ty, bool isLocalToUnit,
Value *Val) {
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_variable),
Constant::getNullValue(Type::getInt32Ty(VMContext)),
- NULL, // TheCU,
+ nullptr, // TheCU,
MDString::get(VMContext, Name),
MDString::get(VMContext, Name),
MDString::get(VMContext, LinkageName),
@@ -948,7 +996,8 @@ DIGlobalVariable DIBuilder::createGlobalVariable(StringRef Name,
/// \brief Create a new descriptor for the specified global.
DIGlobalVariable DIBuilder::createGlobalVariable(StringRef Name, DIFile F,
- unsigned LineNumber, DIType Ty,
+ unsigned LineNumber,
+ DITypeRef Ty,
bool isLocalToUnit,
Value *Val) {
return createGlobalVariable(Name, Name, F, LineNumber, Ty, isLocalToUnit,
@@ -961,7 +1010,8 @@ DIGlobalVariable DIBuilder::createStaticVariable(DIDescriptor Context,
StringRef Name,
StringRef LinkageName,
DIFile F, unsigned LineNumber,
- DIType Ty, bool isLocalToUnit,
+ DITypeRef Ty,
+ bool isLocalToUnit,
Value *Val, MDNode *Decl) {
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_variable),
@@ -986,14 +1036,12 @@ DIGlobalVariable DIBuilder::createStaticVariable(DIDescriptor Context,
/// createVariable - Create a new descriptor for the specified variable.
DIVariable DIBuilder::createLocalVariable(unsigned Tag, DIDescriptor Scope,
StringRef Name, DIFile File,
- unsigned LineNo, DIType Ty,
+ unsigned LineNo, DITypeRef Ty,
bool AlwaysPreserve, unsigned Flags,
unsigned ArgNo) {
DIDescriptor Context(getNonCompileUnitScope(Scope));
assert((!Context || Context.isScope()) &&
"createLocalVariable should be called with a valid Context");
- assert(Ty.isType() &&
- "createLocalVariable should be called with a valid type");
Value *Elts[] = {
GetTagConstant(VMContext, Tag),
getNonCompileUnitScope(Scope),
@@ -1024,20 +1072,22 @@ DIVariable DIBuilder::createLocalVariable(unsigned Tag, DIDescriptor Scope,
DIVariable DIBuilder::createComplexVariable(unsigned Tag, DIDescriptor Scope,
StringRef Name, DIFile F,
unsigned LineNo,
- DIType Ty, ArrayRef<Value *> Addr,
+ DITypeRef Ty,
+ ArrayRef<Value *> Addr,
unsigned ArgNo) {
- SmallVector<Value *, 15> Elts;
- Elts.push_back(GetTagConstant(VMContext, Tag));
- Elts.push_back(getNonCompileUnitScope(Scope)),
- Elts.push_back(MDString::get(VMContext, Name));
- Elts.push_back(F);
- Elts.push_back(ConstantInt::get(Type::getInt32Ty(VMContext),
- (LineNo | (ArgNo << 24))));
- Elts.push_back(Ty);
- Elts.push_back(Constant::getNullValue(Type::getInt32Ty(VMContext)));
- Elts.push_back(Constant::getNullValue(Type::getInt32Ty(VMContext)));
- Elts.append(Addr.begin(), Addr.end());
-
+ assert(Addr.size() > 0 && "complex address is empty");
+ Value *Elts[] = {
+ GetTagConstant(VMContext, Tag),
+ getNonCompileUnitScope(Scope),
+ MDString::get(VMContext, Name),
+ F,
+ ConstantInt::get(Type::getInt32Ty(VMContext),
+ (LineNo | (ArgNo << 24))),
+ Ty,
+ Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ MDNode::get(VMContext, Addr)
+ };
return DIVariable(MDNode::get(VMContext, Elts));
}
@@ -1083,7 +1133,7 @@ DISubprogram DIBuilder::createFunction(DIDescriptor Context, StringRef Name,
ConstantInt::get(Type::getInt1Ty(VMContext), isDefinition),
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- NULL,
+ nullptr,
ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized),
Fn,
@@ -1098,7 +1148,8 @@ DISubprogram DIBuilder::createFunction(DIDescriptor Context, StringRef Name,
if (isDefinition)
AllSubprograms.push_back(Node);
DISubprogram S(Node);
- assert(S.isSubprogram() && "createFunction should return a valid DISubprogram");
+ assert(S.isSubprogram() &&
+ "createFunction should return a valid DISubprogram");
return S;
}
@@ -1116,7 +1167,6 @@ DISubprogram DIBuilder::createMethod(DIDescriptor Context, StringRef Name,
assert(getNonCompileUnitScope(Context) &&
"Methods should have both a Context and a context that isn't "
"the compile unit.");
- Value *TElts[] = { GetTagConstant(VMContext, DW_TAG_base_type) };
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_subprogram),
F.getFileNode(),
@@ -1136,7 +1186,7 @@ DISubprogram DIBuilder::createMethod(DIDescriptor Context, StringRef Name,
Fn,
TParam,
Constant::getNullValue(Type::getInt32Ty(VMContext)),
- MDNode::getTemporary(VMContext, TElts),
+ nullptr,
// FIXME: Do we want to use different scope/lines?
ConstantInt::get(Type::getInt32Ty(VMContext), LineNo)
};
@@ -1182,7 +1232,15 @@ DILexicalBlockFile DIBuilder::createLexicalBlockFile(DIDescriptor Scope,
}
DILexicalBlock DIBuilder::createLexicalBlock(DIDescriptor Scope, DIFile File,
- unsigned Line, unsigned Col) {
+ unsigned Line, unsigned Col,
+ unsigned Discriminator) {
+ // FIXME: This isn't thread safe nor the right way to defeat MDNode uniquing.
+ // I believe the right way is to have a self-referential element in the node.
+ // Also: why do we bother with line/column - they're not used and the
+ // documentation (SourceLevelDebugging.rst) claims the line/col are necessary
+ // for uniquing, yet then we have this other solution (because line/col were
+ // inadequate) anyway. Remove all 3 and replace them with a self-reference.
+
// Defeat MDNode uniquing for lexical blocks by using unique id.
static unsigned int unique_id = 0;
Value *Elts[] = {
@@ -1191,6 +1249,7 @@ DILexicalBlock DIBuilder::createLexicalBlock(DIDescriptor Scope, DIFile File,
getNonCompileUnitScope(Scope),
ConstantInt::get(Type::getInt32Ty(VMContext), Line),
ConstantInt::get(Type::getInt32Ty(VMContext), Col),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Discriminator),
ConstantInt::get(Type::getInt32Ty(VMContext), unique_id++)
};
DILexicalBlock R(MDNode::get(VMContext, Elts));
diff --git a/contrib/llvm/lib/IR/DataLayout.cpp b/contrib/llvm/lib/IR/DataLayout.cpp
index 6bdc09e..dea05fb 100644
--- a/contrib/llvm/lib/IR/DataLayout.cpp
+++ b/contrib/llvm/lib/IR/DataLayout.cpp
@@ -18,11 +18,13 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
@@ -33,9 +35,8 @@ using namespace llvm;
// Handle the Pass registration stuff necessary to use DataLayout's.
-// Register the default SparcV9 implementation...
-INITIALIZE_PASS(DataLayout, "datalayout", "Data Layout", false, true)
-char DataLayout::ID = 0;
+INITIALIZE_PASS(DataLayoutPass, "datalayout", "Data Layout", false, true)
+char DataLayoutPass::ID = 0;
//===----------------------------------------------------------------------===//
// Support for StructLayout
@@ -118,21 +119,21 @@ LayoutAlignElem::operator==(const LayoutAlignElem &rhs) const {
}
const LayoutAlignElem
-DataLayout::InvalidAlignmentElem = LayoutAlignElem::get(INVALID_ALIGN, 0, 0, 0);
+DataLayout::InvalidAlignmentElem = { INVALID_ALIGN, 0, 0, 0 };
//===----------------------------------------------------------------------===//
// PointerAlignElem, PointerAlign support
//===----------------------------------------------------------------------===//
PointerAlignElem
-PointerAlignElem::get(uint32_t addr_space, unsigned abi_align,
- unsigned pref_align, uint32_t bit_width) {
- assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
+PointerAlignElem::get(uint32_t AddressSpace, unsigned ABIAlign,
+ unsigned PrefAlign, uint32_t TypeByteWidth) {
+ assert(ABIAlign <= PrefAlign && "Preferred alignment worse than ABI!");
PointerAlignElem retval;
- retval.AddressSpace = addr_space;
- retval.ABIAlign = abi_align;
- retval.PrefAlign = pref_align;
- retval.TypeBitWidth = bit_width;
+ retval.AddressSpace = AddressSpace;
+ retval.ABIAlign = ABIAlign;
+ retval.PrefAlign = PrefAlign;
+ retval.TypeByteWidth = TypeByteWidth;
return retval;
}
@@ -141,36 +142,52 @@ PointerAlignElem::operator==(const PointerAlignElem &rhs) const {
return (ABIAlign == rhs.ABIAlign
&& AddressSpace == rhs.AddressSpace
&& PrefAlign == rhs.PrefAlign
- && TypeBitWidth == rhs.TypeBitWidth);
+ && TypeByteWidth == rhs.TypeByteWidth);
}
const PointerAlignElem
-DataLayout::InvalidPointerElem = PointerAlignElem::get(~0U, 0U, 0U, 0U);
+DataLayout::InvalidPointerElem = { 0U, 0U, 0U, ~0U };
//===----------------------------------------------------------------------===//
// DataLayout Class Implementation
//===----------------------------------------------------------------------===//
-void DataLayout::init(StringRef Desc) {
- initializeDataLayoutPass(*PassRegistry::getPassRegistry());
+const char *DataLayout::getManglingComponent(const Triple &T) {
+ if (T.isOSBinFormatMachO())
+ return "-m:o";
+ if (T.isOSWindows() && T.getArch() == Triple::x86 && T.isOSBinFormatCOFF())
+ return "-m:w";
+ return "-m:e";
+}
+
+static const LayoutAlignElem DefaultAlignments[] = {
+ { INTEGER_ALIGN, 1, 1, 1 }, // i1
+ { INTEGER_ALIGN, 8, 1, 1 }, // i8
+ { INTEGER_ALIGN, 16, 2, 2 }, // i16
+ { INTEGER_ALIGN, 32, 4, 4 }, // i32
+ { INTEGER_ALIGN, 64, 4, 8 }, // i64
+ { FLOAT_ALIGN, 16, 2, 2 }, // half
+ { FLOAT_ALIGN, 32, 4, 4 }, // float
+ { FLOAT_ALIGN, 64, 8, 8 }, // double
+ { FLOAT_ALIGN, 128, 16, 16 }, // ppcf128, quad, ...
+ { VECTOR_ALIGN, 64, 8, 8 }, // v2i32, v1i64, ...
+ { VECTOR_ALIGN, 128, 16, 16 }, // v16i8, v8i16, v4i32, ...
+ { AGGREGATE_ALIGN, 0, 0, 8 } // struct
+};
+
+void DataLayout::reset(StringRef Desc) {
+ clear();
- LayoutMap = 0;
+ LayoutMap = nullptr;
LittleEndian = false;
StackNaturalAlign = 0;
+ ManglingMode = MM_None;
// Default alignments
- setAlignment(INTEGER_ALIGN, 1, 1, 1); // i1
- setAlignment(INTEGER_ALIGN, 1, 1, 8); // i8
- setAlignment(INTEGER_ALIGN, 2, 2, 16); // i16
- setAlignment(INTEGER_ALIGN, 4, 4, 32); // i32
- setAlignment(INTEGER_ALIGN, 4, 8, 64); // i64
- setAlignment(FLOAT_ALIGN, 2, 2, 16); // half
- setAlignment(FLOAT_ALIGN, 4, 4, 32); // float
- setAlignment(FLOAT_ALIGN, 8, 8, 64); // double
- setAlignment(FLOAT_ALIGN, 16, 16, 128); // ppcf128, quad, ...
- setAlignment(VECTOR_ALIGN, 8, 8, 64); // v2i32, v1i64, ...
- setAlignment(VECTOR_ALIGN, 16, 16, 128); // v16i8, v8i16, v4i32, ...
- setAlignment(AGGREGATE_ALIGN, 0, 8, 0); // struct
+ for (const LayoutAlignElem &E : DefaultAlignments) {
+ setAlignment((AlignTypeEnum)E.AlignType, E.ABIAlign, E.PrefAlign,
+ E.TypeBitWidth);
+ }
setPointerAlignment(0, 8, 8, 8);
parseSpecifier(Desc);
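
For reference, an illustrative sketch of what the table above means to a client; the helper is made up, the empty layout string just selects the built-in defaults, and the values in the comments come straight from the i64 row:

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Type.h"
    using namespace llvm;

    void queryDefaults() {
      LLVMContext Ctx;
      DataLayout DL("");                                 // defaults only
      Type *I64 = Type::getInt64Ty(Ctx);
      unsigned ABIAlign  = DL.getABITypeAlignment(I64);  // 4, per { INTEGER_ALIGN, 64, 4, 8 }
      unsigned PrefAlign = DL.getPrefTypeAlignment(I64); // 8
      (void)ABIAlign;
      (void)PrefAlign;
    }
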
@@ -185,11 +202,12 @@ static std::pair<StringRef, StringRef> split(StringRef Str, char Separator) {
return Split;
}
-/// Get an unsinged integer, including error checks.
+/// Get an unsigned integer, including error checks.
static unsigned getInt(StringRef R) {
unsigned Result;
bool error = R.getAsInteger(10, Result); (void)error;
- assert(!error && "not a number, or does not fit in an unsigned int");
+ if (error)
+ report_fatal_error("not a number, or does not fit in an unsigned int");
return Result;
}
@@ -216,6 +234,10 @@ void DataLayout::parseSpecifier(StringRef Desc) {
Tok = Tok.substr(1);
switch (Specifier) {
+ case 's':
+ // Ignored for backward compatibility.
+ // FIXME: remove this on LLVM 4.0.
+ break;
case 'E':
LittleEndian = false;
break;
@@ -250,8 +272,7 @@ void DataLayout::parseSpecifier(StringRef Desc) {
case 'i':
case 'v':
case 'f':
- case 'a':
- case 's': {
+ case 'a': {
AlignTypeEnum AlignType;
switch (Specifier) {
default:
@@ -259,12 +280,14 @@ void DataLayout::parseSpecifier(StringRef Desc) {
case 'v': AlignType = VECTOR_ALIGN; break;
case 'f': AlignType = FLOAT_ALIGN; break;
case 'a': AlignType = AGGREGATE_ALIGN; break;
- case 's': AlignType = STACK_ALIGN; break;
}
// Bit size.
unsigned Size = Tok.empty() ? 0 : getInt(Tok);
+ assert((AlignType != AGGREGATE_ALIGN || Size == 0) &&
+ "These specifications don't have a size");
+
// ABI alignment.
Split = split(Rest, ':');
unsigned ABIAlign = inBytes(getInt(Tok));
@@ -294,6 +317,26 @@ void DataLayout::parseSpecifier(StringRef Desc) {
StackNaturalAlign = inBytes(getInt(Tok));
break;
}
+ case 'm':
+ assert(Tok.empty());
+ assert(Rest.size() == 1);
+ switch(Rest[0]) {
+ default:
+ llvm_unreachable("Unknown mangling in datalayout string");
+ case 'e':
+ ManglingMode = MM_ELF;
+ break;
+ case 'o':
+ ManglingMode = MM_MachO;
+ break;
+ case 'm':
+ ManglingMode = MM_Mips;
+ break;
+ case 'w':
+ ManglingMode = MM_WINCOFF;
+ break;
+ }
+ break;
default:
llvm_unreachable("Unknown specifier in datalayout string");
break;
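
A hedged example of the new specifier in use; the layout string below is made up for illustration and is not taken from any real target:

    #include "llvm/IR/DataLayout.h"
    #include <string>
    using namespace llvm;

    void parseMangling() {
      // "-m:e" selects MM_ELF; "-m:o", "-m:m" and "-m:w" select Mach-O, MIPS
      // and Windows COFF mangling respectively, per the switch above.
      DataLayout DL("e-m:e-p:64:64-i64:64");
      std::string Canonical = DL.getStringRepresentation();
      (void)Canonical;
    }
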
@@ -301,18 +344,22 @@ void DataLayout::parseSpecifier(StringRef Desc) {
}
}
-/// Default ctor.
-///
-/// @note This has to exist, because this is a pass, but it should never be
-/// used.
-DataLayout::DataLayout() : ImmutablePass(ID) {
- report_fatal_error("Bad DataLayout ctor used. "
- "Tool did not specify a DataLayout to use?");
+DataLayout::DataLayout(const Module *M) : LayoutMap(nullptr) {
+ const DataLayout *Other = M->getDataLayout();
+ if (Other)
+ *this = *Other;
+ else
+ reset("");
}
-DataLayout::DataLayout(const Module *M)
- : ImmutablePass(ID) {
- init(M->getDataLayout());
+bool DataLayout::operator==(const DataLayout &Other) const {
+ bool Ret = LittleEndian == Other.LittleEndian &&
+ StackNaturalAlign == Other.StackNaturalAlign &&
+ ManglingMode == Other.ManglingMode &&
+ LegalIntWidths == Other.LegalIntWidths &&
+ Alignments == Other.Alignments && Pointers == Other.Pointers;
+ assert(Ret == (getStringRepresentation() == Other.getStringRepresentation()));
+ return Ret;
}
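
A small sketch of what the value semantics buy: two modules can be checked for layout compatibility without going through a pass. The helper below is hypothetical:

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    bool haveSameLayout(const Module &A, const Module &B) {
      // Each constructor copies the module's layout, or falls back to the
      // defaults when the module does not carry one.
      return DataLayout(&A) == DataLayout(&B);
    }
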
void
@@ -321,12 +368,12 @@ DataLayout::setAlignment(AlignTypeEnum align_type, unsigned abi_align,
assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
assert(pref_align < (1 << 16) && "Alignment doesn't fit in bitfield");
assert(bit_width < (1 << 24) && "Bit width doesn't fit in bitfield");
- for (unsigned i = 0, e = Alignments.size(); i != e; ++i) {
- if (Alignments[i].AlignType == (unsigned)align_type &&
- Alignments[i].TypeBitWidth == bit_width) {
+ for (LayoutAlignElem &Elem : Alignments) {
+ if (Elem.AlignType == (unsigned)align_type &&
+ Elem.TypeBitWidth == bit_width) {
// Update the abi, preferred alignments.
- Alignments[i].ABIAlign = abi_align;
- Alignments[i].PrefAlign = pref_align;
+ Elem.ABIAlign = abi_align;
+ Elem.PrefAlign = pref_align;
return;
}
}
@@ -335,18 +382,26 @@ DataLayout::setAlignment(AlignTypeEnum align_type, unsigned abi_align,
pref_align, bit_width));
}
-void
-DataLayout::setPointerAlignment(uint32_t addr_space, unsigned abi_align,
- unsigned pref_align, uint32_t bit_width) {
- assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
- DenseMap<unsigned,PointerAlignElem>::iterator val = Pointers.find(addr_space);
- if (val == Pointers.end()) {
- Pointers[addr_space] = PointerAlignElem::get(addr_space,
- abi_align, pref_align, bit_width);
+DataLayout::PointersTy::iterator
+DataLayout::findPointerLowerBound(uint32_t AddressSpace) {
+ return std::lower_bound(Pointers.begin(), Pointers.end(), AddressSpace,
+ [](const PointerAlignElem &A, uint32_t AddressSpace) {
+ return A.AddressSpace < AddressSpace;
+ });
+}
+
+void DataLayout::setPointerAlignment(uint32_t AddrSpace, unsigned ABIAlign,
+ unsigned PrefAlign,
+ uint32_t TypeByteWidth) {
+ assert(ABIAlign <= PrefAlign && "Preferred alignment worse than ABI!");
+ PointersTy::iterator I = findPointerLowerBound(AddrSpace);
+ if (I == Pointers.end() || I->AddressSpace != AddrSpace) {
+ Pointers.insert(I, PointerAlignElem::get(AddrSpace, ABIAlign, PrefAlign,
+ TypeByteWidth));
} else {
- val->second.ABIAlign = abi_align;
- val->second.PrefAlign = pref_align;
- val->second.TypeBitWidth = bit_width;
+ I->ABIAlign = ABIAlign;
+ I->PrefAlign = PrefAlign;
+ I->TypeByteWidth = TypeByteWidth;
}
}
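
The same insert-or-update idiom on a sorted vector, shown standalone with plain STL types so the invariant is easy to see; Elem is only a stand-in for PointerAlignElem:

    #include <algorithm>
    #include <vector>

    struct Elem { unsigned Key; unsigned Val; };

    void upsert(std::vector<Elem> &V, unsigned Key, unsigned Val) {
      auto I = std::lower_bound(V.begin(), V.end(), Key,
                                [](const Elem &E, unsigned K) { return E.Key < K; });
      if (I == V.end() || I->Key != Key)
        V.insert(I, Elem{Key, Val});   // insert keeps V sorted by Key
      else
        I->Val = Val;                  // existing entry: update in place
    }
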
@@ -412,11 +467,10 @@ class StructLayoutMap {
LayoutInfoTy LayoutInfo;
public:
- virtual ~StructLayoutMap() {
+ ~StructLayoutMap() {
// Remove any layouts.
- for (LayoutInfoTy::iterator I = LayoutInfo.begin(), E = LayoutInfo.end();
- I != E; ++I) {
- StructLayout *Value = I->second;
+ for (const auto &I : LayoutInfo) {
+ StructLayout *Value = I.second;
Value->~StructLayout();
free(Value);
}
@@ -425,21 +479,20 @@ public:
StructLayout *&operator[](StructType *STy) {
return LayoutInfo[STy];
}
-
- // for debugging...
- virtual void dump() const {}
};
} // end anonymous namespace
-DataLayout::~DataLayout() {
- delete static_cast<StructLayoutMap*>(LayoutMap);
+void DataLayout::clear() {
+ LegalIntWidths.clear();
+ Alignments.clear();
+ Pointers.clear();
+ delete static_cast<StructLayoutMap *>(LayoutMap);
+ LayoutMap = nullptr;
}
-bool DataLayout::doFinalization(Module &M) {
- delete static_cast<StructLayoutMap*>(LayoutMap);
- LayoutMap = 0;
- return false;
+DataLayout::~DataLayout() {
+ clear();
}
const StructLayout *DataLayout::getStructLayout(StructType *Ty) const {
@@ -470,32 +523,49 @@ std::string DataLayout::getStringRepresentation() const {
raw_string_ostream OS(Result);
OS << (LittleEndian ? "e" : "E");
- SmallVector<unsigned, 8> addrSpaces;
- // Lets get all of the known address spaces and sort them
- // into increasing order so that we can emit the string
- // in a cleaner format.
- for (DenseMap<unsigned, PointerAlignElem>::const_iterator
- pib = Pointers.begin(), pie = Pointers.end();
- pib != pie; ++pib) {
- addrSpaces.push_back(pib->first);
+
+ switch (ManglingMode) {
+ case MM_None:
+ break;
+ case MM_ELF:
+ OS << "-m:e";
+ break;
+ case MM_MachO:
+ OS << "-m:o";
+ break;
+ case MM_WINCOFF:
+ OS << "-m:w";
+ break;
+ case MM_Mips:
+ OS << "-m:m";
+ break;
}
- std::sort(addrSpaces.begin(), addrSpaces.end());
- for (SmallVectorImpl<unsigned>::iterator asb = addrSpaces.begin(),
- ase = addrSpaces.end(); asb != ase; ++asb) {
- const PointerAlignElem &PI = Pointers.find(*asb)->second;
+
+ for (const PointerAlignElem &PI : Pointers) {
+ // Skip default.
+ if (PI.AddressSpace == 0 && PI.ABIAlign == 8 && PI.PrefAlign == 8 &&
+ PI.TypeByteWidth == 8)
+ continue;
+
OS << "-p";
if (PI.AddressSpace) {
OS << PI.AddressSpace;
}
- OS << ":" << PI.TypeBitWidth*8 << ':' << PI.ABIAlign*8
- << ':' << PI.PrefAlign*8;
+ OS << ":" << PI.TypeByteWidth*8 << ':' << PI.ABIAlign*8;
+ if (PI.PrefAlign != PI.ABIAlign)
+ OS << ':' << PI.PrefAlign*8;
}
- OS << "-S" << StackNaturalAlign*8;
- for (unsigned i = 0, e = Alignments.size(); i != e; ++i) {
- const LayoutAlignElem &AI = Alignments[i];
- OS << '-' << (char)AI.AlignType << AI.TypeBitWidth << ':'
- << AI.ABIAlign*8 << ':' << AI.PrefAlign*8;
+ for (const LayoutAlignElem &AI : Alignments) {
+ if (std::find(std::begin(DefaultAlignments), std::end(DefaultAlignments),
+ AI) != std::end(DefaultAlignments))
+ continue;
+ OS << '-' << (char)AI.AlignType;
+ if (AI.TypeBitWidth)
+ OS << AI.TypeBitWidth;
+ OS << ':' << AI.ABIAlign*8;
+ if (AI.ABIAlign != AI.PrefAlign)
+ OS << ':' << AI.PrefAlign*8;
}
if (!LegalIntWidths.empty()) {
@@ -504,9 +574,40 @@ std::string DataLayout::getStringRepresentation() const {
for (unsigned i = 1, e = LegalIntWidths.size(); i != e; ++i)
OS << ':' << (unsigned)LegalIntWidths[i];
}
+
+ if (StackNaturalAlign)
+ OS << "-S" << StackNaturalAlign*8;
+
return OS.str();
}
+unsigned DataLayout::getPointerABIAlignment(unsigned AS) const {
+ PointersTy::const_iterator I = findPointerLowerBound(AS);
+ if (I == Pointers.end() || I->AddressSpace != AS) {
+ I = findPointerLowerBound(0);
+ assert(I->AddressSpace == 0);
+ }
+ return I->ABIAlign;
+}
+
+unsigned DataLayout::getPointerPrefAlignment(unsigned AS) const {
+ PointersTy::const_iterator I = findPointerLowerBound(AS);
+ if (I == Pointers.end() || I->AddressSpace != AS) {
+ I = findPointerLowerBound(0);
+ assert(I->AddressSpace == 0);
+ }
+ return I->PrefAlign;
+}
+
+unsigned DataLayout::getPointerSize(unsigned AS) const {
+ PointersTy::const_iterator I = findPointerLowerBound(AS);
+ if (I == Pointers.end() || I->AddressSpace != AS) {
+ I = findPointerLowerBound(0);
+ assert(I->AddressSpace == 0);
+ }
+ return I->TypeByteWidth;
+}
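
A sketch of the fallback behaviour these accessors share; the layout string is illustrative and only describes address space 0:

    #include "llvm/IR/DataLayout.h"
    using namespace llvm;

    void pointerQueries() {
      DataLayout DL("e-p:32:32");
      unsigned SizeAS0 = DL.getPointerSize(0);  // 4 bytes, as declared
      unsigned SizeAS1 = DL.getPointerSize(1);  // also 4: no entry, so AS 0 is used
      (void)SizeAS0;
      (void)SizeAS1;
    }
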
+
unsigned DataLayout::getPointerTypeSizeInBits(Type *Ty) const {
assert(Ty->isPtrOrPtrVectorTy() &&
"This should only be called with a pointer or pointer vector type");
@@ -586,15 +687,7 @@ unsigned DataLayout::getABITypeAlignment(Type *Ty) const {
/// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
/// an integer type of the specified bitwidth.
unsigned DataLayout::getABIIntegerTypeAlignment(unsigned BitWidth) const {
- return getAlignmentInfo(INTEGER_ALIGN, BitWidth, true, 0);
-}
-
-unsigned DataLayout::getCallFrameTypeAlignment(Type *Ty) const {
- for (unsigned i = 0, e = Alignments.size(); i != e; ++i)
- if (Alignments[i].AlignType == STACK_ALIGN)
- return Alignments[i].ABIAlign;
-
- return getABITypeAlignment(Ty);
+ return getAlignmentInfo(INTEGER_ALIGN, BitWidth, true, nullptr);
}
unsigned DataLayout::getPrefTypeAlignment(Type *Ty) const {
@@ -615,7 +708,7 @@ IntegerType *DataLayout::getIntPtrType(LLVMContext &C,
Type *DataLayout::getIntPtrType(Type *Ty) const {
assert(Ty->isPtrOrPtrVectorTy() &&
"Expected a pointer or pointer vector type.");
- unsigned NumBits = getTypeSizeInBits(Ty->getScalarType());
+ unsigned NumBits = getPointerTypeSizeInBits(Ty);
IntegerType *IntTy = IntegerType::get(Ty->getContext(), NumBits);
if (VectorType *VecTy = dyn_cast<VectorType>(Ty))
return VectorType::get(IntTy, VecTy->getNumElements());
@@ -623,17 +716,15 @@ Type *DataLayout::getIntPtrType(Type *Ty) const {
}
Type *DataLayout::getSmallestLegalIntType(LLVMContext &C, unsigned Width) const {
- for (unsigned i = 0, e = (unsigned)LegalIntWidths.size(); i != e; ++i)
- if (Width <= LegalIntWidths[i])
- return Type::getIntNTy(C, LegalIntWidths[i]);
- return 0;
+ for (unsigned LegalIntWidth : LegalIntWidths)
+ if (Width <= LegalIntWidth)
+ return Type::getIntNTy(C, LegalIntWidth);
+ return nullptr;
}
unsigned DataLayout::getLargestLegalIntTypeSize() const {
- unsigned MaxWidth = 0;
- for (unsigned i = 0, e = (unsigned)LegalIntWidths.size(); i != e; ++i)
- MaxWidth = std::max<unsigned>(MaxWidth, LegalIntWidths[i]);
- return MaxWidth;
+ auto Max = std::max_element(LegalIntWidths.begin(), LegalIntWidths.end());
+ return Max != LegalIntWidths.end() ? *Max : 0;
}
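
As a worked example of the two queries; the "n8:16:32:64" component below is illustrative, declaring 8, 16, 32 and 64-bit integers as native:

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Type.h"
    using namespace llvm;

    void legalIntQueries() {
      LLVMContext Ctx;
      DataLayout DL("e-n8:16:32:64");
      Type *T = DL.getSmallestLegalIntType(Ctx, 20);   // i32: first width >= 20
      unsigned Max = DL.getLargestLegalIntTypeSize();  // 64
      (void)T;
      (void)Max;
    }
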
uint64_t DataLayout::getIndexedOffset(Type *ptrTy,
@@ -703,3 +794,19 @@ unsigned DataLayout::getPreferredAlignment(const GlobalVariable *GV) const {
unsigned DataLayout::getPreferredAlignmentLog(const GlobalVariable *GV) const {
return Log2_32(getPreferredAlignment(GV));
}
+
+DataLayoutPass::DataLayoutPass() : ImmutablePass(ID), DL("") {
+ report_fatal_error("Bad DataLayoutPass ctor used. Tool did not specify a "
+ "DataLayout to use?");
+}
+
+DataLayoutPass::~DataLayoutPass() {}
+
+DataLayoutPass::DataLayoutPass(const DataLayout &DL)
+ : ImmutablePass(ID), DL(DL) {
+ initializeDataLayoutPassPass(*PassRegistry::getPassRegistry());
+}
+
+DataLayoutPass::DataLayoutPass(const Module *M) : ImmutablePass(ID), DL(M) {
+ initializeDataLayoutPassPass(*PassRegistry::getPassRegistry());
+}
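
For completeness, a minimal sketch of wiring the new pass into a legacy pass manager; the helper is hypothetical and assumes the module already carries a layout string:

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Module.h"
    #include "llvm/PassManager.h"
    using namespace llvm;

    void addLayoutPass(PassManager &PM, const Module &M) {
      // The pass now merely wraps a DataLayout value copied from the module.
      PM.add(new DataLayoutPass(&M));
    }
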
diff --git a/contrib/llvm/lib/IR/DebugInfo.cpp b/contrib/llvm/lib/IR/DebugInfo.cpp
index 70a756f..5e39b24 100644
--- a/contrib/llvm/lib/IR/DebugInfo.cpp
+++ b/contrib/llvm/lib/IR/DebugInfo.cpp
@@ -12,7 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/DebugInfo.h"
+#include "LLVMContextImpl.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
@@ -23,9 +24,9 @@
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Dwarf.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace llvm::dwarf;
@@ -45,14 +46,15 @@ bool DIDescriptor::Verify() const {
DILexicalBlockFile(DbgNode).Verify() ||
DISubrange(DbgNode).Verify() || DIEnumerator(DbgNode).Verify() ||
DIObjCProperty(DbgNode).Verify() ||
+ DIUnspecifiedParameter(DbgNode).Verify() ||
DITemplateTypeParameter(DbgNode).Verify() ||
DITemplateValueParameter(DbgNode).Verify() ||
DIImportedEntity(DbgNode).Verify());
}
static Value *getField(const MDNode *DbgNode, unsigned Elt) {
- if (DbgNode == 0 || Elt >= DbgNode->getNumOperands())
- return 0;
+ if (!DbgNode || Elt >= DbgNode->getNumOperands())
+ return nullptr;
return DbgNode->getOperand(Elt);
}
@@ -71,7 +73,7 @@ StringRef DIDescriptor::getStringField(unsigned Elt) const {
}
uint64_t DIDescriptor::getUInt64Field(unsigned Elt) const {
- if (DbgNode == 0)
+ if (!DbgNode)
return 0;
if (Elt < DbgNode->getNumOperands())
@@ -83,7 +85,7 @@ uint64_t DIDescriptor::getUInt64Field(unsigned Elt) const {
}
int64_t DIDescriptor::getInt64Field(unsigned Elt) const {
- if (DbgNode == 0)
+ if (!DbgNode)
return 0;
if (Elt < DbgNode->getNumOperands())
@@ -100,34 +102,34 @@ DIDescriptor DIDescriptor::getDescriptorField(unsigned Elt) const {
}
GlobalVariable *DIDescriptor::getGlobalVariableField(unsigned Elt) const {
- if (DbgNode == 0)
- return 0;
+ if (!DbgNode)
+ return nullptr;
if (Elt < DbgNode->getNumOperands())
return dyn_cast_or_null<GlobalVariable>(DbgNode->getOperand(Elt));
- return 0;
+ return nullptr;
}
Constant *DIDescriptor::getConstantField(unsigned Elt) const {
- if (DbgNode == 0)
- return 0;
+ if (!DbgNode)
+ return nullptr;
if (Elt < DbgNode->getNumOperands())
return dyn_cast_or_null<Constant>(DbgNode->getOperand(Elt));
- return 0;
+ return nullptr;
}
Function *DIDescriptor::getFunctionField(unsigned Elt) const {
- if (DbgNode == 0)
- return 0;
+ if (!DbgNode)
+ return nullptr;
if (Elt < DbgNode->getNumOperands())
return dyn_cast_or_null<Function>(DbgNode->getOperand(Elt));
- return 0;
+ return nullptr;
}
void DIDescriptor::replaceFunctionField(unsigned Elt, Function *F) {
- if (DbgNode == 0)
+ if (!DbgNode)
return;
if (Elt < DbgNode->getNumOperands()) {
@@ -136,8 +138,14 @@ void DIDescriptor::replaceFunctionField(unsigned Elt, Function *F) {
}
}
-unsigned DIVariable::getNumAddrElements() const {
- return DbgNode->getNumOperands() - 8;
+uint64_t DIVariable::getAddrElement(unsigned Idx) const {
+ DIDescriptor ComplexExpr = getDescriptorField(8);
+ if (Idx < ComplexExpr->getNumOperands())
+ if (auto *CI = dyn_cast_or_null<ConstantInt>(ComplexExpr->getOperand(Idx)))
+ return CI->getZExtValue();
+
+ assert(false && "non-existing complex address element requested");
+ return 0;
}
/// getInlinedAt - If this variable is inlined then return inline location.
@@ -333,7 +341,7 @@ unsigned DIArray::getNumElements() const {
/// replaceAllUsesWith - Replace all uses of the MDNode used by this
/// type with the one in the passed descriptor.
-void DIType::replaceAllUsesWith(DIDescriptor &D) {
+void DIType::replaceAllUsesWith(LLVMContext &VMContext, DIDescriptor D) {
assert(DbgNode && "Trying to replace an unverified type!");
@@ -342,13 +350,19 @@ void DIType::replaceAllUsesWith(DIDescriptor &D) {
// which, due to uniquing, has merged with the source. We shield clients from
// this detail by allowing a value to be replaced with replaceAllUsesWith()
// itself.
- if (DbgNode != D) {
- MDNode *Node = const_cast<MDNode *>(DbgNode);
- const MDNode *DN = D;
- const Value *V = cast_or_null<Value>(DN);
- Node->replaceAllUsesWith(const_cast<Value *>(V));
- MDNode::deleteTemporary(Node);
+ const MDNode *DN = D;
+ if (DbgNode == DN) {
+ SmallVector<Value*, 10> Ops(DbgNode->getNumOperands());
+ for (size_t i = 0; i != Ops.size(); ++i)
+ Ops[i] = DbgNode->getOperand(i);
+ DN = MDNode::get(VMContext, Ops);
}
+
+ MDNode *Node = const_cast<MDNode *>(DbgNode);
+ const Value *V = cast_or_null<Value>(DN);
+ Node->replaceAllUsesWith(const_cast<Value *>(V));
+ MDNode::deleteTemporary(Node);
+ DbgNode = D;
}
/// replaceAllUsesWith - Replace all uses of the MDNode used by this
@@ -356,19 +370,12 @@ void DIType::replaceAllUsesWith(DIDescriptor &D) {
void DIType::replaceAllUsesWith(MDNode *D) {
assert(DbgNode && "Trying to replace an unverified type!");
-
- // Since we use a TrackingVH for the node, its easy for clients to manufacture
- // legitimate situations where they want to replaceAllUsesWith() on something
- // which, due to uniquing, has merged with the source. We shield clients from
- // this detail by allowing a value to be replaced with replaceAllUsesWith()
- // itself.
- if (DbgNode != D) {
- MDNode *Node = const_cast<MDNode *>(DbgNode);
- const MDNode *DN = D;
- const Value *V = cast_or_null<Value>(DN);
- Node->replaceAllUsesWith(const_cast<Value *>(V));
- MDNode::deleteTemporary(Node);
- }
+ assert(DbgNode != D && "This replacement should always happen");
+ MDNode *Node = const_cast<MDNode *>(DbgNode);
+ const MDNode *DN = D;
+ const Value *V = cast_or_null<Value>(DN);
+ Node->replaceAllUsesWith(const_cast<Value *>(V));
+ MDNode::deleteTemporary(Node);
}
/// Verify - Verify that a compile unit is well formed.
@@ -381,7 +388,7 @@ bool DICompileUnit::Verify() const {
if (getFilename().empty())
return false;
- return DbgNode->getNumOperands() == 13;
+ return DbgNode->getNumOperands() == 14;
}
/// Verify - Verify that an ObjC property is well formed.
@@ -427,8 +434,10 @@ static bool fieldIsTypeRef(const MDNode *DbgNode, unsigned Elt) {
/// Check if a value can be a ScopeRef.
static bool isScopeRef(const Value *Val) {
return !Val ||
- (isa<MDString>(Val) && !cast<MDString>(Val)->getString().empty()) ||
- (isa<MDNode>(Val) && DIScope(cast<MDNode>(Val)).isScope());
+ (isa<MDString>(Val) && !cast<MDString>(Val)->getString().empty()) ||
+ // Not checking for Val->isScope() here, because it would work
+ // only for lexical scopes and not all subclasses of DIScope.
+ isa<MDNode>(Val);
}
/// Check if a field at position Elt of a MDNode can be a ScopeRef.
@@ -461,14 +470,13 @@ bool DIType::Verify() const {
// DIType is abstract, it should be a BasicType, a DerivedType or
// a CompositeType.
if (isBasicType())
- DIBasicType(DbgNode).Verify();
+ return DIBasicType(DbgNode).Verify();
else if (isCompositeType())
- DICompositeType(DbgNode).Verify();
+ return DICompositeType(DbgNode).Verify();
else if (isDerivedType())
- DIDerivedType(DbgNode).Verify();
+ return DIDerivedType(DbgNode).Verify();
else
return false;
- return true;
}
/// Verify - Verify that a basic type descriptor is well formed.
@@ -505,6 +513,10 @@ bool DICompositeType::Verify() const {
if (!fieldIsMDString(DbgNode, 14))
return false;
+ // A subroutine type can't be both & and &&.
+ if (isLValueReference() && isRValueReference())
+ return false;
+
return DbgNode->getNumOperands() == 15;
}
@@ -521,6 +533,11 @@ bool DISubprogram::Verify() const {
// Containing type @ field 12.
if (!fieldIsTypeRef(DbgNode, 12))
return false;
+
+ // A subprogram can't be both & and &&.
+ if (isLValueReference() && isRValueReference())
+ return false;
+
return DbgNode->getNumOperands() == 20;
}
@@ -531,10 +548,11 @@ bool DIGlobalVariable::Verify() const {
if (getDisplayName().empty())
return false;
- // Make sure context @ field 2 and type @ field 8 are MDNodes.
+ // Make sure context @ field 2 is an MDNode.
if (!fieldIsMDNode(DbgNode, 2))
return false;
- if (!fieldIsMDNode(DbgNode, 8))
+ // Make sure that type @ field 8 is a DITypeRef.
+ if (!fieldIsTypeRef(DbgNode, 8))
return false;
// Make sure StaticDataMemberDeclaration @ field 12 is MDNode.
if (!fieldIsMDNode(DbgNode, 12))
@@ -548,12 +566,19 @@ bool DIVariable::Verify() const {
if (!isVariable())
return false;
- // Make sure context @ field 1 and type @ field 5 are MDNodes.
+ // Make sure context @ field 1 is an MDNode.
if (!fieldIsMDNode(DbgNode, 1))
return false;
- if (!fieldIsMDNode(DbgNode, 5))
+ // Make sure that type @ field 5 is a DITypeRef.
+ if (!fieldIsTypeRef(DbgNode, 5))
return false;
- return DbgNode->getNumOperands() >= 8;
+
+ // Variable without a complex expression.
+ if (DbgNode->getNumOperands() == 8)
+ return true;
+
+ // Make sure the complex expression is an MDNode.
+ return (DbgNode->getNumOperands() == 9 && fieldIsMDNode(DbgNode, 8));
}
/// Verify - Verify that a location descriptor is well formed.
@@ -591,7 +616,7 @@ bool DISubrange::Verify() const {
/// \brief Verify that the lexical block descriptor is well formed.
bool DILexicalBlock::Verify() const {
- return isLexicalBlock() && DbgNode->getNumOperands() == 6;
+ return isLexicalBlock() && DbgNode->getNumOperands() == 7;
}
/// \brief Verify that the file-scoped lexical block descriptor is well formed.
@@ -599,6 +624,11 @@ bool DILexicalBlockFile::Verify() const {
return isLexicalBlockFile() && DbgNode->getNumOperands() == 3;
}
+/// \brief Verify that an unspecified parameter descriptor is well formed.
+bool DIUnspecifiedParameter::Verify() const {
+ return isUnspecifiedParameter() && DbgNode->getNumOperands() == 1;
+}
+
/// \brief Verify that the template type parameter descriptor is well formed.
bool DITemplateTypeParameter::Verify() const {
return isTemplateTypeParameter() && DbgNode->getNumOperands() == 7;
@@ -658,19 +688,6 @@ void DICompositeType::setTypeArray(DIArray Elements, DIArray TParams) {
DbgNode = N;
}
-void DICompositeType::addMember(DIDescriptor D) {
- SmallVector<llvm::Value *, 16> M;
- DIArray OrigM = getTypeArray();
- unsigned Elements = OrigM.getNumElements();
- if (Elements == 1 && !OrigM.getElement(0))
- Elements = 0;
- M.reserve(Elements + 1);
- for (unsigned i = 0; i != Elements; ++i)
- M.push_back(OrigM.getElement(i));
- M.push_back(D);
- setTypeArray(DIArray(MDNode::get(DbgNode->getContext(), M)));
-}
-
/// Generate a reference to this DIType. Uses the type identifier instead
/// of the actual MDNode if possible, to help type uniquing.
DIScopeRef DIScope::getRef() const {
@@ -753,7 +770,7 @@ DIScopeRef DIScope::getContext() const {
return DIScopeRef(DINameSpace(DbgNode).getContext());
assert((isFile() || isCompileUnit()) && "Unhandled type of scope.");
- return DIScopeRef(NULL);
+ return DIScopeRef(nullptr);
}
// If the scope node has a name, return that, else return an empty string.
@@ -817,6 +834,29 @@ DIArray DICompileUnit::getImportedEntities() const {
return DIArray(getNodeField(DbgNode, 11));
}
+/// copyWithNewScope - Return a copy of this location, replacing the
+/// current scope with the given one.
+DILocation DILocation::copyWithNewScope(LLVMContext &Ctx,
+ DILexicalBlock NewScope) {
+ SmallVector<Value *, 10> Elts;
+ assert(Verify());
+ for (unsigned I = 0; I < DbgNode->getNumOperands(); ++I) {
+ if (I != 2)
+ Elts.push_back(DbgNode->getOperand(I));
+ else
+ Elts.push_back(NewScope);
+ }
+ MDNode *NewDIL = MDNode::get(Ctx, Elts);
+ return DILocation(NewDIL);
+}
+
+/// computeNewDiscriminator - Generate a new discriminator value for this
+/// file and line location.
+unsigned DILocation::computeNewDiscriminator(LLVMContext &Ctx) {
+ std::pair<const char *, unsigned> Key(getFilename().data(), getLineNumber());
+ return ++Ctx.pImpl->DiscriminatorTable[Key];
+}
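
A hedged sketch of how the two helpers can be combined with the new DIBuilder::createLexicalBlock overload; the helper name is made up and the builder, scope and file are assumed to be supplied by the caller:

    #include "llvm/IR/DIBuilder.h"
    #include "llvm/IR/DebugInfo.h"
    using namespace llvm;

    DILocation withFreshDiscriminator(LLVMContext &Ctx, DIBuilder &DIB,
                                      DILocation Loc, DIDescriptor Scope,
                                      DIFile File) {
      unsigned Disc = Loc.computeNewDiscriminator(Ctx);
      DILexicalBlock NewScope = DIB.createLexicalBlock(
          Scope, File, Loc.getLineNumber(), Loc.getColumnNumber(), Disc);
      return Loc.copyWithNewScope(Ctx, NewScope);
    }
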
+
/// fixupSubprogramName - Replace contains special characters used
/// in a typical Objective-C names with '.' in a given string.
static void fixupSubprogramName(DISubprogram Fn, SmallVectorImpl<char> &Out) {
@@ -974,7 +1014,7 @@ void DebugInfoFinder::processModule(const Module &M) {
DIGlobalVariable DIG(GVs.getElement(i));
if (addGlobalVariable(DIG)) {
processScope(DIG.getContext());
- processType(DIG.getType());
+ processType(DIG.getType().resolve(TypeIdentifierMap));
}
}
DIArray SPs = CU.getSubprograms();
@@ -989,7 +1029,7 @@ void DebugInfoFinder::processModule(const Module &M) {
DIArray Imports = CU.getImportedEntities();
for (unsigned i = 0, e = Imports.getNumElements(); i != e; ++i) {
DIImportedEntity Import = DIImportedEntity(Imports.getElement(i));
- DIDescriptor Entity = Import.getEntity();
+ DIDescriptor Entity = Import.getEntity().resolve(TypeIdentifierMap);
if (Entity.isType())
processType(DIType(Entity));
else if (Entity.isSubprogram())
@@ -1060,18 +1100,6 @@ void DebugInfoFinder::processScope(DIScope Scope) {
}
}
-/// processLexicalBlock
-void DebugInfoFinder::processLexicalBlock(DILexicalBlock LB) {
- DIScope Context = LB.getContext();
- if (Context.isLexicalBlock())
- return processLexicalBlock(DILexicalBlock(Context));
- else if (Context.isLexicalBlockFile()) {
- DILexicalBlockFile DBF = DILexicalBlockFile(Context);
- return processLexicalBlock(DILexicalBlock(DBF.getScope()));
- } else
- return processSubprogram(DISubprogram(Context));
-}
-
/// processSubprogram - Process DISubprogram.
void DebugInfoFinder::processSubprogram(DISubprogram SP) {
if (!addSubprogram(SP))
@@ -1108,7 +1136,7 @@ void DebugInfoFinder::processDeclare(const Module &M,
if (!NodesSeen.insert(DV))
return;
processScope(DIVariable(N).getContext());
- processType(DIVariable(N).getType());
+ processType(DIVariable(N).getType().resolve(TypeIdentifierMap));
}
void DebugInfoFinder::processValue(const Module &M, const DbgValueInst *DVI) {
@@ -1124,7 +1152,7 @@ void DebugInfoFinder::processValue(const Module &M, const DbgValueInst *DVI) {
if (!NodesSeen.insert(DV))
return;
processScope(DIVariable(N).getContext());
- processType(DIVariable(N).getType());
+ processType(DIVariable(N).getType().resolve(TypeIdentifierMap));
}
/// addType - Add type into Tys.
@@ -1298,6 +1326,12 @@ void DIType::printInternal(raw_ostream &OS) const {
OS << " [vector]";
if (isStaticMember())
OS << " [static]";
+
+ if (isLValueReference())
+ OS << " [reference]";
+
+ if (isRValueReference())
+ OS << " [rvalue reference]";
}
void DIDerivedType::printInternal(raw_ostream &OS) const {
@@ -1337,6 +1371,12 @@ void DISubprogram::printInternal(raw_ostream &OS) const {
else if (isProtected())
OS << " [protected]";
+ if (isLValueReference())
+ OS << " [reference]";
+
+ if (isRValueReference())
+ OS << " [rvalue reference]";
+
StringRef Res = getName();
if (!Res.empty())
OS << " [" << Res << ']';
@@ -1439,7 +1479,7 @@ bool llvm::StripDebugInfo(Module &M) {
// the module.
if (Function *Declare = M.getFunction("llvm.dbg.declare")) {
while (!Declare->use_empty()) {
- CallInst *CI = cast<CallInst>(Declare->use_back());
+ CallInst *CI = cast<CallInst>(Declare->user_back());
CI->eraseFromParent();
}
Declare->eraseFromParent();
@@ -1448,7 +1488,7 @@ bool llvm::StripDebugInfo(Module &M) {
if (Function *DbgVal = M.getFunction("llvm.dbg.value")) {
while (!DbgVal->use_empty()) {
- CallInst *CI = cast<CallInst>(DbgVal->use_back());
+ CallInst *CI = cast<CallInst>(DbgVal->user_back());
CI->eraseFromParent();
}
DbgVal->eraseFromParent();
@@ -1486,3 +1526,23 @@ unsigned llvm::getDebugMetadataVersionFromModule(const Module &M) {
return 0;
return cast<ConstantInt>(Val)->getZExtValue();
}
+
+llvm::DenseMap<const llvm::Function *, llvm::DISubprogram>
+llvm::makeSubprogramMap(const Module &M) {
+ DenseMap<const Function *, DISubprogram> R;
+
+ NamedMDNode *CU_Nodes = M.getNamedMetadata("llvm.dbg.cu");
+ if (!CU_Nodes)
+ return R;
+
+ for (MDNode *N : CU_Nodes->operands()) {
+ DICompileUnit CUNode(N);
+ DIArray SPs = CUNode.getSubprograms();
+ for (unsigned i = 0, e = SPs.getNumElements(); i != e; ++i) {
+ DISubprogram SP(SPs.getElement(i));
+ if (Function *F = SP.getFunction())
+ R.insert(std::make_pair(F, SP));
+ }
+ }
+ return R;
+}
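
A short usage sketch for the new helper: build the map once per module, then look functions up in constant time. The printing is purely illustrative:

    #include "llvm/IR/DebugInfo.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    void dumpSubprogramNames(const Module &M) {
      auto SPMap = makeSubprogramMap(M);
      for (const Function &F : M)
        if (SPMap.count(&F))
          errs() << F.getName() << " -> " << SPMap.lookup(&F).getName() << "\n";
    }
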
diff --git a/contrib/llvm/lib/IR/DebugLoc.cpp b/contrib/llvm/lib/IR/DebugLoc.cpp
index c57b5a3..e8bdcce 100644
--- a/contrib/llvm/lib/IR/DebugLoc.cpp
+++ b/contrib/llvm/lib/IR/DebugLoc.cpp
@@ -7,10 +7,10 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Support/DebugLoc.h"
+#include "llvm/IR/DebugLoc.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/DenseMapInfo.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/DebugInfo.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -18,7 +18,7 @@ using namespace llvm;
//===----------------------------------------------------------------------===//
MDNode *DebugLoc::getScope(const LLVMContext &Ctx) const {
- if (ScopeIdx == 0) return 0;
+ if (ScopeIdx == 0) return nullptr;
if (ScopeIdx > 0) {
// Positive ScopeIdx is an index into ScopeRecords, which has no inlined-at
@@ -37,7 +37,7 @@ MDNode *DebugLoc::getScope(const LLVMContext &Ctx) const {
MDNode *DebugLoc::getInlinedAt(const LLVMContext &Ctx) const {
// Positive ScopeIdx is an index into ScopeRecords, which has no inlined-at
// position specified. Zero is invalid.
- if (ScopeIdx >= 0) return 0;
+ if (ScopeIdx >= 0) return nullptr;
// Otherwise, the index is in the ScopeInlinedAtRecords array.
assert(unsigned(-ScopeIdx) <= Ctx.pImpl->ScopeInlinedAtRecords.size() &&
@@ -49,7 +49,7 @@ MDNode *DebugLoc::getInlinedAt(const LLVMContext &Ctx) const {
void DebugLoc::getScopeAndInlinedAt(MDNode *&Scope, MDNode *&IA,
const LLVMContext &Ctx) const {
if (ScopeIdx == 0) {
- Scope = IA = 0;
+ Scope = IA = nullptr;
return;
}
@@ -59,7 +59,7 @@ void DebugLoc::getScopeAndInlinedAt(MDNode *&Scope, MDNode *&IA,
assert(unsigned(ScopeIdx) <= Ctx.pImpl->ScopeRecords.size() &&
"Invalid ScopeIdx!");
Scope = Ctx.pImpl->ScopeRecords[ScopeIdx-1].get();
- IA = 0;
+ IA = nullptr;
return;
}
@@ -70,14 +70,34 @@ void DebugLoc::getScopeAndInlinedAt(MDNode *&Scope, MDNode *&IA,
IA = Ctx.pImpl->ScopeInlinedAtRecords[-ScopeIdx-1].second.get();
}
+MDNode *DebugLoc::getScopeNode(const LLVMContext &Ctx) const {
+ if (MDNode *InlinedAt = getInlinedAt(Ctx))
+ return DebugLoc::getFromDILocation(InlinedAt).getScopeNode(Ctx);
+ return getScope(Ctx);
+}
+
+DebugLoc DebugLoc::getFnDebugLoc(const LLVMContext &Ctx) const {
+ const MDNode *Scope = getScopeNode(Ctx);
+ DISubprogram SP = getDISubprogram(Scope);
+ if (SP.isSubprogram()) {
+ // Check for number of operands since the compatibility is
+ // cheap here. FIXME: Name the magic constant.
+ if (SP->getNumOperands() > 19)
+ return DebugLoc::get(SP.getScopeLineNumber(), 0, SP);
+ else
+ return DebugLoc::get(SP.getLineNumber(), 0, SP);
+ }
+
+ return DebugLoc();
+}
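
For illustration, how a consumer might map an instruction's location, possibly deep inside inlined code, back to the containing function's scope line; the helper is hypothetical:

    #include "llvm/IR/DebugLoc.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;

    DebugLoc functionLevelLoc(const Instruction &I) {
      DebugLoc DL = I.getDebugLoc();
      if (DL.isUnknown())
        return DebugLoc();
      // Walks the inlined-at chain via getScopeNode(), then rebuilds a
      // location at the enclosing subprogram's scope (or declaration) line.
      return DL.getFnDebugLoc(I.getContext());
    }
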
DebugLoc DebugLoc::get(unsigned Line, unsigned Col,
MDNode *Scope, MDNode *InlinedAt) {
DebugLoc Result;
// If no scope is available, this is an unknown location.
- if (Scope == 0) return Result;
-
+ if (!Scope) return Result;
+
// Saturate line and col to "unknown".
if (Col > 255) Col = 0;
if (Line >= (1 << 24)) Line = 0;
@@ -86,7 +106,7 @@ DebugLoc DebugLoc::get(unsigned Line, unsigned Col,
LLVMContext &Ctx = Scope->getContext();
// If there is no inlined-at location, use the ScopeRecords array.
- if (InlinedAt == 0)
+ if (!InlinedAt)
Result.ScopeIdx = Ctx.pImpl->getOrAddScopeRecordIdxEntry(Scope, 0);
else
Result.ScopeIdx = Ctx.pImpl->getOrAddScopeInlinedAtIdxEntry(Scope,
@@ -96,9 +116,9 @@ DebugLoc DebugLoc::get(unsigned Line, unsigned Col,
}
/// getAsMDNode - This method converts the compressed DebugLoc node into a
-/// DILocation compatible MDNode.
+/// DILocation-compatible MDNode.
MDNode *DebugLoc::getAsMDNode(const LLVMContext &Ctx) const {
- if (isUnknown()) return 0;
+ if (isUnknown()) return nullptr;
MDNode *Scope, *IA;
getScopeAndInlinedAt(Scope, IA, Ctx);
@@ -117,7 +137,7 @@ MDNode *DebugLoc::getAsMDNode(const LLVMContext &Ctx) const {
DebugLoc DebugLoc::getFromDILocation(MDNode *N) {
DILocation Loc(N);
MDNode *Scope = Loc.getScope();
- if (Scope == 0) return DebugLoc();
+ if (!Scope) return DebugLoc();
return get(Loc.getLineNumber(), Loc.getColumnNumber(), Scope,
Loc.getOrigLocation());
}
@@ -126,8 +146,9 @@ DebugLoc DebugLoc::getFromDILocation(MDNode *N) {
DebugLoc DebugLoc::getFromDILexicalBlock(MDNode *N) {
DILexicalBlock LexBlock(N);
MDNode *Scope = LexBlock.getContext();
- if (Scope == 0) return DebugLoc();
- return get(LexBlock.getLineNumber(), LexBlock.getColumnNumber(), Scope, NULL);
+ if (!Scope) return DebugLoc();
+ return get(LexBlock.getLineNumber(), LexBlock.getColumnNumber(), Scope,
+ nullptr);
}
void DebugLoc::dump(const LLVMContext &Ctx) const {
@@ -146,6 +167,28 @@ void DebugLoc::dump(const LLVMContext &Ctx) const {
#endif
}
+void DebugLoc::print(const LLVMContext &Ctx, raw_ostream &OS) const {
+ if (!isUnknown()) {
+ // Print source line info.
+ DIScope Scope(getScope(Ctx));
+ assert((!Scope || Scope.isScope()) &&
+ "Scope of a DebugLoc should be null or a DIScope.");
+ if (Scope)
+ OS << Scope.getFilename();
+ else
+ OS << "<unknown>";
+ OS << ':' << getLine();
+ if (getCol() != 0)
+ OS << ':' << getCol();
+ DebugLoc InlinedAtDL = DebugLoc::getFromDILocation(getInlinedAt(Ctx));
+ if (!InlinedAtDL.isUnknown()) {
+ OS << " @[ ";
+ InlinedAtDL.print(Ctx, OS);
+ OS << " ]";
+ }
+ }
+}
+
//===----------------------------------------------------------------------===//
// DenseMap specialization
//===----------------------------------------------------------------------===//
@@ -214,7 +257,7 @@ void DebugRecVH::deleted() {
// If this is a non-canonical reference, just drop the value to null, we know
// it doesn't have a map entry.
if (Idx == 0) {
- setValPtr(0);
+ setValPtr(nullptr);
return;
}
@@ -225,7 +268,7 @@ void DebugRecVH::deleted() {
assert(Ctx->ScopeRecordIdx[Cur] == Idx && "Mapping out of date!");
Ctx->ScopeRecordIdx.erase(Cur);
// Reset this VH to null and we're done.
- setValPtr(0);
+ setValPtr(nullptr);
Idx = 0;
return;
}
@@ -239,7 +282,7 @@ void DebugRecVH::deleted() {
MDNode *OldScope = Entry.first.get();
MDNode *OldInlinedAt = Entry.second.get();
- assert(OldScope != 0 && OldInlinedAt != 0 &&
+ assert(OldScope && OldInlinedAt &&
"Entry should be non-canonical if either val dropped to null");
// Otherwise, we do have an entry in it, nuke it and we're done.
@@ -249,7 +292,7 @@ void DebugRecVH::deleted() {
// Reset this VH to null. Drop both 'Idx' values to null to indicate that
// we're in non-canonical form now.
- setValPtr(0);
+ setValPtr(nullptr);
Entry.first.Idx = Entry.second.Idx = 0;
}
@@ -257,8 +300,8 @@ void DebugRecVH::allUsesReplacedWith(Value *NewVa) {
// If being replaced with a non-mdnode value (e.g. undef) handle this as if
// the mdnode got deleted.
MDNode *NewVal = dyn_cast<MDNode>(NewVa);
- if (NewVal == 0) return deleted();
-
+ if (!NewVal) return deleted();
+
// If this is a non-canonical reference, just change it, we know it already
// doesn't have a map entry.
if (Idx == 0) {
@@ -293,7 +336,7 @@ void DebugRecVH::allUsesReplacedWith(Value *NewVa) {
MDNode *OldScope = Entry.first.get();
MDNode *OldInlinedAt = Entry.second.get();
- assert(OldScope != 0 && OldInlinedAt != 0 &&
+ assert(OldScope && OldInlinedAt &&
"Entry should be non-canonical if either val dropped to null");
// Otherwise, we do have an entry in it, nuke it and we're done.
diff --git a/contrib/llvm/lib/IR/DiagnosticInfo.cpp b/contrib/llvm/lib/IR/DiagnosticInfo.cpp
new file mode 100644
index 0000000..37cce2b
--- /dev/null
+++ b/contrib/llvm/lib/IR/DiagnosticInfo.cpp
@@ -0,0 +1,208 @@
+//===- llvm/Support/DiagnosticInfo.cpp - Diagnostic Definitions -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the different classes involved in low level diagnostics.
+//
+// Diagnostics reporting is still done as part of the LLVMContext.
+//===----------------------------------------------------------------------===//
+
+#include "LLVMContextImpl.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Atomic.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Regex.h"
+#include <string>
+
+using namespace llvm;
+
+namespace {
+
+/// \brief Regular expression corresponding to the value given in one of the
+/// -pass-remarks* command line flags. Passes whose name matches this regexp
+/// will emit a diagnostic when calling the associated diagnostic function
+/// (emitOptimizationRemark, emitOptimizationRemarkMissed or
+/// emitOptimizationRemarkAnalysis).
+struct PassRemarksOpt {
+ std::shared_ptr<Regex> Pattern;
+
+ void operator=(const std::string &Val) {
+ // Create a regexp object to match pass names for emitOptimizationRemark.
+ if (!Val.empty()) {
+ Pattern = std::make_shared<Regex>(Val);
+ std::string RegexError;
+ if (!Pattern->isValid(RegexError))
+ report_fatal_error("Invalid regular expression '" + Val +
+ "' in -pass-remarks: " + RegexError,
+ false);
+ }
+ };
+};
+
+static PassRemarksOpt PassRemarksOptLoc;
+static PassRemarksOpt PassRemarksMissedOptLoc;
+static PassRemarksOpt PassRemarksAnalysisOptLoc;
+
+// -pass-remarks
+// Command line flag to enable emitOptimizationRemark()
+static cl::opt<PassRemarksOpt, true, cl::parser<std::string>>
+PassRemarks("pass-remarks", cl::value_desc("pattern"),
+ cl::desc("Enable optimization remarks from passes whose name match "
+ "the given regular expression"),
+ cl::Hidden, cl::location(PassRemarksOptLoc), cl::ValueRequired,
+ cl::ZeroOrMore);
+
+// -pass-remarks-missed
+// Command line flag to enable emitOptimizationRemarkMissed()
+static cl::opt<PassRemarksOpt, true, cl::parser<std::string>> PassRemarksMissed(
+ "pass-remarks-missed", cl::value_desc("pattern"),
+ cl::desc("Enable missed optimization remarks from passes whose name match "
+ "the given regular expression"),
+ cl::Hidden, cl::location(PassRemarksMissedOptLoc), cl::ValueRequired,
+ cl::ZeroOrMore);
+
+// -pass-remarks-analysis
+// Command line flag to enable emitOptimizationRemarkAnalysis()
+static cl::opt<PassRemarksOpt, true, cl::parser<std::string>>
+PassRemarksAnalysis(
+ "pass-remarks-analysis", cl::value_desc("pattern"),
+ cl::desc(
+ "Enable optimization analysis remarks from passes whose name match "
+ "the given regular expression"),
+ cl::Hidden, cl::location(PassRemarksAnalysisOptLoc), cl::ValueRequired,
+ cl::ZeroOrMore);
+}
+
+int llvm::getNextAvailablePluginDiagnosticKind() {
+ static sys::cas_flag PluginKindID = DK_FirstPluginKind;
+ return (int)sys::AtomicIncrement(&PluginKindID);
+}
+
+DiagnosticInfoInlineAsm::DiagnosticInfoInlineAsm(const Instruction &I,
+ const Twine &MsgStr,
+ DiagnosticSeverity Severity)
+ : DiagnosticInfo(DK_InlineAsm, Severity), LocCookie(0), MsgStr(MsgStr),
+ Instr(&I) {
+ if (const MDNode *SrcLoc = I.getMetadata("srcloc")) {
+ if (SrcLoc->getNumOperands() != 0)
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(SrcLoc->getOperand(0)))
+ LocCookie = CI->getZExtValue();
+ }
+}
+
+void DiagnosticInfoInlineAsm::print(DiagnosticPrinter &DP) const {
+ DP << getMsgStr();
+ if (getLocCookie())
+ DP << " at line " << getLocCookie();
+}
+
+void DiagnosticInfoStackSize::print(DiagnosticPrinter &DP) const {
+ DP << "stack size limit exceeded (" << getStackSize() << ") in "
+ << getFunction();
+}
+
+void DiagnosticInfoDebugMetadataVersion::print(DiagnosticPrinter &DP) const {
+ DP << "ignoring debug info with an invalid version (" << getMetadataVersion()
+ << ") in " << getModule();
+}
+
+void DiagnosticInfoSampleProfile::print(DiagnosticPrinter &DP) const {
+ if (getFileName() && getLineNum() > 0)
+ DP << getFileName() << ":" << getLineNum() << ": ";
+ else if (getFileName())
+ DP << getFileName() << ": ";
+ DP << getMsg();
+}
+
+bool DiagnosticInfoOptimizationBase::isLocationAvailable() const {
+ return getDebugLoc().isUnknown() == false;
+}
+
+void DiagnosticInfoOptimizationBase::getLocation(StringRef *Filename,
+ unsigned *Line,
+ unsigned *Column) const {
+ DILocation DIL(getDebugLoc().getAsMDNode(getFunction().getContext()));
+ *Filename = DIL.getFilename();
+ *Line = DIL.getLineNumber();
+ *Column = DIL.getColumnNumber();
+}
+
+const std::string DiagnosticInfoOptimizationBase::getLocationStr() const {
+ StringRef Filename("<unknown>");
+ unsigned Line = 0;
+ unsigned Column = 0;
+ if (isLocationAvailable())
+ getLocation(&Filename, &Line, &Column);
+ return Twine(Filename + ":" + Twine(Line) + ":" + Twine(Column)).str();
+}
+
+void DiagnosticInfoOptimizationBase::print(DiagnosticPrinter &DP) const {
+ DP << getLocationStr() << ": " << getMsg();
+}
+
+bool DiagnosticInfoOptimizationRemark::isEnabled() const {
+ return PassRemarksOptLoc.Pattern &&
+ PassRemarksOptLoc.Pattern->match(getPassName());
+}
+
+bool DiagnosticInfoOptimizationRemarkMissed::isEnabled() const {
+ return PassRemarksMissedOptLoc.Pattern &&
+ PassRemarksMissedOptLoc.Pattern->match(getPassName());
+}
+
+bool DiagnosticInfoOptimizationRemarkAnalysis::isEnabled() const {
+ return PassRemarksAnalysisOptLoc.Pattern &&
+ PassRemarksAnalysisOptLoc.Pattern->match(getPassName());
+}
+
+void llvm::emitOptimizationRemark(LLVMContext &Ctx, const char *PassName,
+ const Function &Fn, const DebugLoc &DLoc,
+ const Twine &Msg) {
+ Ctx.diagnose(DiagnosticInfoOptimizationRemark(PassName, Fn, DLoc, Msg));
+}
+
+void llvm::emitOptimizationRemarkMissed(LLVMContext &Ctx, const char *PassName,
+ const Function &Fn,
+ const DebugLoc &DLoc,
+ const Twine &Msg) {
+ Ctx.diagnose(DiagnosticInfoOptimizationRemarkMissed(PassName, Fn, DLoc, Msg));
+}
+
+void llvm::emitOptimizationRemarkAnalysis(LLVMContext &Ctx,
+ const char *PassName,
+ const Function &Fn,
+ const DebugLoc &DLoc,
+ const Twine &Msg) {
+ Ctx.diagnose(
+ DiagnosticInfoOptimizationRemarkAnalysis(PassName, Fn, DLoc, Msg));
+}
+
+bool DiagnosticInfoOptimizationFailure::isEnabled() const {
+ // Only print warnings.
+ return getSeverity() == DS_Warning;
+}
+
+void llvm::emitLoopVectorizeWarning(LLVMContext &Ctx, const Function &Fn,
+ const DebugLoc &DLoc, const Twine &Msg) {
+ Ctx.diagnose(DiagnosticInfoOptimizationFailure(
+ Fn, DLoc, Twine("loop not vectorized: " + Msg)));
+}
+
+void llvm::emitLoopInterleaveWarning(LLVMContext &Ctx, const Function &Fn,
+ const DebugLoc &DLoc, const Twine &Msg) {
+ Ctx.diagnose(DiagnosticInfoOptimizationFailure(
+ Fn, DLoc, Twine("loop not interleaved: " + Msg)));
+}
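
A sketch of the intended call site for the remark entry points; "my-unroll" and the message are made-up examples. The remark only reaches the user when the pass name matches the corresponding -pass-remarks regular expression described above:

    #include "llvm/IR/DebugLoc.h"
    #include "llvm/IR/DiagnosticInfo.h"
    #include "llvm/IR/Function.h"
    using namespace llvm;

    void reportUnrolled(Function &F, const DebugLoc &DLoc) {
      emitOptimizationRemark(F.getContext(), "my-unroll", F, DLoc,
                             "completely unrolled loop with 4 iterations");
    }
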
diff --git a/contrib/llvm/lib/IR/DiagnosticPrinter.cpp b/contrib/llvm/lib/IR/DiagnosticPrinter.cpp
new file mode 100644
index 0000000..5e16026
--- /dev/null
+++ b/contrib/llvm/lib/IR/DiagnosticPrinter.cpp
@@ -0,0 +1,107 @@
+//===- llvm/Support/DiagnosticInfo.cpp - Diagnostic Definitions -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the a diagnostic printer relying on raw_ostream.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(char C) {
+ Stream << C;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(unsigned char C) {
+ Stream << C;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(signed char C) {
+ Stream << C;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(StringRef Str) {
+ Stream << Str;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(const char *Str) {
+ Stream << Str;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(
+ const std::string &Str) {
+ Stream << Str;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(unsigned long N) {
+ Stream << N;
+ return *this;
+}
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(long N) {
+ Stream << N;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(
+ unsigned long long N) {
+ Stream << N;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(long long N) {
+ Stream << N;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(const void *P) {
+ Stream << P;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(unsigned int N) {
+ Stream << N;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(int N) {
+ Stream << N;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(double N) {
+ Stream << N;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(const Twine &Str) {
+ Str.print(Stream);
+ return *this;
+}
+
+// IR related types.
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(const Value &V) {
+ Stream << V.getName();
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(const Module &M) {
+ Stream << M.getModuleIdentifier();
+ return *this;
+}
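
A minimal sketch of the printer in isolation; the helper name is made up, and errs() is just a convenient target. Any DiagnosticInfo::print() implementation streams into it the same way:

    #include "llvm/IR/DiagnosticPrinter.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    void printModuleNote(const Module &M, unsigned Line) {
      DiagnosticPrinterRawOStream DP(errs());
      DP << "in " << M << " at line " << Line;  // uses the Module overload above
    }
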
diff --git a/contrib/llvm/lib/IR/Dominators.cpp b/contrib/llvm/lib/IR/Dominators.cpp
index a1160cd..d6649d6 100644
--- a/contrib/llvm/lib/IR/Dominators.cpp
+++ b/contrib/llvm/lib/IR/Dominators.cpp
@@ -14,17 +14,16 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Analysis/Dominators.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Analysis/DominatorInternals.h"
-#include "llvm/Assembly/Writer.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Instructions.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/GenericDomTreeConstruction.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;
@@ -57,41 +56,23 @@ bool BasicBlockEdge::isSingleEdge() const {
//===----------------------------------------------------------------------===//
//
// Provide public access to DominatorTree information. Implementation details
-// can be found in DominatorInternals.h.
+// can be found in Dominators.h, GenericDomTree.h, and
+// GenericDomTreeConstruction.h.
//
//===----------------------------------------------------------------------===//
TEMPLATE_INSTANTIATION(class llvm::DomTreeNodeBase<BasicBlock>);
TEMPLATE_INSTANTIATION(class llvm::DominatorTreeBase<BasicBlock>);
-char DominatorTree::ID = 0;
-INITIALIZE_PASS(DominatorTree, "domtree",
- "Dominator Tree Construction", true, true)
-
-bool DominatorTree::runOnFunction(Function &F) {
- DT->recalculate(F);
- return false;
-}
-
-void DominatorTree::verifyAnalysis() const {
- if (!VerifyDomInfo) return;
-
- Function &F = *getRoot()->getParent();
-
- DominatorTree OtherDT;
- OtherDT.getBase().recalculate(F);
- if (compare(OtherDT)) {
- errs() << "DominatorTree is not up to date!\nComputed:\n";
- print(errs());
- errs() << "\nActual:\n";
- OtherDT.print(errs());
- abort();
- }
-}
-
-void DominatorTree::print(raw_ostream &OS, const Module *) const {
- DT->print(OS);
-}
+#define LLVM_COMMA ,
+TEMPLATE_INSTANTIATION(void llvm::Calculate<Function LLVM_COMMA BasicBlock *>(
+ DominatorTreeBase<GraphTraits<BasicBlock *>::NodeType> &DT LLVM_COMMA
+ Function &F));
+TEMPLATE_INSTANTIATION(
+ void llvm::Calculate<Function LLVM_COMMA Inverse<BasicBlock *> >(
+ DominatorTreeBase<GraphTraits<Inverse<BasicBlock *> >::NodeType> &DT
+ LLVM_COMMA Function &F));
+#undef LLVM_COMMA
// dominates - Return true if Def dominates a use in User. This performs
// the special checks necessary if Def and User are in the same basic block.
@@ -210,8 +191,7 @@ bool DominatorTree::dominates(const BasicBlockEdge &BBE,
return true;
}
-bool DominatorTree::dominates(const BasicBlockEdge &BBE,
- const Use &U) const {
+bool DominatorTree::dominates(const BasicBlockEdge &BBE, const Use &U) const {
// Assert that we have a single edge. We could handle them by simply
// returning false, but since isSingleEdge is linear on the number of
// edges, the callers can normally handle them more efficiently.
@@ -234,8 +214,7 @@ bool DominatorTree::dominates(const BasicBlockEdge &BBE,
return dominates(BBE, UseBB);
}
-bool DominatorTree::dominates(const Instruction *Def,
- const Use &U) const {
+bool DominatorTree::dominates(const Instruction *Def, const Use &U) const {
Instruction *UserInst = cast<Instruction>(U.getUser());
const BasicBlock *DefBB = Def->getParent();
@@ -300,3 +279,44 @@ bool DominatorTree::isReachableFromEntry(const Use &U) const {
// Everything else uses their operands in their own block.
return isReachableFromEntry(I->getParent());
}
+
+void DominatorTree::verifyDomTree() const {
+ if (!VerifyDomInfo)
+ return;
+
+ Function &F = *getRoot()->getParent();
+
+ DominatorTree OtherDT;
+ OtherDT.recalculate(F);
+ if (compare(OtherDT)) {
+ errs() << "DominatorTree is not up to date!\nComputed:\n";
+ print(errs());
+ errs() << "\nActual:\n";
+ OtherDT.print(errs());
+ abort();
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// DominatorTreeWrapperPass Implementation
+//===----------------------------------------------------------------------===//
+//
+// The implementation details of the wrapper pass that holds a DominatorTree.
+//
+//===----------------------------------------------------------------------===//
+
+char DominatorTreeWrapperPass::ID = 0;
+INITIALIZE_PASS(DominatorTreeWrapperPass, "domtree",
+ "Dominator Tree Construction", true, true)
+
+bool DominatorTreeWrapperPass::runOnFunction(Function &F) {
+ DT.recalculate(F);
+ return false;
+}
+
+void DominatorTreeWrapperPass::verifyAnalysis() const { DT.verifyDomTree(); }
+
+void DominatorTreeWrapperPass::print(raw_ostream &OS, const Module *) const {
+ DT.print(OS);
+}
+
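
A sketch of the updated client pattern after the split: passes now require the wrapper and pull the tree out of it. MyPass is a hypothetical example, not code from this change:

    #include "llvm/IR/Dominators.h"
    #include "llvm/IR/Function.h"
    #include "llvm/Pass.h"
    using namespace llvm;

    namespace {
    struct MyPass : FunctionPass {
      static char ID;
      MyPass() : FunctionPass(ID) {}

      void getAnalysisUsage(AnalysisUsage &AU) const override {
        AU.addRequired<DominatorTreeWrapperPass>();
        AU.setPreservesAll();
      }

      bool runOnFunction(Function &F) override {
        DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
        (void)DT;  // use DT for queries such as DT.dominates(...)
        return false;
      }
    };
    }
    char MyPass::ID = 0;
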
diff --git a/contrib/llvm/lib/IR/Function.cpp b/contrib/llvm/lib/IR/Function.cpp
index e8a2402..de59b26 100644
--- a/contrib/llvm/lib/IR/Function.cpp
+++ b/contrib/llvm/lib/IR/Function.cpp
@@ -18,13 +18,13 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LeakDetector.h"
#include "llvm/IR/Module.h"
-#include "llvm/Support/CallSite.h"
-#include "llvm/Support/InstIterator.h"
-#include "llvm/Support/LeakDetector.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/RWMutex.h"
#include "llvm/Support/StringPool.h"
@@ -44,7 +44,7 @@ void Argument::anchor() { }
Argument::Argument(Type *Ty, const Twine &Name, Function *Par)
: Value(Ty, Value::ArgumentVal) {
- Parent = 0;
+ Parent = nullptr;
// Make sure that we get added to a function
LeakDetector::addGarbageObject(this);
@@ -76,6 +76,20 @@ unsigned Argument::getArgNo() const {
return ArgIdx;
}
+/// hasNonNullAttr - Return true if this argument has the nonnull attribute on
+/// it in its containing function. Also returns true if at least one byte is
+/// known to be dereferenceable and the pointer is in addrspace(0).
+bool Argument::hasNonNullAttr() const {
+ if (!getType()->isPointerTy()) return false;
+ if (getParent()->getAttributes().
+ hasAttribute(getArgNo()+1, Attribute::NonNull))
+ return true;
+ else if (getDereferenceableBytes() > 0 &&
+ getType()->getPointerAddressSpace() == 0)
+ return true;
+ return false;
+}
+
/// hasByValAttr - Return true if this argument has the byval attribute on it
/// in its containing function.
bool Argument::hasByValAttr() const {
@@ -84,12 +98,33 @@ bool Argument::hasByValAttr() const {
hasAttribute(getArgNo()+1, Attribute::ByVal);
}
+/// \brief Return true if this argument has the inalloca attribute on it in
+/// its containing function.
+bool Argument::hasInAllocaAttr() const {
+ if (!getType()->isPointerTy()) return false;
+ return getParent()->getAttributes().
+ hasAttribute(getArgNo()+1, Attribute::InAlloca);
+}
+
+bool Argument::hasByValOrInAllocaAttr() const {
+ if (!getType()->isPointerTy()) return false;
+ AttributeSet Attrs = getParent()->getAttributes();
+ return Attrs.hasAttribute(getArgNo() + 1, Attribute::ByVal) ||
+ Attrs.hasAttribute(getArgNo() + 1, Attribute::InAlloca);
+}
+
unsigned Argument::getParamAlignment() const {
assert(getType()->isPointerTy() && "Only pointers have alignments");
return getParent()->getParamAlignment(getArgNo()+1);
}
+uint64_t Argument::getDereferenceableBytes() const {
+ assert(getType()->isPointerTy() &&
+ "Only pointers have dereferenceable bytes");
+ return getParent()->getDereferenceableBytes(getArgNo()+1);
+}
+
/// hasNestAttr - Return true if this argument has the nest attribute on
/// it in its containing function.
bool Argument::hasNestAttr() const {
@@ -194,8 +229,8 @@ void Function::eraseFromParent() {
Function::Function(FunctionType *Ty, LinkageTypes Linkage,
const Twine &name, Module *ParentModule)
- : GlobalValue(PointerType::getUnqual(Ty),
- Value::FunctionVal, 0, 0, Linkage, name) {
+ : GlobalObject(PointerType::getUnqual(Ty),
+ Value::FunctionVal, nullptr, 0, Linkage, name) {
assert(FunctionType::isValidReturnType(getReturnType()) &&
"invalid return type");
SymTab = new ValueSymbolTable();
@@ -278,7 +313,7 @@ void Function::dropAllReferences() {
BasicBlocks.begin()->eraseFromParent();
// Prefix data is stored in a side table.
- setPrefixData(0);
+ setPrefixData(nullptr);
}
void Function::addAttribute(unsigned i, Attribute::AttrKind attr) {
@@ -333,10 +368,10 @@ void Function::clearGC() {
GCNames->erase(this);
if (GCNames->empty()) {
delete GCNames;
- GCNames = 0;
+ GCNames = nullptr;
if (GCNamePool->empty()) {
delete GCNamePool;
- GCNamePool = 0;
+ GCNamePool = nullptr;
}
}
}
@@ -346,7 +381,7 @@ void Function::clearGC() {
/// create a Function) from the Function Src to this one.
void Function::copyAttributesFrom(const GlobalValue *Src) {
assert(isa<Function>(Src) && "Expected a Function!");
- GlobalValue::copyAttributesFrom(Src);
+ GlobalObject::copyAttributesFrom(Src);
const Function *SrcF = cast<Function>(Src);
setCallingConv(SrcF->getCallingConv());
setAttributes(SrcF->getAttributes());
@@ -357,7 +392,7 @@ void Function::copyAttributesFrom(const GlobalValue *Src) {
if (SrcF->hasPrefixData())
setPrefixData(SrcF->getPrefixData());
else
- setPrefixData(0);
+ setPrefixData(nullptr);
}
/// getIntrinsicID - This method returns the ID number of the specified
@@ -451,11 +486,12 @@ enum IIT_Info {
IIT_STRUCT3 = 20,
IIT_STRUCT4 = 21,
IIT_STRUCT5 = 22,
- IIT_EXTEND_VEC_ARG = 23,
- IIT_TRUNC_VEC_ARG = 24,
+ IIT_EXTEND_ARG = 23,
+ IIT_TRUNC_ARG = 24,
IIT_ANYPTR = 25,
IIT_V1 = 26,
- IIT_VARARG = 27
+ IIT_VARARG = 27,
+ IIT_HALF_VEC_ARG = 28
};
@@ -541,15 +577,21 @@ static void DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos,
OutputTable.push_back(IITDescriptor::get(IITDescriptor::Argument, ArgInfo));
return;
}
- case IIT_EXTEND_VEC_ARG: {
+ case IIT_EXTEND_ARG: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::ExtendArgument,
+ ArgInfo));
+ return;
+ }
+ case IIT_TRUNC_ARG: {
unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
- OutputTable.push_back(IITDescriptor::get(IITDescriptor::ExtendVecArgument,
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::TruncArgument,
ArgInfo));
return;
}
- case IIT_TRUNC_VEC_ARG: {
+ case IIT_HALF_VEC_ARG: {
unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
- OutputTable.push_back(IITDescriptor::get(IITDescriptor::TruncVecArgument,
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::HalfVecArgument,
ArgInfo));
return;
}
@@ -641,12 +683,24 @@ static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos,
case IITDescriptor::Argument:
return Tys[D.getArgumentNumber()];
- case IITDescriptor::ExtendVecArgument:
- return VectorType::getExtendedElementVectorType(cast<VectorType>(
- Tys[D.getArgumentNumber()]));
+ case IITDescriptor::ExtendArgument: {
+ Type *Ty = Tys[D.getArgumentNumber()];
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return VectorType::getExtendedElementVectorType(VTy);
- case IITDescriptor::TruncVecArgument:
- return VectorType::getTruncatedElementVectorType(cast<VectorType>(
+ return IntegerType::get(Context, 2 * cast<IntegerType>(Ty)->getBitWidth());
+ }
+ case IITDescriptor::TruncArgument: {
+ Type *Ty = Tys[D.getArgumentNumber()];
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return VectorType::getTruncatedElementVectorType(VTy);
+
+ IntegerType *ITy = cast<IntegerType>(Ty);
+ assert(ITy->getBitWidth() % 2 == 0);
+ return IntegerType::get(Context, ITy->getBitWidth() / 2);
+ }
+ case IITDescriptor::HalfVecArgument:
+ return VectorType::getHalfElementsVectorType(cast<VectorType>(
Tys[D.getArgumentNumber()]));
}
llvm_unreachable("unhandled");
@@ -693,18 +747,23 @@ Function *Intrinsic::getDeclaration(Module *M, ID id, ArrayRef<Type*> Tys) {
#include "llvm/IR/Intrinsics.gen"
#undef GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN
+// This defines the "Intrinsic::getIntrinsicForMSBuiltin()" method.
+#define GET_LLVM_INTRINSIC_FOR_MS_BUILTIN
+#include "llvm/IR/Intrinsics.gen"
+#undef GET_LLVM_INTRINSIC_FOR_MS_BUILTIN
+
/// hasAddressTaken - returns true if there are any uses of this function
/// other than direct calls or invokes to it.
bool Function::hasAddressTaken(const User* *PutOffender) const {
- for (Value::const_use_iterator I = use_begin(), E = use_end(); I != E; ++I) {
- const User *U = *I;
- if (isa<BlockAddress>(U))
+ for (const Use &U : uses()) {
+ const User *FU = U.getUser();
+ if (isa<BlockAddress>(FU))
continue;
- if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
- return PutOffender ? (*PutOffender = U, true) : true;
- ImmutableCallSite CS(cast<Instruction>(U));
- if (!CS.isCallee(I))
- return PutOffender ? (*PutOffender = U, true) : true;
+ if (!isa<CallInst>(FU) && !isa<InvokeInst>(FU))
+ return PutOffender ? (*PutOffender = FU, true) : true;
+ ImmutableCallSite CS(cast<Instruction>(FU));
+ if (!CS.isCallee(&U))
+ return PutOffender ? (*PutOffender = FU, true) : true;
}
return false;
}
@@ -716,8 +775,8 @@ bool Function::isDefTriviallyDead() const {
return false;
// Check if the function is used by anything other than a blockaddress.
- for (Value::const_use_iterator I = use_begin(), E = use_end(); I != E; ++I)
- if (!isa<BlockAddress>(*I))
+ for (const User *U : users())
+ if (!isa<BlockAddress>(U))
return false;
return true;
@@ -728,10 +787,8 @@ bool Function::isDefTriviallyDead() const {
bool Function::callsFunctionThatReturnsTwice() const {
for (const_inst_iterator
I = inst_begin(this), E = inst_end(this); I != E; ++I) {
- const CallInst* callInst = dyn_cast<CallInst>(&*I);
- if (!callInst)
- continue;
- if (callInst->canReturnTwice())
+ ImmutableCallSite CS(&*I);
+ if (CS && CS.hasFnAttr(Attribute::ReturnsTwice))
return true;
}
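The new Argument helpers above (hasNonNullAttr, hasInAllocaAttr, getDereferenceableBytes) can be queried directly off a Function's arguments. A minimal sketch, not part of the patch, assuming F already lives in a Module:

#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Print the pointer-argument attributes queried by the helpers above.
static void describePointerArgs(const Function &F) {
  for (Function::const_arg_iterator AI = F.arg_begin(), AE = F.arg_end();
       AI != AE; ++AI) {
    const Argument &A = *AI;
    if (!A.getType()->isPointerTy())
      continue; // these helpers only apply to pointer arguments
    outs() << A.getName()
           << ": nonnull=" << (A.hasNonNullAttr() ? "yes" : "no")
           << " inalloca=" << (A.hasInAllocaAttr() ? "yes" : "no")
           << " dereferenceable=" << A.getDereferenceableBytes() << " bytes\n";
  }
}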
diff --git a/contrib/llvm/lib/IR/GCOV.cpp b/contrib/llvm/lib/IR/GCOV.cpp
index f0f8c7d..1667401 100644
--- a/contrib/llvm/lib/IR/GCOV.cpp
+++ b/contrib/llvm/lib/IR/GCOV.cpp
@@ -1,4 +1,4 @@
-//===- GCOVr.cpp - LLVM coverage tool -------------------------------------===//
+//===- GCOV.cpp - LLVM coverage tool --------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,90 +12,95 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Support/Debug.h"
#include "llvm/Support/GCOV.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MemoryObject.h"
-#include "llvm/Support/system_error.h"
+#include "llvm/Support/Path.h"
+#include <algorithm>
+#include <system_error>
using namespace llvm;
//===----------------------------------------------------------------------===//
// GCOVFile implementation.
-/// ~GCOVFile - Delete GCOVFile and its content.
-GCOVFile::~GCOVFile() {
- DeleteContainerPointers(Functions);
-}
+/// readGCNO - Read GCNO buffer.
+bool GCOVFile::readGCNO(GCOVBuffer &Buffer) {
+ if (!Buffer.readGCNOFormat()) return false;
+ if (!Buffer.readGCOVVersion(Version)) return false;
-/// isGCDAFile - Return true if Format identifies a .gcda file.
-static bool isGCDAFile(GCOV::GCOVFormat Format) {
- return Format == GCOV::GCDA_402 || Format == GCOV::GCDA_404;
-}
+ if (!Buffer.readInt(Checksum)) return false;
+ while (true) {
+ if (!Buffer.readFunctionTag()) break;
+ auto GFun = make_unique<GCOVFunction>(*this);
+ if (!GFun->readGCNO(Buffer, Version))
+ return false;
+ Functions.push_back(std::move(GFun));
+ }
-/// isGCNOFile - Return true if Format identifies a .gcno file.
-static bool isGCNOFile(GCOV::GCOVFormat Format) {
- return Format == GCOV::GCNO_402 || Format == GCOV::GCNO_404;
+ GCNOInitialized = true;
+ return true;
}
-/// read - Read GCOV buffer.
-bool GCOVFile::read(GCOVBuffer &Buffer) {
- GCOV::GCOVFormat Format = Buffer.readGCOVFormat();
- if (Format == GCOV::InvalidGCOV)
+/// readGCDA - Read GCDA buffer. It is required that readGCDA() can only be
+/// called after readGCNO().
+bool GCOVFile::readGCDA(GCOVBuffer &Buffer) {
+ assert(GCNOInitialized && "readGCDA() can only be called after readGCNO()");
+ if (!Buffer.readGCDAFormat()) return false;
+ GCOV::GCOVVersion GCDAVersion;
+ if (!Buffer.readGCOVVersion(GCDAVersion)) return false;
+ if (Version != GCDAVersion) {
+ errs() << "GCOV versions do not match.\n";
return false;
+ }
- if (isGCNOFile(Format)) {
- while (true) {
- if (!Buffer.readFunctionTag()) break;
- GCOVFunction *GFun = new GCOVFunction();
- if (!GFun->read(Buffer, Format))
- return false;
- Functions.push_back(GFun);
- }
+ uint32_t GCDAChecksum;
+ if (!Buffer.readInt(GCDAChecksum)) return false;
+ if (Checksum != GCDAChecksum) {
+ errs() << "File checksums do not match: " << Checksum << " != "
+ << GCDAChecksum << ".\n";
+ return false;
}
- else if (isGCDAFile(Format)) {
- for (size_t i = 0, e = Functions.size(); i < e; ++i) {
- if (!Buffer.readFunctionTag()) {
- errs() << "Unexpected number of functions.\n";
- return false;
- }
- if (!Functions[i]->read(Buffer, Format))
- return false;
- }
- if (Buffer.readObjectTag()) {
- uint32_t Length;
- uint32_t Dummy;
- if (!Buffer.readInt(Length)) return false;
- if (!Buffer.readInt(Dummy)) return false; // checksum
- if (!Buffer.readInt(Dummy)) return false; // num
- if (!Buffer.readInt(RunCount)) return false;;
- Buffer.advanceCursor(Length-3);
- }
- while (Buffer.readProgramTag()) {
- uint32_t Length;
- if (!Buffer.readInt(Length)) return false;
- Buffer.advanceCursor(Length);
- ++ProgramCount;
+ for (size_t i = 0, e = Functions.size(); i < e; ++i) {
+ if (!Buffer.readFunctionTag()) {
+ errs() << "Unexpected number of functions.\n";
+ return false;
}
+ if (!Functions[i]->readGCDA(Buffer, Version))
+ return false;
+ }
+ if (Buffer.readObjectTag()) {
+ uint32_t Length;
+ uint32_t Dummy;
+ if (!Buffer.readInt(Length)) return false;
+ if (!Buffer.readInt(Dummy)) return false; // checksum
+ if (!Buffer.readInt(Dummy)) return false; // num
+ if (!Buffer.readInt(RunCount)) return false;
+ Buffer.advanceCursor(Length-3);
+ }
+ while (Buffer.readProgramTag()) {
+ uint32_t Length;
+ if (!Buffer.readInt(Length)) return false;
+ Buffer.advanceCursor(Length);
+ ++ProgramCount;
}
return true;
}
/// dump - Dump GCOVFile content to dbgs() for debugging purposes.
-void GCOVFile::dump() {
- for (SmallVectorImpl<GCOVFunction *>::iterator I = Functions.begin(),
- E = Functions.end(); I != E; ++I)
- (*I)->dump();
+void GCOVFile::dump() const {
+ for (const auto &FPtr : Functions)
+ FPtr->dump();
}
/// collectLineCounts - Collect line counts. This must be used after
/// reading .gcno and .gcda files.
void GCOVFile::collectLineCounts(FileInfo &FI) {
- for (SmallVectorImpl<GCOVFunction *>::iterator I = Functions.begin(),
- E = Functions.end(); I != E; ++I)
- (*I)->collectLineCounts(FI);
+ for (const auto &FPtr : Functions)
+ FPtr->collectLineCounts(FI);
FI.setRunCount(RunCount);
FI.setProgramCount(ProgramCount);
}
@@ -103,57 +108,24 @@ void GCOVFile::collectLineCounts(FileInfo &FI) {
//===----------------------------------------------------------------------===//
// GCOVFunction implementation.
-/// ~GCOVFunction - Delete GCOVFunction and its content.
-GCOVFunction::~GCOVFunction() {
- DeleteContainerPointers(Blocks);
-}
-
-/// read - Read a function from the buffer. Return false if buffer cursor
-/// does not point to a function tag.
-bool GCOVFunction::read(GCOVBuffer &Buff, GCOV::GCOVFormat Format) {
+/// readGCNO - Read a function from the GCNO buffer. Return false if an error
+/// occurs.
+bool GCOVFunction::readGCNO(GCOVBuffer &Buff, GCOV::GCOVVersion Version) {
uint32_t Dummy;
if (!Buff.readInt(Dummy)) return false; // Function header length
if (!Buff.readInt(Ident)) return false;
- if (!Buff.readInt(Dummy)) return false; // Checksum #1
- if (Format != GCOV::GCNO_402 && Format != GCOV::GCDA_402)
- if (!Buff.readInt(Dummy)) return false; // Checksum #2
-
- if (!Buff.readString(Name)) return false;
-
- if (Format == GCOV::GCNO_402 || Format == GCOV::GCNO_404)
- if (!Buff.readString(Filename)) return false;
-
- if (Format == GCOV::GCDA_402 || Format == GCOV::GCDA_404) {
- if (!Buff.readArcTag()) {
- errs() << "Arc tag not found.\n";
+ if (!Buff.readInt(Checksum)) return false;
+ if (Version != GCOV::V402) {
+ uint32_t CfgChecksum;
+ if (!Buff.readInt(CfgChecksum)) return false;
+ if (Parent.getChecksum() != CfgChecksum) {
+ errs() << "File checksums do not match: " << Parent.getChecksum()
+ << " != " << CfgChecksum << " in (" << Name << ").\n";
return false;
}
- uint32_t Count;
- if (!Buff.readInt(Count)) return false;
- Count /= 2;
-
- // This for loop adds the counts for each block. A second nested loop is
- // required to combine the edge counts that are contained in the GCDA file.
- for (uint32_t Line = 0; Count > 0; ++Line) {
- if (Line >= Blocks.size()) {
- errs() << "Unexpected number of edges.\n";
- return false;
- }
- GCOVBlock &Block = *Blocks[Line];
- for (size_t Edge = 0, End = Block.getNumEdges(); Edge < End; ++Edge) {
- if (Count == 0) {
- errs() << "Unexpected number of edges.\n";
- return false;
- }
- uint64_t ArcCount;
- if (!Buff.readInt64(ArcCount)) return false;
- Block.addCount(ArcCount);
- --Count;
- }
- }
- return true;
}
-
+ if (!Buff.readString(Name)) return false;
+ if (!Buff.readString(Filename)) return false;
if (!Buff.readInt(LineNumber)) return false;
// read blocks.
@@ -165,7 +137,7 @@ bool GCOVFunction::read(GCOVBuffer &Buff, GCOV::GCOVFormat Format) {
if (!Buff.readInt(BlockCount)) return false;
for (uint32_t i = 0, e = BlockCount; i != e; ++i) {
if (!Buff.readInt(Dummy)) return false; // Block flags;
- Blocks.push_back(new GCOVBlock(*this, i));
+ Blocks.push_back(make_unique<GCOVBlock>(*this, i));
}
// read edges.
@@ -176,13 +148,17 @@ bool GCOVFunction::read(GCOVBuffer &Buff, GCOV::GCOVFormat Format) {
uint32_t BlockNo;
if (!Buff.readInt(BlockNo)) return false;
if (BlockNo >= BlockCount) {
- errs() << "Unexpected block number.\n";
+ errs() << "Unexpected block number: " << BlockNo << " (in " << Name
+ << ").\n";
return false;
}
for (uint32_t i = 0, e = EdgeCount; i != e; ++i) {
uint32_t Dst;
if (!Buff.readInt(Dst)) return false;
- Blocks[BlockNo]->addEdge(Dst);
+ Edges.push_back(make_unique<GCOVEdge>(*Blocks[BlockNo], *Blocks[Dst]));
+ GCOVEdge *Edge = Edges.back().get();
+ Blocks[BlockNo]->addDstEdge(Edge);
+ Blocks[Dst]->addSrcEdge(Edge);
if (!Buff.readInt(Dummy)) return false; // Edge flag
}
}
@@ -190,50 +166,154 @@ bool GCOVFunction::read(GCOVBuffer &Buff, GCOV::GCOVFormat Format) {
// read line table.
while (Buff.readLineTag()) {
uint32_t LineTableLength;
+ // Read the length of this line table.
if (!Buff.readInt(LineTableLength)) return false;
uint32_t EndPos = Buff.getCursor() + LineTableLength*4;
uint32_t BlockNo;
+ // Read the block number this table is associated with.
if (!Buff.readInt(BlockNo)) return false;
if (BlockNo >= BlockCount) {
- errs() << "Unexpected block number.\n";
+ errs() << "Unexpected block number: " << BlockNo << " (in " << Name
+ << ").\n";
return false;
}
- GCOVBlock *Block = Blocks[BlockNo];
- if (!Buff.readInt(Dummy)) return false; // flag
- while (Buff.getCursor() != (EndPos - 4)) {
+ GCOVBlock &Block = *Blocks[BlockNo];
+ // Read the word that pads the beginning of the line table. This may be a
+ // flag of some sort, but seems to always be zero.
+ if (!Buff.readInt(Dummy)) return false;
+
+ // Line information starts here and continues up until the last word.
+ if (Buff.getCursor() != (EndPos - sizeof(uint32_t))) {
StringRef F;
+ // Read the source file name.
if (!Buff.readString(F)) return false;
- if (F != Filename) {
- errs() << "Multiple sources for a single basic block.\n";
+ if (Filename != F) {
+ errs() << "Multiple sources for a single basic block: " << Filename
+ << " != " << F << " (in " << Name << ").\n";
return false;
}
- if (Buff.getCursor() == (EndPos - 4)) break;
- while (true) {
+ // Read lines up to, but not including, the null terminator.
+ while (Buff.getCursor() < (EndPos - 2 * sizeof(uint32_t))) {
uint32_t Line;
if (!Buff.readInt(Line)) return false;
- if (!Line) break;
- Block->addLine(Line);
+ // Line 0 means this instruction was injected by the compiler. Skip it.
+ if (!Line) continue;
+ Block.addLine(Line);
+ }
+ // Read the null terminator.
+ if (!Buff.readInt(Dummy)) return false;
+ }
+ // The last word is either a flag or padding, it isn't clear which. Skip
+ // over it.
+ if (!Buff.readInt(Dummy)) return false;
+ }
+ return true;
+}
+
+/// readGCDA - Read a function from the GCDA buffer. Return false if an error
+/// occurs.
+bool GCOVFunction::readGCDA(GCOVBuffer &Buff, GCOV::GCOVVersion Version) {
+ uint32_t Dummy;
+ if (!Buff.readInt(Dummy)) return false; // Function header length
+
+ uint32_t GCDAIdent;
+ if (!Buff.readInt(GCDAIdent)) return false;
+ if (Ident != GCDAIdent) {
+ errs() << "Function identifiers do not match: " << Ident << " != "
+ << GCDAIdent << " (in " << Name << ").\n";
+ return false;
+ }
+
+ uint32_t GCDAChecksum;
+ if (!Buff.readInt(GCDAChecksum)) return false;
+ if (Checksum != GCDAChecksum) {
+ errs() << "Function checksums do not match: " << Checksum << " != "
+ << GCDAChecksum << " (in " << Name << ").\n";
+ return false;
+ }
+
+ uint32_t CfgChecksum;
+ if (Version != GCOV::V402) {
+ if (!Buff.readInt(CfgChecksum)) return false;
+ if (Parent.getChecksum() != CfgChecksum) {
+ errs() << "File checksums do not match: " << Parent.getChecksum()
+ << " != " << CfgChecksum << " (in " << Name << ").\n";
+ return false;
+ }
+ }
+
+ StringRef GCDAName;
+ if (!Buff.readString(GCDAName)) return false;
+ if (Name != GCDAName) {
+ errs() << "Function names do not match: " << Name << " != " << GCDAName
+ << ".\n";
+ return false;
+ }
+
+ if (!Buff.readArcTag()) {
+ errs() << "Arc tag not found (in " << Name << ").\n";
+ return false;
+ }
+
+ uint32_t Count;
+ if (!Buff.readInt(Count)) return false;
+ Count /= 2;
+
+ // This for loop adds the counts for each block. A second nested loop is
+ // required to combine the edge counts that are contained in the GCDA file.
+ for (uint32_t BlockNo = 0; Count > 0; ++BlockNo) {
+ // The last block is always reserved for the exit block
+ if (BlockNo >= Blocks.size()-1) {
+ errs() << "Unexpected number of edges (in " << Name << ").\n";
+ return false;
+ }
+ GCOVBlock &Block = *Blocks[BlockNo];
+ for (size_t EdgeNo = 0, End = Block.getNumDstEdges(); EdgeNo < End;
+ ++EdgeNo) {
+ if (Count == 0) {
+ errs() << "Unexpected number of edges (in " << Name << ").\n";
+ return false;
}
+ uint64_t ArcCount;
+ if (!Buff.readInt64(ArcCount)) return false;
+ Block.addCount(EdgeNo, ArcCount);
+ --Count;
}
- if (!Buff.readInt(Dummy)) return false; // flag
+ Block.sortDstEdges();
}
return true;
}
+/// getEntryCount - Get the number of times the function was called by
+/// retrieving the entry block's count.
+uint64_t GCOVFunction::getEntryCount() const {
+ return Blocks.front()->getCount();
+}
+
+/// getExitCount - Get the number of times the function returned by retrieving
+/// the exit block's count.
+uint64_t GCOVFunction::getExitCount() const {
+ return Blocks.back()->getCount();
+}
+
/// dump - Dump GCOVFunction content to dbgs() for debugging purposes.
-void GCOVFunction::dump() {
+void GCOVFunction::dump() const {
dbgs() << "===== " << Name << " @ " << Filename << ":" << LineNumber << "\n";
- for (SmallVectorImpl<GCOVBlock *>::iterator I = Blocks.begin(),
- E = Blocks.end(); I != E; ++I)
- (*I)->dump();
+ for (const auto &Block : Blocks)
+ Block->dump();
}
/// collectLineCounts - Collect line counts. This must be used after
/// reading .gcno and .gcda files.
void GCOVFunction::collectLineCounts(FileInfo &FI) {
- for (SmallVectorImpl<GCOVBlock *>::iterator I = Blocks.begin(),
- E = Blocks.end(); I != E; ++I)
- (*I)->collectLineCounts(FI);
+ // If the line number is zero, this is a function that doesn't actually appear
+ // in the source file, so there isn't anything we can do with it.
+ if (LineNumber == 0)
+ return;
+
+ for (const auto &Block : Blocks)
+ Block->collectLineCounts(FI);
+ FI.addFunctionLine(Filename, LineNumber, this);
}
//===----------------------------------------------------------------------===//
@@ -241,31 +321,60 @@ void GCOVFunction::collectLineCounts(FileInfo &FI) {
/// ~GCOVBlock - Delete GCOVBlock and its content.
GCOVBlock::~GCOVBlock() {
- Edges.clear();
+ SrcEdges.clear();
+ DstEdges.clear();
Lines.clear();
}
+/// addCount - Add to block counter while storing the edge count. If the
+/// destination has no outgoing edges, also update that block's count too.
+void GCOVBlock::addCount(size_t DstEdgeNo, uint64_t N) {
+ assert(DstEdgeNo < DstEdges.size()); // up to caller to ensure EdgeNo is valid
+ DstEdges[DstEdgeNo]->Count = N;
+ Counter += N;
+ if (!DstEdges[DstEdgeNo]->Dst.getNumDstEdges())
+ DstEdges[DstEdgeNo]->Dst.Counter += N;
+}
+
+/// sortDstEdges - Sort destination edges by block number, nop if already
+/// sorted. This is required for printing branch info in the correct order.
+void GCOVBlock::sortDstEdges() {
+ if (!DstEdgesAreSorted) {
+ SortDstEdgesFunctor SortEdges;
+ std::stable_sort(DstEdges.begin(), DstEdges.end(), SortEdges);
+ }
+}
+
/// collectLineCounts - Collect line counts. This must be used after
/// reading .gcno and .gcda files.
void GCOVBlock::collectLineCounts(FileInfo &FI) {
for (SmallVectorImpl<uint32_t>::iterator I = Lines.begin(),
E = Lines.end(); I != E; ++I)
- FI.addLineCount(Parent.getFilename(), *I, Counter);
+ FI.addBlockLine(Parent.getFilename(), *I, this);
}
/// dump - Dump GCOVBlock content to dbgs() for debugging purposes.
-void GCOVBlock::dump() {
+void GCOVBlock::dump() const {
dbgs() << "Block : " << Number << " Counter : " << Counter << "\n";
- if (!Edges.empty()) {
- dbgs() << "\tEdges : ";
- for (SmallVectorImpl<uint32_t>::iterator I = Edges.begin(), E = Edges.end();
- I != E; ++I)
- dbgs() << (*I) << ",";
+ if (!SrcEdges.empty()) {
+ dbgs() << "\tSource Edges : ";
+ for (EdgeIterator I = SrcEdges.begin(), E = SrcEdges.end(); I != E; ++I) {
+ const GCOVEdge *Edge = *I;
+ dbgs() << Edge->Src.Number << " (" << Edge->Count << "), ";
+ }
+ dbgs() << "\n";
+ }
+ if (!DstEdges.empty()) {
+ dbgs() << "\tDestination Edges : ";
+ for (EdgeIterator I = DstEdges.begin(), E = DstEdges.end(); I != E; ++I) {
+ const GCOVEdge *Edge = *I;
+ dbgs() << Edge->Dst.Number << " (" << Edge->Count << "), ";
+ }
dbgs() << "\n";
}
if (!Lines.empty()) {
dbgs() << "\tLines : ";
- for (SmallVectorImpl<uint32_t>::iterator I = Lines.begin(),
+ for (SmallVectorImpl<uint32_t>::const_iterator I = Lines.begin(),
E = Lines.end(); I != E; ++I)
dbgs() << (*I) << ",";
dbgs() << "\n";
@@ -275,40 +384,387 @@ void GCOVBlock::dump() {
//===----------------------------------------------------------------------===//
// FileInfo implementation.
+// Safe integer division, returns 0 if numerator is 0.
+static uint32_t safeDiv(uint64_t Numerator, uint64_t Divisor) {
+ if (!Numerator)
+ return 0;
+ return Numerator/Divisor;
+}
+
+// This custom division function mimics gcov's branch outputs:
+// - Round to closest whole number
+// - Only output 0% or 100% if it's exactly that value
+static uint32_t branchDiv(uint64_t Numerator, uint64_t Divisor) {
+ if (!Numerator)
+ return 0;
+ if (Numerator == Divisor)
+ return 100;
+
+ uint8_t Res = (Numerator*100+Divisor/2) / Divisor;
+ if (Res == 0)
+ return 1;
+ if (Res == 100)
+ return 99;
+ return Res;
+}
+
+struct formatBranchInfo {
+ formatBranchInfo(const GCOVOptions &Options, uint64_t Count,
+ uint64_t Total) :
+ Options(Options), Count(Count), Total(Total) {}
+
+ void print(raw_ostream &OS) const {
+ if (!Total)
+ OS << "never executed";
+ else if (Options.BranchCount)
+ OS << "taken " << Count;
+ else
+ OS << "taken " << branchDiv(Count, Total) << "%";
+ }
+
+ const GCOVOptions &Options;
+ uint64_t Count;
+ uint64_t Total;
+};
+
+static raw_ostream &operator<<(raw_ostream &OS, const formatBranchInfo &FBI) {
+ FBI.print(OS);
+ return OS;
+}
+
+namespace {
+class LineConsumer {
+ std::unique_ptr<MemoryBuffer> Buffer;
+ StringRef Remaining;
+public:
+ LineConsumer(StringRef Filename) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr =
+ MemoryBuffer::getFileOrSTDIN(Filename);
+ if (std::error_code EC = BufferOrErr.getError()) {
+ errs() << Filename << ": " << EC.message() << "\n";
+ Remaining = "";
+ } else {
+ Buffer = std::move(BufferOrErr.get());
+ Remaining = Buffer->getBuffer();
+ }
+ }
+ bool empty() { return Remaining.empty(); }
+ void printNext(raw_ostream &OS, uint32_t LineNum) {
+ StringRef Line;
+ if (empty())
+ Line = "/*EOF*/";
+ else
+ std::tie(Line, Remaining) = Remaining.split("\n");
+ OS << format("%5u:", LineNum) << Line << "\n";
+ }
+};
+}
+
+/// Convert a path to a gcov filename. If PreservePaths is true, this
+/// translates "/" to "#", ".." to "^", and drops ".", to match gcov.
+static std::string mangleCoveragePath(StringRef Filename, bool PreservePaths) {
+ if (!PreservePaths)
+ return sys::path::filename(Filename).str();
+
+ // This behaviour is defined by gcov in terms of text replacements, so it's
+ // not likely to do anything useful on filesystems with different textual
+ // conventions.
+ llvm::SmallString<256> Result("");
+ StringRef::iterator I, S, E;
+ for (I = S = Filename.begin(), E = Filename.end(); I != E; ++I) {
+ if (*I != '/')
+ continue;
+
+ if (I - S == 1 && *S == '.') {
+ // ".", the current directory, is skipped.
+ } else if (I - S == 2 && *S == '.' && *(S + 1) == '.') {
+ // "..", the parent directory, is replaced with "^".
+ Result.append("^#");
+ } else {
+ if (S < I)
+ // Leave other components intact,
+ Result.append(S, I);
+ // And separate with "#".
+ Result.push_back('#');
+ }
+ S = I + 1;
+ }
+
+ if (S < I)
+ Result.append(S, I);
+ return Result.str();
+}
+
+std::string FileInfo::getCoveragePath(StringRef Filename,
+ StringRef MainFilename) {
+ if (Options.NoOutput)
+ // This is probably a bug in gcov, but when -n is specified, paths aren't
+ // mangled at all, and the -l and -p options are ignored. Here, we do the
+ // same.
+ return Filename;
+
+ std::string CoveragePath;
+ if (Options.LongFileNames && !Filename.equals(MainFilename))
+ CoveragePath =
+ mangleCoveragePath(MainFilename, Options.PreservePaths) + "##";
+ CoveragePath +=
+ mangleCoveragePath(Filename, Options.PreservePaths) + ".gcov";
+ return CoveragePath;
+}
+
+std::unique_ptr<raw_ostream>
+FileInfo::openCoveragePath(StringRef CoveragePath) {
+ if (Options.NoOutput)
+ return llvm::make_unique<raw_null_ostream>();
+
+ std::string ErrorInfo;
+ auto OS = llvm::make_unique<raw_fd_ostream>(CoveragePath.str().c_str(),
+ ErrorInfo, sys::fs::F_Text);
+ if (!ErrorInfo.empty()) {
+ errs() << ErrorInfo << "\n";
+ return llvm::make_unique<raw_null_ostream>();
+ }
+ return std::move(OS);
+}
+
/// print - Print source files with collected line count information.
-void FileInfo::print(raw_fd_ostream &OS, StringRef gcnoFile,
- StringRef gcdaFile) {
- for (StringMap<LineCounts>::iterator I = LineInfo.begin(), E = LineInfo.end();
- I != E; ++I) {
+void FileInfo::print(StringRef MainFilename, StringRef GCNOFile,
+ StringRef GCDAFile) {
+ for (StringMap<LineData>::const_iterator I = LineInfo.begin(),
+ E = LineInfo.end(); I != E; ++I) {
StringRef Filename = I->first();
+ auto AllLines = LineConsumer(Filename);
+
+ std::string CoveragePath = getCoveragePath(Filename, MainFilename);
+ std::unique_ptr<raw_ostream> S = openCoveragePath(CoveragePath);
+ raw_ostream &OS = *S;
+
OS << " -: 0:Source:" << Filename << "\n";
- OS << " -: 0:Graph:" << gcnoFile << "\n";
- OS << " -: 0:Data:" << gcdaFile << "\n";
+ OS << " -: 0:Graph:" << GCNOFile << "\n";
+ OS << " -: 0:Data:" << GCDAFile << "\n";
OS << " -: 0:Runs:" << RunCount << "\n";
OS << " -: 0:Programs:" << ProgramCount << "\n";
- LineCounts &L = LineInfo[Filename];
- OwningPtr<MemoryBuffer> Buff;
- if (error_code ec = MemoryBuffer::getFileOrSTDIN(Filename, Buff)) {
- errs() << Filename << ": " << ec.message() << "\n";
- return;
- }
- StringRef AllLines = Buff->getBuffer();
- uint32_t i = 0;
- while (!AllLines.empty()) {
- if (L.find(i) != L.end()) {
- if (L[i] == 0)
- OS << " #####:";
- else
- OS << format("%9" PRIu64 ":", L[i]);
- } else {
+
+ const LineData &Line = I->second;
+ GCOVCoverage FileCoverage(Filename);
+ for (uint32_t LineIndex = 0;
+ LineIndex < Line.LastLine || !AllLines.empty(); ++LineIndex) {
+ if (Options.BranchInfo) {
+ FunctionLines::const_iterator FuncsIt = Line.Functions.find(LineIndex);
+ if (FuncsIt != Line.Functions.end())
+ printFunctionSummary(OS, FuncsIt->second);
+ }
+
+ BlockLines::const_iterator BlocksIt = Line.Blocks.find(LineIndex);
+ if (BlocksIt == Line.Blocks.end()) {
+ // No basic blocks are on this line. Not an executable line of code.
OS << " -:";
+ AllLines.printNext(OS, LineIndex + 1);
+ } else {
+ const BlockVector &Blocks = BlocksIt->second;
+
+ // Add up the block counts to form line counts.
+ DenseMap<const GCOVFunction *, bool> LineExecs;
+ uint64_t LineCount = 0;
+ for (BlockVector::const_iterator I = Blocks.begin(), E = Blocks.end();
+ I != E; ++I) {
+ const GCOVBlock *Block = *I;
+ if (Options.AllBlocks) {
+ // Only take the highest block count for that line.
+ uint64_t BlockCount = Block->getCount();
+ LineCount = LineCount > BlockCount ? LineCount : BlockCount;
+ } else {
+ // Sum up all of the block counts.
+ LineCount += Block->getCount();
+ }
+
+ if (Options.FuncCoverage) {
+ // This is a slightly convoluted way to most accurately gather line
+ // statistics for functions. Basically what is happening is that we
+ // don't want to count a single line with multiple blocks more than
+ // once. However, we also don't simply want to give the total line
+ // count to every function that starts on the line. Thus, what is
+ // happening here are two things:
+ // 1) Ensure that the number of logical lines is only incremented
+ // once per function.
+ // 2) If there are multiple blocks on the same line, ensure that the
+ // number of lines executed is incremented as long as at least
+ // one of the blocks is executed.
+ const GCOVFunction *Function = &Block->getParent();
+ if (FuncCoverages.find(Function) == FuncCoverages.end()) {
+ std::pair<const GCOVFunction *, GCOVCoverage>
+ KeyValue(Function, GCOVCoverage(Function->getName()));
+ FuncCoverages.insert(KeyValue);
+ }
+ GCOVCoverage &FuncCoverage = FuncCoverages.find(Function)->second;
+
+ if (LineExecs.find(Function) == LineExecs.end()) {
+ if (Block->getCount()) {
+ ++FuncCoverage.LinesExec;
+ LineExecs[Function] = true;
+ } else {
+ LineExecs[Function] = false;
+ }
+ ++FuncCoverage.LogicalLines;
+ } else if (!LineExecs[Function] && Block->getCount()) {
+ ++FuncCoverage.LinesExec;
+ LineExecs[Function] = true;
+ }
+ }
+ }
+
+ if (LineCount == 0)
+ OS << " #####:";
+ else {
+ OS << format("%9" PRIu64 ":", LineCount);
+ ++FileCoverage.LinesExec;
+ }
+ ++FileCoverage.LogicalLines;
+
+ AllLines.printNext(OS, LineIndex + 1);
+
+ uint32_t BlockNo = 0;
+ uint32_t EdgeNo = 0;
+ for (BlockVector::const_iterator I = Blocks.begin(), E = Blocks.end();
+ I != E; ++I) {
+ const GCOVBlock *Block = *I;
+
+ // Only print block and branch information at the end of the block.
+ if (Block->getLastLine() != LineIndex+1)
+ continue;
+ if (Options.AllBlocks)
+ printBlockInfo(OS, *Block, LineIndex, BlockNo);
+ if (Options.BranchInfo) {
+ size_t NumEdges = Block->getNumDstEdges();
+ if (NumEdges > 1)
+ printBranchInfo(OS, *Block, FileCoverage, EdgeNo);
+ else if (Options.UncondBranch && NumEdges == 1)
+ printUncondBranchInfo(OS, EdgeNo, (*Block->dst_begin())->Count);
+ }
+ }
}
- std::pair<StringRef, StringRef> P = AllLines.split('\n');
- if (AllLines != P.first)
- OS << format("%5u:", i+1) << P.first;
- OS << "\n";
- AllLines = P.second;
- ++i;
}
+ FileCoverages.push_back(std::make_pair(CoveragePath, FileCoverage));
+ }
+
+ // FIXME: There is no way to detect calls given current instrumentation.
+ if (Options.FuncCoverage)
+ printFuncCoverage();
+ printFileCoverage();
+ return;
+}
+
+/// printFunctionSummary - Print function and block summary.
+void FileInfo::printFunctionSummary(raw_ostream &OS,
+ const FunctionVector &Funcs) const {
+ for (FunctionVector::const_iterator I = Funcs.begin(), E = Funcs.end();
+ I != E; ++I) {
+ const GCOVFunction *Func = *I;
+ uint64_t EntryCount = Func->getEntryCount();
+ uint32_t BlocksExec = 0;
+ for (GCOVFunction::BlockIterator I = Func->block_begin(),
+ E = Func->block_end(); I != E; ++I) {
+ const GCOVBlock &Block = **I;
+ if (Block.getNumDstEdges() && Block.getCount())
+ ++BlocksExec;
+ }
+
+ OS << "function " << Func->getName() << " called " << EntryCount
+ << " returned " << safeDiv(Func->getExitCount()*100, EntryCount)
+ << "% blocks executed "
+ << safeDiv(BlocksExec*100, Func->getNumBlocks()-1) << "%\n";
+ }
+}
+
+/// printBlockInfo - Output counts for each block.
+void FileInfo::printBlockInfo(raw_ostream &OS, const GCOVBlock &Block,
+ uint32_t LineIndex, uint32_t &BlockNo) const {
+ if (Block.getCount() == 0)
+ OS << " $$$$$:";
+ else
+ OS << format("%9" PRIu64 ":", Block.getCount());
+ OS << format("%5u-block %2u\n", LineIndex+1, BlockNo++);
+}
+
+/// printBranchInfo - Print conditional branch probabilities.
+void FileInfo::printBranchInfo(raw_ostream &OS, const GCOVBlock &Block,
+ GCOVCoverage &Coverage, uint32_t &EdgeNo) {
+ SmallVector<uint64_t, 16> BranchCounts;
+ uint64_t TotalCounts = 0;
+ for (GCOVBlock::EdgeIterator I = Block.dst_begin(), E = Block.dst_end();
+ I != E; ++I) {
+ const GCOVEdge *Edge = *I;
+ BranchCounts.push_back(Edge->Count);
+ TotalCounts += Edge->Count;
+ if (Block.getCount()) ++Coverage.BranchesExec;
+ if (Edge->Count) ++Coverage.BranchesTaken;
+ ++Coverage.Branches;
+
+ if (Options.FuncCoverage) {
+ const GCOVFunction *Function = &Block.getParent();
+ GCOVCoverage &FuncCoverage = FuncCoverages.find(Function)->second;
+ if (Block.getCount()) ++FuncCoverage.BranchesExec;
+ if (Edge->Count) ++FuncCoverage.BranchesTaken;
+ ++FuncCoverage.Branches;
+ }
+ }
+
+ for (SmallVectorImpl<uint64_t>::const_iterator I = BranchCounts.begin(),
+ E = BranchCounts.end(); I != E; ++I) {
+ OS << format("branch %2u ", EdgeNo++)
+ << formatBranchInfo(Options, *I, TotalCounts) << "\n";
+ }
+}
+
+/// printUncondBranchInfo - Print unconditional branch probabilities.
+void FileInfo::printUncondBranchInfo(raw_ostream &OS, uint32_t &EdgeNo,
+ uint64_t Count) const {
+ OS << format("unconditional %2u ", EdgeNo++)
+ << formatBranchInfo(Options, Count, Count) << "\n";
+}
+
+// printCoverage - Print generic coverage info used by both printFuncCoverage
+// and printFileCoverage.
+void FileInfo::printCoverage(const GCOVCoverage &Coverage) const {
+ outs() << format("Lines executed:%.2f%% of %u\n",
+ double(Coverage.LinesExec)*100/Coverage.LogicalLines,
+ Coverage.LogicalLines);
+ if (Options.BranchInfo) {
+ if (Coverage.Branches) {
+ outs() << format("Branches executed:%.2f%% of %u\n",
+ double(Coverage.BranchesExec)*100/Coverage.Branches,
+ Coverage.Branches);
+ outs() << format("Taken at least once:%.2f%% of %u\n",
+ double(Coverage.BranchesTaken)*100/Coverage.Branches,
+ Coverage.Branches);
+ } else {
+ outs() << "No branches\n";
+ }
+ outs() << "No calls\n"; // to be consistent with gcov
+ }
+}
+
+// printFuncCoverage - Print per-function coverage info.
+void FileInfo::printFuncCoverage() const {
+ for (FuncCoverageMap::const_iterator I = FuncCoverages.begin(),
+ E = FuncCoverages.end(); I != E; ++I) {
+ const GCOVCoverage &Coverage = I->second;
+ outs() << "Function '" << Coverage.Name << "'\n";
+ printCoverage(Coverage);
+ outs() << "\n";
+ }
+}
+
+// printFileCoverage - Print per-file coverage info.
+void FileInfo::printFileCoverage() const {
+ for (FileCoverageList::const_iterator I = FileCoverages.begin(),
+ E = FileCoverages.end(); I != E; ++I) {
+ const std::string &Filename = I->first;
+ const GCOVCoverage &Coverage = I->second;
+ outs() << "File '" << Coverage.Name << "'\n";
+ printCoverage(Coverage);
+ if (!Options.NoOutput)
+ outs() << Coverage.Name << ":creating '" << Filename << "'\n";
+ outs() << "\n";
}
}
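The refactoring above splits reading into an explicit two-phase readGCNO()/readGCDA() sequence. The sketch below is not from the patch and mirrors how a driver such as llvm-cov would be expected to use the new API; the GCOVOptions argument order and the choice of MainFilename passed to FileInfo::print are assumptions.

#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/GCOV.h"
#include "llvm/Support/MemoryBuffer.h"
using namespace llvm;

static bool reportCoverage(StringRef SourceFile, StringRef GCNOPath,
                           StringRef GCDAPath) {
  ErrorOr<std::unique_ptr<MemoryBuffer>> GCNO =
      MemoryBuffer::getFileOrSTDIN(GCNOPath);
  ErrorOr<std::unique_ptr<MemoryBuffer>> GCDA =
      MemoryBuffer::getFileOrSTDIN(GCDAPath);
  if (!GCNO || !GCDA)
    return false;

  GCOVFile GF;
  GCOVBuffer GCNOBuf(GCNO.get().get());
  if (!GF.readGCNO(GCNOBuf))      // the .gcno graph must be read first
    return false;
  GCOVBuffer GCDABuf(GCDA.get().get());
  if (!GF.readGCDA(GCDABuf))      // then the .gcda counts are merged in
    return false;

  // All options off except branch info; this argument order is assumed.
  GCOVOptions Options(false, true, false, false, false, false, false, false);
  FileInfo FI(Options);
  GF.collectLineCounts(FI);
  FI.print(SourceFile, GCNOPath, GCDAPath);
  return true;
}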
diff --git a/contrib/llvm/lib/IR/GVMaterializer.cpp b/contrib/llvm/lib/IR/GVMaterializer.cpp
index f77a9c9..706926d 100644
--- a/contrib/llvm/lib/IR/GVMaterializer.cpp
+++ b/contrib/llvm/lib/IR/GVMaterializer.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/GVMaterializer.h"
+#include "llvm/IR/GVMaterializer.h"
using namespace llvm;
GVMaterializer::~GVMaterializer() {}
diff --git a/contrib/llvm/lib/IR/Globals.cpp b/contrib/llvm/lib/IR/Globals.cpp
index da3b02a..244e3e4 100644
--- a/contrib/llvm/lib/IR/Globals.cpp
+++ b/contrib/llvm/lib/IR/Globals.cpp
@@ -18,9 +18,10 @@
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/LeakDetector.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/LeakDetector.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -40,6 +41,10 @@ void GlobalValue::Dematerialize() {
getParent()->Dematerialize(this);
}
+const DataLayout *GlobalValue::getDataLayout() const {
+ return getParent()->getDataLayout();
+}
+
/// Override destroyConstant to make sure it doesn't get called on
/// GlobalValue's because they shouldn't be treated like other constants.
void GlobalValue::destroyConstant() {
@@ -49,20 +54,62 @@ void GlobalValue::destroyConstant() {
/// copyAttributesFrom - copy all additional attributes (those not needed to
/// create a GlobalValue) from the GlobalValue Src to this one.
void GlobalValue::copyAttributesFrom(const GlobalValue *Src) {
- setAlignment(Src->getAlignment());
- setSection(Src->getSection());
setVisibility(Src->getVisibility());
setUnnamedAddr(Src->hasUnnamedAddr());
+ setDLLStorageClass(Src->getDLLStorageClass());
+}
+
+unsigned GlobalValue::getAlignment() const {
+ if (auto *GA = dyn_cast<GlobalAlias>(this)) {
+ // In general we cannot compute this at the IR level, but we try.
+ if (const GlobalObject *GO = GA->getBaseObject())
+ return GO->getAlignment();
+
+ // FIXME: we should also be able to handle:
+ // Alias = Global + Offset
+ // Alias = Absolute
+ return 0;
+ }
+ return cast<GlobalObject>(this)->getAlignment();
}
-void GlobalValue::setAlignment(unsigned Align) {
+void GlobalObject::setAlignment(unsigned Align) {
assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
assert(Align <= MaximumAlignment &&
"Alignment is greater than MaximumAlignment!");
- Alignment = Log2_32(Align) + 1;
+ setGlobalValueSubClassData(Log2_32(Align) + 1);
assert(getAlignment() == Align && "Alignment representation error!");
}
+void GlobalObject::copyAttributesFrom(const GlobalValue *Src) {
+ const auto *GV = cast<GlobalObject>(Src);
+ GlobalValue::copyAttributesFrom(GV);
+ setAlignment(GV->getAlignment());
+ setSection(GV->getSection());
+}
+
+const char *GlobalValue::getSection() const {
+ if (auto *GA = dyn_cast<GlobalAlias>(this)) {
+ // In general we cannot compute this at the IR level, but we try.
+ if (const GlobalObject *GO = GA->getBaseObject())
+ return GO->getSection();
+ return "";
+ }
+ return cast<GlobalObject>(this)->getSection();
+}
+
+Comdat *GlobalValue::getComdat() {
+ if (auto *GA = dyn_cast<GlobalAlias>(this)) {
+ // In general we cannot compute this at the IR level, but we try.
+ if (const GlobalObject *GO = GA->getBaseObject())
+ return const_cast<GlobalObject *>(GO)->getComdat();
+ return nullptr;
+ }
+ return cast<GlobalObject>(this)->getComdat();
+}
+
+void GlobalObject::setSection(StringRef S) { Section = S; }
+
bool GlobalValue::isDeclaration() const {
// Globals are definitions if they have an initializer.
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(this))
@@ -76,22 +123,21 @@ bool GlobalValue::isDeclaration() const {
assert(isa<GlobalAlias>(this));
return false;
}
-
+
//===----------------------------------------------------------------------===//
// GlobalVariable Implementation
//===----------------------------------------------------------------------===//
GlobalVariable::GlobalVariable(Type *Ty, bool constant, LinkageTypes Link,
- Constant *InitVal,
- const Twine &Name, ThreadLocalMode TLMode,
- unsigned AddressSpace,
+ Constant *InitVal, const Twine &Name,
+ ThreadLocalMode TLMode, unsigned AddressSpace,
bool isExternallyInitialized)
- : GlobalValue(PointerType::get(Ty, AddressSpace),
- Value::GlobalVariableVal,
- OperandTraits<GlobalVariable>::op_begin(this),
- InitVal != 0, Link, Name),
- isConstantGlobal(constant), threadLocalMode(TLMode),
- isExternallyInitializedConstant(isExternallyInitialized) {
+ : GlobalObject(PointerType::get(Ty, AddressSpace), Value::GlobalVariableVal,
+ OperandTraits<GlobalVariable>::op_begin(this),
+ InitVal != nullptr, Link, Name),
+ isConstantGlobal(constant),
+ isExternallyInitializedConstant(isExternallyInitialized) {
+ setThreadLocalMode(TLMode);
if (InitVal) {
assert(InitVal->getType() == Ty &&
"Initializer should be the same type as the GlobalVariable!");
@@ -103,24 +149,23 @@ GlobalVariable::GlobalVariable(Type *Ty, bool constant, LinkageTypes Link,
GlobalVariable::GlobalVariable(Module &M, Type *Ty, bool constant,
LinkageTypes Link, Constant *InitVal,
- const Twine &Name,
- GlobalVariable *Before, ThreadLocalMode TLMode,
- unsigned AddressSpace,
+ const Twine &Name, GlobalVariable *Before,
+ ThreadLocalMode TLMode, unsigned AddressSpace,
bool isExternallyInitialized)
- : GlobalValue(PointerType::get(Ty, AddressSpace),
- Value::GlobalVariableVal,
- OperandTraits<GlobalVariable>::op_begin(this),
- InitVal != 0, Link, Name),
- isConstantGlobal(constant), threadLocalMode(TLMode),
- isExternallyInitializedConstant(isExternallyInitialized) {
+ : GlobalObject(PointerType::get(Ty, AddressSpace), Value::GlobalVariableVal,
+ OperandTraits<GlobalVariable>::op_begin(this),
+ InitVal != nullptr, Link, Name),
+ isConstantGlobal(constant),
+ isExternallyInitializedConstant(isExternallyInitialized) {
+ setThreadLocalMode(TLMode);
if (InitVal) {
assert(InitVal->getType() == Ty &&
"Initializer should be the same type as the GlobalVariable!");
Op<0>() = InitVal;
}
-
+
LeakDetector::addGarbageObject(this);
-
+
if (Before)
Before->getParent()->getGlobalList().insert(Before, this);
else
@@ -164,9 +209,9 @@ void GlobalVariable::replaceUsesOfWithOnConstant(Value *From, Value *To,
}
void GlobalVariable::setInitializer(Constant *InitVal) {
- if (InitVal == 0) {
+ if (!InitVal) {
if (hasInitializer()) {
- Op<0>().set(0);
+ Op<0>().set(nullptr);
NumOperands = 0;
}
} else {
@@ -182,9 +227,9 @@ void GlobalVariable::setInitializer(Constant *InitVal) {
/// create a GlobalVariable) from the GlobalVariable Src to this one.
void GlobalVariable::copyAttributesFrom(const GlobalValue *Src) {
assert(isa<GlobalVariable>(Src) && "Expected a GlobalVariable!");
- GlobalValue::copyAttributesFrom(Src);
+ GlobalObject::copyAttributesFrom(Src);
const GlobalVariable *SrcVar = cast<GlobalVariable>(Src);
- setThreadLocal(SrcVar->isThreadLocal());
+ setThreadLocalMode(SrcVar->getThreadLocalMode());
}
@@ -192,20 +237,47 @@ void GlobalVariable::copyAttributesFrom(const GlobalValue *Src) {
// GlobalAlias Implementation
//===----------------------------------------------------------------------===//
-GlobalAlias::GlobalAlias(Type *Ty, LinkageTypes Link,
- const Twine &Name, Constant* aliasee,
+GlobalAlias::GlobalAlias(Type *Ty, unsigned AddressSpace, LinkageTypes Link,
+ const Twine &Name, Constant *Aliasee,
Module *ParentModule)
- : GlobalValue(Ty, Value::GlobalAliasVal, &Op<0>(), 1, Link, Name) {
+ : GlobalValue(PointerType::get(Ty, AddressSpace), Value::GlobalAliasVal,
+ &Op<0>(), 1, Link, Name) {
LeakDetector::addGarbageObject(this);
-
- if (aliasee)
- assert(aliasee->getType() == Ty && "Alias and aliasee types should match!");
- Op<0>() = aliasee;
+ Op<0>() = Aliasee;
if (ParentModule)
ParentModule->getAliasList().push_back(this);
}
+GlobalAlias *GlobalAlias::create(Type *Ty, unsigned AddressSpace,
+ LinkageTypes Link, const Twine &Name,
+ Constant *Aliasee, Module *ParentModule) {
+ return new GlobalAlias(Ty, AddressSpace, Link, Name, Aliasee, ParentModule);
+}
+
+GlobalAlias *GlobalAlias::create(Type *Ty, unsigned AddressSpace,
+ LinkageTypes Linkage, const Twine &Name,
+ Module *Parent) {
+ return create(Ty, AddressSpace, Linkage, Name, nullptr, Parent);
+}
+
+GlobalAlias *GlobalAlias::create(Type *Ty, unsigned AddressSpace,
+ LinkageTypes Linkage, const Twine &Name,
+ GlobalValue *Aliasee) {
+ return create(Ty, AddressSpace, Linkage, Name, Aliasee, Aliasee->getParent());
+}
+
+GlobalAlias *GlobalAlias::create(LinkageTypes Link, const Twine &Name,
+ GlobalValue *Aliasee) {
+ PointerType *PTy = Aliasee->getType();
+ return create(PTy->getElementType(), PTy->getAddressSpace(), Link, Name,
+ Aliasee);
+}
+
+GlobalAlias *GlobalAlias::create(const Twine &Name, GlobalValue *Aliasee) {
+ return create(Aliasee->getLinkage(), Name, Aliasee);
+}
+
void GlobalAlias::setParent(Module *parent) {
if (getParent())
LeakDetector::addGarbageObject(this);
@@ -225,45 +297,5 @@ void GlobalAlias::eraseFromParent() {
void GlobalAlias::setAliasee(Constant *Aliasee) {
assert((!Aliasee || Aliasee->getType() == getType()) &&
"Alias and aliasee types should match!");
-
setOperand(0, Aliasee);
}
-
-GlobalValue *GlobalAlias::getAliasedGlobal() {
- Constant *C = getAliasee();
- if (C == 0) return 0;
-
- if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
- return GV;
-
- ConstantExpr *CE = cast<ConstantExpr>(C);
- assert((CE->getOpcode() == Instruction::BitCast ||
- CE->getOpcode() == Instruction::GetElementPtr) &&
- "Unsupported aliasee");
-
- return cast<GlobalValue>(CE->getOperand(0));
-}
-
-GlobalValue *GlobalAlias::resolveAliasedGlobal(bool stopOnWeak) {
- SmallPtrSet<GlobalValue*, 3> Visited;
-
- // Check if we need to stop early.
- if (stopOnWeak && mayBeOverridden())
- return this;
-
- GlobalValue *GV = getAliasedGlobal();
- Visited.insert(GV);
-
- // Iterate over aliasing chain, stopping on weak alias if necessary.
- while (GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) {
- if (stopOnWeak && GA->mayBeOverridden())
- break;
-
- GV = GA->getAliasedGlobal();
-
- if (!Visited.insert(GV))
- return 0;
- }
-
- return GV;
-}
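The removal of getAliasedGlobal()/resolveAliasedGlobal() accompanies the new GlobalAlias::create() overloads added above. A short illustrative sketch (not from the patch) of the simplest and the fully explicit forms, assuming Target is an existing GlobalValue:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
using namespace llvm;

static void addAliases(GlobalValue *Target) {
  // Simplest form: type, address space, linkage and parent module are all
  // taken from the aliasee.
  GlobalAlias::create("target_alias", Target);

  // Fully explicit form, spelling out element type and address space.
  PointerType *PTy = Target->getType();
  GlobalAlias::create(PTy->getElementType(), PTy->getAddressSpace(),
                      GlobalValue::InternalLinkage, "target_alias_internal",
                      Target);
}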
diff --git a/contrib/llvm/lib/IR/IRPrintingPasses.cpp b/contrib/llvm/lib/IR/IRPrintingPasses.cpp
new file mode 100644
index 0000000..c8a1747
--- /dev/null
+++ b/contrib/llvm/lib/IR/IRPrintingPasses.cpp
@@ -0,0 +1,127 @@
+//===--- IRPrintingPasses.cpp - Module and Function printing passes -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// PrintModulePass and PrintFunctionPass implementations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+PrintModulePass::PrintModulePass() : OS(dbgs()) {}
+PrintModulePass::PrintModulePass(raw_ostream &OS, const std::string &Banner)
+ : OS(OS), Banner(Banner) {}
+
+PreservedAnalyses PrintModulePass::run(Module *M) {
+ OS << Banner << *M;
+ return PreservedAnalyses::all();
+}
+
+PrintFunctionPass::PrintFunctionPass() : OS(dbgs()) {}
+PrintFunctionPass::PrintFunctionPass(raw_ostream &OS, const std::string &Banner)
+ : OS(OS), Banner(Banner) {}
+
+PreservedAnalyses PrintFunctionPass::run(Function *F) {
+ OS << Banner << static_cast<Value &>(*F);
+ return PreservedAnalyses::all();
+}
+
+namespace {
+
+class PrintModulePassWrapper : public ModulePass {
+ PrintModulePass P;
+
+public:
+ static char ID;
+ PrintModulePassWrapper() : ModulePass(ID) {}
+ PrintModulePassWrapper(raw_ostream &OS, const std::string &Banner)
+ : ModulePass(ID), P(OS, Banner) {}
+
+ bool runOnModule(Module &M) override {
+ P.run(&M);
+ return false;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+};
+
+class PrintFunctionPassWrapper : public FunctionPass {
+ PrintFunctionPass P;
+
+public:
+ static char ID;
+ PrintFunctionPassWrapper() : FunctionPass(ID) {}
+ PrintFunctionPassWrapper(raw_ostream &OS, const std::string &Banner)
+ : FunctionPass(ID), P(OS, Banner) {}
+
+ // This pass just prints a banner followed by the function as it's processed.
+ bool runOnFunction(Function &F) override {
+ P.run(&F);
+ return false;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+};
+
+class PrintBasicBlockPass : public BasicBlockPass {
+ raw_ostream &Out;
+ std::string Banner;
+
+public:
+ static char ID;
+ PrintBasicBlockPass() : BasicBlockPass(ID), Out(dbgs()) {}
+ PrintBasicBlockPass(raw_ostream &Out, const std::string &Banner)
+ : BasicBlockPass(ID), Out(Out), Banner(Banner) {}
+
+ bool runOnBasicBlock(BasicBlock &BB) override {
+ Out << Banner << BB;
+ return false;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+};
+
+}
+
+char PrintModulePassWrapper::ID = 0;
+INITIALIZE_PASS(PrintModulePassWrapper, "print-module",
+ "Print module to stderr", false, false)
+char PrintFunctionPassWrapper::ID = 0;
+INITIALIZE_PASS(PrintFunctionPassWrapper, "print-function",
+ "Print function to stderr", false, false)
+char PrintBasicBlockPass::ID = 0;
+INITIALIZE_PASS(PrintBasicBlockPass, "print-bb", "Print BB to stderr", false,
+ false)
+
+ModulePass *llvm::createPrintModulePass(llvm::raw_ostream &OS,
+ const std::string &Banner) {
+ return new PrintModulePassWrapper(OS, Banner);
+}
+
+FunctionPass *llvm::createPrintFunctionPass(llvm::raw_ostream &OS,
+ const std::string &Banner) {
+ return new PrintFunctionPassWrapper(OS, Banner);
+}
+
+BasicBlockPass *llvm::createPrintBasicBlockPass(llvm::raw_ostream &OS,
+ const std::string &Banner) {
+ return new PrintBasicBlockPass(OS, Banner);
+}
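These wrappers are what the legacy pass manager instantiates through the create* entry points defined above. A hedged sketch of how a caller might schedule them (not part of the patch; the module M is assumed to come from elsewhere):

#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/Module.h"
#include "llvm/PassManager.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void dumpIR(Module &M) {
  PassManager PM;
  // Print the whole module once, then each function as it is visited.
  PM.add(createPrintModulePass(errs(), "; module dump\n"));
  PM.add(createPrintFunctionPass(errs(), "; function dump\n"));
  PM.run(M);
}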
diff --git a/contrib/llvm/lib/IR/InlineAsm.cpp b/contrib/llvm/lib/IR/InlineAsm.cpp
index 9f2a9fe..a3e1da3 100644
--- a/contrib/llvm/lib/IR/InlineAsm.cpp
+++ b/contrib/llvm/lib/IR/InlineAsm.cpp
@@ -64,16 +64,6 @@ InlineAsm::ConstraintInfo::ConstraintInfo() :
currentAlternativeIndex(0) {
}
-/// Copy constructor.
-InlineAsm::ConstraintInfo::ConstraintInfo(const ConstraintInfo &other) :
- Type(other.Type), isEarlyClobber(other.isEarlyClobber),
- MatchingInput(other.MatchingInput), isCommutative(other.isCommutative),
- isIndirect(other.isIndirect), Codes(other.Codes),
- isMultipleAlternative(other.isMultipleAlternative),
- multipleAlternatives(other.multipleAlternatives),
- currentAlternativeIndex(other.currentAlternativeIndex) {
-}
-
/// Parse - Analyze the specified string (e.g. "==&{eax}") and fill in the
/// fields in this structure. If the constraint string is not understood,
/// return true, otherwise return false.
@@ -284,7 +274,7 @@ bool InlineAsm::Verify(FunctionType *Ty, StringRef ConstStr) {
break;
default:
StructType *STy = dyn_cast<StructType>(Ty->getReturnType());
- if (STy == 0 || STy->getNumElements() != NumOutputs)
+ if (!STy || STy->getNumElements() != NumOutputs)
return false;
break;
}
diff --git a/contrib/llvm/lib/IR/Instruction.cpp b/contrib/llvm/lib/IR/Instruction.cpp
index a7773c4..86421c4 100644
--- a/contrib/llvm/lib/IR/Instruction.cpp
+++ b/contrib/llvm/lib/IR/Instruction.cpp
@@ -12,18 +12,18 @@
//===----------------------------------------------------------------------===//
#include "llvm/IR/Instruction.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LeakDetector.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/CallSite.h"
-#include "llvm/Support/LeakDetector.h"
using namespace llvm;
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
Instruction *InsertBefore)
- : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(0) {
+ : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
// Make sure that we get added to a basicblock
LeakDetector::addGarbageObject(this);
@@ -35,9 +35,13 @@ Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
}
}
+const DataLayout *Instruction::getDataLayout() const {
+ return getParent()->getDataLayout();
+}
+
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
BasicBlock *InsertAtEnd)
- : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(0) {
+ : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
// Make sure that we get added to a basicblock
LeakDetector::addGarbageObject(this);
@@ -49,7 +53,7 @@ Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
// Out of line virtual method, so the vtable, etc has a home.
Instruction::~Instruction() {
- assert(Parent == 0 && "Instruction still linked in the program!");
+ assert(!Parent && "Instruction still linked in the program!");
if (hasMetadataHashEntry())
clearMetadataHashEntries();
}
@@ -141,31 +145,31 @@ void Instruction::setFastMathFlags(FastMathFlags FMF) {
/// Determine whether the unsafe-algebra flag is set.
bool Instruction::hasUnsafeAlgebra() const {
- assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
return cast<FPMathOperator>(this)->hasUnsafeAlgebra();
}
/// Determine whether the no-NaNs flag is set.
bool Instruction::hasNoNaNs() const {
- assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
return cast<FPMathOperator>(this)->hasNoNaNs();
}
/// Determine whether the no-infs flag is set.
bool Instruction::hasNoInfs() const {
- assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
return cast<FPMathOperator>(this)->hasNoInfs();
}
/// Determine whether the no-signed-zeros flag is set.
bool Instruction::hasNoSignedZeros() const {
- assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
return cast<FPMathOperator>(this)->hasNoSignedZeros();
}
/// Determine whether the allow-reciprocal flag is set.
bool Instruction::hasAllowReciprocal() const {
- assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
return cast<FPMathOperator>(this)->hasAllowReciprocal();
}
@@ -173,7 +177,7 @@ bool Instruction::hasAllowReciprocal() const {
/// operator which supports these flags. See LangRef.html for the meaning of
/// these flags.
FastMathFlags Instruction::getFastMathFlags() const {
- assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
return cast<FPMathOperator>(this)->getFastMathFlags();
}
@@ -258,6 +262,59 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
}
}
+/// Return true if both instructions have the same special state
+/// This must be kept in sync with lib/Transforms/IPO/MergeFunctions.cpp.
+static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
+ bool IgnoreAlignment = false) {
+ assert(I1->getOpcode() == I2->getOpcode() &&
+ "Can not compare special state of different instructions");
+
+ if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
+ return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
+ (LI->getAlignment() == cast<LoadInst>(I2)->getAlignment() ||
+ IgnoreAlignment) &&
+ LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
+ LI->getSynchScope() == cast<LoadInst>(I2)->getSynchScope();
+ if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
+ return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
+ (SI->getAlignment() == cast<StoreInst>(I2)->getAlignment() ||
+ IgnoreAlignment) &&
+ SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
+ SI->getSynchScope() == cast<StoreInst>(I2)->getSynchScope();
+ if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
+ return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
+ if (const CallInst *CI = dyn_cast<CallInst>(I1))
+ return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
+ CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
+ CI->getAttributes() == cast<CallInst>(I2)->getAttributes();
+ if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
+ return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
+ CI->getAttributes() ==
+ cast<InvokeInst>(I2)->getAttributes();
+ if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
+ return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
+ if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
+ return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
+ if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
+ return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
+ FI->getSynchScope() == cast<FenceInst>(I2)->getSynchScope();
+ if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
+ return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
+ CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
+ CXI->getSuccessOrdering() ==
+ cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
+ CXI->getFailureOrdering() ==
+ cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
+ CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I2)->getSynchScope();
+ if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
+ return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
+ RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
+ RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
+ RMWI->getSynchScope() == cast<AtomicRMWInst>(I2)->getSynchScope();
+
+ return true;
+}
+
/// isIdenticalTo - Return true if the specified instruction is exactly
/// identical to the current one. This means that all operands match and any
/// extra information (e.g. load is volatile) agree.
@@ -275,57 +332,22 @@ bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
getType() != I->getType())
return false;
+ // If both instructions have no operands, they are identical.
+ if (getNumOperands() == 0 && I->getNumOperands() == 0)
+ return haveSameSpecialState(this, I);
+
// We have two instructions of identical opcode and #operands. Check to see
// if all operands are the same.
- for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
- if (getOperand(i) != I->getOperand(i))
- return false;
+ if (!std::equal(op_begin(), op_end(), I->op_begin()))
+ return false;
- // Check special state that is a part of some instructions.
- if (const LoadInst *LI = dyn_cast<LoadInst>(this))
- return LI->isVolatile() == cast<LoadInst>(I)->isVolatile() &&
- LI->getAlignment() == cast<LoadInst>(I)->getAlignment() &&
- LI->getOrdering() == cast<LoadInst>(I)->getOrdering() &&
- LI->getSynchScope() == cast<LoadInst>(I)->getSynchScope();
- if (const StoreInst *SI = dyn_cast<StoreInst>(this))
- return SI->isVolatile() == cast<StoreInst>(I)->isVolatile() &&
- SI->getAlignment() == cast<StoreInst>(I)->getAlignment() &&
- SI->getOrdering() == cast<StoreInst>(I)->getOrdering() &&
- SI->getSynchScope() == cast<StoreInst>(I)->getSynchScope();
- if (const CmpInst *CI = dyn_cast<CmpInst>(this))
- return CI->getPredicate() == cast<CmpInst>(I)->getPredicate();
- if (const CallInst *CI = dyn_cast<CallInst>(this))
- return CI->isTailCall() == cast<CallInst>(I)->isTailCall() &&
- CI->getCallingConv() == cast<CallInst>(I)->getCallingConv() &&
- CI->getAttributes() == cast<CallInst>(I)->getAttributes();
- if (const InvokeInst *CI = dyn_cast<InvokeInst>(this))
- return CI->getCallingConv() == cast<InvokeInst>(I)->getCallingConv() &&
- CI->getAttributes() == cast<InvokeInst>(I)->getAttributes();
- if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(this))
- return IVI->getIndices() == cast<InsertValueInst>(I)->getIndices();
- if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(this))
- return EVI->getIndices() == cast<ExtractValueInst>(I)->getIndices();
- if (const FenceInst *FI = dyn_cast<FenceInst>(this))
- return FI->getOrdering() == cast<FenceInst>(FI)->getOrdering() &&
- FI->getSynchScope() == cast<FenceInst>(FI)->getSynchScope();
- if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(this))
- return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I)->isVolatile() &&
- CXI->getOrdering() == cast<AtomicCmpXchgInst>(I)->getOrdering() &&
- CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I)->getSynchScope();
- if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(this))
- return RMWI->getOperation() == cast<AtomicRMWInst>(I)->getOperation() &&
- RMWI->isVolatile() == cast<AtomicRMWInst>(I)->isVolatile() &&
- RMWI->getOrdering() == cast<AtomicRMWInst>(I)->getOrdering() &&
- RMWI->getSynchScope() == cast<AtomicRMWInst>(I)->getSynchScope();
if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
const PHINode *otherPHI = cast<PHINode>(I);
- for (unsigned i = 0, e = thisPHI->getNumOperands(); i != e; ++i) {
- if (thisPHI->getIncomingBlock(i) != otherPHI->getIncomingBlock(i))
- return false;
- }
- return true;
+ return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
+ otherPHI->block_begin());
}
- return true;
+
+ return haveSameSpecialState(this, I);
}
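
To make the effect of the special-state comparison concrete, here is an illustrative sketch (the helper below is invented, not part of this patch): two loads of the same pointer share opcode, type and operands, yet differ in their volatile bit, so haveSameSpecialState(), and therefore isIdenticalTo(), rejects them.

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Invented helper: build two loads that differ only in the volatile flag.
static bool identityDemo(Value *Ptr, BasicBlock *BB) {
  IRBuilder<> B(BB);
  LoadInst *L1 = B.CreateLoad(Ptr, "a");
  LoadInst *L2 = B.CreateLoad(Ptr, "b");
  L2->setVolatile(true);
  // Same operands and type, but volatility is part of the special state
  // compared above, so this is expected to return false.
  return L1->isIdenticalTo(L2);
}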
// isSameOperationAs
@@ -352,65 +374,25 @@ bool Instruction::isSameOperationAs(const Instruction *I,
getOperand(i)->getType() != I->getOperand(i)->getType())
return false;
- // Check special state that is a part of some instructions.
- if (const LoadInst *LI = dyn_cast<LoadInst>(this))
- return LI->isVolatile() == cast<LoadInst>(I)->isVolatile() &&
- (LI->getAlignment() == cast<LoadInst>(I)->getAlignment() ||
- IgnoreAlignment) &&
- LI->getOrdering() == cast<LoadInst>(I)->getOrdering() &&
- LI->getSynchScope() == cast<LoadInst>(I)->getSynchScope();
- if (const StoreInst *SI = dyn_cast<StoreInst>(this))
- return SI->isVolatile() == cast<StoreInst>(I)->isVolatile() &&
- (SI->getAlignment() == cast<StoreInst>(I)->getAlignment() ||
- IgnoreAlignment) &&
- SI->getOrdering() == cast<StoreInst>(I)->getOrdering() &&
- SI->getSynchScope() == cast<StoreInst>(I)->getSynchScope();
- if (const CmpInst *CI = dyn_cast<CmpInst>(this))
- return CI->getPredicate() == cast<CmpInst>(I)->getPredicate();
- if (const CallInst *CI = dyn_cast<CallInst>(this))
- return CI->isTailCall() == cast<CallInst>(I)->isTailCall() &&
- CI->getCallingConv() == cast<CallInst>(I)->getCallingConv() &&
- CI->getAttributes() == cast<CallInst>(I)->getAttributes();
- if (const InvokeInst *CI = dyn_cast<InvokeInst>(this))
- return CI->getCallingConv() == cast<InvokeInst>(I)->getCallingConv() &&
- CI->getAttributes() ==
- cast<InvokeInst>(I)->getAttributes();
- if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(this))
- return IVI->getIndices() == cast<InsertValueInst>(I)->getIndices();
- if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(this))
- return EVI->getIndices() == cast<ExtractValueInst>(I)->getIndices();
- if (const FenceInst *FI = dyn_cast<FenceInst>(this))
- return FI->getOrdering() == cast<FenceInst>(I)->getOrdering() &&
- FI->getSynchScope() == cast<FenceInst>(I)->getSynchScope();
- if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(this))
- return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I)->isVolatile() &&
- CXI->getOrdering() == cast<AtomicCmpXchgInst>(I)->getOrdering() &&
- CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I)->getSynchScope();
- if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(this))
- return RMWI->getOperation() == cast<AtomicRMWInst>(I)->getOperation() &&
- RMWI->isVolatile() == cast<AtomicRMWInst>(I)->isVolatile() &&
- RMWI->getOrdering() == cast<AtomicRMWInst>(I)->getOrdering() &&
- RMWI->getSynchScope() == cast<AtomicRMWInst>(I)->getSynchScope();
-
- return true;
+ return haveSameSpecialState(this, I, IgnoreAlignment);
}
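
A companion sketch for the alignment-insensitive path (assuming the CompareIgnoringAlignment flag declared in Instruction.h, which is what sets IgnoreAlignment above):

#include "llvm/IR/Instructions.h"
using namespace llvm;

// A and B are assumed to be loads that differ only in their alignment:
// isIdenticalTo() would reject them, but the flag relaxes the alignment
// check performed by haveSameSpecialState().
static bool sameIgnoringAlignment(const LoadInst *A, const LoadInst *B) {
  return A->isSameOperationAs(B, Instruction::CompareIgnoringAlignment);
}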
/// isUsedOutsideOfBlock - Return true if there are any uses of I outside of the
/// specified block. Note that PHI nodes are considered to evaluate their
/// operands in the corresponding predecessor block.
bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
- for (const_use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
+ for (const Use &U : uses()) {
// PHI nodes use values in the corresponding predecessor block. For other
// instructions, just check to see whether the parent of the use matches up.
- const User *U = *UI;
- const PHINode *PN = dyn_cast<PHINode>(U);
- if (PN == 0) {
- if (cast<Instruction>(U)->getParent() != BB)
+ const Instruction *I = cast<Instruction>(U.getUser());
+ const PHINode *PN = dyn_cast<PHINode>(I);
+ if (!PN) {
+ if (I->getParent() != BB)
return true;
continue;
}
- if (PN->getIncomingBlock(UI) != BB)
+ if (PN->getIncomingBlock(U) != BB)
return true;
}
return false;
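
An illustrative sketch of the PHI special case described above (the function and value names are invented): a value whose only user is a PHI node in a successor block is still treated as used inside BB when the matching incoming edge comes from BB.

#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

static void phiUseDemo(Function *F) {
  LLVMContext &C = F->getContext();
  BasicBlock *Entry = BasicBlock::Create(C, "entry", F);
  BasicBlock *Next  = BasicBlock::Create(C, "next", F);

  IRBuilder<> B(Entry);
  AllocaInst *V = B.CreateAlloca(B.getInt32Ty(), nullptr, "v");
  B.CreateBr(Next);

  B.SetInsertPoint(Next);
  PHINode *P = B.CreatePHI(V->getType(), 1, "p");
  P->addIncoming(V, Entry);

  // V's only use is the PHI in 'next', but that use is evaluated in the
  // incoming block 'entry', so this is expected to be false.
  bool Outside = V->isUsedOutsideOfBlock(Entry);
  (void)Outside;
}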
@@ -548,8 +530,8 @@ Instruction *Instruction::clone() const {
// new one.
SmallVector<std::pair<unsigned, MDNode*>, 4> TheMDs;
getAllMetadataOtherThanDebugLoc(TheMDs);
- for (unsigned i = 0, e = TheMDs.size(); i != e; ++i)
- New->setMetadata(TheMDs[i].first, TheMDs[i].second);
+ for (const auto &MD : TheMDs)
+ New->setMetadata(MD.first, MD.second);
New->setDebugLoc(getDebugLoc());
return New;
diff --git a/contrib/llvm/lib/IR/Instructions.cpp b/contrib/llvm/lib/IR/Instructions.cpp
index 8a6b77b..9553252 100644
--- a/contrib/llvm/lib/IR/Instructions.cpp
+++ b/contrib/llvm/lib/IR/Instructions.cpp
@@ -14,14 +14,14 @@
#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
-#include "llvm/Support/CallSite.h"
-#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;
@@ -68,7 +68,7 @@ const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
return "vector select condition element type must be i1";
VectorType *ET = dyn_cast<VectorType>(Op1->getType());
- if (ET == 0)
+ if (!ET)
return "selected values for vector select must be vectors";
if (ET->getNumElements() != VT->getNumElements())
return "vector select requires selected vectors to have "
@@ -76,7 +76,7 @@ const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
} else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
return "select condition must be i1 or <n x i1>";
}
- return 0;
+ return nullptr;
}
@@ -123,7 +123,7 @@ Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);
// Nuke the last value.
- Op<-1>().set(0);
+ Op<-1>().set(nullptr);
--NumOperands;
// If the PHI node is dead, because it has zero entries, nuke it now.
@@ -164,7 +164,7 @@ Value *PHINode::hasConstantValue() const {
for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
if (ConstantValue != this)
- return 0; // Incoming values not all the same.
+ return nullptr; // Incoming values not all the same.
// The case where the first value is this PHI.
ConstantValue = getIncomingValue(i);
}
@@ -180,14 +180,14 @@ Value *PHINode::hasConstantValue() const {
LandingPadInst::LandingPadInst(Type *RetTy, Value *PersonalityFn,
unsigned NumReservedValues, const Twine &NameStr,
Instruction *InsertBefore)
- : Instruction(RetTy, Instruction::LandingPad, 0, 0, InsertBefore) {
+ : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
init(PersonalityFn, 1 + NumReservedValues, NameStr);
}
LandingPadInst::LandingPadInst(Type *RetTy, Value *PersonalityFn,
unsigned NumReservedValues, const Twine &NameStr,
BasicBlock *InsertAtEnd)
- : Instruction(RetTy, Instruction::LandingPad, 0, 0, InsertAtEnd) {
+ : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
init(PersonalityFn, 1 + NumReservedValues, NameStr);
}
@@ -248,7 +248,7 @@ void LandingPadInst::growOperands(unsigned Size) {
Use::zap(OldOps, OldOps + e, true);
}
-void LandingPadInst::addClause(Value *Val) {
+void LandingPadInst::addClause(Constant *Val) {
unsigned OpNo = getNumOperands();
growOperands(1);
assert(OpNo < ReservedSpace && "Growing didn't work!");
@@ -324,7 +324,7 @@ CallInst::CallInst(const CallInst &CI)
OperandTraits<CallInst>::op_end(this) - CI.getNumOperands(),
CI.getNumOperands()) {
setAttributes(CI.getAttributes());
- setTailCall(CI.isTailCall());
+ setTailCallKind(CI.getTailCallKind());
setCallingConv(CI.getCallingConv());
std::copy(CI.op_begin(), CI.op_end(), op_begin());
@@ -420,8 +420,8 @@ static Instruction *createMalloc(Instruction *InsertBefore,
// prototype malloc as "void *malloc(size_t)"
MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy, NULL);
PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
- CallInst *MCall = NULL;
- Instruction *Result = NULL;
+ CallInst *MCall = nullptr;
+ Instruction *Result = nullptr;
if (InsertBefore) {
MCall = CallInst::Create(MallocFunc, AllocSize, "malloccall", InsertBefore);
Result = MCall;
@@ -458,7 +458,7 @@ Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
Value *AllocSize, Value *ArraySize,
Function * MallocF,
const Twine &Name) {
- return createMalloc(InsertBefore, NULL, IntPtrTy, AllocTy, AllocSize,
+ return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
ArraySize, MallocF, Name);
}
@@ -474,7 +474,7 @@ Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
Type *IntPtrTy, Type *AllocTy,
Value *AllocSize, Value *ArraySize,
Function *MallocF, const Twine &Name) {
- return createMalloc(NULL, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
+ return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
ArraySize, MallocF, Name);
}
@@ -492,7 +492,7 @@ static Instruction* createFree(Value* Source, Instruction *InsertBefore,
Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
// prototype free as "void free(void*)"
Value *FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy, NULL);
- CallInst* Result = NULL;
+ CallInst* Result = nullptr;
Value *PtrCast = Source;
if (InsertBefore) {
if (Source->getType() != IntPtrTy)
@@ -512,14 +512,14 @@ static Instruction* createFree(Value* Source, Instruction *InsertBefore,
/// CreateFree - Generate the IR for a call to the builtin free function.
Instruction * CallInst::CreateFree(Value* Source, Instruction *InsertBefore) {
- return createFree(Source, InsertBefore, NULL);
+ return createFree(Source, InsertBefore, nullptr);
}
/// CreateFree - Generate the IR for a call to the builtin free function.
/// Note: This function does not add the call to the basic block, that is the
/// responsibility of the caller.
Instruction* CallInst::CreateFree(Value* Source, BasicBlock *InsertAtEnd) {
- Instruction* FreeCall = createFree(Source, NULL, InsertAtEnd);
+ Instruction* FreeCall = createFree(Source, nullptr, InsertAtEnd);
assert(FreeCall && "CreateFree did not create a CallInst");
return FreeCall;
}
@@ -699,11 +699,11 @@ BasicBlock *ResumeInst::getSuccessorV(unsigned idx) const {
UnreachableInst::UnreachableInst(LLVMContext &Context,
Instruction *InsertBefore)
: TerminatorInst(Type::getVoidTy(Context), Instruction::Unreachable,
- 0, 0, InsertBefore) {
+ nullptr, 0, InsertBefore) {
}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
: TerminatorInst(Type::getVoidTy(Context), Instruction::Unreachable,
- 0, 0, InsertAtEnd) {
+ nullptr, 0, InsertAtEnd) {
}
unsigned UnreachableInst::getNumSuccessorsV() const {
@@ -732,7 +732,7 @@ BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
: TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
OperandTraits<BranchInst>::op_end(this) - 1,
1, InsertBefore) {
- assert(IfTrue != 0 && "Branch destination may not be null!");
+ assert(IfTrue && "Branch destination may not be null!");
Op<-1>() = IfTrue;
}
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
@@ -752,7 +752,7 @@ BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
: TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
OperandTraits<BranchInst>::op_end(this) - 1,
1, InsertAtEnd) {
- assert(IfTrue != 0 && "Branch destination may not be null!");
+ assert(IfTrue && "Branch destination may not be null!");
Op<-1>() = IfTrue;
}
@@ -852,7 +852,7 @@ AllocaInst::AllocaInst(Type *Ty, Value *ArraySize,
AllocaInst::AllocaInst(Type *Ty, const Twine &Name,
Instruction *InsertBefore)
: UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
- getAISize(Ty->getContext(), 0), InsertBefore) {
+ getAISize(Ty->getContext(), nullptr), InsertBefore) {
setAlignment(0);
assert(!Ty->isVoidTy() && "Cannot allocate void!");
setName(Name);
@@ -861,7 +861,7 @@ AllocaInst::AllocaInst(Type *Ty, const Twine &Name,
AllocaInst::AllocaInst(Type *Ty, const Twine &Name,
BasicBlock *InsertAtEnd)
: UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
- getAISize(Ty->getContext(), 0), InsertAtEnd) {
+ getAISize(Ty->getContext(), nullptr), InsertAtEnd) {
setAlignment(0);
assert(!Ty->isVoidTy() && "Cannot allocate void!");
setName(Name);
@@ -893,7 +893,8 @@ void AllocaInst::setAlignment(unsigned Align) {
assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
assert(Align <= MaximumAlignment &&
"Alignment is greater than MaximumAlignment!");
- setInstructionSubclassData(Log2_32(Align) + 1);
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) |
+ (Log2_32(Align) + 1));
assert(getAlignment() == Align && "Alignment representation error!");
}
@@ -916,7 +917,7 @@ bool AllocaInst::isStaticAlloca() const {
// Must be in the entry block.
const BasicBlock *Parent = getParent();
- return Parent == &Parent->getParent()->front();
+ return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
}
//===----------------------------------------------------------------------===//
@@ -1083,7 +1084,7 @@ void StoreInst::AssertOK() {
cast<PointerType>(getOperand(1)->getType())->getElementType()
&& "Ptr must be a pointer to Val type!");
assert(!(isAtomic() && getAlignment() == 0) &&
- "Alignment required for atomic load");
+ "Alignment required for atomic store");
}
@@ -1215,12 +1216,14 @@ void StoreInst::setAlignment(unsigned Align) {
//===----------------------------------------------------------------------===//
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
- AtomicOrdering Ordering,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope) {
Op<0>() = Ptr;
Op<1>() = Cmp;
Op<2>() = NewVal;
- setOrdering(Ordering);
+ setSuccessOrdering(SuccessOrdering);
+ setFailureOrdering(FailureOrdering);
setSynchScope(SynchScope);
assert(getOperand(0) && getOperand(1) && getOperand(2) &&
@@ -1233,32 +1236,42 @@ void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
assert(getOperand(2)->getType() ==
cast<PointerType>(getOperand(0)->getType())->getElementType()
&& "Ptr must be a pointer to NewVal type!");
- assert(Ordering != NotAtomic &&
+ assert(SuccessOrdering != NotAtomic &&
+ "AtomicCmpXchg instructions must be atomic!");
+ assert(FailureOrdering != NotAtomic &&
"AtomicCmpXchg instructions must be atomic!");
+ assert(SuccessOrdering >= FailureOrdering &&
+ "AtomicCmpXchg success ordering must be at least as strong as fail");
+ assert(FailureOrdering != Release && FailureOrdering != AcquireRelease &&
+ "AtomicCmpXchg failure ordering cannot include release semantics");
}
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
- AtomicOrdering Ordering,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope,
Instruction *InsertBefore)
- : Instruction(Cmp->getType(), AtomicCmpXchg,
- OperandTraits<AtomicCmpXchgInst>::op_begin(this),
- OperandTraits<AtomicCmpXchgInst>::operands(this),
- InsertBefore) {
- Init(Ptr, Cmp, NewVal, Ordering, SynchScope);
+ : Instruction(
+ StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext()),
+ nullptr),
+ AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
+ OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
+ Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
}
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
- AtomicOrdering Ordering,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd)
- : Instruction(Cmp->getType(), AtomicCmpXchg,
- OperandTraits<AtomicCmpXchgInst>::op_begin(this),
- OperandTraits<AtomicCmpXchgInst>::operands(this),
- InsertAtEnd) {
- Init(Ptr, Cmp, NewVal, Ordering, SynchScope);
+ : Instruction(
+ StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext()),
+ nullptr),
+ AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
+ OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
+ Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
}
-
+
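
On the builder side, an illustrative sketch of the two-ordering form (helper name invented; with this change cmpxchg yields a { ValueType, i1 } struct, so the loaded value and the success bit are extracted separately):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

static Value *cmpxchgDemo(IRBuilder<> &B, Value *Ptr, Value *Expected,
                          Value *Desired) {
  // The failure ordering may not be stronger than the success ordering and
  // may not carry release semantics, per the asserts in Init() above.
  AtomicCmpXchgInst *CX = B.CreateAtomicCmpXchg(Ptr, Expected, Desired,
                                                SequentiallyConsistent,
                                                Monotonic);
  Value *Loaded  = B.CreateExtractValue(CX, 0, "loaded");
  Value *Success = B.CreateExtractValue(CX, 1, "ok");
  (void)Success;
  return Loaded;
}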
//===----------------------------------------------------------------------===//
// AtomicRMWInst Implementation
//===----------------------------------------------------------------------===//
@@ -1312,7 +1325,7 @@ AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
SynchronizationScope SynchScope,
Instruction *InsertBefore)
- : Instruction(Type::getVoidTy(C), Fence, 0, 0, InsertBefore) {
+ : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
setOrdering(Ordering);
setSynchScope(SynchScope);
}
@@ -1320,7 +1333,7 @@ FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd)
- : Instruction(Type::getVoidTy(C), Fence, 0, 0, InsertAtEnd) {
+ : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
setOrdering(Ordering);
setSynchScope(SynchScope);
}
@@ -1358,7 +1371,7 @@ GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
template <typename IndexTy>
static Type *getIndexedTypeInternal(Type *Ptr, ArrayRef<IndexTy> IdxList) {
PointerType *PTy = dyn_cast<PointerType>(Ptr->getScalarType());
- if (!PTy) return 0; // Type isn't a pointer type!
+ if (!PTy) return nullptr; // Type isn't a pointer type!
Type *Agg = PTy->getElementType();
// Handle the special case of the empty set index set, which is always valid.
@@ -1368,17 +1381,17 @@ static Type *getIndexedTypeInternal(Type *Ptr, ArrayRef<IndexTy> IdxList) {
// If there is at least one index, the top level type must be sized, otherwise
// it cannot be 'stepped over'.
if (!Agg->isSized())
- return 0;
+ return nullptr;
unsigned CurIdx = 1;
for (; CurIdx != IdxList.size(); ++CurIdx) {
CompositeType *CT = dyn_cast<CompositeType>(Agg);
- if (!CT || CT->isPointerTy()) return 0;
+ if (!CT || CT->isPointerTy()) return nullptr;
IndexTy Index = IdxList[CurIdx];
- if (!CT->indexValid(Index)) return 0;
+ if (!CT->indexValid(Index)) return nullptr;
Agg = CT->getTypeAtIndex(Index);
}
- return CurIdx == IdxList.size() ? Agg : 0;
+ return CurIdx == IdxList.size() ? Agg : nullptr;
}
Type *GetElementPtrInst::getIndexedType(Type *Ptr, ArrayRef<Value *> IdxList) {
@@ -1468,7 +1481,7 @@ ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
- if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy(32))
+ if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
return false;
return true;
}
@@ -1515,7 +1528,7 @@ bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
return false;// Second operand of insertelement must be vector element type.
- if (!Index->getType()->isIntegerTy(32))
+ if (!Index->getType()->isIntegerTy())
return false; // Third operand of insertelement must be an integer type.
return true;
}
@@ -1568,7 +1581,7 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
// Mask must be vector of i32.
VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
- if (MaskTy == 0 || !MaskTy->getElementType()->isIntegerTy(32))
+ if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32))
return false;
// Check to see if Mask is valid.
@@ -1577,11 +1590,11 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
if (const ConstantVector *MV = dyn_cast<ConstantVector>(Mask)) {
unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
- for (unsigned i = 0, e = MV->getNumOperands(); i != e; ++i) {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(MV->getOperand(i))) {
+ for (Value *Op : MV->operands()) {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
if (CI->uge(V1Size*2))
return false;
- } else if (!isa<UndefValue>(MV->getOperand(i))) {
+ } else if (!isa<UndefValue>(Op)) {
return false;
}
}
@@ -1701,8 +1714,7 @@ ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
//
Type *ExtractValueInst::getIndexedType(Type *Agg,
ArrayRef<unsigned> Idxs) {
- for (unsigned CurIdx = 0; CurIdx != Idxs.size(); ++CurIdx) {
- unsigned Index = Idxs[CurIdx];
+ for (unsigned Index : Idxs) {
// We can't use CompositeType::indexValid(Index) here.
// indexValid() always returns true for arrays because getelementptr allows
// out-of-bounds indices. Since we don't allow those for extractvalue and
@@ -1711,13 +1723,13 @@ Type *ExtractValueInst::getIndexedType(Type *Agg,
// as easy to check those manually as well.
if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
if (Index >= AT->getNumElements())
- return 0;
+ return nullptr;
} else if (StructType *ST = dyn_cast<StructType>(Agg)) {
if (Index >= ST->getNumElements())
- return 0;
+ return nullptr;
} else {
// Not a valid type to index into.
- return 0;
+ return nullptr;
}
Agg = cast<CompositeType>(Agg)->getTypeAtIndex(Index);
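
For illustration (types and names chosen arbitrarily), the resolver above can be queried directly; out-of-range or mistyped index paths yield nullptr instead of a type:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// The type extractvalue would produce from { i32, [4 x float] } for the
// index path {1, 2}: index 1 selects the array, index 2 an element of it.
static Type *indexedTypeDemo(LLVMContext &C) {
  StructType *STy = StructType::get(Type::getInt32Ty(C),
                                    ArrayType::get(Type::getFloatTy(C), 4),
                                    nullptr);
  unsigned Idxs[] = {1, 2};
  return ExtractValueInst::getIndexedType(STy, Idxs); // float
}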
@@ -2114,8 +2126,27 @@ bool CastInst::isNoopCast(Type *IntPtrTy) const {
return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
}
-/// This function determines if a pair of casts can be eliminated and what
-/// opcode should be used in the elimination. This assumes that there are two
+bool CastInst::isNoopCast(const DataLayout *DL) const {
+ if (!DL) {
+ // Assume maximum pointer size.
+ return isNoopCast(Type::getInt64Ty(getContext()));
+ }
+
+ Type *PtrOpTy = nullptr;
+ if (getOpcode() == Instruction::PtrToInt)
+ PtrOpTy = getOperand(0)->getType();
+ else if (getOpcode() == Instruction::IntToPtr)
+ PtrOpTy = getType();
+
+ Type *IntPtrTy = PtrOpTy
+ ? DL->getIntPtrType(PtrOpTy)
+ : DL->getIntPtrType(getContext(), 0);
+
+ return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
+}
+
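
A small, hedged example of the new DataLayout-aware overload (the wrapper is invented): with a DataLayout the pointer width comes from the module's target description instead of the worst-case 64-bit assumption used when DL is null.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// True when the cast moves no bits for this module's pointer size, e.g. a
// ptrtoint to an integer exactly as wide as the pointer it converts.
static bool isFreeCast(const CastInst *CI, const Module &M) {
  DataLayout DL(&M);
  return CI->isNoopCast(&DL);
}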
+/// This function determines if a pair of casts can be eliminated and what
+/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
/// * %F = firstOpcode SrcTy %x to MidTy
/// * %S = secondOpcode MidTy %F to DstTy
@@ -2206,7 +2237,7 @@ unsigned CastInst::isEliminableCastPair(
case 3:
// No-op cast in second op implies firstOp as long as the DestTy
// is integer and we are not converting between a vector and a
- // non vector type.
+ // non-vector type.
if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
return firstOp;
return 0;
@@ -2302,18 +2333,12 @@ unsigned CastInst::isEliminableCastPair(
// Allowed, use first cast's opcode
return firstOp;
case 14:
- // FIXME: this state can be merged with (2), but the following assert
- // is useful to check the correcteness of the sequence due to semantic
- // change of bitcast.
- assert(
- SrcTy->isPtrOrPtrVectorTy() &&
- MidTy->isPtrOrPtrVectorTy() &&
- DstTy->isPtrOrPtrVectorTy() &&
- SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
- MidTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace() &&
- "Illegal bitcast, addrspacecast sequence!");
- // Allowed, use second cast's opcode
- return secondOp;
+ // bitcast, addrspacecast -> addrspacecast if the element type of
+ // bitcast's source is the same as that of addrspacecast's destination.
+ if (SrcTy->getPointerElementType() == DstTy->getPointerElementType())
+ return Instruction::AddrSpaceCast;
+ return 0;
+
case 15:
// FIXME: this state can be merged with (1), but the following assert
// is useful to check the correctness of the sequence due to semantic
@@ -2453,11 +2478,7 @@ CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
if (Ty->isIntOrIntVectorTy())
return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);
- Type *STy = S->getType();
- if (STy->getPointerAddressSpace() != Ty->getPointerAddressSpace())
- return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
-
- return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
+ return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
}
/// @brief Create a BitCast or a PtrToInt cast instruction
@@ -2475,14 +2496,36 @@ CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
if (Ty->isIntOrIntVectorTy())
return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
- Type *STy = S->getType();
- if (STy->getPointerAddressSpace() != Ty->getPointerAddressSpace())
+ return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
+}
+
+CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
+ Value *S, Type *Ty,
+ const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
+ assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
+
+ if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
+ return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
+
+ return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
+}
+
+CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
+ Value *S, Type *Ty,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
+ assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
+
+ if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
}
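
A usage sketch for the new helper (the wrapper below is invented): callers no longer compare address spaces themselves; the helper emits a plain bitcast when the spaces match and an addrspacecast otherwise.

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

static Value *toI8PtrInAS(Value *Ptr, unsigned DestAS,
                          Instruction *InsertBefore) {
  Type *I8Ptr = Type::getInt8PtrTy(Ptr->getContext(), DestAS);
  // BitCast when Ptr already lives in DestAS, AddrSpaceCast otherwise.
  return CastInst::CreatePointerBitCastOrAddrSpaceCast(Ptr, I8Ptr, "p.i8",
                                                       InsertBefore);
}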
-CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
+CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
bool isSigned, const Twine &Name,
Instruction *InsertBefore) {
assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
@@ -2817,30 +2860,55 @@ CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
return false;
return SrcTy->getScalarType()->isIntegerTy() &&
DstTy->getScalarType()->isPointerTy();
- case Instruction::BitCast:
+ case Instruction::BitCast: {
+ PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
+ PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
+
// BitCast implies a no-op cast of type only. No bits change.
// However, you can't cast pointers to anything but pointers.
- if (SrcTy->isPtrOrPtrVectorTy() != DstTy->isPtrOrPtrVectorTy())
+ if (!SrcPtrTy != !DstPtrTy)
return false;
- // For non pointer cases, the cast is okay if the source and destination bit
+ // For non-pointer cases, the cast is okay if the source and destination bit
// widths are identical.
- if (!SrcTy->isPtrOrPtrVectorTy())
+ if (!SrcPtrTy)
return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
- // If both are pointers then the address spaces must match and vector of
- // pointers must have the same number of elements.
- return SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
- SrcTy->isVectorTy() == DstTy->isVectorTy() &&
- (!SrcTy->isVectorTy() ||
- SrcTy->getVectorNumElements() == SrcTy->getVectorNumElements());
-
- case Instruction::AddrSpaceCast:
- return SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
- SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace() &&
- SrcTy->isVectorTy() == DstTy->isVectorTy() &&
- (!SrcTy->isVectorTy() ||
- SrcTy->getVectorNumElements() == SrcTy->getVectorNumElements());
+ // If both are pointers then the address spaces must match.
+ if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
+ return false;
+
+ // A vector of pointers must have the same number of elements.
+ if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
+ if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy))
+ return (SrcVecTy->getNumElements() == DstVecTy->getNumElements());
+
+ return false;
+ }
+
+ return true;
+ }
+ case Instruction::AddrSpaceCast: {
+ PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
+ if (!SrcPtrTy)
+ return false;
+
+ PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
+ if (!DstPtrTy)
+ return false;
+
+ if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
+ return false;
+
+ if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
+ if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy))
+ return (SrcVecTy->getNumElements() == DstVecTy->getNumElements());
+
+ return false;
+ }
+
+ return true;
+ }
}
}
@@ -3307,7 +3375,7 @@ void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
Instruction *InsertBefore)
: TerminatorInst(Type::getVoidTy(Value->getContext()), Instruction::Switch,
- 0, 0, InsertBefore) {
+ nullptr, 0, InsertBefore) {
init(Value, Default, 2+NumCases*2);
}
@@ -3318,12 +3386,12 @@ SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
BasicBlock *InsertAtEnd)
: TerminatorInst(Type::getVoidTy(Value->getContext()), Instruction::Switch,
- 0, 0, InsertAtEnd) {
+ nullptr, 0, InsertAtEnd) {
init(Value, Default, 2+NumCases*2);
}
SwitchInst::SwitchInst(const SwitchInst &SI)
- : TerminatorInst(SI.getType(), Instruction::Switch, 0, 0) {
+ : TerminatorInst(SI.getType(), Instruction::Switch, nullptr, 0) {
init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
NumOperands = SI.getNumOperands();
Use *OL = OperandList, *InOL = SI.OperandList;
@@ -3371,8 +3439,8 @@ void SwitchInst::removeCase(CaseIt i) {
}
// Nuke the last value.
- OL[NumOps-2].set(0);
- OL[NumOps-2+1].set(0);
+ OL[NumOps-2].set(nullptr);
+ OL[NumOps-2+1].set(nullptr);
NumOperands = NumOps-2;
}
@@ -3438,14 +3506,14 @@ void IndirectBrInst::growOperands() {
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
Instruction *InsertBefore)
: TerminatorInst(Type::getVoidTy(Address->getContext()),Instruction::IndirectBr,
- 0, 0, InsertBefore) {
+ nullptr, 0, InsertBefore) {
init(Address, NumCases);
}
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
BasicBlock *InsertAtEnd)
: TerminatorInst(Type::getVoidTy(Address->getContext()),Instruction::IndirectBr,
- 0, 0, InsertAtEnd) {
+ nullptr, 0, InsertAtEnd) {
init(Address, NumCases);
}
@@ -3487,7 +3555,7 @@ void IndirectBrInst::removeDestination(unsigned idx) {
OL[idx+1] = OL[NumOps-1];
// Nuke the last value.
- OL[NumOps-1].set(0);
+ OL[NumOps-1].set(nullptr);
NumOperands = NumOps-1;
}
@@ -3533,9 +3601,10 @@ InsertValueInst *InsertValueInst::clone_impl() const {
}
AllocaInst *AllocaInst::clone_impl() const {
- return new AllocaInst(getAllocatedType(),
- (Value*)getOperand(0),
- getAlignment());
+ AllocaInst *Result = new AllocaInst(getAllocatedType(),
+ (Value *)getOperand(0), getAlignment());
+ Result->setUsedWithInAlloca(isUsedWithInAlloca());
+ return Result;
}
LoadInst *LoadInst::clone_impl() const {
@@ -3552,8 +3621,10 @@ StoreInst *StoreInst::clone_impl() const {
AtomicCmpXchgInst *AtomicCmpXchgInst::clone_impl() const {
AtomicCmpXchgInst *Result =
new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
- getOrdering(), getSynchScope());
+ getSuccessOrdering(), getFailureOrdering(),
+ getSynchScope());
Result->setVolatile(isVolatile());
+ Result->setWeak(isWeak());
return Result;
}
diff --git a/contrib/llvm/lib/IR/IntrinsicInst.cpp b/contrib/llvm/lib/IR/IntrinsicInst.cpp
index 51f88d2..5725284 100644
--- a/contrib/llvm/lib/IR/IntrinsicInst.cpp
+++ b/contrib/llvm/lib/IR/IntrinsicInst.cpp
@@ -1,4 +1,4 @@
-//===-- InstrinsicInst.cpp - Intrinsic Instruction Wrappers -----*- C++ -*-===//
+//===-- IntrinsicInst.cpp - Intrinsic Instruction Wrappers ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -35,7 +35,7 @@ static Value *CastOperand(Value *C) {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
if (CE->isCast())
return CE->getOperand(0);
- return NULL;
+ return nullptr;
}
Value *DbgInfoIntrinsic::StripCast(Value *C) {
@@ -57,7 +57,7 @@ Value *DbgDeclareInst::getAddress() const {
if (MDNode* MD = cast_or_null<MDNode>(getArgOperand(0)))
return MD->getOperand(0);
else
- return NULL;
+ return nullptr;
}
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/IR/LLVMContext.cpp b/contrib/llvm/lib/IR/LLVMContext.cpp
index 883bb98..de825f0 100644
--- a/contrib/llvm/lib/IR/LLVMContext.cpp
+++ b/contrib/llvm/lib/IR/LLVMContext.cpp
@@ -15,6 +15,9 @@
#include "llvm/IR/LLVMContext.h"
#include "LLVMContextImpl.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/ManagedStatic.h"
@@ -98,31 +101,92 @@ void *LLVMContext::getInlineAsmDiagnosticContext() const {
return pImpl->InlineAsmDiagContext;
}
+void LLVMContext::setDiagnosticHandler(DiagnosticHandlerTy DiagnosticHandler,
+ void *DiagnosticContext) {
+ pImpl->DiagnosticHandler = DiagnosticHandler;
+ pImpl->DiagnosticContext = DiagnosticContext;
+}
+
+LLVMContext::DiagnosticHandlerTy LLVMContext::getDiagnosticHandler() const {
+ return pImpl->DiagnosticHandler;
+}
+
+void *LLVMContext::getDiagnosticContext() const {
+ return pImpl->DiagnosticContext;
+}
+
+void LLVMContext::setYieldCallback(YieldCallbackTy Callback, void *OpaqueHandle)
+{
+ pImpl->YieldCallback = Callback;
+ pImpl->YieldOpaqueHandle = OpaqueHandle;
+}
+
+void LLVMContext::yield() {
+ if (pImpl->YieldCallback)
+ pImpl->YieldCallback(this, pImpl->YieldOpaqueHandle);
+}
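
An illustrative client of the yield hook (callback name invented; the callback signature is assumed to take the LLVMContext pointer plus the opaque handle, matching the call in yield() above):

#include "llvm/IR/LLVMContext.h"
using namespace llvm;

static void onYield(LLVMContext *Ctx, void *OpaqueHandle) {
  // A host application could pump a UI event loop or poll for cancellation
  // here; long-running passes that call Ctx->yield() give it a chance to run.
  (void)Ctx;
  (void)OpaqueHandle;
}

static void installYield(LLVMContext &Ctx) {
  Ctx.setYieldCallback(onYield, /*OpaqueHandle=*/nullptr);
}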
+
void LLVMContext::emitError(const Twine &ErrorStr) {
- emitError(0U, ErrorStr);
+ diagnose(DiagnosticInfoInlineAsm(ErrorStr));
}
void LLVMContext::emitError(const Instruction *I, const Twine &ErrorStr) {
- unsigned LocCookie = 0;
- if (const MDNode *SrcLoc = I->getMetadata("srcloc")) {
- if (SrcLoc->getNumOperands() != 0)
- if (const ConstantInt *CI = dyn_cast<ConstantInt>(SrcLoc->getOperand(0)))
- LocCookie = CI->getZExtValue();
- }
- return emitError(LocCookie, ErrorStr);
+ assert (I && "Invalid instruction");
+ diagnose(DiagnosticInfoInlineAsm(*I, ErrorStr));
}
-void LLVMContext::emitError(unsigned LocCookie, const Twine &ErrorStr) {
- // If there is no error handler installed, just print the error and exit.
- if (pImpl->InlineAsmDiagHandler == 0) {
- errs() << "error: " << ErrorStr << "\n";
- exit(1);
+void LLVMContext::diagnose(const DiagnosticInfo &DI) {
+ // If there is a report handler, use it.
+ if (pImpl->DiagnosticHandler) {
+ pImpl->DiagnosticHandler(DI, pImpl->DiagnosticContext);
+ return;
}
- // If we do have an error handler, we can report the error and keep going.
- SMDiagnostic Diag("", SourceMgr::DK_Error, ErrorStr.str());
+ // Optimization remarks are selective. They need to check whether the regexp
+ // pattern, passed via one of the -pass-remarks* flags, matches the name of
+ // the pass that is emitting the diagnostic. If there is no match, ignore the
+ // diagnostic and return.
+ switch (DI.getKind()) {
+ case llvm::DK_OptimizationRemark:
+ if (!cast<DiagnosticInfoOptimizationRemark>(DI).isEnabled())
+ return;
+ break;
+ case llvm::DK_OptimizationRemarkMissed:
+ if (!cast<DiagnosticInfoOptimizationRemarkMissed>(DI).isEnabled())
+ return;
+ break;
+ case llvm::DK_OptimizationRemarkAnalysis:
+ if (!cast<DiagnosticInfoOptimizationRemarkAnalysis>(DI).isEnabled())
+ return;
+ break;
+ default:
+ break;
+ }
- pImpl->InlineAsmDiagHandler(Diag, pImpl->InlineAsmDiagContext, LocCookie);
+ // Otherwise, print the message with a prefix based on the severity.
+ std::string MsgStorage;
+ raw_string_ostream Stream(MsgStorage);
+ DiagnosticPrinterRawOStream DP(Stream);
+ DI.print(DP);
+ Stream.flush();
+ switch (DI.getSeverity()) {
+ case DS_Error:
+ errs() << "error: " << MsgStorage << "\n";
+ exit(1);
+ case DS_Warning:
+ errs() << "warning: " << MsgStorage << "\n";
+ break;
+ case DS_Remark:
+ errs() << "remark: " << MsgStorage << "\n";
+ break;
+ case DS_Note:
+ errs() << "note: " << MsgStorage << "\n";
+ break;
+ }
+}
+
+void LLVMContext::emitError(unsigned LocCookie, const Twine &ErrorStr) {
+ diagnose(DiagnosticInfoInlineAsm(LocCookie, ErrorStr));
}
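
A hedged client-side sketch (handler and helper names invented): installing a handler reroutes emitError()/diagnose() to the client instead of the default print-and-possibly-exit path above.

#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"
#include <string>
using namespace llvm;

static void myDiagHandler(const DiagnosticInfo &DI, void *Context) {
  std::string Msg;
  raw_string_ostream OS(Msg);
  DiagnosticPrinterRawOStream DP(OS);
  DI.print(DP);
  OS.flush();
  errs() << "[frontend] " << Msg << "\n"; // never calls exit(), even on DS_Error
  (void)Context;
}

static void installHandler(LLVMContext &Ctx) {
  Ctx.setDiagnosticHandler(myDiagHandler, /*DiagnosticContext=*/nullptr);
}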
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/IR/LLVMContextImpl.cpp b/contrib/llvm/lib/IR/LLVMContextImpl.cpp
index 6a6a4d6..4c2791f 100644
--- a/contrib/llvm/lib/IR/LLVMContextImpl.cpp
+++ b/contrib/llvm/lib/IR/LLVMContextImpl.cpp
@@ -14,12 +14,13 @@
#include "LLVMContextImpl.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Attributes.h"
+#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Module.h"
#include <algorithm>
using namespace llvm;
LLVMContextImpl::LLVMContextImpl(LLVMContext &C)
- : TheTrueVal(0), TheFalseVal(0),
+ : TheTrueVal(nullptr), TheFalseVal(nullptr),
VoidTy(C, Type::VoidTyID),
LabelTy(C, Type::LabelTyID),
HalfTy(C, Type::HalfTyID),
@@ -35,8 +36,12 @@ LLVMContextImpl::LLVMContextImpl(LLVMContext &C)
Int16Ty(C, 16),
Int32Ty(C, 32),
Int64Ty(C, 64) {
- InlineAsmDiagHandler = 0;
- InlineAsmDiagContext = 0;
+ InlineAsmDiagHandler = nullptr;
+ InlineAsmDiagContext = nullptr;
+ DiagnosticHandler = nullptr;
+ DiagnosticContext = nullptr;
+ YieldCallback = nullptr;
+ YieldOpaqueHandle = nullptr;
NamedStructTypesUniqueID = 0;
}
@@ -44,8 +49,7 @@ namespace {
struct DropReferences {
// Takes the value_type of a ConstantUniqueMap's internal map, whose 'second'
// is a Constant*.
- template<typename PairT>
- void operator()(const PairT &P) {
+ template <typename PairT> void operator()(const PairT &P) {
P.second->dropAllReferences();
}
};
@@ -62,12 +66,11 @@ struct DropFirst {
}
LLVMContextImpl::~LLVMContextImpl() {
- // NOTE: We need to delete the contents of OwnedModules, but we have to
- // duplicate it into a temporary vector, because the destructor of Module
- // will try to remove itself from OwnedModules set. This would cause
- // iterator invalidation if we iterated on the set directly.
- std::vector<Module*> Modules(OwnedModules.begin(), OwnedModules.end());
- DeleteContainerPointers(Modules);
+ // NOTE: We need to delete the contents of OwnedModules, but Module's dtor
+ // will call LLVMContextImpl::removeModule, thus invalidating iterators into
+ // the container. Avoid iterators during this operation:
+ while (!OwnedModules.empty())
+ delete *OwnedModules.begin();
// Free the constants. This is important to do here to ensure that they are
// freed before the LeakDetector is torn down.
diff --git a/contrib/llvm/lib/IR/LLVMContextImpl.h b/contrib/llvm/lib/IR/LLVMContextImpl.h
index 407b985..808c239 100644
--- a/contrib/llvm/lib/IR/LLVMContextImpl.h
+++ b/contrib/llvm/lib/IR/LLVMContextImpl.h
@@ -30,13 +30,16 @@
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
-#include "llvm/Support/ValueHandle.h"
+#include "llvm/IR/ValueHandle.h"
#include <vector>
namespace llvm {
class ConstantInt;
class ConstantFP;
+class DiagnosticInfoOptimizationRemark;
+class DiagnosticInfoOptimizationRemarkMissed;
+class DiagnosticInfoOptimizationRemarkAnalysis;
class LLVMContext;
class Type;
class Value;
@@ -56,8 +59,8 @@ struct DenseMapAPIntKeyInfo {
return hash_combine(Key.type, Key.val);
}
};
- static inline KeyTy getEmptyKey() { return KeyTy(APInt(1,0), 0); }
- static inline KeyTy getTombstoneKey() { return KeyTy(APInt(1,1), 0); }
+ static inline KeyTy getEmptyKey() { return KeyTy(APInt(1,0), nullptr); }
+ static inline KeyTy getTombstoneKey() { return KeyTy(APInt(1,1), nullptr); }
static unsigned getHashValue(const KeyTy &Key) {
return static_cast<unsigned>(hash_value(Key));
}
@@ -225,9 +228,9 @@ public:
MDNode *get() const {
return cast_or_null<MDNode>(getValPtr());
}
-
- virtual void deleted();
- virtual void allUsesReplacedWith(Value *VNew);
+
+ void deleted() override;
+ void allUsesReplacedWith(Value *VNew) override;
};
class LLVMContextImpl {
@@ -238,9 +241,15 @@ public:
LLVMContext::InlineAsmDiagHandlerTy InlineAsmDiagHandler;
void *InlineAsmDiagContext;
-
- typedef DenseMap<DenseMapAPIntKeyInfo::KeyTy, ConstantInt*,
- DenseMapAPIntKeyInfo> IntMapTy;
+
+ LLVMContext::DiagnosticHandlerTy DiagnosticHandler;
+ void *DiagnosticContext;
+
+ LLVMContext::YieldCallbackTy YieldCallback;
+ void *YieldOpaqueHandle;
+
+ typedef DenseMap<DenseMapAPIntKeyInfo::KeyTy, ConstantInt *,
+ DenseMapAPIntKeyInfo> IntMapTy;
IntMapTy IntConstants;
typedef DenseMap<DenseMapAPFloatKeyInfo::KeyTy, ConstantFP*,
@@ -278,8 +287,8 @@ public:
StringMap<ConstantDataSequential*> CDSConstants;
-
- DenseMap<std::pair<Function*, BasicBlock*> , BlockAddress*> BlockAddresses;
+ DenseMap<std::pair<const Function *, const BasicBlock *>, BlockAddress *>
+ BlockAddresses;
ConstantUniqueMap<ExprMapKeyType, const ExprMapKeyType&, Type, ConstantExpr>
ExprConstants;
@@ -349,7 +358,12 @@ public:
/// for an index. The ValueHandle ensures that ScopeINlinedAtIdx stays up
/// to date.
std::vector<std::pair<DebugRecVH, DebugRecVH> > ScopeInlinedAtRecords;
-
+
+ /// DiscriminatorTable - This table maps file:line locations to an
+ /// integer representing the next DWARF path discriminator to assign to
+ /// instructions in different blocks at the same location.
+ DenseMap<std::pair<const char *, unsigned>, unsigned> DiscriminatorTable;
+
/// IntrinsicIDCache - Cache of intrinsic name (string) to numeric ID mappings
/// requested in this context
typedef DenseMap<const Function*, unsigned> IntrinsicIDCacheTy;
diff --git a/contrib/llvm/lib/IR/LeakDetector.cpp b/contrib/llvm/lib/IR/LeakDetector.cpp
index 835e5e6..6f71627 100644
--- a/contrib/llvm/lib/IR/LeakDetector.cpp
+++ b/contrib/llvm/lib/IR/LeakDetector.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Support/LeakDetector.h"
+#include "llvm/IR/LeakDetector.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Value.h"
diff --git a/contrib/llvm/lib/IR/LeaksContext.h b/contrib/llvm/lib/IR/LeaksContext.h
index 5038dc9..52ac170 100644
--- a/contrib/llvm/lib/IR/LeaksContext.h
+++ b/contrib/llvm/lib/IR/LeaksContext.h
@@ -12,8 +12,12 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_IR_LEAKSCONTEXT_H
+#define LLVM_IR_LEAKSCONTEXT_H
+
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Value.h"
+#include "llvm/Support/raw_ostream.h"
namespace llvm {
@@ -30,10 +34,10 @@ struct PrinterTrait<Value> {
template <typename T>
struct LeakDetectorImpl {
explicit LeakDetectorImpl(const char* const name = "") :
- Cache(0), Name(name) { }
+ Cache(nullptr), Name(name) { }
void clear() {
- Cache = 0;
+ Cache = nullptr;
Ts.clear();
}
@@ -57,15 +61,15 @@ struct LeakDetectorImpl {
void removeGarbage(const T* o) {
if (o == Cache)
- Cache = 0; // Cache hit
+ Cache = nullptr; // Cache hit
else
Ts.erase(o);
}
bool hasGarbage(const std::string& Message) {
- addGarbage(0); // Flush the Cache
+ addGarbage(nullptr); // Flush the Cache
- assert(Cache == 0 && "No value should be cached anymore!");
+ assert(!Cache && "No value should be cached anymore!");
if (!Ts.empty()) {
errs() << "Leaked " << Name << " objects found: " << Message << ":\n";
@@ -90,3 +94,5 @@ private:
};
}
+
+#endif // LLVM_IR_LEAKSCONTEXT_H
diff --git a/contrib/llvm/lib/IR/LegacyPassManager.cpp b/contrib/llvm/lib/IR/LegacyPassManager.cpp
index a431d82..d3f3482 100644
--- a/contrib/llvm/lib/IR/LegacyPassManager.cpp
+++ b/contrib/llvm/lib/IR/LegacyPassManager.cpp
@@ -12,17 +12,18 @@
//===----------------------------------------------------------------------===//
-#include "llvm/Assembly/PrintModulePass.h"
-#include "llvm/Assembly/Writer.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/LegacyPassManager.h"
-#include "llvm/IR/Module.h"
#include "llvm/IR/LegacyPassManagers.h"
+#include "llvm/IR/LegacyPassNameParser.h"
+#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Mutex.h"
-#include "llvm/Support/PassNameParser.h"
+#include "llvm/Support/TimeValue.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
@@ -119,7 +120,7 @@ bool PMDataManager::isPassDebuggingExecutionsOrMore() const {
void PassManagerPrettyStackEntry::print(raw_ostream &OS) const {
- if (V == 0 && M == 0)
+ if (!V && !M)
OS << "Releasing pass '";
else
OS << "Running pass '";
@@ -130,7 +131,7 @@ void PassManagerPrettyStackEntry::print(raw_ostream &OS) const {
OS << " on module '" << M->getModuleIdentifier() << "'.\n";
return;
}
- if (V == 0) {
+ if (!V) {
OS << '\n';
return;
}
@@ -144,7 +145,7 @@ void PassManagerPrettyStackEntry::print(raw_ostream &OS) const {
OS << "value";
OS << " '";
- WriteAsOperand(OS, V, /*PrintTy=*/false, M);
+ V->printAsOperand(OS, /*PrintTy=*/false, M);
OS << "'\n";
}
@@ -165,28 +166,28 @@ public:
/// Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the function, and if so, return true.
- bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
/// Pass Manager itself does not invalidate any analysis info.
- void getAnalysisUsage(AnalysisUsage &Info) const {
+ void getAnalysisUsage(AnalysisUsage &Info) const override {
Info.setPreservesAll();
}
- bool doInitialization(Module &M);
+ bool doInitialization(Module &M) override;
bool doInitialization(Function &F);
- bool doFinalization(Module &M);
+ bool doFinalization(Module &M) override;
bool doFinalization(Function &F);
- virtual PMDataManager *getAsPMDataManager() { return this; }
- virtual Pass *getAsPass() { return this; }
+ PMDataManager *getAsPMDataManager() override { return this; }
+ Pass *getAsPass() override { return this; }
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "BasicBlock Pass Manager";
}
// Print passes managed by this manager
- void dumpPassStructure(unsigned Offset) {
- llvm::dbgs().indent(Offset*2) << "BasicBlockPass Manager\n";
+ void dumpPassStructure(unsigned Offset) override {
+ dbgs().indent(Offset*2) << "BasicBlockPass Manager\n";
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
BasicBlockPass *BP = getContainedPass(Index);
BP->dumpPassStructure(Offset + 1);
@@ -200,7 +201,7 @@ public:
return BP;
}
- virtual PassManagerType getPassManagerType() const {
+ PassManagerType getPassManagerType() const override {
return PMT_BasicBlockPassManager;
}
};
@@ -235,8 +236,9 @@ public:
}
/// createPrinterPass - Get a function printer pass.
- Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const {
- return createPrintFunctionPass(Banner, &O);
+ Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const override {
+ return createPrintFunctionPass(O, Banner);
}
// Prepare for running an on the fly pass, freeing memory if needed
@@ -249,21 +251,21 @@ public:
/// doInitialization - Run all of the initializers for the function passes.
///
- bool doInitialization(Module &M);
+ bool doInitialization(Module &M) override;
/// doFinalization - Run all of the finalizers for the function passes.
///
- bool doFinalization(Module &M);
+ bool doFinalization(Module &M) override;
- virtual PMDataManager *getAsPMDataManager() { return this; }
- virtual Pass *getAsPass() { return this; }
- virtual PassManagerType getTopLevelPassManagerType() {
+ PMDataManager *getAsPMDataManager() override { return this; }
+ Pass *getAsPass() override { return this; }
+ PassManagerType getTopLevelPassManagerType() override {
return PMT_FunctionPassManager;
}
/// Pass Manager itself does not invalidate any analysis info.
- void getAnalysisUsage(AnalysisUsage &Info) const {
+ void getAnalysisUsage(AnalysisUsage &Info) const override {
Info.setPreservesAll();
}
@@ -304,8 +306,9 @@ public:
}
/// createPrinterPass - Get a module printer pass.
- Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const {
- return createPrintModulePass(&O, false, Banner);
+ Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const override {
+ return createPrintModulePass(O, Banner);
}
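
For context, an illustrative use of the updated printer-pass helpers these overrides call (assuming the createPrintModulePass(raw_ostream&, const std::string&) declaration in IRPrintingPasses.h):

#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void addIRDump(legacy::PassManager &PM) {
  // The stream is now passed by reference rather than by pointer, and the
  // separate "banner only" boolean is gone.
  PM.add(createPrintModulePass(errs(), "; IR after setup"));
}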
/// run - Execute all of the passes scheduled for execution. Keep track of
@@ -324,30 +327,30 @@ public:
bool doFinalization();
/// Pass Manager itself does not invalidate any analysis info.
- void getAnalysisUsage(AnalysisUsage &Info) const {
+ void getAnalysisUsage(AnalysisUsage &Info) const override {
Info.setPreservesAll();
}
/// Add RequiredPass into list of lower level passes required by pass P.
/// RequiredPass is run on the fly by Pass Manager when P requests it
/// through getAnalysis interface.
- virtual void addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass);
+ void addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) override;
/// Return function pass corresponding to PassInfo PI, that is
/// required by module pass MP. Instantiate analysis pass, by using
/// its runOnFunction() for function F.
- virtual Pass* getOnTheFlyPass(Pass *MP, AnalysisID PI, Function &F);
+ Pass* getOnTheFlyPass(Pass *MP, AnalysisID PI, Function &F) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Module Pass Manager";
}
- virtual PMDataManager *getAsPMDataManager() { return this; }
- virtual Pass *getAsPass() { return this; }
+ PMDataManager *getAsPMDataManager() override { return this; }
+ Pass *getAsPass() override { return this; }
// Print passes managed by this manager
- void dumpPassStructure(unsigned Offset) {
- llvm::dbgs().indent(Offset*2) << "ModulePass Manager\n";
+ void dumpPassStructure(unsigned Offset) override {
+ dbgs().indent(Offset*2) << "ModulePass Manager\n";
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
ModulePass *MP = getContainedPass(Index);
MP->dumpPassStructure(Offset + 1);
@@ -364,7 +367,7 @@ public:
return static_cast<ModulePass *>(PassVector[N]);
}
- virtual PassManagerType getPassManagerType() const {
+ PassManagerType getPassManagerType() const override {
return PMT_ModulePassManager;
}
@@ -404,8 +407,9 @@ public:
}
/// createPrinterPass - Get a module printer pass.
- Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const {
- return createPrintModulePass(&O, false, Banner);
+ Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const override {
+ return createPrintModulePass(O, Banner);
}
/// run - Execute all of the passes scheduled for execution. Keep track of
@@ -424,13 +428,13 @@ public:
bool doFinalization();
/// Pass Manager itself does not invalidate any analysis info.
- void getAnalysisUsage(AnalysisUsage &Info) const {
+ void getAnalysisUsage(AnalysisUsage &Info) const override {
Info.setPreservesAll();
}
- virtual PMDataManager *getAsPMDataManager() { return this; }
- virtual Pass *getAsPass() { return this; }
- virtual PassManagerType getTopLevelPassManagerType() {
+ PMDataManager *getAsPMDataManager() override { return this; }
+ Pass *getAsPass() override { return this; }
+ PassManagerType getTopLevelPassManagerType() override {
return PMT_ModulePassManager;
}
@@ -475,18 +479,18 @@ public:
}
// createTheTimeInfo - This method either initializes the TheTimeInfo pointer
- // to a non null value (if the -time-passes option is enabled) or it leaves it
+ // to a non-null value (if the -time-passes option is enabled) or it leaves it
// null. It may be called multiple times.
static void createTheTimeInfo();
/// getPassTimer - Return the timer for the specified pass if it exists.
Timer *getPassTimer(Pass *P) {
if (P->getAsPMDataManager())
- return 0;
+ return nullptr;
sys::SmartScopedLock<true> Lock(*TimingInfoMutex);
Timer *&T = TimingData[P];
- if (T == 0)
+ if (!T)
T = new Timer(P->getPassName(), TG);
return T;
}
@@ -577,7 +581,7 @@ void PMTopLevelManager::collectLastUses(SmallVectorImpl<Pass *> &LastUses,
}
AnalysisUsage *PMTopLevelManager::findAnalysisUsage(Pass *P) {
- AnalysisUsage *AnUsage = NULL;
+ AnalysisUsage *AnUsage = nullptr;
DenseMap<Pass *, AnalysisUsage *>::iterator DMI = AnUsageMap.find(P);
if (DMI != AnUsageMap.end())
AnUsage = DMI->second;
@@ -624,7 +628,7 @@ void PMTopLevelManager::schedulePass(Pass *P) {
if (!AnalysisPass) {
const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(*I);
- if (PI == NULL) {
+ if (!PI) {
// Pass P is not in the global PassRegistry
dbgs() << "Pass '" << P->getPassName() << "' is not initialized." << "\n";
dbgs() << "Verify if there is a pass dependency cycle." << "\n";
@@ -731,7 +735,7 @@ Pass *PMTopLevelManager::findAnalysisPass(AnalysisID AID) {
}
}
- return 0;
+ return nullptr;
}
// Print passes managed by this top level manager.
@@ -828,7 +832,7 @@ void PMDataManager::recordAvailableAnalysis(Pass *P) {
// This pass is the current implementation of all of the interfaces it
// implements as well.
const PassInfo *PInf = PassRegistry::getPassRegistry()->getPassInfo(PI);
- if (PInf == 0) return;
+ if (!PInf) return;
const std::vector<const PassInfo*> &II = PInf->getInterfacesImplemented();
for (unsigned i = 0, e = II.size(); i != e; ++i)
AvailableAnalysis[II[i]->getTypeInfo()] = P;
@@ -845,7 +849,7 @@ bool PMDataManager::preserveHigherLevelAnalysis(Pass *P) {
for (SmallVectorImpl<Pass *>::iterator I = HigherLevelAnalysis.begin(),
E = HigherLevelAnalysis.end(); I != E; ++I) {
Pass *P1 = *I;
- if (P1->getAsImmutablePass() == 0 &&
+ if (P1->getAsImmutablePass() == nullptr &&
std::find(PreservedSet.begin(), PreservedSet.end(),
P1->getPassID()) ==
PreservedSet.end())
@@ -885,7 +889,7 @@ void PMDataManager::removeNotPreservedAnalysis(Pass *P) {
for (DenseMap<AnalysisID, Pass*>::iterator I = AvailableAnalysis.begin(),
E = AvailableAnalysis.end(); I != E; ) {
DenseMap<AnalysisID, Pass*>::iterator Info = I++;
- if (Info->second->getAsImmutablePass() == 0 &&
+ if (Info->second->getAsImmutablePass() == nullptr &&
std::find(PreservedSet.begin(), PreservedSet.end(), Info->first) ==
PreservedSet.end()) {
// Remove this analysis
@@ -909,7 +913,7 @@ void PMDataManager::removeNotPreservedAnalysis(Pass *P) {
I = InheritedAnalysis[Index]->begin(),
E = InheritedAnalysis[Index]->end(); I != E; ) {
DenseMap<AnalysisID, Pass *>::iterator Info = I++;
- if (Info->second->getAsImmutablePass() == 0 &&
+ if (Info->second->getAsImmutablePass() == nullptr &&
std::find(PreservedSet.begin(), PreservedSet.end(), Info->first) ==
PreservedSet.end()) {
// Remove this analysis
@@ -1026,7 +1030,7 @@ void PMDataManager::add(Pass *P, bool ProcessAnalysis) {
// Set P as P's last user until someone starts using P.
// However, if P is a Pass Manager then it does not need
// to record its last user.
- if (P->getAsPMDataManager() == 0)
+ if (!P->getAsPMDataManager())
LastUses.push_back(P);
TPM->setLastUser(LastUses, P);
@@ -1093,7 +1097,7 @@ void PMDataManager::initializeAnalysisImpl(Pass *P) {
I = AnUsage->getRequiredSet().begin(),
E = AnUsage->getRequiredSet().end(); I != E; ++I) {
Pass *Impl = findAnalysisPass(*I, true);
- if (Impl == 0)
+ if (!Impl)
// This may be an analysis pass that is initialized on the fly.
// If that is not the case then it will raise an assert when it is used.
continue;
@@ -1117,7 +1121,7 @@ Pass *PMDataManager::findAnalysisPass(AnalysisID AID, bool SearchParent) {
if (SearchParent)
return TPM->findAnalysisPass(AID);
- return NULL;
+ return nullptr;
}
// Print list of passes that are last used by P.
@@ -1133,7 +1137,7 @@ void PMDataManager::dumpLastUses(Pass *P, unsigned Offset) const{
for (SmallVectorImpl<Pass *>::iterator I = LUses.begin(),
E = LUses.end(); I != E; ++I) {
- llvm::dbgs() << "--" << std::string(Offset*2, ' ');
+ dbgs() << "--" << std::string(Offset*2, ' ');
(*I)->dumpPassStructure(0);
}
}
@@ -1156,7 +1160,8 @@ void PMDataManager::dumpPassInfo(Pass *P, enum PassDebuggingString S1,
StringRef Msg) {
if (PassDebugging < Executions)
return;
- dbgs() << (void*)this << std::string(getDepth()*2+1, ' ');
+ dbgs() << "[" << sys::TimeValue::now().str() << "] " << (void *)this
+ << std::string(getDepth() * 2 + 1, ' ');
switch (S1) {
case EXECUTION_MSG:
dbgs() << "Executing Pass '" << P->getPassName();
@@ -1485,8 +1490,10 @@ bool FunctionPassManagerImpl::run(Function &F) {
TimingInfo::createTheTimeInfo();
initializeAllAnalysisInfo();
- for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index)
+ for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index) {
Changed |= getContainedManager(Index)->runOnFunction(F);
+ F.getContext().yield();
+ }
for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index)
getContainedManager(Index)->cleanup();
@@ -1655,6 +1662,8 @@ void MPPassManager::addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) {
assert((P->getPotentialPassManagerType() <
RequiredPass->getPotentialPassManagerType()) &&
"Unable to handle Pass that requires lower level Analysis pass");
+ if (!RequiredPass)
+ return;
FunctionPassManagerImpl *FPP = OnTheFlyManagers[P];
if (!FPP) {
@@ -1664,14 +1673,24 @@ void MPPassManager::addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) {
OnTheFlyManagers[P] = FPP;
}
- FPP->add(RequiredPass);
+ const PassInfo * RequiredPassPI =
+ PassRegistry::getPassRegistry()->getPassInfo(RequiredPass->getPassID());
- // Register P as the last user of RequiredPass.
- if (RequiredPass) {
- SmallVector<Pass *, 1> LU;
- LU.push_back(RequiredPass);
- FPP->setLastUser(LU, P);
+ Pass *FoundPass = nullptr;
+ if (RequiredPassPI && RequiredPassPI->isAnalysis()) {
+ FoundPass =
+ ((PMTopLevelManager*)FPP)->findAnalysisPass(RequiredPass->getPassID());
+ }
+ if (!FoundPass) {
+ FoundPass = RequiredPass;
+ // This should be guaranteed to add RequiredPass to the pass manager, given
+ // that we checked for an available analysis above.
+ FPP->add(RequiredPass);
}
+ // Register P as the last user of FoundPass or RequiredPass.
+ SmallVector<Pass *, 1> LU;
+ LU.push_back(FoundPass);
+ FPP->setLastUser(LU, P);
}
/// Return function pass corresponding to PassInfo PI, that is
@@ -1707,8 +1726,10 @@ bool PassManagerImpl::run(Module &M) {
}
initializeAllAnalysisInfo();
- for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index)
+ for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index) {
Changed |= getContainedManager(Index)->runOnModule(M);
+ M.getContext().yield();
+ }
for (SmallVectorImpl<ImmutablePass *>::const_iterator I = IPV.begin(),
E = IPV.end(); I != E; ++I) {
@@ -1755,7 +1776,7 @@ EnableTiming("time-passes", cl::location(TimePassesIsEnabled),
cl::desc("Time each pass, printing elapsed time for each on exit"));
// createTheTimeInfo - This method either initializes the TheTimeInfo pointer to
-// a non null value (if the -time-passes option is enabled) or it leaves it
+// a non-null value (if the -time-passes option is enabled) or it leaves it
// null. It may be called multiple times.
void TimingInfo::createTheTimeInfo() {
if (!TimePassesIsEnabled || TheTimeInfo) return;
@@ -1771,7 +1792,7 @@ void TimingInfo::createTheTimeInfo() {
Timer *llvm::getPassTimer(Pass *P) {
if (TheTimeInfo)
return TheTimeInfo->getPassTimer(P);
- return 0;
+ return nullptr;
}
//===----------------------------------------------------------------------===//
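Note on the hunks above: the legacy pass managers now call getContext().yield() after each contained manager runs. A minimal sketch of how an embedding application might hook those yield points, assuming the LLVMContext::setYieldCallback API introduced alongside this change; the callback body and names are illustrative only.

#include "llvm/IR/LLVMContext.h"

// Hypothetical host-side callback; the work done here is a placeholder.
static void pumpHostEventLoop(llvm::LLVMContext *Ctx, void *OpaqueHandle) {
  (void)Ctx;
  (void)OpaqueHandle;
  // e.g. service a GUI event loop or check a cancellation flag.
}

void installYieldHook(llvm::LLVMContext &Ctx) {
  // The pass managers above call Ctx.yield() between contained managers;
  // yield() invokes the registered callback, if any.
  Ctx.setYieldCallback(pumpHostEventLoop, /*OpaqueHandle=*/nullptr);
}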
diff --git a/contrib/llvm/lib/IR/MDBuilder.cpp b/contrib/llvm/lib/IR/MDBuilder.cpp
new file mode 100644
index 0000000..65cdf38
--- /dev/null
+++ b/contrib/llvm/lib/IR/MDBuilder.cpp
@@ -0,0 +1,139 @@
+//===---- llvm/MDBuilder.cpp - Builder for LLVM metadata ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MDBuilder class, which is used as a convenient way to
+// create LLVM metadata with a consistent and simplified interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Metadata.h"
+using namespace llvm;
+
+MDString *MDBuilder::createString(StringRef Str) {
+ return MDString::get(Context, Str);
+}
+
+MDNode *MDBuilder::createFPMath(float Accuracy) {
+ if (Accuracy == 0.0)
+ return nullptr;
+ assert(Accuracy > 0.0 && "Invalid fpmath accuracy!");
+ Value *Op = ConstantFP::get(Type::getFloatTy(Context), Accuracy);
+ return MDNode::get(Context, Op);
+}
+
+MDNode *MDBuilder::createBranchWeights(uint32_t TrueWeight,
+ uint32_t FalseWeight) {
+ uint32_t Weights[] = {TrueWeight, FalseWeight};
+ return createBranchWeights(Weights);
+}
+
+MDNode *MDBuilder::createBranchWeights(ArrayRef<uint32_t> Weights) {
+ assert(Weights.size() >= 2 && "Need at least two branch weights!");
+
+ SmallVector<Value *, 4> Vals(Weights.size() + 1);
+ Vals[0] = createString("branch_weights");
+
+ Type *Int32Ty = Type::getInt32Ty(Context);
+ for (unsigned i = 0, e = Weights.size(); i != e; ++i)
+ Vals[i + 1] = ConstantInt::get(Int32Ty, Weights[i]);
+
+ return MDNode::get(Context, Vals);
+}
+
+MDNode *MDBuilder::createRange(const APInt &Lo, const APInt &Hi) {
+ assert(Lo.getBitWidth() == Hi.getBitWidth() && "Mismatched bitwidths!");
+ // If the range is everything then it is useless.
+ if (Hi == Lo)
+ return nullptr;
+
+ // Return the range [Lo, Hi).
+ Type *Ty = IntegerType::get(Context, Lo.getBitWidth());
+ Value *Range[2] = {ConstantInt::get(Ty, Lo), ConstantInt::get(Ty, Hi)};
+ return MDNode::get(Context, Range);
+}
+
+MDNode *MDBuilder::createAnonymousTBAARoot() {
+ // To ensure uniqueness the root node is self-referential.
+ MDNode *Dummy = MDNode::getTemporary(Context, ArrayRef<Value *>());
+ MDNode *Root = MDNode::get(Context, Dummy);
+ // At this point we have
+ // !0 = metadata !{} <- dummy
+ // !1 = metadata !{metadata !0} <- root
+ // Replace the dummy operand with the root node itself and delete the dummy.
+ Root->replaceOperandWith(0, Root);
+ MDNode::deleteTemporary(Dummy);
+ // We now have
+ // !1 = metadata !{metadata !1} <- self-referential root
+ return Root;
+}
+
+MDNode *MDBuilder::createTBAARoot(StringRef Name) {
+ return MDNode::get(Context, createString(Name));
+}
+
+/// \brief Return metadata for a non-root TBAA node with the given name,
+/// parent in the TBAA tree, and value for 'pointsToConstantMemory'.
+MDNode *MDBuilder::createTBAANode(StringRef Name, MDNode *Parent,
+ bool isConstant) {
+ if (isConstant) {
+ Constant *Flags = ConstantInt::get(Type::getInt64Ty(Context), 1);
+ Value *Ops[3] = {createString(Name), Parent, Flags};
+ return MDNode::get(Context, Ops);
+ } else {
+ Value *Ops[2] = {createString(Name), Parent};
+ return MDNode::get(Context, Ops);
+ }
+}
+
+/// \brief Return metadata for a tbaa.struct node with the given
+/// struct field descriptions.
+MDNode *MDBuilder::createTBAAStructNode(ArrayRef<TBAAStructField> Fields) {
+ SmallVector<Value *, 4> Vals(Fields.size() * 3);
+ Type *Int64 = Type::getInt64Ty(Context);
+ for (unsigned i = 0, e = Fields.size(); i != e; ++i) {
+ Vals[i * 3 + 0] = ConstantInt::get(Int64, Fields[i].Offset);
+ Vals[i * 3 + 1] = ConstantInt::get(Int64, Fields[i].Size);
+ Vals[i * 3 + 2] = Fields[i].TBAA;
+ }
+ return MDNode::get(Context, Vals);
+}
+
+/// \brief Return metadata for a TBAA struct node in the type DAG
+/// with the given name, a list of pairs (offset, field type in the type DAG).
+MDNode *MDBuilder::createTBAAStructTypeNode(
+ StringRef Name, ArrayRef<std::pair<MDNode *, uint64_t>> Fields) {
+ SmallVector<Value *, 4> Ops(Fields.size() * 2 + 1);
+ Type *Int64 = Type::getInt64Ty(Context);
+ Ops[0] = createString(Name);
+ for (unsigned i = 0, e = Fields.size(); i != e; ++i) {
+ Ops[i * 2 + 1] = Fields[i].first;
+ Ops[i * 2 + 2] = ConstantInt::get(Int64, Fields[i].second);
+ }
+ return MDNode::get(Context, Ops);
+}
+
+/// \brief Return metadata for a TBAA scalar type node with the
+/// given name, an offset and a parent in the TBAA type DAG.
+MDNode *MDBuilder::createTBAAScalarTypeNode(StringRef Name, MDNode *Parent,
+ uint64_t Offset) {
+ ConstantInt *Off = ConstantInt::get(Type::getInt64Ty(Context), Offset);
+ Value *Ops[3] = {createString(Name), Parent, Off};
+ return MDNode::get(Context, Ops);
+}
+
+/// \brief Return metadata for a TBAA tag node with the given
+/// base type, access type and offset relative to the base type.
+MDNode *MDBuilder::createTBAAStructTagNode(MDNode *BaseType, MDNode *AccessType,
+ uint64_t Offset) {
+ Type *Int64 = Type::getInt64Ty(Context);
+ Value *Ops[3] = {BaseType, AccessType, ConstantInt::get(Int64, Offset)};
+ return MDNode::get(Context, Ops);
+}
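The new MDBuilder.cpp above collects small helpers for constructing common metadata nodes. A hedged sketch of using them to attach !prof branch weights to a conditional branch; the surrounding helper and the chosen weights are illustrative.

#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"

// Hypothetical helper: mark a conditional branch as strongly biased toward
// its true successor.
void annotateLikelyTaken(llvm::BranchInst *BI) {
  if (!BI || !BI->isConditional())
    return;
  llvm::MDBuilder MDB(BI->getContext());
  // 95 : 5 relative weights; only the ratio matters to consumers.
  llvm::MDNode *Weights = MDB.createBranchWeights(/*TrueWeight=*/95,
                                                  /*FalseWeight=*/5);
  BI->setMetadata(llvm::LLVMContext::MD_prof, Weights);
}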
diff --git a/contrib/llvm/lib/IR/Mangler.cpp b/contrib/llvm/lib/IR/Mangler.cpp
new file mode 100644
index 0000000..27d973b
--- /dev/null
+++ b/contrib/llvm/lib/IR/Mangler.cpp
@@ -0,0 +1,144 @@
+//===-- Mangler.cpp - Self-contained c/asm llvm name mangler --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Unified name mangler for assembly backends.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Mangler.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+static void getNameWithPrefixx(raw_ostream &OS, const Twine &GVName,
+ Mangler::ManglerPrefixTy PrefixTy,
+ const DataLayout &DL, bool UseAt) {
+ SmallString<256> TmpData;
+ StringRef Name = GVName.toStringRef(TmpData);
+ assert(!Name.empty() && "getNameWithPrefix requires non-empty name");
+
+ if (PrefixTy == Mangler::Private)
+ OS << DL.getPrivateGlobalPrefix();
+ else if (PrefixTy == Mangler::LinkerPrivate)
+ OS << DL.getLinkerPrivateGlobalPrefix();
+
+ if (UseAt) {
+ OS << '@';
+ } else {
+ char Prefix = DL.getGlobalPrefix();
+ if (Prefix != '\0')
+ OS << Prefix;
+ }
+
+ // If this is a simple string that doesn't need escaping, just append it.
+ OS << Name;
+}
+
+void Mangler::getNameWithPrefix(raw_ostream &OS, const Twine &GVName,
+ ManglerPrefixTy PrefixTy) const {
+ return getNameWithPrefixx(OS, GVName, PrefixTy, *DL, false);
+}
+
+void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
+ const Twine &GVName,
+ ManglerPrefixTy PrefixTy) const {
+ raw_svector_ostream OS(OutName);
+ return getNameWithPrefix(OS, GVName, PrefixTy);
+}
+
+/// AddFastCallStdCallSuffix - Microsoft fastcall and stdcall functions require
+/// a suffix on their name indicating the number of words of arguments they
+/// take.
+static void AddFastCallStdCallSuffix(raw_ostream &OS, const Function *F,
+ const DataLayout &TD) {
+ // Calculate the total size of the arguments.
+ unsigned ArgWords = 0;
+ for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
+ AI != AE; ++AI) {
+ Type *Ty = AI->getType();
+ // 'Dereference' type in case of byval or inalloca parameter attribute.
+ if (AI->hasByValOrInAllocaAttr())
+ Ty = cast<PointerType>(Ty)->getElementType();
+ // Size should be aligned to DWORD boundary
+ ArgWords += ((TD.getTypeAllocSize(Ty) + 3)/4)*4;
+ }
+
+ OS << '@' << ArgWords;
+}
+
+void Mangler::getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV,
+ bool CannotUsePrivateLabel) const {
+ ManglerPrefixTy PrefixTy = Mangler::Default;
+ if (GV->hasPrivateLinkage()) {
+ if (CannotUsePrivateLabel)
+ PrefixTy = Mangler::LinkerPrivate;
+ else
+ PrefixTy = Mangler::Private;
+ }
+
+ if (!GV->hasName()) {
+ // Get the ID for the global, assigning a new one if we haven't got one
+ // already.
+ unsigned &ID = AnonGlobalIDs[GV];
+ if (ID == 0)
+ ID = NextAnonGlobalID++;
+
+ // Must mangle the global into a unique ID.
+ getNameWithPrefix(OS, "__unnamed_" + Twine(ID), PrefixTy);
+ return;
+ }
+
+ StringRef Name = GV->getName();
+
+ // No need to do anything special if the global has the special "do not
+ // mangle" flag in the name.
+ if (Name[0] == '\1') {
+ OS << Name.substr(1);
+ return;
+ }
+
+ bool UseAt = false;
+ const Function *MSFunc = nullptr;
+ CallingConv::ID CC;
+ if (DL->hasMicrosoftFastStdCallMangling()) {
+ if ((MSFunc = dyn_cast<Function>(GV))) {
+ CC = MSFunc->getCallingConv();
+ // fastcall functions need to start with @ instead of _.
+ if (CC == CallingConv::X86_FastCall)
+ UseAt = true;
+ }
+ }
+
+ getNameWithPrefixx(OS, Name, PrefixTy, *DL, UseAt);
+
+ if (!MSFunc)
+ return;
+
+ // If we are supposed to add a microsoft-style suffix for stdcall/fastcall,
+ // add it.
+ // fastcall and stdcall functions usually need @42 at the end to specify
+ // the argument info.
+ FunctionType *FT = MSFunc->getFunctionType();
+ if ((CC == CallingConv::X86_FastCall || CC == CallingConv::X86_StdCall) &&
+ // "Pure" variadic functions do not receive @0 suffix.
+ (!FT->isVarArg() || FT->getNumParams() == 0 ||
+ (FT->getNumParams() == 1 && MSFunc->hasStructRetAttr())))
+ AddFastCallStdCallSuffix(OS, MSFunc, *DL);
+}
+
+void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
+ const GlobalValue *GV,
+ bool CannotUsePrivateLabel) const {
+ raw_svector_ostream OS(OutName);
+ getNameWithPrefix(OS, GV, CannotUsePrivateLabel);
+}
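Mangler.cpp above produces assembly-level symbol names for globals. A sketch of driving it from client code, assuming the Mangler constructor takes a DataLayout pointer; the helper name and output stream are illustrative.

#include "llvm/ADT/SmallString.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Mangler.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical helper: print the mangled symbol name for a global value.
void printMangledName(const llvm::GlobalValue *GV, const llvm::DataLayout &DL) {
  llvm::Mangler Mang(&DL); // assumed ctor: Mangler(const DataLayout *)
  llvm::SmallString<128> Name;
  // CannotUsePrivateLabel = false: allow the private-global prefix.
  Mang.getNameWithPrefix(Name, GV, /*CannotUsePrivateLabel=*/false);
  llvm::outs() << Name.str() << "\n";
}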
diff --git a/contrib/llvm/lib/IR/Metadata.cpp b/contrib/llvm/lib/IR/Metadata.cpp
index a32d25c..59137e4 100644
--- a/contrib/llvm/lib/IR/Metadata.cpp
+++ b/contrib/llvm/lib/IR/Metadata.cpp
@@ -16,14 +16,15 @@
#include "SymbolTableListTraitsImpl.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LeakDetector.h"
#include "llvm/IR/Module.h"
-#include "llvm/Support/ConstantRange.h"
-#include "llvm/Support/LeakDetector.h"
-#include "llvm/Support/ValueHandle.h"
+#include "llvm/IR/ValueHandle.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -77,8 +78,8 @@ public:
/// the list.
void setAsFirstOperand(unsigned V) { this->setValPtrInt(V); }
- virtual void deleted();
- virtual void allUsesReplacedWith(Value *NV);
+ void deleted() override;
+ void allUsesReplacedWith(Value *NV) override;
};
} // end namespace llvm.
@@ -86,7 +87,7 @@ public:
MDNodeOperand::~MDNodeOperand() {}
void MDNodeOperand::deleted() {
- getParent()->replaceOperand(this, 0);
+ getParent()->replaceOperand(this, nullptr);
}
void MDNodeOperand::allUsesReplacedWith(Value *NV) {
@@ -147,10 +148,10 @@ MDNode::~MDNode() {
}
static const Function *getFunctionForValue(Value *V) {
- if (!V) return NULL;
+ if (!V) return nullptr;
if (Instruction *I = dyn_cast<Instruction>(V)) {
BasicBlock *BB = I->getParent();
- return BB ? BB->getParent() : 0;
+ return BB ? BB->getParent() : nullptr;
}
if (Argument *A = dyn_cast<Argument>(V))
return A->getParent();
@@ -158,15 +159,15 @@ static const Function *getFunctionForValue(Value *V) {
return BB->getParent();
if (MDNode *MD = dyn_cast<MDNode>(V))
return MD->getFunction();
- return NULL;
+ return nullptr;
}
#ifndef NDEBUG
static const Function *assertLocalFunction(const MDNode *N) {
- if (!N->isFunctionLocal()) return 0;
+ if (!N->isFunctionLocal()) return nullptr;
// FIXME: This does not handle cyclic function local metadata.
- const Function *F = 0, *NewF = 0;
+ const Function *F = nullptr, *NewF = nullptr;
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
if (Value *V = N->getOperand(i)) {
if (MDNode *MD = dyn_cast<MDNode>(V))
@@ -174,10 +175,11 @@ static const Function *assertLocalFunction(const MDNode *N) {
else
NewF = getFunctionForValue(V);
}
- if (F == 0)
+ if (!F)
F = NewF;
- else
- assert((NewF == 0 || F == NewF) &&"inconsistent function-local metadata");
+ else
+ assert((NewF == nullptr || F == NewF) &&
+ "inconsistent function-local metadata");
}
return F;
}
@@ -191,11 +193,11 @@ const Function *MDNode::getFunction() const {
#ifndef NDEBUG
return assertLocalFunction(this);
#else
- if (!isFunctionLocal()) return NULL;
+ if (!isFunctionLocal()) return nullptr;
for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
if (const Function *F = getFunctionForValue(getOperand(i)))
return F;
- return NULL;
+ return nullptr;
#endif
}
@@ -223,8 +225,8 @@ MDNode *MDNode::getMDNode(LLVMContext &Context, ArrayRef<Value*> Vals,
// Note that if the operands are later nulled out, the node will be
// removed from the uniquing map.
FoldingSetNodeID ID;
- for (unsigned i = 0; i != Vals.size(); ++i)
- ID.AddPointer(Vals[i]);
+ for (Value *V : Vals)
+ ID.AddPointer(V);
void *InsertPoint;
MDNode *N = pImpl->MDNodeSet.FindNodeOrInsertPos(ID, InsertPoint);
@@ -235,8 +237,7 @@ MDNode *MDNode::getMDNode(LLVMContext &Context, ArrayRef<Value*> Vals,
bool isFunctionLocal = false;
switch (FL) {
case FL_Unknown:
- for (unsigned i = 0; i != Vals.size(); ++i) {
- Value *V = Vals[i];
+ for (Value *V : Vals) {
if (!V) continue;
if (isFunctionLocalValue(V)) {
isFunctionLocal = true;
@@ -335,14 +336,14 @@ void MDNode::replaceOperand(MDNodeOperand *Op, Value *To) {
// Likewise if the MDNode is function-local but for a different function.
if (To && isFunctionLocalValue(To)) {
if (!isFunctionLocal())
- To = 0;
+ To = nullptr;
else {
const Function *F = getFunction();
const Function *FV = getFunctionForValue(To);
// Metadata can be function-local without having an associated function.
// So only consider functions to have changed if non-null.
if (F && FV && F != FV)
- To = 0;
+ To = nullptr;
}
}
@@ -366,7 +367,7 @@ void MDNode::replaceOperand(MDNodeOperand *Op, Value *To) {
// anymore. This commonly occurs during destruction, and uniquing these
// brings little reuse. Also, this means we don't need to include
// isFunctionLocal bits in FoldingSetNodeIDs for MDNodes.
- if (To == 0) {
+ if (!To) {
setIsNotUniqued();
return;
}
@@ -407,7 +408,7 @@ void MDNode::replaceOperand(MDNodeOperand *Op, Value *To) {
MDNode *MDNode::getMostGenericFPMath(MDNode *A, MDNode *B) {
if (!A || !B)
- return NULL;
+ return nullptr;
APFloat AVal = cast<ConstantFP>(A->getOperand(0))->getValueAPF();
APFloat BVal = cast<ConstantFP>(B->getOperand(0))->getValueAPF();
@@ -457,7 +458,7 @@ MDNode *MDNode::getMostGenericRange(MDNode *A, MDNode *B) {
// the ones that overlap.
if (!A || !B)
- return NULL;
+ return nullptr;
if (A == B)
return A;
@@ -512,7 +513,7 @@ MDNode *MDNode::getMostGenericRange(MDNode *A, MDNode *B) {
ConstantRange Range(cast<ConstantInt>(EndPoints[0])->getValue(),
cast<ConstantInt>(EndPoints[1])->getValue());
if (Range.isFullSet())
- return NULL;
+ return nullptr;
}
return MDNode::get(A->getContext(), EndPoints);
@@ -527,7 +528,7 @@ static SmallVector<TrackingVH<MDNode>, 4> &getNMDOps(void *Operands) {
}
NamedMDNode::NamedMDNode(const Twine &N)
- : Name(N.str()), Parent(0),
+ : Name(N.str()), Parent(nullptr),
Operands(new SmallVector<TrackingVH<MDNode>, 4>()) {
}
@@ -575,7 +576,7 @@ StringRef NamedMDNode::getName() const {
//
void Instruction::setMetadata(StringRef Kind, MDNode *Node) {
- if (Node == 0 && !hasMetadata()) return;
+ if (!Node && !hasMetadata()) return;
setMetadata(getContext().getMDKindID(Kind), Node);
}
@@ -583,11 +584,55 @@ MDNode *Instruction::getMetadataImpl(StringRef Kind) const {
return getMetadataImpl(getContext().getMDKindID(Kind));
}
+void Instruction::dropUnknownMetadata(ArrayRef<unsigned> KnownIDs) {
+ SmallSet<unsigned, 5> KnownSet;
+ KnownSet.insert(KnownIDs.begin(), KnownIDs.end());
+
+ // Drop debug if needed
+ if (KnownSet.erase(LLVMContext::MD_dbg))
+ DbgLoc = DebugLoc();
+
+ if (!hasMetadataHashEntry())
+ return; // Nothing to remove!
+
+ DenseMap<const Instruction *, LLVMContextImpl::MDMapTy> &MetadataStore =
+ getContext().pImpl->MetadataStore;
+
+ if (KnownSet.empty()) {
+ // Just drop our entry at the store.
+ MetadataStore.erase(this);
+ setHasMetadataHashEntry(false);
+ return;
+ }
+
+ LLVMContextImpl::MDMapTy &Info = MetadataStore[this];
+ unsigned I;
+ unsigned E;
+ // Walk the array and drop any metadata we don't know.
+ for (I = 0, E = Info.size(); I != E;) {
+ if (KnownSet.count(Info[I].first)) {
+ ++I;
+ continue;
+ }
+
+ Info[I] = Info.back();
+ Info.pop_back();
+ --E;
+ }
+ assert(E == Info.size());
+
+ if (E == 0) {
+ // Drop our entry at the store.
+ MetadataStore.erase(this);
+ setHasMetadataHashEntry(false);
+ }
+}
+
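The newly added Instruction::dropUnknownMetadata() lets a transform whitelist the metadata kinds it knows how to preserve and discard the rest. A minimal sketch; the chosen kinds are illustrative.

#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"

// Hypothetical helper for a transform that rewrites an instruction and can
// only vouch for debug locations and TBAA info afterwards.
void stripToWhitelistedMetadata(llvm::Instruction *I) {
  unsigned KnownIDs[] = {llvm::LLVMContext::MD_dbg, llvm::LLVMContext::MD_tbaa};
  // Everything outside KnownIDs is dropped, which is the conservative choice.
  I->dropUnknownMetadata(KnownIDs);
}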
/// setMetadata - Set the metadata of the specified kind to the specified
/// node. This updates/replaces metadata if already present, or removes it if
/// Node is null.
void Instruction::setMetadata(unsigned KindID, MDNode *Node) {
- if (Node == 0 && !hasMetadata()) return;
+ if (!Node && !hasMetadata()) return;
// Handle 'dbg' as a special case since it is not stored in the hash table.
if (KindID == LLVMContext::MD_dbg) {
@@ -604,9 +649,9 @@ void Instruction::setMetadata(unsigned KindID, MDNode *Node) {
setHasMetadataHashEntry(true);
} else {
// Handle replacement of an existing value.
- for (unsigned i = 0, e = Info.size(); i != e; ++i)
- if (Info[i].first == KindID) {
- Info[i].second = Node;
+ for (auto &P : Info)
+ if (P.first == KindID) {
+ P.second = Node;
return;
}
}
@@ -618,7 +663,7 @@ void Instruction::setMetadata(unsigned KindID, MDNode *Node) {
// Otherwise, we're removing metadata from an instruction.
assert((hasMetadataHashEntry() ==
- getContext().pImpl->MetadataStore.count(this)) &&
+ (getContext().pImpl->MetadataStore.count(this) > 0)) &&
"HasMetadata bit out of date!");
if (!hasMetadataHashEntry())
return; // Nothing to remove!
@@ -647,16 +692,15 @@ MDNode *Instruction::getMetadataImpl(unsigned KindID) const {
if (KindID == LLVMContext::MD_dbg)
return DbgLoc.getAsMDNode(getContext());
- if (!hasMetadataHashEntry()) return 0;
+ if (!hasMetadataHashEntry()) return nullptr;
LLVMContextImpl::MDMapTy &Info = getContext().pImpl->MetadataStore[this];
assert(!Info.empty() && "bit out of sync with hash table");
- for (LLVMContextImpl::MDMapTy::iterator I = Info.begin(), E = Info.end();
- I != E; ++I)
- if (I->first == KindID)
- return I->second;
- return 0;
+ for (const auto &I : Info)
+ if (I.first == KindID)
+ return I.second;
+ return nullptr;
}
void Instruction::getAllMetadataImpl(SmallVectorImpl<std::pair<unsigned,
diff --git a/contrib/llvm/lib/IR/Module.cpp b/contrib/llvm/lib/IR/Module.cpp
index 4f240c7..f1b1f9a 100644
--- a/contrib/llvm/lib/IR/Module.cpp
+++ b/contrib/llvm/lib/IR/Module.cpp
@@ -17,12 +17,15 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/GVMaterializer.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GVMaterializer.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/LLVMContext.h"
-#include "llvm/Support/LeakDetector.h"
+#include "llvm/IR/LeakDetector.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/RandomNumberGenerator.h"
#include <algorithm>
#include <cstdarg>
#include <cstdlib>
@@ -42,8 +45,8 @@ template class llvm::SymbolTableListTraits<GlobalAlias, Module>;
// Primitive Module methods.
//
-Module::Module(StringRef MID, LLVMContext& C)
- : Context(C), Materializer(NULL), ModuleID(MID) {
+Module::Module(StringRef MID, LLVMContext &C)
+ : Context(C), Materializer(), ModuleID(MID), RNG(nullptr), DL("") {
ValSymTab = new ValueSymbolTable();
NamedMDSymTab = new StringMap<NamedMDNode *>();
Context.addModule(this);
@@ -58,51 +61,7 @@ Module::~Module() {
NamedMDList.clear();
delete ValSymTab;
delete static_cast<StringMap<NamedMDNode *> *>(NamedMDSymTab);
-}
-
-/// Target endian information.
-Module::Endianness Module::getEndianness() const {
- StringRef temp = DataLayout;
- Module::Endianness ret = AnyEndianness;
-
- while (!temp.empty()) {
- std::pair<StringRef, StringRef> P = getToken(temp, "-");
-
- StringRef token = P.first;
- temp = P.second;
-
- if (token[0] == 'e') {
- ret = LittleEndian;
- } else if (token[0] == 'E') {
- ret = BigEndian;
- }
- }
-
- return ret;
-}
-
-/// Target Pointer Size information.
-Module::PointerSize Module::getPointerSize() const {
- StringRef temp = DataLayout;
- Module::PointerSize ret = AnyPointerSize;
-
- while (!temp.empty()) {
- std::pair<StringRef, StringRef> TmpP = getToken(temp, "-");
- temp = TmpP.second;
- TmpP = getToken(TmpP.first, ":");
- StringRef token = TmpP.second, signalToken = TmpP.first;
-
- if (signalToken[0] == 'p') {
- int size = 0;
- getToken(token, ":").first.getAsInteger(10, size);
- if (size == 32)
- ret = Pointer32;
- else if (size == 64)
- ret = Pointer64;
- }
- }
-
- return ret;
+ delete RNG;
}
/// getNamedValue - Return the first global value in the module with
@@ -140,7 +99,7 @@ Constant *Module::getOrInsertFunction(StringRef Name,
AttributeSet AttributeList) {
// See if we have a definition for the specified function already.
GlobalValue *F = getNamedValue(Name);
- if (F == 0) {
+ if (!F) {
// Nope, add it
Function *New = Function::Create(Ty, GlobalVariable::ExternalLinkage, Name);
if (!New->isIntrinsic()) // Intrinsics get attrs set on construction
@@ -149,16 +108,6 @@ Constant *Module::getOrInsertFunction(StringRef Name,
return New; // Return the new prototype.
}
- // Okay, the function exists. Does it have externally visible linkage?
- if (F->hasLocalLinkage()) {
- // Clear the function's name.
- F->setName("");
- // Retry, now there won't be a conflict.
- Constant *NewF = getOrInsertFunction(Name, Ty);
- F->setName(Name);
- return NewF;
- }
-
// If the function exists but has the wrong type, return a bitcast to the
// right type.
if (F->getType() != PointerType::getUnqual(Ty))
@@ -238,7 +187,7 @@ GlobalVariable *Module::getGlobalVariable(StringRef Name, bool AllowLocal) {
dyn_cast_or_null<GlobalVariable>(getNamedValue(Name)))
if (AllowLocal || !Result->hasLocalLinkage())
return Result;
- return 0;
+ return nullptr;
}
/// getOrInsertGlobal - Look up the specified global in the module symbol table.
@@ -250,11 +199,11 @@ GlobalVariable *Module::getGlobalVariable(StringRef Name, bool AllowLocal) {
Constant *Module::getOrInsertGlobal(StringRef Name, Type *Ty) {
// See if we have a definition for the specified global already.
GlobalVariable *GV = dyn_cast_or_null<GlobalVariable>(getNamedValue(Name));
- if (GV == 0) {
+ if (!GV) {
// Nope, add it
GlobalVariable *New =
new GlobalVariable(*this, Ty, false, GlobalVariable::ExternalLinkage,
- 0, Name);
+ nullptr, Name);
return New; // Return the new declaration.
}
@@ -316,8 +265,7 @@ getModuleFlagsMetadata(SmallVectorImpl<ModuleFlagEntry> &Flags) const {
const NamedMDNode *ModFlags = getModuleFlagsMetadata();
if (!ModFlags) return;
- for (unsigned i = 0, e = ModFlags->getNumOperands(); i != e; ++i) {
- MDNode *Flag = ModFlags->getOperand(i);
+ for (const MDNode *Flag : ModFlags->operands()) {
if (Flag->getNumOperands() >= 3 && isa<ConstantInt>(Flag->getOperand(0)) &&
isa<MDString>(Flag->getOperand(1))) {
// Check the operands of the MDNode before accessing the operands.
@@ -336,12 +284,11 @@ getModuleFlagsMetadata(SmallVectorImpl<ModuleFlagEntry> &Flags) const {
Value *Module::getModuleFlag(StringRef Key) const {
SmallVector<Module::ModuleFlagEntry, 8> ModuleFlags;
getModuleFlagsMetadata(ModuleFlags);
- for (unsigned I = 0, E = ModuleFlags.size(); I < E; ++I) {
- const ModuleFlagEntry &MFE = ModuleFlags[I];
+ for (const ModuleFlagEntry &MFE : ModuleFlags) {
if (Key == MFE.Key->getString())
return MFE.Val;
}
- return 0;
+ return nullptr;
}
/// getModuleFlagsMetadata - Returns the NamedMDNode in the module that
@@ -383,6 +330,44 @@ void Module::addModuleFlag(MDNode *Node) {
getOrInsertModuleFlagsMetadata()->addOperand(Node);
}
+void Module::setDataLayout(StringRef Desc) {
+ DL.reset(Desc);
+
+ if (Desc.empty()) {
+ DataLayoutStr = "";
+ } else {
+ DataLayoutStr = DL.getStringRepresentation();
+ // DataLayoutStr is now equivalent to Desc, but since the representation
+ // is not unique, they may not be identical.
+ }
+}
+
+void Module::setDataLayout(const DataLayout *Other) {
+ if (!Other) {
+ DataLayoutStr = "";
+ DL.reset("");
+ } else {
+ DL = *Other;
+ DataLayoutStr = DL.getStringRepresentation();
+ }
+}
+
+const DataLayout *Module::getDataLayout() const {
+ if (DataLayoutStr.empty())
+ return nullptr;
+ return &DL;
+}
+
+// We want reproducible builds, but ModuleID may be a full path so we just use
+// the filename to salt the RNG (although it is not guaranteed to be unique).
+RandomNumberGenerator &Module::getRNG() const {
+ if (RNG == nullptr) {
+ StringRef Salt = sys::path::filename(ModuleID);
+ RNG = new RandomNumberGenerator(Salt);
+ }
+ return *RNG;
+}
+
//===----------------------------------------------------------------------===//
// Methods to control the materialization of GlobalValues in the Module.
//
@@ -409,7 +394,7 @@ bool Module::Materialize(GlobalValue *GV, std::string *ErrInfo) {
if (!Materializer)
return false;
- error_code EC = Materializer->Materialize(GV);
+ std::error_code EC = Materializer->Materialize(GV);
if (!EC)
return false;
if (ErrInfo)
@@ -422,22 +407,21 @@ void Module::Dematerialize(GlobalValue *GV) {
return Materializer->Dematerialize(GV);
}
-bool Module::MaterializeAll(std::string *ErrInfo) {
+std::error_code Module::materializeAll() {
if (!Materializer)
- return false;
- error_code EC = Materializer->MaterializeModule(this);
- if (!EC)
- return false;
- if (ErrInfo)
- *ErrInfo = EC.message();
- return true;
+ return std::error_code();
+ return Materializer->MaterializeModule(this);
}
-bool Module::MaterializeAllPermanently(std::string *ErrInfo) {
- if (MaterializeAll(ErrInfo))
- return true;
+std::error_code Module::materializeAllPermanently(bool ReleaseBuffer) {
+ if (std::error_code EC = materializeAll())
+ return EC;
+
+ if (ReleaseBuffer)
+ Materializer->releaseBuffer();
+
Materializer.reset();
- return false;
+ return std::error_code();
}
//===----------------------------------------------------------------------===//
@@ -453,12 +437,27 @@ bool Module::MaterializeAllPermanently(std::string *ErrInfo) {
// has "dropped all references", except operator delete.
//
void Module::dropAllReferences() {
- for(Module::iterator I = begin(), E = end(); I != E; ++I)
- I->dropAllReferences();
+ for (Function &F : *this)
+ F.dropAllReferences();
+
+ for (GlobalVariable &GV : globals())
+ GV.dropAllReferences();
- for(Module::global_iterator I = global_begin(), E = global_end(); I != E; ++I)
- I->dropAllReferences();
+ for (GlobalAlias &GA : aliases())
+ GA.dropAllReferences();
+}
+
+unsigned Module::getDwarfVersion() const {
+ Value *Val = getModuleFlag("Dwarf Version");
+ if (!Val)
+ return dwarf::DWARF_VERSION;
+ return cast<ConstantInt>(Val)->getZExtValue();
+}
- for(Module::alias_iterator I = alias_begin(), E = alias_end(); I != E; ++I)
- I->dropAllReferences();
+Comdat *Module::getOrInsertComdat(StringRef Name) {
+ Comdat C;
+ StringMapEntry<Comdat> &Entry =
+ ComdatSymTab.GetOrCreateValue(Name, std::move(C));
+ Entry.second.Name = &Entry;
+ return &Entry.second;
}
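The Module materialization API above moves from a bool-plus-string interface to std::error_code. A hedged sketch of a caller adapting to the new signatures; the error reporting shown is illustrative.

#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"
#include <system_error>

// Hypothetical caller: fully materialize a lazily loaded module.
bool fullyMaterialize(llvm::Module &M) {
  // materializeAllPermanently() now reports failure via std::error_code
  // instead of returning true and filling an out-parameter string.
  if (std::error_code EC = M.materializeAllPermanently(/*ReleaseBuffer=*/false)) {
    llvm::errs() << "materialization failed: " << EC.message() << "\n";
    return false;
  }
  return true;
}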
diff --git a/contrib/llvm/lib/IR/Pass.cpp b/contrib/llvm/lib/IR/Pass.cpp
index 7fc4828..91d86ae 100644
--- a/contrib/llvm/lib/IR/Pass.cpp
+++ b/contrib/llvm/lib/IR/Pass.cpp
@@ -14,13 +14,16 @@
//===----------------------------------------------------------------------===//
#include "llvm/Pass.h"
-#include "llvm/Assembly/PrintModulePass.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/IR/LegacyPassNameParser.h"
#include "llvm/PassRegistry.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/PassNameParser.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "ir"
+
//===----------------------------------------------------------------------===//
// Pass Implementation
//
@@ -35,7 +38,7 @@ ModulePass::~ModulePass() { }
Pass *ModulePass::createPrinterPass(raw_ostream &O,
const std::string &Banner) const {
- return createPrintModulePass(&O, false, Banner);
+ return createPrintModulePass(O, Banner);
}
PassManagerType ModulePass::getPotentialPassManagerType() const {
@@ -43,7 +46,7 @@ PassManagerType ModulePass::getPotentialPassManagerType() const {
}
bool Pass::mustPreserveAnalysisID(char &AID) const {
- return Resolver->getAnalysisIfAvailable(&AID, true) != 0;
+ return Resolver->getAnalysisIfAvailable(&AID, true) != nullptr;
}
// dumpPassStructure - Implement the -debug-pass=Structure option
@@ -89,11 +92,11 @@ void *Pass::getAdjustedAnalysisPointer(AnalysisID AID) {
}
ImmutablePass *Pass::getAsImmutablePass() {
- return 0;
+ return nullptr;
}
PMDataManager *Pass::getAsPMDataManager() {
- return 0;
+ return nullptr;
}
void Pass::setResolver(AnalysisResolver *AR) {
@@ -111,7 +114,7 @@ void Pass::print(raw_ostream &O,const Module*) const {
// dump - call print(cerr);
void Pass::dump() const {
- print(dbgs(), 0);
+ print(dbgs(), nullptr);
}
//===----------------------------------------------------------------------===//
@@ -130,20 +133,29 @@ void ImmutablePass::initializePass() {
Pass *FunctionPass::createPrinterPass(raw_ostream &O,
const std::string &Banner) const {
- return createPrintFunctionPass(Banner, &O);
+ return createPrintFunctionPass(O, Banner);
}
PassManagerType FunctionPass::getPotentialPassManagerType() const {
return PMT_FunctionPassManager;
}
+bool FunctionPass::skipOptnoneFunction(const Function &F) const {
+ if (F.hasFnAttribute(Attribute::OptimizeNone)) {
+ DEBUG(dbgs() << "Skipping pass '" << getPassName()
+ << "' on function " << F.getName() << "\n");
+ return true;
+ }
+ return false;
+}
+
//===----------------------------------------------------------------------===//
// BasicBlockPass Implementation
//
Pass *BasicBlockPass::createPrinterPass(raw_ostream &O,
const std::string &Banner) const {
- return createPrintBasicBlockPass(&O, false, Banner);
+ return createPrintBasicBlockPass(O, Banner);
}
bool BasicBlockPass::doInitialization(Function &) {
@@ -156,6 +168,18 @@ bool BasicBlockPass::doFinalization(Function &) {
return false;
}
+bool BasicBlockPass::skipOptnoneFunction(const BasicBlock &BB) const {
+ const Function *F = BB.getParent();
+ if (F && F->hasFnAttribute(Attribute::OptimizeNone)) {
+ // Report this only once per function.
+ if (&BB == &F->getEntryBlock())
+ DEBUG(dbgs() << "Skipping pass '" << getPassName()
+ << "' on function " << F->getName() << "\n");
+ return true;
+ }
+ return false;
+}
+
PassManagerType BasicBlockPass::getPotentialPassManagerType() const {
return PMT_BasicBlockPassManager;
}
@@ -171,18 +195,10 @@ const PassInfo *Pass::lookupPassInfo(StringRef Arg) {
Pass *Pass::createPass(AnalysisID ID) {
const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(ID);
if (!PI)
- return NULL;
+ return nullptr;
return PI->createPass();
}
-Pass *PassInfo::createPass() const {
- assert((!isAnalysisGroup() || NormalCtor) &&
- "No default implementation found for analysis group!");
- assert(NormalCtor &&
- "Cannot call createPass on PassInfo without default ctor!");
- return NormalCtor();
-}
-
//===----------------------------------------------------------------------===//
// Analysis Group Implementation Code
//===----------------------------------------------------------------------===//
@@ -200,17 +216,6 @@ RegisterAGBase::RegisterAGBase(const char *Name, const void *InterfaceID,
// PassRegistrationListener implementation
//
-// PassRegistrationListener ctor - Add the current object to the list of
-// PassRegistrationListeners...
-PassRegistrationListener::PassRegistrationListener() {
- PassRegistry::getPassRegistry()->addRegistrationListener(this);
-}
-
-// dtor - Remove object from list of listeners...
-PassRegistrationListener::~PassRegistrationListener() {
- PassRegistry::getPassRegistry()->removeRegistrationListener(this);
-}
-
// enumeratePasses - Iterate over the registered passes, calling the
// passEnumerate callback on each PassInfo object.
//
@@ -218,7 +223,16 @@ void PassRegistrationListener::enumeratePasses() {
PassRegistry::getPassRegistry()->enumerateWith(this);
}
-PassNameParser::~PassNameParser() {}
+PassNameParser::PassNameParser()
+ : Opt(nullptr) {
+ PassRegistry::getPassRegistry()->addRegistrationListener(this);
+}
+
+PassNameParser::~PassNameParser() {
+ // This only gets called during static destruction, in which case the
+ // PassRegistry will have already been destroyed by llvm_shutdown(). So
+ // attempting to remove the registration listener is an error.
+}
//===----------------------------------------------------------------------===//
// AnalysisUsage Class Implementation
@@ -230,7 +244,7 @@ namespace {
VectorType &CFGOnlyList;
GetCFGOnlyPasses(VectorType &L) : CFGOnlyList(L) {}
- void passEnumerate(const PassInfo *P) {
+ void passEnumerate(const PassInfo *P) override {
if (P->isCFGOnlyPass())
CFGOnlyList.push_back(P->getTypeInfo());
}
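FunctionPass::skipOptnoneFunction() above gives legacy passes a standard way to honour the optnone attribute. A hedged sketch of the intended usage pattern; the pass itself is a placeholder and performs no transformation.

#include "llvm/IR/Function.h"
#include "llvm/Pass.h"

namespace {
// Placeholder pass: demonstrates only the early-exit pattern.
struct ExamplePass : public llvm::FunctionPass {
  static char ID;
  ExamplePass() : llvm::FunctionPass(ID) {}

  bool runOnFunction(llvm::Function &F) override {
    // Bail out on functions marked optnone, as added in this release.
    if (skipOptnoneFunction(F))
      return false;
    // ... a real transformation would go here ...
    return false;
  }
};
char ExamplePass::ID = 0;
}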
diff --git a/contrib/llvm/lib/IR/PassManager.cpp b/contrib/llvm/lib/IR/PassManager.cpp
index 966af7d..2e2a7cb 100644
--- a/contrib/llvm/lib/IR/PassManager.cpp
+++ b/contrib/llvm/lib/IR/PassManager.cpp
@@ -1,4 +1,4 @@
-//===- PassManager.h - Infrastructure for managing & running IR passes ----===//
+//===- PassManager.cpp - Infrastructure for managing & running IR passes --===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,147 +7,143 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/IR/PassManager.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
using namespace llvm;
-void ModulePassManager::run() {
- for (unsigned Idx = 0, Size = Passes.size(); Idx != Size; ++Idx)
- if (Passes[Idx]->run(M))
- if (AM) AM->invalidateAll(M);
-}
+static cl::opt<bool>
+DebugPM("debug-pass-manager", cl::Hidden,
+ cl::desc("Print pass management debugging information"));
+
+PreservedAnalyses ModulePassManager::run(Module *M, ModuleAnalysisManager *AM) {
+ PreservedAnalyses PA = PreservedAnalyses::all();
+
+ if (DebugPM)
+ dbgs() << "Starting module pass manager run.\n";
+
+ for (unsigned Idx = 0, Size = Passes.size(); Idx != Size; ++Idx) {
+ if (DebugPM)
+ dbgs() << "Running module pass: " << Passes[Idx]->name() << "\n";
+
+ PreservedAnalyses PassPA = Passes[Idx]->run(M, AM);
+ if (AM)
+ AM->invalidate(M, PassPA);
+ PA.intersect(std::move(PassPA));
+
+ M->getContext().yield();
+ }
+
+ if (DebugPM)
+ dbgs() << "Finished module pass manager run.\n";
-bool FunctionPassManager::run(Module *M) {
- bool Changed = false;
- for (Module::iterator I = M->begin(), E = M->end(); I != E; ++I)
- for (unsigned Idx = 0, Size = Passes.size(); Idx != Size; ++Idx)
- if (Passes[Idx]->run(I)) {
- Changed = true;
- if (AM) AM->invalidateAll(I);
- }
- return Changed;
+ return PA;
}
-void AnalysisManager::invalidateAll(Function *F) {
- assert(F->getParent() == M && "Invalidating a function from another module!");
+ModuleAnalysisManager::ResultConceptT &
+ModuleAnalysisManager::getResultImpl(void *PassID, Module *M) {
+ ModuleAnalysisResultMapT::iterator RI;
+ bool Inserted;
+ std::tie(RI, Inserted) = ModuleAnalysisResults.insert(std::make_pair(
+ PassID, std::unique_ptr<detail::AnalysisResultConcept<Module *>>()));
- // First invalidate any module results we still have laying about.
- // FIXME: This is a total hack based on the fact that erasure doesn't
- // invalidate iteration for DenseMap.
- for (ModuleAnalysisResultMapT::iterator I = ModuleAnalysisResults.begin(),
- E = ModuleAnalysisResults.end();
- I != E; ++I)
- if (I->second->invalidate(M))
- ModuleAnalysisResults.erase(I);
+ // If we don't have a cached result for this module, look up the pass and run
+ // it to produce a result, which we then add to the cache.
+ if (Inserted)
+ RI->second = lookupPass(PassID).run(M, this);
- // Now clear all the invalidated results associated specifically with this
- // function.
- SmallVector<void *, 8> InvalidatedPassIDs;
- FunctionAnalysisResultListT &ResultsList = FunctionAnalysisResultLists[F];
- for (FunctionAnalysisResultListT::iterator I = ResultsList.begin(),
- E = ResultsList.end();
- I != E;)
- if (I->second->invalidate(F)) {
- InvalidatedPassIDs.push_back(I->first);
- I = ResultsList.erase(I);
- } else {
- ++I;
- }
- while (!InvalidatedPassIDs.empty())
- FunctionAnalysisResults.erase(
- std::make_pair(InvalidatedPassIDs.pop_back_val(), F));
+ return *RI->second;
+}
+
+ModuleAnalysisManager::ResultConceptT *
+ModuleAnalysisManager::getCachedResultImpl(void *PassID, Module *M) const {
+ ModuleAnalysisResultMapT::const_iterator RI =
+ ModuleAnalysisResults.find(PassID);
+ return RI == ModuleAnalysisResults.end() ? nullptr : &*RI->second;
}
-void AnalysisManager::invalidateAll(Module *M) {
- // First invalidate any module results we still have laying about.
+void ModuleAnalysisManager::invalidateImpl(void *PassID, Module *M) {
+ ModuleAnalysisResults.erase(PassID);
+}
+
+void ModuleAnalysisManager::invalidateImpl(Module *M,
+ const PreservedAnalyses &PA) {
// FIXME: This is a total hack based on the fact that erasure doesn't
// invalidate iteration for DenseMap.
for (ModuleAnalysisResultMapT::iterator I = ModuleAnalysisResults.begin(),
E = ModuleAnalysisResults.end();
I != E; ++I)
- if (I->second->invalidate(M))
+ if (I->second->invalidate(M, PA))
ModuleAnalysisResults.erase(I);
-
- // Now walk all of the functions for which there are cached results, and
- // attempt to invalidate each of those as the entire module may have changed.
- // FIXME: How do we handle functions which have been deleted or RAUWed?
- SmallVector<void *, 8> InvalidatedPassIDs;
- for (FunctionAnalysisResultListMapT::iterator
- FI = FunctionAnalysisResultLists.begin(),
- FE = FunctionAnalysisResultLists.end();
- FI != FE; ++FI) {
- Function *F = FI->first;
- FunctionAnalysisResultListT &ResultsList = FI->second;
- for (FunctionAnalysisResultListT::iterator I = ResultsList.begin(),
- E = ResultsList.end();
- I != E;)
- if (I->second->invalidate(F)) {
- InvalidatedPassIDs.push_back(I->first);
- I = ResultsList.erase(I);
- } else {
- ++I;
- }
- while (!InvalidatedPassIDs.empty())
- FunctionAnalysisResults.erase(
- std::make_pair(InvalidatedPassIDs.pop_back_val(), F));
- }
}
-const AnalysisManager::AnalysisResultConcept<Module> &
-AnalysisManager::getResultImpl(void *PassID, Module *M) {
- assert(M == this->M && "Wrong module used when querying the AnalysisManager");
- ModuleAnalysisResultMapT::iterator RI;
- bool Inserted;
- llvm::tie(RI, Inserted) = ModuleAnalysisResults.insert(std::make_pair(
- PassID, polymorphic_ptr<AnalysisResultConcept<Module> >()));
+PreservedAnalyses FunctionPassManager::run(Function *F,
+ FunctionAnalysisManager *AM) {
+ PreservedAnalyses PA = PreservedAnalyses::all();
- if (Inserted) {
- // We don't have a cached result for this result. Look up the pass and run
- // it to produce a result, which we then add to the cache.
- ModuleAnalysisPassMapT::const_iterator PI =
- ModuleAnalysisPasses.find(PassID);
- assert(PI != ModuleAnalysisPasses.end() &&
- "Analysis passes must be registered prior to being queried!");
- RI->second = PI->second->run(M);
+ if (DebugPM)
+ dbgs() << "Starting function pass manager run.\n";
+
+ for (unsigned Idx = 0, Size = Passes.size(); Idx != Size; ++Idx) {
+ if (DebugPM)
+ dbgs() << "Running function pass: " << Passes[Idx]->name() << "\n";
+
+ PreservedAnalyses PassPA = Passes[Idx]->run(F, AM);
+ if (AM)
+ AM->invalidate(F, PassPA);
+ PA.intersect(std::move(PassPA));
+
+ F->getContext().yield();
}
- return *RI->second;
+ if (DebugPM)
+ dbgs() << "Finished function pass manager run.\n";
+
+ return PA;
+}
+
+bool FunctionAnalysisManager::empty() const {
+ assert(FunctionAnalysisResults.empty() ==
+ FunctionAnalysisResultLists.empty() &&
+ "The storage and index of analysis results disagree on how many there "
+ "are!");
+ return FunctionAnalysisResults.empty();
}
-const AnalysisManager::AnalysisResultConcept<Function> &
-AnalysisManager::getResultImpl(void *PassID, Function *F) {
- assert(F->getParent() == M && "Analyzing a function from another module!");
+void FunctionAnalysisManager::clear() {
+ FunctionAnalysisResults.clear();
+ FunctionAnalysisResultLists.clear();
+}
+FunctionAnalysisManager::ResultConceptT &
+FunctionAnalysisManager::getResultImpl(void *PassID, Function *F) {
FunctionAnalysisResultMapT::iterator RI;
bool Inserted;
- llvm::tie(RI, Inserted) = FunctionAnalysisResults.insert(std::make_pair(
+ std::tie(RI, Inserted) = FunctionAnalysisResults.insert(std::make_pair(
std::make_pair(PassID, F), FunctionAnalysisResultListT::iterator()));
+ // If we don't have a cached result for this function, look up the pass and
+ // run it to produce a result, which we then add to the cache.
if (Inserted) {
- // We don't have a cached result for this result. Look up the pass and run
- // it to produce a result, which we then add to the cache.
- FunctionAnalysisPassMapT::const_iterator PI =
- FunctionAnalysisPasses.find(PassID);
- assert(PI != FunctionAnalysisPasses.end() &&
- "Analysis passes must be registered prior to being queried!");
FunctionAnalysisResultListT &ResultList = FunctionAnalysisResultLists[F];
- ResultList.push_back(std::make_pair(PassID, PI->second->run(F)));
- RI->second = llvm::prior(ResultList.end());
+ ResultList.emplace_back(PassID, lookupPass(PassID).run(F, this));
+ RI->second = std::prev(ResultList.end());
}
return *RI->second->second;
}
-void AnalysisManager::invalidateImpl(void *PassID, Module *M) {
- assert(M == this->M && "Invalidating a pass over a different module!");
- ModuleAnalysisResults.erase(PassID);
+FunctionAnalysisManager::ResultConceptT *
+FunctionAnalysisManager::getCachedResultImpl(void *PassID, Function *F) const {
+ FunctionAnalysisResultMapT::const_iterator RI =
+ FunctionAnalysisResults.find(std::make_pair(PassID, F));
+ return RI == FunctionAnalysisResults.end() ? nullptr : &*RI->second->second;
}
-void AnalysisManager::invalidateImpl(void *PassID, Function *F) {
- assert(F->getParent() == M &&
- "Invalidating a pass over a function from another module!");
-
+void FunctionAnalysisManager::invalidateImpl(void *PassID, Function *F) {
FunctionAnalysisResultMapT::iterator RI =
FunctionAnalysisResults.find(std::make_pair(PassID, F));
if (RI == FunctionAnalysisResults.end())
@@ -155,3 +151,54 @@ void AnalysisManager::invalidateImpl(void *PassID, Function *F) {
FunctionAnalysisResultLists[F].erase(RI->second);
}
+
+void FunctionAnalysisManager::invalidateImpl(Function *F,
+ const PreservedAnalyses &PA) {
+ // Clear all the invalidated results associated specifically with this
+ // function.
+ SmallVector<void *, 8> InvalidatedPassIDs;
+ FunctionAnalysisResultListT &ResultsList = FunctionAnalysisResultLists[F];
+ for (FunctionAnalysisResultListT::iterator I = ResultsList.begin(),
+ E = ResultsList.end();
+ I != E;)
+ if (I->second->invalidate(F, PA)) {
+ InvalidatedPassIDs.push_back(I->first);
+ I = ResultsList.erase(I);
+ } else {
+ ++I;
+ }
+ while (!InvalidatedPassIDs.empty())
+ FunctionAnalysisResults.erase(
+ std::make_pair(InvalidatedPassIDs.pop_back_val(), F));
+ if (ResultsList.empty())
+ FunctionAnalysisResultLists.erase(F);
+}
+
+char FunctionAnalysisManagerModuleProxy::PassID;
+
+FunctionAnalysisManagerModuleProxy::Result
+FunctionAnalysisManagerModuleProxy::run(Module *M) {
+ assert(FAM->empty() && "Function analyses ran prior to the module proxy!");
+ return Result(*FAM);
+}
+
+FunctionAnalysisManagerModuleProxy::Result::~Result() {
+ // Clear out the analysis manager if we're being destroyed -- it means we
+ // didn't even see an invalidate call when we got invalidated.
+ FAM->clear();
+}
+
+bool FunctionAnalysisManagerModuleProxy::Result::invalidate(
+ Module *M, const PreservedAnalyses &PA) {
+ // If this proxy isn't marked as preserved, then we can't even invalidate
+ // individual function analyses; there may be an invalid set of Function
+ // objects in the cache making it impossible to incrementally preserve them.
+ // Just clear the entire manager.
+ if (!PA.preserved(ID()))
+ FAM->clear();
+
+ // Return false to indicate that this result is still a valid proxy.
+ return false;
+}
+
+char ModuleAnalysisManagerFunctionProxy::PassID;
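PassManager.cpp above belongs to the experimental pass-manager rewrite, where passes are value types whose run() returns a PreservedAnalyses set. A heavily hedged sketch of the pass shape this code appears to expect, a run(Module*, ModuleAnalysisManager*) method plus a static name(); that concept is an assumption about an API that was still in flux at this point.

#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"

namespace {
// Placeholder pass written to the assumed concept.
struct HelloModulePass {
  static llvm::StringRef name() { return "HelloModulePass"; }

  llvm::PreservedAnalyses run(llvm::Module *M, llvm::ModuleAnalysisManager *) {
    (void)M;
    // Nothing is changed, so every analysis is preserved.
    return llvm::PreservedAnalyses::all();
  }
};
}

void runHello(llvm::Module &M) {
  llvm::ModulePassManager MPM;
  MPM.addPass(HelloModulePass());
  llvm::ModuleAnalysisManager MAM;
  // The -debug-pass-manager option defined above traces each pass by name.
  MPM.run(&M, &MAM);
}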
diff --git a/contrib/llvm/lib/IR/PassRegistry.cpp b/contrib/llvm/lib/IR/PassRegistry.cpp
index d3b2f1f..91940a9 100644
--- a/contrib/llvm/lib/IR/PassRegistry.cpp
+++ b/contrib/llvm/lib/IR/PassRegistry.cpp
@@ -13,14 +13,10 @@
//===----------------------------------------------------------------------===//
#include "llvm/PassRegistry.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/StringMap.h"
#include "llvm/IR/Function.h"
#include "llvm/PassSupport.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ManagedStatic.h"
-#include "llvm/Support/Mutex.h"
#include "llvm/Support/RWMutex.h"
#include <vector>
@@ -36,67 +32,23 @@ PassRegistry *PassRegistry::getPassRegistry() {
return &*PassRegistryObj;
}
-static ManagedStatic<sys::SmartRWMutex<true> > Lock;
-
-//===----------------------------------------------------------------------===//
-// PassRegistryImpl
-//
-
-namespace {
-struct PassRegistryImpl {
- /// PassInfoMap - Keep track of the PassInfo object for each registered pass.
- typedef DenseMap<const void*, const PassInfo*> MapType;
- MapType PassInfoMap;
-
- typedef StringMap<const PassInfo*> StringMapType;
- StringMapType PassInfoStringMap;
-
- /// AnalysisGroupInfo - Keep track of information for each analysis group.
- struct AnalysisGroupInfo {
- SmallPtrSet<const PassInfo *, 8> Implementations;
- };
- DenseMap<const PassInfo*, AnalysisGroupInfo> AnalysisGroupInfoMap;
-
- std::vector<const PassInfo*> ToFree;
- std::vector<PassRegistrationListener*> Listeners;
-};
-} // end anonymous namespace
-
-void *PassRegistry::getImpl() const {
- if (!pImpl)
- pImpl = new PassRegistryImpl();
- return pImpl;
-}
-
//===----------------------------------------------------------------------===//
// Accessors
//
PassRegistry::~PassRegistry() {
- sys::SmartScopedWriter<true> Guard(*Lock);
- PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(pImpl);
-
- for (std::vector<const PassInfo*>::iterator I = Impl->ToFree.begin(),
- E = Impl->ToFree.end(); I != E; ++I)
- delete *I;
-
- delete Impl;
- pImpl = 0;
}
const PassInfo *PassRegistry::getPassInfo(const void *TI) const {
- sys::SmartScopedReader<true> Guard(*Lock);
- PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
- PassRegistryImpl::MapType::const_iterator I = Impl->PassInfoMap.find(TI);
- return I != Impl->PassInfoMap.end() ? I->second : 0;
+ sys::SmartScopedReader<true> Guard(Lock);
+ MapType::const_iterator I = PassInfoMap.find(TI);
+ return I != PassInfoMap.end() ? I->second : nullptr;
}
const PassInfo *PassRegistry::getPassInfo(StringRef Arg) const {
- sys::SmartScopedReader<true> Guard(*Lock);
- PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
- PassRegistryImpl::StringMapType::const_iterator
- I = Impl->PassInfoStringMap.find(Arg);
- return I != Impl->PassInfoStringMap.end() ? I->second : 0;
+ sys::SmartScopedReader<true> Guard(Lock);
+ StringMapType::const_iterator I = PassInfoStringMap.find(Arg);
+ return I != PassInfoStringMap.end() ? I->second : nullptr;
}
//===----------------------------------------------------------------------===//
@@ -104,39 +56,34 @@ const PassInfo *PassRegistry::getPassInfo(StringRef Arg) const {
//
void PassRegistry::registerPass(const PassInfo &PI, bool ShouldFree) {
- sys::SmartScopedWriter<true> Guard(*Lock);
- PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
+ sys::SmartScopedWriter<true> Guard(Lock);
bool Inserted =
- Impl->PassInfoMap.insert(std::make_pair(PI.getTypeInfo(),&PI)).second;
+ PassInfoMap.insert(std::make_pair(PI.getTypeInfo(),&PI)).second;
assert(Inserted && "Pass registered multiple times!");
(void)Inserted;
- Impl->PassInfoStringMap[PI.getPassArgument()] = &PI;
+ PassInfoStringMap[PI.getPassArgument()] = &PI;
// Notify any listeners.
for (std::vector<PassRegistrationListener*>::iterator
- I = Impl->Listeners.begin(), E = Impl->Listeners.end(); I != E; ++I)
+ I = Listeners.begin(), E = Listeners.end(); I != E; ++I)
(*I)->passRegistered(&PI);
- if (ShouldFree) Impl->ToFree.push_back(&PI);
+ if (ShouldFree) ToFree.push_back(std::unique_ptr<const PassInfo>(&PI));
}
void PassRegistry::unregisterPass(const PassInfo &PI) {
- sys::SmartScopedWriter<true> Guard(*Lock);
- PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
- PassRegistryImpl::MapType::iterator I =
- Impl->PassInfoMap.find(PI.getTypeInfo());
- assert(I != Impl->PassInfoMap.end() && "Pass registered but not in map!");
+ sys::SmartScopedWriter<true> Guard(Lock);
+ MapType::iterator I = PassInfoMap.find(PI.getTypeInfo());
+ assert(I != PassInfoMap.end() && "Pass registered but not in map!");
// Remove pass from the map.
- Impl->PassInfoMap.erase(I);
- Impl->PassInfoStringMap.erase(PI.getPassArgument());
+ PassInfoMap.erase(I);
+ PassInfoStringMap.erase(PI.getPassArgument());
}
void PassRegistry::enumerateWith(PassRegistrationListener *L) {
- sys::SmartScopedReader<true> Guard(*Lock);
- PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
- for (PassRegistryImpl::MapType::const_iterator I = Impl->PassInfoMap.begin(),
- E = Impl->PassInfoMap.end(); I != E; ++I)
+ sys::SmartScopedReader<true> Guard(Lock);
+ for (auto I = PassInfoMap.begin(), E = PassInfoMap.end(); I != E; ++I)
L->passEnumerate(I->second);
}
@@ -148,7 +95,7 @@ void PassRegistry::registerAnalysisGroup(const void *InterfaceID,
bool isDefault,
bool ShouldFree) {
PassInfo *InterfaceInfo = const_cast<PassInfo*>(getPassInfo(InterfaceID));
- if (InterfaceInfo == 0) {
+ if (!InterfaceInfo) {
// First reference to Interface, register it now.
registerPass(Registeree);
InterfaceInfo = &Registeree;
@@ -161,50 +108,39 @@ void PassRegistry::registerAnalysisGroup(const void *InterfaceID,
assert(ImplementationInfo &&
"Must register pass before adding to AnalysisGroup!");
- sys::SmartScopedWriter<true> Guard(*Lock);
+ sys::SmartScopedWriter<true> Guard(Lock);
// Make sure we keep track of the fact that the implementation implements
// the interface.
ImplementationInfo->addInterfaceImplemented(InterfaceInfo);
- PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
- PassRegistryImpl::AnalysisGroupInfo &AGI =
- Impl->AnalysisGroupInfoMap[InterfaceInfo];
+ AnalysisGroupInfo &AGI = AnalysisGroupInfoMap[InterfaceInfo];
assert(AGI.Implementations.count(ImplementationInfo) == 0 &&
"Cannot add a pass to the same analysis group more than once!");
AGI.Implementations.insert(ImplementationInfo);
if (isDefault) {
- assert(InterfaceInfo->getNormalCtor() == 0 &&
+ assert(InterfaceInfo->getNormalCtor() == nullptr &&
"Default implementation for analysis group already specified!");
assert(ImplementationInfo->getNormalCtor() &&
"Cannot specify pass as default if it does not have a default ctor");
InterfaceInfo->setNormalCtor(ImplementationInfo->getNormalCtor());
+ InterfaceInfo->setTargetMachineCtor(
+ ImplementationInfo->getTargetMachineCtor());
}
}
- PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
- if (ShouldFree) Impl->ToFree.push_back(&Registeree);
+ if (ShouldFree)
+ ToFree.push_back(std::unique_ptr<const PassInfo>(&Registeree));
}
void PassRegistry::addRegistrationListener(PassRegistrationListener *L) {
- sys::SmartScopedWriter<true> Guard(*Lock);
- PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
- Impl->Listeners.push_back(L);
+ sys::SmartScopedWriter<true> Guard(Lock);
+ Listeners.push_back(L);
}
void PassRegistry::removeRegistrationListener(PassRegistrationListener *L) {
- sys::SmartScopedWriter<true> Guard(*Lock);
-
- // NOTE: This is necessary, because removeRegistrationListener() can be called
- // as part of the llvm_shutdown sequence. Since we have no control over the
- // order of that sequence, we need to gracefully handle the case where the
- // PassRegistry is destructed before the object that triggers this call.
- if (!pImpl) return;
+ sys::SmartScopedWriter<true> Guard(Lock);
- PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
- std::vector<PassRegistrationListener*>::iterator I =
- std::find(Impl->Listeners.begin(), Impl->Listeners.end(), L);
- assert(I != Impl->Listeners.end() &&
- "PassRegistrationListener not registered!");
- Impl->Listeners.erase(I);
+ auto I = std::find(Listeners.begin(), Listeners.end(), L);
+ Listeners.erase(I);
}
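
The ToFree change above replaces the raw-pointer vector that the old PassRegistryImpl deleted by hand in the destructor with a vector of std::unique_ptr, so ownership is released automatically. A minimal standalone sketch of that ownership pattern, assuming hypothetical Registry/Entry names rather than LLVM's:

#include <memory>
#include <string>
#include <vector>

struct Entry { std::string Name; };

class Registry {
  // Entries the registry owns; destruction is automatic, no delete loop.
  std::vector<std::unique_ptr<const Entry>> ToFree;

public:
  // ShouldFree mirrors the registerPass() parameter: take ownership only
  // when asked to.
  void add(const Entry *E, bool ShouldFree) {
    if (ShouldFree)
      ToFree.push_back(std::unique_ptr<const Entry>(E));
  }
};

int main() {
  static const Entry StaticEntry{"static-entry"}; // not owned by the registry
  Registry R;
  R.add(&StaticEntry, /*ShouldFree=*/false);
  R.add(new Entry{"heap-entry"}, /*ShouldFree=*/true);
} // ~Registry() releases "heap-entry" here; nothing leaks.
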
diff --git a/contrib/llvm/lib/IR/PrintModulePass.cpp b/contrib/llvm/lib/IR/PrintModulePass.cpp
deleted file mode 100644
index 5026bc2..0000000
--- a/contrib/llvm/lib/IR/PrintModulePass.cpp
+++ /dev/null
@@ -1,136 +0,0 @@
-//===--- IR/PrintModulePass.cpp - Module/Function Printer -----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// PrintModulePass and PrintFunctionPass implementations.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Assembly/PrintModulePass.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/Module.h"
-#include "llvm/Pass.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-namespace {
-
- class PrintModulePass : public ModulePass {
- std::string Banner;
- raw_ostream *Out; // raw_ostream to print on
- bool DeleteStream; // Delete the ostream in our dtor?
- public:
- static char ID;
- PrintModulePass() : ModulePass(ID), Out(&dbgs()),
- DeleteStream(false) {}
- PrintModulePass(const std::string &B, raw_ostream *o, bool DS)
- : ModulePass(ID), Banner(B), Out(o), DeleteStream(DS) {}
-
- ~PrintModulePass() {
- if (DeleteStream) delete Out;
- }
-
- bool runOnModule(Module &M) {
- (*Out) << Banner << M;
- return false;
- }
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- }
- };
-
- class PrintFunctionPass : public FunctionPass {
- std::string Banner; // String to print before each function
- raw_ostream *Out; // raw_ostream to print on
- bool DeleteStream; // Delete the ostream in our dtor?
- public:
- static char ID;
- PrintFunctionPass() : FunctionPass(ID), Banner(""), Out(&dbgs()),
- DeleteStream(false) {}
- PrintFunctionPass(const std::string &B, raw_ostream *o, bool DS)
- : FunctionPass(ID), Banner(B), Out(o), DeleteStream(DS) {}
-
- ~PrintFunctionPass() {
- if (DeleteStream) delete Out;
- }
-
- // runOnFunction - This pass just prints a banner followed by the
- // function as it's processed.
- //
- bool runOnFunction(Function &F) {
- (*Out) << Banner << static_cast<Value&>(F);
- return false;
- }
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- }
- };
-
- class PrintBasicBlockPass : public BasicBlockPass {
- std::string Banner;
- raw_ostream *Out; // raw_ostream to print on
- bool DeleteStream; // Delete the ostream in our dtor?
- public:
- static char ID;
- PrintBasicBlockPass() : BasicBlockPass(ID), Out(&dbgs()),
- DeleteStream(false) {}
- PrintBasicBlockPass(const std::string &B, raw_ostream *o, bool DS)
- : BasicBlockPass(ID), Banner(B), Out(o), DeleteStream(DS) {}
-
- ~PrintBasicBlockPass() {
- if (DeleteStream) delete Out;
- }
-
- bool runOnBasicBlock(BasicBlock &BB) {
- (*Out) << Banner << BB;
- return false;
- }
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- }
- };
-}
-
-char PrintModulePass::ID = 0;
-INITIALIZE_PASS(PrintModulePass, "print-module",
- "Print module to stderr", false, false)
-char PrintFunctionPass::ID = 0;
-INITIALIZE_PASS(PrintFunctionPass, "print-function",
- "Print function to stderr", false, false)
-char PrintBasicBlockPass::ID = 0;
-INITIALIZE_PASS(PrintBasicBlockPass, "print-bb",
- "Print BB to stderr", false, false)
-
-/// createPrintModulePass - Create and return a pass that writes the
-/// module to the specified raw_ostream.
-ModulePass *llvm::createPrintModulePass(llvm::raw_ostream *OS,
- bool DeleteStream,
- const std::string &Banner) {
- return new PrintModulePass(Banner, OS, DeleteStream);
-}
-
-/// createPrintFunctionPass - Create and return a pass that prints
-/// functions to the specified raw_ostream as they are processed.
-FunctionPass *llvm::createPrintFunctionPass(const std::string &Banner,
- llvm::raw_ostream *OS,
- bool DeleteStream) {
- return new PrintFunctionPass(Banner, OS, DeleteStream);
-}
-
-/// createPrintBasicBlockPass - Create and return a pass that writes the
-/// BB to the specified raw_ostream.
-BasicBlockPass *llvm::createPrintBasicBlockPass(llvm::raw_ostream *OS,
- bool DeleteStream,
- const std::string &Banner) {
- return new PrintBasicBlockPass(Banner, OS, DeleteStream);
-}
-
diff --git a/contrib/llvm/lib/IR/SymbolTableListTraitsImpl.h b/contrib/llvm/lib/IR/SymbolTableListTraitsImpl.h
index 5a383ee..8302597 100644
--- a/contrib/llvm/lib/IR/SymbolTableListTraitsImpl.h
+++ b/contrib/llvm/lib/IR/SymbolTableListTraitsImpl.h
@@ -65,7 +65,7 @@ void SymbolTableListTraits<ValueSubClass,ItemParentClass>
template<typename ValueSubClass, typename ItemParentClass>
void SymbolTableListTraits<ValueSubClass,ItemParentClass>
::addNodeToList(ValueSubClass *V) {
- assert(V->getParent() == 0 && "Value already in a container!!");
+ assert(!V->getParent() && "Value already in a container!!");
ItemParentClass *Owner = getListOwner();
V->setParent(Owner);
if (V->hasName())
@@ -76,7 +76,7 @@ void SymbolTableListTraits<ValueSubClass,ItemParentClass>
template<typename ValueSubClass, typename ItemParentClass>
void SymbolTableListTraits<ValueSubClass,ItemParentClass>
::removeNodeFromList(ValueSubClass *V) {
- V->setParent(0);
+ V->setParent(nullptr);
if (V->hasName())
if (ValueSymbolTable *ST = TraitsClass::getSymTab(getListOwner()))
ST->removeValueName(V->getValueName());
diff --git a/contrib/llvm/lib/IR/Type.cpp b/contrib/llvm/lib/IR/Type.cpp
index 432cbc9..1efde47 100644
--- a/contrib/llvm/lib/IR/Type.cpp
+++ b/contrib/llvm/lib/IR/Type.cpp
@@ -36,7 +36,7 @@ Type *Type::getPrimitiveType(LLVMContext &C, TypeID IDNumber) {
case MetadataTyID : return getMetadataTy(C);
case X86_MMXTyID : return getX86_MMXTy(C);
default:
- return 0;
+ return nullptr;
}
}
@@ -132,7 +132,7 @@ unsigned Type::getPrimitiveSizeInBits() const {
/// getScalarSizeInBits - If this is a vector type, return the
/// getPrimitiveSizeInBits value for the element type. Otherwise return the
/// getPrimitiveSizeInBits value for this type.
-unsigned Type::getScalarSizeInBits() {
+unsigned Type::getScalarSizeInBits() const {
return getScalarType()->getPrimitiveSizeInBits();
}
@@ -155,20 +155,14 @@ int Type::getFPMantissaWidth() const {
/// isSizedDerivedType - Derived types like structures and arrays are sized
/// iff all of the members of the type are sized as well. Since asking for
/// their size is relatively uncommon, move this operation out of line.
-bool Type::isSizedDerivedType() const {
- if (this->isIntegerTy())
- return true;
-
+bool Type::isSizedDerivedType(SmallPtrSet<const Type*, 4> *Visited) const {
if (const ArrayType *ATy = dyn_cast<ArrayType>(this))
- return ATy->getElementType()->isSized();
+ return ATy->getElementType()->isSized(Visited);
if (const VectorType *VTy = dyn_cast<VectorType>(this))
- return VTy->getElementType()->isSized();
-
- if (!this->isStructTy())
- return false;
+ return VTy->getElementType()->isSized(Visited);
- return cast<StructType>(this)->isSized();
+ return cast<StructType>(this)->isSized(Visited);
}
//===----------------------------------------------------------------------===//
@@ -318,8 +312,8 @@ IntegerType *IntegerType::get(LLVMContext &C, unsigned NumBits) {
}
IntegerType *&Entry = C.pImpl->IntegerTypes[NumBits];
-
- if (Entry == 0)
+
+ if (!Entry)
Entry = new (C.pImpl->TypeAllocator) IntegerType(C, NumBits);
return Entry;
@@ -454,7 +448,7 @@ void StructType::setName(StringRef Name) {
if (SymbolTableEntry) {
// Delete the old string data.
((EntryTy *)SymbolTableEntry)->Destroy(SymbolTable.getAllocator());
- SymbolTableEntry = 0;
+ SymbolTableEntry = nullptr;
}
return;
}
@@ -503,7 +497,7 @@ StructType *StructType::get(LLVMContext &Context, bool isPacked) {
}
StructType *StructType::get(Type *type, ...) {
- assert(type != 0 && "Cannot create a struct type with no elements with this");
+ assert(type && "Cannot create a struct type with no elements with this");
LLVMContext &Ctx = type->getContext();
va_list ap;
SmallVector<llvm::Type*, 8> StructFields;
@@ -544,7 +538,7 @@ StructType *StructType::create(ArrayRef<Type*> Elements) {
}
StructType *StructType::create(StringRef Name, Type *type, ...) {
- assert(type != 0 && "Cannot create a struct type with no elements with this");
+ assert(type && "Cannot create a struct type with no elements with this");
LLVMContext &Ctx = type->getContext();
va_list ap;
SmallVector<llvm::Type*, 8> StructFields;
@@ -556,17 +550,20 @@ StructType *StructType::create(StringRef Name, Type *type, ...) {
return llvm::StructType::create(Ctx, StructFields, Name);
}
-bool StructType::isSized() const {
+bool StructType::isSized(SmallPtrSet<const Type*, 4> *Visited) const {
if ((getSubclassData() & SCDB_IsSized) != 0)
return true;
if (isOpaque())
return false;
+ if (Visited && !Visited->insert(this))
+ return false;
+
// Okay, our struct is sized if all of the elements are, but if one of the
// elements is opaque, the struct isn't sized *yet*, but may become sized in
// the future, so just bail out without caching.
for (element_iterator I = element_begin(), E = element_end(); I != E; ++I)
- if (!(*I)->isSized())
+ if (!(*I)->isSized(Visited))
return false;
// Here we cheat a bit and cast away const-ness. The goal is to memoize when
@@ -579,13 +576,13 @@ bool StructType::isSized() const {
StringRef StructType::getName() const {
assert(!isLiteral() && "Literal structs never have names");
- if (SymbolTableEntry == 0) return StringRef();
-
+ if (!SymbolTableEntry) return StringRef();
+
return ((StringMapEntry<StructType*> *)SymbolTableEntry)->getKey();
}
void StructType::setBody(Type *type, ...) {
- assert(type != 0 && "Cannot create a struct type with no elements with this");
+ assert(type && "Cannot create a struct type with no elements with this");
va_list ap;
SmallVector<llvm::Type*, 8> StructFields;
va_start(ap, type);
@@ -683,8 +680,8 @@ ArrayType *ArrayType::get(Type *elementType, uint64_t NumElements) {
LLVMContextImpl *pImpl = ElementType->getContext().pImpl;
ArrayType *&Entry =
pImpl->ArrayTypes[std::make_pair(ElementType, NumElements)];
-
- if (Entry == 0)
+
+ if (!Entry)
Entry = new (pImpl->TypeAllocator) ArrayType(ElementType, NumElements);
return Entry;
}
@@ -712,8 +709,8 @@ VectorType *VectorType::get(Type *elementType, unsigned NumElements) {
LLVMContextImpl *pImpl = ElementType->getContext().pImpl;
VectorType *&Entry = ElementType->getContext().pImpl
->VectorTypes[std::make_pair(ElementType, NumElements)];
-
- if (Entry == 0)
+
+ if (!Entry)
Entry = new (pImpl->TypeAllocator) VectorType(ElementType, NumElements);
return Entry;
}
@@ -737,7 +734,7 @@ PointerType *PointerType::get(Type *EltTy, unsigned AddressSpace) {
PointerType *&Entry = AddressSpace == 0 ? CImpl->PointerTypes[EltTy]
: CImpl->ASPointerTypes[std::make_pair(EltTy, AddressSpace)];
- if (Entry == 0)
+ if (!Entry)
Entry = new (CImpl->TypeAllocator) PointerType(EltTy, AddressSpace);
return Entry;
}
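
The Visited set threaded through isSized()/isSizedDerivedType() above exists so that a pathological self-referential struct type terminates the walk instead of recursing forever: a type already in the set is treated as unsized for the current query. A rough standalone sketch of the same idea, with a hypothetical Record type standing in for StructType:

#include <cassert>
#include <unordered_set>
#include <vector>

struct Record {
  bool Opaque = false;                 // an opaque body is never sized
  std::vector<const Record *> Fields;  // element types, held by reference
};

static bool isSized(const Record *R,
                    std::unordered_set<const Record *> &Visited) {
  if (R->Opaque)
    return false;
  // A record already seen on this query participates in a cycle; returning
  // false here stops the recursion, as in the patch above.
  if (!Visited.insert(R).second)
    return false;
  for (const Record *F : R->Fields)
    if (!isSized(F, Visited))
      return false;
  return true;
}

int main() {
  Record A, B;
  A.Fields = {&B};
  B.Fields = {&A};                     // A and B reference each other
  std::unordered_set<const Record *> Visited;
  assert(!isSized(&A, Visited));       // cycle detected, no stack overflow
}
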
diff --git a/contrib/llvm/lib/IR/Use.cpp b/contrib/llvm/lib/IR/Use.cpp
index 1d343e8..047861c 100644
--- a/contrib/llvm/lib/IR/Use.cpp
+++ b/contrib/llvm/lib/IR/Use.cpp
@@ -6,111 +6,76 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file implements the algorithm for finding the User of a Use.
-//
-//===----------------------------------------------------------------------===//
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include <new>
namespace llvm {
-//===----------------------------------------------------------------------===//
-// Use swap Implementation
-//===----------------------------------------------------------------------===//
-
void Use::swap(Use &RHS) {
- Value *V1(Val);
- Value *V2(RHS.Val);
- if (V1 != V2) {
- if (V1) {
- removeFromList();
- }
-
- if (V2) {
- RHS.removeFromList();
- Val = V2;
- V2->addUse(*this);
- } else {
- Val = 0;
- }
+ if (Val == RHS.Val)
+ return;
+
+ if (Val)
+ removeFromList();
+
+ Value *OldVal = Val;
+ if (RHS.Val) {
+ RHS.removeFromList();
+ Val = RHS.Val;
+ Val->addUse(*this);
+ } else {
+ Val = nullptr;
+ }
- if (V1) {
- RHS.Val = V1;
- V1->addUse(RHS);
- } else {
- RHS.Val = 0;
- }
+ if (OldVal) {
+ RHS.Val = OldVal;
+ RHS.Val->addUse(RHS);
+ } else {
+ RHS.Val = nullptr;
}
}
-//===----------------------------------------------------------------------===//
-// Use getImpliedUser Implementation
-//===----------------------------------------------------------------------===//
-
-const Use *Use::getImpliedUser() const {
- const Use *Current = this;
-
- while (true) {
- unsigned Tag = (Current++)->Prev.getInt();
- switch (Tag) {
- case zeroDigitTag:
- case oneDigitTag:
- continue;
-
- case stopTag: {
- ++Current;
- ptrdiff_t Offset = 1;
- while (true) {
- unsigned Tag = Current->Prev.getInt();
- switch (Tag) {
- case zeroDigitTag:
- case oneDigitTag:
- ++Current;
- Offset = (Offset << 1) + Tag;
- continue;
- default:
- return Current + Offset;
- }
- }
- }
-
- case fullStopTag:
- return Current;
- }
- }
+User *Use::getUser() const {
+ const Use *End = getImpliedUser();
+ const UserRef *ref = reinterpret_cast<const UserRef *>(End);
+ return ref->getInt() ? ref->getPointer()
+ : reinterpret_cast<User *>(const_cast<Use *>(End));
}
-//===----------------------------------------------------------------------===//
-// Use initTags Implementation
-//===----------------------------------------------------------------------===//
+unsigned Use::getOperandNo() const {
+ return this - getUser()->op_begin();
+}
-Use *Use::initTags(Use * const Start, Use *Stop) {
+// Sets up the waymarking algorithm's tags for a series of Uses. See the
+// algorithm details here:
+//
+// http://www.llvm.org/docs/ProgrammersManual.html#UserLayout
+//
+Use *Use::initTags(Use *const Start, Use *Stop) {
ptrdiff_t Done = 0;
while (Done < 20) {
if (Start == Stop--)
return Start;
- static const PrevPtrTag tags[20] = { fullStopTag, oneDigitTag, stopTag,
- oneDigitTag, oneDigitTag, stopTag,
- zeroDigitTag, oneDigitTag, oneDigitTag,
- stopTag, zeroDigitTag, oneDigitTag,
- zeroDigitTag, oneDigitTag, stopTag,
- oneDigitTag, oneDigitTag, oneDigitTag,
- oneDigitTag, stopTag
- };
- new(Stop) Use(tags[Done++]);
+ static const PrevPtrTag tags[20] = {
+ fullStopTag, oneDigitTag, stopTag, oneDigitTag, oneDigitTag,
+ stopTag, zeroDigitTag, oneDigitTag, oneDigitTag, stopTag,
+ zeroDigitTag, oneDigitTag, zeroDigitTag, oneDigitTag, stopTag,
+ oneDigitTag, oneDigitTag, oneDigitTag, oneDigitTag, stopTag};
+ new (Stop) Use(tags[Done++]);
}
ptrdiff_t Count = Done;
while (Start != Stop) {
--Stop;
if (!Count) {
- new(Stop) Use(stopTag);
+ new (Stop) Use(stopTag);
++Done;
Count = Done;
} else {
- new(Stop) Use(PrevPtrTag(Count & 1));
+ new (Stop) Use(PrevPtrTag(Count & 1));
Count >>= 1;
++Done;
}
@@ -119,10 +84,6 @@ Use *Use::initTags(Use * const Start, Use *Stop) {
return Start;
}
-//===----------------------------------------------------------------------===//
-// Use zap Implementation
-//===----------------------------------------------------------------------===//
-
void Use::zap(Use *Start, const Use *Stop, bool del) {
while (Start != Stop)
(--Stop)->~Use();
@@ -130,16 +91,37 @@ void Use::zap(Use *Start, const Use *Stop, bool del) {
::operator delete(Start);
}
-//===----------------------------------------------------------------------===//
-// Use getUser Implementation
-//===----------------------------------------------------------------------===//
+const Use *Use::getImpliedUser() const {
+ const Use *Current = this;
-User *Use::getUser() const {
- const Use *End = getImpliedUser();
- const UserRef *ref = reinterpret_cast<const UserRef*>(End);
- return ref->getInt()
- ? ref->getPointer()
- : reinterpret_cast<User*>(const_cast<Use*>(End));
+ while (true) {
+ unsigned Tag = (Current++)->Prev.getInt();
+ switch (Tag) {
+ case zeroDigitTag:
+ case oneDigitTag:
+ continue;
+
+ case stopTag: {
+ ++Current;
+ ptrdiff_t Offset = 1;
+ while (true) {
+ unsigned Tag = Current->Prev.getInt();
+ switch (Tag) {
+ case zeroDigitTag:
+ case oneDigitTag:
+ ++Current;
+ Offset = (Offset << 1) + Tag;
+ continue;
+ default:
+ return Current + Offset;
+ }
+ }
+ }
+
+ case fullStopTag:
+ return Current;
+ }
+ }
}
} // End llvm namespace
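
The getUser()/getOperandNo() definitions moved into this file rely on a user's operand slots being laid out contiguously, with the waymarking tags written by initTags() encoding the way back to the owning User. A simplified standalone sketch of the pointer arithmetic involved; a plain back-pointer stands in for the tag walk, and all names are illustrative:

#include <array>
#include <cassert>
#include <cstddef>

struct User;

struct Use {
  const void *Val;   // value referenced by this operand slot
  User *Owner;       // back-pointer (the real code encodes this in tag bits)
};

struct User {
  std::array<Use, 3> Operands{};       // contiguous operand slots
  Use *op_begin() { return Operands.data(); }
};

// Operand index = distance from the first slot, exactly as in the patch:
//   return this - getUser()->op_begin();
static std::size_t getOperandNo(Use *U) {
  return static_cast<std::size_t>(U - U->Owner->op_begin());
}

int main() {
  User Add;
  for (Use &U : Add.Operands)
    U.Owner = &Add;
  assert(getOperandNo(&Add.Operands[2]) == 2);
}
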
diff --git a/contrib/llvm/lib/IR/Value.cpp b/contrib/llvm/lib/IR/Value.cpp
index 62a3b31..1ab2183 100644
--- a/contrib/llvm/lib/IR/Value.cpp
+++ b/contrib/llvm/lib/IR/Value.cpp
@@ -15,20 +15,22 @@
#include "LLVMContextImpl.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LeakDetector.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Support/LeakDetector.h"
#include "llvm/Support/ManagedStatic.h"
-#include "llvm/Support/ValueHandle.h"
#include <algorithm>
using namespace llvm;
@@ -38,13 +40,12 @@ using namespace llvm;
static inline Type *checkType(Type *Ty) {
assert(Ty && "Value defined with a null type: Error!");
- return const_cast<Type*>(Ty);
+ return Ty;
}
Value::Value(Type *ty, unsigned scid)
- : SubclassID(scid), HasValueHandle(0),
- SubclassOptionalData(0), SubclassData(0), VTy((Type*)checkType(ty)),
- UseList(0), Name(0) {
+ : VTy(checkType(ty)), UseList(nullptr), Name(nullptr), SubclassID(scid),
+ HasValueHandle(0), SubclassOptionalData(0), SubclassData(0) {
// FIXME: Why isn't this in the subclass gunk??
// Note, we cannot call isa<CallInst> before the CallInst has been
// constructed.
@@ -119,7 +120,7 @@ bool Value::isUsedInBasicBlock(const BasicBlock *BB) const {
// Scan both lists simultaneously until one is exhausted. This limits the
// search to the shorter list.
BasicBlock::const_iterator BI = BB->begin(), BE = BB->end();
- const_use_iterator UI = use_begin(), UE = use_end();
+ const_user_iterator UI = user_begin(), UE = user_end();
for (; BI != BE && UI != UE; ++BI, ++UI) {
// Scan basic block: Check if this Value is used by the instruction at BI.
if (std::find(BI->op_begin(), BI->op_end(), this) != BI->op_end())
@@ -141,7 +142,7 @@ unsigned Value::getNumUses() const {
}
static bool getSymTab(Value *V, ValueSymbolTable *&ST) {
- ST = 0;
+ ST = nullptr;
if (Instruction *I = dyn_cast<Instruction>(V)) {
if (BasicBlock *P = I->getParent())
if (Function *PP = P->getParent())
@@ -182,6 +183,8 @@ void Value::setName(const Twine &NewName) {
SmallString<256> NameData;
StringRef NameRef = NewName.toStringRef(NameData);
+ assert(NameRef.find_first_of(0) == StringRef::npos &&
+ "Null bytes are not allowed in names");
// Name isn't changing?
if (getName() == NameRef)
@@ -201,7 +204,7 @@ void Value::setName(const Twine &NewName) {
if (NameRef.empty()) {
// Free the name for this value.
Name->Destroy();
- Name = 0;
+ Name = nullptr;
return;
}
@@ -212,7 +215,7 @@ void Value::setName(const Twine &NewName) {
// then reallocated.
// Create the new name.
- Name = ValueName::Create(NameRef.begin(), NameRef.end());
+ Name = ValueName::Create(NameRef);
Name->setValue(this);
return;
}
@@ -223,7 +226,7 @@ void Value::setName(const Twine &NewName) {
// Remove old name.
ST->removeValueName(Name);
Name->Destroy();
- Name = 0;
+ Name = nullptr;
if (NameRef.empty())
return;
@@ -239,7 +242,7 @@ void Value::setName(const Twine &NewName) {
void Value::takeName(Value *V) {
assert(SubclassID != MDStringVal && "Cannot take the name of an MDString!");
- ValueSymbolTable *ST = 0;
+ ValueSymbolTable *ST = nullptr;
// If this value has a name, drop it.
if (hasName()) {
// Get the symtab this is in.
@@ -254,7 +257,7 @@ void Value::takeName(Value *V) {
if (ST)
ST->removeValueName(Name);
Name->Destroy();
- Name = 0;
+ Name = nullptr;
}
// Now we know that this has no name.
@@ -281,7 +284,7 @@ void Value::takeName(Value *V) {
if (ST == VST) {
// Take the name!
Name = V->Name;
- V->Name = 0;
+ V->Name = nullptr;
Name->setValue(this);
return;
}
@@ -292,17 +295,52 @@ void Value::takeName(Value *V) {
if (VST)
VST->removeValueName(V->Name);
Name = V->Name;
- V->Name = 0;
+ V->Name = nullptr;
Name->setValue(this);
if (ST)
ST->reinsertValue(this);
}
+#ifndef NDEBUG
+static bool contains(SmallPtrSet<ConstantExpr *, 4> &Cache, ConstantExpr *Expr,
+ Constant *C) {
+ if (!Cache.insert(Expr))
+ return false;
+
+ for (auto &O : Expr->operands()) {
+ if (O == C)
+ return true;
+ auto *CE = dyn_cast<ConstantExpr>(O);
+ if (!CE)
+ continue;
+ if (contains(Cache, CE, C))
+ return true;
+ }
+ return false;
+}
+
+static bool contains(Value *Expr, Value *V) {
+ if (Expr == V)
+ return true;
+
+ auto *C = dyn_cast<Constant>(V);
+ if (!C)
+ return false;
+
+ auto *CE = dyn_cast<ConstantExpr>(Expr);
+ if (!CE)
+ return false;
+
+ SmallPtrSet<ConstantExpr *, 4> Cache;
+ return contains(Cache, CE, C);
+}
+#endif
void Value::replaceAllUsesWith(Value *New) {
assert(New && "Value::replaceAllUsesWith(<null>) is invalid!");
- assert(New != this && "this->replaceAllUsesWith(this) is NOT valid!");
+ assert(!contains(New, this) &&
+ "this->replaceAllUsesWith(expr(this)) is NOT valid!");
assert(New->getType() == getType() &&
"replaceAllUses of value with new value of different type!");
@@ -314,7 +352,7 @@ void Value::replaceAllUsesWith(Value *New) {
Use &U = *UseList;
// Must handle Constants specially, we cannot call replaceUsesOfWith on a
// constant because they are uniqued.
- if (Constant *C = dyn_cast<Constant>(U.getUser())) {
+ if (auto *C = dyn_cast<Constant>(U.getUser())) {
if (!isa<GlobalValue>(C)) {
C->replaceUsesOfWithOnConstant(this, New, &U);
continue;
@@ -417,7 +455,8 @@ Value *Value::stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
return V;
Offset = GEPOffset;
V = GEP->getPointerOperand();
- } else if (Operator::getOpcode(V) == Instruction::BitCast) {
+ } else if (Operator::getOpcode(V) == Instruction::BitCast ||
+ Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
V = cast<Operator>(V)->getOperand(0);
} else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
V = GA->getAliasee();
@@ -436,32 +475,67 @@ Value *Value::stripInBoundsOffsets() {
/// isDereferenceablePointer - Test if this value is always a pointer to
/// allocated and suitably aligned memory for a simple load or store.
-static bool isDereferenceablePointer(const Value *V,
+static bool isDereferenceablePointer(const Value *V, const DataLayout *DL,
SmallPtrSet<const Value *, 32> &Visited) {
// Note that it is not safe to speculate into a malloc'd region because
// malloc may return null.
- // It's also not always safe to follow a bitcast, for example:
- // bitcast i8* (alloca i8) to i32*
- // would result in a 4-byte load from a 1-byte alloca. Some cases could
- // be handled using DataLayout to check sizes and alignments though.
// These are obviously ok.
if (isa<AllocaInst>(V)) return true;
+ // It's not always safe to follow a bitcast, for example:
+ // bitcast i8* (alloca i8) to i32*
+ // would result in a 4-byte load from a 1-byte alloca. However,
+ // if we're casting from a pointer from a type of larger size
+ // to a type of smaller size (or the same size), and the alignment
+ // is at least as large as for the resulting pointer type, then
+ // we can look through the bitcast.
+ if (DL)
+ if (const BitCastInst* BC = dyn_cast<BitCastInst>(V)) {
+ Type *STy = BC->getSrcTy()->getPointerElementType(),
+ *DTy = BC->getDestTy()->getPointerElementType();
+ if (STy->isSized() && DTy->isSized() &&
+ (DL->getTypeStoreSize(STy) >=
+ DL->getTypeStoreSize(DTy)) &&
+ (DL->getABITypeAlignment(STy) >=
+ DL->getABITypeAlignment(DTy)))
+ return isDereferenceablePointer(BC->getOperand(0), DL, Visited);
+ }
+
// Global variables which can't collapse to null are ok.
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
return !GV->hasExternalWeakLinkage();
- // byval arguments are ok.
- if (const Argument *A = dyn_cast<Argument>(V))
- return A->hasByValAttr();
+ // byval arguments are okay. Arguments specifically marked as
+ // dereferenceable are okay too.
+ if (const Argument *A = dyn_cast<Argument>(V)) {
+ if (A->hasByValAttr())
+ return true;
+ else if (uint64_t Bytes = A->getDereferenceableBytes()) {
+ Type *Ty = V->getType()->getPointerElementType();
+ if (Ty->isSized() && DL && DL->getTypeStoreSize(Ty) <= Bytes)
+ return true;
+ }
+
+ return false;
+ }
+
+ // Return values from call sites specifically marked as dereferenceable are
+ // also okay.
+ if (ImmutableCallSite CS = V) {
+ if (uint64_t Bytes = CS.getDereferenceableBytes(0)) {
+ Type *Ty = V->getType()->getPointerElementType();
+ if (Ty->isSized() && DL && DL->getTypeStoreSize(Ty) <= Bytes)
+ return true;
+ }
+ }
// For GEPs, determine if the indexing lands within the allocated object.
if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
// Conservatively require that the base pointer be fully dereferenceable.
if (!Visited.insert(GEP->getOperand(0)))
return false;
- if (!isDereferenceablePointer(GEP->getOperand(0), Visited))
+ if (!isDereferenceablePointer(GEP->getOperand(0), DL, Visited))
return false;
// Check the indices.
gep_type_iterator GTI = gep_type_begin(GEP);
@@ -491,15 +565,39 @@ static bool isDereferenceablePointer(const Value *V,
return true;
}
+ if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
+ return isDereferenceablePointer(ASC->getOperand(0), DL, Visited);
+
// If we don't know, assume the worst.
return false;
}
/// isDereferenceablePointer - Test if this value is always a pointer to
/// allocated and suitably aligned memory for a simple load or store.
-bool Value::isDereferenceablePointer() const {
+bool Value::isDereferenceablePointer(const DataLayout *DL) const {
+ // When dereferenceability information is provided by a dereferenceable
+ // attribute, we know exactly how many bytes are dereferenceable. If we can
+ // determine the exact offset to the attributed variable, we can use that
+ // information here.
+ Type *Ty = getType()->getPointerElementType();
+ if (Ty->isSized() && DL) {
+ APInt Offset(DL->getTypeStoreSizeInBits(getType()), 0);
+ const Value *BV = stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
+
+ APInt DerefBytes(Offset.getBitWidth(), 0);
+ if (const Argument *A = dyn_cast<Argument>(BV))
+ DerefBytes = A->getDereferenceableBytes();
+ else if (ImmutableCallSite CS = BV)
+ DerefBytes = CS.getDereferenceableBytes(0);
+
+ if (DerefBytes.getBoolValue() && Offset.isNonNegative()) {
+ if (DerefBytes.uge(Offset + DL->getTypeStoreSize(Ty)))
+ return true;
+ }
+ }
+
SmallPtrSet<const Value *, 32> Visited;
- return ::isDereferenceablePointer(this, Visited);
+ return ::isDereferenceablePointer(this, DL, Visited);
}
/// DoPHITranslation - If this value is a PHI node with CurBB as its parent,
@@ -555,7 +653,7 @@ void ValueHandleBase::AddToUseList() {
// If this value already has a ValueHandle, then it must be in the
// ValueHandles map already.
ValueHandleBase *&Entry = pImpl->ValueHandles[VP.getPointer()];
- assert(Entry != 0 && "Value doesn't have any handles?");
+ assert(Entry && "Value doesn't have any handles?");
AddToExistingUseList(&Entry);
return;
}
@@ -569,7 +667,7 @@ void ValueHandleBase::AddToUseList() {
const void *OldBucketPtr = Handles.getPointerIntoBucketsArray();
ValueHandleBase *&Entry = Handles[VP.getPointer()];
- assert(Entry == 0 && "Value really did already have handles?");
+ assert(!Entry && "Value really did already have handles?");
AddToExistingUseList(&Entry);
VP.getPointer()->HasValueHandle = true;
@@ -650,7 +748,7 @@ void ValueHandleBase::ValueIsDeleted(Value *V) {
break;
case Weak:
// Weak just goes to null, which will unlink it from the list.
- Entry->operator=(0);
+ Entry->operator=(nullptr);
break;
case Callback:
// Forward to the subclass's implementation.
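
The dereferenceable(N) handling added to isDereferenceablePointer() above reduces to an interval check: after accumulating a constant in-bounds offset from the attributed base pointer, an access is known safe when the accessed byte range fits inside the N attributed bytes. A small hedged sketch of that arithmetic; the function name and parameters are illustrative, not LLVM's API:

#include <cassert>
#include <cstdint>

// True when loading StoreSize bytes at a non-negative Offset from a base
// marked dereferenceable(DerefBytes) stays inside the attributed region.
static bool knownDereferenceable(uint64_t DerefBytes, int64_t Offset,
                                 uint64_t StoreSize) {
  if (DerefBytes == 0 || Offset < 0)   // no attribute, or negative offset
    return false;
  return static_cast<uint64_t>(Offset) + StoreSize <= DerefBytes;
}

int main() {
  // e.g. an argument marked dereferenceable(16): a 4-byte load at offset 8
  // is covered, but one at offset 14 would run past the attributed bytes.
  assert(knownDereferenceable(16, 8, 4));
  assert(!knownDereferenceable(16, 14, 4));
}
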
diff --git a/contrib/llvm/lib/IR/ValueSymbolTable.cpp b/contrib/llvm/lib/IR/ValueSymbolTable.cpp
index fffacb3..e9e979a 100644
--- a/contrib/llvm/lib/IR/ValueSymbolTable.cpp
+++ b/contrib/llvm/lib/IR/ValueSymbolTable.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "valuesymtab"
#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/IR/GlobalValue.h"
@@ -20,6 +19,8 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "valuesymtab"
+
// Class destructor
ValueSymbolTable::~ValueSymbolTable() {
#ifndef NDEBUG // Only do this in -g mode...
@@ -56,7 +57,7 @@ void ValueSymbolTable::reinsertValue(Value* V) {
// Try insert the vmap entry with this suffix.
ValueName &NewName = vmap.GetOrCreateValue(UniqueName);
- if (NewName.getValue() == 0) {
+ if (!NewName.getValue()) {
// Newly inserted name. Success!
NewName.setValue(V);
V->Name = &NewName;
@@ -78,7 +79,7 @@ void ValueSymbolTable::removeValueName(ValueName *V) {
ValueName *ValueSymbolTable::createValueName(StringRef Name, Value *V) {
// In the common case, the name is not already in the symbol table.
ValueName &Entry = vmap.GetOrCreateValue(Name);
- if (Entry.getValue() == 0) {
+ if (!Entry.getValue()) {
Entry.setValue(V);
//DEBUG(dbgs() << " Inserted value: " << Entry.getKeyData() << ": "
// << *V << "\n");
@@ -95,7 +96,7 @@ ValueName *ValueSymbolTable::createValueName(StringRef Name, Value *V) {
// Try insert the vmap entry with this suffix.
ValueName &NewName = vmap.GetOrCreateValue(UniqueName);
- if (NewName.getValue() == 0) {
+ if (!NewName.getValue()) {
// Newly inserted name. Success!
NewName.setValue(V);
//DEBUG(dbgs() << " Inserted value: " << UniqueName << ": " << *V << "\n");
diff --git a/contrib/llvm/lib/IR/Verifier.cpp b/contrib/llvm/lib/IR/Verifier.cpp
index da6b573..9cf911b 100644
--- a/contrib/llvm/lib/IR/Verifier.cpp
+++ b/contrib/llvm/lib/IR/Verifier.cpp
@@ -45,31 +45,31 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Analysis/Verifier.h"
+#include "llvm/IR/Verifier.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/Analysis/Dominators.h"
-#include "llvm/Assembly/Writer.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
-#include "llvm/InstVisitor.h"
+#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
-#include "llvm/PassManager.h"
-#include "llvm/Support/CFG.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -77,328 +77,284 @@
#include <cstdarg>
using namespace llvm;
-static cl::opt<bool> DisableDebugInfoVerifier("disable-debug-info-verifier",
- cl::init(true));
-
-namespace { // Anonymous namespace for class
- struct PreVerifier : public FunctionPass {
- static char ID; // Pass ID, replacement for typeid
-
- PreVerifier() : FunctionPass(ID) {
- initializePreVerifierPass(*PassRegistry::getPassRegistry());
- }
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- }
-
- // Check that the prerequisites for successful DominatorTree construction
- // are satisfied.
- bool runOnFunction(Function &F) {
- bool Broken = false;
-
- for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
- if (I->empty() || !I->back().isTerminator()) {
- dbgs() << "Basic Block in function '" << F.getName()
- << "' does not have terminator!\n";
- WriteAsOperand(dbgs(), I, true);
- dbgs() << "\n";
- Broken = true;
- }
- }
-
- if (Broken)
- report_fatal_error("Broken module, no Basic Block terminator!");
-
- return false;
- }
- };
-}
-
-char PreVerifier::ID = 0;
-INITIALIZE_PASS(PreVerifier, "preverify", "Preliminary module verification",
- false, false)
-static char &PreVerifyID = PreVerifier::ID;
+static cl::opt<bool> VerifyDebugInfo("verify-debug-info", cl::init(false));
namespace {
- struct Verifier : public FunctionPass, public InstVisitor<Verifier> {
- static char ID; // Pass ID, replacement for typeid
- bool Broken; // Is this module found to be broken?
- VerifierFailureAction action;
- // What to do if verification fails.
- Module *Mod; // Module we are verifying right now
- LLVMContext *Context; // Context within which we are verifying
- DominatorTree *DT; // Dominator Tree, caution can be null!
- const DataLayout *DL;
-
- std::string Messages;
- raw_string_ostream MessagesStr;
-
- /// InstInThisBlock - when verifying a basic block, keep track of all of the
- /// instructions we have seen so far. This allows us to do efficient
- /// dominance checks for the case when an instruction has an operand that is
- /// an instruction in the same block.
- SmallPtrSet<Instruction*, 16> InstsInThisBlock;
-
- /// MDNodes - keep track of the metadata nodes that have been checked
- /// already.
- SmallPtrSet<MDNode *, 32> MDNodes;
-
- /// PersonalityFn - The personality function referenced by the
- /// LandingPadInsts. All LandingPadInsts within the same function must use
- /// the same personality function.
- const Value *PersonalityFn;
-
- /// Finder keeps track of all debug info MDNodes in a Module.
- DebugInfoFinder Finder;
-
- Verifier()
- : FunctionPass(ID), Broken(false),
- action(AbortProcessAction), Mod(0), Context(0), DT(0), DL(0),
- MessagesStr(Messages), PersonalityFn(0) {
- initializeVerifierPass(*PassRegistry::getPassRegistry());
- }
- explicit Verifier(VerifierFailureAction ctn)
- : FunctionPass(ID), Broken(false), action(ctn), Mod(0),
- Context(0), DT(0), DL(0), MessagesStr(Messages), PersonalityFn(0) {
- initializeVerifierPass(*PassRegistry::getPassRegistry());
- }
+struct VerifierSupport {
+ raw_ostream &OS;
+ const Module *M;
- bool doInitialization(Module &M) {
- Mod = &M;
- Context = &M.getContext();
+ /// \brief Track the brokenness of the module while recursively visiting.
+ bool Broken;
- DL = getAnalysisIfAvailable<DataLayout>();
+ explicit VerifierSupport(raw_ostream &OS)
+ : OS(OS), M(nullptr), Broken(false) {}
- // We must abort before returning back to the pass manager, or else the
- // pass manager may try to run other passes on the broken module.
- return abortIfBroken();
+ void WriteValue(const Value *V) {
+ if (!V)
+ return;
+ if (isa<Instruction>(V)) {
+ OS << *V << '\n';
+ } else {
+ V->printAsOperand(OS, true, M);
+ OS << '\n';
}
+ }
- bool runOnFunction(Function &F) {
- // Get dominator information if we are being run by PassManager
- DT = &getAnalysis<DominatorTree>();
-
- Mod = F.getParent();
- if (!Context) Context = &F.getContext();
+ void WriteType(Type *T) {
+ if (!T)
+ return;
+ OS << ' ' << *T;
+ }
- Finder.reset();
- visit(F);
- InstsInThisBlock.clear();
- PersonalityFn = 0;
+ void WriteComdat(const Comdat *C) {
+ if (!C)
+ return;
+ OS << *C;
+ }
- if (!DisableDebugInfoVerifier)
- // Verify Debug Info.
- verifyDebugInfo();
+ // CheckFailed - A check failed, so print out the condition and the message
+ // that failed. This provides a nice place to put a breakpoint if you want
+ // to see why something is not correct.
+ void CheckFailed(const Twine &Message, const Value *V1 = nullptr,
+ const Value *V2 = nullptr, const Value *V3 = nullptr,
+ const Value *V4 = nullptr) {
+ OS << Message.str() << "\n";
+ WriteValue(V1);
+ WriteValue(V2);
+ WriteValue(V3);
+ WriteValue(V4);
+ Broken = true;
+ }
- // We must abort before returning back to the pass manager, or else the
- // pass manager may try to run other passes on the broken module.
- return abortIfBroken();
- }
+ void CheckFailed(const Twine &Message, const Value *V1, Type *T2,
+ const Value *V3 = nullptr) {
+ OS << Message.str() << "\n";
+ WriteValue(V1);
+ WriteType(T2);
+ WriteValue(V3);
+ Broken = true;
+ }
- bool doFinalization(Module &M) {
- // Scan through, checking all of the external function's linkage now...
- for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
- visitGlobalValue(*I);
+ void CheckFailed(const Twine &Message, Type *T1, Type *T2 = nullptr,
+ Type *T3 = nullptr) {
+ OS << Message.str() << "\n";
+ WriteType(T1);
+ WriteType(T2);
+ WriteType(T3);
+ Broken = true;
+ }
- // Check to make sure function prototypes are okay.
- if (I->isDeclaration()) visitFunction(*I);
+ void CheckFailed(const Twine &Message, const Comdat *C) {
+ OS << Message.str() << "\n";
+ WriteComdat(C);
+ Broken = true;
+ }
+};
+class Verifier : public InstVisitor<Verifier>, VerifierSupport {
+ friend class InstVisitor<Verifier>;
+
+ LLVMContext *Context;
+ const DataLayout *DL;
+ DominatorTree DT;
+
+ /// \brief When verifying a basic block, keep track of all of the
+ /// instructions we have seen so far.
+ ///
+ /// This allows us to do efficient dominance checks for the case when an
+ /// instruction has an operand that is an instruction in the same block.
+ SmallPtrSet<Instruction *, 16> InstsInThisBlock;
+
+ /// \brief Keep track of the metadata nodes that have been checked already.
+ SmallPtrSet<MDNode *, 32> MDNodes;
+
+ /// \brief The personality function referenced by the LandingPadInsts.
+ /// All LandingPadInsts within the same function must use the same
+ /// personality function.
+ const Value *PersonalityFn;
+
+public:
+ explicit Verifier(raw_ostream &OS = dbgs())
+ : VerifierSupport(OS), Context(nullptr), DL(nullptr),
+ PersonalityFn(nullptr) {}
+
+ bool verify(const Function &F) {
+ M = F.getParent();
+ Context = &M->getContext();
+
+ // First ensure the function is well-enough formed to compute dominance
+ // information.
+ if (F.empty()) {
+ OS << "Function '" << F.getName()
+ << "' does not contain an entry block!\n";
+ return false;
+ }
+ for (Function::const_iterator I = F.begin(), E = F.end(); I != E; ++I) {
+ if (I->empty() || !I->back().isTerminator()) {
+ OS << "Basic Block in function '" << F.getName()
+ << "' does not have terminator!\n";
+ I->printAsOperand(OS, true);
+ OS << "\n";
+ return false;
}
+ }
- for (Module::global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I)
- visitGlobalVariable(*I);
+ // Now directly compute a dominance tree. We don't rely on the pass
+ // manager to provide this as it isolates us from a potentially
+ // out-of-date dominator tree and makes it significantly more complex to
+ // run this code outside of a pass manager.
+ // FIXME: It's really gross that we have to cast away constness here.
+ DT.recalculate(const_cast<Function &>(F));
- for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
- I != E; ++I)
- visitGlobalAlias(*I);
+ Broken = false;
+ // FIXME: We strip const here because the inst visitor strips const.
+ visit(const_cast<Function &>(F));
+ InstsInThisBlock.clear();
+ PersonalityFn = nullptr;
- for (Module::named_metadata_iterator I = M.named_metadata_begin(),
- E = M.named_metadata_end(); I != E; ++I)
- visitNamedMDNode(*I);
+ return !Broken;
+ }
- visitModuleFlags(M);
- visitModuleIdents(M);
+ bool verify(const Module &M) {
+ this->M = &M;
+ Context = &M.getContext();
+ Broken = false;
- if (!DisableDebugInfoVerifier) {
- Finder.reset();
- Finder.processModule(M);
- // Verify Debug Info.
- verifyDebugInfo();
- }
+ // Scan through, checking all of the external function's linkage now...
+ for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I) {
+ visitGlobalValue(*I);
- // If the module is broken, abort at this time.
- return abortIfBroken();
+ // Check to make sure function prototypes are okay.
+ if (I->isDeclaration())
+ visitFunction(*I);
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- AU.addRequiredID(PreVerifyID);
- AU.addRequired<DominatorTree>();
- }
-
- /// abortIfBroken - If the module is broken and we are supposed to abort on
- /// this condition, do so.
- ///
- bool abortIfBroken() {
- if (!Broken) return false;
- MessagesStr << "Broken module found, ";
- switch (action) {
- case AbortProcessAction:
- MessagesStr << "compilation aborted!\n";
- dbgs() << MessagesStr.str();
- // Client should choose different reaction if abort is not desired
- abort();
- case PrintMessageAction:
- MessagesStr << "verification continues.\n";
- dbgs() << MessagesStr.str();
- return false;
- case ReturnStatusAction:
- MessagesStr << "compilation terminated.\n";
- return true;
- }
- llvm_unreachable("Invalid action");
- }
+ for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
+ I != E; ++I)
+ visitGlobalVariable(*I);
+ for (Module::const_alias_iterator I = M.alias_begin(), E = M.alias_end();
+ I != E; ++I)
+ visitGlobalAlias(*I);
- // Verification methods...
- void visitGlobalValue(GlobalValue &GV);
- void visitGlobalVariable(GlobalVariable &GV);
- void visitGlobalAlias(GlobalAlias &GA);
- void visitNamedMDNode(NamedMDNode &NMD);
- void visitMDNode(MDNode &MD, Function *F);
- void visitModuleIdents(Module &M);
- void visitModuleFlags(Module &M);
- void visitModuleFlag(MDNode *Op, DenseMap<MDString*, MDNode*> &SeenIDs,
- SmallVectorImpl<MDNode*> &Requirements);
- void visitFunction(Function &F);
- void visitBasicBlock(BasicBlock &BB);
- using InstVisitor<Verifier>::visit;
-
- void visit(Instruction &I);
-
- void visitTruncInst(TruncInst &I);
- void visitZExtInst(ZExtInst &I);
- void visitSExtInst(SExtInst &I);
- void visitFPTruncInst(FPTruncInst &I);
- void visitFPExtInst(FPExtInst &I);
- void visitFPToUIInst(FPToUIInst &I);
- void visitFPToSIInst(FPToSIInst &I);
- void visitUIToFPInst(UIToFPInst &I);
- void visitSIToFPInst(SIToFPInst &I);
- void visitIntToPtrInst(IntToPtrInst &I);
- void visitPtrToIntInst(PtrToIntInst &I);
- void visitBitCastInst(BitCastInst &I);
- void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
- void visitPHINode(PHINode &PN);
- void visitBinaryOperator(BinaryOperator &B);
- void visitICmpInst(ICmpInst &IC);
- void visitFCmpInst(FCmpInst &FC);
- void visitExtractElementInst(ExtractElementInst &EI);
- void visitInsertElementInst(InsertElementInst &EI);
- void visitShuffleVectorInst(ShuffleVectorInst &EI);
- void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
- void visitCallInst(CallInst &CI);
- void visitInvokeInst(InvokeInst &II);
- void visitGetElementPtrInst(GetElementPtrInst &GEP);
- void visitLoadInst(LoadInst &LI);
- void visitStoreInst(StoreInst &SI);
- void verifyDominatesUse(Instruction &I, unsigned i);
- void visitInstruction(Instruction &I);
- void visitTerminatorInst(TerminatorInst &I);
- void visitBranchInst(BranchInst &BI);
- void visitReturnInst(ReturnInst &RI);
- void visitSwitchInst(SwitchInst &SI);
- void visitIndirectBrInst(IndirectBrInst &BI);
- void visitSelectInst(SelectInst &SI);
- void visitUserOp1(Instruction &I);
- void visitUserOp2(Instruction &I) { visitUserOp1(I); }
- void visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI);
- void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
- void visitAtomicRMWInst(AtomicRMWInst &RMWI);
- void visitFenceInst(FenceInst &FI);
- void visitAllocaInst(AllocaInst &AI);
- void visitExtractValueInst(ExtractValueInst &EVI);
- void visitInsertValueInst(InsertValueInst &IVI);
- void visitLandingPadInst(LandingPadInst &LPI);
-
- void VerifyCallSite(CallSite CS);
- bool PerformTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty,
- int VT, unsigned ArgNo, std::string &Suffix);
- bool VerifyIntrinsicType(Type *Ty,
- ArrayRef<Intrinsic::IITDescriptor> &Infos,
- SmallVectorImpl<Type*> &ArgTys);
- bool VerifyIntrinsicIsVarArg(bool isVarArg,
- ArrayRef<Intrinsic::IITDescriptor> &Infos);
- bool VerifyAttributeCount(AttributeSet Attrs, unsigned Params);
- void VerifyAttributeTypes(AttributeSet Attrs, unsigned Idx,
- bool isFunction, const Value *V);
- void VerifyParameterAttrs(AttributeSet Attrs, unsigned Idx, Type *Ty,
- bool isReturnValue, const Value *V);
- void VerifyFunctionAttrs(FunctionType *FT, AttributeSet Attrs,
- const Value *V);
-
- void VerifyBitcastType(const Value *V, Type *DestTy, Type *SrcTy);
- void VerifyConstantExprBitcastType(const ConstantExpr *CE);
-
- void verifyDebugInfo();
-
- void WriteValue(const Value *V) {
- if (!V) return;
- if (isa<Instruction>(V)) {
- MessagesStr << *V << '\n';
- } else {
- WriteAsOperand(MessagesStr, V, true, Mod);
- MessagesStr << '\n';
- }
- }
+ for (Module::const_named_metadata_iterator I = M.named_metadata_begin(),
+ E = M.named_metadata_end();
+ I != E; ++I)
+ visitNamedMDNode(*I);
- void WriteType(Type *T) {
- if (!T) return;
- MessagesStr << ' ' << *T;
- }
+ for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
+ visitComdat(SMEC.getValue());
+ visitModuleFlags(M);
+ visitModuleIdents(M);
- // CheckFailed - A check failed, so print out the condition and the message
- // that failed. This provides a nice place to put a breakpoint if you want
- // to see why something is not correct.
- void CheckFailed(const Twine &Message,
- const Value *V1 = 0, const Value *V2 = 0,
- const Value *V3 = 0, const Value *V4 = 0) {
- MessagesStr << Message.str() << "\n";
- WriteValue(V1);
- WriteValue(V2);
- WriteValue(V3);
- WriteValue(V4);
- Broken = true;
- }
+ return !Broken;
+ }
- void CheckFailed(const Twine &Message, const Value *V1,
- Type *T2, const Value *V3 = 0) {
- MessagesStr << Message.str() << "\n";
- WriteValue(V1);
- WriteType(T2);
- WriteValue(V3);
- Broken = true;
- }
+private:
+ // Verification methods...
+ void visitGlobalValue(const GlobalValue &GV);
+ void visitGlobalVariable(const GlobalVariable &GV);
+ void visitGlobalAlias(const GlobalAlias &GA);
+ void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
+ void visitAliaseeSubExpr(SmallPtrSet<const GlobalAlias *, 4> &Visited,
+ const GlobalAlias &A, const Constant &C);
+ void visitNamedMDNode(const NamedMDNode &NMD);
+ void visitMDNode(MDNode &MD, Function *F);
+ void visitComdat(const Comdat &C);
+ void visitModuleIdents(const Module &M);
+ void visitModuleFlags(const Module &M);
+ void visitModuleFlag(const MDNode *Op,
+ DenseMap<const MDString *, const MDNode *> &SeenIDs,
+ SmallVectorImpl<const MDNode *> &Requirements);
+ void visitFunction(const Function &F);
+ void visitBasicBlock(BasicBlock &BB);
+
+ // InstVisitor overrides...
+ using InstVisitor<Verifier>::visit;
+ void visit(Instruction &I);
+
+ void visitTruncInst(TruncInst &I);
+ void visitZExtInst(ZExtInst &I);
+ void visitSExtInst(SExtInst &I);
+ void visitFPTruncInst(FPTruncInst &I);
+ void visitFPExtInst(FPExtInst &I);
+ void visitFPToUIInst(FPToUIInst &I);
+ void visitFPToSIInst(FPToSIInst &I);
+ void visitUIToFPInst(UIToFPInst &I);
+ void visitSIToFPInst(SIToFPInst &I);
+ void visitIntToPtrInst(IntToPtrInst &I);
+ void visitPtrToIntInst(PtrToIntInst &I);
+ void visitBitCastInst(BitCastInst &I);
+ void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
+ void visitPHINode(PHINode &PN);
+ void visitBinaryOperator(BinaryOperator &B);
+ void visitICmpInst(ICmpInst &IC);
+ void visitFCmpInst(FCmpInst &FC);
+ void visitExtractElementInst(ExtractElementInst &EI);
+ void visitInsertElementInst(InsertElementInst &EI);
+ void visitShuffleVectorInst(ShuffleVectorInst &EI);
+ void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
+ void visitCallInst(CallInst &CI);
+ void visitInvokeInst(InvokeInst &II);
+ void visitGetElementPtrInst(GetElementPtrInst &GEP);
+ void visitLoadInst(LoadInst &LI);
+ void visitStoreInst(StoreInst &SI);
+ void verifyDominatesUse(Instruction &I, unsigned i);
+ void visitInstruction(Instruction &I);
+ void visitTerminatorInst(TerminatorInst &I);
+ void visitBranchInst(BranchInst &BI);
+ void visitReturnInst(ReturnInst &RI);
+ void visitSwitchInst(SwitchInst &SI);
+ void visitIndirectBrInst(IndirectBrInst &BI);
+ void visitSelectInst(SelectInst &SI);
+ void visitUserOp1(Instruction &I);
+ void visitUserOp2(Instruction &I) { visitUserOp1(I); }
+ void visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI);
+ void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
+ void visitAtomicRMWInst(AtomicRMWInst &RMWI);
+ void visitFenceInst(FenceInst &FI);
+ void visitAllocaInst(AllocaInst &AI);
+ void visitExtractValueInst(ExtractValueInst &EVI);
+ void visitInsertValueInst(InsertValueInst &IVI);
+ void visitLandingPadInst(LandingPadInst &LPI);
+
+ void VerifyCallSite(CallSite CS);
+ void verifyMustTailCall(CallInst &CI);
+ bool PerformTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, int VT,
+ unsigned ArgNo, std::string &Suffix);
+ bool VerifyIntrinsicType(Type *Ty, ArrayRef<Intrinsic::IITDescriptor> &Infos,
+ SmallVectorImpl<Type *> &ArgTys);
+ bool VerifyIntrinsicIsVarArg(bool isVarArg,
+ ArrayRef<Intrinsic::IITDescriptor> &Infos);
+ bool VerifyAttributeCount(AttributeSet Attrs, unsigned Params);
+ void VerifyAttributeTypes(AttributeSet Attrs, unsigned Idx, bool isFunction,
+ const Value *V);
+ void VerifyParameterAttrs(AttributeSet Attrs, unsigned Idx, Type *Ty,
+ bool isReturnValue, const Value *V);
+ void VerifyFunctionAttrs(FunctionType *FT, AttributeSet Attrs,
+ const Value *V);
+
+ void VerifyBitcastType(const Value *V, Type *DestTy, Type *SrcTy);
+ void VerifyConstantExprBitcastType(const ConstantExpr *CE);
+};
+class DebugInfoVerifier : public VerifierSupport {
+public:
+ explicit DebugInfoVerifier(raw_ostream &OS = dbgs()) : VerifierSupport(OS) {}
+
+ bool verify(const Module &M) {
+ this->M = &M;
+ verifyDebugInfo();
+ return !Broken;
+ }
- void CheckFailed(const Twine &Message, Type *T1,
- Type *T2 = 0, Type *T3 = 0) {
- MessagesStr << Message.str() << "\n";
- WriteType(T1);
- WriteType(T2);
- WriteType(T3);
- Broken = true;
- }
- };
+private:
+ void verifyDebugInfo();
+ void processInstructions(DebugInfoFinder &Finder);
+ void processCallInst(DebugInfoFinder &Finder, const CallInst &CI);
+};
} // End anonymous namespace
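
The rewrite above separates the checker from the pass machinery: VerifierSupport only records diagnostics and a Broken flag, while Verifier and DebugInfoVerifier layer the actual checks on top and recompute a DominatorTree themselves instead of asking a pass manager for one. A toy standalone sketch of that layering, assuming illustrative names rather than LLVM's API:

#include <iostream>
#include <string>

struct CheckerSupport {
  std::ostream &OS;
  bool Broken = false;
  explicit CheckerSupport(std::ostream &OS) : OS(OS) {}

  // Print the failure and remember that something was wrong; verification
  // keeps going so one run can report every problem it finds.
  void checkFailed(const std::string &Message) {
    OS << Message << "\n";
    Broken = true;
  }
};

class Checker : CheckerSupport {
public:
  explicit Checker(std::ostream &OS = std::cerr) : CheckerSupport(OS) {}

  // Returns true when the input passed all checks, mirroring verify(F).
  bool verify(const std::string &Name) {
    Broken = false;
    if (Name.empty())
      checkFailed("name may not be empty");
    return !Broken;
  }
};

int main() {
  Checker C;
  return C.verify("") ? 0 : 1;   // reports the failure and exits non-zero
}
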
-char Verifier::ID = 0;
-INITIALIZE_PASS_BEGIN(Verifier, "verify", "Module Verifier", false, false)
-INITIALIZE_PASS_DEPENDENCY(PreVerifier)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
-INITIALIZE_PASS_END(Verifier, "verify", "Module Verifier", false, false)
-
// Assert - We know that cond should be true, if not print an error message.
#define Assert(C, M) \
do { if (!(C)) { CheckFailed(M); return; } } while (0)
@@ -413,36 +369,30 @@ INITIALIZE_PASS_END(Verifier, "verify", "Module Verifier", false, false)
void Verifier::visit(Instruction &I) {
for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
- Assert1(I.getOperand(i) != 0, "Operand is null", &I);
+ Assert1(I.getOperand(i) != nullptr, "Operand is null", &I);
InstVisitor<Verifier>::visit(I);
}
-void Verifier::visitGlobalValue(GlobalValue &GV) {
- Assert1(!GV.isDeclaration() ||
- GV.isMaterializable() ||
- GV.hasExternalLinkage() ||
- GV.hasDLLImportLinkage() ||
- GV.hasExternalWeakLinkage() ||
- (isa<GlobalAlias>(GV) &&
- (GV.hasLocalLinkage() || GV.hasWeakLinkage())),
- "Global is external, but doesn't have external or dllimport or weak linkage!",
+void Verifier::visitGlobalValue(const GlobalValue &GV) {
+ Assert1(!GV.isDeclaration() || GV.isMaterializable() ||
+ GV.hasExternalLinkage() || GV.hasExternalWeakLinkage(),
+ "Global is external, but doesn't have external or weak linkage!",
&GV);
- Assert1(!GV.hasDLLImportLinkage() || GV.isDeclaration(),
- "Global is marked as dllimport, but not external", &GV);
-
+ Assert1(GV.getAlignment() <= Value::MaximumAlignment,
+ "huge alignment values are unsupported", &GV);
Assert1(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
"Only global variables can have appending linkage!", &GV);
if (GV.hasAppendingLinkage()) {
- GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
+ const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
Assert1(GVar && GVar->getType()->getElementType()->isArrayTy(),
"Only global arrays can have appending linkage!", GVar);
}
}
-void Verifier::visitGlobalVariable(GlobalVariable &GV) {
+void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
if (GV.hasInitializer()) {
Assert1(GV.getInitializer()->getType() == GV.getType()->getElementType(),
"Global variable initializer type does not match global "
@@ -455,10 +405,10 @@ void Verifier::visitGlobalVariable(GlobalVariable &GV) {
"'common' global must have a zero initializer!", &GV);
Assert1(!GV.isConstant(), "'common' global may not be marked constant!",
&GV);
+ Assert1(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
}
} else {
- Assert1(GV.hasExternalLinkage() || GV.hasDLLImportLinkage() ||
- GV.hasExternalWeakLinkage(),
+ Assert1(GV.hasExternalLinkage() || GV.hasExternalWeakLinkage(),
"invalid linkage type for global declaration", &GV);
}
@@ -468,14 +418,22 @@ void Verifier::visitGlobalVariable(GlobalVariable &GV) {
"invalid linkage for intrinsic global variable", &GV);
// Don't worry about emitting an error for it not being an array,
// visitGlobalValue will complain on appending non-array.
- if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getType())) {
+ if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getType()->getElementType())) {
StructType *STy = dyn_cast<StructType>(ATy->getElementType());
PointerType *FuncPtrTy =
FunctionType::get(Type::getVoidTy(*Context), false)->getPointerTo();
- Assert1(STy && STy->getNumElements() == 2 &&
+ // FIXME: Reject the 2-field form in LLVM 4.0.
+ Assert1(STy && (STy->getNumElements() == 2 ||
+ STy->getNumElements() == 3) &&
STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
STy->getTypeAtIndex(1) == FuncPtrTy,
"wrong type for intrinsic global variable", &GV);
+ if (STy->getNumElements() == 3) {
+ Type *ETy = STy->getTypeAtIndex(2);
+ Assert1(ETy->isPointerTy() &&
+ cast<PointerType>(ETy)->getElementType()->isIntegerTy(8),
+ "wrong type for intrinsic global variable", &GV);
+ }
}
}
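// A minimal sketch of the three-field form accepted above, assuming the
// usual llvm.global_ctors layout and a hypothetical constructor @ctor:
//
//   @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }]
//     [{ i32, void ()*, i8* } { i32 65535, void ()* @ctor, i8* null }]
//
// Index 0 is the i32 priority, index 1 the void ()* callee, and the new
// optional index 2 an i8* associated-data pointer; the two-field form is
// still allowed until the FIXME above is resolved.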
@@ -488,8 +446,8 @@ void Verifier::visitGlobalVariable(GlobalVariable &GV) {
PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
Assert1(PTy, "wrong type for intrinsic global variable", &GV);
if (GV.hasInitializer()) {
- Constant *Init = GV.getInitializer();
- ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
+ const Constant *Init = GV.getInitializer();
+ const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
      Assert1(InitArray, "wrong initializer for intrinsic global variable",
Init);
for (unsigned i = 0, e = InitArray->getNumOperands(); i != e; ++i) {
@@ -503,6 +461,11 @@ void Verifier::visitGlobalVariable(GlobalVariable &GV) {
}
}
+ Assert1(!GV.hasDLLImportStorageClass() ||
+ (GV.isDeclaration() && GV.hasExternalLinkage()) ||
+ GV.hasAvailableExternallyLinkage(),
+ "Global is marked as dllimport, but not external", &GV);
+
if (!GV.hasInitializer()) {
visitGlobalValue(GV);
return;
@@ -533,46 +496,62 @@ void Verifier::visitGlobalVariable(GlobalVariable &GV) {
visitGlobalValue(GV);
}
-void Verifier::visitGlobalAlias(GlobalAlias &GA) {
- Assert1(!GA.getName().empty(),
- "Alias name cannot be empty!", &GA);
- Assert1(GlobalAlias::isValidLinkage(GA.getLinkage()),
- "Alias should have external or external weak linkage!", &GA);
- Assert1(GA.getAliasee(),
- "Aliasee cannot be NULL!", &GA);
- Assert1(GA.getType() == GA.getAliasee()->getType(),
- "Alias and aliasee types should match!", &GA);
- Assert1(!GA.hasUnnamedAddr(), "Alias cannot have unnamed_addr!", &GA);
-
- Constant *Aliasee = GA.getAliasee();
+void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
+ SmallPtrSet<const GlobalAlias*, 4> Visited;
+ Visited.insert(&GA);
+ visitAliaseeSubExpr(Visited, GA, C);
+}
- if (!isa<GlobalValue>(Aliasee)) {
- ConstantExpr *CE = dyn_cast<ConstantExpr>(Aliasee);
- Assert1(CE &&
- (CE->getOpcode() == Instruction::BitCast ||
- CE->getOpcode() == Instruction::GetElementPtr) &&
- isa<GlobalValue>(CE->getOperand(0)),
- "Aliasee should be either GlobalValue or bitcast of GlobalValue",
- &GA);
+void Verifier::visitAliaseeSubExpr(SmallPtrSet<const GlobalAlias *, 4> &Visited,
+ const GlobalAlias &GA, const Constant &C) {
+ if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
+ Assert1(!GV->isDeclaration(), "Alias must point to a definition", &GA);
- if (CE->getOpcode() == Instruction::BitCast) {
- unsigned SrcAS = CE->getOperand(0)->getType()->getPointerAddressSpace();
- unsigned DstAS = CE->getType()->getPointerAddressSpace();
+ if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
+ Assert1(Visited.insert(GA2), "Aliases cannot form a cycle", &GA);
- Assert1(SrcAS == DstAS,
- "Alias bitcasts cannot be between different address spaces",
+ Assert1(!GA2->mayBeOverridden(), "Alias cannot point to a weak alias",
&GA);
+ } else {
+ // Only continue verifying subexpressions of GlobalAliases.
+ // Do not recurse into global initializers.
+ return;
}
}
- const GlobalValue* Resolved = GA.resolveAliasedGlobal(/*stopOnWeak*/ false);
- Assert1(Resolved,
- "Aliasing chain should end with function or global variable", &GA);
+ if (const auto *CE = dyn_cast<ConstantExpr>(&C))
+ VerifyConstantExprBitcastType(CE);
+
+ for (const Use &U : C.operands()) {
+ Value *V = &*U;
+ if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
+ visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
+ else if (const auto *C2 = dyn_cast<Constant>(V))
+ visitAliaseeSubExpr(Visited, GA, *C2);
+ }
+}
+
+void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
+ Assert1(!GA.getName().empty(),
+ "Alias name cannot be empty!", &GA);
+ Assert1(GlobalAlias::isValidLinkage(GA.getLinkage()),
+ "Alias should have private, internal, linkonce, weak, linkonce_odr, "
+ "weak_odr, or external linkage!",
+ &GA);
+ const Constant *Aliasee = GA.getAliasee();
+ Assert1(Aliasee, "Aliasee cannot be NULL!", &GA);
+ Assert1(GA.getType() == Aliasee->getType(),
+ "Alias and aliasee types should match!", &GA);
+
+ Assert1(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
+ "Aliasee should be either GlobalValue or ConstantExpr", &GA);
+
+ visitAliaseeSubExpr(GA, *Aliasee);
visitGlobalValue(GA);
}
-void Verifier::visitNamedMDNode(NamedMDNode &NMD) {
+void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
for (unsigned i = 0, e = NMD.getNumOperands(); i != e; ++i) {
MDNode *MD = NMD.getOperand(i);
if (!MD)
@@ -580,7 +559,7 @@ void Verifier::visitNamedMDNode(NamedMDNode &NMD) {
Assert1(!MD->isFunctionLocal(),
"Named metadata operand cannot be function local!", MD);
- visitMDNode(*MD, 0);
+ visitMDNode(*MD, nullptr);
}
}
@@ -606,7 +585,7 @@ void Verifier::visitMDNode(MDNode &MD, Function *F) {
// If this was an instruction, bb, or argument, verify that it is in the
// function that we expect.
- Function *ActualF = 0;
+ Function *ActualF = nullptr;
if (Instruction *I = dyn_cast<Instruction>(Op))
ActualF = I->getParent()->getParent();
else if (BasicBlock *BB = dyn_cast<BasicBlock>(Op))
@@ -620,7 +599,22 @@ void Verifier::visitMDNode(MDNode &MD, Function *F) {
}
}
-void Verifier::visitModuleIdents(Module &M) {
+void Verifier::visitComdat(const Comdat &C) {
+ // All Comdat::SelectionKind values other than Comdat::Any require a
+ // GlobalValue with the same name as the Comdat.
+ const GlobalValue *GV = M->getNamedValue(C.getName());
+ if (C.getSelectionKind() != Comdat::Any)
+ Assert1(GV,
+ "comdat selection kind requires a global value with the same name",
+ &C);
+ // The Module is invalid if the GlobalValue has private linkage. Entities
+ // with private linkage don't have entries in the symbol table.
+ if (GV)
+ Assert1(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
+ GV);
+}
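// A hedged IR sketch of the rule above, assuming the textual comdat syntax
// of this release and a hypothetical symbol foo: any selection kind other
// than "any" needs a non-private global value with the comdat's name.
//
//   $foo = comdat largest
//   @foo = global i32 2, comdat $foo   ; ok: same-named, non-private global
//
// With "$foo = comdat any" the matching global may be omitted entirely.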
+
+void Verifier::visitModuleIdents(const Module &M) {
const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
if (!Idents)
return;
@@ -638,24 +632,24 @@ void Verifier::visitModuleIdents(Module &M) {
}
}
-void Verifier::visitModuleFlags(Module &M) {
+void Verifier::visitModuleFlags(const Module &M) {
const NamedMDNode *Flags = M.getModuleFlagsMetadata();
if (!Flags) return;
// Scan each flag, and track the flags and requirements.
- DenseMap<MDString*, MDNode*> SeenIDs;
- SmallVector<MDNode*, 16> Requirements;
+ DenseMap<const MDString*, const MDNode*> SeenIDs;
+ SmallVector<const MDNode*, 16> Requirements;
for (unsigned I = 0, E = Flags->getNumOperands(); I != E; ++I) {
visitModuleFlag(Flags->getOperand(I), SeenIDs, Requirements);
}
// Validate that the requirements in the module are valid.
for (unsigned I = 0, E = Requirements.size(); I != E; ++I) {
- MDNode *Requirement = Requirements[I];
- MDString *Flag = cast<MDString>(Requirement->getOperand(0));
- Value *ReqValue = Requirement->getOperand(1);
+ const MDNode *Requirement = Requirements[I];
+ const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
+ const Value *ReqValue = Requirement->getOperand(1);
- MDNode *Op = SeenIDs.lookup(Flag);
+ const MDNode *Op = SeenIDs.lookup(Flag);
if (!Op) {
CheckFailed("invalid requirement on flag, flag is not present in module",
Flag);
@@ -671,8 +665,10 @@ void Verifier::visitModuleFlags(Module &M) {
}
}
-void Verifier::visitModuleFlag(MDNode *Op, DenseMap<MDString*, MDNode*>&SeenIDs,
- SmallVectorImpl<MDNode*> &Requirements) {
+void
+Verifier::visitModuleFlag(const MDNode *Op,
+ DenseMap<const MDString *, const MDNode *> &SeenIDs,
+ SmallVectorImpl<const MDNode *> &Requirements) {
// Each module flag should have three arguments, the merge behavior (a
// constant int), the flag ID (an MDString), and the value.
Assert1(Op->getNumOperands() == 3,
@@ -778,7 +774,8 @@ void Verifier::VerifyAttributeTypes(AttributeSet Attrs, unsigned Idx,
I->getKindAsEnum() == Attribute::Builtin ||
I->getKindAsEnum() == Attribute::NoBuiltin ||
I->getKindAsEnum() == Attribute::Cold ||
- I->getKindAsEnum() == Attribute::OptimizeNone) {
+ I->getKindAsEnum() == Attribute::OptimizeNone ||
+ I->getKindAsEnum() == Attribute::JumpTable) {
if (!isFunction) {
CheckFailed("Attribute '" + I->getAsString() +
"' only applies to functions!", V);
@@ -813,26 +810,25 @@ void Verifier::VerifyParameterAttrs(AttributeSet Attrs, unsigned Idx, Type *Ty,
!Attrs.hasAttribute(Idx, Attribute::Nest) &&
!Attrs.hasAttribute(Idx, Attribute::StructRet) &&
!Attrs.hasAttribute(Idx, Attribute::NoCapture) &&
- !Attrs.hasAttribute(Idx, Attribute::Returned),
- "Attribute 'byval', 'nest', 'sret', 'nocapture', and 'returned' "
- "do not apply to return values!", V);
-
- // Check for mutually incompatible attributes.
- Assert1(!((Attrs.hasAttribute(Idx, Attribute::ByVal) &&
- Attrs.hasAttribute(Idx, Attribute::Nest)) ||
- (Attrs.hasAttribute(Idx, Attribute::ByVal) &&
- Attrs.hasAttribute(Idx, Attribute::StructRet)) ||
- (Attrs.hasAttribute(Idx, Attribute::Nest) &&
- Attrs.hasAttribute(Idx, Attribute::StructRet))), "Attributes "
- "'byval, nest, and sret' are incompatible!", V);
-
- Assert1(!((Attrs.hasAttribute(Idx, Attribute::ByVal) &&
- Attrs.hasAttribute(Idx, Attribute::Nest)) ||
- (Attrs.hasAttribute(Idx, Attribute::ByVal) &&
- Attrs.hasAttribute(Idx, Attribute::InReg)) ||
- (Attrs.hasAttribute(Idx, Attribute::Nest) &&
- Attrs.hasAttribute(Idx, Attribute::InReg))), "Attributes "
- "'byval, nest, and inreg' are incompatible!", V);
+ !Attrs.hasAttribute(Idx, Attribute::Returned) &&
+ !Attrs.hasAttribute(Idx, Attribute::InAlloca),
+ "Attributes 'byval', 'inalloca', 'nest', 'sret', 'nocapture', and "
+ "'returned' do not apply to return values!", V);
+
+ // Check for mutually incompatible attributes. Only inreg is compatible with
+ // sret.
+ unsigned AttrCount = 0;
+ AttrCount += Attrs.hasAttribute(Idx, Attribute::ByVal);
+ AttrCount += Attrs.hasAttribute(Idx, Attribute::InAlloca);
+ AttrCount += Attrs.hasAttribute(Idx, Attribute::StructRet) ||
+ Attrs.hasAttribute(Idx, Attribute::InReg);
+ AttrCount += Attrs.hasAttribute(Idx, Attribute::Nest);
+ Assert1(AttrCount <= 1, "Attributes 'byval', 'inalloca', 'inreg', 'nest', "
+ "and 'sret' are incompatible!", V);
+
+ Assert1(!(Attrs.hasAttribute(Idx, Attribute::InAlloca) &&
+ Attrs.hasAttribute(Idx, Attribute::ReadOnly)), "Attributes "
+ "'inalloca and readonly' are incompatible!", V);
Assert1(!(Attrs.hasAttribute(Idx, Attribute::StructRet) &&
Attrs.hasAttribute(Idx, Attribute::Returned)), "Attributes "
@@ -855,14 +851,18 @@ void Verifier::VerifyParameterAttrs(AttributeSet Attrs, unsigned Idx, Type *Ty,
"Wrong types for attribute: " +
AttributeFuncs::typeIncompatible(Ty, Idx).getAsString(Idx), V);
- if (PointerType *PTy = dyn_cast<PointerType>(Ty))
- Assert1(!Attrs.hasAttribute(Idx, Attribute::ByVal) ||
- PTy->getElementType()->isSized(),
- "Attribute 'byval' does not support unsized types!", V);
- else
+ if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
+ if (!PTy->getElementType()->isSized()) {
+ Assert1(!Attrs.hasAttribute(Idx, Attribute::ByVal) &&
+ !Attrs.hasAttribute(Idx, Attribute::InAlloca),
+ "Attributes 'byval' and 'inalloca' do not support unsized types!",
+ V);
+ }
+ } else {
Assert1(!Attrs.hasAttribute(Idx, Attribute::ByVal),
"Attribute 'byval' only applies to parameters with pointer type!",
V);
+ }
}
// VerifyFunctionAttrs - Check parameter attributes against a function type.
@@ -874,6 +874,7 @@ void Verifier::VerifyFunctionAttrs(FunctionType *FT, AttributeSet Attrs,
bool SawNest = false;
bool SawReturned = false;
+ bool SawSRet = false;
for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) {
unsigned Idx = Attrs.getSlotIndex(i);
@@ -904,8 +905,17 @@ void Verifier::VerifyFunctionAttrs(FunctionType *FT, AttributeSet Attrs,
SawReturned = true;
}
- if (Attrs.hasAttribute(Idx, Attribute::StructRet))
- Assert1(Idx == 1, "Attribute sret is not on first parameter!", V);
+ if (Attrs.hasAttribute(Idx, Attribute::StructRet)) {
+ Assert1(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
+ Assert1(Idx == 1 || Idx == 2,
+ "Attribute 'sret' is not on first or second parameter!", V);
+ SawSRet = true;
+ }
+
+ if (Attrs.hasAttribute(Idx, Attribute::InAlloca)) {
+ Assert1(Idx == FT->getNumParams(),
+ "inalloca isn't on the last parameter!", V);
+ }
}
if (!Attrs.hasAttributes(AttributeSet::FunctionIndex))
@@ -939,6 +949,14 @@ void Verifier::VerifyFunctionAttrs(FunctionType *FT, AttributeSet Attrs,
Attribute::MinSize),
"Attributes 'minsize and optnone' are incompatible!", V);
}
+
+ if (Attrs.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::JumpTable)) {
+ const GlobalValue *GV = cast<GlobalValue>(V);
+ Assert1(GV->hasUnnamedAddr(),
+ "Attribute 'jumptable' requires 'unnamed_addr'", V);
+
+ }
}
void Verifier::VerifyBitcastType(const Value *V, Type *DestTy, Type *SrcTy) {
@@ -1001,7 +1019,7 @@ bool Verifier::VerifyAttributeCount(AttributeSet Attrs, unsigned Params) {
// visitFunction - Verify that a function is ok.
//
-void Verifier::visitFunction(Function &F) {
+void Verifier::visitFunction(const Function &F) {
// Check function arguments.
FunctionType *FT = F.getFunctionType();
unsigned NumArgs = F.arg_size();
@@ -1059,8 +1077,8 @@ void Verifier::visitFunction(Function &F) {
// Check that the argument values match the function type for this function...
unsigned i = 0;
- for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
- I != E; ++I, ++i) {
+ for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
+ ++I, ++i) {
Assert2(I->getType() == FT->getParamType(i),
"Argument value does not match function argument type!",
I, FT->getParamType(i));
@@ -1074,8 +1092,7 @@ void Verifier::visitFunction(Function &F) {
if (F.isMaterializable()) {
// Function has a body somewhere we can't see.
} else if (F.isDeclaration()) {
- Assert1(F.hasExternalLinkage() || F.hasDLLImportLinkage() ||
- F.hasExternalWeakLinkage(),
+ Assert1(F.hasExternalLinkage() || F.hasExternalWeakLinkage(),
"invalid linkage type for function declaration", &F);
} else {
// Verify that this function (which has a body) is not named "llvm.*". It
@@ -1083,13 +1100,13 @@ void Verifier::visitFunction(Function &F) {
Assert1(!isLLVMdotName, "llvm intrinsics cannot be defined!", &F);
// Check the entry node
- BasicBlock *Entry = &F.getEntryBlock();
+ const BasicBlock *Entry = &F.getEntryBlock();
Assert1(pred_begin(Entry) == pred_end(Entry),
"Entry block to function must not have predecessors!", Entry);
// The address of the entry block cannot be taken, unless it is dead.
if (Entry->hasAddressTaken()) {
- Assert1(!BlockAddress::get(Entry)->isConstantUsed(),
+ Assert1(!BlockAddress::lookup(Entry)->isConstantUsed(),
"blockaddress may not be used with the entry block!", Entry);
}
}
@@ -1101,6 +1118,11 @@ void Verifier::visitFunction(Function &F) {
if (F.hasAddressTaken(&U))
Assert1(0, "Invalid user of intrinsic instruction!", U);
}
+
+ Assert1(!F.hasDLLImportStorageClass() ||
+ (F.isDeclaration() && F.hasExternalLinkage()) ||
+ F.hasAvailableExternallyLinkage(),
+ "Function is marked as dllimport, but not external.", &F);
}
// verifyBasicBlock - Verify that a basic block is well formed...
@@ -1533,6 +1555,16 @@ void Verifier::VerifyCallSite(CallSite CS) {
// Verify call attributes.
VerifyFunctionAttrs(FTy, Attrs, I);
+ // Conservatively check the inalloca argument.
+  // It is a bug if we can find an underlying alloca that is not marked
+  // inalloca.
+ if (CS.hasInAllocaArgument()) {
+ Value *InAllocaArg = CS.getArgument(FTy->getNumParams() - 1);
+ if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
+ Assert2(AI->isUsedWithInAlloca(),
+ "inalloca argument for call has mismatched alloca", AI, I);
+ }
+
if (FTy->isVarArg()) {
// FIXME? is 'nest' even legal here?
bool SawNest = false;
@@ -1566,11 +1598,15 @@ void Verifier::VerifyCallSite(CallSite CS) {
Assert1(!Attrs.hasAttribute(Idx, Attribute::StructRet),
"Attribute 'sret' cannot be used for vararg call arguments!", I);
+
+ if (Attrs.hasAttribute(Idx, Attribute::InAlloca))
+ Assert1(Idx == CS.arg_size(), "inalloca isn't on the last argument!",
+ I);
}
}
// Verify that there's no metadata unless it's a direct call to an intrinsic.
- if (CS.getCalledFunction() == 0 ||
+ if (CS.getCalledFunction() == nullptr ||
!CS.getCalledFunction()->getName().startswith("llvm.")) {
for (FunctionType::param_iterator PI = FTy->param_begin(),
PE = FTy->param_end(); PI != PE; ++PI)
@@ -1581,9 +1617,102 @@ void Verifier::VerifyCallSite(CallSite CS) {
visitInstruction(*I);
}
+/// Two types are "congruent" if they are identical, or if they are both pointer
+/// types with different pointee types and the same address space.
+static bool isTypeCongruent(Type *L, Type *R) {
+ if (L == R)
+ return true;
+ PointerType *PL = dyn_cast<PointerType>(L);
+ PointerType *PR = dyn_cast<PointerType>(R);
+ if (!PL || !PR)
+ return false;
+ return PL->getAddressSpace() == PR->getAddressSpace();
+}
+
+static AttrBuilder getParameterABIAttributes(int I, AttributeSet Attrs) {
+ static const Attribute::AttrKind ABIAttrs[] = {
+ Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
+ Attribute::InReg, Attribute::Returned};
+ AttrBuilder Copy;
+ for (auto AK : ABIAttrs) {
+ if (Attrs.hasAttribute(I + 1, AK))
+ Copy.addAttribute(AK);
+ }
+ if (Attrs.hasAttribute(I + 1, Attribute::Alignment))
+ Copy.addAlignmentAttr(Attrs.getParamAlignment(I + 1));
+ return Copy;
+}
+
+void Verifier::verifyMustTailCall(CallInst &CI) {
+ Assert1(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
+
+ // - The caller and callee prototypes must match. Pointer types of
+ // parameters or return types may differ in pointee type, but not
+ // address space.
+ Function *F = CI.getParent()->getParent();
+ auto GetFnTy = [](Value *V) {
+ return cast<FunctionType>(
+ cast<PointerType>(V->getType())->getElementType());
+ };
+ FunctionType *CallerTy = GetFnTy(F);
+ FunctionType *CalleeTy = GetFnTy(CI.getCalledValue());
+ Assert1(CallerTy->getNumParams() == CalleeTy->getNumParams(),
+ "cannot guarantee tail call due to mismatched parameter counts", &CI);
+ Assert1(CallerTy->isVarArg() == CalleeTy->isVarArg(),
+ "cannot guarantee tail call due to mismatched varargs", &CI);
+ Assert1(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
+ "cannot guarantee tail call due to mismatched return types", &CI);
+ for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
+ Assert1(
+ isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
+ "cannot guarantee tail call due to mismatched parameter types", &CI);
+ }
+
+ // - The calling conventions of the caller and callee must match.
+ Assert1(F->getCallingConv() == CI.getCallingConv(),
+ "cannot guarantee tail call due to mismatched calling conv", &CI);
+
+ // - All ABI-impacting function attributes, such as sret, byval, inreg,
+ // returned, and inalloca, must match.
+ AttributeSet CallerAttrs = F->getAttributes();
+ AttributeSet CalleeAttrs = CI.getAttributes();
+ for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
+ AttrBuilder CallerABIAttrs = getParameterABIAttributes(I, CallerAttrs);
+ AttrBuilder CalleeABIAttrs = getParameterABIAttributes(I, CalleeAttrs);
+ Assert2(CallerABIAttrs == CalleeABIAttrs,
+ "cannot guarantee tail call due to mismatched ABI impacting "
+ "function attributes", &CI, CI.getOperand(I));
+ }
+
+ // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
+ // or a pointer bitcast followed by a ret instruction.
+ // - The ret instruction must return the (possibly bitcasted) value
+ // produced by the call or void.
+ Value *RetVal = &CI;
+ Instruction *Next = CI.getNextNode();
+
+ // Handle the optional bitcast.
+ if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
+ Assert1(BI->getOperand(0) == RetVal,
+ "bitcast following musttail call must use the call", BI);
+ RetVal = BI;
+ Next = BI->getNextNode();
+ }
+
+ // Check the return.
+ ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
+ Assert1(Ret, "musttail call must be precede a ret with an optional bitcast",
+ &CI);
+ Assert1(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal,
+ "musttail call result must be returned", Ret);
+}
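// A minimal IR sketch of the shape verifyMustTailCall accepts, with
// hypothetical @caller/@callee that share prototype, calling convention,
// and ABI-impacting attributes:
//
//   define i32 @caller(i32 %x) {
//     %r = musttail call i32 @callee(i32 %x)
//     ret i32 %r
//   }
//
// Only an optional bitcast of the result may sit between the musttail call
// and the ret, and the ret must return that value (or void).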
+
void Verifier::visitCallInst(CallInst &CI) {
VerifyCallSite(&CI);
+ if (CI.isMustTailCall())
+ verifyMustTailCall(CI);
+
if (Function *F = CI.getCalledFunction())
if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
visitIntrinsicFunctionCall(ID, CI);
@@ -1764,6 +1893,8 @@ void Verifier::visitLoadInst(LoadInst &LI) {
Type *ElTy = PTy->getElementType();
Assert2(ElTy == LI.getType(),
"Load result type does not match pointer operand type!", &LI, ElTy);
+ Assert1(LI.getAlignment() <= Value::MaximumAlignment,
+ "huge alignment values are unsupported", &LI);
if (LI.isAtomic()) {
Assert1(LI.getOrdering() != Release && LI.getOrdering() != AcquireRelease,
"Load cannot have Release ordering", &LI);
@@ -1771,11 +1902,11 @@ void Verifier::visitLoadInst(LoadInst &LI) {
"Atomic load must specify explicit alignment", &LI);
if (!ElTy->isPointerTy()) {
Assert2(ElTy->isIntegerTy(),
- "atomic store operand must have integer type!",
+ "atomic load operand must have integer type!",
&LI, ElTy);
unsigned Size = ElTy->getPrimitiveSizeInBits();
Assert2(Size >= 8 && !(Size & (Size - 1)),
- "atomic store operand must be power-of-two byte-sized integer",
+ "atomic load operand must be power-of-two byte-sized integer",
&LI, ElTy);
}
} else {
@@ -1839,6 +1970,8 @@ void Verifier::visitStoreInst(StoreInst &SI) {
Assert2(ElTy == SI.getOperand(0)->getType(),
"Stored value type does not match pointer operand type!",
&SI, ElTy);
+ Assert1(SI.getAlignment() <= Value::MaximumAlignment,
+ "huge alignment values are unsupported", &SI);
if (SI.isAtomic()) {
Assert1(SI.getOrdering() != Acquire && SI.getOrdering() != AcquireRelease,
"Store cannot have Acquire ordering", &SI);
@@ -1861,22 +1994,39 @@ void Verifier::visitStoreInst(StoreInst &SI) {
}
void Verifier::visitAllocaInst(AllocaInst &AI) {
+ SmallPtrSet<const Type*, 4> Visited;
PointerType *PTy = AI.getType();
Assert1(PTy->getAddressSpace() == 0,
"Allocation instruction pointer not in the generic address space!",
&AI);
- Assert1(PTy->getElementType()->isSized(), "Cannot allocate unsized type",
+ Assert1(PTy->getElementType()->isSized(&Visited), "Cannot allocate unsized type",
&AI);
Assert1(AI.getArraySize()->getType()->isIntegerTy(),
"Alloca array size must have integer type", &AI);
+ Assert1(AI.getAlignment() <= Value::MaximumAlignment,
+ "huge alignment values are unsupported", &AI);
+
visitInstruction(AI);
}
void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
- Assert1(CXI.getOrdering() != NotAtomic,
+
+ // FIXME: more conditions???
+ Assert1(CXI.getSuccessOrdering() != NotAtomic,
+ "cmpxchg instructions must be atomic.", &CXI);
+ Assert1(CXI.getFailureOrdering() != NotAtomic,
"cmpxchg instructions must be atomic.", &CXI);
- Assert1(CXI.getOrdering() != Unordered,
+ Assert1(CXI.getSuccessOrdering() != Unordered,
"cmpxchg instructions cannot be unordered.", &CXI);
+ Assert1(CXI.getFailureOrdering() != Unordered,
+ "cmpxchg instructions cannot be unordered.", &CXI);
+ Assert1(CXI.getSuccessOrdering() >= CXI.getFailureOrdering(),
+ "cmpxchg instructions be at least as constrained on success as fail",
+ &CXI);
+ Assert1(CXI.getFailureOrdering() != Release &&
+ CXI.getFailureOrdering() != AcquireRelease,
+ "cmpxchg failure ordering cannot include release semantics", &CXI);
+
PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
Assert1(PTy, "First cmpxchg operand must be a pointer.", &CXI);
Type *ElTy = PTy->getElementType();
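// A hedged sketch of orderings the checks above allow and reject, with a
// hypothetical i32* %p:
//
//   cmpxchg i32* %p, i32 %old, i32 %new seq_cst monotonic  ; ok
//   cmpxchg i32* %p, i32 %old, i32 %new monotonic seq_cst  ; rejected:
//                                      failure stronger than success
//   cmpxchg i32* %p, i32 %old, i32 %new seq_cst release    ; rejected:
//                                      failure ordering has release semantics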
@@ -1981,8 +2131,7 @@ void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
Assert1(isa<Constant>(PersonalityFn), "Personality function is not constant!",
&LPI);
for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
- Value *Clause = LPI.getClause(i);
- Assert1(isa<Constant>(Clause), "Clause is not constant!", &LPI);
+ Constant *Clause = LPI.getClause(i);
if (LPI.isCatch(i)) {
Assert1(isa<PointerType>(Clause->getType()),
"Catch operand does not have pointer type!", &LPI);
@@ -2007,7 +2156,7 @@ void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
}
const Use &U = I.getOperandUse(i);
- Assert2(InstsInThisBlock.count(Op) || DT->dominates(Op, U),
+ Assert2(InstsInThisBlock.count(Op) || DT.dominates(Op, U),
"Instruction does not dominate all uses!", Op, &I);
}
@@ -2018,10 +2167,10 @@ void Verifier::visitInstruction(Instruction &I) {
Assert1(BB, "Instruction not embedded in basic block!", &I);
if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
- for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
- UI != UE; ++UI)
- Assert1(*UI != (User*)&I || !DT->isReachableFromEntry(BB),
+ for (User *U : I.users()) {
+ Assert1(U != (User*)&I || !DT.isReachableFromEntry(BB),
"Only PHI nodes may reference their own value!", &I);
+ }
}
// Check that void typed values don't have names
@@ -2043,19 +2192,18 @@ void Verifier::visitInstruction(Instruction &I) {
// Check that all uses of the instruction, if they are instructions
// themselves, actually have parent basic blocks. If the use is not an
// instruction, it is an error!
- for (User::use_iterator UI = I.use_begin(), UE = I.use_end();
- UI != UE; ++UI) {
- if (Instruction *Used = dyn_cast<Instruction>(*UI))
- Assert2(Used->getParent() != 0, "Instruction referencing instruction not"
- " embedded in a basic block!", &I, Used);
+ for (Use &U : I.uses()) {
+ if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
+ Assert2(Used->getParent() != nullptr, "Instruction referencing"
+ " instruction not embedded in a basic block!", &I, Used);
else {
- CheckFailed("Use of instruction is not an instruction!", *UI);
+ CheckFailed("Use of instruction is not an instruction!", U);
return;
}
}
for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
- Assert1(I.getOperand(i) != 0, "Instruction has null operand!", &I);
+ Assert1(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
// Check to make sure that only first-class-values are operands to
// instructions.
@@ -2071,7 +2219,7 @@ void Verifier::visitInstruction(Instruction &I) {
Assert1(!F->isIntrinsic() || isa<CallInst>(I) ||
F->getIntrinsicID() == Intrinsic::donothing,
"Cannot invoke an intrinsinc other than donothing", &I);
- Assert1(F->getParent() == Mod, "Referencing function in another module!",
+ Assert1(F->getParent() == M, "Referencing function in another module!",
&I);
} else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
Assert1(OpBB->getParent() == BB->getParent(),
@@ -2080,7 +2228,7 @@ void Verifier::visitInstruction(Instruction &I) {
Assert1(OpArg->getParent() == BB->getParent(),
"Referring to an argument in another function!", &I);
} else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
- Assert1(GV->getParent() == Mod, "Referencing global in another module!",
+ Assert1(GV->getParent() == M, "Referencing global in another module!",
&I);
} else if (isa<Instruction>(I.getOperand(i))) {
verifyDominatesUse(I, i);
@@ -2127,12 +2275,8 @@ void Verifier::visitInstruction(Instruction &I) {
}
MDNode *MD = I.getMetadata(LLVMContext::MD_range);
- Assert1(!MD || isa<LoadInst>(I), "Ranges are only for loads!", &I);
-
- if (!DisableDebugInfoVerifier) {
- MD = I.getMetadata(LLVMContext::MD_dbg);
- Finder.processLocation(*Mod, DILocation(MD));
- }
+ Assert1(!MD || isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
+ "Ranges are only for loads, calls and invokes!", &I);
InstsInThisBlock.insert(&I);
}
@@ -2163,18 +2307,18 @@ bool Verifier::VerifyIntrinsicType(Type *Ty,
case IITDescriptor::Integer: return !Ty->isIntegerTy(D.Integer_Width);
case IITDescriptor::Vector: {
VectorType *VT = dyn_cast<VectorType>(Ty);
- return VT == 0 || VT->getNumElements() != D.Vector_Width ||
+ return !VT || VT->getNumElements() != D.Vector_Width ||
VerifyIntrinsicType(VT->getElementType(), Infos, ArgTys);
}
case IITDescriptor::Pointer: {
PointerType *PT = dyn_cast<PointerType>(Ty);
- return PT == 0 || PT->getAddressSpace() != D.Pointer_AddressSpace ||
+ return !PT || PT->getAddressSpace() != D.Pointer_AddressSpace ||
VerifyIntrinsicType(PT->getElementType(), Infos, ArgTys);
}
case IITDescriptor::Struct: {
StructType *ST = dyn_cast<StructType>(Ty);
- if (ST == 0 || ST->getNumElements() != D.Struct_NumElements)
+ if (!ST || ST->getNumElements() != D.Struct_NumElements)
return true;
for (unsigned i = 0, e = D.Struct_NumElements; i != e; ++i)
@@ -2202,18 +2346,41 @@ bool Verifier::VerifyIntrinsicType(Type *Ty,
}
llvm_unreachable("all argument kinds not covered");
- case IITDescriptor::ExtendVecArgument:
+ case IITDescriptor::ExtendArgument: {
    // This may only refer to a previous vector or integer argument.
- return D.getArgumentNumber() >= ArgTys.size() ||
- !isa<VectorType>(ArgTys[D.getArgumentNumber()]) ||
- VectorType::getExtendedElementVectorType(
- cast<VectorType>(ArgTys[D.getArgumentNumber()])) != Ty;
+ if (D.getArgumentNumber() >= ArgTys.size())
+ return true;
- case IITDescriptor::TruncVecArgument:
+ Type *NewTy = ArgTys[D.getArgumentNumber()];
+ if (VectorType *VTy = dyn_cast<VectorType>(NewTy))
+ NewTy = VectorType::getExtendedElementVectorType(VTy);
+ else if (IntegerType *ITy = dyn_cast<IntegerType>(NewTy))
+ NewTy = IntegerType::get(ITy->getContext(), 2 * ITy->getBitWidth());
+ else
+ return true;
+
+ return Ty != NewTy;
+ }
+ case IITDescriptor::TruncArgument: {
+    // This may only refer to a previous vector or integer argument.
+ if (D.getArgumentNumber() >= ArgTys.size())
+ return true;
+
+ Type *NewTy = ArgTys[D.getArgumentNumber()];
+ if (VectorType *VTy = dyn_cast<VectorType>(NewTy))
+ NewTy = VectorType::getTruncatedElementVectorType(VTy);
+ else if (IntegerType *ITy = dyn_cast<IntegerType>(NewTy))
+ NewTy = IntegerType::get(ITy->getContext(), ITy->getBitWidth() / 2);
+ else
+ return true;
+
+ return Ty != NewTy;
+ }
+ case IITDescriptor::HalfVecArgument:
// This may only be used when referring to a previous vector argument.
return D.getArgumentNumber() >= ArgTys.size() ||
!isa<VectorType>(ArgTys[D.getArgumentNumber()]) ||
- VectorType::getTruncatedElementVectorType(
+ VectorType::getHalfElementsVectorType(
cast<VectorType>(ArgTys[D.getArgumentNumber()])) != Ty;
}
llvm_unreachable("unhandled");
@@ -2284,8 +2451,10 @@ void Verifier::visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI) {
// know they are legal for the intrinsic!) get the intrinsic name through the
// usual means. This allows us to verify the mangling of argument types into
// the name.
- Assert1(Intrinsic::getName(ID, ArgTys) == IF->getName(),
- "Intrinsic name not mangled correctly for type arguments!", IF);
+ const std::string ExpectedName = Intrinsic::getName(ID, ArgTys);
+ Assert1(ExpectedName == IF->getName(),
+ "Intrinsic name not mangled correctly for type arguments! "
+ "Should be: " + ExpectedName, IF);
// If the intrinsic takes MDNode arguments, verify that they are either global
// or are local to *this* function.
@@ -2308,17 +2477,7 @@ void Verifier::visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI) {
MDNode *MD = cast<MDNode>(CI.getArgOperand(0));
Assert1(MD->getNumOperands() == 1,
"invalid llvm.dbg.declare intrinsic call 2", &CI);
- if (!DisableDebugInfoVerifier)
- Finder.processDeclare(*Mod, cast<DbgDeclareInst>(&CI));
} break;
- case Intrinsic::dbg_value: { //llvm.dbg.value
- if (!DisableDebugInfoVerifier) {
- Assert1(CI.getArgOperand(0) && isa<MDNode>(CI.getArgOperand(0)),
- "invalid llvm.dbg.value intrinsic call 1", &CI);
- Finder.processValue(*Mod, cast<DbgValueInst>(&CI));
- }
- break;
- }
case Intrinsic::memcpy:
case Intrinsic::memmove:
case Intrinsic::memset:
@@ -2380,63 +2539,176 @@ void Verifier::visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI) {
}
}
-void Verifier::verifyDebugInfo() {
+void DebugInfoVerifier::verifyDebugInfo() {
+ if (!VerifyDebugInfo)
+ return;
+
+ DebugInfoFinder Finder;
+ Finder.processModule(*M);
+ processInstructions(Finder);
+
// Verify Debug Info.
- if (!DisableDebugInfoVerifier) {
- for (DebugInfoFinder::iterator I = Finder.compile_unit_begin(),
- E = Finder.compile_unit_end(); I != E; ++I)
- Assert1(DICompileUnit(*I).Verify(), "DICompileUnit does not Verify!", *I);
- for (DebugInfoFinder::iterator I = Finder.subprogram_begin(),
- E = Finder.subprogram_end(); I != E; ++I)
- Assert1(DISubprogram(*I).Verify(), "DISubprogram does not Verify!", *I);
- for (DebugInfoFinder::iterator I = Finder.global_variable_begin(),
- E = Finder.global_variable_end(); I != E; ++I)
- Assert1(DIGlobalVariable(*I).Verify(),
- "DIGlobalVariable does not Verify!", *I);
- for (DebugInfoFinder::iterator I = Finder.type_begin(),
- E = Finder.type_end(); I != E; ++I)
- Assert1(DIType(*I).Verify(), "DIType does not Verify!", *I);
- for (DebugInfoFinder::iterator I = Finder.scope_begin(),
- E = Finder.scope_end(); I != E; ++I)
- Assert1(DIScope(*I).Verify(), "DIScope does not Verify!", *I);
+ //
+ // NOTE: The loud braces are necessary for MSVC compatibility.
+ for (DICompileUnit CU : Finder.compile_units()) {
+ Assert1(CU.Verify(), "DICompileUnit does not Verify!", CU);
}
+ for (DISubprogram S : Finder.subprograms()) {
+ Assert1(S.Verify(), "DISubprogram does not Verify!", S);
+ }
+ for (DIGlobalVariable GV : Finder.global_variables()) {
+ Assert1(GV.Verify(), "DIGlobalVariable does not Verify!", GV);
+ }
+ for (DIType T : Finder.types()) {
+ Assert1(T.Verify(), "DIType does not Verify!", T);
+ }
+ for (DIScope S : Finder.scopes()) {
+ Assert1(S.Verify(), "DIScope does not Verify!", S);
+ }
+}
+
+void DebugInfoVerifier::processInstructions(DebugInfoFinder &Finder) {
+ for (const Function &F : *M)
+ for (auto I = inst_begin(&F), E = inst_end(&F); I != E; ++I) {
+ if (MDNode *MD = I->getMetadata(LLVMContext::MD_dbg))
+ Finder.processLocation(*M, DILocation(MD));
+ if (const CallInst *CI = dyn_cast<CallInst>(&*I))
+ processCallInst(Finder, *CI);
+ }
+}
+
+void DebugInfoVerifier::processCallInst(DebugInfoFinder &Finder,
+ const CallInst &CI) {
+ if (Function *F = CI.getCalledFunction())
+ if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
+ switch (ID) {
+ case Intrinsic::dbg_declare:
+ Finder.processDeclare(*M, cast<DbgDeclareInst>(&CI));
+ break;
+ case Intrinsic::dbg_value:
+ Finder.processValue(*M, cast<DbgValueInst>(&CI));
+ break;
+ default:
+ break;
+ }
}
//===----------------------------------------------------------------------===//
// Implement the public interfaces to this file...
//===----------------------------------------------------------------------===//
-FunctionPass *llvm::createVerifierPass(VerifierFailureAction action) {
- return new Verifier(action);
+bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
+ Function &F = const_cast<Function &>(f);
+ assert(!F.isDeclaration() && "Cannot verify external functions");
+
+ raw_null_ostream NullStr;
+ Verifier V(OS ? *OS : NullStr);
+
+ // Note that this function's return value is inverted from what you would
+ // expect of a function called "verify".
+ return !V.verify(F);
}
+bool llvm::verifyModule(const Module &M, raw_ostream *OS) {
+ raw_null_ostream NullStr;
+ Verifier V(OS ? *OS : NullStr);
-/// verifyFunction - Check a function for errors, printing messages on stderr.
-/// Return true if the function is corrupt.
-///
-bool llvm::verifyFunction(const Function &f, VerifierFailureAction action) {
- Function &F = const_cast<Function&>(f);
- assert(!F.isDeclaration() && "Cannot verify external functions");
+ bool Broken = false;
+ for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I)
+ if (!I->isDeclaration())
+ Broken |= !V.verify(*I);
- FunctionPassManager FPM(F.getParent());
- Verifier *V = new Verifier(action);
- FPM.add(V);
- FPM.doInitialization();
- FPM.run(F);
- return V->Broken;
+ // Note that this function's return value is inverted from what you would
+ // expect of a function called "verify".
+ DebugInfoVerifier DIV(OS ? *OS : NullStr);
+ return !V.verify(M) || !DIV.verify(M) || Broken;
}
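// A short usage sketch of the new free functions, assuming a Module *M
// obtained elsewhere; both return true when the IR is broken:
//
//   if (verifyModule(*M, &errs()))
//     report_fatal_error("input module is broken");
//
// Passing a null stream keeps the checks silent, since the messages are
// then routed to a raw_null_ostream as in the body above.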
-/// verifyModule - Check a module for errors, printing messages on stderr.
-/// Return true if the module is corrupt.
-///
-bool llvm::verifyModule(const Module &M, VerifierFailureAction action,
- std::string *ErrorInfo) {
- PassManager PM;
- Verifier *V = new Verifier(action);
- PM.add(V);
- PM.run(const_cast<Module&>(M));
-
- if (ErrorInfo && V->Broken)
- *ErrorInfo = V->MessagesStr.str();
- return V->Broken;
+namespace {
+struct VerifierLegacyPass : public FunctionPass {
+ static char ID;
+
+ Verifier V;
+ bool FatalErrors;
+
+ VerifierLegacyPass() : FunctionPass(ID), FatalErrors(true) {
+ initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
+ }
+ explicit VerifierLegacyPass(bool FatalErrors)
+ : FunctionPass(ID), V(dbgs()), FatalErrors(FatalErrors) {
+ initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F) override {
+ if (!V.verify(F) && FatalErrors)
+ report_fatal_error("Broken function found, compilation aborted!");
+
+ return false;
+ }
+
+ bool doFinalization(Module &M) override {
+ if (!V.verify(M) && FatalErrors)
+ report_fatal_error("Broken module found, compilation aborted!");
+
+ return false;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+};
+struct DebugInfoVerifierLegacyPass : public ModulePass {
+ static char ID;
+
+ DebugInfoVerifier V;
+ bool FatalErrors;
+
+ DebugInfoVerifierLegacyPass() : ModulePass(ID), FatalErrors(true) {
+ initializeDebugInfoVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
+ }
+ explicit DebugInfoVerifierLegacyPass(bool FatalErrors)
+ : ModulePass(ID), V(dbgs()), FatalErrors(FatalErrors) {
+ initializeDebugInfoVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnModule(Module &M) override {
+ if (!V.verify(M) && FatalErrors)
+ report_fatal_error("Broken debug info found, compilation aborted!");
+
+ return false;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+};
+}
+
+char VerifierLegacyPass::ID = 0;
+INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
+
+char DebugInfoVerifierLegacyPass::ID = 0;
+INITIALIZE_PASS(DebugInfoVerifierLegacyPass, "verify-di", "Debug Info Verifier",
+ false, false)
+
+FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
+ return new VerifierLegacyPass(FatalErrors);
+}
+
+ModulePass *llvm::createDebugInfoVerifierPass(bool FatalErrors) {
+ return new DebugInfoVerifierLegacyPass(FatalErrors);
+}
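// A hedged sketch of driving the two legacy wrappers from opt, assuming a
// hypothetical input.ll and a tool built from this tree:
//
//   opt -verify -verify-di input.ll -disable-output
//
// "verify" exercises the per-function Verifier plus its doFinalization
// module check, and "verify-di" runs the module-level DebugInfoVerifier.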
+
+PreservedAnalyses VerifierPass::run(Module *M) {
+ if (verifyModule(*M, &dbgs()) && FatalErrors)
+ report_fatal_error("Broken module found, compilation aborted!");
+
+ return PreservedAnalyses::all();
+}
+
+PreservedAnalyses VerifierPass::run(Function *F) {
+ if (verifyFunction(*F, &dbgs()) && FatalErrors)
+ report_fatal_error("Broken function found, compilation aborted!");
+
+ return PreservedAnalyses::all();
}
diff --git a/contrib/llvm/lib/IR/module.modulemap b/contrib/llvm/lib/IR/module.modulemap
new file mode 100644
index 0000000..9698e91
--- /dev/null
+++ b/contrib/llvm/lib/IR/module.modulemap
@@ -0,0 +1 @@
+module IR { requires cplusplus umbrella "." module * { export * } }
diff --git a/contrib/llvm/lib/IRReader/IRReader.cpp b/contrib/llvm/lib/IRReader/IRReader.cpp
index 935e81d..f8d2f5a 100644
--- a/contrib/llvm/lib/IRReader/IRReader.cpp
+++ b/contrib/llvm/lib/IRReader/IRReader.cpp
@@ -8,18 +8,17 @@
//===----------------------------------------------------------------------===//
#include "llvm/IRReader/IRReader.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/Assembly/Parser.h"
+#include "llvm-c/Core.h"
+#include "llvm-c/IRReader.h"
+#include "llvm/AsmParser/Parser.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
-#include "llvm/Support/system_error.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm-c/Core.h"
-#include "llvm-c/IRReader.h"
+#include <system_error>
using namespace llvm;
@@ -30,36 +29,37 @@ namespace llvm {
static const char *const TimeIRParsingGroupName = "LLVM IR Parsing";
static const char *const TimeIRParsingName = "Parse IR";
-
-Module *llvm::getLazyIRModule(MemoryBuffer *Buffer, SMDiagnostic &Err,
- LLVMContext &Context) {
+static Module *getLazyIRModule(MemoryBuffer *Buffer, SMDiagnostic &Err,
+ LLVMContext &Context) {
if (isBitcode((const unsigned char *)Buffer->getBufferStart(),
(const unsigned char *)Buffer->getBufferEnd())) {
std::string ErrMsg;
- Module *M = getLazyBitcodeModule(Buffer, Context, &ErrMsg);
- if (M == 0) {
+ ErrorOr<Module *> ModuleOrErr = getLazyBitcodeModule(Buffer, Context);
+ if (std::error_code EC = ModuleOrErr.getError()) {
Err = SMDiagnostic(Buffer->getBufferIdentifier(), SourceMgr::DK_Error,
- ErrMsg);
- // ParseBitcodeFile does not take ownership of the Buffer in the
+ EC.message());
+ // getLazyBitcodeModule does not take ownership of the Buffer in the
// case of an error.
delete Buffer;
+ return nullptr;
}
- return M;
+ return ModuleOrErr.get();
}
- return ParseAssembly(Buffer, 0, Err, Context);
+ return ParseAssembly(Buffer, nullptr, Err, Context);
}
Module *llvm::getLazyIRFileModule(const std::string &Filename, SMDiagnostic &Err,
LLVMContext &Context) {
- OwningPtr<MemoryBuffer> File;
- if (error_code ec = MemoryBuffer::getFileOrSTDIN(Filename, File)) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> FileOrErr =
+ MemoryBuffer::getFileOrSTDIN(Filename);
+ if (std::error_code EC = FileOrErr.getError()) {
Err = SMDiagnostic(Filename, SourceMgr::DK_Error,
- "Could not open input file: " + ec.message());
- return 0;
+ "Could not open input file: " + EC.message());
+ return nullptr;
}
- return getLazyIRModule(File.take(), Err, Context);
+ return getLazyIRModule(FileOrErr.get().release(), Err, Context);
}
Module *llvm::ParseIR(MemoryBuffer *Buffer, SMDiagnostic &Err,
@@ -68,29 +68,33 @@ Module *llvm::ParseIR(MemoryBuffer *Buffer, SMDiagnostic &Err,
TimePassesIsEnabled);
if (isBitcode((const unsigned char *)Buffer->getBufferStart(),
(const unsigned char *)Buffer->getBufferEnd())) {
- std::string ErrMsg;
- Module *M = ParseBitcodeFile(Buffer, Context, &ErrMsg);
- if (M == 0)
+ ErrorOr<Module *> ModuleOrErr = parseBitcodeFile(Buffer, Context);
+ Module *M = nullptr;
+ if (std::error_code EC = ModuleOrErr.getError())
Err = SMDiagnostic(Buffer->getBufferIdentifier(), SourceMgr::DK_Error,
- ErrMsg);
- // ParseBitcodeFile does not take ownership of the Buffer.
- delete Buffer;
+ EC.message());
+ else
+ M = ModuleOrErr.get();
+ // parseBitcodeFile does not take ownership of the Buffer.
return M;
}
- return ParseAssembly(Buffer, 0, Err, Context);
+ return ParseAssembly(MemoryBuffer::getMemBuffer(
+ Buffer->getBuffer(), Buffer->getBufferIdentifier()),
+ nullptr, Err, Context);
}
Module *llvm::ParseIRFile(const std::string &Filename, SMDiagnostic &Err,
LLVMContext &Context) {
- OwningPtr<MemoryBuffer> File;
- if (error_code ec = MemoryBuffer::getFileOrSTDIN(Filename, File)) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> FileOrErr =
+ MemoryBuffer::getFileOrSTDIN(Filename);
+ if (std::error_code EC = FileOrErr.getError()) {
Err = SMDiagnostic(Filename, SourceMgr::DK_Error,
- "Could not open input file: " + ec.message());
- return 0;
+ "Could not open input file: " + EC.message());
+ return nullptr;
}
- return ParseIR(File.take(), Err, Context);
+ return ParseIR(FileOrErr.get().get(), Err, Context);
}
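// A brief usage sketch of this entry point, assuming a hypothetical
// input.ll (or .bc) file on disk:
//
//   SMDiagnostic Err;
//   LLVMContext Ctx;
//   if (Module *M = ParseIRFile("input.ll", Err, Ctx))
//     ; // use M
//   else
//     Err.print("mytool", errs());
//
// Bitcode and textual IR failures are both reported through the same
// SMDiagnostic.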
//===----------------------------------------------------------------------===//
@@ -102,14 +106,15 @@ LLVMBool LLVMParseIRInContext(LLVMContextRef ContextRef,
char **OutMessage) {
SMDiagnostic Diag;
- *OutM = wrap(ParseIR(unwrap(MemBuf), Diag, *unwrap(ContextRef)));
+ std::unique_ptr<MemoryBuffer> MB(unwrap(MemBuf));
+ *OutM = wrap(ParseIR(MB.get(), Diag, *unwrap(ContextRef)));
if(!*OutM) {
if (OutMessage) {
std::string buf;
raw_string_ostream os(buf);
- Diag.print(NULL, os, false);
+ Diag.print(nullptr, os, false);
os.flush();
*OutMessage = strdup(buf.c_str());
diff --git a/contrib/llvm/lib/LTO/LTOCodeGenerator.cpp b/contrib/llvm/lib/LTO/LTOCodeGenerator.cpp
index 2b3648e..45a49e4 100644
--- a/contrib/llvm/lib/LTO/LTOCodeGenerator.cpp
+++ b/contrib/llvm/lib/LTO/LTOCodeGenerator.cpp
@@ -13,20 +13,23 @@
//===----------------------------------------------------------------------===//
#include "llvm/LTO/LTOCodeGenerator.h"
-#include "llvm/LTO/LTOModule.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/Passes.h"
-#include "llvm/Analysis/Verifier.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
-#include "llvm/Linker.h"
+#include "llvm/LTO/LTOModule.h"
+#include "llvm/Linker/Linker.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/SubtargetFeature.h"
@@ -40,15 +43,15 @@
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/ToolOutputFile.h"
-#include "llvm/Support/system_error.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/Mangler.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/ObjCARC.h"
+#include <system_error>
using namespace llvm;
const char* LTOCodeGenerator::getVersionString() {
@@ -60,19 +63,20 @@ const char* LTOCodeGenerator::getVersionString() {
}
LTOCodeGenerator::LTOCodeGenerator()
- : Context(getGlobalContext()), Linker(new Module("ld-temp.o", Context)),
- TargetMach(NULL), EmitDwarfDebugInfo(false), ScopeRestrictionsDone(false),
- CodeModel(LTO_CODEGEN_PIC_MODEL_DYNAMIC), NativeObjectFile(NULL) {
+ : Context(getGlobalContext()), IRLinker(new Module("ld-temp.o", Context)),
+ TargetMach(nullptr), EmitDwarfDebugInfo(false),
+ ScopeRestrictionsDone(false), CodeModel(LTO_CODEGEN_PIC_MODEL_DEFAULT),
+ NativeObjectFile(nullptr), DiagHandler(nullptr), DiagContext(nullptr) {
initializeLTOPasses();
}
LTOCodeGenerator::~LTOCodeGenerator() {
delete TargetMach;
delete NativeObjectFile;
- TargetMach = NULL;
- NativeObjectFile = NULL;
+ TargetMach = nullptr;
+ NativeObjectFile = nullptr;
- Linker.deleteModule();
+ IRLinker.deleteModule();
for (std::vector<char *>::iterator I = CodegenOptions.begin(),
E = CodegenOptions.end();
@@ -82,8 +86,7 @@ LTOCodeGenerator::~LTOCodeGenerator() {
// Initialize LTO passes. Please keep this function in sync with
// PassManagerBuilder::populateLTOPassManager(), and make sure all LTO
-// passes are initialized.
-//
+// passes are initialized.
void LTOCodeGenerator::initializeLTOPasses() {
PassRegistry &R = *PassRegistry::getPassRegistry();
@@ -104,6 +107,7 @@ void LTOCodeGenerator::initializeLTOPasses() {
initializeFunctionAttrsPass(R);
initializeGlobalsModRefPass(R);
initializeLICMPass(R);
+ initializeMergedLoadStoreMotionPass(R);
initializeGVNPass(R);
initializeMemCpyOptPass(R);
initializeDCEPass(R);
@@ -111,7 +115,7 @@ void LTOCodeGenerator::initializeLTOPasses() {
}
bool LTOCodeGenerator::addModule(LTOModule* mod, std::string& errMsg) {
- bool ret = Linker.linkInModule(mod->getLLVVMModule(), &errMsg);
+ bool ret = IRLinker.linkInModule(&mod->getModule(), &errMsg);
const std::vector<const char*> &undefs = mod->getAsmUndefinedRefs();
for (int i = 0, e = undefs.size(); i != e; ++i)
@@ -121,24 +125,7 @@ bool LTOCodeGenerator::addModule(LTOModule* mod, std::string& errMsg) {
}
void LTOCodeGenerator::setTargetOptions(TargetOptions options) {
- Options.LessPreciseFPMADOption = options.LessPreciseFPMADOption;
- Options.NoFramePointerElim = options.NoFramePointerElim;
- Options.AllowFPOpFusion = options.AllowFPOpFusion;
- Options.UnsafeFPMath = options.UnsafeFPMath;
- Options.NoInfsFPMath = options.NoInfsFPMath;
- Options.NoNaNsFPMath = options.NoNaNsFPMath;
- Options.HonorSignDependentRoundingFPMathOption =
- options.HonorSignDependentRoundingFPMathOption;
- Options.UseSoftFloat = options.UseSoftFloat;
- Options.FloatABIType = options.FloatABIType;
- Options.NoZerosInBSS = options.NoZerosInBSS;
- Options.GuaranteedTailCallOpt = options.GuaranteedTailCallOpt;
- Options.DisableTailCalls = options.DisableTailCalls;
- Options.StackAlignmentOverride = options.StackAlignmentOverride;
- Options.TrapFuncName = options.TrapFuncName;
- Options.PositionIndependentExecutable = options.PositionIndependentExecutable;
- Options.EnableSegmentedStacks = options.EnableSegmentedStacks;
- Options.UseInitArray = options.UseInitArray;
+ Options = options;
}
void LTOCodeGenerator::setDebugInfo(lto_debug_model debug) {
@@ -159,6 +146,7 @@ void LTOCodeGenerator::setCodePICModel(lto_codegen_model model) {
case LTO_CODEGEN_PIC_MODEL_STATIC:
case LTO_CODEGEN_PIC_MODEL_DYNAMIC:
case LTO_CODEGEN_PIC_MODEL_DYNAMIC_NO_PIC:
+ case LTO_CODEGEN_PIC_MODEL_DEFAULT:
CodeModel = model;
return;
}
@@ -175,7 +163,7 @@ bool LTOCodeGenerator::writeMergedModules(const char *path,
// create output file
std::string ErrInfo;
- tool_output_file Out(path, ErrInfo, sys::fs::F_Binary);
+ tool_output_file Out(path, ErrInfo, sys::fs::F_None);
if (!ErrInfo.empty()) {
errMsg = "could not open bitcode file for writing: ";
errMsg += path;
@@ -183,7 +171,7 @@ bool LTOCodeGenerator::writeMergedModules(const char *path,
}
// write bitcode to it
- WriteBitcodeToFile(Linker.getModule(), Out.os());
+ WriteBitcodeToFile(IRLinker.getModule(), Out.os());
Out.os().close();
if (Out.os().has_error()) {
@@ -197,7 +185,7 @@ bool LTOCodeGenerator::writeMergedModules(const char *path,
return true;
}
-bool LTOCodeGenerator::compile_to_file(const char** name,
+bool LTOCodeGenerator::compile_to_file(const char** name,
bool disableOpt,
bool disableInline,
bool disableGVNLoadPRE,
@@ -205,7 +193,8 @@ bool LTOCodeGenerator::compile_to_file(const char** name,
// make unique temp .o file to put generated object file
SmallString<128> Filename;
int FD;
- error_code EC = sys::fs::createTemporaryFile("lto-llvm", "o", FD, Filename);
+ std::error_code EC =
+ sys::fs::createTemporaryFile("lto-llvm", "o", FD, Filename);
if (EC) {
errMsg = EC.message();
return false;
@@ -242,42 +231,43 @@ const void* LTOCodeGenerator::compile(size_t* length,
const char *name;
if (!compile_to_file(&name, disableOpt, disableInline, disableGVNLoadPRE,
errMsg))
- return NULL;
+ return nullptr;
// remove old buffer if compile() called twice
delete NativeObjectFile;
// read .o file into memory buffer
- OwningPtr<MemoryBuffer> BuffPtr;
- if (error_code ec = MemoryBuffer::getFile(name, BuffPtr, -1, false)) {
- errMsg = ec.message();
+ ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr =
+ MemoryBuffer::getFile(name, -1, false);
+ if (std::error_code EC = BufferOrErr.getError()) {
+ errMsg = EC.message();
sys::fs::remove(NativeObjectPath);
- return NULL;
+ return nullptr;
}
- NativeObjectFile = BuffPtr.take();
+ NativeObjectFile = BufferOrErr.get().release();
// remove temp files
sys::fs::remove(NativeObjectPath);
// return buffer, unless error
- if (NativeObjectFile == NULL)
- return NULL;
+ if (!NativeObjectFile)
+ return nullptr;
*length = NativeObjectFile->getBufferSize();
return NativeObjectFile->getBufferStart();
}
bool LTOCodeGenerator::determineTarget(std::string &errMsg) {
- if (TargetMach != NULL)
+ if (TargetMach)
return true;
- std::string TripleStr = Linker.getModule()->getTargetTriple();
+ std::string TripleStr = IRLinker.getModule()->getTargetTriple();
if (TripleStr.empty())
TripleStr = sys::getDefaultTargetTriple();
llvm::Triple Triple(TripleStr);
// create target machine from info for merged modules
const Target *march = TargetRegistry::lookupTarget(TripleStr, errMsg);
- if (march == NULL)
+ if (!march)
return false;
// The relocation model is actually a static member of TargetMachine and
@@ -293,10 +283,14 @@ bool LTOCodeGenerator::determineTarget(std::string &errMsg) {
case LTO_CODEGEN_PIC_MODEL_DYNAMIC_NO_PIC:
RelocModel = Reloc::DynamicNoPIC;
break;
+ case LTO_CODEGEN_PIC_MODEL_DEFAULT:
+ // RelocModel is already the default, so leave it that way.
+ break;
}
- // construct LTOModule, hand over ownership of module and target
- SubtargetFeatures Features;
+ // Construct LTOModule, hand over ownership of module and target. Use MAttr as
+ // the default set of features.
+ SubtargetFeatures Features(MAttr);
Features.getDefaultSubtargetFeatures(Triple);
std::string FeatureStr = Features.getString();
// Set a default CPU for Darwin triples.
@@ -305,6 +299,9 @@ bool LTOCodeGenerator::determineTarget(std::string &errMsg) {
MCpu = "core2";
else if (Triple.getArch() == llvm::Triple::x86)
MCpu = "yonah";
+ else if (Triple.getArch() == llvm::Triple::arm64 ||
+ Triple.getArch() == llvm::Triple::aarch64)
+ MCpu = "cyclone";
}
TargetMach = march->createTargetMachine(TripleStr, MCpu, FeatureStr, Options,
@@ -319,11 +316,17 @@ applyRestriction(GlobalValue &GV,
std::vector<const char*> &MustPreserveList,
SmallPtrSet<GlobalValue*, 8> &AsmUsed,
Mangler &Mangler) {
- SmallString<64> Buffer;
- Mangler.getNameWithPrefix(Buffer, &GV, false);
-
+ // There are no restrictions to apply to declarations.
if (GV.isDeclaration())
return;
+
+ // There is nothing more restrictive than private linkage.
+ if (GV.hasPrivateLinkage())
+ return;
+
+ SmallString<64> Buffer;
+ TargetMach->getNameWithPrefix(Buffer, &GV, Mangler);
+
if (MustPreserveSymbols.count(Buffer))
MustPreserveList.push_back(GV.getName().data());
if (AsmUndefinedRefs.count(Buffer))
@@ -341,7 +344,7 @@ applyRestriction(GlobalValue &GV,
static void findUsedValues(GlobalVariable *LLVMUsed,
SmallPtrSet<GlobalValue*, 8> &UsedValues) {
- if (LLVMUsed == 0) return;
+ if (!LLVMUsed) return;
ConstantArray *Inits = cast<ConstantArray>(LLVMUsed->getInitializer());
for (unsigned i = 0, e = Inits->getNumOperands(); i != e; ++i)
@@ -380,14 +383,15 @@ static void accumulateAndSortLibcalls(std::vector<StringRef> &Libcalls,
void LTOCodeGenerator::applyScopeRestrictions() {
if (ScopeRestrictionsDone)
return;
- Module *mergedModule = Linker.getModule();
+ Module *mergedModule = IRLinker.getModule();
// Start off with a verification pass.
PassManager passes;
passes.add(createVerifierPass());
+ passes.add(createDebugInfoVerifierPass());
// mark which symbols can not be internalized
- Mangler Mangler(TargetMach);
+ Mangler Mangler(TargetMach->getDataLayout());
std::vector<const char*> MustPreserveList;
SmallPtrSet<GlobalValue*, 8> AsmUsed;
std::vector<StringRef> Libcalls;
@@ -413,9 +417,7 @@ void LTOCodeGenerator::applyScopeRestrictions() {
if (!AsmUsed.empty()) {
llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(Context);
std::vector<Constant*> asmUsed2;
- for (SmallPtrSet<GlobalValue*, 16>::const_iterator i = AsmUsed.begin(),
- e = AsmUsed.end(); i !=e; ++i) {
- GlobalValue *GV = *i;
+ for (auto *GV : AsmUsed) {
Constant *c = ConstantExpr::getBitCast(GV, i8PTy);
asmUsed2.push_back(c);
}
@@ -447,7 +449,7 @@ bool LTOCodeGenerator::generateObjectFile(raw_ostream &out,
if (!this->determineTarget(errMsg))
return false;
- Module *mergedModule = Linker.getModule();
+ Module *mergedModule = IRLinker.getModule();
// Mark which symbols can not be internalized
this->applyScopeRestrictions();
@@ -457,9 +459,15 @@ bool LTOCodeGenerator::generateObjectFile(raw_ostream &out,
// Start off with a verification pass.
passes.add(createVerifierPass());
+ passes.add(createDebugInfoVerifierPass());
// Add an appropriate DataLayout instance for this module...
- passes.add(new DataLayout(*TargetMach->getDataLayout()));
+ mergedModule->setDataLayout(TargetMach->getDataLayout());
+ passes.add(new DataLayoutPass(mergedModule));
+
+ // Add appropriate TargetLibraryInfo for this module.
+ passes.add(new TargetLibraryInfo(Triple(TargetMach->getTargetTriple())));
+
TargetMach->addAnalysisPasses(passes);
// Enabling internalize here would use its AllButMain variant. It
@@ -473,11 +481,11 @@ bool LTOCodeGenerator::generateObjectFile(raw_ostream &out,
// Make sure everything is still good.
passes.add(createVerifierPass());
+ passes.add(createDebugInfoVerifierPass());
PassManager codeGenPasses;
- codeGenPasses.add(new DataLayout(*TargetMach->getDataLayout()));
- TargetMach->addAnalysisPasses(codeGenPasses);
+ codeGenPasses.add(new DataLayoutPass(mergedModule));
formatted_raw_ostream Out(out);
@@ -519,3 +527,50 @@ void LTOCodeGenerator::parseCodeGenDebugOptions() {
cl::ParseCommandLineOptions(CodegenOptions.size(),
const_cast<char **>(&CodegenOptions[0]));
}
+
+void LTOCodeGenerator::DiagnosticHandler(const DiagnosticInfo &DI,
+ void *Context) {
+ ((LTOCodeGenerator *)Context)->DiagnosticHandler2(DI);
+}
+
+void LTOCodeGenerator::DiagnosticHandler2(const DiagnosticInfo &DI) {
+ // Map the LLVM internal diagnostic severity to the LTO diagnostic severity.
+ lto_codegen_diagnostic_severity_t Severity;
+ switch (DI.getSeverity()) {
+ case DS_Error:
+ Severity = LTO_DS_ERROR;
+ break;
+ case DS_Warning:
+ Severity = LTO_DS_WARNING;
+ break;
+ case DS_Remark:
+ Severity = LTO_DS_REMARK;
+ break;
+ case DS_Note:
+ Severity = LTO_DS_NOTE;
+ break;
+ }
+ // Create the string that will be reported to the external diagnostic handler.
+ std::string MsgStorage;
+ raw_string_ostream Stream(MsgStorage);
+ DiagnosticPrinterRawOStream DP(Stream);
+ DI.print(DP);
+ Stream.flush();
+
+ // If this method has been called it means someone has set up an external
+ // diagnostic handler. Assert on that.
+ assert(DiagHandler && "Invalid diagnostic handler");
+ (*DiagHandler)(Severity, MsgStorage.c_str(), DiagContext);
+}
+
+void
+LTOCodeGenerator::setDiagnosticHandler(lto_diagnostic_handler_t DiagHandler,
+ void *Ctxt) {
+ this->DiagHandler = DiagHandler;
+ this->DiagContext = Ctxt;
+ if (!DiagHandler)
+ return Context.setDiagnosticHandler(nullptr, nullptr);
+ // Register the LTOCodeGenerator stub in the LLVMContext to forward the
+ // diagnostic to the external DiagHandler.
+ Context.setDiagnosticHandler(LTOCodeGenerator::DiagnosticHandler, this);
+}
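
The diagnostic plumbing added above registers a static trampoline (LTOCodeGenerator::DiagnosticHandler) on the LLVMContext and forwards each DiagnosticInfo, with its severity mapped to the lto_codegen_diagnostic_severity_t values, to whatever external handler the client installed. A minimal client-side sketch of that hookup follows; it is not part of the patch, the callback shape is taken from llvm-c/lto.h, and names such as MyLTOContext and handleLTODiag are illustrative only.

#include "llvm-c/lto.h"
#include "llvm/LTO/LTOCodeGenerator.h"
#include <cstdio>

struct MyLTOContext {
  bool SawError = false;
};

// Matches the (severity, message, context) shape used by the forwarding call
// in DiagnosticHandler2 above.
static void handleLTODiag(lto_codegen_diagnostic_severity_t Severity,
                          const char *Msg, void *Ctxt) {
  MyLTOContext *C = static_cast<MyLTOContext *>(Ctxt);
  if (Severity == LTO_DS_ERROR)
    C->SawError = true;
  std::fprintf(stderr, "lto: %s\n", Msg);
}

void installHandler(llvm::LTOCodeGenerator &CG, MyLTOContext &C) {
  // setDiagnosticHandler registers the LTOCodeGenerator trampoline on the
  // LLVMContext; diagnostics then reach handleLTODiag with &C as context.
  CG.setDiagnosticHandler(handleLTODiag, &C);
}
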
diff --git a/contrib/llvm/lib/LTO/LTOModule.cpp b/contrib/llvm/lib/LTO/LTOModule.cpp
index 65416be..844c0f2 100644
--- a/contrib/llvm/lib/LTO/LTOModule.cpp
+++ b/contrib/llvm/lib/LTO/LTOModule.cpp
@@ -13,38 +13,39 @@
//===----------------------------------------------------------------------===//
#include "llvm/LTO/LTOModule.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
-#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
-#include "llvm/Support/system_error.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
+#include <system_error>
using namespace llvm;
-LTOModule::LTOModule(llvm::Module *m, llvm::TargetMachine *t)
- : _module(m), _target(t),
- _context(_target->getMCAsmInfo(), _target->getRegisterInfo(), NULL),
- _mangler(t) {}
+LTOModule::LTOModule(std::unique_ptr<object::IRObjectFile> Obj,
+ llvm::TargetMachine *TM)
+ : IRFile(std::move(Obj)), _target(TM) {}
/// isBitcodeFile - Returns 'true' if the file (or memory contents) is LLVM
/// bitcode.
@@ -60,85 +61,63 @@ bool LTOModule::isBitcodeFile(const char *path) {
return type == sys::fs::file_magic::bitcode;
}
-/// isBitcodeFileForTarget - Returns 'true' if the file (or memory contents) is
-/// LLVM bitcode for the specified triple.
-bool LTOModule::isBitcodeFileForTarget(const void *mem, size_t length,
- const char *triplePrefix) {
- MemoryBuffer *buffer = makeBuffer(mem, length);
- if (!buffer)
- return false;
- return isTargetMatch(buffer, triplePrefix);
-}
-
-bool LTOModule::isBitcodeFileForTarget(const char *path,
- const char *triplePrefix) {
- OwningPtr<MemoryBuffer> buffer;
- if (MemoryBuffer::getFile(path, buffer))
- return false;
- return isTargetMatch(buffer.take(), triplePrefix);
-}
-
-/// isTargetMatch - Returns 'true' if the memory buffer is for the specified
-/// target triple.
-bool LTOModule::isTargetMatch(MemoryBuffer *buffer, const char *triplePrefix) {
+bool LTOModule::isBitcodeForTarget(MemoryBuffer *buffer,
+ StringRef triplePrefix) {
std::string Triple = getBitcodeTargetTriple(buffer, getGlobalContext());
- delete buffer;
- return strncmp(Triple.c_str(), triplePrefix, strlen(triplePrefix)) == 0;
+ return StringRef(Triple).startswith(triplePrefix);
}
-/// makeLTOModule - Create an LTOModule. N.B. These methods take ownership of
-/// the buffer.
-LTOModule *LTOModule::makeLTOModule(const char *path, TargetOptions options,
- std::string &errMsg) {
- OwningPtr<MemoryBuffer> buffer;
- if (error_code ec = MemoryBuffer::getFile(path, buffer)) {
- errMsg = ec.message();
- return NULL;
+LTOModule *LTOModule::createFromFile(const char *path, TargetOptions options,
+ std::string &errMsg) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr =
+ MemoryBuffer::getFile(path);
+ if (std::error_code EC = BufferOrErr.getError()) {
+ errMsg = EC.message();
+ return nullptr;
}
- return makeLTOModule(buffer.take(), options, errMsg);
+ return makeLTOModule(std::move(BufferOrErr.get()), options, errMsg);
}
-LTOModule *LTOModule::makeLTOModule(int fd, const char *path,
- size_t size, TargetOptions options,
- std::string &errMsg) {
- return makeLTOModule(fd, path, size, 0, options, errMsg);
+LTOModule *LTOModule::createFromOpenFile(int fd, const char *path, size_t size,
+ TargetOptions options,
+ std::string &errMsg) {
+ return createFromOpenFileSlice(fd, path, size, 0, options, errMsg);
}
-LTOModule *LTOModule::makeLTOModule(int fd, const char *path,
- size_t map_size,
- off_t offset,
- TargetOptions options,
- std::string &errMsg) {
- OwningPtr<MemoryBuffer> buffer;
- if (error_code ec =
- MemoryBuffer::getOpenFileSlice(fd, path, buffer, map_size, offset)) {
- errMsg = ec.message();
- return NULL;
+LTOModule *LTOModule::createFromOpenFileSlice(int fd, const char *path,
+ size_t map_size, off_t offset,
+ TargetOptions options,
+ std::string &errMsg) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr =
+ MemoryBuffer::getOpenFileSlice(fd, path, map_size, offset);
+ if (std::error_code EC = BufferOrErr.getError()) {
+ errMsg = EC.message();
+ return nullptr;
}
- return makeLTOModule(buffer.take(), options, errMsg);
+ return makeLTOModule(std::move(BufferOrErr.get()), options, errMsg);
}
-LTOModule *LTOModule::makeLTOModule(const void *mem, size_t length,
- TargetOptions options,
- std::string &errMsg) {
- OwningPtr<MemoryBuffer> buffer(makeBuffer(mem, length));
+LTOModule *LTOModule::createFromBuffer(const void *mem, size_t length,
+ TargetOptions options,
+ std::string &errMsg, StringRef path) {
+ std::unique_ptr<MemoryBuffer> buffer(makeBuffer(mem, length, path));
if (!buffer)
- return NULL;
- return makeLTOModule(buffer.take(), options, errMsg);
+ return nullptr;
+ return makeLTOModule(std::move(buffer), options, errMsg);
}
-LTOModule *LTOModule::makeLTOModule(MemoryBuffer *buffer,
+LTOModule *LTOModule::makeLTOModule(std::unique_ptr<MemoryBuffer> Buffer,
TargetOptions options,
std::string &errMsg) {
- // parse bitcode buffer
- OwningPtr<Module> m(getLazyBitcodeModule(buffer, getGlobalContext(),
- &errMsg));
- if (!m) {
- delete buffer;
- return NULL;
+ ErrorOr<Module *> MOrErr =
+ getLazyBitcodeModule(Buffer.get(), getGlobalContext());
+ if (std::error_code EC = MOrErr.getError()) {
+ errMsg = EC.message();
+ return nullptr;
}
+ std::unique_ptr<Module> M(MOrErr.get());
- std::string TripleStr = m->getTargetTriple();
+ std::string TripleStr = M->getTargetTriple();
if (TripleStr.empty())
TripleStr = sys::getDefaultTargetTriple();
llvm::Triple Triple(TripleStr);
@@ -146,7 +125,7 @@ LTOModule *LTOModule::makeLTOModule(MemoryBuffer *buffer,
// find machine architecture for this module
const Target *march = TargetRegistry::lookupTarget(TripleStr, errMsg);
if (!march)
- return NULL;
+ return nullptr;
// construct LTOModule, hand over ownership of module and target
SubtargetFeatures Features;
@@ -159,25 +138,36 @@ LTOModule *LTOModule::makeLTOModule(MemoryBuffer *buffer,
CPU = "core2";
else if (Triple.getArch() == llvm::Triple::x86)
CPU = "yonah";
+ else if (Triple.getArch() == llvm::Triple::arm64 ||
+ Triple.getArch() == llvm::Triple::aarch64)
+ CPU = "cyclone";
}
TargetMachine *target = march->createTargetMachine(TripleStr, CPU, FeatureStr,
options);
- m->MaterializeAllPermanently();
+ M->materializeAllPermanently(true);
+ M->setDataLayout(target->getDataLayout());
+
+ std::unique_ptr<object::IRObjectFile> IRObj(
+ new object::IRObjectFile(std::move(Buffer), std::move(M)));
+
+ LTOModule *Ret = new LTOModule(std::move(IRObj), target);
- LTOModule *Ret = new LTOModule(m.take(), target);
if (Ret->parseSymbols(errMsg)) {
delete Ret;
- return NULL;
+ return nullptr;
}
+ Ret->parseMetadata();
+
return Ret;
}
-/// makeBuffer - Create a MemoryBuffer from a memory range.
-MemoryBuffer *LTOModule::makeBuffer(const void *mem, size_t length) {
+/// Create a MemoryBuffer from a memory range with an optional name.
+MemoryBuffer *LTOModule::makeBuffer(const void *mem, size_t length,
+ StringRef name) {
const char *startPtr = (const char*)mem;
- return MemoryBuffer::getMemBuffer(StringRef(startPtr, length), "", false);
+ return MemoryBuffer::getMemBuffer(StringRef(startPtr, length), name, false);
}
/// objcClassNameFromExpression - Get string that the data pointer points to.
@@ -280,10 +270,20 @@ void LTOModule::addObjCClassRef(const GlobalVariable *clgv) {
entry.setValue(info);
}
-/// addDefinedDataSymbol - Add a data symbol as defined to the list.
-void LTOModule::addDefinedDataSymbol(const GlobalValue *v) {
+void LTOModule::addDefinedDataSymbol(const object::BasicSymbolRef &Sym) {
+ SmallString<64> Buffer;
+ {
+ raw_svector_ostream OS(Buffer);
+ Sym.printName(OS);
+ }
+
+ const GlobalValue *V = IRFile->getSymbolGV(Sym.getRawDataRefImpl());
+ addDefinedDataSymbol(Buffer.c_str(), V);
+}
+
+void LTOModule::addDefinedDataSymbol(const char *Name, const GlobalValue *v) {
// Add to list of defined symbols.
- addDefinedSymbol(v, false);
+ addDefinedSymbol(Name, v, false);
if (!v->hasSection() /* || !isTargetDarwin */)
return;
@@ -309,34 +309,47 @@ void LTOModule::addDefinedDataSymbol(const GlobalValue *v) {
// from the ObjC data structures generated by the front end.
// special case if this data blob is an ObjC class definition
- if (v->getSection().compare(0, 15, "__OBJC,__class,") == 0) {
+ std::string Section = v->getSection();
+ if (Section.compare(0, 15, "__OBJC,__class,") == 0) {
if (const GlobalVariable *gv = dyn_cast<GlobalVariable>(v)) {
addObjCClass(gv);
}
}
// special case if this data blob is an ObjC category definition
- else if (v->getSection().compare(0, 18, "__OBJC,__category,") == 0) {
+ else if (Section.compare(0, 18, "__OBJC,__category,") == 0) {
if (const GlobalVariable *gv = dyn_cast<GlobalVariable>(v)) {
addObjCCategory(gv);
}
}
// special case if this data blob is the list of referenced classes
- else if (v->getSection().compare(0, 18, "__OBJC,__cls_refs,") == 0) {
+ else if (Section.compare(0, 18, "__OBJC,__cls_refs,") == 0) {
if (const GlobalVariable *gv = dyn_cast<GlobalVariable>(v)) {
addObjCClassRef(gv);
}
}
}
-/// addDefinedFunctionSymbol - Add a function symbol as defined to the list.
-void LTOModule::addDefinedFunctionSymbol(const Function *f) {
+void LTOModule::addDefinedFunctionSymbol(const object::BasicSymbolRef &Sym) {
+ SmallString<64> Buffer;
+ {
+ raw_svector_ostream OS(Buffer);
+ Sym.printName(OS);
+ }
+
+ const Function *F =
+ cast<Function>(IRFile->getSymbolGV(Sym.getRawDataRefImpl()));
+ addDefinedFunctionSymbol(Buffer.c_str(), F);
+}
+
+void LTOModule::addDefinedFunctionSymbol(const char *Name, const Function *F) {
// add to list of defined symbols
- addDefinedSymbol(f, true);
+ addDefinedSymbol(Name, F, true);
}
static bool canBeHidden(const GlobalValue *GV) {
+ // FIXME: this is duplicated with another static function in AsmPrinter.cpp
GlobalValue::LinkageTypes L = GV->getLinkage();
if (L != GlobalValue::LinkOnceODRLinkage)
@@ -345,6 +358,13 @@ static bool canBeHidden(const GlobalValue *GV) {
if (GV->hasUnnamedAddr())
return true;
+  // If it is a non-constant variable, it needs to be uniqued across shared
+ // objects.
+ if (const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV)) {
+ if (!Var->isConstant())
+ return false;
+ }
+
GlobalStatus GS;
if (GlobalStatus::analyzeGlobal(GV, GS))
return false;
@@ -352,19 +372,11 @@ static bool canBeHidden(const GlobalValue *GV) {
return !GS.IsCompared;
}
-/// addDefinedSymbol - Add a defined symbol to the list.
-void LTOModule::addDefinedSymbol(const GlobalValue *def, bool isFunction) {
- // ignore all llvm.* symbols
- if (def->getName().startswith("llvm."))
- return;
-
- // string is owned by _defines
- SmallString<64> Buffer;
- _mangler.getNameWithPrefix(Buffer, def, false);
-
+void LTOModule::addDefinedSymbol(const char *Name, const GlobalValue *def,
+ bool isFunction) {
// set alignment part log2() can have rounding errors
uint32_t align = def->getAlignment();
- uint32_t attr = align ? countTrailingZeros(def->getAlignment()) : 0;
+ uint32_t attr = align ? countTrailingZeros(align) : 0;
// set permissions part
if (isFunction) {
@@ -378,8 +390,7 @@ void LTOModule::addDefinedSymbol(const GlobalValue *def, bool isFunction) {
}
// set definition part
- if (def->hasWeakLinkage() || def->hasLinkOnceLinkage() ||
- def->hasLinkerPrivateWeakLinkage())
+ if (def->hasWeakLinkage() || def->hasLinkOnceLinkage())
attr |= LTO_SYMBOL_DEFINITION_WEAK;
else if (def->hasCommonLinkage())
attr |= LTO_SYMBOL_DEFINITION_TENTATIVE;
@@ -387,27 +398,26 @@ void LTOModule::addDefinedSymbol(const GlobalValue *def, bool isFunction) {
attr |= LTO_SYMBOL_DEFINITION_REGULAR;
// set scope part
- if (def->hasHiddenVisibility())
+ if (def->hasLocalLinkage())
+ // Ignore visibility if linkage is local.
+ attr |= LTO_SYMBOL_SCOPE_INTERNAL;
+ else if (def->hasHiddenVisibility())
attr |= LTO_SYMBOL_SCOPE_HIDDEN;
else if (def->hasProtectedVisibility())
attr |= LTO_SYMBOL_SCOPE_PROTECTED;
else if (canBeHidden(def))
attr |= LTO_SYMBOL_SCOPE_DEFAULT_CAN_BE_HIDDEN;
- else if (def->hasExternalLinkage() || def->hasWeakLinkage() ||
- def->hasLinkOnceLinkage() || def->hasCommonLinkage() ||
- def->hasLinkerPrivateWeakLinkage())
- attr |= LTO_SYMBOL_SCOPE_DEFAULT;
else
- attr |= LTO_SYMBOL_SCOPE_INTERNAL;
+ attr |= LTO_SYMBOL_SCOPE_DEFAULT;
- StringSet::value_type &entry = _defines.GetOrCreateValue(Buffer);
+ StringSet::value_type &entry = _defines.GetOrCreateValue(Name);
entry.setValue(1);
// fill information structure
NameAndAttributes info;
- StringRef Name = entry.getKey();
- info.name = Name.data();
- assert(info.name[Name.size()] == '\0');
+ StringRef NameRef = entry.getKey();
+ info.name = NameRef.data();
+ assert(info.name[NameRef.size()] == '\0');
info.attributes = attr;
info.isFunction = isFunction;
info.symbol = def;
@@ -430,7 +440,7 @@ void LTOModule::addAsmGlobalSymbol(const char *name,
NameAndAttributes &info = _undefines[entry.getKey().data()];
- if (info.symbol == 0) {
+ if (info.symbol == nullptr) {
// FIXME: This is trying to take care of module ASM like this:
//
// module asm ".zerofill __FOO, __foo, _bar_baz_qux, 0"
@@ -444,7 +454,7 @@ void LTOModule::addAsmGlobalSymbol(const char *name,
info.attributes =
LTO_SYMBOL_PERMISSIONS_DATA | LTO_SYMBOL_DEFINITION_REGULAR | scope;
info.isFunction = false;
- info.symbol = 0;
+ info.symbol = nullptr;
// add to table of symbols
_symbols.push_back(info);
@@ -452,9 +462,9 @@ void LTOModule::addAsmGlobalSymbol(const char *name,
}
if (info.isFunction)
- addDefinedFunctionSymbol(cast<Function>(info.symbol));
+ addDefinedFunctionSymbol(info.name, cast<Function>(info.symbol));
else
- addDefinedDataSymbol(info.symbol);
+ addDefinedDataSymbol(info.name, info.symbol);
_symbols.back().attributes &= ~LTO_SYMBOL_SCOPE_MASK;
_symbols.back().attributes |= scope;
@@ -472,31 +482,25 @@ void LTOModule::addAsmGlobalSymbolUndef(const char *name) {
if (entry.getValue().name)
return;
- uint32_t attr = LTO_SYMBOL_DEFINITION_UNDEFINED;;
+ uint32_t attr = LTO_SYMBOL_DEFINITION_UNDEFINED;
attr |= LTO_SYMBOL_SCOPE_DEFAULT;
NameAndAttributes info;
info.name = entry.getKey().data();
info.attributes = attr;
info.isFunction = false;
- info.symbol = 0;
+ info.symbol = nullptr;
entry.setValue(info);
}
-/// addPotentialUndefinedSymbol - Add a symbol which isn't defined just yet to a
-/// list to be resolved later.
-void
-LTOModule::addPotentialUndefinedSymbol(const GlobalValue *decl, bool isFunc) {
- // ignore all llvm.* symbols
- if (decl->getName().startswith("llvm."))
- return;
-
- // ignore all aliases
- if (isa<GlobalAlias>(decl))
- return;
-
+/// Add a symbol which isn't defined just yet to a list to be resolved later.
+void LTOModule::addPotentialUndefinedSymbol(const object::BasicSymbolRef &Sym,
+ bool isFunc) {
SmallString<64> name;
- _mangler.getNameWithPrefix(name, decl, false);
+ {
+ raw_svector_ostream OS(name);
+ Sym.printName(OS);
+ }
StringMap<NameAndAttributes>::value_type &entry =
_undefines.GetOrCreateValue(name);
@@ -509,6 +513,8 @@ LTOModule::addPotentialUndefinedSymbol(const GlobalValue *decl, bool isFunc) {
info.name = entry.getKey().data();
+ const GlobalValue *decl = IRFile->getSymbolGV(Sym.getRawDataRefImpl());
+
if (decl->hasExternalWeakLinkage())
info.attributes = LTO_SYMBOL_DEFINITION_WEAKUNDEF;
else
@@ -520,264 +526,52 @@ LTOModule::addPotentialUndefinedSymbol(const GlobalValue *decl, bool isFunc) {
entry.setValue(info);
}
-namespace {
- class RecordStreamer : public MCStreamer {
- public:
- enum State { NeverSeen, Global, Defined, DefinedGlobal, Used };
-
- private:
- StringMap<State> Symbols;
-
- void markDefined(const MCSymbol &Symbol) {
- State &S = Symbols[Symbol.getName()];
- switch (S) {
- case DefinedGlobal:
- case Global:
- S = DefinedGlobal;
- break;
- case NeverSeen:
- case Defined:
- case Used:
- S = Defined;
- break;
- }
- }
- void markGlobal(const MCSymbol &Symbol) {
- State &S = Symbols[Symbol.getName()];
- switch (S) {
- case DefinedGlobal:
- case Defined:
- S = DefinedGlobal;
- break;
-
- case NeverSeen:
- case Global:
- case Used:
- S = Global;
- break;
- }
- }
- void markUsed(const MCSymbol &Symbol) {
- State &S = Symbols[Symbol.getName()];
- switch (S) {
- case DefinedGlobal:
- case Defined:
- case Global:
- break;
-
- case NeverSeen:
- case Used:
- S = Used;
- break;
- }
- }
-
- // FIXME: mostly copied for the obj streamer.
- void AddValueSymbols(const MCExpr *Value) {
- switch (Value->getKind()) {
- case MCExpr::Target:
- // FIXME: What should we do in here?
- break;
-
- case MCExpr::Constant:
- break;
-
- case MCExpr::Binary: {
- const MCBinaryExpr *BE = cast<MCBinaryExpr>(Value);
- AddValueSymbols(BE->getLHS());
- AddValueSymbols(BE->getRHS());
- break;
- }
-
- case MCExpr::SymbolRef:
- markUsed(cast<MCSymbolRefExpr>(Value)->getSymbol());
- break;
-
- case MCExpr::Unary:
- AddValueSymbols(cast<MCUnaryExpr>(Value)->getSubExpr());
- break;
+/// parseSymbols - Parse the symbols from the module and module-level ASM and add
+/// them to either the defined or undefined lists.
+bool LTOModule::parseSymbols(std::string &errMsg) {
+ for (auto &Sym : IRFile->symbols()) {
+ const GlobalValue *GV = IRFile->getSymbolGV(Sym.getRawDataRefImpl());
+ uint32_t Flags = Sym.getFlags();
+ if (Flags & object::BasicSymbolRef::SF_FormatSpecific)
+ continue;
+
+ bool IsUndefined = Flags & object::BasicSymbolRef::SF_Undefined;
+
+ if (!GV) {
+ SmallString<64> Buffer;
+ {
+ raw_svector_ostream OS(Buffer);
+ Sym.printName(OS);
}
+ const char *Name = Buffer.c_str();
+
+ if (IsUndefined)
+ addAsmGlobalSymbolUndef(Name);
+ else if (Flags & object::BasicSymbolRef::SF_Global)
+ addAsmGlobalSymbol(Name, LTO_SYMBOL_SCOPE_DEFAULT);
+ else
+ addAsmGlobalSymbol(Name, LTO_SYMBOL_SCOPE_INTERNAL);
+ continue;
}
- public:
- typedef StringMap<State>::const_iterator const_iterator;
-
- const_iterator begin() {
- return Symbols.begin();
+ auto *F = dyn_cast<Function>(GV);
+ if (IsUndefined) {
+ addPotentialUndefinedSymbol(Sym, F != nullptr);
+ continue;
}
- const_iterator end() {
- return Symbols.end();
- }
-
- RecordStreamer(MCContext &Context) : MCStreamer(Context, 0) {}
-
- virtual void EmitInstruction(const MCInst &Inst) {
- // Scan for values.
- for (unsigned i = Inst.getNumOperands(); i--; )
- if (Inst.getOperand(i).isExpr())
- AddValueSymbols(Inst.getOperand(i).getExpr());
- }
- virtual void EmitLabel(MCSymbol *Symbol) {
- Symbol->setSection(*getCurrentSection().first);
- markDefined(*Symbol);
- }
- virtual void EmitDebugLabel(MCSymbol *Symbol) {
- EmitLabel(Symbol);
- }
- virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
- // FIXME: should we handle aliases?
- markDefined(*Symbol);
- }
- virtual bool EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) {
- if (Attribute == MCSA_Global)
- markGlobal(*Symbol);
- return true;
- }
- virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
- uint64_t Size , unsigned ByteAlignment) {
- markDefined(*Symbol);
- }
- virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment) {
- markDefined(*Symbol);
+ if (F) {
+ addDefinedFunctionSymbol(Sym);
+ continue;
}
- virtual void EmitBundleAlignMode(unsigned AlignPow2) {}
- virtual void EmitBundleLock(bool AlignToEnd) {}
- virtual void EmitBundleUnlock() {}
-
- // Noop calls.
- virtual void ChangeSection(const MCSection *Section,
- const MCExpr *Subsection) {}
- virtual void InitToTextSection() {}
- virtual void InitSections() {}
- virtual void EmitAssemblerFlag(MCAssemblerFlag Flag) {}
- virtual void EmitThumbFunc(MCSymbol *Func) {}
- virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {}
- virtual void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) {}
- virtual void BeginCOFFSymbolDef(const MCSymbol *Symbol) {}
- virtual void EmitCOFFSymbolStorageClass(int StorageClass) {}
- virtual void EmitCOFFSymbolType(int Type) {}
- virtual void EndCOFFSymbolDef() {}
- virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {}
- virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment) {}
- virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
- uint64_t Size, unsigned ByteAlignment) {}
- virtual void EmitBytes(StringRef Data) {}
- virtual void EmitValueImpl(const MCExpr *Value, unsigned Size) {}
- virtual void EmitULEB128Value(const MCExpr *Value) {}
- virtual void EmitSLEB128Value(const MCExpr *Value) {}
- virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value,
- unsigned ValueSize,
- unsigned MaxBytesToEmit) {}
- virtual void EmitCodeAlignment(unsigned ByteAlignment,
- unsigned MaxBytesToEmit) {}
- virtual bool EmitValueToOffset(const MCExpr *Offset,
- unsigned char Value ) { return false; }
- virtual void EmitFileDirective(StringRef Filename) {}
- virtual void EmitDwarfAdvanceLineAddr(int64_t LineDelta,
- const MCSymbol *LastLabel,
- const MCSymbol *Label,
- unsigned PointerSize) {}
- virtual void FinishImpl() {}
- virtual void EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame) {
- RecordProcEnd(Frame);
+ if (isa<GlobalVariable>(GV)) {
+ addDefinedDataSymbol(Sym);
+ continue;
}
- };
-} // end anonymous namespace
-
-/// addAsmGlobalSymbols - Add global symbols from module-level ASM to the
-/// defined or undefined lists.
-bool LTOModule::addAsmGlobalSymbols(std::string &errMsg) {
- const std::string &inlineAsm = _module->getModuleInlineAsm();
- if (inlineAsm.empty())
- return false;
- OwningPtr<RecordStreamer> Streamer(new RecordStreamer(_context));
- MemoryBuffer *Buffer = MemoryBuffer::getMemBuffer(inlineAsm);
- SourceMgr SrcMgr;
- SrcMgr.AddNewSourceBuffer(Buffer, SMLoc());
- OwningPtr<MCAsmParser> Parser(createMCAsmParser(SrcMgr,
- _context, *Streamer,
- *_target->getMCAsmInfo()));
- const Target &T = _target->getTarget();
- OwningPtr<MCInstrInfo> MCII(T.createMCInstrInfo());
- OwningPtr<MCSubtargetInfo>
- STI(T.createMCSubtargetInfo(_target->getTargetTriple(),
- _target->getTargetCPU(),
- _target->getTargetFeatureString()));
- OwningPtr<MCTargetAsmParser> TAP(T.createMCAsmParser(*STI, *Parser.get(), *MCII));
- if (!TAP) {
- errMsg = "target " + std::string(T.getName()) +
- " does not define AsmParser.";
- return true;
- }
-
- Parser->setTargetParser(*TAP);
- if (Parser->Run(false))
- return true;
-
- for (RecordStreamer::const_iterator i = Streamer->begin(),
- e = Streamer->end(); i != e; ++i) {
- StringRef Key = i->first();
- RecordStreamer::State Value = i->second;
- if (Value == RecordStreamer::DefinedGlobal)
- addAsmGlobalSymbol(Key.data(), LTO_SYMBOL_SCOPE_DEFAULT);
- else if (Value == RecordStreamer::Defined)
- addAsmGlobalSymbol(Key.data(), LTO_SYMBOL_SCOPE_INTERNAL);
- else if (Value == RecordStreamer::Global ||
- Value == RecordStreamer::Used)
- addAsmGlobalSymbolUndef(Key.data());
- }
-
- return false;
-}
-
-/// isDeclaration - Return 'true' if the global value is a declaration.
-static bool isDeclaration(const GlobalValue &V) {
- if (V.hasAvailableExternallyLinkage())
- return true;
-
- if (V.isMaterializable())
- return false;
-
- return V.isDeclaration();
-}
-
-/// parseSymbols - Parse the symbols from the module and model-level ASM and add
-/// them to either the defined or undefined lists.
-bool LTOModule::parseSymbols(std::string &errMsg) {
- // add functions
- for (Module::iterator f = _module->begin(), e = _module->end(); f != e; ++f) {
- if (isDeclaration(*f))
- addPotentialUndefinedSymbol(f, true);
- else
- addDefinedFunctionSymbol(f);
- }
-
- // add data
- for (Module::global_iterator v = _module->global_begin(),
- e = _module->global_end(); v != e; ++v) {
- if (isDeclaration(*v))
- addPotentialUndefinedSymbol(v, false);
- else
- addDefinedDataSymbol(v);
- }
-
- // add asm globals
- if (addAsmGlobalSymbols(errMsg))
- return true;
-
- // add aliases
- for (Module::alias_iterator a = _module->alias_begin(),
- e = _module->alias_end(); a != e; ++a) {
- if (isDeclaration(*a->getAliasedGlobal()))
- // Is an alias to a declaration.
- addPotentialUndefinedSymbol(a, false);
- else
- addDefinedDataSymbol(a);
+ assert(isa<GlobalAlias>(GV));
+ addDefinedDataSymbol(Sym);
}
// make symbols for all undefines
@@ -792,3 +586,27 @@ bool LTOModule::parseSymbols(std::string &errMsg) {
return false;
}
+
+/// parseMetadata - Parse metadata from the module
+void LTOModule::parseMetadata() {
+ // Linker Options
+ if (Value *Val = getModule().getModuleFlag("Linker Options")) {
+ MDNode *LinkerOptions = cast<MDNode>(Val);
+ for (unsigned i = 0, e = LinkerOptions->getNumOperands(); i != e; ++i) {
+ MDNode *MDOptions = cast<MDNode>(LinkerOptions->getOperand(i));
+ for (unsigned ii = 0, ie = MDOptions->getNumOperands(); ii != ie; ++ii) {
+ MDString *MDOption = cast<MDString>(MDOptions->getOperand(ii));
+ StringRef Op = _linkeropt_strings.
+ GetOrCreateValue(MDOption->getString()).getKey();
+ StringRef DepLibName = _target->getTargetLowering()->
+ getObjFileLowering().getDepLibFromLinkerOpt(Op);
+ if (!DepLibName.empty())
+ _deplibs.push_back(DepLibName.data());
+ else if (!Op.empty())
+ _linkeropts.push_back(Op.data());
+ }
+ }
+ }
+
+ // Add other interesting metadata here.
+}
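
For reference, here is a minimal sketch of how a client would drive the renamed LTOModule entry points (createFromFile replacing the old makeLTOModule overloads) and walk the symbol table that parseSymbols now builds from the IRObjectFile. This is not part of the patch; the accessors getSymbolCount, getSymbolName and getSymbolAttributes are assumed from LTOModule.h and do not appear in the hunks above.

#include "llvm/LTO/LTOModule.h"
#include "llvm/Target/TargetOptions.h"
#include <cstdint>
#include <cstdio>
#include <string>

void dumpSymbols(const char *Path) {
  std::string Err;
  llvm::TargetOptions Options;
  // createFromFile replaces the old makeLTOModule(const char *path, ...).
  llvm::LTOModule *M = llvm::LTOModule::createFromFile(Path, Options, Err);
  if (!M) {
    std::fprintf(stderr, "error: %s\n", Err.c_str());
    return;
  }
  for (uint32_t I = 0, E = M->getSymbolCount(); I != E; ++I)
    std::fprintf(stdout, "%08x %s\n", (unsigned)M->getSymbolAttributes(I),
                 M->getSymbolName(I));
  delete M; // the factory returns a caller-owned pointer
}
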
diff --git a/contrib/llvm/lib/LineEditor/CMakeLists.txt b/contrib/llvm/lib/LineEditor/CMakeLists.txt
new file mode 100644
index 0000000..0dec256
--- /dev/null
+++ b/contrib/llvm/lib/LineEditor/CMakeLists.txt
@@ -0,0 +1,11 @@
+if(HAVE_LIBEDIT)
+ set(link_libs edit)
+endif()
+
+add_llvm_library(LLVMLineEditor
+ LineEditor.cpp
+
+ LINK_LIBS
+ LLVMSupport
+ ${link_libs}
+)
diff --git a/contrib/llvm/lib/LineEditor/LLVMBuild.txt b/contrib/llvm/lib/LineEditor/LLVMBuild.txt
new file mode 100644
index 0000000..35f4361
--- /dev/null
+++ b/contrib/llvm/lib/LineEditor/LLVMBuild.txt
@@ -0,0 +1,22 @@
+;===- ./lib/LineEditor/LLVMBuild.txt ---------------------------*- Conf -*--===;
+;
+; The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+; http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[component_0]
+type = Library
+name = LineEditor
+parent = Libraries
+required_libraries = Support
diff --git a/contrib/llvm/lib/LineEditor/LineEditor.cpp b/contrib/llvm/lib/LineEditor/LineEditor.cpp
new file mode 100644
index 0000000..a50ccc3
--- /dev/null
+++ b/contrib/llvm/lib/LineEditor/LineEditor.cpp
@@ -0,0 +1,319 @@
+//===-- LineEditor.cpp - line editor --------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/LineEditor/LineEditor.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Config/config.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include <stdio.h>
+#ifdef HAVE_LIBEDIT
+#include <histedit.h>
+#endif
+
+using namespace llvm;
+
+std::string LineEditor::getDefaultHistoryPath(StringRef ProgName) {
+ SmallString<32> Path;
+ if (sys::path::home_directory(Path)) {
+ sys::path::append(Path, "." + ProgName + "-history");
+ return Path.str();
+ }
+ return std::string();
+}
+
+LineEditor::CompleterConcept::~CompleterConcept() {}
+LineEditor::ListCompleterConcept::~ListCompleterConcept() {}
+
+std::string LineEditor::ListCompleterConcept::getCommonPrefix(
+ const std::vector<Completion> &Comps) {
+ assert(!Comps.empty());
+
+ std::string CommonPrefix = Comps[0].TypedText;
+ for (std::vector<Completion>::const_iterator I = Comps.begin() + 1,
+ E = Comps.end();
+ I != E; ++I) {
+ size_t Len = std::min(CommonPrefix.size(), I->TypedText.size());
+ size_t CommonLen = 0;
+ for (; CommonLen != Len; ++CommonLen) {
+ if (CommonPrefix[CommonLen] != I->TypedText[CommonLen])
+ break;
+ }
+ CommonPrefix.resize(CommonLen);
+ }
+ return CommonPrefix;
+}
+
+LineEditor::CompletionAction
+LineEditor::ListCompleterConcept::complete(StringRef Buffer, size_t Pos) const {
+ CompletionAction Action;
+ std::vector<Completion> Comps = getCompletions(Buffer, Pos);
+ if (Comps.empty()) {
+ Action.Kind = CompletionAction::AK_ShowCompletions;
+ return Action;
+ }
+
+ std::string CommonPrefix = getCommonPrefix(Comps);
+
+ // If the common prefix is non-empty we can simply insert it. If there is a
+ // single completion, this will insert the full completion. If there is more
+ // than one, it may be enough to jog the user's memory; if not, the user can
+ // hit tab again to see the completions, because the common prefix will then
+ // be empty.
+ if (CommonPrefix.empty()) {
+ Action.Kind = CompletionAction::AK_ShowCompletions;
+ for (std::vector<Completion>::iterator I = Comps.begin(), E = Comps.end();
+ I != E; ++I)
+ Action.Completions.push_back(I->DisplayText);
+ } else {
+ Action.Kind = CompletionAction::AK_Insert;
+ Action.Text = CommonPrefix;
+ }
+
+ return Action;
+}
+
+LineEditor::CompletionAction LineEditor::getCompletionAction(StringRef Buffer,
+ size_t Pos) const {
+ if (!Completer) {
+ CompletionAction Action;
+ Action.Kind = CompletionAction::AK_ShowCompletions;
+ return Action;
+ }
+
+ return Completer->complete(Buffer, Pos);
+}
+
+#ifdef HAVE_LIBEDIT
+
+// libedit-based implementation.
+
+struct LineEditor::InternalData {
+ LineEditor *LE;
+
+ History *Hist;
+ EditLine *EL;
+
+ unsigned PrevCount;
+ std::string ContinuationOutput;
+
+ FILE *Out;
+};
+
+static const char *ElGetPromptFn(EditLine *EL) {
+ LineEditor::InternalData *Data;
+ if (el_get(EL, EL_CLIENTDATA, &Data) == 0)
+ return Data->LE->getPrompt().c_str();
+ return "> ";
+}
+
+// Handles tab completion.
+//
+// This function is really horrible. But since the alternative is to get into
+// the line editor business, here we are.
+static unsigned char ElCompletionFn(EditLine *EL, int ch) {
+ LineEditor::InternalData *Data;
+ if (el_get(EL, EL_CLIENTDATA, &Data) == 0) {
+ if (!Data->ContinuationOutput.empty()) {
+ // This is the continuation of the AK_ShowCompletions branch below.
+ FILE *Out = Data->Out;
+
+ // Print the required output (see below).
+ ::fwrite(Data->ContinuationOutput.c_str(),
+ Data->ContinuationOutput.size(), 1, Out);
+
+ // Push a sequence of Ctrl-B characters to move the cursor back to its
+ // original position.
+ std::string Prevs(Data->PrevCount, '\02');
+ ::el_push(EL, const_cast<char *>(Prevs.c_str()));
+
+ Data->ContinuationOutput.clear();
+
+ return CC_REFRESH;
+ }
+
+ const LineInfo *LI = ::el_line(EL);
+ LineEditor::CompletionAction Action = Data->LE->getCompletionAction(
+ StringRef(LI->buffer, LI->lastchar - LI->buffer),
+ LI->cursor - LI->buffer);
+ switch (Action.Kind) {
+ case LineEditor::CompletionAction::AK_Insert:
+ ::el_insertstr(EL, Action.Text.c_str());
+ return CC_REFRESH;
+
+ case LineEditor::CompletionAction::AK_ShowCompletions:
+ if (Action.Completions.empty()) {
+ return CC_REFRESH_BEEP;
+ } else {
+ // Push a Ctrl-E and a tab. The Ctrl-E causes libedit to move the cursor
+ // to the end of the line, so that when we emit a newline we will be on
+ // a new blank line. The tab causes libedit to call this function again
+ // after moving the cursor. There doesn't seem to be anything we can do
+ // from here to cause libedit to move the cursor immediately. This will
+ // break horribly if the user has rebound their keys, so for now we do
+ // not permit user rebinding.
+ ::el_push(EL, const_cast<char *>("\05\t"));
+
+ // This assembles the output for the continuation block above.
+ raw_string_ostream OS(Data->ContinuationOutput);
+
+ // Move cursor to a blank line.
+ OS << "\n";
+
+ // Emit the completions.
+ for (std::vector<std::string>::iterator I = Action.Completions.begin(),
+ E = Action.Completions.end();
+ I != E; ++I) {
+ OS << *I << "\n";
+ }
+
+ // Fool libedit into thinking nothing has changed. Reprint its prompt
+ // and the user input. Note that the cursor will remain at the end of
+ // the line after this.
+ OS << Data->LE->getPrompt()
+ << StringRef(LI->buffer, LI->lastchar - LI->buffer);
+
+ // This is the number of characters we need to tell libedit to go back:
+ // the distance between end of line and the original cursor position.
+ Data->PrevCount = LI->lastchar - LI->cursor;
+
+ return CC_REFRESH;
+ }
+ }
+ }
+ return CC_ERROR;
+}
+
+LineEditor::LineEditor(StringRef ProgName, StringRef HistoryPath, FILE *In,
+ FILE *Out, FILE *Err)
+ : Prompt((ProgName + "> ").str()), HistoryPath(HistoryPath),
+ Data(new InternalData) {
+ if (HistoryPath.empty())
+ this->HistoryPath = getDefaultHistoryPath(ProgName);
+
+ Data->LE = this;
+ Data->Out = Out;
+
+ Data->Hist = ::history_init();
+ assert(Data->Hist);
+
+ Data->EL = ::el_init(ProgName.str().c_str(), In, Out, Err);
+ assert(Data->EL);
+
+ ::el_set(Data->EL, EL_PROMPT, ElGetPromptFn);
+ ::el_set(Data->EL, EL_EDITOR, "emacs");
+ ::el_set(Data->EL, EL_HIST, history, Data->Hist);
+ ::el_set(Data->EL, EL_ADDFN, "tab_complete", "Tab completion function",
+ ElCompletionFn);
+ ::el_set(Data->EL, EL_BIND, "\t", "tab_complete", NULL);
+ ::el_set(Data->EL, EL_BIND, "^r", "em-inc-search-prev",
+ NULL); // Cycle through backwards search, entering string
+ ::el_set(Data->EL, EL_BIND, "^w", "ed-delete-prev-word",
+ NULL); // Delete previous word, behave like bash does.
+ ::el_set(Data->EL, EL_BIND, "\033[3~", "ed-delete-next-char",
+ NULL); // Fix the delete key.
+ ::el_set(Data->EL, EL_CLIENTDATA, Data.get());
+
+ HistEvent HE;
+ ::history(Data->Hist, &HE, H_SETSIZE, 800);
+ ::history(Data->Hist, &HE, H_SETUNIQUE, 1);
+ loadHistory();
+}
+
+LineEditor::~LineEditor() {
+ saveHistory();
+
+ ::history_end(Data->Hist);
+ ::el_end(Data->EL);
+ ::fwrite("\n", 1, 1, Data->Out);
+}
+
+void LineEditor::saveHistory() {
+ if (!HistoryPath.empty()) {
+ HistEvent HE;
+ ::history(Data->Hist, &HE, H_SAVE, HistoryPath.c_str());
+ }
+}
+
+void LineEditor::loadHistory() {
+ if (!HistoryPath.empty()) {
+ HistEvent HE;
+ ::history(Data->Hist, &HE, H_LOAD, HistoryPath.c_str());
+ }
+}
+
+Optional<std::string> LineEditor::readLine() const {
+ // Call el_gets to prompt the user and read the user's input.
+ int LineLen = 0;
+ const char *Line = ::el_gets(Data->EL, &LineLen);
+
+ // Either of these may mean end-of-file.
+ if (!Line || LineLen == 0)
+ return Optional<std::string>();
+
+ // Strip any newlines off the end of the string.
+ while (LineLen > 0 &&
+ (Line[LineLen - 1] == '\n' || Line[LineLen - 1] == '\r'))
+ --LineLen;
+
+ HistEvent HE;
+ if (LineLen > 0)
+ ::history(Data->Hist, &HE, H_ENTER, Line);
+
+ return std::string(Line, LineLen);
+}
+
+#else
+
+// Simple fgets-based implementation.
+
+struct LineEditor::InternalData {
+ FILE *In;
+ FILE *Out;
+};
+
+LineEditor::LineEditor(StringRef ProgName, StringRef HistoryPath, FILE *In,
+ FILE *Out, FILE *Err)
+ : Prompt((ProgName + "> ").str()), Data(new InternalData) {
+ Data->In = In;
+ Data->Out = Out;
+}
+
+LineEditor::~LineEditor() {
+ ::fwrite("\n", 1, 1, Data->Out);
+}
+
+void LineEditor::saveHistory() {}
+void LineEditor::loadHistory() {}
+
+Optional<std::string> LineEditor::readLine() const {
+ ::fprintf(Data->Out, "%s", Prompt.c_str());
+
+ std::string Line;
+ do {
+ char Buf[64];
+ char *Res = ::fgets(Buf, sizeof(Buf), Data->In);
+ if (!Res) {
+ if (Line.empty())
+ return Optional<std::string>();
+ else
+ return Line;
+ }
+ Line.append(Buf);
+ } while (Line.empty() ||
+ (Line[Line.size() - 1] != '\n' && Line[Line.size() - 1] != '\r'));
+
+ while (!Line.empty() &&
+ (Line[Line.size() - 1] == '\n' || Line[Line.size() - 1] == '\r'))
+ Line.resize(Line.size() - 1);
+
+ return Line;
+}
+
+#endif
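
The new LLVMLineEditor library selects the libedit-backed implementation when HAVE_LIBEDIT is defined and otherwise falls back to the plain fgets loop above; either way the public surface is the same. A minimal sketch of a tool driving it, assuming only the constructor, getDefaultHistoryPath and readLine shown in this file (the program name "example" is illustrative):

#include "llvm/ADT/Optional.h"
#include "llvm/LineEditor/LineEditor.h"
#include <cstdio>
#include <string>

int main() {
  // Explicit stream arguments; only this five-argument constructor form is
  // visible in the file above.
  llvm::LineEditor LE("example",
                      llvm::LineEditor::getDefaultHistoryPath("example"),
                      stdin, stdout, stderr);
  // readLine() returns an empty Optional on end-of-file in both the libedit
  // and the fgets implementations.
  while (llvm::Optional<std::string> Line = LE.readLine()) {
    if (*Line == "quit")
      break;
    std::printf("you said: %s\n", Line->c_str());
  }
  return 0;
}
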
diff --git a/contrib/llvm/lib/LineEditor/Makefile b/contrib/llvm/lib/LineEditor/Makefile
new file mode 100644
index 0000000..c7ff6d8
--- /dev/null
+++ b/contrib/llvm/lib/LineEditor/Makefile
@@ -0,0 +1,15 @@
+##===- lib/LineEditor/Makefile -----------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../..
+LIBRARYNAME = LLVMLineEditor
+BUILD_ARCHIVE := 1
+
+include $(LEVEL)/Makefile.common
+
diff --git a/contrib/llvm/lib/Linker/LinkModules.cpp b/contrib/llvm/lib/Linker/LinkModules.cpp
index 8f2200e..5bb2862 100644
--- a/contrib/llvm/lib/Linker/LinkModules.cpp
+++ b/contrib/llvm/lib/Linker/LinkModules.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Linker.h"
+#include "llvm/Linker/Linker.h"
#include "llvm-c/Linker.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
@@ -19,12 +19,15 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/TypeFinder.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include <cctype>
+#include <tuple>
using namespace llvm;
+
//===----------------------------------------------------------------------===//
// TypeMap implementation.
//===----------------------------------------------------------------------===//
@@ -41,12 +44,12 @@ class TypeMapTy : public ValueMapTypeRemapper {
/// we speculatively add types to MappedTypes, but keep track of them here in
/// case we need to roll back.
SmallVector<Type*, 16> SpeculativeTypes;
-
+
/// SrcDefinitionsToResolve - This is a list of non-opaque structs in the
/// source module that are mapped to an opaque struct in the destination
/// module.
SmallVector<StructType*, 16> SrcDefinitionsToResolve;
-
+
/// DstResolvedOpaqueTypes - This is the set of opaque types in the
/// destination modules who are getting a body from the source module.
SmallPtrSet<StructType*, 16> DstResolvedOpaqueTypes;
@@ -63,7 +66,7 @@ public:
/// linkDefinedTypeBodies - Produce a body for an opaque type in the dest
/// module from a type definition in the source module.
void linkDefinedTypeBodies();
-
+
/// get - Return the mapped type to use for the specified input type from the
/// source module.
Type *get(Type *SrcTy);
@@ -85,10 +88,10 @@ public:
private:
Type *getImpl(Type *T);
/// remapType - Implement the ValueMapTypeRemapper interface.
- Type *remapType(Type *SrcTy) {
+ Type *remapType(Type *SrcTy) override {
return get(SrcTy);
}
-
+
bool areTypesIsomorphic(Type *DstTy, Type *SrcTy);
};
}
@@ -96,12 +99,12 @@ private:
void TypeMapTy::addTypeMapping(Type *DstTy, Type *SrcTy) {
Type *&Entry = MappedTypes[SrcTy];
if (Entry) return;
-
+
if (DstTy == SrcTy) {
Entry = DstTy;
return;
}
-
+
// Check to see if these types are recursively isomorphic and establish a
// mapping between them if so.
if (!areTypesIsomorphic(DstTy, SrcTy)) {
@@ -130,7 +133,7 @@ bool TypeMapTy::areTypesIsomorphic(Type *DstTy, Type *SrcTy) {
Entry = DstTy;
return true;
}
-
+
// Okay, we have two types with identical kinds that we haven't seen before.
// If this is an opaque struct type, special case it.
@@ -156,18 +159,18 @@ bool TypeMapTy::areTypesIsomorphic(Type *DstTy, Type *SrcTy) {
return true;
}
}
-
+
// If the number of subtypes disagree between the two types, then we fail.
if (SrcTy->getNumContainedTypes() != DstTy->getNumContainedTypes())
return false;
-
+
// Fail if any of the extra properties (e.g. array size) of the type disagree.
if (isa<IntegerType>(DstTy))
return false; // bitwidth disagrees.
if (PointerType *PT = dyn_cast<PointerType>(DstTy)) {
if (PT->getAddressSpace() != cast<PointerType>(SrcTy)->getAddressSpace())
return false;
-
+
} else if (FunctionType *FT = dyn_cast<FunctionType>(DstTy)) {
if (FT->isVarArg() != cast<FunctionType>(SrcTy)->isVarArg())
return false;
@@ -193,7 +196,7 @@ bool TypeMapTy::areTypesIsomorphic(Type *DstTy, Type *SrcTy) {
if (!areTypesIsomorphic(DstTy->getContainedType(i),
SrcTy->getContainedType(i)))
return false;
-
+
// If everything seems to have lined up, then everything is great.
return true;
}
@@ -203,31 +206,31 @@ bool TypeMapTy::areTypesIsomorphic(Type *DstTy, Type *SrcTy) {
void TypeMapTy::linkDefinedTypeBodies() {
SmallVector<Type*, 16> Elements;
SmallString<16> TmpName;
-
+
// Note that processing entries in this loop (calling 'get') can add new
// entries to the SrcDefinitionsToResolve vector.
while (!SrcDefinitionsToResolve.empty()) {
StructType *SrcSTy = SrcDefinitionsToResolve.pop_back_val();
StructType *DstSTy = cast<StructType>(MappedTypes[SrcSTy]);
-
+
// TypeMap is a many-to-one mapping, if there were multiple types that
// provide a body for DstSTy then previous iterations of this loop may have
// already handled it. Just ignore this case.
if (!DstSTy->isOpaque()) continue;
assert(!SrcSTy->isOpaque() && "Not resolving a definition?");
-
+
// Map the body of the source type over to a new body for the dest type.
Elements.resize(SrcSTy->getNumElements());
for (unsigned i = 0, e = Elements.size(); i != e; ++i)
Elements[i] = getImpl(SrcSTy->getElementType(i));
-
+
DstSTy->setBody(Elements, SrcSTy->isPacked());
-
+
// If DstSTy has no name or has a longer name than STy, then viciously steal
// STy's name.
if (!SrcSTy->hasName()) continue;
StringRef SrcName = SrcSTy->getName();
-
+
if (!DstSTy->hasName() || DstSTy->getName().size() > SrcName.size()) {
TmpName.insert(TmpName.end(), SrcName.begin(), SrcName.end());
SrcSTy->setName("");
@@ -235,7 +238,7 @@ void TypeMapTy::linkDefinedTypeBodies() {
TmpName.clear();
}
}
-
+
DstResolvedOpaqueTypes.clear();
}
@@ -243,7 +246,7 @@ void TypeMapTy::linkDefinedTypeBodies() {
/// source module.
Type *TypeMapTy::get(Type *Ty) {
Type *Result = getImpl(Ty);
-
+
// If this caused a reference to any struct type, resolve it before returning.
if (!SrcDefinitionsToResolve.empty())
linkDefinedTypeBodies();
@@ -255,7 +258,7 @@ Type *TypeMapTy::getImpl(Type *Ty) {
// If we already have an entry for this type, return it.
Type **Entry = &MappedTypes[Ty];
if (*Entry) return *Entry;
-
+
// If this is not a named struct type, then just map all of the elements and
// then rebuild the type from inside out.
if (!isa<StructType>(Ty) || cast<StructType>(Ty)->isLiteral()) {
@@ -263,7 +266,7 @@ Type *TypeMapTy::getImpl(Type *Ty) {
// true for the anonymous {} struct, things like 'float', integers, etc.
if (Ty->getNumContainedTypes() == 0)
return *Entry = Ty;
-
+
// Remap all of the elements, keeping track of whether any of them change.
bool AnyChange = false;
SmallVector<Type*, 4> ElementTypes;
@@ -272,23 +275,23 @@ Type *TypeMapTy::getImpl(Type *Ty) {
ElementTypes[i] = getImpl(Ty->getContainedType(i));
AnyChange |= ElementTypes[i] != Ty->getContainedType(i);
}
-
+
// If we found our type while recursively processing stuff, just use it.
Entry = &MappedTypes[Ty];
if (*Entry) return *Entry;
-
+
// If all of the element types mapped directly over, then the type is usable
// as-is.
if (!AnyChange)
return *Entry = Ty;
-
+
// Otherwise, rebuild a modified type.
switch (Ty->getTypeID()) {
default: llvm_unreachable("unknown derived type to remap");
case Type::ArrayTyID:
return *Entry = ArrayType::get(ElementTypes[0],
cast<ArrayType>(Ty)->getNumElements());
- case Type::VectorTyID:
+ case Type::VectorTyID:
return *Entry = VectorType::get(ElementTypes[0],
cast<VectorType>(Ty)->getNumElements());
case Type::PointerTyID:
@@ -329,7 +332,7 @@ Type *TypeMapTy::getImpl(Type *Ty) {
// and is not required for the prettiness of the linked module, we just skip
// it and always rebuild a type here.
StructType *STy = cast<StructType>(Ty);
-
+
// If the type is opaque, we can just use it directly.
if (STy->isOpaque()) {
// A named structure type from src module is used. Add it to the Set of
@@ -337,7 +340,7 @@ Type *TypeMapTy::getImpl(Type *Ty) {
DstStructTypesSet.insert(STy);
return *Entry = STy;
}
-
+
// Otherwise we create a new type and resolve its body later. This will be
// resolved by the top level of get().
SrcDefinitionsToResolve.push_back(STy);
@@ -370,15 +373,15 @@ namespace {
LazilyLinkFunctions(LazilyLinkFunctions) {
}
- virtual Value *materializeValueFor(Value *V);
+ Value *materializeValueFor(Value *V) override;
};
/// ModuleLinker - This is an implementation class for the LinkModules
/// function, which is the entrypoint for this file.
class ModuleLinker {
Module *DstM, *SrcM;
-
- TypeMapTy TypeMap;
+
+ TypeMapTy TypeMap;
ValueMaterializerTy ValMaterializer;
/// ValueMap - Mapping of values from what they used to be in Src, to what
@@ -386,33 +389,36 @@ namespace {
/// some overhead due to the use of Value handles which the Linker doesn't
/// actually need, but this allows us to reuse the ValueMapper code.
ValueToValueMapTy ValueMap;
-
+
struct AppendingVarInfo {
GlobalVariable *NewGV; // New aggregate global in dest module.
Constant *DstInit; // Old initializer from dest module.
Constant *SrcInit; // Old initializer from src module.
};
-
+
std::vector<AppendingVarInfo> AppendingVars;
-
+
unsigned Mode; // Mode to treat source module.
-
+
// Set of items not to link in from source.
SmallPtrSet<const Value*, 16> DoNotLinkFromSource;
-
+
// Vector of functions to lazily link in.
std::vector<Function*> LazilyLinkFunctions;
-
+
+ bool SuppressWarnings;
+
public:
std::string ErrorMsg;
-
- ModuleLinker(Module *dstM, TypeSet &Set, Module *srcM, unsigned mode)
- : DstM(dstM), SrcM(srcM), TypeMap(Set),
- ValMaterializer(TypeMap, DstM, LazilyLinkFunctions),
- Mode(mode) { }
-
+
+ ModuleLinker(Module *dstM, TypeSet &Set, Module *srcM, unsigned mode,
+ bool SuppressWarnings=false)
+ : DstM(dstM), SrcM(srcM), TypeMap(Set),
+ ValMaterializer(TypeMap, DstM, LazilyLinkFunctions), Mode(mode),
+ SuppressWarnings(SuppressWarnings) {}
+
bool run();
-
+
private:
/// emitError - Helper method for setting a message and returning an error
/// code.
@@ -420,7 +426,19 @@ namespace {
ErrorMsg = Message.str();
return true;
}
-
+
+ bool getComdatLeader(Module *M, StringRef ComdatName,
+ const GlobalVariable *&GVar);
+ bool computeResultingSelectionKind(StringRef ComdatName,
+ Comdat::SelectionKind Src,
+ Comdat::SelectionKind Dst,
+ Comdat::SelectionKind &Result,
+ bool &LinkFromSrc);
+ std::map<const Comdat *, std::pair<Comdat::SelectionKind, bool>>
+ ComdatsChosen;
+ bool getComdatResult(const Comdat *SrcC, Comdat::SelectionKind &SK,
+ bool &LinkFromSrc);
+
/// getLinkageResult - This analyzes the two global values and determines
/// what the result will look like in the destination module.
bool getLinkageResult(GlobalValue *Dest, const GlobalValue *Src,
@@ -434,29 +452,29 @@ namespace {
// If the source has no name it can't link. If it has local linkage,
// there is no name match-up going on.
if (!SrcGV->hasName() || SrcGV->hasLocalLinkage())
- return 0;
-
+ return nullptr;
+
// Otherwise see if we have a match in the destination module's symtab.
GlobalValue *DGV = DstM->getNamedValue(SrcGV->getName());
- if (DGV == 0) return 0;
-
+ if (!DGV) return nullptr;
+
// If we found a global with the same name in the dest module, but it has
// internal linkage, we are really not doing any linkage here.
if (DGV->hasLocalLinkage())
- return 0;
+ return nullptr;
// Otherwise, we do in fact link to the destination global.
return DGV;
}
-
+
void computeTypeMapping();
-
+
bool linkAppendingVarProto(GlobalVariable *DstGV, GlobalVariable *SrcGV);
bool linkGlobalProto(GlobalVariable *SrcGV);
bool linkFunctionProto(Function *SrcF);
bool linkAliasProto(GlobalAlias *SrcA);
bool linkModuleFlagsMetadata();
-
+
void linkAppendingVarInit(const AppendingVarInfo &AVI);
void linkGlobalInits();
void linkFunctionBody(Function *Dst, Function *Src);
@@ -490,10 +508,16 @@ static void forceRenaming(GlobalValue *GV, StringRef Name) {
/// a GlobalValue) from the SrcGV to the DestGV.
static void copyGVAttributes(GlobalValue *DestGV, const GlobalValue *SrcGV) {
// Use the maximum alignment, rather than just copying the alignment of SrcGV.
- unsigned Alignment = std::max(DestGV->getAlignment(), SrcGV->getAlignment());
+ auto *DestGO = dyn_cast<GlobalObject>(DestGV);
+ unsigned Alignment;
+ if (DestGO)
+ Alignment = std::max(DestGO->getAlignment(), SrcGV->getAlignment());
+
DestGV->copyAttributesFrom(SrcGV);
- DestGV->setAlignment(Alignment);
-
+
+ if (DestGO)
+ DestGO->setAlignment(Alignment);
+
forceRenaming(DestGV, SrcGV->getName());
}
@@ -513,7 +537,7 @@ static bool isLessConstraining(GlobalValue::VisibilityTypes a,
Value *ValueMaterializerTy::materializeValueFor(Value *V) {
Function *SF = dyn_cast<Function>(V);
if (!SF)
- return NULL;
+ return nullptr;
Function *DF = Function::Create(TypeMap.get(SF->getFunctionType()),
SF->getLinkage(), SF->getName(), DstM);
@@ -523,6 +547,115 @@ Value *ValueMaterializerTy::materializeValueFor(Value *V) {
return DF;
}
+bool ModuleLinker::getComdatLeader(Module *M, StringRef ComdatName,
+ const GlobalVariable *&GVar) {
+ const GlobalValue *GVal = M->getNamedValue(ComdatName);
+ if (const auto *GA = dyn_cast_or_null<GlobalAlias>(GVal)) {
+ GVal = GA->getBaseObject();
+ if (!GVal)
+ // We cannot resolve the size of the aliasee yet.
+ return emitError("Linking COMDATs named '" + ComdatName +
+ "': COMDAT key involves incomputable alias size.");
+ }
+
+ GVar = dyn_cast_or_null<GlobalVariable>(GVal);
+ if (!GVar)
+ return emitError(
+ "Linking COMDATs named '" + ComdatName +
+ "': GlobalVariable required for data dependent selection!");
+
+ return false;
+}
+
+bool ModuleLinker::computeResultingSelectionKind(StringRef ComdatName,
+ Comdat::SelectionKind Src,
+ Comdat::SelectionKind Dst,
+ Comdat::SelectionKind &Result,
+ bool &LinkFromSrc) {
+ // The ability to mix Comdat::SelectionKind::Any with
+ // Comdat::SelectionKind::Largest is a behavior that comes from COFF.
+ bool DstAnyOrLargest = Dst == Comdat::SelectionKind::Any ||
+ Dst == Comdat::SelectionKind::Largest;
+ bool SrcAnyOrLargest = Src == Comdat::SelectionKind::Any ||
+ Src == Comdat::SelectionKind::Largest;
+ if (DstAnyOrLargest && SrcAnyOrLargest) {
+ if (Dst == Comdat::SelectionKind::Largest ||
+ Src == Comdat::SelectionKind::Largest)
+ Result = Comdat::SelectionKind::Largest;
+ else
+ Result = Comdat::SelectionKind::Any;
+ } else if (Src == Dst) {
+ Result = Dst;
+ } else {
+ return emitError("Linking COMDATs named '" + ComdatName +
+ "': invalid selection kinds!");
+ }
+
+ switch (Result) {
+ case Comdat::SelectionKind::Any:
+ // Go with Dst.
+ LinkFromSrc = false;
+ break;
+ case Comdat::SelectionKind::NoDuplicates:
+ return emitError("Linking COMDATs named '" + ComdatName +
+ "': noduplicates has been violated!");
+ case Comdat::SelectionKind::ExactMatch:
+ case Comdat::SelectionKind::Largest:
+ case Comdat::SelectionKind::SameSize: {
+ const GlobalVariable *DstGV;
+ const GlobalVariable *SrcGV;
+ if (getComdatLeader(DstM, ComdatName, DstGV) ||
+ getComdatLeader(SrcM, ComdatName, SrcGV))
+ return true;
+
+ const DataLayout *DstDL = DstM->getDataLayout();
+ const DataLayout *SrcDL = SrcM->getDataLayout();
+ if (!DstDL || !SrcDL) {
+ return emitError(
+ "Linking COMDATs named '" + ComdatName +
+ "': can't do size dependent selection without DataLayout!");
+ }
+ uint64_t DstSize =
+ DstDL->getTypeAllocSize(DstGV->getType()->getPointerElementType());
+ uint64_t SrcSize =
+ SrcDL->getTypeAllocSize(SrcGV->getType()->getPointerElementType());
+ if (Result == Comdat::SelectionKind::ExactMatch) {
+ if (SrcGV->getInitializer() != DstGV->getInitializer())
+ return emitError("Linking COMDATs named '" + ComdatName +
+ "': ExactMatch violated!");
+ LinkFromSrc = false;
+ } else if (Result == Comdat::SelectionKind::Largest) {
+ LinkFromSrc = SrcSize > DstSize;
+ } else if (Result == Comdat::SelectionKind::SameSize) {
+ if (SrcSize != DstSize)
+ return emitError("Linking COMDATs named '" + ComdatName +
+ "': SameSize violated!");
+ LinkFromSrc = false;
+ } else {
+ llvm_unreachable("unknown selection kind");
+ }
+ break;
+ }
+ }
+
+ return false;
+}
+
+bool ModuleLinker::getComdatResult(const Comdat *SrcC,
+ Comdat::SelectionKind &Result,
+ bool &LinkFromSrc) {
+ StringRef ComdatName = SrcC->getName();
+ Module::ComdatSymTabType &ComdatSymTab = DstM->getComdatSymbolTable();
+ Module::ComdatSymTabType::iterator DstCI = ComdatSymTab.find(ComdatName);
+ if (DstCI != ComdatSymTab.end()) {
+ const Comdat *DstC = &DstCI->second;
+ Comdat::SelectionKind SSK = SrcC->getSelectionKind();
+ Comdat::SelectionKind DSK = DstC->getSelectionKind();
+ if (computeResultingSelectionKind(ComdatName, SSK, DSK, Result, LinkFromSrc))
+ return true;
+ }
+ return false;
+}
/// getLinkageResult - This analyzes the two global values and determines what
/// the result will look like in the destination module. In particular, it
@@ -536,15 +669,15 @@ bool ModuleLinker::getLinkageResult(GlobalValue *Dest, const GlobalValue *Src,
assert(Dest && "Must have two globals being queried");
assert(!Src->hasLocalLinkage() &&
"If Src has internal linkage, Dest shouldn't be set!");
-
+
bool SrcIsDeclaration = Src->isDeclaration() && !Src->isMaterializable();
bool DestIsDeclaration = Dest->isDeclaration();
-
+
if (SrcIsDeclaration) {
// If Src is external or if both Src & Dest are external.. Just link the
// external globals, we aren't adding anything.
- if (Src->hasDLLImportLinkage()) {
- // If one of GVs has DLLImport linkage, result should be dllimport'ed.
+ if (Src->hasDLLImportStorageClass()) {
+ // If one of GVs is marked as DLLImport, result should be dllimport'ed.
if (DestIsDeclaration) {
LinkFromSrc = true;
LT = Src->getLinkage();
@@ -557,7 +690,7 @@ bool ModuleLinker::getLinkageResult(GlobalValue *Dest, const GlobalValue *Src,
LinkFromSrc = false;
LT = Dest->getLinkage();
}
- } else if (DestIsDeclaration && !Dest->hasDLLImportLinkage()) {
+ } else if (DestIsDeclaration && !Dest->hasDLLImportStorageClass()) {
// If Dest is external but Src is not:
LinkFromSrc = true;
LT = Src->getLinkage();
@@ -584,10 +717,8 @@ bool ModuleLinker::getLinkageResult(GlobalValue *Dest, const GlobalValue *Src,
LT = GlobalValue::ExternalLinkage;
}
} else {
- assert((Dest->hasExternalLinkage() || Dest->hasDLLImportLinkage() ||
- Dest->hasDLLExportLinkage() || Dest->hasExternalWeakLinkage()) &&
- (Src->hasExternalLinkage() || Src->hasDLLImportLinkage() ||
- Src->hasDLLExportLinkage() || Src->hasExternalWeakLinkage()) &&
+ assert((Dest->hasExternalLinkage() || Dest->hasExternalWeakLinkage()) &&
+ (Src->hasExternalLinkage() || Src->hasExternalWeakLinkage()) &&
"Unexpected linkage type!");
return emitError("Linking globals named '" + Src->getName() +
"': symbol multiply defined!");
@@ -595,6 +726,8 @@ bool ModuleLinker::getLinkageResult(GlobalValue *Dest, const GlobalValue *Src,
// Compute the visibility. We follow the rules in the System V Application
// Binary Interface.
+ assert(!GlobalValue::isLocalLinkage(LT) &&
+ "Symbols with local linkage should not be merged");
Vis = isLessConstraining(Src->getVisibility(), Dest->getVisibility()) ?
Dest->getVisibility() : Src->getVisibility();
return false;
@@ -609,19 +742,19 @@ void ModuleLinker::computeTypeMapping() {
for (Module::global_iterator I = SrcM->global_begin(),
E = SrcM->global_end(); I != E; ++I) {
GlobalValue *DGV = getLinkedToGlobal(I);
- if (DGV == 0) continue;
-
+ if (!DGV) continue;
+
if (!DGV->hasAppendingLinkage() || !I->hasAppendingLinkage()) {
TypeMap.addTypeMapping(DGV->getType(), I->getType());
- continue;
+ continue;
}
-
+
// Unify the element type of appending arrays.
ArrayType *DAT = cast<ArrayType>(DGV->getType()->getElementType());
ArrayType *SAT = cast<ArrayType>(I->getType()->getElementType());
TypeMap.addTypeMapping(DAT->getElementType(), SAT->getElementType());
}
-
+
// Incorporate functions.
for (Module::iterator I = SrcM->begin(), E = SrcM->end(); I != E; ++I) {
if (GlobalValue *DGV = getLinkedToGlobal(I))
@@ -640,14 +773,14 @@ void ModuleLinker::computeTypeMapping() {
for (unsigned i = 0, e = SrcStructTypes.size(); i != e; ++i) {
StructType *ST = SrcStructTypes[i];
if (!ST->hasName()) continue;
-
+
// Check to see if there is a dot in the name followed by a digit.
size_t DotPos = ST->getName().rfind('.');
if (DotPos == 0 || DotPos == StringRef::npos ||
ST->getName().back() == '.' ||
!isdigit(static_cast<unsigned char>(ST->getName()[DotPos+1])))
continue;
-
+
// Check to see if the destination module has a struct with the prefix name.
if (StructType *DST = DstM->getTypeByName(ST->getName().substr(0, DotPos)))
// Don't use it if this actually came from the source module. They're in
@@ -672,9 +805,9 @@ void ModuleLinker::computeTypeMapping() {
}
// Don't bother incorporating aliases, they aren't generally typed well.
-
+
// Now that we have discovered all of the type equivalences, get a body for
- // any 'opaque' types in the dest module that are now resolved.
+ // any 'opaque' types in the dest module that are now resolved.
TypeMap.linkDefinedTypeBodies();
}
@@ -682,26 +815,26 @@ void ModuleLinker::computeTypeMapping() {
/// them together now. Return true on error.
bool ModuleLinker::linkAppendingVarProto(GlobalVariable *DstGV,
GlobalVariable *SrcGV) {
-
+
if (!SrcGV->hasAppendingLinkage() || !DstGV->hasAppendingLinkage())
return emitError("Linking globals named '" + SrcGV->getName() +
"': can only link appending global with another appending global!");
-
+
ArrayType *DstTy = cast<ArrayType>(DstGV->getType()->getElementType());
ArrayType *SrcTy =
cast<ArrayType>(TypeMap.get(SrcGV->getType()->getElementType()));
Type *EltTy = DstTy->getElementType();
-
+
  // Check to see that the two arrays agree on type.
if (EltTy != SrcTy->getElementType())
return emitError("Appending variables with different element types!");
if (DstGV->isConstant() != SrcGV->isConstant())
return emitError("Appending variables linked with different const'ness!");
-
+
if (DstGV->getAlignment() != SrcGV->getAlignment())
return emitError(
"Appending variables with different alignment need to be linked!");
-
+
if (DstGV->getVisibility() != SrcGV->getVisibility())
return emitError(
"Appending variables with different visibility need to be linked!");
@@ -710,23 +843,23 @@ bool ModuleLinker::linkAppendingVarProto(GlobalVariable *DstGV,
return emitError(
"Appending variables with different unnamed_addr need to be linked!");
- if (DstGV->getSection() != SrcGV->getSection())
+ if (StringRef(DstGV->getSection()) != SrcGV->getSection())
return emitError(
"Appending variables with different section name need to be linked!");
-
+
uint64_t NewSize = DstTy->getNumElements() + SrcTy->getNumElements();
ArrayType *NewType = ArrayType::get(EltTy, NewSize);
-
+
// Create the new global variable.
GlobalVariable *NG =
new GlobalVariable(*DstGV->getParent(), NewType, SrcGV->isConstant(),
- DstGV->getLinkage(), /*init*/0, /*name*/"", DstGV,
+ DstGV->getLinkage(), /*init*/nullptr, /*name*/"", DstGV,
DstGV->getThreadLocalMode(),
DstGV->getType()->getAddressSpace());
-
+
// Propagate alignment, visibility and section info.
copyGVAttributes(NG, DstGV);
-
+
AppendingVarInfo AVI;
AVI.NewGV = NG;
AVI.DstInit = DstGV->getInitializer();
@@ -739,10 +872,10 @@ bool ModuleLinker::linkAppendingVarProto(GlobalVariable *DstGV,
DstGV->replaceAllUsesWith(ConstantExpr::getBitCast(NG, DstGV->getType()));
DstGV->eraseFromParent();
-
+
// Track the source variable so we don't try to link it.
DoNotLinkFromSource.insert(SrcGV);
-
+
return false;
}
@@ -753,52 +886,71 @@ bool ModuleLinker::linkGlobalProto(GlobalVariable *SGV) {
llvm::Optional<GlobalValue::VisibilityTypes> NewVisibility;
bool HasUnnamedAddr = SGV->hasUnnamedAddr();
+ bool LinkFromSrc = false;
+ Comdat *DC = nullptr;
+ if (const Comdat *SC = SGV->getComdat()) {
+ Comdat::SelectionKind SK;
+ std::tie(SK, LinkFromSrc) = ComdatsChosen[SC];
+ DC = DstM->getOrInsertComdat(SC->getName());
+ DC->setSelectionKind(SK);
+ }
+
if (DGV) {
- // Concatenation of appending linkage variables is magic and handled later.
- if (DGV->hasAppendingLinkage() || SGV->hasAppendingLinkage())
- return linkAppendingVarProto(cast<GlobalVariable>(DGV), SGV);
-
- // Determine whether linkage of these two globals follows the source
- // module's definition or the destination module's definition.
- GlobalValue::LinkageTypes NewLinkage = GlobalValue::InternalLinkage;
- GlobalValue::VisibilityTypes NV;
- bool LinkFromSrc = false;
- if (getLinkageResult(DGV, SGV, NewLinkage, NV, LinkFromSrc))
- return true;
- NewVisibility = NV;
- HasUnnamedAddr = HasUnnamedAddr && DGV->hasUnnamedAddr();
+ if (!DC) {
+ // Concatenation of appending linkage variables is magic and handled later.
+ if (DGV->hasAppendingLinkage() || SGV->hasAppendingLinkage())
+ return linkAppendingVarProto(cast<GlobalVariable>(DGV), SGV);
+
+ // Determine whether linkage of these two globals follows the source
+ // module's definition or the destination module's definition.
+ GlobalValue::LinkageTypes NewLinkage = GlobalValue::InternalLinkage;
+ GlobalValue::VisibilityTypes NV;
+ if (getLinkageResult(DGV, SGV, NewLinkage, NV, LinkFromSrc))
+ return true;
+ NewVisibility = NV;
+ HasUnnamedAddr = HasUnnamedAddr && DGV->hasUnnamedAddr();
+
+ // If we're not linking from the source, then keep the definition that we
+ // have.
+ if (!LinkFromSrc) {
+ // Special case for const propagation.
+ if (GlobalVariable *DGVar = dyn_cast<GlobalVariable>(DGV))
+ if (DGVar->isDeclaration() && SGV->isConstant() &&
+ !DGVar->isConstant())
+ DGVar->setConstant(true);
+
+ // Set calculated linkage, visibility and unnamed_addr.
+ DGV->setLinkage(NewLinkage);
+ DGV->setVisibility(*NewVisibility);
+ DGV->setUnnamedAddr(HasUnnamedAddr);
+ }
+ }
- // If we're not linking from the source, then keep the definition that we
- // have.
if (!LinkFromSrc) {
- // Special case for const propagation.
- if (GlobalVariable *DGVar = dyn_cast<GlobalVariable>(DGV))
- if (DGVar->isDeclaration() && SGV->isConstant() && !DGVar->isConstant())
- DGVar->setConstant(true);
-
- // Set calculated linkage, visibility and unnamed_addr.
- DGV->setLinkage(NewLinkage);
- DGV->setVisibility(*NewVisibility);
- DGV->setUnnamedAddr(HasUnnamedAddr);
-
// Make sure to remember this mapping.
ValueMap[SGV] = ConstantExpr::getBitCast(DGV,TypeMap.get(SGV->getType()));
-
- // Track the source global so that we don't attempt to copy it over when
+
+ // Track the source global so that we don't attempt to copy it over when
// processing global initializers.
DoNotLinkFromSource.insert(SGV);
-
+
return false;
}
}
-
+
+ // If the Comdat this variable was inside of wasn't selected, skip it.
+ if (DC && !DGV && !LinkFromSrc) {
+ DoNotLinkFromSource.insert(SGV);
+ return false;
+ }
+
// No linking to be performed or linking from the source: simply create an
// identical version of the symbol over in the dest module... the
// initializer will be filled in later by LinkGlobalInits.
GlobalVariable *NewDGV =
new GlobalVariable(*DstM, TypeMap.get(SGV->getType()->getElementType()),
- SGV->isConstant(), SGV->getLinkage(), /*init*/0,
- SGV->getName(), /*insertbefore*/0,
+ SGV->isConstant(), SGV->getLinkage(), /*init*/nullptr,
+ SGV->getName(), /*insertbefore*/nullptr,
SGV->getThreadLocalMode(),
SGV->getType()->getAddressSpace());
// Propagate alignment, visibility and section info.
@@ -807,11 +959,14 @@ bool ModuleLinker::linkGlobalProto(GlobalVariable *SGV) {
NewDGV->setVisibility(*NewVisibility);
NewDGV->setUnnamedAddr(HasUnnamedAddr);
+ if (DC)
+ NewDGV->setComdat(DC);
+
if (DGV) {
DGV->replaceAllUsesWith(ConstantExpr::getBitCast(NewDGV, DGV->getType()));
DGV->eraseFromParent();
}
-
+
// Make sure to remember this mapping.
ValueMap[SGV] = NewDGV;
return false;
@@ -824,32 +979,44 @@ bool ModuleLinker::linkFunctionProto(Function *SF) {
llvm::Optional<GlobalValue::VisibilityTypes> NewVisibility;
bool HasUnnamedAddr = SF->hasUnnamedAddr();
+ bool LinkFromSrc = false;
+ Comdat *DC = nullptr;
+ if (const Comdat *SC = SF->getComdat()) {
+ Comdat::SelectionKind SK;
+ std::tie(SK, LinkFromSrc) = ComdatsChosen[SC];
+ DC = DstM->getOrInsertComdat(SC->getName());
+ DC->setSelectionKind(SK);
+ }
+
if (DGV) {
- GlobalValue::LinkageTypes NewLinkage = GlobalValue::InternalLinkage;
- bool LinkFromSrc = false;
- GlobalValue::VisibilityTypes NV;
- if (getLinkageResult(DGV, SF, NewLinkage, NV, LinkFromSrc))
- return true;
- NewVisibility = NV;
- HasUnnamedAddr = HasUnnamedAddr && DGV->hasUnnamedAddr();
+ if (!DC) {
+ GlobalValue::LinkageTypes NewLinkage = GlobalValue::InternalLinkage;
+ GlobalValue::VisibilityTypes NV;
+ if (getLinkageResult(DGV, SF, NewLinkage, NV, LinkFromSrc))
+ return true;
+ NewVisibility = NV;
+ HasUnnamedAddr = HasUnnamedAddr && DGV->hasUnnamedAddr();
+
+ if (!LinkFromSrc) {
+ // Set calculated linkage
+ DGV->setLinkage(NewLinkage);
+ DGV->setVisibility(*NewVisibility);
+ DGV->setUnnamedAddr(HasUnnamedAddr);
+ }
+ }
if (!LinkFromSrc) {
- // Set calculated linkage
- DGV->setLinkage(NewLinkage);
- DGV->setVisibility(*NewVisibility);
- DGV->setUnnamedAddr(HasUnnamedAddr);
-
// Make sure to remember this mapping.
ValueMap[SF] = ConstantExpr::getBitCast(DGV, TypeMap.get(SF->getType()));
-
- // Track the function from the source module so we don't attempt to remap
+
+ // Track the function from the source module so we don't attempt to remap
// it.
DoNotLinkFromSource.insert(SF);
-
+
return false;
}
}
-
+
// If the function is to be lazily linked, don't create it just yet.
// The ValueMaterializerTy will deal with creating it if it's used.
if (!DGV && (SF->hasLocalLinkage() || SF->hasLinkOnceLinkage() ||
@@ -858,6 +1025,12 @@ bool ModuleLinker::linkFunctionProto(Function *SF) {
return false;
}
+ // If the Comdat this function was inside of wasn't selected, skip it.
+ if (DC && !DGV && !LinkFromSrc) {
+ DoNotLinkFromSource.insert(SF);
+ return false;
+ }
+
// If there is no linkage to be performed or we are linking from the source,
// bring SF over.
Function *NewDF = Function::Create(TypeMap.get(SF->getFunctionType()),
@@ -867,12 +1040,15 @@ bool ModuleLinker::linkFunctionProto(Function *SF) {
NewDF->setVisibility(*NewVisibility);
NewDF->setUnnamedAddr(HasUnnamedAddr);
+ if (DC)
+ NewDF->setComdat(DC);
+
if (DGV) {
// Any uses of DF need to change to NewDF, with cast.
DGV->replaceAllUsesWith(ConstantExpr::getBitCast(NewDF, DGV->getType()));
DGV->eraseFromParent();
}
-
+
ValueMap[SF] = NewDF;
return false;
}
@@ -882,45 +1058,68 @@ bool ModuleLinker::linkFunctionProto(Function *SF) {
bool ModuleLinker::linkAliasProto(GlobalAlias *SGA) {
GlobalValue *DGV = getLinkedToGlobal(SGA);
llvm::Optional<GlobalValue::VisibilityTypes> NewVisibility;
+ bool HasUnnamedAddr = SGA->hasUnnamedAddr();
+
+ bool LinkFromSrc = false;
+ Comdat *DC = nullptr;
+ if (const Comdat *SC = SGA->getComdat()) {
+ Comdat::SelectionKind SK;
+ std::tie(SK, LinkFromSrc) = ComdatsChosen[SC];
+ DC = DstM->getOrInsertComdat(SC->getName());
+ DC->setSelectionKind(SK);
+ }
if (DGV) {
- GlobalValue::LinkageTypes NewLinkage = GlobalValue::InternalLinkage;
- GlobalValue::VisibilityTypes NV;
- bool LinkFromSrc = false;
- if (getLinkageResult(DGV, SGA, NewLinkage, NV, LinkFromSrc))
- return true;
- NewVisibility = NV;
+ if (!DC) {
+ GlobalValue::LinkageTypes NewLinkage = GlobalValue::InternalLinkage;
+ GlobalValue::VisibilityTypes NV;
+ if (getLinkageResult(DGV, SGA, NewLinkage, NV, LinkFromSrc))
+ return true;
+ NewVisibility = NV;
+ HasUnnamedAddr = HasUnnamedAddr && DGV->hasUnnamedAddr();
+
+ if (!LinkFromSrc) {
+ // Set calculated linkage.
+ DGV->setLinkage(NewLinkage);
+ DGV->setVisibility(*NewVisibility);
+ DGV->setUnnamedAddr(HasUnnamedAddr);
+ }
+ }
if (!LinkFromSrc) {
- // Set calculated linkage.
- DGV->setLinkage(NewLinkage);
- DGV->setVisibility(*NewVisibility);
-
// Make sure to remember this mapping.
ValueMap[SGA] = ConstantExpr::getBitCast(DGV,TypeMap.get(SGA->getType()));
-
+
// Track the alias from the source module so we don't attempt to remap it.
DoNotLinkFromSource.insert(SGA);
-
+
return false;
}
}
-
+
+ // If the Comdat this alias was inside of wasn't selected, skip it.
+ if (DC && !DGV && !LinkFromSrc) {
+ DoNotLinkFromSource.insert(SGA);
+ return false;
+ }
+
// If there is no linkage to be performed or we're linking from the source,
// bring over SGA.
- GlobalAlias *NewDA = new GlobalAlias(TypeMap.get(SGA->getType()),
- SGA->getLinkage(), SGA->getName(),
- /*aliasee*/0, DstM);
+ auto *PTy = cast<PointerType>(TypeMap.get(SGA->getType()));
+ auto *NewDA =
+ GlobalAlias::create(PTy->getElementType(), PTy->getAddressSpace(),
+ SGA->getLinkage(), SGA->getName(), DstM);
copyGVAttributes(NewDA, SGA);
if (NewVisibility)
NewDA->setVisibility(*NewVisibility);
+ NewDA->setUnnamedAddr(HasUnnamedAddr);
if (DGV) {
// Any uses of DGV need to change to NewDA, with cast.
DGV->replaceAllUsesWith(ConstantExpr::getBitCast(NewDA, DGV->getType()));
DGV->eraseFromParent();
}
-
+
ValueMap[SGA] = NewDA;
return false;
}
@@ -931,15 +1130,15 @@ static void getArrayElements(Constant *C, SmallVectorImpl<Constant*> &Dest) {
for (unsigned i = 0; i != NumElements; ++i)
Dest.push_back(C->getAggregateElement(i));
}
-
+
void ModuleLinker::linkAppendingVarInit(const AppendingVarInfo &AVI) {
// Merge the initializer.
SmallVector<Constant*, 16> Elements;
getArrayElements(AVI.DstInit, Elements);
-
+
Constant *SrcInit = MapValue(AVI.SrcInit, ValueMap, RF_None, &TypeMap, &ValMaterializer);
getArrayElements(SrcInit, Elements);
-
+
ArrayType *NewType = cast<ArrayType>(AVI.NewGV->getType()->getElementType());
AVI.NewGV->setInitializer(ConstantArray::get(NewType, Elements));
}
@@ -950,10 +1149,10 @@ void ModuleLinker::linkGlobalInits() {
// Loop over all of the globals in the src module, mapping them over as we go
for (Module::const_global_iterator I = SrcM->global_begin(),
E = SrcM->global_end(); I != E; ++I) {
-
+
// Only process initialized GV's or ones not already in dest.
- if (!I->hasInitializer() || DoNotLinkFromSource.count(I)) continue;
-
+ if (!I->hasInitializer() || DoNotLinkFromSource.count(I)) continue;
+
// Grab destination global variable.
GlobalVariable *DGV = cast<GlobalVariable>(ValueMap[I]);
// Figure out what the initializer looks like in the dest module.
@@ -981,7 +1180,7 @@ void ModuleLinker::linkFunctionBody(Function *Dst, Function *Src) {
if (Mode == Linker::DestroySource) {
// Splice the body of the source function into the dest function.
Dst->getBasicBlockList().splice(Dst->end(), Src->getBasicBlockList());
-
+
// At this point, all of the instructions and values of the function are now
// copied over. The only problem is that they are still referencing values in
// the Source function as operands. Loop through all of the operands of the
@@ -990,19 +1189,19 @@ void ModuleLinker::linkFunctionBody(Function *Dst, Function *Src) {
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
RemapInstruction(I, ValueMap, RF_IgnoreMissingEntries,
&TypeMap, &ValMaterializer);
-
+
} else {
// Clone the body of the function into the dest function.
SmallVector<ReturnInst*, 8> Returns; // Ignore returns.
- CloneFunctionInto(Dst, Src, ValueMap, false, Returns, "", NULL,
+ CloneFunctionInto(Dst, Src, ValueMap, false, Returns, "", nullptr,
&TypeMap, &ValMaterializer);
}
-
+
// There is no need to map the arguments anymore.
for (Function::arg_iterator I = Src->arg_begin(), E = Src->arg_end();
I != E; ++I)
ValueMap.erase(I);
-
+
}
/// linkAliasBodies - Insert all of the aliases in Src into the Dest module.
@@ -1013,8 +1212,9 @@ void ModuleLinker::linkAliasBodies() {
continue;
if (Constant *Aliasee = I->getAliasee()) {
GlobalAlias *DA = cast<GlobalAlias>(ValueMap[I]);
- DA->setAliasee(MapValue(Aliasee, ValueMap, RF_None,
- &TypeMap, &ValMaterializer));
+ Constant *Val =
+ MapValue(Aliasee, ValueMap, RF_None, &TypeMap, &ValMaterializer);
+ DA->setAliasee(Val);
}
}
}
@@ -1124,7 +1324,7 @@ bool ModuleLinker::linkModuleFlagsMetadata() {
// Perform the merge for standard behavior types.
switch (SrcBehaviorValue) {
case Module::Require:
- case Module::Override: assert(0 && "not possible"); break;
+ case Module::Override: llvm_unreachable("not possible");
case Module::Error: {
// Emit an error if the values differ.
if (SrcOp->getOperand(2) != DstOp->getOperand(2)) {
@@ -1136,8 +1336,10 @@ bool ModuleLinker::linkModuleFlagsMetadata() {
case Module::Warning: {
// Emit a warning if the values differ.
if (SrcOp->getOperand(2) != DstOp->getOperand(2)) {
- errs() << "WARNING: linking module flags '" << ID->getString()
- << "': IDs have conflicting values";
+ if (!SuppressWarnings) {
+ errs() << "WARNING: linking module flags '" << ID->getString()
+ << "': IDs have conflicting values";
+ }
}
continue;
}
@@ -1188,30 +1390,39 @@ bool ModuleLinker::linkModuleFlagsMetadata() {
return HasErr;
}
-
+
bool ModuleLinker::run() {
assert(DstM && "Null destination module");
assert(SrcM && "Null source module");
// Inherit the target data from the source module if the destination module
// doesn't have one already.
- if (DstM->getDataLayout().empty() && !SrcM->getDataLayout().empty())
+ if (!DstM->getDataLayout() && SrcM->getDataLayout())
DstM->setDataLayout(SrcM->getDataLayout());
// Copy the target triple from the source to dest if the dest's is empty.
if (DstM->getTargetTriple().empty() && !SrcM->getTargetTriple().empty())
DstM->setTargetTriple(SrcM->getTargetTriple());
- if (!SrcM->getDataLayout().empty() && !DstM->getDataLayout().empty() &&
- SrcM->getDataLayout() != DstM->getDataLayout())
- errs() << "WARNING: Linking two modules of different data layouts!\n";
+ if (SrcM->getDataLayout() && DstM->getDataLayout() &&
+ *SrcM->getDataLayout() != *DstM->getDataLayout()) {
+ if (!SuppressWarnings) {
+ errs() << "WARNING: Linking two modules of different data layouts: '"
+ << SrcM->getModuleIdentifier() << "' is '"
+ << SrcM->getDataLayoutStr() << "' whereas '"
+ << DstM->getModuleIdentifier() << "' is '"
+ << DstM->getDataLayoutStr() << "'\n";
+ }
+ }
if (!SrcM->getTargetTriple().empty() &&
DstM->getTargetTriple() != SrcM->getTargetTriple()) {
- errs() << "WARNING: Linking two modules of different target triples: ";
- if (!SrcM->getModuleIdentifier().empty())
- errs() << SrcM->getModuleIdentifier() << ": ";
- errs() << "'" << SrcM->getTargetTriple() << "' and '"
- << DstM->getTargetTriple() << "'\n";
+ if (!SuppressWarnings) {
+ errs() << "WARNING: Linking two modules of different target triples: "
+ << SrcM->getModuleIdentifier() << "' is '"
+ << SrcM->getTargetTriple() << "' whereas '"
+ << DstM->getModuleIdentifier() << "' is '"
+ << DstM->getTargetTriple() << "'\n";
+ }
}
// Append the module inline asm string.
@@ -1226,6 +1437,18 @@ bool ModuleLinker::run() {
// Loop over all of the linked values to compute type mappings.
computeTypeMapping();
+ ComdatsChosen.clear();
+ for (const StringMapEntry<llvm::Comdat> &SMEC : SrcM->getComdatSymbolTable()) {
+ const Comdat &C = SMEC.getValue();
+ if (ComdatsChosen.count(&C))
+ continue;
+ Comdat::SelectionKind SK;
+ bool LinkFromSrc;
+ if (getComdatResult(&C, SK, LinkFromSrc))
+ return true;
+ ComdatsChosen[&C] = std::make_pair(SK, LinkFromSrc);
+ }
+
// Insert all of the globals in src into the DstM module... without linking
// initializers (which could refer to functions not yet mapped over).
for (Module::global_iterator I = SrcM->global_begin(),
@@ -1250,17 +1473,13 @@ bool ModuleLinker::run() {
for (unsigned i = 0, e = AppendingVars.size(); i != e; ++i)
linkAppendingVarInit(AppendingVars[i]);
-
- // Update the initializers in the DstM module now that all globals that may
- // be referenced are in DstM.
- linkGlobalInits();
// Link in the function bodies that are defined in the source module into
// DstM.
for (Module::iterator SF = SrcM->begin(), E = SrcM->end(); SF != E; ++SF) {
// Skip if not linking from source.
if (DoNotLinkFromSource.count(SF)) continue;
-
+
Function *DF = cast<Function>(ValueMap[SF]);
if (SF->hasPrefixData()) {
// Link in the prefix data.
@@ -1275,7 +1494,7 @@ bool ModuleLinker::run() {
if (SF->Materialize(&ErrorMsg))
return true;
}
-
+
linkFunctionBody(DF, SF);
SF->Dematerialize();
}
@@ -1292,13 +1511,17 @@ bool ModuleLinker::run() {
if (linkModuleFlagsMetadata())
return true;
+ // Update the initializers in the DstM module now that all globals that may
+ // be referenced are in DstM.
+ linkGlobalInits();
+
// Process vector of lazily linked in functions.
bool LinkedInAnyFunctions;
do {
LinkedInAnyFunctions = false;
-
+
for(std::vector<Function*>::iterator I = LazilyLinkFunctions.begin(),
- E = LazilyLinkFunctions.end(); I != E; ++I) {
+ E = LazilyLinkFunctions.end(); I != E; ++I) {
Function *SF = *I;
if (!SF)
continue;
@@ -1320,7 +1543,7 @@ bool ModuleLinker::run() {
if (SF->Materialize(&ErrorMsg))
return true;
}
-
+
// Erase from vector *before* the function body is linked - linkFunctionBody could
// invalidate I.
LazilyLinkFunctions.erase(I);
@@ -1335,15 +1558,16 @@ bool ModuleLinker::run() {
break;
}
} while (LinkedInAnyFunctions);
-
+
// Now that all of the types from the source are used, resolve any structs
// copied over to the dest that didn't exist there.
TypeMap.linkDefinedTypeBodies();
-
+
return false;
}
-Linker::Linker(Module *M) : Composite(M) {
+Linker::Linker(Module *M, bool SuppressWarnings)
+ : Composite(M), SuppressWarnings(SuppressWarnings) {
TypeFinder StructTypes;
StructTypes.run(*M, true);
IdentifiedStructTypes.insert(StructTypes.begin(), StructTypes.end());
@@ -1354,11 +1578,12 @@ Linker::~Linker() {
void Linker::deleteModule() {
delete Composite;
- Composite = NULL;
+ Composite = nullptr;
}
bool Linker::linkInModule(Module *Src, unsigned Mode, std::string *ErrorMsg) {
- ModuleLinker TheLinker(Composite, IdentifiedStructTypes, Src, Mode);
+ ModuleLinker TheLinker(Composite, IdentifiedStructTypes, Src, Mode,
+ SuppressWarnings);
if (TheLinker.run()) {
if (ErrorMsg)
*ErrorMsg = TheLinker.ErrorMsg;
@@ -1376,7 +1601,7 @@ bool Linker::linkInModule(Module *Src, unsigned Mode, std::string *ErrorMsg) {
/// error occurs, true is returned and ErrorMsg (if not null) is set to indicate
/// the problem. Upon failure, the Dest module could be in a modified state,
/// and shouldn't be relied on to be consistent.
-bool Linker::LinkModules(Module *Dest, Module *Src, unsigned Mode,
+bool Linker::LinkModules(Module *Dest, Module *Src, unsigned Mode,
std::string *ErrorMsg) {
Linker L(Dest);
return L.linkInModule(Src, Mode, ErrorMsg);
@@ -1390,7 +1615,7 @@ LLVMBool LLVMLinkModules(LLVMModuleRef Dest, LLVMModuleRef Src,
LLVMLinkerMode Mode, char **OutMessages) {
std::string Messages;
LLVMBool Result = Linker::LinkModules(unwrap(Dest), unwrap(Src),
- Mode, OutMessages? &Messages : 0);
+ Mode, OutMessages? &Messages : nullptr);
if (OutMessages)
*OutMessages = strdup(Messages.c_str());
return Result;
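For reference, a minimal sketch of driving the C wrapper above; the helper name is invented, error handling is reduced to a message, and Dest/Src are assumed to have been produced elsewhere:

#include "llvm-c/Core.h"
#include "llvm-c/Linker.h"
#include <stdio.h>

// Link Src into Dest; LLVMLinkerDestroySource corresponds to
// Linker::DestroySource in the C++ path above.
static int linkTwoModules(LLVMModuleRef Dest, LLVMModuleRef Src) {
  char *Messages = NULL;
  LLVMBool Failed =
      LLVMLinkModules(Dest, Src, LLVMLinkerDestroySource, &Messages);
  if (Failed)
    fprintf(stderr, "link failed: %s\n", Messages);
  LLVMDisposeMessage(Messages); // the wrapper strdup's a message either way
  return Failed ? 1 : 0;
}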
diff --git a/contrib/llvm/lib/MC/ConstantPools.cpp b/contrib/llvm/lib/MC/ConstantPools.cpp
new file mode 100644
index 0000000..c4cea60
--- /dev/null
+++ b/contrib/llvm/lib/MC/ConstantPools.cpp
@@ -0,0 +1,98 @@
+//===- ConstantPools.cpp - ConstantPool class --*- C++ -*---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ConstantPool and AssemblerConstantPools classes.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/ADT/MapVector.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/ConstantPools.h"
+
+using namespace llvm;
+//
+// ConstantPool implementation
+//
+// Emit the contents of the constant pool using the provided streamer.
+void ConstantPool::emitEntries(MCStreamer &Streamer) {
+ if (Entries.empty())
+ return;
+ Streamer.EmitDataRegion(MCDR_DataRegion);
+ for (EntryVecTy::const_iterator I = Entries.begin(), E = Entries.end();
+ I != E; ++I) {
+ Streamer.EmitCodeAlignment(I->Size); // align naturally
+ Streamer.EmitLabel(I->Label);
+ Streamer.EmitValue(I->Value, I->Size);
+ }
+ Streamer.EmitDataRegion(MCDR_DataRegionEnd);
+ Entries.clear();
+}
+
+const MCExpr *ConstantPool::addEntry(const MCExpr *Value, MCContext &Context,
+ unsigned Size) {
+ MCSymbol *CPEntryLabel = Context.CreateTempSymbol();
+
+ Entries.push_back(ConstantPoolEntry(CPEntryLabel, Value, Size));
+ return MCSymbolRefExpr::Create(CPEntryLabel, Context);
+}
+
+bool ConstantPool::empty() { return Entries.empty(); }
+
+//
+// AssemblerConstantPools implementation
+//
+ConstantPool *
+AssemblerConstantPools::getConstantPool(const MCSection *Section) {
+ ConstantPoolMapTy::iterator CP = ConstantPools.find(Section);
+ if (CP == ConstantPools.end())
+ return nullptr;
+
+ return &CP->second;
+}
+
+ConstantPool &
+AssemblerConstantPools::getOrCreateConstantPool(const MCSection *Section) {
+ return ConstantPools[Section];
+}
+
+static void emitConstantPool(MCStreamer &Streamer, const MCSection *Section,
+ ConstantPool &CP) {
+ if (!CP.empty()) {
+ Streamer.SwitchSection(Section);
+ CP.emitEntries(Streamer);
+ }
+}
+
+void AssemblerConstantPools::emitAll(MCStreamer &Streamer) {
+ // Dump contents of assembler constant pools.
+ for (ConstantPoolMapTy::iterator CPI = ConstantPools.begin(),
+ CPE = ConstantPools.end();
+ CPI != CPE; ++CPI) {
+ const MCSection *Section = CPI->first;
+ ConstantPool &CP = CPI->second;
+
+ emitConstantPool(Streamer, Section, CP);
+ }
+}
+
+void AssemblerConstantPools::emitForCurrentSection(MCStreamer &Streamer) {
+ const MCSection *Section = Streamer.getCurrentSection().first;
+ if (ConstantPool *CP = getConstantPool(Section)) {
+ emitConstantPool(Streamer, Section, *CP);
+ }
+}
+
+const MCExpr *AssemblerConstantPools::addEntry(MCStreamer &Streamer,
+ const MCExpr *Expr,
+ unsigned Size) {
+ const MCSection *Section = Streamer.getCurrentSection().first;
+ return getOrCreateConstantPool(Section).addEntry(Expr, Streamer.getContext(),
+ Size);
+}
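A hedged sketch of how a target assembly streamer might sit on top of this new file; the wrapper struct and hook names are invented for illustration, and only the three AssemblerConstantPools calls come from the code above:

#include "llvm/MC/ConstantPools.h"
#include "llvm/MC/MCStreamer.h"
using namespace llvm;

struct ExamplePoolUser {
  AssemblerConstantPools Pools;

  // Parsing a load-constant pseudo: park the value in the pool and return
  // the label expression the load should reference instead.
  const MCExpr *deferConstant(MCStreamer &S, const MCExpr *Value) {
    return Pools.addEntry(S, Value, /*Size=*/4); // 4-byte pool slot
  }

  // An .ltorg-style directive flushes the pool for the current section.
  void onPoolDirective(MCStreamer &S) { Pools.emitForCurrentSection(S); }

  // End of the file flushes whatever is left in every section's pool.
  void onFinish(MCStreamer &S) { Pools.emitAll(S); }
};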
diff --git a/contrib/llvm/lib/MC/ELFObjectWriter.cpp b/contrib/llvm/lib/MC/ELFObjectWriter.cpp
index 9899bb2..5779b27 100644
--- a/contrib/llvm/lib/MC/ELFObjectWriter.cpp
+++ b/contrib/llvm/lib/MC/ELFObjectWriter.cpp
@@ -12,12 +12,12 @@
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCELFObjectWriter.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
@@ -28,7 +28,10 @@
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCValue.h"
+#include "llvm/MC/StringTableBuilder.h"
+#include "llvm/Support/Compression.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/Endian.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include <vector>
@@ -38,16 +41,76 @@ using namespace llvm;
#define DEBUG_TYPE "reloc-info"
namespace {
+class FragmentWriter {
+ bool IsLittleEndian;
+
+public:
+ FragmentWriter(bool IsLittleEndian);
+ template <typename T> void write(MCDataFragment &F, T Val);
+};
+
+typedef DenseMap<const MCSectionELF *, uint32_t> SectionIndexMapTy;
+
+class SymbolTableWriter {
+ MCAssembler &Asm;
+ FragmentWriter &FWriter;
+ bool Is64Bit;
+ SectionIndexMapTy &SectionIndexMap;
+
+ // The .symtab fragment we are writing to.
+ MCDataFragment *SymtabF;
+
+ // The .symtab_shndx fragment we are writing to.
+ MCDataFragment *ShndxF;
+
+ // The number of symbols written so far.
+ unsigned NumWritten;
+
+ void createSymtabShndx();
+
+ template <typename T> void write(MCDataFragment &F, T Value);
+
+public:
+ SymbolTableWriter(MCAssembler &Asm, FragmentWriter &FWriter, bool Is64Bit,
+ SectionIndexMapTy &SectionIndexMap,
+ MCDataFragment *SymtabF);
+
+ void writeSymbol(uint32_t name, uint8_t info, uint64_t value, uint64_t size,
+ uint8_t other, uint32_t shndx, bool Reserved);
+};
+
+struct ELFRelocationEntry {
+ uint64_t Offset; // Where is the relocation.
+ bool UseSymbol; // Relocate with a symbol, not the section.
+ union {
+ const MCSymbol *Symbol; // The symbol to relocate with.
+ const MCSectionData *Section; // The section to relocate with.
+ };
+ unsigned Type; // The type of the relocation.
+ uint64_t Addend; // The addend to use.
+
+ ELFRelocationEntry(uint64_t Offset, const MCSymbol *Symbol, unsigned Type,
+ uint64_t Addend)
+ : Offset(Offset), UseSymbol(true), Symbol(Symbol), Type(Type),
+ Addend(Addend) {}
+
+ ELFRelocationEntry(uint64_t Offset, const MCSectionData *Section,
+ unsigned Type, uint64_t Addend)
+ : Offset(Offset), UseSymbol(false), Section(Section), Type(Type),
+ Addend(Addend) {}
+};
+
class ELFObjectWriter : public MCObjectWriter {
+ FragmentWriter FWriter;
+
protected:
static bool isFixupKindPCRel(const MCAssembler &Asm, unsigned Kind);
static bool RelocNeedsGOT(MCSymbolRefExpr::VariantKind Variant);
static uint64_t SymbolValue(MCSymbolData &Data, const MCAsmLayout &Layout);
- static bool isInSymtab(const MCAssembler &Asm, const MCSymbolData &Data,
+ static bool isInSymtab(const MCAsmLayout &Layout, const MCSymbolData &Data,
bool Used, bool Renamed);
- static bool isLocal(const MCSymbolData &Data, bool isSignature,
- bool isUsedInReloc);
+ static bool isLocal(const MCSymbolData &Data, bool isUsedInReloc);
static bool IsELFMetaDataSection(const MCSectionData &SD);
static uint64_t DataSectionSize(const MCSectionData &SD);
static uint64_t GetSectionFileSize(const MCAsmLayout &Layout,
@@ -70,30 +133,30 @@ class ELFObjectWriter : public MCObjectWriter {
MCSymbolData *SymbolData;
uint64_t StringIndex;
uint32_t SectionIndex;
+ StringRef Name;
// Support lexicographic sorting.
bool operator<(const ELFSymbolData &RHS) const {
- return SymbolData->getSymbol().getName() <
- RHS.SymbolData->getSymbol().getName();
+ return Name < RHS.Name;
}
};
/// The target specific ELF writer instance.
- llvm::OwningPtr<MCELFObjectTargetWriter> TargetObjectWriter;
+ std::unique_ptr<MCELFObjectTargetWriter> TargetObjectWriter;
SmallPtrSet<const MCSymbol *, 16> UsedInReloc;
SmallPtrSet<const MCSymbol *, 16> WeakrefUsedInReloc;
DenseMap<const MCSymbol *, const MCSymbol *> Renames;
- llvm::DenseMap<const MCSectionData*,
- std::vector<ELFRelocationEntry> > Relocations;
- DenseMap<const MCSection*, uint64_t> SectionStringTableIndex;
+ llvm::DenseMap<const MCSectionData *, std::vector<ELFRelocationEntry>>
+ Relocations;
+ StringTableBuilder ShStrTabBuilder;
/// @}
/// @name Symbol Table Data
/// @{
- SmallString<256> StringTable;
+ StringTableBuilder StrTabBuilder;
std::vector<uint64_t> FileSymbolData;
std::vector<ELFSymbolData> LocalSymbolData;
std::vector<ELFSymbolData> ExternalSymbolData;
@@ -103,8 +166,6 @@ class ELFObjectWriter : public MCObjectWriter {
bool NeedsGOT;
- bool NeedsSymtabShndx;
-
// This holds the symbol table index of the last local symbol.
unsigned LastLocalSymbolIndex;
// This holds the .strtab section index.
@@ -115,45 +176,21 @@ class ELFObjectWriter : public MCObjectWriter {
unsigned ShstrtabIndex;
- const MCSymbol *SymbolToReloc(const MCAssembler &Asm,
- const MCValue &Target,
- const MCFragment &F,
- const MCFixup &Fixup,
- bool IsPCRel) const;
-
// TargetObjectWriter wrappers.
- const MCSymbol *ExplicitRelSym(const MCAssembler &Asm,
- const MCValue &Target,
- const MCFragment &F,
- const MCFixup &Fixup,
- bool IsPCRel) const {
- return TargetObjectWriter->ExplicitRelSym(Asm, Target, F, Fixup, IsPCRel);
- }
- const MCSymbol *undefinedExplicitRelSym(const MCValue &Target,
- const MCFixup &Fixup,
- bool IsPCRel) const {
- return TargetObjectWriter->undefinedExplicitRelSym(Target, Fixup,
- IsPCRel);
- }
-
bool is64Bit() const { return TargetObjectWriter->is64Bit(); }
bool hasRelocationAddend() const {
return TargetObjectWriter->hasRelocationAddend();
}
unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel, bool IsRelocWithSymbol,
- int64_t Addend) const {
- return TargetObjectWriter->GetRelocType(Target, Fixup, IsPCRel,
- IsRelocWithSymbol, Addend);
+ bool IsPCRel) const {
+ return TargetObjectWriter->GetRelocType(Target, Fixup, IsPCRel);
}
public:
- ELFObjectWriter(MCELFObjectTargetWriter *MOTW,
- raw_ostream &_OS, bool IsLittleEndian)
- : MCObjectWriter(_OS, IsLittleEndian),
- TargetObjectWriter(MOTW),
- NeedsGOT(false), NeedsSymtabShndx(false) {
- }
+ ELFObjectWriter(MCELFObjectTargetWriter *MOTW, raw_ostream &_OS,
+ bool IsLittleEndian)
+ : MCObjectWriter(_OS, IsLittleEndian), FWriter(IsLittleEndian),
+ TargetObjectWriter(MOTW), NeedsGOT(false) {}
virtual ~ELFObjectWriter();
@@ -164,96 +201,30 @@ class ELFObjectWriter : public MCObjectWriter {
Write32(W);
}
- void StringLE16(char *buf, uint16_t Value) {
- buf[0] = char(Value >> 0);
- buf[1] = char(Value >> 8);
- }
-
- void StringLE32(char *buf, uint32_t Value) {
- StringLE16(buf, uint16_t(Value >> 0));
- StringLE16(buf + 2, uint16_t(Value >> 16));
- }
-
- void StringLE64(char *buf, uint64_t Value) {
- StringLE32(buf, uint32_t(Value >> 0));
- StringLE32(buf + 4, uint32_t(Value >> 32));
- }
-
- void StringBE16(char *buf ,uint16_t Value) {
- buf[0] = char(Value >> 8);
- buf[1] = char(Value >> 0);
- }
-
- void StringBE32(char *buf, uint32_t Value) {
- StringBE16(buf, uint16_t(Value >> 16));
- StringBE16(buf + 2, uint16_t(Value >> 0));
- }
-
- void StringBE64(char *buf, uint64_t Value) {
- StringBE32(buf, uint32_t(Value >> 32));
- StringBE32(buf + 4, uint32_t(Value >> 0));
- }
-
- void String8(MCDataFragment &F, uint8_t Value) {
- char buf[1];
- buf[0] = Value;
- F.getContents().append(&buf[0], &buf[1]);
- }
-
- void String16(MCDataFragment &F, uint16_t Value) {
- char buf[2];
- if (isLittleEndian())
- StringLE16(buf, Value);
- else
- StringBE16(buf, Value);
- F.getContents().append(&buf[0], &buf[2]);
- }
-
- void String32(MCDataFragment &F, uint32_t Value) {
- char buf[4];
- if (isLittleEndian())
- StringLE32(buf, Value);
- else
- StringBE32(buf, Value);
- F.getContents().append(&buf[0], &buf[4]);
- }
-
- void String64(MCDataFragment &F, uint64_t Value) {
- char buf[8];
- if (isLittleEndian())
- StringLE64(buf, Value);
- else
- StringBE64(buf, Value);
- F.getContents().append(&buf[0], &buf[8]);
+ template <typename T> void write(MCDataFragment &F, T Value) {
+ FWriter.write(F, Value);
}
void WriteHeader(const MCAssembler &Asm,
uint64_t SectionDataSize,
unsigned NumberOfSections);
- void WriteSymbolEntry(MCDataFragment *SymtabF,
- MCDataFragment *ShndxF,
- uint64_t name, uint8_t info,
- uint64_t value, uint64_t size,
- uint8_t other, uint32_t shndx,
- bool Reserved);
-
- void WriteSymbol(MCDataFragment *SymtabF, MCDataFragment *ShndxF,
- ELFSymbolData &MSD,
+ void WriteSymbol(SymbolTableWriter &Writer, ELFSymbolData &MSD,
const MCAsmLayout &Layout);
- typedef DenseMap<const MCSectionELF*, uint32_t> SectionIndexMapTy;
- void WriteSymbolTable(MCDataFragment *SymtabF,
- MCDataFragment *ShndxF,
- const MCAssembler &Asm,
+ void WriteSymbolTable(MCDataFragment *SymtabF, MCAssembler &Asm,
const MCAsmLayout &Layout,
- const SectionIndexMapTy &SectionIndexMap);
+ SectionIndexMapTy &SectionIndexMap);
+
+ bool shouldRelocateWithSymbol(const MCAssembler &Asm,
+ const MCSymbolRefExpr *RefA,
+ const MCSymbolData *SD, uint64_t C,
+ unsigned Type) const;
- virtual void RecordRelocation(const MCAssembler &Asm,
- const MCAsmLayout &Layout,
- const MCFragment *Fragment,
- const MCFixup &Fixup,
- MCValue Target, uint64_t &FixedValue);
+ void RecordRelocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFragment *Fragment, const MCFixup &Fixup,
+ MCValue Target, bool &IsPCRel,
+ uint64_t &FixedValue) override;
uint64_t getSymbolIndexInSymbolTable(const MCAssembler &Asm,
const MCSymbol *S);
@@ -267,13 +238,13 @@ class ELFObjectWriter : public MCObjectWriter {
// Map from a section to its offset
typedef DenseMap<const MCSectionELF*, uint64_t> SectionOffsetMapTy;
- /// ComputeSymbolTable - Compute the symbol table data
+ /// Compute the symbol table data
///
/// \param Asm - The assembler.
/// \param SectionIndexMap - Maps a section to its index.
/// \param RevGroupMap - Maps a signature symbol to the group section.
/// \param NumRegularSections - Number of non-relocation sections.
- void ComputeSymbolTable(MCAssembler &Asm,
+ void computeSymbolTable(MCAssembler &Asm, const MCAsmLayout &Layout,
const SectionIndexMapTy &SectionIndexMap,
RevGroupMapTy RevGroupMap,
unsigned NumRegularSections);
@@ -285,6 +256,8 @@ class ELFObjectWriter : public MCObjectWriter {
void CreateRelocationSections(MCAssembler &Asm, MCAsmLayout &Layout,
RelMapTy &RelMap);
+ void CompressDebugSections(MCAssembler &Asm, MCAsmLayout &Layout);
+
void WriteRelocations(MCAssembler &Asm, MCAsmLayout &Layout,
const RelMapTy &RelMap);
@@ -300,8 +273,8 @@ class ELFObjectWriter : public MCObjectWriter {
SectionIndexMapTy &SectionIndexMap,
const RelMapTy &RelMap);
- virtual void ExecutePostLayoutBinding(MCAssembler &Asm,
- const MCAsmLayout &Layout);
+ void ExecutePostLayoutBinding(MCAssembler &Asm,
+ const MCAsmLayout &Layout) override;
void WriteSectionHeader(MCAssembler &Asm, const GroupMapTy &GroupMap,
const MCAsmLayout &Layout,
@@ -320,14 +293,14 @@ class ELFObjectWriter : public MCObjectWriter {
MCDataFragment *F,
const MCSectionData *SD);
- virtual bool
+ bool
IsSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
const MCSymbolData &DataA,
const MCFragment &FB,
bool InSet,
- bool IsPCRel) const;
+ bool IsPCRel) const override;
- virtual void WriteObject(MCAssembler &Asm, const MCAsmLayout &Layout);
+ void WriteObject(MCAssembler &Asm, const MCAsmLayout &Layout) override;
void WriteSection(MCAssembler &Asm,
const SectionIndexMapTy &SectionIndexMap,
uint32_t GroupSymbolIndex,
@@ -336,6 +309,88 @@ class ELFObjectWriter : public MCObjectWriter {
};
}
+FragmentWriter::FragmentWriter(bool IsLittleEndian)
+ : IsLittleEndian(IsLittleEndian) {}
+
+template <typename T> void FragmentWriter::write(MCDataFragment &F, T Val) {
+ if (IsLittleEndian)
+ Val = support::endian::byte_swap<T, support::little>(Val);
+ else
+ Val = support::endian::byte_swap<T, support::big>(Val);
+ const char *Start = (const char *)&Val;
+ F.getContents().append(Start, Start + sizeof(T));
+}
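The same serialization restated outside MC with a worked byte order (the value is invented):

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Endian.h"
#include <cstdint>

// Append Val to Out in little-endian order, mirroring FragmentWriter::write.
template <typename T>
static void appendLE(llvm::SmallVectorImpl<char> &Out, T Val) {
  Val = llvm::support::endian::byte_swap<T, llvm::support::little>(Val);
  const char *Start = reinterpret_cast<const char *>(&Val);
  Out.append(Start, Start + sizeof(T));
}
// appendLE(Out, uint32_t(0x11223344)) appends the bytes 44 33 22 11;
// the big-endian counterpart would append 11 22 33 44.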
+
+void SymbolTableWriter::createSymtabShndx() {
+ if (ShndxF)
+ return;
+
+ MCContext &Ctx = Asm.getContext();
+ const MCSectionELF *SymtabShndxSection =
+ Ctx.getELFSection(".symtab_shndxr", ELF::SHT_SYMTAB_SHNDX, 0,
+ SectionKind::getReadOnly(), 4, "");
+ MCSectionData *SymtabShndxSD =
+ &Asm.getOrCreateSectionData(*SymtabShndxSection);
+ SymtabShndxSD->setAlignment(4);
+ ShndxF = new MCDataFragment(SymtabShndxSD);
+ unsigned Index = SectionIndexMap.size() + 1;
+ SectionIndexMap[SymtabShndxSection] = Index;
+
+ for (unsigned I = 0; I < NumWritten; ++I)
+ write(*ShndxF, uint32_t(0));
+}
+
+template <typename T>
+void SymbolTableWriter::write(MCDataFragment &F, T Value) {
+ FWriter.write(F, Value);
+}
+
+SymbolTableWriter::SymbolTableWriter(MCAssembler &Asm, FragmentWriter &FWriter,
+ bool Is64Bit,
+ SectionIndexMapTy &SectionIndexMap,
+ MCDataFragment *SymtabF)
+ : Asm(Asm), FWriter(FWriter), Is64Bit(Is64Bit),
+ SectionIndexMap(SectionIndexMap), SymtabF(SymtabF), ShndxF(nullptr),
+ NumWritten(0) {}
+
+void SymbolTableWriter::writeSymbol(uint32_t name, uint8_t info, uint64_t value,
+ uint64_t size, uint8_t other,
+ uint32_t shndx, bool Reserved) {
+ bool LargeIndex = shndx >= ELF::SHN_LORESERVE && !Reserved;
+
+ if (LargeIndex)
+ createSymtabShndx();
+
+ if (ShndxF) {
+ if (LargeIndex)
+ write(*ShndxF, shndx);
+ else
+ write(*ShndxF, uint32_t(0));
+ }
+
+ uint16_t Index = LargeIndex ? uint16_t(ELF::SHN_XINDEX) : shndx;
+
+ raw_svector_ostream OS(SymtabF->getContents());
+
+ if (Is64Bit) {
+ write(*SymtabF, name); // st_name
+ write(*SymtabF, info); // st_info
+ write(*SymtabF, other); // st_other
+ write(*SymtabF, Index); // st_shndx
+ write(*SymtabF, value); // st_value
+ write(*SymtabF, size); // st_size
+ } else {
+ write(*SymtabF, name); // st_name
+ write(*SymtabF, uint32_t(value)); // st_value
+ write(*SymtabF, uint32_t(size)); // st_size
+ write(*SymtabF, info); // st_info
+ write(*SymtabF, other); // st_other
+ write(*SymtabF, Index); // st_shndx
+ }
+
+ ++NumWritten;
+}
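For reference (from the ELF specification, not from this patch), the two field orders written above match the on-disk symbol layouts:

#include <cstdint>

struct Elf32_Sym_Layout {
  uint32_t st_name;  // .strtab offset of the symbol name
  uint32_t st_value;
  uint32_t st_size;
  uint8_t  st_info;  // binding in the high nibble, type in the low nibble
  uint8_t  st_other; // visibility in the low two bits
  uint16_t st_shndx; // SHN_XINDEX when the real index lives in .symtab_shndx
};

struct Elf64_Sym_Layout {
  uint32_t st_name;
  uint8_t  st_info;
  uint8_t  st_other;
  uint16_t st_shndx;
  uint64_t st_value;
  uint64_t st_size;
};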
+
bool ELFObjectWriter::isFixupKindPCRel(const MCAssembler &Asm, unsigned Kind) {
const MCFixupKindInfo &FKI =
Asm.getBackend().getFixupKindInfo((MCFixupKind) Kind);
@@ -431,67 +486,19 @@ void ELFObjectWriter::WriteHeader(const MCAssembler &Asm,
Write16(ShstrtabIndex);
}
-void ELFObjectWriter::WriteSymbolEntry(MCDataFragment *SymtabF,
- MCDataFragment *ShndxF,
- uint64_t name,
- uint8_t info, uint64_t value,
- uint64_t size, uint8_t other,
- uint32_t shndx,
- bool Reserved) {
- if (ShndxF) {
- if (shndx >= ELF::SHN_LORESERVE && !Reserved)
- String32(*ShndxF, shndx);
- else
- String32(*ShndxF, 0);
- }
-
- uint16_t Index = (shndx >= ELF::SHN_LORESERVE && !Reserved) ?
- uint16_t(ELF::SHN_XINDEX) : shndx;
-
- if (is64Bit()) {
- String32(*SymtabF, name); // st_name
- String8(*SymtabF, info); // st_info
- String8(*SymtabF, other); // st_other
- String16(*SymtabF, Index); // st_shndx
- String64(*SymtabF, value); // st_value
- String64(*SymtabF, size); // st_size
- } else {
- String32(*SymtabF, name); // st_name
- String32(*SymtabF, value); // st_value
- String32(*SymtabF, size); // st_size
- String8(*SymtabF, info); // st_info
- String8(*SymtabF, other); // st_other
- String16(*SymtabF, Index); // st_shndx
- }
-}
-
uint64_t ELFObjectWriter::SymbolValue(MCSymbolData &Data,
const MCAsmLayout &Layout) {
if (Data.isCommon() && Data.isExternal())
return Data.getCommonAlignment();
- const MCSymbol &Symbol = Data.getSymbol();
-
- if (Symbol.isAbsolute() && Symbol.isVariable()) {
- if (const MCExpr *Value = Symbol.getVariableValue()) {
- int64_t IntValue;
- if (Value->EvaluateAsAbsolute(IntValue, Layout))
- return (uint64_t)IntValue;
- }
- }
-
- if (!Symbol.isInSection())
+ uint64_t Res;
+ if (!Layout.getSymbolOffset(&Data, Res))
return 0;
+ if (Layout.getAssembler().isThumbFunc(&Data.getSymbol()))
+ Res |= 1;
- if (Data.getFragment()) {
- if (Data.getFlags() & ELF_Other_ThumbFunc)
- return Layout.getSymbolOffset(&Data)+1;
- else
- return Layout.getSymbolOffset(&Data);
- }
-
- return 0;
+ return Res;
}
void ELFObjectWriter::ExecutePostLayoutBinding(MCAssembler &Asm,
@@ -499,15 +506,17 @@ void ELFObjectWriter::ExecutePostLayoutBinding(MCAssembler &Asm,
// The presence of symbol versions causes undefined symbols and
// versions declared with @@@ to be renamed.
- for (MCAssembler::symbol_iterator it = Asm.symbol_begin(),
- ie = Asm.symbol_end(); it != ie; ++it) {
- const MCSymbol &Alias = it->getSymbol();
- const MCSymbol &Symbol = Alias.AliasedSymbol();
- MCSymbolData &SD = Asm.getSymbolData(Symbol);
+ for (MCSymbolData &OriginalData : Asm.symbols()) {
+ const MCSymbol &Alias = OriginalData.getSymbol();
// Not an alias.
- if (&Symbol == &Alias)
+ if (!Alias.isVariable())
continue;
+ auto *Ref = dyn_cast<MCSymbolRefExpr>(Alias.getVariableValue());
+ if (!Ref)
+ continue;
+ const MCSymbol &Symbol = Ref->getSymbol();
+ MCSymbolData &SD = Asm.getSymbolData(Symbol);
StringRef AliasName = Alias.getName();
size_t Pos = AliasName.find('@');
@@ -516,8 +525,8 @@ void ELFObjectWriter::ExecutePostLayoutBinding(MCAssembler &Asm,
// Aliases defined with .symvar copy the binding from the symbol they alias.
// This is the first place we are able to copy this information.
- it->setExternal(SD.isExternal());
- MCELF::SetBinding(*it, MCELF::GetBinding(SD));
+ OriginalData.setExternal(SD.isExternal());
+ MCELF::SetBinding(OriginalData, MCELF::GetBinding(SD));
StringRef Rest = AliasName.substr(Pos);
if (!Symbol.isUndefined() && !Rest.startswith("@@@"))
@@ -532,35 +541,77 @@ void ELFObjectWriter::ExecutePostLayoutBinding(MCAssembler &Asm,
}
}
-void ELFObjectWriter::WriteSymbol(MCDataFragment *SymtabF,
- MCDataFragment *ShndxF,
- ELFSymbolData &MSD,
+static uint8_t mergeTypeForSet(uint8_t origType, uint8_t newType) {
+ uint8_t Type = newType;
+
+ // Propagation rules:
+ // IFUNC > FUNC > OBJECT > NOTYPE
+ // TLS_OBJECT > OBJECT > NOTYPE
+ //
+ // don't let the new type degrade the old type
+ switch (origType) {
+ default:
+ break;
+ case ELF::STT_GNU_IFUNC:
+ if (Type == ELF::STT_FUNC || Type == ELF::STT_OBJECT ||
+ Type == ELF::STT_NOTYPE || Type == ELF::STT_TLS)
+ Type = ELF::STT_GNU_IFUNC;
+ break;
+ case ELF::STT_FUNC:
+ if (Type == ELF::STT_OBJECT || Type == ELF::STT_NOTYPE ||
+ Type == ELF::STT_TLS)
+ Type = ELF::STT_FUNC;
+ break;
+ case ELF::STT_OBJECT:
+ if (Type == ELF::STT_NOTYPE)
+ Type = ELF::STT_OBJECT;
+ break;
+ case ELF::STT_TLS:
+ if (Type == ELF::STT_OBJECT || Type == ELF::STT_NOTYPE ||
+ Type == ELF::STT_GNU_IFUNC || Type == ELF::STT_FUNC)
+ Type = ELF::STT_TLS;
+ break;
+ }
+
+ return Type;
+}
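Worked examples of the propagation rule (hypothetical calls, valid only inside this translation unit where the static helper is visible):

static void mergeTypeForSetExamples() {
  // The stronger type wins, so a '.set' alias never downgrades the type it
  // inherits from its base symbol.
  uint8_t A = mergeTypeForSet(ELF::STT_FUNC, ELF::STT_NOTYPE);      // STT_FUNC
  uint8_t B = mergeTypeForSet(ELF::STT_NOTYPE, ELF::STT_GNU_IFUNC); // STT_GNU_IFUNC
  uint8_t C = mergeTypeForSet(ELF::STT_TLS, ELF::STT_OBJECT);       // STT_TLS
  (void)A; (void)B; (void)C;
}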
+
+void ELFObjectWriter::WriteSymbol(SymbolTableWriter &Writer, ELFSymbolData &MSD,
const MCAsmLayout &Layout) {
MCSymbolData &OrigData = *MSD.SymbolData;
- MCSymbolData &Data =
- Layout.getAssembler().getSymbolData(OrigData.getSymbol().AliasedSymbol());
+ assert((!OrigData.getFragment() ||
+ (&OrigData.getFragment()->getParent()->getSection() ==
+ &OrigData.getSymbol().getSection())) &&
+ "The symbol's section doesn't match the fragment's symbol");
+ const MCSymbol *Base = Layout.getBaseSymbol(OrigData.getSymbol());
- bool IsReserved = Data.isCommon() || Data.getSymbol().isAbsolute() ||
- Data.getSymbol().isVariable();
+ // This has to be in sync with when computeSymbolTable uses SHN_ABS or
+ // SHN_COMMON.
+ bool IsReserved = !Base || OrigData.isCommon();
// Binding and Type share the same byte as upper and lower nibbles
uint8_t Binding = MCELF::GetBinding(OrigData);
- uint8_t Type = MCELF::GetType(Data);
+ uint8_t Type = MCELF::GetType(OrigData);
+ MCSymbolData *BaseSD = nullptr;
+ if (Base) {
+ BaseSD = &Layout.getAssembler().getSymbolData(*Base);
+ Type = mergeTypeForSet(Type, MCELF::GetType(*BaseSD));
+ }
uint8_t Info = (Binding << ELF_STB_Shift) | (Type << ELF_STT_Shift);
// Other and Visibility share the same byte with Visibility using the lower
// 2 bits
uint8_t Visibility = MCELF::GetVisibility(OrigData);
- uint8_t Other = MCELF::getOther(OrigData) <<
- (ELF_Other_Shift - ELF_STV_Shift);
+ uint8_t Other = MCELF::getOther(OrigData) << (ELF_STO_Shift - ELF_STV_Shift);
Other |= Visibility;
- uint64_t Value = SymbolValue(Data, Layout);
+ uint64_t Value = SymbolValue(OrigData, Layout);
uint64_t Size = 0;
- assert(!(Data.isCommon() && !Data.isExternal()));
+ const MCExpr *ESize = OrigData.getSize();
+ if (!ESize && Base)
+ ESize = BaseSD->getSize();
- const MCExpr *ESize = Data.getSize();
if (ESize) {
int64_t Res;
if (!ESize->EvaluateAsAbsolute(Res, Layout))
@@ -569,28 +620,27 @@ void ELFObjectWriter::WriteSymbol(MCDataFragment *SymtabF,
}
// Write out the symbol table entry
- WriteSymbolEntry(SymtabF, ShndxF, MSD.StringIndex, Info, Value,
- Size, Other, MSD.SectionIndex, IsReserved);
+ Writer.writeSymbol(MSD.StringIndex, Info, Value, Size, Other,
+ MSD.SectionIndex, IsReserved);
}
void ELFObjectWriter::WriteSymbolTable(MCDataFragment *SymtabF,
- MCDataFragment *ShndxF,
- const MCAssembler &Asm,
+ MCAssembler &Asm,
const MCAsmLayout &Layout,
- const SectionIndexMapTy &SectionIndexMap) {
+ SectionIndexMapTy &SectionIndexMap) {
// The string table must be emitted first because we need the index
// into the string table for all the symbol names.
- assert(StringTable.size() && "Missing string table");
// FIXME: Make sure the start of the symbol table is aligned.
+ SymbolTableWriter Writer(Asm, FWriter, is64Bit(), SectionIndexMap, SymtabF);
+
// The first entry is the undefined symbol entry.
- WriteSymbolEntry(SymtabF, ShndxF, 0, 0, 0, 0, 0, 0, false);
+ Writer.writeSymbol(0, 0, 0, 0, 0, 0, false);
for (unsigned i = 0, e = FileSymbolData.size(); i != e; ++i) {
- WriteSymbolEntry(SymtabF, ShndxF, FileSymbolData[i],
- ELF::STT_FILE | ELF::STB_LOCAL, 0, 0,
- ELF::STV_DEFAULT, ELF::SHN_ABS, true);
+ Writer.writeSymbol(FileSymbolData[i], ELF::STT_FILE | ELF::STB_LOCAL, 0, 0,
+ ELF::STV_DEFAULT, ELF::SHN_ABS, true);
}
// Write the symbol table entries.
@@ -598,7 +648,7 @@ void ELFObjectWriter::WriteSymbolTable(MCDataFragment *SymtabF,
for (unsigned i = 0, e = LocalSymbolData.size(); i != e; ++i) {
ELFSymbolData &MSD = LocalSymbolData[i];
- WriteSymbol(SymtabF, ShndxF, MSD, Layout);
+ WriteSymbol(Writer, MSD, Layout);
}
// Write out a symbol table entry for each regular section.
@@ -612,9 +662,8 @@ void ELFObjectWriter::WriteSymbolTable(MCDataFragment *SymtabF,
Section.getType() == ELF::SHT_SYMTAB ||
Section.getType() == ELF::SHT_SYMTAB_SHNDX)
continue;
- WriteSymbolEntry(SymtabF, ShndxF, 0, ELF::STT_SECTION, 0, 0,
- ELF::STV_DEFAULT, SectionIndexMap.lookup(&Section),
- false);
+ Writer.writeSymbol(0, ELF::STT_SECTION, 0, 0, ELF::STV_DEFAULT,
+ SectionIndexMap.lookup(&Section), false);
LastLocalSymbolIndex++;
}
@@ -624,7 +673,7 @@ void ELFObjectWriter::WriteSymbolTable(MCDataFragment *SymtabF,
assert(((Data.getFlags() & ELF_STB_Global) ||
(Data.getFlags() & ELF_STB_Weak)) &&
"External symbol requires STB_GLOBAL or STB_WEAK flag");
- WriteSymbol(SymtabF, ShndxF, MSD, Layout);
+ WriteSymbol(Writer, MSD, Layout);
if (MCELF::GetBinding(Data) == ELF::STB_LOCAL)
LastLocalSymbolIndex++;
}
@@ -632,162 +681,245 @@ void ELFObjectWriter::WriteSymbolTable(MCDataFragment *SymtabF,
for (unsigned i = 0, e = UndefinedSymbolData.size(); i != e; ++i) {
ELFSymbolData &MSD = UndefinedSymbolData[i];
MCSymbolData &Data = *MSD.SymbolData;
- WriteSymbol(SymtabF, ShndxF, MSD, Layout);
+ WriteSymbol(Writer, MSD, Layout);
if (MCELF::GetBinding(Data) == ELF::STB_LOCAL)
LastLocalSymbolIndex++;
}
}
-const MCSymbol *ELFObjectWriter::SymbolToReloc(const MCAssembler &Asm,
- const MCValue &Target,
- const MCFragment &F,
- const MCFixup &Fixup,
- bool IsPCRel) const {
- const MCSymbol &Symbol = Target.getSymA()->getSymbol();
- const MCSymbol &ASymbol = Symbol.AliasedSymbol();
- const MCSymbol *Renamed = Renames.lookup(&Symbol);
- const MCSymbolData &SD = Asm.getSymbolData(Symbol);
-
- if (ASymbol.isUndefined()) {
- if (Renamed)
- return Renamed;
- return undefinedExplicitRelSym(Target, Fixup, IsPCRel);
- }
+// It is always valid to create a relocation with a symbol. It is preferable
+// to use a relocation with a section if that is possible. Using the section
+// allows us to omit some local symbols from the symbol table.
+bool ELFObjectWriter::shouldRelocateWithSymbol(const MCAssembler &Asm,
+ const MCSymbolRefExpr *RefA,
+ const MCSymbolData *SD,
+ uint64_t C,
+ unsigned Type) const {
+ // A PCRel relocation to an absolute value has no symbol (or section). We
+ // represent that with a relocation to a null section.
+ if (!RefA)
+ return false;
- if (SD.isExternal()) {
- if (Renamed)
- return Renamed;
- return &Symbol;
- }
+ MCSymbolRefExpr::VariantKind Kind = RefA->getKind();
+ switch (Kind) {
+ default:
+ break;
+ // The .odp creation emits a relocation against the symbol ".TOC." which
+ // creates an R_PPC64_TOC relocation. However, the relocation symbol name
+ // in final object creation should be NULL, since the symbol does not
+ // really exist, it is just the reference to TOC base for the current
+ // object file. Since the symbol is undefined, returning false results
+ // in a relocation with a null section which is the desired result.
+ case MCSymbolRefExpr::VK_PPC_TOCBASE:
+ return false;
- const MCSectionELF &Section =
- static_cast<const MCSectionELF&>(ASymbol.getSection());
- const SectionKind secKind = Section.getKind();
+ // These VariantKind cause the relocation to refer to something other than
+ // the symbol itself, like a linker generated table. Since the address of
+ // symbol is not relevant, we cannot replace the symbol with the
+ // section and patch the difference in the addend.
+ case MCSymbolRefExpr::VK_GOT:
+ case MCSymbolRefExpr::VK_PLT:
+ case MCSymbolRefExpr::VK_GOTPCREL:
+ case MCSymbolRefExpr::VK_Mips_GOT:
+ case MCSymbolRefExpr::VK_PPC_GOT_LO:
+ case MCSymbolRefExpr::VK_PPC_GOT_HI:
+ case MCSymbolRefExpr::VK_PPC_GOT_HA:
+ return true;
+ }
- if (secKind.isBSS())
- return ExplicitRelSym(Asm, Target, F, Fixup, IsPCRel);
+ // An undefined symbol is not in any section, so the relocation has to point
+ // to the symbol itself.
+ const MCSymbol &Sym = SD->getSymbol();
+ if (Sym.isUndefined())
+ return true;
- if (secKind.isThreadLocal()) {
- if (Renamed)
- return Renamed;
- return &Symbol;
+ unsigned Binding = MCELF::GetBinding(*SD);
+ switch(Binding) {
+ default:
+ llvm_unreachable("Invalid Binding");
+ case ELF::STB_LOCAL:
+ break;
+ case ELF::STB_WEAK:
+ // If the symbol is weak, it might be overridden by a symbol in another
+ // file. The relocation has to point to the symbol so that the linker
+ // can update it.
+ return true;
+ case ELF::STB_GLOBAL:
+ // Global ELF symbols can be preempted by the dynamic linker. The relocation
+ // has to point to the symbol for a reason analogous to the STB_WEAK case.
+ return true;
}
- MCSymbolRefExpr::VariantKind Kind = Target.getSymA()->getKind();
- const MCSectionELF &Sec2 =
- static_cast<const MCSectionELF&>(F.getParent()->getSection());
-
- if (&Sec2 != &Section &&
- (Kind == MCSymbolRefExpr::VK_PLT ||
- Kind == MCSymbolRefExpr::VK_GOTPCREL ||
- Kind == MCSymbolRefExpr::VK_GOTOFF)) {
- if (Renamed)
- return Renamed;
- return &Symbol;
- }
+ // If a relocation points to a mergeable section, we have to be careful.
+ // If the offset is zero, a relocation with the section will encode the
+ // same information. With a non-zero offset, the situation is different.
+ // For example, a relocation can point 42 bytes past the end of a string.
+ // If we change such a relocation to use the section, the linker would think
+ // that it pointed to another string and subtracting 42 at runtime will
+ // produce the wrong value.
+ auto &Sec = cast<MCSectionELF>(Sym.getSection());
+ unsigned Flags = Sec.getFlags();
+ if (Flags & ELF::SHF_MERGE) {
+ if (C != 0)
+ return true;
- if (Section.getFlags() & ELF::SHF_MERGE) {
- if (Target.getConstant() == 0)
- return ExplicitRelSym(Asm, Target, F, Fixup, IsPCRel);
- if (Renamed)
- return Renamed;
- return &Symbol;
+ // It looks like gold has a bug (http://sourceware.org/PR16794) and can
+ // only handle section relocations to mergeable sections if using RELA.
+ if (!hasRelocationAddend())
+ return true;
}
- return ExplicitRelSym(Asm, Target, F, Fixup, IsPCRel);
+ // Most TLS relocations use a GOT, so they need the symbol. Even those that
+ // are just an offset (@tpoff) require a symbol in some linkers (gold,
+ // but not bfd ld).
+ if (Flags & ELF::SHF_TLS)
+ return true;
+
+ // If the symbol is a Thumb function, the final relocation must set the
+ // lowest bit. With a symbol, that is done simply by having the symbol carry
+ // that bit, so we would lose the bit if we relocated with the section.
+ // FIXME: We could use the section but add the bit to the relocation value.
+ if (Asm.isThumbFunc(&Sym))
+ return true;
+ if (TargetObjectWriter->needsRelocateWithSymbol(*SD, Type))
+ return true;
+ return false;
}
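When shouldRelocateWithSymbol() returns false, the writer can emit the relocation against the symbol's section instead and fold the symbol's offset into the addend. A minimal stand-alone sketch of that conversion, not part of the patch and using illustrative names only:

// Sketch: rewriting "sym + C" as "section + (offsetof(sym) + C)".
#include <cstdint>

struct SectionReloc {
  uint64_t Offset;      // r_offset inside the fixup section
  unsigned SectionSym;  // symtab index of the section symbol (hypothetical)
  uint64_t Addend;      // addend carried by the relocation
};

SectionReloc relocateWithSection(uint64_t FixupOffset, unsigned SectionSym,
                                 uint64_t SymOffsetInSection, uint64_t C) {
  // The symbol's offset within its defining section is folded into the
  // addend, so the linker sees an equivalent section-relative relocation.
  return SectionReloc{FixupOffset, SectionSym, SymOffsetInSection + C};
}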
+static const MCSymbol *getWeakRef(const MCSymbolRefExpr &Ref) {
+ const MCSymbol &Sym = Ref.getSymbol();
+
+ if (Ref.getKind() == MCSymbolRefExpr::VK_WEAKREF)
+ return &Sym;
+
+ if (!Sym.isVariable())
+ return nullptr;
+
+ const MCExpr *Expr = Sym.getVariableValue();
+ const auto *Inner = dyn_cast<MCSymbolRefExpr>(Expr);
+ if (!Inner)
+ return nullptr;
+
+ if (Inner->getKind() == MCSymbolRefExpr::VK_WEAKREF)
+ return &Inner->getSymbol();
+ return nullptr;
+}
void ELFObjectWriter::RecordRelocation(const MCAssembler &Asm,
const MCAsmLayout &Layout,
const MCFragment *Fragment,
const MCFixup &Fixup,
MCValue Target,
+ bool &IsPCRel,
uint64_t &FixedValue) {
- int64_t Addend = 0;
- int Index = 0;
- int64_t Value = Target.getConstant();
- const MCSymbol *RelocSymbol = NULL;
-
- bool IsPCRel = isFixupKindPCRel(Asm, Fixup.getKind());
- if (!Target.isAbsolute()) {
- const MCSymbol &Symbol = Target.getSymA()->getSymbol();
- const MCSymbol &ASymbol = Symbol.AliasedSymbol();
- RelocSymbol = SymbolToReloc(Asm, Target, *Fragment, Fixup, IsPCRel);
-
- if (const MCSymbolRefExpr *RefB = Target.getSymB()) {
- const MCSymbol &SymbolB = RefB->getSymbol();
- MCSymbolData &SDB = Asm.getSymbolData(SymbolB);
- IsPCRel = true;
-
- // Offset of the symbol in the section
- int64_t a = Layout.getSymbolOffset(&SDB);
-
- // Offset of the relocation in the section
- int64_t b = Layout.getFragmentOffset(Fragment) + Fixup.getOffset();
- Value += b - a;
- }
+ const MCSectionData *FixupSection = Fragment->getParent();
+ uint64_t C = Target.getConstant();
+ uint64_t FixupOffset = Layout.getFragmentOffset(Fragment) + Fixup.getOffset();
+
+ if (const MCSymbolRefExpr *RefB = Target.getSymB()) {
+ assert(RefB->getKind() == MCSymbolRefExpr::VK_None &&
+ "Should not have constructed this");
+
+ // Let A, B and C be the components of Target and R be the location of
+ // the fixup. If the fixup is not pcrel, we want to compute (A - B + C).
+ // If it is pcrel, we want to compute (A - B + C - R).
+
+ // In general, ELF has no relocations for -B. It can only represent (A + C)
+ // or (A + C - R). If B = R + K and the relocation is not pcrel, we can
+ // replace B to implement it: (A - R - K + C)
+ if (IsPCRel)
+ Asm.getContext().FatalError(
+ Fixup.getLoc(),
+ "No relocation available to represent this relative expression");
+
+ const MCSymbol &SymB = RefB->getSymbol();
+
+ if (SymB.isUndefined())
+ Asm.getContext().FatalError(
+ Fixup.getLoc(),
+ Twine("symbol '") + SymB.getName() +
+ "' can not be undefined in a subtraction expression");
+
+ assert(!SymB.isAbsolute() && "Should have been folded");
+ const MCSection &SecB = SymB.getSection();
+ if (&SecB != &FixupSection->getSection())
+ Asm.getContext().FatalError(
+ Fixup.getLoc(), "Cannot represent a difference across sections");
+
+ const MCSymbolData &SymBD = Asm.getSymbolData(SymB);
+ uint64_t SymBOffset = Layout.getSymbolOffset(&SymBD);
+ uint64_t K = SymBOffset - FixupOffset;
+ IsPCRel = true;
+ C -= K;
+ }
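Worked out numerically: if B sits 0x30 bytes into the section and the fixup is at 0x10, then K = 0x20 and A - B + C becomes the pc-relative form A + (C - 0x20) - R. A tiny sketch of the fold, using assumed names rather than the MC API:

#include <cstdint>

struct FoldedExpr {
  bool IsPCRel;   // resulting relocation computes A + NewC - R
  uint64_t NewC;  // C' = C - K
};

// Assumes B is defined in the same section as the fixup, so B = R + K.
FoldedExpr foldSubtrahend(uint64_t FixupOffset, uint64_t SymBOffset,
                          uint64_t C) {
  uint64_t K = SymBOffset - FixupOffset;  // K = B - R
  // A - B + C == A - (R + K) + C == A + (C - K) - R
  return FoldedExpr{true, C - K};
}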
- if (!RelocSymbol) {
- MCSymbolData &SD = Asm.getSymbolData(ASymbol);
- MCFragment *F = SD.getFragment();
+ // We either rejected the fixup or folded B into C at this point.
+ const MCSymbolRefExpr *RefA = Target.getSymA();
+ const MCSymbol *SymA = RefA ? &RefA->getSymbol() : nullptr;
+ const MCSymbolData *SymAD = SymA ? &Asm.getSymbolData(*SymA) : nullptr;
- if (F) {
- Index = F->getParent()->getOrdinal() + 1;
- // Offset of the symbol in the section
- Value += Layout.getSymbolOffset(&SD);
- } else {
- Index = 0;
- }
- } else {
- if (Asm.getSymbolData(Symbol).getFlags() & ELF_Other_Weakref)
- WeakrefUsedInReloc.insert(RelocSymbol);
- else
- UsedInReloc.insert(RelocSymbol);
- Index = -1;
- }
- Addend = Value;
- if (hasRelocationAddend())
- Value = 0;
+ unsigned Type = GetRelocType(Target, Fixup, IsPCRel);
+ bool RelocateWithSymbol = shouldRelocateWithSymbol(Asm, RefA, SymAD, C, Type);
+ if (!RelocateWithSymbol && SymA && !SymA->isUndefined())
+ C += Layout.getSymbolOffset(SymAD);
+
+ uint64_t Addend = 0;
+ if (hasRelocationAddend()) {
+ Addend = C;
+ C = 0;
}
- FixedValue = Value;
- unsigned Type = GetRelocType(Target, Fixup, IsPCRel,
- (RelocSymbol != 0), Addend);
- MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ?
- MCSymbolRefExpr::VK_None : Target.getSymA()->getKind();
+ FixedValue = C;
+
+ // FIXME: What is this!?!?
+ MCSymbolRefExpr::VariantKind Modifier =
+ RefA ? RefA->getKind() : MCSymbolRefExpr::VK_None;
if (RelocNeedsGOT(Modifier))
NeedsGOT = true;
- uint64_t RelocOffset = Layout.getFragmentOffset(Fragment) +
- Fixup.getOffset();
+ if (!RelocateWithSymbol) {
+ const MCSection *SecA =
+ (SymA && !SymA->isUndefined()) ? &SymA->getSection() : nullptr;
+ const MCSectionData *SecAD = SecA ? &Asm.getSectionData(*SecA) : nullptr;
+ ELFRelocationEntry Rec(FixupOffset, SecAD, Type, Addend);
+ Relocations[FixupSection].push_back(Rec);
+ return;
+ }
- if (!hasRelocationAddend())
- Addend = 0;
+ if (SymA) {
+ if (const MCSymbol *R = Renames.lookup(SymA))
+ SymA = R;
- if (is64Bit())
- assert(isInt<64>(Addend));
- else
- assert(isInt<32>(Addend));
-
- ELFRelocationEntry ERE(RelocOffset, Index, Type, RelocSymbol, Addend, Fixup);
- Relocations[Fragment->getParent()].push_back(ERE);
+ if (const MCSymbol *WeakRef = getWeakRef(*RefA))
+ WeakrefUsedInReloc.insert(WeakRef);
+ else
+ UsedInReloc.insert(SymA);
+ }
+ ELFRelocationEntry Rec(FixupOffset, SymA, Type, Addend);
+ Relocations[FixupSection].push_back(Rec);
+ return;
}
uint64_t
ELFObjectWriter::getSymbolIndexInSymbolTable(const MCAssembler &Asm,
const MCSymbol *S) {
- MCSymbolData &SD = Asm.getSymbolData(*S);
+ const MCSymbolData &SD = Asm.getSymbolData(*S);
return SD.getIndex();
}
-bool ELFObjectWriter::isInSymtab(const MCAssembler &Asm,
- const MCSymbolData &Data,
- bool Used, bool Renamed) {
- if (Data.getFlags() & ELF_Other_Weakref)
- return false;
+bool ELFObjectWriter::isInSymtab(const MCAsmLayout &Layout,
+ const MCSymbolData &Data, bool Used,
+ bool Renamed) {
+ const MCSymbol &Symbol = Data.getSymbol();
+ if (Symbol.isVariable()) {
+ const MCExpr *Expr = Symbol.getVariableValue();
+ if (const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(Expr)) {
+ if (Ref->getKind() == MCSymbolRefExpr::VK_WEAKREF)
+ return false;
+ }
+ }
if (Used)
return true;
@@ -795,42 +927,35 @@ bool ELFObjectWriter::isInSymtab(const MCAssembler &Asm,
if (Renamed)
return false;
- const MCSymbol &Symbol = Data.getSymbol();
-
if (Symbol.getName() == "_GLOBAL_OFFSET_TABLE_")
return true;
- const MCSymbol &A = Symbol.AliasedSymbol();
- if (Symbol.isVariable() && !A.isVariable() && A.isUndefined())
- return false;
+ if (Symbol.isVariable()) {
+ const MCSymbol *Base = Layout.getBaseSymbol(Symbol);
+ if (Base && Base->isUndefined())
+ return false;
+ }
bool IsGlobal = MCELF::GetBinding(Data) == ELF::STB_GLOBAL;
if (!Symbol.isVariable() && Symbol.isUndefined() && !IsGlobal)
return false;
- if (!Asm.isSymbolLinkerVisible(Symbol) && !Symbol.isUndefined())
- return false;
-
if (Symbol.isTemporary())
return false;
return true;
}
-bool ELFObjectWriter::isLocal(const MCSymbolData &Data, bool isSignature,
- bool isUsedInReloc) {
+bool ELFObjectWriter::isLocal(const MCSymbolData &Data, bool isUsedInReloc) {
if (Data.isExternal())
return false;
const MCSymbol &Symbol = Data.getSymbol();
- const MCSymbol &RefSymbol = Symbol.AliasedSymbol();
-
- if (RefSymbol.isUndefined() && !RefSymbol.isVariable()) {
- if (isSignature && !isUsedInReloc)
- return true;
+ if (Symbol.isDefined())
+ return true;
+ if (isUsedInReloc)
return false;
- }
return true;
}
@@ -863,10 +988,11 @@ void ELFObjectWriter::ComputeIndexMap(MCAssembler &Asm,
}
}
-void ELFObjectWriter::ComputeSymbolTable(MCAssembler &Asm,
- const SectionIndexMapTy &SectionIndexMap,
- RevGroupMapTy RevGroupMap,
- unsigned NumRegularSections) {
+void
+ELFObjectWriter::computeSymbolTable(MCAssembler &Asm, const MCAsmLayout &Layout,
+ const SectionIndexMapTy &SectionIndexMap,
+ RevGroupMapTy RevGroupMap,
+ unsigned NumRegularSections) {
// FIXME: Is this the correct place to do this?
// FIXME: Why is an undefined reference to _GLOBAL_OFFSET_TABLE_ needed?
if (NeedsGOT) {
@@ -877,73 +1003,49 @@ void ELFObjectWriter::ComputeSymbolTable(MCAssembler &Asm,
MCELF::SetBinding(Data, ELF::STB_GLOBAL);
}
- // Index 0 is always the empty string.
- StringMap<uint64_t> StringIndexMap;
- StringTable += '\x00';
-
- // FIXME: We could optimize suffixes in strtab in the same way we
- // optimize them in shstrtab.
-
- for (MCAssembler::const_file_name_iterator it = Asm.file_names_begin(),
- ie = Asm.file_names_end();
- it != ie;
- ++it) {
- StringRef Name = *it;
- uint64_t &Entry = StringIndexMap[Name];
- if (!Entry) {
- Entry = StringTable.size();
- StringTable += Name;
- StringTable += '\x00';
- }
- FileSymbolData.push_back(Entry);
- }
-
// Add the data for the symbols.
- for (MCAssembler::symbol_iterator it = Asm.symbol_begin(),
- ie = Asm.symbol_end(); it != ie; ++it) {
- const MCSymbol &Symbol = it->getSymbol();
+ for (MCSymbolData &SD : Asm.symbols()) {
+ const MCSymbol &Symbol = SD.getSymbol();
bool Used = UsedInReloc.count(&Symbol);
bool WeakrefUsed = WeakrefUsedInReloc.count(&Symbol);
bool isSignature = RevGroupMap.count(&Symbol);
- if (!isInSymtab(Asm, *it,
+ if (!isInSymtab(Layout, SD,
Used || WeakrefUsed || isSignature,
Renames.count(&Symbol)))
continue;
ELFSymbolData MSD;
- MSD.SymbolData = it;
- const MCSymbol &RefSymbol = Symbol.AliasedSymbol();
+ MSD.SymbolData = &SD;
+ const MCSymbol *BaseSymbol = Layout.getBaseSymbol(Symbol);
// Undefined symbols are global, but this is the first place we
// are able to set it.
- bool Local = isLocal(*it, isSignature, Used);
- if (!Local && MCELF::GetBinding(*it) == ELF::STB_LOCAL) {
- MCSymbolData &SD = Asm.getSymbolData(RefSymbol);
- MCELF::SetBinding(*it, ELF::STB_GLOBAL);
+ bool Local = isLocal(SD, Used);
+ if (!Local && MCELF::GetBinding(SD) == ELF::STB_LOCAL) {
+ assert(BaseSymbol);
+ MCSymbolData &BaseData = Asm.getSymbolData(*BaseSymbol);
MCELF::SetBinding(SD, ELF::STB_GLOBAL);
+ MCELF::SetBinding(BaseData, ELF::STB_GLOBAL);
}
- if (RefSymbol.isUndefined() && !Used && WeakrefUsed)
- MCELF::SetBinding(*it, ELF::STB_WEAK);
-
- if (it->isCommon()) {
+ if (!BaseSymbol) {
+ MSD.SectionIndex = ELF::SHN_ABS;
+ } else if (SD.isCommon()) {
assert(!Local);
MSD.SectionIndex = ELF::SHN_COMMON;
- } else if (Symbol.isAbsolute() || RefSymbol.isVariable()) {
- MSD.SectionIndex = ELF::SHN_ABS;
- } else if (RefSymbol.isUndefined()) {
+ } else if (BaseSymbol->isUndefined()) {
if (isSignature && !Used)
MSD.SectionIndex = SectionIndexMap.lookup(RevGroupMap[&Symbol]);
else
MSD.SectionIndex = ELF::SHN_UNDEF;
+ if (!Used && WeakrefUsed)
+ MCELF::SetBinding(SD, ELF::STB_WEAK);
} else {
const MCSectionELF &Section =
- static_cast<const MCSectionELF&>(RefSymbol.getSection());
+ static_cast<const MCSectionELF&>(BaseSymbol->getSection());
MSD.SectionIndex = SectionIndexMap.lookup(&Section);
- if (MSD.SectionIndex >= ELF::SHN_LORESERVE)
- NeedsSymtabShndx = true;
assert(MSD.SectionIndex && "Invalid section index!");
}
@@ -951,7 +1053,6 @@ void ELFObjectWriter::ComputeSymbolTable(MCAssembler &Asm,
// @@ in defined ones.
StringRef Name = Symbol.getName();
SmallString<32> Buf;
-
size_t Pos = Name.find("@@@");
if (Pos != StringRef::npos) {
Buf += Name.substr(0, Pos);
@@ -959,14 +1060,8 @@ void ELFObjectWriter::ComputeSymbolTable(MCAssembler &Asm,
Buf += Name.substr(Pos + Skip);
Name = Buf;
}
+ MSD.Name = StrTabBuilder.add(Name);
- uint64_t &Entry = StringIndexMap[Name];
- if (!Entry) {
- Entry = StringTable.size();
- StringTable += Name;
- StringTable += '\x00';
- }
- MSD.StringIndex = Entry;
if (MSD.SectionIndex == ELF::SHN_UNDEF)
UndefinedSymbolData.push_back(MSD);
else if (Local)
@@ -975,6 +1070,21 @@ void ELFObjectWriter::ComputeSymbolTable(MCAssembler &Asm,
ExternalSymbolData.push_back(MSD);
}
+ for (auto i = Asm.file_names_begin(), e = Asm.file_names_end(); i != e; ++i)
+ StrTabBuilder.add(*i);
+
+ StrTabBuilder.finalize();
+
+ for (auto i = Asm.file_names_begin(), e = Asm.file_names_end(); i != e; ++i)
+ FileSymbolData.push_back(StrTabBuilder.getOffset(*i));
+
+ for (ELFSymbolData& MSD : LocalSymbolData)
+ MSD.StringIndex = StrTabBuilder.getOffset(MSD.Name);
+ for (ELFSymbolData& MSD : ExternalSymbolData)
+ MSD.StringIndex = StrTabBuilder.getOffset(MSD.Name);
+ for (ELFSymbolData& MSD : UndefinedSymbolData)
+ MSD.StringIndex = StrTabBuilder.getOffset(MSD.Name);
+
// Symbols are required to be in lexicographic order.
array_pod_sort(LocalSymbolData.begin(), LocalSymbolData.end());
array_pod_sort(ExternalSymbolData.begin(), ExternalSymbolData.end());
@@ -992,9 +1102,6 @@ void ELFObjectWriter::ComputeSymbolTable(MCAssembler &Asm,
ExternalSymbolData[i].SymbolData->setIndex(Index++);
for (unsigned i = 0, e = UndefinedSymbolData.size(); i != e; ++i)
UndefinedSymbolData[i].SymbolData->setIndex(Index++);
-
- if (Index >= ELF::SHN_LORESERVE)
- NeedsSymtabShndx = true;
}
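The add/finalize/getOffset sequence above is the usual string-table-builder pattern: every name is registered first, the layout is fixed once, and only then are offsets queried. A toy version for illustration only (it deduplicates exact matches, whereas the real builder can also share suffixes, e.g. "bar" reusing the tail of "foobar"):

#include <cstdint>
#include <map>
#include <string>

class ToyStringTable {
  std::string Data{std::string(1, '\0')};   // index 0 is the empty string
  std::map<std::string, uint64_t> Offsets;

public:
  void add(const std::string &S) { Offsets.emplace(S, 0); }

  void finalize() {
    // Assign each unique name an offset and append it NUL-terminated.
    for (auto &KV : Offsets) {
      KV.second = Data.size();
      Data += KV.first;
      Data += '\0';
    }
  }

  uint64_t getOffset(const std::string &S) const { return Offsets.at(S); }
  const std::string &data() const { return Data; }
};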
void ELFObjectWriter::CreateRelocationSections(MCAssembler &Asm,
@@ -1037,6 +1144,151 @@ void ELFObjectWriter::CreateRelocationSections(MCAssembler &Asm,
}
}
+static SmallVector<char, 128>
+getUncompressedData(MCAsmLayout &Layout,
+ MCSectionData::FragmentListType &Fragments) {
+ SmallVector<char, 128> UncompressedData;
+ for (const MCFragment &F : Fragments) {
+ const SmallVectorImpl<char> *Contents;
+ switch (F.getKind()) {
+ case MCFragment::FT_Data:
+ Contents = &cast<MCDataFragment>(F).getContents();
+ break;
+ case MCFragment::FT_Dwarf:
+ Contents = &cast<MCDwarfLineAddrFragment>(F).getContents();
+ break;
+ case MCFragment::FT_DwarfFrame:
+ Contents = &cast<MCDwarfCallFrameFragment>(F).getContents();
+ break;
+ default:
+ llvm_unreachable(
+ "Not expecting any other fragment types in a debug_* section");
+ }
+ UncompressedData.append(Contents->begin(), Contents->end());
+ }
+ return UncompressedData;
+}
+
+// Include the debug info compression header:
+// "ZLIB" followed by 8 bytes representing the uncompressed size of the section,
+// useful for consumers to preallocate a buffer to decompress into.
+static bool
+prependCompressionHeader(uint64_t Size,
+ SmallVectorImpl<char> &CompressedContents) {
+ static const StringRef Magic = "ZLIB";
+ if (Size <= Magic.size() + sizeof(Size) + CompressedContents.size())
+ return false;
+ if (sys::IsLittleEndianHost)
+ sys::swapByteOrder(Size);
+ CompressedContents.insert(CompressedContents.begin(),
+ Magic.size() + sizeof(Size), 0);
+ std::copy(Magic.begin(), Magic.end(), CompressedContents.begin());
+ std::copy(reinterpret_cast<char *>(&Size),
+ reinterpret_cast<char *>(&Size + 1),
+ CompressedContents.begin() + Magic.size());
+ return true;
+}
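The resulting section layout is the 4-byte magic "ZLIB", the uncompressed size as a big-endian 64-bit integer, and then the zlib stream. A stand-alone sketch that builds just the 12-byte header:

#include <cstdint>
#include <vector>

std::vector<char> makeZDebugHeader(uint64_t UncompressedSize) {
  std::vector<char> Header = {'Z', 'L', 'I', 'B'};
  for (int Shift = 56; Shift >= 0; Shift -= 8)  // big-endian 64-bit size
    Header.push_back(static_cast<char>((UncompressedSize >> Shift) & 0xff));
  return Header;                                // followed by the zlib stream
}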
+
+// Return a single fragment containing the compressed contents of the whole
+// section. Null if the section was not compressed for any reason.
+static std::unique_ptr<MCDataFragment>
+getCompressedFragment(MCAsmLayout &Layout,
+ MCSectionData::FragmentListType &Fragments) {
+ std::unique_ptr<MCDataFragment> CompressedFragment(new MCDataFragment());
+
+ // Gather the uncompressed data from all the fragments, recording the
+ // alignment fragment, if seen, and any fixups.
+ SmallVector<char, 128> UncompressedData =
+ getUncompressedData(Layout, Fragments);
+
+ SmallVectorImpl<char> &CompressedContents = CompressedFragment->getContents();
+
+ zlib::Status Success = zlib::compress(
+ StringRef(UncompressedData.data(), UncompressedData.size()),
+ CompressedContents);
+ if (Success != zlib::StatusOK)
+ return nullptr;
+
+ if (!prependCompressionHeader(UncompressedData.size(), CompressedContents))
+ return nullptr;
+
+ return CompressedFragment;
+}
+
+typedef DenseMap<const MCSectionData *, std::vector<MCSymbolData *>>
+DefiningSymbolMap;
+
+static void UpdateSymbols(const MCAsmLayout &Layout,
+ const std::vector<MCSymbolData *> &Symbols,
+ MCFragment &NewFragment) {
+ for (MCSymbolData *Sym : Symbols) {
+ Sym->setOffset(Sym->getOffset() +
+ Layout.getFragmentOffset(Sym->getFragment()));
+ Sym->setFragment(&NewFragment);
+ }
+}
+
+static void CompressDebugSection(MCAssembler &Asm, MCAsmLayout &Layout,
+ const DefiningSymbolMap &DefiningSymbols,
+ const MCSectionELF &Section,
+ MCSectionData &SD) {
+ StringRef SectionName = Section.getSectionName();
+ MCSectionData::FragmentListType &Fragments = SD.getFragmentList();
+
+ std::unique_ptr<MCDataFragment> CompressedFragment =
+ getCompressedFragment(Layout, Fragments);
+
+ // Leave the section as-is if the fragments could not be compressed.
+ if (!CompressedFragment)
+ return;
+
+ // Update the fragment+offsets of any symbols referring to fragments in this
+ // section to refer to the new fragment.
+ auto I = DefiningSymbols.find(&SD);
+ if (I != DefiningSymbols.end())
+ UpdateSymbols(Layout, I->second, *CompressedFragment);
+
+ // Invalidate the layout for the whole section since it will have new and
+ // different fragments now.
+ Layout.invalidateFragmentsFrom(&Fragments.front());
+ Fragments.clear();
+
+ // Complete the initialization of the new fragment
+ CompressedFragment->setParent(&SD);
+ CompressedFragment->setLayoutOrder(0);
+ Fragments.push_back(CompressedFragment.release());
+
+ // Rename from .debug_* to .zdebug_*
+ Asm.getContext().renameELFSection(&Section,
+ (".z" + SectionName.drop_front(1)).str());
+}
+
+void ELFObjectWriter::CompressDebugSections(MCAssembler &Asm,
+ MCAsmLayout &Layout) {
+ if (!Asm.getContext().getAsmInfo()->compressDebugSections())
+ return;
+
+ DefiningSymbolMap DefiningSymbols;
+
+ for (MCSymbolData &SD : Asm.symbols())
+ if (MCFragment *F = SD.getFragment())
+ DefiningSymbols[F->getParent()].push_back(&SD);
+
+ for (MCSectionData &SD : Asm) {
+ const MCSectionELF &Section =
+ static_cast<const MCSectionELF &>(SD.getSection());
+ StringRef SectionName = Section.getSectionName();
+
+ // Compressing debug_frame requires handling alignment fragments which is
+ // more work (possibly generalizing MCAssembler.cpp:writeFragment to allow
+ // for writing to arbitrary buffers) for little benefit.
+ if (!SectionName.startswith(".debug_") || SectionName == ".debug_frame")
+ continue;
+
+ CompressDebugSection(Asm, Layout, DefiningSymbols, Section, SD);
+ }
+}
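On the consumer side, a reader that encounters a .zdebug_* section would check the magic, read the big-endian size, and inflate the payload. A sketch under the assumption that the raw section bytes are already in memory and the system zlib is linked:

#include <cstdint>
#include <cstring>
#include <vector>
#include <zlib.h>

bool decompressZDebug(const std::vector<char> &Section,
                      std::vector<char> &Out) {
  if (Section.size() < 12 || std::memcmp(Section.data(), "ZLIB", 4) != 0)
    return false;                         // not a compressed debug section
  uint64_t Size = 0;
  for (int I = 0; I < 8; ++I)             // 8-byte big-endian size field
    Size = (Size << 8) | static_cast<unsigned char>(Section[4 + I]);
  Out.resize(Size);
  uLongf DestLen = Size;                  // assumes Size fits in uLongf
  return uncompress(reinterpret_cast<Bytef *>(Out.data()), &DestLen,
                    reinterpret_cast<const Bytef *>(Section.data()) + 12,
                    Section.size() - 12) == Z_OK &&
         DestLen == Size;
}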
+
void ELFObjectWriter::WriteRelocations(MCAssembler &Asm, MCAsmLayout &Layout,
const RelMapTy &RelMap) {
for (MCAssembler::const_iterator it = Asm.begin(),
@@ -1074,71 +1326,75 @@ void ELFObjectWriter::WriteSecHdrEntry(uint32_t Name, uint32_t Type,
WriteWord(EntrySize); // sh_entsize
}
+// ELF doesn't require relocations to be in any order. We sort by the r_offset,
+// just to match gnu as for easier comparison. Using the type as a secondary
+// key is an arbitrary way of making the sort deterministic.
+static int cmpRel(const ELFRelocationEntry *AP, const ELFRelocationEntry *BP) {
+ const ELFRelocationEntry &A = *AP;
+ const ELFRelocationEntry &B = *BP;
+ if (A.Offset != B.Offset)
+ return B.Offset - A.Offset;
+ if (B.Type != A.Type)
+ return A.Type - B.Type;
+ llvm_unreachable("ELFRelocs might be unstable!");
+}
+
+static void sortRelocs(const MCAssembler &Asm,
+ std::vector<ELFRelocationEntry> &Relocs) {
+ array_pod_sort(Relocs.begin(), Relocs.end(), cmpRel);
+}
+
void ELFObjectWriter::WriteRelocationsFragment(const MCAssembler &Asm,
MCDataFragment *F,
const MCSectionData *SD) {
std::vector<ELFRelocationEntry> &Relocs = Relocations[SD];
- // Sort the relocation entries. Most targets just sort by r_offset, but some
- // (e.g., MIPS) have additional constraints.
- TargetObjectWriter->sortRelocs(Asm, Relocs);
+ sortRelocs(Asm, Relocs);
for (unsigned i = 0, e = Relocs.size(); i != e; ++i) {
- ELFRelocationEntry entry = Relocs[e - i - 1];
+ const ELFRelocationEntry &Entry = Relocs[e - i - 1];
+
+ unsigned Index;
+ if (Entry.UseSymbol) {
+ Index = getSymbolIndexInSymbolTable(Asm, Entry.Symbol);
+ } else {
+ const MCSectionData *Sec = Entry.Section;
+ if (Sec)
+ Index = Sec->getOrdinal() + FileSymbolData.size() +
+ LocalSymbolData.size() + 1;
+ else
+ Index = 0;
+ }
- if (!entry.Index)
- ;
- else if (entry.Index < 0)
- entry.Index = getSymbolIndexInSymbolTable(Asm, entry.Symbol);
- else
- entry.Index += FileSymbolData.size() + LocalSymbolData.size();
if (is64Bit()) {
- String64(*F, entry.r_offset);
+ write(*F, Entry.Offset);
if (TargetObjectWriter->isN64()) {
- String32(*F, entry.Index);
+ write(*F, uint32_t(Index));
- String8(*F, TargetObjectWriter->getRSsym(entry.Type));
- String8(*F, TargetObjectWriter->getRType3(entry.Type));
- String8(*F, TargetObjectWriter->getRType2(entry.Type));
- String8(*F, TargetObjectWriter->getRType(entry.Type));
- }
- else {
+ write(*F, TargetObjectWriter->getRSsym(Entry.Type));
+ write(*F, TargetObjectWriter->getRType3(Entry.Type));
+ write(*F, TargetObjectWriter->getRType2(Entry.Type));
+ write(*F, TargetObjectWriter->getRType(Entry.Type));
+ } else {
struct ELF::Elf64_Rela ERE64;
- ERE64.setSymbolAndType(entry.Index, entry.Type);
- String64(*F, ERE64.r_info);
+ ERE64.setSymbolAndType(Index, Entry.Type);
+ write(*F, ERE64.r_info);
}
if (hasRelocationAddend())
- String64(*F, entry.r_addend);
+ write(*F, Entry.Addend);
} else {
- String32(*F, entry.r_offset);
+ write(*F, uint32_t(Entry.Offset));
struct ELF::Elf32_Rela ERE32;
- ERE32.setSymbolAndType(entry.Index, entry.Type);
- String32(*F, ERE32.r_info);
+ ERE32.setSymbolAndType(Index, Entry.Type);
+ write(*F, ERE32.r_info);
if (hasRelocationAddend())
- String32(*F, entry.r_addend);
+ write(*F, uint32_t(Entry.Addend));
}
}
}
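For reference, setSymbolAndType() above packs the symbol-table index and the relocation type into a single r_info word as defined by the ELF specification; the packing amounts to:

#include <cstdint>

// ELF64: upper 32 bits are the symbol index, lower 32 bits the type.
inline uint64_t elf64RInfo(uint32_t SymIndex, uint32_t Type) {
  return (static_cast<uint64_t>(SymIndex) << 32) | Type;
}

// ELF32: upper 24 bits are the symbol index, lower 8 bits the type.
inline uint32_t elf32RInfo(uint32_t SymIndex, uint8_t Type) {
  return (SymIndex << 8) | Type;
}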
-static int compareBySuffix(const MCSectionELF *const *a,
- const MCSectionELF *const *b) {
- const StringRef &NameA = (*a)->getSectionName();
- const StringRef &NameB = (*b)->getSectionName();
- const unsigned sizeA = NameA.size();
- const unsigned sizeB = NameB.size();
- const unsigned len = std::min(sizeA, sizeB);
- for (unsigned int i = 0; i < len; ++i) {
- char ca = NameA[sizeA - i - 1];
- char cb = NameB[sizeB - i - 1];
- if (ca != cb)
- return cb - ca;
- }
-
- return sizeB - sizeA;
-}
-
void ELFObjectWriter::CreateMetadataSections(MCAssembler &Asm,
MCAsmLayout &Layout,
SectionIndexMapTy &SectionIndexMap,
@@ -1162,16 +1418,6 @@ void ELFObjectWriter::CreateMetadataSections(MCAssembler &Asm,
MCSectionData &SymtabSD = Asm.getOrCreateSectionData(*SymtabSection);
SymtabSD.setAlignment(is64Bit() ? 8 : 4);
- MCSectionData *SymtabShndxSD = NULL;
-
- if (NeedsSymtabShndx) {
- const MCSectionELF *SymtabShndxSection =
- Ctx.getELFSection(".symtab_shndx", ELF::SHT_SYMTAB_SHNDX, 0,
- SectionKind::getReadOnly(), 4, "");
- SymtabShndxSD = &Asm.getOrCreateSectionData(*SymtabShndxSection);
- SymtabShndxSD->setAlignment(4);
- }
-
const MCSectionELF *StrtabSection;
StrtabSection = Ctx.getELFSection(".strtab", ELF::SHT_STRTAB, 0,
SectionKind::getReadOnly());
@@ -1186,52 +1432,23 @@ void ELFObjectWriter::CreateMetadataSections(MCAssembler &Asm,
// Symbol table
F = new MCDataFragment(&SymtabSD);
- MCDataFragment *ShndxF = NULL;
- if (NeedsSymtabShndx) {
- ShndxF = new MCDataFragment(SymtabShndxSD);
- }
- WriteSymbolTable(F, ShndxF, Asm, Layout, SectionIndexMap);
+ WriteSymbolTable(F, Asm, Layout, SectionIndexMap);
F = new MCDataFragment(&StrtabSD);
- F->getContents().append(StringTable.begin(), StringTable.end());
+ F->getContents().append(StrTabBuilder.data().begin(),
+ StrTabBuilder.data().end());
F = new MCDataFragment(&ShstrtabSD);
- std::vector<const MCSectionELF*> Sections;
- for (MCAssembler::const_iterator it = Asm.begin(),
- ie = Asm.end(); it != ie; ++it) {
+ // Section header string table.
+ for (auto it = Asm.begin(), ie = Asm.end(); it != ie; ++it) {
const MCSectionELF &Section =
static_cast<const MCSectionELF&>(it->getSection());
- Sections.push_back(&Section);
- }
- array_pod_sort(Sections.begin(), Sections.end(), compareBySuffix);
-
- // Section header string table.
- //
- // The first entry of a string table holds a null character so skip
- // section 0.
- uint64_t Index = 1;
- F->getContents().push_back('\x00');
-
- for (unsigned int I = 0, E = Sections.size(); I != E; ++I) {
- const MCSectionELF &Section = *Sections[I];
-
- StringRef Name = Section.getSectionName();
- if (I != 0) {
- StringRef PreviousName = Sections[I - 1]->getSectionName();
- if (PreviousName.endswith(Name)) {
- SectionStringTableIndex[&Section] = Index - Name.size() - 1;
- continue;
- }
- }
- // Remember the index into the string table so we can write it
- // into the sh_name field of the section header table.
- SectionStringTableIndex[&Section] = Index;
-
- Index += Name.size() + 1;
- F->getContents().append(Name.begin(), Name.end());
- F->getContents().push_back('\x00');
+ ShStrTabBuilder.add(Section.getSectionName());
}
+ ShStrTabBuilder.finalize();
+ F->getContents().append(ShStrTabBuilder.data().begin(),
+ ShStrTabBuilder.data().end());
}
void ELFObjectWriter::CreateIndexedSections(MCAssembler &Asm,
@@ -1265,7 +1482,7 @@ void ELFObjectWriter::CreateIndexedSections(MCAssembler &Asm,
MCSectionData &Data = Asm.getOrCreateSectionData(*Group);
Data.setAlignment(4);
MCDataFragment *F = new MCDataFragment(&Data);
- String32(*F, ELF::GRP_COMDAT);
+ write(*F, uint32_t(ELF::GRP_COMDAT));
}
GroupMap[Group] = SignatureSymbol;
}
@@ -1283,8 +1500,8 @@ void ELFObjectWriter::CreateIndexedSections(MCAssembler &Asm,
MCSectionData &Data = Asm.getOrCreateSectionData(*Group);
// FIXME: we could use the previous fragment
MCDataFragment *F = new MCDataFragment(&Data);
- unsigned Index = SectionIndexMap.lookup(&Section);
- String32(*F, Index);
+ uint32_t Index = SectionIndexMap.lookup(&Section);
+ write(*F, Index);
}
}
@@ -1299,7 +1516,7 @@ void ELFObjectWriter::WriteSection(MCAssembler &Asm,
switch(Section.getType()) {
case ELF::SHT_DYNAMIC:
- sh_link = SectionStringTableIndex[&Section];
+ sh_link = ShStrTabBuilder.getOffset(Section.getSectionName());
sh_info = 0;
break;
@@ -1348,6 +1565,7 @@ void ELFObjectWriter::WriteSection(MCAssembler &Asm,
case ELF::SHT_X86_64_UNWIND:
case ELF::SHT_MIPS_REGINFO:
case ELF::SHT_MIPS_OPTIONS:
+ case ELF::SHT_MIPS_ABIFLAGS:
// Nothing to do.
break;
@@ -1357,8 +1575,7 @@ void ELFObjectWriter::WriteSection(MCAssembler &Asm,
break;
default:
- assert(0 && "FIXME: sh_type value not supported!");
- break;
+ llvm_unreachable("FIXME: sh_type value not supported!");
}
if (TargetObjectWriter->getEMachine() == ELF::EM_ARM &&
@@ -1380,7 +1597,8 @@ void ELFObjectWriter::WriteSection(MCAssembler &Asm,
}
}
- WriteSecHdrEntry(SectionStringTableIndex[&Section], Section.getType(),
+ WriteSecHdrEntry(ShStrTabBuilder.getOffset(Section.getSectionName()),
+ Section.getType(),
Section.getFlags(), 0, Offset, Size, sh_link, sh_info,
Alignment, Section.getEntrySize());
}
@@ -1514,6 +1732,8 @@ void ELFObjectWriter::WriteObject(MCAssembler &Asm,
unsigned NumUserSections = Asm.size();
+ CompressDebugSections(Asm, const_cast<MCAsmLayout &>(Layout));
+
DenseMap<const MCSectionELF*, const MCSectionELF*> RelMap;
CreateRelocationSections(Asm, const_cast<MCAsmLayout&>(Layout), RelMap);
@@ -1526,8 +1746,8 @@ void ELFObjectWriter::WriteObject(MCAssembler &Asm,
unsigned NumRegularSections = NumUserSections + NumIndexedSections;
// Compute symbol table information.
- ComputeSymbolTable(Asm, SectionIndexMap, RevGroupMap, NumRegularSections);
-
+ computeSymbolTable(Asm, Layout, SectionIndexMap, RevGroupMap,
+ NumRegularSections);
WriteRelocations(Asm, const_cast<MCAsmLayout&>(Layout), RelMap);
@@ -1605,7 +1825,7 @@ ELFObjectWriter::IsSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
const MCFragment &FB,
bool InSet,
bool IsPCRel) const {
- if (DataA.getFlags() & ELF_STB_Weak)
+ if (DataA.getFlags() & ELF_STB_Weak || MCELF::GetType(DataA) == ELF::STT_GNU_IFUNC)
return false;
return MCObjectWriter::IsSymbolRefDifferenceFullyResolvedImpl(
Asm, DataA, FB,InSet, IsPCRel);
diff --git a/contrib/llvm/lib/MC/MCAnalysis/CMakeLists.txt b/contrib/llvm/lib/MC/MCAnalysis/CMakeLists.txt
new file mode 100644
index 0000000..81eae2d
--- /dev/null
+++ b/contrib/llvm/lib/MC/MCAnalysis/CMakeLists.txt
@@ -0,0 +1,8 @@
+add_llvm_library(LLVMMCAnalysis
+ MCAtom.cpp
+ MCFunction.cpp
+ MCModule.cpp
+ MCModuleYAML.cpp
+ MCObjectDisassembler.cpp
+ MCObjectSymbolizer.cpp
+)
diff --git a/contrib/llvm/lib/MC/MCAnalysis/LLVMBuild.txt b/contrib/llvm/lib/MC/MCAnalysis/LLVMBuild.txt
new file mode 100644
index 0000000..1b58fec
--- /dev/null
+++ b/contrib/llvm/lib/MC/MCAnalysis/LLVMBuild.txt
@@ -0,0 +1,5 @@
+[component_0]
+type = Library
+name = MCAnalysis
+parent = Libraries
+required_libraries = MC Object Support
diff --git a/contrib/llvm/lib/MC/MCAtom.cpp b/contrib/llvm/lib/MC/MCAnalysis/MCAtom.cpp
index bc353cd..82056ee 100644
--- a/contrib/llvm/lib/MC/MCAtom.cpp
+++ b/contrib/llvm/lib/MC/MCAnalysis/MCAtom.cpp
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/MC/MCAtom.h"
-#include "llvm/MC/MCModule.h"
+#include "llvm/MC/MCAnalysis/MCAtom.h"
+#include "llvm/MC/MCAnalysis/MCModule.h"
#include "llvm/Support/ErrorHandling.h"
#include <iterator>
diff --git a/contrib/llvm/lib/MC/MCFunction.cpp b/contrib/llvm/lib/MC/MCAnalysis/MCFunction.cpp
index 767e1e0..4e09d1a 100644
--- a/contrib/llvm/lib/MC/MCFunction.cpp
+++ b/contrib/llvm/lib/MC/MCAnalysis/MCFunction.cpp
@@ -7,9 +7,9 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/MC/MCFunction.h"
-#include "llvm/MC/MCAtom.h"
-#include "llvm/MC/MCModule.h"
+#include "llvm/MC/MCAnalysis/MCFunction.h"
+#include "llvm/MC/MCAnalysis/MCAtom.h"
+#include "llvm/MC/MCAnalysis/MCModule.h"
#include <algorithm>
using namespace llvm;
@@ -20,22 +20,17 @@ MCFunction::MCFunction(StringRef Name, MCModule *Parent)
: Name(Name), ParentModule(Parent)
{}
-MCFunction::~MCFunction() {
- for (iterator I = begin(), E = end(); I != E; ++I)
- delete *I;
-}
-
MCBasicBlock &MCFunction::createBlock(const MCTextAtom &TA) {
- MCBasicBlock *MCBB = new MCBasicBlock(TA, this);
- Blocks.push_back(MCBB);
- return *MCBB;
+ std::unique_ptr<MCBasicBlock> MCBB(new MCBasicBlock(TA, this));
+ Blocks.push_back(std::move(MCBB));
+ return *Blocks.back();
}
MCBasicBlock *MCFunction::find(uint64_t StartAddr) {
for (const_iterator I = begin(), E = end(); I != E; ++I)
if ((*I)->getInsts()->getBeginAddr() == StartAddr)
- return *I;
- return 0;
+ return I->get();
+ return nullptr;
}
const MCBasicBlock *MCFunction::find(uint64_t StartAddr) const {
diff --git a/contrib/llvm/lib/MC/MCModule.cpp b/contrib/llvm/lib/MC/MCAnalysis/MCModule.cpp
index 7e9e18a..7512299 100644
--- a/contrib/llvm/lib/MC/MCModule.cpp
+++ b/contrib/llvm/lib/MC/MCAnalysis/MCModule.cpp
@@ -7,9 +7,10 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/MC/MCModule.h"
-#include "llvm/MC/MCAtom.h"
-#include "llvm/MC/MCFunction.h"
+#include "llvm/MC/MCAnalysis/MCModule.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/MC/MCAnalysis/MCAtom.h"
+#include "llvm/MC/MCAnalysis/MCFunction.h"
#include <algorithm>
using namespace llvm;
@@ -77,7 +78,7 @@ const MCAtom *MCModule::findAtomContaining(uint64_t Addr) const {
Addr, AtomComp);
if (I != atom_end() && (*I)->getBeginAddr() <= Addr)
return *I;
- return 0;
+ return nullptr;
}
MCAtom *MCModule::findAtomContaining(uint64_t Addr) {
@@ -90,7 +91,7 @@ const MCAtom *MCModule::findFirstAtomAfter(uint64_t Addr) const {
Addr, AtomCompInv);
if (I != atom_end())
return *I;
- return 0;
+ return nullptr;
}
MCAtom *MCModule::findFirstAtomAfter(uint64_t Addr) {
@@ -99,8 +100,9 @@ MCAtom *MCModule::findFirstAtomAfter(uint64_t Addr) {
}
MCFunction *MCModule::createFunction(StringRef Name) {
- Functions.push_back(new MCFunction(Name, this));
- return Functions.back();
+ std::unique_ptr<MCFunction> MCF(new MCFunction(Name, this));
+ Functions.push_back(std::move(MCF));
+ return Functions.back().get();
}
static bool CompBBToAtom(MCBasicBlock *BB, const MCTextAtom *Atom) {
@@ -130,13 +132,11 @@ void MCModule::trackBBForAtom(const MCTextAtom *Atom, MCBasicBlock *BB) {
BBsByAtom.insert(I, BB);
}
+MCModule::MCModule() : Entrypoint(0) { }
+
MCModule::~MCModule() {
for (AtomListTy::iterator AI = atom_begin(),
AE = atom_end();
AI != AE; ++AI)
delete *AI;
- for (FunctionListTy::iterator FI = func_begin(),
- FE = func_end();
- FI != FE; ++FI)
- delete *FI;
}
diff --git a/contrib/llvm/lib/MC/MCModuleYAML.cpp b/contrib/llvm/lib/MC/MCAnalysis/MCModuleYAML.cpp
index e2de578..876b06d 100644
--- a/contrib/llvm/lib/MC/MCModuleYAML.cpp
+++ b/contrib/llvm/lib/MC/MCAnalysis/MCModuleYAML.cpp
@@ -11,14 +11,15 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/MC/MCModuleYAML.h"
+#include "llvm/MC/MCAnalysis/MCModuleYAML.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/MC/MCAtom.h"
-#include "llvm/MC/MCFunction.h"
+#include "llvm/MC/MCAnalysis/MCAtom.h"
+#include "llvm/MC/MCAnalysis/MCFunction.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/Object/YAML.h"
+#include "llvm/MC/YAML.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/YAMLTraits.h"
#include <vector>
@@ -101,7 +102,7 @@ struct Atom {
uint64_t Size;
std::vector<Inst> Insts;
- object::yaml::BinaryRef Data;
+ yaml::BinaryRef Data;
};
struct BasicBlock {
@@ -162,12 +163,14 @@ template <> struct ScalarTraits<MCModuleYAML::Operand> {
static void output(const MCModuleYAML::Operand &, void *,
llvm::raw_ostream &);
static StringRef input(StringRef, void *, MCModuleYAML::Operand &);
+ static bool mustQuote(StringRef) { return false; }
};
template <> struct ScalarTraits<MCModuleYAML::OpcodeEnum> {
static void output(const MCModuleYAML::OpcodeEnum &, void *,
llvm::raw_ostream &);
static StringRef input(StringRef, void *, MCModuleYAML::OpcodeEnum &);
+ static bool mustQuote(StringRef) { return false; }
};
void ScalarEnumerationTraits<MCAtom::AtomKind>::enumeration(
@@ -276,7 +279,7 @@ class MCModule2YAML {
const MCModule &MCM;
MCModuleYAML::Module YAMLModule;
void dumpAtom(const MCAtom *MCA);
- void dumpFunction(const MCFunction *MCF);
+ void dumpFunction(const MCFunction &MCF);
void dumpBasicBlock(const MCBasicBlock *MCBB);
public:
@@ -300,7 +303,7 @@ MCModule2YAML::MCModule2YAML(const MCModule &MCM) : MCM(MCM), YAMLModule() {
dumpAtom(*AI);
for (MCModule::const_func_iterator FI = MCM.func_begin(), FE = MCM.func_end();
FI != FE; ++FI)
- dumpFunction(*FI);
+ dumpFunction(**FI);
}
void MCModule2YAML::dumpAtom(const MCAtom *MCA) {
@@ -328,22 +331,22 @@ void MCModule2YAML::dumpAtom(const MCAtom *MCA) {
}
}
-void MCModule2YAML::dumpFunction(const MCFunction *MCF) {
+void MCModule2YAML::dumpFunction(const MCFunction &MCF) {
YAMLModule.Functions.resize(YAMLModule.Functions.size() + 1);
MCModuleYAML::Function &F = YAMLModule.Functions.back();
- F.Name = MCF->getName();
- for (MCFunction::const_iterator BBI = MCF->begin(), BBE = MCF->end();
+ F.Name = MCF.getName();
+ for (MCFunction::const_iterator BBI = MCF.begin(), BBE = MCF.end();
BBI != BBE; ++BBI) {
- const MCBasicBlock *MCBB = *BBI;
+ const MCBasicBlock &MCBB = **BBI;
F.BasicBlocks.resize(F.BasicBlocks.size() + 1);
MCModuleYAML::BasicBlock &BB = F.BasicBlocks.back();
- BB.Address = MCBB->getInsts()->getBeginAddr();
- for (MCBasicBlock::pred_const_iterator PI = MCBB->pred_begin(),
- PE = MCBB->pred_end();
+ BB.Address = MCBB.getInsts()->getBeginAddr();
+ for (MCBasicBlock::pred_const_iterator PI = MCBB.pred_begin(),
+ PE = MCBB.pred_end();
PI != PE; ++PI)
BB.Preds.push_back((*PI)->getInsts()->getBeginAddr());
- for (MCBasicBlock::succ_const_iterator SI = MCBB->succ_begin(),
- SE = MCBB->succ_end();
+ for (MCBasicBlock::succ_const_iterator SI = MCBB.succ_begin(),
+ SE = MCBB.succ_end();
SI != SE; ++SI)
BB.Succs.push_back((*SI)->getInsts()->getBeginAddr());
}
@@ -442,7 +445,7 @@ StringRef mcmodule2yaml(raw_ostream &OS, const MCModule &MCM,
return "";
}
-StringRef yaml2mcmodule(OwningPtr<MCModule> &MCM, StringRef YamlContent,
+StringRef yaml2mcmodule(std::unique_ptr<MCModule> &MCM, StringRef YamlContent,
const MCInstrInfo &MII, const MCRegisterInfo &MRI) {
MCM.reset(new MCModule);
YAML2MCModule Parser(*MCM);
@@ -450,7 +453,7 @@ StringRef yaml2mcmodule(OwningPtr<MCModule> &MCM, StringRef YamlContent,
InstrRegInfoHolder IRI(MII, MRI);
yaml::Input YIn(YamlContent, (void *)&IRI);
YIn >> YAMLModule;
- if (error_code ec = YIn.error())
+ if (std::error_code ec = YIn.error())
return ec.message();
StringRef err = Parser.parse(YAMLModule);
if (!err.empty())
diff --git a/contrib/llvm/lib/MC/MCObjectDisassembler.cpp b/contrib/llvm/lib/MC/MCAnalysis/MCObjectDisassembler.cpp
index 16a110f0..0f789ff 100644
--- a/contrib/llvm/lib/MC/MCObjectDisassembler.cpp
+++ b/contrib/llvm/lib/MC/MCAnalysis/MCObjectDisassembler.cpp
@@ -13,11 +13,11 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
-#include "llvm/MC/MCAtom.h"
+#include "llvm/MC/MCAnalysis/MCAtom.h"
+#include "llvm/MC/MCAnalysis/MCFunction.h"
+#include "llvm/MC/MCAnalysis/MCModule.h"
#include "llvm/MC/MCDisassembler.h"
-#include "llvm/MC/MCFunction.h"
#include "llvm/MC/MCInstrAnalysis.h"
-#include "llvm/MC/MCModule.h"
#include "llvm/MC/MCObjectSymbolizer.h"
#include "llvm/Object/MachO.h"
#include "llvm/Object/ObjectFile.h"
@@ -31,22 +31,20 @@
using namespace llvm;
using namespace object;
+#define DEBUG_TYPE "mc"
+
MCObjectDisassembler::MCObjectDisassembler(const ObjectFile &Obj,
const MCDisassembler &Dis,
const MCInstrAnalysis &MIA)
- : Obj(Obj), Dis(Dis), MIA(MIA), MOS(0) {}
+ : Obj(Obj), Dis(Dis), MIA(MIA), MOS(nullptr) {}
uint64_t MCObjectDisassembler::getEntrypoint() {
- error_code ec;
- for (symbol_iterator SI = Obj.begin_symbols(), SE = Obj.end_symbols();
- SI != SE; SI.increment(ec)) {
- if (ec)
- break;
+ for (const SymbolRef &Symbol : Obj.symbols()) {
StringRef Name;
- SI->getName(Name);
+ Symbol.getName(Name);
if (Name == "main" || Name == "_main") {
uint64_t Entrypoint;
- SI->getAddress(Entrypoint);
+ Symbol.getAddress(Entrypoint);
return getEffectiveLoadAddr(Entrypoint);
}
}
@@ -90,25 +88,24 @@ MCModule *MCObjectDisassembler::buildModule(bool withCFG) {
}
void MCObjectDisassembler::buildSectionAtoms(MCModule *Module) {
- error_code ec;
- for (section_iterator SI = Obj.begin_sections(),
- SE = Obj.end_sections();
- SI != SE;
- SI.increment(ec)) {
- if (ec) break;
-
- bool isText; SI->isText(isText);
- bool isData; SI->isData(isData);
+ for (const SectionRef &Section : Obj.sections()) {
+ bool isText;
+ Section.isText(isText);
+ bool isData;
+ Section.isData(isData);
if (!isData && !isText)
continue;
- uint64_t StartAddr; SI->getAddress(StartAddr);
- uint64_t SecSize; SI->getSize(SecSize);
+ uint64_t StartAddr;
+ Section.getAddress(StartAddr);
+ uint64_t SecSize;
+ Section.getSize(SecSize);
if (StartAddr == UnknownAddressOrSize || SecSize == UnknownAddressOrSize)
continue;
StartAddr = getEffectiveLoadAddr(StartAddr);
- StringRef Contents; SI->getContents(Contents);
+ StringRef Contents;
+ Section.getContents(Contents);
StringRefMemoryObject memoryObject(Contents, StartAddr);
// We don't care about things like non-file-backed sections yet.
@@ -116,11 +113,12 @@ void MCObjectDisassembler::buildSectionAtoms(MCModule *Module) {
continue;
uint64_t EndAddr = StartAddr + SecSize - 1;
- StringRef SecName; SI->getName(SecName);
+ StringRef SecName;
+ Section.getName(SecName);
if (isText) {
- MCTextAtom *Text = 0;
- MCDataAtom *InvalidData = 0;
+ MCTextAtom *Text = nullptr;
+ MCDataAtom *InvalidData = nullptr;
uint64_t InstSize;
for (uint64_t Index = 0; Index < SecSize; Index += InstSize) {
@@ -133,11 +131,11 @@ void MCObjectDisassembler::buildSectionAtoms(MCModule *Module) {
Text->setName(SecName);
}
Text->addInst(Inst, InstSize);
- InvalidData = 0;
+ InvalidData = nullptr;
} else {
assert(InstSize && "getInstruction() consumed no bytes");
if (!InvalidData) {
- Text = 0;
+ Text = nullptr;
InvalidData = Module->createDataAtom(CurAddr, CurAddr+InstSize - 1);
}
for (uint64_t I = 0; I < InstSize; ++I)
@@ -164,7 +162,7 @@ namespace {
BBInfoSetTy Preds;
MCObjectDisassembler::AddressSetTy SuccAddrs;
- BBInfo() : Atom(0), BB(0) {}
+ BBInfo() : Atom(nullptr), BB(nullptr) {}
void addSucc(BBInfo &Succ) {
Succs.insert(&Succ);
@@ -184,16 +182,12 @@ void MCObjectDisassembler::buildCFG(MCModule *Module) {
AddressSetTy Splits;
AddressSetTy Calls;
- error_code ec;
- for (symbol_iterator SI = Obj.begin_symbols(), SE = Obj.end_symbols();
- SI != SE; SI.increment(ec)) {
- if (ec)
- break;
+ for (const SymbolRef &Symbol : Obj.symbols()) {
SymbolRef::Type SymType;
- SI->getType(SymType);
+ Symbol.getType(SymType);
if (SymType == SymbolRef::ST_Function) {
uint64_t SymAddr;
- SI->getAddress(SymAddr);
+ Symbol.getAddress(SymAddr);
SymAddr = getEffectiveLoadAddr(SymAddr);
Calls.push_back(SymAddr);
Splits.push_back(SymAddr);
@@ -488,7 +482,7 @@ MCObjectDisassembler::createFunction(MCModule *Module, uint64_t BeginAddr,
continue;
// FIXME: MCModule should provide a findFunctionByAddr()
if ((*FI)->getEntryBlock()->getInsts()->getBeginAddr() == BeginAddr)
- return *FI;
+ return FI->get();
}
// Finally, just create a new one.
@@ -506,20 +500,16 @@ MCMachOObjectDisassembler::MCMachOObjectDisassembler(
: MCObjectDisassembler(MOOF, Dis, MIA), MOOF(MOOF),
VMAddrSlide(VMAddrSlide), HeaderLoadAddress(HeaderLoadAddress) {
- error_code ec;
- for (section_iterator SI = MOOF.begin_sections(), SE = MOOF.end_sections();
- SI != SE; SI.increment(ec)) {
- if (ec)
- break;
+ for (const SectionRef &Section : MOOF.sections()) {
StringRef Name;
- SI->getName(Name);
+ Section.getName(Name);
// FIXME: We should use the S_ section type instead of the name.
if (Name == "__mod_init_func") {
DEBUG(dbgs() << "Found __mod_init_func section!\n");
- SI->getContents(ModInitContents);
+ Section.getContents(ModInitContents);
} else if (Name == "__mod_exit_func") {
DEBUG(dbgs() << "Found __mod_exit_func section!\n");
- SI->getContents(ModExitContents);
+ Section.getContents(ModExitContents);
}
}
}
diff --git a/contrib/llvm/lib/MC/MCObjectSymbolizer.cpp b/contrib/llvm/lib/MC/MCAnalysis/MCObjectSymbolizer.cpp
index b9131d1..b149596 100644
--- a/contrib/llvm/lib/MC/MCObjectSymbolizer.cpp
+++ b/contrib/llvm/lib/MC/MCAnalysis/MCObjectSymbolizer.cpp
@@ -14,8 +14,8 @@
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCRelocationInfo.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/Object/MachO.h"
#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Object/MachO.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
@@ -34,31 +34,28 @@ class MCMachObjectSymbolizer : public MCObjectSymbolizer {
uint64_t StubsIndSymIndex;
public:
- MCMachObjectSymbolizer(MCContext &Ctx, OwningPtr<MCRelocationInfo> &RelInfo,
+ MCMachObjectSymbolizer(MCContext &Ctx,
+ std::unique_ptr<MCRelocationInfo> RelInfo,
const MachOObjectFile *MOOF);
- StringRef findExternalFunctionAt(uint64_t Addr) LLVM_OVERRIDE;
+ StringRef findExternalFunctionAt(uint64_t Addr) override;
- void tryAddingPcLoadReferenceComment(raw_ostream &cStream,
- int64_t Value,
- uint64_t Address) LLVM_OVERRIDE;
+ void tryAddingPcLoadReferenceComment(raw_ostream &cStream, int64_t Value,
+ uint64_t Address) override;
};
} // End unnamed namespace
+MCMachObjectSymbolizer::MCMachObjectSymbolizer(
+ MCContext &Ctx, std::unique_ptr<MCRelocationInfo> RelInfo,
+ const MachOObjectFile *MOOF)
+ : MCObjectSymbolizer(Ctx, std::move(RelInfo), MOOF), MOOF(MOOF),
+ StubsStart(0), StubsCount(0), StubSize(0), StubsIndSymIndex(0) {
-MCMachObjectSymbolizer::
-MCMachObjectSymbolizer(MCContext &Ctx, OwningPtr<MCRelocationInfo> &RelInfo,
- const MachOObjectFile *MOOF)
- : MCObjectSymbolizer(Ctx, RelInfo, MOOF), MOOF(MOOF),
- StubsStart(0), StubsCount(0), StubSize(0), StubsIndSymIndex(0) {
-
- error_code ec;
- for (section_iterator SI = MOOF->begin_sections(), SE = MOOF->end_sections();
- SI != SE; SI.increment(ec)) {
- if (ec) break;
- StringRef Name; SI->getName(Name);
+ for (const SectionRef &Section : MOOF->sections()) {
+ StringRef Name;
+ Section.getName(Name);
if (Name == "__stubs") {
- SectionRef StubsSec = *SI;
+ SectionRef StubsSec = Section;
if (MOOF->is64Bit()) {
MachO::section_64 S = MOOF->getSection64(StubsSec.getRawDataRefImpl());
StubsIndSymIndex = S.reserved1;
@@ -90,13 +87,11 @@ StringRef MCMachObjectSymbolizer::findExternalFunctionAt(uint64_t Addr) {
MOOF->getIndirectSymbolTableEntry(MOOF->getDysymtabLoadCommand(), StubIdx);
StringRef SymName;
- symbol_iterator SI = MOOF->begin_symbols();
- error_code ec;
- for (uint32_t i = 0; i != SymtabIdx; ++i) {
- SI.increment(ec);
- }
+ symbol_iterator SI = MOOF->symbol_begin();
+ for (uint32_t i = 0; i != SymtabIdx; ++i)
+ ++SI;
SI->getName(SymName);
- assert(SI != MOOF->end_symbols() && "Stub wasn't found in the symbol table!");
+ assert(SI != MOOF->symbol_end() && "Stub wasn't found in the symbol table!");
assert(SymName.front() == '_' && "Mach-O symbol doesn't start with '_'!");
return SymName.substr(1);
}
@@ -125,11 +120,11 @@ tryAddingPcLoadReferenceComment(raw_ostream &cStream, int64_t Value,
//===- MCObjectSymbolizer -------------------------------------------------===//
-MCObjectSymbolizer::MCObjectSymbolizer(MCContext &Ctx,
- OwningPtr<MCRelocationInfo> &RelInfo,
- const ObjectFile *Obj)
- : MCSymbolizer(Ctx, RelInfo), Obj(Obj), SortedSections(), AddrToReloc() {
-}
+MCObjectSymbolizer::MCObjectSymbolizer(
+ MCContext &Ctx, std::unique_ptr<MCRelocationInfo> RelInfo,
+ const ObjectFile *Obj)
+ : MCSymbolizer(Ctx, std::move(RelInfo)), Obj(Obj), SortedSections(),
+ AddrToReloc() {}
bool MCObjectSymbolizer::
tryAddingSymbolicOperand(MCInst &MI, raw_ostream &cStream,
@@ -159,16 +154,17 @@ tryAddingSymbolicOperand(MCInst &MI, raw_ostream &cStream,
return false;
uint64_t UValue = Value;
// FIXME: map instead of looping each time?
- error_code ec;
- for (symbol_iterator SI = Obj->begin_symbols(), SE = Obj->end_symbols();
- SI != SE; SI.increment(ec)) {
- if (ec) break;
- uint64_t SymAddr; SI->getAddress(SymAddr);
- uint64_t SymSize; SI->getSize(SymSize);
- StringRef SymName; SI->getName(SymName);
- SymbolRef::Type SymType; SI->getType(SymType);
- if (SymAddr == UnknownAddressOrSize || SymSize == UnknownAddressOrSize
- || SymName.empty() || SymType != SymbolRef::ST_Function)
+ for (const SymbolRef &Symbol : Obj->symbols()) {
+ uint64_t SymAddr;
+ Symbol.getAddress(SymAddr);
+ uint64_t SymSize;
+ Symbol.getSize(SymSize);
+ StringRef SymName;
+ Symbol.getName(SymName);
+ SymbolRef::Type SymType;
+ Symbol.getType(SymType);
+ if (SymAddr == UnknownAddressOrSize || SymSize == UnknownAddressOrSize ||
+ SymName.empty() || SymType != SymbolRef::ST_Function)
continue;
if ( SymAddr == UValue ||
@@ -195,13 +191,12 @@ StringRef MCObjectSymbolizer::findExternalFunctionAt(uint64_t Addr) {
return StringRef();
}
-MCObjectSymbolizer *
-MCObjectSymbolizer::createObjectSymbolizer(MCContext &Ctx,
- OwningPtr<MCRelocationInfo> &RelInfo,
- const ObjectFile *Obj) {
+MCObjectSymbolizer *MCObjectSymbolizer::createObjectSymbolizer(
+ MCContext &Ctx, std::unique_ptr<MCRelocationInfo> RelInfo,
+ const ObjectFile *Obj) {
if (const MachOObjectFile *MOOF = dyn_cast<MachOObjectFile>(Obj))
- return new MCMachObjectSymbolizer(Ctx, RelInfo, MOOF);
- return new MCObjectSymbolizer(Ctx, RelInfo, Obj);
+ return new MCMachObjectSymbolizer(Ctx, std::move(RelInfo), MOOF);
+ return new MCObjectSymbolizer(Ctx, std::move(RelInfo), Obj);
}
// SortedSections implementation.
@@ -220,11 +215,11 @@ const SectionRef *MCObjectSymbolizer::findSectionContaining(uint64_t Addr) {
It = std::lower_bound(SortedSections.begin(), EndIt,
Addr, SectionStartsBefore);
if (It == EndIt)
- return 0;
+ return nullptr;
uint64_t SAddr; It->getAddress(SAddr);
uint64_t SSize; It->getSize(SSize);
if (Addr >= SAddr + SSize)
- return 0;
+ return nullptr;
return &*It;
}
@@ -234,77 +229,40 @@ const RelocationRef *MCObjectSymbolizer::findRelocationAt(uint64_t Addr) {
AddrToRelocMap::const_iterator RI = AddrToReloc.find(Addr);
if (RI == AddrToReloc.end())
- return 0;
+ return nullptr;
return &RI->second;
}
void MCObjectSymbolizer::buildSectionList() {
- error_code ec;
- for (section_iterator SI = Obj->begin_sections(), SE = Obj->end_sections();
- SI != SE; SI.increment(ec)) {
- if (ec) break;
-
- bool RequiredForExec; SI->isRequiredForExecution(RequiredForExec);
+ for (const SectionRef &Section : Obj->sections()) {
+ bool RequiredForExec;
+ Section.isRequiredForExecution(RequiredForExec);
if (RequiredForExec == false)
continue;
- uint64_t SAddr; SI->getAddress(SAddr);
- uint64_t SSize; SI->getSize(SSize);
- SortedSectionList::iterator It = std::lower_bound(SortedSections.begin(),
- SortedSections.end(),
- SAddr,
- SectionStartsBefore);
+ uint64_t SAddr;
+ Section.getAddress(SAddr);
+ uint64_t SSize;
+ Section.getSize(SSize);
+ SortedSectionList::iterator It =
+ std::lower_bound(SortedSections.begin(), SortedSections.end(), SAddr,
+ SectionStartsBefore);
if (It != SortedSections.end()) {
uint64_t FoundSAddr; It->getAddress(FoundSAddr);
if (FoundSAddr < SAddr + SSize)
llvm_unreachable("Inserting overlapping sections");
}
- SortedSections.insert(It, *SI);
+ SortedSections.insert(It, Section);
}
}
void MCObjectSymbolizer::buildRelocationByAddrMap() {
- error_code ec;
- for (section_iterator SI = Obj->begin_sections(), SE = Obj->end_sections();
- SI != SE; SI.increment(ec)) {
- if (ec) break;
-
- section_iterator RelSecI = SI->getRelocatedSection();
- if (RelSecI == Obj->end_sections())
- continue;
-
- uint64_t StartAddr; RelSecI->getAddress(StartAddr);
- uint64_t Size; RelSecI->getSize(Size);
- bool RequiredForExec; RelSecI->isRequiredForExecution(RequiredForExec);
- if (RequiredForExec == false || Size == 0)
- continue;
- for (relocation_iterator RI = SI->begin_relocations(),
- RE = SI->end_relocations();
- RI != RE;
- RI.increment(ec)) {
- if (ec) break;
- // FIXME: libObject is inconsistent regarding error handling. The
- // overwhelming majority of methods always return object_error::success,
- // and assert for simple errors.. Here, ELFObjectFile::getRelocationOffset
- // asserts when the file type isn't ET_REL.
- // This workaround handles x86-64 elf, the only one that has a relocinfo.
- uint64_t Offset;
- if (Obj->isELF()) {
- const ELF64LEObjectFile *ELFObj = dyn_cast<ELF64LEObjectFile>(Obj);
- if (ELFObj == 0)
- break;
- if (ELFObj->getELFFile()->getHeader()->e_type == ELF::ET_REL) {
- RI->getOffset(Offset);
- Offset += StartAddr;
- } else {
- RI->getAddress(Offset);
- }
- } else {
- RI->getOffset(Offset);
- Offset += StartAddr;
- }
+ for (const SectionRef &Section : Obj->sections()) {
+ for (const RelocationRef &Reloc : Section.relocations()) {
+ uint64_t Address;
+ Reloc.getAddress(Address);
// At a specific address, only keep the first relocation.
- if (AddrToReloc.find(Offset) == AddrToReloc.end())
- AddrToReloc[Offset] = *RI;
+ if (AddrToReloc.find(Address) == AddrToReloc.end())
+ AddrToReloc[Address] = Reloc;
}
}
}
diff --git a/contrib/llvm/lib/MC/MCAnalysis/Makefile b/contrib/llvm/lib/MC/MCAnalysis/Makefile
new file mode 100644
index 0000000..add2dbd
--- /dev/null
+++ b/contrib/llvm/lib/MC/MCAnalysis/Makefile
@@ -0,0 +1,14 @@
+##===- lib/MC/MCAnalysis/Makefile --------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../..
+LIBRARYNAME = LLVMMCAnalysis
+BUILD_ARCHIVE := 1
+
+include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/lib/MC/MCAsmBackend.cpp b/contrib/llvm/lib/MC/MCAsmBackend.cpp
index c4c98cc..c42757b 100644
--- a/contrib/llvm/lib/MC/MCAsmBackend.cpp
+++ b/contrib/llvm/lib/MC/MCAsmBackend.cpp
@@ -12,8 +12,7 @@
#include "llvm/MC/MCFixupKindInfo.h"
using namespace llvm;
-MCAsmBackend::MCAsmBackend()
- : HasReliableSymbolDifference(false), HasDataInCodeSupport(false) {}
+MCAsmBackend::MCAsmBackend() : HasDataInCodeSupport(false) {}
MCAsmBackend::~MCAsmBackend() {}
diff --git a/contrib/llvm/lib/MC/MCAsmInfo.cpp b/contrib/llvm/lib/MC/MCAsmInfo.cpp
index daf19e9..f8081ef 100644
--- a/contrib/llvm/lib/MC/MCAsmInfo.cpp
+++ b/contrib/llvm/lib/MC/MCAsmInfo.cpp
@@ -37,12 +37,10 @@ MCAsmInfo::MCAsmInfo() {
MinInstAlignment = 1;
DollarIsPC = false;
SeparatorString = ";";
- CommentColumn = 40;
CommentString = "#";
LabelSuffix = ":";
- DebugLabelSuffix = ":";
- GlobalPrefix = "";
- PrivateGlobalPrefix = ".";
+ UseAssignmentForEHBegin = false;
+ PrivateGlobalPrefix = "L";
LinkerPrivateGlobalPrefix = "";
InlineAsmStart = "APP";
InlineAsmEnd = "NO_APP";
@@ -61,11 +59,10 @@ MCAsmInfo::MCAsmInfo() {
Data64bitsDirective = "\t.quad\t";
SunStyleELFSectionSwitchSyntax = false;
UsesELFSectionDirectiveForBSS = false;
- AlignDirective = "\t.align\t";
AlignmentIsInBytes = true;
TextAlignFillValue = 0;
- GPRel64Directive = 0;
- GPRel32Directive = 0;
+ GPRel64Directive = nullptr;
+ GPRel32Directive = nullptr;
GlobalDirective = "\t.globl\t";
HasSetDirective = true;
HasAggressiveSymbolFolding = true;
@@ -75,7 +72,7 @@ MCAsmInfo::MCAsmInfo() {
HasSingleParameterDotFile = true;
HasIdentDirective = false;
HasNoDeadStrip = false;
- WeakRefDirective = 0;
+ WeakRefDirective = nullptr;
HasWeakDefDirective = false;
HasWeakDefCanBeHiddenDirective = false;
HasLinkOnceDirective = false;
@@ -85,38 +82,31 @@ MCAsmInfo::MCAsmInfo() {
HasLEB128 = false;
SupportsDebugInformation = false;
ExceptionsType = ExceptionHandling::None;
+ WinEHEncodingType = WinEH::EncodingType::ET_Invalid;
DwarfUsesRelocationsAcrossSections = true;
DwarfFDESymbolsUseAbsDiff = false;
DwarfRegNumForCFI = false;
- HasMicrosoftFastStdCallMangling = false;
NeedsDwarfSectionOffsetDirective = false;
-}
-
-MCAsmInfo::~MCAsmInfo() {
-}
+ UseParensForSymbolVariant = false;
+ // FIXME: Clang's logic should be synced with the logic used to initialize
+ // this member and the two implementations should be merged.
+ // For reference:
+ // - Solaris always enables the integrated assembler by default
+ // - SparcELFMCAsmInfo and X86ELFMCAsmInfo are handling this case
+ // - Windows always enables the integrated assembler by default
+ // - MCAsmInfoCOFF is handling this case, should it be MCAsmInfoMicrosoft?
+ // - MachO targets always enables the integrated assembler by default
+ // - MCAsmInfoDarwin is handling this case
+ // - Generic_GCC toolchains enable the integrated assembler on a per
+ // architecture basis.
+ // - The target subclasses for AArch64, ARM, and X86 handle these cases
+ UseIntegratedAssembler = false;
-unsigned MCAsmInfo::getULEB128Size(uint64_t Value) {
- unsigned Size = 0;
- do {
- Value >>= 7;
- Size += sizeof(int8_t);
- } while (Value);
- return Size;
+ CompressDebugSections = false;
}
-unsigned MCAsmInfo::getSLEB128Size(int64_t Value) {
- unsigned Size = 0;
- int Sign = Value >> (8 * sizeof(Value) - 1);
- bool IsMore;
-
- do {
- unsigned Byte = Value & 0x7f;
- Value >>= 7;
- IsMore = Value != Sign || ((Byte ^ Sign) & 0x40) != 0;
- Size += sizeof(int8_t);
- } while (IsMore);
- return Size;
+MCAsmInfo::~MCAsmInfo() {
}
const MCExpr *
diff --git a/contrib/llvm/lib/MC/MCAsmInfoCOFF.cpp b/contrib/llvm/lib/MC/MCAsmInfoCOFF.cpp
index 1cac71f..9945637 100644
--- a/contrib/llvm/lib/MC/MCAsmInfoCOFF.cpp
+++ b/contrib/llvm/lib/MC/MCAsmInfoCOFF.cpp
@@ -18,14 +18,12 @@ using namespace llvm;
void MCAsmInfoCOFF::anchor() { }
MCAsmInfoCOFF::MCAsmInfoCOFF() {
- GlobalPrefix = "_";
// MingW 4.5 and later support .comm with log2 alignment, but .lcomm uses byte
// alignment.
COMMDirectiveAlignmentIsInBytes = false;
LCOMMDirectiveAlignmentType = LCOMM::ByteAlignment;
HasDotTypeDotSizeDirective = false;
HasSingleParameterDotFile = false;
- PrivateGlobalPrefix = "L"; // Prefix for private global symbols
WeakRefDirective = "\t.weak\t";
HasLinkOnceDirective = true;
@@ -36,8 +34,9 @@ MCAsmInfoCOFF::MCAsmInfoCOFF() {
// Set up DWARF directives
HasLEB128 = true; // Target asm supports leb128 directives (little-endian)
SupportsDebugInformation = true;
- HasMicrosoftFastStdCallMangling = true;
NeedsDwarfSectionOffsetDirective = true;
+
+ UseIntegratedAssembler = true;
}
void MCAsmInfoMicrosoft::anchor() { }
diff --git a/contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp b/contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp
index 351ec56..eaf28dd 100644
--- a/contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp
+++ b/contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp
@@ -23,8 +23,6 @@ void MCAsmInfoDarwin::anchor() { }
MCAsmInfoDarwin::MCAsmInfoDarwin() {
// Common settings for all Darwin targets.
// Syntax:
- GlobalPrefix = "_";
- PrivateGlobalPrefix = "L";
LinkerPrivateGlobalPrefix = "l";
HasSingleParameterDotFile = false;
HasSubsectionsViaSymbols = true;
@@ -60,4 +58,6 @@ MCAsmInfoDarwin::MCAsmInfoDarwin() {
HasNoDeadStrip = true;
DwarfUsesRelocationsAcrossSections = false;
+
+ UseIntegratedAssembler = true;
}
diff --git a/contrib/llvm/lib/MC/MCAsmInfoELF.cpp b/contrib/llvm/lib/MC/MCAsmInfoELF.cpp
index 8cf4e4f..ccb3dc3 100644
--- a/contrib/llvm/lib/MC/MCAsmInfoELF.cpp
+++ b/contrib/llvm/lib/MC/MCAsmInfoELF.cpp
@@ -20,4 +20,5 @@ void MCAsmInfoELF::anchor() { }
MCAsmInfoELF::MCAsmInfoELF() {
HasIdentDirective = true;
WeakRefDirective = "\t.weak\t";
+ PrivateGlobalPrefix = ".L";
}
diff --git a/contrib/llvm/lib/MC/MCAsmStreamer.cpp b/contrib/llvm/lib/MC/MCAsmStreamer.cpp
index ca49f8f..14f0f05 100644
--- a/contrib/llvm/lib/MC/MCAsmStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCAsmStreamer.cpp
@@ -8,8 +8,8 @@
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCStreamer.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmBackend.h"
@@ -32,6 +32,7 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include <cctype>
+#include <unordered_map>
using namespace llvm;
namespace {
@@ -41,44 +42,33 @@ protected:
formatted_raw_ostream &OS;
const MCAsmInfo *MAI;
private:
- OwningPtr<MCInstPrinter> InstPrinter;
- OwningPtr<MCCodeEmitter> Emitter;
- OwningPtr<MCAsmBackend> AsmBackend;
+ std::unique_ptr<MCInstPrinter> InstPrinter;
+ std::unique_ptr<MCCodeEmitter> Emitter;
+ std::unique_ptr<MCAsmBackend> AsmBackend;
SmallString<128> CommentToEmit;
raw_svector_ostream CommentStream;
unsigned IsVerboseAsm : 1;
unsigned ShowInst : 1;
- unsigned UseLoc : 1;
- unsigned UseCFI : 1;
unsigned UseDwarfDirectory : 1;
- enum EHSymbolFlags { EHGlobal = 1,
- EHWeakDefinition = 1 << 1,
- EHPrivateExtern = 1 << 2 };
- DenseMap<const MCSymbol*, unsigned> FlagMap;
-
- bool needsSet(const MCExpr *Value);
-
void EmitRegisterName(int64_t Register);
- virtual void EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame);
- virtual void EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame);
+ void EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame) override;
+ void EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame) override;
public:
- MCAsmStreamer(MCContext &Context, MCTargetStreamer *TargetStreamer,
- formatted_raw_ostream &os, bool isVerboseAsm, bool useLoc,
- bool useCFI, bool useDwarfDirectory, MCInstPrinter *printer,
- MCCodeEmitter *emitter, MCAsmBackend *asmbackend, bool showInst)
- : MCStreamer(Context, TargetStreamer), OS(os), MAI(Context.getAsmInfo()),
+ MCAsmStreamer(MCContext &Context, formatted_raw_ostream &os,
+ bool isVerboseAsm, bool useDwarfDirectory,
+ MCInstPrinter *printer, MCCodeEmitter *emitter,
+ MCAsmBackend *asmbackend, bool showInst)
+ : MCStreamer(Context), OS(os), MAI(Context.getAsmInfo()),
InstPrinter(printer), Emitter(emitter), AsmBackend(asmbackend),
CommentStream(CommentToEmit), IsVerboseAsm(isVerboseAsm),
- ShowInst(showInst), UseLoc(useLoc), UseCFI(useCFI),
- UseDwarfDirectory(useDwarfDirectory) {
+ ShowInst(showInst), UseDwarfDirectory(useDwarfDirectory) {
if (InstPrinter && IsVerboseAsm)
InstPrinter->setCommentStream(CommentStream);
}
- ~MCAsmStreamer() {}
inline void EmitEOL() {
// If we don't have any comments, just emit a \n.
@@ -92,172 +82,163 @@ public:
/// isVerboseAsm - Return true if this streamer supports verbose assembly at
/// all.
- virtual bool isVerboseAsm() const { return IsVerboseAsm; }
+ bool isVerboseAsm() const override { return IsVerboseAsm; }
/// hasRawTextSupport - We support EmitRawText.
- virtual bool hasRawTextSupport() const { return true; }
+ bool hasRawTextSupport() const override { return true; }
/// AddComment - Add a comment that can be emitted to the generated .s
/// file if applicable as a QoI issue to make the output of the compiler
/// more readable. This only affects the MCAsmStreamer, and only when
/// verbose assembly output is enabled.
- virtual void AddComment(const Twine &T);
+ void AddComment(const Twine &T) override;
/// AddEncodingComment - Add a comment showing the encoding of an instruction.
- virtual void AddEncodingComment(const MCInst &Inst);
+ void AddEncodingComment(const MCInst &Inst, const MCSubtargetInfo &);
/// GetCommentOS - Return a raw_ostream that comments can be written to.
/// Unlike AddComment, you are required to terminate comments with \n if you
/// use this method.
- virtual raw_ostream &GetCommentOS() {
+ raw_ostream &GetCommentOS() override {
if (!IsVerboseAsm)
return nulls(); // Discard comments unless in verbose asm mode.
return CommentStream;
}
+ void emitRawComment(const Twine &T, bool TabPrefix = true) override;
+
/// AddBlankLine - Emit a blank line to a .s file to pretty it up.
- virtual void AddBlankLine() {
+ void AddBlankLine() override {
EmitEOL();
}
/// @name MCStreamer Interface
/// @{
- virtual void ChangeSection(const MCSection *Section,
- const MCExpr *Subsection);
-
- virtual void InitSections() {
- InitToTextSection();
- }
-
- virtual void InitToTextSection() {
- SwitchSection(getContext().getObjectFileInfo()->getTextSection());
- }
-
- virtual void EmitLabel(MCSymbol *Symbol);
- virtual void EmitDebugLabel(MCSymbol *Symbol);
-
- virtual void EmitEHSymAttributes(const MCSymbol *Symbol,
- MCSymbol *EHSymbol);
- virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
- virtual void EmitLinkerOptions(ArrayRef<std::string> Options);
- virtual void EmitDataRegion(MCDataRegionType Kind);
- virtual void EmitThumbFunc(MCSymbol *Func);
-
- virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
- virtual void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol);
- virtual void EmitDwarfAdvanceLineAddr(int64_t LineDelta,
- const MCSymbol *LastLabel,
- const MCSymbol *Label,
- unsigned PointerSize);
- virtual void EmitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel,
- const MCSymbol *Label);
-
- virtual bool EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute);
-
- virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue);
- virtual void BeginCOFFSymbolDef(const MCSymbol *Symbol);
- virtual void EmitCOFFSymbolStorageClass(int StorageClass);
- virtual void EmitCOFFSymbolType(int Type);
- virtual void EndCOFFSymbolDef();
- virtual void EmitCOFFSecRel32(MCSymbol const *Symbol);
- virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value);
- virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment);
+ void ChangeSection(const MCSection *Section,
+ const MCExpr *Subsection) override;
+
+ void EmitLOHDirective(MCLOHType Kind, const MCLOHArgs &Args) override;
+ void EmitLabel(MCSymbol *Symbol) override;
+
+ void EmitAssemblerFlag(MCAssemblerFlag Flag) override;
+ void EmitLinkerOptions(ArrayRef<std::string> Options) override;
+ void EmitDataRegion(MCDataRegionType Kind) override;
+ void EmitVersionMin(MCVersionMinType Kind, unsigned Major, unsigned Minor,
+ unsigned Update) override;
+ void EmitThumbFunc(MCSymbol *Func) override;
+
+ void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) override;
+ void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) override;
+ bool EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) override;
+
+ void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) override;
+ void BeginCOFFSymbolDef(const MCSymbol *Symbol) override;
+ void EmitCOFFSymbolStorageClass(int StorageClass) override;
+ void EmitCOFFSymbolType(int Type) override;
+ void EndCOFFSymbolDef() override;
+ void EmitCOFFSectionIndex(MCSymbol const *Symbol) override;
+ void EmitCOFFSecRel32(MCSymbol const *Symbol) override;
+ void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) override;
+ void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) override;
/// EmitLocalCommonSymbol - Emit a local common (.lcomm) symbol.
///
/// @param Symbol - The common symbol to emit.
/// @param Size - The size of the common symbol.
/// @param ByteAlignment - The alignment of the common symbol in bytes.
- virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment);
-
- virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
- uint64_t Size = 0, unsigned ByteAlignment = 0);
-
- virtual void EmitTBSSSymbol (const MCSection *Section, MCSymbol *Symbol,
- uint64_t Size, unsigned ByteAlignment = 0);
-
- virtual void EmitBytes(StringRef Data);
-
- virtual void EmitValueImpl(const MCExpr *Value, unsigned Size);
- virtual void EmitIntValue(uint64_t Value, unsigned Size);
-
- virtual void EmitULEB128Value(const MCExpr *Value);
-
- virtual void EmitSLEB128Value(const MCExpr *Value);
-
- virtual void EmitGPRel64Value(const MCExpr *Value);
-
- virtual void EmitGPRel32Value(const MCExpr *Value);
-
-
- virtual void EmitFill(uint64_t NumBytes, uint8_t FillValue);
-
- virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
- unsigned ValueSize = 1,
- unsigned MaxBytesToEmit = 0);
-
- virtual void EmitCodeAlignment(unsigned ByteAlignment,
- unsigned MaxBytesToEmit = 0);
-
- virtual bool EmitValueToOffset(const MCExpr *Offset,
- unsigned char Value = 0);
-
- virtual void EmitFileDirective(StringRef Filename);
- virtual bool EmitDwarfFileDirective(unsigned FileNo, StringRef Directory,
- StringRef Filename, unsigned CUID = 0);
- virtual void EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
- unsigned Column, unsigned Flags,
- unsigned Isa, unsigned Discriminator,
- StringRef FileName);
-
- virtual void EmitIdent(StringRef IdentString);
- virtual void EmitCFISections(bool EH, bool Debug);
- virtual void EmitCFIDefCfa(int64_t Register, int64_t Offset);
- virtual void EmitCFIDefCfaOffset(int64_t Offset);
- virtual void EmitCFIDefCfaRegister(int64_t Register);
- virtual void EmitCFIOffset(int64_t Register, int64_t Offset);
- virtual void EmitCFIPersonality(const MCSymbol *Sym, unsigned Encoding);
- virtual void EmitCFILsda(const MCSymbol *Sym, unsigned Encoding);
- virtual void EmitCFIRememberState();
- virtual void EmitCFIRestoreState();
- virtual void EmitCFISameValue(int64_t Register);
- virtual void EmitCFIRelOffset(int64_t Register, int64_t Offset);
- virtual void EmitCFIAdjustCfaOffset(int64_t Adjustment);
- virtual void EmitCFISignalFrame();
- virtual void EmitCFIUndefined(int64_t Register);
- virtual void EmitCFIRegister(int64_t Register1, int64_t Register2);
- virtual void EmitCFIWindowSave();
-
- virtual void EmitWin64EHStartProc(const MCSymbol *Symbol);
- virtual void EmitWin64EHEndProc();
- virtual void EmitWin64EHStartChained();
- virtual void EmitWin64EHEndChained();
- virtual void EmitWin64EHHandler(const MCSymbol *Sym, bool Unwind,
- bool Except);
- virtual void EmitWin64EHHandlerData();
- virtual void EmitWin64EHPushReg(unsigned Register);
- virtual void EmitWin64EHSetFrame(unsigned Register, unsigned Offset);
- virtual void EmitWin64EHAllocStack(unsigned Size);
- virtual void EmitWin64EHSaveReg(unsigned Register, unsigned Offset);
- virtual void EmitWin64EHSaveXMM(unsigned Register, unsigned Offset);
- virtual void EmitWin64EHPushFrame(bool Code);
- virtual void EmitWin64EHEndProlog();
-
- virtual void EmitInstruction(const MCInst &Inst);
-
- virtual void EmitBundleAlignMode(unsigned AlignPow2);
- virtual void EmitBundleLock(bool AlignToEnd);
- virtual void EmitBundleUnlock();
+ void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) override;
+
+ void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = nullptr,
+ uint64_t Size = 0, unsigned ByteAlignment = 0) override;
+
+ void EmitTBSSSymbol (const MCSection *Section, MCSymbol *Symbol,
+ uint64_t Size, unsigned ByteAlignment = 0) override;
+
+ void EmitBytes(StringRef Data) override;
+
+ void EmitValueImpl(const MCExpr *Value, unsigned Size,
+ const SMLoc &Loc = SMLoc()) override;
+ void EmitIntValue(uint64_t Value, unsigned Size) override;
+
+ void EmitULEB128Value(const MCExpr *Value) override;
+
+ void EmitSLEB128Value(const MCExpr *Value) override;
+
+ void EmitGPRel64Value(const MCExpr *Value) override;
+
+ void EmitGPRel32Value(const MCExpr *Value) override;
+
+
+ void EmitFill(uint64_t NumBytes, uint8_t FillValue) override;
+
+ void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
+ unsigned ValueSize = 1,
+ unsigned MaxBytesToEmit = 0) override;
+
+ void EmitCodeAlignment(unsigned ByteAlignment,
+ unsigned MaxBytesToEmit = 0) override;
+
+ bool EmitValueToOffset(const MCExpr *Offset,
+ unsigned char Value = 0) override;
+
+ void EmitFileDirective(StringRef Filename) override;
+ unsigned EmitDwarfFileDirective(unsigned FileNo, StringRef Directory,
+ StringRef Filename,
+ unsigned CUID = 0) override;
+ void EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
+ unsigned Column, unsigned Flags,
+ unsigned Isa, unsigned Discriminator,
+ StringRef FileName) override;
+ MCSymbol *getDwarfLineTableSymbol(unsigned CUID) override;
+
+ void EmitIdent(StringRef IdentString) override;
+ void EmitCFISections(bool EH, bool Debug) override;
+ void EmitCFIDefCfa(int64_t Register, int64_t Offset) override;
+ void EmitCFIDefCfaOffset(int64_t Offset) override;
+ void EmitCFIDefCfaRegister(int64_t Register) override;
+ void EmitCFIOffset(int64_t Register, int64_t Offset) override;
+ void EmitCFIPersonality(const MCSymbol *Sym, unsigned Encoding) override;
+ void EmitCFILsda(const MCSymbol *Sym, unsigned Encoding) override;
+ void EmitCFIRememberState() override;
+ void EmitCFIRestoreState() override;
+ void EmitCFISameValue(int64_t Register) override;
+ void EmitCFIRelOffset(int64_t Register, int64_t Offset) override;
+ void EmitCFIAdjustCfaOffset(int64_t Adjustment) override;
+ void EmitCFISignalFrame() override;
+ void EmitCFIUndefined(int64_t Register) override;
+ void EmitCFIRegister(int64_t Register1, int64_t Register2) override;
+ void EmitCFIWindowSave() override;
+
+ void EmitWinCFIStartProc(const MCSymbol *Symbol) override;
+ void EmitWinCFIEndProc() override;
+ void EmitWinCFIStartChained() override;
+ void EmitWinCFIEndChained() override;
+ void EmitWinCFIPushReg(unsigned Register) override;
+ void EmitWinCFISetFrame(unsigned Register, unsigned Offset) override;
+ void EmitWinCFIAllocStack(unsigned Size) override;
+ void EmitWinCFISaveReg(unsigned Register, unsigned Offset) override;
+ void EmitWinCFISaveXMM(unsigned Register, unsigned Offset) override;
+ void EmitWinCFIPushFrame(bool Code) override;
+ void EmitWinCFIEndProlog() override;
+
+ void EmitWinEHHandler(const MCSymbol *Sym, bool Unwind, bool Except) override;
+ void EmitWinEHHandlerData() override;
+
+ void EmitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI) override;
+
+ void EmitBundleAlignMode(unsigned AlignPow2) override;
+ void EmitBundleLock(bool AlignToEnd) override;
+ void EmitBundleUnlock() override;
/// EmitRawText - If this file is backed by an assembly streamer, this dumps
/// the specified string in the output .s file. This capability is
/// indicated by the hasRawTextSupport() predicate.
- virtual void EmitRawTextImpl(StringRef String);
+ void EmitRawTextImpl(StringRef String) override;
- virtual void FinishImpl();
+ void FinishImpl() override;
};
} // end anonymous namespace.
@@ -310,27 +291,19 @@ static inline int64_t truncateToSize(int64_t Value, unsigned Bytes) {
return Value & ((uint64_t) (int64_t) -1 >> (64 - Bytes * 8));
}
+void MCAsmStreamer::emitRawComment(const Twine &T, bool TabPrefix) {
+ if (TabPrefix)
+ OS << '\t';
+ OS << MAI->getCommentString() << T;
+ EmitEOL();
+}
+
void MCAsmStreamer::ChangeSection(const MCSection *Section,
const MCExpr *Subsection) {
assert(Section && "Cannot switch to a null section!");
Section->PrintSwitchToSection(*MAI, OS, Subsection);
}
-void MCAsmStreamer::EmitEHSymAttributes(const MCSymbol *Symbol,
- MCSymbol *EHSymbol) {
- if (UseCFI)
- return;
-
- unsigned Flags = FlagMap.lookup(Symbol);
-
- if (Flags & EHGlobal)
- EmitSymbolAttribute(EHSymbol, MCSA_Global);
- if (Flags & EHWeakDefinition)
- EmitSymbolAttribute(EHSymbol, MCSA_WeakDefinition);
- if (Flags & EHPrivateExtern)
- EmitSymbolAttribute(EHSymbol, MCSA_PrivateExtern);
-}
-
void MCAsmStreamer::EmitLabel(MCSymbol *Symbol) {
assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
MCStreamer::EmitLabel(Symbol);
@@ -339,11 +312,24 @@ void MCAsmStreamer::EmitLabel(MCSymbol *Symbol) {
EmitEOL();
}
-void MCAsmStreamer::EmitDebugLabel(MCSymbol *Symbol) {
- assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
- MCStreamer::EmitDebugLabel(Symbol);
+void MCAsmStreamer::EmitLOHDirective(MCLOHType Kind, const MCLOHArgs &Args) {
+ StringRef str = MCLOHIdToName(Kind);
+
+#ifndef NDEBUG
+ int NbArgs = MCLOHIdToNbArgs(Kind);
+ assert(NbArgs != -1 && ((size_t)NbArgs) == Args.size() && "Malformed LOH!");
+ assert(str != "" && "Invalid LOH name");
+#endif
- OS << *Symbol << MAI->getDebugLabelSuffix();
+ OS << "\t" << MCLOHDirectiveName() << " " << str << "\t";
+ bool IsFirst = true;
+ for (MCLOHArgs::const_iterator It = Args.begin(), EndIt = Args.end();
+ It != EndIt; ++It) {
+ if (!IsFirst)
+ OS << ", ";
+ IsFirst = false;
+ OS << **It;
+ }
EmitEOL();
}
@@ -381,6 +367,18 @@ void MCAsmStreamer::EmitDataRegion(MCDataRegionType Kind) {
EmitEOL();
}
+void MCAsmStreamer::EmitVersionMin(MCVersionMinType Kind, unsigned Major,
+ unsigned Minor, unsigned Update) {
+ switch (Kind) {
+ case MCVM_IOSVersionMin: OS << "\t.ios_version_min"; break;
+ case MCVM_OSXVersionMin: OS << "\t.macosx_version_min"; break;
+ }
+ OS << " " << Major << ", " << Minor;
+ if (Update)
+ OS << ", " << Update;
+ EmitEOL();
+}
+
void MCAsmStreamer::EmitThumbFunc(MCSymbol *Func) {
// This needs to emit to a temporary string to get properly quoted
// MCSymbols when they have spaces in them.
@@ -395,8 +393,7 @@ void MCAsmStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
OS << *Symbol << " = " << *Value;
EmitEOL();
- // FIXME: Lift context changes into super class.
- Symbol->setVariableValue(Value);
+ MCStreamer::EmitAssignment(Symbol, Value);
}
void MCAsmStreamer::EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) {
@@ -404,22 +401,6 @@ void MCAsmStreamer::EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) {
EmitEOL();
}
-void MCAsmStreamer::EmitDwarfAdvanceLineAddr(int64_t LineDelta,
- const MCSymbol *LastLabel,
- const MCSymbol *Label,
- unsigned PointerSize) {
- EmitDwarfSetLineAddr(LineDelta, Label, PointerSize);
-}
-
-void MCAsmStreamer::EmitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel,
- const MCSymbol *Label) {
- EmitIntValue(dwarf::DW_CFA_advance_loc4, 1);
- const MCExpr *AddrDelta = BuildSymbolDiff(getContext(), Label, LastLabel);
- AddrDelta = ForceExpAbs(AddrDelta);
- EmitValue(AddrDelta, 4);
-}
-
-
bool MCAsmStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
MCSymbolAttr Attribute) {
switch (Attribute) {
@@ -449,7 +430,6 @@ bool MCAsmStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
return true;
case MCSA_Global: // .globl/.global
OS << MAI->getGlobalDirective();
- FlagMap[Symbol] |= EHGlobal;
break;
case MCSA_Hidden: OS << "\t.hidden\t"; break;
case MCSA_IndirectSymbol: OS << "\t.indirect_symbol\t"; break;
@@ -460,14 +440,12 @@ bool MCAsmStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
case MCSA_SymbolResolver: OS << "\t.symbol_resolver\t"; break;
case MCSA_PrivateExtern:
OS << "\t.private_extern\t";
- FlagMap[Symbol] |= EHPrivateExtern;
break;
case MCSA_Protected: OS << "\t.protected\t"; break;
case MCSA_Reference: OS << "\t.reference\t"; break;
case MCSA_Weak: OS << "\t.weak\t"; break;
case MCSA_WeakDefinition:
OS << "\t.weak_definition\t";
- FlagMap[Symbol] |= EHWeakDefinition;
break;
// .weak_reference
case MCSA_WeakReference: OS << MAI->getWeakRefDirective(); break;
@@ -505,8 +483,13 @@ void MCAsmStreamer::EndCOFFSymbolDef() {
EmitEOL();
}
+void MCAsmStreamer::EmitCOFFSectionIndex(MCSymbol const *Symbol) {
+ OS << "\t.secidx\t" << *Symbol;
+ EmitEOL();
+}
+
void MCAsmStreamer::EmitCOFFSecRel32(MCSymbol const *Symbol) {
- OS << "\t.secrel32\t" << *Symbol << '\n';
+ OS << "\t.secrel32\t" << *Symbol;
EmitEOL();
}
@@ -518,7 +501,7 @@ void MCAsmStreamer::EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {
void MCAsmStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
unsigned ByteAlignment) {
// Common symbols do not belong to any actual section.
- AssignSection(Symbol, NULL);
+ AssignSection(Symbol, nullptr);
OS << "\t.comm\t" << *Symbol << ',' << Size;
if (ByteAlignment != 0) {
@@ -537,7 +520,7 @@ void MCAsmStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
void MCAsmStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
unsigned ByteAlign) {
// Common symbols do not belong to any actual section.
- AssignSection(Symbol, NULL);
+ AssignSection(Symbol, nullptr);
OS << "\t.lcomm\t" << *Symbol << ',' << Size;
if (ByteAlign > 1) {
@@ -568,7 +551,7 @@ void MCAsmStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
const MCSectionMachO *MOSection = ((const MCSectionMachO*)Section);
OS << MOSection->getSegmentName() << "," << MOSection->getSectionName();
- if (Symbol != NULL) {
+ if (Symbol) {
OS << ',' << *Symbol << ',' << Size;
if (ByteAlignment != 0)
OS << ',' << Log2_32(ByteAlignment);
@@ -583,7 +566,7 @@ void MCAsmStreamer::EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
uint64_t Size, unsigned ByteAlignment) {
AssignSection(Symbol, Section);
- assert(Symbol != NULL && "Symbol shouldn't be NULL!");
+ assert(Symbol && "Symbol shouldn't be NULL!");
// Instead of using the Section we'll just use the shortcut.
// This is a mach-o specific directive and section.
OS << ".tbss " << *Symbol << ", " << Size;
@@ -660,28 +643,48 @@ void MCAsmStreamer::EmitIntValue(uint64_t Value, unsigned Size) {
EmitValue(MCConstantExpr::Create(Value, getContext()), Size);
}
-void MCAsmStreamer::EmitValueImpl(const MCExpr *Value, unsigned Size) {
+void MCAsmStreamer::EmitValueImpl(const MCExpr *Value, unsigned Size,
+ const SMLoc &Loc) {
+ assert(Size <= 8 && "Invalid size");
assert(getCurrentSection().first &&
"Cannot emit contents before setting section!");
- const char *Directive = 0;
+ const char *Directive = nullptr;
switch (Size) {
default: break;
case 1: Directive = MAI->getData8bitsDirective(); break;
case 2: Directive = MAI->getData16bitsDirective(); break;
case 4: Directive = MAI->getData32bitsDirective(); break;
- case 8:
- Directive = MAI->getData64bitsDirective();
- // If the target doesn't support 64-bit data, emit as two 32-bit halves.
- if (Directive) break;
+ case 8: Directive = MAI->getData64bitsDirective(); break;
+ }
+
+ if (!Directive) {
int64_t IntValue;
if (!Value->EvaluateAsAbsolute(IntValue))
report_fatal_error("Don't know how to emit this value.");
- if (MAI->isLittleEndian()) {
- EmitIntValue((uint32_t)(IntValue >> 0 ), 4);
- EmitIntValue((uint32_t)(IntValue >> 32), 4);
- } else {
- EmitIntValue((uint32_t)(IntValue >> 32), 4);
- EmitIntValue((uint32_t)(IntValue >> 0 ), 4);
+
+  // We couldn't handle the requested integer size, so we fall back to
+  // breaking the request down into several smaller integers. Since sizes
+  // greater than eight are invalid and a size of eight should have been
+  // handled earlier, we use four bytes as our largest piece of granularity.
+ bool IsLittleEndian = MAI->isLittleEndian();
+ for (unsigned Emitted = 0; Emitted != Size;) {
+ unsigned Remaining = Size - Emitted;
+ // The size of our partial emission must be a power of two less than
+ // eight.
+ unsigned EmissionSize = PowerOf2Floor(Remaining);
+ if (EmissionSize > 4)
+ EmissionSize = 4;
+ // Calculate the byte offset of our partial emission taking into account
+ // the endianness of the target.
+ unsigned ByteOffset =
+ IsLittleEndian ? Emitted : (Remaining - EmissionSize);
+ uint64_t ValueToEmit = IntValue >> (ByteOffset * 8);
+ // We truncate our partial emission to fit within the bounds of the
+ // emission domain. This produces nicer output and silences potential
+ // truncation warnings when round tripping through another assembler.
+ ValueToEmit &= ~0ULL >> (64 - EmissionSize * 8);
+ EmitIntValue(ValueToEmit, EmissionSize);
+ Emitted += EmissionSize;
}
return;
}
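The fallback above handles a size for which the target has no single data directive by splitting the value into power-of-two chunks of at most four bytes, choosing each chunk's byte offset from the target endianness and masking it to the chunk width. A standalone sketch of that chunking arithmetic (helper and type names are illustrative, not part of the patch):

  // Split an integer of Size bytes into pieces of at most four bytes, in the
  // order a little- or big-endian target would emit them.
  #include <cstdint>
  #include <utility>
  #include <vector>

  static std::vector<std::pair<uint64_t, unsigned>>
  splitForEmission(uint64_t IntValue, unsigned Size, bool IsLittleEndian) {
    std::vector<std::pair<uint64_t, unsigned>> Pieces; // (value, size in bytes)
    for (unsigned Emitted = 0; Emitted != Size;) {
      unsigned Remaining = Size - Emitted;
      unsigned EmissionSize = 1;
      while (EmissionSize * 2 <= Remaining && EmissionSize < 4)
        EmissionSize *= 2;                      // PowerOf2Floor, capped at 4
      unsigned ByteOffset = IsLittleEndian ? Emitted : Remaining - EmissionSize;
      uint64_t ValueToEmit = IntValue >> (ByteOffset * 8);
      ValueToEmit &= ~0ULL >> (64 - EmissionSize * 8); // truncate to the chunk
      Pieces.push_back({ValueToEmit, EmissionSize});
      Emitted += EmissionSize;
    }
    return Pieces;
  }
  // An 8-byte value comes out as the low then the high 32-bit half on a
  // little-endian target, and in the opposite order on a big-endian one.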
@@ -714,13 +717,13 @@ void MCAsmStreamer::EmitSLEB128Value(const MCExpr *Value) {
}
void MCAsmStreamer::EmitGPRel64Value(const MCExpr *Value) {
- assert(MAI->getGPRel64Directive() != 0);
+ assert(MAI->getGPRel64Directive() != nullptr);
OS << MAI->getGPRel64Directive() << *Value;
EmitEOL();
}
void MCAsmStreamer::EmitGPRel32Value(const MCExpr *Value) {
- assert(MAI->getGPRel32Directive() != 0);
+ assert(MAI->getGPRel32Directive() != nullptr);
OS << MAI->getGPRel32Directive() << *Value;
EmitEOL();
}
@@ -750,12 +753,19 @@ void MCAsmStreamer::EmitValueToAlignment(unsigned ByteAlignment, int64_t Value,
// emit alignments as a power of two if possible.
if (isPowerOf2_32(ByteAlignment)) {
switch (ValueSize) {
- default: llvm_unreachable("Invalid size for machine code value!");
- case 1: OS << MAI->getAlignDirective(); break;
- // FIXME: use MAI for this!
- case 2: OS << ".p2alignw "; break;
- case 4: OS << ".p2alignl "; break;
- case 8: llvm_unreachable("Unsupported alignment size!");
+ default:
+ llvm_unreachable("Invalid size for machine code value!");
+ case 1:
+ OS << "\t.align\t";
+ break;
+ case 2:
+ OS << ".p2alignw ";
+ break;
+ case 4:
+ OS << ".p2alignl ";
+ break;
+ case 8:
+ llvm_unreachable("Unsupported alignment size!");
}
if (MAI->getAlignmentIsInBytes())
@@ -814,30 +824,42 @@ void MCAsmStreamer::EmitFileDirective(StringRef Filename) {
EmitEOL();
}
-bool MCAsmStreamer::EmitDwarfFileDirective(unsigned FileNo, StringRef Directory,
- StringRef Filename, unsigned CUID) {
+unsigned MCAsmStreamer::EmitDwarfFileDirective(unsigned FileNo,
+ StringRef Directory,
+ StringRef Filename,
+ unsigned CUID) {
+ assert(CUID == 0);
+
+ MCDwarfLineTable &Table = getContext().getMCDwarfLineTable(CUID);
+ unsigned NumFiles = Table.getMCDwarfFiles().size();
+ FileNo = Table.getFile(Directory, Filename, FileNo);
+ if (FileNo == 0)
+ return 0;
+ if (NumFiles == Table.getMCDwarfFiles().size())
+ return FileNo;
+
+ SmallString<128> FullPathName;
+
if (!UseDwarfDirectory && !Directory.empty()) {
if (sys::path::is_absolute(Filename))
- return EmitDwarfFileDirective(FileNo, "", Filename, CUID);
-
- SmallString<128> FullPathName = Directory;
- sys::path::append(FullPathName, Filename);
- return EmitDwarfFileDirective(FileNo, "", FullPathName, CUID);
+ Directory = "";
+ else {
+ FullPathName = Directory;
+ sys::path::append(FullPathName, Filename);
+ Directory = "";
+ Filename = FullPathName;
+ }
}
- if (UseLoc) {
- OS << "\t.file\t" << FileNo << ' ';
- if (!Directory.empty()) {
- PrintQuotedString(Directory, OS);
- OS << ' ';
- }
- PrintQuotedString(Filename, OS);
- EmitEOL();
- // All .file will belong to a single CUID.
- CUID = 0;
+ OS << "\t.file\t" << FileNo << ' ';
+ if (!Directory.empty()) {
+ PrintQuotedString(Directory, OS);
+ OS << ' ';
}
- return this->MCStreamer::EmitDwarfFileDirective(FileNo, Directory, Filename,
- CUID);
+ PrintQuotedString(Filename, OS);
+ EmitEOL();
+
+ return FileNo;
}
void MCAsmStreamer::EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
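When UseDwarfDirectory is false, the rewritten EmitDwarfFileDirective folds the directory into the file name before printing the single-operand .file form: an absolute file name drops the directory, a relative one gets the directory prepended. A sketch of just that folding step (the absolute-path test below is a POSIX-only simplification of sys::path::is_absolute):

  #include <string>

  // Fold Directory into Filename when the output cannot carry a separate
  // directory operand.
  static void foldDirectory(std::string &Directory, std::string &Filename,
                            bool UseDwarfDirectory) {
    if (UseDwarfDirectory || Directory.empty())
      return;                                  // keep the two-operand form
    if (!Filename.empty() && Filename[0] == '/') {
      Directory.clear();                       // absolute path: drop directory
      return;
    }
    Filename = Directory + "/" + Filename;     // relative path: join the two
    Directory.clear();
  }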
@@ -847,9 +869,6 @@ void MCAsmStreamer::EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
StringRef FileName) {
this->MCStreamer::EmitDwarfLocDirective(FileNo, Line, Column, Flags,
Isa, Discriminator, FileName);
- if (!UseLoc)
- return;
-
OS << "\t.loc\t" << FileNo << " " << Line << " " << Column;
if (Flags & DWARF2_FLAG_BASIC_BLOCK)
OS << " basic_block";
@@ -869,9 +888,9 @@ void MCAsmStreamer::EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
}
if (Isa)
- OS << "isa " << Isa;
+ OS << " isa " << Isa;
if (Discriminator)
- OS << "discriminator " << Discriminator;
+ OS << " discriminator " << Discriminator;
if (IsVerboseAsm) {
OS.PadToColumn(MAI->getCommentColumn());
@@ -881,6 +900,12 @@ void MCAsmStreamer::EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
EmitEOL();
}
+MCSymbol *MCAsmStreamer::getDwarfLineTableSymbol(unsigned CUID) {
+ // Always use the zeroth line table, since asm syntax only supports one line
+ // table for now.
+ return MCStreamer::getDwarfLineTableSymbol(0);
+}
+
void MCAsmStreamer::EmitIdent(StringRef IdentString) {
assert(MAI->hasIdentDirective() && ".ident directive not supported");
OS << "\t.ident\t";
@@ -890,10 +915,6 @@ void MCAsmStreamer::EmitIdent(StringRef IdentString) {
void MCAsmStreamer::EmitCFISections(bool EH, bool Debug) {
MCStreamer::EmitCFISections(EH, Debug);
-
- if (!UseCFI)
- return;
-
OS << "\t.cfi_sections ";
if (EH) {
OS << ".eh_frame";
@@ -907,25 +928,14 @@ void MCAsmStreamer::EmitCFISections(bool EH, bool Debug) {
}
void MCAsmStreamer::EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame) {
- if (!UseCFI) {
- RecordProcStart(Frame);
- return;
- }
-
OS << "\t.cfi_startproc";
+ if (Frame.IsSimple)
+ OS << " simple";
EmitEOL();
}
void MCAsmStreamer::EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame) {
- if (!UseCFI) {
- RecordProcEnd(Frame);
- return;
- }
-
- // Put a dummy non-null value in Frame.End to mark that this frame has been
- // closed.
- Frame.End = (MCSymbol *) 1;
-
+ MCStreamer::EmitCFIEndProcImpl(Frame);
OS << "\t.cfi_endproc";
EmitEOL();
}
@@ -942,10 +952,6 @@ void MCAsmStreamer::EmitRegisterName(int64_t Register) {
void MCAsmStreamer::EmitCFIDefCfa(int64_t Register, int64_t Offset) {
MCStreamer::EmitCFIDefCfa(Register, Offset);
-
- if (!UseCFI)
- return;
-
OS << "\t.cfi_def_cfa ";
EmitRegisterName(Register);
OS << ", " << Offset;
@@ -954,20 +960,12 @@ void MCAsmStreamer::EmitCFIDefCfa(int64_t Register, int64_t Offset) {
void MCAsmStreamer::EmitCFIDefCfaOffset(int64_t Offset) {
MCStreamer::EmitCFIDefCfaOffset(Offset);
-
- if (!UseCFI)
- return;
-
OS << "\t.cfi_def_cfa_offset " << Offset;
EmitEOL();
}
void MCAsmStreamer::EmitCFIDefCfaRegister(int64_t Register) {
MCStreamer::EmitCFIDefCfaRegister(Register);
-
- if (!UseCFI)
- return;
-
OS << "\t.cfi_def_cfa_register ";
EmitRegisterName(Register);
EmitEOL();
@@ -975,10 +973,6 @@ void MCAsmStreamer::EmitCFIDefCfaRegister(int64_t Register) {
void MCAsmStreamer::EmitCFIOffset(int64_t Register, int64_t Offset) {
this->MCStreamer::EmitCFIOffset(Register, Offset);
-
- if (!UseCFI)
- return;
-
OS << "\t.cfi_offset ";
EmitRegisterName(Register);
OS << ", " << Offset;
@@ -988,50 +982,30 @@ void MCAsmStreamer::EmitCFIOffset(int64_t Register, int64_t Offset) {
void MCAsmStreamer::EmitCFIPersonality(const MCSymbol *Sym,
unsigned Encoding) {
MCStreamer::EmitCFIPersonality(Sym, Encoding);
-
- if (!UseCFI)
- return;
-
OS << "\t.cfi_personality " << Encoding << ", " << *Sym;
EmitEOL();
}
void MCAsmStreamer::EmitCFILsda(const MCSymbol *Sym, unsigned Encoding) {
MCStreamer::EmitCFILsda(Sym, Encoding);
-
- if (!UseCFI)
- return;
-
OS << "\t.cfi_lsda " << Encoding << ", " << *Sym;
EmitEOL();
}
void MCAsmStreamer::EmitCFIRememberState() {
MCStreamer::EmitCFIRememberState();
-
- if (!UseCFI)
- return;
-
OS << "\t.cfi_remember_state";
EmitEOL();
}
void MCAsmStreamer::EmitCFIRestoreState() {
MCStreamer::EmitCFIRestoreState();
-
- if (!UseCFI)
- return;
-
OS << "\t.cfi_restore_state";
EmitEOL();
}
void MCAsmStreamer::EmitCFISameValue(int64_t Register) {
MCStreamer::EmitCFISameValue(Register);
-
- if (!UseCFI)
- return;
-
OS << "\t.cfi_same_value ";
EmitRegisterName(Register);
EmitEOL();
@@ -1039,10 +1013,6 @@ void MCAsmStreamer::EmitCFISameValue(int64_t Register) {
void MCAsmStreamer::EmitCFIRelOffset(int64_t Register, int64_t Offset) {
MCStreamer::EmitCFIRelOffset(Register, Offset);
-
- if (!UseCFI)
- return;
-
OS << "\t.cfi_rel_offset ";
EmitRegisterName(Register);
OS << ", " << Offset;
@@ -1051,85 +1021,65 @@ void MCAsmStreamer::EmitCFIRelOffset(int64_t Register, int64_t Offset) {
void MCAsmStreamer::EmitCFIAdjustCfaOffset(int64_t Adjustment) {
MCStreamer::EmitCFIAdjustCfaOffset(Adjustment);
-
- if (!UseCFI)
- return;
-
OS << "\t.cfi_adjust_cfa_offset " << Adjustment;
EmitEOL();
}
void MCAsmStreamer::EmitCFISignalFrame() {
MCStreamer::EmitCFISignalFrame();
-
- if (!UseCFI)
- return;
-
OS << "\t.cfi_signal_frame";
EmitEOL();
}
void MCAsmStreamer::EmitCFIUndefined(int64_t Register) {
MCStreamer::EmitCFIUndefined(Register);
-
- if (!UseCFI)
- return;
-
OS << "\t.cfi_undefined " << Register;
EmitEOL();
}
void MCAsmStreamer::EmitCFIRegister(int64_t Register1, int64_t Register2) {
MCStreamer::EmitCFIRegister(Register1, Register2);
-
- if (!UseCFI)
- return;
-
OS << "\t.cfi_register " << Register1 << ", " << Register2;
EmitEOL();
}
void MCAsmStreamer::EmitCFIWindowSave() {
MCStreamer::EmitCFIWindowSave();
-
- if (!UseCFI)
- return;
-
OS << "\t.cfi_window_save";
EmitEOL();
}
-void MCAsmStreamer::EmitWin64EHStartProc(const MCSymbol *Symbol) {
- MCStreamer::EmitWin64EHStartProc(Symbol);
+void MCAsmStreamer::EmitWinCFIStartProc(const MCSymbol *Symbol) {
+ MCStreamer::EmitWinCFIStartProc(Symbol);
OS << ".seh_proc " << *Symbol;
EmitEOL();
}
-void MCAsmStreamer::EmitWin64EHEndProc() {
- MCStreamer::EmitWin64EHEndProc();
+void MCAsmStreamer::EmitWinCFIEndProc() {
+ MCStreamer::EmitWinCFIEndProc();
OS << "\t.seh_endproc";
EmitEOL();
}
-void MCAsmStreamer::EmitWin64EHStartChained() {
- MCStreamer::EmitWin64EHStartChained();
+void MCAsmStreamer::EmitWinCFIStartChained() {
+ MCStreamer::EmitWinCFIStartChained();
OS << "\t.seh_startchained";
EmitEOL();
}
-void MCAsmStreamer::EmitWin64EHEndChained() {
- MCStreamer::EmitWin64EHEndChained();
+void MCAsmStreamer::EmitWinCFIEndChained() {
+ MCStreamer::EmitWinCFIEndChained();
OS << "\t.seh_endchained";
EmitEOL();
}
-void MCAsmStreamer::EmitWin64EHHandler(const MCSymbol *Sym, bool Unwind,
- bool Except) {
- MCStreamer::EmitWin64EHHandler(Sym, Unwind, Except);
+void MCAsmStreamer::EmitWinEHHandler(const MCSymbol *Sym, bool Unwind,
+ bool Except) {
+ MCStreamer::EmitWinEHHandler(Sym, Unwind, Except);
OS << "\t.seh_handler " << *Sym;
if (Unwind)
@@ -1152,14 +1102,14 @@ static const MCSection *getWin64EHTableSection(StringRef suffix,
SectionKind::getDataRel());
}
-void MCAsmStreamer::EmitWin64EHHandlerData() {
- MCStreamer::EmitWin64EHHandlerData();
+void MCAsmStreamer::EmitWinEHHandlerData() {
+ MCStreamer::EmitWinEHHandlerData();
// Switch sections. Don't call SwitchSection directly, because that will
// cause the section switch to be visible in the emitted assembly.
// We only do this so the section switch that terminates the handler
// data block is visible.
- MCWin64EHUnwindInfo *CurFrame = getCurrentW64UnwindInfo();
+ MCWinFrameInfo *CurFrame = getCurrentWinFrameInfo();
StringRef suffix=MCWin64EHUnwindEmitter::GetSectionSuffix(CurFrame->Function);
const MCSection *xdataSect = getWin64EHTableSection(suffix, getContext());
if (xdataSect)
@@ -1169,43 +1119,43 @@ void MCAsmStreamer::EmitWin64EHHandlerData() {
EmitEOL();
}
-void MCAsmStreamer::EmitWin64EHPushReg(unsigned Register) {
- MCStreamer::EmitWin64EHPushReg(Register);
+void MCAsmStreamer::EmitWinCFIPushReg(unsigned Register) {
+ MCStreamer::EmitWinCFIPushReg(Register);
OS << "\t.seh_pushreg " << Register;
EmitEOL();
}
-void MCAsmStreamer::EmitWin64EHSetFrame(unsigned Register, unsigned Offset) {
- MCStreamer::EmitWin64EHSetFrame(Register, Offset);
+void MCAsmStreamer::EmitWinCFISetFrame(unsigned Register, unsigned Offset) {
+ MCStreamer::EmitWinCFISetFrame(Register, Offset);
OS << "\t.seh_setframe " << Register << ", " << Offset;
EmitEOL();
}
-void MCAsmStreamer::EmitWin64EHAllocStack(unsigned Size) {
- MCStreamer::EmitWin64EHAllocStack(Size);
+void MCAsmStreamer::EmitWinCFIAllocStack(unsigned Size) {
+ MCStreamer::EmitWinCFIAllocStack(Size);
OS << "\t.seh_stackalloc " << Size;
EmitEOL();
}
-void MCAsmStreamer::EmitWin64EHSaveReg(unsigned Register, unsigned Offset) {
- MCStreamer::EmitWin64EHSaveReg(Register, Offset);
+void MCAsmStreamer::EmitWinCFISaveReg(unsigned Register, unsigned Offset) {
+ MCStreamer::EmitWinCFISaveReg(Register, Offset);
OS << "\t.seh_savereg " << Register << ", " << Offset;
EmitEOL();
}
-void MCAsmStreamer::EmitWin64EHSaveXMM(unsigned Register, unsigned Offset) {
- MCStreamer::EmitWin64EHSaveXMM(Register, Offset);
+void MCAsmStreamer::EmitWinCFISaveXMM(unsigned Register, unsigned Offset) {
+ MCStreamer::EmitWinCFISaveXMM(Register, Offset);
OS << "\t.seh_savexmm " << Register << ", " << Offset;
EmitEOL();
}
-void MCAsmStreamer::EmitWin64EHPushFrame(bool Code) {
- MCStreamer::EmitWin64EHPushFrame(Code);
+void MCAsmStreamer::EmitWinCFIPushFrame(bool Code) {
+ MCStreamer::EmitWinCFIPushFrame(Code);
OS << "\t.seh_pushframe";
if (Code)
@@ -1213,19 +1163,20 @@ void MCAsmStreamer::EmitWin64EHPushFrame(bool Code) {
EmitEOL();
}
-void MCAsmStreamer::EmitWin64EHEndProlog(void) {
- MCStreamer::EmitWin64EHEndProlog();
+void MCAsmStreamer::EmitWinCFIEndProlog(void) {
+ MCStreamer::EmitWinCFIEndProlog();
OS << "\t.seh_endprologue";
EmitEOL();
}
-void MCAsmStreamer::AddEncodingComment(const MCInst &Inst) {
+void MCAsmStreamer::AddEncodingComment(const MCInst &Inst,
+ const MCSubtargetInfo &STI) {
raw_ostream &OS = GetCommentOS();
SmallString<256> Code;
SmallVector<MCFixup, 4> Fixups;
raw_svector_ostream VecOS(Code);
- Emitter->EncodeInstruction(Inst, VecOS, Fixups);
+ Emitter->EncodeInstruction(Inst, VecOS, Fixups, STI);
VecOS.flush();
// If we are showing fixups, create symbolic markers in the encoded
@@ -1304,13 +1255,13 @@ void MCAsmStreamer::AddEncodingComment(const MCInst &Inst) {
}
}
-void MCAsmStreamer::EmitInstruction(const MCInst &Inst) {
+void MCAsmStreamer::EmitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI) {
assert(getCurrentSection().first &&
"Cannot emit contents before setting section!");
// Show the encoding in a comment if we have a code emitter.
if (Emitter)
- AddEncodingComment(Inst);
+ AddEncodingComment(Inst, STI);
// Show the MCInst if enabled.
if (ShowInst) {
@@ -1354,27 +1305,28 @@ void MCAsmStreamer::EmitRawTextImpl(StringRef String) {
}
void MCAsmStreamer::FinishImpl() {
- // FIXME: This header is duplicated with MCObjectStreamer
- // Dump out the dwarf file & directory tables and line tables.
- const MCSymbol *LineSectionSymbol = NULL;
- if (getContext().hasDwarfFiles() && !UseLoc)
- LineSectionSymbol = MCDwarfFileTable::Emit(this);
-
// If we are generating dwarf for assembly source files dump out the sections.
if (getContext().getGenDwarfForAssembly())
- MCGenDwarfInfo::Emit(this, LineSectionSymbol);
-
- if (!UseCFI)
- EmitFrames(AsmBackend.get(), false);
+ MCGenDwarfInfo::Emit(this);
+
+ // Emit the label for the line table, if requested - since the rest of the
+ // line table will be defined by .loc/.file directives, and not emitted
+ // directly, the label is the only work required here.
+ auto &Tables = getContext().getMCDwarfLineTables();
+ if (!Tables.empty()) {
+ assert(Tables.size() == 1 && "asm output only supports one line table");
+ if (auto *Label = Tables.begin()->second.getLabel()) {
+ SwitchSection(getContext().getObjectFileInfo()->getDwarfLineSection());
+ EmitLabel(Label);
+ }
+ }
}
MCStreamer *llvm::createAsmStreamer(MCContext &Context,
- MCTargetStreamer *TargetStreamer,
formatted_raw_ostream &OS,
- bool isVerboseAsm, bool useLoc, bool useCFI,
- bool useDwarfDirectory, MCInstPrinter *IP,
- MCCodeEmitter *CE, MCAsmBackend *MAB,
- bool ShowInst) {
- return new MCAsmStreamer(Context, TargetStreamer, OS, isVerboseAsm, useLoc,
- useCFI, useDwarfDirectory, IP, CE, MAB, ShowInst);
+ bool isVerboseAsm, bool useDwarfDirectory,
+ MCInstPrinter *IP, MCCodeEmitter *CE,
+ MCAsmBackend *MAB, bool ShowInst) {
+ return new MCAsmStreamer(Context, OS, isVerboseAsm, useDwarfDirectory, IP, CE,
+ MAB, ShowInst);
}
diff --git a/contrib/llvm/lib/MC/MCAssembler.cpp b/contrib/llvm/lib/MC/MCAssembler.cpp
index 68111f1..a8aad71 100644
--- a/contrib/llvm/lib/MC/MCAssembler.cpp
+++ b/contrib/llvm/lib/MC/MCAssembler.cpp
@@ -7,7 +7,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "assembler"
#include "llvm/MC/MCAssembler.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
@@ -28,9 +27,12 @@
#include "llvm/Support/LEB128.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
-
+#include "llvm/MC/MCSectionELF.h"
+#include <tuple>
using namespace llvm;
+#define DEBUG_TYPE "assembler"
+
namespace {
namespace stats {
STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total");
@@ -116,36 +118,89 @@ uint64_t MCAsmLayout::getFragmentOffset(const MCFragment *F) const {
return F->Offset;
}
-uint64_t MCAsmLayout::getSymbolOffset(const MCSymbolData *SD) const {
+// Simple getSymbolOffset helper for the non-variable case.
+static bool getLabelOffset(const MCAsmLayout &Layout, const MCSymbolData &SD,
+ bool ReportError, uint64_t &Val) {
+ if (!SD.getFragment()) {
+ if (ReportError)
+ report_fatal_error("unable to evaluate offset to undefined symbol '" +
+ SD.getSymbol().getName() + "'");
+ return false;
+ }
+ Val = Layout.getFragmentOffset(SD.getFragment()) + SD.getOffset();
+ return true;
+}
+
+static bool getSymbolOffsetImpl(const MCAsmLayout &Layout,
+ const MCSymbolData *SD, bool ReportError,
+ uint64_t &Val) {
const MCSymbol &S = SD->getSymbol();
- // If this is a variable, then recursively evaluate now.
- if (S.isVariable()) {
- MCValue Target;
- if (!S.getVariableValue()->EvaluateAsRelocatable(Target, *this))
- report_fatal_error("unable to evaluate offset for variable '" +
- S.getName() + "'");
+ if (!S.isVariable())
+ return getLabelOffset(Layout, *SD, ReportError, Val);
- // Verify that any used symbols are defined.
- if (Target.getSymA() && Target.getSymA()->getSymbol().isUndefined())
- report_fatal_error("unable to evaluate offset to undefined symbol '" +
- Target.getSymA()->getSymbol().getName() + "'");
- if (Target.getSymB() && Target.getSymB()->getSymbol().isUndefined())
- report_fatal_error("unable to evaluate offset to undefined symbol '" +
- Target.getSymB()->getSymbol().getName() + "'");
-
- uint64_t Offset = Target.getConstant();
- if (Target.getSymA())
- Offset += getSymbolOffset(&Assembler.getSymbolData(
- Target.getSymA()->getSymbol()));
- if (Target.getSymB())
- Offset -= getSymbolOffset(&Assembler.getSymbolData(
- Target.getSymB()->getSymbol()));
- return Offset;
+ // If SD is a variable, evaluate it.
+ MCValue Target;
+ if (!S.getVariableValue()->EvaluateAsValue(Target, &Layout))
+ report_fatal_error("unable to evaluate offset for variable '" +
+ S.getName() + "'");
+
+ uint64_t Offset = Target.getConstant();
+
+ const MCAssembler &Asm = Layout.getAssembler();
+
+ const MCSymbolRefExpr *A = Target.getSymA();
+ if (A) {
+ uint64_t ValA;
+ if (!getLabelOffset(Layout, Asm.getSymbolData(A->getSymbol()), ReportError,
+ ValA))
+ return false;
+ Offset += ValA;
}
- assert(SD->getFragment() && "Invalid getOffset() on undefined symbol!");
- return getFragmentOffset(SD->getFragment()) + SD->getOffset();
+ const MCSymbolRefExpr *B = Target.getSymB();
+ if (B) {
+ uint64_t ValB;
+ if (!getLabelOffset(Layout, Asm.getSymbolData(B->getSymbol()), ReportError,
+ ValB))
+ return false;
+ Offset -= ValB;
+ }
+
+ Val = Offset;
+ return true;
+}
+
+bool MCAsmLayout::getSymbolOffset(const MCSymbolData *SD, uint64_t &Val) const {
+ return getSymbolOffsetImpl(*this, SD, false, Val);
+}
+
+uint64_t MCAsmLayout::getSymbolOffset(const MCSymbolData *SD) const {
+ uint64_t Val;
+ getSymbolOffsetImpl(*this, SD, true, Val);
+ return Val;
+}
+
+const MCSymbol *MCAsmLayout::getBaseSymbol(const MCSymbol &Symbol) const {
+ if (!Symbol.isVariable())
+ return &Symbol;
+
+ const MCExpr *Expr = Symbol.getVariableValue();
+ MCValue Value;
+ if (!Expr->EvaluateAsValue(Value, this))
+ llvm_unreachable("Invalid Expression");
+
+ const MCSymbolRefExpr *RefB = Value.getSymB();
+ if (RefB)
+ Assembler.getContext().FatalError(
+ SMLoc(), Twine("symbol '") + RefB->getSymbol().getName() +
+ "' could not be evaluated in a subtraction expression");
+
+ const MCSymbolRefExpr *A = Value.getSymA();
+ if (!A)
+ return nullptr;
+
+ return &A->getSymbol();
}
uint64_t MCAsmLayout::getSectionAddressSize(const MCSectionData *SD) const {
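getSymbolOffsetImpl above resolves a variable symbol by evaluating its value and combining the constant term with the label offsets of the referenced symbols, i.e. Offset = constant + offset(SymA) - offset(SymB). A small sketch of that combination rule (types and names are illustrative, not part of the patch):

  #include <cstdint>

  // For a variable such as "bar = foo - baz + 16" the layout offset is
  //   offset(bar) = 16 + offset(foo) - offset(baz).
  struct EvaluatedTarget {
    int64_t Constant;   // Target.getConstant()
    bool HasA, HasB;    // whether SymA / SymB are present
    uint64_t OffsetA;   // label offset of SymA, valid only if HasA
    uint64_t OffsetB;   // label offset of SymB, valid only if HasB
  };

  static uint64_t combineOffsets(const EvaluatedTarget &T) {
    uint64_t Offset = T.Constant;
    if (T.HasA)
      Offset += T.OffsetA;
    if (T.HasB)
      Offset -= T.OffsetB;
    return Offset;
  }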
@@ -212,7 +267,7 @@ MCFragment::~MCFragment() {
}
MCFragment::MCFragment(FragmentType _Kind, MCSectionData *_Parent)
- : Kind(_Kind), Parent(_Parent), Atom(0), Offset(~UINT64_C(0))
+ : Kind(_Kind), Parent(_Parent), Atom(nullptr), Offset(~UINT64_C(0))
{
if (Parent)
Parent->getFragmentList().push_back(this);
@@ -230,7 +285,7 @@ MCEncodedFragmentWithFixups::~MCEncodedFragmentWithFixups() {
/* *** */
-MCSectionData::MCSectionData() : Section(0) {}
+MCSectionData::MCSectionData() : Section(nullptr) {}
MCSectionData::MCSectionData(const MCSection &_Section, MCAssembler *A)
: Section(&_Section),
@@ -250,7 +305,7 @@ MCSectionData::getSubsectionInsertionPoint(unsigned Subsection) {
SmallVectorImpl<std::pair<unsigned, MCFragment *> >::iterator MI =
std::lower_bound(SubsectionFragmentMap.begin(), SubsectionFragmentMap.end(),
- std::make_pair(Subsection, (MCFragment *)0));
+ std::make_pair(Subsection, (MCFragment *)nullptr));
bool ExactMatch = false;
if (MI != SubsectionFragmentMap.end()) {
ExactMatch = MI->first == Subsection;
@@ -275,13 +330,13 @@ MCSectionData::getSubsectionInsertionPoint(unsigned Subsection) {
/* *** */
-MCSymbolData::MCSymbolData() : Symbol(0) {}
+MCSymbolData::MCSymbolData() : Symbol(nullptr) {}
MCSymbolData::MCSymbolData(const MCSymbol &_Symbol, MCFragment *_Fragment,
uint64_t _Offset, MCAssembler *A)
: Symbol(&_Symbol), Fragment(_Fragment), Offset(_Offset),
IsExternal(false), IsPrivateExtern(false),
- CommonSize(0), SymbolSize(0), CommonAlign(0),
+ CommonSize(0), SymbolSize(nullptr), CommonAlign(0),
Flags(0), Index(0)
{
if (A)
@@ -296,6 +351,7 @@ MCAssembler::MCAssembler(MCContext &Context_, MCAsmBackend &Backend_,
: Context(Context_), Backend(Backend_), Emitter(Emitter_), Writer(Writer_),
OS(OS_), BundleAlignSize(0), RelaxAll(false), NoExecStack(false),
SubsectionsViaSymbols(false), ELFHeaderEFlags(0) {
+ VersionMinInfo.Major = 0; // Major version == 0 for "none specified"
}
MCAssembler::~MCAssembler() {
@@ -318,6 +374,32 @@ void MCAssembler::reset() {
getBackend().reset();
getEmitter().reset();
getWriter().reset();
+ getLOHContainer().reset();
+}
+
+bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const {
+ if (ThumbFuncs.count(Symbol))
+ return true;
+
+ if (!Symbol->isVariable())
+ return false;
+
+ // FIXME: It looks like gas supports some cases of the form "foo + 2". It
+ // is not clear if that is a bug or a feature.
+ const MCExpr *Expr = Symbol->getVariableValue();
+ const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(Expr);
+ if (!Ref)
+ return false;
+
+ if (Ref->getKind() != MCSymbolRefExpr::VK_None)
+ return false;
+
+ const MCSymbol &Sym = Ref->getSymbol();
+ if (!isThumbFunc(&Sym))
+ return false;
+
+ ThumbFuncs.insert(Symbol); // Cache it.
+ return true;
}
bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const {
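isThumbFunc above chases plain (VK_None) symbol aliases back to a known Thumb function and caches positive answers in ThumbFuncs so later queries are cheap. A generic sketch of that memoized alias chase (illustrative names; std::string stands in for MCSymbol):

  #include <map>
  #include <set>
  #include <string>

  // A name is "thumb" if it is already cached, or if it is a plain alias of a
  // thumb name; positive results found through the chain are cached.
  static bool isThumb(const std::string &Name,
                      const std::map<std::string, std::string> &Aliases,
                      std::set<std::string> &ThumbFuncs) {
    if (ThumbFuncs.count(Name))
      return true;                     // known thumb function
    std::map<std::string, std::string>::const_iterator It = Aliases.find(Name);
    if (It == Aliases.end())
      return false;                    // not an alias of anything
    if (!isThumb(It->second, Aliases, ThumbFuncs))
      return false;                    // the aliasee is not thumb either
    ThumbFuncs.insert(Name);           // cache the positive answer
    return true;
  }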
@@ -340,24 +422,39 @@ const MCSymbolData *MCAssembler::getAtom(const MCSymbolData *SD) const {
// Absolute and undefined symbols have no defining atom.
if (!SD->getFragment())
- return 0;
+ return nullptr;
// Non-linker visible symbols in sections which can't be atomized have no
// defining atom.
if (!getBackend().isSectionAtomizable(
SD->getFragment()->getParent()->getSection()))
- return 0;
+ return nullptr;
// Otherwise, return the atom for the containing fragment.
return SD->getFragment()->getAtom();
}
+// Try to fully compute Expr to an absolute value and, if that fails, produce
+// a relocatable expr.
+// FIXME: Should this be the behavior of EvaluateAsRelocatable itself?
+static bool evaluate(const MCExpr &Expr, const MCAsmLayout &Layout,
+ MCValue &Target) {
+ if (Expr.EvaluateAsValue(Target, &Layout))
+ if (Target.isAbsolute())
+ return true;
+ return Expr.EvaluateAsRelocatable(Target, &Layout);
+}
+
bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout,
const MCFixup &Fixup, const MCFragment *DF,
MCValue &Target, uint64_t &Value) const {
++stats::evaluateFixup;
- if (!Fixup.getValue()->EvaluateAsRelocatable(Target, Layout))
+ // FIXME: This code has some duplication with RecordRelocation. We should
+ // probably merge the two into a single callback that tries to evaluate a
+ // fixup and records a relocation if one is needed.
+ const MCExpr *Expr = Fixup.getValue();
+ if (!evaluate(*Expr, Layout, Target))
getContext().FatalError(Fixup.getLoc(), "expected relocatable expression");
bool IsPCRel = Backend.getFixupKindInfo(
@@ -701,8 +798,13 @@ void MCAssembler::writeSectionData(const MCSectionData *SD,
assert(DF.fixup_begin() == DF.fixup_end() &&
"Cannot have fixups in virtual section!");
for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i)
- assert(DF.getContents()[i] == 0 &&
- "Invalid data value for virtual section!");
+ if (DF.getContents()[i]) {
+ if (auto *ELFSec = dyn_cast<const MCSectionELF>(&SD->getSection()))
+ report_fatal_error("non-zero initializer found in section '" +
+ ELFSec->getSectionName() + "'");
+ else
+ report_fatal_error("non-zero initializer found in virtual section");
+ }
break;
}
case MCFragment::FT_Align:
@@ -734,21 +836,23 @@ void MCAssembler::writeSectionData(const MCSectionData *SD,
Layout.getSectionAddressSize(SD));
}
-
-uint64_t MCAssembler::handleFixup(const MCAsmLayout &Layout,
- MCFragment &F,
- const MCFixup &Fixup) {
- // Evaluate the fixup.
- MCValue Target;
- uint64_t FixedValue;
- if (!evaluateFixup(Layout, Fixup, &F, Target, FixedValue)) {
- // The fixup was unresolved, we need a relocation. Inform the object
- // writer of the relocation, and give it an opportunity to adjust the
- // fixup value if need be.
- getWriter().RecordRelocation(*this, Layout, &F, Fixup, Target, FixedValue);
- }
- return FixedValue;
- }
+std::pair<uint64_t, bool> MCAssembler::handleFixup(const MCAsmLayout &Layout,
+ MCFragment &F,
+ const MCFixup &Fixup) {
+ // Evaluate the fixup.
+ MCValue Target;
+ uint64_t FixedValue;
+ bool IsPCRel = Backend.getFixupKindInfo(Fixup.getKind()).Flags &
+ MCFixupKindInfo::FKF_IsPCRel;
+ if (!evaluateFixup(Layout, Fixup, &F, Target, FixedValue)) {
+ // The fixup was unresolved, we need a relocation. Inform the object
+ // writer of the relocation, and give it an opportunity to adjust the
+ // fixup value if need be.
+ getWriter().RecordRelocation(*this, Layout, &F, Fixup, Target, IsPCRel,
+ FixedValue);
+ }
+ return std::make_pair(FixedValue, IsPCRel);
+}
void MCAssembler::Finish() {
DEBUG_WITH_TYPE("mc-dump", {
@@ -811,9 +915,11 @@ void MCAssembler::Finish() {
for (MCEncodedFragmentWithFixups::fixup_iterator it3 = F->fixup_begin(),
ie3 = F->fixup_end(); it3 != ie3; ++it3) {
MCFixup &Fixup = *it3;
- uint64_t FixedValue = handleFixup(Layout, *F, Fixup);
+ uint64_t FixedValue;
+ bool IsPCRel;
+ std::tie(FixedValue, IsPCRel) = handleFixup(Layout, *F, Fixup);
getBackend().applyFixup(Fixup, F->getContents().data(),
- F->getContents().size(), FixedValue);
+ F->getContents().size(), FixedValue, IsPCRel);
}
}
}
@@ -875,7 +981,7 @@ bool MCAssembler::relaxInstruction(MCAsmLayout &Layout,
SmallVector<MCFixup, 4> Fixups;
SmallString<256> Code;
raw_svector_ostream VecOS(Code);
- getEmitter().EncodeInstruction(Relaxed, VecOS, Fixups);
+ getEmitter().EncodeInstruction(Relaxed, VecOS, Fixups, F.getSubtargetInfo());
VecOS.flush();
// Update the fragment.
@@ -942,7 +1048,7 @@ bool MCAssembler::layoutSectionOnce(MCAsmLayout &Layout, MCSectionData &SD) {
// remain NULL if none were relaxed.
// When a fragment is relaxed, all the fragments following it should get
// invalidated because their offset is going to change.
- MCFragment *FirstRelaxedFragment = NULL;
+ MCFragment *FirstRelaxedFragment = nullptr;
// Attempt to relax all the fragments in the section.
for (MCSectionData::iterator I = SD.begin(), IE = SD.end(); I != IE; ++I) {
@@ -1137,7 +1243,7 @@ void MCSectionData::dump() {
OS << "]>";
}
-void MCSymbolData::dump() {
+void MCSymbolData::dump() const {
raw_ostream &OS = llvm::errs();
OS << "<MCSymbolData Symbol:" << getSymbol()
diff --git a/contrib/llvm/lib/MC/MCContext.cpp b/contrib/llvm/lib/MC/MCContext.cpp
index a0acda5..7702da3 100644
--- a/contrib/llvm/lib/MC/MCContext.cpp
+++ b/contrib/llvm/lib/MC/MCContext.cpp
@@ -25,43 +25,31 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Signals.h"
#include "llvm/Support/SourceMgr.h"
-
#include <map>
using namespace llvm;
-typedef std::pair<std::string, std::string> SectionGroupPair;
-
-typedef StringMap<const MCSectionMachO*> MachOUniqueMapTy;
-typedef std::map<SectionGroupPair, const MCSectionELF *> ELFUniqueMapTy;
-typedef std::map<SectionGroupPair, const MCSectionCOFF *> COFFUniqueMapTy;
-
MCContext::MCContext(const MCAsmInfo *mai, const MCRegisterInfo *mri,
const MCObjectFileInfo *mofi, const SourceMgr *mgr,
- bool DoAutoReset) :
- SrcMgr(mgr), MAI(mai), MRI(mri), MOFI(mofi),
- Allocator(), Symbols(Allocator), UsedNames(Allocator),
- NextUniqueID(0),
- CurrentDwarfLoc(0,0,0,DWARF2_FLAG_IS_STMT,0,0),
- DwarfLocSeen(false), GenDwarfForAssembly(false), GenDwarfFileNumber(0),
- AllowTemporaryLabels(true), DwarfCompileUnitID(0), AutoReset(DoAutoReset) {
-
- error_code EC = llvm::sys::fs::current_path(CompilationDir);
+ bool DoAutoReset)
+ : SrcMgr(mgr), MAI(mai), MRI(mri), MOFI(mofi), Allocator(),
+ Symbols(Allocator), UsedNames(Allocator), NextUniqueID(0),
+ CurrentDwarfLoc(0, 0, 0, DWARF2_FLAG_IS_STMT, 0, 0), DwarfLocSeen(false),
+ GenDwarfForAssembly(false), GenDwarfFileNumber(0), DwarfVersion(4),
+ AllowTemporaryLabels(true), DwarfCompileUnitID(0),
+ AutoReset(DoAutoReset) {
+
+ std::error_code EC = llvm::sys::fs::current_path(CompilationDir);
if (EC)
CompilationDir.clear();
- MachOUniquingMap = 0;
- ELFUniquingMap = 0;
- COFFUniquingMap = 0;
-
SecureLogFile = getenv("AS_SECURE_LOG_FILE");
- SecureLog = 0;
+ SecureLog = nullptr;
SecureLogUsed = false;
- if (SrcMgr && SrcMgr->getNumBuffers() > 0)
- MainFileName = SrcMgr->getMemoryBuffer(0)->getBufferIdentifier();
- else
- MainFileName = "";
+ if (SrcMgr && SrcMgr->getNumBuffers())
+ MainFileName =
+ SrcMgr->getMemoryBuffer(SrcMgr->getMainFileID())->getBufferIdentifier();
}
MCContext::~MCContext() {
@@ -71,7 +59,7 @@ MCContext::~MCContext() {
// NOTE: The symbols are all allocated out of a bump pointer allocator,
// we don't need to free them here.
-
+
// If the stream for the .secure_log_unique directive was created free it.
delete (raw_ostream*)SecureLog;
}
@@ -85,23 +73,15 @@ void MCContext::reset() {
Symbols.clear();
Allocator.Reset();
Instances.clear();
- MCDwarfFilesCUMap.clear();
- MCDwarfDirsCUMap.clear();
+ MCDwarfLineTablesCUMap.clear();
MCGenDwarfLabelEntries.clear();
DwarfDebugFlags = StringRef();
- MCLineSections.clear();
- MCLineSectionOrder.clear();
DwarfCompileUnitID = 0;
- MCLineTableSymbols.clear();
CurrentDwarfLoc = MCDwarfLoc(0,0,0,DWARF2_FLAG_IS_STMT,0,0);
- // If we have the MachO uniquing map, free it.
- delete (MachOUniqueMapTy*)MachOUniquingMap;
- delete (ELFUniqueMapTy*)ELFUniquingMap;
- delete (COFFUniqueMapTy*)COFFUniquingMap;
- MachOUniquingMap = 0;
- ELFUniquingMap = 0;
- COFFUniquingMap = 0;
+ MachOUniquingMap.clear();
+ ELFUniquingMap.clear();
+ COFFUniquingMap.clear();
NextUniqueID = 0;
AllowTemporaryLabels = true;
@@ -138,7 +118,7 @@ MCSymbol *MCContext::CreateSymbol(StringRef Name) {
StringMapEntry<bool> *NameEntry = &UsedNames.GetOrCreateValue(Name);
if (NameEntry->getValue()) {
- assert(isTemporary && "Cannot rename non temporary symbols");
+ assert(isTemporary && "Cannot rename non-temporary symbols");
SmallString<128> NewName = Name;
do {
NewName.resize(Name.size());
@@ -157,8 +137,14 @@ MCSymbol *MCContext::CreateSymbol(StringRef Name) {
MCSymbol *MCContext::GetOrCreateSymbol(const Twine &Name) {
SmallString<128> NameSV;
- Name.toVector(NameSV);
- return GetOrCreateSymbol(NameSV.str());
+ return GetOrCreateSymbol(Name.toStringRef(NameSV));
+}
+
+MCSymbol *MCContext::CreateLinkerPrivateTempSymbol() {
+ SmallString<128> NameSV;
+ raw_svector_ostream(NameSV)
+ << MAI->getLinkerPrivateGlobalPrefix() << "tmp" << NextUniqueID++;
+ return CreateSymbol(NameSV);
}
MCSymbol *MCContext::CreateTempSymbol() {
@@ -168,32 +154,39 @@ MCSymbol *MCContext::CreateTempSymbol() {
return CreateSymbol(NameSV);
}
-unsigned MCContext::NextInstance(int64_t LocalLabelVal) {
+unsigned MCContext::NextInstance(unsigned LocalLabelVal) {
MCLabel *&Label = Instances[LocalLabelVal];
if (!Label)
Label = new (*this) MCLabel(0);
return Label->incInstance();
}
-unsigned MCContext::GetInstance(int64_t LocalLabelVal) {
+unsigned MCContext::GetInstance(unsigned LocalLabelVal) {
MCLabel *&Label = Instances[LocalLabelVal];
if (!Label)
Label = new (*this) MCLabel(0);
return Label->getInstance();
}
-MCSymbol *MCContext::CreateDirectionalLocalSymbol(int64_t LocalLabelVal) {
- return GetOrCreateSymbol(Twine(MAI->getPrivateGlobalPrefix()) +
- Twine(LocalLabelVal) +
- "\2" +
- Twine(NextInstance(LocalLabelVal)));
+MCSymbol *MCContext::getOrCreateDirectionalLocalSymbol(unsigned LocalLabelVal,
+ unsigned Instance) {
+ MCSymbol *&Sym = LocalSymbols[std::make_pair(LocalLabelVal, Instance)];
+ if (!Sym)
+ Sym = CreateTempSymbol();
+ return Sym;
}
-MCSymbol *MCContext::GetDirectionalLocalSymbol(int64_t LocalLabelVal,
- int bORf) {
- return GetOrCreateSymbol(Twine(MAI->getPrivateGlobalPrefix()) +
- Twine(LocalLabelVal) +
- "\2" +
- Twine(GetInstance(LocalLabelVal) + bORf));
+
+MCSymbol *MCContext::CreateDirectionalLocalSymbol(unsigned LocalLabelVal) {
+ unsigned Instance = NextInstance(LocalLabelVal);
+ return getOrCreateDirectionalLocalSymbol(LocalLabelVal, Instance);
+}
+
+MCSymbol *MCContext::GetDirectionalLocalSymbol(unsigned LocalLabelVal,
+ bool Before) {
+ unsigned Instance = GetInstance(LocalLabelVal);
+ if (!Before)
+ ++Instance;
+ return getOrCreateDirectionalLocalSymbol(LocalLabelVal, Instance);
}
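
The refactor above keys assembler-local labels ("1:", "1b", "1f") by a (label number, instance) pair instead of mangling the instance into the symbol name. The following standalone sketch — invented names, not the LLVM classes — shows how that keying makes a forward reference and the later definition resolve to the same temporary symbol:

// Standalone sketch (not LLVM code): how (label, instance) keys resolve
// assembler-style "1:", "1b", "1f" references to unique temporary names.
#include <cstdio>
#include <map>
#include <string>
#include <utility>

struct LocalLabelTable {
  std::map<unsigned, unsigned> Instances;                       // label -> current instance
  std::map<std::pair<unsigned, unsigned>, std::string> Symbols; // (label, instance) -> name
  unsigned NextTemp = 0;

  std::string &get(unsigned Label, unsigned Instance) {
    std::string &S = Symbols[{Label, Instance}];
    if (S.empty())
      S = ".Ltmp" + std::to_string(NextTemp++); // lazily create one temp per pair
    return S;
  }
  // "N:" definition: bump the instance count and name the new definition.
  std::string define(unsigned Label) { return get(Label, ++Instances[Label]); }
  // "Nb" refers to the latest definition, "Nf" to the one that will come next.
  std::string ref(unsigned Label, bool Before) {
    unsigned Inst = Instances[Label];
    return get(Label, Before ? Inst : Inst + 1);
  }
};

int main() {
  LocalLabelTable T;
  std::printf("%s\n", T.ref(1, /*Before=*/false).c_str()); // "1f" before any "1:"
  std::printf("%s\n", T.define(1).c_str());                // first "1:" -> same temp symbol
  std::printf("%s\n", T.ref(1, /*Before=*/true).c_str());  // "1b" -> same temp symbol again
}

All three lookups print the same name, which is the point of the (label, instance) keying.
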
MCSymbol *MCContext::LookupSymbol(StringRef Name) const {
@@ -219,11 +212,6 @@ getMachOSection(StringRef Segment, StringRef Section,
// may not have the same flags as the requested section, if so this should be
// diagnosed by the client as an error.
- // Create the map if it doesn't already exist.
- if (MachOUniquingMap == 0)
- MachOUniquingMap = new MachOUniqueMapTy();
- MachOUniqueMapTy &Map = *(MachOUniqueMapTy*)MachOUniquingMap;
-
// Form the name to look up.
SmallString<64> Name;
Name += Segment;
@@ -231,7 +219,7 @@ getMachOSection(StringRef Segment, StringRef Section,
Name += Section;
// Do the lookup, if we have a hit, return it.
- const MCSectionMachO *&Entry = Map[Name.str()];
+ const MCSectionMachO *&Entry = MachOUniquingMap[Name.str()];
if (Entry) return Entry;
// Otherwise, return a new section.
@@ -245,64 +233,71 @@ getELFSection(StringRef Section, unsigned Type, unsigned Flags,
return getELFSection(Section, Type, Flags, Kind, 0, "");
}
+void MCContext::renameELFSection(const MCSectionELF *Section, StringRef Name) {
+ StringRef GroupName;
+ if (const MCSymbol *Group = Section->getGroup())
+ GroupName = Group->getName();
+
+ ELFUniquingMap.erase(SectionGroupPair(Section->getSectionName(), GroupName));
+ auto I =
+ ELFUniquingMap.insert(std::make_pair(SectionGroupPair(Name, GroupName),
+ Section)).first;
+ StringRef CachedName = I->first.first;
+ const_cast<MCSectionELF*>(Section)->setSectionName(CachedName);
+}
+
const MCSectionELF *MCContext::
getELFSection(StringRef Section, unsigned Type, unsigned Flags,
SectionKind Kind, unsigned EntrySize, StringRef Group) {
- if (ELFUniquingMap == 0)
- ELFUniquingMap = new ELFUniqueMapTy();
- ELFUniqueMapTy &Map = *(ELFUniqueMapTy*)ELFUniquingMap;
-
// Do the lookup, if we have a hit, return it.
- std::pair<ELFUniqueMapTy::iterator, bool> Entry = Map.insert(
- std::make_pair(SectionGroupPair(Section, Group), (MCSectionELF *)0));
- if (!Entry.second) return Entry.first->second;
+ auto IterBool = ELFUniquingMap.insert(
+ std::make_pair(SectionGroupPair(Section, Group), nullptr));
+ auto &Entry = *IterBool.first;
+ if (!IterBool.second) return Entry.second;
// Possibly refine the entry size first.
if (!EntrySize) {
EntrySize = MCSectionELF::DetermineEntrySize(Kind);
}
- MCSymbol *GroupSym = NULL;
+ MCSymbol *GroupSym = nullptr;
if (!Group.empty())
GroupSym = GetOrCreateSymbol(Group);
- MCSectionELF *Result = new (*this) MCSectionELF(
- Entry.first->first.first, Type, Flags, Kind, EntrySize, GroupSym);
- Entry.first->second = Result;
+ StringRef CachedName = Entry.first.first;
+ MCSectionELF *Result = new (*this)
+ MCSectionELF(CachedName, Type, Flags, Kind, EntrySize, GroupSym);
+ Entry.second = Result;
return Result;
}
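
getELFSection now uses a member map keyed by (section name, group name) and the insert-then-fill idiom: a null placeholder is inserted in a single lookup, only a genuinely new key triggers section creation, and the cached name is taken from the map key so it outlives the caller's buffer. A minimal standalone sketch of the same idiom, with invented types in place of the MC classes:

// Standalone sketch (invented names, not the LLVM API): the "insert a null
// placeholder, then fill it" uniquing idiom used for the section maps above.
#include <map>
#include <memory>
#include <string>
#include <vector>

struct Section {
  std::string Name;
  unsigned Type;
};

class SectionUniquer {
  std::map<std::pair<std::string, std::string>, Section *> Map; // (name, group) -> section
  std::vector<std::unique_ptr<Section>> Storage;                // owns the sections

public:
  Section *get(const std::string &Name, const std::string &Group, unsigned Type) {
    // One lookup: insert a placeholder; if the key already existed we are done.
    auto IterBool = Map.insert({{Name, Group}, nullptr});
    Section *&Entry = IterBool.first->second;
    if (!IterBool.second)
      return Entry;
    // First request for this (name, group): create the section and cache it.
    Storage.push_back(std::make_unique<Section>(Section{Name, Type}));
    Entry = Storage.back().get();
    return Entry;
  }
};

int main() {
  SectionUniquer U;
  Section *A = U.get(".text", "", 1);
  Section *B = U.get(".text", "", 1); // second request returns the cached pointer
  return A == B ? 0 : 1;
}
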
const MCSectionELF *MCContext::CreateELFGroupSection() {
MCSectionELF *Result =
new (*this) MCSectionELF(".group", ELF::SHT_GROUP, 0,
- SectionKind::getReadOnly(), 4, NULL);
+ SectionKind::getReadOnly(), 4, nullptr);
return Result;
}
-const MCSectionCOFF *
-MCContext::getCOFFSection(StringRef Section, unsigned Characteristics,
- SectionKind Kind, StringRef COMDATSymName,
- int Selection, const MCSectionCOFF *Assoc) {
- if (COFFUniquingMap == 0)
- COFFUniquingMap = new COFFUniqueMapTy();
- COFFUniqueMapTy &Map = *(COFFUniqueMapTy*)COFFUniquingMap;
-
+const MCSectionCOFF *MCContext::getCOFFSection(StringRef Section,
+ unsigned Characteristics,
+ SectionKind Kind,
+ StringRef COMDATSymName,
+ int Selection) {
// Do the lookup, if we have a hit, return it.
- SectionGroupPair P(Section, COMDATSymName);
- std::pair<COFFUniqueMapTy::iterator, bool> Entry =
- Map.insert(std::make_pair(P, (MCSectionCOFF *)0));
- COFFUniqueMapTy::iterator Iter = Entry.first;
- if (!Entry.second)
+ SectionGroupTriple T(Section, COMDATSymName, Selection);
+ auto IterBool = COFFUniquingMap.insert(std::make_pair(T, nullptr));
+ auto Iter = IterBool.first;
+ if (!IterBool.second)
return Iter->second;
- const MCSymbol *COMDATSymbol = NULL;
+ MCSymbol *COMDATSymbol = nullptr;
if (!COMDATSymName.empty())
COMDATSymbol = GetOrCreateSymbol(COMDATSymName);
- MCSectionCOFF *Result =
- new (*this) MCSectionCOFF(Iter->first.first, Characteristics,
- COMDATSymbol, Selection, Assoc, Kind);
+ StringRef CachedName = std::get<0>(Iter->first);
+ MCSectionCOFF *Result = new (*this)
+ MCSectionCOFF(CachedName, Characteristics, COMDATSymbol, Selection, Kind);
Iter->second = Result;
return Result;
@@ -315,14 +310,10 @@ MCContext::getCOFFSection(StringRef Section, unsigned Characteristics,
}
const MCSectionCOFF *MCContext::getCOFFSection(StringRef Section) {
- if (COFFUniquingMap == 0)
- COFFUniquingMap = new COFFUniqueMapTy();
- COFFUniqueMapTy &Map = *(COFFUniqueMapTy*)COFFUniquingMap;
-
- SectionGroupPair P(Section, "");
- COFFUniqueMapTy::iterator Iter = Map.find(P);
- if (Iter == Map.end())
- return 0;
+ SectionGroupTriple T(Section, "", 0);
+ auto Iter = COFFUniquingMap.find(T);
+ if (Iter == COFFUniquingMap.end())
+ return nullptr;
return Iter->second;
}
@@ -336,84 +327,48 @@ const MCSectionCOFF *MCContext::getCOFFSection(StringRef Section) {
/// allocated file number is returned. The file numbers may be in any order.
unsigned MCContext::GetDwarfFile(StringRef Directory, StringRef FileName,
unsigned FileNumber, unsigned CUID) {
- // TODO: a FileNumber of zero says to use the next available file number.
- // Note: in GenericAsmParser::ParseDirectiveFile() FileNumber was checked
- // to not be less than one. This needs to be change to be not less than zero.
-
- SmallVectorImpl<MCDwarfFile *>& MCDwarfFiles = MCDwarfFilesCUMap[CUID];
- SmallVectorImpl<StringRef>& MCDwarfDirs = MCDwarfDirsCUMap[CUID];
- // Make space for this FileNumber in the MCDwarfFiles vector if needed.
- if (FileNumber >= MCDwarfFiles.size()) {
- MCDwarfFiles.resize(FileNumber + 1);
- } else {
- MCDwarfFile *&ExistingFile = MCDwarfFiles[FileNumber];
- if (ExistingFile)
- // It is an error to use see the same number more than once.
- return 0;
- }
-
- // Get the new MCDwarfFile slot for this FileNumber.
- MCDwarfFile *&File = MCDwarfFiles[FileNumber];
-
- if (Directory.empty()) {
- // Separate the directory part from the basename of the FileName.
- StringRef tFileName = sys::path::filename(FileName);
- if (!tFileName.empty()) {
- Directory = sys::path::parent_path(FileName);
- if (!Directory.empty())
- FileName = tFileName;
- }
- }
-
- // Find or make a entry in the MCDwarfDirs vector for this Directory.
- // Capture directory name.
- unsigned DirIndex;
- if (Directory.empty()) {
- // For FileNames with no directories a DirIndex of 0 is used.
- DirIndex = 0;
- } else {
- DirIndex = 0;
- for (unsigned End = MCDwarfDirs.size(); DirIndex < End; DirIndex++) {
- if (Directory == MCDwarfDirs[DirIndex])
- break;
- }
- if (DirIndex >= MCDwarfDirs.size()) {
- char *Buf = static_cast<char *>(Allocate(Directory.size()));
- memcpy(Buf, Directory.data(), Directory.size());
- MCDwarfDirs.push_back(StringRef(Buf, Directory.size()));
- }
- // The DirIndex is one based, as DirIndex of 0 is used for FileNames with
- // no directories. MCDwarfDirs[] is unlike MCDwarfFiles[] in that the
- // directory names are stored at MCDwarfDirs[DirIndex-1] where FileNames
- // are stored at MCDwarfFiles[FileNumber].Name .
- DirIndex++;
- }
-
- // Now make the MCDwarfFile entry and place it in the slot in the MCDwarfFiles
- // vector.
- char *Buf = static_cast<char *>(Allocate(FileName.size()));
- memcpy(Buf, FileName.data(), FileName.size());
- File = new (*this) MCDwarfFile(StringRef(Buf, FileName.size()), DirIndex);
-
- // return the allocated FileNumber.
- return FileNumber;
+ MCDwarfLineTable &Table = MCDwarfLineTablesCUMap[CUID];
+ return Table.getFile(Directory, FileName, FileNumber);
}
/// isValidDwarfFileNumber - takes a dwarf file number and returns true if it
/// currently is assigned and false otherwise.
bool MCContext::isValidDwarfFileNumber(unsigned FileNumber, unsigned CUID) {
- SmallVectorImpl<MCDwarfFile *>& MCDwarfFiles = MCDwarfFilesCUMap[CUID];
+ const SmallVectorImpl<MCDwarfFile>& MCDwarfFiles = getMCDwarfFiles(CUID);
if(FileNumber == 0 || FileNumber >= MCDwarfFiles.size())
return false;
- return MCDwarfFiles[FileNumber] != 0;
+ return !MCDwarfFiles[FileNumber].Name.empty();
+}
+
+/// finalizeDwarfSections - Emit end symbols for each non-empty code section.
+/// Also remove empty sections from SectionStartEndSyms, to avoid generating
+/// useless debug info for them.
+void MCContext::finalizeDwarfSections(MCStreamer &MCOS) {
+ MCContext &context = MCOS.getContext();
+
+ auto sec = SectionStartEndSyms.begin();
+ while (sec != SectionStartEndSyms.end()) {
+ assert(sec->second.first && "Start symbol must be set by now");
+ MCOS.SwitchSection(sec->first);
+ if (MCOS.mayHaveInstructions()) {
+ MCSymbol *SectionEndSym = context.CreateTempSymbol();
+ MCOS.EmitLabel(SectionEndSym);
+ sec->second.second = SectionEndSym;
+ ++sec;
+ } else {
+ MapVector<const MCSection *, std::pair<MCSymbol *, MCSymbol *> >::iterator
+ to_erase = sec;
+ sec = SectionStartEndSyms.erase(to_erase);
+ }
+ }
}
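
finalizeDwarfSections above walks the start/end-symbol map, closes sections that emitted instructions with a fresh end label, and drops empty ones using the iterator returned by erase(). A standalone sketch of that erase-while-iterating pattern, with a plain std::map standing in for the MapVector:

// Standalone sketch of the erase-while-iterating pattern used above:
// keep entries that have content, drop the empty ones, continuing the walk
// from the iterator that erase() returns.
#include <cstdio>
#include <map>
#include <string>

int main() {
  // section name -> number of instructions emitted into it (assumed data)
  std::map<std::string, unsigned> Sections = {
      {".text", 42}, {".text.unlikely", 0}, {".init", 3}};

  for (auto It = Sections.begin(); It != Sections.end();) {
    if (It->second == 0)
      It = Sections.erase(It); // empty section: drop it, erase() yields the next entry
    else
      ++It;                    // non-empty: keep it and move on
  }

  for (const auto &S : Sections)
    std::printf("%s kept\n", S.first.c_str());
}
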
-void MCContext::FatalError(SMLoc Loc, const Twine &Msg) {
+void MCContext::FatalError(SMLoc Loc, const Twine &Msg) const {
// If we have a source manager and a location, use it. Otherwise just
// use the generic report_fatal_error().
if (!SrcMgr || Loc == SMLoc())
- report_fatal_error(Msg);
+ report_fatal_error(Msg, false);
// Use the source manager to print the message.
SrcMgr->PrintMessage(Loc, SourceMgr::DK_Error, Msg);
diff --git a/contrib/llvm/lib/MC/MCDisassembler.cpp b/contrib/llvm/lib/MC/MCDisassembler.cpp
index bfd51ab..77d9ce1 100644
--- a/contrib/llvm/lib/MC/MCDisassembler.cpp
+++ b/contrib/llvm/lib/MC/MCDisassembler.cpp
@@ -16,23 +16,6 @@ using namespace llvm;
MCDisassembler::~MCDisassembler() {
}
-void
-MCDisassembler::setupForSymbolicDisassembly(
- LLVMOpInfoCallback GetOpInfo,
- LLVMSymbolLookupCallback SymbolLookUp,
- void *DisInfo,
- MCContext *Ctx,
- OwningPtr<MCRelocationInfo> &RelInfo) {
- this->GetOpInfo = GetOpInfo;
- this->SymbolLookUp = SymbolLookUp;
- this->DisInfo = DisInfo;
- this->Ctx = Ctx;
- assert(Ctx != 0 && "No MCContext given for symbolic disassembly");
- if (!Symbolizer)
- Symbolizer.reset(new MCExternalSymbolizer(*Ctx, RelInfo, GetOpInfo,
- SymbolLookUp, DisInfo));
-}
-
bool MCDisassembler::tryAddingSymbolicOperand(MCInst &Inst, int64_t Value,
uint64_t Address, bool IsBranch,
uint64_t Offset,
@@ -51,6 +34,6 @@ void MCDisassembler::tryAddingPcLoadReferenceComment(int64_t Value,
Symbolizer->tryAddingPcLoadReferenceComment(cStream, Value, Address);
}
-void MCDisassembler::setSymbolizer(OwningPtr<MCSymbolizer> &Symzer) {
- Symbolizer.reset(Symzer.take());
+void MCDisassembler::setSymbolizer(std::unique_ptr<MCSymbolizer> Symzer) {
+ Symbolizer = std::move(Symzer);
}
diff --git a/contrib/llvm/lib/MC/MCDisassembler/Disassembler.cpp b/contrib/llvm/lib/MC/MCDisassembler/Disassembler.cpp
index a0066c8..0530c26 100644
--- a/contrib/llvm/lib/MC/MCDisassembler/Disassembler.cpp
+++ b/contrib/llvm/lib/MC/MCDisassembler/Disassembler.cpp
@@ -24,9 +24,6 @@
#include "llvm/Support/MemoryObject.h"
#include "llvm/Support/TargetRegistry.h"
-namespace llvm {
-class Target;
-} // namespace llvm
using namespace llvm;
// LLVMCreateDisasm() creates a disassembler for the TripleName. Symbolic
@@ -44,20 +41,20 @@ LLVMDisasmContextRef LLVMCreateDisasmCPU(const char *Triple, const char *CPU,
std::string Error;
const Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error);
if (!TheTarget)
- return 0;
+ return nullptr;
const MCRegisterInfo *MRI = TheTarget->createMCRegInfo(Triple);
if (!MRI)
- return 0;
+ return nullptr;
// Get the assembler info needed to setup the MCContext.
const MCAsmInfo *MAI = TheTarget->createMCAsmInfo(*MRI, Triple);
if (!MAI)
- return 0;
+ return nullptr;
const MCInstrInfo *MII = TheTarget->createMCInstrInfo();
if (!MII)
- return 0;
+ return nullptr;
// Package up features to be passed to target/subtarget
std::string FeaturesStr;
@@ -65,42 +62,40 @@ LLVMDisasmContextRef LLVMCreateDisasmCPU(const char *Triple, const char *CPU,
const MCSubtargetInfo *STI = TheTarget->createMCSubtargetInfo(Triple, CPU,
FeaturesStr);
if (!STI)
- return 0;
+ return nullptr;
// Set up the MCContext for creating symbols and MCExpr's.
- MCContext *Ctx = new MCContext(MAI, MRI, 0);
+ MCContext *Ctx = new MCContext(MAI, MRI, nullptr);
if (!Ctx)
- return 0;
+ return nullptr;
// Set up disassembler.
- MCDisassembler *DisAsm = TheTarget->createMCDisassembler(*STI);
+ MCDisassembler *DisAsm = TheTarget->createMCDisassembler(*STI, *Ctx);
if (!DisAsm)
- return 0;
+ return nullptr;
- OwningPtr<MCRelocationInfo> RelInfo(
- TheTarget->createMCRelocationInfo(Triple, *Ctx));
+ std::unique_ptr<MCRelocationInfo> RelInfo(
+ TheTarget->createMCRelocationInfo(Triple, *Ctx));
if (!RelInfo)
- return 0;
+ return nullptr;
+
+ std::unique_ptr<MCSymbolizer> Symbolizer(TheTarget->createMCSymbolizer(
+ Triple, GetOpInfo, SymbolLookUp, DisInfo, Ctx, RelInfo.release()));
+ DisAsm->setSymbolizer(std::move(Symbolizer));
- OwningPtr<MCSymbolizer> Symbolizer(
- TheTarget->createMCSymbolizer(Triple, GetOpInfo, SymbolLookUp, DisInfo,
- Ctx, RelInfo.take()));
- DisAsm->setSymbolizer(Symbolizer);
- DisAsm->setupForSymbolicDisassembly(GetOpInfo, SymbolLookUp, DisInfo,
- Ctx, RelInfo);
// Set up the instruction printer.
int AsmPrinterVariant = MAI->getAssemblerDialect();
MCInstPrinter *IP = TheTarget->createMCInstPrinter(AsmPrinterVariant,
*MAI, *MII, *MRI, *STI);
if (!IP)
- return 0;
+ return nullptr;
LLVMDisasmContext *DC = new LLVMDisasmContext(Triple, DisInfo, TagType,
GetOpInfo, SymbolLookUp,
TheTarget, MAI, MRI,
STI, MII, Ctx, DisAsm, IP);
if (!DC)
- return 0;
+ return nullptr;
DC->setCPU(CPU);
return DC;
@@ -132,11 +127,11 @@ class DisasmMemoryObject : public MemoryObject {
public:
DisasmMemoryObject(uint8_t *bytes, uint64_t size, uint64_t basePC) :
Bytes(bytes), Size(size), BasePC(basePC) {}
-
- uint64_t getBase() const { return BasePC; }
- uint64_t getExtent() const { return Size; }
- int readByte(uint64_t Addr, uint8_t *Byte) const {
+ uint64_t getBase() const override { return BasePC; }
+ uint64_t getExtent() const override { return Size; }
+
+ int readByte(uint64_t Addr, uint8_t *Byte) const override {
if (Addr - BasePC >= Size)
return -1;
*Byte = Bytes[Addr - BasePC];
@@ -298,6 +293,7 @@ size_t LLVMDisasmInstruction(LLVMDisasmContextRef DCR, uint8_t *Bytes,
emitLatency(DC, Inst);
emitComments(DC, FormattedOS);
+ OS.flush();
assert(OutStringSize != 0 && "Output buffer cannot be zero size");
size_t OutputSize = std::min(OutStringSize-1, InsnStr.size());
diff --git a/contrib/llvm/lib/MC/MCDisassembler/Disassembler.h b/contrib/llvm/lib/MC/MCDisassembler/Disassembler.h
index 4855af2..d1d40cd 100644
--- a/contrib/llvm/lib/MC/MCDisassembler/Disassembler.h
+++ b/contrib/llvm/lib/MC/MCDisassembler/Disassembler.h
@@ -18,7 +18,6 @@
#define LLVM_MC_DISASSEMBLER_H
#include "llvm-c/Disassembler.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
#include <string>
@@ -56,23 +55,23 @@ private:
// LLVMDisasmInstruction().
//
// The LLVM target corresponding to the disassembler.
- // FIXME: using llvm::OwningPtr<const llvm::Target> causes a malloc error
+ // FIXME: using std::unique_ptr<const llvm::Target> causes a malloc error
// when this LLVMDisasmContext is deleted.
const Target *TheTarget;
// The assembly information for the target architecture.
- llvm::OwningPtr<const llvm::MCAsmInfo> MAI;
+ std::unique_ptr<const llvm::MCAsmInfo> MAI;
// The register information for the target architecture.
- llvm::OwningPtr<const llvm::MCRegisterInfo> MRI;
+ std::unique_ptr<const llvm::MCRegisterInfo> MRI;
// The subtarget information for the target architecture.
- llvm::OwningPtr<const llvm::MCSubtargetInfo> MSI;
+ std::unique_ptr<const llvm::MCSubtargetInfo> MSI;
// The instruction information for the target architecture.
- llvm::OwningPtr<const llvm::MCInstrInfo> MII;
+ std::unique_ptr<const llvm::MCInstrInfo> MII;
// The assembly context for creating symbols and MCExprs.
- llvm::OwningPtr<const llvm::MCContext> Ctx;
+ std::unique_ptr<const llvm::MCContext> Ctx;
// The disassembler for the target architecture.
- llvm::OwningPtr<const llvm::MCDisassembler> DisAsm;
+ std::unique_ptr<const llvm::MCDisassembler> DisAsm;
// The instruction printer for the target architecture.
- llvm::OwningPtr<llvm::MCInstPrinter> IP;
+ std::unique_ptr<llvm::MCInstPrinter> IP;
// The options used to set up the disassembler.
uint64_t Options;
// The CPU string.
diff --git a/contrib/llvm/lib/MC/MCDwarf.cpp b/contrib/llvm/lib/MC/MCDwarf.cpp
index 12070f3..3bf6f0f 100644
--- a/contrib/llvm/lib/MC/MCDwarf.cpp
+++ b/contrib/llvm/lib/MC/MCDwarf.cpp
@@ -9,6 +9,7 @@
#include "llvm/MC/MCDwarf.h"
#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Config/config.h"
@@ -16,8 +17,9 @@
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCObjectFileInfo.h"
+#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -62,7 +64,7 @@ static inline uint64_t ScaleAddrDelta(MCContext &Context, uint64_t AddrDelta) {
// and if there is information from the last .loc directive that has yet to have
// a line entry made for it is made.
//
-void MCLineEntry::Make(MCStreamer *MCOS, const MCSection *Section) {
+void MCLineEntry::Make(MCObjectStreamer *MCOS, const MCSection *Section) {
if (!MCOS->getContext().getDwarfLocSeen())
return;
@@ -80,23 +82,11 @@ void MCLineEntry::Make(MCStreamer *MCOS, const MCSection *Section) {
// clear DwarfLocSeen saying the current .loc info is now used.
MCOS->getContext().ClearDwarfLocSeen();
- // Get the MCLineSection for this section, if one does not exist for this
- // section create it.
- const DenseMap<const MCSection *, MCLineSection *> &MCLineSections =
- MCOS->getContext().getMCLineSections();
- MCLineSection *LineSection = MCLineSections.lookup(Section);
- if (!LineSection) {
- // Create a new MCLineSection. This will be deleted after the dwarf line
- // table is created using it by iterating through the MCLineSections
- // DenseMap.
- LineSection = new MCLineSection;
- // Save a pointer to the new LineSection into the MCLineSections DenseMap.
- MCOS->getContext().addMCLineSection(Section, LineSection);
- }
-
// Add the line entry to this section's entries.
- LineSection->addLineEntry(LineEntry,
- MCOS->getContext().getDwarfCompileUnitID());
+ MCOS->getContext()
+ .getMCDwarfLineTable(MCOS->getContext().getDwarfCompileUnitID())
+ .getMCLineSections()
+ .addLineEntry(LineEntry, Section);
}
//
@@ -124,25 +114,21 @@ static inline const MCExpr *MakeStartMinusEndExpr(const MCStreamer &MCOS,
// This emits the Dwarf line table for the specified section from the entries
// in the LineSection.
//
-static inline void EmitDwarfLineTable(MCStreamer *MCOS,
- const MCSection *Section,
- const MCLineSection *LineSection,
- unsigned CUID) {
- // This LineSection does not contain any LineEntry for the given Compile Unit.
- if (!LineSection->containEntriesForID(CUID))
- return;
-
+static inline void
+EmitDwarfLineTable(MCObjectStreamer *MCOS, const MCSection *Section,
+ const MCLineSection::MCLineEntryCollection &LineEntries) {
unsigned FileNum = 1;
unsigned LastLine = 1;
unsigned Column = 0;
unsigned Flags = DWARF2_LINE_DEFAULT_IS_STMT ? DWARF2_FLAG_IS_STMT : 0;
unsigned Isa = 0;
- MCSymbol *LastLabel = NULL;
+ unsigned Discriminator = 0;
+ MCSymbol *LastLabel = nullptr;
// Loop through each MCLineEntry and encode the dwarf line number table.
- for (MCLineSection::const_iterator
- it = LineSection->getMCLineEntries(CUID).begin(),
- ie = LineSection->getMCLineEntries(CUID).end(); it != ie; ++it) {
+ for (auto it = LineEntries.begin(),
+ ie = LineEntries.end();
+ it != ie; ++it) {
if (FileNum != it->getFileNum()) {
FileNum = it->getFileNum();
@@ -154,6 +140,14 @@ static inline void EmitDwarfLineTable(MCStreamer *MCOS,
MCOS->EmitIntValue(dwarf::DW_LNS_set_column, 1);
MCOS->EmitULEB128IntValue(Column);
}
+ if (Discriminator != it->getDiscriminator()) {
+ Discriminator = it->getDiscriminator();
+ unsigned Size = getULEB128Size(Discriminator);
+ MCOS->EmitIntValue(dwarf::DW_LNS_extended_op, 1);
+ MCOS->EmitULEB128IntValue(Size + 1);
+ MCOS->EmitIntValue(dwarf::DW_LNE_set_discriminator, 1);
+ MCOS->EmitULEB128IntValue(Discriminator);
+ }
if (Isa != it->getIsa()) {
Isa = it->getIsa();
MCOS->EmitIntValue(dwarf::DW_LNS_set_isa, 1);
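
The new discriminator handling in this hunk wraps DW_LNE_set_discriminator in the DWARF extended-opcode framing: a zero byte (DW_LNS_extended_op), the ULEB128-encoded payload length, the sub-opcode, then the ULEB128 operand. A standalone sketch of that encoding — not the MCStreamer API; the 0x04 sub-opcode is the DWARF 4 assignment for DW_LNE_set_discriminator:

// Standalone sketch (not LLVM code) of the extended-opcode framing produced
// above: 0x00, ULEB128 payload length, sub-opcode, ULEB128 operand.
#include <cstdint>
#include <cstdio>
#include <vector>

static void emitULEB128(std::vector<uint8_t> &Out, uint64_t V) {
  do {
    uint8_t Byte = V & 0x7f;
    V >>= 7;
    if (V)
      Byte |= 0x80; // more bytes follow
    Out.push_back(Byte);
  } while (V);
}

static void emitSetDiscriminator(std::vector<uint8_t> &Out, uint64_t Disc) {
  std::vector<uint8_t> Operand;
  emitULEB128(Operand, Disc);
  Out.push_back(0x00);                  // DW_LNS_extended_op
  emitULEB128(Out, Operand.size() + 1); // payload: sub-opcode + operand
  Out.push_back(0x04);                  // DW_LNE_set_discriminator (DWARF 4)
  Out.insert(Out.end(), Operand.begin(), Operand.end());
}

int main() {
  std::vector<uint8_t> Bytes;
  emitSetDiscriminator(Bytes, 300); // 300 needs a 2-byte ULEB128
  for (uint8_t B : Bytes)
    std::printf("%02x ", B);        // 00 03 04 ac 02
  std::printf("\n");
}
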
@@ -212,38 +206,55 @@ static inline void EmitDwarfLineTable(MCStreamer *MCOS,
//
// This emits the Dwarf file and the line tables.
//
-const MCSymbol *MCDwarfFileTable::Emit(MCStreamer *MCOS) {
+void MCDwarfLineTable::Emit(MCObjectStreamer *MCOS) {
MCContext &context = MCOS->getContext();
+
+ auto &LineTables = context.getMCDwarfLineTables();
+
+ // Bail out early so we don't switch to the debug_line section needlessly and
+ // in doing so create an unnecessary (if empty) section.
+ if (LineTables.empty())
+ return;
+
// Switch to the section where the table will be emitted into.
MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfLineSection());
- const DenseMap<unsigned, MCSymbol *> &MCLineTableSymbols =
- MCOS->getContext().getMCLineTableSymbols();
- // CUID and MCLineTableSymbols are set in DwarfDebug, when DwarfDebug does
- // not exist, CUID will be 0 and MCLineTableSymbols will be empty.
- // Handle Compile Unit 0, the line table start symbol is the section symbol.
- const MCSymbol *LineStartSym = EmitCU(MCOS, 0);
// Handle the rest of the Compile Units.
- for (unsigned Is = 1, Ie = MCLineTableSymbols.size(); Is < Ie; Is++)
- EmitCU(MCOS, Is);
-
- // Now delete the MCLineSections that were created in MCLineEntry::Make()
- // and used to emit the line table.
- const DenseMap<const MCSection *, MCLineSection *> &MCLineSections =
- MCOS->getContext().getMCLineSections();
- for (DenseMap<const MCSection *, MCLineSection *>::const_iterator it =
- MCLineSections.begin(), ie = MCLineSections.end(); it != ie;
- ++it)
- delete it->second;
-
- return LineStartSym;
+ for (const auto &CUIDTablePair : LineTables)
+ CUIDTablePair.second.EmitCU(MCOS);
+}
+
+void MCDwarfDwoLineTable::Emit(MCStreamer &MCOS) const {
+ MCOS.EmitLabel(Header.Emit(&MCOS, None).second);
}
-const MCSymbol *MCDwarfFileTable::EmitCU(MCStreamer *MCOS, unsigned CUID) {
+std::pair<MCSymbol *, MCSymbol *> MCDwarfLineTableHeader::Emit(MCStreamer *MCOS) const {
+ static const char StandardOpcodeLengths[] = {
+ 0, // length of DW_LNS_copy
+ 1, // length of DW_LNS_advance_pc
+ 1, // length of DW_LNS_advance_line
+ 1, // length of DW_LNS_set_file
+ 1, // length of DW_LNS_set_column
+ 0, // length of DW_LNS_negate_stmt
+ 0, // length of DW_LNS_set_basic_block
+ 0, // length of DW_LNS_const_add_pc
+ 1, // length of DW_LNS_fixed_advance_pc
+ 0, // length of DW_LNS_set_prologue_end
+ 0, // length of DW_LNS_set_epilogue_begin
+ 1 // DW_LNS_set_isa
+ };
+ assert(array_lengthof(StandardOpcodeLengths) == (DWARF2_LINE_OPCODE_BASE - 1));
+ return Emit(MCOS, StandardOpcodeLengths);
+}
+
+std::pair<MCSymbol *, MCSymbol *>
+MCDwarfLineTableHeader::Emit(MCStreamer *MCOS,
+ ArrayRef<char> StandardOpcodeLengths) const {
+
MCContext &context = MCOS->getContext();
// Create a symbol at the beginning of the line table.
- MCSymbol *LineStartSym = MCOS->getContext().getMCLineTableSymbol(CUID);
+ MCSymbol *LineStartSym = Label;
if (!LineStartSym)
LineStartSym = context.CreateTempSymbol();
// Set the value of the symbol, as we are at the start of the line table.
@@ -275,27 +286,15 @@ const MCSymbol *MCDwarfFileTable::EmitCU(MCStreamer *MCOS, unsigned CUID) {
MCOS->EmitIntValue(DWARF2_LINE_DEFAULT_IS_STMT, 1);
MCOS->EmitIntValue(DWARF2_LINE_BASE, 1);
MCOS->EmitIntValue(DWARF2_LINE_RANGE, 1);
- MCOS->EmitIntValue(DWARF2_LINE_OPCODE_BASE, 1);
+ MCOS->EmitIntValue(StandardOpcodeLengths.size() + 1, 1);
// Standard opcode lengths
- MCOS->EmitIntValue(0, 1); // length of DW_LNS_copy
- MCOS->EmitIntValue(1, 1); // length of DW_LNS_advance_pc
- MCOS->EmitIntValue(1, 1); // length of DW_LNS_advance_line
- MCOS->EmitIntValue(1, 1); // length of DW_LNS_set_file
- MCOS->EmitIntValue(1, 1); // length of DW_LNS_set_column
- MCOS->EmitIntValue(0, 1); // length of DW_LNS_negate_stmt
- MCOS->EmitIntValue(0, 1); // length of DW_LNS_set_basic_block
- MCOS->EmitIntValue(0, 1); // length of DW_LNS_const_add_pc
- MCOS->EmitIntValue(1, 1); // length of DW_LNS_fixed_advance_pc
- MCOS->EmitIntValue(0, 1); // length of DW_LNS_set_prologue_end
- MCOS->EmitIntValue(0, 1); // length of DW_LNS_set_epilogue_begin
- MCOS->EmitIntValue(1, 1); // DW_LNS_set_isa
+ for (char Length : StandardOpcodeLengths)
+ MCOS->EmitIntValue(Length, 1);
// Put out the directory and file tables.
// First the directory table.
- const SmallVectorImpl<StringRef> &MCDwarfDirs =
- context.getMCDwarfDirs(CUID);
for (unsigned i = 0; i < MCDwarfDirs.size(); i++) {
MCOS->EmitBytes(MCDwarfDirs[i]); // the DirectoryName
MCOS->EmitBytes(StringRef("\0", 1)); // the null term. of the string
@@ -303,13 +302,12 @@ const MCSymbol *MCDwarfFileTable::EmitCU(MCStreamer *MCOS, unsigned CUID) {
MCOS->EmitIntValue(0, 1); // Terminate the directory list
// Second the file table.
- const SmallVectorImpl<MCDwarfFile *> &MCDwarfFiles =
- MCOS->getContext().getMCDwarfFiles(CUID);
for (unsigned i = 1; i < MCDwarfFiles.size(); i++) {
- MCOS->EmitBytes(MCDwarfFiles[i]->getName()); // FileName
+ assert(!MCDwarfFiles[i].Name.empty());
+ MCOS->EmitBytes(MCDwarfFiles[i].Name); // FileName
MCOS->EmitBytes(StringRef("\0", 1)); // the null term. of the string
// the Directory num
- MCOS->EmitULEB128IntValue(MCDwarfFiles[i]->getDirIndex());
+ MCOS->EmitULEB128IntValue(MCDwarfFiles[i].DirIndex);
MCOS->EmitIntValue(0, 1); // last modification timestamp (always 0)
MCOS->EmitIntValue(0, 1); // filesize (always 0)
}
@@ -319,21 +317,18 @@ const MCSymbol *MCDwarfFileTable::EmitCU(MCStreamer *MCOS, unsigned CUID) {
// end of the prologue (that was used in a previous expression).
MCOS->EmitLabel(ProEndSym);
+ return std::make_pair(LineStartSym, LineEndSym);
+}
+
+void MCDwarfLineTable::EmitCU(MCObjectStreamer *MCOS) const {
+ MCSymbol *LineEndSym = Header.Emit(MCOS).second;
+
// Put out the line tables.
- const DenseMap<const MCSection *, MCLineSection *> &MCLineSections =
- MCOS->getContext().getMCLineSections();
- const std::vector<const MCSection *> &MCLineSectionOrder =
- MCOS->getContext().getMCLineSectionOrder();
- for (std::vector<const MCSection*>::const_iterator it =
- MCLineSectionOrder.begin(), ie = MCLineSectionOrder.end(); it != ie;
- ++it) {
- const MCSection *Sec = *it;
- const MCLineSection *Line = MCLineSections.lookup(Sec);
- EmitDwarfLineTable(MCOS, Sec, Line, CUID);
- }
+ for (const auto &LineSec : MCLineSections.getMCLineEntries())
+ EmitDwarfLineTable(MCOS, LineSec.first, LineSec.second);
- if (MCOS->getContext().getAsmInfo()->getLinkerRequiresNonEmptyDwarfLines()
- && MCLineSectionOrder.begin() == MCLineSectionOrder.end()) {
+ if (MCOS->getContext().getAsmInfo()->getLinkerRequiresNonEmptyDwarfLines() &&
+ MCLineSections.getMCLineEntries().empty()) {
     // The darwin9 linker has a bug (see PR8715). For 32-bit architectures
// it requires:
// total_length >= prologue_length + 10
@@ -347,8 +342,78 @@ const MCSymbol *MCDwarfFileTable::EmitCU(MCStreamer *MCOS, unsigned CUID) {
// This is the end of the section, so set the value of the symbol at the end
// of this section (that was used in a previous expression).
MCOS->EmitLabel(LineEndSym);
+}
+
+unsigned MCDwarfLineTable::getFile(StringRef &Directory, StringRef &FileName,
+ unsigned FileNumber) {
+ return Header.getFile(Directory, FileName, FileNumber);
+}
+
+unsigned MCDwarfLineTableHeader::getFile(StringRef &Directory,
+ StringRef &FileName,
+ unsigned FileNumber) {
+ if (Directory == CompilationDir)
+ Directory = "";
+ if (FileName.empty()) {
+ FileName = "<stdin>";
+ Directory = "";
+ }
+ assert(!FileName.empty());
+ if (FileNumber == 0) {
+ FileNumber = SourceIdMap.size() + 1;
+ assert((MCDwarfFiles.empty() || FileNumber == MCDwarfFiles.size()) &&
+ "Don't mix autonumbered and explicit numbered line table usage");
+ StringMapEntry<unsigned> &Ent = SourceIdMap.GetOrCreateValue(
+ (Directory + Twine('\0') + FileName).str(), FileNumber);
+ if (Ent.getValue() != FileNumber)
+ return Ent.getValue();
+ }
+ // Make space for this FileNumber in the MCDwarfFiles vector if needed.
+ MCDwarfFiles.resize(FileNumber + 1);
+
+ // Get the new MCDwarfFile slot for this FileNumber.
+ MCDwarfFile &File = MCDwarfFiles[FileNumber];
+
+  // It is an error to use the same number more than once.
+ if (!File.Name.empty())
+ return 0;
+
+ if (Directory.empty()) {
+ // Separate the directory part from the basename of the FileName.
+ StringRef tFileName = sys::path::filename(FileName);
+ if (!tFileName.empty()) {
+ Directory = sys::path::parent_path(FileName);
+ if (!Directory.empty())
+ FileName = tFileName;
+ }
+ }
+
+ // Find or make an entry in the MCDwarfDirs vector for this Directory.
+ // Capture directory name.
+ unsigned DirIndex;
+ if (Directory.empty()) {
+ // For FileNames with no directories a DirIndex of 0 is used.
+ DirIndex = 0;
+ } else {
+ DirIndex = 0;
+ for (unsigned End = MCDwarfDirs.size(); DirIndex < End; DirIndex++) {
+ if (Directory == MCDwarfDirs[DirIndex])
+ break;
+ }
+ if (DirIndex >= MCDwarfDirs.size())
+ MCDwarfDirs.push_back(Directory);
+ // The DirIndex is one based, as DirIndex of 0 is used for FileNames with
+ // no directories. MCDwarfDirs[] is unlike MCDwarfFiles[] in that the
+ // directory names are stored at MCDwarfDirs[DirIndex-1] where FileNames
+ // are stored at MCDwarfFiles[FileNumber].Name .
+ DirIndex++;
+ }
+
+ File.Name = FileName;
+ File.DirIndex = DirIndex;
- return LineStartSym;
+ // return the allocated FileNumber.
+ return FileNumber;
}
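
getFile above keeps the directory table one-based: DirIndex 0 means "no directory", directory i lives at MCDwarfDirs[i-1], files are indexed directly by their file number, and reusing a number is reported as an error. A standalone sketch of that numbering scheme, with invented container types:

// Standalone sketch (invented names) of the one-based directory index scheme
// described above.
#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

struct LineTableFiles {
  struct File { std::string Name; unsigned DirIndex = 0; };
  std::vector<std::string> Dirs;
  std::vector<File> Files; // slot 0 stays unused; file numbers start at 1

  unsigned addFile(const std::string &Dir, const std::string &Name, unsigned FileNumber) {
    if (FileNumber >= Files.size())
      Files.resize(FileNumber + 1);
    if (!Files[FileNumber].Name.empty())
      return 0; // reusing a file number is an error
    unsigned DirIndex = 0;
    if (!Dir.empty()) {
      auto It = std::find(Dirs.begin(), Dirs.end(), Dir);
      if (It == Dirs.end()) { Dirs.push_back(Dir); It = Dirs.end() - 1; }
      DirIndex = unsigned(It - Dirs.begin()) + 1; // one-based; 0 is "no directory"
    }
    Files[FileNumber] = {Name, DirIndex};
    return FileNumber;
  }
};

int main() {
  LineTableFiles T;
  T.addFile("src", "a.c", 1);
  T.addFile("",    "b.s", 2); // no directory -> DirIndex 0
  for (unsigned i = 1; i < T.Files.size(); ++i)
    std::printf("file %u: %s dir=%u\n", i, T.Files[i].Name.c_str(), T.Files[i].DirIndex);
}
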
/// Utility function to emit the encoding to a streamer.
@@ -437,16 +502,6 @@ void MCDwarfLineAddr::Encode(MCContext &Context, int64_t LineDelta,
OS << char(Temp);
}
-void MCDwarfFile::print(raw_ostream &OS) const {
- OS << '"' << getName() << '"';
-}
-
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-void MCDwarfFile::dump() const {
- print(dbgs());
-}
-#endif
-
// Utility function to write a tuple for .debug_abbrev.
static void EmitAbbrev(MCStreamer *MCOS, uint64_t Name, uint64_t Form) {
MCOS->EmitULEB128IntValue(Name);
@@ -464,8 +519,13 @@ static void EmitGenDwarfAbbrev(MCStreamer *MCOS) {
MCOS->EmitULEB128IntValue(dwarf::DW_TAG_compile_unit);
MCOS->EmitIntValue(dwarf::DW_CHILDREN_yes, 1);
EmitAbbrev(MCOS, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_data4);
- EmitAbbrev(MCOS, dwarf::DW_AT_low_pc, dwarf::DW_FORM_addr);
- EmitAbbrev(MCOS, dwarf::DW_AT_high_pc, dwarf::DW_FORM_addr);
+ if (MCOS->getContext().getGenDwarfSectionSyms().size() > 1 &&
+ MCOS->getContext().getDwarfVersion() >= 3) {
+ EmitAbbrev(MCOS, dwarf::DW_AT_ranges, dwarf::DW_FORM_data4);
+ } else {
+ EmitAbbrev(MCOS, dwarf::DW_AT_low_pc, dwarf::DW_FORM_addr);
+ EmitAbbrev(MCOS, dwarf::DW_AT_high_pc, dwarf::DW_FORM_addr);
+ }
EmitAbbrev(MCOS, dwarf::DW_AT_name, dwarf::DW_FORM_string);
if (!context.getCompilationDir().empty())
EmitAbbrev(MCOS, dwarf::DW_AT_comp_dir, dwarf::DW_FORM_string);
@@ -498,20 +558,14 @@ static void EmitGenDwarfAbbrev(MCStreamer *MCOS) {
}
// When generating dwarf for assembly source files this emits the data for
-// .debug_aranges section. Which contains a header and a table of pairs of
-// PointerSize'ed values for the address and size of section(s) with line table
-// entries (just the default .text in our case) and a terminating pair of zeros.
+// .debug_aranges section. This section contains a header and a table of pairs
+// of PointerSize'ed values for the address and size of section(s) with line
+// table entries.
static void EmitGenDwarfAranges(MCStreamer *MCOS,
const MCSymbol *InfoSectionSymbol) {
MCContext &context = MCOS->getContext();
- // Create a symbol at the end of the section that we are creating the dwarf
- // debugging info to use later in here as part of the expression to calculate
- // the size of the section for the table.
- MCOS->SwitchSection(context.getGenDwarfSection());
- MCSymbol *SectionEndSym = context.CreateTempSymbol();
- MCOS->EmitLabel(SectionEndSym);
- context.setGenDwarfSectionEndSym(SectionEndSym);
+ auto &Sections = context.getGenDwarfSectionSyms();
MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfARangesSection());
@@ -529,8 +583,8 @@ static void EmitGenDwarfAranges(MCStreamer *MCOS,
Length += Pad;
// Add the size of the pair of PointerSize'ed values for the address and size
- // of the one default .text section we have in the table.
- Length += 2 * AddrSize;
+ // of each section we have in the table.
+ Length += 2 * AddrSize * Sections.size();
// And the pair of terminating zeros.
Length += 2 * AddrSize;
@@ -554,14 +608,21 @@ static void EmitGenDwarfAranges(MCStreamer *MCOS,
for(int i = 0; i < Pad; i++)
MCOS->EmitIntValue(0, 1);
- // Now emit the table of pairs of PointerSize'ed values for the section(s)
- // address and size, in our case just the one default .text section.
- const MCExpr *Addr = MCSymbolRefExpr::Create(
- context.getGenDwarfSectionStartSym(), MCSymbolRefExpr::VK_None, context);
- const MCExpr *Size = MakeStartMinusEndExpr(*MCOS,
- *context.getGenDwarfSectionStartSym(), *SectionEndSym, 0);
- MCOS->EmitAbsValue(Addr, AddrSize);
- MCOS->EmitAbsValue(Size, AddrSize);
+ // Now emit the table of pairs of PointerSize'ed values for the section
+ // addresses and sizes.
+ for (const auto &sec : Sections) {
+ MCSymbol *StartSymbol = sec.second.first;
+ MCSymbol *EndSymbol = sec.second.second;
+ assert(StartSymbol && "StartSymbol must not be NULL");
+ assert(EndSymbol && "EndSymbol must not be NULL");
+
+ const MCExpr *Addr = MCSymbolRefExpr::Create(
+ StartSymbol, MCSymbolRefExpr::VK_None, context);
+ const MCExpr *Size = MakeStartMinusEndExpr(*MCOS,
+ *StartSymbol, *EndSymbol, 0);
+ MCOS->EmitValue(Addr, AddrSize);
+ MCOS->EmitAbsValue(Size, AddrSize);
+ }
// And finally the pair of terminating zeros.
MCOS->EmitIntValue(0, AddrSize);
@@ -573,7 +634,8 @@ static void EmitGenDwarfAranges(MCStreamer *MCOS,
// DIE and a list of label DIEs.
static void EmitGenDwarfInfo(MCStreamer *MCOS,
const MCSymbol *AbbrevSectionSymbol,
- const MCSymbol *LineSectionSymbol) {
+ const MCSymbol *LineSectionSymbol,
+ const MCSymbol *RangesSectionSymbol) {
MCContext &context = MCOS->getContext();
MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfInfoSection());
@@ -591,16 +653,17 @@ static void EmitGenDwarfInfo(MCStreamer *MCOS,
const MCExpr *Length = MakeStartMinusEndExpr(*MCOS, *InfoStart, *InfoEnd, 4);
MCOS->EmitAbsValue(Length, 4);
- // The 2 byte DWARF version, which is 2.
- MCOS->EmitIntValue(2, 2);
+ // The 2 byte DWARF version.
+ MCOS->EmitIntValue(context.getDwarfVersion(), 2);
+ const MCAsmInfo &AsmInfo = *context.getAsmInfo();
// The 4 byte offset to the debug abbrevs from the start of the .debug_abbrev,
// it is at the start of that section so this is zero.
- if (AbbrevSectionSymbol) {
- MCOS->EmitSymbolValue(AbbrevSectionSymbol, 4);
- } else {
+ if (AbbrevSectionSymbol == nullptr)
MCOS->EmitIntValue(0, 4);
- }
+ else
+ MCOS->EmitSymbolValue(AbbrevSectionSymbol, 4,
+ AsmInfo.needsDwarfSectionOffsetDirective());
const MCAsmInfo *asmInfo = context.getAsmInfo();
int AddrSize = asmInfo->getPointerSize();
@@ -620,27 +683,48 @@ static void EmitGenDwarfInfo(MCStreamer *MCOS,
MCOS->EmitIntValue(0, 4);
}
- // AT_low_pc, the first address of the default .text section.
- const MCExpr *Start = MCSymbolRefExpr::Create(
- context.getGenDwarfSectionStartSym(), MCSymbolRefExpr::VK_None, context);
- MCOS->EmitAbsValue(Start, AddrSize);
+ if (RangesSectionSymbol) {
+ // There are multiple sections containing code, so we must use the
+ // .debug_ranges sections.
- // AT_high_pc, the last address of the default .text section.
- const MCExpr *End = MCSymbolRefExpr::Create(
- context.getGenDwarfSectionEndSym(), MCSymbolRefExpr::VK_None, context);
- MCOS->EmitAbsValue(End, AddrSize);
+ // AT_ranges, the 4 byte offset from the start of the .debug_ranges section
+ // to the address range list for this compilation unit.
+ MCOS->EmitSymbolValue(RangesSectionSymbol, 4);
+ } else {
+ // If we only have one non-empty code section, we can use the simpler
+ // AT_low_pc and AT_high_pc attributes.
+
+ // Find the first (and only) non-empty text section
+ auto &Sections = context.getGenDwarfSectionSyms();
+ const auto TextSection = Sections.begin();
+ assert(TextSection != Sections.end() && "No text section found");
+
+ MCSymbol *StartSymbol = TextSection->second.first;
+ MCSymbol *EndSymbol = TextSection->second.second;
+ assert(StartSymbol && "StartSymbol must not be NULL");
+ assert(EndSymbol && "EndSymbol must not be NULL");
+
+ // AT_low_pc, the first address of the default .text section.
+ const MCExpr *Start = MCSymbolRefExpr::Create(
+ StartSymbol, MCSymbolRefExpr::VK_None, context);
+ MCOS->EmitValue(Start, AddrSize);
+
+ // AT_high_pc, the last address of the default .text section.
+ const MCExpr *End = MCSymbolRefExpr::Create(
+ EndSymbol, MCSymbolRefExpr::VK_None, context);
+ MCOS->EmitValue(End, AddrSize);
+ }
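
The branch above picks the compile unit's address attributes: with several code sections and DWARF 3 or newer it points DW_AT_ranges into .debug_ranges, otherwise it describes the single text section with DW_AT_low_pc/DW_AT_high_pc. A minimal sketch of that decision, with an illustrative helper name only:

// Minimal sketch of the decision above (names are illustrative only).
#include <cstdio>

static bool useDebugRanges(unsigned NumCodeSections, unsigned DwarfVersion) {
  // DW_AT_ranges only exists from DWARF 3 on; with a single code section the
  // simpler DW_AT_low_pc / DW_AT_high_pc pair is enough.
  return NumCodeSections > 1 && DwarfVersion >= 3;
}

int main() {
  std::printf("%d %d %d\n",
              useDebugRanges(1, 4),  // 0: one section -> low_pc/high_pc
              useDebugRanges(3, 2),  // 0: DWARF 2 has no DW_AT_ranges
              useDebugRanges(3, 4)); // 1: use the ranges section
}
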
// AT_name, the name of the source file. Reconstruct from the first directory
// and file table entries.
- const SmallVectorImpl<StringRef> &MCDwarfDirs =
- context.getMCDwarfDirs();
+ const SmallVectorImpl<std::string> &MCDwarfDirs = context.getMCDwarfDirs();
if (MCDwarfDirs.size() > 0) {
MCOS->EmitBytes(MCDwarfDirs[0]);
- MCOS->EmitBytes("/");
+ MCOS->EmitBytes(sys::path::get_separator());
}
- const SmallVectorImpl<MCDwarfFile *> &MCDwarfFiles =
+ const SmallVectorImpl<MCDwarfFile> &MCDwarfFiles =
MCOS->getContext().getMCDwarfFiles();
- MCOS->EmitBytes(MCDwarfFiles[1]->getName());
+ MCOS->EmitBytes(MCDwarfFiles[1].Name);
MCOS->EmitIntValue(0, 1); // NULL byte to terminate the string.
// AT_comp_dir, the working directory the assembly was done in.
@@ -658,14 +742,10 @@ static void EmitGenDwarfInfo(MCStreamer *MCOS,
// AT_producer, the version of the assembler tool.
StringRef DwarfDebugProducer = context.getDwarfDebugProducer();
- if (!DwarfDebugProducer.empty()){
+ if (!DwarfDebugProducer.empty())
MCOS->EmitBytes(DwarfDebugProducer);
- }
- else {
- MCOS->EmitBytes(StringRef("llvm-mc (based on LLVM "));
- MCOS->EmitBytes(StringRef(PACKAGE_VERSION));
- MCOS->EmitBytes(StringRef(")"));
- }
+ else
+ MCOS->EmitBytes(StringRef("llvm-mc (based on LLVM " PACKAGE_VERSION ")"));
MCOS->EmitIntValue(0, 1); // NULL byte to terminate the string.
// AT_language, a 4 byte value. We use DW_LANG_Mips_Assembler as the dwarf2
@@ -675,30 +755,26 @@ static void EmitGenDwarfInfo(MCStreamer *MCOS,
// Third part: the list of label DIEs.
// Loop on saved info for dwarf labels and create the DIEs for them.
- const std::vector<const MCGenDwarfLabelEntry *> &Entries =
- MCOS->getContext().getMCGenDwarfLabelEntries();
- for (std::vector<const MCGenDwarfLabelEntry *>::const_iterator it =
- Entries.begin(), ie = Entries.end(); it != ie;
- ++it) {
- const MCGenDwarfLabelEntry *Entry = *it;
-
+ const std::vector<MCGenDwarfLabelEntry> &Entries =
+ MCOS->getContext().getMCGenDwarfLabelEntries();
+ for (const auto &Entry : Entries) {
// The DW_TAG_label DIE abbrev (2).
MCOS->EmitULEB128IntValue(2);
// AT_name, of the label without any leading underbar.
- MCOS->EmitBytes(Entry->getName());
+ MCOS->EmitBytes(Entry.getName());
MCOS->EmitIntValue(0, 1); // NULL byte to terminate the string.
// AT_decl_file, index into the file table.
- MCOS->EmitIntValue(Entry->getFileNumber(), 4);
+ MCOS->EmitIntValue(Entry.getFileNumber(), 4);
// AT_decl_line, source line number.
- MCOS->EmitIntValue(Entry->getLineNumber(), 4);
+ MCOS->EmitIntValue(Entry.getLineNumber(), 4);
// AT_low_pc, start address of the label.
- const MCExpr *AT_low_pc = MCSymbolRefExpr::Create(Entry->getLabel(),
+ const MCExpr *AT_low_pc = MCSymbolRefExpr::Create(Entry.getLabel(),
MCSymbolRefExpr::VK_None, context);
- MCOS->EmitAbsValue(AT_low_pc, AddrSize);
+ MCOS->EmitValue(AT_low_pc, AddrSize);
// DW_AT_prototyped, a one byte flag value of 0 saying we have no prototype.
MCOS->EmitIntValue(0, 1);
@@ -709,14 +785,6 @@ static void EmitGenDwarfInfo(MCStreamer *MCOS,
// Add the NULL DIE terminating the DW_TAG_unspecified_parameters DIE's.
MCOS->EmitIntValue(0, 1);
}
- // Deallocate the MCGenDwarfLabelEntry classes that saved away the info
- // for the dwarf labels.
- for (std::vector<const MCGenDwarfLabelEntry *>::const_iterator it =
- Entries.begin(), ie = Entries.end(); it != ie;
- ++it) {
- const MCGenDwarfLabelEntry *Entry = *it;
- delete Entry;
- }
// Add the NULL DIE terminating the Compile Unit DIE's.
MCOS->EmitIntValue(0, 1);
@@ -725,20 +793,76 @@ static void EmitGenDwarfInfo(MCStreamer *MCOS,
MCOS->EmitLabel(InfoEnd);
}
+// When generating dwarf for assembly source files this emits the data for
+// .debug_ranges section. We only emit one range list, which spans all of the
+// executable sections of this file.
+static void EmitGenDwarfRanges(MCStreamer *MCOS) {
+ MCContext &context = MCOS->getContext();
+ auto &Sections = context.getGenDwarfSectionSyms();
+
+ const MCAsmInfo *AsmInfo = context.getAsmInfo();
+ int AddrSize = AsmInfo->getPointerSize();
+
+ MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfRangesSection());
+
+ for (const auto sec : Sections) {
+
+ MCSymbol *StartSymbol = sec.second.first;
+ MCSymbol *EndSymbol = sec.second.second;
+ assert(StartSymbol && "StartSymbol must not be NULL");
+ assert(EndSymbol && "EndSymbol must not be NULL");
+
+ // Emit a base address selection entry for the start of this section
+ const MCExpr *SectionStartAddr = MCSymbolRefExpr::Create(
+ StartSymbol, MCSymbolRefExpr::VK_None, context);
+ MCOS->EmitFill(AddrSize, 0xFF);
+ MCOS->EmitValue(SectionStartAddr, AddrSize);
+
+ // Emit a range list entry spanning this section
+ const MCExpr *SectionSize = MakeStartMinusEndExpr(*MCOS,
+ *StartSymbol, *EndSymbol, 0);
+ MCOS->EmitIntValue(0, AddrSize);
+ MCOS->EmitAbsValue(SectionSize, AddrSize);
+ }
+
+ // Emit end of list entry
+ MCOS->EmitIntValue(0, AddrSize);
+ MCOS->EmitIntValue(0, AddrSize);
+}
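
EmitGenDwarfRanges above writes one range list covering every code section: each section contributes a base-address-selection entry (an all-ones address followed by the section start) and a (0, size) range, and the list ends with a (0, 0) pair. A standalone sketch of the resulting word layout, assuming 8-byte addresses and made-up section values:

// Standalone sketch (not the LLVM streamer API) of the .debug_ranges layout
// produced above, using 8-byte address words.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  struct Sec { uint64_t Base, Size; };
  std::vector<Sec> Sections = {{0x1000, 0x40}, {0x2000, 0x10}}; // assumed inputs

  std::vector<uint64_t> RangeList;
  for (const Sec &S : Sections) {
    RangeList.push_back(~0ULL);  // base address selection marker
    RangeList.push_back(S.Base); // new base for the following entries
    RangeList.push_back(0);      // range start, relative to the base
    RangeList.push_back(S.Size); // range end, relative to the base
  }
  RangeList.push_back(0);        // end-of-list entry
  RangeList.push_back(0);

  for (uint64_t W : RangeList)
    std::printf("0x%016llx\n", (unsigned long long)W);
}
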
+
//
// When generating dwarf for assembly source files this emits the Dwarf
// sections.
//
-void MCGenDwarfInfo::Emit(MCStreamer *MCOS, const MCSymbol *LineSectionSymbol) {
- // Create the dwarf sections in this order (.debug_line already created).
+void MCGenDwarfInfo::Emit(MCStreamer *MCOS) {
MCContext &context = MCOS->getContext();
+
+ // Create the dwarf sections in this order (.debug_line already created).
const MCAsmInfo *AsmInfo = context.getAsmInfo();
bool CreateDwarfSectionSymbols =
AsmInfo->doesDwarfUseRelocationsAcrossSections();
- if (!CreateDwarfSectionSymbols)
- LineSectionSymbol = NULL;
- MCSymbol *AbbrevSectionSymbol = NULL;
- MCSymbol *InfoSectionSymbol = NULL;
+ MCSymbol *LineSectionSymbol = nullptr;
+ if (CreateDwarfSectionSymbols)
+ LineSectionSymbol = MCOS->getDwarfLineTableSymbol(0);
+ MCSymbol *AbbrevSectionSymbol = nullptr;
+ MCSymbol *InfoSectionSymbol = nullptr;
+ MCSymbol *RangesSectionSymbol = NULL;
+
+ // Create end symbols for each section, and remove empty sections
+ MCOS->getContext().finalizeDwarfSections(*MCOS);
+
+ // If there are no sections to generate debug info for, we don't need
+ // to do anything
+ if (MCOS->getContext().getGenDwarfSectionSyms().empty())
+ return;
+
+ // We only use the .debug_ranges section if we have multiple code sections,
+ // and we are emitting a DWARF version which supports it.
+ const bool UseRangesSection =
+ MCOS->getContext().getGenDwarfSectionSyms().size() > 1 &&
+ MCOS->getContext().getDwarfVersion() >= 3;
+ CreateDwarfSectionSymbols |= UseRangesSection;
+
MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfInfoSection());
if (CreateDwarfSectionSymbols) {
InfoSectionSymbol = context.CreateTempSymbol();
@@ -749,20 +873,30 @@ void MCGenDwarfInfo::Emit(MCStreamer *MCOS, const MCSymbol *LineSectionSymbol) {
AbbrevSectionSymbol = context.CreateTempSymbol();
MCOS->EmitLabel(AbbrevSectionSymbol);
}
- MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfARangesSection());
+ if (UseRangesSection) {
+ MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfRangesSection());
+ if (CreateDwarfSectionSymbols) {
+ RangesSectionSymbol = context.CreateTempSymbol();
+ MCOS->EmitLabel(RangesSectionSymbol);
+ }
+ }
- // If there are no line table entries then do not emit any section contents.
- if (context.getMCLineSections().empty())
- return;
+ assert((RangesSectionSymbol != NULL) || !UseRangesSection);
+
+ MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfARangesSection());
// Output the data for .debug_aranges section.
EmitGenDwarfAranges(MCOS, InfoSectionSymbol);
+ if (UseRangesSection)
+ EmitGenDwarfRanges(MCOS);
+
// Output the data for .debug_abbrev section.
EmitGenDwarfAbbrev(MCOS);
// Output the data for .debug_info section.
- EmitGenDwarfInfo(MCOS, AbbrevSectionSymbol, LineSectionSymbol);
+ EmitGenDwarfInfo(MCOS, AbbrevSectionSymbol, LineSectionSymbol,
+ RangesSectionSymbol);
}
//
@@ -773,12 +907,13 @@ void MCGenDwarfInfo::Emit(MCStreamer *MCOS, const MCSymbol *LineSectionSymbol) {
//
void MCGenDwarfLabelEntry::Make(MCSymbol *Symbol, MCStreamer *MCOS,
SourceMgr &SrcMgr, SMLoc &Loc) {
- // We won't create dwarf labels for temporary symbols or symbols not in
- // the default text.
+ // We won't create dwarf labels for temporary symbols.
if (Symbol->isTemporary())
return;
MCContext &context = MCOS->getContext();
- if (context.getGenDwarfSection() != MCOS->getCurrentSection().first)
+ // We won't create dwarf labels for symbols in sections that we are not
+ // generating debug info for.
+ if (!context.getGenDwarfSectionSyms().count(MCOS->getCurrentSection().first))
return;
// The dwarf label's name does not have the symbol name's leading
@@ -792,7 +927,7 @@ void MCGenDwarfLabelEntry::Make(MCSymbol *Symbol, MCStreamer *MCOS,
// Finding the line number is the expensive part which is why we just don't
// pass it in as for some symbols we won't create a dwarf label.
- int CurBuffer = SrcMgr.FindBufferContainingLoc(Loc);
+ unsigned CurBuffer = SrcMgr.FindBufferContainingLoc(Loc);
unsigned LineNumber = SrcMgr.FindLineNumber(Loc, CurBuffer);
// We create a temporary symbol for use for the AT_high_pc and AT_low_pc
@@ -803,9 +938,8 @@ void MCGenDwarfLabelEntry::Make(MCSymbol *Symbol, MCStreamer *MCOS,
MCOS->EmitLabel(Label);
   // Create an entry for the info and add it to the other entries.
- MCGenDwarfLabelEntry *Entry =
- new MCGenDwarfLabelEntry(Name, FileNumber, LineNumber, Label);
- MCOS->getContext().addMCGenDwarfLabelEntry(Entry);
+ MCOS->getContext().addMCGenDwarfLabelEntry(
+ MCGenDwarfLabelEntry(Name, FileNumber, LineNumber, Label));
}
static int getDataAlignmentFactor(MCStreamer &streamer) {
@@ -841,7 +975,7 @@ static unsigned getSizeForEncoding(MCStreamer &streamer,
static void EmitFDESymbol(MCStreamer &streamer, const MCSymbol &symbol,
unsigned symbolEncoding, bool isEH,
- const char *comment = 0) {
+ const char *comment = nullptr) {
MCContext &context = streamer.getContext();
const MCAsmInfo *asmInfo = context.getAsmInfo();
const MCExpr *v = asmInfo->getExprForFDESymbol(&symbol,
@@ -870,13 +1004,11 @@ namespace {
class FrameEmitterImpl {
int CFAOffset;
int CIENum;
- bool UsingCFI;
bool IsEH;
const MCSymbol *SectionStart;
public:
- FrameEmitterImpl(bool usingCFI, bool isEH)
- : CFAOffset(0), CIENum(0), UsingCFI(usingCFI), IsEH(isEH),
- SectionStart(0) {}
+ FrameEmitterImpl(bool isEH)
+ : CFAOffset(0), CIENum(0), IsEH(isEH), SectionStart(nullptr) {}
void setSectionStart(const MCSymbol *Label) { SectionStart = Label; }
@@ -884,19 +1016,20 @@ namespace {
void EmitCompactUnwind(MCStreamer &streamer,
const MCDwarfFrameInfo &frame);
- const MCSymbol &EmitCIE(MCStreamer &streamer,
+ const MCSymbol &EmitCIE(MCObjectStreamer &streamer,
const MCSymbol *personality,
unsigned personalityEncoding,
const MCSymbol *lsda,
bool IsSignalFrame,
- unsigned lsdaEncoding);
- MCSymbol *EmitFDE(MCStreamer &streamer,
+ unsigned lsdaEncoding,
+ bool IsSimple);
+ MCSymbol *EmitFDE(MCObjectStreamer &streamer,
const MCSymbol &cieStart,
const MCDwarfFrameInfo &frame);
- void EmitCFIInstructions(MCStreamer &streamer,
+ void EmitCFIInstructions(MCObjectStreamer &streamer,
ArrayRef<MCCFIInstruction> Instrs,
MCSymbol *BaseLabel);
- void EmitCFIInstruction(MCStreamer &Streamer,
+ void EmitCFIInstruction(MCObjectStreamer &Streamer,
const MCCFIInstruction &Instr);
};
@@ -947,7 +1080,7 @@ static void EmitEncodingByte(MCStreamer &Streamer, unsigned Encoding,
Streamer.EmitIntValue(Encoding, 1);
}
-void FrameEmitterImpl::EmitCFIInstruction(MCStreamer &Streamer,
+void FrameEmitterImpl::EmitCFIInstruction(MCObjectStreamer &Streamer,
const MCCFIInstruction &Instr) {
int dataAlignmentFactor = getDataAlignmentFactor(Streamer);
bool VerboseAsm = Streamer.isVerboseAsm();
@@ -1099,7 +1232,7 @@ void FrameEmitterImpl::EmitCFIInstruction(MCStreamer &Streamer,
/// EmitFrameMoves - Emit frame instructions to describe the layout of the
/// frame.
-void FrameEmitterImpl::EmitCFIInstructions(MCStreamer &streamer,
+void FrameEmitterImpl::EmitCFIInstructions(MCObjectStreamer &streamer,
ArrayRef<MCCFIInstruction> Instrs,
MCSymbol *BaseLabel) {
for (unsigned i = 0, N = Instrs.size(); i < N; ++i) {
@@ -1160,10 +1293,10 @@ void FrameEmitterImpl::EmitCompactUnwind(MCStreamer &Streamer,
Encoding |= 0x40000000;
// Range Start
- unsigned FDEEncoding = MOFI->getFDEEncoding(UsingCFI);
+ unsigned FDEEncoding = MOFI->getFDEEncoding();
unsigned Size = getSizeForEncoding(Streamer, FDEEncoding);
if (VerboseAsm) Streamer.AddComment("Range Start");
- Streamer.EmitSymbolValue(Frame.Function, Size);
+ Streamer.EmitSymbolValue(Frame.Begin, Size);
// Range Length
const MCExpr *Range = MakeStartMinusEndExpr(Streamer, *Frame.Begin,
@@ -1194,23 +1327,19 @@ void FrameEmitterImpl::EmitCompactUnwind(MCStreamer &Streamer,
Streamer.EmitIntValue(0, Size); // No LSDA
}
-const MCSymbol &FrameEmitterImpl::EmitCIE(MCStreamer &streamer,
+const MCSymbol &FrameEmitterImpl::EmitCIE(MCObjectStreamer &streamer,
const MCSymbol *personality,
unsigned personalityEncoding,
const MCSymbol *lsda,
bool IsSignalFrame,
- unsigned lsdaEncoding) {
+ unsigned lsdaEncoding,
+ bool IsSimple) {
MCContext &context = streamer.getContext();
const MCRegisterInfo *MRI = context.getRegisterInfo();
const MCObjectFileInfo *MOFI = context.getObjectFileInfo();
bool verboseAsm = streamer.isVerboseAsm();
- MCSymbol *sectionStart;
- if (MOFI->isFunctionEHFrameSymbolPrivate() || !IsEH)
- sectionStart = context.CreateTempSymbol();
- else
- sectionStart = context.GetOrCreateSymbol(Twine("EH_frame") + Twine(CIENum));
-
+ MCSymbol *sectionStart = context.CreateTempSymbol();
streamer.EmitLabel(sectionStart);
CIENum++;
@@ -1229,7 +1358,10 @@ const MCSymbol &FrameEmitterImpl::EmitCIE(MCStreamer &streamer,
// Version
if (verboseAsm) streamer.AddComment("DW_CIE_VERSION");
- streamer.EmitIntValue(dwarf::DW_CIE_VERSION, 1);
+ // For DWARF2, we use CIE version 1
+ // For DWARF3+, we use CIE version 3
+ uint8_t CIEVersion = context.getDwarfVersion() <= 2 ? 1 : 3;
+ streamer.EmitIntValue(CIEVersion, 1);
// Augmentation String
SmallString<8> Augmentation;
@@ -1257,7 +1389,14 @@ const MCSymbol &FrameEmitterImpl::EmitCIE(MCStreamer &streamer,
// Return Address Register
if (verboseAsm) streamer.AddComment("CIE Return Address Column");
- streamer.EmitULEB128IntValue(MRI->getDwarfRegNum(MRI->getRARegister(), true));
+ if (CIEVersion == 1) {
+ assert(MRI->getRARegister() <= 255 &&
+ "DWARF 2 encodes return_address_register in one byte");
+ streamer.EmitIntValue(MRI->getDwarfRegNum(MRI->getRARegister(), true), 1);
+ } else {
+ streamer.EmitULEB128IntValue(
+ MRI->getDwarfRegNum(MRI->getRARegister(), true));
+ }
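
The CIE now records version 1 for DWARF 2 and version 3 otherwise, and the return_address_register field changes width with it: one byte in version 1, ULEB128 in version 3, which is why the assert above caps the register number at 255 for DWARF 2. A standalone sketch of that field-width difference, with illustrative names and example register numbers only:

// Standalone sketch (not the LLVM API) of the CIE version / field-width
// difference handled above.
#include <cstdint>
#include <cstdio>
#include <vector>

static void emitULEB128(std::vector<uint8_t> &Out, uint64_t V) {
  do {
    uint8_t B = V & 0x7f;
    V >>= 7;
    if (V) B |= 0x80;
    Out.push_back(B);
  } while (V);
}

static void emitVersionAndRAReg(std::vector<uint8_t> &Out,
                                unsigned DwarfVersion, unsigned Reg) {
  uint8_t CIEVersion = DwarfVersion <= 2 ? 1 : 3; // same rule as the patch
  Out.push_back(CIEVersion);
  if (CIEVersion == 1)
    Out.push_back(uint8_t(Reg)); // must fit in one byte for DWARF 2
  else
    emitULEB128(Out, Reg);       // DWARF 3+ allows large register numbers
}

int main() {
  std::vector<uint8_t> V2, V4;
  emitVersionAndRAReg(V2, 2, 30);  // small register number, single byte
  emitVersionAndRAReg(V4, 4, 300); // large register number needs ULEB128
  std::printf("v2: %zu bytes, v4: %zu bytes\n", V2.size(), V4.size());
}
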
// Augmentation Data Length (optional)
@@ -1291,16 +1430,17 @@ const MCSymbol &FrameEmitterImpl::EmitCIE(MCStreamer &streamer,
EmitEncodingByte(streamer, lsdaEncoding, "LSDA Encoding");
// Encoding of the FDE pointers
- EmitEncodingByte(streamer, MOFI->getFDEEncoding(UsingCFI),
- "FDE Encoding");
+ EmitEncodingByte(streamer, MOFI->getFDEEncoding(), "FDE Encoding");
}
// Initial Instructions
const MCAsmInfo *MAI = context.getAsmInfo();
- const std::vector<MCCFIInstruction> &Instructions =
- MAI->getInitialFrameState();
- EmitCFIInstructions(streamer, Instructions, NULL);
+ if (!IsSimple) {
+ const std::vector<MCCFIInstruction> &Instructions =
+ MAI->getInitialFrameState();
+ EmitCFIInstructions(streamer, Instructions, nullptr);
+ }
// Padding
streamer.EmitValueToAlignment(IsEH ? 4 : MAI->getPointerSize());
@@ -1309,7 +1449,7 @@ const MCSymbol &FrameEmitterImpl::EmitCIE(MCStreamer &streamer,
return *sectionStart;
}
-MCSymbol *FrameEmitterImpl::EmitFDE(MCStreamer &streamer,
+MCSymbol *FrameEmitterImpl::EmitFDE(MCObjectStreamer &streamer,
const MCSymbol &cieStart,
const MCDwarfFrameInfo &frame) {
MCContext &context = streamer.getContext();
@@ -1318,13 +1458,6 @@ MCSymbol *FrameEmitterImpl::EmitFDE(MCStreamer &streamer,
const MCObjectFileInfo *MOFI = context.getObjectFileInfo();
bool verboseAsm = streamer.isVerboseAsm();
- if (IsEH && frame.Function && !MOFI->isFunctionEHFrameSymbolPrivate()) {
- MCSymbol *EHSym =
- context.GetOrCreateSymbol(frame.Function->getName() + Twine(".eh"));
- streamer.EmitEHSymAttributes(frame.Function, EHSym);
- streamer.EmitLabel(EHSym);
- }
-
// Length
const MCExpr *Length = MakeStartMinusEndExpr(streamer, *fdeStart, *fdeEnd, 0);
if (verboseAsm) streamer.AddComment("FDE Length");
@@ -1348,8 +1481,8 @@ MCSymbol *FrameEmitterImpl::EmitFDE(MCStreamer &streamer,
}
// PC Begin
- unsigned PCEncoding = IsEH ? MOFI->getFDEEncoding(UsingCFI)
- : (unsigned)dwarf::DW_EH_PE_absptr;
+ unsigned PCEncoding =
+ IsEH ? MOFI->getFDEEncoding() : (unsigned)dwarf::DW_EH_PE_absptr;
unsigned PCSize = getSizeForEncoding(streamer, PCEncoding);
EmitFDESymbol(streamer, *frame.Begin, PCEncoding, IsEH, "FDE initial location");
@@ -1386,18 +1519,23 @@ MCSymbol *FrameEmitterImpl::EmitFDE(MCStreamer &streamer,
namespace {
struct CIEKey {
- static const CIEKey getEmptyKey() { return CIEKey(0, 0, -1, false); }
- static const CIEKey getTombstoneKey() { return CIEKey(0, -1, 0, false); }
-
- CIEKey(const MCSymbol* Personality_, unsigned PersonalityEncoding_,
- unsigned LsdaEncoding_, bool IsSignalFrame_) :
- Personality(Personality_), PersonalityEncoding(PersonalityEncoding_),
- LsdaEncoding(LsdaEncoding_), IsSignalFrame(IsSignalFrame_) {
+ static const CIEKey getEmptyKey() {
+ return CIEKey(nullptr, 0, -1, false, false);
+ }
+ static const CIEKey getTombstoneKey() {
+ return CIEKey(nullptr, -1, 0, false, false);
}
- const MCSymbol* Personality;
+
+ CIEKey(const MCSymbol *Personality_, unsigned PersonalityEncoding_,
+ unsigned LsdaEncoding_, bool IsSignalFrame_, bool IsSimple_)
+ : Personality(Personality_), PersonalityEncoding(PersonalityEncoding_),
+ LsdaEncoding(LsdaEncoding_), IsSignalFrame(IsSignalFrame_),
+ IsSimple(IsSimple_) {}
+ const MCSymbol *Personality;
unsigned PersonalityEncoding;
unsigned LsdaEncoding;
bool IsSignalFrame;
+ bool IsSimple;
};
}
@@ -1414,28 +1552,31 @@ namespace llvm {
return static_cast<unsigned>(hash_combine(Key.Personality,
Key.PersonalityEncoding,
Key.LsdaEncoding,
- Key.IsSignalFrame));
+ Key.IsSignalFrame,
+ Key.IsSimple));
}
static bool isEqual(const CIEKey &LHS,
const CIEKey &RHS) {
return LHS.Personality == RHS.Personality &&
LHS.PersonalityEncoding == RHS.PersonalityEncoding &&
LHS.LsdaEncoding == RHS.LsdaEncoding &&
- LHS.IsSignalFrame == RHS.IsSignalFrame;
+ LHS.IsSignalFrame == RHS.IsSignalFrame &&
+ LHS.IsSimple == RHS.IsSimple;
}
};
}
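
CIEKey gains the IsSimple flag, and both the hash and the equality include it, so frames only share a CIE when personality, encodings, and both flags match. A standalone sketch of that deduplication, with std::unordered_map standing in for llvm::DenseMap and example attribute values:

// Standalone sketch of CIE sharing keyed on the fields hashed above.
#include <cstdio>
#include <functional>
#include <string>
#include <unordered_map>

struct CIEKey {
  std::string Personality;
  unsigned PersonalityEncoding = 0, LsdaEncoding = 0;
  bool IsSignalFrame = false, IsSimple = false;
  bool operator==(const CIEKey &O) const {
    return Personality == O.Personality &&
           PersonalityEncoding == O.PersonalityEncoding &&
           LsdaEncoding == O.LsdaEncoding &&
           IsSignalFrame == O.IsSignalFrame && IsSimple == O.IsSimple;
  }
};

struct CIEKeyHash {
  size_t operator()(const CIEKey &K) const {
    size_t H = std::hash<std::string>()(K.Personality);
    H ^= (K.PersonalityEncoding * 31 + K.LsdaEncoding) * 131;
    H ^= (size_t(K.IsSignalFrame) << 1) | size_t(K.IsSimple);
    return H;
  }
};

int main() {
  std::unordered_map<CIEKey, unsigned, CIEKeyHash> CIEStarts;
  unsigned NextCIE = 0;
  CIEKey A{"personality0", 0x9b, 0xff, false, false}; // example values only
  CIEKey B = A; // a second frame with identical attributes
  unsigned &First = CIEStarts[A];
  if (!First) First = ++NextCIE;
  unsigned &Second = CIEStarts[B];
  if (!Second) Second = ++NextCIE;
  std::printf("frames share a CIE: %s\n", First == Second ? "yes" : "no");
}
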
-void MCDwarfFrameEmitter::Emit(MCStreamer &Streamer, MCAsmBackend *MAB,
- bool UsingCFI, bool IsEH) {
+void MCDwarfFrameEmitter::Emit(MCObjectStreamer &Streamer, MCAsmBackend *MAB,
+ bool IsEH) {
Streamer.generateCompactUnwindEncodings(MAB);
MCContext &Context = Streamer.getContext();
const MCObjectFileInfo *MOFI = Context.getObjectFileInfo();
- FrameEmitterImpl Emitter(UsingCFI, IsEH);
- ArrayRef<MCDwarfFrameInfo> FrameArray = Streamer.getFrameInfos();
+ FrameEmitterImpl Emitter(IsEH);
+ ArrayRef<MCDwarfFrameInfo> FrameArray = Streamer.getDwarfFrameInfos();
// Emit the compact unwind info if available.
+ bool NeedsEHFrameSection = !MOFI->getSupportsCompactUnwindWithoutEHFrame();
if (IsEH && MOFI->getCompactUnwindSection()) {
bool SectionEmitted = false;
for (unsigned i = 0, n = FrameArray.size(); i < n; ++i) {
@@ -1446,37 +1587,55 @@ void MCDwarfFrameEmitter::Emit(MCStreamer &Streamer, MCAsmBackend *MAB,
Streamer.EmitValueToAlignment(Context.getAsmInfo()->getPointerSize());
SectionEmitted = true;
}
+ NeedsEHFrameSection |=
+ Frame.CompactUnwindEncoding ==
+ MOFI->getCompactUnwindDwarfEHFrameOnly();
Emitter.EmitCompactUnwind(Streamer, Frame);
}
}
+ if (!NeedsEHFrameSection) return;
+
const MCSection &Section =
IsEH ? *const_cast<MCObjectFileInfo*>(MOFI)->getEHFrameSection() :
*MOFI->getDwarfFrameSection();
+
Streamer.SwitchSection(&Section);
MCSymbol *SectionStart = Context.CreateTempSymbol();
Streamer.EmitLabel(SectionStart);
Emitter.setSectionStart(SectionStart);
- MCSymbol *FDEEnd = NULL;
- DenseMap<CIEKey, const MCSymbol*> CIEStarts;
+ MCSymbol *FDEEnd = nullptr;
+ DenseMap<CIEKey, const MCSymbol *> CIEStarts;
- const MCSymbol *DummyDebugKey = NULL;
+ const MCSymbol *DummyDebugKey = nullptr;
+ NeedsEHFrameSection = !MOFI->getSupportsCompactUnwindWithoutEHFrame();
for (unsigned i = 0, n = FrameArray.size(); i < n; ++i) {
const MCDwarfFrameInfo &Frame = FrameArray[i];
+
+ // Emit the label from the previous iteration
+ if (FDEEnd) {
+ Streamer.EmitLabel(FDEEnd);
+ FDEEnd = nullptr;
+ }
+
+ if (!NeedsEHFrameSection && Frame.CompactUnwindEncoding !=
+ MOFI->getCompactUnwindDwarfEHFrameOnly())
+ // Don't generate an EH frame if we don't need one. I.e., it's taken care
+ // of by the compact unwind encoding.
+ continue;
+
CIEKey Key(Frame.Personality, Frame.PersonalityEncoding,
- Frame.LsdaEncoding, Frame.IsSignalFrame);
+ Frame.LsdaEncoding, Frame.IsSignalFrame, Frame.IsSimple);
const MCSymbol *&CIEStart = IsEH ? CIEStarts[Key] : DummyDebugKey;
if (!CIEStart)
CIEStart = &Emitter.EmitCIE(Streamer, Frame.Personality,
Frame.PersonalityEncoding, Frame.Lsda,
Frame.IsSignalFrame,
- Frame.LsdaEncoding);
+ Frame.LsdaEncoding,
+ Frame.IsSimple);
FDEEnd = Emitter.EmitFDE(Streamer, *CIEStart, Frame);
-
- if (i != n - 1)
- Streamer.EmitLabel(FDEEnd);
}
Streamer.EmitValueToAlignment(Context.getAsmInfo()->getPointerSize());
@@ -1484,7 +1643,7 @@ void MCDwarfFrameEmitter::Emit(MCStreamer &Streamer, MCAsmBackend *MAB,
Streamer.EmitLabel(FDEEnd);
}
-void MCDwarfFrameEmitter::EmitAdvanceLoc(MCStreamer &Streamer,
+void MCDwarfFrameEmitter::EmitAdvanceLoc(MCObjectStreamer &Streamer,
uint64_t AddrDelta) {
MCContext &Context = Streamer.getContext();
SmallString<256> Tmp;
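The MCDwarf.cpp hunks above add an IsSimple flag to CIEKey (and to its hash and equality) so that two frames which differ only in that flag no longer share a CIE. A minimal sketch of the same key-based deduplication, using std::map and std::tuple in place of LLVM's DenseMap/DenseMapInfo; the labels and field values below are made up for illustration:

#include <cstdio>
#include <map>
#include <string>
#include <tuple>

// Hypothetical stand-in for the fields that distinguish one CIE from another
// (Personality, PersonalityEncoding, LsdaEncoding, IsSignalFrame, IsSimple).
using CIEKey = std::tuple<unsigned, unsigned, unsigned, bool, bool>;

int main() {
  std::map<CIEKey, std::string> CIEStarts; // key -> label of the CIE already emitted
  int NextCIE = 0;

  auto getOrEmitCIE = [&](const CIEKey &Key) -> const std::string & {
    auto It = CIEStarts.find(Key);
    if (It != CIEStarts.end())
      return It->second;                                 // reuse an identical CIE
    std::string Label = "CIE" + std::to_string(NextCIE++);
    return CIEStarts.emplace(Key, Label).first->second;  // "emit" a new one
  };

  // Two frames that agree on everything except IsSimple must get distinct CIEs.
  CIEKey A{0, 0, 0xff, false, false};
  CIEKey B{0, 0, 0xff, false, true};
  std::printf("%s %s\n", getOrEmitCIE(A).c_str(), getOrEmitCIE(B).c_str()); // CIE0 CIE1
}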
diff --git a/contrib/llvm/lib/MC/MCELF.cpp b/contrib/llvm/lib/MC/MCELF.cpp
index ebb189e..386c209 100644
--- a/contrib/llvm/lib/MC/MCELF.cpp
+++ b/contrib/llvm/lib/MC/MCELF.cpp
@@ -61,7 +61,7 @@ void MCELF::SetVisibility(MCSymbolData &SD, unsigned Visibility) {
SD.setFlags(OtherFlags | (Visibility << ELF_STV_Shift));
}
-unsigned MCELF::GetVisibility(MCSymbolData &SD) {
+unsigned MCELF::GetVisibility(const MCSymbolData &SD) {
unsigned Visibility =
(SD.getFlags() & (0x3 << ELF_STV_Shift)) >> ELF_STV_Shift;
assert(Visibility == ELF::STV_DEFAULT || Visibility == ELF::STV_INTERNAL ||
@@ -72,13 +72,13 @@ unsigned MCELF::GetVisibility(MCSymbolData &SD) {
// Other is stored in the last six bits of st_other
// st_other values are stored in the second byte of get/setFlags
void MCELF::setOther(MCSymbolData &SD, unsigned Other) {
- uint32_t OtherFlags = SD.getFlags() & ~(0x3f << ELF_Other_Shift);
- SD.setFlags(OtherFlags | (Other << ELF_Other_Shift));
+ uint32_t OtherFlags = SD.getFlags() & ~(0x3f << ELF_STO_Shift);
+ SD.setFlags(OtherFlags | (Other << ELF_STO_Shift));
}
-unsigned MCELF::getOther(MCSymbolData &SD) {
+unsigned MCELF::getOther(const MCSymbolData &SD) {
unsigned Other =
- (SD.getFlags() & (0x3f << ELF_Other_Shift)) >> ELF_Other_Shift;
+ (SD.getFlags() & (0x3f << ELF_STO_Shift)) >> ELF_STO_Shift;
return Other;
}
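The MCELF.cpp hunk above keeps the ELF symbol visibility (a 2-bit field) and the remaining st_other bits (a 6-bit field) packed into one flags word, selected by the ELF_STV_Shift/ELF_STO_Shift constants. A self-contained sketch of that packing; the shift positions below are illustrative placeholders, not the values from MCELFSymbolFlags.h:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Illustrative shift positions only; the real constants live in MCELFSymbolFlags.h.
constexpr unsigned STV_Shift = 8;  // 2-bit visibility field
constexpr unsigned STO_Shift = 10; // 6-bit "other" field

static void setVisibility(uint32_t &Flags, unsigned Visibility) {
  assert(Visibility <= 0x3 && "visibility is a 2-bit field");
  Flags = (Flags & ~(0x3u << STV_Shift)) | (Visibility << STV_Shift);
}

static unsigned getVisibility(uint32_t Flags) {
  return (Flags >> STV_Shift) & 0x3;
}

static void setOther(uint32_t &Flags, unsigned Other) {
  assert(Other <= 0x3f && "st_other payload is a 6-bit field");
  Flags = (Flags & ~(0x3fu << STO_Shift)) | (Other << STO_Shift);
}

static unsigned getOther(uint32_t Flags) {
  return (Flags >> STO_Shift) & 0x3f;
}

int main() {
  uint32_t Flags = 0;
  setVisibility(Flags, 2); // e.g. a hidden-visibility value
  setOther(Flags, 0x20);
  std::printf("vis=%u other=0x%x\n", getVisibility(Flags), getOther(Flags));
}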
diff --git a/contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp b/contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp
index 0c39e4a..84176dc 100644
--- a/contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp
+++ b/contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp
@@ -24,38 +24,7 @@ MCELFObjectTargetWriter::MCELFObjectTargetWriter(bool Is64Bit_,
IsN64(IsN64_){
}
-const MCSymbol *MCELFObjectTargetWriter::ExplicitRelSym(const MCAssembler &Asm,
- const MCValue &Target,
- const MCFragment &F,
- const MCFixup &Fixup,
- bool IsPCRel) const {
- return NULL;
-}
-
-const MCSymbol *MCELFObjectTargetWriter::undefinedExplicitRelSym(const MCValue &Target,
- const MCFixup &Fixup,
- bool IsPCRel) const {
- const MCSymbol &Symbol = Target.getSymA()->getSymbol();
- return &Symbol.AliasedSymbol();
-}
-
-// ELF doesn't require relocations to be in any order. We sort by the r_offset,
-// just to match gnu as for easier comparison. The use type and index is an
-// arbitrary way of making the sort deterministic.
-static int cmpRel(const ELFRelocationEntry *AP, const ELFRelocationEntry *BP) {
- const ELFRelocationEntry &A = *AP;
- const ELFRelocationEntry &B = *BP;
- if (A.r_offset != B.r_offset)
- return B.r_offset - A.r_offset;
- if (B.Type != A.Type)
- return A.Type - B.Type;
- if (B.Index != A.Index)
- return B.Index - A.Index;
- llvm_unreachable("ELFRelocs might be unstable!");
-}
-
-void
-MCELFObjectTargetWriter::sortRelocs(const MCAssembler &Asm,
- std::vector<ELFRelocationEntry> &Relocs) {
- array_pod_sort(Relocs.begin(), Relocs.end(), cmpRel);
+bool MCELFObjectTargetWriter::needsRelocateWithSymbol(const MCSymbolData &SD,
+ unsigned Type) const {
+ return false;
}
diff --git a/contrib/llvm/lib/MC/MCELFStreamer.cpp b/contrib/llvm/lib/MC/MCELFStreamer.cpp
index e806cb9..7c70540 100644
--- a/contrib/llvm/lib/MC/MCELFStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCELFStreamer.cpp
@@ -12,16 +12,17 @@
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCELFStreamer.h"
-#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/MC/MCAssembler.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELF.h"
#include "llvm/MC/MCELFSymbolFlags.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSectionELF.h"
@@ -34,50 +35,22 @@
using namespace llvm;
-
-inline void MCELFStreamer::SetSection(StringRef Section, unsigned Type,
- unsigned Flags, SectionKind Kind) {
- SwitchSection(getContext().getELFSection(Section, Type, Flags, Kind));
-}
-
-inline void MCELFStreamer::SetSectionData() {
- SetSection(".data",
- ELF::SHT_PROGBITS,
- ELF::SHF_WRITE | ELF::SHF_ALLOC,
- SectionKind::getDataRel());
- EmitCodeAlignment(4, 0);
-}
-
-inline void MCELFStreamer::SetSectionText() {
- SetSection(".text",
- ELF::SHT_PROGBITS,
- ELF::SHF_EXECINSTR | ELF::SHF_ALLOC,
- SectionKind::getText());
- EmitCodeAlignment(4, 0);
-}
-
-inline void MCELFStreamer::SetSectionBss() {
- SetSection(".bss",
- ELF::SHT_NOBITS,
- ELF::SHF_WRITE | ELF::SHF_ALLOC,
- SectionKind::getBSS());
- EmitCodeAlignment(4, 0);
-}
-
MCELFStreamer::~MCELFStreamer() {
}
-void MCELFStreamer::InitToTextSection() {
- SetSectionText();
-}
-
void MCELFStreamer::InitSections() {
// This emulates the same behavior of GNU as. This makes it easier
// to compare the output as the major sections are in the same order.
- SetSectionText();
- SetSectionData();
- SetSectionBss();
- SetSectionText();
+ SwitchSection(getContext().getObjectFileInfo()->getTextSection());
+ EmitCodeAlignment(4);
+
+ SwitchSection(getContext().getObjectFileInfo()->getDataSection());
+ EmitCodeAlignment(4);
+
+ SwitchSection(getContext().getObjectFileInfo()->getBSSSection());
+ EmitCodeAlignment(4);
+
+ SwitchSection(getContext().getObjectFileInfo()->getTextSection());
}
void MCELFStreamer::EmitLabel(MCSymbol *Symbol) {
@@ -92,10 +65,6 @@ void MCELFStreamer::EmitLabel(MCSymbol *Symbol) {
MCELF::SetType(SD, ELF::STT_TLS);
}
-void MCELFStreamer::EmitDebugLabel(MCSymbol *Symbol) {
- EmitLabel(Symbol);
-}
-
void MCELFStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
// Let the target do whatever target specific stuff it needs to do.
getAssembler().getBackend().handleAssemblerFlag(Flag);
@@ -126,9 +95,8 @@ void MCELFStreamer::ChangeSection(const MCSection *Section,
void MCELFStreamer::EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) {
getAssembler().getOrCreateSymbolData(*Symbol);
- MCSymbolData &AliasSD = getAssembler().getOrCreateSymbolData(*Alias);
- AliasSD.setFlags(AliasSD.getFlags() | ELF_Other_Weakref);
- const MCExpr *Value = MCSymbolRefExpr::Create(Symbol, getContext());
+ const MCExpr *Value = MCSymbolRefExpr::Create(
+ Symbol, MCSymbolRefExpr::VK_WEAKREF, getContext());
Alias->setVariableValue(Value);
}
@@ -303,11 +271,12 @@ void MCELFStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
EmitCommonSymbol(Symbol, Size, ByteAlignment);
}
-void MCELFStreamer::EmitValueImpl(const MCExpr *Value, unsigned Size) {
+void MCELFStreamer::EmitValueImpl(const MCExpr *Value, unsigned Size,
+ const SMLoc &Loc) {
if (getCurrentSectionData()->isBundleLocked())
report_fatal_error("Emitting values inside a locked bundle is forbidden");
fixSymbolsInTLSFixups(Value);
- MCObjectStreamer::EmitValueImpl(Value, Size);
+ MCObjectStreamer::EmitValueImpl(Value, Size, Loc);
}
void MCELFStreamer::EmitValueToAlignment(unsigned ByteAlignment,
@@ -371,9 +340,6 @@ void MCELFStreamer::fixSymbolsInTLSFixups(const MCExpr *expr) {
case MCSymbolRefExpr::VK_TLSLDM:
case MCSymbolRefExpr::VK_TPOFF:
case MCSymbolRefExpr::VK_DTPOFF:
- case MCSymbolRefExpr::VK_ARM_TLSGD:
- case MCSymbolRefExpr::VK_ARM_TPOFF:
- case MCSymbolRefExpr::VK_ARM_GOTTPOFF:
case MCSymbolRefExpr::VK_Mips_TLSGD:
case MCSymbolRefExpr::VK_Mips_GOTTPREL:
case MCSymbolRefExpr::VK_Mips_TPREL_HI:
@@ -427,20 +393,22 @@ void MCELFStreamer::fixSymbolsInTLSFixups(const MCExpr *expr) {
}
}
-void MCELFStreamer::EmitInstToFragment(const MCInst &Inst) {
- this->MCObjectStreamer::EmitInstToFragment(Inst);
+void MCELFStreamer::EmitInstToFragment(const MCInst &Inst,
+ const MCSubtargetInfo &STI) {
+ this->MCObjectStreamer::EmitInstToFragment(Inst, STI);
MCRelaxableFragment &F = *cast<MCRelaxableFragment>(getCurrentFragment());
for (unsigned i = 0, e = F.getFixups().size(); i != e; ++i)
fixSymbolsInTLSFixups(F.getFixups()[i].getValue());
}
-void MCELFStreamer::EmitInstToData(const MCInst &Inst) {
+void MCELFStreamer::EmitInstToData(const MCInst &Inst,
+ const MCSubtargetInfo &STI) {
MCAssembler &Assembler = getAssembler();
SmallVector<MCFixup, 4> Fixups;
SmallString<256> Code;
raw_svector_ostream VecOS(Code);
- Assembler.getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
+ Assembler.getEmitter().EncodeInstruction(Inst, VecOS, Fixups, STI);
VecOS.flush();
for (unsigned i = 0, e = Fixups.size(); i != e; ++i)
@@ -566,19 +534,17 @@ void MCELFStreamer::Flush() {
}
void MCELFStreamer::FinishImpl() {
- EmitFrames(NULL, true);
+ EmitFrames(nullptr);
Flush();
this->MCObjectStreamer::FinishImpl();
}
-MCStreamer *llvm::createELFStreamer(MCContext &Context,
- MCTargetStreamer *Streamer,
- MCAsmBackend &MAB, raw_ostream &OS,
- MCCodeEmitter *CE, bool RelaxAll,
- bool NoExecStack) {
- MCELFStreamer *S = new MCELFStreamer(Context, Streamer, MAB, OS, CE);
+MCStreamer *llvm::createELFStreamer(MCContext &Context, MCAsmBackend &MAB,
+ raw_ostream &OS, MCCodeEmitter *CE,
+ bool RelaxAll, bool NoExecStack) {
+ MCELFStreamer *S = new MCELFStreamer(Context, MAB, OS, CE);
if (RelaxAll)
S->getAssembler().setRelaxAll(true);
if (NoExecStack)
@@ -590,10 +556,6 @@ void MCELFStreamer::EmitThumbFunc(MCSymbol *Func) {
llvm_unreachable("Generic ELF doesn't support this directive");
}
-MCSymbolData &MCELFStreamer::getOrCreateSymbolData(MCSymbol *Symbol) {
- return getAssembler().getOrCreateSymbolData(*Symbol);
-}
-
void MCELFStreamer::EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {
llvm_unreachable("ELF doesn't support this directive");
}
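fixSymbolsInTLSFixups in the MCELFStreamer.cpp hunks above is a recursive walk over an MCExpr tree: operator nodes recurse into their children, and symbol references whose variant kind is one of the TLS kinds get their symbol marked STT_TLS. A toy version of that traversal over a hypothetical node type (not the MCExpr class hierarchy):

#include <cstdio>
#include <set>
#include <string>

// Hypothetical expression node: either a symbol reference (Symbol non-empty,
// with a variant kind) or an operator node with children.
struct Expr {
  std::string Symbol;
  std::string Kind;   // variant kind, e.g. "TPOFF"
  const Expr *LHS;    // children for unary/binary operator nodes
  const Expr *RHS;
};

static void markTLSSymbols(const Expr &E, std::set<std::string> &TLSSyms) {
  static const std::set<std::string> TLSKinds = {"TLSGD", "TPOFF", "DTPOFF"};
  if (!E.Symbol.empty()) {
    if (TLSKinds.count(E.Kind))
      TLSSyms.insert(E.Symbol); // stands in for MCELF::SetType(SD, STT_TLS)
    return;
  }
  if (E.LHS) markTLSSymbols(*E.LHS, TLSSyms);
  if (E.RHS) markTLSSymbols(*E.RHS, TLSSyms);
}

int main() {
  // (x@TPOFF) + y  -- only x should be marked.
  Expr X{"x", "TPOFF", nullptr, nullptr};
  Expr Y{"y", "", nullptr, nullptr};
  Expr Root{"", "", &X, &Y};

  std::set<std::string> TLSSyms;
  markTLSSymbols(Root, TLSSyms);
  for (const std::string &S : TLSSyms)
    std::printf("TLS symbol: %s\n", S.c_str());
}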
diff --git a/contrib/llvm/lib/MC/MCExpr.cpp b/contrib/llvm/lib/MC/MCExpr.cpp
index c777e64..f724716 100644
--- a/contrib/llvm/lib/MC/MCExpr.cpp
+++ b/contrib/llvm/lib/MC/MCExpr.cpp
@@ -7,10 +7,10 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mcexpr"
#include "llvm/MC/MCExpr.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
@@ -22,6 +22,8 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "mcexpr"
+
namespace {
namespace stats {
STATISTIC(MCExprEvaluate, "Number of MCExpr evaluations");
@@ -47,19 +49,12 @@ void MCExpr::print(raw_ostream &OS) const {
else
OS << Sym;
- if (SRE.getKind() == MCSymbolRefExpr::VK_ARM_NONE ||
- SRE.getKind() == MCSymbolRefExpr::VK_ARM_PLT ||
- SRE.getKind() == MCSymbolRefExpr::VK_ARM_TLSGD ||
- SRE.getKind() == MCSymbolRefExpr::VK_ARM_GOT ||
- SRE.getKind() == MCSymbolRefExpr::VK_ARM_GOTOFF ||
- SRE.getKind() == MCSymbolRefExpr::VK_ARM_TPOFF ||
- SRE.getKind() == MCSymbolRefExpr::VK_ARM_GOTTPOFF ||
- SRE.getKind() == MCSymbolRefExpr::VK_ARM_TARGET1 ||
- SRE.getKind() == MCSymbolRefExpr::VK_ARM_TARGET2 ||
- SRE.getKind() == MCSymbolRefExpr::VK_ARM_PREL31)
- OS << MCSymbolRefExpr::getVariantKindName(SRE.getKind());
- else if (SRE.getKind() != MCSymbolRefExpr::VK_None)
- OS << '@' << MCSymbolRefExpr::getVariantKindName(SRE.getKind());
+ if (SRE.getKind() != MCSymbolRefExpr::VK_None) {
+ if (SRE.getMCAsmInfo().useParensForSymbolVariant())
+ OS << '(' << MCSymbolRefExpr::getVariantKindName(SRE.getKind()) << ')';
+ else
+ OS << '@' << MCSymbolRefExpr::getVariantKindName(SRE.getKind());
+ }
return;
}
@@ -158,7 +153,7 @@ const MCConstantExpr *MCConstantExpr::Create(int64_t Value, MCContext &Ctx) {
const MCSymbolRefExpr *MCSymbolRefExpr::Create(const MCSymbol *Sym,
VariantKind Kind,
MCContext &Ctx) {
- return new (Ctx) MCSymbolRefExpr(Sym, Kind);
+ return new (Ctx) MCSymbolRefExpr(Sym, Kind, Ctx.getAsmInfo());
}
const MCSymbolRefExpr *MCSymbolRefExpr::Create(StringRef Name, VariantKind Kind,
@@ -185,17 +180,22 @@ StringRef MCSymbolRefExpr::getVariantKindName(VariantKind Kind) {
case VK_TPOFF: return "TPOFF";
case VK_DTPOFF: return "DTPOFF";
case VK_TLVP: return "TLVP";
+ case VK_TLVPPAGE: return "TLVPPAGE";
+ case VK_TLVPPAGEOFF: return "TLVPPAGEOFF";
+ case VK_PAGE: return "PAGE";
+ case VK_PAGEOFF: return "PAGEOFF";
+ case VK_GOTPAGE: return "GOTPAGE";
+ case VK_GOTPAGEOFF: return "GOTPAGEOFF";
case VK_SECREL: return "SECREL32";
- case VK_ARM_NONE: return "(NONE)";
- case VK_ARM_PLT: return "(PLT)";
- case VK_ARM_GOT: return "(GOT)";
- case VK_ARM_GOTOFF: return "(GOTOFF)";
- case VK_ARM_TPOFF: return "(tpoff)";
- case VK_ARM_GOTTPOFF: return "(gottpoff)";
- case VK_ARM_TLSGD: return "(tlsgd)";
- case VK_ARM_TARGET1: return "(target1)";
- case VK_ARM_TARGET2: return "(target2)";
- case VK_ARM_PREL31: return "(prel31)";
+ case VK_WEAKREF: return "WEAKREF";
+ case VK_ARM_NONE: return "none";
+ case VK_ARM_TARGET1: return "target1";
+ case VK_ARM_TARGET2: return "target2";
+ case VK_ARM_PREL31: return "prel31";
+ case VK_ARM_TLSLDO: return "tlsldo";
+ case VK_ARM_TLSCALL: return "tlscall";
+ case VK_ARM_TLSDESC: return "tlsdesc";
+ case VK_ARM_TLSDESCSEQ: return "tlsdescseq";
case VK_PPC_LO: return "l";
case VK_PPC_HI: return "h";
case VK_PPC_HA: return "ha";
@@ -271,6 +271,8 @@ StringRef MCSymbolRefExpr::getVariantKindName(VariantKind Kind) {
case VK_Mips_GOT_LO16: return "GOT_LO16";
case VK_Mips_CALL_HI16: return "CALL_HI16";
case VK_Mips_CALL_LO16: return "CALL_LO16";
+ case VK_Mips_PCREL_HI16: return "PCREL_HI16";
+ case VK_Mips_PCREL_LO16: return "PCREL_LO16";
case VK_COFF_IMGREL32: return "IMGREL32";
}
llvm_unreachable("Invalid variant kind");
@@ -285,6 +287,8 @@ MCSymbolRefExpr::getVariantKindForName(StringRef Name) {
.Case("gotoff", VK_GOTOFF)
.Case("GOTPCREL", VK_GOTPCREL)
.Case("gotpcrel", VK_GOTPCREL)
+ .Case("GOT_PREL", VK_GOTPCREL)
+ .Case("got_prel", VK_GOTPCREL)
.Case("GOTTPOFF", VK_GOTTPOFF)
.Case("gottpoff", VK_GOTTPOFF)
.Case("INDNTPOFF", VK_INDNTPOFF)
@@ -307,6 +311,18 @@ MCSymbolRefExpr::getVariantKindForName(StringRef Name) {
.Case("dtpoff", VK_DTPOFF)
.Case("TLVP", VK_TLVP)
.Case("tlvp", VK_TLVP)
+ .Case("TLVPPAGE", VK_TLVPPAGE)
+ .Case("tlvppage", VK_TLVPPAGE)
+ .Case("TLVPPAGEOFF", VK_TLVPPAGEOFF)
+ .Case("tlvppageoff", VK_TLVPPAGEOFF)
+ .Case("PAGE", VK_PAGE)
+ .Case("page", VK_PAGE)
+ .Case("PAGEOFF", VK_PAGEOFF)
+ .Case("pageoff", VK_PAGEOFF)
+ .Case("GOTPAGE", VK_GOTPAGE)
+ .Case("gotpage", VK_GOTPAGE)
+ .Case("GOTPAGEOFF", VK_GOTPAGEOFF)
+ .Case("gotpageoff", VK_GOTPAGEOFF)
.Case("IMGREL", VK_COFF_IMGREL32)
.Case("imgrel", VK_COFF_IMGREL32)
.Case("SECREL32", VK_SECREL)
@@ -409,6 +425,20 @@ MCSymbolRefExpr::getVariantKindForName(StringRef Name) {
.Case("got@tlsld@h", VK_PPC_GOT_TLSLD_HI)
.Case("GOT@TLSLD@HA", VK_PPC_GOT_TLSLD_HA)
.Case("got@tlsld@ha", VK_PPC_GOT_TLSLD_HA)
+ .Case("NONE", VK_ARM_NONE)
+ .Case("none", VK_ARM_NONE)
+ .Case("TARGET1", VK_ARM_TARGET1)
+ .Case("target1", VK_ARM_TARGET1)
+ .Case("TARGET2", VK_ARM_TARGET2)
+ .Case("target2", VK_ARM_TARGET2)
+ .Case("PREL31", VK_ARM_PREL31)
+ .Case("prel31", VK_ARM_PREL31)
+ .Case("TLSLDO", VK_ARM_TLSLDO)
+ .Case("tlsldo", VK_ARM_TLSLDO)
+ .Case("TLSCALL", VK_ARM_TLSCALL)
+ .Case("tlscall", VK_ARM_TLSCALL)
+ .Case("TLSDESC", VK_ARM_TLSDESC)
+ .Case("tlsdesc", VK_ARM_TLSDESC)
.Default(VK_Invalid);
}
@@ -419,12 +449,12 @@ void MCTargetExpr::anchor() {}
/* *** */
bool MCExpr::EvaluateAsAbsolute(int64_t &Res) const {
- return EvaluateAsAbsolute(Res, 0, 0, 0);
+ return EvaluateAsAbsolute(Res, nullptr, nullptr, nullptr);
}
bool MCExpr::EvaluateAsAbsolute(int64_t &Res,
const MCAsmLayout &Layout) const {
- return EvaluateAsAbsolute(Res, &Layout.getAssembler(), &Layout, 0);
+ return EvaluateAsAbsolute(Res, &Layout.getAssembler(), &Layout, nullptr);
}
bool MCExpr::EvaluateAsAbsolute(int64_t &Res,
@@ -434,7 +464,7 @@ bool MCExpr::EvaluateAsAbsolute(int64_t &Res,
}
bool MCExpr::EvaluateAsAbsolute(int64_t &Res, const MCAssembler &Asm) const {
- return EvaluateAsAbsolute(Res, &Asm, 0, 0);
+ return EvaluateAsAbsolute(Res, &Asm, nullptr, nullptr);
}
bool MCExpr::EvaluateAsAbsolute(int64_t &Res, const MCAssembler *Asm,
@@ -452,7 +482,8 @@ bool MCExpr::EvaluateAsAbsolute(int64_t &Res, const MCAssembler *Asm,
// absolutize differences across sections and that is what the MachO writer
// uses Addrs for.
bool IsRelocatable =
- EvaluateAsRelocatableImpl(Value, Asm, Layout, Addrs, /*InSet*/ Addrs);
+ EvaluateAsRelocatableImpl(Value, Asm, Layout, Addrs, /*InSet*/ Addrs,
+ /*ForceVarExpansion*/ false);
// Record the current value.
Res = Value.getConstant();
@@ -480,8 +511,8 @@ static void AttemptToFoldSymbolOffsetDifference(const MCAssembler *Asm,
if (!Asm->getWriter().IsSymbolRefDifferenceFullyResolved(*Asm, A, B, InSet))
return;
- MCSymbolData &AD = Asm->getSymbolData(SA);
- MCSymbolData &BD = Asm->getSymbolData(SB);
+ const MCSymbolData &AD = Asm->getSymbolData(SA);
+ const MCSymbolData &BD = Asm->getSymbolData(SB);
if (AD.getFragment() == BD.getFragment()) {
Addend += (AD.getOffset() - BD.getOffset());
@@ -493,7 +524,7 @@ static void AttemptToFoldSymbolOffsetDifference(const MCAssembler *Asm,
// Clear the symbol expr pointers to indicate we have folded these
// operands.
- A = B = 0;
+ A = B = nullptr;
return;
}
@@ -519,7 +550,7 @@ static void AttemptToFoldSymbolOffsetDifference(const MCAssembler *Asm,
// Clear the symbol expr pointers to indicate we have folded these
// operands.
- A = B = 0;
+ A = B = nullptr;
}
/// \brief Evaluate the result of an add between (conceptually) two MCValues.
@@ -601,16 +632,22 @@ static bool EvaluateSymbolicAdd(const MCAssembler *Asm,
}
bool MCExpr::EvaluateAsRelocatable(MCValue &Res,
- const MCAsmLayout &Layout) const {
- return EvaluateAsRelocatableImpl(Res, &Layout.getAssembler(), &Layout,
- 0, false);
+ const MCAsmLayout *Layout) const {
+ MCAssembler *Assembler = Layout ? &Layout->getAssembler() : nullptr;
+ return EvaluateAsRelocatableImpl(Res, Assembler, Layout, nullptr, false,
+ /*ForceVarExpansion*/ false);
}
-bool MCExpr::EvaluateAsRelocatableImpl(MCValue &Res,
- const MCAssembler *Asm,
+bool MCExpr::EvaluateAsValue(MCValue &Res, const MCAsmLayout *Layout) const {
+ MCAssembler *Assembler = Layout ? &Layout->getAssembler() : nullptr;
+ return EvaluateAsRelocatableImpl(Res, Assembler, Layout, nullptr, false,
+ /*ForceVarExpansion*/ true);
+}
+
+bool MCExpr::EvaluateAsRelocatableImpl(MCValue &Res, const MCAssembler *Asm,
const MCAsmLayout *Layout,
- const SectionAddrMap *Addrs,
- bool InSet) const {
+ const SectionAddrMap *Addrs, bool InSet,
+ bool ForceVarExpansion) const {
++stats::MCExprEvaluate;
switch (getKind()) {
@@ -624,20 +661,35 @@ bool MCExpr::EvaluateAsRelocatableImpl(MCValue &Res,
case SymbolRef: {
const MCSymbolRefExpr *SRE = cast<MCSymbolRefExpr>(this);
const MCSymbol &Sym = SRE->getSymbol();
+ const MCAsmInfo &MCAsmInfo = SRE->getMCAsmInfo();
// Evaluate recursively if this is a variable.
if (Sym.isVariable() && SRE->getKind() == MCSymbolRefExpr::VK_None) {
- bool Ret = Sym.getVariableValue()->EvaluateAsRelocatableImpl(Res, Asm,
- Layout,
- Addrs,
- true);
- // If we failed to simplify this to a constant, let the target
- // handle it.
- if (Ret && !Res.getSymA() && !Res.getSymB())
- return true;
+ if (Sym.getVariableValue()->EvaluateAsRelocatableImpl(
+ Res, Asm, Layout, Addrs, true, ForceVarExpansion)) {
+ const MCSymbolRefExpr *A = Res.getSymA();
+ const MCSymbolRefExpr *B = Res.getSymB();
+
+ if (MCAsmInfo.hasSubsectionsViaSymbols()) {
+          // FIXME: This is a small hack. Given
+ // a = b + 4
+ // .long a
+ // the OS X assembler will completely drop the 4. We should probably
+ // include it in the relocation or produce an error if that is not
+ // possible.
+ if (!A && !B)
+ return true;
+ } else {
+ if (ForceVarExpansion)
+ return true;
+ bool IsSymbol = A && A->getSymbol().isDefined();
+ if (!IsSymbol)
+ return true;
+ }
+ }
}
- Res = MCValue::get(SRE, 0, 0);
+ Res = MCValue::get(SRE, nullptr, 0);
return true;
}
@@ -645,8 +697,8 @@ bool MCExpr::EvaluateAsRelocatableImpl(MCValue &Res,
const MCUnaryExpr *AUE = cast<MCUnaryExpr>(this);
MCValue Value;
- if (!AUE->getSubExpr()->EvaluateAsRelocatableImpl(Value, Asm, Layout,
- Addrs, InSet))
+ if (!AUE->getSubExpr()->EvaluateAsRelocatableImpl(Value, Asm, Layout, Addrs,
+ InSet, ForceVarExpansion))
return false;
switch (AUE->getOpcode()) {
@@ -679,10 +731,10 @@ bool MCExpr::EvaluateAsRelocatableImpl(MCValue &Res,
const MCBinaryExpr *ABE = cast<MCBinaryExpr>(this);
MCValue LHSValue, RHSValue;
- if (!ABE->getLHS()->EvaluateAsRelocatableImpl(LHSValue, Asm, Layout,
- Addrs, InSet) ||
- !ABE->getRHS()->EvaluateAsRelocatableImpl(RHSValue, Asm, Layout,
- Addrs, InSet))
+ if (!ABE->getLHS()->EvaluateAsRelocatableImpl(LHSValue, Asm, Layout, Addrs,
+ InSet, ForceVarExpansion) ||
+ !ABE->getRHS()->EvaluateAsRelocatableImpl(RHSValue, Asm, Layout, Addrs,
+ InSet, ForceVarExpansion))
return false;
// We only support a few operations on non-constant expressions, handle
@@ -756,7 +808,7 @@ const MCSection *MCExpr::FindAssociatedSection() const {
if (Sym.isDefined())
return &Sym.getSection();
- return 0;
+ return nullptr;
}
case Unary:
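AttemptToFoldSymbolOffsetDifference, touched in the MCExpr.cpp hunks above, keeps the rule that the difference of two symbols laid out in the same fragment folds to a plain constant (offset of A minus offset of B), while a cross-fragment difference stays symbolic. A toy sketch of that folding, with a hypothetical SymData record standing in for MCSymbolData:

#include <cstdint>
#include <cstdio>
#include <optional>

// Hypothetical stand-in for MCSymbolData: which fragment a symbol lives in
// and its byte offset inside that fragment.
struct SymData {
  int FragmentId;
  uint64_t Offset;
};

// If A and B are laid out in the same fragment, "A - B" is already a constant
// and can be folded; otherwise leave it symbolic for layout/relocation.
static std::optional<int64_t> foldDifference(const SymData &A, const SymData &B) {
  if (A.FragmentId != B.FragmentId)
    return std::nullopt;
  return static_cast<int64_t>(A.Offset) - static_cast<int64_t>(B.Offset);
}

int main() {
  SymData End{0, 64}, Start{0, 16}, Other{1, 0};
  if (auto C = foldDifference(End, Start))
    std::printf("End - Start = %lld\n", static_cast<long long>(*C)); // 48
  if (!foldDifference(End, Other))
    std::printf("End - Other stays symbolic (needs layout or a relocation)\n");
}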
diff --git a/contrib/llvm/lib/MC/MCExternalSymbolizer.cpp b/contrib/llvm/lib/MC/MCExternalSymbolizer.cpp
index ca368b2..7c3073a 100644
--- a/contrib/llvm/lib/MC/MCExternalSymbolizer.cpp
+++ b/contrib/llvm/lib/MC/MCExternalSymbolizer.cpp
@@ -43,8 +43,19 @@ bool MCExternalSymbolizer::tryAddingSymbolicOperand(MCInst &MI,
!GetOpInfo(DisInfo, Address, Offset, InstSize, 1, &SymbolicOp)) {
// Clear SymbolicOp.Value from above and also all other fields.
std::memset(&SymbolicOp, '\0', sizeof(struct LLVMOpInfo1));
- if (!SymbolLookUp)
+
+    // At this point, GetOpInfo() did not find any relocation information about
+    // this operand, so we are left to use the SymbolLookUp() callback to guess
+    // whether the Value is the address of a symbol.  For a branch that guess
+    // always makes sense, but for an immediate it is more questionable whether
+    // the value is a symbol address or some other reference.  So if the
+    // immediate Value comes from an operand only one byte wide (InstSize), we
+    // do not guess that it is a symbol address, because in object files
+    // assembled starting at address 0 that guess usually leads to incorrect
+    // symbolication.
+ if (!SymbolLookUp || (InstSize == 1 && !IsBranch))
return false;
+
uint64_t ReferenceType;
if (IsBranch)
ReferenceType = LLVMDisassembler_ReferenceType_In_Branch;
@@ -56,6 +67,9 @@ bool MCExternalSymbolizer::tryAddingSymbolicOperand(MCInst &MI,
if (Name) {
SymbolicOp.AddSymbol.Name = Name;
SymbolicOp.AddSymbol.Present = true;
+      // If Name is a C++ symbol name, put the human-readable name in a comment.
+ if(ReferenceType == LLVMDisassembler_ReferenceType_DeMangled_Name)
+ cStream << ReferenceName;
}
// For branches always create an MCExpr so it gets printed as hex address.
else if (IsBranch) {
@@ -69,7 +83,7 @@ bool MCExternalSymbolizer::tryAddingSymbolicOperand(MCInst &MI,
return false;
}
- const MCExpr *Add = NULL;
+ const MCExpr *Add = nullptr;
if (SymbolicOp.AddSymbol.Present) {
if (SymbolicOp.AddSymbol.Name) {
StringRef Name(SymbolicOp.AddSymbol.Name);
@@ -80,7 +94,7 @@ bool MCExternalSymbolizer::tryAddingSymbolicOperand(MCInst &MI,
}
}
- const MCExpr *Sub = NULL;
+ const MCExpr *Sub = nullptr;
if (SymbolicOp.SubtractSymbol.Present) {
if (SymbolicOp.SubtractSymbol.Name) {
StringRef Name(SymbolicOp.SubtractSymbol.Name);
@@ -91,7 +105,7 @@ bool MCExternalSymbolizer::tryAddingSymbolicOperand(MCInst &MI,
}
}
- const MCExpr *Off = NULL;
+ const MCExpr *Off = nullptr;
if (SymbolicOp.Value != 0)
Off = MCConstantExpr::Create(SymbolicOp.Value, Ctx);
@@ -102,17 +116,17 @@ bool MCExternalSymbolizer::tryAddingSymbolicOperand(MCInst &MI,
LHS = MCBinaryExpr::CreateSub(Add, Sub, Ctx);
else
LHS = MCUnaryExpr::CreateMinus(Sub, Ctx);
- if (Off != 0)
+ if (Off)
Expr = MCBinaryExpr::CreateAdd(LHS, Off, Ctx);
else
Expr = LHS;
} else if (Add) {
- if (Off != 0)
+ if (Off)
Expr = MCBinaryExpr::CreateAdd(Add, Off, Ctx);
else
Expr = Add;
} else {
- if (Off != 0)
+ if (Off)
Expr = Off;
else
Expr = MCConstantExpr::Create(0, Ctx);
@@ -146,8 +160,11 @@ void MCExternalSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
if(ReferenceType == LLVMDisassembler_ReferenceType_Out_LitPool_SymAddr)
cStream << "literal pool symbol address: " << ReferenceName;
else if(ReferenceType ==
- LLVMDisassembler_ReferenceType_Out_LitPool_CstrAddr)
- cStream << "literal pool for: \"" << ReferenceName << "\"";
+ LLVMDisassembler_ReferenceType_Out_LitPool_CstrAddr) {
+ cStream << "literal pool for: \"";
+ cStream.write_escaped(ReferenceName);
+ cStream << "\"";
+ }
else if(ReferenceType ==
LLVMDisassembler_ReferenceType_Out_Objc_CFString_Ref)
cStream << "Objc cfstring ref: @\"" << ReferenceName << "\"";
@@ -172,10 +189,10 @@ MCSymbolizer *createMCSymbolizer(StringRef TT, LLVMOpInfoCallback GetOpInfo,
void *DisInfo,
MCContext *Ctx,
MCRelocationInfo *RelInfo) {
- assert(Ctx != 0 && "No MCContext given for symbolic disassembly");
+ assert(Ctx && "No MCContext given for symbolic disassembly");
- OwningPtr<MCRelocationInfo> RelInfoOwingPtr(RelInfo);
- return new MCExternalSymbolizer(*Ctx, RelInfoOwingPtr, GetOpInfo,
- SymbolLookUp, DisInfo);
+ return new MCExternalSymbolizer(*Ctx,
+ std::unique_ptr<MCRelocationInfo>(RelInfo),
+ GetOpInfo, SymbolLookUp, DisInfo);
}
}
diff --git a/contrib/llvm/lib/MC/MCInst.cpp b/contrib/llvm/lib/MC/MCInst.cpp
index 124cc14..d7b80f5 100644
--- a/contrib/llvm/lib/MC/MCInst.cpp
+++ b/contrib/llvm/lib/MC/MCInst.cpp
@@ -34,7 +34,7 @@ void MCOperand::print(raw_ostream &OS, const MCAsmInfo *MAI) const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MCOperand::dump() const {
- print(dbgs(), 0);
+ print(dbgs(), nullptr);
dbgs() << "\n";
}
#endif
@@ -66,7 +66,7 @@ void MCInst::dump_pretty(raw_ostream &OS, const MCAsmInfo *MAI,
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MCInst::dump() const {
- print(dbgs(), 0);
+ print(dbgs(), nullptr);
dbgs() << "\n";
}
#endif
diff --git a/contrib/llvm/lib/MC/MCLinkerOptimizationHint.cpp b/contrib/llvm/lib/MC/MCLinkerOptimizationHint.cpp
new file mode 100644
index 0000000..3f8d620
--- /dev/null
+++ b/contrib/llvm/lib/MC/MCLinkerOptimizationHint.cpp
@@ -0,0 +1,35 @@
+//===-- llvm/MC/MCLinkerOptimizationHint.cpp ----- LOH handling -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/MCLinkerOptimizationHint.h"
+#include "llvm/MC/MCAsmLayout.h"
+#include "llvm/Support/LEB128.h"
+#include "llvm/MC/MCStreamer.h"
+
+using namespace llvm;
+
+// Each LOH is composed of, in this order (each field is encoded using ULEB128):
+// - Its kind.
+// - Its number of arguments (let's say N).
+// - Its arg1.
+// - ...
+// - Its argN.
+// <arg1> to <argN> are addresses in the object file, i.e., offsets from the
+// beginning of the object file.
+void MCLOHDirective::Emit_impl(raw_ostream &OutStream,
+ const MachObjectWriter &ObjWriter,
+ const MCAsmLayout &Layout) const {
+ const MCAssembler &Asm = Layout.getAssembler();
+ encodeULEB128(Kind, OutStream);
+ encodeULEB128(Args.size(), OutStream);
+ for (LOHArgs::const_iterator It = Args.begin(), EndIt = Args.end();
+ It != EndIt; ++It)
+ encodeULEB128(ObjWriter.getSymbolAddress(&Asm.getSymbolData(**It), Layout),
+ OutStream);
+}
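The new MCLinkerOptimizationHint.cpp above describes each LOH record as a kind, an argument count, and then the arguments, all ULEB128-encoded. The encoder below follows the standard ULEB128 scheme and can be run on its own; the kind and addresses are example values only:

#include <cstdint>
#include <cstdio>
#include <vector>

// Standard ULEB128: emit 7 bits at a time, set the high bit while more follow.
static void encodeULEB128(uint64_t Value, std::vector<uint8_t> &Out) {
  do {
    uint8_t Byte = Value & 0x7f;
    Value >>= 7;
    if (Value != 0)
      Byte |= 0x80;
    Out.push_back(Byte);
  } while (Value != 0);
}

int main() {
  // One LOH record: kind, number of args, then each arg (addresses in the
  // object file). The kind and addresses here are made up for illustration.
  unsigned Kind = 1;
  std::vector<uint64_t> Args = {0x1000, 0x1008};

  std::vector<uint8_t> Stream;
  encodeULEB128(Kind, Stream);
  encodeULEB128(Args.size(), Stream);
  for (uint64_t A : Args)
    encodeULEB128(A, Stream);

  for (uint8_t B : Stream)
    std::printf("%02x ", B);
  std::printf("\n"); // 01 02 80 20 88 20
}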
diff --git a/contrib/llvm/lib/MC/MCMachOStreamer.cpp b/contrib/llvm/lib/MC/MCMachOStreamer.cpp
index 2924dcd..9e8bc94 100644
--- a/contrib/llvm/lib/MC/MCMachOStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCMachOStreamer.cpp
@@ -8,6 +8,8 @@
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCStreamer.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCCodeEmitter.h"
@@ -15,7 +17,9 @@
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCLinkerOptimizationHint.h"
#include "llvm/MC/MCMachOSymbolFlags.h"
+#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSectionMachO.h"
@@ -30,79 +34,96 @@ namespace {
class MCMachOStreamer : public MCObjectStreamer {
private:
- virtual void EmitInstToData(const MCInst &Inst);
+ /// LabelSections - true if each section change should emit a linker local
+ /// label for use in relocations for assembler local references. Obviates the
+ /// need for local relocations. False by default.
+ bool LabelSections;
+
+ /// HasSectionLabel - map of which sections have already had a non-local
+ /// label emitted to them. Used so we don't emit extraneous linker local
+ /// labels in the middle of the section.
+ DenseMap<const MCSection*, bool> HasSectionLabel;
+
+ void EmitInstToData(const MCInst &Inst, const MCSubtargetInfo &STI) override;
void EmitDataRegion(DataRegionData::KindTy Kind);
void EmitDataRegionEnd();
+
public:
MCMachOStreamer(MCContext &Context, MCAsmBackend &MAB, raw_ostream &OS,
- MCCodeEmitter *Emitter)
- : MCObjectStreamer(Context, 0, MAB, OS, Emitter) {}
+ MCCodeEmitter *Emitter, bool label)
+ : MCObjectStreamer(Context, MAB, OS, Emitter),
+ LabelSections(label) {}
/// @name MCStreamer Interface
/// @{
- virtual void InitSections();
- virtual void InitToTextSection();
- virtual void EmitLabel(MCSymbol *Symbol);
- virtual void EmitDebugLabel(MCSymbol *Symbol);
- virtual void EmitEHSymAttributes(const MCSymbol *Symbol,
- MCSymbol *EHSymbol);
- virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
- virtual void EmitLinkerOptions(ArrayRef<std::string> Options);
- virtual void EmitDataRegion(MCDataRegionType Kind);
- virtual void EmitThumbFunc(MCSymbol *Func);
- virtual bool EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute);
- virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue);
- virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment);
- virtual void BeginCOFFSymbolDef(const MCSymbol *Symbol) {
+ void ChangeSection(const MCSection *Sect, const MCExpr *Subsect) override;
+ void EmitLabel(MCSymbol *Symbol) override;
+ void EmitEHSymAttributes(const MCSymbol *Symbol, MCSymbol *EHSymbol) override;
+ void EmitAssemblerFlag(MCAssemblerFlag Flag) override;
+ void EmitLinkerOptions(ArrayRef<std::string> Options) override;
+ void EmitDataRegion(MCDataRegionType Kind) override;
+ void EmitVersionMin(MCVersionMinType Kind, unsigned Major,
+ unsigned Minor, unsigned Update) override;
+ void EmitThumbFunc(MCSymbol *Func) override;
+ bool EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) override;
+ void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) override;
+ void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) override;
+ void BeginCOFFSymbolDef(const MCSymbol *Symbol) override {
llvm_unreachable("macho doesn't support this directive");
}
- virtual void EmitCOFFSymbolStorageClass(int StorageClass) {
+ void EmitCOFFSymbolStorageClass(int StorageClass) override {
llvm_unreachable("macho doesn't support this directive");
}
- virtual void EmitCOFFSymbolType(int Type) {
+ void EmitCOFFSymbolType(int Type) override {
llvm_unreachable("macho doesn't support this directive");
}
- virtual void EndCOFFSymbolDef() {
+ void EndCOFFSymbolDef() override {
llvm_unreachable("macho doesn't support this directive");
}
- virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {
+ void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) override {
llvm_unreachable("macho doesn't support this directive");
}
- virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment);
- virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
- uint64_t Size = 0, unsigned ByteAlignment = 0);
+ void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) override;
+ void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = nullptr,
+ uint64_t Size = 0, unsigned ByteAlignment = 0) override;
virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
- uint64_t Size, unsigned ByteAlignment = 0);
+ uint64_t Size, unsigned ByteAlignment = 0) override;
- virtual void EmitFileDirective(StringRef Filename) {
+ void EmitFileDirective(StringRef Filename) override {
// FIXME: Just ignore the .file; it isn't important enough to fail the
// entire assembly.
// report_fatal_error("unsupported directive: '.file'");
}
- virtual void EmitIdent(StringRef IdentString) {
+ void EmitIdent(StringRef IdentString) override {
llvm_unreachable("macho doesn't support this directive");
}
- virtual void FinishImpl();
+ void EmitLOHDirective(MCLOHType Kind, const MCLOHArgs &Args) override {
+ getAssembler().getLOHContainer().addDirective(Kind, Args);
+ }
+
+ void FinishImpl() override;
};
} // end anonymous namespace.
-void MCMachOStreamer::InitSections() {
- InitToTextSection();
-}
-
-void MCMachOStreamer::InitToTextSection() {
- SwitchSection(getContext().getMachOSection(
- "__TEXT", "__text",
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS, 0,
- SectionKind::getText()));
+void MCMachOStreamer::ChangeSection(const MCSection *Section,
+ const MCExpr *Subsection) {
+ // Change the section normally.
+ MCObjectStreamer::ChangeSection(Section, Subsection);
+ // Output a linker-local symbol so we don't need section-relative local
+ // relocations. The linker hates us when we do that.
+ if (LabelSections && !HasSectionLabel[Section]) {
+ MCSymbol *Label = getContext().CreateLinkerPrivateTempSymbol();
+ EmitLabel(Label);
+ HasSectionLabel[Section] = true;
+ }
}
void MCMachOStreamer::EmitEHSymAttributes(const MCSymbol *Symbol,
@@ -140,9 +161,6 @@ void MCMachOStreamer::EmitLabel(MCSymbol *Symbol) {
SD.setFlags(SD.getFlags() & ~SF_ReferenceTypeMask);
}
-void MCMachOStreamer::EmitDebugLabel(MCSymbol *Symbol) {
- EmitLabel(Symbol);
-}
void MCMachOStreamer::EmitDataRegion(DataRegionData::KindTy Kind) {
if (!getAssembler().getBackend().hasDataInCodeSupport())
return;
@@ -150,7 +168,7 @@ void MCMachOStreamer::EmitDataRegion(DataRegionData::KindTy Kind) {
MCSymbol *Start = getContext().CreateTempSymbol();
EmitLabel(Start);
// Record the region for the object writer to use.
- DataRegionData Data = { Kind, Start, NULL };
+ DataRegionData Data = { Kind, Start, nullptr };
std::vector<DataRegionData> &Regions = getAssembler().getDataRegions();
Regions.push_back(Data);
}
@@ -161,7 +179,7 @@ void MCMachOStreamer::EmitDataRegionEnd() {
std::vector<DataRegionData> &Regions = getAssembler().getDataRegions();
assert(Regions.size() && "Mismatched .end_data_region!");
DataRegionData &Data = Regions.back();
- assert(Data.End == NULL && "Mismatched .end_data_region!");
+ assert(!Data.End && "Mismatched .end_data_region!");
// Create a temporary label to mark the end of the data region.
Data.End = getContext().CreateTempSymbol();
EmitLabel(Data.End);
@@ -206,14 +224,15 @@ void MCMachOStreamer::EmitDataRegion(MCDataRegionType Kind) {
}
}
+void MCMachOStreamer::EmitVersionMin(MCVersionMinType Kind, unsigned Major,
+ unsigned Minor, unsigned Update) {
+ getAssembler().setVersionMinInfo(Kind, Major, Minor, Update);
+}
+
void MCMachOStreamer::EmitThumbFunc(MCSymbol *Symbol) {
// Remember that the function is a thumb function. Fixup and relocation
// values will need adjusted.
getAssembler().setIsThumbFunc(Symbol);
-
- // Mark the thumb bit on the symbol.
- MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
- SD.setFlags(SD.getFlags() | SF_ThumbFunc);
}
bool MCMachOStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
@@ -325,7 +344,7 @@ void MCMachOStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
// FIXME: Darwin 'as' does appear to allow redef of a .comm by itself.
assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
- AssignSection(Symbol, NULL);
+ AssignSection(Symbol, nullptr);
MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
SD.setExternal(true);
@@ -335,9 +354,7 @@ void MCMachOStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
void MCMachOStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
unsigned ByteAlignment) {
// '.lcomm' is equivalent to '.zerofill'.
- return EmitZerofill(getContext().getMachOSection("__DATA", "__bss",
- MCSectionMachO::S_ZEROFILL,
- 0, SectionKind::getBSS()),
+ return EmitZerofill(getContext().getObjectFileInfo()->getDataBSSSection(),
Symbol, Size, ByteAlignment);
}
@@ -378,13 +395,14 @@ void MCMachOStreamer::EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
return;
}
-void MCMachOStreamer::EmitInstToData(const MCInst &Inst) {
+void MCMachOStreamer::EmitInstToData(const MCInst &Inst,
+ const MCSubtargetInfo &STI) {
MCDataFragment *DF = getOrCreateDataFragment();
SmallVector<MCFixup, 4> Fixups;
SmallString<256> Code;
raw_svector_ostream VecOS(Code);
- getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
+ getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, Fixups, STI);
VecOS.flush();
// Add the fixups and data.
@@ -396,7 +414,7 @@ void MCMachOStreamer::EmitInstToData(const MCInst &Inst) {
}
void MCMachOStreamer::FinishImpl() {
- EmitFrames(&getAssembler().getBackend(), true);
+ EmitFrames(&getAssembler().getBackend());
// We have to set the fragment atom associations so we can relax properly for
// Mach-O.
@@ -404,13 +422,12 @@ void MCMachOStreamer::FinishImpl() {
// First, scan the symbol table to build a lookup table from fragments to
// defining symbols.
DenseMap<const MCFragment*, MCSymbolData*> DefiningSymbolMap;
- for (MCAssembler::symbol_iterator it = getAssembler().symbol_begin(),
- ie = getAssembler().symbol_end(); it != ie; ++it) {
- if (getAssembler().isSymbolLinkerVisible(it->getSymbol()) &&
- it->getFragment()) {
+ for (MCSymbolData &SD : getAssembler().symbols()) {
+ if (getAssembler().isSymbolLinkerVisible(SD.getSymbol()) &&
+ SD.getFragment()) {
// An atom defining symbol should never be internal to a fragment.
- assert(it->getOffset() == 0 && "Invalid offset in atom defining symbol!");
- DefiningSymbolMap[it->getFragment()] = it;
+ assert(SD.getOffset() == 0 && "Invalid offset in atom defining symbol!");
+ DefiningSymbolMap[SD.getFragment()] = &SD;
}
}
@@ -418,7 +435,7 @@ void MCMachOStreamer::FinishImpl() {
// symbol.
for (MCAssembler::iterator it = getAssembler().begin(),
ie = getAssembler().end(); it != ie; ++it) {
- MCSymbolData *CurrentAtom = 0;
+ MCSymbolData *CurrentAtom = nullptr;
for (MCSectionData::iterator it2 = it->begin(),
ie2 = it->end(); it2 != ie2; ++it2) {
if (MCSymbolData *SD = DefiningSymbolMap.lookup(it2))
@@ -432,8 +449,9 @@ void MCMachOStreamer::FinishImpl() {
MCStreamer *llvm::createMachOStreamer(MCContext &Context, MCAsmBackend &MAB,
raw_ostream &OS, MCCodeEmitter *CE,
- bool RelaxAll) {
- MCMachOStreamer *S = new MCMachOStreamer(Context, MAB, OS, CE);
+ bool RelaxAll,
+ bool LabelSections) {
+ MCMachOStreamer *S = new MCMachOStreamer(Context, MAB, OS, CE, LabelSections);
if (RelaxAll)
S->getAssembler().setRelaxAll(true);
return S;
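MCMachOStreamer::FinishImpl, updated above, first builds a map from fragments to the symbols that define atoms there, then sweeps each section forward so every fragment is tagged with the most recent atom. A simplified sketch of that sweep, using plain structs instead of MCFragment/MCSymbolData:

#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Hypothetical model: a section is a sequence of fragments, and a fragment
// starts a new atom when a linker-visible symbol is defined at its start.
struct Fragment {
  int Id;
  std::string Atom; // filled in by the sweep below
};

int main() {
  std::vector<Fragment> Section = {{0, ""}, {1, ""}, {2, ""}, {3, ""}};
  // fragment id -> atom-defining symbol, built from the symbol table.
  std::map<int, std::string> DefiningSymbol = {{0, "_foo"}, {2, "_bar"}};

  std::string CurrentAtom; // empty until the first atom-defining symbol
  for (Fragment &F : Section) {
    auto It = DefiningSymbol.find(F.Id);
    if (It != DefiningSymbol.end())
      CurrentAtom = It->second; // a new atom starts here
    F.Atom = CurrentAtom;       // later fragments belong to the latest atom
  }

  for (const Fragment &F : Section)
    std::printf("fragment %d -> atom %s\n", F.Id,
                F.Atom.empty() ? "<none>" : F.Atom.c_str());
}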
diff --git a/contrib/llvm/lib/MC/MCNullStreamer.cpp b/contrib/llvm/lib/MC/MCNullStreamer.cpp
index 9b9c4aa..d543402 100644
--- a/contrib/llvm/lib/MC/MCNullStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCNullStreamer.cpp
@@ -19,96 +19,22 @@ namespace {
class MCNullStreamer : public MCStreamer {
public:
- MCNullStreamer(MCContext &Context) : MCStreamer(Context, 0) {}
+ MCNullStreamer(MCContext &Context) : MCStreamer(Context) {}
/// @name MCStreamer Interface
/// @{
- virtual void InitToTextSection() {
- }
-
- virtual void InitSections() {
- }
-
- virtual void ChangeSection(const MCSection *Section,
- const MCExpr *Subsection) {
- }
-
- virtual void EmitLabel(MCSymbol *Symbol) {
- assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
- assert(getCurrentSection().first &&"Cannot emit before setting section!");
- AssignSection(Symbol, getCurrentSection().first);
- }
- virtual void EmitDebugLabel(MCSymbol *Symbol) {
- EmitLabel(Symbol);
- }
- virtual void EmitAssemblerFlag(MCAssemblerFlag Flag) {}
- virtual void EmitThumbFunc(MCSymbol *Func) {}
-
- virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {}
- virtual void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol){}
- virtual void EmitDwarfAdvanceLineAddr(int64_t LineDelta,
- const MCSymbol *LastLabel,
- const MCSymbol *Label,
- unsigned PointerSize) {}
-
- virtual bool EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute){
+ bool EmitSymbolAttribute(MCSymbol *Symbol,
+ MCSymbolAttr Attribute) override {
return true;
}
- virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {}
-
- virtual void BeginCOFFSymbolDef(const MCSymbol *Symbol) {}
- virtual void EmitCOFFSymbolStorageClass(int StorageClass) {}
- virtual void EmitCOFFSymbolType(int Type) {}
- virtual void EndCOFFSymbolDef() {}
- virtual void EmitCOFFSecRel32(MCSymbol const *Symbol) {}
-
- virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {}
- virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment) {}
- virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment) {}
- virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
- uint64_t Size = 0, unsigned ByteAlignment = 0) {}
- virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
- uint64_t Size, unsigned ByteAlignment) {}
- virtual void EmitBytes(StringRef Data) {}
-
- virtual void EmitValueImpl(const MCExpr *Value, unsigned Size) {}
- virtual void EmitULEB128Value(const MCExpr *Value) {}
- virtual void EmitSLEB128Value(const MCExpr *Value) {}
- virtual void EmitGPRel32Value(const MCExpr *Value) {}
- virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
- unsigned ValueSize = 1,
- unsigned MaxBytesToEmit = 0) {}
-
- virtual void EmitCodeAlignment(unsigned ByteAlignment,
- unsigned MaxBytesToEmit = 0) {}
-
- virtual bool EmitValueToOffset(const MCExpr *Offset,
- unsigned char Value = 0) { return false; }
-
- virtual void EmitFileDirective(StringRef Filename) {}
- virtual bool EmitDwarfFileDirective(unsigned FileNo, StringRef Directory,
- StringRef Filename, unsigned CUID = 0) {
- return false;
- }
- virtual void EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
- unsigned Column, unsigned Flags,
- unsigned Isa, unsigned Discriminator,
- StringRef FileName) {}
- virtual void EmitInstruction(const MCInst &Inst) {}
-
- virtual void EmitBundleAlignMode(unsigned AlignPow2) {}
- virtual void EmitBundleLock(bool AlignToEnd) {}
- virtual void EmitBundleUnlock() {}
-
- virtual void FinishImpl() {}
-
- virtual void EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame) {
- RecordProcEnd(Frame);
- }
+ void EmitCOFFSecRel32(MCSymbol const *Symbol) override {}
+ void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) override {}
+ void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = nullptr,
+ uint64_t Size = 0, unsigned ByteAlignment = 0) override {}
+ void EmitGPRel32Value(const MCExpr *Value) override {}
};
}
diff --git a/contrib/llvm/lib/MC/MCObjectFileInfo.cpp b/contrib/llvm/lib/MC/MCObjectFileInfo.cpp
index 799e061..d490ef3 100644
--- a/contrib/llvm/lib/MC/MCObjectFileInfo.cpp
+++ b/contrib/llvm/lib/MC/MCObjectFileInfo.cpp
@@ -8,7 +8,9 @@
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCObjectFileInfo.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSectionCOFF.h"
@@ -16,14 +18,38 @@
#include "llvm/MC/MCSectionMachO.h"
using namespace llvm;
+static bool useCompactUnwind(const Triple &T) {
+ // Only on darwin.
+ if (!T.isOSDarwin())
+ return false;
+
+ // aarch64 always has it.
+ if (T.getArch() == Triple::arm64 || T.getArch() == Triple::aarch64)
+ return true;
+
+ // Use it on newer version of OS X.
+ if (T.isMacOSX() && !T.isMacOSXVersionLT(10, 6))
+ return true;
+
+ // And the iOS simulator.
+ if (T.isiOS() &&
+ (T.getArch() == Triple::x86_64 || T.getArch() == Triple::x86))
+ return true;
+
+ return false;
+}
+
void MCObjectFileInfo::InitMachOMCObjectFileInfo(Triple T) {
// MachO
- IsFunctionEHFrameSymbolPrivate = false;
SupportsWeakOmittedEHFrame = false;
+ if (T.isOSDarwin() &&
+ (T.getArch() == Triple::arm64 || T.getArch() == Triple::aarch64))
+ SupportsCompactUnwindWithoutEHFrame = true;
+
PersonalityEncoding = dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel
| dwarf::DW_EH_PE_sdata4;
- LSDAEncoding = FDEEncoding = FDECFIEncoding = dwarf::DW_EH_PE_pcrel;
+ LSDAEncoding = FDECFIEncoding = dwarf::DW_EH_PE_pcrel;
TTypeEncoding = dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
dwarf::DW_EH_PE_sdata4;
@@ -33,61 +59,55 @@ void MCObjectFileInfo::InitMachOMCObjectFileInfo(Triple T) {
TextSection // .text
= Ctx->getMachOSection("__TEXT", "__text",
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
+ MachO::S_ATTR_PURE_INSTRUCTIONS,
SectionKind::getText());
DataSection // .data
= Ctx->getMachOSection("__DATA", "__data", 0,
SectionKind::getDataRel());
// BSSSection might not be expected initialized on msvc.
- BSSSection = 0;
+ BSSSection = nullptr;
TLSDataSection // .tdata
= Ctx->getMachOSection("__DATA", "__thread_data",
- MCSectionMachO::S_THREAD_LOCAL_REGULAR,
+ MachO::S_THREAD_LOCAL_REGULAR,
SectionKind::getDataRel());
TLSBSSSection // .tbss
= Ctx->getMachOSection("__DATA", "__thread_bss",
- MCSectionMachO::S_THREAD_LOCAL_ZEROFILL,
+ MachO::S_THREAD_LOCAL_ZEROFILL,
SectionKind::getThreadBSS());
// TODO: Verify datarel below.
TLSTLVSection // .tlv
= Ctx->getMachOSection("__DATA", "__thread_vars",
- MCSectionMachO::S_THREAD_LOCAL_VARIABLES,
+ MachO::S_THREAD_LOCAL_VARIABLES,
SectionKind::getDataRel());
TLSThreadInitSection
= Ctx->getMachOSection("__DATA", "__thread_init",
- MCSectionMachO::S_THREAD_LOCAL_INIT_FUNCTION_POINTERS,
+ MachO::S_THREAD_LOCAL_INIT_FUNCTION_POINTERS,
SectionKind::getDataRel());
CStringSection // .cstring
= Ctx->getMachOSection("__TEXT", "__cstring",
- MCSectionMachO::S_CSTRING_LITERALS,
+ MachO::S_CSTRING_LITERALS,
SectionKind::getMergeable1ByteCString());
UStringSection
= Ctx->getMachOSection("__TEXT","__ustring", 0,
SectionKind::getMergeable2ByteCString());
FourByteConstantSection // .literal4
= Ctx->getMachOSection("__TEXT", "__literal4",
- MCSectionMachO::S_4BYTE_LITERALS,
+ MachO::S_4BYTE_LITERALS,
SectionKind::getMergeableConst4());
EightByteConstantSection // .literal8
= Ctx->getMachOSection("__TEXT", "__literal8",
- MCSectionMachO::S_8BYTE_LITERALS,
+ MachO::S_8BYTE_LITERALS,
SectionKind::getMergeableConst8());
- // ld_classic doesn't support .literal16 in 32-bit mode, and ld64 falls back
- // to using it in -static mode.
- SixteenByteConstantSection = 0;
- if (RelocM != Reloc::Static &&
- T.getArch() != Triple::x86_64 && T.getArch() != Triple::ppc64 &&
- T.getArch() != Triple::ppc64le)
- SixteenByteConstantSection = // .literal16
- Ctx->getMachOSection("__TEXT", "__literal16",
- MCSectionMachO::S_16BYTE_LITERALS,
- SectionKind::getMergeableConst16());
+ SixteenByteConstantSection // .literal16
+ = Ctx->getMachOSection("__TEXT", "__literal16",
+ MachO::S_16BYTE_LITERALS,
+ SectionKind::getMergeableConst16());
ReadOnlySection // .const
= Ctx->getMachOSection("__TEXT", "__const", 0,
@@ -95,36 +115,36 @@ void MCObjectFileInfo::InitMachOMCObjectFileInfo(Triple T) {
TextCoalSection
= Ctx->getMachOSection("__TEXT", "__textcoal_nt",
- MCSectionMachO::S_COALESCED |
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
+ MachO::S_COALESCED |
+ MachO::S_ATTR_PURE_INSTRUCTIONS,
SectionKind::getText());
ConstTextCoalSection
= Ctx->getMachOSection("__TEXT", "__const_coal",
- MCSectionMachO::S_COALESCED,
+ MachO::S_COALESCED,
SectionKind::getReadOnly());
ConstDataSection // .const_data
= Ctx->getMachOSection("__DATA", "__const", 0,
SectionKind::getReadOnlyWithRel());
DataCoalSection
= Ctx->getMachOSection("__DATA","__datacoal_nt",
- MCSectionMachO::S_COALESCED,
+ MachO::S_COALESCED,
SectionKind::getDataRel());
DataCommonSection
= Ctx->getMachOSection("__DATA","__common",
- MCSectionMachO::S_ZEROFILL,
+ MachO::S_ZEROFILL,
SectionKind::getBSS());
DataBSSSection
- = Ctx->getMachOSection("__DATA","__bss", MCSectionMachO::S_ZEROFILL,
+ = Ctx->getMachOSection("__DATA","__bss", MachO::S_ZEROFILL,
SectionKind::getBSS());
LazySymbolPointerSection
= Ctx->getMachOSection("__DATA", "__la_symbol_ptr",
- MCSectionMachO::S_LAZY_SYMBOL_POINTERS,
+ MachO::S_LAZY_SYMBOL_POINTERS,
SectionKind::getMetadata());
NonLazySymbolPointerSection
= Ctx->getMachOSection("__DATA", "__nl_symbol_ptr",
- MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS,
+ MachO::S_NON_LAZY_SYMBOL_POINTERS,
SectionKind::getMetadata());
if (RelocM == Reloc::Static) {
@@ -137,11 +157,11 @@ void MCObjectFileInfo::InitMachOMCObjectFileInfo(Triple T) {
} else {
StaticCtorSection
= Ctx->getMachOSection("__DATA", "__mod_init_func",
- MCSectionMachO::S_MOD_INIT_FUNC_POINTERS,
+ MachO::S_MOD_INIT_FUNC_POINTERS,
SectionKind::getDataRel());
StaticDtorSection
= Ctx->getMachOSection("__DATA", "__mod_term_func",
- MCSectionMachO::S_MOD_TERM_FUNC_POINTERS,
+ MachO::S_MOD_TERM_FUNC_POINTERS,
SectionKind::getDataRel());
}
@@ -149,90 +169,93 @@ void MCObjectFileInfo::InitMachOMCObjectFileInfo(Triple T) {
LSDASection = Ctx->getMachOSection("__TEXT", "__gcc_except_tab", 0,
SectionKind::getReadOnlyWithRel());
- if (T.isMacOSX() && !T.isMacOSXVersionLT(10, 6)) {
+ COFFDebugSymbolsSection = nullptr;
+
+ if (useCompactUnwind(T)) {
CompactUnwindSection =
- Ctx->getMachOSection("__LD", "__compact_unwind",
- MCSectionMachO::S_ATTR_DEBUG,
- SectionKind::getReadOnly());
+ Ctx->getMachOSection("__LD", "__compact_unwind", MachO::S_ATTR_DEBUG,
+ SectionKind::getReadOnly());
if (T.getArch() == Triple::x86_64 || T.getArch() == Triple::x86)
CompactUnwindDwarfEHFrameOnly = 0x04000000;
+ else if (T.getArch() == Triple::arm64 || T.getArch() == Triple::aarch64)
+ CompactUnwindDwarfEHFrameOnly = 0x03000000;
}
// Debug Information.
DwarfAccelNamesSection =
Ctx->getMachOSection("__DWARF", "__apple_names",
- MCSectionMachO::S_ATTR_DEBUG,
+ MachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
DwarfAccelObjCSection =
Ctx->getMachOSection("__DWARF", "__apple_objc",
- MCSectionMachO::S_ATTR_DEBUG,
+ MachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
// 16 character section limit...
DwarfAccelNamespaceSection =
Ctx->getMachOSection("__DWARF", "__apple_namespac",
- MCSectionMachO::S_ATTR_DEBUG,
+ MachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
DwarfAccelTypesSection =
Ctx->getMachOSection("__DWARF", "__apple_types",
- MCSectionMachO::S_ATTR_DEBUG,
+ MachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
DwarfAbbrevSection =
Ctx->getMachOSection("__DWARF", "__debug_abbrev",
- MCSectionMachO::S_ATTR_DEBUG,
+ MachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
DwarfInfoSection =
Ctx->getMachOSection("__DWARF", "__debug_info",
- MCSectionMachO::S_ATTR_DEBUG,
+ MachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
DwarfLineSection =
Ctx->getMachOSection("__DWARF", "__debug_line",
- MCSectionMachO::S_ATTR_DEBUG,
+ MachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
DwarfFrameSection =
Ctx->getMachOSection("__DWARF", "__debug_frame",
- MCSectionMachO::S_ATTR_DEBUG,
+ MachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
DwarfPubNamesSection =
Ctx->getMachOSection("__DWARF", "__debug_pubnames",
- MCSectionMachO::S_ATTR_DEBUG,
+ MachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
DwarfPubTypesSection =
Ctx->getMachOSection("__DWARF", "__debug_pubtypes",
- MCSectionMachO::S_ATTR_DEBUG,
+ MachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
DwarfGnuPubNamesSection =
Ctx->getMachOSection("__DWARF", "__debug_gnu_pubn",
- MCSectionMachO::S_ATTR_DEBUG,
+ MachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
DwarfGnuPubTypesSection =
Ctx->getMachOSection("__DWARF", "__debug_gnu_pubt",
- MCSectionMachO::S_ATTR_DEBUG,
+ MachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
DwarfStrSection =
Ctx->getMachOSection("__DWARF", "__debug_str",
- MCSectionMachO::S_ATTR_DEBUG,
+ MachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
DwarfLocSection =
Ctx->getMachOSection("__DWARF", "__debug_loc",
- MCSectionMachO::S_ATTR_DEBUG,
+ MachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
DwarfARangesSection =
Ctx->getMachOSection("__DWARF", "__debug_aranges",
- MCSectionMachO::S_ATTR_DEBUG,
+ MachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
DwarfRangesSection =
Ctx->getMachOSection("__DWARF", "__debug_ranges",
- MCSectionMachO::S_ATTR_DEBUG,
+ MachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
DwarfMacroInfoSection =
Ctx->getMachOSection("__DWARF", "__debug_macinfo",
- MCSectionMachO::S_ATTR_DEBUG,
+ MachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
DwarfDebugInlineSection =
Ctx->getMachOSection("__DWARF", "__debug_inlined",
- MCSectionMachO::S_ATTR_DEBUG,
+ MachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
StackMapSection =
Ctx->getMachOSection("__LLVM_STACKMAPS", "__llvm_stackmaps", 0,
@@ -242,29 +265,40 @@ void MCObjectFileInfo::InitMachOMCObjectFileInfo(Triple T) {
}
void MCObjectFileInfo::InitELFMCObjectFileInfo(Triple T) {
- if (T.getArch() == Triple::mips ||
- T.getArch() == Triple::mipsel)
+ switch (T.getArch()) {
+ case Triple::mips:
+ case Triple::mipsel:
FDECFIEncoding = dwarf::DW_EH_PE_sdata4;
- else if (T.getArch() == Triple::mips64 ||
- T.getArch() == Triple::mips64el)
+ break;
+ case Triple::mips64:
+ case Triple::mips64el:
FDECFIEncoding = dwarf::DW_EH_PE_sdata8;
- else
+ break;
+ default:
FDECFIEncoding = dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4;
+ break;
+ }
- if (T.getArch() == Triple::x86) {
+ switch (T.getArch()) {
+ case Triple::arm:
+ case Triple::armeb:
+ case Triple::thumb:
+ case Triple::thumbeb:
+ if (Ctx->getAsmInfo()->getExceptionHandlingType() == ExceptionHandling::ARM)
+ break;
+ // Fallthrough if not using EHABI
+ case Triple::x86:
PersonalityEncoding = (RelocM == Reloc::PIC_)
? dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4
: dwarf::DW_EH_PE_absptr;
LSDAEncoding = (RelocM == Reloc::PIC_)
? dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4
: dwarf::DW_EH_PE_absptr;
- FDEEncoding = (RelocM == Reloc::PIC_)
- ? dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4
- : dwarf::DW_EH_PE_absptr;
TTypeEncoding = (RelocM == Reloc::PIC_)
? dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4
: dwarf::DW_EH_PE_absptr;
- } else if (T.getArch() == Triple::x86_64) {
+ break;
+ case Triple::x86_64:
if (RelocM == Reloc::PIC_) {
PersonalityEncoding = dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
((CMModel == CodeModel::Small || CMModel == CodeModel::Medium)
@@ -272,7 +306,6 @@ void MCObjectFileInfo::InitELFMCObjectFileInfo(Triple T) {
LSDAEncoding = dwarf::DW_EH_PE_pcrel |
(CMModel == CodeModel::Small
? dwarf::DW_EH_PE_sdata4 : dwarf::DW_EH_PE_sdata8);
- FDEEncoding = dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4;
TTypeEncoding = dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
((CMModel == CodeModel::Small || CMModel == CodeModel::Medium)
? dwarf::DW_EH_PE_sdata4 : dwarf::DW_EH_PE_sdata8);
@@ -282,11 +315,14 @@ void MCObjectFileInfo::InitELFMCObjectFileInfo(Triple T) {
? dwarf::DW_EH_PE_udata4 : dwarf::DW_EH_PE_absptr;
LSDAEncoding = (CMModel == CodeModel::Small)
? dwarf::DW_EH_PE_udata4 : dwarf::DW_EH_PE_absptr;
- FDEEncoding = dwarf::DW_EH_PE_udata4;
TTypeEncoding = (CMModel == CodeModel::Small)
? dwarf::DW_EH_PE_udata4 : dwarf::DW_EH_PE_absptr;
}
- } else if (T.getArch() == Triple::aarch64) {
+ break;
+ case Triple::aarch64:
+ case Triple::aarch64_be:
+ case Triple::arm64:
+ case Triple::arm64_be:
// The small model guarantees static code/data size < 4GB, but not where it
// will be in memory. Most of these could end up >2GB away so even a signed
// pc-relative 32-bit address is insufficient, theoretically.
@@ -294,65 +330,71 @@ void MCObjectFileInfo::InitELFMCObjectFileInfo(Triple T) {
PersonalityEncoding = dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
dwarf::DW_EH_PE_sdata8;
LSDAEncoding = dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata8;
- FDEEncoding = dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4;
TTypeEncoding = dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
dwarf::DW_EH_PE_sdata8;
} else {
PersonalityEncoding = dwarf::DW_EH_PE_absptr;
LSDAEncoding = dwarf::DW_EH_PE_absptr;
- FDEEncoding = dwarf::DW_EH_PE_udata4;
TTypeEncoding = dwarf::DW_EH_PE_absptr;
}
- } else if (T.getArch() == Triple::ppc64 || T.getArch() == Triple::ppc64le) {
+ break;
+ case Triple::mips:
+ case Triple::mipsel:
+ // MIPS uses an indirect pointer to refer to personality functions, so
+ // that the eh_frame section can be read-only. DW.ref.personality will be
+ // generated for relocation.
+ PersonalityEncoding = dwarf::DW_EH_PE_indirect;
+ break;
+ case Triple::ppc64:
+ case Triple::ppc64le:
PersonalityEncoding = dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
dwarf::DW_EH_PE_udata8;
LSDAEncoding = dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_udata8;
- FDEEncoding = dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_udata8;
TTypeEncoding = dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
dwarf::DW_EH_PE_udata8;
- } else if (T.getArch() == Triple::sparc) {
+ break;
+ case Triple::sparc:
if (RelocM == Reloc::PIC_) {
LSDAEncoding = dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4;
PersonalityEncoding = dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
dwarf::DW_EH_PE_sdata4;
- FDEEncoding = dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4;
TTypeEncoding = dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
dwarf::DW_EH_PE_sdata4;
} else {
LSDAEncoding = dwarf::DW_EH_PE_absptr;
PersonalityEncoding = dwarf::DW_EH_PE_absptr;
- FDEEncoding = dwarf::DW_EH_PE_udata4;
TTypeEncoding = dwarf::DW_EH_PE_absptr;
}
- } else if (T.getArch() == Triple::sparcv9) {
+ break;
+ case Triple::sparcv9:
LSDAEncoding = dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4;
if (RelocM == Reloc::PIC_) {
PersonalityEncoding = dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
dwarf::DW_EH_PE_sdata4;
- FDEEncoding = dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4;
TTypeEncoding = dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
dwarf::DW_EH_PE_sdata4;
} else {
PersonalityEncoding = dwarf::DW_EH_PE_absptr;
- FDEEncoding = dwarf::DW_EH_PE_udata4;
TTypeEncoding = dwarf::DW_EH_PE_absptr;
}
- } else if (T.getArch() == Triple::systemz) {
+ break;
+ case Triple::systemz:
// All currently-defined code models guarantee that 4-byte PC-relative
// values will be in range.
if (RelocM == Reloc::PIC_) {
PersonalityEncoding = dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
dwarf::DW_EH_PE_sdata4;
LSDAEncoding = dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4;
- FDEEncoding = dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4;
TTypeEncoding = dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
dwarf::DW_EH_PE_sdata4;
} else {
PersonalityEncoding = dwarf::DW_EH_PE_absptr;
LSDAEncoding = dwarf::DW_EH_PE_absptr;
- FDEEncoding = dwarf::DW_EH_PE_absptr;
TTypeEncoding = dwarf::DW_EH_PE_absptr;
}
+ break;
+ default:
+ break;
}
// Solaris requires different flags for .eh_frame to seemingly every other
@@ -457,6 +499,8 @@ void MCObjectFileInfo::InitELFMCObjectFileInfo(Triple T) {
ELF::SHF_ALLOC,
SectionKind::getReadOnly());
+ COFFDebugSymbolsSection = nullptr;
+
// Debug Info Sections.
DwarfAbbrevSection =
Ctx->getELFSection(".debug_abbrev", ELF::SHT_PROGBITS, 0,
@@ -542,6 +586,12 @@ void MCObjectFileInfo::InitELFMCObjectFileInfo(Triple T) {
void MCObjectFileInfo::InitCOFFMCObjectFileInfo(Triple T) {
+ bool IsWoA = T.getArch() == Triple::arm || T.getArch() == Triple::thumb;
+
+ // The object file format cannot represent common symbols with explicit
+ // alignments.
+ CommDirectiveSupportsAlignment = false;
+
// COFF
BSSSection =
Ctx->getCOFFSection(".bss",
@@ -551,6 +601,8 @@ void MCObjectFileInfo::InitCOFFMCObjectFileInfo(Triple T) {
SectionKind::getBSS());
TextSection =
Ctx->getCOFFSection(".text",
+ (IsWoA ? COFF::IMAGE_SCN_MEM_16BIT
+ : (COFF::SectionCharacteristics)0) |
COFF::IMAGE_SCN_CNT_CODE |
COFF::IMAGE_SCN_MEM_EXECUTE |
COFF::IMAGE_SCN_MEM_READ,
@@ -566,12 +618,18 @@ void MCObjectFileInfo::InitCOFFMCObjectFileInfo(Triple T) {
COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
COFF::IMAGE_SCN_MEM_READ,
SectionKind::getReadOnly());
- if (T.getOS() == Triple::Win32) {
+
+ if (T.isKnownWindowsMSVCEnvironment() || T.isWindowsItaniumEnvironment()) {
StaticCtorSection =
Ctx->getCOFFSection(".CRT$XCU",
COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
COFF::IMAGE_SCN_MEM_READ,
SectionKind::getReadOnly());
+ StaticDtorSection =
+ Ctx->getCOFFSection(".CRT$XTX",
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getReadOnly());
} else {
StaticCtorSection =
Ctx->getCOFFSection(".ctors",
@@ -579,16 +637,6 @@ void MCObjectFileInfo::InitCOFFMCObjectFileInfo(Triple T) {
COFF::IMAGE_SCN_MEM_READ |
COFF::IMAGE_SCN_MEM_WRITE,
SectionKind::getDataRel());
- }
-
-
- if (T.getOS() == Triple::Win32) {
- StaticDtorSection =
- Ctx->getCOFFSection(".CRT$XTX",
- COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
- COFF::IMAGE_SCN_MEM_READ,
- SectionKind::getReadOnly());
- } else {
StaticDtorSection =
Ctx->getCOFFSection(".dtors",
COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
@@ -601,13 +649,25 @@ void MCObjectFileInfo::InitCOFFMCObjectFileInfo(Triple T) {
// though it contains relocatable pointers. In PIC mode, this is probably a
// big runtime hit for C++ apps. Either the contents of the LSDA need to be
// adjusted or this should be a data section.
- LSDASection =
- Ctx->getCOFFSection(".gcc_except_table",
+ assert(T.isOSWindows() && "Windows is the only supported COFF target");
+ if (T.getArch() == Triple::x86_64) {
+ // On Windows 64 with SEH, the LSDA is emitted into the .xdata section
+ LSDASection = 0;
+ } else {
+ LSDASection = Ctx->getCOFFSection(".gcc_except_table",
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getReadOnly());
+ }
+
+ // Debug info.
+ COFFDebugSymbolsSection =
+ Ctx->getCOFFSection(".debug$S",
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
COFF::IMAGE_SCN_MEM_READ,
- SectionKind::getReadOnly());
+ SectionKind::getMetadata());
- // Debug info.
DwarfAbbrevSection =
Ctx->getCOFFSection(".debug_abbrev",
COFF::IMAGE_SCN_MEM_DISCARDABLE |
@@ -673,10 +733,47 @@ void MCObjectFileInfo::InitCOFFMCObjectFileInfo(Triple T) {
COFF::IMAGE_SCN_MEM_DISCARDABLE |
COFF::IMAGE_SCN_MEM_READ,
SectionKind::getMetadata());
+ DwarfInfoDWOSection =
+ Ctx->getCOFFSection(".debug_info.dwo",
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getMetadata());
+ DwarfAbbrevDWOSection =
+ Ctx->getCOFFSection(".debug_abbrev.dwo",
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getMetadata());
+ DwarfStrDWOSection =
+ Ctx->getCOFFSection(".debug_str.dwo",
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getMetadata());
+ DwarfLineDWOSection =
+ Ctx->getCOFFSection(".debug_line.dwo",
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getMetadata());
+ DwarfLocDWOSection =
+ Ctx->getCOFFSection(".debug_loc.dwo",
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getMetadata());
+ DwarfStrOffDWOSection =
+ Ctx->getCOFFSection(".debug_str_offsets.dwo",
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getMetadata());
+
+ DwarfAddrSection =
+ Ctx->getCOFFSection(".debug_addr",
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getMetadata());
DrectveSection =
Ctx->getCOFFSection(".drectve",
- COFF::IMAGE_SCN_LNK_INFO,
+ COFF::IMAGE_SCN_LNK_INFO |
+ COFF::IMAGE_SCN_LNK_REMOVE,
SectionKind::getMetadata());
PDataSection =
@@ -690,6 +787,7 @@ void MCObjectFileInfo::InitCOFFMCObjectFileInfo(Triple T) {
COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
COFF::IMAGE_SCN_MEM_READ,
SectionKind::getDataRel());
+
TLSDataSection =
Ctx->getCOFFSection(".tls$",
COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
@@ -698,7 +796,7 @@ void MCObjectFileInfo::InitCOFFMCObjectFileInfo(Triple T) {
SectionKind::getDataRel());
}
-void MCObjectFileInfo::InitMCObjectFileInfo(StringRef TT, Reloc::Model relocm,
+void MCObjectFileInfo::InitMCObjectFileInfo(StringRef T, Reloc::Model relocm,
CodeModel::Model cm,
MCContext &ctx) {
RelocM = relocm;
@@ -708,51 +806,64 @@ void MCObjectFileInfo::InitMCObjectFileInfo(StringRef TT, Reloc::Model relocm,
// Common.
CommDirectiveSupportsAlignment = true;
SupportsWeakOmittedEHFrame = true;
- IsFunctionEHFrameSymbolPrivate = true;
+ SupportsCompactUnwindWithoutEHFrame = false;
- PersonalityEncoding = LSDAEncoding = FDEEncoding = FDECFIEncoding =
- TTypeEncoding = dwarf::DW_EH_PE_absptr;
+ PersonalityEncoding = LSDAEncoding = FDECFIEncoding = TTypeEncoding =
+ dwarf::DW_EH_PE_absptr;
CompactUnwindDwarfEHFrameOnly = 0;
- EHFrameSection = 0; // Created on demand.
- CompactUnwindSection = 0; // Used only by selected targets.
- DwarfAccelNamesSection = 0; // Used only by selected targets.
- DwarfAccelObjCSection = 0; // Used only by selected targets.
- DwarfAccelNamespaceSection = 0; // Used only by selected targets.
- DwarfAccelTypesSection = 0; // Used only by selected targets.
+ EHFrameSection = nullptr; // Created on demand.
+ CompactUnwindSection = nullptr; // Used only by selected targets.
+ DwarfAccelNamesSection = nullptr; // Used only by selected targets.
+ DwarfAccelObjCSection = nullptr; // Used only by selected targets.
+ DwarfAccelNamespaceSection = nullptr; // Used only by selected targets.
+ DwarfAccelTypesSection = nullptr; // Used only by selected targets.
- Triple T(TT);
- Triple::ArchType Arch = T.getArch();
+ TT = Triple(T);
+
+ Triple::ArchType Arch = TT.getArch();
// FIXME: Checking for Arch here to filter out bogus triples such as
  // cellspu-apple-darwin. Perhaps we should fix this in Triple?
if ((Arch == Triple::x86 || Arch == Triple::x86_64 ||
Arch == Triple::arm || Arch == Triple::thumb ||
+ Arch == Triple::arm64 || Arch == Triple::aarch64 ||
Arch == Triple::ppc || Arch == Triple::ppc64 ||
Arch == Triple::UnknownArch) &&
- (T.isOSDarwin() || T.getEnvironment() == Triple::MachO)) {
+ (TT.isOSDarwin() || TT.isOSBinFormatMachO())) {
Env = IsMachO;
- InitMachOMCObjectFileInfo(T);
- } else if ((Arch == Triple::x86 || Arch == Triple::x86_64) &&
- (T.getEnvironment() != Triple::ELF) &&
- (T.getOS() == Triple::MinGW32 || T.getOS() == Triple::Cygwin ||
- T.getOS() == Triple::Win32)) {
+ InitMachOMCObjectFileInfo(TT);
+ } else if ((Arch == Triple::x86 || Arch == Triple::x86_64 ||
+ Arch == Triple::arm || Arch == Triple::thumb) &&
+ (TT.isOSWindows() && TT.getObjectFormat() == Triple::COFF)) {
Env = IsCOFF;
- InitCOFFMCObjectFileInfo(T);
+ InitCOFFMCObjectFileInfo(TT);
} else {
Env = IsELF;
- InitELFMCObjectFileInfo(T);
+ InitELFMCObjectFileInfo(TT);
}
}
+const MCSection *MCObjectFileInfo::getDwarfTypesSection(uint64_t Hash) const {
+ return Ctx->getELFSection(".debug_types", ELF::SHT_PROGBITS, ELF::SHF_GROUP,
+ SectionKind::getMetadata(), 0, utostr(Hash));
+}
+
+const MCSection *
+MCObjectFileInfo::getDwarfTypesDWOSection(uint64_t Hash) const {
+ return Ctx->getELFSection(".debug_types.dwo", ELF::SHT_PROGBITS,
+ ELF::SHF_GROUP, SectionKind::getMetadata(), 0,
+ utostr(Hash));
+}
+
void MCObjectFileInfo::InitEHFrameSection() {
if (Env == IsMachO)
EHFrameSection =
Ctx->getMachOSection("__TEXT", "__eh_frame",
- MCSectionMachO::S_COALESCED |
- MCSectionMachO::S_ATTR_NO_TOC |
- MCSectionMachO::S_ATTR_STRIP_STATIC_SYMS |
- MCSectionMachO::S_ATTR_LIVE_SUPPORT,
+ MachO::S_COALESCED |
+ MachO::S_ATTR_NO_TOC |
+ MachO::S_ATTR_STRIP_STATIC_SYMS |
+ MachO::S_ATTR_LIVE_SUPPORT,
SectionKind::getReadOnly());
else if (Env == IsELF)
EHFrameSection =
diff --git a/contrib/llvm/lib/MC/MCObjectStreamer.cpp b/contrib/llvm/lib/MC/MCObjectStreamer.cpp
index bc14c2a..a721b59 100644
--- a/contrib/llvm/lib/MC/MCObjectStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCObjectStreamer.cpp
@@ -17,27 +17,23 @@
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCObjectWriter.h"
-#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
-MCObjectStreamer::MCObjectStreamer(MCContext &Context,
- MCTargetStreamer *TargetStreamer,
- MCAsmBackend &TAB, raw_ostream &OS,
- MCCodeEmitter *Emitter_)
- : MCStreamer(Context, TargetStreamer),
+MCObjectStreamer::MCObjectStreamer(MCContext &Context, MCAsmBackend &TAB,
+ raw_ostream &OS, MCCodeEmitter *Emitter_)
+ : MCStreamer(Context),
Assembler(new MCAssembler(Context, TAB, *Emitter_,
*TAB.createObjectWriter(OS), OS)),
- CurSectionData(0) {}
+ CurSectionData(nullptr), EmitEHFrame(true), EmitDebugFrame(false) {}
-MCObjectStreamer::MCObjectStreamer(MCContext &Context,
- MCTargetStreamer *TargetStreamer,
- MCAsmBackend &TAB, raw_ostream &OS,
- MCCodeEmitter *Emitter_,
+MCObjectStreamer::MCObjectStreamer(MCContext &Context, MCAsmBackend &TAB,
+ raw_ostream &OS, MCCodeEmitter *Emitter_,
MCAssembler *_Assembler)
- : MCStreamer(Context, TargetStreamer), Assembler(_Assembler),
- CurSectionData(0) {}
+ : MCStreamer(Context), Assembler(_Assembler), CurSectionData(nullptr),
+ EmitEHFrame(true), EmitDebugFrame(false) {}
MCObjectStreamer::~MCObjectStreamer() {
delete &Assembler->getBackend();
@@ -49,18 +45,31 @@ MCObjectStreamer::~MCObjectStreamer() {
void MCObjectStreamer::reset() {
if (Assembler)
Assembler->reset();
- CurSectionData = 0;
+ CurSectionData = nullptr;
CurInsertionPoint = MCSectionData::iterator();
+ EmitEHFrame = true;
+ EmitDebugFrame = false;
MCStreamer::reset();
}
+void MCObjectStreamer::EmitFrames(MCAsmBackend *MAB) {
+ if (!getNumFrameInfos())
+ return;
+
+ if (EmitEHFrame)
+ MCDwarfFrameEmitter::Emit(*this, MAB, true);
+
+ if (EmitDebugFrame)
+ MCDwarfFrameEmitter::Emit(*this, MAB, false);
+}
+
MCFragment *MCObjectStreamer::getCurrentFragment() const {
assert(getCurrentSectionData() && "No current section!");
if (CurInsertionPoint != getCurrentSectionData()->getFragmentList().begin())
- return prior(CurInsertionPoint);
+ return std::prev(CurInsertionPoint);
- return 0;
+ return nullptr;
}
MCDataFragment *MCObjectStreamer::getOrCreateDataFragment() const {
@@ -74,57 +83,44 @@ MCDataFragment *MCObjectStreamer::getOrCreateDataFragment() const {
return F;
}
-const MCExpr *MCObjectStreamer::AddValueSymbols(const MCExpr *Value) {
- switch (Value->getKind()) {
- case MCExpr::Target:
- cast<MCTargetExpr>(Value)->AddValueSymbols(Assembler);
- break;
-
- case MCExpr::Constant:
- break;
-
- case MCExpr::Binary: {
- const MCBinaryExpr *BE = cast<MCBinaryExpr>(Value);
- AddValueSymbols(BE->getLHS());
- AddValueSymbols(BE->getRHS());
- break;
- }
-
- case MCExpr::SymbolRef:
- Assembler->getOrCreateSymbolData(cast<MCSymbolRefExpr>(Value)->getSymbol());
- break;
-
- case MCExpr::Unary:
- AddValueSymbols(cast<MCUnaryExpr>(Value)->getSubExpr());
- break;
- }
+void MCObjectStreamer::visitUsedSymbol(const MCSymbol &Sym) {
+ Assembler->getOrCreateSymbolData(Sym);
+}
- return Value;
+void MCObjectStreamer::EmitCFISections(bool EH, bool Debug) {
+ MCStreamer::EmitCFISections(EH, Debug);
+ EmitEHFrame = EH;
+ EmitDebugFrame = Debug;
}
-void MCObjectStreamer::EmitValueImpl(const MCExpr *Value, unsigned Size) {
+void MCObjectStreamer::EmitValueImpl(const MCExpr *Value, unsigned Size,
+ const SMLoc &Loc) {
+ MCStreamer::EmitValueImpl(Value, Size, Loc);
MCDataFragment *DF = getOrCreateDataFragment();
MCLineEntry::Make(this, getCurrentSection().first);
// Avoid fixups when possible.
int64_t AbsValue;
- if (AddValueSymbols(Value)->EvaluateAsAbsolute(AbsValue, getAssembler())) {
+ if (Value->EvaluateAsAbsolute(AbsValue, getAssembler())) {
EmitIntValue(AbsValue, Size);
return;
}
DF->getFixups().push_back(
MCFixup::Create(DF->getContents().size(), Value,
- MCFixup::getKindForSize(Size, false)));
+ MCFixup::getKindForSize(Size, false), Loc));
DF->getContents().resize(DF->getContents().size() + Size, 0);
}
void MCObjectStreamer::EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame) {
- RecordProcStart(Frame);
+ // We need to create a local symbol to avoid relocations.
+ Frame.Begin = getContext().CreateTempSymbol();
+ EmitLabel(Frame.Begin);
}
void MCObjectStreamer::EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame) {
- RecordProcEnd(Frame);
+ Frame.End = getContext().CreateTempSymbol();
+ EmitLabel(Frame.End);
}
void MCObjectStreamer::EmitLabel(MCSymbol *Symbol) {
@@ -142,10 +138,6 @@ void MCObjectStreamer::EmitLabel(MCSymbol *Symbol) {
SD.setOffset(F->getContents().size());
}
-void MCObjectStreamer::EmitDebugLabel(MCSymbol *Symbol) {
- EmitLabel(Symbol);
-}
-
void MCObjectStreamer::EmitULEB128Value(const MCExpr *Value) {
int64_t IntValue;
if (Value->EvaluateAsAbsolute(IntValue, getAssembler())) {
@@ -189,14 +181,12 @@ void MCObjectStreamer::ChangeSection(const MCSection *Section,
void MCObjectStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
getAssembler().getOrCreateSymbolData(*Symbol);
- Symbol->setVariableValue(AddValueSymbols(Value));
+ MCStreamer::EmitAssignment(Symbol, Value);
}
-void MCObjectStreamer::EmitInstruction(const MCInst &Inst) {
- // Scan for values.
- for (unsigned i = Inst.getNumOperands(); i--; )
- if (Inst.getOperand(i).isExpr())
- AddValueSymbols(Inst.getOperand(i).getExpr());
+void MCObjectStreamer::EmitInstruction(const MCInst &Inst,
+ const MCSubtargetInfo &STI) {
+ MCStreamer::EmitInstruction(Inst, STI);
MCSectionData *SD = getCurrentSectionData();
SD->setHasInstructions(true);
@@ -208,7 +198,7 @@ void MCObjectStreamer::EmitInstruction(const MCInst &Inst) {
// If this instruction doesn't need relaxation, just emit it as data.
MCAssembler &Assembler = getAssembler();
if (!Assembler.getBackend().mayNeedRelaxation(Inst)) {
- EmitInstToData(Inst);
+ EmitInstToData(Inst, STI);
return;
}
@@ -223,23 +213,25 @@ void MCObjectStreamer::EmitInstruction(const MCInst &Inst) {
getAssembler().getBackend().relaxInstruction(Inst, Relaxed);
while (getAssembler().getBackend().mayNeedRelaxation(Relaxed))
getAssembler().getBackend().relaxInstruction(Relaxed, Relaxed);
- EmitInstToData(Relaxed);
+ EmitInstToData(Relaxed, STI);
return;
}
// Otherwise emit to a separate fragment.
- EmitInstToFragment(Inst);
+ EmitInstToFragment(Inst, STI);
}
-void MCObjectStreamer::EmitInstToFragment(const MCInst &Inst) {
+void MCObjectStreamer::EmitInstToFragment(const MCInst &Inst,
+ const MCSubtargetInfo &STI) {
// Always create a new, separate fragment here, because its size can change
// during relaxation.
- MCRelaxableFragment *IF = new MCRelaxableFragment(Inst);
+ MCRelaxableFragment *IF = new MCRelaxableFragment(Inst, STI);
insert(IF);
SmallString<128> Code;
raw_svector_ostream VecOS(Code);
- getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, IF->getFixups());
+ getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, IF->getFixups(),
+ STI);
VecOS.flush();
IF->getContents().append(Code.begin(), Code.end());
}
@@ -380,14 +372,12 @@ void MCObjectStreamer::EmitZeros(uint64_t NumBytes) {
}
void MCObjectStreamer::FinishImpl() {
- // Dump out the dwarf file & directory tables and line tables.
- const MCSymbol *LineSectionSymbol = NULL;
- if (getContext().hasDwarfFiles())
- LineSectionSymbol = MCDwarfFileTable::Emit(this);
-
// If we are generating dwarf for assembly source files dump out the sections.
if (getContext().getGenDwarfForAssembly())
- MCGenDwarfInfo::Emit(this, LineSectionSymbol);
+ MCGenDwarfInfo::Emit(this);
+
+ // Dump out the dwarf file & directory tables and line tables.
+ MCDwarfLineTable::Emit(this);
getAssembler().Finish();
}
diff --git a/contrib/llvm/lib/MC/MCParser/AsmLexer.cpp b/contrib/llvm/lib/MC/MCParser/AsmLexer.cpp
index b49dd01..145ad4a 100644
--- a/contrib/llvm/lib/MC/MCParser/AsmLexer.cpp
+++ b/contrib/llvm/lib/MC/MCParser/AsmLexer.cpp
@@ -22,23 +22,23 @@
using namespace llvm;
AsmLexer::AsmLexer(const MCAsmInfo &_MAI) : MAI(_MAI) {
- CurBuf = NULL;
- CurPtr = NULL;
+ CurPtr = nullptr;
isAtStartOfLine = true;
+ AllowAtInIdentifier = !StringRef(MAI.getCommentString()).startswith("@");
}
AsmLexer::~AsmLexer() {
}
-void AsmLexer::setBuffer(const MemoryBuffer *buf, const char *ptr) {
- CurBuf = buf;
+void AsmLexer::setBuffer(StringRef Buf, const char *ptr) {
+ CurBuf = Buf;
if (ptr)
CurPtr = ptr;
else
- CurPtr = CurBuf->getBufferStart();
+ CurPtr = CurBuf.begin();
- TokStart = 0;
+ TokStart = nullptr;
}
/// ReturnError - Set the error to the specified string at the specified
@@ -57,7 +57,7 @@ int AsmLexer::getNextChar() {
case 0:
// A nul character in the stream is either the end of the current buffer or
// a random nul in the file. Disambiguate that here.
- if (CurPtr-1 != CurBuf->getBufferEnd())
+ if (CurPtr - 1 != CurBuf.end())
return 0; // Just whitespace.
// Otherwise, return end of file.
@@ -139,8 +139,9 @@ AsmToken AsmLexer::LexHexFloatLiteral(bool NoIntDigits) {
}
/// LexIdentifier: [a-zA-Z_.][a-zA-Z0-9_$.@?]*
-static bool IsIdentifierChar(char c) {
- return isalnum(c) || c == '_' || c == '$' || c == '.' || c == '@' || c == '?';
+static bool IsIdentifierChar(char c, bool AllowAt) {
+ return isalnum(c) || c == '_' || c == '$' || c == '.' ||
+ (c == '@' && AllowAt) || c == '?';
}
AsmToken AsmLexer::LexIdentifier() {
// Check for floating point literals.
@@ -148,11 +149,12 @@ AsmToken AsmLexer::LexIdentifier() {
// Disambiguate a .1243foo identifier from a floating literal.
while (isdigit(*CurPtr))
++CurPtr;
- if (*CurPtr == 'e' || *CurPtr == 'E' || !IsIdentifierChar(*CurPtr))
+ if (*CurPtr == 'e' || *CurPtr == 'E' ||
+ !IsIdentifierChar(*CurPtr, AllowAtInIdentifier))
return LexFloatLiteral();
}
- while (IsIdentifierChar(*CurPtr))
+ while (IsIdentifierChar(*CurPtr, AllowAtInIdentifier))
++CurPtr;
// Handle . as a special case.
@@ -198,8 +200,8 @@ AsmToken AsmLexer::LexLineComment() {
CurChar = getNextChar();
if (CurChar == EOF)
- return AsmToken(AsmToken::Eof, StringRef(CurPtr, 0));
- return AsmToken(AsmToken::EndOfStatement, StringRef(CurPtr, 0));
+ return AsmToken(AsmToken::Eof, StringRef(TokStart, 0));
+ return AsmToken(AsmToken::EndOfStatement, StringRef(TokStart, 0));
}
static void SkipIgnoredIntegerSuffix(const char *&CurPtr) {
@@ -215,7 +217,7 @@ static void SkipIgnoredIntegerSuffix(const char *&CurPtr) {
// Look ahead to search for the first non-hex digit; if it's [hH], then we
// treat the integer as a hexadecimal, possibly with leading zeroes.
static unsigned doLookAhead(const char *&CurPtr, unsigned DefaultRadix) {
- const char *FirstHex = 0;
+ const char *FirstHex = nullptr;
const char *LookAhead = CurPtr;
while (1) {
if (isdigit(*LookAhead)) {
@@ -235,6 +237,13 @@ static unsigned doLookAhead(const char *&CurPtr, unsigned DefaultRadix) {
return DefaultRadix;
}
+static AsmToken intToken(StringRef Ref, APInt &Value)
+{
+ if (Value.isIntN(64))
+ return AsmToken(AsmToken::Integer, Ref, Value);
+ return AsmToken(AsmToken::BigNum, Ref, Value);
+}
+
/// LexDigit: First character is [0-9].
/// Local Label: [0-9][:]
/// Forward/Backward Label: [0-9][fb]
@@ -255,16 +264,10 @@ AsmToken AsmLexer::LexDigit() {
StringRef Result(TokStart, CurPtr - TokStart);
- long long Value;
- if (Result.getAsInteger(Radix, Value)) {
- // Allow positive values that are too large to fit into a signed 64-bit
- // integer, but that do fit in an unsigned one, we just convert them over.
- unsigned long long UValue;
- if (Result.getAsInteger(Radix, UValue))
- return ReturnError(TokStart, !isHex ? "invalid decimal number" :
+ APInt Value(128, 0, true);
+ if (Result.getAsInteger(Radix, Value))
+ return ReturnError(TokStart, !isHex ? "invalid decimal number" :
"invalid hexdecimal number");
- Value = (long long)UValue;
- }
// Consume the [bB][hH].
if (Radix == 2 || Radix == 16)
@@ -274,7 +277,7 @@ AsmToken AsmLexer::LexDigit() {
  // suffixes on integer literals.
SkipIgnoredIntegerSuffix(CurPtr);
- return AsmToken(AsmToken::Integer, Result, Value);
+ return intToken(Result, Value);
}
if (*CurPtr == 'b') {
@@ -295,7 +298,7 @@ AsmToken AsmLexer::LexDigit() {
StringRef Result(TokStart, CurPtr - TokStart);
- long long Value;
+ APInt Value(128, 0, true);
if (Result.substr(2).getAsInteger(2, Value))
return ReturnError(TokStart, "invalid binary number");
@@ -303,7 +306,7 @@ AsmToken AsmLexer::LexDigit() {
// suffixes on integer literals.
SkipIgnoredIntegerSuffix(CurPtr);
- return AsmToken(AsmToken::Integer, Result, Value);
+ return intToken(Result, Value);
}
if (*CurPtr == 'x') {
@@ -321,7 +324,7 @@ AsmToken AsmLexer::LexDigit() {
if (CurPtr == NumStart)
return ReturnError(CurPtr-2, "invalid hexadecimal number");
- unsigned long long Result;
+ APInt Result(128, 0);
if (StringRef(TokStart, CurPtr - TokStart).getAsInteger(0, Result))
return ReturnError(TokStart, "invalid hexadecimal number");
@@ -333,12 +336,11 @@ AsmToken AsmLexer::LexDigit() {
// suffixes on integer literals.
SkipIgnoredIntegerSuffix(CurPtr);
- return AsmToken(AsmToken::Integer, StringRef(TokStart, CurPtr - TokStart),
- (int64_t)Result);
+ return intToken(StringRef(TokStart, CurPtr - TokStart), Result);
}
// Either octal or hexadecimal.
- long long Value;
+ APInt Value(128, 0, true);
unsigned Radix = doLookAhead(CurPtr, 8);
bool isHex = Radix == 16;
StringRef Result(TokStart, CurPtr - TokStart);
@@ -354,7 +356,7 @@ AsmToken AsmLexer::LexDigit() {
// suffixes on integer literals.
SkipIgnoredIntegerSuffix(CurPtr);
- return AsmToken(AsmToken::Integer, Result, Value);
+ return intToken(Result, Value);
}
/// LexSingleQuote: Integer: 'b'
@@ -417,9 +419,8 @@ StringRef AsmLexer::LexUntilEndOfStatement() {
while (!isAtStartOfComment(*CurPtr) && // Start of line comment.
!isAtStatementSeparator(CurPtr) && // End of statement marker.
- *CurPtr != '\n' &&
- *CurPtr != '\r' &&
- (*CurPtr != 0 || CurPtr != CurBuf->getBufferEnd())) {
+ *CurPtr != '\n' && *CurPtr != '\r' &&
+ (*CurPtr != 0 || CurPtr != CurBuf.end())) {
++CurPtr;
}
return StringRef(TokStart, CurPtr-TokStart);
@@ -428,14 +429,35 @@ StringRef AsmLexer::LexUntilEndOfStatement() {
StringRef AsmLexer::LexUntilEndOfLine() {
TokStart = CurPtr;
- while (*CurPtr != '\n' &&
- *CurPtr != '\r' &&
- (*CurPtr != 0 || CurPtr != CurBuf->getBufferEnd())) {
+ while (*CurPtr != '\n' && *CurPtr != '\r' &&
+ (*CurPtr != 0 || CurPtr != CurBuf.end())) {
++CurPtr;
}
return StringRef(TokStart, CurPtr-TokStart);
}
+const AsmToken AsmLexer::peekTok(bool ShouldSkipSpace) {
+ const char *SavedTokStart = TokStart;
+ const char *SavedCurPtr = CurPtr;
+ bool SavedAtStartOfLine = isAtStartOfLine;
+ bool SavedSkipSpace = SkipSpace;
+
+ std::string SavedErr = getErr();
+ SMLoc SavedErrLoc = getErrLoc();
+
+ SkipSpace = ShouldSkipSpace;
+ AsmToken Token = LexToken();
+
+ SetError(SavedErrLoc, SavedErr);
+
+ SkipSpace = SavedSkipSpace;
+ isAtStartOfLine = SavedAtStartOfLine;
+ CurPtr = SavedCurPtr;
+ TokStart = SavedTokStart;
+
+ return Token;
+}
+
bool AsmLexer::isAtStartOfComment(char Char) {
// FIXME: This won't work for multi-character comment indicators like "//".
return Char == *MAI.getCommentString();
diff --git a/contrib/llvm/lib/MC/MCParser/AsmParser.cpp b/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
index 7f8e9df..ed1d704 100644
--- a/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
@@ -12,8 +12,8 @@
//===----------------------------------------------------------------------===//
#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -22,6 +22,7 @@
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstPrinter.h"
#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/AsmCond.h"
#include "llvm/MC/MCParser/AsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
@@ -38,6 +39,7 @@
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include <cctype>
+#include <deque>
#include <set>
#include <string>
#include <vector>
@@ -50,11 +52,19 @@ FatalAssemblerWarnings("fatal-assembler-warnings",
MCAsmParserSemaCallback::~MCAsmParserSemaCallback() {}
namespace {
-
/// \brief Helper types for tracking macro definitions.
typedef std::vector<AsmToken> MCAsmMacroArgument;
typedef std::vector<MCAsmMacroArgument> MCAsmMacroArguments;
-typedef std::pair<StringRef, MCAsmMacroArgument> MCAsmMacroParameter;
+
+struct MCAsmMacroParameter {
+ StringRef Name;
+ MCAsmMacroArgument Value;
+ bool Required;
+ bool Vararg;
+
+ MCAsmMacroParameter() : Required(false), Vararg(false) {}
+};
+
typedef std::vector<MCAsmMacroParameter> MCAsmMacroParameters;
struct MCAsmMacro {
@@ -63,11 +73,8 @@ struct MCAsmMacro {
MCAsmMacroParameters Parameters;
public:
- MCAsmMacro(StringRef N, StringRef B, const MCAsmMacroParameters &P) :
+ MCAsmMacro(StringRef N, StringRef B, ArrayRef<MCAsmMacroParameter> P) :
Name(N), Body(B), Parameters(P) {}
-
- MCAsmMacro(const MCAsmMacro& Other)
- : Name(Other.Name), Body(Other.Body), Parameters(Other.Parameters) {}
};
/// \brief Helper class for storing information about an active macro
@@ -95,7 +102,7 @@ public:
struct ParseStatementInfo {
/// \brief The parsed operands from the last parsed statement.
- SmallVector<MCParsedAsmOperand*, 8> ParsedOperands;
+ SmallVector<std::unique_ptr<MCParsedAsmOperand>, 8> ParsedOperands;
/// \brief The opcode from the last parsed instruction.
unsigned Opcode;
@@ -105,16 +112,9 @@ struct ParseStatementInfo {
SmallVectorImpl<AsmRewrite> *AsmRewrites;
- ParseStatementInfo() : Opcode(~0U), ParseError(false), AsmRewrites(0) {}
+ ParseStatementInfo() : Opcode(~0U), ParseError(false), AsmRewrites(nullptr) {}
ParseStatementInfo(SmallVectorImpl<AsmRewrite> *rewrites)
: Opcode(~0), ParseError(false), AsmRewrites(rewrites) {}
-
- ~ParseStatementInfo() {
- // Free any parsed operands.
- for (unsigned i = 0, e = ParsedOperands.size(); i != e; ++i)
- delete ParsedOperands[i];
- ParsedOperands.clear();
- }
};
/// \brief The concrete assembly parser instance.
@@ -133,7 +133,7 @@ private:
/// This is the current buffer index we're lexing from as managed by the
/// SourceMgr object.
- int CurBuffer;
+ unsigned CurBuffer;
AsmCond TheCondState;
std::vector<AsmCond> TheCondStack;
@@ -162,13 +162,13 @@ private:
StringRef CppHashFilename;
int64_t CppHashLineNumber;
SMLoc CppHashLoc;
- int CppHashBuf;
+ unsigned CppHashBuf;
/// When generating dwarf for assembly source files we need to calculate the
/// logical line number based on the last parsed cpp hash file line comment
/// and current line. Since this is slow and messes up the SourceMgr's
/// cache we save the last info we queried with SrcMgr.FindLineNumber().
SMLoc LastQueryIDLoc;
- int LastQueryBuffer;
+ unsigned LastQueryBuffer;
unsigned LastQueryLine;
  /// AssemblerDialect. ~0U means unset value and use value provided by MAI.
@@ -185,10 +185,10 @@ public:
const MCAsmInfo &MAI);
virtual ~AsmParser();
- virtual bool Run(bool NoInitialTextSection, bool NoFinalize = false);
+ bool Run(bool NoInitialTextSection, bool NoFinalize = false) override;
- virtual void addDirectiveHandler(StringRef Directive,
- ExtensionDirectiveHandler Handler) {
+ void addDirectiveHandler(StringRef Directive,
+ ExtensionDirectiveHandler Handler) override {
ExtensionDirectiveMap[Directive] = Handler;
}
@@ -196,51 +196,52 @@ public:
/// @name MCAsmParser Interface
/// {
- virtual SourceMgr &getSourceManager() { return SrcMgr; }
- virtual MCAsmLexer &getLexer() { return Lexer; }
- virtual MCContext &getContext() { return Ctx; }
- virtual MCStreamer &getStreamer() { return Out; }
- virtual unsigned getAssemblerDialect() {
+ SourceMgr &getSourceManager() override { return SrcMgr; }
+ MCAsmLexer &getLexer() override { return Lexer; }
+ MCContext &getContext() override { return Ctx; }
+ MCStreamer &getStreamer() override { return Out; }
+ unsigned getAssemblerDialect() override {
if (AssemblerDialect == ~0U)
return MAI.getAssemblerDialect();
else
return AssemblerDialect;
}
- virtual void setAssemblerDialect(unsigned i) {
+ void setAssemblerDialect(unsigned i) override {
AssemblerDialect = i;
}
- virtual bool Warning(SMLoc L, const Twine &Msg,
- ArrayRef<SMRange> Ranges = None);
- virtual bool Error(SMLoc L, const Twine &Msg,
- ArrayRef<SMRange> Ranges = None);
+ void Note(SMLoc L, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = None) override;
+ bool Warning(SMLoc L, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = None) override;
+ bool Error(SMLoc L, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = None) override;
- virtual const AsmToken &Lex();
+ const AsmToken &Lex() override;
- void setParsingInlineAsm(bool V) { ParsingInlineAsm = V; }
- bool isParsingInlineAsm() { return ParsingInlineAsm; }
+ void setParsingInlineAsm(bool V) override { ParsingInlineAsm = V; }
+ bool isParsingInlineAsm() override { return ParsingInlineAsm; }
bool parseMSInlineAsm(void *AsmLoc, std::string &AsmString,
unsigned &NumOutputs, unsigned &NumInputs,
SmallVectorImpl<std::pair<void *,bool> > &OpDecls,
SmallVectorImpl<std::string> &Constraints,
SmallVectorImpl<std::string> &Clobbers,
- const MCInstrInfo *MII,
- const MCInstPrinter *IP,
- MCAsmParserSemaCallback &SI);
+ const MCInstrInfo *MII, const MCInstPrinter *IP,
+ MCAsmParserSemaCallback &SI) override;
bool parseExpression(const MCExpr *&Res);
- virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc);
- virtual bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc);
- virtual bool parseParenExpression(const MCExpr *&Res, SMLoc &EndLoc);
- virtual bool parseAbsoluteExpression(int64_t &Res);
+ bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc) override;
+ bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;
+ bool parseParenExpression(const MCExpr *&Res, SMLoc &EndLoc) override;
+ bool parseAbsoluteExpression(int64_t &Res) override;
/// \brief Parse an identifier or string (as a quoted identifier)
/// and set \p Res to the identifier contents.
- virtual bool parseIdentifier(StringRef &Res);
- virtual void eatToEndOfStatement();
+ bool parseIdentifier(StringRef &Res) override;
+ void eatToEndOfStatement() override;
- virtual void checkForValidSection();
+ void checkForValidSection() override;
/// }
private:
@@ -250,10 +251,10 @@ private:
bool parseCppHashLineFilenameComment(const SMLoc &L);
void checkForBadMacro(SMLoc DirectiveLoc, StringRef Name, StringRef Body,
- MCAsmMacroParameters Parameters);
+ ArrayRef<MCAsmMacroParameter> Parameters);
bool expandMacro(raw_svector_ostream &OS, StringRef Body,
- const MCAsmMacroParameters &Parameters,
- const MCAsmMacroArguments &A,
+ ArrayRef<MCAsmMacroParameter> Parameters,
+ ArrayRef<MCAsmMacroArgument> A,
const SMLoc &L);
/// \brief Are macros enabled in the parser?
@@ -285,11 +286,8 @@ private:
/// \brief Handle exit from macro instantiation.
void handleMacroExit();
- /// \brief Extract AsmTokens for a macro argument. If the argument delimiter
- /// is initially unknown, set it to AsmToken::Eof. It will be set to the
- /// correct delimiter by the method.
- bool parseMacroArgument(MCAsmMacroArgument &MA,
- AsmToken::TokenKind &ArgumentDelimiter);
+ /// \brief Extract AsmTokens for a macro argument.
+ bool parseMacroArgument(MCAsmMacroArgument &MA, bool Vararg);
/// \brief Parse all macro arguments for a given macro.
bool parseMacroArguments(const MCAsmMacro *M, MCAsmMacroArguments &A);
@@ -312,14 +310,14 @@ private:
/// current token is not set; clients should ensure Lex() is called
/// subsequently.
///
- /// \param InBuffer If not -1, should be the known buffer id that contains the
+ /// \param InBuffer If not 0, should be the known buffer id that contains the
/// location.
- void jumpToLoc(SMLoc Loc, int InBuffer=-1);
+ void jumpToLoc(SMLoc Loc, unsigned InBuffer = 0);
  /// \brief Parse up to the end of statement and return the contents from the
/// current token until the end of the statement; the current token on exit
/// will be either the EndOfStatement or EOF.
- virtual StringRef parseStringToEndOfStatement();
+ StringRef parseStringToEndOfStatement() override;
/// \brief Parse until the end of a statement or a comma is encountered,
/// return the contents from the current token up to the end or comma.
@@ -338,8 +336,8 @@ private:
enum DirectiveKind {
DK_NO_DIRECTIVE, // Placeholder
DK_SET, DK_EQU, DK_EQUIV, DK_ASCII, DK_ASCIZ, DK_STRING, DK_BYTE, DK_SHORT,
- DK_VALUE, DK_2BYTE, DK_LONG, DK_INT, DK_4BYTE, DK_QUAD, DK_8BYTE, DK_SINGLE,
- DK_FLOAT, DK_DOUBLE, DK_ALIGN, DK_ALIGN32, DK_BALIGN, DK_BALIGNW,
+ DK_VALUE, DK_2BYTE, DK_LONG, DK_INT, DK_4BYTE, DK_QUAD, DK_8BYTE, DK_OCTA,
+ DK_SINGLE, DK_FLOAT, DK_DOUBLE, DK_ALIGN, DK_ALIGN32, DK_BALIGN, DK_BALIGNW,
DK_BALIGNL, DK_P2ALIGN, DK_P2ALIGNW, DK_P2ALIGNL, DK_ORG, DK_FILL, DK_ENDR,
DK_BUNDLE_ALIGN_MODE, DK_BUNDLE_LOCK, DK_BUNDLE_UNLOCK,
DK_ZERO, DK_EXTERN, DK_GLOBL, DK_GLOBAL,
@@ -347,7 +345,8 @@ private:
DK_REFERENCE, DK_WEAK_DEFINITION, DK_WEAK_REFERENCE,
DK_WEAK_DEF_CAN_BE_HIDDEN, DK_COMM, DK_COMMON, DK_LCOMM, DK_ABORT,
DK_INCLUDE, DK_INCBIN, DK_CODE16, DK_CODE16GCC, DK_REPT, DK_IRP, DK_IRPC,
- DK_IF, DK_IFB, DK_IFNB, DK_IFC, DK_IFNC, DK_IFDEF, DK_IFNDEF, DK_IFNOTDEF,
+ DK_IF, DK_IFEQ, DK_IFGE, DK_IFGT, DK_IFLE, DK_IFLT, DK_IFNE, DK_IFB,
+ DK_IFNB, DK_IFC, DK_IFEQS, DK_IFNC, DK_IFDEF, DK_IFNDEF, DK_IFNOTDEF,
DK_ELSEIF, DK_ELSE, DK_ENDIF,
DK_SPACE, DK_SKIP, DK_FILE, DK_LINE, DK_LOC, DK_STABS,
DK_CFI_SECTIONS, DK_CFI_STARTPROC, DK_CFI_ENDPROC, DK_CFI_DEF_CFA,
@@ -357,7 +356,9 @@ private:
DK_CFI_RESTORE, DK_CFI_ESCAPE, DK_CFI_SIGNAL_FRAME, DK_CFI_UNDEFINED,
DK_CFI_REGISTER, DK_CFI_WINDOW_SAVE,
DK_MACROS_ON, DK_MACROS_OFF, DK_MACRO, DK_ENDM, DK_ENDMACRO, DK_PURGEM,
- DK_SLEB128, DK_ULEB128
+ DK_SLEB128, DK_ULEB128,
+ DK_ERR, DK_ERROR,
+ DK_END
};
/// \brief Maps directive name --> DirectiveKind enum, for
@@ -367,6 +368,7 @@ private:
// ".ascii", ".asciz", ".string"
bool parseDirectiveAscii(StringRef IDVal, bool ZeroTerminated);
bool parseDirectiveValue(unsigned Size); // ".byte", ".long", ...
+ bool parseDirectiveOctaValue(); // ".octa"
bool parseDirectiveRealValue(const fltSemantics &); // ".single", ...
bool parseDirectiveFill(); // ".fill"
bool parseDirectiveZero(); // ".zero"
@@ -432,17 +434,20 @@ private:
bool parseDirectiveInclude(); // ".include"
bool parseDirectiveIncbin(); // ".incbin"
- bool parseDirectiveIf(SMLoc DirectiveLoc); // ".if"
+ // ".if", ".ifeq", ".ifge", ".ifgt" , ".ifle", ".iflt" or ".ifne"
+ bool parseDirectiveIf(SMLoc DirectiveLoc, DirectiveKind DirKind);
// ".ifb" or ".ifnb", depending on ExpectBlank.
bool parseDirectiveIfb(SMLoc DirectiveLoc, bool ExpectBlank);
// ".ifc" or ".ifnc", depending on ExpectEqual.
bool parseDirectiveIfc(SMLoc DirectiveLoc, bool ExpectEqual);
+ // ".ifeqs"
+ bool parseDirectiveIfeqs(SMLoc DirectiveLoc);
// ".ifdef" or ".ifndef", depending on expect_defined
bool parseDirectiveIfdef(SMLoc DirectiveLoc, bool expect_defined);
bool parseDirectiveElseIf(SMLoc DirectiveLoc); // ".elseif"
bool parseDirectiveElse(SMLoc DirectiveLoc); // ".else"
bool parseDirectiveEndIf(SMLoc DirectiveLoc); // .endif
- virtual bool parseEscapedString(std::string &Data);
+ bool parseEscapedString(std::string &Data) override;
const MCExpr *applyModifierToExpr(const MCExpr *E,
MCSymbolRefExpr::VariantKind Variant);
@@ -451,7 +456,7 @@ private:
MCAsmMacro *parseMacroLikeBody(SMLoc DirectiveLoc);
void instantiateMacroLikeBody(MCAsmMacro *M, SMLoc DirectiveLoc,
raw_svector_ostream &OS);
- bool parseDirectiveRept(SMLoc DirectiveLoc); // ".rept"
+ bool parseDirectiveRept(SMLoc DirectiveLoc, StringRef Directive);
bool parseDirectiveIrp(SMLoc DirectiveLoc); // ".irp"
bool parseDirectiveIrpc(SMLoc DirectiveLoc); // ".irpc"
bool parseDirectiveEndr(SMLoc DirectiveLoc); // ".endr"
@@ -463,6 +468,12 @@ private:
// "align"
bool parseDirectiveMSAlign(SMLoc DirectiveLoc, ParseStatementInfo &Info);
+ // "end"
+ bool parseDirectiveEnd(SMLoc DirectiveLoc);
+
+ // ".err" or ".error"
+ bool parseDirectiveError(SMLoc DirectiveLoc, bool WithMessage);
+
void initializeDirectiveKindMap();
};
}
@@ -480,37 +491,39 @@ enum { DEFAULT_ADDRSPACE = 0 };
AsmParser::AsmParser(SourceMgr &_SM, MCContext &_Ctx, MCStreamer &_Out,
const MCAsmInfo &_MAI)
: Lexer(_MAI), Ctx(_Ctx), Out(_Out), MAI(_MAI), SrcMgr(_SM),
- PlatformParser(0), CurBuffer(0), MacrosEnabledFlag(true),
- CppHashLineNumber(0), AssemblerDialect(~0U), IsDarwin(false),
- ParsingInlineAsm(false) {
+ PlatformParser(nullptr), CurBuffer(_SM.getMainFileID()),
+ MacrosEnabledFlag(true), HadError(false), CppHashLineNumber(0),
+ AssemblerDialect(~0U), IsDarwin(false), ParsingInlineAsm(false) {
// Save the old handler.
SavedDiagHandler = SrcMgr.getDiagHandler();
SavedDiagContext = SrcMgr.getDiagContext();
// Set our own handler which calls the saved handler.
SrcMgr.setDiagHandler(DiagHandler, this);
- Lexer.setBuffer(SrcMgr.getMemoryBuffer(CurBuffer));
+ Lexer.setBuffer(SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer());
// Initialize the platform / file format parser.
- //
- // FIXME: This is a hack, we need to (majorly) cleanup how these objects are
- // created.
- if (_MAI.hasMicrosoftFastStdCallMangling()) {
- PlatformParser = createCOFFAsmParser();
- PlatformParser->Initialize(*this);
- } else if (_MAI.hasSubsectionsViaSymbols()) {
- PlatformParser = createDarwinAsmParser();
- PlatformParser->Initialize(*this);
- IsDarwin = true;
- } else {
- PlatformParser = createELFAsmParser();
- PlatformParser->Initialize(*this);
+ switch (_Ctx.getObjectFileInfo()->getObjectFileType()) {
+ case MCObjectFileInfo::IsCOFF:
+ PlatformParser = createCOFFAsmParser();
+ PlatformParser->Initialize(*this);
+ break;
+ case MCObjectFileInfo::IsMachO:
+ PlatformParser = createDarwinAsmParser();
+ PlatformParser->Initialize(*this);
+ IsDarwin = true;
+ break;
+ case MCObjectFileInfo::IsELF:
+ PlatformParser = createELFAsmParser();
+ PlatformParser->Initialize(*this);
+ break;
}
initializeDirectiveKindMap();
}
AsmParser::~AsmParser() {
- assert(ActiveMacros.empty() && "Unexpected active macro instantiation!");
+ assert((HadError || ActiveMacros.empty()) &&
+ "Unexpected active macro instantiation!");
// Destroy any macros.
for (StringMap<MCAsmMacro *>::iterator it = MacroMap.begin(),
@@ -531,6 +544,11 @@ void AsmParser::printMacroInstantiations() {
"while in macro instantiation");
}
+void AsmParser::Note(SMLoc L, const Twine &Msg, ArrayRef<SMRange> Ranges) {
+ printMessage(L, SourceMgr::DK_Note, Msg, Ranges);
+ printMacroInstantiations();
+}
+
bool AsmParser::Warning(SMLoc L, const Twine &Msg, ArrayRef<SMRange> Ranges) {
if (FatalAssemblerWarnings)
return Error(L, Msg, Ranges);
@@ -548,14 +566,13 @@ bool AsmParser::Error(SMLoc L, const Twine &Msg, ArrayRef<SMRange> Ranges) {
bool AsmParser::enterIncludeFile(const std::string &Filename) {
std::string IncludedFile;
- int NewBuf = SrcMgr.AddIncludeFile(Filename, Lexer.getLoc(), IncludedFile);
- if (NewBuf == -1)
+ unsigned NewBuf =
+ SrcMgr.AddIncludeFile(Filename, Lexer.getLoc(), IncludedFile);
+ if (!NewBuf)
return true;
CurBuffer = NewBuf;
-
- Lexer.setBuffer(SrcMgr.getMemoryBuffer(CurBuffer));
-
+ Lexer.setBuffer(SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer());
return false;
}
@@ -564,8 +581,9 @@ bool AsmParser::enterIncludeFile(const std::string &Filename) {
/// returns true on failure.
bool AsmParser::processIncbinFile(const std::string &Filename) {
std::string IncludedFile;
- int NewBuf = SrcMgr.AddIncludeFile(Filename, Lexer.getLoc(), IncludedFile);
- if (NewBuf == -1)
+ unsigned NewBuf =
+ SrcMgr.AddIncludeFile(Filename, Lexer.getLoc(), IncludedFile);
+ if (!NewBuf)
return true;
// Pick up the bytes from the file and emit them.
@@ -573,13 +591,10 @@ bool AsmParser::processIncbinFile(const std::string &Filename) {
return false;
}
-void AsmParser::jumpToLoc(SMLoc Loc, int InBuffer) {
- if (InBuffer != -1) {
- CurBuffer = InBuffer;
- } else {
- CurBuffer = SrcMgr.FindBufferContainingLoc(Loc);
- }
- Lexer.setBuffer(SrcMgr.getMemoryBuffer(CurBuffer), Loc.getPointer());
+void AsmParser::jumpToLoc(SMLoc Loc, unsigned InBuffer) {
+ CurBuffer = InBuffer ? InBuffer : SrcMgr.FindBufferContainingLoc(Loc);
+ Lexer.setBuffer(SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer(),
+ Loc.getPointer());
}
const AsmToken &AsmParser::Lex() {
@@ -615,13 +630,14 @@ bool AsmParser::Run(bool NoInitialTextSection, bool NoFinalize) {
// If we are generating dwarf for assembly source files save the initial text
// section and generate a .file directive.
if (getContext().getGenDwarfForAssembly()) {
- getContext().setGenDwarfSection(getStreamer().getCurrentSection().first);
MCSymbol *SectionStartSym = getContext().CreateTempSymbol();
getStreamer().EmitLabel(SectionStartSym);
- getContext().setGenDwarfSectionStartSym(SectionStartSym);
- getStreamer().EmitDwarfFileDirective(getContext().nextGenDwarfFileNumber(),
- StringRef(),
- getContext().getMainFileName());
+ auto InsertResult = getContext().addGenDwarfSection(
+ getStreamer().getCurrentSection().first);
+ assert(InsertResult.second && ".text section should not have debug info yet");
+ InsertResult.first->second.first = SectionStartSym;
+ getContext().setGenDwarfFileNumber(getStreamer().EmitDwarfFileDirective(
+ 0, StringRef(), getContext().getMainFileName()));
}
// While we have input, parse each statement.
@@ -641,11 +657,15 @@ bool AsmParser::Run(bool NoInitialTextSection, bool NoFinalize) {
return TokError("unmatched .ifs or .elses");
// Check to see there are no empty DwarfFile slots.
- const SmallVectorImpl<MCDwarfFile *> &MCDwarfFiles =
- getContext().getMCDwarfFiles();
- for (unsigned i = 1; i < MCDwarfFiles.size(); i++) {
- if (!MCDwarfFiles[i])
- TokError("unassigned file number: " + Twine(i) + " for .file directives");
+ const auto &LineTables = getContext().getMCDwarfLineTables();
+ if (!LineTables.empty()) {
+ unsigned Index = 0;
+ for (const auto &File : LineTables.begin()->second.getMCDwarfFiles()) {
+ if (File.Name.empty() && Index != 0)
+ TokError("unassigned file number: " + Twine(Index) +
+ " for .file directives");
+ ++Index;
+ }
}
// Check to see that all assembler local symbols were actually defined.
@@ -682,7 +702,7 @@ bool AsmParser::Run(bool NoInitialTextSection, bool NoFinalize) {
void AsmParser::checkForValidSection() {
if (!ParsingInlineAsm && !getStreamer().getCurrentSection().first) {
TokError("expected section directive before assembly directive");
- Out.InitToTextSection();
+ Out.InitSections();
}
}
@@ -784,29 +804,54 @@ bool AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
getContext());
EndLoc = FirstTokenLoc;
return false;
- } else
- return Error(FirstTokenLoc, "invalid token in expression");
- return true;
+ }
+ return Error(FirstTokenLoc, "invalid token in expression");
}
}
+ // Parse symbol variant
+ std::pair<StringRef, StringRef> Split;
+ if (!MAI.useParensForSymbolVariant()) {
+ if (FirstTokenKind == AsmToken::String) {
+ if (Lexer.is(AsmToken::At)) {
+ Lexer.Lex(); // eat @
+ SMLoc AtLoc = getLexer().getLoc();
+ StringRef VName;
+ if (parseIdentifier(VName))
+ return Error(AtLoc, "expected symbol variant after '@'");
+
+ Split = std::make_pair(Identifier, VName);
+ }
+ } else {
+ Split = Identifier.split('@');
+ }
+ } else if (Lexer.is(AsmToken::LParen)) {
+ Lexer.Lex(); // eat (
+ StringRef VName;
+ parseIdentifier(VName);
+ if (Lexer.isNot(AsmToken::RParen)) {
+ return Error(Lexer.getTok().getLoc(),
+ "unexpected token in variant, expected ')'");
+ }
+ Lexer.Lex(); // eat )
+ Split = std::make_pair(Identifier, VName);
+ }
EndLoc = SMLoc::getFromPointer(Identifier.end());
// This is a symbol reference.
StringRef SymbolName = Identifier;
MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
- std::pair<StringRef, StringRef> Split = Identifier.split('@');
// Lookup the symbol variant if used.
- if (Split.first.size() != Identifier.size()) {
+ if (Split.second.size()) {
Variant = MCSymbolRefExpr::getVariantKindForName(Split.second);
if (Variant != MCSymbolRefExpr::VK_Invalid) {
SymbolName = Split.first;
- } else if (MAI.doesAllowAtInName()) {
+ } else if (MAI.doesAllowAtInName() && !MAI.useParensForSymbolVariant()) {
Variant = MCSymbolRefExpr::VK_None;
} else {
- Variant = MCSymbolRefExpr::VK_None;
- return TokError("invalid variant '" + Split.second + "'");
+ return Error(SMLoc::getFromPointer(Split.second.begin()),
+ "invalid variant '" + Split.second + "'");
}
}
@@ -826,6 +871,8 @@ bool AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
Res = MCSymbolRefExpr::Create(Sym, Variant, getContext());
return false;
}
+ case AsmToken::BigNum:
+ return TokError("literal value out of range for directive");
case AsmToken::Integer: {
SMLoc Loc = getTok().getLoc();
int64_t IntVal = getTok().getIntVal();
@@ -840,15 +887,13 @@ bool AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
if (Split.first.size() != IDVal.size()) {
Variant = MCSymbolRefExpr::getVariantKindForName(Split.second);
- if (Variant == MCSymbolRefExpr::VK_Invalid) {
- Variant = MCSymbolRefExpr::VK_None;
+ if (Variant == MCSymbolRefExpr::VK_Invalid)
return TokError("invalid variant '" + Split.second + "'");
- }
IDVal = Split.first;
}
if (IDVal == "f" || IDVal == "b") {
MCSymbol *Sym =
- Ctx.GetDirectionalLocalSymbol(IntVal, IDVal == "f" ? 1 : 0);
+ Ctx.GetDirectionalLocalSymbol(IntVal, IDVal == "b");
Res = MCSymbolRefExpr::Create(Sym, Variant, getContext());
if (IDVal == "b" && Sym->isUndefined())
return Error(Loc, "invalid reference to undefined symbol");
@@ -922,7 +967,7 @@ AsmParser::applyModifierToExpr(const MCExpr *E,
switch (E->getKind()) {
case MCExpr::Target:
case MCExpr::Constant:
- return 0;
+ return nullptr;
case MCExpr::SymbolRef: {
const MCSymbolRefExpr *SRE = cast<MCSymbolRefExpr>(E);
@@ -940,7 +985,7 @@ AsmParser::applyModifierToExpr(const MCExpr *E,
const MCUnaryExpr *UE = cast<MCUnaryExpr>(E);
const MCExpr *Sub = applyModifierToExpr(UE->getSubExpr(), Variant);
if (!Sub)
- return 0;
+ return nullptr;
return MCUnaryExpr::Create(UE->getOpcode(), Sub, getContext());
}
@@ -950,7 +995,7 @@ AsmParser::applyModifierToExpr(const MCExpr *E,
const MCExpr *RHS = applyModifierToExpr(BE->getRHS(), Variant);
if (!LHS && !RHS)
- return 0;
+ return nullptr;
if (!LHS)
LHS = BE->getLHS();
@@ -976,7 +1021,7 @@ AsmParser::applyModifierToExpr(const MCExpr *E,
///
bool AsmParser::parseExpression(const MCExpr *&Res, SMLoc &EndLoc) {
// Parse the expression.
- Res = 0;
+ Res = nullptr;
if (parsePrimaryExpr(Res, EndLoc) || parseBinOpRHS(1, Res, EndLoc))
return true;
@@ -1013,7 +1058,7 @@ bool AsmParser::parseExpression(const MCExpr *&Res, SMLoc &EndLoc) {
}
bool AsmParser::parseParenExpression(const MCExpr *&Res, SMLoc &EndLoc) {
- Res = 0;
+ Res = nullptr;
return parseParenExpr(Res, EndLoc) || parseBinOpRHS(1, Res, EndLoc);
}
@@ -1196,13 +1241,21 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info) {
default:
break;
case DK_IF:
- return parseDirectiveIf(IDLoc);
+ case DK_IFEQ:
+ case DK_IFGE:
+ case DK_IFGT:
+ case DK_IFLE:
+ case DK_IFLT:
+ case DK_IFNE:
+ return parseDirectiveIf(IDLoc, DirKind);
case DK_IFB:
return parseDirectiveIfb(IDLoc, true);
case DK_IFNB:
return parseDirectiveIfb(IDLoc, false);
case DK_IFC:
return parseDirectiveIfc(IDLoc, true);
+ case DK_IFEQS:
+ return parseDirectiveIfeqs(IDLoc);
case DK_IFNC:
return parseDirectiveIfc(IDLoc, false);
case DK_IFDEF:
@@ -1311,7 +1364,7 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info) {
if (!getTargetParser().ParseDirective(ID))
return false;
- // Next, check the extention directive map to see if any extension has
+ // Next, check the extension directive map to see if any extension has
// registered itself to parse this directive.
std::pair<MCAsmParserExtension *, DirectiveHandler> Handler =
ExtensionDirectiveMap.lookup(IDVal);
@@ -1346,6 +1399,8 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info) {
case DK_QUAD:
case DK_8BYTE:
return parseDirectiveValue(8);
+ case DK_OCTA:
+ return parseDirectiveOctaValue();
case DK_SINGLE:
case DK_FLOAT:
return parseDirectiveRealValue(APFloat::IEEEsingle);
@@ -1414,7 +1469,7 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info) {
case DK_CODE16GCC:
return TokError(Twine(IDVal) + " not supported yet");
case DK_REPT:
- return parseDirectiveRept(IDLoc);
+ return parseDirectiveRept(IDLoc, IDVal);
case DK_IRP:
return parseDirectiveIrp(IDLoc);
case DK_IRPC:
@@ -1492,6 +1547,12 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info) {
return parseDirectiveEndMacro(IDVal);
case DK_PURGEM:
return parseDirectivePurgeMacro(IDLoc);
+ case DK_END:
+ return parseDirectiveEnd(IDLoc);
+ case DK_ERR:
+ return parseDirectiveError(IDLoc, false);
+ case DK_ERROR:
+ return parseDirectiveError(IDLoc, true);
}
return Error(IDLoc, "unknown directive");
@@ -1530,26 +1591,21 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info) {
printMessage(IDLoc, SourceMgr::DK_Note, OS.str());
}
- // If we are generating dwarf for assembly source files and the current
- // section is the initial text section then generate a .loc directive for
- // the instruction.
+ // If we are generating dwarf for the current section then generate a .loc
+ // directive for the instruction.
if (!HadError && getContext().getGenDwarfForAssembly() &&
- getContext().getGenDwarfSection() ==
- getStreamer().getCurrentSection().first) {
+ getContext().getGenDwarfSectionSyms().count(
+ getStreamer().getCurrentSection().first)) {
unsigned Line = SrcMgr.FindLineNumber(IDLoc, CurBuffer);
    // If we previously parsed a cpp hash file line comment, then make sure
    // the current Dwarf File is for the CppHashFilename; if not, emit the
    // Dwarf File table for it and adjust the line number for the .loc.
- const SmallVectorImpl<MCDwarfFile *> &MCDwarfFiles =
- getContext().getMCDwarfFiles();
if (CppHashFilename.size() != 0) {
- if (MCDwarfFiles[getContext().getGenDwarfFileNumber()]->getName() !=
- CppHashFilename)
- getStreamer().EmitDwarfFileDirective(
- getContext().nextGenDwarfFileNumber(), StringRef(),
- CppHashFilename);
+ unsigned FileNumber = getStreamer().EmitDwarfFileDirective(
+ 0, StringRef(), CppHashFilename);
+ getContext().setGenDwarfFileNumber(FileNumber);
// Since SrcMgr.FindLineNumber() is slow and messes up the SourceMgr's
// cache with the different Loc from the call above we save the last
@@ -1575,9 +1631,9 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info) {
// If parsing succeeded, match the instruction.
if (!HadError) {
unsigned ErrorInfo;
- HadError = getTargetParser().MatchAndEmitInstruction(
- IDLoc, Info.Opcode, Info.ParsedOperands, Out, ErrorInfo,
- ParsingInlineAsm);
+ getTargetParser().MatchAndEmitInstruction(IDLoc, Info.Opcode,
+ Info.ParsedOperands, Out,
+ ErrorInfo, ParsingInlineAsm);
}
// Don't skip the rest of the line, the instruction parser is responsible for
@@ -1638,13 +1694,15 @@ void AsmParser::DiagHandler(const SMDiagnostic &Diag, void *Context) {
const SourceMgr &DiagSrcMgr = *Diag.getSourceMgr();
const SMLoc &DiagLoc = Diag.getLoc();
- int DiagBuf = DiagSrcMgr.FindBufferContainingLoc(DiagLoc);
- int CppHashBuf = Parser->SrcMgr.FindBufferContainingLoc(Parser->CppHashLoc);
+ unsigned DiagBuf = DiagSrcMgr.FindBufferContainingLoc(DiagLoc);
+ unsigned CppHashBuf =
+ Parser->SrcMgr.FindBufferContainingLoc(Parser->CppHashLoc);
// Like SourceMgr::printMessage() we need to print the include stack if any
// before printing the message.
- int DiagCurBuffer = DiagSrcMgr.FindBufferContainingLoc(DiagLoc);
- if (!Parser->SavedDiagHandler && DiagCurBuffer > 0) {
+ unsigned DiagCurBuffer = DiagSrcMgr.FindBufferContainingLoc(DiagLoc);
+ if (!Parser->SavedDiagHandler && DiagCurBuffer &&
+ DiagCurBuffer != DiagSrcMgr.getMainFileID()) {
SMLoc ParentIncludeLoc = DiagSrcMgr.getParentIncludeLoc(DiagCurBuffer);
DiagSrcMgr.PrintIncludeStack(ParentIncludeLoc, OS);
}
@@ -1657,7 +1715,7 @@ void AsmParser::DiagHandler(const SMDiagnostic &Diag, void *Context) {
if (Parser->SavedDiagHandler)
Parser->SavedDiagHandler(Diag, Parser->SavedDiagContext);
else
- Diag.print(0, OS);
+ Diag.print(nullptr, OS);
return;
}
@@ -1679,7 +1737,7 @@ void AsmParser::DiagHandler(const SMDiagnostic &Diag, void *Context) {
if (Parser->SavedDiagHandler)
Parser->SavedDiagHandler(NewDiag, Parser->SavedDiagContext);
else
- NewDiag.print(0, OS);
+ NewDiag.print(nullptr, OS);
}
// FIXME: This is mostly duplicated from the function in AsmLexer.cpp. The
@@ -1692,9 +1750,10 @@ static bool isIdentifierChar(char c) {
}
bool AsmParser::expandMacro(raw_svector_ostream &OS, StringRef Body,
- const MCAsmMacroParameters &Parameters,
- const MCAsmMacroArguments &A, const SMLoc &L) {
+ ArrayRef<MCAsmMacroParameter> Parameters,
+ ArrayRef<MCAsmMacroArgument> A, const SMLoc &L) {
unsigned NParameters = Parameters.size();
+ bool HasVararg = NParameters ? Parameters.back().Vararg : false;
if ((!IsDarwin || NParameters != 0) && NParameters != A.size())
return Error(L, "Wrong number of arguments");
@@ -1765,7 +1824,7 @@ bool AsmParser::expandMacro(raw_svector_ostream &OS, StringRef Body,
StringRef Argument(Begin, I - (Pos + 1));
unsigned Index = 0;
for (; Index < NParameters; ++Index)
- if (Parameters[Index].first == Argument)
+ if (Parameters[Index].Name == Argument)
break;
if (Index == NParameters) {
@@ -1776,13 +1835,16 @@ bool AsmParser::expandMacro(raw_svector_ostream &OS, StringRef Body,
Pos = I;
}
} else {
+ bool VarargParameter = HasVararg && Index == (NParameters - 1);
for (MCAsmMacroArgument::const_iterator it = A[Index].begin(),
ie = A[Index].end();
it != ie; ++it)
- if (it->getKind() == AsmToken::String)
- OS << it->getStringContents();
- else
+ // We expect no quotes around the string's contents when
+ // parsing for varargs.
+ if (it->getKind() != AsmToken::String || VarargParameter)
OS << it->getString();
+ else
+ OS << it->getStringContents();
Pos += 1 + Argument.size();
}
@@ -1830,31 +1892,44 @@ static bool isOperator(AsmToken::TokenKind kind) {
}
}
-bool AsmParser::parseMacroArgument(MCAsmMacroArgument &MA,
- AsmToken::TokenKind &ArgumentDelimiter) {
+namespace {
+class AsmLexerSkipSpaceRAII {
+public:
+ AsmLexerSkipSpaceRAII(AsmLexer &Lexer, bool SkipSpace) : Lexer(Lexer) {
+ Lexer.setSkipSpace(SkipSpace);
+ }
+
+ ~AsmLexerSkipSpaceRAII() {
+ Lexer.setSkipSpace(true);
+ }
+
+private:
+ AsmLexer &Lexer;
+};
+}
+
+bool AsmParser::parseMacroArgument(MCAsmMacroArgument &MA, bool Vararg) {
+
+ if (Vararg) {
+ if (Lexer.isNot(AsmToken::EndOfStatement)) {
+ StringRef Str = parseStringToEndOfStatement();
+ MA.push_back(AsmToken(AsmToken::String, Str));
+ }
+ return false;
+ }
+
unsigned ParenLevel = 0;
unsigned AddTokens = 0;
- // gas accepts arguments separated by whitespace, except on Darwin
- if (!IsDarwin)
- Lexer.setSkipSpace(false);
+  // Darwin doesn't use spaces to delimit arguments.
+ AsmLexerSkipSpaceRAII ScopedSkipSpace(Lexer, IsDarwin);
for (;;) {
- if (Lexer.is(AsmToken::Eof) || Lexer.is(AsmToken::Equal)) {
- Lexer.setSkipSpace(true);
+ if (Lexer.is(AsmToken::Eof) || Lexer.is(AsmToken::Equal))
return TokError("unexpected token in macro instantiation");
- }
- if (ParenLevel == 0 && Lexer.is(AsmToken::Comma)) {
- // Spaces and commas cannot be mixed to delimit parameters
- if (ArgumentDelimiter == AsmToken::Eof)
- ArgumentDelimiter = AsmToken::Comma;
- else if (ArgumentDelimiter != AsmToken::Comma) {
- Lexer.setSkipSpace(true);
- return TokError("expected ' ' for macro argument separator");
- }
+ if (ParenLevel == 0 && Lexer.is(AsmToken::Comma))
break;
- }
if (Lexer.is(AsmToken::Space)) {
Lex(); // Eat spaces
@@ -1862,8 +1937,7 @@ bool AsmParser::parseMacroArgument(MCAsmMacroArgument &MA,
// Spaces can delimit parameters, but could also be part of an expression.
// If the token after a space is an operator, add the token and the next
// one into this argument
- if (ArgumentDelimiter == AsmToken::Space ||
- ArgumentDelimiter == AsmToken::Eof) {
+ if (!IsDarwin) {
if (isOperator(Lexer.getKind())) {
// Check to see whether the token is used as an operator,
// or part of an identifier
@@ -1873,9 +1947,6 @@ bool AsmParser::parseMacroArgument(MCAsmMacroArgument &MA,
}
if (!AddTokens && ParenLevel == 0) {
- if (ArgumentDelimiter == AsmToken::Eof &&
- !isOperator(Lexer.getKind()))
- ArgumentDelimiter = AsmToken::Space;
break;
}
}
@@ -1899,7 +1970,6 @@ bool AsmParser::parseMacroArgument(MCAsmMacroArgument &MA,
Lex();
}
- Lexer.setSkipSpace(true);
if (ParenLevel != 0)
return TokError("unbalanced parentheses in macro argument");
return false;
@@ -1909,51 +1979,107 @@ bool AsmParser::parseMacroArgument(MCAsmMacroArgument &MA,
bool AsmParser::parseMacroArguments(const MCAsmMacro *M,
MCAsmMacroArguments &A) {
const unsigned NParameters = M ? M->Parameters.size() : 0;
- // Argument delimiter is initially unknown. It will be set by
- // parseMacroArgument()
- AsmToken::TokenKind ArgumentDelimiter = AsmToken::Eof;
+ bool NamedParametersFound = false;
+ SmallVector<SMLoc, 4> FALocs;
+
+ A.resize(NParameters);
+ FALocs.resize(NParameters);
// Parse two kinds of macro invocations:
// - macros defined without any parameters accept an arbitrary number of them
// - macros defined with parameters accept at most that many of them
+ bool HasVararg = NParameters ? M->Parameters.back().Vararg : false;
for (unsigned Parameter = 0; !NParameters || Parameter < NParameters;
++Parameter) {
- MCAsmMacroArgument MA;
+ SMLoc IDLoc = Lexer.getLoc();
+ MCAsmMacroParameter FA;
+
+ if (Lexer.is(AsmToken::Identifier) && Lexer.peekTok().is(AsmToken::Equal)) {
+ if (parseIdentifier(FA.Name)) {
+ Error(IDLoc, "invalid argument identifier for formal argument");
+ eatToEndOfStatement();
+ return true;
+ }
+
+ if (!Lexer.is(AsmToken::Equal)) {
+ TokError("expected '=' after formal parameter identifier");
+ eatToEndOfStatement();
+ return true;
+ }
+ Lex();
+
+ NamedParametersFound = true;
+ }
+
+ if (NamedParametersFound && FA.Name.empty()) {
+ Error(IDLoc, "cannot mix positional and keyword arguments");
+ eatToEndOfStatement();
+ return true;
+ }
- if (parseMacroArgument(MA, ArgumentDelimiter))
+ bool Vararg = HasVararg && Parameter == (NParameters - 1);
+ if (parseMacroArgument(FA.Value, Vararg))
return true;
- if (!MA.empty() || !NParameters)
- A.push_back(MA);
- else if (NParameters) {
- if (!M->Parameters[Parameter].second.empty())
- A.push_back(M->Parameters[Parameter].second);
+ unsigned PI = Parameter;
+ if (!FA.Name.empty()) {
+ unsigned FAI = 0;
+ for (FAI = 0; FAI < NParameters; ++FAI)
+ if (M->Parameters[FAI].Name == FA.Name)
+ break;
+
+ if (FAI >= NParameters) {
+ assert(M && "expected macro to be defined");
+ Error(IDLoc,
+ "parameter named '" + FA.Name + "' does not exist for macro '" +
+ M->Name + "'");
+ return true;
+ }
+ PI = FAI;
+ }
+
+ if (!FA.Value.empty()) {
+ if (A.size() <= PI)
+ A.resize(PI + 1);
+ A[PI] = FA.Value;
+
+ if (FALocs.size() <= PI)
+ FALocs.resize(PI + 1);
+
+ FALocs[PI] = Lexer.getLoc();
}
// At the end of the statement, fill in remaining arguments that have
// default values. If there aren't any, then the next argument is
// required but missing
if (Lexer.is(AsmToken::EndOfStatement)) {
- if (NParameters && Parameter < NParameters - 1) {
- if (M->Parameters[Parameter + 1].second.empty())
- return TokError("macro argument '" +
- Twine(M->Parameters[Parameter + 1].first) +
- "' is missing");
- else
- continue;
+ bool Failure = false;
+ for (unsigned FAI = 0; FAI < NParameters; ++FAI) {
+ if (A[FAI].empty()) {
+ if (M->Parameters[FAI].Required) {
+ Error(FALocs[FAI].isValid() ? FALocs[FAI] : Lexer.getLoc(),
+ "missing value for required parameter "
+ "'" + M->Parameters[FAI].Name + "' in macro '" + M->Name + "'");
+ Failure = true;
+ }
+
+ if (!M->Parameters[FAI].Value.empty())
+ A[FAI] = M->Parameters[FAI].Value;
+ }
}
- return false;
+ return Failure;
}
if (Lexer.is(AsmToken::Comma))
Lex();
}
- return TokError("Too many arguments");
+
+ return TokError("too many positional arguments");
}
const MCAsmMacro *AsmParser::lookupMacro(StringRef Name) {
StringMap<MCAsmMacro *>::iterator I = MacroMap.find(Name);
- return (I == MacroMap.end()) ? NULL : I->getValue();
+ return (I == MacroMap.end()) ? nullptr : I->getValue();
}
void AsmParser::defineMacro(StringRef Name, const MCAsmMacro &Macro) {
@@ -1978,12 +2104,6 @@ bool AsmParser::handleMacroEntry(const MCAsmMacro *M, SMLoc NameLoc) {
if (parseMacroArguments(M, A))
return true;
- // Remove any trailing empty arguments. Do this after-the-fact as we have
- // to keep empty arguments in the middle of the list or positionality
- // gets off. e.g., "foo 1, , 2" vs. "foo 1, 2,"
- while (!A.empty() && A.back().empty())
- A.pop_back();
-
// Macro instantiation is lexical, unfortunately. We construct a new buffer
// to hold the macro body with substitutions.
SmallString<256> Buf;
@@ -2008,7 +2128,7 @@ bool AsmParser::handleMacroEntry(const MCAsmMacro *M, SMLoc NameLoc) {
// Jump to the macro instantiation and prime the lexer.
CurBuffer = SrcMgr.AddNewSourceBuffer(MI->Instantiation, SMLoc());
- Lexer.setBuffer(SrcMgr.getMemoryBuffer(CurBuffer));
+ Lexer.setBuffer(SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer());
Lex();
return false;
@@ -2063,12 +2183,6 @@ bool AsmParser::parseAssignment(StringRef Name, bool allow_redef,
if (Lexer.isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in assignment");
- // Error on assignment to '.'.
- if (Name == ".") {
- return Error(EqualLoc, ("assignment to pseudo-symbol '.' is unsupported "
- "(use '.space' or '.org').)"));
- }
-
// Eat the end of statement marker.
Lex();
@@ -2096,11 +2210,15 @@ bool AsmParser::parseAssignment(StringRef Name, bool allow_redef,
// Don't count these checks as uses.
Sym->setUsed(false);
+ } else if (Name == ".") {
+ if (Out.EmitValueToOffset(Value, 0)) {
+ Error(EqualLoc, "expected absolute expression");
+ eatToEndOfStatement();
+ }
+ return false;
} else
Sym = getContext().GetOrCreateSymbol(Name);
- // FIXME: Handle '.'.
-
// Do the assignment.
Out.EmitAssignment(Sym, Value);
if (NoDeadStrip)
@@ -2275,7 +2393,57 @@ bool AsmParser::parseDirectiveValue(unsigned Size) {
return Error(ExprLoc, "literal value out of range for directive");
getStreamer().EmitIntValue(IntValue, Size);
} else
- getStreamer().EmitValue(Value, Size);
+ getStreamer().EmitValue(Value, Size, ExprLoc);
+
+ if (getLexer().is(AsmToken::EndOfStatement))
+ break;
+
+ // FIXME: Improve diagnostic.
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("unexpected token in directive");
+ Lex();
+ }
+ }
+
+ Lex();
+ return false;
+}
+
+/// ParseDirectiveOctaValue
+/// ::= .octa [ hexconstant (, hexconstant)* ]
+bool AsmParser::parseDirectiveOctaValue() {
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ checkForValidSection();
+
+ for (;;) {
+ if (Lexer.getKind() == AsmToken::Error)
+ return true;
+ if (Lexer.getKind() != AsmToken::Integer &&
+ Lexer.getKind() != AsmToken::BigNum)
+ return TokError("unknown token in expression");
+
+ SMLoc ExprLoc = getLexer().getLoc();
+ APInt IntValue = getTok().getAPIntVal();
+ Lex();
+
+ uint64_t hi, lo;
+ if (IntValue.isIntN(64)) {
+ hi = 0;
+ lo = IntValue.getZExtValue();
+ } else if (IntValue.isIntN(128)) {
+ // It might actually have more than 128 bits, but the top ones are zero.
+ hi = IntValue.getHiBits(IntValue.getBitWidth() - 64).getZExtValue();
+ lo = IntValue.getLoBits(64).getZExtValue();
+ } else
+ return Error(ExprLoc, "literal value out of range for directive");
+
+ if (MAI.isLittleEndian()) {
+ getStreamer().EmitIntValue(lo, 8);
+ getStreamer().EmitIntValue(hi, 8);
+ } else {
+ getStreamer().EmitIntValue(hi, 8);
+ getStreamer().EmitIntValue(lo, 8);
+ }
if (getLexer().is(AsmToken::EndOfStatement))
break;
@@ -2380,18 +2548,27 @@ bool AsmParser::parseDirectiveZero() {
bool AsmParser::parseDirectiveFill() {
checkForValidSection();
+ SMLoc RepeatLoc = getLexer().getLoc();
int64_t NumValues;
if (parseAbsoluteExpression(NumValues))
return true;
+ if (NumValues < 0) {
+ Warning(RepeatLoc,
+ "'.fill' directive with negative repeat count has no effect");
+ NumValues = 0;
+ }
+
int64_t FillSize = 1;
int64_t FillExpr = 0;
+ SMLoc SizeLoc, ExprLoc;
if (getLexer().isNot(AsmToken::EndOfStatement)) {
if (getLexer().isNot(AsmToken::Comma))
return TokError("unexpected token in '.fill' directive");
Lex();
+ SizeLoc = getLexer().getLoc();
if (parseAbsoluteExpression(FillSize))
return true;
@@ -2400,6 +2577,7 @@ bool AsmParser::parseDirectiveFill() {
return TokError("unexpected token in '.fill' directive");
Lex();
+ ExprLoc = getLexer().getLoc();
if (parseAbsoluteExpression(FillExpr))
return true;
@@ -2410,11 +2588,25 @@ bool AsmParser::parseDirectiveFill() {
}
}
- if (FillSize != 1 && FillSize != 2 && FillSize != 4 && FillSize != 8)
- return TokError("invalid '.fill' size, expected 1, 2, 4, or 8");
+ if (FillSize < 0) {
+ Warning(SizeLoc, "'.fill' directive with negative size has no effect");
+ NumValues = 0;
+ }
+ if (FillSize > 8) {
+ Warning(SizeLoc, "'.fill' directive with size greater than 8 has been truncated to 8");
+ FillSize = 8;
+ }
- for (uint64_t i = 0, e = NumValues; i != e; ++i)
- getStreamer().EmitIntValue(FillExpr, FillSize);
+ if (!isUInt<32>(FillExpr) && FillSize > 4)
+ Warning(ExprLoc, "'.fill' directive pattern has been truncated to 32-bits");
+
+ int64_t NonZeroFillSize = FillSize > 4 ? 4 : FillSize;
+ FillExpr &= ~0ULL >> (64 - NonZeroFillSize * 8);
+
+ for (uint64_t i = 0, e = NumValues; i != e; ++i) {
+ getStreamer().EmitIntValue(FillExpr, NonZeroFillSize);
+ getStreamer().EmitIntValue(0, FillSize - NonZeroFillSize);
+ }
return false;
}
@@ -2533,7 +2725,9 @@ bool AsmParser::parseDirectiveAlign(bool IsPow2, unsigned ValueSize) {
// Check whether we should use optimal code alignment for this .align
// directive.
- bool UseCodeAlign = getStreamer().getCurrentSection().first->UseCodeAlign();
+ const MCSection *Section = getStreamer().getCurrentSection().first;
+ assert(Section && "must have section to emit alignment");
+ bool UseCodeAlign = Section->UseCodeAlign();
if ((!HasFillExpr || Lexer.getMAI().getTextAlignFillValue() == FillExpr) &&
ValueSize == 1 && UseCodeAlign) {
getStreamer().EmitCodeAlignment(Alignment, MaxBytesToFill);
@@ -2597,7 +2791,8 @@ bool AsmParser::parseDirectiveFile(SMLoc DirectiveLoc) {
"input can't have .file dwarf directives when -g is "
"used to generate dwarf debug info for assembly code");
- if (getStreamer().EmitDwarfFileDirective(FileNumber, Directory, Filename))
+ if (getStreamer().EmitDwarfFileDirective(FileNumber, Directory, Filename) ==
+ 0)
Error(FileNumberLoc, "file number already allocated");
}
@@ -2763,9 +2958,14 @@ bool AsmParser::parseDirectiveCFISections() {
}
/// parseDirectiveCFIStartProc
-/// ::= .cfi_startproc
+/// ::= .cfi_startproc [simple]
bool AsmParser::parseDirectiveCFIStartProc() {
- getStreamer().EmitCFIStartProc();
+ StringRef Simple;
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ if (parseIdentifier(Simple) || Simple != "simple")
+ return TokError("unexpected token in .cfi_startproc directive");
+
+ getStreamer().EmitCFIStartProc(!Simple.empty());
return false;
}
@@ -3058,41 +3258,72 @@ bool AsmParser::parseDirectiveMacrosOnOff(StringRef Directive) {
}
/// parseDirectiveMacro
-/// ::= .macro name [parameters]
+/// ::= .macro name[,] [parameters]
bool AsmParser::parseDirectiveMacro(SMLoc DirectiveLoc) {
StringRef Name;
if (parseIdentifier(Name))
return TokError("expected identifier in '.macro' directive");
+ if (getLexer().is(AsmToken::Comma))
+ Lex();
+
MCAsmMacroParameters Parameters;
- // Argument delimiter is initially unknown. It will be set by
- // parseMacroArgument()
- AsmToken::TokenKind ArgumentDelimiter = AsmToken::Eof;
- if (getLexer().isNot(AsmToken::EndOfStatement)) {
- for (;;) {
- MCAsmMacroParameter Parameter;
- if (parseIdentifier(Parameter.first))
- return TokError("expected identifier in '.macro' directive");
+ while (getLexer().isNot(AsmToken::EndOfStatement)) {
+
+ if (Parameters.size() && Parameters.back().Vararg)
+ return Error(Lexer.getLoc(),
+ "Vararg parameter '" + Parameters.back().Name +
+ "' should be last one in the list of parameters.");
+
+ MCAsmMacroParameter Parameter;
+ if (parseIdentifier(Parameter.Name))
+ return TokError("expected identifier in '.macro' directive");
+
+ if (Lexer.is(AsmToken::Colon)) {
+ Lex(); // consume ':'
+
+ SMLoc QualLoc;
+ StringRef Qualifier;
+
+ QualLoc = Lexer.getLoc();
+ if (parseIdentifier(Qualifier))
+ return Error(QualLoc, "missing parameter qualifier for "
+ "'" + Parameter.Name + "' in macro '" + Name + "'");
+
+ if (Qualifier == "req")
+ Parameter.Required = true;
+ else if (Qualifier == "vararg" && !IsDarwin)
+ Parameter.Vararg = true;
+ else
+ return Error(QualLoc, Qualifier + " is not a valid parameter qualifier "
+ "for '" + Parameter.Name + "' in macro '" + Name + "'");
+ }
- if (getLexer().is(AsmToken::Equal)) {
- Lex();
- if (parseMacroArgument(Parameter.second, ArgumentDelimiter))
- return true;
- }
+ if (getLexer().is(AsmToken::Equal)) {
+ Lex();
- Parameters.push_back(Parameter);
+ SMLoc ParamLoc;
- if (getLexer().is(AsmToken::Comma))
- Lex();
- else if (getLexer().is(AsmToken::EndOfStatement))
- break;
+ ParamLoc = Lexer.getLoc();
+ if (parseMacroArgument(Parameter.Value, /*Vararg=*/false ))
+ return true;
+
+ if (Parameter.Required)
+ Warning(ParamLoc, "pointless default value for required parameter "
+ "'" + Parameter.Name + "' in macro '" + Name + "'");
}
+
+ Parameters.push_back(Parameter);
+
+ if (getLexer().is(AsmToken::Comma))
+ Lex();
}
// Eat the end of statement.
Lex();
AsmToken EndToken, StartToken = getTok();
+ unsigned MacroDepth = 0;
// Lex the macro definition.
for (;;) {
@@ -3101,15 +3332,25 @@ bool AsmParser::parseDirectiveMacro(SMLoc DirectiveLoc) {
return Error(DirectiveLoc, "no matching '.endmacro' in definition");
// Otherwise, check whether we have reached the .endmacro.
- if (getLexer().is(AsmToken::Identifier) &&
- (getTok().getIdentifier() == ".endm" ||
- getTok().getIdentifier() == ".endmacro")) {
- EndToken = getTok();
- Lex();
- if (getLexer().isNot(AsmToken::EndOfStatement))
- return TokError("unexpected token in '" + EndToken.getIdentifier() +
- "' directive");
- break;
+ if (getLexer().is(AsmToken::Identifier)) {
+ if (getTok().getIdentifier() == ".endm" ||
+ getTok().getIdentifier() == ".endmacro") {
+ if (MacroDepth == 0) { // Outermost macro.
+ EndToken = getTok();
+ Lex();
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '" + EndToken.getIdentifier() +
+ "' directive");
+ break;
+ } else {
+ // Otherwise we just found the end of an inner macro.
+ --MacroDepth;
+ }
+ } else if (getTok().getIdentifier() == ".macro") {
+ // We allow nested macros. Those aren't instantiated until the outermost
+ // macro is expanded so just ignore them for now.
+ ++MacroDepth;
+ }
}
// Otherwise, scan til the end of the statement.
@@ -3132,19 +3373,19 @@ bool AsmParser::parseDirectiveMacro(SMLoc DirectiveLoc) {
///
/// With the support added for named parameters there may be code out there that
/// is transitioning from positional parameters. In versions of gas that did
-/// not support named parameters they would be ignored on the macro defintion.
+/// not support named parameters they would be ignored on the macro definition.
/// But to support both styles of parameters this is not possible so if a macro
-/// defintion has named parameters but does not use them and has what appears
+/// definition has named parameters but does not use them and has what appears
/// to be positional parameters, strings like $1, $2, ... and $n, then issue a
/// warning that the positional parameters found in the body have no effect.
/// Hoping the developer will either remove the named parameters from the macro
-/// definiton so the positional parameters get used if that was what was
+/// definition so the positional parameters get used if that was what was
/// intended or change the macro to use the named parameters. It is possible
/// this warning will trigger when none of the named parameters are used
/// and the strings like $1 are in fact simply to be passed through unchanged.
void AsmParser::checkForBadMacro(SMLoc DirectiveLoc, StringRef Name,
StringRef Body,
- MCAsmMacroParameters Parameters) {
+ ArrayRef<MCAsmMacroParameter> Parameters) {
// If this macro is not defined with named parameters the warning we are
// checking for here doesn't apply.
unsigned NParameters = Parameters.size();
@@ -3206,7 +3447,7 @@ void AsmParser::checkForBadMacro(SMLoc DirectiveLoc, StringRef Name,
StringRef Argument(Begin, I - (Pos + 1));
unsigned Index = 0;
for (; Index < NParameters; ++Index)
- if (Parameters[Index].first == Argument)
+ if (Parameters[Index].Name == Argument)
break;
if (Index == NParameters) {
@@ -3569,8 +3810,8 @@ bool AsmParser::parseDirectiveIncbin() {
}
/// parseDirectiveIf
-/// ::= .if expression
-bool AsmParser::parseDirectiveIf(SMLoc DirectiveLoc) {
+/// ::= .if{,eq,ge,gt,le,lt,ne} expression
+bool AsmParser::parseDirectiveIf(SMLoc DirectiveLoc, DirectiveKind DirKind) {
TheCondStack.push_back(TheCondState);
TheCondState.TheCond = AsmCond::IfCond;
if (TheCondState.Ignore) {
@@ -3585,6 +3826,29 @@ bool AsmParser::parseDirectiveIf(SMLoc DirectiveLoc) {
Lex();
+ switch (DirKind) {
+ default:
+ llvm_unreachable("unsupported directive");
+ case DK_IF:
+ case DK_IFNE:
+ break;
+ case DK_IFEQ:
+ ExprValue = ExprValue == 0;
+ break;
+ case DK_IFGE:
+ ExprValue = ExprValue >= 0;
+ break;
+ case DK_IFGT:
+ ExprValue = ExprValue > 0;
+ break;
+ case DK_IFLE:
+ ExprValue = ExprValue <= 0;
+ break;
+ case DK_IFLT:
+ ExprValue = ExprValue < 0;
+ break;
+ }
+
TheCondState.CondMet = ExprValue;
TheCondState.Ignore = !TheCondState.CondMet;
}
@@ -3617,6 +3881,7 @@ bool AsmParser::parseDirectiveIfb(SMLoc DirectiveLoc, bool ExpectBlank) {
/// parseDirectiveIfc
/// ::= .ifc string1, string2
+/// ::= .ifnc string1, string2
bool AsmParser::parseDirectiveIfc(SMLoc DirectiveLoc, bool ExpectEqual) {
TheCondStack.push_back(TheCondState);
TheCondState.TheCond = AsmCond::IfCond;
@@ -3638,13 +3903,50 @@ bool AsmParser::parseDirectiveIfc(SMLoc DirectiveLoc, bool ExpectEqual) {
Lex();
- TheCondState.CondMet = ExpectEqual == (Str1 == Str2);
+ TheCondState.CondMet = ExpectEqual == (Str1.trim() == Str2.trim());
TheCondState.Ignore = !TheCondState.CondMet;
}
return false;
}
+/// parseDirectiveIfeqs
+/// ::= .ifeqs string1, string2
+bool AsmParser::parseDirectiveIfeqs(SMLoc DirectiveLoc) {
+ if (Lexer.isNot(AsmToken::String)) {
+ TokError("expected string parameter for '.ifeqs' directive");
+ eatToEndOfStatement();
+ return true;
+ }
+
+ StringRef String1 = getTok().getStringContents();
+ Lex();
+
+ if (Lexer.isNot(AsmToken::Comma)) {
+ TokError("expected comma after first string for '.ifeqs' directive");
+ eatToEndOfStatement();
+ return true;
+ }
+
+ Lex();
+
+ if (Lexer.isNot(AsmToken::String)) {
+ TokError("expected string parameter for '.ifeqs' directive");
+ eatToEndOfStatement();
+ return true;
+ }
+
+ StringRef String2 = getTok().getStringContents();
+ Lex();
+
+ TheCondStack.push_back(TheCondState);
+ TheCondState.TheCond = AsmCond::IfCond;
+ TheCondState.CondMet = String1 == String2;
+ TheCondState.Ignore = !TheCondState.CondMet;
+
+ return false;
+}
+
/// parseDirectiveIfdef
/// ::= .ifdef symbol
bool AsmParser::parseDirectiveIfdef(SMLoc DirectiveLoc, bool expect_defined) {
@@ -3663,9 +3965,9 @@ bool AsmParser::parseDirectiveIfdef(SMLoc DirectiveLoc, bool expect_defined) {
MCSymbol *Sym = getContext().LookupSymbol(Name);
if (expect_defined)
- TheCondState.CondMet = (Sym != NULL && !Sym->isUndefined());
+ TheCondState.CondMet = (Sym && !Sym->isUndefined());
else
- TheCondState.CondMet = (Sym == NULL || Sym->isUndefined());
+ TheCondState.CondMet = (!Sym || Sym->isUndefined());
TheCondState.Ignore = !TheCondState.CondMet;
}
@@ -3727,6 +4029,50 @@ bool AsmParser::parseDirectiveElse(SMLoc DirectiveLoc) {
return false;
}
+/// parseDirectiveEnd
+/// ::= .end
+bool AsmParser::parseDirectiveEnd(SMLoc DirectiveLoc) {
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '.end' directive");
+
+ Lex();
+
+ while (Lexer.isNot(AsmToken::Eof))
+ Lex();
+
+ return false;
+}
+
+/// parseDirectiveError
+/// ::= .err
+/// ::= .error [string]
+bool AsmParser::parseDirectiveError(SMLoc L, bool WithMessage) {
+ if (!TheCondStack.empty()) {
+ if (TheCondStack.back().Ignore) {
+ eatToEndOfStatement();
+ return false;
+ }
+ }
+
+ if (!WithMessage)
+ return Error(L, ".err encountered");
+
+ StringRef Message = ".error directive invoked in source file";
+ if (Lexer.isNot(AsmToken::EndOfStatement)) {
+ if (Lexer.isNot(AsmToken::String)) {
+ TokError(".error argument must be a string");
+ eatToEndOfStatement();
+ return true;
+ }
+
+ Message = getTok().getStringContents();
+ Lex();
+ }
+
+ Error(L, Message);
+ return true;
+}
+
/// parseDirectiveEndIf
/// ::= .endif
bool AsmParser::parseDirectiveEndIf(SMLoc DirectiveLoc) {
@@ -3762,6 +4108,7 @@ void AsmParser::initializeDirectiveKindMap() {
DirectiveKindMap[".4byte"] = DK_4BYTE;
DirectiveKindMap[".quad"] = DK_QUAD;
DirectiveKindMap[".8byte"] = DK_8BYTE;
+ DirectiveKindMap[".octa"] = DK_OCTA;
DirectiveKindMap[".single"] = DK_SINGLE;
DirectiveKindMap[".float"] = DK_FLOAT;
DirectiveKindMap[".double"] = DK_DOUBLE;
@@ -3796,6 +4143,7 @@ void AsmParser::initializeDirectiveKindMap() {
DirectiveKindMap[".code16"] = DK_CODE16;
DirectiveKindMap[".code16gcc"] = DK_CODE16GCC;
DirectiveKindMap[".rept"] = DK_REPT;
+ DirectiveKindMap[".rep"] = DK_REPT;
DirectiveKindMap[".irp"] = DK_IRP;
DirectiveKindMap[".irpc"] = DK_IRPC;
DirectiveKindMap[".endr"] = DK_ENDR;
@@ -3803,15 +4151,23 @@ void AsmParser::initializeDirectiveKindMap() {
DirectiveKindMap[".bundle_lock"] = DK_BUNDLE_LOCK;
DirectiveKindMap[".bundle_unlock"] = DK_BUNDLE_UNLOCK;
DirectiveKindMap[".if"] = DK_IF;
+ DirectiveKindMap[".ifeq"] = DK_IFEQ;
+ DirectiveKindMap[".ifge"] = DK_IFGE;
+ DirectiveKindMap[".ifgt"] = DK_IFGT;
+ DirectiveKindMap[".ifle"] = DK_IFLE;
+ DirectiveKindMap[".iflt"] = DK_IFLT;
+ DirectiveKindMap[".ifne"] = DK_IFNE;
DirectiveKindMap[".ifb"] = DK_IFB;
DirectiveKindMap[".ifnb"] = DK_IFNB;
DirectiveKindMap[".ifc"] = DK_IFC;
+ DirectiveKindMap[".ifeqs"] = DK_IFEQS;
DirectiveKindMap[".ifnc"] = DK_IFNC;
DirectiveKindMap[".ifdef"] = DK_IFDEF;
DirectiveKindMap[".ifndef"] = DK_IFNDEF;
DirectiveKindMap[".ifnotdef"] = DK_IFNOTDEF;
DirectiveKindMap[".elseif"] = DK_ELSEIF;
DirectiveKindMap[".else"] = DK_ELSE;
+ DirectiveKindMap[".end"] = DK_END;
DirectiveKindMap[".endif"] = DK_ENDIF;
DirectiveKindMap[".skip"] = DK_SKIP;
DirectiveKindMap[".space"] = DK_SPACE;
@@ -3847,6 +4203,8 @@ void AsmParser::initializeDirectiveKindMap() {
DirectiveKindMap[".endm"] = DK_ENDM;
DirectiveKindMap[".endmacro"] = DK_ENDMACRO;
DirectiveKindMap[".purgem"] = DK_PURGEM;
+ DirectiveKindMap[".err"] = DK_ERR;
+ DirectiveKindMap[".error"] = DK_ERROR;
}
MCAsmMacro *AsmParser::parseMacroLikeBody(SMLoc DirectiveLoc) {
@@ -3857,7 +4215,7 @@ MCAsmMacro *AsmParser::parseMacroLikeBody(SMLoc DirectiveLoc) {
// Check whether we have reached the end of the file.
if (getLexer().is(AsmToken::Eof)) {
Error(DirectiveLoc, "no matching '.endr' in definition");
- return 0;
+ return nullptr;
}
if (Lexer.is(AsmToken::Identifier) &&
@@ -3872,7 +4230,7 @@ MCAsmMacro *AsmParser::parseMacroLikeBody(SMLoc DirectiveLoc) {
Lex();
if (Lexer.isNot(AsmToken::EndOfStatement)) {
TokError("unexpected token in '.endr' directive");
- return 0;
+ return nullptr;
}
break;
}
@@ -3888,9 +4246,7 @@ MCAsmMacro *AsmParser::parseMacroLikeBody(SMLoc DirectiveLoc) {
StringRef Body = StringRef(BodyStart, BodyEnd - BodyStart);
// We Are Anonymous.
- StringRef Name;
- MCAsmMacroParameters Parameters;
- MacroLikeBodies.push_back(MCAsmMacro(Name, Body, Parameters));
+ MacroLikeBodies.push_back(MCAsmMacro(StringRef(), Body, None));
return &MacroLikeBodies.back();
}
@@ -3909,20 +4265,29 @@ void AsmParser::instantiateMacroLikeBody(MCAsmMacro *M, SMLoc DirectiveLoc,
// Jump to the macro instantiation and prime the lexer.
CurBuffer = SrcMgr.AddNewSourceBuffer(MI->Instantiation, SMLoc());
- Lexer.setBuffer(SrcMgr.getMemoryBuffer(CurBuffer));
+ Lexer.setBuffer(SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer());
Lex();
}
-bool AsmParser::parseDirectiveRept(SMLoc DirectiveLoc) {
+/// parseDirectiveRept
+/// ::= .rep | .rept count
+bool AsmParser::parseDirectiveRept(SMLoc DirectiveLoc, StringRef Dir) {
+ const MCExpr *CountExpr;
+ SMLoc CountLoc = getTok().getLoc();
+ if (parseExpression(CountExpr))
+ return true;
+
int64_t Count;
- if (parseAbsoluteExpression(Count))
- return TokError("unexpected token in '.rept' directive");
+ if (!CountExpr->EvaluateAsAbsolute(Count)) {
+ eatToEndOfStatement();
+ return Error(CountLoc, "unexpected token in '" + Dir + "' directive");
+ }
if (Count < 0)
- return TokError("Count is negative");
+ return Error(CountLoc, "Count is negative");
if (Lexer.isNot(AsmToken::EndOfStatement))
- return TokError("unexpected token in '.rept' directive");
+ return TokError("unexpected token in '" + Dir + "' directive");
// Eat the end of statement.
Lex();
@@ -3935,11 +4300,9 @@ bool AsmParser::parseDirectiveRept(SMLoc DirectiveLoc) {
// Macro instantiation is lexical, unfortunately. We construct a new buffer
// to hold the macro body with substitutions.
SmallString<256> Buf;
- MCAsmMacroParameters Parameters;
- MCAsmMacroArguments A;
raw_svector_ostream OS(Buf);
while (Count--) {
- if (expandMacro(OS, M->Body, Parameters, A, getTok().getLoc()))
+ if (expandMacro(OS, M->Body, None, None, getTok().getLoc()))
return true;
}
instantiateMacroLikeBody(M, DirectiveLoc, OS);
@@ -3950,21 +4313,18 @@ bool AsmParser::parseDirectiveRept(SMLoc DirectiveLoc) {
/// parseDirectiveIrp
/// ::= .irp symbol,values
bool AsmParser::parseDirectiveIrp(SMLoc DirectiveLoc) {
- MCAsmMacroParameters Parameters;
MCAsmMacroParameter Parameter;
- if (parseIdentifier(Parameter.first))
+ if (parseIdentifier(Parameter.Name))
return TokError("expected identifier in '.irp' directive");
- Parameters.push_back(Parameter);
-
if (Lexer.isNot(AsmToken::Comma))
return TokError("expected comma in '.irp' directive");
Lex();
MCAsmMacroArguments A;
- if (parseMacroArguments(0, A))
+ if (parseMacroArguments(nullptr, A))
return true;
// Eat the end of statement.
@@ -3981,10 +4341,7 @@ bool AsmParser::parseDirectiveIrp(SMLoc DirectiveLoc) {
raw_svector_ostream OS(Buf);
for (MCAsmMacroArguments::iterator i = A.begin(), e = A.end(); i != e; ++i) {
- MCAsmMacroArguments Args;
- Args.push_back(*i);
-
- if (expandMacro(OS, M->Body, Parameters, Args, getTok().getLoc()))
+ if (expandMacro(OS, M->Body, Parameter, *i, getTok().getLoc()))
return true;
}
@@ -3996,21 +4353,18 @@ bool AsmParser::parseDirectiveIrp(SMLoc DirectiveLoc) {
/// parseDirectiveIrpc
/// ::= .irpc symbol,values
bool AsmParser::parseDirectiveIrpc(SMLoc DirectiveLoc) {
- MCAsmMacroParameters Parameters;
MCAsmMacroParameter Parameter;
- if (parseIdentifier(Parameter.first))
+ if (parseIdentifier(Parameter.Name))
return TokError("expected identifier in '.irpc' directive");
- Parameters.push_back(Parameter);
-
if (Lexer.isNot(AsmToken::Comma))
return TokError("expected comma in '.irpc' directive");
Lex();
MCAsmMacroArguments A;
- if (parseMacroArguments(0, A))
+ if (parseMacroArguments(nullptr, A))
return true;
if (A.size() != 1 || A.front().size() != 1)
@@ -4030,15 +4384,11 @@ bool AsmParser::parseDirectiveIrpc(SMLoc DirectiveLoc) {
raw_svector_ostream OS(Buf);
StringRef Values = A.front().front().getString();
- std::size_t I, End = Values.size();
- for (I = 0; I < End; ++I) {
+ for (std::size_t I = 0, End = Values.size(); I != End; ++I) {
MCAsmMacroArgument Arg;
Arg.push_back(AsmToken(AsmToken::Identifier, Values.slice(I, I + 1)));
- MCAsmMacroArguments Args;
- Args.push_back(Arg);
-
- if (expandMacro(OS, M->Body, Parameters, Args, getTok().getLoc()))
+ if (expandMacro(OS, M->Body, Parameter, Arg, getTok().getLoc()))
return true;
}
@@ -4153,27 +4503,28 @@ bool AsmParser::parseMSInlineAsm(
// Build the list of clobbers, outputs and inputs.
for (unsigned i = 1, e = Info.ParsedOperands.size(); i != e; ++i) {
- MCParsedAsmOperand *Operand = Info.ParsedOperands[i];
+ MCParsedAsmOperand &Operand = *Info.ParsedOperands[i];
// Immediate.
- if (Operand->isImm())
+ if (Operand.isImm())
continue;
// Register operand.
- if (Operand->isReg() && !Operand->needAddressOf()) {
+ if (Operand.isReg() && !Operand.needAddressOf() &&
+ !getTargetParser().OmitRegisterFromClobberLists(Operand.getReg())) {
unsigned NumDefs = Desc.getNumDefs();
// Clobber.
- if (NumDefs && Operand->getMCOperandNum() < NumDefs)
- ClobberRegs.push_back(Operand->getReg());
+ if (NumDefs && Operand.getMCOperandNum() < NumDefs)
+ ClobberRegs.push_back(Operand.getReg());
continue;
}
// Expr/Input or Output.
- StringRef SymName = Operand->getSymName();
+ StringRef SymName = Operand.getSymName();
if (SymName.empty())
continue;
- void *OpDecl = Operand->getOpDecl();
+ void *OpDecl = Operand.getOpDecl();
if (!OpDecl)
continue;
@@ -4182,21 +4533,21 @@ bool AsmParser::parseMSInlineAsm(
if (isOutput) {
++InputIdx;
OutputDecls.push_back(OpDecl);
- OutputDeclsAddressOf.push_back(Operand->needAddressOf());
- OutputConstraints.push_back('=' + Operand->getConstraint().str());
+ OutputDeclsAddressOf.push_back(Operand.needAddressOf());
+ OutputConstraints.push_back('=' + Operand.getConstraint().str());
AsmStrRewrites.push_back(AsmRewrite(AOK_Output, Start, SymName.size()));
} else {
InputDecls.push_back(OpDecl);
- InputDeclsAddressOf.push_back(Operand->needAddressOf());
- InputConstraints.push_back(Operand->getConstraint().str());
+ InputDeclsAddressOf.push_back(Operand.needAddressOf());
+ InputConstraints.push_back(Operand.getConstraint().str());
AsmStrRewrites.push_back(AsmRewrite(AOK_Input, Start, SymName.size()));
}
}
// Consider implicit defs to be clobbers. Think of cpuid and push.
- const uint16_t *ImpDefs = Desc.getImplicitDefs();
- for (unsigned I = 0, E = Desc.getNumImplicitDefs(); I != E; ++I)
- ClobberRegs.push_back(ImpDefs[I]);
+ ArrayRef<uint16_t> ImpDefs(Desc.getImplicitDefs(),
+ Desc.getNumImplicitDefs());
+ ClobberRegs.insert(ClobberRegs.end(), ImpDefs.begin(), ImpDefs.end());
}
// Set the number of Outputs and Inputs.
@@ -4231,27 +4582,26 @@ bool AsmParser::parseMSInlineAsm(
// Build the IR assembly string.
std::string AsmStringIR;
raw_string_ostream OS(AsmStringIR);
- const char *AsmStart = SrcMgr.getMemoryBuffer(0)->getBufferStart();
- const char *AsmEnd = SrcMgr.getMemoryBuffer(0)->getBufferEnd();
+ StringRef ASMString =
+ SrcMgr.getMemoryBuffer(SrcMgr.getMainFileID())->getBuffer();
+ const char *AsmStart = ASMString.begin();
+ const char *AsmEnd = ASMString.end();
array_pod_sort(AsmStrRewrites.begin(), AsmStrRewrites.end(), rewritesSort);
- for (SmallVectorImpl<AsmRewrite>::iterator I = AsmStrRewrites.begin(),
- E = AsmStrRewrites.end();
- I != E; ++I) {
- AsmRewriteKind Kind = (*I).Kind;
+ for (const AsmRewrite &AR : AsmStrRewrites) {
+ AsmRewriteKind Kind = AR.Kind;
if (Kind == AOK_Delete)
continue;
- const char *Loc = (*I).Loc.getPointer();
+ const char *Loc = AR.Loc.getPointer();
assert(Loc >= AsmStart && "Expected Loc to be at or after Start!");
// Emit everything up to the immediate/expression.
- unsigned Len = Loc - AsmStart;
- if (Len)
+ if (unsigned Len = Loc - AsmStart)
OS << StringRef(AsmStart, Len);
// Skip the original expression.
if (Kind == AOK_Skip) {
- AsmStart = Loc + (*I).Len;
+ AsmStart = Loc + AR.Len;
continue;
}
@@ -4261,7 +4611,7 @@ bool AsmParser::parseMSInlineAsm(
default:
break;
case AOK_Imm:
- OS << "$$" << (*I).Val;
+ OS << "$$" << AR.Val;
break;
case AOK_ImmPrefix:
OS << "$$";
@@ -4273,7 +4623,7 @@ bool AsmParser::parseMSInlineAsm(
OS << '$' << OutputIdx++;
break;
case AOK_SizeDirective:
- switch ((*I).Val) {
+ switch (AR.Val) {
default: break;
case 8: OS << "byte ptr "; break;
case 16: OS << "word ptr "; break;
@@ -4288,7 +4638,7 @@ bool AsmParser::parseMSInlineAsm(
OS << ".byte";
break;
case AOK_Align: {
- unsigned Val = (*I).Val;
+ unsigned Val = AR.Val;
OS << ".align " << Val;
// Skip the original immediate.
@@ -4299,14 +4649,14 @@ bool AsmParser::parseMSInlineAsm(
case AOK_DotOperator:
// Insert the dot if the user omitted it.
OS.flush();
- if (AsmStringIR.at(AsmStringIR.size() - 1) != '.')
+ if (AsmStringIR.back() != '.')
OS << '.';
- OS << (*I).Val;
+ OS << AR.Val;
break;
}
// Skip the original expression.
- AsmStart = Loc + (*I).Len + AdditionalSkip;
+ AsmStart = Loc + AR.Len + AdditionalSkip;
}
// Emit the remainder of the asm string.
diff --git a/contrib/llvm/lib/MC/MCParser/COFFAsmParser.cpp b/contrib/llvm/lib/MC/MCParser/COFFAsmParser.cpp
index d8343a3..5ecf9e5 100644
--- a/contrib/llvm/lib/MC/MCParser/COFFAsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/COFFAsmParser.cpp
@@ -13,6 +13,7 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionCOFF.h"
@@ -37,12 +38,12 @@ class COFFAsmParser : public MCAsmParserExtension {
bool ParseSectionSwitch(StringRef Section, unsigned Characteristics,
SectionKind Kind, StringRef COMDATSymName,
- COFF::COMDATType Type, const MCSectionCOFF *Assoc);
+ COFF::COMDATType Type);
bool ParseSectionName(StringRef &SectionName);
bool ParseSectionFlags(StringRef FlagsString, unsigned* Flags);
- virtual void Initialize(MCAsmParser &Parser) {
+ void Initialize(MCAsmParser &Parser) override {
// Call the base implementation.
MCAsmParserExtension::Initialize(Parser);
@@ -55,6 +56,7 @@ class COFFAsmParser : public MCAsmParserExtension {
addDirectiveHandler<&COFFAsmParser::ParseDirectiveType>(".type");
addDirectiveHandler<&COFFAsmParser::ParseDirectiveEndef>(".endef");
addDirectiveHandler<&COFFAsmParser::ParseDirectiveSecRel32>(".secrel32");
+ addDirectiveHandler<&COFFAsmParser::ParseDirectiveSecIdx>(".secidx");
addDirectiveHandler<&COFFAsmParser::ParseDirectiveLinkOnce>(".linkonce");
// Win64 EH directives.
@@ -115,8 +117,8 @@ class COFFAsmParser : public MCAsmParserExtension {
bool ParseDirectiveType(StringRef, SMLoc);
bool ParseDirectiveEndef(StringRef, SMLoc);
bool ParseDirectiveSecRel32(StringRef, SMLoc);
- bool parseCOMDATTypeAndAssoc(COFF::COMDATType &Type,
- const MCSectionCOFF *&Assoc);
+ bool ParseDirectiveSecIdx(StringRef, SMLoc);
+ bool parseCOMDATType(COFF::COMDATType &Type);
bool ParseDirectiveLinkOnce(StringRef, SMLoc);
// Win64 EH directives.
@@ -168,8 +170,8 @@ bool COFFAsmParser::ParseSectionFlags(StringRef FlagsString, unsigned* Flags) {
bool ReadOnlyRemoved = false;
unsigned SecFlags = None;
- for (unsigned i = 0; i < FlagsString.size(); ++i) {
- switch (FlagsString[i]) {
+ for (char FlagChar : FlagsString) {
+ switch (FlagChar) {
case 'a':
// Ignored.
break;
@@ -290,22 +292,20 @@ bool COFFAsmParser::ParseDirectiveSymbolAttribute(StringRef Directive, SMLoc) {
bool COFFAsmParser::ParseSectionSwitch(StringRef Section,
unsigned Characteristics,
SectionKind Kind) {
- return ParseSectionSwitch(Section, Characteristics, Kind, "",
- COFF::IMAGE_COMDAT_SELECT_ANY, 0);
+ return ParseSectionSwitch(Section, Characteristics, Kind, "", (COFF::COMDATType)0);
}
bool COFFAsmParser::ParseSectionSwitch(StringRef Section,
unsigned Characteristics,
SectionKind Kind,
StringRef COMDATSymName,
- COFF::COMDATType Type,
- const MCSectionCOFF *Assoc) {
+ COFF::COMDATType Type) {
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in section switching directive");
Lex();
getStreamer().SwitchSection(getContext().getCOFFSection(
- Section, Characteristics, Kind, COMDATSymName, Type, Assoc));
+ Section, Characteristics, Kind, COMDATSymName, Type));
return false;
}
@@ -356,15 +356,15 @@ bool COFFAsmParser::ParseDirectiveSection(StringRef, SMLoc) {
return true;
}
- COFF::COMDATType Type = COFF::IMAGE_COMDAT_SELECT_ANY;
- const MCSectionCOFF *Assoc = 0;
+ COFF::COMDATType Type = (COFF::COMDATType)0;
StringRef COMDATSymName;
if (getLexer().is(AsmToken::Comma)) {
+      Type = COFF::IMAGE_COMDAT_SELECT_ANY;
Lex();
Flags |= COFF::IMAGE_SCN_LNK_COMDAT;
- if (parseCOMDATTypeAndAssoc(Type, Assoc))
+ if (parseCOMDATType(Type))
return true;
if (getLexer().isNot(AsmToken::Comma))
@@ -379,7 +379,12 @@ bool COFFAsmParser::ParseDirectiveSection(StringRef, SMLoc) {
return TokError("unexpected token in directive");
SectionKind Kind = computeSectionKind(Flags);
- ParseSectionSwitch(SectionName, Flags, Kind, COMDATSymName, Type, Assoc);
+ if (Kind.isText()) {
+ const Triple &T = getContext().getObjectFileInfo()->getTargetTriple();
+ if (T.getArch() == Triple::arm || T.getArch() == Triple::thumb)
+ Flags |= COFF::IMAGE_SCN_MEM_16BIT;
+ }
+ ParseSectionSwitch(SectionName, Flags, Kind, COMDATSymName, Type);
return false;
}
@@ -432,7 +437,7 @@ bool COFFAsmParser::ParseDirectiveEndef(StringRef, SMLoc) {
bool COFFAsmParser::ParseDirectiveSecRel32(StringRef, SMLoc) {
StringRef SymbolID;
if (getParser().parseIdentifier(SymbolID))
- return true;
+ return TokError("expected identifier in directive");
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in directive");
@@ -444,9 +449,23 @@ bool COFFAsmParser::ParseDirectiveSecRel32(StringRef, SMLoc) {
return false;
}
-/// ::= [ identifier [ identifier ] ]
-bool COFFAsmParser::parseCOMDATTypeAndAssoc(COFF::COMDATType &Type,
- const MCSectionCOFF *&Assoc) {
+bool COFFAsmParser::ParseDirectiveSecIdx(StringRef, SMLoc) {
+ StringRef SymbolID;
+ if (getParser().parseIdentifier(SymbolID))
+ return TokError("expected identifier in directive");
+
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in directive");
+
+ MCSymbol *Symbol = getContext().GetOrCreateSymbol(SymbolID);
+
+ Lex();
+ getStreamer().EmitCOFFSectionIndex(Symbol);
+ return false;
+}
+
+/// ::= [ identifier ]
+bool COFFAsmParser::parseCOMDATType(COFF::COMDATType &Type) {
StringRef TypeId = getTok().getIdentifier();
Type = StringSwitch<COFF::COMDATType>(TypeId)
@@ -464,48 +483,28 @@ bool COFFAsmParser::parseCOMDATTypeAndAssoc(COFF::COMDATType &Type,
Lex();
- if (Type == COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE) {
- SMLoc Loc = getTok().getLoc();
- StringRef AssocName;
- if (ParseSectionName(AssocName))
- return TokError("expected associated section name");
-
- Assoc = static_cast<const MCSectionCOFF*>(
- getContext().getCOFFSection(AssocName));
- if (!Assoc)
- return Error(Loc, "cannot associate unknown section '" + AssocName + "'");
- if (!(Assoc->getCharacteristics() & COFF::IMAGE_SCN_LNK_COMDAT))
- return Error(Loc, "associated section must be a COMDAT section");
- if (Assoc->getSelection() == COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE)
- return Error(Loc, "associated section cannot be itself associative");
- }
-
return false;
}
/// ParseDirectiveLinkOnce
-/// ::= .linkonce [ identifier [ identifier ] ]
+/// ::= .linkonce [ identifier ]
bool COFFAsmParser::ParseDirectiveLinkOnce(StringRef, SMLoc Loc) {
COFF::COMDATType Type = COFF::IMAGE_COMDAT_SELECT_ANY;
- const MCSectionCOFF *Assoc = 0;
if (getLexer().is(AsmToken::Identifier))
- if (parseCOMDATTypeAndAssoc(Type, Assoc))
+ if (parseCOMDATType(Type))
return true;
const MCSectionCOFF *Current = static_cast<const MCSectionCOFF*>(
getStreamer().getCurrentSection().first);
-
- if (Type == COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE) {
- if (Assoc == Current)
- return Error(Loc, "cannot associate a section with itself");
- }
+ if (Type == COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE)
+ return Error(Loc, "cannot make section associative with .linkonce");
if (Current->getCharacteristics() & COFF::IMAGE_SCN_LNK_COMDAT)
return Error(Loc, Twine("section '") + Current->getSectionName() +
"' is already linkonce");
- Current->setSelection(Type, Assoc);
+ Current->setSelection(Type);
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in directive");
@@ -524,25 +523,25 @@ bool COFFAsmParser::ParseSEHDirectiveStartProc(StringRef, SMLoc) {
MCSymbol *Symbol = getContext().GetOrCreateSymbol(SymbolID);
Lex();
- getStreamer().EmitWin64EHStartProc(Symbol);
+ getStreamer().EmitWinCFIStartProc(Symbol);
return false;
}
bool COFFAsmParser::ParseSEHDirectiveEndProc(StringRef, SMLoc) {
Lex();
- getStreamer().EmitWin64EHEndProc();
+ getStreamer().EmitWinCFIEndProc();
return false;
}
bool COFFAsmParser::ParseSEHDirectiveStartChained(StringRef, SMLoc) {
Lex();
- getStreamer().EmitWin64EHStartChained();
+ getStreamer().EmitWinCFIStartChained();
return false;
}
bool COFFAsmParser::ParseSEHDirectiveEndChained(StringRef, SMLoc) {
Lex();
- getStreamer().EmitWin64EHEndChained();
+ getStreamer().EmitWinCFIEndChained();
return false;
}
@@ -568,13 +567,13 @@ bool COFFAsmParser::ParseSEHDirectiveHandler(StringRef, SMLoc) {
MCSymbol *handler = getContext().GetOrCreateSymbol(SymbolID);
Lex();
- getStreamer().EmitWin64EHHandler(handler, unwind, except);
+ getStreamer().EmitWinEHHandler(handler, unwind, except);
return false;
}
bool COFFAsmParser::ParseSEHDirectiveHandlerData(StringRef, SMLoc) {
Lex();
- getStreamer().EmitWin64EHHandlerData();
+ getStreamer().EmitWinEHHandlerData();
return false;
}
@@ -587,7 +586,7 @@ bool COFFAsmParser::ParseSEHDirectivePushReg(StringRef, SMLoc L) {
return TokError("unexpected token in directive");
Lex();
- getStreamer().EmitWin64EHPushReg(Reg);
+ getStreamer().EmitWinCFIPushReg(Reg);
return false;
}
@@ -611,7 +610,7 @@ bool COFFAsmParser::ParseSEHDirectiveSetFrame(StringRef, SMLoc L) {
return TokError("unexpected token in directive");
Lex();
- getStreamer().EmitWin64EHSetFrame(Reg, Off);
+ getStreamer().EmitWinCFISetFrame(Reg, Off);
return false;
}
@@ -628,7 +627,7 @@ bool COFFAsmParser::ParseSEHDirectiveAllocStack(StringRef, SMLoc) {
return TokError("unexpected token in directive");
Lex();
- getStreamer().EmitWin64EHAllocStack(Size);
+ getStreamer().EmitWinCFIAllocStack(Size);
return false;
}
@@ -653,7 +652,7 @@ bool COFFAsmParser::ParseSEHDirectiveSaveReg(StringRef, SMLoc L) {
Lex();
// FIXME: Err on %xmm* registers
- getStreamer().EmitWin64EHSaveReg(Reg, Off);
+ getStreamer().EmitWinCFISaveReg(Reg, Off);
return false;
}
@@ -680,7 +679,7 @@ bool COFFAsmParser::ParseSEHDirectiveSaveXMM(StringRef, SMLoc L) {
Lex();
// FIXME: Err on non-%xmm* registers
- getStreamer().EmitWin64EHSaveXMM(Reg, Off);
+ getStreamer().EmitWinCFISaveXMM(Reg, Off);
return false;
}
@@ -701,13 +700,13 @@ bool COFFAsmParser::ParseSEHDirectivePushFrame(StringRef, SMLoc) {
return TokError("unexpected token in directive");
Lex();
- getStreamer().EmitWin64EHPushFrame(Code);
+ getStreamer().EmitWinCFIPushFrame(Code);
return false;
}
bool COFFAsmParser::ParseSEHDirectiveEndProlog(StringRef, SMLoc) {
Lex();
- getStreamer().EmitWin64EHEndProlog();
+ getStreamer().EmitWinCFIEndProlog();
return false;
}
diff --git a/contrib/llvm/lib/MC/MCParser/DarwinAsmParser.cpp b/contrib/llvm/lib/MC/MCParser/DarwinAsmParser.cpp
index 4c9bafa..b2a6785 100644
--- a/contrib/llvm/lib/MC/MCParser/DarwinAsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/DarwinAsmParser.cpp
@@ -17,6 +17,7 @@
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
using namespace llvm;
@@ -33,339 +34,343 @@ class DarwinAsmParser : public MCAsmParserExtension {
getParser().addDirectiveHandler(Directive, Handler);
}
- bool ParseSectionSwitch(const char *Segment, const char *Section,
+ bool parseSectionSwitch(const char *Segment, const char *Section,
unsigned TAA = 0, unsigned ImplicitAlign = 0,
unsigned StubSize = 0);
public:
DarwinAsmParser() {}
- virtual void Initialize(MCAsmParser &Parser) {
+ void Initialize(MCAsmParser &Parser) override {
// Call the base implementation.
this->MCAsmParserExtension::Initialize(Parser);
- addDirectiveHandler<&DarwinAsmParser::ParseDirectiveDesc>(".desc");
- addDirectiveHandler<&DarwinAsmParser::ParseDirectiveIndirectSymbol>(
+ addDirectiveHandler<&DarwinAsmParser::parseDirectiveDesc>(".desc");
+ addDirectiveHandler<&DarwinAsmParser::parseDirectiveIndirectSymbol>(
".indirect_symbol");
- addDirectiveHandler<&DarwinAsmParser::ParseDirectiveLsym>(".lsym");
- addDirectiveHandler<&DarwinAsmParser::ParseDirectiveSubsectionsViaSymbols>(
+ addDirectiveHandler<&DarwinAsmParser::parseDirectiveLsym>(".lsym");
+ addDirectiveHandler<&DarwinAsmParser::parseDirectiveSubsectionsViaSymbols>(
".subsections_via_symbols");
- addDirectiveHandler<&DarwinAsmParser::ParseDirectiveDumpOrLoad>(".dump");
- addDirectiveHandler<&DarwinAsmParser::ParseDirectiveDumpOrLoad>(".load");
- addDirectiveHandler<&DarwinAsmParser::ParseDirectiveSection>(".section");
- addDirectiveHandler<&DarwinAsmParser::ParseDirectivePushSection>(
+ addDirectiveHandler<&DarwinAsmParser::parseDirectiveDumpOrLoad>(".dump");
+ addDirectiveHandler<&DarwinAsmParser::parseDirectiveDumpOrLoad>(".load");
+ addDirectiveHandler<&DarwinAsmParser::parseDirectiveSection>(".section");
+ addDirectiveHandler<&DarwinAsmParser::parseDirectivePushSection>(
".pushsection");
- addDirectiveHandler<&DarwinAsmParser::ParseDirectivePopSection>(
+ addDirectiveHandler<&DarwinAsmParser::parseDirectivePopSection>(
".popsection");
- addDirectiveHandler<&DarwinAsmParser::ParseDirectivePrevious>(".previous");
- addDirectiveHandler<&DarwinAsmParser::ParseDirectiveSecureLogUnique>(
+ addDirectiveHandler<&DarwinAsmParser::parseDirectivePrevious>(".previous");
+ addDirectiveHandler<&DarwinAsmParser::parseDirectiveSecureLogUnique>(
".secure_log_unique");
- addDirectiveHandler<&DarwinAsmParser::ParseDirectiveSecureLogReset>(
+ addDirectiveHandler<&DarwinAsmParser::parseDirectiveSecureLogReset>(
".secure_log_reset");
- addDirectiveHandler<&DarwinAsmParser::ParseDirectiveTBSS>(".tbss");
- addDirectiveHandler<&DarwinAsmParser::ParseDirectiveZerofill>(".zerofill");
+ addDirectiveHandler<&DarwinAsmParser::parseDirectiveTBSS>(".tbss");
+ addDirectiveHandler<&DarwinAsmParser::parseDirectiveZerofill>(".zerofill");
- addDirectiveHandler<&DarwinAsmParser::ParseDirectiveDataRegion>(
+ addDirectiveHandler<&DarwinAsmParser::parseDirectiveDataRegion>(
".data_region");
- addDirectiveHandler<&DarwinAsmParser::ParseDirectiveDataRegionEnd>(
+ addDirectiveHandler<&DarwinAsmParser::parseDirectiveDataRegionEnd>(
".end_data_region");
// Special section directives.
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveBss>(".bss");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveConst>(".const");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveConstData>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveBss>(".bss");
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveConst>(".const");
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveConstData>(
".const_data");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveConstructor>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveConstructor>(
".constructor");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveCString>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveCString>(
".cstring");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveData>(".data");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveDestructor>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveData>(".data");
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveDestructor>(
".destructor");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveDyld>(".dyld");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveFVMLibInit0>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveDyld>(".dyld");
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveFVMLibInit0>(
".fvmlib_init0");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveFVMLibInit1>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveFVMLibInit1>(
".fvmlib_init1");
addDirectiveHandler<
- &DarwinAsmParser::ParseSectionDirectiveLazySymbolPointers>(
+ &DarwinAsmParser::parseSectionDirectiveLazySymbolPointers>(
".lazy_symbol_pointer");
- addDirectiveHandler<&DarwinAsmParser::ParseDirectiveLinkerOption>(
+ addDirectiveHandler<&DarwinAsmParser::parseDirectiveLinkerOption>(
".linker_option");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveLiteral16>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveLiteral16>(
".literal16");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveLiteral4>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveLiteral4>(
".literal4");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveLiteral8>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveLiteral8>(
".literal8");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveModInitFunc>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveModInitFunc>(
".mod_init_func");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveModTermFunc>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveModTermFunc>(
".mod_term_func");
addDirectiveHandler<
- &DarwinAsmParser::ParseSectionDirectiveNonLazySymbolPointers>(
+ &DarwinAsmParser::parseSectionDirectiveNonLazySymbolPointers>(
".non_lazy_symbol_pointer");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveObjCCatClsMeth>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveObjCCatClsMeth>(
".objc_cat_cls_meth");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveObjCCatInstMeth>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveObjCCatInstMeth>(
".objc_cat_inst_meth");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveObjCCategory>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveObjCCategory>(
".objc_category");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveObjCClass>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveObjCClass>(
".objc_class");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveObjCClassNames>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveObjCClassNames>(
".objc_class_names");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveObjCClassVars>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveObjCClassVars>(
".objc_class_vars");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveObjCClsMeth>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveObjCClsMeth>(
".objc_cls_meth");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveObjCClsRefs>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveObjCClsRefs>(
".objc_cls_refs");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveObjCInstMeth>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveObjCInstMeth>(
".objc_inst_meth");
addDirectiveHandler<
- &DarwinAsmParser::ParseSectionDirectiveObjCInstanceVars>(
+ &DarwinAsmParser::parseSectionDirectiveObjCInstanceVars>(
".objc_instance_vars");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveObjCMessageRefs>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveObjCMessageRefs>(
".objc_message_refs");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveObjCMetaClass>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveObjCMetaClass>(
".objc_meta_class");
addDirectiveHandler<
- &DarwinAsmParser::ParseSectionDirectiveObjCMethVarNames>(
+ &DarwinAsmParser::parseSectionDirectiveObjCMethVarNames>(
".objc_meth_var_names");
addDirectiveHandler<
- &DarwinAsmParser::ParseSectionDirectiveObjCMethVarTypes>(
+ &DarwinAsmParser::parseSectionDirectiveObjCMethVarTypes>(
".objc_meth_var_types");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveObjCModuleInfo>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveObjCModuleInfo>(
".objc_module_info");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveObjCProtocol>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveObjCProtocol>(
".objc_protocol");
addDirectiveHandler<
- &DarwinAsmParser::ParseSectionDirectiveObjCSelectorStrs>(
+ &DarwinAsmParser::parseSectionDirectiveObjCSelectorStrs>(
".objc_selector_strs");
addDirectiveHandler<
- &DarwinAsmParser::ParseSectionDirectiveObjCStringObject>(
+ &DarwinAsmParser::parseSectionDirectiveObjCStringObject>(
".objc_string_object");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveObjCSymbols>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveObjCSymbols>(
".objc_symbols");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectivePICSymbolStub>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectivePICSymbolStub>(
".picsymbol_stub");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveStaticConst>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveStaticConst>(
".static_const");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveStaticData>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveStaticData>(
".static_data");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveSymbolStub>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveSymbolStub>(
".symbol_stub");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveTData>(".tdata");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveText>(".text");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveThreadInitFunc>(
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveTData>(".tdata");
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveText>(".text");
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveThreadInitFunc>(
".thread_init_func");
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveTLV>(".tlv");
-
- addDirectiveHandler<&DarwinAsmParser::ParseSectionDirectiveIdent>(".ident");
- }
-
- bool ParseDirectiveDesc(StringRef, SMLoc);
- bool ParseDirectiveIndirectSymbol(StringRef, SMLoc);
- bool ParseDirectiveDumpOrLoad(StringRef, SMLoc);
- bool ParseDirectiveLsym(StringRef, SMLoc);
- bool ParseDirectiveLinkerOption(StringRef, SMLoc);
- bool ParseDirectiveSection(StringRef, SMLoc);
- bool ParseDirectivePushSection(StringRef, SMLoc);
- bool ParseDirectivePopSection(StringRef, SMLoc);
- bool ParseDirectivePrevious(StringRef, SMLoc);
- bool ParseDirectiveSecureLogReset(StringRef, SMLoc);
- bool ParseDirectiveSecureLogUnique(StringRef, SMLoc);
- bool ParseDirectiveSubsectionsViaSymbols(StringRef, SMLoc);
- bool ParseDirectiveTBSS(StringRef, SMLoc);
- bool ParseDirectiveZerofill(StringRef, SMLoc);
- bool ParseDirectiveDataRegion(StringRef, SMLoc);
- bool ParseDirectiveDataRegionEnd(StringRef, SMLoc);
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveTLV>(".tlv");
+
+ addDirectiveHandler<&DarwinAsmParser::parseSectionDirectiveIdent>(".ident");
+ addDirectiveHandler<&DarwinAsmParser::parseVersionMin>(".ios_version_min");
+ addDirectiveHandler<&DarwinAsmParser::parseVersionMin>(
+ ".macosx_version_min");
+ }
+
+ bool parseDirectiveDesc(StringRef, SMLoc);
+ bool parseDirectiveIndirectSymbol(StringRef, SMLoc);
+ bool parseDirectiveDumpOrLoad(StringRef, SMLoc);
+ bool parseDirectiveLsym(StringRef, SMLoc);
+ bool parseDirectiveLinkerOption(StringRef, SMLoc);
+ bool parseDirectiveSection(StringRef, SMLoc);
+ bool parseDirectivePushSection(StringRef, SMLoc);
+ bool parseDirectivePopSection(StringRef, SMLoc);
+ bool parseDirectivePrevious(StringRef, SMLoc);
+ bool parseDirectiveSecureLogReset(StringRef, SMLoc);
+ bool parseDirectiveSecureLogUnique(StringRef, SMLoc);
+ bool parseDirectiveSubsectionsViaSymbols(StringRef, SMLoc);
+ bool parseDirectiveTBSS(StringRef, SMLoc);
+ bool parseDirectiveZerofill(StringRef, SMLoc);
+ bool parseDirectiveDataRegion(StringRef, SMLoc);
+ bool parseDirectiveDataRegionEnd(StringRef, SMLoc);
// Named Section Directive
- bool ParseSectionDirectiveBss(StringRef, SMLoc) {
- return ParseSectionSwitch("__DATA", "__bss");
+ bool parseSectionDirectiveBss(StringRef, SMLoc) {
+ return parseSectionSwitch("__DATA", "__bss");
}
- bool ParseSectionDirectiveConst(StringRef, SMLoc) {
- return ParseSectionSwitch("__TEXT", "__const");
+ bool parseSectionDirectiveConst(StringRef, SMLoc) {
+ return parseSectionSwitch("__TEXT", "__const");
}
- bool ParseSectionDirectiveStaticConst(StringRef, SMLoc) {
- return ParseSectionSwitch("__TEXT", "__static_const");
+ bool parseSectionDirectiveStaticConst(StringRef, SMLoc) {
+ return parseSectionSwitch("__TEXT", "__static_const");
}
- bool ParseSectionDirectiveCString(StringRef, SMLoc) {
- return ParseSectionSwitch("__TEXT","__cstring",
- MCSectionMachO::S_CSTRING_LITERALS);
+ bool parseSectionDirectiveCString(StringRef, SMLoc) {
+ return parseSectionSwitch("__TEXT","__cstring",
+ MachO::S_CSTRING_LITERALS);
}
- bool ParseSectionDirectiveLiteral4(StringRef, SMLoc) {
- return ParseSectionSwitch("__TEXT", "__literal4",
- MCSectionMachO::S_4BYTE_LITERALS, 4);
+ bool parseSectionDirectiveLiteral4(StringRef, SMLoc) {
+ return parseSectionSwitch("__TEXT", "__literal4",
+ MachO::S_4BYTE_LITERALS, 4);
}
- bool ParseSectionDirectiveLiteral8(StringRef, SMLoc) {
- return ParseSectionSwitch("__TEXT", "__literal8",
- MCSectionMachO::S_8BYTE_LITERALS, 8);
+ bool parseSectionDirectiveLiteral8(StringRef, SMLoc) {
+ return parseSectionSwitch("__TEXT", "__literal8",
+ MachO::S_8BYTE_LITERALS, 8);
}
- bool ParseSectionDirectiveLiteral16(StringRef, SMLoc) {
- return ParseSectionSwitch("__TEXT","__literal16",
- MCSectionMachO::S_16BYTE_LITERALS, 16);
+ bool parseSectionDirectiveLiteral16(StringRef, SMLoc) {
+ return parseSectionSwitch("__TEXT","__literal16",
+ MachO::S_16BYTE_LITERALS, 16);
}
- bool ParseSectionDirectiveConstructor(StringRef, SMLoc) {
- return ParseSectionSwitch("__TEXT","__constructor");
+ bool parseSectionDirectiveConstructor(StringRef, SMLoc) {
+ return parseSectionSwitch("__TEXT","__constructor");
}
- bool ParseSectionDirectiveDestructor(StringRef, SMLoc) {
- return ParseSectionSwitch("__TEXT","__destructor");
+ bool parseSectionDirectiveDestructor(StringRef, SMLoc) {
+ return parseSectionSwitch("__TEXT","__destructor");
}
- bool ParseSectionDirectiveFVMLibInit0(StringRef, SMLoc) {
- return ParseSectionSwitch("__TEXT","__fvmlib_init0");
+ bool parseSectionDirectiveFVMLibInit0(StringRef, SMLoc) {
+ return parseSectionSwitch("__TEXT","__fvmlib_init0");
}
- bool ParseSectionDirectiveFVMLibInit1(StringRef, SMLoc) {
- return ParseSectionSwitch("__TEXT","__fvmlib_init1");
+ bool parseSectionDirectiveFVMLibInit1(StringRef, SMLoc) {
+ return parseSectionSwitch("__TEXT","__fvmlib_init1");
}
- bool ParseSectionDirectiveSymbolStub(StringRef, SMLoc) {
- return ParseSectionSwitch("__TEXT","__symbol_stub",
- MCSectionMachO::S_SYMBOL_STUBS |
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
+ bool parseSectionDirectiveSymbolStub(StringRef, SMLoc) {
+ return parseSectionSwitch("__TEXT","__symbol_stub",
+ MachO::S_SYMBOL_STUBS |
+ MachO::S_ATTR_PURE_INSTRUCTIONS,
// FIXME: Different on PPC and ARM.
0, 16);
}
- bool ParseSectionDirectivePICSymbolStub(StringRef, SMLoc) {
- return ParseSectionSwitch("__TEXT","__picsymbol_stub",
- MCSectionMachO::S_SYMBOL_STUBS |
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS, 0, 26);
+ bool parseSectionDirectivePICSymbolStub(StringRef, SMLoc) {
+ return parseSectionSwitch("__TEXT","__picsymbol_stub",
+ MachO::S_SYMBOL_STUBS |
+ MachO::S_ATTR_PURE_INSTRUCTIONS, 0, 26);
}
- bool ParseSectionDirectiveData(StringRef, SMLoc) {
- return ParseSectionSwitch("__DATA", "__data");
+ bool parseSectionDirectiveData(StringRef, SMLoc) {
+ return parseSectionSwitch("__DATA", "__data");
}
- bool ParseSectionDirectiveStaticData(StringRef, SMLoc) {
- return ParseSectionSwitch("__DATA", "__static_data");
+ bool parseSectionDirectiveStaticData(StringRef, SMLoc) {
+ return parseSectionSwitch("__DATA", "__static_data");
}
- bool ParseSectionDirectiveNonLazySymbolPointers(StringRef, SMLoc) {
- return ParseSectionSwitch("__DATA", "__nl_symbol_ptr",
- MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS, 4);
+ bool parseSectionDirectiveNonLazySymbolPointers(StringRef, SMLoc) {
+ return parseSectionSwitch("__DATA", "__nl_symbol_ptr",
+ MachO::S_NON_LAZY_SYMBOL_POINTERS, 4);
}
- bool ParseSectionDirectiveLazySymbolPointers(StringRef, SMLoc) {
- return ParseSectionSwitch("__DATA", "__la_symbol_ptr",
- MCSectionMachO::S_LAZY_SYMBOL_POINTERS, 4);
+ bool parseSectionDirectiveLazySymbolPointers(StringRef, SMLoc) {
+ return parseSectionSwitch("__DATA", "__la_symbol_ptr",
+ MachO::S_LAZY_SYMBOL_POINTERS, 4);
}
- bool ParseSectionDirectiveDyld(StringRef, SMLoc) {
- return ParseSectionSwitch("__DATA", "__dyld");
+ bool parseSectionDirectiveDyld(StringRef, SMLoc) {
+ return parseSectionSwitch("__DATA", "__dyld");
}
- bool ParseSectionDirectiveModInitFunc(StringRef, SMLoc) {
- return ParseSectionSwitch("__DATA", "__mod_init_func",
- MCSectionMachO::S_MOD_INIT_FUNC_POINTERS, 4);
+ bool parseSectionDirectiveModInitFunc(StringRef, SMLoc) {
+ return parseSectionSwitch("__DATA", "__mod_init_func",
+ MachO::S_MOD_INIT_FUNC_POINTERS, 4);
}
- bool ParseSectionDirectiveModTermFunc(StringRef, SMLoc) {
- return ParseSectionSwitch("__DATA", "__mod_term_func",
- MCSectionMachO::S_MOD_TERM_FUNC_POINTERS, 4);
+ bool parseSectionDirectiveModTermFunc(StringRef, SMLoc) {
+ return parseSectionSwitch("__DATA", "__mod_term_func",
+ MachO::S_MOD_TERM_FUNC_POINTERS, 4);
}
- bool ParseSectionDirectiveConstData(StringRef, SMLoc) {
- return ParseSectionSwitch("__DATA", "__const");
+ bool parseSectionDirectiveConstData(StringRef, SMLoc) {
+ return parseSectionSwitch("__DATA", "__const");
}
- bool ParseSectionDirectiveObjCClass(StringRef, SMLoc) {
- return ParseSectionSwitch("__OBJC", "__class",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ bool parseSectionDirectiveObjCClass(StringRef, SMLoc) {
+ return parseSectionSwitch("__OBJC", "__class",
+ MachO::S_ATTR_NO_DEAD_STRIP);
}
- bool ParseSectionDirectiveObjCMetaClass(StringRef, SMLoc) {
- return ParseSectionSwitch("__OBJC", "__meta_class",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ bool parseSectionDirectiveObjCMetaClass(StringRef, SMLoc) {
+ return parseSectionSwitch("__OBJC", "__meta_class",
+ MachO::S_ATTR_NO_DEAD_STRIP);
}
- bool ParseSectionDirectiveObjCCatClsMeth(StringRef, SMLoc) {
- return ParseSectionSwitch("__OBJC", "__cat_cls_meth",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ bool parseSectionDirectiveObjCCatClsMeth(StringRef, SMLoc) {
+ return parseSectionSwitch("__OBJC", "__cat_cls_meth",
+ MachO::S_ATTR_NO_DEAD_STRIP);
}
- bool ParseSectionDirectiveObjCCatInstMeth(StringRef, SMLoc) {
- return ParseSectionSwitch("__OBJC", "__cat_inst_meth",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ bool parseSectionDirectiveObjCCatInstMeth(StringRef, SMLoc) {
+ return parseSectionSwitch("__OBJC", "__cat_inst_meth",
+ MachO::S_ATTR_NO_DEAD_STRIP);
}
- bool ParseSectionDirectiveObjCProtocol(StringRef, SMLoc) {
- return ParseSectionSwitch("__OBJC", "__protocol",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ bool parseSectionDirectiveObjCProtocol(StringRef, SMLoc) {
+ return parseSectionSwitch("__OBJC", "__protocol",
+ MachO::S_ATTR_NO_DEAD_STRIP);
}
- bool ParseSectionDirectiveObjCStringObject(StringRef, SMLoc) {
- return ParseSectionSwitch("__OBJC", "__string_object",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ bool parseSectionDirectiveObjCStringObject(StringRef, SMLoc) {
+ return parseSectionSwitch("__OBJC", "__string_object",
+ MachO::S_ATTR_NO_DEAD_STRIP);
}
- bool ParseSectionDirectiveObjCClsMeth(StringRef, SMLoc) {
- return ParseSectionSwitch("__OBJC", "__cls_meth",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ bool parseSectionDirectiveObjCClsMeth(StringRef, SMLoc) {
+ return parseSectionSwitch("__OBJC", "__cls_meth",
+ MachO::S_ATTR_NO_DEAD_STRIP);
}
- bool ParseSectionDirectiveObjCInstMeth(StringRef, SMLoc) {
- return ParseSectionSwitch("__OBJC", "__inst_meth",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ bool parseSectionDirectiveObjCInstMeth(StringRef, SMLoc) {
+ return parseSectionSwitch("__OBJC", "__inst_meth",
+ MachO::S_ATTR_NO_DEAD_STRIP);
}
- bool ParseSectionDirectiveObjCClsRefs(StringRef, SMLoc) {
- return ParseSectionSwitch("__OBJC", "__cls_refs",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP |
- MCSectionMachO::S_LITERAL_POINTERS, 4);
+ bool parseSectionDirectiveObjCClsRefs(StringRef, SMLoc) {
+ return parseSectionSwitch("__OBJC", "__cls_refs",
+ MachO::S_ATTR_NO_DEAD_STRIP |
+ MachO::S_LITERAL_POINTERS, 4);
}
- bool ParseSectionDirectiveObjCMessageRefs(StringRef, SMLoc) {
- return ParseSectionSwitch("__OBJC", "__message_refs",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP |
- MCSectionMachO::S_LITERAL_POINTERS, 4);
+ bool parseSectionDirectiveObjCMessageRefs(StringRef, SMLoc) {
+ return parseSectionSwitch("__OBJC", "__message_refs",
+ MachO::S_ATTR_NO_DEAD_STRIP |
+ MachO::S_LITERAL_POINTERS, 4);
}
- bool ParseSectionDirectiveObjCSymbols(StringRef, SMLoc) {
- return ParseSectionSwitch("__OBJC", "__symbols",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ bool parseSectionDirectiveObjCSymbols(StringRef, SMLoc) {
+ return parseSectionSwitch("__OBJC", "__symbols",
+ MachO::S_ATTR_NO_DEAD_STRIP);
}
- bool ParseSectionDirectiveObjCCategory(StringRef, SMLoc) {
- return ParseSectionSwitch("__OBJC", "__category",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ bool parseSectionDirectiveObjCCategory(StringRef, SMLoc) {
+ return parseSectionSwitch("__OBJC", "__category",
+ MachO::S_ATTR_NO_DEAD_STRIP);
}
- bool ParseSectionDirectiveObjCClassVars(StringRef, SMLoc) {
- return ParseSectionSwitch("__OBJC", "__class_vars",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ bool parseSectionDirectiveObjCClassVars(StringRef, SMLoc) {
+ return parseSectionSwitch("__OBJC", "__class_vars",
+ MachO::S_ATTR_NO_DEAD_STRIP);
}
- bool ParseSectionDirectiveObjCInstanceVars(StringRef, SMLoc) {
- return ParseSectionSwitch("__OBJC", "__instance_vars",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ bool parseSectionDirectiveObjCInstanceVars(StringRef, SMLoc) {
+ return parseSectionSwitch("__OBJC", "__instance_vars",
+ MachO::S_ATTR_NO_DEAD_STRIP);
}
- bool ParseSectionDirectiveObjCModuleInfo(StringRef, SMLoc) {
- return ParseSectionSwitch("__OBJC", "__module_info",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ bool parseSectionDirectiveObjCModuleInfo(StringRef, SMLoc) {
+ return parseSectionSwitch("__OBJC", "__module_info",
+ MachO::S_ATTR_NO_DEAD_STRIP);
}
- bool ParseSectionDirectiveObjCClassNames(StringRef, SMLoc) {
- return ParseSectionSwitch("__TEXT", "__cstring",
- MCSectionMachO::S_CSTRING_LITERALS);
+ bool parseSectionDirectiveObjCClassNames(StringRef, SMLoc) {
+ return parseSectionSwitch("__TEXT", "__cstring",
+ MachO::S_CSTRING_LITERALS);
}
- bool ParseSectionDirectiveObjCMethVarTypes(StringRef, SMLoc) {
- return ParseSectionSwitch("__TEXT", "__cstring",
- MCSectionMachO::S_CSTRING_LITERALS);
+ bool parseSectionDirectiveObjCMethVarTypes(StringRef, SMLoc) {
+ return parseSectionSwitch("__TEXT", "__cstring",
+ MachO::S_CSTRING_LITERALS);
}
- bool ParseSectionDirectiveObjCMethVarNames(StringRef, SMLoc) {
- return ParseSectionSwitch("__TEXT", "__cstring",
- MCSectionMachO::S_CSTRING_LITERALS);
+ bool parseSectionDirectiveObjCMethVarNames(StringRef, SMLoc) {
+ return parseSectionSwitch("__TEXT", "__cstring",
+ MachO::S_CSTRING_LITERALS);
}
- bool ParseSectionDirectiveObjCSelectorStrs(StringRef, SMLoc) {
- return ParseSectionSwitch("__OBJC", "__selector_strs",
- MCSectionMachO::S_CSTRING_LITERALS);
+ bool parseSectionDirectiveObjCSelectorStrs(StringRef, SMLoc) {
+ return parseSectionSwitch("__OBJC", "__selector_strs",
+ MachO::S_CSTRING_LITERALS);
}
- bool ParseSectionDirectiveTData(StringRef, SMLoc) {
- return ParseSectionSwitch("__DATA", "__thread_data",
- MCSectionMachO::S_THREAD_LOCAL_REGULAR);
+ bool parseSectionDirectiveTData(StringRef, SMLoc) {
+ return parseSectionSwitch("__DATA", "__thread_data",
+ MachO::S_THREAD_LOCAL_REGULAR);
}
- bool ParseSectionDirectiveText(StringRef, SMLoc) {
- return ParseSectionSwitch("__TEXT", "__text",
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS);
+ bool parseSectionDirectiveText(StringRef, SMLoc) {
+ return parseSectionSwitch("__TEXT", "__text",
+ MachO::S_ATTR_PURE_INSTRUCTIONS);
}
- bool ParseSectionDirectiveTLV(StringRef, SMLoc) {
- return ParseSectionSwitch("__DATA", "__thread_vars",
- MCSectionMachO::S_THREAD_LOCAL_VARIABLES);
+ bool parseSectionDirectiveTLV(StringRef, SMLoc) {
+ return parseSectionSwitch("__DATA", "__thread_vars",
+ MachO::S_THREAD_LOCAL_VARIABLES);
}
- bool ParseSectionDirectiveIdent(StringRef, SMLoc) {
+ bool parseSectionDirectiveIdent(StringRef, SMLoc) {
// Darwin silently ignores the .ident directive.
getParser().eatToEndOfStatement();
return false;
}
- bool ParseSectionDirectiveThreadInitFunc(StringRef, SMLoc) {
- return ParseSectionSwitch("__DATA", "__thread_init",
- MCSectionMachO::S_THREAD_LOCAL_INIT_FUNCTION_POINTERS);
+ bool parseSectionDirectiveThreadInitFunc(StringRef, SMLoc) {
+ return parseSectionSwitch("__DATA", "__thread_init",
+ MachO::S_THREAD_LOCAL_INIT_FUNCTION_POINTERS);
}
+ bool parseVersionMin(StringRef, SMLoc);
};
} // end anonymous namespace
-bool DarwinAsmParser::ParseSectionSwitch(const char *Segment,
+bool DarwinAsmParser::parseSectionSwitch(const char *Segment,
const char *Section,
unsigned TAA, unsigned Align,
unsigned StubSize) {
@@ -374,7 +379,7 @@ bool DarwinAsmParser::ParseSectionSwitch(const char *Segment,
Lex();
// FIXME: Arch specific.
- bool isText = TAA & MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS;
+ bool isText = TAA & MachO::S_ATTR_PURE_INSTRUCTIONS;
getStreamer().SwitchSection(getContext().getMachOSection(
Segment, Section, TAA, StubSize,
isText ? SectionKind::getText()
@@ -389,14 +394,14 @@ bool DarwinAsmParser::ParseSectionSwitch(const char *Segment,
// is no good reason for someone to intentionally emit incorrectly sized
// values into the implicitly aligned sections.
if (Align)
- getStreamer().EmitValueToAlignment(Align, 0, 1, 0);
+ getStreamer().EmitValueToAlignment(Align);
return false;
}
-/// ParseDirectiveDesc
+/// parseDirectiveDesc
/// ::= .desc identifier , expression
-bool DarwinAsmParser::ParseDirectiveDesc(StringRef, SMLoc) {
+bool DarwinAsmParser::parseDirectiveDesc(StringRef, SMLoc) {
StringRef Name;
if (getParser().parseIdentifier(Name))
return TokError("expected identifier in directive");
@@ -423,15 +428,15 @@ bool DarwinAsmParser::ParseDirectiveDesc(StringRef, SMLoc) {
return false;
}
-/// ParseDirectiveIndirectSymbol
+/// parseDirectiveIndirectSymbol
/// ::= .indirect_symbol identifier
-bool DarwinAsmParser::ParseDirectiveIndirectSymbol(StringRef, SMLoc Loc) {
+bool DarwinAsmParser::parseDirectiveIndirectSymbol(StringRef, SMLoc Loc) {
const MCSectionMachO *Current = static_cast<const MCSectionMachO*>(
getStreamer().getCurrentSection().first);
- unsigned SectionType = Current->getType();
- if (SectionType != MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS &&
- SectionType != MCSectionMachO::S_LAZY_SYMBOL_POINTERS &&
- SectionType != MCSectionMachO::S_SYMBOL_STUBS)
+ MachO::SectionType SectionType = Current->getType();
+ if (SectionType != MachO::S_NON_LAZY_SYMBOL_POINTERS &&
+ SectionType != MachO::S_LAZY_SYMBOL_POINTERS &&
+ SectionType != MachO::S_SYMBOL_STUBS)
return Error(Loc, "indirect symbol not in a symbol pointer or stub "
"section");
@@ -456,9 +461,9 @@ bool DarwinAsmParser::ParseDirectiveIndirectSymbol(StringRef, SMLoc Loc) {
return false;
}
-/// ParseDirectiveDumpOrLoad
+/// parseDirectiveDumpOrLoad
/// ::= ( .dump | .load ) "filename"
-bool DarwinAsmParser::ParseDirectiveDumpOrLoad(StringRef Directive,
+bool DarwinAsmParser::parseDirectiveDumpOrLoad(StringRef Directive,
SMLoc IDLoc) {
bool IsDump = Directive == ".dump";
if (getLexer().isNot(AsmToken::String))
@@ -481,7 +486,7 @@ bool DarwinAsmParser::ParseDirectiveDumpOrLoad(StringRef Directive,
/// ParseDirectiveLinkerOption
/// ::= .linker_option "string" ( , "string" )*
-bool DarwinAsmParser::ParseDirectiveLinkerOption(StringRef IDVal, SMLoc) {
+bool DarwinAsmParser::parseDirectiveLinkerOption(StringRef IDVal, SMLoc) {
SmallVector<std::string, 4> Args;
for (;;) {
if (getLexer().isNot(AsmToken::String))
@@ -506,9 +511,9 @@ bool DarwinAsmParser::ParseDirectiveLinkerOption(StringRef IDVal, SMLoc) {
return false;
}
-/// ParseDirectiveLsym
+/// parseDirectiveLsym
/// ::= .lsym identifier , expression
-bool DarwinAsmParser::ParseDirectiveLsym(StringRef, SMLoc) {
+bool DarwinAsmParser::parseDirectiveLsym(StringRef, SMLoc) {
StringRef Name;
if (getParser().parseIdentifier(Name))
return TokError("expected identifier in directive");
@@ -536,9 +541,9 @@ bool DarwinAsmParser::ParseDirectiveLsym(StringRef, SMLoc) {
return TokError("directive '.lsym' is unsupported");
}
-/// ParseDirectiveSection:
+/// parseDirectiveSection:
/// ::= .section identifier (',' identifier)*
-bool DarwinAsmParser::ParseDirectiveSection(StringRef, SMLoc) {
+bool DarwinAsmParser::parseDirectiveSection(StringRef, SMLoc) {
SMLoc Loc = getLexer().getLoc();
StringRef SectionName;
@@ -585,10 +590,10 @@ bool DarwinAsmParser::ParseDirectiveSection(StringRef, SMLoc) {
/// ParseDirectivePushSection:
/// ::= .pushsection identifier (',' identifier)*
-bool DarwinAsmParser::ParseDirectivePushSection(StringRef S, SMLoc Loc) {
+bool DarwinAsmParser::parseDirectivePushSection(StringRef S, SMLoc Loc) {
getStreamer().PushSection();
- if (ParseDirectiveSection(S, Loc)) {
+ if (parseDirectiveSection(S, Loc)) {
getStreamer().PopSection();
return true;
}
@@ -598,7 +603,7 @@ bool DarwinAsmParser::ParseDirectivePushSection(StringRef S, SMLoc Loc) {
/// ParseDirectivePopSection:
/// ::= .popsection
-bool DarwinAsmParser::ParseDirectivePopSection(StringRef, SMLoc) {
+bool DarwinAsmParser::parseDirectivePopSection(StringRef, SMLoc) {
if (!getStreamer().PopSection())
return TokError(".popsection without corresponding .pushsection");
return false;
@@ -606,17 +611,17 @@ bool DarwinAsmParser::ParseDirectivePopSection(StringRef, SMLoc) {
/// ParseDirectivePrevious:
/// ::= .previous
-bool DarwinAsmParser::ParseDirectivePrevious(StringRef DirName, SMLoc) {
+bool DarwinAsmParser::parseDirectivePrevious(StringRef DirName, SMLoc) {
MCSectionSubPair PreviousSection = getStreamer().getPreviousSection();
- if (PreviousSection.first == NULL)
- return TokError(".previous without corresponding .section");
+ if (!PreviousSection.first)
+ return TokError(".previous without corresponding .section");
getStreamer().SwitchSection(PreviousSection.first, PreviousSection.second);
return false;
}
/// ParseDirectiveSecureLogUnique
/// ::= .secure_log_unique ... message ...
-bool DarwinAsmParser::ParseDirectiveSecureLogUnique(StringRef, SMLoc IDLoc) {
+bool DarwinAsmParser::parseDirectiveSecureLogUnique(StringRef, SMLoc IDLoc) {
StringRef LogMessage = getParser().parseStringToEndOfStatement();
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.secure_log_unique' directive");
@@ -626,15 +631,16 @@ bool DarwinAsmParser::ParseDirectiveSecureLogUnique(StringRef, SMLoc IDLoc) {
// Get the secure log path.
const char *SecureLogFile = getContext().getSecureLogFile();
- if (SecureLogFile == NULL)
+ if (!SecureLogFile)
return Error(IDLoc, ".secure_log_unique used but AS_SECURE_LOG_FILE "
"environment variable unset.");
// Open the secure log file if we haven't already.
raw_ostream *OS = getContext().getSecureLog();
- if (OS == NULL) {
+ if (!OS) {
std::string Err;
- OS = new raw_fd_ostream(SecureLogFile, Err, sys::fs::F_Append);
+ OS = new raw_fd_ostream(SecureLogFile, Err,
+ sys::fs::F_Append | sys::fs::F_Text);
if (!Err.empty()) {
delete OS;
return Error(IDLoc, Twine("can't open secure log file: ") +
@@ -644,7 +650,7 @@ bool DarwinAsmParser::ParseDirectiveSecureLogUnique(StringRef, SMLoc IDLoc) {
}
// Write the message.
- int CurBuf = getSourceManager().FindBufferContainingLoc(IDLoc);
+ unsigned CurBuf = getSourceManager().FindBufferContainingLoc(IDLoc);
*OS << getSourceManager().getBufferInfo(CurBuf).Buffer->getBufferIdentifier()
<< ":" << getSourceManager().FindLineNumber(IDLoc, CurBuf) << ":"
<< LogMessage + "\n";
@@ -656,7 +662,7 @@ bool DarwinAsmParser::ParseDirectiveSecureLogUnique(StringRef, SMLoc IDLoc) {
/// ParseDirectiveSecureLogReset
/// ::= .secure_log_reset
-bool DarwinAsmParser::ParseDirectiveSecureLogReset(StringRef, SMLoc IDLoc) {
+bool DarwinAsmParser::parseDirectiveSecureLogReset(StringRef, SMLoc IDLoc) {
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.secure_log_reset' directive");
@@ -667,9 +673,9 @@ bool DarwinAsmParser::ParseDirectiveSecureLogReset(StringRef, SMLoc IDLoc) {
return false;
}
-/// ParseDirectiveSubsectionsViaSymbols
+/// parseDirectiveSubsectionsViaSymbols
/// ::= .subsections_via_symbols
-bool DarwinAsmParser::ParseDirectiveSubsectionsViaSymbols(StringRef, SMLoc) {
+bool DarwinAsmParser::parseDirectiveSubsectionsViaSymbols(StringRef, SMLoc) {
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.subsections_via_symbols' directive");
@@ -682,7 +688,7 @@ bool DarwinAsmParser::ParseDirectiveSubsectionsViaSymbols(StringRef, SMLoc) {
/// ParseDirectiveTBSS
/// ::= .tbss identifier, size, align
-bool DarwinAsmParser::ParseDirectiveTBSS(StringRef, SMLoc) {
+bool DarwinAsmParser::parseDirectiveTBSS(StringRef, SMLoc) {
SMLoc IDLoc = getLexer().getLoc();
StringRef Name;
if (getParser().parseIdentifier(Name))
@@ -728,7 +734,7 @@ bool DarwinAsmParser::ParseDirectiveTBSS(StringRef, SMLoc) {
getStreamer().EmitTBSSSymbol(getContext().getMachOSection(
"__DATA", "__thread_bss",
- MCSectionMachO::S_THREAD_LOCAL_ZEROFILL,
+ MachO::S_THREAD_LOCAL_ZEROFILL,
0, SectionKind::getThreadBSS()),
Sym, Size, 1 << Pow2Alignment);
@@ -738,7 +744,7 @@ bool DarwinAsmParser::ParseDirectiveTBSS(StringRef, SMLoc) {
/// ParseDirectiveZerofill
/// ::= .zerofill segname , sectname [, identifier , size_expression [
/// , align_expression ]]
-bool DarwinAsmParser::ParseDirectiveZerofill(StringRef, SMLoc) {
+bool DarwinAsmParser::parseDirectiveZerofill(StringRef, SMLoc) {
StringRef Segment;
if (getParser().parseIdentifier(Segment))
return TokError("expected segment name after '.zerofill' directive");
@@ -757,7 +763,7 @@ bool DarwinAsmParser::ParseDirectiveZerofill(StringRef, SMLoc) {
if (getLexer().is(AsmToken::EndOfStatement)) {
// Create the zerofill section but no symbol
getStreamer().EmitZerofill(getContext().getMachOSection(
- Segment, Section, MCSectionMachO::S_ZEROFILL,
+ Segment, Section, MachO::S_ZEROFILL,
0, SectionKind::getBSS()));
return false;
}
@@ -815,7 +821,7 @@ bool DarwinAsmParser::ParseDirectiveZerofill(StringRef, SMLoc) {
//
// FIXME: Arch specific.
getStreamer().EmitZerofill(getContext().getMachOSection(
- Segment, Section, MCSectionMachO::S_ZEROFILL,
+ Segment, Section, MachO::S_ZEROFILL,
0, SectionKind::getBSS()),
Sym, Size, 1 << Pow2Alignment);
@@ -824,7 +830,7 @@ bool DarwinAsmParser::ParseDirectiveZerofill(StringRef, SMLoc) {
/// ParseDirectiveDataRegion
/// ::= .data_region [ ( jt8 | jt16 | jt32 ) ]
-bool DarwinAsmParser::ParseDirectiveDataRegion(StringRef, SMLoc) {
+bool DarwinAsmParser::parseDirectiveDataRegion(StringRef, SMLoc) {
if (getLexer().is(AsmToken::EndOfStatement)) {
Lex();
getStreamer().EmitDataRegion(MCDR_DataRegion);
@@ -849,7 +855,7 @@ bool DarwinAsmParser::ParseDirectiveDataRegion(StringRef, SMLoc) {
/// ParseDirectiveDataRegionEnd
/// ::= .end_data_region
-bool DarwinAsmParser::ParseDirectiveDataRegionEnd(StringRef, SMLoc) {
+bool DarwinAsmParser::parseDirectiveDataRegionEnd(StringRef, SMLoc) {
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.end_data_region' directive");
@@ -858,6 +864,50 @@ bool DarwinAsmParser::ParseDirectiveDataRegionEnd(StringRef, SMLoc) {
return false;
}
+/// parseVersionMin
+/// ::= .ios_version_min major,minor[,update]
+/// ::= .macosx_version_min major,minor[,update]
+bool DarwinAsmParser::parseVersionMin(StringRef Directive, SMLoc) {
+ int64_t Major = 0, Minor = 0, Update = 0;
+ int Kind = StringSwitch<int>(Directive)
+ .Case(".ios_version_min", MCVM_IOSVersionMin)
+ .Case(".macosx_version_min", MCVM_OSXVersionMin);
+ // Get the major version number.
+ if (getLexer().isNot(AsmToken::Integer))
+ return TokError("invalid OS major version number");
+ Major = getLexer().getTok().getIntVal();
+ if (Major > 65535 || Major <= 0)
+ return TokError("invalid OS major version number");
+ Lex();
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("minor OS version number required, comma expected");
+ Lex();
+ // Get the minor version number.
+ if (getLexer().isNot(AsmToken::Integer))
+ return TokError("invalid OS minor version number");
+ Minor = getLexer().getTok().getIntVal();
+ if (Minor > 255 || Minor < 0)
+ return TokError("invalid OS minor version number");
+ Lex();
+ // Get the update level, if specified
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("invalid update specifier, comma expected");
+ Lex();
+ if (getLexer().isNot(AsmToken::Integer))
+ return TokError("invalid OS update number");
+ Update = getLexer().getTok().getIntVal();
+ if (Update > 255 || Update < 0)
+ return TokError("invalid OS update number");
+ Lex();
+ }
+
+ // We've parsed a correct version specifier, so send it to the streamer.
+ getStreamer().EmitVersionMin((MCVersionMinType)Kind, Major, Minor, Update);
+
+ return false;
+}
+
namespace llvm {
MCAsmParserExtension *createDarwinAsmParser() {
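The new parseVersionMin handler added above enforces fixed ranges on the directive operands: major must be 1-65535, minor and update 0-255, with update defaulting to 0 when omitted. A minimal standalone sketch of just that check, assuming nothing beyond the bounds visible in the hunk (the function name and signature here are hypothetical, not part of the patch):

    #include <cstdint>

    // Mirrors the operand range checks in parseVersionMin() above.
    bool versionMinOperandsValid(int64_t Major, int64_t Minor, int64_t Update) {
      if (Major <= 0 || Major > 65535)
        return false;                      // e.g. ".ios_version_min 0,1" is rejected
      if (Minor < 0 || Minor > 255)
        return false;
      return Update >= 0 && Update <= 255; // update is 0 when not specified
    }
    // e.g. ".macosx_version_min 10,9" -> versionMinOperandsValid(10, 9, 0) == true
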
diff --git a/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp b/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp
index 8807975..e302004 100644
--- a/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp
@@ -37,7 +37,7 @@ class ELFAsmParser : public MCAsmParserExtension {
public:
ELFAsmParser() { BracketExpressionsSupported = true; }
- virtual void Initialize(MCAsmParser &Parser) {
+ void Initialize(MCAsmParser &Parser) override {
// Call the base implementation.
this->MCAsmParserExtension::Initialize(Parser);
@@ -150,7 +150,8 @@ public:
private:
bool ParseSectionName(StringRef &SectionName);
- bool ParseSectionArguments(bool IsPush);
+ bool ParseSectionArguments(bool IsPush, SMLoc loc);
+ unsigned parseSunStyleSectionFlags();
};
}
@@ -192,7 +193,7 @@ bool ELFAsmParser::ParseDirectiveSymbolAttribute(StringRef Directive, SMLoc) {
bool ELFAsmParser::ParseSectionSwitch(StringRef Section, unsigned Type,
unsigned Flags, SectionKind Kind) {
- const MCExpr *Subsection = 0;
+ const MCExpr *Subsection = nullptr;
if (getLexer().isNot(AsmToken::EndOfStatement)) {
if (getParser().parseExpression(Subsection))
return true;
@@ -268,11 +269,37 @@ bool ELFAsmParser::ParseSectionName(StringRef &SectionName) {
return false;
}
-static SectionKind computeSectionKind(unsigned Flags) {
+static SectionKind computeSectionKind(unsigned Flags, unsigned ElemSize) {
if (Flags & ELF::SHF_EXECINSTR)
return SectionKind::getText();
if (Flags & ELF::SHF_TLS)
return SectionKind::getThreadData();
+ if (Flags & ELF::SHF_MERGE) {
+ if (Flags & ELF::SHF_STRINGS) {
+ switch (ElemSize) {
+ default:
+ break;
+ case 1:
+ return SectionKind::getMergeable1ByteCString();
+ case 2:
+ return SectionKind::getMergeable2ByteCString();
+ case 4:
+ return SectionKind::getMergeable4ByteCString();
+ }
+ } else {
+ switch (ElemSize) {
+ default:
+ break;
+ case 4:
+ return SectionKind::getMergeableConst4();
+ case 8:
+ return SectionKind::getMergeableConst8();
+ case 16:
+ return SectionKind::getMergeableConst16();
+ }
+ }
+ }
+
return SectionKind::getDataRel();
}
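The extended computeSectionKind above now keys on both the section flags and the merge entity size. A self-contained sketch of the same decision tree, with the SHF_* constants repeated (standard ELF values) so it compiles without LLVM headers and the SectionKind results reduced to string labels:

    #include <cstdint>
    #include <string>

    static const uint32_t SHF_EXECINSTR = 0x4, SHF_MERGE = 0x10,
                          SHF_STRINGS = 0x20, SHF_TLS = 0x400;

    // Same ordering as computeSectionKind(Flags, ElemSize) in the hunk above.
    std::string sectionKindFor(uint32_t Flags, unsigned ElemSize) {
      if (Flags & SHF_EXECINSTR) return "Text";
      if (Flags & SHF_TLS)       return "ThreadData";
      if (Flags & SHF_MERGE) {
        if (Flags & SHF_STRINGS) {
          if (ElemSize == 1) return "Mergeable1ByteCString";
          if (ElemSize == 2) return "Mergeable2ByteCString";
          if (ElemSize == 4) return "Mergeable4ByteCString";
        } else {
          if (ElemSize == 4)  return "MergeableConst4";
          if (ElemSize == 8)  return "MergeableConst8";
          if (ElemSize == 16) return "MergeableConst16";
        }
      }
      return "DataRel"; // every unrecognised combination still falls back here
    }
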
@@ -322,10 +349,40 @@ static unsigned parseSectionFlags(StringRef flagsStr, bool *UseLastGroup) {
return flags;
}
+unsigned ELFAsmParser::parseSunStyleSectionFlags() {
+ unsigned flags = 0;
+ while (getLexer().is(AsmToken::Hash)) {
+ Lex(); // Eat the #.
+
+ if (!getLexer().is(AsmToken::Identifier))
+ return -1U;
+
+ StringRef flagId = getTok().getIdentifier();
+ if (flagId == "alloc")
+ flags |= ELF::SHF_ALLOC;
+ else if (flagId == "execinstr")
+ flags |= ELF::SHF_EXECINSTR;
+ else if (flagId == "write")
+ flags |= ELF::SHF_WRITE;
+ else if (flagId == "tls")
+ flags |= ELF::SHF_TLS;
+ else
+ return -1U;
+
+ Lex(); // Eat the flag.
+
+ if (!getLexer().is(AsmToken::Comma))
+ break;
+ Lex(); // Eat the comma.
+ }
+ return flags;
+}
+
+
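parseSunStyleSectionFlags above accepts the Sun/Solaris assembler spelling of section flags (#alloc, #execinstr, #write, #tls, comma separated) and folds them into the same SHF_* bit set that the quoted GNU flag string produces. A standalone sketch of that mapping over an already-tokenised flag list; the ELF constants are repeated so it compiles on its own, and the lexer handling of '#' and ',' is left out:

    #include <cstdint>
    #include <string>
    #include <vector>

    static const uint32_t SHF_WRITE = 0x1, SHF_ALLOC = 0x2,
                          SHF_EXECINSTR = 0x4, SHF_TLS = 0x400;

    uint32_t sunStyleFlags(const std::vector<std::string> &Ids) {
      uint32_t Flags = 0;
      for (const std::string &Id : Ids) {
        if      (Id == "alloc")     Flags |= SHF_ALLOC;
        else if (Id == "execinstr") Flags |= SHF_EXECINSTR;
        else if (Id == "write")     Flags |= SHF_WRITE;
        else if (Id == "tls")       Flags |= SHF_TLS;
        else return UINT32_MAX; // unknown flag, mirrors the -1U error return
      }
      return Flags;
    }
    // sunStyleFlags({"alloc", "execinstr"}) == (SHF_ALLOC | SHF_EXECINSTR)
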
bool ELFAsmParser::ParseDirectivePushSection(StringRef s, SMLoc loc) {
getStreamer().PushSection();
- if (ParseSectionArguments(/*IsPush=*/true)) {
+ if (ParseSectionArguments(/*IsPush=*/true, loc)) {
getStreamer().PopSection();
return true;
}
@@ -340,11 +397,11 @@ bool ELFAsmParser::ParseDirectivePopSection(StringRef, SMLoc) {
}
// FIXME: This is a work in progress.
-bool ELFAsmParser::ParseDirectiveSection(StringRef, SMLoc) {
- return ParseSectionArguments(/*IsPush=*/false);
+bool ELFAsmParser::ParseDirectiveSection(StringRef, SMLoc loc) {
+ return ParseSectionArguments(/*IsPush=*/false, loc);
}
-bool ELFAsmParser::ParseSectionArguments(bool IsPush) {
+bool ELFAsmParser::ParseSectionArguments(bool IsPush, SMLoc loc) {
StringRef SectionName;
if (ParseSectionName(SectionName))
@@ -354,7 +411,7 @@ bool ELFAsmParser::ParseSectionArguments(bool IsPush) {
int64_t Size = 0;
StringRef GroupName;
unsigned Flags = 0;
- const MCExpr *Subsection = 0;
+ const MCExpr *Subsection = nullptr;
bool UseLastGroup = false;
// Set the defaults first.
@@ -374,14 +431,20 @@ bool ELFAsmParser::ParseSectionArguments(bool IsPush) {
goto EndStmt;
Lex();
}
-
- if (getLexer().isNot(AsmToken::String))
- return TokError("expected string in directive");
- StringRef FlagsStr = getTok().getStringContents();
- Lex();
+ unsigned extraFlags;
+
+ if (getLexer().isNot(AsmToken::String)) {
+ if (!getContext().getAsmInfo()->usesSunStyleELFSectionSwitchSyntax()
+ || getLexer().isNot(AsmToken::Hash))
+ return TokError("expected string in directive");
+ extraFlags = parseSunStyleSectionFlags();
+ } else {
+ StringRef FlagsStr = getTok().getStringContents();
+ Lex();
+ extraFlags = parseSectionFlags(FlagsStr, &UseLastGroup);
+ }
- unsigned extraFlags = parseSectionFlags(FlagsStr, &UseLastGroup);
if (extraFlags == -1U)
return TokError("unknown flag");
Flags |= extraFlags;
@@ -481,23 +544,50 @@ EndStmt:
}
}
- SectionKind Kind = computeSectionKind(Flags);
- getStreamer().SwitchSection(getContext().getELFSection(SectionName, Type,
- Flags, Kind, Size,
- GroupName),
- Subsection);
+ SectionKind Kind = computeSectionKind(Flags, Size);
+ const MCSection *ELFSection = getContext().getELFSection(
+ SectionName, Type, Flags, Kind, Size, GroupName);
+ getStreamer().SwitchSection(ELFSection, Subsection);
+
+ if (getContext().getGenDwarfForAssembly()) {
+ auto &Sections = getContext().getGenDwarfSectionSyms();
+ auto InsertResult = Sections.insert(
+ std::make_pair(ELFSection, std::make_pair(nullptr, nullptr)));
+ if (InsertResult.second) {
+ if (getContext().getDwarfVersion() <= 2)
+ Warning(loc, "DWARF2 only supports one section per compilation unit");
+
+ MCSymbol *SectionStartSymbol = getContext().CreateTempSymbol();
+ getStreamer().EmitLabel(SectionStartSymbol);
+ InsertResult.first->second.first = SectionStartSymbol;
+ }
+ }
+
return false;
}
bool ELFAsmParser::ParseDirectivePrevious(StringRef DirName, SMLoc) {
MCSectionSubPair PreviousSection = getStreamer().getPreviousSection();
- if (PreviousSection.first == NULL)
+ if (PreviousSection.first == nullptr)
return TokError(".previous without corresponding .section");
getStreamer().SwitchSection(PreviousSection.first, PreviousSection.second);
return false;
}
+static MCSymbolAttr MCAttrForString(StringRef Type) {
+ return StringSwitch<MCSymbolAttr>(Type)
+ .Cases("STT_FUNC", "function", MCSA_ELF_TypeFunction)
+ .Cases("STT_OBJECT", "object", MCSA_ELF_TypeObject)
+ .Cases("STT_TLS", "tls_object", MCSA_ELF_TypeTLS)
+ .Cases("STT_COMMON", "common", MCSA_ELF_TypeCommon)
+ .Cases("STT_NOTYPE", "notype", MCSA_ELF_TypeNoType)
+ .Cases("STT_GNU_IFUNC", "gnu_indirect_function",
+ MCSA_ELF_TypeIndFunction)
+ .Case("gnu_unique_object", MCSA_ELF_TypeGnuUniqueObject)
+ .Default(MCSA_Invalid);
+}
+
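The new MCAttrForString helper above collapses the two previous StringSwitch tables used by '.type': each ELF STT_* spelling and its lower-case GAS alias now resolve to the same MCSymbolAttr. A dependency-free sketch of that pairing, with the attribute values reduced to strings; the real helper is file-local to ELFAsmParser.cpp and returns MCSymbolAttr, so this is illustrative only:

    #include <map>
    #include <string>

    std::string elfTypeAttrFor(const std::string &Type) {
      // Pairings copied from the StringSwitch above; anything else is invalid.
      static const std::map<std::string, std::string> Table = {
          {"STT_FUNC", "TypeFunction"},   {"function", "TypeFunction"},
          {"STT_OBJECT", "TypeObject"},   {"object", "TypeObject"},
          {"STT_TLS", "TypeTLS"},         {"tls_object", "TypeTLS"},
          {"STT_COMMON", "TypeCommon"},   {"common", "TypeCommon"},
          {"STT_NOTYPE", "TypeNoType"},   {"notype", "TypeNoType"},
          {"STT_GNU_IFUNC", "TypeIndFunction"},
          {"gnu_indirect_function", "TypeIndFunction"},
          {"gnu_unique_object", "TypeGnuUniqueObject"}};
      auto It = Table.find(Type);
      return It == Table.end() ? "Invalid" : It->second;
    }
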
/// ParseDirectiveELFType
/// ::= .type identifier , STT_<TYPE_IN_UPPER_CASE>
/// ::= .type identifier , #attribute
@@ -512,53 +602,36 @@ bool ELFAsmParser::ParseDirectiveType(StringRef, SMLoc) {
// Handle the identifier as the key symbol.
MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
- if (getLexer().isNot(AsmToken::Comma))
- return TokError("unexpected token in '.type' directive");
- Lex();
-
- StringRef Type;
- SMLoc TypeLoc;
- MCSymbolAttr Attr;
- if (getLexer().is(AsmToken::Identifier)) {
- TypeLoc = getLexer().getLoc();
- if (getParser().parseIdentifier(Type))
- return TokError("expected symbol type in directive");
- Attr = StringSwitch<MCSymbolAttr>(Type)
- .Case("STT_FUNC", MCSA_ELF_TypeFunction)
- .Case("STT_OBJECT", MCSA_ELF_TypeObject)
- .Case("STT_TLS", MCSA_ELF_TypeTLS)
- .Case("STT_COMMON", MCSA_ELF_TypeCommon)
- .Case("STT_NOTYPE", MCSA_ELF_TypeNoType)
- .Case("STT_GNU_IFUNC", MCSA_ELF_TypeIndFunction)
- .Default(MCSA_Invalid);
- } else if (getLexer().is(AsmToken::Hash) || getLexer().is(AsmToken::At) ||
- getLexer().is(AsmToken::Percent) ||
- getLexer().is(AsmToken::String)) {
- if (!getLexer().is(AsmToken::String))
- Lex();
+ // NOTE the comma is optional in all cases. It is only documented as being
+ // optional in the first case, however, GAS will silently treat the comma as
+ // optional in all cases. Furthermore, although the documentation states that
+ // the first form only accepts STT_<TYPE_IN_UPPER_CASE>, in reality, GAS
+ // accepts both the upper case name as well as the lower case aliases.
+ if (getLexer().is(AsmToken::Comma))
+ Lex();
- TypeLoc = getLexer().getLoc();
- if (getParser().parseIdentifier(Type))
- return TokError("expected symbol type in directive");
- Attr = StringSwitch<MCSymbolAttr>(Type)
- .Case("function", MCSA_ELF_TypeFunction)
- .Case("object", MCSA_ELF_TypeObject)
- .Case("tls_object", MCSA_ELF_TypeTLS)
- .Case("common", MCSA_ELF_TypeCommon)
- .Case("notype", MCSA_ELF_TypeNoType)
- .Case("gnu_unique_object", MCSA_ELF_TypeGnuUniqueObject)
- .Case("gnu_indirect_function", MCSA_ELF_TypeIndFunction)
- .Default(MCSA_Invalid);
- } else
+ if (getLexer().isNot(AsmToken::Identifier) &&
+ getLexer().isNot(AsmToken::Hash) && getLexer().isNot(AsmToken::At) &&
+ getLexer().isNot(AsmToken::Percent) && getLexer().isNot(AsmToken::String))
return TokError("expected STT_<TYPE_IN_UPPER_CASE>, '#<type>', '@<type>', "
"'%<type>' or \"<type>\"");
+ if (getLexer().isNot(AsmToken::String) &&
+ getLexer().isNot(AsmToken::Identifier))
+ Lex();
+
+ SMLoc TypeLoc = getLexer().getLoc();
+
+ StringRef Type;
+ if (getParser().parseIdentifier(Type))
+ return TokError("expected symbol type in directive");
+
+ MCSymbolAttr Attr = MCAttrForString(Type);
if (Attr == MCSA_Invalid)
return Error(TypeLoc, "unsupported attribute in '.type' directive");
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.type' directive");
-
Lex();
getStreamer().EmitSymbolAttribute(Sym, Attr);
@@ -590,7 +663,14 @@ bool ELFAsmParser::ParseDirectiveSymver(StringRef, SMLoc) {
if (getLexer().isNot(AsmToken::Comma))
return TokError("expected a comma");
+ // ARM assembly uses @ for a comment...
+ // except when parsing the second parameter of the .symver directive.
+ // Force the next symbol to allow @ in the identifier, which is
+ // required for this directive and then reset it to its initial state.
+ const bool AllowAtInIdentifier = getLexer().getAllowAtInIdentifier();
+ getLexer().setAllowAtInIdentifier(true);
Lex();
+ getLexer().setAllowAtInIdentifier(AllowAtInIdentifier);
StringRef AliasName;
if (getParser().parseIdentifier(AliasName))
@@ -660,7 +740,7 @@ bool ELFAsmParser::ParseDirectiveWeakref(StringRef, SMLoc) {
}
bool ELFAsmParser::ParseDirectiveSubsection(StringRef, SMLoc) {
- const MCExpr *Subsection = 0;
+ const MCExpr *Subsection = nullptr;
if (getLexer().isNot(AsmToken::EndOfStatement)) {
if (getParser().parseExpression(Subsection))
return true;
diff --git a/contrib/llvm/lib/MC/MCParser/MCAsmLexer.cpp b/contrib/llvm/lib/MC/MCParser/MCAsmLexer.cpp
index 3867691..530814b 100644
--- a/contrib/llvm/lib/MC/MCParser/MCAsmLexer.cpp
+++ b/contrib/llvm/lib/MC/MCParser/MCAsmLexer.cpp
@@ -13,7 +13,7 @@
using namespace llvm;
MCAsmLexer::MCAsmLexer() : CurTok(AsmToken::Error, StringRef()),
- TokStart(0), SkipSpace(true) {
+ TokStart(nullptr), SkipSpace(true) {
}
MCAsmLexer::~MCAsmLexer() {
diff --git a/contrib/llvm/lib/MC/MCParser/MCAsmParser.cpp b/contrib/llvm/lib/MC/MCParser/MCAsmParser.cpp
index 6e1ebad..e417aa9 100644
--- a/contrib/llvm/lib/MC/MCParser/MCAsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/MCAsmParser.cpp
@@ -17,7 +17,7 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-MCAsmParser::MCAsmParser() : TargetParser(0), ShowParsedOperands(0) {
+MCAsmParser::MCAsmParser() : TargetParser(nullptr), ShowParsedOperands(0) {
}
MCAsmParser::~MCAsmParser() {
diff --git a/contrib/llvm/lib/MC/MCPureStreamer.cpp b/contrib/llvm/lib/MC/MCPureStreamer.cpp
deleted file mode 100644
index f7bf002..0000000
--- a/contrib/llvm/lib/MC/MCPureStreamer.cpp
+++ /dev/null
@@ -1,235 +0,0 @@
-//===- lib/MC/MCPureStreamer.cpp - MC "Pure" Object Output ----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCAssembler.h"
-#include "llvm/MC/MCCodeEmitter.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCObjectFileInfo.h"
-#include "llvm/MC/MCObjectStreamer.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Support/ErrorHandling.h"
-
-using namespace llvm;
-
-namespace {
-
-class MCPureStreamer : public MCObjectStreamer {
-private:
- virtual void EmitInstToFragment(const MCInst &Inst);
- virtual void EmitInstToData(const MCInst &Inst);
-
-public:
- MCPureStreamer(MCContext &Context, MCAsmBackend &TAB, raw_ostream &OS,
- MCCodeEmitter *Emitter)
- : MCObjectStreamer(Context, 0, TAB, OS, Emitter) {}
-
- /// @name MCStreamer Interface
- /// @{
-
- virtual void InitSections();
- virtual void InitToTextSection();
- virtual void EmitLabel(MCSymbol *Symbol);
- virtual void EmitDebugLabel(MCSymbol *Symbol);
- virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
- uint64_t Size = 0, unsigned ByteAlignment = 0);
- virtual void EmitBytes(StringRef Data);
- virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
- unsigned ValueSize = 1,
- unsigned MaxBytesToEmit = 0);
- virtual void EmitCodeAlignment(unsigned ByteAlignment,
- unsigned MaxBytesToEmit = 0);
- virtual bool EmitValueToOffset(const MCExpr *Offset,
- unsigned char Value = 0);
- virtual void FinishImpl();
-
-
- virtual bool EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) {
- report_fatal_error("unsupported directive in pure streamer");
- return false;
- }
- virtual void EmitAssemblerFlag(MCAssemblerFlag Flag) {
- report_fatal_error("unsupported directive in pure streamer");
- }
- virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
- uint64_t Size, unsigned ByteAlignment = 0) {
- report_fatal_error("unsupported directive in pure streamer");
- }
- virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {
- report_fatal_error("unsupported directive in pure streamer");
- }
- virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment) {
- report_fatal_error("unsupported directive in pure streamer");
- }
- virtual void EmitThumbFunc(MCSymbol *Func) {
- report_fatal_error("unsupported directive in pure streamer");
- }
- virtual void BeginCOFFSymbolDef(const MCSymbol *Symbol) {
- report_fatal_error("unsupported directive in pure streamer");
- }
- virtual void EmitCOFFSymbolStorageClass(int StorageClass) {
- report_fatal_error("unsupported directive in pure streamer");
- }
- virtual void EmitCOFFSymbolType(int Type) {
- report_fatal_error("unsupported directive in pure streamer");
- }
- virtual void EndCOFFSymbolDef() {
- report_fatal_error("unsupported directive in pure streamer");
- }
- virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {
- report_fatal_error("unsupported directive in pure streamer");
- }
- virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment) {
- report_fatal_error("unsupported directive in pure streamer");
- }
- virtual void EmitFileDirective(StringRef Filename) {
- report_fatal_error("unsupported directive in pure streamer");
- }
- virtual void EmitIdent(StringRef IdentString) {
- report_fatal_error("unsupported directive in pure streamer");
- }
- virtual bool EmitDwarfFileDirective(unsigned FileNo, StringRef Directory,
- StringRef Filename, unsigned CUID = 0) {
- report_fatal_error("unsupported directive in pure streamer");
- }
-};
-
-} // end anonymous namespace.
-
-void MCPureStreamer::InitSections() {
- InitToTextSection();
-}
-
-void MCPureStreamer::InitToTextSection() {
- SwitchSection(getContext().getObjectFileInfo()->getTextSection());
-}
-
-void MCPureStreamer::EmitLabel(MCSymbol *Symbol) {
- assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
- assert(!Symbol->isVariable() && "Cannot emit a variable symbol!");
- assert(getCurrentSection().first && "Cannot emit before setting section!");
-
- AssignSection(Symbol, getCurrentSection().first);
-
- MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
-
- // We have to create a new fragment if this is an atom defining symbol,
- // fragments cannot span atoms.
- if (getAssembler().isSymbolLinkerVisible(SD.getSymbol()))
- insert(new MCDataFragment());
-
- // FIXME: This is wasteful, we don't necessarily need to create a data
- // fragment. Instead, we should mark the symbol as pointing into the data
- // fragment if it exists, otherwise we should just queue the label and set its
- // fragment pointer when we emit the next fragment.
- MCDataFragment *F = getOrCreateDataFragment();
- assert(!SD.getFragment() && "Unexpected fragment on symbol data!");
- SD.setFragment(F);
- SD.setOffset(F->getContents().size());
-}
-
-
-void MCPureStreamer::EmitDebugLabel(MCSymbol *Symbol) {
- EmitLabel(Symbol);
-}
-
-void MCPureStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
- uint64_t Size, unsigned ByteAlignment) {
- report_fatal_error("not yet implemented in pure streamer");
-}
-
-void MCPureStreamer::EmitBytes(StringRef Data) {
- // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
- // MCObjectStreamer.
- getOrCreateDataFragment()->getContents().append(Data.begin(), Data.end());
-}
-
-void MCPureStreamer::EmitValueToAlignment(unsigned ByteAlignment,
- int64_t Value, unsigned ValueSize,
- unsigned MaxBytesToEmit) {
- // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
- // MCObjectStreamer.
- if (MaxBytesToEmit == 0)
- MaxBytesToEmit = ByteAlignment;
- insert(new MCAlignFragment(ByteAlignment, Value, ValueSize, MaxBytesToEmit));
-
- // Update the maximum alignment on the current section if necessary.
- if (ByteAlignment > getCurrentSectionData()->getAlignment())
- getCurrentSectionData()->setAlignment(ByteAlignment);
-}
-
-void MCPureStreamer::EmitCodeAlignment(unsigned ByteAlignment,
- unsigned MaxBytesToEmit) {
- // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
- // MCObjectStreamer.
- if (MaxBytesToEmit == 0)
- MaxBytesToEmit = ByteAlignment;
- MCAlignFragment *F = new MCAlignFragment(ByteAlignment, 0, 1, MaxBytesToEmit);
- insert(F);
- F->setEmitNops(true);
-
- // Update the maximum alignment on the current section if necessary.
- if (ByteAlignment > getCurrentSectionData()->getAlignment())
- getCurrentSectionData()->setAlignment(ByteAlignment);
-}
-
-bool MCPureStreamer::EmitValueToOffset(const MCExpr *Offset,
- unsigned char Value) {
- insert(new MCOrgFragment(*Offset, Value));
- return false;
-}
-
-void MCPureStreamer::EmitInstToFragment(const MCInst &Inst) {
- MCRelaxableFragment *IF = new MCRelaxableFragment(Inst);
- insert(IF);
-
- // Add the fixups and data.
- //
- // FIXME: Revisit this design decision when relaxation is done, we may be
- // able to get away with not storing any extra data in the MCInst.
- SmallVector<MCFixup, 4> Fixups;
- SmallString<256> Code;
- raw_svector_ostream VecOS(Code);
- getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
- VecOS.flush();
-
- IF->getContents() = Code;
- IF->getFixups() = Fixups;
-}
-
-void MCPureStreamer::EmitInstToData(const MCInst &Inst) {
- MCDataFragment *DF = getOrCreateDataFragment();
-
- SmallVector<MCFixup, 4> Fixups;
- SmallString<256> Code;
- raw_svector_ostream VecOS(Code);
- getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
- VecOS.flush();
-
- // Add the fixups and data.
- for (unsigned i = 0, e = Fixups.size(); i != e; ++i) {
- Fixups[i].setOffset(Fixups[i].getOffset() + DF->getContents().size());
- DF->getFixups().push_back(Fixups[i]);
- }
- DF->getContents().append(Code.begin(), Code.end());
-}
-
-void MCPureStreamer::FinishImpl() {
- // FIXME: Handle DWARF tables?
-
- this->MCObjectStreamer::FinishImpl();
-}
-
-MCStreamer *llvm::createPureStreamer(MCContext &Context, MCAsmBackend &MAB,
- raw_ostream &OS, MCCodeEmitter *CE) {
- return new MCPureStreamer(Context, MAB, OS, CE);
-}
diff --git a/contrib/llvm/lib/MC/MCRelocationInfo.cpp b/contrib/llvm/lib/MC/MCRelocationInfo.cpp
index 53c48ded..a00c009 100644
--- a/contrib/llvm/lib/MC/MCRelocationInfo.cpp
+++ b/contrib/llvm/lib/MC/MCRelocationInfo.cpp
@@ -8,9 +8,9 @@
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCRelocationInfo.h"
+#include "llvm-c/Disassembler.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/TargetRegistry.h"
-#include "llvm-c/Disassembler.h"
using namespace llvm;
@@ -23,14 +23,14 @@ MCRelocationInfo::~MCRelocationInfo() {
const MCExpr *
MCRelocationInfo::createExprForRelocation(object::RelocationRef Rel) {
- return 0;
+ return nullptr;
}
const MCExpr *
MCRelocationInfo::createExprForCAPIVariantKind(const MCExpr *SubExpr,
unsigned VariantKind) {
if (VariantKind != LLVMDisassembler_VariantKind_None)
- return 0;
+ return nullptr;
return SubExpr;
}
diff --git a/contrib/llvm/lib/MC/MCSectionCOFF.cpp b/contrib/llvm/lib/MC/MCSectionCOFF.cpp
index 64aa2c5..fc2bd36 100644
--- a/contrib/llvm/lib/MC/MCSectionCOFF.cpp
+++ b/contrib/llvm/lib/MC/MCSectionCOFF.cpp
@@ -20,6 +20,8 @@ MCSectionCOFF::~MCSectionCOFF() {} // anchor.
// should be printed before the section name
bool MCSectionCOFF::ShouldOmitSectionDirective(StringRef Name,
const MCAsmInfo &MAI) const {
+ if (COMDATSymbol)
+ return false;
// FIXME: Does .section .bss/.data/.text work everywhere??
if (Name == ".text" || Name == ".data" || Name == ".bss")
@@ -28,14 +30,9 @@ bool MCSectionCOFF::ShouldOmitSectionDirective(StringRef Name,
return false;
}
-void MCSectionCOFF::setSelection(int Selection,
- const MCSectionCOFF *Assoc) const {
+void MCSectionCOFF::setSelection(int Selection) const {
assert(Selection != 0 && "invalid COMDAT selection type");
- assert((Selection == COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE) ==
- (Assoc != 0) &&
- "associative COMDAT section must have an associated section");
this->Selection = Selection;
- this->Assoc = Assoc;
Characteristics |= COFF::IMAGE_SCN_LNK_COMDAT;
}
@@ -52,42 +49,50 @@ void MCSectionCOFF::PrintSwitchToSection(const MCAsmInfo &MAI,
OS << "\t.section\t" << getSectionName() << ",\"";
if (getKind().isText())
OS << 'x';
+ else if (getKind().isBSS())
+ OS << 'b';
if (getKind().isWriteable())
OS << 'w';
else
OS << 'r';
if (getCharacteristics() & COFF::IMAGE_SCN_MEM_DISCARDABLE)
OS << 'n';
- OS << "\"\n";
+ if (getCharacteristics() & COFF::IMAGE_SCN_CNT_INITIALIZED_DATA)
+ OS << 'd';
+ OS << '"';
if (getCharacteristics() & COFF::IMAGE_SCN_LNK_COMDAT) {
+ OS << ",";
switch (Selection) {
case COFF::IMAGE_COMDAT_SELECT_NODUPLICATES:
- OS << "\t.linkonce one_only\n";
+ OS << "one_only,";
break;
case COFF::IMAGE_COMDAT_SELECT_ANY:
- OS << "\t.linkonce discard\n";
+ OS << "discard,";
break;
case COFF::IMAGE_COMDAT_SELECT_SAME_SIZE:
- OS << "\t.linkonce same_size\n";
+ OS << "same_size,";
break;
case COFF::IMAGE_COMDAT_SELECT_EXACT_MATCH:
- OS << "\t.linkonce same_contents\n";
+ OS << "same_contents,";
break;
case COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE:
- OS << "\t.linkonce associative " << Assoc->getSectionName() << "\n";
+ OS << "associative,";
break;
case COFF::IMAGE_COMDAT_SELECT_LARGEST:
- OS << "\t.linkonce largest\n";
+ OS << "largest,";
break;
case COFF::IMAGE_COMDAT_SELECT_NEWEST:
- OS << "\t.linkonce newest\n";
+ OS << "newest,";
break;
default:
assert (0 && "unsupported COFF selection type");
break;
}
+ assert(COMDATSymbol);
+ OS << *COMDATSymbol;
}
+ OS << '\n';
}
bool MCSectionCOFF::UseCodeAlign() const {
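With the PrintSwitchToSection change above, COFF COMDAT sections are no longer announced with a separate .linkonce line; the selection keyword and the COMDAT symbol are appended to the .section directive itself, right after the quoted flag string. A standalone sketch of just the flag-letter part of that string, with the section kind and characteristics reduced to booleans (the letter assignments are copied from the hunk; everything else is hypothetical):

    #include <string>

    std::string coffSectionFlagString(bool IsText, bool IsBSS, bool IsWriteable,
                                      bool IsDiscardable, bool IsInitializedData) {
      std::string Flags;
      if (IsText)
        Flags += 'x';
      else if (IsBSS)
        Flags += 'b';
      Flags += IsWriteable ? 'w' : 'r';
      if (IsDiscardable)
        Flags += 'n';
      if (IsInitializedData)
        Flags += 'd';
      return Flags;
    }
    // e.g. a non-writeable text section yields "xr"; for a COMDAT the directive
    // then appends ",discard,<symbol>" or another selection keyword (illustrative).
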
diff --git a/contrib/llvm/lib/MC/MCSectionMachO.cpp b/contrib/llvm/lib/MC/MCSectionMachO.cpp
index 8704513..46beda4 100644
--- a/contrib/llvm/lib/MC/MCSectionMachO.cpp
+++ b/contrib/llvm/lib/MC/MCSectionMachO.cpp
@@ -18,9 +18,9 @@ using namespace llvm;
/// section type list.
static const struct {
const char *AssemblerName, *EnumName;
-} SectionTypeDescriptors[MCSectionMachO::LAST_KNOWN_SECTION_TYPE+1] = {
+} SectionTypeDescriptors[MachO::LAST_KNOWN_SECTION_TYPE+1] = {
{ "regular", "S_REGULAR" }, // 0x00
- { 0, "S_ZEROFILL" }, // 0x01
+ { nullptr, "S_ZEROFILL" }, // 0x01
{ "cstring_literals", "S_CSTRING_LITERALS" }, // 0x02
{ "4byte_literals", "S_4BYTE_LITERALS" }, // 0x03
{ "8byte_literals", "S_8BYTE_LITERALS" }, // 0x04
@@ -31,11 +31,11 @@ static const struct {
{ "mod_init_funcs", "S_MOD_INIT_FUNC_POINTERS" }, // 0x09
{ "mod_term_funcs", "S_MOD_TERM_FUNC_POINTERS" }, // 0x0A
{ "coalesced", "S_COALESCED" }, // 0x0B
- { 0, /*FIXME??*/ "S_GB_ZEROFILL" }, // 0x0C
+ { nullptr, /*FIXME??*/ "S_GB_ZEROFILL" }, // 0x0C
{ "interposing", "S_INTERPOSING" }, // 0x0D
{ "16byte_literals", "S_16BYTE_LITERALS" }, // 0x0E
- { 0, /*FIXME??*/ "S_DTRACE_DOF" }, // 0x0F
- { 0, /*FIXME??*/ "S_LAZY_DYLIB_SYMBOL_POINTERS" }, // 0x10
+ { nullptr, /*FIXME??*/ "S_DTRACE_DOF" }, // 0x0F
+ { nullptr, /*FIXME??*/ "S_LAZY_DYLIB_SYMBOL_POINTERS" }, // 0x10
{ "thread_local_regular", "S_THREAD_LOCAL_REGULAR" }, // 0x11
{ "thread_local_zerofill", "S_THREAD_LOCAL_ZEROFILL" }, // 0x12
{ "thread_local_variables", "S_THREAD_LOCAL_VARIABLES" }, // 0x13
@@ -48,14 +48,13 @@ static const struct {
/// SectionAttrDescriptors - This is an array of descriptors for section
/// attributes. Unlike the SectionTypeDescriptors, this is not directly indexed
-/// by attribute, instead it is searched. The last entry has an AttrFlagEnd
-/// AttrFlag value.
+/// by attribute, instead it is searched.
static const struct {
unsigned AttrFlag;
const char *AssemblerName, *EnumName;
} SectionAttrDescriptors[] = {
#define ENTRY(ASMNAME, ENUM) \
- { MCSectionMachO::ENUM, ASMNAME, #ENUM },
+ { MachO::ENUM, ASMNAME, #ENUM },
ENTRY("pure_instructions", S_ATTR_PURE_INSTRUCTIONS)
ENTRY("no_toc", S_ATTR_NO_TOC)
ENTRY("strip_static_syms", S_ATTR_STRIP_STATIC_SYMS)
@@ -63,13 +62,11 @@ ENTRY("no_dead_strip", S_ATTR_NO_DEAD_STRIP)
ENTRY("live_support", S_ATTR_LIVE_SUPPORT)
ENTRY("self_modifying_code", S_ATTR_SELF_MODIFYING_CODE)
ENTRY("debug", S_ATTR_DEBUG)
-ENTRY(0 /*FIXME*/, S_ATTR_SOME_INSTRUCTIONS)
-ENTRY(0 /*FIXME*/, S_ATTR_EXT_RELOC)
-ENTRY(0 /*FIXME*/, S_ATTR_LOC_RELOC)
+ENTRY(nullptr /*FIXME*/, S_ATTR_SOME_INSTRUCTIONS)
+ENTRY(nullptr /*FIXME*/, S_ATTR_EXT_RELOC)
+ENTRY(nullptr /*FIXME*/, S_ATTR_LOC_RELOC)
#undef ENTRY
- { 0, "none", 0 }, // used if section has no attributes but has a stub size
-#define AttrFlagEnd 0xffffffff // non legal value, multiple attribute bits set
- { AttrFlagEnd, 0, 0 }
+ { 0, "none", nullptr }, // used if section has no attributes but has a stub size
};
MCSectionMachO::MCSectionMachO(StringRef Segment, StringRef Section,
@@ -102,8 +99,8 @@ void MCSectionMachO::PrintSwitchToSection(const MCAsmInfo &MAI,
return;
}
- unsigned SectionType = TAA & MCSectionMachO::SECTION_TYPE;
- assert(SectionType <= MCSectionMachO::LAST_KNOWN_SECTION_TYPE &&
+ MachO::SectionType SectionType = getType();
+ assert(SectionType <= MachO::LAST_KNOWN_SECTION_TYPE &&
"Invalid SectionType specified!");
if (SectionTypeDescriptors[SectionType].AssemblerName) {
@@ -116,7 +113,7 @@ void MCSectionMachO::PrintSwitchToSection(const MCAsmInfo &MAI,
}
// If we don't have any attributes, we're done.
- unsigned SectionAttrs = TAA & MCSectionMachO::SECTION_ATTRIBUTES;
+ unsigned SectionAttrs = TAA & MachO::SECTION_ATTRIBUTES;
if (SectionAttrs == 0) {
// If we have a S_SYMBOL_STUBS size specified, print it along with 'none' as
// the attribute specifier.
@@ -155,21 +152,13 @@ void MCSectionMachO::PrintSwitchToSection(const MCAsmInfo &MAI,
}
bool MCSectionMachO::UseCodeAlign() const {
- return hasAttribute(MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS);
+ return hasAttribute(MachO::S_ATTR_PURE_INSTRUCTIONS);
}
bool MCSectionMachO::isVirtualSection() const {
- return (getType() == MCSectionMachO::S_ZEROFILL ||
- getType() == MCSectionMachO::S_GB_ZEROFILL ||
- getType() == MCSectionMachO::S_THREAD_LOCAL_ZEROFILL);
-}
-
-/// StripSpaces - This removes leading and trailing spaces from the StringRef.
-static void StripSpaces(StringRef &Str) {
- while (!Str.empty() && isspace(static_cast<unsigned char>(Str[0])))
- Str = Str.substr(1);
- while (!Str.empty() && isspace(static_cast<unsigned char>(Str.back())))
- Str = Str.substr(0, Str.size()-1);
+ return (getType() == MachO::S_ZEROFILL ||
+ getType() == MachO::S_GB_ZEROFILL ||
+ getType() == MachO::S_THREAD_LOCAL_ZEROFILL);
}
/// ParseSectionSpecifier - Parse the section specifier indicated by "Spec".
@@ -184,118 +173,95 @@ std::string MCSectionMachO::ParseSectionSpecifier(StringRef Spec, // In.
bool &TAAParsed, // Out.
unsigned &StubSize) { // Out.
TAAParsed = false;
- // Find the first comma.
- std::pair<StringRef, StringRef> Comma = Spec.split(',');
-
- // If there is no comma, we fail.
- if (Comma.second.empty())
- return "mach-o section specifier requires a segment and section "
- "separated by a comma";
- // Capture segment, remove leading and trailing whitespace.
- Segment = Comma.first;
- StripSpaces(Segment);
+ SmallVector<StringRef, 5> SplitSpec;
+ Spec.split(SplitSpec, ",");
+ // Remove leading and trailing whitespace.
+ auto GetEmptyOrTrim = [&SplitSpec](size_t Idx) -> StringRef {
+ return SplitSpec.size() > Idx ? SplitSpec[Idx].trim() : StringRef();
+ };
+ Segment = GetEmptyOrTrim(0);
+ Section = GetEmptyOrTrim(1);
+ StringRef SectionType = GetEmptyOrTrim(2);
+ StringRef Attrs = GetEmptyOrTrim(3);
+ StringRef StubSizeStr = GetEmptyOrTrim(4);
// Verify that the segment is present and not too long.
if (Segment.empty() || Segment.size() > 16)
return "mach-o section specifier requires a segment whose length is "
"between 1 and 16 characters";
- // Split the section name off from any attributes if present.
- Comma = Comma.second.split(',');
-
- // Capture section, remove leading and trailing whitespace.
- Section = Comma.first;
- StripSpaces(Section);
-
// Verify that the section is present and not too long.
- if (Section.empty() || Section.size() > 16)
+ if (Section.empty())
+ return "mach-o section specifier requires a segment and section "
+ "separated by a comma";
+
+ if (Section.size() > 16)
return "mach-o section specifier requires a section whose length is "
"between 1 and 16 characters";
// If there is no comma after the section, we're done.
TAA = 0;
StubSize = 0;
- if (Comma.second.empty())
+ if (SectionType.empty())
return "";
- // Otherwise, we need to parse the section type and attributes.
- Comma = Comma.second.split(',');
-
- // Get the section type.
- StringRef SectionType = Comma.first;
- StripSpaces(SectionType);
-
// Figure out which section type it is.
- unsigned TypeID;
- for (TypeID = 0; TypeID !=MCSectionMachO::LAST_KNOWN_SECTION_TYPE+1; ++TypeID)
- if (SectionTypeDescriptors[TypeID].AssemblerName &&
- SectionType == SectionTypeDescriptors[TypeID].AssemblerName)
- break;
+ auto TypeDescriptor = std::find_if(
+ std::begin(SectionTypeDescriptors), std::end(SectionTypeDescriptors),
+ [&](decltype(*SectionTypeDescriptors) &Descriptor) {
+ return Descriptor.AssemblerName &&
+ SectionType == Descriptor.AssemblerName;
+ });
// If we didn't find the section type, reject it.
- if (TypeID > MCSectionMachO::LAST_KNOWN_SECTION_TYPE)
+ if (TypeDescriptor == std::end(SectionTypeDescriptors))
return "mach-o section specifier uses an unknown section type";
// Remember the TypeID.
- TAA = TypeID;
+ TAA = TypeDescriptor - std::begin(SectionTypeDescriptors);
TAAParsed = true;
// If we have no comma after the section type, there are no attributes.
- if (Comma.second.empty()) {
+ if (Attrs.empty()) {
// S_SYMBOL_STUBS always require a symbol stub size specifier.
- if (TAA == MCSectionMachO::S_SYMBOL_STUBS)
+ if (TAA == MachO::S_SYMBOL_STUBS)
return "mach-o section specifier of type 'symbol_stubs' requires a size "
"specifier";
return "";
}
- // Otherwise, we do have some attributes. Split off the size specifier if
- // present.
- Comma = Comma.second.split(',');
- StringRef Attrs = Comma.first;
-
// The attribute list is a '+' separated list of attributes.
- std::pair<StringRef, StringRef> Plus = Attrs.split('+');
-
- while (1) {
- StringRef Attr = Plus.first;
- StripSpaces(Attr);
-
- // Look up the attribute.
- for (unsigned i = 0; ; ++i) {
- if (SectionAttrDescriptors[i].AttrFlag == AttrFlagEnd)
- return "mach-o section specifier has invalid attribute";
-
- if (SectionAttrDescriptors[i].AssemblerName &&
- Attr == SectionAttrDescriptors[i].AssemblerName) {
- TAA |= SectionAttrDescriptors[i].AttrFlag;
- break;
- }
- }
-
- if (Plus.second.empty()) break;
- Plus = Plus.second.split('+');
- };
+ SmallVector<StringRef, 1> SectionAttrs;
+ Attrs.split(SectionAttrs, "+", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
+
+ for (StringRef &SectionAttr : SectionAttrs) {
+ auto AttrDescriptorI = std::find_if(
+ std::begin(SectionAttrDescriptors), std::end(SectionAttrDescriptors),
+ [&](decltype(*SectionAttrDescriptors) &Descriptor) {
+ return Descriptor.AssemblerName &&
+ SectionAttr.trim() == Descriptor.AssemblerName;
+ });
+ if (AttrDescriptorI == std::end(SectionAttrDescriptors))
+ return "mach-o section specifier has invalid attribute";
+
+ TAA |= AttrDescriptorI->AttrFlag;
+ }
// Okay, we've parsed the section attributes, see if we have a stub size spec.
- if (Comma.second.empty()) {
+ if (StubSizeStr.empty()) {
// S_SYMBOL_STUBS always require a symbol stub size specifier.
- if (TAA == MCSectionMachO::S_SYMBOL_STUBS)
+ if (TAA == MachO::S_SYMBOL_STUBS)
return "mach-o section specifier of type 'symbol_stubs' requires a size "
"specifier";
return "";
}
// If we have a stub size spec, we must have a sectiontype of S_SYMBOL_STUBS.
- if ((TAA & MCSectionMachO::SECTION_TYPE) != MCSectionMachO::S_SYMBOL_STUBS)
+ if ((TAA & MachO::SECTION_TYPE) != MachO::S_SYMBOL_STUBS)
return "mach-o section specifier cannot have a stub size specified because "
"it does not have type 'symbol_stubs'";
- // Okay, if we do, it must be a number.
- StringRef StubSizeStr = Comma.second;
- StripSpaces(StubSizeStr);
-
// Convert the stub size from a string to an integer.
if (StubSizeStr.getAsInteger(0, StubSize))
return "mach-o section specifier has a malformed stub size";
diff --git a/contrib/llvm/lib/MC/MCStreamer.cpp b/contrib/llvm/lib/MC/MCStreamer.cpp
index 2e1d69b..46e80cc 100644
--- a/contrib/llvm/lib/MC/MCStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCStreamer.cpp
@@ -14,8 +14,10 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCWin64EH.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/raw_ostream.h"
@@ -24,30 +26,32 @@ using namespace llvm;
// Pin the vtables to this file.
MCTargetStreamer::~MCTargetStreamer() {}
-void ARMTargetStreamer::anchor() {}
-MCStreamer::MCStreamer(MCContext &Ctx, MCTargetStreamer *TargetStreamer)
- : Context(Ctx), TargetStreamer(TargetStreamer), EmitEHFrame(true),
- EmitDebugFrame(false), CurrentW64UnwindInfo(0), LastSymbol(0),
- AutoInitSections(false) {
+MCTargetStreamer::MCTargetStreamer(MCStreamer &S) : Streamer(S) {
+ S.setTargetStreamer(this);
+}
+
+void MCTargetStreamer::emitLabel(MCSymbol *Symbol) {}
+
+void MCTargetStreamer::finish() {}
+
+void MCTargetStreamer::emitAssignment(MCSymbol *Symbol, const MCExpr *Value) {}
+
+MCStreamer::MCStreamer(MCContext &Ctx)
+ : Context(Ctx), CurrentWinFrameInfo(nullptr) {
SectionStack.push_back(std::pair<MCSectionSubPair, MCSectionSubPair>());
- if (TargetStreamer)
- TargetStreamer->setStreamer(this);
}
MCStreamer::~MCStreamer() {
- for (unsigned i = 0; i < getNumW64UnwindInfos(); ++i)
- delete W64UnwindInfos[i];
+ for (unsigned i = 0; i < getNumWinFrameInfos(); ++i)
+ delete WinFrameInfos[i];
}
void MCStreamer::reset() {
- for (unsigned i = 0; i < getNumW64UnwindInfos(); ++i)
- delete W64UnwindInfos[i];
- W64UnwindInfos.clear();
- EmitEHFrame = true;
- EmitDebugFrame = false;
- CurrentW64UnwindInfo = 0;
- LastSymbol = 0;
+ for (unsigned i = 0; i < getNumWinFrameInfos(); ++i)
+ delete WinFrameInfos[i];
+ WinFrameInfos.clear();
+ CurrentWinFrameInfo = nullptr;
SectionStack.clear();
SectionStack.push_back(std::pair<MCSectionSubPair, MCSectionSubPair>());
}
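With the constructor change above, a target streamer now attaches itself to its parent streamer. A sketch of a hypothetical subclass; FooTargetStreamer is illustrative, only the MCTargetStreamer hooks shown in this hunk are assumed:

    #include "llvm/MC/MCStreamer.h"
    using namespace llvm;

    namespace {
    // Hypothetical target streamer; the base constructor registers it with S,
    // so MCStreamer::EmitLabel()/Finish() forward to the overrides below.
    class FooTargetStreamer : public MCTargetStreamer {
    public:
      explicit FooTargetStreamer(MCStreamer &S) : MCTargetStreamer(S) {}
      void emitLabel(MCSymbol *Symbol) override {
        // React to every label emitted through the parent streamer.
      }
      void finish() override {
        // Flush any target-specific state before FinishImpl() runs.
      }
    };
    } // end anonymous namespace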
@@ -66,8 +70,8 @@ const MCExpr *MCStreamer::BuildSymbolDiff(MCContext &Context,
}
const MCExpr *MCStreamer::ForceExpAbs(const MCExpr* Expr) {
- if (Context.getAsmInfo()->hasAggressiveSymbolFolding() ||
- isa<MCSymbolRefExpr>(Expr))
+ assert(!isa<MCSymbolRefExpr>(Expr));
+ if (Context.getAsmInfo()->hasAggressiveSymbolFolding())
return Expr;
MCSymbol *ABS = Context.CreateTempSymbol();
@@ -80,11 +84,12 @@ raw_ostream &MCStreamer::GetCommentOS() {
return nulls();
}
+void MCStreamer::emitRawComment(const Twine &T, bool TabPrefix) {}
+
void MCStreamer::generateCompactUnwindEncodings(MCAsmBackend *MAB) {
- for (std::vector<MCDwarfFrameInfo>::iterator I = FrameInfos.begin(),
- E = FrameInfos.end(); I != E; ++I)
- I->CompactUnwindEncoding =
- (MAB ? MAB->generateCompactUnwindEncoding(I->Instructions) : 0);
+ for (auto &FI : DwarfFrameInfos)
+ FI.CompactUnwindEncoding =
+ (MAB ? MAB->generateCompactUnwindEncoding(FI.Instructions) : 0);
}
void MCStreamer::EmitDwarfSetLineAddr(int64_t LineDelta,
@@ -138,12 +143,20 @@ void MCStreamer::EmitAbsValue(const MCExpr *Value, unsigned Size) {
}
-void MCStreamer::EmitValue(const MCExpr *Value, unsigned Size) {
- EmitValueImpl(Value, Size);
+void MCStreamer::EmitValue(const MCExpr *Value, unsigned Size,
+ const SMLoc &Loc) {
+ EmitValueImpl(Value, Size, Loc);
}
-void MCStreamer::EmitSymbolValue(const MCSymbol *Sym, unsigned Size) {
- EmitValueImpl(MCSymbolRefExpr::Create(Sym, getContext()), Size);
+void MCStreamer::EmitSymbolValue(const MCSymbol *Sym, unsigned Size,
+ bool IsSectionRelative) {
+ assert((!IsSectionRelative || Size == 4) &&
+ "SectionRelative value requires 4-bytes");
+
+ if (!IsSectionRelative)
+ EmitValueImpl(MCSymbolRefExpr::Create(Sym, getContext()), Size);
+ else
+ EmitCOFFSecRel32(Sym);
}
void MCStreamer::EmitGPRel64Value(const MCExpr *Value) {
@@ -167,10 +180,10 @@ void MCStreamer::EmitZeros(uint64_t NumBytes) {
EmitFill(NumBytes, 0);
}
-bool MCStreamer::EmitDwarfFileDirective(unsigned FileNo,
- StringRef Directory,
- StringRef Filename, unsigned CUID) {
- return getContext().GetDwarfFile(Directory, Filename, FileNo, CUID) == 0;
+unsigned MCStreamer::EmitDwarfFileDirective(unsigned FileNo,
+ StringRef Directory,
+ StringRef Filename, unsigned CUID) {
+ return getContext().GetDwarfFile(Directory, Filename, FileNo, CUID);
}
void MCStreamer::EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
@@ -182,14 +195,24 @@ void MCStreamer::EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
Discriminator);
}
-MCDwarfFrameInfo *MCStreamer::getCurrentFrameInfo() {
- if (FrameInfos.empty())
- return 0;
- return &FrameInfos.back();
+MCSymbol *MCStreamer::getDwarfLineTableSymbol(unsigned CUID) {
+ MCDwarfLineTable &Table = getContext().getMCDwarfLineTable(CUID);
+ if (!Table.getLabel()) {
+ StringRef Prefix = Context.getAsmInfo()->getPrivateGlobalPrefix();
+ Table.setLabel(
+ Context.GetOrCreateSymbol(Prefix + "line_table_start" + Twine(CUID)));
+ }
+ return Table.getLabel();
+}
+
+MCDwarfFrameInfo *MCStreamer::getCurrentDwarfFrameInfo() {
+ if (DwarfFrameInfos.empty())
+ return nullptr;
+ return &DwarfFrameInfos.back();
}
-void MCStreamer::EnsureValidFrame() {
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+void MCStreamer::EnsureValidDwarfFrame() {
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
if (!CurFrame || CurFrame->End)
report_fatal_error("No open frame");
}
@@ -198,6 +221,10 @@ void MCStreamer::EmitEHSymAttributes(const MCSymbol *Symbol,
MCSymbol *EHSymbol) {
}
+void MCStreamer::InitSections() {
+ SwitchSection(getContext().getObjectFileInfo()->getTextSection());
+}
+
void MCStreamer::AssignSection(MCSymbol *Symbol, const MCSection *Section) {
if (Section)
Symbol->setSection(*Section);
@@ -213,71 +240,51 @@ void MCStreamer::EmitLabel(MCSymbol *Symbol) {
assert(!Symbol->isVariable() && "Cannot emit a variable symbol!");
assert(getCurrentSection().first && "Cannot emit before setting section!");
AssignSection(Symbol, getCurrentSection().first);
- LastSymbol = Symbol;
-}
-void MCStreamer::EmitDebugLabel(MCSymbol *Symbol) {
- assert(!Symbol->isVariable() && "Cannot emit a variable symbol!");
- assert(getCurrentSection().first && "Cannot emit before setting section!");
- AssignSection(Symbol, getCurrentSection().first);
- LastSymbol = Symbol;
+ MCTargetStreamer *TS = getTargetStreamer();
+ if (TS)
+ TS->emitLabel(Symbol);
}
void MCStreamer::EmitCompactUnwindEncoding(uint32_t CompactUnwindEncoding) {
- EnsureValidFrame();
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ EnsureValidDwarfFrame();
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
CurFrame->CompactUnwindEncoding = CompactUnwindEncoding;
}
void MCStreamer::EmitCFISections(bool EH, bool Debug) {
assert(EH || Debug);
- EmitEHFrame = EH;
- EmitDebugFrame = Debug;
}
-void MCStreamer::EmitCFIStartProc() {
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+void MCStreamer::EmitCFIStartProc(bool IsSimple) {
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
if (CurFrame && !CurFrame->End)
report_fatal_error("Starting a frame before finishing the previous one!");
MCDwarfFrameInfo Frame;
+ Frame.IsSimple = IsSimple;
EmitCFIStartProcImpl(Frame);
- FrameInfos.push_back(Frame);
+ DwarfFrameInfos.push_back(Frame);
}
void MCStreamer::EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame) {
}
-void MCStreamer::RecordProcStart(MCDwarfFrameInfo &Frame) {
- Frame.Function = LastSymbol;
- // If the function is externally visible, we need to create a local
- // symbol to avoid relocations.
- StringRef Prefix = getContext().getAsmInfo()->getPrivateGlobalPrefix();
- if (LastSymbol && LastSymbol->getName().startswith(Prefix)) {
- Frame.Begin = LastSymbol;
- } else {
- Frame.Begin = getContext().CreateTempSymbol();
- EmitLabel(Frame.Begin);
- }
-}
-
void MCStreamer::EmitCFIEndProc() {
- EnsureValidFrame();
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ EnsureValidDwarfFrame();
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
EmitCFIEndProcImpl(*CurFrame);
}
void MCStreamer::EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame) {
-}
-
-void MCStreamer::RecordProcEnd(MCDwarfFrameInfo &Frame) {
- Frame.End = getContext().CreateTempSymbol();
- EmitLabel(Frame.End);
+ // Put a dummy non-null value in Frame.End to mark that this frame has been
+ // closed.
+ Frame.End = (MCSymbol *) 1;
}
MCSymbol *MCStreamer::EmitCFICommon() {
- EnsureValidFrame();
+ EnsureValidDwarfFrame();
MCSymbol *Label = getContext().CreateTempSymbol();
EmitLabel(Label);
return Label;
@@ -287,7 +294,7 @@ void MCStreamer::EmitCFIDefCfa(int64_t Register, int64_t Offset) {
MCSymbol *Label = EmitCFICommon();
MCCFIInstruction Instruction =
MCCFIInstruction::createDefCfa(Label, Register, Offset);
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
CurFrame->Instructions.push_back(Instruction);
}
@@ -295,7 +302,7 @@ void MCStreamer::EmitCFIDefCfaOffset(int64_t Offset) {
MCSymbol *Label = EmitCFICommon();
MCCFIInstruction Instruction =
MCCFIInstruction::createDefCfaOffset(Label, Offset);
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
CurFrame->Instructions.push_back(Instruction);
}
@@ -303,7 +310,7 @@ void MCStreamer::EmitCFIAdjustCfaOffset(int64_t Adjustment) {
MCSymbol *Label = EmitCFICommon();
MCCFIInstruction Instruction =
MCCFIInstruction::createAdjustCfaOffset(Label, Adjustment);
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
CurFrame->Instructions.push_back(Instruction);
}
@@ -311,7 +318,7 @@ void MCStreamer::EmitCFIDefCfaRegister(int64_t Register) {
MCSymbol *Label = EmitCFICommon();
MCCFIInstruction Instruction =
MCCFIInstruction::createDefCfaRegister(Label, Register);
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
CurFrame->Instructions.push_back(Instruction);
}
@@ -319,7 +326,7 @@ void MCStreamer::EmitCFIOffset(int64_t Register, int64_t Offset) {
MCSymbol *Label = EmitCFICommon();
MCCFIInstruction Instruction =
MCCFIInstruction::createOffset(Label, Register, Offset);
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
CurFrame->Instructions.push_back(Instruction);
}
@@ -327,21 +334,21 @@ void MCStreamer::EmitCFIRelOffset(int64_t Register, int64_t Offset) {
MCSymbol *Label = EmitCFICommon();
MCCFIInstruction Instruction =
MCCFIInstruction::createRelOffset(Label, Register, Offset);
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
CurFrame->Instructions.push_back(Instruction);
}
void MCStreamer::EmitCFIPersonality(const MCSymbol *Sym,
unsigned Encoding) {
- EnsureValidFrame();
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ EnsureValidDwarfFrame();
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
CurFrame->Personality = Sym;
CurFrame->PersonalityEncoding = Encoding;
}
void MCStreamer::EmitCFILsda(const MCSymbol *Sym, unsigned Encoding) {
- EnsureValidFrame();
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ EnsureValidDwarfFrame();
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
CurFrame->Lsda = Sym;
CurFrame->LsdaEncoding = Encoding;
}
@@ -349,7 +356,7 @@ void MCStreamer::EmitCFILsda(const MCSymbol *Sym, unsigned Encoding) {
void MCStreamer::EmitCFIRememberState() {
MCSymbol *Label = EmitCFICommon();
MCCFIInstruction Instruction = MCCFIInstruction::createRememberState(Label);
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
CurFrame->Instructions.push_back(Instruction);
}
@@ -357,7 +364,7 @@ void MCStreamer::EmitCFIRestoreState() {
// FIXME: Error if there is no matching cfi_remember_state.
MCSymbol *Label = EmitCFICommon();
MCCFIInstruction Instruction = MCCFIInstruction::createRestoreState(Label);
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
CurFrame->Instructions.push_back(Instruction);
}
@@ -365,7 +372,7 @@ void MCStreamer::EmitCFISameValue(int64_t Register) {
MCSymbol *Label = EmitCFICommon();
MCCFIInstruction Instruction =
MCCFIInstruction::createSameValue(Label, Register);
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
CurFrame->Instructions.push_back(Instruction);
}
@@ -373,20 +380,20 @@ void MCStreamer::EmitCFIRestore(int64_t Register) {
MCSymbol *Label = EmitCFICommon();
MCCFIInstruction Instruction =
MCCFIInstruction::createRestore(Label, Register);
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
CurFrame->Instructions.push_back(Instruction);
}
void MCStreamer::EmitCFIEscape(StringRef Values) {
MCSymbol *Label = EmitCFICommon();
MCCFIInstruction Instruction = MCCFIInstruction::createEscape(Label, Values);
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
CurFrame->Instructions.push_back(Instruction);
}
void MCStreamer::EmitCFISignalFrame() {
- EnsureValidFrame();
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ EnsureValidDwarfFrame();
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
CurFrame->IsSignalFrame = true;
}
@@ -394,7 +401,7 @@ void MCStreamer::EmitCFIUndefined(int64_t Register) {
MCSymbol *Label = EmitCFICommon();
MCCFIInstruction Instruction =
MCCFIInstruction::createUndefined(Label, Register);
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
CurFrame->Instructions.push_back(Instruction);
}
@@ -402,7 +409,7 @@ void MCStreamer::EmitCFIRegister(int64_t Register1, int64_t Register2) {
MCSymbol *Label = EmitCFICommon();
MCCFIInstruction Instruction =
MCCFIInstruction::createRegister(Label, Register1, Register2);
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
CurFrame->Instructions.push_back(Instruction);
}
@@ -410,164 +417,164 @@ void MCStreamer::EmitCFIWindowSave() {
MCSymbol *Label = EmitCFICommon();
MCCFIInstruction Instruction =
MCCFIInstruction::createWindowSave(Label);
- MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
CurFrame->Instructions.push_back(Instruction);
}
-void MCStreamer::setCurrentW64UnwindInfo(MCWin64EHUnwindInfo *Frame) {
- W64UnwindInfos.push_back(Frame);
- CurrentW64UnwindInfo = W64UnwindInfos.back();
-}
-
-void MCStreamer::EnsureValidW64UnwindInfo() {
- MCWin64EHUnwindInfo *CurFrame = CurrentW64UnwindInfo;
- if (!CurFrame || CurFrame->End)
+void MCStreamer::EnsureValidWinFrameInfo() {
+ if (!CurrentWinFrameInfo || CurrentWinFrameInfo->End)
report_fatal_error("No open Win64 EH frame function!");
}
-void MCStreamer::EmitWin64EHStartProc(const MCSymbol *Symbol) {
- MCWin64EHUnwindInfo *CurFrame = CurrentW64UnwindInfo;
- if (CurFrame && !CurFrame->End)
+void MCStreamer::EmitWinCFIStartProc(const MCSymbol *Symbol) {
+ if (CurrentWinFrameInfo && !CurrentWinFrameInfo->End)
report_fatal_error("Starting a function before ending the previous one!");
- MCWin64EHUnwindInfo *Frame = new MCWin64EHUnwindInfo;
+ MCWinFrameInfo *Frame = new MCWinFrameInfo;
Frame->Begin = getContext().CreateTempSymbol();
Frame->Function = Symbol;
EmitLabel(Frame->Begin);
- setCurrentW64UnwindInfo(Frame);
+ WinFrameInfos.push_back(Frame);
+ CurrentWinFrameInfo = WinFrameInfos.back();
}
-void MCStreamer::EmitWin64EHEndProc() {
- EnsureValidW64UnwindInfo();
- MCWin64EHUnwindInfo *CurFrame = CurrentW64UnwindInfo;
- if (CurFrame->ChainedParent)
+void MCStreamer::EmitWinCFIEndProc() {
+ EnsureValidWinFrameInfo();
+ if (CurrentWinFrameInfo->ChainedParent)
report_fatal_error("Not all chained regions terminated!");
- CurFrame->End = getContext().CreateTempSymbol();
- EmitLabel(CurFrame->End);
+ CurrentWinFrameInfo->End = getContext().CreateTempSymbol();
+ EmitLabel(CurrentWinFrameInfo->End);
}
-void MCStreamer::EmitWin64EHStartChained() {
- EnsureValidW64UnwindInfo();
- MCWin64EHUnwindInfo *Frame = new MCWin64EHUnwindInfo;
- MCWin64EHUnwindInfo *CurFrame = CurrentW64UnwindInfo;
+void MCStreamer::EmitWinCFIStartChained() {
+ EnsureValidWinFrameInfo();
+ MCWinFrameInfo *Frame = new MCWinFrameInfo;
Frame->Begin = getContext().CreateTempSymbol();
- Frame->Function = CurFrame->Function;
- Frame->ChainedParent = CurFrame;
+ Frame->Function = CurrentWinFrameInfo->Function;
+ Frame->ChainedParent = CurrentWinFrameInfo;
EmitLabel(Frame->Begin);
- setCurrentW64UnwindInfo(Frame);
+ WinFrameInfos.push_back(Frame);
+ CurrentWinFrameInfo = WinFrameInfos.back();
}
-void MCStreamer::EmitWin64EHEndChained() {
- EnsureValidW64UnwindInfo();
- MCWin64EHUnwindInfo *CurFrame = CurrentW64UnwindInfo;
- if (!CurFrame->ChainedParent)
+void MCStreamer::EmitWinCFIEndChained() {
+ EnsureValidWinFrameInfo();
+ if (!CurrentWinFrameInfo->ChainedParent)
report_fatal_error("End of a chained region outside a chained region!");
- CurFrame->End = getContext().CreateTempSymbol();
- EmitLabel(CurFrame->End);
- CurrentW64UnwindInfo = CurFrame->ChainedParent;
+ CurrentWinFrameInfo->End = getContext().CreateTempSymbol();
+ EmitLabel(CurrentWinFrameInfo->End);
+ CurrentWinFrameInfo = CurrentWinFrameInfo->ChainedParent;
}
-void MCStreamer::EmitWin64EHHandler(const MCSymbol *Sym, bool Unwind,
- bool Except) {
- EnsureValidW64UnwindInfo();
- MCWin64EHUnwindInfo *CurFrame = CurrentW64UnwindInfo;
- if (CurFrame->ChainedParent)
+void MCStreamer::EmitWinEHHandler(const MCSymbol *Sym, bool Unwind,
+ bool Except) {
+ EnsureValidWinFrameInfo();
+ if (CurrentWinFrameInfo->ChainedParent)
report_fatal_error("Chained unwind areas can't have handlers!");
- CurFrame->ExceptionHandler = Sym;
+ CurrentWinFrameInfo->ExceptionHandler = Sym;
if (!Except && !Unwind)
report_fatal_error("Don't know what kind of handler this is!");
if (Unwind)
- CurFrame->HandlesUnwind = true;
+ CurrentWinFrameInfo->HandlesUnwind = true;
if (Except)
- CurFrame->HandlesExceptions = true;
+ CurrentWinFrameInfo->HandlesExceptions = true;
}
-void MCStreamer::EmitWin64EHHandlerData() {
- EnsureValidW64UnwindInfo();
- MCWin64EHUnwindInfo *CurFrame = CurrentW64UnwindInfo;
- if (CurFrame->ChainedParent)
+void MCStreamer::EmitWinEHHandlerData() {
+ EnsureValidWinFrameInfo();
+ if (CurrentWinFrameInfo->ChainedParent)
report_fatal_error("Chained unwind areas can't have handlers!");
}
-void MCStreamer::EmitWin64EHPushReg(unsigned Register) {
- EnsureValidW64UnwindInfo();
- MCWin64EHUnwindInfo *CurFrame = CurrentW64UnwindInfo;
+void MCStreamer::EmitWinCFIPushReg(unsigned Register) {
+ EnsureValidWinFrameInfo();
+
MCSymbol *Label = getContext().CreateTempSymbol();
- MCWin64EHInstruction Inst(Win64EH::UOP_PushNonVol, Label, Register);
EmitLabel(Label);
- CurFrame->Instructions.push_back(Inst);
+
+ WinEH::Instruction Inst = Win64EH::Instruction::PushNonVol(Label, Register);
+ CurrentWinFrameInfo->Instructions.push_back(Inst);
}
-void MCStreamer::EmitWin64EHSetFrame(unsigned Register, unsigned Offset) {
- EnsureValidW64UnwindInfo();
- MCWin64EHUnwindInfo *CurFrame = CurrentW64UnwindInfo;
- if (CurFrame->LastFrameInst >= 0)
+void MCStreamer::EmitWinCFISetFrame(unsigned Register, unsigned Offset) {
+ EnsureValidWinFrameInfo();
+ if (CurrentWinFrameInfo->LastFrameInst >= 0)
report_fatal_error("Frame register and offset already specified!");
if (Offset & 0x0F)
report_fatal_error("Misaligned frame pointer offset!");
+ if (Offset > 240)
+ report_fatal_error("Frame offset must be less than or equal to 240!");
+
MCSymbol *Label = getContext().CreateTempSymbol();
- MCWin64EHInstruction Inst(Win64EH::UOP_SetFPReg, Label, Register, Offset);
EmitLabel(Label);
- CurFrame->LastFrameInst = CurFrame->Instructions.size();
- CurFrame->Instructions.push_back(Inst);
+
+ WinEH::Instruction Inst =
+ Win64EH::Instruction::SetFPReg(Label, Register, Offset);
+ CurrentWinFrameInfo->LastFrameInst = CurrentWinFrameInfo->Instructions.size();
+ CurrentWinFrameInfo->Instructions.push_back(Inst);
}
-void MCStreamer::EmitWin64EHAllocStack(unsigned Size) {
- EnsureValidW64UnwindInfo();
+void MCStreamer::EmitWinCFIAllocStack(unsigned Size) {
+ EnsureValidWinFrameInfo();
+ if (Size == 0)
+ report_fatal_error("Allocation size must be non-zero!");
if (Size & 7)
report_fatal_error("Misaligned stack allocation!");
- MCWin64EHUnwindInfo *CurFrame = CurrentW64UnwindInfo;
+
MCSymbol *Label = getContext().CreateTempSymbol();
- MCWin64EHInstruction Inst(Label, Size);
EmitLabel(Label);
- CurFrame->Instructions.push_back(Inst);
+
+ WinEH::Instruction Inst = Win64EH::Instruction::Alloc(Label, Size);
+ CurrentWinFrameInfo->Instructions.push_back(Inst);
}
-void MCStreamer::EmitWin64EHSaveReg(unsigned Register, unsigned Offset) {
- EnsureValidW64UnwindInfo();
+void MCStreamer::EmitWinCFISaveReg(unsigned Register, unsigned Offset) {
+ EnsureValidWinFrameInfo();
if (Offset & 7)
report_fatal_error("Misaligned saved register offset!");
- MCWin64EHUnwindInfo *CurFrame = CurrentW64UnwindInfo;
+
MCSymbol *Label = getContext().CreateTempSymbol();
- MCWin64EHInstruction Inst(
- Offset > 512*1024-8 ? Win64EH::UOP_SaveNonVolBig : Win64EH::UOP_SaveNonVol,
- Label, Register, Offset);
EmitLabel(Label);
- CurFrame->Instructions.push_back(Inst);
+
+ WinEH::Instruction Inst =
+ Win64EH::Instruction::SaveNonVol(Label, Register, Offset);
+ CurrentWinFrameInfo->Instructions.push_back(Inst);
}
-void MCStreamer::EmitWin64EHSaveXMM(unsigned Register, unsigned Offset) {
- EnsureValidW64UnwindInfo();
+void MCStreamer::EmitWinCFISaveXMM(unsigned Register, unsigned Offset) {
+ EnsureValidWinFrameInfo();
if (Offset & 0x0F)
report_fatal_error("Misaligned saved vector register offset!");
- MCWin64EHUnwindInfo *CurFrame = CurrentW64UnwindInfo;
+
MCSymbol *Label = getContext().CreateTempSymbol();
- MCWin64EHInstruction Inst(
- Offset > 512*1024-16 ? Win64EH::UOP_SaveXMM128Big : Win64EH::UOP_SaveXMM128,
- Label, Register, Offset);
EmitLabel(Label);
- CurFrame->Instructions.push_back(Inst);
+
+ WinEH::Instruction Inst =
+ Win64EH::Instruction::SaveXMM(Label, Register, Offset);
+ CurrentWinFrameInfo->Instructions.push_back(Inst);
}
-void MCStreamer::EmitWin64EHPushFrame(bool Code) {
- EnsureValidW64UnwindInfo();
- MCWin64EHUnwindInfo *CurFrame = CurrentW64UnwindInfo;
- if (CurFrame->Instructions.size() > 0)
+void MCStreamer::EmitWinCFIPushFrame(bool Code) {
+ EnsureValidWinFrameInfo();
+ if (CurrentWinFrameInfo->Instructions.size() > 0)
report_fatal_error("If present, PushMachFrame must be the first UOP");
+
MCSymbol *Label = getContext().CreateTempSymbol();
- MCWin64EHInstruction Inst(Win64EH::UOP_PushMachFrame, Label, Code);
EmitLabel(Label);
- CurFrame->Instructions.push_back(Inst);
+
+ WinEH::Instruction Inst = Win64EH::Instruction::PushMachFrame(Label, Code);
+ CurrentWinFrameInfo->Instructions.push_back(Inst);
+}
+
+void MCStreamer::EmitWinCFIEndProlog() {
+ EnsureValidWinFrameInfo();
+ CurrentWinFrameInfo->PrologEnd = getContext().CreateTempSymbol();
+ EmitLabel(CurrentWinFrameInfo->PrologEnd);
}
-void MCStreamer::EmitWin64EHEndProlog() {
- EnsureValidW64UnwindInfo();
- MCWin64EHUnwindInfo *CurFrame = CurrentW64UnwindInfo;
- CurFrame->PrologEnd = getContext().CreateTempSymbol();
- EmitLabel(CurFrame->PrologEnd);
+void MCStreamer::EmitCOFFSectionIndex(MCSymbol const *Symbol) {
}
void MCStreamer::EmitCOFFSecRel32(MCSymbol const *Symbol) {
- llvm_unreachable("This file format doesn't support this directive");
}
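Taken together, the renamed hooks are driven in the usual prologue order; a hedged sketch (the 64-byte allocation and register number 5 for RBP are illustrative, the method names and their size/alignment checks come from the code above):

    // Sketch: describe a prologue that pushes RBP, reserves 64 bytes of stack
    // and then uses RBP as the frame register. Streamer is any MCStreamer.
    static void describePrologue(llvm::MCStreamer &Streamer,
                                 const llvm::MCSymbol *Func) {
      Streamer.EmitWinCFIStartProc(Func);
      Streamer.EmitWinCFIPushReg(5);      // push rbp (Win64 register number 5)
      Streamer.EmitWinCFIAllocStack(64);  // non-zero, multiple of 8
      Streamer.EmitWinCFISetFrame(5, 0);  // 16-byte aligned offset, <= 240
      Streamer.EmitWinCFIEndProlog();
      // ... function body ...
      Streamer.EmitWinCFIEndProc();       // closes the open frame
    }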
/// EmitRawText - If this file is backed by an assembly streamer, this dumps
@@ -584,32 +591,101 @@ void MCStreamer::EmitRawText(const Twine &T) {
EmitRawTextImpl(T.toStringRef(Str));
}
-void MCStreamer::EmitFrames(MCAsmBackend *MAB, bool usingCFI) {
- if (!getNumFrameInfos())
- return;
-
- if (EmitEHFrame)
- MCDwarfFrameEmitter::Emit(*this, MAB, usingCFI, true);
-
- if (EmitDebugFrame)
- MCDwarfFrameEmitter::Emit(*this, MAB, usingCFI, false);
-}
-
-void MCStreamer::EmitW64Tables() {
- if (!getNumW64UnwindInfos())
+void MCStreamer::EmitWindowsUnwindTables() {
+ if (!getNumWinFrameInfos())
return;
MCWin64EHUnwindEmitter::Emit(*this);
}
void MCStreamer::Finish() {
- if (!FrameInfos.empty() && !FrameInfos.back().End)
+ if (!DwarfFrameInfos.empty() && !DwarfFrameInfos.back().End)
report_fatal_error("Unfinished frame!");
+ MCTargetStreamer *TS = getTargetStreamer();
+ if (TS)
+ TS->finish();
+
FinishImpl();
}
-MCSymbolData &MCStreamer::getOrCreateSymbolData(MCSymbol *Symbol) {
- report_fatal_error("Not supported!");
- return *(static_cast<MCSymbolData*>(0));
+void MCStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
+ visitUsedExpr(*Value);
+ Symbol->setVariableValue(Value);
+
+ MCTargetStreamer *TS = getTargetStreamer();
+ if (TS)
+ TS->emitAssignment(Symbol, Value);
+}
+
+void MCStreamer::visitUsedSymbol(const MCSymbol &Sym) {
+}
+
+void MCStreamer::visitUsedExpr(const MCExpr &Expr) {
+ switch (Expr.getKind()) {
+ case MCExpr::Target:
+ cast<MCTargetExpr>(Expr).visitUsedExpr(*this);
+ break;
+
+ case MCExpr::Constant:
+ break;
+
+ case MCExpr::Binary: {
+ const MCBinaryExpr &BE = cast<MCBinaryExpr>(Expr);
+ visitUsedExpr(*BE.getLHS());
+ visitUsedExpr(*BE.getRHS());
+ break;
+ }
+
+ case MCExpr::SymbolRef:
+ visitUsedSymbol(cast<MCSymbolRefExpr>(Expr).getSymbol());
+ break;
+
+ case MCExpr::Unary:
+ visitUsedExpr(*cast<MCUnaryExpr>(Expr).getSubExpr());
+ break;
+ }
}
+
+void MCStreamer::EmitInstruction(const MCInst &Inst,
+ const MCSubtargetInfo &STI) {
+ // Scan for values.
+ for (unsigned i = Inst.getNumOperands(); i--;)
+ if (Inst.getOperand(i).isExpr())
+ visitUsedExpr(*Inst.getOperand(i).getExpr());
+}
+
+void MCStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {}
+void MCStreamer::EmitThumbFunc(MCSymbol *Func) {}
+void MCStreamer::EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {}
+void MCStreamer::BeginCOFFSymbolDef(const MCSymbol *Symbol) {}
+void MCStreamer::EndCOFFSymbolDef() {}
+void MCStreamer::EmitFileDirective(StringRef Filename) {}
+void MCStreamer::EmitCOFFSymbolStorageClass(int StorageClass) {}
+void MCStreamer::EmitCOFFSymbolType(int Type) {}
+void MCStreamer::EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {}
+void MCStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) {}
+void MCStreamer::EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
+ uint64_t Size, unsigned ByteAlignment) {}
+void MCStreamer::ChangeSection(const MCSection *, const MCExpr *) {}
+void MCStreamer::EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) {}
+void MCStreamer::EmitBytes(StringRef Data) {}
+void MCStreamer::EmitValueImpl(const MCExpr *Value, unsigned Size,
+ const SMLoc &Loc) {
+ visitUsedExpr(*Value);
+}
+void MCStreamer::EmitULEB128Value(const MCExpr *Value) {}
+void MCStreamer::EmitSLEB128Value(const MCExpr *Value) {}
+void MCStreamer::EmitValueToAlignment(unsigned ByteAlignment, int64_t Value,
+ unsigned ValueSize,
+ unsigned MaxBytesToEmit) {}
+void MCStreamer::EmitCodeAlignment(unsigned ByteAlignment,
+ unsigned MaxBytesToEmit) {}
+bool MCStreamer::EmitValueToOffset(const MCExpr *Offset, unsigned char Value) {
+ return false;
+}
+void MCStreamer::EmitBundleAlignMode(unsigned AlignPow2) {}
+void MCStreamer::EmitBundleLock(bool AlignToEnd) {}
+void MCStreamer::FinishImpl() {}
+void MCStreamer::EmitBundleUnlock() {}
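The visitUsedExpr() walk above is what ties EmitAssignment() and EmitInstruction() to the symbols they reference. A small sketch, assuming A and B are existing MCSymbol pointers, Sym the symbol being defined, Ctx an MCContext and Streamer an MCStreamer:

    // Sym = A - B: the streamer records the value and walks its operands.
    const MCExpr *Diff = MCBinaryExpr::CreateSub(
        MCSymbolRefExpr::Create(A, Ctx), MCSymbolRefExpr::Create(B, Ctx), Ctx);
    Streamer.EmitAssignment(Sym, Diff);
    // EmitAssignment() calls visitUsedExpr(*Diff); the Binary case recurses
    // into both operands, reaching visitUsedSymbol() for A and for B, and the
    // value itself is stored via Sym->setVariableValue(Diff).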
diff --git a/contrib/llvm/lib/MC/MCSubtargetInfo.cpp b/contrib/llvm/lib/MC/MCSubtargetInfo.cpp
index 8d8e290..4424c91 100644
--- a/contrib/llvm/lib/MC/MCSubtargetInfo.cpp
+++ b/contrib/llvm/lib/MC/MCSubtargetInfo.cpp
@@ -24,9 +24,7 @@ MCSchedModel MCSchedModel::DefaultSchedModel; // For unknown processors.
void
MCSubtargetInfo::InitMCProcessorInfo(StringRef CPU, StringRef FS) {
SubtargetFeatures Features(FS);
- FeatureBits = Features.getFeatureBits(CPU, ProcDesc, NumProcs,
- ProcFeatures, NumFeatures);
-
+ FeatureBits = Features.getFeatureBits(CPU, ProcDesc, ProcFeatures);
InitCPUSchedModel(CPU);
}
@@ -40,16 +38,15 @@ MCSubtargetInfo::InitCPUSchedModel(StringRef CPU) {
void
MCSubtargetInfo::InitMCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS,
- const SubtargetFeatureKV *PF,
- const SubtargetFeatureKV *PD,
+ ArrayRef<SubtargetFeatureKV> PF,
+ ArrayRef<SubtargetFeatureKV> PD,
const SubtargetInfoKV *ProcSched,
const MCWriteProcResEntry *WPR,
const MCWriteLatencyEntry *WL,
const MCReadAdvanceEntry *RA,
const InstrStage *IS,
const unsigned *OC,
- const unsigned *FP,
- unsigned NF, unsigned NP) {
+ const unsigned *FP) {
TargetTriple = TT;
ProcFeatures = PF;
ProcDesc = PD;
@@ -61,8 +58,6 @@ MCSubtargetInfo::InitMCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS,
Stages = IS;
OperandCycles = OC;
ForwardingPaths = FP;
- NumFeatures = NF;
- NumProcs = NP;
InitMCProcessorInfo(CPU, FS);
}
@@ -78,8 +73,7 @@ uint64_t MCSubtargetInfo::ToggleFeature(uint64_t FB) {
/// bits. This version will also change all implied bits.
uint64_t MCSubtargetInfo::ToggleFeature(StringRef FS) {
SubtargetFeatures Features;
- FeatureBits = Features.ToggleFeature(FeatureBits, FS,
- ProcFeatures, NumFeatures);
+ FeatureBits = Features.ToggleFeature(FeatureBits, FS, ProcFeatures);
return FeatureBits;
}
@@ -88,6 +82,7 @@ const MCSchedModel *
MCSubtargetInfo::getSchedModelForCPU(StringRef CPU) const {
assert(ProcSchedModels && "Processor machine model not available!");
+ unsigned NumProcs = ProcDesc.size();
#ifndef NDEBUG
for (size_t i = 1; i < NumProcs; i++) {
assert(strcmp(ProcSchedModels[i - 1].Key, ProcSchedModels[i].Key) < 0 &&
diff --git a/contrib/llvm/lib/MC/MCSymbolizer.cpp b/contrib/llvm/lib/MC/MCSymbolizer.cpp
index 1020b74..4080e40 100644
--- a/contrib/llvm/lib/MC/MCSymbolizer.cpp
+++ b/contrib/llvm/lib/MC/MCSymbolizer.cpp
@@ -8,13 +8,8 @@
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCSymbolizer.h"
-#include "llvm/MC/MCRelocationInfo.h"
using namespace llvm;
-MCSymbolizer::MCSymbolizer(MCContext &Ctx, OwningPtr<MCRelocationInfo> &RelInfo)
- : Ctx(Ctx), RelInfo(RelInfo.take()) {
-}
-
MCSymbolizer::~MCSymbolizer() {
}
diff --git a/contrib/llvm/lib/MC/MCTargetOptions.cpp b/contrib/llvm/lib/MC/MCTargetOptions.cpp
new file mode 100644
index 0000000..efd724a
--- /dev/null
+++ b/contrib/llvm/lib/MC/MCTargetOptions.cpp
@@ -0,0 +1,20 @@
+//===- lib/MC/MCTargetOptions.cpp - MC Target Options --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/MCTargetOptions.h"
+
+namespace llvm {
+
+MCTargetOptions::MCTargetOptions()
+ : SanitizeAddress(false), MCRelaxAll(false), MCNoExecStack(false),
+ MCSaveTempLabels(false), MCUseDwarfDirectory(false),
+ ShowMCEncoding(false), ShowMCInst(false), AsmVerbose(false),
+ DwarfVersion(0) {}
+
+} // end namespace llvm
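A minimal usage sketch of the new options struct; the two fields touched here are taken from the initializer list above, everything else keeps its default:

    #include "llvm/MC/MCTargetOptions.h"
    using namespace llvm;

    MCTargetOptions makeVerboseOptions() {
      MCTargetOptions Opts;      // all flags default to false, DwarfVersion to 0
      Opts.AsmVerbose = true;    // keep comments in the emitted assembly
      Opts.DwarfVersion = 4;     // request DWARF 4 when debug info is produced
      return Opts;
    }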
diff --git a/contrib/llvm/lib/MC/MCValue.cpp b/contrib/llvm/lib/MC/MCValue.cpp
index 4393777..9dfc56e 100644
--- a/contrib/llvm/lib/MC/MCValue.cpp
+++ b/contrib/llvm/lib/MC/MCValue.cpp
@@ -10,6 +10,7 @@
#include "llvm/MC/MCValue.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -20,6 +21,11 @@ void MCValue::print(raw_ostream &OS, const MCAsmInfo *MAI) const {
return;
}
+ // FIXME: prints as a number, which isn't ideal. But the meaning will be
+ // target-specific anyway.
+ if (getRefKind())
+ OS << ':' << getRefKind() << ':';
+
getSymA()->print(OS);
if (getSymB()) {
@@ -33,6 +39,23 @@ void MCValue::print(raw_ostream &OS, const MCAsmInfo *MAI) const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MCValue::dump() const {
- print(dbgs(), 0);
+ print(dbgs(), nullptr);
}
#endif
+
+MCSymbolRefExpr::VariantKind MCValue::getAccessVariant() const {
+ const MCSymbolRefExpr *B = getSymB();
+ if (B) {
+ if (B->getKind() != MCSymbolRefExpr::VK_None)
+ llvm_unreachable("unsupported");
+ }
+
+ const MCSymbolRefExpr *A = getSymA();
+ if (!A)
+ return MCSymbolRefExpr::VK_None;
+
+ MCSymbolRefExpr::VariantKind Kind = A->getKind();
+ if (Kind == MCSymbolRefExpr::VK_WEAKREF)
+ return MCSymbolRefExpr::VK_None;
+ return Kind;
+}
diff --git a/contrib/llvm/lib/MC/MCWin64EH.cpp b/contrib/llvm/lib/MC/MCWin64EH.cpp
index b8b07d3..95e1983 100644
--- a/contrib/llvm/lib/MC/MCWin64EH.cpp
+++ b/contrib/llvm/lib/MC/MCWin64EH.cpp
@@ -15,114 +15,108 @@
#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Win64EH.h"
namespace llvm {
// NOTE: All relocations generated here are 4-byte image-relative.
-static uint8_t CountOfUnwindCodes(std::vector<MCWin64EHInstruction> &instArray){
- uint8_t count = 0;
- for (std::vector<MCWin64EHInstruction>::const_iterator I = instArray.begin(),
- E = instArray.end(); I != E; ++I) {
- switch (I->getOperation()) {
+static uint8_t CountOfUnwindCodes(std::vector<WinEH::Instruction> &Insns) {
+ uint8_t Count = 0;
+ for (const auto &I : Insns) {
+ switch (static_cast<Win64EH::UnwindOpcodes>(I.Operation)) {
case Win64EH::UOP_PushNonVol:
case Win64EH::UOP_AllocSmall:
case Win64EH::UOP_SetFPReg:
case Win64EH::UOP_PushMachFrame:
- count += 1;
+ Count += 1;
break;
case Win64EH::UOP_SaveNonVol:
case Win64EH::UOP_SaveXMM128:
- count += 2;
+ Count += 2;
break;
case Win64EH::UOP_SaveNonVolBig:
case Win64EH::UOP_SaveXMM128Big:
- count += 3;
+ Count += 3;
break;
case Win64EH::UOP_AllocLarge:
- if (I->getSize() > 512*1024-8)
- count += 3;
- else
- count += 2;
+ Count += (I.Offset > 512 * 1024 - 8) ? 3 : 2;
break;
}
}
- return count;
+ return Count;
}
-static void EmitAbsDifference(MCStreamer &streamer, MCSymbol *lhs,
- MCSymbol *rhs) {
- MCContext &context = streamer.getContext();
- const MCExpr *diff = MCBinaryExpr::CreateSub(MCSymbolRefExpr::Create(
- lhs, context),
- MCSymbolRefExpr::Create(
- rhs, context),
- context);
- streamer.EmitAbsValue(diff, 1);
-
+static void EmitAbsDifference(MCStreamer &Streamer, const MCSymbol *LHS,
+ const MCSymbol *RHS) {
+ MCContext &Context = Streamer.getContext();
+ const MCExpr *Diff =
+ MCBinaryExpr::CreateSub(MCSymbolRefExpr::Create(LHS, Context),
+ MCSymbolRefExpr::Create(RHS, Context), Context);
+ Streamer.EmitAbsValue(Diff, 1);
}
static void EmitUnwindCode(MCStreamer &streamer, MCSymbol *begin,
- MCWin64EHInstruction &inst) {
+ WinEH::Instruction &inst) {
uint8_t b2;
uint16_t w;
- b2 = (inst.getOperation() & 0x0F);
- switch (inst.getOperation()) {
+ b2 = (inst.Operation & 0x0F);
+ switch (static_cast<Win64EH::UnwindOpcodes>(inst.Operation)) {
case Win64EH::UOP_PushNonVol:
- EmitAbsDifference(streamer, inst.getLabel(), begin);
- b2 |= (inst.getRegister() & 0x0F) << 4;
+ EmitAbsDifference(streamer, inst.Label, begin);
+ b2 |= (inst.Register & 0x0F) << 4;
streamer.EmitIntValue(b2, 1);
break;
case Win64EH::UOP_AllocLarge:
- EmitAbsDifference(streamer, inst.getLabel(), begin);
- if (inst.getSize() > 512*1024-8) {
+ EmitAbsDifference(streamer, inst.Label, begin);
+ if (inst.Offset > 512 * 1024 - 8) {
b2 |= 0x10;
streamer.EmitIntValue(b2, 1);
- w = inst.getSize() & 0xFFF8;
+ w = inst.Offset & 0xFFF8;
streamer.EmitIntValue(w, 2);
- w = inst.getSize() >> 16;
+ w = inst.Offset >> 16;
} else {
streamer.EmitIntValue(b2, 1);
- w = inst.getSize() >> 3;
+ w = inst.Offset >> 3;
}
streamer.EmitIntValue(w, 2);
break;
case Win64EH::UOP_AllocSmall:
- b2 |= (((inst.getSize()-8) >> 3) & 0x0F) << 4;
- EmitAbsDifference(streamer, inst.getLabel(), begin);
+ b2 |= (((inst.Offset - 8) >> 3) & 0x0F) << 4;
+ EmitAbsDifference(streamer, inst.Label, begin);
streamer.EmitIntValue(b2, 1);
break;
case Win64EH::UOP_SetFPReg:
- EmitAbsDifference(streamer, inst.getLabel(), begin);
+ EmitAbsDifference(streamer, inst.Label, begin);
streamer.EmitIntValue(b2, 1);
break;
case Win64EH::UOP_SaveNonVol:
case Win64EH::UOP_SaveXMM128:
- b2 |= (inst.getRegister() & 0x0F) << 4;
- EmitAbsDifference(streamer, inst.getLabel(), begin);
+ b2 |= (inst.Register & 0x0F) << 4;
+ EmitAbsDifference(streamer, inst.Label, begin);
streamer.EmitIntValue(b2, 1);
- w = inst.getOffset() >> 3;
- if (inst.getOperation() == Win64EH::UOP_SaveXMM128)
+ w = inst.Offset >> 3;
+ if (inst.Operation == Win64EH::UOP_SaveXMM128)
w >>= 1;
streamer.EmitIntValue(w, 2);
break;
case Win64EH::UOP_SaveNonVolBig:
case Win64EH::UOP_SaveXMM128Big:
- b2 |= (inst.getRegister() & 0x0F) << 4;
- EmitAbsDifference(streamer, inst.getLabel(), begin);
+ b2 |= (inst.Register & 0x0F) << 4;
+ EmitAbsDifference(streamer, inst.Label, begin);
streamer.EmitIntValue(b2, 1);
- if (inst.getOperation() == Win64EH::UOP_SaveXMM128Big)
- w = inst.getOffset() & 0xFFF0;
+ if (inst.Operation == Win64EH::UOP_SaveXMM128Big)
+ w = inst.Offset & 0xFFF0;
else
- w = inst.getOffset() & 0xFFF8;
+ w = inst.Offset & 0xFFF8;
streamer.EmitIntValue(w, 2);
- w = inst.getOffset() >> 16;
+ w = inst.Offset >> 16;
streamer.EmitIntValue(w, 2);
break;
case Win64EH::UOP_PushMachFrame:
- if (inst.isPushCodeFrame())
+ if (inst.Offset == 1)
b2 |= 0x10;
- EmitAbsDifference(streamer, inst.getLabel(), begin);
+ EmitAbsDifference(streamer, inst.Label, begin);
streamer.EmitIntValue(b2, 1);
break;
}
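A worked example of the slot accounting above (each unwind code occupies one 16-bit slot in the UNWIND_INFO record); the concrete prologue is illustrative:

    // push rbp                  -> UOP_PushNonVol  : 1 slot
    // sub  rsp, 0x100000 (1MiB) -> UOP_AllocLarge  : 3 slots (> 512K-8 bytes)
    // lea  rbp, [rsp]           -> UOP_SetFPReg    : 1 slot
    // movaps [rsp+16], xmm6     -> UOP_SaveXMM128  : 2 slots
    //
    // CountOfUnwindCodes() returns 1 + 3 + 1 + 2 = 7 for this sequence, and
    // EmitUnwindCode() later writes exactly those slots, in reverse order.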
@@ -142,7 +136,7 @@ static void EmitSymbolRefWithOfs(MCStreamer &streamer,
}
static void EmitRuntimeFunction(MCStreamer &streamer,
- const MCWin64EHUnwindInfo *info) {
+ const MCWinFrameInfo *info) {
MCContext &context = streamer.getContext();
streamer.EmitValueToAlignment(4);
@@ -153,7 +147,7 @@ static void EmitRuntimeFunction(MCStreamer &streamer,
context), 4);
}
-static void EmitUnwindInfo(MCStreamer &streamer, MCWin64EHUnwindInfo *info) {
+static void EmitUnwindInfo(MCStreamer &streamer, MCWinFrameInfo *info) {
// If this UNWIND_INFO already has a symbol, it's already been emitted.
if (info->Symbol) return;
@@ -184,17 +178,16 @@ static void EmitUnwindInfo(MCStreamer &streamer, MCWin64EHUnwindInfo *info) {
uint8_t frame = 0;
if (info->LastFrameInst >= 0) {
- MCWin64EHInstruction &frameInst = info->Instructions[info->LastFrameInst];
- assert(frameInst.getOperation() == Win64EH::UOP_SetFPReg);
- frame = (frameInst.getRegister() & 0x0F) |
- (frameInst.getOffset() & 0xF0);
+ WinEH::Instruction &frameInst = info->Instructions[info->LastFrameInst];
+ assert(frameInst.Operation == Win64EH::UOP_SetFPReg);
+ frame = (frameInst.Register & 0x0F) | (frameInst.Offset & 0xF0);
}
streamer.EmitIntValue(frame, 1);
// Emit unwind instructions (in reverse order).
uint8_t numInst = info->Instructions.size();
for (uint8_t c = 0; c < numInst; ++c) {
- MCWin64EHInstruction inst = info->Instructions.back();
+ WinEH::Instruction inst = info->Instructions.back();
info->Instructions.pop_back();
EmitUnwindCode(streamer, info->Begin, inst);
}
@@ -263,7 +256,7 @@ static const MCSection *getWin64EHFuncTableSection(StringRef suffix,
}
void MCWin64EHUnwindEmitter::EmitUnwindInfo(MCStreamer &streamer,
- MCWin64EHUnwindInfo *info) {
+ MCWinFrameInfo *info) {
// Switch sections (the static function above is meant to be called from
// here and from Emit().
MCContext &context = streamer.getContext();
@@ -274,23 +267,23 @@ void MCWin64EHUnwindEmitter::EmitUnwindInfo(MCStreamer &streamer,
llvm::EmitUnwindInfo(streamer, info);
}
-void MCWin64EHUnwindEmitter::Emit(MCStreamer &streamer) {
- MCContext &context = streamer.getContext();
+void MCWin64EHUnwindEmitter::Emit(MCStreamer &Streamer) {
+ MCContext &Context = Streamer.getContext();
+
// Emit the unwind info structs first.
- for (unsigned i = 0; i < streamer.getNumW64UnwindInfos(); ++i) {
- MCWin64EHUnwindInfo &info = streamer.getW64UnwindInfo(i);
- const MCSection *xdataSect =
- getWin64EHTableSection(GetSectionSuffix(info.Function), context);
- streamer.SwitchSection(xdataSect);
- llvm::EmitUnwindInfo(streamer, &info);
+ for (const auto &CFI : Streamer.getWinFrameInfos()) {
+ const MCSection *XData =
+ getWin64EHTableSection(GetSectionSuffix(CFI->Function), Context);
+ Streamer.SwitchSection(XData);
+ EmitUnwindInfo(Streamer, CFI);
}
+
// Now emit RUNTIME_FUNCTION entries.
- for (unsigned i = 0; i < streamer.getNumW64UnwindInfos(); ++i) {
- MCWin64EHUnwindInfo &info = streamer.getW64UnwindInfo(i);
- const MCSection *pdataSect =
- getWin64EHFuncTableSection(GetSectionSuffix(info.Function), context);
- streamer.SwitchSection(pdataSect);
- EmitRuntimeFunction(streamer, &info);
+ for (const auto &CFI : Streamer.getWinFrameInfos()) {
+ const MCSection *PData =
+ getWin64EHFuncTableSection(GetSectionSuffix(CFI->Function), Context);
+ Streamer.SwitchSection(PData);
+ EmitRuntimeFunction(Streamer, CFI);
}
}
diff --git a/contrib/llvm/lib/MC/MachObjectWriter.cpp b/contrib/llvm/lib/MC/MachObjectWriter.cpp
index 8234aff..5214398 100644
--- a/contrib/llvm/lib/MC/MachObjectWriter.cpp
+++ b/contrib/llvm/lib/MC/MachObjectWriter.cpp
@@ -26,6 +26,8 @@
#include <vector>
using namespace llvm;
+#define DEBUG_TYPE "mc"
+
void MachObjectWriter::reset() {
Relocations.clear();
IndirectSymBase.clear();
@@ -82,7 +84,7 @@ uint64_t MachObjectWriter::getSymbolAddress(const MCSymbolData* SD,
MCValue Target;
- if (!S.getVariableValue()->EvaluateAsRelocatable(Target, Layout))
+ if (!S.getVariableValue()->EvaluateAsRelocatable(Target, &Layout))
report_fatal_error("unable to evaluate offset for variable '" +
S.getName() + "'");
@@ -229,7 +231,7 @@ void MachObjectWriter::WriteSection(const MCAssembler &Asm,
unsigned Flags = Section.getTypeAndAttributes();
if (SD.hasInstructions())
- Flags |= MCSectionMachO::S_ATTR_SOME_INSTRUCTIONS;
+ Flags |= MachO::S_ATTR_SOME_INSTRUCTIONS;
assert(isPowerOf2_32(SD.getAlignment()) && "Invalid alignment!");
Write32(Log2_32(SD.getAlignment()));
@@ -301,20 +303,50 @@ void MachObjectWriter::WriteDysymtabLoadCommand(uint32_t FirstLocalSymbol,
assert(OS.tell() - Start == sizeof(MachO::dysymtab_command));
}
+MachObjectWriter::MachSymbolData *
+MachObjectWriter::findSymbolData(const MCSymbol &Sym) {
+ for (auto &Entry : LocalSymbolData)
+ if (&Entry.SymbolData->getSymbol() == &Sym)
+ return &Entry;
+
+ for (auto &Entry : ExternalSymbolData)
+ if (&Entry.SymbolData->getSymbol() == &Sym)
+ return &Entry;
+
+ for (auto &Entry : UndefinedSymbolData)
+ if (&Entry.SymbolData->getSymbol() == &Sym)
+ return &Entry;
+
+ return nullptr;
+}
+
void MachObjectWriter::WriteNlist(MachSymbolData &MSD,
const MCAsmLayout &Layout) {
MCSymbolData &Data = *MSD.SymbolData;
- const MCSymbol &Symbol = Data.getSymbol();
+ const MCSymbol *Symbol = &Data.getSymbol();
+ const MCSymbol *AliasedSymbol = &Symbol->AliasedSymbol();
+ uint8_t SectionIndex = MSD.SectionIndex;
uint8_t Type = 0;
uint16_t Flags = Data.getFlags();
uint64_t Address = 0;
+ bool IsAlias = Symbol != AliasedSymbol;
+
+ MachSymbolData *AliaseeInfo;
+ if (IsAlias) {
+ AliaseeInfo = findSymbolData(*AliasedSymbol);
+ if (AliaseeInfo)
+ SectionIndex = AliaseeInfo->SectionIndex;
+ Symbol = AliasedSymbol;
+ }
// Set the N_TYPE bits. See <mach-o/nlist.h>.
//
// FIXME: Are the prebound or indirect fields possible here?
- if (Symbol.isUndefined())
+ if (IsAlias && Symbol->isUndefined())
+ Type = MachO::N_INDR;
+ else if (Symbol->isUndefined())
Type = MachO::N_UNDF;
- else if (Symbol.isAbsolute())
+ else if (Symbol->isAbsolute())
Type = MachO::N_ABS;
else
Type = MachO::N_SECT;
@@ -325,13 +357,15 @@ void MachObjectWriter::WriteNlist(MachSymbolData &MSD,
Type |= MachO::N_PEXT;
// Set external bit.
- if (Data.isExternal() || Symbol.isUndefined())
+ if (Data.isExternal() || (!IsAlias && Symbol->isUndefined()))
Type |= MachO::N_EXT;
// Compute the symbol address.
- if (Symbol.isDefined()) {
+ if (IsAlias && Symbol->isUndefined())
+ Address = AliaseeInfo->StringIndex;
+ else if (Symbol->isDefined())
Address = getSymbolAddress(&Data, Layout);
- } else if (Data.isCommon()) {
+ else if (Data.isCommon()) {
// Common symbols are encoded with the size in the address
// field, and their alignment in the flags.
Address = Data.getCommonSize();
@@ -342,18 +376,21 @@ void MachObjectWriter::WriteNlist(MachSymbolData &MSD,
assert((1U << Log2Size) == Align && "Invalid 'common' alignment!");
if (Log2Size > 15)
report_fatal_error("invalid 'common' alignment '" +
- Twine(Align) + "' for '" + Symbol.getName() + "'",
+ Twine(Align) + "' for '" + Symbol->getName() + "'",
false);
// FIXME: Keep this mask with the SymbolFlags enumeration.
Flags = (Flags & 0xF0FF) | (Log2Size << 8);
}
}
+ if (Layout.getAssembler().isThumbFunc(Symbol))
+ Flags |= SF_ThumbFunc;
+
// struct nlist (12 bytes)
Write32(MSD.StringIndex);
Write8(Type);
- Write8(MSD.SectionIndex);
+ Write8(SectionIndex);
// The Mach-O streamer uses the lowest 16-bits of the flags for the 'desc'
// value.
@@ -417,6 +454,7 @@ void MachObjectWriter::RecordRelocation(const MCAssembler &Asm,
const MCFragment *Fragment,
const MCFixup &Fixup,
MCValue Target,
+ bool &IsPCRel,
uint64_t &FixedValue) {
TargetObjectWriter->RecordRelocation(this, Asm, Layout, Fragment, Fixup,
Target, FixedValue);
@@ -437,23 +475,23 @@ void MachObjectWriter::BindIndirectSymbols(MCAssembler &Asm) {
const MCSectionMachO &Section =
cast<MCSectionMachO>(it->SectionData->getSection());
- if (Section.getType() != MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS &&
- Section.getType() != MCSectionMachO::S_LAZY_SYMBOL_POINTERS &&
- Section.getType() != MCSectionMachO::S_SYMBOL_STUBS) {
+ if (Section.getType() != MachO::S_NON_LAZY_SYMBOL_POINTERS &&
+ Section.getType() != MachO::S_LAZY_SYMBOL_POINTERS &&
+ Section.getType() != MachO::S_SYMBOL_STUBS) {
MCSymbol &Symbol = *it->Symbol;
report_fatal_error("indirect symbol '" + Symbol.getName() +
"' not in a symbol pointer or stub section");
}
}
- // Bind non lazy symbol pointers first.
+ // Bind non-lazy symbol pointers first.
unsigned IndirectIndex = 0;
for (MCAssembler::indirect_symbol_iterator it = Asm.indirect_symbol_begin(),
ie = Asm.indirect_symbol_end(); it != ie; ++it, ++IndirectIndex) {
const MCSectionMachO &Section =
cast<MCSectionMachO>(it->SectionData->getSection());
- if (Section.getType() != MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS)
+ if (Section.getType() != MachO::S_NON_LAZY_SYMBOL_POINTERS)
continue;
// Initialize the section indirect symbol base, if necessary.
@@ -469,8 +507,8 @@ void MachObjectWriter::BindIndirectSymbols(MCAssembler &Asm) {
const MCSectionMachO &Section =
cast<MCSectionMachO>(it->SectionData->getSection());
- if (Section.getType() != MCSectionMachO::S_LAZY_SYMBOL_POINTERS &&
- Section.getType() != MCSectionMachO::S_SYMBOL_STUBS)
+ if (Section.getType() != MachO::S_LAZY_SYMBOL_POINTERS &&
+ Section.getType() != MachO::S_SYMBOL_STUBS)
continue;
// Initialize the section indirect symbol base, if necessary.
@@ -515,15 +553,14 @@ ComputeSymbolTable(MCAssembler &Asm, SmallString<256> &StringTable,
// table, then sort the symbols is chosen to match 'as'. Even though it
// doesn't matter for correctness, this is important for letting us diff .o
// files.
- for (MCAssembler::symbol_iterator it = Asm.symbol_begin(),
- ie = Asm.symbol_end(); it != ie; ++it) {
- const MCSymbol &Symbol = it->getSymbol();
+ for (MCSymbolData &SD : Asm.symbols()) {
+ const MCSymbol &Symbol = SD.getSymbol();
// Ignore non-linker visible symbols.
- if (!Asm.isSymbolLinkerVisible(it->getSymbol()))
+ if (!Asm.isSymbolLinkerVisible(SD.getSymbol()))
continue;
- if (!it->isExternal() && !Symbol.isUndefined())
+ if (!SD.isExternal() && !Symbol.isUndefined())
continue;
uint64_t &Entry = StringIndexMap[Symbol.getName()];
@@ -534,7 +571,7 @@ ComputeSymbolTable(MCAssembler &Asm, SmallString<256> &StringTable,
}
MachSymbolData MSD;
- MSD.SymbolData = it;
+ MSD.SymbolData = &SD;
MSD.StringIndex = Entry;
if (Symbol.isUndefined()) {
@@ -551,15 +588,14 @@ ComputeSymbolTable(MCAssembler &Asm, SmallString<256> &StringTable,
}
// Now add the data for local symbols.
- for (MCAssembler::symbol_iterator it = Asm.symbol_begin(),
- ie = Asm.symbol_end(); it != ie; ++it) {
- const MCSymbol &Symbol = it->getSymbol();
+ for (MCSymbolData &SD : Asm.symbols()) {
+ const MCSymbol &Symbol = SD.getSymbol();
// Ignore non-linker visible symbols.
- if (!Asm.isSymbolLinkerVisible(it->getSymbol()))
+ if (!Asm.isSymbolLinkerVisible(SD.getSymbol()))
continue;
- if (it->isExternal() || Symbol.isUndefined())
+ if (SD.isExternal() || Symbol.isUndefined())
continue;
uint64_t &Entry = StringIndexMap[Symbol.getName()];
@@ -570,7 +606,7 @@ ComputeSymbolTable(MCAssembler &Asm, SmallString<256> &StringTable,
}
MachSymbolData MSD;
- MSD.SymbolData = it;
+ MSD.SymbolData = &SD;
MSD.StringIndex = Entry;
if (Symbol.isAbsolute()) {
@@ -620,10 +656,7 @@ void MachObjectWriter::computeSectionAddresses(const MCAssembler &Asm,
void MachObjectWriter::markAbsoluteVariableSymbols(MCAssembler &Asm,
const MCAsmLayout &Layout) {
- for (MCAssembler::symbol_iterator i = Asm.symbol_begin(),
- e = Asm.symbol_end();
- i != e; ++i) {
- MCSymbolData &SD = *i;
+ for (MCSymbolData &SD : Asm.symbols()) {
if (!SD.getSymbol().isVariable())
continue;
@@ -631,7 +664,7 @@ void MachObjectWriter::markAbsoluteVariableSymbols(MCAssembler &Asm,
// and neither symbol is external, mark the variable as absolute.
const MCExpr *Expr = SD.getSymbol().getVariableValue();
MCValue Value;
- if (Expr->EvaluateAsRelocatable(Value, Layout)) {
+ if (Expr->EvaluateAsRelocatable(Value, &Layout)) {
if (Value.getSymA() && Value.getSymB())
const_cast<MCSymbol*>(&SD.getSymbol())->setAbsolute();
}
@@ -668,7 +701,7 @@ IsSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
// - addr(atom(B)) - offset(B)
// and the offsets are not relocatable, so the fixup is fully resolved when
// addr(atom(A)) - addr(atom(B)) == 0.
- const MCSymbolData *A_Base = 0, *B_Base = 0;
+ const MCSymbolData *A_Base = nullptr, *B_Base = nullptr;
const MCSymbol &SA = DataA.getSymbol().AliasedSymbol();
const MCSection &SecA = SA.getSection();
@@ -688,7 +721,8 @@ IsSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
// same assumptions about any symbol that we normally make about
// assembler locals.
- if (!Asm.getBackend().hasReliableSymbolDifference()) {
+ bool hasReliableSymbolDifference = isX86_64();
+ if (!hasReliableSymbolDifference) {
if (!SA.isInSection() || &SecA != &SecB ||
(!SA.isTemporary() &&
FB.getAtom() != Asm.getSymbolData(SA).getFragment()->getAtom() &&
@@ -736,6 +770,8 @@ IsSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
void MachObjectWriter::WriteObject(MCAssembler &Asm,
const MCAsmLayout &Layout) {
unsigned NumSections = Asm.size();
+ const MCAssembler::VersionMinInfoType &VersionInfo =
+ Layout.getAssembler().getVersionMinInfo();
// The section data starts after the header, the segment load command (and
// section headers) and the symbol table.
@@ -744,6 +780,12 @@ void MachObjectWriter::WriteObject(MCAssembler &Asm,
sizeof(MachO::segment_command_64) + NumSections * sizeof(MachO::section_64):
sizeof(MachO::segment_command) + NumSections * sizeof(MachO::section);
+ // Add the deployment target version info load command size, if used.
+ if (VersionInfo.Major != 0) {
+ ++NumLoadCommands;
+ LoadCommandsSize += sizeof(MachO::version_min_command);
+ }
+
// Add the data-in-code load command size, if used.
unsigned NumDataRegions = Asm.getDataRegions().size();
if (NumDataRegions) {
@@ -751,6 +793,14 @@ void MachObjectWriter::WriteObject(MCAssembler &Asm,
LoadCommandsSize += sizeof(MachO::linkedit_data_command);
}
+ // Add the loh load command size, if used.
+ uint64_t LOHRawSize = Asm.getLOHContainer().getEmitSize(*this, Layout);
+ uint64_t LOHSize = RoundUpToAlignment(LOHRawSize, is64Bit() ? 8 : 4);
+ if (LOHSize) {
+ ++NumLoadCommands;
+ LoadCommandsSize += sizeof(MachO::linkedit_data_command);
+ }
+
// Add the symbol table load command sizes, if used.
unsigned NumSymbols = LocalSymbolData.size() + ExternalSymbolData.size() +
UndefinedSymbolData.size();
@@ -816,6 +866,20 @@ void MachObjectWriter::WriteObject(MCAssembler &Asm,
RelocTableEnd += NumRelocs * sizeof(MachO::any_relocation_info);
}
+ // Write out the deployment target information, if it's available.
+ if (VersionInfo.Major != 0) {
+ assert(VersionInfo.Update < 256 && "unencodable update target version");
+ assert(VersionInfo.Minor < 256 && "unencodable minor target version");
+ assert(VersionInfo.Major < 65536 && "unencodable major target version");
+ uint32_t EncodedVersion = VersionInfo.Update | (VersionInfo.Minor << 8) |
+ (VersionInfo.Major << 16);
+ Write32(VersionInfo.Kind == MCVM_OSXVersionMin ? MachO::LC_VERSION_MIN_MACOSX :
+ MachO::LC_VERSION_MIN_IPHONEOS);
+ Write32(sizeof(MachO::version_min_command));
+ Write32(EncodedVersion);
+ Write32(0); // reserved.
+ }
+
// Write the data-in-code load command, if used.
uint64_t DataInCodeTableEnd = RelocTableEnd + NumDataRegions * 8;
if (NumDataRegions) {
@@ -825,6 +889,12 @@ void MachObjectWriter::WriteObject(MCAssembler &Asm,
DataRegionsSize);
}
+ // Write the loh load command, if used.
+ uint64_t LOHTableEnd = DataInCodeTableEnd + LOHSize;
+ if (LOHSize)
+ WriteLinkeditLoadCommand(MachO::LC_LINKER_OPTIMIZATION_HINT,
+ DataInCodeTableEnd, LOHSize);
+
// Write the symbol table load command, if used.
if (NumSymbols) {
unsigned FirstLocalSymbol = 0;
@@ -841,10 +911,10 @@ void MachObjectWriter::WriteObject(MCAssembler &Asm,
// If used, the indirect symbols are written after the section data.
if (NumIndirectSymbols)
- IndirectSymbolOffset = DataInCodeTableEnd;
+ IndirectSymbolOffset = LOHTableEnd;
// The symbol table is written after the indirect symbol data.
- uint64_t SymbolTableOffset = DataInCodeTableEnd + IndirectSymbolSize;
+ uint64_t SymbolTableOffset = LOHTableEnd + IndirectSymbolSize;
// The string table is written after symbol table.
uint64_t StringTableOffset =
@@ -911,17 +981,28 @@ void MachObjectWriter::WriteObject(MCAssembler &Asm,
Write16(Data->Kind);
}
+ // Write out the loh commands, if there is one.
+ if (LOHSize) {
+#ifndef NDEBUG
+ unsigned Start = OS.tell();
+#endif
+ Asm.getLOHContainer().Emit(*this, Layout);
+ // Pad to a multiple of the pointer size.
+ WriteBytes("", OffsetToAlignment(LOHRawSize, is64Bit() ? 8 : 4));
+ assert(OS.tell() - Start == LOHSize);
+ }
+
// Write the symbol table data, if used.
if (NumSymbols) {
// Write the indirect symbol entries.
for (MCAssembler::const_indirect_symbol_iterator
it = Asm.indirect_symbol_begin(),
ie = Asm.indirect_symbol_end(); it != ie; ++it) {
- // Indirect symbols in the non lazy symbol pointer section have some
+ // Indirect symbols in the non-lazy symbol pointer section have some
// special handling.
const MCSectionMachO &Section =
static_cast<const MCSectionMachO&>(it->SectionData->getSection());
- if (Section.getType() == MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS) {
+ if (Section.getType() == MachO::S_NON_LAZY_SYMBOL_POINTERS) {
// If this symbol is defined and internal, mark it as such.
if (it->Symbol->isDefined() &&
!Asm.getSymbolData(*it->Symbol).isExternal()) {
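Note on the version-min change above: the LC_VERSION_MIN load command packs the deployment target into a single 32-bit word, with the update number in the low byte, the minor version in the next byte, and the major version in the upper sixteen bits. A minimal standalone sketch of that packing, using a hypothetical encodeVersion() helper rather than the writer itself:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Pack major.minor.update the same way the hunk above does:
// update | (minor << 8) | (major << 16).
static uint32_t encodeVersion(unsigned Major, unsigned Minor, unsigned Update) {
  assert(Update < 256 && Minor < 256 && Major < 65536);
  return Update | (Minor << 8) | (Major << 16);
}

int main() {
  // 10.9.4 encodes as 0x000A0904.
  std::printf("0x%08X\n", encodeVersion(10, 9, 4));
  return 0;
}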
diff --git a/contrib/llvm/lib/MC/StringTableBuilder.cpp b/contrib/llvm/lib/MC/StringTableBuilder.cpp
new file mode 100644
index 0000000..db58ece
--- /dev/null
+++ b/contrib/llvm/lib/MC/StringTableBuilder.cpp
@@ -0,0 +1,51 @@
+//===-- StringTableBuilder.cpp - String table building utility ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/StringTableBuilder.h"
+#include "llvm/ADT/SmallVector.h"
+
+using namespace llvm;
+
+static bool compareBySuffix(StringRef a, StringRef b) {
+ size_t sizeA = a.size();
+ size_t sizeB = b.size();
+ size_t len = std::min(sizeA, sizeB);
+ for (size_t i = 0; i < len; ++i) {
+ char ca = a[sizeA - i - 1];
+ char cb = b[sizeB - i - 1];
+ if (ca != cb)
+ return ca > cb;
+ }
+ return sizeA > sizeB;
+}
+
+void StringTableBuilder::finalize() {
+ SmallVector<StringRef, 8> Strings;
+ for (auto i = StringIndexMap.begin(), e = StringIndexMap.end(); i != e; ++i)
+ Strings.push_back(i->getKey());
+
+ std::sort(Strings.begin(), Strings.end(), compareBySuffix);
+
+ // FIXME: Starting with a null byte is ELF specific. Generalize this so we
+ // can use the class with other object formats.
+ StringTable += '\x00';
+
+ StringRef Previous;
+ for (StringRef s : Strings) {
+ if (Previous.endswith(s)) {
+ StringIndexMap[s] = StringTable.size() - 1 - s.size();
+ continue;
+ }
+
+ StringIndexMap[s] = StringTable.size();
+ StringTable += s;
+ StringTable += '\x00';
+ Previous = s;
+ }
+}
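The new StringTableBuilder sorts entries so that strings sharing a suffix land next to each other, which lets a short string reuse the tail bytes of the previous, longer one. A rough self-contained illustration of the effect (not the class itself, just the same compare-by-suffix and tail-merge idea):

#include <algorithm>
#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Order strings by their characters read backwards, longer string first,
// mirroring compareBySuffix() above.
static bool bySuffix(const std::string &A, const std::string &B) {
  size_t LenA = A.size(), LenB = B.size(), Len = std::min(LenA, LenB);
  for (size_t I = 0; I < Len; ++I) {
    char CA = A[LenA - I - 1], CB = B[LenB - I - 1];
    if (CA != CB)
      return CA > CB;
  }
  return LenA > LenB;
}

int main() {
  std::vector<std::string> Strings = {"bar", "foobar", "baz"};
  std::sort(Strings.begin(), Strings.end(), bySuffix);

  std::string Table(1, '\0'); // leading NUL, as for ELF
  std::map<std::string, size_t> Offsets;
  std::string Previous;
  for (const std::string &S : Strings) {
    // "bar" is a suffix of "foobar", so it points into the middle of the
    // previously emitted entry instead of being stored a second time.
    if (Previous.size() >= S.size() &&
        Previous.compare(Previous.size() - S.size(), S.size(), S) == 0) {
      Offsets[S] = Table.size() - 1 - S.size();
      continue;
    }
    Offsets[S] = Table.size();
    Table += S;
    Table += '\0';
    Previous = S;
  }
  for (auto &KV : Offsets)
    std::printf("%-6s -> offset %zu\n", KV.first.c_str(), KV.second);
  return 0;
}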
diff --git a/contrib/llvm/lib/MC/SubtargetFeature.cpp b/contrib/llvm/lib/MC/SubtargetFeature.cpp
index 2fb91f2..27525c7 100644
--- a/contrib/llvm/lib/MC/SubtargetFeature.cpp
+++ b/contrib/llvm/lib/MC/SubtargetFeature.cpp
@@ -51,40 +51,12 @@ static inline bool isEnabled(const StringRef Feature) {
return Ch == '+';
}
-/// PrependFlag - Return a string with a prepended flag; '+' or '-'.
-///
-static inline std::string PrependFlag(const StringRef Feature,
- bool IsEnabled) {
- assert(!Feature.empty() && "Empty string");
- if (hasFlag(Feature))
- return Feature;
- std::string Prefix = IsEnabled ? "+" : "-";
- Prefix += Feature;
- return Prefix;
-}
-
/// Split - Splits a string of comma separated items in to a vector of strings.
///
static void Split(std::vector<std::string> &V, const StringRef S) {
- if (S.empty())
- return;
-
- // Start at beginning of string.
- size_t Pos = 0;
- while (true) {
- // Find the next comma
- size_t Comma = S.find(',', Pos);
- // If no comma found then the rest of the string is used
- if (Comma == std::string::npos) {
- // Add string to vector
- V.push_back(S.substr(Pos));
- break;
- }
- // Otherwise add substring to vector
- V.push_back(S.substr(Pos, Comma - Pos));
- // Advance to next item
- Pos = Comma + 1;
- }
+ SmallVector<StringRef, 2> Tmp;
+ S.split(Tmp, ",", -1, false /* KeepEmpty */);
+ V.assign(Tmp.begin(), Tmp.end());
}
/// Join a vector of strings to a string with a comma separating each element.
@@ -109,63 +81,55 @@ static std::string Join(const std::vector<std::string> &V) {
}
/// Adding features.
-void SubtargetFeatures::AddFeature(const StringRef String,
- bool IsEnabled) {
- // Don't add empty features
- if (!String.empty()) {
- // Convert to lowercase, prepend flag and add to vector
- Features.push_back(PrependFlag(String.lower(), IsEnabled));
- }
+void SubtargetFeatures::AddFeature(const StringRef String) {
+ // Don't add empty features or features we already have.
+ if (!String.empty())
+ // Convert to lowercase, prepend flag if we don't already have a flag.
+ Features.push_back(hasFlag(String) ? String.str() : "+" + String.lower());
}
/// Find KV in array using binary search.
-static const SubtargetFeatureKV *Find(StringRef S, const SubtargetFeatureKV *A,
- size_t L) {
- // Determine the end of the array
- const SubtargetFeatureKV *Hi = A + L;
+static const SubtargetFeatureKV *Find(StringRef S,
+ ArrayRef<SubtargetFeatureKV> A) {
// Binary search the array
- const SubtargetFeatureKV *F = std::lower_bound(A, Hi, S);
+ auto F = std::lower_bound(A.begin(), A.end(), S);
// If not found then return NULL
- if (F == Hi || StringRef(F->Key) != S) return NULL;
+ if (F == A.end() || StringRef(F->Key) != S) return nullptr;
// Return the found array item
return F;
}
/// getLongestEntryLength - Return the length of the longest entry in the table.
///
-static size_t getLongestEntryLength(const SubtargetFeatureKV *Table,
- size_t Size) {
+static size_t getLongestEntryLength(ArrayRef<SubtargetFeatureKV> Table) {
size_t MaxLen = 0;
- for (size_t i = 0; i < Size; i++)
- MaxLen = std::max(MaxLen, std::strlen(Table[i].Key));
+ for (auto &I : Table)
+ MaxLen = std::max(MaxLen, std::strlen(I.Key));
return MaxLen;
}
/// Display help for feature choices.
///
-static void Help(const SubtargetFeatureKV *CPUTable, size_t CPUTableSize,
- const SubtargetFeatureKV *FeatTable, size_t FeatTableSize) {
+static void Help(ArrayRef<SubtargetFeatureKV> CPUTable,
+ ArrayRef<SubtargetFeatureKV> FeatTable) {
// Determine the length of the longest CPU and Feature entries.
- unsigned MaxCPULen = getLongestEntryLength(CPUTable, CPUTableSize);
- unsigned MaxFeatLen = getLongestEntryLength(FeatTable, FeatTableSize);
+ unsigned MaxCPULen = getLongestEntryLength(CPUTable);
+ unsigned MaxFeatLen = getLongestEntryLength(FeatTable);
// Print the CPU table.
errs() << "Available CPUs for this target:\n\n";
- for (size_t i = 0; i != CPUTableSize; i++)
- errs() << format(" %-*s - %s.\n",
- MaxCPULen, CPUTable[i].Key, CPUTable[i].Desc);
+ for (auto &CPU : CPUTable)
+ errs() << format(" %-*s - %s.\n", MaxCPULen, CPU.Key, CPU.Desc);
errs() << '\n';
// Print the Feature table.
errs() << "Available features for this target:\n\n";
- for (size_t i = 0; i != FeatTableSize; i++)
- errs() << format(" %-*s - %s.\n",
- MaxFeatLen, FeatTable[i].Key, FeatTable[i].Desc);
+ for (auto &Feature : FeatTable)
+ errs() << format(" %-*s - %s.\n", MaxFeatLen, Feature.Key, Feature.Desc);
errs() << '\n';
errs() << "Use +feature to enable a feature, or -feature to disable it.\n"
"For example, llc -mcpu=mycpu -mattr=+feature1,-feature2\n";
- std::exit(1);
}
//===----------------------------------------------------------------------===//
@@ -187,16 +151,13 @@ std::string SubtargetFeatures::getString() const {
///
static
void SetImpliedBits(uint64_t &Bits, const SubtargetFeatureKV *FeatureEntry,
- const SubtargetFeatureKV *FeatureTable,
- size_t FeatureTableSize) {
- for (size_t i = 0; i < FeatureTableSize; ++i) {
- const SubtargetFeatureKV &FE = FeatureTable[i];
-
+ ArrayRef<SubtargetFeatureKV> FeatureTable) {
+ for (auto &FE : FeatureTable) {
if (FeatureEntry->Value == FE.Value) continue;
if (FeatureEntry->Implies & FE.Value) {
Bits |= FE.Value;
- SetImpliedBits(Bits, &FE, FeatureTable, FeatureTableSize);
+ SetImpliedBits(Bits, &FE, FeatureTable);
}
}
}
@@ -206,16 +167,13 @@ void SetImpliedBits(uint64_t &Bits, const SubtargetFeatureKV *FeatureEntry,
///
static
void ClearImpliedBits(uint64_t &Bits, const SubtargetFeatureKV *FeatureEntry,
- const SubtargetFeatureKV *FeatureTable,
- size_t FeatureTableSize) {
- for (size_t i = 0; i < FeatureTableSize; ++i) {
- const SubtargetFeatureKV &FE = FeatureTable[i];
-
+ ArrayRef<SubtargetFeatureKV> FeatureTable) {
+ for (auto &FE : FeatureTable) {
if (FeatureEntry->Value == FE.Value) continue;
if (FE.Implies & FeatureEntry->Value) {
Bits &= ~FE.Value;
- ClearImpliedBits(Bits, &FE, FeatureTable, FeatureTableSize);
+ ClearImpliedBits(Bits, &FE, FeatureTable);
}
}
}
@@ -224,23 +182,23 @@ void ClearImpliedBits(uint64_t &Bits, const SubtargetFeatureKV *FeatureEntry,
/// bits.
uint64_t
SubtargetFeatures::ToggleFeature(uint64_t Bits, const StringRef Feature,
- const SubtargetFeatureKV *FeatureTable,
- size_t FeatureTableSize) {
+ ArrayRef<SubtargetFeatureKV> FeatureTable) {
+
// Find feature in table.
const SubtargetFeatureKV *FeatureEntry =
- Find(StripFlag(Feature), FeatureTable, FeatureTableSize);
+ Find(StripFlag(Feature), FeatureTable);
// If there is a match
if (FeatureEntry) {
if ((Bits & FeatureEntry->Value) == FeatureEntry->Value) {
Bits &= ~FeatureEntry->Value;
// For each feature that implies this, clear it.
- ClearImpliedBits(Bits, FeatureEntry, FeatureTable, FeatureTableSize);
+ ClearImpliedBits(Bits, FeatureEntry, FeatureTable);
} else {
Bits |= FeatureEntry->Value;
// For each feature that this implies, set it.
- SetImpliedBits(Bits, FeatureEntry, FeatureTable, FeatureTableSize);
+ SetImpliedBits(Bits, FeatureEntry, FeatureTable);
}
} else {
errs() << "'" << Feature
@@ -254,20 +212,20 @@ SubtargetFeatures::ToggleFeature(uint64_t Bits, const StringRef Feature,
/// getFeatureBits - Get feature bits a CPU.
///
-uint64_t SubtargetFeatures::getFeatureBits(const StringRef CPU,
- const SubtargetFeatureKV *CPUTable,
- size_t CPUTableSize,
- const SubtargetFeatureKV *FeatureTable,
- size_t FeatureTableSize) {
- if (!FeatureTableSize || !CPUTableSize)
+uint64_t
+SubtargetFeatures::getFeatureBits(const StringRef CPU,
+ ArrayRef<SubtargetFeatureKV> CPUTable,
+ ArrayRef<SubtargetFeatureKV> FeatureTable) {
+
+ if (CPUTable.empty() || FeatureTable.empty())
return 0;
#ifndef NDEBUG
- for (size_t i = 1; i < CPUTableSize; i++) {
+ for (size_t i = 1, e = CPUTable.size(); i != e; ++i) {
assert(strcmp(CPUTable[i - 1].Key, CPUTable[i].Key) < 0 &&
"CPU table is not sorted");
}
- for (size_t i = 1; i < FeatureTableSize; i++) {
+ for (size_t i = 1, e = FeatureTable.size(); i != e; ++i) {
assert(strcmp(FeatureTable[i - 1].Key, FeatureTable[i].Key) < 0 &&
"CPU features table is not sorted");
}
@@ -276,21 +234,21 @@ uint64_t SubtargetFeatures::getFeatureBits(const StringRef CPU,
// Check if help is needed
if (CPU == "help")
- Help(CPUTable, CPUTableSize, FeatureTable, FeatureTableSize);
+ Help(CPUTable, FeatureTable);
// Find CPU entry if CPU name is specified.
- if (!CPU.empty()) {
- const SubtargetFeatureKV *CPUEntry = Find(CPU, CPUTable, CPUTableSize);
+ else if (!CPU.empty()) {
+ const SubtargetFeatureKV *CPUEntry = Find(CPU, CPUTable);
+
// If there is a match
if (CPUEntry) {
// Set base feature bits
Bits = CPUEntry->Value;
// Set the feature implied by this CPU feature, if any.
- for (size_t i = 0; i < FeatureTableSize; ++i) {
- const SubtargetFeatureKV &FE = FeatureTable[i];
+ for (auto &FE : FeatureTable) {
if (CPUEntry->Value & FE.Value)
- SetImpliedBits(Bits, &FE, FeatureTable, FeatureTableSize);
+ SetImpliedBits(Bits, &FE, FeatureTable);
}
} else {
errs() << "'" << CPU
@@ -300,16 +258,14 @@ uint64_t SubtargetFeatures::getFeatureBits(const StringRef CPU,
}
// Iterate through each feature
- for (size_t i = 0, E = Features.size(); i < E; i++) {
- const StringRef Feature = Features[i];
-
+ for (auto &Feature : Features) {
// Check for help
if (Feature == "+help")
- Help(CPUTable, CPUTableSize, FeatureTable, FeatureTableSize);
+ Help(CPUTable, FeatureTable);
// Find feature in table.
const SubtargetFeatureKV *FeatureEntry =
- Find(StripFlag(Feature), FeatureTable, FeatureTableSize);
+ Find(StripFlag(Feature), FeatureTable);
// If there is a match
if (FeatureEntry) {
// Enable/disable feature in bits
@@ -317,12 +273,12 @@ uint64_t SubtargetFeatures::getFeatureBits(const StringRef CPU,
Bits |= FeatureEntry->Value;
// For each feature that this implies, set it.
- SetImpliedBits(Bits, FeatureEntry, FeatureTable, FeatureTableSize);
+ SetImpliedBits(Bits, FeatureEntry, FeatureTable);
} else {
Bits &= ~FeatureEntry->Value;
// For each feature that implies this, clear it.
- ClearImpliedBits(Bits, FeatureEntry, FeatureTable, FeatureTableSize);
+ ClearImpliedBits(Bits, FeatureEntry, FeatureTable);
}
} else {
errs() << "'" << Feature
@@ -337,8 +293,8 @@ uint64_t SubtargetFeatures::getFeatureBits(const StringRef CPU,
/// print - Print feature string.
///
void SubtargetFeatures::print(raw_ostream &OS) const {
- for (size_t i = 0, e = Features.size(); i != e; ++i)
- OS << Features[i] << " ";
+ for (auto &F : Features)
+ OS << F << " ";
OS << "\n";
}
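The ArrayRef-based SetImpliedBits/ClearImpliedBits above walk the whole feature table and recurse whenever one entry implies another, so transitive implications are picked up. A small sketch of that propagation over a made-up three-entry table (the KV struct and feature names are illustrative, not the real target tables):

#include <cstdint>
#include <cstdio>
#include <vector>

struct FeatureKV {
  const char *Key;
  uint64_t Value;    // bit for this feature
  uint64_t Implies;  // bits of the features it implies
};

// Hypothetical table: sse4 implies sse2, sse2 implies sse.
static const std::vector<FeatureKV> Table = {
  {"sse",  1 << 0, 0},
  {"sse2", 1 << 1, 1 << 0},
  {"sse4", 1 << 2, 1 << 1},
};

// Same shape as SetImpliedBits: for every entry implied by FE, set its
// bit and recurse so indirect implications are enabled as well.
static void setImplied(uint64_t &Bits, const FeatureKV &FE) {
  for (const FeatureKV &Other : Table) {
    if (Other.Value == FE.Value)
      continue;
    if (FE.Implies & Other.Value) {
      Bits |= Other.Value;
      setImplied(Bits, Other);
    }
  }
}

int main() {
  uint64_t Bits = Table[2].Value; // enable "sse4"
  setImplied(Bits, Table[2]);     // pulls in sse2, then sse
  std::printf("bits = 0x%llx\n", (unsigned long long)Bits); // 0x7
  return 0;
}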
diff --git a/contrib/llvm/lib/MC/WinCOFFObjectWriter.cpp b/contrib/llvm/lib/MC/WinCOFFObjectWriter.cpp
index d9ca86d..824895b 100644
--- a/contrib/llvm/lib/MC/WinCOFFObjectWriter.cpp
+++ b/contrib/llvm/lib/MC/WinCOFFObjectWriter.cpp
@@ -11,13 +11,11 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "WinCOFFObjectWriter"
-
#include "llvm/MC/MCWinCOFFObjectWriter.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
@@ -36,6 +34,8 @@
using namespace llvm;
+#define DEBUG_TYPE "WinCOFFObjectWriter"
+
namespace {
typedef SmallString<COFF::NameSize> name;
@@ -82,7 +82,7 @@ struct COFFRelocation {
COFF::relocation Data;
COFFSymbol *Symb;
- COFFRelocation() : Symb(NULL) {}
+ COFFRelocation() : Symb(nullptr) {}
static size_t size() { return COFF::RelocationSize; }
};
@@ -119,13 +119,13 @@ public:
class WinCOFFObjectWriter : public MCObjectWriter {
public:
- typedef std::vector<COFFSymbol*> symbols;
- typedef std::vector<COFFSection*> sections;
+ typedef std::vector<std::unique_ptr<COFFSymbol>> symbols;
+ typedef std::vector<std::unique_ptr<COFFSection>> sections;
typedef DenseMap<MCSymbol const *, COFFSymbol *> symbol_map;
typedef DenseMap<MCSection const *, COFFSection *> section_map;
- llvm::OwningPtr<MCWinCOFFObjectTargetWriter> TargetObjectWriter;
+ std::unique_ptr<MCWinCOFFObjectTargetWriter> TargetObjectWriter;
// Root level file contents.
COFF::header Header;
@@ -138,7 +138,6 @@ public:
symbol_map SymbolMap;
WinCOFFObjectWriter(MCWinCOFFObjectTargetWriter *MOTW, raw_ostream &OS);
- virtual ~WinCOFFObjectWriter();
COFFSymbol *createSymbol(StringRef Name);
COFFSymbol *GetOrCreateCOFFSymbol(const MCSymbol * Symbol);
@@ -154,30 +153,29 @@ public:
void MakeSymbolReal(COFFSymbol &S, size_t Index);
void MakeSectionReal(COFFSection &S, size_t Number);
- bool ExportSymbol(MCSymbolData const &SymbolData, MCAssembler &Asm);
+ bool ExportSymbol(const MCSymbol &Symbol, MCAssembler &Asm);
bool IsPhysicalSection(COFFSection *S);
// Entity writing methods.
void WriteFileHeader(const COFF::header &Header);
- void WriteSymbol(const COFFSymbol *S);
+ void WriteSymbol(const COFFSymbol &S);
void WriteAuxiliarySymbols(const COFFSymbol::AuxiliarySymbols &S);
void WriteSectionHeader(const COFF::section &S);
void WriteRelocation(const COFF::relocation &R);
// MCObjectWriter interface implementation.
- void ExecutePostLayoutBinding(MCAssembler &Asm, const MCAsmLayout &Layout);
+ void ExecutePostLayoutBinding(MCAssembler &Asm,
+ const MCAsmLayout &Layout) override;
- void RecordRelocation(const MCAssembler &Asm,
- const MCAsmLayout &Layout,
- const MCFragment *Fragment,
- const MCFixup &Fixup,
- MCValue Target,
- uint64_t &FixedValue);
+ void RecordRelocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFragment *Fragment, const MCFixup &Fixup,
+ MCValue Target, bool &IsPCRel,
+ uint64_t &FixedValue) override;
- void WriteObject(MCAssembler &Asm, const MCAsmLayout &Layout);
+ void WriteObject(MCAssembler &Asm, const MCAsmLayout &Layout) override;
};
}
@@ -194,10 +192,10 @@ static inline void write_uint32_le(void *Data, uint32_t const &Value) {
COFFSymbol::COFFSymbol(StringRef name)
: Name(name.begin(), name.end())
- , Other(NULL)
- , Section(NULL)
+ , Other(nullptr)
+ , Section(nullptr)
, Relocations(0)
- , MCData(NULL) {
+ , MCData(nullptr) {
memset(&Data, 0, sizeof(Data));
}
@@ -216,7 +214,7 @@ void COFFSymbol::set_name_offset(uint32_t Offset) {
/// logic to decide if the symbol should be reported in the symbol table
bool COFFSymbol::should_keep() const {
// no section means its external, keep it
- if (Section == NULL)
+ if (!Section)
return true;
// if it has relocations pointing at it, keep it
@@ -246,8 +244,8 @@ bool COFFSymbol::should_keep() const {
COFFSection::COFFSection(StringRef name)
: Name(name)
- , MCData(NULL)
- , Symbol(NULL) {
+ , MCData(nullptr)
+ , Symbol(nullptr) {
memset(&Header, 0, sizeof(Header));
}
@@ -310,13 +308,6 @@ WinCOFFObjectWriter::WinCOFFObjectWriter(MCWinCOFFObjectTargetWriter *MOTW,
Header.Machine = TargetObjectWriter->getMachine();
}
-WinCOFFObjectWriter::~WinCOFFObjectWriter() {
- for (symbols::iterator I = Symbols.begin(), E = Symbols.end(); I != E; ++I)
- delete *I;
- for (sections::iterator I = Sections.begin(), E = Sections.end(); I != E; ++I)
- delete *I;
-}
-
COFFSymbol *WinCOFFObjectWriter::createSymbol(StringRef Name) {
return createCOFFEntity<COFFSymbol>(Name, Symbols);
}
@@ -340,24 +331,30 @@ COFFSection *WinCOFFObjectWriter::createSection(StringRef Name) {
template <typename object_t, typename list_t>
object_t *WinCOFFObjectWriter::createCOFFEntity(StringRef Name,
list_t &List) {
- object_t *Object = new object_t(Name);
-
- List.push_back(Object);
+ List.push_back(make_unique<object_t>(Name));
- return Object;
+ return List.back().get();
}
/// This function takes a section data object from the assembler
/// and creates the associated COFF section staging object.
void WinCOFFObjectWriter::DefineSection(MCSectionData const &SectionData) {
assert(SectionData.getSection().getVariant() == MCSection::SV_COFF
- && "Got non COFF section in the COFF backend!");
+ && "Got non-COFF section in the COFF backend!");
// FIXME: Not sure how to verify this (at least in a debug build).
MCSectionCOFF const &Sec =
static_cast<MCSectionCOFF const &>(SectionData.getSection());
COFFSection *coff_section = createSection(Sec.getSectionName());
COFFSymbol *coff_symbol = createSymbol(Sec.getSectionName());
+ if (Sec.getSelection() != COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE) {
+ if (const MCSymbol *S = Sec.getCOMDATSymbol()) {
+ COFFSymbol *COMDATSymbol = GetOrCreateCOFFSymbol(S);
+ if (COMDATSymbol->Section)
+ report_fatal_error("two sections have the same comdat");
+ COMDATSymbol->Section = coff_section;
+ }
+ }
coff_section->Symbol = coff_symbol;
coff_symbol->Section = coff_section;
@@ -396,7 +393,19 @@ void WinCOFFObjectWriter::DefineSection(MCSectionData const &SectionData) {
SectionMap[&SectionData.getSection()] = coff_section;
}
-/// This function takes a section data object from the assembler
+static uint64_t getSymbolValue(const MCSymbolData &Data,
+ const MCAsmLayout &Layout) {
+ if (Data.isCommon() && Data.isExternal())
+ return Data.getCommonSize();
+
+ uint64_t Res;
+ if (!Layout.getSymbolOffset(&Data, Res))
+ return 0;
+
+ return Res;
+}
+
+/// This function takes a symbol data object from the assembler
/// and creates the associated COFF symbol staging object.
void WinCOFFObjectWriter::DefineSymbol(MCSymbolData const &SymbolData,
MCAssembler &Assembler,
@@ -438,54 +447,89 @@ void WinCOFFObjectWriter::DefineSymbol(MCSymbolData const &SymbolData,
coff_symbol->MCData = &SymbolData;
} else {
- const MCSymbolData &ResSymData =
- Assembler.getSymbolData(Symbol.AliasedSymbol());
-
- if (Symbol.isVariable()) {
- int64_t Addr;
- if (Symbol.getVariableValue()->EvaluateAsAbsolute(Addr, Layout))
- coff_symbol->Data.Value = Addr;
- }
+ const MCSymbolData &ResSymData = Assembler.getSymbolData(Symbol);
+ const MCSymbol *Base = Layout.getBaseSymbol(Symbol);
+ coff_symbol->Data.Value = getSymbolValue(ResSymData, Layout);
coff_symbol->Data.Type = (ResSymData.getFlags() & 0x0000FFFF) >> 0;
coff_symbol->Data.StorageClass = (ResSymData.getFlags() & 0x00FF0000) >> 16;
// If no storage class was specified in the streamer, define it here.
if (coff_symbol->Data.StorageClass == 0) {
- bool external = ResSymData.isExternal() || (ResSymData.Fragment == NULL);
+ bool IsExternal =
+ ResSymData.isExternal() ||
+ (!ResSymData.getFragment() && !ResSymData.getSymbol().isVariable());
- coff_symbol->Data.StorageClass =
- external ? COFF::IMAGE_SYM_CLASS_EXTERNAL : COFF::IMAGE_SYM_CLASS_STATIC;
+ coff_symbol->Data.StorageClass = IsExternal
+ ? COFF::IMAGE_SYM_CLASS_EXTERNAL
+ : COFF::IMAGE_SYM_CLASS_STATIC;
}
- if (Symbol.isAbsolute() || Symbol.AliasedSymbol().isVariable())
+ if (!Base) {
coff_symbol->Data.SectionNumber = COFF::IMAGE_SYM_ABSOLUTE;
- else if (ResSymData.Fragment != NULL)
- coff_symbol->Section =
- SectionMap[&ResSymData.Fragment->getParent()->getSection()];
+ } else {
+ const MCSymbolData &BaseData = Assembler.getSymbolData(*Base);
+ if (BaseData.Fragment) {
+ COFFSection *Sec =
+ SectionMap[&BaseData.Fragment->getParent()->getSection()];
+
+ if (coff_symbol->Section && coff_symbol->Section != Sec)
+ report_fatal_error("conflicting sections for symbol");
+
+ coff_symbol->Section = Sec;
+ }
+ }
coff_symbol->MCData = &ResSymData;
}
}
+// Maximum offsets for different string table entry encodings.
+static const unsigned Max6DecimalOffset = 999999;
+static const unsigned Max7DecimalOffset = 9999999;
+static const uint64_t MaxBase64Offset = 0xFFFFFFFFFULL; // 64^6, including 0
+
+// Encode a string table entry offset in base 64, padded to 6 chars, and
+// prefixed with a double slash: '//AAAAAA', '//AAAAAB', ...
+// Buffer must be at least 8 bytes large. No terminating null appended.
+static void encodeBase64StringEntry(char* Buffer, uint64_t Value) {
+ assert(Value > Max7DecimalOffset && Value <= MaxBase64Offset &&
+ "Illegal section name encoding for value");
+
+ static const char Alphabet[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz"
+ "0123456789+/";
+
+ Buffer[0] = '/';
+ Buffer[1] = '/';
+
+ char* Ptr = Buffer + 7;
+ for (unsigned i = 0; i < 6; ++i) {
+ unsigned Rem = Value % 64;
+ Value /= 64;
+ *(Ptr--) = Alphabet[Rem];
+ }
+}
+
/// making a section real involves assigned it a number and putting
/// name into the string table if needed
void WinCOFFObjectWriter::MakeSectionReal(COFFSection &S, size_t Number) {
if (S.Name.size() > COFF::NameSize) {
- const unsigned Max6DecimalSize = 999999;
- const unsigned Max7DecimalSize = 9999999;
uint64_t StringTableEntry = Strings.insert(S.Name.c_str());
- if (StringTableEntry <= Max6DecimalSize) {
+ if (StringTableEntry <= Max6DecimalOffset) {
std::sprintf(S.Header.Name, "/%d", unsigned(StringTableEntry));
- } else if (StringTableEntry <= Max7DecimalSize) {
+ } else if (StringTableEntry <= Max7DecimalOffset) {
// With seven digits, we have to skip the terminating null. Because
// sprintf always appends it, we use a larger temporary buffer.
char buffer[9] = { };
std::sprintf(buffer, "/%d", unsigned(StringTableEntry));
std::memcpy(S.Header.Name, buffer, 8);
+ } else if (StringTableEntry <= MaxBase64Offset) {
+ // Starting with 10,000,000, offsets are encoded as base64.
+ encodeBase64StringEntry(S.Header.Name, StringTableEntry);
} else {
- report_fatal_error("COFF string table is greater than 9,999,999 bytes.");
+ report_fatal_error("COFF string table is greater than 64 GB.");
}
} else
std::memcpy(S.Header.Name, S.Name.c_str(), S.Name.size());
@@ -505,16 +549,24 @@ void WinCOFFObjectWriter::MakeSymbolReal(COFFSymbol &S, size_t Index) {
S.Index = Index;
}
-bool WinCOFFObjectWriter::ExportSymbol(MCSymbolData const &SymbolData,
+bool WinCOFFObjectWriter::ExportSymbol(const MCSymbol &Symbol,
MCAssembler &Asm) {
// This doesn't seem to be right. Strings referred to from the .data section
// need symbols so they can be linked to code in the .text section right?
- // return Asm.isSymbolLinkerVisible (&SymbolData);
+ // return Asm.isSymbolLinkerVisible(Symbol);
+
+ // Non-temporary labels should always be visible to the linker.
+ if (!Symbol.isTemporary())
+ return true;
+
+ // Absolute temporary labels are never visible.
+ if (!Symbol.isInSection())
+ return false;
// For now, all non-variable symbols are exported,
// the linker will sort the rest out for us.
- return SymbolData.isExternal() || !SymbolData.getSymbol().isVariable();
+ return !Symbol.isVariable();
}
bool WinCOFFObjectWriter::IsPhysicalSection(COFFSection *S) {
@@ -535,14 +587,14 @@ void WinCOFFObjectWriter::WriteFileHeader(const COFF::header &Header) {
WriteLE16(Header.Characteristics);
}
-void WinCOFFObjectWriter::WriteSymbol(const COFFSymbol *S) {
- WriteBytes(StringRef(S->Data.Name, COFF::NameSize));
- WriteLE32(S->Data.Value);
- WriteLE16(S->Data.SectionNumber);
- WriteLE16(S->Data.Type);
- Write8(S->Data.StorageClass);
- Write8(S->Data.NumberOfAuxSymbols);
- WriteAuxiliarySymbols(S->Aux);
+void WinCOFFObjectWriter::WriteSymbol(const COFFSymbol &S) {
+ WriteBytes(StringRef(S.Data.Name, COFF::NameSize));
+ WriteLE32(S.Data.Value);
+ WriteLE16(S.Data.SectionNumber);
+ WriteLE16(S.Data.Type);
+ Write8(S.Data.StorageClass);
+ Write8(S.Data.NumberOfAuxSymbols);
+ WriteAuxiliarySymbols(S.Aux);
}
void WinCOFFObjectWriter::WriteAuxiliarySymbols(
@@ -614,16 +666,42 @@ void WinCOFFObjectWriter::ExecutePostLayoutBinding(MCAssembler &Asm,
// "Define" each section & symbol. This creates section & symbol
// entries in the staging area.
- for (MCAssembler::const_iterator i = Asm.begin(), e = Asm.end(); i != e; i++)
- DefineSection(*i);
+ static_assert(sizeof(((COFF::AuxiliaryFile *)nullptr)->FileName) == COFF::SymbolSize,
+ "size mismatch for COFF::AuxiliaryFile::FileName");
+ for (auto FI = Asm.file_names_begin(), FE = Asm.file_names_end();
+ FI != FE; ++FI) {
+ // round up to calculate the number of auxiliary symbols required
+ unsigned Count = (FI->size() + COFF::SymbolSize - 1) / COFF::SymbolSize;
+
+ COFFSymbol *file = createSymbol(".file");
+ file->Data.SectionNumber = COFF::IMAGE_SYM_DEBUG;
+ file->Data.StorageClass = COFF::IMAGE_SYM_CLASS_FILE;
+ file->Aux.resize(Count);
+
+ unsigned Offset = 0;
+ unsigned Length = FI->size();
+ for (auto & Aux : file->Aux) {
+ Aux.AuxType = ATFile;
+
+ if (Length > COFF::SymbolSize) {
+ memcpy(Aux.Aux.File.FileName, FI->c_str() + Offset, COFF::SymbolSize);
+ Length = Length - COFF::SymbolSize;
+ } else {
+ memcpy(Aux.Aux.File.FileName, FI->c_str() + Offset, Length);
+ memset(&Aux.Aux.File.FileName[Length], 0, COFF::SymbolSize - Length);
+ Length = 0;
+ }
- for (MCAssembler::const_symbol_iterator i = Asm.symbol_begin(),
- e = Asm.symbol_end();
- i != e; i++) {
- if (ExportSymbol(*i, Asm)) {
- DefineSymbol(*i, Asm, Layout);
+ Offset = Offset + COFF::SymbolSize;
}
}
+
+ for (const auto & Section : Asm)
+ DefineSection(Section);
+
+ for (MCSymbolData &SD : Asm.symbols())
+ if (ExportSymbol(SD.getSymbol(), Asm))
+ DefineSymbol(SD, Asm, Layout);
}
void WinCOFFObjectWriter::RecordRelocation(const MCAssembler &Asm,
@@ -631,12 +709,18 @@ void WinCOFFObjectWriter::RecordRelocation(const MCAssembler &Asm,
const MCFragment *Fragment,
const MCFixup &Fixup,
MCValue Target,
+ bool &IsPCRel,
uint64_t &FixedValue) {
- assert(Target.getSymA() != NULL && "Relocation must reference a symbol!");
+ assert(Target.getSymA() && "Relocation must reference a symbol!");
const MCSymbol &Symbol = Target.getSymA()->getSymbol();
const MCSymbol &A = Symbol.AliasedSymbol();
- MCSymbolData &A_SD = Asm.getSymbolData(A);
+ if (!Asm.hasSymbolData(A))
+ Asm.getContext().FatalError(
+ Fixup.getLoc(),
+ Twine("symbol '") + A.getName() + "' can not be undefined");
+
+ const MCSymbolData &A_SD = Asm.getSymbolData(A);
MCSectionData const *SectionData = Fragment->getParent();
@@ -648,14 +732,25 @@ void WinCOFFObjectWriter::RecordRelocation(const MCAssembler &Asm,
COFFSection *coff_section = SectionMap[&SectionData->getSection()];
COFFSymbol *coff_symbol = SymbolMap[&A_SD.getSymbol()];
- const MCSymbolRefExpr *SymA = Target.getSymA();
const MCSymbolRefExpr *SymB = Target.getSymB();
- const bool CrossSection = SymB &&
- &SymA->getSymbol().getSection() != &SymB->getSymbol().getSection();
+ bool CrossSection = false;
- if (Target.getSymB()) {
- const MCSymbol *B = &Target.getSymB()->getSymbol();
- MCSymbolData &B_SD = Asm.getSymbolData(*B);
+ if (SymB) {
+ const MCSymbol *B = &SymB->getSymbol();
+ const MCSymbolData &B_SD = Asm.getSymbolData(*B);
+ if (!B_SD.getFragment())
+ Asm.getContext().FatalError(
+ Fixup.getLoc(),
+ Twine("symbol '") + B->getName() +
+ "' can not be undefined in a subtraction expression");
+
+ if (!A_SD.getFragment())
+ Asm.getContext().FatalError(
+ Fixup.getLoc(),
+ Twine("symbol '") + Symbol.getName() +
+ "' can not be undefined in a subtraction expression");
+
+ CrossSection = &Symbol.getSection() != &B->getSection();
// Offset of the symbol in the section
int64_t a = Layout.getSymbolOffset(&B_SD);
@@ -694,11 +789,52 @@ void WinCOFFObjectWriter::RecordRelocation(const MCAssembler &Asm,
// FIXME: Can anyone explain what this does other than adjust for the size
// of the offset?
- if (Reloc.Data.Type == COFF::IMAGE_REL_AMD64_REL32 ||
- Reloc.Data.Type == COFF::IMAGE_REL_I386_REL32)
+ if ((Header.Machine == COFF::IMAGE_FILE_MACHINE_AMD64 &&
+ Reloc.Data.Type == COFF::IMAGE_REL_AMD64_REL32) ||
+ (Header.Machine == COFF::IMAGE_FILE_MACHINE_I386 &&
+ Reloc.Data.Type == COFF::IMAGE_REL_I386_REL32))
FixedValue += 4;
- coff_section->Relocations.push_back(Reloc);
+ if (Header.Machine == COFF::IMAGE_FILE_MACHINE_ARMNT) {
+ switch (Reloc.Data.Type) {
+ case COFF::IMAGE_REL_ARM_ABSOLUTE:
+ case COFF::IMAGE_REL_ARM_ADDR32:
+ case COFF::IMAGE_REL_ARM_ADDR32NB:
+ case COFF::IMAGE_REL_ARM_TOKEN:
+ case COFF::IMAGE_REL_ARM_SECTION:
+ case COFF::IMAGE_REL_ARM_SECREL:
+ break;
+ case COFF::IMAGE_REL_ARM_BRANCH11:
+ case COFF::IMAGE_REL_ARM_BLX11:
+ // IMAGE_REL_ARM_BRANCH11 and IMAGE_REL_ARM_BLX11 are only used for
+ // pre-ARMv7, which implicitly rules it out of ARMNT (it would be valid
+ // for Windows CE).
+ case COFF::IMAGE_REL_ARM_BRANCH24:
+ case COFF::IMAGE_REL_ARM_BLX24:
+ case COFF::IMAGE_REL_ARM_MOV32A:
+ // IMAGE_REL_ARM_BRANCH24, IMAGE_REL_ARM_BLX24, IMAGE_REL_ARM_MOV32A are
+ // only used for ARM mode code, which is documented as being unsupported
+ // by Windows on ARM. Empirical proof indicates that masm is able to
+ // generate the relocations however the rest of the MSVC toolchain is
+ // unable to handle it.
+ llvm_unreachable("unsupported relocation");
+ break;
+ case COFF::IMAGE_REL_ARM_MOV32T:
+ break;
+ case COFF::IMAGE_REL_ARM_BRANCH20T:
+ case COFF::IMAGE_REL_ARM_BRANCH24T:
+ case COFF::IMAGE_REL_ARM_BLX23T:
+ // IMAGE_REL_BRANCH20T, IMAGE_REL_ARM_BRANCH24T, IMAGE_REL_ARM_BLX23T all
+ // perform a 4 byte adjustment to the relocation. Relative branches are
+ // offset by 4 on ARM, however, because there is no RELA relocations, all
+ // branches are offset by 4.
+ FixedValue = FixedValue + 4;
+ break;
+ }
+ }
+
+ if (TargetObjectWriter->recordRelocation(Fixup))
+ coff_section->Relocations.push_back(Reloc);
}
void WinCOFFObjectWriter::WriteObject(MCAssembler &Asm,
@@ -707,77 +843,64 @@ void WinCOFFObjectWriter::WriteObject(MCAssembler &Asm,
Header.NumberOfSections = 0;
DenseMap<COFFSection *, uint16_t> SectionIndices;
- for (sections::iterator i = Sections.begin(),
- e = Sections.end(); i != e; i++) {
- if (Layout.getSectionAddressSize((*i)->MCData) > 0) {
- size_t Number = ++Header.NumberOfSections;
- SectionIndices[*i] = Number;
- MakeSectionReal(**i, Number);
- } else {
- (*i)->Number = -1;
- }
+ for (auto & Section : Sections) {
+ size_t Number = ++Header.NumberOfSections;
+ SectionIndices[Section.get()] = Number;
+ MakeSectionReal(*Section, Number);
}
Header.NumberOfSymbols = 0;
- for (symbols::iterator i = Symbols.begin(), e = Symbols.end(); i != e; i++) {
- COFFSymbol *coff_symbol = *i;
- MCSymbolData const *SymbolData = coff_symbol->MCData;
-
+ for (auto & Symbol : Symbols) {
// Update section number & offset for symbols that have them.
- if ((SymbolData != NULL) && (SymbolData->Fragment != NULL)) {
- assert(coff_symbol->Section != NULL);
+ if (Symbol->Section)
+ Symbol->Data.SectionNumber = Symbol->Section->Number;
- coff_symbol->Data.SectionNumber = coff_symbol->Section->Number;
- coff_symbol->Data.Value = Layout.getFragmentOffset(SymbolData->Fragment)
- + SymbolData->Offset;
- }
-
- if (coff_symbol->should_keep()) {
- MakeSymbolReal(*coff_symbol, Header.NumberOfSymbols++);
+ if (Symbol->should_keep()) {
+ MakeSymbolReal(*Symbol, Header.NumberOfSymbols++);
// Update auxiliary symbol info.
- coff_symbol->Data.NumberOfAuxSymbols = coff_symbol->Aux.size();
- Header.NumberOfSymbols += coff_symbol->Data.NumberOfAuxSymbols;
+ Symbol->Data.NumberOfAuxSymbols = Symbol->Aux.size();
+ Header.NumberOfSymbols += Symbol->Data.NumberOfAuxSymbols;
} else
- coff_symbol->Index = -1;
+ Symbol->Index = -1;
}
// Fixup weak external references.
- for (symbols::iterator i = Symbols.begin(), e = Symbols.end(); i != e; i++) {
- COFFSymbol *coff_symbol = *i;
- if (coff_symbol->Other != NULL) {
- assert(coff_symbol->Index != -1);
- assert(coff_symbol->Aux.size() == 1 &&
- "Symbol must contain one aux symbol!");
- assert(coff_symbol->Aux[0].AuxType == ATWeakExternal &&
+ for (auto & Symbol : Symbols) {
+ if (Symbol->Other) {
+ assert(Symbol->Index != -1);
+ assert(Symbol->Aux.size() == 1 && "Symbol must contain one aux symbol!");
+ assert(Symbol->Aux[0].AuxType == ATWeakExternal &&
"Symbol's aux symbol must be a Weak External!");
- coff_symbol->Aux[0].Aux.WeakExternal.TagIndex = coff_symbol->Other->Index;
+ Symbol->Aux[0].Aux.WeakExternal.TagIndex = Symbol->Other->Index;
}
}
// Fixup associative COMDAT sections.
- for (sections::iterator i = Sections.begin(),
- e = Sections.end(); i != e; i++) {
- if ((*i)->Symbol->Aux[0].Aux.SectionDefinition.Selection !=
+ for (auto & Section : Sections) {
+ if (Section->Symbol->Aux[0].Aux.SectionDefinition.Selection !=
COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE)
continue;
- const MCSectionCOFF &MCSec = static_cast<const MCSectionCOFF &>(
- (*i)->MCData->getSection());
+ const MCSectionCOFF &MCSec =
+ static_cast<const MCSectionCOFF &>(Section->MCData->getSection());
- COFFSection *Assoc = SectionMap.lookup(MCSec.getAssocSection());
- if (!Assoc) {
- report_fatal_error(Twine("Missing associated COMDAT section ") +
- MCSec.getAssocSection()->getSectionName() +
- " for section " + MCSec.getSectionName());
- }
+ const MCSymbol *COMDAT = MCSec.getCOMDATSymbol();
+ assert(COMDAT);
+ COFFSymbol *COMDATSymbol = GetOrCreateCOFFSymbol(COMDAT);
+ assert(COMDATSymbol);
+ COFFSection *Assoc = COMDATSymbol->Section;
+ if (!Assoc)
+ report_fatal_error(
+ Twine("Missing associated COMDAT section for section ") +
+ MCSec.getSectionName());
// Skip this section if the associated section is unused.
if (Assoc->Number == -1)
continue;
- (*i)->Symbol->Aux[0].Aux.SectionDefinition.Number = SectionIndices[Assoc];
+ Section->Symbol->Aux[0].Aux.SectionDefinition.Number = SectionIndices[Assoc];
}
@@ -788,15 +911,13 @@ void WinCOFFObjectWriter::WriteObject(MCAssembler &Asm,
offset += COFF::HeaderSize;
offset += COFF::SectionSize * Header.NumberOfSections;
- for (MCAssembler::const_iterator i = Asm.begin(),
- e = Asm.end();
- i != e; i++) {
- COFFSection *Sec = SectionMap[&i->getSection()];
+ for (const auto & Section : Asm) {
+ COFFSection *Sec = SectionMap[&Section.getSection()];
if (Sec->Number == -1)
continue;
- Sec->Header.SizeOfRawData = Layout.getSectionAddressSize(i);
+ Sec->Header.SizeOfRawData = Layout.getSectionAddressSize(&Section);
if (IsPhysicalSection(Sec)) {
Sec->Header.PointerToRawData = offset;
@@ -823,16 +944,14 @@ void WinCOFFObjectWriter::WriteObject(MCAssembler &Asm,
offset += COFF::RelocationSize * Sec->Relocations.size();
- for (relocations::iterator cr = Sec->Relocations.begin(),
- er = Sec->Relocations.end();
- cr != er; ++cr) {
- assert((*cr).Symb->Index != -1);
- (*cr).Data.SymbolTableIndex = (*cr).Symb->Index;
+ for (auto & Relocation : Sec->Relocations) {
+ assert(Relocation.Symb->Index != -1);
+ Relocation.Data.SymbolTableIndex = Relocation.Symb->Index;
}
}
- assert(Sec->Symbol->Aux.size() == 1
- && "Section's symbol must have one aux!");
+ assert(Sec->Symbol->Aux.size() == 1 &&
+ "Section's symbol must have one aux!");
AuxSymbol &Aux = Sec->Symbol->Aux[0];
assert(Aux.AuxType == ATSectionDefinition &&
"Section's symbol's aux symbol must be a Section Definition!");
@@ -845,7 +964,8 @@ void WinCOFFObjectWriter::WriteObject(MCAssembler &Asm,
Header.PointerToSymbolTable = offset;
- Header.TimeDateStamp = sys::TimeValue::now().toEpochTime();
+ // We want a deterministic output. It looks like GNU as also writes 0 in here.
+ Header.TimeDateStamp = 0;
// Write it all to disk...
WriteFileHeader(Header);
@@ -854,13 +974,13 @@ void WinCOFFObjectWriter::WriteObject(MCAssembler &Asm,
sections::iterator i, ie;
MCAssembler::const_iterator j, je;
- for (i = Sections.begin(), ie = Sections.end(); i != ie; i++)
- if ((*i)->Number != -1) {
- if ((*i)->Relocations.size() >= 0xffff) {
- (*i)->Header.Characteristics |= COFF::IMAGE_SCN_LNK_NRELOC_OVFL;
- }
- WriteSectionHeader((*i)->Header);
+ for (auto & Section : Sections) {
+ if (Section->Number != -1) {
+ if (Section->Relocations.size() >= 0xffff)
+ Section->Header.Characteristics |= COFF::IMAGE_SCN_LNK_NRELOC_OVFL;
+ WriteSectionHeader(Section->Header);
}
+ }
for (i = Sections.begin(), ie = Sections.end(),
j = Asm.begin(), je = Asm.end();
@@ -890,11 +1010,8 @@ void WinCOFFObjectWriter::WriteObject(MCAssembler &Asm,
WriteRelocation(r);
}
- for (relocations::const_iterator k = (*i)->Relocations.begin(),
- ke = (*i)->Relocations.end();
- k != ke; k++) {
- WriteRelocation(k->Data);
- }
+ for (const auto & Relocation : (*i)->Relocations)
+ WriteRelocation(Relocation.Data);
} else
assert((*i)->Header.PointerToRelocations == 0 &&
"Section::PointerToRelocations is insane!");
@@ -904,9 +1021,9 @@ void WinCOFFObjectWriter::WriteObject(MCAssembler &Asm,
assert(OS.tell() == Header.PointerToSymbolTable &&
"Header::PointerToSymbolTable is insane!");
- for (symbols::iterator i = Symbols.begin(), e = Symbols.end(); i != e; i++)
- if ((*i)->Index != -1)
- WriteSymbol(*i);
+ for (auto & Symbol : Symbols)
+ if (Symbol->Index != -1)
+ WriteSymbol(*Symbol);
OS.write((char const *)&Strings.Data.front(), Strings.Data.size());
}
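MakeSectionReal now chooses one of three encodings for a long section name depending on the string-table offset: "/decimal" up to 999,999, "/decimal" without the trailing NUL up to 9,999,999, and the new "//base64" form beyond that. A hedged reimplementation of just the base64 branch, to show what ends up in the 8-byte header name field:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Mirror of encodeBase64StringEntry: '//' followed by six base-64 digits,
// most significant digit first, filling exactly 8 bytes with no NUL.
static void encodeBase64Name(char Buffer[8], uint64_t Offset) {
  static const char Alphabet[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                                 "abcdefghijklmnopqrstuvwxyz"
                                 "0123456789+/";
  assert(Offset <= 0xFFFFFFFFFULL && "offset does not fit in 6 digits");
  Buffer[0] = '/';
  Buffer[1] = '/';
  for (int I = 7; I >= 2; --I) {
    Buffer[I] = Alphabet[Offset % 64];
    Offset /= 64;
  }
}

int main() {
  char Name[9] = {};
  encodeBase64Name(Name, 10000000); // first offset that needs base64
  std::printf("%s\n", Name);        // prints //AAmJaA
  return 0;
}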
diff --git a/contrib/llvm/lib/MC/WinCOFFStreamer.cpp b/contrib/llvm/lib/MC/WinCOFFStreamer.cpp
index 5b5aad7..d391a3f 100644
--- a/contrib/llvm/lib/MC/WinCOFFStreamer.cpp
+++ b/contrib/llvm/lib/MC/WinCOFFStreamer.cpp
@@ -7,12 +7,11 @@
//
//===----------------------------------------------------------------------===//
//
-// This file contains an implementation of a Win32 COFF object file streamer.
+// This file contains an implementation of a Windows COFF object file streamer.
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "WinCOFFStreamer"
-
+#include "llvm/ADT/StringExtras.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmLayout.h"
@@ -20,12 +19,14 @@
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/MCWin64EH.h"
+#include "llvm/MC/MCWinCOFFStreamer.h"
#include "llvm/Support/COFF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -34,308 +35,220 @@
using namespace llvm;
-namespace {
-class WinCOFFStreamer : public MCObjectStreamer {
-public:
- MCSymbol const *CurSymbol;
-
- WinCOFFStreamer(MCContext &Context,
- MCAsmBackend &MAB,
- MCCodeEmitter &CE,
- raw_ostream &OS);
-
- void AddCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment, bool External);
-
- // MCStreamer interface
-
- virtual void InitSections();
- virtual void InitToTextSection();
- virtual void EmitLabel(MCSymbol *Symbol);
- virtual void EmitDebugLabel(MCSymbol *Symbol);
- virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
- virtual void EmitThumbFunc(MCSymbol *Func);
- virtual bool EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute);
- virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue);
- virtual void BeginCOFFSymbolDef(MCSymbol const *Symbol);
- virtual void EmitCOFFSymbolStorageClass(int StorageClass);
- virtual void EmitCOFFSymbolType(int Type);
- virtual void EndCOFFSymbolDef();
- virtual void EmitCOFFSecRel32(MCSymbol const *Symbol);
- virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value);
- virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment);
- virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment);
- virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
- uint64_t Size,unsigned ByteAlignment);
- virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
- uint64_t Size, unsigned ByteAlignment);
- virtual void EmitFileDirective(StringRef Filename);
- virtual void EmitIdent(StringRef IdentString);
- virtual void EmitWin64EHHandlerData();
- virtual void FinishImpl();
-
-private:
- virtual void EmitInstToData(const MCInst &Inst) {
- MCDataFragment *DF = getOrCreateDataFragment();
-
- SmallVector<MCFixup, 4> Fixups;
- SmallString<256> Code;
- raw_svector_ostream VecOS(Code);
- getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
- VecOS.flush();
-
- // Add the fixups and data.
- for (unsigned i = 0, e = Fixups.size(); i != e; ++i) {
- Fixups[i].setOffset(Fixups[i].getOffset() + DF->getContents().size());
- DF->getFixups().push_back(Fixups[i]);
- }
- DF->getContents().append(Code.begin(), Code.end());
- }
+#define DEBUG_TYPE "WinCOFFStreamer"
- void SetSection(StringRef Section,
- unsigned Characteristics,
- SectionKind Kind) {
- SwitchSection(getContext().getCOFFSection(Section, Characteristics, Kind));
- }
+namespace llvm {
+MCWinCOFFStreamer::MCWinCOFFStreamer(MCContext &Context, MCAsmBackend &MAB,
+ MCCodeEmitter &CE, raw_ostream &OS)
+ : MCObjectStreamer(Context, MAB, OS, &CE), CurSymbol(nullptr) {}
- void SetSectionText() {
- SetSection(".text",
- COFF::IMAGE_SCN_CNT_CODE
- | COFF::IMAGE_SCN_MEM_EXECUTE
- | COFF::IMAGE_SCN_MEM_READ,
- SectionKind::getText());
- EmitCodeAlignment(4, 0);
- }
+void MCWinCOFFStreamer::EmitInstToData(const MCInst &Inst,
+ const MCSubtargetInfo &STI) {
+ MCDataFragment *DF = getOrCreateDataFragment();
- void SetSectionData() {
- SetSection(".data",
- COFF::IMAGE_SCN_CNT_INITIALIZED_DATA
- | COFF::IMAGE_SCN_MEM_READ
- | COFF::IMAGE_SCN_MEM_WRITE,
- SectionKind::getDataRel());
- EmitCodeAlignment(4, 0);
- }
+ SmallVector<MCFixup, 4> Fixups;
+ SmallString<256> Code;
+ raw_svector_ostream VecOS(Code);
+ getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, Fixups, STI);
+ VecOS.flush();
- void SetSectionBSS() {
- SetSection(".bss",
- COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA
- | COFF::IMAGE_SCN_MEM_READ
- | COFF::IMAGE_SCN_MEM_WRITE,
- SectionKind::getBSS());
- EmitCodeAlignment(4, 0);
+ // Add the fixups and data.
+ for (unsigned i = 0, e = Fixups.size(); i != e; ++i) {
+ Fixups[i].setOffset(Fixups[i].getOffset() + DF->getContents().size());
+ DF->getFixups().push_back(Fixups[i]);
}
-};
-} // end anonymous namespace.
-
-WinCOFFStreamer::WinCOFFStreamer(MCContext &Context, MCAsmBackend &MAB,
- MCCodeEmitter &CE, raw_ostream &OS)
- : MCObjectStreamer(Context, 0, MAB, OS, &CE), CurSymbol(NULL) {}
-
-void WinCOFFStreamer::AddCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment, bool External) {
- assert(!Symbol->isInSection() && "Symbol must not already have a section!");
-
- std::string SectionName(".bss$linkonce");
- SectionName.append(Symbol->getName().begin(), Symbol->getName().end());
-
- MCSymbolData &SymbolData = getAssembler().getOrCreateSymbolData(*Symbol);
-
- unsigned Characteristics =
- COFF::IMAGE_SCN_LNK_COMDAT |
- COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA |
- COFF::IMAGE_SCN_MEM_READ |
- COFF::IMAGE_SCN_MEM_WRITE;
-
- int Selection = COFF::IMAGE_COMDAT_SELECT_LARGEST;
-
- const MCSection *Section = MCStreamer::getContext().getCOFFSection(
- SectionName, Characteristics, SectionKind::getBSS(), Symbol->getName(),
- Selection);
- MCSectionData &SectionData = getAssembler().getOrCreateSectionData(*Section);
-
- if (SectionData.getAlignment() < ByteAlignment)
- SectionData.setAlignment(ByteAlignment);
-
- SymbolData.setExternal(External);
-
- AssignSection(Symbol, Section);
-
- if (ByteAlignment != 1)
- new MCAlignFragment(ByteAlignment, 0, 0, ByteAlignment, &SectionData);
-
- SymbolData.setFragment(new MCFillFragment(0, 0, Size, &SectionData));
+ DF->getContents().append(Code.begin(), Code.end());
}
-// MCStreamer interface
+void MCWinCOFFStreamer::InitSections() {
+ // FIXME: this is identical to the ELF one.
+ // This emulates the same behavior of GNU as. This makes it easier
+ // to compare the output as the major sections are in the same order.
+ SwitchSection(getContext().getObjectFileInfo()->getTextSection());
+ EmitCodeAlignment(4);
-void WinCOFFStreamer::InitToTextSection() {
- SetSectionText();
-}
+ SwitchSection(getContext().getObjectFileInfo()->getDataSection());
+ EmitCodeAlignment(4);
+
+ SwitchSection(getContext().getObjectFileInfo()->getBSSSection());
+ EmitCodeAlignment(4);
-void WinCOFFStreamer::InitSections() {
- SetSectionText();
- SetSectionData();
- SetSectionBSS();
- SetSectionText();
+ SwitchSection(getContext().getObjectFileInfo()->getTextSection());
}
-void WinCOFFStreamer::EmitLabel(MCSymbol *Symbol) {
+void MCWinCOFFStreamer::EmitLabel(MCSymbol *Symbol) {
assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
MCObjectStreamer::EmitLabel(Symbol);
}
-void WinCOFFStreamer::EmitDebugLabel(MCSymbol *Symbol) {
- EmitLabel(Symbol);
-}
-void WinCOFFStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
+void MCWinCOFFStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
llvm_unreachable("not implemented");
}
-void WinCOFFStreamer::EmitThumbFunc(MCSymbol *Func) {
+void MCWinCOFFStreamer::EmitThumbFunc(MCSymbol *Func) {
llvm_unreachable("not implemented");
}
-bool WinCOFFStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
- MCSymbolAttr Attribute) {
+bool MCWinCOFFStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
+ MCSymbolAttr Attribute) {
assert(Symbol && "Symbol must be non-null!");
- assert((Symbol->isInSection()
- ? Symbol->getSection().getVariant() == MCSection::SV_COFF
- : true) && "Got non COFF section in the COFF backend!");
+ assert((!Symbol->isInSection() ||
+ Symbol->getSection().getVariant() == MCSection::SV_COFF) &&
+ "Got non-COFF section in the COFF backend!");
+
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
+
switch (Attribute) {
+ default: return false;
case MCSA_WeakReference:
- case MCSA_Weak: {
- MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
- SD.modifyFlags(COFF::SF_WeakExternal, COFF::SF_WeakExternal);
- SD.setExternal(true);
- }
+ case MCSA_Weak:
+ SD.modifyFlags(COFF::SF_WeakExternal, COFF::SF_WeakExternal);
+ SD.setExternal(true);
break;
-
case MCSA_Global:
- getAssembler().getOrCreateSymbolData(*Symbol).setExternal(true);
+ SD.setExternal(true);
break;
-
- default:
- return false;
}
return true;
}
-void WinCOFFStreamer::EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {
+void MCWinCOFFStreamer::EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {
llvm_unreachable("not implemented");
}
-void WinCOFFStreamer::BeginCOFFSymbolDef(MCSymbol const *Symbol) {
- assert((Symbol->isInSection()
- ? Symbol->getSection().getVariant() == MCSection::SV_COFF
- : true) && "Got non COFF section in the COFF backend!");
- assert(CurSymbol == NULL && "EndCOFFSymbolDef must be called between calls "
- "to BeginCOFFSymbolDef!");
+void MCWinCOFFStreamer::BeginCOFFSymbolDef(MCSymbol const *Symbol) {
+ assert((!Symbol->isInSection() ||
+ Symbol->getSection().getVariant() == MCSection::SV_COFF) &&
+ "Got non-COFF section in the COFF backend!");
+
+ if (CurSymbol)
+ FatalError("starting a new symbol definition without completing the "
+ "previous one");
CurSymbol = Symbol;
}
-void WinCOFFStreamer::EmitCOFFSymbolStorageClass(int StorageClass) {
- assert(CurSymbol != NULL && "BeginCOFFSymbolDef must be called first!");
- assert((StorageClass & ~0xFF) == 0 && "StorageClass must only have data in "
- "the first byte!");
+void MCWinCOFFStreamer::EmitCOFFSymbolStorageClass(int StorageClass) {
+ if (!CurSymbol)
+ FatalError("storage class specified outside of symbol definition");
+
+ if (StorageClass & ~0xff)
+ FatalError(Twine("storage class value '") + itostr(StorageClass) +
+ "' out of range");
- getAssembler().getOrCreateSymbolData(*CurSymbol).modifyFlags(
- StorageClass << COFF::SF_ClassShift,
- COFF::SF_ClassMask);
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*CurSymbol);
+ SD.modifyFlags(StorageClass << COFF::SF_ClassShift, COFF::SF_ClassMask);
}
-void WinCOFFStreamer::EmitCOFFSymbolType(int Type) {
- assert(CurSymbol != NULL && "BeginCOFFSymbolDef must be called first!");
- assert((Type & ~0xFFFF) == 0 && "Type must only have data in the first 2 "
- "bytes");
+void MCWinCOFFStreamer::EmitCOFFSymbolType(int Type) {
+ if (!CurSymbol)
+ FatalError("symbol type specified outside of a symbol definition");
- getAssembler().getOrCreateSymbolData(*CurSymbol).modifyFlags(
- Type << COFF::SF_TypeShift,
- COFF::SF_TypeMask);
+ if (Type & ~0xffff)
+ FatalError(Twine("type value '") + itostr(Type) + "' out of range");
+
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*CurSymbol);
+ SD.modifyFlags(Type << COFF::SF_TypeShift, COFF::SF_TypeMask);
}
-void WinCOFFStreamer::EndCOFFSymbolDef() {
- assert(CurSymbol != NULL && "BeginCOFFSymbolDef must be called first!");
- CurSymbol = NULL;
+void MCWinCOFFStreamer::EndCOFFSymbolDef() {
+ if (!CurSymbol)
+ FatalError("ending symbol definition without starting one");
+ CurSymbol = nullptr;
}
-void WinCOFFStreamer::EmitCOFFSecRel32(MCSymbol const *Symbol)
-{
+void MCWinCOFFStreamer::EmitCOFFSectionIndex(MCSymbol const *Symbol) {
MCDataFragment *DF = getOrCreateDataFragment();
+ const MCSymbolRefExpr *SRE = MCSymbolRefExpr::Create(Symbol, getContext());
+ MCFixup Fixup = MCFixup::Create(DF->getContents().size(), SRE, FK_SecRel_2);
+ DF->getFixups().push_back(Fixup);
+ DF->getContents().resize(DF->getContents().size() + 4, 0);
+}
- DF->getFixups().push_back(
- MCFixup::Create(DF->getContents().size(),
- MCSymbolRefExpr::Create (Symbol, getContext ()),
- FK_SecRel_4));
+void MCWinCOFFStreamer::EmitCOFFSecRel32(MCSymbol const *Symbol) {
+ MCDataFragment *DF = getOrCreateDataFragment();
+ const MCSymbolRefExpr *SRE = MCSymbolRefExpr::Create(Symbol, getContext());
+ MCFixup Fixup = MCFixup::Create(DF->getContents().size(), SRE, FK_SecRel_4);
+ DF->getFixups().push_back(Fixup);
DF->getContents().resize(DF->getContents().size() + 4, 0);
}
-void WinCOFFStreamer::EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {
- llvm_unreachable("not implemented");
+void MCWinCOFFStreamer::EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {
+ llvm_unreachable("not supported");
}
-void WinCOFFStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment) {
- assert((Symbol->isInSection()
- ? Symbol->getSection().getVariant() == MCSection::SV_COFF
- : true) && "Got non COFF section in the COFF backend!");
- AddCommonSymbol(Symbol, Size, ByteAlignment, true);
+void MCWinCOFFStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) {
+ assert((!Symbol->isInSection() ||
+ Symbol->getSection().getVariant() == MCSection::SV_COFF) &&
+ "Got non-COFF section in the COFF backend!");
+
+ if (ByteAlignment > 32)
+ report_fatal_error("alignment is limited to 32-bytes");
+
+ AssignSection(Symbol, nullptr);
+
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
+ SD.setExternal(true);
+ SD.setCommon(Size, ByteAlignment);
}
-void WinCOFFStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment) {
- assert((Symbol->isInSection()
- ? Symbol->getSection().getVariant() == MCSection::SV_COFF
- : true) && "Got non COFF section in the COFF backend!");
- AddCommonSymbol(Symbol, Size, ByteAlignment, false);
+void MCWinCOFFStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) {
+ assert(!Symbol->isInSection() && "Symbol must not already have a section!");
+
+ const MCSection *Section = getContext().getObjectFileInfo()->getBSSSection();
+ MCSectionData &SectionData = getAssembler().getOrCreateSectionData(*Section);
+ if (SectionData.getAlignment() < ByteAlignment)
+ SectionData.setAlignment(ByteAlignment);
+
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
+ SD.setExternal(false);
+
+ AssignSection(Symbol, Section);
+
+ if (ByteAlignment != 1)
+ new MCAlignFragment(ByteAlignment, /*_Value=*/0, /*_ValueSize=*/0,
+ ByteAlignment, &SectionData);
+
+ MCFillFragment *Fragment =
+ new MCFillFragment(/*_Value=*/0, /*_ValueSize=*/0, Size, &SectionData);
+ SD.setFragment(Fragment);
}
-void WinCOFFStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
- uint64_t Size,unsigned ByteAlignment) {
+void MCWinCOFFStreamer::EmitZerofill(const MCSection *Section,
+ MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) {
llvm_unreachable("not implemented");
}
-void WinCOFFStreamer::EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
- uint64_t Size, unsigned ByteAlignment) {
+void MCWinCOFFStreamer::EmitTBSSSymbol(const MCSection *Section,
+ MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) {
llvm_unreachable("not implemented");
}
-void WinCOFFStreamer::EmitFileDirective(StringRef Filename) {
- // Ignore for now, linkers don't care, and proper debug
- // info will be a much large effort.
+void MCWinCOFFStreamer::EmitFileDirective(StringRef Filename) {
+ getAssembler().addFileName(Filename);
}
// TODO: Implement this if you want to emit .comment section in COFF obj files.
-void WinCOFFStreamer::EmitIdent(StringRef IdentString) {
- llvm_unreachable("unsupported directive");
+void MCWinCOFFStreamer::EmitIdent(StringRef IdentString) {
+ llvm_unreachable("not implemented");
}
-void WinCOFFStreamer::EmitWin64EHHandlerData() {
- MCStreamer::EmitWin64EHHandlerData();
-
- // We have to emit the unwind info now, because this directive
- // actually switches to the .xdata section!
- MCWin64EHUnwindEmitter::EmitUnwindInfo(*this, getCurrentW64UnwindInfo());
+void MCWinCOFFStreamer::EmitWinEHHandlerData() {
+ llvm_unreachable("not implemented");
}
-void WinCOFFStreamer::FinishImpl() {
- EmitFrames(NULL, true);
- EmitW64Tables();
+void MCWinCOFFStreamer::FinishImpl() {
MCObjectStreamer::FinishImpl();
}
-namespace llvm
-{
- MCStreamer *createWinCOFFStreamer(MCContext &Context,
- MCAsmBackend &MAB,
- MCCodeEmitter &CE,
- raw_ostream &OS,
- bool RelaxAll) {
- WinCOFFStreamer *S = new WinCOFFStreamer(Context, MAB, CE, OS);
- S->getAssembler().setRelaxAll(RelaxAll);
- return S;
- }
+LLVM_ATTRIBUTE_NORETURN
+void MCWinCOFFStreamer::FatalError(const Twine &Msg) const {
+ getContext().FatalError(SMLoc(), Msg);
+}
}
+
diff --git a/contrib/llvm/lib/Object/YAML.cpp b/contrib/llvm/lib/MC/YAML.cpp
index c527bde..067e91a 100644
--- a/contrib/llvm/lib/Object/YAML.cpp
+++ b/contrib/llvm/lib/MC/YAML.cpp
@@ -12,21 +12,20 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Object/YAML.h"
+#include "llvm/MC/YAML.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cctype>
using namespace llvm;
-using namespace object::yaml;
-void yaml::ScalarTraits<object::yaml::BinaryRef>::output(
- const object::yaml::BinaryRef &Val, void *, llvm::raw_ostream &Out) {
+void yaml::ScalarTraits<yaml::BinaryRef>::output(
+ const yaml::BinaryRef &Val, void *, llvm::raw_ostream &Out) {
Val.writeAsHex(Out);
}
-StringRef yaml::ScalarTraits<object::yaml::BinaryRef>::input(
- StringRef Scalar, void *, object::yaml::BinaryRef &Val) {
+StringRef yaml::ScalarTraits<yaml::BinaryRef>::input(StringRef Scalar, void *,
+ yaml::BinaryRef &Val) {
if (Scalar.size() % 2 != 0)
return "BinaryRef hex string must contain an even number of nybbles.";
// TODO: Can we improve YAMLIO to permit a more accurate diagnostic here?
@@ -34,11 +33,11 @@ StringRef yaml::ScalarTraits<object::yaml::BinaryRef>::input(
for (unsigned I = 0, N = Scalar.size(); I != N; ++I)
if (!isxdigit(Scalar[I]))
return "BinaryRef hex string must contain only hex digits.";
- Val = object::yaml::BinaryRef(Scalar);
+ Val = yaml::BinaryRef(Scalar);
return StringRef();
}
-void BinaryRef::writeAsBinary(raw_ostream &OS) const {
+void yaml::BinaryRef::writeAsBinary(raw_ostream &OS) const {
if (!DataIsHexString) {
OS.write((const char *)Data.data(), Data.size());
return;
@@ -50,11 +49,9 @@ void BinaryRef::writeAsBinary(raw_ostream &OS) const {
}
}
-void BinaryRef::writeAsHex(raw_ostream &OS) const {
- if (binary_size() == 0) {
- OS << "\"\"";
+void yaml::BinaryRef::writeAsHex(raw_ostream &OS) const {
+ if (binary_size() == 0)
return;
- }
if (DataIsHexString) {
OS.write((const char *)Data.data(), Data.size());
return;
diff --git a/contrib/llvm/lib/Object/Archive.cpp b/contrib/llvm/lib/Object/Archive.cpp
index 71efca2..e32bdd5 100644
--- a/contrib/llvm/lib/Object/Archive.cpp
+++ b/contrib/llvm/lib/Object/Archive.cpp
@@ -110,23 +110,19 @@ Archive::Child Archive::Child::getNext() const {
// Check to see if this is past the end of the archive.
if (NextLoc >= Parent->Data->getBufferEnd())
- return Child(Parent, NULL);
+ return Child(Parent, nullptr);
return Child(Parent, NextLoc);
}
-error_code Archive::Child::getName(StringRef &Result) const {
+ErrorOr<StringRef> Archive::Child::getName() const {
StringRef name = getRawName();
// Check if it's a special name.
if (name[0] == '/') {
- if (name.size() == 1) { // Linker member.
- Result = name;
- return object_error::success;
- }
- if (name.size() == 2 && name[1] == '/') { // String table.
- Result = name;
- return object_error::success;
- }
+ if (name.size() == 1) // Linker member.
+ return name;
+ if (name.size() == 2 && name[1] == '/') // String table.
+ return name;
// It's a long name.
// Get the offset.
std::size_t offset;
@@ -136,7 +132,7 @@ error_code Archive::Child::getName(StringRef &Result) const {
+ sizeof(ArchiveMemberHeader)
+ offset;
// Verify it.
- if (Parent->StringTable == Parent->end_children()
+ if (Parent->StringTable == Parent->child_end()
|| addr < (Parent->StringTable->Data.begin()
+ sizeof(ArchiveMemberHeader))
|| addr > (Parent->StringTable->Data.begin()
@@ -147,65 +143,67 @@ error_code Archive::Child::getName(StringRef &Result) const {
// GNU long file names end with a /.
if (Parent->kind() == K_GNU) {
StringRef::size_type End = StringRef(addr).find('/');
- Result = StringRef(addr, End);
- } else {
- Result = addr;
+ return StringRef(addr, End);
}
- return object_error::success;
+ return StringRef(addr);
} else if (name.startswith("#1/")) {
uint64_t name_size;
if (name.substr(3).rtrim(" ").getAsInteger(10, name_size))
llvm_unreachable("Long name length is not an ingeter");
- Result = Data.substr(sizeof(ArchiveMemberHeader), name_size)
+ return Data.substr(sizeof(ArchiveMemberHeader), name_size)
.rtrim(StringRef("\0", 1));
- return object_error::success;
}
// It's a simple name.
if (name[name.size() - 1] == '/')
- Result = name.substr(0, name.size() - 1);
- else
- Result = name;
- return object_error::success;
+ return name.substr(0, name.size() - 1);
+ return name;
}
-error_code Archive::Child::getMemoryBuffer(OwningPtr<MemoryBuffer> &Result,
- bool FullPath) const {
- StringRef Name;
- if (error_code ec = getName(Name))
- return ec;
+ErrorOr<std::unique_ptr<MemoryBuffer>>
+Archive::Child::getMemoryBuffer(bool FullPath) const {
+ ErrorOr<StringRef> NameOrErr = getName();
+ if (std::error_code EC = NameOrErr.getError())
+ return EC;
+ StringRef Name = NameOrErr.get();
SmallString<128> Path;
- Result.reset(MemoryBuffer::getMemBuffer(
- getBuffer(), FullPath ? (Twine(Parent->getFileName()) + "(" + Name + ")")
- .toStringRef(Path)
- : Name,
+ std::unique_ptr<MemoryBuffer> Ret(MemoryBuffer::getMemBuffer(
+ getBuffer(),
+ FullPath
+ ? (Twine(Parent->getFileName()) + "(" + Name + ")").toStringRef(Path)
+ : Name,
false));
- return error_code::success();
+ return std::move(Ret);
+}
+
+ErrorOr<std::unique_ptr<Binary>>
+Archive::Child::getAsBinary(LLVMContext *Context) const {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> BuffOrErr = getMemoryBuffer();
+ if (std::error_code EC = BuffOrErr.getError())
+ return EC;
+
+ return createBinary(std::move(*BuffOrErr), Context);
}
-error_code Archive::Child::getAsBinary(OwningPtr<Binary> &Result) const {
- OwningPtr<Binary> ret;
- OwningPtr<MemoryBuffer> Buff;
- if (error_code ec = getMemoryBuffer(Buff))
- return ec;
- if (error_code ec = createBinary(Buff.take(), ret))
- return ec;
- Result.swap(ret);
- return object_error::success;
+ErrorOr<Archive *> Archive::create(std::unique_ptr<MemoryBuffer> Source) {
+ std::error_code EC;
+ std::unique_ptr<Archive> Ret(new Archive(std::move(Source), EC));
+ if (EC)
+ return EC;
+ return Ret.release();
}
-Archive::Archive(MemoryBuffer *source, error_code &ec)
- : Binary(Binary::ID_Archive, source), SymbolTable(end_children()) {
+Archive::Archive(std::unique_ptr<MemoryBuffer> Source, std::error_code &ec)
+ : Binary(Binary::ID_Archive, std::move(Source)), SymbolTable(child_end()) {
// Check for sufficient magic.
- assert(source);
- if (source->getBufferSize() < 8 ||
- StringRef(source->getBufferStart(), 8) != Magic) {
+ if (Data->getBufferSize() < 8 ||
+ StringRef(Data->getBufferStart(), 8) != Magic) {
ec = object_error::invalid_file_type;
return;
}
// Get the special members.
- child_iterator i = begin_children(false);
- child_iterator e = end_children();
+ child_iterator i = child_begin(false);
+ child_iterator e = child_end();
if (i == e) {
ec = object_error::success;
@@ -245,9 +243,11 @@ Archive::Archive(MemoryBuffer *source, error_code &ec)
if (Name.startswith("#1/")) {
Format = K_BSD;
// We know this is BSD, so getName will work since there is no string table.
- ec = i->getName(Name);
+ ErrorOr<StringRef> NameOrErr = i->getName();
+ ec = NameOrErr.getError();
if (ec)
return;
+ Name = NameOrErr.get();
if (Name == "__.SYMDEF SORTED") {
SymbolTable = i;
++i;
@@ -309,9 +309,9 @@ Archive::Archive(MemoryBuffer *source, error_code &ec)
ec = object_error::success;
}
-Archive::child_iterator Archive::begin_children(bool SkipInternal) const {
+Archive::child_iterator Archive::child_begin(bool SkipInternal) const {
if (Data->getBufferSize() == 8) // empty archive.
- return end_children();
+ return child_end();
if (SkipInternal)
return FirstRegular;
@@ -321,16 +321,15 @@ Archive::child_iterator Archive::begin_children(bool SkipInternal) const {
return c;
}
-Archive::child_iterator Archive::end_children() const {
- return Child(this, NULL);
+Archive::child_iterator Archive::child_end() const {
+ return Child(this, nullptr);
}
-error_code Archive::Symbol::getName(StringRef &Result) const {
- Result = StringRef(Parent->SymbolTable->getBuffer().begin() + StringIndex);
- return object_error::success;
+StringRef Archive::Symbol::getName() const {
+ return Parent->SymbolTable->getBuffer().begin() + StringIndex;
}
-error_code Archive::Symbol::getMember(child_iterator &Result) const {
+ErrorOr<Archive::child_iterator> Archive::Symbol::getMember() const {
const char *Buf = Parent->SymbolTable->getBuffer().begin();
const char *Offsets = Buf + 4;
uint32_t Offset = 0;
@@ -338,7 +337,14 @@ error_code Archive::Symbol::getMember(child_iterator &Result) const {
Offset = *(reinterpret_cast<const support::ubig32_t*>(Offsets)
+ SymbolIndex);
} else if (Parent->kind() == K_BSD) {
- llvm_unreachable("BSD format is not supported");
+    // The SymbolIndex is an index into the ranlib structs that start at
+    // Offsets (the first uint32_t is the number of bytes of the ranlib
+    // structs). Each ranlib struct is a pair of uint32_t's: the first is a
+    // string table offset and the second is the offset into the archive of
+    // the member that defines the symbol, which is what is needed here.
+ Offset = *(reinterpret_cast<const support::ulittle32_t *>(Offsets) +
+ (SymbolIndex * 2) + 1);
} else {
uint32_t MemberCount = *reinterpret_cast<const support::ulittle32_t*>(Buf);
@@ -370,21 +376,54 @@ error_code Archive::Symbol::getMember(child_iterator &Result) const {
}
const char *Loc = Parent->getData().begin() + Offset;
- Result = Child(Parent, Loc);
-
- return object_error::success;
+ child_iterator Iter(Child(Parent, Loc));
+ return Iter;
}
Archive::Symbol Archive::Symbol::getNext() const {
Symbol t(*this);
- // Go to one past next null.
- t.StringIndex =
- Parent->SymbolTable->getBuffer().find('\0', t.StringIndex) + 1;
+ if (Parent->kind() == K_BSD) {
+    // t.StringIndex is an offset from the start of the __.SYMDEF or
+    // "__.SYMDEF SORTED" member into the string table for the ranlib
+    // struct indexed by t.SymbolIndex. To change t.StringIndex to the
+    // offset in the string table for t.SymbolIndex+1, we subtract the
+    // string table offset for t.SymbolIndex and add the string table
+    // offset for t.SymbolIndex+1.
+
+    // The __.SYMDEF or "__.SYMDEF SORTED" member starts with a uint32_t
+    // giving the number of bytes of ranlib structs that follow. Each ranlib
+    // struct is a pair of uint32_t's: a string table offset and the offset
+    // into the archive of the member that defines the symbol. After that,
+    // the next uint32_t is the byte count of the string table, followed by
+    // the string table itself.
+ const char *Buf = Parent->SymbolTable->getBuffer().begin();
+ uint32_t RanlibCount = 0;
+ RanlibCount = (*reinterpret_cast<const support::ulittle32_t *>(Buf)) /
+ (sizeof(uint32_t) * 2);
+    // If t.SymbolIndex + 1 is past the count of symbols (RanlibCount),
+    // don't change t.StringIndex, as we don't want to reference a ranlib
+    // struct past RanlibCount.
+ if (t.SymbolIndex + 1 < RanlibCount) {
+ const char *Ranlibs = Buf + 4;
+ uint32_t CurRanStrx = 0;
+ uint32_t NextRanStrx = 0;
+ CurRanStrx = *(reinterpret_cast<const support::ulittle32_t *>(Ranlibs) +
+ (t.SymbolIndex * 2));
+ NextRanStrx = *(reinterpret_cast<const support::ulittle32_t *>(Ranlibs) +
+ ((t.SymbolIndex + 1) * 2));
+ t.StringIndex -= CurRanStrx;
+ t.StringIndex += NextRanStrx;
+ }
+ } else {
+ // Go to one past next null.
+ t.StringIndex =
+ Parent->SymbolTable->getBuffer().find('\0', t.StringIndex) + 1;
+ }
++t.SymbolIndex;
return t;
}
-Archive::symbol_iterator Archive::begin_symbols() const {
+Archive::symbol_iterator Archive::symbol_begin() const {
if (!hasSymbolTable())
return symbol_iterator(Symbol(this, 0, 0));
@@ -394,7 +433,22 @@ Archive::symbol_iterator Archive::begin_symbols() const {
symbol_count = *reinterpret_cast<const support::ubig32_t*>(buf);
buf += sizeof(uint32_t) + (symbol_count * (sizeof(uint32_t)));
} else if (kind() == K_BSD) {
- llvm_unreachable("BSD archive format is not supported");
+    // The __.SYMDEF or "__.SYMDEF SORTED" member starts with a uint32_t
+    // giving the number of bytes of ranlib structs that follow. Each ranlib
+    // struct is a pair of uint32_t's: a string table offset and the offset
+    // into the archive of the member that defines the symbol. After that,
+    // the next uint32_t is the byte count of the string table, followed by
+    // the string table itself.
+ uint32_t ranlib_count = 0;
+ ranlib_count = (*reinterpret_cast<const support::ulittle32_t *>(buf)) /
+ (sizeof(uint32_t) * 2);
+ const char *ranlibs = buf + 4;
+ uint32_t ran_strx = 0;
+ ran_strx = *(reinterpret_cast<const support::ulittle32_t *>(ranlibs));
+ buf += sizeof(uint32_t) + (ranlib_count * (2 * (sizeof(uint32_t))));
+ // Skip the byte count of the string table.
+ buf += sizeof(uint32_t);
+ buf += ran_strx;
} else {
uint32_t member_count = 0;
uint32_t symbol_count = 0;
@@ -407,7 +461,7 @@ Archive::symbol_iterator Archive::begin_symbols() const {
return symbol_iterator(Symbol(this, 0, string_start_offset));
}
-Archive::symbol_iterator Archive::end_symbols() const {
+Archive::symbol_iterator Archive::symbol_end() const {
if (!hasSymbolTable())
return symbol_iterator(Symbol(this, 0, 0));
@@ -416,7 +470,8 @@ Archive::symbol_iterator Archive::end_symbols() const {
if (kind() == K_GNU) {
symbol_count = *reinterpret_cast<const support::ubig32_t*>(buf);
} else if (kind() == K_BSD) {
- llvm_unreachable("BSD archive format is not supported");
+ symbol_count = (*reinterpret_cast<const support::ulittle32_t *>(buf)) /
+ (sizeof(uint32_t) * 2);
} else {
uint32_t member_count = 0;
member_count = *reinterpret_cast<const support::ulittle32_t*>(buf);
@@ -428,23 +483,22 @@ Archive::symbol_iterator Archive::end_symbols() const {
}
Archive::child_iterator Archive::findSym(StringRef name) const {
- Archive::symbol_iterator bs = begin_symbols();
- Archive::symbol_iterator es = end_symbols();
- Archive::child_iterator result;
-
- StringRef symname;
+ Archive::symbol_iterator bs = symbol_begin();
+ Archive::symbol_iterator es = symbol_end();
+
for (; bs != es; ++bs) {
- if (bs->getName(symname))
- return end_children();
- if (symname == name) {
- if (bs->getMember(result))
- return end_children();
- return result;
+ StringRef SymName = bs->getName();
+ if (SymName == name) {
+ ErrorOr<Archive::child_iterator> ResultOrErr = bs->getMember();
+ // FIXME: Should we really eat the error?
+ if (ResultOrErr.getError())
+ return child_end();
+ return ResultOrErr.get();
}
}
- return end_children();
+ return child_end();
}
bool Archive::hasSymbolTable() const {
- return SymbolTable != end_children();
+ return SymbolTable != child_end();
}
diff --git a/contrib/llvm/lib/Object/Binary.cpp b/contrib/llvm/lib/Object/Binary.cpp
index de57b4c..552d5db 100644
--- a/contrib/llvm/lib/Object/Binary.cpp
+++ b/contrib/llvm/lib/Object/Binary.cpp
@@ -13,26 +13,22 @@
#include "llvm/Object/Binary.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
// Include headers for createBinary.
#include "llvm/Object/Archive.h"
-#include "llvm/Object/COFF.h"
#include "llvm/Object/MachOUniversal.h"
#include "llvm/Object/ObjectFile.h"
using namespace llvm;
using namespace object;
-Binary::~Binary() {
- delete Data;
-}
+Binary::~Binary() {}
-Binary::Binary(unsigned int Type, MemoryBuffer *Source)
- : TypeID(Type)
- , Data(Source) {}
+Binary::Binary(unsigned int Type, std::unique_ptr<MemoryBuffer> Source)
+ : TypeID(Type), Data(std::move(Source)) {}
StringRef Binary::getData() const {
return Data->getBuffer();
@@ -42,31 +38,17 @@ StringRef Binary::getFileName() const {
return Data->getBufferIdentifier();
}
-error_code object::createBinary(MemoryBuffer *Source,
- OwningPtr<Binary> &Result) {
- OwningPtr<MemoryBuffer> scopedSource(Source);
- if (!Source)
- return make_error_code(errc::invalid_argument);
- sys::fs::file_magic type = sys::fs::identify_magic(Source->getBuffer());
- error_code ec;
- switch (type) {
- case sys::fs::file_magic::archive: {
- OwningPtr<Binary> ret(new Archive(scopedSource.take(), ec));
- if (ec) return ec;
- Result.swap(ret);
- return object_error::success;
- }
+ErrorOr<Binary *> object::createBinary(std::unique_ptr<MemoryBuffer> Buffer,
+ LLVMContext *Context) {
+ sys::fs::file_magic Type = sys::fs::identify_magic(Buffer->getBuffer());
+
+ switch (Type) {
+ case sys::fs::file_magic::archive:
+ return Archive::create(std::move(Buffer));
case sys::fs::file_magic::elf_relocatable:
case sys::fs::file_magic::elf_executable:
case sys::fs::file_magic::elf_shared_object:
- case sys::fs::file_magic::elf_core: {
- OwningPtr<Binary> ret(
- ObjectFile::createELFObjectFile(scopedSource.take()));
- if (!ret)
- return object_error::invalid_file_type;
- Result.swap(ret);
- return object_error::success;
- }
+ case sys::fs::file_magic::elf_core:
case sys::fs::file_magic::macho_object:
case sys::fs::file_magic::macho_executable:
case sys::fs::file_magic::macho_fixed_virtual_memory_shared_lib:
@@ -76,43 +58,26 @@ error_code object::createBinary(MemoryBuffer *Source,
case sys::fs::file_magic::macho_dynamic_linker:
case sys::fs::file_magic::macho_bundle:
case sys::fs::file_magic::macho_dynamically_linked_shared_lib_stub:
- case sys::fs::file_magic::macho_dsym_companion: {
- OwningPtr<Binary> ret(
- ObjectFile::createMachOObjectFile(scopedSource.take()));
- if (!ret)
- return object_error::invalid_file_type;
- Result.swap(ret);
- return object_error::success;
- }
- case sys::fs::file_magic::macho_universal_binary: {
- OwningPtr<Binary> ret(new MachOUniversalBinary(scopedSource.take(), ec));
- if (ec) return ec;
- Result.swap(ret);
- return object_error::success;
- }
+ case sys::fs::file_magic::macho_dsym_companion:
case sys::fs::file_magic::coff_object:
case sys::fs::file_magic::coff_import_library:
- case sys::fs::file_magic::pecoff_executable: {
- OwningPtr<Binary> ret(
- ObjectFile::createCOFFObjectFile(scopedSource.take()));
- if (!ret)
- return object_error::invalid_file_type;
- Result.swap(ret);
- return object_error::success;
- }
- case sys::fs::file_magic::unknown:
+ case sys::fs::file_magic::pecoff_executable:
case sys::fs::file_magic::bitcode:
- case sys::fs::file_magic::windows_resource: {
+ return ObjectFile::createSymbolicFile(Buffer, Type, Context);
+ case sys::fs::file_magic::macho_universal_binary:
+ return MachOUniversalBinary::create(std::move(Buffer));
+ case sys::fs::file_magic::unknown:
+ case sys::fs::file_magic::windows_resource:
// Unrecognized object file format.
return object_error::invalid_file_type;
- }
}
llvm_unreachable("Unexpected Binary File Type");
}
-error_code object::createBinary(StringRef Path, OwningPtr<Binary> &Result) {
- OwningPtr<MemoryBuffer> File;
- if (error_code ec = MemoryBuffer::getFileOrSTDIN(Path, File))
- return ec;
- return createBinary(File.take(), Result);
+ErrorOr<Binary *> object::createBinary(StringRef Path) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> FileOrErr =
+ MemoryBuffer::getFileOrSTDIN(Path);
+ if (std::error_code EC = FileOrErr.getError())
+ return EC;
+ return createBinary(std::move(*FileOrErr));
}
diff --git a/contrib/llvm/lib/Object/COFFObjectFile.cpp b/contrib/llvm/lib/Object/COFFObjectFile.cpp
index 42066c3..46ef87d 100644
--- a/contrib/llvm/lib/Object/COFFObjectFile.cpp
+++ b/contrib/llvm/lib/Object/COFFObjectFile.cpp
@@ -16,25 +16,25 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/Support/COFF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <cctype>
+#include <limits>
using namespace llvm;
using namespace object;
-namespace {
using support::ulittle8_t;
using support::ulittle16_t;
using support::ulittle32_t;
using support::little16_t;
-}
-namespace {
 // Returns false and sets ec if size is greater than the buffer size.
-bool checkSize(const MemoryBuffer *m, error_code &ec, uint64_t size) {
- if (m->getBufferSize() < size) {
- ec = object_error::unexpected_eof;
+static bool checkSize(const MemoryBuffer &M, std::error_code &EC,
+ uint64_t Size) {
+ if (M.getBufferSize() < Size) {
+ EC = object_error::unexpected_eof;
return false;
}
return true;
@@ -42,332 +42,364 @@ bool checkSize(const MemoryBuffer *m, error_code &ec, uint64_t size) {
 // Sets Obj unless any bytes in [addr, addr + size) fall outside of m.
 // Returns unexpected_eof on error.
-template<typename T>
-error_code getObject(const T *&Obj, const MemoryBuffer *M, const uint8_t *Ptr,
- const size_t Size = sizeof(T)) {
+template <typename T>
+static std::error_code getObject(const T *&Obj, const MemoryBuffer &M,
+ const uint8_t *Ptr,
+ const size_t Size = sizeof(T)) {
uintptr_t Addr = uintptr_t(Ptr);
- if (Addr + Size < Addr ||
- Addr + Size < Size ||
- Addr + Size > uintptr_t(M->getBufferEnd())) {
+ if (Addr + Size < Addr || Addr + Size < Size ||
+ Addr + Size > uintptr_t(M.getBufferEnd())) {
return object_error::unexpected_eof;
}
Obj = reinterpret_cast<const T *>(Addr);
return object_error::success;
}
+
+// Decode a string table entry in base 64 (//AAAAAA). Expects \arg Str without
+// prefixed slashes.
+static bool decodeBase64StringEntry(StringRef Str, uint32_t &Result) {
+ assert(Str.size() <= 6 && "String too long, possible overflow.");
+ if (Str.size() > 6)
+ return true;
+
+ uint64_t Value = 0;
+ while (!Str.empty()) {
+ unsigned CharVal;
+ if (Str[0] >= 'A' && Str[0] <= 'Z') // 0..25
+ CharVal = Str[0] - 'A';
+ else if (Str[0] >= 'a' && Str[0] <= 'z') // 26..51
+ CharVal = Str[0] - 'a' + 26;
+ else if (Str[0] >= '0' && Str[0] <= '9') // 52..61
+ CharVal = Str[0] - '0' + 52;
+ else if (Str[0] == '+') // 62
+ CharVal = 62;
+ else if (Str[0] == '/') // 63
+ CharVal = 63;
+ else
+ return true;
+
+ Value = (Value * 64) + CharVal;
+ Str = Str.substr(1);
+ }
+
+ if (Value > std::numeric_limits<uint32_t>::max())
+ return true;
+
+ Result = static_cast<uint32_t>(Value);
+ return false;
}
-const coff_symbol *COFFObjectFile::toSymb(DataRefImpl Symb) const {
- const coff_symbol *addr = reinterpret_cast<const coff_symbol*>(Symb.p);
+const coff_symbol *COFFObjectFile::toSymb(DataRefImpl Ref) const {
+ const coff_symbol *Addr = reinterpret_cast<const coff_symbol*>(Ref.p);
# ifndef NDEBUG
// Verify that the symbol points to a valid entry in the symbol table.
- uintptr_t offset = uintptr_t(addr) - uintptr_t(base());
- if (offset < COFFHeader->PointerToSymbolTable
- || offset >= COFFHeader->PointerToSymbolTable
+ uintptr_t Offset = uintptr_t(Addr) - uintptr_t(base());
+ if (Offset < COFFHeader->PointerToSymbolTable
+ || Offset >= COFFHeader->PointerToSymbolTable
+ (COFFHeader->NumberOfSymbols * sizeof(coff_symbol)))
report_fatal_error("Symbol was outside of symbol table.");
- assert((offset - COFFHeader->PointerToSymbolTable) % sizeof(coff_symbol)
+ assert((Offset - COFFHeader->PointerToSymbolTable) % sizeof(coff_symbol)
== 0 && "Symbol did not point to the beginning of a symbol");
# endif
- return addr;
+ return Addr;
}
-const coff_section *COFFObjectFile::toSec(DataRefImpl Sec) const {
- const coff_section *addr = reinterpret_cast<const coff_section*>(Sec.p);
+const coff_section *COFFObjectFile::toSec(DataRefImpl Ref) const {
+ const coff_section *Addr = reinterpret_cast<const coff_section*>(Ref.p);
# ifndef NDEBUG
// Verify that the section points to a valid entry in the section table.
- if (addr < SectionTable
- || addr >= (SectionTable + COFFHeader->NumberOfSections))
+ if (Addr < SectionTable
+ || Addr >= (SectionTable + COFFHeader->NumberOfSections))
report_fatal_error("Section was outside of section table.");
- uintptr_t offset = uintptr_t(addr) - uintptr_t(SectionTable);
- assert(offset % sizeof(coff_section) == 0 &&
+ uintptr_t Offset = uintptr_t(Addr) - uintptr_t(SectionTable);
+ assert(Offset % sizeof(coff_section) == 0 &&
"Section did not point to the beginning of a section");
# endif
- return addr;
+ return Addr;
}
-error_code COFFObjectFile::getSymbolNext(DataRefImpl Symb,
- SymbolRef &Result) const {
- const coff_symbol *symb = toSymb(Symb);
- symb += 1 + symb->NumberOfAuxSymbols;
- Symb.p = reinterpret_cast<uintptr_t>(symb);
- Result = SymbolRef(Symb, this);
- return object_error::success;
+void COFFObjectFile::moveSymbolNext(DataRefImpl &Ref) const {
+ const coff_symbol *Symb = toSymb(Ref);
+ Symb += 1 + Symb->NumberOfAuxSymbols;
+ Ref.p = reinterpret_cast<uintptr_t>(Symb);
}
- error_code COFFObjectFile::getSymbolName(DataRefImpl Symb,
- StringRef &Result) const {
- const coff_symbol *symb = toSymb(Symb);
- return getSymbolName(symb, Result);
-}
-
-error_code COFFObjectFile::getSymbolFileOffset(DataRefImpl Symb,
- uint64_t &Result) const {
- const coff_symbol *symb = toSymb(Symb);
- const coff_section *Section = NULL;
- if (error_code ec = getSection(symb->SectionNumber, Section))
- return ec;
-
- if (symb->SectionNumber == COFF::IMAGE_SYM_UNDEFINED)
- Result = UnknownAddressOrSize;
- else if (Section)
- Result = Section->PointerToRawData + symb->Value;
- else
- Result = symb->Value;
- return object_error::success;
+std::error_code COFFObjectFile::getSymbolName(DataRefImpl Ref,
+ StringRef &Result) const {
+ const coff_symbol *Symb = toSymb(Ref);
+ return getSymbolName(Symb, Result);
}
-error_code COFFObjectFile::getSymbolAddress(DataRefImpl Symb,
- uint64_t &Result) const {
- const coff_symbol *symb = toSymb(Symb);
- const coff_section *Section = NULL;
- if (error_code ec = getSection(symb->SectionNumber, Section))
- return ec;
+std::error_code COFFObjectFile::getSymbolAddress(DataRefImpl Ref,
+ uint64_t &Result) const {
+ const coff_symbol *Symb = toSymb(Ref);
+ const coff_section *Section = nullptr;
+ if (std::error_code EC = getSection(Symb->SectionNumber, Section))
+ return EC;
- if (symb->SectionNumber == COFF::IMAGE_SYM_UNDEFINED)
+ if (Symb->SectionNumber == COFF::IMAGE_SYM_UNDEFINED)
Result = UnknownAddressOrSize;
else if (Section)
- Result = Section->VirtualAddress + symb->Value;
+ Result = Section->VirtualAddress + Symb->Value;
else
- Result = symb->Value;
+ Result = Symb->Value;
return object_error::success;
}
-error_code COFFObjectFile::getSymbolType(DataRefImpl Symb,
- SymbolRef::Type &Result) const {
- const coff_symbol *symb = toSymb(Symb);
+std::error_code COFFObjectFile::getSymbolType(DataRefImpl Ref,
+ SymbolRef::Type &Result) const {
+ const coff_symbol *Symb = toSymb(Ref);
Result = SymbolRef::ST_Other;
- if (symb->StorageClass == COFF::IMAGE_SYM_CLASS_EXTERNAL &&
- symb->SectionNumber == COFF::IMAGE_SYM_UNDEFINED) {
+ if (Symb->StorageClass == COFF::IMAGE_SYM_CLASS_EXTERNAL &&
+ Symb->SectionNumber == COFF::IMAGE_SYM_UNDEFINED) {
Result = SymbolRef::ST_Unknown;
+ } else if (Symb->isFunctionDefinition()) {
+ Result = SymbolRef::ST_Function;
} else {
- if (symb->getComplexType() == COFF::IMAGE_SYM_DTYPE_FUNCTION) {
- Result = SymbolRef::ST_Function;
- } else {
- uint32_t Characteristics = 0;
- if (symb->SectionNumber > 0) {
- const coff_section *Section = NULL;
- if (error_code ec = getSection(symb->SectionNumber, Section))
- return ec;
- Characteristics = Section->Characteristics;
- }
- if (Characteristics & COFF::IMAGE_SCN_MEM_READ &&
- ~Characteristics & COFF::IMAGE_SCN_MEM_WRITE) // Read only.
- Result = SymbolRef::ST_Data;
+ uint32_t Characteristics = 0;
+ if (!COFF::isReservedSectionNumber(Symb->SectionNumber)) {
+ const coff_section *Section = nullptr;
+ if (std::error_code EC = getSection(Symb->SectionNumber, Section))
+ return EC;
+ Characteristics = Section->Characteristics;
}
+ if (Characteristics & COFF::IMAGE_SCN_MEM_READ &&
+ ~Characteristics & COFF::IMAGE_SCN_MEM_WRITE) // Read only.
+ Result = SymbolRef::ST_Data;
}
return object_error::success;
}
-error_code COFFObjectFile::getSymbolFlags(DataRefImpl Symb,
- uint32_t &Result) const {
- const coff_symbol *symb = toSymb(Symb);
- Result = SymbolRef::SF_None;
+uint32_t COFFObjectFile::getSymbolFlags(DataRefImpl Ref) const {
+ const coff_symbol *Symb = toSymb(Ref);
+ uint32_t Result = SymbolRef::SF_None;
+
+ // TODO: Correctly set SF_FormatSpecific, SF_Common
- // TODO: Correctly set SF_FormatSpecific, SF_ThreadLocal, SF_Common
+ if (Symb->SectionNumber == COFF::IMAGE_SYM_UNDEFINED) {
+ if (Symb->Value == 0)
+ Result |= SymbolRef::SF_Undefined;
+ else
+ Result |= SymbolRef::SF_Common;
+ }
- if (symb->StorageClass == COFF::IMAGE_SYM_CLASS_EXTERNAL &&
- symb->SectionNumber == COFF::IMAGE_SYM_UNDEFINED)
- Result |= SymbolRef::SF_Undefined;
   // TODO: These are certainly too restrictive.
- if (symb->StorageClass == COFF::IMAGE_SYM_CLASS_EXTERNAL)
+ if (Symb->StorageClass == COFF::IMAGE_SYM_CLASS_EXTERNAL)
Result |= SymbolRef::SF_Global;
- if (symb->StorageClass == COFF::IMAGE_SYM_CLASS_WEAK_EXTERNAL)
+ if (Symb->StorageClass == COFF::IMAGE_SYM_CLASS_WEAK_EXTERNAL)
Result |= SymbolRef::SF_Weak;
- if (symb->SectionNumber == COFF::IMAGE_SYM_ABSOLUTE)
+ if (Symb->SectionNumber == COFF::IMAGE_SYM_ABSOLUTE)
Result |= SymbolRef::SF_Absolute;
- return object_error::success;
+ return Result;
}
-error_code COFFObjectFile::getSymbolSize(DataRefImpl Symb,
- uint64_t &Result) const {
+std::error_code COFFObjectFile::getSymbolSize(DataRefImpl Ref,
+ uint64_t &Result) const {
// FIXME: Return the correct size. This requires looking at all the symbols
// in the same section as this symbol, and looking for either the next
// symbol, or the end of the section.
- const coff_symbol *symb = toSymb(Symb);
- const coff_section *Section = NULL;
- if (error_code ec = getSection(symb->SectionNumber, Section))
- return ec;
+ const coff_symbol *Symb = toSymb(Ref);
+ const coff_section *Section = nullptr;
+ if (std::error_code EC = getSection(Symb->SectionNumber, Section))
+ return EC;
- if (symb->SectionNumber == COFF::IMAGE_SYM_UNDEFINED)
+ if (Symb->SectionNumber == COFF::IMAGE_SYM_UNDEFINED)
Result = UnknownAddressOrSize;
else if (Section)
- Result = Section->SizeOfRawData - symb->Value;
+ Result = Section->SizeOfRawData - Symb->Value;
else
Result = 0;
return object_error::success;
}
-error_code COFFObjectFile::getSymbolSection(DataRefImpl Symb,
- section_iterator &Result) const {
- const coff_symbol *symb = toSymb(Symb);
- if (symb->SectionNumber <= COFF::IMAGE_SYM_UNDEFINED)
- Result = end_sections();
- else {
- const coff_section *sec = 0;
- if (error_code ec = getSection(symb->SectionNumber, sec)) return ec;
- DataRefImpl Sec;
- Sec.p = reinterpret_cast<uintptr_t>(sec);
- Result = section_iterator(SectionRef(Sec, this));
+std::error_code
+COFFObjectFile::getSymbolSection(DataRefImpl Ref,
+ section_iterator &Result) const {
+ const coff_symbol *Symb = toSymb(Ref);
+ if (COFF::isReservedSectionNumber(Symb->SectionNumber)) {
+ Result = section_end();
+ } else {
+ const coff_section *Sec = nullptr;
+ if (std::error_code EC = getSection(Symb->SectionNumber, Sec))
+ return EC;
+ DataRefImpl Ref;
+ Ref.p = reinterpret_cast<uintptr_t>(Sec);
+ Result = section_iterator(SectionRef(Ref, this));
}
return object_error::success;
}
-error_code COFFObjectFile::getSymbolValue(DataRefImpl Symb,
- uint64_t &Val) const {
- report_fatal_error("getSymbolValue unimplemented in COFFObjectFile");
+void COFFObjectFile::moveSectionNext(DataRefImpl &Ref) const {
+ const coff_section *Sec = toSec(Ref);
+ Sec += 1;
+ Ref.p = reinterpret_cast<uintptr_t>(Sec);
}
-error_code COFFObjectFile::getSectionNext(DataRefImpl Sec,
- SectionRef &Result) const {
- const coff_section *sec = toSec(Sec);
- sec += 1;
- Sec.p = reinterpret_cast<uintptr_t>(sec);
- Result = SectionRef(Sec, this);
- return object_error::success;
+std::error_code COFFObjectFile::getSectionName(DataRefImpl Ref,
+ StringRef &Result) const {
+ const coff_section *Sec = toSec(Ref);
+ return getSectionName(Sec, Result);
}
-error_code COFFObjectFile::getSectionName(DataRefImpl Sec,
- StringRef &Result) const {
- const coff_section *sec = toSec(Sec);
- return getSectionName(sec, Result);
-}
-
-error_code COFFObjectFile::getSectionAddress(DataRefImpl Sec,
- uint64_t &Result) const {
- const coff_section *sec = toSec(Sec);
- Result = sec->VirtualAddress;
+std::error_code COFFObjectFile::getSectionAddress(DataRefImpl Ref,
+ uint64_t &Result) const {
+ const coff_section *Sec = toSec(Ref);
+ Result = Sec->VirtualAddress;
return object_error::success;
}
-error_code COFFObjectFile::getSectionSize(DataRefImpl Sec,
- uint64_t &Result) const {
- const coff_section *sec = toSec(Sec);
- Result = sec->SizeOfRawData;
+std::error_code COFFObjectFile::getSectionSize(DataRefImpl Ref,
+ uint64_t &Result) const {
+ const coff_section *Sec = toSec(Ref);
+ Result = Sec->SizeOfRawData;
return object_error::success;
}
-error_code COFFObjectFile::getSectionContents(DataRefImpl Sec,
- StringRef &Result) const {
- const coff_section *sec = toSec(Sec);
+std::error_code COFFObjectFile::getSectionContents(DataRefImpl Ref,
+ StringRef &Result) const {
+ const coff_section *Sec = toSec(Ref);
ArrayRef<uint8_t> Res;
- error_code EC = getSectionContents(sec, Res);
+ std::error_code EC = getSectionContents(Sec, Res);
Result = StringRef(reinterpret_cast<const char*>(Res.data()), Res.size());
return EC;
}
-error_code COFFObjectFile::getSectionAlignment(DataRefImpl Sec,
- uint64_t &Res) const {
- const coff_section *sec = toSec(Sec);
- if (!sec)
+std::error_code COFFObjectFile::getSectionAlignment(DataRefImpl Ref,
+ uint64_t &Res) const {
+ const coff_section *Sec = toSec(Ref);
+ if (!Sec)
return object_error::parse_failed;
- Res = uint64_t(1) << (((sec->Characteristics & 0x00F00000) >> 20) - 1);
+ Res = uint64_t(1) << (((Sec->Characteristics & 0x00F00000) >> 20) - 1);
return object_error::success;
}
-error_code COFFObjectFile::isSectionText(DataRefImpl Sec,
- bool &Result) const {
- const coff_section *sec = toSec(Sec);
- Result = sec->Characteristics & COFF::IMAGE_SCN_CNT_CODE;
+std::error_code COFFObjectFile::isSectionText(DataRefImpl Ref,
+ bool &Result) const {
+ const coff_section *Sec = toSec(Ref);
+ Result = Sec->Characteristics & COFF::IMAGE_SCN_CNT_CODE;
return object_error::success;
}
-error_code COFFObjectFile::isSectionData(DataRefImpl Sec,
- bool &Result) const {
- const coff_section *sec = toSec(Sec);
- Result = sec->Characteristics & COFF::IMAGE_SCN_CNT_INITIALIZED_DATA;
+std::error_code COFFObjectFile::isSectionData(DataRefImpl Ref,
+ bool &Result) const {
+ const coff_section *Sec = toSec(Ref);
+ Result = Sec->Characteristics & COFF::IMAGE_SCN_CNT_INITIALIZED_DATA;
return object_error::success;
}
-error_code COFFObjectFile::isSectionBSS(DataRefImpl Sec,
- bool &Result) const {
- const coff_section *sec = toSec(Sec);
- Result = sec->Characteristics & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA;
+std::error_code COFFObjectFile::isSectionBSS(DataRefImpl Ref,
+ bool &Result) const {
+ const coff_section *Sec = toSec(Ref);
+ Result = Sec->Characteristics & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA;
return object_error::success;
}
-error_code COFFObjectFile::isSectionRequiredForExecution(DataRefImpl Sec,
- bool &Result) const {
+std::error_code
+COFFObjectFile::isSectionRequiredForExecution(DataRefImpl Ref,
+ bool &Result) const {
// FIXME: Unimplemented
Result = true;
return object_error::success;
}
-error_code COFFObjectFile::isSectionVirtual(DataRefImpl Sec,
- bool &Result) const {
- const coff_section *sec = toSec(Sec);
- Result = sec->Characteristics & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA;
+std::error_code COFFObjectFile::isSectionVirtual(DataRefImpl Ref,
+ bool &Result) const {
+ const coff_section *Sec = toSec(Ref);
+ Result = Sec->Characteristics & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA;
return object_error::success;
}
-error_code COFFObjectFile::isSectionZeroInit(DataRefImpl Sec,
- bool &Result) const {
+std::error_code COFFObjectFile::isSectionZeroInit(DataRefImpl Ref,
+ bool &Result) const {
// FIXME: Unimplemented.
Result = false;
return object_error::success;
}
-error_code COFFObjectFile::isSectionReadOnlyData(DataRefImpl Sec,
- bool &Result) const {
+std::error_code COFFObjectFile::isSectionReadOnlyData(DataRefImpl Ref,
+ bool &Result) const {
// FIXME: Unimplemented.
Result = false;
return object_error::success;
}
-error_code COFFObjectFile::sectionContainsSymbol(DataRefImpl Sec,
- DataRefImpl Symb,
- bool &Result) const {
- const coff_section *sec = toSec(Sec);
- const coff_symbol *symb = toSymb(Symb);
- const coff_section *symb_sec = 0;
- if (error_code ec = getSection(symb->SectionNumber, symb_sec)) return ec;
- if (symb_sec == sec)
+std::error_code COFFObjectFile::sectionContainsSymbol(DataRefImpl SecRef,
+ DataRefImpl SymbRef,
+ bool &Result) const {
+ const coff_section *Sec = toSec(SecRef);
+ const coff_symbol *Symb = toSymb(SymbRef);
+ const coff_section *SymbSec = nullptr;
+ if (std::error_code EC = getSection(Symb->SectionNumber, SymbSec))
+ return EC;
+ if (SymbSec == Sec)
Result = true;
else
Result = false;
return object_error::success;
}
-relocation_iterator COFFObjectFile::section_rel_begin(DataRefImpl Sec) const {
- const coff_section *sec = toSec(Sec);
- DataRefImpl ret;
- if (sec->NumberOfRelocations == 0)
- ret.p = 0;
- else
- ret.p = reinterpret_cast<uintptr_t>(base() + sec->PointerToRelocations);
-
- return relocation_iterator(RelocationRef(ret, this));
+relocation_iterator COFFObjectFile::section_rel_begin(DataRefImpl Ref) const {
+ const coff_section *Sec = toSec(Ref);
+ DataRefImpl Ret;
+ if (Sec->NumberOfRelocations == 0) {
+ Ret.p = 0;
+ } else {
+ auto begin = reinterpret_cast<const coff_relocation*>(
+ base() + Sec->PointerToRelocations);
+ if (Sec->hasExtendedRelocations()) {
+ // Skip the first relocation entry repurposed to store the number of
+ // relocations.
+ begin++;
+ }
+ Ret.p = reinterpret_cast<uintptr_t>(begin);
+ }
+ return relocation_iterator(RelocationRef(Ret, this));
+}
+
+static uint32_t getNumberOfRelocations(const coff_section *Sec,
+ const uint8_t *base) {
+  // The field for the number of relocations in the COFF section table is
+  // only 16 bits wide. If a section has more than 65535 relocations, the
+  // NumberOfRelocations field is set to 0xFFFF and the actual relocation
+  // count is stored in the VirtualAddress field of the first relocation
+  // entry.
+ if (Sec->hasExtendedRelocations()) {
+ auto *FirstReloc = reinterpret_cast<const coff_relocation*>(
+ base + Sec->PointerToRelocations);
+ return FirstReloc->VirtualAddress;
+ }
+ return Sec->NumberOfRelocations;
}
-relocation_iterator COFFObjectFile::section_rel_end(DataRefImpl Sec) const {
- const coff_section *sec = toSec(Sec);
- DataRefImpl ret;
- if (sec->NumberOfRelocations == 0)
- ret.p = 0;
- else
- ret.p = reinterpret_cast<uintptr_t>(
- reinterpret_cast<const coff_relocation*>(
- base() + sec->PointerToRelocations)
- + sec->NumberOfRelocations);
-
- return relocation_iterator(RelocationRef(ret, this));
+relocation_iterator COFFObjectFile::section_rel_end(DataRefImpl Ref) const {
+ const coff_section *Sec = toSec(Ref);
+ DataRefImpl Ret;
+ if (Sec->NumberOfRelocations == 0) {
+ Ret.p = 0;
+ } else {
+ auto begin = reinterpret_cast<const coff_relocation*>(
+ base() + Sec->PointerToRelocations);
+ uint32_t NumReloc = getNumberOfRelocations(Sec, base());
+ Ret.p = reinterpret_cast<uintptr_t>(begin + NumReloc);
+ }
+ return relocation_iterator(RelocationRef(Ret, this));
}
// Initialize the pointer to the symbol table.
-error_code COFFObjectFile::initSymbolTablePtr() {
- if (error_code ec = getObject(
- SymbolTable, Data, base() + COFFHeader->PointerToSymbolTable,
+std::error_code COFFObjectFile::initSymbolTablePtr() {
+ if (std::error_code EC = getObject(
+ SymbolTable, *Data, base() + COFFHeader->PointerToSymbolTable,
COFFHeader->NumberOfSymbols * sizeof(coff_symbol)))
- return ec;
+ return EC;
   // Find string table. The first four bytes of the string table contain the
   // total size of the string table, including the size field itself. If the
@@ -376,32 +408,42 @@ error_code COFFObjectFile::initSymbolTablePtr() {
base() + COFFHeader->PointerToSymbolTable +
COFFHeader->NumberOfSymbols * sizeof(coff_symbol);
const ulittle32_t *StringTableSizePtr;
- if (error_code ec = getObject(StringTableSizePtr, Data, StringTableAddr))
- return ec;
+ if (std::error_code EC =
+ getObject(StringTableSizePtr, *Data, StringTableAddr))
+ return EC;
StringTableSize = *StringTableSizePtr;
- if (error_code ec =
- getObject(StringTable, Data, StringTableAddr, StringTableSize))
- return ec;
+ if (std::error_code EC =
+ getObject(StringTable, *Data, StringTableAddr, StringTableSize))
+ return EC;
+
+  // Treat table sizes < 4 as empty because, contrary to the PECOFF spec, some
+  // tools like cvtres write a size of 0 for an empty table instead of 4.
+ if (StringTableSize < 4)
+ StringTableSize = 4;
   // Check that the string table is null terminated if it has anything in it.
- if (StringTableSize < 4 ||
- (StringTableSize > 4 && StringTable[StringTableSize - 1] != 0))
+ if (StringTableSize > 4 && StringTable[StringTableSize - 1] != 0)
return object_error::parse_failed;
return object_error::success;
}
+// Returns the file offset for the given VA.
+std::error_code COFFObjectFile::getVaPtr(uint64_t Addr, uintptr_t &Res) const {
+ uint64_t ImageBase = PE32Header ? (uint64_t)PE32Header->ImageBase
+ : (uint64_t)PE32PlusHeader->ImageBase;
+ uint64_t Rva = Addr - ImageBase;
+ assert(Rva <= UINT32_MAX);
+ return getRvaPtr((uint32_t)Rva, Res);
+}
+
// Returns the file offset for the given RVA.
-error_code COFFObjectFile::getRvaPtr(uint32_t Rva, uintptr_t &Res) const {
- error_code ec;
- for (section_iterator i = begin_sections(), e = end_sections(); i != e;
- i.increment(ec)) {
- if (ec)
- return ec;
- const coff_section *Section = getCOFFSection(i);
+std::error_code COFFObjectFile::getRvaPtr(uint32_t Addr, uintptr_t &Res) const {
+ for (const SectionRef &S : sections()) {
+ const coff_section *Section = getCOFFSection(S);
uint32_t SectionStart = Section->VirtualAddress;
uint32_t SectionEnd = Section->VirtualAddress + Section->VirtualSize;
- if (SectionStart <= Rva && Rva < SectionEnd) {
- uint32_t Offset = Rva - SectionStart;
+ if (SectionStart <= Addr && Addr < SectionEnd) {
+ uint32_t Offset = Addr - SectionStart;
Res = uintptr_t(base()) + Section->PointerToRawData + Offset;
return object_error::success;
}
@@ -411,11 +453,11 @@ error_code COFFObjectFile::getRvaPtr(uint32_t Rva, uintptr_t &Res) const {
// Returns hint and name fields, assuming \p Rva is pointing to a Hint/Name
// table entry.
-error_code COFFObjectFile::
-getHintName(uint32_t Rva, uint16_t &Hint, StringRef &Name) const {
+std::error_code COFFObjectFile::getHintName(uint32_t Rva, uint16_t &Hint,
+ StringRef &Name) const {
uintptr_t IntPtr = 0;
- if (error_code ec = getRvaPtr(Rva, IntPtr))
- return ec;
+ if (std::error_code EC = getRvaPtr(Rva, IntPtr))
+ return EC;
const uint8_t *Ptr = reinterpret_cast<const uint8_t *>(IntPtr);
Hint = *reinterpret_cast<const ulittle16_t *>(Ptr);
Name = StringRef(reinterpret_cast<const char *>(Ptr + 2));
@@ -423,7 +465,7 @@ getHintName(uint32_t Rva, uint16_t &Hint, StringRef &Name) const {
}
// Find the import table.
-error_code COFFObjectFile::initImportTablePtr() {
+std::error_code COFFObjectFile::initImportTablePtr() {
// First, we get the RVA of the import table. If the file lacks a pointer to
// the import table, do nothing.
const data_directory *DataEntry;
@@ -441,118 +483,139 @@ error_code COFFObjectFile::initImportTablePtr() {
// Find the section that contains the RVA. This is needed because the RVA is
// the import table's memory address which is different from its file offset.
uintptr_t IntPtr = 0;
- if (error_code ec = getRvaPtr(ImportTableRva, IntPtr))
- return ec;
+ if (std::error_code EC = getRvaPtr(ImportTableRva, IntPtr))
+ return EC;
ImportDirectory = reinterpret_cast<
const import_directory_table_entry *>(IntPtr);
+ return object_error::success;
+}
- // It's an error if there's no section containing the Import Table RVA.
- return object_error::parse_failed;
+// Find the export table.
+std::error_code COFFObjectFile::initExportTablePtr() {
+ // First, we get the RVA of the export table. If the file lacks a pointer to
+ // the export table, do nothing.
+ const data_directory *DataEntry;
+ if (getDataDirectory(COFF::EXPORT_TABLE, DataEntry))
+ return object_error::success;
+
+ // Do nothing if the pointer to export table is NULL.
+ if (DataEntry->RelativeVirtualAddress == 0)
+ return object_error::success;
+
+ uint32_t ExportTableRva = DataEntry->RelativeVirtualAddress;
+ uintptr_t IntPtr = 0;
+ if (std::error_code EC = getRvaPtr(ExportTableRva, IntPtr))
+ return EC;
+ ExportDirectory =
+ reinterpret_cast<const export_directory_table_entry *>(IntPtr);
+ return object_error::success;
}
-COFFObjectFile::COFFObjectFile(MemoryBuffer *Object, error_code &ec)
- : ObjectFile(Binary::ID_COFF, Object)
- , COFFHeader(0)
- , PE32Header(0)
- , DataDirectory(0)
- , SectionTable(0)
- , SymbolTable(0)
- , StringTable(0)
- , StringTableSize(0)
- , ImportDirectory(0)
- , NumberOfImportDirectory(0) {
+COFFObjectFile::COFFObjectFile(std::unique_ptr<MemoryBuffer> Object,
+ std::error_code &EC)
+ : ObjectFile(Binary::ID_COFF, std::move(Object)), COFFHeader(nullptr),
+ PE32Header(nullptr), PE32PlusHeader(nullptr), DataDirectory(nullptr),
+ SectionTable(nullptr), SymbolTable(nullptr), StringTable(nullptr),
+ StringTableSize(0), ImportDirectory(nullptr), NumberOfImportDirectory(0),
+ ExportDirectory(nullptr) {
// Check that we at least have enough room for a header.
- if (!checkSize(Data, ec, sizeof(coff_file_header))) return;
+ if (!checkSize(*Data, EC, sizeof(coff_file_header)))
+ return;
// The current location in the file where we are looking at.
uint64_t CurPtr = 0;
// PE header is optional and is present only in executables. If it exists,
// it is placed right after COFF header.
- bool hasPEHeader = false;
+ bool HasPEHeader = false;
// Check if this is a PE/COFF file.
if (base()[0] == 0x4d && base()[1] == 0x5a) {
// PE/COFF, seek through MS-DOS compatibility stub and 4-byte
// PE signature to find 'normal' COFF header.
- if (!checkSize(Data, ec, 0x3c + 8)) return;
+ if (!checkSize(*Data, EC, 0x3c + 8))
+ return;
CurPtr = *reinterpret_cast<const ulittle16_t *>(base() + 0x3c);
// Check the PE magic bytes. ("PE\0\0")
if (std::memcmp(base() + CurPtr, "PE\0\0", 4) != 0) {
- ec = object_error::parse_failed;
+ EC = object_error::parse_failed;
return;
}
CurPtr += 4; // Skip the PE magic bytes.
- hasPEHeader = true;
+ HasPEHeader = true;
}
- if ((ec = getObject(COFFHeader, Data, base() + CurPtr)))
+ if ((EC = getObject(COFFHeader, *Data, base() + CurPtr)))
return;
CurPtr += sizeof(coff_file_header);
- if (hasPEHeader) {
- if ((ec = getObject(PE32Header, Data, base() + CurPtr)))
+ if (HasPEHeader) {
+ const pe32_header *Header;
+ if ((EC = getObject(Header, *Data, base() + CurPtr)))
+ return;
+
+ const uint8_t *DataDirAddr;
+ uint64_t DataDirSize;
+ if (Header->Magic == 0x10b) {
+ PE32Header = Header;
+ DataDirAddr = base() + CurPtr + sizeof(pe32_header);
+ DataDirSize = sizeof(data_directory) * PE32Header->NumberOfRvaAndSize;
+ } else if (Header->Magic == 0x20b) {
+ PE32PlusHeader = reinterpret_cast<const pe32plus_header *>(Header);
+ DataDirAddr = base() + CurPtr + sizeof(pe32plus_header);
+ DataDirSize = sizeof(data_directory) * PE32PlusHeader->NumberOfRvaAndSize;
+ } else {
+ // It's neither PE32 nor PE32+.
+ EC = object_error::parse_failed;
return;
- if (PE32Header->Magic != 0x10b) {
- // We only support PE32. If this is PE32 (not PE32+), the magic byte
- // should be 0x10b. If this is not PE32, continue as if there's no PE
- // header in this file.
- PE32Header = 0;
- } else if (PE32Header->NumberOfRvaAndSize > 0) {
- const uint8_t *addr = base() + CurPtr + sizeof(pe32_header);
- uint64_t size = sizeof(data_directory) * PE32Header->NumberOfRvaAndSize;
- if ((ec = getObject(DataDirectory, Data, addr, size)))
- return;
}
+ if ((EC = getObject(DataDirectory, *Data, DataDirAddr, DataDirSize)))
+ return;
CurPtr += COFFHeader->SizeOfOptionalHeader;
}
- if (!COFFHeader->isImportLibrary())
- if ((ec = getObject(SectionTable, Data, base() + CurPtr,
- COFFHeader->NumberOfSections * sizeof(coff_section))))
- return;
+ if (COFFHeader->isImportLibrary())
+ return;
+
+ if ((EC = getObject(SectionTable, *Data, base() + CurPtr,
+ COFFHeader->NumberOfSections * sizeof(coff_section))))
+ return;
// Initialize the pointer to the symbol table.
if (COFFHeader->PointerToSymbolTable != 0)
- if ((ec = initSymbolTablePtr()))
+ if ((EC = initSymbolTablePtr()))
return;
// Initialize the pointer to the beginning of the import table.
- if ((ec = initImportTablePtr()))
+ if ((EC = initImportTablePtr()))
return;
- ec = object_error::success;
-}
+ // Initialize the pointer to the export table.
+ if ((EC = initExportTablePtr()))
+ return;
-symbol_iterator COFFObjectFile::begin_symbols() const {
- DataRefImpl ret;
- ret.p = reinterpret_cast<uintptr_t>(SymbolTable);
- return symbol_iterator(SymbolRef(ret, this));
+ EC = object_error::success;
}
-symbol_iterator COFFObjectFile::end_symbols() const {
- // The symbol table ends where the string table begins.
- DataRefImpl ret;
- ret.p = reinterpret_cast<uintptr_t>(StringTable);
- return symbol_iterator(SymbolRef(ret, this));
+basic_symbol_iterator COFFObjectFile::symbol_begin_impl() const {
+ DataRefImpl Ret;
+ Ret.p = reinterpret_cast<uintptr_t>(SymbolTable);
+ return basic_symbol_iterator(SymbolRef(Ret, this));
}
-symbol_iterator COFFObjectFile::begin_dynamic_symbols() const {
- // TODO: implement
- report_fatal_error("Dynamic symbols unimplemented in COFFObjectFile");
-}
-
-symbol_iterator COFFObjectFile::end_dynamic_symbols() const {
- // TODO: implement
- report_fatal_error("Dynamic symbols unimplemented in COFFObjectFile");
+basic_symbol_iterator COFFObjectFile::symbol_end_impl() const {
+ // The symbol table ends where the string table begins.
+ DataRefImpl Ret;
+ Ret.p = reinterpret_cast<uintptr_t>(StringTable);
+ return basic_symbol_iterator(SymbolRef(Ret, this));
}
-library_iterator COFFObjectFile::begin_libraries_needed() const {
+library_iterator COFFObjectFile::needed_library_begin() const {
// TODO: implement
report_fatal_error("Libraries needed unimplemented in COFFObjectFile");
}
-library_iterator COFFObjectFile::end_libraries_needed() const {
+library_iterator COFFObjectFile::needed_library_end() const {
// TODO: implement
report_fatal_error("Libraries needed unimplemented in COFFObjectFile");
}
@@ -563,34 +626,40 @@ StringRef COFFObjectFile::getLoadName() const {
}
import_directory_iterator COFFObjectFile::import_directory_begin() const {
- DataRefImpl Imp;
- Imp.p = reinterpret_cast<uintptr_t>(ImportDirectory);
- return import_directory_iterator(ImportDirectoryEntryRef(Imp, this));
+ return import_directory_iterator(
+ ImportDirectoryEntryRef(ImportDirectory, 0, this));
}
import_directory_iterator COFFObjectFile::import_directory_end() const {
- DataRefImpl Imp;
- if (ImportDirectory) {
- Imp.p = reinterpret_cast<uintptr_t>(
- ImportDirectory + (NumberOfImportDirectory - 1));
- } else {
- Imp.p = 0;
- }
- return import_directory_iterator(ImportDirectoryEntryRef(Imp, this));
+ return import_directory_iterator(
+ ImportDirectoryEntryRef(ImportDirectory, NumberOfImportDirectory, this));
+}
+
+export_directory_iterator COFFObjectFile::export_directory_begin() const {
+ return export_directory_iterator(
+ ExportDirectoryEntryRef(ExportDirectory, 0, this));
+}
+
+export_directory_iterator COFFObjectFile::export_directory_end() const {
+ if (!ExportDirectory)
+ return export_directory_iterator(ExportDirectoryEntryRef(nullptr, 0, this));
+ ExportDirectoryEntryRef Ref(ExportDirectory,
+ ExportDirectory->AddressTableEntries, this);
+ return export_directory_iterator(Ref);
}
-section_iterator COFFObjectFile::begin_sections() const {
- DataRefImpl ret;
- ret.p = reinterpret_cast<uintptr_t>(SectionTable);
- return section_iterator(SectionRef(ret, this));
+section_iterator COFFObjectFile::section_begin() const {
+ DataRefImpl Ret;
+ Ret.p = reinterpret_cast<uintptr_t>(SectionTable);
+ return section_iterator(SectionRef(Ret, this));
}
-section_iterator COFFObjectFile::end_sections() const {
- DataRefImpl ret;
- int numSections = COFFHeader->isImportLibrary()
+section_iterator COFFObjectFile::section_end() const {
+ DataRefImpl Ret;
+ int NumSections = COFFHeader->isImportLibrary()
? 0 : COFFHeader->NumberOfSections;
- ret.p = reinterpret_cast<uintptr_t>(SectionTable + numSections);
- return section_iterator(SectionRef(ret, this));
+ Ret.p = reinterpret_cast<uintptr_t>(SectionTable + NumSections);
+ return section_iterator(SectionRef(Ret, this));
}
uint8_t COFFObjectFile::getBytesInAddress() const {
@@ -603,6 +672,8 @@ StringRef COFFObjectFile::getFileFormatName() const {
return "COFF-i386";
case COFF::IMAGE_FILE_MACHINE_AMD64:
return "COFF-x86-64";
+ case COFF::IMAGE_FILE_MACHINE_ARMNT:
+ return "COFF-ARM";
default:
return "COFF-<unknown arch>";
}
@@ -614,6 +685,8 @@ unsigned COFFObjectFile::getArch() const {
return Triple::x86;
case COFF::IMAGE_FILE_MACHINE_AMD64:
return Triple::x86_64;
+ case COFF::IMAGE_FILE_MACHINE_ARMNT:
+ return Triple::thumb;
default:
return Triple::UnknownArch;
}
@@ -621,107 +694,119 @@ unsigned COFFObjectFile::getArch() const {
 // This method is kept here because lld uses it. As soon as we make
 // lld use getCOFFHeader, this method will be removed.
-error_code COFFObjectFile::getHeader(const coff_file_header *&Res) const {
+std::error_code COFFObjectFile::getHeader(const coff_file_header *&Res) const {
return getCOFFHeader(Res);
}
-error_code COFFObjectFile::getCOFFHeader(const coff_file_header *&Res) const {
+std::error_code
+COFFObjectFile::getCOFFHeader(const coff_file_header *&Res) const {
Res = COFFHeader;
return object_error::success;
}
-error_code COFFObjectFile::getPE32Header(const pe32_header *&Res) const {
+std::error_code COFFObjectFile::getPE32Header(const pe32_header *&Res) const {
Res = PE32Header;
return object_error::success;
}
-error_code COFFObjectFile::getDataDirectory(uint32_t index,
- const data_directory *&Res) const {
+std::error_code
+COFFObjectFile::getPE32PlusHeader(const pe32plus_header *&Res) const {
+ Res = PE32PlusHeader;
+ return object_error::success;
+}
+
+std::error_code
+COFFObjectFile::getDataDirectory(uint32_t Index,
+ const data_directory *&Res) const {
   // Error if there's no data directory or the index is out of range.
- if (!DataDirectory || index > PE32Header->NumberOfRvaAndSize)
+ if (!DataDirectory)
return object_error::parse_failed;
- Res = &DataDirectory[index];
+ assert(PE32Header || PE32PlusHeader);
+ uint32_t NumEnt = PE32Header ? PE32Header->NumberOfRvaAndSize
+ : PE32PlusHeader->NumberOfRvaAndSize;
+ if (Index > NumEnt)
+ return object_error::parse_failed;
+ Res = &DataDirectory[Index];
return object_error::success;
}
-error_code COFFObjectFile::getSection(int32_t index,
- const coff_section *&Result) const {
+std::error_code COFFObjectFile::getSection(int32_t Index,
+ const coff_section *&Result) const {
// Check for special index values.
- if (index == COFF::IMAGE_SYM_UNDEFINED ||
- index == COFF::IMAGE_SYM_ABSOLUTE ||
- index == COFF::IMAGE_SYM_DEBUG)
- Result = NULL;
- else if (index > 0 && index <= COFFHeader->NumberOfSections)
+ if (COFF::isReservedSectionNumber(Index))
+ Result = nullptr;
+ else if (Index > 0 && Index <= COFFHeader->NumberOfSections)
// We already verified the section table data, so no need to check again.
- Result = SectionTable + (index - 1);
+ Result = SectionTable + (Index - 1);
else
return object_error::parse_failed;
return object_error::success;
}
-error_code COFFObjectFile::getString(uint32_t offset,
- StringRef &Result) const {
+std::error_code COFFObjectFile::getString(uint32_t Offset,
+ StringRef &Result) const {
if (StringTableSize <= 4)
// Tried to get a string from an empty string table.
return object_error::parse_failed;
- if (offset >= StringTableSize)
+ if (Offset >= StringTableSize)
return object_error::unexpected_eof;
- Result = StringRef(StringTable + offset);
+ Result = StringRef(StringTable + Offset);
return object_error::success;
}
-error_code COFFObjectFile::getSymbol(uint32_t index,
- const coff_symbol *&Result) const {
- if (index < COFFHeader->NumberOfSymbols)
- Result = SymbolTable + index;
+std::error_code COFFObjectFile::getSymbol(uint32_t Index,
+ const coff_symbol *&Result) const {
+ if (Index < COFFHeader->NumberOfSymbols)
+ Result = SymbolTable + Index;
else
return object_error::parse_failed;
return object_error::success;
}
-error_code COFFObjectFile::getSymbolName(const coff_symbol *symbol,
- StringRef &Res) const {
+std::error_code COFFObjectFile::getSymbolName(const coff_symbol *Symbol,
+ StringRef &Res) const {
// Check for string table entry. First 4 bytes are 0.
- if (symbol->Name.Offset.Zeroes == 0) {
- uint32_t Offset = symbol->Name.Offset.Offset;
- if (error_code ec = getString(Offset, Res))
- return ec;
+ if (Symbol->Name.Offset.Zeroes == 0) {
+ uint32_t Offset = Symbol->Name.Offset.Offset;
+ if (std::error_code EC = getString(Offset, Res))
+ return EC;
return object_error::success;
}
- if (symbol->Name.ShortName[7] == 0)
+ if (Symbol->Name.ShortName[7] == 0)
// Null terminated, let ::strlen figure out the length.
- Res = StringRef(symbol->Name.ShortName);
+ Res = StringRef(Symbol->Name.ShortName);
else
// Not null terminated, use all 8 bytes.
- Res = StringRef(symbol->Name.ShortName, 8);
+ Res = StringRef(Symbol->Name.ShortName, 8);
return object_error::success;
}
ArrayRef<uint8_t> COFFObjectFile::getSymbolAuxData(
- const coff_symbol *symbol) const {
- const uint8_t *aux = NULL;
+ const coff_symbol *Symbol) const {
+ const uint8_t *Aux = nullptr;
- if ( symbol->NumberOfAuxSymbols > 0 ) {
+ if (Symbol->NumberOfAuxSymbols > 0) {
// AUX data comes immediately after the symbol in COFF
- aux = reinterpret_cast<const uint8_t *>(symbol + 1);
+ Aux = reinterpret_cast<const uint8_t *>(Symbol + 1);
# ifndef NDEBUG
- // Verify that the aux symbol points to a valid entry in the symbol table.
- uintptr_t offset = uintptr_t(aux) - uintptr_t(base());
- if (offset < COFFHeader->PointerToSymbolTable
- || offset >= COFFHeader->PointerToSymbolTable
+ // Verify that the Aux symbol points to a valid entry in the symbol table.
+ uintptr_t Offset = uintptr_t(Aux) - uintptr_t(base());
+ if (Offset < COFFHeader->PointerToSymbolTable
+ || Offset >= COFFHeader->PointerToSymbolTable
+ (COFFHeader->NumberOfSymbols * sizeof(coff_symbol)))
report_fatal_error("Aux Symbol data was outside of symbol table.");
- assert((offset - COFFHeader->PointerToSymbolTable) % sizeof(coff_symbol)
+ assert((Offset - COFFHeader->PointerToSymbolTable) % sizeof(coff_symbol)
== 0 && "Aux Symbol data did not point to the beginning of a symbol");
# endif
}
- return ArrayRef<uint8_t>(aux, symbol->NumberOfAuxSymbols * sizeof(coff_symbol));
+ return ArrayRef<uint8_t>(Aux,
+ Symbol->NumberOfAuxSymbols * sizeof(coff_symbol));
}
-error_code COFFObjectFile::getSectionName(const coff_section *Sec,
- StringRef &Res) const {
+std::error_code COFFObjectFile::getSectionName(const coff_section *Sec,
+ StringRef &Res) const {
StringRef Name;
if (Sec->Name[7] == 0)
// Null terminated, let ::strlen figure out the length.
@@ -733,18 +818,24 @@ error_code COFFObjectFile::getSectionName(const coff_section *Sec,
// Check for string table entry. First byte is '/'.
if (Name[0] == '/') {
uint32_t Offset;
- if (Name.substr(1).getAsInteger(10, Offset))
- return object_error::parse_failed;
- if (error_code ec = getString(Offset, Name))
- return ec;
+ if (Name[1] == '/') {
+ if (decodeBase64StringEntry(Name.substr(2), Offset))
+ return object_error::parse_failed;
+ } else {
+ if (Name.substr(1).getAsInteger(10, Offset))
+ return object_error::parse_failed;
+ }
+ if (std::error_code EC = getString(Offset, Name))
+ return EC;
}
Res = Name;
return object_error::success;
}
-error_code COFFObjectFile::getSectionContents(const coff_section *Sec,
- ArrayRef<uint8_t> &Res) const {
+std::error_code
+COFFObjectFile::getSectionContents(const coff_section *Sec,
+ ArrayRef<uint8_t> &Res) const {
// The only thing that we need to verify is that the contents are contained
// within the file bounds. We don't need to make sure it doesn't cover other
// data, as there's nothing that says that is not allowed.
@@ -760,58 +851,65 @@ error_code COFFObjectFile::getSectionContents(const coff_section *Sec,
const coff_relocation *COFFObjectFile::toRel(DataRefImpl Rel) const {
return reinterpret_cast<const coff_relocation*>(Rel.p);
}
-error_code COFFObjectFile::getRelocationNext(DataRefImpl Rel,
- RelocationRef &Res) const {
+
+void COFFObjectFile::moveRelocationNext(DataRefImpl &Rel) const {
Rel.p = reinterpret_cast<uintptr_t>(
reinterpret_cast<const coff_relocation*>(Rel.p) + 1);
- Res = RelocationRef(Rel, this);
- return object_error::success;
}
-error_code COFFObjectFile::getRelocationAddress(DataRefImpl Rel,
- uint64_t &Res) const {
+
+std::error_code COFFObjectFile::getRelocationAddress(DataRefImpl Rel,
+ uint64_t &Res) const {
report_fatal_error("getRelocationAddress not implemented in COFFObjectFile");
}
-error_code COFFObjectFile::getRelocationOffset(DataRefImpl Rel,
- uint64_t &Res) const {
+
+std::error_code COFFObjectFile::getRelocationOffset(DataRefImpl Rel,
+ uint64_t &Res) const {
Res = toRel(Rel)->VirtualAddress;
return object_error::success;
}
+
symbol_iterator COFFObjectFile::getRelocationSymbol(DataRefImpl Rel) const {
const coff_relocation* R = toRel(Rel);
- DataRefImpl Symb;
- Symb.p = reinterpret_cast<uintptr_t>(SymbolTable + R->SymbolTableIndex);
- return symbol_iterator(SymbolRef(Symb, this));
+ DataRefImpl Ref;
+ Ref.p = reinterpret_cast<uintptr_t>(SymbolTable + R->SymbolTableIndex);
+ return symbol_iterator(SymbolRef(Ref, this));
}
-error_code COFFObjectFile::getRelocationType(DataRefImpl Rel,
- uint64_t &Res) const {
+
+std::error_code COFFObjectFile::getRelocationType(DataRefImpl Rel,
+ uint64_t &Res) const {
const coff_relocation* R = toRel(Rel);
Res = R->Type;
return object_error::success;
}
-const coff_section *COFFObjectFile::getCOFFSection(section_iterator &It) const {
- return toSec(It->getRawDataRefImpl());
+const coff_section *
+COFFObjectFile::getCOFFSection(const SectionRef &Section) const {
+ return toSec(Section.getRawDataRefImpl());
}
-const coff_symbol *COFFObjectFile::getCOFFSymbol(symbol_iterator &It) const {
- return toSymb(It->getRawDataRefImpl());
+const coff_symbol *
+COFFObjectFile::getCOFFSymbol(const SymbolRef &Symbol) const {
+ return toSymb(Symbol.getRawDataRefImpl());
}
-const coff_relocation *COFFObjectFile::getCOFFRelocation(
- relocation_iterator &It) const {
- return toRel(It->getRawDataRefImpl());
+const coff_relocation *
+COFFObjectFile::getCOFFRelocation(const RelocationRef &Reloc) const {
+ return toRel(Reloc.getRawDataRefImpl());
}
-#define LLVM_COFF_SWITCH_RELOC_TYPE_NAME(enum) \
- case COFF::enum: res = #enum; break;
+#define LLVM_COFF_SWITCH_RELOC_TYPE_NAME(reloc_type) \
+ case COFF::reloc_type: \
+ Res = #reloc_type; \
+ break;
-error_code COFFObjectFile::getRelocationTypeName(DataRefImpl Rel,
- SmallVectorImpl<char> &Result) const {
- const coff_relocation *reloc = toRel(Rel);
- StringRef res;
+std::error_code
+COFFObjectFile::getRelocationTypeName(DataRefImpl Rel,
+ SmallVectorImpl<char> &Result) const {
+ const coff_relocation *Reloc = toRel(Rel);
+ StringRef Res;
switch (COFFHeader->Machine) {
case COFF::IMAGE_FILE_MACHINE_AMD64:
- switch (reloc->Type) {
+ switch (Reloc->Type) {
LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_ABSOLUTE);
LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_ADDR64);
LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_ADDR32);
@@ -830,11 +928,32 @@ error_code COFFObjectFile::getRelocationTypeName(DataRefImpl Rel,
LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_PAIR);
LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_SSPAN32);
default:
- res = "Unknown";
+ Res = "Unknown";
+ }
+ break;
+ case COFF::IMAGE_FILE_MACHINE_ARMNT:
+ switch (Reloc->Type) {
+ LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_ABSOLUTE);
+ LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_ADDR32);
+ LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_ADDR32NB);
+ LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_BRANCH24);
+ LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_BRANCH11);
+ LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_TOKEN);
+ LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_BLX24);
+ LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_BLX11);
+ LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_SECTION);
+ LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_SECREL);
+ LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_MOV32A);
+ LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_MOV32T);
+ LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_BRANCH20T);
+ LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_BRANCH24T);
+ LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_BLX23T);
+ default:
+ Res = "Unknown";
}
break;
case COFF::IMAGE_FILE_MACHINE_I386:
- switch (reloc->Type) {
+ switch (Reloc->Type) {
LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_I386_ABSOLUTE);
LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_I386_DIR16);
LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_I386_REL16);
@@ -847,92 +966,158 @@ error_code COFFObjectFile::getRelocationTypeName(DataRefImpl Rel,
LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_I386_SECREL7);
LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_I386_REL32);
default:
- res = "Unknown";
+ Res = "Unknown";
}
break;
default:
- res = "Unknown";
+ Res = "Unknown";
}
- Result.append(res.begin(), res.end());
+ Result.append(Res.begin(), Res.end());
return object_error::success;
}
#undef LLVM_COFF_SWITCH_RELOC_TYPE_NAME
-error_code COFFObjectFile::getRelocationValueString(DataRefImpl Rel,
- SmallVectorImpl<char> &Result) const {
- const coff_relocation *reloc = toRel(Rel);
- const coff_symbol *symb = 0;
- if (error_code ec = getSymbol(reloc->SymbolTableIndex, symb)) return ec;
- DataRefImpl sym;
- sym.p = reinterpret_cast<uintptr_t>(symb);
- StringRef symname;
- if (error_code ec = getSymbolName(sym, symname)) return ec;
- Result.append(symname.begin(), symname.end());
+std::error_code
+COFFObjectFile::getRelocationValueString(DataRefImpl Rel,
+ SmallVectorImpl<char> &Result) const {
+ const coff_relocation *Reloc = toRel(Rel);
+ const coff_symbol *Symb = nullptr;
+ if (std::error_code EC = getSymbol(Reloc->SymbolTableIndex, Symb))
+ return EC;
+ DataRefImpl Sym;
+ Sym.p = reinterpret_cast<uintptr_t>(Symb);
+ StringRef SymName;
+ if (std::error_code EC = getSymbolName(Sym, SymName))
+ return EC;
+ Result.append(SymName.begin(), SymName.end());
return object_error::success;
}
-error_code COFFObjectFile::getLibraryNext(DataRefImpl LibData,
- LibraryRef &Result) const {
+std::error_code COFFObjectFile::getLibraryNext(DataRefImpl LibData,
+ LibraryRef &Result) const {
report_fatal_error("getLibraryNext not implemented in COFFObjectFile");
}
-error_code COFFObjectFile::getLibraryPath(DataRefImpl LibData,
- StringRef &Result) const {
+std::error_code COFFObjectFile::getLibraryPath(DataRefImpl LibData,
+ StringRef &Result) const {
report_fatal_error("getLibraryPath not implemented in COFFObjectFile");
}
bool ImportDirectoryEntryRef::
operator==(const ImportDirectoryEntryRef &Other) const {
- return ImportDirectoryPimpl == Other.ImportDirectoryPimpl;
+ return ImportTable == Other.ImportTable && Index == Other.Index;
}
-static const import_directory_table_entry *toImportEntry(DataRefImpl Imp) {
- return reinterpret_cast<const import_directory_table_entry *>(Imp.p);
+void ImportDirectoryEntryRef::moveNext() {
+ ++Index;
}
-error_code
-ImportDirectoryEntryRef::getNext(ImportDirectoryEntryRef &Result) const {
- const import_directory_table_entry *Dir = toImportEntry(ImportDirectoryPimpl);
- Dir += 1;
- DataRefImpl Next;
- Next.p = reinterpret_cast<uintptr_t>(Dir);
- Result = ImportDirectoryEntryRef(Next, OwningObject);
+std::error_code ImportDirectoryEntryRef::getImportTableEntry(
+ const import_directory_table_entry *&Result) const {
+ Result = ImportTable;
return object_error::success;
}
-error_code ImportDirectoryEntryRef::
-getImportTableEntry(const import_directory_table_entry *&Result) const {
- Result = toImportEntry(ImportDirectoryPimpl);
+std::error_code ImportDirectoryEntryRef::getName(StringRef &Result) const {
+ uintptr_t IntPtr = 0;
+ if (std::error_code EC =
+ OwningObject->getRvaPtr(ImportTable->NameRVA, IntPtr))
+ return EC;
+ Result = StringRef(reinterpret_cast<const char *>(IntPtr));
return object_error::success;
}
-error_code ImportDirectoryEntryRef::getName(StringRef &Result) const {
- const import_directory_table_entry *Dir = toImportEntry(ImportDirectoryPimpl);
+std::error_code ImportDirectoryEntryRef::getImportLookupEntry(
+ const import_lookup_table_entry32 *&Result) const {
uintptr_t IntPtr = 0;
- if (error_code ec = OwningObject->getRvaPtr(Dir->NameRVA, IntPtr))
- return ec;
- const char *Ptr = reinterpret_cast<const char *>(IntPtr);
- Result = StringRef(Ptr);
+ if (std::error_code EC =
+ OwningObject->getRvaPtr(ImportTable->ImportLookupTableRVA, IntPtr))
+ return EC;
+ Result = reinterpret_cast<const import_lookup_table_entry32 *>(IntPtr);
return object_error::success;
}
-error_code ImportDirectoryEntryRef::getImportLookupEntry(
- const import_lookup_table_entry32 *&Result) const {
- const import_directory_table_entry *Dir = toImportEntry(ImportDirectoryPimpl);
+bool ExportDirectoryEntryRef::
+operator==(const ExportDirectoryEntryRef &Other) const {
+ return ExportTable == Other.ExportTable && Index == Other.Index;
+}
+
+void ExportDirectoryEntryRef::moveNext() {
+ ++Index;
+}
+
+// Returns the name of the DLL that this export directory entry belongs to.
+std::error_code ExportDirectoryEntryRef::getDllName(StringRef &Result) const {
uintptr_t IntPtr = 0;
- if (error_code ec = OwningObject->getRvaPtr(
- Dir->ImportLookupTableRVA, IntPtr))
- return ec;
- Result = reinterpret_cast<const import_lookup_table_entry32 *>(IntPtr);
+ if (std::error_code EC =
+ OwningObject->getRvaPtr(ExportTable->NameRVA, IntPtr))
+ return EC;
+ Result = StringRef(reinterpret_cast<const char *>(IntPtr));
return object_error::success;
}
-namespace llvm {
+// Returns the starting ordinal number.
+std::error_code
+ExportDirectoryEntryRef::getOrdinalBase(uint32_t &Result) const {
+ Result = ExportTable->OrdinalBase;
+ return object_error::success;
+}
+
+// Returns the export ordinal of the current export symbol.
+std::error_code ExportDirectoryEntryRef::getOrdinal(uint32_t &Result) const {
+ Result = ExportTable->OrdinalBase + Index;
+ return object_error::success;
+}
+
+// Returns the address of the current export symbol.
+std::error_code ExportDirectoryEntryRef::getExportRVA(uint32_t &Result) const {
+ uintptr_t IntPtr = 0;
+ if (std::error_code EC =
+ OwningObject->getRvaPtr(ExportTable->ExportAddressTableRVA, IntPtr))
+ return EC;
+ const export_address_table_entry *entry =
+ reinterpret_cast<const export_address_table_entry *>(IntPtr);
+ Result = entry[Index].ExportRVA;
+ return object_error::success;
+}
- ObjectFile *ObjectFile::createCOFFObjectFile(MemoryBuffer *Object) {
- error_code ec;
- return new COFFObjectFile(Object, ec);
+// Returns the name of the current export symbol. If the symbol is exported
+// only by ordinal, an empty string is returned.
+std::error_code
+ExportDirectoryEntryRef::getSymbolName(StringRef &Result) const {
+ uintptr_t IntPtr = 0;
+ if (std::error_code EC =
+ OwningObject->getRvaPtr(ExportTable->OrdinalTableRVA, IntPtr))
+ return EC;
+ const ulittle16_t *Start = reinterpret_cast<const ulittle16_t *>(IntPtr);
+
+ uint32_t NumEntries = ExportTable->NumberOfNamePointers;
+ int Offset = 0;
+ for (const ulittle16_t *I = Start, *E = Start + NumEntries;
+ I < E; ++I, ++Offset) {
+ if (*I != Index)
+ continue;
+ if (std::error_code EC =
+ OwningObject->getRvaPtr(ExportTable->NamePointerRVA, IntPtr))
+ return EC;
+ const ulittle32_t *NamePtr = reinterpret_cast<const ulittle32_t *>(IntPtr);
+ if (std::error_code EC = OwningObject->getRvaPtr(NamePtr[Offset], IntPtr))
+ return EC;
+ Result = StringRef(reinterpret_cast<const char *>(IntPtr));
+ return object_error::success;
}
+ Result = "";
+ return object_error::success;
+}
-} // end namespace llvm
+ErrorOr<ObjectFile *>
+ObjectFile::createCOFFObjectFile(std::unique_ptr<MemoryBuffer> Object) {
+ std::error_code EC;
+ std::unique_ptr<COFFObjectFile> Ret(
+ new COFFObjectFile(std::move(Object), EC));
+ if (EC)
+ return EC;
+ return Ret.release();
+}
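
For context, a minimal sketch of how the export directory iterators introduced above might be consumed. dumpExports is a hypothetical helper, not part of this merge; it assumes Obj is an already-parsed PE/COFF image and reduces error handling to an early return.

#include "llvm/Object/COFF.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::object;

// Print ordinal, RVA and name for every entry in the export directory.
static void dumpExports(const COFFObjectFile &Obj) {
  for (export_directory_iterator I = Obj.export_directory_begin(),
                                 E = Obj.export_directory_end();
       I != E; ++I) {
    uint32_t Ordinal = 0, RVA = 0;
    StringRef Name;
    if (I->getOrdinal(Ordinal) || I->getExportRVA(RVA) ||
        I->getSymbolName(Name))
      return; // stop on the first error
    StringRef Shown = Name.empty() ? StringRef("<by ordinal>") : Name;
    outs() << "#" << Ordinal << " rva=" << RVA << " " << Shown << "\n";
  }
}
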
diff --git a/contrib/llvm/lib/Object/COFFYAML.cpp b/contrib/llvm/lib/Object/COFFYAML.cpp
index e549b4e..49c5dda 100644
--- a/contrib/llvm/lib/Object/COFFYAML.cpp
+++ b/contrib/llvm/lib/Object/COFFYAML.cpp
@@ -23,13 +23,39 @@ Object::Object() { memset(&Header, 0, sizeof(COFF::header)); }
}
namespace yaml {
+void ScalarEnumerationTraits<COFFYAML::COMDATType>::enumeration(
+ IO &IO, COFFYAML::COMDATType &Value) {
+ IO.enumCase(Value, "0", 0);
+ ECase(IMAGE_COMDAT_SELECT_NODUPLICATES);
+ ECase(IMAGE_COMDAT_SELECT_ANY);
+ ECase(IMAGE_COMDAT_SELECT_SAME_SIZE);
+ ECase(IMAGE_COMDAT_SELECT_EXACT_MATCH);
+ ECase(IMAGE_COMDAT_SELECT_ASSOCIATIVE);
+ ECase(IMAGE_COMDAT_SELECT_LARGEST);
+ ECase(IMAGE_COMDAT_SELECT_NEWEST);
+}
+
+void
+ScalarEnumerationTraits<COFFYAML::WeakExternalCharacteristics>::enumeration(
+ IO &IO, COFFYAML::WeakExternalCharacteristics &Value) {
+ IO.enumCase(Value, "0", 0);
+ ECase(IMAGE_WEAK_EXTERN_SEARCH_NOLIBRARY);
+ ECase(IMAGE_WEAK_EXTERN_SEARCH_LIBRARY);
+ ECase(IMAGE_WEAK_EXTERN_SEARCH_ALIAS);
+}
+
+void ScalarEnumerationTraits<COFFYAML::AuxSymbolType>::enumeration(
+ IO &IO, COFFYAML::AuxSymbolType &Value) {
+ ECase(IMAGE_AUX_SYMBOL_TYPE_TOKEN_DEF);
+}
+
void ScalarEnumerationTraits<COFF::MachineTypes>::enumeration(
IO &IO, COFF::MachineTypes &Value) {
ECase(IMAGE_FILE_MACHINE_UNKNOWN);
ECase(IMAGE_FILE_MACHINE_AM33);
ECase(IMAGE_FILE_MACHINE_AMD64);
ECase(IMAGE_FILE_MACHINE_ARM);
- ECase(IMAGE_FILE_MACHINE_ARMV7);
+ ECase(IMAGE_FILE_MACHINE_ARMNT);
ECase(IMAGE_FILE_MACHINE_EBC);
ECase(IMAGE_FILE_MACHINE_I386);
ECase(IMAGE_FILE_MACHINE_IA64);
@@ -107,8 +133,8 @@ void ScalarEnumerationTraits<COFF::SymbolComplexType>::enumeration(
ECase(IMAGE_SYM_DTYPE_ARRAY);
}
-void ScalarEnumerationTraits<COFF::RelocationTypeX86>::enumeration(
- IO &IO, COFF::RelocationTypeX86 &Value) {
+void ScalarEnumerationTraits<COFF::RelocationTypeI386>::enumeration(
+ IO &IO, COFF::RelocationTypeI386 &Value) {
ECase(IMAGE_REL_I386_ABSOLUTE);
ECase(IMAGE_REL_I386_DIR16);
ECase(IMAGE_REL_I386_REL16);
@@ -120,6 +146,10 @@ void ScalarEnumerationTraits<COFF::RelocationTypeX86>::enumeration(
ECase(IMAGE_REL_I386_TOKEN);
ECase(IMAGE_REL_I386_SECREL7);
ECase(IMAGE_REL_I386_REL32);
+}
+
+void ScalarEnumerationTraits<COFF::RelocationTypeAMD64>::enumeration(
+ IO &IO, COFF::RelocationTypeAMD64 &Value) {
ECase(IMAGE_REL_AMD64_ABSOLUTE);
ECase(IMAGE_REL_AMD64_ADDR64);
ECase(IMAGE_REL_AMD64_ADDR32);
@@ -187,6 +217,24 @@ void ScalarBitSetTraits<COFF::SectionCharacteristics>::bitset(
#undef BCase
namespace {
+struct NSectionSelectionType {
+ NSectionSelectionType(IO &)
+ : SelectionType(COFFYAML::COMDATType(0)) {}
+ NSectionSelectionType(IO &, uint8_t C)
+ : SelectionType(COFFYAML::COMDATType(C)) {}
+ uint8_t denormalize(IO &) { return SelectionType; }
+ COFFYAML::COMDATType SelectionType;
+};
+
+struct NWeakExternalCharacteristics {
+ NWeakExternalCharacteristics(IO &)
+ : Characteristics(COFFYAML::WeakExternalCharacteristics(0)) {}
+ NWeakExternalCharacteristics(IO &, uint32_t C)
+ : Characteristics(COFFYAML::WeakExternalCharacteristics(C)) {}
+ uint32_t denormalize(IO &) { return Characteristics; }
+ COFFYAML::WeakExternalCharacteristics Characteristics;
+};
+
struct NSectionCharacteristics {
NSectionCharacteristics(IO &)
: Characteristics(COFF::SectionCharacteristics(0)) {}
@@ -196,6 +244,15 @@ struct NSectionCharacteristics {
COFF::SectionCharacteristics Characteristics;
};
+struct NAuxTokenType {
+ NAuxTokenType(IO &)
+ : AuxType(COFFYAML::AuxSymbolType(0)) {}
+ NAuxTokenType(IO &, uint8_t C)
+ : AuxType(COFFYAML::AuxSymbolType(C)) {}
+ uint32_t denormalize(IO &) { return AuxType; }
+ COFFYAML::AuxSymbolType AuxType;
+};
+
struct NStorageClass {
NStorageClass(IO &) : StorageClass(COFF::SymbolStorageClass(0)) {}
NStorageClass(IO &, uint8_t S) : StorageClass(COFF::SymbolStorageClass(S)) {}
@@ -220,22 +277,33 @@ struct NHeaderCharacteristics {
COFF::Characteristics Characteristics;
};
+template <typename RelocType>
struct NType {
- NType(IO &) : Type(COFF::RelocationTypeX86(0)) {}
- NType(IO &, uint16_t T) : Type(COFF::RelocationTypeX86(T)) {}
+ NType(IO &) : Type(RelocType(0)) {}
+ NType(IO &, uint16_t T) : Type(RelocType(T)) {}
uint16_t denormalize(IO &) { return Type; }
- COFF::RelocationTypeX86 Type;
+ RelocType Type;
};
}
void MappingTraits<COFFYAML::Relocation>::mapping(IO &IO,
COFFYAML::Relocation &Rel) {
- MappingNormalization<NType, uint16_t> NT(IO, Rel.Type);
-
IO.mapRequired("VirtualAddress", Rel.VirtualAddress);
IO.mapRequired("SymbolName", Rel.SymbolName);
- IO.mapRequired("Type", NT->Type);
+
+ COFF::header &H = *static_cast<COFF::header *>(IO.getContext());
+ if (H.Machine == COFF::IMAGE_FILE_MACHINE_I386) {
+ MappingNormalization<NType<COFF::RelocationTypeI386>, uint16_t> NT(
+ IO, Rel.Type);
+ IO.mapRequired("Type", NT->Type);
+ } else if (H.Machine == COFF::IMAGE_FILE_MACHINE_AMD64) {
+ MappingNormalization<NType<COFF::RelocationTypeAMD64>, uint16_t> NT(
+ IO, Rel.Type);
+ IO.mapRequired("Type", NT->Type);
+ } else {
+ IO.mapRequired("Type", Rel.Type);
+ }
}
void MappingTraits<COFF::header>::mapping(IO &IO, COFF::header &H) {
@@ -245,6 +313,49 @@ void MappingTraits<COFF::header>::mapping(IO &IO, COFF::header &H) {
IO.mapRequired("Machine", NM->Machine);
IO.mapOptional("Characteristics", NC->Characteristics);
+ IO.setContext(static_cast<void *>(&H));
+}
+
+void MappingTraits<COFF::AuxiliaryFunctionDefinition>::mapping(
+ IO &IO, COFF::AuxiliaryFunctionDefinition &AFD) {
+ IO.mapRequired("TagIndex", AFD.TagIndex);
+ IO.mapRequired("TotalSize", AFD.TotalSize);
+ IO.mapRequired("PointerToLinenumber", AFD.PointerToLinenumber);
+ IO.mapRequired("PointerToNextFunction", AFD.PointerToNextFunction);
+}
+
+void MappingTraits<COFF::AuxiliarybfAndefSymbol>::mapping(
+ IO &IO, COFF::AuxiliarybfAndefSymbol &AAS) {
+ IO.mapRequired("Linenumber", AAS.Linenumber);
+ IO.mapRequired("PointerToNextFunction", AAS.PointerToNextFunction);
+}
+
+void MappingTraits<COFF::AuxiliaryWeakExternal>::mapping(
+ IO &IO, COFF::AuxiliaryWeakExternal &AWE) {
+ MappingNormalization<NWeakExternalCharacteristics, uint32_t> NWEC(
+ IO, AWE.Characteristics);
+ IO.mapRequired("TagIndex", AWE.TagIndex);
+ IO.mapRequired("Characteristics", NWEC->Characteristics);
+}
+
+void MappingTraits<COFF::AuxiliarySectionDefinition>::mapping(
+ IO &IO, COFF::AuxiliarySectionDefinition &ASD) {
+ MappingNormalization<NSectionSelectionType, uint8_t> NSST(
+ IO, ASD.Selection);
+
+ IO.mapRequired("Length", ASD.Length);
+ IO.mapRequired("NumberOfRelocations", ASD.NumberOfRelocations);
+ IO.mapRequired("NumberOfLinenumbers", ASD.NumberOfLinenumbers);
+ IO.mapRequired("CheckSum", ASD.CheckSum);
+ IO.mapRequired("Number", ASD.Number);
+ IO.mapOptional("Selection", NSST->SelectionType, COFFYAML::COMDATType(0));
+}
+
+void MappingTraits<COFF::AuxiliaryCLRToken>::mapping(
+ IO &IO, COFF::AuxiliaryCLRToken &ACT) {
+ MappingNormalization<NAuxTokenType, uint8_t> NATT(IO, ACT.AuxType);
+ IO.mapRequired("AuxType", NATT->AuxType);
+ IO.mapRequired("SymbolTableIndex", ACT.SymbolTableIndex);
}
void MappingTraits<COFFYAML::Symbol>::mapping(IO &IO, COFFYAML::Symbol &S) {
@@ -256,9 +367,12 @@ void MappingTraits<COFFYAML::Symbol>::mapping(IO &IO, COFFYAML::Symbol &S) {
IO.mapRequired("SimpleType", S.SimpleType);
IO.mapRequired("ComplexType", S.ComplexType);
IO.mapRequired("StorageClass", NS->StorageClass);
- IO.mapOptional("NumberOfAuxSymbols", S.Header.NumberOfAuxSymbols,
- (uint8_t) 0);
- IO.mapOptional("AuxiliaryData", S.AuxiliaryData, object::yaml::BinaryRef());
+ IO.mapOptional("FunctionDefinition", S.FunctionDefinition);
+ IO.mapOptional("bfAndefSymbol", S.bfAndefSymbol);
+ IO.mapOptional("WeakExternal", S.WeakExternal);
+ IO.mapOptional("File", S.File, StringRef());
+ IO.mapOptional("SectionDefinition", S.SectionDefinition);
+ IO.mapOptional("CLRToken", S.CLRToken);
}
void MappingTraits<COFFYAML::Section>::mapping(IO &IO, COFFYAML::Section &Sec) {
diff --git a/contrib/llvm/lib/Object/ELF.cpp b/contrib/llvm/lib/Object/ELF.cpp
index 41d0519..d999106 100644
--- a/contrib/llvm/lib/Object/ELF.cpp
+++ b/contrib/llvm/lib/Object/ELF.cpp
@@ -159,6 +159,15 @@ StringRef getELFRelocationTypeName(uint32_t Machine, uint32_t Type) {
LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MIPS_TLS_TPREL_HI16);
LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MIPS_TLS_TPREL_LO16);
LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MIPS_GLOB_DAT);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MIPS_PC21_S2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MIPS_PC26_S2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MIPS_PC18_S3);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MIPS_PC19_S2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MIPS_PCHI16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MIPS_PCLO16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MIPS16_GOT16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MIPS16_HI16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MIPS16_LO16);
LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MIPS_COPY);
LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MIPS_JUMP_SLOT);
LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MICROMIPS_26_S1);
@@ -170,11 +179,14 @@ StringRef getELFRelocationTypeName(uint32_t Machine, uint32_t Type) {
LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MICROMIPS_GOT_DISP);
LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MICROMIPS_GOT_PAGE);
LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MICROMIPS_GOT_OFST);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MICROMIPS_TLS_GD);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MICROMIPS_TLS_LDM);
LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MICROMIPS_TLS_DTPREL_HI16);
LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MICROMIPS_TLS_DTPREL_LO16);
LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MICROMIPS_TLS_TPREL_HI16);
LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MICROMIPS_TLS_TPREL_LO16);
LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MIPS_NUM);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_MIPS_PC32);
default:
break;
}
diff --git a/contrib/llvm/lib/Object/ELFObjectFile.cpp b/contrib/llvm/lib/Object/ELFObjectFile.cpp
index 15bc6be..4f0f60b 100644
--- a/contrib/llvm/lib/Object/ELFObjectFile.cpp
+++ b/contrib/llvm/lib/Object/ELFObjectFile.cpp
@@ -17,57 +17,70 @@
namespace llvm {
using namespace object;
-// Creates an in-memory object-file by default: createELFObjectFile(Buffer)
-ObjectFile *ObjectFile::createELFObjectFile(MemoryBuffer *Object) {
- std::pair<unsigned char, unsigned char> Ident = getElfArchType(Object);
- error_code ec;
-
+ErrorOr<ObjectFile *>
+ObjectFile::createELFObjectFile(std::unique_ptr<MemoryBuffer> &Obj) {
+ std::pair<unsigned char, unsigned char> Ident =
+ getElfArchType(Obj->getBuffer());
std::size_t MaxAlignment =
- 1ULL << countTrailingZeros(uintptr_t(Object->getBufferStart()));
+ 1ULL << countTrailingZeros(uintptr_t(Obj->getBufferStart()));
+ std::error_code EC;
+ std::unique_ptr<ObjectFile> R;
if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2LSB)
#if !LLVM_IS_UNALIGNED_ACCESS_FAST
if (MaxAlignment >= 4)
- return new ELFObjectFile<ELFType<support::little, 4, false> >(Object, ec);
+ R.reset(new ELFObjectFile<ELFType<support::little, 4, false>>(
+ std::move(Obj), EC));
else
#endif
if (MaxAlignment >= 2)
- return new ELFObjectFile<ELFType<support::little, 2, false> >(Object, ec);
+ R.reset(new ELFObjectFile<ELFType<support::little, 2, false>>(
+ std::move(Obj), EC));
else
- llvm_unreachable("Invalid alignment for ELF file!");
+ return object_error::parse_failed;
else if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2MSB)
#if !LLVM_IS_UNALIGNED_ACCESS_FAST
if (MaxAlignment >= 4)
- return new ELFObjectFile<ELFType<support::big, 4, false> >(Object, ec);
+ R.reset(new ELFObjectFile<ELFType<support::big, 4, false>>(std::move(Obj),
+ EC));
else
#endif
if (MaxAlignment >= 2)
- return new ELFObjectFile<ELFType<support::big, 2, false> >(Object, ec);
+ R.reset(new ELFObjectFile<ELFType<support::big, 2, false>>(std::move(Obj),
+ EC));
else
- llvm_unreachable("Invalid alignment for ELF file!");
+ return object_error::parse_failed;
else if (Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2MSB)
#if !LLVM_IS_UNALIGNED_ACCESS_FAST
if (MaxAlignment >= 8)
- return new ELFObjectFile<ELFType<support::big, 8, true> >(Object, ec);
+ R.reset(new ELFObjectFile<ELFType<support::big, 8, true>>(std::move(Obj),
+ EC));
else
#endif
if (MaxAlignment >= 2)
- return new ELFObjectFile<ELFType<support::big, 2, true> >(Object, ec);
+ R.reset(new ELFObjectFile<ELFType<support::big, 2, true>>(std::move(Obj),
+ EC));
else
- llvm_unreachable("Invalid alignment for ELF file!");
+ return object_error::parse_failed;
else if (Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2LSB) {
#if !LLVM_IS_UNALIGNED_ACCESS_FAST
if (MaxAlignment >= 8)
- return new ELFObjectFile<ELFType<support::little, 8, true> >(Object, ec);
+ R.reset(new ELFObjectFile<ELFType<support::little, 8, true>>(
+ std::move(Obj), EC));
else
#endif
if (MaxAlignment >= 2)
- return new ELFObjectFile<ELFType<support::little, 2, true> >(Object, ec);
+ R.reset(new ELFObjectFile<ELFType<support::little, 2, true>>(
+ std::move(Obj), EC));
else
- llvm_unreachable("Invalid alignment for ELF file!");
+ return object_error::parse_failed;
}
+ else
+ llvm_unreachable("Buffer is not an ELF object file!");
- report_fatal_error("Buffer is not an ELF object file!");
+ if (EC)
+ return EC;
+ return R.release();
}
} // end namespace llvm
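
A sketch of how a caller adapts to the ErrorOr-based factory above. openELF is a hypothetical helper; the ownership notes are assumptions based on this hunk (on success the constructed ObjectFile takes the buffer via std::move).

#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>

using namespace llvm;
using namespace llvm::object;

// Returns a heap-allocated ObjectFile on success, null on failure. The caller
// relinquishes the buffer either way; on success the ObjectFile owns it.
static ObjectFile *openELF(std::unique_ptr<MemoryBuffer> Buf) {
  ErrorOr<ObjectFile *> ObjOrErr = ObjectFile::createELFObjectFile(Buf);
  if (std::error_code EC = ObjOrErr.getError()) {
    errs() << "cannot create ELF object: " << EC.message() << "\n";
    return nullptr;
  }
  return ObjOrErr.get();
}
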
diff --git a/contrib/llvm/lib/Object/ELFYAML.cpp b/contrib/llvm/lib/Object/ELFYAML.cpp
index 2f35cf9..6340841 100644
--- a/contrib/llvm/lib/Object/ELFYAML.cpp
+++ b/contrib/llvm/lib/Object/ELFYAML.cpp
@@ -12,8 +12,12 @@
//===----------------------------------------------------------------------===//
#include "llvm/Object/ELFYAML.h"
+#include "llvm/Support/Casting.h"
namespace llvm {
+
+ELFYAML::Section::~Section() {}
+
namespace yaml {
void
@@ -237,8 +241,65 @@ void ScalarEnumerationTraits<ELFYAML::ELF_ELFOSABI>::enumeration(
#undef ECase
}
+void ScalarBitSetTraits<ELFYAML::ELF_EF>::bitset(IO &IO,
+ ELFYAML::ELF_EF &Value) {
+ const auto *Object = static_cast<ELFYAML::Object *>(IO.getContext());
+ assert(Object && "The IO context is not initialized");
+#define BCase(X) IO.bitSetCase(Value, #X, ELF::X);
+#define BCaseMask(X, M) IO.maskedBitSetCase(Value, #X, ELF::X, ELF::M);
+ switch (Object->Header.Machine) {
+ case ELF::EM_ARM:
+ BCase(EF_ARM_SOFT_FLOAT)
+ BCase(EF_ARM_VFP_FLOAT)
+ BCaseMask(EF_ARM_EABI_UNKNOWN, EF_ARM_EABIMASK)
+ BCaseMask(EF_ARM_EABI_VER1, EF_ARM_EABIMASK)
+ BCaseMask(EF_ARM_EABI_VER2, EF_ARM_EABIMASK)
+ BCaseMask(EF_ARM_EABI_VER3, EF_ARM_EABIMASK)
+ BCaseMask(EF_ARM_EABI_VER4, EF_ARM_EABIMASK)
+ BCaseMask(EF_ARM_EABI_VER5, EF_ARM_EABIMASK)
+ break;
+ case ELF::EM_MIPS:
+ BCase(EF_MIPS_NOREORDER)
+ BCase(EF_MIPS_PIC)
+ BCase(EF_MIPS_CPIC)
+ BCase(EF_MIPS_ABI2)
+ BCase(EF_MIPS_32BITMODE)
+ BCase(EF_MIPS_ABI_O32)
+ BCase(EF_MIPS_MICROMIPS)
+ BCase(EF_MIPS_ARCH_ASE_M16)
+ BCaseMask(EF_MIPS_ARCH_1, EF_MIPS_ARCH)
+ BCaseMask(EF_MIPS_ARCH_2, EF_MIPS_ARCH)
+ BCaseMask(EF_MIPS_ARCH_3, EF_MIPS_ARCH)
+ BCaseMask(EF_MIPS_ARCH_4, EF_MIPS_ARCH)
+ BCaseMask(EF_MIPS_ARCH_5, EF_MIPS_ARCH)
+ BCaseMask(EF_MIPS_ARCH_32, EF_MIPS_ARCH)
+ BCaseMask(EF_MIPS_ARCH_64, EF_MIPS_ARCH)
+ BCaseMask(EF_MIPS_ARCH_32R2, EF_MIPS_ARCH)
+ BCaseMask(EF_MIPS_ARCH_64R2, EF_MIPS_ARCH)
+ BCaseMask(EF_MIPS_ARCH_32R6, EF_MIPS_ARCH)
+ BCaseMask(EF_MIPS_ARCH_64R6, EF_MIPS_ARCH)
+ break;
+ case ELF::EM_HEXAGON:
+ BCase(EF_HEXAGON_MACH_V2)
+ BCase(EF_HEXAGON_MACH_V3)
+ BCase(EF_HEXAGON_MACH_V4)
+ BCase(EF_HEXAGON_MACH_V5)
+ BCase(EF_HEXAGON_ISA_V2)
+ BCase(EF_HEXAGON_ISA_V3)
+ BCase(EF_HEXAGON_ISA_V4)
+ BCase(EF_HEXAGON_ISA_V5)
+ break;
+ default:
+ llvm_unreachable("Unsupported architecture");
+ }
+#undef BCase
+#undef BCaseMask
+}
+
void ScalarEnumerationTraits<ELFYAML::ELF_SHT>::enumeration(
IO &IO, ELFYAML::ELF_SHT &Value) {
+ const auto *Object = static_cast<ELFYAML::Object *>(IO.getContext());
+ assert(Object && "The IO context is not initialized");
#define ECase(X) IO.enumCase(Value, #X, ELF::X);
ECase(SHT_NULL)
ECase(SHT_PROGBITS)
@@ -258,6 +319,37 @@ void ScalarEnumerationTraits<ELFYAML::ELF_SHT>::enumeration(
ECase(SHT_PREINIT_ARRAY)
ECase(SHT_GROUP)
ECase(SHT_SYMTAB_SHNDX)
+ ECase(SHT_LOOS)
+ ECase(SHT_GNU_ATTRIBUTES)
+ ECase(SHT_GNU_HASH)
+ ECase(SHT_GNU_verdef)
+ ECase(SHT_GNU_verneed)
+ ECase(SHT_GNU_versym)
+ ECase(SHT_HIOS)
+ ECase(SHT_LOPROC)
+ switch (Object->Header.Machine) {
+ case ELF::EM_ARM:
+ ECase(SHT_ARM_EXIDX)
+ ECase(SHT_ARM_PREEMPTMAP)
+ ECase(SHT_ARM_ATTRIBUTES)
+ ECase(SHT_ARM_DEBUGOVERLAY)
+ ECase(SHT_ARM_OVERLAYSECTION)
+ break;
+ case ELF::EM_HEXAGON:
+ ECase(SHT_HEX_ORDERED)
+ break;
+ case ELF::EM_X86_64:
+ ECase(SHT_X86_64_UNWIND)
+ break;
+ case ELF::EM_MIPS:
+ ECase(SHT_MIPS_REGINFO)
+ ECase(SHT_MIPS_OPTIONS)
+ ECase(SHT_MIPS_ABIFLAGS)
+ break;
+ default:
+ // Nothing to do.
+ break;
+ }
#undef ECase
}
@@ -292,6 +384,280 @@ void ScalarEnumerationTraits<ELFYAML::ELF_STT>::enumeration(
#undef ECase
}
+void ScalarEnumerationTraits<ELFYAML::ELF_STV>::enumeration(
+ IO &IO, ELFYAML::ELF_STV &Value) {
+#define ECase(X) IO.enumCase(Value, #X, ELF::X);
+ ECase(STV_DEFAULT)
+ ECase(STV_INTERNAL)
+ ECase(STV_HIDDEN)
+ ECase(STV_PROTECTED)
+#undef ECase
+}
+
+void ScalarEnumerationTraits<ELFYAML::ELF_REL>::enumeration(
+ IO &IO, ELFYAML::ELF_REL &Value) {
+ const auto *Object = static_cast<ELFYAML::Object *>(IO.getContext());
+ assert(Object && "The IO context is not initialized");
+#define ECase(X) IO.enumCase(Value, #X, ELF::X);
+ switch (Object->Header.Machine) {
+ case ELF::EM_X86_64:
+ ECase(R_X86_64_NONE)
+ ECase(R_X86_64_64)
+ ECase(R_X86_64_PC32)
+ ECase(R_X86_64_GOT32)
+ ECase(R_X86_64_PLT32)
+ ECase(R_X86_64_COPY)
+ ECase(R_X86_64_GLOB_DAT)
+ ECase(R_X86_64_JUMP_SLOT)
+ ECase(R_X86_64_RELATIVE)
+ ECase(R_X86_64_GOTPCREL)
+ ECase(R_X86_64_32)
+ ECase(R_X86_64_32S)
+ ECase(R_X86_64_16)
+ ECase(R_X86_64_PC16)
+ ECase(R_X86_64_8)
+ ECase(R_X86_64_PC8)
+ ECase(R_X86_64_DTPMOD64)
+ ECase(R_X86_64_DTPOFF64)
+ ECase(R_X86_64_TPOFF64)
+ ECase(R_X86_64_TLSGD)
+ ECase(R_X86_64_TLSLD)
+ ECase(R_X86_64_DTPOFF32)
+ ECase(R_X86_64_GOTTPOFF)
+ ECase(R_X86_64_TPOFF32)
+ ECase(R_X86_64_PC64)
+ ECase(R_X86_64_GOTOFF64)
+ ECase(R_X86_64_GOTPC32)
+ ECase(R_X86_64_GOT64)
+ ECase(R_X86_64_GOTPCREL64)
+ ECase(R_X86_64_GOTPC64)
+ ECase(R_X86_64_GOTPLT64)
+ ECase(R_X86_64_PLTOFF64)
+ ECase(R_X86_64_SIZE32)
+ ECase(R_X86_64_SIZE64)
+ ECase(R_X86_64_GOTPC32_TLSDESC)
+ ECase(R_X86_64_TLSDESC_CALL)
+ ECase(R_X86_64_TLSDESC)
+ ECase(R_X86_64_IRELATIVE)
+ break;
+ case ELF::EM_MIPS:
+ ECase(R_MIPS_NONE)
+ ECase(R_MIPS_16)
+ ECase(R_MIPS_32)
+ ECase(R_MIPS_REL32)
+ ECase(R_MIPS_26)
+ ECase(R_MIPS_HI16)
+ ECase(R_MIPS_LO16)
+ ECase(R_MIPS_GPREL16)
+ ECase(R_MIPS_LITERAL)
+ ECase(R_MIPS_GOT16)
+ ECase(R_MIPS_PC16)
+ ECase(R_MIPS_CALL16)
+ ECase(R_MIPS_GPREL32)
+ ECase(R_MIPS_UNUSED1)
+ ECase(R_MIPS_UNUSED2)
+ ECase(R_MIPS_SHIFT5)
+ ECase(R_MIPS_SHIFT6)
+ ECase(R_MIPS_64)
+ ECase(R_MIPS_GOT_DISP)
+ ECase(R_MIPS_GOT_PAGE)
+ ECase(R_MIPS_GOT_OFST)
+ ECase(R_MIPS_GOT_HI16)
+ ECase(R_MIPS_GOT_LO16)
+ ECase(R_MIPS_SUB)
+ ECase(R_MIPS_INSERT_A)
+ ECase(R_MIPS_INSERT_B)
+ ECase(R_MIPS_DELETE)
+ ECase(R_MIPS_HIGHER)
+ ECase(R_MIPS_HIGHEST)
+ ECase(R_MIPS_CALL_HI16)
+ ECase(R_MIPS_CALL_LO16)
+ ECase(R_MIPS_SCN_DISP)
+ ECase(R_MIPS_REL16)
+ ECase(R_MIPS_ADD_IMMEDIATE)
+ ECase(R_MIPS_PJUMP)
+ ECase(R_MIPS_RELGOT)
+ ECase(R_MIPS_JALR)
+ ECase(R_MIPS_TLS_DTPMOD32)
+ ECase(R_MIPS_TLS_DTPREL32)
+ ECase(R_MIPS_TLS_DTPMOD64)
+ ECase(R_MIPS_TLS_DTPREL64)
+ ECase(R_MIPS_TLS_GD)
+ ECase(R_MIPS_TLS_LDM)
+ ECase(R_MIPS_TLS_DTPREL_HI16)
+ ECase(R_MIPS_TLS_DTPREL_LO16)
+ ECase(R_MIPS_TLS_GOTTPREL)
+ ECase(R_MIPS_TLS_TPREL32)
+ ECase(R_MIPS_TLS_TPREL64)
+ ECase(R_MIPS_TLS_TPREL_HI16)
+ ECase(R_MIPS_TLS_TPREL_LO16)
+ ECase(R_MIPS_GLOB_DAT)
+ ECase(R_MIPS_PC21_S2)
+ ECase(R_MIPS_PC26_S2)
+ ECase(R_MIPS_PC18_S3)
+ ECase(R_MIPS_PC19_S2)
+ ECase(R_MIPS_PCHI16)
+ ECase(R_MIPS_PCLO16)
+ ECase(R_MIPS16_GOT16)
+ ECase(R_MIPS16_HI16)
+ ECase(R_MIPS16_LO16)
+ ECase(R_MIPS_COPY)
+ ECase(R_MIPS_JUMP_SLOT)
+ ECase(R_MICROMIPS_26_S1)
+ ECase(R_MICROMIPS_HI16)
+ ECase(R_MICROMIPS_LO16)
+ ECase(R_MICROMIPS_GOT16)
+ ECase(R_MICROMIPS_PC16_S1)
+ ECase(R_MICROMIPS_CALL16)
+ ECase(R_MICROMIPS_GOT_DISP)
+ ECase(R_MICROMIPS_GOT_PAGE)
+ ECase(R_MICROMIPS_GOT_OFST)
+ ECase(R_MICROMIPS_TLS_GD)
+ ECase(R_MICROMIPS_TLS_LDM)
+ ECase(R_MICROMIPS_TLS_DTPREL_HI16)
+ ECase(R_MICROMIPS_TLS_DTPREL_LO16)
+ ECase(R_MICROMIPS_TLS_TPREL_HI16)
+ ECase(R_MICROMIPS_TLS_TPREL_LO16)
+ ECase(R_MIPS_NUM)
+ ECase(R_MIPS_PC32)
+ break;
+ case ELF::EM_HEXAGON:
+ ECase(R_HEX_NONE)
+ ECase(R_HEX_B22_PCREL)
+ ECase(R_HEX_B15_PCREL)
+ ECase(R_HEX_B7_PCREL)
+ ECase(R_HEX_LO16)
+ ECase(R_HEX_HI16)
+ ECase(R_HEX_32)
+ ECase(R_HEX_16)
+ ECase(R_HEX_8)
+ ECase(R_HEX_GPREL16_0)
+ ECase(R_HEX_GPREL16_1)
+ ECase(R_HEX_GPREL16_2)
+ ECase(R_HEX_GPREL16_3)
+ ECase(R_HEX_HL16)
+ ECase(R_HEX_B13_PCREL)
+ ECase(R_HEX_B9_PCREL)
+ ECase(R_HEX_B32_PCREL_X)
+ ECase(R_HEX_32_6_X)
+ ECase(R_HEX_B22_PCREL_X)
+ ECase(R_HEX_B15_PCREL_X)
+ ECase(R_HEX_B13_PCREL_X)
+ ECase(R_HEX_B9_PCREL_X)
+ ECase(R_HEX_B7_PCREL_X)
+ ECase(R_HEX_16_X)
+ ECase(R_HEX_12_X)
+ ECase(R_HEX_11_X)
+ ECase(R_HEX_10_X)
+ ECase(R_HEX_9_X)
+ ECase(R_HEX_8_X)
+ ECase(R_HEX_7_X)
+ ECase(R_HEX_6_X)
+ ECase(R_HEX_32_PCREL)
+ ECase(R_HEX_COPY)
+ ECase(R_HEX_GLOB_DAT)
+ ECase(R_HEX_JMP_SLOT)
+ ECase(R_HEX_RELATIVE)
+ ECase(R_HEX_PLT_B22_PCREL)
+ ECase(R_HEX_GOTREL_LO16)
+ ECase(R_HEX_GOTREL_HI16)
+ ECase(R_HEX_GOTREL_32)
+ ECase(R_HEX_GOT_LO16)
+ ECase(R_HEX_GOT_HI16)
+ ECase(R_HEX_GOT_32)
+ ECase(R_HEX_GOT_16)
+ ECase(R_HEX_DTPMOD_32)
+ ECase(R_HEX_DTPREL_LO16)
+ ECase(R_HEX_DTPREL_HI16)
+ ECase(R_HEX_DTPREL_32)
+ ECase(R_HEX_DTPREL_16)
+ ECase(R_HEX_GD_PLT_B22_PCREL)
+ ECase(R_HEX_GD_GOT_LO16)
+ ECase(R_HEX_GD_GOT_HI16)
+ ECase(R_HEX_GD_GOT_32)
+ ECase(R_HEX_GD_GOT_16)
+ ECase(R_HEX_IE_LO16)
+ ECase(R_HEX_IE_HI16)
+ ECase(R_HEX_IE_32)
+ ECase(R_HEX_IE_GOT_LO16)
+ ECase(R_HEX_IE_GOT_HI16)
+ ECase(R_HEX_IE_GOT_32)
+ ECase(R_HEX_IE_GOT_16)
+ ECase(R_HEX_TPREL_LO16)
+ ECase(R_HEX_TPREL_HI16)
+ ECase(R_HEX_TPREL_32)
+ ECase(R_HEX_TPREL_16)
+ ECase(R_HEX_6_PCREL_X)
+ ECase(R_HEX_GOTREL_32_6_X)
+ ECase(R_HEX_GOTREL_16_X)
+ ECase(R_HEX_GOTREL_11_X)
+ ECase(R_HEX_GOT_32_6_X)
+ ECase(R_HEX_GOT_16_X)
+ ECase(R_HEX_GOT_11_X)
+ ECase(R_HEX_DTPREL_32_6_X)
+ ECase(R_HEX_DTPREL_16_X)
+ ECase(R_HEX_DTPREL_11_X)
+ ECase(R_HEX_GD_GOT_32_6_X)
+ ECase(R_HEX_GD_GOT_16_X)
+ ECase(R_HEX_GD_GOT_11_X)
+ ECase(R_HEX_IE_32_6_X)
+ ECase(R_HEX_IE_16_X)
+ ECase(R_HEX_IE_GOT_32_6_X)
+ ECase(R_HEX_IE_GOT_16_X)
+ ECase(R_HEX_IE_GOT_11_X)
+ ECase(R_HEX_TPREL_32_6_X)
+ ECase(R_HEX_TPREL_16_X)
+ ECase(R_HEX_TPREL_11_X)
+ break;
+ case ELF::EM_386:
+ ECase(R_386_NONE)
+ ECase(R_386_32)
+ ECase(R_386_PC32)
+ ECase(R_386_GOT32)
+ ECase(R_386_PLT32)
+ ECase(R_386_COPY)
+ ECase(R_386_GLOB_DAT)
+ ECase(R_386_JUMP_SLOT)
+ ECase(R_386_RELATIVE)
+ ECase(R_386_GOTOFF)
+ ECase(R_386_GOTPC)
+ ECase(R_386_32PLT)
+ ECase(R_386_TLS_TPOFF)
+ ECase(R_386_TLS_IE)
+ ECase(R_386_TLS_GOTIE)
+ ECase(R_386_TLS_LE)
+ ECase(R_386_TLS_GD)
+ ECase(R_386_TLS_LDM)
+ ECase(R_386_16)
+ ECase(R_386_PC16)
+ ECase(R_386_8)
+ ECase(R_386_PC8)
+ ECase(R_386_TLS_GD_32)
+ ECase(R_386_TLS_GD_PUSH)
+ ECase(R_386_TLS_GD_CALL)
+ ECase(R_386_TLS_GD_POP)
+ ECase(R_386_TLS_LDM_32)
+ ECase(R_386_TLS_LDM_PUSH)
+ ECase(R_386_TLS_LDM_CALL)
+ ECase(R_386_TLS_LDM_POP)
+ ECase(R_386_TLS_LDO_32)
+ ECase(R_386_TLS_IE_32)
+ ECase(R_386_TLS_LE_32)
+ ECase(R_386_TLS_DTPMOD32)
+ ECase(R_386_TLS_DTPOFF32)
+ ECase(R_386_TLS_TPOFF32)
+ ECase(R_386_TLS_GOTDESC)
+ ECase(R_386_TLS_DESC_CALL)
+ ECase(R_386_TLS_DESC)
+ ECase(R_386_IRELATIVE)
+ ECase(R_386_NUM)
+ break;
+ default:
+ llvm_unreachable("Unsupported architecture");
+ }
+#undef ECase
+}
+
void MappingTraits<ELFYAML::FileHeader>::mapping(IO &IO,
ELFYAML::FileHeader &FileHdr) {
IO.mapRequired("Class", FileHdr.Class);
@@ -299,6 +665,7 @@ void MappingTraits<ELFYAML::FileHeader>::mapping(IO &IO,
IO.mapOptional("OSABI", FileHdr.OSABI, ELFYAML::ELF_ELFOSABI(0));
IO.mapRequired("Type", FileHdr.Type);
IO.mapRequired("Machine", FileHdr.Machine);
+ IO.mapOptional("Flags", FileHdr.Flags, ELFYAML::ELF_EF(0));
IO.mapOptional("Entry", FileHdr.Entry, Hex64(0));
}
@@ -308,6 +675,7 @@ void MappingTraits<ELFYAML::Symbol>::mapping(IO &IO, ELFYAML::Symbol &Symbol) {
IO.mapOptional("Section", Symbol.Section, StringRef());
IO.mapOptional("Value", Symbol.Value, Hex64(0));
IO.mapOptional("Size", Symbol.Size, Hex64(0));
+ IO.mapOptional("Visibility", Symbol.Visibility, ELFYAML::ELF_STV(0));
}
void MappingTraits<ELFYAML::LocalGlobalWeakSymbols>::mapping(
@@ -317,21 +685,72 @@ void MappingTraits<ELFYAML::LocalGlobalWeakSymbols>::mapping(
IO.mapOptional("Weak", Symbols.Weak);
}
-void MappingTraits<ELFYAML::Section>::mapping(IO &IO,
- ELFYAML::Section &Section) {
+static void commonSectionMapping(IO &IO, ELFYAML::Section &Section) {
IO.mapOptional("Name", Section.Name, StringRef());
IO.mapRequired("Type", Section.Type);
IO.mapOptional("Flags", Section.Flags, ELFYAML::ELF_SHF(0));
IO.mapOptional("Address", Section.Address, Hex64(0));
- IO.mapOptional("Content", Section.Content);
- IO.mapOptional("Link", Section.Link);
+ IO.mapOptional("Link", Section.Link, StringRef());
IO.mapOptional("AddressAlign", Section.AddressAlign, Hex64(0));
}
+static void sectionMapping(IO &IO, ELFYAML::RawContentSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Content", Section.Content);
+ IO.mapOptional("Size", Section.Size, Hex64(Section.Content.binary_size()));
+}
+
+static void sectionMapping(IO &IO, ELFYAML::RelocationSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Info", Section.Info, StringRef());
+ IO.mapOptional("Relocations", Section.Relocations);
+}
+
+void MappingTraits<std::unique_ptr<ELFYAML::Section>>::mapping(
+ IO &IO, std::unique_ptr<ELFYAML::Section> &Section) {
+ ELFYAML::ELF_SHT sectionType;
+ if (IO.outputting())
+ sectionType = Section->Type;
+ else
+ IO.mapRequired("Type", sectionType);
+
+ switch (sectionType) {
+ case ELF::SHT_REL:
+ case ELF::SHT_RELA:
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::RelocationSection());
+ sectionMapping(IO, *cast<ELFYAML::RelocationSection>(Section.get()));
+ break;
+ default:
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::RawContentSection());
+ sectionMapping(IO, *cast<ELFYAML::RawContentSection>(Section.get()));
+ }
+}
+
+StringRef MappingTraits<std::unique_ptr<ELFYAML::Section>>::validate(
+ IO &io, std::unique_ptr<ELFYAML::Section> &Section) {
+ const auto *RawSection = dyn_cast<ELFYAML::RawContentSection>(Section.get());
+ if (!RawSection || RawSection->Size >= RawSection->Content.binary_size())
+ return StringRef();
+ return "Section size must be greater or equal to the content size";
+}
+
+void MappingTraits<ELFYAML::Relocation>::mapping(IO &IO,
+ ELFYAML::Relocation &Rel) {
+ IO.mapRequired("Offset", Rel.Offset);
+ IO.mapRequired("Symbol", Rel.Symbol);
+ IO.mapRequired("Type", Rel.Type);
+ IO.mapOptional("Addend", Rel.Addend);
+}
+
void MappingTraits<ELFYAML::Object>::mapping(IO &IO, ELFYAML::Object &Object) {
+ assert(!IO.getContext() && "The IO context is initialized already");
+ IO.setContext(&Object);
IO.mapRequired("FileHeader", Object.Header);
IO.mapOptional("Sections", Object.Sections);
IO.mapOptional("Symbols", Object.Symbols);
+ IO.setContext(nullptr);
}
} // end namespace yaml
diff --git a/contrib/llvm/lib/Object/Error.cpp b/contrib/llvm/lib/Object/Error.cpp
index 47ce38c..9d25269 100644
--- a/contrib/llvm/lib/Object/Error.cpp
+++ b/contrib/llvm/lib/Object/Error.cpp
@@ -18,11 +18,10 @@ using namespace llvm;
using namespace object;
namespace {
-class _object_error_category : public _do_message {
+class _object_error_category : public std::error_category {
public:
- virtual const char* name() const;
- virtual std::string message(int ev) const;
- virtual error_condition default_error_condition(int ev) const;
+ const char* name() const LLVM_NOEXCEPT override;
+ std::string message(int ev) const override;
};
}
@@ -30,8 +29,8 @@ const char *_object_error_category::name() const {
return "llvm.object";
}
-std::string _object_error_category::message(int ev) const {
- object_error::Impl E = static_cast<object_error::Impl>(ev);
+std::string _object_error_category::message(int EV) const {
+ object_error E = static_cast<object_error>(EV);
switch (E) {
case object_error::success: return "Success";
case object_error::arch_not_found:
@@ -47,13 +46,7 @@ std::string _object_error_category::message(int ev) const {
"defined.");
}
-error_condition _object_error_category::default_error_condition(int ev) const {
- if (ev == object_error::success)
- return errc::success;
- return errc::invalid_argument;
-}
-
-const error_category &object::object_category() {
+const std::error_category &object::object_category() {
static _object_error_category o;
return o;
}
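
The change above replaces LLVM's old error_code shim with the C++11 <system_error> machinery. As a reference point, here is the same std::error_category pattern in isolation, using only the standard library; all names are illustrative, not taken from the patch.

#include <string>
#include <system_error>

// An error enumeration and its category, mirroring the shape of
// _object_error_category above.
enum class demo_error { success = 0, parse_failed };

namespace {
class demo_error_category : public std::error_category {
public:
  const char *name() const noexcept override { return "demo"; }
  std::string message(int EV) const override {
    switch (static_cast<demo_error>(EV)) {
    case demo_error::success:      return "Success";
    case demo_error::parse_failed: return "Invalid data was encountered";
    }
    return "Unknown error";
  }
};
}

const std::error_category &demo_category() {
  static demo_error_category C;
  return C;
}

// Let std::error_code be constructed directly from demo_error values.
namespace std {
template <> struct is_error_code_enum<demo_error> : std::true_type {};
}

std::error_code make_error_code(demo_error E) {
  return std::error_code(static_cast<int>(E), demo_category());
}

std::error_code demoFailure() { return demo_error::parse_failed; }
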
diff --git a/contrib/llvm/lib/Object/IRObjectFile.cpp b/contrib/llvm/lib/Object/IRObjectFile.cpp
new file mode 100644
index 0000000..5323d92
--- /dev/null
+++ b/contrib/llvm/lib/Object/IRObjectFile.cpp
@@ -0,0 +1,279 @@
+//===- IRObjectFile.cpp - IR object file implementation ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the IRObjectFile class implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Object/IRObjectFile.h"
+#include "RecordStreamer.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/GVMaterializer.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCObjectFileInfo.h"
+#include "llvm/MC/MCTargetAsmParser.h"
+#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+using namespace object;
+
+IRObjectFile::IRObjectFile(std::unique_ptr<MemoryBuffer> Object,
+ std::unique_ptr<Module> Mod)
+ : SymbolicFile(Binary::ID_IR, std::move(Object)), M(std::move(Mod)) {
+ // If we have a DataLayout, setup a mangler.
+ const DataLayout *DL = M->getDataLayout();
+ if (!DL)
+ return;
+
+ Mang.reset(new Mangler(DL));
+
+ const std::string &InlineAsm = M->getModuleInlineAsm();
+ if (InlineAsm.empty())
+ return;
+
+ StringRef Triple = M->getTargetTriple();
+ std::string Err;
+ const Target *T = TargetRegistry::lookupTarget(Triple, Err);
+ if (!T)
+ return;
+
+ std::unique_ptr<MCRegisterInfo> MRI(T->createMCRegInfo(Triple));
+ if (!MRI)
+ return;
+
+ std::unique_ptr<MCAsmInfo> MAI(T->createMCAsmInfo(*MRI, Triple));
+ if (!MAI)
+ return;
+
+ std::unique_ptr<MCSubtargetInfo> STI(
+ T->createMCSubtargetInfo(Triple, "", ""));
+ if (!STI)
+ return;
+
+ std::unique_ptr<MCInstrInfo> MCII(T->createMCInstrInfo());
+ if (!MCII)
+ return;
+
+ MCObjectFileInfo MOFI;
+ MCContext MCCtx(MAI.get(), MRI.get(), &MOFI);
+ MOFI.InitMCObjectFileInfo(Triple, Reloc::Default, CodeModel::Default, MCCtx);
+ std::unique_ptr<RecordStreamer> Streamer(new RecordStreamer(MCCtx));
+
+ std::unique_ptr<MemoryBuffer> Buffer(MemoryBuffer::getMemBuffer(InlineAsm));
+ SourceMgr SrcMgr;
+ SrcMgr.AddNewSourceBuffer(Buffer.release(), SMLoc());
+ std::unique_ptr<MCAsmParser> Parser(
+ createMCAsmParser(SrcMgr, MCCtx, *Streamer, *MAI));
+
+ MCTargetOptions MCOptions;
+ std::unique_ptr<MCTargetAsmParser> TAP(
+ T->createMCAsmParser(*STI, *Parser, *MCII, MCOptions));
+ if (!TAP)
+ return;
+
+ Parser->setTargetParser(*TAP);
+ if (Parser->Run(false))
+ return;
+
+ for (auto &KV : *Streamer) {
+ StringRef Key = KV.first();
+ RecordStreamer::State Value = KV.second;
+ uint32_t Res = BasicSymbolRef::SF_None;
+ switch (Value) {
+ case RecordStreamer::NeverSeen:
+ llvm_unreachable("foo");
+ case RecordStreamer::DefinedGlobal:
+ Res |= BasicSymbolRef::SF_Global;
+ break;
+ case RecordStreamer::Defined:
+ break;
+ case RecordStreamer::Global:
+ case RecordStreamer::Used:
+ Res |= BasicSymbolRef::SF_Undefined;
+ Res |= BasicSymbolRef::SF_Global;
+ break;
+ }
+ AsmSymbols.push_back(
+ std::make_pair<std::string, uint32_t>(Key, std::move(Res)));
+ }
+}
+
+IRObjectFile::~IRObjectFile() {
+ GVMaterializer *GVM = M->getMaterializer();
+ if (GVM)
+ GVM->releaseBuffer();
+ }
+
+static const GlobalValue *getGV(DataRefImpl &Symb) {
+ if ((Symb.p & 3) == 3)
+ return nullptr;
+
+ return reinterpret_cast<GlobalValue*>(Symb.p & ~uintptr_t(3));
+}
+
+static uintptr_t skipEmpty(Module::const_alias_iterator I, const Module &M) {
+ if (I == M.alias_end())
+ return 3;
+ const GlobalValue *GV = &*I;
+ return reinterpret_cast<uintptr_t>(GV) | 2;
+}
+
+static uintptr_t skipEmpty(Module::const_global_iterator I, const Module &M) {
+ if (I == M.global_end())
+ return skipEmpty(M.alias_begin(), M);
+ const GlobalValue *GV = &*I;
+ return reinterpret_cast<uintptr_t>(GV) | 1;
+}
+
+static uintptr_t skipEmpty(Module::const_iterator I, const Module &M) {
+ if (I == M.end())
+ return skipEmpty(M.global_begin(), M);
+ const GlobalValue *GV = &*I;
+ return reinterpret_cast<uintptr_t>(GV) | 0;
+}
+
+static unsigned getAsmSymIndex(DataRefImpl Symb) {
+ assert((Symb.p & uintptr_t(3)) == 3);
+ uintptr_t Index = Symb.p & ~uintptr_t(3);
+ Index >>= 2;
+ return Index;
+}
+
+void IRObjectFile::moveSymbolNext(DataRefImpl &Symb) const {
+ const GlobalValue *GV = getGV(Symb);
+ uintptr_t Res;
+
+ switch (Symb.p & 3) {
+ case 0: {
+ Module::const_iterator Iter(static_cast<const Function*>(GV));
+ ++Iter;
+ Res = skipEmpty(Iter, *M);
+ break;
+ }
+ case 1: {
+ Module::const_global_iterator Iter(static_cast<const GlobalVariable*>(GV));
+ ++Iter;
+ Res = skipEmpty(Iter, *M);
+ break;
+ }
+ case 2: {
+ Module::const_alias_iterator Iter(static_cast<const GlobalAlias*>(GV));
+ ++Iter;
+ Res = skipEmpty(Iter, *M);
+ break;
+ }
+ case 3: {
+ unsigned Index = getAsmSymIndex(Symb);
+ assert(Index < AsmSymbols.size());
+ ++Index;
+ Res = (Index << 2) | 3;
+ break;
+ }
+ }
+
+ Symb.p = Res;
+}
+
+std::error_code IRObjectFile::printSymbolName(raw_ostream &OS,
+ DataRefImpl Symb) const {
+ const GlobalValue *GV = getGV(Symb);
+ if (!GV) {
+ unsigned Index = getAsmSymIndex(Symb);
+ assert(Index < AsmSymbols.size());
+ OS << AsmSymbols[Index].first;
+ return object_error::success;
+ }
+
+ if (Mang)
+ Mang->getNameWithPrefix(OS, GV, false);
+ else
+ OS << GV->getName();
+
+ return object_error::success;
+}
+
+static bool isDeclaration(const GlobalValue &V) {
+ if (V.hasAvailableExternallyLinkage())
+ return true;
+
+ if (V.isMaterializable())
+ return false;
+
+ return V.isDeclaration();
+}
+
+uint32_t IRObjectFile::getSymbolFlags(DataRefImpl Symb) const {
+ const GlobalValue *GV = getGV(Symb);
+
+ if (!GV) {
+ unsigned Index = getAsmSymIndex(Symb);
+ assert(Index < AsmSymbols.size());
+ return AsmSymbols[Index].second;
+ }
+
+ uint32_t Res = BasicSymbolRef::SF_None;
+ if (isDeclaration(*GV))
+ Res |= BasicSymbolRef::SF_Undefined;
+ if (GV->hasPrivateLinkage())
+ Res |= BasicSymbolRef::SF_FormatSpecific;
+ if (!GV->hasLocalLinkage())
+ Res |= BasicSymbolRef::SF_Global;
+ if (GV->hasCommonLinkage())
+ Res |= BasicSymbolRef::SF_Common;
+ if (GV->hasLinkOnceLinkage() || GV->hasWeakLinkage())
+ Res |= BasicSymbolRef::SF_Weak;
+
+ if (GV->getName().startswith("llvm."))
+ Res |= BasicSymbolRef::SF_FormatSpecific;
+ else if (auto *Var = dyn_cast<GlobalVariable>(GV)) {
+ if (Var->getSection() == StringRef("llvm.metadata"))
+ Res |= BasicSymbolRef::SF_FormatSpecific;
+ }
+
+ return Res;
+}
+
+const GlobalValue *IRObjectFile::getSymbolGV(DataRefImpl Symb) const {
+ const GlobalValue *GV = getGV(Symb);
+ return GV;
+}
+
+basic_symbol_iterator IRObjectFile::symbol_begin_impl() const {
+ Module::const_iterator I = M->begin();
+ DataRefImpl Ret;
+ Ret.p = skipEmpty(I, *M);
+ return basic_symbol_iterator(BasicSymbolRef(Ret, this));
+}
+
+basic_symbol_iterator IRObjectFile::symbol_end_impl() const {
+ DataRefImpl Ret;
+ uint64_t NumAsm = AsmSymbols.size();
+ NumAsm <<= 2;
+ Ret.p = 3 | NumAsm;
+ return basic_symbol_iterator(BasicSymbolRef(Ret, this));
+}
+
+ErrorOr<IRObjectFile *> llvm::object::IRObjectFile::createIRObjectFile(
+ std::unique_ptr<MemoryBuffer> Object, LLVMContext &Context) {
+ ErrorOr<Module *> MOrErr = getLazyBitcodeModule(Object.get(), Context);
+ if (std::error_code EC = MOrErr.getError())
+ return EC;
+
+ std::unique_ptr<Module> M(MOrErr.get());
+ return new IRObjectFile(std::move(Object), std::move(M));
+}
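
The DataRefImpl encoding above packs either a GlobalValue pointer or an inline-asm symbol index into a single word, using the low two bits as a discriminator (0 = function, 1 = global variable, 2 = alias, 3 = asm symbol), as seen in skipEmpty(), getGV() and getAsmSymIndex(). Below is a standalone sketch of the same tagging scheme, with illustrative names and no LLVM dependencies.

#include <cassert>
#include <cstdint>

enum Kind { FunctionKind = 0, GlobalVarKind = 1, AliasKind = 2, AsmKind = 3 };

// Kinds 0-2 carry a pointer that must be at least 4-byte aligned; kind 3
// carries an index shifted past the two tag bits.
static uintptr_t tagPointer(const void *GV, Kind K) {
  assert(K != AsmKind && (reinterpret_cast<uintptr_t>(GV) & 3) == 0);
  return reinterpret_cast<uintptr_t>(GV) | K;
}

static uintptr_t tagAsmIndex(unsigned Index) {
  return (uintptr_t(Index) << 2) | AsmKind;
}

static Kind kindOf(uintptr_t P) { return Kind(P & 3); }

static const void *untagPointer(uintptr_t P) {
  assert(kindOf(P) != AsmKind);
  return reinterpret_cast<const void *>(P & ~uintptr_t(3));
}

static unsigned untagAsmIndex(uintptr_t P) {
  assert(kindOf(P) == AsmKind);
  return unsigned(P >> 2);
}

int main() {
  alignas(4) int Dummy = 0;          // stands in for a GlobalValue
  uintptr_t F = tagPointer(&Dummy, FunctionKind);
  assert(kindOf(F) == FunctionKind && untagPointer(F) == &Dummy);
  uintptr_t A = tagAsmIndex(5);
  assert(kindOf(A) == AsmKind && untagAsmIndex(A) == 5);
  return 0;
}
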
diff --git a/contrib/llvm/lib/Object/MachOObjectFile.cpp b/contrib/llvm/lib/Object/MachOObjectFile.cpp
index d2cb8bd..51c4c33 100644
--- a/contrib/llvm/lib/Object/MachOObjectFile.cpp
+++ b/contrib/llvm/lib/Object/MachOObjectFile.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Object/MachO.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/DataExtractor.h"
#include "llvm/Support/Format.h"
@@ -26,206 +27,19 @@
using namespace llvm;
using namespace object;
-namespace llvm {
-namespace object {
-
-struct nlist_base {
- uint32_t n_strx;
- uint8_t n_type;
- uint8_t n_sect;
- uint16_t n_desc;
-};
-
-struct section_base {
- char sectname[16];
- char segname[16];
-};
-
-template<typename T>
-static void SwapValue(T &Value) {
- Value = sys::SwapByteOrder(Value);
+namespace {
+ struct section_base {
+ char sectname[16];
+ char segname[16];
+ };
}
template<typename T>
-static void SwapStruct(T &Value);
-
-template<>
-void SwapStruct(MachO::any_relocation_info &H) {
- SwapValue(H.r_word0);
- SwapValue(H.r_word1);
-}
-
-template<>
-void SwapStruct(MachO::load_command &L) {
- SwapValue(L.cmd);
- SwapValue(L.cmdsize);
-}
-
-template<>
-void SwapStruct(nlist_base &S) {
- SwapValue(S.n_strx);
- SwapValue(S.n_desc);
-}
-
-template<>
-void SwapStruct(MachO::section &S) {
- SwapValue(S.addr);
- SwapValue(S.size);
- SwapValue(S.offset);
- SwapValue(S.align);
- SwapValue(S.reloff);
- SwapValue(S.nreloc);
- SwapValue(S.flags);
- SwapValue(S.reserved1);
- SwapValue(S.reserved2);
-}
-
-template<>
-void SwapStruct(MachO::section_64 &S) {
- SwapValue(S.addr);
- SwapValue(S.size);
- SwapValue(S.offset);
- SwapValue(S.align);
- SwapValue(S.reloff);
- SwapValue(S.nreloc);
- SwapValue(S.flags);
- SwapValue(S.reserved1);
- SwapValue(S.reserved2);
- SwapValue(S.reserved3);
-}
-
-template<>
-void SwapStruct(MachO::nlist &S) {
- SwapValue(S.n_strx);
- SwapValue(S.n_desc);
- SwapValue(S.n_value);
-}
-
-template<>
-void SwapStruct(MachO::nlist_64 &S) {
- SwapValue(S.n_strx);
- SwapValue(S.n_desc);
- SwapValue(S.n_value);
-}
-
-template<>
-void SwapStruct(MachO::mach_header &H) {
- SwapValue(H.magic);
- SwapValue(H.cputype);
- SwapValue(H.cpusubtype);
- SwapValue(H.filetype);
- SwapValue(H.ncmds);
- SwapValue(H.sizeofcmds);
- SwapValue(H.flags);
-}
-
-template<>
-void SwapStruct(MachO::mach_header_64 &H) {
- SwapValue(H.magic);
- SwapValue(H.cputype);
- SwapValue(H.cpusubtype);
- SwapValue(H.filetype);
- SwapValue(H.ncmds);
- SwapValue(H.sizeofcmds);
- SwapValue(H.flags);
- SwapValue(H.reserved);
-}
-
-template<>
-void SwapStruct(MachO::symtab_command &C) {
- SwapValue(C.cmd);
- SwapValue(C.cmdsize);
- SwapValue(C.symoff);
- SwapValue(C.nsyms);
- SwapValue(C.stroff);
- SwapValue(C.strsize);
-}
-
-template<>
-void SwapStruct(MachO::dysymtab_command &C) {
- SwapValue(C.cmd);
- SwapValue(C.cmdsize);
- SwapValue(C.ilocalsym);
- SwapValue(C.nlocalsym);
- SwapValue(C.iextdefsym);
- SwapValue(C.nextdefsym);
- SwapValue(C.iundefsym);
- SwapValue(C.nundefsym);
- SwapValue(C.tocoff);
- SwapValue(C.ntoc);
- SwapValue(C.modtaboff);
- SwapValue(C.nmodtab);
- SwapValue(C.extrefsymoff);
- SwapValue(C.nextrefsyms);
- SwapValue(C.indirectsymoff);
- SwapValue(C.nindirectsyms);
- SwapValue(C.extreloff);
- SwapValue(C.nextrel);
- SwapValue(C.locreloff);
- SwapValue(C.nlocrel);
-}
-
-template<>
-void SwapStruct(MachO::linkedit_data_command &C) {
- SwapValue(C.cmd);
- SwapValue(C.cmdsize);
- SwapValue(C.dataoff);
- SwapValue(C.datasize);
-}
-
-template<>
-void SwapStruct(MachO::segment_command &C) {
- SwapValue(C.cmd);
- SwapValue(C.cmdsize);
- SwapValue(C.vmaddr);
- SwapValue(C.vmsize);
- SwapValue(C.fileoff);
- SwapValue(C.filesize);
- SwapValue(C.maxprot);
- SwapValue(C.initprot);
- SwapValue(C.nsects);
- SwapValue(C.flags);
-}
-
-template<>
-void SwapStruct(MachO::segment_command_64 &C) {
- SwapValue(C.cmd);
- SwapValue(C.cmdsize);
- SwapValue(C.vmaddr);
- SwapValue(C.vmsize);
- SwapValue(C.fileoff);
- SwapValue(C.filesize);
- SwapValue(C.maxprot);
- SwapValue(C.initprot);
- SwapValue(C.nsects);
- SwapValue(C.flags);
-}
-
-template<>
-void SwapStruct(uint32_t &C) {
- SwapValue(C);
-}
-
-template<>
-void SwapStruct(MachO::linker_options_command &C) {
- SwapValue(C.cmd);
- SwapValue(C.cmdsize);
- SwapValue(C.count);
-}
-
-template<>
-void SwapStruct(MachO::data_in_code_entry &C) {
- SwapValue(C.offset);
- SwapValue(C.length);
- SwapValue(C.kind);
-}
-
-template<typename T>
-T getStruct(const MachOObjectFile *O, const char *P) {
+static T getStruct(const MachOObjectFile *O, const char *P) {
T Cmd;
memcpy(&Cmd, P, sizeof(T));
if (O->isLittleEndian() != sys::IsLittleEndianHost)
- SwapStruct(Cmd);
+ MachO::swapStruct(Cmd);
return Cmd;
}
@@ -259,10 +73,10 @@ static const char *getPtr(const MachOObjectFile *O, size_t Offset) {
return O->getData().substr(Offset, 1).data();
}
-static nlist_base
+static MachO::nlist_base
getSymbolTableEntryBase(const MachOObjectFile *O, DataRefImpl DRI) {
const char *P = reinterpret_cast<const char *>(DRI.p);
- return getStruct<nlist_base>(O, P);
+ return getStruct<MachO::nlist_base>(O, P);
}
static StringRef parseSegmentOrSectionName(const char *P) {
@@ -275,18 +89,9 @@ static StringRef parseSegmentOrSectionName(const char *P) {
// Helper to advance a section or symbol iterator multiple increments at a time.
template<class T>
-static error_code advance(T &it, size_t Val) {
- error_code ec;
- while (Val--) {
- it.increment(ec);
- }
- return ec;
-}
-
-template<class T>
-static void advanceTo(T &it, size_t Val) {
- if (error_code ec = advance(it, Val))
- report_fatal_error(ec.message());
+static void advance(T &it, size_t Val) {
+ while (Val--)
+ ++it;
}
static unsigned getCPUType(const MachOObjectFile *O) {
@@ -305,18 +110,16 @@ static void printRelocationTargetName(const MachOObjectFile *O,
if (IsScattered) {
uint32_t Val = O->getPlainRelocationSymbolNum(RE);
- error_code ec;
- for (symbol_iterator SI = O->begin_symbols(), SE = O->end_symbols();
- SI != SE; SI.increment(ec)) {
- if (ec) report_fatal_error(ec.message());
-
+ for (const SymbolRef &Symbol : O->symbols()) {
+ std::error_code ec;
uint64_t Addr;
StringRef Name;
- if ((ec = SI->getAddress(Addr)))
+ if ((ec = Symbol.getAddress(Addr)))
report_fatal_error(ec.message());
- if (Addr != Val) continue;
- if ((ec = SI->getName(Name)))
+ if (Addr != Val)
+ continue;
+ if ((ec = Symbol.getName(Name)))
report_fatal_error(ec.message());
fmt << Name;
return;
@@ -324,17 +127,16 @@ static void printRelocationTargetName(const MachOObjectFile *O,
// If we couldn't find a symbol that this relocation refers to, try
// to find a section beginning instead.
- for (section_iterator SI = O->begin_sections(), SE = O->end_sections();
- SI != SE; SI.increment(ec)) {
- if (ec) report_fatal_error(ec.message());
-
+ for (const SectionRef &Section : O->sections()) {
+ std::error_code ec;
uint64_t Addr;
StringRef Name;
- if ((ec = SI->getAddress(Addr)))
+ if ((ec = Section.getAddress(Addr)))
report_fatal_error(ec.message());
- if (Addr != Val) continue;
- if ((ec = SI->getName(Name)))
+ if (Addr != Val)
+ continue;
+ if ((ec = Section.getName(Name)))
report_fatal_error(ec.message());
fmt << Name;
return;
@@ -349,13 +151,13 @@ static void printRelocationTargetName(const MachOObjectFile *O,
uint64_t Val = O->getPlainRelocationSymbolNum(RE);
if (isExtern) {
- symbol_iterator SI = O->begin_symbols();
- advanceTo(SI, Val);
+ symbol_iterator SI = O->symbol_begin();
+ advance(SI, Val);
SI->getName(S);
} else {
- section_iterator SI = O->begin_sections();
+ section_iterator SI = O->section_begin();
// Adjust for the fact that sections are 1-indexed.
- advanceTo(SI, Val - 1);
+ advance(SI, Val - 1);
SI->getName(S);
}
@@ -419,11 +221,12 @@ static uint32_t getSectionFlags(const MachOObjectFile *O,
return Sect.flags;
}
-MachOObjectFile::MachOObjectFile(MemoryBuffer *Object,
+MachOObjectFile::MachOObjectFile(std::unique_ptr<MemoryBuffer> Object,
bool IsLittleEndian, bool Is64bits,
- error_code &ec)
- : ObjectFile(getMachOType(IsLittleEndian, Is64bits), Object),
- SymtabLoadCmd(NULL), DysymtabLoadCmd(NULL), DataInCodeLoadCmd(NULL) {
+ std::error_code &EC)
+ : ObjectFile(getMachOType(IsLittleEndian, Is64bits), std::move(Object)),
+ SymtabLoadCmd(nullptr), DysymtabLoadCmd(nullptr),
+ DataInCodeLoadCmd(nullptr) {
uint32_t LoadCommandCount = this->getHeader().ncmds;
MachO::LoadCommandType SegmentLoadType = is64Bit() ?
MachO::LC_SEGMENT_64 : MachO::LC_SEGMENT;
@@ -445,6 +248,12 @@ MachOObjectFile::MachOObjectFile(MemoryBuffer *Object,
const char *Sec = getSectionPtr(this, Load, J);
Sections.push_back(Sec);
}
+ } else if (Load.C.cmd == MachO::LC_LOAD_DYLIB ||
+ Load.C.cmd == MachO::LC_LOAD_WEAK_DYLIB ||
+ Load.C.cmd == MachO::LC_LAZY_LOAD_DYLIB ||
+ Load.C.cmd == MachO::LC_REEXPORT_DYLIB ||
+ Load.C.cmd == MachO::LC_LOAD_UPWARD_DYLIB) {
+ Libraries.push_back(Load.Ptr);
}
if (I == LoadCommandCount - 1)
@@ -454,66 +263,71 @@ MachOObjectFile::MachOObjectFile(MemoryBuffer *Object,
}
}
-error_code MachOObjectFile::getSymbolNext(DataRefImpl Symb,
- SymbolRef &Res) const {
+void MachOObjectFile::moveSymbolNext(DataRefImpl &Symb) const {
unsigned SymbolTableEntrySize = is64Bit() ?
sizeof(MachO::nlist_64) :
sizeof(MachO::nlist);
Symb.p += SymbolTableEntrySize;
- Res = SymbolRef(Symb, this);
- return object_error::success;
}
-error_code MachOObjectFile::getSymbolName(DataRefImpl Symb,
- StringRef &Res) const {
+std::error_code MachOObjectFile::getSymbolName(DataRefImpl Symb,
+ StringRef &Res) const {
StringRef StringTable = getStringTableData();
- nlist_base Entry = getSymbolTableEntryBase(this, Symb);
+ MachO::nlist_base Entry = getSymbolTableEntryBase(this, Symb);
const char *Start = &StringTable.data()[Entry.n_strx];
Res = StringRef(Start);
return object_error::success;
}
-error_code MachOObjectFile::getSymbolAddress(DataRefImpl Symb,
- uint64_t &Res) const {
+// getIndirectName() returns the name of the aliased symbol whose string table
+// index is in the n_value field.
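+// Illustrative example (not from the upstream sources): for an N_INDR entry
+// whose n_value is 40, Res is set to the C string starting at byte 40 of the
+// string table, i.e. the name of the symbol this one aliases.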
+std::error_code MachOObjectFile::getIndirectName(DataRefImpl Symb,
+ StringRef &Res) const {
+ StringRef StringTable = getStringTableData();
+ uint64_t NValue;
if (is64Bit()) {
MachO::nlist_64 Entry = getSymbol64TableEntry(Symb);
- Res = Entry.n_value;
+ NValue = Entry.n_value;
+ if ((Entry.n_type & MachO::N_TYPE) != MachO::N_INDR)
+ return object_error::parse_failed;
} else {
MachO::nlist Entry = getSymbolTableEntry(Symb);
- Res = Entry.n_value;
+ NValue = Entry.n_value;
+ if ((Entry.n_type & MachO::N_TYPE) != MachO::N_INDR)
+ return object_error::parse_failed;
}
+ if (NValue >= StringTable.size())
+ return object_error::parse_failed;
+ const char *Start = &StringTable.data()[NValue];
+ Res = StringRef(Start);
return object_error::success;
}
-error_code
-MachOObjectFile::getSymbolFileOffset(DataRefImpl Symb,
- uint64_t &Res) const {
- nlist_base Entry = getSymbolTableEntryBase(this, Symb);
- getSymbolAddress(Symb, Res);
- if (Entry.n_sect) {
- uint64_t Delta;
- DataRefImpl SecRel;
- SecRel.d.a = Entry.n_sect-1;
- if (is64Bit()) {
- MachO::section_64 Sec = getSection64(SecRel);
- Delta = Sec.offset - Sec.addr;
- } else {
- MachO::section Sec = getSection(SecRel);
- Delta = Sec.offset - Sec.addr;
- }
-
- Res += Delta;
+std::error_code MachOObjectFile::getSymbolAddress(DataRefImpl Symb,
+ uint64_t &Res) const {
+ if (is64Bit()) {
+ MachO::nlist_64 Entry = getSymbol64TableEntry(Symb);
+ if ((Entry.n_type & MachO::N_TYPE) == MachO::N_UNDF &&
+ Entry.n_value == 0)
+ Res = UnknownAddressOrSize;
+ else
+ Res = Entry.n_value;
+ } else {
+ MachO::nlist Entry = getSymbolTableEntry(Symb);
+ if ((Entry.n_type & MachO::N_TYPE) == MachO::N_UNDF &&
+ Entry.n_value == 0)
+ Res = UnknownAddressOrSize;
+ else
+ Res = Entry.n_value;
}
-
return object_error::success;
}
-error_code MachOObjectFile::getSymbolAlignment(DataRefImpl DRI,
- uint32_t &Result) const {
- uint32_t flags;
- this->getSymbolFlags(DRI, flags);
+std::error_code MachOObjectFile::getSymbolAlignment(DataRefImpl DRI,
+ uint32_t &Result) const {
+ uint32_t flags = getSymbolFlags(DRI);
if (flags & SymbolRef::SF_Common) {
- nlist_base Entry = getSymbolTableEntryBase(this, DRI);
+ MachO::nlist_base Entry = getSymbolTableEntryBase(this, DRI);
Result = 1 << MachO::GET_COMM_ALIGN(Entry.n_desc);
} else {
Result = 0;
@@ -521,22 +335,25 @@ error_code MachOObjectFile::getSymbolAlignment(DataRefImpl DRI,
return object_error::success;
}
-error_code MachOObjectFile::getSymbolSize(DataRefImpl DRI,
- uint64_t &Result) const {
+std::error_code MachOObjectFile::getSymbolSize(DataRefImpl DRI,
+ uint64_t &Result) const {
uint64_t BeginOffset;
uint64_t EndOffset = 0;
uint8_t SectionIndex;
- nlist_base Entry = getSymbolTableEntryBase(this, DRI);
+ MachO::nlist_base Entry = getSymbolTableEntryBase(this, DRI);
uint64_t Value;
getSymbolAddress(DRI, Value);
+ if (Value == UnknownAddressOrSize) {
+ Result = UnknownAddressOrSize;
+ return object_error::success;
+ }
BeginOffset = Value;
SectionIndex = Entry.n_sect;
if (!SectionIndex) {
- uint32_t flags = SymbolRef::SF_None;
- this->getSymbolFlags(DRI, flags);
+ uint32_t flags = getSymbolFlags(DRI);
if (flags & SymbolRef::SF_Common)
Result = Value;
else
@@ -545,12 +362,12 @@ error_code MachOObjectFile::getSymbolSize(DataRefImpl DRI,
}
// Unfortunately symbols are unsorted so we need to touch all
// symbols from load command
- error_code ec;
- for (symbol_iterator I = begin_symbols(), E = end_symbols(); I != E;
- I.increment(ec)) {
- DataRefImpl DRI = I->getRawDataRefImpl();
+ for (const SymbolRef &Symbol : symbols()) {
+ DataRefImpl DRI = Symbol.getRawDataRefImpl();
Entry = getSymbolTableEntryBase(this, DRI);
getSymbolAddress(DRI, Value);
+ if (Value == UnknownAddressOrSize)
+ continue;
if (Entry.n_sect == SectionIndex && Value > BeginOffset)
if (!EndOffset || Value < EndOffset)
EndOffset = Value;
@@ -567,9 +384,9 @@ error_code MachOObjectFile::getSymbolSize(DataRefImpl DRI,
return object_error::success;
}
-error_code MachOObjectFile::getSymbolType(DataRefImpl Symb,
- SymbolRef::Type &Res) const {
- nlist_base Entry = getSymbolTableEntryBase(this, Symb);
+std::error_code MachOObjectFile::getSymbolType(DataRefImpl Symb,
+ SymbolRef::Type &Res) const {
+ MachO::nlist_base Entry = getSymbolTableEntryBase(this, Symb);
uint8_t n_type = Entry.n_type;
Res = SymbolRef::ST_Other;
@@ -591,19 +408,20 @@ error_code MachOObjectFile::getSymbolType(DataRefImpl Symb,
return object_error::success;
}
-error_code MachOObjectFile::getSymbolFlags(DataRefImpl DRI,
- uint32_t &Result) const {
- nlist_base Entry = getSymbolTableEntryBase(this, DRI);
+uint32_t MachOObjectFile::getSymbolFlags(DataRefImpl DRI) const {
+ MachO::nlist_base Entry = getSymbolTableEntryBase(this, DRI);
uint8_t MachOType = Entry.n_type;
uint16_t MachOFlags = Entry.n_desc;
- // TODO: Correctly set SF_ThreadLocal
- Result = SymbolRef::SF_None;
+ uint32_t Result = SymbolRef::SF_None;
if ((MachOType & MachO::N_TYPE) == MachO::N_UNDF)
Result |= SymbolRef::SF_Undefined;
+ if ((MachOType & MachO::N_TYPE) == MachO::N_INDR)
+ Result |= SymbolRef::SF_Indirect;
+
if (MachOType & MachO::N_STAB)
Result |= SymbolRef::SF_FormatSpecific;
@@ -612,7 +430,7 @@ error_code MachOObjectFile::getSymbolFlags(DataRefImpl DRI,
if ((MachOType & MachO::N_TYPE) == MachO::N_UNDF) {
uint64_t Value;
getSymbolAddress(DRI, Value);
- if (Value)
+ if (Value && Value != UnknownAddressOrSize)
Result |= SymbolRef::SF_Common;
}
}
@@ -623,17 +441,16 @@ error_code MachOObjectFile::getSymbolFlags(DataRefImpl DRI,
if ((MachOType & MachO::N_TYPE) == MachO::N_ABS)
Result |= SymbolRef::SF_Absolute;
- return object_error::success;
+ return Result;
}
-error_code
-MachOObjectFile::getSymbolSection(DataRefImpl Symb,
- section_iterator &Res) const {
- nlist_base Entry = getSymbolTableEntryBase(this, Symb);
+std::error_code MachOObjectFile::getSymbolSection(DataRefImpl Symb,
+ section_iterator &Res) const {
+ MachO::nlist_base Entry = getSymbolTableEntryBase(this, Symb);
uint8_t index = Entry.n_sect;
if (index == 0) {
- Res = end_sections();
+ Res = section_end();
} else {
DataRefImpl DRI;
DRI.d.a = index - 1;
@@ -643,27 +460,19 @@ MachOObjectFile::getSymbolSection(DataRefImpl Symb,
return object_error::success;
}
-error_code MachOObjectFile::getSymbolValue(DataRefImpl Symb,
- uint64_t &Val) const {
- report_fatal_error("getSymbolValue unimplemented in MachOObjectFile");
-}
-
-error_code MachOObjectFile::getSectionNext(DataRefImpl Sec,
- SectionRef &Res) const {
+void MachOObjectFile::moveSectionNext(DataRefImpl &Sec) const {
Sec.d.a++;
- Res = SectionRef(Sec, this);
- return object_error::success;
}
-error_code
-MachOObjectFile::getSectionName(DataRefImpl Sec, StringRef &Result) const {
+std::error_code MachOObjectFile::getSectionName(DataRefImpl Sec,
+ StringRef &Result) const {
ArrayRef<char> Raw = getSectionRawName(Sec);
Result = parseSegmentOrSectionName(Raw.data());
return object_error::success;
}
-error_code
-MachOObjectFile::getSectionAddress(DataRefImpl Sec, uint64_t &Res) const {
+std::error_code MachOObjectFile::getSectionAddress(DataRefImpl Sec,
+ uint64_t &Res) const {
if (is64Bit()) {
MachO::section_64 Sect = getSection64(Sec);
Res = Sect.addr;
@@ -674,8 +483,8 @@ MachOObjectFile::getSectionAddress(DataRefImpl Sec, uint64_t &Res) const {
return object_error::success;
}
-error_code
-MachOObjectFile::getSectionSize(DataRefImpl Sec, uint64_t &Res) const {
+std::error_code MachOObjectFile::getSectionSize(DataRefImpl Sec,
+ uint64_t &Res) const {
if (is64Bit()) {
MachO::section_64 Sect = getSection64(Sec);
Res = Sect.size;
@@ -687,8 +496,8 @@ MachOObjectFile::getSectionSize(DataRefImpl Sec, uint64_t &Res) const {
return object_error::success;
}
-error_code
-MachOObjectFile::getSectionContents(DataRefImpl Sec, StringRef &Res) const {
+std::error_code MachOObjectFile::getSectionContents(DataRefImpl Sec,
+ StringRef &Res) const {
uint32_t Offset;
uint64_t Size;
@@ -706,8 +515,8 @@ MachOObjectFile::getSectionContents(DataRefImpl Sec, StringRef &Res) const {
return object_error::success;
}
-error_code
-MachOObjectFile::getSectionAlignment(DataRefImpl Sec, uint64_t &Res) const {
+std::error_code MachOObjectFile::getSectionAlignment(DataRefImpl Sec,
+ uint64_t &Res) const {
uint32_t Align;
if (is64Bit()) {
MachO::section_64 Sect = getSection64(Sec);
@@ -721,26 +530,34 @@ MachOObjectFile::getSectionAlignment(DataRefImpl Sec, uint64_t &Res) const {
return object_error::success;
}
-error_code
-MachOObjectFile::isSectionText(DataRefImpl Sec, bool &Res) const {
+std::error_code MachOObjectFile::isSectionText(DataRefImpl Sec,
+ bool &Res) const {
uint32_t Flags = getSectionFlags(this, Sec);
Res = Flags & MachO::S_ATTR_PURE_INSTRUCTIONS;
return object_error::success;
}
-error_code MachOObjectFile::isSectionData(DataRefImpl DRI, bool &Result) const {
- // FIXME: Unimplemented.
- Result = false;
+std::error_code MachOObjectFile::isSectionData(DataRefImpl Sec,
+ bool &Result) const {
+ uint32_t Flags = getSectionFlags(this, Sec);
+ unsigned SectionType = Flags & MachO::SECTION_TYPE;
+ Result = !(Flags & MachO::S_ATTR_PURE_INSTRUCTIONS) &&
+ !(SectionType == MachO::S_ZEROFILL ||
+ SectionType == MachO::S_GB_ZEROFILL);
return object_error::success;
}
-error_code MachOObjectFile::isSectionBSS(DataRefImpl DRI, bool &Result) const {
- // FIXME: Unimplemented.
- Result = false;
+std::error_code MachOObjectFile::isSectionBSS(DataRefImpl Sec,
+ bool &Result) const {
+ uint32_t Flags = getSectionFlags(this, Sec);
+ unsigned SectionType = Flags & MachO::SECTION_TYPE;
+ Result = !(Flags & MachO::S_ATTR_PURE_INSTRUCTIONS) &&
+ (SectionType == MachO::S_ZEROFILL ||
+ SectionType == MachO::S_GB_ZEROFILL);
return object_error::success;
}
-error_code
+std::error_code
MachOObjectFile::isSectionRequiredForExecution(DataRefImpl Sec,
bool &Result) const {
// FIXME: Unimplemented.
@@ -748,15 +565,15 @@ MachOObjectFile::isSectionRequiredForExecution(DataRefImpl Sec,
return object_error::success;
}
-error_code MachOObjectFile::isSectionVirtual(DataRefImpl Sec,
- bool &Result) const {
+std::error_code MachOObjectFile::isSectionVirtual(DataRefImpl Sec,
+ bool &Result) const {
// FIXME: Unimplemented.
Result = false;
return object_error::success;
}
-error_code
-MachOObjectFile::isSectionZeroInit(DataRefImpl Sec, bool &Res) const {
+std::error_code MachOObjectFile::isSectionZeroInit(DataRefImpl Sec,
+ bool &Res) const {
uint32_t Flags = getSectionFlags(this, Sec);
unsigned SectionType = Flags & MachO::SECTION_TYPE;
Res = SectionType == MachO::S_ZEROFILL ||
@@ -764,8 +581,8 @@ MachOObjectFile::isSectionZeroInit(DataRefImpl Sec, bool &Res) const {
return object_error::success;
}
-error_code MachOObjectFile::isSectionReadOnlyData(DataRefImpl Sec,
- bool &Result) const {
+std::error_code MachOObjectFile::isSectionReadOnlyData(DataRefImpl Sec,
+ bool &Result) const {
// Consider using the code from isSectionText to look for __const sections.
// Alternately, emit S_ATTR_PURE_INSTRUCTIONS and/or S_ATTR_SOME_INSTRUCTIONS
// to use section attributes to distinguish code from data.
@@ -775,9 +592,9 @@ error_code MachOObjectFile::isSectionReadOnlyData(DataRefImpl Sec,
return object_error::success;
}
-error_code
-MachOObjectFile::sectionContainsSymbol(DataRefImpl Sec, DataRefImpl Symb,
- bool &Result) const {
+std::error_code MachOObjectFile::sectionContainsSymbol(DataRefImpl Sec,
+ DataRefImpl Symb,
+ bool &Result) const {
SymbolRef::Type ST;
this->getSymbolType(Symb, ST);
if (ST == SymbolRef::ST_Unknown) {
@@ -798,58 +615,50 @@ MachOObjectFile::sectionContainsSymbol(DataRefImpl Sec, DataRefImpl Symb,
}
relocation_iterator MachOObjectFile::section_rel_begin(DataRefImpl Sec) const {
- uint32_t Offset;
- if (is64Bit()) {
- MachO::section_64 Sect = getSection64(Sec);
- Offset = Sect.reloff;
- } else {
- MachO::section Sect = getSection(Sec);
- Offset = Sect.reloff;
- }
-
DataRefImpl Ret;
- Ret.p = reinterpret_cast<uintptr_t>(getPtr(this, Offset));
+ Ret.d.a = Sec.d.a;
+ Ret.d.b = 0;
return relocation_iterator(RelocationRef(Ret, this));
}
relocation_iterator
MachOObjectFile::section_rel_end(DataRefImpl Sec) const {
- uint32_t Offset;
uint32_t Num;
if (is64Bit()) {
MachO::section_64 Sect = getSection64(Sec);
- Offset = Sect.reloff;
Num = Sect.nreloc;
} else {
MachO::section Sect = getSection(Sec);
- Offset = Sect.reloff;
Num = Sect.nreloc;
}
- const MachO::any_relocation_info *P =
- reinterpret_cast<const MachO::any_relocation_info *>(getPtr(this, Offset));
-
DataRefImpl Ret;
- Ret.p = reinterpret_cast<uintptr_t>(P + Num);
+ Ret.d.a = Sec.d.a;
+ Ret.d.b = Num;
return relocation_iterator(RelocationRef(Ret, this));
}
-error_code MachOObjectFile::getRelocationNext(DataRefImpl Rel,
- RelocationRef &Res) const {
- const MachO::any_relocation_info *P =
- reinterpret_cast<const MachO::any_relocation_info *>(Rel.p);
- Rel.p = reinterpret_cast<uintptr_t>(P + 1);
- Res = RelocationRef(Rel, this);
- return object_error::success;
+void MachOObjectFile::moveRelocationNext(DataRefImpl &Rel) const {
+ ++Rel.d.b;
}
-error_code
-MachOObjectFile::getRelocationAddress(DataRefImpl Rel, uint64_t &Res) const {
- report_fatal_error("getRelocationAddress not implemented in MachOObjectFile");
+std::error_code MachOObjectFile::getRelocationAddress(DataRefImpl Rel,
+ uint64_t &Res) const {
+ uint64_t Offset;
+ getRelocationOffset(Rel, Offset);
+
+ DataRefImpl Sec;
+ Sec.d.a = Rel.d.a;
+ uint64_t SecAddress;
+ getSectionAddress(Sec, SecAddress);
+ Res = SecAddress + Offset;
+ return object_error::success;
}
-error_code MachOObjectFile::getRelocationOffset(DataRefImpl Rel,
- uint64_t &Res) const {
+std::error_code MachOObjectFile::getRelocationOffset(DataRefImpl Rel,
+ uint64_t &Res) const {
+ assert(getHeader().filetype == MachO::MH_OBJECT &&
+ "Only implemented for MH_OBJECT");
MachO::any_relocation_info RE = getRelocation(Rel);
Res = getAnyRelocationAddress(RE);
return object_error::success;
@@ -858,10 +667,13 @@ error_code MachOObjectFile::getRelocationOffset(DataRefImpl Rel,
symbol_iterator
MachOObjectFile::getRelocationSymbol(DataRefImpl Rel) const {
MachO::any_relocation_info RE = getRelocation(Rel);
+ if (isRelocationScattered(RE))
+ return symbol_end();
+
uint32_t SymbolIdx = getPlainRelocationSymbolNum(RE);
bool isExtern = getPlainRelocationExternal(RE);
if (!isExtern)
- return end_symbols();
+ return symbol_end();
MachO::symtab_command S = getSymtabLoadCommand();
unsigned SymbolTableEntrySize = is64Bit() ?
@@ -873,14 +685,14 @@ MachOObjectFile::getRelocationSymbol(DataRefImpl Rel) const {
return symbol_iterator(SymbolRef(Sym, this));
}
-error_code MachOObjectFile::getRelocationType(DataRefImpl Rel,
- uint64_t &Res) const {
+std::error_code MachOObjectFile::getRelocationType(DataRefImpl Rel,
+ uint64_t &Res) const {
MachO::any_relocation_info RE = getRelocation(Rel);
Res = getAnyRelocationType(RE);
return object_error::success;
}
-error_code
+std::error_code
MachOObjectFile::getRelocationTypeName(DataRefImpl Rel,
SmallVectorImpl<char> &Result) const {
StringRef res;
@@ -899,7 +711,7 @@ MachOObjectFile::getRelocationTypeName(DataRefImpl Rel,
"GENERIC_RELOC_LOCAL_SECTDIFF",
"GENERIC_RELOC_TLV" };
- if (RType > 6)
+ if (RType > 5)
res = "Unknown";
else
res = Table[RType];
@@ -943,6 +755,23 @@ MachOObjectFile::getRelocationTypeName(DataRefImpl Rel,
res = Table[RType];
break;
}
+ case Triple::arm64:
+ case Triple::aarch64: {
+ static const char *const Table[] = {
+ "ARM64_RELOC_UNSIGNED", "ARM64_RELOC_SUBTRACTOR",
+ "ARM64_RELOC_BRANCH26", "ARM64_RELOC_PAGE21",
+ "ARM64_RELOC_PAGEOFF12", "ARM64_RELOC_GOT_LOAD_PAGE21",
+ "ARM64_RELOC_GOT_LOAD_PAGEOFF12", "ARM64_RELOC_POINTER_TO_GOT",
+ "ARM64_RELOC_TLVP_LOAD_PAGE21", "ARM64_RELOC_TLVP_LOAD_PAGEOFF12",
+ "ARM64_RELOC_ADDEND"
+ };
+
+ if (RType >= array_lengthof(Table))
+ res = "Unknown";
+ else
+ res = Table[RType];
+ break;
+ }
case Triple::ppc: {
static const char *const Table[] = {
"PPC_RELOC_VANILLA",
@@ -962,7 +791,10 @@ MachOObjectFile::getRelocationTypeName(DataRefImpl Rel,
"PPC_RELOC_LO14_SECTDIFF",
"PPC_RELOC_LOCAL_SECTDIFF" };
- res = Table[RType];
+ if (RType > 15)
+ res = "Unknown";
+ else
+ res = Table[RType];
break;
}
case Triple::UnknownArch:
@@ -973,7 +805,7 @@ MachOObjectFile::getRelocationTypeName(DataRefImpl Rel,
return object_error::success;
}
-error_code
+std::error_code
MachOObjectFile::getRelocationValueString(DataRefImpl Rel,
SmallVectorImpl<char> &Result) const {
MachO::any_relocation_info RE = getRelocation(Rel);
@@ -1002,7 +834,7 @@ MachOObjectFile::getRelocationValueString(DataRefImpl Rel,
}
case MachO::X86_64_RELOC_SUBTRACTOR: {
DataRefImpl RelNext = Rel;
- RelNext.d.a++;
+ moveRelocationNext(RelNext);
MachO::any_relocation_info RENext = getRelocation(RelNext);
// X86_64_RELOC_SUBTRACTOR must be followed by a relocation of type
@@ -1050,7 +882,7 @@ MachOObjectFile::getRelocationValueString(DataRefImpl Rel,
return object_error::success;
case MachO::GENERIC_RELOC_SECTDIFF: {
DataRefImpl RelNext = Rel;
- RelNext.d.a++;
+ moveRelocationNext(RelNext);
MachO::any_relocation_info RENext = getRelocation(RelNext);
// X86 sect diff's must be followed by a relocation of type
@@ -1072,7 +904,7 @@ MachOObjectFile::getRelocationValueString(DataRefImpl Rel,
switch (Type) {
case MachO::GENERIC_RELOC_LOCAL_SECTDIFF: {
DataRefImpl RelNext = Rel;
- RelNext.d.a++;
+ moveRelocationNext(RelNext);
MachO::any_relocation_info RENext = getRelocation(RelNext);
// X86 sect diff's must be followed by a relocation of type
@@ -1111,7 +943,7 @@ MachOObjectFile::getRelocationValueString(DataRefImpl Rel,
printRelocationTargetName(this, RE, fmt);
DataRefImpl RelNext = Rel;
- RelNext.d.a++;
+ moveRelocationNext(RelNext);
MachO::any_relocation_info RENext = getRelocation(RelNext);
// ARM half relocs must be followed by a relocation of type
@@ -1149,8 +981,8 @@ MachOObjectFile::getRelocationValueString(DataRefImpl Rel,
return object_error::success;
}
-error_code
-MachOObjectFile::getRelocationHidden(DataRefImpl Rel, bool &Result) const {
+std::error_code MachOObjectFile::getRelocationHidden(DataRefImpl Rel,
+ bool &Result) const {
unsigned Arch = getArch();
uint64_t Type;
getRelocationType(Rel, Type);
@@ -1177,30 +1009,207 @@ MachOObjectFile::getRelocationHidden(DataRefImpl Rel, bool &Result) const {
return object_error::success;
}
-error_code MachOObjectFile::getLibraryNext(DataRefImpl LibData,
- LibraryRef &Res) const {
+std::error_code MachOObjectFile::getLibraryNext(DataRefImpl LibData,
+ LibraryRef &Res) const {
report_fatal_error("Needed libraries unimplemented in MachOObjectFile");
}
-error_code MachOObjectFile::getLibraryPath(DataRefImpl LibData,
- StringRef &Res) const {
+std::error_code MachOObjectFile::getLibraryPath(DataRefImpl LibData,
+ StringRef &Res) const {
report_fatal_error("Needed libraries unimplemented in MachOObjectFile");
}
-symbol_iterator MachOObjectFile::begin_symbols() const {
- DataRefImpl DRI;
- if (!SymtabLoadCmd)
- return symbol_iterator(SymbolRef(DRI, this));
+//
+// guessLibraryShortName() is passed the name of a dynamic library and returns
+// a guess at what the short name is. The name is returned as a substring of
+// the StringRef Name passed in. The name of the dynamic library is recognized as
+// a framework if it has one of the two following forms:
+// Foo.framework/Versions/A/Foo
+// Foo.framework/Foo
+// where A and Foo can be any string, and Foo may contain a trailing suffix
+// starting with an underbar. If the Name is recognized as a framework,
+// isFramework is set to true; otherwise it is set to false. If the Name has a
+// suffix, Suffix is set to the substring of Name that contains the suffix;
+// otherwise it is set to a NULL (empty) StringRef.
+//
+// The Name of the dynamic library is recognized as a library name if it has
+// one of the two following forms:
+// libFoo.A.dylib
+// libFoo.dylib
+// The library may have a suffix trailing the name Foo of the form:
+// libFoo_profile.A.dylib
+// libFoo_profile.dylib
+//
+// The Name of the dynamic library is also recognized as a library name if it
+// has the following form:
+// Foo.qtx
+//
+// If the Name of the dynamic library matches none of the forms above, a NULL
+// (empty) StringRef is returned.
+//
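+// Illustrative behaviour under the rules above (hypothetical inputs, not part
+// of the upstream sources):
+//   "/Library/Frameworks/Foo.framework/Versions/A/Foo" -> "Foo", isFramework
+//   "/usr/lib/libFoo_profile.A.dylib" -> "libFoo", Suffix = "_profile"
+//   "QT.A.qtx" -> "QT"
+//   "libFoo.so" -> "" (no recognized form)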
+StringRef MachOObjectFile::guessLibraryShortName(StringRef Name,
+ bool &isFramework,
+ StringRef &Suffix) {
+ StringRef Foo, F, DotFramework, V, Dylib, Lib, Dot, Qtx;
+ size_t a, b, c, d, Idx;
+
+ isFramework = false;
+ Suffix = StringRef();
+
+ // Pull off the last component and make Foo point to it
+ a = Name.rfind('/');
+ if (a == Name.npos || a == 0)
+ goto guess_library;
+ Foo = Name.slice(a+1, Name.npos);
+
+ // Look for a suffix starting with a '_'
+ Idx = Foo.rfind('_');
+ if (Idx != Foo.npos && Foo.size() >= 2) {
+ Suffix = Foo.slice(Idx, Foo.npos);
+ Foo = Foo.slice(0, Idx);
+ }
- MachO::symtab_command Symtab = getSymtabLoadCommand();
- DRI.p = reinterpret_cast<uintptr_t>(getPtr(this, Symtab.symoff));
- return symbol_iterator(SymbolRef(DRI, this));
+ // First look for the form Foo.framework/Foo
+ b = Name.rfind('/', a);
+ if (b == Name.npos)
+ Idx = 0;
+ else
+ Idx = b+1;
+ F = Name.slice(Idx, Idx + Foo.size());
+ DotFramework = Name.slice(Idx + Foo.size(),
+ Idx + Foo.size() + sizeof(".framework/")-1);
+ if (F == Foo && DotFramework == ".framework/") {
+ isFramework = true;
+ return Foo;
+ }
+
+ // Next look for the form Foo.framework/Versions/A/Foo
+ if (b == Name.npos)
+ goto guess_library;
+ c = Name.rfind('/', b);
+ if (c == Name.npos || c == 0)
+ goto guess_library;
+ V = Name.slice(c+1, Name.npos);
+ if (!V.startswith("Versions/"))
+ goto guess_library;
+ d = Name.rfind('/', c);
+ if (d == Name.npos)
+ Idx = 0;
+ else
+ Idx = d+1;
+ F = Name.slice(Idx, Idx + Foo.size());
+ DotFramework = Name.slice(Idx + Foo.size(),
+ Idx + Foo.size() + sizeof(".framework/")-1);
+ if (F == Foo && DotFramework == ".framework/") {
+ isFramework = true;
+ return Foo;
+ }
+
+guess_library:
+ // Pull off the suffix after the "." and make "a" point to it.
+ a = Name.rfind('.');
+ if (a == Name.npos || a == 0)
+ return StringRef();
+ Dylib = Name.slice(a, Name.npos);
+ if (Dylib != ".dylib")
+ goto guess_qtx;
+
+ // First pull off the version letter for the form Foo.A.dylib if any.
+ if (a >= 3) {
+ Dot = Name.slice(a-2, a-1);
+ if (Dot == ".")
+ a = a - 2;
+ }
+
+ b = Name.rfind('/', a);
+ if (b == Name.npos)
+ b = 0;
+ else
+ b = b+1;
+ // ignore any suffix after an underbar like Foo_profile.A.dylib
+ Idx = Name.find('_', b);
+ if (Idx != Name.npos && Idx != b) {
+ Lib = Name.slice(b, Idx);
+ Suffix = Name.slice(Idx, a);
+ }
+ else
+ Lib = Name.slice(b, a);
+ // There are incorrect library names of the form:
+ // libATS.A_profile.dylib so check for these.
+ if (Lib.size() >= 3) {
+ Dot = Lib.slice(Lib.size()-2, Lib.size()-1);
+ if (Dot == ".")
+ Lib = Lib.slice(0, Lib.size()-2);
+ }
+ return Lib;
+
+guess_qtx:
+ Qtx = Name.slice(a, Name.npos);
+ if (Qtx != ".qtx")
+ return StringRef();
+ b = Name.rfind('/', a);
+ if (b == Name.npos)
+ Lib = Name.slice(0, a);
+ else
+ Lib = Name.slice(b+1, a);
+ // There are library names of the form: QT.A.qtx so check for these.
+ if (Lib.size() >= 3) {
+ Dot = Lib.slice(Lib.size()-2, Lib.size()-1);
+ if (Dot == ".")
+ Lib = Lib.slice(0, Lib.size()-2);
+ }
+ return Lib;
+}
+
+// getLibraryShortNameByIndex() is used to get the short name of the library
+// for an undefined symbol in a linked Mach-O binary that was linked with the
+// normal two-level namespace default (that is MH_TWOLEVEL in the header).
+// It is passed the index (0-based) of the library as translated from
+// GET_LIBRARY_ORDINAL (1-based).
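+// For example (illustrative only): an undefined symbol whose n_desc yields
+// GET_LIBRARY_ORDINAL() == 2 was bound against the second LC_*_DYLIB load
+// command, so the caller passes Index == 1 here.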
+std::error_code MachOObjectFile::getLibraryShortNameByIndex(unsigned Index,
+ StringRef &Res) {
+ if (Index >= Libraries.size())
+ return object_error::parse_failed;
+
+ MachO::dylib_command D =
+ getStruct<MachO::dylib_command>(this, Libraries[Index]);
+ if (D.dylib.name >= D.cmdsize)
+ return object_error::parse_failed;
+
+ // If the cache of LibrariesShortNames is not built up, do that first for
+ // all the Libraries.
+ if (LibrariesShortNames.size() == 0) {
+ for (unsigned i = 0; i < Libraries.size(); i++) {
+ MachO::dylib_command D =
+ getStruct<MachO::dylib_command>(this, Libraries[i]);
+ if (D.dylib.name >= D.cmdsize) {
+ LibrariesShortNames.push_back(StringRef());
+ continue;
+ }
+ const char *P = (const char *)(Libraries[i]) + D.dylib.name;
+ StringRef Name = StringRef(P);
+ StringRef Suffix;
+ bool isFramework;
+ StringRef shortName = guessLibraryShortName(Name, isFramework, Suffix);
+ if (shortName == StringRef())
+ LibrariesShortNames.push_back(Name);
+ else
+ LibrariesShortNames.push_back(shortName);
+ }
+ }
+
+ Res = LibrariesShortNames[Index];
+ return object_error::success;
}
-symbol_iterator MachOObjectFile::end_symbols() const {
+basic_symbol_iterator MachOObjectFile::symbol_begin_impl() const {
+ return getSymbolByIndex(0);
+}
+
+basic_symbol_iterator MachOObjectFile::symbol_end_impl() const {
DataRefImpl DRI;
if (!SymtabLoadCmd)
- return symbol_iterator(SymbolRef(DRI, this));
+ return basic_symbol_iterator(SymbolRef(DRI, this));
MachO::symtab_command Symtab = getSymtabLoadCommand();
unsigned SymbolTableEntrySize = is64Bit() ?
@@ -1209,36 +1218,40 @@ symbol_iterator MachOObjectFile::end_symbols() const {
unsigned Offset = Symtab.symoff +
Symtab.nsyms * SymbolTableEntrySize;
DRI.p = reinterpret_cast<uintptr_t>(getPtr(this, Offset));
- return symbol_iterator(SymbolRef(DRI, this));
+ return basic_symbol_iterator(SymbolRef(DRI, this));
}
-symbol_iterator MachOObjectFile::begin_dynamic_symbols() const {
- // TODO: implement
- report_fatal_error("Dynamic symbols unimplemented in MachOObjectFile");
-}
+basic_symbol_iterator MachOObjectFile::getSymbolByIndex(unsigned Index) const {
+ DataRefImpl DRI;
+ if (!SymtabLoadCmd)
+ return basic_symbol_iterator(SymbolRef(DRI, this));
-symbol_iterator MachOObjectFile::end_dynamic_symbols() const {
- // TODO: implement
- report_fatal_error("Dynamic symbols unimplemented in MachOObjectFile");
+ MachO::symtab_command Symtab = getSymtabLoadCommand();
+ assert(Index < Symtab.nsyms && "Requested symbol index is out of range.");
+ unsigned SymbolTableEntrySize =
+ is64Bit() ? sizeof(MachO::nlist_64) : sizeof(MachO::nlist);
+ DRI.p = reinterpret_cast<uintptr_t>(getPtr(this, Symtab.symoff));
+ DRI.p += Index * SymbolTableEntrySize;
+ return basic_symbol_iterator(SymbolRef(DRI, this));
}
-section_iterator MachOObjectFile::begin_sections() const {
+section_iterator MachOObjectFile::section_begin() const {
DataRefImpl DRI;
return section_iterator(SectionRef(DRI, this));
}
-section_iterator MachOObjectFile::end_sections() const {
+section_iterator MachOObjectFile::section_end() const {
DataRefImpl DRI;
DRI.d.a = Sections.size();
return section_iterator(SectionRef(DRI, this));
}
-library_iterator MachOObjectFile::begin_libraries_needed() const {
+library_iterator MachOObjectFile::needed_library_begin() const {
// TODO: implement
report_fatal_error("Needed libraries unimplemented in MachOObjectFile");
}
-library_iterator MachOObjectFile::end_libraries_needed() const {
+library_iterator MachOObjectFile::needed_library_end() const {
// TODO: implement
report_fatal_error("Needed libraries unimplemented in MachOObjectFile");
}
@@ -1272,6 +1285,8 @@ StringRef MachOObjectFile::getFileFormatName() const {
switch (CPUType) {
case llvm::MachO::CPU_TYPE_X86_64:
return "Mach-O 64-bit x86-64";
+ case llvm::MachO::CPU_TYPE_ARM64:
+ return "Mach-O arm64";
case llvm::MachO::CPU_TYPE_POWERPC64:
return "Mach-O 64-bit ppc64";
default:
@@ -1287,6 +1302,8 @@ Triple::ArchType MachOObjectFile::getArch(uint32_t CPUType) {
return Triple::x86_64;
case llvm::MachO::CPU_TYPE_ARM:
return Triple::arm;
+ case llvm::MachO::CPU_TYPE_ARM64:
+ return Triple::arm64;
case llvm::MachO::CPU_TYPE_POWERPC:
return Triple::ppc;
case llvm::MachO::CPU_TYPE_POWERPC64:
@@ -1296,6 +1313,108 @@ Triple::ArchType MachOObjectFile::getArch(uint32_t CPUType) {
}
}
+Triple MachOObjectFile::getArch(uint32_t CPUType, uint32_t CPUSubType) {
+ switch (CPUType) {
+ case MachO::CPU_TYPE_I386:
+ switch (CPUSubType & ~MachO::CPU_SUBTYPE_MASK) {
+ case MachO::CPU_SUBTYPE_I386_ALL:
+ return Triple("i386-apple-darwin");
+ default:
+ return Triple();
+ }
+ case MachO::CPU_TYPE_X86_64:
+ switch (CPUSubType & ~MachO::CPU_SUBTYPE_MASK) {
+ case MachO::CPU_SUBTYPE_X86_64_ALL:
+ return Triple("x86_64-apple-darwin");
+ case MachO::CPU_SUBTYPE_X86_64_H:
+ return Triple("x86_64h-apple-darwin");
+ default:
+ return Triple();
+ }
+ case MachO::CPU_TYPE_ARM:
+ switch (CPUSubType & ~MachO::CPU_SUBTYPE_MASK) {
+ case MachO::CPU_SUBTYPE_ARM_V4T:
+ return Triple("armv4t-apple-darwin");
+ case MachO::CPU_SUBTYPE_ARM_V5TEJ:
+ return Triple("armv5e-apple-darwin");
+ case MachO::CPU_SUBTYPE_ARM_V6:
+ return Triple("armv6-apple-darwin");
+ case MachO::CPU_SUBTYPE_ARM_V6M:
+ return Triple("armv6m-apple-darwin");
+ case MachO::CPU_SUBTYPE_ARM_V7EM:
+ return Triple("armv7em-apple-darwin");
+ case MachO::CPU_SUBTYPE_ARM_V7K:
+ return Triple("armv7k-apple-darwin");
+ case MachO::CPU_SUBTYPE_ARM_V7M:
+ return Triple("armv7m-apple-darwin");
+ case MachO::CPU_SUBTYPE_ARM_V7S:
+ return Triple("armv7s-apple-darwin");
+ default:
+ return Triple();
+ }
+ case MachO::CPU_TYPE_ARM64:
+ switch (CPUSubType & ~MachO::CPU_SUBTYPE_MASK) {
+ case MachO::CPU_SUBTYPE_ARM64_ALL:
+ return Triple("arm64-apple-darwin");
+ default:
+ return Triple();
+ }
+ case MachO::CPU_TYPE_POWERPC:
+ switch (CPUSubType & ~MachO::CPU_SUBTYPE_MASK) {
+ case MachO::CPU_SUBTYPE_POWERPC_ALL:
+ return Triple("ppc-apple-darwin");
+ default:
+ return Triple();
+ }
+ case MachO::CPU_TYPE_POWERPC64:
+ switch (CPUSubType & ~MachO::CPU_SUBTYPE_MASK) {
+ case MachO::CPU_SUBTYPE_POWERPC_ALL:
+ return Triple("ppc64-apple-darwin");
+ default:
+ return Triple();
+ }
+ default:
+ return Triple();
+ }
+}
+
+Triple MachOObjectFile::getHostArch() {
+ return Triple(sys::getDefaultTargetTriple());
+}
+
+Triple MachOObjectFile::getArch(StringRef ArchFlag) {
+ if (ArchFlag == "i386")
+ return Triple("i386-apple-darwin");
+ else if (ArchFlag == "x86_64")
+ return Triple("x86_64-apple-darwin");
+ else if (ArchFlag == "x86_64h")
+ return Triple("x86_64h-apple-darwin");
+ else if (ArchFlag == "armv4t" || ArchFlag == "arm")
+ return Triple("armv4t-apple-darwin");
+ else if (ArchFlag == "armv5e")
+ return Triple("armv5e-apple-darwin");
+ else if (ArchFlag == "armv6")
+ return Triple("armv6-apple-darwin");
+ else if (ArchFlag == "armv6m")
+ return Triple("armv6m-apple-darwin");
+ else if (ArchFlag == "armv7em")
+ return Triple("armv7em-apple-darwin");
+ else if (ArchFlag == "armv7k")
+ return Triple("armv7k-apple-darwin");
+ else if (ArchFlag == "armv7k")
+ return Triple("armv7m-apple-darwin");
+ else if (ArchFlag == "armv7s")
+ return Triple("armv7s-apple-darwin");
+ else if (ArchFlag == "arm64")
+ return Triple("arm64-apple-darwin");
+ else if (ArchFlag == "ppc")
+ return Triple("ppc-apple-darwin");
+ else if (ArchFlag == "ppc64")
+ return Triple("ppc64-apple-darwin");
+ else
+ return Triple();
+}
+
unsigned MachOObjectFile::getArch() const {
return getArch(getCPUType(this));
}
@@ -1423,7 +1542,7 @@ SectionRef
MachOObjectFile::getRelocationSection(
const MachO::any_relocation_info &RE) const {
if (isRelocationScattered(RE) || getPlainRelocationExternal(RE))
- return *end_sections();
+ return *section_end();
unsigned SecNum = getPlainRelocationSymbolNum(RE) - 1;
DataRefImpl DRI;
DRI.d.a = SecNum;
@@ -1501,10 +1620,34 @@ MachOObjectFile::getLinkerOptionsLoadCommand(const LoadCommandInfo &L) const {
return getStruct<MachO::linker_options_command>(this, L.Ptr);
}
+MachO::version_min_command
+MachOObjectFile::getVersionMinLoadCommand(const LoadCommandInfo &L) const {
+ return getStruct<MachO::version_min_command>(this, L.Ptr);
+}
+
+MachO::dylib_command
+MachOObjectFile::getDylibIDLoadCommand(const LoadCommandInfo &L) const {
+ return getStruct<MachO::dylib_command>(this, L.Ptr);
+}
+
+
MachO::any_relocation_info
MachOObjectFile::getRelocation(DataRefImpl Rel) const {
- const char *P = reinterpret_cast<const char *>(Rel.p);
- return getStruct<MachO::any_relocation_info>(this, P);
+ DataRefImpl Sec;
+ Sec.d.a = Rel.d.a;
+ uint32_t Offset;
+ if (is64Bit()) {
+ MachO::section_64 Sect = getSection64(Sec);
+ Offset = Sect.reloff;
+ } else {
+ MachO::section Sect = getSection(Sec);
+ Offset = Sect.reloff;
+ }
+
+ auto P = reinterpret_cast<const MachO::any_relocation_info *>(
+ getPtr(this, Offset)) + Rel.d.b;
+ return getStruct<MachO::any_relocation_info>(
+ this, reinterpret_cast<const char *>(P));
}
MachO::data_in_code_entry
@@ -1579,27 +1722,24 @@ void MachOObjectFile::ReadULEB128s(uint64_t Index,
}
}
-ObjectFile *ObjectFile::createMachOObjectFile(MemoryBuffer *Buffer) {
+ErrorOr<ObjectFile *>
+ObjectFile::createMachOObjectFile(std::unique_ptr<MemoryBuffer> &Buffer) {
StringRef Magic = Buffer->getBuffer().slice(0, 4);
- error_code ec;
- OwningPtr<ObjectFile> Ret;
+ std::error_code EC;
+ std::unique_ptr<MachOObjectFile> Ret;
if (Magic == "\xFE\xED\xFA\xCE")
- Ret.reset(new MachOObjectFile(Buffer, false, false, ec));
+ Ret.reset(new MachOObjectFile(std::move(Buffer), false, false, EC));
else if (Magic == "\xCE\xFA\xED\xFE")
- Ret.reset(new MachOObjectFile(Buffer, true, false, ec));
+ Ret.reset(new MachOObjectFile(std::move(Buffer), true, false, EC));
else if (Magic == "\xFE\xED\xFA\xCF")
- Ret.reset(new MachOObjectFile(Buffer, false, true, ec));
+ Ret.reset(new MachOObjectFile(std::move(Buffer), false, true, EC));
else if (Magic == "\xCF\xFA\xED\xFE")
- Ret.reset(new MachOObjectFile(Buffer, true, true, ec));
- else {
- delete Buffer;
- return NULL;
- }
+ Ret.reset(new MachOObjectFile(std::move(Buffer), true, true, EC));
+ else
+ return object_error::parse_failed;
- if (ec)
- return NULL;
- return Ret.take();
+ if (EC)
+ return EC;
+ return Ret.release();
}
-} // end namespace object
-} // end namespace llvm
diff --git a/contrib/llvm/lib/Object/MachOUniversal.cpp b/contrib/llvm/lib/Object/MachOUniversal.cpp
index 75160af..4ba5d96 100644
--- a/contrib/llvm/lib/Object/MachOUniversal.cpp
+++ b/contrib/llvm/lib/Object/MachOUniversal.cpp
@@ -12,9 +12,9 @@
//===----------------------------------------------------------------------===//
#include "llvm/Object/MachOUniversal.h"
-
#include "llvm/Object/MachO.h"
#include "llvm/Object/ObjectFile.h"
+#include "llvm/Object/Archive.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -23,26 +23,21 @@ using namespace llvm;
using namespace object;
template<typename T>
-static void SwapValue(T &Value) {
- Value = sys::SwapByteOrder(Value);
-}
-
-template<typename T>
static void SwapStruct(T &Value);
template<>
void SwapStruct(MachO::fat_header &H) {
- SwapValue(H.magic);
- SwapValue(H.nfat_arch);
+ sys::swapByteOrder(H.magic);
+ sys::swapByteOrder(H.nfat_arch);
}
template<>
void SwapStruct(MachO::fat_arch &H) {
- SwapValue(H.cputype);
- SwapValue(H.cpusubtype);
- SwapValue(H.offset);
- SwapValue(H.size);
- SwapValue(H.align);
+ sys::swapByteOrder(H.cputype);
+ sys::swapByteOrder(H.cpusubtype);
+ sys::swapByteOrder(H.offset);
+ sys::swapByteOrder(H.size);
+ sys::swapByteOrder(H.align);
}
template<typename T>
@@ -58,7 +53,7 @@ static T getUniversalBinaryStruct(const char *Ptr) {
MachOUniversalBinary::ObjectForArch::ObjectForArch(
const MachOUniversalBinary *Parent, uint32_t Index)
: Parent(Parent), Index(Index) {
- if (Parent == 0 || Index > Parent->getNumberOfObjects()) {
+ if (!Parent || Index >= Parent->getNumberOfObjects()) {
clear();
} else {
// Parse object header.
@@ -72,31 +67,53 @@ MachOUniversalBinary::ObjectForArch::ObjectForArch(
}
}
-error_code MachOUniversalBinary::ObjectForArch::getAsObjectFile(
- OwningPtr<ObjectFile> &Result) const {
+ErrorOr<std::unique_ptr<ObjectFile>>
+MachOUniversalBinary::ObjectForArch::getAsObjectFile() const {
if (Parent) {
StringRef ParentData = Parent->getData();
StringRef ObjectData = ParentData.substr(Header.offset, Header.size);
- std::string ObjectName =
- Parent->getFileName().str() + ":" +
- Triple::getArchTypeName(MachOObjectFile::getArch(Header.cputype));
- MemoryBuffer *ObjBuffer = MemoryBuffer::getMemBuffer(
- ObjectData, ObjectName, false);
- if (ObjectFile *Obj = ObjectFile::createMachOObjectFile(ObjBuffer)) {
- Result.reset(Obj);
- return object_error::success;
- }
+ std::string ObjectName = Parent->getFileName().str();
+ std::unique_ptr<MemoryBuffer> ObjBuffer(
+ MemoryBuffer::getMemBuffer(ObjectData, ObjectName, false));
+ return ObjectFile::createMachOObjectFile(ObjBuffer);
+ }
+ return object_error::parse_failed;
+}
+
+std::error_code MachOUniversalBinary::ObjectForArch::getAsArchive(
+ std::unique_ptr<Archive> &Result) const {
+ if (Parent) {
+ StringRef ParentData = Parent->getData();
+ StringRef ObjectData = ParentData.substr(Header.offset, Header.size);
+ std::string ObjectName = Parent->getFileName().str();
+ std::unique_ptr<MemoryBuffer> ObjBuffer(
+ MemoryBuffer::getMemBuffer(ObjectData, ObjectName, false));
+ ErrorOr<Archive *> Obj = Archive::create(std::move(ObjBuffer));
+ if (std::error_code EC = Obj.getError())
+ return EC;
+ Result.reset(Obj.get());
+ return object_error::success;
}
return object_error::parse_failed;
}
void MachOUniversalBinary::anchor() { }
-MachOUniversalBinary::MachOUniversalBinary(MemoryBuffer *Source,
- error_code &ec)
- : Binary(Binary::ID_MachOUniversalBinary, Source),
- NumberOfObjects(0) {
- if (Source->getBufferSize() < sizeof(MachO::fat_header)) {
+ErrorOr<MachOUniversalBinary *>
+MachOUniversalBinary::create(std::unique_ptr<MemoryBuffer> Source) {
+ std::error_code EC;
+ std::unique_ptr<MachOUniversalBinary> Ret(
+ new MachOUniversalBinary(std::move(Source), EC));
+ if (EC)
+ return EC;
+ return Ret.release();
+}
+
+MachOUniversalBinary::MachOUniversalBinary(std::unique_ptr<MemoryBuffer> Source,
+ std::error_code &ec)
+ : Binary(Binary::ID_MachOUniversalBinary, std::move(Source)),
+ NumberOfObjects(0) {
+ if (Data->getBufferSize() < sizeof(MachO::fat_header)) {
ec = object_error::invalid_file_type;
return;
}
@@ -125,15 +142,14 @@ static bool getCTMForArch(Triple::ArchType Arch, MachO::CPUType &CTM) {
}
}
-error_code
-MachOUniversalBinary::getObjectForArch(Triple::ArchType Arch,
- OwningPtr<ObjectFile> &Result) const {
+ErrorOr<std::unique_ptr<ObjectFile>>
+MachOUniversalBinary::getObjectForArch(Triple::ArchType Arch) const {
MachO::CPUType CTM;
if (!getCTMForArch(Arch, CTM))
return object_error::arch_not_found;
for (object_iterator I = begin_objects(), E = end_objects(); I != E; ++I) {
if (I->getCPUType() == static_cast<uint32_t>(CTM))
- return I->getAsObjectFile(Result);
+ return I->getAsObjectFile();
}
return object_error::arch_not_found;
}
diff --git a/contrib/llvm/lib/Object/Object.cpp b/contrib/llvm/lib/Object/Object.cpp
index 6941708..567d87f 100644
--- a/contrib/llvm/lib/Object/Object.cpp
+++ b/contrib/llvm/lib/Object/Object.cpp
@@ -13,8 +13,8 @@
//===----------------------------------------------------------------------===//
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Object/ObjectFile.h"
#include "llvm-c/Object.h"
+#include "llvm/Object/ObjectFile.h"
using namespace llvm;
using namespace object;
@@ -59,7 +59,11 @@ wrap(const relocation_iterator *SI) {
// ObjectFile creation
LLVMObjectFileRef LLVMCreateObjectFile(LLVMMemoryBufferRef MemBuf) {
- return wrap(ObjectFile::createObjectFile(unwrap(MemBuf)));
+ std::unique_ptr<MemoryBuffer> Buf(unwrap(MemBuf));
+ ErrorOr<ObjectFile *> ObjOrErr(ObjectFile::createObjectFile(Buf));
+ Buf.release();
+ ObjectFile *Obj = ObjOrErr ? ObjOrErr.get() : nullptr;
+ return wrap(Obj);
}
void LLVMDisposeObjectFile(LLVMObjectFileRef ObjectFile) {
@@ -68,7 +72,7 @@ void LLVMDisposeObjectFile(LLVMObjectFileRef ObjectFile) {
// ObjectFile Section iterators
LLVMSectionIteratorRef LLVMGetSections(LLVMObjectFileRef ObjectFile) {
- section_iterator SI = unwrap(ObjectFile)->begin_sections();
+ section_iterator SI = unwrap(ObjectFile)->section_begin();
return wrap(new section_iterator(SI));
}
@@ -78,24 +82,22 @@ void LLVMDisposeSectionIterator(LLVMSectionIteratorRef SI) {
LLVMBool LLVMIsSectionIteratorAtEnd(LLVMObjectFileRef ObjectFile,
LLVMSectionIteratorRef SI) {
- return (*unwrap(SI) == unwrap(ObjectFile)->end_sections()) ? 1 : 0;
+ return (*unwrap(SI) == unwrap(ObjectFile)->section_end()) ? 1 : 0;
}
void LLVMMoveToNextSection(LLVMSectionIteratorRef SI) {
- error_code ec;
- unwrap(SI)->increment(ec);
- if (ec) report_fatal_error("LLVMMoveToNextSection failed: " + ec.message());
+ ++(*unwrap(SI));
}
void LLVMMoveToContainingSection(LLVMSectionIteratorRef Sect,
LLVMSymbolIteratorRef Sym) {
- if (error_code ec = (*unwrap(Sym))->getSection(*unwrap(Sect)))
+ if (std::error_code ec = (*unwrap(Sym))->getSection(*unwrap(Sect)))
report_fatal_error(ec.message());
}
// ObjectFile Symbol iterators
LLVMSymbolIteratorRef LLVMGetSymbols(LLVMObjectFileRef ObjectFile) {
- symbol_iterator SI = unwrap(ObjectFile)->begin_symbols();
+ symbol_iterator SI = unwrap(ObjectFile)->symbol_begin();
return wrap(new symbol_iterator(SI));
}
@@ -105,40 +107,38 @@ void LLVMDisposeSymbolIterator(LLVMSymbolIteratorRef SI) {
LLVMBool LLVMIsSymbolIteratorAtEnd(LLVMObjectFileRef ObjectFile,
LLVMSymbolIteratorRef SI) {
- return (*unwrap(SI) == unwrap(ObjectFile)->end_symbols()) ? 1 : 0;
+ return (*unwrap(SI) == unwrap(ObjectFile)->symbol_end()) ? 1 : 0;
}
void LLVMMoveToNextSymbol(LLVMSymbolIteratorRef SI) {
- error_code ec;
- unwrap(SI)->increment(ec);
- if (ec) report_fatal_error("LLVMMoveToNextSymbol failed: " + ec.message());
+ ++(*unwrap(SI));
}
// SectionRef accessors
const char *LLVMGetSectionName(LLVMSectionIteratorRef SI) {
StringRef ret;
- if (error_code ec = (*unwrap(SI))->getName(ret))
+ if (std::error_code ec = (*unwrap(SI))->getName(ret))
report_fatal_error(ec.message());
return ret.data();
}
uint64_t LLVMGetSectionSize(LLVMSectionIteratorRef SI) {
uint64_t ret;
- if (error_code ec = (*unwrap(SI))->getSize(ret))
+ if (std::error_code ec = (*unwrap(SI))->getSize(ret))
report_fatal_error(ec.message());
return ret;
}
const char *LLVMGetSectionContents(LLVMSectionIteratorRef SI) {
StringRef ret;
- if (error_code ec = (*unwrap(SI))->getContents(ret))
+ if (std::error_code ec = (*unwrap(SI))->getContents(ret))
report_fatal_error(ec.message());
return ret.data();
}
uint64_t LLVMGetSectionAddress(LLVMSectionIteratorRef SI) {
uint64_t ret;
- if (error_code ec = (*unwrap(SI))->getAddress(ret))
+ if (std::error_code ec = (*unwrap(SI))->getAddress(ret))
report_fatal_error(ec.message());
return ret;
}
@@ -146,14 +146,14 @@ uint64_t LLVMGetSectionAddress(LLVMSectionIteratorRef SI) {
LLVMBool LLVMGetSectionContainsSymbol(LLVMSectionIteratorRef SI,
LLVMSymbolIteratorRef Sym) {
bool ret;
- if (error_code ec = (*unwrap(SI))->containsSymbol(**unwrap(Sym), ret))
+ if (std::error_code ec = (*unwrap(SI))->containsSymbol(**unwrap(Sym), ret))
report_fatal_error(ec.message());
return ret;
}
// Section Relocation iterators
LLVMRelocationIteratorRef LLVMGetRelocations(LLVMSectionIteratorRef Section) {
- relocation_iterator SI = (*unwrap(Section))->begin_relocations();
+ relocation_iterator SI = (*unwrap(Section))->relocation_begin();
return wrap(new relocation_iterator(SI));
}
@@ -163,42 +163,32 @@ void LLVMDisposeRelocationIterator(LLVMRelocationIteratorRef SI) {
LLVMBool LLVMIsRelocationIteratorAtEnd(LLVMSectionIteratorRef Section,
LLVMRelocationIteratorRef SI) {
- return (*unwrap(SI) == (*unwrap(Section))->end_relocations()) ? 1 : 0;
+ return (*unwrap(SI) == (*unwrap(Section))->relocation_end()) ? 1 : 0;
}
void LLVMMoveToNextRelocation(LLVMRelocationIteratorRef SI) {
- error_code ec;
- unwrap(SI)->increment(ec);
- if (ec) report_fatal_error("LLVMMoveToNextRelocation failed: " +
- ec.message());
+ ++(*unwrap(SI));
}
// SymbolRef accessors
const char *LLVMGetSymbolName(LLVMSymbolIteratorRef SI) {
StringRef ret;
- if (error_code ec = (*unwrap(SI))->getName(ret))
+ if (std::error_code ec = (*unwrap(SI))->getName(ret))
report_fatal_error(ec.message());
return ret.data();
}
uint64_t LLVMGetSymbolAddress(LLVMSymbolIteratorRef SI) {
uint64_t ret;
- if (error_code ec = (*unwrap(SI))->getAddress(ret))
- report_fatal_error(ec.message());
- return ret;
-}
-
-uint64_t LLVMGetSymbolFileOffset(LLVMSymbolIteratorRef SI) {
- uint64_t ret;
- if (error_code ec = (*unwrap(SI))->getFileOffset(ret))
+ if (std::error_code ec = (*unwrap(SI))->getAddress(ret))
report_fatal_error(ec.message());
return ret;
}
uint64_t LLVMGetSymbolSize(LLVMSymbolIteratorRef SI) {
uint64_t ret;
- if (error_code ec = (*unwrap(SI))->getSize(ret))
+ if (std::error_code ec = (*unwrap(SI))->getSize(ret))
report_fatal_error(ec.message());
return ret;
}
@@ -206,14 +196,14 @@ uint64_t LLVMGetSymbolSize(LLVMSymbolIteratorRef SI) {
// RelocationRef accessors
uint64_t LLVMGetRelocationAddress(LLVMRelocationIteratorRef RI) {
uint64_t ret;
- if (error_code ec = (*unwrap(RI))->getAddress(ret))
+ if (std::error_code ec = (*unwrap(RI))->getAddress(ret))
report_fatal_error(ec.message());
return ret;
}
uint64_t LLVMGetRelocationOffset(LLVMRelocationIteratorRef RI) {
uint64_t ret;
- if (error_code ec = (*unwrap(RI))->getOffset(ret))
+ if (std::error_code ec = (*unwrap(RI))->getOffset(ret))
report_fatal_error(ec.message());
return ret;
}
@@ -225,7 +215,7 @@ LLVMSymbolIteratorRef LLVMGetRelocationSymbol(LLVMRelocationIteratorRef RI) {
uint64_t LLVMGetRelocationType(LLVMRelocationIteratorRef RI) {
uint64_t ret;
- if (error_code ec = (*unwrap(RI))->getType(ret))
+ if (std::error_code ec = (*unwrap(RI))->getType(ret))
report_fatal_error(ec.message());
return ret;
}
@@ -233,7 +223,7 @@ uint64_t LLVMGetRelocationType(LLVMRelocationIteratorRef RI) {
// NOTE: Caller takes ownership of returned string.
const char *LLVMGetRelocationTypeName(LLVMRelocationIteratorRef RI) {
SmallVector<char, 0> ret;
- if (error_code ec = (*unwrap(RI))->getTypeName(ret))
+ if (std::error_code ec = (*unwrap(RI))->getTypeName(ret))
report_fatal_error(ec.message());
char *str = static_cast<char*>(malloc(ret.size()));
@@ -244,7 +234,7 @@ const char *LLVMGetRelocationTypeName(LLVMRelocationIteratorRef RI) {
// NOTE: Caller takes ownership of returned string.
const char *LLVMGetRelocationValueString(LLVMRelocationIteratorRef RI) {
SmallVector<char, 0> ret;
- if (error_code ec = (*unwrap(RI))->getValueString(ret))
+ if (std::error_code ec = (*unwrap(RI))->getValueString(ret))
report_fatal_error(ec.message());
char *str = static_cast<char*>(malloc(ret.size()));
diff --git a/contrib/llvm/lib/Object/ObjectFile.cpp b/contrib/llvm/lib/Object/ObjectFile.cpp
index 0e626d6..f5488c6 100644
--- a/contrib/llvm/lib/Object/ObjectFile.cpp
+++ b/contrib/llvm/lib/Object/ObjectFile.cpp
@@ -12,23 +12,31 @@
//===----------------------------------------------------------------------===//
#include "llvm/Object/ObjectFile.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/system_error.h"
+#include "llvm/Support/raw_ostream.h"
+#include <system_error>
using namespace llvm;
using namespace object;
void ObjectFile::anchor() { }
-ObjectFile::ObjectFile(unsigned int Type, MemoryBuffer *source)
- : Binary(Type, source) {
+ObjectFile::ObjectFile(unsigned int Type, std::unique_ptr<MemoryBuffer> Source)
+ : SymbolicFile(Type, std::move(Source)) {}
+
+std::error_code ObjectFile::printSymbolName(raw_ostream &OS,
+ DataRefImpl Symb) const {
+ StringRef Name;
+ if (std::error_code EC = getSymbolName(Symb, Name))
+ return EC;
+ OS << Name;
+ return object_error::success;
}
-error_code ObjectFile::getSymbolAlignment(DataRefImpl DRI,
- uint32_t &Result) const {
+std::error_code ObjectFile::getSymbolAlignment(DataRefImpl DRI,
+ uint32_t &Result) const {
Result = 0;
return object_error::success;
}
@@ -37,21 +45,19 @@ section_iterator ObjectFile::getRelocatedSection(DataRefImpl Sec) const {
return section_iterator(SectionRef(Sec, this));
}
-ObjectFile *ObjectFile::createObjectFile(MemoryBuffer *Object) {
- if (Object->getBufferSize() < 64) {
- delete Object;
- return 0;
- }
+ErrorOr<ObjectFile *>
+ObjectFile::createObjectFile(std::unique_ptr<MemoryBuffer> &Object,
+ sys::fs::file_magic Type) {
+ if (Type == sys::fs::file_magic::unknown)
+ Type = sys::fs::identify_magic(Object->getBuffer());
- sys::fs::file_magic Type = sys::fs::identify_magic(Object->getBuffer());
switch (Type) {
case sys::fs::file_magic::unknown:
case sys::fs::file_magic::bitcode:
case sys::fs::file_magic::archive:
case sys::fs::file_magic::macho_universal_binary:
case sys::fs::file_magic::windows_resource:
- delete Object;
- return 0;
+ return object_error::invalid_file_type;
case sys::fs::file_magic::elf_relocatable:
case sys::fs::file_magic::elf_executable:
case sys::fs::file_magic::elf_shared_object:
@@ -71,14 +77,15 @@ ObjectFile *ObjectFile::createObjectFile(MemoryBuffer *Object) {
case sys::fs::file_magic::coff_object:
case sys::fs::file_magic::coff_import_library:
case sys::fs::file_magic::pecoff_executable:
- return createCOFFObjectFile(Object);
+ return createCOFFObjectFile(std::move(Object));
}
llvm_unreachable("Unexpected Object File Type");
}
-ObjectFile *ObjectFile::createObjectFile(StringRef ObjectPath) {
- OwningPtr<MemoryBuffer> File;
- if (MemoryBuffer::getFile(ObjectPath, File))
- return NULL;
- return createObjectFile(File.take());
+ErrorOr<ObjectFile *> ObjectFile::createObjectFile(StringRef ObjectPath) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> FileOrErr =
+ MemoryBuffer::getFile(ObjectPath);
+ if (std::error_code EC = FileOrErr.getError())
+ return EC;
+ return createObjectFile(FileOrErr.get());
}
diff --git a/contrib/llvm/lib/Object/RecordStreamer.cpp b/contrib/llvm/lib/Object/RecordStreamer.cpp
new file mode 100644
index 0000000..081fadd
--- /dev/null
+++ b/contrib/llvm/lib/Object/RecordStreamer.cpp
@@ -0,0 +1,100 @@
+//===-- RecordStreamer.cpp - Record asm defined and used symbols ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RecordStreamer.h"
+#include "llvm/MC/MCSymbol.h"
+using namespace llvm;
+
+void RecordStreamer::markDefined(const MCSymbol &Symbol) {
+ State &S = Symbols[Symbol.getName()];
+ switch (S) {
+ case DefinedGlobal:
+ case Global:
+ S = DefinedGlobal;
+ break;
+ case NeverSeen:
+ case Defined:
+ case Used:
+ S = Defined;
+ break;
+ }
+}
+
+void RecordStreamer::markGlobal(const MCSymbol &Symbol) {
+ State &S = Symbols[Symbol.getName()];
+ switch (S) {
+ case DefinedGlobal:
+ case Defined:
+ S = DefinedGlobal;
+ break;
+
+ case NeverSeen:
+ case Global:
+ case Used:
+ S = Global;
+ break;
+ }
+}
+
+void RecordStreamer::markUsed(const MCSymbol &Symbol) {
+ State &S = Symbols[Symbol.getName()];
+ switch (S) {
+ case DefinedGlobal:
+ case Defined:
+ case Global:
+ break;
+
+ case NeverSeen:
+ case Used:
+ S = Used;
+ break;
+ }
+}
+
+void RecordStreamer::visitUsedSymbol(const MCSymbol &Sym) { markUsed(Sym); }
+
+RecordStreamer::const_iterator RecordStreamer::begin() {
+ return Symbols.begin();
+}
+
+RecordStreamer::const_iterator RecordStreamer::end() { return Symbols.end(); }
+
+RecordStreamer::RecordStreamer(MCContext &Context) : MCStreamer(Context) {}
+
+void RecordStreamer::EmitInstruction(const MCInst &Inst,
+ const MCSubtargetInfo &STI) {
+ MCStreamer::EmitInstruction(Inst, STI);
+}
+
+void RecordStreamer::EmitLabel(MCSymbol *Symbol) {
+ MCStreamer::EmitLabel(Symbol);
+ markDefined(*Symbol);
+}
+
+void RecordStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
+ markDefined(*Symbol);
+ MCStreamer::EmitAssignment(Symbol, Value);
+}
+
+bool RecordStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
+ MCSymbolAttr Attribute) {
+ if (Attribute == MCSA_Global)
+ markGlobal(*Symbol);
+ return true;
+}
+
+void RecordStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
+ uint64_t Size, unsigned ByteAlignment) {
+ markDefined(*Symbol);
+}
+
+void RecordStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) {
+ markDefined(*Symbol);
+}
diff --git a/contrib/llvm/lib/Object/RecordStreamer.h b/contrib/llvm/lib/Object/RecordStreamer.h
new file mode 100644
index 0000000..10e70ef
--- /dev/null
+++ b/contrib/llvm/lib/Object/RecordStreamer.h
@@ -0,0 +1,42 @@
+//===-- RecordStreamer.h - Record asm defined and used symbols ---*- C++ -*===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_RECORD_STREAMER
+#define LLVM_OBJECT_RECORD_STREAMER
+
+#include "llvm/MC/MCStreamer.h"
+
+namespace llvm {
+class RecordStreamer : public MCStreamer {
+public:
+ enum State { NeverSeen, Global, Defined, DefinedGlobal, Used };
+
+private:
+ StringMap<State> Symbols;
+ void markDefined(const MCSymbol &Symbol);
+ void markGlobal(const MCSymbol &Symbol);
+ void markUsed(const MCSymbol &Symbol);
+ void visitUsedSymbol(const MCSymbol &Sym) override;
+
+public:
+ typedef StringMap<State>::const_iterator const_iterator;
+ const_iterator begin();
+ const_iterator end();
+ RecordStreamer(MCContext &Context);
+ void EmitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI) override;
+ void EmitLabel(MCSymbol *Symbol) override;
+ void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) override;
+ bool EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) override;
+ void EmitZerofill(const MCSection *Section, MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) override;
+ void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) override;
+};
+}
+#endif
diff --git a/contrib/llvm/lib/Object/SymbolicFile.cpp b/contrib/llvm/lib/Object/SymbolicFile.cpp
new file mode 100644
index 0000000..30cf1a0
--- /dev/null
+++ b/contrib/llvm/lib/Object/SymbolicFile.cpp
@@ -0,0 +1,65 @@
+//===- SymbolicFile.cpp - Interface that only provides symbols --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a file format independent SymbolicFile class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Object/IRObjectFile.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Object/SymbolicFile.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+using namespace llvm;
+using namespace object;
+
+SymbolicFile::SymbolicFile(unsigned int Type,
+ std::unique_ptr<MemoryBuffer> Source)
+ : Binary(Type, std::move(Source)) {}
+
+SymbolicFile::~SymbolicFile() {}
+
+ErrorOr<SymbolicFile *>
+SymbolicFile::createSymbolicFile(std::unique_ptr<MemoryBuffer> &Object,
+ sys::fs::file_magic Type,
+ LLVMContext *Context) {
+ if (Type == sys::fs::file_magic::unknown)
+ Type = sys::fs::identify_magic(Object->getBuffer());
+
+ switch (Type) {
+ case sys::fs::file_magic::bitcode:
+ if (Context)
+ return IRObjectFile::createIRObjectFile(std::move(Object), *Context);
+ // Fallthrough
+ case sys::fs::file_magic::unknown:
+ case sys::fs::file_magic::archive:
+ case sys::fs::file_magic::macho_universal_binary:
+ case sys::fs::file_magic::windows_resource:
+ return object_error::invalid_file_type;
+ case sys::fs::file_magic::elf_relocatable:
+ case sys::fs::file_magic::elf_executable:
+ case sys::fs::file_magic::elf_shared_object:
+ case sys::fs::file_magic::elf_core:
+ case sys::fs::file_magic::macho_object:
+ case sys::fs::file_magic::macho_executable:
+ case sys::fs::file_magic::macho_fixed_virtual_memory_shared_lib:
+ case sys::fs::file_magic::macho_core:
+ case sys::fs::file_magic::macho_preload_executable:
+ case sys::fs::file_magic::macho_dynamically_linked_shared_lib:
+ case sys::fs::file_magic::macho_dynamic_linker:
+ case sys::fs::file_magic::macho_bundle:
+ case sys::fs::file_magic::macho_dynamically_linked_shared_lib_stub:
+ case sys::fs::file_magic::macho_dsym_companion:
+ case sys::fs::file_magic::coff_object:
+ case sys::fs::file_magic::coff_import_library:
+ case sys::fs::file_magic::pecoff_executable:
+ return ObjectFile::createObjectFile(Object, Type);
+ }
+ llvm_unreachable("Unexpected Binary File Type");
+}
diff --git a/contrib/llvm/lib/Option/ArgList.cpp b/contrib/llvm/lib/Option/ArgList.cpp
index 15f7e8b..5848bb1 100644
--- a/contrib/llvm/lib/Option/ArgList.cpp
+++ b/contrib/llvm/lib/Option/ArgList.cpp
@@ -9,6 +9,7 @@
#include "llvm/Option/ArgList.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/Option.h"
@@ -32,11 +33,6 @@ void arg_iterator::SkipToNextArg() {
}
}
-//
-
-ArgList::ArgList() {
-}
-
ArgList::~ArgList() {
}
@@ -45,14 +41,9 @@ void ArgList::append(Arg *A) {
}
void ArgList::eraseArg(OptSpecifier Id) {
- for (iterator it = begin(), ie = end(); it != ie; ) {
- if ((*it)->getOption().matches(Id)) {
- it = Args.erase(it);
- ie = end();
- } else {
- ++it;
- }
- }
+ Args.erase(std::remove_if(begin(), end(),
+ [=](Arg *A) { return A->getOption().matches(Id); }),
+ end());
}
Arg *ArgList::getLastArgNoClaim(OptSpecifier Id) const {
@@ -60,11 +51,11 @@ Arg *ArgList::getLastArgNoClaim(OptSpecifier Id) const {
for (const_reverse_iterator it = rbegin(), ie = rend(); it != ie; ++it)
if ((*it)->getOption().matches(Id))
return *it;
- return 0;
+ return nullptr;
}
Arg *ArgList::getLastArg(OptSpecifier Id) const {
- Arg *Res = 0;
+ Arg *Res = nullptr;
for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
if ((*it)->getOption().matches(Id)) {
Res = *it;
@@ -76,7 +67,7 @@ Arg *ArgList::getLastArg(OptSpecifier Id) const {
}
Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1) const {
- Arg *Res = 0;
+ Arg *Res = nullptr;
for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
if ((*it)->getOption().matches(Id0) ||
(*it)->getOption().matches(Id1)) {
@@ -91,7 +82,7 @@ Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1) const {
Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
OptSpecifier Id2) const {
- Arg *Res = 0;
+ Arg *Res = nullptr;
for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
if ((*it)->getOption().matches(Id0) ||
(*it)->getOption().matches(Id1) ||
@@ -106,7 +97,7 @@ Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
OptSpecifier Id2, OptSpecifier Id3) const {
- Arg *Res = 0;
+ Arg *Res = nullptr;
for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
if ((*it)->getOption().matches(Id0) ||
(*it)->getOption().matches(Id1) ||
@@ -123,7 +114,7 @@ Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
OptSpecifier Id2, OptSpecifier Id3,
OptSpecifier Id4) const {
- Arg *Res = 0;
+ Arg *Res = nullptr;
for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
if ((*it)->getOption().matches(Id0) ||
(*it)->getOption().matches(Id1) ||
@@ -141,7 +132,7 @@ Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
OptSpecifier Id2, OptSpecifier Id3,
OptSpecifier Id4, OptSpecifier Id5) const {
- Arg *Res = 0;
+ Arg *Res = nullptr;
for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
if ((*it)->getOption().matches(Id0) ||
(*it)->getOption().matches(Id1) ||
@@ -161,7 +152,7 @@ Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
OptSpecifier Id2, OptSpecifier Id3,
OptSpecifier Id4, OptSpecifier Id5,
OptSpecifier Id6) const {
- Arg *Res = 0;
+ Arg *Res = nullptr;
for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
if ((*it)->getOption().matches(Id0) ||
(*it)->getOption().matches(Id1) ||
@@ -182,7 +173,7 @@ Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
OptSpecifier Id2, OptSpecifier Id3,
OptSpecifier Id4, OptSpecifier Id5,
OptSpecifier Id6, OptSpecifier Id7) const {
- Arg *Res = 0;
+ Arg *Res = nullptr;
for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
if ((*it)->getOption().matches(Id0) ||
(*it)->getOption().matches(Id1) ||
@@ -243,44 +234,40 @@ void ArgList::AddLastArg(ArgStringList &Output, OptSpecifier Id0,
void ArgList::AddAllArgs(ArgStringList &Output, OptSpecifier Id0,
OptSpecifier Id1, OptSpecifier Id2) const {
- for (arg_iterator it = filtered_begin(Id0, Id1, Id2),
- ie = filtered_end(); it != ie; ++it) {
- (*it)->claim();
- (*it)->render(*this, Output);
+ for (auto Arg: filtered(Id0, Id1, Id2)) {
+ Arg->claim();
+ Arg->render(*this, Output);
}
}
void ArgList::AddAllArgValues(ArgStringList &Output, OptSpecifier Id0,
OptSpecifier Id1, OptSpecifier Id2) const {
- for (arg_iterator it = filtered_begin(Id0, Id1, Id2),
- ie = filtered_end(); it != ie; ++it) {
- (*it)->claim();
- for (unsigned i = 0, e = (*it)->getNumValues(); i != e; ++i)
- Output.push_back((*it)->getValue(i));
+ for (auto Arg : filtered(Id0, Id1, Id2)) {
+ Arg->claim();
+ for (unsigned i = 0, e = Arg->getNumValues(); i != e; ++i)
+ Output.push_back(Arg->getValue(i));
}
}
void ArgList::AddAllArgsTranslated(ArgStringList &Output, OptSpecifier Id0,
const char *Translation,
bool Joined) const {
- for (arg_iterator it = filtered_begin(Id0),
- ie = filtered_end(); it != ie; ++it) {
- (*it)->claim();
+ for (auto Arg: filtered(Id0)) {
+ Arg->claim();
if (Joined) {
Output.push_back(MakeArgString(StringRef(Translation) +
- (*it)->getValue(0)));
+ Arg->getValue(0)));
} else {
Output.push_back(Translation);
- Output.push_back((*it)->getValue(0));
+ Output.push_back(Arg->getValue(0));
}
}
}
void ArgList::ClaimAllArgs(OptSpecifier Id0) const {
- for (arg_iterator it = filtered_begin(Id0),
- ie = filtered_end(); it != ie; ++it)
- (*it)->claim();
+ for (auto Arg : filtered(Id0))
+ Arg->claim();
}
void ArgList::ClaimAllArgs() const {
@@ -291,8 +278,7 @@ void ArgList::ClaimAllArgs() const {
const char *ArgList::MakeArgString(const Twine &T) const {
SmallString<256> Str;
- T.toVector(Str);
- return MakeArgString(Str.str());
+ return MakeArgString(T.toStringRef(Str));
}
const char *ArgList::GetOrMakeJoinedArgString(unsigned Index,
@@ -349,52 +335,46 @@ DerivedArgList::DerivedArgList(const InputArgList &_BaseArgs)
: BaseArgs(_BaseArgs) {
}
-DerivedArgList::~DerivedArgList() {
- // We only own the arguments we explicitly synthesized.
- for (iterator it = SynthesizedArgs.begin(), ie = SynthesizedArgs.end();
- it != ie; ++it)
- delete *it;
-}
+DerivedArgList::~DerivedArgList() {}
const char *DerivedArgList::MakeArgString(StringRef Str) const {
return BaseArgs.MakeArgString(Str);
}
+void DerivedArgList::AddSynthesizedArg(Arg *A) {
+ SynthesizedArgs.push_back(std::unique_ptr<Arg>(A));
+}
+
Arg *DerivedArgList::MakeFlagArg(const Arg *BaseArg, const Option Opt) const {
- Arg *A = new Arg(Opt, ArgList::MakeArgString(Twine(Opt.getPrefix()) +
- Twine(Opt.getName())),
- BaseArgs.MakeIndex(Opt.getName()), BaseArg);
- SynthesizedArgs.push_back(A);
- return A;
+ SynthesizedArgs.push_back(
+ make_unique<Arg>(Opt, MakeArgString(Opt.getPrefix() + Opt.getName()),
+ BaseArgs.MakeIndex(Opt.getName()), BaseArg));
+ return SynthesizedArgs.back().get();
}
Arg *DerivedArgList::MakePositionalArg(const Arg *BaseArg, const Option Opt,
StringRef Value) const {
unsigned Index = BaseArgs.MakeIndex(Value);
- Arg *A = new Arg(Opt, ArgList::MakeArgString(Twine(Opt.getPrefix()) +
- Twine(Opt.getName())),
- Index, BaseArgs.getArgString(Index), BaseArg);
- SynthesizedArgs.push_back(A);
- return A;
+ SynthesizedArgs.push_back(
+ make_unique<Arg>(Opt, MakeArgString(Opt.getPrefix() + Opt.getName()),
+ Index, BaseArgs.getArgString(Index), BaseArg));
+ return SynthesizedArgs.back().get();
}
Arg *DerivedArgList::MakeSeparateArg(const Arg *BaseArg, const Option Opt,
StringRef Value) const {
unsigned Index = BaseArgs.MakeIndex(Opt.getName(), Value);
- Arg *A = new Arg(Opt, ArgList::MakeArgString(Twine(Opt.getPrefix()) +
- Twine(Opt.getName())),
- Index, BaseArgs.getArgString(Index + 1), BaseArg);
- SynthesizedArgs.push_back(A);
- return A;
+ SynthesizedArgs.push_back(
+ make_unique<Arg>(Opt, MakeArgString(Opt.getPrefix() + Opt.getName()),
+ Index, BaseArgs.getArgString(Index + 1), BaseArg));
+ return SynthesizedArgs.back().get();
}
Arg *DerivedArgList::MakeJoinedArg(const Arg *BaseArg, const Option Opt,
StringRef Value) const {
unsigned Index = BaseArgs.MakeIndex(Opt.getName().str() + Value.str());
- Arg *A = new Arg(Opt, ArgList::MakeArgString(Twine(Opt.getPrefix()) +
- Twine(Opt.getName())), Index,
- BaseArgs.getArgString(Index) + Opt.getName().size(),
- BaseArg);
- SynthesizedArgs.push_back(A);
- return A;
+ SynthesizedArgs.push_back(make_unique<Arg>(
+ Opt, MakeArgString(Opt.getPrefix() + Opt.getName()), Index,
+ BaseArgs.getArgString(Index) + Opt.getName().size(), BaseArg));
+ return SynthesizedArgs.back().get();
}
diff --git a/contrib/llvm/lib/Option/OptTable.cpp b/contrib/llvm/lib/Option/OptTable.cpp
index 6fa459a..6842f4d 100644
--- a/contrib/llvm/lib/Option/OptTable.cpp
+++ b/contrib/llvm/lib/Option/OptTable.cpp
@@ -62,7 +62,7 @@ static inline bool operator<(const OptTable::Info &A, const OptTable::Info &B) {
for (const char * const *APre = A.Prefixes,
* const *BPre = B.Prefixes;
- *APre != 0 && *BPre != 0; ++APre, ++BPre) {
+ *APre != nullptr && *BPre != nullptr; ++APre, ++BPre){
if (int N = StrCmpOptionName(*APre, *BPre))
return N < 0;
}
@@ -136,7 +136,7 @@ OptTable::OptTable(const Info *_OptionInfos, unsigned _NumOptionInfos,
for (unsigned i = FirstSearchableIndex + 1, e = getNumOptions() + 1;
i != e; ++i) {
if (const char *const *P = getInfo(i).Prefixes) {
- for (; *P != 0; ++P) {
+ for (; *P != nullptr; ++P) {
PrefixesUnion.insert(*P);
}
}
@@ -160,7 +160,7 @@ OptTable::~OptTable() {
const Option OptTable::getOption(OptSpecifier Opt) const {
unsigned id = Opt.getID();
if (id == 0)
- return Option(0, 0);
+ return Option(nullptr, nullptr);
assert((unsigned) (id - 1) < getNumOptions() && "Invalid ID.");
return Option(&getInfo(id), this);
}
@@ -178,7 +178,7 @@ static bool isInput(const llvm::StringSet<> &Prefixes, StringRef Arg) {
/// \returns Matched size. 0 means no match.
static unsigned matchOption(const OptTable::Info *I, StringRef Str,
bool IgnoreCase) {
- for (const char * const *Pre = I->Prefixes; *Pre != 0; ++Pre) {
+ for (const char * const *Pre = I->Prefixes; *Pre != nullptr; ++Pre) {
StringRef Prefix(*Pre);
if (Str.startswith(Prefix)) {
StringRef Rest = Str.substr(Prefix.size());
@@ -240,7 +240,7 @@ Arg *OptTable::ParseOneArg(const ArgList &Args, unsigned &Index,
// Otherwise, see if this argument was missing values.
if (Prev != Index)
- return 0;
+ return nullptr;
}
// If we failed to find an option and this arg started with /, then it's
diff --git a/contrib/llvm/lib/Option/Option.cpp b/contrib/llvm/lib/Option/Option.cpp
index 7b5ff2b..10662a3 100644
--- a/contrib/llvm/lib/Option/Option.cpp
+++ b/contrib/llvm/lib/Option/Option.cpp
@@ -58,8 +58,8 @@ void Option::dump() const {
if (Info->Prefixes) {
llvm::errs() << " Prefixes:[";
- for (const char * const *Pre = Info->Prefixes; *Pre != 0; ++Pre) {
- llvm::errs() << '"' << *Pre << (*(Pre + 1) == 0 ? "\"" : "\", ");
+ for (const char * const *Pre = Info->Prefixes; *Pre != nullptr; ++Pre) {
+ llvm::errs() << '"' << *Pre << (*(Pre + 1) == nullptr ? "\"" : "\", ");
}
llvm::errs() << ']';
}
@@ -116,7 +116,7 @@ Arg *Option::accept(const ArgList &Args,
switch (getKind()) {
case FlagClass: {
if (ArgSize != strlen(Args.getArgString(Index)))
- return 0;
+ return nullptr;
Arg *A = new Arg(UnaliasedOption, Spelling, Index++);
if (getAliasArgs()) {
@@ -166,11 +166,11 @@ Arg *Option::accept(const ArgList &Args,
// Matches iff this is an exact match.
// FIXME: Avoid strlen.
if (ArgSize != strlen(Args.getArgString(Index)))
- return 0;
+ return nullptr;
Index += 2;
if (Index > Args.getNumInputArgStrings())
- return 0;
+ return nullptr;
return new Arg(UnaliasedOption, Spelling,
Index - 2, Args.getArgString(Index - 1));
@@ -178,11 +178,11 @@ Arg *Option::accept(const ArgList &Args,
// Matches iff this is an exact match.
// FIXME: Avoid strlen.
if (ArgSize != strlen(Args.getArgString(Index)))
- return 0;
+ return nullptr;
Index += 1 + getNumArgs();
if (Index > Args.getNumInputArgStrings())
- return 0;
+ return nullptr;
Arg *A = new Arg(UnaliasedOption, Spelling, Index - 1 - getNumArgs(),
Args.getArgString(Index - getNumArgs()));
@@ -201,7 +201,7 @@ Arg *Option::accept(const ArgList &Args,
// Otherwise it must be separate.
Index += 2;
if (Index > Args.getNumInputArgStrings())
- return 0;
+ return nullptr;
return new Arg(UnaliasedOption, Spelling,
Index - 2, Args.getArgString(Index - 1));
@@ -210,7 +210,7 @@ Arg *Option::accept(const ArgList &Args,
// Always matches.
Index += 2;
if (Index > Args.getNumInputArgStrings())
- return 0;
+ return nullptr;
return new Arg(UnaliasedOption, Spelling, Index - 2,
Args.getArgString(Index - 2) + ArgSize,
@@ -219,7 +219,7 @@ Arg *Option::accept(const ArgList &Args,
// Matches iff this is an exact match.
// FIXME: Avoid strlen.
if (ArgSize != strlen(Args.getArgString(Index)))
- return 0;
+ return nullptr;
Arg *A = new Arg(UnaliasedOption, Spelling, Index++);
while (Index < Args.getNumInputArgStrings())
A->getValues().push_back(Args.getArgString(Index++));
diff --git a/contrib/llvm/lib/ProfileData/CMakeLists.txt b/contrib/llvm/lib/ProfileData/CMakeLists.txt
new file mode 100644
index 0000000..aefb16c
--- /dev/null
+++ b/contrib/llvm/lib/ProfileData/CMakeLists.txt
@@ -0,0 +1,5 @@
+add_llvm_library(LLVMProfileData
+ InstrProf.cpp
+ InstrProfReader.cpp
+ InstrProfWriter.cpp
+ )
diff --git a/contrib/llvm/lib/ProfileData/InstrProf.cpp b/contrib/llvm/lib/ProfileData/InstrProf.cpp
new file mode 100644
index 0000000..0121222
--- /dev/null
+++ b/contrib/llvm/lib/ProfileData/InstrProf.cpp
@@ -0,0 +1,61 @@
+//=-- InstrProf.cpp - Instrumented profiling format support -----------------=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for clang's instrumentation based PGO and
+// coverage.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+namespace {
+class InstrProfErrorCategoryType : public std::error_category {
+ const char *name() const LLVM_NOEXCEPT override { return "llvm.instrprof"; }
+ std::string message(int IE) const override {
+ instrprof_error E = static_cast<instrprof_error>(IE);
+ switch (E) {
+ case instrprof_error::success:
+ return "Success";
+ case instrprof_error::eof:
+ return "End of File";
+ case instrprof_error::bad_magic:
+ return "Invalid file format (bad magic)";
+ case instrprof_error::bad_header:
+ return "Invalid header";
+ case instrprof_error::unsupported_version:
+ return "Unsupported format version";
+ case instrprof_error::unsupported_hash_type:
+ return "Unsupported hash function";
+ case instrprof_error::too_large:
+ return "Too much profile data";
+ case instrprof_error::truncated:
+ return "Truncated profile data";
+ case instrprof_error::malformed:
+ return "Malformed profile data";
+ case instrprof_error::unknown_function:
+ return "No profile data available for function";
+ case instrprof_error::hash_mismatch:
+ return "Function hash mismatch";
+ case instrprof_error::count_mismatch:
+ return "Function count mismatch";
+ case instrprof_error::counter_overflow:
+ return "Counter overflow";
+ }
+ llvm_unreachable("A value of instrprof_error has no message.");
+ }
+};
+}
+
+const std::error_category &llvm::instrprof_category() {
+ static InstrProfErrorCategoryType C;
+ return C;
+}
diff --git a/contrib/llvm/lib/ProfileData/InstrProfIndexed.h b/contrib/llvm/lib/ProfileData/InstrProfIndexed.h
new file mode 100644
index 0000000..7761704
--- /dev/null
+++ b/contrib/llvm/lib/ProfileData/InstrProfIndexed.h
@@ -0,0 +1,55 @@
+//=-- InstrProfIndexed.h - Indexed profiling format support -------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Shared header for the instrumented profile data reader and writer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PROFILEDATA_INSTRPROF_INDEXED_H_
+#define LLVM_PROFILEDATA_INSTRPROF_INDEXED_H_
+
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MD5.h"
+
+namespace llvm {
+
+namespace IndexedInstrProf {
+enum class HashT : uint32_t {
+ MD5,
+
+ Last = MD5
+};
+
+static inline uint64_t MD5Hash(StringRef Str) {
+ MD5 Hash;
+ Hash.update(Str);
+ llvm::MD5::MD5Result Result;
+ Hash.final(Result);
+ // Return the least significant 8 bytes. Our MD5 implementation returns the
+ // result in little endian, so we may need to swap bytes.
+ using namespace llvm::support;
+ return endian::read<uint64_t, little, unaligned>(Result);
+}
+
+static inline uint64_t ComputeHash(HashT Type, StringRef K) {
+ switch (Type) {
+ case HashT::MD5:
+ return IndexedInstrProf::MD5Hash(K);
+ }
+ llvm_unreachable("Unhandled hash type");
+}
+
+const uint64_t Magic = 0x8169666f72706cff; // "\xfflprofi\x81"
+const uint64_t Version = 1;
+const HashT HashType = HashT::MD5;
+}
+
+} // end namespace llvm
+
+#endif // LLVM_PROFILEDATA_INSTRPROF_INDEXED_H_
diff --git a/contrib/llvm/lib/ProfileData/InstrProfReader.cpp b/contrib/llvm/lib/ProfileData/InstrProfReader.cpp
new file mode 100644
index 0000000..0b36728
--- /dev/null
+++ b/contrib/llvm/lib/ProfileData/InstrProfReader.cpp
@@ -0,0 +1,360 @@
+//=-- InstrProfReader.cpp - Instrumented profiling reader -------------------=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for reading profiling data for clang's
+// instrumentation based PGO and coverage.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ProfileData/InstrProfReader.h"
+#include "llvm/ProfileData/InstrProf.h"
+
+#include "InstrProfIndexed.h"
+
+#include <cassert>
+
+using namespace llvm;
+
+static std::error_code
+setupMemoryBuffer(std::string Path, std::unique_ptr<MemoryBuffer> &Buffer) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr =
+ MemoryBuffer::getFileOrSTDIN(Path);
+ if (std::error_code EC = BufferOrErr.getError())
+ return EC;
+ Buffer = std::move(BufferOrErr.get());
+
+ // Sanity check the file.
+ if (Buffer->getBufferSize() > std::numeric_limits<unsigned>::max())
+ return instrprof_error::too_large;
+ return instrprof_error::success;
+}
+
+static std::error_code initializeReader(InstrProfReader &Reader) {
+ return Reader.readHeader();
+}
+
+std::error_code
+InstrProfReader::create(std::string Path,
+ std::unique_ptr<InstrProfReader> &Result) {
+ // Set up the buffer to read.
+ std::unique_ptr<MemoryBuffer> Buffer;
+ if (std::error_code EC = setupMemoryBuffer(Path, Buffer))
+ return EC;
+
+ // Create the reader.
+ if (IndexedInstrProfReader::hasFormat(*Buffer))
+ Result.reset(new IndexedInstrProfReader(std::move(Buffer)));
+ else if (RawInstrProfReader64::hasFormat(*Buffer))
+ Result.reset(new RawInstrProfReader64(std::move(Buffer)));
+ else if (RawInstrProfReader32::hasFormat(*Buffer))
+ Result.reset(new RawInstrProfReader32(std::move(Buffer)));
+ else
+ Result.reset(new TextInstrProfReader(std::move(Buffer)));
+
+ // Initialize the reader and return the result.
+ return initializeReader(*Result);
+}
+
+std::error_code IndexedInstrProfReader::create(
+ std::string Path, std::unique_ptr<IndexedInstrProfReader> &Result) {
+ // Set up the buffer to read.
+ std::unique_ptr<MemoryBuffer> Buffer;
+ if (std::error_code EC = setupMemoryBuffer(Path, Buffer))
+ return EC;
+
+ // Create the reader.
+ if (!IndexedInstrProfReader::hasFormat(*Buffer))
+ return instrprof_error::bad_magic;
+ Result.reset(new IndexedInstrProfReader(std::move(Buffer)));
+
+ // Initialize the reader and return the result.
+ return initializeReader(*Result);
+}
+
+void InstrProfIterator::Increment() {
+ if (Reader->readNextRecord(Record))
+ *this = InstrProfIterator();
+}
+
+std::error_code TextInstrProfReader::readNextRecord(InstrProfRecord &Record) {
+ // Skip empty lines.
+ while (!Line.is_at_end() && Line->empty())
+ ++Line;
+ // If we hit EOF while looking for a name, we're done.
+ if (Line.is_at_end())
+ return error(instrprof_error::eof);
+
+ // Read the function name.
+ Record.Name = *Line++;
+
+ // Read the function hash.
+ if (Line.is_at_end())
+ return error(instrprof_error::truncated);
+ if ((Line++)->getAsInteger(10, Record.Hash))
+ return error(instrprof_error::malformed);
+
+ // Read the number of counters.
+ uint64_t NumCounters;
+ if (Line.is_at_end())
+ return error(instrprof_error::truncated);
+ if ((Line++)->getAsInteger(10, NumCounters))
+ return error(instrprof_error::malformed);
+ if (NumCounters == 0)
+ return error(instrprof_error::malformed);
+
+ // Read each counter and fill our internal storage with the values.
+ Counts.clear();
+ Counts.reserve(NumCounters);
+ for (uint64_t I = 0; I < NumCounters; ++I) {
+ if (Line.is_at_end())
+ return error(instrprof_error::truncated);
+ uint64_t Count;
+ if ((Line++)->getAsInteger(10, Count))
+ return error(instrprof_error::malformed);
+ Counts.push_back(Count);
+ }
+ // Give the record a reference to our internal counter storage.
+ Record.Counts = Counts;
+
+ return success();
+}
+
+template <class IntPtrT>
+static uint64_t getRawMagic();
+
+template <>
+uint64_t getRawMagic<uint64_t>() {
+ return
+ uint64_t(255) << 56 |
+ uint64_t('l') << 48 |
+ uint64_t('p') << 40 |
+ uint64_t('r') << 32 |
+ uint64_t('o') << 24 |
+ uint64_t('f') << 16 |
+ uint64_t('r') << 8 |
+ uint64_t(129);
+}
+
+template <>
+uint64_t getRawMagic<uint32_t>() {
+ return
+ uint64_t(255) << 56 |
+ uint64_t('l') << 48 |
+ uint64_t('p') << 40 |
+ uint64_t('r') << 32 |
+ uint64_t('o') << 24 |
+ uint64_t('f') << 16 |
+ uint64_t('R') << 8 |
+ uint64_t(129);
+}
+
+template <class IntPtrT>
+bool RawInstrProfReader<IntPtrT>::hasFormat(const MemoryBuffer &DataBuffer) {
+ if (DataBuffer.getBufferSize() < sizeof(uint64_t))
+ return false;
+ uint64_t Magic =
+ *reinterpret_cast<const uint64_t *>(DataBuffer.getBufferStart());
+ return getRawMagic<IntPtrT>() == Magic ||
+ sys::getSwappedBytes(getRawMagic<IntPtrT>()) == Magic;
+}
+
+template <class IntPtrT>
+std::error_code RawInstrProfReader<IntPtrT>::readHeader() {
+ if (!hasFormat(*DataBuffer))
+ return error(instrprof_error::bad_magic);
+ if (DataBuffer->getBufferSize() < sizeof(RawHeader))
+ return error(instrprof_error::bad_header);
+ auto *Header =
+ reinterpret_cast<const RawHeader *>(DataBuffer->getBufferStart());
+ ShouldSwapBytes = Header->Magic != getRawMagic<IntPtrT>();
+ return readHeader(*Header);
+}
+
+template <class IntPtrT>
+std::error_code
+RawInstrProfReader<IntPtrT>::readNextHeader(const char *CurrentPos) {
+ const char *End = DataBuffer->getBufferEnd();
+ // Skip zero padding between profiles.
+ while (CurrentPos != End && *CurrentPos == 0)
+ ++CurrentPos;
+ // If there's nothing left, we're done.
+ if (CurrentPos == End)
+ return instrprof_error::eof;
+ // If there isn't enough space for another header, this is probably just
+ // garbage at the end of the file.
+ if (CurrentPos + sizeof(RawHeader) > End)
+ return instrprof_error::malformed;
+ // The magic should have the same byte order as in the previous header.
+ uint64_t Magic = *reinterpret_cast<const uint64_t *>(CurrentPos);
+ if (Magic != swap(getRawMagic<IntPtrT>()))
+ return instrprof_error::bad_magic;
+
+ // There's another profile to read, so we need to process the header.
+ auto *Header = reinterpret_cast<const RawHeader *>(CurrentPos);
+ return readHeader(*Header);
+}
+
+static uint64_t getRawVersion() {
+ return 1;
+}
+
+template <class IntPtrT>
+std::error_code
+RawInstrProfReader<IntPtrT>::readHeader(const RawHeader &Header) {
+ if (swap(Header.Version) != getRawVersion())
+ return error(instrprof_error::unsupported_version);
+
+ CountersDelta = swap(Header.CountersDelta);
+ NamesDelta = swap(Header.NamesDelta);
+ auto DataSize = swap(Header.DataSize);
+ auto CountersSize = swap(Header.CountersSize);
+ auto NamesSize = swap(Header.NamesSize);
+
+ ptrdiff_t DataOffset = sizeof(RawHeader);
+ ptrdiff_t CountersOffset = DataOffset + sizeof(ProfileData) * DataSize;
+ ptrdiff_t NamesOffset = CountersOffset + sizeof(uint64_t) * CountersSize;
+ size_t ProfileSize = NamesOffset + sizeof(char) * NamesSize;
+
+ auto *Start = reinterpret_cast<const char *>(&Header);
+ if (Start + ProfileSize > DataBuffer->getBufferEnd())
+ return error(instrprof_error::bad_header);
+
+ Data = reinterpret_cast<const ProfileData *>(Start + DataOffset);
+ DataEnd = Data + DataSize;
+ CountersStart = reinterpret_cast<const uint64_t *>(Start + CountersOffset);
+ NamesStart = Start + NamesOffset;
+ ProfileEnd = Start + ProfileSize;
+
+ return success();
+}
+
+template <class IntPtrT>
+std::error_code
+RawInstrProfReader<IntPtrT>::readNextRecord(InstrProfRecord &Record) {
+ if (Data == DataEnd)
+ if (std::error_code EC = readNextHeader(ProfileEnd))
+ return EC;
+
+ // Get the raw data.
+ StringRef RawName(getName(Data->NamePtr), swap(Data->NameSize));
+ uint32_t NumCounters = swap(Data->NumCounters);
+ if (NumCounters == 0)
+ return error(instrprof_error::malformed);
+ auto RawCounts = makeArrayRef(getCounter(Data->CounterPtr), NumCounters);
+
+ // Check bounds.
+ auto *NamesStartAsCounter = reinterpret_cast<const uint64_t *>(NamesStart);
+ if (RawName.data() < NamesStart ||
+ RawName.data() + RawName.size() > DataBuffer->getBufferEnd() ||
+ RawCounts.data() < CountersStart ||
+ RawCounts.data() + RawCounts.size() > NamesStartAsCounter)
+ return error(instrprof_error::malformed);
+
+ // Store the data in Record, byte-swapping as necessary.
+ Record.Hash = swap(Data->FuncHash);
+ Record.Name = RawName;
+ if (ShouldSwapBytes) {
+ Counts.clear();
+ Counts.reserve(RawCounts.size());
+ for (uint64_t Count : RawCounts)
+ Counts.push_back(swap(Count));
+ Record.Counts = Counts;
+ } else
+ Record.Counts = RawCounts;
+
+ // Iterate.
+ ++Data;
+ return success();
+}
+
+namespace llvm {
+template class RawInstrProfReader<uint32_t>;
+template class RawInstrProfReader<uint64_t>;
+}
+
+InstrProfLookupTrait::hash_value_type
+InstrProfLookupTrait::ComputeHash(StringRef K) {
+ return IndexedInstrProf::ComputeHash(HashType, K);
+}
+
+bool IndexedInstrProfReader::hasFormat(const MemoryBuffer &DataBuffer) {
+ if (DataBuffer.getBufferSize() < 8)
+ return false;
+ using namespace support;
+ uint64_t Magic =
+ endian::read<uint64_t, little, aligned>(DataBuffer.getBufferStart());
+ return Magic == IndexedInstrProf::Magic;
+}
+
+std::error_code IndexedInstrProfReader::readHeader() {
+ const unsigned char *Start =
+ (const unsigned char *)DataBuffer->getBufferStart();
+ const unsigned char *Cur = Start;
+ if ((const unsigned char *)DataBuffer->getBufferEnd() - Cur < 24)
+ return error(instrprof_error::truncated);
+
+ using namespace support;
+
+ // Check the magic number.
+ uint64_t Magic = endian::readNext<uint64_t, little, unaligned>(Cur);
+ if (Magic != IndexedInstrProf::Magic)
+ return error(instrprof_error::bad_magic);
+
+ // Read the version.
+ uint64_t Version = endian::readNext<uint64_t, little, unaligned>(Cur);
+ if (Version != IndexedInstrProf::Version)
+ return error(instrprof_error::unsupported_version);
+
+ // Read the maximal function count.
+ MaxFunctionCount = endian::readNext<uint64_t, little, unaligned>(Cur);
+
+ // Read the hash type and start offset.
+ IndexedInstrProf::HashT HashType = static_cast<IndexedInstrProf::HashT>(
+ endian::readNext<uint64_t, little, unaligned>(Cur));
+ if (HashType > IndexedInstrProf::HashT::Last)
+ return error(instrprof_error::unsupported_hash_type);
+ uint64_t HashOffset = endian::readNext<uint64_t, little, unaligned>(Cur);
+
+ // The rest of the file is an on disk hash table.
+ Index.reset(InstrProfReaderIndex::Create(Start + HashOffset, Cur, Start,
+ InstrProfLookupTrait(HashType)));
+ // Set up our iterator for readNextRecord.
+ RecordIterator = Index->data_begin();
+
+ return success();
+}
+
+std::error_code IndexedInstrProfReader::getFunctionCounts(
+ StringRef FuncName, uint64_t &FuncHash, std::vector<uint64_t> &Counts) {
+ const auto &Iter = Index->find(FuncName);
+ if (Iter == Index->end())
+ return error(instrprof_error::unknown_function);
+
+ // Found it. Make sure it's valid before giving back a result.
+ const InstrProfRecord &Record = *Iter;
+ if (Record.Name.empty())
+ return error(instrprof_error::malformed);
+ FuncHash = Record.Hash;
+ Counts = Record.Counts;
+ return success();
+}
+
+std::error_code
+IndexedInstrProfReader::readNextRecord(InstrProfRecord &Record) {
+ // Are we out of records?
+ if (RecordIterator == Index->data_end())
+ return error(instrprof_error::eof);
+
+ // Read the next one.
+ Record = *RecordIterator;
+ ++RecordIterator;
+ if (Record.Name.empty())
+ return error(instrprof_error::malformed);
+ return success();
+}
diff --git a/contrib/llvm/lib/ProfileData/InstrProfWriter.cpp b/contrib/llvm/lib/ProfileData/InstrProfWriter.cpp
new file mode 100644
index 0000000..e55c299
--- /dev/null
+++ b/contrib/llvm/lib/ProfileData/InstrProfWriter.cpp
@@ -0,0 +1,127 @@
+//=-- InstrProfWriter.cpp - Instrumented profiling writer -------------------=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for writing profiling data for clang's
+// instrumentation based PGO and coverage.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ProfileData/InstrProfWriter.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/OnDiskHashTable.h"
+
+#include "InstrProfIndexed.h"
+
+using namespace llvm;
+
+namespace {
+class InstrProfRecordTrait {
+public:
+ typedef StringRef key_type;
+ typedef StringRef key_type_ref;
+
+ typedef const InstrProfWriter::CounterData *const data_type;
+ typedef const InstrProfWriter::CounterData *const data_type_ref;
+
+ typedef uint64_t hash_value_type;
+ typedef uint64_t offset_type;
+
+ static hash_value_type ComputeHash(key_type_ref K) {
+ return IndexedInstrProf::ComputeHash(IndexedInstrProf::HashType, K);
+ }
+
+ static std::pair<offset_type, offset_type>
+ EmitKeyDataLength(raw_ostream &Out, key_type_ref K, data_type_ref V) {
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+
+ offset_type N = K.size();
+ LE.write<offset_type>(N);
+
+ offset_type M = (1 + V->Counts.size()) * sizeof(uint64_t);
+ LE.write<offset_type>(M);
+
+ return std::make_pair(N, M);
+ }
+
+ static void EmitKey(raw_ostream &Out, key_type_ref K, offset_type N){
+ Out.write(K.data(), N);
+ }
+
+ static void EmitData(raw_ostream &Out, key_type_ref, data_type_ref V,
+ offset_type) {
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+ LE.write<uint64_t>(V->Hash);
+ for (uint64_t I : V->Counts)
+ LE.write<uint64_t>(I);
+ }
+};
+}
+
+std::error_code
+InstrProfWriter::addFunctionCounts(StringRef FunctionName,
+ uint64_t FunctionHash,
+ ArrayRef<uint64_t> Counters) {
+ auto Where = FunctionData.find(FunctionName);
+ if (Where == FunctionData.end()) {
+ // If this is the first time we've seen this function, just add it.
+ auto &Data = FunctionData[FunctionName];
+ Data.Hash = FunctionHash;
+ Data.Counts = Counters;
+ return instrprof_error::success;
+ }
+
+ auto &Data = Where->getValue();
+ // We can only add to existing functions if they match, so we check the hash
+ // and number of counters.
+ if (Data.Hash != FunctionHash)
+ return instrprof_error::hash_mismatch;
+ if (Data.Counts.size() != Counters.size())
+ return instrprof_error::count_mismatch;
+ // These match, add up the counters.
+ for (size_t I = 0, E = Counters.size(); I < E; ++I) {
+ if (Data.Counts[I] + Counters[I] < Data.Counts[I])
+ return instrprof_error::counter_overflow;
+ Data.Counts[I] += Counters[I];
+ }
+ return instrprof_error::success;
+}
+
+void InstrProfWriter::write(raw_fd_ostream &OS) {
+ OnDiskChainedHashTableGenerator<InstrProfRecordTrait> Generator;
+ uint64_t MaxFunctionCount = 0;
+
+ // Populate the hash table generator.
+ for (const auto &I : FunctionData) {
+ Generator.insert(I.getKey(), &I.getValue());
+ if (I.getValue().Counts[0] > MaxFunctionCount)
+ MaxFunctionCount = I.getValue().Counts[0];
+ }
+
+ using namespace llvm::support;
+ endian::Writer<little> LE(OS);
+
+ // Write the header.
+ LE.write<uint64_t>(IndexedInstrProf::Magic);
+ LE.write<uint64_t>(IndexedInstrProf::Version);
+ LE.write<uint64_t>(MaxFunctionCount);
+ LE.write<uint64_t>(static_cast<uint64_t>(IndexedInstrProf::HashType));
+
+ // Save a space to write the hash table start location.
+ uint64_t HashTableStartLoc = OS.tell();
+ LE.write<uint64_t>(0);
+ // Write the hash table.
+ uint64_t HashTableStart = Generator.Emit(OS);
+
+ // Go back and fill in the hash table start.
+ OS.seek(HashTableStartLoc);
+ LE.write<uint64_t>(HashTableStart);
+}
diff --git a/contrib/llvm/lib/ProfileData/LLVMBuild.txt b/contrib/llvm/lib/ProfileData/LLVMBuild.txt
new file mode 100644
index 0000000..0a8cbe3
--- /dev/null
+++ b/contrib/llvm/lib/ProfileData/LLVMBuild.txt
@@ -0,0 +1,22 @@
+;===- ./lib/ProfileData/LLVMBuild.txt --------------------------*- Conf -*--===;
+;
+; The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+; http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[component_0]
+type = Library
+name = ProfileData
+parent = Libraries
+required_libraries = Support
diff --git a/contrib/llvm/lib/ProfileData/Makefile b/contrib/llvm/lib/ProfileData/Makefile
new file mode 100644
index 0000000..2674361
--- /dev/null
+++ b/contrib/llvm/lib/ProfileData/Makefile
@@ -0,0 +1,14 @@
+##===- lib/ProfileData/Makefile ----------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../..
+LIBRARYNAME = LLVMProfileData
+BUILD_ARCHIVE := 1
+
+include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/lib/Support/APFloat.cpp b/contrib/llvm/lib/Support/APFloat.cpp
index 676e2d4..7989e30 100644
--- a/contrib/llvm/lib/Support/APFloat.cpp
+++ b/contrib/llvm/lib/Support/APFloat.cpp
@@ -683,6 +683,20 @@ APFloat::operator=(const APFloat &rhs)
return *this;
}
+APFloat &
+APFloat::operator=(APFloat &&rhs) {
+ freeSignificand();
+
+ semantics = rhs.semantics;
+ significand = rhs.significand;
+ exponent = rhs.exponent;
+ category = rhs.category;
+ sign = rhs.sign;
+
+ rhs.semantics = &Bogus;
+ return *this;
+}
+
bool
APFloat::isDenormal() const {
return isFiniteNonZero() && (exponent == semantics->minExponent) &&
@@ -806,6 +820,10 @@ APFloat::APFloat(const APFloat &rhs) {
assign(rhs);
}
+APFloat::APFloat(APFloat &&rhs) : semantics(&Bogus) {
+ *this = std::move(rhs);
+}
+
APFloat::~APFloat()
{
freeSignificand();
@@ -1340,7 +1358,7 @@ APFloat::addOrSubtractSpecials(const APFloat &rhs, bool subtract)
{
switch (PackCategoriesIntoKey(category, rhs.category)) {
default:
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
case PackCategoriesIntoKey(fcNaN, fcZero):
case PackCategoriesIntoKey(fcNaN, fcNormal):
@@ -1354,7 +1372,9 @@ APFloat::addOrSubtractSpecials(const APFloat &rhs, bool subtract)
case PackCategoriesIntoKey(fcZero, fcNaN):
case PackCategoriesIntoKey(fcNormal, fcNaN):
case PackCategoriesIntoKey(fcInfinity, fcNaN):
- sign = false;
+ // We need to be sure to flip the sign here for subtraction because we
+ // don't have a separate negate operation so -NaN becomes 0 - NaN here.
+ sign = rhs.sign ^ subtract;
category = fcNaN;
copySignificand(rhs);
return opOK;
@@ -1467,7 +1487,7 @@ APFloat::multiplySpecials(const APFloat &rhs)
{
switch (PackCategoriesIntoKey(category, rhs.category)) {
default:
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
case PackCategoriesIntoKey(fcNaN, fcZero):
case PackCategoriesIntoKey(fcNaN, fcNormal):
@@ -1511,7 +1531,7 @@ APFloat::divideSpecials(const APFloat &rhs)
{
switch (PackCategoriesIntoKey(category, rhs.category)) {
default:
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
case PackCategoriesIntoKey(fcZero, fcNaN):
case PackCategoriesIntoKey(fcNormal, fcNaN):
@@ -1552,7 +1572,7 @@ APFloat::modSpecials(const APFloat &rhs)
{
switch (PackCategoriesIntoKey(category, rhs.category)) {
default:
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
case PackCategoriesIntoKey(fcNaN, fcZero):
case PackCategoriesIntoKey(fcNaN, fcNormal):
@@ -1661,7 +1681,7 @@ APFloat::multiply(const APFloat &rhs, roundingMode rounding_mode)
fs = multiplySpecials(rhs);
if (isFiniteNonZero()) {
- lostFraction lost_fraction = multiplySignificand(rhs, 0);
+ lostFraction lost_fraction = multiplySignificand(rhs, nullptr);
fs = normalize(rounding_mode, lost_fraction);
if (lost_fraction != lfExactlyZero)
fs = (opStatus) (fs | opInexact);
@@ -1864,7 +1884,7 @@ APFloat::compare(const APFloat &rhs) const
switch (PackCategoriesIntoKey(category, rhs.category)) {
default:
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
case PackCategoriesIntoKey(fcNaN, fcZero):
case PackCategoriesIntoKey(fcNaN, fcNormal):
@@ -2421,7 +2441,7 @@ APFloat::roundSignificandWithExponent(const integerPart *decSigParts,
if (exp >= 0) {
/* multiplySignificand leaves the precision-th bit set to 1. */
- calcLostFraction = decSig.multiplySignificand(pow5, NULL);
+ calcLostFraction = decSig.multiplySignificand(pow5, nullptr);
powHUerr = powStatus != opOK;
} else {
calcLostFraction = decSig.divideSignificand(pow5);
@@ -3313,7 +3333,7 @@ APFloat::initFromAPInt(const fltSemantics* Sem, const APInt& api)
if (Sem == &PPCDoubleDouble)
return initFromPPCDoubleDoubleAPInt(api);
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
APFloat
@@ -3776,8 +3796,8 @@ APFloat::opStatus APFloat::next(bool nextDown) {
// change the payload.
if (isSignaling()) {
result = opInvalidOp;
- // For consistency, propogate the sign of the sNaN to the qNaN.
- makeNaN(false, isNegative(), 0);
+ // For consistency, propagate the sign of the sNaN to the qNaN.
+ makeNaN(false, isNegative(), nullptr);
}
break;
case fcZero:
@@ -3816,7 +3836,7 @@ APFloat::opStatus APFloat::next(bool nextDown) {
// Decrement the significand.
//
// We always do this since:
- // 1. If we are dealing with a non binade decrement, by definition we
+ // 1. If we are dealing with a non-binade decrement, by definition we
// just decrement the significand.
// 2. If we are dealing with a normal -> normal binade decrement, since
// we have an explicit integral bit the fact that all bits but the
diff --git a/contrib/llvm/lib/Support/APInt.cpp b/contrib/llvm/lib/Support/APInt.cpp
index 89f96bd..fa929eb 100644
--- a/contrib/llvm/lib/Support/APInt.cpp
+++ b/contrib/llvm/lib/Support/APInt.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "apint"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/Hashing.h"
@@ -28,6 +27,8 @@
#include <limits>
using namespace llvm;
+#define DEBUG_TYPE "apint"
+
/// A utility function for allocating memory, checking for allocation failures,
/// and ensuring the contents are zeroed.
inline static uint64_t* getClearedMemory(unsigned numWords) {
@@ -1096,7 +1097,7 @@ APInt APInt::ashr(unsigned shiftAmt) const {
// to include in this word.
val[breakWord] = pVal[breakWord+offset] >> wordShift;
- // Deal with sign extenstion in the break word, and possibly the word before
+ // Deal with sign extension in the break word, and possibly the word before
// it.
if (isNegative()) {
if (wordShift > bitsInWord) {
@@ -1683,10 +1684,10 @@ void APInt::divide(const APInt LHS, unsigned lhsWords,
// Allocate space for the temporary values we need either on the stack, if
// it will fit, or on the heap if it won't.
unsigned SPACE[128];
- unsigned *U = 0;
- unsigned *V = 0;
- unsigned *Q = 0;
- unsigned *R = 0;
+ unsigned *U = nullptr;
+ unsigned *V = nullptr;
+ unsigned *Q = nullptr;
+ unsigned *R = nullptr;
if ((Remainder?4:3)*n+2*m+1 <= 128) {
U = &SPACE[0];
V = &SPACE[m+n+1];
@@ -1872,7 +1873,7 @@ APInt APInt::udiv(const APInt& RHS) const {
// We have to compute it the hard way. Invoke the Knuth divide algorithm.
APInt Quotient(1,0); // to hold result.
- divide(*this, lhsWords, RHS, rhsWords, &Quotient, 0);
+ divide(*this, lhsWords, RHS, rhsWords, &Quotient, nullptr);
return Quotient;
}
@@ -1920,7 +1921,7 @@ APInt APInt::urem(const APInt& RHS) const {
// We have to compute it the hard way. Invoke the Knuth divide algorithm.
APInt Remainder(1,0);
- divide(*this, lhsWords, RHS, rhsWords, 0, &Remainder);
+ divide(*this, lhsWords, RHS, rhsWords, nullptr, &Remainder);
return Remainder;
}
diff --git a/contrib/llvm/lib/Support/ARMBuildAttrs.cpp b/contrib/llvm/lib/Support/ARMBuildAttrs.cpp
new file mode 100644
index 0000000..960a0f1
--- /dev/null
+++ b/contrib/llvm/lib/Support/ARMBuildAttrs.cpp
@@ -0,0 +1,95 @@
+//===-- ARMBuildAttrs.cpp - ARM Build Attributes --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/ARMBuildAttributes.h"
+#include "llvm/ADT/StringRef.h"
+
+using namespace llvm;
+
+namespace {
+const struct {
+ ARMBuildAttrs::AttrType Attr;
+ const char *TagName;
+} ARMAttributeTags[] = {
+ { ARMBuildAttrs::File, "Tag_File" },
+ { ARMBuildAttrs::Section, "Tag_Section" },
+ { ARMBuildAttrs::Symbol, "Tag_Symbol" },
+ { ARMBuildAttrs::CPU_raw_name, "Tag_CPU_raw_name" },
+ { ARMBuildAttrs::CPU_name, "Tag_CPU_name" },
+ { ARMBuildAttrs::CPU_arch, "Tag_CPU_arch" },
+ { ARMBuildAttrs::CPU_arch_profile, "Tag_CPU_arch_profile" },
+ { ARMBuildAttrs::ARM_ISA_use, "Tag_ARM_ISA_use" },
+ { ARMBuildAttrs::THUMB_ISA_use, "Tag_THUMB_ISA_use" },
+ { ARMBuildAttrs::FP_arch, "Tag_FP_arch" },
+ { ARMBuildAttrs::WMMX_arch, "Tag_WMMX_arch" },
+ { ARMBuildAttrs::Advanced_SIMD_arch, "Tag_Advanced_SIMD_arch" },
+ { ARMBuildAttrs::PCS_config, "Tag_PCS_config" },
+ { ARMBuildAttrs::ABI_PCS_R9_use, "Tag_ABI_PCS_R9_use" },
+ { ARMBuildAttrs::ABI_PCS_RW_data, "Tag_ABI_PCS_RW_data" },
+ { ARMBuildAttrs::ABI_PCS_RO_data, "Tag_ABI_PCS_RO_data" },
+ { ARMBuildAttrs::ABI_PCS_GOT_use, "Tag_ABI_PCS_GOT_use" },
+ { ARMBuildAttrs::ABI_PCS_wchar_t, "Tag_ABI_PCS_wchar_t" },
+ { ARMBuildAttrs::ABI_FP_rounding, "Tag_ABI_FP_rounding" },
+ { ARMBuildAttrs::ABI_FP_denormal, "Tag_ABI_FP_denormal" },
+ { ARMBuildAttrs::ABI_FP_exceptions, "Tag_ABI_FP_exceptions" },
+ { ARMBuildAttrs::ABI_FP_user_exceptions, "Tag_ABI_FP_user_exceptions" },
+ { ARMBuildAttrs::ABI_FP_number_model, "Tag_ABI_FP_number_model" },
+ { ARMBuildAttrs::ABI_align_needed, "Tag_ABI_align_needed" },
+ { ARMBuildAttrs::ABI_align_preserved, "Tag_ABI_align_preserved" },
+ { ARMBuildAttrs::ABI_enum_size, "Tag_ABI_enum_size" },
+ { ARMBuildAttrs::ABI_HardFP_use, "Tag_ABI_HardFP_use" },
+ { ARMBuildAttrs::ABI_VFP_args, "Tag_ABI_VFP_args" },
+ { ARMBuildAttrs::ABI_WMMX_args, "Tag_ABI_WMMX_args" },
+ { ARMBuildAttrs::ABI_optimization_goals, "Tag_ABI_optimization_goals" },
+ { ARMBuildAttrs::ABI_FP_optimization_goals, "Tag_ABI_FP_optimization_goals" },
+ { ARMBuildAttrs::compatibility, "Tag_compatibility" },
+ { ARMBuildAttrs::CPU_unaligned_access, "Tag_CPU_unaligned_access" },
+ { ARMBuildAttrs::FP_HP_extension, "Tag_FP_HP_extension" },
+ { ARMBuildAttrs::ABI_FP_16bit_format, "Tag_ABI_FP_16bit_format" },
+ { ARMBuildAttrs::MPextension_use, "Tag_MPextension_use" },
+ { ARMBuildAttrs::DIV_use, "Tag_DIV_use" },
+ { ARMBuildAttrs::nodefaults, "Tag_nodefaults" },
+ { ARMBuildAttrs::also_compatible_with, "Tag_also_compatible_with" },
+ { ARMBuildAttrs::T2EE_use, "Tag_T2EE_use" },
+ { ARMBuildAttrs::conformance, "Tag_conformance" },
+ { ARMBuildAttrs::Virtualization_use, "Tag_Virtualization_use" },
+
+ // Legacy Names
+ { ARMBuildAttrs::FP_arch, "Tag_VFP_arch" },
+ { ARMBuildAttrs::FP_HP_extension, "Tag_VFP_HP_extension" },
+ { ARMBuildAttrs::ABI_align_needed, "Tag_ABI_align8_needed" },
+ { ARMBuildAttrs::ABI_align_preserved, "Tag_ABI_align8_preserved" },
+};
+}
+
+namespace llvm {
+namespace ARMBuildAttrs {
+StringRef AttrTypeAsString(unsigned Attr, bool HasTagPrefix) {
+ return AttrTypeAsString(static_cast<AttrType>(Attr), HasTagPrefix);
+}
+
+StringRef AttrTypeAsString(AttrType Attr, bool HasTagPrefix) {
+ for (unsigned TI = 0, TE = sizeof(ARMAttributeTags) / sizeof(*ARMAttributeTags);
+ TI != TE; ++TI)
+ if (ARMAttributeTags[TI].Attr == Attr)
+ return ARMAttributeTags[TI].TagName + (HasTagPrefix ? 0 : 4);
+ return "";
+}
+
+int AttrTypeFromString(StringRef Tag) {
+ bool HasTagPrefix = Tag.startswith("Tag_");
+ for (unsigned TI = 0, TE = sizeof(ARMAttributeTags) / sizeof(*ARMAttributeTags);
+ TI != TE; ++TI)
+ if (StringRef(ARMAttributeTags[TI].TagName + (HasTagPrefix ? 0 : 4)) == Tag)
+ return ARMAttributeTags[TI].Attr;
+ return -1;
+}
+}
+}
+
diff --git a/contrib/llvm/lib/Support/ARMWinEH.cpp b/contrib/llvm/lib/Support/ARMWinEH.cpp
new file mode 100644
index 0000000..03c150f
--- /dev/null
+++ b/contrib/llvm/lib/Support/ARMWinEH.cpp
@@ -0,0 +1,38 @@
+//===-- ARMWinEH.cpp - Windows on ARM EH Support Functions ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/ARMWinEH.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+namespace ARM {
+namespace WinEH {
+std::pair<uint16_t, uint32_t> SavedRegisterMask(const RuntimeFunction &RF) {
+ uint8_t NumRegisters = RF.Reg();
+ uint8_t RegistersVFP = RF.R();
+ uint8_t LinkRegister = RF.L();
+ uint8_t ChainedFrame = RF.C();
+
+ uint16_t GPRMask = (ChainedFrame << 11) | (LinkRegister << 14);
+ uint32_t VFPMask = 0;
+
+ if (RegistersVFP)
+ VFPMask |= (((1 << ((NumRegisters + 1) % 8)) - 1) << 8);
+ else
+ GPRMask |= (((1 << (NumRegisters + 1)) - 1) << 4);
+
+ if (PrologueFolding(RF))
+ GPRMask |= (((1 << (NumRegisters + 1)) - 1) << (~RF.StackAdjust() & 0x3));
+
+ return std::make_pair(GPRMask, VFPMask);
+}
+}
+}
+}
+
diff --git a/contrib/llvm/lib/Support/Allocator.cpp b/contrib/llvm/lib/Support/Allocator.cpp
index 6e7a541..7c306b2 100644
--- a/contrib/llvm/lib/Support/Allocator.cpp
+++ b/contrib/llvm/lib/Support/Allocator.cpp
@@ -21,149 +21,10 @@
namespace llvm {
-BumpPtrAllocator::BumpPtrAllocator(size_t size, size_t threshold,
- SlabAllocator &allocator)
- : SlabSize(size), SizeThreshold(std::min(size, threshold)),
- Allocator(allocator), CurSlab(0), BytesAllocated(0) { }
-
-BumpPtrAllocator::BumpPtrAllocator(size_t size, size_t threshold)
- : SlabSize(size), SizeThreshold(std::min(size, threshold)),
- Allocator(DefaultSlabAllocator), CurSlab(0), BytesAllocated(0) { }
-
-BumpPtrAllocator::~BumpPtrAllocator() {
- DeallocateSlabs(CurSlab);
-}
-
-/// AlignPtr - Align Ptr to Alignment bytes, rounding up. Alignment should
-/// be a power of two. This method rounds up, so AlignPtr(7, 4) == 8 and
-/// AlignPtr(8, 4) == 8.
-char *BumpPtrAllocator::AlignPtr(char *Ptr, size_t Alignment) {
- assert(Alignment && (Alignment & (Alignment - 1)) == 0 &&
- "Alignment is not a power of two!");
-
- // Do the alignment.
- return (char*)(((uintptr_t)Ptr + Alignment - 1) &
- ~(uintptr_t)(Alignment - 1));
-}
-
-/// StartNewSlab - Allocate a new slab and move the bump pointers over into
-/// the new slab. Modifies CurPtr and End.
-void BumpPtrAllocator::StartNewSlab() {
- // If we allocated a big number of slabs already it's likely that we're going
- // to allocate more. Increase slab size to reduce mallocs and possibly memory
- // overhead. The factors are chosen conservatively to avoid overallocation.
- if (BytesAllocated >= SlabSize * 128)
- SlabSize *= 2;
-
- MemSlab *NewSlab = Allocator.Allocate(SlabSize);
- NewSlab->NextPtr = CurSlab;
- CurSlab = NewSlab;
- CurPtr = (char*)(CurSlab + 1);
- End = ((char*)CurSlab) + CurSlab->Size;
-}
-
-/// DeallocateSlabs - Deallocate all memory slabs after and including this
-/// one.
-void BumpPtrAllocator::DeallocateSlabs(MemSlab *Slab) {
- while (Slab) {
- MemSlab *NextSlab = Slab->NextPtr;
-#ifndef NDEBUG
- // Poison the memory so stale pointers crash sooner. Note we must
- // preserve the Size and NextPtr fields at the beginning.
- sys::Memory::setRangeWritable(Slab + 1, Slab->Size - sizeof(MemSlab));
- memset(Slab + 1, 0xCD, Slab->Size - sizeof(MemSlab));
-#endif
- Allocator.Deallocate(Slab);
- Slab = NextSlab;
- }
-}
-
-/// Reset - Deallocate all but the current slab and reset the current pointer
-/// to the beginning of it, freeing all memory allocated so far.
-void BumpPtrAllocator::Reset() {
- if (!CurSlab)
- return;
- DeallocateSlabs(CurSlab->NextPtr);
- CurSlab->NextPtr = 0;
- CurPtr = (char*)(CurSlab + 1);
- End = ((char*)CurSlab) + CurSlab->Size;
- BytesAllocated = 0;
-}
-
-/// Allocate - Allocate space at the specified alignment.
-///
-void *BumpPtrAllocator::Allocate(size_t Size, size_t Alignment) {
- if (!CurSlab) // Start a new slab if we haven't allocated one already.
- StartNewSlab();
-
- // Keep track of how many bytes we've allocated.
- BytesAllocated += Size;
-
- // 0-byte alignment means 1-byte alignment.
- if (Alignment == 0) Alignment = 1;
-
- // Allocate the aligned space, going forwards from CurPtr.
- char *Ptr = AlignPtr(CurPtr, Alignment);
-
- // Check if we can hold it.
- if (Ptr + Size <= End) {
- CurPtr = Ptr + Size;
- // Update the allocation point of this memory block in MemorySanitizer.
- // Without this, MemorySanitizer messages for values originated from here
- // will point to the allocation of the entire slab.
- __msan_allocated_memory(Ptr, Size);
- return Ptr;
- }
-
- // If Size is really big, allocate a separate slab for it.
- size_t PaddedSize = Size + sizeof(MemSlab) + Alignment - 1;
- if (PaddedSize > SizeThreshold) {
- MemSlab *NewSlab = Allocator.Allocate(PaddedSize);
-
- // Put the new slab after the current slab, since we are not allocating
- // into it.
- NewSlab->NextPtr = CurSlab->NextPtr;
- CurSlab->NextPtr = NewSlab;
-
- Ptr = AlignPtr((char*)(NewSlab + 1), Alignment);
- assert((uintptr_t)Ptr + Size <= (uintptr_t)NewSlab + NewSlab->Size);
- __msan_allocated_memory(Ptr, Size);
- return Ptr;
- }
-
- // Otherwise, start a new slab and try again.
- StartNewSlab();
- Ptr = AlignPtr(CurPtr, Alignment);
- CurPtr = Ptr + Size;
- assert(CurPtr <= End && "Unable to allocate memory!");
- __msan_allocated_memory(Ptr, Size);
- return Ptr;
-}
-
-unsigned BumpPtrAllocator::GetNumSlabs() const {
- unsigned NumSlabs = 0;
- for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {
- ++NumSlabs;
- }
- return NumSlabs;
-}
-
-size_t BumpPtrAllocator::getTotalMemory() const {
- size_t TotalMemory = 0;
- for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {
- TotalMemory += Slab->Size;
- }
- return TotalMemory;
-}
-
-void BumpPtrAllocator::PrintStats() const {
- unsigned NumSlabs = 0;
- size_t TotalMemory = 0;
- for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {
- TotalMemory += Slab->Size;
- ++NumSlabs;
- }
+namespace detail {
+void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
+ size_t TotalMemory) {
errs() << "\nNumber of memory regions: " << NumSlabs << '\n'
<< "Bytes used: " << BytesAllocated << '\n'
<< "Bytes allocated: " << TotalMemory << '\n'
@@ -171,20 +32,7 @@ void BumpPtrAllocator::PrintStats() const {
<< " (includes alignment, etc)\n";
}
-SlabAllocator::~SlabAllocator() { }
-
-MallocSlabAllocator::~MallocSlabAllocator() { }
-
-MemSlab *MallocSlabAllocator::Allocate(size_t Size) {
- MemSlab *Slab = (MemSlab*)Allocator.Allocate(Size, 0);
- Slab->Size = Size;
- Slab->NextPtr = 0;
- return Slab;
-}
-
-void MallocSlabAllocator::Deallocate(MemSlab *Slab) {
- Allocator.Deallocate(Slab);
-}
+} // End namespace detail.
void PrintRecyclerStats(size_t Size,
size_t Align,
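[Editor's note] The large deletions above reflect BumpPtrAllocator becoming a header-only template in this release (BumpPtrAllocatorImpl in llvm/Support/Allocator.h); only the statistics printer keeps an out-of-line home in llvm::detail. A minimal usage sketch, assuming the 3.5 header interface:

    #include "llvm/Support/Allocator.h"

    // Objects are carved out of slabs and released together by Reset() or the
    // destructor; there is no per-object deallocation.
    void demo() {
      llvm::BumpPtrAllocator Alloc;
      int *P = static_cast<int *>(Alloc.Allocate(sizeof(int), alignof(int)));
      *P = 42;
      Alloc.Reset();  // frees every allocation made so far
    }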
diff --git a/contrib/llvm/lib/Support/Atomic.cpp b/contrib/llvm/lib/Support/Atomic.cpp
index 9559ad7..ac4ff3e 100644
--- a/contrib/llvm/lib/Support/Atomic.cpp
+++ b/contrib/llvm/lib/Support/Atomic.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This header file implements atomic operations.
+// This file implements atomic operations.
//
//===----------------------------------------------------------------------===//
@@ -17,6 +17,7 @@
using namespace llvm;
#if defined(_MSC_VER)
+#include <Intrin.h>
#include <windows.h>
#undef MemoryFence
#endif
diff --git a/contrib/llvm/lib/Support/BlockFrequency.cpp b/contrib/llvm/lib/Support/BlockFrequency.cpp
index 00efe90..6f7e341 100644
--- a/contrib/llvm/lib/Support/BlockFrequency.cpp
+++ b/contrib/llvm/lib/Support/BlockFrequency.cpp
@@ -18,94 +18,8 @@
using namespace llvm;
-/// Multiply FREQ by N and store result in W array.
-static void mult96bit(uint64_t freq, uint32_t N, uint32_t W[3]) {
- uint64_t u0 = freq & UINT32_MAX;
- uint64_t u1 = freq >> 32;
-
- // Represent 96-bit value as W[2]:W[1]:W[0];
- uint64_t t = u0 * N;
- uint64_t k = t >> 32;
- W[0] = t;
- t = u1 * N + k;
- W[1] = t;
- W[2] = t >> 32;
-}
-
-/// Divide 96-bit value stored in W[2]:W[1]:W[0] by D. Since our word size is a
-/// 32 bit unsigned integer, we can use a short division algorithm.
-static uint64_t divrem96bit(uint32_t W[3], uint32_t D, uint32_t *Rout) {
- // We assume that W[2] is non-zero since if W[2] is not then the user should
- // just use hardware division.
- assert(W[2] && "This routine assumes that W[2] is non-zero since if W[2] is "
- "zero, the caller should just use 64/32 hardware.");
- uint32_t Q[3] = { 0, 0, 0 };
-
- // The generalized short division algorithm sets i to m + n - 1, where n is
- // the number of words in the divisior and m is the number of words by which
- // the divident exceeds the divisor (i.e. m + n == the length of the dividend
- // in words). Due to our assumption that W[2] is non-zero, we know that the
- // dividend is of length 3 implying since n is 1 that m = 2. Thus we set i to
- // m + n - 1 = 2 + 1 - 1 = 2.
- uint32_t R = 0;
- for (int i = 2; i >= 0; --i) {
- uint64_t PartialD = uint64_t(R) << 32 | W[i];
- if (PartialD == 0) {
- Q[i] = 0;
- R = 0;
- } else if (PartialD < D) {
- Q[i] = 0;
- R = uint32_t(PartialD);
- } else if (PartialD == D) {
- Q[i] = 1;
- R = 0;
- } else {
- Q[i] = uint32_t(PartialD / D);
- R = uint32_t(PartialD - (Q[i] * D));
- }
- }
-
- // If Q[2] is non-zero, then we overflowed.
- uint64_t Result;
- if (Q[2]) {
- Result = UINT64_MAX;
- R = D;
- } else {
- // Form the final uint64_t result, avoiding endianness issues.
- Result = uint64_t(Q[0]) | (uint64_t(Q[1]) << 32);
- }
-
- if (Rout)
- *Rout = R;
-
- return Result;
-}
-
-uint32_t BlockFrequency::scale(uint32_t N, uint32_t D) {
- assert(D != 0 && "Division by zero");
-
- // Calculate Frequency * N.
- uint64_t MulLo = (Frequency & UINT32_MAX) * N;
- uint64_t MulHi = (Frequency >> 32) * N;
- uint64_t MulRes = (MulHi << 32) + MulLo;
-
- // If the product fits in 64 bits, just use built-in division.
- if (MulHi <= UINT32_MAX && MulRes >= MulLo) {
- Frequency = MulRes / D;
- return MulRes % D;
- }
-
- // Product overflowed, use 96-bit operations.
- // 96-bit value represented as W[2]:W[1]:W[0].
- uint32_t W[3];
- uint32_t R;
- mult96bit(Frequency, N, W);
- Frequency = divrem96bit(W, D, &R);
- return R;
-}
-
BlockFrequency &BlockFrequency::operator*=(const BranchProbability &Prob) {
- scale(Prob.getNumerator(), Prob.getDenominator());
+ Frequency = Prob.scale(Frequency);
return *this;
}
@@ -117,7 +31,7 @@ BlockFrequency::operator*(const BranchProbability &Prob) const {
}
BlockFrequency &BlockFrequency::operator/=(const BranchProbability &Prob) {
- scale(Prob.getDenominator(), Prob.getNumerator());
+ Frequency = Prob.scaleByInverse(Frequency);
return *this;
}
@@ -145,28 +59,14 @@ BlockFrequency::operator+(const BlockFrequency &Prob) const {
return Freq;
}
-uint32_t BlockFrequency::scale(const BranchProbability &Prob) {
- return scale(Prob.getNumerator(), Prob.getDenominator());
-}
-
-void BlockFrequency::print(raw_ostream &OS) const {
- // Convert fixed-point number to decimal.
- OS << Frequency / getEntryFrequency() << ".";
- uint64_t Rem = Frequency % getEntryFrequency();
- uint64_t Eps = 1;
- do {
- Rem *= 10;
- Eps *= 10;
- OS << Rem / getEntryFrequency();
- Rem = Rem % getEntryFrequency();
- } while (Rem >= Eps/2);
-}
+BlockFrequency &BlockFrequency::operator>>=(const unsigned count) {
+ // Frequency can never be 0 by design.
+ assert(Frequency != 0);
-namespace llvm {
-
-raw_ostream &operator<<(raw_ostream &OS, const BlockFrequency &Freq) {
- Freq.print(OS);
- return OS;
-}
+ // Shift right by count.
+ Frequency >>= count;
+ // Saturate to 1 if we are 0.
+ Frequency |= Frequency == 0;
+ return *this;
}
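[Editor's note] Two things change for BlockFrequency here: the 96-bit scaling code moves behind BranchProbability::scale/scaleByInverse, and the new operator>>= keeps the frequency from collapsing to zero by OR-ing in the comparison result. A tiny standalone sketch of that saturation idiom (plain uint64_t, not the BlockFrequency class itself):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t Frequency = 1;
      Frequency >>= 3;              // 1 >> 3 == 0
      Frequency |= Frequency == 0;  // (Frequency == 0) is 1, so saturate to 1
      assert(Frequency == 1);
      return 0;
    }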
diff --git a/contrib/llvm/lib/Support/BranchProbability.cpp b/contrib/llvm/lib/Support/BranchProbability.cpp
index e8b83e5..65878d6 100644
--- a/contrib/llvm/lib/Support/BranchProbability.cpp
+++ b/contrib/llvm/lib/Support/BranchProbability.cpp
@@ -18,19 +18,56 @@
using namespace llvm;
-void BranchProbability::print(raw_ostream &OS) const {
- OS << N << " / " << D << " = " << format("%g%%", ((double)N / D) * 100.0);
+raw_ostream &BranchProbability::print(raw_ostream &OS) const {
+ return OS << N << " / " << D << " = "
+ << format("%g%%", ((double)N / D) * 100.0);
}
-void BranchProbability::dump() const {
- dbgs() << *this << '\n';
-}
+void BranchProbability::dump() const { print(dbgs()) << '\n'; }
+
+static uint64_t scale(uint64_t Num, uint32_t N, uint32_t D) {
+ assert(D && "divide by 0");
+
+ // Fast path for multiplying by 1.0.
+ if (!Num || D == N)
+ return Num;
+
+ // Split Num into upper and lower parts to multiply, then recombine.
+ uint64_t ProductHigh = (Num >> 32) * N;
+ uint64_t ProductLow = (Num & UINT32_MAX) * N;
+
+ // Split into 32-bit digits.
+ uint32_t Upper32 = ProductHigh >> 32;
+ uint32_t Lower32 = ProductLow & UINT32_MAX;
+ uint32_t Mid32Partial = ProductHigh & UINT32_MAX;
+ uint32_t Mid32 = Mid32Partial + (ProductLow >> 32);
+
+ // Carry.
+ Upper32 += Mid32 < Mid32Partial;
-namespace llvm {
+ // Check for overflow.
+ if (Upper32 >= D)
+ return UINT64_MAX;
+
+ uint64_t Rem = (uint64_t(Upper32) << 32) | Mid32;
+ uint64_t UpperQ = Rem / D;
+
+ // Check for overflow.
+ if (UpperQ > UINT32_MAX)
+ return UINT64_MAX;
+
+ Rem = ((Rem % D) << 32) | Lower32;
+ uint64_t LowerQ = Rem / D;
+ uint64_t Q = (UpperQ << 32) + LowerQ;
+
+ // Check for overflow.
+ return Q < LowerQ ? UINT64_MAX : Q;
+}
-raw_ostream &operator<<(raw_ostream &OS, const BranchProbability &Prob) {
- Prob.print(OS);
- return OS;
+uint64_t BranchProbability::scale(uint64_t Num) const {
+ return ::scale(Num, N, D);
}
+uint64_t BranchProbability::scaleByInverse(uint64_t Num) const {
+ return ::scale(Num, D, N);
}
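[Editor's note] The new static scale() computes Num * N / D without losing precision by splitting the 96-bit product into 32-bit digits and doing a two-step short division, saturating to UINT64_MAX on overflow. As a cross-check, here is a reference version using unsigned __int128 (a GCC/Clang extension, not part of the patch) with two worked values:

    #include <cassert>
    #include <cstdint>

    // Reference for "Num * N / D, saturating", using the compiler's 128-bit
    // integers instead of the hand-rolled digit arithmetic.
    static uint64_t scaleRef(uint64_t Num, uint32_t N, uint32_t D) {
      unsigned __int128 Q = (unsigned __int128)Num * N / D;
      return Q > UINT64_MAX ? UINT64_MAX : (uint64_t)Q;
    }

    int main() {
      assert(scaleRef(1ULL << 40, 3, 7) == 471219269046ULL);  // floor(2^40 * 3 / 7)
      assert(scaleRef(UINT64_MAX, 2, 1) == UINT64_MAX);       // saturates
      return 0;
    }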
diff --git a/contrib/llvm/lib/Support/CommandLine.cpp b/contrib/llvm/lib/Support/CommandLine.cpp
index 44a88d8..586ecea 100644
--- a/contrib/llvm/lib/Support/CommandLine.cpp
+++ b/contrib/llvm/lib/Support/CommandLine.cpp
@@ -18,7 +18,6 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
@@ -32,13 +31,15 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/system_error.h"
#include <cerrno>
#include <cstdlib>
#include <map>
+#include <system_error>
using namespace llvm;
using namespace cl;
+#define DEBUG_TYPE "commandline"
+
//===----------------------------------------------------------------------===//
// Template instantiations and anchors.
//
@@ -82,7 +83,7 @@ void StringSaver::anchor() {}
// Globals for name and overview of program. Program name is not a string to
// avoid static ctor/dtor issues.
static char ProgramName[80] = "<premain>";
-static const char *ProgramOverview = 0;
+static const char *ProgramOverview = nullptr;
// This collects additional help to be printed.
static ManagedStatic<std::vector<const char*> > MoreHelp;
@@ -101,16 +102,23 @@ void cl::MarkOptionsChanged() {
/// RegisteredOptionList - This is the list of the command line options that
/// have statically constructed themselves.
-static Option *RegisteredOptionList = 0;
+static Option *RegisteredOptionList = nullptr;
void Option::addArgument() {
- assert(NextRegistered == 0 && "argument multiply registered!");
+ assert(!NextRegistered && "argument multiply registered!");
NextRegistered = RegisteredOptionList;
RegisteredOptionList = this;
MarkOptionsChanged();
}
+void Option::removeArgument() {
+ assert(NextRegistered && "argument never registered");
+ assert(RegisteredOptionList == this && "argument is not the last registered");
+ RegisteredOptionList = NextRegistered;
+ MarkOptionsChanged();
+}
+
// This collects the different option categories that have been registered.
typedef SmallPtrSet<OptionCategory*,16> OptionCatSet;
static ManagedStatic<OptionCatSet> RegisteredOptionCategories;
@@ -118,8 +126,13 @@ static ManagedStatic<OptionCatSet> RegisteredOptionCategories;
// Initialise the general option category.
OptionCategory llvm::cl::GeneralCategory("General options");
-void OptionCategory::registerCategory()
-{
+void OptionCategory::registerCategory() {
+ assert(std::count_if(RegisteredOptionCategories->begin(),
+ RegisteredOptionCategories->end(),
+ [this](const OptionCategory *Category) {
+ return getName() == Category->getName();
+ }) == 0 && "Duplicate option categories");
+
RegisteredOptionCategories->insert(this);
}
@@ -132,8 +145,9 @@ void OptionCategory::registerCategory()
static void GetOptionInfo(SmallVectorImpl<Option*> &PositionalOpts,
SmallVectorImpl<Option*> &SinkOpts,
StringMap<Option*> &OptionsMap) {
+ bool HadErrors = false;
SmallVector<const char*, 16> OptionNames;
- Option *CAOpt = 0; // The ConsumeAfter option if it exists.
+ Option *CAOpt = nullptr; // The ConsumeAfter option if it exists.
for (Option *O = RegisteredOptionList; O; O = O->getNextRegisteredOption()) {
// If this option wants to handle multiple option names, get the full set.
// This handles enum options like "-O1 -O2" etc.
@@ -145,8 +159,9 @@ static void GetOptionInfo(SmallVectorImpl<Option*> &PositionalOpts,
for (size_t i = 0, e = OptionNames.size(); i != e; ++i) {
// Add argument to the argument map!
if (OptionsMap.GetOrCreateValue(OptionNames[i], O).second != O) {
- errs() << ProgramName << ": CommandLine Error: Argument '"
- << OptionNames[i] << "' defined more than once!\n";
+ errs() << ProgramName << ": CommandLine Error: Option '"
+ << OptionNames[i] << "' registered more than once!\n";
+ HadErrors = true;
}
}
@@ -158,8 +173,10 @@ static void GetOptionInfo(SmallVectorImpl<Option*> &PositionalOpts,
else if (O->getMiscFlags() & cl::Sink) // Remember sink options
SinkOpts.push_back(O);
else if (O->getNumOccurrencesFlag() == cl::ConsumeAfter) {
- if (CAOpt)
+ if (CAOpt) {
O->error("Cannot specify more than one option with cl::ConsumeAfter!");
+ HadErrors = true;
+ }
CAOpt = O;
}
}
@@ -169,6 +186,12 @@ static void GetOptionInfo(SmallVectorImpl<Option*> &PositionalOpts,
// Make sure that they are in order of registration not backwards.
std::reverse(PositionalOpts.begin(), PositionalOpts.end());
+
+ // Fail hard if there were errors. These are strictly unrecoverable and
+ // indicate serious issues such as conflicting option names or an incorrectly
+ // linked LLVM distribution.
+ if (HadErrors)
+ report_fatal_error("inconsistency in registered CommandLine options");
}
@@ -178,7 +201,7 @@ static void GetOptionInfo(SmallVectorImpl<Option*> &PositionalOpts,
static Option *LookupOption(StringRef &Arg, StringRef &Value,
const StringMap<Option*> &OptionsMap) {
// Reject all dashes.
- if (Arg.empty()) return 0;
+ if (Arg.empty()) return nullptr;
size_t EqualPos = Arg.find('=');
@@ -186,14 +209,14 @@ static Option *LookupOption(StringRef &Arg, StringRef &Value,
if (EqualPos == StringRef::npos) {
// Look up the option.
StringMap<Option*>::const_iterator I = OptionsMap.find(Arg);
- return I != OptionsMap.end() ? I->second : 0;
+ return I != OptionsMap.end() ? I->second : nullptr;
}
// If the argument before the = is a valid option name, we match. If not,
// return Arg unmolested.
StringMap<Option*>::const_iterator I =
OptionsMap.find(Arg.substr(0, EqualPos));
- if (I == OptionsMap.end()) return 0;
+ if (I == OptionsMap.end()) return nullptr;
Value = Arg.substr(EqualPos+1);
Arg = Arg.substr(0, EqualPos);
@@ -208,7 +231,7 @@ static Option *LookupNearestOption(StringRef Arg,
const StringMap<Option*> &OptionsMap,
std::string &NearestString) {
// Reject all dashes.
- if (Arg.empty()) return 0;
+ if (Arg.empty()) return nullptr;
// Split on any equal sign.
std::pair<StringRef, StringRef> SplitArg = Arg.split('=');
@@ -216,7 +239,7 @@ static Option *LookupNearestOption(StringRef Arg,
StringRef &RHS = SplitArg.second;
// Find the closest match.
- Option *Best = 0;
+ Option *Best = nullptr;
unsigned BestDistance = 0;
for (StringMap<Option*>::const_iterator it = OptionsMap.begin(),
ie = OptionsMap.end(); it != ie; ++it) {
@@ -246,12 +269,11 @@ static Option *LookupNearestOption(StringRef Arg,
return Best;
}
-/// CommaSeparateAndAddOccurence - A wrapper around Handler->addOccurence() that
-/// does special handling of cl::CommaSeparated options.
-static bool CommaSeparateAndAddOccurence(Option *Handler, unsigned pos,
- StringRef ArgName,
- StringRef Value, bool MultiArg = false)
-{
+/// CommaSeparateAndAddOccurrence - A wrapper around Handler->addOccurrence()
+/// that does special handling of cl::CommaSeparated options.
+static bool CommaSeparateAndAddOccurrence(Option *Handler, unsigned pos,
+ StringRef ArgName, StringRef Value,
+ bool MultiArg = false) {
// Check to see if this option accepts a comma separated list of values. If
// it does, we have to split up the value into multiple values.
if (Handler->getMiscFlags() & CommaSeparated) {
@@ -290,7 +312,7 @@ static inline bool ProvideOption(Option *Handler, StringRef ArgName,
// Enforce value requirements
switch (Handler->getValueExpectedFlag()) {
case ValueRequired:
- if (Value.data() == 0) { // No value specified?
+ if (!Value.data()) { // No value specified?
if (i+1 >= argc)
return Handler->error("requires a value!");
// Steal the next argument, like for '-o filename'
@@ -312,13 +334,13 @@ static inline bool ProvideOption(Option *Handler, StringRef ArgName,
// If this isn't a multi-arg option, just run the handler.
if (NumAdditionalVals == 0)
- return CommaSeparateAndAddOccurence(Handler, i, ArgName, Value);
+ return CommaSeparateAndAddOccurrence(Handler, i, ArgName, Value);
// If it is, run the handle several times.
bool MultiArg = false;
if (Value.data()) {
- if (CommaSeparateAndAddOccurence(Handler, i, ArgName, Value, MultiArg))
+ if (CommaSeparateAndAddOccurrence(Handler, i, ArgName, Value, MultiArg))
return true;
--NumAdditionalVals;
MultiArg = true;
@@ -329,7 +351,7 @@ static inline bool ProvideOption(Option *Handler, StringRef ArgName,
return Handler->error("not enough values!");
Value = argv[++i];
- if (CommaSeparateAndAddOccurence(Handler, i, ArgName, Value, MultiArg))
+ if (CommaSeparateAndAddOccurrence(Handler, i, ArgName, Value, MultiArg))
return true;
MultiArg = true;
--NumAdditionalVals;
@@ -339,7 +361,7 @@ static inline bool ProvideOption(Option *Handler, StringRef ArgName,
static bool ProvidePositionalOption(Option *Handler, StringRef Arg, int i) {
int Dummy = i;
- return ProvideOption(Handler, Handler->ArgStr, Arg, 0, 0, Dummy);
+ return ProvideOption(Handler, Handler->ArgStr, Arg, 0, nullptr, Dummy);
}
@@ -375,7 +397,7 @@ static Option *getOptionPred(StringRef Name, size_t &Length,
Length = Name.size();
return OMI->second; // Found one!
}
- return 0; // No option found!
+ return nullptr; // No option found!
}
/// HandlePrefixedOrGroupedOption - The specified argument string (which started
@@ -385,12 +407,12 @@ static Option *getOptionPred(StringRef Name, size_t &Length,
static Option *HandlePrefixedOrGroupedOption(StringRef &Arg, StringRef &Value,
bool &ErrorParsing,
const StringMap<Option*> &OptionsMap) {
- if (Arg.size() == 1) return 0;
+ if (Arg.size() == 1) return nullptr;
// Do the lookup!
size_t Length = 0;
Option *PGOpt = getOptionPred(Arg, Length, isPrefixedOrGrouping, OptionsMap);
- if (PGOpt == 0) return 0;
+ if (!PGOpt) return nullptr;
// If the option is a prefixed option, then the value is simply the
// rest of the name... so fall through to later processing, by
@@ -417,7 +439,7 @@ static Option *HandlePrefixedOrGroupedOption(StringRef &Arg, StringRef &Value,
"Option can not be cl::Grouping AND cl::ValueRequired!");
int Dummy = 0;
ErrorParsing |= ProvideOption(PGOpt, OneArgName,
- StringRef(), 0, 0, Dummy);
+ StringRef(), 0, nullptr, Dummy);
// Get the next grouping option.
PGOpt = getOptionPred(Arg, Length, isGrouping, OptionsMap);
@@ -609,9 +631,11 @@ void cl::TokenizeWindowsCommandLine(StringRef Src, StringSaver &Saver,
static bool ExpandResponseFile(const char *FName, StringSaver &Saver,
TokenizerCallback Tokenizer,
SmallVectorImpl<const char *> &NewArgv) {
- OwningPtr<MemoryBuffer> MemBuf;
- if (MemoryBuffer::getFile(FName, MemBuf))
+ ErrorOr<std::unique_ptr<MemoryBuffer>> MemBufOrErr =
+ MemoryBuffer::getFile(FName);
+ if (!MemBufOrErr)
return false;
+ std::unique_ptr<MemoryBuffer> MemBuf = std::move(MemBufOrErr.get());
StringRef Str(MemBuf->getBufferStart(), MemBuf->getBufferSize());
// If we have a UTF-16 byte order mark, convert to UTF-8 for parsing.
@@ -634,7 +658,7 @@ static bool ExpandResponseFile(const char *FName, StringSaver &Saver,
bool cl::ExpandResponseFiles(StringSaver &Saver, TokenizerCallback Tokenizer,
SmallVectorImpl<const char *> &Argv) {
unsigned RspFiles = 0;
- bool AllExpanded = false;
+ bool AllExpanded = true;
// Don't cache Argv.size() because it can change.
for (unsigned I = 0; I != Argv.size(); ) {
@@ -655,7 +679,10 @@ bool cl::ExpandResponseFiles(StringSaver &Saver, TokenizerCallback Tokenizer,
// the cwd of the process or the response file?
SmallVector<const char *, 0> ExpandedArgv;
if (!ExpandResponseFile(Arg + 1, Saver, Tokenizer, ExpandedArgv)) {
+ // We couldn't read this file, so we leave it in the argument stream and
+ // move on.
AllExpanded = false;
+ ++I;
continue;
}
Argv.erase(Argv.begin() + I);
@@ -675,7 +702,7 @@ namespace {
free(Dup);
}
}
- const char *SaveString(const char *Str) LLVM_OVERRIDE {
+ const char *SaveString(const char *Str) override {
char *Dup = strdup(Str);
Dups.push_back(Dup);
return Dup;
@@ -733,7 +760,7 @@ void cl::ParseCommandLineOptions(int argc, const char * const *argv,
argc = static_cast<int>(newArgv.size());
// Copy the program name into ProgName, making sure not to overflow it.
- std::string ProgName = sys::path::filename(argv[0]);
+ StringRef ProgName = sys::path::filename(argv[0]);
size_t Len = std::min(ProgName.size(), size_t(79));
memcpy(ProgramName, ProgName.data(), Len);
ProgramName[Len] = '\0';
@@ -747,7 +774,7 @@ void cl::ParseCommandLineOptions(int argc, const char * const *argv,
// Determine whether or not there are an unlimited number of positionals
bool HasUnlimitedPositionals = false;
- Option *ConsumeAfterOpt = 0;
+ Option *ConsumeAfterOpt = nullptr;
if (!PositionalOpts.empty()) {
if (PositionalOpts[0]->getNumOccurrencesFlag() == cl::ConsumeAfter) {
assert(PositionalOpts.size() > 1 &&
@@ -757,7 +784,7 @@ void cl::ParseCommandLineOptions(int argc, const char * const *argv,
// Calculate how many positional values are _required_.
bool UnboundedFound = false;
- for (size_t i = ConsumeAfterOpt != 0, e = PositionalOpts.size();
+ for (size_t i = ConsumeAfterOpt ? 1 : 0, e = PositionalOpts.size();
i != e; ++i) {
Option *Opt = PositionalOpts[i];
if (RequiresValue(Opt))
@@ -793,13 +820,13 @@ void cl::ParseCommandLineOptions(int argc, const char * const *argv,
// If the program has named positional arguments, and the name has been run
// across, keep track of which positional argument was named. Otherwise put
// the positional args into the PositionalVals list...
- Option *ActivePositionalArg = 0;
+ Option *ActivePositionalArg = nullptr;
// Loop over all of the arguments... processing them.
bool DashDashFound = false; // Have we read '--'?
for (int i = 1; i < argc; ++i) {
- Option *Handler = 0;
- Option *NearestHandler = 0;
+ Option *Handler = nullptr;
+ Option *NearestHandler = nullptr;
std::string NearestHandlerString;
StringRef Value;
StringRef ArgName = "";
@@ -832,8 +859,7 @@ void cl::ParseCommandLineOptions(int argc, const char * const *argv,
// All of the positional arguments have been fulfulled, give the rest to
// the consume after option... if it's specified...
//
- if (PositionalVals.size() >= NumPositionalRequired &&
- ConsumeAfterOpt != 0) {
+ if (PositionalVals.size() >= NumPositionalRequired && ConsumeAfterOpt) {
for (++i; i < argc; ++i)
PositionalVals.push_back(std::make_pair(argv[i],i));
break; // Handle outside of the argument processing loop...
@@ -871,18 +897,18 @@ void cl::ParseCommandLineOptions(int argc, const char * const *argv,
Handler = LookupOption(ArgName, Value, Opts);
// Check to see if this "option" is really a prefixed or grouped argument.
- if (Handler == 0)
+ if (!Handler)
Handler = HandlePrefixedOrGroupedOption(ArgName, Value,
ErrorParsing, Opts);
// Otherwise, look for the closest available option to report to the user
// in the upcoming error.
- if (Handler == 0 && SinkOpts.empty())
+ if (!Handler && SinkOpts.empty())
NearestHandler = LookupNearestOption(ArgName, Opts,
NearestHandlerString);
}
- if (Handler == 0) {
+ if (!Handler) {
if (SinkOpts.empty()) {
errs() << ProgramName << ": Unknown command line argument '"
<< argv[i] << "'. Try: '" << argv[0] << " -help'\n";
@@ -926,7 +952,7 @@ void cl::ParseCommandLineOptions(int argc, const char * const *argv,
<< " positional arguments: See: " << argv[0] << " -help\n";
ErrorParsing = true;
- } else if (ConsumeAfterOpt == 0) {
+ } else if (!ConsumeAfterOpt) {
// Positional args have already been handled if ConsumeAfter is specified.
unsigned ValNo = 0, NumVals = static_cast<unsigned>(PositionalVals.size());
for (size_t i = 0, e = PositionalOpts.size(); i != e; ++i) {
@@ -992,13 +1018,12 @@ void cl::ParseCommandLineOptions(int argc, const char * const *argv,
}
// Loop over args and make sure all required args are specified!
- for (StringMap<Option*>::iterator I = Opts.begin(),
- E = Opts.end(); I != E; ++I) {
- switch (I->second->getNumOccurrencesFlag()) {
+ for (const auto &Opt : Opts) {
+ switch (Opt.second->getNumOccurrencesFlag()) {
case Required:
case OneOrMore:
- if (I->second->getNumOccurrences() == 0) {
- I->second->error("must be specified at least once!");
+ if (Opt.second->getNumOccurrences() == 0) {
+ Opt.second->error("must be specified at least once!");
ErrorParsing = true;
}
// Fall through
@@ -1031,7 +1056,7 @@ void cl::ParseCommandLineOptions(int argc, const char * const *argv,
//
bool Option::error(const Twine &Message, StringRef ArgName) {
- if (ArgName.data() == 0) ArgName = ArgStr;
+ if (!ArgName.data()) ArgName = ArgStr;
if (ArgName.empty())
errs() << HelpStr; // Be nice for positional arguments
else
@@ -1442,12 +1467,12 @@ public:
outs() << "USAGE: " << ProgramName << " [options]";
// Print out the positional options.
- Option *CAOpt = 0; // The cl::ConsumeAfter option, if it exists...
+ Option *CAOpt = nullptr; // The cl::ConsumeAfter option, if it exists...
if (!PositionalOpts.empty() &&
PositionalOpts[0]->getNumOccurrencesFlag() == ConsumeAfter)
CAOpt = PositionalOpts[0];
- for (size_t i = CAOpt != 0, e = PositionalOpts.size(); i != e; ++i) {
+ for (size_t i = CAOpt != nullptr, e = PositionalOpts.size(); i != e; ++i) {
if (PositionalOpts[i]->ArgStr[0])
outs() << " --" << PositionalOpts[i]->ArgStr;
outs() << " " << PositionalOpts[i]->HelpStr;
@@ -1474,7 +1499,7 @@ public:
MoreHelp->clear();
// Halt the program since help information was printed
- exit(1);
+ exit(0);
}
};
@@ -1486,25 +1511,24 @@ public:
// It shall return true if A's name should be lexographically
// ordered before B's name. It returns false otherwise.
static bool OptionCategoryCompare(OptionCategory *A, OptionCategory *B) {
- int Length = strcmp(A->getName(), B->getName());
- assert(Length != 0 && "Duplicate option categories");
- return Length < 0;
+ return strcmp(A->getName(), B->getName()) < 0;
}
// Make sure we inherit our base class's operator=()
using HelpPrinter::operator= ;
protected:
- virtual void printOptions(StrOptionPairVector &Opts, size_t MaxArgLen) {
+ void printOptions(StrOptionPairVector &Opts, size_t MaxArgLen) override {
std::vector<OptionCategory *> SortedCategories;
std::map<OptionCategory *, std::vector<Option *> > CategorizedOptions;
- // Collect registered option categories into vector in preperation for
+ // Collect registered option categories into vector in preparation for
// sorting.
for (OptionCatSet::const_iterator I = RegisteredOptionCategories->begin(),
E = RegisteredOptionCategories->end();
- I != E; ++I)
+ I != E; ++I) {
SortedCategories.push_back(*I);
+ }
// Sort the different option categories alphabetically.
assert(SortedCategories.size() > 0 && "No option categories registered!");
@@ -1543,7 +1567,7 @@ protected:
outs() << (*Category)->getName() << ":\n";
// Check if description is set.
- if ((*Category)->getDescription() != 0)
+ if ((*Category)->getDescription() != nullptr)
outs() << (*Category)->getDescription() << "\n\n";
else
outs() << "\n";
@@ -1674,9 +1698,9 @@ void cl::PrintOptionValues() {
Opts[i].second->printOptionValue(MaxArgLen, PrintAllOptions);
}
-static void (*OverrideVersionPrinter)() = 0;
+static void (*OverrideVersionPrinter)() = nullptr;
-static std::vector<void (*)()>* ExtraVersionPrinters = 0;
+static std::vector<void (*)()>* ExtraVersionPrinters = nullptr;
namespace {
class VersionPrinter {
@@ -1686,7 +1710,7 @@ public:
OS << "LLVM (http://llvm.org/):\n"
<< " " << PACKAGE_NAME << " version " << PACKAGE_VERSION;
#ifdef LLVM_VERSION_INFO
- OS << LLVM_VERSION_INFO;
+ OS << " " << LLVM_VERSION_INFO;
#endif
OS << "\n ";
#ifndef __OPTIMIZE__
@@ -1709,15 +1733,15 @@ public:
void operator=(bool OptionWasSpecified) {
if (!OptionWasSpecified) return;
- if (OverrideVersionPrinter != 0) {
+ if (OverrideVersionPrinter != nullptr) {
(*OverrideVersionPrinter)();
- exit(1);
+ exit(0);
}
print();
// Iterate over any registered extra printers and call them to add further
// information.
- if (ExtraVersionPrinters != 0) {
+ if (ExtraVersionPrinters != nullptr) {
outs() << '\n';
for (std::vector<void (*)()>::iterator I = ExtraVersionPrinters->begin(),
E = ExtraVersionPrinters->end();
@@ -1725,7 +1749,7 @@ public:
(*I)();
}
- exit(1);
+ exit(0);
}
};
} // End anonymous namespace
@@ -1767,7 +1791,7 @@ void cl::SetVersionPrinter(void (*func)()) {
}
void cl::AddExtraVersionPrinter(void (*func)()) {
- if (ExtraVersionPrinters == 0)
+ if (!ExtraVersionPrinters)
ExtraVersionPrinters = new std::vector<void (*)()>;
ExtraVersionPrinters->push_back(func);
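[Editor's note] Beyond the nullptr/override modernization, the behavioural changes in this file are: duplicate option registrations now trigger report_fatal_error instead of only printing a message, -help and -version exit with status 0, and ExpandResponseFiles leaves an unreadable @file in the argument list (reporting the failure via its return value) instead of silently dropping it. A minimal sketch of the client-side API this file serves; the option name and category below are made up for illustration:

    #include "llvm/Support/CommandLine.h"

    static llvm::cl::OptionCategory DemoCategory("Demo options");
    static llvm::cl::opt<unsigned>
        Threshold("demo-threshold", llvm::cl::desc("Example threshold"),
                  llvm::cl::init(8), llvm::cl::cat(DemoCategory));

    int main(int argc, char **argv) {
      // Registers -demo-threshold, -help, -version, etc., and parses argv.
      llvm::cl::ParseCommandLineOptions(argc, argv, "demo tool\n");
      return Threshold > 16;
    }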
diff --git a/contrib/llvm/lib/Support/Compression.cpp b/contrib/llvm/lib/Support/Compression.cpp
index b5ddb70..c32eb21 100644
--- a/contrib/llvm/lib/Support/Compression.cpp
+++ b/contrib/llvm/lib/Support/Compression.cpp
@@ -12,12 +12,10 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/Compression.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Config/config.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MemoryBuffer.h"
#if LLVM_ENABLE_ZLIB == 1 && HAVE_ZLIB_H
#include <zlib.h>
#endif
@@ -48,36 +46,26 @@ static zlib::Status encodeZlibReturnValue(int ReturnValue) {
bool zlib::isAvailable() { return true; }
zlib::Status zlib::compress(StringRef InputBuffer,
- OwningPtr<MemoryBuffer> &CompressedBuffer,
+ SmallVectorImpl<char> &CompressedBuffer,
CompressionLevel Level) {
unsigned long CompressedSize = ::compressBound(InputBuffer.size());
- OwningArrayPtr<char> TmpBuffer(new char[CompressedSize]);
+ CompressedBuffer.resize(CompressedSize);
int CLevel = encodeZlibCompressionLevel(Level);
Status Res = encodeZlibReturnValue(::compress2(
- (Bytef *)TmpBuffer.get(), &CompressedSize,
+ (Bytef *)CompressedBuffer.data(), &CompressedSize,
(const Bytef *)InputBuffer.data(), InputBuffer.size(), CLevel));
- if (Res == StatusOK) {
- CompressedBuffer.reset(MemoryBuffer::getMemBufferCopy(
- StringRef(TmpBuffer.get(), CompressedSize)));
- // Tell MSan that memory initialized by zlib is valid.
- __msan_unpoison(CompressedBuffer->getBufferStart(), CompressedSize);
- }
+ CompressedBuffer.resize(CompressedSize);
return Res;
}
zlib::Status zlib::uncompress(StringRef InputBuffer,
- OwningPtr<MemoryBuffer> &UncompressedBuffer,
+ SmallVectorImpl<char> &UncompressedBuffer,
size_t UncompressedSize) {
- OwningArrayPtr<char> TmpBuffer(new char[UncompressedSize]);
- Status Res = encodeZlibReturnValue(
- ::uncompress((Bytef *)TmpBuffer.get(), (uLongf *)&UncompressedSize,
- (const Bytef *)InputBuffer.data(), InputBuffer.size()));
- if (Res == StatusOK) {
- UncompressedBuffer.reset(MemoryBuffer::getMemBufferCopy(
- StringRef(TmpBuffer.get(), UncompressedSize)));
- // Tell MSan that memory initialized by zlib is valid.
- __msan_unpoison(UncompressedBuffer->getBufferStart(), UncompressedSize);
- }
+ UncompressedBuffer.resize(UncompressedSize);
+ Status Res = encodeZlibReturnValue(::uncompress(
+ (Bytef *)UncompressedBuffer.data(), (uLongf *)&UncompressedSize,
+ (const Bytef *)InputBuffer.data(), InputBuffer.size()));
+ UncompressedBuffer.resize(UncompressedSize);
return Res;
}
@@ -88,12 +76,12 @@ uint32_t zlib::crc32(StringRef Buffer) {
#else
bool zlib::isAvailable() { return false; }
zlib::Status zlib::compress(StringRef InputBuffer,
- OwningPtr<MemoryBuffer> &CompressedBuffer,
+ SmallVectorImpl<char> &CompressedBuffer,
CompressionLevel Level) {
return zlib::StatusUnsupported;
}
zlib::Status zlib::uncompress(StringRef InputBuffer,
- OwningPtr<MemoryBuffer> &UncompressedBuffer,
+ SmallVectorImpl<char> &UncompressedBuffer,
size_t UncompressedSize) {
return zlib::StatusUnsupported;
}
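[Editor's note] The zlib wrappers now write into a caller-provided SmallVector instead of allocating a MemoryBuffer, which also removes the need for the MSan unpoisoning. A hedged round-trip sketch of the new interface (assumes an LLVM build with zlib and a defaulted compression level; otherwise both calls return StatusUnsupported):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/Compression.h"

    bool roundTrip(llvm::StringRef Input, llvm::SmallVectorImpl<char> &Out) {
      llvm::SmallVector<char, 128> Compressed;
      if (llvm::zlib::compress(Input, Compressed) != llvm::zlib::StatusOK)
        return false;
      llvm::StringRef Packed(Compressed.data(), Compressed.size());
      return llvm::zlib::uncompress(Packed, Out, Input.size()) ==
             llvm::zlib::StatusOK;
    }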
diff --git a/contrib/llvm/lib/Support/ConvertUTF.c b/contrib/llvm/lib/Support/ConvertUTF.c
index 23f17ca..128459a 100644
--- a/contrib/llvm/lib/Support/ConvertUTF.c
+++ b/contrib/llvm/lib/Support/ConvertUTF.c
@@ -51,6 +51,7 @@
#ifdef CVTUTF_DEBUG
#include <stdio.h>
#endif
+#include <assert.h>
static const int halfShift = 10; /* used for shifting by 10 bits */
@@ -392,6 +393,99 @@ Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd) {
/* --------------------------------------------------------------------- */
+static unsigned
+findMaximalSubpartOfIllFormedUTF8Sequence(const UTF8 *source,
+ const UTF8 *sourceEnd) {
+ UTF8 b1, b2, b3;
+
+ assert(!isLegalUTF8Sequence(source, sourceEnd));
+
+ /*
+ * Unicode 6.3.0, D93b:
+ *
+ * Maximal subpart of an ill-formed subsequence: The longest code unit
+ * subsequence starting at an unconvertible offset that is either:
+ * a. the initial subsequence of a well-formed code unit sequence, or
+ * b. a subsequence of length one.
+ */
+
+ if (source == sourceEnd)
+ return 0;
+
+ /*
+ * Perform case analysis. See Unicode 6.3.0, Table 3-7. Well-Formed UTF-8
+ * Byte Sequences.
+ */
+
+ b1 = *source;
+ ++source;
+ if (b1 >= 0xC2 && b1 <= 0xDF) {
+ /*
+ * First byte is valid, but we know that this code unit sequence is
+ * invalid, so the maximal subpart has to end after the first byte.
+ */
+ return 1;
+ }
+
+ if (source == sourceEnd)
+ return 1;
+
+ b2 = *source;
+ ++source;
+
+ if (b1 == 0xE0) {
+ return (b2 >= 0xA0 && b2 <= 0xBF) ? 2 : 1;
+ }
+ if (b1 >= 0xE1 && b1 <= 0xEC) {
+ return (b2 >= 0x80 && b2 <= 0xBF) ? 2 : 1;
+ }
+ if (b1 == 0xED) {
+ return (b2 >= 0x80 && b2 <= 0x9F) ? 2 : 1;
+ }
+ if (b1 >= 0xEE && b1 <= 0xEF) {
+ return (b2 >= 0x80 && b2 <= 0xBF) ? 2 : 1;
+ }
+ if (b1 == 0xF0) {
+ if (b2 >= 0x90 && b2 <= 0xBF) {
+ if (source == sourceEnd)
+ return 2;
+
+ b3 = *source;
+ return (b3 >= 0x80 && b3 <= 0xBF) ? 3 : 2;
+ }
+ return 1;
+ }
+ if (b1 >= 0xF1 && b1 <= 0xF3) {
+ if (b2 >= 0x80 && b2 <= 0xBF) {
+ if (source == sourceEnd)
+ return 2;
+
+ b3 = *source;
+ return (b3 >= 0x80 && b3 <= 0xBF) ? 3 : 2;
+ }
+ return 1;
+ }
+ if (b1 == 0xF4) {
+ if (b2 >= 0x80 && b2 <= 0x8F) {
+ if (source == sourceEnd)
+ return 2;
+
+ b3 = *source;
+ return (b3 >= 0x80 && b3 <= 0xBF) ? 3 : 2;
+ }
+ return 1;
+ }
+
+ assert((b1 >= 0x80 && b1 <= 0xC1) || b1 >= 0xF5);
+ /*
+ * There are no valid sequences that start with these bytes. Maximal subpart
+ * is defined to have length 1 in these cases.
+ */
+ return 1;
+}
+
+/* --------------------------------------------------------------------- */
+
/*
* Exported function to return the total number of bytes in a codepoint
* represented in UTF-8, given the value of the first byte.
@@ -491,9 +585,10 @@ ConversionResult ConvertUTF8toUTF16 (
/* --------------------------------------------------------------------- */
-ConversionResult ConvertUTF8toUTF32 (
+static ConversionResult ConvertUTF8toUTF32Impl(
const UTF8** sourceStart, const UTF8* sourceEnd,
- UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags) {
+ UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags,
+ Boolean InputIsPartial) {
ConversionResult result = conversionOK;
const UTF8* source = *sourceStart;
UTF32* target = *targetStart;
@@ -501,12 +596,42 @@ ConversionResult ConvertUTF8toUTF32 (
UTF32 ch = 0;
unsigned short extraBytesToRead = trailingBytesForUTF8[*source];
if (extraBytesToRead >= sourceEnd - source) {
- result = sourceExhausted; break;
+ if (flags == strictConversion || InputIsPartial) {
+ result = sourceExhausted;
+ break;
+ } else {
+ result = sourceIllegal;
+
+ /*
+ * Replace the maximal subpart of ill-formed sequence with
+ * replacement character.
+ */
+ source += findMaximalSubpartOfIllFormedUTF8Sequence(source,
+ sourceEnd);
+ *target++ = UNI_REPLACEMENT_CHAR;
+ continue;
+ }
}
+ if (target >= targetEnd) {
+ result = targetExhausted; break;
+ }
+
/* Do this check whether lenient or strict */
if (!isLegalUTF8(source, extraBytesToRead+1)) {
result = sourceIllegal;
- break;
+ if (flags == strictConversion) {
+ /* Abort conversion. */
+ break;
+ } else {
+ /*
+ * Replace the maximal subpart of ill-formed sequence with
+ * replacement character.
+ */
+ source += findMaximalSubpartOfIllFormedUTF8Sequence(source,
+ sourceEnd);
+ *target++ = UNI_REPLACEMENT_CHAR;
+ continue;
+ }
}
/*
* The cases all fall through. See "Note A" below.
@@ -521,10 +646,6 @@ ConversionResult ConvertUTF8toUTF32 (
}
ch -= offsetsFromUTF8[extraBytesToRead];
- if (target >= targetEnd) {
- source -= (extraBytesToRead+1); /* Back up the source pointer! */
- result = targetExhausted; break;
- }
if (ch <= UNI_MAX_LEGAL_UTF32) {
/*
* UTF-16 surrogate values are illegal in UTF-32, and anything
@@ -551,6 +672,22 @@ ConversionResult ConvertUTF8toUTF32 (
return result;
}
+ConversionResult ConvertUTF8toUTF32Partial(const UTF8 **sourceStart,
+ const UTF8 *sourceEnd,
+ UTF32 **targetStart,
+ UTF32 *targetEnd,
+ ConversionFlags flags) {
+ return ConvertUTF8toUTF32Impl(sourceStart, sourceEnd, targetStart, targetEnd,
+ flags, /*InputIsPartial=*/true);
+}
+
+ConversionResult ConvertUTF8toUTF32(const UTF8 **sourceStart,
+ const UTF8 *sourceEnd, UTF32 **targetStart,
+ UTF32 *targetEnd, ConversionFlags flags) {
+ return ConvertUTF8toUTF32Impl(sourceStart, sourceEnd, targetStart, targetEnd,
+ flags, /*InputIsPartial=*/false);
+}
+
/* ---------------------------------------------------------------------
Note A.
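[Editor's note] findMaximalSubpartOfIllFormedUTF8Sequence implements definition D93b from Unicode 6.3.0: when lenient conversion hits an ill-formed sequence, the longest prefix that could still begin a well-formed sequence is consumed and replaced by a single U+FFFD. A small worked example, written as standalone C++ rather than a call into the converter above: in the byte stream E1 80 41, E1 opens a three-byte sequence and 80 is a valid continuation byte, but 41 ('A') is not, so the maximal subpart is the two bytes E1 80.

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint8_t Bytes[] = {0xE1, 0x80, 0x41};
      bool SecondIsContinuation = Bytes[1] >= 0x80 && Bytes[1] <= 0xBF;  // true
      bool ThirdIsContinuation = Bytes[2] >= 0x80 && Bytes[2] <= 0xBF;   // false
      // Lenient decoding therefore yields U+FFFD (for E1 80) followed by U+0041.
      assert(SecondIsContinuation && !ThirdIsContinuation);
      return 0;
    }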
diff --git a/contrib/llvm/lib/Support/CrashRecoveryContext.cpp b/contrib/llvm/lib/Support/CrashRecoveryContext.cpp
index 92c370d..9b0e443 100644
--- a/contrib/llvm/lib/Support/CrashRecoveryContext.cpp
+++ b/contrib/llvm/lib/Support/CrashRecoveryContext.cpp
@@ -22,7 +22,8 @@ namespace {
struct CrashRecoveryContextImpl;
-static ManagedStatic<sys::ThreadLocal<const CrashRecoveryContextImpl> > CurrentContext;
+static ManagedStatic<
+ sys::ThreadLocal<const CrashRecoveryContextImpl> > CurrentContext;
struct CrashRecoveryContextImpl {
CrashRecoveryContext *CRC;
@@ -89,16 +90,16 @@ CrashRecoveryContext::~CrashRecoveryContext() {
}
bool CrashRecoveryContext::isRecoveringFromCrash() {
- return tlIsRecoveringFromCrash->get() != 0;
+ return tlIsRecoveringFromCrash->get() != nullptr;
}
CrashRecoveryContext *CrashRecoveryContext::GetCurrent() {
if (!gCrashRecoveryEnabled)
- return 0;
+ return nullptr;
const CrashRecoveryContextImpl *CRCI = CurrentContext->get();
if (!CRCI)
- return 0;
+ return nullptr;
return CRCI->CRC;
}
@@ -120,7 +121,7 @@ CrashRecoveryContext::unregisterCleanup(CrashRecoveryContextCleanup *cleanup) {
if (cleanup == head) {
head = cleanup->next;
if (head)
- head->prev = 0;
+ head->prev = nullptr;
}
else {
cleanup->prev->next = cleanup->next;
@@ -132,7 +133,7 @@ CrashRecoveryContext::unregisterCleanup(CrashRecoveryContextCleanup *cleanup) {
#ifdef LLVM_ON_WIN32
-#include "Windows/Windows.h"
+#include "Windows/WindowsSupport.h"
// On Windows, we can make use of vectored exception handling to
// catch most crashing situations. Note that this does mean
@@ -231,7 +232,8 @@ void CrashRecoveryContext::Disable() {
#include <signal.h>
-static const int Signals[] = { SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGSEGV, SIGTRAP };
+static const int Signals[] =
+ { SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGSEGV, SIGTRAP };
static const unsigned NumSignals = sizeof(Signals) / sizeof(Signals[0]);
static struct sigaction PrevActions[NumSignals];
@@ -261,7 +263,7 @@ static void CrashRecoverySignalHandler(int Signal) {
sigset_t SigMask;
sigemptyset(&SigMask);
sigaddset(&SigMask, Signal);
- sigprocmask(SIG_UNBLOCK, &SigMask, 0);
+ sigprocmask(SIG_UNBLOCK, &SigMask, nullptr);
if (CRCI)
const_cast<CrashRecoveryContextImpl*>(CRCI)->HandleCrash();
@@ -296,12 +298,12 @@ void CrashRecoveryContext::Disable() {
// Restore the previous signal handlers.
for (unsigned i = 0; i != NumSignals; ++i)
- sigaction(Signals[i], &PrevActions[i], 0);
+ sigaction(Signals[i], &PrevActions[i], nullptr);
}
#endif
-bool CrashRecoveryContext::RunSafely(void (*Fn)(void*), void *UserData) {
+bool CrashRecoveryContext::RunSafely(function_ref<void()> Fn) {
// If crash recovery is disabled, do nothing.
if (gCrashRecoveryEnabled) {
assert(!Impl && "Crash recovery context already initialized!");
@@ -313,7 +315,7 @@ bool CrashRecoveryContext::RunSafely(void (*Fn)(void*), void *UserData) {
}
}
- Fn(UserData);
+ Fn();
return true;
}
@@ -330,13 +332,26 @@ const std::string &CrashRecoveryContext::getBacktrace() const {
return CRC->Backtrace;
}
-//
+// FIXME: Portability.
+static void setThreadBackgroundPriority() {
+#ifdef __APPLE__
+ setpriority(PRIO_DARWIN_THREAD, 0, PRIO_DARWIN_BG);
+#endif
+}
+
+static bool hasThreadBackgroundPriority() {
+#ifdef __APPLE__
+ return getpriority(PRIO_DARWIN_THREAD, 0) == 1;
+#else
+ return false;
+#endif
+}
namespace {
struct RunSafelyOnThreadInfo {
- void (*UserFn)(void*);
- void *UserData;
+ function_ref<void()> Fn;
CrashRecoveryContext *CRC;
+ bool UseBackgroundPriority;
bool Result;
};
}
@@ -344,11 +359,16 @@ struct RunSafelyOnThreadInfo {
static void RunSafelyOnThread_Dispatch(void *UserData) {
RunSafelyOnThreadInfo *Info =
reinterpret_cast<RunSafelyOnThreadInfo*>(UserData);
- Info->Result = Info->CRC->RunSafely(Info->UserFn, Info->UserData);
+
+ if (Info->UseBackgroundPriority)
+ setThreadBackgroundPriority();
+
+ Info->Result = Info->CRC->RunSafely(Info->Fn);
}
-bool CrashRecoveryContext::RunSafelyOnThread(void (*Fn)(void*), void *UserData,
+bool CrashRecoveryContext::RunSafelyOnThread(function_ref<void()> Fn,
unsigned RequestedStackSize) {
- RunSafelyOnThreadInfo Info = { Fn, UserData, this, false };
+ bool UseBackgroundPriority = hasThreadBackgroundPriority();
+ RunSafelyOnThreadInfo Info = { Fn, this, UseBackgroundPriority, false };
llvm_execute_on_thread(RunSafelyOnThread_Dispatch, &Info, RequestedStackSize);
if (CrashRecoveryContextImpl *CRC = (CrashRecoveryContextImpl *)Impl)
CRC->setSwitchedThread();
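[Editor's note] RunSafely and RunSafelyOnThread now take a function_ref<void()> instead of a function pointer plus a void* cookie, so call sites can pass capturing lambdas directly; the thread variant also re-applies Darwin background priority on the new thread. A minimal sketch of the updated call pattern (doRiskyWork is a made-up placeholder):

    #include "llvm/Support/CrashRecoveryContext.h"

    bool runGuarded(void (*doRiskyWork)(int), int Arg) {
      llvm::CrashRecoveryContext::Enable();
      llvm::CrashRecoveryContext CRC;
      // The lambda captures Arg; with the old pointer-plus-cookie interface
      // this needed a separate trampoline function.
      return CRC.RunSafely([&] { doRiskyWork(Arg); });
    }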
diff --git a/contrib/llvm/lib/Support/DAGDeltaAlgorithm.cpp b/contrib/llvm/lib/Support/DAGDeltaAlgorithm.cpp
index 34e82cf..0d504ee 100644
--- a/contrib/llvm/lib/Support/DAGDeltaAlgorithm.cpp
+++ b/contrib/llvm/lib/Support/DAGDeltaAlgorithm.cpp
@@ -42,6 +42,8 @@
#include <map>
using namespace llvm;
+#define DEBUG_TYPE "dag-delta"
+
namespace {
class DAGDeltaAlgorithmImpl {
@@ -162,12 +164,12 @@ class DeltaActiveSetHelper : public DeltaAlgorithm {
protected:
/// UpdatedSearchState - Callback used when the search state changes.
- virtual void UpdatedSearchState(const changeset_ty &Changes,
- const changesetlist_ty &Sets) LLVM_OVERRIDE {
+ void UpdatedSearchState(const changeset_ty &Changes,
+ const changesetlist_ty &Sets) override {
DDAI.UpdatedSearchState(Changes, Sets, Required);
}
- virtual bool ExecuteOneTest(const changeset_ty &S) LLVM_OVERRIDE {
+ bool ExecuteOneTest(const changeset_ty &S) override {
return DDAI.GetTestResult(S, Required);
}
diff --git a/contrib/llvm/lib/Support/DataExtractor.cpp b/contrib/llvm/lib/Support/DataExtractor.cpp
index a564d21..5d6d60a 100644
--- a/contrib/llvm/lib/Support/DataExtractor.cpp
+++ b/contrib/llvm/lib/Support/DataExtractor.cpp
@@ -21,7 +21,7 @@ static T getU(uint32_t *offset_ptr, const DataExtractor *de,
if (de->isValidOffsetForDataOfSize(offset, sizeof(val))) {
std::memcpy(&val, &Data[offset], sizeof(val));
if (sys::IsLittleEndianHost != isLittleEndian)
- val = sys::SwapByteOrder(val);
+ sys::swapByteOrder(val);
// Advance the offset
*offset_ptr += sizeof(val);
@@ -44,7 +44,7 @@ static T *getUs(uint32_t *offset_ptr, T *dst, uint32_t count,
// success
return dst;
}
- return NULL;
+ return nullptr;
}
uint8_t DataExtractor::getU8(uint32_t *offset_ptr) const {
@@ -125,7 +125,7 @@ const char *DataExtractor::getCStr(uint32_t *offset_ptr) const {
*offset_ptr = pos + 1;
return Data.data() + offset;
}
- return NULL;
+ return nullptr;
}
uint64_t DataExtractor::getULEB128(uint32_t *offset_ptr) const {
diff --git a/contrib/llvm/lib/Support/DataStream.cpp b/contrib/llvm/lib/Support/DataStream.cpp
index 0bd0c68..32653de 100644
--- a/contrib/llvm/lib/Support/DataStream.cpp
+++ b/contrib/llvm/lib/Support/DataStream.cpp
@@ -14,15 +14,14 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "Data-stream"
#include "llvm/Support/DataStream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Program.h"
-#include "llvm/Support/system_error.h"
#include <cerrno>
#include <cstdio>
#include <string>
+#include <system_error>
#if !defined(_MSC_VER) && !defined(__MINGW32__)
#include <unistd.h>
#else
@@ -30,6 +29,8 @@
#endif
using namespace llvm;
+#define DEBUG_TYPE "Data-stream"
+
// Interface goals:
// * StreamableMemoryObject doesn't care about complexities like using
// threads/async callbacks to actually overlap download+compile
@@ -58,16 +59,16 @@ public:
virtual ~DataFileStreamer() {
close(Fd);
}
- virtual size_t GetBytes(unsigned char *buf, size_t len) LLVM_OVERRIDE {
+ size_t GetBytes(unsigned char *buf, size_t len) override {
NumStreamFetches++;
return read(Fd, buf, len);
}
- error_code OpenFile(const std::string &Filename) {
+ std::error_code OpenFile(const std::string &Filename) {
if (Filename == "-") {
Fd = 0;
sys::ChangeStdinToBinary();
- return error_code::success();
+ return std::error_code();
}
return sys::fs::openFileForRead(Filename, Fd);
@@ -80,10 +81,10 @@ namespace llvm {
DataStreamer *getDataFileStreamer(const std::string &Filename,
std::string *StrError) {
DataFileStreamer *s = new DataFileStreamer();
- if (error_code e = s->OpenFile(Filename)) {
+ if (std::error_code e = s->OpenFile(Filename)) {
*StrError = std::string("Could not open ") + Filename + ": " +
e.message() + "\n";
- return NULL;
+ return nullptr;
}
return s;
}
diff --git a/contrib/llvm/lib/Support/Debug.cpp b/contrib/llvm/lib/Support/Debug.cpp
index d9cb8a9..ad4d4ef 100644
--- a/contrib/llvm/lib/Support/Debug.cpp
+++ b/contrib/llvm/lib/Support/Debug.cpp
@@ -109,7 +109,7 @@ raw_ostream &llvm::dbgs() {
if (EnableDebugBuffering && DebugFlag && DebugBufferSize != 0)
// TODO: Add a handler for SIGUSER1-type signals so the user can
// force a debug dump.
- sys::AddSignalHandler(&debug_user_sig_handler, 0);
+ sys::AddSignalHandler(&debug_user_sig_handler, nullptr);
// Otherwise we've already set the debug stream buffer size to
// zero, disabling buffering so it will output directly to errs().
}
diff --git a/contrib/llvm/lib/Support/Disassembler.cpp b/contrib/llvm/lib/Support/Disassembler.cpp
deleted file mode 100644
index 27df3a9..0000000
--- a/contrib/llvm/lib/Support/Disassembler.cpp
+++ /dev/null
@@ -1,74 +0,0 @@
-//===- lib/Support/Disassembler.cpp -----------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the necessary glue to call external disassembler
-// libraries.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Support/Disassembler.h"
-#include "llvm/Config/config.h"
-#include <cassert>
-#include <iomanip>
-#include <sstream>
-#include <string>
-
-#if USE_UDIS86
-#include <udis86.h>
-#endif
-
-using namespace llvm;
-
-bool llvm::sys::hasDisassembler()
-{
-#if defined (__i386__) || defined (__amd64__) || defined (__x86_64__)
- // We have option to enable udis86 library.
-# if USE_UDIS86
- return true;
-#else
- return false;
-#endif
-#else
- return false;
-#endif
-}
-
-std::string llvm::sys::disassembleBuffer(uint8_t* start, size_t length,
- uint64_t pc) {
-#if (defined (__i386__) || defined (__amd64__) || defined (__x86_64__)) \
- && USE_UDIS86
- std::stringstream res;
-
- unsigned bits;
-# if defined(__i386__)
- bits = 32;
-# else
- bits = 64;
-# endif
-
- ud_t ud_obj;
-
- ud_init(&ud_obj);
- ud_set_input_buffer(&ud_obj, start, length);
- ud_set_mode(&ud_obj, bits);
- ud_set_pc(&ud_obj, pc);
- ud_set_syntax(&ud_obj, UD_SYN_ATT);
-
- res << std::setbase(16)
- << std::setw(bits/4);
-
- while (ud_disassemble(&ud_obj)) {
- res << ud_insn_off(&ud_obj) << ":\t" << ud_insn_asm(&ud_obj) << "\n";
- }
-
- return res.str();
-#else
- return "No disassembler available. See configure help for options.\n";
-#endif
-}
diff --git a/contrib/llvm/lib/Support/Dwarf.cpp b/contrib/llvm/lib/Support/Dwarf.cpp
index c000b63..c9efa61 100644
--- a/contrib/llvm/lib/Support/Dwarf.cpp
+++ b/contrib/llvm/lib/Support/Dwarf.cpp
@@ -84,6 +84,9 @@ const char *llvm::dwarf::TagString(unsigned Tag) {
case DW_TAG_arg_variable: return "DW_TAG_arg_variable";
case DW_TAG_rvalue_reference_type: return "DW_TAG_rvalue_reference_type";
case DW_TAG_template_alias: return "DW_TAG_template_alias";
+ case DW_TAG_coarray_type: return "DW_TAG_coarray_type";
+ case DW_TAG_generic_subrange: return "DW_TAG_generic_subrange";
+ case DW_TAG_dynamic_type: return "DW_TAG_dynamic_type";
case DW_TAG_MIPS_loop: return "DW_TAG_MIPS_loop";
case DW_TAG_type_unit: return "DW_TAG_type_unit";
case DW_TAG_format_label: return "DW_TAG_format_label";
@@ -97,7 +100,7 @@ const char *llvm::dwarf::TagString(unsigned Tag) {
return "DW_TAG_GNU_formal_parameter_pack";
case DW_TAG_APPLE_property: return "DW_TAG_APPLE_property";
}
- return 0;
+ return nullptr;
}
/// ChildrenString - Return the string for the specified children flag.
@@ -107,7 +110,7 @@ const char *llvm::dwarf::ChildrenString(unsigned Children) {
case DW_CHILDREN_no: return "DW_CHILDREN_no";
case DW_CHILDREN_yes: return "DW_CHILDREN_yes";
}
- return 0;
+ return nullptr;
}
/// AttributeString - Return the string for the specified attribute.
@@ -206,6 +209,16 @@ const char *llvm::dwarf::AttributeString(unsigned Attribute) {
case DW_AT_const_expr: return "DW_AT_const_expr";
case DW_AT_enum_class: return "DW_AT_enum_class";
case DW_AT_linkage_name: return "DW_AT_linkage_name";
+ case DW_AT_string_length_bit_size: return "DW_AT_string_length_bit_size";
+ case DW_AT_string_length_byte_size: return "DW_AT_string_length_byte_size";
+ case DW_AT_rank: return "DW_AT_rank";
+ case DW_AT_str_offsets_base: return "DW_AT_str_offsets_base";
+ case DW_AT_addr_base: return "DW_AT_addr_base";
+ case DW_AT_ranges_base: return "DW_AT_ranges_base";
+ case DW_AT_dwo_id: return "DW_AT_dwo_id";
+ case DW_AT_dwo_name: return "DW_AT_dwo_name";
+ case DW_AT_reference: return "DW_AT_reference";
+ case DW_AT_rvalue_reference: return "DW_AT_rvalue_reference";
case DW_AT_MIPS_loop_begin: return "DW_AT_MIPS_loop_begin";
case DW_AT_MIPS_tail_loop_begin: return "DW_AT_MIPS_tail_loop_begin";
case DW_AT_MIPS_epilog_begin: return "DW_AT_MIPS_epilog_begin";
@@ -258,7 +271,7 @@ const char *llvm::dwarf::AttributeString(unsigned Attribute) {
case DW_AT_GNU_pubnames: return "DW_AT_GNU_pubnames";
case DW_AT_GNU_pubtypes: return "DW_AT_GNU_pubtypes";
}
- return 0;
+ return nullptr;
}
/// FormEncodingString - Return the string for the specified form encoding.
@@ -295,7 +308,7 @@ const char *llvm::dwarf::FormEncodingString(unsigned Encoding) {
case DW_FORM_GNU_addr_index: return "DW_FORM_GNU_addr_index";
case DW_FORM_GNU_str_index: return "DW_FORM_GNU_str_index";
}
- return 0;
+ return nullptr;
}
/// OperationEncodingString - Return the string for the specified operation
@@ -464,7 +477,7 @@ const char *llvm::dwarf::OperationEncodingString(unsigned Encoding) {
case DW_OP_GNU_addr_index: return "DW_OP_GNU_addr_index";
case DW_OP_GNU_const_index: return "DW_OP_GNU_const_index";
}
- return 0;
+ return nullptr;
}
/// AttributeEncodingString - Return the string for the specified attribute
@@ -490,7 +503,7 @@ const char *llvm::dwarf::AttributeEncodingString(unsigned Encoding) {
case DW_ATE_lo_user: return "DW_ATE_lo_user";
case DW_ATE_hi_user: return "DW_ATE_hi_user";
}
- return 0;
+ return nullptr;
}
/// DecimalSignString - Return the string for the specified decimal sign
@@ -503,7 +516,7 @@ const char *llvm::dwarf::DecimalSignString(unsigned Sign) {
case DW_DS_leading_separate: return "DW_DS_leading_separate";
case DW_DS_trailing_separate: return "DW_DS_trailing_separate";
}
- return 0;
+ return nullptr;
}
/// EndianityString - Return the string for the specified endianity.
@@ -516,7 +529,7 @@ const char *llvm::dwarf::EndianityString(unsigned Endian) {
case DW_END_lo_user: return "DW_END_lo_user";
case DW_END_hi_user: return "DW_END_hi_user";
}
- return 0;
+ return nullptr;
}
/// AccessibilityString - Return the string for the specified accessibility.
@@ -528,7 +541,7 @@ const char *llvm::dwarf::AccessibilityString(unsigned Access) {
case DW_ACCESS_protected: return "DW_ACCESS_protected";
case DW_ACCESS_private: return "DW_ACCESS_private";
}
- return 0;
+ return nullptr;
}
/// VisibilityString - Return the string for the specified visibility.
@@ -539,7 +552,7 @@ const char *llvm::dwarf::VisibilityString(unsigned Visibility) {
case DW_VIS_exported: return "DW_VIS_exported";
case DW_VIS_qualified: return "DW_VIS_qualified";
}
- return 0;
+ return nullptr;
}
/// VirtualityString - Return the string for the specified virtuality.
@@ -550,7 +563,7 @@ const char *llvm::dwarf::VirtualityString(unsigned Virtuality) {
case DW_VIRTUALITY_virtual: return "DW_VIRTUALITY_virtual";
case DW_VIRTUALITY_pure_virtual: return "DW_VIRTUALITY_pure_virtual";
}
- return 0;
+ return nullptr;
}
/// LanguageString - Return the string for the specified language.
@@ -576,10 +589,18 @@ const char *llvm::dwarf::LanguageString(unsigned Language) {
case DW_LANG_ObjC_plus_plus: return "DW_LANG_ObjC_plus_plus";
case DW_LANG_UPC: return "DW_LANG_UPC";
case DW_LANG_D: return "DW_LANG_D";
+ case DW_LANG_Python: return "DW_LANG_Python";
+ case DW_LANG_OpenCL: return "DW_LANG_OpenCL";
+ case DW_LANG_Go: return "DW_LANG_Go";
+ case DW_LANG_Modula3: return "DW_LANG_Modula3";
+ case DW_LANG_Haskell: return "DW_LANG_Haskell";
+ case DW_LANG_C_plus_plus_03: return "DW_LANG_C_plus_plus_03";
+ case DW_LANG_C_plus_plus_11: return "DW_LANG_C_plus_plus_11";
+ case DW_LANG_OCaml: return "DW_LANG_OCaml";
case DW_LANG_lo_user: return "DW_LANG_lo_user";
case DW_LANG_hi_user: return "DW_LANG_hi_user";
}
- return 0;
+ return nullptr;
}
/// CaseString - Return the string for the specified identifier case.
@@ -591,7 +612,7 @@ const char *llvm::dwarf::CaseString(unsigned Case) {
case DW_ID_down_case: return "DW_ID_down_case";
case DW_ID_case_insensitive: return "DW_ID_case_insensitive";
}
- return 0;
+ return nullptr;
}
/// ConventionString - Return the string for the specified calling convention.
@@ -604,7 +625,7 @@ const char *llvm::dwarf::ConventionString(unsigned Convention) {
case DW_CC_lo_user: return "DW_CC_lo_user";
case DW_CC_hi_user: return "DW_CC_hi_user";
}
- return 0;
+ return nullptr;
}
/// InlineCodeString - Return the string for the specified inline code.
@@ -616,7 +637,7 @@ const char *llvm::dwarf::InlineCodeString(unsigned Code) {
case DW_INL_declared_not_inlined: return "DW_INL_declared_not_inlined";
case DW_INL_declared_inlined: return "DW_INL_declared_inlined";
}
- return 0;
+ return nullptr;
}
/// ArrayOrderString - Return the string for the specified array order.
@@ -626,7 +647,7 @@ const char *llvm::dwarf::ArrayOrderString(unsigned Order) {
case DW_ORD_row_major: return "DW_ORD_row_major";
case DW_ORD_col_major: return "DW_ORD_col_major";
}
- return 0;
+ return nullptr;
}
/// DiscriminantString - Return the string for the specified discriminant
@@ -636,7 +657,7 @@ const char *llvm::dwarf::DiscriminantString(unsigned Discriminant) {
case DW_DSC_label: return "DW_DSC_label";
case DW_DSC_range: return "DW_DSC_range";
}
- return 0;
+ return nullptr;
}
/// LNStandardString - Return the string for the specified line number standard.
@@ -656,7 +677,7 @@ const char *llvm::dwarf::LNStandardString(unsigned Standard) {
case DW_LNS_set_epilogue_begin: return "DW_LNS_set_epilogue_begin";
case DW_LNS_set_isa: return "DW_LNS_set_isa";
}
- return 0;
+ return nullptr;
}
/// LNExtendedString - Return the string for the specified line number extended
@@ -671,7 +692,7 @@ const char *llvm::dwarf::LNExtendedString(unsigned Encoding) {
case DW_LNE_lo_user: return "DW_LNE_lo_user";
case DW_LNE_hi_user: return "DW_LNE_hi_user";
}
- return 0;
+ return nullptr;
}
/// MacinfoString - Return the string for the specified macinfo type encodings.
@@ -685,7 +706,7 @@ const char *llvm::dwarf::MacinfoString(unsigned Encoding) {
case DW_MACINFO_end_file: return "DW_MACINFO_end_file";
case DW_MACINFO_vendor_ext: return "DW_MACINFO_vendor_ext";
}
- return 0;
+ return nullptr;
}
/// CallFrameString - Return the string for the specified call frame instruction
@@ -724,7 +745,7 @@ const char *llvm::dwarf::CallFrameString(unsigned Encoding) {
case DW_CFA_lo_user: return "DW_CFA_lo_user";
case DW_CFA_hi_user: return "DW_CFA_hi_user";
}
- return 0;
+ return nullptr;
}
const char *llvm::dwarf::AtomTypeString(unsigned AT) {
@@ -740,7 +761,7 @@ const char *llvm::dwarf::AtomTypeString(unsigned AT) {
case DW_ATOM_type_flags:
return "DW_ATOM_type_flags";
}
- return 0;
+ return nullptr;
}
const char *llvm::dwarf::GDBIndexEntryKindString(GDBIndexEntryKind Kind) {
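All of the dwarf::*String helpers shown above now return nullptr rather than 0 for encodings they do not recognize. A minimal caller-side sketch (not part of the patch; printLanguage is an invented name for illustration):

#include "llvm/Support/Dwarf.h"
#include "llvm/Support/raw_ostream.h"

static void printLanguage(unsigned Lang) {
  if (const char *Name = llvm::dwarf::LanguageString(Lang))
    llvm::errs() << Name << "\n";
  else
    llvm::errs() << "unknown DW_LANG value: " << Lang << "\n";
}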
diff --git a/contrib/llvm/lib/Support/DynamicLibrary.cpp b/contrib/llvm/lib/Support/DynamicLibrary.cpp
index a825c68..d2b551e 100644
--- a/contrib/llvm/lib/Support/DynamicLibrary.cpp
+++ b/contrib/llvm/lib/Support/DynamicLibrary.cpp
@@ -7,19 +7,19 @@
//
//===----------------------------------------------------------------------===//
//
-// This header file implements the operating system DynamicLibrary concept.
+// This file implements the operating system DynamicLibrary concept.
//
// FIXME: This file leaks ExplicitSymbols and OpenedHandles!
//
//===----------------------------------------------------------------------===//
#include "llvm/Support/DynamicLibrary.h"
-#include "llvm/Support/ManagedStatic.h"
+#include "llvm-c/Support.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Config/config.h"
+#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Mutex.h"
-#include "llvm-c/Support.h"
#include <cstdio>
#include <cstring>
@@ -51,14 +51,14 @@ using namespace llvm::sys;
//=== independent code.
//===----------------------------------------------------------------------===//
-static DenseSet<void *> *OpenedHandles = 0;
+static DenseSet<void *> *OpenedHandles = nullptr;
DynamicLibrary DynamicLibrary::getPermanentLibrary(const char *filename,
std::string *errMsg) {
SmartScopedLock<true> lock(*SymbolsMutex);
void *handle = dlopen(filename, RTLD_LAZY|RTLD_GLOBAL);
- if (handle == 0) {
+ if (!handle) {
if (errMsg) *errMsg = dlerror();
return DynamicLibrary();
}
@@ -66,11 +66,11 @@ DynamicLibrary DynamicLibrary::getPermanentLibrary(const char *filename,
#ifdef __CYGWIN__
// Cygwin searches symbols only in the main
// with the handle of dlopen(NULL, RTLD_GLOBAL).
- if (filename == NULL)
+ if (!filename)
handle = RTLD_DEFAULT;
#endif
- if (OpenedHandles == 0)
+ if (!OpenedHandles)
OpenedHandles = new DenseSet<void *>();
// If we've already loaded this library, dlclose() the handle in order to
@@ -83,7 +83,7 @@ DynamicLibrary DynamicLibrary::getPermanentLibrary(const char *filename,
void *DynamicLibrary::getAddressOfSymbol(const char *symbolName) {
if (!isValid())
- return NULL;
+ return nullptr;
return dlsym(Data, symbolName);
}
@@ -166,7 +166,7 @@ void* DynamicLibrary::SearchForAddressOfSymbol(const char *symbolName) {
#endif
#undef EXPLICIT_SYMBOL
- return 0;
+ return nullptr;
}
#endif // LLVM_ON_WIN32
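The DynamicLibrary entry points touched above keep their pre-existing shape; a minimal usage sketch, where the library path and symbol name are placeholders:

#include "llvm/Support/DynamicLibrary.h"
#include <string>

static bool loadPlugin() {
  std::string Err;
  llvm::sys::DynamicLibrary DL =
      llvm::sys::DynamicLibrary::getPermanentLibrary("libplugin.so", &Err);
  if (!DL.isValid())
    return false;                          // Err carries the dlerror() text.
  return DL.getAddressOfSymbol("plugin_entry") != nullptr;
}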
diff --git a/contrib/llvm/lib/Support/ErrorHandling.cpp b/contrib/llvm/lib/Support/ErrorHandling.cpp
index 1eafb96..c36007f 100644
--- a/contrib/llvm/lib/Support/ErrorHandling.cpp
+++ b/contrib/llvm/lib/Support/ErrorHandling.cpp
@@ -13,14 +13,18 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/ErrorHandling.h"
+#include "llvm-c/Core.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Config/config.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/Errc.h"
#include "llvm/Support/Signals.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/MutexGuard.h"
#include "llvm/Support/Threading.h"
+#include "llvm/Support/WindowsError.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm-c/Core.h"
#include <cassert>
#include <cstdlib>
@@ -34,20 +38,23 @@
using namespace llvm;
-static fatal_error_handler_t ErrorHandler = 0;
-static void *ErrorHandlerUserData = 0;
+static fatal_error_handler_t ErrorHandler = nullptr;
+static void *ErrorHandlerUserData = nullptr;
+
+static sys::Mutex ErrorHandlerMutex;
void llvm::install_fatal_error_handler(fatal_error_handler_t handler,
void *user_data) {
- assert(!llvm_is_multithreaded() &&
- "Cannot register error handlers after starting multithreaded mode!\n");
+ llvm::MutexGuard Lock(ErrorHandlerMutex);
assert(!ErrorHandler && "Error handler already registered!\n");
ErrorHandler = handler;
ErrorHandlerUserData = user_data;
}
void llvm::remove_fatal_error_handler() {
- ErrorHandler = 0;
+ llvm::MutexGuard Lock(ErrorHandlerMutex);
+ ErrorHandler = nullptr;
+ ErrorHandlerUserData = nullptr;
}
void llvm::report_fatal_error(const char *Reason, bool GenCrashDiag) {
@@ -63,8 +70,18 @@ void llvm::report_fatal_error(StringRef Reason, bool GenCrashDiag) {
}
void llvm::report_fatal_error(const Twine &Reason, bool GenCrashDiag) {
- if (ErrorHandler) {
- ErrorHandler(ErrorHandlerUserData, Reason.str(), GenCrashDiag);
+ llvm::fatal_error_handler_t handler = nullptr;
+ void* handlerData = nullptr;
+ {
+ // Only acquire the mutex while reading the handler, so as not to invoke a
+ // user-supplied callback under a lock.
+ llvm::MutexGuard Lock(ErrorHandlerMutex);
+ handler = ErrorHandler;
+ handlerData = ErrorHandlerUserData;
+ }
+
+ if (handler) {
+ handler(handlerData, Reason.str(), GenCrashDiag);
} else {
// Blast the result out to stderr. We don't try hard to make sure this
// succeeds (e.g. handling EINTR) and we can't use errs() here because
@@ -119,3 +136,70 @@ void LLVMInstallFatalErrorHandler(LLVMFatalErrorHandler Handler) {
void LLVMResetFatalErrorHandler() {
remove_fatal_error_handler();
}
+
+#ifdef LLVM_ON_WIN32
+
+#include <winerror.h>
+
+// I'd rather not double the line count of the following.
+#define MAP_ERR_TO_COND(x, y) \
+ case x: \
+ return make_error_code(errc::y)
+
+std::error_code llvm::mapWindowsError(unsigned EV) {
+ switch (EV) {
+ MAP_ERR_TO_COND(ERROR_ACCESS_DENIED, permission_denied);
+ MAP_ERR_TO_COND(ERROR_ALREADY_EXISTS, file_exists);
+ MAP_ERR_TO_COND(ERROR_BAD_UNIT, no_such_device);
+ MAP_ERR_TO_COND(ERROR_BUFFER_OVERFLOW, filename_too_long);
+ MAP_ERR_TO_COND(ERROR_BUSY, device_or_resource_busy);
+ MAP_ERR_TO_COND(ERROR_BUSY_DRIVE, device_or_resource_busy);
+ MAP_ERR_TO_COND(ERROR_CANNOT_MAKE, permission_denied);
+ MAP_ERR_TO_COND(ERROR_CANTOPEN, io_error);
+ MAP_ERR_TO_COND(ERROR_CANTREAD, io_error);
+ MAP_ERR_TO_COND(ERROR_CANTWRITE, io_error);
+ MAP_ERR_TO_COND(ERROR_CURRENT_DIRECTORY, permission_denied);
+ MAP_ERR_TO_COND(ERROR_DEV_NOT_EXIST, no_such_device);
+ MAP_ERR_TO_COND(ERROR_DEVICE_IN_USE, device_or_resource_busy);
+ MAP_ERR_TO_COND(ERROR_DIR_NOT_EMPTY, directory_not_empty);
+ MAP_ERR_TO_COND(ERROR_DIRECTORY, invalid_argument);
+ MAP_ERR_TO_COND(ERROR_DISK_FULL, no_space_on_device);
+ MAP_ERR_TO_COND(ERROR_FILE_EXISTS, file_exists);
+ MAP_ERR_TO_COND(ERROR_FILE_NOT_FOUND, no_such_file_or_directory);
+ MAP_ERR_TO_COND(ERROR_HANDLE_DISK_FULL, no_space_on_device);
+ MAP_ERR_TO_COND(ERROR_INVALID_ACCESS, permission_denied);
+ MAP_ERR_TO_COND(ERROR_INVALID_DRIVE, no_such_device);
+ MAP_ERR_TO_COND(ERROR_INVALID_FUNCTION, function_not_supported);
+ MAP_ERR_TO_COND(ERROR_INVALID_HANDLE, invalid_argument);
+ MAP_ERR_TO_COND(ERROR_INVALID_NAME, invalid_argument);
+ MAP_ERR_TO_COND(ERROR_LOCK_VIOLATION, no_lock_available);
+ MAP_ERR_TO_COND(ERROR_LOCKED, no_lock_available);
+ MAP_ERR_TO_COND(ERROR_NEGATIVE_SEEK, invalid_argument);
+ MAP_ERR_TO_COND(ERROR_NOACCESS, permission_denied);
+ MAP_ERR_TO_COND(ERROR_NOT_ENOUGH_MEMORY, not_enough_memory);
+ MAP_ERR_TO_COND(ERROR_NOT_READY, resource_unavailable_try_again);
+ MAP_ERR_TO_COND(ERROR_OPEN_FAILED, io_error);
+ MAP_ERR_TO_COND(ERROR_OPEN_FILES, device_or_resource_busy);
+ MAP_ERR_TO_COND(ERROR_OUTOFMEMORY, not_enough_memory);
+ MAP_ERR_TO_COND(ERROR_PATH_NOT_FOUND, no_such_file_or_directory);
+ MAP_ERR_TO_COND(ERROR_BAD_NETPATH, no_such_file_or_directory);
+ MAP_ERR_TO_COND(ERROR_READ_FAULT, io_error);
+ MAP_ERR_TO_COND(ERROR_RETRY, resource_unavailable_try_again);
+ MAP_ERR_TO_COND(ERROR_SEEK, io_error);
+ MAP_ERR_TO_COND(ERROR_SHARING_VIOLATION, permission_denied);
+ MAP_ERR_TO_COND(ERROR_TOO_MANY_OPEN_FILES, too_many_files_open);
+ MAP_ERR_TO_COND(ERROR_WRITE_FAULT, io_error);
+ MAP_ERR_TO_COND(ERROR_WRITE_PROTECT, permission_denied);
+ MAP_ERR_TO_COND(WSAEACCES, permission_denied);
+ MAP_ERR_TO_COND(WSAEBADF, bad_file_descriptor);
+ MAP_ERR_TO_COND(WSAEFAULT, bad_address);
+ MAP_ERR_TO_COND(WSAEINTR, interrupted);
+ MAP_ERR_TO_COND(WSAEINVAL, invalid_argument);
+ MAP_ERR_TO_COND(WSAEMFILE, too_many_files_open);
+ MAP_ERR_TO_COND(WSAENAMETOOLONG, filename_too_long);
+ default:
+ return std::error_code(EV, std::system_category());
+ }
+}
+
+#endif
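A hedged sketch of the intended use of the new mapWindowsError helper on Windows hosts; it assumes the declaration lives in llvm/Support/WindowsError.h (the header added to the includes above) and uses the Win32 GetLastError() call:

#ifdef LLVM_ON_WIN32
#include "llvm/Support/WindowsError.h"
#include <system_error>
#include <windows.h>

static std::error_code lastWin32ErrorAsErrorCode() {
  // The returned code compares against generic conditions, e.g.
  //   EC == std::errc::no_such_file_or_directory
  return llvm::mapWindowsError(::GetLastError());
}
#endif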
diff --git a/contrib/llvm/lib/Support/FileOutputBuffer.cpp b/contrib/llvm/lib/Support/FileOutputBuffer.cpp
index ed084fa..94bcdc5 100644
--- a/contrib/llvm/lib/Support/FileOutputBuffer.cpp
+++ b/contrib/llvm/lib/Support/FileOutputBuffer.cpp
@@ -11,11 +11,11 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/Support/Errc.h"
#include "llvm/Support/FileOutputBuffer.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/system_error.h"
+#include <system_error>
using llvm::sys::fs::mapped_file_region;
@@ -28,17 +28,16 @@ FileOutputBuffer::FileOutputBuffer(mapped_file_region * R,
}
FileOutputBuffer::~FileOutputBuffer() {
- bool Existed;
- sys::fs::remove(Twine(TempPath), Existed);
+ sys::fs::remove(Twine(TempPath));
}
-error_code FileOutputBuffer::create(StringRef FilePath,
- size_t Size,
- OwningPtr<FileOutputBuffer> &Result,
- unsigned Flags) {
+std::error_code
+FileOutputBuffer::create(StringRef FilePath, size_t Size,
+ std::unique_ptr<FileOutputBuffer> &Result,
+ unsigned Flags) {
// If file already exists, it must be a regular file (to be mappable).
sys::fs::file_status Stat;
- error_code EC = sys::fs::status(FilePath, Stat);
+ std::error_code EC = sys::fs::status(FilePath, Stat);
switch (Stat.type()) {
case sys::fs::file_type::file_not_found:
// If file does not exist, we'll create one.
@@ -57,8 +56,7 @@ error_code FileOutputBuffer::create(StringRef FilePath,
}
// Delete target file.
- bool Existed;
- EC = sys::fs::remove(FilePath, Existed);
+ EC = sys::fs::remove(FilePath);
if (EC)
return EC;
@@ -75,25 +73,25 @@ error_code FileOutputBuffer::create(StringRef FilePath,
if (EC)
return EC;
- OwningPtr<mapped_file_region> MappedFile(new mapped_file_region(
+ std::unique_ptr<mapped_file_region> MappedFile(new mapped_file_region(
FD, true, mapped_file_region::readwrite, Size, 0, EC));
if (EC)
return EC;
Result.reset(new FileOutputBuffer(MappedFile.get(), FilePath, TempFilePath));
if (Result)
- MappedFile.take();
+ MappedFile.release();
- return error_code::success();
+ return std::error_code();
}
-error_code FileOutputBuffer::commit(int64_t NewSmallerSize) {
+std::error_code FileOutputBuffer::commit(int64_t NewSmallerSize) {
// Unmap buffer, letting OS flush dirty pages to file on disk.
- Region.reset(0);
+ Region.reset();
// If requested, resize file as part of commit.
if ( NewSmallerSize != -1 ) {
- error_code EC = sys::fs::resize_file(Twine(TempPath), NewSmallerSize);
+ std::error_code EC = sys::fs::resize_file(Twine(TempPath), NewSmallerSize);
if (EC)
return EC;
}
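A minimal sketch of the reworked FileOutputBuffer::create interface, assuming the default Flags and NewSmallerSize arguments; the output path is a placeholder:

#include "llvm/Support/FileOutputBuffer.h"
#include <cstring>
#include <memory>
#include <system_error>

static std::error_code writeBlob(const char *Data, size_t Size) {
  std::unique_ptr<llvm::FileOutputBuffer> Buf;
  if (std::error_code EC =
          llvm::FileOutputBuffer::create("out.bin", Size, Buf))
    return EC;
  std::memcpy(Buf->getBufferStart(), Data, Size);  // fill the mapped region
  return Buf->commit();   // commit() now reports failure as std::error_code
}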
diff --git a/contrib/llvm/lib/Support/FileUtilities.cpp b/contrib/llvm/lib/Support/FileUtilities.cpp
index 7f5d540..8a23491 100644
--- a/contrib/llvm/lib/Support/FileUtilities.cpp
+++ b/contrib/llvm/lib/Support/FileUtilities.cpp
@@ -13,15 +13,14 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/FileUtilities.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/system_error.h"
#include <cctype>
#include <cstdlib>
#include <cstring>
+#include <system_error>
using namespace llvm;
static bool isSignedChar(char C) {
@@ -177,18 +176,21 @@ int llvm::DiffFilesWithTolerance(StringRef NameA,
std::string *Error) {
  // Now it's safe to mmap the files into memory because both files
// have a non-zero size.
- OwningPtr<MemoryBuffer> F1;
- if (error_code ec = MemoryBuffer::getFile(NameA, F1)) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> F1OrErr = MemoryBuffer::getFile(NameA);
+ if (std::error_code EC = F1OrErr.getError()) {
if (Error)
- *Error = ec.message();
+ *Error = EC.message();
return 2;
}
- OwningPtr<MemoryBuffer> F2;
- if (error_code ec = MemoryBuffer::getFile(NameB, F2)) {
+ std::unique_ptr<MemoryBuffer> F1 = std::move(F1OrErr.get());
+
+ ErrorOr<std::unique_ptr<MemoryBuffer>> F2OrErr = MemoryBuffer::getFile(NameB);
+ if (std::error_code EC = F2OrErr.getError()) {
if (Error)
- *Error = ec.message();
+ *Error = EC.message();
return 2;
}
+ std::unique_ptr<MemoryBuffer> F2 = std::move(F2OrErr.get());
// Okay, now that we opened the files, scan them for the first difference.
const char *File1Start = F1->getBufferStart();
diff --git a/contrib/llvm/lib/Support/FoldingSet.cpp b/contrib/llvm/lib/Support/FoldingSet.cpp
index 145f12d..4635114 100644
--- a/contrib/llvm/lib/Support/FoldingSet.cpp
+++ b/contrib/llvm/lib/Support/FoldingSet.cpp
@@ -190,7 +190,7 @@ FoldingSetNodeID::Intern(BumpPtrAllocator &Allocator) const {
static FoldingSetImpl::Node *GetNextPtr(void *NextInBucketPtr) {
// The low bit is set if this is the pointer back to the bucket.
if (reinterpret_cast<intptr_t>(NextInBucketPtr) & 1)
- return 0;
+ return nullptr;
return static_cast<FoldingSetImpl::Node*>(NextInBucketPtr);
}
@@ -262,7 +262,7 @@ void FoldingSetImpl::GrowHashTable() {
while (Node *NodeInBucket = GetNextPtr(Probe)) {
// Figure out the next link, remove NodeInBucket from the old link.
Probe = NodeInBucket->getNextInBucket();
- NodeInBucket->SetNextInBucket(0);
+ NodeInBucket->SetNextInBucket(nullptr);
// Insert the node into the new bucket, after recomputing the hash.
InsertNode(NodeInBucket,
@@ -285,7 +285,7 @@ FoldingSetImpl::Node
void **Bucket = GetBucketFor(IDHash, Buckets, NumBuckets);
void *Probe = *Bucket;
- InsertPos = 0;
+ InsertPos = nullptr;
FoldingSetNodeID TempID;
while (Node *NodeInBucket = GetNextPtr(Probe)) {
@@ -298,14 +298,14 @@ FoldingSetImpl::Node
// Didn't find the node, return null with the bucket as the InsertPos.
InsertPos = Bucket;
- return 0;
+ return nullptr;
}
/// InsertNode - Insert the specified node into the folding set, knowing that it
/// is not already in the map. InsertPos must be obtained from
/// FindNodeOrInsertPos.
void FoldingSetImpl::InsertNode(Node *N, void *InsertPos) {
- assert(N->getNextInBucket() == 0);
+ assert(!N->getNextInBucket());
// Do we need to grow the hashtable?
if (NumNodes+1 > NumBuckets*2) {
GrowHashTable();
@@ -323,7 +323,7 @@ void FoldingSetImpl::InsertNode(Node *N, void *InsertPos) {
// If this is the first insertion into this bucket, its next pointer will be
// null. Pretend as if it pointed to itself, setting the low bit to indicate
// that it is a pointer to the bucket.
- if (Next == 0)
+ if (!Next)
Next = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(Bucket)|1);
// Set the node's next pointer, and make the bucket point to the node.
@@ -337,10 +337,10 @@ bool FoldingSetImpl::RemoveNode(Node *N) {
// Because each bucket is a circular list, we don't need to compute N's hash
// to remove it.
void *Ptr = N->getNextInBucket();
- if (Ptr == 0) return false; // Not in folding set.
+ if (!Ptr) return false; // Not in folding set.
--NumNodes;
- N->SetNextInBucket(0);
+ N->SetNextInBucket(nullptr);
// Remember what N originally pointed to, either a bucket or another node.
void *NodeNextPtr = Ptr;
@@ -390,7 +390,7 @@ FoldingSetImpl::Node *FoldingSetImpl::GetOrInsertNode(FoldingSetImpl::Node *N) {
FoldingSetIteratorImpl::FoldingSetIteratorImpl(void **Bucket) {
// Skip to the first non-null non-self-cycle bucket.
while (*Bucket != reinterpret_cast<void*>(-1) &&
- (*Bucket == 0 || GetNextPtr(*Bucket) == 0))
+ (!*Bucket || !GetNextPtr(*Bucket)))
++Bucket;
NodePtr = static_cast<FoldingSetNode*>(*Bucket);
@@ -410,7 +410,7 @@ void FoldingSetIteratorImpl::advance() {
do {
++Bucket;
} while (*Bucket != reinterpret_cast<void*>(-1) &&
- (*Bucket == 0 || GetNextPtr(*Bucket) == 0));
+ (!*Bucket || !GetNextPtr(*Bucket)));
NodePtr = static_cast<FoldingSetNode*>(*Bucket);
}
@@ -420,5 +420,5 @@ void FoldingSetIteratorImpl::advance() {
// FoldingSetBucketIteratorImpl Implementation
FoldingSetBucketIteratorImpl::FoldingSetBucketIteratorImpl(void **Bucket) {
- Ptr = (*Bucket == 0 || GetNextPtr(*Bucket) == 0) ? (void*) Bucket : *Bucket;
+ Ptr = (!*Bucket || !GetNextPtr(*Bucket)) ? (void*) Bucket : *Bucket;
}
diff --git a/contrib/llvm/lib/Support/FormattedStream.cpp b/contrib/llvm/lib/Support/FormattedStream.cpp
index 9febf66..618ec26 100644
--- a/contrib/llvm/lib/Support/FormattedStream.cpp
+++ b/contrib/llvm/lib/Support/FormattedStream.cpp
@@ -81,7 +81,7 @@ void formatted_raw_ostream::write_impl(const char *Ptr, size_t Size) {
TheStream->write(Ptr, Size);
// Reset the scanning pointer.
- Scanned = 0;
+ Scanned = nullptr;
}
/// fouts() - This returns a reference to a formatted_raw_ostream for
diff --git a/contrib/llvm/lib/Support/GraphWriter.cpp b/contrib/llvm/lib/Support/GraphWriter.cpp
index 85be415..e68ee43 100644
--- a/contrib/llvm/lib/Support/GraphWriter.cpp
+++ b/contrib/llvm/lib/Support/GraphWriter.cpp
@@ -68,7 +68,7 @@ StringRef llvm::DOT::getColorString(unsigned ColorNumber) {
std::string llvm::createGraphFilename(const Twine &Name, int &FD) {
FD = -1;
SmallString<128> Filename;
- error_code EC = sys::fs::createTemporaryFile(Name, "dot", FD, Filename);
+ std::error_code EC = sys::fs::createTemporaryFile(Name, "dot", FD, Filename);
if (EC) {
errs() << "Error: " << EC.message() << "\n";
return "";
@@ -78,149 +78,165 @@ std::string llvm::createGraphFilename(const Twine &Name, int &FD) {
return Filename.str();
}
-// Execute the graph viewer. Return true if successful.
-static bool LLVM_ATTRIBUTE_UNUSED
-ExecGraphViewer(StringRef ExecPath, std::vector<const char*> &args,
- StringRef Filename, bool wait, std::string &ErrMsg) {
+// Execute the graph viewer. Return true if there were errors.
+static bool ExecGraphViewer(StringRef ExecPath, std::vector<const char *> &args,
+ StringRef Filename, bool wait,
+ std::string &ErrMsg) {
+ assert(args.back() == nullptr);
if (wait) {
- if (sys::ExecuteAndWait(ExecPath, &args[0],0,0,0,0,&ErrMsg)) {
+ if (sys::ExecuteAndWait(ExecPath, args.data(), nullptr, nullptr, 0, 0,
+ &ErrMsg)) {
errs() << "Error: " << ErrMsg << "\n";
- return false;
+ return true;
}
- bool Existed;
- sys::fs::remove(Filename, Existed);
+ sys::fs::remove(Filename);
errs() << " done. \n";
- }
- else {
- sys::ExecuteNoWait(ExecPath, &args[0],0,0,0,&ErrMsg);
+ } else {
+ sys::ExecuteNoWait(ExecPath, args.data(), nullptr, nullptr, 0, &ErrMsg);
errs() << "Remember to erase graph file: " << Filename.str() << "\n";
}
- return true;
+ return false;
+}
+
+struct GraphSession {
+ std::string LogBuffer;
+ bool TryFindProgram(StringRef Names, std::string &ProgramPath) {
+ raw_string_ostream Log(LogBuffer);
+ SmallVector<StringRef, 8> parts;
+ Names.split(parts, "|");
+ for (auto Name : parts) {
+ ProgramPath = sys::FindProgramByName(Name);
+ if (!ProgramPath.empty())
+ return true;
+ Log << " Tried '" << Name << "'\n";
+ }
+ return false;
+ }
+};
+
+static const char *getProgramName(GraphProgram::Name program) {
+ switch (program) {
+ case GraphProgram::DOT:
+ return "dot";
+ case GraphProgram::FDP:
+ return "fdp";
+ case GraphProgram::NEATO:
+ return "neato";
+ case GraphProgram::TWOPI:
+ return "twopi";
+ case GraphProgram::CIRCO:
+ return "circo";
+ }
+ llvm_unreachable("bad kind");
}
-void llvm::DisplayGraph(StringRef FilenameRef, bool wait,
+bool llvm::DisplayGraph(StringRef FilenameRef, bool wait,
GraphProgram::Name program) {
std::string Filename = FilenameRef;
wait &= !ViewBackground;
std::string ErrMsg;
-#if HAVE_GRAPHVIZ
- std::string Graphviz(LLVM_PATH_GRAPHVIZ);
-
- std::vector<const char*> args;
- args.push_back(Graphviz.c_str());
- args.push_back(Filename.c_str());
- args.push_back(0);
-
- errs() << "Running 'Graphviz' program... ";
- if (!ExecGraphViewer(Graphviz, args, Filename, wait, ErrMsg))
- return;
-
-#elif HAVE_XDOT
- std::vector<const char*> args;
- args.push_back(LLVM_PATH_XDOT);
- args.push_back(Filename.c_str());
-
- switch (program) {
- case GraphProgram::DOT: args.push_back("-f"); args.push_back("dot"); break;
- case GraphProgram::FDP: args.push_back("-f"); args.push_back("fdp"); break;
- case GraphProgram::NEATO: args.push_back("-f"); args.push_back("neato");break;
- case GraphProgram::TWOPI: args.push_back("-f"); args.push_back("twopi");break;
- case GraphProgram::CIRCO: args.push_back("-f"); args.push_back("circo");break;
+ std::string ViewerPath;
+ GraphSession S;
+
+ // Graphviz
+ if (S.TryFindProgram("Graphviz", ViewerPath)) {
+ std::vector<const char *> args;
+ args.push_back(ViewerPath.c_str());
+ args.push_back(Filename.c_str());
+ args.push_back(nullptr);
+
+ errs() << "Running 'Graphviz' program... ";
+ return ExecGraphViewer(ViewerPath, args, Filename, wait, ErrMsg);
}
- args.push_back(0);
+ // xdot
+ if (S.TryFindProgram("xdot|xdot.py", ViewerPath)) {
+ std::vector<const char *> args;
+ args.push_back(ViewerPath.c_str());
+ args.push_back(Filename.c_str());
- errs() << "Running 'xdot.py' program... ";
- if (!ExecGraphViewer(LLVM_PATH_XDOT, args, Filename, wait, ErrMsg))
- return;
+ args.push_back("-f");
+ args.push_back(getProgramName(program));
-#elif (HAVE_GV && (HAVE_DOT || HAVE_FDP || HAVE_NEATO || \
- HAVE_TWOPI || HAVE_CIRCO))
- std::string PSFilename = Filename + ".ps";
- std::string prog;
+ args.push_back(nullptr);
- // Set default grapher
-#if HAVE_CIRCO
- prog = LLVM_PATH_CIRCO;
-#endif
-#if HAVE_TWOPI
- prog = LLVM_PATH_TWOPI;
-#endif
-#if HAVE_NEATO
- prog = LLVM_PATH_NEATO;
-#endif
-#if HAVE_FDP
- prog = LLVM_PATH_FDP;
-#endif
-#if HAVE_DOT
- prog = LLVM_PATH_DOT;
-#endif
+ errs() << "Running 'xdot.py' program... ";
+ return ExecGraphViewer(ViewerPath, args, Filename, wait, ErrMsg);
+ }
- // Find which program the user wants
-#if HAVE_DOT
- if (program == GraphProgram::DOT)
- prog = LLVM_PATH_DOT;
-#endif
-#if (HAVE_FDP)
- if (program == GraphProgram::FDP)
- prog = LLVM_PATH_FDP;
-#endif
-#if (HAVE_NEATO)
- if (program == GraphProgram::NEATO)
- prog = LLVM_PATH_NEATO;
-#endif
-#if (HAVE_TWOPI)
- if (program == GraphProgram::TWOPI)
- prog = LLVM_PATH_TWOPI;
-#endif
-#if (HAVE_CIRCO)
- if (program == GraphProgram::CIRCO)
- prog = LLVM_PATH_CIRCO;
+ enum PSViewerKind { PSV_None, PSV_OSXOpen, PSV_XDGOpen, PSV_Ghostview };
+ PSViewerKind PSViewer = PSV_None;
+#ifdef __APPLE__
+ if (!PSViewer && S.TryFindProgram("open", ViewerPath))
+ PSViewer = PSV_OSXOpen;
#endif
+ if (!PSViewer && S.TryFindProgram("gv", ViewerPath))
+ PSViewer = PSV_Ghostview;
+ if (!PSViewer && S.TryFindProgram("xdg-open", ViewerPath))
+ PSViewer = PSV_XDGOpen;
+
+ // PostScript graph generator + PostScript viewer
+ std::string GeneratorPath;
+ if (PSViewer &&
+ (S.TryFindProgram(getProgramName(program), GeneratorPath) ||
+ S.TryFindProgram("dot|fdp|neato|twopi|circo", GeneratorPath))) {
+ std::string PSFilename = Filename + ".ps";
+
+ std::vector<const char *> args;
+ args.push_back(GeneratorPath.c_str());
+ args.push_back("-Tps");
+ args.push_back("-Nfontname=Courier");
+ args.push_back("-Gsize=7.5,10");
+ args.push_back(Filename.c_str());
+ args.push_back("-o");
+ args.push_back(PSFilename.c_str());
+ args.push_back(nullptr);
+
+ errs() << "Running '" << GeneratorPath << "' program... ";
+
+ if (ExecGraphViewer(GeneratorPath, args, Filename, wait, ErrMsg))
+ return true;
+
+ args.clear();
+ args.push_back(ViewerPath.c_str());
+ switch (PSViewer) {
+ case PSV_OSXOpen:
+ args.push_back("-W");
+ args.push_back(PSFilename.c_str());
+ break;
+ case PSV_XDGOpen:
+ wait = false;
+ args.push_back(PSFilename.c_str());
+ break;
+ case PSV_Ghostview:
+ args.push_back("--spartan");
+ args.push_back(PSFilename.c_str());
+ break;
+ case PSV_None:
+ llvm_unreachable("Invalid viewer");
+ }
+ args.push_back(nullptr);
- std::vector<const char*> args;
- args.push_back(prog.c_str());
- args.push_back("-Tps");
- args.push_back("-Nfontname=Courier");
- args.push_back("-Gsize=7.5,10");
- args.push_back(Filename.c_str());
- args.push_back("-o");
- args.push_back(PSFilename.c_str());
- args.push_back(0);
-
- errs() << "Running '" << prog << "' program... ";
-
- if (!ExecGraphViewer(prog, args, Filename, wait, ErrMsg))
- return;
-
- std::string gv(LLVM_PATH_GV);
- args.clear();
- args.push_back(gv.c_str());
- args.push_back(PSFilename.c_str());
- args.push_back("--spartan");
- args.push_back(0);
-
- ErrMsg.clear();
- if (!ExecGraphViewer(gv, args, PSFilename, wait, ErrMsg))
- return;
-
-#elif HAVE_DOTTY
- std::string dotty(LLVM_PATH_DOTTY);
+ ErrMsg.clear();
+ return ExecGraphViewer(ViewerPath, args, PSFilename, wait, ErrMsg);
+ }
- std::vector<const char*> args;
- args.push_back(dotty.c_str());
- args.push_back(Filename.c_str());
- args.push_back(0);
+ // dotty
+ if (S.TryFindProgram("dotty", ViewerPath)) {
+ std::vector<const char *> args;
+ args.push_back(ViewerPath.c_str());
+ args.push_back(Filename.c_str());
+ args.push_back(nullptr);
// Dotty spawns another app and doesn't wait until it returns
-#if defined (__MINGW32__) || defined (_WINDOWS)
- wait = false;
-#endif
- errs() << "Running 'dotty' program... ";
- if (!ExecGraphViewer(dotty, args, Filename, wait, ErrMsg))
- return;
-#else
- (void)Filename;
- (void)ErrMsg;
+#ifdef LLVM_ON_WIN32
+ wait = false;
#endif
+ errs() << "Running 'dotty' program... ";
+ return ExecGraphViewer(ViewerPath, args, Filename, wait, ErrMsg);
+ }
+
+ errs() << "Error: Couldn't find a usable graph viewer program:\n";
+ errs() << S.LogBuffer << "\n";
+ return true;
}
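DisplayGraph now picks a viewer at run time and reports failure through its return value (true means error); a caller-side sketch:

#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"

static void showDotFile(llvm::StringRef Path) {
  if (llvm::DisplayGraph(Path, /*wait=*/true, llvm::GraphProgram::DOT))
    llvm::errs() << "could not display " << Path << "\n";
}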
diff --git a/contrib/llvm/lib/Support/Host.cpp b/contrib/llvm/lib/Support/Host.cpp
index 6e9a5c9..e2dd6d5 100644
--- a/contrib/llvm/lib/Support/Host.cpp
+++ b/contrib/llvm/lib/Support/Host.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This header file implements the operating system Host concept.
+// This file implements the operating system Host concept.
//
//===----------------------------------------------------------------------===//
@@ -39,6 +39,8 @@
#include <mach/machine.h>
#endif
+#define DEBUG_TYPE "host-detection"
+
//===----------------------------------------------------------------------===//
//
// Implementations of the CPU detection routines
@@ -98,8 +100,9 @@ static bool GetX86CpuIDAndInfo(unsigned value, unsigned *rEAX, unsigned *rEBX,
/// GetX86CpuIDAndInfoEx - Execute the specified cpuid with subleaf and return the
/// 4 values in the specified arguments. If we can't run cpuid on the host,
/// return true.
-bool GetX86CpuIDAndInfoEx(unsigned value, unsigned subleaf, unsigned *rEAX,
- unsigned *rEBX, unsigned *rECX, unsigned *rEDX) {
+static bool GetX86CpuIDAndInfoEx(unsigned value, unsigned subleaf,
+ unsigned *rEAX, unsigned *rEBX, unsigned *rECX,
+ unsigned *rEDX) {
#if defined(__x86_64__) || defined(_M_AMD64) || defined (_M_X64)
#if defined(__GNUC__)
  // gcc doesn't know cpuid would clobber ebx/rbx. Preserve it manually.
@@ -192,7 +195,7 @@ static void DetectX86FamilyModel(unsigned EAX, unsigned &Family,
}
}
-std::string sys::getHostCPUName() {
+StringRef sys::getHostCPUName() {
unsigned EAX = 0, EBX = 0, ECX = 0, EDX = 0;
if (GetX86CpuIDAndInfo(0x1, &EAX, &EBX, &ECX, &EDX))
return "generic";
@@ -220,6 +223,7 @@ std::string sys::getHostCPUName() {
(EBX & 0x20);
GetX86CpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX);
bool Em64T = (EDX >> 29) & 0x1;
+ bool HasTBM = (ECX >> 21) & 0x1;
if (memcmp(text.c, "GenuineIntel", 12) == 0) {
switch (Family) {
@@ -432,9 +436,11 @@ std::string sys::getHostCPUName() {
case 21:
if (!HasAVX) // If the OS doesn't support AVX provide a sane fallback.
return "btver1";
+ if (Model >= 0x50)
+ return "bdver4"; // 50h-6Fh: Excavator
if (Model >= 0x30)
return "bdver3"; // 30h-3Fh: Steamroller
- if (Model >= 0x10)
+ if (Model >= 0x10 || HasTBM)
return "bdver2"; // 10h-1Fh: Piledriver
return "bdver1"; // 00h-0Fh: Bulldozer
case 22:
@@ -448,7 +454,7 @@ std::string sys::getHostCPUName() {
return "generic";
}
#elif defined(__APPLE__) && (defined(__ppc__) || defined(__powerpc__))
-std::string sys::getHostCPUName() {
+StringRef sys::getHostCPUName() {
host_basic_info_data_t hostInfo;
mach_msg_type_number_t infoCount;
@@ -477,7 +483,7 @@ std::string sys::getHostCPUName() {
return "generic";
}
#elif defined(__linux__) && (defined(__ppc__) || defined(__powerpc__))
-std::string sys::getHostCPUName() {
+StringRef sys::getHostCPUName() {
// Access to the Processor Version Register (PVR) on PowerPC is privileged,
// and so we must use an operating-system interface to determine the current
// processor type. On Linux, this is exposed through the /proc/cpuinfo file.
@@ -564,10 +570,12 @@ std::string sys::getHostCPUName() {
.Case("A2", "a2")
.Case("POWER6", "pwr6")
.Case("POWER7", "pwr7")
+ .Case("POWER8", "pwr8")
+ .Case("POWER8E", "pwr8")
.Default(generic);
}
#elif defined(__linux__) && defined(__arm__)
-std::string sys::getHostCPUName() {
+StringRef sys::getHostCPUName() {
// The cpuid register on arm is not accessible from user space. On Linux,
// it is exposed through the /proc/cpuinfo file.
// Note: We cannot mmap /proc/cpuinfo here and then process the resulting
@@ -619,10 +627,21 @@ std::string sys::getHostCPUName() {
.Case("0xc24", "cortex-m4")
.Default("generic");
+ if (Implementer == "0x51") // Qualcomm Technologies, Inc.
+ // Look for the CPU part line.
+ for (unsigned I = 0, E = Lines.size(); I != E; ++I)
+ if (Lines[I].startswith("CPU part"))
+ // The CPU part is a 3 digit hexadecimal number with a 0x prefix. The
+ // values correspond to the "Part number" in the CP15/c0 register. The
+ // contents are specified in the various processor manuals.
+ return StringSwitch<const char *>(Lines[I].substr(8).ltrim("\t :"))
+ .Case("0x06f", "krait") // APQ8064
+ .Default("generic");
+
return "generic";
}
#elif defined(__linux__) && defined(__s390x__)
-std::string sys::getHostCPUName() {
+StringRef sys::getHostCPUName() {
// STIDP is a privileged operation, so use /proc/cpuinfo instead.
// Note: We cannot mmap /proc/cpuinfo here and then process the resulting
// memory buffer because the 'file' has 0 size (it can be read from only
@@ -664,12 +683,12 @@ std::string sys::getHostCPUName() {
return "generic";
}
#else
-std::string sys::getHostCPUName() {
+StringRef sys::getHostCPUName() {
return "generic";
}
#endif
-#if defined(__linux__) && defined(__arm__)
+#if defined(__linux__) && (defined(__arm__) || defined(__aarch64__))
bool sys::getHostCPUFeatures(StringMap<bool> &Features) {
std::string Err;
DataStreamer *DS = getDataFileStreamer("/proc/cpuinfo", &Err);
@@ -698,8 +717,24 @@ bool sys::getHostCPUFeatures(StringMap<bool> &Features) {
break;
}
+#if defined(__aarch64__)
+ // Keep track of which crypto features we have seen
+ enum {
+ CAP_AES = 0x1,
+ CAP_PMULL = 0x2,
+ CAP_SHA1 = 0x4,
+ CAP_SHA2 = 0x8
+ };
+ uint32_t crypto = 0;
+#endif
+
for (unsigned I = 0, E = CPUFeatures.size(); I != E; ++I) {
StringRef LLVMFeatureStr = StringSwitch<StringRef>(CPUFeatures[I])
+#if defined(__aarch64__)
+ .Case("asimd", "neon")
+ .Case("fp", "fp-armv8")
+ .Case("crc32", "crc")
+#else
.Case("half", "fp16")
.Case("neon", "neon")
.Case("vfpv3", "vfp3")
@@ -707,12 +742,32 @@ bool sys::getHostCPUFeatures(StringMap<bool> &Features) {
.Case("vfpv4", "vfp4")
.Case("idiva", "hwdiv-arm")
.Case("idivt", "hwdiv")
+#endif
.Default("");
+#if defined(__aarch64__)
+ // We need to check crypto separately since we need all of the crypto
+ // extensions to enable the subtarget feature
+ if (CPUFeatures[I] == "aes")
+ crypto |= CAP_AES;
+ else if (CPUFeatures[I] == "pmull")
+ crypto |= CAP_PMULL;
+ else if (CPUFeatures[I] == "sha1")
+ crypto |= CAP_SHA1;
+ else if (CPUFeatures[I] == "sha2")
+ crypto |= CAP_SHA2;
+#endif
+
if (LLVMFeatureStr != "")
Features.GetOrCreateValue(LLVMFeatureStr).setValue(true);
}
+#if defined(__aarch64__)
+ // If we have all crypto bits we can add the feature
+ if (crypto == (CAP_AES | CAP_PMULL | CAP_SHA1 | CAP_SHA2))
+ Features.GetOrCreateValue("crypto").setValue(true);
+#endif
+
return true;
}
#else
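getHostCPUName now hands back a StringRef, and getHostCPUFeatures is only populated on the hosts handled above; a small sketch of querying both:

#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/raw_ostream.h"

static void dumpHostInfo() {
  llvm::errs() << "cpu: " << llvm::sys::getHostCPUName() << "\n";
  llvm::StringMap<bool> Features;
  if (llvm::sys::getHostCPUFeatures(Features))   // false on unsupported hosts
    for (const auto &F : Features)
      llvm::errs() << (F.getValue() ? "+" : "-") << F.getKey() << "\n";
}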
diff --git a/contrib/llvm/lib/Support/IntervalMap.cpp b/contrib/llvm/lib/Support/IntervalMap.cpp
index 4dfcc40..e11a7f2 100644
--- a/contrib/llvm/lib/Support/IntervalMap.cpp
+++ b/contrib/llvm/lib/Support/IntervalMap.cpp
@@ -58,7 +58,7 @@ void Path::moveLeft(unsigned Level) {
}
} else if (height() < Level)
// end() may have created a height=0 path.
- path.resize(Level + 1, Entry(0, 0, 0));
+ path.resize(Level + 1, Entry(nullptr, 0, 0));
// NR is the subtree containing our left sibling.
--path[l].offset;
diff --git a/contrib/llvm/lib/Support/LEB128.cpp b/contrib/llvm/lib/Support/LEB128.cpp
new file mode 100644
index 0000000..449626f
--- /dev/null
+++ b/contrib/llvm/lib/Support/LEB128.cpp
@@ -0,0 +1,44 @@
+//===- LEB128.cpp - LEB128 utility functions implementation -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements some utility functions for encoding SLEB128 and
+// ULEB128 values.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/LEB128.h"
+
+namespace llvm {
+
+/// Utility function to get the size of the ULEB128-encoded value.
+unsigned getULEB128Size(uint64_t Value) {
+ unsigned Size = 0;
+ do {
+ Value >>= 7;
+ Size += sizeof(int8_t);
+ } while (Value);
+ return Size;
+}
+
+/// Utility function to get the size of the SLEB128-encoded value.
+unsigned getSLEB128Size(int64_t Value) {
+ unsigned Size = 0;
+ int Sign = Value >> (8 * sizeof(Value) - 1);
+ bool IsMore;
+
+ do {
+ unsigned Byte = Value & 0x7f;
+ Value >>= 7;
+ IsMore = Value != Sign || ((Byte ^ Sign) & 0x40) != 0;
+ Size += sizeof(int8_t);
+ } while (IsMore);
+ return Size;
+}
+
+} // namespace llvm
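The new size helpers count one output byte per 7-bit group; a few spot checks consistent with the code above:

#include "llvm/Support/LEB128.h"
#include <cassert>

static void lebSizeChecks() {
  assert(llvm::getULEB128Size(127) == 1);  // fits in a single 7-bit group
  assert(llvm::getULEB128Size(128) == 2);  // needs a continuation byte
  assert(llvm::getSLEB128Size(63) == 1);   // bit 6 clear, sign unambiguous
  assert(llvm::getSLEB128Size(64) == 2);   // bit 6 set, extra byte needed
  assert(llvm::getSLEB128Size(-1) == 1);   // all-ones still one group
}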
diff --git a/contrib/llvm/lib/Support/LineIterator.cpp b/contrib/llvm/lib/Support/LineIterator.cpp
new file mode 100644
index 0000000..947a8fb
--- /dev/null
+++ b/contrib/llvm/lib/Support/LineIterator.cpp
@@ -0,0 +1,69 @@
+//===- LineIterator.cpp - Implementation of line iteration ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/LineIterator.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+using namespace llvm;
+
+line_iterator::line_iterator(const MemoryBuffer &Buffer, char CommentMarker)
+ : Buffer(Buffer.getBufferSize() ? &Buffer : nullptr),
+ CommentMarker(CommentMarker), LineNumber(1),
+ CurrentLine(Buffer.getBufferSize() ? Buffer.getBufferStart() : nullptr,
+ 0) {
+ // Ensure that if we are constructed on a non-empty memory buffer that it is
+ // a null terminated buffer.
+ if (Buffer.getBufferSize()) {
+ assert(Buffer.getBufferEnd()[0] == '\0');
+ advance();
+ }
+}
+
+void line_iterator::advance() {
+ assert(Buffer && "Cannot advance past the end!");
+
+ const char *Pos = CurrentLine.end();
+ assert(Pos == Buffer->getBufferStart() || *Pos == '\n' || *Pos == '\0');
+
+ if (CommentMarker == '\0') {
+ // If we're not stripping comments, this is simpler.
+ size_t Blanks = 0;
+ while (Pos[Blanks] == '\n')
+ ++Blanks;
+ Pos += Blanks;
+ LineNumber += Blanks;
+ } else {
+ // Skip comments and count line numbers, which is a bit more complex.
+ for (;;) {
+ if (*Pos == CommentMarker)
+ do {
+ ++Pos;
+ } while (*Pos != '\0' && *Pos != '\n');
+ if (*Pos != '\n')
+ break;
+ ++Pos;
+ ++LineNumber;
+ }
+ }
+
+ if (*Pos == '\0') {
+ // We've hit the end of the buffer, reset ourselves to the end state.
+ Buffer = nullptr;
+ CurrentLine = StringRef();
+ return;
+ }
+
+ // Measure the line.
+ size_t Length = 0;
+ do {
+ ++Length;
+ } while (Pos[Length] != '\0' && Pos[Length] != '\n');
+
+ CurrentLine = StringRef(Pos, Length);
+}
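A sketch of driving the new line_iterator over a copied buffer, skipping '#'-comment lines; the buffer name "<example>" is a placeholder:

#include "llvm/Support/LineIterator.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>

static void printNonCommentLines(llvm::StringRef Text) {
  std::unique_ptr<llvm::MemoryBuffer> Buf(
      llvm::MemoryBuffer::getMemBufferCopy(Text, "<example>"));
  for (llvm::line_iterator I(*Buf, '#'), E; I != E; ++I)
    llvm::errs() << I.line_number() << ": " << *I << "\n";
}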
diff --git a/contrib/llvm/lib/Support/LockFileManager.cpp b/contrib/llvm/lib/Support/LockFileManager.cpp
index eeec274..3f224e0 100644
--- a/contrib/llvm/lib/Support/LockFileManager.cpp
+++ b/contrib/llvm/lib/Support/LockFileManager.cpp
@@ -9,8 +9,10 @@
#include "llvm/Support/LockFileManager.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Errc.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <sys/stat.h>
#include <sys/types.h>
@@ -29,25 +31,26 @@ using namespace llvm;
/// \returns The process ID of the process that owns this lock file
Optional<std::pair<std::string, int> >
LockFileManager::readLockFile(StringRef LockFileName) {
- // Check whether the lock file exists. If not, clearly there's nothing
- // to read, so we just return.
- bool Exists = false;
- if (sys::fs::exists(LockFileName, Exists) || !Exists)
- return None;
-
// Read the owning host and PID out of the lock file. If it appears that the
// owning process is dead, the lock file is invalid.
- OwningPtr<MemoryBuffer> MB;
- if (MemoryBuffer::getFile(LockFileName, MB))
+ ErrorOr<std::unique_ptr<MemoryBuffer>> MBOrErr =
+ MemoryBuffer::getFile(LockFileName);
+ if (!MBOrErr) {
+ sys::fs::remove(LockFileName);
return None;
+ }
+ std::unique_ptr<MemoryBuffer> MB = std::move(MBOrErr.get());
StringRef Hostname;
StringRef PIDStr;
- tie(Hostname, PIDStr) = getToken(MB->getBuffer(), " ");
+ std::tie(Hostname, PIDStr) = getToken(MB->getBuffer(), " ");
PIDStr = PIDStr.substr(PIDStr.find_first_not_of(" "));
int PID;
- if (!PIDStr.getAsInteger(10, PID))
- return std::make_pair(std::string(Hostname), PID);
+ if (!PIDStr.getAsInteger(10, PID)) {
+ auto Owner = std::make_pair(std::string(Hostname), PID);
+ if (processStillExecuting(Owner.first, Owner.second))
+ return Owner;
+ }
// Delete the lock file. It's invalid anyway.
sys::fs::remove(LockFileName);
@@ -71,7 +74,11 @@ bool LockFileManager::processStillExecuting(StringRef Hostname, int PID) {
LockFileManager::LockFileManager(StringRef FileName)
{
this->FileName = FileName;
- LockFileName = FileName;
+ if (std::error_code EC = sys::fs::make_absolute(this->FileName)) {
+ Error = EC;
+ return;
+ }
+ LockFileName = this->FileName;
LockFileName += ".lock";
// If the lock file already exists, don't bother to try to create our own
@@ -83,10 +90,8 @@ LockFileManager::LockFileManager(StringRef FileName)
UniqueLockFileName = LockFileName;
UniqueLockFileName += "-%%%%%%%%";
int UniqueLockFileID;
- if (error_code EC
- = sys::fs::createUniqueFile(UniqueLockFileName.str(),
- UniqueLockFileID,
- UniqueLockFileName)) {
+ if (std::error_code EC = sys::fs::createUniqueFile(
+ UniqueLockFileName.str(), UniqueLockFileID, UniqueLockFileName)) {
Error = EC;
return;
}
@@ -111,41 +116,44 @@ LockFileManager::LockFileManager(StringRef FileName)
// We failed to write out PID, so make up an excuse, remove the
// unique lock file, and fail.
Error = make_error_code(errc::no_space_on_device);
- bool Existed;
- sys::fs::remove(UniqueLockFileName.c_str(), Existed);
+ sys::fs::remove(UniqueLockFileName.c_str());
return;
}
}
- // Create a hard link from the lock file name. If this succeeds, we're done.
- error_code EC
- = sys::fs::create_hard_link(UniqueLockFileName.str(),
- LockFileName.str());
- if (EC == errc::success)
- return;
+ while (1) {
+ // Create a link from the lock file name. If this succeeds, we're done.
+ std::error_code EC =
+ sys::fs::create_link(UniqueLockFileName.str(), LockFileName.str());
+ if (!EC)
+ return;
- // Creating the hard link failed.
+ if (EC != errc::file_exists) {
+ Error = EC;
+ return;
+ }
-#ifdef LLVM_ON_UNIX
- // The creation of the hard link may appear to fail, but if stat'ing the
- // unique file returns a link count of 2, then we can still declare success.
- struct stat StatBuf;
- if (stat(UniqueLockFileName.c_str(), &StatBuf) == 0 &&
- StatBuf.st_nlink == 2)
- return;
-#endif
+ // Someone else managed to create the lock file first. Read the process ID
+ // from the lock file.
+ if ((Owner = readLockFile(LockFileName))) {
+ // Wipe out our unique lock file (it's useless now)
+ sys::fs::remove(UniqueLockFileName.str());
+ return;
+ }
- // Someone else managed to create the lock file first. Wipe out our unique
- // lock file (it's useless now) and read the process ID from the lock file.
- bool Existed;
- sys::fs::remove(UniqueLockFileName.str(), Existed);
- if ((Owner = readLockFile(LockFileName)))
- return;
+ if (!sys::fs::exists(LockFileName.str())) {
+ // The previous owner released the lock file before we could read it.
+ // Try to get ownership again.
+ continue;
+ }
- // There is a lock file that nobody owns; try to clean it up and report
- // an error.
- sys::fs::remove(LockFileName.str(), Existed);
- Error = EC;
+ // There is a lock file that nobody owns; try to clean it up and get
+ // ownership.
+ if ((EC = sys::fs::remove(LockFileName.str()))) {
+ Error = EC;
+ return;
+ }
+ }
}
LockFileManager::LockFileState LockFileManager::getState() const {
@@ -163,14 +171,13 @@ LockFileManager::~LockFileManager() {
return;
// Since we own the lock, remove the lock file and our own unique lock file.
- bool Existed;
- sys::fs::remove(LockFileName.str(), Existed);
- sys::fs::remove(UniqueLockFileName.str(), Existed);
+ sys::fs::remove(LockFileName.str());
+ sys::fs::remove(UniqueLockFileName.str());
}
-void LockFileManager::waitForUnlock() {
+LockFileManager::WaitForUnlockResult LockFileManager::waitForUnlock() {
if (getState() != LFS_Shared)
- return;
+ return Res_Success;
#if LLVM_ON_WIN32
unsigned long Interval = 1;
@@ -190,26 +197,25 @@ void LockFileManager::waitForUnlock() {
#if LLVM_ON_WIN32
Sleep(Interval);
#else
- nanosleep(&Interval, NULL);
+ nanosleep(&Interval, nullptr);
#endif
- bool Exists = false;
bool LockFileJustDisappeared = false;
// If the lock file is still expected to be there, check whether it still
// is.
if (!LockFileGone) {
+ bool Exists;
if (!sys::fs::exists(LockFileName.str(), Exists) && !Exists) {
LockFileGone = true;
LockFileJustDisappeared = true;
- Exists = false;
}
}
// If the lock file is no longer there, check if the original file is
// available now.
if (LockFileGone) {
- if (!sys::fs::exists(FileName.str(), Exists) && Exists) {
- return;
+ if (sys::fs::exists(FileName.str())) {
+ return Res_Success;
}
// The lock file is gone, so now we're waiting for the original file to
@@ -232,7 +238,7 @@ void LockFileManager::waitForUnlock() {
// owning the lock died without cleaning up, just bail out.
if (!LockFileGone &&
!processStillExecuting((*Owner).first, (*Owner).second)) {
- return;
+ return Res_OwnerDied;
}
// Exponentially increase the time we wait for the lock to be removed.
@@ -255,4 +261,5 @@ void LockFileManager::waitForUnlock() {
);
// Give up.
+ return Res_Timeout;
}
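A sketch of the lock-or-wait pattern the class supports, using the new waitForUnlock result; the output path is a placeholder and error reporting is elided:

#include "llvm/Support/LockFileManager.h"

static bool buildWithLock(llvm::StringRef Output) {
  llvm::LockFileManager Lock(Output);
  switch (Lock.getState()) {
  case llvm::LockFileManager::LFS_Error:
    return false;
  case llvm::LockFileManager::LFS_Owned:
    // We hold the lock; produce Output here, then let ~LockFileManager
    // remove the lock file.
    return true;
  case llvm::LockFileManager::LFS_Shared:
    // Another process owns the lock; wait for it and reuse its result.
    return Lock.waitForUnlock() == llvm::LockFileManager::Res_Success;
  }
  return false;
}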
diff --git a/contrib/llvm/lib/Support/ManagedStatic.cpp b/contrib/llvm/lib/Support/ManagedStatic.cpp
index 098cccb..b8fb284 100644
--- a/contrib/llvm/lib/Support/ManagedStatic.cpp
+++ b/contrib/llvm/lib/Support/ManagedStatic.cpp
@@ -14,18 +14,29 @@
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Config/config.h"
#include "llvm/Support/Atomic.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/MutexGuard.h"
#include <cassert>
using namespace llvm;
-static const ManagedStaticBase *StaticList = 0;
+static const ManagedStaticBase *StaticList = nullptr;
+
+static sys::Mutex& getManagedStaticMutex() {
+ // We need to use a function local static here, since this can get called
+ // during a static constructor and we need to guarantee that it's initialized
+ // correctly.
+ static sys::Mutex ManagedStaticMutex;
+ return ManagedStaticMutex;
+}
void ManagedStaticBase::RegisterManagedStatic(void *(*Creator)(),
void (*Deleter)(void*)) const {
+ assert(Creator);
if (llvm_is_multithreaded()) {
- llvm_acquire_global_lock();
+ MutexGuard Lock(getManagedStaticMutex());
- if (Ptr == 0) {
- void* tmp = Creator ? Creator() : 0;
+ if (!Ptr) {
+ void* tmp = Creator();
TsanHappensBefore(this);
sys::MemoryFence();
@@ -42,12 +53,10 @@ void ManagedStaticBase::RegisterManagedStatic(void *(*Creator)(),
Next = StaticList;
StaticList = this;
}
-
- llvm_release_global_lock();
} else {
- assert(Ptr == 0 && DeleterFn == 0 && Next == 0 &&
+ assert(!Ptr && !DeleterFn && !Next &&
"Partially initialized ManagedStatic!?");
- Ptr = Creator ? Creator() : 0;
+ Ptr = Creator();
DeleterFn = Deleter;
// Add to list of managed statics.
@@ -62,20 +71,20 @@ void ManagedStaticBase::destroy() const {
"Not destroyed in reverse order of construction?");
// Unlink from list.
StaticList = Next;
- Next = 0;
+ Next = nullptr;
// Destroy memory.
DeleterFn(Ptr);
// Cleanup.
- Ptr = 0;
- DeleterFn = 0;
+ Ptr = nullptr;
+ DeleterFn = nullptr;
}
/// llvm_shutdown - Deallocate and destroy all ManagedStatic variables.
void llvm::llvm_shutdown() {
+ MutexGuard Lock(getManagedStaticMutex());
+
while (StaticList)
StaticList->destroy();
-
- if (llvm_is_multithreaded()) llvm_stop_multithreaded();
}
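Registration is now serialized by a dedicated mutex instead of the old global lock, but the usage pattern is unchanged; a sketch:

#include "llvm/Support/ManagedStatic.h"
#include <vector>

static llvm::ManagedStatic<std::vector<int>> SharedList;

void recordValue(int V) {
  SharedList->push_back(V);  // first use constructs the vector lazily
}

// llvm::llvm_shutdown() later destroys every ManagedStatic in reverse
// order of construction.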
diff --git a/contrib/llvm/lib/Support/MemoryBuffer.cpp b/contrib/llvm/lib/Support/MemoryBuffer.cpp
index dcd5529..5f4b7da 100644
--- a/contrib/llvm/lib/Support/MemoryBuffer.cpp
+++ b/contrib/llvm/lib/Support/MemoryBuffer.cpp
@@ -12,34 +12,26 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Config/config.h"
+#include "llvm/Support/Errc.h"
#include "llvm/Support/Errno.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
-#include "llvm/Support/system_error.h"
#include <cassert>
#include <cerrno>
#include <cstdio>
#include <cstring>
#include <new>
-#include <sys/stat.h>
#include <sys/types.h>
+#include <system_error>
#if !defined(_MSC_VER) && !defined(__MINGW32__)
#include <unistd.h>
#else
#include <io.h>
-// Simplistic definitinos of these macros for use in getOpenFile.
-#ifndef S_ISREG
-#define S_ISREG(x) (1)
-#endif
-#ifndef S_ISBLK
-#define S_ISBLK(x) (0)
-#endif
#endif
using namespace llvm;
@@ -91,12 +83,12 @@ public:
init(InputData.begin(), InputData.end(), RequiresNullTerminator);
}
- virtual const char *getBufferIdentifier() const LLVM_OVERRIDE {
+ const char *getBufferIdentifier() const override {
// The name is stored after the class itself.
return reinterpret_cast<const char*>(this + 1);
}
- virtual BufferKind getBufferKind() const LLVM_OVERRIDE {
+ BufferKind getBufferKind() const override {
return MemoryBuffer_Malloc;
}
};
@@ -117,7 +109,7 @@ MemoryBuffer *MemoryBuffer::getMemBuffer(StringRef InputData,
MemoryBuffer *MemoryBuffer::getMemBufferCopy(StringRef InputData,
StringRef BufferName) {
MemoryBuffer *Buf = getNewUninitMemBuffer(InputData.size(), BufferName);
- if (!Buf) return 0;
+ if (!Buf) return nullptr;
memcpy(const_cast<char*>(Buf->getBufferStart()), InputData.data(),
InputData.size());
return Buf;
@@ -131,12 +123,13 @@ MemoryBuffer *MemoryBuffer::getNewUninitMemBuffer(size_t Size,
StringRef BufferName) {
// Allocate space for the MemoryBuffer, the data and the name. It is important
// that MemoryBuffer and data are aligned so PointerIntPair works with them.
+ // TODO: Is 16-byte alignment enough? We copy small object files with large
+ // alignment expectations into this buffer.
size_t AlignedStringLen =
- RoundUpToAlignment(sizeof(MemoryBufferMem) + BufferName.size() + 1,
- sizeof(void*)); // TODO: Is sizeof(void*) enough?
+ RoundUpToAlignment(sizeof(MemoryBufferMem) + BufferName.size() + 1, 16);
size_t RealLen = AlignedStringLen + Size + 1;
char *Mem = static_cast<char*>(operator new(RealLen, std::nothrow));
- if (!Mem) return 0;
+ if (!Mem) return nullptr;
// The name is stored after the class itself.
CopyStringRef(Mem + sizeof(MemoryBufferMem), BufferName);
@@ -154,24 +147,19 @@ MemoryBuffer *MemoryBuffer::getNewUninitMemBuffer(size_t Size,
/// the MemoryBuffer object.
MemoryBuffer *MemoryBuffer::getNewMemBuffer(size_t Size, StringRef BufferName) {
MemoryBuffer *SB = getNewUninitMemBuffer(Size, BufferName);
- if (!SB) return 0;
+ if (!SB) return nullptr;
memset(const_cast<char*>(SB->getBufferStart()), 0, Size);
return SB;
}
-
-/// getFileOrSTDIN - Open the specified file as a MemoryBuffer, or open stdin
-/// if the Filename is "-". If an error occurs, this returns null and fills
-/// in *ErrStr with a reason. If stdin is empty, this API (unlike getSTDIN)
-/// returns an empty buffer.
-error_code MemoryBuffer::getFileOrSTDIN(StringRef Filename,
- OwningPtr<MemoryBuffer> &result,
- int64_t FileSize) {
+ErrorOr<std::unique_ptr<MemoryBuffer>>
+MemoryBuffer::getFileOrSTDIN(StringRef Filename, int64_t FileSize) {
if (Filename == "-")
- return getSTDIN(result);
- return getFile(Filename, result, FileSize);
+ return getSTDIN();
+ return getFile(Filename, FileSize);
}
+
//===----------------------------------------------------------------------===//
// MemoryBuffer::getFile implementation.
//===----------------------------------------------------------------------===//
@@ -197,7 +185,7 @@ class MemoryBufferMMapFile : public MemoryBuffer {
public:
MemoryBufferMMapFile(bool RequiresNullTerminator, int FD, uint64_t Len,
- uint64_t Offset, error_code EC)
+ uint64_t Offset, std::error_code EC)
: MFR(FD, false, sys::fs::mapped_file_region::readonly,
getLegalMapSize(Len, Offset), getLegalMapOffset(Offset), EC) {
if (!EC) {
@@ -206,20 +194,19 @@ public:
}
}
- virtual const char *getBufferIdentifier() const LLVM_OVERRIDE {
+ const char *getBufferIdentifier() const override {
// The name is stored after the class itself.
return reinterpret_cast<const char *>(this + 1);
}
- virtual BufferKind getBufferKind() const LLVM_OVERRIDE {
+ BufferKind getBufferKind() const override {
return MemoryBuffer_MMap;
}
};
}
-static error_code getMemoryBufferForStream(int FD,
- StringRef BufferName,
- OwningPtr<MemoryBuffer> &result) {
+static ErrorOr<std::unique_ptr<MemoryBuffer>>
+getMemoryBufferForStream(int FD, StringRef BufferName) {
const ssize_t ChunkSize = 4096*4;
SmallString<ChunkSize> Buffer;
ssize_t ReadBytes;
@@ -229,47 +216,48 @@ static error_code getMemoryBufferForStream(int FD,
ReadBytes = read(FD, Buffer.end(), ChunkSize);
if (ReadBytes == -1) {
if (errno == EINTR) continue;
- return error_code(errno, posix_category());
+ return std::error_code(errno, std::generic_category());
}
Buffer.set_size(Buffer.size() + ReadBytes);
} while (ReadBytes != 0);
- result.reset(MemoryBuffer::getMemBufferCopy(Buffer, BufferName));
- return error_code::success();
+ std::unique_ptr<MemoryBuffer> Ret(
+ MemoryBuffer::getMemBufferCopy(Buffer, BufferName));
+ return std::move(Ret);
}
-static error_code getFileAux(const char *Filename,
- OwningPtr<MemoryBuffer> &result, int64_t FileSize,
- bool RequiresNullTerminator);
+static ErrorOr<std::unique_ptr<MemoryBuffer>>
+getFileAux(const char *Filename, int64_t FileSize, bool RequiresNullTerminator,
+ bool IsVolatileSize);
-error_code MemoryBuffer::getFile(Twine Filename,
- OwningPtr<MemoryBuffer> &result,
- int64_t FileSize,
- bool RequiresNullTerminator) {
+ErrorOr<std::unique_ptr<MemoryBuffer>>
+MemoryBuffer::getFile(Twine Filename, int64_t FileSize,
+ bool RequiresNullTerminator, bool IsVolatileSize) {
// Ensure the path is null terminated.
SmallString<256> PathBuf;
StringRef NullTerminatedName = Filename.toNullTerminatedStringRef(PathBuf);
- return getFileAux(NullTerminatedName.data(), result, FileSize,
- RequiresNullTerminator);
+ return getFileAux(NullTerminatedName.data(), FileSize, RequiresNullTerminator,
+ IsVolatileSize);
}
-static error_code getOpenFileImpl(int FD, const char *Filename,
- OwningPtr<MemoryBuffer> &Result,
- uint64_t FileSize, uint64_t MapSize,
- int64_t Offset, bool RequiresNullTerminator);
+static ErrorOr<std::unique_ptr<MemoryBuffer>>
+getOpenFileImpl(int FD, const char *Filename, uint64_t FileSize,
+ uint64_t MapSize, int64_t Offset, bool RequiresNullTerminator,
+ bool IsVolatileSize);
-static error_code getFileAux(const char *Filename,
- OwningPtr<MemoryBuffer> &result, int64_t FileSize,
- bool RequiresNullTerminator) {
+static ErrorOr<std::unique_ptr<MemoryBuffer>>
+getFileAux(const char *Filename, int64_t FileSize, bool RequiresNullTerminator,
+ bool IsVolatileSize) {
int FD;
- error_code EC = sys::fs::openFileForRead(Filename, FD);
+ std::error_code EC = sys::fs::openFileForRead(Filename, FD);
if (EC)
return EC;
- error_code ret = getOpenFileImpl(FD, Filename, result, FileSize, FileSize, 0,
- RequiresNullTerminator);
+ ErrorOr<std::unique_ptr<MemoryBuffer>> Ret =
+ getOpenFileImpl(FD, Filename, FileSize, FileSize, 0,
+ RequiresNullTerminator, IsVolatileSize);
close(FD);
- return ret;
+ return Ret;
}
static bool shouldUseMmap(int FD,
@@ -277,7 +265,14 @@ static bool shouldUseMmap(int FD,
size_t MapSize,
off_t Offset,
bool RequiresNullTerminator,
- int PageSize) {
+ int PageSize,
+ bool IsVolatileSize) {
+ // mmap may leave the buffer without null terminator if the file size changed
+ // by the time the last page is mapped in, so avoid it if the file size is
+ // likely to change.
+ if (IsVolatileSize)
+ return false;
+
// We don't use mmap for small files because this can severely fragment our
// address space.
if (MapSize < 4 * 4096 || MapSize < (unsigned)PageSize)
@@ -293,9 +288,8 @@ static bool shouldUseMmap(int FD,
// RequiresNullTerminator = false and MapSize != -1.
if (FileSize == size_t(-1)) {
sys::fs::file_status Status;
- error_code EC = sys::fs::status(FD, Status);
- if (EC)
- return EC;
+ if (sys::fs::status(FD, Status))
+ return false;
FileSize = Status.getSize();
}
@@ -306,15 +300,6 @@ static bool shouldUseMmap(int FD,
if (End != FileSize)
return false;
-#if defined(_WIN32) || defined(__CYGWIN__)
- // Don't peek the next page if file is multiple of *physical* pagesize(4k)
- // but is not multiple of AllocationGranularity(64k),
- // when a null terminator is required.
- // FIXME: It's not good to hardcode 4096 here. dwPageSize shows 4096.
- if ((FileSize & (4096 - 1)) == 0)
- return false;
-#endif
-
// Don't try to map files that are exactly a multiple of the system page size
// if we need a null terminator.
if ((FileSize & (PageSize -1)) == 0)
@@ -323,10 +308,10 @@ static bool shouldUseMmap(int FD,
return true;
}
-static error_code getOpenFileImpl(int FD, const char *Filename,
- OwningPtr<MemoryBuffer> &result,
- uint64_t FileSize, uint64_t MapSize,
- int64_t Offset, bool RequiresNullTerminator) {
+static ErrorOr<std::unique_ptr<MemoryBuffer>>
+getOpenFileImpl(int FD, const char *Filename, uint64_t FileSize,
+ uint64_t MapSize, int64_t Offset, bool RequiresNullTerminator,
+ bool IsVolatileSize) {
static int PageSize = sys::process::get_self()->page_size();
// Default is to map the full file.
@@ -335,7 +320,7 @@ static error_code getOpenFileImpl(int FD, const char *Filename,
// file descriptor is cheaper than stat on a random path.
if (FileSize == uint64_t(-1)) {
sys::fs::file_status Status;
- error_code EC = sys::fs::status(FD, Status);
+ std::error_code EC = sys::fs::status(FD, Status);
if (EC)
return EC;
@@ -345,7 +330,7 @@ static error_code getOpenFileImpl(int FD, const char *Filename,
sys::fs::file_type Type = Status.type();
if (Type != sys::fs::file_type::regular_file &&
Type != sys::fs::file_type::block_file)
- return getMemoryBufferForStream(FD, Filename, result);
+ return getMemoryBufferForStream(FD, Filename);
FileSize = Status.getSize();
}
@@ -353,12 +338,13 @@ static error_code getOpenFileImpl(int FD, const char *Filename,
}
if (shouldUseMmap(FD, FileSize, MapSize, Offset, RequiresNullTerminator,
- PageSize)) {
- error_code EC;
- result.reset(new (NamedBufferAlloc(Filename)) MemoryBufferMMapFile(
- RequiresNullTerminator, FD, MapSize, Offset, EC));
+ PageSize, IsVolatileSize)) {
+ std::error_code EC;
+ std::unique_ptr<MemoryBuffer> Result(
+ new (NamedBufferAlloc(Filename))
+ MemoryBufferMMapFile(RequiresNullTerminator, FD, MapSize, Offset, EC));
if (!EC)
- return error_code::success();
+ return std::move(Result);
}
MemoryBuffer *Buf = MemoryBuffer::getNewUninitMemBuffer(MapSize, Filename);
@@ -368,13 +354,13 @@ static error_code getOpenFileImpl(int FD, const char *Filename,
return make_error_code(errc::not_enough_memory);
}
- OwningPtr<MemoryBuffer> SB(Buf);
+ std::unique_ptr<MemoryBuffer> SB(Buf);
char *BufPtr = const_cast<char*>(SB->getBufferStart());
size_t BytesLeft = MapSize;
#ifndef HAVE_PREAD
if (lseek(FD, Offset, SEEK_SET) == -1)
- return error_code(errno, posix_category());
+ return std::error_code(errno, std::generic_category());
#endif
while (BytesLeft) {
@@ -387,46 +373,39 @@ static error_code getOpenFileImpl(int FD, const char *Filename,
if (errno == EINTR)
continue;
// Error while reading.
- return error_code(errno, posix_category());
+ return std::error_code(errno, std::generic_category());
}
if (NumRead == 0) {
- assert(0 && "We got inaccurate FileSize value or fstat reported an "
- "invalid file size.");
- *BufPtr = '\0'; // null-terminate at the actual size.
+ memset(BufPtr, 0, BytesLeft); // zero-initialize rest of the buffer.
break;
}
BytesLeft -= NumRead;
BufPtr += NumRead;
}
- result.swap(SB);
- return error_code::success();
+ return std::move(SB);
}
-error_code MemoryBuffer::getOpenFile(int FD, const char *Filename,
- OwningPtr<MemoryBuffer> &Result,
- uint64_t FileSize,
- bool RequiresNullTerminator) {
- return getOpenFileImpl(FD, Filename, Result, FileSize, FileSize, 0,
- RequiresNullTerminator);
+ErrorOr<std::unique_ptr<MemoryBuffer>>
+MemoryBuffer::getOpenFile(int FD, const char *Filename, uint64_t FileSize,
+ bool RequiresNullTerminator, bool IsVolatileSize) {
+ return getOpenFileImpl(FD, Filename, FileSize, FileSize, 0,
+ RequiresNullTerminator, IsVolatileSize);
}
-error_code MemoryBuffer::getOpenFileSlice(int FD, const char *Filename,
- OwningPtr<MemoryBuffer> &Result,
- uint64_t MapSize, int64_t Offset) {
- return getOpenFileImpl(FD, Filename, Result, -1, MapSize, Offset, false);
+ErrorOr<std::unique_ptr<MemoryBuffer>>
+MemoryBuffer::getOpenFileSlice(int FD, const char *Filename, uint64_t MapSize,
+ int64_t Offset, bool IsVolatileSize) {
+ return getOpenFileImpl(FD, Filename, -1, MapSize, Offset, false,
+ IsVolatileSize);
}
-//===----------------------------------------------------------------------===//
-// MemoryBuffer::getSTDIN implementation.
-//===----------------------------------------------------------------------===//
-
-error_code MemoryBuffer::getSTDIN(OwningPtr<MemoryBuffer> &result) {
+ErrorOr<std::unique_ptr<MemoryBuffer>> MemoryBuffer::getSTDIN() {
// Read in all of the data from stdin, we cannot mmap stdin.
//
// FIXME: That isn't necessarily true, we should try to mmap stdin and
// fallback if it fails.
sys::ChangeStdinToBinary();
- return getMemoryBufferForStream(0, "<stdin>", result);
+ return getMemoryBufferForStream(0, "<stdin>");
}
diff --git a/contrib/llvm/lib/Support/Mutex.cpp b/contrib/llvm/lib/Support/Mutex.cpp
index 4e4a026..c8d3844 100644
--- a/contrib/llvm/lib/Support/Mutex.cpp
+++ b/contrib/llvm/lib/Support/Mutex.cpp
@@ -42,7 +42,7 @@ using namespace sys;
// Construct a Mutex using pthread calls
MutexImpl::MutexImpl( bool recursive)
- : data_(0)
+ : data_(nullptr)
{
// Declare the pthread_mutex data structures
pthread_mutex_t* mutex =
@@ -59,13 +59,6 @@ MutexImpl::MutexImpl( bool recursive)
errorcode = pthread_mutexattr_settype(&attr, kind);
assert(errorcode == 0);
-#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) && \
- !defined(__DragonFly__) && !defined(__Bitrig__)
- // Make it a process local mutex
- errorcode = pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE);
- assert(errorcode == 0);
-#endif
-
// Initialize the mutex
errorcode = pthread_mutex_init(mutex, &attr);
assert(errorcode == 0);
@@ -82,7 +75,7 @@ MutexImpl::MutexImpl( bool recursive)
MutexImpl::~MutexImpl()
{
pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data_);
- assert(mutex != 0);
+ assert(mutex != nullptr);
pthread_mutex_destroy(mutex);
free(mutex);
}
@@ -91,7 +84,7 @@ bool
MutexImpl::acquire()
{
pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data_);
- assert(mutex != 0);
+ assert(mutex != nullptr);
int errorcode = pthread_mutex_lock(mutex);
return errorcode == 0;
@@ -101,7 +94,7 @@ bool
MutexImpl::release()
{
pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data_);
- assert(mutex != 0);
+ assert(mutex != nullptr);
int errorcode = pthread_mutex_unlock(mutex);
return errorcode == 0;
@@ -111,7 +104,7 @@ bool
MutexImpl::tryacquire()
{
pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data_);
- assert(mutex != 0);
+ assert(mutex != nullptr);
int errorcode = pthread_mutex_trylock(mutex);
return errorcode == 0;
diff --git a/contrib/llvm/lib/Support/Path.cpp b/contrib/llvm/lib/Support/Path.cpp
index c869b30..d5a0ec5 100644
--- a/contrib/llvm/lib/Support/Path.cpp
+++ b/contrib/llvm/lib/Support/Path.cpp
@@ -11,10 +11,12 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/Support/Errc.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Process.h"
#include <cctype>
#include <cstdio>
#include <cstring>
@@ -26,16 +28,18 @@
#include <io.h>
#endif
+using namespace llvm;
+
namespace {
using llvm::StringRef;
using llvm::sys::path::is_separator;
#ifdef LLVM_ON_WIN32
const char *separators = "\\/";
- const char prefered_separator = '\\';
+ const char preferred_separator = '\\';
#else
const char separators = '/';
- const char prefered_separator = '/';
+ const char preferred_separator = '/';
#endif
StringRef find_first_component(StringRef path) {
@@ -161,10 +165,76 @@ enum FSEntity {
};
// Implemented in Unix/Path.inc and Windows/Path.inc.
-static llvm::error_code
-createUniqueEntity(const llvm::Twine &Model, int &ResultFD,
- llvm::SmallVectorImpl<char> &ResultPath,
- bool MakeAbsolute, unsigned Mode, FSEntity Type);
+static std::error_code TempDir(SmallVectorImpl<char> &result);
+
+static std::error_code createUniqueEntity(const Twine &Model, int &ResultFD,
+ SmallVectorImpl<char> &ResultPath,
+ bool MakeAbsolute, unsigned Mode,
+ FSEntity Type) {
+ SmallString<128> ModelStorage;
+ Model.toVector(ModelStorage);
+
+ if (MakeAbsolute) {
+ // Make model absolute by prepending a temp directory if it's not already.
+ if (!sys::path::is_absolute(Twine(ModelStorage))) {
+ SmallString<128> TDir;
+ if (std::error_code EC = TempDir(TDir))
+ return EC;
+ sys::path::append(TDir, Twine(ModelStorage));
+ ModelStorage.swap(TDir);
+ }
+ }
+
+ // From here on, DO NOT modify model. It may be needed if the randomly chosen
+ // path already exists.
+ ResultPath = ModelStorage;
+ // Null terminate.
+ ResultPath.push_back(0);
+ ResultPath.pop_back();
+
+retry_random_path:
+ // Replace '%' with random chars.
+ for (unsigned i = 0, e = ModelStorage.size(); i != e; ++i) {
+ if (ModelStorage[i] == '%')
+ ResultPath[i] = "0123456789abcdef"[sys::Process::GetRandomNumber() & 15];
+ }
+
+ // Try to open + create the file.
+ switch (Type) {
+ case FS_File: {
+ if (std::error_code EC =
+ sys::fs::openFileForWrite(Twine(ResultPath.begin()), ResultFD,
+ sys::fs::F_RW | sys::fs::F_Excl, Mode)) {
+ if (EC == errc::file_exists)
+ goto retry_random_path;
+ return EC;
+ }
+
+ return std::error_code();
+ }
+
+ case FS_Name: {
+ bool Exists;
+ std::error_code EC = sys::fs::exists(ResultPath.begin(), Exists);
+ if (EC)
+ return EC;
+ if (Exists)
+ goto retry_random_path;
+ return std::error_code();
+ }
+
+ case FS_Dir: {
+ if (std::error_code EC =
+ sys::fs::create_directory(ResultPath.begin(), false)) {
+ if (EC == errc::file_exists)
+ goto retry_random_path;
+ return EC;
+ }
+ return std::error_code();
+ }
+ }
+ llvm_unreachable("Invalid Type");
+}
namespace llvm {
namespace sys {
@@ -239,21 +309,18 @@ const_iterator &const_iterator::operator++() {
}
const_iterator &const_iterator::operator--() {
- // If we're at the end and the previous char was a '/', return '.'.
+ // If we're at the end and the previous char was a '/', return '.' unless
+ // we are the root path.
+ size_t root_dir_pos = root_dir_start(Path);
if (Position == Path.size() &&
- Path.size() > 1 &&
- is_separator(Path[Position - 1])
-#ifdef LLVM_ON_WIN32
- && Path[Position - 2] != ':'
-#endif
- ) {
+ Path.size() > root_dir_pos + 1 &&
+ is_separator(Path[Position - 1])) {
--Position;
Component = ".";
return *this;
}
// Skip separators unless it's the root directory.
- size_t root_dir_pos = root_dir_start(Path);
size_t end_pos = Position;
while(end_pos > 0 &&
@@ -403,7 +470,7 @@ void append(SmallVectorImpl<char> &path, const Twine &a,
if (!component_has_sep && !(path.empty() || is_root_name)) {
// Add a separator.
- path.push_back(prefered_separator);
+ path.push_back(preferred_separator);
}
path.append(i->begin(), i->end());
@@ -504,14 +571,21 @@ bool is_separator(char value) {
}
}
+static const char preferred_separator_string[] = { preferred_separator, '\0' };
+
+const StringRef get_separator() {
+ return preferred_separator_string;
+}
+
void system_temp_directory(bool erasedOnReboot, SmallVectorImpl<char> &result) {
result.clear();
-#ifdef __APPLE__
+#if defined(_CS_DARWIN_USER_TEMP_DIR) && defined(_CS_DARWIN_USER_CACHE_DIR)
// On Darwin, use DARWIN_USER_TEMP_DIR or DARWIN_USER_CACHE_DIR.
+ // macros defined in <unistd.h> on darwin >= 9
int ConfName = erasedOnReboot? _CS_DARWIN_USER_TEMP_DIR
: _CS_DARWIN_USER_CACHE_DIR;
- size_t ConfLen = confstr(ConfName, 0, 0);
+ size_t ConfLen = confstr(ConfName, nullptr, 0);
if (ConfLen > 0) {
do {
result.resize(ConfLen);
@@ -633,29 +707,30 @@ bool is_relative(const Twine &path) {
namespace fs {
-error_code getUniqueID(const Twine Path, UniqueID &Result) {
+std::error_code getUniqueID(const Twine Path, UniqueID &Result) {
file_status Status;
- error_code EC = status(Path, Status);
+ std::error_code EC = status(Path, Status);
if (EC)
return EC;
Result = Status.getUniqueID();
- return error_code::success();
+ return std::error_code();
}
-error_code createUniqueFile(const Twine &Model, int &ResultFd,
- SmallVectorImpl<char> &ResultPath, unsigned Mode) {
+std::error_code createUniqueFile(const Twine &Model, int &ResultFd,
+ SmallVectorImpl<char> &ResultPath,
+ unsigned Mode) {
return createUniqueEntity(Model, ResultFd, ResultPath, false, Mode, FS_File);
}
-error_code createUniqueFile(const Twine &Model,
- SmallVectorImpl<char> &ResultPath) {
+std::error_code createUniqueFile(const Twine &Model,
+ SmallVectorImpl<char> &ResultPath) {
int Dummy;
return createUniqueEntity(Model, Dummy, ResultPath, false, 0, FS_Name);
}
-static error_code createTemporaryFile(const Twine &Model, int &ResultFD,
- llvm::SmallVectorImpl<char> &ResultPath,
- FSEntity Type) {
+static std::error_code
+createTemporaryFile(const Twine &Model, int &ResultFD,
+ llvm::SmallVectorImpl<char> &ResultPath, FSEntity Type) {
SmallString<128> Storage;
StringRef P = Model.toNullTerminatedStringRef(Storage);
assert(P.find_first_of(separators) == StringRef::npos &&
@@ -665,24 +740,22 @@ static error_code createTemporaryFile(const Twine &Model, int &ResultFD,
true, owner_read | owner_write, Type);
}
-static error_code
+static std::error_code
createTemporaryFile(const Twine &Prefix, StringRef Suffix, int &ResultFD,
- llvm::SmallVectorImpl<char> &ResultPath,
- FSEntity Type) {
+ llvm::SmallVectorImpl<char> &ResultPath, FSEntity Type) {
const char *Middle = Suffix.empty() ? "-%%%%%%" : "-%%%%%%.";
return createTemporaryFile(Prefix + Middle + Suffix, ResultFD, ResultPath,
Type);
}
-
-error_code createTemporaryFile(const Twine &Prefix, StringRef Suffix,
- int &ResultFD,
- SmallVectorImpl<char> &ResultPath) {
+std::error_code createTemporaryFile(const Twine &Prefix, StringRef Suffix,
+ int &ResultFD,
+ SmallVectorImpl<char> &ResultPath) {
return createTemporaryFile(Prefix, Suffix, ResultFD, ResultPath, FS_File);
}
-error_code createTemporaryFile(const Twine &Prefix, StringRef Suffix,
- SmallVectorImpl<char> &ResultPath) {
+std::error_code createTemporaryFile(const Twine &Prefix, StringRef Suffix,
+ SmallVectorImpl<char> &ResultPath) {
int Dummy;
return createTemporaryFile(Prefix, Suffix, Dummy, ResultPath, FS_Name);
}
@@ -690,14 +763,14 @@ error_code createTemporaryFile(const Twine &Prefix, StringRef Suffix,
// This is a mkdtemp with a different pattern. We use createUniqueEntity mostly
// for consistency. We should try using mkdtemp.
-error_code createUniqueDirectory(const Twine &Prefix,
- SmallVectorImpl<char> &ResultPath) {
+std::error_code createUniqueDirectory(const Twine &Prefix,
+ SmallVectorImpl<char> &ResultPath) {
int Dummy;
return createUniqueEntity(Prefix + "-%%%%%%", Dummy, ResultPath,
true, 0, FS_Dir);
}
-error_code make_absolute(SmallVectorImpl<char> &path) {
+std::error_code make_absolute(SmallVectorImpl<char> &path) {
StringRef p(path.data(), path.size());
bool rootDirectory = path::has_root_directory(p),
@@ -709,11 +782,12 @@ error_code make_absolute(SmallVectorImpl<char> &path) {
// Already absolute.
if (rootName && rootDirectory)
- return error_code::success();
+ return std::error_code();
// All of the following conditions will need the current directory.
SmallString<128> current_dir;
- if (error_code ec = current_path(current_dir)) return ec;
+ if (std::error_code ec = current_path(current_dir))
+ return ec;
// Relative path. Prepend the current directory.
if (!rootName && !rootDirectory) {
@@ -721,7 +795,7 @@ error_code make_absolute(SmallVectorImpl<char> &path) {
path::append(current_dir, p);
// Set path to the result.
path.swap(current_dir);
- return error_code::success();
+ return std::error_code();
}
if (!rootName && rootDirectory) {
@@ -730,7 +804,7 @@ error_code make_absolute(SmallVectorImpl<char> &path) {
path::append(curDirRootName, p);
// Set path to the result.
path.swap(curDirRootName);
- return error_code::success();
+ return std::error_code();
}
if (rootName && !rootDirectory) {
@@ -742,27 +816,68 @@ error_code make_absolute(SmallVectorImpl<char> &path) {
SmallString<128> res;
path::append(res, pRootName, bRootDirectory, bRelativePath, pRelativePath);
path.swap(res);
- return error_code::success();
+ return std::error_code();
}
llvm_unreachable("All rootName and rootDirectory combinations should have "
"occurred above!");
}
-error_code create_directories(const Twine &path, bool &existed) {
- SmallString<128> path_storage;
- StringRef p = path.toStringRef(path_storage);
+std::error_code create_directories(const Twine &Path, bool IgnoreExisting) {
+ SmallString<128> PathStorage;
+ StringRef P = Path.toStringRef(PathStorage);
+
+ // Be optimistic and try to create the directory
+ std::error_code EC = create_directory(P, IgnoreExisting);
+ // If we succeeded, or had any error other than the parent not existing, just
+ // return it.
+ if (EC != errc::no_such_file_or_directory)
+ return EC;
- StringRef parent = path::parent_path(p);
- if (!parent.empty()) {
- bool parent_exists;
- if (error_code ec = fs::exists(parent, parent_exists)) return ec;
+ // We failed because of a no_such_file_or_directory, try to create the
+ // parent.
+ StringRef Parent = path::parent_path(P);
+ if (Parent.empty())
+ return EC;
+
+ if ((EC = create_directories(Parent)))
+ return EC;
+
+ return create_directory(P, IgnoreExisting);
+}
- if (!parent_exists)
- if (error_code ec = create_directories(parent, existed)) return ec;
+std::error_code copy_file(const Twine &From, const Twine &To) {
+ int ReadFD, WriteFD;
+ if (std::error_code EC = openFileForRead(From, ReadFD))
+ return EC;
+ if (std::error_code EC = openFileForWrite(To, WriteFD, F_None)) {
+ close(ReadFD);
+ return EC;
}
- return create_directory(p, existed);
+ const size_t BufSize = 4096;
+ char *Buf = new char[BufSize];
+ int BytesRead = 0, BytesWritten = 0;
+ for (;;) {
+ BytesRead = read(ReadFD, Buf, BufSize);
+ if (BytesRead <= 0)
+ break;
+ while (BytesRead) {
+ BytesWritten = write(WriteFD, Buf, BytesRead);
+ if (BytesWritten < 0)
+ break;
+ BytesRead -= BytesWritten;
+ }
+ if (BytesWritten < 0)
+ break;
+ }
+ close(ReadFD);
+ close(WriteFD);
+ delete[] Buf;
+
+ if (BytesRead < 0 || BytesWritten < 0)
+ return std::error_code(errno, std::generic_category());
+ return std::error_code();
}
bool exists(file_status status) {
@@ -777,43 +892,30 @@ bool is_directory(file_status status) {
return status.type() == file_type::directory_file;
}
-error_code is_directory(const Twine &path, bool &result) {
+std::error_code is_directory(const Twine &path, bool &result) {
file_status st;
- if (error_code ec = status(path, st))
+ if (std::error_code ec = status(path, st))
return ec;
result = is_directory(st);
- return error_code::success();
+ return std::error_code();
}
bool is_regular_file(file_status status) {
return status.type() == file_type::regular_file;
}
-error_code is_regular_file(const Twine &path, bool &result) {
+std::error_code is_regular_file(const Twine &path, bool &result) {
file_status st;
- if (error_code ec = status(path, st))
+ if (std::error_code ec = status(path, st))
return ec;
result = is_regular_file(st);
- return error_code::success();
-}
-
-bool is_symlink(file_status status) {
- return status.type() == file_type::symlink_file;
-}
-
-error_code is_symlink(const Twine &path, bool &result) {
- file_status st;
- if (error_code ec = status(path, st))
- return ec;
- result = is_symlink(st);
- return error_code::success();
+ return std::error_code();
}
bool is_other(file_status status) {
return exists(status) &&
!is_regular_file(status) &&
- !is_directory(status) &&
- !is_symlink(status);
+ !is_directory(status);
}
void directory_entry::replace_filename(const Twine &filename, file_status st) {
@@ -824,26 +926,8 @@ void directory_entry::replace_filename(const Twine &filename, file_status st) {
Status = st;
}
-error_code has_magic(const Twine &path, const Twine &magic, bool &result) {
- SmallString<32> MagicStorage;
- StringRef Magic = magic.toStringRef(MagicStorage);
- SmallString<32> Buffer;
-
- if (error_code ec = get_magic(path, Magic.size(), Buffer)) {
- if (ec == errc::value_too_large) {
- // Magic.size() > file_size(Path).
- result = false;
- return error_code::success();
- }
- return ec;
- }
-
- result = Magic == Buffer;
- return error_code::success();
-}
-
/// @brief Identify the magic in magic.
- file_magic identify_magic(StringRef Magic) {
+file_magic identify_magic(StringRef Magic) {
if (Magic.size() < 4)
return file_magic::unknown;
switch ((unsigned char)Magic[0]) {
@@ -943,6 +1027,7 @@ error_code has_magic(const Twine &path, const Twine &magic, bool &result) {
case 0x66: // MPS R4000 Windows
case 0x50: // mc68K
case 0x4c: // 80386 Windows
+ case 0xc4: // ARMNT Windows
if (Magic[1] == 0x01)
return file_magic::coff_object;
@@ -973,56 +1058,21 @@ error_code has_magic(const Twine &path, const Twine &magic, bool &result) {
return file_magic::unknown;
}
-error_code identify_magic(const Twine &path, file_magic &result) {
- SmallString<32> Magic;
- error_code ec = get_magic(path, Magic.capacity(), Magic);
- if (ec && ec != errc::value_too_large)
- return ec;
-
- result = identify_magic(Magic);
- return error_code::success();
-}
-
-namespace {
-error_code remove_all_r(StringRef path, file_type ft, uint32_t &count) {
- if (ft == file_type::directory_file) {
- // This code would be a lot better with exceptions ;/.
- error_code ec;
- directory_iterator i(path, ec);
- if (ec) return ec;
- for (directory_iterator e; i != e; i.increment(ec)) {
- if (ec) return ec;
- file_status st;
- if (error_code ec = i->status(st)) return ec;
- if (error_code ec = remove_all_r(i->path(), st.type(), count)) return ec;
- }
- bool obviously_this_exists;
- if (error_code ec = remove(path, obviously_this_exists)) return ec;
- assert(obviously_this_exists);
- ++count; // Include the directory itself in the items removed.
- } else {
- bool obviously_this_exists;
- if (error_code ec = remove(path, obviously_this_exists)) return ec;
- assert(obviously_this_exists);
- ++count;
- }
-
- return error_code::success();
-}
-} // end unnamed namespace
+std::error_code identify_magic(const Twine &Path, file_magic &Result) {
+ int FD;
+ if (std::error_code EC = openFileForRead(Path, FD))
+ return EC;
-error_code remove_all(const Twine &path, uint32_t &num_removed) {
- SmallString<128> path_storage;
- StringRef p = path.toStringRef(path_storage);
+ char Buffer[32];
+ int Length = read(FD, Buffer, sizeof(Buffer));
+ if (close(FD) != 0 || Length < 0)
+ return std::error_code(errno, std::generic_category());
- file_status fs;
- if (error_code ec = status(path, fs))
- return ec;
- num_removed = 0;
- return remove_all_r(p, fs.type(), num_removed);
+ Result = identify_magic(StringRef(Buffer, Length));
+ return std::error_code();
}
-error_code directory_entry::status(file_status &result) const {
+std::error_code directory_entry::status(file_status &result) const {
return fs::status(Path, result);
}
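A minimal sketch (not part of the patch) of the std::error_code-based sys::fs interface after this change, using createTemporaryFile() as declared above:

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void makeScratchFile() {
  SmallString<128> Path;
  int FD;
  // Expands to something like <tempdir>/scratch-xxxxxx.txt and opens it.
  if (std::error_code EC =
          sys::fs::createTemporaryFile("scratch", "txt", FD, Path)) {
    errs() << "cannot create temporary file: " << EC.message() << "\n";
    return;
  }
  outs() << "created " << Path.str() << "\n";
}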
diff --git a/contrib/llvm/lib/Support/PrettyStackTrace.cpp b/contrib/llvm/lib/Support/PrettyStackTrace.cpp
index 722f4ca..987778a 100644
--- a/contrib/llvm/lib/Support/PrettyStackTrace.cpp
+++ b/contrib/llvm/lib/Support/PrettyStackTrace.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm-c/Core.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Config/config.h" // Get autoconf configuration settings
#include "llvm/Support/ManagedStatic.h"
@@ -20,7 +21,6 @@
#include "llvm/Support/ThreadLocal.h"
#include "llvm/Support/Watchdog.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm-c/Core.h"
#ifdef HAVE_CRASHREPORTERCLIENT_H
#include <CrashReporterClient.h>
@@ -46,7 +46,7 @@ static unsigned PrintStack(const PrettyStackTraceEntry *Entry, raw_ostream &OS){
/// PrintCurStackTrace - Print the current stack trace to the specified stream.
static void PrintCurStackTrace(raw_ostream &OS) {
// Don't print an empty trace.
- if (PrettyStackTraceHead->get() == 0) return;
+ if (!PrettyStackTraceHead->get()) return;
// If there are pretty stack frames registered, walk and emit them.
OS << "Stack dump:\n";
@@ -136,7 +136,7 @@ void PrettyStackTraceProgram::print(raw_ostream &OS) const {
}
static bool RegisterCrashPrinter() {
- sys::AddSignalHandler(CrashHandler, 0);
+ sys::AddSignalHandler(CrashHandler, nullptr);
return false;
}
diff --git a/contrib/llvm/lib/Support/Process.cpp b/contrib/llvm/lib/Support/Process.cpp
index d5168f0..0d42e0e 100644
--- a/contrib/llvm/lib/Support/Process.cpp
+++ b/contrib/llvm/lib/Support/Process.cpp
@@ -7,13 +7,16 @@
//
//===----------------------------------------------------------------------===//
//
-// This header file implements the operating system Process concept.
+// This file implements the operating system Process concept.
//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Config/config.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Process.h"
+#include "llvm/Support/Program.h"
using namespace llvm;
using namespace sys;
@@ -34,14 +37,6 @@ self_process *process::get_self() {
return SP;
}
-#if defined(_MSC_VER)
-// Visual Studio complains that the self_process destructor never exits. This
-// doesn't make much sense, as that's the whole point of calling abort... Just
-// silence this warning.
-#pragma warning(push)
-#pragma warning(disable:4722)
-#endif
-
// The destructor for the self_process subclass must never actually be
// executed. There should be at most one instance of this class, and that
// instance should live until the process terminates to avoid the potential for
@@ -74,10 +69,32 @@ TimeValue self_process::get_wall_time() const {
return getElapsedWallTime();
}
+Optional<std::string> Process::FindInEnvPath(const std::string& EnvName,
+ const std::string& FileName)
+{
+ Optional<std::string> FoundPath;
+ Optional<std::string> OptPath = Process::GetEnv(EnvName);
+ if (!OptPath.hasValue())
+ return FoundPath;
+
+ const char EnvPathSeparatorStr[] = {EnvPathSeparator, '\0'};
+ SmallVector<StringRef, 8> Dirs;
+ SplitString(OptPath.getValue(), Dirs, EnvPathSeparatorStr);
+
+ for (const auto &Dir : Dirs) {
+ if (Dir.empty())
+ continue;
+
+ SmallString<128> FilePath(Dir);
+ path::append(FilePath, FileName);
+ if (fs::exists(Twine(FilePath))) {
+ FoundPath = FilePath.str();
+ break;
+ }
+ }
-#if defined(_MSC_VER)
-#pragma warning(pop)
-#endif
+ return FoundPath;
+}
#define COLOR(FGBG, CODE, BOLD) "\033[0;" BOLD FGBG CODE "m"
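Hypothetical usage (not in the patch) of the new Process::FindInEnvPath(), scanning each directory in an environment path list for a file name:

#include "llvm/ADT/Optional.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/raw_ostream.h"
#include <string>
using namespace llvm;

static void findInPath(const std::string &FileName) {
  // Searches the directories listed in $PATH, in order, and returns the
  // first existing <dir>/<FileName>, or nothing if no directory matches.
  Optional<std::string> P = sys::Process::FindInEnvPath("PATH", FileName);
  if (P.hasValue())
    outs() << "found: " << P.getValue() << "\n";
  else
    outs() << FileName << " not found in $PATH\n";
}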
diff --git a/contrib/llvm/lib/Support/Program.cpp b/contrib/llvm/lib/Support/Program.cpp
index 83f2ec4..b84b82b 100644
--- a/contrib/llvm/lib/Support/Program.cpp
+++ b/contrib/llvm/lib/Support/Program.cpp
@@ -7,13 +7,13 @@
//
//===----------------------------------------------------------------------===//
//
-// This header file implements the operating system Program concept.
+// This file implements the operating system Program concept.
//
//===----------------------------------------------------------------------===//
#include "llvm/Support/Program.h"
#include "llvm/Config/config.h"
-#include "llvm/Support/system_error.h"
+#include <system_error>
using namespace llvm;
using namespace sys;
@@ -34,7 +34,8 @@ int sys::ExecuteAndWait(StringRef Program, const char **args, const char **envp,
if (Execute(PI, Program, args, envp, redirects, memoryLimit, ErrMsg)) {
if (ExecutionFailed)
*ExecutionFailed = false;
- ProcessInfo Result = Wait(PI, secondsToWait, true, ErrMsg);
+ ProcessInfo Result = Wait(
+ PI, secondsToWait, /*WaitUntilTerminates=*/secondsToWait == 0, ErrMsg);
return Result.ReturnCode;
}
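With this change a non-zero secondsToWait acts as a real timeout (Wait no longer blocks until termination unconditionally). A sketch of a caller, assuming the LLVM 3.5 ExecuteAndWait() parameter order shown above:

#include "llvm/Support/Program.h"
#include <string>
using namespace llvm;

static int runWithTimeout(StringRef Prog, const char **Argv) {
  std::string Err;
  bool Failed = false;
  // Give the child at most 10 seconds; secondsToWait == 0 would wait forever.
  return sys::ExecuteAndWait(Prog, Argv, /*envp=*/nullptr,
                             /*redirects=*/nullptr, /*secondsToWait=*/10,
                             /*memoryLimit=*/0, &Err, &Failed);
}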
diff --git a/contrib/llvm/lib/Support/RWMutex.cpp b/contrib/llvm/lib/Support/RWMutex.cpp
index 6a34f2d..3b6309c 100644
--- a/contrib/llvm/lib/Support/RWMutex.cpp
+++ b/contrib/llvm/lib/Support/RWMutex.cpp
@@ -44,7 +44,7 @@ using namespace sys;
// Construct a RWMutex using pthread calls
RWMutexImpl::RWMutexImpl()
- : data_(0)
+ : data_(nullptr)
{
// Declare the pthread_rwlock data structures
pthread_rwlock_t* rwlock =
@@ -56,7 +56,7 @@ RWMutexImpl::RWMutexImpl()
#endif
// Initialize the rwlock
- int errorcode = pthread_rwlock_init(rwlock, NULL);
+ int errorcode = pthread_rwlock_init(rwlock, nullptr);
(void)errorcode;
assert(errorcode == 0);
@@ -68,7 +68,7 @@ RWMutexImpl::RWMutexImpl()
RWMutexImpl::~RWMutexImpl()
{
pthread_rwlock_t* rwlock = static_cast<pthread_rwlock_t*>(data_);
- assert(rwlock != 0);
+ assert(rwlock != nullptr);
pthread_rwlock_destroy(rwlock);
free(rwlock);
}
@@ -77,7 +77,7 @@ bool
RWMutexImpl::reader_acquire()
{
pthread_rwlock_t* rwlock = static_cast<pthread_rwlock_t*>(data_);
- assert(rwlock != 0);
+ assert(rwlock != nullptr);
int errorcode = pthread_rwlock_rdlock(rwlock);
return errorcode == 0;
@@ -87,7 +87,7 @@ bool
RWMutexImpl::reader_release()
{
pthread_rwlock_t* rwlock = static_cast<pthread_rwlock_t*>(data_);
- assert(rwlock != 0);
+ assert(rwlock != nullptr);
int errorcode = pthread_rwlock_unlock(rwlock);
return errorcode == 0;
@@ -97,7 +97,7 @@ bool
RWMutexImpl::writer_acquire()
{
pthread_rwlock_t* rwlock = static_cast<pthread_rwlock_t*>(data_);
- assert(rwlock != 0);
+ assert(rwlock != nullptr);
int errorcode = pthread_rwlock_wrlock(rwlock);
return errorcode == 0;
@@ -107,7 +107,7 @@ bool
RWMutexImpl::writer_release()
{
pthread_rwlock_t* rwlock = static_cast<pthread_rwlock_t*>(data_);
- assert(rwlock != 0);
+ assert(rwlock != nullptr);
int errorcode = pthread_rwlock_unlock(rwlock);
return errorcode == 0;
diff --git a/contrib/llvm/lib/Support/RandomNumberGenerator.cpp b/contrib/llvm/lib/Support/RandomNumberGenerator.cpp
new file mode 100644
index 0000000..c50e7cb
--- /dev/null
+++ b/contrib/llvm/lib/Support/RandomNumberGenerator.cpp
@@ -0,0 +1,61 @@
+//===-- RandomNumberGenerator.cpp - Implement RNG class -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements random number generation (RNG).
+// The current implementation is NOT cryptographically secure as it uses
+// the C++11 <random> facilities.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "rng"
+#include "llvm/Support/RandomNumberGenerator.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+// Tracking BUG: 19665
+// http://llvm.org/bugs/show_bug.cgi?id=19665
+//
+// Do not change to cl::opt<uint64_t> since this silently breaks argument parsing.
+static cl::opt<unsigned long long>
+Seed("rng-seed", cl::value_desc("seed"),
+ cl::desc("Seed for the random number generator"), cl::init(0));
+
+RandomNumberGenerator::RandomNumberGenerator(StringRef Salt) {
+ DEBUG(
+ if (Seed == 0)
+ errs() << "Warning! Using unseeded random number generator.\n"
+ );
+
+ // Combine seed and salt using std::seed_seq.
+ // Entropy: Seed-low, Seed-high, Salt...
+ std::vector<uint32_t> Data;
+ Data.reserve(2 + Salt.size()/4 + 1);
+ Data.push_back(Seed);
+ Data.push_back(Seed >> 32);
+
+ uint32_t Pack = 0;
+ for (size_t I = 0; I < Salt.size(); ++I) {
+ Pack <<= 8;
+ Pack += Salt[I];
+
+ if (I%4 == 3)
+ Data.push_back(Pack);
+ }
+ Data.push_back(Pack);
+
+ std::seed_seq SeedSeq(Data.begin(), Data.end());
+ Generator.seed(SeedSeq);
+}
+
+uint64_t RandomNumberGenerator::next(uint64_t Max) {
+ std::uniform_int_distribution<uint64_t> distribution(0, Max - 1);
+ return distribution(Generator);
+}
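A sketch of the intended use (not part of the patch); construction of RandomNumberGenerator may be restricted to privileged clients, so the example only assumes access to an existing instance:

#include "llvm/Support/RandomNumberGenerator.h"
using namespace llvm;

// next(Max) returns a value uniformly distributed in [0, Max); the -rng-seed
// option above makes the stream reproducible across runs.
static uint64_t pickSlot(RandomNumberGenerator &RNG, uint64_t NumSlots) {
  return RNG.next(NumSlots);
}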
diff --git a/contrib/llvm/lib/Support/Regex.cpp b/contrib/llvm/lib/Support/Regex.cpp
index 5413641..f7fe1e4 100644
--- a/contrib/llvm/lib/Support/Regex.cpp
+++ b/contrib/llvm/lib/Support/Regex.cpp
@@ -33,15 +33,17 @@ Regex::Regex(StringRef regex, unsigned Flags) {
}
Regex::~Regex() {
- llvm_regfree(preg);
- delete preg;
+ if (preg) {
+ llvm_regfree(preg);
+ delete preg;
+ }
}
bool Regex::isValid(std::string &Error) {
if (!error)
return true;
- size_t len = llvm_regerror(error, preg, NULL, 0);
+ size_t len = llvm_regerror(error, preg, nullptr, 0);
Error.resize(len - 1);
llvm_regerror(error, preg, &Error[0], len);
@@ -169,9 +171,23 @@ std::string Regex::sub(StringRef Repl, StringRef String,
return Res;
}
+// These are the special characters matched in functions like "p_ere_exp".
+static const char RegexMetachars[] = "()^$|*+?.[]\\{}";
+
bool Regex::isLiteralERE(StringRef Str) {
// Check for regex metacharacters. This list was derived from our regex
// implementation in regcomp.c and double checked against the POSIX extended
// regular expression specification.
- return Str.find_first_of("()^$|*+?.[]\\{}") == StringRef::npos;
+ return Str.find_first_of(RegexMetachars) == StringRef::npos;
+}
+
+std::string Regex::escape(StringRef String) {
+ std::string RegexStr;
+ for (unsigned i = 0, e = String.size(); i != e; ++i) {
+ if (strchr(RegexMetachars, String[i]))
+ RegexStr += '\\';
+ RegexStr += String[i];
+ }
+
+ return RegexStr;
}
diff --git a/contrib/llvm/lib/Support/ScaledNumber.cpp b/contrib/llvm/lib/Support/ScaledNumber.cpp
new file mode 100644
index 0000000..3fe027b
--- /dev/null
+++ b/contrib/llvm/lib/Support/ScaledNumber.cpp
@@ -0,0 +1,319 @@
+//==- lib/Support/ScaledNumber.cpp - Support for scaled numbers -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of some scaled number algorithms.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/ScaledNumber.h"
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+using namespace llvm::ScaledNumbers;
+
+std::pair<uint64_t, int16_t> ScaledNumbers::multiply64(uint64_t LHS,
+ uint64_t RHS) {
+ // Separate into two 32-bit digits (U.L).
+ auto getU = [](uint64_t N) { return N >> 32; };
+ auto getL = [](uint64_t N) { return N & UINT32_MAX; };
+ uint64_t UL = getU(LHS), LL = getL(LHS), UR = getU(RHS), LR = getL(RHS);
+
+ // Compute cross products.
+ uint64_t P1 = UL * UR, P2 = UL * LR, P3 = LL * UR, P4 = LL * LR;
+
+ // Sum into two 64-bit digits.
+ uint64_t Upper = P1, Lower = P4;
+ auto addWithCarry = [&](uint64_t N) {
+ uint64_t NewLower = Lower + (getL(N) << 32);
+ Upper += getU(N) + (NewLower < Lower);
+ Lower = NewLower;
+ };
+ addWithCarry(P2);
+ addWithCarry(P3);
+
+ // Check whether the upper digit is empty.
+ if (!Upper)
+ return std::make_pair(Lower, 0);
+
+ // Shift as little as possible to maximize precision.
+ unsigned LeadingZeros = countLeadingZeros(Upper);
+ int Shift = 64 - LeadingZeros;
+ if (LeadingZeros)
+ Upper = Upper << LeadingZeros | Lower >> Shift;
+ return getRounded(Upper, Shift,
+ Shift && (Lower & UINT64_C(1) << (Shift - 1)));
+}
+
+static uint64_t getHalf(uint64_t N) { return (N >> 1) + (N & 1); }
+
+std::pair<uint32_t, int16_t> ScaledNumbers::divide32(uint32_t Dividend,
+ uint32_t Divisor) {
+ assert(Dividend && "expected non-zero dividend");
+ assert(Divisor && "expected non-zero divisor");
+
+ // Use 64-bit math and canonicalize the dividend to gain precision.
+ uint64_t Dividend64 = Dividend;
+ int Shift = 0;
+ if (int Zeros = countLeadingZeros(Dividend64)) {
+ Shift -= Zeros;
+ Dividend64 <<= Zeros;
+ }
+ uint64_t Quotient = Dividend64 / Divisor;
+ uint64_t Remainder = Dividend64 % Divisor;
+
+ // If Quotient needs to be shifted, leave the rounding to getAdjusted().
+ if (Quotient > UINT32_MAX)
+ return getAdjusted<uint32_t>(Quotient, Shift);
+
+ // Round based on the value of the next bit.
+ return getRounded<uint32_t>(Quotient, Shift, Remainder >= getHalf(Divisor));
+}
+
+std::pair<uint64_t, int16_t> ScaledNumbers::divide64(uint64_t Dividend,
+ uint64_t Divisor) {
+ assert(Dividend && "expected non-zero dividend");
+ assert(Divisor && "expected non-zero divisor");
+
+ // Minimize size of divisor.
+ int Shift = 0;
+ if (int Zeros = countTrailingZeros(Divisor)) {
+ Shift -= Zeros;
+ Divisor >>= Zeros;
+ }
+
+ // Check for powers of two.
+ if (Divisor == 1)
+ return std::make_pair(Dividend, Shift);
+
+ // Maximize size of dividend.
+ if (int Zeros = countLeadingZeros(Dividend)) {
+ Shift -= Zeros;
+ Dividend <<= Zeros;
+ }
+
+ // Start with the result of a divide.
+ uint64_t Quotient = Dividend / Divisor;
+ Dividend %= Divisor;
+
+ // Continue building the quotient with long division.
+ while (!(Quotient >> 63) && Dividend) {
+ // Shift Dividend and check for overflow.
+ bool IsOverflow = Dividend >> 63;
+ Dividend <<= 1;
+ --Shift;
+
+ // Get the next bit of Quotient.
+ Quotient <<= 1;
+ if (IsOverflow || Divisor <= Dividend) {
+ Quotient |= 1;
+ Dividend -= Divisor;
+ }
+ }
+
+ return getRounded(Quotient, Shift, Dividend >= getHalf(Divisor));
+}
+
+int ScaledNumbers::compareImpl(uint64_t L, uint64_t R, int ScaleDiff) {
+ assert(ScaleDiff >= 0 && "wrong argument order");
+ assert(ScaleDiff < 64 && "numbers too far apart");
+
+ uint64_t L_adjusted = L >> ScaleDiff;
+ if (L_adjusted < R)
+ return -1;
+ if (L_adjusted > R)
+ return 1;
+
+ return L > L_adjusted << ScaleDiff ? 1 : 0;
+}
+
+static void appendDigit(std::string &Str, unsigned D) {
+ assert(D < 10);
+ Str += '0' + D % 10;
+}
+
+static void appendNumber(std::string &Str, uint64_t N) {
+ while (N) {
+ appendDigit(Str, N % 10);
+ N /= 10;
+ }
+}
+
+static bool doesRoundUp(char Digit) {
+ switch (Digit) {
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ return true;
+ default:
+ return false;
+ }
+}
+
+static std::string toStringAPFloat(uint64_t D, int E, unsigned Precision) {
+ assert(E >= ScaledNumbers::MinScale);
+ assert(E <= ScaledNumbers::MaxScale);
+
+ // Find a new E, but don't let it increase past MaxScale.
+ int LeadingZeros = ScaledNumberBase::countLeadingZeros64(D);
+ int NewE = std::min(ScaledNumbers::MaxScale, E + 63 - LeadingZeros);
+ int Shift = 63 - (NewE - E);
+ assert(Shift <= LeadingZeros);
+ assert(Shift == LeadingZeros || NewE == ScaledNumbers::MaxScale);
+ D <<= Shift;
+ E = NewE;
+
+ // Check for a denormal.
+ unsigned AdjustedE = E + 16383;
+ if (!(D >> 63)) {
+ assert(E == ScaledNumbers::MaxScale);
+ AdjustedE = 0;
+ }
+
+ // Build the float and print it.
+ uint64_t RawBits[2] = {D, AdjustedE};
+ APFloat Float(APFloat::x87DoubleExtended, APInt(80, RawBits));
+ SmallVector<char, 24> Chars;
+ Float.toString(Chars, Precision, 0);
+ return std::string(Chars.begin(), Chars.end());
+}
+
+static std::string stripTrailingZeros(const std::string &Float) {
+ size_t NonZero = Float.find_last_not_of('0');
+ assert(NonZero != std::string::npos && "no . in floating point string");
+
+ if (Float[NonZero] == '.')
+ ++NonZero;
+
+ return Float.substr(0, NonZero + 1);
+}
+
+std::string ScaledNumberBase::toString(uint64_t D, int16_t E, int Width,
+ unsigned Precision) {
+ if (!D)
+ return "0.0";
+
+ // Canonicalize exponent and digits.
+ uint64_t Above0 = 0;
+ uint64_t Below0 = 0;
+ uint64_t Extra = 0;
+ int ExtraShift = 0;
+ if (E == 0) {
+ Above0 = D;
+ } else if (E > 0) {
+ if (int Shift = std::min(int16_t(countLeadingZeros64(D)), E)) {
+ D <<= Shift;
+ E -= Shift;
+
+ if (!E)
+ Above0 = D;
+ }
+ } else if (E > -64) {
+ Above0 = D >> -E;
+ Below0 = D << (64 + E);
+ } else if (E > -120) {
+ Below0 = D >> (-E - 64);
+ Extra = D << (128 + E);
+ ExtraShift = -64 - E;
+ }
+
+ // Fall back on APFloat for very small and very large numbers.
+ if (!Above0 && !Below0)
+ return toStringAPFloat(D, E, Precision);
+
+ // Append the digits before the decimal.
+ std::string Str;
+ size_t DigitsOut = 0;
+ if (Above0) {
+ appendNumber(Str, Above0);
+ DigitsOut = Str.size();
+ } else
+ appendDigit(Str, 0);
+ std::reverse(Str.begin(), Str.end());
+
+ // Return early if there's nothing after the decimal.
+ if (!Below0)
+ return Str + ".0";
+
+ // Append the decimal and beyond.
+ Str += '.';
+ uint64_t Error = UINT64_C(1) << (64 - Width);
+
+ // We need to shift Below0 to the right to make space for calculating
+ // digits. Save the precision we're losing in Extra.
+ Extra = (Below0 & 0xf) << 56 | (Extra >> 8);
+ Below0 >>= 4;
+ size_t SinceDot = 0;
+ size_t AfterDot = Str.size();
+ do {
+ if (ExtraShift) {
+ --ExtraShift;
+ Error *= 5;
+ } else
+ Error *= 10;
+
+ Below0 *= 10;
+ Extra *= 10;
+ Below0 += (Extra >> 60);
+ Extra = Extra & (UINT64_MAX >> 4);
+ appendDigit(Str, Below0 >> 60);
+ Below0 = Below0 & (UINT64_MAX >> 4);
+ if (DigitsOut || Str.back() != '0')
+ ++DigitsOut;
+ ++SinceDot;
+ } while (Error && (Below0 << 4 | Extra >> 60) >= Error / 2 &&
+ (!Precision || DigitsOut <= Precision || SinceDot < 2));
+
+ // Return early for maximum precision.
+ if (!Precision || DigitsOut <= Precision)
+ return stripTrailingZeros(Str);
+
+ // Find where to truncate.
+ size_t Truncate =
+ std::max(Str.size() - (DigitsOut - Precision), AfterDot + 1);
+
+ // Check if there's anything to truncate.
+ if (Truncate >= Str.size())
+ return stripTrailingZeros(Str);
+
+ bool Carry = doesRoundUp(Str[Truncate]);
+ if (!Carry)
+ return stripTrailingZeros(Str.substr(0, Truncate));
+
+ // Round with the first truncated digit.
+ for (std::string::reverse_iterator I(Str.begin() + Truncate), E = Str.rend();
+ I != E; ++I) {
+ if (*I == '.')
+ continue;
+ if (*I == '9') {
+ *I = '0';
+ continue;
+ }
+
+ ++*I;
+ Carry = false;
+ break;
+ }
+
+ // Add "1" in front if we still need to carry.
+ return stripTrailingZeros(std::string(Carry, '1') + Str.substr(0, Truncate));
+}
+
+raw_ostream &ScaledNumberBase::print(raw_ostream &OS, uint64_t D, int16_t E,
+ int Width, unsigned Precision) {
+ return OS << toString(D, E, Width, Precision);
+}
+
+void ScaledNumberBase::dump(uint64_t D, int16_t E, int Width) {
+ print(dbgs(), D, E, Width, 0) << "[" << Width << ":" << D << "*2^" << E
+ << "]";
+}
diff --git a/contrib/llvm/lib/Support/SearchForAddressOfSpecialSymbol.cpp b/contrib/llvm/lib/Support/SearchForAddressOfSpecialSymbol.cpp
index 2d23902..55f3320 100644
--- a/contrib/llvm/lib/Support/SearchForAddressOfSpecialSymbol.cpp
+++ b/contrib/llvm/lib/Support/SearchForAddressOfSpecialSymbol.cpp
@@ -48,7 +48,7 @@ static void *DoSearch(const char* symbolName) {
#endif
#undef EXPLICIT_SYMBOL
- return 0;
+ return nullptr;
}
namespace llvm {
diff --git a/contrib/llvm/lib/Support/SmallPtrSet.cpp b/contrib/llvm/lib/Support/SmallPtrSet.cpp
index dd417b4..a80e095 100644
--- a/contrib/llvm/lib/Support/SmallPtrSet.cpp
+++ b/contrib/llvm/lib/Support/SmallPtrSet.cpp
@@ -20,7 +20,7 @@
using namespace llvm;
-void SmallPtrSetImpl::shrink_and_clear() {
+void SmallPtrSetImplBase::shrink_and_clear() {
assert(!isSmall() && "Can't shrink a small set!");
free(CurArray);
@@ -34,7 +34,7 @@ void SmallPtrSetImpl::shrink_and_clear() {
memset(CurArray, -1, CurArraySize*sizeof(void*));
}
-bool SmallPtrSetImpl::insert_imp(const void * Ptr) {
+bool SmallPtrSetImplBase::insert_imp(const void * Ptr) {
if (isSmall()) {
// Check to see if it is already in the set.
for (const void **APtr = SmallArray, **E = SmallArray+NumElements;
@@ -71,7 +71,7 @@ bool SmallPtrSetImpl::insert_imp(const void * Ptr) {
return true;
}
-bool SmallPtrSetImpl::erase_imp(const void * Ptr) {
+bool SmallPtrSetImplBase::erase_imp(const void * Ptr) {
if (isSmall()) {
// Check to see if it is in the set.
for (const void **APtr = SmallArray, **E = SmallArray+NumElements;
@@ -98,12 +98,12 @@ bool SmallPtrSetImpl::erase_imp(const void * Ptr) {
return true;
}
-const void * const *SmallPtrSetImpl::FindBucketFor(const void *Ptr) const {
+const void * const *SmallPtrSetImplBase::FindBucketFor(const void *Ptr) const {
unsigned Bucket = DenseMapInfo<void *>::getHashValue(Ptr) & (CurArraySize-1);
unsigned ArraySize = CurArraySize;
unsigned ProbeAmt = 1;
const void *const *Array = CurArray;
- const void *const *Tombstone = 0;
+ const void *const *Tombstone = nullptr;
while (1) {
// Found Ptr's bucket?
if (Array[Bucket] == Ptr)
@@ -127,7 +127,7 @@ const void * const *SmallPtrSetImpl::FindBucketFor(const void *Ptr) const {
/// Grow - Allocate a larger backing store for the buckets and move it over.
///
-void SmallPtrSetImpl::Grow(unsigned NewSize) {
+void SmallPtrSetImplBase::Grow(unsigned NewSize) {
// Allocate at twice as many buckets, but at least 128.
unsigned OldSize = CurArraySize;
@@ -163,8 +163,8 @@ void SmallPtrSetImpl::Grow(unsigned NewSize) {
}
}
-SmallPtrSetImpl::SmallPtrSetImpl(const void **SmallStorage,
- const SmallPtrSetImpl& that) {
+SmallPtrSetImplBase::SmallPtrSetImplBase(const void **SmallStorage,
+ const SmallPtrSetImplBase& that) {
SmallArray = SmallStorage;
// If we're becoming small, prepare to insert into our stack space
@@ -186,9 +186,39 @@ SmallPtrSetImpl::SmallPtrSetImpl(const void **SmallStorage,
NumTombstones = that.NumTombstones;
}
+SmallPtrSetImplBase::SmallPtrSetImplBase(const void **SmallStorage,
+ unsigned SmallSize,
+ SmallPtrSetImplBase &&that) {
+ SmallArray = SmallStorage;
+
+ // Copy over the basic members.
+ CurArraySize = that.CurArraySize;
+ NumElements = that.NumElements;
+ NumTombstones = that.NumTombstones;
+
+ // When small, just copy into our small buffer.
+ if (that.isSmall()) {
+ CurArray = SmallArray;
+ memcpy(CurArray, that.CurArray, sizeof(void *) * CurArraySize);
+ return;
+ }
+
+ // Otherwise, we steal the large memory allocation and no copy is needed.
+ CurArray = that.CurArray;
+ that.CurArray = that.SmallArray;
+
+ // Make the "that" object small and empty.
+ that.CurArraySize = SmallSize;
+ assert(that.CurArray == that.SmallArray);
+ that.NumElements = 0;
+ that.NumTombstones = 0;
+}
+
/// CopyFrom - implement operator= from a smallptrset that has the same pointer
/// type, but may have a different small size.
-void SmallPtrSetImpl::CopyFrom(const SmallPtrSetImpl &RHS) {
+void SmallPtrSetImplBase::CopyFrom(const SmallPtrSetImplBase &RHS) {
+ assert(&RHS != this && "Self-copy should be handled by the caller.");
+
if (isSmall() && RHS.isSmall())
assert(CurArraySize == RHS.CurArraySize &&
"Cannot assign sets with different small sizes");
@@ -222,7 +252,35 @@ void SmallPtrSetImpl::CopyFrom(const SmallPtrSetImpl &RHS) {
NumTombstones = RHS.NumTombstones;
}
-void SmallPtrSetImpl::swap(SmallPtrSetImpl &RHS) {
+void SmallPtrSetImplBase::MoveFrom(unsigned SmallSize,
+ SmallPtrSetImplBase &&RHS) {
+ assert(&RHS != this && "Self-move should be handled by the caller.");
+
+ if (!isSmall())
+ free(CurArray);
+
+ if (RHS.isSmall()) {
+ // Copy a small RHS rather than moving.
+ CurArray = SmallArray;
+ memcpy(CurArray, RHS.CurArray, sizeof(void*)*RHS.CurArraySize);
+ } else {
+ CurArray = RHS.CurArray;
+ RHS.CurArray = RHS.SmallArray;
+ }
+
+ // Copy the rest of the trivial members.
+ CurArraySize = RHS.CurArraySize;
+ NumElements = RHS.NumElements;
+ NumTombstones = RHS.NumTombstones;
+
+ // Make the RHS small and empty.
+ RHS.CurArraySize = SmallSize;
+ assert(RHS.CurArray == RHS.SmallArray);
+ RHS.NumElements = 0;
+ RHS.NumTombstones = 0;
+}
+
+void SmallPtrSetImplBase::swap(SmallPtrSetImplBase &RHS) {
if (this == &RHS) return;
// We can only avoid copying elements if neither set is small.
@@ -272,7 +330,7 @@ void SmallPtrSetImpl::swap(SmallPtrSetImpl &RHS) {
std::swap(this->NumElements, RHS.NumElements);
}
-SmallPtrSetImpl::~SmallPtrSetImpl() {
+SmallPtrSetImplBase::~SmallPtrSetImplBase() {
if (!isSmall())
free(CurArray);
}
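Sketch (not in the patch) of what the new move support enables: a grown SmallPtrSet can be returned by value, transferring its heap allocation instead of copying it.

#include "llvm/ADT/SmallPtrSet.h"
using namespace llvm;

static SmallPtrSet<int *, 4> collect(int **Begin, int **End) {
  SmallPtrSet<int *, 4> Set;
  for (int **I = Begin; I != End; ++I)
    Set.insert(*I);
  // If Set outgrew its 4-element inline buffer, the heap array is stolen by
  // the move constructor rather than re-inserted element by element.
  return Set;
}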
diff --git a/contrib/llvm/lib/Support/SourceMgr.cpp b/contrib/llvm/lib/Support/SourceMgr.cpp
index d4b94f8..003cb56 100644
--- a/contrib/llvm/lib/Support/SourceMgr.cpp
+++ b/contrib/llvm/lib/Support/SourceMgr.cpp
@@ -14,20 +14,20 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/SourceMgr.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/system_error.h"
+#include <system_error>
using namespace llvm;
static const size_t TabStop = 8;
namespace {
struct LineNoCacheTy {
- int LastQueryBufferID;
+ unsigned LastQueryBufferID;
const char *LastQuery;
unsigned LineNoOfQuery;
};
@@ -49,48 +49,44 @@ SourceMgr::~SourceMgr() {
}
}
-/// AddIncludeFile - Search for a file with the specified name in the current
-/// directory or in one of the IncludeDirs. If no file is found, this returns
-/// ~0, otherwise it returns the buffer ID of the stacked file.
-size_t SourceMgr::AddIncludeFile(const std::string &Filename,
- SMLoc IncludeLoc,
- std::string &IncludedFile) {
- OwningPtr<MemoryBuffer> NewBuf;
+unsigned SourceMgr::AddIncludeFile(const std::string &Filename,
+ SMLoc IncludeLoc,
+ std::string &IncludedFile) {
IncludedFile = Filename;
- MemoryBuffer::getFile(IncludedFile.c_str(), NewBuf);
+ ErrorOr<std::unique_ptr<MemoryBuffer>> NewBufOrErr =
+ MemoryBuffer::getFile(IncludedFile.c_str());
// If the file didn't exist directly, see if it's in an include path.
- for (unsigned i = 0, e = IncludeDirectories.size(); i != e && !NewBuf; ++i) {
- IncludedFile = IncludeDirectories[i] + "/" + Filename;
- MemoryBuffer::getFile(IncludedFile.c_str(), NewBuf);
+ for (unsigned i = 0, e = IncludeDirectories.size(); i != e && !NewBufOrErr;
+ ++i) {
+ IncludedFile =
+ IncludeDirectories[i] + sys::path::get_separator().data() + Filename;
+ NewBufOrErr = MemoryBuffer::getFile(IncludedFile.c_str());
}
- if (!NewBuf) return ~0U;
+ if (!NewBufOrErr)
+ return 0;
- return AddNewSourceBuffer(NewBuf.take(), IncludeLoc);
+ return AddNewSourceBuffer(NewBufOrErr.get().release(), IncludeLoc);
}
-
-/// FindBufferContainingLoc - Return the ID of the buffer containing the
-/// specified location, returning -1 if not found.
-int SourceMgr::FindBufferContainingLoc(SMLoc Loc) const {
+unsigned SourceMgr::FindBufferContainingLoc(SMLoc Loc) const {
for (unsigned i = 0, e = Buffers.size(); i != e; ++i)
if (Loc.getPointer() >= Buffers[i].Buffer->getBufferStart() &&
// Use <= here so that a pointer to the null at the end of the buffer
// is included as part of the buffer.
Loc.getPointer() <= Buffers[i].Buffer->getBufferEnd())
- return i;
- return -1;
+ return i + 1;
+ return 0;
}
-/// getLineAndColumn - Find the line and column number for the specified
-/// location in the specified file. This is not a fast method.
std::pair<unsigned, unsigned>
-SourceMgr::getLineAndColumn(SMLoc Loc, int BufferID) const {
- if (BufferID == -1) BufferID = FindBufferContainingLoc(Loc);
- assert(BufferID != -1 && "Invalid Location!");
+SourceMgr::getLineAndColumn(SMLoc Loc, unsigned BufferID) const {
+ if (!BufferID)
+ BufferID = FindBufferContainingLoc(Loc);
+ assert(BufferID && "Invalid Location!");
- MemoryBuffer *Buff = getBufferInfo(BufferID).Buffer;
+ const MemoryBuffer *Buff = getMemoryBuffer(BufferID);
// Count the number of \n's between the start of the file and the specified
// location.
@@ -115,7 +111,7 @@ SourceMgr::getLineAndColumn(SMLoc Loc, int BufferID) const {
if (*Ptr == '\n') ++LineNo;
// Allocate the line number cache if it doesn't exist.
- if (LineNoCache == 0)
+ if (!LineNoCache)
LineNoCache = new LineNoCacheTy();
// Update the line # cache.
@@ -132,8 +128,8 @@ SourceMgr::getLineAndColumn(SMLoc Loc, int BufferID) const {
void SourceMgr::PrintIncludeStack(SMLoc IncludeLoc, raw_ostream &OS) const {
if (IncludeLoc == SMLoc()) return; // Top of stack.
- int CurBuf = FindBufferContainingLoc(IncludeLoc);
- assert(CurBuf != -1 && "Invalid or unspecified location!");
+ unsigned CurBuf = FindBufferContainingLoc(IncludeLoc);
+ assert(CurBuf && "Invalid or unspecified location!");
PrintIncludeStack(getBufferInfo(CurBuf).IncludeLoc, OS);
@@ -143,11 +139,6 @@ void SourceMgr::PrintIncludeStack(SMLoc IncludeLoc, raw_ostream &OS) const {
}
-/// GetMessage - Return an SMDiagnostic at the specified location with the
-/// specified string.
-///
-/// @param Type - If non-null, the kind of message (e.g., "error") which is
-/// prefixed to the message.
SMDiagnostic SourceMgr::GetMessage(SMLoc Loc, SourceMgr::DiagKind Kind,
const Twine &Msg,
ArrayRef<SMRange> Ranges,
@@ -161,10 +152,10 @@ SMDiagnostic SourceMgr::GetMessage(SMLoc Loc, SourceMgr::DiagKind Kind,
std::string LineStr;
if (Loc.isValid()) {
- int CurBuf = FindBufferContainingLoc(Loc);
- assert(CurBuf != -1 && "Invalid or unspecified location!");
+ unsigned CurBuf = FindBufferContainingLoc(Loc);
+ assert(CurBuf && "Invalid or unspecified location!");
- MemoryBuffer *CurMB = getBufferInfo(CurBuf).Buffer;
+ const MemoryBuffer *CurMB = getMemoryBuffer(CurBuf);
BufferID = CurMB->getBufferIdentifier();
// Scan backward to find the start of the line.
@@ -211,25 +202,28 @@ SMDiagnostic SourceMgr::GetMessage(SMLoc Loc, SourceMgr::DiagKind Kind,
LineStr, ColRanges, FixIts);
}
-void SourceMgr::PrintMessage(raw_ostream &OS, SMLoc Loc,
- SourceMgr::DiagKind Kind,
- const Twine &Msg, ArrayRef<SMRange> Ranges,
- ArrayRef<SMFixIt> FixIts, bool ShowColors) const {
- SMDiagnostic Diagnostic = GetMessage(Loc, Kind, Msg, Ranges, FixIts);
-
+void SourceMgr::PrintMessage(raw_ostream &OS, const SMDiagnostic &Diagnostic,
+ bool ShowColors) const {
// Report the message with the diagnostic handler if present.
if (DiagHandler) {
DiagHandler(Diagnostic, DiagContext);
return;
}
- if (Loc != SMLoc()) {
- int CurBuf = FindBufferContainingLoc(Loc);
- assert(CurBuf != -1 && "Invalid or unspecified location!");
+ if (Diagnostic.getLoc().isValid()) {
+ unsigned CurBuf = FindBufferContainingLoc(Diagnostic.getLoc());
+ assert(CurBuf && "Invalid or unspecified location!");
PrintIncludeStack(getBufferInfo(CurBuf).IncludeLoc, OS);
}
- Diagnostic.print(0, OS, ShowColors);
+ Diagnostic.print(nullptr, OS, ShowColors);
+}
+
+void SourceMgr::PrintMessage(raw_ostream &OS, SMLoc Loc,
+ SourceMgr::DiagKind Kind,
+ const Twine &Msg, ArrayRef<SMRange> Ranges,
+ ArrayRef<SMFixIt> FixIts, bool ShowColors) const {
+ PrintMessage(OS, GetMessage(Loc, Kind, Msg, Ranges, FixIts), ShowColors);
}
void SourceMgr::PrintMessage(SMLoc Loc, SourceMgr::DiagKind Kind,
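After this change SourceMgr buffer IDs are unsigned and 1-based, with 0 meaning "no buffer"; a sketch (not in the patch) of the resulting caller idiom:

#include "llvm/Support/SourceMgr.h"
using namespace llvm;

static bool isKnownLoc(const SourceMgr &SM, SMLoc Loc) {
  // The old interface returned int and used -1 as the failure value.
  unsigned BufID = SM.FindBufferContainingLoc(Loc);
  return BufID != 0;
}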
diff --git a/contrib/llvm/lib/Transforms/Utils/SpecialCaseList.cpp b/contrib/llvm/lib/Support/SpecialCaseList.cpp
index 2ef692c..21e43c5 100644
--- a/contrib/llvm/lib/Transforms/Utils/SpecialCaseList.cpp
+++ b/contrib/llvm/lib/Support/SpecialCaseList.cpp
@@ -14,21 +14,16 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Transforms/Utils/SpecialCaseList.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/SpecialCaseList.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSet.h"
-#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/GlobalVariable.h"
-#include "llvm/IR/Module.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/system_error.h"
#include <string>
+#include <system_error>
#include <utility>
namespace llvm {
@@ -39,10 +34,12 @@ namespace llvm {
/// reason for doing so is efficiency; StringSet is much faster at matching
/// literal strings than Regex.
struct SpecialCaseList::Entry {
- StringSet<> Strings;
- Regex *RegEx;
+ Entry() {}
+ Entry(Entry &&Other)
+ : Strings(std::move(Other.Strings)), RegEx(std::move(Other.RegEx)) {}
- Entry() : RegEx(0) {}
+ StringSet<> Strings;
+ std::unique_ptr<Regex> RegEx;
bool match(StringRef Query) const {
return Strings.count(Query) || (RegEx && RegEx->match(Query));
@@ -55,20 +52,21 @@ SpecialCaseList *SpecialCaseList::create(
const StringRef Path, std::string &Error) {
if (Path.empty())
return new SpecialCaseList();
- OwningPtr<MemoryBuffer> File;
- if (error_code EC = MemoryBuffer::getFile(Path, File)) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> FileOrErr =
+ MemoryBuffer::getFile(Path);
+ if (std::error_code EC = FileOrErr.getError()) {
Error = (Twine("Can't open file '") + Path + "': " + EC.message()).str();
- return 0;
+ return nullptr;
}
- return create(File.get(), Error);
+ return create(FileOrErr.get().get(), Error);
}
SpecialCaseList *SpecialCaseList::create(
const MemoryBuffer *MB, std::string &Error) {
- OwningPtr<SpecialCaseList> SCL(new SpecialCaseList());
+ std::unique_ptr<SpecialCaseList> SCL(new SpecialCaseList());
if (!SCL->parse(MB, Error))
- return 0;
- return SCL.take();
+ return nullptr;
+ return SCL.release();
}
SpecialCaseList *SpecialCaseList::createOrDie(const StringRef Path) {
@@ -151,66 +149,16 @@ bool SpecialCaseList::parse(const MemoryBuffer *MB, std::string &Error) {
for (StringMap<std::string>::const_iterator II = I->second.begin(),
IE = I->second.end();
II != IE; ++II) {
- Entries[I->getKey()][II->getKey()].RegEx = new Regex(II->getValue());
+ Entries[I->getKey()][II->getKey()].RegEx.reset(new Regex(II->getValue()));
}
}
return true;
}
-SpecialCaseList::~SpecialCaseList() {
- for (StringMap<StringMap<Entry> >::iterator I = Entries.begin(),
- E = Entries.end();
- I != E; ++I) {
- for (StringMap<Entry>::const_iterator II = I->second.begin(),
- IE = I->second.end();
- II != IE; ++II) {
- delete II->second.RegEx;
- }
- }
-}
-
-bool SpecialCaseList::isIn(const Function& F, const StringRef Category) const {
- return isIn(*F.getParent(), Category) ||
- inSectionCategory("fun", F.getName(), Category);
-}
-
-static StringRef GetGlobalTypeString(const GlobalValue &G) {
- // Types of GlobalVariables are always pointer types.
- Type *GType = G.getType()->getElementType();
- // For now we support blacklisting struct types only.
- if (StructType *SGType = dyn_cast<StructType>(GType)) {
- if (!SGType->isLiteral())
- return SGType->getName();
- }
- return "<unknown type>";
-}
-
-bool SpecialCaseList::isIn(const GlobalVariable &G,
- const StringRef Category) const {
- return isIn(*G.getParent(), Category) ||
- inSectionCategory("global", G.getName(), Category) ||
- inSectionCategory("type", GetGlobalTypeString(G), Category);
-}
-
-bool SpecialCaseList::isIn(const GlobalAlias &GA,
- const StringRef Category) const {
- if (isIn(*GA.getParent(), Category))
- return true;
-
- if (isa<FunctionType>(GA.getType()->getElementType()))
- return inSectionCategory("fun", GA.getName(), Category);
-
- return inSectionCategory("global", GA.getName(), Category) ||
- inSectionCategory("type", GetGlobalTypeString(GA), Category);
-}
-
-bool SpecialCaseList::isIn(const Module &M, const StringRef Category) const {
- return inSectionCategory("src", M.getModuleIdentifier(), Category);
-}
+SpecialCaseList::~SpecialCaseList() {}
-bool SpecialCaseList::inSectionCategory(const StringRef Section,
- const StringRef Query,
- const StringRef Category) const {
+bool SpecialCaseList::inSection(const StringRef Section, const StringRef Query,
+ const StringRef Category) const {
StringMap<StringMap<Entry> >::const_iterator I = Entries.find(Section);
if (I == Entries.end()) return false;
StringMap<Entry>::const_iterator II = I->second.find(Category);
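The SpecialCaseList hunks above follow the broader LLVM 3.5 cleanup that replaces OwningPtr and llvm::error_code with std::unique_ptr and std::error_code, with MemoryBuffer::getFile now returning ErrorOr<std::unique_ptr<MemoryBuffer>>. A minimal caller-side sketch of that idiom, assuming the LLVM 3.5 Support headers; the function and path are illustrative:

#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
#include <system_error>

int readList(const char *Path) {
  // getFile() returns ErrorOr<std::unique_ptr<MemoryBuffer>> in LLVM 3.5.
  llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> FileOrErr =
      llvm::MemoryBuffer::getFile(Path);
  if (std::error_code EC = FileOrErr.getError()) {
    llvm::errs() << "Can't open file '" << Path << "': " << EC.message() << "\n";
    return 1;
  }
  std::unique_ptr<llvm::MemoryBuffer> &Buf = FileOrErr.get();
  llvm::outs() << "Read " << Buf->getBufferSize() << " bytes\n";
  return 0;
}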
diff --git a/contrib/llvm/lib/Support/Statistic.cpp b/contrib/llvm/lib/Support/Statistic.cpp
index 9c28176..56c3b0f 100644
--- a/contrib/llvm/lib/Support/Statistic.cpp
+++ b/contrib/llvm/lib/Support/Statistic.cpp
@@ -84,20 +84,6 @@ void Statistic::RegisterStatistic() {
}
}
-namespace {
-
-struct NameCompare {
- bool operator()(const Statistic *LHS, const Statistic *RHS) const {
- int Cmp = std::strcmp(LHS->getName(), RHS->getName());
- if (Cmp != 0) return Cmp < 0;
-
- // Secondary key is the description.
- return std::strcmp(LHS->getDesc(), RHS->getDesc()) < 0;
- }
-};
-
-}
-
// Print information when destroyed, iff command line option is specified.
StatisticInfo::~StatisticInfo() {
llvm::PrintStatistics();
@@ -124,7 +110,14 @@ void llvm::PrintStatistics(raw_ostream &OS) {
}
// Sort the fields by name.
- std::stable_sort(Stats.Stats.begin(), Stats.Stats.end(), NameCompare());
+ std::stable_sort(Stats.Stats.begin(), Stats.Stats.end(),
+ [](const Statistic *LHS, const Statistic *RHS) {
+ if (int Cmp = std::strcmp(LHS->getName(), RHS->getName()))
+ return Cmp < 0;
+
+ // Secondary key is the description.
+ return std::strcmp(LHS->getDesc(), RHS->getDesc()) < 0;
+ });
// Print out the statistics header...
OS << "===" << std::string(73, '-') << "===\n"
diff --git a/contrib/llvm/lib/Support/StreamableMemoryObject.cpp b/contrib/llvm/lib/Support/StreamableMemoryObject.cpp
index 2ed7c5c..5cb0680 100644
--- a/contrib/llvm/lib/Support/StreamableMemoryObject.cpp
+++ b/contrib/llvm/lib/Support/StreamableMemoryObject.cpp
@@ -10,6 +10,7 @@
#include "llvm/Support/StreamableMemoryObject.h"
#include "llvm/Support/Compiler.h"
#include <cassert>
+#include <cstddef>
#include <cstring>
@@ -24,20 +25,18 @@ public:
assert(LastChar >= FirstChar && "Invalid start/end range");
}
- virtual uint64_t getBase() const LLVM_OVERRIDE { return 0; }
- virtual uint64_t getExtent() const LLVM_OVERRIDE {
+ uint64_t getBase() const override { return 0; }
+ uint64_t getExtent() const override {
return LastChar - FirstChar;
}
- virtual int readByte(uint64_t address, uint8_t* ptr) const LLVM_OVERRIDE;
- virtual int readBytes(uint64_t address,
- uint64_t size,
- uint8_t *buf) const LLVM_OVERRIDE;
- virtual const uint8_t *getPointer(uint64_t address,
- uint64_t size) const LLVM_OVERRIDE;
- virtual bool isValidAddress(uint64_t address) const LLVM_OVERRIDE {
+ int readByte(uint64_t address, uint8_t* ptr) const override;
+ int readBytes(uint64_t address, uint64_t size,
+ uint8_t *buf) const override;
+ const uint8_t *getPointer(uint64_t address, uint64_t size) const override;
+ bool isValidAddress(uint64_t address) const override {
return validAddress(address);
}
- virtual bool isObjectEnd(uint64_t address) const LLVM_OVERRIDE {
+ bool isObjectEnd(uint64_t address) const override {
return objectEnd(address);
}
@@ -48,10 +47,10 @@ private:
// These are implemented as inline functions here to avoid multiple virtual
// calls per public function
bool validAddress(uint64_t address) const {
- return static_cast<ptrdiff_t>(address) < LastChar - FirstChar;
+ return static_cast<std::ptrdiff_t>(address) < LastChar - FirstChar;
}
bool objectEnd(uint64_t address) const {
- return static_cast<ptrdiff_t>(address) == LastChar - FirstChar;
+ return static_cast<std::ptrdiff_t>(address) == LastChar - FirstChar;
}
RawMemoryObject(const RawMemoryObject&) LLVM_DELETED_FUNCTION;
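Here the LLVM_OVERRIDE compatibility macro gives way to the C++11 override keyword, so a method that stops matching a base-class virtual now fails to compile instead of silently hiding it. A minimal sketch; the types are illustrative:

#include <cstdint>

struct MemoryObjectBase {
  virtual ~MemoryObjectBase() {}
  virtual uint64_t getExtent() const = 0;
};

struct RawObject : MemoryObjectBase {
  // 'override' documents the intent and turns a signature mismatch
  // (e.g. dropping 'const') into a compile-time error.
  uint64_t getExtent() const override { return 42; }
};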
diff --git a/contrib/llvm/lib/Support/StringMap.cpp b/contrib/llvm/lib/Support/StringMap.cpp
index 9ac1f86..ddb7349 100644
--- a/contrib/llvm/lib/Support/StringMap.cpp
+++ b/contrib/llvm/lib/Support/StringMap.cpp
@@ -27,7 +27,7 @@ StringMapImpl::StringMapImpl(unsigned InitSize, unsigned itemSize) {
}
// Otherwise, initialize it with zero buckets to avoid the allocation.
- TheTable = 0;
+ TheTable = nullptr;
NumBuckets = 0;
NumItems = 0;
NumTombstones = 0;
@@ -70,7 +70,7 @@ unsigned StringMapImpl::LookupBucketFor(StringRef Name) {
while (1) {
StringMapEntryBase *BucketItem = TheTable[BucketNo];
// If we found an empty bucket, this key isn't in the table yet, return it.
- if (LLVM_LIKELY(BucketItem == 0)) {
+ if (LLVM_LIKELY(!BucketItem)) {
// If we found a tombstone, we want to reuse the tombstone instead of an
// empty bucket. This reduces probing.
if (FirstTombstone != -1) {
@@ -124,7 +124,7 @@ int StringMapImpl::FindKey(StringRef Key) const {
while (1) {
StringMapEntryBase *BucketItem = TheTable[BucketNo];
// If we found an empty bucket, this key isn't in the table yet, return.
- if (LLVM_LIKELY(BucketItem == 0))
+ if (LLVM_LIKELY(!BucketItem))
return -1;
if (BucketItem == getTombstoneVal()) {
@@ -166,7 +166,7 @@ void StringMapImpl::RemoveKey(StringMapEntryBase *V) {
/// table, returning it. If the key is not in the table, this returns null.
StringMapEntryBase *StringMapImpl::RemoveKey(StringRef Key) {
int Bucket = FindKey(Key);
- if (Bucket == -1) return 0;
+ if (Bucket == -1) return nullptr;
StringMapEntryBase *Result = TheTable[Bucket];
TheTable[Bucket] = getTombstoneVal();
@@ -181,7 +181,7 @@ StringMapEntryBase *StringMapImpl::RemoveKey(StringRef Key) {
/// RehashTable - Grow the table, redistributing values into the buckets with
/// the appropriate mod-of-hashtable-size.
-void StringMapImpl::RehashTable() {
+unsigned StringMapImpl::RehashTable(unsigned BucketNo) {
unsigned NewSize;
unsigned *HashTable = (unsigned *)(TheTable + NumBuckets + 1);
@@ -193,9 +193,10 @@ void StringMapImpl::RehashTable() {
} else if (NumBuckets-(NumItems+NumTombstones) <= NumBuckets/8) {
NewSize = NumBuckets;
} else {
- return;
+ return BucketNo;
}
+ unsigned NewBucketNo = BucketNo;
// Allocate one extra bucket which will always be non-empty. This allows the
// iterators to stop at end.
StringMapEntryBase **NewTableArray =
@@ -212,9 +213,11 @@ void StringMapImpl::RehashTable() {
// Fast case, bucket available.
unsigned FullHash = HashTable[I];
unsigned NewBucket = FullHash & (NewSize-1);
- if (NewTableArray[NewBucket] == 0) {
+ if (!NewTableArray[NewBucket]) {
NewTableArray[FullHash & (NewSize-1)] = Bucket;
NewHashArray[FullHash & (NewSize-1)] = FullHash;
+ if (I == BucketNo)
+ NewBucketNo = NewBucket;
continue;
}
@@ -227,6 +230,8 @@ void StringMapImpl::RehashTable() {
// Finally found a slot. Fill it in.
NewTableArray[NewBucket] = Bucket;
NewHashArray[NewBucket] = FullHash;
+ if (I == BucketNo)
+ NewBucketNo = NewBucket;
}
}
@@ -235,4 +240,5 @@ void StringMapImpl::RehashTable() {
TheTable = NewTableArray;
NumBuckets = NewSize;
NumTombstones = 0;
+ return NewBucketNo;
}
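RehashTable now takes the bucket index the caller is working with and returns where that bucket lands after the table grows, so insertion code keeps a valid slot across the rehash. A simplified linear-probing sketch of the same idea, not LLVM's actual implementation:

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Grow 'Table' to NewSize and return the new index of the slot that
// previously lived at 'BucketNo'. Empty slots are empty strings.
static unsigned rehash(std::vector<std::string> &Table, unsigned NewSize,
                       unsigned BucketNo) {
  std::vector<std::string> NewTable(NewSize);
  unsigned NewBucketNo = BucketNo;
  for (unsigned I = 0, E = Table.size(); I != E; ++I) {
    if (Table[I].empty())
      continue;
    unsigned Slot = std::hash<std::string>()(Table[I]) % NewSize;
    while (!NewTable[Slot].empty())          // linear probing
      Slot = (Slot + 1) % NewSize;
    NewTable[Slot] = Table[I];
    if (I == BucketNo)                       // track the slot we care about
      NewBucketNo = Slot;
  }
  Table.swap(NewTable);
  return NewBucketNo;
}

int main() {
  std::vector<std::string> Table(4);
  Table[1] = "foo";
  Table[3] = "bar";
  unsigned Where = rehash(Table, 8, 3);      // where did "bar" go?
  std::cout << "bar is now at bucket " << Where << "\n";
}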
diff --git a/contrib/llvm/lib/Support/StringPool.cpp b/contrib/llvm/lib/Support/StringPool.cpp
index ff607cf..76faabc 100644
--- a/contrib/llvm/lib/Support/StringPool.cpp
+++ b/contrib/llvm/lib/Support/StringPool.cpp
@@ -27,7 +27,7 @@ PooledStringPtr StringPool::intern(StringRef Key) {
if (I != InternTable.end())
return PooledStringPtr(&*I);
- entry_t *S = entry_t::Create(Key.begin(), Key.end());
+ entry_t *S = entry_t::Create(Key);
S->getValue().Pool = this;
InternTable.insert(S);
diff --git a/contrib/llvm/lib/Support/StringRef.cpp b/contrib/llvm/lib/Support/StringRef.cpp
index bfae754..cde8258 100644
--- a/contrib/llvm/lib/Support/StringRef.cpp
+++ b/contrib/llvm/lib/Support/StringRef.cpp
@@ -10,7 +10,6 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Hashing.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/edit_distance.h"
#include <bitset>
@@ -282,7 +281,7 @@ void StringRef::split(SmallVectorImpl<StringRef> &A,
// rest.data() is used to distinguish cases like "a," that splits into
// "a" + "" and "a" that splits into "a" + 0.
for (int splits = 0;
- rest.data() != NULL && (MaxSplit < 0 || splits < MaxSplit);
+ rest.data() != nullptr && (MaxSplit < 0 || splits < MaxSplit);
++splits) {
std::pair<StringRef, StringRef> p = rest.split(Separators);
@@ -291,7 +290,7 @@ void StringRef::split(SmallVectorImpl<StringRef> &A,
rest = p.second;
}
// If we have a tail left, add it.
- if (rest.data() != NULL && (rest.size() != 0 || KeepEmpty))
+ if (rest.data() != nullptr && (rest.size() != 0 || KeepEmpty))
A.push_back(rest);
}
diff --git a/contrib/llvm/lib/Support/TargetRegistry.cpp b/contrib/llvm/lib/Support/TargetRegistry.cpp
index 0c90c17..f691883 100644
--- a/contrib/llvm/lib/Support/TargetRegistry.cpp
+++ b/contrib/llvm/lib/Support/TargetRegistry.cpp
@@ -17,7 +17,7 @@
using namespace llvm;
// Clients are responsible for avoiding race conditions in registration.
-static Target *FirstTarget = 0;
+static Target *FirstTarget = nullptr;
TargetRegistry::iterator TargetRegistry::begin() {
return iterator(FirstTarget);
@@ -29,7 +29,7 @@ const Target *TargetRegistry::lookupTarget(const std::string &ArchName,
// Allocate target machine. First, check whether the user has explicitly
// specified an architecture to compile for. If so we have to look it up by
// name, because it might be a backend that has no mapping to a target triple.
- const Target *TheTarget = 0;
+ const Target *TheTarget = nullptr;
if (!ArchName.empty()) {
for (TargetRegistry::iterator it = TargetRegistry::begin(),
ie = TargetRegistry::end(); it != ie; ++it) {
@@ -41,7 +41,7 @@ const Target *TargetRegistry::lookupTarget(const std::string &ArchName,
if (!TheTarget) {
Error = "error: invalid target '" + ArchName + "'.\n";
- return 0;
+ return nullptr;
}
// Adjust the triple to match (if known), otherwise stick with the
@@ -53,11 +53,11 @@ const Target *TargetRegistry::lookupTarget(const std::string &ArchName,
// Get the target specific parser.
std::string TempError;
TheTarget = TargetRegistry::lookupTarget(TheTriple.getTriple(), TempError);
- if (TheTarget == 0) {
+ if (!TheTarget) {
Error = ": error: unable to get target for '"
+ TheTriple.getTriple()
+ "', see --version and --triple.\n";
- return 0;
+ return nullptr;
}
}
@@ -69,44 +69,36 @@ const Target *TargetRegistry::lookupTarget(const std::string &TT,
// Provide special warning when no targets are initialized.
if (begin() == end()) {
Error = "Unable to find target for this triple (no targets are registered)";
- return 0;
+ return nullptr;
}
- const Target *Best = 0, *EquallyBest = 0;
- unsigned BestQuality = 0;
+ const Target *Matching = nullptr;
+ Triple::ArchType Arch = Triple(TT).getArch();
for (iterator it = begin(), ie = end(); it != ie; ++it) {
- if (unsigned Qual = it->TripleMatchQualityFn(TT)) {
- if (!Best || Qual > BestQuality) {
- Best = &*it;
- EquallyBest = 0;
- BestQuality = Qual;
- } else if (Qual == BestQuality)
- EquallyBest = &*it;
+ if (it->ArchMatchFn(Arch)) {
+ if (Matching) {
+ Error = std::string("Cannot choose between targets \"") +
+ Matching->Name + "\" and \"" + it->Name + "\"";
+ return nullptr;
+ }
+ Matching = &*it;
}
}
- if (!Best) {
+ if (!Matching) {
Error = "No available targets are compatible with this triple, "
"see -version for the available targets.";
- return 0;
+ return nullptr;
}
- // Otherwise, take the best target, but make sure we don't have two equally
- // good best targets.
- if (EquallyBest) {
- Error = std::string("Cannot choose between targets \"") +
- Best->Name + "\" and \"" + EquallyBest->Name + "\"";
- return 0;
- }
-
- return Best;
+ return Matching;
}
void TargetRegistry::RegisterTarget(Target &T,
const char *Name,
const char *ShortDesc,
- Target::TripleMatchQualityFnTy TQualityFn,
+ Target::ArchMatchFnTy ArchMatchFn,
bool HasJIT) {
- assert(Name && ShortDesc && TQualityFn &&
+ assert(Name && ShortDesc && ArchMatchFn &&
"Missing required target information!");
// Check if this target has already been initialized, we allow this as a
@@ -120,21 +112,10 @@ void TargetRegistry::RegisterTarget(Target &T,
T.Name = Name;
T.ShortDesc = ShortDesc;
- T.TripleMatchQualityFn = TQualityFn;
+ T.ArchMatchFn = ArchMatchFn;
T.HasJIT = HasJIT;
}
-const Target *TargetRegistry::getClosestTargetForJIT(std::string &Error) {
- const Target *TheTarget = lookupTarget(sys::getDefaultTargetTriple(), Error);
-
- if (TheTarget && !TheTarget->hasJIT()) {
- Error = "No JIT compatible target available for this host";
- return 0;
- }
-
- return TheTarget;
-}
-
static int TargetArraySortFn(const std::pair<StringRef, const Target *> *LHS,
const std::pair<StringRef, const Target *> *RHS) {
return LHS->first.compare(RHS->first);
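The TargetRegistry change drops the TripleMatchQualityFn scoring in favour of an exact ArchMatchFn: exactly one registered target may claim the architecture, and two matches are reported as an ambiguity. A generic sketch of that exactly-one-match scan; the container and predicate are illustrative:

#include <iostream>
#include <string>
#include <vector>

struct Target {
  std::string Name;
  bool (*Matches)(const std::string &Arch);
};

// Return the unique matching target, or null and set Error.
static const Target *lookup(const std::vector<Target> &Targets,
                            const std::string &Arch, std::string &Error) {
  const Target *Matching = nullptr;
  for (const Target &T : Targets) {
    if (!T.Matches(Arch))
      continue;
    if (Matching) {
      Error = "Cannot choose between targets \"" + Matching->Name +
              "\" and \"" + T.Name + "\"";
      return nullptr;
    }
    Matching = &T;
  }
  if (!Matching)
    Error = "No available targets are compatible with this architecture";
  return Matching;
}

int main() {
  std::vector<Target> Targets = {
      {"X86", [](const std::string &A) { return A == "x86_64"; }},
      {"ARM", [](const std::string &A) { return A == "arm"; }}};
  std::string Error;
  if (const Target *T = lookup(Targets, "x86_64", Error))
    std::cout << "selected " << T->Name << "\n";
  else
    std::cout << Error << "\n";
}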
diff --git a/contrib/llvm/lib/Support/ThreadLocal.cpp b/contrib/llvm/lib/Support/ThreadLocal.cpp
index 868b6ea..2dec9eb 100644
--- a/contrib/llvm/lib/Support/ThreadLocal.cpp
+++ b/contrib/llvm/lib/Support/ThreadLocal.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ThreadLocal.h"
//===----------------------------------------------------------------------===//
@@ -26,7 +27,7 @@ using namespace sys;
ThreadLocalImpl::ThreadLocalImpl() : data() { }
ThreadLocalImpl::~ThreadLocalImpl() { }
void ThreadLocalImpl::setInstance(const void* d) {
- typedef int SIZE_TOO_BIG[sizeof(d) <= sizeof(data) ? 1 : -1];
+ static_assert(sizeof(d) <= sizeof(data), "size too big");
void **pd = reinterpret_cast<void**>(&data);
*pd = const_cast<void*>(d);
}
@@ -50,9 +51,9 @@ namespace llvm {
using namespace sys;
ThreadLocalImpl::ThreadLocalImpl() : data() {
- typedef int SIZE_TOO_BIG[sizeof(pthread_key_t) <= sizeof(data) ? 1 : -1];
+ static_assert(sizeof(pthread_key_t) <= sizeof(data), "size too big");
pthread_key_t* key = reinterpret_cast<pthread_key_t*>(&data);
- int errorcode = pthread_key_create(key, NULL);
+ int errorcode = pthread_key_create(key, nullptr);
assert(errorcode == 0);
(void) errorcode;
}
@@ -77,7 +78,7 @@ const void* ThreadLocalImpl::getInstance() {
}
void ThreadLocalImpl::removeInstance() {
- setInstance(0);
+ setInstance(nullptr);
}
}
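The ThreadLocal hunks retire the pre-C++11 trick of typedef'ing an array whose size goes negative when a check fails, using static_assert instead. A side-by-side sketch; the storage and function are illustrative:

void *data[2];   // stand-in for ThreadLocalImpl::data

void store(const void *d) {
  // Pre-C++11: a negative array size forces a compile error if 'd' is too big.
  typedef int SIZE_TOO_BIG[sizeof(d) <= sizeof(data) ? 1 : -1];
  (void)sizeof(SIZE_TOO_BIG);

  // C++11: the same check, with a readable diagnostic.
  static_assert(sizeof(d) <= sizeof(data), "size too big");
}

int main() { store(nullptr); }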
diff --git a/contrib/llvm/lib/Support/Threading.cpp b/contrib/llvm/lib/Support/Threading.cpp
index 13fba2e..ca7f3f6 100644
--- a/contrib/llvm/lib/Support/Threading.cpp
+++ b/contrib/llvm/lib/Support/Threading.cpp
@@ -7,7 +7,8 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements llvm_start_multithreaded() and friends.
+// This file defines helper functions for running LLVM in a multi-threaded
+// environment.
//
//===----------------------------------------------------------------------===//
@@ -19,50 +20,14 @@
using namespace llvm;
-static bool multithreaded_mode = false;
-
-static sys::Mutex* global_lock = 0;
-
-bool llvm::llvm_start_multithreaded() {
+bool llvm::llvm_is_multithreaded() {
#if LLVM_ENABLE_THREADS != 0
- assert(!multithreaded_mode && "Already multithreaded!");
- multithreaded_mode = true;
- global_lock = new sys::Mutex(true);
-
- // We fence here to ensure that all initialization is complete BEFORE we
- // return from llvm_start_multithreaded().
- sys::MemoryFence();
return true;
#else
return false;
#endif
}
-void llvm::llvm_stop_multithreaded() {
-#if LLVM_ENABLE_THREADS != 0
- assert(multithreaded_mode && "Not currently multithreaded!");
-
- // We fence here to insure that all threaded operations are complete BEFORE we
- // return from llvm_stop_multithreaded().
- sys::MemoryFence();
-
- multithreaded_mode = false;
- delete global_lock;
-#endif
-}
-
-bool llvm::llvm_is_multithreaded() {
- return multithreaded_mode;
-}
-
-void llvm::llvm_acquire_global_lock() {
- if (multithreaded_mode) global_lock->acquire();
-}
-
-void llvm::llvm_release_global_lock() {
- if (multithreaded_mode) global_lock->release();
-}
-
#if LLVM_ENABLE_THREADS != 0 && defined(HAVE_PTHREAD_H)
#include <pthread.h>
@@ -73,7 +38,7 @@ struct ThreadInfo {
static void *ExecuteOnThread_Dispatch(void *Arg) {
ThreadInfo *TI = reinterpret_cast<ThreadInfo*>(Arg);
TI->UserFn(TI->UserData);
- return 0;
+ return nullptr;
}
void llvm::llvm_execute_on_thread(void (*Fn)(void*), void *UserData,
@@ -97,13 +62,13 @@ void llvm::llvm_execute_on_thread(void (*Fn)(void*), void *UserData,
goto error;
// Wait for the thread and clean up.
- ::pthread_join(Thread, 0);
+ ::pthread_join(Thread, nullptr);
error:
::pthread_attr_destroy(&Attr);
}
#elif LLVM_ENABLE_THREADS!=0 && defined(LLVM_ON_WIN32)
-#include "Windows/Windows.h"
+#include "Windows/WindowsSupport.h"
#include <process.h>
struct ThreadInfo {
diff --git a/contrib/llvm/lib/Support/TimeValue.cpp b/contrib/llvm/lib/Support/TimeValue.cpp
index bd8af17..4a70797 100644
--- a/contrib/llvm/lib/Support/TimeValue.cpp
+++ b/contrib/llvm/lib/Support/TimeValue.cpp
@@ -53,7 +53,7 @@ TimeValue::normalize( void ) {
}
-/// Include the platform specific portion of TimeValue class
+/// Include the platform-specific portion of TimeValue class
#ifdef LLVM_ON_UNIX
#include "Unix/TimeValue.inc"
#endif
diff --git a/contrib/llvm/lib/Support/Timer.cpp b/contrib/llvm/lib/Support/Timer.cpp
index 100b21e..210bda7 100644
--- a/contrib/llvm/lib/Support/Timer.cpp
+++ b/contrib/llvm/lib/Support/Timer.cpp
@@ -12,13 +12,14 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/Timer.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Mutex.h"
+#include "llvm/Support/MutexGuard.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -66,8 +67,8 @@ raw_ostream *llvm::CreateInfoOutputFile() {
// compensate for this, the test-suite Makefiles have code to delete the
// info output file before running commands which write to it.
std::string Error;
- raw_ostream *Result =
- new raw_fd_ostream(OutputFilename.c_str(), Error, sys::fs::F_Append);
+ raw_ostream *Result = new raw_fd_ostream(
+ OutputFilename.c_str(), Error, sys::fs::F_Append | sys::fs::F_Text);
if (Error.empty())
return Result;
@@ -78,20 +79,19 @@ raw_ostream *llvm::CreateInfoOutputFile() {
}
-static TimerGroup *DefaultTimerGroup = 0;
+static TimerGroup *DefaultTimerGroup = nullptr;
static TimerGroup *getDefaultTimerGroup() {
TimerGroup *tmp = DefaultTimerGroup;
sys::MemoryFence();
if (tmp) return tmp;
- llvm_acquire_global_lock();
+ sys::SmartScopedLock<true> Lock(*TimerLock);
tmp = DefaultTimerGroup;
if (!tmp) {
tmp = new TimerGroup("Miscellaneous Ungrouped Timers");
sys::MemoryFence();
DefaultTimerGroup = tmp;
}
- llvm_release_global_lock();
return tmp;
}
@@ -101,7 +101,7 @@ static TimerGroup *getDefaultTimerGroup() {
//===----------------------------------------------------------------------===//
void Timer::init(StringRef N) {
- assert(TG == 0 && "Timer already initialized");
+ assert(!TG && "Timer already initialized");
Name.assign(N.begin(), N.end());
Started = false;
TG = getDefaultTimerGroup();
@@ -109,7 +109,7 @@ void Timer::init(StringRef N) {
}
void Timer::init(StringRef N, TimerGroup &tg) {
- assert(TG == 0 && "Timer already initialized");
+ assert(!TG && "Timer already initialized");
Name.assign(N.begin(), N.end());
Started = false;
TG = &tg;
@@ -236,11 +236,11 @@ static Timer &getNamedRegionTimer(StringRef Name) {
NamedRegionTimer::NamedRegionTimer(StringRef Name,
bool Enabled)
- : TimeRegion(!Enabled ? 0 : &getNamedRegionTimer(Name)) {}
+ : TimeRegion(!Enabled ? nullptr : &getNamedRegionTimer(Name)) {}
NamedRegionTimer::NamedRegionTimer(StringRef Name, StringRef GroupName,
bool Enabled)
- : TimeRegion(!Enabled ? 0 : &NamedGroupedTimers->get(Name, GroupName)) {}
+ : TimeRegion(!Enabled ? nullptr : &NamedGroupedTimers->get(Name, GroupName)){}
//===----------------------------------------------------------------------===//
// TimerGroup Implementation
@@ -248,10 +248,10 @@ NamedRegionTimer::NamedRegionTimer(StringRef Name, StringRef GroupName,
/// TimerGroupList - This is the global list of TimerGroups, maintained by the
/// TimerGroup ctor/dtor and is protected by the TimerLock lock.
-static TimerGroup *TimerGroupList = 0;
+static TimerGroup *TimerGroupList = nullptr;
TimerGroup::TimerGroup(StringRef name)
- : Name(name.begin(), name.end()), FirstTimer(0) {
+ : Name(name.begin(), name.end()), FirstTimer(nullptr) {
// Add the group to TimerGroupList.
sys::SmartScopedLock<true> L(*TimerLock);
@@ -265,7 +265,7 @@ TimerGroup::TimerGroup(StringRef name)
TimerGroup::~TimerGroup() {
// If the timer group is destroyed before the timers it owns, accumulate and
// print the timing data.
- while (FirstTimer != 0)
+ while (FirstTimer)
removeTimer(*FirstTimer);
// Remove the group from the TimerGroupList.
@@ -283,7 +283,7 @@ void TimerGroup::removeTimer(Timer &T) {
if (T.Started)
TimersToPrint.push_back(std::make_pair(T.Time, T.Name));
- T.TG = 0;
+ T.TG = nullptr;
// Unlink the timer from our list.
*T.Prev = T.Next;
@@ -292,7 +292,7 @@ void TimerGroup::removeTimer(Timer &T) {
// Print the report when all timers in this group are destroyed if some of
// them were started.
- if (FirstTimer != 0 || TimersToPrint.empty())
+ if (FirstTimer || TimersToPrint.empty())
return;
raw_ostream *OutStream = CreateInfoOutputFile();
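With llvm_acquire_global_lock removed, getDefaultTimerGroup guards its lazy initialization with a scoped lock on the existing TimerLock, keeping a fenced fast path before the lock. A plain standard-library sketch of mutex-guarded lazy initialization; the names are illustrative, and std::call_once would be an alternative not used by this patch:

#include <mutex>

struct TimerGroup { /* ... */ };

static std::mutex Lock;
static TimerGroup *DefaultGroup = nullptr;

TimerGroup *getDefaultGroup() {
  std::lock_guard<std::mutex> Guard(Lock);   // scoped lock, released on return
  if (!DefaultGroup)
    DefaultGroup = new TimerGroup();
  return DefaultGroup;
}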
diff --git a/contrib/llvm/lib/Support/ToolOutputFile.cpp b/contrib/llvm/lib/Support/ToolOutputFile.cpp
index 5c1268a..b5fb20f 100644
--- a/contrib/llvm/lib/Support/ToolOutputFile.cpp
+++ b/contrib/llvm/lib/Support/ToolOutputFile.cpp
@@ -25,10 +25,8 @@ tool_output_file::CleanupInstaller::CleanupInstaller(const char *filename)
tool_output_file::CleanupInstaller::~CleanupInstaller() {
// Delete the file if the client hasn't told us not to.
- if (!Keep && Filename != "-") {
- bool Existed;
- sys::fs::remove(Filename, Existed);
- }
+ if (!Keep && Filename != "-")
+ sys::fs::remove(Filename);
// Ok, the file is successfully written and closed, or deleted. There's no
// further need to clean it up on signals.
diff --git a/contrib/llvm/lib/Support/Triple.cpp b/contrib/llvm/lib/Support/Triple.cpp
index 6c978a0..ca68775 100644
--- a/contrib/llvm/lib/Support/Triple.cpp
+++ b/contrib/llvm/lib/Support/Triple.cpp
@@ -19,32 +19,38 @@ const char *Triple::getArchTypeName(ArchType Kind) {
switch (Kind) {
case UnknownArch: return "unknown";
- case aarch64: return "aarch64";
- case arm: return "arm";
- case hexagon: return "hexagon";
- case mips: return "mips";
- case mipsel: return "mipsel";
- case mips64: return "mips64";
- case mips64el:return "mips64el";
- case msp430: return "msp430";
- case ppc64: return "powerpc64";
- case ppc64le: return "powerpc64le";
- case ppc: return "powerpc";
- case r600: return "r600";
- case sparc: return "sparc";
- case sparcv9: return "sparcv9";
- case systemz: return "s390x";
- case tce: return "tce";
- case thumb: return "thumb";
- case x86: return "i386";
- case x86_64: return "x86_64";
- case xcore: return "xcore";
- case nvptx: return "nvptx";
- case nvptx64: return "nvptx64";
- case le32: return "le32";
- case amdil: return "amdil";
- case spir: return "spir";
- case spir64: return "spir64";
+ case aarch64: return "aarch64";
+ case aarch64_be: return "aarch64_be";
+ case arm: return "arm";
+ case armeb: return "armeb";
+ case arm64: return "arm64";
+ case arm64_be: return "arm64_be";
+ case hexagon: return "hexagon";
+ case mips: return "mips";
+ case mipsel: return "mipsel";
+ case mips64: return "mips64";
+ case mips64el: return "mips64el";
+ case msp430: return "msp430";
+ case ppc64: return "powerpc64";
+ case ppc64le: return "powerpc64le";
+ case ppc: return "powerpc";
+ case r600: return "r600";
+ case sparc: return "sparc";
+ case sparcv9: return "sparcv9";
+ case systemz: return "s390x";
+ case tce: return "tce";
+ case thumb: return "thumb";
+ case thumbeb: return "thumbeb";
+ case x86: return "i386";
+ case x86_64: return "x86_64";
+ case xcore: return "xcore";
+ case nvptx: return "nvptx";
+ case nvptx64: return "nvptx64";
+ case le32: return "le32";
+ case amdil: return "amdil";
+ case spir: return "spir";
+ case spir64: return "spir64";
+ case kalimba: return "kalimba";
}
llvm_unreachable("Invalid ArchType!");
@@ -53,42 +59,49 @@ const char *Triple::getArchTypeName(ArchType Kind) {
const char *Triple::getArchTypePrefix(ArchType Kind) {
switch (Kind) {
default:
- return 0;
+ return nullptr;
- case aarch64: return "aarch64";
+ case arm64:
+ case arm64_be:
+ case aarch64:
+ case aarch64_be: return "aarch64";
case arm:
- case thumb: return "arm";
+ case armeb:
+ case thumb:
+ case thumbeb: return "arm";
case ppc64:
case ppc64le:
- case ppc: return "ppc";
+ case ppc: return "ppc";
case mips:
case mipsel:
case mips64:
- case mips64el:return "mips";
+ case mips64el: return "mips";
- case hexagon: return "hexagon";
+ case hexagon: return "hexagon";
- case r600: return "r600";
+ case r600: return "r600";
case sparcv9:
- case sparc: return "sparc";
+ case sparc: return "sparc";
- case systemz: return "systemz";
+ case systemz: return "systemz";
case x86:
- case x86_64: return "x86";
+ case x86_64: return "x86";
+
+ case xcore: return "xcore";
- case xcore: return "xcore";
+ case nvptx: return "nvptx";
+ case nvptx64: return "nvptx";
- case nvptx: return "nvptx";
- case nvptx64: return "nvptx";
- case le32: return "le32";
- case amdil: return "amdil";
- case spir: return "spir";
- case spir64: return "spir";
+ case le32: return "le32";
+ case amdil: return "amdil";
+ case spir: return "spir";
+ case spir64: return "spir";
+ case kalimba: return "kalimba";
}
}
@@ -103,7 +116,10 @@ const char *Triple::getVendorTypeName(VendorType Kind) {
case BGQ: return "bgq";
case Freescale: return "fsl";
case IBM: return "ibm";
+ case ImaginationTechnologies: return "img";
+ case MipsTechnologies: return "mti";
case NVIDIA: return "nvidia";
+ case CSR: return "csr";
}
llvm_unreachable("Invalid VendorType!");
@@ -127,7 +143,7 @@ const char *Triple::getOSTypeName(OSType Kind) {
case NetBSD: return "netbsd";
case OpenBSD: return "openbsd";
case Solaris: return "solaris";
- case Win32: return "win32";
+ case Win32: return "windows";
case Haiku: return "haiku";
case Minix: return "minix";
case RTEMS: return "rtems";
@@ -149,10 +165,13 @@ const char *Triple::getEnvironmentTypeName(EnvironmentType Kind) {
case GNUEABIHF: return "gnueabihf";
case GNUEABI: return "gnueabi";
case GNUX32: return "gnux32";
+ case CODE16: return "code16";
case EABI: return "eabi";
- case MachO: return "macho";
+ case EABIHF: return "eabihf";
case Android: return "android";
- case ELF: return "elf";
+ case MSVC: return "msvc";
+ case Itanium: return "itanium";
+ case Cygnus: return "cygnus";
}
llvm_unreachable("Invalid EnvironmentType!");
@@ -161,7 +180,11 @@ const char *Triple::getEnvironmentTypeName(EnvironmentType Kind) {
Triple::ArchType Triple::getArchTypeForLLVMName(StringRef Name) {
return StringSwitch<Triple::ArchType>(Name)
.Case("aarch64", aarch64)
+ .Case("aarch64_be", aarch64_be)
.Case("arm", arm)
+ .Case("armeb", armeb)
+ .Case("arm64", arm64)
+ .Case("arm64_be", arm64_be)
.Case("mips", mips)
.Case("mipsel", mipsel)
.Case("mips64", mips64)
@@ -178,6 +201,7 @@ Triple::ArchType Triple::getArchTypeForLLVMName(StringRef Name) {
.Case("systemz", systemz)
.Case("tce", tce)
.Case("thumb", thumb)
+ .Case("thumbeb", thumbeb)
.Case("x86", x86)
.Case("x86-64", x86_64)
.Case("xcore", xcore)
@@ -187,13 +211,14 @@ Triple::ArchType Triple::getArchTypeForLLVMName(StringRef Name) {
.Case("amdil", amdil)
.Case("spir", spir)
.Case("spir64", spir64)
+ .Case("kalimba", kalimba)
.Default(UnknownArch);
}
// Returns architecture name that is understood by the target assembler.
const char *Triple::getArchNameForAssembler() {
if (!isOSDarwin() && getVendor() != Triple::Apple)
- return NULL;
+ return nullptr;
return StringSwitch<const char*>(getArchName())
.Case("i386", "i386")
@@ -206,6 +231,9 @@ const char *Triple::getArchNameForAssembler() {
.Cases("armv5", "armv5e", "thumbv5", "thumbv5e", "armv5")
.Cases("armv6", "thumbv6", "armv6")
.Cases("armv7", "thumbv7", "armv7")
+ .Case("armeb", "armeb")
+ .Case("arm64", "arm64")
+ .Case("arm64_be", "arm64")
.Case("r600", "r600")
.Case("nvptx", "nvptx")
.Case("nvptx64", "nvptx64")
@@ -213,7 +241,7 @@ const char *Triple::getArchNameForAssembler() {
.Case("amdil", "amdil")
.Case("spir", "spir")
.Case("spir64", "spir64")
- .Default(NULL);
+ .Default(nullptr);
}
static Triple::ArchType parseArch(StringRef ArchName) {
@@ -226,12 +254,19 @@ static Triple::ArchType parseArch(StringRef ArchName) {
.Cases("powerpc64", "ppu", Triple::ppc64)
.Case("powerpc64le", Triple::ppc64le)
.Case("aarch64", Triple::aarch64)
+ .Case("aarch64_be", Triple::aarch64_be)
.Cases("arm", "xscale", Triple::arm)
// FIXME: It would be good to replace these with explicit names for all the
// various suffixes supported.
.StartsWith("armv", Triple::arm)
+ .Case("armeb", Triple::armeb)
+ .StartsWith("armebv", Triple::armeb)
.Case("thumb", Triple::thumb)
.StartsWith("thumbv", Triple::thumb)
+ .Case("thumbeb", Triple::thumbeb)
+ .StartsWith("thumbebv", Triple::thumbeb)
+ .Case("arm64", Triple::arm64)
+ .Case("arm64_be", Triple::arm64_be)
.Case("msp430", Triple::msp430)
.Cases("mips", "mipseb", "mipsallegrex", Triple::mips)
.Cases("mipsel", "mipsallegrexel", Triple::mipsel)
@@ -250,6 +285,7 @@ static Triple::ArchType parseArch(StringRef ArchName) {
.Case("amdil", Triple::amdil)
.Case("spir", Triple::spir)
.Case("spir64", Triple::spir64)
+ .Case("kalimba", Triple::kalimba)
.Default(Triple::UnknownArch);
}
@@ -262,7 +298,10 @@ static Triple::VendorType parseVendor(StringRef VendorName) {
.Case("bgq", Triple::BGQ)
.Case("fsl", Triple::Freescale)
.Case("ibm", Triple::IBM)
+ .Case("img", Triple::ImaginationTechnologies)
+ .Case("mti", Triple::MipsTechnologies)
.Case("nvidia", Triple::NVIDIA)
+ .Case("csr", Triple::CSR)
.Default(Triple::UnknownVendor);
}
@@ -283,6 +322,7 @@ static Triple::OSType parseOS(StringRef OSName) {
.StartsWith("openbsd", Triple::OpenBSD)
.StartsWith("solaris", Triple::Solaris)
.StartsWith("win32", Triple::Win32)
+ .StartsWith("windows", Triple::Win32)
.StartsWith("haiku", Triple::Haiku)
.StartsWith("minix", Triple::Minix)
.StartsWith("rtems", Triple::RTEMS)
@@ -297,17 +337,68 @@ static Triple::OSType parseOS(StringRef OSName) {
static Triple::EnvironmentType parseEnvironment(StringRef EnvironmentName) {
return StringSwitch<Triple::EnvironmentType>(EnvironmentName)
+ .StartsWith("eabihf", Triple::EABIHF)
.StartsWith("eabi", Triple::EABI)
.StartsWith("gnueabihf", Triple::GNUEABIHF)
.StartsWith("gnueabi", Triple::GNUEABI)
.StartsWith("gnux32", Triple::GNUX32)
+ .StartsWith("code16", Triple::CODE16)
.StartsWith("gnu", Triple::GNU)
- .StartsWith("macho", Triple::MachO)
.StartsWith("android", Triple::Android)
- .StartsWith("elf", Triple::ELF)
+ .StartsWith("msvc", Triple::MSVC)
+ .StartsWith("itanium", Triple::Itanium)
+ .StartsWith("cygnus", Triple::Cygnus)
.Default(Triple::UnknownEnvironment);
}
+static Triple::ObjectFormatType parseFormat(StringRef EnvironmentName) {
+ return StringSwitch<Triple::ObjectFormatType>(EnvironmentName)
+ .EndsWith("coff", Triple::COFF)
+ .EndsWith("elf", Triple::ELF)
+ .EndsWith("macho", Triple::MachO)
+ .Default(Triple::UnknownObjectFormat);
+}
+
+static Triple::SubArchType parseSubArch(StringRef SubArchName) {
+ return StringSwitch<Triple::SubArchType>(SubArchName)
+ .EndsWith("v8", Triple::ARMSubArch_v8)
+ .EndsWith("v8a", Triple::ARMSubArch_v8)
+ .EndsWith("v7", Triple::ARMSubArch_v7)
+ .EndsWith("v7a", Triple::ARMSubArch_v7)
+ .EndsWith("v7em", Triple::ARMSubArch_v7em)
+ .EndsWith("v7l", Triple::ARMSubArch_v7)
+ .EndsWith("v7m", Triple::ARMSubArch_v7m)
+ .EndsWith("v7r", Triple::ARMSubArch_v7)
+ .EndsWith("v7s", Triple::ARMSubArch_v7s)
+ .EndsWith("v6", Triple::ARMSubArch_v6)
+ .EndsWith("v6m", Triple::ARMSubArch_v6m)
+ .EndsWith("v6t2", Triple::ARMSubArch_v6t2)
+ .EndsWith("v5", Triple::ARMSubArch_v5)
+ .EndsWith("v5e", Triple::ARMSubArch_v5)
+ .EndsWith("v5t", Triple::ARMSubArch_v5)
+ .EndsWith("v5te", Triple::ARMSubArch_v5te)
+ .EndsWith("v4t", Triple::ARMSubArch_v4t)
+ .Default(Triple::NoSubArch);
+}
+
+static const char *getObjectFormatTypeName(Triple::ObjectFormatType Kind) {
+ switch (Kind) {
+ case Triple::UnknownObjectFormat: return "";
+ case Triple::COFF: return "coff";
+ case Triple::ELF: return "elf";
+ case Triple::MachO: return "macho";
+ }
+ llvm_unreachable("unknown object format type");
+}
+
+static Triple::ObjectFormatType getDefaultFormat(const Triple &T) {
+ if (T.isOSDarwin())
+ return Triple::MachO;
+ else if (T.isOSWindows())
+ return Triple::COFF;
+ return Triple::ELF;
+}
+
/// \brief Construct a triple from the string representation provided.
///
/// This stores the string representation and parses the various pieces into
@@ -315,9 +406,13 @@ static Triple::EnvironmentType parseEnvironment(StringRef EnvironmentName) {
Triple::Triple(const Twine &Str)
: Data(Str.str()),
Arch(parseArch(getArchName())),
+ SubArch(parseSubArch(getArchName())),
Vendor(parseVendor(getVendorName())),
OS(parseOS(getOSName())),
- Environment(parseEnvironment(getEnvironmentName())) {
+ Environment(parseEnvironment(getEnvironmentName())),
+ ObjectFormat(parseFormat(getEnvironmentName())) {
+ if (ObjectFormat == Triple::UnknownObjectFormat)
+ ObjectFormat = getDefaultFormat(*this);
}
/// \brief Construct a triple from string representations of the architecture,
@@ -329,9 +424,11 @@ Triple::Triple(const Twine &Str)
Triple::Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr)
: Data((ArchStr + Twine('-') + VendorStr + Twine('-') + OSStr).str()),
Arch(parseArch(ArchStr.str())),
+ SubArch(parseSubArch(ArchStr.str())),
Vendor(parseVendor(VendorStr.str())),
OS(parseOS(OSStr.str())),
- Environment() {
+ Environment(), ObjectFormat(Triple::UnknownObjectFormat) {
+ ObjectFormat = getDefaultFormat(*this);
}
/// \brief Construct a triple from string representations of the architecture,
@@ -344,9 +441,13 @@ Triple::Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr,
: Data((ArchStr + Twine('-') + VendorStr + Twine('-') + OSStr + Twine('-') +
EnvironmentStr).str()),
Arch(parseArch(ArchStr.str())),
+ SubArch(parseSubArch(ArchStr.str())),
Vendor(parseVendor(VendorStr.str())),
OS(parseOS(OSStr.str())),
- Environment(parseEnvironment(EnvironmentStr.str())) {
+ Environment(parseEnvironment(EnvironmentStr.str())),
+ ObjectFormat(parseFormat(EnvironmentStr.str())) {
+ if (ObjectFormat == Triple::UnknownObjectFormat)
+ ObjectFormat = getDefaultFormat(*this);
}
std::string Triple::normalize(StringRef Str) {
@@ -371,6 +472,9 @@ std::string Triple::normalize(StringRef Str) {
EnvironmentType Environment = UnknownEnvironment;
if (Components.size() > 3)
Environment = parseEnvironment(Components[3]);
+ ObjectFormatType ObjectFormat = UnknownObjectFormat;
+ if (Components.size() > 4)
+ ObjectFormat = parseFormat(Components[4]);
// Note which components are already in their final position. These will not
// be moved.
@@ -412,6 +516,10 @@ std::string Triple::normalize(StringRef Str) {
case 3:
Environment = parseEnvironment(Comp);
Valid = Environment != UnknownEnvironment;
+ if (!Valid) {
+ ObjectFormat = parseFormat(Comp);
+ Valid = ObjectFormat != UnknownObjectFormat;
+ }
break;
}
if (!Valid)
@@ -474,6 +582,32 @@ std::string Triple::normalize(StringRef Str) {
// Special case logic goes here. At this point Arch, Vendor and OS have the
// correct values for the computed components.
+ if (OS == Triple::Win32) {
+ Components.resize(4);
+ Components[2] = "windows";
+ if (Environment == UnknownEnvironment) {
+ if (ObjectFormat == UnknownObjectFormat || ObjectFormat == Triple::COFF)
+ Components[3] = "msvc";
+ else
+ Components[3] = getObjectFormatTypeName(ObjectFormat);
+ }
+ } else if (OS == Triple::MinGW32) {
+ Components.resize(4);
+ Components[2] = "windows";
+ Components[3] = "gnu";
+ } else if (OS == Triple::Cygwin) {
+ Components.resize(4);
+ Components[2] = "windows";
+ Components[3] = "cygnus";
+ }
+ if (OS == Triple::MinGW32 || OS == Triple::Cygwin ||
+ (OS == Triple::Win32 && Environment != UnknownEnvironment)) {
+ if (ObjectFormat != UnknownObjectFormat && ObjectFormat != Triple::COFF) {
+ Components.resize(5);
+ Components[4] = getObjectFormatTypeName(ObjectFormat);
+ }
+ }
+
// Stick the corrected components back together to form the normalized string.
std::string Normalized;
for (unsigned i = 0, e = Components.size(); i != e; ++i) {
@@ -600,15 +734,15 @@ void Triple::getiOSVersion(unsigned &Major, unsigned &Minor,
// the clang driver combines OS X and IOS support into a common Darwin
// toolchain that wants to know the iOS version number even when targeting
// OS X.
- Major = 3;
+ Major = 5;
Minor = 0;
Micro = 0;
break;
case IOS:
getOSVersion(Major, Minor, Micro);
- // Default to 3.0.
+ // Default to 5.0 (or 7.0 for arm64).
if (Major == 0)
- Major = 3;
+ Major = (getArch() == arm64) ? 7 : 5;
break;
}
}
@@ -633,6 +767,14 @@ void Triple::setEnvironment(EnvironmentType Kind) {
setEnvironmentName(getEnvironmentTypeName(Kind));
}
+void Triple::setObjectFormat(ObjectFormatType Kind) {
+ if (Environment == UnknownEnvironment)
+ return setEnvironmentName(getObjectFormatTypeName(Kind));
+
+ setEnvironmentName((getEnvironmentTypeName(Environment) + Twine("-") +
+ getObjectFormatTypeName(Kind)).str());
+}
+
void Triple::setArchName(StringRef Str) {
// Work around a miscompilation bug for Twines in gcc 4.0.3.
SmallString<64> Triple;
@@ -675,6 +817,7 @@ static unsigned getArchPointerBitWidth(llvm::Triple::ArchType Arch) {
case llvm::Triple::amdil:
case llvm::Triple::arm:
+ case llvm::Triple::armeb:
case llvm::Triple::hexagon:
case llvm::Triple::le32:
case llvm::Triple::mips:
@@ -685,12 +828,17 @@ static unsigned getArchPointerBitWidth(llvm::Triple::ArchType Arch) {
case llvm::Triple::sparc:
case llvm::Triple::tce:
case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
case llvm::Triple::x86:
case llvm::Triple::xcore:
case llvm::Triple::spir:
+ case llvm::Triple::kalimba:
return 32;
+ case llvm::Triple::arm64:
+ case llvm::Triple::arm64_be:
case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be:
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
case llvm::Triple::nvptx64:
@@ -722,6 +870,9 @@ Triple Triple::get32BitArchVariant() const {
switch (getArch()) {
case Triple::UnknownArch:
case Triple::aarch64:
+ case Triple::aarch64_be:
+ case Triple::arm64:
+ case Triple::arm64_be:
case Triple::msp430:
case Triple::systemz:
case Triple::ppc64le:
@@ -731,7 +882,9 @@ Triple Triple::get32BitArchVariant() const {
case Triple::amdil:
case Triple::spir:
case Triple::arm:
+ case Triple::armeb:
case Triple::hexagon:
+ case Triple::kalimba:
case Triple::le32:
case Triple::mips:
case Triple::mipsel:
@@ -741,6 +894,7 @@ Triple Triple::get32BitArchVariant() const {
case Triple::sparc:
case Triple::tce:
case Triple::thumb:
+ case Triple::thumbeb:
case Triple::x86:
case Triple::xcore:
// Already 32-bit.
@@ -749,7 +903,7 @@ Triple Triple::get32BitArchVariant() const {
case Triple::mips64: T.setArch(Triple::mips); break;
case Triple::mips64el: T.setArch(Triple::mipsel); break;
case Triple::nvptx64: T.setArch(Triple::nvptx); break;
- case Triple::ppc64: T.setArch(Triple::ppc); break;
+ case Triple::ppc64: T.setArch(Triple::ppc); break;
case Triple::sparcv9: T.setArch(Triple::sparc); break;
case Triple::x86_64: T.setArch(Triple::x86); break;
case Triple::spir64: T.setArch(Triple::spir); break;
@@ -763,17 +917,21 @@ Triple Triple::get64BitArchVariant() const {
case Triple::UnknownArch:
case Triple::amdil:
case Triple::arm:
+ case Triple::armeb:
case Triple::hexagon:
+ case Triple::kalimba:
case Triple::le32:
case Triple::msp430:
case Triple::r600:
case Triple::tce:
case Triple::thumb:
+ case Triple::thumbeb:
case Triple::xcore:
T.setArch(UnknownArch);
break;
case Triple::aarch64:
+ case Triple::aarch64_be:
case Triple::spir64:
case Triple::mips64:
case Triple::mips64el:
@@ -783,6 +941,8 @@ Triple Triple::get64BitArchVariant() const {
case Triple::sparcv9:
case Triple::systemz:
case Triple::x86_64:
+ case Triple::arm64:
+ case Triple::arm64_be:
// Already 64-bit.
break;
@@ -796,3 +956,86 @@ Triple Triple::get64BitArchVariant() const {
}
return T;
}
+
+// FIXME: tblgen this.
+const char *Triple::getARMCPUForArch(StringRef MArch) const {
+ if (MArch.empty())
+ MArch = getArchName();
+
+ switch (getOS()) {
+ case llvm::Triple::FreeBSD:
+ case llvm::Triple::NetBSD:
+ if (MArch == "armv6")
+ return "arm1176jzf-s";
+ break;
+ case llvm::Triple::Win32:
+ // FIXME: this is invalid for WindowsCE
+ return "cortex-a9";
+ default:
+ break;
+ }
+
+ const char *result = nullptr;
+ size_t offset = StringRef::npos;
+ if (MArch.startswith("arm"))
+ offset = 3;
+ if (MArch.startswith("thumb"))
+ offset = 5;
+ if (offset != StringRef::npos && MArch.substr(offset, 2) == "eb")
+ offset += 2;
+ if (offset != StringRef::npos)
+ result = llvm::StringSwitch<const char *>(MArch.substr(offset))
+ .Cases("v2", "v2a", "arm2")
+ .Case("v3", "arm6")
+ .Case("v3m", "arm7m")
+ .Case("v4", "strongarm")
+ .Case("v4t", "arm7tdmi")
+ .Cases("v5", "v5t", "arm10tdmi")
+ .Cases("v5e", "v5te", "arm1022e")
+ .Case("v5tej", "arm926ej-s")
+ .Cases("v6", "v6k", "arm1136jf-s")
+ .Case("v6j", "arm1136j-s")
+ .Cases("v6z", "v6zk", "arm1176jzf-s")
+ .Case("v6t2", "arm1156t2-s")
+ .Cases("v6m", "v6-m", "cortex-m0")
+ .Cases("v7", "v7a", "v7-a", "v7l", "v7-l", "cortex-a8")
+ .Cases("v7s", "v7-s", "swift")
+ .Cases("v7r", "v7-r", "cortex-r4")
+ .Cases("v7m", "v7-m", "cortex-m3")
+ .Cases("v7em", "v7e-m", "cortex-m4")
+ .Cases("v8", "v8a", "v8-a", "cortex-a53")
+ .Default(nullptr);
+ else
+ result = llvm::StringSwitch<const char *>(MArch)
+ .Case("ep9312", "ep9312")
+ .Case("iwmmxt", "iwmmxt")
+ .Case("xscale", "xscale")
+ .Default(nullptr);
+
+ if (result)
+ return result;
+
+ // If all else fails, return the most basic CPU with Thumb interworking
+ // supported by LLVM.
+ // FIXME: Should warn once that we're falling back.
+ switch (getOS()) {
+ case llvm::Triple::NetBSD:
+ switch (getEnvironment()) {
+ case llvm::Triple::GNUEABIHF:
+ case llvm::Triple::GNUEABI:
+ case llvm::Triple::EABIHF:
+ case llvm::Triple::EABI:
+ return "arm926ej-s";
+ default:
+ return "strongarm";
+ }
+ default:
+ switch (getEnvironment()) {
+ case llvm::Triple::EABIHF:
+ case llvm::Triple::GNUEABIHF:
+ return "arm1176jzf-s";
+ default:
+ return "arm7tdmi";
+ }
+ }
+}
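Triple now parses a sub-architecture and an object-format component out of the triple string, and falls back to an OS-based default format (MachO on Darwin, COFF on Windows, ELF elsewhere). A plain-C++ sketch of that fallback; the strings and enum are illustrative:

#include <iostream>
#include <string>

enum class ObjectFormat { Unknown, COFF, ELF, MachO };

static ObjectFormat parseFormat(const std::string &Env) {
  auto endsWith = [&](const char *S) {
    std::string Suffix(S);
    return Env.size() >= Suffix.size() &&
           Env.compare(Env.size() - Suffix.size(), Suffix.size(), Suffix) == 0;
  };
  if (endsWith("coff")) return ObjectFormat::COFF;
  if (endsWith("elf")) return ObjectFormat::ELF;
  if (endsWith("macho")) return ObjectFormat::MachO;
  return ObjectFormat::Unknown;
}

static ObjectFormat defaultFormat(const std::string &OS) {
  if (OS == "darwin" || OS == "macosx" || OS == "ios")
    return ObjectFormat::MachO;
  if (OS == "windows")
    return ObjectFormat::COFF;
  return ObjectFormat::ELF;
}

int main() {
  std::string Env = "gnu";                  // e.g. x86_64-pc-windows-gnu
  ObjectFormat F = parseFormat(Env);
  if (F == ObjectFormat::Unknown)
    F = defaultFormat("windows");
  std::cout << static_cast<int>(F) << "\n"; // prints 1 (COFF)
}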
diff --git a/contrib/llvm/lib/Support/Twine.cpp b/contrib/llvm/lib/Support/Twine.cpp
index 3d04bc3..56ed964 100644
--- a/contrib/llvm/lib/Support/Twine.cpp
+++ b/contrib/llvm/lib/Support/Twine.cpp
@@ -163,9 +163,9 @@ void Twine::printRepr(raw_ostream &OS) const {
}
void Twine::dump() const {
- print(llvm::dbgs());
+ print(dbgs());
}
void Twine::dumpRepr() const {
- printRepr(llvm::dbgs());
+ printRepr(dbgs());
}
diff --git a/contrib/llvm/lib/Support/Unix/Host.inc b/contrib/llvm/lib/Support/Unix/Host.inc
index 7f79db0..7941166 100644
--- a/contrib/llvm/lib/Support/Unix/Host.inc
+++ b/contrib/llvm/lib/Support/Unix/Host.inc
@@ -64,6 +64,6 @@ std::string sys::getDefaultTargetTriple() {
Triple += getOSVersion();
}
- return Triple;
+ return Triple::normalize(Triple);
}
#endif // __FreeBSD__
diff --git a/contrib/llvm/lib/Support/Unix/Memory.inc b/contrib/llvm/lib/Support/Unix/Memory.inc
index dcfd76e..c9d89a8 100644
--- a/contrib/llvm/lib/Support/Unix/Memory.inc
+++ b/contrib/llvm/lib/Support/Unix/Memory.inc
@@ -83,8 +83,8 @@ MemoryBlock
Memory::allocateMappedMemory(size_t NumBytes,
const MemoryBlock *const NearBlock,
unsigned PFlags,
- error_code &EC) {
- EC = error_code::success();
+ std::error_code &EC) {
+ EC = std::error_code();
if (NumBytes == 0)
return MemoryBlock();
@@ -95,7 +95,7 @@ Memory::allocateMappedMemory(size_t NumBytes,
#ifdef NEED_DEV_ZERO_FOR_MMAP
static int zero_fd = open("/dev/zero", O_RDWR);
if (zero_fd == -1) {
- EC = error_code(errno, system_category());
+ EC = std::error_code(errno, std::generic_category());
return MemoryBlock();
}
fd = zero_fd;
@@ -121,9 +121,9 @@ Memory::allocateMappedMemory(size_t NumBytes,
Protect, MMFlags, fd, 0);
if (Addr == MAP_FAILED) {
if (NearBlock) //Try again without a near hint
- return allocateMappedMemory(NumBytes, 0, PFlags, EC);
+ return allocateMappedMemory(NumBytes, nullptr, PFlags, EC);
- EC = error_code(errno, system_category());
+ EC = std::error_code(errno, std::generic_category());
return MemoryBlock();
}
@@ -137,38 +137,38 @@ Memory::allocateMappedMemory(size_t NumBytes,
return Result;
}
-error_code
+std::error_code
Memory::releaseMappedMemory(MemoryBlock &M) {
- if (M.Address == 0 || M.Size == 0)
- return error_code::success();
+ if (M.Address == nullptr || M.Size == 0)
+ return std::error_code();
if (0 != ::munmap(M.Address, M.Size))
- return error_code(errno, system_category());
+ return std::error_code(errno, std::generic_category());
- M.Address = 0;
+ M.Address = nullptr;
M.Size = 0;
- return error_code::success();
+ return std::error_code();
}
-error_code
+std::error_code
Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
- if (M.Address == 0 || M.Size == 0)
- return error_code::success();
+ if (M.Address == nullptr || M.Size == 0)
+ return std::error_code();
if (!Flags)
- return error_code(EINVAL, generic_category());
+ return std::error_code(EINVAL, std::generic_category());
int Protect = getPosixProtectionFlags(Flags);
int Result = ::mprotect(M.Address, M.Size, Protect);
if (Result != 0)
- return error_code(errno, system_category());
+ return std::error_code(errno, std::generic_category());
if (Flags & MF_EXEC)
Memory::InvalidateInstructionCache(M.Address, M.Size);
- return error_code::success();
+ return std::error_code();
}
/// AllocateRWX - Allocate a slab of memory with read/write/execute
@@ -203,9 +203,9 @@ Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
;
void* start = NearBlock ? (unsigned char*)NearBlock->base() +
- NearBlock->size() : 0;
+ NearBlock->size() : nullptr;
-#if defined(__APPLE__) && defined(__arm__)
+#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_EXEC,
flags, fd, 0);
#else
@@ -214,13 +214,13 @@ Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
#endif
if (pa == MAP_FAILED) {
if (NearBlock) //Try again without a near hint
- return AllocateRWX(NumBytes, 0);
+ return AllocateRWX(NumBytes, nullptr);
MakeErrMsg(ErrMsg, "Can't allocate RWX Memory");
return MemoryBlock();
}
-#if defined(__APPLE__) && defined(__arm__)
+#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)pa,
(vm_size_t)(PageSize*NumPages), 0,
VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
@@ -246,14 +246,14 @@ Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
}
bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
- if (M.Address == 0 || M.Size == 0) return false;
+ if (M.Address == nullptr || M.Size == 0) return false;
if (0 != ::munmap(M.Address, M.Size))
return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
return false;
}
bool Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
-#if defined(__APPLE__) && defined(__arm__)
+#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
if (M.Address == 0 || M.Size == 0) return false;
Memory::InvalidateInstructionCache(M.Address, M.Size);
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
@@ -265,7 +265,7 @@ bool Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
}
bool Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
-#if defined(__APPLE__) && defined(__arm__)
+#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
if (M.Address == 0 || M.Size == 0) return false;
Memory::InvalidateInstructionCache(M.Address, M.Size);
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
@@ -280,7 +280,7 @@ bool Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
}
bool Memory::setRangeWritable(const void *Addr, size_t Size) {
-#if defined(__APPLE__) && defined(__arm__)
+#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
(vm_size_t)Size, 0,
VM_PROT_READ | VM_PROT_WRITE);
@@ -291,7 +291,7 @@ bool Memory::setRangeWritable(const void *Addr, size_t Size) {
}
bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
-#if defined(__APPLE__) && defined(__arm__)
+#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
(vm_size_t)Size, 0,
VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
@@ -311,7 +311,8 @@ void Memory::InvalidateInstructionCache(const void *Addr,
#if defined(__APPLE__)
# if (defined(__POWERPC__) || defined (__ppc__) || \
- defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__)
+ defined(_POWER) || defined(_ARCH_PPC) || defined(__arm__) || \
+ defined(__arm64__))
sys_icache_invalidate(const_cast<void *>(Addr), Len);
# endif
@@ -332,7 +333,7 @@ void Memory::InvalidateInstructionCache(const void *Addr,
for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
asm volatile("icbi 0, %0" : : "r"(Line));
asm volatile("isync");
-# elif (defined(__arm__) || defined(__aarch64__)) && defined(__GNUC__) && !defined(__FreeBSD__)
+# elif (defined(__arm__) || defined(__aarch64__)) && defined(__GNUC__)
// FIXME: Can we safely always call this for __GNUC__ everywhere?
const char *Start = static_cast<const char *>(Addr);
const char *End = Start + Len;
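The Memory.inc hunks translate the old llvm::error_code idiom into <system_error>: a default-constructed std::error_code means success, and failures wrap errno in std::generic_category(). A minimal sketch of the same pattern around mmap; the size and flags are illustrative:

#include <cerrno>
#include <cstddef>
#include <iostream>
#include <sys/mman.h>
#include <system_error>

static std::error_code allocate(std::size_t NumBytes, void *&Out) {
  Out = ::mmap(nullptr, NumBytes, PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANON, -1, 0);
  if (Out == MAP_FAILED)
    return std::error_code(errno, std::generic_category());
  return std::error_code();                 // success
}

int main() {
  void *P = nullptr;
  if (std::error_code EC = allocate(4096, P)) {
    std::cerr << "mmap failed: " << EC.message() << "\n";
    return 1;
  }
  ::munmap(P, 4096);
}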
diff --git a/contrib/llvm/lib/Support/Unix/Path.inc b/contrib/llvm/lib/Support/Unix/Path.inc
index c9dc871..623547a 100644
--- a/contrib/llvm/lib/Support/Unix/Path.inc
+++ b/contrib/llvm/lib/Support/Unix/Path.inc
@@ -17,7 +17,6 @@
//===----------------------------------------------------------------------===//
#include "Unix.h"
-#include "llvm/Support/Process.h"
#include <limits.h>
#include <stdio.h>
#if HAVE_SYS_STAT_H
@@ -86,95 +85,22 @@ namespace {
operator int() const {return FileDescriptor;}
};
+}
- error_code TempDir(SmallVectorImpl<char> &result) {
- // FIXME: Don't use TMPDIR if program is SUID or SGID enabled.
- const char *dir = 0;
- (dir = std::getenv("TMPDIR" )) ||
- (dir = std::getenv("TMP" )) ||
- (dir = std::getenv("TEMP" )) ||
- (dir = std::getenv("TEMPDIR")) ||
+static std::error_code TempDir(SmallVectorImpl<char> &result) {
+ // FIXME: Don't use TMPDIR if program is SUID or SGID enabled.
+ const char *dir = nullptr;
+ (dir = std::getenv("TMPDIR")) || (dir = std::getenv("TMP")) ||
+ (dir = std::getenv("TEMP")) || (dir = std::getenv("TEMPDIR")) ||
#ifdef P_tmpdir
- (dir = P_tmpdir) ||
+ (dir = P_tmpdir) ||
#endif
- (dir = "/tmp");
-
- result.clear();
- StringRef d(dir);
- result.append(d.begin(), d.end());
- return error_code::success();
- }
-}
-
-static error_code createUniqueEntity(const Twine &Model, int &ResultFD,
- SmallVectorImpl<char> &ResultPath,
- bool MakeAbsolute, unsigned Mode,
- FSEntity Type) {
- SmallString<128> ModelStorage;
- Model.toVector(ModelStorage);
-
- if (MakeAbsolute) {
- // Make model absolute by prepending a temp directory if it's not already.
- bool absolute = sys::path::is_absolute(Twine(ModelStorage));
- if (!absolute) {
- SmallString<128> TDir;
- if (error_code ec = TempDir(TDir)) return ec;
- sys::path::append(TDir, Twine(ModelStorage));
- ModelStorage.swap(TDir);
- }
- }
-
- // From here on, DO NOT modify model. It may be needed if the randomly chosen
- // path already exists.
- ResultPath = ModelStorage;
- // Null terminate.
- ResultPath.push_back(0);
- ResultPath.pop_back();
-
-retry_random_path:
- // Replace '%' with random chars.
- for (unsigned i = 0, e = ModelStorage.size(); i != e; ++i) {
- if (ModelStorage[i] == '%')
- ResultPath[i] = "0123456789abcdef"[sys::Process::GetRandomNumber() & 15];
- }
-
- // Try to open + create the file.
- switch (Type) {
- case FS_File: {
- int RandomFD = ::open(ResultPath.begin(), O_RDWR | O_CREAT | O_EXCL, Mode);
- if (RandomFD == -1) {
- int SavedErrno = errno;
- // If the file existed, try again, otherwise, error.
- if (SavedErrno == errc::file_exists)
- goto retry_random_path;
- return error_code(SavedErrno, system_category());
- }
-
- ResultFD = RandomFD;
- return error_code::success();
- }
-
- case FS_Name: {
- bool Exists;
- error_code EC = sys::fs::exists(ResultPath.begin(), Exists);
- if (EC)
- return EC;
- if (Exists)
- goto retry_random_path;
- return error_code::success();
- }
+ (dir = "/tmp");
- case FS_Dir: {
- bool Existed;
- error_code EC = sys::fs::create_directory(ResultPath.begin(), Existed);
- if (EC)
- return EC;
- if (Existed)
- goto retry_random_path;
- return error_code::success();
- }
- }
- llvm_unreachable("Invalid Type");
+ result.clear();
+ StringRef d(dir);
+ result.append(d.begin(), d.end());
+ return std::error_code();
}
namespace llvm {
@@ -184,15 +110,15 @@ namespace fs {
defined(__OpenBSD__) || defined(__minix) || defined(__FreeBSD_kernel__) || \
defined(__linux__) || defined(__CYGWIN__) || defined(__DragonFly__)
static int
-test_dir(char buf[PATH_MAX], char ret[PATH_MAX],
- const char *dir, const char *bin)
-{
+test_dir(char ret[PATH_MAX], const char *dir, const char *bin)
+{
struct stat sb;
+ char fullpath[PATH_MAX];
- snprintf(buf, PATH_MAX, "%s/%s", dir, bin);
- if (realpath(buf, ret) == NULL)
+ snprintf(fullpath, PATH_MAX, "%s/%s", dir, bin);
+ if (realpath(fullpath, ret) == NULL)
return (1);
- if (stat(buf, &sb) != 0)
+ if (stat(fullpath, &sb) != 0)
return (1);
return (0);
@@ -201,20 +127,21 @@ test_dir(char buf[PATH_MAX], char ret[PATH_MAX],
static char *
getprogpath(char ret[PATH_MAX], const char *bin)
{
- char *pv, *s, *t, buf[PATH_MAX];
+ char *pv, *s, *t;
/* First approach: absolute path. */
if (bin[0] == '/') {
- if (test_dir(buf, ret, "/", bin) == 0)
+ if (test_dir(ret, "/", bin) == 0)
return (ret);
return (NULL);
}
/* Second approach: relative path. */
if (strchr(bin, '/') != NULL) {
- if (getcwd(buf, PATH_MAX) == NULL)
+ char cwd[PATH_MAX];
+ if (getcwd(cwd, PATH_MAX) == NULL)
return (NULL);
- if (test_dir(buf, ret, buf, bin) == 0)
+ if (test_dir(ret, cwd, bin) == 0)
return (ret);
return (NULL);
}
@@ -226,7 +153,7 @@ getprogpath(char ret[PATH_MAX], const char *bin)
if (pv == NULL)
return (NULL);
while ((t = strsep(&s, ":")) != NULL) {
- if (test_dir(buf, ret, t, bin) == 0) {
+ if (test_dir(ret, t, bin) == 0) {
free(pv);
return (ret);
}
@@ -298,7 +225,7 @@ UniqueID file_status::getUniqueID() const {
return UniqueID(fs_st_dev, fs_st_ino);
}
-error_code current_path(SmallVectorImpl<char> &result) {
+std::error_code current_path(SmallVectorImpl<char> &result) {
result.clear();
const char *pwd = ::getenv("PWD");
@@ -308,7 +235,7 @@ error_code current_path(SmallVectorImpl<char> &result) {
!llvm::sys::fs::status(".", DotStatus) &&
PWDStatus.getUniqueID() == DotStatus.getUniqueID()) {
result.append(pwd, pwd + strlen(pwd));
- return error_code::success();
+ return std::error_code();
}
#ifdef MAXPATHLEN
@@ -319,10 +246,10 @@ error_code current_path(SmallVectorImpl<char> &result) {
#endif
while (true) {
- if (::getcwd(result.data(), result.capacity()) == 0) {
+ if (::getcwd(result.data(), result.capacity()) == nullptr) {
// See if there was a real error.
- if (errno != errc::not_enough_memory)
- return error_code(errno, system_category());
+ if (errno != ENOMEM)
+ return std::error_code(errno, std::generic_category());
// Otherwise there just wasn't enough space.
result.reserve(result.capacity() * 2);
} else
@@ -330,37 +257,37 @@ error_code current_path(SmallVectorImpl<char> &result) {
}
result.set_size(strlen(result.data()));
- return error_code::success();
+ return std::error_code();
}
-error_code create_directory(const Twine &path, bool &existed) {
+std::error_code create_directory(const Twine &path, bool IgnoreExisting) {
SmallString<128> path_storage;
StringRef p = path.toNullTerminatedStringRef(path_storage);
if (::mkdir(p.begin(), S_IRWXU | S_IRWXG) == -1) {
- if (errno != errc::file_exists)
- return error_code(errno, system_category());
- existed = true;
- } else
- existed = false;
+ if (errno != EEXIST || !IgnoreExisting)
+ return std::error_code(errno, std::generic_category());
+ }
- return error_code::success();
+ return std::error_code();
}
-error_code create_hard_link(const Twine &to, const Twine &from) {
- // Get arguments.
- SmallString<128> from_storage;
- SmallString<128> to_storage;
- StringRef f = from.toNullTerminatedStringRef(from_storage);
- StringRef t = to.toNullTerminatedStringRef(to_storage);
-
- if (::link(t.begin(), f.begin()) == -1)
- return error_code(errno, system_category());
-
- return error_code::success();
+std::error_code normalize_separators(SmallVectorImpl<char> &Path) {
+ for (auto PI = Path.begin(), PE = Path.end(); PI < PE; ++PI) {
+ if (*PI == '\\') {
+ auto PN = PI + 1;
+ if (PN < PE && *PN == '\\')
+        ++PI; // Increment once; the for loop will move over the escaped slash.
+ else
+ *PI = '/';
+ }
+ }
+ return std::error_code();
}
-error_code create_symlink(const Twine &to, const Twine &from) {
+// Note that we are using symbolic links because hard links are not supported
+// by all filesystems (SMB, for example, does not).
+std::error_code create_link(const Twine &to, const Twine &from) {
// Get arguments.
SmallString<128> from_storage;
SmallString<128> to_storage;
@@ -368,21 +295,20 @@ error_code create_symlink(const Twine &to, const Twine &from) {
StringRef t = to.toNullTerminatedStringRef(to_storage);
if (::symlink(t.begin(), f.begin()) == -1)
- return error_code(errno, system_category());
+ return std::error_code(errno, std::generic_category());
- return error_code::success();
+ return std::error_code();
}
-error_code remove(const Twine &path, bool &existed) {
+std::error_code remove(const Twine &path, bool IgnoreNonExisting) {
SmallString<128> path_storage;
StringRef p = path.toNullTerminatedStringRef(path_storage);
struct stat buf;
- if (stat(p.begin(), &buf) != 0) {
- if (errno != errc::no_such_file_or_directory)
- return error_code(errno, system_category());
- existed = false;
- return error_code::success();
+ if (lstat(p.begin(), &buf) != 0) {
+ if (errno != ENOENT || !IgnoreNonExisting)
+ return std::error_code(errno, std::generic_category());
+ return std::error_code();
}
// Note: this check catches strange situations. In all cases, LLVM should
@@ -390,20 +316,18 @@ error_code remove(const Twine &path, bool &existed) {
// check ensures that what we're trying to erase is a regular file. It
// effectively prevents LLVM from erasing things like /dev/null, any block
// special file, or other things that aren't "regular" files.
- if (!S_ISREG(buf.st_mode) && !S_ISDIR(buf.st_mode))
+ if (!S_ISREG(buf.st_mode) && !S_ISDIR(buf.st_mode) && !S_ISLNK(buf.st_mode))
return make_error_code(errc::operation_not_permitted);
if (::remove(p.begin()) == -1) {
- if (errno != errc::no_such_file_or_directory)
- return error_code(errno, system_category());
- existed = false;
- } else
- existed = true;
+ if (errno != ENOENT || !IgnoreNonExisting)
+ return std::error_code(errno, std::generic_category());
+ }
- return error_code::success();
+ return std::error_code();
}
-error_code rename(const Twine &from, const Twine &to) {
+std::error_code rename(const Twine &from, const Twine &to) {
// Get arguments.
SmallString<128> from_storage;
SmallString<128> to_storage;
@@ -411,33 +335,33 @@ error_code rename(const Twine &from, const Twine &to) {
StringRef t = to.toNullTerminatedStringRef(to_storage);
if (::rename(f.begin(), t.begin()) == -1)
- return error_code(errno, system_category());
+ return std::error_code(errno, std::generic_category());
- return error_code::success();
+ return std::error_code();
}
-error_code resize_file(const Twine &path, uint64_t size) {
+std::error_code resize_file(const Twine &path, uint64_t size) {
SmallString<128> path_storage;
StringRef p = path.toNullTerminatedStringRef(path_storage);
if (::truncate(p.begin(), size) == -1)
- return error_code(errno, system_category());
+ return std::error_code(errno, std::generic_category());
- return error_code::success();
+ return std::error_code();
}
-error_code exists(const Twine &path, bool &result) {
+std::error_code exists(const Twine &path, bool &result) {
SmallString<128> path_storage;
StringRef p = path.toNullTerminatedStringRef(path_storage);
if (::access(p.begin(), F_OK) == -1) {
- if (errno != errc::no_such_file_or_directory)
- return error_code(errno, system_category());
+ if (errno != ENOENT)
+ return std::error_code(errno, std::generic_category());
result = false;
} else
result = true;
- return error_code::success();
+ return std::error_code();
}
bool can_write(const Twine &Path) {
@@ -466,18 +390,20 @@ bool equivalent(file_status A, file_status B) {
A.fs_st_ino == B.fs_st_ino;
}
-error_code equivalent(const Twine &A, const Twine &B, bool &result) {
+std::error_code equivalent(const Twine &A, const Twine &B, bool &result) {
file_status fsA, fsB;
- if (error_code ec = status(A, fsA)) return ec;
- if (error_code ec = status(B, fsB)) return ec;
+ if (std::error_code ec = status(A, fsA))
+ return ec;
+ if (std::error_code ec = status(B, fsB))
+ return ec;
result = equivalent(fsA, fsB);
- return error_code::success();
+ return std::error_code();
}
-static error_code fillStatus(int StatRet, const struct stat &Status,
+static std::error_code fillStatus(int StatRet, const struct stat &Status,
file_status &Result) {
if (StatRet != 0) {
- error_code ec(errno, system_category());
+ std::error_code ec(errno, std::generic_category());
if (ec == errc::no_such_file_or_directory)
Result = file_status(file_type::file_not_found);
else
@@ -505,10 +431,10 @@ static error_code fillStatus(int StatRet, const struct stat &Status,
file_status(Type, Perms, Status.st_dev, Status.st_ino, Status.st_mtime,
Status.st_uid, Status.st_gid, Status.st_size);
- return error_code::success();
+ return std::error_code();
}
-error_code status(const Twine &Path, file_status &Result) {
+std::error_code status(const Twine &Path, file_status &Result) {
SmallString<128> PathStorage;
StringRef P = Path.toNullTerminatedStringRef(PathStorage);
@@ -517,33 +443,36 @@ error_code status(const Twine &Path, file_status &Result) {
return fillStatus(StatRet, Status, Result);
}
-error_code status(int FD, file_status &Result) {
+std::error_code status(int FD, file_status &Result) {
struct stat Status;
int StatRet = ::fstat(FD, &Status);
return fillStatus(StatRet, Status, Result);
}
-error_code setLastModificationAndAccessTime(int FD, TimeValue Time) {
+std::error_code setLastModificationAndAccessTime(int FD, TimeValue Time) {
#if defined(HAVE_FUTIMENS)
timespec Times[2];
- Times[0].tv_sec = Time.toPosixTime();
+ Times[0].tv_sec = Time.toEpochTime();
Times[0].tv_nsec = 0;
Times[1] = Times[0];
if (::futimens(FD, Times))
+ return std::error_code(errno, std::generic_category());
+ return std::error_code();
#elif defined(HAVE_FUTIMES)
timeval Times[2];
- Times[0].tv_sec = Time.toPosixTime();
+ Times[0].tv_sec = Time.toEpochTime();
Times[0].tv_usec = 0;
Times[1] = Times[0];
if (::futimes(FD, Times))
+ return std::error_code(errno, std::generic_category());
+ return std::error_code();
#else
-#error Missing futimes() and futimens()
+#warning Missing futimes() and futimens()
+ return make_error_code(errc::function_not_supported);
#endif
- return error_code(errno, system_category());
- return error_code::success();
}
-error_code mapped_file_region::init(int FD, bool CloseFD, uint64_t Offset) {
+std::error_code mapped_file_region::init(int FD, bool CloseFD, uint64_t Offset) {
AutoFD ScopedFD(FD);
if (!CloseFD)
ScopedFD.take();
@@ -551,7 +480,7 @@ error_code mapped_file_region::init(int FD, bool CloseFD, uint64_t Offset) {
// Figure out how large the file is.
struct stat FileInfo;
if (fstat(FD, &FileInfo) == -1)
- return error_code(errno, system_category());
+ return std::error_code(errno, std::generic_category());
uint64_t FileSize = FileInfo.st_size;
if (Size == 0)
@@ -559,7 +488,7 @@ error_code mapped_file_region::init(int FD, bool CloseFD, uint64_t Offset) {
else if (FileSize < Size) {
// We need to grow the file.
if (ftruncate(FD, Size) == -1)
- return error_code(errno, system_category());
+ return std::error_code(errno, std::generic_category());
}
int flags = (Mode == readwrite) ? MAP_SHARED : MAP_PRIVATE;
@@ -567,17 +496,17 @@ error_code mapped_file_region::init(int FD, bool CloseFD, uint64_t Offset) {
#ifdef MAP_FILE
flags |= MAP_FILE;
#endif
- Mapping = ::mmap(0, Size, prot, flags, FD, Offset);
+ Mapping = ::mmap(nullptr, Size, prot, flags, FD, Offset);
if (Mapping == MAP_FAILED)
- return error_code(errno, system_category());
- return error_code::success();
+ return std::error_code(errno, std::generic_category());
+ return std::error_code();
}
mapped_file_region::mapped_file_region(const Twine &path,
mapmode mode,
uint64_t length,
uint64_t offset,
- error_code &ec)
+ std::error_code &ec)
: Mode(mode)
, Size(length)
, Mapping() {
@@ -592,13 +521,13 @@ mapped_file_region::mapped_file_region(const Twine &path,
int oflags = (mode == readonly) ? O_RDONLY : O_RDWR;
int ofd = ::open(name.begin(), oflags);
if (ofd == -1) {
- ec = error_code(errno, system_category());
+ ec = std::error_code(errno, std::generic_category());
return;
}
ec = init(ofd, true, offset);
if (ec)
- Mapping = 0;
+ Mapping = nullptr;
}
mapped_file_region::mapped_file_region(int fd,
@@ -606,7 +535,7 @@ mapped_file_region::mapped_file_region(int fd,
mapmode mode,
uint64_t length,
uint64_t offset,
- error_code &ec)
+ std::error_code &ec)
: Mode(mode)
, Size(length)
, Mapping() {
@@ -618,7 +547,7 @@ mapped_file_region::mapped_file_region(int fd,
ec = init(fd, closefd, offset);
if (ec)
- Mapping = 0;
+ Mapping = nullptr;
}
mapped_file_region::~mapped_file_region() {
@@ -626,12 +555,10 @@ mapped_file_region::~mapped_file_region() {
::munmap(Mapping, Size);
}
-#if LLVM_HAS_RVALUE_REFERENCES
mapped_file_region::mapped_file_region(mapped_file_region &&other)
: Mode(other.Mode), Size(other.Size), Mapping(other.Mapping) {
- other.Mapping = 0;
+ other.Mapping = nullptr;
}
-#endif
mapped_file_region::mapmode mapped_file_region::flags() const {
assert(Mapping && "Mapping failed but used anyway!");
@@ -645,7 +572,7 @@ uint64_t mapped_file_region::size() const {
char *mapped_file_region::data() const {
assert(Mapping && "Mapping failed but used anyway!");
- assert(Mode != readonly && "Cannot get non const data for readonly mapping!");
+ assert(Mode != readonly && "Cannot get non-const data for readonly mapping!");
return reinterpret_cast<char*>(Mapping);
}
@@ -658,12 +585,12 @@ int mapped_file_region::alignment() {
return process::get_self()->page_size();
}
-error_code detail::directory_iterator_construct(detail::DirIterState &it,
+std::error_code detail::directory_iterator_construct(detail::DirIterState &it,
StringRef path){
SmallString<128> path_null(path);
DIR *directory = ::opendir(path_null.c_str());
- if (directory == 0)
- return error_code(errno, system_category());
+ if (!directory)
+ return std::error_code(errno, std::generic_category());
it.IterationHandle = reinterpret_cast<intptr_t>(directory);
// Add something for replace_filename to replace.
@@ -672,20 +599,20 @@ error_code detail::directory_iterator_construct(detail::DirIterState &it,
return directory_iterator_increment(it);
}
-error_code detail::directory_iterator_destruct(detail::DirIterState &it) {
+std::error_code detail::directory_iterator_destruct(detail::DirIterState &it) {
if (it.IterationHandle)
::closedir(reinterpret_cast<DIR *>(it.IterationHandle));
it.IterationHandle = 0;
it.CurrentEntry = directory_entry();
- return error_code::success();
+ return std::error_code();
}
-error_code detail::directory_iterator_increment(detail::DirIterState &it) {
+std::error_code detail::directory_iterator_increment(detail::DirIterState &it) {
errno = 0;
dirent *cur_dir = ::readdir(reinterpret_cast<DIR *>(it.IterationHandle));
- if (cur_dir == 0 && errno != 0) {
- return error_code(errno, system_category());
- } else if (cur_dir != 0) {
+ if (cur_dir == nullptr && errno != 0) {
+ return std::error_code(errno, std::generic_category());
+ } else if (cur_dir != nullptr) {
StringRef name(cur_dir->d_name, NAMLEN(cur_dir));
if ((name.size() == 1 && name[0] == '.') ||
(name.size() == 2 && name[0] == '.' && name[1] == '.'))
@@ -694,86 +621,31 @@ error_code detail::directory_iterator_increment(detail::DirIterState &it) {
} else
return directory_iterator_destruct(it);
- return error_code::success();
-}
-
-error_code get_magic(const Twine &path, uint32_t len,
- SmallVectorImpl<char> &result) {
- SmallString<128> PathStorage;
- StringRef Path = path.toNullTerminatedStringRef(PathStorage);
- result.set_size(0);
-
- // Open path.
- std::FILE *file = std::fopen(Path.data(), "rb");
- if (file == 0)
- return error_code(errno, system_category());
-
- // Reserve storage.
- result.reserve(len);
-
- // Read magic!
- size_t size = std::fread(result.data(), 1, len, file);
- if (std::ferror(file) != 0) {
- std::fclose(file);
- return error_code(errno, system_category());
- } else if (size != len) {
- if (std::feof(file) != 0) {
- std::fclose(file);
- result.set_size(size);
- return make_error_code(errc::value_too_large);
- }
- }
- std::fclose(file);
- result.set_size(size);
- return error_code::success();
+ return std::error_code();
}
-error_code map_file_pages(const Twine &path, off_t file_offset, size_t size,
- bool map_writable, void *&result) {
- SmallString<128> path_storage;
- StringRef name = path.toNullTerminatedStringRef(path_storage);
- int oflags = map_writable ? O_RDWR : O_RDONLY;
- int ofd = ::open(name.begin(), oflags);
- if ( ofd == -1 )
- return error_code(errno, system_category());
- AutoFD fd(ofd);
- int flags = map_writable ? MAP_SHARED : MAP_PRIVATE;
- int prot = map_writable ? (PROT_READ|PROT_WRITE) : PROT_READ;
-#ifdef MAP_FILE
- flags |= MAP_FILE;
-#endif
- result = ::mmap(0, size, prot, flags, fd, file_offset);
- if (result == MAP_FAILED) {
- return error_code(errno, system_category());
- }
-
- return error_code::success();
-}
-
-error_code unmap_file_pages(void *base, size_t size) {
- if ( ::munmap(base, size) == -1 )
- return error_code(errno, system_category());
-
- return error_code::success();
-}
-
-error_code openFileForRead(const Twine &Name, int &ResultFD) {
+std::error_code openFileForRead(const Twine &Name, int &ResultFD) {
SmallString<128> Storage;
StringRef P = Name.toNullTerminatedStringRef(Storage);
while ((ResultFD = open(P.begin(), O_RDONLY)) < 0) {
if (errno != EINTR)
- return error_code(errno, system_category());
+ return std::error_code(errno, std::generic_category());
}
- return error_code::success();
+ return std::error_code();
}
-error_code openFileForWrite(const Twine &Name, int &ResultFD,
+std::error_code openFileForWrite(const Twine &Name, int &ResultFD,
sys::fs::OpenFlags Flags, unsigned Mode) {
// Verify that we don't have both "append" and "excl".
assert((!(Flags & sys::fs::F_Excl) || !(Flags & sys::fs::F_Append)) &&
"Cannot specify both 'excl' and 'append' file creation flags!");
- int OpenFlags = O_WRONLY | O_CREAT;
+ int OpenFlags = O_CREAT;
+
+ if (Flags & F_RW)
+ OpenFlags |= O_RDWR;
+ else
+ OpenFlags |= O_WRONLY;
if (Flags & F_Append)
OpenFlags |= O_APPEND;
@@ -787,11 +659,26 @@ error_code openFileForWrite(const Twine &Name, int &ResultFD,
StringRef P = Name.toNullTerminatedStringRef(Storage);
while ((ResultFD = open(P.begin(), OpenFlags, Mode)) < 0) {
if (errno != EINTR)
- return error_code(errno, system_category());
+ return std::error_code(errno, std::generic_category());
}
- return error_code::success();
+ return std::error_code();
}
} // end namespace fs
+
+namespace path {
+
+bool home_directory(SmallVectorImpl<char> &result) {
+ if (char *RequestedDir = getenv("HOME")) {
+ result.clear();
+ result.append(RequestedDir, RequestedDir + strlen(RequestedDir));
+ return true;
+ }
+
+ return false;
+}
+
+} // end namespace path
+
} // end namespace sys
} // end namespace llvm
diff --git a/contrib/llvm/lib/Support/Unix/Process.inc b/contrib/llvm/lib/Support/Unix/Process.inc
index c5778e7..d2c5dbc 100644
--- a/contrib/llvm/lib/Support/Unix/Process.inc
+++ b/contrib/llvm/lib/Support/Unix/Process.inc
@@ -47,7 +47,6 @@
using namespace llvm;
using namespace sys;
-
process::id_type self_process::get_id() {
return getpid();
}
@@ -135,7 +134,7 @@ size_t Process::GetMallocUsage() {
void Process::GetTimeUsage(TimeValue &elapsed, TimeValue &user_time,
TimeValue &sys_time) {
elapsed = TimeValue::now();
- llvm::tie(user_time, sys_time) = getRUsageTimes();
+ std::tie(user_time, sys_time) = getRUsageTimes();
}
#if defined(HAVE_MACH_MACH_H) && !defined(__GNU__)
@@ -190,12 +189,13 @@ Optional<std::string> Process::GetEnv(StringRef Name) {
return std::string(Val);
}
-error_code Process::GetArgumentVector(SmallVectorImpl<const char *> &ArgsOut,
- ArrayRef<const char *> ArgsIn,
- SpecificBumpPtrAllocator<char> &) {
+std::error_code
+Process::GetArgumentVector(SmallVectorImpl<const char *> &ArgsOut,
+ ArrayRef<const char *> ArgsIn,
+ SpecificBumpPtrAllocator<char> &) {
ArgsOut.append(ArgsIn.begin(), ArgsIn.end());
- return error_code::success();
+ return std::error_code();
}
bool Process::StandardInIsUserInput() {
@@ -270,7 +270,7 @@ static bool terminalHasColors(int fd) {
MutexGuard G(M);
int errret = 0;
- if (setupterm((char *)0, fd, &errret) != 0)
+ if (setupterm((char *)nullptr, fd, &errret) != 0)
// Regardless of why, if we can't get terminfo, we shouldn't try to print
// colors.
return false;
@@ -292,7 +292,7 @@ static bool terminalHasColors(int fd) {
// Now extract the structure allocated by setupterm and free its memory
// through a really silly dance.
- struct term *termp = set_curterm((struct term *)0);
+ struct term *termp = set_curterm((struct term *)nullptr);
(void)del_curterm(termp); // Drop any errors here.
// Return true if we found color capabilities for the current terminal.
@@ -343,7 +343,7 @@ const char *Process::ResetColor() {
return "\033[0m";
}
-#if !defined(HAVE_ARC4RANDOM)
+#if !defined(HAVE_DECL_ARC4RANDOM) || !HAVE_DECL_ARC4RANDOM
static unsigned GetRandomNumberSeed() {
// Attempt to get the initial seed from /dev/urandom, if possible.
if (FILE *RandomSource = ::fopen("/dev/urandom", "r")) {
@@ -364,7 +364,7 @@ static unsigned GetRandomNumberSeed() {
#endif
unsigned llvm::sys::Process::GetRandomNumber() {
-#if defined(HAVE_ARC4RANDOM)
+#if defined(HAVE_DECL_ARC4RANDOM) && HAVE_DECL_ARC4RANDOM
return arc4random();
#else
static int x = (::srand(GetRandomNumberSeed()), 0);
diff --git a/contrib/llvm/lib/Support/Unix/Program.inc b/contrib/llvm/lib/Support/Unix/Program.inc
index 78b2971..06a33cd 100644
--- a/contrib/llvm/lib/Support/Unix/Program.inc
+++ b/contrib/llvm/lib/Support/Unix/Program.inc
@@ -48,6 +48,7 @@
#endif
namespace llvm {
+
using namespace sys;
ProcessInfo::ProcessInfo() : Pid(0), ReturnCode(0) {}
@@ -70,7 +71,7 @@ sys::FindProgramByName(const std::string& progName) {
// Get the path. If it's empty, we can't do anything to find it.
const char *PathStr = getenv("PATH");
- if (PathStr == 0)
+ if (!PathStr)
return "";
// Now we have a colon separated list of directories to search; try them.
@@ -99,7 +100,7 @@ sys::FindProgramByName(const std::string& progName) {
}
static bool RedirectIO(const StringRef *Path, int FD, std::string* ErrMsg) {
- if (Path == 0) // Noop
+ if (!Path) // Noop
return false;
std::string File;
if (Path->empty())
@@ -129,7 +130,7 @@ static bool RedirectIO(const StringRef *Path, int FD, std::string* ErrMsg) {
#ifdef HAVE_POSIX_SPAWN
static bool RedirectIO_PS(const std::string *Path, int FD, std::string *ErrMsg,
posix_spawn_file_actions_t *FileActions) {
- if (Path == 0) // Noop
+ if (!Path) // Noop
return false;
const char *File;
if (Path->empty())
@@ -195,7 +196,7 @@ static bool Execute(ProcessInfo &PI, StringRef Program, const char **args,
#ifdef HAVE_POSIX_SPAWN
if (memoryLimit == 0) {
posix_spawn_file_actions_t FileActionsStore;
- posix_spawn_file_actions_t *FileActions = 0;
+ posix_spawn_file_actions_t *FileActions = nullptr;
// If we call posix_spawn_file_actions_addopen we have to make sure the
// c strings we pass to it stay alive until the call to posix_spawn,
@@ -203,7 +204,7 @@ static bool Execute(ProcessInfo &PI, StringRef Program, const char **args,
std::string RedirectsStorage[3];
if (redirects) {
- std::string *RedirectsStr[3] = {0, 0, 0};
+ std::string *RedirectsStr[3] = {nullptr, nullptr, nullptr};
for (int I = 0; I < 3; ++I) {
if (redirects[I]) {
RedirectsStorage[I] = *redirects[I];
@@ -218,7 +219,7 @@ static bool Execute(ProcessInfo &PI, StringRef Program, const char **args,
if (RedirectIO_PS(RedirectsStr[0], 0, ErrMsg, FileActions) ||
RedirectIO_PS(RedirectsStr[1], 1, ErrMsg, FileActions))
return false;
- if (redirects[1] == 0 || redirects[2] == 0 ||
+ if (redirects[1] == nullptr || redirects[2] == nullptr ||
*redirects[1] != *redirects[2]) {
// Just redirect stderr
if (RedirectIO_PS(RedirectsStr[2], 2, ErrMsg, FileActions))
@@ -242,8 +243,9 @@ static bool Execute(ProcessInfo &PI, StringRef Program, const char **args,
// Explicitly initialized to prevent what appears to be a valgrind false
// positive.
pid_t PID = 0;
- int Err = posix_spawn(&PID, Program.str().c_str(), FileActions, /*attrp*/0,
- const_cast<char **>(args), const_cast<char **>(envp));
+ int Err = posix_spawn(&PID, Program.str().c_str(), FileActions,
+ /*attrp*/nullptr, const_cast<char **>(args),
+ const_cast<char **>(envp));
if (FileActions)
posix_spawn_file_actions_destroy(FileActions);
@@ -294,7 +296,7 @@ static bool Execute(ProcessInfo &PI, StringRef Program, const char **args,
// Execute!
std::string PathStr = Program;
- if (envp != 0)
+ if (envp != nullptr)
execve(PathStr.c_str(),
const_cast<char **>(args),
const_cast<char **>(envp));
@@ -348,7 +350,11 @@ ProcessInfo sys::Wait(const ProcessInfo &PI, unsigned SecondsToWait,
// Parent process: Wait for the child process to terminate.
int status;
ProcessInfo WaitResult;
- WaitResult.Pid = waitpid(ChildPid, &status, WaitPidOptions);
+
+ do {
+ WaitResult.Pid = waitpid(ChildPid, &status, WaitPidOptions);
+ } while (WaitUntilTerminates && WaitResult.Pid == -1 && errno == EINTR);
+
if (WaitResult.Pid != PI.Pid) {
if (WaitResult.Pid == 0) {
// Non-blocking wait.
@@ -360,7 +366,7 @@ ProcessInfo sys::Wait(const ProcessInfo &PI, unsigned SecondsToWait,
// Turn off the alarm and restore the signal handler
alarm(0);
- sigaction(SIGALRM, &Old, 0);
+ sigaction(SIGALRM, &Old, nullptr);
// Wait for child to die
if (wait(&status) != ChildPid)
@@ -381,7 +387,7 @@ ProcessInfo sys::Wait(const ProcessInfo &PI, unsigned SecondsToWait,
// We exited normally without timeout, so turn off the timer.
if (SecondsToWait && !WaitUntilTerminates) {
alarm(0);
- sigaction(SIGALRM, &Old, 0);
+ sigaction(SIGALRM, &Old, nullptr);
}
// Return the proper exit status. Detect error conditions
@@ -418,24 +424,20 @@ ProcessInfo sys::Wait(const ProcessInfo &PI, unsigned SecondsToWait,
#else
if (ErrMsg)
*ErrMsg = "Program::Wait is not implemented on this platform yet!";
+ ProcessInfo WaitResult;
WaitResult.ReturnCode = -2;
#endif
return WaitResult;
}
-error_code sys::ChangeStdinToBinary(){
- // Do nothing, as Unix doesn't differentiate between text and binary.
- return make_error_code(errc::success);
-}
-
-error_code sys::ChangeStdoutToBinary(){
+ std::error_code sys::ChangeStdinToBinary(){
// Do nothing, as Unix doesn't differentiate between text and binary.
- return make_error_code(errc::success);
+ return std::error_code();
}
-error_code sys::ChangeStderrToBinary(){
+ std::error_code sys::ChangeStdoutToBinary(){
// Do nothing, as Unix doesn't differentiate between text and binary.
- return make_error_code(errc::success);
+ return std::error_code();
}
bool llvm::sys::argumentsFitWithinSystemLimits(ArrayRef<const char*> Args) {
diff --git a/contrib/llvm/lib/Support/Unix/RWMutex.inc b/contrib/llvm/lib/Support/Unix/RWMutex.inc
index 40e87ff..edcbd52 100644
--- a/contrib/llvm/lib/Support/Unix/RWMutex.inc
+++ b/contrib/llvm/lib/Support/Unix/RWMutex.inc
@@ -16,28 +16,36 @@
//=== is guaranteed to work on *all* UNIX variants.
//===----------------------------------------------------------------------===//
+#include "llvm/Support/Mutex.h"
+
namespace llvm {
using namespace sys;
-RWMutexImpl::RWMutexImpl() { }
+// This naive implementation treats readers the same as writers. This
+// will therefore deadlock if a thread tries to acquire a read lock
+// multiple times.
+
+RWMutexImpl::RWMutexImpl() : data_(new Mutex(false)) { }
-RWMutexImpl::~RWMutexImpl() { }
+RWMutexImpl::~RWMutexImpl() {
+ delete static_cast<Mutex *>(data_);
+}
bool RWMutexImpl::reader_acquire() {
- return true;
+ return static_cast<Mutex *>(data_)->acquire();
}
bool RWMutexImpl::reader_release() {
- return true;
+ return static_cast<Mutex *>(data_)->release();
}
bool RWMutexImpl::writer_acquire() {
- return true;
+ return static_cast<Mutex *>(data_)->acquire();
}
bool RWMutexImpl::writer_release() {
- return true;
+ return static_cast<Mutex *>(data_)->release();
}
}
diff --git a/contrib/llvm/lib/Support/Unix/Signals.inc b/contrib/llvm/lib/Support/Unix/Signals.inc
index b4c78d6..1841fea 100644
--- a/contrib/llvm/lib/Support/Unix/Signals.inc
+++ b/contrib/llvm/lib/Support/Unix/Signals.inc
@@ -44,7 +44,7 @@ static RETSIGTYPE SignalHandler(int Sig); // defined below.
static SmartMutex<true> SignalsMutex;
/// InterruptFunction - The function to call if ctrl-c is pressed.
-static void (*InterruptFunction)() = 0;
+static void (*InterruptFunction)() = nullptr;
static std::vector<std::string> FilesToRemove;
static std::vector<std::pair<void(*)(void*), void*> > CallBacksToRun;
@@ -55,7 +55,7 @@ static std::vector<std::pair<void(*)(void*), void*> > CallBacksToRun;
static const int IntSigs[] = {
SIGHUP, SIGINT, SIGPIPE, SIGTERM, SIGUSR1, SIGUSR2
};
-static const int *const IntSigsEnd = array_endof(IntSigs);
+static const int *const IntSigsEnd = std::end(IntSigs);
// KillSigs - Signals that represent that we have a bug, and our prompt
// termination has been ordered.
@@ -74,7 +74,7 @@ static const int KillSigs[] = {
, SIGEMT
#endif
};
-static const int *const KillSigsEnd = array_endof(KillSigs);
+static const int *const KillSigsEnd = std::end(KillSigs);
static unsigned NumRegisteredSignals = 0;
static struct {
@@ -113,7 +113,7 @@ static void UnregisterHandlers() {
// Restore all of the signal handlers to how they were before we showed up.
for (unsigned i = 0, e = NumRegisteredSignals; i != e; ++i)
sigaction(RegisteredSignalInfo[i].SigNo,
- &RegisteredSignalInfo[i].SA, 0);
+ &RegisteredSignalInfo[i].SA, nullptr);
NumRegisteredSignals = 0;
}
@@ -160,7 +160,7 @@ static RETSIGTYPE SignalHandler(int Sig) {
// Unmask all potentially blocked kill signals.
sigset_t SigMask;
sigfillset(&SigMask);
- sigprocmask(SIG_UNBLOCK, &SigMask, 0);
+ sigprocmask(SIG_UNBLOCK, &SigMask, nullptr);
SignalsMutex.acquire();
RemoveFilesToRemove();
@@ -169,7 +169,7 @@ static RETSIGTYPE SignalHandler(int Sig) {
if (InterruptFunction) {
void (*IF)() = InterruptFunction;
SignalsMutex.release();
- InterruptFunction = 0;
+ InterruptFunction = nullptr;
IF(); // run the interrupt function.
return;
}
@@ -212,7 +212,7 @@ void llvm::sys::SetInterruptFunction(void (*IF)()) {
bool llvm::sys::RemoveFileOnSignal(StringRef Filename,
std::string* ErrMsg) {
SignalsMutex.acquire();
- std::string *OldPtr = FilesToRemove.empty() ? 0 : &FilesToRemove[0];
+ std::string *OldPtr = FilesToRemove.empty() ? nullptr : &FilesToRemove[0];
FilesToRemove.push_back(Filename);
// We want to call 'c_str()' on every std::string in this vector so that if
@@ -279,8 +279,8 @@ void llvm::sys::PrintStackTrace(FILE *FD) {
const char* name = strrchr(dlinfo.dli_fname, '/');
int nwidth;
- if (name == NULL) nwidth = strlen(dlinfo.dli_fname);
- else nwidth = strlen(name) - 1;
+ if (!name) nwidth = strlen(dlinfo.dli_fname);
+ else nwidth = strlen(name) - 1;
if (nwidth > width) width = nwidth;
}
@@ -292,22 +292,22 @@ void llvm::sys::PrintStackTrace(FILE *FD) {
fprintf(FD, "%-2d", i);
const char* name = strrchr(dlinfo.dli_fname, '/');
- if (name == NULL) fprintf(FD, " %-*s", width, dlinfo.dli_fname);
- else fprintf(FD, " %-*s", width, name+1);
+ if (!name) fprintf(FD, " %-*s", width, dlinfo.dli_fname);
+ else fprintf(FD, " %-*s", width, name+1);
fprintf(FD, " %#0*lx",
(int)(sizeof(void*) * 2) + 2, (unsigned long)StackTrace[i]);
- if (dlinfo.dli_sname != NULL) {
+ if (dlinfo.dli_sname != nullptr) {
fputc(' ', FD);
# if HAVE_CXXABI_H
int res;
- char* d = abi::__cxa_demangle(dlinfo.dli_sname, NULL, NULL, &res);
+ char* d = abi::__cxa_demangle(dlinfo.dli_sname, nullptr, nullptr, &res);
# else
char* d = NULL;
# endif
- if (d == NULL) fputs(dlinfo.dli_sname, FD);
- else fputs(d, FD);
+ if (!d) fputs(dlinfo.dli_sname, FD);
+ else fputs(d, FD);
free(d);
// FIXME: When we move to C++11, use %t length modifier. It's not in
@@ -331,7 +331,7 @@ static void PrintStackTraceSignalHandler(void *) {
/// PrintStackTraceOnErrorSignal - When an error signal (such as SIGABRT or
/// SIGSEGV) is delivered to the process, print a stack trace and then exit.
void llvm::sys::PrintStackTraceOnErrorSignal() {
- AddSignalHandler(PrintStackTraceSignalHandler, 0);
+ AddSignalHandler(PrintStackTraceSignalHandler, nullptr);
#if defined(__APPLE__) && defined(ENABLE_CRASH_OVERRIDES)
// Environment variable to disable any kind of crash dialog.
diff --git a/contrib/llvm/lib/Support/Unix/TimeValue.inc b/contrib/llvm/lib/Support/Unix/TimeValue.inc
index 80532b0..7d4acf7 100644
--- a/contrib/llvm/lib/Support/Unix/TimeValue.inc
+++ b/contrib/llvm/lib/Support/Unix/TimeValue.inc
@@ -26,15 +26,17 @@ std::string TimeValue::str() const {
struct tm Storage;
struct tm *LT = ::localtime_r(&OurTime, &Storage);
assert(LT);
- char Buffer[25];
- strftime(Buffer, 25, "%b %e %H:%M %Y", LT);
- return std::string(Buffer);
+ char Buffer1[sizeof("YYYY-MM-DD HH:MM:SS")];
+ strftime(Buffer1, sizeof(Buffer1), "%Y-%m-%d %H:%M:%S", LT);
+ char Buffer2[sizeof("YYYY-MM-DD HH:MM:SS.MMMUUUNNN")];
+ snprintf(Buffer2, sizeof(Buffer2), "%s.%.9u", Buffer1, this->nanoseconds());
+ return std::string(Buffer2);
}
TimeValue TimeValue::now() {
struct timeval the_time;
timerclear(&the_time);
- if (0 != ::gettimeofday(&the_time,0)) {
+ if (0 != ::gettimeofday(&the_time,nullptr)) {
// This is *really* unlikely to occur because the only gettimeofday
// errors concern the timezone parameter which we're passing in as 0.
// In the unlikely case it does happen, just return MinTime, no error
diff --git a/contrib/llvm/lib/Support/Unix/system_error.inc b/contrib/llvm/lib/Support/Unix/system_error.inc
deleted file mode 100644
index 681e919..0000000
--- a/contrib/llvm/lib/Support/Unix/system_error.inc
+++ /dev/null
@@ -1,34 +0,0 @@
-//===- llvm/Support/Unix/system_error.inc - Unix error_code ------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides the Unix specific implementation of the error_code
-// and error_condition classes.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-//=== WARNING: Implementation here must contain only generic UNIX code that
-//=== is guaranteed to work on *all* UNIX variants.
-//===----------------------------------------------------------------------===//
-
-using namespace llvm;
-
-std::string
-_system_error_category::message(int ev) const {
- return _do_message::message(ev);
-}
-
-error_condition
-_system_error_category::default_error_condition(int ev) const {
-#ifdef ELAST
- if (ev > ELAST)
- return error_condition(ev, system_category());
-#endif // ELAST
- return error_condition(ev, generic_category());
-}
diff --git a/contrib/llvm/lib/Support/Valgrind.cpp b/contrib/llvm/lib/Support/Valgrind.cpp
index 2b250a3..2c6d6aa 100644
--- a/contrib/llvm/lib/Support/Valgrind.cpp
+++ b/contrib/llvm/lib/Support/Valgrind.cpp
@@ -55,13 +55,21 @@ void llvm::sys::ValgrindDiscardTranslations(const void *Addr, size_t Len) {
#if LLVM_ENABLE_THREADS != 0 && !defined(NDEBUG)
// These functions require no implementation, tsan just looks at the arguments
-// they're called with.
+// they're called with. However, they are required to be weak as some other
+// application or library may already be providing these definitions for the
+// same reason we are.
extern "C" {
+LLVM_ATTRIBUTE_WEAK void AnnotateHappensAfter(const char *file, int line,
+ const volatile void *cv);
+void AnnotateHappensAfter(const char *file, int line, const volatile void *cv) {
+}
+LLVM_ATTRIBUTE_WEAK void AnnotateHappensBefore(const char *file, int line,
+ const volatile void *cv);
void AnnotateHappensBefore(const char *file, int line,
const volatile void *cv) {}
-void AnnotateHappensAfter(const char *file, int line,
- const volatile void *cv) {}
+LLVM_ATTRIBUTE_WEAK void AnnotateIgnoreWritesBegin(const char *file, int line);
void AnnotateIgnoreWritesBegin(const char *file, int line) {}
+LLVM_ATTRIBUTE_WEAK void AnnotateIgnoreWritesEnd(const char *file, int line);
void AnnotateIgnoreWritesEnd(const char *file, int line) {}
}
#endif
diff --git a/contrib/llvm/lib/Support/Windows/DynamicLibrary.inc b/contrib/llvm/lib/Support/Windows/DynamicLibrary.inc
index 5a7b219..5ed0b70 100644
--- a/contrib/llvm/lib/Support/Windows/DynamicLibrary.inc
+++ b/contrib/llvm/lib/Support/Windows/DynamicLibrary.inc
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "Windows.h"
+#include "WindowsSupport.h"
#ifdef __MINGW32__
#include <imagehlp.h>
@@ -58,7 +58,7 @@ extern "C" {
stricmp(ModuleName, "msvcr70") != 0 &&
#ifndef __MINGW32__
// Mingw32 uses msvcrt.dll by default. Don't ignore it.
- // Otherwise, user should be aware, what he's doing :)
+ // Otherwise the user should be aware what they are doing.
stricmp(ModuleName, "msvcrt") != 0 &&
#endif
stricmp(ModuleName, "msvcrt20") != 0 &&
@@ -85,7 +85,7 @@ DynamicLibrary DynamicLibrary::getPermanentLibrary(const char *filename,
}
SmallVector<wchar_t, MAX_PATH> filenameUnicode;
- if (error_code ec = windows::UTF8ToUTF16(filename, filenameUnicode)) {
+ if (std::error_code ec = windows::UTF8ToUTF16(filename, filenameUnicode)) {
SetLastError(ec.value());
MakeErrMsg(errMsg, std::string(filename) + ": Can't convert to UTF-16: ");
return DynamicLibrary();
diff --git a/contrib/llvm/lib/Support/Windows/Host.inc b/contrib/llvm/lib/Support/Windows/Host.inc
index 2e6d6f1..0c02bf9 100644
--- a/contrib/llvm/lib/Support/Windows/Host.inc
+++ b/contrib/llvm/lib/Support/Windows/Host.inc
@@ -11,12 +11,12 @@
//
//===----------------------------------------------------------------------===//
-#include "Windows.h"
+#include "WindowsSupport.h"
#include <cstdio>
#include <string>
using namespace llvm;
std::string sys::getDefaultTargetTriple() {
- return LLVM_DEFAULT_TARGET_TRIPLE;
+ return Triple::normalize(LLVM_DEFAULT_TARGET_TRIPLE);
}
diff --git a/contrib/llvm/lib/Support/Windows/Memory.inc b/contrib/llvm/lib/Support/Windows/Memory.inc
index 1260452..ae8371a 100644
--- a/contrib/llvm/lib/Support/Windows/Memory.inc
+++ b/contrib/llvm/lib/Support/Windows/Memory.inc
@@ -15,9 +15,10 @@
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Process.h"
+#include "llvm/Support/WindowsError.h"
// The Windows.h header must be the last one included.
-#include "Windows.h"
+#include "WindowsSupport.h"
namespace {
@@ -69,8 +70,8 @@ namespace sys {
MemoryBlock Memory::allocateMappedMemory(size_t NumBytes,
const MemoryBlock *const NearBlock,
unsigned Flags,
- error_code &EC) {
- EC = error_code::success();
+ std::error_code &EC) {
+ EC = std::error_code();
if (NumBytes == 0)
return MemoryBlock();
@@ -99,7 +100,7 @@ MemoryBlock Memory::allocateMappedMemory(size_t NumBytes,
// Try again without the NearBlock hint
return allocateMappedMemory(NumBytes, NULL, Flags, EC);
}
- EC = error_code(::GetLastError(), system_category());
+ EC = mapWindowsError(::GetLastError());
return MemoryBlock();
}
@@ -113,34 +114,34 @@ MemoryBlock Memory::allocateMappedMemory(size_t NumBytes,
return Result;
}
-error_code Memory::releaseMappedMemory(MemoryBlock &M) {
+ std::error_code Memory::releaseMappedMemory(MemoryBlock &M) {
if (M.Address == 0 || M.Size == 0)
- return error_code::success();
+ return std::error_code();
if (!VirtualFree(M.Address, 0, MEM_RELEASE))
- return error_code(::GetLastError(), system_category());
+ return mapWindowsError(::GetLastError());
M.Address = 0;
M.Size = 0;
- return error_code::success();
+ return std::error_code();
}
-error_code Memory::protectMappedMemory(const MemoryBlock &M,
+ std::error_code Memory::protectMappedMemory(const MemoryBlock &M,
unsigned Flags) {
if (M.Address == 0 || M.Size == 0)
- return error_code::success();
+ return std::error_code();
DWORD Protect = getWindowsProtectionFlags(Flags);
DWORD OldFlags;
if (!VirtualProtect(M.Address, M.Size, Protect, &OldFlags))
- return error_code(::GetLastError(), system_category());
+ return mapWindowsError(::GetLastError());
if (Flags & MF_EXEC)
Memory::InvalidateInstructionCache(M.Address, M.Size);
- return error_code::success();
+ return std::error_code();
}
/// InvalidateInstructionCache - Before the JIT can run a block of code
@@ -156,18 +157,18 @@ MemoryBlock Memory::AllocateRWX(size_t NumBytes,
const MemoryBlock *NearBlock,
std::string *ErrMsg) {
MemoryBlock MB;
- error_code EC;
+ std::error_code EC;
MB = allocateMappedMemory(NumBytes, NearBlock,
MF_READ|MF_WRITE|MF_EXEC, EC);
- if (EC != error_code::success() && ErrMsg) {
+ if (EC != std::error_code() && ErrMsg) {
MakeErrMsg(ErrMsg, EC.message());
}
return MB;
}
bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
- error_code EC = releaseMappedMemory(M);
- if (EC == error_code::success())
+ std::error_code EC = releaseMappedMemory(M);
+ if (EC == std::error_code())
return false;
MakeErrMsg(ErrMsg, EC.message());
return true;
diff --git a/contrib/llvm/lib/Support/Windows/Mutex.inc b/contrib/llvm/lib/Support/Windows/Mutex.inc
index 583dc63..ab79d07 100644
--- a/contrib/llvm/lib/Support/Windows/Mutex.inc
+++ b/contrib/llvm/lib/Support/Windows/Mutex.inc
@@ -16,7 +16,7 @@
//=== is guaranteed to work on *all* Win32 variants.
//===----------------------------------------------------------------------===//
-#include "Windows.h"
+#include "WindowsSupport.h"
#include "llvm/Support/Mutex.h"
namespace llvm {
diff --git a/contrib/llvm/lib/Support/Windows/Path.inc b/contrib/llvm/lib/Support/Windows/Path.inc
index 0b39198..7a1bc0447 100644
--- a/contrib/llvm/lib/Support/Windows/Path.inc
+++ b/contrib/llvm/lib/Support/Windows/Path.inc
@@ -17,12 +17,17 @@
//===----------------------------------------------------------------------===//
#include "llvm/ADT/STLExtras.h"
-#include "Windows.h"
+#include "llvm/Support/WindowsError.h"
#include <fcntl.h>
#include <io.h>
#include <sys/stat.h>
#include <sys/types.h>
+// These two headers must be included last; shlobj.h must come after Windows.h
+// so that it picks up our definition of _WIN32_WINNT.
+#include "WindowsSupport.h"
+#include <shlobj.h>
+
#undef max
// MinGW doesn't define this.
@@ -40,175 +45,35 @@ using namespace llvm;
using llvm::sys::windows::UTF8ToUTF16;
using llvm::sys::windows::UTF16ToUTF8;
-namespace {
- typedef BOOLEAN (WINAPI *PtrCreateSymbolicLinkW)(
- /*__in*/ LPCWSTR lpSymlinkFileName,
- /*__in*/ LPCWSTR lpTargetFileName,
- /*__in*/ DWORD dwFlags);
-
- PtrCreateSymbolicLinkW create_symbolic_link_api =
- PtrCreateSymbolicLinkW(::GetProcAddress(
- ::GetModuleHandleW(L"Kernel32.dll"), "CreateSymbolicLinkW"));
-
- error_code TempDir(SmallVectorImpl<wchar_t> &result) {
- retry_temp_dir:
- DWORD len = ::GetTempPathW(result.capacity(), result.begin());
-
- if (len == 0)
- return windows_error(::GetLastError());
-
- if (len > result.capacity()) {
- result.reserve(len);
- goto retry_temp_dir;
- }
-
- result.set_size(len);
- return error_code::success();
- }
-
- bool is_separator(const wchar_t value) {
- switch (value) {
- case L'\\':
- case L'/':
- return true;
- default:
- return false;
- }
- }
+static std::error_code windows_error(DWORD E) {
+ return mapWindowsError(E);
}
-// FIXME: mode should be used here and default to user r/w only,
-// it currently comes in as a UNIX mode.
-static error_code createUniqueEntity(const Twine &model, int &result_fd,
- SmallVectorImpl<char> &result_path,
- bool makeAbsolute, unsigned mode,
- FSEntity Type) {
- // Use result_path as temp storage.
- result_path.set_size(0);
- StringRef m = model.toStringRef(result_path);
-
- SmallVector<wchar_t, 128> model_utf16;
- if (error_code ec = UTF8ToUTF16(m, model_utf16)) return ec;
-
- if (makeAbsolute) {
- // Make model absolute by prepending a temp directory if it's not already.
- bool absolute = sys::path::is_absolute(m);
-
- if (!absolute) {
- SmallVector<wchar_t, 64> temp_dir;
- if (error_code ec = TempDir(temp_dir)) return ec;
- // Handle c: by removing it.
- if (model_utf16.size() > 2 && model_utf16[1] == L':') {
- model_utf16.erase(model_utf16.begin(), model_utf16.begin() + 2);
- }
- model_utf16.insert(model_utf16.begin(), temp_dir.begin(), temp_dir.end());
- }
- }
+static std::error_code TempDir(SmallVectorImpl<char> &Result) {
+ SmallVector<wchar_t, 64> Res;
+retry_temp_dir:
+ DWORD Len = ::GetTempPathW(Res.capacity(), Res.begin());
- // Replace '%' with random chars. From here on, DO NOT modify model. It may be
- // needed if the randomly chosen path already exists.
- SmallVector<wchar_t, 128> random_path_utf16;
-
- // Get a Crypto Provider for CryptGenRandom.
- HCRYPTPROV HCPC;
- if (!::CryptAcquireContextW(&HCPC,
- NULL,
- NULL,
- PROV_RSA_FULL,
- CRYPT_VERIFYCONTEXT))
+ if (Len == 0)
return windows_error(::GetLastError());
- ScopedCryptContext CryptoProvider(HCPC);
-
-retry_random_path:
- random_path_utf16.set_size(0);
- for (SmallVectorImpl<wchar_t>::const_iterator i = model_utf16.begin(),
- e = model_utf16.end();
- i != e; ++i) {
- if (*i == L'%') {
- BYTE val = 0;
- if (!::CryptGenRandom(CryptoProvider, 1, &val))
- return windows_error(::GetLastError());
- random_path_utf16.push_back(L"0123456789abcdef"[val & 15]);
- }
- else
- random_path_utf16.push_back(*i);
- }
- // Make random_path_utf16 null terminated.
- random_path_utf16.push_back(0);
- random_path_utf16.pop_back();
-
- HANDLE TempFileHandle = INVALID_HANDLE_VALUE;
-
- switch (Type) {
- case FS_File: {
- // Try to create + open the path.
- TempFileHandle =
- ::CreateFileW(random_path_utf16.begin(), GENERIC_READ | GENERIC_WRITE,
- FILE_SHARE_READ, NULL,
- // Return ERROR_FILE_EXISTS if the file
- // already exists.
- CREATE_NEW, FILE_ATTRIBUTE_TEMPORARY, NULL);
- if (TempFileHandle == INVALID_HANDLE_VALUE) {
- // If the file existed, try again, otherwise, error.
- error_code ec = windows_error(::GetLastError());
- if (ec == windows_error::file_exists)
- goto retry_random_path;
-
- return ec;
- }
-
- // Convert the Windows API file handle into a C-runtime handle.
- int fd = ::_open_osfhandle(intptr_t(TempFileHandle), 0);
- if (fd == -1) {
- ::CloseHandle(TempFileHandle);
- ::DeleteFileW(random_path_utf16.begin());
- // MSDN doesn't say anything about _open_osfhandle setting errno or
- // GetLastError(), so just return invalid_handle.
- return windows_error::invalid_handle;
- }
- result_fd = fd;
- break;
+ if (Len > Res.capacity()) {
+ Res.reserve(Len);
+ goto retry_temp_dir;
}
- case FS_Name: {
- DWORD attributes = ::GetFileAttributesW(random_path_utf16.begin());
- if (attributes != INVALID_FILE_ATTRIBUTES)
- goto retry_random_path;
- error_code EC = make_error_code(windows_error(::GetLastError()));
- if (EC != windows_error::file_not_found &&
- EC != windows_error::path_not_found)
- return EC;
- break;
- }
-
- case FS_Dir:
- if (!::CreateDirectoryW(random_path_utf16.begin(), NULL)) {
- error_code EC = windows_error(::GetLastError());
- if (EC != windows_error::already_exists)
- return EC;
- goto retry_random_path;
- }
- break;
- }
+ Res.set_size(Len);
+ return UTF16ToUTF8(Res.begin(), Res.size(), Result);
+}
- // Set result_path to the utf-8 representation of the path.
- if (error_code ec = UTF16ToUTF8(random_path_utf16.begin(),
- random_path_utf16.size(), result_path)) {
- switch (Type) {
- case FS_File:
- ::CloseHandle(TempFileHandle);
- ::DeleteFileW(random_path_utf16.begin());
- case FS_Name:
- break;
- case FS_Dir:
- ::RemoveDirectoryW(random_path_utf16.begin());
- break;
- }
- return ec;
+static bool is_separator(const wchar_t value) {
+ switch (value) {
+ case L'\\':
+ case L'/':
+ return true;
+ default:
+ return false;
}
-
- return error_code::success();
}
namespace llvm {
@@ -259,7 +124,7 @@ TimeValue file_status::getLastModificationTime() const {
return Ret;
}
-error_code current_path(SmallVectorImpl<char> &result) {
+std::error_code current_path(SmallVectorImpl<char> &result) {
SmallVector<wchar_t, MAX_PATH> cur_path;
DWORD len = MAX_PATH;
@@ -281,50 +146,30 @@ error_code current_path(SmallVectorImpl<char> &result) {
return UTF16ToUTF8(cur_path.begin(), cur_path.size(), result);
}
-error_code create_directory(const Twine &path, bool &existed) {
+std::error_code create_directory(const Twine &path, bool IgnoreExisting) {
SmallString<128> path_storage;
SmallVector<wchar_t, 128> path_utf16;
- if (error_code ec = UTF8ToUTF16(path.toStringRef(path_storage),
- path_utf16))
+ if (std::error_code ec =
+ UTF8ToUTF16(path.toStringRef(path_storage), path_utf16))
return ec;
if (!::CreateDirectoryW(path_utf16.begin(), NULL)) {
- error_code ec = windows_error(::GetLastError());
- if (ec == windows_error::already_exists)
- existed = true;
- else
- return ec;
- } else
- existed = false;
+ DWORD LastError = ::GetLastError();
+ if (LastError != ERROR_ALREADY_EXISTS || !IgnoreExisting)
+ return windows_error(LastError);
+ }
- return error_code::success();
+ return std::error_code();
}
-error_code create_hard_link(const Twine &to, const Twine &from) {
- // Get arguments.
- SmallString<128> from_storage;
- SmallString<128> to_storage;
- StringRef f = from.toStringRef(from_storage);
- StringRef t = to.toStringRef(to_storage);
-
- // Convert to utf-16.
- SmallVector<wchar_t, 128> wide_from;
- SmallVector<wchar_t, 128> wide_to;
- if (error_code ec = UTF8ToUTF16(f, wide_from)) return ec;
- if (error_code ec = UTF8ToUTF16(t, wide_to)) return ec;
-
- if (!::CreateHardLinkW(wide_from.begin(), wide_to.begin(), NULL))
- return windows_error(::GetLastError());
-
- return error_code::success();
+std::error_code normalize_separators(SmallVectorImpl<char> &Path) {
+ (void) Path;
+ return std::error_code();
}
-error_code create_symlink(const Twine &to, const Twine &from) {
- // Only do it if the function is available at runtime.
- if (!create_symbolic_link_api)
- return make_error_code(errc::function_not_supported);
-
+// We can't use symbolic links on Windows.
+std::error_code create_link(const Twine &to, const Twine &from) {
// Get arguments.
SmallString<128> from_storage;
SmallString<128> to_storage;
@@ -334,56 +179,49 @@ error_code create_symlink(const Twine &to, const Twine &from) {
// Convert to utf-16.
SmallVector<wchar_t, 128> wide_from;
SmallVector<wchar_t, 128> wide_to;
- if (error_code ec = UTF8ToUTF16(f, wide_from)) return ec;
- if (error_code ec = UTF8ToUTF16(t, wide_to)) return ec;
+ if (std::error_code ec = UTF8ToUTF16(f, wide_from))
+ return ec;
+ if (std::error_code ec = UTF8ToUTF16(t, wide_to))
+ return ec;
- if (!create_symbolic_link_api(wide_from.begin(), wide_to.begin(), 0))
+ if (!::CreateHardLinkW(wide_from.begin(), wide_to.begin(), NULL))
return windows_error(::GetLastError());
- return error_code::success();
+ return std::error_code();
}
-error_code remove(const Twine &path, bool &existed) {
+std::error_code remove(const Twine &path, bool IgnoreNonExisting) {
SmallString<128> path_storage;
SmallVector<wchar_t, 128> path_utf16;
- file_status st;
- error_code EC = status(path, st);
- if (EC) {
- if (EC == windows_error::file_not_found ||
- EC == windows_error::path_not_found) {
- existed = false;
- return error_code::success();
- }
- return EC;
+ file_status ST;
+ if (std::error_code EC = status(path, ST)) {
+ if (EC != errc::no_such_file_or_directory || !IgnoreNonExisting)
+ return EC;
+ return std::error_code();
}
- if (error_code ec = UTF8ToUTF16(path.toStringRef(path_storage),
- path_utf16))
+ if (std::error_code ec =
+ UTF8ToUTF16(path.toStringRef(path_storage), path_utf16))
return ec;
- if (st.type() == file_type::directory_file) {
+ if (ST.type() == file_type::directory_file) {
if (!::RemoveDirectoryW(c_str(path_utf16))) {
- error_code ec = windows_error(::GetLastError());
- if (ec != windows_error::file_not_found)
- return ec;
- existed = false;
- } else
- existed = true;
- } else {
- if (!::DeleteFileW(c_str(path_utf16))) {
- error_code ec = windows_error(::GetLastError());
- if (ec != windows_error::file_not_found)
- return ec;
- existed = false;
- } else
- existed = true;
+ std::error_code EC = windows_error(::GetLastError());
+ if (EC != errc::no_such_file_or_directory || !IgnoreNonExisting)
+ return EC;
+ }
+ return std::error_code();
}
-
- return error_code::success();
+ if (!::DeleteFileW(c_str(path_utf16))) {
+ std::error_code EC = windows_error(::GetLastError());
+ if (EC != errc::no_such_file_or_directory || !IgnoreNonExisting)
+ return EC;
+ }
+ return std::error_code();
}
-error_code rename(const Twine &from, const Twine &to) {
+std::error_code rename(const Twine &from, const Twine &to) {
// Get arguments.
SmallString<128> from_storage;
SmallString<128> to_storage;
@@ -393,16 +231,18 @@ error_code rename(const Twine &from, const Twine &to) {
// Convert to utf-16.
SmallVector<wchar_t, 128> wide_from;
SmallVector<wchar_t, 128> wide_to;
- if (error_code ec = UTF8ToUTF16(f, wide_from)) return ec;
- if (error_code ec = UTF8ToUTF16(t, wide_to)) return ec;
+ if (std::error_code ec = UTF8ToUTF16(f, wide_from))
+ return ec;
+ if (std::error_code ec = UTF8ToUTF16(t, wide_to))
+ return ec;
- error_code ec = error_code::success();
+ std::error_code ec = std::error_code();
for (int i = 0; i < 2000; i++) {
if (::MoveFileExW(wide_from.begin(), wide_to.begin(),
MOVEFILE_COPY_ALLOWED | MOVEFILE_REPLACE_EXISTING))
- return error_code::success();
- ec = windows_error(::GetLastError());
- if (ec != windows_error::access_denied)
+ return std::error_code();
+ DWORD LastError = ::GetLastError();
+ if (LastError != ERROR_ACCESS_DENIED)
break;
// Retry MoveFile() at ACCESS_DENIED.
// System scanners (eg. indexer) might open the source file when
@@ -413,46 +253,46 @@ error_code rename(const Twine &from, const Twine &to) {
return ec;
}
-error_code resize_file(const Twine &path, uint64_t size) {
+std::error_code resize_file(const Twine &path, uint64_t size) {
SmallString<128> path_storage;
SmallVector<wchar_t, 128> path_utf16;
- if (error_code ec = UTF8ToUTF16(path.toStringRef(path_storage),
- path_utf16))
+ if (std::error_code ec =
+ UTF8ToUTF16(path.toStringRef(path_storage), path_utf16))
return ec;
int fd = ::_wopen(path_utf16.begin(), O_BINARY | _O_RDWR, S_IWRITE);
if (fd == -1)
- return error_code(errno, generic_category());
+ return std::error_code(errno, std::generic_category());
#ifdef HAVE__CHSIZE_S
errno_t error = ::_chsize_s(fd, size);
#else
errno_t error = ::_chsize(fd, size);
#endif
::close(fd);
- return error_code(error, generic_category());
+ return std::error_code(error, std::generic_category());
}
-error_code exists(const Twine &path, bool &result) {
+std::error_code exists(const Twine &path, bool &result) {
SmallString<128> path_storage;
SmallVector<wchar_t, 128> path_utf16;
- if (error_code ec = UTF8ToUTF16(path.toStringRef(path_storage),
- path_utf16))
+ if (std::error_code ec =
+ UTF8ToUTF16(path.toStringRef(path_storage), path_utf16))
return ec;
DWORD attributes = ::GetFileAttributesW(path_utf16.begin());
if (attributes == INVALID_FILE_ATTRIBUTES) {
// See if the file didn't actually exist.
- error_code ec = make_error_code(windows_error(::GetLastError()));
- if (ec != windows_error::file_not_found &&
- ec != windows_error::path_not_found)
- return ec;
+ DWORD LastError = ::GetLastError();
+ if (LastError != ERROR_FILE_NOT_FOUND &&
+ LastError != ERROR_PATH_NOT_FOUND)
+ return windows_error(LastError);
result = false;
} else
result = true;
- return error_code::success();
+ return std::error_code();
}
bool can_write(const Twine &Path) {
@@ -489,12 +329,14 @@ bool equivalent(file_status A, file_status B) {
A.VolumeSerialNumber == B.VolumeSerialNumber;
}
-error_code equivalent(const Twine &A, const Twine &B, bool &result) {
+std::error_code equivalent(const Twine &A, const Twine &B, bool &result) {
file_status fsA, fsB;
- if (error_code ec = status(A, fsA)) return ec;
- if (error_code ec = status(B, fsB)) return ec;
+ if (std::error_code ec = status(A, fsA))
+ return ec;
+ if (std::error_code ec = status(B, fsB))
+ return ec;
result = equivalent(fsA, fsB);
- return error_code::success();
+ return std::error_code();
}
static bool isReservedName(StringRef path) {
@@ -520,7 +362,7 @@ static bool isReservedName(StringRef path) {
return false;
}
-static error_code getStatus(HANDLE FileHandle, file_status &Result) {
+static std::error_code getStatus(HANDLE FileHandle, file_status &Result) {
if (FileHandle == INVALID_HANDLE_VALUE)
goto handle_status_error;
@@ -532,16 +374,16 @@ static error_code getStatus(HANDLE FileHandle, file_status &Result) {
if (Err != NO_ERROR)
return windows_error(Err);
Result = file_status(file_type::type_unknown);
- return error_code::success();
+ return std::error_code();
}
case FILE_TYPE_DISK:
break;
case FILE_TYPE_CHAR:
Result = file_status(file_type::character_file);
- return error_code::success();
+ return std::error_code();
case FILE_TYPE_PIPE:
Result = file_status(file_type::fifo_file);
- return error_code::success();
+ return std::error_code();
}
BY_HANDLE_FILE_INFORMATION Info;
@@ -557,32 +399,32 @@ static error_code getStatus(HANDLE FileHandle, file_status &Result) {
Info.ftLastWriteTime.dwLowDateTime,
Info.dwVolumeSerialNumber, Info.nFileSizeHigh,
Info.nFileSizeLow, Info.nFileIndexHigh, Info.nFileIndexLow);
- return error_code::success();
+ return std::error_code();
}
handle_status_error:
- error_code EC = windows_error(::GetLastError());
- if (EC == windows_error::file_not_found ||
- EC == windows_error::path_not_found)
+ DWORD LastError = ::GetLastError();
+ if (LastError == ERROR_FILE_NOT_FOUND ||
+ LastError == ERROR_PATH_NOT_FOUND)
Result = file_status(file_type::file_not_found);
- else if (EC == windows_error::sharing_violation)
+ else if (LastError == ERROR_SHARING_VIOLATION)
Result = file_status(file_type::type_unknown);
else
Result = file_status(file_type::status_error);
- return EC;
+ return windows_error(LastError);
}
-error_code status(const Twine &path, file_status &result) {
+std::error_code status(const Twine &path, file_status &result) {
SmallString<128> path_storage;
SmallVector<wchar_t, 128> path_utf16;
StringRef path8 = path.toStringRef(path_storage);
if (isReservedName(path8)) {
result = file_status(file_type::character_file);
- return error_code::success();
+ return std::error_code();
}
- if (error_code ec = UTF8ToUTF16(path8, path_utf16))
+ if (std::error_code ec = UTF8ToUTF16(path8, path_utf16))
return ec;
DWORD attr = ::GetFileAttributesW(path_utf16.begin());
@@ -613,12 +455,12 @@ error_code status(const Twine &path, file_status &result) {
return getStatus(h, result);
}
-error_code status(int FD, file_status &Result) {
+std::error_code status(int FD, file_status &Result) {
HANDLE FileHandle = reinterpret_cast<HANDLE>(_get_osfhandle(FD));
return getStatus(FileHandle, Result);
}
-error_code setLastModificationAndAccessTime(int FD, TimeValue Time) {
+std::error_code setLastModificationAndAccessTime(int FD, TimeValue Time) {
ULARGE_INTEGER UI;
UI.QuadPart = Time.toWin32Time();
FILETIME FT;
@@ -627,52 +469,10 @@ error_code setLastModificationAndAccessTime(int FD, TimeValue Time) {
HANDLE FileHandle = reinterpret_cast<HANDLE>(_get_osfhandle(FD));
if (!SetFileTime(FileHandle, NULL, &FT, &FT))
return windows_error(::GetLastError());
- return error_code::success();
-}
-
-error_code get_magic(const Twine &path, uint32_t len,
- SmallVectorImpl<char> &result) {
- SmallString<128> path_storage;
- SmallVector<wchar_t, 128> path_utf16;
- result.set_size(0);
-
- // Convert path to UTF-16.
- if (error_code ec = UTF8ToUTF16(path.toStringRef(path_storage),
- path_utf16))
- return ec;
-
- // Open file.
- HANDLE file = ::CreateFileW(c_str(path_utf16),
- GENERIC_READ,
- FILE_SHARE_READ,
- NULL,
- OPEN_EXISTING,
- FILE_ATTRIBUTE_READONLY,
- NULL);
- if (file == INVALID_HANDLE_VALUE)
- return windows_error(::GetLastError());
-
- // Allocate buffer.
- result.reserve(len);
-
- // Get magic!
- DWORD bytes_read = 0;
- BOOL read_success = ::ReadFile(file, result.data(), len, &bytes_read, NULL);
- error_code ec = windows_error(::GetLastError());
- ::CloseHandle(file);
- if (!read_success || (bytes_read != len)) {
- // Set result size to the number of bytes read if it's valid.
- if (bytes_read <= len)
- result.set_size(bytes_read);
- // ERROR_HANDLE_EOF is mapped to errc::value_too_large.
- return ec;
- }
-
- result.set_size(len);
- return error_code::success();
+ return std::error_code();
}
-error_code mapped_file_region::init(int FD, bool CloseFD, uint64_t Offset) {
+std::error_code mapped_file_region::init(int FD, bool CloseFD, uint64_t Offset) {
FileDescriptor = FD;
// Make sure that the requested size fits within SIZE_T.
if (Size > std::numeric_limits<SIZE_T>::max()) {
@@ -697,7 +497,7 @@ error_code mapped_file_region::init(int FD, bool CloseFD, uint64_t Offset) {
(Offset + Size) & 0xffffffff,
0);
if (FileMappingHandle == NULL) {
- error_code ec = windows_error(GetLastError());
+ std::error_code ec = windows_error(GetLastError());
if (FileDescriptor) {
if (CloseFD)
_close(FileDescriptor);
@@ -718,7 +518,7 @@ error_code mapped_file_region::init(int FD, bool CloseFD, uint64_t Offset) {
Offset & 0xffffffff,
Size);
if (Mapping == NULL) {
- error_code ec = windows_error(GetLastError());
+ std::error_code ec = windows_error(GetLastError());
::CloseHandle(FileMappingHandle);
if (FileDescriptor) {
if (CloseFD)
@@ -732,7 +532,7 @@ error_code mapped_file_region::init(int FD, bool CloseFD, uint64_t Offset) {
MEMORY_BASIC_INFORMATION mbi;
SIZE_T Result = VirtualQuery(Mapping, &mbi, sizeof(mbi));
if (Result == 0) {
- error_code ec = windows_error(GetLastError());
+ std::error_code ec = windows_error(GetLastError());
::UnmapViewOfFile(Mapping);
::CloseHandle(FileMappingHandle);
if (FileDescriptor) {
@@ -753,14 +553,14 @@ error_code mapped_file_region::init(int FD, bool CloseFD, uint64_t Offset) {
_close(FileDescriptor); // Also closes FileHandle.
} else
::CloseHandle(FileHandle);
- return error_code::success();
+ return std::error_code();
}
mapped_file_region::mapped_file_region(const Twine &path,
mapmode mode,
uint64_t length,
uint64_t offset,
- error_code &ec)
+ std::error_code &ec)
: Mode(mode)
, Size(length)
, Mapping()
@@ -805,7 +605,7 @@ mapped_file_region::mapped_file_region(int fd,
mapmode mode,
uint64_t length,
uint64_t offset,
- error_code &ec)
+ std::error_code &ec)
: Mode(mode)
, Size(length)
, Mapping()
@@ -834,7 +634,6 @@ mapped_file_region::~mapped_file_region() {
::UnmapViewOfFile(Mapping);
}
-#if LLVM_HAS_RVALUE_REFERENCES
mapped_file_region::mapped_file_region(mapped_file_region &&other)
: Mode(other.Mode)
, Size(other.Size)
@@ -846,7 +645,6 @@ mapped_file_region::mapped_file_region(mapped_file_region &&other)
other.FileHandle = INVALID_HANDLE_VALUE;
other.FileDescriptor = 0;
}
-#endif
mapped_file_region::mapmode mapped_file_region::flags() const {
assert(Mapping && "Mapping failed but used anyway!");
@@ -859,7 +657,7 @@ uint64_t mapped_file_region::size() const {
}
char *mapped_file_region::data() const {
- assert(Mode != readonly && "Cannot get non const data for readonly mapping!");
+ assert(Mode != readonly && "Cannot get non-const data for readonly mapping!");
assert(Mapping && "Mapping failed but used anyway!");
return reinterpret_cast<char*>(Mapping);
}
@@ -875,12 +673,11 @@ int mapped_file_region::alignment() {
return SysInfo.dwAllocationGranularity;
}
-error_code detail::directory_iterator_construct(detail::DirIterState &it,
+std::error_code detail::directory_iterator_construct(detail::DirIterState &it,
StringRef path){
SmallVector<wchar_t, 128> path_utf16;
- if (error_code ec = UTF8ToUTF16(path,
- path_utf16))
+ if (std::error_code ec = UTF8ToUTF16(path, path_utf16))
return ec;
// Convert path to the format that Windows is happy with.
@@ -904,19 +701,19 @@ error_code detail::directory_iterator_construct(detail::DirIterState &it,
(FilenameLen == 2 && FirstFind.cFileName[0] == L'.' &&
FirstFind.cFileName[1] == L'.'))
if (!::FindNextFileW(FindHandle, &FirstFind)) {
- error_code ec = windows_error(::GetLastError());
+ DWORD LastError = ::GetLastError();
// Check for end.
- if (ec == windows_error::no_more_files)
+ if (LastError == ERROR_NO_MORE_FILES)
return detail::directory_iterator_destruct(it);
- return ec;
+ return windows_error(LastError);
} else
FilenameLen = ::wcslen(FirstFind.cFileName);
// Construct the current directory entry.
SmallString<128> directory_entry_name_utf8;
- if (error_code ec = UTF16ToUTF8(FirstFind.cFileName,
- ::wcslen(FirstFind.cFileName),
- directory_entry_name_utf8))
+ if (std::error_code ec =
+ UTF16ToUTF8(FirstFind.cFileName, ::wcslen(FirstFind.cFileName),
+ directory_entry_name_utf8))
return ec;
it.IterationHandle = intptr_t(FindHandle.take());
@@ -924,26 +721,26 @@ error_code detail::directory_iterator_construct(detail::DirIterState &it,
path::append(directory_entry_path, directory_entry_name_utf8.str());
it.CurrentEntry = directory_entry(directory_entry_path.str());
- return error_code::success();
+ return std::error_code();
}
-error_code detail::directory_iterator_destruct(detail::DirIterState &it) {
+std::error_code detail::directory_iterator_destruct(detail::DirIterState &it) {
if (it.IterationHandle != 0)
// Closes the handle if it's valid.
ScopedFindHandle close(HANDLE(it.IterationHandle));
it.IterationHandle = 0;
it.CurrentEntry = directory_entry();
- return error_code::success();
+ return std::error_code();
}
-error_code detail::directory_iterator_increment(detail::DirIterState &it) {
+std::error_code detail::directory_iterator_increment(detail::DirIterState &it) {
WIN32_FIND_DATAW FindData;
if (!::FindNextFileW(HANDLE(it.IterationHandle), &FindData)) {
- error_code ec = windows_error(::GetLastError());
+ DWORD LastError = ::GetLastError();
// Check for end.
- if (ec == windows_error::no_more_files)
+ if (LastError == ERROR_NO_MORE_FILES)
return detail::directory_iterator_destruct(it);
- return ec;
+ return windows_error(LastError);
}
size_t FilenameLen = ::wcslen(FindData.cFileName);
@@ -953,60 +750,50 @@ error_code detail::directory_iterator_increment(detail::DirIterState &it) {
return directory_iterator_increment(it);
SmallString<128> directory_entry_path_utf8;
- if (error_code ec = UTF16ToUTF8(FindData.cFileName,
- ::wcslen(FindData.cFileName),
- directory_entry_path_utf8))
+ if (std::error_code ec =
+ UTF16ToUTF8(FindData.cFileName, ::wcslen(FindData.cFileName),
+ directory_entry_path_utf8))
return ec;
it.CurrentEntry.replace_filename(Twine(directory_entry_path_utf8));
- return error_code::success();
-}
-
-error_code map_file_pages(const Twine &path, off_t file_offset, size_t size,
- bool map_writable, void *&result) {
- assert(0 && "NOT IMPLEMENTED");
- return windows_error::invalid_function;
+ return std::error_code();
}
-error_code unmap_file_pages(void *base, size_t size) {
- assert(0 && "NOT IMPLEMENTED");
- return windows_error::invalid_function;
-}
-
-error_code openFileForRead(const Twine &Name, int &ResultFD) {
+std::error_code openFileForRead(const Twine &Name, int &ResultFD) {
SmallString<128> PathStorage;
SmallVector<wchar_t, 128> PathUTF16;
- if (error_code EC = UTF8ToUTF16(Name.toStringRef(PathStorage),
- PathUTF16))
+ if (std::error_code EC =
+ UTF8ToUTF16(Name.toStringRef(PathStorage), PathUTF16))
return EC;
HANDLE H = ::CreateFileW(PathUTF16.begin(), GENERIC_READ,
FILE_SHARE_READ | FILE_SHARE_WRITE, NULL,
OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
if (H == INVALID_HANDLE_VALUE) {
- error_code EC = windows_error(::GetLastError());
+ DWORD LastError = ::GetLastError();
+ std::error_code EC = windows_error(LastError);
// Provide a better error message when trying to open directories.
// This only runs if we failed to open the file, so there are probably
// no performance issues.
- if (EC != windows_error::access_denied)
+ if (LastError != ERROR_ACCESS_DENIED)
return EC;
if (is_directory(Name))
- return error_code(errc::is_a_directory, posix_category());
+ return make_error_code(errc::is_a_directory);
return EC;
}
int FD = ::_open_osfhandle(intptr_t(H), 0);
if (FD == -1) {
::CloseHandle(H);
- return windows_error::invalid_handle;
+ return windows_error(ERROR_INVALID_HANDLE);
}
ResultFD = FD;
- return error_code::success();
+ return std::error_code();
}
-error_code openFileForWrite(const Twine &Name, int &ResultFD,
+std::error_code openFileForWrite(const Twine &Name, int &ResultFD,
sys::fs::OpenFlags Flags, unsigned Mode) {
// Verify that we don't have both "append" and "excl".
assert((!(Flags & sys::fs::F_Excl) || !(Flags & sys::fs::F_Append)) &&
@@ -1015,8 +802,8 @@ error_code openFileForWrite(const Twine &Name, int &ResultFD,
SmallString<128> PathStorage;
SmallVector<wchar_t, 128> PathUTF16;
- if (error_code EC = UTF8ToUTF16(Name.toStringRef(PathStorage),
- PathUTF16))
+ if (std::error_code EC =
+ UTF8ToUTF16(Name.toStringRef(PathStorage), PathUTF16))
return EC;
DWORD CreationDisposition;
@@ -1027,19 +814,24 @@ error_code openFileForWrite(const Twine &Name, int &ResultFD,
else
CreationDisposition = CREATE_ALWAYS;
- HANDLE H = ::CreateFileW(PathUTF16.begin(), GENERIC_WRITE,
+ DWORD Access = GENERIC_WRITE;
+ if (Flags & F_RW)
+ Access |= GENERIC_READ;
+
+ HANDLE H = ::CreateFileW(PathUTF16.begin(), Access,
FILE_SHARE_READ | FILE_SHARE_WRITE, NULL,
CreationDisposition, FILE_ATTRIBUTE_NORMAL, NULL);
if (H == INVALID_HANDLE_VALUE) {
- error_code EC = windows_error(::GetLastError());
+ DWORD LastError = ::GetLastError();
+ std::error_code EC = windows_error(LastError);
// Provide a better error message when trying to open directories.
// This only runs if we failed to open the file, so there are probably
// no performance issues.
- if (EC != windows_error::access_denied)
+ if (LastError != ERROR_ACCESS_DENIED)
return EC;
if (is_directory(Name))
- return error_code(errc::is_a_directory, posix_category());
+ return make_error_code(errc::is_a_directory);
return EC;
}
@@ -1047,75 +839,89 @@ error_code openFileForWrite(const Twine &Name, int &ResultFD,
if (Flags & F_Append)
OpenFlags |= _O_APPEND;
- if (!(Flags & F_Binary))
+ if (Flags & F_Text)
OpenFlags |= _O_TEXT;
int FD = ::_open_osfhandle(intptr_t(H), OpenFlags);
if (FD == -1) {
::CloseHandle(H);
- return windows_error::invalid_handle;
+ return windows_error(ERROR_INVALID_HANDLE);
}
ResultFD = FD;
- return error_code::success();
+ return std::error_code();
}
} // end namespace fs
+namespace path {
+
+bool home_directory(SmallVectorImpl<char> &result) {
+ wchar_t Path[MAX_PATH];
+ if (::SHGetFolderPathW(0, CSIDL_APPDATA | CSIDL_FLAG_CREATE, 0,
+ /*SHGFP_TYPE_CURRENT*/0, Path) != S_OK)
+ return false;
+
+ if (UTF16ToUTF8(Path, ::wcslen(Path), result))
+ return false;
+
+ return true;
+}
+
+} // end namespace path
+
namespace windows {
-llvm::error_code UTF8ToUTF16(llvm::StringRef utf8,
- llvm::SmallVectorImpl<wchar_t> &utf16) {
- int len = ::MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS,
- utf8.begin(), utf8.size(),
- utf16.begin(), 0);
+std::error_code UTF8ToUTF16(llvm::StringRef utf8,
+ llvm::SmallVectorImpl<wchar_t> &utf16) {
+ if (!utf8.empty()) {
+ int len = ::MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, utf8.begin(),
+ utf8.size(), utf16.begin(), 0);
- if (len == 0)
- return llvm::windows_error(::GetLastError());
+ if (len == 0)
+ return windows_error(::GetLastError());
- utf16.reserve(len + 1);
- utf16.set_size(len);
+ utf16.reserve(len + 1);
+ utf16.set_size(len);
- len = ::MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS,
- utf8.begin(), utf8.size(),
- utf16.begin(), utf16.size());
+ len = ::MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, utf8.begin(),
+ utf8.size(), utf16.begin(), utf16.size());
- if (len == 0)
- return llvm::windows_error(::GetLastError());
+ if (len == 0)
+ return windows_error(::GetLastError());
+ }
// Make utf16 null terminated.
utf16.push_back(0);
utf16.pop_back();
- return llvm::error_code::success();
+ return std::error_code();
}
-llvm::error_code UTF16ToUTF8(const wchar_t *utf16, size_t utf16_len,
- llvm::SmallVectorImpl<char> &utf8) {
- // Get length.
- int len = ::WideCharToMultiByte(CP_UTF8, 0,
- utf16, utf16_len,
- utf8.begin(), 0,
- NULL, NULL);
+std::error_code UTF16ToUTF8(const wchar_t *utf16, size_t utf16_len,
+ llvm::SmallVectorImpl<char> &utf8) {
+ if (utf16_len) {
+ // Get length.
+ int len = ::WideCharToMultiByte(CP_UTF8, 0, utf16, utf16_len, utf8.begin(),
+ 0, NULL, NULL);
- if (len == 0)
- return llvm::windows_error(::GetLastError());
+ if (len == 0)
+ return windows_error(::GetLastError());
- utf8.reserve(len);
- utf8.set_size(len);
+ utf8.reserve(len);
+ utf8.set_size(len);
- // Now do the actual conversion.
- len = ::WideCharToMultiByte(CP_UTF8, 0,
- utf16, utf16_len,
- utf8.data(), utf8.size(),
- NULL, NULL);
+ // Now do the actual conversion.
+ len = ::WideCharToMultiByte(CP_UTF8, 0, utf16, utf16_len, utf8.data(),
+ utf8.size(), NULL, NULL);
- if (len == 0)
- return llvm::windows_error(::GetLastError());
+ if (len == 0)
+ return windows_error(::GetLastError());
+ }
// Make utf8 null terminated.
utf8.push_back(0);
utf8.pop_back();
- return llvm::error_code::success();
+ return std::error_code();
}
} // end namespace windows
} // end namespace sys
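For reference: the recurring shape in the Path.inc hunks above is to capture ::GetLastError() into a DWORD first, compare that raw value against the ERROR_* constants, and only then convert it through windows_error() into a std::error_code. A minimal sketch of that pattern, assuming the windows_error() helper already present in this file — the openExisting() wrapper and its call site are purely illustrative, not part of the patch:

    static std::error_code openExisting(const wchar_t *Path, HANDLE &H) {
      H = ::CreateFileW(Path, GENERIC_READ, FILE_SHARE_READ, NULL,
                        OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
      if (H == INVALID_HANDLE_VALUE) {
        // Save the error before any other API call can overwrite it.
        DWORD LastError = ::GetLastError();
        if (LastError == ERROR_FILE_NOT_FOUND)   // compare the raw DWORD...
          return make_error_code(errc::no_such_file_or_directory);
        return windows_error(LastError);         // ...then map to std::error_code
      }
      return std::error_code();                  // success is the default-constructed value
    }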
diff --git a/contrib/llvm/lib/Support/Windows/Process.inc b/contrib/llvm/lib/Support/Windows/Process.inc
index f9a3db9..61749a7 100644
--- a/contrib/llvm/lib/Support/Windows/Process.inc
+++ b/contrib/llvm/lib/Support/Windows/Process.inc
@@ -12,11 +12,15 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/WindowsError.h"
+#include <malloc.h>
+
+// The Windows.h header must be after LLVM and standard headers.
+#include "WindowsSupport.h"
-#include "Windows.h"
#include <direct.h>
#include <io.h>
-#include <malloc.h>
#include <psapi.h>
#include <shellapi.h>
@@ -45,7 +49,6 @@
using namespace llvm;
using namespace sys;
-
process::id_type self_process::get_id() {
return GetCurrentProcessId();
}
@@ -80,16 +83,14 @@ TimeValue self_process::get_system_time() const {
return getTimeValueFromFILETIME(KernelTime);
}
-// This function retrieves the page size using GetSystemInfo and is present
-// solely so it can be called once to initialize the self_process member below.
+// This function retrieves the page size using GetNativeSystemInfo() and is
+// present solely so it can be called once to initialize the self_process member
+// below.
static unsigned getPageSize() {
- // NOTE: A 32-bit application running under WOW64 is supposed to use
- // GetNativeSystemInfo. However, this interface is not present prior
- // to Windows XP so to use it requires dynamic linking. It is not clear
- // how this affects the reported page size, if at all. One could argue
- // that LLVM ought to run as 64-bits on a 64-bit system, anyway.
+ // GetNativeSystemInfo() provides the physical page size which may differ
+ // from GetSystemInfo() in 32-bit applications running under WOW64.
SYSTEM_INFO info;
- GetSystemInfo(&info);
+ GetNativeSystemInfo(&info);
// FIXME: FileOffset in MapViewOfFile() should be aligned to not dwPageSize,
// but dwAllocationGranularity.
return static_cast<unsigned>(info.dwPageSize);
@@ -152,7 +153,7 @@ void Process::PreventCoreFiles() {
Optional<std::string> Process::GetEnv(StringRef Name) {
// Convert the argument to UTF-16 to pass it to _wgetenv().
SmallVector<wchar_t, 128> NameUTF16;
- if (error_code ec = windows::UTF8ToUTF16(Name, NameUTF16))
+ if (windows::UTF8ToUTF16(Name, NameUTF16))
return None;
// Environment variable can be encoded in non-UTF8 encoding, and there's no
@@ -173,42 +174,103 @@ Optional<std::string> Process::GetEnv(StringRef Name) {
// Convert the result from UTF-16 to UTF-8.
SmallVector<char, MAX_PATH> Res;
- if (error_code ec = windows::UTF16ToUTF8(Buf.data(), Size, Res))
+ if (windows::UTF16ToUTF8(Buf.data(), Size, Res))
return None;
return std::string(Res.data());
}
-error_code
+static std::error_code windows_error(DWORD E) {
+ return mapWindowsError(E);
+}
+
+static void AllocateAndPush(const SmallVectorImpl<char> &S,
+ SmallVectorImpl<const char *> &Vector,
+ SpecificBumpPtrAllocator<char> &Allocator) {
+ char *Buffer = Allocator.Allocate(S.size() + 1);
+ ::memcpy(Buffer, S.data(), S.size());
+ Buffer[S.size()] = '\0';
+ Vector.push_back(Buffer);
+}
+
+/// Convert Arg from UTF-16 to UTF-8 and push it onto Args.
+static std::error_code
+ConvertAndPushArg(const wchar_t *Arg, SmallVectorImpl<const char *> &Args,
+ SpecificBumpPtrAllocator<char> &Allocator) {
+ SmallVector<char, MAX_PATH> ArgString;
+ if (std::error_code ec = windows::UTF16ToUTF8(Arg, wcslen(Arg), ArgString))
+ return ec;
+ AllocateAndPush(ArgString, Args, Allocator);
+ return std::error_code();
+}
+
+/// \brief Perform wildcard expansion of Arg, or just push it into Args if it
+/// doesn't have wildcards or doesn't match any files.
+static std::error_code
+WildcardExpand(const wchar_t *Arg, SmallVectorImpl<const char *> &Args,
+ SpecificBumpPtrAllocator<char> &Allocator) {
+ if (!wcspbrk(Arg, L"*?")) {
+ // Arg does not contain any wildcard characters. This is the common case.
+ return ConvertAndPushArg(Arg, Args, Allocator);
+ }
+
+ if (wcscmp(Arg, L"/?") == 0 || wcscmp(Arg, L"-?") == 0) {
+ // Don't wildcard expand /?. Always treat it as an option.
+ return ConvertAndPushArg(Arg, Args, Allocator);
+ }
+
+ // Extract any directory part of the argument.
+ SmallVector<char, MAX_PATH> Dir;
+ if (std::error_code ec = windows::UTF16ToUTF8(Arg, wcslen(Arg), Dir))
+ return ec;
+ sys::path::remove_filename(Dir);
+ const int DirSize = Dir.size();
+
+ // Search for matching files.
+ WIN32_FIND_DATAW FileData;
+ HANDLE FindHandle = FindFirstFileW(Arg, &FileData);
+ if (FindHandle == INVALID_HANDLE_VALUE) {
+ return ConvertAndPushArg(Arg, Args, Allocator);
+ }
+
+ std::error_code ec;
+ do {
+ SmallVector<char, MAX_PATH> FileName;
+ ec = windows::UTF16ToUTF8(FileData.cFileName, wcslen(FileData.cFileName),
+ FileName);
+ if (ec)
+ break;
+
+ // Push the filename onto Dir, and remove it afterwards.
+ llvm::sys::path::append(Dir, StringRef(FileName.data(), FileName.size()));
+ AllocateAndPush(Dir, Args, Allocator);
+ Dir.resize(DirSize);
+ } while (FindNextFileW(FindHandle, &FileData));
+
+ FindClose(FindHandle);
+ return ec;
+}
+
+std::error_code
Process::GetArgumentVector(SmallVectorImpl<const char *> &Args,
ArrayRef<const char *>,
SpecificBumpPtrAllocator<char> &ArgAllocator) {
- int NewArgCount;
- error_code ec;
-
- wchar_t **UnicodeCommandLine = CommandLineToArgvW(GetCommandLineW(),
- &NewArgCount);
+ int ArgCount;
+ wchar_t **UnicodeCommandLine =
+ CommandLineToArgvW(GetCommandLineW(), &ArgCount);
if (!UnicodeCommandLine)
return windows_error(::GetLastError());
- Args.reserve(NewArgCount);
+ Args.reserve(ArgCount);
+ std::error_code ec;
- for (int i = 0; i < NewArgCount; ++i) {
- SmallVector<char, MAX_PATH> NewArgString;
- ec = windows::UTF16ToUTF8(UnicodeCommandLine[i],
- wcslen(UnicodeCommandLine[i]),
- NewArgString);
+ for (int i = 0; i < ArgCount; ++i) {
+ ec = WildcardExpand(UnicodeCommandLine[i], Args, ArgAllocator);
if (ec)
break;
-
- char *Buffer = ArgAllocator.Allocate(NewArgString.size() + 1);
- ::memcpy(Buffer, NewArgString.data(), NewArgString.size() + 1);
- Args.push_back(Buffer);
}
- LocalFree(UnicodeCommandLine);
- if (ec)
- return ec;
- return error_code::success();
+ LocalFree(UnicodeCommandLine);
+ return ec;
}
bool Process::StandardInIsUserInput() {
@@ -358,3 +420,17 @@ const char *Process::ResetColor() {
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), defaultColors());
return 0;
}
+
+unsigned Process::GetRandomNumber() {
+ HCRYPTPROV HCPC;
+ if (!::CryptAcquireContextW(&HCPC, NULL, NULL, PROV_RSA_FULL,
+ CRYPT_VERIFYCONTEXT))
+ report_fatal_error("Could not acquire a cryptographic context");
+
+ ScopedCryptContext CryptoProvider(HCPC);
+ unsigned Ret;
+ if (!::CryptGenRandom(CryptoProvider, sizeof(Ret),
+ reinterpret_cast<BYTE *>(&Ret)))
+ report_fatal_error("Could not generate a random number");
+ return Ret;
+}
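A hedged usage sketch for the new argv handling above — not from this patch — showing how a tool retrieves its arguments so that wildcards such as *.td arrive already expanded on Windows (the getArgs wrapper, Argv, and ArgAllocator are invented names):

    void getArgs(int argc, const char **argv) {
      SmallVector<const char *, 16> Argv;
      SpecificBumpPtrAllocator<char> ArgAllocator;
      // Re-reads the real UTF-16 command line, expands wildcards against the
      // filesystem, and stores UTF-8 copies owned by ArgAllocator.
      if (std::error_code ec = sys::Process::GetArgumentVector(
              Argv, ArrayRef<const char *>(argv, argc), ArgAllocator))
        report_fatal_error("Couldn't get command line: " + ec.message());
    }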
diff --git a/contrib/llvm/lib/Support/Windows/Program.inc b/contrib/llvm/lib/Support/Windows/Program.inc
index dc09738..b2f71ae 100644
--- a/contrib/llvm/lib/Support/Windows/Program.inc
+++ b/contrib/llvm/lib/Support/Windows/Program.inc
@@ -11,8 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "Windows.h"
-#include "llvm/ADT/OwningPtr.h"
+#include "WindowsSupport.h"
#include "llvm/Support/FileSystem.h"
#include <cstdio>
#include <fcntl.h>
@@ -187,7 +186,7 @@ static bool Execute(ProcessInfo &PI, StringRef Program, const char **args,
}
// Now build the command line.
- OwningArrayPtr<char> command(new char[len+1]);
+ std::unique_ptr<char[]> command(new char[len+1]);
char *p = command.get();
for (unsigned i = 0; args[i]; i++) {
@@ -227,7 +226,7 @@ static bool Execute(ProcessInfo &PI, StringRef Program, const char **args,
// an environment block by concatenating them.
for (unsigned i = 0; envp[i]; ++i) {
SmallVector<wchar_t, MAX_PATH> EnvString;
- if (error_code ec = windows::UTF8ToUTF16(envp[i], EnvString)) {
+ if (std::error_code ec = windows::UTF8ToUTF16(envp[i], EnvString)) {
SetLastError(ec.value());
MakeErrMsg(ErrMsg, "Unable to convert environment variable to UTF-16");
return false;
@@ -291,7 +290,7 @@ static bool Execute(ProcessInfo &PI, StringRef Program, const char **args,
fflush(stderr);
SmallVector<wchar_t, MAX_PATH> ProgramUtf16;
- if (error_code ec = windows::UTF8ToUTF16(Program, ProgramUtf16)) {
+ if (std::error_code ec = windows::UTF8ToUTF16(Program, ProgramUtf16)) {
SetLastError(ec.value());
MakeErrMsg(ErrMsg,
std::string("Unable to convert application name to UTF-16"));
@@ -299,7 +298,7 @@ static bool Execute(ProcessInfo &PI, StringRef Program, const char **args,
}
SmallVector<wchar_t, MAX_PATH> CommandUtf16;
- if (error_code ec = windows::UTF8ToUTF16(command.get(), CommandUtf16)) {
+ if (std::error_code ec = windows::UTF8ToUTF16(command.get(), CommandUtf16)) {
SetLastError(ec.value());
MakeErrMsg(ErrMsg,
std::string("Unable to convert command-line to UTF-16"));
@@ -423,25 +422,18 @@ ProcessInfo sys::Wait(const ProcessInfo &PI, unsigned SecondsToWait,
return WaitResult;
}
-error_code sys::ChangeStdinToBinary(){
+std::error_code sys::ChangeStdinToBinary(){
int result = _setmode( _fileno(stdin), _O_BINARY );
if (result == -1)
- return error_code(errno, generic_category());
- return make_error_code(errc::success);
+ return std::error_code(errno, std::generic_category());
+ return std::error_code();
}
-error_code sys::ChangeStdoutToBinary(){
+std::error_code sys::ChangeStdoutToBinary(){
int result = _setmode( _fileno(stdout), _O_BINARY );
if (result == -1)
- return error_code(errno, generic_category());
- return make_error_code(errc::success);
-}
-
-error_code sys::ChangeStderrToBinary(){
- int result = _setmode( _fileno(stderr), _O_BINARY );
- if (result == -1)
- return error_code(errno, generic_category());
- return make_error_code(errc::success);
+ return std::error_code(errno, std::generic_category());
+ return std::error_code();
}
bool llvm::sys::argumentsFitWithinSystemLimits(ArrayRef<const char*> Args) {
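Since success is now the default-constructed std::error_code, callers of the rewritten helpers test the return value directly; a small sketch, not part of the patch:

    if (std::error_code EC = sys::ChangeStdoutToBinary())
      errs() << "cannot switch stdout to binary mode: " << EC.message() << "\n";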
diff --git a/contrib/llvm/lib/Support/Windows/RWMutex.inc b/contrib/llvm/lib/Support/Windows/RWMutex.inc
index c431844..00d0e93 100644
--- a/contrib/llvm/lib/Support/Windows/RWMutex.inc
+++ b/contrib/llvm/lib/Support/Windows/RWMutex.inc
@@ -16,7 +16,7 @@
//=== is guaranteed to work on *all* Win32 variants.
//===----------------------------------------------------------------------===//
-#include "Windows.h"
+#include "WindowsSupport.h"
namespace llvm {
using namespace sys;
diff --git a/contrib/llvm/lib/Support/Windows/Signals.inc b/contrib/llvm/lib/Support/Windows/Signals.inc
index 4b40d51..35ba6f8 100644
--- a/contrib/llvm/lib/Support/Windows/Signals.inc
+++ b/contrib/llvm/lib/Support/Windows/Signals.inc
@@ -12,12 +12,13 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/FileSystem.h"
-
-#include "Windows.h"
#include <algorithm>
#include <stdio.h>
#include <vector>
+// The Windows.h header must be after LLVM and standard headers.
+#include "WindowsSupport.h"
+
#ifdef __MINGW32__
#include <imagehlp.h>
#else
@@ -317,8 +318,7 @@ static void Cleanup() {
if (FilesToRemove != NULL)
while (!FilesToRemove->empty()) {
- bool Existed;
- llvm::sys::fs::remove(FilesToRemove->back(), Existed);
+ llvm::sys::fs::remove(FilesToRemove->back());
FilesToRemove->pop_back();
}
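The Signals.inc cleanup follows an API change in sys::fs::remove(): the bool Existed out-parameter is gone and only a std::error_code comes back. A hedged example of the new call shape (TempPath is an invented variable):

    if (std::error_code EC = llvm::sys::fs::remove(TempPath))
      errs() << "could not remove " << TempPath << ": " << EC.message() << "\n";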
diff --git a/contrib/llvm/lib/Support/Windows/ThreadLocal.inc b/contrib/llvm/lib/Support/Windows/ThreadLocal.inc
index 057deb3..14ce619 100644
--- a/contrib/llvm/lib/Support/Windows/ThreadLocal.inc
+++ b/contrib/llvm/lib/Support/Windows/ThreadLocal.inc
@@ -16,14 +16,14 @@
//=== is guaranteed to work on *all* Win32 variants.
//===----------------------------------------------------------------------===//
-#include "Windows.h"
+#include "WindowsSupport.h"
#include "llvm/Support/ThreadLocal.h"
namespace llvm {
using namespace sys;
ThreadLocalImpl::ThreadLocalImpl() : data() {
- typedef int SIZE_TOO_BIG[sizeof(DWORD) <= sizeof(data) ? 1 : -1];
+ static_assert(sizeof(DWORD) <= sizeof(data), "size too big");
DWORD* tls = reinterpret_cast<DWORD*>(&data);
*tls = TlsAlloc();
assert(*tls != TLS_OUT_OF_INDEXES);
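The ThreadLocal.inc change trades the old C++03 compile-time check (an array whose size goes negative on failure) for C++11 static_assert, which carries a readable message. Side by side, purely for illustration:

    // C++03 idiom: the build breaks with an obscure "negative array size" error.
    typedef int SIZE_TOO_BIG[sizeof(DWORD) <= sizeof(data) ? 1 : -1];

    // C++11 replacement: same check, explicit diagnostic text.
    static_assert(sizeof(DWORD) <= sizeof(data), "size too big");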
diff --git a/contrib/llvm/lib/Support/Windows/TimeValue.inc b/contrib/llvm/lib/Support/Windows/TimeValue.inc
index 98b07d6..0223ab4 100644
--- a/contrib/llvm/lib/Support/Windows/TimeValue.inc
+++ b/contrib/llvm/lib/Support/Windows/TimeValue.inc
@@ -11,7 +11,9 @@
//
//===----------------------------------------------------------------------===//
-#include "Windows.h"
+#include "WindowsSupport.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
#include <cctype>
#include <time.h>
@@ -32,6 +34,7 @@ TimeValue TimeValue::now() {
}
std::string TimeValue::str() const {
+ std::string S;
struct tm *LT;
#ifdef __MINGW32__
// Old versions of mingw don't have _localtime64_s. Remove this once we drop support
@@ -47,13 +50,11 @@ std::string TimeValue::str() const {
LT = &Storage;
#endif
- char Buffer[25];
- // FIXME: the windows version of strftime doesn't support %e
- strftime(Buffer, 25, "%b %d %H:%M %Y", LT);
- assert((Buffer[3] == ' ' && isdigit(Buffer[5]) && Buffer[6] == ' ') &&
- "Unexpected format in strftime()!");
- // Emulate %e on %d to mute '0'.
- if (Buffer[4] == '0')
- Buffer[4] = ' ';
- return std::string(Buffer);
+ char Buffer[sizeof("YYYY-MM-DD HH:MM:SS")];
+ strftime(Buffer, sizeof(Buffer), "%Y-%m-%d %H:%M:%S", LT);
+ raw_string_ostream OS(S);
+ OS << format("%s.%.9u", static_cast<const char *>(Buffer),
+ this->nanoseconds());
+ OS.flush();
+ return S;
}
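The rewritten TimeValue::str() drops the strftime %e emulation and prints an ISO-style date plus the nanosecond remainder through format(). Output shape only — the digits below are made up:

    std::string S = TimeValue::now().str();
    // e.g. "2014-11-24 17:02:24.123456789"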
diff --git a/contrib/llvm/lib/Support/Windows/Windows.h b/contrib/llvm/lib/Support/Windows/WindowsSupport.h
index 1f3417d..f68835b 100644
--- a/contrib/llvm/lib/Support/Windows/Windows.h
+++ b/contrib/llvm/lib/Support/Windows/WindowsSupport.h
@@ -1,4 +1,4 @@
-//===- Win32/Win32.h - Common Win32 Include File ----------------*- C++ -*-===//
+//===- WindowsSupport.h - Common Windows Include File -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,7 +7,10 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines things specific to Win32 implementations.
+// This file defines things specific to Windows implementations. In addition to
+// providing some helpers for working with win32 APIs, this header wraps
+// <windows.h> with some portability macros. Always include WindowsSupport.h
+// instead of including <windows.h> directly.
//
//===----------------------------------------------------------------------===//
@@ -18,6 +21,7 @@
// mingw-w64 tends to define it as 0x0502 in its headers.
#undef _WIN32_WINNT
+#undef _WIN32_IE
// Require at least Windows XP(5.1) API.
#define _WIN32_WINNT 0x0501
@@ -28,7 +32,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/Config/config.h" // Get build system configuration settings
#include "llvm/Support/Compiler.h"
-#include "llvm/Support/system_error.h"
+#include <system_error>
#include <windows.h>
#include <wincrypt.h>
#include <cassert>
@@ -159,10 +163,9 @@ c_str(SmallVectorImpl<T> &str) {
namespace sys {
namespace windows {
-error_code UTF8ToUTF16(StringRef utf8,
- SmallVectorImpl<wchar_t> &utf16);
-error_code UTF16ToUTF8(const wchar_t *utf16, size_t utf16_len,
- SmallVectorImpl<char> &utf8);
+std::error_code UTF8ToUTF16(StringRef utf8, SmallVectorImpl<wchar_t> &utf16);
+std::error_code UTF16ToUTF8(const wchar_t *utf16, size_t utf16_len,
+ SmallVectorImpl<char> &utf8);
} // end namespace windows
} // end namespace sys
} // end namespace llvm.
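As the new header comment requires, WindowsSupport.h (and through it <windows.h>) has to come after LLVM and standard headers; a typical .inc file in this directory now opens like this (contents abridged):

    #include "llvm/Support/FileSystem.h"
    #include <system_error>

    // The Windows.h header must be after LLVM and standard headers.
    #include "WindowsSupport.h"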
diff --git a/contrib/llvm/lib/Support/Windows/system_error.inc b/contrib/llvm/lib/Support/Windows/system_error.inc
deleted file mode 100644
index 37ec81d..0000000
--- a/contrib/llvm/lib/Support/Windows/system_error.inc
+++ /dev/null
@@ -1,142 +0,0 @@
-//===- llvm/Support/Win32/system_error.inc - Windows error_code --*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides the Windows specific implementation of the error_code
-// and error_condition classes.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-//=== WARNING: Implementation here must contain only generic Windows code that
-//=== is guaranteed to work on *all* Windows variants.
-//===----------------------------------------------------------------------===//
-
-#include <windows.h>
-#include <winerror.h>
-
-using namespace llvm;
-
-std::string
-_system_error_category::message(int ev) const {
- LPVOID lpMsgBuf = 0;
- DWORD retval = ::FormatMessageA(
- FORMAT_MESSAGE_ALLOCATE_BUFFER |
- FORMAT_MESSAGE_FROM_SYSTEM |
- FORMAT_MESSAGE_IGNORE_INSERTS,
- NULL,
- ev,
- MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language
- (LPSTR) &lpMsgBuf,
- 0,
- NULL);
- if (retval == 0) {
- ::LocalFree(lpMsgBuf);
- return std::string("Unknown error");
- }
-
- std::string str( static_cast<LPCSTR>(lpMsgBuf) );
- ::LocalFree(lpMsgBuf);
-
- while (str.size()
- && (str[str.size()-1] == '\n' || str[str.size()-1] == '\r'))
- str.erase( str.size()-1 );
- if (str.size() && str[str.size()-1] == '.')
- str.erase( str.size()-1 );
- return str;
-}
-
-// I'd rather not double the line count of the following.
-#define MAP_ERR_TO_COND(x, y) case x: return make_error_condition(errc::y)
-
-error_condition
-_system_error_category::default_error_condition(int ev) const {
- switch (ev) {
- MAP_ERR_TO_COND(0, success);
- // Windows system -> posix_errno decode table ---------------------------//
- // see WinError.h comments for descriptions of errors
- MAP_ERR_TO_COND(ERROR_ACCESS_DENIED, permission_denied);
- MAP_ERR_TO_COND(ERROR_ALREADY_EXISTS, file_exists);
- MAP_ERR_TO_COND(ERROR_BAD_UNIT, no_such_device);
- MAP_ERR_TO_COND(ERROR_BUFFER_OVERFLOW, filename_too_long);
- MAP_ERR_TO_COND(ERROR_BUSY, device_or_resource_busy);
- MAP_ERR_TO_COND(ERROR_BUSY_DRIVE, device_or_resource_busy);
- MAP_ERR_TO_COND(ERROR_CANNOT_MAKE, permission_denied);
- MAP_ERR_TO_COND(ERROR_CANTOPEN, io_error);
- MAP_ERR_TO_COND(ERROR_CANTREAD, io_error);
- MAP_ERR_TO_COND(ERROR_CANTWRITE, io_error);
- MAP_ERR_TO_COND(ERROR_CURRENT_DIRECTORY, permission_denied);
- MAP_ERR_TO_COND(ERROR_DEV_NOT_EXIST, no_such_device);
- MAP_ERR_TO_COND(ERROR_DEVICE_IN_USE, device_or_resource_busy);
- MAP_ERR_TO_COND(ERROR_DIR_NOT_EMPTY, directory_not_empty);
- MAP_ERR_TO_COND(ERROR_DIRECTORY, invalid_argument);
- MAP_ERR_TO_COND(ERROR_DISK_FULL, no_space_on_device);
- MAP_ERR_TO_COND(ERROR_FILE_EXISTS, file_exists);
- MAP_ERR_TO_COND(ERROR_FILE_NOT_FOUND, no_such_file_or_directory);
- MAP_ERR_TO_COND(ERROR_HANDLE_DISK_FULL, no_space_on_device);
- MAP_ERR_TO_COND(ERROR_HANDLE_EOF, value_too_large);
- MAP_ERR_TO_COND(ERROR_INVALID_ACCESS, permission_denied);
- MAP_ERR_TO_COND(ERROR_INVALID_DRIVE, no_such_device);
- MAP_ERR_TO_COND(ERROR_INVALID_FUNCTION, function_not_supported);
- MAP_ERR_TO_COND(ERROR_INVALID_HANDLE, invalid_argument);
- MAP_ERR_TO_COND(ERROR_INVALID_NAME, invalid_argument);
- MAP_ERR_TO_COND(ERROR_LOCK_VIOLATION, no_lock_available);
- MAP_ERR_TO_COND(ERROR_LOCKED, no_lock_available);
- MAP_ERR_TO_COND(ERROR_NEGATIVE_SEEK, invalid_argument);
- MAP_ERR_TO_COND(ERROR_NOACCESS, permission_denied);
- MAP_ERR_TO_COND(ERROR_NOT_ENOUGH_MEMORY, not_enough_memory);
- MAP_ERR_TO_COND(ERROR_NOT_READY, resource_unavailable_try_again);
- MAP_ERR_TO_COND(ERROR_NOT_SAME_DEVICE, cross_device_link);
- MAP_ERR_TO_COND(ERROR_OPEN_FAILED, io_error);
- MAP_ERR_TO_COND(ERROR_OPEN_FILES, device_or_resource_busy);
- MAP_ERR_TO_COND(ERROR_OPERATION_ABORTED, operation_canceled);
- MAP_ERR_TO_COND(ERROR_OUTOFMEMORY, not_enough_memory);
- MAP_ERR_TO_COND(ERROR_PATH_NOT_FOUND, no_such_file_or_directory);
- MAP_ERR_TO_COND(ERROR_BAD_NETPATH, no_such_file_or_directory);
- MAP_ERR_TO_COND(ERROR_READ_FAULT, io_error);
- MAP_ERR_TO_COND(ERROR_RETRY, resource_unavailable_try_again);
- MAP_ERR_TO_COND(ERROR_SEEK, io_error);
- MAP_ERR_TO_COND(ERROR_SHARING_VIOLATION, permission_denied);
- MAP_ERR_TO_COND(ERROR_TOO_MANY_OPEN_FILES, too_many_files_open);
- MAP_ERR_TO_COND(ERROR_WRITE_FAULT, io_error);
- MAP_ERR_TO_COND(ERROR_WRITE_PROTECT, permission_denied);
- MAP_ERR_TO_COND(ERROR_SEM_TIMEOUT, timed_out);
- MAP_ERR_TO_COND(WSAEACCES, permission_denied);
- MAP_ERR_TO_COND(WSAEADDRINUSE, address_in_use);
- MAP_ERR_TO_COND(WSAEADDRNOTAVAIL, address_not_available);
- MAP_ERR_TO_COND(WSAEAFNOSUPPORT, address_family_not_supported);
- MAP_ERR_TO_COND(WSAEALREADY, connection_already_in_progress);
- MAP_ERR_TO_COND(WSAEBADF, bad_file_descriptor);
- MAP_ERR_TO_COND(WSAECONNABORTED, connection_aborted);
- MAP_ERR_TO_COND(WSAECONNREFUSED, connection_refused);
- MAP_ERR_TO_COND(WSAECONNRESET, connection_reset);
- MAP_ERR_TO_COND(WSAEDESTADDRREQ, destination_address_required);
- MAP_ERR_TO_COND(WSAEFAULT, bad_address);
- MAP_ERR_TO_COND(WSAEHOSTUNREACH, host_unreachable);
- MAP_ERR_TO_COND(WSAEINPROGRESS, operation_in_progress);
- MAP_ERR_TO_COND(WSAEINTR, interrupted);
- MAP_ERR_TO_COND(WSAEINVAL, invalid_argument);
- MAP_ERR_TO_COND(WSAEISCONN, already_connected);
- MAP_ERR_TO_COND(WSAEMFILE, too_many_files_open);
- MAP_ERR_TO_COND(WSAEMSGSIZE, message_size);
- MAP_ERR_TO_COND(WSAENAMETOOLONG, filename_too_long);
- MAP_ERR_TO_COND(WSAENETDOWN, network_down);
- MAP_ERR_TO_COND(WSAENETRESET, network_reset);
- MAP_ERR_TO_COND(WSAENETUNREACH, network_unreachable);
- MAP_ERR_TO_COND(WSAENOBUFS, no_buffer_space);
- MAP_ERR_TO_COND(WSAENOPROTOOPT, no_protocol_option);
- MAP_ERR_TO_COND(WSAENOTCONN, not_connected);
- MAP_ERR_TO_COND(WSAENOTSOCK, not_a_socket);
- MAP_ERR_TO_COND(WSAEOPNOTSUPP, operation_not_supported);
- MAP_ERR_TO_COND(WSAEPROTONOSUPPORT, protocol_not_supported);
- MAP_ERR_TO_COND(WSAEPROTOTYPE, wrong_protocol_type);
- MAP_ERR_TO_COND(WSAETIMEDOUT, timed_out);
- MAP_ERR_TO_COND(WSAEWOULDBLOCK, operation_would_block);
- default: return error_condition(ev, system_category());
- }
-}
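With the custom error_code classes removed, the translation table above no longer has a home in Support; the DWORD-to-errc mapping is expected to live behind mapWindowsError() from llvm/Support/WindowsError.h (included by the Process.inc hunk earlier). A reduced sketch of such a mapping — only a few rows, values copied from the deleted table, helper name invented:

    static std::error_code mapOneWindowsError(unsigned EV) {
      switch (EV) {
      case ERROR_ACCESS_DENIED:  return make_error_code(errc::permission_denied);
      case ERROR_FILE_NOT_FOUND: return make_error_code(errc::no_such_file_or_directory);
      case ERROR_HANDLE_EOF:     return make_error_code(errc::value_too_large);
      default:                   return std::error_code(EV, std::system_category());
      }
    }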
diff --git a/contrib/llvm/lib/Support/YAMLParser.cpp b/contrib/llvm/lib/Support/YAMLParser.cpp
index 9495cd4..3be02ee 100644
--- a/contrib/llvm/lib/Support/YAMLParser.cpp
+++ b/contrib/llvm/lib/Support/YAMLParser.cpp
@@ -378,9 +378,6 @@ private:
/// sequence of ns-uri-char.
StringRef scan_ns_uri_char();
- /// @brief Scan ns-plain-one-line[133] starting at \a Cur.
- StringRef scan_ns_plain_one_line();
-
/// @brief Consume a minimal well-formed code unit subsequence starting at
/// \a Cur. Return false if it is not the same Unicode scalar value as
/// \a Expected. This updates \a Column.
@@ -873,42 +870,6 @@ StringRef Scanner::scan_ns_uri_char() {
return StringRef(Start, Current - Start);
}
-StringRef Scanner::scan_ns_plain_one_line() {
- StringRef::iterator start = Current;
- // The first character must already be verified.
- ++Current;
- while (true) {
- if (Current == End) {
- break;
- } else if (*Current == ':') {
- // Check if the next character is a ns-char.
- if (Current + 1 == End)
- break;
- StringRef::iterator i = skip_ns_char(Current + 1);
- if (Current + 1 != i) {
- Current = i;
- Column += 2; // Consume both the ':' and ns-char.
- } else
- break;
- } else if (*Current == '#') {
- // Check if the previous character was a ns-char.
- // The & 0x80 check is to check for the trailing byte of a utf-8
- if (*(Current - 1) & 0x80 || skip_ns_char(Current - 1) == Current) {
- ++Current;
- ++Column;
- } else
- break;
- } else {
- StringRef::iterator i = skip_nb_char(Current);
- if (i == Current)
- break;
- Current = i;
- ++Column;
- }
- }
- return StringRef(start, Current - start);
-}
-
bool Scanner::consume(uint32_t Expected) {
if (Expected >= 0x80)
report_fatal_error("Not dealing with this yet");
@@ -1561,12 +1522,10 @@ bool Scanner::fetchMoreTokens() {
}
Stream::Stream(StringRef Input, SourceMgr &SM)
- : scanner(new Scanner(Input, SM))
- , CurrentDoc(0) {}
+ : scanner(new Scanner(Input, SM)), CurrentDoc() {}
Stream::Stream(MemoryBuffer *InputBuffer, SourceMgr &SM)
- : scanner(new Scanner(InputBuffer, SM))
- , CurrentDoc(0) {}
+ : scanner(new Scanner(InputBuffer, SM)), CurrentDoc() {}
Stream::~Stream() {}
@@ -1601,11 +1560,9 @@ void Stream::skip() {
i->skip();
}
-Node::Node(unsigned int Type, OwningPtr<Document> &D, StringRef A, StringRef T)
- : Doc(D)
- , TypeID(Type)
- , Anchor(A)
- , Tag(T) {
+Node::Node(unsigned int Type, std::unique_ptr<Document> &D, StringRef A,
+ StringRef T)
+ : Doc(D), TypeID(Type), Anchor(A), Tag(T) {
SMLoc Start = SMLoc::getFromPointer(peekNext().Range.begin());
SourceRange = SMRange(Start, Start);
}
@@ -1617,11 +1574,11 @@ std::string Node::getVerbatimTag() const {
if (Raw.find_last_of('!') == 0) {
Ret = Doc->getTagMap().find("!")->second;
Ret += Raw.substr(1);
- return llvm_move(Ret);
+ return std::move(Ret);
} else if (Raw.startswith("!!")) {
Ret = Doc->getTagMap().find("!!")->second;
Ret += Raw.substr(2);
- return llvm_move(Ret);
+ return std::move(Ret);
} else {
StringRef TagHandle = Raw.substr(0, Raw.find_last_of('!') + 1);
std::map<StringRef, StringRef>::const_iterator It =
@@ -1635,7 +1592,7 @@ std::string Node::getVerbatimTag() const {
setError(Twine("Unknown tag handle ") + TagHandle, T);
}
Ret += Raw.substr(Raw.find_last_of('!') + 1);
- return llvm_move(Ret);
+ return std::move(Ret);
}
}
@@ -1919,14 +1876,14 @@ Node *KeyValueNode::getValue() {
void MappingNode::increment() {
if (failed()) {
IsAtEnd = true;
- CurrentEntry = 0;
+ CurrentEntry = nullptr;
return;
}
if (CurrentEntry) {
CurrentEntry->skip();
if (Type == MT_Inline) {
IsAtEnd = true;
- CurrentEntry = 0;
+ CurrentEntry = nullptr;
return;
}
}
@@ -1939,13 +1896,13 @@ void MappingNode::increment() {
case Token::TK_BlockEnd:
getNext();
IsAtEnd = true;
- CurrentEntry = 0;
+ CurrentEntry = nullptr;
break;
default:
setError("Unexpected token. Expected Key or Block End", T);
case Token::TK_Error:
IsAtEnd = true;
- CurrentEntry = 0;
+ CurrentEntry = nullptr;
}
} else {
switch (T.Kind) {
@@ -1958,14 +1915,14 @@ void MappingNode::increment() {
case Token::TK_Error:
// Set this to end iterator.
IsAtEnd = true;
- CurrentEntry = 0;
+ CurrentEntry = nullptr;
break;
default:
setError( "Unexpected token. Expected Key, Flow Entry, or Flow "
"Mapping End."
, T);
IsAtEnd = true;
- CurrentEntry = 0;
+ CurrentEntry = nullptr;
}
}
}
@@ -1973,7 +1930,7 @@ void MappingNode::increment() {
void SequenceNode::increment() {
if (failed()) {
IsAtEnd = true;
- CurrentEntry = 0;
+ CurrentEntry = nullptr;
return;
}
if (CurrentEntry)
@@ -1984,37 +1941,37 @@ void SequenceNode::increment() {
case Token::TK_BlockEntry:
getNext();
CurrentEntry = parseBlockNode();
- if (CurrentEntry == 0) { // An error occurred.
+ if (!CurrentEntry) { // An error occurred.
IsAtEnd = true;
- CurrentEntry = 0;
+ CurrentEntry = nullptr;
}
break;
case Token::TK_BlockEnd:
getNext();
IsAtEnd = true;
- CurrentEntry = 0;
+ CurrentEntry = nullptr;
break;
default:
setError( "Unexpected token. Expected Block Entry or Block End."
, T);
case Token::TK_Error:
IsAtEnd = true;
- CurrentEntry = 0;
+ CurrentEntry = nullptr;
}
} else if (SeqType == ST_Indentless) {
switch (T.Kind) {
case Token::TK_BlockEntry:
getNext();
CurrentEntry = parseBlockNode();
- if (CurrentEntry == 0) { // An error occurred.
+ if (!CurrentEntry) { // An error occurred.
IsAtEnd = true;
- CurrentEntry = 0;
+ CurrentEntry = nullptr;
}
break;
default:
case Token::TK_Error:
IsAtEnd = true;
- CurrentEntry = 0;
+ CurrentEntry = nullptr;
}
} else if (SeqType == ST_Flow) {
switch (T.Kind) {
@@ -2028,7 +1985,7 @@ void SequenceNode::increment() {
case Token::TK_Error:
// Set this to end iterator.
IsAtEnd = true;
- CurrentEntry = 0;
+ CurrentEntry = nullptr;
break;
case Token::TK_StreamEnd:
case Token::TK_DocumentEnd:
@@ -2036,13 +1993,13 @@ void SequenceNode::increment() {
setError("Could not find closing ]!", T);
// Set this to end iterator.
IsAtEnd = true;
- CurrentEntry = 0;
+ CurrentEntry = nullptr;
break;
default:
if (!WasPreviousTokenFlowEntry) {
setError("Expected , between entries!", T);
IsAtEnd = true;
- CurrentEntry = 0;
+ CurrentEntry = nullptr;
break;
}
// Otherwise it must be a flow entry.
@@ -2056,7 +2013,7 @@ void SequenceNode::increment() {
}
}
-Document::Document(Stream &S) : stream(S), Root(0) {
+Document::Document(Stream &S) : stream(S), Root(nullptr) {
// Tag maps starts with two default mappings.
TagMap["!"] = "!";
TagMap["!!"] = "tag:yaml.org,2002:";
@@ -2113,7 +2070,7 @@ parse_property:
case Token::TK_Anchor:
if (AnchorInfo.Kind == Token::TK_Anchor) {
setError("Already encountered an anchor for this node!", T);
- return 0;
+ return nullptr;
}
AnchorInfo = getNext(); // Consume TK_Anchor.
T = peekNext();
@@ -2121,7 +2078,7 @@ parse_property:
case Token::TK_Tag:
if (TagInfo.Kind == Token::TK_Tag) {
setError("Already encountered a tag for this node!", T);
- return 0;
+ return nullptr;
}
TagInfo = getNext(); // Consume TK_Tag.
T = peekNext();
@@ -2189,10 +2146,10 @@ parse_property:
// !!null null.
return new (NodeAllocator) NullNode(stream.CurrentDoc);
case Token::TK_Error:
- return 0;
+ return nullptr;
}
llvm_unreachable("Control flow shouldn't reach here.");
- return 0;
+ return nullptr;
}
bool Document::parseDirectives() {
diff --git a/contrib/llvm/lib/Support/YAMLTraits.cpp b/contrib/llvm/lib/Support/YAMLTraits.cpp
index 42bff96..5212624 100644
--- a/contrib/llvm/lib/Support/YAMLTraits.cpp
+++ b/contrib/llvm/lib/Support/YAMLTraits.cpp
@@ -7,6 +7,7 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/Support/Errc.h"
#include "llvm/Support/YAMLTraits.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Casting.h"
@@ -14,8 +15,8 @@
#include "llvm/Support/Format.h"
#include "llvm/Support/YAMLParser.h"
#include "llvm/Support/raw_ostream.h"
-#include <cstring>
#include <cctype>
+#include <cstring>
using namespace llvm;
using namespace yaml;
@@ -47,7 +48,7 @@ Input::Input(StringRef InputContent,
void *DiagHandlerCtxt)
: IO(Ctxt),
Strm(new Stream(InputContent, SrcMgr)),
- CurrentNode(NULL) {
+ CurrentNode(nullptr) {
if (DiagHandler)
SrcMgr.setDiagHandler(DiagHandler, DiagHandlerCtxt);
DocIterator = Strm->begin();
@@ -56,16 +57,14 @@ Input::Input(StringRef InputContent,
Input::~Input() {
}
-error_code Input::error() {
- return EC;
-}
+std::error_code Input::error() { return EC; }
// Pin the vtables to this file.
void Input::HNode::anchor() {}
void Input::EmptyHNode::anchor() {}
void Input::ScalarHNode::anchor() {}
-bool Input::outputting() const {
+bool Input::outputting() {
return false;
}
@@ -90,8 +89,8 @@ bool Input::setCurrentDocument() {
return false;
}
-void Input::nextDocument() {
- ++DocIterator;
+bool Input::nextDocument() {
+ return ++DocIterator != Strm->end();
}
bool Input::mapTag(StringRef Tag, bool Default) {
@@ -158,10 +157,9 @@ void Input::endMapping() {
MapHNode *MN = dyn_cast_or_null<MapHNode>(CurrentNode);
if (!MN)
return;
- for (MapHNode::NameToNode::iterator i = MN->Mapping.begin(),
- End = MN->Mapping.end(); i != End; ++i) {
- if (!MN->isValidKey(i->first())) {
- setError(i->second, Twine("unknown key '") + i->first() + "'");
+ for (const auto &NN : MN->Mapping) {
+ if (!MN->isValidKey(NN.first())) {
+ setError(NN.second, Twine("unknown key '") + NN.first() + "'");
break;
}
}
@@ -255,9 +253,8 @@ bool Input::bitSetMatch(const char *Str, bool) {
return false;
if (SequenceHNode *SQ = dyn_cast<SequenceHNode>(CurrentNode)) {
unsigned Index = 0;
- for (std::vector<HNode *>::iterator i = SQ->Entries.begin(),
- End = SQ->Entries.end(); i != End; ++i) {
- if (ScalarHNode *SN = dyn_cast<ScalarHNode>(*i)) {
+ for (HNode *N : SQ->Entries) {
+ if (ScalarHNode *SN = dyn_cast<ScalarHNode>(N)) {
if (SN->value().equals(Str)) {
BitValuesUsed[Index] = true;
return true;
@@ -287,7 +284,7 @@ void Input::endBitSetScalar() {
}
}
-void Input::scalarString(StringRef &S) {
+void Input::scalarString(StringRef &S, bool) {
if (ScalarHNode *SN = dyn_cast<ScalarHNode>(CurrentNode)) {
S = SN->value();
} else {
@@ -319,9 +316,8 @@ Input::HNode *Input::createHNodes(Node *N) {
return new ScalarHNode(N, KeyStr);
} else if (SequenceNode *SQ = dyn_cast<SequenceNode>(N)) {
SequenceHNode *SQHNode = new SequenceHNode(N);
- for (SequenceNode::iterator i = SQ->begin(), End = SQ->end(); i != End;
- ++i) {
- HNode *Entry = this->createHNodes(i);
+ for (Node &SN : *SQ) {
+ HNode *Entry = this->createHNodes(&SN);
if (EC)
break;
SQHNode->Entries.push_back(Entry);
@@ -329,9 +325,8 @@ Input::HNode *Input::createHNodes(Node *N) {
return SQHNode;
} else if (MappingNode *Map = dyn_cast<MappingNode>(N)) {
MapHNode *mapHNode = new MapHNode(N);
- for (MappingNode::iterator i = Map->begin(), End = Map->end(); i != End;
- ++i) {
- ScalarNode *KeyScalar = dyn_cast<ScalarNode>(i->getKey());
+ for (KeyValueNode &KVN : *Map) {
+ ScalarNode *KeyScalar = dyn_cast<ScalarNode>(KVN.getKey());
StringStorage.clear();
StringRef KeyStr = KeyScalar->getValue(StringStorage);
if (!StringStorage.empty()) {
@@ -341,7 +336,7 @@ Input::HNode *Input::createHNodes(Node *N) {
memcpy(Buf, &StringStorage[0], Len);
KeyStr = StringRef(Buf, Len);
}
- HNode *ValueHNode = this->createHNodes(i->getValue());
+ HNode *ValueHNode = this->createHNodes(KVN.getValue());
if (EC)
break;
mapHNode->Mapping[KeyStr] = ValueHNode;
@@ -351,14 +346,13 @@ Input::HNode *Input::createHNodes(Node *N) {
return new EmptyHNode(N);
} else {
setError(N, "unknown node kind");
- return NULL;
+ return nullptr;
}
}
bool Input::MapHNode::isValidKey(StringRef Key) {
- for (SmallVectorImpl<const char *>::iterator i = ValidKeys.begin(),
- End = ValidKeys.end(); i != End; ++i) {
- if (Key.equals(*i))
+ for (const char *K : ValidKeys) {
+ if (Key.equals(K))
return true;
}
return false;
@@ -373,17 +367,13 @@ bool Input::canElideEmptySequence() {
}
Input::MapHNode::~MapHNode() {
- for (MapHNode::NameToNode::iterator i = Mapping.begin(), End = Mapping.end();
- i != End; ++i) {
- delete i->second;
- }
+ for (auto &N : Mapping)
+ delete N.second;
}
Input::SequenceHNode::~SequenceHNode() {
- for (std::vector<HNode*>::iterator i = Entries.begin(), End = Entries.end();
- i != End; ++i) {
- delete *i;
- }
+ for (HNode *N : Entries)
+ delete N;
}
@@ -406,7 +396,7 @@ Output::Output(raw_ostream &yout, void *context)
Output::~Output() {
}
-bool Output::outputting() const {
+bool Output::outputting() {
return true;
}
@@ -550,10 +540,7 @@ void Output::endBitSetScalar() {
this->outputUpToEndOfLine(" ]");
}
-void Output::scalarString(StringRef &S) {
- const char ScalarSafeChars[] = "abcdefghijklmnopqrstuvwxyz"
- "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-/^., \t";
-
+void Output::scalarString(StringRef &S, bool MustQuote) {
this->newLineCheck();
if (S.empty()) {
// Print '' for the empty string because leaving the field empty is not
@@ -561,10 +548,8 @@ void Output::scalarString(StringRef &S) {
this->outputUpToEndOfLine("''");
return;
}
- if (S.find_first_not_of(ScalarSafeChars) == StringRef::npos &&
- !isspace(S.front()) && !isspace(S.back())) {
- // If the string consists only of safe characters, print it out without
- // quotes.
+ if (!MustQuote) {
+ // Only quote if we must.
this->outputUpToEndOfLine(S);
return;
}
@@ -689,6 +674,17 @@ StringRef ScalarTraits<StringRef>::input(StringRef Scalar, void *,
Val = Scalar;
return StringRef();
}
+
+void ScalarTraits<std::string>::output(const std::string &Val, void *,
+ raw_ostream &Out) {
+ Out << Val;
+}
+
+StringRef ScalarTraits<std::string>::input(StringRef Scalar, void *,
+ std::string &Val) {
+ Val = Scalar.str();
+ return StringRef();
+}
void ScalarTraits<uint8_t>::output(const uint8_t &Val, void *,
raw_ostream &Out) {
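The new ScalarTraits<std::string> specialization lets YAML I/O mappings bind std::string fields directly. A hedged usage sketch — the Package struct and its keys are invented for the example:

    struct Package {
      std::string Name;
      uint32_t Version;
    };

    namespace llvm {
    namespace yaml {
    template <> struct MappingTraits<Package> {
      static void mapping(IO &io, Package &P) {
        io.mapRequired("name", P.Name);       // handled by ScalarTraits<std::string>
        io.mapRequired("version", P.Version);
      }
    };
    } // end namespace yaml
    } // end namespace llvm

    // Writing:  yaml::Output yout(llvm::outs());  yout << Pkg;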
diff --git a/contrib/llvm/lib/Support/raw_ostream.cpp b/contrib/llvm/lib/Support/raw_ostream.cpp
index cb96489..0790be5 100644
--- a/contrib/llvm/lib/Support/raw_ostream.cpp
+++ b/contrib/llvm/lib/Support/raw_ostream.cpp
@@ -22,10 +22,10 @@
#include "llvm/Support/Format.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
-#include "llvm/Support/system_error.h"
#include <cctype>
#include <cerrno>
#include <sys/stat.h>
+#include <system_error>
// <fcntl.h> may provide O_BINARY.
#if defined(HAVE_FCNTL_H)
@@ -87,8 +87,8 @@ void raw_ostream::SetBuffered() {
void raw_ostream::SetBufferAndMode(char *BufferStart, size_t Size,
BufferKind Mode) {
- assert(((Mode == Unbuffered && BufferStart == 0 && Size == 0) ||
- (Mode != Unbuffered && BufferStart && Size)) &&
+ assert(((Mode == Unbuffered && !BufferStart && Size == 0) ||
+ (Mode != Unbuffered && BufferStart && Size != 0)) &&
"stream must be unbuffered or have at least one byte");
// Make sure the current buffer is free of content (we can't flush here; the
// child buffer management logic will be in write_impl).
@@ -227,11 +227,17 @@ raw_ostream &raw_ostream::operator<<(double N) {
// On MSVCRT and compatible, output of %e is incompatible to Posix
// by default. Number of exponent digits should be at least 2. "%+03d"
// FIXME: Implement our formatter to here or Support/Format.h!
+#if __cplusplus >= 201103L && defined(__MINGW32__)
+ // FIXME: It should be generic to C++11.
+ if (N == 0.0 && std::signbit(N))
+ return *this << "-0.000000e+00";
+#else
int fpcl = _fpclass(N);
// negative zero
if (fpcl == _FPCLASS_NZ)
return *this << "-0.000000e+00";
+#endif
char buf[16];
unsigned len;
@@ -427,7 +433,7 @@ void format_object_base::home() {
raw_fd_ostream::raw_fd_ostream(const char *Filename, std::string &ErrorInfo,
sys::fs::OpenFlags Flags)
: Error(false), UseAtomicWrites(false), pos(0) {
- assert(Filename != 0 && "Filename is null");
+ assert(Filename && "Filename is null");
ErrorInfo.clear();
// Handle "-" as stdout. Note that when we do this, we consider ourself
@@ -437,14 +443,14 @@ raw_fd_ostream::raw_fd_ostream(const char *Filename, std::string &ErrorInfo,
FD = STDOUT_FILENO;
// If user requested binary then put stdout into binary mode if
// possible.
- if (Flags & sys::fs::F_Binary)
+ if (!(Flags & sys::fs::F_Text))
sys::ChangeStdoutToBinary();
// Close stdout when we're done, to detect any output errors.
ShouldClose = true;
return;
}
- error_code EC = sys::fs::openFileForWrite(Filename, FD, Flags);
+ std::error_code EC = sys::fs::openFileForWrite(Filename, FD, Flags);
if (EC) {
ErrorInfo = "Error opening output file '" + std::string(Filename) + "': " +
@@ -463,9 +469,10 @@ raw_fd_ostream::raw_fd_ostream(int fd, bool shouldClose, bool unbuffered)
: raw_ostream(unbuffered), FD(fd),
ShouldClose(shouldClose), Error(false), UseAtomicWrites(false) {
#ifdef O_BINARY
- // Setting STDOUT and STDERR to binary mode is necessary in Win32
+ // Setting STDOUT to binary mode is necessary in Win32
// to avoid undesirable linefeed conversion.
- if (fd == STDOUT_FILENO || fd == STDERR_FILENO)
+ // Don't touch STDERR, or w*printf() (in assert()) would barf wide chars.
+ if (fd == STDOUT_FILENO)
setmode(fd, O_BINARY);
#endif
@@ -653,7 +660,7 @@ bool raw_fd_ostream::has_colors() const {
/// Use it like: outs() << "foo" << "bar";
raw_ostream &llvm::outs() {
// Set buffer settings to model stdout behavior.
- // Delete the file descriptor when the program exists, forcing error
+ // Delete the file descriptor when the program exits, forcing error
// detection. If you don't want this behavior, don't use outs().
static raw_fd_ostream S(STDOUT_FILENO, true);
return S;
@@ -722,24 +729,17 @@ void raw_svector_ostream::resync() {
}
void raw_svector_ostream::write_impl(const char *Ptr, size_t Size) {
- // If we're writing bytes from the end of the buffer into the smallvector, we
- // don't need to copy the bytes, just commit the bytes because they are
- // already in the right place.
if (Ptr == OS.end()) {
- assert(OS.size() + Size <= OS.capacity() && "Invalid write_impl() call!");
- OS.set_size(OS.size() + Size);
+ // Grow the buffer to include the scratch area without copying.
+ size_t NewSize = OS.size() + Size;
+ assert(NewSize <= OS.capacity() && "Invalid write_impl() call!");
+ OS.set_size(NewSize);
} else {
- assert(GetNumBytesInBuffer() == 0 &&
- "Should be writing from buffer if some bytes in it");
- // Otherwise, do copy the bytes.
- OS.append(Ptr, Ptr+Size);
+ assert(!GetNumBytesInBuffer());
+ OS.append(Ptr, Ptr + Size);
}
- // Grow the vector if necessary.
- if (OS.capacity() - OS.size() < 64)
- OS.reserve(OS.capacity() * 2);
-
- // Update the buffer position.
+ OS.reserve(OS.size() + 64);
SetBuffer(OS.end(), OS.capacity() - OS.size());
}
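For context on the write_impl() change: raw_svector_ostream streams straight into a caller-owned SmallVector, so the growth logic above is what keeps the common pattern below cheap (names illustrative):

    SmallString<128> Buf;
    raw_svector_ostream OS(Buf);
    OS << "width=" << 42;
    StringRef Text = OS.str(); // flushes into Buf and returns its contents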
diff --git a/contrib/llvm/lib/Support/regcclass.h b/contrib/llvm/lib/Support/regcclass.h
index 2cea3e4..7fd6604 100644
--- a/contrib/llvm/lib/Support/regcclass.h
+++ b/contrib/llvm/lib/Support/regcclass.h
@@ -37,6 +37,9 @@
* @(#)cclass.h 8.3 (Berkeley) 3/20/94
*/
+#ifndef LLVM_SUPPORT_REGCCLASS_H
+#define LLVM_SUPPORT_REGCCLASS_H
+
/* character-class table */
static struct cclass {
const char *name;
@@ -68,3 +71,5 @@ static struct cclass {
""} ,
{ NULL, 0, "" }
};
+
+#endif
diff --git a/contrib/llvm/lib/Support/regcname.h b/contrib/llvm/lib/Support/regcname.h
index 3c0bb24..891d255 100644
--- a/contrib/llvm/lib/Support/regcname.h
+++ b/contrib/llvm/lib/Support/regcname.h
@@ -35,6 +35,9 @@
* @(#)cname.h 8.3 (Berkeley) 3/20/94
*/
+#ifndef LLVM_SUPPORT_REGCNAME_H
+#define LLVM_SUPPORT_REGCNAME_H
+
/* character-name table */
static struct cname {
const char *name;
@@ -137,3 +140,5 @@ static struct cname {
{ "DEL", '\177' },
{ NULL, 0 }
};
+
+#endif
diff --git a/contrib/llvm/lib/Support/regcomp.c b/contrib/llvm/lib/Support/regcomp.c
index 74d9186..0b5b765 100644
--- a/contrib/llvm/lib/Support/regcomp.c
+++ b/contrib/llvm/lib/Support/regcomp.c
@@ -532,10 +532,10 @@ p_simp_re(struct parse *p,
sopno subno;
# define BACKSL (1<<CHAR_BIT)
- pos = HERE(); /* repetion op, if any, covers from here */
+ pos = HERE(); /* repetition op, if any, covers from here */
- assert(MORE()); /* caller should have ensured this */
- c = GETNEXT();
+ assert(MORE()); /* caller should have ensured this */
+ c = GETNEXT();
if (c == '\\') {
REQUIRE(MORE(), REG_EESCAPE);
c = BACKSL | GETNEXT();
diff --git a/contrib/llvm/lib/Support/regengine.inc b/contrib/llvm/lib/Support/regengine.inc
index 7e41f96..62d8c26 100644
--- a/contrib/llvm/lib/Support/regengine.inc
+++ b/contrib/llvm/lib/Support/regengine.inc
@@ -205,7 +205,7 @@ matcher(struct re_guts *g, const char *string, size_t nmatch,
if (nmatch == 1 && !g->backrefs)
break; /* no further info needed */
- /* oh my, he wants the subexpressions... */
+ /* oh my, they want the subexpressions... */
if (m->pmatch == NULL)
m->pmatch = (llvm_regmatch_t *)malloc((m->g->nsub + 1) *
sizeof(llvm_regmatch_t));
diff --git a/contrib/llvm/lib/Support/regex2.h b/contrib/llvm/lib/Support/regex2.h
index 21659c3..d81bfbc 100644
--- a/contrib/llvm/lib/Support/regex2.h
+++ b/contrib/llvm/lib/Support/regex2.h
@@ -35,6 +35,9 @@
* @(#)regex2.h 8.4 (Berkeley) 3/20/94
*/
+#ifndef LLVM_SUPPORT_REGEX2_H
+#define LLVM_SUPPORT_REGEX2_H
+
/*
* internals of regex_t
*/
@@ -155,3 +158,5 @@ struct re_guts {
/* misc utilities */
#define OUT (CHAR_MAX+1) /* a non-character value */
#define ISWORD(c) (isalnum(c&0xff) || (c) == '_')
+
+#endif
diff --git a/contrib/llvm/lib/Support/regutils.h b/contrib/llvm/lib/Support/regutils.h
index d0ee100..49a975c 100644
--- a/contrib/llvm/lib/Support/regutils.h
+++ b/contrib/llvm/lib/Support/regutils.h
@@ -35,6 +35,9 @@
* @(#)utils.h 8.3 (Berkeley) 3/20/94
*/
+#ifndef LLVM_SUPPORT_REGUTILS_H
+#define LLVM_SUPPORT_REGUTILS_H
+
/* utility definitions */
#define NC (CHAR_MAX - CHAR_MIN + 1)
typedef unsigned char uch;
@@ -51,3 +54,5 @@ typedef unsigned char uch;
#ifdef USEBCOPY
#define memmove(d, s, c) bcopy(s, d, c)
#endif
+
+#endif
diff --git a/contrib/llvm/lib/Support/system_error.cpp b/contrib/llvm/lib/Support/system_error.cpp
deleted file mode 100644
index b22745a..0000000
--- a/contrib/llvm/lib/Support/system_error.cpp
+++ /dev/null
@@ -1,130 +0,0 @@
-//===---------------------- system_error.cpp ------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This was lifted from libc++ and modified for C++03.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Support/system_error.h"
-#include "llvm/Support/Errno.h"
-#include <cstring>
-#include <string>
-
-namespace llvm {
-
-// class error_category
-
-error_category::error_category() {
-}
-
-error_category::~error_category() {
-}
-
-error_condition
-error_category::default_error_condition(int ev) const {
- return error_condition(ev, *this);
-}
-
-bool
-error_category::equivalent(int code, const error_condition& condition) const {
- return default_error_condition(code) == condition;
-}
-
-bool
-error_category::equivalent(const error_code& code, int condition) const {
- return *this == code.category() && code.value() == condition;
-}
-
-std::string
-_do_message::message(int ev) const {
- return std::string(sys::StrError(ev));
-}
-
-class _generic_error_category : public _do_message {
-public:
- virtual const char* name() const LLVM_OVERRIDE;
- virtual std::string message(int ev) const LLVM_OVERRIDE;
-};
-
-const char*
-_generic_error_category::name() const {
- return "generic";
-}
-
-std::string
-_generic_error_category::message(int ev) const {
-#ifdef ELAST
- if (ev > ELAST)
- return std::string("unspecified generic_category error");
-#endif // ELAST
- return _do_message::message(ev);
-}
-
-const error_category&
-generic_category() {
- static _generic_error_category s;
- return s;
-}
-
-class _system_error_category : public _do_message {
-public:
- virtual const char* name() const LLVM_OVERRIDE;
- virtual std::string message(int ev) const LLVM_OVERRIDE;
- virtual error_condition default_error_condition(int ev) const LLVM_OVERRIDE;
-};
-
-const char*
-_system_error_category::name() const {
- return "system";
-}
-
-// std::string _system_error_category::message(int ev) const {
-// Is in Platform/system_error.inc
-
-// error_condition _system_error_category::default_error_condition(int ev) const
-// Is in Platform/system_error.inc
-
-const error_category&
-system_category() {
- static _system_error_category s;
- return s;
-}
-
-const error_category&
-posix_category() {
-#ifdef LLVM_ON_WIN32
- return generic_category();
-#else
- return system_category();
-#endif
-}
-
-// error_condition
-
-std::string
-error_condition::message() const {
- return _cat_->message(_val_);
-}
-
-// error_code
-
-std::string
-error_code::message() const {
- return _cat_->message(_val_);
-}
-
-} // end namespace llvm
-
-// Include the truly platform-specific parts of this class.
-#if defined(LLVM_ON_UNIX)
-#include "Unix/system_error.inc"
-#endif
-#if defined(LLVM_ON_WIN32)
-#include "Windows/system_error.inc"
-#endif
diff --git a/contrib/llvm/lib/TableGen/Error.cpp b/contrib/llvm/lib/TableGen/Error.cpp
index 928b120..8d9ae20 100644
--- a/contrib/llvm/lib/TableGen/Error.cpp
+++ b/contrib/llvm/lib/TableGen/Error.cpp
@@ -62,12 +62,12 @@ void PrintError(const Twine &Msg) {
errs() << "error:" << Msg << "\n";
}
-void PrintFatalError(const std::string &Msg) {
- PrintError(Twine(Msg));
+void PrintFatalError(const Twine &Msg) {
+ PrintError(Msg);
std::exit(1);
}
-void PrintFatalError(ArrayRef<SMLoc> ErrorLoc, const std::string &Msg) {
+void PrintFatalError(ArrayRef<SMLoc> ErrorLoc, const Twine &Msg) {
PrintError(ErrorLoc, Msg);
std::exit(1);
}
diff --git a/contrib/llvm/lib/TableGen/Main.cpp b/contrib/llvm/lib/TableGen/Main.cpp
index 7fe47bc..e317fbf 100644
--- a/contrib/llvm/lib/TableGen/Main.cpp
+++ b/contrib/llvm/lib/TableGen/Main.cpp
@@ -16,16 +16,16 @@
//===----------------------------------------------------------------------===//
#include "TGParser.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/ToolOutputFile.h"
-#include "llvm/Support/system_error.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Main.h"
#include "llvm/TableGen/Record.h"
#include <algorithm>
#include <cstdio>
+#include <system_error>
using namespace llvm;
namespace {
@@ -57,7 +57,7 @@ static int createDependencyFile(const TGParser &Parser, const char *argv0) {
return 1;
}
std::string Error;
- tool_output_file DepOut(DependFilename.c_str(), Error);
+ tool_output_file DepOut(DependFilename.c_str(), Error, sys::fs::F_Text);
if (!Error.empty()) {
errs() << argv0 << ": error opening " << DependFilename
<< ":" << Error << "\n";
@@ -81,14 +81,14 @@ int TableGenMain(char *argv0, TableGenMainFn *MainFn) {
RecordKeeper Records;
// Parse the input file.
- OwningPtr<MemoryBuffer> File;
- if (error_code ec =
- MemoryBuffer::getFileOrSTDIN(InputFilename, File)) {
- errs() << "Could not open input file '" << InputFilename << "': "
- << ec.message() <<"\n";
+ ErrorOr<std::unique_ptr<MemoryBuffer>> FileOrErr =
+ MemoryBuffer::getFileOrSTDIN(InputFilename);
+ if (std::error_code EC = FileOrErr.getError()) {
+ errs() << "Could not open input file '" << InputFilename
+ << "': " << EC.message() << "\n";
return 1;
}
- MemoryBuffer *F = File.take();
+ MemoryBuffer *F = FileOrErr.get().release();
// Tell SrcMgr about this buffer, which is what TGParser will pick up.
SrcMgr.AddNewSourceBuffer(F, SMLoc());
@@ -103,7 +103,7 @@ int TableGenMain(char *argv0, TableGenMainFn *MainFn) {
return 1;
std::string Error;
- tool_output_file Out(OutputFilename.c_str(), Error);
+ tool_output_file Out(OutputFilename.c_str(), Error, sys::fs::F_Text);
if (!Error.empty()) {
errs() << argv0 << ": error opening " << OutputFilename
<< ":" << Error << "\n";
diff --git a/contrib/llvm/lib/TableGen/Record.cpp b/contrib/llvm/lib/TableGen/Record.cpp
index 431f4aa..0f40904 100644
--- a/contrib/llvm/lib/TableGen/Record.cpp
+++ b/contrib/llvm/lib/TableGen/Record.cpp
@@ -101,13 +101,13 @@ bool RecTy::baseClassOf(const RecTy *RHS) const{
}
Init *BitRecTy::convertValue(BitsInit *BI) {
- if (BI->getNumBits() != 1) return 0; // Only accept if just one bit!
+ if (BI->getNumBits() != 1) return nullptr; // Only accept if just one bit!
return BI->getBit(0);
}
Init *BitRecTy::convertValue(IntInit *II) {
int64_t Val = II->getValue();
- if (Val != 0 && Val != 1) return 0; // Only accept 0 or 1 for a bit!
+ if (Val != 0 && Val != 1) return nullptr; // Only accept 0 or 1 for a bit!
return BitInit::get(Val != 0);
}
@@ -116,7 +116,7 @@ Init *BitRecTy::convertValue(TypedInit *VI) {
RecTy *Ty = VI->getType();
if (isa<BitRecTy>(Ty) || isa<BitsRecTy>(Ty) || isa<IntRecTy>(Ty))
return VI; // Accept variable if it is already of bit type!
- return 0;
+ return nullptr;
}
bool BitRecTy::baseClassOf(const RecTy *RHS) const{
@@ -151,8 +151,8 @@ Init *BitsRecTy::convertValue(UnsetInit *UI) {
}
Init *BitsRecTy::convertValue(BitInit *UI) {
- if (Size != 1) return 0; // Can only convert single bit.
- return BitsInit::get(UI);
+ if (Size != 1) return nullptr; // Can only convert single bit.
+ return BitsInit::get(UI);
}
/// canFitInBitfield - Return true if the number of bits is large enough to hold
@@ -170,7 +170,7 @@ Init *BitsRecTy::convertValue(IntInit *II) {
int64_t Value = II->getValue();
// Make sure this bitfield is large enough to hold the integer value.
if (!canFitInBitfield(Value, Size))
- return 0;
+ return nullptr;
SmallVector<Init *, 16> NewBits(Size);
@@ -184,7 +184,7 @@ Init *BitsRecTy::convertValue(BitsInit *BI) {
// If the number of bits is right, return it. Otherwise we need to expand or
// truncate.
if (BI->getNumBits() == Size) return BI;
- return 0;
+ return nullptr;
}
Init *BitsRecTy::convertValue(TypedInit *VI) {
@@ -199,7 +199,7 @@ Init *BitsRecTy::convertValue(TypedInit *VI) {
return BitsInit::get(NewBits);
}
- return 0;
+ return nullptr;
}
bool BitsRecTy::baseClassOf(const RecTy *RHS) const{
@@ -219,7 +219,7 @@ Init *IntRecTy::convertValue(BitsInit *BI) {
if (BitInit *Bit = dyn_cast<BitInit>(BI->getBit(i))) {
Result |= Bit->getValue() << i;
} else {
- return 0;
+ return nullptr;
}
return IntInit::get(Result);
}
@@ -227,7 +227,7 @@ Init *IntRecTy::convertValue(BitsInit *BI) {
Init *IntRecTy::convertValue(TypedInit *TI) {
if (TI->getType()->typeIsConvertibleTo(this))
return TI; // Accept variable if already of the right type!
- return 0;
+ return nullptr;
}
bool IntRecTy::baseClassOf(const RecTy *RHS) const{
@@ -238,7 +238,7 @@ bool IntRecTy::baseClassOf(const RecTy *RHS) const{
Init *StringRecTy::convertValue(UnOpInit *BO) {
if (BO->getOpcode() == UnOpInit::CAST) {
Init *L = BO->getOperand()->convertInitializerTo(this);
- if (L == 0) return 0;
+ if (!L) return nullptr;
if (L != BO->getOperand())
return UnOpInit::get(UnOpInit::CAST, L, new StringRecTy);
return BO;
@@ -251,7 +251,7 @@ Init *StringRecTy::convertValue(BinOpInit *BO) {
if (BO->getOpcode() == BinOpInit::STRCONCAT) {
Init *L = BO->getLHS()->convertInitializerTo(this);
Init *R = BO->getRHS()->convertInitializerTo(this);
- if (L == 0 || R == 0) return 0;
+ if (!L || !R) return nullptr;
if (L != BO->getLHS() || R != BO->getRHS())
return BinOpInit::get(BinOpInit::STRCONCAT, L, R, new StringRecTy);
return BO;
@@ -264,7 +264,7 @@ Init *StringRecTy::convertValue(BinOpInit *BO) {
Init *StringRecTy::convertValue(TypedInit *TI) {
if (isa<StringRecTy>(TI->getType()))
return TI; // Accept variable if already of the right type!
- return 0;
+ return nullptr;
}
std::string ListRecTy::getAsString() const {
@@ -280,10 +280,10 @@ Init *ListRecTy::convertValue(ListInit *LI) {
if (Init *CI = LI->getElement(i)->convertInitializerTo(Ty))
Elements.push_back(CI);
else
- return 0;
+ return nullptr;
if (!isa<ListRecTy>(LI->getType()))
- return 0;
+ return nullptr;
return ListInit::get(Elements, this);
}
@@ -293,7 +293,7 @@ Init *ListRecTy::convertValue(TypedInit *TI) {
if (ListRecTy *LRT = dyn_cast<ListRecTy>(TI->getType()))
if (LRT->getElementType()->typeIsConvertibleTo(getElementType()))
return TI;
- return 0;
+ return nullptr;
}
bool ListRecTy::baseClassOf(const RecTy *RHS) const{
@@ -305,30 +305,30 @@ bool ListRecTy::baseClassOf(const RecTy *RHS) const{
Init *DagRecTy::convertValue(TypedInit *TI) {
if (TI->getType()->typeIsConvertibleTo(this))
return TI;
- return 0;
+ return nullptr;
}
Init *DagRecTy::convertValue(UnOpInit *BO) {
if (BO->getOpcode() == UnOpInit::CAST) {
Init *L = BO->getOperand()->convertInitializerTo(this);
- if (L == 0) return 0;
+ if (!L) return nullptr;
if (L != BO->getOperand())
return UnOpInit::get(UnOpInit::CAST, L, new DagRecTy);
return BO;
}
- return 0;
+ return nullptr;
}
Init *DagRecTy::convertValue(BinOpInit *BO) {
if (BO->getOpcode() == BinOpInit::CONCAT) {
Init *L = BO->getLHS()->convertInitializerTo(this);
Init *R = BO->getRHS()->convertInitializerTo(this);
- if (L == 0 || R == 0) return 0;
+ if (!L || !R) return nullptr;
if (L != BO->getLHS() || R != BO->getRHS())
return BinOpInit::get(BinOpInit::CONCAT, L, R, new DagRecTy);
return BO;
}
- return 0;
+ return nullptr;
}
RecordRecTy *RecordRecTy::get(Record *R) {
@@ -342,7 +342,7 @@ std::string RecordRecTy::getAsString() const {
Init *RecordRecTy::convertValue(DefInit *DI) {
// Ensure that DI is a subclass of Rec.
if (!DI->getDef()->isSubClassOf(Rec))
- return 0;
+ return nullptr;
return DI;
}
@@ -352,7 +352,7 @@ Init *RecordRecTy::convertValue(TypedInit *TI) {
if (RRT->getRecord()->isSubClassOf(getRecord()) ||
RRT->getRecord() == getRecord())
return TI;
- return 0;
+ return nullptr;
}
bool RecordRecTy::baseClassOf(const RecTy *RHS) const{
@@ -391,7 +391,7 @@ RecTy *llvm::resolveTypes(RecTy *T1, RecTy *T2) {
++i) {
RecordRecTy *SuperRecTy1 = RecordRecTy::get(*i);
RecTy *NewType1 = resolveTypes(SuperRecTy1, T2);
- if (NewType1 != 0) {
+ if (NewType1) {
if (NewType1 != SuperRecTy1) {
delete SuperRecTy1;
}
@@ -409,7 +409,7 @@ RecTy *llvm::resolveTypes(RecTy *T1, RecTy *T2) {
++i) {
RecordRecTy *SuperRecTy2 = RecordRecTy::get(*i);
RecTy *NewType2 = resolveTypes(T1, SuperRecTy2);
- if (NewType2 != 0) {
+ if (NewType2) {
if (NewType2 != SuperRecTy2) {
delete SuperRecTy2;
}
@@ -417,7 +417,7 @@ RecTy *llvm::resolveTypes(RecTy *T1, RecTy *T2) {
}
}
}
- return 0;
+ return nullptr;
}
@@ -462,7 +462,7 @@ BitsInit *BitsInit::get(ArrayRef<Init *> Range) {
FoldingSetNodeID ID;
ProfileBitsInit(ID, Range);
- void *IP = 0;
+ void *IP = nullptr;
if (BitsInit *I = ThePool.FindNodeOrInsertPos(ID, IP))
return I;
@@ -482,7 +482,7 @@ BitsInit::convertInitializerBitRange(const std::vector<unsigned> &Bits) const {
for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
if (Bits[i] >= getNumBits())
- return 0;
+ return nullptr;
NewBits[i] = getBit(Bits[i]);
}
return BitsInit::get(NewBits);
@@ -516,8 +516,8 @@ Init *BitsInit::resolveReferences(Record &R, const RecordVal *RV) const {
bool Changed = false;
SmallVector<Init *, 16> NewBits(getNumBits());
- Init *CachedInit = 0;
- Init *CachedBitVar = 0;
+ Init *CachedInit = nullptr;
+ Init *CachedBitVar = nullptr;
bool CachedBitVarChanged = false;
for (unsigned i = 0, e = getNumBits(); i != e; ++i) {
@@ -590,7 +590,7 @@ IntInit::convertInitializerBitRange(const std::vector<unsigned> &Bits) const {
for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
if (Bits[i] >= 64)
- return 0;
+ return nullptr;
NewBits[i] = BitInit::get(Value & (INT64_C(1) << Bits[i]));
}
@@ -623,18 +623,18 @@ static void ProfileListInit(FoldingSetNodeID &ID,
ListInit *ListInit::get(ArrayRef<Init *> Range, RecTy *EltTy) {
typedef FoldingSet<ListInit> Pool;
static Pool ThePool;
+ static std::vector<std::unique_ptr<ListInit>> TheActualPool;
- // Just use the FoldingSetNodeID to compute a hash. Use a DenseMap
- // for actual storage.
FoldingSetNodeID ID;
ProfileListInit(ID, Range, EltTy);
- void *IP = 0;
+ void *IP = nullptr;
if (ListInit *I = ThePool.FindNodeOrInsertPos(ID, IP))
return I;
ListInit *I = new ListInit(Range, EltTy);
ThePool.InsertNode(I, IP);
+ TheActualPool.push_back(std::unique_ptr<ListInit>(I));
return I;
}
@@ -651,7 +651,7 @@ ListInit::convertInitListSlice(const std::vector<unsigned> &Elements) const {
std::vector<Init*> Vals;
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
if (Elements[i] >= getSize())
- return 0;
+ return nullptr;
Vals.push_back(getElement(Elements[i]));
}
return ListInit::get(Vals, getType());
@@ -660,7 +660,7 @@ ListInit::convertInitListSlice(const std::vector<unsigned> &Elements) const {
Record *ListInit::getElementAsRecord(unsigned i) const {
assert(i < Values.size() && "List element index out of range!");
DefInit *DI = dyn_cast<DefInit>(Values[i]);
- if (DI == 0)
+ if (!DI)
PrintFatalError("Expected record in list!");
return DI->getDef();
}
@@ -690,14 +690,14 @@ Init *ListInit::resolveReferences(Record &R, const RecordVal *RV) const {
Init *ListInit::resolveListElementReference(Record &R, const RecordVal *IRV,
unsigned Elt) const {
if (Elt >= getSize())
- return 0; // Out of range reference.
+ return nullptr; // Out of range reference.
Init *E = getElement(Elt);
// If the element is set to some value, or if we are resolving a reference
// to a specific variable and that variable is explicitly unset, then
// replace the VarListElementInit with it.
if (IRV || !isa<UnsetInit>(E))
return E;
- return 0;
+ return nullptr;
}
std::string ListInit::getAsString() const {
@@ -714,7 +714,7 @@ Init *OpInit::resolveListElementReference(Record &R, const RecordVal *IRV,
Init *Resolved = resolveReferences(R, IRV);
OpInit *OResolved = dyn_cast<OpInit>(Resolved);
if (OResolved) {
- Resolved = OResolved->Fold(&R, 0);
+ Resolved = OResolved->Fold(&R, nullptr);
}
if (Resolved != this) {
@@ -728,7 +728,7 @@ Init *OpInit::resolveListElementReference(Record &R, const RecordVal *IRV,
}
}
- return 0;
+ return nullptr;
}
Init *OpInit::getBit(unsigned Bit) const {
@@ -811,20 +811,14 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
}
case HEAD: {
if (ListInit *LHSl = dyn_cast<ListInit>(LHS)) {
- if (LHSl->getSize() == 0) {
- assert(0 && "Empty list in car");
- return 0;
- }
+ assert(LHSl->getSize() != 0 && "Empty list in car");
return LHSl->getElement(0);
}
break;
}
case TAIL: {
if (ListInit *LHSl = dyn_cast<ListInit>(LHS)) {
- if (LHSl->getSize() == 0) {
- assert(0 && "Empty list in cdr");
- return 0;
- }
+ assert(LHSl->getSize() != 0 && "Empty list in cdr");
// Note the +1. We can't just pass the result of getValues()
// directly.
ArrayRef<Init *>::iterator begin = LHSl->getValues().begin()+1;
@@ -862,8 +856,8 @@ Init *UnOpInit::resolveReferences(Record &R, const RecordVal *RV) const {
Init *lhs = LHS->resolveReferences(R, RV);
if (LHS != lhs)
- return (UnOpInit::get(getOpcode(), lhs, getType()))->Fold(&R, 0);
- return Fold(&R, 0);
+ return (UnOpInit::get(getOpcode(), lhs, getType()))->Fold(&R, nullptr);
+ return Fold(&R, nullptr);
}
std::string UnOpInit::getAsString() const {
@@ -902,7 +896,7 @@ Init *BinOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
if (LHSs && RHSs) {
DefInit *LOp = dyn_cast<DefInit>(LHSs->getOperator());
DefInit *ROp = dyn_cast<DefInit>(RHSs->getOperator());
- if (LOp == 0 || ROp == 0 || LOp->getDef() != ROp->getDef())
+ if (!LOp || !ROp || LOp->getDef() != ROp->getDef())
PrintFatalError("Concated Dag operators do not match!");
std::vector<Init*> Args;
std::vector<std::string> ArgNames;
@@ -918,6 +912,18 @@ Init *BinOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
}
break;
}
+ case LISTCONCAT: {
+ ListInit *LHSs = dyn_cast<ListInit>(LHS);
+ ListInit *RHSs = dyn_cast<ListInit>(RHS);
+ if (LHSs && RHSs) {
+ std::vector<Init *> Args;
+ Args.insert(Args.end(), LHSs->begin(), LHSs->end());
+ Args.insert(Args.end(), RHSs->begin(), RHSs->end());
+ return ListInit::get(
+ Args, static_cast<ListRecTy *>(LHSs->getType())->getElementType());
+ }
+ break;
+ }
case STRCONCAT: {
StringInit *LHSs = dyn_cast<StringInit>(LHS);
StringInit *RHSs = dyn_cast<StringInit>(RHS);
@@ -949,8 +955,10 @@ Init *BinOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
case SHL:
case SRA:
case SRL: {
- IntInit *LHSi = dyn_cast<IntInit>(LHS);
- IntInit *RHSi = dyn_cast<IntInit>(RHS);
+ IntInit *LHSi =
+ dyn_cast_or_null<IntInit>(LHS->convertInitializerTo(IntRecTy::get()));
+ IntInit *RHSi =
+ dyn_cast_or_null<IntInit>(RHS->convertInitializerTo(IntRecTy::get()));
if (LHSi && RHSi) {
int64_t LHSv = LHSi->getValue(), RHSv = RHSi->getValue();
int64_t Result;
@@ -974,8 +982,8 @@ Init *BinOpInit::resolveReferences(Record &R, const RecordVal *RV) const {
Init *rhs = RHS->resolveReferences(R, RV);
if (LHS != lhs || RHS != rhs)
- return (BinOpInit::get(getOpcode(), lhs, rhs, getType()))->Fold(&R, 0);
- return Fold(&R, 0);
+ return (BinOpInit::get(getOpcode(), lhs, rhs, getType()))->Fold(&R,nullptr);
+ return Fold(&R, nullptr);
}
std::string BinOpInit::getAsString() const {
@@ -987,6 +995,7 @@ std::string BinOpInit::getAsString() const {
case SRA: Result = "!sra"; break;
case SRL: Result = "!srl"; break;
case EQ: Result = "!eq"; break;
+ case LISTCONCAT: Result = "!listconcat"; break;
case STRCONCAT: Result = "!strconcat"; break;
}
return Result + "(" + LHS->getAsString() + ", " + RHS->getAsString() + ")";
@@ -1031,11 +1040,7 @@ static Init *EvaluateOperation(OpInit *RHSo, Init *LHS, Init *Arg,
if (TArg && TArg->getType()->getAsString() == "dag") {
Init *Result = ForeachHelper(LHS, Arg, RHSo, Type,
CurRec, CurMultiClass);
- if (Result != 0) {
- return Result;
- } else {
- return 0;
- }
+ return Result;
}
for (int i = 0; i < RHSo->getNumOperands(); ++i) {
@@ -1044,7 +1049,7 @@ static Init *EvaluateOperation(OpInit *RHSo, Init *LHS, Init *Arg,
if (RHSoo) {
Init *Result = EvaluateOperation(RHSoo, LHS, Arg,
Type, CurRec, CurMultiClass);
- if (Result != 0) {
+ if (Result) {
NewOperands.push_back(Result);
} else {
NewOperands.push_back(Arg);
@@ -1059,10 +1064,7 @@ static Init *EvaluateOperation(OpInit *RHSo, Init *LHS, Init *Arg,
// Now run the operator and use its result as the new leaf
const OpInit *NewOp = RHSo->clone(NewOperands);
Init *NewVal = NewOp->Fold(CurRec, CurMultiClass);
- if (NewVal != NewOp)
- return NewVal;
-
- return 0;
+ return (NewVal != NewOp) ? NewVal : nullptr;
}
static Init *ForeachHelper(Init *LHS, Init *MHS, Init *RHS, RecTy *Type,
@@ -1086,7 +1088,7 @@ static Init *ForeachHelper(Init *LHS, Init *MHS, Init *RHS, RecTy *Type,
Init *Val = MHSd->getOperator();
Init *Result = EvaluateOperation(RHSo, LHS, Val,
Type, CurRec, CurMultiClass);
- if (Result != 0) {
+ if (Result) {
Val = Result;
}
@@ -1100,7 +1102,7 @@ static Init *ForeachHelper(Init *LHS, Init *MHS, Init *RHS, RecTy *Type,
// Process args
Init *Result = EvaluateOperation(RHSo, LHS, Arg, Type,
CurRec, CurMultiClass);
- if (Result != 0) {
+ if (Result) {
Arg = Result;
}
@@ -1138,7 +1140,7 @@ static Init *ForeachHelper(Init *LHS, Init *MHS, Init *RHS, RecTy *Type,
return ListInit::get(NewList, MHSl->getType());
}
}
- return 0;
+ return nullptr;
}
Init *TernOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
@@ -1195,7 +1197,7 @@ Init *TernOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
case FOREACH: {
Init *Result = ForeachHelper(LHS, MHS, RHS, getType(),
CurRec, CurMultiClass);
- if (Result != 0) {
+ if (Result) {
return Result;
}
break;
@@ -1227,16 +1229,16 @@ Init *TernOpInit::resolveReferences(Record &R,
IntInit *Value = dyn_cast<IntInit>(lhs);
if (Init *I = lhs->convertInitializerTo(IntRecTy::get()))
Value = dyn_cast<IntInit>(I);
- if (Value != 0) {
+ if (Value) {
// Short-circuit
if (Value->getValue()) {
Init *mhs = MHS->resolveReferences(R, RV);
return (TernOpInit::get(getOpcode(), lhs, mhs,
- RHS, getType()))->Fold(&R, 0);
+ RHS, getType()))->Fold(&R, nullptr);
} else {
Init *rhs = RHS->resolveReferences(R, RV);
return (TernOpInit::get(getOpcode(), lhs, MHS,
- rhs, getType()))->Fold(&R, 0);
+ rhs, getType()))->Fold(&R, nullptr);
}
}
}
@@ -1246,8 +1248,8 @@ Init *TernOpInit::resolveReferences(Record &R,
if (LHS != lhs || MHS != mhs || RHS != rhs)
return (TernOpInit::get(getOpcode(), lhs, mhs, rhs,
- getType()))->Fold(&R, 0);
- return Fold(&R, 0);
+ getType()))->Fold(&R, nullptr);
+ return Fold(&R, nullptr);
}
std::string TernOpInit::getAsString() const {
@@ -1265,19 +1267,19 @@ RecTy *TypedInit::getFieldType(const std::string &FieldName) const {
if (RecordRecTy *RecordType = dyn_cast<RecordRecTy>(getType()))
if (RecordVal *Field = RecordType->getRecord()->getValue(FieldName))
return Field->getType();
- return 0;
+ return nullptr;
}
Init *
TypedInit::convertInitializerBitRange(const std::vector<unsigned> &Bits) const {
BitsRecTy *T = dyn_cast<BitsRecTy>(getType());
- if (T == 0) return 0; // Cannot subscript a non-bits variable.
+ if (!T) return nullptr; // Cannot subscript a non-bits variable.
unsigned NumBits = T->getNumBits();
SmallVector<Init *, 16> NewBits(Bits.size());
for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
if (Bits[i] >= NumBits)
- return 0;
+ return nullptr;
NewBits[i] = VarBitInit::get(const_cast<TypedInit *>(this), Bits[i]);
}
@@ -1287,7 +1289,7 @@ TypedInit::convertInitializerBitRange(const std::vector<unsigned> &Bits) const {
Init *
TypedInit::convertInitListSlice(const std::vector<unsigned> &Elements) const {
ListRecTy *T = dyn_cast<ListRecTy>(getType());
- if (T == 0) return 0; // Cannot subscript a non-list variable.
+ if (!T) return nullptr; // Cannot subscript a non-list variable.
if (Elements.size() == 1)
return VarListElementInit::get(const_cast<TypedInit *>(this), Elements[0]);
@@ -1332,8 +1334,8 @@ Init *VarInit::getBit(unsigned Bit) const {
Init *VarInit::resolveListElementReference(Record &R,
const RecordVal *IRV,
unsigned Elt) const {
- if (R.isTemplateArg(getNameInit())) return 0;
- if (IRV && IRV->getNameInit() != getNameInit()) return 0;
+ if (R.isTemplateArg(getNameInit())) return nullptr;
+ if (IRV && IRV->getNameInit() != getNameInit()) return nullptr;
RecordVal *RV = R.getValue(getNameInit());
assert(RV && "Reference to a non-existent variable?");
@@ -1345,14 +1347,14 @@ Init *VarInit::resolveListElementReference(Record &R,
}
if (Elt >= LI->getSize())
- return 0; // Out of range reference.
+ return nullptr; // Out of range reference.
Init *E = LI->getElement(Elt);
// If the element is set to some value, or if we are resolving a reference
// to a specific variable and that variable is explicitly unset, then
// replace the VarListElementInit with it.
if (IRV || !isa<UnsetInit>(E))
return E;
- return 0;
+ return nullptr;
}
@@ -1360,7 +1362,7 @@ RecTy *VarInit::getFieldType(const std::string &FieldName) const {
if (RecordRecTy *RTy = dyn_cast<RecordRecTy>(getType()))
if (const RecordVal *RV = RTy->getRecord()->getValue(FieldName))
return RV->getType();
- return 0;
+ return nullptr;
}
Init *VarInit::getFieldInit(Record &R, const RecordVal *RV,
@@ -1368,15 +1370,15 @@ Init *VarInit::getFieldInit(Record &R, const RecordVal *RV,
if (isa<RecordRecTy>(getType()))
if (const RecordVal *Val = R.getValue(VarName)) {
if (RV != Val && (RV || isa<UnsetInit>(Val->getValue())))
- return 0;
+ return nullptr;
Init *TheInit = Val->getValue();
assert(TheInit != this && "Infinite loop detected!");
if (Init *I = TheInit->getFieldInit(R, RV, FieldName))
return I;
else
- return 0;
+ return nullptr;
}
- return 0;
+ return nullptr;
}
/// resolveReferences - This method is used by classes that refer to other
@@ -1386,7 +1388,7 @@ Init *VarInit::getFieldInit(Record &R, const RecordVal *RV,
///
Init *VarInit::resolveReferences(Record &R, const RecordVal *RV) const {
if (RecordVal *Val = R.getValue(VarName))
- if (RV == Val || (RV == 0 && !isa<UnsetInit>(Val->getValue())))
+ if (RV == Val || (!RV && !isa<UnsetInit>(Val->getValue())))
return Val->getValue();
return const_cast<VarInit *>(this);
}
@@ -1462,7 +1464,7 @@ Init *VarListElementInit:: resolveListElementReference(Record &R,
return Result;
}
- return 0;
+ return nullptr;
}
DefInit *DefInit::get(Record *R) {
@@ -1472,7 +1474,7 @@ DefInit *DefInit::get(Record *R) {
RecTy *DefInit::getFieldType(const std::string &FieldName) const {
if (const RecordVal *RV = Def->getValue(FieldName))
return RV->getType();
- return 0;
+ return nullptr;
}
Init *DefInit::getFieldInit(Record &R, const RecordVal *RV,
@@ -1507,7 +1509,7 @@ Init *FieldInit::resolveListElementReference(Record &R, const RecordVal *RV,
unsigned Elt) const {
if (Init *ListVal = Rec->getFieldInit(R, RV, FieldName))
if (ListInit *LI = dyn_cast<ListInit>(ListVal)) {
- if (Elt >= LI->getSize()) return 0;
+ if (Elt >= LI->getSize()) return nullptr;
Init *E = LI->getElement(Elt);
// If the element is set to some value, or if we are resolving a
@@ -1516,7 +1518,7 @@ Init *FieldInit::resolveListElementReference(Record &R, const RecordVal *RV,
if (RV || !isa<UnsetInit>(E))
return E;
}
- return 0;
+ return nullptr;
}
Init *FieldInit::resolveReferences(Record &R, const RecordVal *RV) const {
@@ -1560,7 +1562,7 @@ DagInit::get(Init *V, const std::string &VN,
FoldingSetNodeID ID;
ProfileDagInit(ID, V, VN, ArgRange, NameRange);
- void *IP = 0;
+ void *IP = nullptr;
if (DagInit *I = ThePool.FindNodeOrInsertPos(ID, IP))
return I;
@@ -1784,9 +1786,9 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, const Record &R) {
///
Init *Record::getValueInit(StringRef FieldName) const {
const RecordVal *R = getValue(FieldName);
- if (R == 0 || R->getValue() == 0)
+ if (!R || !R->getValue())
PrintFatalError(getLoc(), "Record `" + getName() +
- "' does not have a field named `" + FieldName.str() + "'!\n");
+ "' does not have a field named `" + FieldName + "'!\n");
return R->getValue();
}
@@ -1797,14 +1799,14 @@ Init *Record::getValueInit(StringRef FieldName) const {
///
std::string Record::getValueAsString(StringRef FieldName) const {
const RecordVal *R = getValue(FieldName);
- if (R == 0 || R->getValue() == 0)
+ if (!R || !R->getValue())
PrintFatalError(getLoc(), "Record `" + getName() +
- "' does not have a field named `" + FieldName.str() + "'!\n");
+ "' does not have a field named `" + FieldName + "'!\n");
if (StringInit *SI = dyn_cast<StringInit>(R->getValue()))
return SI->getValue();
PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
- FieldName.str() + "' does not have a string initializer!");
+ FieldName + "' does not have a string initializer!");
}
/// getValueAsBitsInit - This method looks up the specified field and returns
@@ -1813,14 +1815,14 @@ std::string Record::getValueAsString(StringRef FieldName) const {
///
BitsInit *Record::getValueAsBitsInit(StringRef FieldName) const {
const RecordVal *R = getValue(FieldName);
- if (R == 0 || R->getValue() == 0)
+ if (!R || !R->getValue())
PrintFatalError(getLoc(), "Record `" + getName() +
- "' does not have a field named `" + FieldName.str() + "'!\n");
+ "' does not have a field named `" + FieldName + "'!\n");
if (BitsInit *BI = dyn_cast<BitsInit>(R->getValue()))
return BI;
PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
- FieldName.str() + "' does not have a BitsInit initializer!");
+ FieldName + "' does not have a BitsInit initializer!");
}
/// getValueAsListInit - This method looks up the specified field and returns
@@ -1829,14 +1831,14 @@ BitsInit *Record::getValueAsBitsInit(StringRef FieldName) const {
///
ListInit *Record::getValueAsListInit(StringRef FieldName) const {
const RecordVal *R = getValue(FieldName);
- if (R == 0 || R->getValue() == 0)
+ if (!R || !R->getValue())
PrintFatalError(getLoc(), "Record `" + getName() +
- "' does not have a field named `" + FieldName.str() + "'!\n");
+ "' does not have a field named `" + FieldName + "'!\n");
if (ListInit *LI = dyn_cast<ListInit>(R->getValue()))
return LI;
PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
- FieldName.str() + "' does not have a list initializer!");
+ FieldName + "' does not have a list initializer!");
}
/// getValueAsListOfDefs - This method looks up the specified field and returns
@@ -1852,7 +1854,7 @@ Record::getValueAsListOfDefs(StringRef FieldName) const {
Defs.push_back(DI->getDef());
} else {
PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
- FieldName.str() + "' list is not entirely DefInit!");
+ FieldName + "' list is not entirely DefInit!");
}
}
return Defs;
@@ -1864,14 +1866,14 @@ Record::getValueAsListOfDefs(StringRef FieldName) const {
///
int64_t Record::getValueAsInt(StringRef FieldName) const {
const RecordVal *R = getValue(FieldName);
- if (R == 0 || R->getValue() == 0)
+ if (!R || !R->getValue())
PrintFatalError(getLoc(), "Record `" + getName() +
- "' does not have a field named `" + FieldName.str() + "'!\n");
+ "' does not have a field named `" + FieldName + "'!\n");
if (IntInit *II = dyn_cast<IntInit>(R->getValue()))
return II->getValue();
PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
- FieldName.str() + "' does not have an int initializer!");
+ FieldName + "' does not have an int initializer!");
}
/// getValueAsListOfInts - This method looks up the specified field and returns
@@ -1887,7 +1889,7 @@ Record::getValueAsListOfInts(StringRef FieldName) const {
Ints.push_back(II->getValue());
} else {
PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
- FieldName.str() + "' does not have a list of ints initializer!");
+ FieldName + "' does not have a list of ints initializer!");
}
}
return Ints;
@@ -1906,7 +1908,7 @@ Record::getValueAsListOfStrings(StringRef FieldName) const {
Strings.push_back(II->getValue());
} else {
PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
- FieldName.str() + "' does not have a list of strings initializer!");
+ FieldName + "' does not have a list of strings initializer!");
}
}
return Strings;
@@ -1918,14 +1920,14 @@ Record::getValueAsListOfStrings(StringRef FieldName) const {
///
Record *Record::getValueAsDef(StringRef FieldName) const {
const RecordVal *R = getValue(FieldName);
- if (R == 0 || R->getValue() == 0)
+ if (!R || !R->getValue())
PrintFatalError(getLoc(), "Record `" + getName() +
- "' does not have a field named `" + FieldName.str() + "'!\n");
+ "' does not have a field named `" + FieldName + "'!\n");
if (DefInit *DI = dyn_cast<DefInit>(R->getValue()))
return DI->getDef();
PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
- FieldName.str() + "' does not have a def initializer!");
+ FieldName + "' does not have a def initializer!");
}
/// getValueAsBit - This method looks up the specified field and returns its
@@ -1934,19 +1936,19 @@ Record *Record::getValueAsDef(StringRef FieldName) const {
///
bool Record::getValueAsBit(StringRef FieldName) const {
const RecordVal *R = getValue(FieldName);
- if (R == 0 || R->getValue() == 0)
+ if (!R || !R->getValue())
PrintFatalError(getLoc(), "Record `" + getName() +
- "' does not have a field named `" + FieldName.str() + "'!\n");
+ "' does not have a field named `" + FieldName + "'!\n");
if (BitInit *BI = dyn_cast<BitInit>(R->getValue()))
return BI->getValue();
PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
- FieldName.str() + "' does not have a bit initializer!");
+ FieldName + "' does not have a bit initializer!");
}
bool Record::getValueAsBitOrUnset(StringRef FieldName, bool &Unset) const {
const RecordVal *R = getValue(FieldName);
- if (R == 0 || R->getValue() == 0)
+ if (!R || !R->getValue())
PrintFatalError(getLoc(), "Record `" + getName() +
"' does not have a field named `" + FieldName.str() + "'!\n");
@@ -1958,7 +1960,7 @@ bool Record::getValueAsBitOrUnset(StringRef FieldName, bool &Unset) const {
if (BitInit *BI = dyn_cast<BitInit>(R->getValue()))
return BI->getValue();
PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
- FieldName.str() + "' does not have a bit initializer!");
+ FieldName + "' does not have a bit initializer!");
}
/// getValueAsDag - This method looks up the specified field and returns its
@@ -1967,14 +1969,14 @@ bool Record::getValueAsBitOrUnset(StringRef FieldName, bool &Unset) const {
///
DagInit *Record::getValueAsDag(StringRef FieldName) const {
const RecordVal *R = getValue(FieldName);
- if (R == 0 || R->getValue() == 0)
+ if (!R || !R->getValue())
PrintFatalError(getLoc(), "Record `" + getName() +
- "' does not have a field named `" + FieldName.str() + "'!\n");
+ "' does not have a field named `" + FieldName + "'!\n");
if (DagInit *DI = dyn_cast<DagInit>(R->getValue()))
return DI;
PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
- FieldName.str() + "' does not have a dag initializer!");
+ FieldName + "' does not have a dag initializer!");
}
diff --git a/contrib/llvm/lib/TableGen/SetTheory.cpp b/contrib/llvm/lib/TableGen/SetTheory.cpp
new file mode 100644
index 0000000..c99c2ba
--- /dev/null
+++ b/contrib/llvm/lib/TableGen/SetTheory.cpp
@@ -0,0 +1,323 @@
+//===- SetTheory.cpp - Generate ordered sets from DAG expressions ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SetTheory class that computes ordered sets of
+// Records from DAG expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/Format.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/SetTheory.h"
+
+using namespace llvm;
+
+// Define the standard operators.
+namespace {
+
+typedef SetTheory::RecSet RecSet;
+typedef SetTheory::RecVec RecVec;
+
+// (add a, b, ...) Evaluate and union all arguments.
+struct AddOp : public SetTheory::Operator {
+ void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts,
+ ArrayRef<SMLoc> Loc) override {
+ ST.evaluate(Expr->arg_begin(), Expr->arg_end(), Elts, Loc);
+ }
+};
+
+// (sub Add, Sub, ...) Set difference.
+struct SubOp : public SetTheory::Operator {
+ void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts,
+ ArrayRef<SMLoc> Loc) override {
+ if (Expr->arg_size() < 2)
+ PrintFatalError(Loc, "Set difference needs at least two arguments: " +
+ Expr->getAsString());
+ RecSet Add, Sub;
+ ST.evaluate(*Expr->arg_begin(), Add, Loc);
+ ST.evaluate(Expr->arg_begin() + 1, Expr->arg_end(), Sub, Loc);
+ for (RecSet::iterator I = Add.begin(), E = Add.end(); I != E; ++I)
+ if (!Sub.count(*I))
+ Elts.insert(*I);
+ }
+};
+
+// (and S1, S2) Set intersection.
+struct AndOp : public SetTheory::Operator {
+ void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts,
+ ArrayRef<SMLoc> Loc) override {
+ if (Expr->arg_size() != 2)
+ PrintFatalError(Loc, "Set intersection requires two arguments: " +
+ Expr->getAsString());
+ RecSet S1, S2;
+ ST.evaluate(Expr->arg_begin()[0], S1, Loc);
+ ST.evaluate(Expr->arg_begin()[1], S2, Loc);
+ for (RecSet::iterator I = S1.begin(), E = S1.end(); I != E; ++I)
+ if (S2.count(*I))
+ Elts.insert(*I);
+ }
+};
+
+// SetIntBinOp - Abstract base class for (Op S, N) operators.
+struct SetIntBinOp : public SetTheory::Operator {
+ virtual void apply2(SetTheory &ST, DagInit *Expr, RecSet &Set, int64_t N,
+ RecSet &Elts, ArrayRef<SMLoc> Loc) = 0;
+
+ void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts,
+ ArrayRef<SMLoc> Loc) override {
+ if (Expr->arg_size() != 2)
+ PrintFatalError(Loc, "Operator requires (Op Set, Int) arguments: " +
+ Expr->getAsString());
+ RecSet Set;
+ ST.evaluate(Expr->arg_begin()[0], Set, Loc);
+ IntInit *II = dyn_cast<IntInit>(Expr->arg_begin()[1]);
+ if (!II)
+ PrintFatalError(Loc, "Second argument must be an integer: " +
+ Expr->getAsString());
+ apply2(ST, Expr, Set, II->getValue(), Elts, Loc);
+ }
+};
+
+// (shl S, N) Shift left, remove the first N elements.
+struct ShlOp : public SetIntBinOp {
+ void apply2(SetTheory &ST, DagInit *Expr, RecSet &Set, int64_t N,
+ RecSet &Elts, ArrayRef<SMLoc> Loc) override {
+ if (N < 0)
+ PrintFatalError(Loc, "Positive shift required: " +
+ Expr->getAsString());
+ if (unsigned(N) < Set.size())
+ Elts.insert(Set.begin() + N, Set.end());
+ }
+};
+
+// (trunc S, N) Truncate after the first N elements.
+struct TruncOp : public SetIntBinOp {
+ void apply2(SetTheory &ST, DagInit *Expr, RecSet &Set, int64_t N,
+ RecSet &Elts, ArrayRef<SMLoc> Loc) override {
+ if (N < 0)
+ PrintFatalError(Loc, "Positive length required: " +
+ Expr->getAsString());
+ if (unsigned(N) > Set.size())
+ N = Set.size();
+ Elts.insert(Set.begin(), Set.begin() + N);
+ }
+};
+
+// Left/right rotation.
+struct RotOp : public SetIntBinOp {
+ const bool Reverse;
+
+ RotOp(bool Rev) : Reverse(Rev) {}
+
+ void apply2(SetTheory &ST, DagInit *Expr, RecSet &Set, int64_t N,
+ RecSet &Elts, ArrayRef<SMLoc> Loc) override {
+ if (Reverse)
+ N = -N;
+ // N > 0 -> rotate left, N < 0 -> rotate right.
+ if (Set.empty())
+ return;
+ if (N < 0)
+ N = Set.size() - (-N % Set.size());
+ else
+ N %= Set.size();
+ Elts.insert(Set.begin() + N, Set.end());
+ Elts.insert(Set.begin(), Set.begin() + N);
+ }
+};
+
+// (decimate S, N) Pick every N'th element of S.
+struct DecimateOp : public SetIntBinOp {
+ void apply2(SetTheory &ST, DagInit *Expr, RecSet &Set, int64_t N,
+ RecSet &Elts, ArrayRef<SMLoc> Loc) override {
+ if (N <= 0)
+ PrintFatalError(Loc, "Positive stride required: " +
+ Expr->getAsString());
+ for (unsigned I = 0; I < Set.size(); I += N)
+ Elts.insert(Set[I]);
+ }
+};
+
+// (interleave S1, S2, ...) Interleave elements of the arguments.
+struct InterleaveOp : public SetTheory::Operator {
+ void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts,
+ ArrayRef<SMLoc> Loc) override {
+ // Evaluate the arguments individually.
+ SmallVector<RecSet, 4> Args(Expr->getNumArgs());
+ unsigned MaxSize = 0;
+ for (unsigned i = 0, e = Expr->getNumArgs(); i != e; ++i) {
+ ST.evaluate(Expr->getArg(i), Args[i], Loc);
+ MaxSize = std::max(MaxSize, unsigned(Args[i].size()));
+ }
+ // Interleave arguments into Elts.
+ for (unsigned n = 0; n != MaxSize; ++n)
+ for (unsigned i = 0, e = Expr->getNumArgs(); i != e; ++i)
+ if (n < Args[i].size())
+ Elts.insert(Args[i][n]);
+ }
+};
+
+// (sequence "Format", From, To) Generate a sequence of records by name.
+struct SequenceOp : public SetTheory::Operator {
+ void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts,
+ ArrayRef<SMLoc> Loc) override {
+ int Step = 1;
+ if (Expr->arg_size() > 4)
+ PrintFatalError(Loc, "Bad args to (sequence \"Format\", From, To): " +
+ Expr->getAsString());
+ else if (Expr->arg_size() == 4) {
+ if (IntInit *II = dyn_cast<IntInit>(Expr->arg_begin()[3])) {
+ Step = II->getValue();
+ } else
+ PrintFatalError(Loc, "Stride must be an integer: " +
+ Expr->getAsString());
+ }
+
+ std::string Format;
+ if (StringInit *SI = dyn_cast<StringInit>(Expr->arg_begin()[0]))
+ Format = SI->getValue();
+ else
+ PrintFatalError(Loc, "Format must be a string: " + Expr->getAsString());
+
+ int64_t From, To;
+ if (IntInit *II = dyn_cast<IntInit>(Expr->arg_begin()[1]))
+ From = II->getValue();
+ else
+ PrintFatalError(Loc, "From must be an integer: " + Expr->getAsString());
+ if (From < 0 || From >= (1 << 30))
+ PrintFatalError(Loc, "From out of range");
+
+ if (IntInit *II = dyn_cast<IntInit>(Expr->arg_begin()[2]))
+ To = II->getValue();
+ else
+ PrintFatalError(Loc, "From must be an integer: " + Expr->getAsString());
+ if (To < 0 || To >= (1 << 30))
+ PrintFatalError(Loc, "To out of range");
+
+ RecordKeeper &Records =
+ cast<DefInit>(Expr->getOperator())->getDef()->getRecords();
+
+ Step *= From <= To ? 1 : -1;
+ while (true) {
+ if (Step > 0 && From > To)
+ break;
+ else if (Step < 0 && From < To)
+ break;
+ std::string Name;
+ raw_string_ostream OS(Name);
+ OS << format(Format.c_str(), unsigned(From));
+ Record *Rec = Records.getDef(OS.str());
+ if (!Rec)
+ PrintFatalError(Loc, "No def named '" + Name + "': " +
+ Expr->getAsString());
+ // Try to reevaluate Rec in case it is a set.
+ if (const RecVec *Result = ST.expand(Rec))
+ Elts.insert(Result->begin(), Result->end());
+ else
+ Elts.insert(Rec);
+
+ From += Step;
+ }
+ }
+};
+
+// Expand a Def into a set by evaluating one of its fields.
+struct FieldExpander : public SetTheory::Expander {
+ StringRef FieldName;
+
+ FieldExpander(StringRef fn) : FieldName(fn) {}
+
+ void expand(SetTheory &ST, Record *Def, RecSet &Elts) override {
+ ST.evaluate(Def->getValueInit(FieldName), Elts, Def->getLoc());
+ }
+};
+} // end anonymous namespace
+
+// Pin the vtables to this file.
+void SetTheory::Operator::anchor() {}
+void SetTheory::Expander::anchor() {}
+
+
+SetTheory::SetTheory() {
+ addOperator("add", new AddOp);
+ addOperator("sub", new SubOp);
+ addOperator("and", new AndOp);
+ addOperator("shl", new ShlOp);
+ addOperator("trunc", new TruncOp);
+ addOperator("rotl", new RotOp(false));
+ addOperator("rotr", new RotOp(true));
+ addOperator("decimate", new DecimateOp);
+ addOperator("interleave", new InterleaveOp);
+ addOperator("sequence", new SequenceOp);
+}
+
+void SetTheory::addOperator(StringRef Name, Operator *Op) {
+ Operators[Name] = Op;
+}
+
+void SetTheory::addExpander(StringRef ClassName, Expander *E) {
+ Expanders[ClassName] = E;
+}
+
+void SetTheory::addFieldExpander(StringRef ClassName, StringRef FieldName) {
+ addExpander(ClassName, new FieldExpander(FieldName));
+}
+
+void SetTheory::evaluate(Init *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc) {
+ // A def in a list can be a just an element, or it may expand.
+ if (DefInit *Def = dyn_cast<DefInit>(Expr)) {
+ if (const RecVec *Result = expand(Def->getDef()))
+ return Elts.insert(Result->begin(), Result->end());
+ Elts.insert(Def->getDef());
+ return;
+ }
+
+ // Lists simply expand.
+ if (ListInit *LI = dyn_cast<ListInit>(Expr))
+ return evaluate(LI->begin(), LI->end(), Elts, Loc);
+
+ // Anything else must be a DAG.
+ DagInit *DagExpr = dyn_cast<DagInit>(Expr);
+ if (!DagExpr)
+ PrintFatalError(Loc, "Invalid set element: " + Expr->getAsString());
+ DefInit *OpInit = dyn_cast<DefInit>(DagExpr->getOperator());
+ if (!OpInit)
+ PrintFatalError(Loc, "Bad set expression: " + Expr->getAsString());
+ Operator *Op = Operators.lookup(OpInit->getDef()->getName());
+ if (!Op)
+ PrintFatalError(Loc, "Unknown set operator: " + Expr->getAsString());
+ Op->apply(*this, DagExpr, Elts, Loc);
+}
+
+const RecVec *SetTheory::expand(Record *Set) {
+ // Check existing entries for Set and return early.
+ ExpandMap::iterator I = Expansions.find(Set);
+ if (I != Expansions.end())
+ return &I->second;
+
+ // This is the first time we see Set. Find a suitable expander.
+ const std::vector<Record*> &SC = Set->getSuperClasses();
+ for (unsigned i = 0, e = SC.size(); i != e; ++i) {
+ // Skip unnamed superclasses.
+ if (!dyn_cast<StringInit>(SC[i]->getNameInit()))
+ continue;
+ if (Expander *Exp = Expanders.lookup(SC[i]->getName())) {
+ // This breaks recursive definitions.
+ RecVec &EltVec = Expansions[Set];
+ RecSet Elts;
+ Exp->expand(*this, Set, Elts);
+ EltVec.assign(Elts.begin(), Elts.end());
+ return &EltVec;
+ }
+ }
+
+ // Set is not expandable.
+ return nullptr;
+}
+
diff --git a/contrib/llvm/lib/TableGen/TGLexer.cpp b/contrib/llvm/lib/TableGen/TGLexer.cpp
index c6be4f8..fc1d3ca 100644
--- a/contrib/llvm/lib/TableGen/TGLexer.cpp
+++ b/contrib/llvm/lib/TableGen/TGLexer.cpp
@@ -27,10 +27,10 @@
using namespace llvm;
TGLexer::TGLexer(SourceMgr &SM) : SrcMgr(SM) {
- CurBuffer = 0;
- CurBuf = SrcMgr.getMemoryBuffer(CurBuffer);
- CurPtr = CurBuf->getBufferStart();
- TokStart = 0;
+ CurBuffer = SrcMgr.getMainFileID();
+ CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
+ CurPtr = CurBuf.begin();
+ TokStart = nullptr;
}
SMLoc TGLexer::getLoc() const {
@@ -52,7 +52,7 @@ int TGLexer::getNextChar() {
case 0: {
// A nul character in the stream is either the end of the current buffer or
// a random nul in the file. Disambiguate that here.
- if (CurPtr-1 != CurBuf->getBufferEnd())
+ if (CurPtr-1 != CurBuf.end())
return 0; // Just whitespace.
// If this is the end of an included file, pop the parent file off the
@@ -60,7 +60,7 @@ int TGLexer::getNextChar() {
SMLoc ParentIncludeLoc = SrcMgr.getParentIncludeLoc(CurBuffer);
if (ParentIncludeLoc != SMLoc()) {
CurBuffer = SrcMgr.FindBufferContainingLoc(ParentIncludeLoc);
- CurBuf = SrcMgr.getMemoryBuffer(CurBuffer);
+ CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
CurPtr = ParentIncludeLoc.getPointer();
return getNextChar();
}
@@ -187,7 +187,7 @@ tgtok::TokKind TGLexer::LexString() {
while (*CurPtr != '"') {
// If we hit the end of the buffer, report an error.
- if (*CurPtr == 0 && CurPtr == CurBuf->getBufferEnd())
+ if (*CurPtr == 0 && CurPtr == CurBuf.end())
return ReturnError(StrStart, "End of file in string literal");
if (*CurPtr == '\n' || *CurPtr == '\r')
@@ -220,7 +220,7 @@ tgtok::TokKind TGLexer::LexString() {
// If we hit the end of the buffer, report an error.
case '\0':
- if (CurPtr == CurBuf->getBufferEnd())
+ if (CurPtr == CurBuf.end())
return ReturnError(StrStart, "End of file in string literal");
// FALL THROUGH
default:
@@ -304,7 +304,7 @@ bool TGLexer::LexInclude() {
CurBuffer = SrcMgr.AddIncludeFile(Filename, SMLoc::getFromPointer(CurPtr),
IncludedFile);
- if (CurBuffer == -1) {
+ if (!CurBuffer) {
PrintError(getLoc(), "Could not find include file '" + Filename + "'");
return true;
}
@@ -319,8 +319,8 @@ bool TGLexer::LexInclude() {
}
Dependencies.insert(std::make_pair(IncludedFile, getLoc()));
// Save the line number and lex buffer of the includer.
- CurBuf = SrcMgr.getMemoryBuffer(CurBuffer);
- CurPtr = CurBuf->getBufferStart();
+ CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
+ CurPtr = CurBuf.begin();
return false;
}
@@ -333,7 +333,7 @@ void TGLexer::SkipBCPLComment() {
return; // Newline is end of comment.
case 0:
// If this is the end of the buffer, end the comment.
- if (CurPtr == CurBuf->getBufferEnd())
+ if (CurPtr == CurBuf.end())
return;
break;
}
@@ -389,12 +389,12 @@ tgtok::TokKind TGLexer::LexNumber() {
return ReturnError(TokStart, "Invalid hexadecimal number");
errno = 0;
- CurIntVal = strtoll(NumStart, 0, 16);
+ CurIntVal = strtoll(NumStart, nullptr, 16);
if (errno == EINVAL)
return ReturnError(TokStart, "Invalid hexadecimal number");
if (errno == ERANGE) {
errno = 0;
- CurIntVal = (int64_t)strtoull(NumStart, 0, 16);
+ CurIntVal = (int64_t)strtoull(NumStart, nullptr, 16);
if (errno == EINVAL)
return ReturnError(TokStart, "Invalid hexadecimal number");
if (errno == ERANGE)
@@ -410,7 +410,7 @@ tgtok::TokKind TGLexer::LexNumber() {
// Requires at least one binary digit.
if (CurPtr == NumStart)
return ReturnError(CurPtr-2, "Invalid binary number");
- CurIntVal = strtoll(NumStart, 0, 2);
+ CurIntVal = strtoll(NumStart, nullptr, 2);
return tgtok::IntVal;
}
}
@@ -425,7 +425,7 @@ tgtok::TokKind TGLexer::LexNumber() {
while (isdigit(CurPtr[0]))
++CurPtr;
- CurIntVal = strtoll(TokStart, 0, 10);
+ CurIntVal = strtoll(TokStart, nullptr, 10);
return tgtok::IntVal;
}
@@ -478,6 +478,7 @@ tgtok::TokKind TGLexer::LexExclaim() {
.Case("empty", tgtok::XEmpty)
.Case("subst", tgtok::XSubst)
.Case("foreach", tgtok::XForEach)
+ .Case("listconcat", tgtok::XListConcat)
.Case("strconcat", tgtok::XStrConcat)
.Default(tgtok::Error);
diff --git a/contrib/llvm/lib/TableGen/TGLexer.h b/contrib/llvm/lib/TableGen/TGLexer.h
index d1bd70d..a2c95ca 100644
--- a/contrib/llvm/lib/TableGen/TGLexer.h
+++ b/contrib/llvm/lib/TableGen/TGLexer.h
@@ -14,6 +14,7 @@
#ifndef TGLEXER_H
#define TGLEXER_H
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>
@@ -21,7 +22,6 @@
#include <string>
namespace llvm {
-class MemoryBuffer;
class SourceMgr;
class SMLoc;
class Twine;
@@ -47,7 +47,7 @@ namespace tgtok {
MultiClass, String,
// !keywords.
- XConcat, XADD, XSRA, XSRL, XSHL, XStrConcat, XCast, XSubst,
+ XConcat, XADD, XSRA, XSRL, XSHL, XListConcat, XStrConcat, XCast, XSubst,
XForEach, XHead, XTail, XEmpty, XIf, XEq,
// Integer value.
@@ -63,7 +63,7 @@ class TGLexer {
SourceMgr &SrcMgr;
const char *CurPtr;
- const MemoryBuffer *CurBuf;
+ StringRef CurBuf;
// Information about the current token.
const char *TokStart;
@@ -73,7 +73,7 @@ class TGLexer {
/// CurBuffer - This is the current buffer index we're lexing from as managed
/// by the SourceMgr object.
- int CurBuffer;
+ unsigned CurBuffer;
public:
typedef std::map<std::string, SMLoc> DependenciesMapTy;
diff --git a/contrib/llvm/lib/TableGen/TGParser.cpp b/contrib/llvm/lib/TableGen/TGParser.cpp
index daac574..0550692 100644
--- a/contrib/llvm/lib/TableGen/TGParser.cpp
+++ b/contrib/llvm/lib/TableGen/TGParser.cpp
@@ -29,18 +29,18 @@ struct SubClassReference {
SMRange RefRange;
Record *Rec;
std::vector<Init*> TemplateArgs;
- SubClassReference() : Rec(0) {}
+ SubClassReference() : Rec(nullptr) {}
- bool isInvalid() const { return Rec == 0; }
+ bool isInvalid() const { return Rec == nullptr; }
};
struct SubMultiClassReference {
SMRange RefRange;
MultiClass *MC;
std::vector<Init*> TemplateArgs;
- SubMultiClassReference() : MC(0) {}
+ SubMultiClassReference() : MC(nullptr) {}
- bool isInvalid() const { return MC == 0; }
+ bool isInvalid() const { return MC == nullptr; }
void dump() const;
};
@@ -61,7 +61,7 @@ void SubMultiClassReference::dump() const {
} // end namespace llvm
bool TGParser::AddValue(Record *CurRec, SMLoc Loc, const RecordVal &RV) {
- if (CurRec == 0)
+ if (!CurRec)
CurRec = &CurMultiClass->Rec;
if (RecordVal *ERV = CurRec->getValue(RV.getNameInit())) {
@@ -83,10 +83,10 @@ bool TGParser::SetValue(Record *CurRec, SMLoc Loc, Init *ValName,
const std::vector<unsigned> &BitList, Init *V) {
if (!V) return false;
- if (CurRec == 0) CurRec = &CurMultiClass->Rec;
+ if (!CurRec) CurRec = &CurMultiClass->Rec;
RecordVal *RV = CurRec->getValue(ValName);
- if (RV == 0)
+ if (!RV)
return Error(Loc, "Value '" + ValName->getAsUnquotedString()
+ "' unknown!");
@@ -103,19 +103,19 @@ bool TGParser::SetValue(Record *CurRec, SMLoc Loc, Init *ValName,
//
if (!BitList.empty()) {
BitsInit *CurVal = dyn_cast<BitsInit>(RV->getValue());
- if (CurVal == 0)
+ if (!CurVal)
return Error(Loc, "Value '" + ValName->getAsUnquotedString()
+ "' is not a bits type");
// Convert the incoming value to a bits type of the appropriate size...
Init *BI = V->convertInitializerTo(BitsRecTy::get(BitList.size()));
- if (BI == 0) {
+ if (!BI) {
return Error(Loc, "Initializer is not compatible with bit range");
}
// We should have a BitsInit type now.
BitsInit *BInit = dyn_cast<BitsInit>(BI);
- assert(BInit != 0);
+ assert(BInit != nullptr);
SmallVector<Init *, 16> NewBits(CurVal->getNumBits());
@@ -129,7 +129,7 @@ bool TGParser::SetValue(Record *CurRec, SMLoc Loc, Init *ValName,
}
for (unsigned i = 0, e = CurVal->getNumBits(); i != e; ++i)
- if (NewBits[i] == 0)
+ if (!NewBits[i])
NewBits[i] = CurVal->getBit(i);
V = BitsInit::get(NewBits);
@@ -314,14 +314,14 @@ bool TGParser::ProcessForeachDefs(Record *CurRec, SMLoc Loc, IterSet &IterVals){
assert(IterVals.size() < Loops.size());
ForeachLoop &CurLoop = Loops[IterVals.size()];
ListInit *List = dyn_cast<ListInit>(CurLoop.ListValue);
- if (List == 0) {
+ if (!List) {
Error(Loc, "Loop list is not a list");
return true;
}
// Process each value.
for (int64_t i = 0; i < List->getSize(); ++i) {
- Init *ItemVal = List->resolveListElementReference(*CurRec, 0, i);
+ Init *ItemVal = List->resolveListElementReference(*CurRec, nullptr, i);
IterVals.push_back(IterRecord(CurLoop.IterVar, ItemVal));
if (ProcessForeachDefs(CurRec, Loc, IterVals))
return true;
@@ -339,7 +339,7 @@ bool TGParser::ProcessForeachDefs(Record *CurRec, SMLoc Loc, IterSet &IterVals){
for (unsigned i = 0, e = IterVals.size(); i != e; ++i) {
VarInit *IterVar = IterVals[i].IterVar;
TypedInit *IVal = dyn_cast<TypedInit>(IterVals[i].IterValue);
- if (IVal == 0) {
+ if (!IVal) {
Error(Loc, "foreach iterator value is untyped");
return true;
}
@@ -360,8 +360,13 @@ bool TGParser::ProcessForeachDefs(Record *CurRec, SMLoc Loc, IterSet &IterVals){
}
if (Records.getDef(IterRec->getNameInitAsString())) {
- Error(Loc, "def already exists: " + IterRec->getNameInitAsString());
- return true;
+ // If this record is anonymous, it's no problem, just generate a new name
+ if (IterRec->isAnonymous())
+ IterRec->setName(GetNewAnonymousName());
+ else {
+ Error(Loc, "def already exists: " + IterRec->getNameInitAsString());
+ return true;
+ }
}
Records.addDef(IterRec);
@@ -380,10 +385,11 @@ static bool isObjectStart(tgtok::TokKind K) {
K == tgtok::MultiClass || K == tgtok::Foreach;
}
-static std::string GetNewAnonymousName() {
- static unsigned AnonCounter = 0;
+/// GetNewAnonymousName - Generate a unique anonymous name that can be used as
+/// an identifier.
+std::string TGParser::GetNewAnonymousName() {
unsigned Tmp = AnonCounter++; // MSVC2012 ICEs without this.
- return "anonymous." + utostr(Tmp);
+ return "anonymous_" + utostr(Tmp);
}
/// ParseObjectName - If an object name is specified, return it. Otherwise,
@@ -399,21 +405,21 @@ Init *TGParser::ParseObjectName(MultiClass *CurMultiClass) {
// These are all of the tokens that can begin an object body.
// Some of these can also begin values but we disallow those cases
// because they are unlikely to be useful.
- return 0;
+ return nullptr;
default:
break;
}
- Record *CurRec = 0;
+ Record *CurRec = nullptr;
if (CurMultiClass)
CurRec = &CurMultiClass->Rec;
- RecTy *Type = 0;
+ RecTy *Type = nullptr;
if (CurRec) {
const TypedInit *CurRecName = dyn_cast<TypedInit>(CurRec->getNameInit());
if (!CurRecName) {
TokError("Record name is not typed!");
- return 0;
+ return nullptr;
}
Type = CurRecName->getType();
}
@@ -429,11 +435,11 @@ Init *TGParser::ParseObjectName(MultiClass *CurMultiClass) {
Record *TGParser::ParseClassID() {
if (Lex.getCode() != tgtok::Id) {
TokError("expected name for ClassID");
- return 0;
+ return nullptr;
}
Record *Result = Records.getClass(Lex.getCurStrVal());
- if (Result == 0)
+ if (!Result)
TokError("Couldn't find class '" + Lex.getCurStrVal() + "'");
Lex.Lex();
@@ -448,11 +454,11 @@ Record *TGParser::ParseClassID() {
MultiClass *TGParser::ParseMultiClassID() {
if (Lex.getCode() != tgtok::Id) {
TokError("expected name for MultiClassID");
- return 0;
+ return nullptr;
}
MultiClass *Result = MultiClasses[Lex.getCurStrVal()];
- if (Result == 0)
+ if (!Result)
TokError("Couldn't find multiclass '" + Lex.getCurStrVal() + "'");
Lex.Lex();
@@ -476,7 +482,7 @@ ParseSubClassReference(Record *CurRec, bool isDefm) {
} else {
Result.Rec = ParseClassID();
}
- if (Result.Rec == 0) return Result;
+ if (!Result.Rec) return Result;
// If there is no template arg list, we're done.
if (Lex.getCode() != tgtok::less) {
@@ -487,19 +493,19 @@ ParseSubClassReference(Record *CurRec, bool isDefm) {
if (Lex.getCode() == tgtok::greater) {
TokError("subclass reference requires a non-empty list of template values");
- Result.Rec = 0;
+ Result.Rec = nullptr;
return Result;
}
Result.TemplateArgs = ParseValueList(CurRec, Result.Rec);
if (Result.TemplateArgs.empty()) {
- Result.Rec = 0; // Error parsing value list.
+ Result.Rec = nullptr; // Error parsing value list.
return Result;
}
if (Lex.getCode() != tgtok::greater) {
TokError("expected '>' in template value list");
- Result.Rec = 0;
+ Result.Rec = nullptr;
return Result;
}
Lex.Lex();
@@ -521,7 +527,7 @@ ParseSubMultiClassReference(MultiClass *CurMC) {
Result.RefRange.Start = Lex.getLoc();
Result.MC = ParseMultiClassID();
- if (Result.MC == 0) return Result;
+ if (!Result.MC) return Result;
// If there is no template arg list, we're done.
if (Lex.getCode() != tgtok::less) {
@@ -532,19 +538,19 @@ ParseSubMultiClassReference(MultiClass *CurMC) {
if (Lex.getCode() == tgtok::greater) {
TokError("subclass reference requires a non-empty list of template values");
- Result.MC = 0;
+ Result.MC = nullptr;
return Result;
}
Result.TemplateArgs = ParseValueList(&CurMC->Rec, &Result.MC->Rec);
if (Result.TemplateArgs.empty()) {
- Result.MC = 0; // Error parsing value list.
+ Result.MC = nullptr; // Error parsing value list.
return Result;
}
if (Lex.getCode() != tgtok::greater) {
TokError("expected '>' in template value list");
- Result.MC = 0;
+ Result.MC = nullptr;
return Result;
}
Lex.Lex();
@@ -676,7 +682,7 @@ bool TGParser::ParseOptionalBitList(std::vector<unsigned> &Ranges) {
///
RecTy *TGParser::ParseType() {
switch (Lex.getCode()) {
- default: TokError("Unknown token when expecting a type"); return 0;
+ default: TokError("Unknown token when expecting a type"); return nullptr;
case tgtok::String: Lex.Lex(); return StringRecTy::get();
case tgtok::Code: Lex.Lex(); return StringRecTy::get();
case tgtok::Bit: Lex.Lex(); return BitRecTy::get();
@@ -684,20 +690,20 @@ RecTy *TGParser::ParseType() {
case tgtok::Dag: Lex.Lex(); return DagRecTy::get();
case tgtok::Id:
if (Record *R = ParseClassID()) return RecordRecTy::get(R);
- return 0;
+ return nullptr;
case tgtok::Bits: {
if (Lex.Lex() != tgtok::less) { // Eat 'bits'
TokError("expected '<' after bits type");
- return 0;
+ return nullptr;
}
if (Lex.Lex() != tgtok::IntVal) { // Eat '<'
TokError("expected integer in bits<n> type");
- return 0;
+ return nullptr;
}
uint64_t Val = Lex.getCurIntVal();
if (Lex.Lex() != tgtok::greater) { // Eat count.
TokError("expected '>' at end of bits<n> type");
- return 0;
+ return nullptr;
}
Lex.Lex(); // Eat '>'
return BitsRecTy::get(Val);
@@ -705,15 +711,15 @@ RecTy *TGParser::ParseType() {
case tgtok::List: {
if (Lex.Lex() != tgtok::less) { // Eat 'bits'
TokError("expected '<' after list type");
- return 0;
+ return nullptr;
}
Lex.Lex(); // Eat '<'
RecTy *SubType = ParseType();
- if (SubType == 0) return 0;
+ if (!SubType) return nullptr;
if (Lex.getCode() != tgtok::greater) {
TokError("expected '>' at end of list<ty> type");
- return 0;
+ return nullptr;
}
Lex.Lex(); // Eat '>'
return ListRecTy::get(SubType);
@@ -721,22 +727,6 @@ RecTy *TGParser::ParseType() {
}
}
-/// ParseIDValue - Parse an ID as a value and decode what it means.
-///
-/// IDValue ::= ID [def local value]
-/// IDValue ::= ID [def template arg]
-/// IDValue ::= ID [multiclass local value]
-/// IDValue ::= ID [multiclass template argument]
-/// IDValue ::= ID [def name]
-///
-Init *TGParser::ParseIDValue(Record *CurRec, IDParseMode Mode) {
- assert(Lex.getCode() == tgtok::Id && "Expected ID in ParseIDValue");
- std::string Name = Lex.getCurStrVal();
- SMLoc Loc = Lex.getLoc();
- Lex.Lex();
- return ParseIDValue(CurRec, Name, Loc);
-}
-
/// ParseIDValue - This is just like ParseIDValue above, but it assumes the ID
/// has already been read.
Init *TGParser::ParseIDValue(Record *CurRec,
@@ -787,7 +777,7 @@ Init *TGParser::ParseIDValue(Record *CurRec,
if (Mode == ParseValueMode) {
Error(NameLoc, "Variable not defined: '" + Name + "'");
- return 0;
+ return nullptr;
}
return StringInit::get(Name);
@@ -797,17 +787,17 @@ Init *TGParser::ParseIDValue(Record *CurRec,
///
/// Operation ::= XOperator ['<' Type '>'] '(' Args ')'
///
-Init *TGParser::ParseOperation(Record *CurRec) {
+Init *TGParser::ParseOperation(Record *CurRec, RecTy *ItemType) {
switch (Lex.getCode()) {
default:
TokError("unknown operation");
- return 0;
+ return nullptr;
case tgtok::XHead:
case tgtok::XTail:
case tgtok::XEmpty:
case tgtok::XCast: { // Value ::= !unop '(' Value ')'
UnOpInit::UnaryOp Code;
- RecTy *Type = 0;
+ RecTy *Type = nullptr;
switch (Lex.getCode()) {
default: llvm_unreachable("Unhandled code!");
@@ -817,9 +807,9 @@ Init *TGParser::ParseOperation(Record *CurRec) {
Type = ParseOperatorType();
- if (Type == 0) {
+ if (!Type) {
TokError("did not get type for unary operator");
- return 0;
+ return nullptr;
}
break;
@@ -839,12 +829,12 @@ Init *TGParser::ParseOperation(Record *CurRec) {
}
if (Lex.getCode() != tgtok::l_paren) {
TokError("expected '(' after unary operator");
- return 0;
+ return nullptr;
}
Lex.Lex(); // eat the '('
Init *LHS = ParseValue(CurRec);
- if (LHS == 0) return 0;
+ if (!LHS) return nullptr;
if (Code == UnOpInit::HEAD
|| Code == UnOpInit::TAIL
@@ -852,36 +842,36 @@ Init *TGParser::ParseOperation(Record *CurRec) {
ListInit *LHSl = dyn_cast<ListInit>(LHS);
StringInit *LHSs = dyn_cast<StringInit>(LHS);
TypedInit *LHSt = dyn_cast<TypedInit>(LHS);
- if (LHSl == 0 && LHSs == 0 && LHSt == 0) {
+ if (!LHSl && !LHSs && !LHSt) {
TokError("expected list or string type argument in unary operator");
- return 0;
+ return nullptr;
}
if (LHSt) {
ListRecTy *LType = dyn_cast<ListRecTy>(LHSt->getType());
StringRecTy *SType = dyn_cast<StringRecTy>(LHSt->getType());
- if (LType == 0 && SType == 0) {
- TokError("expected list or string type argumnet in unary operator");
- return 0;
+ if (!LType && !SType) {
+ TokError("expected list or string type argument in unary operator");
+ return nullptr;
}
}
if (Code == UnOpInit::HEAD
|| Code == UnOpInit::TAIL) {
- if (LHSl == 0 && LHSt == 0) {
- TokError("expected list type argumnet in unary operator");
- return 0;
+ if (!LHSl && !LHSt) {
+ TokError("expected list type argument in unary operator");
+ return nullptr;
}
if (LHSl && LHSl->getSize() == 0) {
TokError("empty list argument in unary operator");
- return 0;
+ return nullptr;
}
if (LHSl) {
Init *Item = LHSl->getElement(0);
TypedInit *Itemt = dyn_cast<TypedInit>(Item);
- if (Itemt == 0) {
+ if (!Itemt) {
TokError("untyped list element in unary operator");
- return 0;
+ return nullptr;
}
if (Code == UnOpInit::HEAD) {
Type = Itemt->getType();
@@ -891,9 +881,9 @@ Init *TGParser::ParseOperation(Record *CurRec) {
} else {
assert(LHSt && "expected list type argument in unary operator");
ListRecTy *LType = dyn_cast<ListRecTy>(LHSt->getType());
- if (LType == 0) {
- TokError("expected list type argumnet in unary operator");
- return 0;
+ if (!LType) {
+ TokError("expected list type argument in unary operator");
+ return nullptr;
}
if (Code == UnOpInit::HEAD) {
Type = LType->getElementType();
@@ -906,7 +896,7 @@ Init *TGParser::ParseOperation(Record *CurRec) {
if (Lex.getCode() != tgtok::r_paren) {
TokError("expected ')' in unary operator");
- return 0;
+ return nullptr;
}
Lex.Lex(); // eat the ')'
return (UnOpInit::get(Code, LHS, Type))->Fold(CurRec, CurMultiClass);
@@ -918,13 +908,14 @@ Init *TGParser::ParseOperation(Record *CurRec) {
case tgtok::XSRL:
case tgtok::XSHL:
case tgtok::XEq:
+ case tgtok::XListConcat:
case tgtok::XStrConcat: { // Value ::= !binop '(' Value ',' Value ')'
tgtok::TokKind OpTok = Lex.getCode();
SMLoc OpLoc = Lex.getLoc();
Lex.Lex(); // eat the operation
BinOpInit::BinaryOp Code;
- RecTy *Type = 0;
+ RecTy *Type = nullptr;
switch (OpTok) {
default: llvm_unreachable("Unhandled code!");
@@ -934,6 +925,10 @@ Init *TGParser::ParseOperation(Record *CurRec) {
case tgtok::XSRL: Code = BinOpInit::SRL; Type = IntRecTy::get(); break;
case tgtok::XSHL: Code = BinOpInit::SHL; Type = IntRecTy::get(); break;
case tgtok::XEq: Code = BinOpInit::EQ; Type = BitRecTy::get(); break;
+ case tgtok::XListConcat:
+ Code = BinOpInit::LISTCONCAT;
+ // We don't know the list type until we parse the first argument
+ break;
case tgtok::XStrConcat:
Code = BinOpInit::STRCONCAT;
Type = StringRecTy::get();
@@ -942,31 +937,44 @@ Init *TGParser::ParseOperation(Record *CurRec) {
if (Lex.getCode() != tgtok::l_paren) {
TokError("expected '(' after binary operator");
- return 0;
+ return nullptr;
}
Lex.Lex(); // eat the '('
SmallVector<Init*, 2> InitList;
InitList.push_back(ParseValue(CurRec));
- if (InitList.back() == 0) return 0;
+ if (!InitList.back()) return nullptr;
while (Lex.getCode() == tgtok::comma) {
Lex.Lex(); // eat the ','
InitList.push_back(ParseValue(CurRec));
- if (InitList.back() == 0) return 0;
+ if (!InitList.back()) return nullptr;
}
if (Lex.getCode() != tgtok::r_paren) {
TokError("expected ')' in operator");
- return 0;
+ return nullptr;
}
Lex.Lex(); // eat the ')'
+ // If we are doing !listconcat, we should know the type by now
+ if (OpTok == tgtok::XListConcat) {
+ if (VarInit *Arg0 = dyn_cast<VarInit>(InitList[0]))
+ Type = Arg0->getType();
+ else if (ListInit *Arg0 = dyn_cast<ListInit>(InitList[0]))
+ Type = Arg0->getType();
+ else {
+ InitList[0]->dump();
+ Error(OpLoc, "expected a list");
+ return nullptr;
+ }
+ }
+
// We allow multiple operands to associative operators like !strconcat as
// shorthand for nesting them.
- if (Code == BinOpInit::STRCONCAT) {
+ if (Code == BinOpInit::STRCONCAT || Code == BinOpInit::LISTCONCAT) {
while (InitList.size() > 2) {
Init *RHS = InitList.pop_back_val();
RHS = (BinOpInit::get(Code, InitList.back(), RHS, Type))
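A sketch of the TableGen syntax this enables (hypothetical class and def, not part of the diff); like !strconcat, extra operands are shorthand for nested concatenations:

    class C<list<int> l> { list<int> ints = l; }
    def X : C<!listconcat([1, 2], [3], [4])>;   // ints = [1, 2, 3, 4]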
@@ -980,14 +988,14 @@ Init *TGParser::ParseOperation(Record *CurRec) {
->Fold(CurRec, CurMultiClass);
Error(OpLoc, "expected two operands to operator");
- return 0;
+ return nullptr;
}
case tgtok::XIf:
case tgtok::XForEach:
case tgtok::XSubst: { // Value ::= !ternop '(' Value ',' Value ',' Value ')'
TernOpInit::TernaryOp Code;
- RecTy *Type = 0;
+ RecTy *Type = nullptr;
tgtok::TokKind LexCode = Lex.getCode();
Lex.Lex(); // eat the operation
@@ -1005,42 +1013,44 @@ Init *TGParser::ParseOperation(Record *CurRec) {
}
if (Lex.getCode() != tgtok::l_paren) {
TokError("expected '(' after ternary operator");
- return 0;
+ return nullptr;
}
Lex.Lex(); // eat the '('
Init *LHS = ParseValue(CurRec);
- if (LHS == 0) return 0;
+ if (!LHS) return nullptr;
if (Lex.getCode() != tgtok::comma) {
TokError("expected ',' in ternary operator");
- return 0;
+ return nullptr;
}
Lex.Lex(); // eat the ','
- Init *MHS = ParseValue(CurRec);
- if (MHS == 0) return 0;
+ Init *MHS = ParseValue(CurRec, ItemType);
+ if (!MHS)
+ return nullptr;
if (Lex.getCode() != tgtok::comma) {
TokError("expected ',' in ternary operator");
- return 0;
+ return nullptr;
}
Lex.Lex(); // eat the ','
- Init *RHS = ParseValue(CurRec);
- if (RHS == 0) return 0;
+ Init *RHS = ParseValue(CurRec, ItemType);
+ if (!RHS)
+ return nullptr;
if (Lex.getCode() != tgtok::r_paren) {
TokError("expected ')' in binary operator");
- return 0;
+ return nullptr;
}
Lex.Lex(); // eat the ')'
switch (LexCode) {
default: llvm_unreachable("Unhandled code!");
case tgtok::XIf: {
- RecTy *MHSTy = 0;
- RecTy *RHSTy = 0;
+ RecTy *MHSTy = nullptr;
+ RecTy *RHSTy = nullptr;
if (TypedInit *MHSt = dyn_cast<TypedInit>(MHS))
MHSTy = MHSt->getType();
@@ -1064,7 +1074,7 @@ Init *TGParser::ParseOperation(Record *CurRec) {
if (!MHSTy || !RHSTy) {
TokError("could not get type for !if");
- return 0;
+ return nullptr;
}
if (MHSTy->typeIsConvertibleTo(RHSTy)) {
@@ -1073,24 +1083,24 @@ Init *TGParser::ParseOperation(Record *CurRec) {
Type = MHSTy;
} else {
TokError("inconsistent types for !if");
- return 0;
+ return nullptr;
}
break;
}
case tgtok::XForEach: {
TypedInit *MHSt = dyn_cast<TypedInit>(MHS);
- if (MHSt == 0) {
+ if (!MHSt) {
TokError("could not get type for !foreach");
- return 0;
+ return nullptr;
}
Type = MHSt->getType();
break;
}
case tgtok::XSubst: {
TypedInit *RHSt = dyn_cast<TypedInit>(RHS);
- if (RHSt == 0) {
+ if (!RHSt) {
TokError("could not get type for !subst");
- return 0;
+ return nullptr;
}
Type = RHSt->getType();
break;
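Threading ItemType into the middle and right-hand operands presumably lets a branch that needs contextual typing, such as an empty list, take its type from the field being initialized; a minimal sketch (hypothetical names, not part of the diff):

    class C<bit b> { list<int> ints = !if(b, [1, 2], []); }   // [] is typed as list<int> from the field
    def D : C<1>;                                             // D.ints = [1, 2]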
@@ -1108,24 +1118,24 @@ Init *TGParser::ParseOperation(Record *CurRec) {
/// OperatorType ::= '<' Type '>'
///
RecTy *TGParser::ParseOperatorType() {
- RecTy *Type = 0;
+ RecTy *Type = nullptr;
if (Lex.getCode() != tgtok::less) {
TokError("expected type name for operator");
- return 0;
+ return nullptr;
}
Lex.Lex(); // eat the <
Type = ParseType();
- if (Type == 0) {
+ if (!Type) {
TokError("expected type name for operator");
- return 0;
+ return nullptr;
}
if (Lex.getCode() != tgtok::greater) {
TokError("expected type name for operator");
- return 0;
+ return nullptr;
}
Lex.Lex(); // eat the >
@@ -1149,11 +1159,12 @@ RecTy *TGParser::ParseOperatorType() {
/// SimpleValue ::= SHLTOK '(' Value ',' Value ')'
/// SimpleValue ::= SRATOK '(' Value ',' Value ')'
/// SimpleValue ::= SRLTOK '(' Value ',' Value ')'
+/// SimpleValue ::= LISTCONCATTOK '(' Value ',' Value ')'
/// SimpleValue ::= STRCONCATTOK '(' Value ',' Value ')'
///
Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType,
IDParseMode Mode) {
- Init *R = 0;
+ Init *R = nullptr;
switch (Lex.getCode()) {
default: TokError("Unknown token when parsing a value"); break;
case tgtok::paste:
@@ -1192,7 +1203,7 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType,
// Value ::= ID '<' ValueListNE '>'
if (Lex.Lex() == tgtok::greater) {
TokError("expected non-empty value list");
- return 0;
+ return nullptr;
}
// This is a CLASS<initvalslist> expression. This is supposed to synthesize
@@ -1201,24 +1212,21 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType,
Record *Class = Records.getClass(Name);
if (!Class) {
Error(NameLoc, "Expected a class name, got '" + Name + "'");
- return 0;
+ return nullptr;
}
std::vector<Init*> ValueList = ParseValueList(CurRec, Class);
- if (ValueList.empty()) return 0;
+ if (ValueList.empty()) return nullptr;
if (Lex.getCode() != tgtok::greater) {
TokError("expected '>' at end of value list");
- return 0;
+ return nullptr;
}
Lex.Lex(); // eat the '>'
SMLoc EndLoc = Lex.getLoc();
// Create the new record, set it as CurRec temporarily.
- static unsigned AnonCounter = 0;
- Record *NewRec = new Record("anonymous.val."+utostr(AnonCounter++),
- NameLoc,
- Records,
+ Record *NewRec = new Record(GetNewAnonymousName(), NameLoc, Records,
/*IsAnonymous=*/true);
SubClassReference SCRef;
SCRef.RefRange = SMRange(NameLoc, EndLoc);
@@ -1226,9 +1234,37 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType,
SCRef.TemplateArgs = ValueList;
// Add info about the subclass to NewRec.
if (AddSubClass(NewRec, SCRef))
- return 0;
- NewRec->resolveReferences();
- Records.addDef(NewRec);
+ return nullptr;
+ if (!CurMultiClass) {
+ NewRec->resolveReferences();
+ Records.addDef(NewRec);
+ } else {
+ // Otherwise, we're inside a multiclass, add it to the multiclass.
+ CurMultiClass->DefPrototypes.push_back(NewRec);
+
+ // Copy the template arguments for the multiclass into the def.
+ const std::vector<Init *> &TArgs =
+ CurMultiClass->Rec.getTemplateArgs();
+
+ for (unsigned i = 0, e = TArgs.size(); i != e; ++i) {
+ const RecordVal *RV = CurMultiClass->Rec.getValue(TArgs[i]);
+ assert(RV && "Template arg doesn't exist?");
+ NewRec->addValue(*RV);
+ }
+
+ // We can't return the prototype def here, instead return:
+ // !cast<ItemType>(!strconcat(NAME, AnonName)).
+ const RecordVal *MCNameRV = CurMultiClass->Rec.getValue("NAME");
+ assert(MCNameRV && "multiclass record must have a NAME");
+
+ return UnOpInit::get(UnOpInit::CAST,
+ BinOpInit::get(BinOpInit::STRCONCAT,
+ VarInit::get(MCNameRV->getName(),
+ MCNameRV->getType()),
+ NewRec->getNameInit(),
+ StringRecTy::get()),
+ Class->getDefInit()->getType());
+ }
// The result of the expression is a reference to the new record.
return DefInit::get(NewRec);
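Roughly the kind of construct this branch supports (hypothetical names, not part of the diff): an anonymous class instantiation used as a value inside a multiclass body, which is now deferred and resolved through NAME when the multiclass is instantiated:

    class Payload<int n> { int val = n; }
    multiclass M<int n> {
      def _w { Payload p = Payload<n>; }   // anonymous Payload<n> becomes a per-instantiation prototype
    }
    defm A : M<7>;                         // A_w.p refers to an anonymous Payload record with val = 7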
@@ -1240,11 +1276,11 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType,
if (Lex.getCode() != tgtok::r_brace) {
Vals = ParseValueList(CurRec);
- if (Vals.empty()) return 0;
+ if (Vals.empty()) return nullptr;
}
if (Lex.getCode() != tgtok::r_brace) {
TokError("expected '}' at end of bit list value");
- return 0;
+ return nullptr;
}
Lex.Lex(); // eat the '}'
@@ -1252,10 +1288,10 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType,
for (unsigned i = 0, e = Vals.size(); i != e; ++i) {
Init *Bit = Vals[i]->convertInitializerTo(BitRecTy::get());
- if (Bit == 0) {
+ if (!Bit) {
Error(BraceLoc, "Element #" + utostr(i) + " (" + Vals[i]->getAsString()+
") is not convertable to a bit");
- return 0;
+ return nullptr;
}
NewBits[Vals.size()-i-1] = Bit;
}
@@ -1265,87 +1301,87 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType,
Lex.Lex(); // eat the '['
std::vector<Init*> Vals;
- RecTy *DeducedEltTy = 0;
- ListRecTy *GivenListTy = 0;
+ RecTy *DeducedEltTy = nullptr;
+ ListRecTy *GivenListTy = nullptr;
- if (ItemType != 0) {
+ if (ItemType) {
ListRecTy *ListType = dyn_cast<ListRecTy>(ItemType);
- if (ListType == 0) {
+ if (!ListType) {
std::string s;
raw_string_ostream ss(s);
ss << "Type mismatch for list, expected list type, got "
<< ItemType->getAsString();
TokError(ss.str());
- return 0;
+ return nullptr;
}
GivenListTy = ListType;
}
if (Lex.getCode() != tgtok::r_square) {
- Vals = ParseValueList(CurRec, 0,
- GivenListTy ? GivenListTy->getElementType() : 0);
- if (Vals.empty()) return 0;
+ Vals = ParseValueList(CurRec, nullptr,
+ GivenListTy ? GivenListTy->getElementType() : nullptr);
+ if (Vals.empty()) return nullptr;
}
if (Lex.getCode() != tgtok::r_square) {
TokError("expected ']' at end of list value");
- return 0;
+ return nullptr;
}
Lex.Lex(); // eat the ']'
- RecTy *GivenEltTy = 0;
+ RecTy *GivenEltTy = nullptr;
if (Lex.getCode() == tgtok::less) {
// Optional list element type
Lex.Lex(); // eat the '<'
GivenEltTy = ParseType();
- if (GivenEltTy == 0) {
+ if (!GivenEltTy) {
// Couldn't parse element type
- return 0;
+ return nullptr;
}
if (Lex.getCode() != tgtok::greater) {
TokError("expected '>' at end of list element type");
- return 0;
+ return nullptr;
}
Lex.Lex(); // eat the '>'
}
// Check elements
- RecTy *EltTy = 0;
+ RecTy *EltTy = nullptr;
for (std::vector<Init *>::iterator i = Vals.begin(), ie = Vals.end();
i != ie;
++i) {
TypedInit *TArg = dyn_cast<TypedInit>(*i);
- if (TArg == 0) {
+ if (!TArg) {
TokError("Untyped list element");
- return 0;
+ return nullptr;
}
- if (EltTy != 0) {
+ if (EltTy) {
EltTy = resolveTypes(EltTy, TArg->getType());
- if (EltTy == 0) {
+ if (!EltTy) {
TokError("Incompatible types in list elements");
- return 0;
+ return nullptr;
}
} else {
EltTy = TArg->getType();
}
}
- if (GivenEltTy != 0) {
- if (EltTy != 0) {
+ if (GivenEltTy) {
+ if (EltTy) {
// Verify consistency
if (!EltTy->typeIsConvertibleTo(GivenEltTy)) {
TokError("Incompatible types in list elements");
- return 0;
+ return nullptr;
}
}
EltTy = GivenEltTy;
}
- if (EltTy == 0) {
- if (ItemType == 0) {
+ if (!EltTy) {
+ if (!ItemType) {
TokError("No type for list");
- return 0;
+ return nullptr;
}
DeducedEltTy = GivenListTy->getElementType();
} else {
@@ -1353,7 +1389,7 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType,
if (GivenListTy) {
if (!EltTy->typeIsConvertibleTo(GivenListTy->getElementType())) {
TokError("Element type mismatch for list");
- return 0;
+ return nullptr;
}
}
DeducedEltTy = EltTy;
@@ -1365,18 +1401,18 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType,
Lex.Lex(); // eat the '('
if (Lex.getCode() != tgtok::Id && Lex.getCode() != tgtok::XCast) {
TokError("expected identifier in dag init");
- return 0;
+ return nullptr;
}
Init *Operator = ParseValue(CurRec);
- if (Operator == 0) return 0;
+ if (!Operator) return nullptr;
// If the operator name is present, parse it.
std::string OperatorName;
if (Lex.getCode() == tgtok::colon) {
if (Lex.Lex() != tgtok::VarName) { // eat the ':'
TokError("expected variable name in dag operator");
- return 0;
+ return nullptr;
}
OperatorName = Lex.getCurStrVal();
Lex.Lex(); // eat the VarName.
@@ -1385,12 +1421,12 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType,
std::vector<std::pair<llvm::Init*, std::string> > DagArgs;
if (Lex.getCode() != tgtok::r_paren) {
DagArgs = ParseDagArgList(CurRec);
- if (DagArgs.empty()) return 0;
+ if (DagArgs.empty()) return nullptr;
}
if (Lex.getCode() != tgtok::r_paren) {
TokError("expected ')' in dag init");
- return 0;
+ return nullptr;
}
Lex.Lex(); // eat the ')'
@@ -1407,11 +1443,12 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType,
case tgtok::XSRL:
case tgtok::XSHL:
case tgtok::XEq:
+ case tgtok::XListConcat:
case tgtok::XStrConcat: // Value ::= !binop '(' Value ',' Value ')'
case tgtok::XIf:
case tgtok::XForEach:
case tgtok::XSubst: { // Value ::= !ternop '(' Value ',' Value ',' Value ')'
- return ParseOperation(CurRec);
+ return ParseOperation(CurRec, ItemType);
}
}
@@ -1427,7 +1464,7 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType,
///
Init *TGParser::ParseValue(Record *CurRec, RecTy *ItemType, IDParseMode Mode) {
Init *Result = ParseSimpleValue(CurRec, ItemType, Mode);
- if (Result == 0) return 0;
+ if (!Result) return nullptr;
// Parse the suffixes now if present.
while (1) {
@@ -1441,20 +1478,20 @@ Init *TGParser::ParseValue(Record *CurRec, RecTy *ItemType, IDParseMode Mode) {
SMLoc CurlyLoc = Lex.getLoc();
Lex.Lex(); // eat the '{'
std::vector<unsigned> Ranges = ParseRangeList();
- if (Ranges.empty()) return 0;
+ if (Ranges.empty()) return nullptr;
// Reverse the bitlist.
std::reverse(Ranges.begin(), Ranges.end());
Result = Result->convertInitializerBitRange(Ranges);
- if (Result == 0) {
+ if (!Result) {
Error(CurlyLoc, "Invalid bit range for value");
- return 0;
+ return nullptr;
}
// Eat the '}'.
if (Lex.getCode() != tgtok::r_brace) {
TokError("expected '}' at end of bit range list");
- return 0;
+ return nullptr;
}
Lex.Lex();
break;
@@ -1463,18 +1500,18 @@ Init *TGParser::ParseValue(Record *CurRec, RecTy *ItemType, IDParseMode Mode) {
SMLoc SquareLoc = Lex.getLoc();
Lex.Lex(); // eat the '['
std::vector<unsigned> Ranges = ParseRangeList();
- if (Ranges.empty()) return 0;
+ if (Ranges.empty()) return nullptr;
Result = Result->convertInitListSlice(Ranges);
- if (Result == 0) {
+ if (!Result) {
Error(SquareLoc, "Invalid range for list slice");
- return 0;
+ return nullptr;
}
// Eat the ']'.
if (Lex.getCode() != tgtok::r_square) {
TokError("expected ']' at end of list slice");
- return 0;
+ return nullptr;
}
Lex.Lex();
break;
@@ -1482,12 +1519,12 @@ Init *TGParser::ParseValue(Record *CurRec, RecTy *ItemType, IDParseMode Mode) {
case tgtok::period:
if (Lex.Lex() != tgtok::Id) { // eat the .
TokError("expected field identifier after '.'");
- return 0;
+ return nullptr;
}
if (!Result->getFieldType(Lex.getCurStrVal())) {
TokError("Cannot access field '" + Lex.getCurStrVal() + "' of value '" +
Result->getAsString() + "'");
- return 0;
+ return nullptr;
}
Result = FieldInit::get(Result, Lex.getCurStrVal());
Lex.Lex(); // eat field name
@@ -1502,14 +1539,14 @@ Init *TGParser::ParseValue(Record *CurRec, RecTy *ItemType, IDParseMode Mode) {
TypedInit *LHS = dyn_cast<TypedInit>(Result);
if (!LHS) {
Error(PasteLoc, "LHS of paste is not typed!");
- return 0;
+ return nullptr;
}
if (LHS->getType() != StringRecTy::get()) {
LHS = UnOpInit::get(UnOpInit::CAST, LHS, StringRecTy::get());
}
- TypedInit *RHS = 0;
+ TypedInit *RHS = nullptr;
Lex.Lex(); // Eat the '#'.
switch (Lex.getCode()) {
@@ -1529,7 +1566,7 @@ Init *TGParser::ParseValue(Record *CurRec, RecTy *ItemType, IDParseMode Mode) {
RHS = dyn_cast<TypedInit>(RHSResult);
if (!RHS) {
Error(PasteLoc, "RHS of paste is not typed!");
- return 0;
+ return nullptr;
}
if (RHS->getType() != StringRecTy::get()) {
@@ -1565,7 +1602,7 @@ TGParser::ParseDagArgList(Record *CurRec) {
} else {
// DagArg ::= Value (':' VARNAME)?
Init *Val = ParseValue(CurRec);
- if (Val == 0)
+ if (!Val)
return std::vector<std::pair<llvm::Init*, std::string> >();
// If the variable name is present, add it.
@@ -1600,7 +1637,7 @@ std::vector<Init*> TGParser::ParseValueList(Record *CurRec, Record *ArgsRec,
std::vector<Init*> Result;
RecTy *ItemType = EltTy;
unsigned int ArgN = 0;
- if (ArgsRec != 0 && EltTy == 0) {
+ if (ArgsRec && !EltTy) {
const std::vector<Init *> &TArgs = ArgsRec->getTemplateArgs();
if (!TArgs.size()) {
TokError("template argument provided to non-template class");
@@ -1616,12 +1653,12 @@ std::vector<Init*> TGParser::ParseValueList(Record *CurRec, Record *ArgsRec,
++ArgN;
}
Result.push_back(ParseValue(CurRec, ItemType));
- if (Result.back() == 0) return std::vector<Init*>();
+ if (!Result.back()) return std::vector<Init*>();
while (Lex.getCode() == tgtok::comma) {
Lex.Lex(); // Eat the comma
- if (ArgsRec != 0 && EltTy == 0) {
+ if (ArgsRec && !EltTy) {
const std::vector<Init *> &TArgs = ArgsRec->getTemplateArgs();
if (ArgN >= TArgs.size()) {
TokError("too many template arguments");
@@ -1633,7 +1670,7 @@ std::vector<Init*> TGParser::ParseValueList(Record *CurRec, Record *ArgsRec,
++ArgN;
}
Result.push_back(ParseValue(CurRec, ItemType));
- if (Result.back() == 0) return std::vector<Init*>();
+ if (!Result.back()) return std::vector<Init*>();
}
return Result;
@@ -1657,11 +1694,11 @@ Init *TGParser::ParseDeclaration(Record *CurRec,
if (HasField) Lex.Lex();
RecTy *Type = ParseType();
- if (Type == 0) return 0;
+ if (!Type) return nullptr;
if (Lex.getCode() != tgtok::Id) {
TokError("Expected identifier in declaration");
- return 0;
+ return nullptr;
}
SMLoc IdLoc = Lex.getLoc();
@@ -1681,16 +1718,16 @@ Init *TGParser::ParseDeclaration(Record *CurRec,
// Add the value.
if (AddValue(CurRec, IdLoc, RecordVal(DeclName, Type, HasField)))
- return 0;
+ return nullptr;
// If a value is present, parse it.
if (Lex.getCode() == tgtok::equal) {
Lex.Lex();
SMLoc ValLoc = Lex.getLoc();
Init *Val = ParseValue(CurRec, Type);
- if (Val == 0 ||
+ if (!Val ||
SetValue(CurRec, ValLoc, DeclName, std::vector<unsigned>(), Val))
- return 0;
+ return nullptr;
}
return DeclName;
@@ -1707,7 +1744,7 @@ Init *TGParser::ParseDeclaration(Record *CurRec,
VarInit *TGParser::ParseForeachDeclaration(ListInit *&ForeachListValue) {
if (Lex.getCode() != tgtok::Id) {
TokError("Expected identifier in foreach declaration");
- return 0;
+ return nullptr;
}
Init *DeclName = StringInit::get(Lex.getCurStrVal());
@@ -1716,27 +1753,27 @@ VarInit *TGParser::ParseForeachDeclaration(ListInit *&ForeachListValue) {
// If a value is present, parse it.
if (Lex.getCode() != tgtok::equal) {
TokError("Expected '=' in foreach declaration");
- return 0;
+ return nullptr;
}
Lex.Lex(); // Eat the '='
- RecTy *IterType = 0;
+ RecTy *IterType = nullptr;
std::vector<unsigned> Ranges;
switch (Lex.getCode()) {
- default: TokError("Unknown token when expecting a range list"); return 0;
+ default: TokError("Unknown token when expecting a range list"); return nullptr;
case tgtok::l_square: { // '[' ValueList ']'
- Init *List = ParseSimpleValue(0, 0, ParseForeachMode);
+ Init *List = ParseSimpleValue(nullptr, nullptr, ParseForeachMode);
ForeachListValue = dyn_cast<ListInit>(List);
- if (ForeachListValue == 0) {
+ if (!ForeachListValue) {
TokError("Expected a Value list");
- return 0;
+ return nullptr;
}
RecTy *ValueType = ForeachListValue->getType();
ListRecTy *ListType = dyn_cast<ListRecTy>(ValueType);
- if (ListType == 0) {
+ if (!ListType) {
TokError("Value list is not of list type");
- return 0;
+ return nullptr;
}
IterType = ListType->getElementType();
break;
@@ -1744,7 +1781,7 @@ VarInit *TGParser::ParseForeachDeclaration(ListInit *&ForeachListValue) {
case tgtok::IntVal: { // RangePiece.
if (ParseRangePiece(Ranges))
- return 0;
+ return nullptr;
break;
}
@@ -1753,7 +1790,7 @@ VarInit *TGParser::ParseForeachDeclaration(ListInit *&ForeachListValue) {
Ranges = ParseRangeList();
if (Lex.getCode() != tgtok::r_brace) {
TokError("expected '}' at end of bit range list");
- return 0;
+ return nullptr;
}
Lex.Lex();
break;
@@ -1770,7 +1807,7 @@ VarInit *TGParser::ParseForeachDeclaration(ListInit *&ForeachListValue) {
}
if (!IterType)
- return 0;
+ return nullptr;
return VarInit::get(DeclName, IterType);
}
@@ -1790,7 +1827,7 @@ bool TGParser::ParseTemplateArgList(Record *CurRec) {
// Read the first declaration.
Init *TemplArg = ParseDeclaration(CurRec, true/*templateargs*/);
- if (TemplArg == 0)
+ if (!TemplArg)
return true;
TheRecToAddTo->addTemplateArg(TemplArg);
@@ -1800,7 +1837,7 @@ bool TGParser::ParseTemplateArgList(Record *CurRec) {
// Read the following declarations.
TemplArg = ParseDeclaration(CurRec, true/*templateargs*/);
- if (TemplArg == 0)
+ if (!TemplArg)
return true;
TheRecToAddTo->addTemplateArg(TemplArg);
}
@@ -1818,7 +1855,7 @@ bool TGParser::ParseTemplateArgList(Record *CurRec) {
/// BodyItem ::= LET ID OptionalBitList '=' Value ';'
bool TGParser::ParseBodyItem(Record *CurRec) {
if (Lex.getCode() != tgtok::Let) {
- if (ParseDeclaration(CurRec, false) == 0)
+ if (!ParseDeclaration(CurRec, false))
return true;
if (Lex.getCode() != tgtok::semi)
@@ -1845,13 +1882,13 @@ bool TGParser::ParseBodyItem(Record *CurRec) {
Lex.Lex(); // eat the '='.
RecordVal *Field = CurRec->getValue(FieldName);
- if (Field == 0)
+ if (!Field)
return TokError("Value '" + FieldName + "' unknown!");
RecTy *Type = Field->getType();
Init *Val = ParseValue(CurRec, Type);
- if (Val == 0) return true;
+ if (!Val) return true;
if (Lex.getCode() != tgtok::semi)
return TokError("expected ';' after let expression");
@@ -1917,7 +1954,7 @@ bool TGParser::ParseObjectBody(Record *CurRec) {
SubClassReference SubClass = ParseSubClassReference(CurRec, false);
while (1) {
// Check for error.
- if (SubClass.Rec == 0) return true;
+ if (!SubClass.Rec) return true;
// Add it.
if (AddSubClass(CurRec, SubClass))
@@ -1964,7 +2001,18 @@ bool TGParser::ParseDef(MultiClass *CurMultiClass) {
return true;
}
Records.addDef(CurRec);
+
+ if (ParseObjectBody(CurRec))
+ return true;
} else if (CurMultiClass) {
+ // Parse the body before adding this prototype to the DefPrototypes vector.
+ // That way implicit definitions will be added to the DefPrototypes vector
+ // before this object, instantiated prior to defs derived from this object,
+    // and thus available for indirect name resolution when defs derived from
+ // this object are instantiated.
+ if (ParseObjectBody(CurRec))
+ return true;
+
// Otherwise, a def inside a multiclass, add it to the multiclass.
for (unsigned i = 0, e = CurMultiClass->DefPrototypes.size(); i != e; ++i)
if (CurMultiClass->DefPrototypes[i]->getNameInit()
@@ -1974,12 +2022,10 @@ bool TGParser::ParseDef(MultiClass *CurMultiClass) {
return true;
}
CurMultiClass->DefPrototypes.push_back(CurRec);
- }
-
- if (ParseObjectBody(CurRec))
+ } else if (ParseObjectBody(CurRec))
return true;
- if (CurMultiClass == 0) // Def's in multiclasses aren't really defs.
+ if (!CurMultiClass) // Def's in multiclasses aren't really defs.
// See Record::setName(). This resolve step will see any new name
// for the def that might have been created when resolving
// inheritance, values and arguments above.
@@ -2021,9 +2067,9 @@ bool TGParser::ParseForeach(MultiClass *CurMultiClass) {
// Make a temporary object to record items associated with the for
// loop.
- ListInit *ListValue = 0;
+ ListInit *ListValue = nullptr;
VarInit *IterName = ParseForeachDeclaration(ListValue);
- if (IterName == 0)
+ if (!IterName)
return TokError("expected declaration in for");
if (Lex.getCode() != tgtok::In)
@@ -2125,8 +2171,8 @@ std::vector<LetRecord> TGParser::ParseLetList() {
}
Lex.Lex(); // eat the '='.
- Init *Val = ParseValue(0);
- if (Val == 0) return std::vector<LetRecord>();
+ Init *Val = ParseValue(nullptr);
+ if (!Val) return std::vector<LetRecord>();
// Now that we have everything, add the record.
Result.push_back(LetRecord(Name, Bits, Val, NameLoc));
@@ -2209,7 +2255,7 @@ bool TGParser::ParseMultiClass() {
// If there are template args, parse them.
if (Lex.getCode() == tgtok::less)
- if (ParseTemplateArgList(0))
+ if (ParseTemplateArgList(nullptr))
return true;
bool inherits = false;
@@ -2225,7 +2271,7 @@ bool TGParser::ParseMultiClass() {
ParseSubMultiClassReference(CurMultiClass);
while (1) {
// Check for error.
- if (SubMultiClass.MC == 0) return true;
+ if (!SubMultiClass.MC) return true;
// Add it.
if (AddSubMultiClass(CurMultiClass, SubMultiClass))
@@ -2264,14 +2310,14 @@ bool TGParser::ParseMultiClass() {
Lex.Lex(); // eat the '}'.
}
- CurMultiClass = 0;
+ CurMultiClass = nullptr;
return false;
}
Record *TGParser::
InstantiateMulticlassDef(MultiClass &MC,
Record *DefProto,
- Init *DefmPrefix,
+ Init *&DefmPrefix,
SMRange DefmPrefixRange) {
// We need to preserve DefProto so it can be reused for later
// instantiations, so create a new Record to inherit from it.
@@ -2282,7 +2328,7 @@ InstantiateMulticlassDef(MultiClass &MC,
// as a prefix.
bool IsAnonymous = false;
- if (DefmPrefix == 0) {
+ if (!DefmPrefix) {
DefmPrefix = StringInit::get(GetNewAnonymousName());
IsAnonymous = true;
}
@@ -2291,7 +2337,7 @@ InstantiateMulticlassDef(MultiClass &MC,
StringInit *DefNameString = dyn_cast<StringInit>(DefName);
- if (DefNameString != 0) {
+ if (DefNameString) {
// We have a fully expanded string so there are no operators to
// resolve. We should concatenate the given prefix and name.
DefName =
@@ -2319,13 +2365,13 @@ InstantiateMulticlassDef(MultiClass &MC,
Error(DefmPrefixRange.Start, "Could not resolve "
+ CurRec->getNameInitAsString() + ":NAME to '"
+ DefmPrefix->getAsUnquotedString() + "'");
- return 0;
+ return nullptr;
}
// If the DefNameString didn't resolve, we probably have a reference to
// NAME and need to replace it. We need to do at least this much greedily,
// otherwise nested multiclasses will end up with incorrect NAME expansions.
- if (DefNameString == 0) {
+ if (!DefNameString) {
RecordVal *DefNameRV = CurRec->getValue("NAME");
CurRec->resolveReferencesTo(DefNameRV);
}
@@ -2350,7 +2396,7 @@ InstantiateMulticlassDef(MultiClass &MC,
Error(DefmPrefixRange.Start, "def '" + CurRec->getNameInitAsString() +
"' already defined, instantiating defm with subdef '" +
DefProto->getNameInitAsString() + "'");
- return 0;
+ return nullptr;
}
Records.addDef(CurRec);
@@ -2434,7 +2480,7 @@ bool TGParser::ResolveMulticlassDef(MultiClass &MC,
bool TGParser::ParseDefm(MultiClass *CurMultiClass) {
assert(Lex.getCode() == tgtok::Defm && "Unexpected token!");
SMLoc DefmLoc = Lex.getLoc();
- Init *DefmPrefix = 0;
+ Init *DefmPrefix = nullptr;
if (Lex.Lex() == tgtok::Id) { // eat the defm.
DefmPrefix = ParseObjectName(CurMultiClass);
@@ -2454,10 +2500,10 @@ bool TGParser::ParseDefm(MultiClass *CurMultiClass) {
Lex.Lex();
SMLoc SubClassLoc = Lex.getLoc();
- SubClassReference Ref = ParseSubClassReference(0, true);
+ SubClassReference Ref = ParseSubClassReference(nullptr, true);
while (1) {
- if (Ref.Rec == 0) return true;
+ if (!Ref.Rec) return true;
// To instantiate a multiclass, we need to first get the multiclass, then
// instantiate each def contained in the multiclass with the SubClassRef
@@ -2503,21 +2549,21 @@ bool TGParser::ParseDefm(MultiClass *CurMultiClass) {
// A defm can inherit from regular classes (non-multiclass) as
// long as they come in the end of the inheritance list.
- InheritFromClass = (Records.getClass(Lex.getCurStrVal()) != 0);
+ InheritFromClass = (Records.getClass(Lex.getCurStrVal()) != nullptr);
if (InheritFromClass)
break;
- Ref = ParseSubClassReference(0, true);
+ Ref = ParseSubClassReference(nullptr, true);
}
if (InheritFromClass) {
// Process all the classes to inherit as if they were part of a
// regular 'def' and inherit all record values.
- SubClassReference SubClass = ParseSubClassReference(0, false);
+ SubClassReference SubClass = ParseSubClassReference(nullptr, false);
while (1) {
// Check for error.
- if (SubClass.Rec == 0) return true;
+ if (!SubClass.Rec) return true;
// Get the expanded definition prototypes and teach them about
// the record values the current class to inherit has
@@ -2534,7 +2580,7 @@ bool TGParser::ParseDefm(MultiClass *CurMultiClass) {
if (Lex.getCode() != tgtok::comma) break;
Lex.Lex(); // eat ','.
- SubClass = ParseSubClassReference(0, false);
+ SubClass = ParseSubClassReference(nullptr, false);
}
}
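The loop above also exercises the rule that a defm may inherit from ordinary (non-multiclass) classes as long as they come last in the inheritance list; a minimal sketch (hypothetical names, not part of the diff):

    class Base { int common = 1; }
    multiclass M { def _a; }
    defm Foo : M, Base;    // Foo_a also inherits the 'common' field from Base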
diff --git a/contrib/llvm/lib/TableGen/TGParser.h b/contrib/llvm/lib/TableGen/TGParser.h
index 044e3a0..9f4b7e9 100644
--- a/contrib/llvm/lib/TableGen/TGParser.h
+++ b/contrib/llvm/lib/TableGen/TGParser.h
@@ -69,6 +69,8 @@ class TGParser {
// Record tracker
RecordKeeper &Records;
+ unsigned AnonCounter;
+
// A "named boolean" indicating how to parse identifiers. Usually
// identifiers map to some existing object but in special cases
// (e.g. parsing def names) no such object exists yet because we are
@@ -82,8 +84,8 @@ class TGParser {
};
public:
- TGParser(SourceMgr &SrcMgr, RecordKeeper &records) :
- Lex(SrcMgr), CurMultiClass(0), Records(records) {}
+ TGParser(SourceMgr &SrcMgr, RecordKeeper &records)
+ : Lex(SrcMgr), CurMultiClass(nullptr), Records(records), AnonCounter(0) {}
/// ParseFile - Main entrypoint for parsing a tblgen file. These parser
/// routines return true on error, or false on success.
@@ -112,6 +114,8 @@ private: // Semantic analysis methods.
bool AddSubMultiClass(MultiClass *CurMC,
SubMultiClassReference &SubMultiClass);
+ std::string GetNewAnonymousName();
+
// IterRecord: Map an iterator name to a value.
struct IterRecord {
VarInit *IterVar;
@@ -127,13 +131,13 @@ private: // Semantic analysis methods.
bool ProcessForeachDefs(Record *CurRec, SMLoc Loc, IterSet &IterVals);
private: // Parser methods.
- bool ParseObjectList(MultiClass *MC = 0);
+ bool ParseObjectList(MultiClass *MC = nullptr);
bool ParseObject(MultiClass *MC);
bool ParseClass();
bool ParseMultiClass();
Record *InstantiateMulticlassDef(MultiClass &MC,
Record *DefProto,
- Init *DefmPrefix,
+ Init *&DefmPrefix,
SMRange DefmPrefixRange);
bool ResolveMulticlassDefArgs(MultiClass &MC,
Record *DefProto,
@@ -163,22 +167,21 @@ private: // Parser methods.
SubClassReference ParseSubClassReference(Record *CurRec, bool isDefm);
SubMultiClassReference ParseSubMultiClassReference(MultiClass *CurMC);
- Init *ParseIDValue(Record *CurRec, IDParseMode Mode = ParseValueMode);
Init *ParseIDValue(Record *CurRec, const std::string &Name, SMLoc NameLoc,
IDParseMode Mode = ParseValueMode);
- Init *ParseSimpleValue(Record *CurRec, RecTy *ItemType = 0,
+ Init *ParseSimpleValue(Record *CurRec, RecTy *ItemType = nullptr,
IDParseMode Mode = ParseValueMode);
- Init *ParseValue(Record *CurRec, RecTy *ItemType = 0,
+ Init *ParseValue(Record *CurRec, RecTy *ItemType = nullptr,
IDParseMode Mode = ParseValueMode);
- std::vector<Init*> ParseValueList(Record *CurRec, Record *ArgsRec = 0,
- RecTy *EltTy = 0);
+ std::vector<Init*> ParseValueList(Record *CurRec, Record *ArgsRec = nullptr,
+ RecTy *EltTy = nullptr);
std::vector<std::pair<llvm::Init*, std::string> > ParseDagArgList(Record *);
bool ParseOptionalRangeList(std::vector<unsigned> &Ranges);
bool ParseOptionalBitList(std::vector<unsigned> &Ranges);
std::vector<unsigned> ParseRangeList();
bool ParseRangePiece(std::vector<unsigned> &Ranges);
RecTy *ParseType();
- Init *ParseOperation(Record *CurRec);
+ Init *ParseOperation(Record *CurRec, RecTy *ItemType);
RecTy *ParseOperatorType();
Init *ParseObjectName(MultiClass *CurMultiClass);
Record *ParseClassID();
diff --git a/contrib/llvm/lib/TableGen/module.modulemap b/contrib/llvm/lib/TableGen/module.modulemap
new file mode 100644
index 0000000..8dac0a2
--- /dev/null
+++ b/contrib/llvm/lib/TableGen/module.modulemap
@@ -0,0 +1 @@
+module TableGen { requires cplusplus umbrella "." module * { export * } }
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64.h b/contrib/llvm/lib/Target/AArch64/AArch64.h
index 4de4faa..1c022aa 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64.h
+++ b/contrib/llvm/lib/Target/AArch64/AArch64.h
@@ -1,4 +1,4 @@
-//==-- AArch64.h - Top-level interface for AArch64 representation -*- C++ -*-=//
+//==-- AArch64.h - Top-level interface for AArch64 --------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@@ -12,31 +12,38 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_TARGET_AARCH64_H
-#define LLVM_TARGET_AARCH64_H
+#ifndef TARGET_AArch64_H
+#define TARGET_AArch64_H
+#include "Utils/AArch64BaseInfo.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
-class AArch64AsmPrinter;
-class FunctionPass;
class AArch64TargetMachine;
-class MachineInstr;
-class MCInst;
-
-FunctionPass *createAArch64ISelDAG(AArch64TargetMachine &TM,
- CodeGenOpt::Level OptLevel);
+class FunctionPass;
+class MachineFunctionPass;
+
+FunctionPass *createAArch64DeadRegisterDefinitions();
+FunctionPass *createAArch64ConditionalCompares();
+FunctionPass *createAArch64AdvSIMDScalar();
+FunctionPass *createAArch64BranchRelaxation();
+FunctionPass *createAArch64ISelDag(AArch64TargetMachine &TM,
+ CodeGenOpt::Level OptLevel);
+FunctionPass *createAArch64StorePairSuppressPass();
+FunctionPass *createAArch64ExpandPseudoPass();
+FunctionPass *createAArch64LoadStoreOptimizationPass();
+ModulePass *createAArch64PromoteConstantPass();
+FunctionPass *createAArch64AddressTypePromotionPass();
+/// \brief Creates an AArch64-specific Target Transformation Info pass.
+ImmutablePass *
+createAArch64TargetTransformInfoPass(const AArch64TargetMachine *TM);
FunctionPass *createAArch64CleanupLocalDynamicTLSPass();
-FunctionPass *createAArch64BranchFixupPass();
-
-void LowerAArch64MachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
- AArch64AsmPrinter &AP);
-
-
-}
+FunctionPass *createAArch64CollectLOHPass();
+} // end namespace llvm
#endif
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64.td b/contrib/llvm/lib/Target/AArch64/AArch64.td
index 9c2c69a..e6a27c3 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64.td
+++ b/contrib/llvm/lib/Target/AArch64/AArch64.td
@@ -1,4 +1,4 @@
-//===- AArch64.td - Describe the AArch64 Target Machine -------*- tblgen -*-==//
+//=- AArch64.td - Describe the AArch64 Target Machine --------*- tablegen -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -7,12 +7,11 @@
//
//===----------------------------------------------------------------------===//
//
-// This is the top level entry point for the AArch64 target.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
-// Target-independent interfaces
+// Target-independent interfaces which we are implementing
//===----------------------------------------------------------------------===//
include "llvm/Target/Target.td"
@@ -22,7 +21,7 @@ include "llvm/Target/Target.td"
//
def FeatureFPARMv8 : SubtargetFeature<"fp-armv8", "HasFPARMv8", "true",
- "Enable ARMv8 FP">;
+ "Enable ARMv8 FP">;
def FeatureNEON : SubtargetFeature<"neon", "HasNEON", "true",
"Enable Advanced SIMD instructions", [FeatureFPARMv8]>;
@@ -30,44 +29,107 @@ def FeatureNEON : SubtargetFeature<"neon", "HasNEON", "true",
def FeatureCrypto : SubtargetFeature<"crypto", "HasCrypto", "true",
"Enable cryptographic instructions">;
-//===----------------------------------------------------------------------===//
-// AArch64 Processors
-//
+def FeatureCRC : SubtargetFeature<"crc", "HasCRC", "true",
+ "Enable ARMv8 CRC-32 checksum instructions">;
-include "AArch64Schedule.td"
+/// Cyclone has register move instructions which are "free".
+def FeatureZCRegMove : SubtargetFeature<"zcm", "HasZeroCycleRegMove", "true",
+ "Has zero-cycle register moves">;
-def : Processor<"generic", GenericItineraries, [FeatureFPARMv8]>;
+/// Cyclone has instructions which zero registers for "free".
+def FeatureZCZeroing : SubtargetFeature<"zcz", "HasZeroCycleZeroing", "true",
+ "Has zero-cycle zeroing instructions">;
//===----------------------------------------------------------------------===//
// Register File Description
//===----------------------------------------------------------------------===//
include "AArch64RegisterInfo.td"
-
-include "AArch64CallingConv.td"
+include "AArch64CallingConvention.td"
//===----------------------------------------------------------------------===//
// Instruction Descriptions
//===----------------------------------------------------------------------===//
+include "AArch64Schedule.td"
include "AArch64InstrInfo.td"
def AArch64InstrInfo : InstrInfo;
//===----------------------------------------------------------------------===//
-// Assembly printer
+// AArch64 Processors supported.
+//
+include "AArch64SchedA53.td"
+include "AArch64SchedA57.td"
+include "AArch64SchedCyclone.td"
+
+def ProcA53 : SubtargetFeature<"a53", "ARMProcFamily", "CortexA53",
+ "Cortex-A53 ARM processors",
+ [FeatureFPARMv8,
+ FeatureNEON,
+ FeatureCrypto,
+ FeatureCRC]>;
+
+def ProcA57 : SubtargetFeature<"a57", "ARMProcFamily", "CortexA57",
+ "Cortex-A57 ARM processors",
+ [FeatureFPARMv8,
+ FeatureNEON,
+ FeatureCrypto,
+ FeatureCRC]>;
+
+def ProcCyclone : SubtargetFeature<"cyclone", "ARMProcFamily", "Cyclone",
+ "Cyclone",
+ [FeatureFPARMv8,
+ FeatureNEON,
+ FeatureCrypto,
+ FeatureCRC,
+ FeatureZCRegMove, FeatureZCZeroing]>;
+
+def : ProcessorModel<"generic", NoSchedModel, [FeatureFPARMv8,
+ FeatureNEON,
+ FeatureCRC]>;
+
+def : ProcessorModel<"cortex-a53", CortexA53Model, [ProcA53]>;
+def : ProcessorModel<"cortex-a57", CortexA57Model, [ProcA57]>;
+def : ProcessorModel<"cyclone", CycloneModel, [ProcCyclone]>;
+
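As an illustration of the pattern used above (hypothetical core name, not part of the diff; it reuses the existing CortexA53 processor family and scheduling model), a further CPU would be added the same way:

    def ProcExample : SubtargetFeature<"example", "ARMProcFamily", "CortexA53",
                                       "Hypothetical example core",
                                       [FeatureFPARMv8, FeatureNEON, FeatureCRC]>;
    def : ProcessorModel<"example-cpu", CortexA53Model, [ProcExample]>;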
+//===----------------------------------------------------------------------===//
+// Assembly parser
//===----------------------------------------------------------------------===//
-def A64InstPrinter : AsmWriter {
- string AsmWriterClassName = "InstPrinter";
+def GenericAsmParserVariant : AsmParserVariant {
+ int Variant = 0;
+ string Name = "generic";
+}
+
+def AppleAsmParserVariant : AsmParserVariant {
+ int Variant = 1;
+ string Name = "apple-neon";
+}
+
+//===----------------------------------------------------------------------===//
+// Assembly printer
+//===----------------------------------------------------------------------===//
+// AArch64 uses the MC printer for asm output, so make sure the TableGen
+// AsmWriter bits get associated with the correct class.
+def GenericAsmWriter : AsmWriter {
+ string AsmWriterClassName = "InstPrinter";
+ int Variant = 0;
bit isMCAsmWriter = 1;
}
+def AppleAsmWriter : AsmWriter {
+ let AsmWriterClassName = "AppleInstPrinter";
+ int Variant = 1;
+ int isMCAsmWriter = 1;
+}
+
//===----------------------------------------------------------------------===//
-// Declare the target which we are implementing
+// Target Declaration
//===----------------------------------------------------------------------===//
def AArch64 : Target {
let InstructionSet = AArch64InstrInfo;
- let AssemblyWriters = [A64InstPrinter];
+ let AssemblyParserVariants = [GenericAsmParserVariant, AppleAsmParserVariant];
+ let AssemblyWriters = [GenericAsmWriter, AppleAsmWriter];
}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp b/contrib/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
new file mode 100644
index 0000000..ab2c4b7
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
@@ -0,0 +1,491 @@
+//===-- AArch64AddressTypePromotion.cpp --- Promote type for addr accesses -==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass tries to promote the computations used to obtain a sign-extended
+// value that is used in memory accesses.
+// E.g.
+// a = add nsw i32 b, 3
+// d = sext i32 a to i64
+// e = getelementptr ..., i64 d
+//
+// =>
+// f = sext i32 b to i64
+// a = add nsw i64 f, 3
+// e = getelementptr ..., i64 a
+//
+// It is legal to do so if the computations are marked with either the nsw or
+// nuw marker.
+// Moreover, the current heuristic is simple: it does not create new sext
+// operations, i.e., it gives up when a sext would have forked (e.g., if
+// a = add i32 b, c, two sexts are required to promote the computation).
+//
+// FIXME: This pass may be useful for other targets too.
+// ===---------------------------------------------------------------------===//
+
+#include "AArch64.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "aarch64-type-promotion"
+
+static cl::opt<bool>
+EnableAddressTypePromotion("aarch64-type-promotion", cl::Hidden,
+ cl::desc("Enable the type promotion pass"),
+ cl::init(true));
+static cl::opt<bool>
+EnableMerge("aarch64-type-promotion-merge", cl::Hidden,
+ cl::desc("Enable merging of redundant sexts when one is dominating"
+ " the other."),
+ cl::init(true));
+
+//===----------------------------------------------------------------------===//
+// AArch64AddressTypePromotion
+//===----------------------------------------------------------------------===//
+
+namespace llvm {
+void initializeAArch64AddressTypePromotionPass(PassRegistry &);
+}
+
+namespace {
+class AArch64AddressTypePromotion : public FunctionPass {
+
+public:
+ static char ID;
+ AArch64AddressTypePromotion()
+ : FunctionPass(ID), Func(nullptr), ConsideredSExtType(nullptr) {
+ initializeAArch64AddressTypePromotionPass(*PassRegistry::getPassRegistry());
+ }
+
+ const char *getPassName() const override {
+ return "AArch64 Address Type Promotion";
+ }
+
+ /// Iterate over the functions and promote the computation of interesting
+  /// sext instructions.
+ bool runOnFunction(Function &F) override;
+
+private:
+ /// The current function.
+ Function *Func;
+  /// Filter out all sexts that do not have this type.
+ /// Currently initialized with Int64Ty.
+ Type *ConsideredSExtType;
+
+ // This transformation requires dominator info.
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
+ FunctionPass::getAnalysisUsage(AU);
+ }
+
+ typedef SmallPtrSet<Instruction *, 32> SetOfInstructions;
+ typedef SmallVector<Instruction *, 16> Instructions;
+ typedef DenseMap<Value *, Instructions> ValueToInsts;
+
+ /// Check if it is profitable to move a sext through this instruction.
+  /// Currently, we consider it profitable if:
+  /// - Inst is used only once (no need to insert a truncate).
+  /// - Inst has only one operand that will require a sext operation (we do
+  ///   not create new sext operations).
+ bool shouldGetThrough(const Instruction *Inst);
+
+ /// Check if it is possible and legal to move a sext through this
+ /// instruction.
+ /// Current heuristic considers that we can get through:
+ /// - Arithmetic operation marked with the nsw or nuw flag.
+ /// - Other sext operation.
+ /// - Truncate operation if it was just dropping sign extended bits.
+ bool canGetThrough(const Instruction *Inst);
+
+  /// Move sext operations through instructions that are safe to sign extend.
+ bool propagateSignExtension(Instructions &SExtInsts);
+
+  /// Check whether this sext should be considered for code motion.
+  /// We look for sexts with ConsideredSExtType and a use in at least one
+  /// GetElementPtrInst.
+ bool shouldConsiderSExt(const Instruction *SExt) const;
+
+ /// Collect all interesting sext operations, i.e., the ones with the right
+ /// type and used in memory accesses.
+  /// More precisely, a sext instruction is considered interesting if it
+  /// is used in a "complex" getelementptr or if there exists at least one other
+  /// sext instruction that sign extended the same initial value.
+  /// A getelementptr is considered "complex" if it has more than 2
+  /// operands.
+ void analyzeSExtension(Instructions &SExtInsts);
+
+  /// Merge redundant sign extension operations in a common dominator.
+ void mergeSExts(ValueToInsts &ValToSExtendedUses,
+ SetOfInstructions &ToRemove);
+};
+} // end anonymous namespace.
+
+char AArch64AddressTypePromotion::ID = 0;
+
+INITIALIZE_PASS_BEGIN(AArch64AddressTypePromotion, "aarch64-type-promotion",
+ "AArch64 Type Promotion Pass", false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_END(AArch64AddressTypePromotion, "aarch64-type-promotion",
+ "AArch64 Type Promotion Pass", false, false)
+
+FunctionPass *llvm::createAArch64AddressTypePromotionPass() {
+ return new AArch64AddressTypePromotion();
+}
+
+bool AArch64AddressTypePromotion::canGetThrough(const Instruction *Inst) {
+ if (isa<SExtInst>(Inst))
+ return true;
+
+ const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
+ if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
+ (BinOp->hasNoUnsignedWrap() || BinOp->hasNoSignedWrap()))
+ return true;
+
+ // sext(trunc(sext)) --> sext
+ if (isa<TruncInst>(Inst) && isa<SExtInst>(Inst->getOperand(0))) {
+ const Instruction *Opnd = cast<Instruction>(Inst->getOperand(0));
+    // Check that the truncate just drops sign-extended bits.
+ if (Inst->getType()->getIntegerBitWidth() >=
+ Opnd->getOperand(0)->getType()->getIntegerBitWidth() &&
+ Inst->getOperand(0)->getType()->getIntegerBitWidth() <=
+ ConsideredSExtType->getIntegerBitWidth())
+ return true;
+ }
+
+ return false;
+}
+
+bool AArch64AddressTypePromotion::shouldGetThrough(const Instruction *Inst) {
+ // If the type of the sext is the same as the considered one, this sext
+ // will become useless.
+ // Otherwise, we will have to do something to preserve the original value,
+ // unless it is used once.
+ if (isa<SExtInst>(Inst) &&
+ (Inst->getType() == ConsideredSExtType || Inst->hasOneUse()))
+ return true;
+
+  // If the Inst is used more than once, we may need to insert truncate
+ // operations and we don't do that at the moment.
+ if (!Inst->hasOneUse())
+ return false;
+
+  // This truncate is used only once, thus if we can get through, it will become
+ // useless.
+ if (isa<TruncInst>(Inst))
+ return true;
+
+ // If both operands are not constant, a new sext will be created here.
+ // Current heuristic is: each step should be profitable.
+  // Therefore we don't allow increasing the number of sexts even if it may
+ // be profitable later on.
+ if (isa<BinaryOperator>(Inst) && isa<ConstantInt>(Inst->getOperand(1)))
+ return true;
+
+ return false;
+}
+
+static bool shouldSExtOperand(const Instruction *Inst, int OpIdx) {
+ if (isa<SelectInst>(Inst) && OpIdx == 0)
+ return false;
+ return true;
+}
+
+bool
+AArch64AddressTypePromotion::shouldConsiderSExt(const Instruction *SExt) const {
+ if (SExt->getType() != ConsideredSExtType)
+ return false;
+
+ for (const User *U : SExt->users()) {
+ if (isa<GetElementPtrInst>(U))
+ return true;
+ }
+
+ return false;
+}
+
+// Input:
+// - SExtInsts contains all the sext instructions that are used directly in
+// GetElementPtrInst, i.e., access to memory.
+// Algorithm:
+// - For each sext operation in SExtInsts:
+// Let var be the operand of sext.
+// while it is profitable (see shouldGetThrough), legal, and safe
+// (see canGetThrough) to move sext through var's definition:
+// * promote the type of var's definition.
+// * fold var into sext uses.
+// * move sext above var's definition.
+// * update sext operand to use the operand of var that should be sign
+// extended (by construction there is only one).
+//
+// E.g.,
+// a = ... i32 c, 3
+// b = sext i32 a to i64 <- is it legal/safe/profitable to get through 'a'
+// ...
+// = b
+// => Yes, update the code
+// b = sext i32 c to i64
+// a = ... i64 b, 3
+// ...
+// = a
+// Iterate on 'c'.
+bool
+AArch64AddressTypePromotion::propagateSignExtension(Instructions &SExtInsts) {
+ DEBUG(dbgs() << "*** Propagate Sign Extension ***\n");
+
+ bool LocalChange = false;
+ SetOfInstructions ToRemove;
+ ValueToInsts ValToSExtendedUses;
+ while (!SExtInsts.empty()) {
+ // Get through simple chain.
+ Instruction *SExt = SExtInsts.pop_back_val();
+
+ DEBUG(dbgs() << "Consider:\n" << *SExt << '\n');
+
+    // If this SExt has already been merged, continue.
+ if (SExt->use_empty() && ToRemove.count(SExt)) {
+ DEBUG(dbgs() << "No uses => marked as delete\n");
+ continue;
+ }
+
+ // Now try to get through the chain of definitions.
+ while (auto *Inst = dyn_cast<Instruction>(SExt->getOperand(0))) {
+ DEBUG(dbgs() << "Try to get through:\n" << *Inst << '\n');
+ if (!canGetThrough(Inst) || !shouldGetThrough(Inst)) {
+ // We cannot get through something that is not an Instruction
+ // or not safe to SExt.
+ DEBUG(dbgs() << "Cannot get through\n");
+ break;
+ }
+
+ LocalChange = true;
+ // If this is a sign extend, it becomes useless.
+ if (isa<SExtInst>(Inst) || isa<TruncInst>(Inst)) {
+ DEBUG(dbgs() << "SExt or trunc, mark it as to remove\n");
+ // We cannot use replaceAllUsesWith here because we may trigger some
+        // assertion on the type, as all involved sext operations may not have
+ // been moved yet.
+ while (!Inst->use_empty()) {
+ Use &U = *Inst->use_begin();
+ Instruction *User = dyn_cast<Instruction>(U.getUser());
+ assert(User && "User of sext is not an Instruction!");
+ User->setOperand(U.getOperandNo(), SExt);
+ }
+ ToRemove.insert(Inst);
+ SExt->setOperand(0, Inst->getOperand(0));
+ SExt->moveBefore(Inst);
+ continue;
+ }
+
+ // Get through the Instruction:
+ // 1. Update its type.
+ // 2. Replace the uses of SExt by Inst.
+ // 3. Sign extend each operand that needs to be sign extended.
+
+ // Step #1.
+ Inst->mutateType(SExt->getType());
+ // Step #2.
+ SExt->replaceAllUsesWith(Inst);
+ // Step #3.
+ Instruction *SExtForOpnd = SExt;
+
+ DEBUG(dbgs() << "Propagate SExt to operands\n");
+ for (int OpIdx = 0, EndOpIdx = Inst->getNumOperands(); OpIdx != EndOpIdx;
+ ++OpIdx) {
+ DEBUG(dbgs() << "Operand:\n" << *(Inst->getOperand(OpIdx)) << '\n');
+ if (Inst->getOperand(OpIdx)->getType() == SExt->getType() ||
+ !shouldSExtOperand(Inst, OpIdx)) {
+ DEBUG(dbgs() << "No need to propagate\n");
+ continue;
+ }
+ // Check if we can statically sign extend the operand.
+ Value *Opnd = Inst->getOperand(OpIdx);
+ if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
+ DEBUG(dbgs() << "Statically sign extend\n");
+ Inst->setOperand(OpIdx, ConstantInt::getSigned(SExt->getType(),
+ Cst->getSExtValue()));
+ continue;
+ }
+        // UndefValues are typed, so we have to statically sign extend them.
+ if (isa<UndefValue>(Opnd)) {
+ DEBUG(dbgs() << "Statically sign extend\n");
+ Inst->setOperand(OpIdx, UndefValue::get(SExt->getType()));
+ continue;
+ }
+
+        // Otherwise we have to explicitly sign extend it.
+ assert(SExtForOpnd &&
+ "Only one operand should have been sign extended");
+
+ SExtForOpnd->setOperand(0, Opnd);
+
+ DEBUG(dbgs() << "Move before:\n" << *Inst << "\nSign extend\n");
+ // Move the sign extension before the insertion point.
+ SExtForOpnd->moveBefore(Inst);
+ Inst->setOperand(OpIdx, SExtForOpnd);
+        // If more sexts are required, new instructions will have to be created.
+ SExtForOpnd = nullptr;
+ }
+ if (SExtForOpnd == SExt) {
+ DEBUG(dbgs() << "Sign extension is useless now\n");
+ ToRemove.insert(SExt);
+ break;
+ }
+ }
+
+    // If the sext now has the same type as its operand, connect its uses to
+    // that operand and delete it.
+    // This can happen for an Instruction all of whose uses are sign extended.
+ if (!ToRemove.count(SExt) &&
+ SExt->getType() == SExt->getOperand(0)->getType()) {
+ DEBUG(dbgs() << "Sign extension is useless, attach its use to "
+ "its argument\n");
+ SExt->replaceAllUsesWith(SExt->getOperand(0));
+ ToRemove.insert(SExt);
+ } else
+ ValToSExtendedUses[SExt->getOperand(0)].push_back(SExt);
+ }
+
+ if (EnableMerge)
+ mergeSExts(ValToSExtendedUses, ToRemove);
+
+ // Remove all instructions marked as ToRemove.
+ for (Instruction *I: ToRemove)
+ I->eraseFromParent();
+ return LocalChange;
+}
+
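+// Note on mergeSExts below (an illustrative summary, not part of the original
+// comments): for each sign-extended value, a list of "current points" is kept.
+// If one sext dominates another sext of the same value, e.g.
+//   %s1 = sext i32 %x to i64   ; in a block that dominates the next one
+//   %s2 = sext i32 %x to i64
+// then all uses of %s2 are redirected to %s1 and %s2 is marked for removal.
+// When neither dominates the other, both are kept, since merging in a common
+// dominator was measured to be unprofitable.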
+void AArch64AddressTypePromotion::mergeSExts(ValueToInsts &ValToSExtendedUses,
+ SetOfInstructions &ToRemove) {
+ DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+
+ for (auto &Entry : ValToSExtendedUses) {
+ Instructions &Insts = Entry.second;
+ Instructions CurPts;
+ for (Instruction *Inst : Insts) {
+ if (ToRemove.count(Inst))
+ continue;
+ bool inserted = false;
+ for (auto &Pt : CurPts) {
+ if (DT.dominates(Inst, Pt)) {
+ DEBUG(dbgs() << "Replace all uses of:\n" << *Pt << "\nwith:\n"
+ << *Inst << '\n');
+ Pt->replaceAllUsesWith(Inst);
+ ToRemove.insert(Pt);
+ Pt = Inst;
+ inserted = true;
+ break;
+ }
+ if (!DT.dominates(Pt, Inst))
+          // Give up if we would need to merge in a common dominator, as the
+          // experiments show it is not profitable.
+ continue;
+
+ DEBUG(dbgs() << "Replace all uses of:\n" << *Inst << "\nwith:\n"
+ << *Pt << '\n');
+ Inst->replaceAllUsesWith(Pt);
+ ToRemove.insert(Inst);
+ inserted = true;
+ break;
+ }
+ if (!inserted)
+ CurPts.push_back(Inst);
+ }
+ }
+}
+
+void AArch64AddressTypePromotion::analyzeSExtension(Instructions &SExtInsts) {
+ DEBUG(dbgs() << "*** Analyze Sign Extensions ***\n");
+
+ DenseMap<Value *, Instruction *> SeenChains;
+
+ for (auto &BB : *Func) {
+ for (auto &II : BB) {
+ Instruction *SExt = &II;
+
+      // Collect all sext operations of the considered type.
+ if (!isa<SExtInst>(SExt) || !shouldConsiderSExt(SExt))
+ continue;
+
+ DEBUG(dbgs() << "Found:\n" << (*SExt) << '\n');
+
+ // Cases where we actually perform the optimization:
+      // 1. SExt is used in a getelementptr with more than 2 operands =>
+      //    we can likely merge some computation if it is done on 64 bits.
+      // 2. The beginning of the SExt chain is sign extended several times =>
+      //    code sharing is possible.
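+      // E.g. (illustrative IR, not taken from the original comment), case #1
+      // would match
+      //   %idxprom  = sext i32 %i to i64
+      //   %arrayidx = getelementptr inbounds [16 x i32]* %buf, i64 0, i64 %idxprom
+      // because that GEP has more than 2 operands, whereas a plain two-operand
+      // GEP on the sext is not, by itself, enough to trigger the promotion.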
+
+ bool insert = false;
+ // #1.
+ for (const User *U : SExt->users()) {
+ const Instruction *Inst = dyn_cast<GetElementPtrInst>(U);
+ if (Inst && Inst->getNumOperands() > 2) {
+ DEBUG(dbgs() << "Interesting use in GetElementPtrInst\n" << *Inst
+ << '\n');
+ insert = true;
+ break;
+ }
+ }
+
+ // #2.
+ // Check the head of the chain.
+ Instruction *Inst = SExt;
+ Value *Last;
+ do {
+ int OpdIdx = 0;
+ const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
+ if (BinOp && isa<ConstantInt>(BinOp->getOperand(0)))
+ OpdIdx = 1;
+ Last = Inst->getOperand(OpdIdx);
+ Inst = dyn_cast<Instruction>(Last);
+ } while (Inst && canGetThrough(Inst) && shouldGetThrough(Inst));
+
+ DEBUG(dbgs() << "Head of the chain:\n" << *Last << '\n');
+ DenseMap<Value *, Instruction *>::iterator AlreadySeen =
+ SeenChains.find(Last);
+ if (insert || AlreadySeen != SeenChains.end()) {
+ DEBUG(dbgs() << "Insert\n");
+ SExtInsts.push_back(SExt);
+ if (AlreadySeen != SeenChains.end() && AlreadySeen->second != nullptr) {
+ DEBUG(dbgs() << "Insert chain member\n");
+ SExtInsts.push_back(AlreadySeen->second);
+ SeenChains[Last] = nullptr;
+ }
+ } else {
+ DEBUG(dbgs() << "Record its chain membership\n");
+ SeenChains[Last] = SExt;
+ }
+ }
+ }
+}
+
+bool AArch64AddressTypePromotion::runOnFunction(Function &F) {
+ if (!EnableAddressTypePromotion || F.isDeclaration())
+ return false;
+ Func = &F;
+ ConsideredSExtType = Type::getInt64Ty(Func->getContext());
+
+ DEBUG(dbgs() << "*** " << getPassName() << ": " << Func->getName() << '\n');
+
+ Instructions SExtInsts;
+ analyzeSExtension(SExtInsts);
+ return propagateSignExtension(SExtInsts);
+}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp b/contrib/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
new file mode 100644
index 0000000..734fb21
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
@@ -0,0 +1,387 @@
+//===-- AArch64AdvSIMDScalar.cpp - Replace dead defs w/ zero reg --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// When profitable, replace GPR targeting i64 instructions with their
+// AdvSIMD scalar equivalents. Generally speaking, "profitable" is defined
+// as minimizing the number of cross-class register copies.
+//===----------------------------------------------------------------------===//
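+// E.g. (an illustrative sketch, not taken from the original comment): assuming
+// x1, x2 and x0 have no other users, a sequence such as
+//   fmov x1, d1
+//   fmov x2, d2
+//   add  x0, x1, x2
+//   fmov d0, x0
+// can be rewritten as
+//   add  d0, d1, d2
+// eliminating three cross-class copies.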
+
+//===----------------------------------------------------------------------===//
+// TODO: Graph based predicate heuristics.
+// Walking the instruction list linearly will get many, perhaps most, of
+// the cases, but to do a truly thorough job of this, we need a more
+// holistic approach.
+//
+// This optimization is very similar in spirit to the register allocator's
+// spill placement, only here we're determining where to place cross-class
+// register copies rather than spills. As such, a similar approach is
+// called for.
+//
+// We want to build up a set of graphs of all instructions which are candidates
+// for transformation along with instructions which generate their inputs and
+// consume their outputs. For each edge in the graph, we assign a weight
+// based on whether there is a copy required there (weight zero if not) and
+// the block frequency of the block containing the defining or using
+// instruction, whichever is less. Our optimization is then a graph problem
+// to minimize the total weight of all the graphs, then transform instructions
+// and add or remove copy instructions as called for to implement the
+// solution.
+//===----------------------------------------------------------------------===//
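+//
+// An illustrative sketch of the edge-weight model described above (added as a
+// note; NeedsCrossClassCopy, BlockFreq, DefBlock and UseBlock are hypothetical
+// helpers, not functions in this file):
+//   Weight(E) = NeedsCrossClassCopy(E)
+//                 ? std::min(BlockFreq(DefBlock(E)), BlockFreq(UseBlock(E)))
+//                 : 0
+// The optimization would then choose placements minimizing the sum of
+// Weight(E) over all edges of the candidate graphs.
+//===----------------------------------------------------------------------===//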
+
+#include "AArch64.h"
+#include "AArch64InstrInfo.h"
+#include "AArch64RegisterInfo.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "aarch64-simd-scalar"
+
+// Allow forcing all i64 operations with equivalent SIMD instructions to use
+// them. For stress-testing the transformation function.
+static cl::opt<bool>
+TransformAll("aarch64-simd-scalar-force-all",
+ cl::desc("Force use of AdvSIMD scalar instructions everywhere"),
+ cl::init(false), cl::Hidden);
+
+STATISTIC(NumScalarInsnsUsed, "Number of scalar instructions used");
+STATISTIC(NumCopiesDeleted, "Number of cross-class copies deleted");
+STATISTIC(NumCopiesInserted, "Number of cross-class copies inserted");
+
+namespace {
+class AArch64AdvSIMDScalar : public MachineFunctionPass {
+ MachineRegisterInfo *MRI;
+ const AArch64InstrInfo *TII;
+
+private:
+ // isProfitableToTransform - Predicate function to determine whether an
+ // instruction should be transformed to its equivalent AdvSIMD scalar
+ // instruction. "add Xd, Xn, Xm" ==> "add Dd, Da, Db", for example.
+ bool isProfitableToTransform(const MachineInstr *MI) const;
+
+ // transformInstruction - Perform the transformation of an instruction
+  // to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
+ // to be the correct register class, minimizing cross-class copies.
+ void transformInstruction(MachineInstr *MI);
+
+  // processMachineBasicBlock - Main optimization loop.
+ bool processMachineBasicBlock(MachineBasicBlock *MBB);
+
+public:
+ static char ID; // Pass identification, replacement for typeid.
+ explicit AArch64AdvSIMDScalar() : MachineFunctionPass(ID) {}
+
+ bool runOnMachineFunction(MachineFunction &F) override;
+
+ const char *getPassName() const override {
+ return "AdvSIMD Scalar Operation Optimization";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+};
+char AArch64AdvSIMDScalar::ID = 0;
+} // end anonymous namespace
+
+static bool isGPR64(unsigned Reg, unsigned SubReg,
+ const MachineRegisterInfo *MRI) {
+ if (SubReg)
+ return false;
+ if (TargetRegisterInfo::isVirtualRegister(Reg))
+ return MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::GPR64RegClass);
+ return AArch64::GPR64RegClass.contains(Reg);
+}
+
+static bool isFPR64(unsigned Reg, unsigned SubReg,
+ const MachineRegisterInfo *MRI) {
+ if (TargetRegisterInfo::isVirtualRegister(Reg))
+ return (MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::FPR64RegClass) &&
+ SubReg == 0) ||
+ (MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::FPR128RegClass) &&
+ SubReg == AArch64::dsub);
+ // Physical register references just check the register class directly.
+ return (AArch64::FPR64RegClass.contains(Reg) && SubReg == 0) ||
+ (AArch64::FPR128RegClass.contains(Reg) && SubReg == AArch64::dsub);
+}
+
+// getSrcFromCopy - Get the original source register for a GPR64 <--> FPR64
+// copy instruction. Return zero_reg if the instruction is not a copy.
+static unsigned getSrcFromCopy(const MachineInstr *MI,
+ const MachineRegisterInfo *MRI,
+ unsigned &SubReg) {
+ SubReg = 0;
+ // The "FMOV Xd, Dn" instruction is the typical form.
+ if (MI->getOpcode() == AArch64::FMOVDXr ||
+ MI->getOpcode() == AArch64::FMOVXDr)
+ return MI->getOperand(1).getReg();
+ // A lane zero extract "UMOV.d Xd, Vn[0]" is equivalent. We shouldn't see
+ // these at this stage, but it's easy to check for.
+ if (MI->getOpcode() == AArch64::UMOVvi64 && MI->getOperand(2).getImm() == 0) {
+ SubReg = AArch64::dsub;
+ return MI->getOperand(1).getReg();
+ }
+ // Or just a plain COPY instruction. This can be directly to/from FPR64,
+ // or it can be a dsub subreg reference to an FPR128.
+ if (MI->getOpcode() == AArch64::COPY) {
+ if (isFPR64(MI->getOperand(0).getReg(), MI->getOperand(0).getSubReg(),
+ MRI) &&
+ isGPR64(MI->getOperand(1).getReg(), MI->getOperand(1).getSubReg(), MRI))
+ return MI->getOperand(1).getReg();
+ if (isGPR64(MI->getOperand(0).getReg(), MI->getOperand(0).getSubReg(),
+ MRI) &&
+ isFPR64(MI->getOperand(1).getReg(), MI->getOperand(1).getSubReg(),
+ MRI)) {
+ SubReg = MI->getOperand(1).getSubReg();
+ return MI->getOperand(1).getReg();
+ }
+ }
+
+ // Otherwise, this is some other kind of instruction.
+ return 0;
+}
+
+// getTransformOpcode - For any opcode for which there is an AdvSIMD equivalent
+// that we're considering transforming to, return that AdvSIMD opcode. For all
+// others, return the original opcode.
+static int getTransformOpcode(unsigned Opc) {
+ switch (Opc) {
+ default:
+ break;
+ // FIXME: Lots more possibilities.
+ case AArch64::ADDXrr:
+ return AArch64::ADDv1i64;
+ case AArch64::SUBXrr:
+ return AArch64::SUBv1i64;
+ }
+ // No AdvSIMD equivalent, so just return the original opcode.
+ return Opc;
+}
+
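+// For example (an illustrative note, not from the original comments):
+// getTransformOpcode(AArch64::ADDXrr) returns AArch64::ADDv1i64, while an
+// opcode with no AdvSIMD equivalent maps to itself, which is exactly what
+// isTransformable() below tests for.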
+static bool isTransformable(const MachineInstr *MI) {
+ int Opc = MI->getOpcode();
+ return Opc != getTransformOpcode(Opc);
+}
+
+// isProfitableToTransform - Predicate function to determine whether an
+// instruction should be transformed to its equivalent AdvSIMD scalar
+// instruction. "add Xd, Xn, Xm" ==> "add Dd, Da, Db", for example.
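+//
+// An illustrative worked example (not part of the original comment): for
+//   add x0, x1, x2
+// where x1 and x2 are each produced by an FMOV from an FPR64 and have no other
+// users, and the only use of x0 is an FMOV back into an FPR64, the counters
+// below end up as NumNewCopies = 0 and NumRemovableCopies = 3, so the
+// transform is taken.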
+bool
+AArch64AdvSIMDScalar::isProfitableToTransform(const MachineInstr *MI) const {
+ // If this instruction isn't eligible to be transformed (no SIMD equivalent),
+ // early exit since that's the common case.
+ if (!isTransformable(MI))
+ return false;
+
+ // Count the number of copies we'll need to add and approximate the number
+ // of copies that a transform will enable us to remove.
+ unsigned NumNewCopies = 3;
+ unsigned NumRemovableCopies = 0;
+
+ unsigned OrigSrc0 = MI->getOperand(1).getReg();
+ unsigned OrigSrc1 = MI->getOperand(2).getReg();
+ unsigned Src0 = 0, SubReg0;
+ unsigned Src1 = 0, SubReg1;
+ if (!MRI->def_empty(OrigSrc0)) {
+ MachineRegisterInfo::def_instr_iterator Def =
+ MRI->def_instr_begin(OrigSrc0);
+ assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
+ Src0 = getSrcFromCopy(&*Def, MRI, SubReg0);
+ // If the source was from a copy, we don't need to insert a new copy.
+ if (Src0)
+ --NumNewCopies;
+ // If there are no other users of the original source, we can delete
+ // that instruction.
+ if (Src0 && MRI->hasOneNonDBGUse(OrigSrc0))
+ ++NumRemovableCopies;
+ }
+ if (!MRI->def_empty(OrigSrc1)) {
+ MachineRegisterInfo::def_instr_iterator Def =
+ MRI->def_instr_begin(OrigSrc1);
+ assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
+ Src1 = getSrcFromCopy(&*Def, MRI, SubReg1);
+ if (Src1)
+ --NumNewCopies;
+ // If there are no other users of the original source, we can delete
+ // that instruction.
+ if (Src1 && MRI->hasOneNonDBGUse(OrigSrc1))
+ ++NumRemovableCopies;
+ }
+
+ // If any of the uses of the original instructions is a cross class copy,
+ // that's a copy that will be removable if we transform. Likewise, if
+  // any of the uses is a transformable instruction, it's likely the transforms
+ // will chain, enabling us to save a copy there, too. This is an aggressive
+ // heuristic that approximates the graph based cost analysis described above.
+ unsigned Dst = MI->getOperand(0).getReg();
+ bool AllUsesAreCopies = true;
+ for (MachineRegisterInfo::use_instr_nodbg_iterator
+ Use = MRI->use_instr_nodbg_begin(Dst),
+ E = MRI->use_instr_nodbg_end();
+ Use != E; ++Use) {
+ unsigned SubReg;
+ if (getSrcFromCopy(&*Use, MRI, SubReg) || isTransformable(&*Use))
+ ++NumRemovableCopies;
+ // If the use is an INSERT_SUBREG, that's still something that can
+ // directly use the FPR64, so we don't invalidate AllUsesAreCopies. It's
+ // preferable to have it use the FPR64 in most cases, as if the source
+ // vector is an IMPLICIT_DEF, the INSERT_SUBREG just goes away entirely.
+ // Ditto for a lane insert.
+ else if (Use->getOpcode() == AArch64::INSERT_SUBREG ||
+ Use->getOpcode() == AArch64::INSvi64gpr)
+ ;
+ else
+ AllUsesAreCopies = false;
+ }
+ // If all of the uses of the original destination register are copies to
+ // FPR64, then we won't end up having a new copy back to GPR64 either.
+ if (AllUsesAreCopies)
+ --NumNewCopies;
+
+ // If a transform will not increase the number of cross-class copies required,
+ // return true.
+ if (NumNewCopies <= NumRemovableCopies)
+ return true;
+
+ // Finally, even if we otherwise wouldn't transform, check if we're forcing
+ // transformation of everything.
+ return TransformAll;
+}
+
+static MachineInstr *insertCopy(const AArch64InstrInfo *TII, MachineInstr *MI,
+ unsigned Dst, unsigned Src, bool IsKill) {
+ MachineInstrBuilder MIB =
+ BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(AArch64::COPY),
+ Dst)
+ .addReg(Src, getKillRegState(IsKill));
+ DEBUG(dbgs() << " adding copy: " << *MIB);
+ ++NumCopiesInserted;
+ return MIB;
+}
+
+// transformInstruction - Perform the transformation of an instruction
+// to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
+// to be the correct register class, minimizing cross-class copies.
+void AArch64AdvSIMDScalar::transformInstruction(MachineInstr *MI) {
+ DEBUG(dbgs() << "Scalar transform: " << *MI);
+
+ MachineBasicBlock *MBB = MI->getParent();
+ int OldOpc = MI->getOpcode();
+ int NewOpc = getTransformOpcode(OldOpc);
+ assert(OldOpc != NewOpc && "transform an instruction to itself?!");
+
+ // Check if we need a copy for the source registers.
+ unsigned OrigSrc0 = MI->getOperand(1).getReg();
+ unsigned OrigSrc1 = MI->getOperand(2).getReg();
+ unsigned Src0 = 0, SubReg0;
+ unsigned Src1 = 0, SubReg1;
+ if (!MRI->def_empty(OrigSrc0)) {
+ MachineRegisterInfo::def_instr_iterator Def =
+ MRI->def_instr_begin(OrigSrc0);
+ assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
+ Src0 = getSrcFromCopy(&*Def, MRI, SubReg0);
+ // If there are no other users of the original source, we can delete
+ // that instruction.
+ if (Src0 && MRI->hasOneNonDBGUse(OrigSrc0)) {
+ assert(Src0 && "Can't delete copy w/o a valid original source!");
+ Def->eraseFromParent();
+ ++NumCopiesDeleted;
+ }
+ }
+ if (!MRI->def_empty(OrigSrc1)) {
+ MachineRegisterInfo::def_instr_iterator Def =
+ MRI->def_instr_begin(OrigSrc1);
+ assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
+ Src1 = getSrcFromCopy(&*Def, MRI, SubReg1);
+ // If there are no other users of the original source, we can delete
+ // that instruction.
+ if (Src1 && MRI->hasOneNonDBGUse(OrigSrc1)) {
+ assert(Src1 && "Can't delete copy w/o a valid original source!");
+ Def->eraseFromParent();
+ ++NumCopiesDeleted;
+ }
+ }
+ // If we weren't able to reference the original source directly, create a
+ // copy.
+ if (!Src0) {
+ SubReg0 = 0;
+ Src0 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
+ insertCopy(TII, MI, Src0, OrigSrc0, true);
+ }
+ if (!Src1) {
+ SubReg1 = 0;
+ Src1 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
+ insertCopy(TII, MI, Src1, OrigSrc1, true);
+ }
+
+ // Create a vreg for the destination.
+ // FIXME: No need to do this if the ultimate user expects an FPR64.
+ // Check for that and avoid the copy if possible.
+ unsigned Dst = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
+
+ // For now, all of the new instructions have the same simple three-register
+ // form, so no need to special case based on what instruction we're
+ // building.
+ BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(NewOpc), Dst)
+ .addReg(Src0, getKillRegState(true), SubReg0)
+ .addReg(Src1, getKillRegState(true), SubReg1);
+
+ // Now copy the result back out to a GPR.
+ // FIXME: Try to avoid this if all uses could actually just use the FPR64
+ // directly.
+ insertCopy(TII, MI, MI->getOperand(0).getReg(), Dst, true);
+
+ // Erase the old instruction.
+ MI->eraseFromParent();
+
+ ++NumScalarInsnsUsed;
+}
+
+// processMachineBasicBlock - Main optimization loop.
+bool AArch64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) {
+ bool Changed = false;
+ for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;) {
+ MachineInstr *MI = I;
+ ++I;
+ if (isProfitableToTransform(MI)) {
+ transformInstruction(MI);
+ Changed = true;
+ }
+ }
+ return Changed;
+}
+
+// runOnMachineFunction - Pass entry point from PassManager.
+bool AArch64AdvSIMDScalar::runOnMachineFunction(MachineFunction &mf) {
+ bool Changed = false;
+ DEBUG(dbgs() << "***** AArch64AdvSIMDScalar *****\n");
+
+ const TargetMachine &TM = mf.getTarget();
+ MRI = &mf.getRegInfo();
+ TII = static_cast<const AArch64InstrInfo *>(TM.getInstrInfo());
+
+ // Just check things on a one-block-at-a-time basis.
+ for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I)
+ if (processMachineBasicBlock(I))
+ Changed = true;
+ return Changed;
+}
+
+// createAArch64AdvSIMDScalar - Factory function used by AArch64TargetMachine
+// to add the pass to the PassManager.
+FunctionPass *llvm::createAArch64AdvSIMDScalar() {
+ return new AArch64AdvSIMDScalar();
+}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp b/contrib/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
index d59ca56..cd94e24 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -1,4 +1,4 @@
-//===-- AArch64AsmPrinter.cpp - Print machine code to an AArch64 .s file --===//
+//===-- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer --------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -8,240 +8,337 @@
//===----------------------------------------------------------------------===//
//
// This file contains a printer that converts from our internal representation
-// of machine-dependent LLVM code to GAS-format AArch64 assembly language.
+// of machine-dependent LLVM code to the AArch64 assembly language.
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
-#include "AArch64AsmPrinter.h"
+#include "AArch64.h"
+#include "AArch64MachineFunctionInfo.h"
+#include "AArch64MCInstLower.h"
+#include "AArch64RegisterInfo.h"
+#include "AArch64Subtarget.h"
#include "InstPrinter/AArch64InstPrinter.h"
-#include "llvm/DebugInfo.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCInstBuilder.h"
+#include "llvm/MC/MCLinkerOptimizationHint.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Target/Mangler.h"
-
using namespace llvm;
-/// Try to print a floating-point register as if it belonged to a specified
-/// register-class. For example the inline asm operand modifier "b" requires its
-/// argument to be printed as "bN".
-static bool printModifiedFPRAsmOperand(const MachineOperand &MO,
- const TargetRegisterInfo *TRI,
- char RegType, raw_ostream &O) {
- if (!MO.isReg())
- return true;
-
- for (MCRegAliasIterator AR(MO.getReg(), TRI, true); AR.isValid(); ++AR) {
- if (AArch64::FPR8RegClass.contains(*AR)) {
- O << RegType << TRI->getEncodingValue(MO.getReg());
- return false;
+#define DEBUG_TYPE "asm-printer"
+
+namespace {
+
+class AArch64AsmPrinter : public AsmPrinter {
+ /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
+ /// make the right decision when printing asm code for different targets.
+ const AArch64Subtarget *Subtarget;
+
+ AArch64MCInstLower MCInstLowering;
+ StackMaps SM;
+
+public:
+ AArch64AsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
+ : AsmPrinter(TM, Streamer),
+ Subtarget(&TM.getSubtarget<AArch64Subtarget>()),
+ MCInstLowering(OutContext, *Mang, *this), SM(*this), AArch64FI(nullptr),
+ LOHLabelCounter(0) {}
+
+ const char *getPassName() const override {
+ return "AArch64 Assembly Printer";
+ }
+
+ /// \brief Wrapper for MCInstLowering.lowerOperand() for the
+ /// tblgen'erated pseudo lowering.
+ bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
+ return MCInstLowering.lowerOperand(MO, MCOp);
+ }
+
+ void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
+ const MachineInstr &MI);
+ void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
+ const MachineInstr &MI);
+ /// \brief tblgen'erated driver function for lowering simple MI->MC
+ /// pseudo instructions.
+ bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,
+ const MachineInstr *MI);
+
+ void EmitInstruction(const MachineInstr *MI) override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AsmPrinter::getAnalysisUsage(AU);
+ AU.setPreservesAll();
+ }
+
+ bool runOnMachineFunction(MachineFunction &F) override {
+ AArch64FI = F.getInfo<AArch64FunctionInfo>();
+ return AsmPrinter::runOnMachineFunction(F);
+ }
+
+private:
+ MachineLocation getDebugValueLocation(const MachineInstr *MI) const;
+ void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
+ bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
+ bool printAsmRegInClass(const MachineOperand &MO,
+ const TargetRegisterClass *RC, bool isVector,
+ raw_ostream &O);
+
+ bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &O) override;
+ bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &O) override;
+
+ void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
+
+ void EmitFunctionBodyEnd() override;
+
+ MCSymbol *GetCPISymbol(unsigned CPID) const override;
+ void EmitEndOfAsmFile(Module &M) override;
+ AArch64FunctionInfo *AArch64FI;
+
+ /// \brief Emit the LOHs contained in AArch64FI.
+ void EmitLOHs();
+
+ typedef std::map<const MachineInstr *, MCSymbol *> MInstToMCSymbol;
+ MInstToMCSymbol LOHInstToLabel;
+ unsigned LOHLabelCounter;
+};
+
+} // end of anonymous namespace
+
+//===----------------------------------------------------------------------===//
+
+void AArch64AsmPrinter::EmitEndOfAsmFile(Module &M) {
+ if (Subtarget->isTargetMachO()) {
+ // Funny Darwin hack: This flag tells the linker that no global symbols
+ // contain code that falls through to other global symbols (e.g. the obvious
+ // implementation of multiple entry points). If this doesn't occur, the
+ // linker can safely perform dead code stripping. Since LLVM never
+ // generates code that does this, it is always safe to set.
+ OutStreamer.EmitAssemblerFlag(MCAF_SubsectionsViaSymbols);
+ SM.serializeToStackMapSection();
+ }
+
+ // Emit a .data.rel section containing any stubs that were created.
+ if (Subtarget->isTargetELF()) {
+ const TargetLoweringObjectFileELF &TLOFELF =
+ static_cast<const TargetLoweringObjectFileELF &>(getObjFileLowering());
+
+ MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
+
+ // Output stubs for external and common global variables.
+ MachineModuleInfoELF::SymbolListTy Stubs = MMIELF.GetGVStubList();
+ if (!Stubs.empty()) {
+ OutStreamer.SwitchSection(TLOFELF.getDataRelSection());
+ const DataLayout *TD = TM.getDataLayout();
+
+ for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
+ OutStreamer.EmitLabel(Stubs[i].first);
+ OutStreamer.EmitSymbolValue(Stubs[i].second.getPointer(),
+ TD->getPointerSize(0));
+ }
+ Stubs.clear();
}
}
- // The register doesn't correspond to anything floating-point like.
- return true;
}
-/// Implements the 'w' and 'x' inline asm operand modifiers, which print a GPR
-/// with the obvious type and an immediate 0 as either wzr or xzr.
-static bool printModifiedGPRAsmOperand(const MachineOperand &MO,
- const TargetRegisterInfo *TRI,
- const TargetRegisterClass &RegClass,
- raw_ostream &O) {
- char Prefix = &RegClass == &AArch64::GPR32RegClass ? 'w' : 'x';
+MachineLocation
+AArch64AsmPrinter::getDebugValueLocation(const MachineInstr *MI) const {
+ MachineLocation Location;
+ assert(MI->getNumOperands() == 4 && "Invalid no. of machine operands!");
+ // Frame address. Currently handles register +- offset only.
+ if (MI->getOperand(0).isReg() && MI->getOperand(1).isImm())
+ Location.set(MI->getOperand(0).getReg(), MI->getOperand(1).getImm());
+ else {
+ DEBUG(dbgs() << "DBG_VALUE instruction ignored! " << *MI << "\n");
+ }
+ return Location;
+}
- if (MO.isImm() && MO.getImm() == 0) {
- O << Prefix << "zr";
- return false;
- } else if (MO.isReg()) {
- if (MO.getReg() == AArch64::XSP || MO.getReg() == AArch64::WSP) {
- O << (Prefix == 'x' ? "sp" : "wsp");
- return false;
- }
+void AArch64AsmPrinter::EmitLOHs() {
+ SmallVector<MCSymbol *, 3> MCArgs;
- for (MCRegAliasIterator AR(MO.getReg(), TRI, true); AR.isValid(); ++AR) {
- if (RegClass.contains(*AR)) {
- O << AArch64InstPrinter::getRegisterName(*AR);
- return false;
- }
+ for (const auto &D : AArch64FI->getLOHContainer()) {
+ for (const MachineInstr *MI : D.getArgs()) {
+ MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
+ assert(LabelIt != LOHInstToLabel.end() &&
+ "Label hasn't been inserted for LOH related instruction");
+ MCArgs.push_back(LabelIt->second);
}
+ OutStreamer.EmitLOHDirective(D.getKind(), MCArgs);
+ MCArgs.clear();
}
+}
+
+void AArch64AsmPrinter::EmitFunctionBodyEnd() {
+ if (!AArch64FI->getLOHRelated().empty())
+ EmitLOHs();
+}
+
+/// GetCPISymbol - Return the symbol for the specified constant pool entry.
+MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
+ // Darwin uses a linker-private symbol name for constant-pools (to
+  // avoid addends on the relocation?); ELF has no such concept and
+ // uses a normal private symbol.
+ if (getDataLayout().getLinkerPrivateGlobalPrefix()[0])
+ return OutContext.GetOrCreateSymbol(
+ Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
+ Twine(getFunctionNumber()) + "_" + Twine(CPID));
- return true;
+ return OutContext.GetOrCreateSymbol(
+ Twine(getDataLayout().getPrivateGlobalPrefix()) + "CPI" +
+ Twine(getFunctionNumber()) + "_" + Twine(CPID));
}
-bool AArch64AsmPrinter::printSymbolicAddress(const MachineOperand &MO,
- bool PrintImmediatePrefix,
- StringRef Suffix, raw_ostream &O) {
- StringRef Name;
- StringRef Modifier;
+void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
+ raw_ostream &O) {
+ const MachineOperand &MO = MI->getOperand(OpNum);
switch (MO.getType()) {
default:
- return true;
- case MachineOperand::MO_GlobalAddress:
- Name = getSymbol(MO.getGlobal())->getName();
-
- // Global variables may be accessed either via a GOT or in various fun and
- // interesting TLS-model specific ways. Set the prefix modifier as
- // appropriate here.
- if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(MO.getGlobal())) {
- Reloc::Model RelocM = TM.getRelocationModel();
- if (GV->isThreadLocal()) {
- switch (TM.getTLSModel(GV)) {
- case TLSModel::GeneralDynamic:
- Modifier = "tlsdesc";
- break;
- case TLSModel::LocalDynamic:
- Modifier = "dtprel";
- break;
- case TLSModel::InitialExec:
- Modifier = "gottprel";
- break;
- case TLSModel::LocalExec:
- Modifier = "tprel";
- break;
- }
- } else if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) {
- Modifier = "got";
- }
- }
+ llvm_unreachable("<unknown operand type>");
+ case MachineOperand::MO_Register: {
+ unsigned Reg = MO.getReg();
+ assert(TargetRegisterInfo::isPhysicalRegister(Reg));
+ assert(!MO.getSubReg() && "Subregs should be eliminated!");
+ O << AArch64InstPrinter::getRegisterName(Reg);
break;
- case MachineOperand::MO_BlockAddress:
- Name = GetBlockAddressSymbol(MO.getBlockAddress())->getName();
+ }
+ case MachineOperand::MO_Immediate: {
+ int64_t Imm = MO.getImm();
+ O << '#' << Imm;
break;
- case MachineOperand::MO_ExternalSymbol:
- Name = MO.getSymbolName();
+ }
+ }
+}
+
+bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
+ raw_ostream &O) {
+ unsigned Reg = MO.getReg();
+ switch (Mode) {
+ default:
+ return true; // Unknown mode.
+ case 'w':
+ Reg = getWRegFromXReg(Reg);
break;
- case MachineOperand::MO_ConstantPoolIndex:
- Name = GetCPISymbol(MO.getIndex())->getName();
+ case 'x':
+ Reg = getXRegFromWReg(Reg);
break;
}
- // Some instructions (notably ADRP) don't take the # prefix for
- // immediates. Only print it if asked to.
- if (PrintImmediatePrefix)
- O << '#';
-
- // Only need the joining "_" if both the prefix and the suffix are
- // non-null. This little block simply takes care of the four possibly
- // combinations involved there.
- if (Modifier == "" && Suffix == "")
- O << Name;
- else if (Modifier == "" && Suffix != "")
- O << ":" << Suffix << ':' << Name;
- else if (Modifier != "" && Suffix == "")
- O << ":" << Modifier << ':' << Name;
- else
- O << ":" << Modifier << '_' << Suffix << ':' << Name;
+ O << AArch64InstPrinter::getRegisterName(Reg);
+ return false;
+}
+// Prints the register in MO using class RC, i.e., the register in RC whose
+// encoding value matches that of MO's register. This should not be used for
+// cross-class printing.
+bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
+ const TargetRegisterClass *RC,
+ bool isVector, raw_ostream &O) {
+ assert(MO.isReg() && "Should only get here with a register!");
+ const AArch64RegisterInfo *RI =
+ static_cast<const AArch64RegisterInfo *>(TM.getRegisterInfo());
+ unsigned Reg = MO.getReg();
+ unsigned RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
+ assert(RI->regsOverlap(RegToPrint, Reg));
+ O << AArch64InstPrinter::getRegisterName(
+ RegToPrint, isVector ? AArch64::vreg : AArch64::NoRegAltName);
return false;
}
bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
unsigned AsmVariant,
const char *ExtraCode, raw_ostream &O) {
- const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
+ const MachineOperand &MO = MI->getOperand(OpNum);
+
+ // First try the generic code, which knows about modifiers like 'c' and 'n'.
+ if (!AsmPrinter::PrintAsmOperand(MI, OpNum, AsmVariant, ExtraCode, O))
+ return false;
- if (!ExtraCode)
- ExtraCode = "";
+ // Does this asm operand have a single letter operand modifier?
+ if (ExtraCode && ExtraCode[0]) {
+ if (ExtraCode[1] != 0)
+ return true; // Unknown modifier.
- switch(ExtraCode[0]) {
- default:
- if (!AsmPrinter::PrintAsmOperand(MI, OpNum, AsmVariant, ExtraCode, O))
+ switch (ExtraCode[0]) {
+ default:
+ return true; // Unknown modifier.
+ case 'w': // Print W register
+ case 'x': // Print X register
+ if (MO.isReg())
+ return printAsmMRegister(MO, ExtraCode[0], O);
+ if (MO.isImm() && MO.getImm() == 0) {
+ unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
+ O << AArch64InstPrinter::getRegisterName(Reg);
return false;
- break;
- case 'w':
- // Output 32-bit general register operand, constant zero as wzr, or stack
- // pointer as wsp. Ignored when used with other operand types.
- if (!printModifiedGPRAsmOperand(MI->getOperand(OpNum), TRI,
- AArch64::GPR32RegClass, O))
- return false;
- break;
- case 'x':
- // Output 64-bit general register operand, constant zero as xzr, or stack
- // pointer as sp. Ignored when used with other operand types.
- if (!printModifiedGPRAsmOperand(MI->getOperand(OpNum), TRI,
- AArch64::GPR64RegClass, O))
- return false;
- break;
- case 'H':
- // Output higher numbered of a 64-bit general register pair
- case 'Q':
- // Output least significant register of a 64-bit general register pair
- case 'R':
- // Output most significant register of a 64-bit general register pair
-
- // FIXME note: these three operand modifiers will require, to some extent,
- // adding a paired GPR64 register class. Initial investigation suggests that
- // assertions are hit unless it has a type and is made legal for that type
- // in ISelLowering. After that step is made, the number of modifications
- // needed explodes (operation legality, calling conventions, stores, reg
- // copies ...).
- llvm_unreachable("FIXME: Unimplemented register pairs");
- case 'b':
- case 'h':
- case 's':
- case 'd':
- case 'q':
- if (!printModifiedFPRAsmOperand(MI->getOperand(OpNum), TRI,
- ExtraCode[0], O))
- return false;
- break;
- case 'A':
- // Output symbolic address with appropriate relocation modifier (also
- // suitable for ADRP).
- if (!printSymbolicAddress(MI->getOperand(OpNum), false, "", O))
- return false;
- break;
- case 'L':
- // Output bits 11:0 of symbolic address with appropriate :lo12: relocation
- // modifier.
- if (!printSymbolicAddress(MI->getOperand(OpNum), true, "lo12", O))
+ }
+ printOperand(MI, OpNum, O);
return false;
- break;
- case 'G':
- // Output bits 23:12 of symbolic address with appropriate :hi12: relocation
- // modifier (currently only for TLS local exec).
- if (!printSymbolicAddress(MI->getOperand(OpNum), true, "hi12", O))
+ case 'b': // Print B register.
+ case 'h': // Print H register.
+ case 's': // Print S register.
+ case 'd': // Print D register.
+ case 'q': // Print Q register.
+ if (MO.isReg()) {
+ const TargetRegisterClass *RC;
+ switch (ExtraCode[0]) {
+ case 'b':
+ RC = &AArch64::FPR8RegClass;
+ break;
+ case 'h':
+ RC = &AArch64::FPR16RegClass;
+ break;
+ case 's':
+ RC = &AArch64::FPR32RegClass;
+ break;
+ case 'd':
+ RC = &AArch64::FPR64RegClass;
+ break;
+ case 'q':
+ RC = &AArch64::FPR128RegClass;
+ break;
+ default:
+ return true;
+ }
+ return printAsmRegInClass(MO, RC, false /* vector */, O);
+ }
+ printOperand(MI, OpNum, O);
return false;
- break;
- case 'a':
- return PrintAsmMemoryOperand(MI, OpNum, AsmVariant, ExtraCode, O);
+ }
}
- // There's actually no operand modifier, which leads to a slightly eclectic
- // set of behaviour which we have to handle here.
- const MachineOperand &MO = MI->getOperand(OpNum);
- switch (MO.getType()) {
- default:
- llvm_unreachable("Unexpected operand for inline assembly");
- case MachineOperand::MO_Register:
- // GCC prints the unmodified operand of a 'w' constraint as the vector
- // register. Technically, we could allocate the argument as a VPR128, but
- // that leads to extremely dodgy copies being generated to get the data
- // there.
- if (printModifiedFPRAsmOperand(MO, TRI, 'v', O))
- O << AArch64InstPrinter::getRegisterName(MO.getReg());
- break;
- case MachineOperand::MO_Immediate:
- O << '#' << MO.getImm();
- break;
- case MachineOperand::MO_FPImmediate:
- assert(MO.getFPImm()->isExactlyValue(0.0) && "Only FP 0.0 expected");
- O << "#0.0";
- break;
- case MachineOperand::MO_BlockAddress:
- case MachineOperand::MO_ConstantPoolIndex:
- case MachineOperand::MO_GlobalAddress:
- case MachineOperand::MO_ExternalSymbol:
- return printSymbolicAddress(MO, false, "", O);
+ // According to ARM, we should emit x and v registers unless we have a
+ // modifier.
+ if (MO.isReg()) {
+ unsigned Reg = MO.getReg();
+
+ // If this is a w or x register, print an x register.
+ if (AArch64::GPR32allRegClass.contains(Reg) ||
+ AArch64::GPR64allRegClass.contains(Reg))
+ return printAsmMRegister(MO, 'x', O);
+
+ // If this is a b, h, s, d, or q register, print it as a v register.
+ return printAsmRegInClass(MO, &AArch64::FPR128RegClass, true /* vector */,
+ O);
}
+ printOperand(MI, OpNum, O);
return false;
}
@@ -250,15 +347,90 @@ bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
unsigned AsmVariant,
const char *ExtraCode,
raw_ostream &O) {
- // Currently both the memory constraints (m and Q) behave the same and amount
- // to the address as a single register. In future, we may allow "m" to provide
- // both a base and an offset.
+ if (ExtraCode && ExtraCode[0])
+ return true; // Unknown modifier.
+
const MachineOperand &MO = MI->getOperand(OpNum);
- assert(MO.isReg() && "unexpected inline assembly memory operand");
- O << '[' << AArch64InstPrinter::getRegisterName(MO.getReg()) << ']';
+ assert(MO.isReg() && "unexpected inline asm memory operand");
+ O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
return false;
}
+void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
+ raw_ostream &OS) {
+ unsigned NOps = MI->getNumOperands();
+ assert(NOps == 4);
+ OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
+  // Cast away const; DIVariable etc. do not take const operands for some reason.
+ DIVariable V(const_cast<MDNode *>(MI->getOperand(NOps - 1).getMetadata()));
+ OS << V.getName();
+ OS << " <- ";
+ // Frame address. Currently handles register +- offset only.
+ assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm());
+ OS << '[';
+ printOperand(MI, 0, OS);
+ OS << '+';
+ printOperand(MI, 1, OS);
+ OS << ']';
+ OS << "+";
+ printOperand(MI, NOps - 2, OS);
+}
+
+void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
+ const MachineInstr &MI) {
+ unsigned NumNOPBytes = MI.getOperand(1).getImm();
+
+ SM.recordStackMap(MI);
+ // Emit padding.
+ assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
+ for (unsigned i = 0; i < NumNOPBytes; i += 4)
+ EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
+}
+
+// Lower a patchpoint of the form:
+// [<def>], <id>, <numBytes>, <target>, <numArgs>
+void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
+ const MachineInstr &MI) {
+ SM.recordPatchPoint(MI);
+
+ PatchPointOpers Opers(&MI);
+
+ int64_t CallTarget = Opers.getMetaOper(PatchPointOpers::TargetPos).getImm();
+ unsigned EncodedBytes = 0;
+ if (CallTarget) {
+ assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
+ "High 16 bits of call target should be zero.");
+ unsigned ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
+ EncodedBytes = 16;
+ // Materialize the jump address:
+ EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVZWi)
+ .addReg(ScratchReg)
+ .addImm((CallTarget >> 32) & 0xFFFF)
+ .addImm(32));
+ EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVKWi)
+ .addReg(ScratchReg)
+ .addReg(ScratchReg)
+ .addImm((CallTarget >> 16) & 0xFFFF)
+ .addImm(16));
+ EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVKWi)
+ .addReg(ScratchReg)
+ .addReg(ScratchReg)
+ .addImm(CallTarget & 0xFFFF)
+ .addImm(0));
+ EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
+ }
+ // Emit padding.
+ unsigned NumBytes = Opers.getMetaOper(PatchPointOpers::NBytesPos).getImm();
+ assert(NumBytes >= EncodedBytes &&
+ "Patchpoint can't request size less than the length of a call.");
+ assert((NumBytes - EncodedBytes) % 4 == 0 &&
+ "Invalid number of NOP bytes requested!");
+ for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
+ EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
+}
+
+// Simple pseudo-instructions have their lowering (with expansion to real
+// instructions) auto-generated.
#include "AArch64GenMCPseudoLowering.inc"
void AArch64AsmPrinter::EmitInstruction(const MachineInstr *MI) {
@@ -266,40 +438,87 @@ void AArch64AsmPrinter::EmitInstruction(const MachineInstr *MI) {
if (emitPseudoExpansionLowering(OutStreamer, MI))
return;
- MCInst TmpInst;
- LowerAArch64MachineInstrToMCInst(MI, TmpInst, *this);
- OutStreamer.EmitInstruction(TmpInst);
-}
+ if (AArch64FI->getLOHRelated().count(MI)) {
+ // Generate a label for LOH related instruction
+ MCSymbol *LOHLabel = GetTempSymbol("loh", LOHLabelCounter++);
+ // Associate the instruction with the label
+ LOHInstToLabel[MI] = LOHLabel;
+ OutStreamer.EmitLabel(LOHLabel);
+ }
-void AArch64AsmPrinter::EmitEndOfAsmFile(Module &M) {
- if (Subtarget->isTargetELF()) {
- const TargetLoweringObjectFileELF &TLOFELF =
- static_cast<const TargetLoweringObjectFileELF &>(getObjFileLowering());
+ // Do any manual lowerings.
+ switch (MI->getOpcode()) {
+ default:
+ break;
+ case AArch64::DBG_VALUE: {
+ if (isVerbose() && OutStreamer.hasRawTextSupport()) {
+ SmallString<128> TmpStr;
+ raw_svector_ostream OS(TmpStr);
+ PrintDebugValueComment(MI, OS);
+ OutStreamer.EmitRawText(StringRef(OS.str()));
+ }
+ return;
+ }
- MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
+ // Tail calls use pseudo instructions so they have the proper code-gen
+ // attributes (isCall, isReturn, etc.). We lower them to the real
+ // instruction here.
+ case AArch64::TCRETURNri: {
+ MCInst TmpInst;
+ TmpInst.setOpcode(AArch64::BR);
+ TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
+ EmitToStreamer(OutStreamer, TmpInst);
+ return;
+ }
+ case AArch64::TCRETURNdi: {
+ MCOperand Dest;
+ MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
+ MCInst TmpInst;
+ TmpInst.setOpcode(AArch64::B);
+ TmpInst.addOperand(Dest);
+ EmitToStreamer(OutStreamer, TmpInst);
+ return;
+ }
+ case AArch64::TLSDESC_BLR: {
+ MCOperand Callee, Sym;
+ MCInstLowering.lowerOperand(MI->getOperand(0), Callee);
+ MCInstLowering.lowerOperand(MI->getOperand(1), Sym);
- // Output stubs for external and common global variables.
- MachineModuleInfoELF::SymbolListTy Stubs = MMIELF.GetGVStubList();
- if (!Stubs.empty()) {
- OutStreamer.SwitchSection(TLOFELF.getDataRelSection());
- const DataLayout *TD = TM.getDataLayout();
+  // First emit a relocation-annotation. This expands to no code, but requests
+  // that the following instruction get an R_AARCH64_TLSDESC_CALL.
+ MCInst TLSDescCall;
+ TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
+ TLSDescCall.addOperand(Sym);
+ EmitToStreamer(OutStreamer, TLSDescCall);
- for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
- OutStreamer.EmitLabel(Stubs[i].first);
- OutStreamer.EmitSymbolValue(Stubs[i].second.getPointer(),
- TD->getPointerSize(0));
- }
- Stubs.clear();
- }
+ // Other than that it's just a normal indirect call to the function loaded
+ // from the descriptor.
+ MCInst BLR;
+ BLR.setOpcode(AArch64::BLR);
+ BLR.addOperand(Callee);
+ EmitToStreamer(OutStreamer, BLR);
+
+ return;
}
-}
-bool AArch64AsmPrinter::runOnMachineFunction(MachineFunction &MF) {
- return AsmPrinter::runOnMachineFunction(MF);
+ case TargetOpcode::STACKMAP:
+ return LowerSTACKMAP(OutStreamer, SM, *MI);
+
+ case TargetOpcode::PATCHPOINT:
+ return LowerPATCHPOINT(OutStreamer, SM, *MI);
+ }
+
+ // Finally, do the automated lowerings for everything else.
+ MCInst TmpInst;
+ MCInstLowering.Lower(MI, TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
}
// Force static initialization.
extern "C" void LLVMInitializeAArch64AsmPrinter() {
- RegisterAsmPrinter<AArch64AsmPrinter> X(TheAArch64Target);
-}
+ RegisterAsmPrinter<AArch64AsmPrinter> X(TheAArch64leTarget);
+ RegisterAsmPrinter<AArch64AsmPrinter> Y(TheAArch64beTarget);
+ RegisterAsmPrinter<AArch64AsmPrinter> Z(TheARM64leTarget);
+ RegisterAsmPrinter<AArch64AsmPrinter> W(TheARM64beTarget);
+}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64AsmPrinter.h b/contrib/llvm/lib/Target/AArch64/AArch64AsmPrinter.h
deleted file mode 100644
index 824f003..0000000
--- a/contrib/llvm/lib/Target/AArch64/AArch64AsmPrinter.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// AArch64AsmPrinter.h - Print machine code to an AArch64 .s file -*- C++ -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the AArch64 assembly printer class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_AARCH64ASMPRINTER_H
-#define LLVM_AARCH64ASMPRINTER_H
-
-#include "AArch64.h"
-#include "AArch64TargetMachine.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/Support/Compiler.h"
-
-namespace llvm {
-
-class MCOperand;
-
-class LLVM_LIBRARY_VISIBILITY AArch64AsmPrinter : public AsmPrinter {
-
- /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
- /// make the right decision when printing asm code for different targets.
- const AArch64Subtarget *Subtarget;
-
- // emitPseudoExpansionLowering - tblgen'erated.
- bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,
- const MachineInstr *MI);
-
- public:
- explicit AArch64AsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer) {
- Subtarget = &TM.getSubtarget<AArch64Subtarget>();
- }
-
- bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const;
-
- MCOperand lowerSymbolOperand(const MachineOperand &MO,
- const MCSymbol *Sym) const;
-
- void EmitInstruction(const MachineInstr *MI);
- void EmitEndOfAsmFile(Module &M);
-
- bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
- unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O);
- bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
- unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O);
-
- /// printSymbolicAddress - Given some kind of reasonably bare symbolic
- /// reference, print out the appropriate asm string to represent it. If
- /// appropriate, a relocation-specifier will be produced, composed of a
- /// general class derived from the MO parameter and an instruction-specific
- /// suffix, provided in Suffix. E.g. ":got_lo12:" if a Suffix of "lo12" is
- /// given.
- bool printSymbolicAddress(const MachineOperand &MO,
- bool PrintImmediatePrefix,
- StringRef Suffix, raw_ostream &O);
-
- virtual const char *getPassName() const {
- return "AArch64 Assembly Printer";
- }
-
- virtual bool runOnMachineFunction(MachineFunction &MF);
-};
-} // end namespace llvm
-
-#endif
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64BranchFixupPass.cpp b/contrib/llvm/lib/Target/AArch64/AArch64BranchFixupPass.cpp
deleted file mode 100644
index 11e7f41..0000000
--- a/contrib/llvm/lib/Target/AArch64/AArch64BranchFixupPass.cpp
+++ /dev/null
@@ -1,600 +0,0 @@
-//===-- AArch64BranchFixupPass.cpp - AArch64 branch fixup -----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains a pass that fixes AArch64 branches which have ended up out
-// of range for their immediate operands.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "aarch64-branch-fixup"
-#include "AArch64.h"
-#include "AArch64InstrInfo.h"
-#include "Utils/AArch64BaseInfo.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/Format.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/Statistic.h"
-using namespace llvm;
-
-STATISTIC(NumSplit, "Number of uncond branches inserted");
-STATISTIC(NumCBrFixed, "Number of cond branches fixed");
-
-/// Return the worst case padding that could result from unknown offset bits.
-/// This does not include alignment padding caused by known offset bits.
-///
-/// @param LogAlign log2(alignment)
-/// @param KnownBits Number of known low offset bits.
-static inline unsigned UnknownPadding(unsigned LogAlign, unsigned KnownBits) {
- if (KnownBits < LogAlign)
- return (1u << LogAlign) - (1u << KnownBits);
- return 0;
-}
-
-namespace {
- /// Due to limited PC-relative displacements, conditional branches to distant
- /// blocks may need converting into an unconditional equivalent. For example:
- /// tbz w1, #0, far_away
- /// becomes
- /// tbnz w1, #0, skip
- /// b far_away
- /// skip:
- class AArch64BranchFixup : public MachineFunctionPass {
- /// Information about the offset and size of a single basic block.
- struct BasicBlockInfo {
- /// Distance from the beginning of the function to the beginning of this
- /// basic block.
- ///
- /// Offsets are computed assuming worst case padding before an aligned
- /// block. This means that subtracting basic block offsets always gives a
- /// conservative estimate of the real distance which may be smaller.
- ///
- /// Because worst case padding is used, the computed offset of an aligned
- /// block may not actually be aligned.
- unsigned Offset;
-
- /// Size of the basic block in bytes. If the block contains inline
- /// assembly, this is a worst case estimate.
- ///
- /// The size does not include any alignment padding whether from the
- /// beginning of the block, or from an aligned jump table at the end.
- unsigned Size;
-
- /// The number of low bits in Offset that are known to be exact. The
- /// remaining bits of Offset are an upper bound.
- uint8_t KnownBits;
-
- /// When non-zero, the block contains instructions (inline asm) of unknown
- /// size. The real size may be smaller than Size bytes by a multiple of 1
- /// << Unalign.
- uint8_t Unalign;
-
- BasicBlockInfo() : Offset(0), Size(0), KnownBits(0), Unalign(0) {}
-
- /// Compute the number of known offset bits internally to this block.
- /// This number should be used to predict worst case padding when
- /// splitting the block.
- unsigned internalKnownBits() const {
- unsigned Bits = Unalign ? Unalign : KnownBits;
- // If the block size isn't a multiple of the known bits, assume the
- // worst case padding.
- if (Size & ((1u << Bits) - 1))
- Bits = countTrailingZeros(Size);
- return Bits;
- }
-
- /// Compute the offset immediately following this block. If LogAlign is
- /// specified, return the offset the successor block will get if it has
- /// this alignment.
- unsigned postOffset(unsigned LogAlign = 0) const {
- unsigned PO = Offset + Size;
- if (!LogAlign)
- return PO;
- // Add alignment padding from the terminator.
- return PO + UnknownPadding(LogAlign, internalKnownBits());
- }
-
- /// Compute the number of known low bits of postOffset. If this block
- /// contains inline asm, the number of known bits drops to the
- /// instruction alignment. An aligned terminator may increase the number
- /// of know bits.
- /// If LogAlign is given, also consider the alignment of the next block.
- unsigned postKnownBits(unsigned LogAlign = 0) const {
- return std::max(LogAlign, internalKnownBits());
- }
- };
-
- std::vector<BasicBlockInfo> BBInfo;
-
- /// One per immediate branch, keeping the machine instruction pointer,
- /// conditional or unconditional, the max displacement, and (if IsCond is
- /// true) the corresponding inverted branch opcode.
- struct ImmBranch {
- MachineInstr *MI;
- unsigned OffsetBits : 31;
- bool IsCond : 1;
- ImmBranch(MachineInstr *mi, unsigned offsetbits, bool cond)
- : MI(mi), OffsetBits(offsetbits), IsCond(cond) {}
- };
-
- /// Keep track of all the immediate branch instructions.
- ///
- std::vector<ImmBranch> ImmBranches;
-
- MachineFunction *MF;
- const AArch64InstrInfo *TII;
- public:
- static char ID;
- AArch64BranchFixup() : MachineFunctionPass(ID) {}
-
- virtual bool runOnMachineFunction(MachineFunction &MF);
-
- virtual const char *getPassName() const {
- return "AArch64 branch fixup pass";
- }
-
- private:
- void initializeFunctionInfo();
- MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
- void adjustBBOffsetsAfter(MachineBasicBlock *BB);
- bool isBBInRange(MachineInstr *MI, MachineBasicBlock *BB,
- unsigned OffsetBits);
- bool fixupImmediateBr(ImmBranch &Br);
- bool fixupConditionalBr(ImmBranch &Br);
-
- void computeBlockSize(MachineBasicBlock *MBB);
- unsigned getOffsetOf(MachineInstr *MI) const;
- void dumpBBs();
- void verify();
- };
- char AArch64BranchFixup::ID = 0;
-}
-
-/// check BBOffsets
-void AArch64BranchFixup::verify() {
-#ifndef NDEBUG
- for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
- MBBI != E; ++MBBI) {
- MachineBasicBlock *MBB = MBBI;
- unsigned MBBId = MBB->getNumber();
- assert(!MBBId || BBInfo[MBBId - 1].postOffset() <= BBInfo[MBBId].Offset);
- }
-#endif
-}
-
-/// print block size and offset information - debugging
-void AArch64BranchFixup::dumpBBs() {
- DEBUG({
- for (unsigned J = 0, E = BBInfo.size(); J !=E; ++J) {
- const BasicBlockInfo &BBI = BBInfo[J];
- dbgs() << format("%08x BB#%u\t", BBI.Offset, J)
- << " kb=" << unsigned(BBI.KnownBits)
- << " ua=" << unsigned(BBI.Unalign)
- << format(" size=%#x\n", BBInfo[J].Size);
- }
- });
-}
-
-/// Returns an instance of the branch fixup pass.
-FunctionPass *llvm::createAArch64BranchFixupPass() {
- return new AArch64BranchFixup();
-}
-
-bool AArch64BranchFixup::runOnMachineFunction(MachineFunction &mf) {
- MF = &mf;
- DEBUG(dbgs() << "***** AArch64BranchFixup ******");
- TII = (const AArch64InstrInfo*)MF->getTarget().getInstrInfo();
-
- // This pass invalidates liveness information when it splits basic blocks.
- MF->getRegInfo().invalidateLiveness();
-
- // Renumber all of the machine basic blocks in the function, guaranteeing that
- // the numbers agree with the position of the block in the function.
- MF->RenumberBlocks();
-
- // Do the initial scan of the function, building up information about the
- // sizes of each block and location of each immediate branch.
- initializeFunctionInfo();
-
- // Iteratively fix up branches until there is no change.
- unsigned NoBRIters = 0;
- bool MadeChange = false;
- while (true) {
- DEBUG(dbgs() << "Beginning iteration #" << NoBRIters << '\n');
- bool BRChange = false;
- for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i)
- BRChange |= fixupImmediateBr(ImmBranches[i]);
- if (BRChange && ++NoBRIters > 30)
- report_fatal_error("Branch Fix Up pass failed to converge!");
- DEBUG(dumpBBs());
-
- if (!BRChange)
- break;
- MadeChange = true;
- }
-
- // After a while, this might be made debug-only, but it is not expensive.
- verify();
-
- DEBUG(dbgs() << '\n'; dumpBBs());
-
- BBInfo.clear();
- ImmBranches.clear();
-
- return MadeChange;
-}
-
-/// Return true if the specified basic block can fallthrough into the block
-/// immediately after it.
-static bool BBHasFallthrough(MachineBasicBlock *MBB) {
- // Get the next machine basic block in the function.
- MachineFunction::iterator MBBI = MBB;
- // Can't fall off end of function.
- if (llvm::next(MBBI) == MBB->getParent()->end())
- return false;
-
- MachineBasicBlock *NextBB = llvm::next(MBBI);
- for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
- E = MBB->succ_end(); I != E; ++I)
- if (*I == NextBB)
- return true;
-
- return false;
-}
-
-/// Do the initial scan of the function, building up information about the size
-/// of each block and the location of each immediate branch.
-void AArch64BranchFixup::initializeFunctionInfo() {
- BBInfo.clear();
- BBInfo.resize(MF->getNumBlockIDs());
-
- // First thing, compute the size of all basic blocks, and see if the function
- // has any inline assembly in it. If so, we have to be conservative about
- // alignment assumptions, as we don't know for sure the size of any
- // instructions in the inline assembly.
- for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E; ++I)
- computeBlockSize(I);
-
- // The known bits of the entry block offset are determined by the function
- // alignment.
- BBInfo.front().KnownBits = MF->getAlignment();
-
- // Compute block offsets and known bits.
- adjustBBOffsetsAfter(MF->begin());
-
- // Now go back through the instructions and build up our data structures.
- for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
- MBBI != E; ++MBBI) {
- MachineBasicBlock &MBB = *MBBI;
-
- for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
- I != E; ++I) {
- if (I->isDebugValue())
- continue;
-
- int Opc = I->getOpcode();
- if (I->isBranch()) {
- bool IsCond = false;
-
- // The offsets encoded in instructions here scale by the instruction
- // size (4 bytes), effectively increasing their range by 2 bits.
- unsigned Bits = 0;
- switch (Opc) {
- default:
- continue; // Ignore branches without an immediate offset
- case AArch64::TBZxii:
- case AArch64::TBZwii:
- case AArch64::TBNZxii:
- case AArch64::TBNZwii:
- IsCond = true;
- Bits = 14 + 2;
- break;
- case AArch64::Bcc:
- case AArch64::CBZx:
- case AArch64::CBZw:
- case AArch64::CBNZx:
- case AArch64::CBNZw:
- IsCond = true;
- Bits = 19 + 2;
- break;
- case AArch64::Bimm:
- Bits = 26 + 2;
- break;
- }
-
- // Record this immediate branch.
- ImmBranches.push_back(ImmBranch(I, Bits, IsCond));
- }
- }
- }
-}
-
-/// Compute the size and some alignment information for MBB. This function
-/// updates BBInfo directly.
-void AArch64BranchFixup::computeBlockSize(MachineBasicBlock *MBB) {
- BasicBlockInfo &BBI = BBInfo[MBB->getNumber()];
- BBI.Size = 0;
- BBI.Unalign = 0;
-
- for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
- ++I) {
- BBI.Size += TII->getInstSizeInBytes(*I);
- // For inline asm, getInstSizeInBytes returns a conservative estimate.
- // The actual size may be smaller, but still a multiple of the instr size.
- if (I->isInlineAsm())
- BBI.Unalign = 2;
- }
-}
-
-/// Return the current offset of the specified machine instruction from the
-/// start of the function. This offset changes as stuff is moved around inside
-/// the function.
-unsigned AArch64BranchFixup::getOffsetOf(MachineInstr *MI) const {
- MachineBasicBlock *MBB = MI->getParent();
-
- // The offset is composed of two things: the sum of the sizes of all MBB's
- // before this instruction's block, and the offset from the start of the block
- // it is in.
- unsigned Offset = BBInfo[MBB->getNumber()].Offset;
-
- // Sum instructions before MI in MBB.
- for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) {
- assert(I != MBB->end() && "Didn't find MI in its own basic block?");
- Offset += TII->getInstSizeInBytes(*I);
- }
- return Offset;
-}
-
-/// Split the basic block containing MI into two blocks, which are joined by
-/// an unconditional branch. Update data structures and renumber blocks to
- /// account for this change, and return the newly created block.
-MachineBasicBlock *
-AArch64BranchFixup::splitBlockBeforeInstr(MachineInstr *MI) {
- MachineBasicBlock *OrigBB = MI->getParent();
-
- // Create a new MBB for the code after the OrigBB.
- MachineBasicBlock *NewBB =
- MF->CreateMachineBasicBlock(OrigBB->getBasicBlock());
- MachineFunction::iterator MBBI = OrigBB; ++MBBI;
- MF->insert(MBBI, NewBB);
-
- // Splice the instructions starting with MI over to NewBB.
- NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());
-
- // Add an unconditional branch from OrigBB to NewBB.
- // Note the new unconditional branch is not being recorded.
- // There doesn't seem to be meaningful DebugInfo available; this doesn't
- // correspond to anything in the source.
- BuildMI(OrigBB, DebugLoc(), TII->get(AArch64::Bimm)).addMBB(NewBB);
- ++NumSplit;
-
- // Update the CFG. All succs of OrigBB are now succs of NewBB.
- NewBB->transferSuccessors(OrigBB);
-
- // OrigBB branches to NewBB.
- OrigBB->addSuccessor(NewBB);
-
- // Update internal data structures to account for the newly inserted MBB.
- MF->RenumberBlocks(NewBB);
-
- // Insert an entry into BBInfo to align it properly with the (newly
- // renumbered) block numbers.
- BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());
-
- // Figure out how large the OrigBB is. As the first half of the original
- // block, it cannot contain a tablejump. The size includes
- // the new jump we added. (It should be possible to do this without
- // recounting everything, but it's very confusing, and this is rarely
- // executed.)
- computeBlockSize(OrigBB);
-
- // Figure out how large the NewBB is. As the second half of the original
- // block, it may contain a tablejump.
- computeBlockSize(NewBB);
-
- // All BBOffsets following these blocks must be modified.
- adjustBBOffsetsAfter(OrigBB);
-
- return NewBB;
-}
-
-void AArch64BranchFixup::adjustBBOffsetsAfter(MachineBasicBlock *BB) {
- unsigned BBNum = BB->getNumber();
- for(unsigned i = BBNum + 1, e = MF->getNumBlockIDs(); i < e; ++i) {
- // Get the offset and known bits at the end of the layout predecessor.
- // Include the alignment of the current block.
- unsigned LogAlign = MF->getBlockNumbered(i)->getAlignment();
- unsigned Offset = BBInfo[i - 1].postOffset(LogAlign);
- unsigned KnownBits = BBInfo[i - 1].postKnownBits(LogAlign);
-
- // This is where block i begins. Stop if the offset is already correct,
- // and we have updated 2 blocks. This is the maximum number of blocks
- // changed before calling this function.
- if (i > BBNum + 2 &&
- BBInfo[i].Offset == Offset &&
- BBInfo[i].KnownBits == KnownBits)
- break;
-
- BBInfo[i].Offset = Offset;
- BBInfo[i].KnownBits = KnownBits;
- }
-}
-
-/// Returns true if the distance between the given MI and the given BB fits in
-/// MI's displacement field.
-bool AArch64BranchFixup::isBBInRange(MachineInstr *MI,
- MachineBasicBlock *DestBB,
- unsigned OffsetBits) {
- int64_t BrOffset = getOffsetOf(MI);
- int64_t DestOffset = BBInfo[DestBB->getNumber()].Offset;
-
- DEBUG(dbgs() << "Branch of destination BB#" << DestBB->getNumber()
- << " from BB#" << MI->getParent()->getNumber()
- << " bits available=" << OffsetBits
- << " from " << getOffsetOf(MI) << " to " << DestOffset
- << " offset " << int(DestOffset-BrOffset) << "\t" << *MI);
-
- return isIntN(OffsetBits, DestOffset - BrOffset);
-}
-
-/// Fix up an immediate branch whose destination is too far away to fit in its
-/// displacement field.
-bool AArch64BranchFixup::fixupImmediateBr(ImmBranch &Br) {
- MachineInstr *MI = Br.MI;
- MachineBasicBlock *DestBB = 0;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- if (MI->getOperand(i).isMBB()) {
- DestBB = MI->getOperand(i).getMBB();
- break;
- }
- }
- assert(DestBB && "Branch with no destination BB?");
-
- // Check to see if the DestBB is already in-range.
- if (isBBInRange(MI, DestBB, Br.OffsetBits))
- return false;
-
- assert(Br.IsCond && "Only conditional branches should need fixup");
- return fixupConditionalBr(Br);
-}
-
-/// Fix up a conditional branch whose destination is too far away to fit in its
-/// displacement field. It is converted to an inverse conditional branch + an
-/// unconditional branch to the destination.
-bool
-AArch64BranchFixup::fixupConditionalBr(ImmBranch &Br) {
- MachineInstr *MI = Br.MI;
- MachineBasicBlock *MBB = MI->getParent();
- unsigned CondBrMBBOperand = 0;
-
- // The general idea is to add an unconditional branch to the destination and
- // invert the conditional branch to jump over it. Complications occur around
- // fallthrough and unreachable ends to the block.
- // b.lt L1
- // =>
- // b.ge L2
- // b L1
- // L2:
-
- // First we invert the conditional branch, by creating a replacement if
- // necessary. This if statement contains all the special handling of different
- // branch types.
- if (MI->getOpcode() == AArch64::Bcc) {
- // The basic block is operand number 1 for Bcc
- CondBrMBBOperand = 1;
-
- A64CC::CondCodes CC = (A64CC::CondCodes)MI->getOperand(0).getImm();
- CC = A64InvertCondCode(CC);
- MI->getOperand(0).setImm(CC);
- } else {
- MachineInstrBuilder InvertedMI;
- int InvertedOpcode;
- switch (MI->getOpcode()) {
- default: llvm_unreachable("Unknown branch type");
- case AArch64::TBZxii: InvertedOpcode = AArch64::TBNZxii; break;
- case AArch64::TBZwii: InvertedOpcode = AArch64::TBNZwii; break;
- case AArch64::TBNZxii: InvertedOpcode = AArch64::TBZxii; break;
- case AArch64::TBNZwii: InvertedOpcode = AArch64::TBZwii; break;
- case AArch64::CBZx: InvertedOpcode = AArch64::CBNZx; break;
- case AArch64::CBZw: InvertedOpcode = AArch64::CBNZw; break;
- case AArch64::CBNZx: InvertedOpcode = AArch64::CBZx; break;
- case AArch64::CBNZw: InvertedOpcode = AArch64::CBZw; break;
- }
-
- InvertedMI = BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(InvertedOpcode));
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- InvertedMI.addOperand(MI->getOperand(i));
- if (MI->getOperand(i).isMBB())
- CondBrMBBOperand = i;
- }
-
- MI->eraseFromParent();
- MI = Br.MI = InvertedMI;
- }
-
- // If the branch is at the end of its MBB and that has a fall-through block,
- // direct the updated conditional branch to the fall-through
- // block. Otherwise, split the MBB before the next instruction.
- MachineInstr *BMI = &MBB->back();
- bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);
-
- ++NumCBrFixed;
- if (BMI != MI) {
- if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) &&
- BMI->getOpcode() == AArch64::Bimm) {
- // Last MI in the BB is an unconditional branch. We can swap destinations:
- // b.eq L1 (temporarily b.ne L1 after first change)
- // b L2
- // =>
- // b.ne L2
- // b L1
- MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
- if (isBBInRange(MI, NewDest, Br.OffsetBits)) {
- DEBUG(dbgs() << " Invert Bcc condition and swap its destination with "
- << *BMI);
- MachineBasicBlock *DestBB = MI->getOperand(CondBrMBBOperand).getMBB();
- BMI->getOperand(0).setMBB(DestBB);
- MI->getOperand(CondBrMBBOperand).setMBB(NewDest);
- return true;
- }
- }
- }
-
- if (NeedSplit) {
- MachineBasicBlock::iterator MBBI = MI; ++MBBI;
- splitBlockBeforeInstr(MBBI);
- // No need for the branch to the next block. We're adding an unconditional
- // branch to the destination.
- int delta = TII->getInstSizeInBytes(MBB->back());
- BBInfo[MBB->getNumber()].Size -= delta;
- MBB->back().eraseFromParent();
- // BBInfo[SplitBB].Offset is wrong temporarily, fixed below
- }
-
- // After splitting and removing the unconditional branch from the original BB,
- // the structure is now:
- // oldbb:
- // [things]
- // b.invertedCC L1
- // splitbb/fallthroughbb:
- // [old b L2/real continuation]
- //
- // We now have to change the conditional branch to point to splitbb and add an
- // unconditional branch after it to L1, giving the final structure:
- // oldbb:
- // [things]
- // b.invertedCC splitbb
- // b L1
- // splitbb/fallthroughbb:
- // [old b L2/real continuation]
- MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
-
- DEBUG(dbgs() << " Insert B to BB#"
- << MI->getOperand(CondBrMBBOperand).getMBB()->getNumber()
- << " also invert condition and change dest. to BB#"
- << NextBB->getNumber() << "\n");
-
- // Insert a new unconditional branch and fixup the destination of the
- // conditional one. Also update the ImmBranch as well as adding a new entry
- // for the new branch.
- BuildMI(MBB, DebugLoc(), TII->get(AArch64::Bimm))
- .addMBB(MI->getOperand(CondBrMBBOperand).getMBB());
- MI->getOperand(CondBrMBBOperand).setMBB(NextBB);
-
- BBInfo[MBB->getNumber()].Size += TII->getInstSizeInBytes(MBB->back());
-
- // 26 bits written down in Bimm, specifying a multiple of 4.
- unsigned OffsetBits = 26 + 2;
- ImmBranches.push_back(ImmBranch(&MBB->back(), OffsetBits, false));
-
- adjustBBOffsetsAfter(MBB);
- return true;
-}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64BranchRelaxation.cpp b/contrib/llvm/lib/Target/AArch64/AArch64BranchRelaxation.cpp
new file mode 100644
index 0000000..484e7e8
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64BranchRelaxation.cpp
@@ -0,0 +1,510 @@
+//===-- AArch64BranchRelaxation.cpp - AArch64 branch relaxation -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//===----------------------------------------------------------------------===//
+
+#include "AArch64.h"
+#include "AArch64InstrInfo.h"
+#include "AArch64MachineFunctionInfo.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/CommandLine.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "aarch64-branch-relax"
+
+static cl::opt<bool>
+BranchRelaxation("aarch64-branch-relax", cl::Hidden, cl::init(true),
+ cl::desc("Relax out of range conditional branches"));
+
+static cl::opt<unsigned>
+TBZDisplacementBits("aarch64-tbz-offset-bits", cl::Hidden, cl::init(14),
+ cl::desc("Restrict range of TB[N]Z instructions (DEBUG)"));
+
+static cl::opt<unsigned>
+CBZDisplacementBits("aarch64-cbz-offset-bits", cl::Hidden, cl::init(19),
+ cl::desc("Restrict range of CB[N]Z instructions (DEBUG)"));
+
+static cl::opt<unsigned>
+BCCDisplacementBits("aarch64-bcc-offset-bits", cl::Hidden, cl::init(19),
+ cl::desc("Restrict range of Bcc instructions (DEBUG)"));
+
+STATISTIC(NumSplit, "Number of basic blocks split");
+STATISTIC(NumRelaxed, "Number of conditional branches relaxed");
+
+namespace {
+class AArch64BranchRelaxation : public MachineFunctionPass {
+ /// BasicBlockInfo - Information about the offset and size of a single
+ /// basic block.
+ struct BasicBlockInfo {
+ /// Offset - Distance from the beginning of the function to the beginning
+ /// of this basic block.
+ ///
+ /// The offset is always aligned as required by the basic block.
+ unsigned Offset;
+
+ /// Size - Size of the basic block in bytes. If the block contains
+ /// inline assembly, this is a worst case estimate.
+ ///
+ /// The size does not include any alignment padding whether from the
+ /// beginning of the block, or from an aligned jump table at the end.
+ unsigned Size;
+
+ BasicBlockInfo() : Offset(0), Size(0) {}
+
+ /// Compute the offset immediately following this block. If LogAlign is
+ /// specified, return the offset the successor block will get if it has
+ /// this alignment.
+ unsigned postOffset(unsigned LogAlign = 0) const {
+ unsigned PO = Offset + Size;
+ unsigned Align = 1 << LogAlign;
+ return (PO + Align - 1) / Align * Align;
+ }
+ };
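The rounding in postOffset above is the standard power-of-two round-up on a log2 alignment. A minimal standalone sketch of the same arithmetic, with invented offsets:

#include <cassert>

// Round PO up to the next multiple of 2^LogAlign, as postOffset() does.
static unsigned roundUp(unsigned PO, unsigned LogAlign) {
  unsigned Align = 1u << LogAlign;
  return (PO + Align - 1) / Align * Align;
}

int main() {
  assert(roundUp(0x1234, 0) == 0x1234); // LogAlign 0: no padding needed
  assert(roundUp(0x1234, 2) == 0x1234); // already 4-byte aligned
  assert(roundUp(0x1234, 4) == 0x1240); // pad up to the next 16-byte boundary
  return 0;
}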
+
+ SmallVector<BasicBlockInfo, 16> BlockInfo;
+
+ MachineFunction *MF;
+ const AArch64InstrInfo *TII;
+
+ bool relaxBranchInstructions();
+ void scanFunction();
+ MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
+ void adjustBlockOffsets(MachineBasicBlock &MBB);
+ bool isBlockInRange(MachineInstr *MI, MachineBasicBlock *BB, unsigned Disp);
+ bool fixupConditionalBranch(MachineInstr *MI);
+ void computeBlockSize(const MachineBasicBlock &MBB);
+ unsigned getInstrOffset(MachineInstr *MI) const;
+ void dumpBBs();
+ void verify();
+
+public:
+ static char ID;
+ AArch64BranchRelaxation() : MachineFunctionPass(ID) {}
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ const char *getPassName() const override {
+ return "AArch64 branch relaxation pass";
+ }
+};
+char AArch64BranchRelaxation::ID = 0;
+}
+
+/// verify - check block offsets, sizes, and alignment for consistency
+void AArch64BranchRelaxation::verify() {
+#ifndef NDEBUG
+ unsigned PrevNum = MF->begin()->getNumber();
+ for (MachineBasicBlock &MBB : *MF) {
+ unsigned Align = MBB.getAlignment();
+ unsigned Num = MBB.getNumber();
+ assert(BlockInfo[Num].Offset % (1u << Align) == 0);
+ assert(!Num || BlockInfo[PrevNum].postOffset() <= BlockInfo[Num].Offset);
+ PrevNum = Num;
+ }
+#endif
+}
+
+/// print block size and offset information - debugging
+void AArch64BranchRelaxation::dumpBBs() {
+ for (auto &MBB : *MF) {
+ const BasicBlockInfo &BBI = BlockInfo[MBB.getNumber()];
+ dbgs() << format("BB#%u\toffset=%08x\t", MBB.getNumber(), BBI.Offset)
+ << format("size=%#x\n", BBI.Size);
+ }
+}
+
+/// BBHasFallthrough - Return true if the specified basic block can fallthrough
+/// into the block immediately after it.
+static bool BBHasFallthrough(MachineBasicBlock *MBB) {
+ // Get the next machine basic block in the function.
+ MachineFunction::iterator MBBI = MBB;
+ // Can't fall off end of function.
+ MachineBasicBlock *NextBB = std::next(MBBI);
+ if (NextBB == MBB->getParent()->end())
+ return false;
+
+ for (MachineBasicBlock *S : MBB->successors())
+ if (S == NextBB)
+ return true;
+
+ return false;
+}
+
+/// scanFunction - Do the initial scan of the function, building up
+/// information about each block.
+void AArch64BranchRelaxation::scanFunction() {
+ BlockInfo.clear();
+ BlockInfo.resize(MF->getNumBlockIDs());
+
+ // First thing, compute the size of all basic blocks, and see if the function
+ // has any inline assembly in it. If so, we have to be conservative about
+ // alignment assumptions, as we don't know for sure the size of any
+ // instructions in the inline assembly.
+ for (MachineBasicBlock &MBB : *MF)
+ computeBlockSize(MBB);
+
+ // Compute the block offsets.
+ adjustBlockOffsets(*MF->begin());
+}
+
+/// computeBlockSize - Compute the size for MBB.
+/// This function updates BlockInfo directly.
+void AArch64BranchRelaxation::computeBlockSize(const MachineBasicBlock &MBB) {
+ unsigned Size = 0;
+ for (const MachineInstr &MI : MBB)
+ Size += TII->GetInstSizeInBytes(&MI);
+ BlockInfo[MBB.getNumber()].Size = Size;
+}
+
+/// getInstrOffset - Return the current offset of the specified machine
+/// instruction from the start of the function. This offset changes as stuff is
+/// moved around inside the function.
+unsigned AArch64BranchRelaxation::getInstrOffset(MachineInstr *MI) const {
+ MachineBasicBlock *MBB = MI->getParent();
+
+ // The offset is composed of two things: the sum of the sizes of all MBB's
+ // before this instruction's block, and the offset from the start of the block
+ // it is in.
+ unsigned Offset = BlockInfo[MBB->getNumber()].Offset;
+
+ // Sum instructions before MI in MBB.
+ for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) {
+ assert(I != MBB->end() && "Didn't find MI in its own basic block?");
+ Offset += TII->GetInstSizeInBytes(I);
+ }
+ return Offset;
+}
+
+void AArch64BranchRelaxation::adjustBlockOffsets(MachineBasicBlock &Start) {
+ unsigned PrevNum = Start.getNumber();
+ for (auto &MBB : make_range(MachineFunction::iterator(Start), MF->end())) {
+ unsigned Num = MBB.getNumber();
+ if (!Num) // block zero is never changed from offset zero.
+ continue;
+ // Get the offset at the end of the layout predecessor, rounded up to the
+ // alignment of the current block.
+ unsigned LogAlign = MBB.getAlignment();
+ BlockInfo[Num].Offset = BlockInfo[PrevNum].postOffset(LogAlign);
+ PrevNum = Num;
+ }
+}
+
+/// Split the basic block containing MI into two blocks, which are joined by
+/// an unconditional branch. Update data structures and renumber blocks to
+/// account for this change, and return the newly created block.
+/// NOTE: The successor list of the original BB is out of date after this
+/// function runs and must be updated by the caller! The caller applies further
+/// transforms straight away, so there is no point updating the list now rather
+/// than waiting.
+MachineBasicBlock *
+AArch64BranchRelaxation::splitBlockBeforeInstr(MachineInstr *MI) {
+ MachineBasicBlock *OrigBB = MI->getParent();
+
+ // Create a new MBB for the code after the OrigBB.
+ MachineBasicBlock *NewBB =
+ MF->CreateMachineBasicBlock(OrigBB->getBasicBlock());
+ MachineFunction::iterator MBBI = OrigBB;
+ ++MBBI;
+ MF->insert(MBBI, NewBB);
+
+ // Splice the instructions starting with MI over to NewBB.
+ NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());
+
+ // Add an unconditional branch from OrigBB to NewBB.
+ // Note the new unconditional branch is not being recorded.
+ // There doesn't seem to be meaningful DebugInfo available; this doesn't
+ // correspond to anything in the source.
+ BuildMI(OrigBB, DebugLoc(), TII->get(AArch64::B)).addMBB(NewBB);
+
+ // Insert an entry into BlockInfo to align it properly with the block numbers.
+ BlockInfo.insert(BlockInfo.begin() + NewBB->getNumber(), BasicBlockInfo());
+
+ // Figure out how large the OrigBB is. As the first half of the original
+ // block, it cannot contain a tablejump. The size includes
+ // the new jump we added. (It should be possible to do this without
+ // recounting everything, but it's very confusing, and this is rarely
+ // executed.)
+ computeBlockSize(*OrigBB);
+
+ // Figure out how large the NewBB is. As the second half of the original
+ // block, it may contain a tablejump.
+ computeBlockSize(*NewBB);
+
+ // All BBOffsets following these blocks must be modified.
+ adjustBlockOffsets(*OrigBB);
+
+ ++NumSplit;
+
+ return NewBB;
+}
+
+/// isBlockInRange - Returns true if the distance between the given MI and the
+/// given BB fits in MI's displacement field.
+bool AArch64BranchRelaxation::isBlockInRange(MachineInstr *MI,
+ MachineBasicBlock *DestBB,
+ unsigned Bits) {
+ unsigned MaxOffs = ((1 << (Bits - 1)) - 1) << 2;
+ unsigned BrOffset = getInstrOffset(MI);
+ unsigned DestOffset = BlockInfo[DestBB->getNumber()].Offset;
+
+ DEBUG(dbgs() << "Branch of destination BB#" << DestBB->getNumber()
+ << " from BB#" << MI->getParent()->getNumber()
+ << " max delta=" << MaxOffs << " from " << getInstrOffset(MI)
+ << " to " << DestOffset << " offset "
+ << int(DestOffset - BrOffset) << "\t" << *MI);
+
+ // Branch before the Dest.
+ if (BrOffset <= DestOffset)
+ return (DestOffset - BrOffset <= MaxOffs);
+ return (BrOffset - DestOffset <= MaxOffs);
+}
+
+static bool isConditionalBranch(unsigned Opc) {
+ switch (Opc) {
+ default:
+ return false;
+ case AArch64::TBZW:
+ case AArch64::TBNZW:
+ case AArch64::TBZX:
+ case AArch64::TBNZX:
+ case AArch64::CBZW:
+ case AArch64::CBNZW:
+ case AArch64::CBZX:
+ case AArch64::CBNZX:
+ case AArch64::Bcc:
+ return true;
+ }
+}
+
+static MachineBasicBlock *getDestBlock(MachineInstr *MI) {
+ switch (MI->getOpcode()) {
+ default:
+ llvm_unreachable("unexpected opcode!");
+ case AArch64::TBZW:
+ case AArch64::TBNZW:
+ case AArch64::TBZX:
+ case AArch64::TBNZX:
+ return MI->getOperand(2).getMBB();
+ case AArch64::CBZW:
+ case AArch64::CBNZW:
+ case AArch64::CBZX:
+ case AArch64::CBNZX:
+ case AArch64::Bcc:
+ return MI->getOperand(1).getMBB();
+ }
+}
+
+static unsigned getOppositeConditionOpcode(unsigned Opc) {
+ switch (Opc) {
+ default:
+ llvm_unreachable("unexpected opcode!");
+ case AArch64::TBNZW: return AArch64::TBZW;
+ case AArch64::TBNZX: return AArch64::TBZX;
+ case AArch64::TBZW: return AArch64::TBNZW;
+ case AArch64::TBZX: return AArch64::TBNZX;
+ case AArch64::CBNZW: return AArch64::CBZW;
+ case AArch64::CBNZX: return AArch64::CBZX;
+ case AArch64::CBZW: return AArch64::CBNZW;
+ case AArch64::CBZX: return AArch64::CBNZX;
+ case AArch64::Bcc: return AArch64::Bcc; // Condition is an operand for Bcc.
+ }
+}
+
+static unsigned getBranchDisplacementBits(unsigned Opc) {
+ switch (Opc) {
+ default:
+ llvm_unreachable("unexpected opcode!");
+ case AArch64::TBNZW:
+ case AArch64::TBZW:
+ case AArch64::TBNZX:
+ case AArch64::TBZX:
+ return TBZDisplacementBits;
+ case AArch64::CBNZW:
+ case AArch64::CBZW:
+ case AArch64::CBNZX:
+ case AArch64::CBZX:
+ return CBZDisplacementBits;
+ case AArch64::Bcc:
+ return BCCDisplacementBits;
+ }
+}
+
+static inline void invertBccCondition(MachineInstr *MI) {
+ assert(MI->getOpcode() == AArch64::Bcc && "Unexpected opcode!");
+ AArch64CC::CondCode CC = (AArch64CC::CondCode)MI->getOperand(0).getImm();
+ CC = AArch64CC::getInvertedCondCode(CC);
+ MI->getOperand(0).setImm((int64_t)CC);
+}
+
+/// fixupConditionalBranch - Fix up a conditional branch whose destination is
+/// too far away to fit in its displacement field. It is converted to an inverse
+/// conditional branch + an unconditional branch to the destination.
+bool AArch64BranchRelaxation::fixupConditionalBranch(MachineInstr *MI) {
+ MachineBasicBlock *DestBB = getDestBlock(MI);
+
+ // Add an unconditional branch to the destination and invert the branch
+ // condition to jump over it:
+ // tbz L1
+ // =>
+ // tbnz L2
+ // b L1
+ // L2:
+
+ // If the branch is at the end of its MBB and that has a fall-through block,
+ // direct the updated conditional branch to the fall-through block. Otherwise,
+ // split the MBB before the next instruction.
+ MachineBasicBlock *MBB = MI->getParent();
+ MachineInstr *BMI = &MBB->back();
+ bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);
+
+ if (BMI != MI) {
+ if (std::next(MachineBasicBlock::iterator(MI)) ==
+ std::prev(MBB->getLastNonDebugInstr()) &&
+ BMI->getOpcode() == AArch64::B) {
+ // Last MI in the BB is an unconditional branch. Can we simply invert the
+ // condition and swap destinations:
+ // beq L1
+ // b L2
+ // =>
+ // bne L2
+ // b L1
+ MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
+ if (isBlockInRange(MI, NewDest,
+ getBranchDisplacementBits(MI->getOpcode()))) {
+ DEBUG(dbgs() << " Invert condition and swap its destination with "
+ << *BMI);
+ BMI->getOperand(0).setMBB(DestBB);
+ unsigned OpNum = (MI->getOpcode() == AArch64::TBZW ||
+ MI->getOpcode() == AArch64::TBNZW ||
+ MI->getOpcode() == AArch64::TBZX ||
+ MI->getOpcode() == AArch64::TBNZX)
+ ? 2
+ : 1;
+ MI->getOperand(OpNum).setMBB(NewDest);
+ MI->setDesc(TII->get(getOppositeConditionOpcode(MI->getOpcode())));
+ if (MI->getOpcode() == AArch64::Bcc)
+ invertBccCondition(MI);
+ return true;
+ }
+ }
+ }
+
+ if (NeedSplit) {
+ // Analyze the branch so we know how to update the successor lists.
+ MachineBasicBlock *TBB, *FBB;
+ SmallVector<MachineOperand, 2> Cond;
+ TII->AnalyzeBranch(*MBB, TBB, FBB, Cond, false);
+
+ MachineBasicBlock *NewBB = splitBlockBeforeInstr(MI);
+ // No need for the branch to the next block. We're adding an unconditional
+ // branch to the destination.
+ int delta = TII->GetInstSizeInBytes(&MBB->back());
+ BlockInfo[MBB->getNumber()].Size -= delta;
+ MBB->back().eraseFromParent();
+ // BlockInfo[SplitBB].Offset is wrong temporarily, fixed below
+
+ // Update the successor lists according to the transformation to follow.
+ // Do it here since if there's no split, no update is needed.
+ MBB->replaceSuccessor(FBB, NewBB);
+ NewBB->addSuccessor(FBB);
+ }
+ MachineBasicBlock *NextBB = std::next(MachineFunction::iterator(MBB));
+
+ DEBUG(dbgs() << " Insert B to BB#" << DestBB->getNumber()
+ << ", invert condition and change dest. to BB#"
+ << NextBB->getNumber() << "\n");
+
+ // Insert a new conditional branch and a new unconditional branch.
+ MachineInstrBuilder MIB = BuildMI(
+ MBB, DebugLoc(), TII->get(getOppositeConditionOpcode(MI->getOpcode())))
+ .addOperand(MI->getOperand(0));
+ if (MI->getOpcode() == AArch64::TBZW || MI->getOpcode() == AArch64::TBNZW ||
+ MI->getOpcode() == AArch64::TBZX || MI->getOpcode() == AArch64::TBNZX)
+ MIB.addOperand(MI->getOperand(1));
+ if (MI->getOpcode() == AArch64::Bcc)
+ invertBccCondition(MIB);
+ MIB.addMBB(NextBB);
+ BlockInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
+ BuildMI(MBB, DebugLoc(), TII->get(AArch64::B)).addMBB(DestBB);
+ BlockInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
+
+ // Remove the old conditional branch. It may or may not still be in MBB.
+ BlockInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(MI);
+ MI->eraseFromParent();
+
+ // Finally, keep the block offsets up to date.
+ adjustBlockOffsets(*MBB);
+ return true;
+}
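The shape of the rewrite fixupConditionalBranch performs can be modelled outside the MachineInstr machinery; a rough sketch in which the helper, its types, and the label names are invented for illustration and are not LLVM API:

#include <string>
#include <vector>

// If the conditional branch still reaches its target, keep it; otherwise emit
// the inverted condition jumping over an unconditional branch to the target.
static std::vector<std::string> relaxConditional(bool targetInRange,
                                                 const std::string &Cond,
                                                 const std::string &InvCond) {
  if (targetInRange)
    return {"b." + Cond + " target"};
  return {"b." + InvCond + " fallthrough", // hop over the long branch
          "b target"};                     // unconditional B: ~+/-128 MiB reach
}

// relaxConditional(false, "eq", "ne") yields {"b.ne fallthrough", "b target"}.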
+
+bool AArch64BranchRelaxation::relaxBranchInstructions() {
+ bool Changed = false;
+ // Relaxing branches involves creating new basic blocks, so re-eval
+ // end() for termination.
+ for (auto &MBB : *MF) {
+ MachineInstr *MI = MBB.getFirstTerminator();
+ if (isConditionalBranch(MI->getOpcode()) &&
+ !isBlockInRange(MI, getDestBlock(MI),
+ getBranchDisplacementBits(MI->getOpcode()))) {
+ fixupConditionalBranch(MI);
+ ++NumRelaxed;
+ Changed = true;
+ }
+ }
+ return Changed;
+}
+
+bool AArch64BranchRelaxation::runOnMachineFunction(MachineFunction &mf) {
+ MF = &mf;
+
+ // If the pass is disabled, just bail early.
+ if (!BranchRelaxation)
+ return false;
+
+ DEBUG(dbgs() << "***** AArch64BranchRelaxation *****\n");
+
+ TII = (const AArch64InstrInfo *)MF->getTarget().getInstrInfo();
+
+ // Renumber all of the machine basic blocks in the function, guaranteeing that
+ // the numbers agree with the position of the block in the function.
+ MF->RenumberBlocks();
+
+ // Do the initial scan of the function, building up information about the
+ // sizes of each block.
+ scanFunction();
+
+ DEBUG(dbgs() << " Basic blocks before relaxation\n");
+ DEBUG(dumpBBs());
+
+ bool MadeChange = false;
+ while (relaxBranchInstructions())
+ MadeChange = true;
+
+ // After a while, this might be made debug-only, but it is not expensive.
+ verify();
+
+ DEBUG(dbgs() << " Basic blocks after relaxation\n");
+ DEBUG(dbgs() << '\n'; dumpBBs());
+
+ BlockInfo.clear();
+
+ return MadeChange;
+}
+
+/// createAArch64BranchRelaxation - returns an instance of the AArch64 branch
+/// relaxation pass.
+FunctionPass *llvm::createAArch64BranchRelaxation() {
+ return new AArch64BranchRelaxation();
+}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64CallingConv.td b/contrib/llvm/lib/Target/AArch64/AArch64CallingConv.td
deleted file mode 100644
index a2a9f3f..0000000
--- a/contrib/llvm/lib/Target/AArch64/AArch64CallingConv.td
+++ /dev/null
@@ -1,197 +0,0 @@
-//==-- AArch64CallingConv.td - Calling Conventions for AArch64 -*- tblgen -*-==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// This describes the calling conventions for AArch64 architecture.
-//===----------------------------------------------------------------------===//
-
-
-// The AArch64 Procedure Call Standard is unfortunately specified at a slightly
-// higher level of abstraction than LLVM's target interface presents. In
-// particular, it refers (like other ABIs, in fact) directly to
-// structs. However, generic LLVM code takes the liberty of lowering structure
-// arguments to the component fields before we see them.
-//
-// As a result, the obvious direct map from LLVM IR to PCS concepts can't be
-// implemented, so the goals of this calling convention are, in decreasing
-// priority order:
-// 1. Expose *some* way to express the concepts required to implement the
-// generic PCS from a front-end.
-// 2. Provide a sane ABI for pure LLVM.
-// 3. Follow the generic PCS as closely as is naturally possible.
-//
-// The suggested front-end implementation of PCS features is:
-// * Integer, float and vector arguments of all sizes which end up in
-// registers are passed and returned via the natural LLVM type.
-// * Structure arguments with size <= 16 bytes are passed and returned in
-// registers as similar integer or composite types. For example:
-// [1 x i64], [2 x i64] or [1 x i128] (if alignment 16 needed).
-// * HFAs in registers follow rules similar to small structs: appropriate
-// composite types.
-// * Structure arguments with size > 16 bytes are passed via a pointer,
-// handled completely by the front-end.
-// * Structure return values > 16 bytes via an sret pointer argument.
-// * Other stack-based arguments (not large structs) are passed using byval
-// pointers. Padding arguments are added beforehand to guarantee a large
-// struct doesn't later use integer registers.
-//
-// N.b. this means that it is the front-end's responsibility (if it cares about
-// PCS compliance) to check whether enough registers are available for an
-// argument when deciding how to pass it.
-
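-
To make the suggested front-end lowering above concrete, a hedged C-level illustration; the types, names, and register comments are examples following the rules quoted, not anything defined in this file:

struct Pair { long a, b; };      // 16 bytes: lowered to [2 x i64], passed in x0/x1
struct Trio { long a, b, c; };   // 24 bytes: caller copies it and passes the address
struct Hfa2 { double x, y; };    // HFA: one FP register per element, d0/d1

long consume(Pair p, Trio t, Hfa2 h);
// p  -> x0, x1
// &t -> x2   (indirect; produced entirely by the front-end)
// h  -> d0, d1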
-class CCIfAlign<int Align, CCAction A>:
- CCIf<"ArgFlags.getOrigAlign() == " # Align, A>;
-
-def CC_A64_APCS : CallingConv<[
- // SRet is an LLVM-specific concept, so it takes precedence over general ABI
- // concerns. However, this rule will be used by C/C++ frontends to implement
- // structure return.
- CCIfSRet<CCAssignToReg<[X8]>>,
-
- // Put ByVal arguments directly on the stack. Minimum size and alignment of a
- // slot is 64-bit.
- CCIfByVal<CCPassByVal<8, 8>>,
-
- // Canonicalise the various types that live in different floating-point
- // registers. This makes sense because the PCS does not distinguish Short
- // Vectors and Floating-point types.
- CCIfType<[v1i16, v2i8], CCBitConvertToType<f16>>,
- CCIfType<[v1i32, v4i8, v2i16, v1f32], CCBitConvertToType<f32>>,
- CCIfType<[v8i8, v4i16, v2i32, v2f32, v1i64, v1f64], CCBitConvertToType<f64>>,
- CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
- CCBitConvertToType<f128>>,
-
- // PCS: "C.1: If the argument is a Half-, Single-, Double- or Quad- precision
- // Floating-point or Short Vector Type and the NSRN is less than 8, then the
- // argument is allocated to the least significant bits of register
- // v[NSRN]. The NSRN is incremented by one. The argument has now been
- // allocated."
- CCIfType<[v1i8], CCAssignToReg<[B0, B1, B2, B3, B4, B5, B6, B7]>>,
- CCIfType<[f16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
- CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7]>>,
- CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
- CCIfType<[f128], CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
-
- // PCS: "C.2: If the argument is an HFA and there are sufficient unallocated
- // SIMD and Floating-point registers (NSRN - number of elements < 8), then the
- // argument is allocated to SIMD and Floating-point registers (with one
- // register per element of the HFA). The NSRN is incremented by the number of
- // registers used. The argument has now been allocated."
- //
- // N.b. As above, this rule is the responsibility of the front-end.
-
- // "C.3: If the argument is an HFA then the NSRN is set to 8 and the size of
- // the argument is rounded up to the nearest multiple of 8 bytes."
- //
- // "C.4: If the argument is an HFA, a Quad-precision Floating-point or Short
- // Vector Type then the NSAA is rounded up to the larger of 8 or the Natural
- // Alignment of the Argument's type."
- //
- // It is expected that these will be satisfied by adding dummy arguments to
- // the prototype.
-
- // PCS: "C.5: If the argument is a Half- or Single- precision Floating-point
- // type then the size of the argument is set to 8 bytes. The effect is as if
- // the argument had been copied to the least significant bits of a 64-bit
- // register and the remaining bits filled with unspecified values."
- CCIfType<[f16, f32], CCPromoteToType<f64>>,
-
- // PCS: "C.6: If the argument is an HFA, a Half-, Single-, Double- or Quad-
- // precision Floating-point or Short Vector Type, then the argument is copied
- // to memory at the adjusted NSAA. The NSAA is incremented by the size of the
- // argument. The argument has now been allocated."
- CCIfType<[f64], CCAssignToStack<8, 8>>,
- CCIfType<[f128], CCAssignToStack<16, 16>>,
-
- // PCS: "C.7: If the argument is an Integral Type, the size of the argument is
- // less than or equal to 8 bytes and the NGRN is less than 8, the argument is
- // copied to the least significant bits of x[NGRN]. The NGRN is incremented by
- // one. The argument has now been allocated."
-
- // First we implement C.8 and C.9 (128-bit types get even registers). i128 is
- // represented as two i64s, the first one being split. If we delayed this
- // operation C.8 would never be reached.
- CCIfType<[i64],
- CCIfSplit<CCAssignToRegWithShadow<[X0, X2, X4, X6], [X0, X1, X3, X5]>>>,
-
- // Note: the promotion also implements C.14.
- CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
-
- // And now the real implementation of C.7
- CCIfType<[i64], CCAssignToReg<[X0, X1, X2, X3, X4, X5, X6, X7]>>,
-
- // PCS: "C.8: If the argument has an alignment of 16 then the NGRN is rounded
- // up to the next even number."
- //
- // "C.9: If the argument is an Integral Type, the size of the argument is
- // equal to 16 and the NGRN is less than 7, the argument is copied to x[NGRN]
- // and x[NGRN+1], x[NGRN] shall contain the lower addressed double-word of the
- // memory representation of the argument. The NGRN is incremented by two. The
- // argument has now been allocated."
- //
- // Subtlety here: what if alignment is 16 but it is not an integral type? All
- // floating-point types have been allocated already, which leaves composite
- // types: this is why a front-end may need to produce i128 for a struct <= 16
- // bytes.
-
- // PCS: "C.10 If the argument is a Composite Type and the size in double-words
- // of the argument is not more than 8 minus NGRN, then the argument is copied
- // into consecutive general-purpose registers, starting at x[NGRN]. The
- // argument is passed as though it had been loaded into the registers from a
- // double-word aligned address with an appropriate sequence of LDR
- // instructions loading consecutive registers from memory (the contents of any
- // unused parts of the registers are unspecified by this standard). The NGRN
- // is incremented by the number of registers used. The argument has now been
- // allocated."
- //
- // Another one that's the responsibility of the front-end (sigh).
-
- // PCS: "C.11: The NGRN is set to 8."
- CCCustom<"CC_AArch64NoMoreRegs">,
-
- // PCS: "C.12: The NSAA is rounded up to the larger of 8 or the Natural
- // Alignment of the argument's type."
- //
- // PCS: "C.13: If the argument is a composite type then the argument is copied
- // to memory at the adjusted NSAA. The NSAA is incremented by the size of the
- // argument. The argument has now been allocated."
- //
- // Note that the effect of this corresponds to a memcpy rather than register
- // stores so that the struct ends up correctly addressable at the adjusted
- // NSAA.
-
- // PCS: "C.14: If the size of the argument is less than 8 bytes then the size
- // of the argument is set to 8 bytes. The effect is as if the argument was
- // copied to the least significant bits of a 64-bit register and the remaining
- // bits filled with unspecified values."
- //
- // Integer types were widened above. Floating-point and composite types have
- // already been allocated completely. Nothing to do.
-
- // PCS: "C.15: The argument is copied to memory at the adjusted NSAA. The NSAA
- // is incremented by the size of the argument. The argument has now been
- // allocated."
- CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
- CCIfType<[i64], CCAssignToStack<8, 8>>
-
-]>;
-
-// According to the PCS, X19-X30 are callee-saved; however, only the low 64 bits
-// of vector registers (8-15) are callee-saved. The order here is picked up
-// by PrologEpilogInserter.cpp to allocate stack slots, starting from top of
-// stack upon entry. This gives the customary layout of x30 at [sp-8], x29 at
-// [sp-16], ...
-def CSR_PCS : CalleeSavedRegs<(add (sequence "X%u", 30, 19),
- (sequence "D%u", 15, 8))>;
-
-
-// TLS descriptor calls are extremely restricted in their changes, to allow
-// optimisations in the (hopefully) more common fast path where no real action
-// is needed. They actually have to preserve all registers, except for the
-// unavoidable X30 and the return register X0.
-def TLSDesc : CalleeSavedRegs<(add (sequence "X%u", 29, 1),
- (sequence "Q%u", 31, 0))>;
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64CallingConvention.td b/contrib/llvm/lib/Target/AArch64/AArch64CallingConvention.td
new file mode 100644
index 0000000..1fe5138
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64CallingConvention.td
@@ -0,0 +1,242 @@
+//=- AArch64CallingConv.td - Calling Conventions for AArch64 -*- tablegen -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This describes the calling conventions for AArch64 architecture.
+//
+//===----------------------------------------------------------------------===//
+
+/// CCIfAlign - Match on the original alignment of the argument
+class CCIfAlign<string Align, CCAction A> :
+ CCIf<!strconcat("ArgFlags.getOrigAlign() == ", Align), A>;
+/// CCIfBigEndian - Match only if we're in big endian mode.
+class CCIfBigEndian<CCAction A> :
+ CCIf<"State.getTarget().getDataLayout()->isBigEndian()", A>;
+
+//===----------------------------------------------------------------------===//
+// ARM AAPCS64 Calling Convention
+//===----------------------------------------------------------------------===//
+
+def CC_AArch64_AAPCS : CallingConv<[
+ CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
+ CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,
+
+ // Big endian vectors must be passed as if they were 1-element vectors so that
+ // their lanes are in a consistent order.
+ CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v8i8],
+ CCBitConvertToType<f64>>>,
+ CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
+ CCBitConvertToType<f128>>>,
+
+ // An SRet is passed in X8, not X0 like a normal pointer parameter.
+ CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[X8], [W8]>>>,
+
+ // Put ByVal arguments directly on the stack. Minimum size and alignment of a
+ // slot is 64-bit.
+ CCIfByVal<CCPassByVal<8, 8>>,
+
+ // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
+ // up to eight each of GPR and FPR.
+ CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
+ CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
+ [X0, X1, X2, X3, X4, X5, X6, X7]>>,
+ // i128 is split into two i64s; we can't fit the second half in register X7.
+ CCIfType<[i64], CCIfSplit<CCAssignToRegWithShadow<[X0, X2, X4, X6],
+ [X0, X1, X3, X5]>>>,
+
+ // i128 is split to two i64s, and its stack alignment is 16 bytes.
+ CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,
+
+ CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
+ [W0, W1, W2, W3, W4, W5, W6, W7]>>,
+ CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
+ [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
+ CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
+ [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
+ CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
+ [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
+ CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32],
+ CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
+ [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
+ CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
+ CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
+
+ // Anything that does not fit in the registers above is passed on the stack.
+ CCIfType<[i1, i8, i16, f16], CCAssignToStack<8, 8>>,
+ CCIfType<[i32, f32], CCAssignToStack<8, 8>>,
+ CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8],
+ CCAssignToStack<8, 8>>,
+ CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
+ CCAssignToStack<16, 16>>
+]>;
+
+def RetCC_AArch64_AAPCS : CallingConv<[
+ CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
+ CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,
+
+ // Big endian vectors must be passed as if they were 1-element vectors so that
+ // their lanes are in a consistent order.
+ CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v8i8],
+ CCBitConvertToType<f64>>>,
+ CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
+ CCBitConvertToType<f128>>>,
+
+ CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
+ [X0, X1, X2, X3, X4, X5, X6, X7]>>,
+ CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
+ [W0, W1, W2, W3, W4, W5, W6, W7]>>,
+ CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
+ [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
+ CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
+ [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
+ CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
+ [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
+ CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32],
+ CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
+ [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
+ CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
+ CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>
+]>;
+
+
+// Darwin uses a calling convention which differs in only two ways
+// from the standard one at this level:
+// + i128s (i.e. split i64s) don't need even registers.
+// + Stack slots are sized as needed rather than being at least 64-bit.
+def CC_AArch64_DarwinPCS : CallingConv<[
+ CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
+ CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,
+
+ // An SRet is passed in X8, not X0 like a normal pointer parameter.
+ CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[X8], [W8]>>>,
+
+ // Put ByVal arguments directly on the stack. Minimum size and alignment of a
+ // slot is 64-bit.
+ CCIfByVal<CCPassByVal<8, 8>>,
+
+ // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
+ // up to eight each of GPR and FPR.
+ CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
+ CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
+ [X0, X1, X2, X3, X4, X5, X6, X7]>>,
+ // i128 is split into two i64s; we can't fit the second half in register X7.
+ CCIfType<[i64],
+ CCIfSplit<CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6],
+ [W0, W1, W2, W3, W4, W5, W6]>>>,
+ // i128 is split to two i64s, and its stack alignment is 16 bytes.
+ CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,
+
+ CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
+ [W0, W1, W2, W3, W4, W5, W6, W7]>>,
+ CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
+ [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
+ CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
+ [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
+ CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
+ [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
+ CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32],
+ CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
+ [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
+ CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64],
+ CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
+
+ // Anything that does not fit in the registers above is passed on the stack.
+ CCIf<"ValVT == MVT::i1 || ValVT == MVT::i8", CCAssignToStack<1, 1>>,
+ CCIf<"ValVT == MVT::i16 || ValVT == MVT::f16", CCAssignToStack<2, 2>>,
+ CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
+ CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8],
+ CCAssignToStack<8, 8>>,
+ CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64], CCAssignToStack<16, 16>>
+]>;
+
+def CC_AArch64_DarwinPCS_VarArg : CallingConv<[
+ CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
+ CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,
+
+ // Handle all scalar types as either i64 or f64.
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
+ CCIfType<[f16, f32], CCPromoteToType<f64>>,
+
+ // Everything is on the stack.
+ // i128 is split to two i64s, and its stack alignment is 16 bytes.
+ CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
+ CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32], CCAssignToStack<8, 8>>,
+ CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64], CCAssignToStack<16, 16>>
+]>;
+
+// The WebKit_JS calling convention passes only the first argument (the callee)
+// in a register and the remaining arguments on the stack. We allow 32-bit stack
+// slots so that WebKit can write partial values in the stack and define the
+// other 32-bit quantity as undef.
+def CC_AArch64_WebKit_JS : CallingConv<[
+ // Handle i1, i8, i16, i32, and i64 passing in register X0 (W0).
+ CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
+ CCIfType<[i32], CCAssignToRegWithShadow<[W0], [X0]>>,
+ CCIfType<[i64], CCAssignToRegWithShadow<[X0], [W0]>>,
+
+ // Pass the remaining arguments on the stack instead.
+ CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
+ CCIfType<[i64, f64], CCAssignToStack<8, 8>>
+]>;
+
+def RetCC_AArch64_WebKit_JS : CallingConv<[
+ CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
+ [X0, X1, X2, X3, X4, X5, X6, X7]>>,
+ CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
+ [W0, W1, W2, W3, W4, W5, W6, W7]>>,
+ CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
+ [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
+ CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
+ [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>
+]>;
+
+// FIXME: LR is only callee-saved in the sense that *we* preserve it and are
+// presumably a callee to someone. External functions may not do so, but this
+// is currently safe since BL has LR as an implicit-def and what happens after a
+// tail call doesn't matter.
+//
+// It would be better to model its preservation semantics properly (create a
+// vreg on entry, use it in RET & tail call generation; make that vreg def if we
+// end up saving LR as part of a call frame). Watch this space...
+def CSR_AArch64_AAPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21, X22,
+ X23, X24, X25, X26, X27, X28,
+ D8, D9, D10, D11,
+ D12, D13, D14, D15)>;
+
+// Constructors and destructors return 'this' in the iOS 64-bit C++ ABI; since
+// 'this' and the pointer return value are both passed in X0 in these cases,
+// this can be partially modelled by treating X0 as a callee-saved register;
+// only the resulting RegMask is used; the SaveList is ignored.
+//
+// (For generic ARM 64-bit ABI code, clang will not generate constructors or
+// destructors with 'this' returns, so this RegMask will not be used in that
+// case)
+def CSR_AArch64_AAPCS_ThisReturn : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X0)>;
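A hypothetical illustration of the 'this'-return case described above; the source names are invented and this only applies where the iOS 64-bit C++ ABI is in use:

#include <new>

struct Widget { int id; Widget() : id(0) {} };

Widget *makeAt(void *mem) {
  // With a 'this'-returning constructor, the call leaves 'this' in x0 on
  // return, so the caller can reuse x0 as the result without reloading it.
  return new (mem) Widget();
}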
+
+// The function used by Darwin to obtain the address of a thread-local variable
+// guarantees more than a normal AAPCS function. x16 and x17 are used on the
+// fast path for calculation, but all other registers, except X0 (argument/return)
+// and LR (it is a call, after all), are preserved.
+def CSR_AArch64_TLS_Darwin
+ : CalleeSavedRegs<(add (sub (sequence "X%u", 1, 28), X16, X17),
+ FP,
+ (sequence "Q%u", 0, 31))>;
+
+// The ELF stub used for TLS-descriptor access saves every feasible
+// register. Only X0 and LR are clobbered.
+def CSR_AArch64_TLS_ELF
+ : CalleeSavedRegs<(add (sequence "X%u", 1, 28), FP,
+ (sequence "Q%u", 0, 31))>;
+
+def CSR_AArch64_AllRegs
+ : CalleeSavedRegs<(add (sequence "W%u", 0, 30), WSP,
+ (sequence "X%u", 0, 28), FP, LR, SP,
+ (sequence "B%u", 0, 31), (sequence "H%u", 0, 31),
+ (sequence "S%u", 0, 31), (sequence "D%u", 0, 31),
+ (sequence "Q%u", 0, 31))>;
+
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp b/contrib/llvm/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
new file mode 100644
index 0000000..4d23dc5
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
@@ -0,0 +1,147 @@
+//===-- AArch64CleanupLocalDynamicTLSPass.cpp ---------------------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Local-dynamic access to thread-local variables proceeds in three stages.
+//
+// 1. The offset of this Module's thread-local area from TPIDR_EL0 is calculated
+// in much the same way as a general-dynamic TLS-descriptor access against
+// the special symbol _TLS_MODULE_BASE_.
+// 2. The variable's offset from _TLS_MODULE_BASE_ is calculated using
+// instructions with "dtprel" modifiers.
+// 3. These two are added, together with TPIDR_EL0, to obtain the variable's
+// true address.
+//
+// This is only better than general-dynamic access to the variable if two or
+// more of the first stage TLS-descriptor calculations can be combined. This
+// pass looks through a function and performs such combinations.
+//
+//===----------------------------------------------------------------------===//
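A source-level sketch of the pattern this pass cleans up (hypothetical example; the variable and function names are invented):

// Compiled for ELF with -fPIC and the local-dynamic TLS model, each access
// below would otherwise emit its own TLSDESC call against _TLS_MODULE_BASE_
// (stage 1 above). After this pass, only the first call survives; the second
// access reuses the cached module base via a plain COPY.
static __thread int tls_flags;
static __thread int tls_counter;

int touchBoth() {
  tls_flags |= 1;        // first access: TLSDESC_BLR computes the module base
  return ++tls_counter;  // second access: module base reused, no second TLSDESC call
}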
+#include "AArch64.h"
+#include "AArch64InstrInfo.h"
+#include "AArch64MachineFunctionInfo.h"
+#include "AArch64TargetMachine.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+using namespace llvm;
+
+namespace {
+struct LDTLSCleanup : public MachineFunctionPass {
+ static char ID;
+ LDTLSCleanup() : MachineFunctionPass(ID) {}
+
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
+ if (AFI->getNumLocalDynamicTLSAccesses() < 2) {
+ // No point folding accesses if there aren't at least two.
+ return false;
+ }
+
+ MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>();
+ return VisitNode(DT->getRootNode(), 0);
+ }
+
+ // Visit the dominator subtree rooted at Node in pre-order.
+ // If TLSBaseAddrReg is non-zero, then use that to replace any
+ // TLS_base_addr instructions. Otherwise, create the register
+ // when the first such instruction is seen, and then use it
+ // as we encounter more instructions.
+ bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) {
+ MachineBasicBlock *BB = Node->getBlock();
+ bool Changed = false;
+
+ // Traverse the current block.
+ for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
+ ++I) {
+ switch (I->getOpcode()) {
+ case AArch64::TLSDESC_BLR:
+ // Make sure it's a local dynamic access.
+ if (!I->getOperand(1).isSymbol() ||
+ strcmp(I->getOperand(1).getSymbolName(), "_TLS_MODULE_BASE_"))
+ break;
+
+ if (TLSBaseAddrReg)
+ I = replaceTLSBaseAddrCall(I, TLSBaseAddrReg);
+ else
+ I = setRegister(I, &TLSBaseAddrReg);
+ Changed = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ // Visit the children of this block in the dominator tree.
+ for (MachineDomTreeNode *N : *Node) {
+ Changed |= VisitNode(N, TLSBaseAddrReg);
+ }
+
+ return Changed;
+ }
+
+ // Replace the TLS_base_addr instruction I with a copy from
+ // TLSBaseAddrReg, returning the new instruction.
+ MachineInstr *replaceTLSBaseAddrCall(MachineInstr *I,
+ unsigned TLSBaseAddrReg) {
+ MachineFunction *MF = I->getParent()->getParent();
+ const AArch64TargetMachine *TM =
+ static_cast<const AArch64TargetMachine *>(&MF->getTarget());
+ const AArch64InstrInfo *TII = TM->getInstrInfo();
+
+ // Insert a Copy from TLSBaseAddrReg to x0, which is where the rest of the
+ // code sequence assumes the address will be.
+ MachineInstr *Copy = BuildMI(*I->getParent(), I, I->getDebugLoc(),
+ TII->get(TargetOpcode::COPY),
+ AArch64::X0).addReg(TLSBaseAddrReg);
+
+ // Erase the TLS_base_addr instruction.
+ I->eraseFromParent();
+
+ return Copy;
+ }
+
+  // Create a virtual register in *TLSBaseAddrReg, and populate it by
+ // inserting a copy instruction after I. Returns the new instruction.
+ MachineInstr *setRegister(MachineInstr *I, unsigned *TLSBaseAddrReg) {
+ MachineFunction *MF = I->getParent()->getParent();
+ const AArch64TargetMachine *TM =
+ static_cast<const AArch64TargetMachine *>(&MF->getTarget());
+ const AArch64InstrInfo *TII = TM->getInstrInfo();
+
+ // Create a virtual register for the TLS base address.
+ MachineRegisterInfo &RegInfo = MF->getRegInfo();
+ *TLSBaseAddrReg = RegInfo.createVirtualRegister(&AArch64::GPR64RegClass);
+
+ // Insert a copy from X0 to TLSBaseAddrReg for later.
+ MachineInstr *Next = I->getNextNode();
+ MachineInstr *Copy = BuildMI(*I->getParent(), Next, I->getDebugLoc(),
+ TII->get(TargetOpcode::COPY),
+ *TLSBaseAddrReg).addReg(AArch64::X0);
+
+ return Copy;
+ }
+
+ const char *getPassName() const override {
+ return "Local Dynamic TLS Access Clean-up";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineDominatorTree>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+};
+}
+
+char LDTLSCleanup::ID = 0;
+FunctionPass *llvm::createAArch64CleanupLocalDynamicTLSPass() {
+ return new LDTLSCleanup();
+}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp b/contrib/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp
new file mode 100644
index 0000000..6b1f096
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp
@@ -0,0 +1,1117 @@
+//===---------- AArch64CollectLOH.cpp - AArch64 collect LOH pass --*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a pass that collects Linker Optimization Hints (LOH).
+// This pass should be run at the very end of the compilation flow, just before
+// the assembly printer.
+// To be useful for the linker, the LOH must be printed into the assembly file.
+//
+// A LOH describes a sequence of instructions that may be optimized by the
+// linker.
+// This same sequence cannot be optimized by the compiler because some of
+// the information is only known at link time.
+// For instance, consider the following sequence:
+// L1: adrp xA, sym@PAGE
+// L2: add xB, xA, sym@PAGEOFF
+// L3: ldr xC, [xB, #imm]
+// This sequence can be turned into:
+// A literal load if sym@PAGE + sym@PAGEOFF + #imm - address(L3) is < 1MB:
+// L3: ldr xC, sym+#imm
+// It may also be turned into either of the following more efficient
+// code sequences:
+// - If sym@PAGEOFF + #imm fits the encoding space of L3.
+// L1: adrp xA, sym@PAGE
+// L3: ldr xC, [xA, sym@PAGEOFF + #imm]
+// - If sym@PAGE + sym@PAGEOFF - address(L1) < 1MB:
+// L1: adr xA, sym
+// L3: ldr xC, [xA, #imm]
+//
+// To be valid, a LOH must meet all the requirements needed by all the related
+// possible linker transformations.
+// For instance, using the running example, the constraints to emit
+// ".loh AdrpAddLdr" are:
+// - L1, L2, and L3 instructions are of the expected type, i.e.,
+// respectively ADRP, ADD (immediate), and LDR.
+// - The result of L1 is used only by L2.
+// - The register argument (xA) used in the ADD instruction is defined
+// only by L1.
+// - The result of L2 is used only by L3.
+// - The base address (xB) in L3 is defined only by L2.
+// - The ADRP in L1 and the ADD in L2 must reference the same symbol using
+// @PAGE/@PAGEOFF with no additional constants
+//
+// Currently supported LOHs are:
+// * So called non-ADRP-related:
+// - .loh AdrpAddLdr L1, L2, L3:
+// L1: adrp xA, sym@PAGE
+// L2: add xB, xA, sym@PAGEOFF
+// L3: ldr xC, [xB, #imm]
+// - .loh AdrpLdrGotLdr L1, L2, L3:
+// L1: adrp xA, sym@GOTPAGE
+// L2: ldr xB, [xA, sym@GOTPAGEOFF]
+// L3: ldr xC, [xB, #imm]
+// - .loh AdrpLdr L1, L3:
+// L1: adrp xA, sym@PAGE
+// L3: ldr xC, [xA, sym@PAGEOFF]
+// - .loh AdrpAddStr L1, L2, L3:
+// L1: adrp xA, sym@PAGE
+// L2: add xB, xA, sym@PAGEOFF
+// L3: str xC, [xB, #imm]
+// - .loh AdrpLdrGotStr L1, L2, L3:
+// L1: adrp xA, sym@GOTPAGE
+// L2: ldr xB, [xA, sym@GOTPAGEOFF]
+// L3: str xC, [xB, #imm]
+// - .loh AdrpAdd L1, L2:
+// L1: adrp xA, sym@PAGE
+// L2: add xB, xA, sym@PAGEOFF
+// For all these LOHs, L1, L2, and L3 form a simple chain:
+// the result of L1 is used only by L2, and the result of L2 only by L3.
+// The LOH-related argument of L3 is defined only by L2, and the LOH-related
+// argument of L2 only by L1.
+// All these LOHs aim at using more efficient load/store patterns by folding
+// some instructions used to compute the address directly into the load/store.
+//
+// * So called ADRP-related:
+// - .loh AdrpAdrp L2, L1:
+// L2: ADRP xA, sym1@PAGE
+// L1: ADRP xA, sym2@PAGE
+// L2 dominates L1 and xA is not redefined between L2 and L1
+// This LOH aims at getting rid of redundant ADRP instructions.
+//
+// The overall design for emitting the LOHs is:
+// 1. AArch64CollectLOH (this pass) records the LOHs in the AArch64FunctionInfo.
+// 2. AArch64AsmPrinter reads the LOHs from AArch64FunctionInfo and it:
+// 1. Associates a label with them.
+// 2. Emits them in a MCStreamer (EmitLOHDirective).
+// - The MCMachOStreamer records them into the MCAssembler.
+// - The MCAsmStreamer prints them.
+// - Other MCStreamers ignore them.
+// 3. Closes the MCStreamer:
+// - The MachObjectWriter gets them from the MCAssembler and writes
+// them in the object file.
+// - Other ObjectWriters ignore them.
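+//
+// For illustration only (the label and directive spelling below is a sketch
+// of typical Mach-O assembly output, not taken from this file), an AdrpAdrp
+// LOH might appear in the emitted assembly as:
+//   Lloh0:
+//     adrp x8, _sym1@PAGE
+//   Lloh1:
+//     adrp x8, _sym2@PAGE
+//     ...
+//     .loh AdrpAdrp Lloh0, Lloh1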
+//===----------------------------------------------------------------------===//
+
+#include "AArch64.h"
+#include "AArch64InstrInfo.h"
+#include "AArch64MachineFunctionInfo.h"
+#include "MCTargetDesc/AArch64AddressingModes.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/Statistic.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "aarch64-collect-loh"
+
+static cl::opt<bool>
+PreCollectRegister("aarch64-collect-loh-pre-collect-register", cl::Hidden,
+                   cl::desc("Restrict analysis to registers involved"
+ " in LOHs"),
+ cl::init(true));
+
+static cl::opt<bool>
+BasicBlockScopeOnly("aarch64-collect-loh-bb-only", cl::Hidden,
+ cl::desc("Restrict analysis at basic block scope"),
+ cl::init(true));
+
+STATISTIC(NumADRPSimpleCandidate,
+          "Number of simplifiable ADRP dominated by another");
+STATISTIC(NumADRPComplexCandidate2,
+ "Number of simplifiable ADRP reachable by 2 defs");
+STATISTIC(NumADRPComplexCandidate3,
+ "Number of simplifiable ADRP reachable by 3 defs");
+STATISTIC(NumADRPComplexCandidateOther,
+ "Number of simplifiable ADRP reachable by 4 or more defs");
+STATISTIC(NumADDToSTRWithImm,
+ "Number of simplifiable STR with imm reachable by ADD");
+STATISTIC(NumLDRToSTRWithImm,
+ "Number of simplifiable STR with imm reachable by LDR");
+STATISTIC(NumADDToSTR, "Number of simplifiable STR reachable by ADD");
+STATISTIC(NumLDRToSTR, "Number of simplifiable STR reachable by LDR");
+STATISTIC(NumADDToLDRWithImm,
+ "Number of simplifiable LDR with imm reachable by ADD");
+STATISTIC(NumLDRToLDRWithImm,
+ "Number of simplifiable LDR with imm reachable by LDR");
+STATISTIC(NumADDToLDR, "Number of simplifiable LDR reachable by ADD");
+STATISTIC(NumLDRToLDR, "Number of simplifiable LDR reachable by LDR");
+STATISTIC(NumADRPToLDR, "Number of simplifiable LDR reachable by ADRP");
+STATISTIC(NumCplxLvl1, "Number of complex case of level 1");
+STATISTIC(NumTooCplxLvl1, "Number of too complex case of level 1");
+STATISTIC(NumCplxLvl2, "Number of complex case of level 2");
+STATISTIC(NumTooCplxLvl2, "Number of too complex case of level 2");
+STATISTIC(NumADRSimpleCandidate, "Number of simplifiable ADRP + ADD");
+STATISTIC(NumADRComplexCandidate, "Number of too complex ADRP + ADD");
+
+namespace llvm {
+void initializeAArch64CollectLOHPass(PassRegistry &);
+}
+
+namespace {
+struct AArch64CollectLOH : public MachineFunctionPass {
+ static char ID;
+ AArch64CollectLOH() : MachineFunctionPass(ID) {
+ initializeAArch64CollectLOHPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ const char *getPassName() const override {
+ return "AArch64 Collect Linker Optimization Hint (LOH)";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ AU.addRequired<MachineDominatorTree>();
+ }
+
+private:
+};
+
+/// A set of MachineInstrs.
+typedef SetVector<const MachineInstr *> SetOfMachineInstr;
+/// Map a basic block to a set of instructions per register.
+/// This is used to represent the exposed uses of a basic block
+/// per register.
+typedef MapVector<const MachineBasicBlock *, SetOfMachineInstr *>
+BlockToSetOfInstrsPerColor;
+/// Map a basic block to an instruction per register.
+/// This is used to represent the live-out definitions of a basic block
+/// per register.
+typedef MapVector<const MachineBasicBlock *, const MachineInstr **>
+BlockToInstrPerColor;
+/// Map an instruction to a set of instructions. Used to represent the
+/// mapping def to reachable uses or use to definitions.
+typedef MapVector<const MachineInstr *, SetOfMachineInstr> InstrToInstrs;
+/// Map a basic block to a BitVector.
+/// This is used to record the kill registers per basic block.
+typedef MapVector<const MachineBasicBlock *, BitVector> BlockToRegSet;
+
+/// Map a register to a dense id.
+typedef DenseMap<unsigned, unsigned> MapRegToId;
+/// Map a dense id to a register. Used for debug purposes.
+typedef SmallVector<unsigned, 32> MapIdToReg;
+} // end anonymous namespace.
+
+char AArch64CollectLOH::ID = 0;
+
+INITIALIZE_PASS_BEGIN(AArch64CollectLOH, "aarch64-collect-loh",
+ "AArch64 Collect Linker Optimization Hint (LOH)", false,
+ false)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_END(AArch64CollectLOH, "aarch64-collect-loh",
+ "AArch64 Collect Linker Optimization Hint (LOH)", false,
+ false)
+
+/// Given a couple (MBB, reg), get the corresponding set of instructions from
+/// the given "sets".
+/// If this couple does not reference any set, an empty set is added to "sets"
+/// for this couple and returned.
+/// \param nbRegs is used internally to allocate some memory. It must be consistent
+/// with the way sets is used.
+static SetOfMachineInstr &getSet(BlockToSetOfInstrsPerColor &sets,
+ const MachineBasicBlock &MBB, unsigned reg,
+ unsigned nbRegs) {
+ SetOfMachineInstr *result;
+ BlockToSetOfInstrsPerColor::iterator it = sets.find(&MBB);
+ if (it != sets.end())
+ result = it->second;
+ else
+ result = sets[&MBB] = new SetOfMachineInstr[nbRegs];
+
+ return result[reg];
+}
+
+/// Given a couple (reg, MI), get the corresponding set of instructions from
+/// the given "sets".
+/// This is used to get the uses recorded in sets of a definition identified by
+/// MI and reg, i.e., MI defines reg.
+/// If the couple does not reference anything, an empty set is added to
+/// "sets[reg]".
+/// \pre set[reg] is valid.
+static SetOfMachineInstr &getUses(InstrToInstrs *sets, unsigned reg,
+ const MachineInstr &MI) {
+ return sets[reg][&MI];
+}
+
+/// Same as getUses but does not modify the input map: sets.
+/// \return NULL if the couple (reg, MI) is not in sets.
+static const SetOfMachineInstr *getUses(const InstrToInstrs *sets, unsigned reg,
+ const MachineInstr &MI) {
+ InstrToInstrs::const_iterator Res = sets[reg].find(&MI);
+ if (Res != sets[reg].end())
+ return &(Res->second);
+ return nullptr;
+}
+
+/// Initialize the reaching definition algorithm:
+/// For each basic block BB in MF, record:
+/// - its kill set.
+/// - its reachable uses (uses that are exposed to BB's predecessors).
+/// - its generated definitions.
+/// \param DummyOp if not NULL, specifies a Dummy Operation to be added to
+/// the list of uses of exposed definitions.
+/// \param ADRPMode specifies to only consider ADRP instructions for generated
+/// definitions. It also considers definitions of ADRP instructions as uses and
+/// ignores other uses. ADRPMode is used to collect the information for LOHs
+/// that involve ADRP operations only.
+static void initReachingDef(MachineFunction &MF,
+ InstrToInstrs *ColorOpToReachedUses,
+ BlockToInstrPerColor &Gen, BlockToRegSet &Kill,
+ BlockToSetOfInstrsPerColor &ReachableUses,
+ const MapRegToId &RegToId,
+ const MachineInstr *DummyOp, bool ADRPMode) {
+ const TargetMachine &TM = MF.getTarget();
+ const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+
+ unsigned NbReg = RegToId.size();
+
+ for (MachineBasicBlock &MBB : MF) {
+ const MachineInstr **&BBGen = Gen[&MBB];
+ BBGen = new const MachineInstr *[NbReg];
+ memset(BBGen, 0, sizeof(const MachineInstr *) * NbReg);
+
+ BitVector &BBKillSet = Kill[&MBB];
+ BBKillSet.resize(NbReg);
+ for (const MachineInstr &MI : MBB) {
+ bool IsADRP = MI.getOpcode() == AArch64::ADRP;
+
+ // Process uses first.
+ if (IsADRP || !ADRPMode)
+ for (const MachineOperand &MO : MI.operands()) {
+ // Treat ADRP def as use, as the goal of the analysis is to find
+ // ADRP defs reached by other ADRP defs.
+ if (!MO.isReg() || (!ADRPMode && !MO.isUse()) ||
+ (ADRPMode && (!IsADRP || !MO.isDef())))
+ continue;
+ unsigned CurReg = MO.getReg();
+ MapRegToId::const_iterator ItCurRegId = RegToId.find(CurReg);
+ if (ItCurRegId == RegToId.end())
+ continue;
+ CurReg = ItCurRegId->second;
+
+ // if CurReg has not been defined, this use is reachable.
+ if (!BBGen[CurReg] && !BBKillSet.test(CurReg))
+ getSet(ReachableUses, MBB, CurReg, NbReg).insert(&MI);
+ // current basic block definition for this color, if any, is in Gen.
+ if (BBGen[CurReg])
+ getUses(ColorOpToReachedUses, CurReg, *BBGen[CurReg]).insert(&MI);
+ }
+
+ // Process clobbers.
+ for (const MachineOperand &MO : MI.operands()) {
+ if (!MO.isRegMask())
+ continue;
+ // Clobbers kill the related colors.
+ const uint32_t *PreservedRegs = MO.getRegMask();
+
+ // Set generated regs.
+ for (const auto Entry : RegToId) {
+ unsigned Reg = Entry.second;
+ // Use the global register ID when querying APIs external to this
+ // pass.
+ if (MachineOperand::clobbersPhysReg(PreservedRegs, Entry.first)) {
+            // Do not record a clobbered definition unless in ADRPMode.
+            // Such a definition is not used anyway (otherwise register
+            // allocation would be wrong).
+ BBGen[Reg] = ADRPMode ? &MI : nullptr;
+ BBKillSet.set(Reg);
+ }
+ }
+ }
+
+ // Process register defs.
+ for (const MachineOperand &MO : MI.operands()) {
+ if (!MO.isReg() || !MO.isDef())
+ continue;
+ unsigned CurReg = MO.getReg();
+ MapRegToId::const_iterator ItCurRegId = RegToId.find(CurReg);
+ if (ItCurRegId == RegToId.end())
+ continue;
+
+ for (MCRegAliasIterator AI(CurReg, TRI, true); AI.isValid(); ++AI) {
+ MapRegToId::const_iterator ItRegId = RegToId.find(*AI);
+ assert(ItRegId != RegToId.end() &&
+ "Sub-register of an "
+ "involved register, not recorded as involved!");
+ BBKillSet.set(ItRegId->second);
+ BBGen[ItRegId->second] = &MI;
+ }
+ BBGen[ItCurRegId->second] = &MI;
+ }
+ }
+
+    // If we restrict our analysis to basic block scope, conservatively add a
+    // dummy use for each generated value.
+ if (!ADRPMode && DummyOp && !MBB.succ_empty())
+ for (unsigned CurReg = 0; CurReg < NbReg; ++CurReg)
+ if (BBGen[CurReg])
+ getUses(ColorOpToReachedUses, CurReg, *BBGen[CurReg]).insert(DummyOp);
+ }
+}
+
+/// Reaching def core algorithm:
+/// while an Out has changed
+/// for each bb
+/// for each color
+/// In[bb][color] = U Out[bb.predecessors][color]
+/// insert reachableUses[bb][color] in each in[bb][color]
+/// op.reachedUses
+///
+/// Out[bb] = Gen[bb] U (In[bb] - Kill[bb])
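+///
+/// As an illustrative example (block, register, and def names are made up):
+/// if a block BB has predecessors P1 and P2 with Out[P1][x0] = {D1} and
+/// Out[P2][x0] = {D2}, then In[BB][x0] = {D1, D2} and every use recorded in
+/// ReachableUses[BB][x0] is added to the reached uses of both D1 and D2.
+/// If BB generates a new definition D3 of x0 (which also kills x0), then
+/// Out[BB][x0] = {D3}; otherwise Out[BB][x0] = {D1, D2}.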
+static void reachingDefAlgorithm(MachineFunction &MF,
+ InstrToInstrs *ColorOpToReachedUses,
+ BlockToSetOfInstrsPerColor &In,
+ BlockToSetOfInstrsPerColor &Out,
+ BlockToInstrPerColor &Gen, BlockToRegSet &Kill,
+ BlockToSetOfInstrsPerColor &ReachableUses,
+ unsigned NbReg) {
+ bool HasChanged;
+ do {
+ HasChanged = false;
+ for (MachineBasicBlock &MBB : MF) {
+ unsigned CurReg;
+ for (CurReg = 0; CurReg < NbReg; ++CurReg) {
+ SetOfMachineInstr &BBInSet = getSet(In, MBB, CurReg, NbReg);
+ SetOfMachineInstr &BBReachableUses =
+ getSet(ReachableUses, MBB, CurReg, NbReg);
+ SetOfMachineInstr &BBOutSet = getSet(Out, MBB, CurReg, NbReg);
+ unsigned Size = BBOutSet.size();
+ // In[bb][color] = U Out[bb.predecessors][color]
+ for (MachineBasicBlock *PredMBB : MBB.predecessors()) {
+ SetOfMachineInstr &PredOutSet = getSet(Out, *PredMBB, CurReg, NbReg);
+ BBInSet.insert(PredOutSet.begin(), PredOutSet.end());
+ }
+        // insert reachableUses[bb][color] in each in[bb][color] op.reachedUses
+ for (const MachineInstr *MI : BBInSet) {
+ SetOfMachineInstr &OpReachedUses =
+ getUses(ColorOpToReachedUses, CurReg, *MI);
+ OpReachedUses.insert(BBReachableUses.begin(), BBReachableUses.end());
+ }
+ // Out[bb] = Gen[bb] U (In[bb] - Kill[bb])
+ if (!Kill[&MBB].test(CurReg))
+ BBOutSet.insert(BBInSet.begin(), BBInSet.end());
+ if (Gen[&MBB][CurReg])
+ BBOutSet.insert(Gen[&MBB][CurReg]);
+ HasChanged |= BBOutSet.size() != Size;
+ }
+ }
+ } while (HasChanged);
+}
+
+/// Release all memory dynamically allocated during the reaching
+/// definition algorithm.
+static void finitReachingDef(BlockToSetOfInstrsPerColor &In,
+ BlockToSetOfInstrsPerColor &Out,
+ BlockToInstrPerColor &Gen,
+ BlockToSetOfInstrsPerColor &ReachableUses) {
+ for (auto &IT : Out)
+ delete[] IT.second;
+ for (auto &IT : In)
+ delete[] IT.second;
+ for (auto &IT : ReachableUses)
+ delete[] IT.second;
+ for (auto &IT : Gen)
+ delete[] IT.second;
+}
+
+/// Reaching definition algorithm.
+/// \param MF function on which the algorithm will operate.
+/// \param[out] ColorOpToReachedUses will contain the result of the reaching
+/// def algorithm.
+/// \param ADRPMode specify whether the reaching def algorithm should be tuned
+/// for ADRP optimization. \see initReachingDef for more details.
+/// \param DummyOp if not NULL, the algorithm will work at
+/// basic block scope and will record a use of @p DummyOp for every exposed
+/// definition.
+/// \pre ColorOpToReachedUses is an array of InstrToInstrs with at least one
+/// entry per register.
+static void reachingDef(MachineFunction &MF,
+ InstrToInstrs *ColorOpToReachedUses,
+ const MapRegToId &RegToId, bool ADRPMode = false,
+ const MachineInstr *DummyOp = nullptr) {
+ // structures:
+ // For each basic block.
+ // Out: a set per color of definitions that reach the
+ // out boundary of this block.
+ // In: Same as Out but for in boundary.
+ // Gen: generated color in this block (one operation per color).
+ // Kill: register set of killed color in this block.
+ // ReachableUses: a set per color of uses (operation) reachable
+ // for "In" definitions.
+ BlockToSetOfInstrsPerColor Out, In, ReachableUses;
+ BlockToInstrPerColor Gen;
+ BlockToRegSet Kill;
+
+ // Initialize Gen, kill and reachableUses.
+ initReachingDef(MF, ColorOpToReachedUses, Gen, Kill, ReachableUses, RegToId,
+ DummyOp, ADRPMode);
+
+ // Algo.
+ if (!DummyOp)
+ reachingDefAlgorithm(MF, ColorOpToReachedUses, In, Out, Gen, Kill,
+ ReachableUses, RegToId.size());
+
+ // finit.
+ finitReachingDef(In, Out, Gen, ReachableUses);
+}
+
+#ifndef NDEBUG
+/// print the result of the reaching definition algorithm.
+static void printReachingDef(const InstrToInstrs *ColorOpToReachedUses,
+ unsigned NbReg, const TargetRegisterInfo *TRI,
+ const MapIdToReg &IdToReg) {
+ unsigned CurReg;
+ for (CurReg = 0; CurReg < NbReg; ++CurReg) {
+ if (ColorOpToReachedUses[CurReg].empty())
+ continue;
+ DEBUG(dbgs() << "*** Reg " << PrintReg(IdToReg[CurReg], TRI) << " ***\n");
+
+ for (const auto &DefsIt : ColorOpToReachedUses[CurReg]) {
+ DEBUG(dbgs() << "Def:\n");
+ DEBUG(DefsIt.first->print(dbgs()));
+ DEBUG(dbgs() << "Reachable uses:\n");
+ for (const MachineInstr *MI : DefsIt.second) {
+ DEBUG(MI->print(dbgs()));
+ }
+ }
+ }
+}
+#endif // NDEBUG
+
+/// Answer the following question: Can Def be one of the definitions
+/// involved in a part of a LOH?
+static bool canDefBePartOfLOH(const MachineInstr *Def) {
+ unsigned Opc = Def->getOpcode();
+ // Accept ADRP, ADDLow and LOADGot.
+ switch (Opc) {
+ default:
+ return false;
+ case AArch64::ADRP:
+ return true;
+ case AArch64::ADDXri:
+ // Check immediate to see if the immediate is an address.
+ switch (Def->getOperand(2).getType()) {
+ default:
+ return false;
+ case MachineOperand::MO_GlobalAddress:
+ case MachineOperand::MO_JumpTableIndex:
+ case MachineOperand::MO_ConstantPoolIndex:
+ case MachineOperand::MO_BlockAddress:
+ return true;
+ }
+ case AArch64::LDRXui:
+ // Check immediate to see if the immediate is an address.
+ switch (Def->getOperand(2).getType()) {
+ default:
+ return false;
+ case MachineOperand::MO_GlobalAddress:
+ return true;
+ }
+ }
+ // Unreachable.
+ return false;
+}
+
+/// Check whether the given instruction can be the end of a LOH chain
+/// involving a store.
+static bool isCandidateStore(const MachineInstr *Instr) {
+ switch (Instr->getOpcode()) {
+ default:
+ return false;
+ case AArch64::STRBui:
+ case AArch64::STRHui:
+ case AArch64::STRWui:
+ case AArch64::STRXui:
+ case AArch64::STRSui:
+ case AArch64::STRDui:
+ case AArch64::STRQui:
+    // In case we have str xA, [xA, #imm], these are two different uses
+ // of xA and we cannot fold, otherwise the xA stored may be wrong,
+ // even if #imm == 0.
+ if (Instr->getOperand(0).getReg() != Instr->getOperand(1).getReg())
+ return true;
+ }
+ return false;
+}
+
+/// Given the result of a reaching definition algorithm in ColorOpToReachedUses,
+/// build the Use to Defs information and filter out obvious non-LOH candidates.
+/// In ADRPMode, non-LOH candidates are "uses" with non-ADRP definitions.
+/// In non-ADRPMode, non-LOH candidates are "uses" with several definitions,
+/// i.e., no simple chain.
+/// \param ADRPMode -- \see initReachingDef.
+static void reachedUsesToDefs(InstrToInstrs &UseToReachingDefs,
+ const InstrToInstrs *ColorOpToReachedUses,
+ const MapRegToId &RegToId,
+ bool ADRPMode = false) {
+
+ SetOfMachineInstr NotCandidate;
+ unsigned NbReg = RegToId.size();
+ MapRegToId::const_iterator EndIt = RegToId.end();
+ for (unsigned CurReg = 0; CurReg < NbReg; ++CurReg) {
+ // If this color is never defined, continue.
+ if (ColorOpToReachedUses[CurReg].empty())
+ continue;
+
+ for (const auto &DefsIt : ColorOpToReachedUses[CurReg]) {
+ for (const MachineInstr *MI : DefsIt.second) {
+ const MachineInstr *Def = DefsIt.first;
+ MapRegToId::const_iterator It;
+ // if all the reaching defs are not adrp, this use will not be
+ // simplifiable.
+ if ((ADRPMode && Def->getOpcode() != AArch64::ADRP) ||
+ (!ADRPMode && !canDefBePartOfLOH(Def)) ||
+ (!ADRPMode && isCandidateStore(MI) &&
+             // stores are LOH candidates iff the end of the chain is used as
+             // the base.
+ ((It = RegToId.find((MI)->getOperand(1).getReg())) == EndIt ||
+ It->second != CurReg))) {
+ NotCandidate.insert(MI);
+ continue;
+ }
+ // Do not consider self reaching as a simplifiable case for ADRP.
+ if (!ADRPMode || MI != DefsIt.first) {
+ UseToReachingDefs[MI].insert(DefsIt.first);
+          // If MI has several reaching definitions, it is not a
+          // candidate for simplification in non-ADRPMode.
+ if (!ADRPMode && UseToReachingDefs[MI].size() > 1)
+ NotCandidate.insert(MI);
+ }
+ }
+ }
+ }
+ for (const MachineInstr *Elem : NotCandidate) {
+ DEBUG(dbgs() << "Too many reaching defs: " << *Elem << "\n");
+ // It would have been better if we could just remove the entry
+ // from the map. Because of that, we have to filter the garbage
+    // (second.empty) in the subsequent analysis.
+ UseToReachingDefs[Elem].clear();
+ }
+}
+
+/// Based on the use to defs information (in ADRPMode), compute the
+/// ADRP-related LOH opportunities.
+static void computeADRP(const InstrToInstrs &UseToDefs,
+ AArch64FunctionInfo &AArch64FI,
+ const MachineDominatorTree *MDT) {
+ DEBUG(dbgs() << "*** Compute LOH for ADRP\n");
+ for (const auto &Entry : UseToDefs) {
+ unsigned Size = Entry.second.size();
+ if (Size == 0)
+ continue;
+ if (Size == 1) {
+ const MachineInstr *L2 = *Entry.second.begin();
+ const MachineInstr *L1 = Entry.first;
+ if (!MDT->dominates(L2, L1)) {
+ DEBUG(dbgs() << "Dominance check failed:\n" << *L2 << '\n' << *L1
+ << '\n');
+ continue;
+ }
+ DEBUG(dbgs() << "Record AdrpAdrp:\n" << *L2 << '\n' << *L1 << '\n');
+ SmallVector<const MachineInstr *, 2> Args;
+ Args.push_back(L2);
+ Args.push_back(L1);
+ AArch64FI.addLOHDirective(MCLOH_AdrpAdrp, Args);
+ ++NumADRPSimpleCandidate;
+ }
+#ifdef DEBUG
+ else if (Size == 2)
+ ++NumADRPComplexCandidate2;
+ else if (Size == 3)
+ ++NumADRPComplexCandidate3;
+ else
+ ++NumADRPComplexCandidateOther;
+#endif
+ // if Size < 1, the use should have been removed from the candidates
+ assert(Size >= 1 && "No reaching defs for that use!");
+ }
+}
+
+/// Check whether the given instruction can be the end of a LOH chain
+/// involving a load.
+static bool isCandidateLoad(const MachineInstr *Instr) {
+ switch (Instr->getOpcode()) {
+ default:
+ return false;
+ case AArch64::LDRSBWui:
+ case AArch64::LDRSBXui:
+ case AArch64::LDRSHWui:
+ case AArch64::LDRSHXui:
+ case AArch64::LDRSWui:
+ case AArch64::LDRBui:
+ case AArch64::LDRHui:
+ case AArch64::LDRWui:
+ case AArch64::LDRXui:
+ case AArch64::LDRSui:
+ case AArch64::LDRDui:
+ case AArch64::LDRQui:
+ if (Instr->getOperand(2).getTargetFlags() & AArch64II::MO_GOT)
+ return false;
+ return true;
+ }
+ // Unreachable.
+ return false;
+}
+
+/// Check whether the given instruction can load a literal.
+static bool supportLoadFromLiteral(const MachineInstr *Instr) {
+ switch (Instr->getOpcode()) {
+ default:
+ return false;
+ case AArch64::LDRSWui:
+ case AArch64::LDRWui:
+ case AArch64::LDRXui:
+ case AArch64::LDRSui:
+ case AArch64::LDRDui:
+ case AArch64::LDRQui:
+ return true;
+ }
+ // Unreachable.
+ return false;
+}
+
+/// Check whether the given instruction is a LOH candidate.
+/// \param UseToDefs is used to check that Instr is at the end of a supported
+/// LOH chain.
+/// \pre UseToDefs contains only one def per use, i.e., obvious non-candidates
+/// have already been filtered out.
+static bool isCandidate(const MachineInstr *Instr,
+ const InstrToInstrs &UseToDefs,
+ const MachineDominatorTree *MDT) {
+ if (!isCandidateLoad(Instr) && !isCandidateStore(Instr))
+ return false;
+
+ const MachineInstr *Def = *UseToDefs.find(Instr)->second.begin();
+ if (Def->getOpcode() != AArch64::ADRP) {
+ // At this point, Def is ADDXri or LDRXui of the right type of
+ // symbol, because we filtered out the uses that were not defined
+    // by these kinds of instructions (+ ADRP).
+
+ // Check if this forms a simple chain: each intermediate node must
+    // dominate the next one.
+ if (!MDT->dominates(Def, Instr))
+ return false;
+ // Move one node up in the simple chain.
+    // The map may contain garbage we have to ignore.
+    if (UseToDefs.find(Def) == UseToDefs.end() ||
+        UseToDefs.find(Def)->second.empty())
+      return false;
+ Instr = Def;
+ Def = *UseToDefs.find(Def)->second.begin();
+ }
+ // Check if we reached the top of the simple chain:
+ // - top is ADRP.
+ // - check the simple chain property: each intermediate node must
+  //   dominate the next one.
+ if (Def->getOpcode() == AArch64::ADRP)
+ return MDT->dominates(Def, Instr);
+ return false;
+}
+
+static bool registerADRCandidate(const MachineInstr &Use,
+ const InstrToInstrs &UseToDefs,
+ const InstrToInstrs *DefsPerColorToUses,
+ AArch64FunctionInfo &AArch64FI,
+ SetOfMachineInstr *InvolvedInLOHs,
+ const MapRegToId &RegToId) {
+ // Look for opportunities to turn ADRP -> ADD or
+ // ADRP -> LDR GOTPAGEOFF into ADR.
+  // If the ADRP has more than one use, give up.
+ if (Use.getOpcode() != AArch64::ADDXri &&
+ (Use.getOpcode() != AArch64::LDRXui ||
+ !(Use.getOperand(2).getTargetFlags() & AArch64II::MO_GOT)))
+ return false;
+ InstrToInstrs::const_iterator It = UseToDefs.find(&Use);
+ // The map may contain garbage that we need to ignore.
+ if (It == UseToDefs.end() || It->second.empty())
+ return false;
+ const MachineInstr &Def = **It->second.begin();
+ if (Def.getOpcode() != AArch64::ADRP)
+ return false;
+ // Check the number of users of ADRP.
+ const SetOfMachineInstr *Users =
+ getUses(DefsPerColorToUses,
+ RegToId.find(Def.getOperand(0).getReg())->second, Def);
+ if (Users->size() > 1) {
+ ++NumADRComplexCandidate;
+ return false;
+ }
+ ++NumADRSimpleCandidate;
+ assert((!InvolvedInLOHs || InvolvedInLOHs->insert(&Def)) &&
+ "ADRP already involved in LOH.");
+ assert((!InvolvedInLOHs || InvolvedInLOHs->insert(&Use)) &&
+ "ADD already involved in LOH.");
+ DEBUG(dbgs() << "Record AdrpAdd\n" << Def << '\n' << Use << '\n');
+
+ SmallVector<const MachineInstr *, 2> Args;
+ Args.push_back(&Def);
+ Args.push_back(&Use);
+
+ AArch64FI.addLOHDirective(Use.getOpcode() == AArch64::ADDXri ? MCLOH_AdrpAdd
+ : MCLOH_AdrpLdrGot,
+ Args);
+ return true;
+}
+
+/// Based on the use to defs information (in non-ADRPMode), compute the
+/// non-ADRP-related LOH opportunities.
+static void computeOthers(const InstrToInstrs &UseToDefs,
+ const InstrToInstrs *DefsPerColorToUses,
+ AArch64FunctionInfo &AArch64FI, const MapRegToId &RegToId,
+ const MachineDominatorTree *MDT) {
+ SetOfMachineInstr *InvolvedInLOHs = nullptr;
+#ifdef DEBUG
+ SetOfMachineInstr InvolvedInLOHsStorage;
+ InvolvedInLOHs = &InvolvedInLOHsStorage;
+#endif // DEBUG
+ DEBUG(dbgs() << "*** Compute LOH for Others\n");
+ // ADRP -> ADD/LDR -> LDR/STR pattern.
+ // Fall back to ADRP -> ADD pattern if we fail to catch the bigger pattern.
+
+ // FIXME: When the statistics are not important,
+ // This initial filtering loop can be merged into the next loop.
+  // Currently we have not done it, so that the code stays the same for both
+  // DEBUG and NDEBUG builds. Indeed, the iterator of the second loop would need
+ // to be changed.
+ SetOfMachineInstr PotentialCandidates;
+ SetOfMachineInstr PotentialADROpportunities;
+ for (auto &Use : UseToDefs) {
+ // If no definition is available, this is a non candidate.
+ if (Use.second.empty())
+ continue;
+ // Keep only instructions that are load or store and at the end of
+    // an ADRP -> ADD/LDR/Nothing chain.
+ // We already filtered out the no-chain cases.
+ if (!isCandidate(Use.first, UseToDefs, MDT)) {
+ PotentialADROpportunities.insert(Use.first);
+ continue;
+ }
+ PotentialCandidates.insert(Use.first);
+ }
+
+ // Make the following distinctions for statistics as the linker does
+ // know how to decode instructions:
+  // - ADD/LDR/Nothing make three different patterns.
+  // - LDR/STR make two different patterns.
+  // Hence, 6 - 1 = 5 base patterns
+  // (because ADRP -> Nothing -> STR is not simplifiable).
+
+  // The linker only supports a simple semantic, i.e., if pattern A,
+  // do B.
+ // However, we want to see the opportunity we may miss if we were able to
+ // catch more complex cases.
+
+  // PotentialCandidates are the results of a chain ADRP -> ADD/LDR -> LDR/STR.
+  // A potential candidate becomes a candidate if its current immediate
+  // operand is zero and each node of the chain has only one user.
+#ifdef DEBUG
+ SetOfMachineInstr DefsOfPotentialCandidates;
+#endif
+ for (const MachineInstr *Candidate : PotentialCandidates) {
+ // Get the definition of the candidate i.e., ADD or LDR.
+ const MachineInstr *Def = *UseToDefs.find(Candidate)->second.begin();
+ // Record the elements of the chain.
+ const MachineInstr *L1 = Def;
+ const MachineInstr *L2 = nullptr;
+ unsigned ImmediateDefOpc = Def->getOpcode();
+ if (Def->getOpcode() != AArch64::ADRP) {
+ // Check the number of users of this node.
+ const SetOfMachineInstr *Users =
+ getUses(DefsPerColorToUses,
+ RegToId.find(Def->getOperand(0).getReg())->second, *Def);
+ if (Users->size() > 1) {
+#ifdef DEBUG
+        // If all the uses of this def are in PotentialCandidates, this is
+ // a complex candidate of level 2.
+ bool IsLevel2 = true;
+ for (const MachineInstr *MI : *Users) {
+ if (!PotentialCandidates.count(MI)) {
+ ++NumTooCplxLvl2;
+ IsLevel2 = false;
+ break;
+ }
+ }
+ if (IsLevel2)
+ ++NumCplxLvl2;
+#endif // DEBUG
+ PotentialADROpportunities.insert(Def);
+ continue;
+ }
+ L2 = Def;
+ Def = *UseToDefs.find(Def)->second.begin();
+ L1 = Def;
+ } // else the element in the middle of the chain is nothing, thus
+ // Def already contains the first element of the chain.
+
+ // Check the number of users of the first node in the chain, i.e., ADRP
+ const SetOfMachineInstr *Users =
+ getUses(DefsPerColorToUses,
+ RegToId.find(Def->getOperand(0).getReg())->second, *Def);
+ if (Users->size() > 1) {
+#ifdef DEBUG
+      // If all the uses of this def are in the defs of the potential
+      // candidates, this is a complex candidate of level 1.
+ if (DefsOfPotentialCandidates.empty()) {
+ // lazy init
+ DefsOfPotentialCandidates = PotentialCandidates;
+ for (const MachineInstr *Candidate : PotentialCandidates) {
+ if (!UseToDefs.find(Candidate)->second.empty())
+ DefsOfPotentialCandidates.insert(
+ *UseToDefs.find(Candidate)->second.begin());
+ }
+ }
+ bool Found = false;
+ for (auto &Use : *Users) {
+ if (!DefsOfPotentialCandidates.count(Use)) {
+ ++NumTooCplxLvl1;
+ Found = true;
+ break;
+ }
+ }
+ if (!Found)
+ ++NumCplxLvl1;
+#endif // DEBUG
+ continue;
+ }
+
+ bool IsL2Add = (ImmediateDefOpc == AArch64::ADDXri);
+ // If the chain is three instructions long and ldr is the second element,
+    // then this ldr must load from the GOT, otherwise this is not a correct chain.
+ if (L2 && !IsL2Add && L2->getOperand(2).getTargetFlags() != AArch64II::MO_GOT)
+ continue;
+ SmallVector<const MachineInstr *, 3> Args;
+ MCLOHType Kind;
+ if (isCandidateLoad(Candidate)) {
+ if (!L2) {
+ // At this point, the candidate LOH indicates that the ldr instruction
+        // may use a direct access to the symbol. There is no such encoding
+        // for byte and half-word loads.
+ if (!supportLoadFromLiteral(Candidate))
+ continue;
+
+ DEBUG(dbgs() << "Record AdrpLdr:\n" << *L1 << '\n' << *Candidate
+ << '\n');
+ Kind = MCLOH_AdrpLdr;
+ Args.push_back(L1);
+ Args.push_back(Candidate);
+ assert((!InvolvedInLOHs || InvolvedInLOHs->insert(L1)) &&
+ "L1 already involved in LOH.");
+ assert((!InvolvedInLOHs || InvolvedInLOHs->insert(Candidate)) &&
+ "Candidate already involved in LOH.");
+ ++NumADRPToLDR;
+ } else {
+ DEBUG(dbgs() << "Record Adrp" << (IsL2Add ? "Add" : "LdrGot")
+ << "Ldr:\n" << *L1 << '\n' << *L2 << '\n' << *Candidate
+ << '\n');
+
+ Kind = IsL2Add ? MCLOH_AdrpAddLdr : MCLOH_AdrpLdrGotLdr;
+ Args.push_back(L1);
+ Args.push_back(L2);
+ Args.push_back(Candidate);
+
+ PotentialADROpportunities.remove(L2);
+ assert((!InvolvedInLOHs || InvolvedInLOHs->insert(L1)) &&
+ "L1 already involved in LOH.");
+ assert((!InvolvedInLOHs || InvolvedInLOHs->insert(L2)) &&
+ "L2 already involved in LOH.");
+ assert((!InvolvedInLOHs || InvolvedInLOHs->insert(Candidate)) &&
+ "Candidate already involved in LOH.");
+#ifdef DEBUG
+ // get the immediate of the load
+ if (Candidate->getOperand(2).getImm() == 0)
+ if (ImmediateDefOpc == AArch64::ADDXri)
+ ++NumADDToLDR;
+ else
+ ++NumLDRToLDR;
+ else if (ImmediateDefOpc == AArch64::ADDXri)
+ ++NumADDToLDRWithImm;
+ else
+ ++NumLDRToLDRWithImm;
+#endif // DEBUG
+ }
+ } else {
+ if (ImmediateDefOpc == AArch64::ADRP)
+ continue;
+ else {
+
+ DEBUG(dbgs() << "Record Adrp" << (IsL2Add ? "Add" : "LdrGot")
+ << "Str:\n" << *L1 << '\n' << *L2 << '\n' << *Candidate
+ << '\n');
+
+ Kind = IsL2Add ? MCLOH_AdrpAddStr : MCLOH_AdrpLdrGotStr;
+ Args.push_back(L1);
+ Args.push_back(L2);
+ Args.push_back(Candidate);
+
+ PotentialADROpportunities.remove(L2);
+ assert((!InvolvedInLOHs || InvolvedInLOHs->insert(L1)) &&
+ "L1 already involved in LOH.");
+ assert((!InvolvedInLOHs || InvolvedInLOHs->insert(L2)) &&
+ "L2 already involved in LOH.");
+ assert((!InvolvedInLOHs || InvolvedInLOHs->insert(Candidate)) &&
+ "Candidate already involved in LOH.");
+#ifdef DEBUG
+ // get the immediate of the store
+ if (Candidate->getOperand(2).getImm() == 0)
+ if (ImmediateDefOpc == AArch64::ADDXri)
+ ++NumADDToSTR;
+ else
+ ++NumLDRToSTR;
+ else if (ImmediateDefOpc == AArch64::ADDXri)
+ ++NumADDToSTRWithImm;
+ else
+ ++NumLDRToSTRWithImm;
+#endif // DEBUG
+ }
+ }
+ AArch64FI.addLOHDirective(Kind, Args);
+ }
+
+ // Now, we grabbed all the big patterns, check ADR opportunities.
+ for (const MachineInstr *Candidate : PotentialADROpportunities)
+ registerADRCandidate(*Candidate, UseToDefs, DefsPerColorToUses, AArch64FI,
+ InvolvedInLOHs, RegToId);
+}
+
+/// Look for every register defined by potential LOH candidates.
+/// Map these registers to dense ids in @p RegToId and vice versa in
+/// @p IdToReg. @p IdToReg is populated only in DEBUG mode.
+static void collectInvolvedReg(MachineFunction &MF, MapRegToId &RegToId,
+ MapIdToReg &IdToReg,
+ const TargetRegisterInfo *TRI) {
+ unsigned CurRegId = 0;
+ if (!PreCollectRegister) {
+ unsigned NbReg = TRI->getNumRegs();
+ for (; CurRegId < NbReg; ++CurRegId) {
+ RegToId[CurRegId] = CurRegId;
+ DEBUG(IdToReg.push_back(CurRegId));
+ DEBUG(assert(IdToReg[CurRegId] == CurRegId && "Reg index mismatches"));
+ }
+ return;
+ }
+
+ DEBUG(dbgs() << "** Collect Involved Register\n");
+ for (const auto &MBB : MF) {
+ for (const MachineInstr &MI : MBB) {
+ if (!canDefBePartOfLOH(&MI))
+ continue;
+
+ // Process defs
+ for (MachineInstr::const_mop_iterator IO = MI.operands_begin(),
+ IOEnd = MI.operands_end();
+ IO != IOEnd; ++IO) {
+ if (!IO->isReg() || !IO->isDef())
+ continue;
+ unsigned CurReg = IO->getReg();
+ for (MCRegAliasIterator AI(CurReg, TRI, true); AI.isValid(); ++AI)
+ if (RegToId.find(*AI) == RegToId.end()) {
+ DEBUG(IdToReg.push_back(*AI);
+ assert(IdToReg[CurRegId] == *AI &&
+ "Reg index mismatches insertion index."));
+ RegToId[*AI] = CurRegId++;
+ DEBUG(dbgs() << "Register: " << PrintReg(*AI, TRI) << '\n');
+ }
+ }
+ }
+ }
+}
+
+bool AArch64CollectLOH::runOnMachineFunction(MachineFunction &MF) {
+ const TargetMachine &TM = MF.getTarget();
+ const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ const MachineDominatorTree *MDT = &getAnalysis<MachineDominatorTree>();
+
+ MapRegToId RegToId;
+ MapIdToReg IdToReg;
+ AArch64FunctionInfo *AArch64FI = MF.getInfo<AArch64FunctionInfo>();
+ assert(AArch64FI && "No MachineFunctionInfo for this function!");
+
+ DEBUG(dbgs() << "Looking for LOH in " << MF.getName() << '\n');
+
+ collectInvolvedReg(MF, RegToId, IdToReg, TRI);
+ if (RegToId.empty())
+ return false;
+
+ MachineInstr *DummyOp = nullptr;
+ if (BasicBlockScopeOnly) {
+ const AArch64InstrInfo *TII =
+ static_cast<const AArch64InstrInfo *>(TM.getInstrInfo());
+ // For local analysis, create a dummy operation to record uses that are not
+ // local.
+ DummyOp = MF.CreateMachineInstr(TII->get(AArch64::COPY), DebugLoc());
+ }
+
+ unsigned NbReg = RegToId.size();
+ bool Modified = false;
+
+ // Start with ADRP.
+ InstrToInstrs *ColorOpToReachedUses = new InstrToInstrs[NbReg];
+
+ // Compute the reaching def in ADRP mode, meaning ADRP definitions
+ // are first considered as uses.
+ reachingDef(MF, ColorOpToReachedUses, RegToId, true, DummyOp);
+ DEBUG(dbgs() << "ADRP reaching defs\n");
+ DEBUG(printReachingDef(ColorOpToReachedUses, NbReg, TRI, IdToReg));
+
+ // Translate the definition to uses map into a use to definitions map to ease
+ // statistic computation.
+ InstrToInstrs ADRPToReachingDefs;
+ reachedUsesToDefs(ADRPToReachingDefs, ColorOpToReachedUses, RegToId, true);
+
+ // Compute LOH for ADRP.
+ computeADRP(ADRPToReachingDefs, *AArch64FI, MDT);
+ delete[] ColorOpToReachedUses;
+
+ // Continue with general ADRP -> ADD/LDR -> LDR/STR pattern.
+ ColorOpToReachedUses = new InstrToInstrs[NbReg];
+
+ // first perform a regular reaching def analysis.
+ reachingDef(MF, ColorOpToReachedUses, RegToId, false, DummyOp);
+ DEBUG(dbgs() << "All reaching defs\n");
+ DEBUG(printReachingDef(ColorOpToReachedUses, NbReg, TRI, IdToReg));
+
+ // Turn that into a use to defs to ease statistic computation.
+ InstrToInstrs UsesToReachingDefs;
+ reachedUsesToDefs(UsesToReachingDefs, ColorOpToReachedUses, RegToId, false);
+
+  // Compute LOHs other than AdrpAdrp.
+ computeOthers(UsesToReachingDefs, ColorOpToReachedUses, *AArch64FI, RegToId,
+ MDT);
+ delete[] ColorOpToReachedUses;
+
+ if (BasicBlockScopeOnly)
+ MF.DeleteMachineInstr(DummyOp);
+
+ return Modified;
+}
+
+/// createAArch64CollectLOHPass - returns an instance of the AArch64 Collect
+/// Linker Optimization Hint (LOH) pass.
+FunctionPass *llvm::createAArch64CollectLOHPass() {
+ return new AArch64CollectLOH();
+}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/contrib/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
new file mode 100644
index 0000000..452cdec
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
@@ -0,0 +1,919 @@
+//===-- AArch64ConditionalCompares.cpp --- CCMP formation for AArch64 -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the AArch64ConditionalCompares pass which reduces
+// branching and code size by using the conditional compare instructions CCMP,
+// CCMN, and FCCMP.
+//
+// The CFG transformations for forming conditional compares are very similar to
+// if-conversion, and this pass should run immediately before the early
+// if-conversion pass.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AArch64.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SparseSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineTraceMetrics.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "aarch64-ccmp"
+
+// Absolute maximum number of instructions allowed per speculated block.
+// This bypasses all other heuristics, so it should be set fairly high.
+static cl::opt<unsigned> BlockInstrLimit(
+ "aarch64-ccmp-limit", cl::init(30), cl::Hidden,
+ cl::desc("Maximum number of instructions per speculated block."));
+
+// Stress testing mode - disable heuristics.
+static cl::opt<bool> Stress("aarch64-stress-ccmp", cl::Hidden,
+ cl::desc("Turn all knobs to 11"));
+
+STATISTIC(NumConsidered, "Number of ccmps considered");
+STATISTIC(NumPhiRejs, "Number of ccmps rejected (PHI)");
+STATISTIC(NumPhysRejs, "Number of ccmps rejected (Physregs)");
+STATISTIC(NumPhi2Rejs, "Number of ccmps rejected (PHI2)");
+STATISTIC(NumHeadBranchRejs, "Number of ccmps rejected (Head branch)");
+STATISTIC(NumCmpBranchRejs, "Number of ccmps rejected (CmpBB branch)");
+STATISTIC(NumCmpTermRejs, "Number of ccmps rejected (CmpBB is cbz...)");
+STATISTIC(NumImmRangeRejs, "Number of ccmps rejected (Imm out of range)");
+STATISTIC(NumLiveDstRejs, "Number of ccmps rejected (Cmp dest live)");
+STATISTIC(NumMultNZCVUses, "Number of ccmps rejected (NZCV used)");
+STATISTIC(NumUnknNZCVDefs, "Number of ccmps rejected (NZCV def unknown)");
+
+STATISTIC(NumSpeculateRejs, "Number of ccmps rejected (Can't speculate)");
+
+STATISTIC(NumConverted, "Number of ccmp instructions created");
+STATISTIC(NumCompBranches, "Number of cbz/cbnz branches converted");
+
+//===----------------------------------------------------------------------===//
+// SSACCmpConv
+//===----------------------------------------------------------------------===//
+//
+// The SSACCmpConv class performs ccmp-conversion on SSA form machine code
+// after determining if it is possible. The class contains no heuristics;
+// external code should be used to determine when ccmp-conversion is a good
+// idea.
+//
+// CCmp-formation works on a CFG representing chained conditions, typically
+// from C's short-circuit || and && operators:
+//
+// From: Head To: Head
+// / | CmpBB
+// / | / |
+// | CmpBB / |
+// | / | Tail |
+// | / | | |
+// Tail | | |
+// | | | |
+// ... ... ... ...
+//
+// The Head block is terminated by a br.cond instruction, and the CmpBB block
+// contains compare + br.cond. Tail must be a successor of both.
+//
+// The cmp-conversion turns the compare instruction in CmpBB into a conditional
+// compare, and merges CmpBB into Head, speculatively executing its
+// instructions. The AArch64 conditional compare instructions have an immediate
+// operand that specifies the NZCV flag values when the condition is false and
+// the compare isn't executed. This makes it possible to chain compares with
+// different condition codes.
+//
+// Example:
+//
+// if (a == 5 || b == 17)
+// foo();
+//
+// Head:
+// cmp w0, #5
+// b.eq Tail
+// CmpBB:
+// cmp w1, #17
+// b.eq Tail
+// ...
+// Tail:
+// bl _foo
+//
+// Becomes:
+//
+// Head:
+// cmp w0, #5
+// ccmp w1, #17, 4, ne ; 4 = nZcv
+// b.eq Tail
+// ...
+// Tail:
+// bl _foo
+//
+// The ccmp condition code is the one that would cause the Head terminator to
+// branch to CmpBB.
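+//
+// As a clarifying note on the example above: the trailing immediate of the
+// ccmp is the NZCV value used when its condition is false. With "ne", the
+// compare of w1 only executes when w0 != 5; otherwise the flags are set to
+// 4 = nZcv (Z set), so the following b.eq still branches to Tail, matching
+// the original behaviour for a == 5.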
+//
+// FIXME: It should also be possible to speculate a block on the critical edge
+// between Head and Tail, just like if-converting a diamond.
+//
+// FIXME: Handle PHIs in Tail by turning them into selects (if-conversion).
+
+namespace {
+class SSACCmpConv {
+ MachineFunction *MF;
+ const TargetInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ MachineRegisterInfo *MRI;
+
+public:
+ /// The first block containing a conditional branch, dominating everything
+ /// else.
+ MachineBasicBlock *Head;
+
+ /// The block containing cmp+br.cond with a successor shared with Head.
+ MachineBasicBlock *CmpBB;
+
+ /// The common successor for Head and CmpBB.
+ MachineBasicBlock *Tail;
+
+ /// The compare instruction in CmpBB that can be converted to a ccmp.
+ MachineInstr *CmpMI;
+
+private:
+ /// The branch condition in Head as determined by AnalyzeBranch.
+ SmallVector<MachineOperand, 4> HeadCond;
+
+ /// The condition code that makes Head branch to CmpBB.
+ AArch64CC::CondCode HeadCmpBBCC;
+
+ /// The branch condition in CmpBB.
+ SmallVector<MachineOperand, 4> CmpBBCond;
+
+ /// The condition code that makes CmpBB branch to Tail.
+ AArch64CC::CondCode CmpBBTailCC;
+
+ /// Check if the Tail PHIs are trivially convertible.
+ bool trivialTailPHIs();
+
+ /// Remove CmpBB from the Tail PHIs.
+ void updateTailPHIs();
+
+ /// Check if an operand defining DstReg is dead.
+ bool isDeadDef(unsigned DstReg);
+
+ /// Find the compare instruction in MBB that controls the conditional branch.
+ /// Return NULL if a convertible instruction can't be found.
+ MachineInstr *findConvertibleCompare(MachineBasicBlock *MBB);
+
+ /// Return true if all non-terminator instructions in MBB can be safely
+ /// speculated.
+ bool canSpeculateInstrs(MachineBasicBlock *MBB, const MachineInstr *CmpMI);
+
+public:
+ /// runOnMachineFunction - Initialize per-function data structures.
+ void runOnMachineFunction(MachineFunction &MF) {
+ this->MF = &MF;
+ TII = MF.getTarget().getInstrInfo();
+ TRI = MF.getTarget().getRegisterInfo();
+ MRI = &MF.getRegInfo();
+ }
+
+ /// If the sub-CFG headed by MBB can be cmp-converted, initialize the
+ /// internal state, and return true.
+ bool canConvert(MachineBasicBlock *MBB);
+
+  /// Cmp-convert the last block passed to canConvert(), assuming
+ /// it is possible. Add any erased blocks to RemovedBlocks.
+ void convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks);
+
+ /// Return the expected code size delta if the conversion into a
+ /// conditional compare is performed.
+ int expectedCodeSizeDelta() const;
+};
+} // end anonymous namespace
+
+// Check that all PHIs in Tail are selecting the same value from Head and CmpBB.
+// This means that no if-conversion is required when merging CmpBB into Head.
+bool SSACCmpConv::trivialTailPHIs() {
+ for (auto &I : *Tail) {
+ if (!I.isPHI())
+ break;
+ unsigned HeadReg = 0, CmpBBReg = 0;
+ // PHI operands come in (VReg, MBB) pairs.
+ for (unsigned oi = 1, oe = I.getNumOperands(); oi != oe; oi += 2) {
+ MachineBasicBlock *MBB = I.getOperand(oi + 1).getMBB();
+ unsigned Reg = I.getOperand(oi).getReg();
+ if (MBB == Head) {
+ assert((!HeadReg || HeadReg == Reg) && "Inconsistent PHI operands");
+ HeadReg = Reg;
+ }
+ if (MBB == CmpBB) {
+ assert((!CmpBBReg || CmpBBReg == Reg) && "Inconsistent PHI operands");
+ CmpBBReg = Reg;
+ }
+ }
+ if (HeadReg != CmpBBReg)
+ return false;
+ }
+ return true;
+}
+
+// Assuming that trivialTailPHIs() is true, update the Tail PHIs by simply
+// removing the CmpBB operands. The Head operands will be identical.
+void SSACCmpConv::updateTailPHIs() {
+ for (auto &I : *Tail) {
+ if (!I.isPHI())
+ break;
+ // I is a PHI. It can have multiple entries for CmpBB.
+ for (unsigned oi = I.getNumOperands(); oi > 2; oi -= 2) {
+ // PHI operands are (Reg, MBB) at (oi-2, oi-1).
+ if (I.getOperand(oi - 1).getMBB() == CmpBB) {
+ I.RemoveOperand(oi - 1);
+ I.RemoveOperand(oi - 2);
+ }
+ }
+ }
+}
+
+// This pass runs before the AArch64DeadRegisterDefinitions pass, so compares
+// are still writing virtual registers without any uses.
+bool SSACCmpConv::isDeadDef(unsigned DstReg) {
+ // Writes to the zero register are dead.
+ if (DstReg == AArch64::WZR || DstReg == AArch64::XZR)
+ return true;
+ if (!TargetRegisterInfo::isVirtualRegister(DstReg))
+ return false;
+ // A virtual register def without any uses will be marked dead later, and
+ // eventually replaced by the zero register.
+ return MRI->use_nodbg_empty(DstReg);
+}
+
+// Parse a condition code returned by AnalyzeBranch, and compute the CondCode
+// corresponding to TBB.
+// Return true if the condition could be parsed.
+static bool parseCond(ArrayRef<MachineOperand> Cond, AArch64CC::CondCode &CC) {
+ // A normal br.cond simply has the condition code.
+ if (Cond[0].getImm() != -1) {
+ assert(Cond.size() == 1 && "Unknown Cond array format");
+ CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
+ return true;
+ }
+  // For tbz and cbz instructions, the opcode is next.
+ switch (Cond[1].getImm()) {
+ default:
+ // This includes tbz / tbnz branches which can't be converted to
+ // ccmp + br.cond.
+ return false;
+ case AArch64::CBZW:
+ case AArch64::CBZX:
+ assert(Cond.size() == 3 && "Unknown Cond array format");
+ CC = AArch64CC::EQ;
+ return true;
+ case AArch64::CBNZW:
+ case AArch64::CBNZX:
+ assert(Cond.size() == 3 && "Unknown Cond array format");
+ CC = AArch64CC::NE;
+ return true;
+ }
+}
+
+MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) {
+ MachineBasicBlock::iterator I = MBB->getFirstTerminator();
+ if (I == MBB->end())
+ return nullptr;
+ // The terminator must be controlled by the flags.
+ if (!I->readsRegister(AArch64::NZCV)) {
+ switch (I->getOpcode()) {
+ case AArch64::CBZW:
+ case AArch64::CBZX:
+ case AArch64::CBNZW:
+ case AArch64::CBNZX:
+ // These can be converted into a ccmp against #0.
+ return I;
+ }
+ ++NumCmpTermRejs;
+ DEBUG(dbgs() << "Flags not used by terminator: " << *I);
+ return nullptr;
+ }
+
+ // Now find the instruction controlling the terminator.
+ for (MachineBasicBlock::iterator B = MBB->begin(); I != B;) {
+ --I;
+ assert(!I->isTerminator() && "Spurious terminator");
+ switch (I->getOpcode()) {
+ // cmp is an alias for subs with a dead destination register.
+ case AArch64::SUBSWri:
+ case AArch64::SUBSXri:
+ // cmn is an alias for adds with a dead destination register.
+ case AArch64::ADDSWri:
+ case AArch64::ADDSXri:
+ // Check that the immediate operand is within range, ccmp wants a uimm5.
+ // Rd = SUBSri Rn, imm, shift
+ if (I->getOperand(3).getImm() || !isUInt<5>(I->getOperand(2).getImm())) {
+ DEBUG(dbgs() << "Immediate out of range for ccmp: " << *I);
+ ++NumImmRangeRejs;
+ return nullptr;
+ }
+ // Fall through.
+ case AArch64::SUBSWrr:
+ case AArch64::SUBSXrr:
+ case AArch64::ADDSWrr:
+ case AArch64::ADDSXrr:
+ if (isDeadDef(I->getOperand(0).getReg()))
+ return I;
+ DEBUG(dbgs() << "Can't convert compare with live destination: " << *I);
+ ++NumLiveDstRejs;
+ return nullptr;
+ case AArch64::FCMPSrr:
+ case AArch64::FCMPDrr:
+ case AArch64::FCMPESrr:
+ case AArch64::FCMPEDrr:
+ return I;
+ }
+
+ // Check for flag reads and clobbers.
+ MIOperands::PhysRegInfo PRI =
+ MIOperands(I).analyzePhysReg(AArch64::NZCV, TRI);
+
+ if (PRI.Reads) {
+ // The ccmp doesn't produce exactly the same flags as the original
+ // compare, so reject the transform if there are uses of the flags
+ // besides the terminators.
+ DEBUG(dbgs() << "Can't create ccmp with multiple uses: " << *I);
+ ++NumMultNZCVUses;
+ return nullptr;
+ }
+
+ if (PRI.Clobbers) {
+ DEBUG(dbgs() << "Not convertible compare: " << *I);
+ ++NumUnknNZCVDefs;
+ return nullptr;
+ }
+ }
+ DEBUG(dbgs() << "Flags not defined in BB#" << MBB->getNumber() << '\n');
+ return nullptr;
+}
+
+/// Determine if all the instructions in MBB can safely
+/// be speculated. The terminators are not considered.
+///
+/// Only CmpMI is allowed to clobber the flags.
+///
+bool SSACCmpConv::canSpeculateInstrs(MachineBasicBlock *MBB,
+ const MachineInstr *CmpMI) {
+ // Reject any live-in physregs. It's probably NZCV/EFLAGS, and very hard to
+ // get right.
+ if (!MBB->livein_empty()) {
+ DEBUG(dbgs() << "BB#" << MBB->getNumber() << " has live-ins.\n");
+ return false;
+ }
+
+ unsigned InstrCount = 0;
+
+ // Check all instructions, except the terminators. It is assumed that
+ // terminators never have side effects or define any used register values.
+ for (auto &I : make_range(MBB->begin(), MBB->getFirstTerminator())) {
+ if (I.isDebugValue())
+ continue;
+
+ if (++InstrCount > BlockInstrLimit && !Stress) {
+ DEBUG(dbgs() << "BB#" << MBB->getNumber() << " has more than "
+ << BlockInstrLimit << " instructions.\n");
+ return false;
+ }
+
+ // There shouldn't normally be any phis in a single-predecessor block.
+ if (I.isPHI()) {
+ DEBUG(dbgs() << "Can't hoist: " << I);
+ return false;
+ }
+
+ // Don't speculate loads. Note that it may be possible and desirable to
+ // speculate GOT or constant pool loads that are guaranteed not to trap,
+ // but we don't support that for now.
+ if (I.mayLoad()) {
+ DEBUG(dbgs() << "Won't speculate load: " << I);
+ return false;
+ }
+
+ // We never speculate stores, so an AA pointer isn't necessary.
+ bool DontMoveAcrossStore = true;
+ if (!I.isSafeToMove(TII, nullptr, DontMoveAcrossStore)) {
+ DEBUG(dbgs() << "Can't speculate: " << I);
+ return false;
+ }
+
+ // Only CmpMI is allowed to clobber the flags.
+ if (&I != CmpMI && I.modifiesRegister(AArch64::NZCV, TRI)) {
+ DEBUG(dbgs() << "Clobbers flags: " << I);
+ return false;
+ }
+ }
+ return true;
+}
+
+/// Analyze the sub-cfg rooted in MBB, and return true if it is a potential
+/// candidate for cmp-conversion. Fill out the internal state.
+///
+bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) {
+ Head = MBB;
+ Tail = CmpBB = nullptr;
+
+ if (Head->succ_size() != 2)
+ return false;
+ MachineBasicBlock *Succ0 = Head->succ_begin()[0];
+ MachineBasicBlock *Succ1 = Head->succ_begin()[1];
+
+ // CmpBB can only have a single predecessor. Tail is allowed many.
+ if (Succ0->pred_size() != 1)
+ std::swap(Succ0, Succ1);
+
+ // Succ0 is our candidate for CmpBB.
+ if (Succ0->pred_size() != 1 || Succ0->succ_size() != 2)
+ return false;
+
+ CmpBB = Succ0;
+ Tail = Succ1;
+
+ if (!CmpBB->isSuccessor(Tail))
+ return false;
+
+ // The CFG topology checks out.
+ DEBUG(dbgs() << "\nTriangle: BB#" << Head->getNumber() << " -> BB#"
+ << CmpBB->getNumber() << " -> BB#" << Tail->getNumber() << '\n');
+ ++NumConsidered;
+
+ // Tail is allowed to have many predecessors, but we can't handle PHIs yet.
+ //
+ // FIXME: Real PHIs could be if-converted as long as the CmpBB values are
+ // defined before the CmpBB cmp clobbers the flags. Alternatively, it should
+ // always be safe to sink the ccmp down to immediately before the CmpBB
+ // terminators.
+ if (!trivialTailPHIs()) {
+ DEBUG(dbgs() << "Can't handle phis in Tail.\n");
+ ++NumPhiRejs;
+ return false;
+ }
+
+ if (!Tail->livein_empty()) {
+ DEBUG(dbgs() << "Can't handle live-in physregs in Tail.\n");
+ ++NumPhysRejs;
+ return false;
+ }
+
+ // CmpBB should never have PHIs since Head is its only predecessor.
+ // FIXME: Clean them up if it happens.
+ if (!CmpBB->empty() && CmpBB->front().isPHI()) {
+ DEBUG(dbgs() << "Can't handle phis in CmpBB.\n");
+ ++NumPhi2Rejs;
+ return false;
+ }
+
+ if (!CmpBB->livein_empty()) {
+ DEBUG(dbgs() << "Can't handle live-in physregs in CmpBB.\n");
+ ++NumPhysRejs;
+ return false;
+ }
+
+ // The branch we're looking to eliminate must be analyzable.
+ HeadCond.clear();
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
+ if (TII->AnalyzeBranch(*Head, TBB, FBB, HeadCond)) {
+ DEBUG(dbgs() << "Head branch not analyzable.\n");
+ ++NumHeadBranchRejs;
+ return false;
+ }
+
+ // This is weird, probably some sort of degenerate CFG, or an edge to a
+ // landing pad.
+ if (!TBB || HeadCond.empty()) {
+ DEBUG(dbgs() << "AnalyzeBranch didn't find conditional branch in Head.\n");
+ ++NumHeadBranchRejs;
+ return false;
+ }
+
+ if (!parseCond(HeadCond, HeadCmpBBCC)) {
+ DEBUG(dbgs() << "Unsupported branch type on Head\n");
+ ++NumHeadBranchRejs;
+ return false;
+ }
+
+ // Make sure the branch direction is right.
+ if (TBB != CmpBB) {
+ assert(TBB == Tail && "Unexpected TBB");
+ HeadCmpBBCC = AArch64CC::getInvertedCondCode(HeadCmpBBCC);
+ }
+
+ CmpBBCond.clear();
+ TBB = FBB = nullptr;
+ if (TII->AnalyzeBranch(*CmpBB, TBB, FBB, CmpBBCond)) {
+ DEBUG(dbgs() << "CmpBB branch not analyzable.\n");
+ ++NumCmpBranchRejs;
+ return false;
+ }
+
+ if (!TBB || CmpBBCond.empty()) {
+ DEBUG(dbgs() << "AnalyzeBranch didn't find conditional branch in CmpBB.\n");
+ ++NumCmpBranchRejs;
+ return false;
+ }
+
+ if (!parseCond(CmpBBCond, CmpBBTailCC)) {
+ DEBUG(dbgs() << "Unsupported branch type on CmpBB\n");
+ ++NumCmpBranchRejs;
+ return false;
+ }
+
+ if (TBB != Tail)
+ CmpBBTailCC = AArch64CC::getInvertedCondCode(CmpBBTailCC);
+
+ DEBUG(dbgs() << "Head->CmpBB on " << AArch64CC::getCondCodeName(HeadCmpBBCC)
+ << ", CmpBB->Tail on " << AArch64CC::getCondCodeName(CmpBBTailCC)
+ << '\n');
+
+ CmpMI = findConvertibleCompare(CmpBB);
+ if (!CmpMI)
+ return false;
+
+ if (!canSpeculateInstrs(CmpBB, CmpMI)) {
+ ++NumSpeculateRejs;
+ return false;
+ }
+ return true;
+}
+
+void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
+ DEBUG(dbgs() << "Merging BB#" << CmpBB->getNumber() << " into BB#"
+ << Head->getNumber() << ":\n" << *CmpBB);
+
+ // All CmpBB instructions are moved into Head, and CmpBB is deleted.
+ // Update the CFG first.
+ updateTailPHIs();
+ Head->removeSuccessor(CmpBB);
+ CmpBB->removeSuccessor(Tail);
+ Head->transferSuccessorsAndUpdatePHIs(CmpBB);
+ DebugLoc TermDL = Head->getFirstTerminator()->getDebugLoc();
+ TII->RemoveBranch(*Head);
+
+ // If the Head terminator was one of the cbz / tbz branches with built-in
+ // compare, we need to insert an explicit compare instruction in its place.
+ if (HeadCond[0].getImm() == -1) {
+ ++NumCompBranches;
+ unsigned Opc = 0;
+ switch (HeadCond[1].getImm()) {
+ case AArch64::CBZW:
+ case AArch64::CBNZW:
+ Opc = AArch64::SUBSWri;
+ break;
+ case AArch64::CBZX:
+ case AArch64::CBNZX:
+ Opc = AArch64::SUBSXri;
+ break;
+ default:
+ llvm_unreachable("Cannot convert Head branch");
+ }
+ const MCInstrDesc &MCID = TII->get(Opc);
+ // Create a dummy virtual register for the SUBS def.
+ unsigned DestReg =
+ MRI->createVirtualRegister(TII->getRegClass(MCID, 0, TRI, *MF));
+ // Insert a SUBS Rn, #0 instruction instead of the cbz / cbnz.
+ BuildMI(*Head, Head->end(), TermDL, MCID)
+ .addReg(DestReg, RegState::Define | RegState::Dead)
+ .addOperand(HeadCond[2])
+ .addImm(0)
+ .addImm(0);
+ // SUBS uses the GPR*sp register classes.
+ MRI->constrainRegClass(HeadCond[2].getReg(),
+ TII->getRegClass(MCID, 1, TRI, *MF));
+ }
+
+ Head->splice(Head->end(), CmpBB, CmpBB->begin(), CmpBB->end());
+
+ // Now replace CmpMI with a ccmp instruction that also considers the incoming
+ // flags.
+ unsigned Opc = 0;
+ unsigned FirstOp = 1; // First CmpMI operand to copy.
+ bool isZBranch = false; // CmpMI is a cbz/cbnz instruction.
+ switch (CmpMI->getOpcode()) {
+ default:
+ llvm_unreachable("Unknown compare opcode");
+ case AArch64::SUBSWri: Opc = AArch64::CCMPWi; break;
+ case AArch64::SUBSWrr: Opc = AArch64::CCMPWr; break;
+ case AArch64::SUBSXri: Opc = AArch64::CCMPXi; break;
+ case AArch64::SUBSXrr: Opc = AArch64::CCMPXr; break;
+ case AArch64::ADDSWri: Opc = AArch64::CCMNWi; break;
+ case AArch64::ADDSWrr: Opc = AArch64::CCMNWr; break;
+ case AArch64::ADDSXri: Opc = AArch64::CCMNXi; break;
+ case AArch64::ADDSXrr: Opc = AArch64::CCMNXr; break;
+ case AArch64::FCMPSrr: Opc = AArch64::FCCMPSrr; FirstOp = 0; break;
+ case AArch64::FCMPDrr: Opc = AArch64::FCCMPDrr; FirstOp = 0; break;
+ case AArch64::FCMPESrr: Opc = AArch64::FCCMPESrr; FirstOp = 0; break;
+ case AArch64::FCMPEDrr: Opc = AArch64::FCCMPEDrr; FirstOp = 0; break;
+ case AArch64::CBZW:
+ case AArch64::CBNZW:
+ Opc = AArch64::CCMPWi;
+ FirstOp = 0;
+ isZBranch = true;
+ break;
+ case AArch64::CBZX:
+ case AArch64::CBNZX:
+ Opc = AArch64::CCMPXi;
+ FirstOp = 0;
+ isZBranch = true;
+ break;
+ }
+
+ // The ccmp instruction should set the flags according to the comparison when
+ // Head would have branched to CmpBB.
+ // The NZCV immediate operand should provide flags for the case where Head
+ // would have branched to Tail. These flags should cause the new Head
+ // terminator to branch to tail.
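+ //
+ // Purely illustrative sketch (not taken from this source): for a triangle
+ //   Head:  cmp w0, #5 ; b.lt CmpBB      (otherwise fall through to Tail)
+ //   CmpBB: cmp w1, #7 ; b.gt Tail
+ // the conversion produces, roughly,
+ //   Head:  cmp w0, #5 ; ccmp w1, #7, #<nzcv>, lt ; b.gt Tail
+ // where <nzcv> is chosen so the final branch takes the Tail edge whenever
+ // the original Head condition (lt) was false.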
+ unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(CmpBBTailCC);
+ const MCInstrDesc &MCID = TII->get(Opc);
+ MRI->constrainRegClass(CmpMI->getOperand(FirstOp).getReg(),
+ TII->getRegClass(MCID, 0, TRI, *MF));
+ if (CmpMI->getOperand(FirstOp + 1).isReg())
+ MRI->constrainRegClass(CmpMI->getOperand(FirstOp + 1).getReg(),
+ TII->getRegClass(MCID, 1, TRI, *MF));
+ MachineInstrBuilder MIB =
+ BuildMI(*Head, CmpMI, CmpMI->getDebugLoc(), MCID)
+ .addOperand(CmpMI->getOperand(FirstOp)); // Register Rn
+ if (isZBranch)
+ MIB.addImm(0); // cbz/cbnz Rn -> ccmp Rn, #0
+ else
+ MIB.addOperand(CmpMI->getOperand(FirstOp + 1)); // Register Rm / Immediate
+ MIB.addImm(NZCV).addImm(HeadCmpBBCC);
+
+ // If CmpMI was a terminator, we need a new conditional branch to replace it.
+ // This now becomes a Head terminator.
+ if (isZBranch) {
+ bool isNZ = CmpMI->getOpcode() == AArch64::CBNZW ||
+ CmpMI->getOpcode() == AArch64::CBNZX;
+ BuildMI(*Head, CmpMI, CmpMI->getDebugLoc(), TII->get(AArch64::Bcc))
+ .addImm(isNZ ? AArch64CC::NE : AArch64CC::EQ)
+ .addOperand(CmpMI->getOperand(1)); // Branch target.
+ }
+ CmpMI->eraseFromParent();
+ Head->updateTerminator();
+
+ RemovedBlocks.push_back(CmpBB);
+ CmpBB->eraseFromParent();
+ DEBUG(dbgs() << "Result:\n" << *Head);
+ ++NumConverted;
+}
+
+int SSACCmpConv::expectedCodeSizeDelta() const {
+ int delta = 0;
+ // If the Head terminator was one of the cbz / tbz branches with built-in
+ // compare, we need to insert an explicit compare instruction in its place
+ // plus a branch instruction.
+ if (HeadCond[0].getImm() == -1) {
+ switch (HeadCond[1].getImm()) {
+ case AArch64::CBZW:
+ case AArch64::CBNZW:
+ case AArch64::CBZX:
+ case AArch64::CBNZX:
+ // Therefore delta += 1
+ delta = 1;
+ break;
+ default:
+ llvm_unreachable("Cannot convert Head branch");
+ }
+ }
+ // If the Cmp terminator was one of the cbz / tbz branches with
+ // built-in compare, it will be turned into a compare instruction
+ // into Head, but we do not save any instruction.
+ // Otherwise, we save the branch instruction.
+ switch (CmpMI->getOpcode()) {
+ default:
+ --delta;
+ break;
+ case AArch64::CBZW:
+ case AArch64::CBNZW:
+ case AArch64::CBZX:
+ case AArch64::CBNZX:
+ break;
+ }
+ return delta;
+}
+
+//===----------------------------------------------------------------------===//
+// AArch64ConditionalCompares Pass
+//===----------------------------------------------------------------------===//
+
+namespace {
+class AArch64ConditionalCompares : public MachineFunctionPass {
+ const TargetInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ const MCSchedModel *SchedModel;
+ // Whether the function being processed has the MinSize (Oz) attribute.
+ bool MinSize;
+ MachineRegisterInfo *MRI;
+ MachineDominatorTree *DomTree;
+ MachineLoopInfo *Loops;
+ MachineTraceMetrics *Traces;
+ MachineTraceMetrics::Ensemble *MinInstr;
+ SSACCmpConv CmpConv;
+
+public:
+ static char ID;
+ AArch64ConditionalCompares() : MachineFunctionPass(ID) {}
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool runOnMachineFunction(MachineFunction &MF) override;
+ const char *getPassName() const override {
+ return "AArch64 Conditional Compares";
+ }
+
+private:
+ bool tryConvert(MachineBasicBlock *);
+ void updateDomTree(ArrayRef<MachineBasicBlock *> Removed);
+ void updateLoops(ArrayRef<MachineBasicBlock *> Removed);
+ void invalidateTraces();
+ bool shouldConvert();
+};
+} // end anonymous namespace
+
+char AArch64ConditionalCompares::ID = 0;
+
+namespace llvm {
+void initializeAArch64ConditionalComparesPass(PassRegistry &);
+}
+
+INITIALIZE_PASS_BEGIN(AArch64ConditionalCompares, "aarch64-ccmp",
+ "AArch64 CCMP Pass", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(MachineTraceMetrics)
+INITIALIZE_PASS_END(AArch64ConditionalCompares, "aarch64-ccmp",
+ "AArch64 CCMP Pass", false, false)
+
+FunctionPass *llvm::createAArch64ConditionalCompares() {
+ return new AArch64ConditionalCompares();
+}
+
+void AArch64ConditionalCompares::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<MachineBranchProbabilityInfo>();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
+ AU.addRequired<MachineTraceMetrics>();
+ AU.addPreserved<MachineTraceMetrics>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+/// Update the dominator tree after if-conversion erased some blocks.
+void AArch64ConditionalCompares::updateDomTree(
+ ArrayRef<MachineBasicBlock *> Removed) {
+ // convert() removes CmpBB which was previously dominated by Head.
+ // CmpBB children should be transferred to Head.
+ MachineDomTreeNode *HeadNode = DomTree->getNode(CmpConv.Head);
+ for (unsigned i = 0, e = Removed.size(); i != e; ++i) {
+ MachineDomTreeNode *Node = DomTree->getNode(Removed[i]);
+ assert(Node != HeadNode && "Cannot erase the head node");
+ assert(Node->getIDom() == HeadNode && "CmpBB should be dominated by Head");
+ while (Node->getNumChildren())
+ DomTree->changeImmediateDominator(Node->getChildren().back(), HeadNode);
+ DomTree->eraseNode(Removed[i]);
+ }
+}
+
+/// Update LoopInfo after if-conversion.
+void
+AArch64ConditionalCompares::updateLoops(ArrayRef<MachineBasicBlock *> Removed) {
+ if (!Loops)
+ return;
+ for (unsigned i = 0, e = Removed.size(); i != e; ++i)
+ Loops->removeBlock(Removed[i]);
+}
+
+/// Invalidate MachineTraceMetrics before if-conversion.
+void AArch64ConditionalCompares::invalidateTraces() {
+ Traces->invalidate(CmpConv.Head);
+ Traces->invalidate(CmpConv.CmpBB);
+}
+
+/// Apply cost model and heuristics to the if-conversion in IfConv.
+/// Return true if the conversion is a good idea.
+///
+bool AArch64ConditionalCompares::shouldConvert() {
+ // Stress testing mode disables all cost considerations.
+ if (Stress)
+ return true;
+ if (!MinInstr)
+ MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);
+
+ // Head dominates CmpBB, so it is always included in its trace.
+ MachineTraceMetrics::Trace Trace = MinInstr->getTrace(CmpConv.CmpBB);
+
+ // If code size is the main concern, decide based on the expected size delta.
+ if (MinSize) {
+ int CodeSizeDelta = CmpConv.expectedCodeSizeDelta();
+ DEBUG(dbgs() << "Code size delta: " << CodeSizeDelta << '\n');
+ // If we are minimizing the code size, do the conversion whatever
+ // the cost is.
+ if (CodeSizeDelta < 0)
+ return true;
+ if (CodeSizeDelta > 0) {
+ DEBUG(dbgs() << "Code size is increasing, give up on this one.\n");
+ return false;
+ }
+ // CodeSizeDelta == 0, continue with the regular heuristics
+ }
+
+ // Heuristic: The compare conversion delays the execution of the branch
+ // instruction because we must wait for the inputs to the second compare as
+ // well. The branch has no dependent instructions, but delaying it increases
+ // the cost of a misprediction.
+ //
+ // Set a limit on the delay we will accept.
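+ // For example, with a hypothetical misprediction penalty of 16 cycles the
+ // limit below works out to 12 cycles.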
+ unsigned DelayLimit = SchedModel->MispredictPenalty * 3 / 4;
+
+ // Instruction depths can be computed for all trace instructions above CmpBB.
+ unsigned HeadDepth =
+ Trace.getInstrCycles(CmpConv.Head->getFirstTerminator()).Depth;
+ unsigned CmpBBDepth =
+ Trace.getInstrCycles(CmpConv.CmpBB->getFirstTerminator()).Depth;
+ DEBUG(dbgs() << "Head depth: " << HeadDepth
+ << "\nCmpBB depth: " << CmpBBDepth << '\n');
+ if (CmpBBDepth > HeadDepth + DelayLimit) {
+ DEBUG(dbgs() << "Branch delay would be larger than " << DelayLimit
+ << " cycles.\n");
+ return false;
+ }
+
+ // Check the resource depth at the bottom of CmpBB - these instructions will
+ // be speculated.
+ unsigned ResDepth = Trace.getResourceDepth(true);
+ DEBUG(dbgs() << "Resources: " << ResDepth << '\n');
+
+ // Heuristic: The speculatively executed instructions must all be able to
+ // merge into the Head block. The Head critical path should dominate the
+ // resource cost of the speculated instructions.
+ if (ResDepth > HeadDepth) {
+ DEBUG(dbgs() << "Too many instructions to speculate.\n");
+ return false;
+ }
+ return true;
+}
+
+bool AArch64ConditionalCompares::tryConvert(MachineBasicBlock *MBB) {
+ bool Changed = false;
+ while (CmpConv.canConvert(MBB) && shouldConvert()) {
+ invalidateTraces();
+ SmallVector<MachineBasicBlock *, 4> RemovedBlocks;
+ CmpConv.convert(RemovedBlocks);
+ Changed = true;
+ updateDomTree(RemovedBlocks);
+ updateLoops(RemovedBlocks);
+ }
+ return Changed;
+}
+
+bool AArch64ConditionalCompares::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
+ << "********** Function: " << MF.getName() << '\n');
+ TII = MF.getTarget().getInstrInfo();
+ TRI = MF.getTarget().getRegisterInfo();
+ SchedModel =
+ MF.getTarget().getSubtarget<TargetSubtargetInfo>().getSchedModel();
+ MRI = &MF.getRegInfo();
+ DomTree = &getAnalysis<MachineDominatorTree>();
+ Loops = getAnalysisIfAvailable<MachineLoopInfo>();
+ Traces = &getAnalysis<MachineTraceMetrics>();
+ MinInstr = nullptr;
+ MinSize = MF.getFunction()->getAttributes().hasAttribute(
+ AttributeSet::FunctionIndex, Attribute::MinSize);
+
+ bool Changed = false;
+ CmpConv.runOnMachineFunction(MF);
+
+ // Visit blocks in dominator tree pre-order. The pre-order enables multiple
+ // cmp-conversions from the same head block.
+ // Note that updateDomTree() modifies the children of the DomTree node
+ // currently being visited. The df_iterator supports that; it doesn't look at
+ // child_begin() / child_end() until after a node has been visited.
+ for (auto *I : depth_first(DomTree))
+ if (tryConvert(I->getBlock()))
+ Changed = true;
+
+ return Changed;
+}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp b/contrib/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
new file mode 100644
index 0000000..a2d853c
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
@@ -0,0 +1,134 @@
+//==-- AArch64DeadRegisterDefinitions.cpp - Replace dead defs w/ zero reg --==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// When allowed by the instruction, replace a dead definition of a GPR with
+// the zero register. This makes the code a bit friendlier towards the
+// hardware's register renamer.
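+//
+// Illustrative example (not from the original source): an instruction whose
+// only explicit def is a dead GPR32 virtual register, e.g. roughly
+//   %vreg1<def,dead> = SUBSWrr %vreg2, %vreg3, %NZCV<imp-def>
+// has that operand rewritten to WZR (XZR for a dead GPR64 def).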
+//===----------------------------------------------------------------------===//
+
+#include "AArch64.h"
+#include "AArch64RegisterInfo.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "aarch64-dead-defs"
+
+STATISTIC(NumDeadDefsReplaced, "Number of dead definitions replaced");
+
+namespace {
+class AArch64DeadRegisterDefinitions : public MachineFunctionPass {
+private:
+ const TargetRegisterInfo *TRI;
+ bool implicitlyDefinesOverlappingReg(unsigned Reg, const MachineInstr &MI);
+ bool processMachineBasicBlock(MachineBasicBlock &MBB);
+ bool usesFrameIndex(const MachineInstr &MI);
+public:
+ static char ID; // Pass identification, replacement for typeid.
+ explicit AArch64DeadRegisterDefinitions() : MachineFunctionPass(ID) {}
+
+ virtual bool runOnMachineFunction(MachineFunction &F) override;
+
+ const char *getPassName() const override { return "Dead register definitions"; }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+};
+char AArch64DeadRegisterDefinitions::ID = 0;
+} // end anonymous namespace
+
+bool AArch64DeadRegisterDefinitions::implicitlyDefinesOverlappingReg(
+ unsigned Reg, const MachineInstr &MI) {
+ for (const MachineOperand &MO : MI.implicit_operands())
+ if (MO.isReg() && MO.isDef())
+ if (TRI->regsOverlap(Reg, MO.getReg()))
+ return true;
+ return false;
+}
+
+bool AArch64DeadRegisterDefinitions::usesFrameIndex(const MachineInstr &MI) {
+ for (const MachineOperand &Op : MI.uses())
+ if (Op.isFI())
+ return true;
+ return false;
+}
+
+bool AArch64DeadRegisterDefinitions::processMachineBasicBlock(
+ MachineBasicBlock &MBB) {
+ bool Changed = false;
+ for (MachineInstr &MI : MBB) {
+ if (usesFrameIndex(MI)) {
+ // We need to skip this instruction because, while it appears to have a
+ // dead def, it uses a frame index which might expand into a multi-instruction
+ // sequence during prologue/epilogue insertion.
+ DEBUG(dbgs() << " Ignoring, operand is frame index\n");
+ continue;
+ }
+ for (int i = 0, e = MI.getDesc().getNumDefs(); i != e; ++i) {
+ MachineOperand &MO = MI.getOperand(i);
+ if (MO.isReg() && MO.isDead() && MO.isDef()) {
+ assert(!MO.isImplicit() && "Unexpected implicit def!");
+ DEBUG(dbgs() << " Dead def operand #" << i << " in:\n ";
+ MI.print(dbgs()));
+ // Be careful not to change the register if it's a tied operand.
+ if (MI.isRegTiedToUseOperand(i)) {
+ DEBUG(dbgs() << " Ignoring, def is tied operand.\n");
+ continue;
+ }
+ // Don't change the register if there's an implicit def of a subreg or
+ // superreg.
+ if (implicitlyDefinesOverlappingReg(MO.getReg(), MI)) {
+ DEBUG(dbgs() << " Ignoring, implicitly defines overlap reg.\n");
+ continue;
+ }
+ // Make sure the instruction takes a register class that contains
+ // the zero register, and replace the register if so.
+ unsigned NewReg;
+ switch (MI.getDesc().OpInfo[i].RegClass) {
+ default:
+ DEBUG(dbgs() << " Ignoring, register is not a GPR.\n");
+ continue;
+ case AArch64::GPR32RegClassID:
+ NewReg = AArch64::WZR;
+ break;
+ case AArch64::GPR64RegClassID:
+ NewReg = AArch64::XZR;
+ break;
+ }
+ DEBUG(dbgs() << " Replacing with zero register. New:\n ");
+ MO.setReg(NewReg);
+ DEBUG(MI.print(dbgs()));
+ ++NumDeadDefsReplaced;
+ }
+ }
+ }
+ return Changed;
+}
+
+// Scan the function for instructions that have a dead definition of a
+// register. Replace that register with the zero register when possible.
+bool AArch64DeadRegisterDefinitions::runOnMachineFunction(MachineFunction &MF) {
+ TRI = MF.getTarget().getRegisterInfo();
+ bool Changed = false;
+ DEBUG(dbgs() << "***** AArch64DeadRegisterDefinitions *****\n");
+
+ for (auto &MBB : MF)
+ if (processMachineBasicBlock(MBB))
+ Changed = true;
+ return Changed;
+}
+
+FunctionPass *llvm::createAArch64DeadRegisterDefinitions() {
+ return new AArch64DeadRegisterDefinitions();
+}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/contrib/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
new file mode 100644
index 0000000..8839085
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -0,0 +1,736 @@
+//==-- AArch64ExpandPseudoInsts.cpp - Expand pseudo instructions --*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a pass that expands pseudo instructions into target
+// instructions to allow proper scheduling and other late optimizations. This
+// pass should be run after register allocation but before the post-regalloc
+// scheduling pass.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/AArch64AddressingModes.h"
+#include "AArch64InstrInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Support/MathExtras.h"
+using namespace llvm;
+
+namespace {
+class AArch64ExpandPseudo : public MachineFunctionPass {
+public:
+ static char ID;
+ AArch64ExpandPseudo() : MachineFunctionPass(ID) {}
+
+ const AArch64InstrInfo *TII;
+
+ bool runOnMachineFunction(MachineFunction &Fn) override;
+
+ const char *getPassName() const override {
+ return "AArch64 pseudo instruction expansion pass";
+ }
+
+private:
+ bool expandMBB(MachineBasicBlock &MBB);
+ bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
+ bool expandMOVImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ unsigned BitSize);
+};
+char AArch64ExpandPseudo::ID = 0;
+}
+
+/// \brief Transfer implicit operands on the pseudo instruction to the
+/// instructions created from the expansion.
+static void transferImpOps(MachineInstr &OldMI, MachineInstrBuilder &UseMI,
+ MachineInstrBuilder &DefMI) {
+ const MCInstrDesc &Desc = OldMI.getDesc();
+ for (unsigned i = Desc.getNumOperands(), e = OldMI.getNumOperands(); i != e;
+ ++i) {
+ const MachineOperand &MO = OldMI.getOperand(i);
+ assert(MO.isReg() && MO.getReg());
+ if (MO.isUse())
+ UseMI.addOperand(MO);
+ else
+ DefMI.addOperand(MO);
+ }
+}
+
+/// \brief Helper function which extracts the specified 16-bit chunk from a
+/// 64-bit value.
+static uint64_t getChunk(uint64_t Imm, unsigned ChunkIdx) {
+ assert(ChunkIdx < 4 && "Out of range chunk index specified!");
+
+ return (Imm >> (ChunkIdx * 16)) & 0xFFFF;
+}
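+// For instance, getChunk(0x0123456789ABCDEF, 1) extracts bits [16,31] and
+// yields 0x89AB (illustrative value).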
+
+/// \brief Helper function which replicates a 16-bit chunk within a 64-bit
+/// value. Indices correspond to element numbers in a v4i16.
+static uint64_t replicateChunk(uint64_t Imm, unsigned FromIdx, unsigned ToIdx) {
+ assert((FromIdx < 4) && (ToIdx < 4) && "Out of range chunk index specified!");
+ const unsigned ShiftAmt = ToIdx * 16;
+
+ // Replicate the source chunk to the destination position.
+ const uint64_t Chunk = getChunk(Imm, FromIdx) << ShiftAmt;
+ // Clear the destination chunk.
+ Imm &= ~(0xFFFFLL << ShiftAmt);
+ // Insert the replicated chunk.
+ return Imm | Chunk;
+}
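+// For instance, replicateChunk(0x0123456789ABCDEF, 0, 2) copies chunk 0
+// (0xCDEF) into position 2, giving 0x0123CDEF89ABCDEF (illustrative value).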
+
+/// \brief Helper function which tries to materialize a 64-bit value with an
+/// ORR + MOVK instruction sequence.
+static bool tryOrrMovk(uint64_t UImm, uint64_t OrrImm, MachineInstr &MI,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI,
+ const AArch64InstrInfo *TII, unsigned ChunkIdx) {
+ assert(ChunkIdx < 4 && "Out of range chunk index specified!");
+ const unsigned ShiftAmt = ChunkIdx * 16;
+
+ uint64_t Encoding;
+ if (AArch64_AM::processLogicalImmediate(OrrImm, 64, Encoding)) {
+ // Create the ORR-immediate instruction.
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri))
+ .addOperand(MI.getOperand(0))
+ .addReg(AArch64::XZR)
+ .addImm(Encoding);
+
+ // Create the MOVK instruction.
+ const unsigned Imm16 = getChunk(UImm, ChunkIdx);
+ const unsigned DstReg = MI.getOperand(0).getReg();
+ const bool DstIsDead = MI.getOperand(0).isDead();
+ MachineInstrBuilder MIB1 =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
+ .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
+ .addReg(DstReg)
+ .addImm(Imm16)
+ .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt));
+
+ transferImpOps(MI, MIB, MIB1);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ return false;
+}
+
+/// \brief Check whether the given 16-bit chunk replicated to full 64-bit width
+/// can be materialized with an ORR instruction.
+static bool canUseOrr(uint64_t Chunk, uint64_t &Encoding) {
+ Chunk = (Chunk << 48) | (Chunk << 32) | (Chunk << 16) | Chunk;
+
+ return AArch64_AM::processLogicalImmediate(Chunk, 64, Encoding);
+}
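+// For instance, the chunk 0x00FF replicates to 0x00FF00FF00FF00FF, which is
+// representable as a logical immediate, so canUseOrr would be expected to
+// succeed for it (illustrative value).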
+
+/// \brief Check for identical 16-bit chunks within the constant and if so
+/// materialize them with a single ORR instruction. The remaining one or two
+/// 16-bit chunks will be materialized with MOVK instructions.
+///
+/// This allows us to materialize constants like |A|B|A|A| or |A|B|C|A| (order
+/// of the chunks doesn't matter), assuming |A|A|A|A| can be materialized with
+/// an ORR instruction.
+///
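+/// Illustrative example (not from the original source): for the constant
+/// 0x00FF123400FF5678 the chunk 0x00FF occurs twice and replicates to the
+/// ORR-encodable value 0x00FF00FF00FF00FF, so the expected expansion is
+/// roughly:
+///
+///   ORR  x0, xzr, #0x00FF00FF00FF00FF
+///   MOVK x0, #0x5678
+///   MOVK x0, #0x1234, LSL #32
+///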
+static bool tryToreplicateChunks(uint64_t UImm, MachineInstr &MI,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI,
+ const AArch64InstrInfo *TII) {
+ typedef DenseMap<uint64_t, unsigned> CountMap;
+ CountMap Counts;
+
+ // Scan the constant and count how often every chunk occurs.
+ for (unsigned Idx = 0; Idx < 4; ++Idx)
+ ++Counts[getChunk(UImm, Idx)];
+
+ // Traverse the chunks to find one which occurs more than once.
+ for (CountMap::const_iterator Chunk = Counts.begin(), End = Counts.end();
+ Chunk != End; ++Chunk) {
+ const uint64_t ChunkVal = Chunk->first;
+ const unsigned Count = Chunk->second;
+
+ uint64_t Encoding = 0;
+
+ // We are looking for chunks which have two or three instances and can be
+ // materialized with an ORR instruction.
+ if ((Count != 2 && Count != 3) || !canUseOrr(ChunkVal, Encoding))
+ continue;
+
+ const bool CountThree = Count == 3;
+ // Create the ORR-immediate instruction.
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri))
+ .addOperand(MI.getOperand(0))
+ .addReg(AArch64::XZR)
+ .addImm(Encoding);
+
+ const unsigned DstReg = MI.getOperand(0).getReg();
+ const bool DstIsDead = MI.getOperand(0).isDead();
+
+ unsigned ShiftAmt = 0;
+ uint64_t Imm16 = 0;
+ // Find the first chunk not materialized with the ORR instruction.
+ for (; ShiftAmt < 64; ShiftAmt += 16) {
+ Imm16 = (UImm >> ShiftAmt) & 0xFFFF;
+
+ if (Imm16 != ChunkVal)
+ break;
+ }
+
+ // Create the first MOVK instruction.
+ MachineInstrBuilder MIB1 =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
+ .addReg(DstReg,
+ RegState::Define | getDeadRegState(DstIsDead && CountThree))
+ .addReg(DstReg)
+ .addImm(Imm16)
+ .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt));
+
+ // In case we have three instances the whole constant is now materialized
+ // and we can exit.
+ if (CountThree) {
+ transferImpOps(MI, MIB, MIB1);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ // Find the remaining chunk which needs to be materialized.
+ for (ShiftAmt += 16; ShiftAmt < 64; ShiftAmt += 16) {
+ Imm16 = (UImm >> ShiftAmt) & 0xFFFF;
+
+ if (Imm16 != ChunkVal)
+ break;
+ }
+
+ // Create the second MOVK instruction.
+ MachineInstrBuilder MIB2 =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
+ .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
+ .addReg(DstReg)
+ .addImm(Imm16)
+ .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt));
+
+ transferImpOps(MI, MIB, MIB2);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ return false;
+}
+
+/// \brief Check whether this chunk matches the pattern '1...0...'. This pattern
+/// starts a contiguous sequence of ones if we look at the bits from the LSB
+/// towards the MSB.
+static bool isStartChunk(uint64_t Chunk) {
+ if (Chunk == 0 || Chunk == UINT64_MAX)
+ return false;
+
+ return (CountLeadingOnes_64(Chunk) + countTrailingZeros(Chunk)) == 64;
+}
+
+/// \brief Check whether this chunk matches the pattern '0...1...'. This pattern
+/// ends a contiguous sequence of ones if we look at the bits from the LSB
+/// towards the MSB.
+static bool isEndChunk(uint64_t Chunk) {
+ if (Chunk == 0 || Chunk == UINT64_MAX)
+ return false;
+
+ return (countLeadingZeros(Chunk) + CountTrailingOnes_64(Chunk)) == 64;
+}
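+// For example (illustrative values), the sign-extended chunk
+// 0xFFFFFFFFFFFFF000 is a start chunk (ones down to bit 12), while
+// 0x00000000000000FF is an end chunk (ones up through bit 7).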
+
+/// \brief Clear or set all bits in the chunk at the given index.
+static uint64_t updateImm(uint64_t Imm, unsigned Idx, bool Clear) {
+ const uint64_t Mask = 0xFFFF;
+
+ if (Clear)
+ // Clear chunk in the immediate.
+ Imm &= ~(Mask << (Idx * 16));
+ else
+ // Set all bits in the immediate for the particular chunk.
+ Imm |= Mask << (Idx * 16);
+
+ return Imm;
+}
+
+/// \brief Check whether the constant contains a sequence of contiguous ones,
+/// which might be interrupted by one or two chunks. If so, materialize the
+/// sequence of contiguous ones with an ORR instruction.
+/// Materialize the chunks which are either interrupting the sequence or outside
+/// of the sequence with a MOVK instruction.
+///
+/// Assuming S is a chunk which starts the sequence (1...0...), E is a chunk
+/// which ends the sequence (0...1...). Then we are looking for constants which
+/// contain at least one S and E chunk.
+/// E.g. |E|A|B|S|, |A|E|B|S| or |A|B|E|S|.
+///
+/// We are also looking for constants like |S|A|B|E| where the contiguous
+/// sequence of ones wraps around the MSB into the LSB.
+///
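+/// Illustrative example (not from the original source): the constant
+/// 0x00FF12345678F000 has an end chunk (0x00FF) in position 3 and a start
+/// chunk (0xF000) in position 0, so the expected expansion is roughly:
+///
+///   ORR  x0, xzr, #0x00FFFFFFFFFFF000
+///   MOVK x0, #0x5678, LSL #16
+///   MOVK x0, #0x1234, LSL #32
+///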
+static bool trySequenceOfOnes(uint64_t UImm, MachineInstr &MI,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI,
+ const AArch64InstrInfo *TII) {
+ const int NotSet = -1;
+ const uint64_t Mask = 0xFFFF;
+
+ int StartIdx = NotSet;
+ int EndIdx = NotSet;
+ // Try to find the chunks which start/end a contiguous sequence of ones.
+ for (int Idx = 0; Idx < 4; ++Idx) {
+ int64_t Chunk = getChunk(UImm, Idx);
+ // Sign extend the 16-bit chunk to 64-bit.
+ Chunk = (Chunk << 48) >> 48;
+
+ if (isStartChunk(Chunk))
+ StartIdx = Idx;
+ else if (isEndChunk(Chunk))
+ EndIdx = Idx;
+ }
+
+ // Early exit in case we can't find a start/end chunk.
+ if (StartIdx == NotSet || EndIdx == NotSet)
+ return false;
+
+ // Outside of the contiguous sequence of ones everything needs to be zero.
+ uint64_t Outside = 0;
+ // Chunks between the start and end chunk need to have all their bits set.
+ uint64_t Inside = Mask;
+
+ // If our contiguous sequence of ones wraps around from the MSB into the LSB,
+ // just swap indices and pretend we are materializing a contiguous sequence
+ // of zeros surrounded by a contiguous sequence of ones.
+ if (StartIdx > EndIdx) {
+ std::swap(StartIdx, EndIdx);
+ std::swap(Outside, Inside);
+ }
+
+ uint64_t OrrImm = UImm;
+ int FirstMovkIdx = NotSet;
+ int SecondMovkIdx = NotSet;
+
+ // Find out which chunks we need to patch up to obtain a contiguous sequence
+ // of ones.
+ for (int Idx = 0; Idx < 4; ++Idx) {
+ const uint64_t Chunk = getChunk(UImm, Idx);
+
+ // Check whether we are looking at a chunk which is not part of the
+ // contiguous sequence of ones.
+ if ((Idx < StartIdx || EndIdx < Idx) && Chunk != Outside) {
+ OrrImm = updateImm(OrrImm, Idx, Outside == 0);
+
+ // Remember the index we need to patch.
+ if (FirstMovkIdx == NotSet)
+ FirstMovkIdx = Idx;
+ else
+ SecondMovkIdx = Idx;
+
+ // Check whether we are looking at a chunk which is part of the contiguous
+ // sequence of ones.
+ } else if (Idx > StartIdx && Idx < EndIdx && Chunk != Inside) {
+ OrrImm = updateImm(OrrImm, Idx, Inside != Mask);
+
+ // Remember the index we need to patch.
+ if (FirstMovkIdx == NotSet)
+ FirstMovkIdx = Idx;
+ else
+ SecondMovkIdx = Idx;
+ }
+ }
+ assert(FirstMovkIdx != NotSet && "Constant materializable with single ORR!");
+
+ // Create the ORR-immediate instruction.
+ uint64_t Encoding = 0;
+ AArch64_AM::processLogicalImmediate(OrrImm, 64, Encoding);
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri))
+ .addOperand(MI.getOperand(0))
+ .addReg(AArch64::XZR)
+ .addImm(Encoding);
+
+ const unsigned DstReg = MI.getOperand(0).getReg();
+ const bool DstIsDead = MI.getOperand(0).isDead();
+
+ const bool SingleMovk = SecondMovkIdx == NotSet;
+ // Create the first MOVK instruction.
+ MachineInstrBuilder MIB1 =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
+ .addReg(DstReg,
+ RegState::Define | getDeadRegState(DstIsDead && SingleMovk))
+ .addReg(DstReg)
+ .addImm(getChunk(UImm, FirstMovkIdx))
+ .addImm(
+ AArch64_AM::getShifterImm(AArch64_AM::LSL, FirstMovkIdx * 16));
+
+ // Early exit in case we only need to emit a single MOVK instruction.
+ if (SingleMovk) {
+ transferImpOps(MI, MIB, MIB1);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ // Create the second MOVK instruction.
+ MachineInstrBuilder MIB2 =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
+ .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
+ .addReg(DstReg)
+ .addImm(getChunk(UImm, SecondMovkIdx))
+ .addImm(
+ AArch64_AM::getShifterImm(AArch64_AM::LSL, SecondMovkIdx * 16));
+
+ transferImpOps(MI, MIB, MIB2);
+ MI.eraseFromParent();
+ return true;
+}
+
+/// \brief Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more
+/// real move-immediate instructions to synthesize the immediate.
+bool AArch64ExpandPseudo::expandMOVImm(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned BitSize) {
+ MachineInstr &MI = *MBBI;
+ uint64_t Imm = MI.getOperand(1).getImm();
+ const unsigned Mask = 0xFFFF;
+
+ // Try a MOVI instruction (aka ORR-immediate with the zero register).
+ uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
+ uint64_t Encoding;
+ if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
+ unsigned Opc = (BitSize == 32 ? AArch64::ORRWri : AArch64::ORRXri);
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc))
+ .addOperand(MI.getOperand(0))
+ .addReg(BitSize == 32 ? AArch64::WZR : AArch64::XZR)
+ .addImm(Encoding);
+ transferImpOps(MI, MIB, MIB);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ // Scan the immediate and count the number of 16-bit chunks which are either
+ // all ones or all zeros.
+ unsigned OneChunks = 0;
+ unsigned ZeroChunks = 0;
+ for (unsigned Shift = 0; Shift < BitSize; Shift += 16) {
+ const unsigned Chunk = (Imm >> Shift) & Mask;
+ if (Chunk == Mask)
+ OneChunks++;
+ else if (Chunk == 0)
+ ZeroChunks++;
+ }
+
+ // Since we can't materialize the constant with a single ORR instruction,
+ // let's see whether we can materialize 3/4 of the constant with an ORR
+ // instruction and use an additional MOVK instruction to materialize the
+ // remaining 1/4.
+ //
+ // We are looking for constants with a pattern like: |A|X|B|X| or |X|A|X|B|.
+ //
+ // E.g. assuming |A|X|A|X| is a pattern which can be materialized with ORR,
+ // we would create the following instruction sequence:
+ //
+ // ORR x0, xzr, |A|X|A|X|
+ // MOVK x0, |B|, LSL #16
+ //
+ // Only look at 64-bit constants which can't be materialized with a single
+ // instruction e.g. which have less than either three all zero or all one
+ // chunks.
+ //
+ // Ignore 32-bit constants here; they can always be materialized with a
+ // MOVZ/MOVN + MOVK pair. Since the 32-bit constant can't be materialized
+ // with a single ORR, the best sequence we can achieve is a ORR + MOVK pair.
+ // Thus we fall back to the default code below which in the best case creates
+ // a single MOVZ/MOVN instruction (in case one chunk is all zero or all one).
+ //
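+ // As a concrete, purely illustrative instance, 0x0FFF000012340000 has
+ // identical chunks in elements 0 and 2; replicating element 3 into element 1
+ // gives the ORR-encodable 0x0FFF00000FFF0000, so the expected sequence is
+ // roughly an ORR followed by MOVK #0x1234, LSL #16.
+ //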
+ if (BitSize == 64 && OneChunks < 3 && ZeroChunks < 3) {
+ // If we interpret the 64-bit constant as a v4i16, are elements 0 and 2
+ // identical?
+ if (getChunk(UImm, 0) == getChunk(UImm, 2)) {
+ // See if we can come up with a constant which can be materialized with
+ // ORR-immediate by replicating element 3 into element 1.
+ uint64_t OrrImm = replicateChunk(UImm, 3, 1);
+ if (tryOrrMovk(UImm, OrrImm, MI, MBB, MBBI, TII, 1))
+ return true;
+
+ // See if we can come up with a constant which can be materialized with
+ // ORR-immediate by replicating element 1 into element 3.
+ OrrImm = replicateChunk(UImm, 1, 3);
+ if (tryOrrMovk(UImm, OrrImm, MI, MBB, MBBI, TII, 3))
+ return true;
+
+ // If we interpret the 64-bit constant as a v4i16, are elements 1 and 3
+ // identical?
+ } else if (getChunk(UImm, 1) == getChunk(UImm, 3)) {
+ // See if we can come up with a constant which can be materialized with
+ // ORR-immediate by replicating element 2 into element 0.
+ uint64_t OrrImm = replicateChunk(UImm, 2, 0);
+ if (tryOrrMovk(UImm, OrrImm, MI, MBB, MBBI, TII, 0))
+ return true;
+
+ // See if we can come up with a constant which can be materialized with
+ // ORR-immediate by replicating element 0 into element 2.
+ OrrImm = replicateChunk(UImm, 0, 2);
+ if (tryOrrMovk(UImm, OrrImm, MI, MBB, MBBI, TII, 2))
+ return true;
+ }
+ }
+
+ // Check for identical 16-bit chunks within the constant and if so materialize
+ // them with a single ORR instruction. The remaining one or two 16-bit chunks
+ // will be materialized with MOVK instructions.
+ if (BitSize == 64 && tryToreplicateChunks(UImm, MI, MBB, MBBI, TII))
+ return true;
+
+ // Check whether the constant contains a sequence of contiguous ones, which
+ // might be interrupted by one or two chunks. If so, materialize the sequence
+ // of contiguous ones with an ORR instruction. Materialize the chunks which
+ // are either interrupting the sequence or outside of the sequence with a
+ // MOVK instruction.
+ if (BitSize == 64 && trySequenceOfOnes(UImm, MI, MBB, MBBI, TII))
+ return true;
+
+ // Use a MOVZ or MOVN instruction to set the high bits, followed by one or
+ // more MOVK instructions to insert additional 16-bit portions into the
+ // lower bits.
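+ // For example (illustrative value), 0xFFFFFFFFFFFF1234 has three all-one
+ // chunks, so after the bitwise negation below it is materialized with the
+ // single instruction MOVN x0, #0xEDCB.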
+ bool isNeg = false;
+
+ // Use MOVN to materialize the high bits if we have more all one chunks
+ // than all zero chunks.
+ if (OneChunks > ZeroChunks) {
+ isNeg = true;
+ Imm = ~Imm;
+ }
+
+ unsigned FirstOpc;
+ if (BitSize == 32) {
+ Imm &= (1LL << 32) - 1;
+ FirstOpc = (isNeg ? AArch64::MOVNWi : AArch64::MOVZWi);
+ } else {
+ FirstOpc = (isNeg ? AArch64::MOVNXi : AArch64::MOVZXi);
+ }
+ unsigned Shift = 0; // LSL amount for high bits with MOVZ/MOVN
+ unsigned LastShift = 0; // LSL amount for last MOVK
+ if (Imm != 0) {
+ unsigned LZ = countLeadingZeros(Imm);
+ unsigned TZ = countTrailingZeros(Imm);
+ Shift = ((63 - LZ) / 16) * 16;
+ LastShift = (TZ / 16) * 16;
+ }
+ unsigned Imm16 = (Imm >> Shift) & Mask;
+ unsigned DstReg = MI.getOperand(0).getReg();
+ bool DstIsDead = MI.getOperand(0).isDead();
+ MachineInstrBuilder MIB1 =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(FirstOpc))
+ .addReg(DstReg, RegState::Define |
+ getDeadRegState(DstIsDead && Shift == LastShift))
+ .addImm(Imm16)
+ .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift));
+
+ // If a MOVN was used for the high bits of a negative value, flip the rest
+ // of the bits back for use with MOVK.
+ if (isNeg)
+ Imm = ~Imm;
+
+ if (Shift == LastShift) {
+ transferImpOps(MI, MIB1, MIB1);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ MachineInstrBuilder MIB2;
+ unsigned Opc = (BitSize == 32 ? AArch64::MOVKWi : AArch64::MOVKXi);
+ while (Shift != LastShift) {
+ Shift -= 16;
+ Imm16 = (Imm >> Shift) & Mask;
+ if (Imm16 == (isNeg ? Mask : 0))
+ continue; // This 16-bit portion is already set correctly.
+ MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc))
+ .addReg(DstReg,
+ RegState::Define |
+ getDeadRegState(DstIsDead && Shift == LastShift))
+ .addReg(DstReg)
+ .addImm(Imm16)
+ .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift));
+ }
+
+ transferImpOps(MI, MIB1, MIB2);
+ MI.eraseFromParent();
+ return true;
+}
+
+/// \brief If MBBI references a pseudo instruction that should be expanded here,
+/// do the expansion and return true. Otherwise return false.
+bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI) {
+ MachineInstr &MI = *MBBI;
+ unsigned Opcode = MI.getOpcode();
+ switch (Opcode) {
+ default:
+ break;
+
+ case AArch64::ADDWrr:
+ case AArch64::SUBWrr:
+ case AArch64::ADDXrr:
+ case AArch64::SUBXrr:
+ case AArch64::ADDSWrr:
+ case AArch64::SUBSWrr:
+ case AArch64::ADDSXrr:
+ case AArch64::SUBSXrr:
+ case AArch64::ANDWrr:
+ case AArch64::ANDXrr:
+ case AArch64::BICWrr:
+ case AArch64::BICXrr:
+ case AArch64::ANDSWrr:
+ case AArch64::ANDSXrr:
+ case AArch64::BICSWrr:
+ case AArch64::BICSXrr:
+ case AArch64::EONWrr:
+ case AArch64::EONXrr:
+ case AArch64::EORWrr:
+ case AArch64::EORXrr:
+ case AArch64::ORNWrr:
+ case AArch64::ORNXrr:
+ case AArch64::ORRWrr:
+ case AArch64::ORRXrr: {
+ unsigned Opcode;
+ switch (MI.getOpcode()) {
+ default:
+ return false;
+ case AArch64::ADDWrr: Opcode = AArch64::ADDWrs; break;
+ case AArch64::SUBWrr: Opcode = AArch64::SUBWrs; break;
+ case AArch64::ADDXrr: Opcode = AArch64::ADDXrs; break;
+ case AArch64::SUBXrr: Opcode = AArch64::SUBXrs; break;
+ case AArch64::ADDSWrr: Opcode = AArch64::ADDSWrs; break;
+ case AArch64::SUBSWrr: Opcode = AArch64::SUBSWrs; break;
+ case AArch64::ADDSXrr: Opcode = AArch64::ADDSXrs; break;
+ case AArch64::SUBSXrr: Opcode = AArch64::SUBSXrs; break;
+ case AArch64::ANDWrr: Opcode = AArch64::ANDWrs; break;
+ case AArch64::ANDXrr: Opcode = AArch64::ANDXrs; break;
+ case AArch64::BICWrr: Opcode = AArch64::BICWrs; break;
+ case AArch64::BICXrr: Opcode = AArch64::BICXrs; break;
+ case AArch64::ANDSWrr: Opcode = AArch64::ANDSWrs; break;
+ case AArch64::ANDSXrr: Opcode = AArch64::ANDSXrs; break;
+ case AArch64::BICSWrr: Opcode = AArch64::BICSWrs; break;
+ case AArch64::BICSXrr: Opcode = AArch64::BICSXrs; break;
+ case AArch64::EONWrr: Opcode = AArch64::EONWrs; break;
+ case AArch64::EONXrr: Opcode = AArch64::EONXrs; break;
+ case AArch64::EORWrr: Opcode = AArch64::EORWrs; break;
+ case AArch64::EORXrr: Opcode = AArch64::EORXrs; break;
+ case AArch64::ORNWrr: Opcode = AArch64::ORNWrs; break;
+ case AArch64::ORNXrr: Opcode = AArch64::ORNXrs; break;
+ case AArch64::ORRWrr: Opcode = AArch64::ORRWrs; break;
+ case AArch64::ORRXrr: Opcode = AArch64::ORRXrs; break;
+ }
+ MachineInstrBuilder MIB1 =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opcode),
+ MI.getOperand(0).getReg())
+ .addOperand(MI.getOperand(1))
+ .addOperand(MI.getOperand(2))
+ .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
+ transferImpOps(MI, MIB1, MIB1);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ case AArch64::LOADgot: {
+ // Expand into ADRP + LDR.
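+ // In assembly terms this is roughly (illustrative syntax):
+ //   adrp x0, :got:sym
+ //   ldr  x0, [x0, :got_lo12:sym]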
+ unsigned DstReg = MI.getOperand(0).getReg();
+ const MachineOperand &MO1 = MI.getOperand(1);
+ unsigned Flags = MO1.getTargetFlags();
+ MachineInstrBuilder MIB1 =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg);
+ MachineInstrBuilder MIB2 =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::LDRXui))
+ .addOperand(MI.getOperand(0))
+ .addReg(DstReg);
+
+ if (MO1.isGlobal()) {
+ MIB1.addGlobalAddress(MO1.getGlobal(), 0, Flags | AArch64II::MO_PAGE);
+ MIB2.addGlobalAddress(MO1.getGlobal(), 0,
+ Flags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
+ } else if (MO1.isSymbol()) {
+ MIB1.addExternalSymbol(MO1.getSymbolName(), Flags | AArch64II::MO_PAGE);
+ MIB2.addExternalSymbol(MO1.getSymbolName(),
+ Flags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
+ } else {
+ assert(MO1.isCPI() &&
+ "Only expect globals, externalsymbols, or constant pools");
+ MIB1.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
+ Flags | AArch64II::MO_PAGE);
+ MIB2.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
+ Flags | AArch64II::MO_PAGEOFF |
+ AArch64II::MO_NC);
+ }
+
+ transferImpOps(MI, MIB1, MIB2);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ case AArch64::MOVaddr:
+ case AArch64::MOVaddrJT:
+ case AArch64::MOVaddrCP:
+ case AArch64::MOVaddrBA:
+ case AArch64::MOVaddrTLS:
+ case AArch64::MOVaddrEXT: {
+ // Expand into ADRP + ADD.
+ unsigned DstReg = MI.getOperand(0).getReg();
+ MachineInstrBuilder MIB1 =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg)
+ .addOperand(MI.getOperand(1));
+
+ MachineInstrBuilder MIB2 =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADDXri))
+ .addOperand(MI.getOperand(0))
+ .addReg(DstReg)
+ .addOperand(MI.getOperand(2))
+ .addImm(0);
+
+ transferImpOps(MI, MIB1, MIB2);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ case AArch64::MOVi32imm:
+ return expandMOVImm(MBB, MBBI, 32);
+ case AArch64::MOVi64imm:
+ return expandMOVImm(MBB, MBBI, 64);
+ case AArch64::RET_ReallyLR:
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::RET))
+ .addReg(AArch64::LR);
+ MI.eraseFromParent();
+ return true;
+ }
+ return false;
+}
+
+/// \brief Iterate over the instructions in basic block MBB and expand any
+/// pseudo instructions. Return true if anything was modified.
+bool AArch64ExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
+ bool Modified = false;
+
+ MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
+ while (MBBI != E) {
+ MachineBasicBlock::iterator NMBBI = std::next(MBBI);
+ Modified |= expandMI(MBB, MBBI);
+ MBBI = NMBBI;
+ }
+
+ return Modified;
+}
+
+bool AArch64ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
+ TII = static_cast<const AArch64InstrInfo *>(MF.getTarget().getInstrInfo());
+
+ bool Modified = false;
+ for (auto &MBB : MF)
+ Modified |= expandMBB(MBB);
+ return Modified;
+}
+
+/// \brief Returns an instance of the pseudo instruction expansion pass.
+FunctionPass *llvm::createAArch64ExpandPseudoPass() {
+ return new AArch64ExpandPseudo();
+}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/contrib/llvm/lib/Target/AArch64/AArch64FastISel.cpp
new file mode 100644
index 0000000..2164d77
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -0,0 +1,1994 @@
+//===-- AArch64FastISel.cpp - AArch64 FastISel implementation -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the AArch64-specific support for the FastISel class. Some
+// of the target-specific code is generated by tablegen in the file
+// AArch64GenFastISel.inc, which is #included here.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AArch64.h"
+#include "AArch64TargetMachine.h"
+#include "AArch64Subtarget.h"
+#include "MCTargetDesc/AArch64AddressingModes.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/FastISel.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Support/CommandLine.h"
+using namespace llvm;
+
+namespace {
+
+class AArch64FastISel : public FastISel {
+
+ class Address {
+ public:
+ typedef enum {
+ RegBase,
+ FrameIndexBase
+ } BaseKind;
+
+ private:
+ BaseKind Kind;
+ union {
+ unsigned Reg;
+ int FI;
+ } Base;
+ int64_t Offset;
+
+ public:
+ Address() : Kind(RegBase), Offset(0) { Base.Reg = 0; }
+ void setKind(BaseKind K) { Kind = K; }
+ BaseKind getKind() const { return Kind; }
+ bool isRegBase() const { return Kind == RegBase; }
+ bool isFIBase() const { return Kind == FrameIndexBase; }
+ void setReg(unsigned Reg) {
+ assert(isRegBase() && "Invalid base register access!");
+ Base.Reg = Reg;
+ }
+ unsigned getReg() const {
+ assert(isRegBase() && "Invalid base register access!");
+ return Base.Reg;
+ }
+ void setFI(unsigned FI) {
+ assert(isFIBase() && "Invalid base frame index access!");
+ Base.FI = FI;
+ }
+ unsigned getFI() const {
+ assert(isFIBase() && "Invalid base frame index access!");
+ return Base.FI;
+ }
+ void setOffset(int64_t O) { Offset = O; }
+ int64_t getOffset() { return Offset; }
+
+ bool isValid() { return isFIBase() || (isRegBase() && getReg() != 0); }
+ };
+
+ /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
+ /// make the right decision when generating code for different targets.
+ const AArch64Subtarget *Subtarget;
+ LLVMContext *Context;
+
+private:
+ // Selection routines.
+ bool SelectLoad(const Instruction *I);
+ bool SelectStore(const Instruction *I);
+ bool SelectBranch(const Instruction *I);
+ bool SelectIndirectBr(const Instruction *I);
+ bool SelectCmp(const Instruction *I);
+ bool SelectSelect(const Instruction *I);
+ bool SelectFPExt(const Instruction *I);
+ bool SelectFPTrunc(const Instruction *I);
+ bool SelectFPToInt(const Instruction *I, bool Signed);
+ bool SelectIntToFP(const Instruction *I, bool Signed);
+ bool SelectRem(const Instruction *I, unsigned ISDOpcode);
+ bool SelectCall(const Instruction *I, const char *IntrMemName);
+ bool SelectIntrinsicCall(const IntrinsicInst &I);
+ bool SelectRet(const Instruction *I);
+ bool SelectTrunc(const Instruction *I);
+ bool SelectIntExt(const Instruction *I);
+ bool SelectMul(const Instruction *I);
+
+ // Utility helper routines.
+ bool isTypeLegal(Type *Ty, MVT &VT);
+ bool isLoadStoreTypeLegal(Type *Ty, MVT &VT);
+ bool ComputeAddress(const Value *Obj, Address &Addr);
+ bool SimplifyAddress(Address &Addr, MVT VT, int64_t ScaleFactor,
+ bool UseUnscaled);
+ void AddLoadStoreOperands(Address &Addr, const MachineInstrBuilder &MIB,
+ unsigned Flags, bool UseUnscaled);
+ bool IsMemCpySmall(uint64_t Len, unsigned Alignment);
+ bool TryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
+ unsigned Alignment);
+ // Emit functions.
+ bool EmitCmp(Value *Src1Value, Value *Src2Value, bool isZExt);
+ bool EmitLoad(MVT VT, unsigned &ResultReg, Address Addr,
+ bool UseUnscaled = false);
+ bool EmitStore(MVT VT, unsigned SrcReg, Address Addr,
+ bool UseUnscaled = false);
+ unsigned EmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
+ unsigned Emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt);
+
+ unsigned AArch64MaterializeFP(const ConstantFP *CFP, MVT VT);
+ unsigned AArch64MaterializeGV(const GlobalValue *GV);
+
+ // Call handling routines.
+private:
+ CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
+ bool ProcessCallArgs(SmallVectorImpl<Value *> &Args,
+ SmallVectorImpl<unsigned> &ArgRegs,
+ SmallVectorImpl<MVT> &ArgVTs,
+ SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
+ SmallVectorImpl<unsigned> &RegArgs, CallingConv::ID CC,
+ unsigned &NumBytes);
+ bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
+ const Instruction *I, CallingConv::ID CC, unsigned &NumBytes);
+
+public:
+ // Backend specific FastISel code.
+ unsigned TargetMaterializeAlloca(const AllocaInst *AI) override;
+ unsigned TargetMaterializeConstant(const Constant *C) override;
+
+ explicit AArch64FastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo)
+ : FastISel(funcInfo, libInfo) {
+ Subtarget = &TM.getSubtarget<AArch64Subtarget>();
+ Context = &funcInfo.Fn->getContext();
+ }
+
+ bool TargetSelectInstruction(const Instruction *I) override;
+
+#include "AArch64GenFastISel.inc"
+};
+
+} // end anonymous namespace
+
+#include "AArch64GenCallingConv.inc"
+
+CCAssignFn *AArch64FastISel::CCAssignFnForCall(CallingConv::ID CC) const {
+ if (CC == CallingConv::WebKit_JS)
+ return CC_AArch64_WebKit_JS;
+ return Subtarget->isTargetDarwin() ? CC_AArch64_DarwinPCS : CC_AArch64_AAPCS;
+}
+
+unsigned AArch64FastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
+ assert(TLI.getValueType(AI->getType(), true) == MVT::i64 &&
+ "Alloca should always return a pointer.");
+
+ // Don't handle dynamic allocas.
+ if (!FuncInfo.StaticAllocaMap.count(AI))
+ return 0;
+
+ DenseMap<const AllocaInst *, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
+ unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
+ ResultReg)
+ .addFrameIndex(SI->second)
+ .addImm(0)
+ .addImm(0);
+ return ResultReg;
+ }
+
+ return 0;
+}
+
+unsigned AArch64FastISel::AArch64MaterializeFP(const ConstantFP *CFP, MVT VT) {
+ if (VT != MVT::f32 && VT != MVT::f64)
+ return 0;
+
+ const APFloat Val = CFP->getValueAPF();
+ bool is64bit = (VT == MVT::f64);
+
+ // This checks to see if we can use FMOV instructions to materialize
+ // a constant, otherwise we have to materialize via the constant pool.
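+ // For instance, a value such as 2.0 fits the 8-bit FMOV immediate encoding,
+ // whereas an arbitrary constant such as 1.1 generally does not and is loaded
+ // from the constant pool instead (illustrative examples).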
+ if (TLI.isFPImmLegal(Val, VT)) {
+ int Imm;
+ unsigned Opc;
+ if (is64bit) {
+ Imm = AArch64_AM::getFP64Imm(Val);
+ Opc = AArch64::FMOVDi;
+ } else {
+ Imm = AArch64_AM::getFP32Imm(Val);
+ Opc = AArch64::FMOVSi;
+ }
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addImm(Imm);
+ return ResultReg;
+ }
+
+ // Materialize via constant pool. MachineConstantPool wants an explicit
+ // alignment.
+ unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
+ if (Align == 0)
+ Align = DL.getTypeAllocSize(CFP->getType());
+
+ unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
+ unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
+ ADRPReg).addConstantPoolIndex(Idx, 0, AArch64II::MO_PAGE);
+
+ unsigned Opc = is64bit ? AArch64::LDRDui : AArch64::LDRSui;
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addReg(ADRPReg)
+ .addConstantPoolIndex(Idx, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
+ return ResultReg;
+}
+
+unsigned AArch64FastISel::AArch64MaterializeGV(const GlobalValue *GV) {
+ // We can't handle thread-local variables quickly yet.
+ if (GV->isThreadLocal())
+ return 0;
+
+ // MachO still uses GOT for large code-model accesses, but ELF requires
+ // movz/movk sequences, which FastISel doesn't handle yet.
+ if (TM.getCodeModel() != CodeModel::Small && !Subtarget->isTargetMachO())
+ return 0;
+
+ unsigned char OpFlags = Subtarget->ClassifyGlobalReference(GV, TM);
+
+ EVT DestEVT = TLI.getValueType(GV->getType(), true);
+ if (!DestEVT.isSimple())
+ return 0;
+
+ unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
+ unsigned ResultReg;
+
+ if (OpFlags & AArch64II::MO_GOT) {
+ // ADRP + LDRX
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
+ ADRPReg)
+ .addGlobalAddress(GV, 0, AArch64II::MO_GOT | AArch64II::MO_PAGE);
+
+ ResultReg = createResultReg(&AArch64::GPR64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::LDRXui),
+ ResultReg)
+ .addReg(ADRPReg)
+ .addGlobalAddress(GV, 0, AArch64II::MO_GOT | AArch64II::MO_PAGEOFF |
+ AArch64II::MO_NC);
+ } else {
+ // ADRP + ADDX
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
+ ADRPReg).addGlobalAddress(GV, 0, AArch64II::MO_PAGE);
+
+ ResultReg = createResultReg(&AArch64::GPR64spRegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
+ ResultReg)
+ .addReg(ADRPReg)
+ .addGlobalAddress(GV, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC)
+ .addImm(0);
+ }
+ return ResultReg;
+}
+
+unsigned AArch64FastISel::TargetMaterializeConstant(const Constant *C) {
+ EVT CEVT = TLI.getValueType(C->getType(), true);
+
+ // Only handle simple types.
+ if (!CEVT.isSimple())
+ return 0;
+ MVT VT = CEVT.getSimpleVT();
+
+ // FIXME: Handle ConstantInt.
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
+ return AArch64MaterializeFP(CFP, VT);
+ else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
+ return AArch64MaterializeGV(GV);
+
+ return 0;
+}
+
+// Computes the address to get to an object.
+bool AArch64FastISel::ComputeAddress(const Value *Obj, Address &Addr) {
+ const User *U = nullptr;
+ unsigned Opcode = Instruction::UserOp1;
+ if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
+ // Don't walk into other basic blocks unless the object is an alloca from
+ // another block, otherwise it may not have a virtual register assigned.
+ if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
+ FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
+ Opcode = I->getOpcode();
+ U = I;
+ }
+ } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
+ Opcode = C->getOpcode();
+ U = C;
+ }
+
+ if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
+ if (Ty->getAddressSpace() > 255)
+ // Fast instruction selection doesn't support the special
+ // address spaces.
+ return false;
+
+ switch (Opcode) {
+ default:
+ break;
+ case Instruction::BitCast: {
+ // Look through bitcasts.
+ return ComputeAddress(U->getOperand(0), Addr);
+ }
+ case Instruction::IntToPtr: {
+ // Look past no-op inttoptrs.
+ if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
+ return ComputeAddress(U->getOperand(0), Addr);
+ break;
+ }
+ case Instruction::PtrToInt: {
+ // Look past no-op ptrtoints.
+ if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
+ return ComputeAddress(U->getOperand(0), Addr);
+ break;
+ }
+ case Instruction::GetElementPtr: {
+ Address SavedAddr = Addr;
+ uint64_t TmpOffset = Addr.getOffset();
+
+ // Iterate through the GEP folding the constants into offsets where
+ // we can.
+ gep_type_iterator GTI = gep_type_begin(U);
+ for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
+ ++i, ++GTI) {
+ const Value *Op = *i;
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ const StructLayout *SL = DL.getStructLayout(STy);
+ unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
+ TmpOffset += SL->getElementOffset(Idx);
+ } else {
+ uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
+ for (;;) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
+ // Constant-offset addressing.
+ TmpOffset += CI->getSExtValue() * S;
+ break;
+ }
+ if (canFoldAddIntoGEP(U, Op)) {
+ // A compatible add with a constant operand. Fold the constant.
+ ConstantInt *CI =
+ cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
+ TmpOffset += CI->getSExtValue() * S;
+ // Iterate on the other operand.
+ Op = cast<AddOperator>(Op)->getOperand(0);
+ continue;
+ }
+ // Unsupported
+ goto unsupported_gep;
+ }
+ }
+ }
+
+ // Try to grab the base operand now.
+ Addr.setOffset(TmpOffset);
+ if (ComputeAddress(U->getOperand(0), Addr))
+ return true;
+
+ // We failed, restore everything and try the other options.
+ Addr = SavedAddr;
+
+ unsupported_gep:
+ break;
+ }
+ case Instruction::Alloca: {
+ const AllocaInst *AI = cast<AllocaInst>(Obj);
+ DenseMap<const AllocaInst *, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
+ Addr.setKind(Address::FrameIndexBase);
+ Addr.setFI(SI->second);
+ return true;
+ }
+ break;
+ }
+ }
+
+ // Try to get this in a register if nothing else has worked.
+ if (!Addr.isValid())
+ Addr.setReg(getRegForValue(Obj));
+ return Addr.isValid();
+}
+
+bool AArch64FastISel::isTypeLegal(Type *Ty, MVT &VT) {
+ EVT evt = TLI.getValueType(Ty, true);
+
+ // Only handle simple types.
+ if (evt == MVT::Other || !evt.isSimple())
+ return false;
+ VT = evt.getSimpleVT();
+
+ // This is a legal type, but it's not something we handle in fast-isel.
+ if (VT == MVT::f128)
+ return false;
+
+ // Handle all other legal types, i.e. a register that will directly hold this
+ // value.
+ return TLI.isTypeLegal(VT);
+}
+
+bool AArch64FastISel::isLoadStoreTypeLegal(Type *Ty, MVT &VT) {
+ if (isTypeLegal(Ty, VT))
+ return true;
+
+ // If this is a type that can be sign- or zero-extended to a basic operation,
+ // go ahead and accept it now. For stores, this reflects truncation.
+ if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
+ return true;
+
+ return false;
+}
+
+bool AArch64FastISel::SimplifyAddress(Address &Addr, MVT VT,
+ int64_t ScaleFactor, bool UseUnscaled) {
+ bool needsLowering = false;
+ int64_t Offset = Addr.getOffset();
+ switch (VT.SimpleTy) {
+ default:
+ return false;
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+ case MVT::i64:
+ case MVT::f32:
+ case MVT::f64:
+ if (!UseUnscaled)
+ // Using scaled, 12-bit, unsigned immediate offsets.
+ needsLowering = ((Offset & 0xfff) != Offset);
+ else
+ // Using unscaled, 9-bit, signed immediate offsets.
+ needsLowering = (Offset > 256 || Offset < -256);
+ break;
+ }
+
+ // If this is a stack pointer and the offset needs to be simplified, then put
+ // the alloca address into a register, set the base type back to register and
+ // continue. This should almost never happen.
+ if (needsLowering && Addr.getKind() == Address::FrameIndexBase) {
+ unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
+ ResultReg)
+ .addFrameIndex(Addr.getFI())
+ .addImm(0)
+ .addImm(0);
+ Addr.setKind(Address::RegBase);
+ Addr.setReg(ResultReg);
+ }
+
+ // Since the offset is too large for the load/store instruction get the
+ // reg+offset into a register.
+ if (needsLowering) {
+ uint64_t UnscaledOffset = Addr.getOffset() * ScaleFactor;
+ unsigned ResultReg = FastEmit_ri_(MVT::i64, ISD::ADD, Addr.getReg(), false,
+ UnscaledOffset, MVT::i64);
+ if (ResultReg == 0)
+ return false;
+ Addr.setReg(ResultReg);
+ Addr.setOffset(0);
+ }
+ return true;
+}
+
+void AArch64FastISel::AddLoadStoreOperands(Address &Addr,
+ const MachineInstrBuilder &MIB,
+ unsigned Flags, bool UseUnscaled) {
+ int64_t Offset = Addr.getOffset();
+ // Frame base works a bit differently. Handle it separately.
+ if (Addr.getKind() == Address::FrameIndexBase) {
+ int FI = Addr.getFI();
+ // FIXME: We shouldn't be using getObjectSize/getObjectAlignment. The size
+ // and alignment should be based on the VT.
+ MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
+ MachinePointerInfo::getFixedStack(FI, Offset), Flags,
+ MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
+ // Now add the rest of the operands.
+ MIB.addFrameIndex(FI).addImm(Offset).addMemOperand(MMO);
+ } else {
+ // Now add the rest of the operands.
+ MIB.addReg(Addr.getReg());
+ MIB.addImm(Offset);
+ }
+}
+
+bool AArch64FastISel::EmitLoad(MVT VT, unsigned &ResultReg, Address Addr,
+ bool UseUnscaled) {
+ // Negative offsets require unscaled, 9-bit, signed immediate offsets.
+ // Otherwise, we try using scaled, 12-bit, unsigned immediate offsets.
+ if (!UseUnscaled && Addr.getOffset() < 0)
+ UseUnscaled = true;
+
+ unsigned Opc;
+ const TargetRegisterClass *RC;
+ bool VTIsi1 = false;
+ int64_t ScaleFactor = 0;
+ switch (VT.SimpleTy) {
+ default:
+ return false;
+ case MVT::i1:
+ VTIsi1 = true;
+ // Intentional fall-through.
+ case MVT::i8:
+ Opc = UseUnscaled ? AArch64::LDURBBi : AArch64::LDRBBui;
+ RC = &AArch64::GPR32RegClass;
+ ScaleFactor = 1;
+ break;
+ case MVT::i16:
+ Opc = UseUnscaled ? AArch64::LDURHHi : AArch64::LDRHHui;
+ RC = &AArch64::GPR32RegClass;
+ ScaleFactor = 2;
+ break;
+ case MVT::i32:
+ Opc = UseUnscaled ? AArch64::LDURWi : AArch64::LDRWui;
+ RC = &AArch64::GPR32RegClass;
+ ScaleFactor = 4;
+ break;
+ case MVT::i64:
+ Opc = UseUnscaled ? AArch64::LDURXi : AArch64::LDRXui;
+ RC = &AArch64::GPR64RegClass;
+ ScaleFactor = 8;
+ break;
+ case MVT::f32:
+ Opc = UseUnscaled ? AArch64::LDURSi : AArch64::LDRSui;
+ RC = TLI.getRegClassFor(VT);
+ ScaleFactor = 4;
+ break;
+ case MVT::f64:
+ Opc = UseUnscaled ? AArch64::LDURDi : AArch64::LDRDui;
+ RC = TLI.getRegClassFor(VT);
+ ScaleFactor = 8;
+ break;
+ }
+ // Scale the offset.
+ if (!UseUnscaled) {
+ int64_t Offset = Addr.getOffset();
+ if (Offset & (ScaleFactor - 1))
+ // Retry using an unscaled, 9-bit, signed immediate offset.
+ return EmitLoad(VT, ResultReg, Addr, /*UseUnscaled*/ true);
+
+ Addr.setOffset(Offset / ScaleFactor);
+ }
+
+ // Simplify this down to something we can handle.
+ if (!SimplifyAddress(Addr, VT, UseUnscaled ? 1 : ScaleFactor, UseUnscaled))
+ return false;
+
+ // Create the base instruction, then add the operands.
+ ResultReg = createResultReg(RC);
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc), ResultReg);
+ AddLoadStoreOperands(Addr, MIB, MachineMemOperand::MOLoad, UseUnscaled);
+
+ // Loading an i1 requires special handling.
+ if (VTIsi1) {
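+ // AND the loaded value with 1 so only bit 0 survives in the 32-bit register.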
+ MRI.constrainRegClass(ResultReg, &AArch64::GPR32RegClass);
+ unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri),
+ ANDReg)
+ .addReg(ResultReg)
+ .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
+ ResultReg = ANDReg;
+ }
+ return true;
+}
+
+bool AArch64FastISel::SelectLoad(const Instruction *I) {
+ MVT VT;
+ // Verify we have a legal type before going any further. Currently, we handle
+ // simple types that will directly fit in a register (i32/f32/i64/f64) or
+ // those that can be sign or zero-extended to a basic operation (i1/i8/i16).
+ if (!isLoadStoreTypeLegal(I->getType(), VT) || cast<LoadInst>(I)->isAtomic())
+ return false;
+
+ // See if we can handle this address.
+ Address Addr;
+ if (!ComputeAddress(I->getOperand(0), Addr))
+ return false;
+
+ unsigned ResultReg;
+ if (!EmitLoad(VT, ResultReg, Addr))
+ return false;
+
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
+bool AArch64FastISel::EmitStore(MVT VT, unsigned SrcReg, Address Addr,
+ bool UseUnscaled) {
+ // Negative offsets require unscaled, 9-bit, signed immediate offsets.
+ // Otherwise, we try using scaled, 12-bit, unsigned immediate offsets.
+ if (!UseUnscaled && Addr.getOffset() < 0)
+ UseUnscaled = true;
+
+ unsigned StrOpc;
+ bool VTIsi1 = false;
+ int64_t ScaleFactor = 0;
+ // Using scaled, 12-bit, unsigned immediate offsets.
+ switch (VT.SimpleTy) {
+ default:
+ return false;
+ case MVT::i1:
+ VTIsi1 = true;
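+ // Intentional fall-through.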
+ case MVT::i8:
+ StrOpc = UseUnscaled ? AArch64::STURBBi : AArch64::STRBBui;
+ ScaleFactor = 1;
+ break;
+ case MVT::i16:
+ StrOpc = UseUnscaled ? AArch64::STURHHi : AArch64::STRHHui;
+ ScaleFactor = 2;
+ break;
+ case MVT::i32:
+ StrOpc = UseUnscaled ? AArch64::STURWi : AArch64::STRWui;
+ ScaleFactor = 4;
+ break;
+ case MVT::i64:
+ StrOpc = UseUnscaled ? AArch64::STURXi : AArch64::STRXui;
+ ScaleFactor = 8;
+ break;
+ case MVT::f32:
+ StrOpc = UseUnscaled ? AArch64::STURSi : AArch64::STRSui;
+ ScaleFactor = 4;
+ break;
+ case MVT::f64:
+ StrOpc = UseUnscaled ? AArch64::STURDi : AArch64::STRDui;
+ ScaleFactor = 8;
+ break;
+ }
+ // Scale the offset.
+ if (!UseUnscaled) {
+ int64_t Offset = Addr.getOffset();
+ if (Offset & (ScaleFactor - 1))
+ // Retry using an unscaled, 9-bit, signed immediate offset.
+ return EmitStore(VT, SrcReg, Addr, /*UseUnscaled*/ true);
+
+ Addr.setOffset(Offset / ScaleFactor);
+ }
+
+ // Simplify this down to something we can handle.
+ if (!SimplifyAddress(Addr, VT, UseUnscaled ? 1 : ScaleFactor, UseUnscaled))
+ return false;
+
+ // Storing an i1 requires special handling.
+ if (VTIsi1) {
+ MRI.constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
+ unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri),
+ ANDReg)
+ .addReg(SrcReg)
+ .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
+ SrcReg = ANDReg;
+ }
+ // Create the base instruction, then add the operands.
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(StrOpc)).addReg(SrcReg);
+ AddLoadStoreOperands(Addr, MIB, MachineMemOperand::MOStore, UseUnscaled);
+ return true;
+}
+
+bool AArch64FastISel::SelectStore(const Instruction *I) {
+ MVT VT;
+ Value *Op0 = I->getOperand(0);
+ // Verify we have a legal type before going any further. Currently, we handle
+ // simple types that will directly fit in a register (i32/f32/i64/f64) or
+ // those that can be sign or zero-extended to a basic operation (i1/i8/i16).
+ if (!isLoadStoreTypeLegal(Op0->getType(), VT) ||
+ cast<StoreInst>(I)->isAtomic())
+ return false;
+
+ // Get the value to be stored into a register.
+ unsigned SrcReg = getRegForValue(Op0);
+ if (SrcReg == 0)
+ return false;
+
+ // See if we can handle this address.
+ Address Addr;
+ if (!ComputeAddress(I->getOperand(1), Addr))
+ return false;
+
+ if (!EmitStore(VT, SrcReg, Addr))
+ return false;
+ return true;
+}
+
+static AArch64CC::CondCode getCompareCC(CmpInst::Predicate Pred) {
+ switch (Pred) {
+ case CmpInst::FCMP_ONE:
+ case CmpInst::FCMP_UEQ:
+ default:
+ // AL is our "false" for now. The other two need more compares.
+ return AArch64CC::AL;
+ case CmpInst::ICMP_EQ:
+ case CmpInst::FCMP_OEQ:
+ return AArch64CC::EQ;
+ case CmpInst::ICMP_SGT:
+ case CmpInst::FCMP_OGT:
+ return AArch64CC::GT;
+ case CmpInst::ICMP_SGE:
+ case CmpInst::FCMP_OGE:
+ return AArch64CC::GE;
+ case CmpInst::ICMP_UGT:
+ case CmpInst::FCMP_UGT:
+ return AArch64CC::HI;
+ case CmpInst::FCMP_OLT:
+ return AArch64CC::MI;
+ case CmpInst::ICMP_ULE:
+ case CmpInst::FCMP_OLE:
+ return AArch64CC::LS;
+ case CmpInst::FCMP_ORD:
+ return AArch64CC::VC;
+ case CmpInst::FCMP_UNO:
+ return AArch64CC::VS;
+ case CmpInst::FCMP_UGE:
+ return AArch64CC::PL;
+ case CmpInst::ICMP_SLT:
+ case CmpInst::FCMP_ULT:
+ return AArch64CC::LT;
+ case CmpInst::ICMP_SLE:
+ case CmpInst::FCMP_ULE:
+ return AArch64CC::LE;
+ case CmpInst::FCMP_UNE:
+ case CmpInst::ICMP_NE:
+ return AArch64CC::NE;
+ case CmpInst::ICMP_UGE:
+ return AArch64CC::HS;
+ case CmpInst::ICMP_ULT:
+ return AArch64CC::LO;
+ }
+}
+
+bool AArch64FastISel::SelectBranch(const Instruction *I) {
+ const BranchInst *BI = cast<BranchInst>(I);
+ MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
+ MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
+
+ if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
+ if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
+ // We may not handle every CC for now.
+ AArch64CC::CondCode CC = getCompareCC(CI->getPredicate());
+ if (CC == AArch64CC::AL)
+ return false;
+
+ // Emit the cmp.
+ if (!EmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
+ return false;
+
+ // Emit the branch.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
+ .addImm(CC)
+ .addMBB(TBB);
+ FuncInfo.MBB->addSuccessor(TBB);
+
+ FastEmitBranch(FBB, DbgLoc);
+ return true;
+ }
+ } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
+ MVT SrcVT;
+ if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
+ (isLoadStoreTypeLegal(TI->getOperand(0)->getType(), SrcVT))) {
+ unsigned CondReg = getRegForValue(TI->getOperand(0));
+ if (CondReg == 0)
+ return false;
+
+ // Issue an extract_subreg to get the lower 32-bits.
+ if (SrcVT == MVT::i64)
+ CondReg = FastEmitInst_extractsubreg(MVT::i32, CondReg, /*Kill=*/true,
+ AArch64::sub_32);
+
+ MRI.constrainRegClass(CondReg, &AArch64::GPR32RegClass);
+ unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(AArch64::ANDWri), ANDReg)
+ .addReg(CondReg)
+ .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(AArch64::SUBSWri))
+ .addReg(ANDReg)
+ .addReg(ANDReg)
+ .addImm(0)
+ .addImm(0);
+
+ unsigned CC = AArch64CC::NE;
+ if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
+ std::swap(TBB, FBB);
+ CC = AArch64CC::EQ;
+ }
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
+ .addImm(CC)
+ .addMBB(TBB);
+ FuncInfo.MBB->addSuccessor(TBB);
+ FastEmitBranch(FBB, DbgLoc);
+ return true;
+ }
+ } else if (const ConstantInt *CI =
+ dyn_cast<ConstantInt>(BI->getCondition())) {
+ uint64_t Imm = CI->getZExtValue();
+ MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::B))
+ .addMBB(Target);
+ FuncInfo.MBB->addSuccessor(Target);
+ return true;
+ }
+
+ unsigned CondReg = getRegForValue(BI->getCondition());
+ if (CondReg == 0)
+ return false;
+
+ // We've been divorced from our compare! Our block was split, and
+ // now our compare lives in a predecessor block. We mustn't
+ // re-compare here, as the children of the compare aren't guaranteed
+ // live across the block boundary (we *could* check for this).
+ // Regardless, the compare has been done in the predecessor block,
+ // and it left a value for us in a virtual register. Ergo, we test
+ // the one-bit value left in the virtual register.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SUBSWri),
+ AArch64::WZR)
+ .addReg(CondReg)
+ .addImm(0)
+ .addImm(0);
+
+ unsigned CC = AArch64CC::NE;
+ if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
+ std::swap(TBB, FBB);
+ CC = AArch64CC::EQ;
+ }
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))
+ .addImm(CC)
+ .addMBB(TBB);
+ FuncInfo.MBB->addSuccessor(TBB);
+ FastEmitBranch(FBB, DbgLoc);
+ return true;
+}
+
+bool AArch64FastISel::SelectIndirectBr(const Instruction *I) {
+ const IndirectBrInst *BI = cast<IndirectBrInst>(I);
+ unsigned AddrReg = getRegForValue(BI->getOperand(0));
+ if (AddrReg == 0)
+ return false;
+
+ // Emit the indirect branch.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BR))
+ .addReg(AddrReg);
+
+ // Make sure the CFG is up-to-date.
+ for (unsigned i = 0, e = BI->getNumSuccessors(); i != e; ++i)
+ FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[BI->getSuccessor(i)]);
+
+ return true;
+}
+
+bool AArch64FastISel::EmitCmp(Value *Src1Value, Value *Src2Value, bool isZExt) {
+ Type *Ty = Src1Value->getType();
+ EVT SrcEVT = TLI.getValueType(Ty, true);
+ if (!SrcEVT.isSimple())
+ return false;
+ MVT SrcVT = SrcEVT.getSimpleVT();
+
+ // Check to see if the 2nd operand is a constant that we can encode directly
+ // in the compare.
+ uint64_t Imm;
+ bool UseImm = false;
+ bool isNegativeImm = false;
+ if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
+ if (SrcVT == MVT::i64 || SrcVT == MVT::i32 || SrcVT == MVT::i16 ||
+ SrcVT == MVT::i8 || SrcVT == MVT::i1) {
+ const APInt &CIVal = ConstInt->getValue();
+
+ Imm = (isZExt) ? CIVal.getZExtValue() : CIVal.getSExtValue();
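+ // A negative immediate is negated here and compared with ADDS instead of
+ // SUBS below.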
+ if (CIVal.isNegative()) {
+ isNegativeImm = true;
+ Imm = -Imm;
+ }
+ // FIXME: We can handle more immediates using shifts.
+ UseImm = ((Imm & 0xfff) == Imm);
+ }
+ } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
+ if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
+ if (ConstFP->isZero() && !ConstFP->isNegative())
+ UseImm = true;
+ }
+
+ unsigned ZReg;
+ unsigned CmpOpc;
+ bool isICmp = true;
+ bool needsExt = false;
+ switch (SrcVT.SimpleTy) {
+ default:
+ return false;
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ needsExt = true;
+ // Intentional fall-through.
+ case MVT::i32:
+ ZReg = AArch64::WZR;
+ if (UseImm)
+ CmpOpc = isNegativeImm ? AArch64::ADDSWri : AArch64::SUBSWri;
+ else
+ CmpOpc = AArch64::SUBSWrr;
+ break;
+ case MVT::i64:
+ ZReg = AArch64::XZR;
+ if (UseImm)
+ CmpOpc = isNegativeImm ? AArch64::ADDSXri : AArch64::SUBSXri;
+ else
+ CmpOpc = AArch64::SUBSXrr;
+ break;
+ case MVT::f32:
+ isICmp = false;
+ CmpOpc = UseImm ? AArch64::FCMPSri : AArch64::FCMPSrr;
+ break;
+ case MVT::f64:
+ isICmp = false;
+ CmpOpc = UseImm ? AArch64::FCMPDri : AArch64::FCMPDrr;
+ break;
+ }
+
+ unsigned SrcReg1 = getRegForValue(Src1Value);
+ if (SrcReg1 == 0)
+ return false;
+
+ unsigned SrcReg2;
+ if (!UseImm) {
+ SrcReg2 = getRegForValue(Src2Value);
+ if (SrcReg2 == 0)
+ return false;
+ }
+
+ // We have i1, i8, or i16, we need to either zero extend or sign extend.
+ if (needsExt) {
+ SrcReg1 = EmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
+ if (SrcReg1 == 0)
+ return false;
+ if (!UseImm) {
+ SrcReg2 = EmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
+ if (SrcReg2 == 0)
+ return false;
+ }
+ }
+
+ if (isICmp) {
+ if (UseImm)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc))
+ .addReg(ZReg)
+ .addReg(SrcReg1)
+ .addImm(Imm)
+ .addImm(0);
+ else
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc))
+ .addReg(ZReg)
+ .addReg(SrcReg1)
+ .addReg(SrcReg2);
+ } else {
+ if (UseImm)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc))
+ .addReg(SrcReg1);
+ else
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc))
+ .addReg(SrcReg1)
+ .addReg(SrcReg2);
+ }
+ return true;
+}
+
+bool AArch64FastISel::SelectCmp(const Instruction *I) {
+ const CmpInst *CI = cast<CmpInst>(I);
+
+ // We may not handle every CC for now.
+ AArch64CC::CondCode CC = getCompareCC(CI->getPredicate());
+ if (CC == AArch64CC::AL)
+ return false;
+
+ // Emit the cmp.
+ if (!EmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
+ return false;
+
+ // Now set a register based on the comparison.
+ AArch64CC::CondCode invertedCC = getInvertedCondCode(CC);
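+ // CSINC Wd, WZR, WZR, invCC produces 1 when the original condition holds
+ // and 0 otherwise.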
+ unsigned ResultReg = createResultReg(&AArch64::GPR32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
+ ResultReg)
+ .addReg(AArch64::WZR)
+ .addReg(AArch64::WZR)
+ .addImm(invertedCC);
+
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
+bool AArch64FastISel::SelectSelect(const Instruction *I) {
+ const SelectInst *SI = cast<SelectInst>(I);
+
+ EVT DestEVT = TLI.getValueType(SI->getType(), true);
+ if (!DestEVT.isSimple())
+ return false;
+
+ MVT DestVT = DestEVT.getSimpleVT();
+ if (DestVT != MVT::i32 && DestVT != MVT::i64 && DestVT != MVT::f32 &&
+ DestVT != MVT::f64)
+ return false;
+
+ unsigned CondReg = getRegForValue(SI->getCondition());
+ if (CondReg == 0)
+ return false;
+ unsigned TrueReg = getRegForValue(SI->getTrueValue());
+ if (TrueReg == 0)
+ return false;
+ unsigned FalseReg = getRegForValue(SI->getFalseValue());
+ if (FalseReg == 0)
+ return false;
+
+
+ MRI.constrainRegClass(CondReg, &AArch64::GPR32RegClass);
+ unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri),
+ ANDReg)
+ .addReg(CondReg)
+ .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SUBSWri))
+ .addReg(ANDReg)
+ .addReg(ANDReg)
+ .addImm(0)
+ .addImm(0);
+
+ unsigned SelectOpc;
+ switch (DestVT.SimpleTy) {
+ default:
+ return false;
+ case MVT::i32:
+ SelectOpc = AArch64::CSELWr;
+ break;
+ case MVT::i64:
+ SelectOpc = AArch64::CSELXr;
+ break;
+ case MVT::f32:
+ SelectOpc = AArch64::FCSELSrrr;
+ break;
+ case MVT::f64:
+ SelectOpc = AArch64::FCSELDrrr;
+ break;
+ }
+
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SelectOpc),
+ ResultReg)
+ .addReg(TrueReg)
+ .addReg(FalseReg)
+ .addImm(AArch64CC::NE);
+
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
+bool AArch64FastISel::SelectFPExt(const Instruction *I) {
+ Value *V = I->getOperand(0);
+ if (!I->getType()->isDoubleTy() || !V->getType()->isFloatTy())
+ return false;
+
+ unsigned Op = getRegForValue(V);
+ if (Op == 0)
+ return false;
+
+ unsigned ResultReg = createResultReg(&AArch64::FPR64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTDSr),
+ ResultReg).addReg(Op);
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
+bool AArch64FastISel::SelectFPTrunc(const Instruction *I) {
+ Value *V = I->getOperand(0);
+ if (!I->getType()->isFloatTy() || !V->getType()->isDoubleTy())
+ return false;
+
+ unsigned Op = getRegForValue(V);
+ if (Op == 0)
+ return false;
+
+ unsigned ResultReg = createResultReg(&AArch64::FPR32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTSDr),
+ ResultReg).addReg(Op);
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
+// FPToUI and FPToSI
+bool AArch64FastISel::SelectFPToInt(const Instruction *I, bool Signed) {
+ MVT DestVT;
+ if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())
+ return false;
+
+ unsigned SrcReg = getRegForValue(I->getOperand(0));
+ if (SrcReg == 0)
+ return false;
+
+ EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType(), true);
+ if (SrcVT == MVT::f128)
+ return false;
+
+ unsigned Opc;
+ if (SrcVT == MVT::f64) {
+ if (Signed)
+ Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWDr : AArch64::FCVTZSUXDr;
+ else
+ Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWDr : AArch64::FCVTZUUXDr;
+ } else {
+ if (Signed)
+ Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWSr : AArch64::FCVTZSUXSr;
+ else
+ Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWSr : AArch64::FCVTZUUXSr;
+ }
+ unsigned ResultReg = createResultReg(
+ DestVT == MVT::i32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addReg(SrcReg);
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
+bool AArch64FastISel::SelectIntToFP(const Instruction *I, bool Signed) {
+ MVT DestVT;
+ if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())
+ return false;
+ assert((DestVT == MVT::f32 || DestVT == MVT::f64) &&
+ "Unexpected value type.");
+
+ unsigned SrcReg = getRegForValue(I->getOperand(0));
+ if (SrcReg == 0)
+ return false;
+
+ EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType(), true);
+
+ // Handle sign-extension.
+ if (SrcVT == MVT::i16 || SrcVT == MVT::i8 || SrcVT == MVT::i1) {
+ SrcReg =
+ EmitIntExt(SrcVT.getSimpleVT(), SrcReg, MVT::i32, /*isZExt*/ !Signed);
+ if (SrcReg == 0)
+ return false;
+ }
+
+ MRI.constrainRegClass(SrcReg, SrcVT == MVT::i64 ? &AArch64::GPR64RegClass
+ : &AArch64::GPR32RegClass);
+
+ unsigned Opc;
+ if (SrcVT == MVT::i64) {
+ if (Signed)
+ Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUXSri : AArch64::SCVTFUXDri;
+ else
+ Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUXSri : AArch64::UCVTFUXDri;
+ } else {
+ if (Signed)
+ Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUWSri : AArch64::SCVTFUWDri;
+ else
+ Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUWSri : AArch64::UCVTFUWDri;
+ }
+
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addReg(SrcReg);
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
+bool AArch64FastISel::ProcessCallArgs(
+ SmallVectorImpl<Value *> &Args, SmallVectorImpl<unsigned> &ArgRegs,
+ SmallVectorImpl<MVT> &ArgVTs, SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
+ SmallVectorImpl<unsigned> &RegArgs, CallingConv::ID CC,
+ unsigned &NumBytes) {
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
+ CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC));
+
+ // Get a count of how many bytes are to be pushed on the stack.
+ NumBytes = CCInfo.getNextStackOffset();
+
+ // Issue CALLSEQ_START
+ unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
+ .addImm(NumBytes);
+
+ // Process the args.
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ unsigned Arg = ArgRegs[VA.getValNo()];
+ MVT ArgVT = ArgVTs[VA.getValNo()];
+
+ // Handle arg promotion: SExt, ZExt, AExt.
+ switch (VA.getLocInfo()) {
+ case CCValAssign::Full:
+ break;
+ case CCValAssign::SExt: {
+ MVT DestVT = VA.getLocVT();
+ MVT SrcVT = ArgVT;
+ Arg = EmitIntExt(SrcVT, Arg, DestVT, /*isZExt*/ false);
+ if (Arg == 0)
+ return false;
+ break;
+ }
+ case CCValAssign::AExt:
+ // Intentional fall-through.
+ case CCValAssign::ZExt: {
+ MVT DestVT = VA.getLocVT();
+ MVT SrcVT = ArgVT;
+ Arg = EmitIntExt(SrcVT, Arg, DestVT, /*isZExt*/ true);
+ if (Arg == 0)
+ return false;
+ break;
+ }
+ default:
+ llvm_unreachable("Unknown arg promotion!");
+ }
+
+ // Now copy/store arg to correct locations.
+ if (VA.isRegLoc() && !VA.needsCustom()) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg);
+ RegArgs.push_back(VA.getLocReg());
+ } else if (VA.needsCustom()) {
+ // FIXME: Handle custom args.
+ return false;
+ } else {
+ assert(VA.isMemLoc() && "Assuming store on stack.");
+
+ // Need to store on the stack.
+ unsigned ArgSize = (ArgVT.getSizeInBits() + 7) / 8;
+
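+ // On big-endian targets, arguments smaller than 8 bytes go in the high
+ // part of their stack slot, so pad the offset by the remaining bytes.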
+ unsigned BEAlign = 0;
+ if (ArgSize < 8 && !Subtarget->isLittleEndian())
+ BEAlign = 8 - ArgSize;
+
+ Address Addr;
+ Addr.setKind(Address::RegBase);
+ Addr.setReg(AArch64::SP);
+ Addr.setOffset(VA.getLocMemOffset() + BEAlign);
+
+ if (!EmitStore(ArgVT, Arg, Addr))
+ return false;
+ }
+ }
+ return true;
+}
+
+bool AArch64FastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
+ const Instruction *I, CallingConv::ID CC,
+ unsigned &NumBytes) {
+ // Issue CALLSEQ_END
+ unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
+ .addImm(NumBytes)
+ .addImm(0);
+
+ // Now the return value.
+ if (RetVT != MVT::isVoid) {
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
+ CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC));
+
+ // Only handle a single return value.
+ if (RVLocs.size() != 1)
+ return false;
+
+ // Copy all of the result registers out of their specified physreg.
+ MVT CopyVT = RVLocs[0].getValVT();
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(RVLocs[0].getLocReg());
+ UsedRegs.push_back(RVLocs[0].getLocReg());
+
+ // Finally update the result.
+ UpdateValueMap(I, ResultReg);
+ }
+
+ return true;
+}
+
+bool AArch64FastISel::SelectCall(const Instruction *I,
+ const char *IntrMemName = nullptr) {
+ const CallInst *CI = cast<CallInst>(I);
+ const Value *Callee = CI->getCalledValue();
+
+ // Don't handle inline asm or intrinsics.
+ if (isa<InlineAsm>(Callee))
+ return false;
+
+ // Only handle global variable Callees.
+ const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
+ if (!GV)
+ return false;
+
+ // Check the calling convention.
+ ImmutableCallSite CS(CI);
+ CallingConv::ID CC = CS.getCallingConv();
+
+ // Let SDISel handle vararg functions.
+ PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
+ FunctionType *FTy = cast<FunctionType>(PT->getElementType());
+ if (FTy->isVarArg())
+ return false;
+
+ // Handle *simple* calls for now.
+ MVT RetVT;
+ Type *RetTy = I->getType();
+ if (RetTy->isVoidTy())
+ RetVT = MVT::isVoid;
+ else if (!isTypeLegal(RetTy, RetVT))
+ return false;
+
+ // Set up the argument vectors.
+ SmallVector<Value *, 8> Args;
+ SmallVector<unsigned, 8> ArgRegs;
+ SmallVector<MVT, 8> ArgVTs;
+ SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
+ Args.reserve(CS.arg_size());
+ ArgRegs.reserve(CS.arg_size());
+ ArgVTs.reserve(CS.arg_size());
+ ArgFlags.reserve(CS.arg_size());
+
+ for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
+ i != e; ++i) {
+ // If we're lowering a memory intrinsic instead of a regular call, skip the
+ // last two arguments, which shouldn't be passed to the underlying function.
+ if (IntrMemName && e - i <= 2)
+ break;
+
+ unsigned Arg = getRegForValue(*i);
+ if (Arg == 0)
+ return false;
+
+ ISD::ArgFlagsTy Flags;
+ unsigned AttrInd = i - CS.arg_begin() + 1;
+ if (CS.paramHasAttr(AttrInd, Attribute::SExt))
+ Flags.setSExt();
+ if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
+ Flags.setZExt();
+
+ // FIXME: Only handle *easy* calls for now.
+ if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
+ CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
+ CS.paramHasAttr(AttrInd, Attribute::Nest) ||
+ CS.paramHasAttr(AttrInd, Attribute::ByVal))
+ return false;
+
+ MVT ArgVT;
+ Type *ArgTy = (*i)->getType();
+ if (!isTypeLegal(ArgTy, ArgVT) &&
+ !(ArgVT == MVT::i1 || ArgVT == MVT::i8 || ArgVT == MVT::i16))
+ return false;
+
+ // We don't handle vector parameters yet.
+ if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
+ return false;
+
+ unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
+ Flags.setOrigAlign(OriginalAlignment);
+
+ Args.push_back(*i);
+ ArgRegs.push_back(Arg);
+ ArgVTs.push_back(ArgVT);
+ ArgFlags.push_back(Flags);
+ }
+
+ // Handle the arguments now that we've gotten them.
+ SmallVector<unsigned, 4> RegArgs;
+ unsigned NumBytes;
+ if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
+ return false;
+
+ // Issue the call.
+ MachineInstrBuilder MIB;
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BL));
+ if (!IntrMemName)
+ MIB.addGlobalAddress(GV, 0, 0);
+ else
+ MIB.addExternalSymbol(IntrMemName, 0);
+
+ // Add implicit physical register uses to the call.
+ for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
+ MIB.addReg(RegArgs[i], RegState::Implicit);
+
+ // Add a register mask with the call-preserved registers.
+ // Proper defs for return values will be added by setPhysRegsDeadExcept().
+ MIB.addRegMask(TRI.getCallPreservedMask(CS.getCallingConv()));
+
+ // Finish off the call including any return values.
+ SmallVector<unsigned, 4> UsedRegs;
+ if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes))
+ return false;
+
+ // Set all unused physreg defs as dead.
+ static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
+
+ return true;
+}
+
+bool AArch64FastISel::IsMemCpySmall(uint64_t Len, unsigned Alignment) {
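+ // Treat the copy as "small" if it needs at most about four aligned
+ // transfers, or is under 32 bytes when the alignment is unknown.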
+ if (Alignment)
+ return Len / Alignment <= 4;
+ else
+ return Len < 32;
+}
+
+bool AArch64FastISel::TryEmitSmallMemCpy(Address Dest, Address Src,
+ uint64_t Len, unsigned Alignment) {
+ // Make sure we don't bloat code by inlining very large memcpy's.
+ if (!IsMemCpySmall(Len, Alignment))
+ return false;
+
+ int64_t UnscaledOffset = 0;
+ Address OrigDest = Dest;
+ Address OrigSrc = Src;
+
+ while (Len) {
+ MVT VT;
+ if (!Alignment || Alignment >= 8) {
+ if (Len >= 8)
+ VT = MVT::i64;
+ else if (Len >= 4)
+ VT = MVT::i32;
+ else if (Len >= 2)
+ VT = MVT::i16;
+ else {
+ VT = MVT::i8;
+ }
+ } else {
+ // Bound based on alignment.
+ if (Len >= 4 && Alignment == 4)
+ VT = MVT::i32;
+ else if (Len >= 2 && Alignment == 2)
+ VT = MVT::i16;
+ else {
+ VT = MVT::i8;
+ }
+ }
+
+ bool RV;
+ unsigned ResultReg;
+ RV = EmitLoad(VT, ResultReg, Src);
+ if (!RV)
+ return false;
+
+ RV = EmitStore(VT, ResultReg, Dest);
+ if (!RV)
+ return false;
+
+ int64_t Size = VT.getSizeInBits() / 8;
+ Len -= Size;
+ UnscaledOffset += Size;
+
+ // We need to recompute the unscaled offset for each iteration.
+ Dest.setOffset(OrigDest.getOffset() + UnscaledOffset);
+ Src.setOffset(OrigSrc.getOffset() + UnscaledOffset);
+ }
+
+ return true;
+}
+
+bool AArch64FastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
+ // FIXME: Handle more intrinsics.
+ switch (I.getIntrinsicID()) {
+ default:
+ return false;
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove: {
+ const MemTransferInst &MTI = cast<MemTransferInst>(I);
+ // Don't handle volatile.
+ if (MTI.isVolatile())
+ return false;
+
+ // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
+ // we would emit dead code because we don't currently handle memmoves.
+ bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
+ if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
+ // Small memcpy's are common enough that we want to do them without a call
+ // if possible.
+ uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
+ unsigned Alignment = MTI.getAlignment();
+ if (IsMemCpySmall(Len, Alignment)) {
+ Address Dest, Src;
+ if (!ComputeAddress(MTI.getRawDest(), Dest) ||
+ !ComputeAddress(MTI.getRawSource(), Src))
+ return false;
+ if (TryEmitSmallMemCpy(Dest, Src, Len, Alignment))
+ return true;
+ }
+ }
+
+ if (!MTI.getLength()->getType()->isIntegerTy(64))
+ return false;
+
+ if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
+ // Fast instruction selection doesn't support the special
+ // address spaces.
+ return false;
+
+ const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
+ return SelectCall(&I, IntrMemName);
+ }
+ case Intrinsic::memset: {
+ const MemSetInst &MSI = cast<MemSetInst>(I);
+ // Don't handle volatile.
+ if (MSI.isVolatile())
+ return false;
+
+ if (!MSI.getLength()->getType()->isIntegerTy(64))
+ return false;
+
+ if (MSI.getDestAddressSpace() > 255)
+ // Fast instruction selection doesn't support the special
+ // address spaces.
+ return false;
+
+ return SelectCall(&I, "memset");
+ }
+ case Intrinsic::trap: {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BRK))
+ .addImm(1);
+ return true;
+ }
+ }
+ return false;
+}
+
+bool AArch64FastISel::SelectRet(const Instruction *I) {
+ const ReturnInst *Ret = cast<ReturnInst>(I);
+ const Function &F = *I->getParent()->getParent();
+
+ if (!FuncInfo.CanLowerReturn)
+ return false;
+
+ if (F.isVarArg())
+ return false;
+
+ // Build a list of return value registers.
+ SmallVector<unsigned, 4> RetRegs;
+
+ if (Ret->getNumOperands() > 0) {
+ CallingConv::ID CC = F.getCallingConv();
+ SmallVector<ISD::OutputArg, 4> Outs;
+ GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
+
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ValLocs;
+ CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
+ I->getContext());
+ CCAssignFn *RetCC = CC == CallingConv::WebKit_JS ? RetCC_AArch64_WebKit_JS
+ : RetCC_AArch64_AAPCS;
+ CCInfo.AnalyzeReturn(Outs, RetCC);
+
+ // Only handle a single return value for now.
+ if (ValLocs.size() != 1)
+ return false;
+
+ CCValAssign &VA = ValLocs[0];
+ const Value *RV = Ret->getOperand(0);
+
+ // Don't bother handling odd stuff for now.
+ if (VA.getLocInfo() != CCValAssign::Full)
+ return false;
+ // Only handle register returns for now.
+ if (!VA.isRegLoc())
+ return false;
+ unsigned Reg = getRegForValue(RV);
+ if (Reg == 0)
+ return false;
+
+ unsigned SrcReg = Reg + VA.getValNo();
+ unsigned DestReg = VA.getLocReg();
+ // Avoid a cross-class copy. This is very unlikely.
+ if (!MRI.getRegClass(SrcReg)->contains(DestReg))
+ return false;
+
+ EVT RVEVT = TLI.getValueType(RV->getType());
+ if (!RVEVT.isSimple())
+ return false;
+
+ // Vectors (of > 1 lane) in big endian need tricky handling.
+ if (RVEVT.isVector() && RVEVT.getVectorNumElements() > 1)
+ return false;
+
+ MVT RVVT = RVEVT.getSimpleVT();
+ if (RVVT == MVT::f128)
+ return false;
+ MVT DestVT = VA.getValVT();
+ // Special handling for extended integers.
+ if (RVVT != DestVT) {
+ if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
+ return false;
+
+ if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
+ return false;
+
+ bool isZExt = Outs[0].Flags.isZExt();
+ SrcReg = EmitIntExt(RVVT, SrcReg, DestVT, isZExt);
+ if (SrcReg == 0)
+ return false;
+ }
+
+ // Make the copy.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);
+
+ // Add register to return instruction.
+ RetRegs.push_back(VA.getLocReg());
+ }
+
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(AArch64::RET_ReallyLR));
+ for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
+ MIB.addReg(RetRegs[i], RegState::Implicit);
+ return true;
+}
+
+bool AArch64FastISel::SelectTrunc(const Instruction *I) {
+ Type *DestTy = I->getType();
+ Value *Op = I->getOperand(0);
+ Type *SrcTy = Op->getType();
+
+ EVT SrcEVT = TLI.getValueType(SrcTy, true);
+ EVT DestEVT = TLI.getValueType(DestTy, true);
+ if (!SrcEVT.isSimple())
+ return false;
+ if (!DestEVT.isSimple())
+ return false;
+
+ MVT SrcVT = SrcEVT.getSimpleVT();
+ MVT DestVT = DestEVT.getSimpleVT();
+
+ if (SrcVT != MVT::i64 && SrcVT != MVT::i32 && SrcVT != MVT::i16 &&
+ SrcVT != MVT::i8)
+ return false;
+ if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8 &&
+ DestVT != MVT::i1)
+ return false;
+
+ unsigned SrcReg = getRegForValue(Op);
+ if (!SrcReg)
+ return false;
+
+ // If we're truncating from i64 to a smaller non-legal type, then generate an
+ // AND. Otherwise, we know the high bits are undefined and a truncate doesn't
+ // generate any code.
+ if (SrcVT == MVT::i64) {
+ uint64_t Mask = 0;
+ switch (DestVT.SimpleTy) {
+ default:
+ // Trunc i64 to i32 is handled by the target-independent fast-isel.
+ return false;
+ case MVT::i1:
+ Mask = 0x1;
+ break;
+ case MVT::i8:
+ Mask = 0xff;
+ break;
+ case MVT::i16:
+ Mask = 0xffff;
+ break;
+ }
+ // Issue an extract_subreg to get the lower 32-bits.
+ unsigned Reg32 = FastEmitInst_extractsubreg(MVT::i32, SrcReg, /*Kill=*/true,
+ AArch64::sub_32);
+ MRI.constrainRegClass(Reg32, &AArch64::GPR32RegClass);
+ // Create the AND instruction which performs the actual truncation.
+ unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri),
+ ANDReg)
+ .addReg(Reg32)
+ .addImm(AArch64_AM::encodeLogicalImmediate(Mask, 32));
+ SrcReg = ANDReg;
+ }
+
+ UpdateValueMap(I, SrcReg);
+ return true;
+}
+
+unsigned AArch64FastISel::Emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt) {
+ assert((DestVT == MVT::i8 || DestVT == MVT::i16 || DestVT == MVT::i32 ||
+ DestVT == MVT::i64) &&
+ "Unexpected value type.");
+ // Handle i8 and i16 as i32.
+ if (DestVT == MVT::i8 || DestVT == MVT::i16)
+ DestVT = MVT::i32;
+
+ if (isZExt) {
+ MRI.constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
+ unsigned ResultReg = createResultReg(&AArch64::GPR32spRegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri),
+ ResultReg)
+ .addReg(SrcReg)
+ .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
+
+ if (DestVT == MVT::i64) {
+ // We're zero-extending i1 to i64. The ANDWri Wd, Ws, #1 implicitly clears the
+ // upper 32 bits. Emit a SUBREG_TO_REG to extend from Wd to Xd.
+ unsigned Reg64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(AArch64::SUBREG_TO_REG), Reg64)
+ .addImm(0)
+ .addReg(ResultReg)
+ .addImm(AArch64::sub_32);
+ ResultReg = Reg64;
+ }
+ return ResultReg;
+ } else {
+ if (DestVT == MVT::i64) {
+ // FIXME: We're SExt i1 to i64.
+ return 0;
+ }
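+ // SBFM Wd, Wn, #0, #0 replicates bit 0 across the register, i.e. it
+ // sign-extends the i1.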
+ unsigned ResultReg = createResultReg(&AArch64::GPR32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SBFMWri),
+ ResultReg)
+ .addReg(SrcReg)
+ .addImm(0)
+ .addImm(0);
+ return ResultReg;
+ }
+}
+
+unsigned AArch64FastISel::EmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
+ bool isZExt) {
+ assert(DestVT != MVT::i1 && "ZeroExt/SignExt an i1?");
+
+ // FastISel does not have plumbing to deal with extensions where the SrcVT or
+ // DestVT are odd things, so test to make sure that they are both types we can
+ // handle (i1/i8/i16/i32 for SrcVT and i8/i16/i32/i64 for DestVT), otherwise
+ // bail out to SelectionDAG.
+ if (((DestVT != MVT::i8) && (DestVT != MVT::i16) &&
+ (DestVT != MVT::i32) && (DestVT != MVT::i64)) ||
+ ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) &&
+ (SrcVT != MVT::i16) && (SrcVT != MVT::i32)))
+ return 0;
+
+ unsigned Opc;
+ unsigned Imm = 0;
+
+ switch (SrcVT.SimpleTy) {
+ default:
+ return 0;
+ case MVT::i1:
+ return Emiti1Ext(SrcReg, DestVT, isZExt);
+ case MVT::i8:
+ if (DestVT == MVT::i64)
+ Opc = isZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
+ else
+ Opc = isZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
+ Imm = 7;
+ break;
+ case MVT::i16:
+ if (DestVT == MVT::i64)
+ Opc = isZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
+ else
+ Opc = isZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
+ Imm = 15;
+ break;
+ case MVT::i32:
+ assert(DestVT == MVT::i64 && "IntExt i32 to i32?!?");
+ Opc = isZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
+ Imm = 31;
+ break;
+ }
+
+ // Handle i8 and i16 as i32.
+ if (DestVT == MVT::i8 || DestVT == MVT::i16)
+ DestVT = MVT::i32;
+ else if (DestVT == MVT::i64) {
+ unsigned Src64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(AArch64::SUBREG_TO_REG), Src64)
+ .addImm(0)
+ .addReg(SrcReg)
+ .addImm(AArch64::sub_32);
+ SrcReg = Src64;
+ }
+
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addReg(SrcReg)
+ .addImm(0)
+ .addImm(Imm);
+
+ return ResultReg;
+}
+
+bool AArch64FastISel::SelectIntExt(const Instruction *I) {
+ // On AArch64, in general, integer casts don't involve legal types; this code
+ // handles promotable integers. The high bits for a type smaller than
+ // the register size are assumed to be undefined.
+ Type *DestTy = I->getType();
+ Value *Src = I->getOperand(0);
+ Type *SrcTy = Src->getType();
+
+ bool isZExt = isa<ZExtInst>(I);
+ unsigned SrcReg = getRegForValue(Src);
+ if (!SrcReg)
+ return false;
+
+ EVT SrcEVT = TLI.getValueType(SrcTy, true);
+ EVT DestEVT = TLI.getValueType(DestTy, true);
+ if (!SrcEVT.isSimple())
+ return false;
+ if (!DestEVT.isSimple())
+ return false;
+
+ MVT SrcVT = SrcEVT.getSimpleVT();
+ MVT DestVT = DestEVT.getSimpleVT();
+ unsigned ResultReg = EmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
+ if (ResultReg == 0)
+ return false;
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
+bool AArch64FastISel::SelectRem(const Instruction *I, unsigned ISDOpcode) {
+ EVT DestEVT = TLI.getValueType(I->getType(), true);
+ if (!DestEVT.isSimple())
+ return false;
+
+ MVT DestVT = DestEVT.getSimpleVT();
+ if (DestVT != MVT::i64 && DestVT != MVT::i32)
+ return false;
+
+ unsigned DivOpc;
+ bool is64bit = (DestVT == MVT::i64);
+ switch (ISDOpcode) {
+ default:
+ return false;
+ case ISD::SREM:
+ DivOpc = is64bit ? AArch64::SDIVXr : AArch64::SDIVWr;
+ break;
+ case ISD::UREM:
+ DivOpc = is64bit ? AArch64::UDIVXr : AArch64::UDIVWr;
+ break;
+ }
+ unsigned MSubOpc = is64bit ? AArch64::MSUBXrrr : AArch64::MSUBWrrr;
+ unsigned Src0Reg = getRegForValue(I->getOperand(0));
+ if (!Src0Reg)
+ return false;
+
+ unsigned Src1Reg = getRegForValue(I->getOperand(1));
+ if (!Src1Reg)
+ return false;
+
+ unsigned QuotReg = createResultReg(TLI.getRegClassFor(DestVT));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(DivOpc), QuotReg)
+ .addReg(Src0Reg)
+ .addReg(Src1Reg);
+ // The remainder is computed as numerator - (quotient * denominator) using the
+ // MSUB instruction.
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MSubOpc), ResultReg)
+ .addReg(QuotReg)
+ .addReg(Src1Reg)
+ .addReg(Src0Reg);
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
+bool AArch64FastISel::SelectMul(const Instruction *I) {
+ EVT SrcEVT = TLI.getValueType(I->getOperand(0)->getType(), true);
+ if (!SrcEVT.isSimple())
+ return false;
+ MVT SrcVT = SrcEVT.getSimpleVT();
+
+ // Must be simple value type. Don't handle vectors.
+ if (SrcVT != MVT::i64 && SrcVT != MVT::i32 && SrcVT != MVT::i16 &&
+ SrcVT != MVT::i8)
+ return false;
+
+ unsigned Opc;
+ unsigned ZReg;
+ switch (SrcVT.SimpleTy) {
+ default:
+ return false;
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+ ZReg = AArch64::WZR;
+ Opc = AArch64::MADDWrrr;
+ SrcVT = MVT::i32;
+ break;
+ case MVT::i64:
+ ZReg = AArch64::XZR;
+ Opc = AArch64::MADDXrrr;
+ break;
+ }
+
+ unsigned Src0Reg = getRegForValue(I->getOperand(0));
+ if (!Src0Reg)
+ return false;
+
+ unsigned Src1Reg = getRegForValue(I->getOperand(1));
+ if (!Src1Reg)
+ return false;
+
+ // Create the base instruction, then add the operands.
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(SrcVT));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addReg(Src0Reg)
+ .addReg(Src1Reg)
+ .addReg(ZReg);
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
+bool AArch64FastISel::TargetSelectInstruction(const Instruction *I) {
+ switch (I->getOpcode()) {
+ default:
+ break;
+ case Instruction::Load:
+ return SelectLoad(I);
+ case Instruction::Store:
+ return SelectStore(I);
+ case Instruction::Br:
+ return SelectBranch(I);
+ case Instruction::IndirectBr:
+ return SelectIndirectBr(I);
+ case Instruction::FCmp:
+ case Instruction::ICmp:
+ return SelectCmp(I);
+ case Instruction::Select:
+ return SelectSelect(I);
+ case Instruction::FPExt:
+ return SelectFPExt(I);
+ case Instruction::FPTrunc:
+ return SelectFPTrunc(I);
+ case Instruction::FPToSI:
+ return SelectFPToInt(I, /*Signed=*/true);
+ case Instruction::FPToUI:
+ return SelectFPToInt(I, /*Signed=*/false);
+ case Instruction::SIToFP:
+ return SelectIntToFP(I, /*Signed=*/true);
+ case Instruction::UIToFP:
+ return SelectIntToFP(I, /*Signed=*/false);
+ case Instruction::SRem:
+ return SelectRem(I, ISD::SREM);
+ case Instruction::URem:
+ return SelectRem(I, ISD::UREM);
+ case Instruction::Call:
+ if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
+ return SelectIntrinsicCall(*II);
+ return SelectCall(I);
+ case Instruction::Ret:
+ return SelectRet(I);
+ case Instruction::Trunc:
+ return SelectTrunc(I);
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ return SelectIntExt(I);
+ case Instruction::Mul:
+ // FIXME: This really should be handled by the target-independent selector.
+ return SelectMul(I);
+ }
+ return false;
+ // Silence warnings.
+ (void)&CC_AArch64_DarwinPCS_VarArg;
+}
+
+namespace llvm {
+llvm::FastISel *AArch64::createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo) {
+ return new AArch64FastISel(funcInfo, libInfo);
+}
+}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 7318230..9c33717 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -1,4 +1,4 @@
-//===- AArch64FrameLowering.cpp - AArch64 Frame Information ---------------===//
+//===- AArch64FrameLowering.cpp - AArch64 Frame Lowering -------*- C++ -*-====//
//
// The LLVM Compiler Infrastructure
//
@@ -11,236 +11,445 @@
//
//===----------------------------------------------------------------------===//
-#include "AArch64.h"
#include "AArch64FrameLowering.h"
-#include "AArch64MachineFunctionInfo.h"
#include "AArch64InstrInfo.h"
+#include "AArch64MachineFunctionInfo.h"
+#include "AArch64Subtarget.h"
+#include "AArch64TargetMachine.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
-#include "llvm/IR/Function.h"
-#include "llvm/MC/MachineLocation.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-void AArch64FrameLowering::splitSPAdjustments(uint64_t Total,
- uint64_t &Initial,
- uint64_t &Residual) const {
- // 0x1f0 here is a pessimistic (i.e. realistic) boundary: x-register LDP
- // instructions have a 7-bit signed immediate scaled by 8, giving a reach of
- // 0x1f8, but stack adjustment should always be a multiple of 16.
- if (Total <= 0x1f0) {
- Initial = Total;
- Residual = 0;
- } else {
- Initial = 0x1f0;
- Residual = Total - Initial;
+#define DEBUG_TYPE "frame-info"
+
+static cl::opt<bool> EnableRedZone("aarch64-redzone",
+ cl::desc("enable use of redzone on AArch64"),
+ cl::init(false), cl::Hidden);
+
+STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");
+
+static unsigned estimateStackSize(MachineFunction &MF) {
+ const MachineFrameInfo *FFI = MF.getFrameInfo();
+ int Offset = 0;
+ for (int i = FFI->getObjectIndexBegin(); i != 0; ++i) {
+ int FixedOff = -FFI->getObjectOffset(i);
+ if (FixedOff > Offset)
+ Offset = FixedOff;
+ }
+ for (unsigned i = 0, e = FFI->getObjectIndexEnd(); i != e; ++i) {
+ if (FFI->isDeadObjectIndex(i))
+ continue;
+ Offset += FFI->getObjectSize(i);
+ unsigned Align = FFI->getObjectAlignment(i);
+ // Adjust to alignment boundary
+ Offset = (Offset + Align - 1) / Align * Align;
}
+ // This does not include the 16 bytes used for fp and lr.
+ return (unsigned)Offset;
}
-void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const {
- AArch64MachineFunctionInfo *FuncInfo =
- MF.getInfo<AArch64MachineFunctionInfo>();
- MachineBasicBlock &MBB = MF.front();
- MachineBasicBlock::iterator MBBI = MBB.begin();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
- DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
+ if (!EnableRedZone)
+ return false;
+ // Don't use the red zone if the function explicitly asks us not to.
+ // This is typically used for kernel code.
+ if (MF.getFunction()->getAttributes().hasAttribute(
+ AttributeSet::FunctionIndex, Attribute::NoRedZone))
+ return false;
- MachineModuleInfo &MMI = MF.getMMI();
- const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
- bool NeedsFrameMoves = MMI.hasDebugInfo()
- || MF.getFunction()->needsUnwindTableEntry();
-
- uint64_t NumInitialBytes, NumResidualBytes;
-
- // Currently we expect the stack to be laid out by
- // sub sp, sp, #initial
- // stp x29, x30, [sp, #offset]
- // ...
- // str xxx, [sp, #offset]
- // sub sp, sp, #rest (possibly via extra instructions).
- if (MFI->getCalleeSavedInfo().size()) {
- // If there are callee-saved registers, we want to store them efficiently as
- // a block, and virtual base assignment happens too early to do it for us so
- // we adjust the stack in two phases: first just for callee-saved fiddling,
- // then to allocate the rest of the frame.
- splitSPAdjustments(MFI->getStackSize(), NumInitialBytes, NumResidualBytes);
- } else {
- // If there aren't any callee-saved registers, two-phase adjustment is
- // inefficient. It's more efficient to adjust with NumInitialBytes too
- // because when we're in a "callee pops argument space" situation, that pop
- // must be tacked onto Initial for correctness.
- NumInitialBytes = MFI->getStackSize();
- NumResidualBytes = 0;
- }
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
+ unsigned NumBytes = AFI->getLocalStackSize();
+
+ // Note: currently hasFP() is always true for hasCalls(), but that's an
+ // implementation detail of the current code, not a strict requirement,
+ // so stay safe here and check both.
+ if (MFI->hasCalls() || hasFP(MF) || NumBytes > 128)
+ return false;
+ return true;
+}
- // Tell everyone else how much adjustment we're expecting them to use. In
- // particular if an adjustment is required for a tail call the epilogue could
- // have a different view of things.
- FuncInfo->setInitialStackAdjust(NumInitialBytes);
-
- emitSPUpdate(MBB, MBBI, DL, TII, AArch64::X16, -NumInitialBytes,
- MachineInstr::FrameSetup);
-
- if (NeedsFrameMoves && NumInitialBytes) {
- // We emit this update even if the CFA is set from a frame pointer later so
- // that the CFA is valid in the interim.
- MCSymbol *SPLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::PROLOG_LABEL))
- .addSym(SPLabel);
-
- MachineLocation Dst(MachineLocation::VirtualFP);
- unsigned Reg = MRI->getDwarfRegNum(AArch64::XSP, true);
- MMI.addFrameInst(
- MCCFIInstruction::createDefCfa(SPLabel, Reg, -NumInitialBytes));
+/// hasFP - Return true if the specified function should have a dedicated frame
+/// pointer register.
+bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+
+#ifndef NDEBUG
+ const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo();
+ assert(!RegInfo->needsStackRealignment(MF) &&
+ "No stack realignment on AArch64!");
+#endif
+
+ return (MFI->hasCalls() || MFI->hasVarSizedObjects() ||
+ MFI->isFrameAddressTaken());
+}
+
+/// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
+/// not required, we reserve argument space for call sites in the function
+/// immediately on entry to the current function. This eliminates the need for
+/// add/sub sp brackets around call sites. Returns true if the call frame is
+/// included as part of the stack frame.
+bool
+AArch64FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
+ return !MF.getFrameInfo()->hasVarSizedObjects();
+}
+
+void AArch64FrameLowering::eliminateCallFramePseudoInstr(
+ MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
+ const AArch64InstrInfo *TII =
+ static_cast<const AArch64InstrInfo *>(MF.getTarget().getInstrInfo());
+ DebugLoc DL = I->getDebugLoc();
+ int Opc = I->getOpcode();
+ bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
+ uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;
+
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+ if (!TFI->hasReservedCallFrame(MF)) {
+ unsigned Align = getStackAlignment();
+
+ int64_t Amount = I->getOperand(0).getImm();
+ Amount = RoundUpToAlignment(Amount, Align);
+ if (!IsDestroy)
+ Amount = -Amount;
+
+ // N.b. if CalleePopAmount is valid but zero (i.e. callee would pop, but it
+ // doesn't have to pop anything), then the first operand will be zero too so
+ // this adjustment is a no-op.
+ if (CalleePopAmount == 0) {
+ // FIXME: in-function stack adjustment for calls is limited to 24-bits
+ // because there's no guaranteed temporary register available.
+ //
+      // ADD/SUB (immediate) has only LSL #0 and LSL #12 available.
+ // 1) For offset <= 12-bit, we use LSL #0
+ // 2) For 12-bit <= offset <= 24-bit, we use two instructions. One uses
+ // LSL #0, and the other uses LSL #12.
+ //
+      // Most call frames will be allocated at the start of a function, so
+ // this is OK, but it is a limitation that needs dealing with.
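+      //
+      // Illustrative example (not part of this change): an aligned
+      // adjustment of 0x12340 bytes would be materialized as two
+      // subtractions, roughly:
+      //   sub sp, sp, #0x12, lsl #12   // 0x12000
+      //   sub sp, sp, #0x340           // remaining 0x340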
+ assert(Amount > -0xffffff && Amount < 0xffffff && "call frame too large");
+ emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, Amount, TII);
+ }
+ } else if (CalleePopAmount != 0) {
+ // If the calling convention demands that the callee pops arguments from the
+ // stack, we want to add it back if we have a reserved call frame.
+ assert(CalleePopAmount < 0xffffff && "call frame too large");
+ emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, -CalleePopAmount,
+ TII);
}
+ MBB.erase(I);
+}
- // Otherwise we need to set the frame pointer and/or add a second stack
- // adjustment.
-
- bool FPNeedsSetting = hasFP(MF);
- for (; MBBI != MBB.end(); ++MBBI) {
- // Note that this search makes strong assumptions about the operation used
- // to store the frame-pointer: it must be "STP x29, x30, ...". This could
- // change in future, but until then there's no point in implementing
- // untestable more generic cases.
- if (FPNeedsSetting && MBBI->getOpcode() == AArch64::LSPair64_STR
- && MBBI->getOperand(0).getReg() == AArch64::X29) {
- int64_t X29FrameIdx = MBBI->getOperand(2).getIndex();
- FuncInfo->setFramePointerOffset(MFI->getObjectOffset(X29FrameIdx));
-
- ++MBBI;
- emitRegUpdate(MBB, MBBI, DL, TII, AArch64::X29, AArch64::XSP,
- AArch64::X29,
- NumInitialBytes + MFI->getObjectOffset(X29FrameIdx),
- MachineInstr::FrameSetup);
+void AArch64FrameLowering::emitCalleeSavedFrameMoves(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ unsigned FramePtr) const {
+ MachineFunction &MF = *MBB.getParent();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineModuleInfo &MMI = MF.getMMI();
+ const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
+ const TargetInstrInfo *TII = MF.getTarget().getInstrInfo();
+ DebugLoc DL = MBB.findDebugLoc(MBBI);
- // The offset adjustment used when emitting debugging locations relative
- // to whatever frame base is set. AArch64 uses the default frame base (FP
- // or SP) and this adjusts the calculations to be correct.
- MFI->setOffsetAdjustment(- MFI->getObjectOffset(X29FrameIdx)
- - MFI->getStackSize());
-
- if (NeedsFrameMoves) {
- MCSymbol *FPLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::PROLOG_LABEL))
- .addSym(FPLabel);
- unsigned Reg = MRI->getDwarfRegNum(AArch64::X29, true);
- unsigned Offset = MFI->getObjectOffset(X29FrameIdx);
- MMI.addFrameInst(MCCFIInstruction::createDefCfa(FPLabel, Reg, Offset));
- }
+ // Add callee saved registers to move list.
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
+ if (CSI.empty())
+ return;
- FPNeedsSetting = false;
+ const DataLayout *TD = MF.getTarget().getDataLayout();
+ bool HasFP = hasFP(MF);
+
+ // Calculate amount of bytes used for return address storing.
+ int stackGrowth = -TD->getPointerSize(0);
+
+ // Calculate offsets.
+ int64_t saveAreaOffset = (HasFP ? 2 : 1) * stackGrowth;
+ unsigned TotalSkipped = 0;
+ for (const auto &Info : CSI) {
+ unsigned Reg = Info.getReg();
+ int64_t Offset = MFI->getObjectOffset(Info.getFrameIdx()) -
+ getOffsetOfLocalArea() + saveAreaOffset;
+
+ // Don't output a new CFI directive if we're re-saving the frame pointer or
+ // link register. This happens when the PrologEpilogInserter has inserted an
+ // extra "STP" of the frame pointer and link register -- the "emitPrologue"
+ // method automatically generates the directives when frame pointers are
+ // used. If we generate CFI directives for the extra "STP"s, the linker will
+ // lose track of the correct values for the frame pointer and link register.
+ if (HasFP && (FramePtr == Reg || Reg == AArch64::LR)) {
+ TotalSkipped += stackGrowth;
+ continue;
}
- if (!MBBI->getFlag(MachineInstr::FrameSetup))
- break;
+ unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
+ unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
+ nullptr, DwarfReg, Offset - TotalSkipped));
+ BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
}
+}
- assert(!FPNeedsSetting && "Frame pointer couldn't be set");
+void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const Function *Fn = MF.getFunction();
+ const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
+ MF.getTarget().getRegisterInfo());
+ const TargetInstrInfo *TII = MF.getTarget().getInstrInfo();
+ MachineModuleInfo &MMI = MF.getMMI();
+ AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
+ bool needsFrameMoves = MMI.hasDebugInfo() || Fn->needsUnwindTableEntry();
+ bool HasFP = hasFP(MF);
+ DebugLoc DL = MBB.findDebugLoc(MBBI);
- emitSPUpdate(MBB, MBBI, DL, TII, AArch64::X16, -NumResidualBytes,
- MachineInstr::FrameSetup);
+ int NumBytes = (int)MFI->getStackSize();
+ if (!AFI->hasStackFrame()) {
+ assert(!HasFP && "unexpected function without stack frame but with FP");
+
+ // All of the stack allocation is for locals.
+ AFI->setLocalStackSize(NumBytes);
+
+ // Label used to tie together the PROLOG_LABEL and the MachineMoves.
+ MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
+
+ // REDZONE: If the stack size is less than 128 bytes, we don't need
+ // to actually allocate.
+ if (NumBytes && !canUseRedZone(MF)) {
+ emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII,
+ MachineInstr::FrameSetup);
+
+ // Encode the stack size of the leaf function.
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(FrameLabel, -NumBytes));
+ BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ } else if (NumBytes) {
+ ++NumRedZoneFunctions;
+ }
- // Now we emit the rest of the frame setup information, if necessary: we've
- // already noted the FP and initial SP moves so we're left with the prologue's
- // final SP update and callee-saved register locations.
- if (!NeedsFrameMoves)
return;
+ }
+
+ // Only set up FP if we actually need to.
+ int FPOffset = 0;
+ if (HasFP) {
+    // First instruction must a) allocate the stack and b) have an immediate
+    // that is negative and even (a multiple of -2).
+ assert((MBBI->getOpcode() == AArch64::STPXpre ||
+ MBBI->getOpcode() == AArch64::STPDpre) &&
+ MBBI->getOperand(3).getReg() == AArch64::SP &&
+ MBBI->getOperand(4).getImm() < 0 &&
+ (MBBI->getOperand(4).getImm() & 1) == 0);
+
+ // Frame pointer is fp = sp - 16. Since the STPXpre subtracts the space
+ // required for the callee saved register area we get the frame pointer
+    // by adding that offset - 16 = -getImm()*8 - 2*8 = -(getImm() + 2) * 8.
+ FPOffset = -(MBBI->getOperand(4).getImm() + 2) * 8;
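+    // For example (illustrative): an STPXpre immediate of -4 allocates a
+    // 32-byte callee-save area, so FPOffset = -(-4 + 2) * 8 = 16 and the
+    // frame pointer ends up 16 bytes above the post-decrement sp, at the
+    // slot holding the old fp/lr pair (see the stack diagram below).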
+ assert(FPOffset >= 0 && "Bad Framepointer Offset");
+ }
- // Reuse the label if appropriate, so create it in this outer scope.
- MCSymbol *CSLabel = 0;
+ // Move past the saves of the callee-saved registers.
+ while (MBBI->getOpcode() == AArch64::STPXi ||
+ MBBI->getOpcode() == AArch64::STPDi ||
+ MBBI->getOpcode() == AArch64::STPXpre ||
+ MBBI->getOpcode() == AArch64::STPDpre) {
+ ++MBBI;
+ NumBytes -= 16;
+ }
+ assert(NumBytes >= 0 && "Negative stack allocation size!?");
+ if (HasFP) {
+ // Issue sub fp, sp, FPOffset or
+ // mov fp,sp when FPOffset is zero.
+ // Note: All stores of callee-saved registers are marked as "FrameSetup".
+ // This code marks the instruction(s) that set the FP also.
+ emitFrameOffset(MBB, MBBI, DL, AArch64::FP, AArch64::SP, FPOffset, TII,
+ MachineInstr::FrameSetup);
+ }
- // The rest of the stack adjustment
- if (!hasFP(MF) && NumResidualBytes) {
- CSLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::PROLOG_LABEL))
- .addSym(CSLabel);
+ // All of the remaining stack allocations are for locals.
+ AFI->setLocalStackSize(NumBytes);
- MachineLocation Dst(MachineLocation::VirtualFP);
- unsigned Reg = MRI->getDwarfRegNum(AArch64::XSP, true);
- unsigned Offset = NumResidualBytes + NumInitialBytes;
- MMI.addFrameInst(MCCFIInstruction::createDefCfa(CSLabel, Reg, -Offset));
+ // Allocate space for the rest of the frame.
+ if (NumBytes) {
+ // If we're a leaf function, try using the red zone.
+ if (!canUseRedZone(MF))
+ emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII,
+ MachineInstr::FrameSetup);
}
- // And any callee-saved registers (it's fine to leave them to the end here,
- // because the old values are still valid at this point.
- const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
- if (CSI.size()) {
- if (!CSLabel) {
- CSLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::PROLOG_LABEL))
- .addSym(CSLabel);
+ // If we need a base pointer, set it up here. It's whatever the value of the
+ // stack pointer is at this point. Any variable size objects will be allocated
+ // after this, so we can still use the base pointer to reference locals.
+ //
+ // FIXME: Clarify FrameSetup flags here.
+ // Note: Use emitFrameOffset() like above for FP if the FrameSetup flag is
+ // needed.
+ //
+ if (RegInfo->hasBasePointer(MF))
+ TII->copyPhysReg(MBB, MBBI, DL, AArch64::X19, AArch64::SP, false);
+
+ if (needsFrameMoves) {
+ const DataLayout *TD = MF.getTarget().getDataLayout();
+ const int StackGrowth = -TD->getPointerSize(0);
+ unsigned FramePtr = RegInfo->getFrameRegister(MF);
+
+ // An example of the prologue:
+ //
+ // .globl __foo
+ // .align 2
+ // __foo:
+ // Ltmp0:
+ // .cfi_startproc
+ // .cfi_personality 155, ___gxx_personality_v0
+ // Leh_func_begin:
+ // .cfi_lsda 16, Lexception33
+ //
+ // stp xa,bx, [sp, -#offset]!
+ // ...
+ // stp x28, x27, [sp, #offset-32]
+ // stp fp, lr, [sp, #offset-16]
+ // add fp, sp, #offset - 16
+ // sub sp, sp, #1360
+ //
+ // The Stack:
+ // +-------------------------------------------+
+ // 10000 | ........ | ........ | ........ | ........ |
+ // 10004 | ........ | ........ | ........ | ........ |
+ // +-------------------------------------------+
+ // 10008 | ........ | ........ | ........ | ........ |
+ // 1000c | ........ | ........ | ........ | ........ |
+ // +===========================================+
+ // 10010 | X28 Register |
+ // 10014 | X28 Register |
+ // +-------------------------------------------+
+ // 10018 | X27 Register |
+ // 1001c | X27 Register |
+ // +===========================================+
+ // 10020 | Frame Pointer |
+ // 10024 | Frame Pointer |
+ // +-------------------------------------------+
+ // 10028 | Link Register |
+ // 1002c | Link Register |
+ // +===========================================+
+ // 10030 | ........ | ........ | ........ | ........ |
+ // 10034 | ........ | ........ | ........ | ........ |
+ // +-------------------------------------------+
+ // 10038 | ........ | ........ | ........ | ........ |
+ // 1003c | ........ | ........ | ........ | ........ |
+ // +-------------------------------------------+
+ //
+ // [sp] = 10030 :: >>initial value<<
+ // sp = 10020 :: stp fp, lr, [sp, #-16]!
+ // fp = sp == 10020 :: mov fp, sp
+ // [sp] == 10020 :: stp x28, x27, [sp, #-16]!
+ // sp == 10010 :: >>final value<<
+ //
+ // The frame pointer (w29) points to address 10020. If we use an offset of
+ // '16' from 'w29', we get the CFI offsets of -8 for w30, -16 for w29, -24
+ // for w27, and -32 for w28:
+ //
+ // Ltmp1:
+ // .cfi_def_cfa w29, 16
+ // Ltmp2:
+ // .cfi_offset w30, -8
+ // Ltmp3:
+ // .cfi_offset w29, -16
+ // Ltmp4:
+ // .cfi_offset w27, -24
+ // Ltmp5:
+ // .cfi_offset w28, -32
+
+ if (HasFP) {
+ // Define the current CFA rule to use the provided FP.
+ unsigned Reg = RegInfo->getDwarfRegNum(FramePtr, true);
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfa(nullptr, Reg, 2 * StackGrowth));
+ BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
+ // Record the location of the stored LR
+ unsigned LR = RegInfo->getDwarfRegNum(AArch64::LR, true);
+ CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createOffset(nullptr, LR, StackGrowth));
+ BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
+ // Record the location of the stored FP
+ CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createOffset(nullptr, Reg, 2 * StackGrowth));
+ BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ } else {
+ // Encode the stack size of the leaf function.
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, -MFI->getStackSize()));
+ BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
}
- for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
- E = CSI.end(); I != E; ++I) {
- unsigned Offset = MFI->getObjectOffset(I->getFrameIdx());
- unsigned Reg = MRI->getDwarfRegNum(I->getReg(), true);
- MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, Reg, Offset));
- }
+ // Now emit the moves for whatever callee saved regs we have.
+ emitCalleeSavedFrameMoves(MBB, MBBI, FramePtr);
}
}
-void
-AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const {
- AArch64MachineFunctionInfo *FuncInfo =
- MF.getInfo<AArch64MachineFunctionInfo>();
+static bool isCalleeSavedRegister(unsigned Reg, const MCPhysReg *CSRegs) {
+ for (unsigned i = 0; CSRegs[i]; ++i)
+ if (Reg == CSRegs[i])
+ return true;
+ return false;
+}
+
+static bool isCSRestore(MachineInstr *MI, const MCPhysReg *CSRegs) {
+ unsigned RtIdx = 0;
+ if (MI->getOpcode() == AArch64::LDPXpost ||
+ MI->getOpcode() == AArch64::LDPDpost)
+ RtIdx = 1;
+
+ if (MI->getOpcode() == AArch64::LDPXpost ||
+ MI->getOpcode() == AArch64::LDPDpost ||
+ MI->getOpcode() == AArch64::LDPXi || MI->getOpcode() == AArch64::LDPDi) {
+ if (!isCalleeSavedRegister(MI->getOperand(RtIdx).getReg(), CSRegs) ||
+ !isCalleeSavedRegister(MI->getOperand(RtIdx + 1).getReg(), CSRegs) ||
+ MI->getOperand(RtIdx + 2).getReg() != AArch64::SP)
+ return false;
+ return true;
+ }
+
+ return false;
+}
+void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ assert(MBBI->isReturn() && "Can only insert epilog into returning blocks");
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const AArch64InstrInfo *TII =
+ static_cast<const AArch64InstrInfo *>(MF.getTarget().getInstrInfo());
+ const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
+ MF.getTarget().getRegisterInfo());
DebugLoc DL = MBBI->getDebugLoc();
- const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
- MachineFrameInfo &MFI = *MF.getFrameInfo();
unsigned RetOpcode = MBBI->getOpcode();
+ int NumBytes = MFI->getStackSize();
+ const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
+
  // Initial and residual are named for consistency with the prologue. Note that
// in the epilogue, the residual adjustment is executed first.
- uint64_t NumInitialBytes = FuncInfo->getInitialStackAdjust();
- uint64_t NumResidualBytes = MFI.getStackSize() - NumInitialBytes;
uint64_t ArgumentPopSize = 0;
- if (RetOpcode == AArch64::TC_RETURNdi ||
- RetOpcode == AArch64::TC_RETURNxi) {
- MachineOperand &JumpTarget = MBBI->getOperand(0);
+ if (RetOpcode == AArch64::TCRETURNdi || RetOpcode == AArch64::TCRETURNri) {
MachineOperand &StackAdjust = MBBI->getOperand(1);
- MachineInstrBuilder MIB;
- if (RetOpcode == AArch64::TC_RETURNdi) {
- MIB = BuildMI(MBB, MBBI, DL, TII.get(AArch64::TAIL_Bimm));
- if (JumpTarget.isGlobal()) {
- MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
- JumpTarget.getTargetFlags());
- } else {
- assert(JumpTarget.isSymbol() && "unexpected tail call destination");
- MIB.addExternalSymbol(JumpTarget.getSymbolName(),
- JumpTarget.getTargetFlags());
- }
- } else {
- assert(RetOpcode == AArch64::TC_RETURNxi && JumpTarget.isReg()
- && "Unexpected tail call");
-
- MIB = BuildMI(MBB, MBBI, DL, TII.get(AArch64::TAIL_BRx));
- MIB.addReg(JumpTarget.getReg(), RegState::Kill);
- }
-
- // Add the extra operands onto the new tail call instruction even though
- // they're not used directly (so that liveness is tracked properly etc).
- for (unsigned i = 2, e = MBBI->getNumOperands(); i != e; ++i)
- MIB->addOperand(MBBI->getOperand(i));
-
-
- // Delete the pseudo instruction TC_RETURN.
- MachineInstr *NewMI = prior(MBBI);
- MBB.erase(MBBI);
- MBBI = NewMI;
-
// For a tail-call in a callee-pops-arguments environment, some or all of
  // the stack may actually be in use for the call's arguments; this is
// calculated during LowerCall and consumed here...
@@ -250,386 +459,434 @@ AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
// conveniently stored in the MachineFunctionInfo by
// LowerFormalArguments. This will, of course, be zero for the C calling
// convention.
- ArgumentPopSize = FuncInfo->getArgumentStackToRestore();
+ ArgumentPopSize = AFI->getArgumentStackToRestore();
}
- assert(NumInitialBytes % 16 == 0 && NumResidualBytes % 16 == 0
- && "refusing to adjust stack by misaligned amt");
-
- // We may need to address callee-saved registers differently, so find out the
- // bound on the frame indices.
- const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
- int MinCSFI = 0;
- int MaxCSFI = -1;
-
- if (CSI.size()) {
- MinCSFI = CSI[0].getFrameIdx();
- MaxCSFI = CSI[CSI.size() - 1].getFrameIdx();
+ // The stack frame should be like below,
+ //
+ // ---------------------- ---
+ // | | |
+ // | BytesInStackArgArea| CalleeArgStackSize
+ // | (NumReusableBytes) | (of tail call)
+ // | | ---
+ // | | |
+ // ---------------------| --- |
+ // | | | |
+ // | CalleeSavedReg | | |
+ // | (NumRestores * 16) | | |
+ // | | | |
+ // ---------------------| | NumBytes
+ // | | StackSize (StackAdjustUp)
+ // | LocalStackSize | | |
+ // | (covering callee | | |
+ // | args) | | |
+ // | | | |
+ // ---------------------- --- ---
+ //
+ // So NumBytes = StackSize + BytesInStackArgArea - CalleeArgStackSize
+ // = StackSize + ArgumentPopSize
+ //
+ // AArch64TargetLowering::LowerCall figures out ArgumentPopSize and keeps
+ // it as the 2nd argument of AArch64ISD::TC_RETURN.
+ NumBytes += ArgumentPopSize;
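+  // Illustrative arithmetic (not part of this change): with StackSize = 64
+  // and ArgumentPopSize = 16 for a tail call, NumBytes becomes 80, which is
+  // what the restore sequence below has to walk sp past.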
+
+ unsigned NumRestores = 0;
+ // Move past the restores of the callee-saved registers.
+ MachineBasicBlock::iterator LastPopI = MBBI;
+ const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
+ if (LastPopI != MBB.begin()) {
+ do {
+ ++NumRestores;
+ --LastPopI;
+ } while (LastPopI != MBB.begin() && isCSRestore(LastPopI, CSRegs));
+ if (!isCSRestore(LastPopI, CSRegs)) {
+ ++LastPopI;
+ --NumRestores;
+ }
}
-
- // The "residual" stack update comes first from this direction and guarantees
- // that SP is NumInitialBytes below its value on function entry, either by a
- // direct update or restoring it from the frame pointer.
- if (NumInitialBytes + ArgumentPopSize != 0) {
- emitSPUpdate(MBB, MBBI, DL, TII, AArch64::X16,
- NumInitialBytes + ArgumentPopSize);
- --MBBI;
+ NumBytes -= NumRestores * 16;
+ assert(NumBytes >= 0 && "Negative stack allocation size!?");
+
+ if (!hasFP(MF)) {
+ // If this was a redzone leaf function, we don't need to restore the
+ // stack pointer.
+ if (!canUseRedZone(MF))
+ emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP, NumBytes,
+ TII);
+ return;
}
+ // Restore the original stack pointer.
+ // FIXME: Rather than doing the math here, we should instead just use
+ // non-post-indexed loads for the restores if we aren't actually going to
+ // be able to save any instructions.
+ if (NumBytes || MFI->hasVarSizedObjects())
+ emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::FP,
+ -(NumRestores - 1) * 16, TII, MachineInstr::NoFlags);
+}
- // MBBI now points to the instruction just past the last callee-saved
- // restoration (either RET/B if NumInitialBytes == 0, or the "ADD sp, sp"
- // otherwise).
+/// getFrameIndexOffset - Returns the displacement from the frame register to
+/// the stack frame of the specified index.
+int AArch64FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
+ int FI) const {
+ unsigned FrameReg;
+ return getFrameIndexReference(MF, FI, FrameReg);
+}
- // Now we need to find out where to put the bulk of the stack adjustment
- MachineBasicBlock::iterator FirstEpilogue = MBBI;
- while (MBBI != MBB.begin()) {
- --MBBI;
+/// getFrameIndexReference - Provide a base+offset reference to an FI slot for
+/// debug info. It's the same as what we use for resolving the code-gen
+/// references for now. FIXME: This can go wrong when references are
+/// SP-relative and simple call frames aren't used.
+int AArch64FrameLowering::getFrameIndexReference(const MachineFunction &MF,
+ int FI,
+ unsigned &FrameReg) const {
+ return resolveFrameIndexReference(MF, FI, FrameReg);
+}
- unsigned FrameOp;
- for (FrameOp = 0; FrameOp < MBBI->getNumOperands(); ++FrameOp) {
- if (MBBI->getOperand(FrameOp).isFI())
- break;
+int AArch64FrameLowering::resolveFrameIndexReference(const MachineFunction &MF,
+ int FI, unsigned &FrameReg,
+ bool PreferFP) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
+ MF.getTarget().getRegisterInfo());
+ const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
+ int FPOffset = MFI->getObjectOffset(FI) + 16;
+ int Offset = MFI->getObjectOffset(FI) + MFI->getStackSize();
+ bool isFixed = MFI->isFixedObjectIndex(FI);
+
+ // Use frame pointer to reference fixed objects. Use it for locals if
+ // there are VLAs (and thus the SP isn't reliable as a base).
+ // Make sure useFPForScavengingIndex() does the right thing for the emergency
+ // spill slot.
+ bool UseFP = false;
+ if (AFI->hasStackFrame()) {
+ // Note: Keeping the following as multiple 'if' statements rather than
+ // merging to a single expression for readability.
+ //
+ // Argument access should always use the FP.
+ if (isFixed) {
+ UseFP = hasFP(MF);
+ } else if (hasFP(MF) && !RegInfo->hasBasePointer(MF)) {
+ // Use SP or FP, whichever gives us the best chance of the offset
+ // being in range for direct access. If the FPOffset is positive,
+ // that'll always be best, as the SP will be even further away.
+ // If the FPOffset is negative, we have to keep in mind that the
+ // available offset range for negative offsets is smaller than for
+ // positive ones. If we have variable sized objects, we're stuck with
+ // using the FP regardless, though, as the SP offset is unknown
+ // and we don't have a base pointer available. If an offset is
+ // available via the FP and the SP, use whichever is closest.
+ if (PreferFP || MFI->hasVarSizedObjects() || FPOffset >= 0 ||
+ (FPOffset >= -256 && Offset > -FPOffset))
+ UseFP = true;
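+      // Illustrative example (not part of this change): with FPOffset = -32
+      // and Offset = 480 the FP is chosen (-32 is within the +/-256 unscaled
+      // range and 480 > 32); with FPOffset = -512 and no variable-sized
+      // objects, the SP-relative offset is used instead.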
}
-
- // If this instruction doesn't have a frame index we've reached the end of
- // the callee-save restoration.
- if (FrameOp == MBBI->getNumOperands())
- break;
-
- // Likewise if it *is* a local reference, but not to a callee-saved object.
- int FrameIdx = MBBI->getOperand(FrameOp).getIndex();
- if (FrameIdx < MinCSFI || FrameIdx > MaxCSFI)
- break;
-
- FirstEpilogue = MBBI;
}
- if (MF.getFrameInfo()->hasVarSizedObjects()) {
- int64_t StaticFrameBase;
- StaticFrameBase = -(NumInitialBytes + FuncInfo->getFramePointerOffset());
- emitRegUpdate(MBB, FirstEpilogue, DL, TII,
- AArch64::XSP, AArch64::X29, AArch64::NoRegister,
- StaticFrameBase);
- } else {
- emitSPUpdate(MBB, FirstEpilogue, DL,TII, AArch64::X16, NumResidualBytes);
+ if (UseFP) {
+ FrameReg = RegInfo->getFrameRegister(MF);
+ return FPOffset;
}
-}
-
-int64_t
-AArch64FrameLowering::resolveFrameIndexReference(MachineFunction &MF,
- int FrameIndex,
- unsigned &FrameReg,
- int SPAdj,
- bool IsCalleeSaveOp) const {
- AArch64MachineFunctionInfo *FuncInfo =
- MF.getInfo<AArch64MachineFunctionInfo>();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- int64_t TopOfFrameOffset = MFI->getObjectOffset(FrameIndex);
-
- assert(!(IsCalleeSaveOp && FuncInfo->getInitialStackAdjust() == 0)
- && "callee-saved register in unexpected place");
-
- // If the frame for this function is particularly large, we adjust the stack
- // in two phases which means the callee-save related operations see a
- // different (intermediate) stack size.
- int64_t FrameRegPos;
- if (IsCalleeSaveOp) {
- FrameReg = AArch64::XSP;
- FrameRegPos = -static_cast<int64_t>(FuncInfo->getInitialStackAdjust());
- } else if (useFPForAddressing(MF)) {
- // Have to use the frame pointer since we have no idea where SP is.
- FrameReg = AArch64::X29;
- FrameRegPos = FuncInfo->getFramePointerOffset();
- } else {
- FrameReg = AArch64::XSP;
- FrameRegPos = -static_cast<int64_t>(MFI->getStackSize()) + SPAdj;
+ // Use the base pointer if we have one.
+ if (RegInfo->hasBasePointer(MF))
+ FrameReg = RegInfo->getBaseRegister();
+ else {
+ FrameReg = AArch64::SP;
+ // If we're using the red zone for this function, the SP won't actually
+ // be adjusted, so the offsets will be negative. They're also all
+ // within range of the signed 9-bit immediate instructions.
+ if (canUseRedZone(MF))
+ Offset -= AFI->getLocalStackSize();
}
- return TopOfFrameOffset - FrameRegPos;
+ return Offset;
}
-void
-AArch64FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const {
- const AArch64RegisterInfo *RegInfo =
- static_cast<const AArch64RegisterInfo *>(MF.getTarget().getRegisterInfo());
- MachineFrameInfo *MFI = MF.getFrameInfo();
- const AArch64InstrInfo &TII =
- *static_cast<const AArch64InstrInfo *>(MF.getTarget().getInstrInfo());
-
- if (hasFP(MF)) {
- MF.getRegInfo().setPhysRegUsed(AArch64::X29);
- MF.getRegInfo().setPhysRegUsed(AArch64::X30);
- }
-
- // If addressing of local variables is going to be more complicated than
- // shoving a base register and an offset into the instruction then we may well
- // need to scavenge registers. We should either specifically add an
- // callee-save register for this purpose or allocate an extra spill slot.
- bool BigStack =
- MFI->estimateStackSize(MF) >= TII.estimateRSStackLimit(MF)
- || MFI->hasVarSizedObjects() // Access will be from X29: messes things up
- || (MFI->adjustsStack() && !hasReservedCallFrame(MF));
-
- if (!BigStack)
- return;
-
- // We certainly need some slack space for the scavenger, preferably an extra
- // register.
- const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs();
- uint16_t ExtraReg = AArch64::NoRegister;
-
- for (unsigned i = 0; CSRegs[i]; ++i) {
- if (AArch64::GPR64RegClass.contains(CSRegs[i]) &&
- !MF.getRegInfo().isPhysRegUsed(CSRegs[i])) {
- ExtraReg = CSRegs[i];
- break;
- }
- }
+static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) {
+ if (Reg != AArch64::LR)
+ return getKillRegState(true);
- if (ExtraReg != 0) {
- MF.getRegInfo().setPhysRegUsed(ExtraReg);
- } else {
- assert(RS && "Expect register scavenger to be available");
-
- // Create a stack slot for scavenging purposes. PrologEpilogInserter
- // helpfully places it near either SP or FP for us to avoid
- // infinitely-regression during scavenging.
- const TargetRegisterClass *RC = &AArch64::GPR64RegClass;
- RS->addScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
- RC->getAlignment(),
- false));
- }
+  // LR may be referred to later by an @llvm.returnaddress intrinsic.
+ bool LRLiveIn = MF.getRegInfo().isLiveIn(AArch64::LR);
+ bool LRKill = !(LRLiveIn && MF.getFrameInfo()->isReturnAddressTaken());
+ return getKillRegState(LRKill);
}
-bool AArch64FrameLowering::determinePrologueDeath(MachineBasicBlock &MBB,
- unsigned Reg) const {
- // If @llvm.returnaddress is called then it will refer to X30 by some means;
- // the prologue store does not kill the register.
- if (Reg == AArch64::X30) {
- if (MBB.getParent()->getFrameInfo()->isReturnAddressTaken()
- && MBB.getParent()->getRegInfo().isLiveIn(Reg))
- return false;
+bool AArch64FrameLowering::spillCalleeSavedRegisters(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ unsigned Count = CSI.size();
+ DebugLoc DL;
+ assert((Count & 1) == 0 && "Odd number of callee-saved regs to spill!");
+
+ if (MI != MBB.end())
+ DL = MI->getDebugLoc();
+
+ for (unsigned i = 0; i < Count; i += 2) {
+ unsigned idx = Count - i - 2;
+ unsigned Reg1 = CSI[idx].getReg();
+ unsigned Reg2 = CSI[idx + 1].getReg();
+ // GPRs and FPRs are saved in pairs of 64-bit regs. We expect the CSI
+ // list to come in sorted by frame index so that we can issue the store
+ // pair instructions directly. Assert if we see anything otherwise.
+ //
+ // The order of the registers in the list is controlled by
+ // getCalleeSavedRegs(), so they will always be in-order, as well.
+ assert(CSI[idx].getFrameIdx() + 1 == CSI[idx + 1].getFrameIdx() &&
+ "Out of order callee saved regs!");
+ unsigned StrOpc;
+ assert((Count & 1) == 0 && "Odd number of callee-saved regs to spill!");
+ assert((i & 1) == 0 && "Odd index for callee-saved reg spill!");
+ // Issue sequence of non-sp increment and pi sp spills for cs regs. The
+ // first spill is a pre-increment that allocates the stack.
+ // For example:
+ // stp x22, x21, [sp, #-48]! // addImm(-6)
+ // stp x20, x19, [sp, #16] // addImm(+2)
+ // stp fp, lr, [sp, #32] // addImm(+4)
+ // Rationale: This sequence saves uop updates compared to a sequence of
+ // pre-increment spills like stp xi,xj,[sp,#-16]!
+    // Note: Similar rationale and sequence for restores in the epilogue.
+ if (AArch64::GPR64RegClass.contains(Reg1)) {
+ assert(AArch64::GPR64RegClass.contains(Reg2) &&
+ "Expected GPR64 callee-saved register pair!");
+ // For first spill use pre-increment store.
+ if (i == 0)
+ StrOpc = AArch64::STPXpre;
+ else
+ StrOpc = AArch64::STPXi;
+ } else if (AArch64::FPR64RegClass.contains(Reg1)) {
+ assert(AArch64::FPR64RegClass.contains(Reg2) &&
+ "Expected FPR64 callee-saved register pair!");
+ // For first spill use pre-increment store.
+ if (i == 0)
+ StrOpc = AArch64::STPDpre;
+ else
+ StrOpc = AArch64::STPDi;
+ } else
+ llvm_unreachable("Unexpected callee saved register!");
+ DEBUG(dbgs() << "CSR spill: (" << TRI->getName(Reg1) << ", "
+ << TRI->getName(Reg2) << ") -> fi#(" << CSI[idx].getFrameIdx()
+ << ", " << CSI[idx + 1].getFrameIdx() << ")\n");
+ // Compute offset: i = 0 => offset = -Count;
+ // i = 2 => offset = -(Count - 2) + Count = 2 = i; etc.
+ const int Offset = (i == 0) ? -Count : i;
+ assert((Offset >= -64 && Offset <= 63) &&
+ "Offset out of bounds for STP immediate");
+ MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc));
+ if (StrOpc == AArch64::STPDpre || StrOpc == AArch64::STPXpre)
+ MIB.addReg(AArch64::SP, RegState::Define);
+
+ MIB.addReg(Reg2, getPrologueDeath(MF, Reg2))
+ .addReg(Reg1, getPrologueDeath(MF, Reg1))
+ .addReg(AArch64::SP)
+ .addImm(Offset) // [sp, #offset * 8], where factor * 8 is implicit
+ .setMIFlag(MachineInstr::FrameSetup);
}
-
- // In all other cases, physical registers are dead after they've been saved
- // but live at the beginning of the prologue block.
- MBB.addLiveIn(Reg);
return true;
}
-void
-AArch64FrameLowering::emitFrameMemOps(bool isPrologue, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI,
- const LoadStoreMethod PossClasses[],
- unsigned NumClasses) const {
- DebugLoc DL = MBB.findDebugLoc(MBBI);
+bool AArch64FrameLowering::restoreCalleeSavedRegisters(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
MachineFunction &MF = *MBB.getParent();
- MachineFrameInfo &MFI = *MF.getFrameInfo();
const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ unsigned Count = CSI.size();
+ DebugLoc DL;
+ assert((Count & 1) == 0 && "Odd number of callee-saved regs to spill!");
+
+ if (MI != MBB.end())
+ DL = MI->getDebugLoc();
+
+ for (unsigned i = 0; i < Count; i += 2) {
+ unsigned Reg1 = CSI[i].getReg();
+ unsigned Reg2 = CSI[i + 1].getReg();
+ // GPRs and FPRs are saved in pairs of 64-bit regs. We expect the CSI
+ // list to come in sorted by frame index so that we can issue the store
+ // pair instructions directly. Assert if we see anything otherwise.
+ assert(CSI[i].getFrameIdx() + 1 == CSI[i + 1].getFrameIdx() &&
+ "Out of order callee saved regs!");
+ // Issue sequence of non-sp increment and sp-pi restores for cs regs. Only
+ // the last load is sp-pi post-increment and de-allocates the stack:
+ // For example:
+ // ldp fp, lr, [sp, #32] // addImm(+4)
+ // ldp x20, x19, [sp, #16] // addImm(+2)
+ // ldp x22, x21, [sp], #48 // addImm(+6)
+ // Note: see comment in spillCalleeSavedRegisters()
+ unsigned LdrOpc;
+
+ assert((Count & 1) == 0 && "Odd number of callee-saved regs to spill!");
+ assert((i & 1) == 0 && "Odd index for callee-saved reg spill!");
+ if (AArch64::GPR64RegClass.contains(Reg1)) {
+ assert(AArch64::GPR64RegClass.contains(Reg2) &&
+ "Expected GPR64 callee-saved register pair!");
+ if (i == Count - 2)
+ LdrOpc = AArch64::LDPXpost;
+ else
+ LdrOpc = AArch64::LDPXi;
+ } else if (AArch64::FPR64RegClass.contains(Reg1)) {
+ assert(AArch64::FPR64RegClass.contains(Reg2) &&
+ "Expected FPR64 callee-saved register pair!");
+ if (i == Count - 2)
+ LdrOpc = AArch64::LDPDpost;
+ else
+ LdrOpc = AArch64::LDPDi;
+ } else
+ llvm_unreachable("Unexpected callee saved register!");
+ DEBUG(dbgs() << "CSR restore: (" << TRI->getName(Reg1) << ", "
+ << TRI->getName(Reg2) << ") -> fi#(" << CSI[i].getFrameIdx()
+ << ", " << CSI[i + 1].getFrameIdx() << ")\n");
+
+ // Compute offset: i = 0 => offset = Count - 2; i = 2 => offset = Count - 4;
+ // etc.
+ const int Offset = (i == Count - 2) ? Count : Count - i - 2;
+ assert((Offset >= -64 && Offset <= 63) &&
+ "Offset out of bounds for LDP immediate");
+ MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(LdrOpc));
+ if (LdrOpc == AArch64::LDPXpost || LdrOpc == AArch64::LDPDpost)
+ MIB.addReg(AArch64::SP, RegState::Define);
+
+ MIB.addReg(Reg2, getDefRegState(true))
+ .addReg(Reg1, getDefRegState(true))
+ .addReg(AArch64::SP)
+ .addImm(Offset); // [sp], #offset * 8 or [sp, #offset * 8]
+ // where the factor * 8 is implicit
+ }
+ return true;
+}
- // A certain amount of implicit contract is present here. The actual stack
- // offsets haven't been allocated officially yet, so for strictly correct code
- // we rely on the fact that the elements of CSI are allocated in order
- // starting at SP, purely as dictated by size and alignment. In practice since
- // this function handles the only accesses to those slots it's not quite so
- // important.
- //
- // We have also ordered the Callee-saved register list in AArch64CallingConv
- // so that the above scheme puts registers in order: in particular we want
- // &X30 to be &X29+8 for an ABI-correct frame record (PCS 5.2.2)
- for (unsigned i = 0, e = CSI.size(); i < e; ++i) {
- unsigned Reg = CSI[i].getReg();
-
- // First we need to find out which register class the register belongs to so
- // that we can use the correct load/store instrucitons.
- unsigned ClassIdx;
- for (ClassIdx = 0; ClassIdx < NumClasses; ++ClassIdx) {
- if (PossClasses[ClassIdx].RegClass->contains(Reg))
- break;
- }
- assert(ClassIdx != NumClasses
- && "Asked to store register in unexpected class");
- const TargetRegisterClass &TheClass = *PossClasses[ClassIdx].RegClass;
-
- // Now we need to decide whether it's possible to emit a paired instruction:
- // for this we want the next register to be in the same class.
- MachineInstrBuilder NewMI;
- bool Pair = false;
- if (i + 1 < CSI.size() && TheClass.contains(CSI[i+1].getReg())) {
- Pair = true;
- unsigned StLow = 0, StHigh = 0;
- if (isPrologue) {
- // Most of these registers will be live-in to the MBB and killed by our
- // store, though there are exceptions (see determinePrologueDeath).
- StLow = getKillRegState(determinePrologueDeath(MBB, CSI[i+1].getReg()));
- StHigh = getKillRegState(determinePrologueDeath(MBB, CSI[i].getReg()));
- } else {
- StLow = RegState::Define;
- StHigh = RegState::Define;
- }
+void AArch64FrameLowering::processFunctionBeforeCalleeSavedScan(
+ MachineFunction &MF, RegScavenger *RS) const {
+ const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
+ MF.getTarget().getRegisterInfo());
+ AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
+ MachineRegisterInfo *MRI = &MF.getRegInfo();
+ SmallVector<unsigned, 4> UnspilledCSGPRs;
+ SmallVector<unsigned, 4> UnspilledCSFPRs;
- NewMI = BuildMI(MBB, MBBI, DL, TII.get(PossClasses[ClassIdx].PairOpcode))
- .addReg(CSI[i+1].getReg(), StLow)
- .addReg(CSI[i].getReg(), StHigh);
+ // The frame record needs to be created by saving the appropriate registers
+ if (hasFP(MF)) {
+ MRI->setPhysRegUsed(AArch64::FP);
+ MRI->setPhysRegUsed(AArch64::LR);
+ }
- // If it's a paired op, we've consumed two registers
- ++i;
- } else {
- unsigned State;
- if (isPrologue) {
- State = getKillRegState(determinePrologueDeath(MBB, CSI[i].getReg()));
+ // Spill the BasePtr if it's used. Do this first thing so that the
+ // getCalleeSavedRegs() below will get the right answer.
+ if (RegInfo->hasBasePointer(MF))
+ MRI->setPhysRegUsed(RegInfo->getBaseRegister());
+
+ // If any callee-saved registers are used, the frame cannot be eliminated.
+ unsigned NumGPRSpilled = 0;
+ unsigned NumFPRSpilled = 0;
+ bool ExtraCSSpill = false;
+ bool CanEliminateFrame = true;
+ DEBUG(dbgs() << "*** processFunctionBeforeCalleeSavedScan\nUsed CSRs:");
+ const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
+
+ // Check pairs of consecutive callee-saved registers.
+ for (unsigned i = 0; CSRegs[i]; i += 2) {
+ assert(CSRegs[i + 1] && "Odd number of callee-saved registers!");
+
+ const unsigned OddReg = CSRegs[i];
+ const unsigned EvenReg = CSRegs[i + 1];
+ assert((AArch64::GPR64RegClass.contains(OddReg) &&
+ AArch64::GPR64RegClass.contains(EvenReg)) ^
+ (AArch64::FPR64RegClass.contains(OddReg) &&
+ AArch64::FPR64RegClass.contains(EvenReg)) &&
+ "Register class mismatch!");
+
+ const bool OddRegUsed = MRI->isPhysRegUsed(OddReg);
+ const bool EvenRegUsed = MRI->isPhysRegUsed(EvenReg);
+
+ // Early exit if none of the registers in the register pair is actually
+ // used.
+ if (!OddRegUsed && !EvenRegUsed) {
+ if (AArch64::GPR64RegClass.contains(OddReg)) {
+ UnspilledCSGPRs.push_back(OddReg);
+ UnspilledCSGPRs.push_back(EvenReg);
} else {
- State = RegState::Define;
+ UnspilledCSFPRs.push_back(OddReg);
+ UnspilledCSFPRs.push_back(EvenReg);
}
+ continue;
+ }
- NewMI = BuildMI(MBB, MBBI, DL,
- TII.get(PossClasses[ClassIdx].SingleOpcode))
- .addReg(CSI[i].getReg(), State);
+ unsigned Reg = AArch64::NoRegister;
+ // If only one of the registers of the register pair is used, make sure to
+ // mark the other one as used as well.
+ if (OddRegUsed ^ EvenRegUsed) {
+ // Find out which register is the additional spill.
+ Reg = OddRegUsed ? EvenReg : OddReg;
+ MRI->setPhysRegUsed(Reg);
}
- // Note that the FrameIdx refers to the second register in a pair: it will
- // be allocated the smaller numeric address and so is the one an LDP/STP
- // address must use.
- int FrameIdx = CSI[i].getFrameIdx();
- MachineMemOperand::MemOperandFlags Flags;
- Flags = isPrologue ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad;
- MachineMemOperand *MMO =
- MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
- Flags,
- Pair ? TheClass.getSize() * 2 : TheClass.getSize(),
- MFI.getObjectAlignment(FrameIdx));
-
- NewMI.addFrameIndex(FrameIdx)
- .addImm(0) // address-register offset
- .addMemOperand(MMO);
-
- if (isPrologue)
- NewMI.setMIFlags(MachineInstr::FrameSetup);
-
- // For aesthetic reasons, during an epilogue we want to emit complementary
- // operations to the prologue, but in the opposite order. So we still
- // iterate through the CalleeSavedInfo list in order, but we put the
- // instructions successively earlier in the MBB.
- if (!isPrologue)
- --MBBI;
+ DEBUG(dbgs() << ' ' << PrintReg(OddReg, RegInfo));
+ DEBUG(dbgs() << ' ' << PrintReg(EvenReg, RegInfo));
+
+ assert(((OddReg == AArch64::LR && EvenReg == AArch64::FP) ||
+ (RegInfo->getEncodingValue(OddReg) + 1 ==
+ RegInfo->getEncodingValue(EvenReg))) &&
+ "Register pair of non-adjacent registers!");
+ if (AArch64::GPR64RegClass.contains(OddReg)) {
+ NumGPRSpilled += 2;
+ // If it's not a reserved register, we can use it in lieu of an
+ // emergency spill slot for the register scavenger.
+ // FIXME: It would be better to instead keep looking and choose another
+ // unspilled register that isn't reserved, if there is one.
+ if (Reg != AArch64::NoRegister && !RegInfo->isReservedReg(MF, Reg))
+ ExtraCSSpill = true;
+ } else
+ NumFPRSpilled += 2;
+
+ CanEliminateFrame = false;
}
-}
-
-bool
-AArch64FrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
- if (CSI.empty())
- return false;
-
- static const LoadStoreMethod PossibleClasses[] = {
- {&AArch64::GPR64RegClass, AArch64::LSPair64_STR, AArch64::LS64_STR},
- {&AArch64::FPR64RegClass, AArch64::LSFPPair64_STR, AArch64::LSFP64_STR},
- };
- const unsigned NumClasses = llvm::array_lengthof(PossibleClasses);
-
- emitFrameMemOps(/* isPrologue = */ true, MBB, MBBI, CSI, TRI,
- PossibleClasses, NumClasses);
-
- return true;
-}
-
-bool
-AArch64FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
-
- if (CSI.empty())
- return false;
-
- static const LoadStoreMethod PossibleClasses[] = {
- {&AArch64::GPR64RegClass, AArch64::LSPair64_LDR, AArch64::LS64_LDR},
- {&AArch64::FPR64RegClass, AArch64::LSFPPair64_LDR, AArch64::LSFP64_LDR},
- };
- const unsigned NumClasses = llvm::array_lengthof(PossibleClasses);
-
- emitFrameMemOps(/* isPrologue = */ false, MBB, MBBI, CSI, TRI,
- PossibleClasses, NumClasses);
-
- return true;
-}
-
-bool
-AArch64FrameLowering::hasFP(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- const TargetRegisterInfo *RI = MF.getTarget().getRegisterInfo();
-
- // This is a decision of ABI compliance. The AArch64 PCS gives various options
- // for conformance, and even at the most stringent level more or less permits
- // elimination for leaf functions because there's no loss of functionality
- // (for debugging etc)..
- if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI->hasCalls())
- return true;
-
- // The following are hard-limits: incorrect code will be generated if we try
- // to omit the frame.
- return (RI->needsStackRealignment(MF) ||
- MFI->hasVarSizedObjects() ||
- MFI->isFrameAddressTaken());
-}
-
-bool
-AArch64FrameLowering::useFPForAddressing(const MachineFunction &MF) const {
- return MF.getFrameInfo()->hasVarSizedObjects();
-}
-
-bool
-AArch64FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
-
- // Of the various reasons for having a frame pointer, it's actually only
- // variable-sized objects that prevent reservation of a call frame.
- return !(hasFP(MF) && MFI->hasVarSizedObjects());
-}
-
-void
-AArch64FrameLowering::eliminateCallFramePseudoInstr(
- MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const {
- const AArch64InstrInfo &TII =
- *static_cast<const AArch64InstrInfo *>(MF.getTarget().getInstrInfo());
- DebugLoc dl = MI->getDebugLoc();
- int Opcode = MI->getOpcode();
- bool IsDestroy = Opcode == TII.getCallFrameDestroyOpcode();
- uint64_t CalleePopAmount = IsDestroy ? MI->getOperand(1).getImm() : 0;
-
- if (!hasReservedCallFrame(MF)) {
- unsigned Align = getStackAlignment();
- int64_t Amount = MI->getOperand(0).getImm();
- Amount = RoundUpToAlignment(Amount, Align);
- if (!IsDestroy) Amount = -Amount;
+ // FIXME: Set BigStack if any stack slot references may be out of range.
+  // For now, just conservatively guesstimate based on unscaled indexing
+ // range. We'll end up allocating an unnecessary spill slot a lot, but
+ // realistically that's not a big deal at this stage of the game.
+ // The CSR spill slots have not been allocated yet, so estimateStackSize
+ // won't include them.
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ unsigned CFSize = estimateStackSize(MF) + 8 * (NumGPRSpilled + NumFPRSpilled);
+ DEBUG(dbgs() << "Estimated stack frame size: " << CFSize << " bytes.\n");
+ bool BigStack = (CFSize >= 256);
+ if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
+ AFI->setHasStackFrame(true);
+
+ // Estimate if we might need to scavenge a register at some point in order
+ // to materialize a stack offset. If so, either spill one additional
+ // callee-saved register or reserve a special spill slot to facilitate
+ // register scavenging. If we already spilled an extra callee-saved register
+ // above to keep the number of spills even, we don't need to do anything else
+ // here.
+ if (BigStack && !ExtraCSSpill) {
+
+ // If we're adding a register to spill here, we have to add two of them
+ // to keep the number of regs to spill even.
+ assert(((UnspilledCSGPRs.size() & 1) == 0) && "Odd number of registers!");
+ unsigned Count = 0;
+ while (!UnspilledCSGPRs.empty() && Count < 2) {
+ unsigned Reg = UnspilledCSGPRs.back();
+ UnspilledCSGPRs.pop_back();
+ DEBUG(dbgs() << "Spilling " << PrintReg(Reg, RegInfo)
+ << " to get a scratch register.\n");
+ MRI->setPhysRegUsed(Reg);
+ ExtraCSSpill = true;
+ ++Count;
+ }
- // N.b. if CalleePopAmount is valid but zero (i.e. callee would pop, but it
- // doesn't have to pop anything), then the first operand will be zero too so
- // this adjustment is a no-op.
- if (CalleePopAmount == 0) {
- // FIXME: in-function stack adjustment for calls is limited to 12-bits
- // because there's no guaranteed temporary register available. Mostly call
- // frames will be allocated at the start of a function so this is OK, but
- // it is a limitation that needs dealing with.
- assert(Amount > -0xfff && Amount < 0xfff && "call frame too large");
- emitSPUpdate(MBB, MI, dl, TII, AArch64::NoRegister, Amount);
+ // If we didn't find an extra callee-saved register to spill, create
+ // an emergency spill slot.
+ if (!ExtraCSSpill) {
+ const TargetRegisterClass *RC = &AArch64::GPR64RegClass;
+ int FI = MFI->CreateStackObject(RC->getSize(), RC->getAlignment(), false);
+ RS->addScavengingFrameIndex(FI);
+ DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
+ << " as the emergency spill slot.\n");
}
- } else if (CalleePopAmount != 0) {
- // If the calling convention demands that the callee pops arguments from the
- // stack, we want to add it back if we have a reserved call frame.
- assert(CalleePopAmount < 0xfff && "call frame too large");
- emitSPUpdate(MBB, MI, dl, TII, AArch64::NoRegister, -CalleePopAmount);
}
-
- MBB.erase(MI);
}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.h b/contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.h
index 032dd90..7686e6f 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.h
+++ b/contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.h
@@ -1,4 +1,4 @@
-//==- AArch64FrameLowering.h - Define frame lowering for AArch64 -*- C++ -*--=//
+//==-- AArch64FrameLowering.h - TargetFrameLowering for AArch64 --*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@@ -7,100 +7,60 @@
//
//===----------------------------------------------------------------------===//
//
-// This class implements the AArch64-specific parts of the TargetFrameLowering
-// class.
+//
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_AARCH64_FRAMEINFO_H
-#define LLVM_AARCH64_FRAMEINFO_H
+#ifndef AArch64_FRAMELOWERING_H
+#define AArch64_FRAMELOWERING_H
-#include "AArch64Subtarget.h"
#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
-class AArch64Subtarget;
class AArch64FrameLowering : public TargetFrameLowering {
-private:
- // In order to unify the spilling and restoring of callee-saved registers into
- // emitFrameMemOps, we need to be able to specify which instructions to use
- // for the relevant memory operations on each register class. An array of the
- // following struct is populated and passed in to achieve this.
- struct LoadStoreMethod {
- const TargetRegisterClass *RegClass; // E.g. GPR64RegClass
-
- // The preferred instruction.
- unsigned PairOpcode; // E.g. LSPair64_STR
-
- // Sometimes only a single register can be handled at once.
- unsigned SingleOpcode; // E.g. LS64_STR
- };
-protected:
- const AArch64Subtarget &STI;
-
public:
- explicit AArch64FrameLowering(const AArch64Subtarget &sti)
- : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 16, 0, 16),
- STI(sti) {
- }
-
- /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
- /// the function.
- virtual void emitPrologue(MachineFunction &MF) const;
- virtual void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
-
- /// Decides how much stack adjustment to perform in each phase of the prologue
- /// and epilogue.
- void splitSPAdjustments(uint64_t Total, uint64_t &Initial,
- uint64_t &Residual) const;
-
- int64_t resolveFrameIndexReference(MachineFunction &MF, int FrameIndex,
- unsigned &FrameReg, int SPAdj,
- bool IsCalleeSaveOp) const;
+ explicit AArch64FrameLowering()
+ : TargetFrameLowering(StackGrowsDown, 16, 0, 16,
+ false /*StackRealignable*/) {}
- virtual void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const;
-
- virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
- virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
+ void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned FramePtr) const;
void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const;
-
- /// If the register is X30 (i.e. LR) and the return address is used in the
- /// function then the callee-save store doesn't actually kill the register,
- /// otherwise it does.
- bool determinePrologueDeath(MachineBasicBlock &MBB, unsigned Reg) const;
-
- /// This function emits the loads or stores required during prologue and
- /// epilogue as efficiently as possible.
- ///
- /// The operations involved in setting up and tearing down the frame are
- /// similar enough to warrant a shared function, particularly as discrepancies
- /// between the two would be disastrous.
- void emitFrameMemOps(bool isStore, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI,
- const LoadStoreMethod PossibleClasses[],
- unsigned NumClasses) const;
-
-
- virtual bool hasFP(const MachineFunction &MF) const;
-
- virtual bool useFPForAddressing(const MachineFunction &MF) const;
-
- /// On AA
- virtual bool hasReservedCallFrame(const MachineFunction &MF) const;
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const override;
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ void emitPrologue(MachineFunction &MF) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+
+ int getFrameIndexOffset(const MachineFunction &MF, int FI) const override;
+ int getFrameIndexReference(const MachineFunction &MF, int FI,
+ unsigned &FrameReg) const override;
+ int resolveFrameIndexReference(const MachineFunction &MF, int FI,
+ unsigned &FrameReg,
+ bool PreferFP = false) const;
+ bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const override;
+
+ bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const override;
+
+ /// \brief Can this function use the red zone for local allocations.
+ bool canUseRedZone(const MachineFunction &MF) const;
+
+ bool hasFP(const MachineFunction &MF) const override;
+ bool hasReservedCallFrame(const MachineFunction &MF) const override;
+
+ void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const override;
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/contrib/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index ef99541..3f49fab 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -11,118 +11,119 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "aarch64-isel"
-#include "AArch64.h"
-#include "AArch64InstrInfo.h"
-#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
-#include "Utils/AArch64BaseInfo.h"
+#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/IR/Function.h" // To access function attributes.
#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "aarch64-isel"
+
//===--------------------------------------------------------------------===//
-/// AArch64 specific code to select AArch64 machine instructions for
-/// SelectionDAG operations.
+/// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
+/// instructions for SelectionDAG operations.
///
namespace {
class AArch64DAGToDAGISel : public SelectionDAGISel {
AArch64TargetMachine &TM;
- /// Keep a pointer to the AArch64Subtarget around so that we can
+ /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
/// make the right decision when generating code for different targets.
const AArch64Subtarget *Subtarget;
+ bool ForCodeSize;
+
public:
explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
CodeGenOpt::Level OptLevel)
- : SelectionDAGISel(tm, OptLevel), TM(tm),
- Subtarget(&TM.getSubtarget<AArch64Subtarget>()) {
- }
+ : SelectionDAGISel(tm, OptLevel), TM(tm), Subtarget(nullptr),
+ ForCodeSize(false) {}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "AArch64 Instruction Selection";
}
- // Include the pieces autogenerated from the target description.
-#include "AArch64GenDAGISel.inc"
-
- template<unsigned MemSize>
- bool SelectOffsetUImm12(SDValue N, SDValue &UImm12) {
- const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
- if (!CN || CN->getZExtValue() % MemSize != 0
- || CN->getZExtValue() / MemSize > 0xfff)
- return false;
-
- UImm12 = CurDAG->getTargetConstant(CN->getZExtValue() / MemSize, MVT::i64);
- return true;
- }
-
- template<unsigned RegWidth>
- bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
- return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
- }
-
- /// Used for pre-lowered address-reference nodes, so we already know
- /// the fields match. This operand's job is simply to add an
- /// appropriate shift operand to the MOVZ/MOVK instruction.
- template<unsigned LogShift>
- bool SelectMOVWAddressRef(SDValue N, SDValue &Imm, SDValue &Shift) {
- Imm = N;
- Shift = CurDAG->getTargetConstant(LogShift, MVT::i32);
- return true;
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ AttributeSet FnAttrs = MF.getFunction()->getAttributes();
+ ForCodeSize =
+ FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::OptimizeForSize) ||
+ FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
+ Subtarget = &TM.getSubtarget<AArch64Subtarget>();
+ return SelectionDAGISel::runOnMachineFunction(MF);
}
- bool SelectFPZeroOperand(SDValue N, SDValue &Dummy);
-
- bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
- unsigned RegWidth);
+ SDNode *Select(SDNode *Node) override;
+ /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
+ /// inline asm expressions.
bool SelectInlineAsmMemoryOperand(const SDValue &Op,
char ConstraintCode,
- std::vector<SDValue> &OutOps);
-
- bool SelectLogicalImm(SDValue N, SDValue &Imm);
-
- template<unsigned RegWidth>
- bool SelectTSTBOperand(SDValue N, SDValue &FixedPos) {
- return SelectTSTBOperand(N, FixedPos, RegWidth);
+ std::vector<SDValue> &OutOps) override;
+
+ SDNode *SelectMLAV64LaneV128(SDNode *N);
+ SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N);
+ bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
+ bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
+ bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
+ bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
+ return SelectShiftedRegister(N, false, Reg, Shift);
+ }
+ bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
+ return SelectShiftedRegister(N, true, Reg, Shift);
+ }
+ bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
+ return SelectAddrModeIndexed(N, 1, Base, OffImm);
+ }
+ bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
+ return SelectAddrModeIndexed(N, 2, Base, OffImm);
+ }
+ bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
+ return SelectAddrModeIndexed(N, 4, Base, OffImm);
+ }
+ bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
+ return SelectAddrModeIndexed(N, 8, Base, OffImm);
+ }
+ bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
+ return SelectAddrModeIndexed(N, 16, Base, OffImm);
+ }
+ bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
+ return SelectAddrModeUnscaled(N, 1, Base, OffImm);
+ }
+ bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
+ return SelectAddrModeUnscaled(N, 2, Base, OffImm);
+ }
+ bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
+ return SelectAddrModeUnscaled(N, 4, Base, OffImm);
+ }
+ bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
+ return SelectAddrModeUnscaled(N, 8, Base, OffImm);
+ }
+ bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
+ return SelectAddrModeUnscaled(N, 16, Base, OffImm);
}
- bool SelectTSTBOperand(SDValue N, SDValue &FixedPos, unsigned RegWidth);
-
- SDNode *SelectAtomic(SDNode *N, unsigned Op8, unsigned Op16, unsigned Op32,
- unsigned Op64);
-
- /// Put the given constant into a pool and return a DAG which will give its
- /// address.
- SDValue getConstantPoolItemAddress(SDLoc DL, const Constant *CV);
-
- SDNode *TrySelectToMoveImm(SDNode *N);
- SDNode *LowerToFPLitPool(SDNode *Node);
- SDNode *SelectToLitPool(SDNode *N);
-
- SDNode* Select(SDNode*);
-private:
- /// Get the opcode for table lookup instruction
- unsigned getTBLOpc(bool IsExt, bool Is64Bit, unsigned NumOfVec);
-
- /// Select NEON table lookup intrinsics. NumVecs should be 1, 2, 3 or 4.
- /// IsExt is to indicate if the result will be extended with an argument.
- SDNode *SelectVTBL(SDNode *N, unsigned NumVecs, bool IsExt);
+ template<int Width>
+ bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
+ SDValue &SignExtend, SDValue &DoShift) {
+ return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
+ }
- /// Select NEON load intrinsics. NumVecs should be 1, 2, 3 or 4.
- SDNode *SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
- const uint16_t *Opcode);
+ template<int Width>
+ bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
+ SDValue &SignExtend, SDValue &DoShift) {
+ return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
+ }
- /// Select NEON store intrinsics. NumVecs should be 1, 2, 3 or 4.
- SDNode *SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
- const uint16_t *Opcodes);
/// Form sequences of consecutive 64/128-bit registers for use in NEON
/// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
@@ -136,315 +137,711 @@ private:
SDValue createTuple(ArrayRef<SDValue> Vecs, unsigned RegClassIDs[],
unsigned SubRegs[]);
- /// Select NEON load-duplicate intrinsics. NumVecs should be 2, 3 or 4.
- /// The opcode array specifies the instructions used for load.
- SDNode *SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
- const uint16_t *Opcodes);
+ SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
+
+ SDNode *SelectIndexedLoad(SDNode *N, bool &Done);
+
+ SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
+ unsigned SubRegIdx);
+ SDNode *SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
+ unsigned SubRegIdx);
+ SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
+ SDNode *SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
+
+ SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
+ SDNode *SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
+ SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
+ SDNode *SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
+
+ SDNode *SelectBitfieldExtractOp(SDNode *N);
+ SDNode *SelectBitfieldInsertOp(SDNode *N);
+
+ SDNode *SelectLIBM(SDNode *N);
+
+// Include the pieces autogenerated from the target description.
+#include "AArch64GenDAGISel.inc"
+
+private:
+ bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
+ SDValue &Shift);
+ bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
+ SDValue &OffImm);
+ bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
+ SDValue &OffImm);
+ bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
+ SDValue &Offset, SDValue &SignExtend,
+ SDValue &DoShift);
+ bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
+ SDValue &Offset, SDValue &SignExtend,
+ SDValue &DoShift);
+ bool isWorthFolding(SDValue V) const;
+ bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
+ SDValue &Offset, SDValue &SignExtend);
- /// Select NEON load/store lane intrinsics. NumVecs should be 2, 3 or 4.
- /// The opcode arrays specify the instructions used for load/store.
- SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
- unsigned NumVecs, const uint16_t *Opcodes);
+ template<unsigned RegWidth>
+ bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
+ return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
+ }
- SDValue getTargetSubregToReg(int SRIdx, SDLoc DL, EVT VT, EVT VTD,
- SDValue Operand);
+ bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);
};
+} // end anonymous namespace
+
+/// isIntImmediate - This method tests to see if the node is a constant
+/// operand. If so, Imm will receive the 32-bit value.
+static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
+ if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
+ Imm = C->getZExtValue();
+ return true;
+ }
+ return false;
}
-bool
-AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
- unsigned RegWidth) {
- const ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N);
- if (!CN) return false;
+// isIntImmediate - This method tests to see if the operand is a constant.
+// If so, Imm will receive the value.
+static bool isIntImmediate(SDValue N, uint64_t &Imm) {
+ return isIntImmediate(N.getNode(), Imm);
+}
- // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
- // is between 1 and 32 for a destination w-register, or 1 and 64 for an
- // x-register.
- //
- // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
- // want THIS_NODE to be 2^fbits. This is much easier to deal with using
- // integers.
- bool IsExact;
+// isOpcWithIntImmediate - This method tests to see if the node is a specific
+// opcode and that it has an immediate integer right operand.
+// If so, Imm will receive the 32-bit value.
+static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
+ uint64_t &Imm) {
+ return N->getOpcode() == Opc &&
+ isIntImmediate(N->getOperand(1).getNode(), Imm);
+}
- // fbits is between 1 and 64 in the worst-case, which means the fmul
- // could have 2^64 as an actual operand. Need 65 bits of precision.
- APSInt IntVal(65, true);
- CN->getValueAPF().convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
+bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
+ const SDValue &Op, char ConstraintCode, std::vector<SDValue> &OutOps) {
+ assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
+ // Require the address to be in a register. That is safe for all AArch64
+ // variants and it is hard to do anything much smarter without knowing
+ // how the operand is used.
+ OutOps.push_back(Op);
+ return false;
+}
- // N.b. isPowerOf2 also checks for > 0.
- if (!IsExact || !IntVal.isPowerOf2()) return false;
- unsigned FBits = IntVal.logBase2();
+/// SelectArithImmed - Select an immediate value that can be represented as
+/// a 12-bit value shifted left by either 0 or 12. If so, return true with
+/// Val set to the 12-bit value and Shift set to the shifter operand.
+bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
+ SDValue &Shift) {
+ // This function is called from the addsub_shifted_imm ComplexPattern,
+  // which lists [imm] as the list of opcodes it is interested in. However,
+ // we still need to check whether the operand is actually an immediate
+ // here because the ComplexPattern opcode list is only used in
+ // root-level opcode matching.
+ if (!isa<ConstantSDNode>(N.getNode()))
+ return false;
- // Checks above should have guaranteed that we haven't lost information in
- // finding FBits, but it must still be in range.
- if (FBits == 0 || FBits > RegWidth) return false;
+ uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
+ unsigned ShiftAmt;
+
+ if (Immed >> 12 == 0) {
+ ShiftAmt = 0;
+ } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
+ ShiftAmt = 12;
+ Immed = Immed >> 12;
+ } else
+ return false;
- FixedPos = CurDAG->getTargetConstant(64 - FBits, MVT::i32);
+ unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
+ Val = CurDAG->getTargetConstant(Immed, MVT::i32);
+ Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
return true;
}
-bool
-AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(const SDValue &Op,
- char ConstraintCode,
- std::vector<SDValue> &OutOps) {
- switch (ConstraintCode) {
- default: llvm_unreachable("Unrecognised AArch64 memory constraint");
- case 'm':
- // FIXME: more freedom is actually permitted for 'm'. We can go
- // hunting for a base and an offset if we want. Of course, since
- // we don't really know how the operand is going to be used we're
- // probably restricted to the load/store pair's simm7 as an offset
- // range anyway.
- case 'Q':
- OutOps.push_back(Op);
+/// SelectNegArithImmed - As above, but negates the value before trying to
+/// select it.
+bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
+ SDValue &Shift) {
+ // This function is called from the addsub_shifted_imm ComplexPattern,
+  // which lists [imm] as the list of opcodes it is interested in. However,
+ // we still need to check whether the operand is actually an immediate
+ // here because the ComplexPattern opcode list is only used in
+ // root-level opcode matching.
+ if (!isa<ConstantSDNode>(N.getNode()))
+ return false;
+
+ // The immediate operand must be a 24-bit zero-extended immediate.
+ uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
+
+ // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
+ // have the opposite effect on the C flag, so this pattern mustn't match under
+ // those circumstances.
+ if (Immed == 0)
+ return false;
+
+ if (N.getValueType() == MVT::i32)
+ Immed = ~((uint32_t)Immed) + 1;
+ else
+ Immed = ~Immed + 1ULL;
+ if (Immed & 0xFFFFFFFFFF000000ULL)
+ return false;
+
+ Immed &= 0xFFFFFFULL;
+ return SelectArithImmed(CurDAG->getConstant(Immed, MVT::i32), Val, Shift);
+}
+
+/// getShiftTypeForNode - Translate a shift node to the corresponding
+/// ShiftType value.
+static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
+ switch (N.getOpcode()) {
+ default:
+ return AArch64_AM::InvalidShiftExtend;
+ case ISD::SHL:
+ return AArch64_AM::LSL;
+ case ISD::SRL:
+ return AArch64_AM::LSR;
+ case ISD::SRA:
+ return AArch64_AM::ASR;
+ case ISD::ROTR:
+ return AArch64_AM::ROR;
}
+}
+/// \brief Determine whether it is worth folding V into an extended register.
+bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
+  // It hurts if the value is used at least twice, unless we are optimizing
+ // for code size.
+ if (ForCodeSize || V.hasOneUse())
+ return true;
return false;
}
-bool
-AArch64DAGToDAGISel::SelectFPZeroOperand(SDValue N, SDValue &Dummy) {
- ConstantFPSDNode *Imm = dyn_cast<ConstantFPSDNode>(N);
- if (!Imm || !Imm->getValueAPF().isPosZero())
+/// SelectShiftedRegister - Select a "shifted register" operand. If the value
+/// is not shifted, set the Shift operand to the default of "LSL 0". The logical
+/// instructions allow the shifted register to be rotated, but the arithmetic
+/// instructions do not. The AllowROR parameter specifies whether ROR is
+/// supported.
+bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
+ SDValue &Reg, SDValue &Shift) {
+ AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
+ if (ShType == AArch64_AM::InvalidShiftExtend)
+ return false;
+ if (!AllowROR && ShType == AArch64_AM::ROR)
return false;
- // Doesn't actually carry any information, but keeps TableGen quiet.
- Dummy = CurDAG->getTargetConstant(0, MVT::i32);
- return true;
+ if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
+ unsigned BitSize = N.getValueType().getSizeInBits();
+ unsigned Val = RHS->getZExtValue() & (BitSize - 1);
+ unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);
+
+ Reg = N.getOperand(0);
+ Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
+ return isWorthFolding(N);
+ }
+
+ return false;
}
-bool AArch64DAGToDAGISel::SelectLogicalImm(SDValue N, SDValue &Imm) {
- uint32_t Bits;
- uint32_t RegWidth = N.getValueType().getSizeInBits();
+/// getExtendTypeForNode - Translate an extend node to the corresponding
+/// ExtendType value.
+static AArch64_AM::ShiftExtendType
+getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
+ if (N.getOpcode() == ISD::SIGN_EXTEND ||
+ N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
+ EVT SrcVT;
+ if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
+ SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
+ else
+ SrcVT = N.getOperand(0).getValueType();
+
+ if (!IsLoadStore && SrcVT == MVT::i8)
+ return AArch64_AM::SXTB;
+ else if (!IsLoadStore && SrcVT == MVT::i16)
+ return AArch64_AM::SXTH;
+ else if (SrcVT == MVT::i32)
+ return AArch64_AM::SXTW;
+ assert(SrcVT != MVT::i64 && "extend from 64-bits?");
+
+ return AArch64_AM::InvalidShiftExtend;
+ } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
+ N.getOpcode() == ISD::ANY_EXTEND) {
+ EVT SrcVT = N.getOperand(0).getValueType();
+ if (!IsLoadStore && SrcVT == MVT::i8)
+ return AArch64_AM::UXTB;
+ else if (!IsLoadStore && SrcVT == MVT::i16)
+ return AArch64_AM::UXTH;
+ else if (SrcVT == MVT::i32)
+ return AArch64_AM::UXTW;
+ assert(SrcVT != MVT::i64 && "extend from 64-bits?");
+
+ return AArch64_AM::InvalidShiftExtend;
+ } else if (N.getOpcode() == ISD::AND) {
+ ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
+ if (!CSD)
+ return AArch64_AM::InvalidShiftExtend;
+ uint64_t AndMask = CSD->getZExtValue();
+
+ switch (AndMask) {
+ default:
+ return AArch64_AM::InvalidShiftExtend;
+ case 0xFF:
+ return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
+ case 0xFFFF:
+ return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
+ case 0xFFFFFFFF:
+ return AArch64_AM::UXTW;
+ }
+ }
- ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
- if (!CN) return false;
+ return AArch64_AM::InvalidShiftExtend;
+}
- if (!A64Imms::isLogicalImm(RegWidth, CN->getZExtValue(), Bits))
+// Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
+static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
+ if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
+ DL->getOpcode() != AArch64ISD::DUPLANE32)
return false;
- Imm = CurDAG->getTargetConstant(Bits, MVT::i32);
+ SDValue SV = DL->getOperand(0);
+ if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
+ return false;
+
+ SDValue EV = SV.getOperand(1);
+ if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
+ return false;
+
+ ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
+ ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
+ LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
+ LaneOp = EV.getOperand(0);
+
return true;
}
-SDNode *AArch64DAGToDAGISel::TrySelectToMoveImm(SDNode *Node) {
- SDNode *ResNode;
- SDLoc dl(Node);
- EVT DestType = Node->getValueType(0);
- unsigned DestWidth = DestType.getSizeInBits();
-
- unsigned MOVOpcode;
- EVT MOVType;
- int UImm16, Shift;
- uint32_t LogicalBits;
-
- uint64_t BitPat = cast<ConstantSDNode>(Node)->getZExtValue();
- if (A64Imms::isMOVZImm(DestWidth, BitPat, UImm16, Shift)) {
- MOVType = DestType;
- MOVOpcode = DestWidth == 64 ? AArch64::MOVZxii : AArch64::MOVZwii;
- } else if (A64Imms::isMOVNImm(DestWidth, BitPat, UImm16, Shift)) {
- MOVType = DestType;
- MOVOpcode = DestWidth == 64 ? AArch64::MOVNxii : AArch64::MOVNwii;
- } else if (DestWidth == 64 && A64Imms::isMOVNImm(32, BitPat, UImm16, Shift)) {
- // To get something like 0x0000_0000_ffff_1234 into a 64-bit register we can
- // use a 32-bit instruction: "movn w0, 0xedbc".
- MOVType = MVT::i32;
- MOVOpcode = AArch64::MOVNwii;
- } else if (A64Imms::isLogicalImm(DestWidth, BitPat, LogicalBits)) {
- MOVOpcode = DestWidth == 64 ? AArch64::ORRxxi : AArch64::ORRwwi;
- uint16_t ZR = DestWidth == 64 ? AArch64::XZR : AArch64::WZR;
-
- return CurDAG->getMachineNode(MOVOpcode, dl, DestType,
- CurDAG->getRegister(ZR, DestType),
- CurDAG->getTargetConstant(LogicalBits, MVT::i32));
- } else {
- // Can't handle it in one instruction. There's scope for permitting two (or
- // more) instructions, but that'll need more thought.
- return NULL;
+// Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is a
+// high lane extract.
+static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
+ SDValue &LaneOp, int &LaneIdx) {
+
+ if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
+ std::swap(Op0, Op1);
+ if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
+ return false;
+ }
+ StdOp = Op1;
+ return true;
+}
+
+/// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
+/// is a lane in the upper half of a 128-bit vector. Recognize and select this
+/// so that we don't emit unnecessary lane extracts.
+SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+ SDValue MLAOp1; // Will hold ordinary multiplicand for MLA.
+ SDValue MLAOp2; // Will hold lane-accessed multiplicand for MLA.
+ int LaneIdx = -1; // Will hold the lane index.
+
+ if (Op1.getOpcode() != ISD::MUL ||
+ !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
+ LaneIdx)) {
+ std::swap(Op0, Op1);
+ if (Op1.getOpcode() != ISD::MUL ||
+ !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
+ LaneIdx))
+ return nullptr;
}
- ResNode = CurDAG->getMachineNode(MOVOpcode, dl, MOVType,
- CurDAG->getTargetConstant(UImm16, MVT::i32),
- CurDAG->getTargetConstant(Shift, MVT::i32));
+ SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);
- if (MOVType != DestType) {
- ResNode = CurDAG->getMachineNode(TargetOpcode::SUBREG_TO_REG, dl,
- MVT::i64, MVT::i32, MVT::Other,
- CurDAG->getTargetConstant(0, MVT::i64),
- SDValue(ResNode, 0),
- CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32));
+ SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };
+
+ unsigned MLAOpc = ~0U;
+
+ switch (N->getSimpleValueType(0).SimpleTy) {
+ default:
+ llvm_unreachable("Unrecognized MLA.");
+ case MVT::v4i16:
+ MLAOpc = AArch64::MLAv4i16_indexed;
+ break;
+ case MVT::v8i16:
+ MLAOpc = AArch64::MLAv8i16_indexed;
+ break;
+ case MVT::v2i32:
+ MLAOpc = AArch64::MLAv2i32_indexed;
+ break;
+ case MVT::v4i32:
+ MLAOpc = AArch64::MLAv4i32_indexed;
+ break;
}
- return ResNode;
+ return CurDAG->getMachineNode(MLAOpc, SDLoc(N), N->getValueType(0), Ops);
+}
+
+SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
+ SDValue SMULLOp0;
+ SDValue SMULLOp1;
+ int LaneIdx;
+
+ if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
+ LaneIdx))
+ return nullptr;
+
+ SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);
+
+ SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };
+
+ unsigned SMULLOpc = ~0U;
+
+ if (IntNo == Intrinsic::aarch64_neon_smull) {
+ switch (N->getSimpleValueType(0).SimpleTy) {
+ default:
+ llvm_unreachable("Unrecognized SMULL.");
+ case MVT::v4i32:
+ SMULLOpc = AArch64::SMULLv4i16_indexed;
+ break;
+ case MVT::v2i64:
+ SMULLOpc = AArch64::SMULLv2i32_indexed;
+ break;
+ }
+ } else if (IntNo == Intrinsic::aarch64_neon_umull) {
+ switch (N->getSimpleValueType(0).SimpleTy) {
+ default:
+ llvm_unreachable("Unrecognized SMULL.");
+ case MVT::v4i32:
+ SMULLOpc = AArch64::UMULLv4i16_indexed;
+ break;
+ case MVT::v2i64:
+ SMULLOpc = AArch64::UMULLv2i32_indexed;
+ break;
+ }
+ } else
+ llvm_unreachable("Unrecognized intrinsic.");
+
+ return CurDAG->getMachineNode(SMULLOpc, SDLoc(N), N->getValueType(0), Ops);
+}
+
+/// Instructions that accept extend modifiers like UXTW expect the register
+/// being extended to be a GPR32, but the incoming DAG might be acting on a
+/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
+/// this is the case.
+static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
+ if (N.getValueType() == MVT::i32)
+ return N;
+
+ SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
+ MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
+ SDLoc(N), MVT::i32, N, SubReg);
+ return SDValue(Node, 0);
+}
+
+
+/// SelectArithExtendedRegister - Select an "extended register" operand. This
+/// operand folds in an extend followed by an optional left shift.
+bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
+ SDValue &Shift) {
+ unsigned ShiftVal = 0;
+ AArch64_AM::ShiftExtendType Ext;
+
+ if (N.getOpcode() == ISD::SHL) {
+ ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
+ if (!CSD)
+ return false;
+ ShiftVal = CSD->getZExtValue();
+ if (ShiftVal > 4)
+ return false;
+
+ Ext = getExtendTypeForNode(N.getOperand(0));
+ if (Ext == AArch64_AM::InvalidShiftExtend)
+ return false;
+
+ Reg = N.getOperand(0).getOperand(0);
+ } else {
+ Ext = getExtendTypeForNode(N);
+ if (Ext == AArch64_AM::InvalidShiftExtend)
+ return false;
+
+ Reg = N.getOperand(0);
+ }
+
+ // AArch64 mandates that the RHS of the operation must use the smallest
+  // register class that could contain the size being extended from. Thus,
+ // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
+ // there might not be an actual 32-bit value in the program. We can
+  // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
+ assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
+ Reg = narrowIfNeeded(CurDAG, Reg);
+ Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), MVT::i32);
+ return isWorthFolding(N);
}
-SDValue
-AArch64DAGToDAGISel::getConstantPoolItemAddress(SDLoc DL,
- const Constant *CV) {
- EVT PtrVT = getTargetLowering()->getPointerTy();
-
- switch (getTargetLowering()->getTargetMachine().getCodeModel()) {
- case CodeModel::Small: {
- unsigned Alignment =
- getTargetLowering()->getDataLayout()->getABITypeAlignment(CV->getType());
- return CurDAG->getNode(
- AArch64ISD::WrapperSmall, DL, PtrVT,
- CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_NO_FLAG),
- CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_LO12),
- CurDAG->getConstant(Alignment, MVT::i32));
- }
- case CodeModel::Large: {
- SDNode *LitAddr;
- LitAddr = CurDAG->getMachineNode(
- AArch64::MOVZxii, DL, PtrVT,
- CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G3),
- CurDAG->getTargetConstant(3, MVT::i32));
- LitAddr = CurDAG->getMachineNode(
- AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
- CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G2_NC),
- CurDAG->getTargetConstant(2, MVT::i32));
- LitAddr = CurDAG->getMachineNode(
- AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
- CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G1_NC),
- CurDAG->getTargetConstant(1, MVT::i32));
- LitAddr = CurDAG->getMachineNode(
- AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
- CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G0_NC),
- CurDAG->getTargetConstant(0, MVT::i32));
- return SDValue(LitAddr, 0);
+/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
+/// immediate" address. The "Size" argument is the size in bytes of the memory
+/// reference, which determines the scale.
+bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
+ SDValue &Base, SDValue &OffImm) {
+ const TargetLowering *TLI = getTargetLowering();
+ if (N.getOpcode() == ISD::FrameIndex) {
+ int FI = cast<FrameIndexSDNode>(N)->getIndex();
+ Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
+ OffImm = CurDAG->getTargetConstant(0, MVT::i64);
+ return true;
}
- default:
- llvm_unreachable("Only small and large code models supported now");
+
+ if (N.getOpcode() == AArch64ISD::ADDlow) {
+ GlobalAddressSDNode *GAN =
+ dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
+ Base = N.getOperand(0);
+ OffImm = N.getOperand(1);
+ if (!GAN)
+ return true;
+
+ const GlobalValue *GV = GAN->getGlobal();
+ unsigned Alignment = GV->getAlignment();
+ const DataLayout *DL = TLI->getDataLayout();
+ Type *Ty = GV->getType()->getElementType();
+ if (Alignment == 0 && Ty->isSized() && !Subtarget->isTargetDarwin())
+ Alignment = DL->getABITypeAlignment(Ty);
+
+ if (Alignment >= Size)
+ return true;
+ }
+
+ if (CurDAG->isBaseWithConstantOffset(N)) {
+ if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
+ int64_t RHSC = (int64_t)RHS->getZExtValue();
+ unsigned Scale = Log2_32(Size);
+ if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
+ Base = N.getOperand(0);
+ if (Base.getOpcode() == ISD::FrameIndex) {
+ int FI = cast<FrameIndexSDNode>(Base)->getIndex();
+ Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
+ }
+ OffImm = CurDAG->getTargetConstant(RHSC >> Scale, MVT::i64);
+ return true;
+ }
+ }
}
+
+ // Before falling back to our general case, check if the unscaled
+ // instructions can handle this. If so, that's preferable.
+ if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
+ return false;
+
+ // Base only. The address will be materialized into a register before
+ // the memory is accessed.
+ // add x0, Xbase, #offset
+ // ldr x0, [x0]
+ Base = N;
+ OffImm = CurDAG->getTargetConstant(0, MVT::i64);
+ return true;
}
-SDNode *AArch64DAGToDAGISel::SelectToLitPool(SDNode *Node) {
- SDLoc DL(Node);
- uint64_t UnsignedVal = cast<ConstantSDNode>(Node)->getZExtValue();
- int64_t SignedVal = cast<ConstantSDNode>(Node)->getSExtValue();
- EVT DestType = Node->getValueType(0);
-
- // Since we may end up loading a 64-bit constant from a 32-bit entry the
- // constant in the pool may have a different type to the eventual node.
- ISD::LoadExtType Extension;
- EVT MemType;
-
- assert((DestType == MVT::i64 || DestType == MVT::i32)
- && "Only expect integer constants at the moment");
-
- if (DestType == MVT::i32) {
- Extension = ISD::NON_EXTLOAD;
- MemType = MVT::i32;
- } else if (UnsignedVal <= UINT32_MAX) {
- Extension = ISD::ZEXTLOAD;
- MemType = MVT::i32;
- } else if (SignedVal >= INT32_MIN && SignedVal <= INT32_MAX) {
- Extension = ISD::SEXTLOAD;
- MemType = MVT::i32;
- } else {
- Extension = ISD::NON_EXTLOAD;
- MemType = MVT::i64;
- }
-
- Constant *CV = ConstantInt::get(Type::getIntNTy(*CurDAG->getContext(),
- MemType.getSizeInBits()),
- UnsignedVal);
- SDValue PoolAddr = getConstantPoolItemAddress(DL, CV);
- unsigned Alignment =
- getTargetLowering()->getDataLayout()->getABITypeAlignment(CV->getType());
-
- return CurDAG->getExtLoad(Extension, DL, DestType, CurDAG->getEntryNode(),
- PoolAddr,
- MachinePointerInfo::getConstantPool(), MemType,
- /* isVolatile = */ false,
- /* isNonTemporal = */ false,
- Alignment).getNode();
+/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
+/// immediate" address. This should only match when there is an offset that
+/// is not valid for a scaled immediate addressing mode. The "Size" argument
+/// is the size in bytes of the memory reference, which is needed here to know
+/// what is valid for a scaled immediate.
+bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
+ SDValue &Base,
+ SDValue &OffImm) {
+ if (!CurDAG->isBaseWithConstantOffset(N))
+ return false;
+ if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
+ int64_t RHSC = RHS->getSExtValue();
+ // If the offset is valid as a scaled immediate, don't match here.
+ if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
+ RHSC < (0x1000 << Log2_32(Size)))
+ return false;
+ if (RHSC >= -256 && RHSC < 256) {
+ Base = N.getOperand(0);
+ if (Base.getOpcode() == ISD::FrameIndex) {
+ int FI = cast<FrameIndexSDNode>(Base)->getIndex();
+ const TargetLowering *TLI = getTargetLowering();
+ Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
+ }
+ OffImm = CurDAG->getTargetConstant(RHSC, MVT::i64);
+ return true;
+ }
+ }
+ return false;
}
-SDNode *AArch64DAGToDAGISel::LowerToFPLitPool(SDNode *Node) {
- SDLoc DL(Node);
- const ConstantFP *FV = cast<ConstantFPSDNode>(Node)->getConstantFPValue();
- EVT DestType = Node->getValueType(0);
-
- unsigned Alignment =
- getTargetLowering()->getDataLayout()->getABITypeAlignment(FV->getType());
- SDValue PoolAddr = getConstantPoolItemAddress(DL, FV);
-
- return CurDAG->getLoad(DestType, DL, CurDAG->getEntryNode(), PoolAddr,
- MachinePointerInfo::getConstantPool(),
- /* isVolatile = */ false,
- /* isNonTemporal = */ false,
- /* isInvariant = */ true,
- Alignment).getNode();
+static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
+ SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
+ SDValue ImpDef = SDValue(
+ CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, SDLoc(N), MVT::i64),
+ 0);
+ MachineSDNode *Node = CurDAG->getMachineNode(
+ TargetOpcode::INSERT_SUBREG, SDLoc(N), MVT::i64, ImpDef, N, SubReg);
+ return SDValue(Node, 0);
}
-bool
-AArch64DAGToDAGISel::SelectTSTBOperand(SDValue N, SDValue &FixedPos,
- unsigned RegWidth) {
- const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
- if (!CN) return false;
+/// \brief Check if the given SHL node (\p N) can be used to form an
+/// extended register for an addressing mode.
+bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
+ bool WantExtend, SDValue &Offset,
+ SDValue &SignExtend) {
+ assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
+ ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
+ if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
+ return false;
- uint64_t Val = CN->getZExtValue();
+ if (WantExtend) {
+ AArch64_AM::ShiftExtendType Ext =
+ getExtendTypeForNode(N.getOperand(0), true);
+ if (Ext == AArch64_AM::InvalidShiftExtend)
+ return false;
- if (!isPowerOf2_64(Val)) return false;
+ Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
+ SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
+ } else {
+ Offset = N.getOperand(0);
+ SignExtend = CurDAG->getTargetConstant(0, MVT::i32);
+ }
- unsigned TestedBit = Log2_64(Val);
- // Checks above should have guaranteed that we haven't lost information in
- // finding TestedBit, but it must still be in range.
- if (TestedBit >= RegWidth) return false;
+ unsigned LegalShiftVal = Log2_32(Size);
+ unsigned ShiftVal = CSD->getZExtValue();
- FixedPos = CurDAG->getTargetConstant(TestedBit, MVT::i64);
- return true;
+ if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
+ return false;
+
+ if (isWorthFolding(N))
+ return true;
+
+ return false;
}
-SDNode *AArch64DAGToDAGISel::SelectAtomic(SDNode *Node, unsigned Op8,
- unsigned Op16,unsigned Op32,
- unsigned Op64) {
- // Mostly direct translation to the given operations, except that we preserve
- // the AtomicOrdering for use later on.
- AtomicSDNode *AN = cast<AtomicSDNode>(Node);
- EVT VT = AN->getMemoryVT();
-
- unsigned Op;
- if (VT == MVT::i8)
- Op = Op8;
- else if (VT == MVT::i16)
- Op = Op16;
- else if (VT == MVT::i32)
- Op = Op32;
- else if (VT == MVT::i64)
- Op = Op64;
- else
- llvm_unreachable("Unexpected atomic operation");
+bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
+ SDValue &Base, SDValue &Offset,
+ SDValue &SignExtend,
+ SDValue &DoShift) {
+ if (N.getOpcode() != ISD::ADD)
+ return false;
+ SDValue LHS = N.getOperand(0);
+ SDValue RHS = N.getOperand(1);
- SmallVector<SDValue, 4> Ops;
- for (unsigned i = 1; i < AN->getNumOperands(); ++i)
- Ops.push_back(AN->getOperand(i));
+ // We don't want to match immediate adds here, because they are better lowered
+ // to the register-immediate addressing modes.
+ if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
+ return false;
- Ops.push_back(CurDAG->getTargetConstant(AN->getOrdering(), MVT::i32));
- Ops.push_back(AN->getOperand(0)); // Chain moves to the end
+ // Check if this particular node is reused in any non-memory related
+ // operation. If yes, do not try to fold this node into the address
+ // computation, since the computation will be kept.
+ const SDNode *Node = N.getNode();
+ for (SDNode *UI : Node->uses()) {
+ if (!isa<MemSDNode>(*UI))
+ return false;
+ }
+
+ // Remember if it is worth folding N when it produces extended register.
+ bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
+
+ // Try to match a shifted extend on the RHS.
+ if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
+ SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
+ Base = LHS;
+ DoShift = CurDAG->getTargetConstant(true, MVT::i32);
+ return true;
+ }
+
+ // Try to match a shifted extend on the LHS.
+ if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
+ SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
+ Base = RHS;
+ DoShift = CurDAG->getTargetConstant(true, MVT::i32);
+ return true;
+ }
+
+ // There was no shift, whatever else we find.
+ DoShift = CurDAG->getTargetConstant(false, MVT::i32);
+
+ AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
+ // Try to match an unshifted extend on the LHS.
+ if (IsExtendedRegisterWorthFolding &&
+ (Ext = getExtendTypeForNode(LHS, true)) !=
+ AArch64_AM::InvalidShiftExtend) {
+ Base = RHS;
+ Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
+ SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
+ if (isWorthFolding(LHS))
+ return true;
+ }
- return CurDAG->SelectNodeTo(Node, Op,
- AN->getValueType(0), MVT::Other,
- &Ops[0], Ops.size());
+ // Try to match an unshifted extend on the RHS.
+ if (IsExtendedRegisterWorthFolding &&
+ (Ext = getExtendTypeForNode(RHS, true)) !=
+ AArch64_AM::InvalidShiftExtend) {
+ Base = LHS;
+ Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
+ SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
+ if (isWorthFolding(RHS))
+ return true;
+ }
+
+ return false;
+}
+
+bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
+ SDValue &Base, SDValue &Offset,
+ SDValue &SignExtend,
+ SDValue &DoShift) {
+ if (N.getOpcode() != ISD::ADD)
+ return false;
+ SDValue LHS = N.getOperand(0);
+ SDValue RHS = N.getOperand(1);
+
+ // We don't want to match immediate adds here, because they are better lowered
+ // to the register-immediate addressing modes.
+ if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
+ return false;
+
+ // Check if this particular node is reused in any non-memory related
+ // operation. If yes, do not try to fold this node into the address
+ // computation, since the computation will be kept.
+ const SDNode *Node = N.getNode();
+ for (SDNode *UI : Node->uses()) {
+ if (!isa<MemSDNode>(*UI))
+ return false;
+ }
+
+ // Remember if it is worth folding N when it produces extended register.
+ bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
+
+ // Try to match a shifted extend on the RHS.
+ if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
+ SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
+ Base = LHS;
+ DoShift = CurDAG->getTargetConstant(true, MVT::i32);
+ return true;
+ }
+
+ // Try to match a shifted extend on the LHS.
+ if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
+ SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
+ Base = RHS;
+ DoShift = CurDAG->getTargetConstant(true, MVT::i32);
+ return true;
+ }
+
+ // Match any non-shifted, non-extend, non-immediate add expression.
+ Base = LHS;
+ Offset = RHS;
+ SignExtend = CurDAG->getTargetConstant(false, MVT::i32);
+ DoShift = CurDAG->getTargetConstant(false, MVT::i32);
+ // Reg1 + Reg2 is free: no check needed.
+ return true;
}
SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
- static unsigned RegClassIDs[] = { AArch64::DPairRegClassID,
- AArch64::DTripleRegClassID,
- AArch64::DQuadRegClassID };
- static unsigned SubRegs[] = { AArch64::dsub_0, AArch64::dsub_1,
- AArch64::dsub_2, AArch64::dsub_3 };
+ static unsigned RegClassIDs[] = {
+ AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
+ static unsigned SubRegs[] = { AArch64::dsub0, AArch64::dsub1,
+ AArch64::dsub2, AArch64::dsub3 };
return createTuple(Regs, RegClassIDs, SubRegs);
}
SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
- static unsigned RegClassIDs[] = { AArch64::QPairRegClassID,
- AArch64::QTripleRegClassID,
- AArch64::QQuadRegClassID };
- static unsigned SubRegs[] = { AArch64::qsub_0, AArch64::qsub_1,
- AArch64::qsub_2, AArch64::qsub_3 };
+ static unsigned RegClassIDs[] = {
+ AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
+ static unsigned SubRegs[] = { AArch64::qsub0, AArch64::qsub1,
+ AArch64::qsub2, AArch64::qsub3 };
return createTuple(Regs, RegClassIDs, SubRegs);
}
@@ -478,1109 +875,2159 @@ SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
return SDValue(N, 0);
}
+SDNode *AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
+ unsigned Opc, bool isExt) {
+ SDLoc dl(N);
+ EVT VT = N->getValueType(0);
+
+ unsigned ExtOff = isExt;
-// Get the register stride update opcode of a VLD/VST instruction that
-// is otherwise equivalent to the given fixed stride updating instruction.
-static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
- switch (Opc) {
- default: break;
- case AArch64::LD1WB_8B_fixed: return AArch64::LD1WB_8B_register;
- case AArch64::LD1WB_4H_fixed: return AArch64::LD1WB_4H_register;
- case AArch64::LD1WB_2S_fixed: return AArch64::LD1WB_2S_register;
- case AArch64::LD1WB_1D_fixed: return AArch64::LD1WB_1D_register;
- case AArch64::LD1WB_16B_fixed: return AArch64::LD1WB_16B_register;
- case AArch64::LD1WB_8H_fixed: return AArch64::LD1WB_8H_register;
- case AArch64::LD1WB_4S_fixed: return AArch64::LD1WB_4S_register;
- case AArch64::LD1WB_2D_fixed: return AArch64::LD1WB_2D_register;
-
- case AArch64::LD2WB_8B_fixed: return AArch64::LD2WB_8B_register;
- case AArch64::LD2WB_4H_fixed: return AArch64::LD2WB_4H_register;
- case AArch64::LD2WB_2S_fixed: return AArch64::LD2WB_2S_register;
- case AArch64::LD2WB_16B_fixed: return AArch64::LD2WB_16B_register;
- case AArch64::LD2WB_8H_fixed: return AArch64::LD2WB_8H_register;
- case AArch64::LD2WB_4S_fixed: return AArch64::LD2WB_4S_register;
- case AArch64::LD2WB_2D_fixed: return AArch64::LD2WB_2D_register;
-
- case AArch64::LD3WB_8B_fixed: return AArch64::LD3WB_8B_register;
- case AArch64::LD3WB_4H_fixed: return AArch64::LD3WB_4H_register;
- case AArch64::LD3WB_2S_fixed: return AArch64::LD3WB_2S_register;
- case AArch64::LD3WB_16B_fixed: return AArch64::LD3WB_16B_register;
- case AArch64::LD3WB_8H_fixed: return AArch64::LD3WB_8H_register;
- case AArch64::LD3WB_4S_fixed: return AArch64::LD3WB_4S_register;
- case AArch64::LD3WB_2D_fixed: return AArch64::LD3WB_2D_register;
-
- case AArch64::LD4WB_8B_fixed: return AArch64::LD4WB_8B_register;
- case AArch64::LD4WB_4H_fixed: return AArch64::LD4WB_4H_register;
- case AArch64::LD4WB_2S_fixed: return AArch64::LD4WB_2S_register;
- case AArch64::LD4WB_16B_fixed: return AArch64::LD4WB_16B_register;
- case AArch64::LD4WB_8H_fixed: return AArch64::LD4WB_8H_register;
- case AArch64::LD4WB_4S_fixed: return AArch64::LD4WB_4S_register;
- case AArch64::LD4WB_2D_fixed: return AArch64::LD4WB_2D_register;
-
- case AArch64::LD1x2WB_8B_fixed: return AArch64::LD1x2WB_8B_register;
- case AArch64::LD1x2WB_4H_fixed: return AArch64::LD1x2WB_4H_register;
- case AArch64::LD1x2WB_2S_fixed: return AArch64::LD1x2WB_2S_register;
- case AArch64::LD1x2WB_1D_fixed: return AArch64::LD1x2WB_1D_register;
- case AArch64::LD1x2WB_16B_fixed: return AArch64::LD1x2WB_16B_register;
- case AArch64::LD1x2WB_8H_fixed: return AArch64::LD1x2WB_8H_register;
- case AArch64::LD1x2WB_4S_fixed: return AArch64::LD1x2WB_4S_register;
- case AArch64::LD1x2WB_2D_fixed: return AArch64::LD1x2WB_2D_register;
-
- case AArch64::LD1x3WB_8B_fixed: return AArch64::LD1x3WB_8B_register;
- case AArch64::LD1x3WB_4H_fixed: return AArch64::LD1x3WB_4H_register;
- case AArch64::LD1x3WB_2S_fixed: return AArch64::LD1x3WB_2S_register;
- case AArch64::LD1x3WB_1D_fixed: return AArch64::LD1x3WB_1D_register;
- case AArch64::LD1x3WB_16B_fixed: return AArch64::LD1x3WB_16B_register;
- case AArch64::LD1x3WB_8H_fixed: return AArch64::LD1x3WB_8H_register;
- case AArch64::LD1x3WB_4S_fixed: return AArch64::LD1x3WB_4S_register;
- case AArch64::LD1x3WB_2D_fixed: return AArch64::LD1x3WB_2D_register;
-
- case AArch64::LD1x4WB_8B_fixed: return AArch64::LD1x4WB_8B_register;
- case AArch64::LD1x4WB_4H_fixed: return AArch64::LD1x4WB_4H_register;
- case AArch64::LD1x4WB_2S_fixed: return AArch64::LD1x4WB_2S_register;
- case AArch64::LD1x4WB_1D_fixed: return AArch64::LD1x4WB_1D_register;
- case AArch64::LD1x4WB_16B_fixed: return AArch64::LD1x4WB_16B_register;
- case AArch64::LD1x4WB_8H_fixed: return AArch64::LD1x4WB_8H_register;
- case AArch64::LD1x4WB_4S_fixed: return AArch64::LD1x4WB_4S_register;
- case AArch64::LD1x4WB_2D_fixed: return AArch64::LD1x4WB_2D_register;
-
- case AArch64::ST1WB_8B_fixed: return AArch64::ST1WB_8B_register;
- case AArch64::ST1WB_4H_fixed: return AArch64::ST1WB_4H_register;
- case AArch64::ST1WB_2S_fixed: return AArch64::ST1WB_2S_register;
- case AArch64::ST1WB_1D_fixed: return AArch64::ST1WB_1D_register;
- case AArch64::ST1WB_16B_fixed: return AArch64::ST1WB_16B_register;
- case AArch64::ST1WB_8H_fixed: return AArch64::ST1WB_8H_register;
- case AArch64::ST1WB_4S_fixed: return AArch64::ST1WB_4S_register;
- case AArch64::ST1WB_2D_fixed: return AArch64::ST1WB_2D_register;
-
- case AArch64::ST2WB_8B_fixed: return AArch64::ST2WB_8B_register;
- case AArch64::ST2WB_4H_fixed: return AArch64::ST2WB_4H_register;
- case AArch64::ST2WB_2S_fixed: return AArch64::ST2WB_2S_register;
- case AArch64::ST2WB_16B_fixed: return AArch64::ST2WB_16B_register;
- case AArch64::ST2WB_8H_fixed: return AArch64::ST2WB_8H_register;
- case AArch64::ST2WB_4S_fixed: return AArch64::ST2WB_4S_register;
- case AArch64::ST2WB_2D_fixed: return AArch64::ST2WB_2D_register;
-
- case AArch64::ST3WB_8B_fixed: return AArch64::ST3WB_8B_register;
- case AArch64::ST3WB_4H_fixed: return AArch64::ST3WB_4H_register;
- case AArch64::ST3WB_2S_fixed: return AArch64::ST3WB_2S_register;
- case AArch64::ST3WB_16B_fixed: return AArch64::ST3WB_16B_register;
- case AArch64::ST3WB_8H_fixed: return AArch64::ST3WB_8H_register;
- case AArch64::ST3WB_4S_fixed: return AArch64::ST3WB_4S_register;
- case AArch64::ST3WB_2D_fixed: return AArch64::ST3WB_2D_register;
-
- case AArch64::ST4WB_8B_fixed: return AArch64::ST4WB_8B_register;
- case AArch64::ST4WB_4H_fixed: return AArch64::ST4WB_4H_register;
- case AArch64::ST4WB_2S_fixed: return AArch64::ST4WB_2S_register;
- case AArch64::ST4WB_16B_fixed: return AArch64::ST4WB_16B_register;
- case AArch64::ST4WB_8H_fixed: return AArch64::ST4WB_8H_register;
- case AArch64::ST4WB_4S_fixed: return AArch64::ST4WB_4S_register;
- case AArch64::ST4WB_2D_fixed: return AArch64::ST4WB_2D_register;
-
- case AArch64::ST1x2WB_8B_fixed: return AArch64::ST1x2WB_8B_register;
- case AArch64::ST1x2WB_4H_fixed: return AArch64::ST1x2WB_4H_register;
- case AArch64::ST1x2WB_2S_fixed: return AArch64::ST1x2WB_2S_register;
- case AArch64::ST1x2WB_1D_fixed: return AArch64::ST1x2WB_1D_register;
- case AArch64::ST1x2WB_16B_fixed: return AArch64::ST1x2WB_16B_register;
- case AArch64::ST1x2WB_8H_fixed: return AArch64::ST1x2WB_8H_register;
- case AArch64::ST1x2WB_4S_fixed: return AArch64::ST1x2WB_4S_register;
- case AArch64::ST1x2WB_2D_fixed: return AArch64::ST1x2WB_2D_register;
-
- case AArch64::ST1x3WB_8B_fixed: return AArch64::ST1x3WB_8B_register;
- case AArch64::ST1x3WB_4H_fixed: return AArch64::ST1x3WB_4H_register;
- case AArch64::ST1x3WB_2S_fixed: return AArch64::ST1x3WB_2S_register;
- case AArch64::ST1x3WB_1D_fixed: return AArch64::ST1x3WB_1D_register;
- case AArch64::ST1x3WB_16B_fixed: return AArch64::ST1x3WB_16B_register;
- case AArch64::ST1x3WB_8H_fixed: return AArch64::ST1x3WB_8H_register;
- case AArch64::ST1x3WB_4S_fixed: return AArch64::ST1x3WB_4S_register;
- case AArch64::ST1x3WB_2D_fixed: return AArch64::ST1x3WB_2D_register;
-
- case AArch64::ST1x4WB_8B_fixed: return AArch64::ST1x4WB_8B_register;
- case AArch64::ST1x4WB_4H_fixed: return AArch64::ST1x4WB_4H_register;
- case AArch64::ST1x4WB_2S_fixed: return AArch64::ST1x4WB_2S_register;
- case AArch64::ST1x4WB_1D_fixed: return AArch64::ST1x4WB_1D_register;
- case AArch64::ST1x4WB_16B_fixed: return AArch64::ST1x4WB_16B_register;
- case AArch64::ST1x4WB_8H_fixed: return AArch64::ST1x4WB_8H_register;
- case AArch64::ST1x4WB_4S_fixed: return AArch64::ST1x4WB_4S_register;
- case AArch64::ST1x4WB_2D_fixed: return AArch64::ST1x4WB_2D_register;
-
- // Post-index of duplicate loads
- case AArch64::LD2R_WB_8B_fixed: return AArch64::LD2R_WB_8B_register;
- case AArch64::LD2R_WB_4H_fixed: return AArch64::LD2R_WB_4H_register;
- case AArch64::LD2R_WB_2S_fixed: return AArch64::LD2R_WB_2S_register;
- case AArch64::LD2R_WB_1D_fixed: return AArch64::LD2R_WB_1D_register;
- case AArch64::LD2R_WB_16B_fixed: return AArch64::LD2R_WB_16B_register;
- case AArch64::LD2R_WB_8H_fixed: return AArch64::LD2R_WB_8H_register;
- case AArch64::LD2R_WB_4S_fixed: return AArch64::LD2R_WB_4S_register;
- case AArch64::LD2R_WB_2D_fixed: return AArch64::LD2R_WB_2D_register;
-
- case AArch64::LD3R_WB_8B_fixed: return AArch64::LD3R_WB_8B_register;
- case AArch64::LD3R_WB_4H_fixed: return AArch64::LD3R_WB_4H_register;
- case AArch64::LD3R_WB_2S_fixed: return AArch64::LD3R_WB_2S_register;
- case AArch64::LD3R_WB_1D_fixed: return AArch64::LD3R_WB_1D_register;
- case AArch64::LD3R_WB_16B_fixed: return AArch64::LD3R_WB_16B_register;
- case AArch64::LD3R_WB_8H_fixed: return AArch64::LD3R_WB_8H_register;
- case AArch64::LD3R_WB_4S_fixed: return AArch64::LD3R_WB_4S_register;
- case AArch64::LD3R_WB_2D_fixed: return AArch64::LD3R_WB_2D_register;
-
- case AArch64::LD4R_WB_8B_fixed: return AArch64::LD4R_WB_8B_register;
- case AArch64::LD4R_WB_4H_fixed: return AArch64::LD4R_WB_4H_register;
- case AArch64::LD4R_WB_2S_fixed: return AArch64::LD4R_WB_2S_register;
- case AArch64::LD4R_WB_1D_fixed: return AArch64::LD4R_WB_1D_register;
- case AArch64::LD4R_WB_16B_fixed: return AArch64::LD4R_WB_16B_register;
- case AArch64::LD4R_WB_8H_fixed: return AArch64::LD4R_WB_8H_register;
- case AArch64::LD4R_WB_4S_fixed: return AArch64::LD4R_WB_4S_register;
- case AArch64::LD4R_WB_2D_fixed: return AArch64::LD4R_WB_2D_register;
-
- // Post-index of lane loads
- case AArch64::LD2LN_WB_B_fixed: return AArch64::LD2LN_WB_B_register;
- case AArch64::LD2LN_WB_H_fixed: return AArch64::LD2LN_WB_H_register;
- case AArch64::LD2LN_WB_S_fixed: return AArch64::LD2LN_WB_S_register;
- case AArch64::LD2LN_WB_D_fixed: return AArch64::LD2LN_WB_D_register;
-
- case AArch64::LD3LN_WB_B_fixed: return AArch64::LD3LN_WB_B_register;
- case AArch64::LD3LN_WB_H_fixed: return AArch64::LD3LN_WB_H_register;
- case AArch64::LD3LN_WB_S_fixed: return AArch64::LD3LN_WB_S_register;
- case AArch64::LD3LN_WB_D_fixed: return AArch64::LD3LN_WB_D_register;
-
- case AArch64::LD4LN_WB_B_fixed: return AArch64::LD4LN_WB_B_register;
- case AArch64::LD4LN_WB_H_fixed: return AArch64::LD4LN_WB_H_register;
- case AArch64::LD4LN_WB_S_fixed: return AArch64::LD4LN_WB_S_register;
- case AArch64::LD4LN_WB_D_fixed: return AArch64::LD4LN_WB_D_register;
-
- // Post-index of lane stores
- case AArch64::ST2LN_WB_B_fixed: return AArch64::ST2LN_WB_B_register;
- case AArch64::ST2LN_WB_H_fixed: return AArch64::ST2LN_WB_H_register;
- case AArch64::ST2LN_WB_S_fixed: return AArch64::ST2LN_WB_S_register;
- case AArch64::ST2LN_WB_D_fixed: return AArch64::ST2LN_WB_D_register;
-
- case AArch64::ST3LN_WB_B_fixed: return AArch64::ST3LN_WB_B_register;
- case AArch64::ST3LN_WB_H_fixed: return AArch64::ST3LN_WB_H_register;
- case AArch64::ST3LN_WB_S_fixed: return AArch64::ST3LN_WB_S_register;
- case AArch64::ST3LN_WB_D_fixed: return AArch64::ST3LN_WB_D_register;
-
- case AArch64::ST4LN_WB_B_fixed: return AArch64::ST4LN_WB_B_register;
- case AArch64::ST4LN_WB_H_fixed: return AArch64::ST4LN_WB_H_register;
- case AArch64::ST4LN_WB_S_fixed: return AArch64::ST4LN_WB_S_register;
- case AArch64::ST4LN_WB_D_fixed: return AArch64::ST4LN_WB_D_register;
- }
- return Opc; // If not one we handle, return it unchanged.
+ // Form a REG_SEQUENCE to force register allocation.
+ unsigned Vec0Off = ExtOff + 1;
+ SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
+ N->op_begin() + Vec0Off + NumVecs);
+ SDValue RegSeq = createQTuple(Regs);
+
+ SmallVector<SDValue, 6> Ops;
+ if (isExt)
+ Ops.push_back(N->getOperand(1));
+ Ops.push_back(RegSeq);
+ Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
+ return CurDAG->getMachineNode(Opc, dl, VT, Ops);
}
-SDNode *AArch64DAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating,
- unsigned NumVecs,
- const uint16_t *Opcodes) {
- assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
+SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ if (LD->isUnindexed())
+ return nullptr;
+ EVT VT = LD->getMemoryVT();
+ EVT DstVT = N->getValueType(0);
+ ISD::MemIndexedMode AM = LD->getAddressingMode();
+ bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
+
+ // We're not doing validity checking here. That was done when checking
+ // if we should mark the load as indexed or not. We're just selecting
+ // the right instruction.
+ unsigned Opcode = 0;
+
+ ISD::LoadExtType ExtType = LD->getExtensionType();
+ bool InsertTo64 = false;
+ if (VT == MVT::i64)
+ Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
+ else if (VT == MVT::i32) {
+ if (ExtType == ISD::NON_EXTLOAD)
+ Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
+ else if (ExtType == ISD::SEXTLOAD)
+ Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
+ else {
+ Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
+ InsertTo64 = true;
+ // The result of the load is only i32. It's the subreg_to_reg that makes
+ // it into an i64.
+ DstVT = MVT::i32;
+ }
+ } else if (VT == MVT::i16) {
+ if (ExtType == ISD::SEXTLOAD) {
+ if (DstVT == MVT::i64)
+ Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
+ else
+ Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
+ } else {
+ Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
+ InsertTo64 = DstVT == MVT::i64;
+ // The result of the load is only i32. It's the subreg_to_reg that makes
+ // it into an i64.
+ DstVT = MVT::i32;
+ }
+ } else if (VT == MVT::i8) {
+ if (ExtType == ISD::SEXTLOAD) {
+ if (DstVT == MVT::i64)
+ Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
+ else
+ Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
+ } else {
+ Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
+ InsertTo64 = DstVT == MVT::i64;
+ // The result of the load is only i32. It's the subreg_to_reg that makes
+ // it into an i64.
+ DstVT = MVT::i32;
+ }
+ } else if (VT == MVT::f32) {
+ Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
+ } else if (VT == MVT::f64 || VT.is64BitVector()) {
+ Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
+ } else if (VT.is128BitVector()) {
+ Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
+ } else
+ return nullptr;
+ SDValue Chain = LD->getChain();
+ SDValue Base = LD->getBasePtr();
+ ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
+ int OffsetVal = (int)OffsetOp->getZExtValue();
+ SDValue Offset = CurDAG->getTargetConstant(OffsetVal, MVT::i64);
+ SDValue Ops[] = { Base, Offset, Chain };
+ SDNode *Res = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i64, DstVT,
+ MVT::Other, Ops);
+ // Either way, we're replacing the node, so tell the caller that.
+ Done = true;
+ SDValue LoadedVal = SDValue(Res, 1);
+ if (InsertTo64) {
+ SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
+ LoadedVal =
+ SDValue(CurDAG->getMachineNode(
+ AArch64::SUBREG_TO_REG, SDLoc(N), MVT::i64,
+ CurDAG->getTargetConstant(0, MVT::i64), LoadedVal, SubReg),
+ 0);
+ }
+
+ ReplaceUses(SDValue(N, 0), LoadedVal);
+ ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
+ ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
+
+ return nullptr;
+}
+SDNode *AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs,
+ unsigned Opc, unsigned SubRegIdx) {
+ SDLoc dl(N);
EVT VT = N->getValueType(0);
- unsigned OpcodeIndex;
- bool is64BitVector = VT.is64BitVector();
- switch (VT.getScalarType().getSizeInBits()) {
- case 8: OpcodeIndex = is64BitVector ? 0 : 4; break;
- case 16: OpcodeIndex = is64BitVector ? 1 : 5; break;
- case 32: OpcodeIndex = is64BitVector ? 2 : 6; break;
- case 64: OpcodeIndex = is64BitVector ? 3 : 7; break;
- default: llvm_unreachable("unhandled vector load type");
- }
- unsigned Opc = Opcodes[OpcodeIndex];
+ SDValue Chain = N->getOperand(0);
- SmallVector<SDValue, 2> Ops;
- unsigned AddrOpIdx = isUpdating ? 1 : 2;
- Ops.push_back(N->getOperand(AddrOpIdx)); // Push back the Memory Address
+ SmallVector<SDValue, 6> Ops;
+ Ops.push_back(N->getOperand(2)); // Mem operand;
+ Ops.push_back(Chain);
- if (isUpdating) {
- SDValue Inc = N->getOperand(AddrOpIdx + 1);
- if (!isa<ConstantSDNode>(Inc.getNode())) // Increment in Register
- Opc = getVLDSTRegisterUpdateOpcode(Opc);
- Ops.push_back(Inc);
- }
+ std::vector<EVT> ResTys;
+ ResTys.push_back(MVT::Untyped);
+ ResTys.push_back(MVT::Other);
- Ops.push_back(N->getOperand(0)); // Push back the Chain
+ SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
+ SDValue SuperReg = SDValue(Ld, 0);
+ for (unsigned i = 0; i < NumVecs; ++i)
+ ReplaceUses(SDValue(N, i),
+ CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
- SmallVector<EVT, 3> ResTys;
- // Push back the type of return super register
- if (NumVecs == 1)
- ResTys.push_back(VT);
- else if (NumVecs == 3)
- ResTys.push_back(MVT::Untyped);
- else {
- EVT ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,
- is64BitVector ? NumVecs : NumVecs * 2);
- ResTys.push_back(ResTy);
- }
-
- if (isUpdating)
- ResTys.push_back(MVT::i64); // Type of the updated register
- ResTys.push_back(MVT::Other); // Type of the Chain
+ ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
+ return nullptr;
+}
+
+SDNode *AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
+ unsigned Opc, unsigned SubRegIdx) {
SDLoc dl(N);
- SDNode *VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
+ EVT VT = N->getValueType(0);
+ SDValue Chain = N->getOperand(0);
- // Transfer memoperands.
- MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
- MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
- cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1);
+ SmallVector<SDValue, 6> Ops;
+ Ops.push_back(N->getOperand(1)); // Mem operand
+ Ops.push_back(N->getOperand(2)); // Incremental
+ Ops.push_back(Chain);
+ std::vector<EVT> ResTys;
+ ResTys.push_back(MVT::i64); // Type of the write back register
+ ResTys.push_back(MVT::Untyped);
+ ResTys.push_back(MVT::Other);
+
+ SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
+
+ // Update uses of write back register
+ ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
+
+ // Update uses of vector list
+ SDValue SuperReg = SDValue(Ld, 1);
if (NumVecs == 1)
- return VLd;
-
- // If NumVecs > 1, the return result is a super register containing 2-4
- // consecutive vector registers.
- SDValue SuperReg = SDValue(VLd, 0);
-
- unsigned Sub0 = is64BitVector ? AArch64::dsub_0 : AArch64::qsub_0;
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- ReplaceUses(SDValue(N, Vec),
- CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
- // Update users of the Chain
- ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
- if (isUpdating)
- ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
-
- return NULL;
+ ReplaceUses(SDValue(N, 0), SuperReg);
+ else
+ for (unsigned i = 0; i < NumVecs; ++i)
+ ReplaceUses(SDValue(N, i),
+ CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
+
+ // Update the chain
+ ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
+ return nullptr;
}
-SDNode *AArch64DAGToDAGISel::SelectVST(SDNode *N, bool isUpdating,
- unsigned NumVecs,
- const uint16_t *Opcodes) {
- assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
+SDNode *AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
SDLoc dl(N);
+ EVT VT = N->getOperand(2)->getValueType(0);
- MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
- MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
+ // Form a REG_SEQUENCE to force register allocation.
+ bool Is128Bit = VT.getSizeInBits() == 128;
+ SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
+ SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
- unsigned AddrOpIdx = isUpdating ? 1 : 2;
- unsigned Vec0Idx = 3;
- EVT VT = N->getOperand(Vec0Idx).getValueType();
- unsigned OpcodeIndex;
- bool is64BitVector = VT.is64BitVector();
- switch (VT.getScalarType().getSizeInBits()) {
- case 8: OpcodeIndex = is64BitVector ? 0 : 4; break;
- case 16: OpcodeIndex = is64BitVector ? 1 : 5; break;
- case 32: OpcodeIndex = is64BitVector ? 2 : 6; break;
- case 64: OpcodeIndex = is64BitVector ? 3 : 7; break;
- default: llvm_unreachable("unhandled vector store type");
- }
- unsigned Opc = Opcodes[OpcodeIndex];
+ SmallVector<SDValue, 6> Ops;
+ Ops.push_back(RegSeq);
+ Ops.push_back(N->getOperand(NumVecs + 2));
+ Ops.push_back(N->getOperand(0));
+ SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
+ return St;
+}
+
+SDNode *AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
+ SDLoc dl(N);
+ EVT VT = N->getOperand(2)->getValueType(0);
SmallVector<EVT, 2> ResTys;
- if (isUpdating)
- ResTys.push_back(MVT::i64);
+ ResTys.push_back(MVT::i64); // Type of the write back register
ResTys.push_back(MVT::Other); // Type for the Chain
+ // Form a REG_SEQUENCE to force register allocation.
+ bool Is128Bit = VT.getSizeInBits() == 128;
+ SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
+ SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
+
SmallVector<SDValue, 6> Ops;
- Ops.push_back(N->getOperand(AddrOpIdx)); // Push back the Memory Address
+ Ops.push_back(RegSeq);
+ Ops.push_back(N->getOperand(NumVecs + 1)); // base register
+ Ops.push_back(N->getOperand(NumVecs + 2)); // Incremental
+ Ops.push_back(N->getOperand(0)); // Chain
+ SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
- if (isUpdating) {
- SDValue Inc = N->getOperand(AddrOpIdx + 1);
- if (!isa<ConstantSDNode>(Inc.getNode())) // Increment in Register
- Opc = getVLDSTRegisterUpdateOpcode(Opc);
- Ops.push_back(Inc);
+ return St;
+}
+
+/// WidenVector - Given a value in the V64 register class, produce the
+/// equivalent value in the V128 register class.
+class WidenVector {
+ SelectionDAG &DAG;
+
+public:
+ WidenVector(SelectionDAG &DAG) : DAG(DAG) {}
+
+ SDValue operator()(SDValue V64Reg) {
+ EVT VT = V64Reg.getValueType();
+ unsigned NarrowSize = VT.getVectorNumElements();
+ MVT EltTy = VT.getVectorElementType().getSimpleVT();
+ MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
+ SDLoc DL(V64Reg);
+
+ SDValue Undef =
+ SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
+ return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
}
+};
- SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Idx,
- N->op_begin() + Vec0Idx + NumVecs);
- SDValue SrcReg = is64BitVector ? createDTuple(Regs) : createQTuple(Regs);
- Ops.push_back(SrcReg);
+/// NarrowVector - Given a value in the V128 register class, produce the
+/// equivalent value in the V64 register class.
+static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
+ EVT VT = V128Reg.getValueType();
+ unsigned WideSize = VT.getVectorNumElements();
+ MVT EltTy = VT.getVectorElementType().getSimpleVT();
+ MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
- // Push back the Chain
+ return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
+ V128Reg);
+}
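+
+// Editor's note on WidenVector/NarrowVector above (illustrative, not part of
+// the original patch): they let the lane load/store selectors below work
+// purely on q-register tuples; a 64-bit operand such as v2i32 is widened to
+// v4i32 by inserting it at dsub of an IMPLICIT_DEF, and results are narrowed
+// back by extracting dsub again.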
+
+SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
+ SDLoc dl(N);
+ EVT VT = N->getValueType(0);
+ bool Narrow = VT.getSizeInBits() == 64;
+
+ // Form a REG_SEQUENCE to force register allocation.
+ SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
+
+ if (Narrow)
+ std::transform(Regs.begin(), Regs.end(), Regs.begin(),
+ WidenVector(*CurDAG));
+
+ SDValue RegSeq = createQTuple(Regs);
+
+ std::vector<EVT> ResTys;
+ ResTys.push_back(MVT::Untyped);
+ ResTys.push_back(MVT::Other);
+
+ unsigned LaneNo =
+ cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
+
+ SmallVector<SDValue, 6> Ops;
+ Ops.push_back(RegSeq);
+ Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
+ Ops.push_back(N->getOperand(NumVecs + 3));
Ops.push_back(N->getOperand(0));
+ SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
+ SDValue SuperReg = SDValue(Ld, 0);
+
+ EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
+ static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
+ AArch64::qsub3 };
+ for (unsigned i = 0; i < NumVecs; ++i) {
+ SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
+ if (Narrow)
+ NV = NarrowVector(NV, *CurDAG);
+ ReplaceUses(SDValue(N, i), NV);
+ }
- // Transfer memoperands.
- SDNode *VSt = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
- cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1);
+ ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
- return VSt;
+ return Ld;
}
-SDValue
-AArch64DAGToDAGISel::getTargetSubregToReg(int SRIdx, SDLoc DL, EVT VT, EVT VTD,
- SDValue Operand) {
- SDNode *Reg = CurDAG->getMachineNode(TargetOpcode::SUBREG_TO_REG, DL,
- VT, VTD, MVT::Other,
- CurDAG->getTargetConstant(0, MVT::i64),
- Operand,
- CurDAG->getTargetConstant(AArch64::sub_64, MVT::i32));
- return SDValue(Reg, 0);
+SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
+ SDLoc dl(N);
+ EVT VT = N->getValueType(0);
+ bool Narrow = VT.getSizeInBits() == 64;
+
+ // Form a REG_SEQUENCE to force register allocation.
+ SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
+
+ if (Narrow)
+ std::transform(Regs.begin(), Regs.end(), Regs.begin(),
+ WidenVector(*CurDAG));
+
+ SDValue RegSeq = createQTuple(Regs);
+
+ std::vector<EVT> ResTys;
+ ResTys.push_back(MVT::i64); // Type of the write back register
+ ResTys.push_back(MVT::Untyped);
+ ResTys.push_back(MVT::Other);
+
+ unsigned LaneNo =
+ cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
+
+ SmallVector<SDValue, 6> Ops;
+ Ops.push_back(RegSeq);
+ Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64)); // Lane Number
+ Ops.push_back(N->getOperand(NumVecs + 2)); // Base register
+ Ops.push_back(N->getOperand(NumVecs + 3)); // Incremental
+ Ops.push_back(N->getOperand(0));
+ SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
+
+ // Update uses of the write back register
+ ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
+
+ // Update uses of the vector list
+ SDValue SuperReg = SDValue(Ld, 1);
+ if (NumVecs == 1) {
+ ReplaceUses(SDValue(N, 0),
+ Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
+ } else {
+ EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
+ static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
+ AArch64::qsub3 };
+ for (unsigned i = 0; i < NumVecs; ++i) {
+ SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
+ SuperReg);
+ if (Narrow)
+ NV = NarrowVector(NV, *CurDAG);
+ ReplaceUses(SDValue(N, i), NV);
+ }
+ }
+
+ // Update the Chain
+ ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
+
+ return Ld;
}
-SDNode *AArch64DAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
- unsigned NumVecs,
- const uint16_t *Opcodes) {
- assert(NumVecs >=2 && NumVecs <= 4 && "Load Dup NumVecs out-of-range");
+SDNode *AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
SDLoc dl(N);
+ EVT VT = N->getOperand(2)->getValueType(0);
+ bool Narrow = VT.getSizeInBits() == 64;
+
+ // Form a REG_SEQUENCE to force register allocation.
+ SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
+
+ if (Narrow)
+ std::transform(Regs.begin(), Regs.end(), Regs.begin(),
+ WidenVector(*CurDAG));
+
+ SDValue RegSeq = createQTuple(Regs);
+
+ unsigned LaneNo =
+ cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
- EVT VT = N->getValueType(0);
- unsigned OpcodeIndex;
- bool is64BitVector = VT.is64BitVector();
- switch (VT.getScalarType().getSizeInBits()) {
- case 8: OpcodeIndex = is64BitVector ? 0 : 4; break;
- case 16: OpcodeIndex = is64BitVector ? 1 : 5; break;
- case 32: OpcodeIndex = is64BitVector ? 2 : 6; break;
- case 64: OpcodeIndex = is64BitVector ? 3 : 7; break;
- default: llvm_unreachable("unhandled vector duplicate lane load type");
- }
- unsigned Opc = Opcodes[OpcodeIndex];
-
- SDValue SuperReg;
SmallVector<SDValue, 6> Ops;
- Ops.push_back(N->getOperand(1)); // Push back the Memory Address
- if (isUpdating) {
- SDValue Inc = N->getOperand(2);
- if (!isa<ConstantSDNode>(Inc.getNode())) // Increment in Register
- Opc = getVLDSTRegisterUpdateOpcode(Opc);
- Ops.push_back(Inc);
- }
- Ops.push_back(N->getOperand(0)); // Push back the Chain
-
- SmallVector<EVT, 3> ResTys;
- // Push back the type of return super register
- if (NumVecs == 3)
- ResTys.push_back(MVT::Untyped);
- else {
- EVT ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,
- is64BitVector ? NumVecs : NumVecs * 2);
- ResTys.push_back(ResTy);
- }
- if (isUpdating)
- ResTys.push_back(MVT::i64); // Type of the updated register
- ResTys.push_back(MVT::Other); // Type of the Chain
- SDNode *VLdDup = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
+ Ops.push_back(RegSeq);
+ Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
+ Ops.push_back(N->getOperand(NumVecs + 3));
+ Ops.push_back(N->getOperand(0));
+ SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
// Transfer memoperands.
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
- cast<MachineSDNode>(VLdDup)->setMemRefs(MemOp, MemOp + 1);
-
- SuperReg = SDValue(VLdDup, 0);
- unsigned Sub0 = is64BitVector ? AArch64::dsub_0 : AArch64::qsub_0;
- // Update uses of each registers in super register
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- ReplaceUses(SDValue(N, Vec),
- CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
- // Update uses of the Chain
- ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
- if (isUpdating)
- ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
- return NULL;
+ cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
+
+ return St;
}
-// We only have 128-bit vector type of load/store lane instructions.
-// If it is 64-bit vector, we also select it to the 128-bit instructions.
-// Just use SUBREG_TO_REG to adapt the input to 128-bit vector and
-// EXTRACT_SUBREG to get the 64-bit vector from the 128-bit vector output.
-SDNode *AArch64DAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
- bool isUpdating, unsigned NumVecs,
- const uint16_t *Opcodes) {
- assert(NumVecs >= 2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
+SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
SDLoc dl(N);
- unsigned AddrOpIdx = isUpdating ? 1 : 2;
- unsigned Vec0Idx = 3;
+ EVT VT = N->getOperand(2)->getValueType(0);
+ bool Narrow = VT.getSizeInBits() == 64;
- SDValue Chain = N->getOperand(0);
- unsigned Lane =
- cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
- EVT VT = N->getOperand(Vec0Idx).getValueType();
- bool is64BitVector = VT.is64BitVector();
- EVT VT64; // 64-bit Vector Type
-
- if (is64BitVector) {
- VT64 = VT;
- VT = EVT::getVectorVT(*CurDAG->getContext(), VT.getVectorElementType(),
- VT.getVectorNumElements() * 2);
- }
-
- unsigned OpcodeIndex;
- switch (VT.getScalarType().getSizeInBits()) {
- case 8: OpcodeIndex = 0; break;
- case 16: OpcodeIndex = 1; break;
- case 32: OpcodeIndex = 2; break;
- case 64: OpcodeIndex = 3; break;
- default: llvm_unreachable("unhandled vector lane load/store type");
- }
- unsigned Opc = Opcodes[OpcodeIndex];
-
- SmallVector<EVT, 3> ResTys;
- if (IsLoad) {
- // Push back the type of return super register
- if (NumVecs == 3)
- ResTys.push_back(MVT::Untyped);
- else {
- EVT ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,
- is64BitVector ? NumVecs : NumVecs * 2);
- ResTys.push_back(ResTy);
- }
- }
- if (isUpdating)
- ResTys.push_back(MVT::i64); // Type of the updated register
- ResTys.push_back(MVT::Other); // Type of Chain
- SmallVector<SDValue, 5> Ops;
- Ops.push_back(N->getOperand(AddrOpIdx)); // Push back the Memory Address
- if (isUpdating) {
- SDValue Inc = N->getOperand(AddrOpIdx + 1);
- if (!isa<ConstantSDNode>(Inc.getNode())) // Increment in Register
- Opc = getVLDSTRegisterUpdateOpcode(Opc);
- Ops.push_back(Inc);
- }
-
- SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Idx,
- N->op_begin() + Vec0Idx + NumVecs);
- if (is64BitVector)
- for (unsigned i = 0; i < Regs.size(); i++)
- Regs[i] = getTargetSubregToReg(AArch64::sub_64, dl, VT, VT64, Regs[i]);
- SDValue SuperReg = createQTuple(Regs);
-
- Ops.push_back(SuperReg); // Source Reg
- SDValue LaneValue = CurDAG->getTargetConstant(Lane, MVT::i32);
- Ops.push_back(LaneValue);
- Ops.push_back(Chain); // Push back the Chain
-
- SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
+ // Form a REG_SEQUENCE to force register allocation.
+ SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
+
+ if (Narrow)
+ std::transform(Regs.begin(), Regs.end(), Regs.begin(),
+ WidenVector(*CurDAG));
+
+ SDValue RegSeq = createQTuple(Regs);
+
+ SmallVector<EVT, 2> ResTys;
+ ResTys.push_back(MVT::i64); // Type of the write back register
+ ResTys.push_back(MVT::Other);
+
+ unsigned LaneNo =
+ cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
+
+ SmallVector<SDValue, 6> Ops;
+ Ops.push_back(RegSeq);
+ Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
+ Ops.push_back(N->getOperand(NumVecs + 2)); // Base Register
+ Ops.push_back(N->getOperand(NumVecs + 3)); // Incremental
+ Ops.push_back(N->getOperand(0));
+ SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
+
+ // Transfer memoperands.
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
- cast<MachineSDNode>(VLdLn)->setMemRefs(MemOp, MemOp + 1);
- if (!IsLoad)
- return VLdLn;
-
- // Extract the subregisters.
- SuperReg = SDValue(VLdLn, 0);
- unsigned Sub0 = AArch64::qsub_0;
- // Update uses of each registers in super register
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
- SDValue SUB0 = CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg);
- if (is64BitVector) {
- SUB0 = CurDAG->getTargetExtractSubreg(AArch64::sub_64, dl, VT64, SUB0);
- }
- ReplaceUses(SDValue(N, Vec), SUB0);
+ cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
+
+ return St;
+}
+
+static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
+ unsigned &Opc, SDValue &Opd0,
+ unsigned &LSB, unsigned &MSB,
+ unsigned NumberOfIgnoredLowBits,
+ bool BiggerPattern) {
+ assert(N->getOpcode() == ISD::AND &&
+ "N must be a AND operation to call this function");
+
+ EVT VT = N->getValueType(0);
+
+ // Here we can test the type of VT and return false when the type does not
+ // match, but since it is done prior to that call in the current context
+ // we turned that into an assert to avoid redundant code.
+ assert((VT == MVT::i32 || VT == MVT::i64) &&
+ "Type checking must have been done before calling this function");
+
+ // FIXME: simplify-demanded-bits in DAGCombine will probably have
+ // changed the AND node to a 32-bit mask operation. We'll have to
+ // undo that as part of the transform here if we want to catch all
+ // the opportunities.
+ // Currently the NumberOfIgnoredLowBits argument helps to recover
+ // from these situations when matching a bigger pattern (bitfield insert).
+
+ // For unsigned extracts, check for a shift right and mask
+ uint64_t And_imm = 0;
+ if (!isOpcWithIntImmediate(N, ISD::AND, And_imm))
+ return false;
+
+ const SDNode *Op0 = N->getOperand(0).getNode();
+
+ // Because of simplify-demanded-bits in DAGCombine, the mask may have been
+ // simplified. Try to undo that
+ And_imm |= (1 << NumberOfIgnoredLowBits) - 1;
+
+ // The immediate is a mask of the low bits iff imm & (imm+1) == 0
+ if (And_imm & (And_imm + 1))
+ return false;
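+ // For example (editor's illustration), 0xff passes the check
+ // (0xff & 0x100 == 0) while 0x0ff0 does not (0x0ff0 & 0x0ff1 != 0), so only
+ // masks of the low bits survive to this point.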
+
+ bool ClampMSB = false;
+ uint64_t Srl_imm = 0;
+ // Handle the SRL + ANY_EXTEND case.
+ if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
+ isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, Srl_imm)) {
+ // Extend the incoming operand of the SRL to 64-bit.
+ Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
+ // Make sure to clamp the MSB so that we preserve the semantics of the
+ // original operations.
+ ClampMSB = true;
+ } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
+ isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
+ Srl_imm)) {
+ // If the shift result was truncated, we can still combine them.
+ Opd0 = Op0->getOperand(0).getOperand(0);
+
+ // Use the type of SRL node.
+ VT = Opd0->getValueType(0);
+ } else if (isOpcWithIntImmediate(Op0, ISD::SRL, Srl_imm)) {
+ Opd0 = Op0->getOperand(0);
+ } else if (BiggerPattern) {
+ // Let's pretend a 0 shift right has been performed.
+ // The resulting code will be at least as good as the original one
+ // plus it may expose more opportunities for bitfield insert pattern.
+ // FIXME: Currently we limit this to the bigger pattern, because
+ // some optimizations expect AND and not UBFM
+ Opd0 = N->getOperand(0);
+ } else
+ return false;
+
+ assert((BiggerPattern || (Srl_imm > 0 && Srl_imm < VT.getSizeInBits())) &&
+ "bad amount in shift node!");
+
+ LSB = Srl_imm;
+ MSB = Srl_imm + (VT == MVT::i32 ? CountTrailingOnes_32(And_imm)
+ : CountTrailingOnes_64(And_imm)) -
+ 1;
+ if (ClampMSB)
+ // Since we're moving the extend before the right shift operation, we need
+ // to clamp the MSB to make sure we don't shift in undefined bits instead of
+ // the zeros which would get shifted in with the original right shift
+ // operation.
+ MSB = MSB > 31 ? 31 : MSB;
+
+ Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
+ return true;
+}
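+
+// Illustrative example for the AND-based extract above (editor's sketch, not
+// part of the original patch): for (i32)((x >> 5) & 0xff) we get Srl_imm = 5
+// and And_imm = 0xff, so LSB = 5 and
+// MSB = 5 + CountTrailingOnes_32(0xff) - 1 = 12; the node is selected as
+// UBFMWri x, 5, 12, i.e. "ubfx w0, wN, #5, #8".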
+
+static bool isOneBitExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
+ unsigned &LSB, unsigned &MSB) {
+ // We are looking for the following pattern which basically extracts a single
+ // bit from the source value and places it in the LSB of the destination
+ // value, while all other bits of the destination value are set to zero:
+ //
+ // Value2 = AND Value, MaskImm
+ // SRL Value2, ShiftImm
+ //
+ // with MaskImm >> ShiftImm == 1.
+ //
+ // This gets selected into a single UBFM:
+ //
+ // UBFM Value, ShiftImm, ShiftImm
+ //
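+ // For instance (editor's illustration): (i32)((x & 0x20) >> 5) has
+ // MaskImm = 0x20 and ShiftImm = 5, and 0x20 >> 5 == 1, so it becomes
+ // UBFMWri x, 5, 5 ("ubfx w0, wN, #5, #1"), leaving just bit 5 in the LSB.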
+
+ if (N->getOpcode() != ISD::SRL)
+ return false;
+
+ uint64_t And_mask = 0;
+ if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_mask))
+ return false;
+
+ Opd0 = N->getOperand(0).getOperand(0);
+
+ uint64_t Srl_imm = 0;
+ if (!isIntImmediate(N->getOperand(1), Srl_imm))
+ return false;
+
+ // Check whether we really have a one bit extract here.
+ if (And_mask >> Srl_imm == 0x1) {
+ if (N->getValueType(0) == MVT::i32)
+ Opc = AArch64::UBFMWri;
+ else
+ Opc = AArch64::UBFMXri;
+
+ LSB = MSB = Srl_imm;
+
+ return true;
}
- ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
- if (isUpdating)
- ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
- return NULL;
+
+ return false;
}
-unsigned AArch64DAGToDAGISel::getTBLOpc(bool IsExt, bool Is64Bit,
- unsigned NumOfVec) {
- assert(NumOfVec >= 1 && NumOfVec <= 4 && "VST NumVecs out-of-range");
+static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
+ unsigned &LSB, unsigned &MSB,
+ bool BiggerPattern) {
+ assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
+ "N must be a SHR/SRA operation to call this function");
+
+ EVT VT = N->getValueType(0);
+
+ // Here we can test the type of VT and return false when the type does not
+ // match, but since it is done prior to that call in the current context
+ // we turned that into an assert to avoid redundant code.
+ assert((VT == MVT::i32 || VT == MVT::i64) &&
+ "Type checking must have been done before calling this function");
+
+ // Check for AND + SRL doing a one bit extract.
+ if (isOneBitExtractOpFromShr(N, Opc, Opd0, LSB, MSB))
+ return true;
+
+ // We're looking for a shift of a shift.
+ uint64_t Shl_imm = 0;
+ uint64_t Trunc_bits = 0;
+ if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
+ Opd0 = N->getOperand(0).getOperand(0);
+ } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
+ N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
+ // We are looking for a shift of a truncate. Truncating from i64 to i32
+ // can be considered as setting the high 32 bits to zero. Our strategy here
+ // is to always generate a 64-bit UBFM. This consistency will help the CSE
+ // pass later find more redundancy.
+ Opd0 = N->getOperand(0).getOperand(0);
+ Trunc_bits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
+ VT = Opd0->getValueType(0);
+ assert(VT == MVT::i64 && "the promoted type should be i64");
+ } else if (BiggerPattern) {
+ // Let's pretend a 0 shift left has been performed.
+ // FIXME: Currently we limit this to the bigger pattern case,
+ // because some optimizations expect AND and not UBFM
+ Opd0 = N->getOperand(0);
+ } else
+ return false;
+
+ assert(Shl_imm < VT.getSizeInBits() && "bad amount in shift node!");
+ uint64_t Srl_imm = 0;
+ if (!isIntImmediate(N->getOperand(1), Srl_imm))
+ return false;
- unsigned Opc = 0;
- switch (NumOfVec) {
+ assert(Srl_imm > 0 && Srl_imm < VT.getSizeInBits() &&
+ "bad amount in shift node!");
+ // Note: The width operand is encoded as width-1.
+ unsigned Width = VT.getSizeInBits() - Trunc_bits - Srl_imm - 1;
+ int sLSB = Srl_imm - Shl_imm;
+ if (sLSB < 0)
+ return false;
+ LSB = sLSB;
+ MSB = LSB + Width;
+ // SRA requires a signed extraction
+ if (VT == MVT::i32)
+ Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
+ else
+ Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
+ return true;
+}
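+
+// Illustrative example for the shift-of-shift case above (editor's sketch,
+// not part of the original patch): for (i32)((x << 8) >> 16) with a logical
+// shift right, Shl_imm = 8 and Srl_imm = 16, so Width = 32 - 0 - 16 - 1 = 15,
+// LSB = 16 - 8 = 8 and MSB = 8 + 15 = 23; the node becomes UBFMWri x, 8, 23,
+// which extracts bits [23:8] into the low 16 bits ("ubfx w0, wN, #8, #16").
+// An arithmetic shift right would pick SBFMWri ("sbfx") instead.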
+
+static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
+ SDValue &Opd0, unsigned &LSB, unsigned &MSB,
+ unsigned NumberOfIgnoredLowBits = 0,
+ bool BiggerPattern = false) {
+ if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
+ return false;
+
+ switch (N->getOpcode()) {
default:
+ if (!N->isMachineOpcode())
+ return false;
break;
- case 1:
- if (IsExt)
- Opc = Is64Bit ? AArch64::TBX1_8b : AArch64::TBX1_16b;
+ case ISD::AND:
+ return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, LSB, MSB,
+ NumberOfIgnoredLowBits, BiggerPattern);
+ case ISD::SRL:
+ case ISD::SRA:
+ return isBitfieldExtractOpFromShr(N, Opc, Opd0, LSB, MSB, BiggerPattern);
+ }
+
+ unsigned NOpc = N->getMachineOpcode();
+ switch (NOpc) {
+ default:
+ return false;
+ case AArch64::SBFMWri:
+ case AArch64::UBFMWri:
+ case AArch64::SBFMXri:
+ case AArch64::UBFMXri:
+ Opc = NOpc;
+ Opd0 = N->getOperand(0);
+ LSB = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
+ MSB = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
+ return true;
+ }
+ // Unreachable
+ return false;
+}
+
+SDNode *AArch64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
+ unsigned Opc, LSB, MSB;
+ SDValue Opd0;
+ if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, LSB, MSB))
+ return nullptr;
+
+ EVT VT = N->getValueType(0);
+
+ // If the bit extract operation is 64bit but the original type is 32bit, we
+ // need to add one EXTRACT_SUBREG.
+ if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
+ SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(LSB, MVT::i64),
+ CurDAG->getTargetConstant(MSB, MVT::i64)};
+
+ SDNode *BFM = CurDAG->getMachineNode(Opc, SDLoc(N), MVT::i64, Ops64);
+ SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
+ MachineSDNode *Node =
+ CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, SDLoc(N), MVT::i32,
+ SDValue(BFM, 0), SubReg);
+ return Node;
+ }
+
+ SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(LSB, VT),
+ CurDAG->getTargetConstant(MSB, VT)};
+ return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
+}
+
+/// Does DstMask form a complementary pair with the mask provided by
+/// BitsToBeInserted, suitable for use in a BFI instruction. Roughly speaking,
+/// this asks whether DstMask zeroes precisely those bits that will be set by
+/// the other half.
+static bool isBitfieldDstMask(uint64_t DstMask, APInt BitsToBeInserted,
+ unsigned NumberOfIgnoredHighBits, EVT VT) {
+ assert((VT == MVT::i32 || VT == MVT::i64) &&
+ "i32 or i64 mask type expected!");
+ unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;
+
+ APInt SignificantDstMask = APInt(BitWidth, DstMask);
+ APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);
+
+ return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
+ (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
+}
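+
+// Illustrative example for isBitfieldDstMask above (editor's sketch, not part
+// of the original patch): with VT = i32 and no ignored high bits,
+// DstMask = 0xffff00ff is a valid destination mask for
+// BitsToBeInserted = 0x0000ff00, since the two masks do not overlap and
+// together cover all 32 bits; the insertee's AND can then be folded into the
+// bitfield insert.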
+
+// Look for bits that will be useful for later uses.
+// A bit is considered useless as soon as it is dropped and never used
+// before it has been dropped.
+// E.g., looking for the useful bits of x:
+// 1. y = x & 0x7
+// 2. z = y >> 2
+// After #1, the useful bits of x are 0x7; those useful bits of x live through
+// y.
+// After #2, the useful bits of x are 0x4.
+// However, if x is used by an unpredictable instruction, then all its bits
+// are useful.
+// E.g.
+// 1. y = x & 0x7
+// 2. z = y >> 2
+// 3. str x, [@x]
+static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
+
+static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
+ unsigned Depth) {
+ uint64_t Imm =
+ cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
+ Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
+ UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
+ getUsefulBits(Op, UsefulBits, Depth + 1);
+}
+
+static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
+ uint64_t Imm, uint64_t MSB,
+ unsigned Depth) {
+ // inherit the bitwidth value
+ APInt OpUsefulBits(UsefulBits);
+ OpUsefulBits = 1;
+
+ if (MSB >= Imm) {
+ OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
+ --OpUsefulBits;
+ // The interesting part will be in the lower part of the result
+ getUsefulBits(Op, OpUsefulBits, Depth + 1);
+ // The interesting part was starting at Imm in the argument
+ OpUsefulBits = OpUsefulBits.shl(Imm);
+ } else {
+ OpUsefulBits = OpUsefulBits.shl(MSB + 1);
+ --OpUsefulBits;
+ // The interesting part will be shifted in the result
+ OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
+ getUsefulBits(Op, OpUsefulBits, Depth + 1);
+ // The interesting part was at zero in the argument
+ OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm);
+ }
+
+ UsefulBits &= OpUsefulBits;
+}
+
+static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
+ unsigned Depth) {
+ uint64_t Imm =
+ cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
+ uint64_t MSB =
+ cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
+
+ getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
+}
+
+static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
+ unsigned Depth) {
+ uint64_t ShiftTypeAndValue =
+ cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
+ APInt Mask(UsefulBits);
+ Mask.clearAllBits();
+ Mask.flipAllBits();
+
+ if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
+ // Shift Left
+ uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
+ Mask = Mask.shl(ShiftAmt);
+ getUsefulBits(Op, Mask, Depth + 1);
+ Mask = Mask.lshr(ShiftAmt);
+ } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
+ // Shift Right
+ // We do not handle AArch64_AM::ASR, because the sign will change the
+ // number of useful bits
+ uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
+ Mask = Mask.lshr(ShiftAmt);
+ getUsefulBits(Op, Mask, Depth + 1);
+ Mask = Mask.shl(ShiftAmt);
+ } else
+ return;
+
+ UsefulBits &= Mask;
+}
+
+static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
+ unsigned Depth) {
+ uint64_t Imm =
+ cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
+ uint64_t MSB =
+ cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();
+
+ if (Op.getOperand(1) == Orig)
+ return getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
+
+ APInt OpUsefulBits(UsefulBits);
+ OpUsefulBits = 1;
+
+ if (MSB >= Imm) {
+ OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
+ --OpUsefulBits;
+ UsefulBits &= ~OpUsefulBits;
+ getUsefulBits(Op, UsefulBits, Depth + 1);
+ } else {
+ OpUsefulBits = OpUsefulBits.shl(MSB + 1);
+ --OpUsefulBits;
+ UsefulBits = ~(OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm));
+ getUsefulBits(Op, UsefulBits, Depth + 1);
+ }
+}
+
+static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
+ SDValue Orig, unsigned Depth) {
+
+ // Users of this node should have already been instruction selected
+ // FIXME: Can we turn that into an assert?
+ if (!UserNode->isMachineOpcode())
+ return;
+
+ switch (UserNode->getMachineOpcode()) {
+ default:
+ return;
+ case AArch64::ANDSWri:
+ case AArch64::ANDSXri:
+ case AArch64::ANDWri:
+ case AArch64::ANDXri:
+ // We increment Depth only when we call getUsefulBits.
+ return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
+ Depth);
+ case AArch64::UBFMWri:
+ case AArch64::UBFMXri:
+ return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);
+
+ case AArch64::ORRWrs:
+ case AArch64::ORRXrs:
+ if (UserNode->getOperand(1) != Orig)
+ return;
+ return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
+ Depth);
+ case AArch64::BFMWri:
+ case AArch64::BFMXri:
+ return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
+ }
+}
+
+static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
+ if (Depth >= 6)
+ return;
+ // Initialize UsefulBits
+ if (!Depth) {
+ unsigned Bitwidth = Op.getValueType().getScalarType().getSizeInBits();
+ // At the beginning, assume every produced bit is useful.
+ UsefulBits = APInt(Bitwidth, 0);
+ UsefulBits.flipAllBits();
+ }
+ APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);
+
+ for (SDNode *Node : Op.getNode()->uses()) {
+ // A use cannot produce useful bits
+ APInt UsefulBitsForUse = APInt(UsefulBits);
+ getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
+ UsersUsefulBits |= UsefulBitsForUse;
+ }
+ // UsefulBits contains the produced bits that are meaningful for the
+ // current definition, thus a user cannot make a bit meaningful at
+ // this point
+ UsefulBits &= UsersUsefulBits;
+}
+
+/// Create a machine node performing a notional SHL of Op by ShlAmount. If
+/// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
+/// 0, return Op unchanged.
+static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
+ if (ShlAmount == 0)
+ return Op;
+
+ EVT VT = Op.getValueType();
+ unsigned BitWidth = VT.getSizeInBits();
+ unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;
+
+ SDNode *ShiftNode;
+ if (ShlAmount > 0) {
+ // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
+ ShiftNode = CurDAG->getMachineNode(
+ UBFMOpc, SDLoc(Op), VT, Op,
+ CurDAG->getTargetConstant(BitWidth - ShlAmount, VT),
+ CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, VT));
+ } else {
+ // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
+ assert(ShlAmount < 0 && "expected right shift");
+ int ShrAmount = -ShlAmount;
+ ShiftNode = CurDAG->getMachineNode(
+ UBFMOpc, SDLoc(Op), VT, Op, CurDAG->getTargetConstant(ShrAmount, VT),
+ CurDAG->getTargetConstant(BitWidth - 1, VT));
+ }
+
+ return SDValue(ShiftNode, 0);
+}
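+
+// Illustrative example for getLeftShift above (editor's sketch, not part of
+// the original patch): on a 32-bit value, getLeftShift(Op, 3) emits
+// UBFMWri Op, 29, 28 (an "lsl #3"), getLeftShift(Op, -3) emits
+// UBFMWri Op, 3, 31 (an "lsr #3"), and getLeftShift(Op, 0) returns Op
+// unchanged.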
+
+/// Does this tree qualify as an attempt to move a bitfield into position,
+/// essentially "(and (shl VAL, N), Mask)".
+static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
+ SDValue &Src, int &ShiftAmount,
+ int &MaskWidth) {
+ EVT VT = Op.getValueType();
+ unsigned BitWidth = VT.getSizeInBits();
+ (void)BitWidth;
+ assert(BitWidth == 32 || BitWidth == 64);
+
+ APInt KnownZero, KnownOne;
+ CurDAG->computeKnownBits(Op, KnownZero, KnownOne);
+
+ // Non-zero in the sense that they're not provably zero, which is the key
+ // point if we want to use this value
+ uint64_t NonZeroBits = (~KnownZero).getZExtValue();
+
+ // Discard a constant AND mask if present. It's safe because the node will
+ // already have been factored into the computeKnownBits calculation above.
+ uint64_t AndImm;
+ if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
+ assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0);
+ Op = Op.getOperand(0);
+ }
+
+ uint64_t ShlImm;
+ if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
+ return false;
+ Op = Op.getOperand(0);
+
+ if (!isShiftedMask_64(NonZeroBits))
+ return false;
+
+ ShiftAmount = countTrailingZeros(NonZeroBits);
+ MaskWidth = CountTrailingOnes_64(NonZeroBits >> ShiftAmount);
+
+ // BFI encompasses sufficiently many nodes that it's worth inserting an extra
+ // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
+ // amount.
+ Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);
+
+ return true;
+}
+
+// Given an OR operation, check if we have the following pattern
+// ubfm c, b, imm, imm2 (or something that does the same job, see
+// isBitfieldExtractOp)
+// d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
+// countTrailingZeros(mask2) == imm2 - imm + 1
+// f = d | c
+// If yes, the given reference arguments will be updated so that one can replace
+// the OR instruction with:
+// f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2
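+// For instance (editor's illustration, not part of the original patch),
+//   f = (e & 0xffffff00) | ((b >> 8) & 0xff)
+// matches with ImmR = 8 and ImmS = 15 from the UBFM half, and the 0xffffff00
+// mask on e is exactly the complement of the inserted bits, so the AND is
+// dropped and the OR becomes a single "bfxil w_e, w_b, #8, #8".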
+static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
+ SDValue &Src, unsigned &ImmR,
+ unsigned &ImmS, SelectionDAG *CurDAG) {
+ assert(N->getOpcode() == ISD::OR && "Expect an OR operation");
+
+ // Set Opc
+ EVT VT = N->getValueType(0);
+ if (VT == MVT::i32)
+ Opc = AArch64::BFMWri;
+ else if (VT == MVT::i64)
+ Opc = AArch64::BFMXri;
+ else
+ return false;
+
+ // Because of simplify-demanded-bits in DAGCombine, involved masks may not
+ // have the expected shape. Try to undo that.
+ APInt UsefulBits;
+ getUsefulBits(SDValue(N, 0), UsefulBits);
+
+ unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
+ unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();
+
+ // OR is commutative, check both possibilities (does LLVM provide a
+ // way to do that directly, e.g., via a code matcher?)
+ SDValue OrOpd1Val = N->getOperand(1);
+ SDNode *OrOpd0 = N->getOperand(0).getNode();
+ SDNode *OrOpd1 = N->getOperand(1).getNode();
+ for (int i = 0; i < 2;
+ ++i, std::swap(OrOpd0, OrOpd1), OrOpd1Val = N->getOperand(0)) {
+ unsigned BFXOpc;
+ int DstLSB, Width;
+ if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
+ NumberOfIgnoredLowBits, true)) {
+ // Check that the returned opcode is compatible with the pattern,
+ // i.e., same type and zero extended (U and not S)
+ if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
+ (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
+ continue;
+
+ // Compute the width of the bitfield insertion
+ DstLSB = 0;
+ Width = ImmS - ImmR + 1;
+ // FIXME: This constraint is to catch bitfield insertion; we may
+ // want to widen the pattern if we want to grab the general bitfield
+ // move case.
+ if (Width <= 0)
+ continue;
+
+ // If the mask on the insertee is correct, we have a BFXIL operation. We
+ // can share the ImmR and ImmS values from the already-computed UBFM.
+ } else if (isBitfieldPositioningOp(CurDAG, SDValue(OrOpd0, 0), Src,
+ DstLSB, Width)) {
+ ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
+ ImmS = Width - 1;
+ } else
+ continue;
+
+ // Check the second part of the pattern
+ EVT VT = OrOpd1->getValueType(0);
+ assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");
+
+ // Compute the Known Zero for the candidate of the first operand.
+ // This allows us to catch more general cases than just looking for
+ // AND with imm. Indeed, simplify-demanded-bits may have removed
+ // the AND instruction because it proves it was useless.
+ APInt KnownZero, KnownOne;
+ CurDAG->computeKnownBits(OrOpd1Val, KnownZero, KnownOne);
+
+ // Check if there is enough room for the second operand to appear
+ // in the first one
+ APInt BitsToBeInserted =
+ APInt::getBitsSet(KnownZero.getBitWidth(), DstLSB, DstLSB + Width);
+
+ if ((BitsToBeInserted & ~KnownZero) != 0)
+ continue;
+
+ // Set the first operand
+ uint64_t Imm;
+ if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
+ isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
+ // In that case, we can eliminate the AND
+ Dst = OrOpd1->getOperand(0);
else
- Opc = Is64Bit ? AArch64::TBL1_8b : AArch64::TBL1_16b;
+ // Maybe the AND has been removed by simplify-demanded-bits
+ // or is useful because it discards more bits
+ Dst = OrOpd1Val;
+
+ // both parts match
+ return true;
+ }
+
+ return false;
+}
+
+SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
+ if (N->getOpcode() != ISD::OR)
+ return nullptr;
+
+ unsigned Opc;
+ unsigned LSB, MSB;
+ SDValue Opd0, Opd1;
+
+ if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG))
+ return nullptr;
+
+ EVT VT = N->getValueType(0);
+ SDValue Ops[] = { Opd0,
+ Opd1,
+ CurDAG->getTargetConstant(LSB, VT),
+ CurDAG->getTargetConstant(MSB, VT) };
+ return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
+}
+
+SDNode *AArch64DAGToDAGISel::SelectLIBM(SDNode *N) {
+ EVT VT = N->getValueType(0);
+ unsigned Variant;
+ unsigned Opc;
+ unsigned FRINTXOpcs[] = { AArch64::FRINTXSr, AArch64::FRINTXDr };
+
+ if (VT == MVT::f32) {
+ Variant = 0;
+ } else if (VT == MVT::f64) {
+ Variant = 1;
+ } else
+ return nullptr; // Unrecognized argument type. Fall back on default codegen.
+
+ // Pick the FRINTX variant needed to set the flags.
+ unsigned FRINTXOpc = FRINTXOpcs[Variant];
+
+ switch (N->getOpcode()) {
+ default:
+ return nullptr; // Unrecognized libm ISD node. Fall back on default codegen.
+ case ISD::FCEIL: {
+ unsigned FRINTPOpcs[] = { AArch64::FRINTPSr, AArch64::FRINTPDr };
+ Opc = FRINTPOpcs[Variant];
break;
- case 2:
- if (IsExt)
- Opc = Is64Bit ? AArch64::TBX2_8b : AArch64::TBX2_16b;
- else
- Opc = Is64Bit ? AArch64::TBL2_8b : AArch64::TBL2_16b;
+ }
+ case ISD::FFLOOR: {
+ unsigned FRINTMOpcs[] = { AArch64::FRINTMSr, AArch64::FRINTMDr };
+ Opc = FRINTMOpcs[Variant];
break;
- case 3:
- if (IsExt)
- Opc = Is64Bit ? AArch64::TBX3_8b : AArch64::TBX3_16b;
- else
- Opc = Is64Bit ? AArch64::TBL3_8b : AArch64::TBL3_16b;
+ }
+ case ISD::FTRUNC: {
+ unsigned FRINTZOpcs[] = { AArch64::FRINTZSr, AArch64::FRINTZDr };
+ Opc = FRINTZOpcs[Variant];
break;
- case 4:
- if (IsExt)
- Opc = Is64Bit ? AArch64::TBX4_8b : AArch64::TBX4_16b;
- else
- Opc = Is64Bit ? AArch64::TBL4_8b : AArch64::TBL4_16b;
+ }
+ case ISD::FROUND: {
+ unsigned FRINTAOpcs[] = { AArch64::FRINTASr, AArch64::FRINTADr };
+ Opc = FRINTAOpcs[Variant];
break;
}
+ }
+
+ SDLoc dl(N);
+ SDValue In = N->getOperand(0);
+ SmallVector<SDValue, 2> Ops;
+ Ops.push_back(In);
+
+ if (!TM.Options.UnsafeFPMath) {
+ SDNode *FRINTX = CurDAG->getMachineNode(FRINTXOpc, dl, VT, MVT::Glue, In);
+ Ops.push_back(SDValue(FRINTX, 1));
+ }
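+ // Editor's note (illustrative, not part of the original patch): e.g. an f64
+ // ISD::FFLOOR selects FRINTMDr here; with UnsafeFPMath off, an FRINTXDr of
+ // the same input is glued in first so that the inexact exception is still
+ // raised, matching what the libm call would have done.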
- return Opc;
+ return CurDAG->getMachineNode(Opc, dl, VT, Ops);
}
-SDNode *AArch64DAGToDAGISel::SelectVTBL(SDNode *N, unsigned NumVecs,
- bool IsExt) {
- assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
- SDLoc dl(N);
+bool
+AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
+ unsigned RegWidth) {
+ APFloat FVal(0.0);
+ if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
+ FVal = CN->getValueAPF();
+ else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
+ // Some otherwise illegal constants are allowed in this case.
+ if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
+ !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
+ return false;
+
+ ConstantPoolSDNode *CN =
+ dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
+ FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
+ } else
+ return false;
+
+ // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
+ // is between 1 and 32 for a destination w-register, or 1 and 64 for an
+ // x-register.
+ //
+ // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
+ // want THIS_NODE to be 2^fbits. This is much easier to deal with using
+ // integers.
+ bool IsExact;
- // Check the element of look up table is 64-bit or not
- unsigned Vec0Idx = IsExt ? 2 : 1;
- assert(!N->getOperand(Vec0Idx + 0).getValueType().is64BitVector() &&
- "The element of lookup table for vtbl and vtbx must be 128-bit");
+ // fbits is between 1 and 64 in the worst-case, which means the fmul
+ // could have 2^64 as an actual operand. Need 65 bits of precision.
+ APSInt IntVal(65, true);
+ FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
- // Check the return value type is 64-bit or not
- EVT ResVT = N->getValueType(0);
- bool is64BitRes = ResVT.is64BitVector();
+ // N.b. isPowerOf2 also checks for > 0.
+ if (!IsExact || !IntVal.isPowerOf2()) return false;
+ unsigned FBits = IntVal.logBase2();
- // Create new SDValue for vector list
- SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Idx,
- N->op_begin() + Vec0Idx + NumVecs);
- SDValue TblReg = createQTuple(Regs);
- unsigned Opc = getTBLOpc(IsExt, is64BitRes, NumVecs);
+ // Checks above should have guaranteed that we haven't lost information in
+ // finding FBits, but it must still be in range.
+ if (FBits == 0 || FBits > RegWidth) return false;
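+ // Illustrative example (editor's note, not part of the original patch): for
+ // (fp_to_sint (fmul x, 16.0)) aimed at a w-register, FVal = 16.0 converts
+ // exactly to IntVal = 16, a power of two, so FBits = 4 and the operand is
+ // accepted; the caller can then emit a fixed-point convert such as
+ // "fcvtzs w0, s0, #4".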
- SmallVector<SDValue, 3> Ops;
- if (IsExt)
- Ops.push_back(N->getOperand(1));
- Ops.push_back(TblReg);
- Ops.push_back(N->getOperand(Vec0Idx + NumVecs));
- return CurDAG->getMachineNode(Opc, dl, ResVT, Ops);
+ FixedPos = CurDAG->getTargetConstant(FBits, MVT::i32);
+ return true;
}
SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
// Dump information about the Node being selected
- DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << "\n");
+ DEBUG(errs() << "Selecting: ");
+ DEBUG(Node->dump(CurDAG));
+ DEBUG(errs() << "\n");
+ // If we have a custom node, we have already selected!
if (Node->isMachineOpcode()) {
- DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
+ DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
Node->setNodeId(-1);
- return NULL;
+ return nullptr;
}
- switch (Node->getOpcode()) {
- case ISD::ATOMIC_LOAD_ADD:
- return SelectAtomic(Node,
- AArch64::ATOMIC_LOAD_ADD_I8,
- AArch64::ATOMIC_LOAD_ADD_I16,
- AArch64::ATOMIC_LOAD_ADD_I32,
- AArch64::ATOMIC_LOAD_ADD_I64);
- case ISD::ATOMIC_LOAD_SUB:
- return SelectAtomic(Node,
- AArch64::ATOMIC_LOAD_SUB_I8,
- AArch64::ATOMIC_LOAD_SUB_I16,
- AArch64::ATOMIC_LOAD_SUB_I32,
- AArch64::ATOMIC_LOAD_SUB_I64);
- case ISD::ATOMIC_LOAD_AND:
- return SelectAtomic(Node,
- AArch64::ATOMIC_LOAD_AND_I8,
- AArch64::ATOMIC_LOAD_AND_I16,
- AArch64::ATOMIC_LOAD_AND_I32,
- AArch64::ATOMIC_LOAD_AND_I64);
- case ISD::ATOMIC_LOAD_OR:
- return SelectAtomic(Node,
- AArch64::ATOMIC_LOAD_OR_I8,
- AArch64::ATOMIC_LOAD_OR_I16,
- AArch64::ATOMIC_LOAD_OR_I32,
- AArch64::ATOMIC_LOAD_OR_I64);
- case ISD::ATOMIC_LOAD_XOR:
- return SelectAtomic(Node,
- AArch64::ATOMIC_LOAD_XOR_I8,
- AArch64::ATOMIC_LOAD_XOR_I16,
- AArch64::ATOMIC_LOAD_XOR_I32,
- AArch64::ATOMIC_LOAD_XOR_I64);
- case ISD::ATOMIC_LOAD_NAND:
- return SelectAtomic(Node,
- AArch64::ATOMIC_LOAD_NAND_I8,
- AArch64::ATOMIC_LOAD_NAND_I16,
- AArch64::ATOMIC_LOAD_NAND_I32,
- AArch64::ATOMIC_LOAD_NAND_I64);
- case ISD::ATOMIC_LOAD_MIN:
- return SelectAtomic(Node,
- AArch64::ATOMIC_LOAD_MIN_I8,
- AArch64::ATOMIC_LOAD_MIN_I16,
- AArch64::ATOMIC_LOAD_MIN_I32,
- AArch64::ATOMIC_LOAD_MIN_I64);
- case ISD::ATOMIC_LOAD_MAX:
- return SelectAtomic(Node,
- AArch64::ATOMIC_LOAD_MAX_I8,
- AArch64::ATOMIC_LOAD_MAX_I16,
- AArch64::ATOMIC_LOAD_MAX_I32,
- AArch64::ATOMIC_LOAD_MAX_I64);
- case ISD::ATOMIC_LOAD_UMIN:
- return SelectAtomic(Node,
- AArch64::ATOMIC_LOAD_UMIN_I8,
- AArch64::ATOMIC_LOAD_UMIN_I16,
- AArch64::ATOMIC_LOAD_UMIN_I32,
- AArch64::ATOMIC_LOAD_UMIN_I64);
- case ISD::ATOMIC_LOAD_UMAX:
- return SelectAtomic(Node,
- AArch64::ATOMIC_LOAD_UMAX_I8,
- AArch64::ATOMIC_LOAD_UMAX_I16,
- AArch64::ATOMIC_LOAD_UMAX_I32,
- AArch64::ATOMIC_LOAD_UMAX_I64);
- case ISD::ATOMIC_SWAP:
- return SelectAtomic(Node,
- AArch64::ATOMIC_SWAP_I8,
- AArch64::ATOMIC_SWAP_I16,
- AArch64::ATOMIC_SWAP_I32,
- AArch64::ATOMIC_SWAP_I64);
- case ISD::ATOMIC_CMP_SWAP:
- return SelectAtomic(Node,
- AArch64::ATOMIC_CMP_SWAP_I8,
- AArch64::ATOMIC_CMP_SWAP_I16,
- AArch64::ATOMIC_CMP_SWAP_I32,
- AArch64::ATOMIC_CMP_SWAP_I64);
- case ISD::FrameIndex: {
- int FI = cast<FrameIndexSDNode>(Node)->getIndex();
- EVT PtrTy = getTargetLowering()->getPointerTy();
- SDValue TFI = CurDAG->getTargetFrameIndex(FI, PtrTy);
- return CurDAG->SelectNodeTo(Node, AArch64::ADDxxi_lsl0_s, PtrTy,
- TFI, CurDAG->getTargetConstant(0, PtrTy));
- }
- case ISD::ConstantPool: {
- // Constant pools are fine, just create a Target entry.
- ConstantPoolSDNode *CN = cast<ConstantPoolSDNode>(Node);
- const Constant *C = CN->getConstVal();
- SDValue CP = CurDAG->getTargetConstantPool(C, CN->getValueType(0));
+ // A few custom selection cases.
+ SDNode *ResNode = nullptr;
+ EVT VT = Node->getValueType(0);
- ReplaceUses(SDValue(Node, 0), CP);
- return NULL;
- }
- case ISD::Constant: {
- SDNode *ResNode = 0;
- if (cast<ConstantSDNode>(Node)->getZExtValue() == 0) {
- // XZR and WZR are probably even better than an actual move: most of the
- // time they can be folded into another instruction with *no* cost.
-
- EVT Ty = Node->getValueType(0);
- assert((Ty == MVT::i32 || Ty == MVT::i64) && "unexpected type");
- uint16_t Register = Ty == MVT::i32 ? AArch64::WZR : AArch64::XZR;
- ResNode = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
- SDLoc(Node),
- Register, Ty).getNode();
- }
-
- // Next best option is a move-immediate, see if we can do that.
- if (!ResNode) {
- ResNode = TrySelectToMoveImm(Node);
- }
-
- if (ResNode)
- return ResNode;
+ switch (Node->getOpcode()) {
+ default:
+ break;
- // If even that fails we fall back to a lit-pool entry at the moment. Future
- // tuning may change this to a sequence of MOVZ/MOVN/MOVK instructions.
- ResNode = SelectToLitPool(Node);
- assert(ResNode && "We need *some* way to materialise a constant");
+ case ISD::ADD:
+ if (SDNode *I = SelectMLAV64LaneV128(Node))
+ return I;
+ break;
- // We want to continue selection at this point since the litpool access
- // generated used generic nodes for simplicity.
- ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
- Node = ResNode;
+ case ISD::LOAD: {
+ // Try to select as an indexed load. Fall through to normal processing
+ // if we can't.
+ bool Done = false;
+ SDNode *I = SelectIndexedLoad(Node, Done);
+ if (Done)
+ return I;
break;
}
- case ISD::ConstantFP: {
- if (A64Imms::isFPImm(cast<ConstantFPSDNode>(Node)->getValueAPF())) {
- // FMOV will take care of it from TableGen
- break;
- }
- SDNode *ResNode = LowerToFPLitPool(Node);
- ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
+ case ISD::SRL:
+ case ISD::AND:
+ case ISD::SRA:
+ if (SDNode *I = SelectBitfieldExtractOp(Node))
+ return I;
+ break;
- // We want to continue selection at this point since the litpool access
- // generated used generic nodes for simplicity.
- Node = ResNode;
+ case ISD::OR:
+ if (SDNode *I = SelectBitfieldInsertOp(Node))
+ return I;
break;
+
+ case ISD::EXTRACT_VECTOR_ELT: {
+ // Extracting lane zero is a special case where we can just use a plain
+ // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
+ // the rest of the compiler, especially the register allocator and copy
+ // propagation, to reason about, so is preferred when it's possible to
+ // use it.
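+ // For example (editor's illustration): extracting lane 0 of a v2f64 as f64
+ // becomes a plain dsub EXTRACT_SUBREG (a d-register copy), with no
+ // lane-indexed move needed.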
+ ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
+ // Bail and use the default Select() for non-zero lanes.
+ if (LaneNode->getZExtValue() != 0)
+ break;
+ // If the element type is not the same as the result type, likewise
+ // bail and use the default Select(), as there's more to do than just
+ // a cross-class COPY. This catches extracts of i8 and i16 elements
+ // since they will need an explicit zext.
+ if (VT != Node->getOperand(0).getValueType().getVectorElementType())
+ break;
+ unsigned SubReg;
+ switch (Node->getOperand(0)
+ .getValueType()
+ .getVectorElementType()
+ .getSizeInBits()) {
+ default:
+ llvm_unreachable("Unexpected vector element type!");
+ case 64:
+ SubReg = AArch64::dsub;
+ break;
+ case 32:
+ SubReg = AArch64::ssub;
+ break;
+ case 16: // FALLTHROUGH
+ case 8:
+ llvm_unreachable("unexpected zext-requiring extract element!");
+ }
+ SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
+ Node->getOperand(0));
+ DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
+ DEBUG(Extract->dumpr(CurDAG));
+ DEBUG(dbgs() << "\n");
+ return Extract.getNode();
}
- case AArch64ISD::NEON_LD1_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::LD1WB_8B_fixed, AArch64::LD1WB_4H_fixed,
- AArch64::LD1WB_2S_fixed, AArch64::LD1WB_1D_fixed,
- AArch64::LD1WB_16B_fixed, AArch64::LD1WB_8H_fixed,
- AArch64::LD1WB_4S_fixed, AArch64::LD1WB_2D_fixed
- };
- return SelectVLD(Node, true, 1, Opcodes);
- }
- case AArch64ISD::NEON_LD2_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::LD2WB_8B_fixed, AArch64::LD2WB_4H_fixed,
- AArch64::LD2WB_2S_fixed, AArch64::LD1x2WB_1D_fixed,
- AArch64::LD2WB_16B_fixed, AArch64::LD2WB_8H_fixed,
- AArch64::LD2WB_4S_fixed, AArch64::LD2WB_2D_fixed
- };
- return SelectVLD(Node, true, 2, Opcodes);
- }
- case AArch64ISD::NEON_LD3_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::LD3WB_8B_fixed, AArch64::LD3WB_4H_fixed,
- AArch64::LD3WB_2S_fixed, AArch64::LD1x3WB_1D_fixed,
- AArch64::LD3WB_16B_fixed, AArch64::LD3WB_8H_fixed,
- AArch64::LD3WB_4S_fixed, AArch64::LD3WB_2D_fixed
- };
- return SelectVLD(Node, true, 3, Opcodes);
- }
- case AArch64ISD::NEON_LD4_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::LD4WB_8B_fixed, AArch64::LD4WB_4H_fixed,
- AArch64::LD4WB_2S_fixed, AArch64::LD1x4WB_1D_fixed,
- AArch64::LD4WB_16B_fixed, AArch64::LD4WB_8H_fixed,
- AArch64::LD4WB_4S_fixed, AArch64::LD4WB_2D_fixed
- };
- return SelectVLD(Node, true, 4, Opcodes);
- }
- case AArch64ISD::NEON_LD1x2_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::LD1x2WB_8B_fixed, AArch64::LD1x2WB_4H_fixed,
- AArch64::LD1x2WB_2S_fixed, AArch64::LD1x2WB_1D_fixed,
- AArch64::LD1x2WB_16B_fixed, AArch64::LD1x2WB_8H_fixed,
- AArch64::LD1x2WB_4S_fixed, AArch64::LD1x2WB_2D_fixed
- };
- return SelectVLD(Node, true, 2, Opcodes);
- }
- case AArch64ISD::NEON_LD1x3_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::LD1x3WB_8B_fixed, AArch64::LD1x3WB_4H_fixed,
- AArch64::LD1x3WB_2S_fixed, AArch64::LD1x3WB_1D_fixed,
- AArch64::LD1x3WB_16B_fixed, AArch64::LD1x3WB_8H_fixed,
- AArch64::LD1x3WB_4S_fixed, AArch64::LD1x3WB_2D_fixed
- };
- return SelectVLD(Node, true, 3, Opcodes);
- }
- case AArch64ISD::NEON_LD1x4_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::LD1x4WB_8B_fixed, AArch64::LD1x4WB_4H_fixed,
- AArch64::LD1x4WB_2S_fixed, AArch64::LD1x4WB_1D_fixed,
- AArch64::LD1x4WB_16B_fixed, AArch64::LD1x4WB_8H_fixed,
- AArch64::LD1x4WB_4S_fixed, AArch64::LD1x4WB_2D_fixed
- };
- return SelectVLD(Node, true, 4, Opcodes);
- }
- case AArch64ISD::NEON_ST1_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::ST1WB_8B_fixed, AArch64::ST1WB_4H_fixed,
- AArch64::ST1WB_2S_fixed, AArch64::ST1WB_1D_fixed,
- AArch64::ST1WB_16B_fixed, AArch64::ST1WB_8H_fixed,
- AArch64::ST1WB_4S_fixed, AArch64::ST1WB_2D_fixed
- };
- return SelectVST(Node, true, 1, Opcodes);
- }
- case AArch64ISD::NEON_ST2_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::ST2WB_8B_fixed, AArch64::ST2WB_4H_fixed,
- AArch64::ST2WB_2S_fixed, AArch64::ST1x2WB_1D_fixed,
- AArch64::ST2WB_16B_fixed, AArch64::ST2WB_8H_fixed,
- AArch64::ST2WB_4S_fixed, AArch64::ST2WB_2D_fixed
- };
- return SelectVST(Node, true, 2, Opcodes);
- }
- case AArch64ISD::NEON_ST3_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::ST3WB_8B_fixed, AArch64::ST3WB_4H_fixed,
- AArch64::ST3WB_2S_fixed, AArch64::ST1x3WB_1D_fixed,
- AArch64::ST3WB_16B_fixed, AArch64::ST3WB_8H_fixed,
- AArch64::ST3WB_4S_fixed, AArch64::ST3WB_2D_fixed
- };
- return SelectVST(Node, true, 3, Opcodes);
- }
- case AArch64ISD::NEON_ST4_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::ST4WB_8B_fixed, AArch64::ST4WB_4H_fixed,
- AArch64::ST4WB_2S_fixed, AArch64::ST1x4WB_1D_fixed,
- AArch64::ST4WB_16B_fixed, AArch64::ST4WB_8H_fixed,
- AArch64::ST4WB_4S_fixed, AArch64::ST4WB_2D_fixed
- };
- return SelectVST(Node, true, 4, Opcodes);
- }
- case AArch64ISD::NEON_LD2DUP: {
- static const uint16_t Opcodes[] = {
- AArch64::LD2R_8B, AArch64::LD2R_4H, AArch64::LD2R_2S,
- AArch64::LD2R_1D, AArch64::LD2R_16B, AArch64::LD2R_8H,
- AArch64::LD2R_4S, AArch64::LD2R_2D
- };
- return SelectVLDDup(Node, false, 2, Opcodes);
- }
- case AArch64ISD::NEON_LD3DUP: {
- static const uint16_t Opcodes[] = {
- AArch64::LD3R_8B, AArch64::LD3R_4H, AArch64::LD3R_2S,
- AArch64::LD3R_1D, AArch64::LD3R_16B, AArch64::LD3R_8H,
- AArch64::LD3R_4S, AArch64::LD3R_2D
- };
- return SelectVLDDup(Node, false, 3, Opcodes);
- }
- case AArch64ISD::NEON_LD4DUP: {
- static const uint16_t Opcodes[] = {
- AArch64::LD4R_8B, AArch64::LD4R_4H, AArch64::LD4R_2S,
- AArch64::LD4R_1D, AArch64::LD4R_16B, AArch64::LD4R_8H,
- AArch64::LD4R_4S, AArch64::LD4R_2D
- };
- return SelectVLDDup(Node, false, 4, Opcodes);
- }
- case AArch64ISD::NEON_LD2DUP_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::LD2R_WB_8B_fixed, AArch64::LD2R_WB_4H_fixed,
- AArch64::LD2R_WB_2S_fixed, AArch64::LD2R_WB_1D_fixed,
- AArch64::LD2R_WB_16B_fixed, AArch64::LD2R_WB_8H_fixed,
- AArch64::LD2R_WB_4S_fixed, AArch64::LD2R_WB_2D_fixed
- };
- return SelectVLDDup(Node, true, 2, Opcodes);
- }
- case AArch64ISD::NEON_LD3DUP_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::LD3R_WB_8B_fixed, AArch64::LD3R_WB_4H_fixed,
- AArch64::LD3R_WB_2S_fixed, AArch64::LD3R_WB_1D_fixed,
- AArch64::LD3R_WB_16B_fixed, AArch64::LD3R_WB_8H_fixed,
- AArch64::LD3R_WB_4S_fixed, AArch64::LD3R_WB_2D_fixed
- };
- return SelectVLDDup(Node, true, 3, Opcodes);
- }
- case AArch64ISD::NEON_LD4DUP_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::LD4R_WB_8B_fixed, AArch64::LD4R_WB_4H_fixed,
- AArch64::LD4R_WB_2S_fixed, AArch64::LD4R_WB_1D_fixed,
- AArch64::LD4R_WB_16B_fixed, AArch64::LD4R_WB_8H_fixed,
- AArch64::LD4R_WB_4S_fixed, AArch64::LD4R_WB_2D_fixed
- };
- return SelectVLDDup(Node, true, 4, Opcodes);
- }
- case AArch64ISD::NEON_LD2LN_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::LD2LN_WB_B_fixed, AArch64::LD2LN_WB_H_fixed,
- AArch64::LD2LN_WB_S_fixed, AArch64::LD2LN_WB_D_fixed
- };
- return SelectVLDSTLane(Node, true, true, 2, Opcodes);
- }
- case AArch64ISD::NEON_LD3LN_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::LD3LN_WB_B_fixed, AArch64::LD3LN_WB_H_fixed,
- AArch64::LD3LN_WB_S_fixed, AArch64::LD3LN_WB_D_fixed
- };
- return SelectVLDSTLane(Node, true, true, 3, Opcodes);
- }
- case AArch64ISD::NEON_LD4LN_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::LD4LN_WB_B_fixed, AArch64::LD4LN_WB_H_fixed,
- AArch64::LD4LN_WB_S_fixed, AArch64::LD4LN_WB_D_fixed
- };
- return SelectVLDSTLane(Node, true, true, 4, Opcodes);
- }
- case AArch64ISD::NEON_ST2LN_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::ST2LN_WB_B_fixed, AArch64::ST2LN_WB_H_fixed,
- AArch64::ST2LN_WB_S_fixed, AArch64::ST2LN_WB_D_fixed
- };
- return SelectVLDSTLane(Node, false, true, 2, Opcodes);
- }
- case AArch64ISD::NEON_ST3LN_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::ST3LN_WB_B_fixed, AArch64::ST3LN_WB_H_fixed,
- AArch64::ST3LN_WB_S_fixed, AArch64::ST3LN_WB_D_fixed
- };
- return SelectVLDSTLane(Node, false, true, 3, Opcodes);
- }
- case AArch64ISD::NEON_ST4LN_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::ST4LN_WB_B_fixed, AArch64::ST4LN_WB_H_fixed,
- AArch64::ST4LN_WB_S_fixed, AArch64::ST4LN_WB_D_fixed
- };
- return SelectVLDSTLane(Node, false, true, 4, Opcodes);
- }
- case AArch64ISD::NEON_ST1x2_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::ST1x2WB_8B_fixed, AArch64::ST1x2WB_4H_fixed,
- AArch64::ST1x2WB_2S_fixed, AArch64::ST1x2WB_1D_fixed,
- AArch64::ST1x2WB_16B_fixed, AArch64::ST1x2WB_8H_fixed,
- AArch64::ST1x2WB_4S_fixed, AArch64::ST1x2WB_2D_fixed
- };
- return SelectVST(Node, true, 2, Opcodes);
- }
- case AArch64ISD::NEON_ST1x3_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::ST1x3WB_8B_fixed, AArch64::ST1x3WB_4H_fixed,
- AArch64::ST1x3WB_2S_fixed, AArch64::ST1x3WB_1D_fixed,
- AArch64::ST1x3WB_16B_fixed, AArch64::ST1x3WB_8H_fixed,
- AArch64::ST1x3WB_4S_fixed, AArch64::ST1x3WB_2D_fixed
- };
- return SelectVST(Node, true, 3, Opcodes);
- }
- case AArch64ISD::NEON_ST1x4_UPD: {
- static const uint16_t Opcodes[] = {
- AArch64::ST1x4WB_8B_fixed, AArch64::ST1x4WB_4H_fixed,
- AArch64::ST1x4WB_2S_fixed, AArch64::ST1x4WB_1D_fixed,
- AArch64::ST1x4WB_16B_fixed, AArch64::ST1x4WB_8H_fixed,
- AArch64::ST1x4WB_4S_fixed, AArch64::ST1x4WB_2D_fixed
- };
- return SelectVST(Node, true, 4, Opcodes);
- }
- case ISD::INTRINSIC_WO_CHAIN: {
- unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
- bool IsExt = false;
- switch (IntNo) {
- default:
- break;
- case Intrinsic::aarch64_neon_vtbx1:
- IsExt = true;
- case Intrinsic::aarch64_neon_vtbl1:
- return SelectVTBL(Node, 1, IsExt);
- case Intrinsic::aarch64_neon_vtbx2:
- IsExt = true;
- case Intrinsic::aarch64_neon_vtbl2:
- return SelectVTBL(Node, 2, IsExt);
- case Intrinsic::aarch64_neon_vtbx3:
- IsExt = true;
- case Intrinsic::aarch64_neon_vtbl3:
- return SelectVTBL(Node, 3, IsExt);
- case Intrinsic::aarch64_neon_vtbx4:
- IsExt = true;
- case Intrinsic::aarch64_neon_vtbl4:
- return SelectVTBL(Node, 4, IsExt);
+ case ISD::Constant: {
+ // Materialize zero constants as copies from WZR/XZR. This allows
+ // the coalescer to propagate these into other instructions.
+ ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
+ if (ConstNode->isNullValue()) {
+ if (VT == MVT::i32)
+ return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
+ AArch64::WZR, MVT::i32).getNode();
+ else if (VT == MVT::i64)
+ return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
+ AArch64::XZR, MVT::i64).getNode();
}
break;
}
- case ISD::INTRINSIC_VOID:
+
+ case ISD::FrameIndex: {
+ // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
+ int FI = cast<FrameIndexSDNode>(Node)->getIndex();
+ unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
+ const TargetLowering *TLI = getTargetLowering();
+ SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
+ SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
+ CurDAG->getTargetConstant(Shifter, MVT::i32) };
+ return CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
+ }
case ISD::INTRINSIC_W_CHAIN: {
unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
switch (IntNo) {
default:
break;
- case Intrinsic::arm_neon_vld1: {
- static const uint16_t Opcodes[] = {
- AArch64::LD1_8B, AArch64::LD1_4H, AArch64::LD1_2S, AArch64::LD1_1D,
- AArch64::LD1_16B, AArch64::LD1_8H, AArch64::LD1_4S, AArch64::LD1_2D
- };
- return SelectVLD(Node, false, 1, Opcodes);
- }
- case Intrinsic::arm_neon_vld2: {
- static const uint16_t Opcodes[] = {
- AArch64::LD2_8B, AArch64::LD2_4H, AArch64::LD2_2S, AArch64::LD1x2_1D,
- AArch64::LD2_16B, AArch64::LD2_8H, AArch64::LD2_4S, AArch64::LD2_2D
- };
- return SelectVLD(Node, false, 2, Opcodes);
- }
- case Intrinsic::arm_neon_vld3: {
- static const uint16_t Opcodes[] = {
- AArch64::LD3_8B, AArch64::LD3_4H, AArch64::LD3_2S, AArch64::LD1x3_1D,
- AArch64::LD3_16B, AArch64::LD3_8H, AArch64::LD3_4S, AArch64::LD3_2D
- };
- return SelectVLD(Node, false, 3, Opcodes);
+ case Intrinsic::aarch64_ldaxp:
+ case Intrinsic::aarch64_ldxp: {
+ unsigned Op =
+ IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
+ SDValue MemAddr = Node->getOperand(2);
+ SDLoc DL(Node);
+ SDValue Chain = Node->getOperand(0);
+
+ SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
+ MVT::Other, MemAddr, Chain);
+
+ // Transfer memoperands.
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
+ cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
+ return Ld;
}
- case Intrinsic::arm_neon_vld4: {
- static const uint16_t Opcodes[] = {
- AArch64::LD4_8B, AArch64::LD4_4H, AArch64::LD4_2S, AArch64::LD1x4_1D,
- AArch64::LD4_16B, AArch64::LD4_8H, AArch64::LD4_4S, AArch64::LD4_2D
- };
- return SelectVLD(Node, false, 4, Opcodes);
+ case Intrinsic::aarch64_stlxp:
+ case Intrinsic::aarch64_stxp: {
+ unsigned Op =
+ IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
+ SDLoc DL(Node);
+ SDValue Chain = Node->getOperand(0);
+ SDValue ValLo = Node->getOperand(2);
+ SDValue ValHi = Node->getOperand(3);
+ SDValue MemAddr = Node->getOperand(4);
+
+ // Place arguments in the right order.
+ SmallVector<SDValue, 7> Ops;
+ Ops.push_back(ValLo);
+ Ops.push_back(ValHi);
+ Ops.push_back(MemAddr);
+ Ops.push_back(Chain);
+
+ SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
+ // Transfer memoperands.
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
+ cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
+
+ return St;
}
- case Intrinsic::aarch64_neon_vld1x2: {
- static const uint16_t Opcodes[] = {
- AArch64::LD1x2_8B, AArch64::LD1x2_4H, AArch64::LD1x2_2S,
- AArch64::LD1x2_1D, AArch64::LD1x2_16B, AArch64::LD1x2_8H,
- AArch64::LD1x2_4S, AArch64::LD1x2_2D
- };
- return SelectVLD(Node, false, 2, Opcodes);
- }
- case Intrinsic::aarch64_neon_vld1x3: {
- static const uint16_t Opcodes[] = {
- AArch64::LD1x3_8B, AArch64::LD1x3_4H, AArch64::LD1x3_2S,
- AArch64::LD1x3_1D, AArch64::LD1x3_16B, AArch64::LD1x3_8H,
- AArch64::LD1x3_4S, AArch64::LD1x3_2D
- };
- return SelectVLD(Node, false, 3, Opcodes);
- }
- case Intrinsic::aarch64_neon_vld1x4: {
- static const uint16_t Opcodes[] = {
- AArch64::LD1x4_8B, AArch64::LD1x4_4H, AArch64::LD1x4_2S,
- AArch64::LD1x4_1D, AArch64::LD1x4_16B, AArch64::LD1x4_8H,
- AArch64::LD1x4_4S, AArch64::LD1x4_2D
- };
- return SelectVLD(Node, false, 4, Opcodes);
- }
- case Intrinsic::arm_neon_vst1: {
- static const uint16_t Opcodes[] = {
- AArch64::ST1_8B, AArch64::ST1_4H, AArch64::ST1_2S, AArch64::ST1_1D,
- AArch64::ST1_16B, AArch64::ST1_8H, AArch64::ST1_4S, AArch64::ST1_2D
- };
- return SelectVST(Node, false, 1, Opcodes);
- }
- case Intrinsic::arm_neon_vst2: {
- static const uint16_t Opcodes[] = {
- AArch64::ST2_8B, AArch64::ST2_4H, AArch64::ST2_2S, AArch64::ST1x2_1D,
- AArch64::ST2_16B, AArch64::ST2_8H, AArch64::ST2_4S, AArch64::ST2_2D
- };
- return SelectVST(Node, false, 2, Opcodes);
+ case Intrinsic::aarch64_neon_ld1x2:
+ if (VT == MVT::v8i8)
+ return SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
+ break;
+ case Intrinsic::aarch64_neon_ld1x3:
+ if (VT == MVT::v8i8)
+ return SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
+ break;
+ case Intrinsic::aarch64_neon_ld1x4:
+ if (VT == MVT::v8i8)
+ return SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
+ break;
+ case Intrinsic::aarch64_neon_ld2:
+ if (VT == MVT::v8i8)
+ return SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
+ break;
+ case Intrinsic::aarch64_neon_ld3:
+ if (VT == MVT::v8i8)
+ return SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
+ break;
+ case Intrinsic::aarch64_neon_ld4:
+ if (VT == MVT::v8i8)
+ return SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
+ break;
+ case Intrinsic::aarch64_neon_ld2r:
+ if (VT == MVT::v8i8)
+ return SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
+ break;
+ case Intrinsic::aarch64_neon_ld3r:
+ if (VT == MVT::v8i8)
+ return SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
+ break;
+ case Intrinsic::aarch64_neon_ld4r:
+ if (VT == MVT::v8i8)
+ return SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
+ break;
+ case Intrinsic::aarch64_neon_ld2lane:
+ if (VT == MVT::v16i8 || VT == MVT::v8i8)
+ return SelectLoadLane(Node, 2, AArch64::LD2i8);
+ else if (VT == MVT::v8i16 || VT == MVT::v4i16)
+ return SelectLoadLane(Node, 2, AArch64::LD2i16);
+ else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32)
+ return SelectLoadLane(Node, 2, AArch64::LD2i32);
+ else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64)
+ return SelectLoadLane(Node, 2, AArch64::LD2i64);
+ break;
+ case Intrinsic::aarch64_neon_ld3lane:
+ if (VT == MVT::v16i8 || VT == MVT::v8i8)
+ return SelectLoadLane(Node, 3, AArch64::LD3i8);
+ else if (VT == MVT::v8i16 || VT == MVT::v4i16)
+ return SelectLoadLane(Node, 3, AArch64::LD3i16);
+ else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32)
+ return SelectLoadLane(Node, 3, AArch64::LD3i32);
+ else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64)
+ return SelectLoadLane(Node, 3, AArch64::LD3i64);
+ break;
+ case Intrinsic::aarch64_neon_ld4lane:
+ if (VT == MVT::v16i8 || VT == MVT::v8i8)
+ return SelectLoadLane(Node, 4, AArch64::LD4i8);
+ else if (VT == MVT::v8i16 || VT == MVT::v4i16)
+ return SelectLoadLane(Node, 4, AArch64::LD4i16);
+ else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32)
+ return SelectLoadLane(Node, 4, AArch64::LD4i32);
+ else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64)
+ return SelectLoadLane(Node, 4, AArch64::LD4i64);
+ break;
}
- case Intrinsic::arm_neon_vst3: {
- static const uint16_t Opcodes[] = {
- AArch64::ST3_8B, AArch64::ST3_4H, AArch64::ST3_2S, AArch64::ST1x3_1D,
- AArch64::ST3_16B, AArch64::ST3_8H, AArch64::ST3_4S, AArch64::ST3_2D
- };
- return SelectVST(Node, false, 3, Opcodes);
+ } break;
+ case ISD::INTRINSIC_WO_CHAIN: {
+ unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
+ switch (IntNo) {
+ default:
+ break;
+ case Intrinsic::aarch64_neon_tbl2:
+ return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBLv8i8Two
+ : AArch64::TBLv16i8Two,
+ false);
+ case Intrinsic::aarch64_neon_tbl3:
+ return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
+ : AArch64::TBLv16i8Three,
+ false);
+ case Intrinsic::aarch64_neon_tbl4:
+ return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
+ : AArch64::TBLv16i8Four,
+ false);
+ case Intrinsic::aarch64_neon_tbx2:
+ return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBXv8i8Two
+ : AArch64::TBXv16i8Two,
+ true);
+ case Intrinsic::aarch64_neon_tbx3:
+ return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
+ : AArch64::TBXv16i8Three,
+ true);
+ case Intrinsic::aarch64_neon_tbx4:
+ return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
+ : AArch64::TBXv16i8Four,
+ true);
+ case Intrinsic::aarch64_neon_smull:
+ case Intrinsic::aarch64_neon_umull:
+ if (SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
+ return N;
+ break;
}
- case Intrinsic::arm_neon_vst4: {
- static const uint16_t Opcodes[] = {
- AArch64::ST4_8B, AArch64::ST4_4H, AArch64::ST4_2S, AArch64::ST1x4_1D,
- AArch64::ST4_16B, AArch64::ST4_8H, AArch64::ST4_4S, AArch64::ST4_2D
- };
- return SelectVST(Node, false, 4, Opcodes);
+ break;
+ }
+ case ISD::INTRINSIC_VOID: {
+ unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+ if (Node->getNumOperands() >= 3)
+ VT = Node->getOperand(2)->getValueType(0);
+ switch (IntNo) {
+ default:
+ break;
+ case Intrinsic::aarch64_neon_st1x2: {
+ if (VT == MVT::v8i8)
+ return SelectStore(Node, 2, AArch64::ST1Twov8b);
+ else if (VT == MVT::v16i8)
+ return SelectStore(Node, 2, AArch64::ST1Twov16b);
+ else if (VT == MVT::v4i16)
+ return SelectStore(Node, 2, AArch64::ST1Twov4h);
+ else if (VT == MVT::v8i16)
+ return SelectStore(Node, 2, AArch64::ST1Twov8h);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectStore(Node, 2, AArch64::ST1Twov2s);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectStore(Node, 2, AArch64::ST1Twov4s);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectStore(Node, 2, AArch64::ST1Twov2d);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectStore(Node, 2, AArch64::ST1Twov1d);
+ break;
}
- case Intrinsic::aarch64_neon_vst1x2: {
- static const uint16_t Opcodes[] = {
- AArch64::ST1x2_8B, AArch64::ST1x2_4H, AArch64::ST1x2_2S,
- AArch64::ST1x2_1D, AArch64::ST1x2_16B, AArch64::ST1x2_8H,
- AArch64::ST1x2_4S, AArch64::ST1x2_2D
- };
- return SelectVST(Node, false, 2, Opcodes);
+ case Intrinsic::aarch64_neon_st1x3: {
+ if (VT == MVT::v8i8)
+ return SelectStore(Node, 3, AArch64::ST1Threev8b);
+ else if (VT == MVT::v16i8)
+ return SelectStore(Node, 3, AArch64::ST1Threev16b);
+ else if (VT == MVT::v4i16)
+ return SelectStore(Node, 3, AArch64::ST1Threev4h);
+ else if (VT == MVT::v8i16)
+ return SelectStore(Node, 3, AArch64::ST1Threev8h);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectStore(Node, 3, AArch64::ST1Threev2s);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectStore(Node, 3, AArch64::ST1Threev4s);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectStore(Node, 3, AArch64::ST1Threev2d);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectStore(Node, 3, AArch64::ST1Threev1d);
+ break;
}
- case Intrinsic::aarch64_neon_vst1x3: {
- static const uint16_t Opcodes[] = {
- AArch64::ST1x3_8B, AArch64::ST1x3_4H, AArch64::ST1x3_2S,
- AArch64::ST1x3_1D, AArch64::ST1x3_16B, AArch64::ST1x3_8H,
- AArch64::ST1x3_4S, AArch64::ST1x3_2D
- };
- return SelectVST(Node, false, 3, Opcodes);
+ case Intrinsic::aarch64_neon_st1x4: {
+ if (VT == MVT::v8i8)
+ return SelectStore(Node, 4, AArch64::ST1Fourv8b);
+ else if (VT == MVT::v16i8)
+ return SelectStore(Node, 4, AArch64::ST1Fourv16b);
+ else if (VT == MVT::v4i16)
+ return SelectStore(Node, 4, AArch64::ST1Fourv4h);
+ else if (VT == MVT::v8i16)
+ return SelectStore(Node, 4, AArch64::ST1Fourv8h);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectStore(Node, 4, AArch64::ST1Fourv2s);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectStore(Node, 4, AArch64::ST1Fourv4s);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectStore(Node, 4, AArch64::ST1Fourv2d);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectStore(Node, 4, AArch64::ST1Fourv1d);
+ break;
}
- case Intrinsic::aarch64_neon_vst1x4: {
- static const uint16_t Opcodes[] = {
- AArch64::ST1x4_8B, AArch64::ST1x4_4H, AArch64::ST1x4_2S,
- AArch64::ST1x4_1D, AArch64::ST1x4_16B, AArch64::ST1x4_8H,
- AArch64::ST1x4_4S, AArch64::ST1x4_2D
- };
- return SelectVST(Node, false, 4, Opcodes);
+ case Intrinsic::aarch64_neon_st2: {
+ if (VT == MVT::v8i8)
+ return SelectStore(Node, 2, AArch64::ST2Twov8b);
+ else if (VT == MVT::v16i8)
+ return SelectStore(Node, 2, AArch64::ST2Twov16b);
+ else if (VT == MVT::v4i16)
+ return SelectStore(Node, 2, AArch64::ST2Twov4h);
+ else if (VT == MVT::v8i16)
+ return SelectStore(Node, 2, AArch64::ST2Twov8h);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectStore(Node, 2, AArch64::ST2Twov2s);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectStore(Node, 2, AArch64::ST2Twov4s);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectStore(Node, 2, AArch64::ST2Twov2d);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectStore(Node, 2, AArch64::ST1Twov1d);
+ break;
}
- case Intrinsic::arm_neon_vld2lane: {
- static const uint16_t Opcodes[] = {
- AArch64::LD2LN_B, AArch64::LD2LN_H, AArch64::LD2LN_S, AArch64::LD2LN_D
- };
- return SelectVLDSTLane(Node, true, false, 2, Opcodes);
+ case Intrinsic::aarch64_neon_st3: {
+ if (VT == MVT::v8i8)
+ return SelectStore(Node, 3, AArch64::ST3Threev8b);
+ else if (VT == MVT::v16i8)
+ return SelectStore(Node, 3, AArch64::ST3Threev16b);
+ else if (VT == MVT::v4i16)
+ return SelectStore(Node, 3, AArch64::ST3Threev4h);
+ else if (VT == MVT::v8i16)
+ return SelectStore(Node, 3, AArch64::ST3Threev8h);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectStore(Node, 3, AArch64::ST3Threev2s);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectStore(Node, 3, AArch64::ST3Threev4s);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectStore(Node, 3, AArch64::ST3Threev2d);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectStore(Node, 3, AArch64::ST1Threev1d);
+ break;
}
- case Intrinsic::arm_neon_vld3lane: {
- static const uint16_t Opcodes[] = {
- AArch64::LD3LN_B, AArch64::LD3LN_H, AArch64::LD3LN_S, AArch64::LD3LN_D
- };
- return SelectVLDSTLane(Node, true, false, 3, Opcodes);
+ case Intrinsic::aarch64_neon_st4: {
+ if (VT == MVT::v8i8)
+ return SelectStore(Node, 4, AArch64::ST4Fourv8b);
+ else if (VT == MVT::v16i8)
+ return SelectStore(Node, 4, AArch64::ST4Fourv16b);
+ else if (VT == MVT::v4i16)
+ return SelectStore(Node, 4, AArch64::ST4Fourv4h);
+ else if (VT == MVT::v8i16)
+ return SelectStore(Node, 4, AArch64::ST4Fourv8h);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectStore(Node, 4, AArch64::ST4Fourv2s);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectStore(Node, 4, AArch64::ST4Fourv4s);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectStore(Node, 4, AArch64::ST4Fourv2d);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectStore(Node, 4, AArch64::ST1Fourv1d);
+ break;
}
- case Intrinsic::arm_neon_vld4lane: {
- static const uint16_t Opcodes[] = {
- AArch64::LD4LN_B, AArch64::LD4LN_H, AArch64::LD4LN_S, AArch64::LD4LN_D
- };
- return SelectVLDSTLane(Node, true, false, 4, Opcodes);
+ case Intrinsic::aarch64_neon_st2lane: {
+ if (VT == MVT::v16i8 || VT == MVT::v8i8)
+ return SelectStoreLane(Node, 2, AArch64::ST2i8);
+ else if (VT == MVT::v8i16 || VT == MVT::v4i16)
+ return SelectStoreLane(Node, 2, AArch64::ST2i16);
+ else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32)
+ return SelectStoreLane(Node, 2, AArch64::ST2i32);
+ else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64)
+ return SelectStoreLane(Node, 2, AArch64::ST2i64);
+ break;
}
- case Intrinsic::arm_neon_vst2lane: {
- static const uint16_t Opcodes[] = {
- AArch64::ST2LN_B, AArch64::ST2LN_H, AArch64::ST2LN_S, AArch64::ST2LN_D
- };
- return SelectVLDSTLane(Node, false, false, 2, Opcodes);
+ case Intrinsic::aarch64_neon_st3lane: {
+ if (VT == MVT::v16i8 || VT == MVT::v8i8)
+ return SelectStoreLane(Node, 3, AArch64::ST3i8);
+ else if (VT == MVT::v8i16 || VT == MVT::v4i16)
+ return SelectStoreLane(Node, 3, AArch64::ST3i16);
+ else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32)
+ return SelectStoreLane(Node, 3, AArch64::ST3i32);
+ else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64)
+ return SelectStoreLane(Node, 3, AArch64::ST3i64);
+ break;
}
- case Intrinsic::arm_neon_vst3lane: {
- static const uint16_t Opcodes[] = {
- AArch64::ST3LN_B, AArch64::ST3LN_H, AArch64::ST3LN_S, AArch64::ST3LN_D
- };
- return SelectVLDSTLane(Node, false, false, 3, Opcodes);
+ case Intrinsic::aarch64_neon_st4lane: {
+ if (VT == MVT::v16i8 || VT == MVT::v8i8)
+ return SelectStoreLane(Node, 4, AArch64::ST4i8);
+ else if (VT == MVT::v8i16 || VT == MVT::v4i16)
+ return SelectStoreLane(Node, 4, AArch64::ST4i16);
+ else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32)
+ return SelectStoreLane(Node, 4, AArch64::ST4i32);
+ else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64)
+ return SelectStoreLane(Node, 4, AArch64::ST4i64);
+ break;
}
- case Intrinsic::arm_neon_vst4lane: {
- static const uint16_t Opcodes[] = {
- AArch64::ST4LN_B, AArch64::ST4LN_H, AArch64::ST4LN_S, AArch64::ST4LN_D
- };
- return SelectVLDSTLane(Node, false, false, 4, Opcodes);
}
- } // End of switch IntNo
+ }
+ case AArch64ISD::LD2post: {
+ if (VT == MVT::v8i8)
+ return SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
+ break;
+ }
+ case AArch64ISD::LD3post: {
+ if (VT == MVT::v8i8)
+ return SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
+ break;
+ }
+ case AArch64ISD::LD4post: {
+ if (VT == MVT::v8i8)
+ return SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
+ break;
+ }
+ case AArch64ISD::LD1x2post: {
+ if (VT == MVT::v8i8)
+ return SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
+ break;
+ }
+ case AArch64ISD::LD1x3post: {
+ if (VT == MVT::v8i8)
+ return SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
+ break;
+ }
+ case AArch64ISD::LD1x4post: {
+ if (VT == MVT::v8i8)
+ return SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
+ break;
+ }
+ case AArch64ISD::LD1DUPpost: {
+ if (VT == MVT::v8i8)
+ return SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
+ break;
+ }
+ case AArch64ISD::LD2DUPpost: {
+ if (VT == MVT::v8i8)
+ return SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
+ break;
+ }
+ case AArch64ISD::LD3DUPpost: {
+ if (VT == MVT::v8i8)
+ return SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
+ break;
+ }
+ case AArch64ISD::LD4DUPpost: {
+ if (VT == MVT::v8i8)
+ return SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
+ break;
+ }
+ case AArch64ISD::LD1LANEpost: {
+ if (VT == MVT::v16i8 || VT == MVT::v8i8)
+ return SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
+ else if (VT == MVT::v8i16 || VT == MVT::v4i16)
+ return SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32)
+ return SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64)
+ return SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
+ break;
+ }
+ case AArch64ISD::LD2LANEpost: {
+ if (VT == MVT::v16i8 || VT == MVT::v8i8)
+ return SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
+ else if (VT == MVT::v8i16 || VT == MVT::v4i16)
+ return SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32)
+ return SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64)
+ return SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
+ break;
+ }
+ case AArch64ISD::LD3LANEpost: {
+ if (VT == MVT::v16i8 || VT == MVT::v8i8)
+ return SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
+ else if (VT == MVT::v8i16 || VT == MVT::v4i16)
+ return SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32)
+ return SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64)
+ return SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
+ break;
+ }
+ case AArch64ISD::LD4LANEpost: {
+ if (VT == MVT::v16i8 || VT == MVT::v8i8)
+ return SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
+ else if (VT == MVT::v8i16 || VT == MVT::v4i16)
+ return SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32)
+ return SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64)
+ return SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
+ break;
+ }
+ case AArch64ISD::ST2post: {
+ VT = Node->getOperand(1).getValueType();
+ if (VT == MVT::v8i8)
+ return SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
+ else if (VT == MVT::v16i8)
+ return SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
+ else if (VT == MVT::v4i16)
+ return SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
+ else if (VT == MVT::v8i16)
+ return SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
+ break;
+ }
+ case AArch64ISD::ST3post: {
+ VT = Node->getOperand(1).getValueType();
+ if (VT == MVT::v8i8)
+ return SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
+ else if (VT == MVT::v16i8)
+ return SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
+ else if (VT == MVT::v4i16)
+ return SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
+ else if (VT == MVT::v8i16)
+ return SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
+ break;
+ }
+ case AArch64ISD::ST4post: {
+ VT = Node->getOperand(1).getValueType();
+ if (VT == MVT::v8i8)
+ return SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
+ else if (VT == MVT::v16i8)
+ return SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
+ else if (VT == MVT::v4i16)
+ return SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
+ else if (VT == MVT::v8i16)
+ return SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
+ break;
+ }
+ case AArch64ISD::ST1x2post: {
+ VT = Node->getOperand(1).getValueType();
+ if (VT == MVT::v8i8)
+ return SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
+ else if (VT == MVT::v16i8)
+ return SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
+ else if (VT == MVT::v4i16)
+ return SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
+ else if (VT == MVT::v8i16)
+ return SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
+ break;
+ }
+ case AArch64ISD::ST1x3post: {
+ VT = Node->getOperand(1).getValueType();
+ if (VT == MVT::v8i8)
+ return SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
+ else if (VT == MVT::v16i8)
+ return SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
+ else if (VT == MVT::v4i16)
+ return SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
+ else if (VT == MVT::v8i16)
+ return SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
+ break;
+ }
+ case AArch64ISD::ST1x4post: {
+ VT = Node->getOperand(1).getValueType();
+ if (VT == MVT::v8i8)
+ return SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
+ else if (VT == MVT::v16i8)
+ return SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
+ else if (VT == MVT::v4i16)
+ return SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
+ else if (VT == MVT::v8i16)
+ return SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
+ break;
+ }
+ case AArch64ISD::ST2LANEpost: {
+ VT = Node->getOperand(1).getValueType();
+ if (VT == MVT::v16i8 || VT == MVT::v8i8)
+ return SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
+ else if (VT == MVT::v8i16 || VT == MVT::v4i16)
+ return SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32)
+ return SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64)
+ return SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
+ break;
+ }
+ case AArch64ISD::ST3LANEpost: {
+ VT = Node->getOperand(1).getValueType();
+ if (VT == MVT::v16i8 || VT == MVT::v8i8)
+ return SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
+ else if (VT == MVT::v8i16 || VT == MVT::v4i16)
+ return SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32)
+ return SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64)
+ return SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
+ break;
+ }
+ case AArch64ISD::ST4LANEpost: {
+ VT = Node->getOperand(1).getValueType();
+ if (VT == MVT::v16i8 || VT == MVT::v8i8)
+ return SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
+ else if (VT == MVT::v8i16 || VT == MVT::v4i16)
+ return SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32)
+ return SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64)
+ return SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
break;
- } // End of case ISD::INTRINSIC_VOID and :ISD::INTRINSIC_W_CHAIN
- default:
- break; // Let generic code handle it
}
- SDNode *ResNode = SelectCode(Node);
+ case ISD::FCEIL:
+ case ISD::FFLOOR:
+ case ISD::FTRUNC:
+ case ISD::FROUND:
+ if (SDNode *I = SelectLIBM(Node))
+ return I;
+ break;
+ }
- DEBUG(dbgs() << "=> ";
- if (ResNode == NULL || ResNode == Node)
- Node->dump(CurDAG);
- else
- ResNode->dump(CurDAG);
- dbgs() << "\n");
+ // Select the default instruction
+ ResNode = SelectCode(Node);
+
+ DEBUG(errs() << "=> ");
+ if (ResNode == nullptr || ResNode == Node)
+ DEBUG(Node->dump(CurDAG));
+ else
+ DEBUG(ResNode->dump(CurDAG));
+ DEBUG(errs() << "\n");
return ResNode;
}
-/// This pass converts a legalized DAG into a AArch64-specific DAG, ready for
-/// instruction scheduling.
-FunctionPass *llvm::createAArch64ISelDAG(AArch64TargetMachine &TM,
+/// createAArch64ISelDag - This pass converts a legalized DAG into a
+/// AArch64-specific DAG, ready for instruction scheduling.
+FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
CodeGenOpt::Level OptLevel) {
return new AArch64DAGToDAGISel(TM, OptLevel);
}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index cf7aec3..f2004ea 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1,4 +1,4 @@
-//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation -----===//
+//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation ----===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,45 +7,87 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the interfaces that AArch64 uses to lower LLVM code into a
-// selection DAG.
+// This file implements the AArch64TargetLowering class.
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "aarch64-isel"
-#include "AArch64.h"
#include "AArch64ISelLowering.h"
+#include "AArch64PerfectShuffle.h"
+#include "AArch64Subtarget.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64TargetMachine.h"
#include "AArch64TargetObjectFile.h"
-#include "Utils/AArch64BaseInfo.h"
-#include "llvm/CodeGen/Analysis.h"
+#include "MCTargetDesc/AArch64AddressingModes.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-#include "llvm/IR/CallingConv.h"
-
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetOptions.h"
using namespace llvm;
-static TargetLoweringObjectFile *createTLOF(AArch64TargetMachine &TM) {
- const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>();
- assert (Subtarget->isTargetELF() && "unknown subtarget type");
- return new AArch64ElfTargetObjectFile();
-}
+#define DEBUG_TYPE "aarch64-lower"
+
+STATISTIC(NumTailCalls, "Number of tail calls");
+STATISTIC(NumShiftInserts, "Number of vector shift inserts");
+
+enum AlignMode {
+ StrictAlign,
+ NoStrictAlign
+};
+
+static cl::opt<AlignMode>
+Align(cl::desc("Load/store alignment support"),
+ cl::Hidden, cl::init(NoStrictAlign),
+ cl::values(
+ clEnumValN(StrictAlign, "aarch64-strict-align",
+ "Disallow all unaligned memory accesses"),
+ clEnumValN(NoStrictAlign, "aarch64-no-strict-align",
+ "Allow unaligned memory accesses"),
+ clEnumValEnd));
+
+// Place holder until extr generation is tested fully.
+static cl::opt<bool>
+EnableAArch64ExtrGeneration("aarch64-extr-generation", cl::Hidden,
+ cl::desc("Allow AArch64 (or (shift)(shift))->extract"),
+ cl::init(true));
+
+static cl::opt<bool>
+EnableAArch64SlrGeneration("aarch64-shift-insert-generation", cl::Hidden,
+ cl::desc("Allow AArch64 SLI/SRI formation"),
+ cl::init(false));
-AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
- : TargetLowering(TM, createTLOF(TM)), Itins(TM.getInstrItineraryData()) {
+//===----------------------------------------------------------------------===//
+// AArch64 Lowering public interface.
+//===----------------------------------------------------------------------===//
+static TargetLoweringObjectFile *createTLOF(const Triple &TT) {
+ if (TT.isOSBinFormatMachO())
+ return new AArch64_MachoTargetObjectFile();
+
+ return new AArch64_ELFTargetObjectFile();
+}
- const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>();
+AArch64TargetLowering::AArch64TargetLowering(TargetMachine &TM)
+ : TargetLowering(TM, createTLOF(Triple(TM.getTargetTriple()))) {
+ Subtarget = &TM.getSubtarget<AArch64Subtarget>();
- // SIMD compares set the entire lane's bits to 1
+ // AArch64 doesn't have comparisons which set GPRs or setcc instructions, so
+ // we have to make something up. Arbitrarily, choose ZeroOrOne.
+ setBooleanContents(ZeroOrOneBooleanContent);
+ // When comparing vectors the result sets the different elements in the
+ // vector to all-one or all-zero.
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
- // Scalar register <-> type mapping
- addRegisterClass(MVT::i32, &AArch64::GPR32RegClass);
- addRegisterClass(MVT::i64, &AArch64::GPR64RegClass);
+ // Set up the register classes.
+ addRegisterClass(MVT::i32, &AArch64::GPR32allRegClass);
+ addRegisterClass(MVT::i64, &AArch64::GPR64allRegClass);
if (Subtarget->hasFPARMv8()) {
addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
@@ -55,196 +97,86 @@ AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
}
if (Subtarget->hasNEON()) {
- // And the vectors
- addRegisterClass(MVT::v1i8, &AArch64::FPR8RegClass);
- addRegisterClass(MVT::v1i16, &AArch64::FPR16RegClass);
- addRegisterClass(MVT::v1i32, &AArch64::FPR32RegClass);
- addRegisterClass(MVT::v1i64, &AArch64::FPR64RegClass);
- addRegisterClass(MVT::v1f32, &AArch64::FPR32RegClass);
- addRegisterClass(MVT::v1f64, &AArch64::FPR64RegClass);
- addRegisterClass(MVT::v8i8, &AArch64::FPR64RegClass);
- addRegisterClass(MVT::v4i16, &AArch64::FPR64RegClass);
- addRegisterClass(MVT::v2i32, &AArch64::FPR64RegClass);
- addRegisterClass(MVT::v1i64, &AArch64::FPR64RegClass);
- addRegisterClass(MVT::v2f32, &AArch64::FPR64RegClass);
- addRegisterClass(MVT::v16i8, &AArch64::FPR128RegClass);
- addRegisterClass(MVT::v8i16, &AArch64::FPR128RegClass);
- addRegisterClass(MVT::v4i32, &AArch64::FPR128RegClass);
- addRegisterClass(MVT::v2i64, &AArch64::FPR128RegClass);
- addRegisterClass(MVT::v4f32, &AArch64::FPR128RegClass);
- addRegisterClass(MVT::v2f64, &AArch64::FPR128RegClass);
+ addRegisterClass(MVT::v16i8, &AArch64::FPR8RegClass);
+ addRegisterClass(MVT::v8i16, &AArch64::FPR16RegClass);
+ // Someone set us up the NEON.
+ addDRTypeForNEON(MVT::v2f32);
+ addDRTypeForNEON(MVT::v8i8);
+ addDRTypeForNEON(MVT::v4i16);
+ addDRTypeForNEON(MVT::v2i32);
+ addDRTypeForNEON(MVT::v1i64);
+ addDRTypeForNEON(MVT::v1f64);
+
+ addQRTypeForNEON(MVT::v4f32);
+ addQRTypeForNEON(MVT::v2f64);
+ addQRTypeForNEON(MVT::v16i8);
+ addQRTypeForNEON(MVT::v8i16);
+ addQRTypeForNEON(MVT::v4i32);
+ addQRTypeForNEON(MVT::v2i64);
}
+ // Compute derived properties from the register classes
computeRegisterProperties();
- // We combine OR nodes for bitfield and NEON BSL operations.
- setTargetDAGCombine(ISD::OR);
-
- setTargetDAGCombine(ISD::AND);
- setTargetDAGCombine(ISD::SRA);
- setTargetDAGCombine(ISD::SRL);
- setTargetDAGCombine(ISD::SHL);
-
- setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
- setTargetDAGCombine(ISD::INTRINSIC_VOID);
- setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
-
- // AArch64 does not have i1 loads, or much of anything for i1 really.
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
-
- setStackPointerRegisterToSaveRestore(AArch64::XSP);
- setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
- setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
- setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
-
- // We'll lower globals to wrappers for selection.
+ // Provide all sorts of operation actions
setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
-
- // A64 instructions have the comparison predicate attached to the user of the
- // result, but having a separate comparison is valuable for matching.
+ setOperationAction(ISD::SETCC, MVT::i32, Custom);
+ setOperationAction(ISD::SETCC, MVT::i64, Custom);
+ setOperationAction(ISD::SETCC, MVT::f32, Custom);
+ setOperationAction(ISD::SETCC, MVT::f64, Custom);
+ setOperationAction(ISD::BRCOND, MVT::Other, Expand);
setOperationAction(ISD::BR_CC, MVT::i32, Custom);
setOperationAction(ISD::BR_CC, MVT::i64, Custom);
setOperationAction(ISD::BR_CC, MVT::f32, Custom);
setOperationAction(ISD::BR_CC, MVT::f64, Custom);
-
setOperationAction(ISD::SELECT, MVT::i32, Custom);
setOperationAction(ISD::SELECT, MVT::i64, Custom);
setOperationAction(ISD::SELECT, MVT::f32, Custom);
setOperationAction(ISD::SELECT, MVT::f64, Custom);
-
setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
-
- setOperationAction(ISD::BRCOND, MVT::Other, Custom);
-
- setOperationAction(ISD::SETCC, MVT::i32, Custom);
- setOperationAction(ISD::SETCC, MVT::i64, Custom);
- setOperationAction(ISD::SETCC, MVT::f32, Custom);
- setOperationAction(ISD::SETCC, MVT::f64, Custom);
-
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
- setOperationAction(ISD::JumpTable, MVT::i32, Custom);
setOperationAction(ISD::JumpTable, MVT::i64, Custom);
- setOperationAction(ISD::VASTART, MVT::Other, Custom);
- setOperationAction(ISD::VACOPY, MVT::Other, Custom);
- setOperationAction(ISD::VAEND, MVT::Other, Expand);
- setOperationAction(ISD::VAARG, MVT::Other, Expand);
-
- setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
-
- setOperationAction(ISD::ROTL, MVT::i32, Expand);
- setOperationAction(ISD::ROTL, MVT::i64, Expand);
-
- setOperationAction(ISD::UREM, MVT::i32, Expand);
- setOperationAction(ISD::UREM, MVT::i64, Expand);
- setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
- setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
-
- setOperationAction(ISD::SREM, MVT::i32, Expand);
- setOperationAction(ISD::SREM, MVT::i64, Expand);
- setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
- setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
-
- setOperationAction(ISD::CTPOP, MVT::i32, Expand);
- setOperationAction(ISD::CTPOP, MVT::i64, Expand);
-
- // Legal floating-point operations.
- setOperationAction(ISD::FABS, MVT::f32, Legal);
- setOperationAction(ISD::FABS, MVT::f64, Legal);
-
- setOperationAction(ISD::FCEIL, MVT::f32, Legal);
- setOperationAction(ISD::FCEIL, MVT::f64, Legal);
-
- setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
- setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
-
- setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
- setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
-
- setOperationAction(ISD::FNEG, MVT::f32, Legal);
- setOperationAction(ISD::FNEG, MVT::f64, Legal);
-
- setOperationAction(ISD::FRINT, MVT::f32, Legal);
- setOperationAction(ISD::FRINT, MVT::f64, Legal);
-
- setOperationAction(ISD::FSQRT, MVT::f32, Legal);
- setOperationAction(ISD::FSQRT, MVT::f64, Legal);
-
- setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
- setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
-
- setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
- setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
- setOperationAction(ISD::ConstantFP, MVT::f128, Legal);
-
- // Illegal floating-point operations.
- setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
-
- setOperationAction(ISD::FCOS, MVT::f32, Expand);
- setOperationAction(ISD::FCOS, MVT::f64, Expand);
-
- setOperationAction(ISD::FEXP, MVT::f32, Expand);
- setOperationAction(ISD::FEXP, MVT::f64, Expand);
-
- setOperationAction(ISD::FEXP2, MVT::f32, Expand);
- setOperationAction(ISD::FEXP2, MVT::f64, Expand);
-
- setOperationAction(ISD::FLOG, MVT::f32, Expand);
- setOperationAction(ISD::FLOG, MVT::f64, Expand);
-
- setOperationAction(ISD::FLOG2, MVT::f32, Expand);
- setOperationAction(ISD::FLOG2, MVT::f64, Expand);
-
- setOperationAction(ISD::FLOG10, MVT::f32, Expand);
- setOperationAction(ISD::FLOG10, MVT::f64, Expand);
-
- setOperationAction(ISD::FPOW, MVT::f32, Expand);
- setOperationAction(ISD::FPOW, MVT::f64, Expand);
-
- setOperationAction(ISD::FPOWI, MVT::f32, Expand);
- setOperationAction(ISD::FPOWI, MVT::f64, Expand);
+ setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
+ setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
+ setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
setOperationAction(ISD::FREM, MVT::f32, Expand);
setOperationAction(ISD::FREM, MVT::f64, Expand);
+ setOperationAction(ISD::FREM, MVT::f80, Expand);
- setOperationAction(ISD::FSIN, MVT::f32, Expand);
- setOperationAction(ISD::FSIN, MVT::f64, Expand);
-
- setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
- setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
+ // Custom lowering hooks are needed for XOR
+ // to fold it into CSINC/CSINV.
+ setOperationAction(ISD::XOR, MVT::i32, Custom);
+ setOperationAction(ISD::XOR, MVT::i64, Custom);
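The fold that comment is aiming at is clearer from the instructions' own semantics; a minimal illustrative sketch (the csinc/csinv helper names are made up here, only the arithmetic matches the ISA):

    #include <cstdint>
    // CSINC Rd, Rn, Rm, cond  =>  Rd = cond ? Rn : Rm + 1
    // CSINV Rd, Rn, Rm, cond  =>  Rd = cond ? Rn : ~Rm
    static uint64_t csinc(bool Cond, uint64_t Rn, uint64_t Rm) { return Cond ? Rn : Rm + 1; }
    static uint64_t csinv(bool Cond, uint64_t Rn, uint64_t Rm) { return Cond ? Rn : ~Rm; }
    int main() {
      // A select whose false arm is an XOR with all-ones (or an add of one)
      // has exactly this shape, which is what the Custom XOR hook can exploit.
      return (csinv(false, 0, 0) == ~0ULL && csinc(false, 0, 0) == 1) ? 0 : 1;
    }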
// Virtually no operation on f128 is legal, but LLVM can't expand them when
// there's a valid register class, so we need custom operations in most cases.
- setOperationAction(ISD::FABS, MVT::f128, Expand);
- setOperationAction(ISD::FADD, MVT::f128, Custom);
- setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
- setOperationAction(ISD::FCOS, MVT::f128, Expand);
- setOperationAction(ISD::FDIV, MVT::f128, Custom);
- setOperationAction(ISD::FMA, MVT::f128, Expand);
- setOperationAction(ISD::FMUL, MVT::f128, Custom);
- setOperationAction(ISD::FNEG, MVT::f128, Expand);
- setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
- setOperationAction(ISD::FP_ROUND, MVT::f128, Expand);
- setOperationAction(ISD::FPOW, MVT::f128, Expand);
- setOperationAction(ISD::FREM, MVT::f128, Expand);
- setOperationAction(ISD::FRINT, MVT::f128, Expand);
- setOperationAction(ISD::FSIN, MVT::f128, Expand);
- setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
- setOperationAction(ISD::FSQRT, MVT::f128, Expand);
- setOperationAction(ISD::FSUB, MVT::f128, Custom);
- setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
- setOperationAction(ISD::SETCC, MVT::f128, Custom);
- setOperationAction(ISD::BR_CC, MVT::f128, Custom);
- setOperationAction(ISD::SELECT, MVT::f128, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
- setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
+ setOperationAction(ISD::FABS, MVT::f128, Expand);
+ setOperationAction(ISD::FADD, MVT::f128, Custom);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
+ setOperationAction(ISD::FCOS, MVT::f128, Expand);
+ setOperationAction(ISD::FDIV, MVT::f128, Custom);
+ setOperationAction(ISD::FMA, MVT::f128, Expand);
+ setOperationAction(ISD::FMUL, MVT::f128, Custom);
+ setOperationAction(ISD::FNEG, MVT::f128, Expand);
+ setOperationAction(ISD::FPOW, MVT::f128, Expand);
+ setOperationAction(ISD::FREM, MVT::f128, Expand);
+ setOperationAction(ISD::FRINT, MVT::f128, Expand);
+ setOperationAction(ISD::FSIN, MVT::f128, Expand);
+ setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
+ setOperationAction(ISD::FSQRT, MVT::f128, Expand);
+ setOperationAction(ISD::FSUB, MVT::f128, Custom);
+ setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
+ setOperationAction(ISD::SETCC, MVT::f128, Custom);
+ setOperationAction(ISD::BR_CC, MVT::f128, Custom);
+ setOperationAction(ISD::SELECT, MVT::f128, Custom);
+ setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
+ setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
// Lowering for many of the conversions is actually specified by the non-f128
// type. The LowerXXX function will be trivial when f128 isn't involved.
@@ -260,456 +192,588 @@ AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
- setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
- setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
+ setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
+ setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
- // This prevents LLVM trying to compress double constants into a floating
- // constant-pool entry and trying to load from there. It's of doubtful benefit
- // for A64: we'd need LDR followed by FCVT, I believe.
- setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);
+ // Variable arguments.
+ setOperationAction(ISD::VASTART, MVT::Other, Custom);
+ setOperationAction(ISD::VAARG, MVT::Other, Custom);
+ setOperationAction(ISD::VACOPY, MVT::Other, Custom);
+ setOperationAction(ISD::VAEND, MVT::Other, Expand);
- setTruncStoreAction(MVT::f128, MVT::f64, Expand);
- setTruncStoreAction(MVT::f128, MVT::f32, Expand);
- setTruncStoreAction(MVT::f128, MVT::f16, Expand);
- setTruncStoreAction(MVT::f64, MVT::f32, Expand);
- setTruncStoreAction(MVT::f64, MVT::f16, Expand);
- setTruncStoreAction(MVT::f32, MVT::f16, Expand);
+ // Variable-sized objects.
+ setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
+ setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
+ // Exception handling.
+ // FIXME: These are guesses. Has this been defined yet?
setExceptionPointerRegister(AArch64::X0);
setExceptionSelectorRegister(AArch64::X1);
- if (Subtarget->hasNEON()) {
- setOperationAction(ISD::BUILD_VECTOR, MVT::v1i8, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v1i16, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v1i32, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v1f32, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v1f64, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
-
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i16, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i32, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f32, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1f64, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
-
- setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Legal);
- setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Legal);
- setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Legal);
- setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Legal);
- setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Legal);
- setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Legal);
- setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Legal);
- setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Legal);
- setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Legal);
-
- setOperationAction(ISD::SETCC, MVT::v8i8, Custom);
- setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
- setOperationAction(ISD::SETCC, MVT::v4i16, Custom);
- setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
- setOperationAction(ISD::SETCC, MVT::v2i32, Custom);
- setOperationAction(ISD::SETCC, MVT::v4i32, Custom);
- setOperationAction(ISD::SETCC, MVT::v1i64, Custom);
- setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
- setOperationAction(ISD::SETCC, MVT::v1f32, Custom);
- setOperationAction(ISD::SETCC, MVT::v2f32, Custom);
- setOperationAction(ISD::SETCC, MVT::v4f32, Custom);
- setOperationAction(ISD::SETCC, MVT::v1f64, Custom);
- setOperationAction(ISD::SETCC, MVT::v2f64, Custom);
-
- setOperationAction(ISD::FFLOOR, MVT::v2f32, Legal);
- setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
- setOperationAction(ISD::FFLOOR, MVT::v1f64, Legal);
- setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
-
- setOperationAction(ISD::FCEIL, MVT::v2f32, Legal);
- setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
- setOperationAction(ISD::FCEIL, MVT::v1f64, Legal);
- setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
-
- setOperationAction(ISD::FTRUNC, MVT::v2f32, Legal);
- setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
- setOperationAction(ISD::FTRUNC, MVT::v1f64, Legal);
- setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
-
- setOperationAction(ISD::FRINT, MVT::v2f32, Legal);
- setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
- setOperationAction(ISD::FRINT, MVT::v1f64, Legal);
- setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
-
- setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Legal);
- setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
- setOperationAction(ISD::FNEARBYINT, MVT::v1f64, Legal);
- setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
-
- setOperationAction(ISD::FROUND, MVT::v2f32, Legal);
- setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
- setOperationAction(ISD::FROUND, MVT::v1f64, Legal);
- setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
- }
-}
+ // Constant pool entries
+ setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
-EVT AArch64TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
- // It's reasonably important that this value matches the "natural" legal
- // promotion from i1 for scalar types. Otherwise LegalizeTypes can get itself
- // in a twist (e.g. inserting an any_extend which then becomes i64 -> i64).
- if (!VT.isVector()) return MVT::i32;
- return VT.changeVectorElementTypeToInteger();
-}
+ // BlockAddress
+ setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
-static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord,
- unsigned &LdrOpc,
- unsigned &StrOpc) {
- static const unsigned LoadBares[] = {AArch64::LDXR_byte, AArch64::LDXR_hword,
- AArch64::LDXR_word, AArch64::LDXR_dword};
- static const unsigned LoadAcqs[] = {AArch64::LDAXR_byte, AArch64::LDAXR_hword,
- AArch64::LDAXR_word, AArch64::LDAXR_dword};
- static const unsigned StoreBares[] = {AArch64::STXR_byte, AArch64::STXR_hword,
- AArch64::STXR_word, AArch64::STXR_dword};
- static const unsigned StoreRels[] = {AArch64::STLXR_byte,AArch64::STLXR_hword,
- AArch64::STLXR_word, AArch64::STLXR_dword};
-
- const unsigned *LoadOps, *StoreOps;
- if (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent)
- LoadOps = LoadAcqs;
- else
- LoadOps = LoadBares;
+ // Add/Sub overflow ops with MVT::Glues are lowered to NZCV dependences.
+ setOperationAction(ISD::ADDC, MVT::i32, Custom);
+ setOperationAction(ISD::ADDE, MVT::i32, Custom);
+ setOperationAction(ISD::SUBC, MVT::i32, Custom);
+ setOperationAction(ISD::SUBE, MVT::i32, Custom);
+ setOperationAction(ISD::ADDC, MVT::i64, Custom);
+ setOperationAction(ISD::ADDE, MVT::i64, Custom);
+ setOperationAction(ISD::SUBC, MVT::i64, Custom);
+ setOperationAction(ISD::SUBE, MVT::i64, Custom);
+
+ // AArch64 lacks both left-rotate and popcount instructions.
+ setOperationAction(ISD::ROTL, MVT::i32, Expand);
+ setOperationAction(ISD::ROTL, MVT::i64, Expand);
- if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
- StoreOps = StoreRels;
- else
- StoreOps = StoreBares;
+ // AArch64 doesn't have {U|S}MUL_LOHI.
+ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
- assert(isPowerOf2_32(Size) && Size <= 8 &&
- "unsupported size for atomic binary op!");
- LdrOpc = LoadOps[Log2_32(Size)];
- StrOpc = StoreOps[Log2_32(Size)];
-}
+ // Expand the undefined-at-zero variants of cttz/ctlz to their defined-at-zero
+ // counterparts, which AArch64 supports directly.
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
-// FIXME: AArch64::DTripleRegClass and AArch64::QTripleRegClass don't really
-// have value type mapped, and they are both being defined as MVT::untyped.
-// Without knowing the MVT type, MachineLICM::getRegisterClassIDAndCost
-// would fail to figure out the register pressure correctly.
-std::pair<const TargetRegisterClass*, uint8_t>
-AArch64TargetLowering::findRepresentativeClass(MVT VT) const{
- const TargetRegisterClass *RRC = 0;
- uint8_t Cost = 1;
- switch (VT.SimpleTy) {
- default:
- return TargetLowering::findRepresentativeClass(VT);
- case MVT::v4i64:
- RRC = &AArch64::QPairRegClass;
- Cost = 2;
- break;
- case MVT::v8i64:
- RRC = &AArch64::QQuadRegClass;
- Cost = 4;
- break;
+ setOperationAction(ISD::CTPOP, MVT::i32, Custom);
+ setOperationAction(ISD::CTPOP, MVT::i64, Custom);
+
+ setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
+ setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
+ setOperationAction(ISD::SREM, MVT::i32, Expand);
+ setOperationAction(ISD::SREM, MVT::i64, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
+ setOperationAction(ISD::UREM, MVT::i32, Expand);
+ setOperationAction(ISD::UREM, MVT::i64, Expand);
+
+ // Custom lower Add/Sub/Mul with overflow.
+ setOperationAction(ISD::SADDO, MVT::i32, Custom);
+ setOperationAction(ISD::SADDO, MVT::i64, Custom);
+ setOperationAction(ISD::UADDO, MVT::i32, Custom);
+ setOperationAction(ISD::UADDO, MVT::i64, Custom);
+ setOperationAction(ISD::SSUBO, MVT::i32, Custom);
+ setOperationAction(ISD::SSUBO, MVT::i64, Custom);
+ setOperationAction(ISD::USUBO, MVT::i32, Custom);
+ setOperationAction(ISD::USUBO, MVT::i64, Custom);
+ setOperationAction(ISD::SMULO, MVT::i32, Custom);
+ setOperationAction(ISD::SMULO, MVT::i64, Custom);
+ setOperationAction(ISD::UMULO, MVT::i32, Custom);
+ setOperationAction(ISD::UMULO, MVT::i64, Custom);
+
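The overflow nodes above pair an ordinary result with an overflow bit; a small sketch of that meaning using a compiler builtin (assumes GCC/Clang; illustrative only, not the lowering itself):

    #include <cassert>
    #include <cstdint>
    int main() {
      int32_t Sum;
      // Signed 32-bit add that also reports overflow: the pair of values an
      // ISD::SADDO node produces.
      bool Overflow = __builtin_add_overflow(INT32_MAX, 1, &Sum);
      assert(Overflow && Sum == INT32_MIN);
      return 0;
    }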
+ setOperationAction(ISD::FSIN, MVT::f32, Expand);
+ setOperationAction(ISD::FSIN, MVT::f64, Expand);
+ setOperationAction(ISD::FCOS, MVT::f32, Expand);
+ setOperationAction(ISD::FCOS, MVT::f64, Expand);
+ setOperationAction(ISD::FPOW, MVT::f32, Expand);
+ setOperationAction(ISD::FPOW, MVT::f64, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
+
+ // AArch64 has implementations of a lot of rounding-like FP operations.
+ static MVT RoundingTypes[] = { MVT::f32, MVT::f64 };
+ for (unsigned I = 0; I < array_lengthof(RoundingTypes); ++I) {
+ MVT Ty = RoundingTypes[I];
+ setOperationAction(ISD::FFLOOR, Ty, Legal);
+ setOperationAction(ISD::FNEARBYINT, Ty, Legal);
+ setOperationAction(ISD::FCEIL, Ty, Legal);
+ setOperationAction(ISD::FRINT, Ty, Legal);
+ setOperationAction(ISD::FTRUNC, Ty, Legal);
+ setOperationAction(ISD::FROUND, Ty, Legal);
}
- return std::make_pair(RRC, Cost);
-}
-MachineBasicBlock *
-AArch64TargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
- unsigned Size,
- unsigned BinOpcode) const {
- // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
- const BasicBlock *LLVM_BB = BB->getBasicBlock();
- MachineFunction *MF = BB->getParent();
- MachineFunction::iterator It = BB;
- ++It;
+ if (Subtarget->isTargetMachO()) {
+ // For iOS, we don't want the normal expansion of a libcall to
+ // sincos. We want to issue a libcall to __sincos_stret to avoid memory
+ // traffic.
+ setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
+ setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
+ } else {
+ setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
+ setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
+ }
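The benefit of __sincos_stret is that both results come back by value instead of through out-pointers; a rough sketch of the difference (SinCosPair and sincosByValue are illustrative names, not Apple's actual ABI):

    #include <cmath>
    struct SinCosPair { double Sin, Cos; };
    // Returning by value lets both results stay in registers under the normal
    // return conventions, instead of the stores/loads forced by the classic
    // void sincos(double, double*, double*) expansion.
    static SinCosPair sincosByValue(double X) { return { std::sin(X), std::cos(X) }; }
    int main() { return sincosByValue(0.0).Cos == 1.0 ? 0 : 1; }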
- unsigned dest = MI->getOperand(0).getReg();
- unsigned ptr = MI->getOperand(1).getReg();
- unsigned incr = MI->getOperand(2).getReg();
- AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());
- DebugLoc dl = MI->getDebugLoc();
-
- MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
-
- unsigned ldrOpc, strOpc;
- getExclusiveOperation(Size, Ord, ldrOpc, strOpc);
-
- MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MF->insert(It, loopMBB);
- MF->insert(It, exitMBB);
-
- // Transfer the remainder of BB and its successor edges to exitMBB.
- exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
- exitMBB->transferSuccessorsAndUpdatePHIs(BB);
-
- const TargetRegisterClass *TRC
- = Size == 8 ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
- unsigned scratch = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);
-
- // thisMBB:
- // ...
- // fallthrough --> loopMBB
- BB->addSuccessor(loopMBB);
-
- // loopMBB:
- // ldxr dest, ptr
- // <binop> scratch, dest, incr
- // stxr stxr_status, scratch, ptr
- // cbnz stxr_status, loopMBB
- // fallthrough --> exitMBB
- BB = loopMBB;
- BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
- if (BinOpcode) {
- // All arithmetic operations we'll be creating are designed to take an extra
- // shift or extend operand, which we can conveniently set to zero.
-
- // Operand order needs to go the other way for NAND.
- if (BinOpcode == AArch64::BICwww_lsl || BinOpcode == AArch64::BICxxx_lsl)
- BuildMI(BB, dl, TII->get(BinOpcode), scratch)
- .addReg(incr).addReg(dest).addImm(0);
- else
- BuildMI(BB, dl, TII->get(BinOpcode), scratch)
- .addReg(dest).addReg(incr).addImm(0);
+ // AArch64 does not have floating-point extending loads, i1 sign-extending
+ // loads, floating-point truncating stores, or v2i32->v2i16 truncating stores.
+ setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f80, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Expand);
+ setTruncStoreAction(MVT::f32, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f128, MVT::f80, Expand);
+ setTruncStoreAction(MVT::f128, MVT::f64, Expand);
+ setTruncStoreAction(MVT::f128, MVT::f32, Expand);
+ setTruncStoreAction(MVT::f128, MVT::f16, Expand);
+
+ setOperationAction(ISD::BITCAST, MVT::i16, Custom);
+ setOperationAction(ISD::BITCAST, MVT::f16, Custom);
+
+ // Indexed loads and stores are supported.
+ for (unsigned im = (unsigned)ISD::PRE_INC;
+ im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
+ setIndexedLoadAction(im, MVT::i8, Legal);
+ setIndexedLoadAction(im, MVT::i16, Legal);
+ setIndexedLoadAction(im, MVT::i32, Legal);
+ setIndexedLoadAction(im, MVT::i64, Legal);
+ setIndexedLoadAction(im, MVT::f64, Legal);
+ setIndexedLoadAction(im, MVT::f32, Legal);
+ setIndexedStoreAction(im, MVT::i8, Legal);
+ setIndexedStoreAction(im, MVT::i16, Legal);
+ setIndexedStoreAction(im, MVT::i32, Legal);
+ setIndexedStoreAction(im, MVT::i64, Legal);
+ setIndexedStoreAction(im, MVT::f64, Legal);
+ setIndexedStoreAction(im, MVT::f32, Legal);
}
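The loads and stores these indexed modes are meant for are ones whose address is bumped by a fixed amount right away; a minimal sketch of such a loop, where the pointer update can fold into the access itself (sum is just an illustrative helper):

    #include <cstddef>
    static long sum(const long *P, std::size_t N) {
      long S = 0;
      for (std::size_t I = 0; I != N; ++I)
        S += *P++; // load, then advance by sizeof(long): a post-indexed candidate
      return S;
    }
    int main() {
      long A[3] = {1, 2, 3};
      return sum(A, 3) == 6 ? 0 : 1;
    }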
- // From the stxr, the register is GPR32; from the cmp it's GPR32wsp
- unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
- MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);
+ // Trap.
+ setOperationAction(ISD::TRAP, MVT::Other, Legal);
- BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(scratch).addReg(ptr);
- BuildMI(BB, dl, TII->get(AArch64::CBNZw))
- .addReg(stxr_status).addMBB(loopMBB);
+ // We combine OR nodes for bitfield operations.
+ setTargetDAGCombine(ISD::OR);
- BB->addSuccessor(loopMBB);
- BB->addSuccessor(exitMBB);
+ // Vector add and sub nodes may conceal a high-half opportunity.
+ // Also, try to fold ADD into CSINC/CSINV.
+ setTargetDAGCombine(ISD::ADD);
+ setTargetDAGCombine(ISD::SUB);
- // exitMBB:
- // ...
- BB = exitMBB;
+ setTargetDAGCombine(ISD::XOR);
+ setTargetDAGCombine(ISD::SINT_TO_FP);
+ setTargetDAGCombine(ISD::UINT_TO_FP);
- MI->eraseFromParent(); // The instruction is gone now.
+ setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
- return BB;
-}
+ setTargetDAGCombine(ISD::ANY_EXTEND);
+ setTargetDAGCombine(ISD::ZERO_EXTEND);
+ setTargetDAGCombine(ISD::SIGN_EXTEND);
+ setTargetDAGCombine(ISD::BITCAST);
+ setTargetDAGCombine(ISD::CONCAT_VECTORS);
+ setTargetDAGCombine(ISD::STORE);
-MachineBasicBlock *
-AArch64TargetLowering::emitAtomicBinaryMinMax(MachineInstr *MI,
- MachineBasicBlock *BB,
- unsigned Size,
- unsigned CmpOp,
- A64CC::CondCodes Cond) const {
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ setTargetDAGCombine(ISD::MUL);
- const BasicBlock *LLVM_BB = BB->getBasicBlock();
- MachineFunction *MF = BB->getParent();
- MachineFunction::iterator It = BB;
- ++It;
+ setTargetDAGCombine(ISD::SELECT);
+ setTargetDAGCombine(ISD::VSELECT);
- unsigned dest = MI->getOperand(0).getReg();
- unsigned ptr = MI->getOperand(1).getReg();
- unsigned incr = MI->getOperand(2).getReg();
- AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());
+ setTargetDAGCombine(ISD::INTRINSIC_VOID);
+ setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
+ setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
- unsigned oldval = dest;
- DebugLoc dl = MI->getDebugLoc();
+ MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 8;
+ MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 4;
+ MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = 4;
- MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
- const TargetRegisterClass *TRC, *TRCsp;
- if (Size == 8) {
- TRC = &AArch64::GPR64RegClass;
- TRCsp = &AArch64::GPR64xspRegClass;
- } else {
- TRC = &AArch64::GPR32RegClass;
- TRCsp = &AArch64::GPR32wspRegClass;
- }
+ setStackPointerRegisterToSaveRestore(AArch64::SP);
- unsigned ldrOpc, strOpc;
- getExclusiveOperation(Size, Ord, ldrOpc, strOpc);
+ setSchedulingPreference(Sched::Hybrid);
- MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MF->insert(It, loopMBB);
- MF->insert(It, exitMBB);
+ // Enable TBZ/TBNZ
+ MaskAndBranchFoldingIsLegal = true;
- // Transfer the remainder of BB and its successor edges to exitMBB.
- exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
- exitMBB->transferSuccessorsAndUpdatePHIs(BB);
+ setMinFunctionAlignment(2);
- unsigned scratch = MRI.createVirtualRegister(TRC);
- MRI.constrainRegClass(scratch, TRCsp);
+ RequireStrictAlign = (Align == StrictAlign);
- // thisMBB:
- // ...
- // fallthrough --> loopMBB
- BB->addSuccessor(loopMBB);
+ setHasExtractBitsInsn(true);
- // loopMBB:
- // ldxr dest, ptr
- // cmp incr, dest (, sign extend if necessary)
- // csel scratch, dest, incr, cond
- // stxr stxr_status, scratch, ptr
- // cbnz stxr_status, loopMBB
- // fallthrough --> exitMBB
- BB = loopMBB;
- BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
+ if (Subtarget->hasNEON()) {
+ // FIXME: v1f64 shouldn't be legal if we can avoid it, because it leads to
+ // silliness like this:
+ setOperationAction(ISD::FABS, MVT::v1f64, Expand);
+ setOperationAction(ISD::FADD, MVT::v1f64, Expand);
+ setOperationAction(ISD::FCEIL, MVT::v1f64, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::v1f64, Expand);
+ setOperationAction(ISD::FCOS, MVT::v1f64, Expand);
+ setOperationAction(ISD::FDIV, MVT::v1f64, Expand);
+ setOperationAction(ISD::FFLOOR, MVT::v1f64, Expand);
+ setOperationAction(ISD::FMA, MVT::v1f64, Expand);
+ setOperationAction(ISD::FMUL, MVT::v1f64, Expand);
+ setOperationAction(ISD::FNEARBYINT, MVT::v1f64, Expand);
+ setOperationAction(ISD::FNEG, MVT::v1f64, Expand);
+ setOperationAction(ISD::FPOW, MVT::v1f64, Expand);
+ setOperationAction(ISD::FREM, MVT::v1f64, Expand);
+ setOperationAction(ISD::FROUND, MVT::v1f64, Expand);
+ setOperationAction(ISD::FRINT, MVT::v1f64, Expand);
+ setOperationAction(ISD::FSIN, MVT::v1f64, Expand);
+ setOperationAction(ISD::FSINCOS, MVT::v1f64, Expand);
+ setOperationAction(ISD::FSQRT, MVT::v1f64, Expand);
+ setOperationAction(ISD::FSUB, MVT::v1f64, Expand);
+ setOperationAction(ISD::FTRUNC, MVT::v1f64, Expand);
+ setOperationAction(ISD::SETCC, MVT::v1f64, Expand);
+ setOperationAction(ISD::BR_CC, MVT::v1f64, Expand);
+ setOperationAction(ISD::SELECT, MVT::v1f64, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::v1f64, Expand);
+ setOperationAction(ISD::FP_EXTEND, MVT::v1f64, Expand);
+
+ setOperationAction(ISD::FP_TO_SINT, MVT::v1i64, Expand);
+ setOperationAction(ISD::FP_TO_UINT, MVT::v1i64, Expand);
+ setOperationAction(ISD::SINT_TO_FP, MVT::v1i64, Expand);
+ setOperationAction(ISD::UINT_TO_FP, MVT::v1i64, Expand);
+ setOperationAction(ISD::FP_ROUND, MVT::v1f64, Expand);
+
+ setOperationAction(ISD::MUL, MVT::v1i64, Expand);
+
+ // AArch64 doesn't have direct vector->f32 conversion instructions for
+ // elements smaller than i32, so promote the input to i32 first.
+ setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Promote);
+ setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Promote);
+ setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Promote);
+ setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Promote);
+ // Similarly, there is no direct i32 -> f64 vector conversion instruction.
+ setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
+ setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);
+ setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Custom);
+ setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Custom);
+
+ // AArch64 doesn't have MUL.2d:
+ setOperationAction(ISD::MUL, MVT::v2i64, Expand);
+ setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Legal);
+ setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
+ // Likewise, narrowing and extending vector loads/stores aren't handled
+ // directly.
+ for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
+ VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
+
+ setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT,
+ Expand);
+
+ setOperationAction(ISD::MULHS, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::MULHU, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
+
+ setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand);
+
+ for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
+ InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
+ setTruncStoreAction((MVT::SimpleValueType)VT,
+ (MVT::SimpleValueType)InnerVT, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand);
+ setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand);
+ }
- // Build compare and cmov instructions.
- MRI.constrainRegClass(incr, TRCsp);
- BuildMI(BB, dl, TII->get(CmpOp))
- .addReg(incr).addReg(oldval).addImm(0);
+ // AArch64 has implementations of a lot of rounding-like FP operations.
+ static MVT RoundingVecTypes[] = { MVT::v2f32, MVT::v4f32, MVT::v2f64 };
+ for (unsigned I = 0; I < array_lengthof(RoundingVecTypes); ++I) {
+ MVT Ty = RoundingVecTypes[I];
+ setOperationAction(ISD::FFLOOR, Ty, Legal);
+ setOperationAction(ISD::FNEARBYINT, Ty, Legal);
+ setOperationAction(ISD::FCEIL, Ty, Legal);
+ setOperationAction(ISD::FRINT, Ty, Legal);
+ setOperationAction(ISD::FTRUNC, Ty, Legal);
+ setOperationAction(ISD::FROUND, Ty, Legal);
+ }
+ }
+}
- BuildMI(BB, dl, TII->get(Size == 8 ? AArch64::CSELxxxc : AArch64::CSELwwwc),
- scratch)
- .addReg(oldval).addReg(incr).addImm(Cond);
+void AArch64TargetLowering::addTypeForNEON(EVT VT, EVT PromotedBitwiseVT) {
+ if (VT == MVT::v2f32) {
+ setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
+ AddPromotedToType(ISD::LOAD, VT.getSimpleVT(), MVT::v2i32);
- unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
- MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);
+ setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
+ AddPromotedToType(ISD::STORE, VT.getSimpleVT(), MVT::v2i32);
+ } else if (VT == MVT::v2f64 || VT == MVT::v4f32) {
+ setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
+ AddPromotedToType(ISD::LOAD, VT.getSimpleVT(), MVT::v2i64);
- BuildMI(BB, dl, TII->get(strOpc), stxr_status)
- .addReg(scratch).addReg(ptr);
- BuildMI(BB, dl, TII->get(AArch64::CBNZw))
- .addReg(stxr_status).addMBB(loopMBB);
+ setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
+ AddPromotedToType(ISD::STORE, VT.getSimpleVT(), MVT::v2i64);
+ }
- BB->addSuccessor(loopMBB);
- BB->addSuccessor(exitMBB);
+ // Mark vector float intrinsics as expand.
+ if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64) {
+ setOperationAction(ISD::FSIN, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::FCOS, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::FPOWI, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::FPOW, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::FLOG, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::FLOG2, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::FLOG10, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::FEXP, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::FEXP2, VT.getSimpleVT(), Expand);
+ }
- // exitMBB:
- // ...
- BB = exitMBB;
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::AND, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::OR, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::SETCC, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
+
+ setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::VSELECT, VT.getSimpleVT(), Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand);
+
+ // CNT supports only B element sizes.
+ if (VT != MVT::v8i8 && VT != MVT::v16i8)
+ setOperationAction(ISD::CTPOP, VT.getSimpleVT(), Expand);
+
+ setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
+
+ setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Custom);
+
+ if (Subtarget->isLittleEndian()) {
+ for (unsigned im = (unsigned)ISD::PRE_INC;
+ im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
+ setIndexedLoadAction(im, VT.getSimpleVT(), Legal);
+ setIndexedStoreAction(im, VT.getSimpleVT(), Legal);
+ }
+ }
+}
- MI->eraseFromParent(); // The instruction is gone now.
+void AArch64TargetLowering::addDRTypeForNEON(MVT VT) {
+ addRegisterClass(VT, &AArch64::FPR64RegClass);
+ addTypeForNEON(VT, MVT::v2i32);
+}
- return BB;
+void AArch64TargetLowering::addQRTypeForNEON(MVT VT) {
+ addRegisterClass(VT, &AArch64::FPR128RegClass);
+ addTypeForNEON(VT, MVT::v4i32);
}
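The D/Q split above is decided purely by total vector width: 64-bit types are registered in FPR64, 128-bit types in FPR128. A compile-time sketch of that arithmetic (bits is an illustrative helper):

    constexpr unsigned bits(unsigned Elems, unsigned ElemBits) { return Elems * ElemBits; }
    static_assert(bits(2, 32) == 64,  "v2i32/v2f32 are D-register (FPR64) types");
    static_assert(bits(8, 8)  == 64,  "v8i8 is a D-register (FPR64) type");
    static_assert(bits(4, 32) == 128, "v4i32/v4f32 are Q-register (FPR128) types");
    static_assert(bits(2, 64) == 128, "v2i64/v2f64 are Q-register (FPR128) types");
    int main() { return 0; }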
-MachineBasicBlock *
-AArch64TargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
- MachineBasicBlock *BB,
- unsigned Size) const {
- unsigned dest = MI->getOperand(0).getReg();
- unsigned ptr = MI->getOperand(1).getReg();
- unsigned oldval = MI->getOperand(2).getReg();
- unsigned newval = MI->getOperand(3).getReg();
- AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(4).getImm());
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- DebugLoc dl = MI->getDebugLoc();
-
- MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
- const TargetRegisterClass *TRCsp;
- TRCsp = Size == 8 ? &AArch64::GPR64xspRegClass : &AArch64::GPR32wspRegClass;
-
- unsigned ldrOpc, strOpc;
- getExclusiveOperation(Size, Ord, ldrOpc, strOpc);
-
- MachineFunction *MF = BB->getParent();
- const BasicBlock *LLVM_BB = BB->getBasicBlock();
- MachineFunction::iterator It = BB;
- ++It; // insert the new blocks after the current block
-
- MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MF->insert(It, loop1MBB);
- MF->insert(It, loop2MBB);
- MF->insert(It, exitMBB);
-
- // Transfer the remainder of BB and its successor edges to exitMBB.
- exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
- exitMBB->transferSuccessorsAndUpdatePHIs(BB);
-
- // thisMBB:
- // ...
- // fallthrough --> loop1MBB
- BB->addSuccessor(loop1MBB);
-
- // loop1MBB:
- // ldxr dest, [ptr]
- // cmp dest, oldval
- // b.ne exitMBB
- BB = loop1MBB;
- BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
-
- unsigned CmpOp = Size == 8 ? AArch64::CMPxx_lsl : AArch64::CMPww_lsl;
- MRI.constrainRegClass(dest, TRCsp);
- BuildMI(BB, dl, TII->get(CmpOp))
- .addReg(dest).addReg(oldval).addImm(0);
- BuildMI(BB, dl, TII->get(AArch64::Bcc))
- .addImm(A64CC::NE).addMBB(exitMBB);
- BB->addSuccessor(loop2MBB);
- BB->addSuccessor(exitMBB);
-
- // loop2MBB:
- // strex stxr_status, newval, [ptr]
- // cbnz stxr_status, loop1MBB
- BB = loop2MBB;
- unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
- MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);
-
- BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(newval).addReg(ptr);
- BuildMI(BB, dl, TII->get(AArch64::CBNZw))
- .addReg(stxr_status).addMBB(loop1MBB);
- BB->addSuccessor(loop1MBB);
- BB->addSuccessor(exitMBB);
-
- // exitMBB:
- // ...
- BB = exitMBB;
-
- MI->eraseFromParent(); // The instruction is gone now.
-
- return BB;
+EVT AArch64TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
+ if (!VT.isVector())
+ return MVT::i32;
+ return VT.changeVectorElementTypeToInteger();
+}
+
+/// computeKnownBitsForTargetNode - Determine which bits of the given Op are
+/// known to be either zero or one and return them in the KnownZero/KnownOne
+/// bitsets.
+void AArch64TargetLowering::computeKnownBitsForTargetNode(
+ const SDValue Op, APInt &KnownZero, APInt &KnownOne,
+ const SelectionDAG &DAG, unsigned Depth) const {
+ switch (Op.getOpcode()) {
+ default:
+ break;
+ case AArch64ISD::CSEL: {
+ APInt KnownZero2, KnownOne2;
+ DAG.computeKnownBits(Op->getOperand(0), KnownZero, KnownOne, Depth + 1);
+ DAG.computeKnownBits(Op->getOperand(1), KnownZero2, KnownOne2, Depth + 1);
+ KnownZero &= KnownZero2;
+ KnownOne &= KnownOne2;
+ break;
+ }
+ case ISD::INTRINSIC_W_CHAIN: {
+ ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
+ Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
+ switch (IntID) {
+ default: return;
+ case Intrinsic::aarch64_ldaxr:
+ case Intrinsic::aarch64_ldxr: {
+ unsigned BitWidth = KnownOne.getBitWidth();
+ EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
+ unsigned MemBits = VT.getScalarType().getSizeInBits();
+ KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
+ return;
+ }
+ }
+ break;
+ }
+ case ISD::INTRINSIC_WO_CHAIN:
+ case ISD::INTRINSIC_VOID: {
+ unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ switch (IntNo) {
+ default:
+ break;
+ case Intrinsic::aarch64_neon_umaxv:
+ case Intrinsic::aarch64_neon_uminv: {
+ // Figure out the datatype of the vector operand. The UMINV instruction
+ // will zero extend the result, so we can mark as known zero all the
+ // bits larger than the element datatype. 32-bit or larger doesn't need
+ // this as those are legal types and will be handled by isel directly.
+ MVT VT = Op.getOperand(1).getValueType().getSimpleVT();
+ unsigned BitWidth = KnownZero.getBitWidth();
+ if (VT == MVT::v8i8 || VT == MVT::v16i8) {
+ assert(BitWidth >= 8 && "Unexpected width!");
+ APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 8);
+ KnownZero |= Mask;
+ } else if (VT == MVT::v4i16 || VT == MVT::v8i16) {
+ assert(BitWidth >= 16 && "Unexpected width!");
+ APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
+ KnownZero |= Mask;
+ }
+ break;
+ } break;
+ }
+ }
+ }
+}
+
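For the CSEL case, a bit is known in the result only if it is known in both possible inputs, which is why the two known-bit sets are intersected; a standalone sketch of that rule on plain 64-bit masks (the values are hypothetical):

    #include <cassert>
    #include <cstdint>
    int main() {
      uint64_t KnownZeroA = 0xFFFFFFFF00000000ULL; // high 32 bits of operand A are zero
      uint64_t KnownZeroB = 0xFFFFFF0000000000ULL; // high 24 bits of operand B are zero
      // Whichever operand the condition selects, only bits zero in *both*
      // are guaranteed zero in the result.
      uint64_t KnownZeroSel = KnownZeroA & KnownZeroB;
      assert(KnownZeroSel == 0xFFFFFF0000000000ULL);
      return 0;
    }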
+MVT AArch64TargetLowering::getScalarShiftAmountTy(EVT LHSTy) const {
+ return MVT::i64;
+}
+
+unsigned AArch64TargetLowering::getMaximalGlobalOffset() const {
+ // FIXME: On AArch64, this depends on the type.
+ // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes(),
+ // and the offset has to be a multiple of the related size in bytes.
+ return 4095;
+}
+
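The 4095 above comes from AArch64's scaled unsigned-offset addressing: a 12-bit immediate multiplied by the access size, so the reachable byte offset grows with the type. A small sketch of that arithmetic (maxByteOffset is an illustrative helper, and the LDR/STR unsigned-offset forms are the assumed addressing mode):

    constexpr unsigned maxByteOffset(unsigned AccessBytes) {
      return 4095 * AccessBytes; // 12-bit immediate, scaled by the access size
    }
    static_assert(maxByteOffset(1) == 4095,  "byte accesses");
    static_assert(maxByteOffset(4) == 16380, "32-bit accesses");
    static_assert(maxByteOffset(8) == 32760, "64-bit accesses");
    int main() { return 0; }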
+FastISel *
+AArch64TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo) const {
+ return AArch64::createFastISel(funcInfo, libInfo);
+}
+
+const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
+ switch (Opcode) {
+ default:
+ return nullptr;
+ case AArch64ISD::CALL: return "AArch64ISD::CALL";
+ case AArch64ISD::ADRP: return "AArch64ISD::ADRP";
+ case AArch64ISD::ADDlow: return "AArch64ISD::ADDlow";
+ case AArch64ISD::LOADgot: return "AArch64ISD::LOADgot";
+ case AArch64ISD::RET_FLAG: return "AArch64ISD::RET_FLAG";
+ case AArch64ISD::BRCOND: return "AArch64ISD::BRCOND";
+ case AArch64ISD::CSEL: return "AArch64ISD::CSEL";
+ case AArch64ISD::FCSEL: return "AArch64ISD::FCSEL";
+ case AArch64ISD::CSINV: return "AArch64ISD::CSINV";
+ case AArch64ISD::CSNEG: return "AArch64ISD::CSNEG";
+ case AArch64ISD::CSINC: return "AArch64ISD::CSINC";
+ case AArch64ISD::THREAD_POINTER: return "AArch64ISD::THREAD_POINTER";
+ case AArch64ISD::TLSDESC_CALL: return "AArch64ISD::TLSDESC_CALL";
+ case AArch64ISD::ADC: return "AArch64ISD::ADC";
+ case AArch64ISD::SBC: return "AArch64ISD::SBC";
+ case AArch64ISD::ADDS: return "AArch64ISD::ADDS";
+ case AArch64ISD::SUBS: return "AArch64ISD::SUBS";
+ case AArch64ISD::ADCS: return "AArch64ISD::ADCS";
+ case AArch64ISD::SBCS: return "AArch64ISD::SBCS";
+ case AArch64ISD::ANDS: return "AArch64ISD::ANDS";
+ case AArch64ISD::FCMP: return "AArch64ISD::FCMP";
+ case AArch64ISD::FMIN: return "AArch64ISD::FMIN";
+ case AArch64ISD::FMAX: return "AArch64ISD::FMAX";
+ case AArch64ISD::DUP: return "AArch64ISD::DUP";
+ case AArch64ISD::DUPLANE8: return "AArch64ISD::DUPLANE8";
+ case AArch64ISD::DUPLANE16: return "AArch64ISD::DUPLANE16";
+ case AArch64ISD::DUPLANE32: return "AArch64ISD::DUPLANE32";
+ case AArch64ISD::DUPLANE64: return "AArch64ISD::DUPLANE64";
+ case AArch64ISD::MOVI: return "AArch64ISD::MOVI";
+ case AArch64ISD::MOVIshift: return "AArch64ISD::MOVIshift";
+ case AArch64ISD::MOVIedit: return "AArch64ISD::MOVIedit";
+ case AArch64ISD::MOVImsl: return "AArch64ISD::MOVImsl";
+ case AArch64ISD::FMOV: return "AArch64ISD::FMOV";
+ case AArch64ISD::MVNIshift: return "AArch64ISD::MVNIshift";
+ case AArch64ISD::MVNImsl: return "AArch64ISD::MVNImsl";
+ case AArch64ISD::BICi: return "AArch64ISD::BICi";
+ case AArch64ISD::ORRi: return "AArch64ISD::ORRi";
+ case AArch64ISD::BSL: return "AArch64ISD::BSL";
+ case AArch64ISD::NEG: return "AArch64ISD::NEG";
+ case AArch64ISD::EXTR: return "AArch64ISD::EXTR";
+ case AArch64ISD::ZIP1: return "AArch64ISD::ZIP1";
+ case AArch64ISD::ZIP2: return "AArch64ISD::ZIP2";
+ case AArch64ISD::UZP1: return "AArch64ISD::UZP1";
+ case AArch64ISD::UZP2: return "AArch64ISD::UZP2";
+ case AArch64ISD::TRN1: return "AArch64ISD::TRN1";
+ case AArch64ISD::TRN2: return "AArch64ISD::TRN2";
+ case AArch64ISD::REV16: return "AArch64ISD::REV16";
+ case AArch64ISD::REV32: return "AArch64ISD::REV32";
+ case AArch64ISD::REV64: return "AArch64ISD::REV64";
+ case AArch64ISD::EXT: return "AArch64ISD::EXT";
+ case AArch64ISD::VSHL: return "AArch64ISD::VSHL";
+ case AArch64ISD::VLSHR: return "AArch64ISD::VLSHR";
+ case AArch64ISD::VASHR: return "AArch64ISD::VASHR";
+ case AArch64ISD::CMEQ: return "AArch64ISD::CMEQ";
+ case AArch64ISD::CMGE: return "AArch64ISD::CMGE";
+ case AArch64ISD::CMGT: return "AArch64ISD::CMGT";
+ case AArch64ISD::CMHI: return "AArch64ISD::CMHI";
+ case AArch64ISD::CMHS: return "AArch64ISD::CMHS";
+ case AArch64ISD::FCMEQ: return "AArch64ISD::FCMEQ";
+ case AArch64ISD::FCMGE: return "AArch64ISD::FCMGE";
+ case AArch64ISD::FCMGT: return "AArch64ISD::FCMGT";
+ case AArch64ISD::CMEQz: return "AArch64ISD::CMEQz";
+ case AArch64ISD::CMGEz: return "AArch64ISD::CMGEz";
+ case AArch64ISD::CMGTz: return "AArch64ISD::CMGTz";
+ case AArch64ISD::CMLEz: return "AArch64ISD::CMLEz";
+ case AArch64ISD::CMLTz: return "AArch64ISD::CMLTz";
+ case AArch64ISD::FCMEQz: return "AArch64ISD::FCMEQz";
+ case AArch64ISD::FCMGEz: return "AArch64ISD::FCMGEz";
+ case AArch64ISD::FCMGTz: return "AArch64ISD::FCMGTz";
+ case AArch64ISD::FCMLEz: return "AArch64ISD::FCMLEz";
+ case AArch64ISD::FCMLTz: return "AArch64ISD::FCMLTz";
+ case AArch64ISD::NOT: return "AArch64ISD::NOT";
+ case AArch64ISD::BIT: return "AArch64ISD::BIT";
+ case AArch64ISD::CBZ: return "AArch64ISD::CBZ";
+ case AArch64ISD::CBNZ: return "AArch64ISD::CBNZ";
+ case AArch64ISD::TBZ: return "AArch64ISD::TBZ";
+ case AArch64ISD::TBNZ: return "AArch64ISD::TBNZ";
+ case AArch64ISD::TC_RETURN: return "AArch64ISD::TC_RETURN";
+ case AArch64ISD::SITOF: return "AArch64ISD::SITOF";
+ case AArch64ISD::UITOF: return "AArch64ISD::UITOF";
+ case AArch64ISD::SQSHL_I: return "AArch64ISD::SQSHL_I";
+ case AArch64ISD::UQSHL_I: return "AArch64ISD::UQSHL_I";
+ case AArch64ISD::SRSHR_I: return "AArch64ISD::SRSHR_I";
+ case AArch64ISD::URSHR_I: return "AArch64ISD::URSHR_I";
+ case AArch64ISD::SQSHLU_I: return "AArch64ISD::SQSHLU_I";
+ case AArch64ISD::WrapperLarge: return "AArch64ISD::WrapperLarge";
+ case AArch64ISD::LD2post: return "AArch64ISD::LD2post";
+ case AArch64ISD::LD3post: return "AArch64ISD::LD3post";
+ case AArch64ISD::LD4post: return "AArch64ISD::LD4post";
+ case AArch64ISD::ST2post: return "AArch64ISD::ST2post";
+ case AArch64ISD::ST3post: return "AArch64ISD::ST3post";
+ case AArch64ISD::ST4post: return "AArch64ISD::ST4post";
+ case AArch64ISD::LD1x2post: return "AArch64ISD::LD1x2post";
+ case AArch64ISD::LD1x3post: return "AArch64ISD::LD1x3post";
+ case AArch64ISD::LD1x4post: return "AArch64ISD::LD1x4post";
+ case AArch64ISD::ST1x2post: return "AArch64ISD::ST1x2post";
+ case AArch64ISD::ST1x3post: return "AArch64ISD::ST1x3post";
+ case AArch64ISD::ST1x4post: return "AArch64ISD::ST1x4post";
+ case AArch64ISD::LD1DUPpost: return "AArch64ISD::LD1DUPpost";
+ case AArch64ISD::LD2DUPpost: return "AArch64ISD::LD2DUPpost";
+ case AArch64ISD::LD3DUPpost: return "AArch64ISD::LD3DUPpost";
+ case AArch64ISD::LD4DUPpost: return "AArch64ISD::LD4DUPpost";
+ case AArch64ISD::LD1LANEpost: return "AArch64ISD::LD1LANEpost";
+ case AArch64ISD::LD2LANEpost: return "AArch64ISD::LD2LANEpost";
+ case AArch64ISD::LD3LANEpost: return "AArch64ISD::LD3LANEpost";
+ case AArch64ISD::LD4LANEpost: return "AArch64ISD::LD4LANEpost";
+ case AArch64ISD::ST2LANEpost: return "AArch64ISD::ST2LANEpost";
+ case AArch64ISD::ST3LANEpost: return "AArch64ISD::ST3LANEpost";
+ case AArch64ISD::ST4LANEpost: return "AArch64ISD::ST4LANEpost";
+ }
}
MachineBasicBlock *
AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
MachineBasicBlock *MBB) const {
- // We materialise the F128CSEL pseudo-instruction using conditional branches
- // and loads, giving an instruciton sequence like:
- // str q0, [sp]
- // b.ne IfTrue
- // b Finish
- // IfTrue:
- // str q1, [sp]
- // Finish:
- // ldr q0, [sp]
- //
- // Using virtual registers would probably not be beneficial since COPY
- // instructions are expensive for f128 (there's no actual instruction to
- // implement them).
- //
- // An alternative would be to do an integer-CSEL on some address. E.g.:
- // mov x0, sp
- // add x1, sp, #16
- // str q0, [x0]
- // str q1, [x1]
- // csel x0, x0, x1, ne
- // ldr q0, [x0]
- //
- // It's unclear which approach is actually optimal.
+ // We materialise the F128CSEL pseudo-instruction as some control flow and a
+ // phi node:
+
+ // OrigBB:
+ // [... previous instrs leading to comparison ...]
+ // b.ne TrueBB
+ // b EndBB
+ // TrueBB:
+ // ; Fallthrough
+ // EndBB:
+ // Dest = PHI [IfTrue, TrueBB], [IfFalse, OrigBB]
+
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
MachineFunction *MF = MBB->getParent();
const BasicBlock *LLVM_BB = MBB->getBasicBlock();
@@ -729,54 +793,28 @@ AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
MF->insert(It, EndBB);
// Transfer rest of current basic-block to EndBB
- EndBB->splice(EndBB->begin(), MBB,
- llvm::next(MachineBasicBlock::iterator(MI)),
+ EndBB->splice(EndBB->begin(), MBB, std::next(MachineBasicBlock::iterator(MI)),
MBB->end());
EndBB->transferSuccessorsAndUpdatePHIs(MBB);
- // We need somewhere to store the f128 value needed.
- int ScratchFI = MF->getFrameInfo()->CreateSpillStackObject(16, 16);
-
- // [... start of incoming MBB ...]
- // str qIFFALSE, [sp]
- // b.cc IfTrue
- // b Done
- BuildMI(MBB, DL, TII->get(AArch64::LSFP128_STR))
- .addReg(IfFalseReg)
- .addFrameIndex(ScratchFI)
- .addImm(0);
- BuildMI(MBB, DL, TII->get(AArch64::Bcc))
- .addImm(CondCode)
- .addMBB(TrueBB);
- BuildMI(MBB, DL, TII->get(AArch64::Bimm))
- .addMBB(EndBB);
+ BuildMI(MBB, DL, TII->get(AArch64::Bcc)).addImm(CondCode).addMBB(TrueBB);
+ BuildMI(MBB, DL, TII->get(AArch64::B)).addMBB(EndBB);
MBB->addSuccessor(TrueBB);
MBB->addSuccessor(EndBB);
+ // TrueBB falls through to the end.
+ TrueBB->addSuccessor(EndBB);
+
if (!NZCVKilled) {
- // NZCV is live-through TrueBB.
TrueBB->addLiveIn(AArch64::NZCV);
EndBB->addLiveIn(AArch64::NZCV);
}
- // IfTrue:
- // str qIFTRUE, [sp]
- BuildMI(TrueBB, DL, TII->get(AArch64::LSFP128_STR))
- .addReg(IfTrueReg)
- .addFrameIndex(ScratchFI)
- .addImm(0);
-
- // Note: fallthrough. We can rely on LLVM adding a branch if it reorders the
- // blocks.
- TrueBB->addSuccessor(EndBB);
-
- // Done:
- // ldr qDEST, [sp]
- // [... rest of incoming MBB ...]
- MachineInstr *StartOfEnd = EndBB->begin();
- BuildMI(*EndBB, StartOfEnd, DL, TII->get(AArch64::LSFP128_LDR), DestReg)
- .addFrameIndex(ScratchFI)
- .addImm(0);
+ BuildMI(*EndBB, EndBB->begin(), DL, TII->get(AArch64::PHI), DestReg)
+ .addReg(IfTrueReg)
+ .addMBB(TrueBB)
+ .addReg(IfFalseReg)
+ .addMBB(MBB);
MI->eraseFromParent();
return EndBB;
@@ -784,837 +822,1154 @@ AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
MachineBasicBlock *
AArch64TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB) const {
+ MachineBasicBlock *BB) const {
switch (MI->getOpcode()) {
- default: llvm_unreachable("Unhandled instruction with custom inserter");
+ default:
+#ifndef NDEBUG
+ MI->dump();
+#endif
+ llvm_unreachable("Unexpected instruction for custom inserter!");
+
case AArch64::F128CSEL:
- return EmitF128CSEL(MI, MBB);
- case AArch64::ATOMIC_LOAD_ADD_I8:
- return emitAtomicBinary(MI, MBB, 1, AArch64::ADDwww_lsl);
- case AArch64::ATOMIC_LOAD_ADD_I16:
- return emitAtomicBinary(MI, MBB, 2, AArch64::ADDwww_lsl);
- case AArch64::ATOMIC_LOAD_ADD_I32:
- return emitAtomicBinary(MI, MBB, 4, AArch64::ADDwww_lsl);
- case AArch64::ATOMIC_LOAD_ADD_I64:
- return emitAtomicBinary(MI, MBB, 8, AArch64::ADDxxx_lsl);
-
- case AArch64::ATOMIC_LOAD_SUB_I8:
- return emitAtomicBinary(MI, MBB, 1, AArch64::SUBwww_lsl);
- case AArch64::ATOMIC_LOAD_SUB_I16:
- return emitAtomicBinary(MI, MBB, 2, AArch64::SUBwww_lsl);
- case AArch64::ATOMIC_LOAD_SUB_I32:
- return emitAtomicBinary(MI, MBB, 4, AArch64::SUBwww_lsl);
- case AArch64::ATOMIC_LOAD_SUB_I64:
- return emitAtomicBinary(MI, MBB, 8, AArch64::SUBxxx_lsl);
-
- case AArch64::ATOMIC_LOAD_AND_I8:
- return emitAtomicBinary(MI, MBB, 1, AArch64::ANDwww_lsl);
- case AArch64::ATOMIC_LOAD_AND_I16:
- return emitAtomicBinary(MI, MBB, 2, AArch64::ANDwww_lsl);
- case AArch64::ATOMIC_LOAD_AND_I32:
- return emitAtomicBinary(MI, MBB, 4, AArch64::ANDwww_lsl);
- case AArch64::ATOMIC_LOAD_AND_I64:
- return emitAtomicBinary(MI, MBB, 8, AArch64::ANDxxx_lsl);
-
- case AArch64::ATOMIC_LOAD_OR_I8:
- return emitAtomicBinary(MI, MBB, 1, AArch64::ORRwww_lsl);
- case AArch64::ATOMIC_LOAD_OR_I16:
- return emitAtomicBinary(MI, MBB, 2, AArch64::ORRwww_lsl);
- case AArch64::ATOMIC_LOAD_OR_I32:
- return emitAtomicBinary(MI, MBB, 4, AArch64::ORRwww_lsl);
- case AArch64::ATOMIC_LOAD_OR_I64:
- return emitAtomicBinary(MI, MBB, 8, AArch64::ORRxxx_lsl);
-
- case AArch64::ATOMIC_LOAD_XOR_I8:
- return emitAtomicBinary(MI, MBB, 1, AArch64::EORwww_lsl);
- case AArch64::ATOMIC_LOAD_XOR_I16:
- return emitAtomicBinary(MI, MBB, 2, AArch64::EORwww_lsl);
- case AArch64::ATOMIC_LOAD_XOR_I32:
- return emitAtomicBinary(MI, MBB, 4, AArch64::EORwww_lsl);
- case AArch64::ATOMIC_LOAD_XOR_I64:
- return emitAtomicBinary(MI, MBB, 8, AArch64::EORxxx_lsl);
-
- case AArch64::ATOMIC_LOAD_NAND_I8:
- return emitAtomicBinary(MI, MBB, 1, AArch64::BICwww_lsl);
- case AArch64::ATOMIC_LOAD_NAND_I16:
- return emitAtomicBinary(MI, MBB, 2, AArch64::BICwww_lsl);
- case AArch64::ATOMIC_LOAD_NAND_I32:
- return emitAtomicBinary(MI, MBB, 4, AArch64::BICwww_lsl);
- case AArch64::ATOMIC_LOAD_NAND_I64:
- return emitAtomicBinary(MI, MBB, 8, AArch64::BICxxx_lsl);
-
- case AArch64::ATOMIC_LOAD_MIN_I8:
- return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::GT);
- case AArch64::ATOMIC_LOAD_MIN_I16:
- return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::GT);
- case AArch64::ATOMIC_LOAD_MIN_I32:
- return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::GT);
- case AArch64::ATOMIC_LOAD_MIN_I64:
- return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::GT);
-
- case AArch64::ATOMIC_LOAD_MAX_I8:
- return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::LT);
- case AArch64::ATOMIC_LOAD_MAX_I16:
- return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::LT);
- case AArch64::ATOMIC_LOAD_MAX_I32:
- return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LT);
- case AArch64::ATOMIC_LOAD_MAX_I64:
- return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LT);
-
- case AArch64::ATOMIC_LOAD_UMIN_I8:
- return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::HI);
- case AArch64::ATOMIC_LOAD_UMIN_I16:
- return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::HI);
- case AArch64::ATOMIC_LOAD_UMIN_I32:
- return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::HI);
- case AArch64::ATOMIC_LOAD_UMIN_I64:
- return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::HI);
-
- case AArch64::ATOMIC_LOAD_UMAX_I8:
- return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::LO);
- case AArch64::ATOMIC_LOAD_UMAX_I16:
- return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::LO);
- case AArch64::ATOMIC_LOAD_UMAX_I32:
- return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LO);
- case AArch64::ATOMIC_LOAD_UMAX_I64:
- return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LO);
-
- case AArch64::ATOMIC_SWAP_I8:
- return emitAtomicBinary(MI, MBB, 1, 0);
- case AArch64::ATOMIC_SWAP_I16:
- return emitAtomicBinary(MI, MBB, 2, 0);
- case AArch64::ATOMIC_SWAP_I32:
- return emitAtomicBinary(MI, MBB, 4, 0);
- case AArch64::ATOMIC_SWAP_I64:
- return emitAtomicBinary(MI, MBB, 8, 0);
-
- case AArch64::ATOMIC_CMP_SWAP_I8:
- return emitAtomicCmpSwap(MI, MBB, 1);
- case AArch64::ATOMIC_CMP_SWAP_I16:
- return emitAtomicCmpSwap(MI, MBB, 2);
- case AArch64::ATOMIC_CMP_SWAP_I32:
- return emitAtomicCmpSwap(MI, MBB, 4);
- case AArch64::ATOMIC_CMP_SWAP_I64:
- return emitAtomicCmpSwap(MI, MBB, 8);
+ return EmitF128CSEL(MI, BB);
+
+ case TargetOpcode::STACKMAP:
+ case TargetOpcode::PATCHPOINT:
+ return emitPatchPoint(MI, BB);
}
}
+//===----------------------------------------------------------------------===//
+// AArch64 Lowering private implementation.
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Lowering Code
+//===----------------------------------------------------------------------===//
-const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
- switch (Opcode) {
- case AArch64ISD::BR_CC: return "AArch64ISD::BR_CC";
- case AArch64ISD::Call: return "AArch64ISD::Call";
- case AArch64ISD::FPMOV: return "AArch64ISD::FPMOV";
- case AArch64ISD::GOTLoad: return "AArch64ISD::GOTLoad";
- case AArch64ISD::BFI: return "AArch64ISD::BFI";
- case AArch64ISD::EXTR: return "AArch64ISD::EXTR";
- case AArch64ISD::Ret: return "AArch64ISD::Ret";
- case AArch64ISD::SBFX: return "AArch64ISD::SBFX";
- case AArch64ISD::SELECT_CC: return "AArch64ISD::SELECT_CC";
- case AArch64ISD::SETCC: return "AArch64ISD::SETCC";
- case AArch64ISD::TC_RETURN: return "AArch64ISD::TC_RETURN";
- case AArch64ISD::THREAD_POINTER: return "AArch64ISD::THREAD_POINTER";
- case AArch64ISD::TLSDESCCALL: return "AArch64ISD::TLSDESCCALL";
- case AArch64ISD::WrapperLarge: return "AArch64ISD::WrapperLarge";
- case AArch64ISD::WrapperSmall: return "AArch64ISD::WrapperSmall";
-
- case AArch64ISD::NEON_BSL:
- return "AArch64ISD::NEON_BSL";
- case AArch64ISD::NEON_MOVIMM:
- return "AArch64ISD::NEON_MOVIMM";
- case AArch64ISD::NEON_MVNIMM:
- return "AArch64ISD::NEON_MVNIMM";
- case AArch64ISD::NEON_FMOVIMM:
- return "AArch64ISD::NEON_FMOVIMM";
- case AArch64ISD::NEON_CMP:
- return "AArch64ISD::NEON_CMP";
- case AArch64ISD::NEON_CMPZ:
- return "AArch64ISD::NEON_CMPZ";
- case AArch64ISD::NEON_TST:
- return "AArch64ISD::NEON_TST";
- case AArch64ISD::NEON_QSHLs:
- return "AArch64ISD::NEON_QSHLs";
- case AArch64ISD::NEON_QSHLu:
- return "AArch64ISD::NEON_QSHLu";
- case AArch64ISD::NEON_VDUP:
- return "AArch64ISD::NEON_VDUP";
- case AArch64ISD::NEON_VDUPLANE:
- return "AArch64ISD::NEON_VDUPLANE";
- case AArch64ISD::NEON_REV16:
- return "AArch64ISD::NEON_REV16";
- case AArch64ISD::NEON_REV32:
- return "AArch64ISD::NEON_REV32";
- case AArch64ISD::NEON_REV64:
- return "AArch64ISD::NEON_REV64";
- case AArch64ISD::NEON_UZP1:
- return "AArch64ISD::NEON_UZP1";
- case AArch64ISD::NEON_UZP2:
- return "AArch64ISD::NEON_UZP2";
- case AArch64ISD::NEON_ZIP1:
- return "AArch64ISD::NEON_ZIP1";
- case AArch64ISD::NEON_ZIP2:
- return "AArch64ISD::NEON_ZIP2";
- case AArch64ISD::NEON_TRN1:
- return "AArch64ISD::NEON_TRN1";
- case AArch64ISD::NEON_TRN2:
- return "AArch64ISD::NEON_TRN2";
- case AArch64ISD::NEON_LD1_UPD:
- return "AArch64ISD::NEON_LD1_UPD";
- case AArch64ISD::NEON_LD2_UPD:
- return "AArch64ISD::NEON_LD2_UPD";
- case AArch64ISD::NEON_LD3_UPD:
- return "AArch64ISD::NEON_LD3_UPD";
- case AArch64ISD::NEON_LD4_UPD:
- return "AArch64ISD::NEON_LD4_UPD";
- case AArch64ISD::NEON_ST1_UPD:
- return "AArch64ISD::NEON_ST1_UPD";
- case AArch64ISD::NEON_ST2_UPD:
- return "AArch64ISD::NEON_ST2_UPD";
- case AArch64ISD::NEON_ST3_UPD:
- return "AArch64ISD::NEON_ST3_UPD";
- case AArch64ISD::NEON_ST4_UPD:
- return "AArch64ISD::NEON_ST4_UPD";
- case AArch64ISD::NEON_LD1x2_UPD:
- return "AArch64ISD::NEON_LD1x2_UPD";
- case AArch64ISD::NEON_LD1x3_UPD:
- return "AArch64ISD::NEON_LD1x3_UPD";
- case AArch64ISD::NEON_LD1x4_UPD:
- return "AArch64ISD::NEON_LD1x4_UPD";
- case AArch64ISD::NEON_ST1x2_UPD:
- return "AArch64ISD::NEON_ST1x2_UPD";
- case AArch64ISD::NEON_ST1x3_UPD:
- return "AArch64ISD::NEON_ST1x3_UPD";
- case AArch64ISD::NEON_ST1x4_UPD:
- return "AArch64ISD::NEON_ST1x4_UPD";
- case AArch64ISD::NEON_LD2DUP:
- return "AArch64ISD::NEON_LD2DUP";
- case AArch64ISD::NEON_LD3DUP:
- return "AArch64ISD::NEON_LD3DUP";
- case AArch64ISD::NEON_LD4DUP:
- return "AArch64ISD::NEON_LD4DUP";
- case AArch64ISD::NEON_LD2DUP_UPD:
- return "AArch64ISD::NEON_LD2DUP_UPD";
- case AArch64ISD::NEON_LD3DUP_UPD:
- return "AArch64ISD::NEON_LD3DUP_UPD";
- case AArch64ISD::NEON_LD4DUP_UPD:
- return "AArch64ISD::NEON_LD4DUP_UPD";
- case AArch64ISD::NEON_LD2LN_UPD:
- return "AArch64ISD::NEON_LD2LN_UPD";
- case AArch64ISD::NEON_LD3LN_UPD:
- return "AArch64ISD::NEON_LD3LN_UPD";
- case AArch64ISD::NEON_LD4LN_UPD:
- return "AArch64ISD::NEON_LD4LN_UPD";
- case AArch64ISD::NEON_ST2LN_UPD:
- return "AArch64ISD::NEON_ST2LN_UPD";
- case AArch64ISD::NEON_ST3LN_UPD:
- return "AArch64ISD::NEON_ST3LN_UPD";
- case AArch64ISD::NEON_ST4LN_UPD:
- return "AArch64ISD::NEON_ST4LN_UPD";
- case AArch64ISD::NEON_VEXTRACT:
- return "AArch64ISD::NEON_VEXTRACT";
+/// changeIntCCToAArch64CC - Convert a DAG integer condition code to an AArch64
+/// CC
+static AArch64CC::CondCode changeIntCCToAArch64CC(ISD::CondCode CC) {
+ switch (CC) {
default:
- return NULL;
+ llvm_unreachable("Unknown condition code!");
+ case ISD::SETNE:
+ return AArch64CC::NE;
+ case ISD::SETEQ:
+ return AArch64CC::EQ;
+ case ISD::SETGT:
+ return AArch64CC::GT;
+ case ISD::SETGE:
+ return AArch64CC::GE;
+ case ISD::SETLT:
+ return AArch64CC::LT;
+ case ISD::SETLE:
+ return AArch64CC::LE;
+ case ISD::SETUGT:
+ return AArch64CC::HI;
+ case ISD::SETUGE:
+ return AArch64CC::HS;
+ case ISD::SETULT:
+ return AArch64CC::LO;
+ case ISD::SETULE:
+ return AArch64CC::LS;
}
}
-static const uint16_t AArch64FPRArgRegs[] = {
- AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
- AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7
-};
-static const unsigned NumFPRArgRegs = llvm::array_lengthof(AArch64FPRArgRegs);
+/// changeFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC.
+static void changeFPCCToAArch64CC(ISD::CondCode CC,
+ AArch64CC::CondCode &CondCode,
+ AArch64CC::CondCode &CondCode2) {
+ CondCode2 = AArch64CC::AL;
+ switch (CC) {
+ default:
+ llvm_unreachable("Unknown FP condition!");
+ case ISD::SETEQ:
+ case ISD::SETOEQ:
+ CondCode = AArch64CC::EQ;
+ break;
+ case ISD::SETGT:
+ case ISD::SETOGT:
+ CondCode = AArch64CC::GT;
+ break;
+ case ISD::SETGE:
+ case ISD::SETOGE:
+ CondCode = AArch64CC::GE;
+ break;
+ case ISD::SETOLT:
+ CondCode = AArch64CC::MI;
+ break;
+ case ISD::SETOLE:
+ CondCode = AArch64CC::LS;
+ break;
+ case ISD::SETONE:
+ CondCode = AArch64CC::MI;
+ CondCode2 = AArch64CC::GT;
+ break;
+ case ISD::SETO:
+ CondCode = AArch64CC::VC;
+ break;
+ case ISD::SETUO:
+ CondCode = AArch64CC::VS;
+ break;
+ case ISD::SETUEQ:
+ CondCode = AArch64CC::EQ;
+ CondCode2 = AArch64CC::VS;
+ break;
+ case ISD::SETUGT:
+ CondCode = AArch64CC::HI;
+ break;
+ case ISD::SETUGE:
+ CondCode = AArch64CC::PL;
+ break;
+ case ISD::SETLT:
+ case ISD::SETULT:
+ CondCode = AArch64CC::LT;
+ break;
+ case ISD::SETLE:
+ case ISD::SETULE:
+ CondCode = AArch64CC::LE;
+ break;
+ case ISD::SETNE:
+ case ISD::SETUNE:
+ CondCode = AArch64CC::NE;
+ break;
+ }
+}
-static const uint16_t AArch64ArgRegs[] = {
- AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3,
- AArch64::X4, AArch64::X5, AArch64::X6, AArch64::X7
-};
-static const unsigned NumArgRegs = llvm::array_lengthof(AArch64ArgRegs);
+/// changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64
+/// CC usable with the vector instructions. Fewer operations are available
+/// without a real NZCV register, so we have to use less efficient combinations
+/// to get the same effect.
+static void changeVectorFPCCToAArch64CC(ISD::CondCode CC,
+ AArch64CC::CondCode &CondCode,
+ AArch64CC::CondCode &CondCode2,
+ bool &Invert) {
+ Invert = false;
+ switch (CC) {
+ default:
+ // Mostly the scalar mappings work fine.
+ changeFPCCToAArch64CC(CC, CondCode, CondCode2);
+ break;
+ case ISD::SETUO:
+ Invert = true; // Fallthrough
+ case ISD::SETO:
+ CondCode = AArch64CC::MI;
+ CondCode2 = AArch64CC::GE;
+ break;
+ case ISD::SETUEQ:
+ case ISD::SETULT:
+ case ISD::SETULE:
+ case ISD::SETUGT:
+ case ISD::SETUGE:
+ // All of the compare-mask comparisons are ordered, but we can switch
+ // between the two by a double inversion. E.g. ULE == !OGT.
+ Invert = true;
+ changeFPCCToAArch64CC(getSetCCInverse(CC, false), CondCode, CondCode2);
+ break;
+ }
+}
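
The double inversion described above rests on the fact that each unordered FP comparison is the negation of the opposite ordered comparison once NaN operands are taken into account, e.g. ULE(a, b) == !OGT(a, b). A small standalone C++ sketch of that identity (illustrative only, not part of this change; the helper names are made up for the sketch):

#include <cassert>
#include <cmath>
#include <limits>

// ULE: true if either operand is NaN, or a <= b.
static bool ule(double a, double b) { return std::isnan(a) || std::isnan(b) || a <= b; }
// OGT: true only if neither operand is NaN and a > b.
static bool ogt(double a, double b) { return !std::isnan(a) && !std::isnan(b) && a > b; }

int main() {
  const double nan = std::numeric_limits<double>::quiet_NaN();
  for (double a : {-1.0, 0.0, 2.5, nan})
    for (double b : {-1.0, 0.0, 2.5, nan})
      assert(ule(a, b) == !ogt(a, b)); // ULE == !OGT for every operand pair
  return 0;
}
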
-static bool CC_AArch64NoMoreRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
- CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State) {
- // Mark all remaining general purpose registers as allocated. We don't
- // backtrack: if (for example) an i128 gets put on the stack, no subsequent
- // i64 will go in registers (C.11).
- for (unsigned i = 0; i < NumArgRegs; ++i)
- State.AllocateReg(AArch64ArgRegs[i]);
+static bool isLegalArithImmed(uint64_t C) {
+ // Matches AArch64DAGToDAGISel::SelectArithImmed().
+ return (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0);
+}
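
For reference, the check above encodes the AArch64 rule that an ADD/SUB/CMP immediate is an unsigned 12-bit value, optionally shifted left by 12 bits. A self-contained sketch with a few spot checks (illustrative only, not part of this change):

#include <cassert>
#include <cstdint>

// Mirrors the check above: C fits in 12 bits, or is a 12-bit value shifted left by 12.
static bool isLegalArithImmed(uint64_t C) {
  return (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0);
}

int main() {
  assert(isLegalArithImmed(0));          // smallest immediate
  assert(isLegalArithImmed(0xFFF));      // largest unshifted immediate
  assert(isLegalArithImmed(0xFFF000));   // largest shifted immediate
  assert(!isLegalArithImmed(0x1001));    // needs 13 bits and is not a multiple of 0x1000
  assert(!isLegalArithImmed(0x1000000)); // too large even with the shift
  return 0;
}
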
- return false;
+static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
+ SDLoc dl, SelectionDAG &DAG) {
+ EVT VT = LHS.getValueType();
+
+ if (VT.isFloatingPoint())
+ return DAG.getNode(AArch64ISD::FCMP, dl, VT, LHS, RHS);
+
+ // The CMP instruction is just an alias for SUBS, and representing it as
+ // SUBS means that it's possible to get CSE with subtract operations.
+ // A later phase can perform the optimization of setting the destination
+ // register to WZR/XZR if it ends up being unused.
+ unsigned Opcode = AArch64ISD::SUBS;
+
+ if (RHS.getOpcode() == ISD::SUB && isa<ConstantSDNode>(RHS.getOperand(0)) &&
+ cast<ConstantSDNode>(RHS.getOperand(0))->getZExtValue() == 0 &&
+ (CC == ISD::SETEQ || CC == ISD::SETNE)) {
+    // We'd like to combine a (CMP op1, (sub 0, op2)) into a CMN instruction on
+ // the grounds that "op1 - (-op2) == op1 + op2". However, the C and V flags
+ // can be set differently by this operation. It comes down to whether
+ // "SInt(~op2)+1 == SInt(~op2+1)" (and the same for UInt). If they are then
+ // everything is fine. If not then the optimization is wrong. Thus general
+ // comparisons are only valid if op2 != 0.
+
+ // So, finally, the only LLVM-native comparisons that don't mention C and V
+ // are SETEQ and SETNE. They're the only ones we can safely use CMN for in
+ // the absence of information about op2.
+ Opcode = AArch64ISD::ADDS;
+ RHS = RHS.getOperand(1);
+ } else if (LHS.getOpcode() == ISD::AND && isa<ConstantSDNode>(RHS) &&
+ cast<ConstantSDNode>(RHS)->getZExtValue() == 0 &&
+ !isUnsignedIntSetCC(CC)) {
+ // Similarly, (CMP (and X, Y), 0) can be implemented with a TST
+ // (a.k.a. ANDS) except that the flags are only guaranteed to work for one
+ // of the signed comparisons.
+ Opcode = AArch64ISD::ANDS;
+ RHS = LHS.getOperand(1);
+ LHS = LHS.getOperand(0);
+ }
+
+ return DAG.getNode(Opcode, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS)
+ .getValue(1);
}
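
The CMN rewrite above is restricted to SETEQ/SETNE because only the N and Z flags of SUBS(op1, -op2) and ADDS(op1, op2) are guaranteed to agree: the 64-bit results are identical, while the C and V flags can differ. A short sketch checking the result-bit identity, which is all EQ/NE depends on (illustrative only, not part of this change):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t samples[] = {0, 1, 42, 0x8000000000000000ULL, UINT64_MAX};
  for (uint64_t op1 : samples)
    for (uint64_t op2 : samples) {
      // SUBS(op1, 0 - op2) and ADDS(op1, op2) compute the same 64-bit value,
      // so N and Z agree and any EQ/NE decision is unaffected; C and V may not.
      uint64_t viaSub = op1 - (0 - op2);
      uint64_t viaAdd = op1 + op2;
      assert(viaSub == viaAdd);
    }
  return 0;
}
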
-#include "AArch64GenCallingConv.inc"
+static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
+ SDValue &AArch64cc, SelectionDAG &DAG, SDLoc dl) {
+ if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
+ EVT VT = RHS.getValueType();
+ uint64_t C = RHSC->getZExtValue();
+ if (!isLegalArithImmed(C)) {
+ // Constant does not fit, try adjusting it by one?
+ switch (CC) {
+ default:
+ break;
+ case ISD::SETLT:
+ case ISD::SETGE:
+ if ((VT == MVT::i32 && C != 0x80000000 &&
+ isLegalArithImmed((uint32_t)(C - 1))) ||
+ (VT == MVT::i64 && C != 0x80000000ULL &&
+ isLegalArithImmed(C - 1ULL))) {
+ CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
+ C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1;
+ RHS = DAG.getConstant(C, VT);
+ }
+ break;
+ case ISD::SETULT:
+ case ISD::SETUGE:
+ if ((VT == MVT::i32 && C != 0 &&
+ isLegalArithImmed((uint32_t)(C - 1))) ||
+ (VT == MVT::i64 && C != 0ULL && isLegalArithImmed(C - 1ULL))) {
+ CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
+ C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1;
+ RHS = DAG.getConstant(C, VT);
+ }
+ break;
+ case ISD::SETLE:
+ case ISD::SETGT:
+ if ((VT == MVT::i32 && C != 0x7fffffff &&
+ isLegalArithImmed((uint32_t)(C + 1))) ||
+ (VT == MVT::i64 && C != 0x7ffffffffffffffULL &&
+ isLegalArithImmed(C + 1ULL))) {
+ CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
+ C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1;
+ RHS = DAG.getConstant(C, VT);
+ }
+ break;
+ case ISD::SETULE:
+ case ISD::SETUGT:
+ if ((VT == MVT::i32 && C != 0xffffffff &&
+ isLegalArithImmed((uint32_t)(C + 1))) ||
+ (VT == MVT::i64 && C != 0xfffffffffffffffULL &&
+ isLegalArithImmed(C + 1ULL))) {
+ CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
+ C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1;
+ RHS = DAG.getConstant(C, VT);
+ }
+ break;
+ }
+ }
+ }
-CCAssignFn *AArch64TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const {
+ SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
+ AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
+ AArch64cc = DAG.getConstant(AArch64CC, MVT::i32);
+ return Cmp;
+}
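
The constant adjustment above uses identities such as x < C <=> x <= C - 1 and x <= C <=> x < C + 1 (and their unsigned counterparts) to trade an unencodable immediate for an encodable neighbour, excluding the boundary value where the adjusted constant would wrap. A quick sketch of one such rewrite (illustrative only, not part of this change; 4097 is not a legal arithmetic immediate, 4096 is):

#include <cassert>
#include <cstdint>

int main() {
  // 4097 (0x1001) is not a legal arithmetic immediate, but 4096 (0x1000) is,
  // so "x < 4097" can be emitted as "x <= 4096" with the condition adjusted
  // from LT to LE.
  for (int64_t x = 4090; x <= 4105; ++x)
    assert((x < 4097) == (x <= 4096));
  // The rewrite would be wrong at the type's boundary (e.g. C == INT64_MIN for
  // the signed C - 1 case), which is exactly what the guards above exclude.
  return 0;
}
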
- switch(CC) {
- default: llvm_unreachable("Unsupported calling convention");
- case CallingConv::Fast:
- case CallingConv::C:
- return CC_A64_APCS;
+static std::pair<SDValue, SDValue>
+getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG) {
+ assert((Op.getValueType() == MVT::i32 || Op.getValueType() == MVT::i64) &&
+ "Unsupported value type");
+ SDValue Value, Overflow;
+ SDLoc DL(Op);
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+ unsigned Opc = 0;
+ switch (Op.getOpcode()) {
+ default:
+ llvm_unreachable("Unknown overflow instruction!");
+ case ISD::SADDO:
+ Opc = AArch64ISD::ADDS;
+ CC = AArch64CC::VS;
+ break;
+ case ISD::UADDO:
+ Opc = AArch64ISD::ADDS;
+ CC = AArch64CC::HS;
+ break;
+ case ISD::SSUBO:
+ Opc = AArch64ISD::SUBS;
+ CC = AArch64CC::VS;
+ break;
+ case ISD::USUBO:
+ Opc = AArch64ISD::SUBS;
+ CC = AArch64CC::LO;
+ break;
+  // Multiply needs a little extra work.
+ case ISD::SMULO:
+ case ISD::UMULO: {
+ CC = AArch64CC::NE;
+ bool IsSigned = (Op.getOpcode() == ISD::SMULO) ? true : false;
+ if (Op.getValueType() == MVT::i32) {
+ unsigned ExtendOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
+ // For a 32 bit multiply with overflow check we want the instruction
+ // selector to generate a widening multiply (SMADDL/UMADDL). For that we
+ // need to generate the following pattern:
+ // (i64 add 0, (i64 mul (i64 sext|zext i32 %a), (i64 sext|zext i32 %b))
+ LHS = DAG.getNode(ExtendOpc, DL, MVT::i64, LHS);
+ RHS = DAG.getNode(ExtendOpc, DL, MVT::i64, RHS);
+ SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS);
+ SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Mul,
+ DAG.getConstant(0, MVT::i64));
+ // On AArch64 the upper 32 bits are always zero extended for a 32 bit
+ // operation. We need to clear out the upper 32 bits, because we used a
+ // widening multiply that wrote all 64 bits. In the end this should be a
+ // noop.
+ Value = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Add);
+ if (IsSigned) {
+ // The signed overflow check requires more than just a simple check for
+ // any bit set in the upper 32 bits of the result. These bits could be
+ // just the sign bits of a negative number. To perform the overflow
+        // check we arithmetically shift the low 32 bits of the result right
+        // by 31 bits and compare that with the upper 32 bits.
+ SDValue UpperBits = DAG.getNode(ISD::SRL, DL, MVT::i64, Add,
+ DAG.getConstant(32, MVT::i64));
+ UpperBits = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, UpperBits);
+ SDValue LowerBits = DAG.getNode(ISD::SRA, DL, MVT::i32, Value,
+ DAG.getConstant(31, MVT::i64));
+ // It is important that LowerBits is last, otherwise the arithmetic
+ // shift will not be folded into the compare (SUBS).
+ SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32);
+ Overflow = DAG.getNode(AArch64ISD::SUBS, DL, VTs, UpperBits, LowerBits)
+ .getValue(1);
+ } else {
+ // The overflow check for unsigned multiply is easy. We only need to
+ // check if any of the upper 32 bits are set. This can be done with a
+ // CMP (shifted register). For that we need to generate the following
+ // pattern:
+ // (i64 AArch64ISD::SUBS i64 0, (i64 srl i64 %Mul, i64 32)
+ SDValue UpperBits = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul,
+ DAG.getConstant(32, MVT::i64));
+ SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32);
+ Overflow =
+ DAG.getNode(AArch64ISD::SUBS, DL, VTs, DAG.getConstant(0, MVT::i64),
+ UpperBits).getValue(1);
+ }
+ break;
+ }
+ assert(Op.getValueType() == MVT::i64 && "Expected an i64 value type");
+ // For the 64 bit multiply
+ Value = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS);
+ if (IsSigned) {
+ SDValue UpperBits = DAG.getNode(ISD::MULHS, DL, MVT::i64, LHS, RHS);
+ SDValue LowerBits = DAG.getNode(ISD::SRA, DL, MVT::i64, Value,
+ DAG.getConstant(63, MVT::i64));
+ // It is important that LowerBits is last, otherwise the arithmetic
+ // shift will not be folded into the compare (SUBS).
+ SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32);
+ Overflow = DAG.getNode(AArch64ISD::SUBS, DL, VTs, UpperBits, LowerBits)
+ .getValue(1);
+ } else {
+ SDValue UpperBits = DAG.getNode(ISD::MULHU, DL, MVT::i64, LHS, RHS);
+ SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32);
+ Overflow =
+ DAG.getNode(AArch64ISD::SUBS, DL, VTs, DAG.getConstant(0, MVT::i64),
+ UpperBits).getValue(1);
+ }
+ break;
+ }
+ } // switch (...)
+
+ if (Opc) {
+ SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
+
+ // Emit the AArch64 operation with overflow check.
+ Value = DAG.getNode(Opc, DL, VTs, LHS, RHS);
+ Overflow = Value.getValue(1);
}
+ return std::make_pair(Value, Overflow);
}
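
For the 32-bit signed multiply-with-overflow path above, the test is: the upper 32 bits of the 64-bit product must equal the sign-extension of bit 31 of the truncated result. A standalone sketch of that check (illustrative only, not part of this change):

#include <cassert>
#include <cstdint>

// Overflow test used for 32-bit SMULO: widen, multiply, then compare the
// product's upper 32 bits against the sign-extension of the truncated result.
static bool smulo32(int32_t a, int32_t b) {
  int64_t prod = (int64_t)a * (int64_t)b;   // widening multiply (SMULL/SMADDL pattern)
  int32_t lo = (int32_t)prod;               // truncated 32-bit result
  int32_t hi = (int32_t)(prod >> 32);       // upper 32 bits of the product
  return hi != (lo < 0 ? -1 : 0);           // emitted above as an arithmetic shift by 31
}

int main() {
  assert(!smulo32(1000, 1000));     // 1,000,000 fits in i32
  assert(!smulo32(-46341, 46340));  // near the edge, still fits
  assert(smulo32(46341, 46341));    // 2,147,488,281 > INT32_MAX
  assert(smulo32(INT32_MIN, -1));   // classic overflow case
  return 0;
}
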
-void
-AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
- SDLoc DL, SDValue &Chain) const {
- MachineFunction &MF = DAG.getMachineFunction();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- AArch64MachineFunctionInfo *FuncInfo
- = MF.getInfo<AArch64MachineFunctionInfo>();
+SDValue AArch64TargetLowering::LowerF128Call(SDValue Op, SelectionDAG &DAG,
+ RTLIB::Libcall Call) const {
+ SmallVector<SDValue, 2> Ops;
+ for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i)
+ Ops.push_back(Op.getOperand(i));
- SmallVector<SDValue, 8> MemOps;
+ return makeLibCall(DAG, Call, MVT::f128, &Ops[0], Ops.size(), false,
+ SDLoc(Op)).first;
+}
- unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(AArch64ArgRegs,
- NumArgRegs);
- unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(AArch64FPRArgRegs,
- NumFPRArgRegs);
+static SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) {
+ SDValue Sel = Op.getOperand(0);
+ SDValue Other = Op.getOperand(1);
- unsigned GPRSaveSize = 8 * (NumArgRegs - FirstVariadicGPR);
- int GPRIdx = 0;
- if (GPRSaveSize != 0) {
- GPRIdx = MFI->CreateStackObject(GPRSaveSize, 8, false);
+ // If neither operand is a SELECT_CC, give up.
+ if (Sel.getOpcode() != ISD::SELECT_CC)
+ std::swap(Sel, Other);
+ if (Sel.getOpcode() != ISD::SELECT_CC)
+ return Op;
- SDValue FIN = DAG.getFrameIndex(GPRIdx, getPointerTy());
+ // The folding we want to perform is:
+ // (xor x, (select_cc a, b, cc, 0, -1) )
+ // -->
+ // (csel x, (xor x, -1), cc ...)
+ //
+ // The latter will get matched to a CSINV instruction.
- for (unsigned i = FirstVariadicGPR; i < NumArgRegs; ++i) {
- unsigned VReg = MF.addLiveIn(AArch64ArgRegs[i], &AArch64::GPR64RegClass);
- SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
- SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
- MachinePointerInfo::getStack(i * 8),
- false, false, 0);
- MemOps.push_back(Store);
- FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
- DAG.getConstant(8, getPointerTy()));
- }
- }
+ ISD::CondCode CC = cast<CondCodeSDNode>(Sel.getOperand(4))->get();
+ SDValue LHS = Sel.getOperand(0);
+ SDValue RHS = Sel.getOperand(1);
+ SDValue TVal = Sel.getOperand(2);
+ SDValue FVal = Sel.getOperand(3);
+ SDLoc dl(Sel);
- if (getSubtarget()->hasFPARMv8()) {
- unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
- int FPRIdx = 0;
- // According to the AArch64 Procedure Call Standard, section B.1/B.3, we
- // can omit a register save area if we know we'll never use registers of
- // that class.
- if (FPRSaveSize != 0) {
- FPRIdx = MFI->CreateStackObject(FPRSaveSize, 16, false);
+ // FIXME: This could be generalized to non-integer comparisons.
+ if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64)
+ return Op;
- SDValue FIN = DAG.getFrameIndex(FPRIdx, getPointerTy());
+ ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
+ ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);
- for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
- unsigned VReg = MF.addLiveIn(AArch64FPRArgRegs[i],
- &AArch64::FPR128RegClass);
- SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
- SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
- MachinePointerInfo::getStack(i * 16),
- false, false, 0);
- MemOps.push_back(Store);
- FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
- DAG.getConstant(16, getPointerTy()));
- }
- }
- FuncInfo->setVariadicFPRIdx(FPRIdx);
- FuncInfo->setVariadicFPRSize(FPRSaveSize);
+  // If the values aren't constants, this isn't the pattern we're looking for.
+ if (!CFVal || !CTVal)
+ return Op;
+
+ // We can commute the SELECT_CC by inverting the condition. This
+ // might be needed to make this fit into a CSINV pattern.
+ if (CTVal->isAllOnesValue() && CFVal->isNullValue()) {
+ std::swap(TVal, FVal);
+ std::swap(CTVal, CFVal);
+ CC = ISD::getSetCCInverse(CC, true);
}
- int StackIdx = MFI->CreateFixedObject(8, CCInfo.getNextStackOffset(), true);
+ // If the constants line up, perform the transform!
+ if (CTVal->isNullValue() && CFVal->isAllOnesValue()) {
+ SDValue CCVal;
+ SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
- FuncInfo->setVariadicStackIdx(StackIdx);
- FuncInfo->setVariadicGPRIdx(GPRIdx);
- FuncInfo->setVariadicGPRSize(GPRSaveSize);
+ FVal = Other;
+ TVal = DAG.getNode(ISD::XOR, dl, Other.getValueType(), Other,
+ DAG.getConstant(-1ULL, Other.getValueType()));
- if (!MemOps.empty()) {
- Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
- MemOps.size());
+ return DAG.getNode(AArch64ISD::CSEL, dl, Sel.getValueType(), FVal, TVal,
+ CCVal, Cmp);
}
+
+ return Op;
}
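
The fold above relies on x ^ 0 == x and x ^ -1 == ~x, which lets the XOR be pushed into the select and matched as a compare plus a single CSINV. A source-level sketch of the equivalence (illustrative only, not part of this change):

#include <cassert>
#include <cstdint>

int main() {
  const int64_t xs[] = {0, 1, -1, 0x1234567890ABCDEFLL};
  for (int64_t a : {1, 5})
    for (int64_t b : {3, 5})
      for (int64_t x : xs) {
        // (xor x, (a < b ? 0 : -1)) is the same as (a < b ? x : ~x).
        int64_t folded = (a < b) ? x : ~x;
        assert((x ^ (a < b ? 0 : -1)) == folded);
      }
  return 0;
}
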
+static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
+ EVT VT = Op.getValueType();
-SDValue
-AArch64TargetLowering::LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
- MachineFunction &MF = DAG.getMachineFunction();
- AArch64MachineFunctionInfo *FuncInfo
- = MF.getInfo<AArch64MachineFunctionInfo>();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
+ // Let legalize expand this if it isn't a legal type yet.
+ if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
+ return SDValue();
- SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
- CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForNode(CallConv));
+ SDVTList VTs = DAG.getVTList(VT, MVT::i32);
- SmallVector<SDValue, 16> ArgValues;
+ unsigned Opc;
+ bool ExtraOp = false;
+ switch (Op.getOpcode()) {
+ default:
+ llvm_unreachable("Invalid code");
+ case ISD::ADDC:
+ Opc = AArch64ISD::ADDS;
+ break;
+ case ISD::SUBC:
+ Opc = AArch64ISD::SUBS;
+ break;
+ case ISD::ADDE:
+ Opc = AArch64ISD::ADCS;
+ ExtraOp = true;
+ break;
+ case ISD::SUBE:
+ Opc = AArch64ISD::SBCS;
+ ExtraOp = true;
+ break;
+ }
- SDValue ArgValue;
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- CCValAssign &VA = ArgLocs[i];
- ISD::ArgFlagsTy Flags = Ins[i].Flags;
+ if (!ExtraOp)
+ return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1));
+ return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1),
+ Op.getOperand(2));
+}
- if (Flags.isByVal()) {
- // Byval is used for small structs and HFAs in the PCS, but the system
- // should work in a non-compliant manner for larger structs.
- EVT PtrTy = getPointerTy();
- int Size = Flags.getByValSize();
- unsigned NumRegs = (Size + 7) / 8;
+static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
+ // Let legalize expand this if it isn't a legal type yet.
+ if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
+ return SDValue();
- unsigned FrameIdx = MFI->CreateFixedObject(8 * NumRegs,
- VA.getLocMemOffset(),
- false);
- SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrTy);
- InVals.push_back(FrameIdxN);
+ AArch64CC::CondCode CC;
+ // The actual operation that sets the overflow or carry flag.
+ SDValue Value, Overflow;
+ std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Op, DAG);
- continue;
- } else if (VA.isRegLoc()) {
- MVT RegVT = VA.getLocVT();
- const TargetRegisterClass *RC = getRegClassFor(RegVT);
- unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ // We use 0 and 1 as false and true values.
+ SDValue TVal = DAG.getConstant(1, MVT::i32);
+ SDValue FVal = DAG.getConstant(0, MVT::i32);
- ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
- } else { // VA.isRegLoc()
- assert(VA.isMemLoc());
+ // We use an inverted condition, because the conditional select is inverted
+ // too. This will allow it to be selected to a single instruction:
+ // CSINC Wd, WZR, WZR, invert(cond).
+ SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), MVT::i32);
+ Overflow = DAG.getNode(AArch64ISD::CSEL, SDLoc(Op), MVT::i32, FVal, TVal,
+ CCVal, Overflow);
- int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
- VA.getLocMemOffset(), true);
+ SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
+ return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), VTs, Value, Overflow);
+}
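
The inverted condition above makes the select come out as "if !cond then 0 else 0 + 1", which is exactly the CSINC Wd, WZR, WZR, invert(cond) form. A trivial sketch of the equivalence (illustrative only, not part of this change):

#include <cassert>

int main() {
  for (bool overflow : {false, true}) {
    // CSEL(0, 1, invert(overflow)) behaves like CSINC Wd, WZR, WZR, invert(overflow):
    // the inverted condition picks 0, otherwise the result is WZR + 1.
    unsigned wd = !overflow ? 0u : (0u + 1u);
    assert(wd == (overflow ? 1u : 0u)); // the flag is materialized as 0/1
  }
  return 0;
}
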
- SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
- ArgValue = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
- MachinePointerInfo::getFixedStack(FI),
- false, false, false, 0);
+// Prefetch operands are:
+// 1: Address to prefetch
+// 2: bool isWrite
+// 3: int locality (0 = no locality ... 3 = extreme locality)
+// 4: bool isDataCache
+static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) {
+ SDLoc DL(Op);
+ unsigned IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+ unsigned Locality = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
+  // The isDataCache operand (operand 4) is not used.
+ // unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
+
+ bool IsStream = !Locality;
+ // When the locality number is set
+ if (Locality) {
+ // The front-end should have filtered out the out-of-range values
+ assert(Locality <= 3 && "Prefetch locality out-of-range");
+ // The locality degree is the opposite of the cache speed.
+ // Put the number the other way around.
+ // The encoding starts at 0 for level 1
+ Locality = 3 - Locality;
+ }
+  // Build the mask value encoding the expected behavior.
+ unsigned PrfOp = (IsWrite << 4) | // Load/Store bit
+ (Locality << 1) | // Cache level bits
+ (unsigned)IsStream; // Stream bit
+ return DAG.getNode(AArch64ISD::PREFETCH, DL, MVT::Other, Op.getOperand(0),
+ DAG.getConstant(PrfOp, MVT::i32), Op.getOperand(1));
+}
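
The PrfOp immediate built above packs (IsWrite << 4) | (TargetLevel << 1) | IsStream, with the locality value already flipped so that 0 selects the closest cache level. A small sketch reproducing the encoding for a few common hints; the resulting PLDL1KEEP/PLDL1STRM/PSTL1KEEP values are the standard AArch64 prfop encodings, and the helper name is made up for the sketch (illustrative only, not part of this change):

#include <cassert>

// Mirrors the encoding above: bit 4 = load(0)/store(1), bits 2..1 = target
// cache level (0 = L1), bit 0 = streaming (non-temporal) hint.
static unsigned prfOp(bool isWrite, unsigned locality /* 0..3, as in the intrinsic */) {
  bool isStream = (locality == 0);
  unsigned target = locality ? 3 - locality : 0; // flip: 3 -> L1 ... 1 -> L3
  return ((unsigned)isWrite << 4) | (target << 1) | (unsigned)isStream;
}

int main() {
  assert(prfOp(false, 3) == 0);  // read, max locality  -> PLDL1KEEP (0b00000)
  assert(prfOp(false, 0) == 1);  // read, no locality   -> PLDL1STRM (0b00001)
  assert(prfOp(true, 3) == 16);  // write, max locality -> PSTL1KEEP (0b10000)
  return 0;
}
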
- }
+SDValue AArch64TargetLowering::LowerFP_EXTEND(SDValue Op,
+ SelectionDAG &DAG) const {
+ assert(Op.getValueType() == MVT::f128 && "Unexpected lowering");
- switch (VA.getLocInfo()) {
- default: llvm_unreachable("Unknown loc info!");
- case CCValAssign::Full: break;
- case CCValAssign::BCvt:
- ArgValue = DAG.getNode(ISD::BITCAST,dl, VA.getValVT(), ArgValue);
- break;
- case CCValAssign::SExt:
- case CCValAssign::ZExt:
- case CCValAssign::AExt: {
- unsigned DestSize = VA.getValVT().getSizeInBits();
- unsigned DestSubReg;
-
- switch (DestSize) {
- case 8: DestSubReg = AArch64::sub_8; break;
- case 16: DestSubReg = AArch64::sub_16; break;
- case 32: DestSubReg = AArch64::sub_32; break;
- case 64: DestSubReg = AArch64::sub_64; break;
- default: llvm_unreachable("Unexpected argument promotion");
- }
+ RTLIB::Libcall LC;
+ LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType());
- ArgValue = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
- VA.getValVT(), ArgValue,
- DAG.getTargetConstant(DestSubReg, MVT::i32)),
- 0);
- break;
- }
- }
+ return LowerF128Call(Op, DAG, LC);
+}
- InVals.push_back(ArgValue);
+SDValue AArch64TargetLowering::LowerFP_ROUND(SDValue Op,
+ SelectionDAG &DAG) const {
+ if (Op.getOperand(0).getValueType() != MVT::f128) {
+ // It's legal except when f128 is involved
+ return Op;
}
- if (isVarArg)
- SaveVarArgRegisters(CCInfo, DAG, dl, Chain);
+ RTLIB::Libcall LC;
+ LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType());
- unsigned StackArgSize = CCInfo.getNextStackOffset();
- if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
- // This is a non-standard ABI so by fiat I say we're allowed to make full
- // use of the stack area to be popped, which must be aligned to 16 bytes in
- // any case:
- StackArgSize = RoundUpToAlignment(StackArgSize, 16);
+ // FP_ROUND node has a second operand indicating whether it is known to be
+ // precise. That doesn't take part in the LibCall so we can't directly use
+ // LowerF128Call.
+ SDValue SrcVal = Op.getOperand(0);
+ return makeLibCall(DAG, LC, Op.getValueType(), &SrcVal, 1,
+ /*isSigned*/ false, SDLoc(Op)).first;
+}
- // If we're expected to restore the stack (e.g. fastcc) then we'll be adding
- // a multiple of 16.
- FuncInfo->setArgumentStackToRestore(StackArgSize);
+static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
+ // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp.
+ // Any additional optimization in this function should be recorded
+ // in the cost tables.
+ EVT InVT = Op.getOperand(0).getValueType();
+ EVT VT = Op.getValueType();
- // This realignment carries over to the available bytes below. Our own
- // callers will guarantee the space is free by giving an aligned value to
- // CALLSEQ_START.
+ if (VT.getSizeInBits() < InVT.getSizeInBits()) {
+ SDLoc dl(Op);
+ SDValue Cv =
+ DAG.getNode(Op.getOpcode(), dl, InVT.changeVectorElementTypeToInteger(),
+ Op.getOperand(0));
+ return DAG.getNode(ISD::TRUNCATE, dl, VT, Cv);
}
- // Even if we're not expected to free up the space, it's useful to know how
- // much is there while considering tail calls (because we can reuse it).
- FuncInfo->setBytesInStackArgArea(StackArgSize);
- return Chain;
+ if (VT.getSizeInBits() > InVT.getSizeInBits()) {
+ SDLoc dl(Op);
+ SDValue Ext = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v2f64, Op.getOperand(0));
+ return DAG.getNode(Op.getOpcode(), dl, VT, Ext);
+ }
+
+ // Type changing conversions are illegal.
+ return Op;
}
-SDValue
-AArch64TargetLowering::LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const {
- // CCValAssign - represent the assignment of the return value to a location.
- SmallVector<CCValAssign, 16> RVLocs;
+SDValue AArch64TargetLowering::LowerFP_TO_INT(SDValue Op,
+ SelectionDAG &DAG) const {
+ if (Op.getOperand(0).getValueType().isVector())
+ return LowerVectorFP_TO_INT(Op, DAG);
- // CCState - Info about the registers and stack slots.
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ if (Op.getOperand(0).getValueType() != MVT::f128) {
+ // It's legal except when f128 is involved
+ return Op;
+ }
- // Analyze outgoing return values.
- CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv));
+ RTLIB::Libcall LC;
+ if (Op.getOpcode() == ISD::FP_TO_SINT)
+ LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), Op.getValueType());
+ else
+ LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), Op.getValueType());
- SDValue Flag;
- SmallVector<SDValue, 4> RetOps(1, Chain);
+ SmallVector<SDValue, 2> Ops;
+ for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i)
+ Ops.push_back(Op.getOperand(i));
- for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
- // PCS: "If the type, T, of the result of a function is such that
- // void func(T arg) would require that arg be passed as a value in a
- // register (or set of registers) according to the rules in 5.4, then the
- // result is returned in the same registers as would be used for such an
- // argument.
- //
- // Otherwise, the caller shall reserve a block of memory of sufficient
- // size and alignment to hold the result. The address of the memory block
- // shall be passed as an additional argument to the function in x8."
- //
- // This is implemented in two places. The register-return values are dealt
- // with here, more complex returns are passed as an sret parameter, which
- // means we don't have to worry about it during actual return.
- CCValAssign &VA = RVLocs[i];
- assert(VA.isRegLoc() && "Only register-returns should be created by PCS");
+ return makeLibCall(DAG, LC, Op.getValueType(), &Ops[0], Ops.size(), false,
+ SDLoc(Op)).first;
+}
+static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
+ // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp.
+ // Any additional optimization in this function should be recorded
+ // in the cost tables.
+ EVT VT = Op.getValueType();
+ SDLoc dl(Op);
+ SDValue In = Op.getOperand(0);
+ EVT InVT = In.getValueType();
+
+ if (VT.getSizeInBits() < InVT.getSizeInBits()) {
+ MVT CastVT =
+ MVT::getVectorVT(MVT::getFloatingPointVT(InVT.getScalarSizeInBits()),
+ InVT.getVectorNumElements());
+ In = DAG.getNode(Op.getOpcode(), dl, CastVT, In);
+ return DAG.getNode(ISD::FP_ROUND, dl, VT, In, DAG.getIntPtrConstant(0));
+ }
- SDValue Arg = OutVals[i];
+ if (VT.getSizeInBits() > InVT.getSizeInBits()) {
+ unsigned CastOpc =
+ Op.getOpcode() == ISD::SINT_TO_FP ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
+ EVT CastVT = VT.changeVectorElementTypeToInteger();
+ In = DAG.getNode(CastOpc, dl, CastVT, In);
+ return DAG.getNode(Op.getOpcode(), dl, VT, In);
+ }
- // There's no convenient note in the ABI about this as there is for normal
- // arguments, but it says return values are passed in the same registers as
- // an argument would be. I believe that includes the comments about
- // unspecified higher bits, putting the burden of widening on the *caller*
- // for return values.
- switch (VA.getLocInfo()) {
- default: llvm_unreachable("Unknown loc info");
- case CCValAssign::Full: break;
- case CCValAssign::SExt:
- case CCValAssign::ZExt:
- case CCValAssign::AExt:
- // Floating-point values should only be extended when they're going into
- // memory, which can't happen here so an integer extend is acceptable.
- Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
- break;
- case CCValAssign::BCvt:
- Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
- break;
- }
+ return Op;
+}
- Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
- Flag = Chain.getValue(1);
- RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
- }
+SDValue AArch64TargetLowering::LowerINT_TO_FP(SDValue Op,
+ SelectionDAG &DAG) const {
+ if (Op.getValueType().isVector())
+ return LowerVectorINT_TO_FP(Op, DAG);
- RetOps[0] = Chain; // Update chain.
+ // i128 conversions are libcalls.
+ if (Op.getOperand(0).getValueType() == MVT::i128)
+ return SDValue();
- // Add the flag if we have it.
- if (Flag.getNode())
- RetOps.push_back(Flag);
+ // Other conversions are legal, unless it's to the completely software-based
+ // fp128.
+ if (Op.getValueType() != MVT::f128)
+ return Op;
- return DAG.getNode(AArch64ISD::Ret, dl, MVT::Other,
- &RetOps[0], RetOps.size());
+ RTLIB::Libcall LC;
+ if (Op.getOpcode() == ISD::SINT_TO_FP)
+ LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType());
+ else
+ LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType());
+
+ return LowerF128Call(Op, DAG, LC);
}
-SDValue
-AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const {
- SelectionDAG &DAG = CLI.DAG;
- SDLoc &dl = CLI.DL;
- SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
- SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
- SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
- SDValue Chain = CLI.Chain;
- SDValue Callee = CLI.Callee;
- bool &IsTailCall = CLI.IsTailCall;
- CallingConv::ID CallConv = CLI.CallConv;
- bool IsVarArg = CLI.IsVarArg;
+SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op,
+ SelectionDAG &DAG) const {
+ // For iOS, we want to call an alternative entry point: __sincos_stret,
+ // which returns the values in two S / D registers.
+ SDLoc dl(Op);
+ SDValue Arg = Op.getOperand(0);
+ EVT ArgVT = Arg.getValueType();
+ Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
- MachineFunction &MF = DAG.getMachineFunction();
- AArch64MachineFunctionInfo *FuncInfo
- = MF.getInfo<AArch64MachineFunctionInfo>();
- bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
- bool IsStructRet = !Outs.empty() && Outs[0].Flags.isSRet();
- bool IsSibCall = false;
+ ArgListTy Args;
+ ArgListEntry Entry;
- if (IsTailCall) {
- IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
- IsVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
- Outs, OutVals, Ins, DAG);
+ Entry.Node = Arg;
+ Entry.Ty = ArgTy;
+ Entry.isSExt = false;
+ Entry.isZExt = false;
+ Args.push_back(Entry);
- // A sibling call is one where we're under the usual C ABI and not planning
- // to change that but can still do a tail call:
- if (!TailCallOpt && IsTailCall)
- IsSibCall = true;
- }
+ const char *LibcallName =
+ (ArgVT == MVT::f64) ? "__sincos_stret" : "__sincosf_stret";
+ SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy());
- SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
- CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CallConv));
+ StructType *RetTy = StructType::get(ArgTy, ArgTy, NULL);
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
+ .setCallee(CallingConv::Fast, RetTy, Callee, std::move(Args), 0);
- // On AArch64 (and all other architectures I'm aware of) the most this has to
- // do is adjust the stack pointer.
- unsigned NumBytes = RoundUpToAlignment(CCInfo.getNextStackOffset(), 16);
- if (IsSibCall) {
- // Since we're not changing the ABI to make this a tail call, the memory
- // operands are already available in the caller's incoming argument space.
- NumBytes = 0;
- }
+ std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
+ return CallResult.first;
+}
- // FPDiff is the byte offset of the call's argument area from the callee's.
- // Stores to callee stack arguments will be placed in FixedStackSlots offset
- // by this amount for a tail call. In a sibling call it must be 0 because the
- // caller will deallocate the entire stack and the callee still expects its
- // arguments to begin at SP+0. Completely unused for non-tail calls.
- int FPDiff = 0;
+static SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) {
+ if (Op.getValueType() != MVT::f16)
+ return SDValue();
- if (IsTailCall && !IsSibCall) {
- unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
+ assert(Op.getOperand(0).getValueType() == MVT::i16);
+ SDLoc DL(Op);
- // FPDiff will be negative if this tail call requires more space than we
- // would automatically have in our incoming argument space. Positive if we
- // can actually shrink the stack.
- FPDiff = NumReusableBytes - NumBytes;
+ Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op.getOperand(0));
+ Op = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Op);
+ return SDValue(
+ DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, MVT::f16, Op,
+ DAG.getTargetConstant(AArch64::hsub, MVT::i32)),
+ 0);
+}
- // The stack pointer must be 16-byte aligned at all times it's used for a
- // memory operation, which in practice means at *all* times and in
- // particular across call boundaries. Therefore our own arguments started at
- // a 16-byte aligned SP and the delta applied for the tail call should
- // satisfy the same constraint.
- assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
+
+SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
+ SelectionDAG &DAG) const {
+ switch (Op.getOpcode()) {
+ default:
+ llvm_unreachable("unimplemented operand");
+ return SDValue();
+ case ISD::BITCAST:
+ return LowerBITCAST(Op, DAG);
+ case ISD::GlobalAddress:
+ return LowerGlobalAddress(Op, DAG);
+ case ISD::GlobalTLSAddress:
+ return LowerGlobalTLSAddress(Op, DAG);
+ case ISD::SETCC:
+ return LowerSETCC(Op, DAG);
+ case ISD::BR_CC:
+ return LowerBR_CC(Op, DAG);
+ case ISD::SELECT:
+ return LowerSELECT(Op, DAG);
+ case ISD::SELECT_CC:
+ return LowerSELECT_CC(Op, DAG);
+ case ISD::JumpTable:
+ return LowerJumpTable(Op, DAG);
+ case ISD::ConstantPool:
+ return LowerConstantPool(Op, DAG);
+ case ISD::BlockAddress:
+ return LowerBlockAddress(Op, DAG);
+ case ISD::VASTART:
+ return LowerVASTART(Op, DAG);
+ case ISD::VACOPY:
+ return LowerVACOPY(Op, DAG);
+ case ISD::VAARG:
+ return LowerVAARG(Op, DAG);
+ case ISD::ADDC:
+ case ISD::ADDE:
+ case ISD::SUBC:
+ case ISD::SUBE:
+ return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
+ case ISD::SADDO:
+ case ISD::UADDO:
+ case ISD::SSUBO:
+ case ISD::USUBO:
+ case ISD::SMULO:
+ case ISD::UMULO:
+ return LowerXALUO(Op, DAG);
+ case ISD::FADD:
+ return LowerF128Call(Op, DAG, RTLIB::ADD_F128);
+ case ISD::FSUB:
+ return LowerF128Call(Op, DAG, RTLIB::SUB_F128);
+ case ISD::FMUL:
+ return LowerF128Call(Op, DAG, RTLIB::MUL_F128);
+ case ISD::FDIV:
+ return LowerF128Call(Op, DAG, RTLIB::DIV_F128);
+ case ISD::FP_ROUND:
+ return LowerFP_ROUND(Op, DAG);
+ case ISD::FP_EXTEND:
+ return LowerFP_EXTEND(Op, DAG);
+ case ISD::FRAMEADDR:
+ return LowerFRAMEADDR(Op, DAG);
+ case ISD::RETURNADDR:
+ return LowerRETURNADDR(Op, DAG);
+ case ISD::INSERT_VECTOR_ELT:
+ return LowerINSERT_VECTOR_ELT(Op, DAG);
+ case ISD::EXTRACT_VECTOR_ELT:
+ return LowerEXTRACT_VECTOR_ELT(Op, DAG);
+ case ISD::BUILD_VECTOR:
+ return LowerBUILD_VECTOR(Op, DAG);
+ case ISD::VECTOR_SHUFFLE:
+ return LowerVECTOR_SHUFFLE(Op, DAG);
+ case ISD::EXTRACT_SUBVECTOR:
+ return LowerEXTRACT_SUBVECTOR(Op, DAG);
+ case ISD::SRA:
+ case ISD::SRL:
+ case ISD::SHL:
+ return LowerVectorSRA_SRL_SHL(Op, DAG);
+ case ISD::SHL_PARTS:
+ return LowerShiftLeftParts(Op, DAG);
+ case ISD::SRL_PARTS:
+ case ISD::SRA_PARTS:
+ return LowerShiftRightParts(Op, DAG);
+ case ISD::CTPOP:
+ return LowerCTPOP(Op, DAG);
+ case ISD::FCOPYSIGN:
+ return LowerFCOPYSIGN(Op, DAG);
+ case ISD::AND:
+ return LowerVectorAND(Op, DAG);
+ case ISD::OR:
+ return LowerVectorOR(Op, DAG);
+ case ISD::XOR:
+ return LowerXOR(Op, DAG);
+ case ISD::PREFETCH:
+ return LowerPREFETCH(Op, DAG);
+ case ISD::SINT_TO_FP:
+ case ISD::UINT_TO_FP:
+ return LowerINT_TO_FP(Op, DAG);
+ case ISD::FP_TO_SINT:
+ case ISD::FP_TO_UINT:
+ return LowerFP_TO_INT(Op, DAG);
+ case ISD::FSINCOS:
+ return LowerFSINCOS(Op, DAG);
}
+}
- if (!IsSibCall)
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
- dl);
+/// getFunctionAlignment - Return the Log2 alignment of this function.
+unsigned AArch64TargetLowering::getFunctionAlignment(const Function *F) const {
+ return 2;
+}
- SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, AArch64::XSP,
- getPointerTy());
+//===----------------------------------------------------------------------===//
+// Calling Convention Implementation
+//===----------------------------------------------------------------------===//
- SmallVector<SDValue, 8> MemOpChains;
- SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
+#include "AArch64GenCallingConv.inc"
+/// Selects the correct CCAssignFn for the given CallingConvention
+/// value.
+CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
+ bool IsVarArg) const {
+ switch (CC) {
+ default:
+ llvm_unreachable("Unsupported calling convention.");
+ case CallingConv::WebKit_JS:
+ return CC_AArch64_WebKit_JS;
+ case CallingConv::C:
+ case CallingConv::Fast:
+ if (!Subtarget->isTargetDarwin())
+ return CC_AArch64_AAPCS;
+ return IsVarArg ? CC_AArch64_DarwinPCS_VarArg : CC_AArch64_DarwinPCS;
+ }
+}
+
+SDValue AArch64TargetLowering::LowerFormalArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ // Assign locations to all of the incoming arguments.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+ getTargetMachine(), ArgLocs, *DAG.getContext());
+
+ // At this point, Ins[].VT may already be promoted to i32. To correctly
+ // handle passing i8 as i8 instead of i32 on stack, we pass in both i32 and
+ // i8 to CC_AArch64_AAPCS with i32 being ValVT and i8 being LocVT.
+ // Since AnalyzeFormalArguments uses Ins[].VT for both ValVT and LocVT, here
+ // we use a special version of AnalyzeFormalArguments to pass in ValVT and
+ // LocVT.
+ unsigned NumArgs = Ins.size();
+ Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
+ unsigned CurArgIdx = 0;
+ for (unsigned i = 0; i != NumArgs; ++i) {
+ MVT ValVT = Ins[i].VT;
+ std::advance(CurOrigArg, Ins[i].OrigArgIndex - CurArgIdx);
+ CurArgIdx = Ins[i].OrigArgIndex;
+
+ // Get type of the original argument.
+ EVT ActualVT = getValueType(CurOrigArg->getType(), /*AllowUnknown*/ true);
+ MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : MVT::Other;
+ // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
+ if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
+ ValVT = MVT::i8;
+ else if (ActualMVT == MVT::i16)
+ ValVT = MVT::i16;
+
+ CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, /*IsVarArg=*/false);
+ bool Res =
+ AssignFn(i, ValVT, ValVT, CCValAssign::Full, Ins[i].Flags, CCInfo);
+ assert(!Res && "Call operand has unhandled type");
+ (void)Res;
+ }
+ assert(ArgLocs.size() == Ins.size());
+ SmallVector<SDValue, 16> ArgValues;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
- ISD::ArgFlagsTy Flags = Outs[i].Flags;
- SDValue Arg = OutVals[i];
- // Callee does the actual widening, so all extensions just use an implicit
- // definition of the rest of the Loc. Aesthetically, this would be nicer as
- // an ANY_EXTEND, but that isn't valid for floating-point types and this
- // alternative works on integer types too.
- switch (VA.getLocInfo()) {
- default: llvm_unreachable("Unknown loc info!");
- case CCValAssign::Full: break;
- case CCValAssign::SExt:
- case CCValAssign::ZExt:
- case CCValAssign::AExt: {
- unsigned SrcSize = VA.getValVT().getSizeInBits();
- unsigned SrcSubReg;
-
- switch (SrcSize) {
- case 8: SrcSubReg = AArch64::sub_8; break;
- case 16: SrcSubReg = AArch64::sub_16; break;
- case 32: SrcSubReg = AArch64::sub_32; break;
- case 64: SrcSubReg = AArch64::sub_64; break;
- default: llvm_unreachable("Unexpected argument promotion");
- }
-
- Arg = SDValue(DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
- VA.getLocVT(),
- DAG.getUNDEF(VA.getLocVT()),
- Arg,
- DAG.getTargetConstant(SrcSubReg, MVT::i32)),
- 0);
+ if (Ins[i].Flags.isByVal()) {
+ // Byval is used for HFAs in the PCS, but the system should work in a
+ // non-compliant manner for larger structs.
+ EVT PtrTy = getPointerTy();
+ int Size = Ins[i].Flags.getByValSize();
+ unsigned NumRegs = (Size + 7) / 8;
- break;
- }
- case CCValAssign::BCvt:
- Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
- break;
- }
+ // FIXME: This works on big-endian for composite byvals, which are the common
+      // case. It should also work for fundamental types.
+ unsigned FrameIdx =
+ MFI->CreateFixedObject(8 * NumRegs, VA.getLocMemOffset(), false);
+ SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrTy);
+ InVals.push_back(FrameIdxN);
- if (VA.isRegLoc()) {
- // A normal register (sub-) argument. For now we just note it down because
- // we want to copy things into registers as late as possible to avoid
- // register-pressure (and possibly worse).
- RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
continue;
}
+
+ if (VA.isRegLoc()) {
+ // Arguments stored in registers.
+ EVT RegVT = VA.getLocVT();
+
+ SDValue ArgValue;
+ const TargetRegisterClass *RC;
+
+ if (RegVT == MVT::i32)
+ RC = &AArch64::GPR32RegClass;
+ else if (RegVT == MVT::i64)
+ RC = &AArch64::GPR64RegClass;
+ else if (RegVT == MVT::f16)
+ RC = &AArch64::FPR16RegClass;
+ else if (RegVT == MVT::f32)
+ RC = &AArch64::FPR32RegClass;
+ else if (RegVT == MVT::f64 || RegVT.is64BitVector())
+ RC = &AArch64::FPR64RegClass;
+ else if (RegVT == MVT::f128 || RegVT.is128BitVector())
+ RC = &AArch64::FPR128RegClass;
+ else
+ llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
+
+ // Transform the arguments in physical registers into virtual ones.
+ unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
- assert(VA.isMemLoc() && "unexpected argument location");
+ // If this is an 8, 16 or 32-bit value, it is really passed promoted
+ // to 64 bits. Insert an assert[sz]ext to capture this, then
+ // truncate to the right size.
+ switch (VA.getLocInfo()) {
+ default:
+ llvm_unreachable("Unknown loc info!");
+ case CCValAssign::Full:
+ break;
+ case CCValAssign::BCvt:
+ ArgValue = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), ArgValue);
+ break;
+ case CCValAssign::AExt:
+ case CCValAssign::SExt:
+ case CCValAssign::ZExt:
+ // SelectionDAGBuilder will insert appropriate AssertZExt & AssertSExt
+ // nodes after our lowering.
+ assert(RegVT == Ins[i].VT && "incorrect register location selected");
+ break;
+ }
- SDValue DstAddr;
- MachinePointerInfo DstInfo;
- if (IsTailCall) {
- uint32_t OpSize = Flags.isByVal() ? Flags.getByValSize() :
- VA.getLocVT().getSizeInBits();
- OpSize = (OpSize + 7) / 8;
- int32_t Offset = VA.getLocMemOffset() + FPDiff;
- int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
+ InVals.push_back(ArgValue);
- DstAddr = DAG.getFrameIndex(FI, getPointerTy());
- DstInfo = MachinePointerInfo::getFixedStack(FI);
+ } else { // VA.isRegLoc()
+ assert(VA.isMemLoc() && "CCValAssign is neither reg nor mem");
+ unsigned ArgOffset = VA.getLocMemOffset();
+ unsigned ArgSize = VA.getLocVT().getSizeInBits() / 8;
- // Make sure any stack arguments overlapping with where we're storing are
- // loaded before this eventual operation. Otherwise they'll be clobbered.
- Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
- } else {
- SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset());
+ uint32_t BEAlign = 0;
+ if (ArgSize < 8 && !Subtarget->isLittleEndian())
+ BEAlign = 8 - ArgSize;
- DstAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
- DstInfo = MachinePointerInfo::getStack(VA.getLocMemOffset());
- }
+ int FI = MFI->CreateFixedObject(ArgSize, ArgOffset + BEAlign, true);
- if (Flags.isByVal()) {
- SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i64);
- SDValue Cpy = DAG.getMemcpy(Chain, dl, DstAddr, Arg, SizeNode,
- Flags.getByValAlign(),
- /*isVolatile = */ false,
- /*alwaysInline = */ false,
- DstInfo, MachinePointerInfo(0));
- MemOpChains.push_back(Cpy);
- } else {
- // Normal stack argument, put it where it's needed.
- SDValue Store = DAG.getStore(Chain, dl, Arg, DstAddr, DstInfo,
- false, false, 0);
- MemOpChains.push_back(Store);
- }
- }
+ // Create load nodes to retrieve arguments from the stack.
+ SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
+ SDValue ArgValue;
- // The loads and stores generated above shouldn't clash with each
- // other. Combining them with this TokenFactor notes that fact for the rest of
- // the backend.
- if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
+      // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
+ ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
+ MVT MemVT = VA.getValVT();
- // Most of the rest of the instructions need to be glued together; we don't
- // want assignments to actual registers used by a call to be rearranged by a
- // well-meaning scheduler.
- SDValue InFlag;
+ switch (VA.getLocInfo()) {
+ default:
+ break;
+ case CCValAssign::BCvt:
+ MemVT = VA.getLocVT();
+ break;
+ case CCValAssign::SExt:
+ ExtType = ISD::SEXTLOAD;
+ break;
+ case CCValAssign::ZExt:
+ ExtType = ISD::ZEXTLOAD;
+ break;
+ case CCValAssign::AExt:
+ ExtType = ISD::EXTLOAD;
+ break;
+ }
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
- RegsToPass[i].second, InFlag);
- InFlag = Chain.getValue(1);
- }
+ ArgValue = DAG.getExtLoad(ExtType, DL, VA.getLocVT(), Chain, FIN,
+ MachinePointerInfo::getFixedStack(FI),
+ MemVT, false, false, false, nullptr);
- // The linker is responsible for inserting veneers when necessary to put a
- // function call destination in range, so we don't need to bother with a
- // wrapper here.
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
- const GlobalValue *GV = G->getGlobal();
- Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
- } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
- const char *Sym = S->getSymbol();
- Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
+ InVals.push_back(ArgValue);
+ }
}
- // We don't usually want to end the call-sequence here because we would tidy
- // the frame up *after* the call, however in the ABI-changing tail-call case
- // we've carefully laid out the parameters so that when sp is reset they'll be
- // in the correct location.
- if (IsTailCall && !IsSibCall) {
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
- DAG.getIntPtrConstant(0, true), InFlag, dl);
- InFlag = Chain.getValue(1);
+ // varargs
+ if (isVarArg) {
+ if (!Subtarget->isTargetDarwin()) {
+ // The AAPCS variadic function ABI is identical to the non-variadic
+ // one. As a result there may be more arguments in registers and we should
+ // save them for future reference.
+ saveVarArgRegisters(CCInfo, DAG, DL, Chain);
+ }
+
+ AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
+ // This will point to the next argument passed via stack.
+ unsigned StackOffset = CCInfo.getNextStackOffset();
+ // We currently pass all varargs at 8-byte alignment.
+ StackOffset = ((StackOffset + 7) & ~7);
+ AFI->setVarArgsStackIndex(MFI->CreateFixedObject(4, StackOffset, true));
}
- // We produce the following DAG scheme for the actual call instruction:
- // (AArch64Call Chain, Callee, reg1, ..., regn, preserveMask, inflag?
- //
- // Most arguments aren't going to be used and just keep the values live as
- // far as LLVM is concerned. It's expected to be selected as simply "bl
- // callee" (for a direct, non-tail call).
- std::vector<SDValue> Ops;
- Ops.push_back(Chain);
- Ops.push_back(Callee);
+ AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
+ unsigned StackArgSize = CCInfo.getNextStackOffset();
+ bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
+ if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
+ // This is a non-standard ABI so by fiat I say we're allowed to make full
+ // use of the stack area to be popped, which must be aligned to 16 bytes in
+ // any case:
+ StackArgSize = RoundUpToAlignment(StackArgSize, 16);
- if (IsTailCall) {
- // Each tail call may have to adjust the stack by a different amount, so
- // this information must travel along with the operation for eventual
- // consumption by emitEpilogue.
- Ops.push_back(DAG.getTargetConstant(FPDiff, MVT::i32));
+ // If we're expected to restore the stack (e.g. fastcc) then we'll be adding
+ // a multiple of 16.
+ FuncInfo->setArgumentStackToRestore(StackArgSize);
+
+ // This realignment carries over to the available bytes below. Our own
+ // callers will guarantee the space is free by giving an aligned value to
+ // CALLSEQ_START.
}
+ // Even if we're not expected to free up the space, it's useful to know how
+ // much is there while considering tail calls (because we can reuse it).
+ FuncInfo->setBytesInStackArgArea(StackArgSize);
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
- Ops.push_back(DAG.getRegister(RegsToPass[i].first,
- RegsToPass[i].second.getValueType()));
+ return Chain;
+}
+void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
+ SelectionDAG &DAG, SDLoc DL,
+ SDValue &Chain) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
- // Add a register mask operand representing the call-preserved registers. This
- // is used later in codegen to constrain register-allocation.
- const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
- const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
- assert(Mask && "Missing call preserved mask for calling convention");
- Ops.push_back(DAG.getRegisterMask(Mask));
+ SmallVector<SDValue, 8> MemOps;
- // If we needed glue, put it in as the last argument.
- if (InFlag.getNode())
- Ops.push_back(InFlag);
+ static const MCPhysReg GPRArgRegs[] = { AArch64::X0, AArch64::X1, AArch64::X2,
+ AArch64::X3, AArch64::X4, AArch64::X5,
+ AArch64::X6, AArch64::X7 };
+ static const unsigned NumGPRArgRegs = array_lengthof(GPRArgRegs);
+ unsigned FirstVariadicGPR =
+ CCInfo.getFirstUnallocated(GPRArgRegs, NumGPRArgRegs);
- SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR);
+ int GPRIdx = 0;
+ if (GPRSaveSize != 0) {
+ GPRIdx = MFI->CreateStackObject(GPRSaveSize, 8, false);
- if (IsTailCall) {
- return DAG.getNode(AArch64ISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());
+ SDValue FIN = DAG.getFrameIndex(GPRIdx, getPointerTy());
+
+ for (unsigned i = FirstVariadicGPR; i < NumGPRArgRegs; ++i) {
+ unsigned VReg = MF.addLiveIn(GPRArgRegs[i], &AArch64::GPR64RegClass);
+ SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
+ SDValue Store =
+ DAG.getStore(Val.getValue(1), DL, Val, FIN,
+ MachinePointerInfo::getStack(i * 8), false, false, 0);
+ MemOps.push_back(Store);
+ FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
+ DAG.getConstant(8, getPointerTy()));
+ }
}
+ FuncInfo->setVarArgsGPRIndex(GPRIdx);
+ FuncInfo->setVarArgsGPRSize(GPRSaveSize);
- Chain = DAG.getNode(AArch64ISD::Call, dl, NodeTys, &Ops[0], Ops.size());
- InFlag = Chain.getValue(1);
+ if (Subtarget->hasFPARMv8()) {
+ static const MCPhysReg FPRArgRegs[] = {
+ AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
+ AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7};
+ static const unsigned NumFPRArgRegs = array_lengthof(FPRArgRegs);
+ unsigned FirstVariadicFPR =
+ CCInfo.getFirstUnallocated(FPRArgRegs, NumFPRArgRegs);
+
+ unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
+ int FPRIdx = 0;
+ if (FPRSaveSize != 0) {
+ FPRIdx = MFI->CreateStackObject(FPRSaveSize, 16, false);
- // Now we can reclaim the stack, just as well do it before working out where
- // our return value is.
- if (!IsSibCall) {
- uint64_t CalleePopBytes
- = DoesCalleeRestoreStack(CallConv, TailCallOpt) ? NumBytes : 0;
+ SDValue FIN = DAG.getFrameIndex(FPRIdx, getPointerTy());
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
- DAG.getIntPtrConstant(CalleePopBytes, true),
- InFlag, dl);
- InFlag = Chain.getValue(1);
+ for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
+ unsigned VReg = MF.addLiveIn(FPRArgRegs[i], &AArch64::FPR128RegClass);
+ SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
+
+ SDValue Store =
+ DAG.getStore(Val.getValue(1), DL, Val, FIN,
+ MachinePointerInfo::getStack(i * 16), false, false, 0);
+ MemOps.push_back(Store);
+ FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
+ DAG.getConstant(16, getPointerTy()));
+ }
+ }
+ FuncInfo->setVarArgsFPRIndex(FPRIdx);
+ FuncInfo->setVarArgsFPRSize(FPRSaveSize);
}
- return LowerCallResult(Chain, InFlag, CallConv,
- IsVarArg, Ins, dl, DAG, InVals);
+ if (!MemOps.empty()) {
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
+ }
}
-SDValue
-AArch64TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
- CallingConv::ID CallConv, bool IsVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+/// LowerCallResult - Lower the result values of a call into the
+/// appropriate copies out of appropriate physical registers.
+SDValue AArch64TargetLowering::LowerCallResult(
+ SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
+ SDValue ThisVal) const {
+ CCAssignFn *RetCC = CallConv == CallingConv::WebKit_JS
+ ? RetCC_AArch64_WebKit_JS
+ : RetCC_AArch64_AAPCS;
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
+ CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
getTargetMachine(), RVLocs, *DAG.getContext());
- CCInfo.AnalyzeCallResult(Ins, CCAssignFnForNode(CallConv));
+ CCInfo.AnalyzeCallResult(Ins, RetCC);
+ // Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign VA = RVLocs[i];
- // Return values that are too big to fit into registers should use an sret
- // pointer, so this can be a lot simpler than the main argument code.
- assert(VA.isRegLoc() && "Memory locations not expected for call return");
+ // Pass 'this' value directly from the argument to return value, to avoid
+ // reg unit interference
+ if (i == 0 && isThisReturn) {
+ assert(!VA.needsCustom() && VA.getLocVT() == MVT::i64 &&
+ "unexpected return calling convention register assignment");
+ InVals.push_back(ThisVal);
+ continue;
+ }
- SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
- InFlag);
+ SDValue Val =
+ DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
Chain = Val.getValue(1);
InFlag = Val.getValue(2);
switch (VA.getLocInfo()) {
- default: llvm_unreachable("Unknown loc info!");
- case CCValAssign::Full: break;
- case CCValAssign::BCvt:
- Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
+ default:
+ llvm_unreachable("Unknown loc info!");
+ case CCValAssign::Full:
break;
- case CCValAssign::ZExt:
- case CCValAssign::SExt:
- case CCValAssign::AExt:
- // Floating-point arguments only get extended/truncated if they're going
- // in memory, so using the integer operation is acceptable here.
- Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
+ case CCValAssign::BCvt:
+ Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
break;
}
@@ -1624,17 +1979,12 @@ AArch64TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
return Chain;
}
-bool
-AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
- CallingConv::ID CalleeCC,
- bool IsVarArg,
- bool IsCalleeStructRet,
- bool IsCallerStructRet,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SelectionDAG& DAG) const {
-
+bool AArch64TargetLowering::isEligibleForTailCallOptimization(
+ SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
+ bool isCalleeStructRet, bool isCallerStructRet,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
// For CallingConv::C this function knows whether the ABI needs
// changing. That's not true for other conventions so they will have to opt in
// manually.
@@ -1650,7 +2000,8 @@ AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// we want to reuse during a tail call. Working around this *is* possible (see
// X86) but less efficient and uglier in LowerCall.
for (Function::const_arg_iterator i = CallerF->arg_begin(),
- e = CallerF->arg_end(); i != e; ++i)
+ e = CallerF->arg_end();
+ i != e; ++i)
if (i->hasByValAttr())
return false;
@@ -1666,10 +2017,10 @@ AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// I want anyone implementing a new calling convention to think long and hard
// about this assert.
- assert((!IsVarArg || CalleeCC == CallingConv::C)
- && "Unexpected variadic calling convention");
+ assert((!isVarArg || CalleeCC == CallingConv::C) &&
+ "Unexpected variadic calling convention");
- if (IsVarArg && !Outs.empty()) {
+ if (isVarArg && !Outs.empty()) {
// At least two cases here: if caller is fastcc then we can't have any
// memory arguments (we'd be expected to clean up the stack afterwards). If
// caller is C then we could potentially use its argument area.
@@ -1677,10 +2028,10 @@ AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// FIXME: for now we take the most conservative of these in both cases:
// disallow all variadic memory operands.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(),
+ CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
getTargetMachine(), ArgLocs, *DAG.getContext());
- CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));
+ CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, true));
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
if (!ArgLocs[i].isRegLoc())
return false;
@@ -1692,12 +2043,12 @@ AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
SmallVector<CCValAssign, 16> RVLocs1;
CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
getTargetMachine(), RVLocs1, *DAG.getContext());
- CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC));
+ CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForCall(CalleeCC, isVarArg));
SmallVector<CCValAssign, 16> RVLocs2;
CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
getTargetMachine(), RVLocs2, *DAG.getContext());
- CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC));
+ CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForCall(CallerCC, isVarArg));
if (RVLocs1.size() != RVLocs2.size())
return false;
@@ -1721,28 +2072,18 @@ AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
return true;
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(),
+ CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
getTargetMachine(), ArgLocs, *DAG.getContext());
- CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));
+ CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));
- const AArch64MachineFunctionInfo *FuncInfo
- = MF.getInfo<AArch64MachineFunctionInfo>();
+ const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
// If the stack arguments for this call would fit into our own save area then
// the call can be made tail.
return CCInfo.getNextStackOffset() <= FuncInfo->getBytesInStackArgArea();
}
-bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
- bool TailCallOpt) const {
- return CallCC == CallingConv::Fast && TailCallOpt;
-}
-
-bool AArch64TargetLowering::IsTailCallConvention(CallingConv::ID CallCC) const {
- return CallCC == CallingConv::Fast;
-}
-
SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
SelectionDAG &DAG,
MachineFrameInfo *MFI,
@@ -1758,7 +2099,8 @@ SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
// Add a chain value for each stack argument corresponding
for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
- UE = DAG.getEntryNode().getNode()->use_end(); U != UE; ++U)
+ UE = DAG.getEntryNode().getNode()->use_end();
+ U != UE; ++U)
if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
if (FI->getIndex() < 0) {
@@ -1771,512 +2113,607 @@ SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
ArgChains.push_back(SDValue(L, 1));
}
- // Build a tokenfactor for all the chains.
- return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other,
- &ArgChains[0], ArgChains.size());
+ // Build a tokenfactor for all the chains.
+ return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
}
-static A64CC::CondCodes IntCCToA64CC(ISD::CondCode CC) {
- switch (CC) {
- case ISD::SETEQ: return A64CC::EQ;
- case ISD::SETGT: return A64CC::GT;
- case ISD::SETGE: return A64CC::GE;
- case ISD::SETLT: return A64CC::LT;
- case ISD::SETLE: return A64CC::LE;
- case ISD::SETNE: return A64CC::NE;
- case ISD::SETUGT: return A64CC::HI;
- case ISD::SETUGE: return A64CC::HS;
- case ISD::SETULT: return A64CC::LO;
- case ISD::SETULE: return A64CC::LS;
- default: llvm_unreachable("Unexpected condition code");
- }
+bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
+ bool TailCallOpt) const {
+ return CallCC == CallingConv::Fast && TailCallOpt;
}
-bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Val) const {
- // icmp is implemented using adds/subs immediate, which take an unsigned
- // 12-bit immediate, optionally shifted left by 12 bits.
-
- // Symmetric by using adds/subs
- if (Val < 0)
- Val = -Val;
-
- return (Val & ~0xfff) == 0 || (Val & ~0xfff000) == 0;
+bool AArch64TargetLowering::IsTailCallConvention(CallingConv::ID CallCC) const {
+ return CallCC == CallingConv::Fast;
}
-SDValue AArch64TargetLowering::getSelectableIntSetCC(SDValue LHS, SDValue RHS,
- ISD::CondCode CC, SDValue &A64cc,
- SelectionDAG &DAG, SDLoc &dl) const {
- if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
- int64_t C = 0;
- EVT VT = RHSC->getValueType(0);
- bool knownInvalid = false;
-
- // I'm not convinced the rest of LLVM handles these edge cases properly, but
- // we can at least get it right.
- if (isSignedIntSetCC(CC)) {
- C = RHSC->getSExtValue();
- } else if (RHSC->getZExtValue() > INT64_MAX) {
- // A 64-bit constant not representable by a signed 64-bit integer is far
- // too big to fit into a SUBS immediate anyway.
- knownInvalid = true;
- } else {
- C = RHSC->getZExtValue();
- }
+/// LowerCall - Lower a call to a callseq_start + CALL + callseq_end chain,
+/// and add input and output parameter nodes.
+SDValue
+AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ SDLoc &DL = CLI.DL;
+ SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
+ SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
+ SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &IsTailCall = CLI.IsTailCall;
+ CallingConv::ID CallConv = CLI.CallConv;
+ bool IsVarArg = CLI.IsVarArg;
- if (!knownInvalid && !isLegalICmpImmediate(C)) {
- // Constant does not fit, try adjusting it by one?
- switch (CC) {
- default: break;
- case ISD::SETLT:
- case ISD::SETGE:
- if (isLegalICmpImmediate(C-1)) {
- CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
- RHS = DAG.getConstant(C-1, VT);
- }
- break;
- case ISD::SETULT:
- case ISD::SETUGE:
- if (isLegalICmpImmediate(C-1)) {
- CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
- RHS = DAG.getConstant(C-1, VT);
- }
- break;
- case ISD::SETLE:
- case ISD::SETGT:
- if (isLegalICmpImmediate(C+1)) {
- CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
- RHS = DAG.getConstant(C+1, VT);
- }
- break;
- case ISD::SETULE:
- case ISD::SETUGT:
- if (isLegalICmpImmediate(C+1)) {
- CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
- RHS = DAG.getConstant(C+1, VT);
- }
- break;
- }
- }
- }
+ MachineFunction &MF = DAG.getMachineFunction();
+ bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
+ bool IsThisReturn = false;
- A64CC::CondCodes CondCode = IntCCToA64CC(CC);
- A64cc = DAG.getConstant(CondCode, MVT::i32);
- return DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
- DAG.getCondCode(CC));
-}
+ AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
+ bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
+ bool IsSibCall = false;
-static A64CC::CondCodes FPCCToA64CC(ISD::CondCode CC,
- A64CC::CondCodes &Alternative) {
- A64CC::CondCodes CondCode = A64CC::Invalid;
- Alternative = A64CC::Invalid;
+ if (IsTailCall) {
+ // Check if it's really possible to do a tail call.
+ IsTailCall = isEligibleForTailCallOptimization(
+ Callee, CallConv, IsVarArg, IsStructRet,
+ MF.getFunction()->hasStructRetAttr(), Outs, OutVals, Ins, DAG);
+ if (!IsTailCall && CLI.CS && CLI.CS->isMustTailCall())
+ report_fatal_error("failed to perform tail call elimination on a call "
+ "site marked musttail");
- switch (CC) {
- default: llvm_unreachable("Unknown FP condition!");
- case ISD::SETEQ:
- case ISD::SETOEQ: CondCode = A64CC::EQ; break;
- case ISD::SETGT:
- case ISD::SETOGT: CondCode = A64CC::GT; break;
- case ISD::SETGE:
- case ISD::SETOGE: CondCode = A64CC::GE; break;
- case ISD::SETOLT: CondCode = A64CC::MI; break;
- case ISD::SETOLE: CondCode = A64CC::LS; break;
- case ISD::SETONE: CondCode = A64CC::MI; Alternative = A64CC::GT; break;
- case ISD::SETO: CondCode = A64CC::VC; break;
- case ISD::SETUO: CondCode = A64CC::VS; break;
- case ISD::SETUEQ: CondCode = A64CC::EQ; Alternative = A64CC::VS; break;
- case ISD::SETUGT: CondCode = A64CC::HI; break;
- case ISD::SETUGE: CondCode = A64CC::PL; break;
- case ISD::SETLT:
- case ISD::SETULT: CondCode = A64CC::LT; break;
- case ISD::SETLE:
- case ISD::SETULE: CondCode = A64CC::LE; break;
- case ISD::SETNE:
- case ISD::SETUNE: CondCode = A64CC::NE; break;
+ // A sibling call is one where we're under the usual C ABI and not planning
+ // to change that but can still do a tail call:
+ if (!TailCallOpt && IsTailCall)
+ IsSibCall = true;
+
+ if (IsTailCall)
+ ++NumTailCalls;
}
- return CondCode;
-}
-SDValue
-AArch64TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
- SDLoc DL(Op);
- EVT PtrVT = getPointerTy();
- const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
+ getTargetMachine(), ArgLocs, *DAG.getContext());
- switch(getTargetMachine().getCodeModel()) {
- case CodeModel::Small:
- // The most efficient code is PC-relative anyway for the small memory model,
- // so we don't need to worry about relocation model.
- return DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
- DAG.getTargetBlockAddress(BA, PtrVT, 0,
- AArch64II::MO_NO_FLAG),
- DAG.getTargetBlockAddress(BA, PtrVT, 0,
- AArch64II::MO_LO12),
- DAG.getConstant(/*Alignment=*/ 4, MVT::i32));
- case CodeModel::Large:
- return DAG.getNode(
- AArch64ISD::WrapperLarge, DL, PtrVT,
- DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G3),
- DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G2_NC),
- DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G1_NC),
- DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G0_NC));
- default:
- llvm_unreachable("Only small and large code models supported now");
+ if (IsVarArg) {
+ // Handle fixed and variable vector arguments differently.
+ // Variable vector arguments always go into memory.
+ unsigned NumArgs = Outs.size();
+
+ for (unsigned i = 0; i != NumArgs; ++i) {
+ MVT ArgVT = Outs[i].VT;
+ ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+ CCAssignFn *AssignFn = CCAssignFnForCall(CallConv,
+ /*IsVarArg=*/ !Outs[i].IsFixed);
+ bool Res = AssignFn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
+ assert(!Res && "Call operand has unhandled type");
+ (void)Res;
+ }
+ } else {
+ // At this point, Outs[].VT may already be promoted to i32. To correctly
+ // handle passing i8 as i8 instead of i32 on stack, we pass in both i32 and
+ // i8 to CC_AArch64_AAPCS with i32 being ValVT and i8 being LocVT.
+ // Since AnalyzeCallOperands uses Ins[].VT for both ValVT and LocVT, here
+ // we use a special version of AnalyzeCallOperands to pass in ValVT and
+ // LocVT.
+ unsigned NumArgs = Outs.size();
+ for (unsigned i = 0; i != NumArgs; ++i) {
+ MVT ValVT = Outs[i].VT;
+ // Get type of the original argument.
+ EVT ActualVT = getValueType(CLI.getArgs()[Outs[i].OrigArgIndex].Ty,
+ /*AllowUnknown*/ true);
+ MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : ValVT;
+ ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+ // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
+ if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
+ ValVT = MVT::i8;
+ else if (ActualMVT == MVT::i16)
+ ValVT = MVT::i16;
+
+ CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, /*IsVarArg=*/false);
+ bool Res = AssignFn(i, ValVT, ValVT, CCValAssign::Full, ArgFlags, CCInfo);
+ assert(!Res && "Call operand has unhandled type");
+ (void)Res;
+ }
}
-}
+ // Get a count of how many bytes are to be pushed on the stack.
+ unsigned NumBytes = CCInfo.getNextStackOffset();
-// (BRCOND chain, val, dest)
-SDValue
-AArch64TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
- SDLoc dl(Op);
- SDValue Chain = Op.getOperand(0);
- SDValue TheBit = Op.getOperand(1);
- SDValue DestBB = Op.getOperand(2);
-
- // AArch64 BooleanContents is the default UndefinedBooleanContent, which means
- // that as the consumer we are responsible for ignoring rubbish in higher
- // bits.
- TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit,
- DAG.getConstant(1, MVT::i32));
+ if (IsSibCall) {
+ // Since we're not changing the ABI to make this a tail call, the memory
+ // operands are already available in the caller's incoming argument space.
+ NumBytes = 0;
+ }
- SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit,
- DAG.getConstant(0, TheBit.getValueType()),
- DAG.getCondCode(ISD::SETNE));
+ // FPDiff is the byte offset of the call's argument area from the callee's.
+ // Stores to callee stack arguments will be placed in FixedStackSlots offset
+ // by this amount for a tail call. In a sibling call it must be 0 because the
+ // caller will deallocate the entire stack and the callee still expects its
+ // arguments to begin at SP+0. Completely unused for non-tail calls.
+ int FPDiff = 0;
- return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, Chain,
- A64CMP, DAG.getConstant(A64CC::NE, MVT::i32),
- DestBB);
-}
+ if (IsTailCall && !IsSibCall) {
+ unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
-// (BR_CC chain, condcode, lhs, rhs, dest)
-SDValue
-AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
- SDLoc dl(Op);
- SDValue Chain = Op.getOperand(0);
- ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
- SDValue LHS = Op.getOperand(2);
- SDValue RHS = Op.getOperand(3);
- SDValue DestBB = Op.getOperand(4);
+ // Since callee will pop argument stack as a tail call, we must keep the
+ // popped size 16-byte aligned.
+ NumBytes = RoundUpToAlignment(NumBytes, 16);
- if (LHS.getValueType() == MVT::f128) {
- // f128 comparisons are lowered to runtime calls by a routine which sets
- // LHS, RHS and CC appropriately for the rest of this function to continue.
- softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
+ // FPDiff will be negative if this tail call requires more space than we
+ // would automatically have in our incoming argument space. Positive if we
+ // can actually shrink the stack.
+ FPDiff = NumReusableBytes - NumBytes;
- // If softenSetCCOperands returned a scalar, we need to compare the result
- // against zero to select between true and false values.
- if (RHS.getNode() == 0) {
- RHS = DAG.getConstant(0, LHS.getValueType());
- CC = ISD::SETNE;
- }
+ // The stack pointer must be 16-byte aligned at all times it's used for a
+ // memory operation, which in practice means at *all* times and in
+ // particular across call boundaries. Therefore our own arguments started at
+ // a 16-byte aligned SP and the delta applied for the tail call should
+ // satisfy the same constraint.
+ assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
}
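+ // For illustration (hypothetical numbers): a call needing 40 bytes of
+ // outgoing arguments rounds NumBytes up to 48; if the incoming argument
+ // area holds NumReusableBytes == 32, then FPDiff == 32 - 48 == -16.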
- if (LHS.getValueType().isInteger()) {
- SDValue A64cc;
+ // Adjust the stack pointer for the new arguments...
+ // These operations are automatically eliminated by the prolog/epilog pass
+ if (!IsSibCall)
+ Chain =
+ DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true), DL);
- // Integers are handled in a separate function because the combinations of
- // immediates and tests can get hairy and we may want to fiddle things.
- SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
+ SDValue StackPtr = DAG.getCopyFromReg(Chain, DL, AArch64::SP, getPointerTy());
- return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
- Chain, CmpOp, A64cc, DestBB);
- }
-
- // Note that some LLVM floating-point CondCodes can't be lowered to a single
- // conditional branch, hence FPCCToA64CC can set a second test, where either
- // passing is sufficient.
- A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
- CondCode = FPCCToA64CC(CC, Alternative);
- SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
- SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
- DAG.getCondCode(CC));
- SDValue A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
- Chain, SetCC, A64cc, DestBB);
+ SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
+ SmallVector<SDValue, 8> MemOpChains;
- if (Alternative != A64CC::Invalid) {
- A64cc = DAG.getConstant(Alternative, MVT::i32);
- A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
- A64BR_CC, SetCC, A64cc, DestBB);
+ // Walk the register/memloc assignments, inserting copies/loads.
+ for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e;
+ ++i, ++realArgIdx) {
+ CCValAssign &VA = ArgLocs[i];
+ SDValue Arg = OutVals[realArgIdx];
+ ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
- }
+ // Promote the value if needed.
+ switch (VA.getLocInfo()) {
+ default:
+ llvm_unreachable("Unknown loc info!");
+ case CCValAssign::Full:
+ break;
+ case CCValAssign::SExt:
+ Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::ZExt:
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::AExt:
+ if (Outs[realArgIdx].ArgVT == MVT::i1) {
+ // AAPCS requires i1 to be zero-extended to 8-bits by the caller.
+ Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg);
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i8, Arg);
+ }
+ Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::BCvt:
+ Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::FPExt:
+ Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
+ break;
+ }
- return A64BR_CC;
-}
+ if (VA.isRegLoc()) {
+ if (realArgIdx == 0 && Flags.isReturned() && Outs[0].VT == MVT::i64) {
+ assert(VA.getLocVT() == MVT::i64 &&
+ "unexpected calling convention register assignment");
+ assert(!Ins.empty() && Ins[0].VT == MVT::i64 &&
+ "unexpected use of 'returned'");
+ IsThisReturn = true;
+ }
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+ } else {
+ assert(VA.isMemLoc());
-SDValue
-AArch64TargetLowering::LowerF128ToCall(SDValue Op, SelectionDAG &DAG,
- RTLIB::Libcall Call) const {
- ArgListTy Args;
- ArgListEntry Entry;
- for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
- EVT ArgVT = Op.getOperand(i).getValueType();
- Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
- Entry.Node = Op.getOperand(i); Entry.Ty = ArgTy;
- Entry.isSExt = false;
- Entry.isZExt = false;
- Args.push_back(Entry);
- }
- SDValue Callee = DAG.getExternalSymbol(getLibcallName(Call), getPointerTy());
+ SDValue DstAddr;
+ MachinePointerInfo DstInfo;
- Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
+ // FIXME: This works on big-endian for composite byvals, which are the
+ // common case. It should also work for fundamental types too.
+ uint32_t BEAlign = 0;
+ unsigned OpSize = Flags.isByVal() ? Flags.getByValSize() * 8
+ : VA.getLocVT().getSizeInBits();
+ OpSize = (OpSize + 7) / 8;
+ if (!Subtarget->isLittleEndian() && !Flags.isByVal()) {
+ if (OpSize < 8)
+ BEAlign = 8 - OpSize;
+ }
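+ // For illustration: a 2-byte argument has OpSize == 2, so on big-endian
+ // BEAlign == 6 and the store built below lands 6 bytes past LocMemOffset.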
+ unsigned LocMemOffset = VA.getLocMemOffset();
+ int32_t Offset = LocMemOffset + BEAlign;
+ SDValue PtrOff = DAG.getIntPtrConstant(Offset);
+ PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr, PtrOff);
+
+ if (IsTailCall) {
+ Offset = Offset + FPDiff;
+ int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
+
+ DstAddr = DAG.getFrameIndex(FI, getPointerTy());
+ DstInfo = MachinePointerInfo::getFixedStack(FI);
+
+ // Make sure any stack arguments overlapping with where we're storing
+ // are loaded before this eventual operation. Otherwise they'll be
+ // clobbered.
+ Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
+ } else {
+ SDValue PtrOff = DAG.getIntPtrConstant(Offset);
- // By default, the input chain to this libcall is the entry node of the
- // function. If the libcall is going to be emitted as a tail call then
- // isUsedByReturnOnly will change it to the right chain if the return
- // node which is being folded has a non-entry input chain.
- SDValue InChain = DAG.getEntryNode();
+ DstAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr, PtrOff);
+ DstInfo = MachinePointerInfo::getStack(LocMemOffset);
+ }
- // isTailCall may be true since the callee does not reference caller stack
- // frame. Check if it's in the right position.
- SDValue TCChain = InChain;
- bool isTailCall = isInTailCallPosition(DAG, Op.getNode(), TCChain);
- if (isTailCall)
- InChain = TCChain;
+ if (Outs[i].Flags.isByVal()) {
+ SDValue SizeNode =
+ DAG.getConstant(Outs[i].Flags.getByValSize(), MVT::i64);
+ SDValue Cpy = DAG.getMemcpy(
+ Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),
+ /*isVolatile = */ false,
+ /*alwaysInline = */ false, DstInfo, MachinePointerInfo());
- TargetLowering::
- CallLoweringInfo CLI(InChain, RetTy, false, false, false, false,
- 0, getLibcallCallingConv(Call), isTailCall,
- /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
- Callee, Args, DAG, SDLoc(Op));
- std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
+ MemOpChains.push_back(Cpy);
+ } else {
+ // Since we pass i1/i8/i16 as i1/i8/i16 on stack and Arg is already
+ // promoted to a legal register type i32, we should truncate Arg back to
+ // i1/i8/i16.
+ if (VA.getValVT() == MVT::i1 || VA.getValVT() == MVT::i8 ||
+ VA.getValVT() == MVT::i16)
+ Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
+
+ SDValue Store =
+ DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, false, false, 0);
+ MemOpChains.push_back(Store);
+ }
+ }
+ }
- if (!CallInfo.second.getNode())
- // It's a tailcall, return the chain (which is the DAG root).
- return DAG.getRoot();
+ if (!MemOpChains.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
- return CallInfo.first;
-}
+ // Build a sequence of copy-to-reg nodes chained together with token chain
+ // and flag operands which copy the outgoing args into the appropriate regs.
+ SDValue InFlag;
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+ Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[i].first,
+ RegsToPass[i].second, InFlag);
+ InFlag = Chain.getValue(1);
+ }
-SDValue
-AArch64TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
- if (Op.getOperand(0).getValueType() != MVT::f128) {
- // It's legal except when f128 is involved
- return Op;
+ // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
+ // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
+ // node so that legalize doesn't hack it.
+ if (getTargetMachine().getCodeModel() == CodeModel::Large &&
+ Subtarget->isTargetMachO()) {
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ const GlobalValue *GV = G->getGlobal();
+ bool InternalLinkage = GV->hasInternalLinkage();
+ if (InternalLinkage)
+ Callee = DAG.getTargetGlobalAddress(GV, DL, getPointerTy(), 0, 0);
+ else {
+ Callee = DAG.getTargetGlobalAddress(GV, DL, getPointerTy(), 0,
+ AArch64II::MO_GOT);
+ Callee = DAG.getNode(AArch64ISD::LOADgot, DL, getPointerTy(), Callee);
+ }
+ } else if (ExternalSymbolSDNode *S =
+ dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ const char *Sym = S->getSymbol();
+ Callee =
+ DAG.getTargetExternalSymbol(Sym, getPointerTy(), AArch64II::MO_GOT);
+ Callee = DAG.getNode(AArch64ISD::LOADgot, DL, getPointerTy(), Callee);
+ }
+ } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ const GlobalValue *GV = G->getGlobal();
+ Callee = DAG.getTargetGlobalAddress(GV, DL, getPointerTy(), 0, 0);
+ } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ const char *Sym = S->getSymbol();
+ Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), 0);
}
- RTLIB::Libcall LC;
- LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType());
+ // We don't usually want to end the call-sequence here because we would tidy
+ // the frame up *after* the call, however in the ABI-changing tail-call case
+ // we've carefully laid out the parameters so that when sp is reset they'll be
+ // in the correct location.
+ if (IsTailCall && !IsSibCall) {
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
+ DAG.getIntPtrConstant(0, true), InFlag, DL);
+ InFlag = Chain.getValue(1);
+ }
- SDValue SrcVal = Op.getOperand(0);
- return makeLibCall(DAG, LC, Op.getValueType(), &SrcVal, 1,
- /*isSigned*/ false, SDLoc(Op)).first;
-}
+ std::vector<SDValue> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Callee);
-SDValue
-AArch64TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
- assert(Op.getValueType() == MVT::f128 && "Unexpected lowering");
+ if (IsTailCall) {
+ // Each tail call may have to adjust the stack by a different amount, so
+ // this information must travel along with the operation for eventual
+ // consumption by emitEpilogue.
+ Ops.push_back(DAG.getTargetConstant(FPDiff, MVT::i32));
+ }
- RTLIB::Libcall LC;
- LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType());
+ // Add argument registers to the end of the list so that they are known live
+ // into the call.
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
+ Ops.push_back(DAG.getRegister(RegsToPass[i].first,
+ RegsToPass[i].second.getValueType()));
- return LowerF128ToCall(Op, DAG, LC);
-}
+ // Add a register mask operand representing the call-preserved registers.
+ const uint32_t *Mask;
+ const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const AArch64RegisterInfo *ARI =
+ static_cast<const AArch64RegisterInfo *>(TRI);
+ if (IsThisReturn) {
+ // For 'this' returns, use the X0-preserving mask if applicable
+ Mask = ARI->getThisReturnPreservedMask(CallConv);
+ if (!Mask) {
+ IsThisReturn = false;
+ Mask = ARI->getCallPreservedMask(CallConv);
+ }
+ } else
+ Mask = ARI->getCallPreservedMask(CallConv);
-SDValue
-AArch64TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
- bool IsSigned) const {
- if (Op.getOperand(0).getValueType() != MVT::f128) {
- // It's legal except when f128 is involved
- return Op;
- }
+ assert(Mask && "Missing call preserved mask for calling convention");
+ Ops.push_back(DAG.getRegisterMask(Mask));
- RTLIB::Libcall LC;
- if (IsSigned)
- LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), Op.getValueType());
- else
- LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), Op.getValueType());
+ if (InFlag.getNode())
+ Ops.push_back(InFlag);
- return LowerF128ToCall(Op, DAG, LC);
-}
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
-SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
- MachineFunction &MF = DAG.getMachineFunction();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MFI->setReturnAddressIsTaken(true);
+ // If we're doing a tail call, use a TC_RETURN here rather than an
+ // actual call instruction.
+ if (IsTailCall)
+ return DAG.getNode(AArch64ISD::TC_RETURN, DL, NodeTys, Ops);
- EVT VT = Op.getValueType();
- SDLoc dl(Op);
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
- if (Depth) {
- SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
- SDValue Offset = DAG.getConstant(8, MVT::i64);
- return DAG.getLoad(VT, dl, DAG.getEntryNode(),
- DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
- MachinePointerInfo(), false, false, false, 0);
- }
+ // Returns a chain and a flag for retval copy to use.
+ Chain = DAG.getNode(AArch64ISD::CALL, DL, NodeTys, Ops);
+ InFlag = Chain.getValue(1);
- // Return X30, which contains the return address. Mark it an implicit live-in.
- unsigned Reg = MF.addLiveIn(AArch64::X30, getRegClassFor(MVT::i64));
- return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, MVT::i64);
-}
+ uint64_t CalleePopBytes = DoesCalleeRestoreStack(CallConv, TailCallOpt)
+ ? RoundUpToAlignment(NumBytes, 16)
+ : 0;
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
+ DAG.getIntPtrConstant(CalleePopBytes, true),
+ InFlag, DL);
+ if (!Ins.empty())
+ InFlag = Chain.getValue(1);
-SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG)
- const {
- MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
- MFI->setFrameAddressIsTaken(true);
+ // Handle result values, copying them out of physregs into vregs that we
+ // return.
+ return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
+ InVals, IsThisReturn,
+ IsThisReturn ? OutVals[0] : SDValue());
+}
- EVT VT = Op.getValueType();
- SDLoc dl(Op);
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
- unsigned FrameReg = AArch64::X29;
- SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
- while (Depth--)
- FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
- MachinePointerInfo(),
- false, false, false, 0);
- return FrameAddr;
+bool AArch64TargetLowering::CanLowerReturn(
+ CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
+ CCAssignFn *RetCC = CallConv == CallingConv::WebKit_JS
+ ? RetCC_AArch64_WebKit_JS
+ : RetCC_AArch64_AAPCS;
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
+ return CCInfo.CheckReturn(Outs, RetCC);
}
SDValue
-AArch64TargetLowering::LowerGlobalAddressELFLarge(SDValue Op,
- SelectionDAG &DAG) const {
- assert(getTargetMachine().getCodeModel() == CodeModel::Large);
- assert(getTargetMachine().getRelocationModel() == Reloc::Static);
+AArch64TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ SDLoc DL, SelectionDAG &DAG) const {
+ CCAssignFn *RetCC = CallConv == CallingConv::WebKit_JS
+ ? RetCC_AArch64_WebKit_JS
+ : RetCC_AArch64_AAPCS;
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+ getTargetMachine(), RVLocs, *DAG.getContext());
+ CCInfo.AnalyzeReturn(Outs, RetCC);
- EVT PtrVT = getPointerTy();
- SDLoc dl(Op);
- const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
- const GlobalValue *GV = GN->getGlobal();
+ // Copy the result values into the output registers.
+ SDValue Flag;
+ SmallVector<SDValue, 4> RetOps(1, Chain);
+ for (unsigned i = 0, realRVLocIdx = 0; i != RVLocs.size();
+ ++i, ++realRVLocIdx) {
+ CCValAssign &VA = RVLocs[i];
+ assert(VA.isRegLoc() && "Can only return in registers!");
+ SDValue Arg = OutVals[realRVLocIdx];
- SDValue GlobalAddr = DAG.getNode(
- AArch64ISD::WrapperLarge, dl, PtrVT,
- DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G3),
- DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G2_NC),
- DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G1_NC),
- DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G0_NC));
+ switch (VA.getLocInfo()) {
+ default:
+ llvm_unreachable("Unknown loc info!");
+ case CCValAssign::Full:
+ if (Outs[i].ArgVT == MVT::i1) {
+ // AAPCS requires i1 to be zero-extended to i8 by the producer of the
+ // value. This is strictly redundant on Darwin (which uses "zeroext
+ // i1"), but will be optimised out before ISel.
+ Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg);
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
+ }
+ break;
+ case CCValAssign::BCvt:
+ Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
+ break;
+ }
- if (GN->getOffset() != 0)
- return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr,
- DAG.getConstant(GN->getOffset(), PtrVT));
+ Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
+ Flag = Chain.getValue(1);
+ RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
+ }
+
+ RetOps[0] = Chain; // Update chain.
+
+ // Add the flag if we have it.
+ if (Flag.getNode())
+ RetOps.push_back(Flag);
- return GlobalAddr;
+ return DAG.getNode(AArch64ISD::RET_FLAG, DL, MVT::Other, RetOps);
}
-SDValue
-AArch64TargetLowering::LowerGlobalAddressELFSmall(SDValue Op,
- SelectionDAG &DAG) const {
- assert(getTargetMachine().getCodeModel() == CodeModel::Small);
+//===----------------------------------------------------------------------===//
+// Other Lowering Code
+//===----------------------------------------------------------------------===//
+SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
+ SelectionDAG &DAG) const {
EVT PtrVT = getPointerTy();
- SDLoc dl(Op);
- const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
- const GlobalValue *GV = GN->getGlobal();
- unsigned Alignment = GV->getAlignment();
- Reloc::Model RelocM = getTargetMachine().getRelocationModel();
- if (GV->isWeakForLinker() && GV->isDeclaration() && RelocM == Reloc::Static) {
- // Weak undefined symbols can't use ADRP/ADD pair since they should evaluate
- // to zero when they remain undefined. In PIC mode the GOT can take care of
- // this, but in absolute mode we use a constant pool load.
- SDValue PoolAddr;
- PoolAddr = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
- DAG.getTargetConstantPool(GV, PtrVT, 0, 0,
- AArch64II::MO_NO_FLAG),
- DAG.getTargetConstantPool(GV, PtrVT, 0, 0,
- AArch64II::MO_LO12),
- DAG.getConstant(8, MVT::i32));
- SDValue GlobalAddr = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), PoolAddr,
- MachinePointerInfo::getConstantPool(),
- /*isVolatile=*/ false,
- /*isNonTemporal=*/ true,
- /*isInvariant=*/ true, 8);
- if (GN->getOffset() != 0)
- return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr,
- DAG.getConstant(GN->getOffset(), PtrVT));
-
- return GlobalAddr;
- }
-
- if (Alignment == 0) {
- const PointerType *GVPtrTy = cast<PointerType>(GV->getType());
- if (GVPtrTy->getElementType()->isSized()) {
- Alignment
- = getDataLayout()->getABITypeAlignment(GVPtrTy->getElementType());
- } else {
- // Be conservative if we can't guess, not that it really matters:
- // functions and labels aren't valid for loads, and the methods used to
- // actually calculate an address work with any alignment.
- Alignment = 1;
- }
+ SDLoc DL(Op);
+ const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
+ unsigned char OpFlags =
+ Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
+
+ assert(cast<GlobalAddressSDNode>(Op)->getOffset() == 0 &&
+ "unexpected offset in global node");
+
+ // This also catches the large code model case for Darwin.
+ if ((OpFlags & AArch64II::MO_GOT) != 0) {
+ SDValue GotAddr = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
+ // FIXME: Once remat is capable of dealing with instructions with register
+ // operands, expand this into two nodes instead of using a wrapper node.
+ return DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, GotAddr);
}
- unsigned char HiFixup, LoFixup;
- bool UseGOT = getSubtarget()->GVIsIndirectSymbol(GV, RelocM);
-
- if (UseGOT) {
- HiFixup = AArch64II::MO_GOT;
- LoFixup = AArch64II::MO_GOT_LO12;
- Alignment = 8;
+ if (getTargetMachine().getCodeModel() == CodeModel::Large) {
+ const unsigned char MO_NC = AArch64II::MO_NC;
+ return DAG.getNode(
+ AArch64ISD::WrapperLarge, DL, PtrVT,
+ DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_G3),
+ DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_G2 | MO_NC),
+ DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_G1 | MO_NC),
+ DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_G0 | MO_NC));
} else {
- HiFixup = AArch64II::MO_NO_FLAG;
- LoFixup = AArch64II::MO_LO12;
+ // Use ADRP/ADD or ADRP/LDR for everything else: the small model on ELF and
+ // the only correct model on Darwin.
+ SDValue Hi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
+ OpFlags | AArch64II::MO_PAGE);
+ unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC;
+ SDValue Lo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, LoFlags);
+
+ SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, Hi);
+ return DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, Lo);
}
+}
- // AArch64's small model demands the following sequence:
- // ADRP x0, somewhere
- // ADD x0, x0, #:lo12:somewhere ; (or LDR directly).
- SDValue GlobalRef = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
- DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
- HiFixup),
- DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
- LoFixup),
- DAG.getConstant(Alignment, MVT::i32));
+/// \brief Convert a TLS address reference into the correct sequence of loads
+/// and calls to compute the variable's address (for Darwin, currently) and
+/// return an SDValue containing the final node.
- if (UseGOT) {
- GlobalRef = DAG.getNode(AArch64ISD::GOTLoad, dl, PtrVT, DAG.getEntryNode(),
- GlobalRef);
- }
+/// Darwin only has one TLS scheme which must be capable of dealing with the
+/// fully general situation, in the worst case. This means:
+/// + "extern __thread" declaration.
+/// + Defined in a possibly unknown dynamic library.
+///
+/// The general system is that each __thread variable has a [3 x i64] descriptor
+/// which contains information used by the runtime to calculate the address. The
+/// only part of this the compiler needs to know about is the first xword, which
+/// contains a function pointer that must be called with the address of the
+/// entire descriptor in "x0".
+///
+/// Since this descriptor may be in a different unit, in general even the
+/// descriptor must be accessed via an indirect load. The "ideal" code sequence
+/// is:
+/// adrp x0, _var@TLVPPAGE
+/// ldr x0, [x0, _var@TLVPPAGEOFF] ; x0 now contains address of descriptor
+/// ldr x1, [x0] ; x1 contains 1st entry of descriptor,
+/// ; the function pointer
+/// blr x1 ; Uses descriptor address in x0
+/// ; Address of _var is now in x0.
+///
+/// If the address of _var's descriptor *is* known to the linker, then it can
+/// change the first "ldr" instruction to an appropriate "add x0, x0, #imm" for
+/// a slight efficiency gain.
+SDValue
+AArch64TargetLowering::LowerDarwinGlobalTLSAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+ assert(Subtarget->isTargetDarwin() && "TLS only supported on Darwin");
- if (GN->getOffset() != 0)
- return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalRef,
- DAG.getConstant(GN->getOffset(), PtrVT));
+ SDLoc DL(Op);
+ MVT PtrVT = getPointerTy();
+ const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
- return GlobalRef;
-}
+ SDValue TLVPAddr =
+ DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
+ SDValue DescAddr = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TLVPAddr);
-SDValue
-AArch64TargetLowering::LowerGlobalAddressELF(SDValue Op,
- SelectionDAG &DAG) const {
- // TableGen doesn't have easy access to the CodeModel or RelocationModel, so
- // we make those distinctions here.
-
- switch (getTargetMachine().getCodeModel()) {
- case CodeModel::Small:
- return LowerGlobalAddressELFSmall(Op, DAG);
- case CodeModel::Large:
- return LowerGlobalAddressELFLarge(Op, DAG);
- default:
- llvm_unreachable("Only small and large code models supported now");
- }
+ // The first entry in the descriptor is a function pointer that we must call
+ // to obtain the address of the variable.
+ SDValue Chain = DAG.getEntryNode();
+ SDValue FuncTLVGet =
+ DAG.getLoad(MVT::i64, DL, Chain, DescAddr, MachinePointerInfo::getGOT(),
+ false, true, true, 8);
+ Chain = FuncTLVGet.getValue(1);
+
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ MFI->setAdjustsStack(true);
+
+ // TLS calls preserve all registers except those that absolutely must be
+ // trashed: X0 (it takes an argument), LR (it's a call) and NZCV (let's not be
+ // silly).
+ const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const AArch64RegisterInfo *ARI =
+ static_cast<const AArch64RegisterInfo *>(TRI);
+ const uint32_t *Mask = ARI->getTLSCallPreservedMask();
+
+ // Finally, we can make the call. This is just a degenerate version of a
+ // normal AArch64 call node: x0 takes the address of the descriptor, and
+ // returns the address of the variable in this thread.
+ Chain = DAG.getCopyToReg(Chain, DL, AArch64::X0, DescAddr, SDValue());
+ Chain =
+ DAG.getNode(AArch64ISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue),
+ Chain, FuncTLVGet, DAG.getRegister(AArch64::X0, MVT::i64),
+ DAG.getRegisterMask(Mask), Chain.getValue(1));
+ return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Chain.getValue(1));
}
-SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr,
- SDValue DescAddr,
- SDLoc DL,
- SelectionDAG &DAG) const {
+/// When accessing thread-local variables under either the general-dynamic or
+/// local-dynamic system, we make a "TLS-descriptor" call. The variable will
+/// have a descriptor, accessible via a PC-relative ADRP, and whose first entry
+/// is a function pointer to carry out the resolution. This function takes the
+/// address of the descriptor in X0 and returns the TPIDR_EL0 offset in X0. All
+/// other registers (except LR, NZCV) are preserved.
+///
+/// Thus, the ideal call sequence on AArch64 is:
+///
+/// adrp x0, :tlsdesc:thread_var
+/// ldr x8, [x0, :tlsdesc_lo12:thread_var]
+/// add x0, x0, :tlsdesc_lo12:thread_var
+/// .tlsdesccall thread_var
+/// blr x8
+/// (TPIDR_EL0 offset now in x0).
+///
+/// The ".tlsdesccall" directive instructs the assembler to insert a particular
+/// relocation to help the linker relax this sequence if it turns out to be too
+/// conservative.
+///
+/// FIXME: we currently produce an extra, duplicated, ADRP instruction, but this
+/// is harmless.
+SDValue AArch64TargetLowering::LowerELFTLSDescCall(SDValue SymAddr,
+ SDValue DescAddr, SDLoc DL,
+ SelectionDAG &DAG) const {
EVT PtrVT = getPointerTy();
// The function we need to call is simply the first entry in the GOT for this
// descriptor, load it in preparation.
- SDValue Func, Chain;
- Func = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(),
- DescAddr);
+ SDValue Func = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, SymAddr);
+
+ // TLS calls preserve all registers except those that absolutely must be
+ // trashed: X0 (it takes an argument), LR (it's a call) and NZCV (let's not be
+ // silly).
+ const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const AArch64RegisterInfo *ARI =
+ static_cast<const AArch64RegisterInfo *>(TRI);
+ const uint32_t *Mask = ARI->getTLSCallPreservedMask();
// The function takes only one argument: the address of the descriptor itself
// in X0.
- SDValue Glue;
+ SDValue Glue, Chain;
Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::X0, DescAddr, Glue);
Glue = Chain.getValue(1);
- // Finally, there's a special calling-convention which means that the lookup
- // must preserve all registers (except X0, obviously).
- const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
- const AArch64RegisterInfo *A64RI
- = static_cast<const AArch64RegisterInfo *>(TRI);
- const uint32_t *Mask = A64RI->getTLSDescCallPreservedMask();
-
// We're now ready to populate the argument list, as with a normal call:
- std::vector<SDValue> Ops;
+ SmallVector<SDValue, 6> Ops;
Ops.push_back(Chain);
Ops.push_back(Func);
Ops.push_back(SymAddr);
@@ -2285,22 +2722,18 @@ SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr,
Ops.push_back(Glue);
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
- Chain = DAG.getNode(AArch64ISD::TLSDESCCALL, DL, NodeTys, &Ops[0],
- Ops.size());
+ Chain = DAG.getNode(AArch64ISD::TLSDESC_CALL, DL, NodeTys, Ops);
Glue = Chain.getValue(1);
- // After the call, the offset from TPIDR_EL0 is in X0, copy it out and pass it
- // back to the generic handling code.
return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue);
}
SDValue
-AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
- SelectionDAG &DAG) const {
- assert(getSubtarget()->isTargetELF() &&
- "TLS not implemented for non-ELF targets");
- assert(getTargetMachine().getCodeModel() == CodeModel::Small
- && "TLS only supported in small memory model");
+AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+ assert(Subtarget->isTargetELF() && "This function expects an ELF target");
+ assert(getTargetMachine().getCodeModel() == CodeModel::Small &&
+ "ELF TLS only supported in small memory model");
const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());
@@ -2312,39 +2745,22 @@ AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);
- if (Model == TLSModel::InitialExec) {
- TPOff = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
- DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
- AArch64II::MO_GOTTPREL),
- DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
- AArch64II::MO_GOTTPREL_LO12),
- DAG.getConstant(8, MVT::i32));
- TPOff = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(),
- TPOff);
- } else if (Model == TLSModel::LocalExec) {
- SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
- AArch64II::MO_TPREL_G1);
- SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
- AArch64II::MO_TPREL_G0_NC);
-
- TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
- DAG.getTargetConstant(1, MVT::i32)), 0);
- TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
- TPOff, LoVar,
- DAG.getTargetConstant(0, MVT::i32)), 0);
- } else if (Model == TLSModel::GeneralDynamic) {
- // Accesses used in this sequence go via the TLS descriptor which lives in
- // the GOT. Prepare an address we can use to handle this.
- SDValue HiDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
- AArch64II::MO_TLSDESC);
- SDValue LoDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
- AArch64II::MO_TLSDESC_LO12);
- SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
- HiDesc, LoDesc,
- DAG.getConstant(8, MVT::i32));
- SDValue SymAddr = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0);
-
- TPOff = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
+ if (Model == TLSModel::LocalExec) {
+ SDValue HiVar = DAG.getTargetGlobalAddress(
+ GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_G1);
+ SDValue LoVar = DAG.getTargetGlobalAddress(
+ GV, DL, PtrVT, 0,
+ AArch64II::MO_TLS | AArch64II::MO_G0 | AArch64II::MO_NC);
+
+ TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZXi, DL, PtrVT, HiVar,
+ DAG.getTargetConstant(16, MVT::i32)),
+ 0);
+ TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, LoVar,
+ DAG.getTargetConstant(0, MVT::i32)),
+ 0);
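+ // The intent is roughly the classic local-exec sequence, e.g.
+ //   movz x0, #:tprel_g1:var, lsl #16
+ //   movk x0, #:tprel_g0_nc:var
+ // leaving the variable's offset from TPIDR_EL0 in the register.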
+ } else if (Model == TLSModel::InitialExec) {
+ TPOff = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
+ TPOff = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TPOff);
} else if (Model == TLSModel::LocalDynamic) {
// Local-dynamic accesses proceed in two phases. A general-dynamic TLS
// descriptor call against the special symbol _TLS_MODULE_BASE_ to calculate
@@ -2352,449 +2768,758 @@ AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
// calculation.
// These accesses will need deduplicating if there's more than one.
- AArch64MachineFunctionInfo* MFI = DAG.getMachineFunction()
- .getInfo<AArch64MachineFunctionInfo>();
+ AArch64FunctionInfo *MFI =
+ DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
MFI->incNumLocalDynamicTLSAccesses();
-
- // Get the location of _TLS_MODULE_BASE_:
- SDValue HiDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
- AArch64II::MO_TLSDESC);
- SDValue LoDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
- AArch64II::MO_TLSDESC_LO12);
- SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
- HiDesc, LoDesc,
- DAG.getConstant(8, MVT::i32));
- SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT);
-
- ThreadBase = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
-
- // Get the variable's offset from _TLS_MODULE_BASE_
- SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
- AArch64II::MO_DTPREL_G1);
- SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
- AArch64II::MO_DTPREL_G0_NC);
-
- TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
- DAG.getTargetConstant(0, MVT::i32)), 0);
- TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
- TPOff, LoVar,
- DAG.getTargetConstant(0, MVT::i32)), 0);
+ // Accesses used in this sequence go via the TLS descriptor which lives in
+ // the GOT. Prepare an address we can use to handle this.
+ SDValue HiDesc = DAG.getTargetExternalSymbol(
+ "_TLS_MODULE_BASE_", PtrVT, AArch64II::MO_TLS | AArch64II::MO_PAGE);
+ SDValue LoDesc = DAG.getTargetExternalSymbol(
+ "_TLS_MODULE_BASE_", PtrVT,
+ AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
+
+ // First argument to the descriptor call is the address of the descriptor
+ // itself.
+ SDValue DescAddr = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, HiDesc);
+ DescAddr = DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, DescAddr, LoDesc);
+
+ // The call needs a relocation too for linker relaxation. It doesn't make
+ // sense to call it MO_PAGE or MO_PAGEOFF though so we need another copy of
+ // the address.
+ SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
+ AArch64II::MO_TLS);
+
+ // Now we can calculate the offset from TPIDR_EL0 to this module's
+ // thread-local area.
+ TPOff = LowerELFTLSDescCall(SymAddr, DescAddr, DL, DAG);
+
+ // Now use :dtprel_whatever: operations to calculate this variable's offset
+ // in its thread-storage area.
+ SDValue HiVar = DAG.getTargetGlobalAddress(
+ GV, DL, MVT::i64, 0, AArch64II::MO_TLS | AArch64II::MO_G1);
+ SDValue LoVar = DAG.getTargetGlobalAddress(
+ GV, DL, MVT::i64, 0,
+ AArch64II::MO_TLS | AArch64II::MO_G0 | AArch64II::MO_NC);
+
+ SDValue DTPOff =
+ SDValue(DAG.getMachineNode(AArch64::MOVZXi, DL, PtrVT, HiVar,
+ DAG.getTargetConstant(16, MVT::i32)),
+ 0);
+ DTPOff =
+ SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, DTPOff, LoVar,
+ DAG.getTargetConstant(0, MVT::i32)),
+ 0);
+
+ TPOff = DAG.getNode(ISD::ADD, DL, PtrVT, TPOff, DTPOff);
+ } else if (Model == TLSModel::GeneralDynamic) {
+ // Accesses used in this sequence go via the TLS descriptor which lives in
+ // the GOT. Prepare an address we can use to handle this.
+ SDValue HiDesc = DAG.getTargetGlobalAddress(
+ GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_PAGE);
+ SDValue LoDesc = DAG.getTargetGlobalAddress(
+ GV, DL, PtrVT, 0,
+ AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
+
+ // First argument to the descriptor call is the address of the descriptor
+ // itself.
+ SDValue DescAddr = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, HiDesc);
+ DescAddr = DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, DescAddr, LoDesc);
+
+ // The call needs a relocation too for linker relaxation. It doesn't make
+ // sense to call it MO_PAGE or MO_PAGEOFF though so we need another copy of
+ // the address.
+ SDValue SymAddr =
+ DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
+
+ // Finally we can make a call to calculate the offset from tpidr_el0.
+ TPOff = LowerELFTLSDescCall(SymAddr, DescAddr, DL, DAG);
} else
- llvm_unreachable("Unsupported TLS access model");
-
+ llvm_unreachable("Unsupported ELF TLS access model");
return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
}
-SDValue
-AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
- bool IsSigned) const {
- if (Op.getValueType() != MVT::f128) {
- // Legal for everything except f128.
- return Op;
- }
+SDValue AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+ if (Subtarget->isTargetDarwin())
+ return LowerDarwinGlobalTLSAddress(Op, DAG);
+ else if (Subtarget->isTargetELF())
+ return LowerELFGlobalTLSAddress(Op, DAG);
- RTLIB::Libcall LC;
- if (IsSigned)
- LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType());
- else
- LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType());
-
- return LowerF128ToCall(Op, DAG, LC);
-}
-
-
-SDValue
-AArch64TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
- JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
- SDLoc dl(JT);
- EVT PtrVT = getPointerTy();
-
- // When compiling PIC, jump tables get put in the code section so a static
- // relocation-style is acceptable for both cases.
- switch (getTargetMachine().getCodeModel()) {
- case CodeModel::Small:
- return DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
- DAG.getTargetJumpTable(JT->getIndex(), PtrVT),
- DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
- AArch64II::MO_LO12),
- DAG.getConstant(1, MVT::i32));
- case CodeModel::Large:
- return DAG.getNode(
- AArch64ISD::WrapperLarge, dl, PtrVT,
- DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G3),
- DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G2_NC),
- DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G1_NC),
- DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G0_NC));
- default:
- llvm_unreachable("Only small and large code models supported now");
- }
+ llvm_unreachable("Unexpected platform trying to use TLS");
}
-
-// (SELECT_CC lhs, rhs, iftrue, iffalse, condcode)
-SDValue
-AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
+SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
+ SDValue Chain = Op.getOperand(0);
+ ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
+ SDValue LHS = Op.getOperand(2);
+ SDValue RHS = Op.getOperand(3);
+ SDValue Dest = Op.getOperand(4);
SDLoc dl(Op);
- SDValue LHS = Op.getOperand(0);
- SDValue RHS = Op.getOperand(1);
- SDValue IfTrue = Op.getOperand(2);
- SDValue IfFalse = Op.getOperand(3);
- ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
+ // Handle f128 first, since lowering it will result in comparing the return
+ // value of a libcall against zero, which is just what the rest of LowerBR_CC
+ // is expecting to deal with.
if (LHS.getValueType() == MVT::f128) {
- // f128 comparisons are lowered to libcalls, but slot in nicely here
- // afterwards.
softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
// If softenSetCCOperands returned a scalar, we need to compare the result
// against zero to select between true and false values.
- if (RHS.getNode() == 0) {
+ if (!RHS.getNode()) {
RHS = DAG.getConstant(0, LHS.getValueType());
CC = ISD::SETNE;
}
}
- if (LHS.getValueType().isInteger()) {
- SDValue A64cc;
+ // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
+ // instruction.
+ unsigned Opc = LHS.getOpcode();
+ if (LHS.getResNo() == 1 && isa<ConstantSDNode>(RHS) &&
+ cast<ConstantSDNode>(RHS)->isOne() &&
+ (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
+ Opc == ISD::USUBO || Opc == ISD::SMULO || Opc == ISD::UMULO)) {
+ assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
+ "Unexpected condition code.");
+ // Only lower legal XALUO ops.
+ if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0)))
+ return SDValue();
+
+ // The actual operation with overflow check.
+ AArch64CC::CondCode OFCC;
+ SDValue Value, Overflow;
+ std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, LHS.getValue(0), DAG);
- // Integers are handled in a separate function because the combinations of
- // immediates and tests can get hairy and we may want to fiddle things.
- SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
+ if (CC == ISD::SETNE)
+ OFCC = getInvertedCondCode(OFCC);
+ SDValue CCVal = DAG.getConstant(OFCC, MVT::i32);
- return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
- CmpOp, IfTrue, IfFalse, A64cc);
+ return DAG.getNode(AArch64ISD::BRCOND, SDLoc(LHS), MVT::Other, Chain, Dest,
+ CCVal, Overflow);
}
- // Note that some LLVM floating-point CondCodes can't be lowered to a single
- // conditional branch, hence FPCCToA64CC can set a second test, where either
- // passing is sufficient.
- A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
- CondCode = FPCCToA64CC(CC, Alternative);
- SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
- SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
- DAG.getCondCode(CC));
- SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl,
- Op.getValueType(),
- SetCC, IfTrue, IfFalse, A64cc);
+ if (LHS.getValueType().isInteger()) {
+ assert((LHS.getValueType() == RHS.getValueType()) &&
+ (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64));
+
+ // If the RHS of the comparison is zero, we can potentially fold this
+ // to a specialized branch.
+ const ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS);
+ if (RHSC && RHSC->getZExtValue() == 0) {
+ if (CC == ISD::SETEQ) {
+ // See if we can use a TBZ to fold in an AND as well.
+ // TBZ has a smaller branch displacement than CBZ. If the offset is
+ // out of bounds, a late MI-layer pass rewrites branches.
+ // 403.gcc is an example that hits this case.
+ if (LHS.getOpcode() == ISD::AND &&
+ isa<ConstantSDNode>(LHS.getOperand(1)) &&
+ isPowerOf2_64(LHS.getConstantOperandVal(1))) {
+ SDValue Test = LHS.getOperand(0);
+ uint64_t Mask = LHS.getConstantOperandVal(1);
+
+ // TBZ only operates on i64's, but the ext should be free.
+ if (Test.getValueType() == MVT::i32)
+ Test = DAG.getAnyExtOrTrunc(Test, dl, MVT::i64);
+
+ return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, Test,
+ DAG.getConstant(Log2_64(Mask), MVT::i64), Dest);
+ }
- if (Alternative != A64CC::Invalid) {
- A64cc = DAG.getConstant(Alternative, MVT::i32);
- A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
- SetCC, IfTrue, A64SELECT_CC, A64cc);
+ return DAG.getNode(AArch64ISD::CBZ, dl, MVT::Other, Chain, LHS, Dest);
+ } else if (CC == ISD::SETNE) {
+ // See if we can use a TBZ to fold in an AND as well.
+ // TBZ has a smaller branch displacement than CBZ. If the offset is
+ // out of bounds, a late MI-layer pass rewrites branches.
+ // 403.gcc is an example that hits this case.
+ if (LHS.getOpcode() == ISD::AND &&
+ isa<ConstantSDNode>(LHS.getOperand(1)) &&
+ isPowerOf2_64(LHS.getConstantOperandVal(1))) {
+ SDValue Test = LHS.getOperand(0);
+ uint64_t Mask = LHS.getConstantOperandVal(1);
+
+ // TBNZ only operates on i64's, but the ext should be free.
+ if (Test.getValueType() == MVT::i32)
+ Test = DAG.getAnyExtOrTrunc(Test, dl, MVT::i64);
+
+ return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, Test,
+ DAG.getConstant(Log2_64(Mask), MVT::i64), Dest);
+ }
+
+ return DAG.getNode(AArch64ISD::CBNZ, dl, MVT::Other, Chain, LHS, Dest);
+ }
+ }
+
+ SDValue CCVal;
+ SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
+ return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
+ Cmp);
+ }
+ assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
+
+ // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
+ // clean. Some of them require two branches to implement.
+ SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
+ AArch64CC::CondCode CC1, CC2;
+ changeFPCCToAArch64CC(CC, CC1, CC2);
+ SDValue CC1Val = DAG.getConstant(CC1, MVT::i32);
+ SDValue BR1 =
+ DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CC1Val, Cmp);
+ if (CC2 != AArch64CC::AL) {
+ SDValue CC2Val = DAG.getConstant(CC2, MVT::i32);
+ return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, BR1, Dest, CC2Val,
+ Cmp);
}
- return A64SELECT_CC;
+ return BR1;
}
-// (SELECT testbit, iftrue, iffalse)
-SDValue
-AArch64TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
- SDLoc dl(Op);
- SDValue TheBit = Op.getOperand(0);
- SDValue IfTrue = Op.getOperand(1);
- SDValue IfFalse = Op.getOperand(2);
+SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
+ SelectionDAG &DAG) const {
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+
+ SDValue In1 = Op.getOperand(0);
+ SDValue In2 = Op.getOperand(1);
+ EVT SrcVT = In2.getValueType();
+ if (SrcVT != VT) {
+ if (SrcVT == MVT::f32 && VT == MVT::f64)
+ In2 = DAG.getNode(ISD::FP_EXTEND, DL, VT, In2);
+ else if (SrcVT == MVT::f64 && VT == MVT::f32)
+ In2 = DAG.getNode(ISD::FP_ROUND, DL, VT, In2, DAG.getIntPtrConstant(0));
+ else
+ // FIXME: Src type is different, bail out for now. Can VT really be a
+ // vector type?
+ return SDValue();
+ }
+
+ EVT VecVT;
+ EVT EltVT;
+ SDValue EltMask, VecVal1, VecVal2;
+ if (VT == MVT::f32 || VT == MVT::v2f32 || VT == MVT::v4f32) {
+ EltVT = MVT::i32;
+ VecVT = MVT::v4i32;
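+ // Only the sign bit of In2 should be copied into In1, so the mask has just
+ // bit 31 set in each 32-bit lane; unlike the 64-bit case below, this
+ // constant can be materialized with a single AdvSIMD immediate move.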
+ EltMask = DAG.getConstant(0x80000000ULL, EltVT);
+
+ if (!VT.isVector()) {
+ VecVal1 = DAG.getTargetInsertSubreg(AArch64::ssub, DL, VecVT,
+ DAG.getUNDEF(VecVT), In1);
+ VecVal2 = DAG.getTargetInsertSubreg(AArch64::ssub, DL, VecVT,
+ DAG.getUNDEF(VecVT), In2);
+ } else {
+ VecVal1 = DAG.getNode(ISD::BITCAST, DL, VecVT, In1);
+ VecVal2 = DAG.getNode(ISD::BITCAST, DL, VecVT, In2);
+ }
+ } else if (VT == MVT::f64 || VT == MVT::v2f64) {
+ EltVT = MVT::i64;
+ VecVT = MVT::v2i64;
+
+ // We want to materialize a mask with the high bit set, but the AdvSIMD
+ // immediate moves cannot materialize that in a single instruction for
+ // 64-bit elements. Instead, materialize zero and then negate it.
+ EltMask = DAG.getConstant(0, EltVT);
+
+ if (!VT.isVector()) {
+ VecVal1 = DAG.getTargetInsertSubreg(AArch64::dsub, DL, VecVT,
+ DAG.getUNDEF(VecVT), In1);
+ VecVal2 = DAG.getTargetInsertSubreg(AArch64::dsub, DL, VecVT,
+ DAG.getUNDEF(VecVT), In2);
+ } else {
+ VecVal1 = DAG.getNode(ISD::BITCAST, DL, VecVT, In1);
+ VecVal2 = DAG.getNode(ISD::BITCAST, DL, VecVT, In2);
+ }
+ } else {
+ llvm_unreachable("Invalid type for copysign!");
+ }
+
+ std::vector<SDValue> BuildVectorOps;
+ for (unsigned i = 0; i < VecVT.getVectorNumElements(); ++i)
+ BuildVectorOps.push_back(EltMask);
+
+ SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, DL, VecVT, BuildVectorOps);
- // AArch64 BooleanContents is the default UndefinedBooleanContent, which means
- // that as the consumer we are responsible for ignoring rubbish in higher
- // bits.
- TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit,
- DAG.getConstant(1, MVT::i32));
- SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit,
- DAG.getConstant(0, TheBit.getValueType()),
- DAG.getCondCode(ISD::SETNE));
+ // If we couldn't materialize the mask above, then the mask vector will be
+ // the zero vector, and we need to negate it here.
+ if (VT == MVT::f64 || VT == MVT::v2f64) {
+ BuildVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, BuildVec);
+ BuildVec = DAG.getNode(ISD::FNEG, DL, MVT::v2f64, BuildVec);
+ BuildVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, BuildVec);
+ }
+
+ SDValue Sel =
+ DAG.getNode(AArch64ISD::BIT, DL, VecVT, VecVal1, VecVal2, BuildVec);
- return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
- A64CMP, IfTrue, IfFalse,
- DAG.getConstant(A64CC::NE, MVT::i32));
+ if (VT == MVT::f32)
+ return DAG.getTargetExtractSubreg(AArch64::ssub, DL, VT, Sel);
+ else if (VT == MVT::f64)
+ return DAG.getTargetExtractSubreg(AArch64::dsub, DL, VT, Sel);
+ else
+ return DAG.getNode(ISD::BITCAST, DL, VT, Sel);
}
-static SDValue LowerVectorSETCC(SDValue Op, SelectionDAG &DAG) {
+SDValue AArch64TargetLowering::LowerCTPOP(SDValue Op, SelectionDAG &DAG) const {
+ if (DAG.getMachineFunction().getFunction()->getAttributes().hasAttribute(
+ AttributeSet::FunctionIndex, Attribute::NoImplicitFloat))
+ return SDValue();
+
+ // While there is no integer popcount instruction, it can
+ // be more efficiently lowered to the following sequence that uses
+ // AdvSIMD registers/instructions as long as the copies to/from
+ // the AdvSIMD registers are cheap.
+ // FMOV D0, X0 // copy 64-bit int to vector, high bits zero'd
+ // CNT V0.8B, V0.8B // 8xbyte pop-counts
+ // ADDV B0, V0.8B // sum 8xbyte pop-counts
+ // UMOV X0, V0.B[0] // copy byte result back to integer reg
+ SDValue Val = Op.getOperand(0);
SDLoc DL(Op);
- SDValue LHS = Op.getOperand(0);
- SDValue RHS = Op.getOperand(1);
- ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
EVT VT = Op.getValueType();
- bool Invert = false;
- SDValue Op0, Op1;
- unsigned Opcode;
+ SDValue ZeroVec = DAG.getUNDEF(MVT::v8i8);
- if (LHS.getValueType().isInteger()) {
+ SDValue VecVal;
+ if (VT == MVT::i32) {
+ VecVal = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
+ VecVal = DAG.getTargetInsertSubreg(AArch64::ssub, DL, MVT::v8i8, ZeroVec,
+ VecVal);
+ } else {
+ VecVal = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Val);
+ }
- // Attempt to use Vector Integer Compare Mask Test instruction.
- // TST = icmp ne (and (op0, op1), zero).
- if (CC == ISD::SETNE) {
- if (((LHS.getOpcode() == ISD::AND) &&
- ISD::isBuildVectorAllZeros(RHS.getNode())) ||
- ((RHS.getOpcode() == ISD::AND) &&
- ISD::isBuildVectorAllZeros(LHS.getNode()))) {
-
- SDValue AndOp = (LHS.getOpcode() == ISD::AND) ? LHS : RHS;
- SDValue NewLHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(0));
- SDValue NewRHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(1));
- return DAG.getNode(AArch64ISD::NEON_TST, DL, VT, NewLHS, NewRHS);
- }
- }
+ SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v8i8, VecVal);
+ SDValue UaddLV = DAG.getNode(
+ ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
+ DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, MVT::i32), CtPop);
- // Attempt to use Vector Integer Compare Mask against Zero instr (Signed).
- // Note: Compare against Zero does not support unsigned predicates.
- if ((ISD::isBuildVectorAllZeros(RHS.getNode()) ||
- ISD::isBuildVectorAllZeros(LHS.getNode())) &&
- !isUnsignedIntSetCC(CC)) {
-
- // If LHS is the zero value, swap operands and CondCode.
- if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
- CC = getSetCCSwappedOperands(CC);
- Op0 = RHS;
- } else
- Op0 = LHS;
-
- // Ensure valid CondCode for Compare Mask against Zero instruction:
- // EQ, GE, GT, LE, LT.
- if (ISD::SETNE == CC) {
- Invert = true;
- CC = ISD::SETEQ;
- }
+ if (VT == MVT::i64)
+ UaddLV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, UaddLV);
+ return UaddLV;
+}
- // Using constant type to differentiate integer and FP compares with zero.
- Op1 = DAG.getConstant(0, MVT::i32);
- Opcode = AArch64ISD::NEON_CMPZ;
+SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
- } else {
- // Attempt to use Vector Integer Compare Mask instr (Signed/Unsigned).
- // Ensure valid CondCode for Compare Mask instr: EQ, GE, GT, UGE, UGT.
- bool Swap = false;
- switch (CC) {
- default:
- llvm_unreachable("Illegal integer comparison.");
- case ISD::SETEQ:
- case ISD::SETGT:
- case ISD::SETGE:
- case ISD::SETUGT:
- case ISD::SETUGE:
- break;
- case ISD::SETNE:
- Invert = true;
- CC = ISD::SETEQ;
- break;
- case ISD::SETULT:
- case ISD::SETULE:
- case ISD::SETLT:
- case ISD::SETLE:
- Swap = true;
- CC = getSetCCSwappedOperands(CC);
- }
+ if (Op.getValueType().isVector())
+ return LowerVSETCC(Op, DAG);
- if (Swap)
- std::swap(LHS, RHS);
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+ ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
+ SDLoc dl(Op);
- Opcode = AArch64ISD::NEON_CMP;
- Op0 = LHS;
- Op1 = RHS;
- }
+ // We chose ZeroOrOneBooleanContents, so use zero and one.
+ EVT VT = Op.getValueType();
+ SDValue TVal = DAG.getConstant(1, VT);
+ SDValue FVal = DAG.getConstant(0, VT);
- // Generate Compare Mask instr or Compare Mask against Zero instr.
- SDValue NeonCmp =
- DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC));
+ // Handle f128 first, since one possible outcome is a normal integer
+ // comparison which gets picked up by the next if statement.
+ if (LHS.getValueType() == MVT::f128) {
+ softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
- if (Invert)
- NeonCmp = DAG.getNOT(DL, NeonCmp, VT);
+ // If softenSetCCOperands returned a scalar, use it.
+ if (!RHS.getNode()) {
+ assert(LHS.getValueType() == Op.getValueType() &&
+ "Unexpected setcc expansion!");
+ return LHS;
+ }
+ }
- return NeonCmp;
+ if (LHS.getValueType().isInteger()) {
+ SDValue CCVal;
+ SDValue Cmp =
+ getAArch64Cmp(LHS, RHS, ISD::getSetCCInverse(CC, true), CCVal, DAG, dl);
+
+ // Note that we inverted the condition above, so we reverse the order of
+ // the true and false operands here. This will allow the setcc to be
+ // matched to a single CSINC instruction.
+ return DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CCVal, Cmp);
}
- // Now handle Floating Point cases.
- // Attempt to use Vector Floating Point Compare Mask against Zero instruction.
- if (ISD::isBuildVectorAllZeros(RHS.getNode()) ||
- ISD::isBuildVectorAllZeros(LHS.getNode())) {
+ // Now we know we're dealing with FP values.
+ assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
+
+ // We need to perform an FCMP + CSEL sequence here. Go ahead
+ // and do the comparison.
+ SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
- // If LHS is the zero value, swap operands and CondCode.
- if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
- CC = getSetCCSwappedOperands(CC);
- Op0 = RHS;
- } else
- Op0 = LHS;
+ AArch64CC::CondCode CC1, CC2;
+ changeFPCCToAArch64CC(CC, CC1, CC2);
+ if (CC2 == AArch64CC::AL) {
+ changeFPCCToAArch64CC(ISD::getSetCCInverse(CC, false), CC1, CC2);
+ SDValue CC1Val = DAG.getConstant(CC1, MVT::i32);
- // Using constant type to differentiate integer and FP compares with zero.
- Op1 = DAG.getConstantFP(0, MVT::f32);
- Opcode = AArch64ISD::NEON_CMPZ;
+ // Note that we inverted the condition above, so we reverse the order of
+ // the true and false operands here. This will allow the setcc to be
+ // matched to a single CSINC instruction.
+ return DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CC1Val, Cmp);
} else {
- // Attempt to use Vector Floating Point Compare Mask instruction.
- Op0 = LHS;
- Op1 = RHS;
- Opcode = AArch64ISD::NEON_CMP;
+ // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't
+ // totally clean. Some of them require two CSELs to implement. When that is
+ // the case, as it is here, we emit the first CSEL and then emit a second
+ // one using the output of the first as the RHS. We're effectively OR'ing
+ // the two CC's together.
+
+ // FIXME: It would be nice if we could match the two CSELs to two CSINCs.
+ SDValue CC1Val = DAG.getConstant(CC1, MVT::i32);
+ SDValue CS1 =
+ DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp);
+
+ SDValue CC2Val = DAG.getConstant(CC2, MVT::i32);
+ return DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp);
}
+}
- SDValue NeonCmpAlt;
- // Some register compares have to be implemented with swapped CC and operands,
- // e.g.: OLT implemented as OGT with swapped operands.
- bool SwapIfRegArgs = false;
+/// A SELECT_CC operation is really some kind of max or min if both values
+/// being compared are, in some sense, equal to the results selected in either
+/// case. However,
+/// it is permissible to compare f32 values and produce directly extended f64
+/// values.
+///
+/// Extending the comparison operands would also be allowed, but is less likely
+/// to happen in practice since their use is right here. Note that truncate
+/// operations would *not* be semantically equivalent.
+static bool selectCCOpsAreFMaxCompatible(SDValue Cmp, SDValue Result) {
+ if (Cmp == Result)
+ return true;
- // Ensure valid CondCode for FP Compare Mask against Zero instruction:
- // EQ, GE, GT, LE, LT.
- // And ensure valid CondCode for FP Compare Mask instruction: EQ, GE, GT.
- switch (CC) {
- default:
- llvm_unreachable("Illegal FP comparison");
- case ISD::SETUNE:
- case ISD::SETNE:
- Invert = true; // Fallthrough
- case ISD::SETOEQ:
- case ISD::SETEQ:
- CC = ISD::SETEQ;
- break;
- case ISD::SETOLT:
- case ISD::SETLT:
- CC = ISD::SETLT;
- SwapIfRegArgs = true;
- break;
- case ISD::SETOGT:
- case ISD::SETGT:
- CC = ISD::SETGT;
- break;
- case ISD::SETOLE:
- case ISD::SETLE:
- CC = ISD::SETLE;
- SwapIfRegArgs = true;
- break;
- case ISD::SETOGE:
- case ISD::SETGE:
- CC = ISD::SETGE;
- break;
- case ISD::SETUGE:
- Invert = true;
- CC = ISD::SETLT;
- SwapIfRegArgs = true;
- break;
- case ISD::SETULE:
- Invert = true;
- CC = ISD::SETGT;
- break;
- case ISD::SETUGT:
- Invert = true;
- CC = ISD::SETLE;
- SwapIfRegArgs = true;
- break;
- case ISD::SETULT:
- Invert = true;
- CC = ISD::SETGE;
- break;
- case ISD::SETUEQ:
- Invert = true; // Fallthrough
- case ISD::SETONE:
- // Expand this to (OGT |OLT).
- NeonCmpAlt =
- DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGT));
- CC = ISD::SETLT;
- SwapIfRegArgs = true;
- break;
- case ISD::SETUO:
- Invert = true; // Fallthrough
- case ISD::SETO:
- // Expand this to (OGE | OLT).
- NeonCmpAlt =
- DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGE));
- CC = ISD::SETLT;
- SwapIfRegArgs = true;
- break;
+ ConstantFPSDNode *CCmp = dyn_cast<ConstantFPSDNode>(Cmp);
+ ConstantFPSDNode *CResult = dyn_cast<ConstantFPSDNode>(Result);
+ if (CCmp && CResult && Cmp.getValueType() == MVT::f32 &&
+ Result.getValueType() == MVT::f64) {
+ bool Lossy;
+ APFloat CmpVal = CCmp->getValueAPF();
+ CmpVal.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &Lossy);
+ return CResult->getValueAPF().bitwiseIsEqual(CmpVal);
}
- if (Opcode == AArch64ISD::NEON_CMP && SwapIfRegArgs) {
- CC = getSetCCSwappedOperands(CC);
- std::swap(Op0, Op1);
- }
+ return Result->getOpcode() == ISD::FP_EXTEND && Result->getOperand(0) == Cmp;
+}
- // Generate FP Compare Mask instr or FP Compare Mask against Zero instr
- SDValue NeonCmp = DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC));
+SDValue AArch64TargetLowering::LowerSELECT(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue CC = Op->getOperand(0);
+ SDValue TVal = Op->getOperand(1);
+ SDValue FVal = Op->getOperand(2);
+ SDLoc DL(Op);
- if (NeonCmpAlt.getNode())
- NeonCmp = DAG.getNode(ISD::OR, DL, VT, NeonCmp, NeonCmpAlt);
+ unsigned Opc = CC.getOpcode();
+ // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a select
+ // instruction.
+ if (CC.getResNo() == 1 &&
+ (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
+ Opc == ISD::USUBO || Opc == ISD::SMULO || Opc == ISD::UMULO)) {
+ // Only lower legal XALUO ops.
+ if (!DAG.getTargetLoweringInfo().isTypeLegal(CC->getValueType(0)))
+ return SDValue();
- if (Invert)
- NeonCmp = DAG.getNOT(DL, NeonCmp, VT);
+ AArch64CC::CondCode OFCC;
+ SDValue Value, Overflow;
+ std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, CC.getValue(0), DAG);
+ SDValue CCVal = DAG.getConstant(OFCC, MVT::i32);
- return NeonCmp;
+ return DAG.getNode(AArch64ISD::CSEL, DL, Op.getValueType(), TVal, FVal,
+ CCVal, Overflow);
+ }
+
+ if (CC.getOpcode() == ISD::SETCC)
+ return DAG.getSelectCC(DL, CC.getOperand(0), CC.getOperand(1), TVal, FVal,
+ cast<CondCodeSDNode>(CC.getOperand(2))->get());
+ else
+ return DAG.getSelectCC(DL, CC, DAG.getConstant(0, CC.getValueType()), TVal,
+ FVal, ISD::SETNE);
}
-// (SETCC lhs, rhs, condcode)
-SDValue
-AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
- SDLoc dl(Op);
+SDValue AArch64TargetLowering::LowerSELECT_CC(SDValue Op,
+ SelectionDAG &DAG) const {
+ ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
- ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
- EVT VT = Op.getValueType();
-
- if (VT.isVector())
- return LowerVectorSETCC(Op, DAG);
+ SDValue TVal = Op.getOperand(2);
+ SDValue FVal = Op.getOperand(3);
+ SDLoc dl(Op);
+ // Handle f128 first, because it will result in a comparison of some RTLIB
+ // call result against zero.
if (LHS.getValueType() == MVT::f128) {
- // f128 comparisons will be lowered to libcalls giving a valid LHS and RHS
- // for the rest of the function (some i32 or i64 values).
softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
- // If softenSetCCOperands returned a scalar, use it.
- if (RHS.getNode() == 0) {
- assert(LHS.getValueType() == Op.getValueType() &&
- "Unexpected setcc expansion!");
- return LHS;
+ // If softenSetCCOperands returned a scalar, we need to compare the result
+ // against zero to select between true and false values.
+ if (!RHS.getNode()) {
+ RHS = DAG.getConstant(0, LHS.getValueType());
+ CC = ISD::SETNE;
}
}
+ // Handle integers first.
if (LHS.getValueType().isInteger()) {
- SDValue A64cc;
+ assert((LHS.getValueType() == RHS.getValueType()) &&
+ (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64));
+
+ unsigned Opcode = AArch64ISD::CSEL;
+
+ // If both the TVal and the FVal are constants, see if we can swap them in
+ // order to form a CSINV or CSINC out of them.
+ ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
+ ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);
+
+ if (CTVal && CFVal && CTVal->isAllOnesValue() && CFVal->isNullValue()) {
+ std::swap(TVal, FVal);
+ std::swap(CTVal, CFVal);
+ CC = ISD::getSetCCInverse(CC, true);
+ } else if (CTVal && CFVal && CTVal->isOne() && CFVal->isNullValue()) {
+ std::swap(TVal, FVal);
+ std::swap(CTVal, CFVal);
+ CC = ISD::getSetCCInverse(CC, true);
+ } else if (TVal.getOpcode() == ISD::XOR) {
+ // If TVal is a NOT we want to swap TVal and FVal so that we can match
+ // with a CSINV rather than a CSEL.
+ ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(TVal.getOperand(1));
+
+ if (CVal && CVal->isAllOnesValue()) {
+ std::swap(TVal, FVal);
+ std::swap(CTVal, CFVal);
+ CC = ISD::getSetCCInverse(CC, true);
+ }
+ } else if (TVal.getOpcode() == ISD::SUB) {
+ // If TVal is a negation (SUB from 0) we want to swap TVal and FVal so
+ // that we can match with a CSNEG rather than a CSEL.
+ ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(TVal.getOperand(0));
+
+ if (CVal && CVal->isNullValue()) {
+ std::swap(TVal, FVal);
+ std::swap(CTVal, CFVal);
+ CC = ISD::getSetCCInverse(CC, true);
+ }
+ } else if (CTVal && CFVal) {
+ const int64_t TrueVal = CTVal->getSExtValue();
+ const int64_t FalseVal = CFVal->getSExtValue();
+ bool Swap = false;
+
+ // If both TVal and FVal are constants, see if FVal is the
+ // inverse/negation/increment of TVal and generate a CSINV/CSNEG/CSINC
+ // instead of a CSEL in that case.
+ if (TrueVal == ~FalseVal) {
+ Opcode = AArch64ISD::CSINV;
+ } else if (TrueVal == -FalseVal) {
+ Opcode = AArch64ISD::CSNEG;
+ } else if (TVal.getValueType() == MVT::i32) {
+ // If our operands are only 32 bits wide, make sure we use 32-bit
+ // arithmetic when checking whether we can use CSINC. This ensures that
+ // the addition in the check will wrap around properly in case there is
+ // an overflow (which would not be the case if we do the check with
+ // 64-bit arithmetic).
+ const uint32_t TrueVal32 = CTVal->getZExtValue();
+ const uint32_t FalseVal32 = CFVal->getZExtValue();
+
+ if ((TrueVal32 == FalseVal32 + 1) || (TrueVal32 + 1 == FalseVal32)) {
+ Opcode = AArch64ISD::CSINC;
+
+ if (TrueVal32 > FalseVal32) {
+ Swap = true;
+ }
+ }
+ // 64-bit check whether we can use CSINC.
+ } else if ((TrueVal == FalseVal + 1) || (TrueVal + 1 == FalseVal)) {
+ Opcode = AArch64ISD::CSINC;
+
+ if (TrueVal > FalseVal) {
+ Swap = true;
+ }
+ }
+
+ // Swap TVal and FVal if necessary.
+ if (Swap) {
+ std::swap(TVal, FVal);
+ std::swap(CTVal, CFVal);
+ CC = ISD::getSetCCInverse(CC, true);
+ }
+
+ if (Opcode != AArch64ISD::CSEL) {
+ // Drop FVal since we can get its value by simply inverting/negating
+ // TVal.
+ FVal = TVal;
+ }
+ }
+
+ SDValue CCVal;
+ SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
+
+ EVT VT = Op.getValueType();
+ return DAG.getNode(Opcode, dl, VT, TVal, FVal, CCVal, Cmp);
+ }
+
+ // Now we know we're dealing with FP values.
+ assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
+ assert(LHS.getValueType() == RHS.getValueType());
+ EVT VT = Op.getValueType();
- // Integers are handled in a separate function because the combinations of
- // immediates and tests can get hairy and we may want to fiddle things.
- SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
+ // Try to match this select into a max/min operation, which have dedicated
+ // opcode in the instruction set.
+ // FIXME: This is not correct in the presence of NaNs, so we only enable this
+ // in no-NaNs mode.
+ if (getTargetMachine().Options.NoNaNsFPMath) {
+ SDValue MinMaxLHS = TVal, MinMaxRHS = FVal;
+ if (selectCCOpsAreFMaxCompatible(LHS, MinMaxRHS) &&
+ selectCCOpsAreFMaxCompatible(RHS, MinMaxLHS)) {
+ CC = ISD::getSetCCSwappedOperands(CC);
+ std::swap(MinMaxLHS, MinMaxRHS);
+ }
- return DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
- CmpOp, DAG.getConstant(1, VT), DAG.getConstant(0, VT),
- A64cc);
+ if (selectCCOpsAreFMaxCompatible(LHS, MinMaxLHS) &&
+ selectCCOpsAreFMaxCompatible(RHS, MinMaxRHS)) {
+ switch (CC) {
+ default:
+ break;
+ case ISD::SETGT:
+ case ISD::SETGE:
+ case ISD::SETUGT:
+ case ISD::SETUGE:
+ case ISD::SETOGT:
+ case ISD::SETOGE:
+ return DAG.getNode(AArch64ISD::FMAX, dl, VT, MinMaxLHS, MinMaxRHS);
+ break;
+ case ISD::SETLT:
+ case ISD::SETLE:
+ case ISD::SETULT:
+ case ISD::SETULE:
+ case ISD::SETOLT:
+ case ISD::SETOLE:
+ return DAG.getNode(AArch64ISD::FMIN, dl, VT, MinMaxLHS, MinMaxRHS);
+ break;
+ }
+ }
}
- // Note that some LLVM floating-point CondCodes can't be lowered to a single
- // conditional branch, hence FPCCToA64CC can set a second test, where either
- // passing is sufficient.
- A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
- CondCode = FPCCToA64CC(CC, Alternative);
- SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
- SDValue CmpOp = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
- DAG.getCondCode(CC));
- SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
- CmpOp, DAG.getConstant(1, VT),
- DAG.getConstant(0, VT), A64cc);
+ // If that fails, we'll need to perform an FCMP + CSEL sequence. Go ahead
+ // and do the comparison.
+ SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
+
+ // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
+ // clean. Some of them require two CSELs to implement.
+ AArch64CC::CondCode CC1, CC2;
+ changeFPCCToAArch64CC(CC, CC1, CC2);
+ SDValue CC1Val = DAG.getConstant(CC1, MVT::i32);
+ SDValue CS1 = DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp);
+
+ // If we need a second CSEL, emit it, using the output of the first as the
+ // RHS. We're effectively OR'ing the two CC's together.
+ if (CC2 != AArch64CC::AL) {
+ SDValue CC2Val = DAG.getConstant(CC2, MVT::i32);
+ return DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp);
+ }
- if (Alternative != A64CC::Invalid) {
- A64cc = DAG.getConstant(Alternative, MVT::i32);
- A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp,
- DAG.getConstant(1, VT), A64SELECT_CC, A64cc);
+ // Otherwise, return the output of the first CSEL.
+ return CS1;
+}
+
+SDValue AArch64TargetLowering::LowerJumpTable(SDValue Op,
+ SelectionDAG &DAG) const {
+ // Jump table entries are PC-relative offsets, so no additional tweaking is
+ // necessary here; just get the address of the jump table.
+ JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
+ EVT PtrVT = getPointerTy();
+ SDLoc DL(Op);
+
+ if (getTargetMachine().getCodeModel() == CodeModel::Large &&
+ !Subtarget->isTargetMachO()) {
+ const unsigned char MO_NC = AArch64II::MO_NC;
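+ // WrapperLarge is expanded to a MOVZ/MOVK sequence that builds the absolute
+ // address from its four 16-bit fragments (G3 down to G0).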
+ return DAG.getNode(
+ AArch64ISD::WrapperLarge, DL, PtrVT,
+ DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_G3),
+ DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_G2 | MO_NC),
+ DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_G1 | MO_NC),
+ DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
+ AArch64II::MO_G0 | MO_NC));
}
- return A64SELECT_CC;
+ SDValue Hi =
+ DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_PAGE);
+ SDValue Lo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
+ AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
+ SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, Hi);
+ return DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, Lo);
}
-SDValue
-AArch64TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
- const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
- const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
+SDValue AArch64TargetLowering::LowerConstantPool(SDValue Op,
+ SelectionDAG &DAG) const {
+ ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
+ EVT PtrVT = getPointerTy();
+ SDLoc DL(Op);
+
+ if (getTargetMachine().getCodeModel() == CodeModel::Large) {
+ // Use the GOT for the large code model on iOS.
+ if (Subtarget->isTargetMachO()) {
+ SDValue GotAddr = DAG.getTargetConstantPool(
+ CP->getConstVal(), PtrVT, CP->getAlignment(), CP->getOffset(),
+ AArch64II::MO_GOT);
+ return DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, GotAddr);
+ }
- // We have to make sure we copy the entire structure: 8+8+8+4+4 = 32 bytes
- // rather than just 8.
- return DAG.getMemcpy(Op.getOperand(0), SDLoc(Op),
- Op.getOperand(1), Op.getOperand(2),
- DAG.getConstant(32, MVT::i32), 8, false, false,
- MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
+ const unsigned char MO_NC = AArch64II::MO_NC;
+ return DAG.getNode(
+ AArch64ISD::WrapperLarge, DL, PtrVT,
+ DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlignment(),
+ CP->getOffset(), AArch64II::MO_G3),
+ DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlignment(),
+ CP->getOffset(), AArch64II::MO_G2 | MO_NC),
+ DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlignment(),
+ CP->getOffset(), AArch64II::MO_G1 | MO_NC),
+ DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlignment(),
+ CP->getOffset(), AArch64II::MO_G0 | MO_NC));
+ } else {
+ // Use ADRP/ADD or ADRP/LDR for everything else: the small memory model on
+ // ELF, which is also the only valid model on Darwin.
+ SDValue Hi =
+ DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlignment(),
+ CP->getOffset(), AArch64II::MO_PAGE);
+ SDValue Lo = DAG.getTargetConstantPool(
+ CP->getConstVal(), PtrVT, CP->getAlignment(), CP->getOffset(),
+ AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
+
+ SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, Hi);
+ return DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, Lo);
+ }
}
-SDValue
-AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
+SDValue AArch64TargetLowering::LowerBlockAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+ const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
+ EVT PtrVT = getPointerTy();
+ SDLoc DL(Op);
+ if (getTargetMachine().getCodeModel() == CodeModel::Large &&
+ !Subtarget->isTargetMachO()) {
+ const unsigned char MO_NC = AArch64II::MO_NC;
+ return DAG.getNode(
+ AArch64ISD::WrapperLarge, DL, PtrVT,
+ DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_G3),
+ DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_G2 | MO_NC),
+ DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_G1 | MO_NC),
+ DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_G0 | MO_NC));
+ } else {
+ SDValue Hi = DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_PAGE);
+ SDValue Lo = DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_PAGEOFF |
+ AArch64II::MO_NC);
+ SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, Hi);
+ return DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, Lo);
+ }
+}
+
+SDValue AArch64TargetLowering::LowerDarwin_VASTART(SDValue Op,
+ SelectionDAG &DAG) const {
+ AArch64FunctionInfo *FuncInfo =
+ DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
+
+ SDLoc DL(Op);
+ SDValue FR =
+ DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(), getPointerTy());
+ const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
+ return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
+ MachinePointerInfo(SV), false, false, 0);
+}
+
+SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op,
+ SelectionDAG &DAG) const {
// The layout of the va_list struct is specified in the AArch64 Procedure Call
// Standard, section B.3.
MachineFunction &MF = DAG.getMachineFunction();
- AArch64MachineFunctionInfo *FuncInfo
- = MF.getInfo<AArch64MachineFunctionInfo>();
+ AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
SDLoc DL(Op);
SDValue Chain = Op.getOperand(0);
@@ -2803,492 +3528,3002 @@ AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
SmallVector<SDValue, 4> MemOps;
// void *__stack at offset 0
- SDValue Stack = DAG.getFrameIndex(FuncInfo->getVariadicStackIdx(),
- getPointerTy());
+ SDValue Stack =
+ DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(), getPointerTy());
MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList,
- MachinePointerInfo(SV), false, false, 0));
+ MachinePointerInfo(SV), false, false, 8));
// void *__gr_top at offset 8
- int GPRSize = FuncInfo->getVariadicGPRSize();
+ int GPRSize = FuncInfo->getVarArgsGPRSize();
if (GPRSize > 0) {
SDValue GRTop, GRTopAddr;
GRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
DAG.getConstant(8, getPointerTy()));
- GRTop = DAG.getFrameIndex(FuncInfo->getVariadicGPRIdx(), getPointerTy());
+ GRTop = DAG.getFrameIndex(FuncInfo->getVarArgsGPRIndex(), getPointerTy());
GRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), GRTop,
DAG.getConstant(GPRSize, getPointerTy()));
MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr,
- MachinePointerInfo(SV, 8),
- false, false, 0));
+ MachinePointerInfo(SV, 8), false, false, 8));
}
// void *__vr_top at offset 16
- int FPRSize = FuncInfo->getVariadicFPRSize();
+ int FPRSize = FuncInfo->getVarArgsFPRSize();
if (FPRSize > 0) {
SDValue VRTop, VRTopAddr;
VRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
DAG.getConstant(16, getPointerTy()));
- VRTop = DAG.getFrameIndex(FuncInfo->getVariadicFPRIdx(), getPointerTy());
+ VRTop = DAG.getFrameIndex(FuncInfo->getVarArgsFPRIndex(), getPointerTy());
VRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), VRTop,
DAG.getConstant(FPRSize, getPointerTy()));
MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr,
- MachinePointerInfo(SV, 16),
- false, false, 0));
+ MachinePointerInfo(SV, 16), false, false, 8));
}
// int __gr_offs at offset 24
SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
DAG.getConstant(24, getPointerTy()));
MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, MVT::i32),
- GROffsAddr, MachinePointerInfo(SV, 24),
- false, false, 0));
+ GROffsAddr, MachinePointerInfo(SV, 24), false,
+ false, 4));
// int __vr_offs at offset 28
SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
DAG.getConstant(28, getPointerTy()));
MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, MVT::i32),
- VROffsAddr, MachinePointerInfo(SV, 28),
- false, false, 0));
+ VROffsAddr, MachinePointerInfo(SV, 28), false,
+ false, 4));
- return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
- MemOps.size());
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}
-SDValue
-AArch64TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
- switch (Op.getOpcode()) {
- default: llvm_unreachable("Don't know how to custom lower this!");
- case ISD::FADD: return LowerF128ToCall(Op, DAG, RTLIB::ADD_F128);
- case ISD::FSUB: return LowerF128ToCall(Op, DAG, RTLIB::SUB_F128);
- case ISD::FMUL: return LowerF128ToCall(Op, DAG, RTLIB::MUL_F128);
- case ISD::FDIV: return LowerF128ToCall(Op, DAG, RTLIB::DIV_F128);
- case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, true);
- case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG, false);
- case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG, true);
- case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG, false);
- case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
- case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
- case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
- case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
-
- case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
- case ISD::BRCOND: return LowerBRCOND(Op, DAG);
- case ISD::BR_CC: return LowerBR_CC(Op, DAG);
- case ISD::GlobalAddress: return LowerGlobalAddressELF(Op, DAG);
- case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
- case ISD::JumpTable: return LowerJumpTable(Op, DAG);
- case ISD::SELECT: return LowerSELECT(Op, DAG);
- case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
- case ISD::SETCC: return LowerSETCC(Op, DAG);
- case ISD::VACOPY: return LowerVACOPY(Op, DAG);
- case ISD::VASTART: return LowerVASTART(Op, DAG);
- case ISD::BUILD_VECTOR:
- return LowerBUILD_VECTOR(Op, DAG, getSubtarget());
- case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
+SDValue AArch64TargetLowering::LowerVASTART(SDValue Op,
+ SelectionDAG &DAG) const {
+ return Subtarget->isTargetDarwin() ? LowerDarwin_VASTART(Op, DAG)
+ : LowerAAPCS_VASTART(Op, DAG);
+}
+
+SDValue AArch64TargetLowering::LowerVACOPY(SDValue Op,
+ SelectionDAG &DAG) const {
+ // AAPCS has three pointers and two ints (= 32 bytes), while Darwin has a
+ // single pointer.
+ unsigned VaListSize = Subtarget->isTargetDarwin() ? 8 : 32;
+ const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
+ const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
+
+ return DAG.getMemcpy(Op.getOperand(0), SDLoc(Op), Op.getOperand(1),
+ Op.getOperand(2), DAG.getConstant(VaListSize, MVT::i32),
+ 8, false, false, MachinePointerInfo(DestSV),
+ MachinePointerInfo(SrcSV));
+}
+
+SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
+ assert(Subtarget->isTargetDarwin() &&
+ "automatic va_arg instruction only works on Darwin");
+
+ const Value *V = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+ SDValue Chain = Op.getOperand(0);
+ SDValue Addr = Op.getOperand(1);
+ unsigned Align = Op.getConstantOperandVal(3);
+
+ SDValue VAList = DAG.getLoad(getPointerTy(), DL, Chain, Addr,
+ MachinePointerInfo(V), false, false, false, 0);
+ Chain = VAList.getValue(1);
+
+ if (Align > 8) {
+ assert(((Align & (Align - 1)) == 0) && "Expected Align to be a power of 2");
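+ // Round VAList up to the next multiple of Align: (p + Align - 1) & ~(Align - 1).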
+ VAList = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
+ DAG.getConstant(Align - 1, getPointerTy()));
+ VAList = DAG.getNode(ISD::AND, DL, getPointerTy(), VAList,
+ DAG.getConstant(-(int64_t)Align, getPointerTy()));
}
- return SDValue();
+ Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
+ uint64_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
+
+ // Scalar integer and FP values smaller than 64 bits are implicitly extended
+ // up to 64 bits. At the very least, we have to increase the striding of the
+ // vaargs list to match this, and for FP values we need to introduce
+ // FP_ROUND nodes as well.
+ if (VT.isInteger() && !VT.isVector())
+ ArgSize = 8;
+ bool NeedFPTrunc = false;
+ if (VT.isFloatingPoint() && !VT.isVector() && VT != MVT::f64) {
+ ArgSize = 8;
+ NeedFPTrunc = true;
+ }
+
+ // Increment the pointer, VAList, to the next vaarg
+ SDValue VANext = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
+ DAG.getConstant(ArgSize, getPointerTy()));
+ // Store the incremented VAList to the legalized pointer
+ SDValue APStore = DAG.getStore(Chain, DL, VANext, Addr, MachinePointerInfo(V),
+ false, false, 0);
+
+ // Load the actual argument out of the pointer VAList
+ if (NeedFPTrunc) {
+ // Load the value as an f64.
+ SDValue WideFP = DAG.getLoad(MVT::f64, DL, APStore, VAList,
+ MachinePointerInfo(), false, false, false, 0);
+ // Round the value down to an f32.
+ SDValue NarrowFP = DAG.getNode(ISD::FP_ROUND, DL, VT, WideFP.getValue(0),
+ DAG.getIntPtrConstant(1));
+ SDValue Ops[] = { NarrowFP, WideFP.getValue(1) };
+ // Merge the rounded value with the chain output of the load.
+ return DAG.getMergeValues(Ops, DL);
+ }
+
+ return DAG.getLoad(VT, DL, APStore, VAList, MachinePointerInfo(), false,
+ false, false, 0);
}
-/// Check if the specified splat value corresponds to a valid vector constant
-/// for a Neon instruction with a "modified immediate" operand (e.g., MOVI). If
-/// so, return the encoded 8-bit immediate and the OpCmode instruction fields
-/// values.
-static bool isNeonModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
- unsigned SplatBitSize, SelectionDAG &DAG,
- bool is128Bits, NeonModImmType type, EVT &VT,
- unsigned &Imm, unsigned &OpCmode) {
- switch (SplatBitSize) {
+SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op,
+ SelectionDAG &DAG) const {
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ MFI->setFrameAddressIsTaken(true);
+
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+ unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ SDValue FrameAddr =
+ DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, VT);
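+ // Walk the chain of frame records: the slot at [FP] holds the caller's saved
+ // frame pointer, so loading through it Depth times reaches the wanted frame.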
+ while (Depth--)
+ FrameAddr = DAG.getLoad(VT, DL, DAG.getEntryNode(), FrameAddr,
+ MachinePointerInfo(), false, false, false, 0);
+ return FrameAddr;
+}
+
+// FIXME? Maybe this could be a TableGen attribute on some registers and
+// this table could be generated automatically from RegInfo.
+unsigned AArch64TargetLowering::getRegisterByName(const char* RegName,
+ EVT VT) const {
+ unsigned Reg = StringSwitch<unsigned>(RegName)
+ .Case("sp", AArch64::SP)
+ .Default(0);
+ if (Reg)
+ return Reg;
+ report_fatal_error("Invalid register name global variable");
+}
+
+SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op,
+ SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MFI->setReturnAddressIsTaken(true);
+
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+ unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ if (Depth) {
+ SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
+ SDValue Offset = DAG.getConstant(8, getPointerTy());
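+ // Within a frame record the saved LR lives 8 bytes above the saved FP, so
+ // load the return address from FrameAddr + 8.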
+ return DAG.getLoad(VT, DL, DAG.getEntryNode(),
+ DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
+ MachinePointerInfo(), false, false, false, 0);
+ }
+
+ // Return LR, which contains the return address. Mark it an implicit live-in.
+ unsigned Reg = MF.addLiveIn(AArch64::LR, &AArch64::GPR64RegClass);
+ return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
+}
+
+/// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two
+/// i64 values and take a 2 x i64 value to shift plus a shift amount.
+SDValue AArch64TargetLowering::LowerShiftRightParts(SDValue Op,
+ SelectionDAG &DAG) const {
+ assert(Op.getNumOperands() == 3 && "Not a double-shift!");
+ EVT VT = Op.getValueType();
+ unsigned VTBits = VT.getSizeInBits();
+ SDLoc dl(Op);
+ SDValue ShOpLo = Op.getOperand(0);
+ SDValue ShOpHi = Op.getOperand(1);
+ SDValue ShAmt = Op.getOperand(2);
+ SDValue ARMcc;
+ unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
+
+ assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
+
+ SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64,
+ DAG.getConstant(VTBits, MVT::i64), ShAmt);
+ SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
+ SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, ShAmt,
+ DAG.getConstant(VTBits, MVT::i64));
+ SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
+
+ SDValue Cmp = emitComparison(ExtraShAmt, DAG.getConstant(0, MVT::i64),
+ ISD::SETGE, dl, DAG);
+ SDValue CCVal = DAG.getConstant(AArch64CC::GE, MVT::i32);
+
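+ // For shift amounts below 64 the low half is (lo >> amt) | (hi << (64 - amt));
+ // for amounts of 64 or more it is hi shifted right by (amt - 64) using Opc.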
+ SDValue FalseValLo = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
+ SDValue TrueValLo = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
+ SDValue Lo =
+ DAG.getNode(AArch64ISD::CSEL, dl, VT, TrueValLo, FalseValLo, CCVal, Cmp);
+
+ // AArch64 shifts larger than the register width are wrapped rather than
+ // clamped, so we can't just emit "hi >> x".
+ SDValue FalseValHi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
+ SDValue TrueValHi = Opc == ISD::SRA
+ ? DAG.getNode(Opc, dl, VT, ShOpHi,
+ DAG.getConstant(VTBits - 1, MVT::i64))
+ : DAG.getConstant(0, VT);
+ SDValue Hi =
+ DAG.getNode(AArch64ISD::CSEL, dl, VT, TrueValHi, FalseValHi, CCVal, Cmp);
+
+ SDValue Ops[2] = { Lo, Hi };
+ return DAG.getMergeValues(Ops, dl);
+}
+
+/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
+/// i64 values and takes a 2 x i64 value to shift plus a shift amount.
+SDValue AArch64TargetLowering::LowerShiftLeftParts(SDValue Op,
+ SelectionDAG &DAG) const {
+ assert(Op.getNumOperands() == 3 && "Not a double-shift!");
+ EVT VT = Op.getValueType();
+ unsigned VTBits = VT.getSizeInBits();
+ SDLoc dl(Op);
+ SDValue ShOpLo = Op.getOperand(0);
+ SDValue ShOpHi = Op.getOperand(1);
+ SDValue ShAmt = Op.getOperand(2);
+ SDValue ARMcc;
+
+ assert(Op.getOpcode() == ISD::SHL_PARTS);
+ SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64,
+ DAG.getConstant(VTBits, MVT::i64), ShAmt);
+ SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
+ SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, ShAmt,
+ DAG.getConstant(VTBits, MVT::i64));
+ SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
+ SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
+
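+ // For shift amounts below 64 the high half is (hi << amt) | (lo >> (64 - amt));
+ // for amounts of 64 or more it is lo << (amt - 64).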
+ SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
+
+ SDValue Cmp = emitComparison(ExtraShAmt, DAG.getConstant(0, MVT::i64),
+ ISD::SETGE, dl, DAG);
+ SDValue CCVal = DAG.getConstant(AArch64CC::GE, MVT::i32);
+ SDValue Hi =
+ DAG.getNode(AArch64ISD::CSEL, dl, VT, Tmp3, FalseVal, CCVal, Cmp);
+
+ // AArch64 shifts larger than the register width are wrapped rather than
+ // clamped, so we can't just emit "lo << a" if a is too big.
+ SDValue TrueValLo = DAG.getConstant(0, VT);
+ SDValue FalseValLo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
+ SDValue Lo =
+ DAG.getNode(AArch64ISD::CSEL, dl, VT, TrueValLo, FalseValLo, CCVal, Cmp);
+
+ SDValue Ops[2] = { Lo, Hi };
+ return DAG.getMergeValues(Ops, dl);
+}
+
+bool AArch64TargetLowering::isOffsetFoldingLegal(
+ const GlobalAddressSDNode *GA) const {
+ // The AArch64 target doesn't support folding offsets into global addresses.
+ return false;
+}
+
+bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
+ // We can materialize #0.0 as fmov $Rd, XZR for 64-bit and 32-bit cases.
+ // FIXME: We should be able to handle f128 as well with a clever lowering.
+ if (Imm.isPosZero() && (VT == MVT::f64 || VT == MVT::f32))
+ return true;
+
+ if (VT == MVT::f64)
+ return AArch64_AM::getFP64Imm(Imm) != -1;
+ else if (VT == MVT::f32)
+ return AArch64_AM::getFP32Imm(Imm) != -1;
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// AArch64 Optimization Hooks
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// AArch64 Inline Assembly Support
+//===----------------------------------------------------------------------===//
+
+// Table of Constraints
+// TODO: This is the current set of constraints supported by ARM for the
+// compiler; not all of them may make sense, e.g. S may be difficult to support.
+//
+// r - A general register
+// w - An FP/SIMD register of some size in the range v0-v31
+// x - An FP/SIMD register of some size in the range v0-v15
+// I - Constant that can be used with an ADD instruction
+// J - Constant that can be used with a SUB instruction
+// K - Constant that can be used with a 32-bit logical instruction
+// L - Constant that can be used with a 64-bit logical instruction
+// M - Constant that can be used as a 32-bit MOV immediate
+// N - Constant that can be used as a 64-bit MOV immediate
+// Q - A memory reference with base register and no offset
+// S - A symbolic address
+// Y - Floating point constant zero
+// Z - Integer constant zero
+//
+// Note that general register operands will be output using their 64-bit x
+// register name, whatever the size of the variable, unless the asm operand
+// is prefixed by the %w modifier. Floating-point and SIMD register operands
+// will be output with the v prefix unless prefixed by the %b, %h, %s, %d or
+// %q modifier.
+
+/// getConstraintType - Given a constraint letter, return the type of
+/// constraint it is for this target.
+AArch64TargetLowering::ConstraintType
+AArch64TargetLowering::getConstraintType(const std::string &Constraint) const {
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ default:
+ break;
+ case 'z':
+ return C_Other;
+ case 'x':
+ case 'w':
+ return C_RegisterClass;
+ // An address with a single base register. Due to the way we
+ // currently handle addresses, it is the same as 'r'.
+ case 'Q':
+ return C_Memory;
+ }
+ }
+ return TargetLowering::getConstraintType(Constraint);
+}
+
+/// Examine constraint type and operand type and determine a weight value.
+/// This object must already have been set up with the operand type
+/// and the current alternative constraint selected.
+TargetLowering::ConstraintWeight
+AArch64TargetLowering::getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const {
+ ConstraintWeight weight = CW_Invalid;
+ Value *CallOperandVal = info.CallOperandVal;
+ // If we don't have a value, we can't do a match,
+ // but allow it at the lowest weight.
+ if (!CallOperandVal)
+ return CW_Default;
+ Type *type = CallOperandVal->getType();
+ // Look at the constraint type.
+ switch (*constraint) {
default:
- llvm_unreachable("unexpected size for isNeonModifiedImm");
- case 8: {
- if (type != Neon_Mov_Imm)
- return false;
- assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
- // Neon movi per byte: Op=0, Cmode=1110.
- OpCmode = 0xe;
- Imm = SplatBits;
- VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
+ weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
+ break;
+ case 'x':
+ case 'w':
+ if (type->isFloatingPointTy() || type->isVectorTy())
+ weight = CW_Register;
+ break;
+ case 'z':
+ weight = CW_Constant;
break;
}
- case 16: {
- // Neon move inst per halfword
- VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
- if ((SplatBits & ~0xff) == 0) {
- // Value = 0x00nn is 0x00nn LSL 0
- // movi: Op=0, Cmode=1000; mvni: Op=1, Cmode=1000
- // bic: Op=1, Cmode=1001; orr: Op=0, Cmode=1001
- // Op=x, Cmode=100y
- Imm = SplatBits;
- OpCmode = 0x8;
+ return weight;
+}
+
+std::pair<unsigned, const TargetRegisterClass *>
+AArch64TargetLowering::getRegForInlineAsmConstraint(
+ const std::string &Constraint, MVT VT) const {
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ case 'r':
+ if (VT.getSizeInBits() == 64)
+ return std::make_pair(0U, &AArch64::GPR64commonRegClass);
+ return std::make_pair(0U, &AArch64::GPR32commonRegClass);
+ case 'w':
+ if (VT == MVT::f32)
+ return std::make_pair(0U, &AArch64::FPR32RegClass);
+ if (VT.getSizeInBits() == 64)
+ return std::make_pair(0U, &AArch64::FPR64RegClass);
+ if (VT.getSizeInBits() == 128)
+ return std::make_pair(0U, &AArch64::FPR128RegClass);
break;
- }
- if ((SplatBits & ~0xff00) == 0) {
- // Value = 0xnn00 is 0x00nn LSL 8
- // movi: Op=0, Cmode=1010; mvni: Op=1, Cmode=1010
- // bic: Op=1, Cmode=1011; orr: Op=0, Cmode=1011
- // Op=x, Cmode=101x
- Imm = SplatBits >> 8;
- OpCmode = 0xa;
+ // The instructions that this constraint is designed for can
+ // only take 128-bit registers so just use that regclass.
+ case 'x':
+ if (VT.getSizeInBits() == 128)
+ return std::make_pair(0U, &AArch64::FPR128_loRegClass);
break;
}
- // can't handle any other
- return false;
}
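+ // An explicit {cc} constraint refers to the NZCV condition-flags register.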
+ if (StringRef("{cc}").equals_lower(Constraint))
+ return std::make_pair(unsigned(AArch64::NZCV), &AArch64::CCRRegClass);
- case 32: {
- // First the LSL variants (MSL is unusable by some interested instructions).
+ // Use the default implementation in TargetLowering to convert the register
+ // constraint into a member of a register class.
+ std::pair<unsigned, const TargetRegisterClass *> Res;
+ Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+
+ // Not found as a standard register?
+ if (!Res.second) {
+ unsigned Size = Constraint.size();
+ if ((Size == 4 || Size == 5) && Constraint[0] == '{' &&
+ tolower(Constraint[1]) == 'v' && Constraint[Size - 1] == '}') {
+ const std::string Reg =
+ std::string(&Constraint[2], &Constraint[Size - 1]);
+ int RegNo = atoi(Reg.c_str());
+ if (RegNo >= 0 && RegNo <= 31) {
+ // v0 - v31 are aliases of q0 - q31.
+ // By default we'll emit v0-v31 for this unless there's a modifier, in
+ // which case we'll emit the appropriately sized register name instead.
+ Res.first = AArch64::FPR128RegClass.getRegister(RegNo);
+ Res.second = &AArch64::FPR128RegClass;
+ }
+ }
+ }
- // Neon move instr per word, shift zeros
- VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
- if ((SplatBits & ~0xff) == 0) {
- // Value = 0x000000nn is 0x000000nn LSL 0
- // movi: Op=0, Cmode= 0000; mvni: Op=1, Cmode= 0000
- // bic: Op=1, Cmode= 0001; orr: Op=0, Cmode= 0001
- // Op=x, Cmode=000x
- Imm = SplatBits;
- OpCmode = 0;
- break;
+ return Res;
+}
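As an illustrative aside (not part of the patch): the fallback path above accepts explicit "{v0}".."{v31}" operands by parsing the digits between the braces and mapping them onto FPR128. A minimal standalone sketch of that string handling, with a made-up helper name and no LLVM types:

    #include <cctype>
    #include <cstdlib>
    #include <string>

    // Hypothetical helper mirroring the parse above: accepts "{v0}".."{v31}"
    // (4- or 5-character constraints) and returns the register number, or -1.
    static int parseVRegConstraint(const std::string &Constraint) {
      const auto Size = Constraint.size();
      if ((Size != 4 && Size != 5) || Constraint[0] != '{' ||
          std::tolower(Constraint[1]) != 'v' || Constraint[Size - 1] != '}')
        return -1;
      const int RegNo = std::atoi(Constraint.substr(2, Size - 3).c_str());
      return (RegNo >= 0 && RegNo <= 31) ? RegNo : -1;
    }

    // parseVRegConstraint("{v7}") == 7, parseVRegConstraint("{v31}") == 31,
    // parseVRegConstraint("{w0}") == -1.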
+
+/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
+/// vector. If it is invalid, don't add anything to Ops.
+void AArch64TargetLowering::LowerAsmOperandForConstraint(
+ SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const {
+ SDValue Result;
+
+ // Currently only support length 1 constraints.
+ if (Constraint.length() != 1)
+ return;
+
+ char ConstraintLetter = Constraint[0];
+ switch (ConstraintLetter) {
+ default:
+ break;
+
+ // This set of constraints deals with valid constants for various instructions.
+ // Validate and return a target constant for them if we can.
+ case 'z': {
+ // 'z' maps to xzr or wzr so it needs an input of 0.
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
+ if (!C || C->getZExtValue() != 0)
+ return;
+
+ if (Op.getValueType() == MVT::i64)
+ Result = DAG.getRegister(AArch64::XZR, MVT::i64);
+ else
+ Result = DAG.getRegister(AArch64::WZR, MVT::i32);
+ break;
+ }
+
+ case 'I':
+ case 'J':
+ case 'K':
+ case 'L':
+ case 'M':
+ case 'N':
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
+ if (!C)
+ return;
+
+ // Grab the value and do some validation.
+ uint64_t CVal = C->getZExtValue();
+ switch (ConstraintLetter) {
+ // The I constraint applies only to simple ADD or SUB immediate operands:
+ // i.e. 0 to 4095 with optional shift by 12
+ // The J constraint applies only to ADD or SUB immediates that would be
+ // valid when negated, i.e. if [an add pattern] were to be output as a SUB
+ // instruction [or vice versa], in other words -1 to -4095 with optional
+ // left shift by 12.
+ case 'I':
+ if (isUInt<12>(CVal) || isShiftedUInt<12, 12>(CVal))
+ break;
+ return;
+ case 'J': {
+ uint64_t NVal = -C->getSExtValue();
+ if (isUInt<12>(NVal) || isShiftedUInt<12, 12>(NVal))
+ break;
+ return;
}
- if ((SplatBits & ~0xff00) == 0) {
- // Value = 0x0000nn00 is 0x000000nn LSL 8
- // movi: Op=0, Cmode= 0010; mvni: Op=1, Cmode= 0010
- // bic: Op=1, Cmode= 0011; orr : Op=0, Cmode= 0011
- // Op=x, Cmode=001x
- Imm = SplatBits >> 8;
- OpCmode = 0x2;
- break;
+ // The K and L constraints apply *only* to logical immediates, including
+ // what used to be the MOVI alias for ORR (though the MOVI alias has now
+ // been removed and MOV should be used). So these constraints have to
+ // distinguish between bit patterns that are valid 32-bit or 64-bit
+ // "bitmask immediates": for example 0xaaaaaaaa is a valid bimm32 (K), but
+ // not a valid bimm64 (L) where 0xaaaaaaaaaaaaaaaa would be valid, and vice
+ // versa.
+ case 'K':
+ if (AArch64_AM::isLogicalImmediate(CVal, 32))
+ break;
+ return;
+ case 'L':
+ if (AArch64_AM::isLogicalImmediate(CVal, 64))
+ break;
+ return;
+ // The M and N constraints are a superset of K and L respectively, for use
+ // with the MOV (immediate) alias. As well as the logical immediates they
+ // also match 32 or 64-bit immediates that can be loaded either using a
+ // *single* MOVZ or MOVN, such as 32-bit 0x12340000, 0x00001234, 0xffffedca
+ // (M) or 64-bit 0x1234000000000000 (N) etc.
+ // As a note, some of this code is liberally stolen from the asm parser.
+ case 'M': {
+ if (!isUInt<32>(CVal))
+ return;
+ if (AArch64_AM::isLogicalImmediate(CVal, 32))
+ break;
+ if ((CVal & 0xFFFF) == CVal)
+ break;
+ if ((CVal & 0xFFFF0000ULL) == CVal)
+ break;
+ uint64_t NCVal = ~(uint32_t)CVal;
+ if ((NCVal & 0xFFFFULL) == NCVal)
+ break;
+ if ((NCVal & 0xFFFF0000ULL) == NCVal)
+ break;
+ return;
}
- if ((SplatBits & ~0xff0000) == 0) {
- // Value = 0x00nn0000 is 0x000000nn LSL 16
- // movi: Op=0, Cmode= 0100; mvni: Op=1, Cmode= 0100
- // bic: Op=1, Cmode= 0101; orr: Op=0, Cmode= 0101
- // Op=x, Cmode=010x
- Imm = SplatBits >> 16;
- OpCmode = 0x4;
- break;
+ case 'N': {
+ if (AArch64_AM::isLogicalImmediate(CVal, 64))
+ break;
+ if ((CVal & 0xFFFFULL) == CVal)
+ break;
+ if ((CVal & 0xFFFF0000ULL) == CVal)
+ break;
+ if ((CVal & 0xFFFF00000000ULL) == CVal)
+ break;
+ if ((CVal & 0xFFFF000000000000ULL) == CVal)
+ break;
+ uint64_t NCVal = ~CVal;
+ if ((NCVal & 0xFFFFULL) == NCVal)
+ break;
+ if ((NCVal & 0xFFFF0000ULL) == NCVal)
+ break;
+ if ((NCVal & 0xFFFF00000000ULL) == NCVal)
+ break;
+ if ((NCVal & 0xFFFF000000000000ULL) == NCVal)
+ break;
+ return;
}
- if ((SplatBits & ~0xff000000) == 0) {
- // Value = 0xnn000000 is 0x000000nn LSL 24
- // movi: Op=0, Cmode= 0110; mvni: Op=1, Cmode= 0110
- // bic: Op=1, Cmode= 0111; orr: Op=0, Cmode= 0111
- // Op=x, Cmode=011x
- Imm = SplatBits >> 24;
- OpCmode = 0x6;
- break;
+ default:
+ return;
}
- // Now the MSL immediates.
+ // All assembler immediates are 64-bit integers.
+ Result = DAG.getTargetConstant(CVal, MVT::i64);
+ break;
+ }
- // Neon move instr per word, shift ones
- if ((SplatBits & ~0xffff) == 0 &&
- ((SplatBits | SplatUndef) & 0xff) == 0xff) {
- // Value = 0x0000nnff is 0x000000nn MSL 8
- // movi: Op=0, Cmode= 1100; mvni: Op=1, Cmode= 1100
- // Op=x, Cmode=1100
- Imm = SplatBits >> 8;
- OpCmode = 0xc;
- break;
+ if (Result.getNode()) {
+ Ops.push_back(Result);
+ return;
+ }
+
+ return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
+}
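As an illustrative aside (not from the patch): the 'I' and 'J' ranges above boil down to "fits in 12 bits, optionally shifted left by 12". A minimal standalone sketch of those two checks, with isUInt/isShiftedUInt re-derived here rather than taken from llvm/Support/MathExtras.h:

    #include <cstdint>

    // 'I': a legal ADD/SUB immediate, i.e. 0..4095 with an optional LSL #12.
    static bool isAddSubImm(uint64_t V) {
      return (V >> 12) == 0 ||                        // plain 12-bit value
             ((V & 0xfffULL) == 0 && (V >> 24) == 0); // 12-bit value << 12
    }

    // 'J': valid once negated, i.e. the negation is a legal ADD/SUB immediate.
    static bool isNegatedAddSubImm(int64_t V) {
      return isAddSubImm(static_cast<uint64_t>(-V));
    }

    // isAddSubImm(4095) and isAddSubImm(0x1000) hold; isAddSubImm(0x1001)
    // does not. isNegatedAddSubImm(-42) holds.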
+
+//===----------------------------------------------------------------------===//
+// AArch64 Advanced SIMD Support
+//===----------------------------------------------------------------------===//
+
+/// WidenVector - Given a value in the V64 register class, produce the
+/// equivalent value in the V128 register class.
+static SDValue WidenVector(SDValue V64Reg, SelectionDAG &DAG) {
+ EVT VT = V64Reg.getValueType();
+ unsigned NarrowSize = VT.getVectorNumElements();
+ MVT EltTy = VT.getVectorElementType().getSimpleVT();
+ MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
+ SDLoc DL(V64Reg);
+
+ return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideTy, DAG.getUNDEF(WideTy),
+ V64Reg, DAG.getConstant(0, MVT::i32));
+}
+
+/// getExtFactor - Determine the adjustment factor for the position when
+/// generating an "extract from vector registers" instruction.
+static unsigned getExtFactor(SDValue &V) {
+ EVT EltType = V.getValueType().getVectorElementType();
+ return EltType.getSizeInBits() / 8;
+}
+
+/// NarrowVector - Given a value in the V128 register class, produce the
+/// equivalent value in the V64 register class.
+static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
+ EVT VT = V128Reg.getValueType();
+ unsigned WideSize = VT.getVectorNumElements();
+ MVT EltTy = VT.getVectorElementType().getSimpleVT();
+ MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
+ SDLoc DL(V128Reg);
+
+ return DAG.getTargetExtractSubreg(AArch64::dsub, DL, NarrowTy, V128Reg);
+}
+
+// Gather data to see if the operation can be modelled as a
+// shuffle in combination with VEXTs.
+SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
+ SelectionDAG &DAG) const {
+ assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
+ SDLoc dl(Op);
+ EVT VT = Op.getValueType();
+ unsigned NumElts = VT.getVectorNumElements();
+
+ SmallVector<SDValue, 2> SourceVecs;
+ SmallVector<unsigned, 2> MinElts;
+ SmallVector<unsigned, 2> MaxElts;
+
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue V = Op.getOperand(i);
+ if (V.getOpcode() == ISD::UNDEF)
+ continue;
+ else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
+ // A shuffle can only come from building a vector from various
+ // elements of other vectors.
+ return SDValue();
}
- if ((SplatBits & ~0xffffff) == 0 &&
- ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
- // Value = 0x00nnffff is 0x000000nn MSL 16
- // movi: Op=1, Cmode= 1101; mvni: Op=1, Cmode= 1101
- // Op=x, Cmode=1101
- Imm = SplatBits >> 16;
- OpCmode = 0xd;
- break;
+
+ // Record this extraction against the appropriate vector if possible...
+ SDValue SourceVec = V.getOperand(0);
+ unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
+ bool FoundSource = false;
+ for (unsigned j = 0; j < SourceVecs.size(); ++j) {
+ if (SourceVecs[j] == SourceVec) {
+ if (MinElts[j] > EltNo)
+ MinElts[j] = EltNo;
+ if (MaxElts[j] < EltNo)
+ MaxElts[j] = EltNo;
+ FoundSource = true;
+ break;
+ }
+ }
+
+ // Or record a new source if not...
+ if (!FoundSource) {
+ SourceVecs.push_back(SourceVec);
+ MinElts.push_back(EltNo);
+ MaxElts.push_back(EltNo);
+ }
+ }
+
+ // Currently we only do something sane when at most two source vectors
+ // are involved.
+ if (SourceVecs.size() > 2)
+ return SDValue();
+
+ SDValue ShuffleSrcs[2] = { DAG.getUNDEF(VT), DAG.getUNDEF(VT) };
+ int VEXTOffsets[2] = { 0, 0 };
+ int OffsetMultipliers[2] = { 1, 1 };
+
+ // This loop extracts the usage patterns of the source vectors
+ // and prepares appropriate SDValues for a shuffle if possible.
+ for (unsigned i = 0; i < SourceVecs.size(); ++i) {
+ unsigned NumSrcElts = SourceVecs[i].getValueType().getVectorNumElements();
+ SDValue CurSource = SourceVecs[i];
+ if (SourceVecs[i].getValueType().getVectorElementType() !=
+ VT.getVectorElementType()) {
+ // It may hit this case if SourceVecs[i] is AssertSext/AssertZext.
+ // Then bitcast it to the vector which holds asserted element type,
+ // and record the multiplier of element width between SourceVecs and
+ // Build_vector which is needed to extract the correct lanes later.
+ EVT CastVT =
+ EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
+ SourceVecs[i].getValueSizeInBits() /
+ VT.getVectorElementType().getSizeInBits());
+
+ CurSource = DAG.getNode(ISD::BITCAST, dl, CastVT, SourceVecs[i]);
+ OffsetMultipliers[i] = CastVT.getVectorNumElements() / NumSrcElts;
+ NumSrcElts *= OffsetMultipliers[i];
+ MaxElts[i] *= OffsetMultipliers[i];
+ MinElts[i] *= OffsetMultipliers[i];
+ }
+
+ if (CurSource.getValueType() == VT) {
+ // No VEXT necessary
+ ShuffleSrcs[i] = CurSource;
+ VEXTOffsets[i] = 0;
+ continue;
+ } else if (NumSrcElts < NumElts) {
+ // We can pad out the smaller vector for free, so if it's part of a
+ // shuffle...
+ ShuffleSrcs[i] = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, CurSource,
+ DAG.getUNDEF(CurSource.getValueType()));
+ continue;
+ }
+
+ // Since only 64-bit and 128-bit vectors are legal on ARM and
+ // we've eliminated the other cases...
+ assert(NumSrcElts == 2 * NumElts &&
+ "unexpected vector sizes in ReconstructShuffle");
+
+ if (MaxElts[i] - MinElts[i] >= NumElts) {
+ // Span too large for a VEXT to cope
+ return SDValue();
+ }
+
+ if (MinElts[i] >= NumElts) {
+ // The extraction can just take the second half
+ VEXTOffsets[i] = NumElts;
+ ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, CurSource,
+ DAG.getIntPtrConstant(NumElts));
+ } else if (MaxElts[i] < NumElts) {
+ // The extraction can just take the first half
+ VEXTOffsets[i] = 0;
+ ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, CurSource,
+ DAG.getIntPtrConstant(0));
+ } else {
+ // An actual VEXT is needed
+ VEXTOffsets[i] = MinElts[i];
+ SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, CurSource,
+ DAG.getIntPtrConstant(0));
+ SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, CurSource,
+ DAG.getIntPtrConstant(NumElts));
+ unsigned Imm = VEXTOffsets[i] * getExtFactor(VEXTSrc1);
+ ShuffleSrcs[i] = DAG.getNode(AArch64ISD::EXT, dl, VT, VEXTSrc1, VEXTSrc2,
+ DAG.getConstant(Imm, MVT::i32));
+ }
+ }
+
+ SmallVector<int, 8> Mask;
+
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue Entry = Op.getOperand(i);
+ if (Entry.getOpcode() == ISD::UNDEF) {
+ Mask.push_back(-1);
+ continue;
+ }
+
+ SDValue ExtractVec = Entry.getOperand(0);
+ int ExtractElt =
+ cast<ConstantSDNode>(Op.getOperand(i).getOperand(1))->getSExtValue();
+ if (ExtractVec == SourceVecs[0]) {
+ Mask.push_back(ExtractElt * OffsetMultipliers[0] - VEXTOffsets[0]);
+ } else {
+ Mask.push_back(ExtractElt * OffsetMultipliers[1] + NumElts -
+ VEXTOffsets[1]);
}
- // can't handle any other
+ }
+
+ // Final check before we try to produce nonsense...
+ if (isShuffleMaskLegal(Mask, VT))
+ return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1],
+ &Mask[0]);
+
+ return SDValue();
+}
+
+// check if an EXT instruction can handle the shuffle mask when the
+// vector sources of the shuffle are the same.
+static bool isSingletonEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
+ unsigned NumElts = VT.getVectorNumElements();
+
+ // Assume that the first shuffle index is not UNDEF. Fail if it is.
+ if (M[0] < 0)
+ return false;
+
+ Imm = M[0];
+
+ // If this is a VEXT shuffle, the immediate value is the index of the first
+ // element. The other shuffle indices must be the successive elements after
+ // the first one.
+ unsigned ExpectedElt = Imm;
+ for (unsigned i = 1; i < NumElts; ++i) {
+ // Increment the expected index. If it wraps around, just follow it
+ // back to index zero and keep going.
+ ++ExpectedElt;
+ if (ExpectedElt == NumElts)
+ ExpectedElt = 0;
+
+ if (M[i] < 0)
+ continue; // ignore UNDEF indices
+ if (ExpectedElt != static_cast<unsigned>(M[i]))
+ return false;
+ }
+
+ return true;
+}
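As an illustrative aside (not from the patch): a standalone sketch of the check above, which accepts any mask that is a rotation of a single source (what a one-register EXT can do), ignoring undef (-1) lanes in the same way:

    #include <cstddef>
    #include <vector>

    // Return the rotation amount the mask describes, or -1 if it is not a
    // single contiguous rotation. Mirrors the predicate above.
    static int singletonExtIndex(const std::vector<int> &M, unsigned NumElts) {
      if (M.empty() || M[0] < 0)
        return -1; // like the code above, give up on a leading undef
      unsigned Imm = static_cast<unsigned>(M[0]);
      unsigned Expected = Imm;
      for (std::size_t i = 1; i < M.size(); ++i) {
        if (++Expected == NumElts)
          Expected = 0; // wrap back to lane 0
        if (M[i] >= 0 && static_cast<unsigned>(M[i]) != Expected)
          return -1;
      }
      return static_cast<int>(Imm);
    }

    // For a one-register v4i32 shuffle, mask <1, 2, 3, 0> rotates by one
    // element, so this returns 1; the EXT immediate is then 1 * 4 bytes
    // (getExtFactor above supplies the element size in bytes).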
+
+// check if an EXT instruction can handle the shuffle mask when the
+// vector sources of the shuffle are different.
+static bool isEXTMask(ArrayRef<int> M, EVT VT, bool &ReverseEXT,
+ unsigned &Imm) {
+ // Look for the first non-undef element.
+ const int *FirstRealElt = std::find_if(M.begin(), M.end(),
+ [](int Elt) {return Elt >= 0;});
+
+ // Benefit from APInt to handle overflow when calculating the expected element.
+ unsigned NumElts = VT.getVectorNumElements();
+ unsigned MaskBits = APInt(32, NumElts * 2).logBase2();
+ APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1);
+ // The following shuffle indices must be the successive elements after the
+ // first real element.
+ const int *FirstWrongElt = std::find_if(FirstRealElt + 1, M.end(),
+ [&](int Elt) {return Elt != ExpectedElt++ && Elt != -1;});
+ if (FirstWrongElt != M.end())
+ return false;
+
+ // The index of an EXT is the first element if it is not UNDEF.
+ // Watch out for the beginning UNDEFs. The EXT index should be the expected
+ // value of the first element. E.g.
+ // <-1, -1, 3, ...> is treated as <1, 2, 3, ...>.
+ // <-1, -1, 0, 1, ...> is treated as <2*NumElts-2, 2*NumElts-1, 0, 1, ...>.
+ // ExpectedElt is the last mask index plus 1.
+ Imm = ExpectedElt.getZExtValue();
+
+ // There are two different cases that require reversing the input vectors.
+ // For example, for vector <4 x i32> we have the following cases,
+ // Case 1: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, -1, 0>)
+ // Case 2: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, 7, 0>)
+ // For both cases, we finally use mask <5, 6, 7, 0>, which requires
+ // to reverse two input vectors.
+ if (Imm < NumElts)
+ ReverseEXT = true;
+ else
+ Imm -= NumElts;
+
+ return true;
+}
+
+/// isREVMask - Check if a vector shuffle corresponds to a REV
+/// instruction with the specified blocksize. (The order of the elements
+/// within each block of the vector is reversed.)
+static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
+ assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
+ "Only possible block sizes for REV are: 16, 32, 64");
+
+ unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+ if (EltSz == 64)
return false;
+
+ unsigned NumElts = VT.getVectorNumElements();
+ unsigned BlockElts = M[0] + 1;
+ // If the first shuffle index is UNDEF, be optimistic.
+ if (M[0] < 0)
+ BlockElts = BlockSize / EltSz;
+
+ if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
+ return false;
+
+ for (unsigned i = 0; i < NumElts; ++i) {
+ if (M[i] < 0)
+ continue; // ignore UNDEF indices
+ if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
+ return false;
}
- case 64: {
- if (type != Neon_Mov_Imm)
+ return true;
+}
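As an illustrative aside (not from the patch): the formula above says that within each BlockSize-bit block the lanes appear in reverse order. A standalone sketch that builds the mask a REV produces, using the same index expression:

    #include <vector>

    // Build the shuffle mask of a REV<BlockSizeBits> over NumElts lanes of
    // EltSizeBits each (sizes assumed to divide evenly, as in the code above).
    static std::vector<int> revMask(unsigned NumElts, unsigned EltSizeBits,
                                    unsigned BlockSizeBits) {
      unsigned BlockElts = BlockSizeBits / EltSizeBits;
      std::vector<int> M(NumElts);
      for (unsigned i = 0; i < NumElts; ++i)
        M[i] = (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts);
      return M;
    }

    // revMask(8, 16, 32) == {1,0, 3,2, 5,4, 7,6}  (REV32 on v8i16)
    // revMask(8, 8, 64)  == {7,6,5,4,3,2,1,0}     (REV64 on v8i8)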
+
+static bool isZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
+ unsigned NumElts = VT.getVectorNumElements();
+ WhichResult = (M[0] == 0 ? 0 : 1);
+ unsigned Idx = WhichResult * NumElts / 2;
+ for (unsigned i = 0; i != NumElts; i += 2) {
+ if ((M[i] >= 0 && (unsigned)M[i] != Idx) ||
+ (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx + NumElts))
return false;
- // Neon move instr bytemask, where each byte is either 0x00 or 0xff.
- // movi Op=1, Cmode=1110.
- OpCmode = 0x1e;
- uint64_t BitMask = 0xff;
- uint64_t Val = 0;
- unsigned ImmMask = 1;
- Imm = 0;
- for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
- if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
- Val |= BitMask;
- Imm |= ImmMask;
- } else if ((SplatBits & BitMask) != 0) {
+ Idx += 1;
+ }
+
+ return true;
+}
+
+static bool isUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
+ unsigned NumElts = VT.getVectorNumElements();
+ WhichResult = (M[0] == 0 ? 0 : 1);
+ for (unsigned i = 0; i != NumElts; ++i) {
+ if (M[i] < 0)
+ continue; // ignore UNDEF indices
+ if ((unsigned)M[i] != 2 * i + WhichResult)
+ return false;
+ }
+
+ return true;
+}
+
+static bool isTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
+ unsigned NumElts = VT.getVectorNumElements();
+ WhichResult = (M[0] == 0 ? 0 : 1);
+ for (unsigned i = 0; i < NumElts; i += 2) {
+ if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) ||
+ (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + NumElts + WhichResult))
+ return false;
+ }
+ return true;
+}
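For reference, the three permute families these predicates recognise, written out for a two-input v4i32 shuffle of V1 = {0,1,2,3} and V2 = {4,5,6,7} (WhichResult picks the "1" or "2" form). An illustrative aside, not from the patch:

    constexpr int Zip1[4] = {0, 4, 1, 5}; // interleave the low halves
    constexpr int Zip2[4] = {2, 6, 3, 7}; // interleave the high halves
    constexpr int Uzp1[4] = {0, 2, 4, 6}; // even-numbered lanes
    constexpr int Uzp2[4] = {1, 3, 5, 7}; // odd-numbered lanes
    constexpr int Trn1[4] = {0, 4, 2, 6}; // even lanes, transposed
    constexpr int Trn2[4] = {1, 5, 3, 7}; // odd lanes, transposed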
+
+/// isZIP_v_undef_Mask - Special case of isZIPMask for canonical form of
+/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
+/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
+static bool isZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
+ unsigned NumElts = VT.getVectorNumElements();
+ WhichResult = (M[0] == 0 ? 0 : 1);
+ unsigned Idx = WhichResult * NumElts / 2;
+ for (unsigned i = 0; i != NumElts; i += 2) {
+ if ((M[i] >= 0 && (unsigned)M[i] != Idx) ||
+ (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx))
+ return false;
+ Idx += 1;
+ }
+
+ return true;
+}
+
+/// isUZP_v_undef_Mask - Special case of isUZPMask for canonical form of
+/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
+/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
+static bool isUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
+ unsigned Half = VT.getVectorNumElements() / 2;
+ WhichResult = (M[0] == 0 ? 0 : 1);
+ for (unsigned j = 0; j != 2; ++j) {
+ unsigned Idx = WhichResult;
+ for (unsigned i = 0; i != Half; ++i) {
+ int MIdx = M[i + j * Half];
+ if (MIdx >= 0 && (unsigned)MIdx != Idx)
return false;
- }
- BitMask <<= 8;
- ImmMask <<= 1;
+ Idx += 2;
}
- SplatBits = Val;
- VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
- break;
}
+
+ return true;
+}
+
+/// isTRN_v_undef_Mask - Special case of isTRNMask for canonical form of
+/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
+/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
+static bool isTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
+ unsigned NumElts = VT.getVectorNumElements();
+ WhichResult = (M[0] == 0 ? 0 : 1);
+ for (unsigned i = 0; i < NumElts; i += 2) {
+ if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) ||
+ (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + WhichResult))
+ return false;
+ }
+ return true;
+}
+
+static bool isINSMask(ArrayRef<int> M, int NumInputElements,
+ bool &DstIsLeft, int &Anomaly) {
+ if (M.size() != static_cast<size_t>(NumInputElements))
+ return false;
+
+ int NumLHSMatch = 0, NumRHSMatch = 0;
+ int LastLHSMismatch = -1, LastRHSMismatch = -1;
+
+ for (int i = 0; i < NumInputElements; ++i) {
+ if (M[i] == -1) {
+ ++NumLHSMatch;
+ ++NumRHSMatch;
+ continue;
+ }
+
+ if (M[i] == i)
+ ++NumLHSMatch;
+ else
+ LastLHSMismatch = i;
+
+ if (M[i] == i + NumInputElements)
+ ++NumRHSMatch;
+ else
+ LastRHSMismatch = i;
+ }
+
+ if (NumLHSMatch == NumInputElements - 1) {
+ DstIsLeft = true;
+ Anomaly = LastLHSMismatch;
+ return true;
+ } else if (NumRHSMatch == NumInputElements - 1) {
+ DstIsLeft = false;
+ Anomaly = LastRHSMismatch;
+ return true;
+ }
+
+ return false;
+}
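As an illustrative aside (not from the patch), a concrete instance of the INS pattern above:

    // For NumInputElements == 4, the mask {0, 1, 6, 3} keeps V1 in every lane
    // except lane 2 (NumLHSMatch == 3), so DstIsLeft = true and Anomaly = 2.
    // The anomalous mask value is 6, i.e. lane 6 - 4 = 2 of V2, and the
    // shuffle lowers to inserting V2[2] into V1[2].
    constexpr int InsExampleMask[4] = {0, 1, 6, 3};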
+
+static bool isConcatMask(ArrayRef<int> Mask, EVT VT, bool SplitLHS) {
+ if (VT.getSizeInBits() != 128)
+ return false;
+
+ unsigned NumElts = VT.getVectorNumElements();
+
+ for (int I = 0, E = NumElts / 2; I != E; I++) {
+ if (Mask[I] != I)
+ return false;
+ }
+
+ int Offset = NumElts / 2;
+ for (int I = NumElts / 2, E = NumElts; I != E; I++) {
+ if (Mask[I] != I + SplitLHS * Offset)
+ return false;
}
return true;
}
-static SDValue PerformANDCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
+static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG) {
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+ SDValue V0 = Op.getOperand(0);
+ SDValue V1 = Op.getOperand(1);
+ ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
- SelectionDAG &DAG = DCI.DAG;
- SDLoc DL(N);
- EVT VT = N->getValueType(0);
+ if (VT.getVectorElementType() != V0.getValueType().getVectorElementType() ||
+ VT.getVectorElementType() != V1.getValueType().getVectorElementType())
+ return SDValue();
- // We're looking for an SRA/SHL pair which form an SBFX.
+ bool SplitV0 = V0.getValueType().getSizeInBits() == 128;
- if (VT != MVT::i32 && VT != MVT::i64)
+ if (!isConcatMask(Mask, VT, SplitV0))
return SDValue();
- if (!isa<ConstantSDNode>(N->getOperand(1)))
+ EVT CastVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
+ VT.getVectorNumElements() / 2);
+ if (SplitV0) {
+ V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V0,
+ DAG.getConstant(0, MVT::i64));
+ }
+ if (V1.getValueType().getSizeInBits() == 128) {
+ V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V1,
+ DAG.getConstant(0, MVT::i64));
+ }
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, V0, V1);
+}
+
+/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
+/// the specified operations to build the shuffle.
+static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
+ SDValue RHS, SelectionDAG &DAG,
+ SDLoc dl) {
+ unsigned OpNum = (PFEntry >> 26) & 0x0F;
+ unsigned LHSID = (PFEntry >> 13) & ((1 << 13) - 1);
+ unsigned RHSID = (PFEntry >> 0) & ((1 << 13) - 1);
+
+ enum {
+ OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
+ OP_VREV,
+ OP_VDUP0,
+ OP_VDUP1,
+ OP_VDUP2,
+ OP_VDUP3,
+ OP_VEXT1,
+ OP_VEXT2,
+ OP_VEXT3,
+ OP_VUZPL, // VUZP, left result
+ OP_VUZPR, // VUZP, right result
+ OP_VZIPL, // VZIP, left result
+ OP_VZIPR, // VZIP, right result
+ OP_VTRNL, // VTRN, left result
+ OP_VTRNR // VTRN, right result
+ };
+
+ if (OpNum == OP_COPY) {
+ if (LHSID == (1 * 9 + 2) * 9 + 3)
+ return LHS;
+ assert(LHSID == ((4 * 9 + 5) * 9 + 6) * 9 + 7 && "Illegal OP_COPY!");
+ return RHS;
+ }
+
+ SDValue OpLHS, OpRHS;
+ OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
+ OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
+ EVT VT = OpLHS.getValueType();
+
+ switch (OpNum) {
+ default:
+ llvm_unreachable("Unknown shuffle opcode!");
+ case OP_VREV:
+ // VREV divides the vector in half and swaps within the half.
+ if (VT.getVectorElementType() == MVT::i32 ||
+ VT.getVectorElementType() == MVT::f32)
+ return DAG.getNode(AArch64ISD::REV64, dl, VT, OpLHS);
+ // vrev <4 x i16> -> REV32
+ if (VT.getVectorElementType() == MVT::i16)
+ return DAG.getNode(AArch64ISD::REV32, dl, VT, OpLHS);
+ // vrev <4 x i8> -> REV16
+ assert(VT.getVectorElementType() == MVT::i8);
+ return DAG.getNode(AArch64ISD::REV16, dl, VT, OpLHS);
+ case OP_VDUP0:
+ case OP_VDUP1:
+ case OP_VDUP2:
+ case OP_VDUP3: {
+ EVT EltTy = VT.getVectorElementType();
+ unsigned Opcode;
+ if (EltTy == MVT::i8)
+ Opcode = AArch64ISD::DUPLANE8;
+ else if (EltTy == MVT::i16)
+ Opcode = AArch64ISD::DUPLANE16;
+ else if (EltTy == MVT::i32 || EltTy == MVT::f32)
+ Opcode = AArch64ISD::DUPLANE32;
+ else if (EltTy == MVT::i64 || EltTy == MVT::f64)
+ Opcode = AArch64ISD::DUPLANE64;
+ else
+ llvm_unreachable("Invalid vector element type?");
+
+ if (VT.getSizeInBits() == 64)
+ OpLHS = WidenVector(OpLHS, DAG);
+ SDValue Lane = DAG.getConstant(OpNum - OP_VDUP0, MVT::i64);
+ return DAG.getNode(Opcode, dl, VT, OpLHS, Lane);
+ }
+ case OP_VEXT1:
+ case OP_VEXT2:
+ case OP_VEXT3: {
+ unsigned Imm = (OpNum - OP_VEXT1 + 1) * getExtFactor(OpLHS);
+ return DAG.getNode(AArch64ISD::EXT, dl, VT, OpLHS, OpRHS,
+ DAG.getConstant(Imm, MVT::i32));
+ }
+ case OP_VUZPL:
+ return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), OpLHS,
+ OpRHS);
+ case OP_VUZPR:
+ return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), OpLHS,
+ OpRHS);
+ case OP_VZIPL:
+ return DAG.getNode(AArch64ISD::ZIP1, dl, DAG.getVTList(VT, VT), OpLHS,
+ OpRHS);
+ case OP_VZIPR:
+ return DAG.getNode(AArch64ISD::ZIP2, dl, DAG.getVTList(VT, VT), OpLHS,
+ OpRHS);
+ case OP_VTRNL:
+ return DAG.getNode(AArch64ISD::TRN1, dl, DAG.getVTList(VT, VT), OpLHS,
+ OpRHS);
+ case OP_VTRNR:
+ return DAG.getNode(AArch64ISD::TRN2, dl, DAG.getVTList(VT, VT), OpLHS,
+ OpRHS);
+ }
+}
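As an illustrative aside (not from the patch): a standalone sketch of how the 32-bit perfect-shuffle table entries consumed above are unpacked, using the same shifts and masks:

    #include <cstdint>

    struct PFEntryFields {
      unsigned Cost;  // bits 31-30: how many instructions the expansion needs
      unsigned OpNum; // bits 29-26: OP_COPY, OP_VREV, OP_VDUPn, OP_VEXTn, ...
      unsigned LHSID; // bits 25-13: operand ID for the left sub-shuffle
      unsigned RHSID; // bits 12-0:  operand ID for the right sub-shuffle
    };

    static PFEntryFields decodePFEntry(uint32_t PFEntry) {
      return {PFEntry >> 30, (PFEntry >> 26) & 0x0F,
              (PFEntry >> 13) & ((1u << 13) - 1), PFEntry & ((1u << 13) - 1)};
    }

    // An operand ID packs four mask indices base-9 (9 values: lanes 0-7 plus
    // undef). The identity LHS <0,1,2,3> is ((0*9 + 1)*9 + 2)*9 + 3 == 102 and
    // the identity RHS <4,5,6,7> is ((4*9 + 5)*9 + 6)*9 + 7 == 3382, which is
    // what the OP_COPY early-out above compares against.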
+
+static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,
+ SelectionDAG &DAG) {
+ // Check to see if we can use the TBL instruction.
+ SDValue V1 = Op.getOperand(0);
+ SDValue V2 = Op.getOperand(1);
+ SDLoc DL(Op);
+
+ EVT EltVT = Op.getValueType().getVectorElementType();
+ unsigned BytesPerElt = EltVT.getSizeInBits() / 8;
+
+ SmallVector<SDValue, 8> TBLMask;
+ for (int Val : ShuffleMask) {
+ for (unsigned Byte = 0; Byte < BytesPerElt; ++Byte) {
+ unsigned Offset = Byte + Val * BytesPerElt;
+ TBLMask.push_back(DAG.getConstant(Offset, MVT::i32));
+ }
+ }
+
+ MVT IndexVT = MVT::v8i8;
+ unsigned IndexLen = 8;
+ if (Op.getValueType().getSizeInBits() == 128) {
+ IndexVT = MVT::v16i8;
+ IndexLen = 16;
+ }
+
+ SDValue V1Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V1);
+ SDValue V2Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V2);
+
+ SDValue Shuffle;
+ if (V2.getNode()->getOpcode() == ISD::UNDEF) {
+ if (IndexLen == 8)
+ V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V1Cst);
+ Shuffle = DAG.getNode(
+ ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
+ DAG.getConstant(Intrinsic::aarch64_neon_tbl1, MVT::i32), V1Cst,
+ DAG.getNode(ISD::BUILD_VECTOR, DL, IndexVT,
+ makeArrayRef(TBLMask.data(), IndexLen)));
+ } else {
+ if (IndexLen == 8) {
+ V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V2Cst);
+ Shuffle = DAG.getNode(
+ ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
+ DAG.getConstant(Intrinsic::aarch64_neon_tbl1, MVT::i32), V1Cst,
+ DAG.getNode(ISD::BUILD_VECTOR, DL, IndexVT,
+ makeArrayRef(TBLMask.data(), IndexLen)));
+ } else {
+ // FIXME: We cannot, for the moment, emit a TBL2 instruction because we
+ // cannot currently represent the register constraints on the input
+ // table registers.
+ // Shuffle = DAG.getNode(AArch64ISD::TBL2, DL, IndexVT, V1Cst, V2Cst,
+ // DAG.getNode(ISD::BUILD_VECTOR, DL, IndexVT,
+ // &TBLMask[0], IndexLen));
+ Shuffle = DAG.getNode(
+ ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
+ DAG.getConstant(Intrinsic::aarch64_neon_tbl2, MVT::i32), V1Cst, V2Cst,
+ DAG.getNode(ISD::BUILD_VECTOR, DL, IndexVT,
+ makeArrayRef(TBLMask.data(), IndexLen)));
+ }
+ }
+ return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Shuffle);
+}
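As an illustrative aside (not from the patch): a standalone sketch of how the TBL byte indices above are derived from an element-level shuffle mask (each element index expands into BytesPerElt consecutive byte indices):

    #include <vector>

    // Expand an element shuffle mask into TBL byte indices, as the loop above
    // does. Undef (-1) lanes are not treated specially here either; any
    // resulting out-of-range byte index makes TBL produce zero for that byte.
    static std::vector<int> tblByteMask(const std::vector<int> &ShuffleMask,
                                        unsigned BytesPerElt) {
      std::vector<int> TBLMask;
      for (int Val : ShuffleMask)
        for (unsigned Byte = 0; Byte < BytesPerElt; ++Byte)
          TBLMask.push_back(static_cast<int>(Byte) +
                            Val * static_cast<int>(BytesPerElt));
      return TBLMask;
    }

    // A v4i16 mask <2, 3, 0, 1> (2 bytes per element) becomes the byte mask
    // {4,5, 6,7, 0,1, 2,3}.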
+
+static unsigned getDUPLANEOp(EVT EltType) {
+ if (EltType == MVT::i8)
+ return AArch64ISD::DUPLANE8;
+ if (EltType == MVT::i16)
+ return AArch64ISD::DUPLANE16;
+ if (EltType == MVT::i32 || EltType == MVT::f32)
+ return AArch64ISD::DUPLANE32;
+ if (EltType == MVT::i64 || EltType == MVT::f64)
+ return AArch64ISD::DUPLANE64;
+
+ llvm_unreachable("Invalid vector element type?");
+}
+
+SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc dl(Op);
+ EVT VT = Op.getValueType();
+
+ ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
+
+ // Convert shuffles that are directly supported on NEON to target-specific
+ // DAG nodes, instead of keeping them as shuffles and matching them again
+ // during code selection. This is more efficient and avoids the possibility
+ // of inconsistencies between legalization and selection.
+ ArrayRef<int> ShuffleMask = SVN->getMask();
+
+ SDValue V1 = Op.getOperand(0);
+ SDValue V2 = Op.getOperand(1);
+
+ if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0],
+ V1.getValueType().getSimpleVT())) {
+ int Lane = SVN->getSplatIndex();
+ // If this is undef splat, generate it via "just" vdup, if possible.
+ if (Lane == -1)
+ Lane = 0;
+
+ if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR)
+ return DAG.getNode(AArch64ISD::DUP, dl, V1.getValueType(),
+ V1.getOperand(0));
+ // Test if V1 is a BUILD_VECTOR and the lane being referenced is a non-
+ // constant. If so, we can just reference the lane's definition directly.
+ if (V1.getOpcode() == ISD::BUILD_VECTOR &&
+ !isa<ConstantSDNode>(V1.getOperand(Lane)))
+ return DAG.getNode(AArch64ISD::DUP, dl, VT, V1.getOperand(Lane));
+
+ // Otherwise, duplicate from the lane of the input vector.
+ unsigned Opcode = getDUPLANEOp(V1.getValueType().getVectorElementType());
+
+ // SelectionDAGBuilder may have "helpfully" already extracted or concatenated
+ // to make a vector of the same size as this SHUFFLE. We can ignore the
+ // extract entirely, and canonicalise the concat using WidenVector.
+ if (V1.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
+ Lane += cast<ConstantSDNode>(V1.getOperand(1))->getZExtValue();
+ V1 = V1.getOperand(0);
+ } else if (V1.getOpcode() == ISD::CONCAT_VECTORS) {
+ unsigned Idx = Lane >= (int)VT.getVectorNumElements() / 2;
+ Lane -= Idx * VT.getVectorNumElements() / 2;
+ V1 = WidenVector(V1.getOperand(Idx), DAG);
+ } else if (VT.getSizeInBits() == 64)
+ V1 = WidenVector(V1, DAG);
+
+ return DAG.getNode(Opcode, dl, VT, V1, DAG.getConstant(Lane, MVT::i64));
+ }
+
+ if (isREVMask(ShuffleMask, VT, 64))
+ return DAG.getNode(AArch64ISD::REV64, dl, V1.getValueType(), V1, V2);
+ if (isREVMask(ShuffleMask, VT, 32))
+ return DAG.getNode(AArch64ISD::REV32, dl, V1.getValueType(), V1, V2);
+ if (isREVMask(ShuffleMask, VT, 16))
+ return DAG.getNode(AArch64ISD::REV16, dl, V1.getValueType(), V1, V2);
+
+ bool ReverseEXT = false;
+ unsigned Imm;
+ if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm)) {
+ if (ReverseEXT)
+ std::swap(V1, V2);
+ Imm *= getExtFactor(V1);
+ return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V2,
+ DAG.getConstant(Imm, MVT::i32));
+ } else if (V2->getOpcode() == ISD::UNDEF &&
+ isSingletonEXTMask(ShuffleMask, VT, Imm)) {
+ Imm *= getExtFactor(V1);
+ return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V1,
+ DAG.getConstant(Imm, MVT::i32));
+ }
+
+ unsigned WhichResult;
+ if (isZIPMask(ShuffleMask, VT, WhichResult)) {
+ unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2;
+ return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
+ }
+ if (isUZPMask(ShuffleMask, VT, WhichResult)) {
+ unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
+ return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
+ }
+ if (isTRNMask(ShuffleMask, VT, WhichResult)) {
+ unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
+ return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
+ }
+
+ if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
+ unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2;
+ return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
+ }
+ if (isUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
+ unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
+ return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
+ }
+ if (isTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
+ unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
+ return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
+ }
+
+ SDValue Concat = tryFormConcatFromShuffle(Op, DAG);
+ if (Concat.getNode())
+ return Concat;
+
+ bool DstIsLeft;
+ int Anomaly;
+ int NumInputElements = V1.getValueType().getVectorNumElements();
+ if (isINSMask(ShuffleMask, NumInputElements, DstIsLeft, Anomaly)) {
+ SDValue DstVec = DstIsLeft ? V1 : V2;
+ SDValue DstLaneV = DAG.getConstant(Anomaly, MVT::i64);
+
+ SDValue SrcVec = V1;
+ int SrcLane = ShuffleMask[Anomaly];
+ if (SrcLane >= NumInputElements) {
+ SrcVec = V2;
+ SrcLane -= VT.getVectorNumElements();
+ }
+ SDValue SrcLaneV = DAG.getConstant(SrcLane, MVT::i64);
+
+ EVT ScalarVT = VT.getVectorElementType();
+ if (ScalarVT.getSizeInBits() < 32)
+ ScalarVT = MVT::i32;
+
+ return DAG.getNode(
+ ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, SrcVec, SrcLaneV),
+ DstLaneV);
+ }
+
+ // If the shuffle is not directly supported and it has 4 elements, use
+ // the PerfectShuffle-generated table to synthesize it from other shuffles.
+ unsigned NumElts = VT.getVectorNumElements();
+ if (NumElts == 4) {
+ unsigned PFIndexes[4];
+ for (unsigned i = 0; i != 4; ++i) {
+ if (ShuffleMask[i] < 0)
+ PFIndexes[i] = 8;
+ else
+ PFIndexes[i] = ShuffleMask[i];
+ }
+
+ // Compute the index in the perfect shuffle table.
+ unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
+ PFIndexes[2] * 9 + PFIndexes[3];
+ unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
+ unsigned Cost = (PFEntry >> 30);
+
+ if (Cost <= 4)
+ return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
+ }
+
+ return GenerateTBL(Op, ShuffleMask, DAG);
+}
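As an illustrative aside (not from the patch): a standalone sketch of the table key computed just above, where the four mask indices (with 8 standing in for an undef lane) are packed base-9:

    // Pack a 4-element shuffle mask (entries 0..7, or 8 for undef) into the
    // perfect-shuffle table index used by the lowering above.
    static unsigned pfTableIndex(const unsigned PFIndexes[4]) {
      return PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
             PFIndexes[2] * 9 + PFIndexes[3];
    }

    // For example, mask <1, 0, 2, 3> gives 1*729 + 0*81 + 2*9 + 3 == 750.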
+
+static bool resolveBuildVector(BuildVectorSDNode *BVN, APInt &CnstBits,
+ APInt &UndefBits) {
+ EVT VT = BVN->getValueType(0);
+ APInt SplatBits, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+ if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
+ unsigned NumSplats = VT.getSizeInBits() / SplatBitSize;
+
+ for (unsigned i = 0; i < NumSplats; ++i) {
+ CnstBits <<= SplatBitSize;
+ UndefBits <<= SplatBitSize;
+ CnstBits |= SplatBits.zextOrTrunc(VT.getSizeInBits());
+ UndefBits |= (SplatBits ^ SplatUndef).zextOrTrunc(VT.getSizeInBits());
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+SDValue AArch64TargetLowering::LowerVectorAND(SDValue Op,
+ SelectionDAG &DAG) const {
+ BuildVectorSDNode *BVN =
+ dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode());
+ SDValue LHS = Op.getOperand(0);
+ SDLoc dl(Op);
+ EVT VT = Op.getValueType();
+
+ if (!BVN)
+ return Op;
+
+ APInt CnstBits(VT.getSizeInBits(), 0);
+ APInt UndefBits(VT.getSizeInBits(), 0);
+ if (resolveBuildVector(BVN, CnstBits, UndefBits)) {
+ // We only have BIC vector immediate instruction, which is and-not.
+ CnstBits = ~CnstBits;
+
+ // We make use of a little bit of goto ickiness in order to avoid having to
+ // duplicate the immediate matching logic for the undef toggled case.
+ bool SecondTry = false;
+ AttemptModImm:
+
+ if (CnstBits.getHiBits(64) == CnstBits.getLoBits(64)) {
+ CnstBits = CnstBits.zextOrTrunc(64);
+ uint64_t CnstVal = CnstBits.getZExtValue();
+
+ if (AArch64_AM::isAdvSIMDModImmType1(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType1(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
+ SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(0, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType2(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType2(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
+ SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(8, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType3(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType3(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
+ SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(16, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType4(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType4(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
+ SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(24, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType5(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType5(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
+ SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(0, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType6(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType6(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
+ SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(8, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+ }
+
+ if (SecondTry)
+ goto FailedModImm;
+ SecondTry = true;
+ CnstBits = ~UndefBits;
+ goto AttemptModImm;
+ }
+
+// We can always fall back to a non-immediate AND.
+FailedModImm:
+ return Op;
+}
+
+// Specialized code to quickly find if PotentialBVec is a BuildVector that
+// consists of only the same constant int value, returned in reference arg
+// ConstVal.
+static bool isAllConstantBuildVector(const SDValue &PotentialBVec,
+ uint64_t &ConstVal) {
+ BuildVectorSDNode *Bvec = dyn_cast<BuildVectorSDNode>(PotentialBVec);
+ if (!Bvec)
+ return false;
+ ConstantSDNode *FirstElt = dyn_cast<ConstantSDNode>(Bvec->getOperand(0));
+ if (!FirstElt)
+ return false;
+ EVT VT = Bvec->getValueType(0);
+ unsigned NumElts = VT.getVectorNumElements();
+ for (unsigned i = 1; i < NumElts; ++i)
+ if (dyn_cast<ConstantSDNode>(Bvec->getOperand(i)) != FirstElt)
+ return false;
+ ConstVal = FirstElt->getZExtValue();
+ return true;
+}
+
+static unsigned getIntrinsicID(const SDNode *N) {
+ unsigned Opcode = N->getOpcode();
+ switch (Opcode) {
+ default:
+ return Intrinsic::not_intrinsic;
+ case ISD::INTRINSIC_WO_CHAIN: {
+ unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ if (IID < Intrinsic::num_intrinsics)
+ return IID;
+ return Intrinsic::not_intrinsic;
+ }
+ }
+}
+
+// Attempt to form a vector S[LR]I from (or (and X, BvecC1), (lsl Y, C2)),
+// to (SLI X, Y, C2), where X and Y have matching vector types, BvecC1 is a
+// BUILD_VECTOR with constant element C1, C2 is a constant, and C1 == ~C2.
+// Also, logical shift right -> sri, with the same structure.
+static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) {
+ EVT VT = N->getValueType(0);
+
+ if (!VT.isVector())
return SDValue();
- uint64_t TruncMask = N->getConstantOperandVal(1);
- if (!isMask_64(TruncMask))
+ SDLoc DL(N);
+
+ // Is the first op an AND?
+ const SDValue And = N->getOperand(0);
+ if (And.getOpcode() != ISD::AND)
return SDValue();
- uint64_t Width = CountPopulation_64(TruncMask);
- SDValue Shift = N->getOperand(0);
+ // Is the second op an shl or lshr?
+ SDValue Shift = N->getOperand(1);
+ // This will have been turned into: AArch64ISD::VSHL vector, #shift
+ // or AArch64ISD::VLSHR vector, #shift
+ unsigned ShiftOpc = Shift.getOpcode();
+ if ((ShiftOpc != AArch64ISD::VSHL && ShiftOpc != AArch64ISD::VLSHR))
+ return SDValue();
+ bool IsShiftRight = ShiftOpc == AArch64ISD::VLSHR;
- if (Shift.getOpcode() != ISD::SRL)
+ // Is the shift amount constant?
+ ConstantSDNode *C2node = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
+ if (!C2node)
return SDValue();
- if (!isa<ConstantSDNode>(Shift->getOperand(1)))
+ // Is the and mask vector all constant?
+ uint64_t C1;
+ if (!isAllConstantBuildVector(And.getOperand(1), C1))
return SDValue();
- uint64_t LSB = Shift->getConstantOperandVal(1);
- if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
+ // Is C1 == ~C2, taking into account how much one can shift elements of a
+ // particular size?
+ uint64_t C2 = C2node->getZExtValue();
+ unsigned ElemSizeInBits = VT.getVectorElementType().getSizeInBits();
+ if (C2 > ElemSizeInBits)
+ return SDValue();
+ unsigned ElemMask = (1 << ElemSizeInBits) - 1;
+ if ((C1 & ElemMask) != (~C2 & ElemMask))
return SDValue();
- return DAG.getNode(AArch64ISD::UBFX, DL, VT, Shift.getOperand(0),
- DAG.getConstant(LSB, MVT::i64),
- DAG.getConstant(LSB + Width - 1, MVT::i64));
-}
-
-/// For a true bitfield insert, the bits getting into that contiguous mask
-/// should come from the low part of an existing value: they must be formed from
-/// a compatible SHL operation (unless they're already low). This function
-/// checks that condition and returns the least-significant bit that's
-/// intended. If the operation not a field preparation, -1 is returned.
-static int32_t getLSBForBFI(SelectionDAG &DAG, SDLoc DL, EVT VT,
- SDValue &MaskedVal, uint64_t Mask) {
- if (!isShiftedMask_64(Mask))
- return -1;
-
- // Now we need to alter MaskedVal so that it is an appropriate input for a BFI
- // instruction. BFI will do a left-shift by LSB before applying the mask we've
- // spotted, so in general we should pre-emptively "undo" that by making sure
- // the incoming bits have had a right-shift applied to them.
- //
- // This right shift, however, will combine with existing left/right shifts. In
- // the simplest case of a completely straight bitfield operation, it will be
- // expected to completely cancel out with an existing SHL. More complicated
- // cases (e.g. bitfield to bitfield copy) may still need a real shift before
- // the BFI.
-
- uint64_t LSB = countTrailingZeros(Mask);
- int64_t ShiftRightRequired = LSB;
- if (MaskedVal.getOpcode() == ISD::SHL &&
- isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
- ShiftRightRequired -= MaskedVal.getConstantOperandVal(1);
- MaskedVal = MaskedVal.getOperand(0);
- } else if (MaskedVal.getOpcode() == ISD::SRL &&
- isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
- ShiftRightRequired += MaskedVal.getConstantOperandVal(1);
- MaskedVal = MaskedVal.getOperand(0);
- }
-
- if (ShiftRightRequired > 0)
- MaskedVal = DAG.getNode(ISD::SRL, DL, VT, MaskedVal,
- DAG.getConstant(ShiftRightRequired, MVT::i64));
- else if (ShiftRightRequired < 0) {
- // We could actually end up with a residual left shift, for example with
- // "struc.bitfield = val << 1".
- MaskedVal = DAG.getNode(ISD::SHL, DL, VT, MaskedVal,
- DAG.getConstant(-ShiftRightRequired, MVT::i64));
- }
-
- return LSB;
-}
-
-/// Searches from N for an existing AArch64ISD::BFI node, possibly surrounded by
-/// a mask and an extension. Returns true if a BFI was found and provides
-/// information on its surroundings.
-static bool findMaskedBFI(SDValue N, SDValue &BFI, uint64_t &Mask,
- bool &Extended) {
- Extended = false;
- if (N.getOpcode() == ISD::ZERO_EXTEND) {
- Extended = true;
- N = N.getOperand(0);
- }
-
- if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
- Mask = N->getConstantOperandVal(1);
- N = N.getOperand(0);
- } else {
- // Mask is the whole width.
- Mask = -1ULL >> (64 - N.getValueType().getSizeInBits());
+ SDValue X = And.getOperand(0);
+ SDValue Y = Shift.getOperand(0);
+
+ unsigned Intrin =
+ IsShiftRight ? Intrinsic::aarch64_neon_vsri : Intrinsic::aarch64_neon_vsli;
+ SDValue ResultSLI =
+ DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
+ DAG.getConstant(Intrin, MVT::i32), X, Y, Shift.getOperand(1));
+
+ DEBUG(dbgs() << "aarch64-lower: transformed: \n");
+ DEBUG(N->dump(&DAG));
+ DEBUG(dbgs() << "into: \n");
+ DEBUG(ResultSLI->dump(&DAG));
+
+ ++NumShiftInserts;
+ return ResultSLI;
+}
+
+SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op,
+ SelectionDAG &DAG) const {
+ // Attempt to form a vector S[LR]I from (or (and X, C1), (lsl Y, C2))
+ if (EnableAArch64SlrGeneration) {
+ SDValue Res = tryLowerToSLI(Op.getNode(), DAG);
+ if (Res.getNode())
+ return Res;
}
- if (N.getOpcode() == AArch64ISD::BFI) {
- BFI = N;
- return true;
+ BuildVectorSDNode *BVN =
+ dyn_cast<BuildVectorSDNode>(Op.getOperand(0).getNode());
+ SDValue LHS = Op.getOperand(1);
+ SDLoc dl(Op);
+ EVT VT = Op.getValueType();
+
+ // OR commutes, so try swapping the operands.
+ if (!BVN) {
+ LHS = Op.getOperand(0);
+ BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode());
}
+ if (!BVN)
+ return Op;
- return false;
+ APInt CnstBits(VT.getSizeInBits(), 0);
+ APInt UndefBits(VT.getSizeInBits(), 0);
+ if (resolveBuildVector(BVN, CnstBits, UndefBits)) {
+ // We make use of a little bit of goto ickiness in order to avoid having to
+ // duplicate the immediate matching logic for the undef toggled case.
+ bool SecondTry = false;
+ AttemptModImm:
+
+ if (CnstBits.getHiBits(64) == CnstBits.getLoBits(64)) {
+ CnstBits = CnstBits.zextOrTrunc(64);
+ uint64_t CnstVal = CnstBits.getZExtValue();
+
+ if (AArch64_AM::isAdvSIMDModImmType1(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType1(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
+ SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(0, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType2(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType2(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
+ SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(8, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType3(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType3(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
+ SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(16, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType4(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType4(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
+ SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(24, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType5(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType5(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
+ SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(0, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType6(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType6(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
+ SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(8, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+ }
+
+ if (SecondTry)
+ goto FailedModImm;
+ SecondTry = true;
+ CnstBits = UndefBits;
+ goto AttemptModImm;
+ }
+
+// We can always fall back to a non-immediate OR.
+FailedModImm:
+ return Op;
}
-/// Try to combine a subtree (rooted at an OR) into a "masked BFI" node, which
-/// is roughly equivalent to (and (BFI ...), mask). This form is used because it
-/// can often be further combined with a larger mask. Ultimately, we want mask
-/// to be 2^32-1 or 2^64-1 so the AND can be skipped.
-static SDValue tryCombineToBFI(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- const AArch64Subtarget *Subtarget) {
- SelectionDAG &DAG = DCI.DAG;
- SDLoc DL(N);
- EVT VT = N->getValueType(0);
+// Normalize the operands of BUILD_VECTOR. The value of constant operands will
+// be truncated to fit element width.
+static SDValue NormalizeBuildVector(SDValue Op,
+ SelectionDAG &DAG) {
+ assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
+ SDLoc dl(Op);
+ EVT VT = Op.getValueType();
+ EVT EltTy = VT.getVectorElementType();
- assert(N->getOpcode() == ISD::OR && "Unexpected root");
+ if (EltTy.isFloatingPoint() || EltTy.getSizeInBits() > 16)
+ return Op;
- // We need the LHS to be (and SOMETHING, MASK). Find out what that mask is or
- // abandon the effort.
- SDValue LHS = N->getOperand(0);
- if (LHS.getOpcode() != ISD::AND)
+ SmallVector<SDValue, 16> Ops;
+ for (unsigned I = 0, E = VT.getVectorNumElements(); I != E; ++I) {
+ SDValue Lane = Op.getOperand(I);
+ if (Lane.getOpcode() == ISD::Constant) {
+ APInt LowBits(EltTy.getSizeInBits(),
+ cast<ConstantSDNode>(Lane)->getZExtValue());
+ Lane = DAG.getConstant(LowBits.getZExtValue(), MVT::i32);
+ }
+ Ops.push_back(Lane);
+ }
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
+}
+
+SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc dl(Op);
+ EVT VT = Op.getValueType();
+ Op = NormalizeBuildVector(Op, DAG);
+ BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
+
+ APInt CnstBits(VT.getSizeInBits(), 0);
+ APInt UndefBits(VT.getSizeInBits(), 0);
+ if (resolveBuildVector(BVN, CnstBits, UndefBits)) {
+ // We make use of a little bit of goto ickiness in order to avoid having to
+ // duplicate the immediate matching logic for the undef toggled case.
+ bool SecondTry = false;
+ AttemptModImm:
+
+ if (CnstBits.getHiBits(64) == CnstBits.getLoBits(64)) {
+ CnstBits = CnstBits.zextOrTrunc(64);
+ uint64_t CnstVal = CnstBits.getZExtValue();
+
+ // Certain magic vector constants (used to express things like NOT
+ // and NEG) are passed through unmodified. This allows codegen patterns
+ // for these operations to match. Special-purpose patterns will lower
+ // these immediates to MOVIs if it proves necessary.
+ if (VT.isInteger() && (CnstVal == 0 || CnstVal == ~0ULL))
+ return Op;
+
+ // The many faces of MOVI...
+ if (AArch64_AM::isAdvSIMDModImmType10(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType10(CnstVal);
+ if (VT.getSizeInBits() == 128) {
+ SDValue Mov = DAG.getNode(AArch64ISD::MOVIedit, dl, MVT::v2i64,
+ DAG.getConstant(CnstVal, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ // Support the V64 version via subregister insertion.
+ SDValue Mov = DAG.getNode(AArch64ISD::MOVIedit, dl, MVT::f64,
+ DAG.getConstant(CnstVal, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType1(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType1(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
+ SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(0, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType2(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType2(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
+ SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(8, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType3(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType3(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
+ SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(16, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType4(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType4(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
+ SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(24, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType5(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType5(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
+ SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(0, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType6(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType6(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
+ SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(8, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType7(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType7(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
+ SDValue Mov = DAG.getNode(AArch64ISD::MOVImsl, dl, MovTy,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(264, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType8(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType8(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
+ SDValue Mov = DAG.getNode(AArch64ISD::MOVImsl, dl, MovTy,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(272, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType9(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType9(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v16i8 : MVT::v8i8;
+ SDValue Mov = DAG.getNode(AArch64ISD::MOVI, dl, MovTy,
+ DAG.getConstant(CnstVal, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ // The few faces of FMOV...
+ if (AArch64_AM::isAdvSIMDModImmType11(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType11(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4f32 : MVT::v2f32;
+ SDValue Mov = DAG.getNode(AArch64ISD::FMOV, dl, MovTy,
+ DAG.getConstant(CnstVal, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType12(CnstVal) &&
+ VT.getSizeInBits() == 128) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType12(CnstVal);
+ SDValue Mov = DAG.getNode(AArch64ISD::FMOV, dl, MVT::v2f64,
+ DAG.getConstant(CnstVal, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ // The many faces of MVNI...
+ CnstVal = ~CnstVal;
+ if (AArch64_AM::isAdvSIMDModImmType1(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType1(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
+ SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(0, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType2(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType2(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
+ SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(8, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType3(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType3(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
+ SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(16, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType4(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType4(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
+ SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(24, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType5(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType5(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
+ SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(0, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType6(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType6(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
+ SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(8, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType7(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType7(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
+ SDValue Mov = DAG.getNode(AArch64ISD::MVNImsl, dl, MovTy,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(264, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+
+ if (AArch64_AM::isAdvSIMDModImmType8(CnstVal)) {
+ CnstVal = AArch64_AM::encodeAdvSIMDModImmType8(CnstVal);
+ MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
+ SDValue Mov = DAG.getNode(AArch64ISD::MVNImsl, dl, MovTy,
+ DAG.getConstant(CnstVal, MVT::i32),
+ DAG.getConstant(272, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Mov);
+ }
+ }
+
+ if (SecondTry)
+ goto FailedModImm;
+ SecondTry = true;
+ CnstBits = UndefBits;
+ goto AttemptModImm;
+ }
+FailedModImm:
+
+ // Scan through the operands to find some interesting properties we can
+ // exploit:
+ // 1) If only one value is used, we can use a DUP, or
+ // 2) if only the low element is not undef, we can just insert that, or
+ // 3) if only one constant value is used (w/ some non-constant lanes),
+ // we can splat the constant value into the whole vector then fill
+ // in the non-constant lanes.
+ // 4) FIXME: If different constant values are used, but we can intelligently
+ // select the values we'll be overwriting for the non-constant
+ // lanes such that we can directly materialize the vector
+ // some other way (MOVI, e.g.), we can be sneaky.
+ unsigned NumElts = VT.getVectorNumElements();
+ bool isOnlyLowElement = true;
+ bool usesOnlyOneValue = true;
+ bool usesOnlyOneConstantValue = true;
+ bool isConstant = true;
+ unsigned NumConstantLanes = 0;
+ SDValue Value;
+ SDValue ConstantValue;
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue V = Op.getOperand(i);
+ if (V.getOpcode() == ISD::UNDEF)
+ continue;
+ if (i > 0)
+ isOnlyLowElement = false;
+ if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
+ isConstant = false;
+
+ if (isa<ConstantSDNode>(V) || isa<ConstantFPSDNode>(V)) {
+ ++NumConstantLanes;
+ if (!ConstantValue.getNode())
+ ConstantValue = V;
+ else if (ConstantValue != V)
+ usesOnlyOneConstantValue = false;
+ }
+
+ if (!Value.getNode())
+ Value = V;
+ else if (V != Value)
+ usesOnlyOneValue = false;
+ }
+
+ if (!Value.getNode())
+ return DAG.getUNDEF(VT);
+
+ if (isOnlyLowElement)
+ return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
+
+ // Use DUP for non-constant splats. For f32 constant splats, reduce to
+ // i32 and try again.
+ if (usesOnlyOneValue) {
+ if (!isConstant) {
+ if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+ Value.getValueType() != VT)
+ return DAG.getNode(AArch64ISD::DUP, dl, VT, Value);
+
+ // This is actually a DUPLANExx operation, which keeps everything vectory.
+
+ // DUPLANE works on 128-bit vectors, widen it if necessary.
+ SDValue Lane = Value.getOperand(1);
+ Value = Value.getOperand(0);
+ if (Value.getValueType().getSizeInBits() == 64)
+ Value = WidenVector(Value, DAG);
+
+ unsigned Opcode = getDUPLANEOp(VT.getVectorElementType());
+ return DAG.getNode(Opcode, dl, VT, Value, Lane);
+ }
+
+ if (VT.getVectorElementType().isFloatingPoint()) {
+ SmallVector<SDValue, 8> Ops;
+ MVT NewType =
+ (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
+ for (unsigned i = 0; i < NumElts; ++i)
+ Ops.push_back(DAG.getNode(ISD::BITCAST, dl, NewType, Op.getOperand(i)));
+ EVT VecVT = EVT::getVectorVT(*DAG.getContext(), NewType, NumElts);
+ SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, Ops);
+ Val = LowerBUILD_VECTOR(Val, DAG);
+ if (Val.getNode())
+ return DAG.getNode(ISD::BITCAST, dl, VT, Val);
+ }
+ }
+
+ // If only one constant value was used, and it was used in more than one
+ // lane, start by splatting that value, then replace the non-constant lanes.
+ // This is better than the default, which will perform a separate
+ // initialization for each lane.
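+ // For example, a (build_vector 1, 1, x, 1) lowers to a DUP of the constant
+ // followed by a single INSERT_VECTOR_ELT for the non-constant lane.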
+ if (NumConstantLanes > 0 && usesOnlyOneConstantValue) {
+ SDValue Val = DAG.getNode(AArch64ISD::DUP, dl, VT, ConstantValue);
+ // Now insert the non-constant lanes.
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue V = Op.getOperand(i);
+ SDValue LaneIdx = DAG.getConstant(i, MVT::i64);
+ if (!isa<ConstantSDNode>(V) && !isa<ConstantFPSDNode>(V)) {
+ // Note that type legalization likely mucked about with the VT of the
+ // source operand, so we may have to convert it here before inserting.
+ Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, V, LaneIdx);
+ }
+ }
+ return Val;
+ }
+
+ // If all elements are constants and the case above didn't get hit, fall back
+ // to the default expansion, which will generate a load from the constant
+ // pool.
+ if (isConstant)
return SDValue();
- uint64_t LHSMask;
- if (isa<ConstantSDNode>(LHS.getOperand(1)))
- LHSMask = LHS->getConstantOperandVal(1);
- else
+ // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
+ if (NumElts >= 4) {
+ SDValue shuffle = ReconstructShuffle(Op, DAG);
+ if (shuffle != SDValue())
+ return shuffle;
+ }
+
+ // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
+ // know the default expansion would otherwise fall back on something even
+ // worse. For a vector with one or two non-undef values, the default is a
+ // scalar_to_vector of the elements followed by a shuffle (provided the
+ // shuffle is valid for the target); for everything else it materializes
+ // the elements on the stack one by one and then loads the vector.
+ if (!isConstant && !usesOnlyOneValue) {
+ SDValue Vec = DAG.getUNDEF(VT);
+ SDValue Op0 = Op.getOperand(0);
+ unsigned ElemSize = VT.getVectorElementType().getSizeInBits();
+ unsigned i = 0;
+ // For 32 and 64 bit types, use INSERT_SUBREG for lane zero to
+ // a) Avoid a RMW dependency on the full vector register, and
+ // b) Allow the register coalescer to fold away the copy if the
+ // value is already in an S or D register.
+ if (Op0.getOpcode() != ISD::UNDEF && (ElemSize == 32 || ElemSize == 64)) {
+ unsigned SubIdx = ElemSize == 32 ? AArch64::ssub : AArch64::dsub;
+ MachineSDNode *N =
+ DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl, VT, Vec, Op0,
+ DAG.getTargetConstant(SubIdx, MVT::i32));
+ Vec = SDValue(N, 0);
+ ++i;
+ }
+ for (; i < NumElts; ++i) {
+ SDValue V = Op.getOperand(i);
+ if (V.getOpcode() == ISD::UNDEF)
+ continue;
+ SDValue LaneIdx = DAG.getConstant(i, MVT::i64);
+ Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
+ }
+ return Vec;
+ }
+
+ // Just use the default expansion. We failed to find a better alternative.
+ return SDValue();
+}
+
+SDValue AArch64TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
+ SelectionDAG &DAG) const {
+ assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && "Unknown opcode!");
+
+ // Check for non-constant or out of range lane.
+ EVT VT = Op.getOperand(0).getValueType();
+ ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Op.getOperand(2));
+ if (!CI || CI->getZExtValue() >= VT.getVectorNumElements())
return SDValue();
- // We also need the RHS to be (and SOMETHING, MASK). Find out what that mask
- // is or abandon the effort.
- SDValue RHS = N->getOperand(1);
- if (RHS.getOpcode() != ISD::AND)
+
+ // Insertion/extraction are legal for V128 types.
+ if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
+ VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64)
+ return Op;
+
+ if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 &&
+ VT != MVT::v1i64 && VT != MVT::v2f32)
return SDValue();
- uint64_t RHSMask;
- if (isa<ConstantSDNode>(RHS.getOperand(1)))
- RHSMask = RHS->getConstantOperandVal(1);
- else
+ // For V64 types, we perform insertion by expanding the value
+ // to a V128 type and performing the insertion on that.
+ SDLoc DL(Op);
+ SDValue WideVec = WidenVector(Op.getOperand(0), DAG);
+ EVT WideTy = WideVec.getValueType();
+
+ SDValue Node = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideTy, WideVec,
+ Op.getOperand(1), Op.getOperand(2));
+ // Re-narrow the resultant vector.
+ return NarrowVector(Node, DAG);
+}
+
+SDValue
+AArch64TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
+ SelectionDAG &DAG) const {
+ assert(Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unknown opcode!");
+
+ // Check for non-constant or out of range lane.
+ EVT VT = Op.getOperand(0).getValueType();
+ ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Op.getOperand(1));
+ if (!CI || CI->getZExtValue() >= VT.getVectorNumElements())
+ return SDValue();
+
+ // Insertion/extraction are legal for V128 types.
+ if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
+ VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64)
+ return Op;
+
+ if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 &&
+ VT != MVT::v1i64 && VT != MVT::v2f32)
return SDValue();
- // Can't do anything if the masks are incompatible.
- if (LHSMask & RHSMask)
+ // For V64 types, we perform extraction by expanding the value
+ // to a V128 type and performing the extraction on that.
+ SDLoc DL(Op);
+ SDValue WideVec = WidenVector(Op.getOperand(0), DAG);
+ EVT WideTy = WideVec.getValueType();
+
+ EVT ExtrTy = WideTy.getVectorElementType();
+ if (ExtrTy == MVT::i16 || ExtrTy == MVT::i8)
+ ExtrTy = MVT::i32;
+
+ // For extractions, we just return the result directly.
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtrTy, WideVec,
+ Op.getOperand(1));
+}
+
+SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
+ SelectionDAG &DAG) const {
+ EVT VT = Op.getOperand(0).getValueType();
+ SDLoc dl(Op);
+ // Just in case...
+ if (!VT.isVector())
return SDValue();
- // Now we need one of the masks to be a contiguous field. Without loss of
- // generality that should be the RHS one.
- SDValue Bitfield = LHS.getOperand(0);
- if (getLSBForBFI(DAG, DL, VT, Bitfield, LHSMask) != -1) {
- // We know that LHS is a candidate new value, and RHS isn't already a better
- // one.
- std::swap(LHS, RHS);
- std::swap(LHSMask, RHSMask);
+ ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(1));
+ if (!Cst)
+ return SDValue();
+ unsigned Val = Cst->getZExtValue();
+
+ unsigned Size = Op.getValueType().getSizeInBits();
+ if (Val == 0) {
+ switch (Size) {
+ case 8:
+ return DAG.getTargetExtractSubreg(AArch64::bsub, dl, Op.getValueType(),
+ Op.getOperand(0));
+ case 16:
+ return DAG.getTargetExtractSubreg(AArch64::hsub, dl, Op.getValueType(),
+ Op.getOperand(0));
+ case 32:
+ return DAG.getTargetExtractSubreg(AArch64::ssub, dl, Op.getValueType(),
+ Op.getOperand(0));
+ case 64:
+ return DAG.getTargetExtractSubreg(AArch64::dsub, dl, Op.getValueType(),
+ Op.getOperand(0));
+ default:
+ llvm_unreachable("Unexpected vector type in extract_subvector!");
+ }
+ }
+ // If this is extracting the upper 64-bits of a 128-bit vector, we match
+ // that directly.
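+ // For example, (extract_subvector v4i32:x, 2) is the high v2i32 half and is
+ // returned unchanged, while index 0 is handled by the dsub extract above.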
+ if (Size == 64 && Val * VT.getVectorElementType().getSizeInBits() == 64)
+ return Op;
+
+ return SDValue();
+}
+
+bool AArch64TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
+ EVT VT) const {
+ if (VT.getVectorNumElements() == 4 &&
+ (VT.is128BitVector() || VT.is64BitVector())) {
+ unsigned PFIndexes[4];
+ for (unsigned i = 0; i != 4; ++i) {
+ if (M[i] < 0)
+ PFIndexes[i] = 8;
+ else
+ PFIndexes[i] = M[i];
+ }
+
+ // Compute the index in the perfect shuffle table.
+ unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
+ PFIndexes[2] * 9 + PFIndexes[3];
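+ // e.g. the mask <0,4,1,5> maps to index 0*729 + 4*81 + 1*9 + 5 == 338.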
+ unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
+ unsigned Cost = (PFEntry >> 30);
+
+ if (Cost <= 4)
+ return true;
}
- // We've done our best to put the right operands in the right places, all we
- // can do now is check whether a BFI exists.
- Bitfield = RHS.getOperand(0);
- int32_t LSB = getLSBForBFI(DAG, DL, VT, Bitfield, RHSMask);
- if (LSB == -1)
+ bool DummyBool;
+ int DummyInt;
+ unsigned DummyUnsigned;
+
+ return (ShuffleVectorSDNode::isSplatMask(&M[0], VT) || isREVMask(M, VT, 64) ||
+ isREVMask(M, VT, 32) || isREVMask(M, VT, 16) ||
+ isEXTMask(M, VT, DummyBool, DummyUnsigned) ||
+ // isTBLMask(M, VT) || // FIXME: Port TBL support from ARM.
+ isTRNMask(M, VT, DummyUnsigned) || isUZPMask(M, VT, DummyUnsigned) ||
+ isZIPMask(M, VT, DummyUnsigned) ||
+ isTRN_v_undef_Mask(M, VT, DummyUnsigned) ||
+ isUZP_v_undef_Mask(M, VT, DummyUnsigned) ||
+ isZIP_v_undef_Mask(M, VT, DummyUnsigned) ||
+ isINSMask(M, VT.getVectorNumElements(), DummyBool, DummyInt) ||
+ isConcatMask(M, VT, VT.getSizeInBits() == 128));
+}
+
+/// getVShiftImm - Check if this is a valid build_vector for the immediate
+/// operand of a vector shift operation, where all the elements of the
+/// build_vector must have the same constant integer value.
+static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
+ // Ignore bit_converts.
+ while (Op.getOpcode() == ISD::BITCAST)
+ Op = Op.getOperand(0);
+ BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
+ APInt SplatBits, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+ if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
+ HasAnyUndefs, ElementBits) ||
+ SplatBitSize > ElementBits)
+ return false;
+ Cnt = SplatBits.getSExtValue();
+ return true;
+}
+
+/// isVShiftLImm - Check if this is a valid build_vector for the immediate
+/// operand of a vector shift left operation. That value must be in the range:
+/// 0 <= Value < ElementBits for a left shift; or
+/// 0 <= Value <= ElementBits for a long left shift.
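+/// For example, with 32-bit elements a count of 31 is valid for a plain left
+/// shift, while 32 is only valid for the long form.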
+static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
+ assert(VT.isVector() && "vector shift count is not a vector type");
+ unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
+ if (!getVShiftImm(Op, ElementBits, Cnt))
+ return false;
+ return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
+}
+
+/// isVShiftRImm - Check if this is a valid build_vector for the immediate
+/// operand of a vector shift right operation. For a shift opcode, the value
+/// is positive, but for an intrinsic the value count must be negative. The
+/// absolute value must be in the range:
+/// 1 <= |Value| <= ElementBits for a right shift; or
+/// 1 <= |Value| <= ElementBits/2 for a narrow right shift.
+static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
+ int64_t &Cnt) {
+ assert(VT.isVector() && "vector shift count is not a vector type");
+ unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
+ if (!getVShiftImm(Op, ElementBits, Cnt))
+ return false;
+ if (isIntrinsic)
+ Cnt = -Cnt;
+ return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
+}
+
+SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
+ SelectionDAG &DAG) const {
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+ int64_t Cnt;
+
+ if (!Op.getOperand(1).getValueType().isVector())
+ return Op;
+ unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+
+ switch (Op.getOpcode()) {
+ default:
+ llvm_unreachable("unexpected shift opcode");
+
+ case ISD::SHL:
+ if (isVShiftLImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize)
+ return DAG.getNode(AArch64ISD::VSHL, SDLoc(Op), VT, Op.getOperand(0),
+ DAG.getConstant(Cnt, MVT::i32));
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
+ DAG.getConstant(Intrinsic::aarch64_neon_ushl, MVT::i32),
+ Op.getOperand(0), Op.getOperand(1));
+ case ISD::SRA:
+ case ISD::SRL:
+ // Right shift immediate
+ if (isVShiftRImm(Op.getOperand(1), VT, false, false, Cnt) &&
+ Cnt < EltSize) {
+ unsigned Opc =
+ (Op.getOpcode() == ISD::SRA) ? AArch64ISD::VASHR : AArch64ISD::VLSHR;
+ return DAG.getNode(Opc, SDLoc(Op), VT, Op.getOperand(0),
+ DAG.getConstant(Cnt, MVT::i32));
+ }
+
+ // Right shift register. Note that there is no shift-right-register
+ // instruction; the shift-left-register instruction takes a signed shift
+ // amount, where negative values specify a right shift.
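+ // For example, a variable (srl x, amt) is emitted as ushl(x, neg(amt)), and
+ // (sra x, amt) as sshl(x, neg(amt)).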
+ unsigned Opc = (Op.getOpcode() == ISD::SRA) ? Intrinsic::aarch64_neon_sshl
+ : Intrinsic::aarch64_neon_ushl;
+ // negate the shift amount
+ SDValue NegShift = DAG.getNode(AArch64ISD::NEG, DL, VT, Op.getOperand(1));
+ SDValue NegShiftLeft =
+ DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
+ DAG.getConstant(Opc, MVT::i32), Op.getOperand(0), NegShift);
+ return NegShiftLeft;
+ }
+
+ return SDValue();
+}
+
+static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS,
+ AArch64CC::CondCode CC, bool NoNans, EVT VT,
+ SDLoc dl, SelectionDAG &DAG) {
+ EVT SrcVT = LHS.getValueType();
+
+ BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
+ APInt CnstBits(VT.getSizeInBits(), 0);
+ APInt UndefBits(VT.getSizeInBits(), 0);
+ bool IsCnst = BVN && resolveBuildVector(BVN, CnstBits, UndefBits);
+ bool IsZero = IsCnst && (CnstBits == 0);
+
+ if (SrcVT.getVectorElementType().isFloatingPoint()) {
+ switch (CC) {
+ default:
+ return SDValue();
+ case AArch64CC::NE: {
+ SDValue Fcmeq;
+ if (IsZero)
+ Fcmeq = DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS);
+ else
+ Fcmeq = DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS);
+ return DAG.getNode(AArch64ISD::NOT, dl, VT, Fcmeq);
+ }
+ case AArch64CC::EQ:
+ if (IsZero)
+ return DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS);
+ return DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS);
+ case AArch64CC::GE:
+ if (IsZero)
+ return DAG.getNode(AArch64ISD::FCMGEz, dl, VT, LHS);
+ return DAG.getNode(AArch64ISD::FCMGE, dl, VT, LHS, RHS);
+ case AArch64CC::GT:
+ if (IsZero)
+ return DAG.getNode(AArch64ISD::FCMGTz, dl, VT, LHS);
+ return DAG.getNode(AArch64ISD::FCMGT, dl, VT, LHS, RHS);
+ case AArch64CC::LS:
+ if (IsZero)
+ return DAG.getNode(AArch64ISD::FCMLEz, dl, VT, LHS);
+ return DAG.getNode(AArch64ISD::FCMGE, dl, VT, RHS, LHS);
+ case AArch64CC::LT:
+ if (!NoNans)
+ return SDValue();
+ // If we ignore NaNs then we can use the MI implementation.
+ // Fallthrough.
+ case AArch64CC::MI:
+ if (IsZero)
+ return DAG.getNode(AArch64ISD::FCMLTz, dl, VT, LHS);
+ return DAG.getNode(AArch64ISD::FCMGT, dl, VT, RHS, LHS);
+ }
+ }
+
+ switch (CC) {
+ default:
+ return SDValue();
+ case AArch64CC::NE: {
+ SDValue Cmeq;
+ if (IsZero)
+ Cmeq = DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS);
+ else
+ Cmeq = DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS);
+ return DAG.getNode(AArch64ISD::NOT, dl, VT, Cmeq);
+ }
+ case AArch64CC::EQ:
+ if (IsZero)
+ return DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS);
+ return DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS);
+ case AArch64CC::GE:
+ if (IsZero)
+ return DAG.getNode(AArch64ISD::CMGEz, dl, VT, LHS);
+ return DAG.getNode(AArch64ISD::CMGE, dl, VT, LHS, RHS);
+ case AArch64CC::GT:
+ if (IsZero)
+ return DAG.getNode(AArch64ISD::CMGTz, dl, VT, LHS);
+ return DAG.getNode(AArch64ISD::CMGT, dl, VT, LHS, RHS);
+ case AArch64CC::LE:
+ if (IsZero)
+ return DAG.getNode(AArch64ISD::CMLEz, dl, VT, LHS);
+ return DAG.getNode(AArch64ISD::CMGE, dl, VT, RHS, LHS);
+ case AArch64CC::LS:
+ return DAG.getNode(AArch64ISD::CMHS, dl, VT, RHS, LHS);
+ case AArch64CC::LO:
+ return DAG.getNode(AArch64ISD::CMHI, dl, VT, RHS, LHS);
+ case AArch64CC::LT:
+ if (IsZero)
+ return DAG.getNode(AArch64ISD::CMLTz, dl, VT, LHS);
+ return DAG.getNode(AArch64ISD::CMGT, dl, VT, RHS, LHS);
+ case AArch64CC::HI:
+ return DAG.getNode(AArch64ISD::CMHI, dl, VT, LHS, RHS);
+ case AArch64CC::HS:
+ return DAG.getNode(AArch64ISD::CMHS, dl, VT, LHS, RHS);
+ }
+}
+
+SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op,
+ SelectionDAG &DAG) const {
+ ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+ SDLoc dl(Op);
+
+ if (LHS.getValueType().getVectorElementType().isInteger()) {
+ assert(LHS.getValueType() == RHS.getValueType());
+ AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
+ return EmitVectorComparison(LHS, RHS, AArch64CC, false, Op.getValueType(),
+ dl, DAG);
+ }
+
+ assert(LHS.getValueType().getVectorElementType() == MVT::f32 ||
+ LHS.getValueType().getVectorElementType() == MVT::f64);
+
+ // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
+ // clean. Some of them require two branches to implement.
+ AArch64CC::CondCode CC1, CC2;
+ bool ShouldInvert;
+ changeVectorFPCCToAArch64CC(CC, CC1, CC2, ShouldInvert);
+
+ bool NoNaNs = getTargetMachine().Options.NoNaNsFPMath;
+ SDValue Cmp =
+ EmitVectorComparison(LHS, RHS, CC1, NoNaNs, Op.getValueType(), dl, DAG);
+ if (!Cmp.getNode())
return SDValue();
- uint32_t Width = CountPopulation_64(RHSMask);
- assert(Width && "Expected non-zero bitfield width");
+ if (CC2 != AArch64CC::AL) {
+ SDValue Cmp2 =
+ EmitVectorComparison(LHS, RHS, CC2, NoNaNs, Op.getValueType(), dl, DAG);
+ if (!Cmp2.getNode())
+ return SDValue();
- SDValue BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
- LHS.getOperand(0), Bitfield,
- DAG.getConstant(LSB, MVT::i64),
- DAG.getConstant(Width, MVT::i64));
+ Cmp = DAG.getNode(ISD::OR, dl, Cmp.getValueType(), Cmp, Cmp2);
+ }
- // Mask is trivial
- if ((LHSMask | RHSMask) == (-1ULL >> (64 - VT.getSizeInBits())))
- return BFI;
+ if (ShouldInvert)
+ return Cmp = DAG.getNOT(dl, Cmp, Cmp.getValueType());
- return DAG.getNode(ISD::AND, DL, VT, BFI,
- DAG.getConstant(LHSMask | RHSMask, VT));
+ return Cmp;
}
-/// Search for the bitwise combining (with careful masks) of a MaskedBFI and its
-/// original input. This is surprisingly common because SROA splits things up
-/// into i8 chunks, so the originally detected MaskedBFI may actually only act
-/// on the low (say) byte of a word. This is then orred into the rest of the
-/// word afterwards.
-///
-/// Basic input: (or (and OLDFIELD, MASK1), (MaskedBFI MASK2, OLDFIELD, ...)).
-///
-/// If MASK1 and MASK2 are compatible, we can fold the whole thing into the
-/// MaskedBFI. We can also deal with a certain amount of extend/truncate being
-/// involved.
-static SDValue tryCombineToLargerBFI(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- const AArch64Subtarget *Subtarget) {
- SelectionDAG &DAG = DCI.DAG;
- SDLoc DL(N);
+/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
+/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment
+/// specified in the intrinsic calls.
+bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
+ const CallInst &I,
+ unsigned Intrinsic) const {
+ switch (Intrinsic) {
+ case Intrinsic::aarch64_neon_ld2:
+ case Intrinsic::aarch64_neon_ld3:
+ case Intrinsic::aarch64_neon_ld4:
+ case Intrinsic::aarch64_neon_ld1x2:
+ case Intrinsic::aarch64_neon_ld1x3:
+ case Intrinsic::aarch64_neon_ld1x4:
+ case Intrinsic::aarch64_neon_ld2lane:
+ case Intrinsic::aarch64_neon_ld3lane:
+ case Intrinsic::aarch64_neon_ld4lane:
+ case Intrinsic::aarch64_neon_ld2r:
+ case Intrinsic::aarch64_neon_ld3r:
+ case Intrinsic::aarch64_neon_ld4r: {
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ // Conservatively set memVT to the entire set of vectors loaded.
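+ // For example, an ld3 returning { v4i32, v4i32, v4i32 } is 48 bytes, so
+ // memVT becomes a vector of six i64 elements.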
+ uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8;
+ Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
+ Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1);
+ Info.offset = 0;
+ Info.align = 0;
+ Info.vol = false; // volatile loads with NEON intrinsics not supported
+ Info.readMem = true;
+ Info.writeMem = false;
+ return true;
+ }
+ case Intrinsic::aarch64_neon_st2:
+ case Intrinsic::aarch64_neon_st3:
+ case Intrinsic::aarch64_neon_st4:
+ case Intrinsic::aarch64_neon_st1x2:
+ case Intrinsic::aarch64_neon_st1x3:
+ case Intrinsic::aarch64_neon_st1x4:
+ case Intrinsic::aarch64_neon_st2lane:
+ case Intrinsic::aarch64_neon_st3lane:
+ case Intrinsic::aarch64_neon_st4lane: {
+ Info.opc = ISD::INTRINSIC_VOID;
+ // Conservatively set memVT to the entire set of vectors stored.
+ unsigned NumElts = 0;
+ for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
+ Type *ArgTy = I.getArgOperand(ArgI)->getType();
+ if (!ArgTy->isVectorTy())
+ break;
+ NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8;
+ }
+ Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
+ Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1);
+ Info.offset = 0;
+ Info.align = 0;
+ Info.vol = false; // volatile stores with NEON intrinsics not supported
+ Info.readMem = false;
+ Info.writeMem = true;
+ return true;
+ }
+ case Intrinsic::aarch64_ldaxr:
+ case Intrinsic::aarch64_ldxr: {
+ PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = MVT::getVT(PtrTy->getElementType());
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.align = getDataLayout()->getABITypeAlignment(PtrTy->getElementType());
+ Info.vol = true;
+ Info.readMem = true;
+ Info.writeMem = false;
+ return true;
+ }
+ case Intrinsic::aarch64_stlxr:
+ case Intrinsic::aarch64_stxr: {
+ PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = MVT::getVT(PtrTy->getElementType());
+ Info.ptrVal = I.getArgOperand(1);
+ Info.offset = 0;
+ Info.align = getDataLayout()->getABITypeAlignment(PtrTy->getElementType());
+ Info.vol = true;
+ Info.readMem = false;
+ Info.writeMem = true;
+ return true;
+ }
+ case Intrinsic::aarch64_ldaxp:
+ case Intrinsic::aarch64_ldxp: {
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = MVT::i128;
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.align = 16;
+ Info.vol = true;
+ Info.readMem = true;
+ Info.writeMem = false;
+ return true;
+ }
+ case Intrinsic::aarch64_stlxp:
+ case Intrinsic::aarch64_stxp: {
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = MVT::i128;
+ Info.ptrVal = I.getArgOperand(2);
+ Info.offset = 0;
+ Info.align = 16;
+ Info.vol = true;
+ Info.readMem = false;
+ Info.writeMem = true;
+ return true;
+ }
+ default:
+ break;
+ }
+
+ return false;
+}
+
+// Truncations from 64-bit GPR to 32-bit GPR are free.
+bool AArch64TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
+ if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
+ return false;
+ unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
+ unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
+ return NumBits1 > NumBits2;
+}
+bool AArch64TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
+ if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger())
+ return false;
+ unsigned NumBits1 = VT1.getSizeInBits();
+ unsigned NumBits2 = VT2.getSizeInBits();
+ return NumBits1 > NumBits2;
+}
+
+// All 32-bit GPR operations implicitly zero the high-half of the corresponding
+// 64-bit GPR.
+bool AArch64TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
+ if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
+ return false;
+ unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
+ unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
+ return NumBits1 == 32 && NumBits2 == 64;
+}
+bool AArch64TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
+ if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger())
+ return false;
+ unsigned NumBits1 = VT1.getSizeInBits();
+ unsigned NumBits2 = VT2.getSizeInBits();
+ return NumBits1 == 32 && NumBits2 == 64;
+}
+
+bool AArch64TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
+ EVT VT1 = Val.getValueType();
+ if (isZExtFree(VT1, VT2)) {
+ return true;
+ }
+
+ if (Val.getOpcode() != ISD::LOAD)
+ return false;
+
+ // 8-, 16-, and 32-bit integer loads all implicitly zero-extend.
+ return (VT1.isSimple() && !VT1.isVector() && VT1.isInteger() &&
+ VT2.isSimple() && !VT2.isVector() && VT2.isInteger() &&
+ VT1.getSizeInBits() <= 32);
+}
+
+bool AArch64TargetLowering::hasPairedLoad(Type *LoadedType,
+ unsigned &RequiredAligment) const {
+ if (!LoadedType->isIntegerTy() && !LoadedType->isFloatTy())
+ return false;
+ // Cyclone supports unaligned accesses.
+ RequiredAligment = 0;
+ unsigned NumBits = LoadedType->getPrimitiveSizeInBits();
+ return NumBits == 32 || NumBits == 64;
+}
+
+bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType,
+ unsigned &RequiredAligment) const {
+ if (!LoadedType.isSimple() ||
+ (!LoadedType.isInteger() && !LoadedType.isFloatingPoint()))
+ return false;
+ // Cyclone supports unaligned accesses.
+ RequiredAligment = 0;
+ unsigned NumBits = LoadedType.getSizeInBits();
+ return NumBits == 32 || NumBits == 64;
+}
+
+static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign,
+ unsigned AlignCheck) {
+ return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
+ (DstAlign == 0 || DstAlign % AlignCheck == 0));
+}
+
+EVT AArch64TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
+ unsigned SrcAlign, bool IsMemset,
+ bool ZeroMemset,
+ bool MemcpyStrSrc,
+ MachineFunction &MF) const {
+ // Don't use AdvSIMD to implement 16-byte memset. It would have taken one
+ // instruction to materialize the v2i64 zero and one store (with restrictive
+ // addressing mode). Just do two i64 stores of zero-registers.
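+ // For example, with FP/SIMD available a 16-byte-aligned memcpy of 32 bytes
+ // is chunked as MVT::f128, while an 8-byte copy falls back to MVT::i64.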
+ bool Fast;
+ const Function *F = MF.getFunction();
+ if (Subtarget->hasFPARMv8() && !IsMemset && Size >= 16 &&
+ !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::NoImplicitFloat) &&
+ (memOpAlign(SrcAlign, DstAlign, 16) ||
+ (allowsUnalignedMemoryAccesses(MVT::f128, 0, &Fast) && Fast)))
+ return MVT::f128;
+
+ return Size >= 8 ? MVT::i64 : MVT::i32;
+}
+
+// 12-bit optionally shifted immediates are legal for adds.
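+// For example, 0xabc and 0xabc000 are legal, while 0xabc001 and 0x1000000 are
+// not, since neither fits imm12 or imm12 shifted left by 12.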
+bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const {
+ if ((Immed >> 12) == 0 || ((Immed & 0xfff) == 0 && Immed >> 24 == 0))
+ return true;
+ return false;
+}
+
+// Integer comparisons are implemented with ADDS/SUBS, so the range of valid
+// immediates is the same as for an add or a sub.
+bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Immed) const {
+ if (Immed < 0)
+ Immed *= -1;
+ return isLegalAddImmediate(Immed);
+}
+
+/// isLegalAddressingMode - Return true if the addressing mode represented
+/// by AM is legal for this target, for a load/store of the specified type.
+bool AArch64TargetLowering::isLegalAddressingMode(const AddrMode &AM,
+ Type *Ty) const {
+ // AArch64 has five basic addressing modes:
+ // reg
+ // reg + 9-bit signed offset
+ // reg + SIZE_IN_BYTES * 12-bit unsigned offset
+ // reg1 + reg2
+ // reg + SIZE_IN_BYTES * reg
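+ // For an i64 access this covers, e.g., [x0], [x0, #-256], [x0, #32760],
+ // [x0, x1] and [x0, x1, lsl #3].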
+
+ // No global is ever allowed as a base.
+ if (AM.BaseGV)
+ return false;
+
+ // No reg+reg+imm addressing.
+ if (AM.HasBaseReg && AM.BaseOffs && AM.Scale)
+ return false;
+
+ // check reg + imm case:
+ // i.e., reg + 0, reg + imm9, reg + SIZE_IN_BYTES * uimm12
+ uint64_t NumBytes = 0;
+ if (Ty->isSized()) {
+ uint64_t NumBits = getDataLayout()->getTypeSizeInBits(Ty);
+ NumBytes = NumBits / 8;
+ if (!isPowerOf2_64(NumBits))
+ NumBytes = 0;
+ }
+
+ if (!AM.Scale) {
+ int64_t Offset = AM.BaseOffs;
+
+ // 9-bit signed offset
+ if (Offset >= -(1LL << 9) && Offset <= (1LL << 9) - 1)
+ return true;
+
+ // 12-bit unsigned offset
+ unsigned shift = Log2_64(NumBytes);
+ if (NumBytes && Offset > 0 && (Offset / NumBytes) <= (1LL << 12) - 1 &&
+ // Must be a multiple of NumBytes (NumBytes is a power of 2)
+ (Offset >> shift) << shift == Offset)
+ return true;
+ return false;
+ }
+
+ // Check reg1 + SIZE_IN_BYTES * reg2 and reg1 + reg2
+
+ if (!AM.Scale || AM.Scale == 1 ||
+ (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes))
+ return true;
+ return false;
+}
+
+int AArch64TargetLowering::getScalingFactorCost(const AddrMode &AM,
+ Type *Ty) const {
+ // Scaling factors are not free at all.
+ // Operands | Rt Latency
+ // -------------------------------------------
+ // Rt, [Xn, Xm] | 4
+ // -------------------------------------------
+ // Rt, [Xn, Xm, lsl #imm] | Rn: 4 Rm: 5
+ // Rt, [Xn, Wm, <extend> #imm] |
+ if (isLegalAddressingMode(AM, Ty))
+ // Scale represents reg2 * scale, thus account for 1 if
+ // it is not equal to 0 or 1.
+ return AM.Scale != 0 && AM.Scale != 1;
+ return -1;
+}
+
+bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
+ VT = VT.getScalarType();
+
+ if (!VT.isSimple())
+ return false;
+
+ switch (VT.getSimpleVT().SimpleTy) {
+ case MVT::f32:
+ case MVT::f64:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+const MCPhysReg *
+AArch64TargetLowering::getScratchRegisters(CallingConv::ID) const {
+ // LR is a callee-save register, but we must treat it as clobbered by any call
+ // site. Hence we include LR in the scratch registers, which are in turn added
+ // as implicit-defs for stackmaps and patchpoints.
+ static const MCPhysReg ScratchRegs[] = {
+ AArch64::X16, AArch64::X17, AArch64::LR, 0
+ };
+ return ScratchRegs;
+}
+
+bool
+AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N) const {
EVT VT = N->getValueType(0);
+ // If N is unsigned bit extraction: ((x >> C) & mask), then do not combine
+ // it with shift to let it be lowered to UBFX.
+ if (N->getOpcode() == ISD::AND && (VT == MVT::i32 || VT == MVT::i64) &&
+ isa<ConstantSDNode>(N->getOperand(1))) {
+ uint64_t TruncMask = N->getConstantOperandVal(1);
+ if (isMask_64(TruncMask) &&
+ N->getOperand(0).getOpcode() == ISD::SRL &&
+ isa<ConstantSDNode>(N->getOperand(0)->getOperand(1)))
+ return false;
+ }
+ return true;
+}
- // First job is to hunt for a MaskedBFI on either the left or right. Swap
- // operands if it's actually on the right.
- SDValue BFI;
- SDValue PossExtraMask;
- uint64_t ExistingMask = 0;
- bool Extended = false;
- if (findMaskedBFI(N->getOperand(0), BFI, ExistingMask, Extended))
- PossExtraMask = N->getOperand(1);
- else if (findMaskedBFI(N->getOperand(1), BFI, ExistingMask, Extended))
- PossExtraMask = N->getOperand(0);
- else
+bool AArch64TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
+ Type *Ty) const {
+ assert(Ty->isIntegerTy());
+
+ unsigned BitSize = Ty->getPrimitiveSizeInBits();
+ if (BitSize == 0)
+ return false;
+
+ int64_t Val = Imm.getSExtValue();
+ if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, BitSize))
+ return true;
+
+ if ((int64_t)Val < 0)
+ Val = ~Val;
+ if (BitSize == 32)
+ Val &= (1LL << 32) - 1;
+
+ unsigned LZ = countLeadingZeros((uint64_t)Val);
+ unsigned Shift = (63 - LZ) / 16;
+ // MOVZ is free so return true for one or fewer MOVK.
+ return (Shift < 3) ? true : false;
+}
+
+// Generate SUBS and CSEL for integer abs.
+static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
+ EVT VT = N->getValueType(0);
+
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ SDLoc DL(N);
+
+ // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
+ // and change it to SUB and CSEL.
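+ // For i64 this matches the common abs idiom (x + (x >> 63)) ^ (x >> 63),
+ // where the shift is arithmetic.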
+ if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
+ N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1 &&
+ N1.getOpcode() == ISD::SRA && N1.getOperand(0) == N0.getOperand(0))
+ if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
+ if (Y1C->getAPIntValue() == VT.getSizeInBits() - 1) {
+ SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
+ N0.getOperand(0));
+ // Generate SUBS & CSEL.
+ SDValue Cmp =
+ DAG.getNode(AArch64ISD::SUBS, DL, DAG.getVTList(VT, MVT::i32),
+ N0.getOperand(0), DAG.getConstant(0, VT));
+ return DAG.getNode(AArch64ISD::CSEL, DL, VT, N0.getOperand(0), Neg,
+ DAG.getConstant(AArch64CC::PL, MVT::i32),
+ SDValue(Cmp.getNode(), 1));
+ }
+ return SDValue();
+}
+
+// performXorCombine - Attempts to handle integer ABS.
+static SDValue performXorCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const AArch64Subtarget *Subtarget) {
+ if (DCI.isBeforeLegalizeOps())
return SDValue();
- // We can only combine a BFI with another compatible mask.
- if (PossExtraMask.getOpcode() != ISD::AND ||
- !isa<ConstantSDNode>(PossExtraMask.getOperand(1)))
+ return performIntegerAbsCombine(N, DAG);
+}
+
+static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const AArch64Subtarget *Subtarget) {
+ if (DCI.isBeforeLegalizeOps())
return SDValue();
- uint64_t ExtraMask = PossExtraMask->getConstantOperandVal(1);
+ // Multiplication of a power of two plus/minus one can be done more
+ // cheaply as a shift+add/sub. For now, this is true unilaterally. If
+ // future CPUs have a cheaper MADD instruction, this may need to be
+ // gated on a subtarget feature. For Cyclone, 32-bit MADD is 4 cycles and
+ // 64-bit is 5 cycles, so this is always a win.
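+ // For instance, (mul x, 9) becomes (add (shl x, 3), x) and (mul x, -7)
+ // becomes (sub x, (shl x, 3)).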
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
+ APInt Value = C->getAPIntValue();
+ EVT VT = N->getValueType(0);
+ if (Value.isNonNegative()) {
+ // (mul x, 2^N + 1) => (add (shl x, N), x)
+ APInt VM1 = Value - 1;
+ if (VM1.isPowerOf2()) {
+ SDValue ShiftedVal =
+ DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0),
+ DAG.getConstant(VM1.logBase2(), MVT::i64));
+ return DAG.getNode(ISD::ADD, SDLoc(N), VT, ShiftedVal,
+ N->getOperand(0));
+ }
+ // (mul x, 2^N - 1) => (sub (shl x, N), x)
+ APInt VP1 = Value + 1;
+ if (VP1.isPowerOf2()) {
+ SDValue ShiftedVal =
+ DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0),
+ DAG.getConstant(VP1.logBase2(), MVT::i64));
+ return DAG.getNode(ISD::SUB, SDLoc(N), VT, ShiftedVal,
+ N->getOperand(0));
+ }
+ } else {
+ // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
+ APInt VNM1 = -Value - 1;
+ if (VNM1.isPowerOf2()) {
+ SDValue ShiftedVal =
+ DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0),
+ DAG.getConstant(VNM1.logBase2(), MVT::i64));
+ SDValue Add =
+ DAG.getNode(ISD::ADD, SDLoc(N), VT, ShiftedVal, N->getOperand(0));
+ return DAG.getNode(ISD::SUB, SDLoc(N), VT, DAG.getConstant(0, VT), Add);
+ }
+ // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
+ APInt VNP1 = -Value + 1;
+ if (VNP1.isPowerOf2()) {
+ SDValue ShiftedVal =
+ DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0),
+ DAG.getConstant(VNP1.logBase2(), MVT::i64));
+ return DAG.getNode(ISD::SUB, SDLoc(N), VT, N->getOperand(0),
+ ShiftedVal);
+ }
+ }
+ }
+ return SDValue();
+}
- // Masks must be compatible.
- if (ExtraMask & ExistingMask)
+static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
+ SelectionDAG &DAG) {
+ // Take advantage of vector comparisons producing 0 or -1 in each lane to
+ // optimize away operation when it's from a constant.
+ //
+ // The general transformation is:
+ // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
+ // AND(VECTOR_CMP(x,y), constant2)
+ // constant2 = UNARYOP(constant)
+
+ // Early exit if this isn't a vector operation, the operand of the
+ // unary operation isn't a bitwise AND, or if the sizes of the operations
+ // aren't the same.
+ EVT VT = N->getValueType(0);
+ if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
+ N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
+ VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
return SDValue();
- SDValue OldBFIVal = BFI.getOperand(0);
- SDValue NewBFIVal = BFI.getOperand(1);
- if (Extended) {
- // We skipped a ZERO_EXTEND above, so the input to the MaskedBFIs should be
- // 32-bit and we'll be forming a 64-bit MaskedBFI. The MaskedBFI arguments
- // need to be made compatible.
- assert(VT == MVT::i64 && BFI.getValueType() == MVT::i32
- && "Invalid types for BFI");
- OldBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, OldBFIVal);
- NewBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, NewBFIVal);
+ // Now check that the other operand of the AND is a constant splat. We could
+ // make the transformation for non-constant splats as well, but it's unclear
+ // that would be a benefit as it would not eliminate any operations, just
+ // perform one more step in scalar code before moving to the vector unit.
+ if (BuildVectorSDNode *BV =
+ dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
+ // Bail out if the vector isn't a constant splat.
+ if (!BV->getConstantSplatNode())
+ return SDValue();
+
+ // Everything checks out. Build up the new and improved node.
+ SDLoc DL(N);
+ EVT IntVT = BV->getValueType(0);
+ // Create a new constant of the appropriate type for the transformed
+ // DAG.
+ SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
+ // The AND node needs bitcasts to/from an integer vector type around it.
+ SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
+ SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
+ N->getOperand(0)->getOperand(0), MaskConst);
+ SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
+ return Res;
}
- // We need the MaskedBFI to be combined with a mask of the *same* value.
- if (PossExtraMask.getOperand(0) != OldBFIVal)
+ return SDValue();
+}
+
+static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG) {
+ // First try to optimize away the conversion when it's conditionally from
+ // a constant. Vectors only.
+ SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
+ if (Res != SDValue())
+ return Res;
+
+ EVT VT = N->getValueType(0);
+ if (VT != MVT::f32 && VT != MVT::f64)
return SDValue();
- BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
- OldBFIVal, NewBFIVal,
- BFI.getOperand(2), BFI.getOperand(3));
+ // Only optimize when the source and destination types have the same width.
+ if (VT.getSizeInBits() != N->getOperand(0).getValueType().getSizeInBits())
+ return SDValue();
- // If the masking is trivial, we don't need to create it.
- if ((ExtraMask | ExistingMask) == (-1ULL >> (64 - VT.getSizeInBits())))
- return BFI;
+ // If the result of an integer load is only used by an integer-to-float
+ // conversion, use an fp load and an AdvSIMD scalar {S|U}CVTF instead.
+ // This eliminates an integer-to-vector-move UOP and improves throughput.
+ SDValue N0 = N->getOperand(0);
+ if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
+ // Do not change the width of a volatile load.
+ !cast<LoadSDNode>(N0)->isVolatile()) {
+ LoadSDNode *LN0 = cast<LoadSDNode>(N0);
+ SDValue Load = DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(),
+ LN0->getPointerInfo(), LN0->isVolatile(),
+ LN0->isNonTemporal(), LN0->isInvariant(),
+ LN0->getAlignment());
+
+ // Make sure successors of the original load stay after it by updating them
+ // to use the new Chain.
+ DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), Load.getValue(1));
+
+ unsigned Opcode =
+ (N->getOpcode() == ISD::SINT_TO_FP) ? AArch64ISD::SITOF : AArch64ISD::UITOF;
+ return DAG.getNode(Opcode, SDLoc(N), VT, Load);
+ }
- return DAG.getNode(ISD::AND, DL, VT, BFI,
- DAG.getConstant(ExtraMask | ExistingMask, VT));
+ return SDValue();
}
/// An EXTR instruction is made up of two shifts, ORed together. This helper
@@ -3351,237 +6586,952 @@ static SDValue tryCombineToEXTR(SDNode *N,
std::swap(ShiftLHS, ShiftRHS);
}
- return DAG.getNode(AArch64ISD::EXTR, DL, VT,
- LHS, RHS,
+ return DAG.getNode(AArch64ISD::EXTR, DL, VT, LHS, RHS,
DAG.getConstant(ShiftRHS, MVT::i64));
}
-/// Target-specific dag combine xforms for ISD::OR
-static SDValue PerformORCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- const AArch64Subtarget *Subtarget) {
-
+static SDValue tryCombineToBSL(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ EVT VT = N->getValueType(0);
SelectionDAG &DAG = DCI.DAG;
SDLoc DL(N);
+
+ if (!VT.isVector())
+ return SDValue();
+
+ SDValue N0 = N->getOperand(0);
+ if (N0.getOpcode() != ISD::AND)
+ return SDValue();
+
+ SDValue N1 = N->getOperand(1);
+ if (N1.getOpcode() != ISD::AND)
+ return SDValue();
+
+ // We only have to look for constant vectors here since the general, variable
+ // case can be handled in TableGen.
+ unsigned Bits = VT.getVectorElementType().getSizeInBits();
+ uint64_t BitMask = Bits == 64 ? -1ULL : ((1ULL << Bits) - 1);
+ for (int i = 1; i >= 0; --i)
+ for (int j = 1; j >= 0; --j) {
+ BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(i));
+ BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(j));
+ if (!BVN0 || !BVN1)
+ continue;
+
+ bool FoundMatch = true;
+ for (unsigned k = 0; k < VT.getVectorNumElements(); ++k) {
+ ConstantSDNode *CN0 = dyn_cast<ConstantSDNode>(BVN0->getOperand(k));
+ ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(BVN1->getOperand(k));
+ if (!CN0 || !CN1 ||
+ CN0->getZExtValue() != (BitMask & ~CN1->getZExtValue())) {
+ FoundMatch = false;
+ break;
+ }
+ }
+
+ if (FoundMatch)
+ return DAG.getNode(AArch64ISD::BSL, DL, VT, SDValue(BVN0, 0),
+ N0->getOperand(1 - i), N1->getOperand(1 - j));
+ }
+
+ return SDValue();
+}
+
+static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
+ const AArch64Subtarget *Subtarget) {
+ // Attempt to form an EXTR from (or (shl VAL1, #N), (srl VAL2, #RegWidth-N))
+ if (!EnableAArch64ExtrGeneration)
+ return SDValue();
+ SelectionDAG &DAG = DCI.DAG;
EVT VT = N->getValueType(0);
- if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
+ if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
return SDValue();
- // Attempt to recognise bitfield-insert operations.
- SDValue Res = tryCombineToBFI(N, DCI, Subtarget);
+ SDValue Res = tryCombineToEXTR(N, DCI);
if (Res.getNode())
return Res;
- // Attempt to combine an existing MaskedBFI operation into one with a larger
- // mask.
- Res = tryCombineToLargerBFI(N, DCI, Subtarget);
+ Res = tryCombineToBSL(N, DCI);
if (Res.getNode())
return Res;
- Res = tryCombineToEXTR(N, DCI);
- if (Res.getNode())
- return Res;
+ return SDValue();
+}
- if (!Subtarget->hasNEON())
+static SDValue performBitcastCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ SelectionDAG &DAG) {
+ // Wait 'til after everything is legalized to try this. That way we have
+ // legal vector types and such.
+ if (DCI.isBeforeLegalizeOps())
return SDValue();
- // Attempt to use vector immediate-form BSL
- // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
+ // Remove extraneous bitcasts around an extract_subvector.
+ // For example,
+ // (v4i16 (bitconvert
+ // (extract_subvector (v2i64 (bitconvert (v8i16 ...)), (i64 1)))))
+ // becomes
+ // (extract_subvector ((v8i16 ...), (i64 4)))
- SDValue N0 = N->getOperand(0);
- if (N0.getOpcode() != ISD::AND)
+ // Only interested in 64-bit vectors as the ultimate result.
+ EVT VT = N->getValueType(0);
+ if (!VT.isVector())
return SDValue();
-
- SDValue N1 = N->getOperand(1);
- if (N1.getOpcode() != ISD::AND)
+ if (VT.getSimpleVT().getSizeInBits() != 64)
return SDValue();
-
- if (VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
- APInt SplatUndef;
- unsigned SplatBitSize;
- bool HasAnyUndefs;
- BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
- APInt SplatBits0;
- if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
- HasAnyUndefs) &&
- !HasAnyUndefs) {
- BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
- APInt SplatBits1;
- if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
- HasAnyUndefs) &&
- !HasAnyUndefs && SplatBits0 == ~SplatBits1) {
- // Canonicalize the vector type to make instruction selection simpler.
- EVT CanonicalVT = VT.is128BitVector() ? MVT::v16i8 : MVT::v8i8;
- SDValue Result = DAG.getNode(AArch64ISD::NEON_BSL, DL, CanonicalVT,
- N0->getOperand(1), N0->getOperand(0),
- N1->getOperand(0));
- return DAG.getNode(ISD::BITCAST, DL, VT, Result);
- }
- }
+ // Is the operand an extract_subvector starting at the beginning or halfway
+ // point of the vector? A low half may also come through as an
+ // EXTRACT_SUBREG, so look for that, too.
+ SDValue Op0 = N->getOperand(0);
+ if (Op0->getOpcode() != ISD::EXTRACT_SUBVECTOR &&
+ !(Op0->isMachineOpcode() &&
+ Op0->getMachineOpcode() == AArch64::EXTRACT_SUBREG))
+ return SDValue();
+ uint64_t idx = cast<ConstantSDNode>(Op0->getOperand(1))->getZExtValue();
+ if (Op0->getOpcode() == ISD::EXTRACT_SUBVECTOR) {
+ if (Op0->getValueType(0).getVectorNumElements() != idx && idx != 0)
+ return SDValue();
+ } else if (Op0->getMachineOpcode() == AArch64::EXTRACT_SUBREG) {
+ if (idx != AArch64::dsub)
+ return SDValue();
+ // The dsub reference is equivalent to a lane zero subvector reference.
+ idx = 0;
}
+ // Look through the bitcast of the input to the extract.
+ if (Op0->getOperand(0)->getOpcode() != ISD::BITCAST)
+ return SDValue();
+ SDValue Source = Op0->getOperand(0)->getOperand(0);
+ // If the source type has twice the number of elements as our destination
+ // type, we know this is an extract of the high or low half of the vector.
+ EVT SVT = Source->getValueType(0);
+ if (SVT.getVectorNumElements() != VT.getVectorNumElements() * 2)
+ return SDValue();
- return SDValue();
+ DEBUG(dbgs() << "aarch64-lower: bitcast extract_subvector simplification\n");
+
+ // Create the simplified form to just extract the low or high half of the
+ // vector directly rather than bothering with the bitcasts.
+ SDLoc dl(N);
+ unsigned NumElements = VT.getVectorNumElements();
+ if (idx) {
+ SDValue HalfIdx = DAG.getConstant(NumElements, MVT::i64);
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Source, HalfIdx);
+ } else {
+ SDValue SubReg = DAG.getTargetConstant(AArch64::dsub, MVT::i32);
+ return SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, VT,
+ Source, SubReg),
+ 0);
+ }
}
-/// Target-specific dag combine xforms for ISD::SRA
-static SDValue PerformSRACombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
+static SDValue performConcatVectorsCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ SelectionDAG &DAG) {
+ // Wait 'til after everything is legalized to try this. That way we have
+ // legal vector types and such.
+ if (DCI.isBeforeLegalizeOps())
+ return SDValue();
- SelectionDAG &DAG = DCI.DAG;
- SDLoc DL(N);
+ SDLoc dl(N);
EVT VT = N->getValueType(0);
- // We're looking for an SRA/SHL pair which form an SBFX.
+ // If we see a (concat_vectors (v1x64 A), (v1x64 A)) it's really a vector
+ // splat. The indexed instructions are going to be expecting a DUPLANE64, so
+ // canonicalise to that.
+ if (N->getOperand(0) == N->getOperand(1) && VT.getVectorNumElements() == 2) {
+ assert(VT.getVectorElementType().getSizeInBits() == 64);
+ return DAG.getNode(AArch64ISD::DUPLANE64, dl, VT,
+ WidenVector(N->getOperand(0), DAG),
+ DAG.getConstant(0, MVT::i64));
+ }
- if (VT != MVT::i32 && VT != MVT::i64)
+ // Canonicalise concat_vectors so that the right-hand vector has as few
+ // bit-casts as possible before its real operation. The primary matching
+ // destination for these operations will be the narrowing "2" instructions,
+ // which depend on the operation being performed on this right-hand vector.
+ // For example,
+ // (concat_vectors LHS, (v1i64 (bitconvert (v4i16 RHS))))
+ // becomes
+ // (bitconvert (concat_vectors (v4i16 (bitconvert LHS)), RHS))
+
+ SDValue Op1 = N->getOperand(1);
+ if (Op1->getOpcode() != ISD::BITCAST)
return SDValue();
+ SDValue RHS = Op1->getOperand(0);
+ MVT RHSTy = RHS.getValueType().getSimpleVT();
+ // If the RHS is not a vector, this is not the pattern we're looking for.
+ if (!RHSTy.isVector())
+ return SDValue();
+
+ DEBUG(dbgs() << "aarch64-lower: concat_vectors bitcast simplification\n");
+
+ MVT ConcatTy = MVT::getVectorVT(RHSTy.getVectorElementType(),
+ RHSTy.getVectorNumElements() * 2);
+ return DAG.getNode(
+ ISD::BITCAST, dl, VT,
+ DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatTy,
+ DAG.getNode(ISD::BITCAST, dl, RHSTy, N->getOperand(0)), RHS));
+}
- if (!isa<ConstantSDNode>(N->getOperand(1)))
+static SDValue tryCombineFixedPointConvert(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ SelectionDAG &DAG) {
+ // Wait 'til after everything is legalized to try this. That way we have
+ // legal vector types and such.
+ if (DCI.isBeforeLegalizeOps())
return SDValue();
+ // Transform a scalar conversion of a value from a lane extract into a
+ // lane extract of a vector conversion. E.g., from foo1 to foo2:
+ // double foo1(int64x2_t a) { return vcvtd_n_f64_s64(a[1], 9); }
+ // double foo2(int64x2_t a) { return vcvtq_n_f64_s64(a, 9)[1]; }
+ //
+ // The second form interacts better with instruction selection and the
+ // register allocator to avoid cross-class register copies that aren't
+ // coalescable due to a lane reference.
+
+ // Check the operand and see if it originates from a lane extract.
+ SDValue Op1 = N->getOperand(1);
+ if (Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
+ // Yep, no additional predication needed. Perform the transform.
+ SDValue IID = N->getOperand(0);
+ SDValue Shift = N->getOperand(2);
+ SDValue Vec = Op1.getOperand(0);
+ SDValue Lane = Op1.getOperand(1);
+ EVT ResTy = N->getValueType(0);
+ EVT VecResTy;
+ SDLoc DL(N);
+
+ // The vector width should be 128 bits by the time we get here, even
+ // if it started as 64 bits (the extract_vector handling will have
+ // done so).
+ assert(Vec.getValueType().getSizeInBits() == 128 &&
+ "unexpected vector size on extract_vector_elt!");
+ if (Vec.getValueType() == MVT::v4i32)
+ VecResTy = MVT::v4f32;
+ else if (Vec.getValueType() == MVT::v2i64)
+ VecResTy = MVT::v2f64;
+ else
+ llvm_unreachable("unexpected vector type!");
- uint64_t ExtraSignBits = N->getConstantOperandVal(1);
- SDValue Shift = N->getOperand(0);
+ SDValue Convert =
+ DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VecResTy, IID, Vec, Shift);
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResTy, Convert, Lane);
+ }
+ return SDValue();
+}
- if (Shift.getOpcode() != ISD::SHL)
+// AArch64 high-vector "long" operations are formed by performing the non-high
+// version on an extract_subvector of each operand which gets the high half:
+//
+// (longop2 LHS, RHS) == (longop (extract_high LHS), (extract_high RHS))
+//
+// However, there are cases which don't have an extract_high explicitly, but
+// have another operation that can be made compatible with one for free. For
+// example:
+//
+// (dupv64 scalar) --> (extract_high (dup128 scalar))
+//
+// This routine does the actual conversion of such DUPs, once outer routines
+// have determined that everything else is in order.
+static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG) {
+ // We can handle most types of duplicate, but the lane ones have an extra
+ // operand saying *which* lane, so we need to know.
+ bool IsDUPLANE;
+ switch (N.getOpcode()) {
+ case AArch64ISD::DUP:
+ IsDUPLANE = false;
+ break;
+ case AArch64ISD::DUPLANE8:
+ case AArch64ISD::DUPLANE16:
+ case AArch64ISD::DUPLANE32:
+ case AArch64ISD::DUPLANE64:
+ IsDUPLANE = true;
+ break;
+ default:
return SDValue();
+ }
- if (!isa<ConstantSDNode>(Shift->getOperand(1)))
+ MVT NarrowTy = N.getSimpleValueType();
+ if (!NarrowTy.is64BitVector())
return SDValue();
- uint64_t BitsOnLeft = Shift->getConstantOperandVal(1);
- uint64_t Width = VT.getSizeInBits() - ExtraSignBits;
- uint64_t LSB = VT.getSizeInBits() - Width - BitsOnLeft;
+ MVT ElementTy = NarrowTy.getVectorElementType();
+ unsigned NumElems = NarrowTy.getVectorNumElements();
+ MVT NewDUPVT = MVT::getVectorVT(ElementTy, NumElems * 2);
- if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
- return SDValue();
+ SDValue NewDUP;
+ if (IsDUPLANE)
+ NewDUP = DAG.getNode(N.getOpcode(), SDLoc(N), NewDUPVT, N.getOperand(0),
+ N.getOperand(1));
+ else
+ NewDUP = DAG.getNode(AArch64ISD::DUP, SDLoc(N), NewDUPVT, N.getOperand(0));
- return DAG.getNode(AArch64ISD::SBFX, DL, VT, Shift.getOperand(0),
- DAG.getConstant(LSB, MVT::i64),
- DAG.getConstant(LSB + Width - 1, MVT::i64));
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N.getNode()), NarrowTy,
+ NewDUP, DAG.getConstant(NumElems, MVT::i64));
}
-/// Check if this is a valid build_vector for the immediate operand of
-/// a vector shift operation, where all the elements of the build_vector
-/// must have the same constant integer value.
-static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
- // Ignore bit_converts.
- while (Op.getOpcode() == ISD::BITCAST)
- Op = Op.getOperand(0);
- BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
- APInt SplatBits, SplatUndef;
- unsigned SplatBitSize;
- bool HasAnyUndefs;
- if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
- HasAnyUndefs, ElementBits) ||
- SplatBitSize > ElementBits)
- return false;
- Cnt = SplatBits.getSExtValue();
- return true;
+static bool isEssentiallyExtractSubvector(SDValue N) {
+ if (N.getOpcode() == ISD::EXTRACT_SUBVECTOR)
+ return true;
+
+ return N.getOpcode() == ISD::BITCAST &&
+ N.getOperand(0).getOpcode() == ISD::EXTRACT_SUBVECTOR;
}
-/// Check if this is a valid build_vector for the immediate operand of
-/// a vector shift left operation. That value must be in the range:
-/// 0 <= Value < ElementBits
-static bool isVShiftLImm(SDValue Op, EVT VT, int64_t &Cnt) {
- assert(VT.isVector() && "vector shift count is not a vector type");
- unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
- if (!getVShiftImm(Op, ElementBits, Cnt))
+/// \brief Helper structure to keep track of ISD::SET_CC operands.
+struct GenericSetCCInfo {
+ const SDValue *Opnd0;
+ const SDValue *Opnd1;
+ ISD::CondCode CC;
+};
+
+/// \brief Helper structure to keep track of a SET_CC lowered into AArch64 code.
+struct AArch64SetCCInfo {
+ const SDValue *Cmp;
+ AArch64CC::CondCode CC;
+};
+
+/// \brief Helper structure to keep track of SetCC information.
+union SetCCInfo {
+ GenericSetCCInfo Generic;
+ AArch64SetCCInfo AArch64;
+};
+
+/// \brief Helper structure to be able to read SetCC information. If set to
+/// true, IsAArch64 field, Info is a AArch64SetCCInfo, otherwise Info is a
+/// GenericSetCCInfo.
+struct SetCCInfoAndKind {
+ SetCCInfo Info;
+ bool IsAArch64;
+};
+
+/// \brief Check whether or not \p Op is a SET_CC operation, either a generic
+/// or an AArch64-lowered one.
+/// \p SetCCInfo is filled accordingly.
+/// \post SetCCInfo is meaningful only when this function returns true.
+/// \return True when Op is a kind of SET_CC operation.
+static bool isSetCC(SDValue Op, SetCCInfoAndKind &SetCCInfo) {
+ // If this is a setcc, this is straightforward.
+ if (Op.getOpcode() == ISD::SETCC) {
+ SetCCInfo.Info.Generic.Opnd0 = &Op.getOperand(0);
+ SetCCInfo.Info.Generic.Opnd1 = &Op.getOperand(1);
+ SetCCInfo.Info.Generic.CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
+ SetCCInfo.IsAArch64 = false;
+ return true;
+ }
+ // Otherwise, check if this is a matching csel instruction.
+ // In other words:
+ // - csel 1, 0, cc
+ // - csel 0, 1, !cc
+ if (Op.getOpcode() != AArch64ISD::CSEL)
+ return false;
+ // Set the information about the operands.
+ // TODO: we want the operands of the Cmp not the csel
+ SetCCInfo.Info.AArch64.Cmp = &Op.getOperand(3);
+ SetCCInfo.IsAArch64 = true;
+ SetCCInfo.Info.AArch64.CC = static_cast<AArch64CC::CondCode>(
+ cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
+
+ // Check that the operands match the constraints:
+ // (1) Both operands must be constants.
+ // (2) One must be 1 and the other must be 0.
+ ConstantSDNode *TValue = dyn_cast<ConstantSDNode>(Op.getOperand(0));
+ ConstantSDNode *FValue = dyn_cast<ConstantSDNode>(Op.getOperand(1));
+
+ // Check (1).
+ if (!TValue || !FValue)
return false;
- return (Cnt >= 0 && Cnt < ElementBits);
+
+ // Check (2).
+ if (!TValue->isOne()) {
+ // Update the comparison when we are interested in !cc.
+ std::swap(TValue, FValue);
+ SetCCInfo.Info.AArch64.CC =
+ AArch64CC::getInvertedCondCode(SetCCInfo.Info.AArch64.CC);
+ }
+ return TValue->isOne() && FValue->isNullValue();
}
-/// Check if this is a valid build_vector for the immediate operand of a
-/// vector shift right operation. The value must be in the range:
-/// 1 <= Value <= ElementBits
-static bool isVShiftRImm(SDValue Op, EVT VT, int64_t &Cnt) {
- assert(VT.isVector() && "vector shift count is not a vector type");
- unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
- if (!getVShiftImm(Op, ElementBits, Cnt))
- return false;
- return (Cnt >= 1 && Cnt <= ElementBits);
+// Returns true if Op is setcc or zext of setcc.
+static bool isSetCCOrZExtSetCC(const SDValue& Op, SetCCInfoAndKind &Info) {
+ if (isSetCC(Op, Info))
+ return true;
+ return ((Op.getOpcode() == ISD::ZERO_EXTEND) &&
+ isSetCC(Op->getOperand(0), Info));
}
-/// Checks for immediate versions of vector shifts and lowers them.
-static SDValue PerformShiftCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- const AArch64Subtarget *ST) {
- SelectionDAG &DAG = DCI.DAG;
- EVT VT = N->getValueType(0);
- if (N->getOpcode() == ISD::SRA && (VT == MVT::i32 || VT == MVT::i64))
- return PerformSRACombine(N, DCI);
+// The folding we want to perform is:
+// (add x, [zext] (setcc cc ...) )
+// -->
+// (csel x, (add x, 1), !cc ...)
+//
+// The latter will get matched to a CSINC instruction.
+static SDValue performSetccAddFolding(SDNode *Op, SelectionDAG &DAG) {
+ assert(Op && Op->getOpcode() == ISD::ADD && "Unexpected operation!");
+ SDValue LHS = Op->getOperand(0);
+ SDValue RHS = Op->getOperand(1);
+ SetCCInfoAndKind InfoAndKind;
+
+ // If neither operand is a SET_CC, give up.
+ if (!isSetCCOrZExtSetCC(LHS, InfoAndKind)) {
+ std::swap(LHS, RHS);
+ if (!isSetCCOrZExtSetCC(LHS, InfoAndKind))
+ return SDValue();
+ }
- // Nothing to be done for scalar shifts.
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- if (!VT.isVector() || !TLI.isTypeLegal(VT))
+ // FIXME: This could be generalized to work for FP comparisons.
+ EVT CmpVT = InfoAndKind.IsAArch64
+ ? InfoAndKind.Info.AArch64.Cmp->getOperand(0).getValueType()
+ : InfoAndKind.Info.Generic.Opnd0->getValueType();
+ if (CmpVT != MVT::i32 && CmpVT != MVT::i64)
return SDValue();
- assert(ST->hasNEON() && "unexpected vector shift");
- int64_t Cnt;
+ SDValue CCVal;
+ SDValue Cmp;
+ SDLoc dl(Op);
+ if (InfoAndKind.IsAArch64) {
+ CCVal = DAG.getConstant(
+ AArch64CC::getInvertedCondCode(InfoAndKind.Info.AArch64.CC), MVT::i32);
+ Cmp = *InfoAndKind.Info.AArch64.Cmp;
+ } else
+ Cmp = getAArch64Cmp(*InfoAndKind.Info.Generic.Opnd0,
+ *InfoAndKind.Info.Generic.Opnd1,
+ ISD::getSetCCInverse(InfoAndKind.Info.Generic.CC, true),
+ CCVal, DAG, dl);
+
+ EVT VT = Op->getValueType(0);
+ LHS = DAG.getNode(ISD::ADD, dl, VT, RHS, DAG.getConstant(1, VT));
+ return DAG.getNode(AArch64ISD::CSEL, dl, VT, RHS, LHS, CCVal, Cmp);
+}
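
The identity behind this folding can be checked in ordinary scalar code. The sketch below is illustrative only (no LLVM types, hypothetical function names): adding a setcc result to x is the same as conditionally selecting x+1 versus x, which is exactly the form CSINC provides.

#include <cassert>
#include <cstdint>

// (add x, (setcc a < b))  ==  (csel x+1, x, a < b), i.e. a CSINC.
uint64_t addSetcc(uint64_t x, uint64_t a, uint64_t b) {
  return x + (a < b ? 1 : 0);
}

uint64_t cselForm(uint64_t x, uint64_t a, uint64_t b) {
  return (a < b) ? x + 1 : x;
}

int main() {
  assert(addSetcc(7, 1, 2) == cselForm(7, 1, 2)); // condition true:  8 == 8
  assert(addSetcc(7, 5, 3) == cselForm(7, 5, 3)); // condition false: 7 == 7
  return 0;
}
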
- switch (N->getOpcode()) {
- default:
- llvm_unreachable("unexpected shift opcode");
+// The basic add/sub long vector instructions have variants with "2" on the end
+// which act on the high-half of their inputs. They are normally matched by
+// patterns like:
+//
+// (add (zeroext (extract_high LHS)),
+// (zeroext (extract_high RHS)))
+// -> uaddl2 vD, vN, vM
+//
+// However, if one of the extracts is something like a duplicate, this
+// instruction can still be used profitably. This function puts the DAG into a
+// more appropriate form for those patterns to trigger.
+static SDValue performAddSubLongCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ SelectionDAG &DAG) {
+ if (DCI.isBeforeLegalizeOps())
+ return SDValue();
- case ISD::SHL:
- if (isVShiftLImm(N->getOperand(1), VT, Cnt)) {
- SDValue RHS =
- DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT,
- DAG.getConstant(Cnt, MVT::i32));
- return DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0), RHS);
- }
- break;
+ MVT VT = N->getSimpleValueType(0);
+ if (!VT.is128BitVector()) {
+ if (N->getOpcode() == ISD::ADD)
+ return performSetccAddFolding(N, DAG);
+ return SDValue();
+ }
- case ISD::SRA:
- case ISD::SRL:
- if (isVShiftRImm(N->getOperand(1), VT, Cnt)) {
- SDValue RHS =
- DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT,
- DAG.getConstant(Cnt, MVT::i32));
- return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N->getOperand(0), RHS);
- }
+ // Make sure both branches are extended in the same way.
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+ if ((LHS.getOpcode() != ISD::ZERO_EXTEND &&
+ LHS.getOpcode() != ISD::SIGN_EXTEND) ||
+ LHS.getOpcode() != RHS.getOpcode())
+ return SDValue();
+
+ unsigned ExtType = LHS.getOpcode();
+
+ // It's only worth doing if at least one of the inputs is already an
+ // extract, but we don't know which it'll be, so we have to try both.
+ if (isEssentiallyExtractSubvector(LHS.getOperand(0))) {
+ RHS = tryExtendDUPToExtractHigh(RHS.getOperand(0), DAG);
+ if (!RHS.getNode())
+ return SDValue();
+
+ RHS = DAG.getNode(ExtType, SDLoc(N), VT, RHS);
+ } else if (isEssentiallyExtractSubvector(RHS.getOperand(0))) {
+ LHS = tryExtendDUPToExtractHigh(LHS.getOperand(0), DAG);
+ if (!LHS.getNode())
+ return SDValue();
+
+ LHS = DAG.getNode(ExtType, SDLoc(N), VT, LHS);
+ }
+
+ return DAG.getNode(N->getOpcode(), SDLoc(N), VT, LHS, RHS);
+}
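
As a rough scalar model of why this is profitable (illustrative only, not the NEON intrinsic API, and the helper name is made up): the "2" variants widen and add the high halves of their inputs, and a DUP holds the same value in every lane, so it costs nothing to widen it first and take its high half.

#include <array>
#include <cstdint>

// Models uaddl2 on two v8i8 inputs: zero-extend the high four lanes of each
// operand to 16 bits and add them lane-wise.
std::array<uint16_t, 4> uaddl2Model(const std::array<uint8_t, 8> &a,
                                    const std::array<uint8_t, 8> &b) {
  std::array<uint16_t, 4> r{};
  for (int i = 0; i < 4; ++i)
    r[i] = uint16_t(a[4 + i]) + uint16_t(b[4 + i]);
  return r;
}
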
+
+// Massage DAGs which we can use the high-half "long" operations on into
+// something isel will recognize better. E.g.
+//
+// (aarch64_neon_umull (extract_high vec) (dupv64 scalar)) -->
+// (aarch64_neon_umull (extract_high (v2i64 vec))
+//                     (extract_high (v2i64 (dup128 scalar))))
+//
+static SDValue tryCombineLongOpWithDup(unsigned IID, SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ SelectionDAG &DAG) {
+ if (DCI.isBeforeLegalizeOps())
+ return SDValue();
+
+ SDValue LHS = N->getOperand(1);
+ SDValue RHS = N->getOperand(2);
+ assert(LHS.getValueType().is64BitVector() &&
+ RHS.getValueType().is64BitVector() &&
+ "unexpected shape for long operation");
+
+ // Either node could be a DUP, but it's not worth doing both of them (you
+ // might as well use the non-high version), so look for a corresponding
+ // extract operation on the other "wing".
+ if (isEssentiallyExtractSubvector(LHS)) {
+ RHS = tryExtendDUPToExtractHigh(RHS, DAG);
+ if (!RHS.getNode())
+ return SDValue();
+ } else if (isEssentiallyExtractSubvector(RHS)) {
+ LHS = tryExtendDUPToExtractHigh(LHS, DAG);
+ if (!LHS.getNode())
+ return SDValue();
+ }
+
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), N->getValueType(0),
+ N->getOperand(0), LHS, RHS);
+}
+
+static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG) {
+ MVT ElemTy = N->getSimpleValueType(0).getScalarType();
+ unsigned ElemBits = ElemTy.getSizeInBits();
+
+ int64_t ShiftAmount;
+ if (BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(2))) {
+ APInt SplatValue, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+ if (!BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
+ HasAnyUndefs, ElemBits) ||
+ SplatBitSize != ElemBits)
+ return SDValue();
+
+ ShiftAmount = SplatValue.getSExtValue();
+ } else if (ConstantSDNode *CVN = dyn_cast<ConstantSDNode>(N->getOperand(2))) {
+ ShiftAmount = CVN->getSExtValue();
+ } else
+ return SDValue();
+
+ unsigned Opcode;
+ bool IsRightShift;
+ switch (IID) {
+ default:
+ llvm_unreachable("Unknown shift intrinsic");
+ case Intrinsic::aarch64_neon_sqshl:
+ Opcode = AArch64ISD::SQSHL_I;
+ IsRightShift = false;
+ break;
+ case Intrinsic::aarch64_neon_uqshl:
+ Opcode = AArch64ISD::UQSHL_I;
+ IsRightShift = false;
+ break;
+ case Intrinsic::aarch64_neon_srshl:
+ Opcode = AArch64ISD::SRSHR_I;
+ IsRightShift = true;
+ break;
+ case Intrinsic::aarch64_neon_urshl:
+ Opcode = AArch64ISD::URSHR_I;
+ IsRightShift = true;
+ break;
+ case Intrinsic::aarch64_neon_sqshlu:
+ Opcode = AArch64ISD::SQSHLU_I;
+ IsRightShift = false;
break;
}
+ if (IsRightShift && ShiftAmount <= -1 && ShiftAmount >= -(int)ElemBits)
+ return DAG.getNode(Opcode, SDLoc(N), N->getValueType(0), N->getOperand(1),
+ DAG.getConstant(-ShiftAmount, MVT::i32));
+ else if (!IsRightShift && ShiftAmount >= 0 && ShiftAmount < ElemBits)
+ return DAG.getNode(Opcode, SDLoc(N), N->getValueType(0), N->getOperand(1),
+ DAG.getConstant(ShiftAmount, MVT::i32));
+
return SDValue();
}
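
The legality checks at the end of this function can be stated compactly; the helper below is a standalone paraphrase (hypothetical name, not part of the backend) of those two range tests.

#include <cassert>

// For the rounding right-shift intrinsics the amount arrives negated, so the
// encodable range is -ElemBits..-1; immediate left shifts accept 0..ElemBits-1.
bool isLegalImmShift(long long amt, unsigned elemBits, bool isRightShift) {
  if (isRightShift)
    return amt <= -1 && amt >= -(long long)elemBits;
  return amt >= 0 && amt < (long long)elemBits;
}

int main() {
  assert(isLegalImmShift(-8, 8, true));   // e.g. urshr #8 on byte lanes
  assert(!isLegalImmShift(0, 8, true));   // a right shift by 0 is not encoded
  assert(isLegalImmShift(7, 8, false));   // e.g. sqshl #7
  assert(!isLegalImmShift(8, 8, false));  // shifting left by ElemBits is out
  return 0;
}
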
-/// ARM-specific DAG combining for intrinsics.
-static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
- unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+// The CRC32[BH] instructions ignore the high bits of their data operand. Since
+// the intrinsics must be legal and take an i32, this means there's almost
+// certainly going to be a zext in the DAG which we can eliminate.
+static SDValue tryCombineCRC32(unsigned Mask, SDNode *N, SelectionDAG &DAG) {
+ SDValue AndN = N->getOperand(2);
+ if (AndN.getOpcode() != ISD::AND)
+ return SDValue();
+
+ ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(AndN.getOperand(1));
+ if (!CMask || CMask->getZExtValue() != Mask)
+ return SDValue();
+
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), MVT::i32,
+ N->getOperand(0), N->getOperand(1), AndN.getOperand(0));
+}
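
For intuition, a purely illustrative software model (not the AArch64 instruction, and no claim about the exact polynomial used): because a byte-sized CRC step only ever consumes the low 8 bits of its data operand, masking that operand with 0xff beforehand cannot change the result, which is why the AND/zext can be dropped.

#include <cassert>
#include <cstdint>

// Illustrative byte-wise CRC step: only the low 8 bits of 'data' ever matter,
// so the explicit '& 0xff' (the AND/zext the combine removes) is redundant.
uint32_t crc32ByteModel(uint32_t acc, uint32_t data) {
  uint32_t crc = acc ^ (data & 0xff);   // the hardware truncates here anyway
  for (int i = 0; i < 8; ++i)
    crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0u);
  return crc;
}

int main() {
  uint32_t acc = 0xDEADBEEFu, data = 0x1234ABCDu;
  assert(crc32ByteModel(acc, data & 0xff) == crc32ByteModel(acc, data));
  return 0;
}
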
- switch (IntNo) {
+static SDValue performIntrinsicCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const AArch64Subtarget *Subtarget) {
+ SelectionDAG &DAG = DCI.DAG;
+ unsigned IID = getIntrinsicID(N);
+ switch (IID) {
default:
- // Don't do anything for most intrinsics.
break;
+ case Intrinsic::aarch64_neon_vcvtfxs2fp:
+ case Intrinsic::aarch64_neon_vcvtfxu2fp:
+ return tryCombineFixedPointConvert(N, DCI, DAG);
+ case Intrinsic::aarch64_neon_fmax:
+ return DAG.getNode(AArch64ISD::FMAX, SDLoc(N), N->getValueType(0),
+ N->getOperand(1), N->getOperand(2));
+ case Intrinsic::aarch64_neon_fmin:
+ return DAG.getNode(AArch64ISD::FMIN, SDLoc(N), N->getValueType(0),
+ N->getOperand(1), N->getOperand(2));
+ case Intrinsic::aarch64_neon_smull:
+ case Intrinsic::aarch64_neon_umull:
+ case Intrinsic::aarch64_neon_pmull:
+ case Intrinsic::aarch64_neon_sqdmull:
+ return tryCombineLongOpWithDup(IID, N, DCI, DAG);
+ case Intrinsic::aarch64_neon_sqshl:
+ case Intrinsic::aarch64_neon_uqshl:
+ case Intrinsic::aarch64_neon_sqshlu:
+ case Intrinsic::aarch64_neon_srshl:
+ case Intrinsic::aarch64_neon_urshl:
+ return tryCombineShiftImm(IID, N, DAG);
+ case Intrinsic::aarch64_crc32b:
+ case Intrinsic::aarch64_crc32cb:
+ return tryCombineCRC32(0xff, N, DAG);
+ case Intrinsic::aarch64_crc32h:
+ case Intrinsic::aarch64_crc32ch:
+ return tryCombineCRC32(0xffff, N, DAG);
+ }
+ return SDValue();
+}
- case Intrinsic::arm_neon_vqshifts:
- case Intrinsic::arm_neon_vqshiftu:
- EVT VT = N->getOperand(1).getValueType();
- int64_t Cnt;
- if (!isVShiftLImm(N->getOperand(2), VT, Cnt))
- break;
- unsigned VShiftOpc = (IntNo == Intrinsic::arm_neon_vqshifts)
- ? AArch64ISD::NEON_QSHLs
- : AArch64ISD::NEON_QSHLu;
- return DAG.getNode(VShiftOpc, SDLoc(N), N->getValueType(0),
- N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
+static SDValue performExtendCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ SelectionDAG &DAG) {
+ // If we see something like (zext (sabd (extract_high ...), (DUP ...))) then
+ // we can convert that DUP into another extract_high (of a bigger DUP), which
+ // helps the backend to decide that an sabdl2 would be useful, saving a real
+ // extract_high operation.
+ if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ZERO_EXTEND &&
+ N->getOperand(0).getOpcode() == ISD::INTRINSIC_WO_CHAIN) {
+ SDNode *ABDNode = N->getOperand(0).getNode();
+ unsigned IID = getIntrinsicID(ABDNode);
+ if (IID == Intrinsic::aarch64_neon_sabd ||
+ IID == Intrinsic::aarch64_neon_uabd) {
+ SDValue NewABD = tryCombineLongOpWithDup(IID, ABDNode, DCI, DAG);
+ if (!NewABD.getNode())
+ return SDValue();
+
+ return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0),
+ NewABD);
+ }
}
+ // This is effectively a custom type legalization for AArch64.
+ //
+ // Type legalization will split an extend of a small, legal, type to a larger
+ // illegal type by first splitting the destination type, often creating
+ // illegal source types, which then get legalized in isel-confusing ways,
+ // leading to really terrible codegen. E.g.,
+ // %result = v8i32 sext v8i8 %value
+ // becomes
+ // %losrc = extract_subreg %value, ...
+ // %hisrc = extract_subreg %value, ...
+ // %lo = v4i32 sext v4i8 %losrc
+ // %hi = v4i32 sext v4i8 %hisrc
+ // Things go rapidly downhill from there.
+ //
+ // For AArch64, the [sz]ext vector instructions can only go up one element
+ // size, so we can, e.g., extend from i8 to i16, but to go from i8 to i32
+ // take two instructions.
+ //
+ // This implies that the most efficient way to do the extend from v8i8
+ // to two v4i32 values is to first extend the v8i8 to v8i16, then let
+ // the normal splitting happen for the v8i16->v8i32.
+
+ // This is pre-legalization to catch some cases where the default
+ // type legalization will create ill-tempered code.
+ if (!DCI.isBeforeLegalizeOps())
+ return SDValue();
+
+ // We're only interested in cleaning things up for non-legal vector types
+ // here. If both the source and destination are legal, things will just
+ // work naturally without any fiddling.
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ EVT ResVT = N->getValueType(0);
+ if (!ResVT.isVector() || TLI.isTypeLegal(ResVT))
+ return SDValue();
+ // If the vector type isn't a simple VT, it's beyond the scope of what
+ // we're worried about here. Let legalization do its thing and hope for
+ // the best.
+ if (!ResVT.isSimple())
+ return SDValue();
+
+ SDValue Src = N->getOperand(0);
+ MVT SrcVT = Src->getValueType(0).getSimpleVT();
+ // If the source VT is a 64-bit vector, we can play games and get the
+ // better results we want.
+ if (SrcVT.getSizeInBits() != 64)
+ return SDValue();
+
+ unsigned SrcEltSize = SrcVT.getVectorElementType().getSizeInBits();
+ unsigned ElementCount = SrcVT.getVectorNumElements();
+ SrcVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize * 2), ElementCount);
+ SDLoc DL(N);
+ Src = DAG.getNode(N->getOpcode(), DL, SrcVT, Src);
+
+ // Now split the rest of the operation into two halves, each with a 64
+ // bit source.
+ EVT LoVT, HiVT;
+ SDValue Lo, Hi;
+ unsigned NumElements = ResVT.getVectorNumElements();
+ assert(!(NumElements & 1) && "Splitting vector, but not in half!");
+ LoVT = HiVT = EVT::getVectorVT(*DAG.getContext(),
+ ResVT.getVectorElementType(), NumElements / 2);
+
+ EVT InNVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getVectorElementType(),
+ LoVT.getVectorNumElements());
+ Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InNVT, Src,
+ DAG.getIntPtrConstant(0));
+ Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InNVT, Src,
+ DAG.getIntPtrConstant(InNVT.getVectorNumElements()));
+ Lo = DAG.getNode(N->getOpcode(), DL, LoVT, Lo);
+ Hi = DAG.getNode(N->getOpcode(), DL, HiVT, Hi);
+
+ // Now combine the parts back together so we still have a single result
+ // like the combiner expects.
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, ResVT, Lo, Hi);
+}
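
The strategy described in the comments above can be written out in scalar form. The sketch below (illustrative only, hypothetical helper name) performs the v8i8 -> v8i32 sign extend as one widening step to 16 bits followed by a split into two four-lane halves, mirroring what the combine arranges for isel.

#include <array>
#include <cstdint>

// Two-step extend: v8i8 -> v8i16 (one extend), then split into two v4i32s.
void extendThenSplit(const std::array<int8_t, 8> &src,
                     std::array<int32_t, 4> &lo, std::array<int32_t, 4> &hi) {
  std::array<int16_t, 8> mid{};
  for (int i = 0; i < 8; ++i)
    mid[i] = src[i];            // sign extend i8 -> i16
  for (int i = 0; i < 4; ++i) {
    lo[i] = mid[i];             // low half  -> i32
    hi[i] = mid[4 + i];         // high half -> i32
  }
}
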
+
+/// Replace a store of a splatted vector with individual scalar stores of the
+/// splatted value. The load/store optimizer pass will merge them into store
+/// pair instructions. This performs better than a splat of the scalar followed
+/// by a split vector store. Even if the stores are not merged, it is four
+/// stores vs. a dup followed by an ext.b and two stores.
+static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode *St) {
+ SDValue StVal = St->getValue();
+ EVT VT = StVal.getValueType();
+
+ // Don't replace floating point stores; they may not be transformed to
+ // stp because of the store pair suppress pass.
+ if (VT.isFloatingPoint())
+ return SDValue();
+
+ // Check for insert vector elements.
+ if (StVal.getOpcode() != ISD::INSERT_VECTOR_ELT)
+ return SDValue();
+
+ // We can express a splat as store pair(s) for 2 or 4 elements.
+ unsigned NumVecElts = VT.getVectorNumElements();
+ if (NumVecElts != 4 && NumVecElts != 2)
+ return SDValue();
+ SDValue SplatVal = StVal.getOperand(1);
+ unsigned RemainInsertElts = NumVecElts - 1;
+
+ // Check that this is a splat.
+ while (--RemainInsertElts) {
+ SDValue NextInsertElt = StVal.getOperand(0);
+ if (NextInsertElt.getOpcode() != ISD::INSERT_VECTOR_ELT)
+ return SDValue();
+ if (NextInsertElt.getOperand(1) != SplatVal)
+ return SDValue();
+ StVal = NextInsertElt;
+ }
+ unsigned OrigAlignment = St->getAlignment();
+ unsigned EltOffset = NumVecElts == 4 ? 4 : 8;
+ unsigned Alignment = std::min(OrigAlignment, EltOffset);
+
+ // Create scalar stores. This is at least as good as the code sequence for a
+ // split unaligned store which is a dup.s, ext.b, and two stores.
+ // Most of the time the three stores should be replaced by store pair
+ // instructions (stp).
+ SDLoc DL(St);
+ SDValue BasePtr = St->getBasePtr();
+ SDValue NewST1 =
+ DAG.getStore(St->getChain(), DL, SplatVal, BasePtr, St->getPointerInfo(),
+ St->isVolatile(), St->isNonTemporal(), St->getAlignment());
+
+ unsigned Offset = EltOffset;
+ while (--NumVecElts) {
+ SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
+ DAG.getConstant(Offset, MVT::i64));
+ NewST1 = DAG.getStore(NewST1.getValue(0), DL, SplatVal, OffsetPtr,
+ St->getPointerInfo(), St->isVolatile(),
+ St->isNonTemporal(), Alignment);
+ Offset += EltOffset;
+ }
+ return NewST1;
+}
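
Concretely, for a v4i32 splat the rewrite produces four 4-byte stores at offsets 0, 4, 8 and 12 from the base pointer. A minimal model of that memory effect (illustrative only, assuming the 4-element case):

#include <cstdint>
#include <cstring>

// Four scalar stores of the splatted value at EltOffset strides; the
// load/store optimizer can later pair them into two STP instructions.
void storeSplatV4I32(uint8_t *base, uint32_t splat) {
  for (unsigned i = 0; i < 4; ++i)
    std::memcpy(base + 4 * i, &splat, sizeof(splat));
}
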
+
+static SDValue performSTORECombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ SelectionDAG &DAG,
+ const AArch64Subtarget *Subtarget) {
+ if (!DCI.isBeforeLegalize())
+ return SDValue();
+
+ StoreSDNode *S = cast<StoreSDNode>(N);
+ if (S->isVolatile())
+ return SDValue();
+
+ // Cyclone has bad performance on unaligned 16B stores when crossing line and
+ // page boundaries. We want to split such stores.
+ if (!Subtarget->isCyclone())
+ return SDValue();
+
+ // Don't split at Oz.
+ MachineFunction &MF = DAG.getMachineFunction();
+ bool IsMinSize = MF.getFunction()->getAttributes().hasAttribute(
+ AttributeSet::FunctionIndex, Attribute::MinSize);
+ if (IsMinSize)
+ return SDValue();
+
+ SDValue StVal = S->getValue();
+ EVT VT = StVal.getValueType();
+
+ // Don't split v2i64 vectors. Memcpy lowering produces those and splitting
+ // those up regresses performance on micro-benchmarks and olden/bh.
+ if (!VT.isVector() || VT.getVectorNumElements() < 2 || VT == MVT::v2i64)
+ return SDValue();
+
+ // Split unaligned 16B stores. They are terrible for performance.
+ // Don't split stores with alignment of 1 or 2. Code that uses clang vector
+ // extensions can use this to mark that it does not want splitting to happen
+ // (by underspecifying alignment to be 1 or 2). Furthermore, the chance of
+ // eliminating alignment hazards is only 1 in 8 for alignment of 2.
+ if (VT.getSizeInBits() != 128 || S->getAlignment() >= 16 ||
+ S->getAlignment() <= 2)
+ return SDValue();
+
+ // If we get a splat of a scalar convert this vector store to a store of
+ // scalars. They will be merged into store pairs thereby removing two
+ // instructions.
+ SDValue ReplacedSplat = replaceSplatVectorStore(DAG, S);
+ if (ReplacedSplat != SDValue())
+ return ReplacedSplat;
+
+ SDLoc DL(S);
+ unsigned NumElts = VT.getVectorNumElements() / 2;
+ // Split VT into two.
+ EVT HalfVT =
+ EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), NumElts);
+ SDValue SubVector0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal,
+ DAG.getIntPtrConstant(0));
+ SDValue SubVector1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal,
+ DAG.getIntPtrConstant(NumElts));
+ SDValue BasePtr = S->getBasePtr();
+ SDValue NewST1 =
+ DAG.getStore(S->getChain(), DL, SubVector0, BasePtr, S->getPointerInfo(),
+ S->isVolatile(), S->isNonTemporal(), S->getAlignment());
+ SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
+ DAG.getConstant(8, MVT::i64));
+ return DAG.getStore(NewST1.getValue(0), DL, SubVector1, OffsetPtr,
+ S->getPointerInfo(), S->isVolatile(), S->isNonTemporal(),
+ S->getAlignment());
+}
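
The split at the end of this function amounts to storing the two 8-byte halves separately at offsets 0 and 8; a standalone sketch of that memory effect (illustrative only):

#include <cstdint>
#include <cstring>

// Replace one potentially line-crossing 16-byte store with two 8-byte stores.
void storeSplit16B(uint8_t *base, const uint8_t (&val)[16]) {
  std::memcpy(base, val, 8);          // SubVector0 at offset 0
  std::memcpy(base + 8, val + 8, 8);  // SubVector1 at offset 8
}
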
+
+/// Target-specific DAG combine function for post-increment LD1 (lane) and
+/// post-increment LD1R.
+static SDValue performPostLD1Combine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ bool IsLaneOp) {
+ if (DCI.isBeforeLegalizeOps())
+ return SDValue();
+
+ SelectionDAG &DAG = DCI.DAG;
+ EVT VT = N->getValueType(0);
+
+ unsigned LoadIdx = IsLaneOp ? 1 : 0;
+ SDNode *LD = N->getOperand(LoadIdx).getNode();
+ // If it is not a LOAD, we cannot do this combine.
+ if (LD->getOpcode() != ISD::LOAD)
+ return SDValue();
+
+ LoadSDNode *LoadSDN = cast<LoadSDNode>(LD);
+ EVT MemVT = LoadSDN->getMemoryVT();
+ // Check if memory operand is the same type as the vector element.
+ if (MemVT != VT.getVectorElementType())
+ return SDValue();
+
+ // Check if there are other uses. If so, do not combine as it will introduce
+ // an extra load.
+ for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end(); UI != UE;
+ ++UI) {
+ if (UI.getUse().getResNo() == 1) // Ignore uses of the chain result.
+ continue;
+ if (*UI != N)
+ return SDValue();
+ }
+
+ SDValue Addr = LD->getOperand(1);
+ SDValue Vector = N->getOperand(0);
+ // Search for a use of the address operand that is an increment.
+ for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), UE =
+ Addr.getNode()->use_end(); UI != UE; ++UI) {
+ SDNode *User = *UI;
+ if (User->getOpcode() != ISD::ADD
+ || UI.getUse().getResNo() != Addr.getResNo())
+ continue;
+
+ // Check that the add is independent of the load. Otherwise, folding it
+ // would create a cycle.
+ if (User->isPredecessorOf(LD) || LD->isPredecessorOf(User))
+ continue;
+ // Also check that add is not used in the vector operand. This would also
+ // create a cycle.
+ if (User->isPredecessorOf(Vector.getNode()))
+ continue;
+
+ // If the increment is a constant, it must match the memory ref size.
+ SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
+ if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
+ uint32_t IncVal = CInc->getZExtValue();
+ unsigned NumBytes = VT.getScalarSizeInBits() / 8;
+ if (IncVal != NumBytes)
+ continue;
+ Inc = DAG.getRegister(AArch64::XZR, MVT::i64);
+ }
+
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(LD->getOperand(0)); // Chain
+ if (IsLaneOp) {
+ Ops.push_back(Vector); // The vector to be inserted
+ Ops.push_back(N->getOperand(2)); // The lane to be inserted in the vector
+ }
+ Ops.push_back(Addr);
+ Ops.push_back(Inc);
+
+ EVT Tys[3] = { VT, MVT::i64, MVT::Other };
+ SDVTList SDTys = DAG.getVTList(ArrayRef<EVT>(Tys, 3));
+ unsigned NewOp = IsLaneOp ? AArch64ISD::LD1LANEpost : AArch64ISD::LD1DUPpost;
+ SDValue UpdN = DAG.getMemIntrinsicNode(NewOp, SDLoc(N), SDTys, Ops,
+ MemVT,
+ LoadSDN->getMemOperand());
+
+ // Update the uses.
+ std::vector<SDValue> NewResults;
+ NewResults.push_back(SDValue(LD, 0)); // The result of load
+ NewResults.push_back(SDValue(UpdN.getNode(), 2)); // Chain
+ DCI.CombineTo(LD, NewResults);
+ DCI.CombineTo(N, SDValue(UpdN.getNode(), 0)); // Dup/Inserted Result
+ DCI.CombineTo(User, SDValue(UpdN.getNode(), 1)); // Write back register
+
+ break;
+ }
return SDValue();
}
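
The combine fuses the load with the matching pointer increment. As a conceptual model (illustrative only, not the node's actual operand order), a post-increment LD1 to a lane both loads the element and returns the advanced address:

#include <cstdint>

// Post-increment LD1 (lane) for i32 elements: one operation performs the load
// and writes back the address advanced by the element size.
uint32_t *ld1LanePostInc(uint32_t *addr, uint32_t (&vec)[4], unsigned lane) {
  vec[lane] = *addr;  // the lane insert fed by the load
  return addr + 1;    // write-back register: addr + sizeof(element)
}
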
/// Target-specific DAG combine function for NEON load/store intrinsics
/// to merge base address updates.
-static SDValue CombineBaseUpdate(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
+static SDValue performNEONPostLDSTCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ SelectionDAG &DAG) {
if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
return SDValue();
- SelectionDAG &DAG = DCI.DAG;
- bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
- N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
- unsigned AddrOpIdx = (isIntrinsic ? 2 : 1);
+ unsigned AddrOpIdx = N->getNumOperands() - 1;
SDValue Addr = N->getOperand(AddrOpIdx);
// Search for a use of the address operand that is an increment.
@@ -3598,106 +7548,96 @@ static SDValue CombineBaseUpdate(SDNode *N,
continue;
// Find the new opcode for the updating load/store.
- bool isLoad = true;
- bool isLaneOp = false;
+ bool IsStore = false;
+ bool IsLaneOp = false;
+ bool IsDupOp = false;
unsigned NewOpc = 0;
unsigned NumVecs = 0;
- if (isIntrinsic) {
- unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
- switch (IntNo) {
- default: llvm_unreachable("unexpected intrinsic for Neon base update");
- case Intrinsic::arm_neon_vld1: NewOpc = AArch64ISD::NEON_LD1_UPD;
- NumVecs = 1; break;
- case Intrinsic::arm_neon_vld2: NewOpc = AArch64ISD::NEON_LD2_UPD;
- NumVecs = 2; break;
- case Intrinsic::arm_neon_vld3: NewOpc = AArch64ISD::NEON_LD3_UPD;
- NumVecs = 3; break;
- case Intrinsic::arm_neon_vld4: NewOpc = AArch64ISD::NEON_LD4_UPD;
- NumVecs = 4; break;
- case Intrinsic::arm_neon_vst1: NewOpc = AArch64ISD::NEON_ST1_UPD;
- NumVecs = 1; isLoad = false; break;
- case Intrinsic::arm_neon_vst2: NewOpc = AArch64ISD::NEON_ST2_UPD;
- NumVecs = 2; isLoad = false; break;
- case Intrinsic::arm_neon_vst3: NewOpc = AArch64ISD::NEON_ST3_UPD;
- NumVecs = 3; isLoad = false; break;
- case Intrinsic::arm_neon_vst4: NewOpc = AArch64ISD::NEON_ST4_UPD;
- NumVecs = 4; isLoad = false; break;
- case Intrinsic::aarch64_neon_vld1x2: NewOpc = AArch64ISD::NEON_LD1x2_UPD;
- NumVecs = 2; break;
- case Intrinsic::aarch64_neon_vld1x3: NewOpc = AArch64ISD::NEON_LD1x3_UPD;
- NumVecs = 3; break;
- case Intrinsic::aarch64_neon_vld1x4: NewOpc = AArch64ISD::NEON_LD1x4_UPD;
- NumVecs = 4; break;
- case Intrinsic::aarch64_neon_vst1x2: NewOpc = AArch64ISD::NEON_ST1x2_UPD;
- NumVecs = 2; isLoad = false; break;
- case Intrinsic::aarch64_neon_vst1x3: NewOpc = AArch64ISD::NEON_ST1x3_UPD;
- NumVecs = 3; isLoad = false; break;
- case Intrinsic::aarch64_neon_vst1x4: NewOpc = AArch64ISD::NEON_ST1x4_UPD;
- NumVecs = 4; isLoad = false; break;
- case Intrinsic::arm_neon_vld2lane: NewOpc = AArch64ISD::NEON_LD2LN_UPD;
- NumVecs = 2; isLaneOp = true; break;
- case Intrinsic::arm_neon_vld3lane: NewOpc = AArch64ISD::NEON_LD3LN_UPD;
- NumVecs = 3; isLaneOp = true; break;
- case Intrinsic::arm_neon_vld4lane: NewOpc = AArch64ISD::NEON_LD4LN_UPD;
- NumVecs = 4; isLaneOp = true; break;
- case Intrinsic::arm_neon_vst2lane: NewOpc = AArch64ISD::NEON_ST2LN_UPD;
- NumVecs = 2; isLoad = false; isLaneOp = true; break;
- case Intrinsic::arm_neon_vst3lane: NewOpc = AArch64ISD::NEON_ST3LN_UPD;
- NumVecs = 3; isLoad = false; isLaneOp = true; break;
- case Intrinsic::arm_neon_vst4lane: NewOpc = AArch64ISD::NEON_ST4LN_UPD;
- NumVecs = 4; isLoad = false; isLaneOp = true; break;
- }
- } else {
- isLaneOp = true;
- switch (N->getOpcode()) {
- default: llvm_unreachable("unexpected opcode for Neon base update");
- case AArch64ISD::NEON_LD2DUP: NewOpc = AArch64ISD::NEON_LD2DUP_UPD;
- NumVecs = 2; break;
- case AArch64ISD::NEON_LD3DUP: NewOpc = AArch64ISD::NEON_LD3DUP_UPD;
- NumVecs = 3; break;
- case AArch64ISD::NEON_LD4DUP: NewOpc = AArch64ISD::NEON_LD4DUP_UPD;
- NumVecs = 4; break;
- }
+ unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ switch (IntNo) {
+ default: llvm_unreachable("unexpected intrinsic for Neon base update");
+ case Intrinsic::aarch64_neon_ld2: NewOpc = AArch64ISD::LD2post;
+ NumVecs = 2; break;
+ case Intrinsic::aarch64_neon_ld3: NewOpc = AArch64ISD::LD3post;
+ NumVecs = 3; break;
+ case Intrinsic::aarch64_neon_ld4: NewOpc = AArch64ISD::LD4post;
+ NumVecs = 4; break;
+ case Intrinsic::aarch64_neon_st2: NewOpc = AArch64ISD::ST2post;
+ NumVecs = 2; IsStore = true; break;
+ case Intrinsic::aarch64_neon_st3: NewOpc = AArch64ISD::ST3post;
+ NumVecs = 3; IsStore = true; break;
+ case Intrinsic::aarch64_neon_st4: NewOpc = AArch64ISD::ST4post;
+ NumVecs = 4; IsStore = true; break;
+ case Intrinsic::aarch64_neon_ld1x2: NewOpc = AArch64ISD::LD1x2post;
+ NumVecs = 2; break;
+ case Intrinsic::aarch64_neon_ld1x3: NewOpc = AArch64ISD::LD1x3post;
+ NumVecs = 3; break;
+ case Intrinsic::aarch64_neon_ld1x4: NewOpc = AArch64ISD::LD1x4post;
+ NumVecs = 4; break;
+ case Intrinsic::aarch64_neon_st1x2: NewOpc = AArch64ISD::ST1x2post;
+ NumVecs = 2; IsStore = true; break;
+ case Intrinsic::aarch64_neon_st1x3: NewOpc = AArch64ISD::ST1x3post;
+ NumVecs = 3; IsStore = true; break;
+ case Intrinsic::aarch64_neon_st1x4: NewOpc = AArch64ISD::ST1x4post;
+ NumVecs = 4; IsStore = true; break;
+ case Intrinsic::aarch64_neon_ld2r: NewOpc = AArch64ISD::LD2DUPpost;
+ NumVecs = 2; IsDupOp = true; break;
+ case Intrinsic::aarch64_neon_ld3r: NewOpc = AArch64ISD::LD3DUPpost;
+ NumVecs = 3; IsDupOp = true; break;
+ case Intrinsic::aarch64_neon_ld4r: NewOpc = AArch64ISD::LD4DUPpost;
+ NumVecs = 4; IsDupOp = true; break;
+ case Intrinsic::aarch64_neon_ld2lane: NewOpc = AArch64ISD::LD2LANEpost;
+ NumVecs = 2; IsLaneOp = true; break;
+ case Intrinsic::aarch64_neon_ld3lane: NewOpc = AArch64ISD::LD3LANEpost;
+ NumVecs = 3; IsLaneOp = true; break;
+ case Intrinsic::aarch64_neon_ld4lane: NewOpc = AArch64ISD::LD4LANEpost;
+ NumVecs = 4; IsLaneOp = true; break;
+ case Intrinsic::aarch64_neon_st2lane: NewOpc = AArch64ISD::ST2LANEpost;
+ NumVecs = 2; IsStore = true; IsLaneOp = true; break;
+ case Intrinsic::aarch64_neon_st3lane: NewOpc = AArch64ISD::ST3LANEpost;
+ NumVecs = 3; IsStore = true; IsLaneOp = true; break;
+ case Intrinsic::aarch64_neon_st4lane: NewOpc = AArch64ISD::ST4LANEpost;
+ NumVecs = 4; IsStore = true; IsLaneOp = true; break;
}
- // Find the size of memory referenced by the load/store.
EVT VecTy;
- if (isLoad)
- VecTy = N->getValueType(0);
+ if (IsStore)
+ VecTy = N->getOperand(2).getValueType();
else
- VecTy = N->getOperand(AddrOpIdx + 1).getValueType();
- unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
- if (isLaneOp)
- NumBytes /= VecTy.getVectorNumElements();
+ VecTy = N->getValueType(0);
// If the increment is a constant, it must match the memory ref size.
SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
uint32_t IncVal = CInc->getZExtValue();
+ unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
+ if (IsLaneOp || IsDupOp)
+ NumBytes /= VecTy.getVectorNumElements();
if (IncVal != NumBytes)
continue;
- Inc = DAG.getTargetConstant(IncVal, MVT::i32);
+ Inc = DAG.getRegister(AArch64::XZR, MVT::i64);
}
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(N->getOperand(0)); // Incoming chain
+ // Load lane and store have vector list as input.
+ if (IsLaneOp || IsStore)
+ for (unsigned i = 2; i < AddrOpIdx; ++i)
+ Ops.push_back(N->getOperand(i));
+ Ops.push_back(Addr); // Base register
+ Ops.push_back(Inc);
- // Create the new updating load/store node.
+ // Return Types.
EVT Tys[6];
- unsigned NumResultVecs = (isLoad ? NumVecs : 0);
+ unsigned NumResultVecs = (IsStore ? 0 : NumVecs);
unsigned n;
for (n = 0; n < NumResultVecs; ++n)
Tys[n] = VecTy;
- Tys[n++] = MVT::i64;
- Tys[n] = MVT::Other;
- SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs + 2);
- SmallVector<SDValue, 8> Ops;
- Ops.push_back(N->getOperand(0)); // incoming chain
- Ops.push_back(N->getOperand(AddrOpIdx));
- Ops.push_back(Inc);
- for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) {
- Ops.push_back(N->getOperand(i));
- }
+ Tys[n++] = MVT::i64; // Type of write back register
+ Tys[n] = MVT::Other; // Type of the chain
+ SDVTList SDTys = DAG.getVTList(ArrayRef<EVT>(Tys, NumResultVecs + 2));
+
MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
- SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys,
- Ops.data(), Ops.size(),
+ SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys, Ops,
MemInt->getMemoryVT(),
MemInt->getMemOperand());
@@ -3706,7 +7646,7 @@ static SDValue CombineBaseUpdate(SDNode *N,
for (unsigned i = 0; i < NumResultVecs; ++i) {
NewResults.push_back(SDValue(UpdN.getNode(), i));
}
- NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain
+ NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1));
DCI.CombineTo(N, NewResults);
DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
@@ -3715,881 +7655,441 @@ static SDValue CombineBaseUpdate(SDNode *N,
return SDValue();
}
-/// For a VDUPLANE node N, check if its source operand is a vldN-lane (N > 1)
-/// intrinsic, and if all the other uses of that intrinsic are also VDUPLANEs.
-/// If so, combine them to a vldN-dup operation and return true.
-static SDValue CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
- SelectionDAG &DAG = DCI.DAG;
- EVT VT = N->getValueType(0);
-
- // Check if the VDUPLANE operand is a vldN-dup intrinsic.
- SDNode *VLD = N->getOperand(0).getNode();
- if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
+// Optimize compare with zero and branch.
+static SDValue performBRCONDCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ SelectionDAG &DAG) {
+ SDValue Chain = N->getOperand(0);
+ SDValue Dest = N->getOperand(1);
+ SDValue CCVal = N->getOperand(2);
+ SDValue Cmp = N->getOperand(3);
+
+ assert(isa<ConstantSDNode>(CCVal) && "Expected a ConstantSDNode here!");
+ unsigned CC = cast<ConstantSDNode>(CCVal)->getZExtValue();
+ if (CC != AArch64CC::EQ && CC != AArch64CC::NE)
return SDValue();
- unsigned NumVecs = 0;
- unsigned NewOpc = 0;
- unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
- if (IntNo == Intrinsic::arm_neon_vld2lane) {
- NumVecs = 2;
- NewOpc = AArch64ISD::NEON_LD2DUP;
- } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
- NumVecs = 3;
- NewOpc = AArch64ISD::NEON_LD3DUP;
- } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
- NumVecs = 4;
- NewOpc = AArch64ISD::NEON_LD4DUP;
- } else {
+
+ unsigned CmpOpc = Cmp.getOpcode();
+ if (CmpOpc != AArch64ISD::ADDS && CmpOpc != AArch64ISD::SUBS)
return SDValue();
- }
- // First check that all the vldN-lane uses are VDUPLANEs and that the lane
- // numbers match the load.
- unsigned VLDLaneNo =
- cast<ConstantSDNode>(VLD->getOperand(NumVecs + 3))->getZExtValue();
- for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
- UI != UE; ++UI) {
- // Ignore uses of the chain result.
- if (UI.getUse().getResNo() == NumVecs)
- continue;
- SDNode *User = *UI;
- if (User->getOpcode() != AArch64ISD::NEON_VDUPLANE ||
- VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
- return SDValue();
- }
+ // Only attempt folding if there is only one use of the flag and no use of the
+ // value.
+ if (!Cmp->hasNUsesOfValue(0, 0) || !Cmp->hasNUsesOfValue(1, 1))
+ return SDValue();
- // Create the vldN-dup node.
- EVT Tys[5];
- unsigned n;
- for (n = 0; n < NumVecs; ++n)
- Tys[n] = VT;
- Tys[n] = MVT::Other;
- SDVTList SDTys = DAG.getVTList(Tys, NumVecs + 1);
- SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
- MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
- SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, Ops, 2,
- VLDMemInt->getMemoryVT(),
- VLDMemInt->getMemOperand());
-
- // Update the uses.
- for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
- UI != UE; ++UI) {
- unsigned ResNo = UI.getUse().getResNo();
- // Ignore uses of the chain result.
- if (ResNo == NumVecs)
- continue;
- SDNode *User = *UI;
- DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
- }
+ SDValue LHS = Cmp.getOperand(0);
+ SDValue RHS = Cmp.getOperand(1);
- // Now the vldN-lane intrinsic is dead except for its chain result.
- // Update uses of the chain.
- std::vector<SDValue> VLDDupResults;
- for (unsigned n = 0; n < NumVecs; ++n)
- VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
- VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
- DCI.CombineTo(VLD, VLDDupResults);
+ assert(LHS.getValueType() == RHS.getValueType() &&
+ "Expected the value type to be the same for both operands!");
+ if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64)
+ return SDValue();
- return SDValue(N, 0);
-}
+ if (isa<ConstantSDNode>(LHS) && cast<ConstantSDNode>(LHS)->isNullValue())
+ std::swap(LHS, RHS);
-SDValue
-AArch64TargetLowering::PerformDAGCombine(SDNode *N,
- DAGCombinerInfo &DCI) const {
- switch (N->getOpcode()) {
- default: break;
- case ISD::AND: return PerformANDCombine(N, DCI);
- case ISD::OR: return PerformORCombine(N, DCI, getSubtarget());
- case ISD::SHL:
- case ISD::SRA:
- case ISD::SRL:
- return PerformShiftCombine(N, DCI, getSubtarget());
- case ISD::INTRINSIC_WO_CHAIN:
- return PerformIntrinsicCombine(N, DCI.DAG);
- case AArch64ISD::NEON_VDUPLANE:
- return CombineVLDDUP(N, DCI);
- case AArch64ISD::NEON_LD2DUP:
- case AArch64ISD::NEON_LD3DUP:
- case AArch64ISD::NEON_LD4DUP:
- return CombineBaseUpdate(N, DCI);
- case ISD::INTRINSIC_VOID:
- case ISD::INTRINSIC_W_CHAIN:
- switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
- case Intrinsic::arm_neon_vld1:
- case Intrinsic::arm_neon_vld2:
- case Intrinsic::arm_neon_vld3:
- case Intrinsic::arm_neon_vld4:
- case Intrinsic::arm_neon_vst1:
- case Intrinsic::arm_neon_vst2:
- case Intrinsic::arm_neon_vst3:
- case Intrinsic::arm_neon_vst4:
- case Intrinsic::arm_neon_vld2lane:
- case Intrinsic::arm_neon_vld3lane:
- case Intrinsic::arm_neon_vld4lane:
- case Intrinsic::aarch64_neon_vld1x2:
- case Intrinsic::aarch64_neon_vld1x3:
- case Intrinsic::aarch64_neon_vld1x4:
- case Intrinsic::aarch64_neon_vst1x2:
- case Intrinsic::aarch64_neon_vst1x3:
- case Intrinsic::aarch64_neon_vst1x4:
- case Intrinsic::arm_neon_vst2lane:
- case Intrinsic::arm_neon_vst3lane:
- case Intrinsic::arm_neon_vst4lane:
- return CombineBaseUpdate(N, DCI);
- default:
- break;
- }
- }
- return SDValue();
-}
+ if (!isa<ConstantSDNode>(RHS) || !cast<ConstantSDNode>(RHS)->isNullValue())
+ return SDValue();
-bool
-AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
- VT = VT.getScalarType();
+ if (LHS.getOpcode() == ISD::SHL || LHS.getOpcode() == ISD::SRA ||
+ LHS.getOpcode() == ISD::SRL)
+ return SDValue();
- if (!VT.isSimple())
- return false;
+ // Fold the compare into the branch instruction.
+ SDValue BR;
+ if (CC == AArch64CC::EQ)
+ BR = DAG.getNode(AArch64ISD::CBZ, SDLoc(N), MVT::Other, Chain, LHS, Dest);
+ else
+ BR = DAG.getNode(AArch64ISD::CBNZ, SDLoc(N), MVT::Other, Chain, LHS, Dest);
- switch (VT.getSimpleVT().SimpleTy) {
- case MVT::f16:
- case MVT::f32:
- case MVT::f64:
- return true;
- case MVT::f128:
- return false;
- default:
- break;
- }
+ // Do not add new nodes to DAG combiner worklist.
+ DCI.CombineTo(N, BR, false);
- return false;
+ return SDValue();
}
-// Check whether a Build Vector could be presented as Shuffle Vector. If yes,
-// try to call LowerVECTOR_SHUFFLE to lower it.
-bool AArch64TargetLowering::isKnownShuffleVector(SDValue Op, SelectionDAG &DAG,
- SDValue &Res) const {
- SDLoc DL(Op);
- EVT VT = Op.getValueType();
- unsigned NumElts = VT.getVectorNumElements();
- unsigned V0NumElts = 0;
- int Mask[16];
- SDValue V0, V1;
-
- // Check if all elements are extracted from less than 3 vectors.
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue Elt = Op.getOperand(i);
- if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
- return false;
+// vselect (v1i1 setcc) ->
+// vselect (v1iXX setcc) (XX is the size of the compared operand type)
+// FIXME: Currently the type legalizer can't handle VSELECT having v1i1 as
+// condition. If it can legalize "VSELECT v1i1" correctly, no need to combine
+// such VSELECT.
+static SDValue performVSelectCombine(SDNode *N, SelectionDAG &DAG) {
+ SDValue N0 = N->getOperand(0);
+ EVT CCVT = N0.getValueType();
- if (V0.getNode() == 0) {
- V0 = Elt.getOperand(0);
- V0NumElts = V0.getValueType().getVectorNumElements();
- }
- if (Elt.getOperand(0) == V0) {
- Mask[i] = (cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue());
- continue;
- } else if (V1.getNode() == 0) {
- V1 = Elt.getOperand(0);
- }
- if (Elt.getOperand(0) == V1) {
- unsigned Lane = cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue();
- Mask[i] = (Lane + V0NumElts);
- continue;
- } else {
- return false;
- }
- }
+ if (N0.getOpcode() != ISD::SETCC || CCVT.getVectorNumElements() != 1 ||
+ CCVT.getVectorElementType() != MVT::i1)
+ return SDValue();
- if (!V1.getNode() && V0NumElts == NumElts * 2) {
- V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V0,
- DAG.getConstant(NumElts, MVT::i64));
- V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V0,
- DAG.getConstant(0, MVT::i64));
- V0NumElts = V0.getValueType().getVectorNumElements();
- }
+ EVT ResVT = N->getValueType(0);
+ EVT CmpVT = N0.getOperand(0).getValueType();
+ // Only combine when the result type is of the same size as the compared
+ // operands.
+ if (ResVT.getSizeInBits() != CmpVT.getSizeInBits())
+ return SDValue();
- if (V1.getNode() && NumElts == V0NumElts &&
- V0NumElts == V1.getValueType().getVectorNumElements()) {
- SDValue Shuffle = DAG.getVectorShuffle(VT, DL, V0, V1, Mask);
- Res = LowerVECTOR_SHUFFLE(Shuffle, DAG);
- return true;
- } else
- return false;
+ SDValue IfTrue = N->getOperand(1);
+ SDValue IfFalse = N->getOperand(2);
+ SDValue SetCC =
+ DAG.getSetCC(SDLoc(N), CmpVT.changeVectorElementTypeToInteger(),
+ N0.getOperand(0), N0.getOperand(1),
+ cast<CondCodeSDNode>(N0.getOperand(2))->get());
+ return DAG.getNode(ISD::VSELECT, SDLoc(N), ResVT, SetCC,
+ IfTrue, IfFalse);
}
-// If this is a case we can't handle, return null and let the default
-// expansion code take care of it.
-SDValue
-AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
- const AArch64Subtarget *ST) const {
-
- BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
- SDLoc DL(Op);
- EVT VT = Op.getValueType();
-
- APInt SplatBits, SplatUndef;
- unsigned SplatBitSize;
- bool HasAnyUndefs;
-
- unsigned UseNeonMov = VT.getSizeInBits() >= 64;
-
- // Note we favor lowering MOVI over MVNI.
- // This has implications on the definition of patterns in TableGen to select
- // BIC immediate instructions but not ORR immediate instructions.
- // If this lowering order is changed, TableGen patterns for BIC immediate and
- // ORR immediate instructions have to be updated.
- if (UseNeonMov &&
- BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
- if (SplatBitSize <= 64) {
- // First attempt to use vector immediate-form MOVI
- EVT NeonMovVT;
- unsigned Imm = 0;
- unsigned OpCmode = 0;
-
- if (isNeonModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
- SplatBitSize, DAG, VT.is128BitVector(),
- Neon_Mov_Imm, NeonMovVT, Imm, OpCmode)) {
- SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32);
- SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32);
-
- if (ImmVal.getNode() && OpCmodeVal.getNode()) {
- SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MOVIMM, DL, NeonMovVT,
- ImmVal, OpCmodeVal);
- return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov);
- }
- }
-
- // Then attempt to use vector immediate-form MVNI
- uint64_t NegatedImm = (~SplatBits).getZExtValue();
- if (isNeonModifiedImm(NegatedImm, SplatUndef.getZExtValue(), SplatBitSize,
- DAG, VT.is128BitVector(), Neon_Mvn_Imm, NeonMovVT,
- Imm, OpCmode)) {
- SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32);
- SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32);
- if (ImmVal.getNode() && OpCmodeVal.getNode()) {
- SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MVNIMM, DL, NeonMovVT,
- ImmVal, OpCmodeVal);
- return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov);
- }
- }
-
- // Attempt to use vector immediate-form FMOV
- if (((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) ||
- (VT == MVT::v2f64 && SplatBitSize == 64)) {
- APFloat RealVal(
- SplatBitSize == 32 ? APFloat::IEEEsingle : APFloat::IEEEdouble,
- SplatBits);
- uint32_t ImmVal;
- if (A64Imms::isFPImm(RealVal, ImmVal)) {
- SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32);
- return DAG.getNode(AArch64ISD::NEON_FMOVIMM, DL, VT, Val);
- }
- }
- }
- }
-
- unsigned NumElts = VT.getVectorNumElements();
- bool isOnlyLowElement = true;
- bool usesOnlyOneValue = true;
- bool hasDominantValue = false;
- bool isConstant = true;
+/// A vector select: "(select vL, vR, (setcc LHS, RHS))" is best performed with
+/// the compare-mask instructions rather than going via NZCV, even if LHS and
+/// RHS are really scalar. This replaces any scalar setcc in the above pattern
+/// with a vector one followed by a DUP shuffle on the result.
+static SDValue performSelectCombine(SDNode *N, SelectionDAG &DAG) {
+ SDValue N0 = N->getOperand(0);
+ EVT ResVT = N->getValueType(0);
- // Map of the number of times a particular SDValue appears in the
- // element list.
- DenseMap<SDValue, unsigned> ValueCounts;
- SDValue Value;
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue V = Op.getOperand(i);
- if (V.getOpcode() == ISD::UNDEF)
- continue;
- if (i > 0)
- isOnlyLowElement = false;
- if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
- isConstant = false;
+ if (!N->getOperand(1).getValueType().isVector())
+ return SDValue();
- ValueCounts.insert(std::make_pair(V, 0));
- unsigned &Count = ValueCounts[V];
+ if (N0.getOpcode() != ISD::SETCC || N0.getValueType() != MVT::i1)
+ return SDValue();
- // Is this value dominant? (takes up more than half of the lanes)
- if (++Count > (NumElts / 2)) {
- hasDominantValue = true;
- Value = V;
- }
- }
- if (ValueCounts.size() != 1)
- usesOnlyOneValue = false;
- if (!Value.getNode() && ValueCounts.size() > 0)
- Value = ValueCounts.begin()->first;
+ SDLoc DL(N0);
- if (ValueCounts.size() == 0)
- return DAG.getUNDEF(VT);
+ EVT SrcVT = N0.getOperand(0).getValueType();
+ SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT,
+ ResVT.getSizeInBits() / SrcVT.getSizeInBits());
+ EVT CCVT = SrcVT.changeVectorElementTypeToInteger();
- // Loads are better lowered with insert_vector_elt.
- // Keep going if we are hitting this case.
- if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
- return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
+ // First perform a vector comparison, where lane 0 is the one we're interested
+ // in.
+ SDValue LHS =
+ DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(0));
+ SDValue RHS =
+ DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(1));
+ SDValue SetCC = DAG.getNode(ISD::SETCC, DL, CCVT, LHS, RHS, N0.getOperand(2));
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
- if (hasDominantValue && EltSize <= 64) {
- // Use VDUP for non-constant splats.
- if (!isConstant) {
- SDValue N;
-
- // If we are DUPing a value that comes directly from a vector, we could
- // just use DUPLANE. We can only do this if the lane being extracted
- // is at a constant index, as the DUP from lane instructions only have
- // constant-index forms.
- if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
- isa<ConstantSDNode>(Value->getOperand(1))) {
- N = DAG.getNode(AArch64ISD::NEON_VDUPLANE, DL, VT,
- Value->getOperand(0), Value->getOperand(1));
- } else
- N = DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value);
-
- if (!usesOnlyOneValue) {
- // The dominant value was splatted as 'N', but we now have to insert
- // all differing elements.
- for (unsigned I = 0; I < NumElts; ++I) {
- if (Op.getOperand(I) == Value)
- continue;
- SmallVector<SDValue, 3> Ops;
- Ops.push_back(N);
- Ops.push_back(Op.getOperand(I));
- Ops.push_back(DAG.getConstant(I, MVT::i64));
- N = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, &Ops[0], 3);
- }
- }
- return N;
- }
- if (usesOnlyOneValue && isConstant) {
- return DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value);
- }
- }
- // If all elements are constants and the case above didn't get hit, fall back
- // to the default expansion, which will generate a load from the constant
- // pool.
- if (isConstant)
- return SDValue();
+ // Now duplicate the comparison mask we want across all other lanes.
+ SmallVector<int, 8> DUPMask(CCVT.getVectorNumElements(), 0);
+ SDValue Mask = DAG.getVectorShuffle(CCVT, DL, SetCC, SetCC, DUPMask.data());
+ Mask = DAG.getNode(ISD::BITCAST, DL, ResVT.changeVectorElementTypeToInteger(),
+ Mask);
- // Try to lower this in lowering ShuffleVector way.
- SDValue Shuf;
- if (isKnownShuffleVector(Op, DAG, Shuf))
- return Shuf;
+ return DAG.getSelect(DL, ResVT, Mask, N->getOperand(1), N->getOperand(2));
+}
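
The effect of this combine can be mimicked in scalar code: run the comparison once, splat the resulting all-ones/all-zeros mask across the lanes, and do a bitwise select. The sketch below (illustrative only; two f64 lanes are an assumed example) mirrors the SETCC + DUP-shuffle + select sequence built above.

#include <array>
#include <cstdint>
#include <cstring>

// Models "(select (setcc a, b), vL, vR)" via a compare mask: the scalar
// comparison produces an all-ones/all-zeros lane, which is duplicated across
// the vector and used for a bitwise select.
std::array<double, 2> selectViaMask(double a, double b,
                                    std::array<double, 2> vL,
                                    std::array<double, 2> vR) {
  uint64_t lane = (a < b) ? ~0ull : 0ull;  // lane 0 of the vector setcc
  std::array<double, 2> out{};
  for (int i = 0; i < 2; ++i) {            // DUP the mask, then bit-select
    uint64_t l, r, o;
    std::memcpy(&l, &vL[i], 8);
    std::memcpy(&r, &vR[i], 8);
    o = (l & lane) | (r & ~lane);
    std::memcpy(&out[i], &o, 8);
  }
  return out;
}
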
- // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
- // know the default expansion would otherwise fall back on something even
- // worse. For a vector with one or two non-undef values, that's
- // scalar_to_vector for the elements followed by a shuffle (provided the
- // shuffle is valid for the target) and materialization element by element
- // on the stack followed by a load for everything else.
- if (!isConstant && !usesOnlyOneValue) {
- SDValue Vec = DAG.getUNDEF(VT);
- for (unsigned i = 0 ; i < NumElts; ++i) {
- SDValue V = Op.getOperand(i);
- if (V.getOpcode() == ISD::UNDEF)
- continue;
- SDValue LaneIdx = DAG.getConstant(i, MVT::i64);
- Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V, LaneIdx);
+SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ SelectionDAG &DAG = DCI.DAG;
+ switch (N->getOpcode()) {
+ default:
+ break;
+ case ISD::ADD:
+ case ISD::SUB:
+ return performAddSubLongCombine(N, DCI, DAG);
+ case ISD::XOR:
+ return performXorCombine(N, DAG, DCI, Subtarget);
+ case ISD::MUL:
+ return performMulCombine(N, DAG, DCI, Subtarget);
+ case ISD::SINT_TO_FP:
+ case ISD::UINT_TO_FP:
+ return performIntToFpCombine(N, DAG);
+ case ISD::OR:
+ return performORCombine(N, DCI, Subtarget);
+ case ISD::INTRINSIC_WO_CHAIN:
+ return performIntrinsicCombine(N, DCI, Subtarget);
+ case ISD::ANY_EXTEND:
+ case ISD::ZERO_EXTEND:
+ case ISD::SIGN_EXTEND:
+ return performExtendCombine(N, DCI, DAG);
+ case ISD::BITCAST:
+ return performBitcastCombine(N, DCI, DAG);
+ case ISD::CONCAT_VECTORS:
+ return performConcatVectorsCombine(N, DCI, DAG);
+ case ISD::SELECT:
+ return performSelectCombine(N, DAG);
+ case ISD::VSELECT:
+ return performVSelectCombine(N, DCI.DAG);
+ case ISD::STORE:
+ return performSTORECombine(N, DCI, DAG, Subtarget);
+ case AArch64ISD::BRCOND:
+ return performBRCONDCombine(N, DCI, DAG);
+ case AArch64ISD::DUP:
+ return performPostLD1Combine(N, DCI, false);
+ case ISD::INSERT_VECTOR_ELT:
+ return performPostLD1Combine(N, DCI, true);
+ case ISD::INTRINSIC_VOID:
+ case ISD::INTRINSIC_W_CHAIN:
+ switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
+ case Intrinsic::aarch64_neon_ld2:
+ case Intrinsic::aarch64_neon_ld3:
+ case Intrinsic::aarch64_neon_ld4:
+ case Intrinsic::aarch64_neon_ld1x2:
+ case Intrinsic::aarch64_neon_ld1x3:
+ case Intrinsic::aarch64_neon_ld1x4:
+ case Intrinsic::aarch64_neon_ld2lane:
+ case Intrinsic::aarch64_neon_ld3lane:
+ case Intrinsic::aarch64_neon_ld4lane:
+ case Intrinsic::aarch64_neon_ld2r:
+ case Intrinsic::aarch64_neon_ld3r:
+ case Intrinsic::aarch64_neon_ld4r:
+ case Intrinsic::aarch64_neon_st2:
+ case Intrinsic::aarch64_neon_st3:
+ case Intrinsic::aarch64_neon_st4:
+ case Intrinsic::aarch64_neon_st1x2:
+ case Intrinsic::aarch64_neon_st1x3:
+ case Intrinsic::aarch64_neon_st1x4:
+ case Intrinsic::aarch64_neon_st2lane:
+ case Intrinsic::aarch64_neon_st3lane:
+ case Intrinsic::aarch64_neon_st4lane:
+ return performNEONPostLDSTCombine(N, DCI, DAG);
+ default:
+ break;
}
- return Vec;
}
return SDValue();
}
-/// isREVMask - Check if a vector shuffle corresponds to a REV
-/// instruction with the specified blocksize. (The order of the elements
-/// within each block of the vector is reversed.)
-static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
- assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
- "Only possible block sizes for REV are: 16, 32, 64");
-
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
- if (EltSz == 64)
+// Check if the return value is only used as a return value, as otherwise
+// we can't perform a tail-call. In particular, we need to check for
+// target ISD nodes that are returns and any other "odd" constructs
+// that the generic analysis code won't necessarily catch.
+bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N,
+ SDValue &Chain) const {
+ if (N->getNumValues() != 1)
+ return false;
+ if (!N->hasNUsesOfValue(1, 0))
return false;
- unsigned NumElts = VT.getVectorNumElements();
- unsigned BlockElts = M[0] + 1;
- // If the first shuffle index is UNDEF, be optimistic.
- if (M[0] < 0)
- BlockElts = BlockSize / EltSz;
-
- if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
+ SDValue TCChain = Chain;
+ SDNode *Copy = *N->use_begin();
+ if (Copy->getOpcode() == ISD::CopyToReg) {
+ // If the copy has a glue operand, we conservatively assume it isn't safe to
+ // perform a tail call.
+ if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() ==
+ MVT::Glue)
+ return false;
+ TCChain = Copy->getOperand(0);
+ } else if (Copy->getOpcode() != ISD::FP_EXTEND)
return false;
- for (unsigned i = 0; i < NumElts; ++i) {
- if (M[i] < 0)
- continue; // ignore UNDEF indices
- if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
+ bool HasRet = false;
+ for (SDNode *Node : Copy->uses()) {
+ if (Node->getOpcode() != AArch64ISD::RET_FLAG)
return false;
+ HasRet = true;
}
+ if (!HasRet)
+ return false;
+
+ Chain = TCChain;
return true;
}
-// isPermuteMask - Check whether the vector shuffle matches to UZP, ZIP and
-// TRN instruction.
-static unsigned isPermuteMask(ArrayRef<int> M, EVT VT) {
- unsigned NumElts = VT.getVectorNumElements();
- if (NumElts < 4)
- return 0;
-
- bool ismatch = true;
-
- // Check UZP1
- for (unsigned i = 0; i < NumElts; ++i) {
- if ((unsigned)M[i] != i * 2) {
- ismatch = false;
- break;
- }
- }
- if (ismatch)
- return AArch64ISD::NEON_UZP1;
-
- // Check UZP2
- ismatch = true;
- for (unsigned i = 0; i < NumElts; ++i) {
- if ((unsigned)M[i] != i * 2 + 1) {
- ismatch = false;
- break;
- }
- }
- if (ismatch)
- return AArch64ISD::NEON_UZP2;
-
- // Check ZIP1
- ismatch = true;
- for (unsigned i = 0; i < NumElts; ++i) {
- if ((unsigned)M[i] != i / 2 + NumElts * (i % 2)) {
- ismatch = false;
- break;
- }
- }
- if (ismatch)
- return AArch64ISD::NEON_ZIP1;
-
- // Check ZIP2
- ismatch = true;
- for (unsigned i = 0; i < NumElts; ++i) {
- if ((unsigned)M[i] != (NumElts + i) / 2 + NumElts * (i % 2)) {
- ismatch = false;
- break;
- }
- }
- if (ismatch)
- return AArch64ISD::NEON_ZIP2;
-
- // Check TRN1
- ismatch = true;
- for (unsigned i = 0; i < NumElts; ++i) {
- if ((unsigned)M[i] != i + (NumElts - 1) * (i % 2)) {
- ismatch = false;
- break;
- }
- }
- if (ismatch)
- return AArch64ISD::NEON_TRN1;
-
- // Check TRN2
- ismatch = true;
- for (unsigned i = 0; i < NumElts; ++i) {
- if ((unsigned)M[i] != 1 + i + (NumElts - 1) * (i % 2)) {
- ismatch = false;
- break;
- }
- }
- if (ismatch)
- return AArch64ISD::NEON_TRN2;
+// Return whether an instruction can potentially be optimized to a tail
+// call. This will cause the optimizers to attempt to move, or duplicate,
+// return instructions to help enable tail call optimizations for this
+// instruction.
+bool AArch64TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
+ if (!CI->isTailCall())
+ return false;
- return 0;
+ return true;
}
-SDValue
-AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
- SelectionDAG &DAG) const {
- SDValue V1 = Op.getOperand(0);
- SDValue V2 = Op.getOperand(1);
- SDLoc dl(Op);
- EVT VT = Op.getValueType();
- ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
-
- // Convert shuffles that are directly supported on NEON to target-specific
- // DAG nodes, instead of keeping them as shuffles and matching them again
- // during code selection. This is more efficient and avoids the possibility
- // of inconsistencies between legalization and selection.
- ArrayRef<int> ShuffleMask = SVN->getMask();
-
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
- if (EltSize > 64)
- return SDValue();
-
- if (isREVMask(ShuffleMask, VT, 64))
- return DAG.getNode(AArch64ISD::NEON_REV64, dl, VT, V1);
- if (isREVMask(ShuffleMask, VT, 32))
- return DAG.getNode(AArch64ISD::NEON_REV32, dl, VT, V1);
- if (isREVMask(ShuffleMask, VT, 16))
- return DAG.getNode(AArch64ISD::NEON_REV16, dl, VT, V1);
-
- unsigned ISDNo = isPermuteMask(ShuffleMask, VT);
- if (ISDNo)
- return DAG.getNode(ISDNo, dl, VT, V1, V2);
-
- // If the element of shuffle mask are all the same constant, we can
- // transform it into either NEON_VDUP or NEON_VDUPLANE
- if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
- int Lane = SVN->getSplatIndex();
- // If this is undef splat, generate it via "just" vdup, if possible.
- if (Lane == -1) Lane = 0;
-
- // Test if V1 is a SCALAR_TO_VECTOR.
- if (V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
- return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT, V1.getOperand(0));
- }
- // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR.
- if (V1.getOpcode() == ISD::BUILD_VECTOR) {
- bool IsScalarToVector = true;
- for (unsigned i = 0, e = V1.getNumOperands(); i != e; ++i)
- if (V1.getOperand(i).getOpcode() != ISD::UNDEF &&
- i != (unsigned)Lane) {
- IsScalarToVector = false;
- break;
- }
- if (IsScalarToVector)
- return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT,
- V1.getOperand(Lane));
- }
-
- // Test if V1 is a EXTRACT_SUBVECTOR.
- if (V1.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
- int ExtLane = cast<ConstantSDNode>(V1.getOperand(1))->getZExtValue();
- return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1.getOperand(0),
- DAG.getConstant(Lane + ExtLane, MVT::i64));
- }
- // Test if V1 is a CONCAT_VECTORS.
- if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
- V1.getOperand(1).getOpcode() == ISD::UNDEF) {
- SDValue Op0 = V1.getOperand(0);
- assert((unsigned)Lane < Op0.getValueType().getVectorNumElements() &&
- "Invalid vector lane access");
- return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, Op0,
- DAG.getConstant(Lane, MVT::i64));
- }
+bool AArch64TargetLowering::getIndexedAddressParts(SDNode *Op, SDValue &Base,
+ SDValue &Offset,
+ ISD::MemIndexedMode &AM,
+ bool &IsInc,
+ SelectionDAG &DAG) const {
+ if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB)
+ return false;
- return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1,
- DAG.getConstant(Lane, MVT::i64));
+ Base = Op->getOperand(0);
+ // All of the indexed addressing mode instructions take a signed
+ // 9-bit immediate offset.
+ if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
+ int64_t RHSC = (int64_t)RHS->getZExtValue();
+ if (RHSC >= 256 || RHSC <= -256)
+ return false;
+ IsInc = (Op->getOpcode() == ISD::ADD);
+ Offset = Op->getOperand(1);
+ return true;
}
+ return false;
+}
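
A rough standalone restatement of the offset test above (a sketch, not part of the patch; the helper name is made up): pre/post-indexed loads and stores encode a signed 9-bit immediate, and the check accepts anything strictly between -256 and 256, which appears slightly conservative at the -256 end.

    #include <cstdint>
    // Sketch of the range accepted by getIndexedAddressParts above.
    static bool isLegalIndexedOffset(int64_t Off) {
      return Off > -256 && Off < 256; // signed 9-bit immediates span [-256, 255]
    }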
- int Length = ShuffleMask.size();
- int V1EltNum = V1.getValueType().getVectorNumElements();
+bool AArch64TargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
+ SDValue &Offset,
+ ISD::MemIndexedMode &AM,
+ SelectionDAG &DAG) const {
+ EVT VT;
+ SDValue Ptr;
+ if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
+ VT = LD->getMemoryVT();
+ Ptr = LD->getBasePtr();
+ } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
+ VT = ST->getMemoryVT();
+ Ptr = ST->getBasePtr();
+ } else
+ return false;
- // If the number of v1 elements is the same as the number of shuffle mask
- // element and the shuffle masks are sequential values, we can transform
- // it into NEON_VEXTRACT.
- if (V1EltNum == Length) {
- // Check if the shuffle mask is sequential.
- bool IsSequential = true;
- int CurMask = ShuffleMask[0];
- for (int I = 0; I < Length; ++I) {
- if (ShuffleMask[I] != CurMask) {
- IsSequential = false;
- break;
- }
- CurMask++;
- }
- if (IsSequential) {
- assert((EltSize % 8 == 0) && "Bitsize of vector element is incorrect");
- unsigned VecSize = EltSize * V1EltNum;
- unsigned Index = (EltSize/8) * ShuffleMask[0];
- if (VecSize == 64 || VecSize == 128)
- return DAG.getNode(AArch64ISD::NEON_VEXTRACT, dl, VT, V1, V2,
- DAG.getConstant(Index, MVT::i64));
- }
- }
+ bool IsInc;
+ if (!getIndexedAddressParts(Ptr.getNode(), Base, Offset, AM, IsInc, DAG))
+ return false;
+ AM = IsInc ? ISD::PRE_INC : ISD::PRE_DEC;
+ return true;
+}
- // For shuffle mask like "0, 1, 2, 3, 4, 5, 13, 7", try to generate insert
- // by element from V2 to V1 .
- // If shuffle mask is like "0, 1, 10, 11, 12, 13, 14, 15", V2 would be a
- // better choice to be inserted than V1 as less insert needed, so we count
- // element to be inserted for both V1 and V2, and select less one as insert
- // target.
-
- // Collect elements need to be inserted and their index.
- SmallVector<int, 8> NV1Elt;
- SmallVector<int, 8> N1Index;
- SmallVector<int, 8> NV2Elt;
- SmallVector<int, 8> N2Index;
- for (int I = 0; I != Length; ++I) {
- if (ShuffleMask[I] != I) {
- NV1Elt.push_back(ShuffleMask[I]);
- N1Index.push_back(I);
- }
- }
- for (int I = 0; I != Length; ++I) {
- if (ShuffleMask[I] != (I + V1EltNum)) {
- NV2Elt.push_back(ShuffleMask[I]);
- N2Index.push_back(I);
- }
- }
+bool AArch64TargetLowering::getPostIndexedAddressParts(
+ SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset,
+ ISD::MemIndexedMode &AM, SelectionDAG &DAG) const {
+ EVT VT;
+ SDValue Ptr;
+ if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
+ VT = LD->getMemoryVT();
+ Ptr = LD->getBasePtr();
+ } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
+ VT = ST->getMemoryVT();
+ Ptr = ST->getBasePtr();
+ } else
+ return false;
- // Decide which to be inserted. If all lanes mismatch, neither V1 nor V2
- // will be inserted.
- SDValue InsV = V1;
- SmallVector<int, 8> InsMasks = NV1Elt;
- SmallVector<int, 8> InsIndex = N1Index;
- if ((int)NV1Elt.size() != Length || (int)NV2Elt.size() != Length) {
- if (NV1Elt.size() > NV2Elt.size()) {
- InsV = V2;
- InsMasks = NV2Elt;
- InsIndex = N2Index;
- }
- } else {
- InsV = DAG.getNode(ISD::UNDEF, dl, VT);
- }
+ bool IsInc;
+ if (!getIndexedAddressParts(Op, Base, Offset, AM, IsInc, DAG))
+ return false;
+ // Post-indexing updates the base, so it's not a valid transform
+ // if that's not the same as the load's pointer.
+ if (Ptr != Base)
+ return false;
+ AM = IsInc ? ISD::POST_INC : ISD::POST_DEC;
+ return true;
+}
- for (int I = 0, E = InsMasks.size(); I != E; ++I) {
- SDValue ExtV = V1;
- int Mask = InsMasks[I];
- if (Mask >= V1EltNum) {
- ExtV = V2;
- Mask -= V1EltNum;
- }
- // Any value type smaller than i32 is illegal in AArch64, and this lower
- // function is called after legalize pass, so we need to legalize
- // the result here.
- EVT EltVT;
- if (VT.getVectorElementType().isFloatingPoint())
- EltVT = (EltSize == 64) ? MVT::f64 : MVT::f32;
- else
- EltVT = (EltSize == 64) ? MVT::i64 : MVT::i32;
+static void ReplaceBITCASTResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) {
+ if (N->getValueType(0) != MVT::i16)
+ return;
- if (Mask >= 0) {
- ExtV = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, ExtV,
- DAG.getConstant(Mask, MVT::i64));
- InsV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, InsV, ExtV,
- DAG.getConstant(InsIndex[I], MVT::i64));
- }
- }
- return InsV;
+ SDLoc DL(N);
+ SDValue Op = N->getOperand(0);
+ assert(Op.getValueType() == MVT::f16 &&
+ "Inconsistent bitcast? Only 16-bit types should be i16 or f16");
+ Op = SDValue(
+ DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::f32,
+ DAG.getUNDEF(MVT::i32), Op,
+ DAG.getTargetConstant(AArch64::hsub, MVT::i32)),
+ 0);
+ Op = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op);
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Op));
}
-AArch64TargetLowering::ConstraintType
-AArch64TargetLowering::getConstraintType(const std::string &Constraint) const {
- if (Constraint.size() == 1) {
- switch (Constraint[0]) {
- default: break;
- case 'w': // An FP/SIMD vector register
- return C_RegisterClass;
- case 'I': // Constant that can be used with an ADD instruction
- case 'J': // Constant that can be used with a SUB instruction
- case 'K': // Constant that can be used with a 32-bit logical instruction
- case 'L': // Constant that can be used with a 64-bit logical instruction
- case 'M': // Constant that can be used as a 32-bit MOV immediate
- case 'N': // Constant that can be used as a 64-bit MOV immediate
- case 'Y': // Floating point constant zero
- case 'Z': // Integer constant zero
- return C_Other;
- case 'Q': // A memory reference with base register and no offset
- return C_Memory;
- case 'S': // A symbolic address
- return C_Other;
- }
+void AArch64TargetLowering::ReplaceNodeResults(
+ SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
+ switch (N->getOpcode()) {
+ default:
+ llvm_unreachable("Don't know how to custom expand this");
+ case ISD::BITCAST:
+ ReplaceBITCASTResults(N, Results, DAG);
+ return;
+ case ISD::FP_TO_UINT:
+ case ISD::FP_TO_SINT:
+ assert(N->getValueType(0) == MVT::i128 && "unexpected illegal conversion");
+ // Let normal code take care of it by not adding anything to Results.
+ return;
}
-
- // FIXME: Ump, Utf, Usa, Ush
- // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes,
- // whatever they may be
- // Utf: A memory address suitable for ldp/stp in TF mode, whatever it may be
- // Usa: An absolute symbolic address
- // Ush: The high part (bits 32:12) of a pc-relative symbolic address
- assert(Constraint != "Ump" && Constraint != "Utf" && Constraint != "Usa"
- && Constraint != "Ush" && "Unimplemented constraints");
-
- return TargetLowering::getConstraintType(Constraint);
}
-TargetLowering::ConstraintWeight
-AArch64TargetLowering::getSingleConstraintMatchWeight(AsmOperandInfo &Info,
- const char *Constraint) const {
-
- llvm_unreachable("Constraint weight unimplemented");
+bool AArch64TargetLowering::shouldExpandAtomicInIR(Instruction *Inst) const {
+ // Loads and stores less than 128 bits are already atomic; ones above that
+ // are doomed anyway, so defer to the default libcall and blame the OS when
+ // things go wrong:
+ if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
+ return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() == 128;
+ else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
+ return LI->getType()->getPrimitiveSizeInBits() == 128;
+
+ // For the real atomic operations, we have ldxr/stxr up to 128 bits.
+ return Inst->getType()->getPrimitiveSizeInBits() <= 128;
}
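
Read concretely (a hedged paraphrase of the predicate above, not from the patch): 128-bit atomic loads and stores are expanded in IR to use the load-exclusive/store-exclusive intrinsics, smaller loads and stores are left alone because plain LDAR/STLR already provides the required atomicity, and read-modify-write operations up to 128 bits are likewise expanded into sequences built from emitLoadLinked/emitStoreConditional below.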
-void
-AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
- std::string &Constraint,
- std::vector<SDValue> &Ops,
- SelectionDAG &DAG) const {
- SDValue Result(0, 0);
-
- // Only length 1 constraints are C_Other.
- if (Constraint.size() != 1) return;
-
- // Only C_Other constraints get lowered like this. That means constants for us
- // so return early if there's no hope the constraint can be lowered.
-
- switch(Constraint[0]) {
- default: break;
- case 'I': case 'J': case 'K': case 'L':
- case 'M': case 'N': case 'Z': {
- ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
- if (!C)
- return;
-
- uint64_t CVal = C->getZExtValue();
- uint32_t Bits;
+TargetLoweringBase::LegalizeTypeAction
+AArch64TargetLowering::getPreferredVectorAction(EVT VT) const {
+ MVT SVT = VT.getSimpleVT();
+ // During type legalization, we prefer to widen v1i8, v1i16, v1i32 to v8i8,
+ // v4i16, v2i32 instead of to promote.
+ if (SVT == MVT::v1i8 || SVT == MVT::v1i16 || SVT == MVT::v1i32
+ || SVT == MVT::v1f32)
+ return TypeWidenVector;
- switch (Constraint[0]) {
- default:
- // FIXME: 'M' and 'N' are MOV pseudo-insts -- unsupported in assembly. 'J'
- // is a peculiarly useless SUB constraint.
- llvm_unreachable("Unimplemented C_Other constraint");
- case 'I':
- if (CVal <= 0xfff)
- break;
- return;
- case 'K':
- if (A64Imms::isLogicalImm(32, CVal, Bits))
- break;
- return;
- case 'L':
- if (A64Imms::isLogicalImm(64, CVal, Bits))
- break;
- return;
- case 'Z':
- if (CVal == 0)
- break;
- return;
- }
+ return TargetLoweringBase::getPreferredVectorAction(VT);
+}
- Result = DAG.getTargetConstant(CVal, Op.getValueType());
- break;
- }
- case 'S': {
- // An absolute symbolic address or label reference.
- if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
- Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
- GA->getValueType(0));
- } else if (const BlockAddressSDNode *BA
- = dyn_cast<BlockAddressSDNode>(Op)) {
- Result = DAG.getTargetBlockAddress(BA->getBlockAddress(),
- BA->getValueType(0));
- } else if (const ExternalSymbolSDNode *ES
- = dyn_cast<ExternalSymbolSDNode>(Op)) {
- Result = DAG.getTargetExternalSymbol(ES->getSymbol(),
- ES->getValueType(0));
- } else
- return;
- break;
- }
- case 'Y':
- if (const ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
- if (CFP->isExactlyValue(0.0)) {
- Result = DAG.getTargetConstantFP(0.0, CFP->getValueType(0));
- break;
- }
- }
- return;
+Value *AArch64TargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
+ AtomicOrdering Ord) const {
+ Module *M = Builder.GetInsertBlock()->getParent()->getParent();
+ Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
+ bool IsAcquire =
+ Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent;
+
+ // Since i128 isn't legal and intrinsics don't get type-lowered, the ldrexd
+ // intrinsic must return {i64, i64} and we have to recombine them into a
+ // single i128 here.
+ if (ValTy->getPrimitiveSizeInBits() == 128) {
+ Intrinsic::ID Int =
+ IsAcquire ? Intrinsic::aarch64_ldaxp : Intrinsic::aarch64_ldxp;
+ Function *Ldxr = llvm::Intrinsic::getDeclaration(M, Int);
+
+ Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
+ Value *LoHi = Builder.CreateCall(Ldxr, Addr, "lohi");
+
+ Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
+ Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
+ Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
+ Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
+ return Builder.CreateOr(
+ Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64");
}
- if (Result.getNode()) {
- Ops.push_back(Result);
- return;
- }
+ Type *Tys[] = { Addr->getType() };
+ Intrinsic::ID Int =
+ IsAcquire ? Intrinsic::aarch64_ldaxr : Intrinsic::aarch64_ldxr;
+ Function *Ldxr = llvm::Intrinsic::getDeclaration(M, Int, Tys);
- // It's an unknown constraint for us. Let generic code have a go.
- TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
+ return Builder.CreateTruncOrBitCast(
+ Builder.CreateCall(Ldxr, Addr),
+ cast<PointerType>(Addr->getType())->getElementType());
}
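
In plain arithmetic terms, the {lo, hi} recombination in the 128-bit path above amounts to the following (a sketch assuming a compiler with unsigned __int128 support; not part of the patch):

    #include <cstdint>
    // Combine the two i64 halves returned by ldxp/ldaxp into one 128-bit value.
    static unsigned __int128 combineHalves(uint64_t Lo, uint64_t Hi) {
      return ((unsigned __int128)Hi << 64) | Lo;
    }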
-std::pair<unsigned, const TargetRegisterClass*>
-AArch64TargetLowering::getRegForInlineAsmConstraint(
- const std::string &Constraint,
- MVT VT) const {
- if (Constraint.size() == 1) {
- switch (Constraint[0]) {
- case 'r':
- if (VT.getSizeInBits() <= 32)
- return std::make_pair(0U, &AArch64::GPR32RegClass);
- else if (VT == MVT::i64)
- return std::make_pair(0U, &AArch64::GPR64RegClass);
- break;
- case 'w':
- if (VT == MVT::f16)
- return std::make_pair(0U, &AArch64::FPR16RegClass);
- else if (VT == MVT::f32)
- return std::make_pair(0U, &AArch64::FPR32RegClass);
- else if (VT.getSizeInBits() == 64)
- return std::make_pair(0U, &AArch64::FPR64RegClass);
- else if (VT.getSizeInBits() == 128)
- return std::make_pair(0U, &AArch64::FPR128RegClass);
- break;
- }
+Value *AArch64TargetLowering::emitStoreConditional(IRBuilder<> &Builder,
+ Value *Val, Value *Addr,
+ AtomicOrdering Ord) const {
+ Module *M = Builder.GetInsertBlock()->getParent()->getParent();
+ bool IsRelease =
+ Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent;
+
+ // Since the intrinsics must have legal type, the i128 intrinsics take two
+ // parameters: "i64, i64". We must marshal Val into the appropriate form
+ // before the call.
+ if (Val->getType()->getPrimitiveSizeInBits() == 128) {
+ Intrinsic::ID Int =
+ IsRelease ? Intrinsic::aarch64_stlxp : Intrinsic::aarch64_stxp;
+ Function *Stxr = Intrinsic::getDeclaration(M, Int);
+ Type *Int64Ty = Type::getInt64Ty(M->getContext());
+
+ Value *Lo = Builder.CreateTrunc(Val, Int64Ty, "lo");
+ Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 64), Int64Ty, "hi");
+ Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
+ return Builder.CreateCall3(Stxr, Lo, Hi, Addr);
}
- // Use the default implementation in TargetLowering to convert the register
- // constraint into a member of a register class.
- return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
-}
-
-/// Represent NEON load and store intrinsics as MemIntrinsicNodes.
-/// The associated MachineMemOperands record the alignment specified
-/// in the intrinsic calls.
-bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
- const CallInst &I,
- unsigned Intrinsic) const {
- switch (Intrinsic) {
- case Intrinsic::arm_neon_vld1:
- case Intrinsic::arm_neon_vld2:
- case Intrinsic::arm_neon_vld3:
- case Intrinsic::arm_neon_vld4:
- case Intrinsic::aarch64_neon_vld1x2:
- case Intrinsic::aarch64_neon_vld1x3:
- case Intrinsic::aarch64_neon_vld1x4:
- case Intrinsic::arm_neon_vld2lane:
- case Intrinsic::arm_neon_vld3lane:
- case Intrinsic::arm_neon_vld4lane: {
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- // Conservatively set memVT to the entire set of vectors loaded.
- uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8;
- Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
- Info.ptrVal = I.getArgOperand(0);
- Info.offset = 0;
- Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
- Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
- Info.vol = false; // volatile loads with NEON intrinsics not supported
- Info.readMem = true;
- Info.writeMem = false;
- return true;
- }
- case Intrinsic::arm_neon_vst1:
- case Intrinsic::arm_neon_vst2:
- case Intrinsic::arm_neon_vst3:
- case Intrinsic::arm_neon_vst4:
- case Intrinsic::aarch64_neon_vst1x2:
- case Intrinsic::aarch64_neon_vst1x3:
- case Intrinsic::aarch64_neon_vst1x4:
- case Intrinsic::arm_neon_vst2lane:
- case Intrinsic::arm_neon_vst3lane:
- case Intrinsic::arm_neon_vst4lane: {
- Info.opc = ISD::INTRINSIC_VOID;
- // Conservatively set memVT to the entire set of vectors stored.
- unsigned NumElts = 0;
- for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
- Type *ArgTy = I.getArgOperand(ArgI)->getType();
- if (!ArgTy->isVectorTy())
- break;
- NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8;
- }
- Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
- Info.ptrVal = I.getArgOperand(0);
- Info.offset = 0;
- Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
- Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
- Info.vol = false; // volatile stores with NEON intrinsics not supported
- Info.readMem = false;
- Info.writeMem = true;
- return true;
- }
- default:
- break;
- }
+ Intrinsic::ID Int =
+ IsRelease ? Intrinsic::aarch64_stlxr : Intrinsic::aarch64_stxr;
+ Type *Tys[] = { Addr->getType() };
+ Function *Stxr = Intrinsic::getDeclaration(M, Int, Tys);
- return false;
+ return Builder.CreateCall2(
+ Stxr, Builder.CreateZExtOrBitCast(
+ Val, Stxr->getFunctionType()->getParamType(0)),
+ Addr);
}
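
The marshalling in the 128-bit store path is the inverse of the load case; roughly, with the same caveats as the load sketch above:

    #include <cstdint>
    // Split a 128-bit value into the lo/hi i64 pair expected by stxp/stlxp.
    static void splitHalves(unsigned __int128 Val, uint64_t &Lo, uint64_t &Hi) {
      Lo = (uint64_t)Val;
      Hi = (uint64_t)(Val >> 64);
    }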
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 8ad5a79..cb0b9ef 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -12,356 +12,456 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_TARGET_AARCH64_ISELLOWERING_H
-#define LLVM_TARGET_AARCH64_ISELLOWERING_H
+#ifndef LLVM_TARGET_AArch64_ISELLOWERING_H
+#define LLVM_TARGET_AArch64_ISELLOWERING_H
-#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/IR/CallingConv.h"
#include "llvm/Target/TargetLowering.h"
-#include "llvm/IR/Intrinsics.h"
namespace llvm {
+
namespace AArch64ISD {
- enum NodeType {
- // Start the numbering from where ISD NodeType finishes.
- FIRST_NUMBER = ISD::BUILTIN_OP_END,
-
- // This is a conditional branch which also notes the flag needed
- // (eq/sgt/...). A64 puts this information on the branches rather than
- // compares as LLVM does.
- BR_CC,
-
- // A node to be selected to an actual call operation: either BL or BLR in
- // the absence of tail calls.
- Call,
-
- // Indicates a floating-point immediate which fits into the format required
- // by the FMOV instructions. First (and only) operand is the 8-bit encoded
- // value of that immediate.
- FPMOV,
-
- // Corresponds directly to an EXTR instruction. Operands are an LHS an RHS
- // and an LSB.
- EXTR,
-
- // Wraps a load from the GOT, which should always be performed with a 64-bit
- // load instruction. This prevents the DAG combiner folding a truncate to
- // form a smaller memory access.
- GOTLoad,
-
- // Performs a bitfield insert. Arguments are: the value being inserted into;
- // the value being inserted; least significant bit changed; width of the
- // field.
- BFI,
-
- // Simply a convenient node inserted during ISelLowering to represent
- // procedure return. Will almost certainly be selected to "RET".
- Ret,
-
- /// Extracts a field of contiguous bits from the source and sign extends
- /// them into a single register. Arguments are: source; immr; imms. Note
- /// these are pre-encoded since DAG matching can't cope with combining LSB
- /// and Width into these values itself.
- SBFX,
-
- /// This is an A64-ification of the standard LLVM SELECT_CC operation. The
- /// main difference is that it only has the values and an A64 condition,
- /// which will be produced by a setcc instruction.
- SELECT_CC,
-
- /// This serves most of the functions of the LLVM SETCC instruction, for two
- /// purposes. First, it prevents optimisations from fiddling with the
- /// compare after we've moved the CondCode information onto the SELECT_CC or
- /// BR_CC instructions. Second, it gives a legal instruction for the actual
- /// comparison.
- ///
- /// It keeps a record of the condition flags asked for because certain
- /// instructions are only valid for a subset of condition codes.
- SETCC,
-
- // Designates a node which is a tail call: both a call and a return
- // instruction as far as selction is concerned. It should be selected to an
- // unconditional branch. Has the usual plethora of call operands, but: 1st
- // is callee, 2nd is stack adjustment required immediately before branch.
- TC_RETURN,
-
- // Designates a call used to support the TLS descriptor ABI. The call itself
- // will be indirect ("BLR xN") but a relocation-specifier (".tlsdesccall
- // var") must be attached somehow during code generation. It takes two
- // operands: the callee and the symbol to be relocated against.
- TLSDESCCALL,
-
- // Leaf node which will be lowered to an appropriate MRS to obtain the
- // thread pointer: TPIDR_EL0.
- THREAD_POINTER,
-
- /// Extracts a field of contiguous bits from the source and zero extends
- /// them into a single register. Arguments are: source; immr; imms. Note
- /// these are pre-encoded since DAG matching can't cope with combining LSB
- /// and Width into these values itself.
- UBFX,
-
- // Wraps an address which the ISelLowering phase has decided should be
- // created using the large memory model style: i.e. a sequence of four
- // movz/movk instructions.
- WrapperLarge,
-
- // Wraps an address which the ISelLowering phase has decided should be
- // created using the small memory model style: i.e. adrp/add or
- // adrp/mem-op. This exists to prevent bare TargetAddresses which may never
- // get selected.
- WrapperSmall,
-
- // Vector bitwise select
- NEON_BSL,
-
- // Vector move immediate
- NEON_MOVIMM,
-
- // Vector Move Inverted Immediate
- NEON_MVNIMM,
-
- // Vector FP move immediate
- NEON_FMOVIMM,
-
- // Vector permute
- NEON_UZP1,
- NEON_UZP2,
- NEON_ZIP1,
- NEON_ZIP2,
- NEON_TRN1,
- NEON_TRN2,
-
- // Vector Element reverse
- NEON_REV64,
- NEON_REV32,
- NEON_REV16,
-
- // Vector compare
- NEON_CMP,
-
- // Vector compare zero
- NEON_CMPZ,
-
- // Vector compare bitwise test
- NEON_TST,
-
- // Vector saturating shift
- NEON_QSHLs,
- NEON_QSHLu,
-
- // Vector dup
- NEON_VDUP,
-
- // Vector dup by lane
- NEON_VDUPLANE,
-
- // Vector extract
- NEON_VEXTRACT,
-
- // NEON duplicate lane loads
- NEON_LD2DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
- NEON_LD3DUP,
- NEON_LD4DUP,
-
- // NEON loads with post-increment base updates:
- NEON_LD1_UPD,
- NEON_LD2_UPD,
- NEON_LD3_UPD,
- NEON_LD4_UPD,
- NEON_LD1x2_UPD,
- NEON_LD1x3_UPD,
- NEON_LD1x4_UPD,
-
- // NEON stores with post-increment base updates:
- NEON_ST1_UPD,
- NEON_ST2_UPD,
- NEON_ST3_UPD,
- NEON_ST4_UPD,
- NEON_ST1x2_UPD,
- NEON_ST1x3_UPD,
- NEON_ST1x4_UPD,
-
- // NEON duplicate lane loads with post-increment base updates:
- NEON_LD2DUP_UPD,
- NEON_LD3DUP_UPD,
- NEON_LD4DUP_UPD,
-
- // NEON lane loads with post-increment base updates:
- NEON_LD2LN_UPD,
- NEON_LD3LN_UPD,
- NEON_LD4LN_UPD,
-
- // NEON lane store with post-increment base updates:
- NEON_ST2LN_UPD,
- NEON_ST3LN_UPD,
- NEON_ST4LN_UPD
- };
-}
+enum {
+ FIRST_NUMBER = ISD::BUILTIN_OP_END,
+ WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
+ CALL, // Function call.
+
+ // Almost the same as a normal call node, except that a TLSDesc relocation is
+ // needed so the linker can relax it correctly if possible.
+ TLSDESC_CALL,
+ ADRP, // Page address of a TargetGlobalAddress operand.
+ ADDlow, // Add the low 12 bits of a TargetGlobalAddress operand.
+ LOADgot, // Load from automatically generated descriptor (e.g. Global
+ // Offset Table, TLS record).
+ RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
+ BRCOND, // Conditional branch instruction; "b.cond".
+ CSEL,
+ FCSEL, // Conditional move instruction.
+ CSINV, // Conditional select invert.
+ CSNEG, // Conditional select negate.
+ CSINC, // Conditional select increment.
+
+ // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
+ // ELF.
+ THREAD_POINTER,
+ ADC,
+ SBC, // adc, sbc instructions
+
+ // Arithmetic instructions which write flags.
+ ADDS,
+ SUBS,
+ ADCS,
+ SBCS,
+ ANDS,
+
+ // Floating point comparison
+ FCMP,
+
+ // Floating point max and min instructions.
+ FMAX,
+ FMIN,
+
+ // Scalar extract
+ EXTR,
+
+ // Scalar-to-vector duplication
+ DUP,
+ DUPLANE8,
+ DUPLANE16,
+ DUPLANE32,
+ DUPLANE64,
+
+ // Vector immediate moves
+ MOVI,
+ MOVIshift,
+ MOVIedit,
+ MOVImsl,
+ FMOV,
+ MVNIshift,
+ MVNImsl,
+
+ // Vector immediate ops
+ BICi,
+ ORRi,
+
+ // Vector bit select: similar to ISD::VSELECT but not all bits within an
+ // element must be identical.
+ BSL,
+
+ // Vector arithmetic negation
+ NEG,
+
+ // Vector shuffles
+ ZIP1,
+ ZIP2,
+ UZP1,
+ UZP2,
+ TRN1,
+ TRN2,
+ REV16,
+ REV32,
+ REV64,
+ EXT,
+
+ // Vector shift by scalar
+ VSHL,
+ VLSHR,
+ VASHR,
+
+ // Vector shift by scalar (again)
+ SQSHL_I,
+ UQSHL_I,
+ SQSHLU_I,
+ SRSHR_I,
+ URSHR_I,
+
+ // Vector comparisons
+ CMEQ,
+ CMGE,
+ CMGT,
+ CMHI,
+ CMHS,
+ FCMEQ,
+ FCMGE,
+ FCMGT,
+
+ // Vector zero comparisons
+ CMEQz,
+ CMGEz,
+ CMGTz,
+ CMLEz,
+ CMLTz,
+ FCMEQz,
+ FCMGEz,
+ FCMGTz,
+ FCMLEz,
+ FCMLTz,
+
+ // Vector bitwise negation
+ NOT,
+
+ // Vector bitwise selection
+ BIT,
+
+ // Compare-and-branch
+ CBZ,
+ CBNZ,
+ TBZ,
+ TBNZ,
+
+ // Tail calls
+ TC_RETURN,
+
+ // Custom prefetch handling
+ PREFETCH,
+
+ // {s|u}int to FP within a FP register.
+ SITOF,
+ UITOF,
+
+ // NEON Load/Store with post-increment base updates
+ LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
+ LD3post,
+ LD4post,
+ ST2post,
+ ST3post,
+ ST4post,
+ LD1x2post,
+ LD1x3post,
+ LD1x4post,
+ ST1x2post,
+ ST1x3post,
+ ST1x4post,
+ LD1DUPpost,
+ LD2DUPpost,
+ LD3DUPpost,
+ LD4DUPpost,
+ LD1LANEpost,
+ LD2LANEpost,
+ LD3LANEpost,
+ LD4LANEpost,
+ ST2LANEpost,
+ ST3LANEpost,
+ ST4LANEpost
+};
+
+} // end namespace AArch64ISD
class AArch64Subtarget;
class AArch64TargetMachine;
class AArch64TargetLowering : public TargetLowering {
-public:
- explicit AArch64TargetLowering(AArch64TargetMachine &TM);
-
- const char *getTargetNodeName(unsigned Opcode) const;
+ bool RequireStrictAlign;
- CCAssignFn *CCAssignFnForNode(CallingConv::ID CC) const;
+public:
+ explicit AArch64TargetLowering(TargetMachine &TM);
+
+ /// Selects the correct CCAssignFn for the given CallingConvention
+ /// value.
+ CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;
+
+ /// computeKnownBitsForTargetNode - Determine which of the bits specified in
+ /// Mask are known to be either zero or one and return them in the
+ /// KnownZero/KnownOne bitsets.
+ void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
+ APInt &KnownOne, const SelectionDAG &DAG,
+ unsigned Depth = 0) const override;
+
+ MVT getScalarShiftAmountTy(EVT LHSTy) const override;
+
+ /// allowsUnalignedMemoryAccesses - Returns true if the target allows
+ /// unaligned memory accesses of the specified type.
+ bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
+ bool *Fast = nullptr) const override {
+ if (RequireStrictAlign)
+ return false;
+ // FIXME: True for Cyclone, but not necessarily for others.
+ if (Fast)
+ *Fast = true;
+ return true;
+ }
- SDValue LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
+ /// LowerOperation - Provide custom lowering hooks for some operations.
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
- SDValue LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const;
+ const char *getTargetNodeName(unsigned Opcode) const override;
- SDValue LowerCall(CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const;
-
- SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
- CallingConv::ID CallConv, bool IsVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
+ SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
- bool isKnownShuffleVector(SDValue Op, SelectionDAG &DAG, SDValue &Res) const;
+ /// getFunctionAlignment - Return the Log2 alignment of this function.
+ unsigned getFunctionAlignment(const Function *F) const;
- SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
- const AArch64Subtarget *ST) const;
+ /// getMaximalGlobalOffset - Returns the maximal possible offset which can
+ /// be used for loads / stores from the global.
+ unsigned getMaximalGlobalOffset() const override;
- SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
+ /// Returns true if a cast between SrcAS and DestAS is a noop.
+ bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
+ // Addrspacecasts are always noops.
+ return true;
+ }
- void SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL,
- SDValue &Chain) const;
+ /// createFastISel - This method returns a target specific FastISel object,
+ /// or null if the target does not support "fast" ISel.
+ FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo) const override;
- /// IsEligibleForTailCallOptimization - Check whether the call is eligible
- /// for tail call optimization. Targets which want to do tail call
- /// optimization should implement this function.
- bool IsEligibleForTailCallOptimization(SDValue Callee,
- CallingConv::ID CalleeCC,
- bool IsVarArg,
- bool IsCalleeStructRet,
- bool IsCallerStructRet,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SelectionDAG& DAG) const;
+ bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
- /// Finds the incoming stack arguments which overlap the given fixed stack
- /// object and incorporates their load into the current chain. This prevents
- /// an upcoming store from clobbering the stack argument before it's used.
- SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
- MachineFrameInfo *MFI, int ClobberedFI) const;
+ bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
- EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
+ /// isShuffleMaskLegal - Return true if the given shuffle mask can be
+ /// codegen'd directly, or if it should be stack expanded.
+ bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;
- bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;
+ /// getSetCCResultType - Return the ISD::SETCC ValueType
+ EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;
- bool IsTailCallConvention(CallingConv::ID CallCC) const;
+ SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+ MachineBasicBlock *EmitF128CSEL(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
- bool isLegalICmpImmediate(int64_t Val) const;
- SDValue getSelectableIntSetCC(SDValue LHS, SDValue RHS, ISD::CondCode CC,
- SDValue &A64cc, SelectionDAG &DAG, SDLoc &dl) const;
+ MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *MBB) const override;
- virtual MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
+ bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
+ unsigned Intrinsic) const override;
- MachineBasicBlock *
- emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *MBB,
- unsigned Size, unsigned Opcode) const;
+ bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
+ bool isTruncateFree(EVT VT1, EVT VT2) const override;
- MachineBasicBlock *
- emitAtomicBinaryMinMax(MachineInstr *MI, MachineBasicBlock *BB,
- unsigned Size, unsigned CmpOp,
- A64CC::CondCodes Cond) const;
- MachineBasicBlock *
- emitAtomicCmpSwap(MachineInstr *MI, MachineBasicBlock *BB,
- unsigned Size) const;
+ bool isZExtFree(Type *Ty1, Type *Ty2) const override;
+ bool isZExtFree(EVT VT1, EVT VT2) const override;
+ bool isZExtFree(SDValue Val, EVT VT2) const override;
- MachineBasicBlock *
- EmitF128CSEL(MachineInstr *MI, MachineBasicBlock *MBB) const;
+ bool hasPairedLoad(Type *LoadedType,
+ unsigned &RequiredAligment) const override;
+ bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override;
- SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerF128ToCall(SDValue Op, SelectionDAG &DAG,
- RTLIB::Libcall Call) const;
- SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, bool IsSigned) const;
- SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
+ bool isLegalAddImmediate(int64_t) const override;
+ bool isLegalICmpImmediate(int64_t) const override;
- SDValue LowerGlobalAddressELFSmall(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerGlobalAddressELFLarge(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
+ EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
+ bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
+ MachineFunction &MF) const override;
- SDValue LowerTLSDescCall(SDValue SymAddr, SDValue DescAddr, SDLoc DL,
- SelectionDAG &DAG) const;
- SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG, bool IsSigned) const;
- SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
+ /// isLegalAddressingMode - Return true if the addressing mode represented
+ /// by AM is legal for this target, for a load/store of the specified type.
+ bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;
- virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ /// \brief Return the cost of the scaling factor used in the addressing
+ /// mode represented by AM for this target, for a load/store
+ /// of the specified type.
+ /// If the AM is supported, the return value must be >= 0.
+ /// If the AM is not supported, it returns a negative value.
+ int getScalingFactorCost(const AddrMode &AM, Type *Ty) const override;
/// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
/// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
/// expanded to FMAs when this method returns true, otherwise fmuladd is
/// expanded to fmul + fadd.
- virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
+ bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
- ConstraintType getConstraintType(const std::string &Constraint) const;
+ const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
- ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &Info,
- const char *Constraint) const;
- void LowerAsmOperandForConstraint(SDValue Op,
- std::string &Constraint,
- std::vector<SDValue> &Ops,
- SelectionDAG &DAG) const;
+ /// \brief Returns false if N is a bit extraction pattern of (X >> C) & Mask.
+ bool isDesirableToCommuteWithShift(const SDNode *N) const override;
+
+ /// \brief Returns true if it is beneficial to convert a load of a constant
+ /// to just the constant itself.
+ bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
+ Type *Ty) const override;
- std::pair<unsigned, const TargetRegisterClass*>
- getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const;
+ Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
+ AtomicOrdering Ord) const override;
+ Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
+ Value *Addr, AtomicOrdering Ord) const override;
- virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
- unsigned Intrinsic) const LLVM_OVERRIDE;
+ bool shouldExpandAtomicInIR(Instruction *Inst) const override;
-protected:
- std::pair<const TargetRegisterClass*, uint8_t>
- findRepresentativeClass(MVT VT) const;
+ TargetLoweringBase::LegalizeTypeAction
+ getPreferredVectorAction(EVT VT) const override;
private:
- const InstrItineraryData *Itins;
+ /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
+ /// make the right decision when generating code for different targets.
+ const AArch64Subtarget *Subtarget;
- const AArch64Subtarget *getSubtarget() const {
- return &getTargetMachine().getSubtarget<AArch64Subtarget>();
- }
-};
-enum NeonModImmType {
- Neon_Mov_Imm,
- Neon_Mvn_Imm
+ void addTypeForNEON(EVT VT, EVT PromotedBitwiseVT);
+ void addDRTypeForNEON(MVT VT);
+ void addQRTypeForNEON(MVT VT);
+
+ SDValue
+ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
+ SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
+
+ SDValue LowerCall(CallLoweringInfo & /*CLI*/,
+ SmallVectorImpl<SDValue> &InVals) const override;
+
+ SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
+ CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
+ bool isThisReturn, SDValue ThisVal) const;
+
+ bool isEligibleForTailCallOptimization(
+ SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
+ bool isCalleeStructRet, bool isCallerStructRet,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;
+
+ /// Finds the incoming stack arguments which overlap the given fixed stack
+ /// object and incorporates their load into the current chain. This prevents
+ /// an upcoming store from clobbering the stack argument before it's used.
+ SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
+ MachineFrameInfo *MFI, int ClobberedFI) const;
+
+ bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;
+
+ bool IsTailCallConvention(CallingConv::ID CallCC) const;
+
+ void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL,
+ SDValue &Chain) const;
+
+ bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext &Context) const override;
+
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
+ SelectionDAG &DAG) const override;
+
+ SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerELFTLSDescCall(SDValue SymAddr, SDValue DescAddr, SDLoc DL,
+ SelectionDAG &DAG) const;
+ SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
+ RTLIB::Libcall Call) const;
+ SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
+
+ ConstraintType
+ getConstraintType(const std::string &Constraint) const override;
+ unsigned getRegisterByName(const char* RegName, EVT VT) const override;
+
+ /// Examine constraint string and operand type and determine a weight value.
+ /// The operand object must already have been set up with the operand type.
+ ConstraintWeight
+ getSingleConstraintMatchWeight(AsmOperandInfo &info,
+ const char *constraint) const override;
+
+ std::pair<unsigned, const TargetRegisterClass *>
+ getRegForInlineAsmConstraint(const std::string &Constraint,
+ MVT VT) const override;
+ void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const override;
+
+ bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
+ bool mayBeEmittedAsTailCall(CallInst *CI) const override;
+ bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
+ ISD::MemIndexedMode &AM, bool &IsInc,
+ SelectionDAG &DAG) const;
+ bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
+ ISD::MemIndexedMode &AM,
+ SelectionDAG &DAG) const override;
+ bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
+ SDValue &Offset, ISD::MemIndexedMode &AM,
+ SelectionDAG &DAG) const override;
+
+ void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const override;
};
-extern SDValue ScanBUILD_VECTOR(SDValue Op, bool &isOnlyLowElement,
- bool &usesOnlyOneValue, bool &hasDominantValue,
- bool &isConstant, bool &isUNDEF);
-} // namespace llvm
+namespace AArch64 {
+FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo);
+} // end namespace AArch64
+
+} // end namespace llvm
-#endif // LLVM_TARGET_AARCH64_ISELLOWERING_H
+#endif // LLVM_TARGET_AArch64_ISELLOWERING_H
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64InstrAtomics.td b/contrib/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
new file mode 100644
index 0000000..3b9e3c6
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
@@ -0,0 +1,364 @@
+//=- AArch64InstrAtomics.td - AArch64 Atomic codegen support -*- tablegen -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// AArch64 Atomic operand code-gen constructs.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------
+// Atomic fences
+//===----------------------------------
+def : Pat<(atomic_fence (i64 4), (imm)), (DMB (i32 0x9))>;
+def : Pat<(atomic_fence (imm), (imm)), (DMB (i32 0xb))>;
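+
+// Note: assuming the standard ARMv8 DMB option encodings, 0x9 above is "ishld"
+// and 0xb is "ish", and the i64 4 in the first pattern is LLVM's Acquire
+// ordering value; acquire-only fences therefore become "dmb ishld" and every
+// other fence becomes "dmb ish".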
+
+//===----------------------------------
+// Atomic loads
+//===----------------------------------
+
+// When they're actually atomic, only one addressing mode (GPR64sp) is
+// supported, but when they're relaxed and anything can be used, all the
+// standard modes would be valid and may give efficiency gains.
+
+// An atomic load operation that actually needs acquire semantics.
+class acquiring_load<PatFrag base>
+ : PatFrag<(ops node:$ptr), (base node:$ptr), [{
+ AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
+ assert(Ordering != AcquireRelease && "unexpected load ordering");
+ return Ordering == Acquire || Ordering == SequentiallyConsistent;
+}]>;
+
+// An atomic load operation that does not need either acquire or release
+// semantics.
+class relaxed_load<PatFrag base>
+ : PatFrag<(ops node:$ptr), (base node:$ptr), [{
+ AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
+ return Ordering == Monotonic || Ordering == Unordered;
+}]>;
+
+// 8-bit loads
+def : Pat<(acquiring_load<atomic_load_8> GPR64sp:$ptr), (LDARB GPR64sp:$ptr)>;
+def : Pat<(relaxed_load<atomic_load_8> (ro_Windexed8 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend8:$offset)),
+ (LDRBBroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend8:$offset)>;
+def : Pat<(relaxed_load<atomic_load_8> (ro_Xindexed8 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend8:$offset)),
+ (LDRBBroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend8:$offset)>;
+def : Pat<(relaxed_load<atomic_load_8> (am_indexed8 GPR64sp:$Rn,
+ uimm12s1:$offset)),
+ (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
+def : Pat<(relaxed_load<atomic_load_8>
+ (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
+ (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
+
+// 16-bit loads
+def : Pat<(acquiring_load<atomic_load_16> GPR64sp:$ptr), (LDARH GPR64sp:$ptr)>;
+def : Pat<(relaxed_load<atomic_load_16> (ro_Windexed16 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend16:$extend)),
+ (LDRHHroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend16:$extend)>;
+def : Pat<(relaxed_load<atomic_load_16> (ro_Xindexed16 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend16:$extend)),
+ (LDRHHroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend16:$extend)>;
+def : Pat<(relaxed_load<atomic_load_16> (am_indexed16 GPR64sp:$Rn,
+ uimm12s2:$offset)),
+ (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
+def : Pat<(relaxed_load<atomic_load_16>
+ (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
+ (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
+
+// 32-bit loads
+def : Pat<(acquiring_load<atomic_load_32> GPR64sp:$ptr), (LDARW GPR64sp:$ptr)>;
+def : Pat<(relaxed_load<atomic_load_32> (ro_Windexed32 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend32:$extend)),
+ (LDRWroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend32:$extend)>;
+def : Pat<(relaxed_load<atomic_load_32> (ro_Xindexed32 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend32:$extend)),
+ (LDRWroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend32:$extend)>;
+def : Pat<(relaxed_load<atomic_load_32> (am_indexed32 GPR64sp:$Rn,
+ uimm12s4:$offset)),
+ (LDRWui GPR64sp:$Rn, uimm12s4:$offset)>;
+def : Pat<(relaxed_load<atomic_load_32>
+ (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
+ (LDURWi GPR64sp:$Rn, simm9:$offset)>;
+
+// 64-bit loads
+def : Pat<(acquiring_load<atomic_load_64> GPR64sp:$ptr), (LDARX GPR64sp:$ptr)>;
+def : Pat<(relaxed_load<atomic_load_64> (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend64:$extend)),
+ (LDRXroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
+def : Pat<(relaxed_load<atomic_load_64> (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend64:$extend)),
+ (LDRXroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
+def : Pat<(relaxed_load<atomic_load_64> (am_indexed64 GPR64sp:$Rn,
+ uimm12s8:$offset)),
+ (LDRXui GPR64sp:$Rn, uimm12s8:$offset)>;
+def : Pat<(relaxed_load<atomic_load_64>
+ (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
+ (LDURXi GPR64sp:$Rn, simm9:$offset)>;
+
+//===----------------------------------
+// Atomic stores
+//===----------------------------------
+
+// When they're actually atomic, only one addressing mode (GPR64sp) is
+// supported, but when they're relaxed and anything can be used, all the
+// standard modes would be valid and may give efficiency gains.
+
+// A store operation that actually needs release semantics.
+class releasing_store<PatFrag base>
+ : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
+ AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
+ assert(Ordering != AcquireRelease && "unexpected store ordering");
+ return Ordering == Release || Ordering == SequentiallyConsistent;
+}]>;
+
+// An atomic store operation that doesn't actually need to be atomic on AArch64.
+class relaxed_store<PatFrag base>
+ : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
+ AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
+ return Ordering == Monotonic || Ordering == Unordered;
+}]>;
+
+// 8-bit stores
+def : Pat<(releasing_store<atomic_store_8> GPR64sp:$ptr, GPR32:$val),
+ (STLRB GPR32:$val, GPR64sp:$ptr)>;
+def : Pat<(relaxed_store<atomic_store_8>
+ (ro_Windexed8 GPR64sp:$Rn, GPR32:$Rm, ro_Wextend8:$extend),
+ GPR32:$val),
+ (STRBBroW GPR32:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend8:$extend)>;
+def : Pat<(relaxed_store<atomic_store_8>
+ (ro_Xindexed8 GPR64sp:$Rn, GPR64:$Rm, ro_Xextend8:$extend),
+ GPR32:$val),
+ (STRBBroX GPR32:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend8:$extend)>;
+def : Pat<(relaxed_store<atomic_store_8>
+ (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset), GPR32:$val),
+ (STRBBui GPR32:$val, GPR64sp:$Rn, uimm12s1:$offset)>;
+def : Pat<(relaxed_store<atomic_store_8>
+ (am_unscaled8 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
+ (STURBBi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;
+
+// 16-bit stores
+def : Pat<(releasing_store<atomic_store_16> GPR64sp:$ptr, GPR32:$val),
+ (STLRH GPR32:$val, GPR64sp:$ptr)>;
+def : Pat<(relaxed_store<atomic_store_16> (ro_Windexed16 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend16:$extend),
+ GPR32:$val),
+ (STRHHroW GPR32:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend16:$extend)>;
+def : Pat<(relaxed_store<atomic_store_16> (ro_Xindexed16 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend16:$extend),
+ GPR32:$val),
+ (STRHHroX GPR32:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend16:$extend)>;
+def : Pat<(relaxed_store<atomic_store_16>
+ (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset), GPR32:$val),
+ (STRHHui GPR32:$val, GPR64sp:$Rn, uimm12s2:$offset)>;
+def : Pat<(relaxed_store<atomic_store_16>
+ (am_unscaled16 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
+ (STURHHi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;
+
+// 32-bit stores
+def : Pat<(releasing_store<atomic_store_32> GPR64sp:$ptr, GPR32:$val),
+ (STLRW GPR32:$val, GPR64sp:$ptr)>;
+def : Pat<(relaxed_store<atomic_store_32> (ro_Windexed32 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend32:$extend),
+ GPR32:$val),
+ (STRWroW GPR32:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend32:$extend)>;
+def : Pat<(relaxed_store<atomic_store_32> (ro_Xindexed32 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend32:$extend),
+ GPR32:$val),
+ (STRWroX GPR32:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend32:$extend)>;
+def : Pat<(relaxed_store<atomic_store_32>
+ (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset), GPR32:$val),
+ (STRWui GPR32:$val, GPR64sp:$Rn, uimm12s4:$offset)>;
+def : Pat<(relaxed_store<atomic_store_32>
+ (am_unscaled32 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
+ (STURWi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;
+
+// 64-bit stores
+def : Pat<(releasing_store<atomic_store_64> GPR64sp:$ptr, GPR64:$val),
+ (STLRX GPR64:$val, GPR64sp:$ptr)>;
+def : Pat<(relaxed_store<atomic_store_64> (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend64:$extend),
+ GPR64:$val),
+ (STRXroW GPR64:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
+def : Pat<(relaxed_store<atomic_store_64> (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend64:$extend),
+ GPR64:$val),
+ (STRXroX GPR64:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
+def : Pat<(relaxed_store<atomic_store_64>
+ (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset), GPR64:$val),
+ (STRXui GPR64:$val, GPR64sp:$Rn, uimm12s8:$offset)>;
+def : Pat<(relaxed_store<atomic_store_64>
+ (am_unscaled64 GPR64sp:$Rn, simm9:$offset), GPR64:$val),
+ (STURXi GPR64:$val, GPR64sp:$Rn, simm9:$offset)>;
+
+//===----------------------------------
+// Low-level exclusive operations
+//===----------------------------------
+
+// Load-exclusives.
+
+def ldxr_1 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+
+def ldxr_2 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+
+def ldxr_4 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+
+def ldxr_8 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
+}]>;
+
+def : Pat<(ldxr_1 GPR64sp:$addr),
+ (SUBREG_TO_REG (i64 0), (LDXRB GPR64sp:$addr), sub_32)>;
+def : Pat<(ldxr_2 GPR64sp:$addr),
+ (SUBREG_TO_REG (i64 0), (LDXRH GPR64sp:$addr), sub_32)>;
+def : Pat<(ldxr_4 GPR64sp:$addr),
+ (SUBREG_TO_REG (i64 0), (LDXRW GPR64sp:$addr), sub_32)>;
+def : Pat<(ldxr_8 GPR64sp:$addr), (LDXRX GPR64sp:$addr)>;
+
+def : Pat<(and (ldxr_1 GPR64sp:$addr), 0xff),
+ (SUBREG_TO_REG (i64 0), (LDXRB GPR64sp:$addr), sub_32)>;
+def : Pat<(and (ldxr_2 GPR64sp:$addr), 0xffff),
+ (SUBREG_TO_REG (i64 0), (LDXRH GPR64sp:$addr), sub_32)>;
+def : Pat<(and (ldxr_4 GPR64sp:$addr), 0xffffffff),
+ (SUBREG_TO_REG (i64 0), (LDXRW GPR64sp:$addr), sub_32)>;
+
+// Load-acquire exclusives.
+
+def ldaxr_1 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+
+def ldaxr_2 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+
+def ldaxr_4 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+
+def ldaxr_8 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
+}]>;
+
+def : Pat<(ldaxr_1 GPR64sp:$addr),
+ (SUBREG_TO_REG (i64 0), (LDAXRB GPR64sp:$addr), sub_32)>;
+def : Pat<(ldaxr_2 GPR64sp:$addr),
+ (SUBREG_TO_REG (i64 0), (LDAXRH GPR64sp:$addr), sub_32)>;
+def : Pat<(ldaxr_4 GPR64sp:$addr),
+ (SUBREG_TO_REG (i64 0), (LDAXRW GPR64sp:$addr), sub_32)>;
+def : Pat<(ldaxr_8 GPR64sp:$addr), (LDAXRX GPR64sp:$addr)>;
+
+def : Pat<(and (ldaxr_1 GPR64sp:$addr), 0xff),
+ (SUBREG_TO_REG (i64 0), (LDAXRB GPR64sp:$addr), sub_32)>;
+def : Pat<(and (ldaxr_2 GPR64sp:$addr), 0xffff),
+ (SUBREG_TO_REG (i64 0), (LDAXRH GPR64sp:$addr), sub_32)>;
+def : Pat<(and (ldaxr_4 GPR64sp:$addr), 0xffffffff),
+ (SUBREG_TO_REG (i64 0), (LDAXRW GPR64sp:$addr), sub_32)>;
+
+// Store-exclusives.
+
+def stxr_1 : PatFrag<(ops node:$val, node:$ptr),
+ (int_aarch64_stxr node:$val, node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+
+def stxr_2 : PatFrag<(ops node:$val, node:$ptr),
+ (int_aarch64_stxr node:$val, node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+
+def stxr_4 : PatFrag<(ops node:$val, node:$ptr),
+ (int_aarch64_stxr node:$val, node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+
+def stxr_8 : PatFrag<(ops node:$val, node:$ptr),
+ (int_aarch64_stxr node:$val, node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
+}]>;
+
+
+def : Pat<(stxr_1 GPR64:$val, GPR64sp:$addr),
+ (STXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
+def : Pat<(stxr_2 GPR64:$val, GPR64sp:$addr),
+ (STXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
+def : Pat<(stxr_4 GPR64:$val, GPR64sp:$addr),
+ (STXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
+def : Pat<(stxr_8 GPR64:$val, GPR64sp:$addr),
+ (STXRX GPR64:$val, GPR64sp:$addr)>;
+
+def : Pat<(stxr_1 (zext (and GPR32:$val, 0xff)), GPR64sp:$addr),
+ (STXRB GPR32:$val, GPR64sp:$addr)>;
+def : Pat<(stxr_2 (zext (and GPR32:$val, 0xffff)), GPR64sp:$addr),
+ (STXRH GPR32:$val, GPR64sp:$addr)>;
+def : Pat<(stxr_4 (zext GPR32:$val), GPR64sp:$addr),
+ (STXRW GPR32:$val, GPR64sp:$addr)>;
+
+def : Pat<(stxr_1 (and GPR64:$val, 0xff), GPR64sp:$addr),
+ (STXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
+def : Pat<(stxr_2 (and GPR64:$val, 0xffff), GPR64sp:$addr),
+ (STXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
+def : Pat<(stxr_4 (and GPR64:$val, 0xffffffff), GPR64sp:$addr),
+ (STXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
+
+// Store-release-exclusives.
+
+def stlxr_1 : PatFrag<(ops node:$val, node:$ptr),
+ (int_aarch64_stlxr node:$val, node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+
+def stlxr_2 : PatFrag<(ops node:$val, node:$ptr),
+ (int_aarch64_stlxr node:$val, node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+
+def stlxr_4 : PatFrag<(ops node:$val, node:$ptr),
+ (int_aarch64_stlxr node:$val, node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+
+def stlxr_8 : PatFrag<(ops node:$val, node:$ptr),
+ (int_aarch64_stlxr node:$val, node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
+}]>;
+
+
+def : Pat<(stlxr_1 GPR64:$val, GPR64sp:$addr),
+ (STLXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
+def : Pat<(stlxr_2 GPR64:$val, GPR64sp:$addr),
+ (STLXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
+def : Pat<(stlxr_4 GPR64:$val, GPR64sp:$addr),
+ (STLXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
+def : Pat<(stlxr_8 GPR64:$val, GPR64sp:$addr),
+ (STLXRX GPR64:$val, GPR64sp:$addr)>;
+
+def : Pat<(stlxr_1 (zext (and GPR32:$val, 0xff)), GPR64sp:$addr),
+ (STLXRB GPR32:$val, GPR64sp:$addr)>;
+def : Pat<(stlxr_2 (zext (and GPR32:$val, 0xffff)), GPR64sp:$addr),
+ (STLXRH GPR32:$val, GPR64sp:$addr)>;
+def : Pat<(stlxr_4 (zext GPR32:$val), GPR64sp:$addr),
+ (STLXRW GPR32:$val, GPR64sp:$addr)>;
+
+def : Pat<(stlxr_1 (and GPR64:$val, 0xff), GPR64sp:$addr),
+ (STLXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
+def : Pat<(stlxr_2 (and GPR64:$val, 0xffff), GPR64sp:$addr),
+ (STLXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
+def : Pat<(stlxr_4 (and GPR64:$val, 0xffffffff), GPR64sp:$addr),
+ (STLXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
+
+
+// And clear exclusive.
+
+def : Pat<(int_aarch64_clrex), (CLREX 0xf)>;
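
For context, the ldxr_*/stxr_* fragments above select purely on the memory type of the underlying intrinsic call. At the source level those intrinsics typically come from clang's ACLE-style exclusive builtins; the sketch below is an assumption about that front-end mapping (the builtin names are not part of this patch) and is only meant to show what the patterns end up matching.

// Illustrative only: an exclusive load/store retry loop. On AArch64 the
// __builtin_arm_ldrex/__builtin_arm_strex builtins are expected to lower to
// the int_aarch64_ldxr/int_aarch64_stxr intrinsics matched above; a 32-bit
// access would then select LDXRW (ldxr_4) and STXRW (stxr_4).
static unsigned exclusive_increment(unsigned *p) {
  unsigned old;
  do {
    old = __builtin_arm_ldrex(p);              // load-exclusive
  } while (__builtin_arm_strex(old + 1, p));   // store-exclusive, 0 on success
  return old;
}
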
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/contrib/llvm/lib/Target/AArch64/AArch64InstrFormats.td
index 34f917c..e88c0c0 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/contrib/llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -1,4 +1,4 @@
-//===- AArch64InstrFormats.td - AArch64 Instruction Formats --*- tablegen -*-=//
+//===- AArch64InstrFormats.td - AArch64 Instruction Formats --*- tblgen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -6,1484 +6,8620 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-// This file describes AArch64 instruction formats, down to the level of the
-// instruction's overall class.
-//===----------------------------------------------------------------------===//
-
//===----------------------------------------------------------------------===//
-// A64 Instruction Format Definitions.
-//===----------------------------------------------------------------------===//
+// Describe AArch64 instruction formats here
+//
+
+// Format specifies the encoding used by the instruction. This is part of the
+// ad-hoc solution used to emit machine instruction encodings by our machine
+// code emitter.
+class Format<bits<2> val> {
+ bits<2> Value = val;
+}
-// A64 is currently the only instruction set supported by the AArch64
-// architecture.
-class A64Inst<dag outs, dag ins, string asmstr, list<dag> patterns,
- InstrItinClass itin>
- : Instruction {
- // All A64 instructions are 32-bit. This field will be filled in
- // gradually going down the hierarchy.
- field bits<32> Inst;
+def PseudoFrm : Format<0>;
+def NormalFrm : Format<1>; // Do we need any others?
+// AArch64 Instruction Format
+class AArch64Inst<Format f, string cstr> : Instruction {
+ field bits<32> Inst; // Instruction encoding.
+ // Mask of bits that cause an encoding to be UNPREDICTABLE.
+ // If a bit is set, then if the corresponding bit in the
+ // target encoding differs from its value in the "Inst" field,
+ // the instruction is UNPREDICTABLE (SoftFail in abstract parlance).
field bits<32> Unpredictable = 0;
// SoftFail is the generic name for this field, but we alias it so
// as to make it more obvious what it means in ARM-land.
field bits<32> SoftFail = Unpredictable;
+ let Namespace = "AArch64";
+ Format F = f;
+ bits<2> Form = F.Value;
+ let Pattern = [];
+ let Constraints = cstr;
+}
+
+// Pseudo instructions (don't have encoding information)
+class Pseudo<dag oops, dag iops, list<dag> pattern, string cstr = "">
+ : AArch64Inst<PseudoFrm, cstr> {
+ dag OutOperandList = oops;
+ dag InOperandList = iops;
+ let Pattern = pattern;
+ let isCodeGenOnly = 1;
+}
- // LLVM-level model of the AArch64/A64 distinction.
- let Namespace = "AArch64";
- let DecoderNamespace = "A64";
+// Real instructions (have encoding information)
+class EncodedI<string cstr, list<dag> pattern> : AArch64Inst<NormalFrm, cstr> {
+ let Pattern = pattern;
let Size = 4;
+}
- // Set the templated fields
- let OutOperandList = outs;
- let InOperandList = ins;
- let AsmString = asmstr;
- let Pattern = patterns;
- let Itinerary = itin;
+// Normal instructions
+class I<dag oops, dag iops, string asm, string operands, string cstr,
+ list<dag> pattern>
+ : EncodedI<cstr, pattern> {
+ dag OutOperandList = oops;
+ dag InOperandList = iops;
+ let AsmString = !strconcat(asm, operands);
}
-class PseudoInst<dag outs, dag ins, list<dag> patterns> : Instruction {
- let Namespace = "AArch64";
+class TriOpFrag<dag res> : PatFrag<(ops node:$LHS, node:$MHS, node:$RHS), res>;
+class BinOpFrag<dag res> : PatFrag<(ops node:$LHS, node:$RHS), res>;
+class UnOpFrag<dag res> : PatFrag<(ops node:$LHS), res>;
+
+// Helper fragment for an extract of the high portion of a 128-bit vector.
+def extract_high_v16i8 :
+ UnOpFrag<(extract_subvector (v16i8 node:$LHS), (i64 8))>;
+def extract_high_v8i16 :
+ UnOpFrag<(extract_subvector (v8i16 node:$LHS), (i64 4))>;
+def extract_high_v4i32 :
+ UnOpFrag<(extract_subvector (v4i32 node:$LHS), (i64 2))>;
+def extract_high_v2i64 :
+ UnOpFrag<(extract_subvector (v2i64 node:$LHS), (i64 1))>;
+
+//===----------------------------------------------------------------------===//
+// Asm Operand Classes.
+//
- let OutOperandList = outs;
- let InOperandList= ins;
- let Pattern = patterns;
- let isCodeGenOnly = 1;
- let isPseudo = 1;
+// Shifter operand for arithmetic shifted encodings.
+def ShifterOperand : AsmOperandClass {
+ let Name = "Shifter";
}
-// Represents a pseudo-instruction that represents a single A64 instruction for
-// whatever reason, the eventual result will be a 32-bit real instruction.
-class A64PseudoInst<dag outs, dag ins, list<dag> patterns>
- : PseudoInst<outs, ins, patterns> {
- let Size = 4;
+// Shifter operand for mov immediate encodings.
+def MovImm32ShifterOperand : AsmOperandClass {
+ let SuperClasses = [ShifterOperand];
+ let Name = "MovImm32Shifter";
+ let RenderMethod = "addShifterOperands";
+ let DiagnosticType = "InvalidMovImm32Shift";
+}
+def MovImm64ShifterOperand : AsmOperandClass {
+ let SuperClasses = [ShifterOperand];
+ let Name = "MovImm64Shifter";
+ let RenderMethod = "addShifterOperands";
+ let DiagnosticType = "InvalidMovImm64Shift";
+}
+
+// Shifter operand for arithmetic register shifted encodings.
+class ArithmeticShifterOperand<int width> : AsmOperandClass {
+ let SuperClasses = [ShifterOperand];
+ let Name = "ArithmeticShifter" # width;
+ let PredicateMethod = "isArithmeticShifter<" # width # ">";
+ let RenderMethod = "addShifterOperands";
+ let DiagnosticType = "AddSubRegShift" # width;
}
-// As above, this will be a single A64 instruction, but we can actually give the
-// expansion in TableGen.
-class A64PseudoExpand<dag outs, dag ins, list<dag> patterns, dag Result>
- : A64PseudoInst<outs, ins, patterns>,
- PseudoInstExpansion<Result>;
+def ArithmeticShifterOperand32 : ArithmeticShifterOperand<32>;
+def ArithmeticShifterOperand64 : ArithmeticShifterOperand<64>;
+// Shifter operand for logical register shifted encodings.
+class LogicalShifterOperand<int width> : AsmOperandClass {
+ let SuperClasses = [ShifterOperand];
+ let Name = "LogicalShifter" # width;
+ let PredicateMethod = "isLogicalShifter<" # width # ">";
+ let RenderMethod = "addShifterOperands";
+ let DiagnosticType = "AddSubRegShift" # width;
+}
-// First, some common cross-hierarchy register formats.
+def LogicalShifterOperand32 : LogicalShifterOperand<32>;
+def LogicalShifterOperand64 : LogicalShifterOperand<64>;
-class A64InstRd<dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64Inst<outs, ins, asmstr, patterns, itin> {
- bits<5> Rd;
+// Shifter operand for logical vector 128/64-bit shifted encodings.
+def LogicalVecShifterOperand : AsmOperandClass {
+ let SuperClasses = [ShifterOperand];
+ let Name = "LogicalVecShifter";
+ let RenderMethod = "addShifterOperands";
+}
+def LogicalVecHalfWordShifterOperand : AsmOperandClass {
+ let SuperClasses = [LogicalVecShifterOperand];
+ let Name = "LogicalVecHalfWordShifter";
+ let RenderMethod = "addShifterOperands";
+}
- let Inst{4-0} = Rd;
+// The "MSL" shifter on the vector MOVI instruction.
+def MoveVecShifterOperand : AsmOperandClass {
+ let SuperClasses = [ShifterOperand];
+ let Name = "MoveVecShifter";
+ let RenderMethod = "addShifterOperands";
}
-class A64InstRt<dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64Inst<outs, ins, asmstr, patterns, itin> {
- bits<5> Rt;
+// Extend operand for arithmetic encodings.
+def ExtendOperand : AsmOperandClass {
+ let Name = "Extend";
+ let DiagnosticType = "AddSubRegExtendLarge";
+}
+def ExtendOperand64 : AsmOperandClass {
+ let SuperClasses = [ExtendOperand];
+ let Name = "Extend64";
+ let DiagnosticType = "AddSubRegExtendSmall";
+}
+// An 'extend' that is an LSL of a 64-bit register.
+def ExtendOperandLSL64 : AsmOperandClass {
+ let SuperClasses = [ExtendOperand];
+ let Name = "ExtendLSL64";
+ let RenderMethod = "addExtend64Operands";
+ let DiagnosticType = "AddSubRegExtendLarge";
+}
+
+// 8-bit floating-point immediate encodings.
+def FPImmOperand : AsmOperandClass {
+ let Name = "FPImm";
+ let ParserMethod = "tryParseFPImm";
+ let DiagnosticType = "InvalidFPImm";
+}
+
+def CondCode : AsmOperandClass {
+ let Name = "CondCode";
+ let DiagnosticType = "InvalidCondCode";
+}
+
+// A 32-bit register parsed as 64-bit
+def GPR32as64Operand : AsmOperandClass {
+ let Name = "GPR32as64";
+}
+def GPR32as64 : RegisterOperand<GPR32> {
+ let ParserMatchClass = GPR32as64Operand;
+}
+
+// 8-bit immediate for AdvSIMD where 64-bit values of the form:
+// aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh
+// are encoded as the eight bit value 'abcdefgh'.
+def SIMDImmType10Operand : AsmOperandClass { let Name = "SIMDImmType10"; }
+
+
+//===----------------------------------------------------------------------===//
+// Operand Definitions.
+//
+
+// ADR[P] instruction labels.
+def AdrpOperand : AsmOperandClass {
+ let Name = "AdrpLabel";
+ let ParserMethod = "tryParseAdrpLabel";
+ let DiagnosticType = "InvalidLabel";
+}
+def adrplabel : Operand<i64> {
+ let EncoderMethod = "getAdrLabelOpValue";
+ let PrintMethod = "printAdrpLabel";
+ let ParserMatchClass = AdrpOperand;
+}
+
+def AdrOperand : AsmOperandClass {
+ let Name = "AdrLabel";
+ let ParserMethod = "tryParseAdrLabel";
+ let DiagnosticType = "InvalidLabel";
+}
+def adrlabel : Operand<i64> {
+ let EncoderMethod = "getAdrLabelOpValue";
+ let ParserMatchClass = AdrOperand;
+}
+
+// simm9 predicate - True if the immediate is in the range [-256, 255].
+def SImm9Operand : AsmOperandClass {
+ let Name = "SImm9";
+ let DiagnosticType = "InvalidMemoryIndexedSImm9";
+}
+def simm9 : Operand<i64>, ImmLeaf<i64, [{ return Imm >= -256 && Imm < 256; }]> {
+ let ParserMatchClass = SImm9Operand;
+}
+
+// simm7sN predicate - True if the immediate is a multiple of N in the range
+// [-64 * N, 63 * N].
+class SImm7Scaled<int Scale> : AsmOperandClass {
+ let Name = "SImm7s" # Scale;
+ let DiagnosticType = "InvalidMemoryIndexed" # Scale # "SImm7";
+}
+
+def SImm7s4Operand : SImm7Scaled<4>;
+def SImm7s8Operand : SImm7Scaled<8>;
+def SImm7s16Operand : SImm7Scaled<16>;
+
+def simm7s4 : Operand<i32> {
+ let ParserMatchClass = SImm7s4Operand;
+ let PrintMethod = "printImmScale<4>";
+}
+
+def simm7s8 : Operand<i32> {
+ let ParserMatchClass = SImm7s8Operand;
+ let PrintMethod = "printImmScale<8>";
+}
+
+def simm7s16 : Operand<i32> {
+ let ParserMatchClass = SImm7s16Operand;
+ let PrintMethod = "printImmScale<16>";
+}
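
The simm7sN comment above fully determines the legal offsets: a multiple of the scale whose scaled value fits in a signed 7-bit field. A minimal C++ sketch of that check (illustrative; not the assembler's actual predicate):

// Sketch of the simm7sN range check: the byte offset must be a multiple of
// Scale and, once divided by Scale, must fit in a signed 7-bit immediate,
// i.e. [-64, 63] (so [-64*Scale, 63*Scale] in bytes).
template <int Scale>
static bool isValidSImm7Scaled(int64_t Offset) {
  if (Offset % Scale != 0)
    return false;
  int64_t Scaled = Offset / Scale;
  return Scaled >= -64 && Scaled <= 63;
}
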
+
+class AsmImmRange<int Low, int High> : AsmOperandClass {
+ let Name = "Imm" # Low # "_" # High;
+ let DiagnosticType = "InvalidImm" # Low # "_" # High;
+}
+
+def Imm1_8Operand : AsmImmRange<1, 8>;
+def Imm1_16Operand : AsmImmRange<1, 16>;
+def Imm1_32Operand : AsmImmRange<1, 32>;
+def Imm1_64Operand : AsmImmRange<1, 64>;
+
+def MovZSymbolG3AsmOperand : AsmOperandClass {
+ let Name = "MovZSymbolG3";
+ let RenderMethod = "addImmOperands";
+}
+
+def movz_symbol_g3 : Operand<i32> {
+ let ParserMatchClass = MovZSymbolG3AsmOperand;
+}
+
+def MovZSymbolG2AsmOperand : AsmOperandClass {
+ let Name = "MovZSymbolG2";
+ let RenderMethod = "addImmOperands";
+}
+
+def movz_symbol_g2 : Operand<i32> {
+ let ParserMatchClass = MovZSymbolG2AsmOperand;
+}
+
+def MovZSymbolG1AsmOperand : AsmOperandClass {
+ let Name = "MovZSymbolG1";
+ let RenderMethod = "addImmOperands";
+}
+
+def movz_symbol_g1 : Operand<i32> {
+ let ParserMatchClass = MovZSymbolG1AsmOperand;
+}
+
+def MovZSymbolG0AsmOperand : AsmOperandClass {
+ let Name = "MovZSymbolG0";
+ let RenderMethod = "addImmOperands";
+}
+
+def movz_symbol_g0 : Operand<i32> {
+ let ParserMatchClass = MovZSymbolG0AsmOperand;
+}
+
+def MovKSymbolG3AsmOperand : AsmOperandClass {
+ let Name = "MovKSymbolG3";
+ let RenderMethod = "addImmOperands";
+}
+
+def movk_symbol_g3 : Operand<i32> {
+ let ParserMatchClass = MovKSymbolG3AsmOperand;
+}
+
+def MovKSymbolG2AsmOperand : AsmOperandClass {
+ let Name = "MovKSymbolG2";
+ let RenderMethod = "addImmOperands";
+}
+
+def movk_symbol_g2 : Operand<i32> {
+ let ParserMatchClass = MovKSymbolG2AsmOperand;
+}
+
+def MovKSymbolG1AsmOperand : AsmOperandClass {
+ let Name = "MovKSymbolG1";
+ let RenderMethod = "addImmOperands";
+}
+
+def movk_symbol_g1 : Operand<i32> {
+ let ParserMatchClass = MovKSymbolG1AsmOperand;
+}
+
+def MovKSymbolG0AsmOperand : AsmOperandClass {
+ let Name = "MovKSymbolG0";
+ let RenderMethod = "addImmOperands";
+}
+
+def movk_symbol_g0 : Operand<i32> {
+ let ParserMatchClass = MovKSymbolG0AsmOperand;
+}
+
+class fixedpoint_i32<ValueType FloatVT>
+ : Operand<FloatVT>,
+ ComplexPattern<FloatVT, 1, "SelectCVTFixedPosOperand<32>", [fpimm, ld]> {
+ let EncoderMethod = "getFixedPointScaleOpValue";
+ let DecoderMethod = "DecodeFixedPointScaleImm32";
+ let ParserMatchClass = Imm1_32Operand;
+}
+
+class fixedpoint_i64<ValueType FloatVT>
+ : Operand<FloatVT>,
+ ComplexPattern<FloatVT, 1, "SelectCVTFixedPosOperand<64>", [fpimm, ld]> {
+ let EncoderMethod = "getFixedPointScaleOpValue";
+ let DecoderMethod = "DecodeFixedPointScaleImm64";
+ let ParserMatchClass = Imm1_64Operand;
+}
+
+def fixedpoint_f32_i32 : fixedpoint_i32<f32>;
+def fixedpoint_f64_i32 : fixedpoint_i32<f64>;
+
+def fixedpoint_f32_i64 : fixedpoint_i64<f32>;
+def fixedpoint_f64_i64 : fixedpoint_i64<f64>;
+
+def vecshiftR8 : Operand<i32>, ImmLeaf<i32, [{
+ return (((uint32_t)Imm) > 0) && (((uint32_t)Imm) < 9);
+}]> {
+ let EncoderMethod = "getVecShiftR8OpValue";
+ let DecoderMethod = "DecodeVecShiftR8Imm";
+ let ParserMatchClass = Imm1_8Operand;
+}
+def vecshiftR16 : Operand<i32>, ImmLeaf<i32, [{
+ return (((uint32_t)Imm) > 0) && (((uint32_t)Imm) < 17);
+}]> {
+ let EncoderMethod = "getVecShiftR16OpValue";
+ let DecoderMethod = "DecodeVecShiftR16Imm";
+ let ParserMatchClass = Imm1_16Operand;
+}
+def vecshiftR16Narrow : Operand<i32>, ImmLeaf<i32, [{
+ return (((uint32_t)Imm) > 0) && (((uint32_t)Imm) < 9);
+}]> {
+ let EncoderMethod = "getVecShiftR16OpValue";
+ let DecoderMethod = "DecodeVecShiftR16ImmNarrow";
+ let ParserMatchClass = Imm1_8Operand;
+}
+def vecshiftR32 : Operand<i32>, ImmLeaf<i32, [{
+ return (((uint32_t)Imm) > 0) && (((uint32_t)Imm) < 33);
+}]> {
+ let EncoderMethod = "getVecShiftR32OpValue";
+ let DecoderMethod = "DecodeVecShiftR32Imm";
+ let ParserMatchClass = Imm1_32Operand;
+}
+def vecshiftR32Narrow : Operand<i32>, ImmLeaf<i32, [{
+ return (((uint32_t)Imm) > 0) && (((uint32_t)Imm) < 17);
+}]> {
+ let EncoderMethod = "getVecShiftR32OpValue";
+ let DecoderMethod = "DecodeVecShiftR32ImmNarrow";
+ let ParserMatchClass = Imm1_16Operand;
+}
+def vecshiftR64 : Operand<i32>, ImmLeaf<i32, [{
+ return (((uint32_t)Imm) > 0) && (((uint32_t)Imm) < 65);
+}]> {
+ let EncoderMethod = "getVecShiftR64OpValue";
+ let DecoderMethod = "DecodeVecShiftR64Imm";
+ let ParserMatchClass = Imm1_64Operand;
+}
+def vecshiftR64Narrow : Operand<i32>, ImmLeaf<i32, [{
+ return (((uint32_t)Imm) > 0) && (((uint32_t)Imm) < 33);
+}]> {
+ let EncoderMethod = "getVecShiftR64OpValue";
+ let DecoderMethod = "DecodeVecShiftR64ImmNarrow";
+ let ParserMatchClass = Imm1_32Operand;
+}
+
+def Imm0_7Operand : AsmImmRange<0, 7>;
+def Imm0_15Operand : AsmImmRange<0, 15>;
+def Imm0_31Operand : AsmImmRange<0, 31>;
+def Imm0_63Operand : AsmImmRange<0, 63>;
+
+def vecshiftL8 : Operand<i32>, ImmLeaf<i32, [{
+ return (((uint32_t)Imm) < 8);
+}]> {
+ let EncoderMethod = "getVecShiftL8OpValue";
+ let DecoderMethod = "DecodeVecShiftL8Imm";
+ let ParserMatchClass = Imm0_7Operand;
+}
+def vecshiftL16 : Operand<i32>, ImmLeaf<i32, [{
+ return (((uint32_t)Imm) < 16);
+}]> {
+ let EncoderMethod = "getVecShiftL16OpValue";
+ let DecoderMethod = "DecodeVecShiftL16Imm";
+ let ParserMatchClass = Imm0_15Operand;
+}
+def vecshiftL32 : Operand<i32>, ImmLeaf<i32, [{
+ return (((uint32_t)Imm) < 32);
+}]> {
+ let EncoderMethod = "getVecShiftL32OpValue";
+ let DecoderMethod = "DecodeVecShiftL32Imm";
+ let ParserMatchClass = Imm0_31Operand;
+}
+def vecshiftL64 : Operand<i32>, ImmLeaf<i32, [{
+ return (((uint32_t)Imm) < 64);
+}]> {
+ let EncoderMethod = "getVecShiftL64OpValue";
+ let DecoderMethod = "DecodeVecShiftL64Imm";
+ let ParserMatchClass = Imm0_63Operand;
+}
+
+
+// Crazy immediate formats used by 32-bit and 64-bit logical immediate
+// instructions for splatting repeating bit patterns across the immediate.
+def logical_imm32_XFORM : SDNodeXForm<imm, [{
+ uint64_t enc = AArch64_AM::encodeLogicalImmediate(N->getZExtValue(), 32);
+ return CurDAG->getTargetConstant(enc, MVT::i32);
+}]>;
+def logical_imm64_XFORM : SDNodeXForm<imm, [{
+ uint64_t enc = AArch64_AM::encodeLogicalImmediate(N->getZExtValue(), 64);
+ return CurDAG->getTargetConstant(enc, MVT::i32);
+}]>;
+
+let DiagnosticType = "LogicalSecondSource" in {
+ def LogicalImm32Operand : AsmOperandClass {
+ let Name = "LogicalImm32";
+ }
+ def LogicalImm64Operand : AsmOperandClass {
+ let Name = "LogicalImm64";
+ }
+ def LogicalImm32NotOperand : AsmOperandClass {
+ let Name = "LogicalImm32Not";
+ }
+ def LogicalImm64NotOperand : AsmOperandClass {
+ let Name = "LogicalImm64Not";
+ }
+}
+def logical_imm32 : Operand<i32>, PatLeaf<(imm), [{
+ return AArch64_AM::isLogicalImmediate(N->getZExtValue(), 32);
+}], logical_imm32_XFORM> {
+ let PrintMethod = "printLogicalImm32";
+ let ParserMatchClass = LogicalImm32Operand;
+}
+def logical_imm64 : Operand<i64>, PatLeaf<(imm), [{
+ return AArch64_AM::isLogicalImmediate(N->getZExtValue(), 64);
+}], logical_imm64_XFORM> {
+ let PrintMethod = "printLogicalImm64";
+ let ParserMatchClass = LogicalImm64Operand;
+}
+def logical_imm32_not : Operand<i32> {
+ let ParserMatchClass = LogicalImm32NotOperand;
+}
+def logical_imm64_not : Operand<i64> {
+ let ParserMatchClass = LogicalImm64NotOperand;
+}
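
To make the "repeating bit patterns" remark above concrete: a value is a valid logical immediate when some power-of-two element size (2 to 64 bits), replicated across the register, holds a single contiguous run of ones, possibly rotated. The following C++ sketch checks exactly that property; it is an illustration of the rule, not LLVM's isLogicalImmediate/encodeLogicalImmediate implementation.

#include <cstdint>

// Rotate an N-bit value right by one within its low N bits.
static uint64_t rotr1(uint64_t V, unsigned N) {
  uint64_t Mask = (N == 64) ? ~0ULL : ((1ULL << N) - 1);
  return ((V >> 1) | (V << (N - 1))) & Mask;
}

// Sketch of the logical-immediate validity check: find the smallest element
// size that replicates across the 64-bit value, then require that element to
// be a single cyclic run of ones (exactly two 0<->1 transitions when walked
// circularly). All-zeros and all-ones are not encodable.
static bool isLogicalImmediate64(uint64_t V) {
  if (V == 0 || V == ~0ULL)
    return false;
  unsigned Size = 64;
  while (Size > 2) {
    unsigned Half = Size / 2;
    uint64_t HalfMask = (1ULL << Half) - 1;
    if ((V & HalfMask) != ((V >> Half) & HalfMask))
      break;
    Size = Half;
  }
  uint64_t ElemMask = (Size == 64) ? ~0ULL : ((1ULL << Size) - 1);
  uint64_t Elem = V & ElemMask;
  return __builtin_popcountll(Elem ^ rotr1(Elem, Size)) == 2;
}
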
+
+// imm0_65535 predicate - True if the immediate is in the range [0,65535].
+def Imm0_65535Operand : AsmImmRange<0, 65535>;
+def imm0_65535 : Operand<i32>, ImmLeaf<i32, [{
+ return ((uint32_t)Imm) < 65536;
+}]> {
+ let ParserMatchClass = Imm0_65535Operand;
+ let PrintMethod = "printHexImm";
+}
+
+// imm0_255 predicate - True if the immediate is in the range [0,255].
+def Imm0_255Operand : AsmOperandClass { let Name = "Imm0_255"; }
+def imm0_255 : Operand<i32>, ImmLeaf<i32, [{
+ return ((uint32_t)Imm) < 256;
+}]> {
+ let ParserMatchClass = Imm0_255Operand;
+ let PrintMethod = "printHexImm";
+}
+
+// imm0_127 predicate - True if the immediate is in the range [0,127]
+def Imm0_127Operand : AsmImmRange<0, 127>;
+def imm0_127 : Operand<i32>, ImmLeaf<i32, [{
+ return ((uint32_t)Imm) < 128;
+}]> {
+ let ParserMatchClass = Imm0_127Operand;
+ let PrintMethod = "printHexImm";
+}
+
+// NOTE: These imm0_N operands have to be of type i64 because i64 is the size
+// for all shift-amounts.
+
+// imm0_63 predicate - True if the immediate is in the range [0,63]
+def imm0_63 : Operand<i64>, ImmLeaf<i64, [{
+ return ((uint64_t)Imm) < 64;
+}]> {
+ let ParserMatchClass = Imm0_63Operand;
+}
+
+// imm0_31 predicate - True if the immediate is in the range [0,31]
+def imm0_31 : Operand<i64>, ImmLeaf<i64, [{
+ return ((uint64_t)Imm) < 32;
+}]> {
+ let ParserMatchClass = Imm0_31Operand;
+}
+
+// imm0_15 predicate - True if the immediate is in the range [0,15]
+def imm0_15 : Operand<i64>, ImmLeaf<i64, [{
+ return ((uint64_t)Imm) < 16;
+}]> {
+ let ParserMatchClass = Imm0_15Operand;
+}
+// imm0_7 predicate - True if the immediate is in the range [0,7]
+def imm0_7 : Operand<i64>, ImmLeaf<i64, [{
+ return ((uint64_t)Imm) < 8;
+}]> {
+ let ParserMatchClass = Imm0_7Operand;
+}
+
+// imm32_0_15 predicate - True if the 32-bit immediate is in the range [0,15]
+def imm32_0_15 : Operand<i32>, ImmLeaf<i32, [{
+ return ((uint32_t)Imm) < 16;
+}]>;
+
+// An arithmetic shifter operand:
+// {7-6} - shift type: 00 = lsl, 01 = lsr, 10 = asr
+// {5-0} - imm6
+class arith_shift<ValueType Ty, int width> : Operand<Ty> {
+ let PrintMethod = "printShifter";
+ let ParserMatchClass = !cast<AsmOperandClass>(
+ "ArithmeticShifterOperand" # width);
+}
+
+def arith_shift32 : arith_shift<i32, 32>;
+def arith_shift64 : arith_shift<i64, 64>;
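
Both shifter-operand comments in this area describe the same 8-bit packing: a 2-bit shift kind above a 6-bit amount. A small illustrative helper that restates that layout (the names here are hypothetical, not from this patch):

// Sketch of the shifter-operand packing: bits 7-6 hold the shift kind
// (00 = lsl, 01 = lsr, 10 = asr, and 11 = ror for the logical forms) and
// bits 5-0 hold the imm6 shift amount.
enum ShiftKind { LSL = 0, LSR = 1, ASR = 2, ROR = 3 };

static unsigned packShifterOperand(ShiftKind Kind, unsigned Amount) {
  return (static_cast<unsigned>(Kind) << 6) | (Amount & 0x3f);
}
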
+
+class arith_shifted_reg<ValueType Ty, RegisterClass regclass, int width>
+ : Operand<Ty>,
+ ComplexPattern<Ty, 2, "SelectArithShiftedRegister", []> {
+ let PrintMethod = "printShiftedRegister";
+ let MIOperandInfo = (ops regclass, !cast<Operand>("arith_shift" # width));
+}
+
+def arith_shifted_reg32 : arith_shifted_reg<i32, GPR32, 32>;
+def arith_shifted_reg64 : arith_shifted_reg<i64, GPR64, 64>;
+
+// A logical shifter operand:
+// {7-6} - shift type: 00 = lsl, 01 = lsr, 10 = asr, 11 = ror
+// {5-0} - imm6
+class logical_shift<int width> : Operand<i32> {
+ let PrintMethod = "printShifter";
+ let ParserMatchClass = !cast<AsmOperandClass>(
+ "LogicalShifterOperand" # width);
+}
+
+def logical_shift32 : logical_shift<32>;
+def logical_shift64 : logical_shift<64>;
+
+class logical_shifted_reg<ValueType Ty, RegisterClass regclass, Operand shiftop>
+ : Operand<Ty>,
+ ComplexPattern<Ty, 2, "SelectLogicalShiftedRegister", []> {
+ let PrintMethod = "printShiftedRegister";
+ let MIOperandInfo = (ops regclass, shiftop);
+}
+
+def logical_shifted_reg32 : logical_shifted_reg<i32, GPR32, logical_shift32>;
+def logical_shifted_reg64 : logical_shifted_reg<i64, GPR64, logical_shift64>;
+
+// A logical vector shifter operand:
+// {7-6} - shift type: 00 = lsl
+// {5-0} - imm6: #0, #8, #16, or #24
+def logical_vec_shift : Operand<i32> {
+ let PrintMethod = "printShifter";
+ let EncoderMethod = "getVecShifterOpValue";
+ let ParserMatchClass = LogicalVecShifterOperand;
+}
+
+// A logical vector half-word shifter operand:
+// {7-6} - shift type: 00 = lsl
+// {5-0} - imm6: #0 or #8
+def logical_vec_hw_shift : Operand<i32> {
+ let PrintMethod = "printShifter";
+ let EncoderMethod = "getVecShifterOpValue";
+ let ParserMatchClass = LogicalVecHalfWordShifterOperand;
+}
+
+// A vector move shifter operand:
+// {0} - imm1: #8 or #16
+def move_vec_shift : Operand<i32> {
+ let PrintMethod = "printShifter";
+ let EncoderMethod = "getMoveVecShifterOpValue";
+ let ParserMatchClass = MoveVecShifterOperand;
+}
+
+def AddSubImmOperand : AsmOperandClass {
+ let Name = "AddSubImm";
+ let ParserMethod = "tryParseAddSubImm";
+ let DiagnosticType = "AddSubSecondSource";
+}
+// An ADD/SUB immediate shifter operand:
+// second operand:
+// {7-6} - shift type: 00 = lsl
+// {5-0} - imm6: #0 or #12
+class addsub_shifted_imm<ValueType Ty>
+ : Operand<Ty>, ComplexPattern<Ty, 2, "SelectArithImmed", [imm]> {
+ let PrintMethod = "printAddSubImm";
+ let EncoderMethod = "getAddSubImmOpValue";
+ let ParserMatchClass = AddSubImmOperand;
+ let MIOperandInfo = (ops i32imm, i32imm);
+}
+
+def addsub_shifted_imm32 : addsub_shifted_imm<i32>;
+def addsub_shifted_imm64 : addsub_shifted_imm<i64>;
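
As the comment above notes, the second operand of an immediate ADD/SUB is a 12-bit value shifted left by either 0 or 12 bits. A hedged sketch of the corresponding check (illustrative; not the actual SelectArithImmed code):

// Sketch: a value is a legal ADD/SUB immediate if it fits in an unsigned
// 12-bit field, or if it is such a value shifted left by 12 bits.
static bool isAddSubImmediate(uint64_t Imm) {
  if ((Imm >> 12) == 0)
    return true;                                   // uimm12, LSL #0
  return (Imm & 0xfff) == 0 && (Imm >> 24) == 0;   // uimm12, LSL #12
}
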
+
+class neg_addsub_shifted_imm<ValueType Ty>
+ : Operand<Ty>, ComplexPattern<Ty, 2, "SelectNegArithImmed", [imm]> {
+ let PrintMethod = "printAddSubImm";
+ let EncoderMethod = "getAddSubImmOpValue";
+ let ParserMatchClass = AddSubImmOperand;
+ let MIOperandInfo = (ops i32imm, i32imm);
+}
+
+def neg_addsub_shifted_imm32 : neg_addsub_shifted_imm<i32>;
+def neg_addsub_shifted_imm64 : neg_addsub_shifted_imm<i64>;
+
+// An extend operand:
+// {5-3} - extend type
+// {2-0} - imm3
+def arith_extend : Operand<i32> {
+ let PrintMethod = "printArithExtend";
+ let ParserMatchClass = ExtendOperand;
+}
+def arith_extend64 : Operand<i32> {
+ let PrintMethod = "printArithExtend";
+ let ParserMatchClass = ExtendOperand64;
+}
+
+// An 'extend' that is an LSL of a 64-bit register.
+def arith_extendlsl64 : Operand<i32> {
+ let PrintMethod = "printArithExtend";
+ let ParserMatchClass = ExtendOperandLSL64;
+}
+
+class arith_extended_reg32<ValueType Ty> : Operand<Ty>,
+ ComplexPattern<Ty, 2, "SelectArithExtendedRegister", []> {
+ let PrintMethod = "printExtendedRegister";
+ let MIOperandInfo = (ops GPR32, arith_extend);
+}
+
+class arith_extended_reg32to64<ValueType Ty> : Operand<Ty>,
+ ComplexPattern<Ty, 2, "SelectArithExtendedRegister", []> {
+ let PrintMethod = "printExtendedRegister";
+ let MIOperandInfo = (ops GPR32, arith_extend64);
+}
+
+// Floating-point immediate.
+def fpimm32 : Operand<f32>,
+ PatLeaf<(f32 fpimm), [{
+ return AArch64_AM::getFP32Imm(N->getValueAPF()) != -1;
+ }], SDNodeXForm<fpimm, [{
+ APFloat InVal = N->getValueAPF();
+ uint32_t enc = AArch64_AM::getFP32Imm(InVal);
+ return CurDAG->getTargetConstant(enc, MVT::i32);
+ }]>> {
+ let ParserMatchClass = FPImmOperand;
+ let PrintMethod = "printFPImmOperand";
+}
+def fpimm64 : Operand<f64>,
+ PatLeaf<(f64 fpimm), [{
+ return AArch64_AM::getFP64Imm(N->getValueAPF()) != -1;
+ }], SDNodeXForm<fpimm, [{
+ APFloat InVal = N->getValueAPF();
+ uint32_t enc = AArch64_AM::getFP64Imm(InVal);
+ return CurDAG->getTargetConstant(enc, MVT::i32);
+ }]>> {
+ let ParserMatchClass = FPImmOperand;
+ let PrintMethod = "printFPImmOperand";
+}
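
The 8-bit floating-point immediate accepted by getFP32Imm/getFP64Imm covers values of the form +/-(1 + m/16) * 2^e with m in [0,15] and e in [-3,4], i.e. magnitudes from 0.125 to 31.0. The sketch below expands such an immediate purely to illustrate the format; it is not the LLVM helper itself.

#include <cmath>
#include <cstdint>

// Sketch: expand an AArch64 8-bit FP immediate "abcdefgh", where a is the
// sign, bcd selects the exponent (range -3..4) and efgh is the fraction.
static double expandFPImm8(uint8_t Imm8) {
  int Sign = (Imm8 >> 7) & 1;
  int BCD  = (Imm8 >> 4) & 7;
  int Frac = Imm8 & 0xf;
  int Exp  = (BCD < 4) ? BCD + 1 : BCD - 7;   // -3 .. 4
  double Mant = 1.0 + Frac / 16.0;            // 1.0 .. 1.9375
  return (Sign ? -1.0 : 1.0) * std::ldexp(Mant, Exp);
}
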
+
+def fpimm8 : Operand<i32> {
+ let ParserMatchClass = FPImmOperand;
+ let PrintMethod = "printFPImmOperand";
+}
+
+def fpimm0 : PatLeaf<(fpimm), [{
+ return N->isExactlyValue(+0.0);
+}]>;
+
+// Vector lane operands
+class AsmVectorIndex<string Suffix> : AsmOperandClass {
+ let Name = "VectorIndex" # Suffix;
+ let DiagnosticType = "InvalidIndex" # Suffix;
+}
+def VectorIndex1Operand : AsmVectorIndex<"1">;
+def VectorIndexBOperand : AsmVectorIndex<"B">;
+def VectorIndexHOperand : AsmVectorIndex<"H">;
+def VectorIndexSOperand : AsmVectorIndex<"S">;
+def VectorIndexDOperand : AsmVectorIndex<"D">;
+
+def VectorIndex1 : Operand<i64>, ImmLeaf<i64, [{
+ return ((uint64_t)Imm) == 1;
+}]> {
+ let ParserMatchClass = VectorIndex1Operand;
+ let PrintMethod = "printVectorIndex";
+ let MIOperandInfo = (ops i64imm);
+}
+def VectorIndexB : Operand<i64>, ImmLeaf<i64, [{
+ return ((uint64_t)Imm) < 16;
+}]> {
+ let ParserMatchClass = VectorIndexBOperand;
+ let PrintMethod = "printVectorIndex";
+ let MIOperandInfo = (ops i64imm);
+}
+def VectorIndexH : Operand<i64>, ImmLeaf<i64, [{
+ return ((uint64_t)Imm) < 8;
+}]> {
+ let ParserMatchClass = VectorIndexHOperand;
+ let PrintMethod = "printVectorIndex";
+ let MIOperandInfo = (ops i64imm);
+}
+def VectorIndexS : Operand<i64>, ImmLeaf<i64, [{
+ return ((uint64_t)Imm) < 4;
+}]> {
+ let ParserMatchClass = VectorIndexSOperand;
+ let PrintMethod = "printVectorIndex";
+ let MIOperandInfo = (ops i64imm);
+}
+def VectorIndexD : Operand<i64>, ImmLeaf<i64, [{
+ return ((uint64_t)Imm) < 2;
+}]> {
+ let ParserMatchClass = VectorIndexDOperand;
+ let PrintMethod = "printVectorIndex";
+ let MIOperandInfo = (ops i64imm);
+}
+
+// 8-bit immediate for AdvSIMD where 64-bit values of the form:
+// aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh
+// are encoded as the eight bit value 'abcdefgh'.
+def simdimmtype10 : Operand<i32>,
+ PatLeaf<(f64 fpimm), [{
+ return AArch64_AM::isAdvSIMDModImmType10(N->getValueAPF()
+ .bitcastToAPInt()
+ .getZExtValue());
+ }], SDNodeXForm<fpimm, [{
+ APFloat InVal = N->getValueAPF();
+ uint32_t enc = AArch64_AM::encodeAdvSIMDModImmType10(N->getValueAPF()
+ .bitcastToAPInt()
+ .getZExtValue());
+ return CurDAG->getTargetConstant(enc, MVT::i32);
+ }]>> {
+ let ParserMatchClass = SIMDImmType10Operand;
+ let PrintMethod = "printSIMDType10Operand";
+}
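
The "Type 10" comment describes a byte-replication scheme: each bit of the 8-bit immediate expands to one full byte of the 64-bit constant. A minimal C++ sketch of that expansion (illustrative; not the isAdvSIMDModImmType10/encodeAdvSIMDModImmType10 helpers referenced above):

#include <cstdint>

// Sketch: expand the 8-bit value 'abcdefgh' into the 64-bit constant
// aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh,
// replicating each immediate bit across one byte of the result.
static uint64_t expandModImmType10(uint8_t Imm8) {
  uint64_t Result = 0;
  for (int Byte = 0; Byte < 8; ++Byte)
    if (Imm8 & (1u << Byte))
      Result |= 0xFFull << (8 * Byte);
  return Result;
}
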
+
+
+//---
+// System management
+//---
+
+// Base encoding for system instruction operands.
+let mayLoad = 0, mayStore = 0, hasSideEffects = 1 in
+class BaseSystemI<bit L, dag oops, dag iops, string asm, string operands,
+ list<dag> pattern = []>
+ : I<oops, iops, asm, operands, "", pattern> {
+ let Inst{31-22} = 0b1101010100;
+ let Inst{21} = L;
+}
+
+// System instructions which do not have an Rt register.
+class SimpleSystemI<bit L, dag iops, string asm, string operands,
+ list<dag> pattern = []>
+ : BaseSystemI<L, (outs), iops, asm, operands, pattern> {
+ let Inst{4-0} = 0b11111;
+}
+
+// System instructions which have an Rt register.
+class RtSystemI<bit L, dag oops, dag iops, string asm, string operands>
+ : BaseSystemI<L, oops, iops, asm, operands>,
+ Sched<[WriteSys]> {
+ bits<5> Rt;
let Inst{4-0} = Rt;
}
+// Hint instructions that take both a CRm and a 3-bit immediate.
+// NOTE: ideally, this would have mayStore = 0, mayLoad = 0, but we cannot
+// model patterns with sufficiently fine granularity
+let mayStore = 1, mayLoad = 1, hasSideEffects = 1 in
+ class HintI<string mnemonic>
+ : SimpleSystemI<0, (ins imm0_127:$imm), mnemonic#" $imm", "",
+ [(int_aarch64_hint imm0_127:$imm)]>,
+ Sched<[WriteHint]> {
+ bits <7> imm;
+ let Inst{20-12} = 0b000110010;
+ let Inst{11-5} = imm;
+ }
+
+// System instructions taking a single literal operand which encodes into
+// CRm. op2 differentiates the opcodes.
+def BarrierAsmOperand : AsmOperandClass {
+ let Name = "Barrier";
+ let ParserMethod = "tryParseBarrierOperand";
+}
+def barrier_op : Operand<i32> {
+ let PrintMethod = "printBarrierOption";
+ let ParserMatchClass = BarrierAsmOperand;
+}
+class CRmSystemI<Operand crmtype, bits<3> opc, string asm,
+ list<dag> pattern = []>
+ : SimpleSystemI<0, (ins crmtype:$CRm), asm, "\t$CRm", pattern>,
+ Sched<[WriteBarrier]> {
+ bits<4> CRm;
+ let Inst{20-12} = 0b000110011;
+ let Inst{11-8} = CRm;
+ let Inst{7-5} = opc;
+}
-class A64InstRdn<dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRd<outs, ins, asmstr, patterns, itin> {
- // Inherit rdt
- bits<5> Rn;
+// MRS/MSR system instructions. These have different operand classes because
+// a different subset of registers can be accessed through each instruction.
+def MRSSystemRegisterOperand : AsmOperandClass {
+ let Name = "MRSSystemRegister";
+ let ParserMethod = "tryParseSysReg";
+ let DiagnosticType = "MRS";
+}
+// concatenation of 1, op0, op1, CRn, CRm, op2. 16-bit immediate.
+def mrs_sysreg_op : Operand<i32> {
+ let ParserMatchClass = MRSSystemRegisterOperand;
+ let DecoderMethod = "DecodeMRSSystemRegister";
+ let PrintMethod = "printMRSSystemRegister";
+}
- let Inst{9-5} = Rn;
+def MSRSystemRegisterOperand : AsmOperandClass {
+ let Name = "MSRSystemRegister";
+ let ParserMethod = "tryParseSysReg";
+ let DiagnosticType = "MSR";
+}
+def msr_sysreg_op : Operand<i32> {
+ let ParserMatchClass = MSRSystemRegisterOperand;
+ let DecoderMethod = "DecodeMSRSystemRegister";
+ let PrintMethod = "printMSRSystemRegister";
}
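
The "concatenation of 1, op0, op1, CRn, CRm, op2" note refers to the usual S<op0>_<op1>_<Cn>_<Cm>_<op2> naming of AArch64 system registers: the operand is a 16-bit value whose top bit is always set because op0 is 0b10 or 0b11. The helper below is an assumption about that layout written out for illustration; it is not code from this patch.

// Sketch: pack a system-register reference into the 16-bit operand format
// described above (op0 in bits 15-14, op1 in 13-11, CRn in 10-7,
// CRm in 6-3, op2 in 2-0).
static unsigned packSysReg(unsigned Op0, unsigned Op1, unsigned CRn,
                           unsigned CRm, unsigned Op2) {
  return (Op0 << 14) | (Op1 << 11) | (CRn << 7) | (CRm << 3) | Op2;
}
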
-class A64InstRtn<dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRt<outs, ins, asmstr, patterns, itin> {
- // Inherit rdt
- bits<5> Rn;
+class MRSI : RtSystemI<1, (outs GPR64:$Rt), (ins mrs_sysreg_op:$systemreg),
+ "mrs", "\t$Rt, $systemreg"> {
+ bits<15> systemreg;
+ let Inst{20} = 1;
+ let Inst{19-5} = systemreg;
+}
- let Inst{9-5} = Rn;
+// FIXME: Some of these def NZCV, others don't. Best way to model that?
+// Explicitly modeling each of the system registers as a register class
+// would do it, but feels like overkill at this point.
+class MSRI : RtSystemI<0, (outs), (ins msr_sysreg_op:$systemreg, GPR64:$Rt),
+ "msr", "\t$systemreg, $Rt"> {
+ bits<15> systemreg;
+ let Inst{20} = 1;
+ let Inst{19-5} = systemreg;
}
-// Instructions taking Rt,Rt2,Rn
-class A64InstRtt2n<dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRtn<outs, ins, asmstr, patterns, itin> {
- bits<5> Rt2;
+def SystemPStateFieldOperand : AsmOperandClass {
+ let Name = "SystemPStateField";
+ let ParserMethod = "tryParseSysReg";
+}
+def pstatefield_op : Operand<i32> {
+ let ParserMatchClass = SystemPStateFieldOperand;
+ let PrintMethod = "printSystemPStateField";
+}
- let Inst{14-10} = Rt2;
+let Defs = [NZCV] in
+class MSRpstateI
+ : SimpleSystemI<0, (ins pstatefield_op:$pstate_field, imm0_15:$imm),
+ "msr", "\t$pstate_field, $imm">,
+ Sched<[WriteSys]> {
+ bits<6> pstatefield;
+ bits<4> imm;
+ let Inst{20-19} = 0b00;
+ let Inst{18-16} = pstatefield{5-3};
+ let Inst{15-12} = 0b0100;
+ let Inst{11-8} = imm;
+ let Inst{7-5} = pstatefield{2-0};
+
+ let DecoderMethod = "DecodeSystemPStateInstruction";
}
-class A64InstRdnm<dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin> {
- bits<5> Rm;
+// SYS and SYSL generic system instructions.
+def SysCRAsmOperand : AsmOperandClass {
+ let Name = "SysCR";
+ let ParserMethod = "tryParseSysCROperand";
+}
- let Inst{20-16} = Rm;
+def sys_cr_op : Operand<i32> {
+ let PrintMethod = "printSysCROperand";
+ let ParserMatchClass = SysCRAsmOperand;
}
-class A64InstRtnm<dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRtn<outs, ins, asmstr, patterns, itin> {
- bits<5> Rm;
+class SystemXtI<bit L, string asm>
+ : RtSystemI<L, (outs),
+ (ins imm0_7:$op1, sys_cr_op:$Cn, sys_cr_op:$Cm, imm0_7:$op2, GPR64:$Rt),
+ asm, "\t$op1, $Cn, $Cm, $op2, $Rt"> {
+ bits<3> op1;
+ bits<4> Cn;
+ bits<4> Cm;
+ bits<3> op2;
+ let Inst{20-19} = 0b01;
+ let Inst{18-16} = op1;
+ let Inst{15-12} = Cn;
+ let Inst{11-8} = Cm;
+ let Inst{7-5} = op2;
+}
- let Inst{20-16} = Rm;
+class SystemLXtI<bit L, string asm>
+ : RtSystemI<L, (outs),
+ (ins GPR64:$Rt, imm0_7:$op1, sys_cr_op:$Cn, sys_cr_op:$Cm, imm0_7:$op2),
+ asm, "\t$Rt, $op1, $Cn, $Cm, $op2"> {
+ bits<3> op1;
+ bits<4> Cn;
+ bits<4> Cm;
+ bits<3> op2;
+ let Inst{20-19} = 0b01;
+ let Inst{18-16} = op1;
+ let Inst{15-12} = Cn;
+ let Inst{11-8} = Cm;
+ let Inst{7-5} = op2;
}
-//===----------------------------------------------------------------------===//
-//
-// Actual A64 Instruction Formats
+
+// Branch (register) instructions:
//
+// case opc of
+// 0001 blr
+// 0000 br
+// 0101 dret
+// 0100 eret
+// 0010 ret
+// otherwise UNDEFINED
+class BaseBranchReg<bits<4> opc, dag oops, dag iops, string asm,
+ string operands, list<dag> pattern>
+ : I<oops, iops, asm, operands, "", pattern>, Sched<[WriteBrReg]> {
+ let Inst{31-25} = 0b1101011;
+ let Inst{24-21} = opc;
+ let Inst{20-16} = 0b11111;
+ let Inst{15-10} = 0b000000;
+ let Inst{4-0} = 0b00000;
+}
-// Format for Add-subtract (extended register) instructions.
-class A64I_addsubext<bit sf, bit op, bit S, bits<2> opt, bits<3> option,
- dag outs, dag ins, string asmstr, list<dag> patterns,
- InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
- bits<3> Imm3;
-
- let Inst{31} = sf;
- let Inst{30} = op;
- let Inst{29} = S;
- let Inst{28-24} = 0b01011;
- let Inst{23-22} = opt;
- let Inst{21} = 0b1;
- // Rm inherited in 20-16
- let Inst{15-13} = option;
- let Inst{12-10} = Imm3;
- // Rn inherited in 9-5
- // Rd inherited in 4-0
-}
-
-// Format for Add-subtract (immediate) instructions.
-class A64I_addsubimm<bit sf, bit op, bit S, bits<2> shift,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin> {
- bits<12> Imm12;
+class BranchReg<bits<4> opc, string asm, list<dag> pattern>
+ : BaseBranchReg<opc, (outs), (ins GPR64:$Rn), asm, "\t$Rn", pattern> {
+ bits<5> Rn;
+ let Inst{9-5} = Rn;
+}
- let Inst{31} = sf;
- let Inst{30} = op;
- let Inst{29} = S;
- let Inst{28-24} = 0b10001;
- let Inst{23-22} = shift;
- let Inst{21-10} = Imm12;
-}
-
-// Format for Add-subtract (shifted register) instructions.
-class A64I_addsubshift<bit sf, bit op, bit S, bits<2> shift,
- dag outs, dag ins, string asmstr, list<dag> patterns,
- InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
- bits<6> Imm6;
-
- let Inst{31} = sf;
- let Inst{30} = op;
- let Inst{29} = S;
- let Inst{28-24} = 0b01011;
- let Inst{23-22} = shift;
- let Inst{21} = 0b0;
- // Rm inherited in 20-16
- let Inst{15-10} = Imm6;
- // Rn inherited in 9-5
- // Rd inherited in 4-0
-}
-
-// Format for Add-subtract (with carry) instructions.
-class A64I_addsubcarry<bit sf, bit op, bit S, bits<6> opcode2,
- dag outs, dag ins, string asmstr, list<dag> patterns,
- InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
- let Inst{31} = sf;
- let Inst{30} = op;
- let Inst{29} = S;
- let Inst{28-21} = 0b11010000;
- // Rm inherited in 20-16
- let Inst{15-10} = opcode2;
- // Rn inherited in 9-5
- // Rd inherited in 4-0
-}
-
-
-// Format for Bitfield instructions
-class A64I_bitfield<bit sf, bits<2> opc, bit n,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin> {
- bits<6> ImmR;
- bits<6> ImmS;
+let mayLoad = 0, mayStore = 0, hasSideEffects = 1, isReturn = 1 in
+class SpecialReturn<bits<4> opc, string asm>
+ : BaseBranchReg<opc, (outs), (ins), asm, "", []> {
+ let Inst{9-5} = 0b11111;
+}
- let Inst{31} = sf;
- let Inst{30-29} = opc;
- let Inst{28-23} = 0b100110;
- let Inst{22} = n;
- let Inst{21-16} = ImmR;
- let Inst{15-10} = ImmS;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
+//---
+// Conditional branch instruction.
+//---
+
+// Condition code.
+// 4-bit immediate. Pretty-printed as <cc>
+def ccode : Operand<i32> {
+ let PrintMethod = "printCondCode";
+ let ParserMatchClass = CondCode;
+}
+def inv_ccode : Operand<i32> {
+ // AL and NV are invalid in the aliases which use inv_ccode
+ let PrintMethod = "printInverseCondCode";
+ let ParserMatchClass = CondCode;
+ let MCOperandPredicate = [{
+ return MCOp.isImm() &&
+ MCOp.getImm() != AArch64CC::AL &&
+ MCOp.getImm() != AArch64CC::NV;
+ }];
+}
+
+// Conditional branch target. 19-bit immediate. The low two bits of the target
+// offset are implied zero and so are not part of the immediate.
+def PCRelLabel19Operand : AsmOperandClass {
+ let Name = "PCRelLabel19";
+ let DiagnosticType = "InvalidLabel";
+}
+def am_brcond : Operand<OtherVT> {
+ let EncoderMethod = "getCondBranchTargetOpValue";
+ let DecoderMethod = "DecodePCRelLabel19";
+ let PrintMethod = "printAlignedLabel";
+ let ParserMatchClass = PCRelLabel19Operand;
+}
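
Because the low two bits of the 19-bit target are implied zero, a conditional branch can reach +/-1 MiB from the branch instruction. A small illustrative range check (an assumption about how such a displacement would be validated, not code from this patch):

// Sketch: a conditional-branch displacement must be 4-byte aligned and,
// after dropping the two implied zero bits, fit in a signed 19-bit field.
static bool fitsCondBranchTarget(int64_t ByteOffset) {
  if (ByteOffset & 0x3)
    return false;                                    // low two bits implied zero
  int64_t Imm19 = ByteOffset >> 2;
  return Imm19 >= -(1 << 18) && Imm19 < (1 << 18);   // i.e. +/-1 MiB
}
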
+
+class BranchCond : I<(outs), (ins ccode:$cond, am_brcond:$target),
+ "b", ".$cond\t$target", "",
+ [(AArch64brcond bb:$target, imm:$cond, NZCV)]>,
+ Sched<[WriteBr]> {
+ let isBranch = 1;
+ let isTerminator = 1;
+ let Uses = [NZCV];
+
+ bits<4> cond;
+ bits<19> target;
+ let Inst{31-24} = 0b01010100;
+ let Inst{23-5} = target;
+ let Inst{4} = 0;
+ let Inst{3-0} = cond;
}
-// Format for compare and branch (immediate) instructions.
-class A64I_cmpbr<bit sf, bit op,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRt<outs, ins, asmstr, patterns, itin> {
- bits<19> Label;
+//---
+// Compare-and-branch instructions.
+//---
+class BaseCmpBranch<RegisterClass regtype, bit op, string asm, SDNode node>
+ : I<(outs), (ins regtype:$Rt, am_brcond:$target),
+ asm, "\t$Rt, $target", "",
+ [(node regtype:$Rt, bb:$target)]>,
+ Sched<[WriteBr]> {
+ let isBranch = 1;
+ let isTerminator = 1;
- let Inst{31} = sf;
+ bits<5> Rt;
+ bits<19> target;
let Inst{30-25} = 0b011010;
- let Inst{24} = op;
- let Inst{23-5} = Label;
- // Inherit Rt in 4-0
+ let Inst{24} = op;
+ let Inst{23-5} = target;
+ let Inst{4-0} = Rt;
}
-// Format for conditional branch (immediate) instructions.
-class A64I_condbr<bit o1, bit o0,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64Inst<outs, ins, asmstr, patterns, itin> {
- bits<19> Label;
- bits<4> Cond;
+multiclass CmpBranch<bit op, string asm, SDNode node> {
+ def W : BaseCmpBranch<GPR32, op, asm, node> {
+ let Inst{31} = 0;
+ }
+ def X : BaseCmpBranch<GPR64, op, asm, node> {
+ let Inst{31} = 1;
+ }
+}
+
+//---
+// Test-bit-and-branch instructions.
+//---
+// Test-and-branch target. 14-bit sign-extended immediate. The low two bits of
+// the target offset are implied zero and so are not part of the immediate.
+def BranchTarget14Operand : AsmOperandClass {
+ let Name = "BranchTarget14";
+}
+def am_tbrcond : Operand<OtherVT> {
+ let EncoderMethod = "getTestBranchTargetOpValue";
+ let PrintMethod = "printAlignedLabel";
+ let ParserMatchClass = BranchTarget14Operand;
+}
+
+// AsmOperand classes to emit (or not) special diagnostics
+def TBZImm0_31Operand : AsmOperandClass {
+ let Name = "TBZImm0_31";
+ let PredicateMethod = "isImm0_31";
+ let RenderMethod = "addImm0_31Operands";
+}
+def TBZImm32_63Operand : AsmOperandClass {
+ let Name = "Imm32_63";
+ let DiagnosticType = "InvalidImm0_63";
+}
+
+class tbz_imm0_31<AsmOperandClass matcher> : Operand<i64>, ImmLeaf<i64, [{
+ return (((uint32_t)Imm) < 32);
+}]> {
+ let ParserMatchClass = matcher;
+}
+
+def tbz_imm0_31_diag : tbz_imm0_31<Imm0_31Operand>;
+def tbz_imm0_31_nodiag : tbz_imm0_31<TBZImm0_31Operand>;
+
+def tbz_imm32_63 : Operand<i64>, ImmLeaf<i64, [{
+ return (((uint32_t)Imm) > 31) && (((uint32_t)Imm) < 64);
+}]> {
+ let ParserMatchClass = TBZImm32_63Operand;
+}
- let Inst{31-25} = 0b0101010;
- let Inst{24} = o1;
- let Inst{23-5} = Label;
- let Inst{4} = o0;
- let Inst{3-0} = Cond;
+class BaseTestBranch<RegisterClass regtype, Operand immtype,
+ bit op, string asm, SDNode node>
+ : I<(outs), (ins regtype:$Rt, immtype:$bit_off, am_tbrcond:$target),
+ asm, "\t$Rt, $bit_off, $target", "",
+ [(node regtype:$Rt, immtype:$bit_off, bb:$target)]>,
+ Sched<[WriteBr]> {
+ let isBranch = 1;
+ let isTerminator = 1;
+
+ bits<5> Rt;
+ bits<6> bit_off;
+ bits<14> target;
+
+ let Inst{30-25} = 0b011011;
+ let Inst{24} = op;
+ let Inst{23-19} = bit_off{4-0};
+ let Inst{18-5} = target;
+ let Inst{4-0} = Rt;
+
+ let DecoderMethod = "DecodeTestAndBranch";
+}
+
+multiclass TestBranch<bit op, string asm, SDNode node> {
+ def W : BaseTestBranch<GPR32, tbz_imm0_31_diag, op, asm, node> {
+ let Inst{31} = 0;
+ }
+
+ def X : BaseTestBranch<GPR64, tbz_imm32_63, op, asm, node> {
+ let Inst{31} = 1;
+ }
+
+ // Alias X-reg with 0-31 imm to W-Reg.
+ def : InstAlias<asm # "\t$Rd, $imm, $target",
+ (!cast<Instruction>(NAME#"W") GPR32as64:$Rd,
+ tbz_imm0_31_nodiag:$imm, am_tbrcond:$target), 0>;
+ def : Pat<(node GPR64:$Rn, tbz_imm0_31_diag:$imm, bb:$target),
+ (!cast<Instruction>(NAME#"W") (EXTRACT_SUBREG GPR64:$Rn, sub_32),
+ tbz_imm0_31_diag:$imm, bb:$target)>;
+}
+
+//---
+// Unconditional branch (immediate) instructions.
+//---
+def BranchTarget26Operand : AsmOperandClass {
+ let Name = "BranchTarget26";
+ let DiagnosticType = "InvalidLabel";
+}
+def am_b_target : Operand<OtherVT> {
+ let EncoderMethod = "getBranchTargetOpValue";
+ let PrintMethod = "printAlignedLabel";
+ let ParserMatchClass = BranchTarget26Operand;
+}
+def am_bl_target : Operand<i64> {
+ let EncoderMethod = "getBranchTargetOpValue";
+ let PrintMethod = "printAlignedLabel";
+ let ParserMatchClass = BranchTarget26Operand;
}
-// Format for conditional compare (immediate) instructions.
-class A64I_condcmpimm<bit sf, bit op, bit o2, bit o3, bit s,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64Inst<outs, ins, asmstr, patterns, itin> {
+class BImm<bit op, dag iops, string asm, list<dag> pattern>
+ : I<(outs), iops, asm, "\t$addr", "", pattern>, Sched<[WriteBr]> {
+ bits<26> addr;
+ let Inst{31} = op;
+ let Inst{30-26} = 0b00101;
+ let Inst{25-0} = addr;
+
+ let DecoderMethod = "DecodeUnconditionalBranch";
+}
+
+class BranchImm<bit op, string asm, list<dag> pattern>
+ : BImm<op, (ins am_b_target:$addr), asm, pattern>;
+class CallImm<bit op, string asm, list<dag> pattern>
+ : BImm<op, (ins am_bl_target:$addr), asm, pattern>;
+
+//---
+// Basic one-operand data processing instructions.
+//---
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseOneOperandData<bits<3> opc, RegisterClass regtype, string asm,
+ SDPatternOperator node>
+ : I<(outs regtype:$Rd), (ins regtype:$Rn), asm, "\t$Rd, $Rn", "",
+ [(set regtype:$Rd, (node regtype:$Rn))]>,
+ Sched<[WriteI, ReadI]> {
+ bits<5> Rd;
bits<5> Rn;
- bits<5> UImm5;
- bits<4> NZCVImm;
- bits<4> Cond;
- let Inst{31} = sf;
- let Inst{30} = op;
- let Inst{29} = s;
- let Inst{28-21} = 0b11010010;
- let Inst{20-16} = UImm5;
- let Inst{15-12} = Cond;
- let Inst{11} = 0b1;
- let Inst{10} = o2;
- let Inst{9-5} = Rn;
- let Inst{4} = o3;
- let Inst{3-0} = NZCVImm;
+ let Inst{30-13} = 0b101101011000000000;
+ let Inst{12-10} = opc;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+multiclass OneOperandData<bits<3> opc, string asm,
+ SDPatternOperator node = null_frag> {
+ def Wr : BaseOneOperandData<opc, GPR32, asm, node> {
+ let Inst{31} = 0;
+ }
+
+ def Xr : BaseOneOperandData<opc, GPR64, asm, node> {
+ let Inst{31} = 1;
+ }
+}
+
+class OneWRegData<bits<3> opc, string asm, SDPatternOperator node>
+ : BaseOneOperandData<opc, GPR32, asm, node> {
+ let Inst{31} = 0;
}
-// Format for conditional compare (register) instructions.
-class A64I_condcmpreg<bit sf, bit op, bit o2, bit o3, bit s,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64Inst<outs, ins, asmstr, patterns, itin> {
+class OneXRegData<bits<3> opc, string asm, SDPatternOperator node>
+ : BaseOneOperandData<opc, GPR64, asm, node> {
+ let Inst{31} = 1;
+}
+
+//---
+// Basic two-operand data processing instructions.
+//---
+class BaseBaseAddSubCarry<bit isSub, RegisterClass regtype, string asm,
+ list<dag> pattern>
+ : I<(outs regtype:$Rd), (ins regtype:$Rn, regtype:$Rm),
+ asm, "\t$Rd, $Rn, $Rm", "", pattern>,
+ Sched<[WriteI, ReadI, ReadI]> {
+ let Uses = [NZCV];
+ bits<5> Rd;
bits<5> Rn;
bits<5> Rm;
- bits<4> NZCVImm;
- bits<4> Cond;
+ let Inst{30} = isSub;
+ let Inst{28-21} = 0b11010000;
+ let Inst{20-16} = Rm;
+ let Inst{15-10} = 0;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+class BaseAddSubCarry<bit isSub, RegisterClass regtype, string asm,
+ SDNode OpNode>
+ : BaseBaseAddSubCarry<isSub, regtype, asm,
+ [(set regtype:$Rd, (OpNode regtype:$Rn, regtype:$Rm, NZCV))]>;
+
+class BaseAddSubCarrySetFlags<bit isSub, RegisterClass regtype, string asm,
+ SDNode OpNode>
+ : BaseBaseAddSubCarry<isSub, regtype, asm,
+ [(set regtype:$Rd, (OpNode regtype:$Rn, regtype:$Rm, NZCV)),
+ (implicit NZCV)]> {
+ let Defs = [NZCV];
+}
- let Inst{31} = sf;
- let Inst{30} = op;
- let Inst{29} = s;
- let Inst{28-21} = 0b11010010;
+multiclass AddSubCarry<bit isSub, string asm, string asm_setflags,
+ SDNode OpNode, SDNode OpNode_setflags> {
+ def Wr : BaseAddSubCarry<isSub, GPR32, asm, OpNode> {
+ let Inst{31} = 0;
+ let Inst{29} = 0;
+ }
+ def Xr : BaseAddSubCarry<isSub, GPR64, asm, OpNode> {
+ let Inst{31} = 1;
+ let Inst{29} = 0;
+ }
+
+ // Sets flags.
+ def SWr : BaseAddSubCarrySetFlags<isSub, GPR32, asm_setflags,
+ OpNode_setflags> {
+ let Inst{31} = 0;
+ let Inst{29} = 1;
+ }
+ def SXr : BaseAddSubCarrySetFlags<isSub, GPR64, asm_setflags,
+ OpNode_setflags> {
+ let Inst{31} = 1;
+ let Inst{29} = 1;
+ }
+}
+
+class BaseTwoOperand<bits<4> opc, RegisterClass regtype, string asm,
+ SDPatternOperator OpNode>
+ : I<(outs regtype:$Rd), (ins regtype:$Rn, regtype:$Rm),
+ asm, "\t$Rd, $Rn, $Rm", "",
+ [(set regtype:$Rd, (OpNode regtype:$Rn, regtype:$Rm))]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+ let Inst{30-21} = 0b0011010110;
let Inst{20-16} = Rm;
- let Inst{15-12} = Cond;
- let Inst{11} = 0b0;
- let Inst{10} = o2;
- let Inst{9-5} = Rn;
- let Inst{4} = o3;
- let Inst{3-0} = NZCVImm;
+ let Inst{15-14} = 0b00;
+ let Inst{13-10} = opc;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
}
-// Format for conditional select instructions.
-class A64I_condsel<bit sf, bit op, bit s, bits<2> op2,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
- bits<4> Cond;
+class BaseDiv<bit isSigned, RegisterClass regtype, string asm,
+ SDPatternOperator OpNode>
+ : BaseTwoOperand<{0,0,1,?}, regtype, asm, OpNode> {
+ let Inst{10} = isSigned;
+}
- let Inst{31} = sf;
- let Inst{30} = op;
- let Inst{29} = s;
- let Inst{28-21} = 0b11010100;
- // Inherit Rm in 20-16
- let Inst{15-12} = Cond;
- let Inst{11-10} = op2;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
+multiclass Div<bit isSigned, string asm, SDPatternOperator OpNode> {
+ def Wr : BaseDiv<isSigned, GPR32, asm, OpNode>,
+ Sched<[WriteID32, ReadID, ReadID]> {
+ let Inst{31} = 0;
+ }
+ def Xr : BaseDiv<isSigned, GPR64, asm, OpNode>,
+ Sched<[WriteID64, ReadID, ReadID]> {
+ let Inst{31} = 1;
+ }
}
-// Format for data processing (1 source) instructions
-class A64I_dp_1src<bit sf, bit S, bits<5> opcode2, bits<6> opcode,
- string asmstr, dag outs, dag ins,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin> {
- let Inst{31} = sf;
- let Inst{30} = 0b1;
- let Inst{29} = S;
- let Inst{28-21} = 0b11010110;
- let Inst{20-16} = opcode2;
- let Inst{15-10} = opcode;
-}
-
-// Format for data processing (2 source) instructions
-class A64I_dp_2src<bit sf, bits<6> opcode, bit S,
- string asmstr, dag outs, dag ins,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
- let Inst{31} = sf;
- let Inst{30} = 0b0;
- let Inst{29} = S;
- let Inst{28-21} = 0b11010110;
- let Inst{15-10} = opcode;
+class BaseShift<bits<2> shift_type, RegisterClass regtype, string asm,
+ SDPatternOperator OpNode = null_frag>
+ : BaseTwoOperand<{1,0,?,?}, regtype, asm, OpNode>,
+ Sched<[WriteIS, ReadI]> {
+ let Inst{11-10} = shift_type;
}
-// Format for data-processing (3 source) instructions
+multiclass Shift<bits<2> shift_type, string asm, SDNode OpNode> {
+ def Wr : BaseShift<shift_type, GPR32, asm> {
+ let Inst{31} = 0;
+ }
-class A64I_dp3<bit sf, bits<6> opcode,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
- bits<5> Ra;
+ def Xr : BaseShift<shift_type, GPR64, asm, OpNode> {
+ let Inst{31} = 1;
+ }
- let Inst{31} = sf;
- let Inst{30-29} = opcode{5-4};
- let Inst{28-24} = 0b11011;
- let Inst{23-21} = opcode{3-1};
- // Inherits Rm in 20-16
- let Inst{15} = opcode{0};
+ def : Pat<(i32 (OpNode GPR32:$Rn, i64:$Rm)),
+ (!cast<Instruction>(NAME # "Wr") GPR32:$Rn,
+ (EXTRACT_SUBREG i64:$Rm, sub_32))>;
+
+ def : Pat<(i32 (OpNode GPR32:$Rn, (i64 (zext GPR32:$Rm)))),
+ (!cast<Instruction>(NAME # "Wr") GPR32:$Rn, GPR32:$Rm)>;
+
+ def : Pat<(i32 (OpNode GPR32:$Rn, (i64 (anyext GPR32:$Rm)))),
+ (!cast<Instruction>(NAME # "Wr") GPR32:$Rn, GPR32:$Rm)>;
+
+ def : Pat<(i32 (OpNode GPR32:$Rn, (i64 (sext GPR32:$Rm)))),
+ (!cast<Instruction>(NAME # "Wr") GPR32:$Rn, GPR32:$Rm)>;
+}
+
+class ShiftAlias<string asm, Instruction inst, RegisterClass regtype>
+ : InstAlias<asm#" $dst, $src1, $src2",
+ (inst regtype:$dst, regtype:$src1, regtype:$src2), 0>;
+
+class BaseMulAccum<bit isSub, bits<3> opc, RegisterClass multype,
+ RegisterClass addtype, string asm,
+ list<dag> pattern>
+ : I<(outs addtype:$Rd), (ins multype:$Rn, multype:$Rm, addtype:$Ra),
+ asm, "\t$Rd, $Rn, $Rm, $Ra", "", pattern> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+ bits<5> Ra;
+ let Inst{30-24} = 0b0011011;
+ let Inst{23-21} = opc;
+ let Inst{20-16} = Rm;
+ let Inst{15} = isSub;
let Inst{14-10} = Ra;
- // Inherits Rn in 9-5
- // Inherits Rd in 4-0
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
}
-// Format for exception generation instructions
-class A64I_exception<bits<3> opc, bits<3> op2, bits<2> ll,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64Inst<outs, ins, asmstr, patterns, itin> {
- bits<16> UImm16;
+multiclass MulAccum<bit isSub, string asm, SDNode AccNode> {
+ def Wrrr : BaseMulAccum<isSub, 0b000, GPR32, GPR32, asm,
+ [(set GPR32:$Rd, (AccNode GPR32:$Ra, (mul GPR32:$Rn, GPR32:$Rm)))]>,
+ Sched<[WriteIM32, ReadIM, ReadIM, ReadIMA]> {
+ let Inst{31} = 0;
+ }
+
+ def Xrrr : BaseMulAccum<isSub, 0b000, GPR64, GPR64, asm,
+ [(set GPR64:$Rd, (AccNode GPR64:$Ra, (mul GPR64:$Rn, GPR64:$Rm)))]>,
+ Sched<[WriteIM64, ReadIM, ReadIM, ReadIMA]> {
+ let Inst{31} = 1;
+ }
+}
- let Inst{31-24} = 0b11010100;
+class WideMulAccum<bit isSub, bits<3> opc, string asm,
+ SDNode AccNode, SDNode ExtNode>
+ : BaseMulAccum<isSub, opc, GPR32, GPR64, asm,
+ [(set GPR64:$Rd, (AccNode GPR64:$Ra,
+ (mul (ExtNode GPR32:$Rn), (ExtNode GPR32:$Rm))))]>,
+ Sched<[WriteIM32, ReadIM, ReadIM, ReadIMA]> {
+ let Inst{31} = 1;
+}
+
+class MulHi<bits<3> opc, string asm, SDNode OpNode>
+ : I<(outs GPR64:$Rd), (ins GPR64:$Rn, GPR64:$Rm),
+ asm, "\t$Rd, $Rn, $Rm", "",
+ [(set GPR64:$Rd, (OpNode GPR64:$Rn, GPR64:$Rm))]>,
+ Sched<[WriteIM64, ReadIM, ReadIM]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+ let Inst{31-24} = 0b10011011;
let Inst{23-21} = opc;
- let Inst{20-5} = UImm16;
- let Inst{4-2} = op2;
- let Inst{1-0} = ll;
+ let Inst{20-16} = Rm;
+ let Inst{15} = 0;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+
+ // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
+ // (i.e. all bits 1) but is ignored by the processor.
+ let PostEncoderMethod = "fixMulHigh";
}
-// Format for extract (immediate) instructions
-class A64I_extract<bit sf, bits<3> op, bit n,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
- bits<6> LSB;
+class MulAccumWAlias<string asm, Instruction inst>
+ : InstAlias<asm#" $dst, $src1, $src2",
+ (inst GPR32:$dst, GPR32:$src1, GPR32:$src2, WZR)>;
+class MulAccumXAlias<string asm, Instruction inst>
+ : InstAlias<asm#" $dst, $src1, $src2",
+ (inst GPR64:$dst, GPR64:$src1, GPR64:$src2, XZR)>;
+class WideMulAccumAlias<string asm, Instruction inst>
+ : InstAlias<asm#" $dst, $src1, $src2",
+ (inst GPR64:$dst, GPR32:$src1, GPR32:$src2, XZR)>;
+
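+// CRC32{B,H,W,X} and CRC32C{B,H,W,X}: sz selects the data size, C selects the
+// CRC-32C (Castagnoli) polynomial, and the 32-bit accumulator and result are
+// always W registers; only the data register widens to X for the X form.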
+class BaseCRC32<bit sf, bits<2> sz, bit C, RegisterClass StreamReg,
+ SDPatternOperator OpNode, string asm>
+ : I<(outs GPR32:$Rd), (ins GPR32:$Rn, StreamReg:$Rm),
+ asm, "\t$Rd, $Rn, $Rm", "",
+ [(set GPR32:$Rd, (OpNode GPR32:$Rn, StreamReg:$Rm))]>,
+ Sched<[WriteISReg, ReadI, ReadISReg]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
let Inst{31} = sf;
- let Inst{30-29} = op{2-1};
- let Inst{28-23} = 0b100111;
- let Inst{22} = n;
- let Inst{21} = op{0};
- // Inherits Rm in bits 20-16
- let Inst{15-10} = LSB;
- // Inherits Rn in 9-5
- // Inherits Rd in 4-0
+ let Inst{30-21} = 0b0011010110;
+ let Inst{20-16} = Rm;
+ let Inst{15-13} = 0b010;
+ let Inst{12} = C;
+ let Inst{11-10} = sz;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+ let Predicates = [HasCRC];
}
-let Predicates = [HasFPARMv8] in {
+//---
+// Address generation.
+//---
+
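+// PC-relative address computation: with page = 0 this is ADR (byte-granular,
+// +/-1MiB range); with page = 1 it is ADRP, whose 21-bit immediate counts
+// 4KiB pages (+/-4GiB range). The label is split into immlo (Inst{30-29})
+// and immhi (Inst{23-5}).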
+class ADRI<bit page, string asm, Operand adr, list<dag> pattern>
+ : I<(outs GPR64:$Xd), (ins adr:$label), asm, "\t$Xd, $label", "",
+ pattern>,
+ Sched<[WriteI]> {
+ bits<5> Xd;
+ bits<21> label;
+ let Inst{31} = page;
+ let Inst{30-29} = label{1-0};
+ let Inst{28-24} = 0b10000;
+ let Inst{23-5} = label{20-2};
+ let Inst{4-0} = Xd;
+
+ let DecoderMethod = "DecodeAdrInstruction";
+}
+
+//---
+// Move immediate.
+//---
+
+def movimm32_imm : Operand<i32> {
+ let ParserMatchClass = Imm0_65535Operand;
+ let EncoderMethod = "getMoveWideImmOpValue";
+ let PrintMethod = "printHexImm";
+}
+def movimm32_shift : Operand<i32> {
+ let PrintMethod = "printShifter";
+ let ParserMatchClass = MovImm32ShifterOperand;
+}
+def movimm64_shift : Operand<i32> {
+ let PrintMethod = "printShifter";
+ let ParserMatchClass = MovImm64ShifterOperand;
+}
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseMoveImmediate<bits<2> opc, RegisterClass regtype, Operand shifter,
+ string asm>
+ : I<(outs regtype:$Rd), (ins movimm32_imm:$imm, shifter:$shift),
+ asm, "\t$Rd, $imm$shift", "", []>,
+ Sched<[WriteImm]> {
+ bits<5> Rd;
+ bits<16> imm;
+ bits<6> shift;
+ let Inst{30-29} = opc;
+ let Inst{28-23} = 0b100101;
+ let Inst{22-21} = shift{5-4};
+ let Inst{20-5} = imm;
+ let Inst{4-0} = Rd;
+
+ let DecoderMethod = "DecodeMoveImmInstruction";
+}
+
+multiclass MoveImmediate<bits<2> opc, string asm> {
+ def Wi : BaseMoveImmediate<opc, GPR32, movimm32_shift, asm> {
+ let Inst{31} = 0;
+ }
+
+ def Xi : BaseMoveImmediate<opc, GPR64, movimm64_shift, asm> {
+ let Inst{31} = 1;
+ }
+}
+
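+// Insert-immediate ties $src to $Rd, so only the selected 16-bit lane of the
+// destination is replaced and the remaining bits are preserved (the MOVK
+// behaviour), unlike the plain move-immediate forms above.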
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseInsertImmediate<bits<2> opc, RegisterClass regtype, Operand shifter,
+ string asm>
+ : I<(outs regtype:$Rd),
+ (ins regtype:$src, movimm32_imm:$imm, shifter:$shift),
+ asm, "\t$Rd, $imm$shift", "$src = $Rd", []>,
+ Sched<[WriteI, ReadI]> {
+ bits<5> Rd;
+ bits<16> imm;
+ bits<6> shift;
+ let Inst{30-29} = opc;
+ let Inst{28-23} = 0b100101;
+ let Inst{22-21} = shift{5-4};
+ let Inst{20-5} = imm;
+ let Inst{4-0} = Rd;
+
+ let DecoderMethod = "DecodeMoveImmInstruction";
+}
+
+multiclass InsertImmediate<bits<2> opc, string asm> {
+ def Wi : BaseInsertImmediate<opc, GPR32, movimm32_shift, asm> {
+ let Inst{31} = 0;
+ }
+
+ def Xi : BaseInsertImmediate<opc, GPR64, movimm64_shift, asm> {
+ let Inst{31} = 1;
+ }
+}
+
+//---
+// Add/Subtract
+//---
+
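+// The immediate forms take a 12-bit unsigned value optionally shifted left by
+// 12, e.g. "add x0, x1, #1, lsl #12"; the shift is carried in imm{13-12}.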
+class BaseAddSubImm<bit isSub, bit setFlags, RegisterClass dstRegtype,
+ RegisterClass srcRegtype, addsub_shifted_imm immtype,
+ string asm, SDPatternOperator OpNode>
+ : I<(outs dstRegtype:$Rd), (ins srcRegtype:$Rn, immtype:$imm),
+ asm, "\t$Rd, $Rn, $imm", "",
+ [(set dstRegtype:$Rd, (OpNode srcRegtype:$Rn, immtype:$imm))]>,
+ Sched<[WriteI, ReadI]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<14> imm;
+ let Inst{30} = isSub;
+ let Inst{29} = setFlags;
+ let Inst{28-24} = 0b10001;
+ let Inst{23-22} = imm{13-12}; // '00' => lsl #0, '01' => lsl #12
+ let Inst{21-10} = imm{11-0};
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+ let DecoderMethod = "DecodeBaseAddSubImm";
+}
-// Format for floating-point compare instructions.
-class A64I_fpcmp<bit m, bit s, bits<2> type, bits<2> op, bits<5> opcode2,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64Inst<outs, ins, asmstr, patterns, itin> {
+class BaseAddSubRegPseudo<RegisterClass regtype,
+ SDPatternOperator OpNode>
+ : Pseudo<(outs regtype:$Rd), (ins regtype:$Rn, regtype:$Rm),
+ [(set regtype:$Rd, (OpNode regtype:$Rn, regtype:$Rm))]>,
+ Sched<[WriteI, ReadI, ReadI]>;
+
+class BaseAddSubSReg<bit isSub, bit setFlags, RegisterClass regtype,
+ arith_shifted_reg shifted_regtype, string asm,
+ SDPatternOperator OpNode>
+ : I<(outs regtype:$Rd), (ins regtype:$Rn, shifted_regtype:$Rm),
+ asm, "\t$Rd, $Rn, $Rm", "",
+ [(set regtype:$Rd, (OpNode regtype:$Rn, shifted_regtype:$Rm))]>,
+ Sched<[WriteISReg, ReadI, ReadISReg]> {
+ // The operands are in order to match the 'addr' MI operands, so we
+ // don't need an encoder method and by-name matching. Just use the default
+ // in-order handling. Since we're using by-order, make sure the names
+ // do not match.
+ bits<5> dst;
+ bits<5> src1;
+ bits<5> src2;
+ bits<8> shift;
+ let Inst{30} = isSub;
+ let Inst{29} = setFlags;
+ let Inst{28-24} = 0b01011;
+ let Inst{23-22} = shift{7-6};
+ let Inst{21} = 0;
+ let Inst{20-16} = src2;
+ let Inst{15-10} = shift{5-0};
+ let Inst{9-5} = src1;
+ let Inst{4-0} = dst;
+
+ let DecoderMethod = "DecodeThreeAddrSRegInstruction";
+}
+
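+// Extended-register form: the second source is extended (UXTB/UXTH/UXTW/UXTX
+// or the signed equivalents) and then shifted left by 0-4; the extend type is
+// carried in ext{5-3} and the shift amount in ext{2-0}.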
+class BaseAddSubEReg<bit isSub, bit setFlags, RegisterClass dstRegtype,
+ RegisterClass src1Regtype, Operand src2Regtype,
+ string asm, SDPatternOperator OpNode>
+ : I<(outs dstRegtype:$R1),
+ (ins src1Regtype:$R2, src2Regtype:$R3),
+ asm, "\t$R1, $R2, $R3", "",
+ [(set dstRegtype:$R1, (OpNode src1Regtype:$R2, src2Regtype:$R3))]>,
+ Sched<[WriteIEReg, ReadI, ReadIEReg]> {
+ bits<5> Rd;
bits<5> Rn;
bits<5> Rm;
+ bits<6> ext;
+ let Inst{30} = isSub;
+ let Inst{29} = setFlags;
+ let Inst{28-24} = 0b01011;
+ let Inst{23-21} = 0b001;
+ let Inst{20-16} = Rm;
+ let Inst{15-13} = ext{5-3};
+ let Inst{12-10} = ext{2-0};
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
- let Inst{31} = m;
- let Inst{30} = 0b0;
- let Inst{29} = s;
- let Inst{28-24} = 0b11110;
- let Inst{23-22} = type;
- let Inst{21} = 0b1;
+ let DecoderMethod = "DecodeAddSubERegInstruction";
+}
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseAddSubEReg64<bit isSub, bit setFlags, RegisterClass dstRegtype,
+ RegisterClass src1Regtype, RegisterClass src2Regtype,
+ Operand ext_op, string asm>
+ : I<(outs dstRegtype:$Rd),
+ (ins src1Regtype:$Rn, src2Regtype:$Rm, ext_op:$ext),
+ asm, "\t$Rd, $Rn, $Rm$ext", "", []>,
+ Sched<[WriteIEReg, ReadI, ReadIEReg]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+ bits<6> ext;
+ let Inst{30} = isSub;
+ let Inst{29} = setFlags;
+ let Inst{28-24} = 0b01011;
+ let Inst{23-21} = 0b001;
let Inst{20-16} = Rm;
- let Inst{15-14} = op;
- let Inst{13-10} = 0b1000;
- let Inst{9-5} = Rn;
- let Inst{4-0} = opcode2;
+ let Inst{15} = ext{5};
+ let Inst{12-10} = ext{2-0};
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+
+ let DecoderMethod = "DecodeAddSubERegInstruction";
}
-// Format for floating-point conditional compare instructions.
-class A64I_fpccmp<bit m, bit s, bits<2> type, bit op,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin> {
+// Aliases for register+register add/subtract.
+class AddSubRegAlias<string asm, Instruction inst, RegisterClass dstRegtype,
+ RegisterClass src1Regtype, RegisterClass src2Regtype,
+ int shiftExt>
+ : InstAlias<asm#" $dst, $src1, $src2",
+ (inst dstRegtype:$dst, src1Regtype:$src1, src2Regtype:$src2,
+ shiftExt)>;
+
+multiclass AddSub<bit isSub, string mnemonic,
+ SDPatternOperator OpNode = null_frag> {
+ let hasSideEffects = 0 in {
+ // Add/Subtract immediate
+ def Wri : BaseAddSubImm<isSub, 0, GPR32sp, GPR32sp, addsub_shifted_imm32,
+ mnemonic, OpNode> {
+ let Inst{31} = 0;
+ }
+ def Xri : BaseAddSubImm<isSub, 0, GPR64sp, GPR64sp, addsub_shifted_imm64,
+ mnemonic, OpNode> {
+ let Inst{31} = 1;
+ }
+
+ // Add/Subtract register - Only used for CodeGen
+ def Wrr : BaseAddSubRegPseudo<GPR32, OpNode>;
+ def Xrr : BaseAddSubRegPseudo<GPR64, OpNode>;
+
+ // Add/Subtract shifted register
+ def Wrs : BaseAddSubSReg<isSub, 0, GPR32, arith_shifted_reg32, mnemonic,
+ OpNode> {
+ let Inst{31} = 0;
+ }
+ def Xrs : BaseAddSubSReg<isSub, 0, GPR64, arith_shifted_reg64, mnemonic,
+ OpNode> {
+ let Inst{31} = 1;
+ }
+ }
+
+ // Add/Subtract extended register
+ let AddedComplexity = 1, hasSideEffects = 0 in {
+ def Wrx : BaseAddSubEReg<isSub, 0, GPR32sp, GPR32sp,
+ arith_extended_reg32<i32>, mnemonic, OpNode> {
+ let Inst{31} = 0;
+ }
+ def Xrx : BaseAddSubEReg<isSub, 0, GPR64sp, GPR64sp,
+ arith_extended_reg32to64<i64>, mnemonic, OpNode> {
+ let Inst{31} = 1;
+ }
+ }
+
+ def Xrx64 : BaseAddSubEReg64<isSub, 0, GPR64sp, GPR64sp, GPR64,
+ arith_extendlsl64, mnemonic> {
+ // UXTX and SXTX only.
+ let Inst{14-13} = 0b11;
+ let Inst{31} = 1;
+ }
+
+ // Register/register aliases with no shift when SP is not used.
+ def : AddSubRegAlias<mnemonic, !cast<Instruction>(NAME#"Wrs"),
+ GPR32, GPR32, GPR32, 0>;
+ def : AddSubRegAlias<mnemonic, !cast<Instruction>(NAME#"Xrs"),
+ GPR64, GPR64, GPR64, 0>;
+
+ // Register/register aliases with no shift when either the destination or
+ // first source register is SP.
+ def : AddSubRegAlias<mnemonic, !cast<Instruction>(NAME#"Wrx"),
+ GPR32sponly, GPR32sp, GPR32, 16>; // UXTW #0
+ def : AddSubRegAlias<mnemonic, !cast<Instruction>(NAME#"Wrx"),
+ GPR32sp, GPR32sponly, GPR32, 16>; // UXTW #0
+ def : AddSubRegAlias<mnemonic,
+ !cast<Instruction>(NAME#"Xrx64"),
+ GPR64sponly, GPR64sp, GPR64, 24>; // UXTX #0
+ def : AddSubRegAlias<mnemonic,
+ !cast<Instruction>(NAME#"Xrx64"),
+ GPR64sp, GPR64sponly, GPR64, 24>; // UXTX #0
+}
+
+multiclass AddSubS<bit isSub, string mnemonic, SDNode OpNode, string cmp> {
+ let isCompare = 1, Defs = [NZCV] in {
+ // Add/Subtract immediate
+ def Wri : BaseAddSubImm<isSub, 1, GPR32, GPR32sp, addsub_shifted_imm32,
+ mnemonic, OpNode> {
+ let Inst{31} = 0;
+ }
+ def Xri : BaseAddSubImm<isSub, 1, GPR64, GPR64sp, addsub_shifted_imm64,
+ mnemonic, OpNode> {
+ let Inst{31} = 1;
+ }
+
+ // Add/Subtract register
+ def Wrr : BaseAddSubRegPseudo<GPR32, OpNode>;
+ def Xrr : BaseAddSubRegPseudo<GPR64, OpNode>;
+
+ // Add/Subtract shifted register
+ def Wrs : BaseAddSubSReg<isSub, 1, GPR32, arith_shifted_reg32, mnemonic,
+ OpNode> {
+ let Inst{31} = 0;
+ }
+ def Xrs : BaseAddSubSReg<isSub, 1, GPR64, arith_shifted_reg64, mnemonic,
+ OpNode> {
+ let Inst{31} = 1;
+ }
+
+ // Add/Subtract extended register
+ let AddedComplexity = 1 in {
+ def Wrx : BaseAddSubEReg<isSub, 1, GPR32, GPR32sp,
+ arith_extended_reg32<i32>, mnemonic, OpNode> {
+ let Inst{31} = 0;
+ }
+ def Xrx : BaseAddSubEReg<isSub, 1, GPR64, GPR64sp,
+ arith_extended_reg32<i64>, mnemonic, OpNode> {
+ let Inst{31} = 1;
+ }
+ }
+
+ def Xrx64 : BaseAddSubEReg64<isSub, 1, GPR64, GPR64sp, GPR64,
+ arith_extendlsl64, mnemonic> {
+ // UXTX and SXTX only.
+ let Inst{14-13} = 0b11;
+ let Inst{31} = 1;
+ }
+ } // Defs = [NZCV]
+
+ // Compare aliases
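+ // (The compare mnemonic is the flag-setting form with WZR/XZR as the
+ // destination register.)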
+ def : InstAlias<cmp#" $src, $imm", (!cast<Instruction>(NAME#"Wri")
+ WZR, GPR32sp:$src, addsub_shifted_imm32:$imm), 5>;
+ def : InstAlias<cmp#" $src, $imm", (!cast<Instruction>(NAME#"Xri")
+ XZR, GPR64sp:$src, addsub_shifted_imm64:$imm), 5>;
+ def : InstAlias<cmp#" $src1, $src2$sh", (!cast<Instruction>(NAME#"Wrx")
+ WZR, GPR32sp:$src1, GPR32:$src2, arith_extend:$sh), 4>;
+ def : InstAlias<cmp#" $src1, $src2$sh", (!cast<Instruction>(NAME#"Xrx")
+ XZR, GPR64sp:$src1, GPR32:$src2, arith_extend:$sh), 4>;
+ def : InstAlias<cmp#" $src1, $src2$sh", (!cast<Instruction>(NAME#"Xrx64")
+ XZR, GPR64sp:$src1, GPR64:$src2, arith_extendlsl64:$sh), 4>;
+ def : InstAlias<cmp#" $src1, $src2$sh", (!cast<Instruction>(NAME#"Wrs")
+ WZR, GPR32:$src1, GPR32:$src2, arith_shift32:$sh), 4>;
+ def : InstAlias<cmp#" $src1, $src2$sh", (!cast<Instruction>(NAME#"Xrs")
+ XZR, GPR64:$src1, GPR64:$src2, arith_shift64:$sh), 4>;
+
+ // Compare shorthands
+ def : InstAlias<cmp#" $src1, $src2", (!cast<Instruction>(NAME#"Wrs")
+ WZR, GPR32:$src1, GPR32:$src2, 0), 5>;
+ def : InstAlias<cmp#" $src1, $src2", (!cast<Instruction>(NAME#"Xrs")
+ XZR, GPR64:$src1, GPR64:$src2, 0), 5>;
+ def : InstAlias<cmp#" $src1, $src2", (!cast<Instruction>(NAME#"Wrx")
+ WZR, GPR32sponly:$src1, GPR32:$src2, 16), 5>;
+ def : InstAlias<cmp#" $src1, $src2", (!cast<Instruction>(NAME#"Xrx64")
+ XZR, GPR64sponly:$src1, GPR64:$src2, 24), 5>;
+
+ // Register/register aliases with no shift when SP is not used.
+ def : AddSubRegAlias<mnemonic, !cast<Instruction>(NAME#"Wrs"),
+ GPR32, GPR32, GPR32, 0>;
+ def : AddSubRegAlias<mnemonic, !cast<Instruction>(NAME#"Xrs"),
+ GPR64, GPR64, GPR64, 0>;
+
+ // Register/register aliases with no shift when the first source register
+ // is SP.
+ def : AddSubRegAlias<mnemonic, !cast<Instruction>(NAME#"Wrx"),
+ GPR32, GPR32sponly, GPR32, 16>; // UXTW #0
+ def : AddSubRegAlias<mnemonic,
+ !cast<Instruction>(NAME#"Xrx64"),
+ GPR64, GPR64sponly, GPR64, 24>; // UXTX #0
+}
+
+//---
+// Extract
+//---
+def SDTA64EXTR : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
+ SDTCisPtrTy<3>]>;
+def AArch64Extr : SDNode<"AArch64ISD::EXTR", SDTA64EXTR>;
+
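+// EXTR extracts a register-width field starting at bit $imm of the
+// concatenation Rn:Rm (Rm supplies the low half); with Rn == Rm this is a
+// rotate right, which is how the ROR-immediate alias is expressed.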
+class BaseExtractImm<RegisterClass regtype, Operand imm_type, string asm,
+ list<dag> patterns>
+ : I<(outs regtype:$Rd), (ins regtype:$Rn, regtype:$Rm, imm_type:$imm),
+ asm, "\t$Rd, $Rn, $Rm, $imm", "", patterns>,
+ Sched<[WriteExtr, ReadExtrHi]> {
+ bits<5> Rd;
bits<5> Rn;
bits<5> Rm;
- bits<4> NZCVImm;
- bits<4> Cond;
+ bits<6> imm;
- let Inst{31} = m;
- let Inst{30} = 0b0;
- let Inst{29} = s;
- let Inst{28-24} = 0b11110;
- let Inst{23-22} = type;
- let Inst{21} = 0b1;
+ let Inst{30-23} = 0b00100111;
+ let Inst{21} = 0;
let Inst{20-16} = Rm;
- let Inst{15-12} = Cond;
- let Inst{11-10} = 0b01;
- let Inst{9-5} = Rn;
- let Inst{4} = op;
- let Inst{3-0} = NZCVImm;
+ let Inst{15-10} = imm;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
}
-// Format for floating-point conditional select instructions.
-class A64I_fpcondsel<bit m, bit s, bits<2> type,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
- bits<4> Cond;
+multiclass ExtractImm<string asm> {
+ def Wrri : BaseExtractImm<GPR32, imm0_31, asm,
+ [(set GPR32:$Rd,
+ (AArch64Extr GPR32:$Rn, GPR32:$Rm, imm0_31:$imm))]> {
+ let Inst{31} = 0;
+ let Inst{22} = 0;
+ // imm<5> must be zero.
+ let imm{5} = 0;
+ }
+ def Xrri : BaseExtractImm<GPR64, imm0_63, asm,
+ [(set GPR64:$Rd,
+ (AArch64Extr GPR64:$Rn, GPR64:$Rm, imm0_63:$imm))]> {
+
+ let Inst{31} = 1;
+ let Inst{22} = 1;
+ }
+}
- let Inst{31} = m;
- let Inst{30} = 0b0;
- let Inst{29} = s;
- let Inst{28-24} = 0b11110;
- let Inst{23-22} = type;
- let Inst{21} = 0b1;
- // Inherit Rm in 20-16
- let Inst{15-12} = Cond;
- let Inst{11-10} = 0b11;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
+//---
+// Bitfield
+//---
+
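+// immr and imms encode the rotate amount and the index of the highest bit of
+// the source field; SBFM/BFM/UBFM and their many aliases (ASR/LSR/LSL
+// immediate, UBFX/SBFX, SXTB/SXTH, BFI, ...) all map onto these two fields.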
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseBitfieldImm<bits<2> opc,
+ RegisterClass regtype, Operand imm_type, string asm>
+ : I<(outs regtype:$Rd), (ins regtype:$Rn, imm_type:$immr, imm_type:$imms),
+ asm, "\t$Rd, $Rn, $immr, $imms", "", []>,
+ Sched<[WriteIS, ReadI]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<6> immr;
+ bits<6> imms;
+
+ let Inst{30-29} = opc;
+ let Inst{28-23} = 0b100110;
+ let Inst{21-16} = immr;
+ let Inst{15-10} = imms;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
}
+multiclass BitfieldImm<bits<2> opc, string asm> {
+ def Wri : BaseBitfieldImm<opc, GPR32, imm0_31, asm> {
+ let Inst{31} = 0;
+ let Inst{22} = 0;
+ // imms<5> and immr<5> must be zero, else ReservedValue().
+ let Inst{21} = 0;
+ let Inst{15} = 0;
+ }
+ def Xri : BaseBitfieldImm<opc, GPR64, imm0_63, asm> {
+ let Inst{31} = 1;
+ let Inst{22} = 1;
+ }
+}
-// Format for floating-point data-processing (1 source) instructions.
-class A64I_fpdp1<bit m, bit s, bits<2> type, bits<6> opcode,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin> {
- let Inst{31} = m;
- let Inst{30} = 0b0;
- let Inst{29} = s;
- let Inst{28-24} = 0b11110;
- let Inst{23-22} = type;
- let Inst{21} = 0b1;
- let Inst{20-15} = opcode;
- let Inst{14-10} = 0b10000;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
-}
-
-// Format for floating-point data-processing (2 sources) instructions.
-class A64I_fpdp2<bit m, bit s, bits<2> type, bits<4> opcode,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
- let Inst{31} = m;
- let Inst{30} = 0b0;
- let Inst{29} = s;
- let Inst{28-24} = 0b11110;
- let Inst{23-22} = type;
- let Inst{21} = 0b1;
- // Inherit Rm in 20-16
- let Inst{15-12} = opcode;
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseBitfieldImmWith2RegArgs<bits<2> opc,
+ RegisterClass regtype, Operand imm_type, string asm>
+ : I<(outs regtype:$Rd), (ins regtype:$src, regtype:$Rn, imm_type:$immr,
+ imm_type:$imms),
+ asm, "\t$Rd, $Rn, $immr, $imms", "$src = $Rd", []>,
+ Sched<[WriteIS, ReadI]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<6> immr;
+ bits<6> imms;
+
+ let Inst{30-29} = opc;
+ let Inst{28-23} = 0b100110;
+ let Inst{21-16} = immr;
+ let Inst{15-10} = imms;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+multiclass BitfieldImmWith2RegArgs<bits<2> opc, string asm> {
+ def Wri : BaseBitfieldImmWith2RegArgs<opc, GPR32, imm0_31, asm> {
+ let Inst{31} = 0;
+ let Inst{22} = 0;
+ // imms<5> and immr<5> must be zero, else ReservedValue().
+ let Inst{21} = 0;
+ let Inst{15} = 0;
+ }
+ def Xri : BaseBitfieldImmWith2RegArgs<opc, GPR64, imm0_63, asm> {
+ let Inst{31} = 1;
+ let Inst{22} = 1;
+ }
+}
+
+//---
+// Logical
+//---
+
+// Logical (immediate)
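+// The 13-bit imm field is the N:immr:imms "bitmask immediate" encoding: a
+// run of ones, rotated and replicated across the register, so only a subset
+// of constants (e.g. 0x00ff00ff00ff00ff) can be encoded this way.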
+class BaseLogicalImm<bits<2> opc, RegisterClass dregtype,
+ RegisterClass sregtype, Operand imm_type, string asm,
+ list<dag> pattern>
+ : I<(outs dregtype:$Rd), (ins sregtype:$Rn, imm_type:$imm),
+ asm, "\t$Rd, $Rn, $imm", "", pattern>,
+ Sched<[WriteI, ReadI]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<13> imm;
+ let Inst{30-29} = opc;
+ let Inst{28-23} = 0b100100;
+ let Inst{22} = imm{12};
+ let Inst{21-16} = imm{11-6};
+ let Inst{15-10} = imm{5-0};
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+
+ let DecoderMethod = "DecodeLogicalImmInstruction";
+}
+
+// Logical (shifted register)
+class BaseLogicalSReg<bits<2> opc, bit N, RegisterClass regtype,
+ logical_shifted_reg shifted_regtype, string asm,
+ list<dag> pattern>
+ : I<(outs regtype:$Rd), (ins regtype:$Rn, shifted_regtype:$Rm),
+ asm, "\t$Rd, $Rn, $Rm", "", pattern>,
+ Sched<[WriteISReg, ReadI, ReadISReg]> {
+ // The operands are in order to match the 'addr' MI operands, so we
+ // don't need an encoder method and by-name matching. Just use the default
+ // in-order handling. Since we're using by-order, make sure the names
+ // do not match.
+ bits<5> dst;
+ bits<5> src1;
+ bits<5> src2;
+ bits<8> shift;
+ let Inst{30-29} = opc;
+ let Inst{28-24} = 0b01010;
+ let Inst{23-22} = shift{7-6};
+ let Inst{21} = N;
+ let Inst{20-16} = src2;
+ let Inst{15-10} = shift{5-0};
+ let Inst{9-5} = src1;
+ let Inst{4-0} = dst;
+
+ let DecoderMethod = "DecodeThreeAddrSRegInstruction";
+}
+
+// Aliases for register+register logical instructions.
+class LogicalRegAlias<string asm, Instruction inst, RegisterClass regtype>
+ : InstAlias<asm#" $dst, $src1, $src2",
+ (inst regtype:$dst, regtype:$src1, regtype:$src2, 0)>;
+
+multiclass LogicalImm<bits<2> opc, string mnemonic, SDNode OpNode,
+ string Alias> {
+ let AddedComplexity = 6 in
+ def Wri : BaseLogicalImm<opc, GPR32sp, GPR32, logical_imm32, mnemonic,
+ [(set GPR32sp:$Rd, (OpNode GPR32:$Rn,
+ logical_imm32:$imm))]> {
+ let Inst{31} = 0;
+ let Inst{22} = 0; // 64-bit version has an additional bit of immediate.
+ }
+ let AddedComplexity = 6 in
+ def Xri : BaseLogicalImm<opc, GPR64sp, GPR64, logical_imm64, mnemonic,
+ [(set GPR64sp:$Rd, (OpNode GPR64:$Rn,
+ logical_imm64:$imm))]> {
+ let Inst{31} = 1;
+ }
+
+ def : InstAlias<Alias # " $Rd, $Rn, $imm",
+ (!cast<Instruction>(NAME # "Wri") GPR32sp:$Rd, GPR32:$Rn,
+ logical_imm32_not:$imm), 0>;
+ def : InstAlias<Alias # " $Rd, $Rn, $imm",
+ (!cast<Instruction>(NAME # "Xri") GPR64sp:$Rd, GPR64:$Rn,
+ logical_imm64_not:$imm), 0>;
+}
+
+multiclass LogicalImmS<bits<2> opc, string mnemonic, SDNode OpNode,
+ string Alias> {
+ let isCompare = 1, Defs = [NZCV] in {
+ def Wri : BaseLogicalImm<opc, GPR32, GPR32, logical_imm32, mnemonic,
+ [(set GPR32:$Rd, (OpNode GPR32:$Rn, logical_imm32:$imm))]> {
+ let Inst{31} = 0;
+ let Inst{22} = 0; // 64-bit version has an additional bit of immediate.
+ }
+ def Xri : BaseLogicalImm<opc, GPR64, GPR64, logical_imm64, mnemonic,
+ [(set GPR64:$Rd, (OpNode GPR64:$Rn, logical_imm64:$imm))]> {
+ let Inst{31} = 1;
+ }
+ } // end Defs = [NZCV]
+
+ def : InstAlias<Alias # " $Rd, $Rn, $imm",
+ (!cast<Instruction>(NAME # "Wri") GPR32:$Rd, GPR32:$Rn,
+ logical_imm32_not:$imm), 0>;
+ def : InstAlias<Alias # " $Rd, $Rn, $imm",
+ (!cast<Instruction>(NAME # "Xri") GPR64:$Rd, GPR64:$Rn,
+ logical_imm64_not:$imm), 0>;
+}
+
+class BaseLogicalRegPseudo<RegisterClass regtype, SDPatternOperator OpNode>
+ : Pseudo<(outs regtype:$Rd), (ins regtype:$Rn, regtype:$Rm),
+ [(set regtype:$Rd, (OpNode regtype:$Rn, regtype:$Rm))]>,
+ Sched<[WriteI, ReadI, ReadI]>;
+
+// Split from LogicalImm as not all instructions have both.
+multiclass LogicalReg<bits<2> opc, bit N, string mnemonic,
+ SDPatternOperator OpNode> {
+ def Wrr : BaseLogicalRegPseudo<GPR32, OpNode>;
+ def Xrr : BaseLogicalRegPseudo<GPR64, OpNode>;
+
+ def Wrs : BaseLogicalSReg<opc, N, GPR32, logical_shifted_reg32, mnemonic,
+ [(set GPR32:$Rd, (OpNode GPR32:$Rn,
+ logical_shifted_reg32:$Rm))]> {
+ let Inst{31} = 0;
+ }
+ def Xrs : BaseLogicalSReg<opc, N, GPR64, logical_shifted_reg64, mnemonic,
+ [(set GPR64:$Rd, (OpNode GPR64:$Rn,
+ logical_shifted_reg64:$Rm))]> {
+ let Inst{31} = 1;
+ }
+
+ def : LogicalRegAlias<mnemonic,
+ !cast<Instruction>(NAME#"Wrs"), GPR32>;
+ def : LogicalRegAlias<mnemonic,
+ !cast<Instruction>(NAME#"Xrs"), GPR64>;
+}
+
+// Split from LogicalReg to allow setting NZCV Defs
+multiclass LogicalRegS<bits<2> opc, bit N, string mnemonic,
+ SDPatternOperator OpNode = null_frag> {
+ let Defs = [NZCV], mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
+ def Wrr : BaseLogicalRegPseudo<GPR32, OpNode>;
+ def Xrr : BaseLogicalRegPseudo<GPR64, OpNode>;
+
+ def Wrs : BaseLogicalSReg<opc, N, GPR32, logical_shifted_reg32, mnemonic,
+ [(set GPR32:$Rd, (OpNode GPR32:$Rn, logical_shifted_reg32:$Rm))]> {
+ let Inst{31} = 0;
+ }
+ def Xrs : BaseLogicalSReg<opc, N, GPR64, logical_shifted_reg64, mnemonic,
+ [(set GPR64:$Rd, (OpNode GPR64:$Rn, logical_shifted_reg64:$Rm))]> {
+ let Inst{31} = 1;
+ }
+ } // Defs = [NZCV]
+
+ def : LogicalRegAlias<mnemonic,
+ !cast<Instruction>(NAME#"Wrs"), GPR32>;
+ def : LogicalRegAlias<mnemonic,
+ !cast<Instruction>(NAME#"Xrs"), GPR64>;
+}
+
+//---
+// Conditionally set flags
+//---
+
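+// Conditional compare (CCMN/CCMP): if $cond holds, NZCV is set by comparing
+// Rn against the second operand; otherwise NZCV is loaded directly from the
+// 4-bit $nzcv literal.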
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseCondSetFlagsImm<bit op, RegisterClass regtype, string asm>
+ : I<(outs), (ins regtype:$Rn, imm0_31:$imm, imm0_15:$nzcv, ccode:$cond),
+ asm, "\t$Rn, $imm, $nzcv, $cond", "", []>,
+ Sched<[WriteI, ReadI]> {
+ let Uses = [NZCV];
+ let Defs = [NZCV];
+
+ bits<5> Rn;
+ bits<5> imm;
+ bits<4> nzcv;
+ bits<4> cond;
+
+ let Inst{30} = op;
+ let Inst{29-21} = 0b111010010;
+ let Inst{20-16} = imm;
+ let Inst{15-12} = cond;
let Inst{11-10} = 0b10;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
+ let Inst{9-5} = Rn;
+ let Inst{4} = 0b0;
+ let Inst{3-0} = nzcv;
}
-// Format for floating-point data-processing (3 sources) instructions.
-class A64I_fpdp3<bit m, bit s, bits<2> type, bit o1, bit o0,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
- bits<5> Ra;
+multiclass CondSetFlagsImm<bit op, string asm> {
+ def Wi : BaseCondSetFlagsImm<op, GPR32, asm> {
+ let Inst{31} = 0;
+ }
+ def Xi : BaseCondSetFlagsImm<op, GPR64, asm> {
+ let Inst{31} = 1;
+ }
+}
- let Inst{31} = m;
- let Inst{30} = 0b0;
- let Inst{29} = s;
- let Inst{28-24} = 0b11111;
- let Inst{23-22} = type;
- let Inst{21} = o1;
- // Inherit Rm in 20-16
- let Inst{15} = o0;
- let Inst{14-10} = Ra;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseCondSetFlagsReg<bit op, RegisterClass regtype, string asm>
+ : I<(outs), (ins regtype:$Rn, regtype:$Rm, imm0_15:$nzcv, ccode:$cond),
+ asm, "\t$Rn, $Rm, $nzcv, $cond", "", []>,
+ Sched<[WriteI, ReadI, ReadI]> {
+ let Uses = [NZCV];
+ let Defs = [NZCV];
+
+ bits<5> Rn;
+ bits<5> Rm;
+ bits<4> nzcv;
+ bits<4> cond;
+
+ let Inst{30} = op;
+ let Inst{29-21} = 0b111010010;
+ let Inst{20-16} = Rm;
+ let Inst{15-12} = cond;
+ let Inst{11-10} = 0b00;
+ let Inst{9-5} = Rn;
+ let Inst{4} = 0b0;
+ let Inst{3-0} = nzcv;
}
-// Format for floating-point <-> fixed-point conversion instructions.
-class A64I_fpfixed<bit sf, bit s, bits<2> type, bits<2> mode, bits<3> opcode,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin> {
- bits<6> Scale;
+multiclass CondSetFlagsReg<bit op, string asm> {
+ def Wr : BaseCondSetFlagsReg<op, GPR32, asm> {
+ let Inst{31} = 0;
+ }
+ def Xr : BaseCondSetFlagsReg<op, GPR64, asm> {
+ let Inst{31} = 1;
+ }
+}
- let Inst{31} = sf;
- let Inst{30} = 0b0;
- let Inst{29} = s;
- let Inst{28-24} = 0b11110;
- let Inst{23-22} = type;
- let Inst{21} = 0b0;
- let Inst{20-19} = mode;
- let Inst{18-16} = opcode;
- let Inst{15-10} = Scale;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
+//---
+// Conditional select
+//---
+
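+// op/op2 select between CSEL, CSINC, CSINV and CSNEG; BaseCondSelectOp below
+// applies frag to the second operand so the increment/invert/negate variants
+// can be pattern-matched directly.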
+class BaseCondSelect<bit op, bits<2> op2, RegisterClass regtype, string asm>
+ : I<(outs regtype:$Rd), (ins regtype:$Rn, regtype:$Rm, ccode:$cond),
+ asm, "\t$Rd, $Rn, $Rm, $cond", "",
+ [(set regtype:$Rd,
+ (AArch64csel regtype:$Rn, regtype:$Rm, (i32 imm:$cond), NZCV))]>,
+ Sched<[WriteI, ReadI, ReadI]> {
+ let Uses = [NZCV];
+
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+ bits<4> cond;
+
+ let Inst{30} = op;
+ let Inst{29-21} = 0b011010100;
+ let Inst{20-16} = Rm;
+ let Inst{15-12} = cond;
+ let Inst{11-10} = op2;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
}
-// Format for floating-point <-> integer conversion instructions.
-class A64I_fpint<bit sf, bit s, bits<2> type, bits<2> rmode, bits<3> opcode,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin> {
- let Inst{31} = sf;
- let Inst{30} = 0b0;
- let Inst{29} = s;
- let Inst{28-24} = 0b11110;
- let Inst{23-22} = type;
- let Inst{21} = 0b1;
- let Inst{20-19} = rmode;
- let Inst{18-16} = opcode;
- let Inst{15-10} = 0b000000;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
+multiclass CondSelect<bit op, bits<2> op2, string asm> {
+ def Wr : BaseCondSelect<op, op2, GPR32, asm> {
+ let Inst{31} = 0;
+ }
+ def Xr : BaseCondSelect<op, op2, GPR64, asm> {
+ let Inst{31} = 1;
+ }
}
+class BaseCondSelectOp<bit op, bits<2> op2, RegisterClass regtype, string asm,
+ PatFrag frag>
+ : I<(outs regtype:$Rd), (ins regtype:$Rn, regtype:$Rm, ccode:$cond),
+ asm, "\t$Rd, $Rn, $Rm, $cond", "",
+ [(set regtype:$Rd,
+ (AArch64csel regtype:$Rn, (frag regtype:$Rm),
+ (i32 imm:$cond), NZCV))]>,
+ Sched<[WriteI, ReadI, ReadI]> {
+ let Uses = [NZCV];
-// Format for floating-point immediate instructions.
-class A64I_fpimm<bit m, bit s, bits<2> type, bits<5> imm5,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRd<outs, ins, asmstr, patterns, itin> {
- bits<8> Imm8;
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+ bits<4> cond;
- let Inst{31} = m;
- let Inst{30} = 0b0;
- let Inst{29} = s;
- let Inst{28-24} = 0b11110;
- let Inst{23-22} = type;
- let Inst{21} = 0b1;
- let Inst{20-13} = Imm8;
- let Inst{12-10} = 0b100;
- let Inst{9-5} = imm5;
- // Inherit Rd in 4-0
+ let Inst{30} = op;
+ let Inst{29-21} = 0b011010100;
+ let Inst{20-16} = Rm;
+ let Inst{15-12} = cond;
+ let Inst{11-10} = op2;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+def inv_cond_XFORM : SDNodeXForm<imm, [{
+ AArch64CC::CondCode CC = static_cast<AArch64CC::CondCode>(N->getZExtValue());
+ return CurDAG->getTargetConstant(AArch64CC::getInvertedCondCode(CC), MVT::i32);
+}]>;
+
+multiclass CondSelectOp<bit op, bits<2> op2, string asm, PatFrag frag> {
+ def Wr : BaseCondSelectOp<op, op2, GPR32, asm, frag> {
+ let Inst{31} = 0;
+ }
+ def Xr : BaseCondSelectOp<op, op2, GPR64, asm, frag> {
+ let Inst{31} = 1;
+ }
+
+ def : Pat<(AArch64csel (frag GPR32:$Rm), GPR32:$Rn, (i32 imm:$cond), NZCV),
+ (!cast<Instruction>(NAME # Wr) GPR32:$Rn, GPR32:$Rm,
+ (inv_cond_XFORM imm:$cond))>;
+
+ def : Pat<(AArch64csel (frag GPR64:$Rm), GPR64:$Rn, (i32 imm:$cond), NZCV),
+ (!cast<Instruction>(NAME # Xr) GPR64:$Rn, GPR64:$Rm,
+ (inv_cond_XFORM imm:$cond))>;
+}
+
+//---
+// Special Mask Value
+//---
+def maski8_or_more : Operand<i32>,
+ ImmLeaf<i32, [{ return (Imm & 0xff) == 0xff; }]> {
}
+def maski16_or_more : Operand<i32>,
+ ImmLeaf<i32, [{ return (Imm & 0xffff) == 0xffff; }]> {
+}
+
+
+//---
+// Load/store
+//---
+
+// (unsigned immediate)
+// Indexed for 8-bit registers. offset is in range [0,4095].
+def am_indexed8 : ComplexPattern<i64, 2, "SelectAddrModeIndexed8", []>;
+def am_indexed16 : ComplexPattern<i64, 2, "SelectAddrModeIndexed16", []>;
+def am_indexed32 : ComplexPattern<i64, 2, "SelectAddrModeIndexed32", []>;
+def am_indexed64 : ComplexPattern<i64, 2, "SelectAddrModeIndexed64", []>;
+def am_indexed128 : ComplexPattern<i64, 2, "SelectAddrModeIndexed128", []>;
+
+class UImm12OffsetOperand<int Scale> : AsmOperandClass {
+ let Name = "UImm12Offset" # Scale;
+ let RenderMethod = "addUImm12OffsetOperands<" # Scale # ">";
+ let PredicateMethod = "isUImm12Offset<" # Scale # ">";
+ let DiagnosticType = "InvalidMemoryIndexed" # Scale;
+}
+
+def UImm12OffsetScale1Operand : UImm12OffsetOperand<1>;
+def UImm12OffsetScale2Operand : UImm12OffsetOperand<2>;
+def UImm12OffsetScale4Operand : UImm12OffsetOperand<4>;
+def UImm12OffsetScale8Operand : UImm12OffsetOperand<8>;
+def UImm12OffsetScale16Operand : UImm12OffsetOperand<16>;
+
+class uimm12_scaled<int Scale> : Operand<i64> {
+ let ParserMatchClass
+ = !cast<AsmOperandClass>("UImm12OffsetScale" # Scale # "Operand");
+ let EncoderMethod
+ = "getLdStUImm12OpValue<AArch64::fixup_aarch64_ldst_imm12_scale" # Scale # ">";
+ let PrintMethod = "printUImm12Offset<" # Scale # ">";
+}
+
+def uimm12s1 : uimm12_scaled<1>;
+def uimm12s2 : uimm12_scaled<2>;
+def uimm12s4 : uimm12_scaled<4>;
+def uimm12s8 : uimm12_scaled<8>;
+def uimm12s16 : uimm12_scaled<16>;
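+
+// Each uimm12sN operand accepts byte offsets that are multiples of N in
+// [0, 4095*N]; the encoded 12-bit field holds the offset divided by the
+// access size.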
+
+class BaseLoadStoreUI<bits<2> sz, bit V, bits<2> opc, dag oops, dag iops,
+ string asm, list<dag> pattern>
+ : I<oops, iops, asm, "\t$Rt, [$Rn, $offset]", "", pattern> {
+ bits<5> Rt;
+
+ bits<5> Rn;
+ bits<12> offset;
+
+ let Inst{31-30} = sz;
+ let Inst{29-27} = 0b111;
+ let Inst{26} = V;
+ let Inst{25-24} = 0b01;
+ let Inst{23-22} = opc;
+ let Inst{21-10} = offset;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rt;
+
+ let DecoderMethod = "DecodeUnsignedLdStInstruction";
+}
+
+multiclass LoadUI<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ Operand indextype, string asm, list<dag> pattern> {
+ let AddedComplexity = 10, mayLoad = 1, mayStore = 0, hasSideEffects = 0 in
+ def ui : BaseLoadStoreUI<sz, V, opc, (outs regtype:$Rt),
+ (ins GPR64sp:$Rn, indextype:$offset),
+ asm, pattern>,
+ Sched<[WriteLD]>;
+
+ def : InstAlias<asm # " $Rt, [$Rn]",
+ (!cast<Instruction>(NAME # "ui") regtype:$Rt, GPR64sp:$Rn, 0)>;
+}
+
+multiclass StoreUI<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ Operand indextype, string asm, list<dag> pattern> {
+ let AddedComplexity = 10, mayLoad = 0, mayStore = 1, hasSideEffects = 0 in
+ def ui : BaseLoadStoreUI<sz, V, opc, (outs),
+ (ins regtype:$Rt, GPR64sp:$Rn, indextype:$offset),
+ asm, pattern>,
+ Sched<[WriteST]>;
+ def : InstAlias<asm # " $Rt, [$Rn]",
+ (!cast<Instruction>(NAME # "ui") regtype:$Rt, GPR64sp:$Rn, 0)>;
}
-// Format for load-register (literal) instructions.
-class A64I_LDRlit<bits<2> opc, bit v,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRt<outs, ins, asmstr, patterns, itin> {
- bits<19> Imm19;
+def PrefetchOperand : AsmOperandClass {
+ let Name = "Prefetch";
+ let ParserMethod = "tryParsePrefetch";
+}
+def prfop : Operand<i32> {
+ let PrintMethod = "printPrefetchOp";
+ let ParserMatchClass = PrefetchOperand;
+}
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 1 in
+class PrefetchUI<bits<2> sz, bit V, bits<2> opc, string asm, list<dag> pat>
+ : BaseLoadStoreUI<sz, V, opc,
+ (outs), (ins prfop:$Rt, GPR64sp:$Rn, uimm12s8:$offset),
+ asm, pat>,
+ Sched<[WriteLD]>;
+
+//---
+// Load literal
+//---
+
+// Load literal address: 19-bit immediate. The low two bits of the target
+// offset are implied zero and so are not part of the immediate.
+def am_ldrlit : Operand<OtherVT> {
+ let EncoderMethod = "getLoadLiteralOpValue";
+ let DecoderMethod = "DecodePCRelLabel19";
+ let PrintMethod = "printAlignedLabel";
+ let ParserMatchClass = PCRelLabel19Operand;
+}
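+// The 19-bit word offset gives LDR (literal) and PRFM (literal) a +/-1MiB
+// PC-relative range.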
+let mayLoad = 1, mayStore = 0, hasSideEffects = 0 in
+class LoadLiteral<bits<2> opc, bit V, RegisterClass regtype, string asm>
+ : I<(outs regtype:$Rt), (ins am_ldrlit:$label),
+ asm, "\t$Rt, $label", "", []>,
+ Sched<[WriteLD]> {
+ bits<5> Rt;
+ bits<19> label;
let Inst{31-30} = opc;
let Inst{29-27} = 0b011;
- let Inst{26} = v;
+ let Inst{26} = V;
let Inst{25-24} = 0b00;
- let Inst{23-5} = Imm19;
- // Inherit Rt in 4-0
+ let Inst{23-5} = label;
+ let Inst{4-0} = Rt;
}
-// Format for load-store exclusive instructions.
-class A64I_LDSTex_tn<bits<2> size, bit o2, bit L, bit o1, bit o0,
- dag outs, dag ins, string asmstr,
- list <dag> patterns, InstrItinClass itin>
- : A64InstRtn<outs, ins, asmstr, patterns, itin> {
- let Inst{31-30} = size;
- let Inst{29-24} = 0b001000;
- let Inst{23} = o2;
- let Inst{22} = L;
- let Inst{21} = o1;
- let Inst{15} = o0;
+let mayLoad = 0, mayStore = 0, hasSideEffects = 1 in
+class PrefetchLiteral<bits<2> opc, bit V, string asm, list<dag> pat>
+ : I<(outs), (ins prfop:$Rt, am_ldrlit:$label),
+ asm, "\t$Rt, $label", "", pat>,
+ Sched<[WriteLD]> {
+ bits<5> Rt;
+ bits<19> label;
+ let Inst{31-30} = opc;
+ let Inst{29-27} = 0b011;
+ let Inst{26} = V;
+ let Inst{25-24} = 0b00;
+ let Inst{23-5} = label;
+ let Inst{4-0} = Rt;
}
-class A64I_LDSTex_tt2n<bits<2> size, bit o2, bit L, bit o1, bit o0,
- dag outs, dag ins, string asmstr,
- list <dag> patterns, InstrItinClass itin>:
- A64I_LDSTex_tn<size, o2, L, o1, o0, outs, ins, asmstr, patterns, itin>{
- bits<5> Rt2;
- let Inst{14-10} = Rt2;
+//---
+// Load/store register offset
+//---
+
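+// Register-offset addressing: [Xn|SP, Wm, {s,u}xtw {#shift}] or
+// [Xn|SP, Xm, lsl|sxtx {#shift}], where the optional shift equals log2 of
+// the access size in bytes.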
+def ro_Xindexed8 : ComplexPattern<i64, 4, "SelectAddrModeXRO<8>", []>;
+def ro_Xindexed16 : ComplexPattern<i64, 4, "SelectAddrModeXRO<16>", []>;
+def ro_Xindexed32 : ComplexPattern<i64, 4, "SelectAddrModeXRO<32>", []>;
+def ro_Xindexed64 : ComplexPattern<i64, 4, "SelectAddrModeXRO<64>", []>;
+def ro_Xindexed128 : ComplexPattern<i64, 4, "SelectAddrModeXRO<128>", []>;
+
+def ro_Windexed8 : ComplexPattern<i64, 4, "SelectAddrModeWRO<8>", []>;
+def ro_Windexed16 : ComplexPattern<i64, 4, "SelectAddrModeWRO<16>", []>;
+def ro_Windexed32 : ComplexPattern<i64, 4, "SelectAddrModeWRO<32>", []>;
+def ro_Windexed64 : ComplexPattern<i64, 4, "SelectAddrModeWRO<64>", []>;
+def ro_Windexed128 : ComplexPattern<i64, 4, "SelectAddrModeWRO<128>", []>;
+
+class MemExtendOperand<string Reg, int Width> : AsmOperandClass {
+ let Name = "Mem" # Reg # "Extend" # Width;
+ let PredicateMethod = "isMem" # Reg # "Extend<" # Width # ">";
+ let RenderMethod = "addMemExtendOperands";
+ let DiagnosticType = "InvalidMemory" # Reg # "Extend" # Width;
}
-class A64I_LDSTex_stn<bits<2> size, bit o2, bit L, bit o1, bit o0,
- dag outs, dag ins, string asmstr,
- list <dag> patterns, InstrItinClass itin>:
- A64I_LDSTex_tn<size, o2, L, o1, o0, outs, ins, asmstr, patterns, itin>{
- bits<5> Rs;
- let Inst{20-16} = Rs;
+def MemWExtend8Operand : MemExtendOperand<"W", 8> {
+ // The address "[x0, x1, lsl #0]" actually maps to the variant which performs
+ // the trivial shift.
+ let RenderMethod = "addMemExtend8Operands";
+}
+def MemWExtend16Operand : MemExtendOperand<"W", 16>;
+def MemWExtend32Operand : MemExtendOperand<"W", 32>;
+def MemWExtend64Operand : MemExtendOperand<"W", 64>;
+def MemWExtend128Operand : MemExtendOperand<"W", 128>;
+
+def MemXExtend8Operand : MemExtendOperand<"X", 8> {
+ // The address "[x0, x1, lsl #0]" actually maps to the variant which performs
+ // the trivial shift.
+ let RenderMethod = "addMemExtend8Operands";
+}
+def MemXExtend16Operand : MemExtendOperand<"X", 16>;
+def MemXExtend32Operand : MemExtendOperand<"X", 32>;
+def MemXExtend64Operand : MemExtendOperand<"X", 64>;
+def MemXExtend128Operand : MemExtendOperand<"X", 128>;
+
+class ro_extend<AsmOperandClass ParserClass, string Reg, int Width>
+ : Operand<i32> {
+ let ParserMatchClass = ParserClass;
+ let PrintMethod = "printMemExtend<'" # Reg # "', " # Width # ">";
+ let DecoderMethod = "DecodeMemExtend";
+ let EncoderMethod = "getMemExtendOpValue";
+ let MIOperandInfo = (ops i32imm:$signed, i32imm:$doshift);
}
-class A64I_LDSTex_stt2n<bits<2> size, bit o2, bit L, bit o1, bit o0,
- dag outs, dag ins, string asmstr,
- list <dag> patterns, InstrItinClass itin>:
- A64I_LDSTex_stn<size, o2, L, o1, o0, outs, ins, asmstr, patterns, itin>{
- bits<5> Rt2;
- let Inst{14-10} = Rt2;
+def ro_Wextend8 : ro_extend<MemWExtend8Operand, "w", 8>;
+def ro_Wextend16 : ro_extend<MemWExtend16Operand, "w", 16>;
+def ro_Wextend32 : ro_extend<MemWExtend32Operand, "w", 32>;
+def ro_Wextend64 : ro_extend<MemWExtend64Operand, "w", 64>;
+def ro_Wextend128 : ro_extend<MemWExtend128Operand, "w", 128>;
+
+def ro_Xextend8 : ro_extend<MemXExtend8Operand, "x", 8>;
+def ro_Xextend16 : ro_extend<MemXExtend16Operand, "x", 16>;
+def ro_Xextend32 : ro_extend<MemXExtend32Operand, "x", 32>;
+def ro_Xextend64 : ro_extend<MemXExtend64Operand, "x", 64>;
+def ro_Xextend128 : ro_extend<MemXExtend128Operand, "x", 128>;
+
+class ROAddrMode<ComplexPattern windex, ComplexPattern xindex,
+ Operand wextend, Operand xextend> {
+ // CodeGen-level pattern covering the entire addressing mode.
+ ComplexPattern Wpat = windex;
+ ComplexPattern Xpat = xindex;
+
+ // Asm-level Operand covering the valid "uxtw #3" style syntax.
+ Operand Wext = wextend;
+ Operand Xext = xextend;
}
-// Format for load-store register (immediate post-indexed) instructions
-class A64I_LSpostind<bits<2> size, bit v, bits<2> opc,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRtn<outs, ins, asmstr, patterns, itin> {
- bits<9> SImm9;
+def ro8 : ROAddrMode<ro_Windexed8, ro_Xindexed8, ro_Wextend8, ro_Xextend8>;
+def ro16 : ROAddrMode<ro_Windexed16, ro_Xindexed16, ro_Wextend16, ro_Xextend16>;
+def ro32 : ROAddrMode<ro_Windexed32, ro_Xindexed32, ro_Wextend32, ro_Xextend32>;
+def ro64 : ROAddrMode<ro_Windexed64, ro_Xindexed64, ro_Wextend64, ro_Xextend64>;
+def ro128 : ROAddrMode<ro_Windexed128, ro_Xindexed128, ro_Wextend128,
+ ro_Xextend128>;
- let Inst{31-30} = size;
+class LoadStore8RO<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm, dag ins, dag outs, list<dag> pat>
+ : I<ins, outs, asm, "\t$Rt, [$Rn, $Rm, $extend]", "", pat> {
+ bits<5> Rt;
+ bits<5> Rn;
+ bits<5> Rm;
+ bits<2> extend;
+ let Inst{31-30} = sz;
let Inst{29-27} = 0b111;
- let Inst{26} = v;
+ let Inst{26} = V;
let Inst{25-24} = 0b00;
let Inst{23-22} = opc;
- let Inst{21} = 0b0;
- let Inst{20-12} = SImm9;
- let Inst{11-10} = 0b01;
- // Inherit Rn in 9-5
- // Inherit Rt in 4-0
+ let Inst{21} = 1;
+ let Inst{20-16} = Rm;
+ let Inst{15} = extend{1}; // sign extend Rm?
+ let Inst{14} = 1;
+ let Inst{12} = extend{0}; // do shift?
+ let Inst{11-10} = 0b10;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rt;
}
-// Format for load-store register (immediate pre-indexed) instructions
-class A64I_LSpreind<bits<2> size, bit v, bits<2> opc,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRtn<outs, ins, asmstr, patterns, itin> {
- bits<9> SImm9;
+class ROInstAlias<string asm, RegisterClass regtype, Instruction INST>
+ : InstAlias<asm # " $Rt, [$Rn, $Rm]",
+ (INST regtype:$Rt, GPR64sp:$Rn, GPR64:$Rm, 0, 0)>;
+
+multiclass Load8RO<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm, ValueType Ty, SDPatternOperator loadop> {
+ let AddedComplexity = 10 in
+ def roW : LoadStore8RO<sz, V, opc, regtype, asm,
+ (outs regtype:$Rt),
+ (ins GPR64sp:$Rn, GPR32:$Rm, ro_Wextend8:$extend),
+ [(set (Ty regtype:$Rt),
+ (loadop (ro_Windexed8 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend8:$extend)))]>,
+ Sched<[WriteLDIdx, ReadAdrBase]> {
+ let Inst{13} = 0b0;
+ }
+
+ let AddedComplexity = 10 in
+ def roX : LoadStore8RO<sz, V, opc, regtype, asm,
+ (outs regtype:$Rt),
+ (ins GPR64sp:$Rn, GPR64:$Rm, ro_Xextend8:$extend),
+ [(set (Ty regtype:$Rt),
+ (loadop (ro_Xindexed8 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend8:$extend)))]>,
+ Sched<[WriteLDIdx, ReadAdrBase]> {
+ let Inst{13} = 0b1;
+ }
+
+ def : ROInstAlias<asm, regtype, !cast<Instruction>(NAME # "roX")>;
+}
+multiclass Store8RO<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm, ValueType Ty, SDPatternOperator storeop> {
+ let AddedComplexity = 10 in
+ def roW : LoadStore8RO<sz, V, opc, regtype, asm, (outs),
+ (ins regtype:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend8:$extend),
+ [(storeop (Ty regtype:$Rt),
+ (ro_Windexed8 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend8:$extend))]>,
+ Sched<[WriteSTIdx, ReadAdrBase]> {
+ let Inst{13} = 0b0;
+ }
+
+ let AddedComplexity = 10 in
+ def roX : LoadStore8RO<sz, V, opc, regtype, asm, (outs),
+ (ins regtype:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend8:$extend),
+ [(storeop (Ty regtype:$Rt),
+ (ro_Xindexed8 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend8:$extend))]>,
+ Sched<[WriteSTIdx, ReadAdrBase]> {
+ let Inst{13} = 0b1;
+ }
+
+ def : ROInstAlias<asm, regtype, !cast<Instruction>(NAME # "roX")>;
+}
- let Inst{31-30} = size;
+class LoadStore16RO<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm, dag ins, dag outs, list<dag> pat>
+ : I<ins, outs, asm, "\t$Rt, [$Rn, $Rm, $extend]", "", pat> {
+ bits<5> Rt;
+ bits<5> Rn;
+ bits<5> Rm;
+ bits<2> extend;
+ let Inst{31-30} = sz;
let Inst{29-27} = 0b111;
- let Inst{26} = v;
+ let Inst{26} = V;
let Inst{25-24} = 0b00;
let Inst{23-22} = opc;
- let Inst{21} = 0b0;
- let Inst{20-12} = SImm9;
- let Inst{11-10} = 0b11;
- // Inherit Rn in 9-5
- // Inherit Rt in 4-0
+ let Inst{21} = 1;
+ let Inst{20-16} = Rm;
+ let Inst{15} = extend{1}; // sign extend Rm?
+ let Inst{14} = 1;
+ let Inst{12} = extend{0}; // do shift?
+ let Inst{11-10} = 0b10;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rt;
}
-// Format for load-store register (unprivileged) instructions
-class A64I_LSunpriv<bits<2> size, bit v, bits<2> opc,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRtn<outs, ins, asmstr, patterns, itin> {
- bits<9> SImm9;
+multiclass Load16RO<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm, ValueType Ty, SDPatternOperator loadop> {
+ let AddedComplexity = 10 in
+ def roW : LoadStore16RO<sz, V, opc, regtype, asm, (outs regtype:$Rt),
+ (ins GPR64sp:$Rn, GPR32:$Rm, ro_Wextend16:$extend),
+ [(set (Ty regtype:$Rt),
+ (loadop (ro_Windexed16 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend16:$extend)))]>,
+ Sched<[WriteLDIdx, ReadAdrBase]> {
+ let Inst{13} = 0b0;
+ }
+
+ let AddedComplexity = 10 in
+ def roX : LoadStore16RO<sz, V, opc, regtype, asm, (outs regtype:$Rt),
+ (ins GPR64sp:$Rn, GPR64:$Rm, ro_Xextend16:$extend),
+ [(set (Ty regtype:$Rt),
+ (loadop (ro_Xindexed16 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend16:$extend)))]>,
+ Sched<[WriteLDIdx, ReadAdrBase]> {
+ let Inst{13} = 0b1;
+ }
+
+ def : ROInstAlias<asm, regtype, !cast<Instruction>(NAME # "roX")>;
+}
+multiclass Store16RO<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm, ValueType Ty, SDPatternOperator storeop> {
+ let AddedComplexity = 10 in
+ def roW : LoadStore16RO<sz, V, opc, regtype, asm, (outs),
+ (ins regtype:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend16:$extend),
+ [(storeop (Ty regtype:$Rt),
+ (ro_Windexed16 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend16:$extend))]>,
+ Sched<[WriteSTIdx, ReadAdrBase]> {
+ let Inst{13} = 0b0;
+ }
+
+ let AddedComplexity = 10 in
+ def roX : LoadStore16RO<sz, V, opc, regtype, asm, (outs),
+ (ins regtype:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend16:$extend),
+ [(storeop (Ty regtype:$Rt),
+ (ro_Xindexed16 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend16:$extend))]>,
+ Sched<[WriteSTIdx, ReadAdrBase]> {
+ let Inst{13} = 0b1;
+ }
+
+ def : ROInstAlias<asm, regtype, !cast<Instruction>(NAME # "roX")>;
+}
- let Inst{31-30} = size;
+class LoadStore32RO<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm, dag ins, dag outs, list<dag> pat>
+ : I<ins, outs, asm, "\t$Rt, [$Rn, $Rm, $extend]", "", pat> {
+ bits<5> Rt;
+ bits<5> Rn;
+ bits<5> Rm;
+ bits<2> extend;
+ let Inst{31-30} = sz;
let Inst{29-27} = 0b111;
- let Inst{26} = v;
+ let Inst{26} = V;
let Inst{25-24} = 0b00;
let Inst{23-22} = opc;
- let Inst{21} = 0b0;
- let Inst{20-12} = SImm9;
+ let Inst{21} = 1;
+ let Inst{20-16} = Rm;
+ let Inst{15} = extend{1}; // sign extend Rm?
+ let Inst{14} = 1;
+ let Inst{12} = extend{0}; // do shift?
let Inst{11-10} = 0b10;
- // Inherit Rn in 9-5
- // Inherit Rt in 4-0
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rt;
}
-// Format for load-store (unscaled immediate) instructions.
-class A64I_LSunalimm<bits<2> size, bit v, bits<2> opc,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRtn<outs, ins, asmstr, patterns, itin> {
- bits<9> SImm9;
+multiclass Load32RO<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm, ValueType Ty, SDPatternOperator loadop> {
+ let AddedComplexity = 10 in
+ def roW : LoadStore32RO<sz, V, opc, regtype, asm, (outs regtype:$Rt),
+ (ins GPR64sp:$Rn, GPR32:$Rm, ro_Wextend32:$extend),
+ [(set (Ty regtype:$Rt),
+ (loadop (ro_Windexed32 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend32:$extend)))]>,
+ Sched<[WriteLDIdx, ReadAdrBase]> {
+ let Inst{13} = 0b0;
+ }
+
+ let AddedComplexity = 10 in
+ def roX : LoadStore32RO<sz, V, opc, regtype, asm, (outs regtype:$Rt),
+ (ins GPR64sp:$Rn, GPR64:$Rm, ro_Xextend32:$extend),
+ [(set (Ty regtype:$Rt),
+ (loadop (ro_Xindexed32 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend32:$extend)))]>,
+ Sched<[WriteLDIdx, ReadAdrBase]> {
+ let Inst{13} = 0b1;
+ }
+
+ def : ROInstAlias<asm, regtype, !cast<Instruction>(NAME # "roX")>;
+}
+
+multiclass Store32RO<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm, ValueType Ty, SDPatternOperator storeop> {
+ let AddedComplexity = 10 in
+ def roW : LoadStore32RO<sz, V, opc, regtype, asm, (outs),
+ (ins regtype:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend32:$extend),
+ [(storeop (Ty regtype:$Rt),
+ (ro_Windexed32 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend32:$extend))]>,
+ Sched<[WriteSTIdx, ReadAdrBase]> {
+ let Inst{13} = 0b0;
+ }
+
+ let AddedComplexity = 10 in
+ def roX : LoadStore32RO<sz, V, opc, regtype, asm, (outs),
+ (ins regtype:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend32:$extend),
+ [(storeop (Ty regtype:$Rt),
+ (ro_Xindexed32 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend32:$extend))]>,
+ Sched<[WriteSTIdx, ReadAdrBase]> {
+ let Inst{13} = 0b1;
+ }
+
+ def : ROInstAlias<asm, regtype, !cast<Instruction>(NAME # "roX")>;
+}
- let Inst{31-30} = size;
+class LoadStore64RO<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm, dag ins, dag outs, list<dag> pat>
+ : I<ins, outs, asm, "\t$Rt, [$Rn, $Rm, $extend]", "", pat> {
+ bits<5> Rt;
+ bits<5> Rn;
+ bits<5> Rm;
+ bits<2> extend;
+ let Inst{31-30} = sz;
let Inst{29-27} = 0b111;
- let Inst{26} = v;
+ let Inst{26} = V;
let Inst{25-24} = 0b00;
let Inst{23-22} = opc;
- let Inst{21} = 0b0;
- let Inst{20-12} = SImm9;
- let Inst{11-10} = 0b00;
- // Inherit Rn in 9-5
- // Inherit Rt in 4-0
+ let Inst{21} = 1;
+ let Inst{20-16} = Rm;
+ let Inst{15} = extend{1}; // sign extend Rm?
+ let Inst{14} = 1;
+ let Inst{12} = extend{0}; // do shift?
+ let Inst{11-10} = 0b10;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rt;
}
+multiclass Load64RO<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm, ValueType Ty, SDPatternOperator loadop> {
+ let AddedComplexity = 10, mayLoad = 1, mayStore = 0, hasSideEffects = 0 in
+ def roW : LoadStore64RO<sz, V, opc, regtype, asm, (outs regtype:$Rt),
+ (ins GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend),
+ [(set (Ty regtype:$Rt),
+ (loadop (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend64:$extend)))]>,
+ Sched<[WriteLDIdx, ReadAdrBase]> {
+ let Inst{13} = 0b0;
+ }
+
+ let AddedComplexity = 10, mayLoad = 1, mayStore = 0, hasSideEffects = 0 in
+ def roX : LoadStore64RO<sz, V, opc, regtype, asm, (outs regtype:$Rt),
+ (ins GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend),
+ [(set (Ty regtype:$Rt),
+ (loadop (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend64:$extend)))]>,
+ Sched<[WriteLDIdx, ReadAdrBase]> {
+ let Inst{13} = 0b1;
+ }
+
+ def : ROInstAlias<asm, regtype, !cast<Instruction>(NAME # "roX")>;
+}
-// Format for load-store (unsigned immediate) instructions.
-class A64I_LSunsigimm<bits<2> size, bit v, bits<2> opc,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRtn<outs, ins, asmstr, patterns, itin> {
- bits<12> UImm12;
+multiclass Store64RO<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm, ValueType Ty, SDPatternOperator storeop> {
+ let AddedComplexity = 10, mayLoad = 0, mayStore = 1, hasSideEffects = 0 in
+ def roW : LoadStore64RO<sz, V, opc, regtype, asm, (outs),
+ (ins regtype:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend),
+ [(storeop (Ty regtype:$Rt),
+ (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend64:$extend))]>,
+ Sched<[WriteSTIdx, ReadAdrBase]> {
+ let Inst{13} = 0b0;
+ }
+
+ let AddedComplexity = 10, mayLoad = 0, mayStore = 1, hasSideEffects = 0 in
+ def roX : LoadStore64RO<sz, V, opc, regtype, asm, (outs),
+ (ins regtype:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend),
+ [(storeop (Ty regtype:$Rt),
+ (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend64:$extend))]>,
+ Sched<[WriteSTIdx, ReadAdrBase]> {
+ let Inst{13} = 0b1;
+ }
+
+ def : ROInstAlias<asm, regtype, !cast<Instruction>(NAME # "roX")>;
+}
- let Inst{31-30} = size;
+class LoadStore128RO<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm, dag ins, dag outs, list<dag> pat>
+ : I<ins, outs, asm, "\t$Rt, [$Rn, $Rm, $extend]", "", pat> {
+ bits<5> Rt;
+ bits<5> Rn;
+ bits<5> Rm;
+ bits<2> extend;
+ let Inst{31-30} = sz;
let Inst{29-27} = 0b111;
- let Inst{26} = v;
- let Inst{25-24} = 0b01;
+ let Inst{26} = V;
+ let Inst{25-24} = 0b00;
let Inst{23-22} = opc;
- let Inst{21-10} = UImm12;
+ let Inst{21} = 1;
+ let Inst{20-16} = Rm;
+ let Inst{15} = extend{1}; // sign extend Rm?
+ let Inst{14} = 1;
+ let Inst{12} = extend{0}; // do shift?
+ let Inst{11-10} = 0b10;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rt;
}
-// Format for load-store register (register offset) instructions.
-class A64I_LSregoff<bits<2> size, bit v, bits<2> opc, bit optionlo,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRtn<outs, ins, asmstr, patterns, itin> {
- bits<5> Rm;
+multiclass Load128RO<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm, ValueType Ty, SDPatternOperator loadop> {
+ let AddedComplexity = 10, mayLoad = 1, mayStore = 0, hasSideEffects = 0 in
+ def roW : LoadStore128RO<sz, V, opc, regtype, asm, (outs regtype:$Rt),
+ (ins GPR64sp:$Rn, GPR32:$Rm, ro_Wextend128:$extend),
+ [(set (Ty regtype:$Rt),
+ (loadop (ro_Windexed128 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend128:$extend)))]>,
+ Sched<[WriteLDIdx, ReadAdrBase]> {
+ let Inst{13} = 0b0;
+ }
+
+ let AddedComplexity = 10, mayLoad = 1, mayStore = 0, hasSideEffects = 0 in
+ def roX : LoadStore128RO<sz, V, opc, regtype, asm, (outs regtype:$Rt),
+ (ins GPR64sp:$Rn, GPR64:$Rm, ro_Xextend128:$extend),
+ [(set (Ty regtype:$Rt),
+ (loadop (ro_Xindexed128 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend128:$extend)))]>,
+ Sched<[WriteLDIdx, ReadAdrBase]> {
+ let Inst{13} = 0b1;
+ }
+
+ def : ROInstAlias<asm, regtype, !cast<Instruction>(NAME # "roX")>;
+}
- // Complex operand selection needed for these instructions, so they
- // need an "addr" field for encoding/decoding to be generated.
- bits<3> Ext;
- // OptionHi = Ext{2-1}
- // S = Ext{0}
+multiclass Store128RO<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm, ValueType Ty, SDPatternOperator storeop> {
+ let AddedComplexity = 10, mayLoad = 0, mayStore = 1, hasSideEffects = 0 in
+ def roW : LoadStore128RO<sz, V, opc, regtype, asm, (outs),
+ (ins regtype:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend128:$extend),
+ [(storeop (Ty regtype:$Rt),
+ (ro_Windexed128 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend128:$extend))]>,
+ Sched<[WriteSTIdx, ReadAdrBase]> {
+ let Inst{13} = 0b0;
+ }
+
+ let AddedComplexity = 10, mayLoad = 0, mayStore = 1, hasSideEffects = 0 in
+ def roX : LoadStore128RO<sz, V, opc, regtype, asm, (outs),
+ (ins regtype:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend128:$extend),
+ [(storeop (Ty regtype:$Rt),
+ (ro_Xindexed128 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend128:$extend))]>,
+ Sched<[WriteSTIdx, ReadAdrBase]> {
+ let Inst{13} = 0b1;
+ }
+
+ def : ROInstAlias<asm, regtype, !cast<Instruction>(NAME # "roX")>;
+}
- let Inst{31-30} = size;
+let mayLoad = 0, mayStore = 0, hasSideEffects = 1 in
+class BasePrefetchRO<bits<2> sz, bit V, bits<2> opc, dag outs, dag ins,
+ string asm, list<dag> pat>
+ : I<outs, ins, asm, "\t$Rt, [$Rn, $Rm, $extend]", "", pat>,
+ Sched<[WriteLD]> {
+ bits<5> Rt;
+ bits<5> Rn;
+ bits<5> Rm;
+ bits<2> extend;
+ let Inst{31-30} = sz;
let Inst{29-27} = 0b111;
- let Inst{26} = v;
+ let Inst{26} = V;
let Inst{25-24} = 0b00;
let Inst{23-22} = opc;
- let Inst{21} = 0b1;
+ let Inst{21} = 1;
let Inst{20-16} = Rm;
- let Inst{15-14} = Ext{2-1};
- let Inst{13} = optionlo;
- let Inst{12} = Ext{0};
+ let Inst{15} = extend{1}; // sign extend Rm?
+ let Inst{14} = 1;
+ let Inst{12} = extend{0}; // do shift?
let Inst{11-10} = 0b10;
- // Inherits Rn in 9-5
- // Inherits Rt in 4-0
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rt;
+}
- let AddedComplexity = 50;
+multiclass PrefetchRO<bits<2> sz, bit V, bits<2> opc, string asm> {
+ def roW : BasePrefetchRO<sz, V, opc, (outs),
+ (ins prfop:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend),
+ asm, [(AArch64Prefetch imm:$Rt,
+ (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend64:$extend))]> {
+ let Inst{13} = 0b0;
+ }
+
+ def roX : BasePrefetchRO<sz, V, opc, (outs),
+ (ins prfop:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend),
+ asm, [(AArch64Prefetch imm:$Rt,
+ (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend64:$extend))]> {
+ let Inst{13} = 0b1;
+ }
+
+ def : InstAlias<"prfm $Rt, [$Rn, $Rm]",
+ (!cast<Instruction>(NAME # "roX") prfop:$Rt,
+ GPR64sp:$Rn, GPR64:$Rm, 0, 0)>;
}
-// Format for Load-store register pair (offset) instructions
-class A64I_LSPoffset<bits<2> opc, bit v, bit l,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRtt2n<outs, ins, asmstr, patterns, itin> {
- bits<7> SImm7;
+//---
+// Load/store unscaled immediate
+//---
- let Inst{31-30} = opc;
- let Inst{29-27} = 0b101;
- let Inst{26} = v;
- let Inst{25-23} = 0b010;
- let Inst{22} = l;
- let Inst{21-15} = SImm7;
- // Inherit Rt2 in 14-10
- // Inherit Rn in 9-5
- // Inherit Rt in 4-0
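+// LDUR/STUR-style addressing: an unscaled signed 9-bit byte offset, useful
+// when the offset is negative or not a multiple of the access size.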
+def am_unscaled8 : ComplexPattern<i64, 2, "SelectAddrModeUnscaled8", []>;
+def am_unscaled16 : ComplexPattern<i64, 2, "SelectAddrModeUnscaled16", []>;
+def am_unscaled32 : ComplexPattern<i64, 2, "SelectAddrModeUnscaled32", []>;
+def am_unscaled64 : ComplexPattern<i64, 2, "SelectAddrModeUnscaled64", []>;
+def am_unscaled128 : ComplexPattern<i64, 2, "SelectAddrModeUnscaled128", []>;
+
+class BaseLoadStoreUnscale<bits<2> sz, bit V, bits<2> opc, dag oops, dag iops,
+ string asm, list<dag> pattern>
+ : I<oops, iops, asm, "\t$Rt, [$Rn, $offset]", "", pattern> {
+ bits<5> Rt;
+ bits<5> Rn;
+ bits<9> offset;
+ let Inst{31-30} = sz;
+ let Inst{29-27} = 0b111;
+ let Inst{26} = V;
+ let Inst{25-24} = 0b00;
+ let Inst{23-22} = opc;
+ let Inst{21} = 0;
+ let Inst{20-12} = offset;
+ let Inst{11-10} = 0b00;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rt;
+
+ let DecoderMethod = "DecodeSignedLdStInstruction";
+}
+
+multiclass LoadUnscaled<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm, list<dag> pattern> {
+ let AddedComplexity = 1 in // try this before LoadUI
+ def i : BaseLoadStoreUnscale<sz, V, opc, (outs regtype:$Rt),
+ (ins GPR64sp:$Rn, simm9:$offset), asm, pattern>,
+ Sched<[WriteLD]>;
+
+ def : InstAlias<asm # " $Rt, [$Rn]",
+ (!cast<Instruction>(NAME # "i") regtype:$Rt, GPR64sp:$Rn, 0)>;
+}
+
+multiclass StoreUnscaled<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm, list<dag> pattern> {
+ let AddedComplexity = 1 in // try this before StoreUI
+ def i : BaseLoadStoreUnscale<sz, V, opc, (outs),
+ (ins regtype:$Rt, GPR64sp:$Rn, simm9:$offset),
+ asm, pattern>,
+ Sched<[WriteST]>;
+
+ def : InstAlias<asm # " $Rt, [$Rn]",
+ (!cast<Instruction>(NAME # "i") regtype:$Rt, GPR64sp:$Rn, 0)>;
+}
+
+multiclass PrefetchUnscaled<bits<2> sz, bit V, bits<2> opc, string asm,
+ list<dag> pat> {
+ let mayLoad = 0, mayStore = 0, hasSideEffects = 1 in
+ def i : BaseLoadStoreUnscale<sz, V, opc, (outs),
+ (ins prfop:$Rt, GPR64sp:$Rn, simm9:$offset),
+ asm, pat>,
+ Sched<[WriteLD]>;
+
+ def : InstAlias<asm # " $Rt, [$Rn]",
+ (!cast<Instruction>(NAME # "i") prfop:$Rt, GPR64sp:$Rn, 0)>;
+}
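For reference, each of the unscaled multiclasses above is instantiated once per mnemonic elsewhere in the AArch64 backend (AArch64InstrInfo.td). A minimal sketch of such an instantiation; the size/opc bits below are shown only as an indicative rendering of the byte-sized LDURB/STURB encodings, not as authoritative values:

    // Illustrative instantiations; opcode bits are indicative only.
    defm LDURBB : LoadUnscaled<0b00, 0, 0b01, GPR32, "ldurb",
                    [(set GPR32:$Rt,
                       (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
    defm STURBB : StoreUnscaled<0b00, 0, 0b00, GPR32, "sturb",
                    [(truncstorei8 GPR32:$Rt,
                       (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;

Each defm expands to the "i" instruction plus the zero-offset InstAlias declared inside the multiclass.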
+
+//---
+// Load/store unscaled immediate, unprivileged
+//---
+
+class BaseLoadStoreUnprivileged<bits<2> sz, bit V, bits<2> opc,
+ dag oops, dag iops, string asm>
+ : I<oops, iops, asm, "\t$Rt, [$Rn, $offset]", "", []> {
+ bits<5> Rt;
+ bits<5> Rn;
+ bits<9> offset;
+ let Inst{31-30} = sz;
+ let Inst{29-27} = 0b111;
+ let Inst{26} = V;
+ let Inst{25-24} = 0b00;
+ let Inst{23-22} = opc;
+ let Inst{21} = 0;
+ let Inst{20-12} = offset;
+ let Inst{11-10} = 0b10;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rt;
+
+ let DecoderMethod = "DecodeSignedLdStInstruction";
+}
+
+multiclass LoadUnprivileged<bits<2> sz, bit V, bits<2> opc,
+ RegisterClass regtype, string asm> {
+ let mayStore = 0, mayLoad = 1, hasSideEffects = 0 in
+ def i : BaseLoadStoreUnprivileged<sz, V, opc, (outs regtype:$Rt),
+ (ins GPR64sp:$Rn, simm9:$offset), asm>,
+ Sched<[WriteLD]>;
+
+ def : InstAlias<asm # " $Rt, [$Rn]",
+ (!cast<Instruction>(NAME # "i") regtype:$Rt, GPR64sp:$Rn, 0)>;
+}
+
+multiclass StoreUnprivileged<bits<2> sz, bit V, bits<2> opc,
+ RegisterClass regtype, string asm> {
+ let mayStore = 1, mayLoad = 0, hasSideEffects = 0 in
+ def i : BaseLoadStoreUnprivileged<sz, V, opc, (outs),
+ (ins regtype:$Rt, GPR64sp:$Rn, simm9:$offset),
+ asm>,
+ Sched<[WriteST]>;
+
+ def : InstAlias<asm # " $Rt, [$Rn]",
+ (!cast<Instruction>(NAME # "i") regtype:$Rt, GPR64sp:$Rn, 0)>;
+}
+
+//---
+// Load/store pre-indexed
+//---
+
+class BaseLoadStorePreIdx<bits<2> sz, bit V, bits<2> opc, dag oops, dag iops,
+ string asm, string cstr, list<dag> pat>
+ : I<oops, iops, asm, "\t$Rt, [$Rn, $offset]!", cstr, pat> {
+ bits<5> Rt;
+ bits<5> Rn;
+ bits<9> offset;
+ let Inst{31-30} = sz;
+ let Inst{29-27} = 0b111;
+ let Inst{26} = V;
+ let Inst{25-24} = 0;
+ let Inst{23-22} = opc;
+ let Inst{21} = 0;
+ let Inst{20-12} = offset;
+ let Inst{11-10} = 0b11;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rt;
+
+ let DecoderMethod = "DecodeSignedLdStInstruction";
}
-// Format for Load-store register pair (post-indexed) instructions
-class A64I_LSPpostind<bits<2> opc, bit v, bit l,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRtt2n<outs, ins, asmstr, patterns, itin> {
- bits<7> SImm7;
+let hasSideEffects = 0 in {
+let mayStore = 0, mayLoad = 1 in
+class LoadPreIdx<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm>
+ : BaseLoadStorePreIdx<sz, V, opc,
+ (outs GPR64sp:$wback, regtype:$Rt),
+ (ins GPR64sp:$Rn, simm9:$offset), asm,
+ "$Rn = $wback", []>,
+ Sched<[WriteLD, WriteAdr]>;
+
+let mayStore = 1, mayLoad = 0 in
+class StorePreIdx<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm, SDPatternOperator storeop, ValueType Ty>
+ : BaseLoadStorePreIdx<sz, V, opc,
+ (outs GPR64sp:$wback),
+ (ins regtype:$Rt, GPR64sp:$Rn, simm9:$offset),
+ asm, "$Rn = $wback",
+ [(set GPR64sp:$wback,
+ (storeop (Ty regtype:$Rt), GPR64sp:$Rn, simm9:$offset))]>,
+ Sched<[WriteAdr, WriteST]>;
+} // hasSideEffects = 0
+
+//---
+// Load/store post-indexed
+//---
+
+// (post-indexed) load/stores.
+class BaseLoadStorePostIdx<bits<2> sz, bit V, bits<2> opc, dag oops, dag iops,
+ string asm, string cstr, list<dag> pat>
+ : I<oops, iops, asm, "\t$Rt, [$Rn], $offset", cstr, pat> {
+ bits<5> Rt;
+ bits<5> Rn;
+ bits<9> offset;
+ let Inst{31-30} = sz;
+ let Inst{29-27} = 0b111;
+ let Inst{26} = V;
+ let Inst{25-24} = 0b00;
+ let Inst{23-22} = opc;
+ let Inst{21} = 0b0;
+ let Inst{20-12} = offset;
+ let Inst{11-10} = 0b01;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rt;
+ let DecoderMethod = "DecodeSignedLdStInstruction";
+}
+
+let hasSideEffects = 0 in {
+let mayStore = 0, mayLoad = 1 in
+class LoadPostIdx<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm>
+ : BaseLoadStorePostIdx<sz, V, opc,
+ (outs GPR64sp:$wback, regtype:$Rt),
+ (ins GPR64sp:$Rn, simm9:$offset),
+ asm, "$Rn = $wback", []>,
+ Sched<[WriteLD, WriteI]>;
+
+let mayStore = 1, mayLoad = 0 in
+class StorePostIdx<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
+ string asm, SDPatternOperator storeop, ValueType Ty>
+ : BaseLoadStorePostIdx<sz, V, opc,
+ (outs GPR64sp:$wback),
+ (ins regtype:$Rt, GPR64sp:$Rn, simm9:$offset),
+ asm, "$Rn = $wback",
+ [(set GPR64sp:$wback,
+ (storeop (Ty regtype:$Rt), GPR64sp:$Rn, simm9:$offset))]>,
+ Sched<[WriteAdr, WriteST, ReadAdrBase]>;
+} // hasSideEffects = 0
+
+
+//---
+// Load/store pair
+//---
+
+// (indexed, offset)
+
+class BaseLoadStorePairOffset<bits<2> opc, bit V, bit L, dag oops, dag iops,
+ string asm>
+ : I<oops, iops, asm, "\t$Rt, $Rt2, [$Rn, $offset]", "", []> {
+ bits<5> Rt;
+ bits<5> Rt2;
+ bits<5> Rn;
+ bits<7> offset;
let Inst{31-30} = opc;
let Inst{29-27} = 0b101;
- let Inst{26} = v;
- let Inst{25-23} = 0b001;
- let Inst{22} = l;
- let Inst{21-15} = SImm7;
- // Inherit Rt2 in 14-10
- // Inherit Rn in 9-5
- // Inherit Rt in 4-0
+ let Inst{26} = V;
+ let Inst{25-23} = 0b010;
+ let Inst{22} = L;
+ let Inst{21-15} = offset;
+ let Inst{14-10} = Rt2;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rt;
+
+ let DecoderMethod = "DecodePairLdStInstruction";
+}
+
+multiclass LoadPairOffset<bits<2> opc, bit V, RegisterClass regtype,
+ Operand indextype, string asm> {
+ let hasSideEffects = 0, mayStore = 0, mayLoad = 1 in
+ def i : BaseLoadStorePairOffset<opc, V, 1,
+ (outs regtype:$Rt, regtype:$Rt2),
+ (ins GPR64sp:$Rn, indextype:$offset), asm>,
+ Sched<[WriteLD, WriteLDHi]>;
+
+ def : InstAlias<asm # " $Rt, $Rt2, [$Rn]",
+ (!cast<Instruction>(NAME # "i") regtype:$Rt, regtype:$Rt2,
+ GPR64sp:$Rn, 0)>;
}
-// Format for Load-store register pair (pre-indexed) instructions
-class A64I_LSPpreind<bits<2> opc, bit v, bit l,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRtt2n<outs, ins, asmstr, patterns, itin> {
- bits<7> SImm7;
+multiclass StorePairOffset<bits<2> opc, bit V, RegisterClass regtype,
+ Operand indextype, string asm> {
+ let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in
+ def i : BaseLoadStorePairOffset<opc, V, 0, (outs),
+ (ins regtype:$Rt, regtype:$Rt2,
+ GPR64sp:$Rn, indextype:$offset),
+ asm>,
+ Sched<[WriteSTP]>;
+
+ def : InstAlias<asm # " $Rt, $Rt2, [$Rn]",
+ (!cast<Instruction>(NAME # "i") regtype:$Rt, regtype:$Rt2,
+ GPR64sp:$Rn, 0)>;
+}
+
+// (pre-indexed)
+class BaseLoadStorePairPreIdx<bits<2> opc, bit V, bit L, dag oops, dag iops,
+ string asm>
+ : I<oops, iops, asm, "\t$Rt, $Rt2, [$Rn, $offset]!", "$Rn = $wback", []> {
+ bits<5> Rt;
+ bits<5> Rt2;
+ bits<5> Rn;
+ bits<7> offset;
let Inst{31-30} = opc;
let Inst{29-27} = 0b101;
- let Inst{26} = v;
+ let Inst{26} = V;
let Inst{25-23} = 0b011;
- let Inst{22} = l;
- let Inst{21-15} = SImm7;
- // Inherit Rt2 in 14-10
- // Inherit Rn in 9-5
- // Inherit Rt in 4-0
+ let Inst{22} = L;
+ let Inst{21-15} = offset;
+ let Inst{14-10} = Rt2;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rt;
+
+ let DecoderMethod = "DecodePairLdStInstruction";
}
-// Format for Load-store non-temporal register pair (offset) instructions
-class A64I_LSPnontemp<bits<2> opc, bit v, bit l,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRtt2n<outs, ins, asmstr, patterns, itin> {
- bits<7> SImm7;
+let hasSideEffects = 0 in {
+let mayStore = 0, mayLoad = 1 in
+class LoadPairPreIdx<bits<2> opc, bit V, RegisterClass regtype,
+ Operand indextype, string asm>
+ : BaseLoadStorePairPreIdx<opc, V, 1,
+ (outs GPR64sp:$wback, regtype:$Rt, regtype:$Rt2),
+ (ins GPR64sp:$Rn, indextype:$offset), asm>,
+ Sched<[WriteLD, WriteLDHi, WriteAdr]>;
+
+let mayStore = 1, mayLoad = 0 in
+class StorePairPreIdx<bits<2> opc, bit V, RegisterClass regtype,
+ Operand indextype, string asm>
+ : BaseLoadStorePairPreIdx<opc, V, 0, (outs GPR64sp:$wback),
+ (ins regtype:$Rt, regtype:$Rt2,
+ GPR64sp:$Rn, indextype:$offset),
+ asm>,
+ Sched<[WriteAdr, WriteSTP]>;
+} // hasSideEffects = 0
+
+// (post-indexed)
+
+class BaseLoadStorePairPostIdx<bits<2> opc, bit V, bit L, dag oops, dag iops,
+ string asm>
+ : I<oops, iops, asm, "\t$Rt, $Rt2, [$Rn], $offset", "$Rn = $wback", []> {
+ bits<5> Rt;
+ bits<5> Rt2;
+ bits<5> Rn;
+ bits<7> offset;
+ let Inst{31-30} = opc;
+ let Inst{29-27} = 0b101;
+ let Inst{26} = V;
+ let Inst{25-23} = 0b001;
+ let Inst{22} = L;
+ let Inst{21-15} = offset;
+ let Inst{14-10} = Rt2;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rt;
+
+ let DecoderMethod = "DecodePairLdStInstruction";
+}
+let hasSideEffects = 0 in {
+let mayStore = 0, mayLoad = 1 in
+class LoadPairPostIdx<bits<2> opc, bit V, RegisterClass regtype,
+ Operand idxtype, string asm>
+ : BaseLoadStorePairPostIdx<opc, V, 1,
+ (outs GPR64sp:$wback, regtype:$Rt, regtype:$Rt2),
+ (ins GPR64sp:$Rn, idxtype:$offset), asm>,
+ Sched<[WriteLD, WriteLDHi, WriteAdr]>;
+
+let mayStore = 1, mayLoad = 0 in
+class StorePairPostIdx<bits<2> opc, bit V, RegisterClass regtype,
+ Operand idxtype, string asm>
+ : BaseLoadStorePairPostIdx<opc, V, 0, (outs),
+ (ins GPR64sp:$wback, regtype:$Rt, regtype:$Rt2,
+ GPR64sp:$Rn, idxtype:$offset),
+ asm>,
+ Sched<[WriteAdr, WriteSTP]>;
+} // hasSideEffects = 0
+
+// (no-allocate)
+
+class BaseLoadStorePairNoAlloc<bits<2> opc, bit V, bit L, dag oops, dag iops,
+ string asm>
+ : I<oops, iops, asm, "\t$Rt, $Rt2, [$Rn, $offset]", "", []> {
+ bits<5> Rt;
+ bits<5> Rt2;
+ bits<5> Rn;
+ bits<7> offset;
let Inst{31-30} = opc;
let Inst{29-27} = 0b101;
- let Inst{26} = v;
+ let Inst{26} = V;
let Inst{25-23} = 0b000;
- let Inst{22} = l;
- let Inst{21-15} = SImm7;
- // Inherit Rt2 in 14-10
- // Inherit Rn in 9-5
- // Inherit Rt in 4-0
-}
-
-// Format for Logical (immediate) instructions
-class A64I_logicalimm<bit sf, bits<2> opc,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin> {
- bit N;
- bits<6> ImmR;
- bits<6> ImmS;
-
- // N, ImmR and ImmS have no separate existence in any assembly syntax (or for
- // selection), so we'll combine them into a single field here.
- bits<13> Imm;
- // N = Imm{12};
- // ImmR = Imm{11-6};
- // ImmS = Imm{5-0};
+ let Inst{22} = L;
+ let Inst{21-15} = offset;
+ let Inst{14-10} = Rt2;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rt;
- let Inst{31} = sf;
- let Inst{30-29} = opc;
- let Inst{28-23} = 0b100100;
- let Inst{22} = Imm{12};
- let Inst{21-16} = Imm{11-6};
- let Inst{15-10} = Imm{5-0};
- // Rn inherited in 9-5
- // Rd inherited in 4-0
+ let DecoderMethod = "DecodePairLdStInstruction";
}
-// Format for Logical (shifted register) instructions
-class A64I_logicalshift<bit sf, bits<2> opc, bits<2> shift, bit N,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
- bits<6> Imm6;
+multiclass LoadPairNoAlloc<bits<2> opc, bit V, RegisterClass regtype,
+ Operand indextype, string asm> {
+ let hasSideEffects = 0, mayStore = 0, mayLoad = 1 in
+ def i : BaseLoadStorePairNoAlloc<opc, V, 1,
+ (outs regtype:$Rt, regtype:$Rt2),
+ (ins GPR64sp:$Rn, indextype:$offset), asm>,
+ Sched<[WriteLD, WriteLDHi]>;
- let Inst{31} = sf;
- let Inst{30-29} = opc;
- let Inst{28-24} = 0b01010;
- let Inst{23-22} = shift;
- let Inst{21} = N;
- // Rm inherited
- let Inst{15-10} = Imm6;
- // Rn inherited
- // Rd inherited
-}
-
-// Format for Move wide (immediate)
-class A64I_movw<bit sf, bits<2> opc,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRd<outs, ins, asmstr, patterns, itin> {
- bits<16> UImm16;
- bits<2> Shift; // Called "hw" officially
- let Inst{31} = sf;
- let Inst{30-29} = opc;
- let Inst{28-23} = 0b100101;
- let Inst{22-21} = Shift;
- let Inst{20-5} = UImm16;
- // Inherits Rd in 4-0
+ def : InstAlias<asm # "\t$Rt, $Rt2, [$Rn]",
+ (!cast<Instruction>(NAME # "i") regtype:$Rt, regtype:$Rt2,
+ GPR64sp:$Rn, 0)>;
}
-// Format for PC-relative addressing instructions, ADR and ADRP.
-class A64I_PCADR<bit op,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRd<outs, ins, asmstr, patterns, itin> {
- bits<21> Label;
+multiclass StorePairNoAlloc<bits<2> opc, bit V, RegisterClass regtype,
+ Operand indextype, string asm> {
+ let hasSideEffects = 0, mayStore = 1, mayLoad = 0 in
+ def i : BaseLoadStorePairNoAlloc<opc, V, 0, (outs),
+ (ins regtype:$Rt, regtype:$Rt2,
+ GPR64sp:$Rn, indextype:$offset),
+ asm>,
+ Sched<[WriteSTP]>;
+
+ def : InstAlias<asm # "\t$Rt, $Rt2, [$Rn]",
+ (!cast<Instruction>(NAME # "i") regtype:$Rt, regtype:$Rt2,
+ GPR64sp:$Rn, 0)>;
+}
- let Inst{31} = op;
- let Inst{30-29} = Label{1-0};
- let Inst{28-24} = 0b10000;
- let Inst{23-5} = Label{20-2};
+//---
+// Load/store exclusive
+//---
+
+// True exclusive operations write to and/or read from the system's exclusive
+// monitors, which as far as a compiler is concerned can be modelled as a
+// random shared memory address. Hence LoadExclusive mayStore.
+//
+// Since these instructions have the undefined register bits set to 1 in
+// their canonical form, we need a post encoder method to set those bits
+// to 1 when encoding these instructions. We do this using the
+// fixLoadStoreExclusive function. This function has template parameters:
+//
+// fixLoadStoreExclusive<int hasRs, int hasRt2>
+//
+// hasRs indicates that the instruction uses the Rs field, so we won't set
+// it to 1 (and the same for Rt2). We don't need template parameters for
+// the other register fields since Rt and Rn are always used.
+//
+let hasSideEffects = 1, mayLoad = 1, mayStore = 1 in
+class BaseLoadStoreExclusive<bits<2> sz, bit o2, bit L, bit o1, bit o0,
+ dag oops, dag iops, string asm, string operands>
+ : I<oops, iops, asm, operands, "", []> {
+ let Inst{31-30} = sz;
+ let Inst{29-24} = 0b001000;
+ let Inst{23} = o2;
+ let Inst{22} = L;
+ let Inst{21} = o1;
+ let Inst{15} = o0;
+
+ let DecoderMethod = "DecodeExclusiveLdStInstruction";
}
-// Format for system instructions
-class A64I_system<bit l,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64Inst<outs, ins, asmstr, patterns, itin> {
- bits<2> Op0;
- bits<3> Op1;
- bits<4> CRn;
- bits<4> CRm;
- bits<3> Op2;
+// Neither Rs nor Rt2 operands.
+class LoadStoreExclusiveSimple<bits<2> sz, bit o2, bit L, bit o1, bit o0,
+ dag oops, dag iops, string asm, string operands>
+ : BaseLoadStoreExclusive<sz, o2, L, o1, o0, oops, iops, asm, operands> {
bits<5> Rt;
+ bits<5> Rn;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rt;
- let Inst{31-22} = 0b1101010100;
- let Inst{21} = l;
- let Inst{20-19} = Op0;
- let Inst{18-16} = Op1;
- let Inst{15-12} = CRn;
- let Inst{11-8} = CRm;
- let Inst{7-5} = Op2;
+ let PostEncoderMethod = "fixLoadStoreExclusive<0,0>";
+}
+
+// Simple load acquires don't set the exclusive monitor
+let mayLoad = 1, mayStore = 0 in
+class LoadAcquire<bits<2> sz, bit o2, bit L, bit o1, bit o0,
+ RegisterClass regtype, string asm>
+ : LoadStoreExclusiveSimple<sz, o2, L, o1, o0, (outs regtype:$Rt),
+ (ins GPR64sp0:$Rn), asm, "\t$Rt, [$Rn]">,
+ Sched<[WriteLD]>;
+
+class LoadExclusive<bits<2> sz, bit o2, bit L, bit o1, bit o0,
+ RegisterClass regtype, string asm>
+ : LoadStoreExclusiveSimple<sz, o2, L, o1, o0, (outs regtype:$Rt),
+ (ins GPR64sp0:$Rn), asm, "\t$Rt, [$Rn]">,
+ Sched<[WriteLD]>;
+
+class LoadExclusivePair<bits<2> sz, bit o2, bit L, bit o1, bit o0,
+ RegisterClass regtype, string asm>
+ : BaseLoadStoreExclusive<sz, o2, L, o1, o0,
+ (outs regtype:$Rt, regtype:$Rt2),
+ (ins GPR64sp0:$Rn), asm,
+ "\t$Rt, $Rt2, [$Rn]">,
+ Sched<[WriteLD, WriteLDHi]> {
+ bits<5> Rt;
+ bits<5> Rt2;
+ bits<5> Rn;
+ let Inst{14-10} = Rt2;
+ let Inst{9-5} = Rn;
let Inst{4-0} = Rt;
- // These instructions can do horrible things.
- let hasSideEffects = 1;
+ let PostEncoderMethod = "fixLoadStoreExclusive<0,1>";
}
-// Format for unconditional branch (immediate) instructions
-class A64I_Bimm<bit op,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64Inst<outs, ins, asmstr, patterns, itin> {
- // Doubly special in not even sharing register fields with other
- // instructions, so we create our own Rn here.
- bits<26> Label;
+// Simple store release operations do not check the exclusive monitor.
+let mayLoad = 0, mayStore = 1 in
+class StoreRelease<bits<2> sz, bit o2, bit L, bit o1, bit o0,
+ RegisterClass regtype, string asm>
+ : LoadStoreExclusiveSimple<sz, o2, L, o1, o0, (outs),
+ (ins regtype:$Rt, GPR64sp0:$Rn),
+ asm, "\t$Rt, [$Rn]">,
+ Sched<[WriteST]>;
+
+let mayLoad = 1, mayStore = 1 in
+class StoreExclusive<bits<2> sz, bit o2, bit L, bit o1, bit o0,
+ RegisterClass regtype, string asm>
+ : BaseLoadStoreExclusive<sz, o2, L, o1, o0, (outs GPR32:$Ws),
+ (ins regtype:$Rt, GPR64sp0:$Rn),
+ asm, "\t$Ws, $Rt, [$Rn]">,
+ Sched<[WriteSTX]> {
+ bits<5> Ws;
+ bits<5> Rt;
+ bits<5> Rn;
+ let Inst{20-16} = Ws;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rt;
- let Inst{31} = op;
- let Inst{30-26} = 0b00101;
- let Inst{25-0} = Label;
+ let Constraints = "@earlyclobber $Ws";
+ let PostEncoderMethod = "fixLoadStoreExclusive<1,0>";
+}
+
+class StoreExclusivePair<bits<2> sz, bit o2, bit L, bit o1, bit o0,
+ RegisterClass regtype, string asm>
+ : BaseLoadStoreExclusive<sz, o2, L, o1, o0,
+ (outs GPR32:$Ws),
+ (ins regtype:$Rt, regtype:$Rt2, GPR64sp0:$Rn),
+ asm, "\t$Ws, $Rt, $Rt2, [$Rn]">,
+ Sched<[WriteSTX]> {
+ bits<5> Ws;
+ bits<5> Rt;
+ bits<5> Rt2;
+ bits<5> Rn;
+ let Inst{20-16} = Ws;
+ let Inst{14-10} = Rt2;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rt;
+
+ let Constraints = "@earlyclobber $Ws";
}
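To make the fixLoadStoreExclusive<hasRs, hasRt2> parameters described above concrete, here is roughly how the 32-bit acquire/release and exclusive classes are instantiated elsewhere in the backend; the sz/o-bit values are indicative, not authoritative:

    // Sketch only; encodings follow the LDAR/LDXR/STLR/STXR (32-bit) forms.
    def LDARW  : LoadAcquire   <0b10, 1, 1, 0, 1, GPR32, "ldar">;
    def LDXRW  : LoadExclusive <0b10, 0, 1, 0, 0, GPR32, "ldxr">;
    def STLRW  : StoreRelease  <0b10, 1, 0, 0, 1, GPR32, "stlr">;
    def STXRW  : StoreExclusive<0b10, 0, 0, 0, 0, GPR32, "stxr">;

The simple forms inherit fixLoadStoreExclusive<0,0>, so the unused Rs and Rt2 fields are forced to all-ones at encoding time; StoreExclusive uses <1,0> because its status register $Ws occupies the Rs field.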
-// Format for Test & branch (immediate) instructions
-class A64I_TBimm<bit op,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRt<outs, ins, asmstr, patterns, itin> {
- // Doubly special in not even sharing register fields with other
- // instructions, so we create our own Rn here.
- bits<6> Imm;
- bits<14> Label;
+//---
+// Exception generation
+//---
- let Inst{31} = Imm{5};
- let Inst{30-25} = 0b011011;
- let Inst{24} = op;
- let Inst{23-19} = Imm{4-0};
- let Inst{18-5} = Label;
- // Inherit Rt in 4-0
+let mayLoad = 0, mayStore = 0, hasSideEffects = 1 in
+class ExceptionGeneration<bits<3> op1, bits<2> ll, string asm>
+ : I<(outs), (ins imm0_65535:$imm), asm, "\t$imm", "", []>,
+ Sched<[WriteSys]> {
+ bits<16> imm;
+ let Inst{31-24} = 0b11010100;
+ let Inst{23-21} = op1;
+ let Inst{20-5} = imm;
+ let Inst{4-2} = 0b000;
+ let Inst{1-0} = ll;
}
-// Format for Unconditional branch (register) instructions, including
-// RET. Shares no fields with instructions further up the hierarchy
-// so top-level.
-class A64I_Breg<bits<4> opc, bits<5> op2, bits<6> op3, bits<5> op4,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64Inst<outs, ins, asmstr, patterns, itin> {
- // Doubly special in not even sharing register fields with other
- // instructions, so we create our own Rn here.
+let Predicates = [HasFPARMv8] in {
+
+//---
+// Floating point to integer conversion
+//---
+
+class BaseFPToIntegerUnscaled<bits<2> type, bits<2> rmode, bits<3> opcode,
+ RegisterClass srcType, RegisterClass dstType,
+ string asm, list<dag> pattern>
+ : I<(outs dstType:$Rd), (ins srcType:$Rn),
+ asm, "\t$Rd, $Rn", "", pattern>,
+ Sched<[WriteFCvt]> {
+ bits<5> Rd;
bits<5> Rn;
+ let Inst{30-29} = 0b00;
+ let Inst{28-24} = 0b11110;
+ let Inst{23-22} = type;
+ let Inst{21} = 1;
+ let Inst{20-19} = rmode;
+ let Inst{18-16} = opcode;
+ let Inst{15-10} = 0;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
- let Inst{31-25} = 0b1101011;
- let Inst{24-21} = opc;
- let Inst{20-16} = op2;
- let Inst{15-10} = op3;
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseFPToInteger<bits<2> type, bits<2> rmode, bits<3> opcode,
+ RegisterClass srcType, RegisterClass dstType,
+ Operand immType, string asm, list<dag> pattern>
+ : I<(outs dstType:$Rd), (ins srcType:$Rn, immType:$scale),
+ asm, "\t$Rd, $Rn, $scale", "", pattern>,
+ Sched<[WriteFCvt]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<6> scale;
+ let Inst{30-29} = 0b00;
+ let Inst{28-24} = 0b11110;
+ let Inst{23-22} = type;
+ let Inst{21} = 0;
+ let Inst{20-19} = rmode;
+ let Inst{18-16} = opcode;
+ let Inst{15-10} = scale;
let Inst{9-5} = Rn;
- let Inst{4-0} = op4;
+ let Inst{4-0} = Rd;
}
+multiclass FPToIntegerUnscaled<bits<2> rmode, bits<3> opcode, string asm,
+ SDPatternOperator OpN> {
+ // Unscaled single-precision to 32-bit
+ def UWSr : BaseFPToIntegerUnscaled<0b00, rmode, opcode, FPR32, GPR32, asm,
+ [(set GPR32:$Rd, (OpN FPR32:$Rn))]> {
+ let Inst{31} = 0; // 32-bit GPR flag
+ }
+
+ // Unscaled single-precision to 64-bit
+ def UXSr : BaseFPToIntegerUnscaled<0b00, rmode, opcode, FPR32, GPR64, asm,
+ [(set GPR64:$Rd, (OpN FPR32:$Rn))]> {
+ let Inst{31} = 1; // 64-bit GPR flag
+ }
+
+ // Unscaled double-precision to 32-bit
+ def UWDr : BaseFPToIntegerUnscaled<0b01, rmode, opcode, FPR64, GPR32, asm,
+ [(set GPR32:$Rd, (OpN (f64 FPR64:$Rn)))]> {
+ let Inst{31} = 0; // 32-bit GPR flag
+ }
+
+ // Unscaled double-precision to 64-bit
+ def UXDr : BaseFPToIntegerUnscaled<0b01, rmode, opcode, FPR64, GPR64, asm,
+ [(set GPR64:$Rd, (OpN (f64 FPR64:$Rn)))]> {
+ let Inst{31} = 1; // 64-bit GPR flag
+ }
+}
-//===----------------------------------------------------------------------===//
-//
-// Neon Instruction Format Definitions.
-//
+multiclass FPToIntegerScaled<bits<2> rmode, bits<3> opcode, string asm,
+ SDPatternOperator OpN> {
+ // Scaled single-precision to 32-bit
+ def SWSri : BaseFPToInteger<0b00, rmode, opcode, FPR32, GPR32,
+ fixedpoint_f32_i32, asm,
+ [(set GPR32:$Rd, (OpN (fmul FPR32:$Rn,
+ fixedpoint_f32_i32:$scale)))]> {
+ let Inst{31} = 0; // 32-bit GPR flag
+ let scale{5} = 1;
+ }
+
+ // Scaled single-precision to 64-bit
+ def SXSri : BaseFPToInteger<0b00, rmode, opcode, FPR32, GPR64,
+ fixedpoint_f32_i64, asm,
+ [(set GPR64:$Rd, (OpN (fmul FPR32:$Rn,
+ fixedpoint_f32_i64:$scale)))]> {
+ let Inst{31} = 1; // 64-bit GPR flag
+ }
+
+ // Scaled double-precision to 32-bit
+ def SWDri : BaseFPToInteger<0b01, rmode, opcode, FPR64, GPR32,
+ fixedpoint_f64_i32, asm,
+ [(set GPR32:$Rd, (OpN (fmul FPR64:$Rn,
+ fixedpoint_f64_i32:$scale)))]> {
+ let Inst{31} = 0; // 32-bit GPR flag
+ let scale{5} = 1;
+ }
+
+ // Scaled double-precision to 64-bit
+ def SXDri : BaseFPToInteger<0b01, rmode, opcode, FPR64, GPR64,
+ fixedpoint_f64_i64, asm,
+ [(set GPR64:$Rd, (OpN (fmul FPR64:$Rn,
+ fixedpoint_f64_i64:$scale)))]> {
+ let Inst{31} = 1; // 64-bit GPR flag
+ }
+}
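The rmode/opcode pair selects the rounding behaviour. As an indicative sketch, the round-toward-zero converts would be instantiated along these lines (bit values illustrative; fp_to_sint/fp_to_uint are the generic SelectionDAG nodes):

    defm FCVTZS : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", fp_to_sint>;
    defm FCVTZU : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", fp_to_uint>;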
+
+//---
+// Integer to floating point conversion
+//---
+
+let mayStore = 0, mayLoad = 0, hasSideEffects = 0 in
+class BaseIntegerToFP<bit isUnsigned,
+ RegisterClass srcType, RegisterClass dstType,
+ Operand immType, string asm, list<dag> pattern>
+ : I<(outs dstType:$Rd), (ins srcType:$Rn, immType:$scale),
+ asm, "\t$Rd, $Rn, $scale", "", pattern>,
+ Sched<[WriteFCvt]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<6> scale;
+ let Inst{30-23} = 0b00111100;
+ let Inst{21-17} = 0b00001;
+ let Inst{16} = isUnsigned;
+ let Inst{15-10} = scale;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+class BaseIntegerToFPUnscaled<bit isUnsigned,
+ RegisterClass srcType, RegisterClass dstType,
+ ValueType dvt, string asm, SDNode node>
+ : I<(outs dstType:$Rd), (ins srcType:$Rn),
+ asm, "\t$Rd, $Rn", "", [(set (dvt dstType:$Rd), (node srcType:$Rn))]>,
+ Sched<[WriteFCvt]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<6> scale;
+ let Inst{30-23} = 0b00111100;
+ let Inst{21-17} = 0b10001;
+ let Inst{16} = isUnsigned;
+ let Inst{15-10} = 0b000000;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+multiclass IntegerToFP<bit isUnsigned, string asm, SDNode node> {
+ // Unscaled
+ def UWSri: BaseIntegerToFPUnscaled<isUnsigned, GPR32, FPR32, f32, asm, node> {
+ let Inst{31} = 0; // 32-bit GPR flag
+ let Inst{22} = 0; // 32-bit FPR flag
+ }
+
+ def UWDri: BaseIntegerToFPUnscaled<isUnsigned, GPR32, FPR64, f64, asm, node> {
+ let Inst{31} = 0; // 32-bit GPR flag
+ let Inst{22} = 1; // 64-bit FPR flag
+ }
+
+ def UXSri: BaseIntegerToFPUnscaled<isUnsigned, GPR64, FPR32, f32, asm, node> {
+ let Inst{31} = 1; // 64-bit GPR flag
+ let Inst{22} = 0; // 32-bit FPR flag
+ }
+
+ def UXDri: BaseIntegerToFPUnscaled<isUnsigned, GPR64, FPR64, f64, asm, node> {
+ let Inst{31} = 1; // 64-bit GPR flag
+ let Inst{22} = 1; // 64-bit FPR flag
+ }
+
+ // Scaled
+ def SWSri: BaseIntegerToFP<isUnsigned, GPR32, FPR32, fixedpoint_f32_i32, asm,
+ [(set FPR32:$Rd,
+ (fdiv (node GPR32:$Rn),
+ fixedpoint_f32_i32:$scale))]> {
+ let Inst{31} = 0; // 32-bit GPR flag
+ let Inst{22} = 0; // 32-bit FPR flag
+ let scale{5} = 1;
+ }
+
+ def SWDri: BaseIntegerToFP<isUnsigned, GPR32, FPR64, fixedpoint_f64_i32, asm,
+ [(set FPR64:$Rd,
+ (fdiv (node GPR32:$Rn),
+ fixedpoint_f64_i32:$scale))]> {
+ let Inst{31} = 0; // 32-bit GPR flag
+ let Inst{22} = 1; // 64-bit FPR flag
+ let scale{5} = 1;
+ }
+
+ def SXSri: BaseIntegerToFP<isUnsigned, GPR64, FPR32, fixedpoint_f32_i64, asm,
+ [(set FPR32:$Rd,
+ (fdiv (node GPR64:$Rn),
+ fixedpoint_f32_i64:$scale))]> {
+ let Inst{31} = 1; // 64-bit GPR flag
+ let Inst{22} = 0; // 32-bit FPR flag
+ }
+
+ def SXDri: BaseIntegerToFP<isUnsigned, GPR64, FPR64, fixedpoint_f64_i64, asm,
+ [(set FPR64:$Rd,
+ (fdiv (node GPR64:$Rn),
+ fixedpoint_f64_i64:$scale))]> {
+ let Inst{31} = 1; // 64-bit GPR flag
+ let Inst{22} = 1; // 64-bit FPR flag
+ }
+}
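The isUnsigned bit is what separates the signed and unsigned convert mnemonics; a typical pair of instantiations (sint_to_fp/uint_to_fp are the generic SelectionDAG nodes):

    defm SCVTF : IntegerToFP<0, "scvtf", sint_to_fp>;
    defm UCVTF : IntegerToFP<1, "ucvtf", uint_to_fp>;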
+
+//---
+// Unscaled integer <-> floating point conversion (i.e. FMOV)
+//---
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseUnscaledConversion<bits<2> rmode, bits<3> opcode,
+ RegisterClass srcType, RegisterClass dstType,
+ string asm>
+ : I<(outs dstType:$Rd), (ins srcType:$Rn), asm, "\t$Rd, $Rn", "",
+ // We use COPY_TO_REGCLASS for these bitconvert operations.
+ // copyPhysReg() expands the resultant COPY instructions after
+ // regalloc is done. This gives greater freedom for the allocator
+      // and related passes (coalescing, copy propagation, et al.) to
+ // be more effective.
+ [/*(set (dvt dstType:$Rd), (bitconvert (svt srcType:$Rn)))*/]>,
+ Sched<[WriteFCopy]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{30-23} = 0b00111100;
+ let Inst{21} = 1;
+ let Inst{20-19} = rmode;
+ let Inst{18-16} = opcode;
+ let Inst{15-10} = 0b000000;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseUnscaledConversionToHigh<bits<2> rmode, bits<3> opcode,
+ RegisterClass srcType, RegisterOperand dstType, string asm,
+ string kind>
+ : I<(outs dstType:$Rd), (ins srcType:$Rn, VectorIndex1:$idx), asm,
+ "{\t$Rd"#kind#"$idx, $Rn|"#kind#"\t$Rd$idx, $Rn}", "", []>,
+ Sched<[WriteFCopy]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{30-23} = 0b00111101;
+ let Inst{21} = 1;
+ let Inst{20-19} = rmode;
+ let Inst{18-16} = opcode;
+ let Inst{15-10} = 0b000000;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+
+ let DecoderMethod = "DecodeFMOVLaneInstruction";
+}
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseUnscaledConversionFromHigh<bits<2> rmode, bits<3> opcode,
+ RegisterOperand srcType, RegisterClass dstType, string asm,
+ string kind>
+ : I<(outs dstType:$Rd), (ins srcType:$Rn, VectorIndex1:$idx), asm,
+ "{\t$Rd, $Rn"#kind#"$idx|"#kind#"\t$Rd, $Rn$idx}", "", []>,
+ Sched<[WriteFCopy]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{30-23} = 0b00111101;
+ let Inst{21} = 1;
+ let Inst{20-19} = rmode;
+ let Inst{18-16} = opcode;
+ let Inst{15-10} = 0b000000;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+
+ let DecoderMethod = "DecodeFMOVLaneInstruction";
+}
+
+
+
+multiclass UnscaledConversion<string asm> {
+ def WSr : BaseUnscaledConversion<0b00, 0b111, GPR32, FPR32, asm> {
+ let Inst{31} = 0; // 32-bit GPR flag
+ let Inst{22} = 0; // 32-bit FPR flag
+ }
+
+ def XDr : BaseUnscaledConversion<0b00, 0b111, GPR64, FPR64, asm> {
+ let Inst{31} = 1; // 64-bit GPR flag
+ let Inst{22} = 1; // 64-bit FPR flag
+ }
+
+ def SWr : BaseUnscaledConversion<0b00, 0b110, FPR32, GPR32, asm> {
+ let Inst{31} = 0; // 32-bit GPR flag
+ let Inst{22} = 0; // 32-bit FPR flag
+ }
+
+ def DXr : BaseUnscaledConversion<0b00, 0b110, FPR64, GPR64, asm> {
+ let Inst{31} = 1; // 64-bit GPR flag
+ let Inst{22} = 1; // 64-bit FPR flag
+ }
+
+ def XDHighr : BaseUnscaledConversionToHigh<0b01, 0b111, GPR64, V128,
+ asm, ".d"> {
+ let Inst{31} = 1;
+ let Inst{22} = 0;
+ }
+
+ def DXHighr : BaseUnscaledConversionFromHigh<0b01, 0b110, V128, GPR64,
+ asm, ".d"> {
+ let Inst{31} = 1;
+ let Inst{22} = 0;
+ }
+}
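All of the GPR/FPR and lane moves above are exposed through a single mnemonic, so the expected instantiation is simply:

    defm FMOV : UnscaledConversion<"fmov">;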
+
+//---
+// Floating point conversion
+//---
+
+class BaseFPConversion<bits<2> type, bits<2> opcode, RegisterClass dstType,
+ RegisterClass srcType, string asm, list<dag> pattern>
+ : I<(outs dstType:$Rd), (ins srcType:$Rn), asm, "\t$Rd, $Rn", "", pattern>,
+ Sched<[WriteFCvt]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31-24} = 0b00011110;
+ let Inst{23-22} = type;
+ let Inst{21-17} = 0b10001;
+ let Inst{16-15} = opcode;
+ let Inst{14-10} = 0b10000;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+multiclass FPConversion<string asm> {
+ // Double-precision to Half-precision
+ def HDr : BaseFPConversion<0b01, 0b11, FPR16, FPR64, asm,
+ [(set FPR16:$Rd, (fround FPR64:$Rn))]>;
+
+ // Double-precision to Single-precision
+ def SDr : BaseFPConversion<0b01, 0b00, FPR32, FPR64, asm,
+ [(set FPR32:$Rd, (fround FPR64:$Rn))]>;
+
+ // Half-precision to Double-precision
+ def DHr : BaseFPConversion<0b11, 0b01, FPR64, FPR16, asm,
+ [(set FPR64:$Rd, (fextend FPR16:$Rn))]>;
+
+ // Half-precision to Single-precision
+ def SHr : BaseFPConversion<0b11, 0b00, FPR32, FPR16, asm,
+ [(set FPR32:$Rd, (fextend FPR16:$Rn))]>;
+
+ // Single-precision to Double-precision
+ def DSr : BaseFPConversion<0b00, 0b01, FPR64, FPR32, asm,
+ [(set FPR64:$Rd, (fextend FPR32:$Rn))]>;
+
+ // Single-precision to Half-precision
+ def HSr : BaseFPConversion<0b00, 0b11, FPR16, FPR32, asm,
+ [(set FPR16:$Rd, (fround FPR32:$Rn))]>;
+}
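Likewise, all six precision conversions share one mnemonic:

    defm FCVT : FPConversion<"fcvt">;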
+
+//---
+// Single operand floating point data processing
+//---
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseSingleOperandFPData<bits<4> opcode, RegisterClass regtype,
+ ValueType vt, string asm, SDPatternOperator node>
+ : I<(outs regtype:$Rd), (ins regtype:$Rn), asm, "\t$Rd, $Rn", "",
+ [(set (vt regtype:$Rd), (node (vt regtype:$Rn)))]>,
+ Sched<[WriteF]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31-23} = 0b000111100;
+ let Inst{21-19} = 0b100;
+ let Inst{18-15} = opcode;
+ let Inst{14-10} = 0b10000;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+multiclass SingleOperandFPData<bits<4> opcode, string asm,
+ SDPatternOperator node = null_frag> {
+ def Sr : BaseSingleOperandFPData<opcode, FPR32, f32, asm, node> {
+ let Inst{22} = 0; // 32-bit size flag
+ }
+
+ def Dr : BaseSingleOperandFPData<opcode, FPR64, f64, asm, node> {
+ let Inst{22} = 1; // 64-bit size flag
+ }
+}
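Representative instantiations of the single-operand multiclass; the opcode values are indicative, and fabs/fneg/fsqrt are the generic SelectionDAG nodes:

    defm FABS  : SingleOperandFPData<0b0001, "fabs",  fabs>;
    defm FNEG  : SingleOperandFPData<0b0010, "fneg",  fneg>;
    defm FSQRT : SingleOperandFPData<0b0011, "fsqrt", fsqrt>;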
+
+//---
+// Two operand floating point data processing
+//---
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseTwoOperandFPData<bits<4> opcode, RegisterClass regtype,
+ string asm, list<dag> pat>
+ : I<(outs regtype:$Rd), (ins regtype:$Rn, regtype:$Rm),
+ asm, "\t$Rd, $Rn, $Rm", "", pat>,
+ Sched<[WriteF]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+ let Inst{31-23} = 0b000111100;
+ let Inst{21} = 1;
+ let Inst{20-16} = Rm;
+ let Inst{15-12} = opcode;
+ let Inst{11-10} = 0b10;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+multiclass TwoOperandFPData<bits<4> opcode, string asm,
+ SDPatternOperator node = null_frag> {
+ def Srr : BaseTwoOperandFPData<opcode, FPR32, asm,
+ [(set (f32 FPR32:$Rd),
+ (node (f32 FPR32:$Rn), (f32 FPR32:$Rm)))]> {
+ let Inst{22} = 0; // 32-bit size flag
+ }
+
+ def Drr : BaseTwoOperandFPData<opcode, FPR64, asm,
+ [(set (f64 FPR64:$Rd),
+ (node (f64 FPR64:$Rn), (f64 FPR64:$Rm)))]> {
+ let Inst{22} = 1; // 64-bit size flag
+ }
+}
+
+multiclass TwoOperandFPDataNeg<bits<4> opcode, string asm, SDNode node> {
+ def Srr : BaseTwoOperandFPData<opcode, FPR32, asm,
+ [(set FPR32:$Rd, (fneg (node FPR32:$Rn, (f32 FPR32:$Rm))))]> {
+ let Inst{22} = 0; // 32-bit size flag
+ }
+
+ def Drr : BaseTwoOperandFPData<opcode, FPR64, asm,
+ [(set FPR64:$Rd, (fneg (node FPR64:$Rn, (f64 FPR64:$Rm))))]> {
+ let Inst{22} = 1; // 64-bit size flag
+ }
+}
+
+
+//---
+// Three operand floating point data processing
+//---
+
+class BaseThreeOperandFPData<bit isNegated, bit isSub,
+ RegisterClass regtype, string asm, list<dag> pat>
+  : I<(outs regtype:$Rd), (ins regtype:$Rn, regtype:$Rm, regtype:$Ra),
+ asm, "\t$Rd, $Rn, $Rm, $Ra", "", pat>,
+ Sched<[WriteFMul]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+ bits<5> Ra;
+ let Inst{31-23} = 0b000111110;
+ let Inst{21} = isNegated;
+ let Inst{20-16} = Rm;
+ let Inst{15} = isSub;
+ let Inst{14-10} = Ra;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+multiclass ThreeOperandFPData<bit isNegated, bit isSub,string asm,
+ SDPatternOperator node> {
+ def Srrr : BaseThreeOperandFPData<isNegated, isSub, FPR32, asm,
+ [(set FPR32:$Rd,
+ (node (f32 FPR32:$Rn), (f32 FPR32:$Rm), (f32 FPR32:$Ra)))]> {
+ let Inst{22} = 0; // 32-bit size flag
+ }
+
+ def Drrr : BaseThreeOperandFPData<isNegated, isSub, FPR64, asm,
+ [(set FPR64:$Rd,
+ (node (f64 FPR64:$Rn), (f64 FPR64:$Rm), (f64 FPR64:$Ra)))]> {
+ let Inst{22} = 1; // 64-bit size flag
+ }
+}
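The isNegated/isSub bits cover the four fused multiply-add variants. A rough sketch of the plain fmadd case (fma is the generic node; the fmsub/fnmadd/fnmsub forms flip these bits and wrap the pattern accordingly):

    defm FMADD : ThreeOperandFPData<0, 0, "fmadd", fma>;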
+
+//---
+// Floating point data comparisons
+//---
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseOneOperandFPComparison<bit signalAllNans,
+ RegisterClass regtype, string asm,
+ list<dag> pat>
+ : I<(outs), (ins regtype:$Rn), asm, "\t$Rn, #0.0", "", pat>,
+ Sched<[WriteFCmp]> {
+ bits<5> Rn;
+ let Inst{31-23} = 0b000111100;
+ let Inst{21} = 1;
+
+ let Inst{15-10} = 0b001000;
+ let Inst{9-5} = Rn;
+ let Inst{4} = signalAllNans;
+ let Inst{3-0} = 0b1000;
+
+ // Rm should be 0b00000 canonically, but we need to accept any value.
+ let PostEncoderMethod = "fixOneOperandFPComparison";
+}
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseTwoOperandFPComparison<bit signalAllNans, RegisterClass regtype,
+ string asm, list<dag> pat>
+ : I<(outs), (ins regtype:$Rn, regtype:$Rm), asm, "\t$Rn, $Rm", "", pat>,
+ Sched<[WriteFCmp]> {
+ bits<5> Rm;
+ bits<5> Rn;
+ let Inst{31-23} = 0b000111100;
+ let Inst{21} = 1;
+ let Inst{20-16} = Rm;
+ let Inst{15-10} = 0b001000;
+ let Inst{9-5} = Rn;
+ let Inst{4} = signalAllNans;
+ let Inst{3-0} = 0b0000;
+}
+
+multiclass FPComparison<bit signalAllNans, string asm,
+ SDPatternOperator OpNode = null_frag> {
+ let Defs = [NZCV] in {
+ def Srr : BaseTwoOperandFPComparison<signalAllNans, FPR32, asm,
+ [(OpNode FPR32:$Rn, (f32 FPR32:$Rm)), (implicit NZCV)]> {
+ let Inst{22} = 0;
+ }
+
+ def Sri : BaseOneOperandFPComparison<signalAllNans, FPR32, asm,
+ [(OpNode (f32 FPR32:$Rn), fpimm0), (implicit NZCV)]> {
+ let Inst{22} = 0;
+ }
+
+ def Drr : BaseTwoOperandFPComparison<signalAllNans, FPR64, asm,
+ [(OpNode FPR64:$Rn, (f64 FPR64:$Rm)), (implicit NZCV)]> {
+ let Inst{22} = 1;
+ }
+
+ def Dri : BaseOneOperandFPComparison<signalAllNans, FPR64, asm,
+ [(OpNode (f64 FPR64:$Rn), fpimm0), (implicit NZCV)]> {
+ let Inst{22} = 1;
+ }
+ } // Defs = [NZCV]
+}
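The signalAllNans bit distinguishes the quiet and signalling compares, e.g. (AArch64fcmp is the target compare node defined in the backend's .td files):

    defm FCMP  : FPComparison<0, "fcmp", AArch64fcmp>;
    defm FCMPE : FPComparison<1, "fcmpe">;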
+
+//---
+// Floating point conditional comparisons
+//---
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseFPCondComparison<bit signalAllNans,
+ RegisterClass regtype, string asm>
+ : I<(outs), (ins regtype:$Rn, regtype:$Rm, imm0_15:$nzcv, ccode:$cond),
+ asm, "\t$Rn, $Rm, $nzcv, $cond", "", []>,
+ Sched<[WriteFCmp]> {
+ bits<5> Rn;
+ bits<5> Rm;
+ bits<4> nzcv;
+ bits<4> cond;
+
+ let Inst{31-23} = 0b000111100;
+ let Inst{21} = 1;
+ let Inst{20-16} = Rm;
+ let Inst{15-12} = cond;
+ let Inst{11-10} = 0b01;
+ let Inst{9-5} = Rn;
+ let Inst{4} = signalAllNans;
+ let Inst{3-0} = nzcv;
+}
+
+multiclass FPCondComparison<bit signalAllNans, string asm> {
+ let Defs = [NZCV], Uses = [NZCV] in {
+ def Srr : BaseFPCondComparison<signalAllNans, FPR32, asm> {
+ let Inst{22} = 0;
+ }
+
+ def Drr : BaseFPCondComparison<signalAllNans, FPR64, asm> {
+ let Inst{22} = 1;
+ }
+ } // Defs = [NZCV], Uses = [NZCV]
+}
+
+//---
+// Floating point conditional select
+//---
+
+class BaseFPCondSelect<RegisterClass regtype, ValueType vt, string asm>
+ : I<(outs regtype:$Rd), (ins regtype:$Rn, regtype:$Rm, ccode:$cond),
+ asm, "\t$Rd, $Rn, $Rm, $cond", "",
+ [(set regtype:$Rd,
+ (AArch64csel (vt regtype:$Rn), regtype:$Rm,
+ (i32 imm:$cond), NZCV))]>,
+ Sched<[WriteF]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+ bits<4> cond;
+
+ let Inst{31-23} = 0b000111100;
+ let Inst{21} = 1;
+ let Inst{20-16} = Rm;
+ let Inst{15-12} = cond;
+ let Inst{11-10} = 0b11;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+multiclass FPCondSelect<string asm> {
+ let Uses = [NZCV] in {
+ def Srrr : BaseFPCondSelect<FPR32, f32, asm> {
+ let Inst{22} = 0;
+ }
+
+ def Drrr : BaseFPCondSelect<FPR64, f64, asm> {
+ let Inst{22} = 1;
+ }
+ } // Uses = [NZCV]
+}
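The conditional select multiclass is instantiated for the one fcsel mnemonic:

    defm FCSEL : FPCondSelect<"fcsel">;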
+
+//---
+// Floating move immediate
+//---
+
+class BaseFPMoveImmediate<RegisterClass regtype, Operand fpimmtype, string asm>
+ : I<(outs regtype:$Rd), (ins fpimmtype:$imm), asm, "\t$Rd, $imm", "",
+ [(set regtype:$Rd, fpimmtype:$imm)]>,
+ Sched<[WriteFImm]> {
+ bits<5> Rd;
+ bits<8> imm;
+ let Inst{31-23} = 0b000111100;
+ let Inst{21} = 1;
+ let Inst{20-13} = imm;
+ let Inst{12-5} = 0b10000000;
+ let Inst{4-0} = Rd;
+}
+
+multiclass FPMoveImmediate<string asm> {
+ def Si : BaseFPMoveImmediate<FPR32, fpimm32, asm> {
+ let Inst{22} = 0;
+ }
+
+ def Di : BaseFPMoveImmediate<FPR64, fpimm64, asm> {
+ let Inst{22} = 1;
+ }
+}
+} // end of 'let Predicates = [HasFPARMv8]'
+
+//----------------------------------------------------------------------------
+// AdvSIMD
+//----------------------------------------------------------------------------
let Predicates = [HasNEON] in {
-class NeonInstAlias<string Asm, dag Result, bit Emit = 0b1>
- : InstAlias<Asm, Result, Emit> {
+//----------------------------------------------------------------------------
+// AdvSIMD three register vector instructions
+//----------------------------------------------------------------------------
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseSIMDThreeSameVector<bit Q, bit U, bits<2> size, bits<5> opcode,
+ RegisterOperand regtype, string asm, string kind,
+ list<dag> pattern>
+ : I<(outs regtype:$Rd), (ins regtype:$Rn, regtype:$Rm), asm,
+ "{\t$Rd" # kind # ", $Rn" # kind # ", $Rm" # kind #
+      "|" # kind # "\t$Rd, $Rn, $Rm}", "", pattern>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+ let Inst{31} = 0;
+ let Inst{30} = Q;
+ let Inst{29} = U;
+ let Inst{28-24} = 0b01110;
+ let Inst{23-22} = size;
+ let Inst{21} = 1;
+ let Inst{20-16} = Rm;
+ let Inst{15-11} = opcode;
+ let Inst{10} = 1;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseSIMDThreeSameVectorTied<bit Q, bit U, bits<2> size, bits<5> opcode,
+ RegisterOperand regtype, string asm, string kind,
+ list<dag> pattern>
+ : I<(outs regtype:$dst), (ins regtype:$Rd, regtype:$Rn, regtype:$Rm), asm,
+ "{\t$Rd" # kind # ", $Rn" # kind # ", $Rm" # kind #
+ "|" # kind # "\t$Rd, $Rn, $Rm}", "$Rd = $dst", pattern>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+ let Inst{31} = 0;
+ let Inst{30} = Q;
+ let Inst{29} = U;
+ let Inst{28-24} = 0b01110;
+ let Inst{23-22} = size;
+ let Inst{21} = 1;
+ let Inst{20-16} = Rm;
+ let Inst{15-11} = opcode;
+ let Inst{10} = 1;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+// All operand sizes distinguished in the encoding.
+multiclass SIMDThreeSameVector<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v8i8 : BaseSIMDThreeSameVector<0, U, 0b00, opc, V64,
+ asm, ".8b",
+ [(set (v8i8 V64:$Rd), (OpNode (v8i8 V64:$Rn), (v8i8 V64:$Rm)))]>;
+ def v16i8 : BaseSIMDThreeSameVector<1, U, 0b00, opc, V128,
+ asm, ".16b",
+ [(set (v16i8 V128:$Rd), (OpNode (v16i8 V128:$Rn), (v16i8 V128:$Rm)))]>;
+ def v4i16 : BaseSIMDThreeSameVector<0, U, 0b01, opc, V64,
+ asm, ".4h",
+ [(set (v4i16 V64:$Rd), (OpNode (v4i16 V64:$Rn), (v4i16 V64:$Rm)))]>;
+ def v8i16 : BaseSIMDThreeSameVector<1, U, 0b01, opc, V128,
+ asm, ".8h",
+ [(set (v8i16 V128:$Rd), (OpNode (v8i16 V128:$Rn), (v8i16 V128:$Rm)))]>;
+ def v2i32 : BaseSIMDThreeSameVector<0, U, 0b10, opc, V64,
+ asm, ".2s",
+ [(set (v2i32 V64:$Rd), (OpNode (v2i32 V64:$Rn), (v2i32 V64:$Rm)))]>;
+ def v4i32 : BaseSIMDThreeSameVector<1, U, 0b10, opc, V128,
+ asm, ".4s",
+ [(set (v4i32 V128:$Rd), (OpNode (v4i32 V128:$Rn), (v4i32 V128:$Rm)))]>;
+ def v2i64 : BaseSIMDThreeSameVector<1, U, 0b11, opc, V128,
+ asm, ".2d",
+ [(set (v2i64 V128:$Rd), (OpNode (v2i64 V128:$Rn), (v2i64 V128:$Rm)))]>;
+}
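A single instantiation covers every element width at once; e.g. vector integer add, with the opcode bits shown as indicative values only (add is the generic SelectionDAG node):

    defm ADD : SIMDThreeSameVector<0, 0b10000, "add", add>;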
+
+// As above, but D sized elements unsupported.
+multiclass SIMDThreeSameVectorBHS<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v8i8 : BaseSIMDThreeSameVector<0, U, 0b00, opc, V64,
+ asm, ".8b",
+ [(set V64:$Rd, (v8i8 (OpNode (v8i8 V64:$Rn), (v8i8 V64:$Rm))))]>;
+ def v16i8 : BaseSIMDThreeSameVector<1, U, 0b00, opc, V128,
+ asm, ".16b",
+ [(set V128:$Rd, (v16i8 (OpNode (v16i8 V128:$Rn), (v16i8 V128:$Rm))))]>;
+ def v4i16 : BaseSIMDThreeSameVector<0, U, 0b01, opc, V64,
+ asm, ".4h",
+ [(set V64:$Rd, (v4i16 (OpNode (v4i16 V64:$Rn), (v4i16 V64:$Rm))))]>;
+ def v8i16 : BaseSIMDThreeSameVector<1, U, 0b01, opc, V128,
+ asm, ".8h",
+ [(set V128:$Rd, (v8i16 (OpNode (v8i16 V128:$Rn), (v8i16 V128:$Rm))))]>;
+ def v2i32 : BaseSIMDThreeSameVector<0, U, 0b10, opc, V64,
+ asm, ".2s",
+ [(set V64:$Rd, (v2i32 (OpNode (v2i32 V64:$Rn), (v2i32 V64:$Rm))))]>;
+ def v4i32 : BaseSIMDThreeSameVector<1, U, 0b10, opc, V128,
+ asm, ".4s",
+ [(set V128:$Rd, (v4i32 (OpNode (v4i32 V128:$Rn), (v4i32 V128:$Rm))))]>;
+}
+
+multiclass SIMDThreeSameVectorBHSTied<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v8i8 : BaseSIMDThreeSameVectorTied<0, U, 0b00, opc, V64,
+ asm, ".8b",
+ [(set (v8i8 V64:$dst),
+ (OpNode (v8i8 V64:$Rd), (v8i8 V64:$Rn), (v8i8 V64:$Rm)))]>;
+ def v16i8 : BaseSIMDThreeSameVectorTied<1, U, 0b00, opc, V128,
+ asm, ".16b",
+ [(set (v16i8 V128:$dst),
+ (OpNode (v16i8 V128:$Rd), (v16i8 V128:$Rn), (v16i8 V128:$Rm)))]>;
+ def v4i16 : BaseSIMDThreeSameVectorTied<0, U, 0b01, opc, V64,
+ asm, ".4h",
+ [(set (v4i16 V64:$dst),
+ (OpNode (v4i16 V64:$Rd), (v4i16 V64:$Rn), (v4i16 V64:$Rm)))]>;
+ def v8i16 : BaseSIMDThreeSameVectorTied<1, U, 0b01, opc, V128,
+ asm, ".8h",
+ [(set (v8i16 V128:$dst),
+ (OpNode (v8i16 V128:$Rd), (v8i16 V128:$Rn), (v8i16 V128:$Rm)))]>;
+ def v2i32 : BaseSIMDThreeSameVectorTied<0, U, 0b10, opc, V64,
+ asm, ".2s",
+ [(set (v2i32 V64:$dst),
+ (OpNode (v2i32 V64:$Rd), (v2i32 V64:$Rn), (v2i32 V64:$Rm)))]>;
+ def v4i32 : BaseSIMDThreeSameVectorTied<1, U, 0b10, opc, V128,
+ asm, ".4s",
+ [(set (v4i32 V128:$dst),
+ (OpNode (v4i32 V128:$Rd), (v4i32 V128:$Rn), (v4i32 V128:$Rm)))]>;
+}
+
+// As above, but only B sized elements supported.
+multiclass SIMDThreeSameVectorB<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v8i8 : BaseSIMDThreeSameVector<0, U, 0b00, opc, V64,
+ asm, ".8b",
+ [(set (v8i8 V64:$Rd), (OpNode (v8i8 V64:$Rn), (v8i8 V64:$Rm)))]>;
+ def v16i8 : BaseSIMDThreeSameVector<1, U, 0b00, opc, V128,
+ asm, ".16b",
+ [(set (v16i8 V128:$Rd),
+ (OpNode (v16i8 V128:$Rn), (v16i8 V128:$Rm)))]>;
+}
+
+// As above, but only S and D sized floating point elements supported.
+multiclass SIMDThreeSameVectorFP<bit U, bit S, bits<5> opc,
+ string asm, SDPatternOperator OpNode> {
+ def v2f32 : BaseSIMDThreeSameVector<0, U, {S,0}, opc, V64,
+ asm, ".2s",
+ [(set (v2f32 V64:$Rd), (OpNode (v2f32 V64:$Rn), (v2f32 V64:$Rm)))]>;
+ def v4f32 : BaseSIMDThreeSameVector<1, U, {S,0}, opc, V128,
+ asm, ".4s",
+ [(set (v4f32 V128:$Rd), (OpNode (v4f32 V128:$Rn), (v4f32 V128:$Rm)))]>;
+ def v2f64 : BaseSIMDThreeSameVector<1, U, {S,1}, opc, V128,
+ asm, ".2d",
+ [(set (v2f64 V128:$Rd), (OpNode (v2f64 V128:$Rn), (v2f64 V128:$Rm)))]>;
+}
+
+multiclass SIMDThreeSameVectorFPCmp<bit U, bit S, bits<5> opc,
+ string asm,
+ SDPatternOperator OpNode> {
+ def v2f32 : BaseSIMDThreeSameVector<0, U, {S,0}, opc, V64,
+ asm, ".2s",
+ [(set (v2i32 V64:$Rd), (OpNode (v2f32 V64:$Rn), (v2f32 V64:$Rm)))]>;
+ def v4f32 : BaseSIMDThreeSameVector<1, U, {S,0}, opc, V128,
+ asm, ".4s",
+ [(set (v4i32 V128:$Rd), (OpNode (v4f32 V128:$Rn), (v4f32 V128:$Rm)))]>;
+ def v2f64 : BaseSIMDThreeSameVector<1, U, {S,1}, opc, V128,
+ asm, ".2d",
+ [(set (v2i64 V128:$Rd), (OpNode (v2f64 V128:$Rn), (v2f64 V128:$Rm)))]>;
}
-// Format AdvSIMD bitwise extract
-class NeonI_BitExtract<bit q, bits<2> op2,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
- let Inst{31} = 0b0;
- let Inst{30} = q;
+multiclass SIMDThreeSameVectorFPTied<bit U, bit S, bits<5> opc,
+ string asm, SDPatternOperator OpNode> {
+ def v2f32 : BaseSIMDThreeSameVectorTied<0, U, {S,0}, opc, V64,
+ asm, ".2s",
+ [(set (v2f32 V64:$dst),
+ (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn), (v2f32 V64:$Rm)))]>;
+ def v4f32 : BaseSIMDThreeSameVectorTied<1, U, {S,0}, opc, V128,
+ asm, ".4s",
+ [(set (v4f32 V128:$dst),
+ (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn), (v4f32 V128:$Rm)))]>;
+ def v2f64 : BaseSIMDThreeSameVectorTied<1, U, {S,1}, opc, V128,
+ asm, ".2d",
+ [(set (v2f64 V128:$dst),
+ (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn), (v2f64 V128:$Rm)))]>;
+}
+
+// As above, but D and B sized elements unsupported.
+multiclass SIMDThreeSameVectorHS<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v4i16 : BaseSIMDThreeSameVector<0, U, 0b01, opc, V64,
+ asm, ".4h",
+ [(set (v4i16 V64:$Rd), (OpNode (v4i16 V64:$Rn), (v4i16 V64:$Rm)))]>;
+ def v8i16 : BaseSIMDThreeSameVector<1, U, 0b01, opc, V128,
+ asm, ".8h",
+ [(set (v8i16 V128:$Rd), (OpNode (v8i16 V128:$Rn), (v8i16 V128:$Rm)))]>;
+ def v2i32 : BaseSIMDThreeSameVector<0, U, 0b10, opc, V64,
+ asm, ".2s",
+ [(set (v2i32 V64:$Rd), (OpNode (v2i32 V64:$Rn), (v2i32 V64:$Rm)))]>;
+ def v4i32 : BaseSIMDThreeSameVector<1, U, 0b10, opc, V128,
+ asm, ".4s",
+ [(set (v4i32 V128:$Rd), (OpNode (v4i32 V128:$Rn), (v4i32 V128:$Rm)))]>;
+}
+
+// Logical three vector ops share opcode bits, and only use B sized elements.
+multiclass SIMDLogicalThreeVector<bit U, bits<2> size, string asm,
+ SDPatternOperator OpNode = null_frag> {
+ def v8i8 : BaseSIMDThreeSameVector<0, U, size, 0b00011, V64,
+ asm, ".8b",
+ [(set (v8i8 V64:$Rd), (OpNode V64:$Rn, V64:$Rm))]>;
+ def v16i8 : BaseSIMDThreeSameVector<1, U, size, 0b00011, V128,
+ asm, ".16b",
+ [(set (v16i8 V128:$Rd), (OpNode V128:$Rn, V128:$Rm))]>;
+
+ def : Pat<(v4i16 (OpNode V64:$LHS, V64:$RHS)),
+ (!cast<Instruction>(NAME#"v8i8") V64:$LHS, V64:$RHS)>;
+ def : Pat<(v2i32 (OpNode V64:$LHS, V64:$RHS)),
+ (!cast<Instruction>(NAME#"v8i8") V64:$LHS, V64:$RHS)>;
+ def : Pat<(v1i64 (OpNode V64:$LHS, V64:$RHS)),
+ (!cast<Instruction>(NAME#"v8i8") V64:$LHS, V64:$RHS)>;
+
+ def : Pat<(v8i16 (OpNode V128:$LHS, V128:$RHS)),
+ (!cast<Instruction>(NAME#"v16i8") V128:$LHS, V128:$RHS)>;
+ def : Pat<(v4i32 (OpNode V128:$LHS, V128:$RHS)),
+ (!cast<Instruction>(NAME#"v16i8") V128:$LHS, V128:$RHS)>;
+ def : Pat<(v2i64 (OpNode V128:$LHS, V128:$RHS)),
+ (!cast<Instruction>(NAME#"v16i8") V128:$LHS, V128:$RHS)>;
+}
+
+multiclass SIMDLogicalThreeVectorTied<bit U, bits<2> size,
+ string asm, SDPatternOperator OpNode> {
+ def v8i8 : BaseSIMDThreeSameVectorTied<0, U, size, 0b00011, V64,
+ asm, ".8b",
+ [(set (v8i8 V64:$dst),
+ (OpNode (v8i8 V64:$Rd), (v8i8 V64:$Rn), (v8i8 V64:$Rm)))]>;
+ def v16i8 : BaseSIMDThreeSameVectorTied<1, U, size, 0b00011, V128,
+ asm, ".16b",
+ [(set (v16i8 V128:$dst),
+ (OpNode (v16i8 V128:$Rd), (v16i8 V128:$Rn),
+ (v16i8 V128:$Rm)))]>;
+
+ def : Pat<(v4i16 (OpNode (v4i16 V64:$LHS), (v4i16 V64:$MHS),
+ (v4i16 V64:$RHS))),
+ (!cast<Instruction>(NAME#"v8i8")
+ V64:$LHS, V64:$MHS, V64:$RHS)>;
+ def : Pat<(v2i32 (OpNode (v2i32 V64:$LHS), (v2i32 V64:$MHS),
+ (v2i32 V64:$RHS))),
+ (!cast<Instruction>(NAME#"v8i8")
+ V64:$LHS, V64:$MHS, V64:$RHS)>;
+ def : Pat<(v1i64 (OpNode (v1i64 V64:$LHS), (v1i64 V64:$MHS),
+ (v1i64 V64:$RHS))),
+ (!cast<Instruction>(NAME#"v8i8")
+ V64:$LHS, V64:$MHS, V64:$RHS)>;
+
+ def : Pat<(v8i16 (OpNode (v8i16 V128:$LHS), (v8i16 V128:$MHS),
+ (v8i16 V128:$RHS))),
+ (!cast<Instruction>(NAME#"v16i8")
+ V128:$LHS, V128:$MHS, V128:$RHS)>;
+ def : Pat<(v4i32 (OpNode (v4i32 V128:$LHS), (v4i32 V128:$MHS),
+ (v4i32 V128:$RHS))),
+ (!cast<Instruction>(NAME#"v16i8")
+ V128:$LHS, V128:$MHS, V128:$RHS)>;
+ def : Pat<(v2i64 (OpNode (v2i64 V128:$LHS), (v2i64 V128:$MHS),
+ (v2i64 V128:$RHS))),
+ (!cast<Instruction>(NAME#"v16i8")
+ V128:$LHS, V128:$MHS, V128:$RHS)>;
+}
+
+
+//----------------------------------------------------------------------------
+// AdvSIMD two register vector instructions.
+//----------------------------------------------------------------------------
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseSIMDTwoSameVector<bit Q, bit U, bits<2> size, bits<5> opcode,
+ RegisterOperand regtype, string asm, string dstkind,
+ string srckind, list<dag> pattern>
+ : I<(outs regtype:$Rd), (ins regtype:$Rn), asm,
+ "{\t$Rd" # dstkind # ", $Rn" # srckind #
+ "|" # dstkind # "\t$Rd, $Rn}", "", pattern>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31} = 0;
+ let Inst{30} = Q;
+ let Inst{29} = U;
+ let Inst{28-24} = 0b01110;
+ let Inst{23-22} = size;
+ let Inst{21-17} = 0b10000;
+ let Inst{16-12} = opcode;
+ let Inst{11-10} = 0b10;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseSIMDTwoSameVectorTied<bit Q, bit U, bits<2> size, bits<5> opcode,
+ RegisterOperand regtype, string asm, string dstkind,
+ string srckind, list<dag> pattern>
+ : I<(outs regtype:$dst), (ins regtype:$Rd, regtype:$Rn), asm,
+ "{\t$Rd" # dstkind # ", $Rn" # srckind #
+ "|" # dstkind # "\t$Rd, $Rn}", "$Rd = $dst", pattern>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31} = 0;
+ let Inst{30} = Q;
+ let Inst{29} = U;
+ let Inst{28-24} = 0b01110;
+ let Inst{23-22} = size;
+ let Inst{21-17} = 0b10000;
+ let Inst{16-12} = opcode;
+ let Inst{11-10} = 0b10;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+// Supports B, H, and S element sizes.
+multiclass SIMDTwoVectorBHS<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v8i8 : BaseSIMDTwoSameVector<0, U, 0b00, opc, V64,
+ asm, ".8b", ".8b",
+ [(set (v8i8 V64:$Rd), (OpNode (v8i8 V64:$Rn)))]>;
+ def v16i8 : BaseSIMDTwoSameVector<1, U, 0b00, opc, V128,
+ asm, ".16b", ".16b",
+ [(set (v16i8 V128:$Rd), (OpNode (v16i8 V128:$Rn)))]>;
+ def v4i16 : BaseSIMDTwoSameVector<0, U, 0b01, opc, V64,
+ asm, ".4h", ".4h",
+ [(set (v4i16 V64:$Rd), (OpNode (v4i16 V64:$Rn)))]>;
+ def v8i16 : BaseSIMDTwoSameVector<1, U, 0b01, opc, V128,
+ asm, ".8h", ".8h",
+ [(set (v8i16 V128:$Rd), (OpNode (v8i16 V128:$Rn)))]>;
+ def v2i32 : BaseSIMDTwoSameVector<0, U, 0b10, opc, V64,
+ asm, ".2s", ".2s",
+ [(set (v2i32 V64:$Rd), (OpNode (v2i32 V64:$Rn)))]>;
+ def v4i32 : BaseSIMDTwoSameVector<1, U, 0b10, opc, V128,
+ asm, ".4s", ".4s",
+ [(set (v4i32 V128:$Rd), (OpNode (v4i32 V128:$Rn)))]>;
+}
+
+class BaseSIMDVectorLShiftLongBySize<bit Q, bits<2> size,
+ RegisterOperand regtype, string asm, string dstkind,
+ string srckind, string amount>
+ : I<(outs V128:$Rd), (ins regtype:$Rn), asm,
+ "{\t$Rd" # dstkind # ", $Rn" # srckind # ", #" # amount #
+ "|" # dstkind # "\t$Rd, $Rn, #" # amount # "}", "", []>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31} = 0;
+ let Inst{30} = Q;
let Inst{29-24} = 0b101110;
- let Inst{23-22} = op2;
- let Inst{21} = 0b0;
- // Inherit Rm in 20-16
- let Inst{15} = 0b0;
- // imm4 in 14-11
- let Inst{10} = 0b0;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
-}
-
-// Format AdvSIMD perm
-class NeonI_Perm<bit q, bits<2> size, bits<3> opcode,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
- let Inst{31} = 0b0;
- let Inst{30} = q;
- let Inst{29-24} = 0b001110;
let Inst{23-22} = size;
- let Inst{21} = 0b0;
- // Inherit Rm in 20-16
- let Inst{15} = 0b0;
- let Inst{14-12} = opcode;
+ let Inst{21-10} = 0b100001001110;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+multiclass SIMDVectorLShiftLongBySizeBHS {
+ let neverHasSideEffects = 1 in {
+ def v8i8 : BaseSIMDVectorLShiftLongBySize<0, 0b00, V64,
+ "shll", ".8h", ".8b", "8">;
+ def v16i8 : BaseSIMDVectorLShiftLongBySize<1, 0b00, V128,
+ "shll2", ".8h", ".16b", "8">;
+ def v4i16 : BaseSIMDVectorLShiftLongBySize<0, 0b01, V64,
+ "shll", ".4s", ".4h", "16">;
+ def v8i16 : BaseSIMDVectorLShiftLongBySize<1, 0b01, V128,
+ "shll2", ".4s", ".8h", "16">;
+ def v2i32 : BaseSIMDVectorLShiftLongBySize<0, 0b10, V64,
+ "shll", ".2d", ".2s", "32">;
+ def v4i32 : BaseSIMDVectorLShiftLongBySize<1, 0b10, V128,
+ "shll2", ".2d", ".4s", "32">;
+ }
+}
+
+// Supports all element sizes.
+multiclass SIMDLongTwoVector<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v8i8_v4i16 : BaseSIMDTwoSameVector<0, U, 0b00, opc, V64,
+ asm, ".4h", ".8b",
+ [(set (v4i16 V64:$Rd), (OpNode (v8i8 V64:$Rn)))]>;
+ def v16i8_v8i16 : BaseSIMDTwoSameVector<1, U, 0b00, opc, V128,
+ asm, ".8h", ".16b",
+ [(set (v8i16 V128:$Rd), (OpNode (v16i8 V128:$Rn)))]>;
+ def v4i16_v2i32 : BaseSIMDTwoSameVector<0, U, 0b01, opc, V64,
+ asm, ".2s", ".4h",
+ [(set (v2i32 V64:$Rd), (OpNode (v4i16 V64:$Rn)))]>;
+ def v8i16_v4i32 : BaseSIMDTwoSameVector<1, U, 0b01, opc, V128,
+ asm, ".4s", ".8h",
+ [(set (v4i32 V128:$Rd), (OpNode (v8i16 V128:$Rn)))]>;
+ def v2i32_v1i64 : BaseSIMDTwoSameVector<0, U, 0b10, opc, V64,
+ asm, ".1d", ".2s",
+ [(set (v1i64 V64:$Rd), (OpNode (v2i32 V64:$Rn)))]>;
+ def v4i32_v2i64 : BaseSIMDTwoSameVector<1, U, 0b10, opc, V128,
+ asm, ".2d", ".4s",
+ [(set (v2i64 V128:$Rd), (OpNode (v4i32 V128:$Rn)))]>;
+}
+
+multiclass SIMDLongTwoVectorTied<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v8i8_v4i16 : BaseSIMDTwoSameVectorTied<0, U, 0b00, opc, V64,
+ asm, ".4h", ".8b",
+ [(set (v4i16 V64:$dst), (OpNode (v4i16 V64:$Rd),
+ (v8i8 V64:$Rn)))]>;
+ def v16i8_v8i16 : BaseSIMDTwoSameVectorTied<1, U, 0b00, opc, V128,
+ asm, ".8h", ".16b",
+ [(set (v8i16 V128:$dst), (OpNode (v8i16 V128:$Rd),
+ (v16i8 V128:$Rn)))]>;
+ def v4i16_v2i32 : BaseSIMDTwoSameVectorTied<0, U, 0b01, opc, V64,
+ asm, ".2s", ".4h",
+ [(set (v2i32 V64:$dst), (OpNode (v2i32 V64:$Rd),
+ (v4i16 V64:$Rn)))]>;
+ def v8i16_v4i32 : BaseSIMDTwoSameVectorTied<1, U, 0b01, opc, V128,
+ asm, ".4s", ".8h",
+ [(set (v4i32 V128:$dst), (OpNode (v4i32 V128:$Rd),
+ (v8i16 V128:$Rn)))]>;
+ def v2i32_v1i64 : BaseSIMDTwoSameVectorTied<0, U, 0b10, opc, V64,
+ asm, ".1d", ".2s",
+ [(set (v1i64 V64:$dst), (OpNode (v1i64 V64:$Rd),
+ (v2i32 V64:$Rn)))]>;
+ def v4i32_v2i64 : BaseSIMDTwoSameVectorTied<1, U, 0b10, opc, V128,
+ asm, ".2d", ".4s",
+ [(set (v2i64 V128:$dst), (OpNode (v2i64 V128:$Rd),
+ (v4i32 V128:$Rn)))]>;
+}
+
+// Supports all element sizes, except 1xD.
+multiclass SIMDTwoVectorBHSDTied<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v8i8 : BaseSIMDTwoSameVectorTied<0, U, 0b00, opc, V64,
+ asm, ".8b", ".8b",
+ [(set (v8i8 V64:$dst), (OpNode (v8i8 V64:$Rd), (v8i8 V64:$Rn)))]>;
+ def v16i8 : BaseSIMDTwoSameVectorTied<1, U, 0b00, opc, V128,
+ asm, ".16b", ".16b",
+ [(set (v16i8 V128:$dst), (OpNode (v16i8 V128:$Rd), (v16i8 V128:$Rn)))]>;
+ def v4i16 : BaseSIMDTwoSameVectorTied<0, U, 0b01, opc, V64,
+ asm, ".4h", ".4h",
+ [(set (v4i16 V64:$dst), (OpNode (v4i16 V64:$Rd), (v4i16 V64:$Rn)))]>;
+ def v8i16 : BaseSIMDTwoSameVectorTied<1, U, 0b01, opc, V128,
+ asm, ".8h", ".8h",
+ [(set (v8i16 V128:$dst), (OpNode (v8i16 V128:$Rd), (v8i16 V128:$Rn)))]>;
+ def v2i32 : BaseSIMDTwoSameVectorTied<0, U, 0b10, opc, V64,
+ asm, ".2s", ".2s",
+ [(set (v2i32 V64:$dst), (OpNode (v2i32 V64:$Rd), (v2i32 V64:$Rn)))]>;
+ def v4i32 : BaseSIMDTwoSameVectorTied<1, U, 0b10, opc, V128,
+ asm, ".4s", ".4s",
+ [(set (v4i32 V128:$dst), (OpNode (v4i32 V128:$Rd), (v4i32 V128:$Rn)))]>;
+ def v2i64 : BaseSIMDTwoSameVectorTied<1, U, 0b11, opc, V128,
+ asm, ".2d", ".2d",
+ [(set (v2i64 V128:$dst), (OpNode (v2i64 V128:$Rd), (v2i64 V128:$Rn)))]>;
+}
+
+multiclass SIMDTwoVectorBHSD<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode = null_frag> {
+ def v8i8 : BaseSIMDTwoSameVector<0, U, 0b00, opc, V64,
+ asm, ".8b", ".8b",
+ [(set (v8i8 V64:$Rd), (OpNode (v8i8 V64:$Rn)))]>;
+ def v16i8 : BaseSIMDTwoSameVector<1, U, 0b00, opc, V128,
+ asm, ".16b", ".16b",
+ [(set (v16i8 V128:$Rd), (OpNode (v16i8 V128:$Rn)))]>;
+ def v4i16 : BaseSIMDTwoSameVector<0, U, 0b01, opc, V64,
+ asm, ".4h", ".4h",
+ [(set (v4i16 V64:$Rd), (OpNode (v4i16 V64:$Rn)))]>;
+ def v8i16 : BaseSIMDTwoSameVector<1, U, 0b01, opc, V128,
+ asm, ".8h", ".8h",
+ [(set (v8i16 V128:$Rd), (OpNode (v8i16 V128:$Rn)))]>;
+ def v2i32 : BaseSIMDTwoSameVector<0, U, 0b10, opc, V64,
+ asm, ".2s", ".2s",
+ [(set (v2i32 V64:$Rd), (OpNode (v2i32 V64:$Rn)))]>;
+ def v4i32 : BaseSIMDTwoSameVector<1, U, 0b10, opc, V128,
+ asm, ".4s", ".4s",
+ [(set (v4i32 V128:$Rd), (OpNode (v4i32 V128:$Rn)))]>;
+ def v2i64 : BaseSIMDTwoSameVector<1, U, 0b11, opc, V128,
+ asm, ".2d", ".2d",
+ [(set (v2i64 V128:$Rd), (OpNode (v2i64 V128:$Rn)))]>;
+}
+
+
+// Supports only B element sizes.
+multiclass SIMDTwoVectorB<bit U, bits<2> size, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v8i8 : BaseSIMDTwoSameVector<0, U, size, opc, V64,
+ asm, ".8b", ".8b",
+ [(set (v8i8 V64:$Rd), (OpNode (v8i8 V64:$Rn)))]>;
+ def v16i8 : BaseSIMDTwoSameVector<1, U, size, opc, V128,
+ asm, ".16b", ".16b",
+ [(set (v16i8 V128:$Rd), (OpNode (v16i8 V128:$Rn)))]>;
+
+}
+
+// Supports only B and H element sizes.
+multiclass SIMDTwoVectorBH<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v8i8 : BaseSIMDTwoSameVector<0, U, 0b00, opc, V64,
+ asm, ".8b", ".8b",
+ [(set (v8i8 V64:$Rd), (OpNode V64:$Rn))]>;
+ def v16i8 : BaseSIMDTwoSameVector<1, U, 0b00, opc, V128,
+ asm, ".16b", ".16b",
+ [(set (v16i8 V128:$Rd), (OpNode V128:$Rn))]>;
+ def v4i16 : BaseSIMDTwoSameVector<0, U, 0b01, opc, V64,
+ asm, ".4h", ".4h",
+ [(set (v4i16 V64:$Rd), (OpNode V64:$Rn))]>;
+ def v8i16 : BaseSIMDTwoSameVector<1, U, 0b01, opc, V128,
+ asm, ".8h", ".8h",
+ [(set (v8i16 V128:$Rd), (OpNode V128:$Rn))]>;
+}
+
+// Supports only S and D element sizes, uses high bit of the size field
+// as an extra opcode bit.
+multiclass SIMDTwoVectorFP<bit U, bit S, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v2f32 : BaseSIMDTwoSameVector<0, U, {S,0}, opc, V64,
+ asm, ".2s", ".2s",
+ [(set (v2f32 V64:$Rd), (OpNode (v2f32 V64:$Rn)))]>;
+ def v4f32 : BaseSIMDTwoSameVector<1, U, {S,0}, opc, V128,
+ asm, ".4s", ".4s",
+ [(set (v4f32 V128:$Rd), (OpNode (v4f32 V128:$Rn)))]>;
+ def v2f64 : BaseSIMDTwoSameVector<1, U, {S,1}, opc, V128,
+ asm, ".2d", ".2d",
+ [(set (v2f64 V128:$Rd), (OpNode (v2f64 V128:$Rn)))]>;
+}
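+
+// Editorial note (illustrative, not from the original source): in the FP
+// variants above the two-bit size field is not a plain element size.  Its
+// high bit carries the extra opcode bit S and only its low bit selects the
+// element width, so the three defs encode as
+//   {S,0} -> Inst{23-22} = S:0   (.2s / .4s, single precision)
+//   {S,1} -> Inst{23-22} = S:1   (.2d, double precision)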
+
+// Supports only S element size.
+multiclass SIMDTwoVectorS<bit U, bit S, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v2i32 : BaseSIMDTwoSameVector<0, U, {S,0}, opc, V64,
+ asm, ".2s", ".2s",
+ [(set (v2i32 V64:$Rd), (OpNode (v2i32 V64:$Rn)))]>;
+ def v4i32 : BaseSIMDTwoSameVector<1, U, {S,0}, opc, V128,
+ asm, ".4s", ".4s",
+ [(set (v4i32 V128:$Rd), (OpNode (v4i32 V128:$Rn)))]>;
+}
+
+
+multiclass SIMDTwoVectorFPToInt<bit U, bit S, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v2f32 : BaseSIMDTwoSameVector<0, U, {S,0}, opc, V64,
+ asm, ".2s", ".2s",
+ [(set (v2i32 V64:$Rd), (OpNode (v2f32 V64:$Rn)))]>;
+ def v4f32 : BaseSIMDTwoSameVector<1, U, {S,0}, opc, V128,
+ asm, ".4s", ".4s",
+ [(set (v4i32 V128:$Rd), (OpNode (v4f32 V128:$Rn)))]>;
+ def v2f64 : BaseSIMDTwoSameVector<1, U, {S,1}, opc, V128,
+ asm, ".2d", ".2d",
+ [(set (v2i64 V128:$Rd), (OpNode (v2f64 V128:$Rn)))]>;
+}
+
+multiclass SIMDTwoVectorIntToFP<bit U, bit S, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v2f32 : BaseSIMDTwoSameVector<0, U, {S,0}, opc, V64,
+ asm, ".2s", ".2s",
+ [(set (v2f32 V64:$Rd), (OpNode (v2i32 V64:$Rn)))]>;
+ def v4f32 : BaseSIMDTwoSameVector<1, U, {S,0}, opc, V128,
+ asm, ".4s", ".4s",
+ [(set (v4f32 V128:$Rd), (OpNode (v4i32 V128:$Rn)))]>;
+ def v2f64 : BaseSIMDTwoSameVector<1, U, {S,1}, opc, V128,
+ asm, ".2d", ".2d",
+ [(set (v2f64 V128:$Rd), (OpNode (v2i64 V128:$Rn)))]>;
+}
+
+
+class BaseSIMDMixedTwoVector<bit Q, bit U, bits<2> size, bits<5> opcode,
+ RegisterOperand inreg, RegisterOperand outreg,
+ string asm, string outkind, string inkind,
+ list<dag> pattern>
+ : I<(outs outreg:$Rd), (ins inreg:$Rn), asm,
+ "{\t$Rd" # outkind # ", $Rn" # inkind #
+ "|" # outkind # "\t$Rd, $Rn}", "", pattern>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31} = 0;
+ let Inst{30} = Q;
+ let Inst{29} = U;
+ let Inst{28-24} = 0b01110;
+ let Inst{23-22} = size;
+ let Inst{21-17} = 0b10000;
+ let Inst{16-12} = opcode;
let Inst{11-10} = 0b10;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
}
-// Format AdvSIMD table lookup
-class NeonI_TBL<bit q, bits<2> op2, bits<2> len, bit op,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
- let Inst{31} = 0b0;
- let Inst{30} = q;
- let Inst{29-24} = 0b001110;
- let Inst{23-22} = op2;
- let Inst{21} = 0b0;
- // Inherit Rm in 20-16
- let Inst{15} = 0b0;
- let Inst{14-13} = len;
- let Inst{12} = op;
- let Inst{11-10} = 0b00;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
-}
-
-// Format AdvSIMD 3 vector registers with same vector type
-class NeonI_3VSame<bit q, bit u, bits<2> size, bits<5> opcode,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
- let Inst{31} = 0b0;
- let Inst{30} = q;
- let Inst{29} = u;
+class BaseSIMDMixedTwoVectorTied<bit Q, bit U, bits<2> size, bits<5> opcode,
+ RegisterOperand inreg, RegisterOperand outreg,
+ string asm, string outkind, string inkind,
+ list<dag> pattern>
+ : I<(outs outreg:$dst), (ins outreg:$Rd, inreg:$Rn), asm,
+ "{\t$Rd" # outkind # ", $Rn" # inkind #
+ "|" # outkind # "\t$Rd, $Rn}", "$Rd = $dst", pattern>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31} = 0;
+ let Inst{30} = Q;
+ let Inst{29} = U;
let Inst{28-24} = 0b01110;
let Inst{23-22} = size;
- let Inst{21} = 0b1;
- // Inherit Rm in 20-16
- let Inst{15-11} = opcode;
- let Inst{10} = 0b1;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
-}
-
-// Format AdvSIMD 3 vector registers with different vector type
-class NeonI_3VDiff<bit q, bit u, bits<2> size, bits<4> opcode,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
- let Inst{31} = 0b0;
- let Inst{30} = q;
- let Inst{29} = u;
+ let Inst{21-17} = 0b10000;
+ let Inst{16-12} = opcode;
+ let Inst{11-10} = 0b10;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+multiclass SIMDMixedTwoVector<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v8i8 : BaseSIMDMixedTwoVector<0, U, 0b00, opc, V128, V64,
+ asm, ".8b", ".8h",
+ [(set (v8i8 V64:$Rd), (OpNode (v8i16 V128:$Rn)))]>;
+ def v16i8 : BaseSIMDMixedTwoVectorTied<1, U, 0b00, opc, V128, V128,
+ asm#"2", ".16b", ".8h", []>;
+ def v4i16 : BaseSIMDMixedTwoVector<0, U, 0b01, opc, V128, V64,
+ asm, ".4h", ".4s",
+ [(set (v4i16 V64:$Rd), (OpNode (v4i32 V128:$Rn)))]>;
+ def v8i16 : BaseSIMDMixedTwoVectorTied<1, U, 0b01, opc, V128, V128,
+ asm#"2", ".8h", ".4s", []>;
+ def v2i32 : BaseSIMDMixedTwoVector<0, U, 0b10, opc, V128, V64,
+ asm, ".2s", ".2d",
+ [(set (v2i32 V64:$Rd), (OpNode (v2i64 V128:$Rn)))]>;
+ def v4i32 : BaseSIMDMixedTwoVectorTied<1, U, 0b10, opc, V128, V128,
+ asm#"2", ".4s", ".2d", []>;
+
+ def : Pat<(concat_vectors (v8i8 V64:$Rd), (OpNode (v8i16 V128:$Rn))),
+ (!cast<Instruction>(NAME # "v16i8")
+ (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
+ def : Pat<(concat_vectors (v4i16 V64:$Rd), (OpNode (v4i32 V128:$Rn))),
+ (!cast<Instruction>(NAME # "v8i16")
+ (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
+ def : Pat<(concat_vectors (v2i32 V64:$Rd), (OpNode (v2i64 V128:$Rn))),
+ (!cast<Instruction>(NAME # "v4i32")
+ (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
+}
+
+class BaseSIMDCmpTwoVector<bit Q, bit U, bits<2> size, bits<5> opcode,
+ RegisterOperand regtype,
+ string asm, string kind, string zero,
+ ValueType dty, ValueType sty, SDNode OpNode>
+ : I<(outs regtype:$Rd), (ins regtype:$Rn), asm,
+ "{\t$Rd" # kind # ", $Rn" # kind # ", #" # zero #
+ "|" # kind # "\t$Rd, $Rn, #" # zero # "}", "",
+ [(set (dty regtype:$Rd), (OpNode (sty regtype:$Rn)))]>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31} = 0;
+ let Inst{30} = Q;
+ let Inst{29} = U;
let Inst{28-24} = 0b01110;
let Inst{23-22} = size;
- let Inst{21} = 0b1;
- // Inherit Rm in 20-16
- let Inst{15-12} = opcode;
- let Inst{11} = 0b0;
- let Inst{10} = 0b0;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
-}
-
-// Format AdvSIMD two registers and an element
-class NeonI_2VElem<bit q, bit u, bits<2> size, bits<4> opcode,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
- let Inst{31} = 0b0;
- let Inst{30} = q;
- let Inst{29} = u;
- let Inst{28-24} = 0b01111;
+ let Inst{21-17} = 0b10000;
+ let Inst{16-12} = opcode;
+ let Inst{11-10} = 0b10;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+// Comparisons support all element sizes, except 1xD.
+multiclass SIMDCmpTwoVector<bit U, bits<5> opc, string asm,
+ SDNode OpNode> {
+ def v8i8rz : BaseSIMDCmpTwoVector<0, U, 0b00, opc, V64,
+ asm, ".8b", "0",
+ v8i8, v8i8, OpNode>;
+ def v16i8rz : BaseSIMDCmpTwoVector<1, U, 0b00, opc, V128,
+ asm, ".16b", "0",
+ v16i8, v16i8, OpNode>;
+ def v4i16rz : BaseSIMDCmpTwoVector<0, U, 0b01, opc, V64,
+ asm, ".4h", "0",
+ v4i16, v4i16, OpNode>;
+ def v8i16rz : BaseSIMDCmpTwoVector<1, U, 0b01, opc, V128,
+ asm, ".8h", "0",
+ v8i16, v8i16, OpNode>;
+ def v2i32rz : BaseSIMDCmpTwoVector<0, U, 0b10, opc, V64,
+ asm, ".2s", "0",
+ v2i32, v2i32, OpNode>;
+ def v4i32rz : BaseSIMDCmpTwoVector<1, U, 0b10, opc, V128,
+ asm, ".4s", "0",
+ v4i32, v4i32, OpNode>;
+ def v2i64rz : BaseSIMDCmpTwoVector<1, U, 0b11, opc, V128,
+ asm, ".2d", "0",
+ v2i64, v2i64, OpNode>;
+}
+
+// FP Comparisons support only S and D element sizes.
+multiclass SIMDFPCmpTwoVector<bit U, bit S, bits<5> opc,
+ string asm, SDNode OpNode> {
+
+ def v2i32rz : BaseSIMDCmpTwoVector<0, U, {S,0}, opc, V64,
+ asm, ".2s", "0.0",
+ v2i32, v2f32, OpNode>;
+ def v4i32rz : BaseSIMDCmpTwoVector<1, U, {S,0}, opc, V128,
+ asm, ".4s", "0.0",
+ v4i32, v4f32, OpNode>;
+ def v2i64rz : BaseSIMDCmpTwoVector<1, U, {S,1}, opc, V128,
+ asm, ".2d", "0.0",
+ v2i64, v2f64, OpNode>;
+
+ def : InstAlias<asm # " $Vd.2s, $Vn.2s, #0",
+ (!cast<Instruction>(NAME # v2i32rz) V64:$Vd, V64:$Vn), 0>;
+ def : InstAlias<asm # " $Vd.4s, $Vn.4s, #0",
+ (!cast<Instruction>(NAME # v4i32rz) V128:$Vd, V128:$Vn), 0>;
+ def : InstAlias<asm # " $Vd.2d, $Vn.2d, #0",
+ (!cast<Instruction>(NAME # v2i64rz) V128:$Vd, V128:$Vn), 0>;
+ def : InstAlias<asm # ".2s $Vd, $Vn, #0",
+ (!cast<Instruction>(NAME # v2i32rz) V64:$Vd, V64:$Vn), 0>;
+ def : InstAlias<asm # ".4s $Vd, $Vn, #0",
+ (!cast<Instruction>(NAME # v4i32rz) V128:$Vd, V128:$Vn), 0>;
+ def : InstAlias<asm # ".2d $Vd, $Vn, #0",
+ (!cast<Instruction>(NAME # v2i64rz) V128:$Vd, V128:$Vn), 0>;
+}
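+
+// Editorial example (illustrative, not part of this patch): for these
+// compare-against-zero forms the instruction itself prints the immediate as
+// "#0.0" (e.g. something like "fcmeq v0.4s, v1.4s, #0.0"), while the
+// InstAlias records above additionally accept the plain "#0" spelling.  The
+// trailing 0 argument makes each alias assembler-only, so disassembly keeps
+// the canonical "#0.0" form.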
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseSIMDFPCvtTwoVector<bit Q, bit U, bits<2> size, bits<5> opcode,
+ RegisterOperand outtype, RegisterOperand intype,
+ string asm, string VdTy, string VnTy,
+ list<dag> pattern>
+ : I<(outs outtype:$Rd), (ins intype:$Rn), asm,
+ !strconcat("\t$Rd", VdTy, ", $Rn", VnTy), "", pattern>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31} = 0;
+ let Inst{30} = Q;
+ let Inst{29} = U;
+ let Inst{28-24} = 0b01110;
+ let Inst{23-22} = size;
+ let Inst{21-17} = 0b10000;
+ let Inst{16-12} = opcode;
+ let Inst{11-10} = 0b10;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+class BaseSIMDFPCvtTwoVectorTied<bit Q, bit U, bits<2> size, bits<5> opcode,
+ RegisterOperand outtype, RegisterOperand intype,
+ string asm, string VdTy, string VnTy,
+ list<dag> pattern>
+ : I<(outs outtype:$dst), (ins outtype:$Rd, intype:$Rn), asm,
+ !strconcat("\t$Rd", VdTy, ", $Rn", VnTy), "$Rd = $dst", pattern>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31} = 0;
+ let Inst{30} = Q;
+ let Inst{29} = U;
+ let Inst{28-24} = 0b01110;
let Inst{23-22} = size;
- // l in Inst{21}
- // m in Inst{20}
- // Inherit Rm in 19-16
+ let Inst{21-17} = 0b10000;
+ let Inst{16-12} = opcode;
+ let Inst{11-10} = 0b10;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+multiclass SIMDFPWidenTwoVector<bit U, bit S, bits<5> opc, string asm> {
+ def v4i16 : BaseSIMDFPCvtTwoVector<0, U, {S,0}, opc, V128, V64,
+ asm, ".4s", ".4h", []>;
+ def v8i16 : BaseSIMDFPCvtTwoVector<1, U, {S,0}, opc, V128, V128,
+ asm#"2", ".4s", ".8h", []>;
+ def v2i32 : BaseSIMDFPCvtTwoVector<0, U, {S,1}, opc, V128, V64,
+ asm, ".2d", ".2s", []>;
+ def v4i32 : BaseSIMDFPCvtTwoVector<1, U, {S,1}, opc, V128, V128,
+ asm#"2", ".2d", ".4s", []>;
+}
+
+multiclass SIMDFPNarrowTwoVector<bit U, bit S, bits<5> opc, string asm> {
+ def v4i16 : BaseSIMDFPCvtTwoVector<0, U, {S,0}, opc, V64, V128,
+ asm, ".4h", ".4s", []>;
+ def v8i16 : BaseSIMDFPCvtTwoVectorTied<1, U, {S,0}, opc, V128, V128,
+ asm#"2", ".8h", ".4s", []>;
+ def v2i32 : BaseSIMDFPCvtTwoVector<0, U, {S,1}, opc, V64, V128,
+ asm, ".2s", ".2d", []>;
+ def v4i32 : BaseSIMDFPCvtTwoVectorTied<1, U, {S,1}, opc, V128, V128,
+ asm#"2", ".4s", ".2d", []>;
+}
+
+multiclass SIMDFPInexactCvtTwoVector<bit U, bit S, bits<5> opc, string asm,
+ Intrinsic OpNode> {
+ def v2f32 : BaseSIMDFPCvtTwoVector<0, U, {S,1}, opc, V64, V128,
+ asm, ".2s", ".2d",
+ [(set (v2f32 V64:$Rd), (OpNode (v2f64 V128:$Rn)))]>;
+ def v4f32 : BaseSIMDFPCvtTwoVectorTied<1, U, {S,1}, opc, V128, V128,
+ asm#"2", ".4s", ".2d", []>;
+
+ def : Pat<(concat_vectors (v2f32 V64:$Rd), (OpNode (v2f64 V128:$Rn))),
+ (!cast<Instruction>(NAME # "v4f32")
+ (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
+}
+
+//----------------------------------------------------------------------------
+// AdvSIMD three register different-size vector instructions.
+//----------------------------------------------------------------------------
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseSIMDDifferentThreeVector<bit U, bits<3> size, bits<4> opcode,
+ RegisterOperand outtype, RegisterOperand intype1,
+ RegisterOperand intype2, string asm,
+ string outkind, string inkind1, string inkind2,
+ list<dag> pattern>
+ : I<(outs outtype:$Rd), (ins intype1:$Rn, intype2:$Rm), asm,
+ "{\t$Rd" # outkind # ", $Rn" # inkind1 # ", $Rm" # inkind2 #
+ "|" # outkind # "\t$Rd, $Rn, $Rm}", "", pattern>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+ let Inst{31} = 0;
+ let Inst{30} = size{0};
+ let Inst{29} = U;
+ let Inst{28-24} = 0b01110;
+ let Inst{23-22} = size{2-1};
+ let Inst{21} = 1;
+ let Inst{20-16} = Rm;
let Inst{15-12} = opcode;
- // h in Inst{11}
- let Inst{10} = 0b0;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
-}
-
-// Format AdvSIMD 1 vector register with modified immediate
-class NeonI_1VModImm<bit q, bit op,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRd<outs,ins, asmstr, patterns, itin> {
- bits<8> Imm;
- bits<4> cmode;
- let Inst{31} = 0b0;
- let Inst{30} = q;
- let Inst{29} = op;
- let Inst{28-19} = 0b0111100000;
- let Inst{15-12} = cmode;
- let Inst{11} = 0b0; // o2
- let Inst{10} = 1;
- // Inherit Rd in 4-0
- let Inst{18-16} = Imm{7-5}; // imm a:b:c
- let Inst{9-5} = Imm{4-0}; // imm d:e:f:g:h
+ let Inst{11-10} = 0b00;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
}
-// Format AdvSIMD 3 scalar registers with same type
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseSIMDDifferentThreeVectorTied<bit U, bits<3> size, bits<4> opcode,
+ RegisterOperand outtype, RegisterOperand intype1,
+ RegisterOperand intype2, string asm,
+ string outkind, string inkind1, string inkind2,
+ list<dag> pattern>
+ : I<(outs outtype:$dst), (ins outtype:$Rd, intype1:$Rn, intype2:$Rm), asm,
+ "{\t$Rd" # outkind # ", $Rn" # inkind1 # ", $Rm" # inkind2 #
+ "|" # outkind # "\t$Rd, $Rn, $Rm}", "$Rd = $dst", pattern>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+ let Inst{31} = 0;
+ let Inst{30} = size{0};
+ let Inst{29} = U;
+ let Inst{28-24} = 0b01110;
+ let Inst{23-22} = size{2-1};
+ let Inst{21} = 1;
+ let Inst{20-16} = Rm;
+ let Inst{15-12} = opcode;
+ let Inst{11-10} = 0b00;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+// FIXME: TableGen doesn't know how to deal with expanded types that also
+// change the element count (in this case, placing the results in
+// the high elements of the result register rather than the low
+// elements). Until that's fixed, we can't code-gen those.
+multiclass SIMDNarrowThreeVectorBHS<bit U, bits<4> opc, string asm,
+ Intrinsic IntOp> {
+ def v8i16_v8i8 : BaseSIMDDifferentThreeVector<U, 0b000, opc,
+ V64, V128, V128,
+ asm, ".8b", ".8h", ".8h",
+ [(set (v8i8 V64:$Rd), (IntOp (v8i16 V128:$Rn), (v8i16 V128:$Rm)))]>;
+ def v8i16_v16i8 : BaseSIMDDifferentThreeVectorTied<U, 0b001, opc,
+ V128, V128, V128,
+ asm#"2", ".16b", ".8h", ".8h",
+ []>;
+ def v4i32_v4i16 : BaseSIMDDifferentThreeVector<U, 0b010, opc,
+ V64, V128, V128,
+ asm, ".4h", ".4s", ".4s",
+ [(set (v4i16 V64:$Rd), (IntOp (v4i32 V128:$Rn), (v4i32 V128:$Rm)))]>;
+ def v4i32_v8i16 : BaseSIMDDifferentThreeVectorTied<U, 0b011, opc,
+ V128, V128, V128,
+ asm#"2", ".8h", ".4s", ".4s",
+ []>;
+ def v2i64_v2i32 : BaseSIMDDifferentThreeVector<U, 0b100, opc,
+ V64, V128, V128,
+ asm, ".2s", ".2d", ".2d",
+ [(set (v2i32 V64:$Rd), (IntOp (v2i64 V128:$Rn), (v2i64 V128:$Rm)))]>;
+ def v2i64_v4i32 : BaseSIMDDifferentThreeVectorTied<U, 0b101, opc,
+ V128, V128, V128,
+ asm#"2", ".4s", ".2d", ".2d",
+ []>;
+
+
+ // Patterns for the '2' variants involve INSERT_SUBREG, which you can't put in
+ // a version attached to an instruction.
+ def : Pat<(concat_vectors (v8i8 V64:$Rd), (IntOp (v8i16 V128:$Rn),
+ (v8i16 V128:$Rm))),
+ (!cast<Instruction>(NAME # "v8i16_v16i8")
+ (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
+ V128:$Rn, V128:$Rm)>;
+ def : Pat<(concat_vectors (v4i16 V64:$Rd), (IntOp (v4i32 V128:$Rn),
+ (v4i32 V128:$Rm))),
+ (!cast<Instruction>(NAME # "v4i32_v8i16")
+ (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
+ V128:$Rn, V128:$Rm)>;
+ def : Pat<(concat_vectors (v2i32 V64:$Rd), (IntOp (v2i64 V128:$Rn),
+ (v2i64 V128:$Rm))),
+ (!cast<Instruction>(NAME # "v2i64_v4i32")
+ (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
+ V128:$Rn, V128:$Rm)>;
+}
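+
+// Editorial sketch (not part of this patch): the standalone Pat records above
+// are the workaround described in the FIXME.  A narrowing operation whose
+// result lands in the high half, written in ISel terms roughly as
+//   (concat_vectors (low 64-bit half), (IntOp wide, wide))
+// is selected by first placing the low half into the destination register via
+// (INSERT_SUBREG (IMPLICIT_DEF), $Rd, dsub) and then emitting the tied "2"
+// variant, which only writes the upper 64 bits of the result.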
+
+multiclass SIMDDifferentThreeVectorBD<bit U, bits<4> opc, string asm,
+ Intrinsic IntOp> {
+ def v8i8 : BaseSIMDDifferentThreeVector<U, 0b000, opc,
+ V128, V64, V64,
+ asm, ".8h", ".8b", ".8b",
+ [(set (v8i16 V128:$Rd), (IntOp (v8i8 V64:$Rn), (v8i8 V64:$Rm)))]>;
+ def v16i8 : BaseSIMDDifferentThreeVector<U, 0b001, opc,
+ V128, V128, V128,
+ asm#"2", ".8h", ".16b", ".16b", []>;
+ let Predicates = [HasCrypto] in {
+ def v1i64 : BaseSIMDDifferentThreeVector<U, 0b110, opc,
+ V128, V64, V64,
+ asm, ".1q", ".1d", ".1d", []>;
+ def v2i64 : BaseSIMDDifferentThreeVector<U, 0b111, opc,
+ V128, V128, V128,
+ asm#"2", ".1q", ".2d", ".2d", []>;
+ }
+
+ def : Pat<(v8i16 (IntOp (v8i8 (extract_high_v16i8 V128:$Rn)),
+ (v8i8 (extract_high_v16i8 V128:$Rm)))),
+ (!cast<Instruction>(NAME#"v16i8") V128:$Rn, V128:$Rm)>;
+}
+
+multiclass SIMDLongThreeVectorHS<bit U, bits<4> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v4i16_v4i32 : BaseSIMDDifferentThreeVector<U, 0b010, opc,
+ V128, V64, V64,
+ asm, ".4s", ".4h", ".4h",
+ [(set (v4i32 V128:$Rd), (OpNode (v4i16 V64:$Rn), (v4i16 V64:$Rm)))]>;
+ def v8i16_v4i32 : BaseSIMDDifferentThreeVector<U, 0b011, opc,
+ V128, V128, V128,
+ asm#"2", ".4s", ".8h", ".8h",
+ [(set (v4i32 V128:$Rd), (OpNode (extract_high_v8i16 V128:$Rn),
+ (extract_high_v8i16 V128:$Rm)))]>;
+ def v2i32_v2i64 : BaseSIMDDifferentThreeVector<U, 0b100, opc,
+ V128, V64, V64,
+ asm, ".2d", ".2s", ".2s",
+ [(set (v2i64 V128:$Rd), (OpNode (v2i32 V64:$Rn), (v2i32 V64:$Rm)))]>;
+ def v4i32_v2i64 : BaseSIMDDifferentThreeVector<U, 0b101, opc,
+ V128, V128, V128,
+ asm#"2", ".2d", ".4s", ".4s",
+ [(set (v2i64 V128:$Rd), (OpNode (extract_high_v4i32 V128:$Rn),
+ (extract_high_v4i32 V128:$Rm)))]>;
+}
+
+multiclass SIMDLongThreeVectorBHSabdl<bit U, bits<4> opc, string asm,
+ SDPatternOperator OpNode = null_frag> {
+ def v8i8_v8i16 : BaseSIMDDifferentThreeVector<U, 0b000, opc,
+ V128, V64, V64,
+ asm, ".8h", ".8b", ".8b",
+ [(set (v8i16 V128:$Rd),
+ (zext (v8i8 (OpNode (v8i8 V64:$Rn), (v8i8 V64:$Rm)))))]>;
+ def v16i8_v8i16 : BaseSIMDDifferentThreeVector<U, 0b001, opc,
+ V128, V128, V128,
+ asm#"2", ".8h", ".16b", ".16b",
+ [(set (v8i16 V128:$Rd),
+ (zext (v8i8 (OpNode (extract_high_v16i8 V128:$Rn),
+ (extract_high_v16i8 V128:$Rm)))))]>;
+ def v4i16_v4i32 : BaseSIMDDifferentThreeVector<U, 0b010, opc,
+ V128, V64, V64,
+ asm, ".4s", ".4h", ".4h",
+ [(set (v4i32 V128:$Rd),
+ (zext (v4i16 (OpNode (v4i16 V64:$Rn), (v4i16 V64:$Rm)))))]>;
+ def v8i16_v4i32 : BaseSIMDDifferentThreeVector<U, 0b011, opc,
+ V128, V128, V128,
+ asm#"2", ".4s", ".8h", ".8h",
+ [(set (v4i32 V128:$Rd),
+ (zext (v4i16 (OpNode (extract_high_v8i16 V128:$Rn),
+ (extract_high_v8i16 V128:$Rm)))))]>;
+ def v2i32_v2i64 : BaseSIMDDifferentThreeVector<U, 0b100, opc,
+ V128, V64, V64,
+ asm, ".2d", ".2s", ".2s",
+ [(set (v2i64 V128:$Rd),
+ (zext (v2i32 (OpNode (v2i32 V64:$Rn), (v2i32 V64:$Rm)))))]>;
+ def v4i32_v2i64 : BaseSIMDDifferentThreeVector<U, 0b101, opc,
+ V128, V128, V128,
+ asm#"2", ".2d", ".4s", ".4s",
+ [(set (v2i64 V128:$Rd),
+ (zext (v2i32 (OpNode (extract_high_v4i32 V128:$Rn),
+ (extract_high_v4i32 V128:$Rm)))))]>;
+}
+
+multiclass SIMDLongThreeVectorTiedBHSabal<bit U, bits<4> opc,
+ string asm,
+ SDPatternOperator OpNode> {
+ def v8i8_v8i16 : BaseSIMDDifferentThreeVectorTied<U, 0b000, opc,
+ V128, V64, V64,
+ asm, ".8h", ".8b", ".8b",
+ [(set (v8i16 V128:$dst),
+ (add (v8i16 V128:$Rd),
+ (zext (v8i8 (OpNode (v8i8 V64:$Rn), (v8i8 V64:$Rm))))))]>;
+ def v16i8_v8i16 : BaseSIMDDifferentThreeVectorTied<U, 0b001, opc,
+ V128, V128, V128,
+ asm#"2", ".8h", ".16b", ".16b",
+ [(set (v8i16 V128:$dst),
+ (add (v8i16 V128:$Rd),
+ (zext (v8i8 (OpNode (extract_high_v16i8 V128:$Rn),
+ (extract_high_v16i8 V128:$Rm))))))]>;
+ def v4i16_v4i32 : BaseSIMDDifferentThreeVectorTied<U, 0b010, opc,
+ V128, V64, V64,
+ asm, ".4s", ".4h", ".4h",
+ [(set (v4i32 V128:$dst),
+ (add (v4i32 V128:$Rd),
+ (zext (v4i16 (OpNode (v4i16 V64:$Rn), (v4i16 V64:$Rm))))))]>;
+ def v8i16_v4i32 : BaseSIMDDifferentThreeVectorTied<U, 0b011, opc,
+ V128, V128, V128,
+ asm#"2", ".4s", ".8h", ".8h",
+ [(set (v4i32 V128:$dst),
+ (add (v4i32 V128:$Rd),
+ (zext (v4i16 (OpNode (extract_high_v8i16 V128:$Rn),
+ (extract_high_v8i16 V128:$Rm))))))]>;
+ def v2i32_v2i64 : BaseSIMDDifferentThreeVectorTied<U, 0b100, opc,
+ V128, V64, V64,
+ asm, ".2d", ".2s", ".2s",
+ [(set (v2i64 V128:$dst),
+ (add (v2i64 V128:$Rd),
+ (zext (v2i32 (OpNode (v2i32 V64:$Rn), (v2i32 V64:$Rm))))))]>;
+ def v4i32_v2i64 : BaseSIMDDifferentThreeVectorTied<U, 0b101, opc,
+ V128, V128, V128,
+ asm#"2", ".2d", ".4s", ".4s",
+ [(set (v2i64 V128:$dst),
+ (add (v2i64 V128:$Rd),
+ (zext (v2i32 (OpNode (extract_high_v4i32 V128:$Rn),
+ (extract_high_v4i32 V128:$Rm))))))]>;
+}
+
+multiclass SIMDLongThreeVectorBHS<bit U, bits<4> opc, string asm,
+ SDPatternOperator OpNode = null_frag> {
+ def v8i8_v8i16 : BaseSIMDDifferentThreeVector<U, 0b000, opc,
+ V128, V64, V64,
+ asm, ".8h", ".8b", ".8b",
+ [(set (v8i16 V128:$Rd), (OpNode (v8i8 V64:$Rn), (v8i8 V64:$Rm)))]>;
+ def v16i8_v8i16 : BaseSIMDDifferentThreeVector<U, 0b001, opc,
+ V128, V128, V128,
+ asm#"2", ".8h", ".16b", ".16b",
+ [(set (v8i16 V128:$Rd), (OpNode (extract_high_v16i8 V128:$Rn),
+ (extract_high_v16i8 V128:$Rm)))]>;
+ def v4i16_v4i32 : BaseSIMDDifferentThreeVector<U, 0b010, opc,
+ V128, V64, V64,
+ asm, ".4s", ".4h", ".4h",
+ [(set (v4i32 V128:$Rd), (OpNode (v4i16 V64:$Rn), (v4i16 V64:$Rm)))]>;
+ def v8i16_v4i32 : BaseSIMDDifferentThreeVector<U, 0b011, opc,
+ V128, V128, V128,
+ asm#"2", ".4s", ".8h", ".8h",
+ [(set (v4i32 V128:$Rd), (OpNode (extract_high_v8i16 V128:$Rn),
+ (extract_high_v8i16 V128:$Rm)))]>;
+ def v2i32_v2i64 : BaseSIMDDifferentThreeVector<U, 0b100, opc,
+ V128, V64, V64,
+ asm, ".2d", ".2s", ".2s",
+ [(set (v2i64 V128:$Rd), (OpNode (v2i32 V64:$Rn), (v2i32 V64:$Rm)))]>;
+ def v4i32_v2i64 : BaseSIMDDifferentThreeVector<U, 0b101, opc,
+ V128, V128, V128,
+ asm#"2", ".2d", ".4s", ".4s",
+ [(set (v2i64 V128:$Rd), (OpNode (extract_high_v4i32 V128:$Rn),
+ (extract_high_v4i32 V128:$Rm)))]>;
+}
+
+multiclass SIMDLongThreeVectorTiedBHS<bit U, bits<4> opc,
+ string asm,
+ SDPatternOperator OpNode> {
+ def v8i8_v8i16 : BaseSIMDDifferentThreeVectorTied<U, 0b000, opc,
+ V128, V64, V64,
+ asm, ".8h", ".8b", ".8b",
+ [(set (v8i16 V128:$dst),
+ (OpNode (v8i16 V128:$Rd), (v8i8 V64:$Rn), (v8i8 V64:$Rm)))]>;
+ def v16i8_v8i16 : BaseSIMDDifferentThreeVectorTied<U, 0b001, opc,
+ V128, V128, V128,
+ asm#"2", ".8h", ".16b", ".16b",
+ [(set (v8i16 V128:$dst),
+ (OpNode (v8i16 V128:$Rd),
+ (extract_high_v16i8 V128:$Rn),
+ (extract_high_v16i8 V128:$Rm)))]>;
+ def v4i16_v4i32 : BaseSIMDDifferentThreeVectorTied<U, 0b010, opc,
+ V128, V64, V64,
+ asm, ".4s", ".4h", ".4h",
+ [(set (v4i32 V128:$dst),
+ (OpNode (v4i32 V128:$Rd), (v4i16 V64:$Rn), (v4i16 V64:$Rm)))]>;
+ def v8i16_v4i32 : BaseSIMDDifferentThreeVectorTied<U, 0b011, opc,
+ V128, V128, V128,
+ asm#"2", ".4s", ".8h", ".8h",
+ [(set (v4i32 V128:$dst),
+ (OpNode (v4i32 V128:$Rd),
+ (extract_high_v8i16 V128:$Rn),
+ (extract_high_v8i16 V128:$Rm)))]>;
+ def v2i32_v2i64 : BaseSIMDDifferentThreeVectorTied<U, 0b100, opc,
+ V128, V64, V64,
+ asm, ".2d", ".2s", ".2s",
+ [(set (v2i64 V128:$dst),
+ (OpNode (v2i64 V128:$Rd), (v2i32 V64:$Rn), (v2i32 V64:$Rm)))]>;
+ def v4i32_v2i64 : BaseSIMDDifferentThreeVectorTied<U, 0b101, opc,
+ V128, V128, V128,
+ asm#"2", ".2d", ".4s", ".4s",
+ [(set (v2i64 V128:$dst),
+ (OpNode (v2i64 V128:$Rd),
+ (extract_high_v4i32 V128:$Rn),
+ (extract_high_v4i32 V128:$Rm)))]>;
+}
+
+multiclass SIMDLongThreeVectorSQDMLXTiedHS<bit U, bits<4> opc, string asm,
+ SDPatternOperator Accum> {
+ def v4i16_v4i32 : BaseSIMDDifferentThreeVectorTied<U, 0b010, opc,
+ V128, V64, V64,
+ asm, ".4s", ".4h", ".4h",
+ [(set (v4i32 V128:$dst),
+ (Accum (v4i32 V128:$Rd),
+ (v4i32 (int_aarch64_neon_sqdmull (v4i16 V64:$Rn),
+ (v4i16 V64:$Rm)))))]>;
+ def v8i16_v4i32 : BaseSIMDDifferentThreeVectorTied<U, 0b011, opc,
+ V128, V128, V128,
+ asm#"2", ".4s", ".8h", ".8h",
+ [(set (v4i32 V128:$dst),
+ (Accum (v4i32 V128:$Rd),
+ (v4i32 (int_aarch64_neon_sqdmull (extract_high_v8i16 V128:$Rn),
+ (extract_high_v8i16 V128:$Rm)))))]>;
+ def v2i32_v2i64 : BaseSIMDDifferentThreeVectorTied<U, 0b100, opc,
+ V128, V64, V64,
+ asm, ".2d", ".2s", ".2s",
+ [(set (v2i64 V128:$dst),
+ (Accum (v2i64 V128:$Rd),
+ (v2i64 (int_aarch64_neon_sqdmull (v2i32 V64:$Rn),
+ (v2i32 V64:$Rm)))))]>;
+ def v4i32_v2i64 : BaseSIMDDifferentThreeVectorTied<U, 0b101, opc,
+ V128, V128, V128,
+ asm#"2", ".2d", ".4s", ".4s",
+ [(set (v2i64 V128:$dst),
+ (Accum (v2i64 V128:$Rd),
+ (v2i64 (int_aarch64_neon_sqdmull (extract_high_v4i32 V128:$Rn),
+ (extract_high_v4i32 V128:$Rm)))))]>;
+}
+
+multiclass SIMDWideThreeVectorBHS<bit U, bits<4> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v8i8_v8i16 : BaseSIMDDifferentThreeVector<U, 0b000, opc,
+ V128, V128, V64,
+ asm, ".8h", ".8h", ".8b",
+ [(set (v8i16 V128:$Rd), (OpNode (v8i16 V128:$Rn), (v8i8 V64:$Rm)))]>;
+ def v16i8_v8i16 : BaseSIMDDifferentThreeVector<U, 0b001, opc,
+ V128, V128, V128,
+ asm#"2", ".8h", ".8h", ".16b",
+ [(set (v8i16 V128:$Rd), (OpNode (v8i16 V128:$Rn),
+ (extract_high_v16i8 V128:$Rm)))]>;
+ def v4i16_v4i32 : BaseSIMDDifferentThreeVector<U, 0b010, opc,
+ V128, V128, V64,
+ asm, ".4s", ".4s", ".4h",
+ [(set (v4i32 V128:$Rd), (OpNode (v4i32 V128:$Rn), (v4i16 V64:$Rm)))]>;
+ def v8i16_v4i32 : BaseSIMDDifferentThreeVector<U, 0b011, opc,
+ V128, V128, V128,
+ asm#"2", ".4s", ".4s", ".8h",
+ [(set (v4i32 V128:$Rd), (OpNode (v4i32 V128:$Rn),
+ (extract_high_v8i16 V128:$Rm)))]>;
+ def v2i32_v2i64 : BaseSIMDDifferentThreeVector<U, 0b100, opc,
+ V128, V128, V64,
+ asm, ".2d", ".2d", ".2s",
+ [(set (v2i64 V128:$Rd), (OpNode (v2i64 V128:$Rn), (v2i32 V64:$Rm)))]>;
+ def v4i32_v2i64 : BaseSIMDDifferentThreeVector<U, 0b101, opc,
+ V128, V128, V128,
+ asm#"2", ".2d", ".2d", ".4s",
+ [(set (v2i64 V128:$Rd), (OpNode (v2i64 V128:$Rn),
+ (extract_high_v4i32 V128:$Rm)))]>;
+}
+
+//----------------------------------------------------------------------------
+// AdvSIMD bitwise extract from vector
+//----------------------------------------------------------------------------
+
+class BaseSIMDBitwiseExtract<bit size, RegisterOperand regtype, ValueType vty,
+ string asm, string kind>
+ : I<(outs regtype:$Rd), (ins regtype:$Rn, regtype:$Rm, i32imm:$imm), asm,
+ "{\t$Rd" # kind # ", $Rn" # kind # ", $Rm" # kind # ", $imm" #
+ "|" # kind # "\t$Rd, $Rn, $Rm, $imm}", "",
+ [(set (vty regtype:$Rd),
+ (AArch64ext regtype:$Rn, regtype:$Rm, (i32 imm:$imm)))]>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+ bits<4> imm;
+ let Inst{31} = 0;
+ let Inst{30} = size;
+ let Inst{29-21} = 0b101110000;
+ let Inst{20-16} = Rm;
+ let Inst{15} = 0;
+ let Inst{14-11} = imm;
+ let Inst{10} = 0;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+
+multiclass SIMDBitwiseExtract<string asm> {
+ def v8i8 : BaseSIMDBitwiseExtract<0, V64, v8i8, asm, ".8b"> {
+ let imm{3} = 0;
+ }
+ def v16i8 : BaseSIMDBitwiseExtract<1, V128, v16i8, asm, ".16b">;
+}
+
+//----------------------------------------------------------------------------
+// AdvSIMD zip vector
+//----------------------------------------------------------------------------
+
+class BaseSIMDZipVector<bits<3> size, bits<3> opc, RegisterOperand regtype,
+ string asm, string kind, SDNode OpNode, ValueType valty>
+ : I<(outs regtype:$Rd), (ins regtype:$Rn, regtype:$Rm), asm,
+ "{\t$Rd" # kind # ", $Rn" # kind # ", $Rm" # kind #
+ "|" # kind # "\t$Rd, $Rn, $Rm}", "",
+ [(set (valty regtype:$Rd), (OpNode regtype:$Rn, regtype:$Rm))]>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+ let Inst{31} = 0;
+ let Inst{30} = size{0};
+ let Inst{29-24} = 0b001110;
+ let Inst{23-22} = size{2-1};
+ let Inst{21} = 0;
+ let Inst{20-16} = Rm;
+ let Inst{15} = 0;
+ let Inst{14-12} = opc;
+ let Inst{11-10} = 0b10;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+multiclass SIMDZipVector<bits<3>opc, string asm,
+ SDNode OpNode> {
+ def v8i8 : BaseSIMDZipVector<0b000, opc, V64,
+ asm, ".8b", OpNode, v8i8>;
+ def v16i8 : BaseSIMDZipVector<0b001, opc, V128,
+ asm, ".16b", OpNode, v16i8>;
+ def v4i16 : BaseSIMDZipVector<0b010, opc, V64,
+ asm, ".4h", OpNode, v4i16>;
+ def v8i16 : BaseSIMDZipVector<0b011, opc, V128,
+ asm, ".8h", OpNode, v8i16>;
+ def v2i32 : BaseSIMDZipVector<0b100, opc, V64,
+ asm, ".2s", OpNode, v2i32>;
+ def v4i32 : BaseSIMDZipVector<0b101, opc, V128,
+ asm, ".4s", OpNode, v4i32>;
+ def v2i64 : BaseSIMDZipVector<0b111, opc, V128,
+ asm, ".2d", OpNode, v2i64>;
+
+ def : Pat<(v2f32 (OpNode V64:$Rn, V64:$Rm)),
+ (!cast<Instruction>(NAME#"v2i32") V64:$Rn, V64:$Rm)>;
+ def : Pat<(v4f32 (OpNode V128:$Rn, V128:$Rm)),
+ (!cast<Instruction>(NAME#"v4i32") V128:$Rn, V128:$Rm)>;
+ def : Pat<(v2f64 (OpNode V128:$Rn, V128:$Rm)),
+ (!cast<Instruction>(NAME#"v2i64") V128:$Rn, V128:$Rm)>;
+}
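+
+// Editorial note (illustrative): permutes of this kind are pure data
+// movement, so no separate floating-point records are defined; the trailing
+// Pat lines above simply remap v2f32/v4f32/v2f64 uses of OpNode onto the
+// integer-typed .2s/.4s/.2d defs, e.g. a v4f32 zip-style node selects the
+// same instruction as the v4i32 one.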
+
+//----------------------------------------------------------------------------
+// AdvSIMD three register scalar instructions
+//----------------------------------------------------------------------------
+
+let mayStore = 0, mayLoad = 0, hasSideEffects = 0 in
+class BaseSIMDThreeScalar<bit U, bits<2> size, bits<5> opcode,
+ RegisterClass regtype, string asm,
+ list<dag> pattern>
+ : I<(outs regtype:$Rd), (ins regtype:$Rn, regtype:$Rm), asm,
+ "\t$Rd, $Rn, $Rm", "", pattern>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+ let Inst{31-30} = 0b01;
+ let Inst{29} = U;
+ let Inst{28-24} = 0b11110;
+ let Inst{23-22} = size;
+ let Inst{21} = 1;
+ let Inst{20-16} = Rm;
+ let Inst{15-11} = opcode;
+ let Inst{10} = 1;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+multiclass SIMDThreeScalarD<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v1i64 : BaseSIMDThreeScalar<U, 0b11, opc, FPR64, asm,
+ [(set (v1i64 FPR64:$Rd), (OpNode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm)))]>;
+}
+
+multiclass SIMDThreeScalarBHSD<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v1i64 : BaseSIMDThreeScalar<U, 0b11, opc, FPR64, asm,
+ [(set (v1i64 FPR64:$Rd), (OpNode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm)))]>;
+ def v1i32 : BaseSIMDThreeScalar<U, 0b10, opc, FPR32, asm, []>;
+ def v1i16 : BaseSIMDThreeScalar<U, 0b01, opc, FPR16, asm, []>;
+ def v1i8 : BaseSIMDThreeScalar<U, 0b00, opc, FPR8 , asm, []>;
+
+ def : Pat<(i64 (OpNode (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
+ (!cast<Instruction>(NAME#"v1i64") FPR64:$Rn, FPR64:$Rm)>;
+ def : Pat<(i32 (OpNode (i32 FPR32:$Rn), (i32 FPR32:$Rm))),
+ (!cast<Instruction>(NAME#"v1i32") FPR32:$Rn, FPR32:$Rm)>;
+}
+
+multiclass SIMDThreeScalarHS<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v1i32 : BaseSIMDThreeScalar<U, 0b10, opc, FPR32, asm,
+ [(set FPR32:$Rd, (OpNode FPR32:$Rn, FPR32:$Rm))]>;
+ def v1i16 : BaseSIMDThreeScalar<U, 0b01, opc, FPR16, asm, []>;
+}
+
+multiclass SIMDThreeScalarSD<bit U, bit S, bits<5> opc, string asm,
+ SDPatternOperator OpNode = null_frag> {
+ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
+ def #NAME#64 : BaseSIMDThreeScalar<U, {S,1}, opc, FPR64, asm,
+ [(set (f64 FPR64:$Rd), (OpNode (f64 FPR64:$Rn), (f64 FPR64:$Rm)))]>;
+ def #NAME#32 : BaseSIMDThreeScalar<U, {S,0}, opc, FPR32, asm,
+ [(set FPR32:$Rd, (OpNode FPR32:$Rn, FPR32:$Rm))]>;
+ }
+
+ def : Pat<(v1f64 (OpNode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
+ (!cast<Instruction>(NAME # "64") FPR64:$Rn, FPR64:$Rm)>;
+}
-class NeonI_Scalar3Same<bit u, bits<2> size, bits<5> opcode,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
- let Inst{31} = 0b0;
- let Inst{30} = 0b1;
- let Inst{29} = u;
+multiclass SIMDThreeScalarFPCmp<bit U, bit S, bits<5> opc, string asm,
+ SDPatternOperator OpNode = null_frag> {
+ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
+ def #NAME#64 : BaseSIMDThreeScalar<U, {S,1}, opc, FPR64, asm,
+ [(set (i64 FPR64:$Rd), (OpNode (f64 FPR64:$Rn), (f64 FPR64:$Rm)))]>;
+ def #NAME#32 : BaseSIMDThreeScalar<U, {S,0}, opc, FPR32, asm,
+ [(set (i32 FPR32:$Rd), (OpNode (f32 FPR32:$Rn), (f32 FPR32:$Rm)))]>;
+ }
+
+ def : Pat<(v1i64 (OpNode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
+ (!cast<Instruction>(NAME # "64") FPR64:$Rn, FPR64:$Rm)>;
+}
+
+class BaseSIMDThreeScalarMixed<bit U, bits<2> size, bits<5> opcode,
+ dag oops, dag iops, string asm, string cstr, list<dag> pat>
+ : I<oops, iops, asm,
+ "\t$Rd, $Rn, $Rm", cstr, pat>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+ let Inst{31-30} = 0b01;
+ let Inst{29} = U;
let Inst{28-24} = 0b11110;
let Inst{23-22} = size;
- let Inst{21} = 0b1;
- // Inherit Rm in 20-16
+ let Inst{21} = 1;
+ let Inst{20-16} = Rm;
let Inst{15-11} = opcode;
- let Inst{10} = 0b1;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
+ let Inst{10} = 0;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+multiclass SIMDThreeScalarMixedHS<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode = null_frag> {
+ def i16 : BaseSIMDThreeScalarMixed<U, 0b01, opc,
+ (outs FPR32:$Rd),
+ (ins FPR16:$Rn, FPR16:$Rm), asm, "", []>;
+ def i32 : BaseSIMDThreeScalarMixed<U, 0b10, opc,
+ (outs FPR64:$Rd),
+ (ins FPR32:$Rn, FPR32:$Rm), asm, "",
+ [(set (i64 FPR64:$Rd), (OpNode (i32 FPR32:$Rn), (i32 FPR32:$Rm)))]>;
}
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+multiclass SIMDThreeScalarMixedTiedHS<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode = null_frag> {
+ def i16 : BaseSIMDThreeScalarMixed<U, 0b01, opc,
+ (outs FPR32:$dst),
+ (ins FPR32:$Rd, FPR16:$Rn, FPR16:$Rm),
+ asm, "$Rd = $dst", []>;
+ def i32 : BaseSIMDThreeScalarMixed<U, 0b10, opc,
+ (outs FPR64:$dst),
+ (ins FPR64:$Rd, FPR32:$Rn, FPR32:$Rm),
+ asm, "$Rd = $dst",
+ [(set (i64 FPR64:$dst),
+ (OpNode (i64 FPR64:$Rd), (i32 FPR32:$Rn), (i32 FPR32:$Rm)))]>;
+}
-// Format AdvSIMD 2 vector registers miscellaneous
-class NeonI_2VMisc<bit q, bit u, bits<2> size, bits<5> opcode,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin> {
- let Inst{31} = 0b0;
- let Inst{30} = q;
- let Inst{29} = u;
- let Inst{28-24} = 0b01110;
+//----------------------------------------------------------------------------
+// AdvSIMD two register scalar instructions
+//----------------------------------------------------------------------------
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseSIMDTwoScalar<bit U, bits<2> size, bits<5> opcode,
+ RegisterClass regtype, RegisterClass regtype2,
+ string asm, list<dag> pat>
+ : I<(outs regtype:$Rd), (ins regtype2:$Rn), asm,
+ "\t$Rd, $Rn", "", pat>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31-30} = 0b01;
+ let Inst{29} = U;
+ let Inst{28-24} = 0b11110;
let Inst{23-22} = size;
let Inst{21-17} = 0b10000;
let Inst{16-12} = opcode;
let Inst{11-10} = 0b10;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseSIMDTwoScalarTied<bit U, bits<2> size, bits<5> opcode,
+ RegisterClass regtype, RegisterClass regtype2,
+ string asm, list<dag> pat>
+ : I<(outs regtype:$dst), (ins regtype:$Rd, regtype2:$Rn), asm,
+ "\t$Rd, $Rn", "$Rd = $dst", pat>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31-30} = 0b01;
+ let Inst{29} = U;
+ let Inst{28-24} = 0b11110;
+ let Inst{23-22} = size;
+ let Inst{21-17} = 0b10000;
+ let Inst{16-12} = opcode;
+ let Inst{11-10} = 0b10;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
}
-// Format AdvSIMD 2 vector 1 immediate shift
-class NeonI_2VShiftImm<bit q, bit u, bits<5> opcode,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin> {
- bits<7> Imm;
- let Inst{31} = 0b0;
- let Inst{30} = q;
- let Inst{29} = u;
- let Inst{28-23} = 0b011110;
- let Inst{22-16} = Imm;
- let Inst{15-11} = opcode;
- let Inst{10} = 0b1;
-
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
-}
-
-// Format AdvSIMD duplicate and insert
-class NeonI_copy<bit q, bit op, bits<4> imm4,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin> {
- bits<5> Imm5;
- let Inst{31} = 0b0;
- let Inst{30} = q;
- let Inst{29} = op;
- let Inst{28-21} = 0b01110000;
- let Inst{20-16} = Imm5;
- let Inst{15} = 0b0;
- let Inst{14-11} = imm4;
- let Inst{10} = 0b1;
-
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
-}
-// Format AdvSIMD insert from element to vector
-class NeonI_insert<bit q, bit op,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin> {
- bits<5> Imm5;
- bits<4> Imm4;
- let Inst{31} = 0b0;
- let Inst{30} = q;
- let Inst{29} = op;
- let Inst{28-21} = 0b01110000;
- let Inst{20-16} = Imm5;
- let Inst{15} = 0b0;
- let Inst{14-11} = Imm4;
- let Inst{10} = 0b1;
-
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
-}
-
-// Format AdvSIMD scalar pairwise
-class NeonI_ScalarPair<bit u, bits<2> size, bits<5> opcode,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin> {
- let Inst{31} = 0b0;
- let Inst{30} = 0b1;
- let Inst{29} = u;
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseSIMDCmpTwoScalar<bit U, bits<2> size, bits<5> opcode,
+ RegisterClass regtype, string asm, string zero>
+ : I<(outs regtype:$Rd), (ins regtype:$Rn), asm,
+ "\t$Rd, $Rn, #" # zero, "", []>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31-30} = 0b01;
+ let Inst{29} = U;
let Inst{28-24} = 0b11110;
let Inst{23-22} = size;
- let Inst{21-17} = 0b11000;
+ let Inst{21-17} = 0b10000;
+ let Inst{16-12} = opcode;
+ let Inst{11-10} = 0b10;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+class SIMDInexactCvtTwoScalar<bits<5> opcode, string asm>
+ : I<(outs FPR32:$Rd), (ins FPR64:$Rn), asm, "\t$Rd, $Rn", "",
+ [(set (f32 FPR32:$Rd), (int_aarch64_sisd_fcvtxn (f64 FPR64:$Rn)))]>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31-17} = 0b011111100110000;
let Inst{16-12} = opcode;
let Inst{11-10} = 0b10;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+multiclass SIMDCmpTwoScalarD<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v1i64rz : BaseSIMDCmpTwoScalar<U, 0b11, opc, FPR64, asm, "0">;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
+ def : Pat<(v1i64 (OpNode FPR64:$Rn)),
+ (!cast<Instruction>(NAME # v1i64rz) FPR64:$Rn)>;
}
-// Format AdvSIMD 2 vector across lanes
-class NeonI_2VAcross<bit q, bit u, bits<2> size, bits<5> opcode,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin>
-{
- let Inst{31} = 0b0;
- let Inst{30} = q;
- let Inst{29} = u;
- let Inst{28-24} = 0b01110;
+multiclass SIMDCmpTwoScalarSD<bit U, bit S, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v1i64rz : BaseSIMDCmpTwoScalar<U, {S,1}, opc, FPR64, asm, "0.0">;
+ def v1i32rz : BaseSIMDCmpTwoScalar<U, {S,0}, opc, FPR32, asm, "0.0">;
+
+ def : InstAlias<asm # " $Rd, $Rn, #0",
+ (!cast<Instruction>(NAME # v1i64rz) FPR64:$Rd, FPR64:$Rn), 0>;
+ def : InstAlias<asm # " $Rd, $Rn, #0",
+ (!cast<Instruction>(NAME # v1i32rz) FPR32:$Rd, FPR32:$Rn), 0>;
+
+ def : Pat<(v1i64 (OpNode (v1f64 FPR64:$Rn))),
+ (!cast<Instruction>(NAME # v1i64rz) FPR64:$Rn)>;
+}
+
+multiclass SIMDTwoScalarD<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode = null_frag> {
+ def v1i64 : BaseSIMDTwoScalar<U, 0b11, opc, FPR64, FPR64, asm,
+ [(set (v1i64 FPR64:$Rd), (OpNode (v1i64 FPR64:$Rn)))]>;
+
+ def : Pat<(i64 (OpNode (i64 FPR64:$Rn))),
+ (!cast<Instruction>(NAME # "v1i64") FPR64:$Rn)>;
+}
+
+multiclass SIMDTwoScalarSD<bit U, bit S, bits<5> opc, string asm> {
+ def v1i64 : BaseSIMDTwoScalar<U, {S,1}, opc, FPR64, FPR64, asm,[]>;
+ def v1i32 : BaseSIMDTwoScalar<U, {S,0}, opc, FPR32, FPR32, asm,[]>;
+}
+
+multiclass SIMDTwoScalarCVTSD<bit U, bit S, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v1i64 : BaseSIMDTwoScalar<U, {S,1}, opc, FPR64, FPR64, asm,
+ [(set FPR64:$Rd, (OpNode (f64 FPR64:$Rn)))]>;
+ def v1i32 : BaseSIMDTwoScalar<U, {S,0}, opc, FPR32, FPR32, asm,
+ [(set FPR32:$Rd, (OpNode (f32 FPR32:$Rn)))]>;
+}
+
+multiclass SIMDTwoScalarBHSD<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode = null_frag> {
+ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
+ def v1i64 : BaseSIMDTwoScalar<U, 0b11, opc, FPR64, FPR64, asm,
+ [(set (i64 FPR64:$Rd), (OpNode (i64 FPR64:$Rn)))]>;
+ def v1i32 : BaseSIMDTwoScalar<U, 0b10, opc, FPR32, FPR32, asm,
+ [(set (i32 FPR32:$Rd), (OpNode (i32 FPR32:$Rn)))]>;
+ def v1i16 : BaseSIMDTwoScalar<U, 0b01, opc, FPR16, FPR16, asm, []>;
+ def v1i8 : BaseSIMDTwoScalar<U, 0b00, opc, FPR8 , FPR8 , asm, []>;
+ }
+
+ def : Pat<(v1i64 (OpNode (v1i64 FPR64:$Rn))),
+ (!cast<Instruction>(NAME # v1i64) FPR64:$Rn)>;
+}
+
+multiclass SIMDTwoScalarBHSDTied<bit U, bits<5> opc, string asm,
+ Intrinsic OpNode> {
+ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
+ def v1i64 : BaseSIMDTwoScalarTied<U, 0b11, opc, FPR64, FPR64, asm,
+ [(set (i64 FPR64:$dst), (OpNode (i64 FPR64:$Rd), (i64 FPR64:$Rn)))]>;
+ def v1i32 : BaseSIMDTwoScalarTied<U, 0b10, opc, FPR32, FPR32, asm,
+ [(set (i32 FPR32:$dst), (OpNode (i32 FPR32:$Rd), (i32 FPR32:$Rn)))]>;
+ def v1i16 : BaseSIMDTwoScalarTied<U, 0b01, opc, FPR16, FPR16, asm, []>;
+ def v1i8 : BaseSIMDTwoScalarTied<U, 0b00, opc, FPR8 , FPR8 , asm, []>;
+ }
+
+ def : Pat<(v1i64 (OpNode (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn))),
+ (!cast<Instruction>(NAME # v1i64) FPR64:$Rd, FPR64:$Rn)>;
+}
+
+
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+multiclass SIMDTwoScalarMixedBHS<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode = null_frag> {
+ def v1i32 : BaseSIMDTwoScalar<U, 0b10, opc, FPR32, FPR64, asm,
+ [(set (i32 FPR32:$Rd), (OpNode (i64 FPR64:$Rn)))]>;
+ def v1i16 : BaseSIMDTwoScalar<U, 0b01, opc, FPR16, FPR32, asm, []>;
+ def v1i8 : BaseSIMDTwoScalar<U, 0b00, opc, FPR8 , FPR16, asm, []>;
+}
+
+//----------------------------------------------------------------------------
+// AdvSIMD scalar pairwise instructions
+//----------------------------------------------------------------------------
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseSIMDPairwiseScalar<bit U, bits<2> size, bits<5> opcode,
+ RegisterOperand regtype, RegisterOperand vectype,
+ string asm, string kind>
+ : I<(outs regtype:$Rd), (ins vectype:$Rn), asm,
+ "{\t$Rd, $Rn" # kind # "|" # kind # "\t$Rd, $Rn}", "", []>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31-30} = 0b01;
+ let Inst{29} = U;
+ let Inst{28-24} = 0b11110;
let Inst{23-22} = size;
let Inst{21-17} = 0b11000;
let Inst{16-12} = opcode;
let Inst{11-10} = 0b10;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
+multiclass SIMDPairwiseScalarD<bit U, bits<5> opc, string asm> {
+ def v2i64p : BaseSIMDPairwiseScalar<U, 0b11, opc, FPR64Op, V128,
+ asm, ".2d">;
}
-// Format AdvSIMD scalar two registers miscellaneous
-class NeonI_Scalar2SameMisc<bit u, bits<2> size, bits<5> opcode, dag outs, dag ins,
- string asmstr, list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin> {
- let Inst{31} = 0b0;
- let Inst{30} = 0b1;
- let Inst{29} = u;
- let Inst{28-24} = 0b11110;
+multiclass SIMDPairwiseScalarSD<bit U, bit S, bits<5> opc, string asm> {
+ def v2i32p : BaseSIMDPairwiseScalar<U, {S,0}, opc, FPR32Op, V64,
+ asm, ".2s">;
+ def v2i64p : BaseSIMDPairwiseScalar<U, {S,1}, opc, FPR64Op, V128,
+ asm, ".2d">;
+}
+
+//----------------------------------------------------------------------------
+// AdvSIMD across lanes instructions
+//----------------------------------------------------------------------------
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseSIMDAcrossLanes<bit Q, bit U, bits<2> size, bits<5> opcode,
+ RegisterClass regtype, RegisterOperand vectype,
+ string asm, string kind, list<dag> pattern>
+ : I<(outs regtype:$Rd), (ins vectype:$Rn), asm,
+ "{\t$Rd, $Rn" # kind # "|" # kind # "\t$Rd, $Rn}", "", pattern>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31} = 0;
+ let Inst{30} = Q;
+ let Inst{29} = U;
+ let Inst{28-24} = 0b01110;
let Inst{23-22} = size;
- let Inst{21-17} = 0b10000;
+ let Inst{21-17} = 0b11000;
let Inst{16-12} = opcode;
let Inst{11-10} = 0b10;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
-}
-
-// Format AdvSIMD vector load/store multiple N-element structure
-class NeonI_LdStMult<bit q, bit l, bits<4> opcode, bits<2> size,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRtn<outs, ins, asmstr, patterns, itin>
-{
- let Inst{31} = 0b0;
- let Inst{30} = q;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+multiclass SIMDAcrossLanesBHS<bit U, bits<5> opcode,
+ string asm> {
+ def v8i8v : BaseSIMDAcrossLanes<0, U, 0b00, opcode, FPR8, V64,
+ asm, ".8b", []>;
+ def v16i8v : BaseSIMDAcrossLanes<1, U, 0b00, opcode, FPR8, V128,
+ asm, ".16b", []>;
+ def v4i16v : BaseSIMDAcrossLanes<0, U, 0b01, opcode, FPR16, V64,
+ asm, ".4h", []>;
+ def v8i16v : BaseSIMDAcrossLanes<1, U, 0b01, opcode, FPR16, V128,
+ asm, ".8h", []>;
+ def v4i32v : BaseSIMDAcrossLanes<1, U, 0b10, opcode, FPR32, V128,
+ asm, ".4s", []>;
+}
+
+multiclass SIMDAcrossLanesHSD<bit U, bits<5> opcode, string asm> {
+ def v8i8v : BaseSIMDAcrossLanes<0, U, 0b00, opcode, FPR16, V64,
+ asm, ".8b", []>;
+ def v16i8v : BaseSIMDAcrossLanes<1, U, 0b00, opcode, FPR16, V128,
+ asm, ".16b", []>;
+ def v4i16v : BaseSIMDAcrossLanes<0, U, 0b01, opcode, FPR32, V64,
+ asm, ".4h", []>;
+ def v8i16v : BaseSIMDAcrossLanes<1, U, 0b01, opcode, FPR32, V128,
+ asm, ".8h", []>;
+ def v4i32v : BaseSIMDAcrossLanes<1, U, 0b10, opcode, FPR64, V128,
+ asm, ".4s", []>;
+}
+
+multiclass SIMDAcrossLanesS<bits<5> opcode, bit sz1, string asm,
+ Intrinsic intOp> {
+ def v4i32v : BaseSIMDAcrossLanes<1, 1, {sz1, 0}, opcode, FPR32, V128,
+ asm, ".4s",
+ [(set FPR32:$Rd, (intOp (v4f32 V128:$Rn)))]>;
+}
+
+//----------------------------------------------------------------------------
+// AdvSIMD INS/DUP instructions
+//----------------------------------------------------------------------------
+
+// FIXME: There has got to be a better way to factor these. ugh.
+
+class BaseSIMDInsDup<bit Q, bit op, dag outs, dag ins, string asm,
+ string operands, string constraints, list<dag> pattern>
+ : I<outs, ins, asm, operands, constraints, pattern>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31} = 0;
+ let Inst{30} = Q;
+ let Inst{29} = op;
+ let Inst{28-21} = 0b01110000;
+ let Inst{15} = 0;
+ let Inst{10} = 1;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+class SIMDDupFromMain<bit Q, bits<5> imm5, string size, ValueType vectype,
+ RegisterOperand vecreg, RegisterClass regtype>
+ : BaseSIMDInsDup<Q, 0, (outs vecreg:$Rd), (ins regtype:$Rn), "dup",
+ "{\t$Rd" # size # ", $Rn" #
+ "|" # size # "\t$Rd, $Rn}", "",
+ [(set (vectype vecreg:$Rd), (AArch64dup regtype:$Rn))]> {
+ let Inst{20-16} = imm5;
+ let Inst{14-11} = 0b0001;
+}
+
+class SIMDDupFromElement<bit Q, string dstkind, string srckind,
+ ValueType vectype, ValueType insreg,
+ RegisterOperand vecreg, Operand idxtype,
+ ValueType elttype, SDNode OpNode>
+ : BaseSIMDInsDup<Q, 0, (outs vecreg:$Rd), (ins V128:$Rn, idxtype:$idx), "dup",
+ "{\t$Rd" # dstkind # ", $Rn" # srckind # "$idx" #
+ "|" # dstkind # "\t$Rd, $Rn$idx}", "",
+ [(set (vectype vecreg:$Rd),
+ (OpNode (insreg V128:$Rn), idxtype:$idx))]> {
+ let Inst{14-11} = 0b0000;
+}
+
+class SIMDDup64FromElement
+ : SIMDDupFromElement<1, ".2d", ".d", v2i64, v2i64, V128,
+ VectorIndexD, i64, AArch64duplane64> {
+ bits<1> idx;
+ let Inst{20} = idx;
+ let Inst{19-16} = 0b1000;
+}
+
+class SIMDDup32FromElement<bit Q, string size, ValueType vectype,
+ RegisterOperand vecreg>
+ : SIMDDupFromElement<Q, size, ".s", vectype, v4i32, vecreg,
+ VectorIndexS, i64, AArch64duplane32> {
+ bits<2> idx;
+ let Inst{20-19} = idx;
+ let Inst{18-16} = 0b100;
+}
+
+class SIMDDup16FromElement<bit Q, string size, ValueType vectype,
+ RegisterOperand vecreg>
+ : SIMDDupFromElement<Q, size, ".h", vectype, v8i16, vecreg,
+ VectorIndexH, i64, AArch64duplane16> {
+ bits<3> idx;
+ let Inst{20-18} = idx;
+ let Inst{17-16} = 0b10;
+}
+
+class SIMDDup8FromElement<bit Q, string size, ValueType vectype,
+ RegisterOperand vecreg>
+ : SIMDDupFromElement<Q, size, ".b", vectype, v16i8, vecreg,
+ VectorIndexB, i64, AArch64duplane8> {
+ bits<4> idx;
+ let Inst{20-17} = idx;
+ let Inst{16} = 1;
+}
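+
+// Editorial note (illustrative, not from the original source): in the DUP
+// (element) classes above, the five bits Inst{20-16} jointly encode element
+// size and index.  The lowest set bit of the field identifies the size and
+// the bits above it hold the lane index:
+//   .b -> xxxx1 (4-bit index)     .h -> xxx10 (3-bit index)
+//   .s -> xx100 (2-bit index)     .d -> x1000 (1-bit index)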
+
+class BaseSIMDMov<bit Q, string size, bits<4> imm4, RegisterClass regtype,
+ Operand idxtype, string asm, list<dag> pattern>
+ : BaseSIMDInsDup<Q, 0, (outs regtype:$Rd), (ins V128:$Rn, idxtype:$idx), asm,
+ "{\t$Rd, $Rn" # size # "$idx" #
+ "|" # size # "\t$Rd, $Rn$idx}", "", pattern> {
+ let Inst{14-11} = imm4;
+}
+
+class SIMDSMov<bit Q, string size, RegisterClass regtype,
+ Operand idxtype>
+ : BaseSIMDMov<Q, size, 0b0101, regtype, idxtype, "smov", []>;
+class SIMDUMov<bit Q, string size, ValueType vectype, RegisterClass regtype,
+ Operand idxtype>
+ : BaseSIMDMov<Q, size, 0b0111, regtype, idxtype, "umov",
+ [(set regtype:$Rd, (vector_extract (vectype V128:$Rn), idxtype:$idx))]>;
+
+class SIMDMovAlias<string asm, string size, Instruction inst,
+ RegisterClass regtype, Operand idxtype>
+ : InstAlias<asm#"{\t$dst, $src"#size#"$idx" #
+ "|" # size # "\t$dst, $src$idx}",
+ (inst regtype:$dst, V128:$src, idxtype:$idx)>;
+
+multiclass SMov {
+ def vi8to32 : SIMDSMov<0, ".b", GPR32, VectorIndexB> {
+ bits<4> idx;
+ let Inst{20-17} = idx;
+ let Inst{16} = 1;
+ }
+ def vi8to64 : SIMDSMov<1, ".b", GPR64, VectorIndexB> {
+ bits<4> idx;
+ let Inst{20-17} = idx;
+ let Inst{16} = 1;
+ }
+ def vi16to32 : SIMDSMov<0, ".h", GPR32, VectorIndexH> {
+ bits<3> idx;
+ let Inst{20-18} = idx;
+ let Inst{17-16} = 0b10;
+ }
+ def vi16to64 : SIMDSMov<1, ".h", GPR64, VectorIndexH> {
+ bits<3> idx;
+ let Inst{20-18} = idx;
+ let Inst{17-16} = 0b10;
+ }
+ def vi32to64 : SIMDSMov<1, ".s", GPR64, VectorIndexS> {
+ bits<2> idx;
+ let Inst{20-19} = idx;
+ let Inst{18-16} = 0b100;
+ }
+}
+
+multiclass UMov {
+ def vi8 : SIMDUMov<0, ".b", v16i8, GPR32, VectorIndexB> {
+ bits<4> idx;
+ let Inst{20-17} = idx;
+ let Inst{16} = 1;
+ }
+ def vi16 : SIMDUMov<0, ".h", v8i16, GPR32, VectorIndexH> {
+ bits<3> idx;
+ let Inst{20-18} = idx;
+ let Inst{17-16} = 0b10;
+ }
+ def vi32 : SIMDUMov<0, ".s", v4i32, GPR32, VectorIndexS> {
+ bits<2> idx;
+ let Inst{20-19} = idx;
+ let Inst{18-16} = 0b100;
+ }
+ def vi64 : SIMDUMov<1, ".d", v2i64, GPR64, VectorIndexD> {
+ bits<1> idx;
+ let Inst{20} = idx;
+ let Inst{19-16} = 0b1000;
+ }
+ def : SIMDMovAlias<"mov", ".s",
+ !cast<Instruction>(NAME#"vi32"),
+ GPR32, VectorIndexS>;
+ def : SIMDMovAlias<"mov", ".d",
+ !cast<Instruction>(NAME#"vi64"),
+ GPR64, VectorIndexD>;
+}
+
+class SIMDInsFromMain<string size, ValueType vectype,
+ RegisterClass regtype, Operand idxtype>
+ : BaseSIMDInsDup<1, 0, (outs V128:$dst),
+ (ins V128:$Rd, idxtype:$idx, regtype:$Rn), "ins",
+ "{\t$Rd" # size # "$idx, $Rn" #
+ "|" # size # "\t$Rd$idx, $Rn}",
+ "$Rd = $dst",
+ [(set V128:$dst,
+ (vector_insert (vectype V128:$Rd), regtype:$Rn, idxtype:$idx))]> {
+ let Inst{14-11} = 0b0011;
+}
+
+class SIMDInsFromElement<string size, ValueType vectype,
+ ValueType elttype, Operand idxtype>
+ : BaseSIMDInsDup<1, 1, (outs V128:$dst),
+ (ins V128:$Rd, idxtype:$idx, V128:$Rn, idxtype:$idx2), "ins",
+ "{\t$Rd" # size # "$idx, $Rn" # size # "$idx2" #
+ "|" # size # "\t$Rd$idx, $Rn$idx2}",
+ "$Rd = $dst",
+ [(set V128:$dst,
+ (vector_insert
+ (vectype V128:$Rd),
+ (elttype (vector_extract (vectype V128:$Rn), idxtype:$idx2)),
+ idxtype:$idx))]>;
+
+class SIMDInsMainMovAlias<string size, Instruction inst,
+ RegisterClass regtype, Operand idxtype>
+ : InstAlias<"mov" # "{\t$dst" # size # "$idx, $src" #
+ "|" # size #"\t$dst$idx, $src}",
+ (inst V128:$dst, idxtype:$idx, regtype:$src)>;
+class SIMDInsElementMovAlias<string size, Instruction inst,
+ Operand idxtype>
+ : InstAlias<"mov" # "{\t$dst" # size # "$idx, $src" # size # "$idx2" #
+ # "|" # size #" $dst$idx, $src$idx2}",
+ (inst V128:$dst, idxtype:$idx, V128:$src, idxtype:$idx2)>;
+
+
+multiclass SIMDIns {
+ def vi8gpr : SIMDInsFromMain<".b", v16i8, GPR32, VectorIndexB> {
+ bits<4> idx;
+ let Inst{20-17} = idx;
+ let Inst{16} = 1;
+ }
+ def vi16gpr : SIMDInsFromMain<".h", v8i16, GPR32, VectorIndexH> {
+ bits<3> idx;
+ let Inst{20-18} = idx;
+ let Inst{17-16} = 0b10;
+ }
+ def vi32gpr : SIMDInsFromMain<".s", v4i32, GPR32, VectorIndexS> {
+ bits<2> idx;
+ let Inst{20-19} = idx;
+ let Inst{18-16} = 0b100;
+ }
+ def vi64gpr : SIMDInsFromMain<".d", v2i64, GPR64, VectorIndexD> {
+ bits<1> idx;
+ let Inst{20} = idx;
+ let Inst{19-16} = 0b1000;
+ }
+
+ def vi8lane : SIMDInsFromElement<".b", v16i8, i32, VectorIndexB> {
+ bits<4> idx;
+ bits<4> idx2;
+ let Inst{20-17} = idx;
+ let Inst{16} = 1;
+ let Inst{14-11} = idx2;
+ }
+ def vi16lane : SIMDInsFromElement<".h", v8i16, i32, VectorIndexH> {
+ bits<3> idx;
+ bits<3> idx2;
+ let Inst{20-18} = idx;
+ let Inst{17-16} = 0b10;
+ let Inst{14-12} = idx2;
+ let Inst{11} = 0;
+ }
+ def vi32lane : SIMDInsFromElement<".s", v4i32, i32, VectorIndexS> {
+ bits<2> idx;
+ bits<2> idx2;
+ let Inst{20-19} = idx;
+ let Inst{18-16} = 0b100;
+ let Inst{14-13} = idx2;
+ let Inst{12-11} = 0;
+ }
+ def vi64lane : SIMDInsFromElement<".d", v2i64, i64, VectorIndexD> {
+ bits<1> idx;
+ bits<1> idx2;
+ let Inst{20} = idx;
+ let Inst{19-16} = 0b1000;
+ let Inst{14} = idx2;
+ let Inst{13-11} = 0;
+ }
+
+ // For all forms of the INS instruction, the "mov" mnemonic is the
+ // preferred alias. Why they didn't just call the instruction "mov" in
+ // the first place is a very good question indeed...
+ def : SIMDInsMainMovAlias<".b", !cast<Instruction>(NAME#"vi8gpr"),
+ GPR32, VectorIndexB>;
+ def : SIMDInsMainMovAlias<".h", !cast<Instruction>(NAME#"vi16gpr"),
+ GPR32, VectorIndexH>;
+ def : SIMDInsMainMovAlias<".s", !cast<Instruction>(NAME#"vi32gpr"),
+ GPR32, VectorIndexS>;
+ def : SIMDInsMainMovAlias<".d", !cast<Instruction>(NAME#"vi64gpr"),
+ GPR64, VectorIndexD>;
+
+ def : SIMDInsElementMovAlias<".b", !cast<Instruction>(NAME#"vi8lane"),
+ VectorIndexB>;
+ def : SIMDInsElementMovAlias<".h", !cast<Instruction>(NAME#"vi16lane"),
+ VectorIndexH>;
+ def : SIMDInsElementMovAlias<".s", !cast<Instruction>(NAME#"vi32lane"),
+ VectorIndexS>;
+ def : SIMDInsElementMovAlias<".d", !cast<Instruction>(NAME#"vi64lane"),
+ VectorIndexD>;
+}
+
+//----------------------------------------------------------------------------
+// AdvSIMD TBL/TBX
+//----------------------------------------------------------------------------
+
+let mayStore = 0, mayLoad = 0, hasSideEffects = 0 in
+class BaseSIMDTableLookup<bit Q, bits<2> len, bit op, RegisterOperand vectype,
+ RegisterOperand listtype, string asm, string kind>
+ : I<(outs vectype:$Vd), (ins listtype:$Vn, vectype:$Vm), asm,
+ "\t$Vd" # kind # ", $Vn, $Vm" # kind, "", []>,
+ Sched<[WriteV]> {
+ bits<5> Vd;
+ bits<5> Vn;
+ bits<5> Vm;
+ let Inst{31} = 0;
+ let Inst{30} = Q;
+ let Inst{29-21} = 0b001110000;
+ let Inst{20-16} = Vm;
+ let Inst{15} = 0;
+ let Inst{14-13} = len;
+ let Inst{12} = op;
+ let Inst{11-10} = 0b00;
+ let Inst{9-5} = Vn;
+ let Inst{4-0} = Vd;
+}
+
+let mayStore = 0, mayLoad = 0, hasSideEffects = 0 in
+class BaseSIMDTableLookupTied<bit Q, bits<2> len, bit op, RegisterOperand vectype,
+ RegisterOperand listtype, string asm, string kind>
+ : I<(outs vectype:$dst), (ins vectype:$Vd, listtype:$Vn, vectype:$Vm), asm,
+ "\t$Vd" # kind # ", $Vn, $Vm" # kind, "$Vd = $dst", []>,
+ Sched<[WriteV]> {
+ bits<5> Vd;
+ bits<5> Vn;
+ bits<5> Vm;
+ let Inst{31} = 0;
+ let Inst{30} = Q;
+ let Inst{29-21} = 0b001110000;
+ let Inst{20-16} = Vm;
+ let Inst{15} = 0;
+ let Inst{14-13} = len;
+ let Inst{12} = op;
+ let Inst{11-10} = 0b00;
+ let Inst{9-5} = Vn;
+ let Inst{4-0} = Vd;
+}
+
+class SIMDTableLookupAlias<string asm, Instruction inst,
+ RegisterOperand vectype, RegisterOperand listtype>
+ : InstAlias<!strconcat(asm, "\t$dst, $lst, $index"),
+ (inst vectype:$dst, listtype:$lst, vectype:$index), 0>;
+
+multiclass SIMDTableLookup<bit op, string asm> {
+ def v8i8One : BaseSIMDTableLookup<0, 0b00, op, V64, VecListOne16b,
+ asm, ".8b">;
+ def v8i8Two : BaseSIMDTableLookup<0, 0b01, op, V64, VecListTwo16b,
+ asm, ".8b">;
+ def v8i8Three : BaseSIMDTableLookup<0, 0b10, op, V64, VecListThree16b,
+ asm, ".8b">;
+ def v8i8Four : BaseSIMDTableLookup<0, 0b11, op, V64, VecListFour16b,
+ asm, ".8b">;
+ def v16i8One : BaseSIMDTableLookup<1, 0b00, op, V128, VecListOne16b,
+ asm, ".16b">;
+ def v16i8Two : BaseSIMDTableLookup<1, 0b01, op, V128, VecListTwo16b,
+ asm, ".16b">;
+ def v16i8Three: BaseSIMDTableLookup<1, 0b10, op, V128, VecListThree16b,
+ asm, ".16b">;
+ def v16i8Four : BaseSIMDTableLookup<1, 0b11, op, V128, VecListFour16b,
+ asm, ".16b">;
+
+ def : SIMDTableLookupAlias<asm # ".8b",
+ !cast<Instruction>(NAME#"v8i8One"),
+ V64, VecListOne128>;
+ def : SIMDTableLookupAlias<asm # ".8b",
+ !cast<Instruction>(NAME#"v8i8Two"),
+ V64, VecListTwo128>;
+ def : SIMDTableLookupAlias<asm # ".8b",
+ !cast<Instruction>(NAME#"v8i8Three"),
+ V64, VecListThree128>;
+ def : SIMDTableLookupAlias<asm # ".8b",
+ !cast<Instruction>(NAME#"v8i8Four"),
+ V64, VecListFour128>;
+ def : SIMDTableLookupAlias<asm # ".16b",
+ !cast<Instruction>(NAME#"v16i8One"),
+ V128, VecListOne128>;
+ def : SIMDTableLookupAlias<asm # ".16b",
+ !cast<Instruction>(NAME#"v16i8Two"),
+ V128, VecListTwo128>;
+ def : SIMDTableLookupAlias<asm # ".16b",
+ !cast<Instruction>(NAME#"v16i8Three"),
+ V128, VecListThree128>;
+ def : SIMDTableLookupAlias<asm # ".16b",
+ !cast<Instruction>(NAME#"v16i8Four"),
+ V128, VecListFour128>;
+}
+
+multiclass SIMDTableLookupTied<bit op, string asm> {
+ def v8i8One : BaseSIMDTableLookupTied<0, 0b00, op, V64, VecListOne16b,
+ asm, ".8b">;
+ def v8i8Two : BaseSIMDTableLookupTied<0, 0b01, op, V64, VecListTwo16b,
+ asm, ".8b">;
+ def v8i8Three : BaseSIMDTableLookupTied<0, 0b10, op, V64, VecListThree16b,
+ asm, ".8b">;
+ def v8i8Four : BaseSIMDTableLookupTied<0, 0b11, op, V64, VecListFour16b,
+ asm, ".8b">;
+ def v16i8One : BaseSIMDTableLookupTied<1, 0b00, op, V128, VecListOne16b,
+ asm, ".16b">;
+ def v16i8Two : BaseSIMDTableLookupTied<1, 0b01, op, V128, VecListTwo16b,
+ asm, ".16b">;
+ def v16i8Three: BaseSIMDTableLookupTied<1, 0b10, op, V128, VecListThree16b,
+ asm, ".16b">;
+ def v16i8Four : BaseSIMDTableLookupTied<1, 0b11, op, V128, VecListFour16b,
+ asm, ".16b">;
+
+ def : SIMDTableLookupAlias<asm # ".8b",
+ !cast<Instruction>(NAME#"v8i8One"),
+ V64, VecListOne128>;
+ def : SIMDTableLookupAlias<asm # ".8b",
+ !cast<Instruction>(NAME#"v8i8Two"),
+ V64, VecListTwo128>;
+ def : SIMDTableLookupAlias<asm # ".8b",
+ !cast<Instruction>(NAME#"v8i8Three"),
+ V64, VecListThree128>;
+ def : SIMDTableLookupAlias<asm # ".8b",
+ !cast<Instruction>(NAME#"v8i8Four"),
+ V64, VecListFour128>;
+ def : SIMDTableLookupAlias<asm # ".16b",
+ !cast<Instruction>(NAME#"v16i8One"),
+ V128, VecListOne128>;
+ def : SIMDTableLookupAlias<asm # ".16b",
+ !cast<Instruction>(NAME#"v16i8Two"),
+ V128, VecListTwo128>;
+ def : SIMDTableLookupAlias<asm # ".16b",
+ !cast<Instruction>(NAME#"v16i8Three"),
+ V128, VecListThree128>;
+ def : SIMDTableLookupAlias<asm # ".16b",
+ !cast<Instruction>(NAME#"v16i8Four"),
+ V128, VecListFour128>;
+}
+
+
+//----------------------------------------------------------------------------
+// AdvSIMD scalar CPY
+//----------------------------------------------------------------------------
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseSIMDScalarCPY<RegisterClass regtype, RegisterOperand vectype,
+ string kind, Operand idxtype>
+ : I<(outs regtype:$dst), (ins vectype:$src, idxtype:$idx), "mov",
+ "{\t$dst, $src" # kind # "$idx" #
+ "|\t$dst, $src$idx}", "", []>,
+ Sched<[WriteV]> {
+ bits<5> dst;
+ bits<5> src;
+ let Inst{31-21} = 0b01011110000;
+ let Inst{15-10} = 0b000001;
+ let Inst{9-5} = src;
+ let Inst{4-0} = dst;
+}
+
+class SIMDScalarCPYAlias<string asm, string size, Instruction inst,
+ RegisterClass regtype, RegisterOperand vectype, Operand idxtype>
+ : InstAlias<asm # "{\t$dst, $src" # size # "$index" #
+ # "|\t$dst, $src$index}",
+ (inst regtype:$dst, vectype:$src, idxtype:$index), 0>;
+
+
+multiclass SIMDScalarCPY<string asm> {
+ def i8 : BaseSIMDScalarCPY<FPR8, V128, ".b", VectorIndexB> {
+ bits<4> idx;
+ let Inst{20-17} = idx;
+ let Inst{16} = 1;
+ }
+ def i16 : BaseSIMDScalarCPY<FPR16, V128, ".h", VectorIndexH> {
+ bits<3> idx;
+ let Inst{20-18} = idx;
+ let Inst{17-16} = 0b10;
+ }
+ def i32 : BaseSIMDScalarCPY<FPR32, V128, ".s", VectorIndexS> {
+ bits<2> idx;
+ let Inst{20-19} = idx;
+ let Inst{18-16} = 0b100;
+ }
+ def i64 : BaseSIMDScalarCPY<FPR64, V128, ".d", VectorIndexD> {
+ bits<1> idx;
+ let Inst{20} = idx;
+ let Inst{19-16} = 0b1000;
+ }
+
+ def : Pat<(v1i64 (scalar_to_vector (i64 (vector_extract (v2i64 V128:$src),
+ VectorIndexD:$idx)))),
+ (!cast<Instruction>(NAME # i64) V128:$src, VectorIndexD:$idx)>;
+
+ // 'DUP' mnemonic aliases.
+ def : SIMDScalarCPYAlias<"dup", ".b",
+ !cast<Instruction>(NAME#"i8"),
+ FPR8, V128, VectorIndexB>;
+ def : SIMDScalarCPYAlias<"dup", ".h",
+ !cast<Instruction>(NAME#"i16"),
+ FPR16, V128, VectorIndexH>;
+ def : SIMDScalarCPYAlias<"dup", ".s",
+ !cast<Instruction>(NAME#"i32"),
+ FPR32, V128, VectorIndexS>;
+ def : SIMDScalarCPYAlias<"dup", ".d",
+ !cast<Instruction>(NAME#"i64"),
+ FPR64, V128, VectorIndexD>;
+}
+
+//----------------------------------------------------------------------------
+// AdvSIMD modified immediate instructions
+//----------------------------------------------------------------------------
+
+class BaseSIMDModifiedImm<bit Q, bit op, dag oops, dag iops,
+ string asm, string op_string,
+ string cstr, list<dag> pattern>
+ : I<oops, iops, asm, op_string, cstr, pattern>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<8> imm8;
+ let Inst{31} = 0;
+ let Inst{30} = Q;
+ let Inst{29} = op;
+ let Inst{28-19} = 0b0111100000;
+ let Inst{18-16} = imm8{7-5};
+ let Inst{11-10} = 0b01;
+ let Inst{9-5} = imm8{4-0};
+ let Inst{4-0} = Rd;
+}
+
+class BaseSIMDModifiedImmVector<bit Q, bit op, RegisterOperand vectype,
+ Operand immtype, dag opt_shift_iop,
+ string opt_shift, string asm, string kind,
+ list<dag> pattern>
+ : BaseSIMDModifiedImm<Q, op, (outs vectype:$Rd),
+ !con((ins immtype:$imm8), opt_shift_iop), asm,
+ "{\t$Rd" # kind # ", $imm8" # opt_shift #
+ "|" # kind # "\t$Rd, $imm8" # opt_shift # "}",
+ "", pattern> {
+ let DecoderMethod = "DecodeModImmInstruction";
+}
+
+class BaseSIMDModifiedImmVectorTied<bit Q, bit op, RegisterOperand vectype,
+ Operand immtype, dag opt_shift_iop,
+ string opt_shift, string asm, string kind,
+ list<dag> pattern>
+ : BaseSIMDModifiedImm<Q, op, (outs vectype:$dst),
+ !con((ins vectype:$Rd, immtype:$imm8), opt_shift_iop),
+ asm, "{\t$Rd" # kind # ", $imm8" # opt_shift #
+ "|" # kind # "\t$Rd, $imm8" # opt_shift # "}",
+ "$Rd = $dst", pattern> {
+ let DecoderMethod = "DecodeModImmTiedInstruction";
+}
+
+class BaseSIMDModifiedImmVectorShift<bit Q, bit op, bits<2> b15_b12,
+ RegisterOperand vectype, string asm,
+ string kind, list<dag> pattern>
+ : BaseSIMDModifiedImmVector<Q, op, vectype, imm0_255,
+ (ins logical_vec_shift:$shift),
+ "$shift", asm, kind, pattern> {
+ bits<2> shift;
+ let Inst{15} = b15_b12{1};
+ let Inst{14-13} = shift;
+ let Inst{12} = b15_b12{0};
+}
+
+class BaseSIMDModifiedImmVectorShiftTied<bit Q, bit op, bits<2> b15_b12,
+ RegisterOperand vectype, string asm,
+ string kind, list<dag> pattern>
+ : BaseSIMDModifiedImmVectorTied<Q, op, vectype, imm0_255,
+ (ins logical_vec_shift:$shift),
+ "$shift", asm, kind, pattern> {
+ bits<2> shift;
+ let Inst{15} = b15_b12{1};
+ let Inst{14-13} = shift;
+ let Inst{12} = b15_b12{0};
+}
+
+
+class BaseSIMDModifiedImmVectorShiftHalf<bit Q, bit op, bits<2> b15_b12,
+ RegisterOperand vectype, string asm,
+ string kind, list<dag> pattern>
+ : BaseSIMDModifiedImmVector<Q, op, vectype, imm0_255,
+ (ins logical_vec_hw_shift:$shift),
+ "$shift", asm, kind, pattern> {
+ bits<2> shift;
+ let Inst{15} = b15_b12{1};
+ let Inst{14} = 0;
+ let Inst{13} = shift{0};
+ let Inst{12} = b15_b12{0};
+}
+
+class BaseSIMDModifiedImmVectorShiftHalfTied<bit Q, bit op, bits<2> b15_b12,
+ RegisterOperand vectype, string asm,
+ string kind, list<dag> pattern>
+ : BaseSIMDModifiedImmVectorTied<Q, op, vectype, imm0_255,
+ (ins logical_vec_hw_shift:$shift),
+ "$shift", asm, kind, pattern> {
+ bits<2> shift;
+ let Inst{15} = b15_b12{1};
+ let Inst{14} = 0;
+ let Inst{13} = shift{0};
+ let Inst{12} = b15_b12{0};
+}
+
+multiclass SIMDModifiedImmVectorShift<bit op, bits<2> hw_cmode, bits<2> w_cmode,
+ string asm> {
+ def v4i16 : BaseSIMDModifiedImmVectorShiftHalf<0, op, hw_cmode, V64,
+ asm, ".4h", []>;
+ def v8i16 : BaseSIMDModifiedImmVectorShiftHalf<1, op, hw_cmode, V128,
+ asm, ".8h", []>;
+
+ def v2i32 : BaseSIMDModifiedImmVectorShift<0, op, w_cmode, V64,
+ asm, ".2s", []>;
+ def v4i32 : BaseSIMDModifiedImmVectorShift<1, op, w_cmode, V128,
+ asm, ".4s", []>;
+}
+
+multiclass SIMDModifiedImmVectorShiftTied<bit op, bits<2> hw_cmode,
+ bits<2> w_cmode, string asm,
+ SDNode OpNode> {
+ def v4i16 : BaseSIMDModifiedImmVectorShiftHalfTied<0, op, hw_cmode, V64,
+ asm, ".4h",
+ [(set (v4i16 V64:$dst), (OpNode V64:$Rd,
+ imm0_255:$imm8,
+ (i32 imm:$shift)))]>;
+ def v8i16 : BaseSIMDModifiedImmVectorShiftHalfTied<1, op, hw_cmode, V128,
+ asm, ".8h",
+ [(set (v8i16 V128:$dst), (OpNode V128:$Rd,
+ imm0_255:$imm8,
+ (i32 imm:$shift)))]>;
+
+ def v2i32 : BaseSIMDModifiedImmVectorShiftTied<0, op, w_cmode, V64,
+ asm, ".2s",
+ [(set (v2i32 V64:$dst), (OpNode V64:$Rd,
+ imm0_255:$imm8,
+ (i32 imm:$shift)))]>;
+ def v4i32 : BaseSIMDModifiedImmVectorShiftTied<1, op, w_cmode, V128,
+ asm, ".4s",
+ [(set (v4i32 V128:$dst), (OpNode V128:$Rd,
+ imm0_255:$imm8,
+ (i32 imm:$shift)))]>;
+}
+
+class SIMDModifiedImmMoveMSL<bit Q, bit op, bits<4> cmode,
+ RegisterOperand vectype, string asm,
+ string kind, list<dag> pattern>
+ : BaseSIMDModifiedImmVector<Q, op, vectype, imm0_255,
+ (ins move_vec_shift:$shift),
+ "$shift", asm, kind, pattern> {
+ bits<1> shift;
+ let Inst{15-13} = cmode{3-1};
+ let Inst{12} = shift;
+}
+
+class SIMDModifiedImmVectorNoShift<bit Q, bit op, bits<4> cmode,
+ RegisterOperand vectype,
+ Operand imm_type, string asm,
+ string kind, list<dag> pattern>
+ : BaseSIMDModifiedImmVector<Q, op, vectype, imm_type, (ins), "",
+ asm, kind, pattern> {
+ let Inst{15-12} = cmode;
+}
+
+class SIMDModifiedImmScalarNoShift<bit Q, bit op, bits<4> cmode, string asm,
+ list<dag> pattern>
+ : BaseSIMDModifiedImm<Q, op, (outs FPR64:$Rd), (ins simdimmtype10:$imm8), asm,
+ "\t$Rd, $imm8", "", pattern> {
+ let Inst{15-12} = cmode;
+ let DecoderMethod = "DecodeModImmInstruction";
+}
+
+//----------------------------------------------------------------------------
+// AdvSIMD indexed element
+//----------------------------------------------------------------------------
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseSIMDIndexed<bit Q, bit U, bit Scalar, bits<2> size, bits<4> opc,
+ RegisterOperand dst_reg, RegisterOperand lhs_reg,
+ RegisterOperand rhs_reg, Operand vec_idx, string asm,
+ string apple_kind, string dst_kind, string lhs_kind,
+ string rhs_kind, list<dag> pattern>
+ : I<(outs dst_reg:$Rd), (ins lhs_reg:$Rn, rhs_reg:$Rm, vec_idx:$idx),
+ asm,
+ "{\t$Rd" # dst_kind # ", $Rn" # lhs_kind # ", $Rm" # rhs_kind # "$idx" #
+ "|" # apple_kind # "\t$Rd, $Rn, $Rm$idx}", "", pattern>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+
+ let Inst{31} = 0;
+ let Inst{30} = Q;
+ let Inst{29} = U;
+ let Inst{28} = Scalar;
+ let Inst{27-24} = 0b1111;
+ let Inst{23-22} = size;
+ // Bit 21 must be set by the derived class.
+ let Inst{20-16} = Rm;
+ let Inst{15-12} = opc;
+ // Bit 11 must be set by the derived class.
+ let Inst{10} = 0;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class BaseSIMDIndexedTied<bit Q, bit U, bit Scalar, bits<2> size, bits<4> opc,
+ RegisterOperand dst_reg, RegisterOperand lhs_reg,
+ RegisterOperand rhs_reg, Operand vec_idx, string asm,
+ string apple_kind, string dst_kind, string lhs_kind,
+ string rhs_kind, list<dag> pattern>
+ : I<(outs dst_reg:$dst),
+ (ins dst_reg:$Rd, lhs_reg:$Rn, rhs_reg:$Rm, vec_idx:$idx), asm,
+ "{\t$Rd" # dst_kind # ", $Rn" # lhs_kind # ", $Rm" # rhs_kind # "$idx" #
+ "|" # apple_kind # "\t$Rd, $Rn, $Rm$idx}", "$Rd = $dst", pattern>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+
+ let Inst{31} = 0;
+ let Inst{30} = Q;
+ let Inst{29} = U;
+ let Inst{28} = Scalar;
+ let Inst{27-24} = 0b1111;
+ let Inst{23-22} = size;
+ // Bit 21 must be set by the derived class.
+ let Inst{20-16} = Rm;
+ let Inst{15-12} = opc;
+ // Bit 11 must be set by the derived class.
+ let Inst{10} = 0;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+multiclass SIMDFPIndexedSD<bit U, bits<4> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v2i32_indexed : BaseSIMDIndexed<0, U, 0, 0b10, opc,
+ V64, V64,
+ V128, VectorIndexS,
+ asm, ".2s", ".2s", ".2s", ".s",
+ [(set (v2f32 V64:$Rd),
+ (OpNode (v2f32 V64:$Rn),
+ (v2f32 (AArch64duplane32 (v4f32 V128:$Rm), VectorIndexS:$idx))))]> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+
+ def v4i32_indexed : BaseSIMDIndexed<1, U, 0, 0b10, opc,
+ V128, V128,
+ V128, VectorIndexS,
+ asm, ".4s", ".4s", ".4s", ".s",
+ [(set (v4f32 V128:$Rd),
+ (OpNode (v4f32 V128:$Rn),
+ (v4f32 (AArch64duplane32 (v4f32 V128:$Rm), VectorIndexS:$idx))))]> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+
+ def v2i64_indexed : BaseSIMDIndexed<1, U, 0, 0b11, opc,
+ V128, V128,
+ V128, VectorIndexD,
+ asm, ".2d", ".2d", ".2d", ".d",
+ [(set (v2f64 V128:$Rd),
+ (OpNode (v2f64 V128:$Rn),
+ (v2f64 (AArch64duplane64 (v2f64 V128:$Rm), VectorIndexD:$idx))))]> {
+ bits<1> idx;
+ let Inst{11} = idx{0};
+ let Inst{21} = 0;
+ }
+
+ def v1i32_indexed : BaseSIMDIndexed<1, U, 1, 0b10, opc,
+ FPR32Op, FPR32Op, V128, VectorIndexS,
+ asm, ".s", "", "", ".s",
+ [(set (f32 FPR32Op:$Rd),
+ (OpNode (f32 FPR32Op:$Rn),
+ (f32 (vector_extract (v4f32 V128:$Rm),
+ VectorIndexS:$idx))))]> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+
+ def v1i64_indexed : BaseSIMDIndexed<1, U, 1, 0b11, opc,
+ FPR64Op, FPR64Op, V128, VectorIndexD,
+ asm, ".d", "", "", ".d",
+ [(set (f64 FPR64Op:$Rd),
+ (OpNode (f64 FPR64Op:$Rn),
+ (f64 (vector_extract (v2f64 V128:$Rm),
+ VectorIndexD:$idx))))]> {
+ bits<1> idx;
+ let Inst{11} = idx{0};
+ let Inst{21} = 0;
+ }
+}
+
+multiclass SIMDFPIndexedSDTiedPatterns<string INST, SDPatternOperator OpNode> {
+ // 2 variants for the .2s version: DUPLANE from 128-bit and DUP scalar.
+ def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
+ (AArch64duplane32 (v4f32 V128:$Rm),
+ VectorIndexS:$idx))),
+ (!cast<Instruction>(INST # v2i32_indexed)
+ V64:$Rd, V64:$Rn, V128:$Rm, VectorIndexS:$idx)>;
+ def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
+ (AArch64dup (f32 FPR32Op:$Rm)))),
+ (!cast<Instruction>(INST # "v2i32_indexed") V64:$Rd, V64:$Rn,
+ (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
+
+
+ // 2 variants for the .4s version: DUPLANE from 128-bit and DUP scalar.
+ def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
+ (AArch64duplane32 (v4f32 V128:$Rm),
+ VectorIndexS:$idx))),
+ (!cast<Instruction>(INST # "v4i32_indexed")
+ V128:$Rd, V128:$Rn, V128:$Rm, VectorIndexS:$idx)>;
+ def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
+ (AArch64dup (f32 FPR32Op:$Rm)))),
+ (!cast<Instruction>(INST # "v4i32_indexed") V128:$Rd, V128:$Rn,
+ (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
+
+ // 2 variants for the .2d version: DUPLANE from 128-bit and DUP scalar.
+ def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
+ (AArch64duplane64 (v2f64 V128:$Rm),
+ VectorIndexD:$idx))),
+ (!cast<Instruction>(INST # "v2i64_indexed")
+ V128:$Rd, V128:$Rn, V128:$Rm, VectorIndexS:$idx)>;
+ def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
+ (AArch64dup (f64 FPR64Op:$Rm)))),
+ (!cast<Instruction>(INST # "v2i64_indexed") V128:$Rd, V128:$Rn,
+ (SUBREG_TO_REG (i32 0), FPR64Op:$Rm, dsub), (i64 0))>;
+
+ // 2 variants for 32-bit scalar version: extract from .2s or from .4s
+ def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
+ (vector_extract (v4f32 V128:$Rm), VectorIndexS:$idx))),
+ (!cast<Instruction>(INST # "v1i32_indexed") FPR32:$Rd, FPR32:$Rn,
+ V128:$Rm, VectorIndexS:$idx)>;
+ def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
+ (vector_extract (v2f32 V64:$Rm), VectorIndexS:$idx))),
+ (!cast<Instruction>(INST # "v1i32_indexed") FPR32:$Rd, FPR32:$Rn,
+ (SUBREG_TO_REG (i32 0), V64:$Rm, dsub), VectorIndexS:$idx)>;
+
+ // 1 variant for 64-bit scalar version: extract from .1d or from .2d
+ def : Pat<(f64 (OpNode (f64 FPR64:$Rd), (f64 FPR64:$Rn),
+ (vector_extract (v2f64 V128:$Rm), VectorIndexD:$idx))),
+ (!cast<Instruction>(INST # "v1i64_indexed") FPR64:$Rd, FPR64:$Rn,
+ V128:$Rm, VectorIndexD:$idx)>;
+}
+
+multiclass SIMDFPIndexedSDTied<bit U, bits<4> opc, string asm> {
+ def v2i32_indexed : BaseSIMDIndexedTied<0, U, 0, 0b10, opc, V64, V64,
+ V128, VectorIndexS,
+ asm, ".2s", ".2s", ".2s", ".s", []> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+
+ def v4i32_indexed : BaseSIMDIndexedTied<1, U, 0, 0b10, opc,
+ V128, V128,
+ V128, VectorIndexS,
+ asm, ".4s", ".4s", ".4s", ".s", []> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+
+ def v2i64_indexed : BaseSIMDIndexedTied<1, U, 0, 0b11, opc,
+ V128, V128,
+ V128, VectorIndexD,
+ asm, ".2d", ".2d", ".2d", ".d", []> {
+ bits<1> idx;
+ let Inst{11} = idx{0};
+ let Inst{21} = 0;
+ }
+
+
+ def v1i32_indexed : BaseSIMDIndexedTied<1, U, 1, 0b10, opc,
+ FPR32Op, FPR32Op, V128, VectorIndexS,
+ asm, ".s", "", "", ".s", []> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+
+ def v1i64_indexed : BaseSIMDIndexedTied<1, U, 1, 0b11, opc,
+ FPR64Op, FPR64Op, V128, VectorIndexD,
+ asm, ".d", "", "", ".d", []> {
+ bits<1> idx;
+ let Inst{11} = idx{0};
+ let Inst{21} = 0;
+ }
+}
+
+multiclass SIMDIndexedHS<bit U, bits<4> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v4i16_indexed : BaseSIMDIndexed<0, U, 0, 0b01, opc, V64, V64,
+ V128_lo, VectorIndexH,
+ asm, ".4h", ".4h", ".4h", ".h",
+ [(set (v4i16 V64:$Rd),
+ (OpNode (v4i16 V64:$Rn),
+ (v4i16 (AArch64duplane16 (v8i16 V128_lo:$Rm), VectorIndexH:$idx))))]> {
+ bits<3> idx;
+ let Inst{11} = idx{2};
+ let Inst{21} = idx{1};
+ let Inst{20} = idx{0};
+ }
+
+ def v8i16_indexed : BaseSIMDIndexed<1, U, 0, 0b01, opc,
+ V128, V128,
+ V128_lo, VectorIndexH,
+ asm, ".8h", ".8h", ".8h", ".h",
+ [(set (v8i16 V128:$Rd),
+ (OpNode (v8i16 V128:$Rn),
+ (v8i16 (AArch64duplane16 (v8i16 V128_lo:$Rm), VectorIndexH:$idx))))]> {
+ bits<3> idx;
+ let Inst{11} = idx{2};
+ let Inst{21} = idx{1};
+ let Inst{20} = idx{0};
+ }
+
+ def v2i32_indexed : BaseSIMDIndexed<0, U, 0, 0b10, opc,
+ V64, V64,
+ V128, VectorIndexS,
+ asm, ".2s", ".2s", ".2s", ".s",
+ [(set (v2i32 V64:$Rd),
+ (OpNode (v2i32 V64:$Rn),
+ (v2i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexS:$idx))))]> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+
+ def v4i32_indexed : BaseSIMDIndexed<1, U, 0, 0b10, opc,
+ V128, V128,
+ V128, VectorIndexS,
+ asm, ".4s", ".4s", ".4s", ".s",
+ [(set (v4i32 V128:$Rd),
+ (OpNode (v4i32 V128:$Rn),
+ (v4i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexS:$idx))))]> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+
+ def v1i16_indexed : BaseSIMDIndexed<1, U, 1, 0b01, opc,
+ FPR16Op, FPR16Op, V128_lo, VectorIndexH,
+ asm, ".h", "", "", ".h", []> {
+ bits<3> idx;
+ let Inst{11} = idx{2};
+ let Inst{21} = idx{1};
+ let Inst{20} = idx{0};
+ }
+
+ def v1i32_indexed : BaseSIMDIndexed<1, U, 1, 0b10, opc,
+ FPR32Op, FPR32Op, V128, VectorIndexS,
+ asm, ".s", "", "", ".s",
+ [(set (i32 FPR32Op:$Rd),
+ (OpNode FPR32Op:$Rn,
+ (i32 (vector_extract (v4i32 V128:$Rm),
+ VectorIndexS:$idx))))]> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+}
+
+multiclass SIMDVectorIndexedHS<bit U, bits<4> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v4i16_indexed : BaseSIMDIndexed<0, U, 0, 0b01, opc,
+ V64, V64,
+ V128_lo, VectorIndexH,
+ asm, ".4h", ".4h", ".4h", ".h",
+ [(set (v4i16 V64:$Rd),
+ (OpNode (v4i16 V64:$Rn),
+ (v4i16 (AArch64duplane16 (v8i16 V128_lo:$Rm), VectorIndexH:$idx))))]> {
+ bits<3> idx;
+ let Inst{11} = idx{2};
+ let Inst{21} = idx{1};
+ let Inst{20} = idx{0};
+ }
+
+ def v8i16_indexed : BaseSIMDIndexed<1, U, 0, 0b01, opc,
+ V128, V128,
+ V128_lo, VectorIndexH,
+ asm, ".8h", ".8h", ".8h", ".h",
+ [(set (v8i16 V128:$Rd),
+ (OpNode (v8i16 V128:$Rn),
+ (v8i16 (AArch64duplane16 (v8i16 V128_lo:$Rm), VectorIndexH:$idx))))]> {
+ bits<3> idx;
+ let Inst{11} = idx{2};
+ let Inst{21} = idx{1};
+ let Inst{20} = idx{0};
+ }
+
+ def v2i32_indexed : BaseSIMDIndexed<0, U, 0, 0b10, opc,
+ V64, V64,
+ V128, VectorIndexS,
+ asm, ".2s", ".2s", ".2s", ".s",
+ [(set (v2i32 V64:$Rd),
+ (OpNode (v2i32 V64:$Rn),
+ (v2i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexS:$idx))))]> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+
+ def v4i32_indexed : BaseSIMDIndexed<1, U, 0, 0b10, opc,
+ V128, V128,
+ V128, VectorIndexS,
+ asm, ".4s", ".4s", ".4s", ".s",
+ [(set (v4i32 V128:$Rd),
+ (OpNode (v4i32 V128:$Rn),
+ (v4i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexS:$idx))))]> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+}
+
+multiclass SIMDVectorIndexedHSTied<bit U, bits<4> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v4i16_indexed : BaseSIMDIndexedTied<0, U, 0, 0b01, opc, V64, V64,
+ V128_lo, VectorIndexH,
+ asm, ".4h", ".4h", ".4h", ".h",
+ [(set (v4i16 V64:$dst),
+ (OpNode (v4i16 V64:$Rd),(v4i16 V64:$Rn),
+ (v4i16 (AArch64duplane16 (v8i16 V128_lo:$Rm), VectorIndexH:$idx))))]> {
+ bits<3> idx;
+ let Inst{11} = idx{2};
+ let Inst{21} = idx{1};
+ let Inst{20} = idx{0};
+ }
+
+ def v8i16_indexed : BaseSIMDIndexedTied<1, U, 0, 0b01, opc,
+ V128, V128,
+ V128_lo, VectorIndexH,
+ asm, ".8h", ".8h", ".8h", ".h",
+ [(set (v8i16 V128:$dst),
+ (OpNode (v8i16 V128:$Rd), (v8i16 V128:$Rn),
+ (v8i16 (AArch64duplane16 (v8i16 V128_lo:$Rm), VectorIndexH:$idx))))]> {
+ bits<3> idx;
+ let Inst{11} = idx{2};
+ let Inst{21} = idx{1};
+ let Inst{20} = idx{0};
+ }
+
+ def v2i32_indexed : BaseSIMDIndexedTied<0, U, 0, 0b10, opc,
+ V64, V64,
+ V128, VectorIndexS,
+ asm, ".2s", ".2s", ".2s", ".s",
+ [(set (v2i32 V64:$dst),
+ (OpNode (v2i32 V64:$Rd), (v2i32 V64:$Rn),
+ (v2i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexS:$idx))))]> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+
+ def v4i32_indexed : BaseSIMDIndexedTied<1, U, 0, 0b10, opc,
+ V128, V128,
+ V128, VectorIndexS,
+ asm, ".4s", ".4s", ".4s", ".s",
+ [(set (v4i32 V128:$dst),
+ (OpNode (v4i32 V128:$Rd), (v4i32 V128:$Rn),
+ (v4i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexS:$idx))))]> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+}
+
+multiclass SIMDIndexedLongSD<bit U, bits<4> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v4i16_indexed : BaseSIMDIndexed<0, U, 0, 0b01, opc,
+ V128, V64,
+ V128_lo, VectorIndexH,
+ asm, ".4s", ".4s", ".4h", ".h",
+ [(set (v4i32 V128:$Rd),
+ (OpNode (v4i16 V64:$Rn),
+ (v4i16 (AArch64duplane16 (v8i16 V128_lo:$Rm), VectorIndexH:$idx))))]> {
+ bits<3> idx;
+ let Inst{11} = idx{2};
+ let Inst{21} = idx{1};
+ let Inst{20} = idx{0};
+ }
+
+ def v8i16_indexed : BaseSIMDIndexed<1, U, 0, 0b01, opc,
+ V128, V128,
+ V128_lo, VectorIndexH,
+ asm#"2", ".4s", ".4s", ".8h", ".h",
+ [(set (v4i32 V128:$Rd),
+ (OpNode (extract_high_v8i16 V128:$Rn),
+ (extract_high_v8i16 (AArch64duplane16 (v8i16 V128_lo:$Rm),
+ VectorIndexH:$idx))))]> {
+
+ bits<3> idx;
+ let Inst{11} = idx{2};
+ let Inst{21} = idx{1};
+ let Inst{20} = idx{0};
+ }
+
+ def v2i32_indexed : BaseSIMDIndexed<0, U, 0, 0b10, opc,
+ V128, V64,
+ V128, VectorIndexS,
+ asm, ".2d", ".2d", ".2s", ".s",
+ [(set (v2i64 V128:$Rd),
+ (OpNode (v2i32 V64:$Rn),
+ (v2i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexS:$idx))))]> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+
+ def v4i32_indexed : BaseSIMDIndexed<1, U, 0, 0b10, opc,
+ V128, V128,
+ V128, VectorIndexS,
+ asm#"2", ".2d", ".2d", ".4s", ".s",
+ [(set (v2i64 V128:$Rd),
+ (OpNode (extract_high_v4i32 V128:$Rn),
+ (extract_high_v4i32 (AArch64duplane32 (v4i32 V128:$Rm),
+ VectorIndexS:$idx))))]> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+
+ def v1i32_indexed : BaseSIMDIndexed<1, U, 1, 0b01, opc,
+ FPR32Op, FPR16Op, V128_lo, VectorIndexH,
+ asm, ".h", "", "", ".h", []> {
+ bits<3> idx;
+ let Inst{11} = idx{2};
+ let Inst{21} = idx{1};
+ let Inst{20} = idx{0};
+ }
+
+ def v1i64_indexed : BaseSIMDIndexed<1, U, 1, 0b10, opc,
+ FPR64Op, FPR32Op, V128, VectorIndexS,
+ asm, ".s", "", "", ".s", []> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+}
+
+multiclass SIMDIndexedLongSQDMLXSDTied<bit U, bits<4> opc, string asm,
+ SDPatternOperator Accum> {
+ def v4i16_indexed : BaseSIMDIndexedTied<0, U, 0, 0b01, opc,
+ V128, V64,
+ V128_lo, VectorIndexH,
+ asm, ".4s", ".4s", ".4h", ".h",
+ [(set (v4i32 V128:$dst),
+ (Accum (v4i32 V128:$Rd),
+ (v4i32 (int_aarch64_neon_sqdmull
+ (v4i16 V64:$Rn),
+ (v4i16 (AArch64duplane16 (v8i16 V128_lo:$Rm),
+ VectorIndexH:$idx))))))]> {
+ bits<3> idx;
+ let Inst{11} = idx{2};
+ let Inst{21} = idx{1};
+ let Inst{20} = idx{0};
+ }
+
+ // FIXME: it would be nice to use the scalar (v1i32) instruction here, but an
+ // intermediate EXTRACT_SUBREG would be untyped.
+ def : Pat<(i32 (Accum (i32 FPR32Op:$Rd),
+ (i32 (vector_extract (v4i32
+ (int_aarch64_neon_sqdmull (v4i16 V64:$Rn),
+ (v4i16 (AArch64duplane16 (v8i16 V128_lo:$Rm),
+ VectorIndexH:$idx)))),
+ (i64 0))))),
+ (EXTRACT_SUBREG
+ (!cast<Instruction>(NAME # v4i16_indexed)
+ (SUBREG_TO_REG (i32 0), FPR32Op:$Rd, ssub), V64:$Rn,
+ V128_lo:$Rm, VectorIndexH:$idx),
+ ssub)>;
+
+ def v8i16_indexed : BaseSIMDIndexedTied<1, U, 0, 0b01, opc,
+ V128, V128,
+ V128_lo, VectorIndexH,
+ asm#"2", ".4s", ".4s", ".8h", ".h",
+ [(set (v4i32 V128:$dst),
+ (Accum (v4i32 V128:$Rd),
+ (v4i32 (int_aarch64_neon_sqdmull
+ (extract_high_v8i16 V128:$Rn),
+ (extract_high_v8i16
+ (AArch64duplane16 (v8i16 V128_lo:$Rm),
+ VectorIndexH:$idx))))))]> {
+ bits<3> idx;
+ let Inst{11} = idx{2};
+ let Inst{21} = idx{1};
+ let Inst{20} = idx{0};
+ }
+
+ def v2i32_indexed : BaseSIMDIndexedTied<0, U, 0, 0b10, opc,
+ V128, V64,
+ V128, VectorIndexS,
+ asm, ".2d", ".2d", ".2s", ".s",
+ [(set (v2i64 V128:$dst),
+ (Accum (v2i64 V128:$Rd),
+ (v2i64 (int_aarch64_neon_sqdmull
+ (v2i32 V64:$Rn),
+ (v2i32 (AArch64duplane32 (v4i32 V128:$Rm),
+ VectorIndexS:$idx))))))]> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+
+ def v4i32_indexed : BaseSIMDIndexedTied<1, U, 0, 0b10, opc,
+ V128, V128,
+ V128, VectorIndexS,
+ asm#"2", ".2d", ".2d", ".4s", ".s",
+ [(set (v2i64 V128:$dst),
+ (Accum (v2i64 V128:$Rd),
+ (v2i64 (int_aarch64_neon_sqdmull
+ (extract_high_v4i32 V128:$Rn),
+ (extract_high_v4i32
+ (AArch64duplane32 (v4i32 V128:$Rm),
+ VectorIndexS:$idx))))))]> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+
+ def v1i32_indexed : BaseSIMDIndexedTied<1, U, 1, 0b01, opc,
+ FPR32Op, FPR16Op, V128_lo, VectorIndexH,
+ asm, ".h", "", "", ".h", []> {
+ bits<3> idx;
+ let Inst{11} = idx{2};
+ let Inst{21} = idx{1};
+ let Inst{20} = idx{0};
+ }
+
+
+ def v1i64_indexed : BaseSIMDIndexedTied<1, U, 1, 0b10, opc,
+ FPR64Op, FPR32Op, V128, VectorIndexS,
+ asm, ".s", "", "", ".s",
+ [(set (i64 FPR64Op:$dst),
+ (Accum (i64 FPR64Op:$Rd),
+ (i64 (int_aarch64_neon_sqdmulls_scalar
+ (i32 FPR32Op:$Rn),
+ (i32 (vector_extract (v4i32 V128:$Rm),
+ VectorIndexS:$idx))))))]> {
+
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+}
+
+multiclass SIMDVectorIndexedLongSD<bit U, bits<4> opc, string asm,
+ SDPatternOperator OpNode> {
+ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
+ def v4i16_indexed : BaseSIMDIndexed<0, U, 0, 0b01, opc,
+ V128, V64,
+ V128_lo, VectorIndexH,
+ asm, ".4s", ".4s", ".4h", ".h",
+ [(set (v4i32 V128:$Rd),
+ (OpNode (v4i16 V64:$Rn),
+ (v4i16 (AArch64duplane16 (v8i16 V128_lo:$Rm), VectorIndexH:$idx))))]> {
+ bits<3> idx;
+ let Inst{11} = idx{2};
+ let Inst{21} = idx{1};
+ let Inst{20} = idx{0};
+ }
+
+ def v8i16_indexed : BaseSIMDIndexed<1, U, 0, 0b01, opc,
+ V128, V128,
+ V128_lo, VectorIndexH,
+ asm#"2", ".4s", ".4s", ".8h", ".h",
+ [(set (v4i32 V128:$Rd),
+ (OpNode (extract_high_v8i16 V128:$Rn),
+ (extract_high_v8i16 (AArch64duplane16 (v8i16 V128_lo:$Rm),
+ VectorIndexH:$idx))))]> {
+
+ bits<3> idx;
+ let Inst{11} = idx{2};
+ let Inst{21} = idx{1};
+ let Inst{20} = idx{0};
+ }
+
+ def v2i32_indexed : BaseSIMDIndexed<0, U, 0, 0b10, opc,
+ V128, V64,
+ V128, VectorIndexS,
+ asm, ".2d", ".2d", ".2s", ".s",
+ [(set (v2i64 V128:$Rd),
+ (OpNode (v2i32 V64:$Rn),
+ (v2i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexS:$idx))))]> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+
+ def v4i32_indexed : BaseSIMDIndexed<1, U, 0, 0b10, opc,
+ V128, V128,
+ V128, VectorIndexS,
+ asm#"2", ".2d", ".2d", ".4s", ".s",
+ [(set (v2i64 V128:$Rd),
+ (OpNode (extract_high_v4i32 V128:$Rn),
+ (extract_high_v4i32 (AArch64duplane32 (v4i32 V128:$Rm),
+ VectorIndexS:$idx))))]> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+ }
+}
+
+multiclass SIMDVectorIndexedLongSDTied<bit U, bits<4> opc, string asm,
+ SDPatternOperator OpNode> {
+ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
+ def v4i16_indexed : BaseSIMDIndexedTied<0, U, 0, 0b01, opc,
+ V128, V64,
+ V128_lo, VectorIndexH,
+ asm, ".4s", ".4s", ".4h", ".h",
+ [(set (v4i32 V128:$dst),
+ (OpNode (v4i32 V128:$Rd), (v4i16 V64:$Rn),
+ (v4i16 (AArch64duplane16 (v8i16 V128_lo:$Rm), VectorIndexH:$idx))))]> {
+ bits<3> idx;
+ let Inst{11} = idx{2};
+ let Inst{21} = idx{1};
+ let Inst{20} = idx{0};
+ }
+
+ def v8i16_indexed : BaseSIMDIndexedTied<1, U, 0, 0b01, opc,
+ V128, V128,
+ V128_lo, VectorIndexH,
+ asm#"2", ".4s", ".4s", ".8h", ".h",
+ [(set (v4i32 V128:$dst),
+ (OpNode (v4i32 V128:$Rd),
+ (extract_high_v8i16 V128:$Rn),
+ (extract_high_v8i16 (AArch64duplane16 (v8i16 V128_lo:$Rm),
+ VectorIndexH:$idx))))]> {
+ bits<3> idx;
+ let Inst{11} = idx{2};
+ let Inst{21} = idx{1};
+ let Inst{20} = idx{0};
+ }
+
+ def v2i32_indexed : BaseSIMDIndexedTied<0, U, 0, 0b10, opc,
+ V128, V64,
+ V128, VectorIndexS,
+ asm, ".2d", ".2d", ".2s", ".s",
+ [(set (v2i64 V128:$dst),
+ (OpNode (v2i64 V128:$Rd), (v2i32 V64:$Rn),
+ (v2i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexS:$idx))))]> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+
+ def v4i32_indexed : BaseSIMDIndexedTied<1, U, 0, 0b10, opc,
+ V128, V128,
+ V128, VectorIndexS,
+ asm#"2", ".2d", ".2d", ".4s", ".s",
+ [(set (v2i64 V128:$dst),
+ (OpNode (v2i64 V128:$Rd),
+ (extract_high_v4i32 V128:$Rn),
+ (extract_high_v4i32 (AArch64duplane32 (v4i32 V128:$Rm),
+ VectorIndexS:$idx))))]> {
+ bits<2> idx;
+ let Inst{11} = idx{1};
+ let Inst{21} = idx{0};
+ }
+ }
+}
+
+//----------------------------------------------------------------------------
+// AdvSIMD scalar shift by immediate
+//----------------------------------------------------------------------------
+
+let mayStore = 0, mayLoad = 0, hasSideEffects = 0 in
+class BaseSIMDScalarShift<bit U, bits<5> opc, bits<7> fixed_imm,
+ RegisterClass regtype1, RegisterClass regtype2,
+ Operand immtype, string asm, list<dag> pattern>
+ : I<(outs regtype1:$Rd), (ins regtype2:$Rn, immtype:$imm),
+ asm, "\t$Rd, $Rn, $imm", "", pattern>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<7> imm;
+ let Inst{31-30} = 0b01;
+ let Inst{29} = U;
+ let Inst{28-23} = 0b111110;
+ let Inst{22-16} = fixed_imm;
+ let Inst{15-11} = opc;
+ let Inst{10} = 1;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+let mayStore = 0, mayLoad = 0, hasSideEffects = 0 in
+class BaseSIMDScalarShiftTied<bit U, bits<5> opc, bits<7> fixed_imm,
+ RegisterClass regtype1, RegisterClass regtype2,
+ Operand immtype, string asm, list<dag> pattern>
+ : I<(outs regtype1:$dst), (ins regtype1:$Rd, regtype2:$Rn, immtype:$imm),
+ asm, "\t$Rd, $Rn, $imm", "$Rd = $dst", pattern>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<7> imm;
+ let Inst{31-30} = 0b01;
+ let Inst{29} = U;
+ let Inst{28-23} = 0b111110;
+ let Inst{22-16} = fixed_imm;
+ let Inst{15-11} = opc;
+ let Inst{10} = 1;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+
+multiclass SIMDScalarRShiftSD<bit U, bits<5> opc, string asm> {
+ def s : BaseSIMDScalarShift<U, opc, {0,1,?,?,?,?,?},
+ FPR32, FPR32, vecshiftR32, asm, []> {
+ let Inst{20-16} = imm{4-0};
+ }
+
+ def d : BaseSIMDScalarShift<U, opc, {1,?,?,?,?,?,?},
+ FPR64, FPR64, vecshiftR64, asm, []> {
+ let Inst{21-16} = imm{5-0};
+ }
+}
+
+multiclass SIMDScalarRShiftD<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def d : BaseSIMDScalarShift<U, opc, {1,?,?,?,?,?,?},
+ FPR64, FPR64, vecshiftR64, asm,
+ [(set (i64 FPR64:$Rd),
+ (OpNode (i64 FPR64:$Rn), (i32 vecshiftR64:$imm)))]> {
+ let Inst{21-16} = imm{5-0};
+ }
+
+ def : Pat<(v1i64 (OpNode (v1i64 FPR64:$Rn), (i32 vecshiftR64:$imm))),
+ (!cast<Instruction>(NAME # "d") FPR64:$Rn, vecshiftR64:$imm)>;
+}
+
+multiclass SIMDScalarRShiftDTied<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode = null_frag> {
+ def d : BaseSIMDScalarShiftTied<U, opc, {1,?,?,?,?,?,?},
+ FPR64, FPR64, vecshiftR64, asm,
+ [(set (i64 FPR64:$dst), (OpNode (i64 FPR64:$Rd), (i64 FPR64:$Rn),
+ (i32 vecshiftR64:$imm)))]> {
+ let Inst{21-16} = imm{5-0};
+ }
+
+ def : Pat<(v1i64 (OpNode (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
+ (i32 vecshiftR64:$imm))),
+ (!cast<Instruction>(NAME # "d") FPR64:$Rd, FPR64:$Rn,
+ vecshiftR64:$imm)>;
+}
+
+multiclass SIMDScalarLShiftD<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def d : BaseSIMDScalarShift<U, opc, {1,?,?,?,?,?,?},
+ FPR64, FPR64, vecshiftL64, asm,
+ [(set (v1i64 FPR64:$Rd),
+ (OpNode (v1i64 FPR64:$Rn), (i32 vecshiftL64:$imm)))]> {
+ let Inst{21-16} = imm{5-0};
+ }
+}
+
+let mayStore = 0, mayLoad = 0, hasSideEffects = 0 in
+multiclass SIMDScalarLShiftDTied<bit U, bits<5> opc, string asm> {
+ def d : BaseSIMDScalarShiftTied<U, opc, {1,?,?,?,?,?,?},
+ FPR64, FPR64, vecshiftL64, asm, []> {
+ let Inst{21-16} = imm{5-0};
+ }
+}
+
+let mayStore = 0, mayLoad = 0, hasSideEffects = 0 in
+multiclass SIMDScalarRShiftBHS<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode = null_frag> {
+ def b : BaseSIMDScalarShift<U, opc, {0,0,0,1,?,?,?},
+ FPR8, FPR16, vecshiftR8, asm, []> {
+ let Inst{18-16} = imm{2-0};
+ }
+
+ def h : BaseSIMDScalarShift<U, opc, {0,0,1,?,?,?,?},
+ FPR16, FPR32, vecshiftR16, asm, []> {
+ let Inst{19-16} = imm{3-0};
+ }
+
+ def s : BaseSIMDScalarShift<U, opc, {0,1,?,?,?,?,?},
+ FPR32, FPR64, vecshiftR32, asm,
+ [(set (i32 FPR32:$Rd), (OpNode (i64 FPR64:$Rn), vecshiftR32:$imm))]> {
+ let Inst{20-16} = imm{4-0};
+ }
+}
+
+multiclass SIMDScalarLShiftBHSD<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def b : BaseSIMDScalarShift<U, opc, {0,0,0,1,?,?,?},
+ FPR8, FPR8, vecshiftL8, asm, []> {
+ let Inst{18-16} = imm{2-0};
+ }
+
+ def h : BaseSIMDScalarShift<U, opc, {0,0,1,?,?,?,?},
+ FPR16, FPR16, vecshiftL16, asm, []> {
+ let Inst{19-16} = imm{3-0};
+ }
+
+ def s : BaseSIMDScalarShift<U, opc, {0,1,?,?,?,?,?},
+ FPR32, FPR32, vecshiftL32, asm,
+ [(set (i32 FPR32:$Rd), (OpNode (i32 FPR32:$Rn), (i32 vecshiftL32:$imm)))]> {
+ let Inst{20-16} = imm{4-0};
+ }
+
+ def d : BaseSIMDScalarShift<U, opc, {1,?,?,?,?,?,?},
+ FPR64, FPR64, vecshiftL64, asm,
+ [(set (i64 FPR64:$Rd), (OpNode (i64 FPR64:$Rn), (i32 vecshiftL64:$imm)))]> {
+ let Inst{21-16} = imm{5-0};
+ }
+
+ def : Pat<(v1i64 (OpNode (v1i64 FPR64:$Rn), (i32 vecshiftL64:$imm))),
+ (!cast<Instruction>(NAME # "d") FPR64:$Rn, vecshiftL64:$imm)>;
+}
+
+multiclass SIMDScalarRShiftBHSD<bit U, bits<5> opc, string asm> {
+ def b : BaseSIMDScalarShift<U, opc, {0,0,0,1,?,?,?},
+ FPR8, FPR8, vecshiftR8, asm, []> {
+ let Inst{18-16} = imm{2-0};
+ }
+
+ def h : BaseSIMDScalarShift<U, opc, {0,0,1,?,?,?,?},
+ FPR16, FPR16, vecshiftR16, asm, []> {
+ let Inst{19-16} = imm{3-0};
+ }
+
+ def s : BaseSIMDScalarShift<U, opc, {0,1,?,?,?,?,?},
+ FPR32, FPR32, vecshiftR32, asm, []> {
+ let Inst{20-16} = imm{4-0};
+ }
+
+ def d : BaseSIMDScalarShift<U, opc, {1,?,?,?,?,?,?},
+ FPR64, FPR64, vecshiftR64, asm, []> {
+ let Inst{21-16} = imm{5-0};
+ }
+}
+
+//----------------------------------------------------------------------------
+// AdvSIMD vector x indexed element
+//----------------------------------------------------------------------------
+
+let mayStore = 0, mayLoad = 0, hasSideEffects = 0 in
+class BaseSIMDVectorShift<bit Q, bit U, bits<5> opc, bits<7> fixed_imm,
+ RegisterOperand dst_reg, RegisterOperand src_reg,
+ Operand immtype,
+ string asm, string dst_kind, string src_kind,
+ list<dag> pattern>
+ : I<(outs dst_reg:$Rd), (ins src_reg:$Rn, immtype:$imm),
+ asm, "{\t$Rd" # dst_kind # ", $Rn" # src_kind # ", $imm" #
+ "|" # dst_kind # "\t$Rd, $Rn, $imm}", "", pattern>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31} = 0;
+ let Inst{30} = Q;
+ let Inst{29} = U;
+ let Inst{28-23} = 0b011110;
+ let Inst{22-16} = fixed_imm;
+ let Inst{15-11} = opc;
+ let Inst{10} = 1;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+let mayStore = 0, mayLoad = 0, hasSideEffects = 0 in
+class BaseSIMDVectorShiftTied<bit Q, bit U, bits<5> opc, bits<7> fixed_imm,
+ RegisterOperand vectype1, RegisterOperand vectype2,
+ Operand immtype,
+ string asm, string dst_kind, string src_kind,
+ list<dag> pattern>
+ : I<(outs vectype1:$dst), (ins vectype1:$Rd, vectype2:$Rn, immtype:$imm),
+ asm, "{\t$Rd" # dst_kind # ", $Rn" # src_kind # ", $imm" #
+ "|" # dst_kind # "\t$Rd, $Rn, $imm}", "$Rd = $dst", pattern>,
+ Sched<[WriteV]> {
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31} = 0;
+ let Inst{30} = Q;
+ let Inst{29} = U;
+ let Inst{28-23} = 0b011110;
+ let Inst{22-16} = fixed_imm;
+ let Inst{15-11} = opc;
+ let Inst{10} = 1;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
+}
+
+multiclass SIMDVectorRShiftSD<bit U, bits<5> opc, string asm,
+ Intrinsic OpNode> {
+ def v2i32_shift : BaseSIMDVectorShift<0, U, opc, {0,1,?,?,?,?,?},
+ V64, V64, vecshiftR32,
+ asm, ".2s", ".2s",
+ [(set (v2i32 V64:$Rd), (OpNode (v2f32 V64:$Rn), (i32 imm:$imm)))]> {
+ bits<5> imm;
+ let Inst{20-16} = imm;
+ }
+
+ def v4i32_shift : BaseSIMDVectorShift<1, U, opc, {0,1,?,?,?,?,?},
+ V128, V128, vecshiftR32,
+ asm, ".4s", ".4s",
+ [(set (v4i32 V128:$Rd), (OpNode (v4f32 V128:$Rn), (i32 imm:$imm)))]> {
+ bits<5> imm;
+ let Inst{20-16} = imm;
+ }
+
+ def v2i64_shift : BaseSIMDVectorShift<1, U, opc, {1,?,?,?,?,?,?},
+ V128, V128, vecshiftR64,
+ asm, ".2d", ".2d",
+ [(set (v2i64 V128:$Rd), (OpNode (v2f64 V128:$Rn), (i32 imm:$imm)))]> {
+ bits<6> imm;
+ let Inst{21-16} = imm;
+ }
+}
+
+multiclass SIMDVectorRShiftSDToFP<bit U, bits<5> opc, string asm,
+ Intrinsic OpNode> {
+ def v2i32_shift : BaseSIMDVectorShift<0, U, opc, {0,1,?,?,?,?,?},
+ V64, V64, vecshiftR32,
+ asm, ".2s", ".2s",
+ [(set (v2f32 V64:$Rd), (OpNode (v2i32 V64:$Rn), (i32 imm:$imm)))]> {
+ bits<5> imm;
+ let Inst{20-16} = imm;
+ }
+
+ def v4i32_shift : BaseSIMDVectorShift<1, U, opc, {0,1,?,?,?,?,?},
+ V128, V128, vecshiftR32,
+ asm, ".4s", ".4s",
+ [(set (v4f32 V128:$Rd), (OpNode (v4i32 V128:$Rn), (i32 imm:$imm)))]> {
+ bits<5> imm;
+ let Inst{20-16} = imm;
+ }
+
+ def v2i64_shift : BaseSIMDVectorShift<1, U, opc, {1,?,?,?,?,?,?},
+ V128, V128, vecshiftR64,
+ asm, ".2d", ".2d",
+ [(set (v2f64 V128:$Rd), (OpNode (v2i64 V128:$Rn), (i32 imm:$imm)))]> {
+ bits<6> imm;
+ let Inst{21-16} = imm;
+ }
+}
+
+multiclass SIMDVectorRShiftNarrowBHS<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v8i8_shift : BaseSIMDVectorShift<0, U, opc, {0,0,0,1,?,?,?},
+ V64, V128, vecshiftR16Narrow,
+ asm, ".8b", ".8h",
+ [(set (v8i8 V64:$Rd), (OpNode (v8i16 V128:$Rn), vecshiftR16Narrow:$imm))]> {
+ bits<3> imm;
+ let Inst{18-16} = imm;
+ }
+
+ def v16i8_shift : BaseSIMDVectorShiftTied<1, U, opc, {0,0,0,1,?,?,?},
+ V128, V128, vecshiftR16Narrow,
+ asm#"2", ".16b", ".8h", []> {
+ bits<3> imm;
+ let Inst{18-16} = imm;
+ let hasSideEffects = 0;
+ }
+
+ def v4i16_shift : BaseSIMDVectorShift<0, U, opc, {0,0,1,?,?,?,?},
+ V64, V128, vecshiftR32Narrow,
+ asm, ".4h", ".4s",
+ [(set (v4i16 V64:$Rd), (OpNode (v4i32 V128:$Rn), vecshiftR32Narrow:$imm))]> {
+ bits<4> imm;
+ let Inst{19-16} = imm;
+ }
+
+ def v8i16_shift : BaseSIMDVectorShiftTied<1, U, opc, {0,0,1,?,?,?,?},
+ V128, V128, vecshiftR32Narrow,
+ asm#"2", ".8h", ".4s", []> {
+ bits<4> imm;
+ let Inst{19-16} = imm;
+ let hasSideEffects = 0;
+ }
+
+ def v2i32_shift : BaseSIMDVectorShift<0, U, opc, {0,1,?,?,?,?,?},
+ V64, V128, vecshiftR64Narrow,
+ asm, ".2s", ".2d",
+ [(set (v2i32 V64:$Rd), (OpNode (v2i64 V128:$Rn), vecshiftR64Narrow:$imm))]> {
+ bits<5> imm;
+ let Inst{20-16} = imm;
+ }
+
+ def v4i32_shift : BaseSIMDVectorShiftTied<1, U, opc, {0,1,?,?,?,?,?},
+ V128, V128, vecshiftR64Narrow,
+ asm#"2", ".4s", ".2d", []> {
+ bits<5> imm;
+ let Inst{20-16} = imm;
+ let hasSideEffects = 0;
+ }
+
+  // TableGen doesn't like patterns w/ INSERT_SUBREG on the instructions
+ // themselves, so put them here instead.
+
+ // Patterns involving what's effectively an insert high and a normal
+ // intrinsic, represented by CONCAT_VECTORS.
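+  // For example, a concat_vectors of the existing low half with the narrowed
+  // intrinsic result selects to the asm#"2" (high-half) variant, as in the
+  // patterns below (illustrative note).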
+ def : Pat<(concat_vectors (v8i8 V64:$Rd),(OpNode (v8i16 V128:$Rn),
+ vecshiftR16Narrow:$imm)),
+ (!cast<Instruction>(NAME # "v16i8_shift")
+ (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
+ V128:$Rn, vecshiftR16Narrow:$imm)>;
+ def : Pat<(concat_vectors (v4i16 V64:$Rd), (OpNode (v4i32 V128:$Rn),
+ vecshiftR32Narrow:$imm)),
+ (!cast<Instruction>(NAME # "v8i16_shift")
+ (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
+ V128:$Rn, vecshiftR32Narrow:$imm)>;
+ def : Pat<(concat_vectors (v2i32 V64:$Rd), (OpNode (v2i64 V128:$Rn),
+ vecshiftR64Narrow:$imm)),
+ (!cast<Instruction>(NAME # "v4i32_shift")
+ (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
+ V128:$Rn, vecshiftR64Narrow:$imm)>;
+}
+
+multiclass SIMDVectorLShiftBHSD<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v8i8_shift : BaseSIMDVectorShift<0, U, opc, {0,0,0,1,?,?,?},
+ V64, V64, vecshiftL8,
+ asm, ".8b", ".8b",
+ [(set (v8i8 V64:$Rd), (OpNode (v8i8 V64:$Rn),
+ (i32 vecshiftL8:$imm)))]> {
+ bits<3> imm;
+ let Inst{18-16} = imm;
+ }
+
+ def v16i8_shift : BaseSIMDVectorShift<1, U, opc, {0,0,0,1,?,?,?},
+ V128, V128, vecshiftL8,
+ asm, ".16b", ".16b",
+ [(set (v16i8 V128:$Rd), (OpNode (v16i8 V128:$Rn),
+ (i32 vecshiftL8:$imm)))]> {
+ bits<3> imm;
+ let Inst{18-16} = imm;
+ }
+
+ def v4i16_shift : BaseSIMDVectorShift<0, U, opc, {0,0,1,?,?,?,?},
+ V64, V64, vecshiftL16,
+ asm, ".4h", ".4h",
+ [(set (v4i16 V64:$Rd), (OpNode (v4i16 V64:$Rn),
+ (i32 vecshiftL16:$imm)))]> {
+ bits<4> imm;
+ let Inst{19-16} = imm;
+ }
+
+ def v8i16_shift : BaseSIMDVectorShift<1, U, opc, {0,0,1,?,?,?,?},
+ V128, V128, vecshiftL16,
+ asm, ".8h", ".8h",
+ [(set (v8i16 V128:$Rd), (OpNode (v8i16 V128:$Rn),
+ (i32 vecshiftL16:$imm)))]> {
+ bits<4> imm;
+ let Inst{19-16} = imm;
+ }
+
+ def v2i32_shift : BaseSIMDVectorShift<0, U, opc, {0,1,?,?,?,?,?},
+ V64, V64, vecshiftL32,
+ asm, ".2s", ".2s",
+ [(set (v2i32 V64:$Rd), (OpNode (v2i32 V64:$Rn),
+ (i32 vecshiftL32:$imm)))]> {
+ bits<5> imm;
+ let Inst{20-16} = imm;
+ }
+
+ def v4i32_shift : BaseSIMDVectorShift<1, U, opc, {0,1,?,?,?,?,?},
+ V128, V128, vecshiftL32,
+ asm, ".4s", ".4s",
+ [(set (v4i32 V128:$Rd), (OpNode (v4i32 V128:$Rn),
+ (i32 vecshiftL32:$imm)))]> {
+ bits<5> imm;
+ let Inst{20-16} = imm;
+ }
+
+ def v2i64_shift : BaseSIMDVectorShift<1, U, opc, {1,?,?,?,?,?,?},
+ V128, V128, vecshiftL64,
+ asm, ".2d", ".2d",
+ [(set (v2i64 V128:$Rd), (OpNode (v2i64 V128:$Rn),
+ (i32 vecshiftL64:$imm)))]> {
+ bits<6> imm;
+ let Inst{21-16} = imm;
+ }
+}
+
+multiclass SIMDVectorRShiftBHSD<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v8i8_shift : BaseSIMDVectorShift<0, U, opc, {0,0,0,1,?,?,?},
+ V64, V64, vecshiftR8,
+ asm, ".8b", ".8b",
+ [(set (v8i8 V64:$Rd), (OpNode (v8i8 V64:$Rn),
+ (i32 vecshiftR8:$imm)))]> {
+ bits<3> imm;
+ let Inst{18-16} = imm;
+ }
+
+ def v16i8_shift : BaseSIMDVectorShift<1, U, opc, {0,0,0,1,?,?,?},
+ V128, V128, vecshiftR8,
+ asm, ".16b", ".16b",
+ [(set (v16i8 V128:$Rd), (OpNode (v16i8 V128:$Rn),
+ (i32 vecshiftR8:$imm)))]> {
+ bits<3> imm;
+ let Inst{18-16} = imm;
+ }
+
+ def v4i16_shift : BaseSIMDVectorShift<0, U, opc, {0,0,1,?,?,?,?},
+ V64, V64, vecshiftR16,
+ asm, ".4h", ".4h",
+ [(set (v4i16 V64:$Rd), (OpNode (v4i16 V64:$Rn),
+ (i32 vecshiftR16:$imm)))]> {
+ bits<4> imm;
+ let Inst{19-16} = imm;
+ }
+
+ def v8i16_shift : BaseSIMDVectorShift<1, U, opc, {0,0,1,?,?,?,?},
+ V128, V128, vecshiftR16,
+ asm, ".8h", ".8h",
+ [(set (v8i16 V128:$Rd), (OpNode (v8i16 V128:$Rn),
+ (i32 vecshiftR16:$imm)))]> {
+ bits<4> imm;
+ let Inst{19-16} = imm;
+ }
+
+ def v2i32_shift : BaseSIMDVectorShift<0, U, opc, {0,1,?,?,?,?,?},
+ V64, V64, vecshiftR32,
+ asm, ".2s", ".2s",
+ [(set (v2i32 V64:$Rd), (OpNode (v2i32 V64:$Rn),
+ (i32 vecshiftR32:$imm)))]> {
+ bits<5> imm;
+ let Inst{20-16} = imm;
+ }
+
+ def v4i32_shift : BaseSIMDVectorShift<1, U, opc, {0,1,?,?,?,?,?},
+ V128, V128, vecshiftR32,
+ asm, ".4s", ".4s",
+ [(set (v4i32 V128:$Rd), (OpNode (v4i32 V128:$Rn),
+ (i32 vecshiftR32:$imm)))]> {
+ bits<5> imm;
+ let Inst{20-16} = imm;
+ }
+
+ def v2i64_shift : BaseSIMDVectorShift<1, U, opc, {1,?,?,?,?,?,?},
+ V128, V128, vecshiftR64,
+ asm, ".2d", ".2d",
+ [(set (v2i64 V128:$Rd), (OpNode (v2i64 V128:$Rn),
+ (i32 vecshiftR64:$imm)))]> {
+ bits<6> imm;
+ let Inst{21-16} = imm;
+ }
+}
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+multiclass SIMDVectorRShiftBHSDTied<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode = null_frag> {
+ def v8i8_shift : BaseSIMDVectorShiftTied<0, U, opc, {0,0,0,1,?,?,?},
+ V64, V64, vecshiftR8, asm, ".8b", ".8b",
+ [(set (v8i8 V64:$dst),
+ (OpNode (v8i8 V64:$Rd), (v8i8 V64:$Rn),
+ (i32 vecshiftR8:$imm)))]> {
+ bits<3> imm;
+ let Inst{18-16} = imm;
+ }
+
+ def v16i8_shift : BaseSIMDVectorShiftTied<1, U, opc, {0,0,0,1,?,?,?},
+ V128, V128, vecshiftR8, asm, ".16b", ".16b",
+ [(set (v16i8 V128:$dst),
+ (OpNode (v16i8 V128:$Rd), (v16i8 V128:$Rn),
+ (i32 vecshiftR8:$imm)))]> {
+ bits<3> imm;
+ let Inst{18-16} = imm;
+ }
+
+ def v4i16_shift : BaseSIMDVectorShiftTied<0, U, opc, {0,0,1,?,?,?,?},
+ V64, V64, vecshiftR16, asm, ".4h", ".4h",
+ [(set (v4i16 V64:$dst),
+ (OpNode (v4i16 V64:$Rd), (v4i16 V64:$Rn),
+ (i32 vecshiftR16:$imm)))]> {
+ bits<4> imm;
+ let Inst{19-16} = imm;
+ }
+
+ def v8i16_shift : BaseSIMDVectorShiftTied<1, U, opc, {0,0,1,?,?,?,?},
+ V128, V128, vecshiftR16, asm, ".8h", ".8h",
+ [(set (v8i16 V128:$dst),
+ (OpNode (v8i16 V128:$Rd), (v8i16 V128:$Rn),
+ (i32 vecshiftR16:$imm)))]> {
+ bits<4> imm;
+ let Inst{19-16} = imm;
+ }
+
+ def v2i32_shift : BaseSIMDVectorShiftTied<0, U, opc, {0,1,?,?,?,?,?},
+ V64, V64, vecshiftR32, asm, ".2s", ".2s",
+ [(set (v2i32 V64:$dst),
+ (OpNode (v2i32 V64:$Rd), (v2i32 V64:$Rn),
+ (i32 vecshiftR32:$imm)))]> {
+ bits<5> imm;
+ let Inst{20-16} = imm;
+ }
+
+ def v4i32_shift : BaseSIMDVectorShiftTied<1, U, opc, {0,1,?,?,?,?,?},
+ V128, V128, vecshiftR32, asm, ".4s", ".4s",
+ [(set (v4i32 V128:$dst),
+ (OpNode (v4i32 V128:$Rd), (v4i32 V128:$Rn),
+ (i32 vecshiftR32:$imm)))]> {
+ bits<5> imm;
+ let Inst{20-16} = imm;
+ }
+
+ def v2i64_shift : BaseSIMDVectorShiftTied<1, U, opc, {1,?,?,?,?,?,?},
+ V128, V128, vecshiftR64,
+ asm, ".2d", ".2d", [(set (v2i64 V128:$dst),
+ (OpNode (v2i64 V128:$Rd), (v2i64 V128:$Rn),
+ (i32 vecshiftR64:$imm)))]> {
+ bits<6> imm;
+ let Inst{21-16} = imm;
+ }
+}
+
+multiclass SIMDVectorLShiftBHSDTied<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode = null_frag> {
+ def v8i8_shift : BaseSIMDVectorShiftTied<0, U, opc, {0,0,0,1,?,?,?},
+ V64, V64, vecshiftL8,
+ asm, ".8b", ".8b",
+ [(set (v8i8 V64:$dst),
+ (OpNode (v8i8 V64:$Rd), (v8i8 V64:$Rn),
+ (i32 vecshiftL8:$imm)))]> {
+ bits<3> imm;
+ let Inst{18-16} = imm;
+ }
+
+ def v16i8_shift : BaseSIMDVectorShiftTied<1, U, opc, {0,0,0,1,?,?,?},
+ V128, V128, vecshiftL8,
+ asm, ".16b", ".16b",
+ [(set (v16i8 V128:$dst),
+ (OpNode (v16i8 V128:$Rd), (v16i8 V128:$Rn),
+ (i32 vecshiftL8:$imm)))]> {
+ bits<3> imm;
+ let Inst{18-16} = imm;
+ }
+
+ def v4i16_shift : BaseSIMDVectorShiftTied<0, U, opc, {0,0,1,?,?,?,?},
+ V64, V64, vecshiftL16,
+ asm, ".4h", ".4h",
+ [(set (v4i16 V64:$dst),
+ (OpNode (v4i16 V64:$Rd), (v4i16 V64:$Rn),
+ (i32 vecshiftL16:$imm)))]> {
+ bits<4> imm;
+ let Inst{19-16} = imm;
+ }
+
+ def v8i16_shift : BaseSIMDVectorShiftTied<1, U, opc, {0,0,1,?,?,?,?},
+ V128, V128, vecshiftL16,
+ asm, ".8h", ".8h",
+ [(set (v8i16 V128:$dst),
+ (OpNode (v8i16 V128:$Rd), (v8i16 V128:$Rn),
+ (i32 vecshiftL16:$imm)))]> {
+ bits<4> imm;
+ let Inst{19-16} = imm;
+ }
+
+ def v2i32_shift : BaseSIMDVectorShiftTied<0, U, opc, {0,1,?,?,?,?,?},
+ V64, V64, vecshiftL32,
+ asm, ".2s", ".2s",
+ [(set (v2i32 V64:$dst),
+ (OpNode (v2i32 V64:$Rd), (v2i32 V64:$Rn),
+ (i32 vecshiftL32:$imm)))]> {
+ bits<5> imm;
+ let Inst{20-16} = imm;
+ }
+
+ def v4i32_shift : BaseSIMDVectorShiftTied<1, U, opc, {0,1,?,?,?,?,?},
+ V128, V128, vecshiftL32,
+ asm, ".4s", ".4s",
+ [(set (v4i32 V128:$dst),
+ (OpNode (v4i32 V128:$Rd), (v4i32 V128:$Rn),
+ (i32 vecshiftL32:$imm)))]> {
+ bits<5> imm;
+ let Inst{20-16} = imm;
+ }
+
+ def v2i64_shift : BaseSIMDVectorShiftTied<1, U, opc, {1,?,?,?,?,?,?},
+ V128, V128, vecshiftL64,
+ asm, ".2d", ".2d",
+ [(set (v2i64 V128:$dst),
+ (OpNode (v2i64 V128:$Rd), (v2i64 V128:$Rn),
+ (i32 vecshiftL64:$imm)))]> {
+ bits<6> imm;
+ let Inst{21-16} = imm;
+ }
+}
+
+multiclass SIMDVectorLShiftLongBHSD<bit U, bits<5> opc, string asm,
+ SDPatternOperator OpNode> {
+ def v8i8_shift : BaseSIMDVectorShift<0, U, opc, {0,0,0,1,?,?,?},
+ V128, V64, vecshiftL8, asm, ".8h", ".8b",
+ [(set (v8i16 V128:$Rd), (OpNode (v8i8 V64:$Rn), vecshiftL8:$imm))]> {
+ bits<3> imm;
+ let Inst{18-16} = imm;
+ }
+
+ def v16i8_shift : BaseSIMDVectorShift<1, U, opc, {0,0,0,1,?,?,?},
+ V128, V128, vecshiftL8,
+ asm#"2", ".8h", ".16b",
+ [(set (v8i16 V128:$Rd),
+ (OpNode (extract_high_v16i8 V128:$Rn), vecshiftL8:$imm))]> {
+ bits<3> imm;
+ let Inst{18-16} = imm;
+ }
+
+ def v4i16_shift : BaseSIMDVectorShift<0, U, opc, {0,0,1,?,?,?,?},
+ V128, V64, vecshiftL16, asm, ".4s", ".4h",
+ [(set (v4i32 V128:$Rd), (OpNode (v4i16 V64:$Rn), vecshiftL16:$imm))]> {
+ bits<4> imm;
+ let Inst{19-16} = imm;
+ }
+
+ def v8i16_shift : BaseSIMDVectorShift<1, U, opc, {0,0,1,?,?,?,?},
+ V128, V128, vecshiftL16,
+ asm#"2", ".4s", ".8h",
+ [(set (v4i32 V128:$Rd),
+ (OpNode (extract_high_v8i16 V128:$Rn), vecshiftL16:$imm))]> {
+
+ bits<4> imm;
+ let Inst{19-16} = imm;
+ }
+
+ def v2i32_shift : BaseSIMDVectorShift<0, U, opc, {0,1,?,?,?,?,?},
+ V128, V64, vecshiftL32, asm, ".2d", ".2s",
+ [(set (v2i64 V128:$Rd), (OpNode (v2i32 V64:$Rn), vecshiftL32:$imm))]> {
+ bits<5> imm;
+ let Inst{20-16} = imm;
+ }
+
+ def v4i32_shift : BaseSIMDVectorShift<1, U, opc, {0,1,?,?,?,?,?},
+ V128, V128, vecshiftL32,
+ asm#"2", ".2d", ".4s",
+ [(set (v2i64 V128:$Rd),
+ (OpNode (extract_high_v4i32 V128:$Rn), vecshiftL32:$imm))]> {
+ bits<5> imm;
+ let Inst{20-16} = imm;
+ }
+}
+
+
+//---
+// Vector load/store
+//---
+// SIMD ldX/stX no-index memory references don't allow the optional
+// ", #0" constant and handle post-indexing explicitly, so we use
+// a more specialized parse method for them. Otherwise, it's the same as
+// the general GPR64sp handling.
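+// For example, "ld1 { v0.16b }, [x0]" is accepted but "ld1 { v0.16b }, [x0, #0]"
+// is not; post-indexed forms are written "ld1 { v0.16b }, [x0], #16" or
+// "ld1 { v0.16b }, [x0], x1" (illustrative note).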
+
+class BaseSIMDLdSt<bit Q, bit L, bits<4> opcode, bits<2> size,
+ string asm, dag oops, dag iops, list<dag> pattern>
+ : I<oops, iops, asm, "\t$Vt, [$Rn]", "", pattern> {
+ bits<5> Vt;
+ bits<5> Rn;
+ let Inst{31} = 0;
+ let Inst{30} = Q;
let Inst{29-23} = 0b0011000;
- let Inst{22} = l;
+ let Inst{22} = L;
let Inst{21-16} = 0b000000;
let Inst{15-12} = opcode;
let Inst{11-10} = size;
-
- // Inherit Rn in 9-5
- // Inherit Rt in 4-0
-}
-
-// Format AdvSIMD vector load/store multiple N-element structure (post-index)
-class NeonI_LdStMult_Post<bit q, bit l, bits<4> opcode, bits<2> size,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRtnm<outs, ins, asmstr, patterns, itin>
-{
- let Inst{31} = 0b0;
- let Inst{30} = q;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Vt;
+}
+
+class BaseSIMDLdStPost<bit Q, bit L, bits<4> opcode, bits<2> size,
+ string asm, dag oops, dag iops>
+ : I<oops, iops, asm, "\t$Vt, [$Rn], $Xm", "$Rn = $wback", []> {
+ bits<5> Vt;
+ bits<5> Rn;
+ bits<5> Xm;
+ let Inst{31} = 0;
+ let Inst{30} = Q;
let Inst{29-23} = 0b0011001;
- let Inst{22} = l;
- let Inst{21} = 0b0;
- // Inherit Rm in 20-16
+ let Inst{22} = L;
+ let Inst{21} = 0;
+ let Inst{20-16} = Xm;
let Inst{15-12} = opcode;
let Inst{11-10} = size;
- // Inherit Rn in 9-5
- // Inherit Rt in 4-0
-}
-
-// Format AdvSIMD vector load Single N-element structure to all lanes
-class NeonI_LdOne_Dup<bit q, bit r, bits<3> opcode, bits<2> size, dag outs,
- dag ins, string asmstr, list<dag> patterns,
- InstrItinClass itin>
- : A64InstRtn<outs, ins, asmstr, patterns, itin>
-{
- let Inst{31} = 0b0;
- let Inst{30} = q;
- let Inst{29-23} = 0b0011010;
- let Inst{22} = 0b1;
- let Inst{21} = r;
- let Inst{20-16} = 0b00000;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Vt;
+}
+
+// The immediate form of AdvSIMD post-indexed addressing is encoded with
+// register post-index addressing from the zero register.
+multiclass SIMDLdStAliases<string asm, string layout, string Count,
+ int Offset, int Size> {
+ // E.g. "ld1 { v0.8b, v1.8b }, [x1], #16"
+ // "ld1\t$Vt, [$Rn], #16"
+ // may get mapped to
+ // (LD1Twov8b_POST VecListTwo8b:$Vt, GPR64sp:$Rn, XZR)
+ def : InstAlias<asm # "\t$Vt, [$Rn], #" # Offset,
+ (!cast<Instruction>(NAME # Count # "v" # layout # "_POST")
+ GPR64sp:$Rn,
+ !cast<RegisterOperand>("VecList" # Count # layout):$Vt,
+ XZR), 1>;
+
+ // E.g. "ld1.8b { v0, v1 }, [x1], #16"
+ // "ld1.8b\t$Vt, [$Rn], #16"
+ // may get mapped to
+ // (LD1Twov8b_POST VecListTwo64:$Vt, GPR64sp:$Rn, XZR)
+ def : InstAlias<asm # "." # layout # "\t$Vt, [$Rn], #" # Offset,
+ (!cast<Instruction>(NAME # Count # "v" # layout # "_POST")
+ GPR64sp:$Rn,
+ !cast<RegisterOperand>("VecList" # Count # Size):$Vt,
+ XZR), 0>;
+
+ // E.g. "ld1.8b { v0, v1 }, [x1]"
+ // "ld1\t$Vt, [$Rn]"
+ // may get mapped to
+ // (LD1Twov8b VecListTwo64:$Vt, GPR64sp:$Rn)
+ def : InstAlias<asm # "." # layout # "\t$Vt, [$Rn]",
+ (!cast<Instruction>(NAME # Count # "v" # layout)
+ !cast<RegisterOperand>("VecList" # Count # Size):$Vt,
+ GPR64sp:$Rn), 0>;
+
+ // E.g. "ld1.8b { v0, v1 }, [x1], x2"
+ // "ld1\t$Vt, [$Rn], $Xm"
+ // may get mapped to
+ // (LD1Twov8b_POST VecListTwo64:$Vt, GPR64sp:$Rn, GPR64pi8:$Xm)
+ def : InstAlias<asm # "." # layout # "\t$Vt, [$Rn], $Xm",
+ (!cast<Instruction>(NAME # Count # "v" # layout # "_POST")
+ GPR64sp:$Rn,
+ !cast<RegisterOperand>("VecList" # Count # Size):$Vt,
+ !cast<RegisterOperand>("GPR64pi" # Offset):$Xm), 0>;
+}
+
+multiclass BaseSIMDLdN<string Count, string asm, string veclist, int Offset128,
+ int Offset64, bits<4> opcode> {
+ let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
+ def v16b: BaseSIMDLdSt<1, 1, opcode, 0b00, asm,
+ (outs !cast<RegisterOperand>(veclist # "16b"):$Vt),
+ (ins GPR64sp:$Rn), []>;
+ def v8h : BaseSIMDLdSt<1, 1, opcode, 0b01, asm,
+ (outs !cast<RegisterOperand>(veclist # "8h"):$Vt),
+ (ins GPR64sp:$Rn), []>;
+ def v4s : BaseSIMDLdSt<1, 1, opcode, 0b10, asm,
+ (outs !cast<RegisterOperand>(veclist # "4s"):$Vt),
+ (ins GPR64sp:$Rn), []>;
+ def v2d : BaseSIMDLdSt<1, 1, opcode, 0b11, asm,
+ (outs !cast<RegisterOperand>(veclist # "2d"):$Vt),
+ (ins GPR64sp:$Rn), []>;
+ def v8b : BaseSIMDLdSt<0, 1, opcode, 0b00, asm,
+ (outs !cast<RegisterOperand>(veclist # "8b"):$Vt),
+ (ins GPR64sp:$Rn), []>;
+ def v4h : BaseSIMDLdSt<0, 1, opcode, 0b01, asm,
+ (outs !cast<RegisterOperand>(veclist # "4h"):$Vt),
+ (ins GPR64sp:$Rn), []>;
+ def v2s : BaseSIMDLdSt<0, 1, opcode, 0b10, asm,
+ (outs !cast<RegisterOperand>(veclist # "2s"):$Vt),
+ (ins GPR64sp:$Rn), []>;
+
+
+ def v16b_POST: BaseSIMDLdStPost<1, 1, opcode, 0b00, asm,
+ (outs GPR64sp:$wback,
+ !cast<RegisterOperand>(veclist # "16b"):$Vt),
+ (ins GPR64sp:$Rn,
+ !cast<RegisterOperand>("GPR64pi" # Offset128):$Xm)>;
+ def v8h_POST : BaseSIMDLdStPost<1, 1, opcode, 0b01, asm,
+ (outs GPR64sp:$wback,
+ !cast<RegisterOperand>(veclist # "8h"):$Vt),
+ (ins GPR64sp:$Rn,
+ !cast<RegisterOperand>("GPR64pi" # Offset128):$Xm)>;
+ def v4s_POST : BaseSIMDLdStPost<1, 1, opcode, 0b10, asm,
+ (outs GPR64sp:$wback,
+ !cast<RegisterOperand>(veclist # "4s"):$Vt),
+ (ins GPR64sp:$Rn,
+ !cast<RegisterOperand>("GPR64pi" # Offset128):$Xm)>;
+ def v2d_POST : BaseSIMDLdStPost<1, 1, opcode, 0b11, asm,
+ (outs GPR64sp:$wback,
+ !cast<RegisterOperand>(veclist # "2d"):$Vt),
+ (ins GPR64sp:$Rn,
+ !cast<RegisterOperand>("GPR64pi" # Offset128):$Xm)>;
+ def v8b_POST : BaseSIMDLdStPost<0, 1, opcode, 0b00, asm,
+ (outs GPR64sp:$wback,
+ !cast<RegisterOperand>(veclist # "8b"):$Vt),
+ (ins GPR64sp:$Rn,
+ !cast<RegisterOperand>("GPR64pi" # Offset64):$Xm)>;
+ def v4h_POST : BaseSIMDLdStPost<0, 1, opcode, 0b01, asm,
+ (outs GPR64sp:$wback,
+ !cast<RegisterOperand>(veclist # "4h"):$Vt),
+ (ins GPR64sp:$Rn,
+ !cast<RegisterOperand>("GPR64pi" # Offset64):$Xm)>;
+ def v2s_POST : BaseSIMDLdStPost<0, 1, opcode, 0b10, asm,
+ (outs GPR64sp:$wback,
+ !cast<RegisterOperand>(veclist # "2s"):$Vt),
+ (ins GPR64sp:$Rn,
+ !cast<RegisterOperand>("GPR64pi" # Offset64):$Xm)>;
+ }
+
+ defm : SIMDLdStAliases<asm, "16b", Count, Offset128, 128>;
+ defm : SIMDLdStAliases<asm, "8h", Count, Offset128, 128>;
+ defm : SIMDLdStAliases<asm, "4s", Count, Offset128, 128>;
+ defm : SIMDLdStAliases<asm, "2d", Count, Offset128, 128>;
+ defm : SIMDLdStAliases<asm, "8b", Count, Offset64, 64>;
+ defm : SIMDLdStAliases<asm, "4h", Count, Offset64, 64>;
+ defm : SIMDLdStAliases<asm, "2s", Count, Offset64, 64>;
+}
+
+// Only ld1/st1 has a v1d version.
+multiclass BaseSIMDStN<string Count, string asm, string veclist, int Offset128,
+ int Offset64, bits<4> opcode> {
+ let hasSideEffects = 0, mayStore = 1, mayLoad = 0 in {
+ def v16b : BaseSIMDLdSt<1, 0, opcode, 0b00, asm, (outs),
+ (ins !cast<RegisterOperand>(veclist # "16b"):$Vt,
+ GPR64sp:$Rn), []>;
+ def v8h : BaseSIMDLdSt<1, 0, opcode, 0b01, asm, (outs),
+ (ins !cast<RegisterOperand>(veclist # "8h"):$Vt,
+ GPR64sp:$Rn), []>;
+ def v4s : BaseSIMDLdSt<1, 0, opcode, 0b10, asm, (outs),
+ (ins !cast<RegisterOperand>(veclist # "4s"):$Vt,
+ GPR64sp:$Rn), []>;
+ def v2d : BaseSIMDLdSt<1, 0, opcode, 0b11, asm, (outs),
+ (ins !cast<RegisterOperand>(veclist # "2d"):$Vt,
+ GPR64sp:$Rn), []>;
+ def v8b : BaseSIMDLdSt<0, 0, opcode, 0b00, asm, (outs),
+ (ins !cast<RegisterOperand>(veclist # "8b"):$Vt,
+ GPR64sp:$Rn), []>;
+ def v4h : BaseSIMDLdSt<0, 0, opcode, 0b01, asm, (outs),
+ (ins !cast<RegisterOperand>(veclist # "4h"):$Vt,
+ GPR64sp:$Rn), []>;
+ def v2s : BaseSIMDLdSt<0, 0, opcode, 0b10, asm, (outs),
+ (ins !cast<RegisterOperand>(veclist # "2s"):$Vt,
+ GPR64sp:$Rn), []>;
+
+ def v16b_POST : BaseSIMDLdStPost<1, 0, opcode, 0b00, asm,
+ (outs GPR64sp:$wback),
+ (ins !cast<RegisterOperand>(veclist # "16b"):$Vt,
+ GPR64sp:$Rn,
+ !cast<RegisterOperand>("GPR64pi" # Offset128):$Xm)>;
+ def v8h_POST : BaseSIMDLdStPost<1, 0, opcode, 0b01, asm,
+ (outs GPR64sp:$wback),
+ (ins !cast<RegisterOperand>(veclist # "8h"):$Vt,
+ GPR64sp:$Rn,
+ !cast<RegisterOperand>("GPR64pi" # Offset128):$Xm)>;
+ def v4s_POST : BaseSIMDLdStPost<1, 0, opcode, 0b10, asm,
+ (outs GPR64sp:$wback),
+ (ins !cast<RegisterOperand>(veclist # "4s"):$Vt,
+ GPR64sp:$Rn,
+ !cast<RegisterOperand>("GPR64pi" # Offset128):$Xm)>;
+ def v2d_POST : BaseSIMDLdStPost<1, 0, opcode, 0b11, asm,
+ (outs GPR64sp:$wback),
+ (ins !cast<RegisterOperand>(veclist # "2d"):$Vt,
+ GPR64sp:$Rn,
+ !cast<RegisterOperand>("GPR64pi" # Offset128):$Xm)>;
+ def v8b_POST : BaseSIMDLdStPost<0, 0, opcode, 0b00, asm,
+ (outs GPR64sp:$wback),
+ (ins !cast<RegisterOperand>(veclist # "8b"):$Vt,
+ GPR64sp:$Rn,
+ !cast<RegisterOperand>("GPR64pi" # Offset64):$Xm)>;
+ def v4h_POST : BaseSIMDLdStPost<0, 0, opcode, 0b01, asm,
+ (outs GPR64sp:$wback),
+ (ins !cast<RegisterOperand>(veclist # "4h"):$Vt,
+ GPR64sp:$Rn,
+ !cast<RegisterOperand>("GPR64pi" # Offset64):$Xm)>;
+ def v2s_POST : BaseSIMDLdStPost<0, 0, opcode, 0b10, asm,
+ (outs GPR64sp:$wback),
+ (ins !cast<RegisterOperand>(veclist # "2s"):$Vt,
+ GPR64sp:$Rn,
+ !cast<RegisterOperand>("GPR64pi" # Offset64):$Xm)>;
+ }
+
+ defm : SIMDLdStAliases<asm, "16b", Count, Offset128, 128>;
+ defm : SIMDLdStAliases<asm, "8h", Count, Offset128, 128>;
+ defm : SIMDLdStAliases<asm, "4s", Count, Offset128, 128>;
+ defm : SIMDLdStAliases<asm, "2d", Count, Offset128, 128>;
+ defm : SIMDLdStAliases<asm, "8b", Count, Offset64, 64>;
+ defm : SIMDLdStAliases<asm, "4h", Count, Offset64, 64>;
+ defm : SIMDLdStAliases<asm, "2s", Count, Offset64, 64>;
+}
+
+multiclass BaseSIMDLd1<string Count, string asm, string veclist,
+ int Offset128, int Offset64, bits<4> opcode>
+ : BaseSIMDLdN<Count, asm, veclist, Offset128, Offset64, opcode> {
+
+ // LD1 instructions have extra "1d" variants.
+ let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
+ def v1d : BaseSIMDLdSt<0, 1, opcode, 0b11, asm,
+ (outs !cast<RegisterOperand>(veclist # "1d"):$Vt),
+ (ins GPR64sp:$Rn), []>;
+
+ def v1d_POST : BaseSIMDLdStPost<0, 1, opcode, 0b11, asm,
+ (outs GPR64sp:$wback,
+ !cast<RegisterOperand>(veclist # "1d"):$Vt),
+ (ins GPR64sp:$Rn,
+ !cast<RegisterOperand>("GPR64pi" # Offset64):$Xm)>;
+ }
+
+ defm : SIMDLdStAliases<asm, "1d", Count, Offset64, 64>;
+}
+
+multiclass BaseSIMDSt1<string Count, string asm, string veclist,
+ int Offset128, int Offset64, bits<4> opcode>
+ : BaseSIMDStN<Count, asm, veclist, Offset128, Offset64, opcode> {
+
+ // ST1 instructions have extra "1d" variants.
+ let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
+ def v1d : BaseSIMDLdSt<0, 0, opcode, 0b11, asm, (outs),
+ (ins !cast<RegisterOperand>(veclist # "1d"):$Vt,
+ GPR64sp:$Rn), []>;
+
+ def v1d_POST : BaseSIMDLdStPost<0, 0, opcode, 0b11, asm,
+ (outs GPR64sp:$wback),
+ (ins !cast<RegisterOperand>(veclist # "1d"):$Vt,
+ GPR64sp:$Rn,
+ !cast<RegisterOperand>("GPR64pi" # Offset64):$Xm)>;
+ }
+
+ defm : SIMDLdStAliases<asm, "1d", Count, Offset64, 64>;
+}
+
+multiclass SIMDLd1Multiple<string asm> {
+ defm One : BaseSIMDLd1<"One", asm, "VecListOne", 16, 8, 0b0111>;
+ defm Two : BaseSIMDLd1<"Two", asm, "VecListTwo", 32, 16, 0b1010>;
+ defm Three : BaseSIMDLd1<"Three", asm, "VecListThree", 48, 24, 0b0110>;
+ defm Four : BaseSIMDLd1<"Four", asm, "VecListFour", 64, 32, 0b0010>;
+}
+
+multiclass SIMDSt1Multiple<string asm> {
+ defm One : BaseSIMDSt1<"One", asm, "VecListOne", 16, 8, 0b0111>;
+ defm Two : BaseSIMDSt1<"Two", asm, "VecListTwo", 32, 16, 0b1010>;
+ defm Three : BaseSIMDSt1<"Three", asm, "VecListThree", 48, 24, 0b0110>;
+ defm Four : BaseSIMDSt1<"Four", asm, "VecListFour", 64, 32, 0b0010>;
+}
+
+multiclass SIMDLd2Multiple<string asm> {
+ defm Two : BaseSIMDLdN<"Two", asm, "VecListTwo", 32, 16, 0b1000>;
+}
+
+multiclass SIMDSt2Multiple<string asm> {
+ defm Two : BaseSIMDStN<"Two", asm, "VecListTwo", 32, 16, 0b1000>;
+}
+
+multiclass SIMDLd3Multiple<string asm> {
+ defm Three : BaseSIMDLdN<"Three", asm, "VecListThree", 48, 24, 0b0100>;
+}
+
+multiclass SIMDSt3Multiple<string asm> {
+ defm Three : BaseSIMDStN<"Three", asm, "VecListThree", 48, 24, 0b0100>;
+}
+
+multiclass SIMDLd4Multiple<string asm> {
+ defm Four : BaseSIMDLdN<"Four", asm, "VecListFour", 64, 32, 0b0000>;
+}
+
+multiclass SIMDSt4Multiple<string asm> {
+ defm Four : BaseSIMDStN<"Four", asm, "VecListFour", 64, 32, 0b0000>;
+}
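+
+// Illustrative use (not part of the original patch; the real instantiations
+// live in AArch64InstrInfo.td and may differ slightly):
+//   defm LD1 : SIMDLd1Multiple<"ld1">;
+//   defm LD2 : SIMDLd2Multiple<"ld2">;
+//   defm ST1 : SIMDSt1Multiple<"st1">;
+//   defm ST2 : SIMDSt2Multiple<"st2">;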
+
+//---
+// AdvSIMD Load/store single-element
+//---
+
+class BaseSIMDLdStSingle<bit L, bit R, bits<3> opcode,
+ string asm, string operands, string cst,
+ dag oops, dag iops, list<dag> pattern>
+ : I<oops, iops, asm, operands, cst, pattern> {
+ bits<5> Vt;
+ bits<5> Rn;
+ let Inst{31} = 0;
+ let Inst{29-24} = 0b001101;
+ let Inst{22} = L;
+ let Inst{21} = R;
+ let Inst{15-13} = opcode;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Vt;
+}
+
+class BaseSIMDLdStSingleTied<bit L, bit R, bits<3> opcode,
+ string asm, string operands, string cst,
+ dag oops, dag iops, list<dag> pattern>
+ : I<oops, iops, asm, operands, "$Vt = $dst," # cst, pattern> {
+ bits<5> Vt;
+ bits<5> Rn;
+ let Inst{31} = 0;
+ let Inst{29-24} = 0b001101;
+ let Inst{22} = L;
+ let Inst{21} = R;
let Inst{15-13} = opcode;
- let Inst{12} = 0b0;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Vt;
+}
+
+
+let mayLoad = 1, mayStore = 0, hasSideEffects = 0 in
+class BaseSIMDLdR<bit Q, bit R, bits<3> opcode, bit S, bits<2> size, string asm,
+ Operand listtype>
+ : BaseSIMDLdStSingle<1, R, opcode, asm, "\t$Vt, [$Rn]", "",
+ (outs listtype:$Vt), (ins GPR64sp:$Rn),
+ []> {
+ let Inst{30} = Q;
+ let Inst{23} = 0;
+ let Inst{20-16} = 0b00000;
+ let Inst{12} = S;
+ let Inst{11-10} = size;
+}
+let mayLoad = 1, mayStore = 0, hasSideEffects = 0 in
+class BaseSIMDLdRPost<bit Q, bit R, bits<3> opcode, bit S, bits<2> size,
+ string asm, Operand listtype, Operand GPR64pi>
+ : BaseSIMDLdStSingle<1, R, opcode, asm, "\t$Vt, [$Rn], $Xm",
+ "$Rn = $wback",
+ (outs GPR64sp:$wback, listtype:$Vt),
+ (ins GPR64sp:$Rn, GPR64pi:$Xm), []> {
+ bits<5> Xm;
+ let Inst{30} = Q;
+ let Inst{23} = 1;
+ let Inst{20-16} = Xm;
+ let Inst{12} = S;
let Inst{11-10} = size;
+}
+
+multiclass SIMDLdrAliases<string asm, string layout, string Count,
+ int Offset, int Size> {
+ // E.g. "ld1r { v0.8b }, [x1], #1"
+ // "ld1r.8b\t$Vt, [$Rn], #1"
+ // may get mapped to
+ // (LD1Rv8b_POST VecListOne8b:$Vt, GPR64sp:$Rn, XZR)
+ def : InstAlias<asm # "\t$Vt, [$Rn], #" # Offset,
+ (!cast<Instruction>(NAME # "v" # layout # "_POST")
+ GPR64sp:$Rn,
+ !cast<RegisterOperand>("VecList" # Count # layout):$Vt,
+ XZR), 1>;
+
+ // E.g. "ld1r.8b { v0 }, [x1], #1"
+ // "ld1r.8b\t$Vt, [$Rn], #1"
+ // may get mapped to
+ // (LD1Rv8b_POST VecListOne64:$Vt, GPR64sp:$Rn, XZR)
+ def : InstAlias<asm # "." # layout # "\t$Vt, [$Rn], #" # Offset,
+ (!cast<Instruction>(NAME # "v" # layout # "_POST")
+ GPR64sp:$Rn,
+ !cast<RegisterOperand>("VecList" # Count # Size):$Vt,
+ XZR), 0>;
+
+ // E.g. "ld1r.8b { v0 }, [x1]"
+ // "ld1r.8b\t$Vt, [$Rn]"
+ // may get mapped to
+ // (LD1Rv8b VecListOne64:$Vt, GPR64sp:$Rn)
+ def : InstAlias<asm # "." # layout # "\t$Vt, [$Rn]",
+ (!cast<Instruction>(NAME # "v" # layout)
+ !cast<RegisterOperand>("VecList" # Count # Size):$Vt,
+ GPR64sp:$Rn), 0>;
+
+ // E.g. "ld1r.8b { v0 }, [x1], x2"
+ // "ld1r.8b\t$Vt, [$Rn], $Xm"
+ // may get mapped to
+ // (LD1Rv8b_POST VecListOne64:$Vt, GPR64sp:$Rn, GPR64pi1:$Xm)
+ def : InstAlias<asm # "." # layout # "\t$Vt, [$Rn], $Xm",
+ (!cast<Instruction>(NAME # "v" # layout # "_POST")
+ GPR64sp:$Rn,
+ !cast<RegisterOperand>("VecList" # Count # Size):$Vt,
+ !cast<RegisterOperand>("GPR64pi" # Offset):$Xm), 0>;
+}
+
+multiclass SIMDLdR<bit R, bits<3> opcode, bit S, string asm, string Count,
+ int Offset1, int Offset2, int Offset4, int Offset8> {
+ def v8b : BaseSIMDLdR<0, R, opcode, S, 0b00, asm,
+ !cast<Operand>("VecList" # Count # "8b")>;
+ def v16b: BaseSIMDLdR<1, R, opcode, S, 0b00, asm,
+ !cast<Operand>("VecList" # Count #"16b")>;
+ def v4h : BaseSIMDLdR<0, R, opcode, S, 0b01, asm,
+ !cast<Operand>("VecList" # Count #"4h")>;
+ def v8h : BaseSIMDLdR<1, R, opcode, S, 0b01, asm,
+ !cast<Operand>("VecList" # Count #"8h")>;
+ def v2s : BaseSIMDLdR<0, R, opcode, S, 0b10, asm,
+ !cast<Operand>("VecList" # Count #"2s")>;
+ def v4s : BaseSIMDLdR<1, R, opcode, S, 0b10, asm,
+ !cast<Operand>("VecList" # Count #"4s")>;
+ def v1d : BaseSIMDLdR<0, R, opcode, S, 0b11, asm,
+ !cast<Operand>("VecList" # Count #"1d")>;
+ def v2d : BaseSIMDLdR<1, R, opcode, S, 0b11, asm,
+ !cast<Operand>("VecList" # Count #"2d")>;
+
+ def v8b_POST : BaseSIMDLdRPost<0, R, opcode, S, 0b00, asm,
+ !cast<Operand>("VecList" # Count # "8b"),
+ !cast<Operand>("GPR64pi" # Offset1)>;
+ def v16b_POST: BaseSIMDLdRPost<1, R, opcode, S, 0b00, asm,
+ !cast<Operand>("VecList" # Count # "16b"),
+ !cast<Operand>("GPR64pi" # Offset1)>;
+ def v4h_POST : BaseSIMDLdRPost<0, R, opcode, S, 0b01, asm,
+ !cast<Operand>("VecList" # Count # "4h"),
+ !cast<Operand>("GPR64pi" # Offset2)>;
+ def v8h_POST : BaseSIMDLdRPost<1, R, opcode, S, 0b01, asm,
+ !cast<Operand>("VecList" # Count # "8h"),
+ !cast<Operand>("GPR64pi" # Offset2)>;
+ def v2s_POST : BaseSIMDLdRPost<0, R, opcode, S, 0b10, asm,
+ !cast<Operand>("VecList" # Count # "2s"),
+ !cast<Operand>("GPR64pi" # Offset4)>;
+ def v4s_POST : BaseSIMDLdRPost<1, R, opcode, S, 0b10, asm,
+ !cast<Operand>("VecList" # Count # "4s"),
+ !cast<Operand>("GPR64pi" # Offset4)>;
+ def v1d_POST : BaseSIMDLdRPost<0, R, opcode, S, 0b11, asm,
+ !cast<Operand>("VecList" # Count # "1d"),
+ !cast<Operand>("GPR64pi" # Offset8)>;
+ def v2d_POST : BaseSIMDLdRPost<1, R, opcode, S, 0b11, asm,
+ !cast<Operand>("VecList" # Count # "2d"),
+ !cast<Operand>("GPR64pi" # Offset8)>;
+
+ defm : SIMDLdrAliases<asm, "8b", Count, Offset1, 64>;
+ defm : SIMDLdrAliases<asm, "16b", Count, Offset1, 128>;
+ defm : SIMDLdrAliases<asm, "4h", Count, Offset2, 64>;
+ defm : SIMDLdrAliases<asm, "8h", Count, Offset2, 128>;
+ defm : SIMDLdrAliases<asm, "2s", Count, Offset4, 64>;
+ defm : SIMDLdrAliases<asm, "4s", Count, Offset4, 128>;
+ defm : SIMDLdrAliases<asm, "1d", Count, Offset8, 64>;
+ defm : SIMDLdrAliases<asm, "2d", Count, Offset8, 128>;
+}
- // Inherit Rn in 9-5
- // Inherit Rt in 4-0
-}
-
-// Format AdvSIMD vector load/store Single N-element structure to/from one lane
-class NeonI_LdStOne_Lane<bit l, bit r, bits<2> op2_1, bit op0, dag outs,
- dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRtn<outs, ins, asmstr, patterns, itin>
-{
- bits<4> lane;
- let Inst{31} = 0b0;
- let Inst{29-23} = 0b0011010;
- let Inst{22} = l;
- let Inst{21} = r;
+class SIMDLdStSingleB<bit L, bit R, bits<3> opcode, string asm,
+ dag oops, dag iops, list<dag> pattern>
+ : BaseSIMDLdStSingle<L, R, opcode, asm, "\t$Vt$idx, [$Rn]", "", oops, iops,
+ pattern> {
+ // idx encoded in Q:S:size fields.
+ bits<4> idx;
+ let Inst{30} = idx{3};
+ let Inst{23} = 0;
let Inst{20-16} = 0b00000;
- let Inst{15-14} = op2_1;
- let Inst{13} = op0;
-
- // Inherit Rn in 9-5
- // Inherit Rt in 4-0
-}
-
-// Format AdvSIMD post-index vector load Single N-element structure to all lanes
-class NeonI_LdOne_Dup_Post<bit q, bit r, bits<3> opcode, bits<2> size, dag outs,
- dag ins, string asmstr, list<dag> patterns,
- InstrItinClass itin>
- : A64InstRtnm<outs, ins, asmstr, patterns, itin>
-{
- let Inst{31} = 0b0;
- let Inst{30} = q;
- let Inst{29-23} = 0b0011011;
- let Inst{22} = 0b1;
- let Inst{21} = r;
- // Inherit Rm in 20-16
- let Inst{15-13} = opcode;
- let Inst{12} = 0b0;
+ let Inst{12} = idx{2};
+ let Inst{11-10} = idx{1-0};
+}
+class SIMDLdStSingleBTied<bit L, bit R, bits<3> opcode, string asm,
+ dag oops, dag iops, list<dag> pattern>
+ : BaseSIMDLdStSingleTied<L, R, opcode, asm, "\t$Vt$idx, [$Rn]", "",
+ oops, iops, pattern> {
+ // idx encoded in Q:S:size fields.
+ bits<4> idx;
+ let Inst{30} = idx{3};
+ let Inst{23} = 0;
+ let Inst{20-16} = 0b00000;
+ let Inst{12} = idx{2};
+ let Inst{11-10} = idx{1-0};
+}
+class SIMDLdStSingleBPost<bit L, bit R, bits<3> opcode, string asm,
+ dag oops, dag iops>
+ : BaseSIMDLdStSingle<L, R, opcode, asm, "\t$Vt$idx, [$Rn], $Xm",
+ "$Rn = $wback", oops, iops, []> {
+ // idx encoded in Q:S:size fields.
+ bits<4> idx;
+ bits<5> Xm;
+ let Inst{30} = idx{3};
+ let Inst{23} = 1;
+ let Inst{20-16} = Xm;
+ let Inst{12} = idx{2};
+ let Inst{11-10} = idx{1-0};
+}
+class SIMDLdStSingleBTiedPost<bit L, bit R, bits<3> opcode, string asm,
+ dag oops, dag iops>
+ : BaseSIMDLdStSingleTied<L, R, opcode, asm, "\t$Vt$idx, [$Rn], $Xm",
+ "$Rn = $wback", oops, iops, []> {
+ // idx encoded in Q:S:size fields.
+ bits<4> idx;
+ bits<5> Xm;
+ let Inst{30} = idx{3};
+ let Inst{23} = 1;
+ let Inst{20-16} = Xm;
+ let Inst{12} = idx{2};
+ let Inst{11-10} = idx{1-0};
+}
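+
+// Worked example (illustrative, not part of the original patch): in the byte
+// variants above, a lane index in [0,15] is scattered across the encoding as
+//   Q    = idx{3}   -> Inst{30}
+//   S    = idx{2}   -> Inst{12}
+//   size = idx{1-0} -> Inst{11-10}
+// so lane 13 (0b1101) encodes as Q=1, S=1, size=0b01. The H, S and D variants
+// below use progressively fewer of these bits as the element size grows.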
+
+class SIMDLdStSingleH<bit L, bit R, bits<3> opcode, bit size, string asm,
+ dag oops, dag iops, list<dag> pattern>
+ : BaseSIMDLdStSingle<L, R, opcode, asm, "\t$Vt$idx, [$Rn]", "", oops, iops,
+ pattern> {
+ // idx encoded in Q:S:size<1> fields.
+ bits<3> idx;
+ let Inst{30} = idx{2};
+ let Inst{23} = 0;
+ let Inst{20-16} = 0b00000;
+ let Inst{12} = idx{1};
+ let Inst{11} = idx{0};
+ let Inst{10} = size;
+}
+class SIMDLdStSingleHTied<bit L, bit R, bits<3> opcode, bit size, string asm,
+ dag oops, dag iops, list<dag> pattern>
+ : BaseSIMDLdStSingleTied<L, R, opcode, asm, "\t$Vt$idx, [$Rn]", "",
+ oops, iops, pattern> {
+ // idx encoded in Q:S:size<1> fields.
+ bits<3> idx;
+ let Inst{30} = idx{2};
+ let Inst{23} = 0;
+ let Inst{20-16} = 0b00000;
+ let Inst{12} = idx{1};
+ let Inst{11} = idx{0};
+ let Inst{10} = size;
+}
+
+class SIMDLdStSingleHPost<bit L, bit R, bits<3> opcode, bit size, string asm,
+ dag oops, dag iops>
+ : BaseSIMDLdStSingle<L, R, opcode, asm, "\t$Vt$idx, [$Rn], $Xm",
+ "$Rn = $wback", oops, iops, []> {
+ // idx encoded in Q:S:size<1> fields.
+ bits<3> idx;
+ bits<5> Xm;
+ let Inst{30} = idx{2};
+ let Inst{23} = 1;
+ let Inst{20-16} = Xm;
+ let Inst{12} = idx{1};
+ let Inst{11} = idx{0};
+ let Inst{10} = size;
+}
+class SIMDLdStSingleHTiedPost<bit L, bit R, bits<3> opcode, bit size, string asm,
+ dag oops, dag iops>
+ : BaseSIMDLdStSingleTied<L, R, opcode, asm, "\t$Vt$idx, [$Rn], $Xm",
+ "$Rn = $wback", oops, iops, []> {
+ // idx encoded in Q:S:size<1> fields.
+ bits<3> idx;
+ bits<5> Xm;
+ let Inst{30} = idx{2};
+ let Inst{23} = 1;
+ let Inst{20-16} = Xm;
+ let Inst{12} = idx{1};
+ let Inst{11} = idx{0};
+ let Inst{10} = size;
+}
+class SIMDLdStSingleS<bit L, bit R, bits<3> opcode, bits<2> size, string asm,
+ dag oops, dag iops, list<dag> pattern>
+ : BaseSIMDLdStSingle<L, R, opcode, asm, "\t$Vt$idx, [$Rn]", "", oops, iops,
+ pattern> {
+ // idx encoded in Q:S fields.
+ bits<2> idx;
+ let Inst{30} = idx{1};
+ let Inst{23} = 0;
+ let Inst{20-16} = 0b00000;
+ let Inst{12} = idx{0};
+ let Inst{11-10} = size;
+}
+class SIMDLdStSingleSTied<bit L, bit R, bits<3> opcode, bits<2> size, string asm,
+ dag oops, dag iops, list<dag> pattern>
+ : BaseSIMDLdStSingleTied<L, R, opcode, asm, "\t$Vt$idx, [$Rn]", "",
+ oops, iops, pattern> {
+ // idx encoded in Q:S fields.
+ bits<2> idx;
+ let Inst{30} = idx{1};
+ let Inst{23} = 0;
+ let Inst{20-16} = 0b00000;
+ let Inst{12} = idx{0};
+ let Inst{11-10} = size;
+}
+class SIMDLdStSingleSPost<bit L, bit R, bits<3> opcode, bits<2> size,
+ string asm, dag oops, dag iops>
+ : BaseSIMDLdStSingle<L, R, opcode, asm, "\t$Vt$idx, [$Rn], $Xm",
+ "$Rn = $wback", oops, iops, []> {
+ // idx encoded in Q:S fields.
+ bits<2> idx;
+ bits<5> Xm;
+ let Inst{30} = idx{1};
+ let Inst{23} = 1;
+ let Inst{20-16} = Xm;
+ let Inst{12} = idx{0};
+ let Inst{11-10} = size;
+}
+class SIMDLdStSingleSTiedPost<bit L, bit R, bits<3> opcode, bits<2> size,
+ string asm, dag oops, dag iops>
+ : BaseSIMDLdStSingleTied<L, R, opcode, asm, "\t$Vt$idx, [$Rn], $Xm",
+ "$Rn = $wback", oops, iops, []> {
+ // idx encoded in Q:S fields.
+ bits<2> idx;
+ bits<5> Xm;
+ let Inst{30} = idx{1};
+ let Inst{23} = 1;
+ let Inst{20-16} = Xm;
+ let Inst{12} = idx{0};
+ let Inst{11-10} = size;
+}
+class SIMDLdStSingleD<bit L, bit R, bits<3> opcode, bits<2> size, string asm,
+ dag oops, dag iops, list<dag> pattern>
+ : BaseSIMDLdStSingle<L, R, opcode, asm, "\t$Vt$idx, [$Rn]", "", oops, iops,
+ pattern> {
+ // idx encoded in Q field.
+ bits<1> idx;
+ let Inst{30} = idx;
+ let Inst{23} = 0;
+ let Inst{20-16} = 0b00000;
+ let Inst{12} = 0;
+ let Inst{11-10} = size;
+}
+class SIMDLdStSingleDTied<bit L, bit R, bits<3> opcode, bits<2> size, string asm,
+ dag oops, dag iops, list<dag> pattern>
+ : BaseSIMDLdStSingleTied<L, R, opcode, asm, "\t$Vt$idx, [$Rn]", "",
+ oops, iops, pattern> {
+ // idx encoded in Q field.
+ bits<1> idx;
+ let Inst{30} = idx;
+ let Inst{23} = 0;
+ let Inst{20-16} = 0b00000;
+ let Inst{12} = 0;
+ let Inst{11-10} = size;
+}
+class SIMDLdStSingleDPost<bit L, bit R, bits<3> opcode, bits<2> size,
+ string asm, dag oops, dag iops>
+ : BaseSIMDLdStSingle<L, R, opcode, asm, "\t$Vt$idx, [$Rn], $Xm",
+ "$Rn = $wback", oops, iops, []> {
+ // idx encoded in Q field.
+ bits<1> idx;
+ bits<5> Xm;
+ let Inst{30} = idx;
+ let Inst{23} = 1;
+ let Inst{20-16} = Xm;
+ let Inst{12} = 0;
+ let Inst{11-10} = size;
+}
+class SIMDLdStSingleDTiedPost<bit L, bit R, bits<3> opcode, bits<2> size,
+ string asm, dag oops, dag iops>
+ : BaseSIMDLdStSingleTied<L, R, opcode, asm, "\t$Vt$idx, [$Rn], $Xm",
+ "$Rn = $wback", oops, iops, []> {
+ // idx encoded in Q field.
+ bits<1> idx;
+ bits<5> Xm;
+ let Inst{30} = idx;
+ let Inst{23} = 1;
+ let Inst{20-16} = Xm;
+ let Inst{12} = 0;
let Inst{11-10} = size;
+}
- // Inherit Rn in 9-5
- // Inherit Rt in 4-0
-}
-
-// Format AdvSIMD post-index vector load/store Single N-element structure
-// to/from one lane
-class NeonI_LdStOne_Lane_Post<bit l, bit r, bits<2> op2_1, bit op0, dag outs,
- dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRtnm<outs, ins, asmstr, patterns, itin>
-{
- bits<4> lane;
- let Inst{31} = 0b0;
- let Inst{29-23} = 0b0011011;
- let Inst{22} = l;
- let Inst{21} = r;
- // Inherit Rm in 20-16
- let Inst{15-14} = op2_1;
- let Inst{13} = op0;
-
- // Inherit Rn in 9-5
- // Inherit Rt in 4-0
-}
-
-// Format AdvSIMD 3 scalar registers with different type
-
-class NeonI_Scalar3Diff<bit u, bits<2> size, bits<4> opcode,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
- let Inst{31-30} = 0b01;
- let Inst{29} = u;
- let Inst{28-24} = 0b11110;
- let Inst{23-22} = size;
- let Inst{21} = 0b1;
- // Inherit Rm in 20-16
- let Inst{15-12} = opcode;
- let Inst{11-10} = 0b00;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
+let mayLoad = 1, mayStore = 0, hasSideEffects = 0 in
+multiclass SIMDLdSingleBTied<bit R, bits<3> opcode, string asm,
+ RegisterOperand listtype,
+ RegisterOperand GPR64pi> {
+ def i8 : SIMDLdStSingleBTied<1, R, opcode, asm,
+ (outs listtype:$dst),
+ (ins listtype:$Vt, VectorIndexB:$idx,
+ GPR64sp:$Rn), []>;
+
+ def i8_POST : SIMDLdStSingleBTiedPost<1, R, opcode, asm,
+ (outs GPR64sp:$wback, listtype:$dst),
+ (ins listtype:$Vt, VectorIndexB:$idx,
+ GPR64sp:$Rn, GPR64pi:$Xm)>;
+}
+let mayLoad = 1, mayStore = 0, hasSideEffects = 0 in
+multiclass SIMDLdSingleHTied<bit R, bits<3> opcode, bit size, string asm,
+ RegisterOperand listtype,
+ RegisterOperand GPR64pi> {
+ def i16 : SIMDLdStSingleHTied<1, R, opcode, size, asm,
+ (outs listtype:$dst),
+ (ins listtype:$Vt, VectorIndexH:$idx,
+ GPR64sp:$Rn), []>;
+
+ def i16_POST : SIMDLdStSingleHTiedPost<1, R, opcode, size, asm,
+ (outs GPR64sp:$wback, listtype:$dst),
+ (ins listtype:$Vt, VectorIndexH:$idx,
+ GPR64sp:$Rn, GPR64pi:$Xm)>;
+}
+let mayLoad = 1, mayStore = 0, hasSideEffects = 0 in
+multiclass SIMDLdSingleSTied<bit R, bits<3> opcode, bits<2> size,string asm,
+ RegisterOperand listtype,
+ RegisterOperand GPR64pi> {
+ def i32 : SIMDLdStSingleSTied<1, R, opcode, size, asm,
+ (outs listtype:$dst),
+ (ins listtype:$Vt, VectorIndexS:$idx,
+ GPR64sp:$Rn), []>;
+
+ def i32_POST : SIMDLdStSingleSTiedPost<1, R, opcode, size, asm,
+ (outs GPR64sp:$wback, listtype:$dst),
+ (ins listtype:$Vt, VectorIndexS:$idx,
+ GPR64sp:$Rn, GPR64pi:$Xm)>;
+}
+let mayLoad = 1, mayStore = 0, hasSideEffects = 0 in
+multiclass SIMDLdSingleDTied<bit R, bits<3> opcode, bits<2> size, string asm,
+ RegisterOperand listtype, RegisterOperand GPR64pi> {
+ def i64 : SIMDLdStSingleDTied<1, R, opcode, size, asm,
+ (outs listtype:$dst),
+ (ins listtype:$Vt, VectorIndexD:$idx,
+ GPR64sp:$Rn), []>;
+
+ def i64_POST : SIMDLdStSingleDTiedPost<1, R, opcode, size, asm,
+ (outs GPR64sp:$wback, listtype:$dst),
+ (ins listtype:$Vt, VectorIndexD:$idx,
+ GPR64sp:$Rn, GPR64pi:$Xm)>;
+}
+let mayLoad = 0, mayStore = 1, hasSideEffects = 0 in
+multiclass SIMDStSingleB<bit R, bits<3> opcode, string asm,
+ RegisterOperand listtype, RegisterOperand GPR64pi> {
+ def i8 : SIMDLdStSingleB<0, R, opcode, asm,
+ (outs), (ins listtype:$Vt, VectorIndexB:$idx,
+ GPR64sp:$Rn), []>;
+
+ def i8_POST : SIMDLdStSingleBPost<0, R, opcode, asm,
+ (outs GPR64sp:$wback),
+ (ins listtype:$Vt, VectorIndexB:$idx,
+ GPR64sp:$Rn, GPR64pi:$Xm)>;
+}
+let mayLoad = 0, mayStore = 1, hasSideEffects = 0 in
+multiclass SIMDStSingleH<bit R, bits<3> opcode, bit size, string asm,
+ RegisterOperand listtype, RegisterOperand GPR64pi> {
+ def i16 : SIMDLdStSingleH<0, R, opcode, size, asm,
+ (outs), (ins listtype:$Vt, VectorIndexH:$idx,
+ GPR64sp:$Rn), []>;
+
+ def i16_POST : SIMDLdStSingleHPost<0, R, opcode, size, asm,
+ (outs GPR64sp:$wback),
+ (ins listtype:$Vt, VectorIndexH:$idx,
+ GPR64sp:$Rn, GPR64pi:$Xm)>;
+}
+let mayLoad = 0, mayStore = 1, hasSideEffects = 0 in
+multiclass SIMDStSingleS<bit R, bits<3> opcode, bits<2> size,string asm,
+ RegisterOperand listtype, RegisterOperand GPR64pi> {
+ def i32 : SIMDLdStSingleS<0, R, opcode, size, asm,
+ (outs), (ins listtype:$Vt, VectorIndexS:$idx,
+ GPR64sp:$Rn), []>;
+
+ def i32_POST : SIMDLdStSingleSPost<0, R, opcode, size, asm,
+ (outs GPR64sp:$wback),
+ (ins listtype:$Vt, VectorIndexS:$idx,
+ GPR64sp:$Rn, GPR64pi:$Xm)>;
+}
+let mayLoad = 0, mayStore = 1, hasSideEffects = 0 in
+multiclass SIMDStSingleD<bit R, bits<3> opcode, bits<2> size, string asm,
+ RegisterOperand listtype, RegisterOperand GPR64pi> {
+ def i64 : SIMDLdStSingleD<0, R, opcode, size, asm,
+ (outs), (ins listtype:$Vt, VectorIndexD:$idx,
+ GPR64sp:$Rn), []>;
+
+ def i64_POST : SIMDLdStSingleDPost<0, R, opcode, size, asm,
+ (outs GPR64sp:$wback),
+ (ins listtype:$Vt, VectorIndexD:$idx,
+ GPR64sp:$Rn, GPR64pi:$Xm)>;
}
-// Format AdvSIMD scalar shift by immediate
+multiclass SIMDLdStSingleAliases<string asm, string layout, string Type,
+ string Count, int Offset, Operand idxtype> {
+ // E.g. "ld1 { v0.8b }[0], [x1], #1"
+ // "ld1\t$Vt, [$Rn], #1"
+ // may get mapped to
+ // (LD1Rv8b_POST VecListOne8b:$Vt, GPR64sp:$Rn, XZR)
+ def : InstAlias<asm # "\t$Vt$idx, [$Rn], #" # Offset,
+ (!cast<Instruction>(NAME # Type # "_POST")
+ GPR64sp:$Rn,
+ !cast<RegisterOperand>("VecList" # Count # layout):$Vt,
+ idxtype:$idx, XZR), 1>;
+
+ // E.g. "ld1.8b { v0 }[0], [x1], #1"
+ // "ld1.8b\t$Vt, [$Rn], #1"
+ // may get mapped to
+ // (LD1Rv8b_POST VecListOne64:$Vt, GPR64sp:$Rn, XZR)
+ def : InstAlias<asm # "." # layout # "\t$Vt$idx, [$Rn], #" # Offset,
+ (!cast<Instruction>(NAME # Type # "_POST")
+ GPR64sp:$Rn,
+ !cast<RegisterOperand>("VecList" # Count # "128"):$Vt,
+ idxtype:$idx, XZR), 0>;
+
+ // E.g. "ld1.8b { v0 }[0], [x1]"
+ // "ld1.8b\t$Vt, [$Rn]"
+ // may get mapped to
+ // (LD1Rv8b VecListOne64:$Vt, GPR64sp:$Rn)
+ def : InstAlias<asm # "." # layout # "\t$Vt$idx, [$Rn]",
+ (!cast<Instruction>(NAME # Type)
+ !cast<RegisterOperand>("VecList" # Count # "128"):$Vt,
+ idxtype:$idx, GPR64sp:$Rn), 0>;
+
+ // E.g. "ld1.8b { v0 }[0], [x1], x2"
+ // "ld1.8b\t$Vt, [$Rn], $Xm"
+ // may get mapped to
+ // (LD1Rv8b_POST VecListOne64:$Vt, GPR64sp:$Rn, GPR64pi1:$Xm)
+ def : InstAlias<asm # "." # layout # "\t$Vt$idx, [$Rn], $Xm",
+ (!cast<Instruction>(NAME # Type # "_POST")
+ GPR64sp:$Rn,
+ !cast<RegisterOperand>("VecList" # Count # "128"):$Vt,
+ idxtype:$idx,
+ !cast<RegisterOperand>("GPR64pi" # Offset):$Xm), 0>;
+}
-class NeonI_ScalarShiftImm<bit u, bits<5> opcode,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin> {
- bits<4> Imm4;
- bits<3> Imm3;
- let Inst{31-30} = 0b01;
- let Inst{29} = u;
- let Inst{28-23} = 0b111110;
- let Inst{22-19} = Imm4;
- let Inst{18-16} = Imm3;
- let Inst{15-11} = opcode;
- let Inst{10} = 0b1;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
+multiclass SIMDLdSt1SingleAliases<string asm> {
+ defm : SIMDLdStSingleAliases<asm, "b", "i8", "One", 1, VectorIndexB>;
+ defm : SIMDLdStSingleAliases<asm, "h", "i16", "One", 2, VectorIndexH>;
+ defm : SIMDLdStSingleAliases<asm, "s", "i32", "One", 4, VectorIndexS>;
+ defm : SIMDLdStSingleAliases<asm, "d", "i64", "One", 8, VectorIndexD>;
}
-// Format AdvSIMD crypto AES
-class NeonI_Crypto_AES<bits<2> size, bits<5> opcode,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin> {
- let Inst{31-24} = 0b01001110;
- let Inst{23-22} = size;
- let Inst{21-17} = 0b10100;
- let Inst{16-12} = opcode;
- let Inst{11-10} = 0b10;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
+multiclass SIMDLdSt2SingleAliases<string asm> {
+ defm : SIMDLdStSingleAliases<asm, "b", "i8", "Two", 2, VectorIndexB>;
+ defm : SIMDLdStSingleAliases<asm, "h", "i16", "Two", 4, VectorIndexH>;
+ defm : SIMDLdStSingleAliases<asm, "s", "i32", "Two", 8, VectorIndexS>;
+ defm : SIMDLdStSingleAliases<asm, "d", "i64", "Two", 16, VectorIndexD>;
}
-// Format AdvSIMD crypto SHA
-class NeonI_Crypto_SHA<bits<2> size, bits<5> opcode,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin> {
- let Inst{31-24} = 0b01011110;
- let Inst{23-22} = size;
- let Inst{21-17} = 0b10100;
- let Inst{16-12} = opcode;
+multiclass SIMDLdSt3SingleAliases<string asm> {
+ defm : SIMDLdStSingleAliases<asm, "b", "i8", "Three", 3, VectorIndexB>;
+ defm : SIMDLdStSingleAliases<asm, "h", "i16", "Three", 6, VectorIndexH>;
+ defm : SIMDLdStSingleAliases<asm, "s", "i32", "Three", 12, VectorIndexS>;
+ defm : SIMDLdStSingleAliases<asm, "d", "i64", "Three", 24, VectorIndexD>;
+}
+
+multiclass SIMDLdSt4SingleAliases<string asm> {
+ defm : SIMDLdStSingleAliases<asm, "b", "i8", "Four", 4, VectorIndexB>;
+ defm : SIMDLdStSingleAliases<asm, "h", "i16", "Four", 8, VectorIndexH>;
+ defm : SIMDLdStSingleAliases<asm, "s", "i32", "Four", 16, VectorIndexS>;
+ defm : SIMDLdStSingleAliases<asm, "d", "i64", "Four", 32, VectorIndexD>;
+}
+} // end of 'let Predicates = [HasNEON]'
+
+//----------------------------------------------------------------------------
+// Crypto extensions
+//----------------------------------------------------------------------------
+
+let Predicates = [HasCrypto] in {
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class AESBase<bits<4> opc, string asm, dag outs, dag ins, string cstr,
+ list<dag> pat>
+ : I<outs, ins, asm, "{\t$Rd.16b, $Rn.16b|.16b\t$Rd, $Rn}", cstr, pat>,
+ Sched<[WriteV]>{
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31-16} = 0b0100111000101000;
+ let Inst{15-12} = opc;
let Inst{11-10} = 0b10;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
}
-// Format AdvSIMD crypto 3V SHA
-class NeonI_Crypto_3VSHA<bits<2> size, bits<3> opcode,
- dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
- let Inst{31-24} = 0b01011110;
- let Inst{23-22} = size;
- let Inst{21} = 0b0;
- // Inherit Rm in 20-16
- let Inst{15} = 0b0;
- let Inst{14-12} = opcode;
+class AESInst<bits<4> opc, string asm, Intrinsic OpNode>
+ : AESBase<opc, asm, (outs V128:$Rd), (ins V128:$Rn), "",
+ [(set (v16i8 V128:$Rd), (OpNode (v16i8 V128:$Rn)))]>;
+
+class AESTiedInst<bits<4> opc, string asm, Intrinsic OpNode>
+ : AESBase<opc, asm, (outs V128:$dst), (ins V128:$Rd, V128:$Rn),
+ "$Rd = $dst",
+ [(set (v16i8 V128:$dst),
+ (OpNode (v16i8 V128:$Rd), (v16i8 V128:$Rn)))]>;
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class SHA3OpTiedInst<bits<3> opc, string asm, string dst_lhs_kind,
+ dag oops, dag iops, list<dag> pat>
+ : I<oops, iops, asm,
+ "{\t$Rd" # dst_lhs_kind # ", $Rn" # dst_lhs_kind # ", $Rm.4s" #
+ "|.4s\t$Rd, $Rn, $Rm}", "$Rd = $dst", pat>,
+ Sched<[WriteV]>{
+ bits<5> Rd;
+ bits<5> Rn;
+ bits<5> Rm;
+ let Inst{31-21} = 0b01011110000;
+ let Inst{20-16} = Rm;
+ let Inst{15} = 0;
+ let Inst{14-12} = opc;
let Inst{11-10} = 0b00;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
-}
-
-// Format AdvSIMD scalar x indexed element
-class NeonI_ScalarXIndexedElem<bit u, bit szhi, bit szlo,
- bits<4> opcode, dag outs, dag ins,
- string asmstr, list<dag> patterns,
- InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin>
-{
- let Inst{31} = 0b0;
- let Inst{30} = 0b1;
- let Inst{29} = u;
- let Inst{28-24} = 0b11111;
- let Inst{23} = szhi;
- let Inst{22} = szlo;
- // l in Inst{21}
- // m in Instr{20}
- // Inherit Rm in 19-16
- let Inst{15-12} = opcode;
- // h in Inst{11}
- let Inst{10} = 0b0;
- // Inherit Rn in 9-5
- // Inherit Rd in 4-0
-}
-// Format AdvSIMD scalar copy - insert from element to scalar
-class NeonI_ScalarCopy<dag outs, dag ins, string asmstr,
- list<dag> patterns, InstrItinClass itin>
- : NeonI_copy<0b1, 0b0, 0b0000, outs, ins, asmstr, patterns, itin> {
- let Inst{28} = 0b1;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
}
+
+class SHATiedInstQSV<bits<3> opc, string asm, Intrinsic OpNode>
+ : SHA3OpTiedInst<opc, asm, "", (outs FPR128:$dst),
+ (ins FPR128:$Rd, FPR32:$Rn, V128:$Rm),
+ [(set (v4i32 FPR128:$dst),
+ (OpNode (v4i32 FPR128:$Rd), (i32 FPR32:$Rn),
+ (v4i32 V128:$Rm)))]>;
+
+class SHATiedInstVVV<bits<3> opc, string asm, Intrinsic OpNode>
+ : SHA3OpTiedInst<opc, asm, ".4s", (outs V128:$dst),
+ (ins V128:$Rd, V128:$Rn, V128:$Rm),
+ [(set (v4i32 V128:$dst),
+ (OpNode (v4i32 V128:$Rd), (v4i32 V128:$Rn),
+ (v4i32 V128:$Rm)))]>;
+
+class SHATiedInstQQV<bits<3> opc, string asm, Intrinsic OpNode>
+ : SHA3OpTiedInst<opc, asm, "", (outs FPR128:$dst),
+ (ins FPR128:$Rd, FPR128:$Rn, V128:$Rm),
+ [(set (v4i32 FPR128:$dst),
+ (OpNode (v4i32 FPR128:$Rd), (v4i32 FPR128:$Rn),
+ (v4i32 V128:$Rm)))]>;
+
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
+class SHA2OpInst<bits<4> opc, string asm, string kind,
+ string cstr, dag oops, dag iops,
+ list<dag> pat>
+ : I<oops, iops, asm, "{\t$Rd" # kind # ", $Rn" # kind #
+ "|" # kind # "\t$Rd, $Rn}", cstr, pat>,
+ Sched<[WriteV]>{
+ bits<5> Rd;
+ bits<5> Rn;
+ let Inst{31-16} = 0b0101111000101000;
+ let Inst{15-12} = opc;
+ let Inst{11-10} = 0b10;
+ let Inst{9-5} = Rn;
+ let Inst{4-0} = Rd;
}
+class SHATiedInstVV<bits<4> opc, string asm, Intrinsic OpNode>
+ : SHA2OpInst<opc, asm, ".4s", "$Rd = $dst", (outs V128:$dst),
+ (ins V128:$Rd, V128:$Rn),
+ [(set (v4i32 V128:$dst),
+ (OpNode (v4i32 V128:$Rd), (v4i32 V128:$Rn)))]>;
+
+class SHAInstSS<bits<4> opc, string asm, Intrinsic OpNode>
+ : SHA2OpInst<opc, asm, "", "", (outs FPR32:$Rd), (ins FPR32:$Rn),
+ [(set (i32 FPR32:$Rd), (OpNode (i32 FPR32:$Rn)))]>;
+} // end of 'let Predicates = [HasCrypto]'
+
+// Allow the size specifier tokens to be upper case, not just lower.
+def : TokenAlias<".8B", ".8b">;
+def : TokenAlias<".4H", ".4h">;
+def : TokenAlias<".2S", ".2s">;
+def : TokenAlias<".1D", ".1d">;
+def : TokenAlias<".16B", ".16b">;
+def : TokenAlias<".8H", ".8h">;
+def : TokenAlias<".4S", ".4s">;
+def : TokenAlias<".2D", ".2d">;
+def : TokenAlias<".1Q", ".1q">;
+def : TokenAlias<".B", ".b">;
+def : TokenAlias<".H", ".h">;
+def : TokenAlias<".S", ".s">;
+def : TokenAlias<".D", ".d">;
+def : TokenAlias<".Q", ".q">;
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 180110a..ce85b2c 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -11,188 +11,89 @@
//
//===----------------------------------------------------------------------===//
-#include "AArch64.h"
#include "AArch64InstrInfo.h"
-#include "AArch64MachineFunctionInfo.h"
-#include "AArch64TargetMachine.h"
-#include "MCTargetDesc/AArch64MCTargetDesc.h"
-#include "Utils/AArch64BaseInfo.h"
-#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/CodeGen/MachineDominators.h"
+#include "AArch64Subtarget.h"
+#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/IR/Function.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/MC/MCInst.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
-#include <algorithm>
+using namespace llvm;
#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"
-using namespace llvm;
-
AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
- : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
- Subtarget(STI) {}
+ : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
+ RI(this, &STI), Subtarget(STI) {}
-void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
- unsigned Opc = 0;
- unsigned ZeroReg = 0;
- if (DestReg == AArch64::XSP || SrcReg == AArch64::XSP) {
- // E.g. ADD xDst, xsp, #0 (, lsl #0)
- BuildMI(MBB, I, DL, get(AArch64::ADDxxi_lsl0_s), DestReg)
- .addReg(SrcReg)
- .addImm(0);
- return;
- } else if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
- // E.g. ADD wDST, wsp, #0 (, lsl #0)
- BuildMI(MBB, I, DL, get(AArch64::ADDwwi_lsl0_s), DestReg)
- .addReg(SrcReg)
- .addImm(0);
- return;
- } else if (DestReg == AArch64::NZCV) {
- assert(AArch64::GPR64RegClass.contains(SrcReg));
- // E.g. MSR NZCV, xDST
- BuildMI(MBB, I, DL, get(AArch64::MSRix))
- .addImm(A64SysReg::NZCV)
- .addReg(SrcReg);
- } else if (SrcReg == AArch64::NZCV) {
- assert(AArch64::GPR64RegClass.contains(DestReg));
- // E.g. MRS xDST, NZCV
- BuildMI(MBB, I, DL, get(AArch64::MRSxi), DestReg)
- .addImm(A64SysReg::NZCV);
- } else if (AArch64::GPR64RegClass.contains(DestReg)) {
- if(AArch64::GPR64RegClass.contains(SrcReg)){
- Opc = AArch64::ORRxxx_lsl;
- ZeroReg = AArch64::XZR;
- } else{
- assert(AArch64::FPR64RegClass.contains(SrcReg));
- BuildMI(MBB, I, DL, get(AArch64::FMOVxd), DestReg)
- .addReg(SrcReg);
- return;
- }
- } else if (AArch64::GPR32RegClass.contains(DestReg)) {
- if(AArch64::GPR32RegClass.contains(SrcReg)){
- Opc = AArch64::ORRwww_lsl;
- ZeroReg = AArch64::WZR;
- } else{
- assert(AArch64::FPR32RegClass.contains(SrcReg));
- BuildMI(MBB, I, DL, get(AArch64::FMOVws), DestReg)
- .addReg(SrcReg);
- return;
- }
- } else if (AArch64::FPR32RegClass.contains(DestReg)) {
- if(AArch64::FPR32RegClass.contains(SrcReg)){
- BuildMI(MBB, I, DL, get(AArch64::FMOVss), DestReg)
- .addReg(SrcReg);
- return;
- }
- else {
- assert(AArch64::GPR32RegClass.contains(SrcReg));
- BuildMI(MBB, I, DL, get(AArch64::FMOVsw), DestReg)
- .addReg(SrcReg);
- return;
- }
- } else if (AArch64::FPR64RegClass.contains(DestReg)) {
- if(AArch64::FPR64RegClass.contains(SrcReg)){
- BuildMI(MBB, I, DL, get(AArch64::FMOVdd), DestReg)
- .addReg(SrcReg);
- return;
- }
- else {
- assert(AArch64::GPR64RegClass.contains(SrcReg));
- BuildMI(MBB, I, DL, get(AArch64::FMOVdx), DestReg)
- .addReg(SrcReg);
- return;
- }
- } else if (AArch64::FPR128RegClass.contains(DestReg)) {
- assert(AArch64::FPR128RegClass.contains(SrcReg));
+/// GetInstSize - Return the number of bytes of code the specified
+/// instruction may be. This returns the maximum number of bytes.
+unsigned AArch64InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
+ const MachineBasicBlock &MBB = *MI->getParent();
+ const MachineFunction *MF = MBB.getParent();
+ const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
- // If NEON is enable, we use ORR to implement this copy.
- // If NEON isn't available, emit STR and LDR to handle this.
- if(getSubTarget().hasNEON()) {
- BuildMI(MBB, I, DL, get(AArch64::ORRvvv_16B), DestReg)
- .addReg(SrcReg)
- .addReg(SrcReg);
- return;
- } else {
- BuildMI(MBB, I, DL, get(AArch64::LSFP128_PreInd_STR), AArch64::XSP)
- .addReg(SrcReg)
- .addReg(AArch64::XSP)
- .addImm(0x1ff & -16);
+ if (MI->getOpcode() == AArch64::INLINEASM)
+ return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
- BuildMI(MBB, I, DL, get(AArch64::LSFP128_PostInd_LDR), DestReg)
- .addReg(AArch64::XSP, RegState::Define)
- .addReg(AArch64::XSP)
- .addImm(16);
- return;
- }
- } else {
- llvm_unreachable("Unknown register class in copyPhysReg");
+ const MCInstrDesc &Desc = MI->getDesc();
+ switch (Desc.getOpcode()) {
+ default:
+ // Anything not explicitly designated otherwise is a normal 4-byte insn.
+ return 4;
+ case TargetOpcode::DBG_VALUE:
+ case TargetOpcode::EH_LABEL:
+ case TargetOpcode::IMPLICIT_DEF:
+ case TargetOpcode::KILL:
+ return 0;
}
- // E.g. ORR xDst, xzr, xSrc, lsl #0
- BuildMI(MBB, I, DL, get(Opc), DestReg)
- .addReg(ZeroReg)
- .addReg(SrcReg)
- .addImm(0);
-}
-
-/// Does the Opcode represent a conditional branch that we can remove and re-add
-/// at the end of a basic block?
-static bool isCondBranch(unsigned Opc) {
- return Opc == AArch64::Bcc || Opc == AArch64::CBZw || Opc == AArch64::CBZx ||
- Opc == AArch64::CBNZw || Opc == AArch64::CBNZx ||
- Opc == AArch64::TBZwii || Opc == AArch64::TBZxii ||
- Opc == AArch64::TBNZwii || Opc == AArch64::TBNZxii;
+ llvm_unreachable("GetInstSizeInBytes()- Unable to determine insn size");
}
-/// Takes apart a given conditional branch MachineInstr (see isCondBranch),
-/// setting TBB to the destination basic block and populating the Cond vector
-/// with data necessary to recreate the conditional branch at a later
-/// date. First element will be the opcode, and subsequent ones define the
-/// conditions being branched on in an instruction-specific manner.
-static void classifyCondBranch(MachineInstr *I, MachineBasicBlock *&TBB,
- SmallVectorImpl<MachineOperand> &Cond) {
- switch(I->getOpcode()) {
- case AArch64::Bcc:
- case AArch64::CBZw:
- case AArch64::CBZx:
- case AArch64::CBNZw:
- case AArch64::CBNZx:
- // These instructions just have one predicate operand in position 0 (either
- // a condition code or a register being compared).
- Cond.push_back(MachineOperand::CreateImm(I->getOpcode()));
- Cond.push_back(I->getOperand(0));
- TBB = I->getOperand(1).getMBB();
- return;
- case AArch64::TBZwii:
- case AArch64::TBZxii:
- case AArch64::TBNZwii:
- case AArch64::TBNZxii:
- // These have two predicate operands: a register and a bit position.
- Cond.push_back(MachineOperand::CreateImm(I->getOpcode()));
- Cond.push_back(I->getOperand(0));
- Cond.push_back(I->getOperand(1));
- TBB = I->getOperand(2).getMBB();
- return;
+static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
+ SmallVectorImpl<MachineOperand> &Cond) {
+ // Block ends with fall-through condbranch.
+ switch (LastInst->getOpcode()) {
default:
- llvm_unreachable("Unknown conditional branch to classify");
+ llvm_unreachable("Unknown branch instruction?");
+ case AArch64::Bcc:
+ Target = LastInst->getOperand(1).getMBB();
+ Cond.push_back(LastInst->getOperand(0));
+ break;
+ case AArch64::CBZW:
+ case AArch64::CBZX:
+ case AArch64::CBNZW:
+ case AArch64::CBNZX:
+ Target = LastInst->getOperand(1).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(-1));
+ Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
+ Cond.push_back(LastInst->getOperand(0));
+ break;
+ case AArch64::TBZW:
+ case AArch64::TBZX:
+ case AArch64::TBNZW:
+ case AArch64::TBNZX:
+ Target = LastInst->getOperand(2).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(-1));
+ Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
+ Cond.push_back(LastInst->getOperand(0));
+ Cond.push_back(LastInst->getOperand(1));
}
}
-
-bool
-AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const {
+// Branch analysis.
+bool AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
// If the block has no terminators, it just falls into the block after it.
MachineBasicBlock::iterator I = MBB.end();
if (I == MBB.begin())
@@ -212,15 +113,16 @@ AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
// If there is only one terminator instruction, process it.
unsigned LastOpc = LastInst->getOpcode();
if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
- if (LastOpc == AArch64::Bimm) {
+ if (isUncondBranchOpcode(LastOpc)) {
TBB = LastInst->getOperand(0).getMBB();
return false;
}
- if (isCondBranch(LastOpc)) {
- classifyCondBranch(LastInst, TBB, Cond);
+ if (isCondBranchOpcode(LastOpc)) {
+ // Block ends with fall-through condbranch.
+ parseCondBranch(LastInst, TBB, Cond);
return false;
}
- return true; // Can't handle indirect branch.
+ return true; // Can't handle indirect branch.
}
// Get the instruction before it if it is a terminator.
@@ -229,8 +131,8 @@ AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
// If AllowModify is true and the block ends with two or more unconditional
// branches, delete all but the first unconditional branch.
- if (AllowModify && LastOpc == AArch64::Bimm) {
- while (SecondLastOpc == AArch64::Bimm) {
+ if (AllowModify && isUncondBranchOpcode(LastOpc)) {
+ while (isUncondBranchOpcode(SecondLastOpc)) {
LastInst->eraseFromParent();
LastInst = SecondLastInst;
LastOpc = LastInst->getOpcode();
@@ -250,23 +152,15 @@ AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
return true;
// If the block ends with a B and a Bcc, handle it.
- if (LastOpc == AArch64::Bimm) {
- if (SecondLastOpc == AArch64::Bcc) {
- TBB = SecondLastInst->getOperand(1).getMBB();
- Cond.push_back(MachineOperand::CreateImm(AArch64::Bcc));
- Cond.push_back(SecondLastInst->getOperand(0));
- FBB = LastInst->getOperand(0).getMBB();
- return false;
- } else if (isCondBranch(SecondLastOpc)) {
- classifyCondBranch(SecondLastInst, TBB, Cond);
- FBB = LastInst->getOperand(0).getMBB();
- return false;
- }
+ if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
+ parseCondBranch(SecondLastInst, TBB, Cond);
+ FBB = LastInst->getOperand(0).getMBB();
+ return false;
}
// If the block ends with two unconditional branches, handle it. The second
// one is not executed, so remove it.
- if (SecondLastOpc == AArch64::Bimm && LastOpc == AArch64::Bimm) {
+ if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
TBB = SecondLastInst->getOperand(0).getMBB();
I = LastInst;
if (AllowModify)
@@ -274,84 +168,72 @@ AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
return false;
}
+ // ...likewise if it ends with an indirect branch followed by an unconditional
+ // branch.
+ if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
+ I = LastInst;
+ if (AllowModify)
+ I->eraseFromParent();
+ return true;
+ }
+
// Otherwise, can't handle this.
return true;
}
bool AArch64InstrInfo::ReverseBranchCondition(
- SmallVectorImpl<MachineOperand> &Cond) const {
- switch (Cond[0].getImm()) {
- case AArch64::Bcc: {
- A64CC::CondCodes CC = static_cast<A64CC::CondCodes>(Cond[1].getImm());
- CC = A64InvertCondCode(CC);
- Cond[1].setImm(CC);
- return false;
- }
- case AArch64::CBZw:
- Cond[0].setImm(AArch64::CBNZw);
- return false;
- case AArch64::CBZx:
- Cond[0].setImm(AArch64::CBNZx);
- return false;
- case AArch64::CBNZw:
- Cond[0].setImm(AArch64::CBZw);
- return false;
- case AArch64::CBNZx:
- Cond[0].setImm(AArch64::CBZx);
- return false;
- case AArch64::TBZwii:
- Cond[0].setImm(AArch64::TBNZwii);
- return false;
- case AArch64::TBZxii:
- Cond[0].setImm(AArch64::TBNZxii);
- return false;
- case AArch64::TBNZwii:
- Cond[0].setImm(AArch64::TBZwii);
- return false;
- case AArch64::TBNZxii:
- Cond[0].setImm(AArch64::TBZxii);
- return false;
- default:
- llvm_unreachable("Unknown branch type");
- }
-}
-
-
-unsigned
-AArch64InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const {
- if (FBB == 0 && Cond.empty()) {
- BuildMI(&MBB, DL, get(AArch64::Bimm)).addMBB(TBB);
- return 1;
- } else if (FBB == 0) {
- MachineInstrBuilder MIB = BuildMI(&MBB, DL, get(Cond[0].getImm()));
- for (int i = 1, e = Cond.size(); i != e; ++i)
- MIB.addOperand(Cond[i]);
- MIB.addMBB(TBB);
- return 1;
+ SmallVectorImpl<MachineOperand> &Cond) const {
+ if (Cond[0].getImm() != -1) {
+ // Regular Bcc
+ AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
+ Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
+ } else {
+ // Folded compare-and-branch
+ switch (Cond[1].getImm()) {
+ default:
+ llvm_unreachable("Unknown conditional branch!");
+ case AArch64::CBZW:
+ Cond[1].setImm(AArch64::CBNZW);
+ break;
+ case AArch64::CBNZW:
+ Cond[1].setImm(AArch64::CBZW);
+ break;
+ case AArch64::CBZX:
+ Cond[1].setImm(AArch64::CBNZX);
+ break;
+ case AArch64::CBNZX:
+ Cond[1].setImm(AArch64::CBZX);
+ break;
+ case AArch64::TBZW:
+ Cond[1].setImm(AArch64::TBNZW);
+ break;
+ case AArch64::TBNZW:
+ Cond[1].setImm(AArch64::TBZW);
+ break;
+ case AArch64::TBZX:
+ Cond[1].setImm(AArch64::TBNZX);
+ break;
+ case AArch64::TBNZX:
+ Cond[1].setImm(AArch64::TBZX);
+ break;
+ }
}
- MachineInstrBuilder MIB = BuildMI(&MBB, DL, get(Cond[0].getImm()));
- for (int i = 1, e = Cond.size(); i != e; ++i)
- MIB.addOperand(Cond[i]);
- MIB.addMBB(TBB);
-
- BuildMI(&MBB, DL, get(AArch64::Bimm)).addMBB(FBB);
- return 2;
+ return false;
}
unsigned AArch64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator I = MBB.end();
- if (I == MBB.begin()) return 0;
+ if (I == MBB.begin())
+ return 0;
--I;
while (I->isDebugValue()) {
if (I == MBB.begin())
return 0;
--I;
}
- if (I->getOpcode() != AArch64::Bimm && !isCondBranch(I->getOpcode()))
+ if (!isUncondBranchOpcode(I->getOpcode()) &&
+ !isCondBranchOpcode(I->getOpcode()))
return 0;
// Remove the branch.
@@ -359,9 +241,10 @@ unsigned AArch64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
I = MBB.end();
- if (I == MBB.begin()) return 1;
+ if (I == MBB.begin())
+ return 1;
--I;
- if (!isCondBranch(I->getOpcode()))
+ if (!isCondBranchOpcode(I->getOpcode()))
return 1;
// Remove the branch.
@@ -369,471 +252,1838 @@ unsigned AArch64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
return 2;
}
-bool
-AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const {
- MachineInstr &MI = *MBBI;
- MachineBasicBlock &MBB = *MI.getParent();
+void AArch64InstrInfo::instantiateCondBranch(
+ MachineBasicBlock &MBB, DebugLoc DL, MachineBasicBlock *TBB,
+ const SmallVectorImpl<MachineOperand> &Cond) const {
+ if (Cond[0].getImm() != -1) {
+ // Regular Bcc
+ BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
+ } else {
+ // Folded compare-and-branch
+ const MachineInstrBuilder MIB =
+ BuildMI(&MBB, DL, get(Cond[1].getImm())).addReg(Cond[2].getReg());
+ if (Cond.size() > 3)
+ MIB.addImm(Cond[3].getImm());
+ MIB.addMBB(TBB);
+ }
+}
- unsigned Opcode = MI.getOpcode();
- switch (Opcode) {
- case AArch64::TLSDESC_BLRx: {
- MachineInstr *NewMI =
- BuildMI(MBB, MBBI, MI.getDebugLoc(), get(AArch64::TLSDESCCALL))
- .addOperand(MI.getOperand(1));
- MI.setDesc(get(AArch64::BLRx));
-
- llvm::finalizeBundle(MBB, NewMI, *++MBBI);
- return true;
- }
+unsigned AArch64InstrInfo::InsertBranch(
+ MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const {
+ // Shouldn't be a fall through.
+ assert(TBB && "InsertBranch must not be told to insert a fallthrough");
+
+ if (!FBB) {
+ if (Cond.empty()) // Unconditional branch?
+ BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
+ else
+ instantiateCondBranch(MBB, DL, TBB, Cond);
+ return 1;
+ }
+
+ // Two-way conditional branch.
+ instantiateCondBranch(MBB, DL, TBB, Cond);
+ BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);
+ return 2;
+}
+
+// Find the original register that VReg is copied from.
+static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
+ while (TargetRegisterInfo::isVirtualRegister(VReg)) {
+ const MachineInstr *DefMI = MRI.getVRegDef(VReg);
+ if (!DefMI->isFullCopy())
+ return VReg;
+ VReg = DefMI->getOperand(1).getReg();
+ }
+ return VReg;
+}
+
+// Determine if VReg is defined by an instruction that can be folded into a
+// csel instruction. If so, return the folded opcode, and the replacement
+// register.
+static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
+ unsigned *NewVReg = nullptr) {
+ VReg = removeCopies(MRI, VReg);
+ if (!TargetRegisterInfo::isVirtualRegister(VReg))
+ return 0;
+
+ bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
+ const MachineInstr *DefMI = MRI.getVRegDef(VReg);
+ unsigned Opc = 0;
+ unsigned SrcOpNum = 0;
+ switch (DefMI->getOpcode()) {
+ case AArch64::ADDSXri:
+ case AArch64::ADDSWri:
+ // if NZCV is used, do not fold.
+ if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
+ return 0;
+ // fall-through to ADDXri and ADDWri.
+ case AArch64::ADDXri:
+ case AArch64::ADDWri:
+ // add x, 1 -> csinc.
+ if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
+ DefMI->getOperand(3).getImm() != 0)
+ return 0;
+ SrcOpNum = 1;
+ Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
+ break;
+
+ case AArch64::ORNXrr:
+ case AArch64::ORNWrr: {
+ // not x -> csinv, represented as orn dst, xzr, src.
+ unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
+ if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
+ return 0;
+ SrcOpNum = 2;
+ Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
+ break;
+ }
+
+ case AArch64::SUBSXrr:
+ case AArch64::SUBSWrr:
+ // if NZCV is used, do not fold.
+ if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
+ return 0;
+ // fall-through to SUBXrr and SUBWrr.
+ case AArch64::SUBXrr:
+ case AArch64::SUBWrr: {
+ // neg x -> csneg, represented as sub dst, xzr, src.
+ unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
+ if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
+ return 0;
+ SrcOpNum = 2;
+ Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
+ break;
+ }
default:
+ return 0;
+ }
+ assert(Opc && SrcOpNum && "Missing parameters");
+
+ if (NewVReg)
+ *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
+ return Opc;
+}
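+
+// Illustrative note (not part of the original patch). For IR such as
+//   %t = add i32 %x, 1
+//   %r = select i1 %c, i32 %t, i32 %y
+// canFoldIntoCSel(MRI, TrueReg) returns CSINCWr with *NewVReg = %x, letting
+// the select be lowered as a csinc of %y and %x under the inverted condition
+// instead of materializing the add separately.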
+
+bool AArch64InstrInfo::canInsertSelect(
+ const MachineBasicBlock &MBB, const SmallVectorImpl<MachineOperand> &Cond,
+ unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
+ int &FalseCycles) const {
+ // Check register classes.
+ const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+ const TargetRegisterClass *RC =
+ RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
+ if (!RC)
return false;
+
+ // Expanding cbz/tbz requires an extra cycle of latency on the condition.
+ unsigned ExtraCondLat = Cond.size() != 1;
+
+ // GPRs are handled by csel.
+ // FIXME: Fold in x+1, -x, and ~x when applicable.
+ if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
+ AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
+ // Single-cycle csel, csinc, csinv, and csneg.
+ CondCycles = 1 + ExtraCondLat;
+ TrueCycles = FalseCycles = 1;
+ if (canFoldIntoCSel(MRI, TrueReg))
+ TrueCycles = 0;
+ else if (canFoldIntoCSel(MRI, FalseReg))
+ FalseCycles = 0;
+ return true;
}
+ // Scalar floating point is handled by fcsel.
+ // FIXME: Form fabs, fmin, and fmax when applicable.
+ if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
+ AArch64::FPR32RegClass.hasSubClassEq(RC)) {
+ CondCycles = 5 + ExtraCondLat;
+ TrueCycles = FalseCycles = 2;
+ return true;
+ }
+
+ // Can't do vectors.
return false;
}
-void
-AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned SrcReg, bool isKill,
- int FrameIdx,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
- DebugLoc DL = MBB.findDebugLoc(MBBI);
- MachineFunction &MF = *MBB.getParent();
- MachineFrameInfo &MFI = *MF.getFrameInfo();
- unsigned Align = MFI.getObjectAlignment(FrameIdx);
-
- MachineMemOperand *MMO
- = MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
- MachineMemOperand::MOStore,
- MFI.getObjectSize(FrameIdx),
- Align);
-
- unsigned StoreOp = 0;
- if (RC->hasType(MVT::i64) || RC->hasType(MVT::i32)) {
- switch(RC->getSize()) {
- case 4: StoreOp = AArch64::LS32_STR; break;
- case 8: StoreOp = AArch64::LS64_STR; break;
+void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DstReg,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ unsigned TrueReg, unsigned FalseReg) const {
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+
+ // Parse the condition code; see parseCondBranch() above.
+ AArch64CC::CondCode CC;
+ switch (Cond.size()) {
+ default:
+ llvm_unreachable("Unknown condition opcode in Cond");
+ case 1: // b.cc
+ CC = AArch64CC::CondCode(Cond[0].getImm());
+ break;
+ case 3: { // cbz/cbnz
+ // We must insert a compare against 0.
+ bool Is64Bit;
+ switch (Cond[1].getImm()) {
default:
- llvm_unreachable("Unknown size for regclass");
+ llvm_unreachable("Unknown branch opcode in Cond");
+ case AArch64::CBZW:
+ Is64Bit = 0;
+ CC = AArch64CC::EQ;
+ break;
+ case AArch64::CBZX:
+ Is64Bit = 1;
+ CC = AArch64CC::EQ;
+ break;
+ case AArch64::CBNZW:
+ Is64Bit = 0;
+ CC = AArch64CC::NE;
+ break;
+ case AArch64::CBNZX:
+ Is64Bit = 1;
+ CC = AArch64CC::NE;
+ break;
}
- } else {
- assert((RC->hasType(MVT::f32) || RC->hasType(MVT::f64) ||
- RC->hasType(MVT::f128))
- && "Expected integer or floating type for store");
- switch (RC->getSize()) {
- case 4: StoreOp = AArch64::LSFP32_STR; break;
- case 8: StoreOp = AArch64::LSFP64_STR; break;
- case 16: StoreOp = AArch64::LSFP128_STR; break;
+ unsigned SrcReg = Cond[2].getReg();
+ if (Is64Bit) {
+ // cmp reg, #0 is actually subs xzr, reg, #0.
+ MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
+ BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
+ .addReg(SrcReg)
+ .addImm(0)
+ .addImm(0);
+ } else {
+ MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
+ BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
+ .addReg(SrcReg)
+ .addImm(0)
+ .addImm(0);
+ }
+ break;
+ }
+ case 4: { // tbz/tbnz
+ // We must insert a tst instruction.
+ switch (Cond[1].getImm()) {
default:
- llvm_unreachable("Unknown size for regclass");
+ llvm_unreachable("Unknown branch opcode in Cond");
+ case AArch64::TBZW:
+ case AArch64::TBZX:
+ CC = AArch64CC::EQ;
+ break;
+ case AArch64::TBNZW:
+ case AArch64::TBNZX:
+ CC = AArch64CC::NE;
+ break;
+ }
+ // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
+ if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
+ BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
+ .addReg(Cond[2].getReg())
+ .addImm(
+ AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
+ else
+ BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
+ .addReg(Cond[2].getReg())
+ .addImm(
+ AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
+ break;
+ }
+ }
+
+ unsigned Opc = 0;
+ const TargetRegisterClass *RC = nullptr;
+ bool TryFold = false;
+ if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
+ RC = &AArch64::GPR64RegClass;
+ Opc = AArch64::CSELXr;
+ TryFold = true;
+ } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
+ RC = &AArch64::GPR32RegClass;
+ Opc = AArch64::CSELWr;
+ TryFold = true;
+ } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
+ RC = &AArch64::FPR64RegClass;
+ Opc = AArch64::FCSELDrrr;
+ } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
+ RC = &AArch64::FPR32RegClass;
+ Opc = AArch64::FCSELSrrr;
+ }
+ assert(RC && "Unsupported regclass");
+
+ // Try folding simple instructions into the csel.
+ if (TryFold) {
+ unsigned NewVReg = 0;
+ unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
+ if (FoldedOpc) {
+ // The folded opcodes csinc, csinv and csneg apply the operation to
+ // FalseReg, so we need to invert the condition.
+ CC = AArch64CC::getInvertedCondCode(CC);
+ TrueReg = FalseReg;
+ } else
+ FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);
+
+ // Fold the operation. Leave any dead instructions for DCE to clean up.
+ if (FoldedOpc) {
+ FalseReg = NewVReg;
+ Opc = FoldedOpc;
+ // This extends the live range of NewVReg.
+ MRI.clearKillFlags(NewVReg);
}
}
- MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(StoreOp));
- NewMI.addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FrameIdx)
- .addImm(0)
- .addMemOperand(MMO);
+ // Pull all virtual registers into the appropriate class.
+ MRI.constrainRegClass(TrueReg, RC);
+ MRI.constrainRegClass(FalseReg, RC);
+ // Insert the csel.
+ BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(TrueReg).addReg(FalseReg).addImm(
+ CC);
}
-void
-AArch64InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, int FrameIdx,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
- DebugLoc DL = MBB.findDebugLoc(MBBI);
- MachineFunction &MF = *MBB.getParent();
- MachineFrameInfo &MFI = *MF.getFrameInfo();
- unsigned Align = MFI.getObjectAlignment(FrameIdx);
-
- MachineMemOperand *MMO
- = MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
- MachineMemOperand::MOLoad,
- MFI.getObjectSize(FrameIdx),
- Align);
-
- unsigned LoadOp = 0;
- if (RC->hasType(MVT::i64) || RC->hasType(MVT::i32)) {
- switch(RC->getSize()) {
- case 4: LoadOp = AArch64::LS32_LDR; break;
- case 8: LoadOp = AArch64::LS64_LDR; break;
- default:
- llvm_unreachable("Unknown size for regclass");
- }
- } else {
- assert((RC->hasType(MVT::f32) || RC->hasType(MVT::f64)
- || RC->hasType(MVT::f128))
- && "Expected integer or floating type for store");
- switch (RC->getSize()) {
- case 4: LoadOp = AArch64::LSFP32_LDR; break;
- case 8: LoadOp = AArch64::LSFP64_LDR; break;
- case 16: LoadOp = AArch64::LSFP128_LDR; break;
+bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
+ unsigned &SrcReg, unsigned &DstReg,
+ unsigned &SubIdx) const {
+ switch (MI.getOpcode()) {
+ default:
+ return false;
+ case AArch64::SBFMXri: // aka sxtw
+ case AArch64::UBFMXri: // aka uxtw
+ // Check for the 32 -> 64 bit extension case; these instructions can do
+ // much more.
+ if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
+ return false;
+ // This is a signed or unsigned 32 -> 64 bit extension.
+ SrcReg = MI.getOperand(1).getReg();
+ DstReg = MI.getOperand(0).getReg();
+ SubIdx = AArch64::sub_32;
+ return true;
+ }
+}
+
+/// analyzeCompare - For a comparison instruction, return the source registers
+/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
+/// Return true if the comparison instruction can be analyzed.
+bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
+ unsigned &SrcReg2, int &CmpMask,
+ int &CmpValue) const {
+ switch (MI->getOpcode()) {
+ default:
+ break;
+ case AArch64::SUBSWrr:
+ case AArch64::SUBSWrs:
+ case AArch64::SUBSWrx:
+ case AArch64::SUBSXrr:
+ case AArch64::SUBSXrs:
+ case AArch64::SUBSXrx:
+ case AArch64::ADDSWrr:
+ case AArch64::ADDSWrs:
+ case AArch64::ADDSWrx:
+ case AArch64::ADDSXrr:
+ case AArch64::ADDSXrs:
+ case AArch64::ADDSXrx:
+ // Replace SUBSWrr with SUBWrr if NZCV is not used.
+ SrcReg = MI->getOperand(1).getReg();
+ SrcReg2 = MI->getOperand(2).getReg();
+ CmpMask = ~0;
+ CmpValue = 0;
+ return true;
+ case AArch64::SUBSWri:
+ case AArch64::ADDSWri:
+ case AArch64::SUBSXri:
+ case AArch64::ADDSXri:
+ SrcReg = MI->getOperand(1).getReg();
+ SrcReg2 = 0;
+ CmpMask = ~0;
+ CmpValue = MI->getOperand(2).getImm();
+ return true;
+ case AArch64::ANDSWri:
+ case AArch64::ANDSXri:
+ // ANDS does not use the same encoding scheme as the other xxxS
+ // instructions.
+ SrcReg = MI->getOperand(1).getReg();
+ SrcReg2 = 0;
+ CmpMask = ~0;
+ CmpValue = AArch64_AM::decodeLogicalImmediate(
+ MI->getOperand(2).getImm(),
+ MI->getOpcode() == AArch64::ANDSWri ? 32 : 64);
+ return true;
+ }
+
+ return false;
+}
+
+static bool UpdateOperandRegClass(MachineInstr *Instr) {
+ MachineBasicBlock *MBB = Instr->getParent();
+ assert(MBB && "Can't get MachineBasicBlock here");
+ MachineFunction *MF = MBB->getParent();
+ assert(MF && "Can't get MachineFunction here");
+ const TargetMachine *TM = &MF->getTarget();
+ const TargetInstrInfo *TII = TM->getInstrInfo();
+ const TargetRegisterInfo *TRI = TM->getRegisterInfo();
+ MachineRegisterInfo *MRI = &MF->getRegInfo();
+
+ for (unsigned OpIdx = 0, EndIdx = Instr->getNumOperands(); OpIdx < EndIdx;
+ ++OpIdx) {
+ MachineOperand &MO = Instr->getOperand(OpIdx);
+ const TargetRegisterClass *OpRegCstraints =
+ Instr->getRegClassConstraint(OpIdx, TII, TRI);
+
+ // If there's no constraint, there's nothing to do.
+ if (!OpRegCstraints)
+ continue;
+ // If the operand is a frame index, there's nothing to do here.
+ // A frame index operand will resolve correctly during PEI.
+ if (MO.isFI())
+ continue;
+
+ assert(MO.isReg() &&
+ "Operand has register constraints without being a register!");
+
+ unsigned Reg = MO.getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ if (!OpRegCstraints->contains(Reg))
+ return false;
+ } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
+ !MRI->constrainRegClass(Reg, OpRegCstraints))
+ return false;
+ }
+
+ return true;
+}
+
+/// optimizeCompareInstr - Convert the instruction supplying the argument to the
+/// comparison into one that sets the zero bit in the flags register.
+bool AArch64InstrInfo::optimizeCompareInstr(
+ MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
+ int CmpValue, const MachineRegisterInfo *MRI) const {
+
+ // Replace SUBSWrr with SUBWrr if NZCV is not used.
+ int Cmp_NZCV = CmpInstr->findRegisterDefOperandIdx(AArch64::NZCV, true);
+ if (Cmp_NZCV != -1) {
+ unsigned NewOpc;
+ switch (CmpInstr->getOpcode()) {
default:
- llvm_unreachable("Unknown size for regclass");
+ return false;
+ case AArch64::ADDSWrr: NewOpc = AArch64::ADDWrr; break;
+ case AArch64::ADDSWri: NewOpc = AArch64::ADDWri; break;
+ case AArch64::ADDSWrs: NewOpc = AArch64::ADDWrs; break;
+ case AArch64::ADDSWrx: NewOpc = AArch64::ADDWrx; break;
+ case AArch64::ADDSXrr: NewOpc = AArch64::ADDXrr; break;
+ case AArch64::ADDSXri: NewOpc = AArch64::ADDXri; break;
+ case AArch64::ADDSXrs: NewOpc = AArch64::ADDXrs; break;
+ case AArch64::ADDSXrx: NewOpc = AArch64::ADDXrx; break;
+ case AArch64::SUBSWrr: NewOpc = AArch64::SUBWrr; break;
+ case AArch64::SUBSWri: NewOpc = AArch64::SUBWri; break;
+ case AArch64::SUBSWrs: NewOpc = AArch64::SUBWrs; break;
+ case AArch64::SUBSWrx: NewOpc = AArch64::SUBWrx; break;
+ case AArch64::SUBSXrr: NewOpc = AArch64::SUBXrr; break;
+ case AArch64::SUBSXri: NewOpc = AArch64::SUBXri; break;
+ case AArch64::SUBSXrs: NewOpc = AArch64::SUBXrs; break;
+ case AArch64::SUBSXrx: NewOpc = AArch64::SUBXrx; break;
}
+
+ const MCInstrDesc &MCID = get(NewOpc);
+ CmpInstr->setDesc(MCID);
+ CmpInstr->RemoveOperand(Cmp_NZCV);
+ bool succeeded = UpdateOperandRegClass(CmpInstr);
+ (void)succeeded;
+ assert(succeeded && "Some operands reg class are incompatible!");
+ return true;
}
- MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(LoadOp), DestReg);
- NewMI.addFrameIndex(FrameIdx)
- .addImm(0)
- .addMemOperand(MMO);
-}
+ // Continue only if we have a "ri" where immediate is zero.
+ if (CmpValue != 0 || SrcReg2 != 0)
+ return false;
-unsigned AArch64InstrInfo::estimateRSStackLimit(MachineFunction &MF) const {
- unsigned Limit = (1 << 16) - 1;
- for (MachineFunction::iterator BB = MF.begin(),E = MF.end(); BB != E; ++BB) {
- for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
- I != E; ++I) {
- for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
- if (!I->getOperand(i).isFI()) continue;
+ // CmpInstr is a Compare instruction if its destination register is not used.
+ if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
+ return false;
- // When using ADDxxi_lsl0_s to get the address of a stack object, 0xfff
- // is the largest offset guaranteed to fit in the immediate offset.
- if (I->getOpcode() == AArch64::ADDxxi_lsl0_s) {
- Limit = std::min(Limit, 0xfffu);
- break;
- }
+ // Get the unique definition of SrcReg.
+ MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
+ if (!MI)
+ return false;
+
+ // We iterate backward, starting from the instruction before CmpInstr and
+ // stop when reaching the definition of the source register or done with the
+ // basic block, to check whether NZCV is used or modified in between.
+ MachineBasicBlock::iterator I = CmpInstr, E = MI,
+ B = CmpInstr->getParent()->begin();
- int AccessScale, MinOffset, MaxOffset;
- getAddressConstraints(*I, AccessScale, MinOffset, MaxOffset);
- Limit = std::min(Limit, static_cast<unsigned>(MaxOffset));
+ // Early exit if CmpInstr is at the beginning of the BB.
+ if (I == B)
+ return false;
- break; // At most one FI per instruction
+ // Check whether the definition of SrcReg is in the same basic block as
+ // Compare. If not, we can't optimize away the Compare.
+ if (MI->getParent() != CmpInstr->getParent())
+ return false;
+
+ // Check that NZCV isn't set between the comparison instruction and the one we
+ // want to change.
+ const TargetRegisterInfo *TRI = &getRegisterInfo();
+ for (--I; I != E; --I) {
+ const MachineInstr &Instr = *I;
+
+ if (Instr.modifiesRegister(AArch64::NZCV, TRI) ||
+ Instr.readsRegister(AArch64::NZCV, TRI))
+ // This instruction modifies or uses NZCV after the one we want to
+ // change. We can't do this transformation.
+ return false;
+ if (I == B)
+ // The 'and' is below the comparison instruction.
+ return false;
+ }
+
+ unsigned NewOpc = MI->getOpcode();
+ switch (MI->getOpcode()) {
+ default:
+ return false;
+ case AArch64::ADDSWrr:
+ case AArch64::ADDSWri:
+ case AArch64::ADDSXrr:
+ case AArch64::ADDSXri:
+ case AArch64::SUBSWrr:
+ case AArch64::SUBSWri:
+ case AArch64::SUBSXrr:
+ case AArch64::SUBSXri:
+ break;
+ case AArch64::ADDWrr: NewOpc = AArch64::ADDSWrr; break;
+ case AArch64::ADDWri: NewOpc = AArch64::ADDSWri; break;
+ case AArch64::ADDXrr: NewOpc = AArch64::ADDSXrr; break;
+ case AArch64::ADDXri: NewOpc = AArch64::ADDSXri; break;
+ case AArch64::ADCWr: NewOpc = AArch64::ADCSWr; break;
+ case AArch64::ADCXr: NewOpc = AArch64::ADCSXr; break;
+ case AArch64::SUBWrr: NewOpc = AArch64::SUBSWrr; break;
+ case AArch64::SUBWri: NewOpc = AArch64::SUBSWri; break;
+ case AArch64::SUBXrr: NewOpc = AArch64::SUBSXrr; break;
+ case AArch64::SUBXri: NewOpc = AArch64::SUBSXri; break;
+ case AArch64::SBCWr: NewOpc = AArch64::SBCSWr; break;
+ case AArch64::SBCXr: NewOpc = AArch64::SBCSXr; break;
+ case AArch64::ANDWri: NewOpc = AArch64::ANDSWri; break;
+ case AArch64::ANDXri: NewOpc = AArch64::ANDSXri; break;
+ }
+
+ // Scan forward for the use of NZCV.
+ // When checking against MI: if the condition code requires checking of the
+ // V bit, then this is not safe to do.
+ // It is safe to remove CmpInstr if NZCV is redefined or killed.
+ // If we are done with the basic block, we need to check whether NZCV is
+ // live-out.
+ bool IsSafe = false;
+ for (MachineBasicBlock::iterator I = CmpInstr,
+ E = CmpInstr->getParent()->end();
+ !IsSafe && ++I != E;) {
+ const MachineInstr &Instr = *I;
+ for (unsigned IO = 0, EO = Instr.getNumOperands(); !IsSafe && IO != EO;
+ ++IO) {
+ const MachineOperand &MO = Instr.getOperand(IO);
+ if (MO.isRegMask() && MO.clobbersPhysReg(AArch64::NZCV)) {
+ IsSafe = true;
+ break;
+ }
+ if (!MO.isReg() || MO.getReg() != AArch64::NZCV)
+ continue;
+ if (MO.isDef()) {
+ IsSafe = true;
+ break;
+ }
+
+ // Decode the condition code.
+ unsigned Opc = Instr.getOpcode();
+ AArch64CC::CondCode CC;
+ switch (Opc) {
+ default:
+ return false;
+ case AArch64::Bcc:
+ CC = (AArch64CC::CondCode)Instr.getOperand(IO - 2).getImm();
+ break;
+ case AArch64::CSINVWr:
+ case AArch64::CSINVXr:
+ case AArch64::CSINCWr:
+ case AArch64::CSINCXr:
+ case AArch64::CSELWr:
+ case AArch64::CSELXr:
+ case AArch64::CSNEGWr:
+ case AArch64::CSNEGXr:
+ case AArch64::FCSELSrrr:
+ case AArch64::FCSELDrrr:
+ CC = (AArch64CC::CondCode)Instr.getOperand(IO - 1).getImm();
+ break;
+ }
+
+ // It is not safe to remove Compare instruction if Overflow(V) is used.
+ switch (CC) {
+ default:
+ // NZCV can be used multiple times, we should continue.
+ break;
+ case AArch64CC::VS:
+ case AArch64CC::VC:
+ case AArch64CC::GE:
+ case AArch64CC::LT:
+ case AArch64CC::GT:
+ case AArch64CC::LE:
+ return false;
}
}
}
- return Limit;
+ // If NZCV is neither killed nor re-defined, we should check whether it is
+ // live-out. If it is live-out, do not optimize.
+ if (!IsSafe) {
+ MachineBasicBlock *ParentBlock = CmpInstr->getParent();
+ for (auto *MBB : ParentBlock->successors())
+ if (MBB->isLiveIn(AArch64::NZCV))
+ return false;
+ }
+
+ // Update the instruction to set NZCV.
+ MI->setDesc(get(NewOpc));
+ CmpInstr->eraseFromParent();
+ bool succeeded = UpdateOperandRegClass(MI);
+ (void)succeeded;
+ assert(succeeded && "Some operands reg class are incompatible!");
+ MI->addRegisterDefined(AArch64::NZCV, TRI);
+ return true;
}
-void AArch64InstrInfo::getAddressConstraints(const MachineInstr &MI,
- int &AccessScale, int &MinOffset,
- int &MaxOffset) const {
- switch (MI.getOpcode()) {
- default: llvm_unreachable("Unkown load/store kind");
- case TargetOpcode::DBG_VALUE:
- AccessScale = 1;
- MinOffset = INT_MIN;
- MaxOffset = INT_MAX;
- return;
- case AArch64::LS8_LDR: case AArch64::LS8_STR:
- case AArch64::LSFP8_LDR: case AArch64::LSFP8_STR:
- case AArch64::LDRSBw:
- case AArch64::LDRSBx:
- AccessScale = 1;
- MinOffset = 0;
- MaxOffset = 0xfff;
- return;
- case AArch64::LS16_LDR: case AArch64::LS16_STR:
- case AArch64::LSFP16_LDR: case AArch64::LSFP16_STR:
- case AArch64::LDRSHw:
- case AArch64::LDRSHx:
- AccessScale = 2;
- MinOffset = 0;
- MaxOffset = 0xfff * AccessScale;
- return;
- case AArch64::LS32_LDR: case AArch64::LS32_STR:
- case AArch64::LSFP32_LDR: case AArch64::LSFP32_STR:
- case AArch64::LDRSWx:
- case AArch64::LDPSWx:
- AccessScale = 4;
- MinOffset = 0;
- MaxOffset = 0xfff * AccessScale;
- return;
- case AArch64::LS64_LDR: case AArch64::LS64_STR:
- case AArch64::LSFP64_LDR: case AArch64::LSFP64_STR:
- case AArch64::PRFM:
- AccessScale = 8;
- MinOffset = 0;
- MaxOffset = 0xfff * AccessScale;
- return;
- case AArch64::LSFP128_LDR: case AArch64::LSFP128_STR:
- AccessScale = 16;
- MinOffset = 0;
- MaxOffset = 0xfff * AccessScale;
- return;
- case AArch64::LSPair32_LDR: case AArch64::LSPair32_STR:
- case AArch64::LSFPPair32_LDR: case AArch64::LSFPPair32_STR:
- AccessScale = 4;
- MinOffset = -0x40 * AccessScale;
- MaxOffset = 0x3f * AccessScale;
- return;
- case AArch64::LSPair64_LDR: case AArch64::LSPair64_STR:
- case AArch64::LSFPPair64_LDR: case AArch64::LSFPPair64_STR:
- AccessScale = 8;
- MinOffset = -0x40 * AccessScale;
- MaxOffset = 0x3f * AccessScale;
- return;
- case AArch64::LSFPPair128_LDR: case AArch64::LSFPPair128_STR:
- AccessScale = 16;
- MinOffset = -0x40 * AccessScale;
- MaxOffset = 0x3f * AccessScale;
- return;
+
+/// Return true if this instruction has a shifted-register operand with a
+/// non-zero shift amount.
+bool AArch64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
+ switch (MI->getOpcode()) {
+ default:
+ break;
+ case AArch64::ADDSWrs:
+ case AArch64::ADDSXrs:
+ case AArch64::ADDWrs:
+ case AArch64::ADDXrs:
+ case AArch64::ANDSWrs:
+ case AArch64::ANDSXrs:
+ case AArch64::ANDWrs:
+ case AArch64::ANDXrs:
+ case AArch64::BICSWrs:
+ case AArch64::BICSXrs:
+ case AArch64::BICWrs:
+ case AArch64::BICXrs:
+ case AArch64::CRC32Brr:
+ case AArch64::CRC32CBrr:
+ case AArch64::CRC32CHrr:
+ case AArch64::CRC32CWrr:
+ case AArch64::CRC32CXrr:
+ case AArch64::CRC32Hrr:
+ case AArch64::CRC32Wrr:
+ case AArch64::CRC32Xrr:
+ case AArch64::EONWrs:
+ case AArch64::EONXrs:
+ case AArch64::EORWrs:
+ case AArch64::EORXrs:
+ case AArch64::ORNWrs:
+ case AArch64::ORNXrs:
+ case AArch64::ORRWrs:
+ case AArch64::ORRXrs:
+ case AArch64::SUBSWrs:
+ case AArch64::SUBSXrs:
+ case AArch64::SUBWrs:
+ case AArch64::SUBXrs:
+ if (MI->getOperand(3).isImm()) {
+ unsigned val = MI->getOperand(3).getImm();
+ return (val != 0);
+ }
+ break;
}
+ return false;
}
-unsigned AArch64InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
- const MCInstrDesc &MCID = MI.getDesc();
- const MachineBasicBlock &MBB = *MI.getParent();
- const MachineFunction &MF = *MBB.getParent();
- const MCAsmInfo &MAI = *MF.getTarget().getMCAsmInfo();
+/// Return true if this instruction has an extended-register operand with a
+/// non-zero immediate.
+bool AArch64InstrInfo::hasExtendedReg(const MachineInstr *MI) const {
+ switch (MI->getOpcode()) {
+ default:
+ break;
+ case AArch64::ADDSWrx:
+ case AArch64::ADDSXrx:
+ case AArch64::ADDSXrx64:
+ case AArch64::ADDWrx:
+ case AArch64::ADDXrx:
+ case AArch64::ADDXrx64:
+ case AArch64::SUBSWrx:
+ case AArch64::SUBSXrx:
+ case AArch64::SUBSXrx64:
+ case AArch64::SUBWrx:
+ case AArch64::SUBXrx:
+ case AArch64::SUBXrx64:
+ if (MI->getOperand(3).isImm()) {
+ unsigned val = MI->getOperand(3).getImm();
+ return (val != 0);
+ }
+ break;
+ }
- if (MCID.getSize())
- return MCID.getSize();
+ return false;
+}
- if (MI.getOpcode() == AArch64::INLINEASM)
- return getInlineAsmLength(MI.getOperand(0).getSymbolName(), MAI);
+// Return true if this instruction simply sets its single destination register
+// to zero. This is equivalent to a register rename of the zero-register.
+bool AArch64InstrInfo::isGPRZero(const MachineInstr *MI) const {
+ switch (MI->getOpcode()) {
+ default:
+ break;
+ case AArch64::MOVZWi:
+ case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
+ if (MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0) {
+ assert(MI->getDesc().getNumOperands() == 3 &&
+ MI->getOperand(2).getImm() == 0 && "invalid MOVZi operands");
+ return true;
+ }
+ break;
+ case AArch64::ANDWri: // and Rd, Rzr, #imm
+ return MI->getOperand(1).getReg() == AArch64::WZR;
+ case AArch64::ANDXri:
+ return MI->getOperand(1).getReg() == AArch64::XZR;
+ case TargetOpcode::COPY:
+ return MI->getOperand(1).getReg() == AArch64::WZR;
+ }
+ return false;
+}
- if (MI.isLabel())
- return 0;
+// Return true if this instruction simply renames a general register without
+// modifying bits.
+bool AArch64InstrInfo::isGPRCopy(const MachineInstr *MI) const {
+ switch (MI->getOpcode()) {
+ default:
+ break;
+ case TargetOpcode::COPY: {
+ // GPR32 copies will be lowered to ORRXrs
+ unsigned DstReg = MI->getOperand(0).getReg();
+ return (AArch64::GPR32RegClass.contains(DstReg) ||
+ AArch64::GPR64RegClass.contains(DstReg));
+ }
+ case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
+ if (MI->getOperand(1).getReg() == AArch64::XZR) {
+ assert(MI->getDesc().getNumOperands() == 4 &&
+ MI->getOperand(3).getImm() == 0 && "invalid ORRrs operands");
+ return true;
+ }
+ case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
+ if (MI->getOperand(2).getImm() == 0) {
+ assert(MI->getDesc().getNumOperands() == 4 &&
+ MI->getOperand(3).getImm() == 0 && "invalid ADDXri operands");
+ return true;
+ }
+ }
+ return false;
+}
- switch (MI.getOpcode()) {
- case TargetOpcode::BUNDLE:
- return getInstBundleLength(MI);
- case TargetOpcode::IMPLICIT_DEF:
- case TargetOpcode::KILL:
- case TargetOpcode::PROLOG_LABEL:
- case TargetOpcode::EH_LABEL:
- case TargetOpcode::DBG_VALUE:
- return 0;
- case AArch64::TLSDESCCALL:
- return 0;
+// Return true if this instruction simply renames a floating-point register
+// without modifying bits.
+bool AArch64InstrInfo::isFPRCopy(const MachineInstr *MI) const {
+ switch (MI->getOpcode()) {
default:
- llvm_unreachable("Unknown instruction class");
+ break;
+ case TargetOpcode::COPY: {
+ // FPR64 copies will be lowered to ORR.16b
+ unsigned DstReg = MI->getOperand(0).getReg();
+ return (AArch64::FPR64RegClass.contains(DstReg) ||
+ AArch64::FPR128RegClass.contains(DstReg));
+ }
+ case AArch64::ORRv16i8:
+ if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
+ assert(MI->getDesc().getNumOperands() == 3 && MI->getOperand(0).isReg() &&
+ "invalid ORRv16i8 operands");
+ return true;
+ }
}
+ return false;
}
-unsigned AArch64InstrInfo::getInstBundleLength(const MachineInstr &MI) const {
- unsigned Size = 0;
- MachineBasicBlock::const_instr_iterator I = MI;
- MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
- while (++I != E && I->isInsideBundle()) {
- assert(!I->isBundle() && "No nested bundle!");
- Size += getInstSizeInBytes(*I);
+unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
+ switch (MI->getOpcode()) {
+ default:
+ break;
+ case AArch64::LDRWui:
+ case AArch64::LDRXui:
+ case AArch64::LDRBui:
+ case AArch64::LDRHui:
+ case AArch64::LDRSui:
+ case AArch64::LDRDui:
+ case AArch64::LDRQui:
+ if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
+ MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
+ FrameIndex = MI->getOperand(1).getIndex();
+ return MI->getOperand(0).getReg();
+ }
+ break;
}
- return Size;
+
+ return 0;
}
-bool llvm::rewriteA64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
- unsigned FrameReg, int &Offset,
- const AArch64InstrInfo &TII) {
- MachineBasicBlock &MBB = *MI.getParent();
- MachineFunction &MF = *MBB.getParent();
- MachineFrameInfo &MFI = *MF.getFrameInfo();
+unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
+ switch (MI->getOpcode()) {
+ default:
+ break;
+ case AArch64::STRWui:
+ case AArch64::STRXui:
+ case AArch64::STRBui:
+ case AArch64::STRHui:
+ case AArch64::STRSui:
+ case AArch64::STRDui:
+ case AArch64::STRQui:
+ if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
+ MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
+ FrameIndex = MI->getOperand(1).getIndex();
+ return MI->getOperand(0).getReg();
+ }
+ break;
+ }
+ return 0;
+}
- MFI.getObjectOffset(FrameRegIdx);
- llvm_unreachable("Unimplemented rewriteFrameIndex");
+/// Return true if this load/store scales or extends its register offset.
+/// This refers to scaling a dynamic index as opposed to scaled immediates.
+/// MI should be a memory op that allows scaled addressing.
+bool AArch64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
+ switch (MI->getOpcode()) {
+ default:
+ break;
+ case AArch64::LDRBBroW:
+ case AArch64::LDRBroW:
+ case AArch64::LDRDroW:
+ case AArch64::LDRHHroW:
+ case AArch64::LDRHroW:
+ case AArch64::LDRQroW:
+ case AArch64::LDRSBWroW:
+ case AArch64::LDRSBXroW:
+ case AArch64::LDRSHWroW:
+ case AArch64::LDRSHXroW:
+ case AArch64::LDRSWroW:
+ case AArch64::LDRSroW:
+ case AArch64::LDRWroW:
+ case AArch64::LDRXroW:
+ case AArch64::STRBBroW:
+ case AArch64::STRBroW:
+ case AArch64::STRDroW:
+ case AArch64::STRHHroW:
+ case AArch64::STRHroW:
+ case AArch64::STRQroW:
+ case AArch64::STRSroW:
+ case AArch64::STRWroW:
+ case AArch64::STRXroW:
+ case AArch64::LDRBBroX:
+ case AArch64::LDRBroX:
+ case AArch64::LDRDroX:
+ case AArch64::LDRHHroX:
+ case AArch64::LDRHroX:
+ case AArch64::LDRQroX:
+ case AArch64::LDRSBWroX:
+ case AArch64::LDRSBXroX:
+ case AArch64::LDRSHWroX:
+ case AArch64::LDRSHXroX:
+ case AArch64::LDRSWroX:
+ case AArch64::LDRSroX:
+ case AArch64::LDRWroX:
+ case AArch64::LDRXroX:
+ case AArch64::STRBBroX:
+ case AArch64::STRBroX:
+ case AArch64::STRDroX:
+ case AArch64::STRHHroX:
+ case AArch64::STRHroX:
+ case AArch64::STRQroX:
+ case AArch64::STRSroX:
+ case AArch64::STRWroX:
+ case AArch64::STRXroX:
+
+ unsigned Val = MI->getOperand(3).getImm();
+ AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getMemExtendType(Val);
+ return (ExtType != AArch64_AM::UXTX) || AArch64_AM::getMemDoShift(Val);
+ }
+ return false;
}
-void llvm::emitRegUpdate(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- DebugLoc dl, const TargetInstrInfo &TII,
- unsigned DstReg, unsigned SrcReg, unsigned ScratchReg,
- int64_t NumBytes, MachineInstr::MIFlag MIFlags) {
- if (NumBytes == 0 && DstReg == SrcReg)
+/// Check all MachineMemOperands for a hint to suppress pairing.
+bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr *MI) const {
+ assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
+ "Too many target MO flags");
+ for (auto *MM : MI->memoperands()) {
+ if (MM->getFlags() &
+ (MOSuppressPair << MachineMemOperand::MOTargetStartBit)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/// Set a flag on the first MachineMemOperand to suppress pairing.
+void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
+ if (MI->memoperands_empty())
return;
- else if (abs64(NumBytes) & ~0xffffff) {
- // Generically, we have to materialize the offset into a temporary register
- // and subtract it. There are a couple of ways this could be done, for now
- // we'll use a movz/movk or movn/movk sequence.
- uint64_t Bits = static_cast<uint64_t>(abs64(NumBytes));
- BuildMI(MBB, MBBI, dl, TII.get(AArch64::MOVZxii), ScratchReg)
- .addImm(0xffff & Bits).addImm(0)
- .setMIFlags(MIFlags);
-
- Bits >>= 16;
- if (Bits & 0xffff) {
- BuildMI(MBB, MBBI, dl, TII.get(AArch64::MOVKxii), ScratchReg)
- .addReg(ScratchReg)
- .addImm(0xffff & Bits).addImm(1)
- .setMIFlags(MIFlags);
- }
-
- Bits >>= 16;
- if (Bits & 0xffff) {
- BuildMI(MBB, MBBI, dl, TII.get(AArch64::MOVKxii), ScratchReg)
- .addReg(ScratchReg)
- .addImm(0xffff & Bits).addImm(2)
- .setMIFlags(MIFlags);
- }
-
- Bits >>= 16;
- if (Bits & 0xffff) {
- BuildMI(MBB, MBBI, dl, TII.get(AArch64::MOVKxii), ScratchReg)
- .addReg(ScratchReg)
- .addImm(0xffff & Bits).addImm(3)
- .setMIFlags(MIFlags);
- }
-
- // ADD DST, SRC, xTMP (, lsl #0)
- unsigned AddOp = NumBytes > 0 ? AArch64::ADDxxx_uxtx : AArch64::SUBxxx_uxtx;
- BuildMI(MBB, MBBI, dl, TII.get(AddOp), DstReg)
- .addReg(SrcReg, RegState::Kill)
- .addReg(ScratchReg, RegState::Kill)
- .addImm(0)
- .setMIFlag(MIFlags);
+
+ assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
+ "Too many target MO flags");
+ (*MI->memoperands_begin())
+ ->setFlags(MOSuppressPair << MachineMemOperand::MOTargetStartBit);
+}
+
+bool
+AArch64InstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const {
+ switch (LdSt->getOpcode()) {
+ default:
+ return false;
+ case AArch64::STRSui:
+ case AArch64::STRDui:
+ case AArch64::STRQui:
+ case AArch64::STRXui:
+ case AArch64::STRWui:
+ case AArch64::LDRSui:
+ case AArch64::LDRDui:
+ case AArch64::LDRQui:
+ case AArch64::LDRXui:
+ case AArch64::LDRWui:
+ if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
+ return false;
+ BaseReg = LdSt->getOperand(1).getReg();
+ MachineFunction &MF = *LdSt->getParent()->getParent();
+ unsigned Width = getRegClass(LdSt->getDesc(), 0, TRI, MF)->getSize();
+ Offset = LdSt->getOperand(2).getImm() * Width;
+ return true;
+ };
+}
+
+/// Detect opportunities for ldp/stp formation.
+///
+/// Only called for LdSt for which getLdStBaseRegImmOfs returns true.
+bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
+ MachineInstr *SecondLdSt,
+ unsigned NumLoads) const {
+ // Only cluster up to a single pair.
+ if (NumLoads > 1)
+ return false;
+ if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
+ return false;
+ // getLdStBaseRegImmOfs guarantees that operand 2 is an immediate.
+ unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
+ // Allow 6 bits of positive range.
+ if (Ofs1 > 64)
+ return false;
+ // The caller should already have ordered First/SecondLdSt by offset.
+ unsigned Ofs2 = SecondLdSt->getOperand(2).getImm();
+ return Ofs1 + 1 == Ofs2;
+}
+
+bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
+ MachineInstr *Second) const {
+ // Cyclone can fuse CMN, CMP followed by Bcc.
+
+ // FIXME: B0 can also fuse:
+ // AND, BIC, ORN, ORR, or EOR (optional S) followed by Bcc or CBZ or CBNZ.
+ if (Second->getOpcode() != AArch64::Bcc)
+ return false;
+ switch (First->getOpcode()) {
+ default:
+ return false;
+ case AArch64::SUBSWri:
+ case AArch64::ADDSWri:
+ case AArch64::ANDSWri:
+ case AArch64::SUBSXri:
+ case AArch64::ADDSXri:
+ case AArch64::ANDSXri:
+ return true;
+ }
+}
+
+MachineInstr *AArch64InstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
+ int FrameIx,
+ uint64_t Offset,
+ const MDNode *MDPtr,
+ DebugLoc DL) const {
+ MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
+ .addFrameIndex(FrameIx)
+ .addImm(0)
+ .addImm(Offset)
+ .addMetadata(MDPtr);
+ return &*MIB;
+}
+
+static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
+ unsigned Reg, unsigned SubIdx,
+ unsigned State,
+ const TargetRegisterInfo *TRI) {
+ if (!SubIdx)
+ return MIB.addReg(Reg, State);
+
+ if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
+ return MIB.addReg(Reg, State, SubIdx);
+}
+
+static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
+ unsigned NumRegs) {
+ // We really want the positive remainder mod 32 here; that happens to be
+ // easily obtainable with a mask.
+ return ((DestReg - SrcReg) & 0x1f) < NumRegs;
+}
+
+void AArch64InstrInfo::copyPhysRegTuple(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
+ llvm::ArrayRef<unsigned> Indices) const {
+ assert(Subtarget.hasNEON() &&
+ "Unexpected register copy without NEON");
+ const TargetRegisterInfo *TRI = &getRegisterInfo();
+ uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
+ uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
+ unsigned NumRegs = Indices.size();
+
+ int SubReg = 0, End = NumRegs, Incr = 1;
+ if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
+ SubReg = NumRegs - 1;
+ End = -1;
+ Incr = -1;
+ }
+
+ for (; SubReg != End; SubReg += Incr) {
+ const MachineInstrBuilder &MIB = BuildMI(MBB, I, DL, get(Opcode));
+ AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
+ AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
+ AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
+ }
+}
+
+void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ if (AArch64::GPR32spRegClass.contains(DestReg) &&
+ (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
+ const TargetRegisterInfo *TRI = &getRegisterInfo();
+
+ if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
+ // If either operand is WSP, expand to ADD #0.
+ if (Subtarget.hasZeroCycleRegMove()) {
+ // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
+ unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
+ &AArch64::GPR64spRegClass);
+ unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
+ &AArch64::GPR64spRegClass);
+ // This instruction is reading and writing X registers. This may upset
+ // the register scavenger and machine verifier, so we need to indicate
+ // that we are reading an undefined value from SrcRegX, but a proper
+ // value from SrcReg.
+ BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
+ .addReg(SrcRegX, RegState::Undef)
+ .addImm(0)
+ .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
+ .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
+ } else {
+ BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc))
+ .addImm(0)
+ .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
+ }
+ } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroing()) {
+ BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg).addImm(0).addImm(
+ AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
+ } else {
+ if (Subtarget.hasZeroCycleRegMove()) {
+ // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
+ unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
+ &AArch64::GPR64spRegClass);
+ unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
+ &AArch64::GPR64spRegClass);
+ // This instruction is reading and writing X registers. This may upset
+ // the register scavenger and machine verifier, so we need to indicate
+ // that we are reading an undefined value from SrcRegX, but a proper
+ // value from SrcReg.
+ BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
+ .addReg(AArch64::XZR)
+ .addReg(SrcRegX, RegState::Undef)
+ .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
+ } else {
+ // Otherwise, expand to ORR WZR.
+ BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
+ .addReg(AArch64::WZR)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ }
+ }
return;
}
- // Now we know that the adjustment can be done in at most two add/sub
- // (immediate) instructions, which is always more efficient than a
- // literal-pool load, or even a hypothetical movz/movk/add sequence
+ if (AArch64::GPR64spRegClass.contains(DestReg) &&
+ (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
+ if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
+ // If either operand is SP, expand to ADD #0.
+ BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc))
+ .addImm(0)
+ .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
+ } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroing()) {
+ BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg).addImm(0).addImm(
+ AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
+ } else {
+ // Otherwise, expand to ORR XZR.
+ BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
+ .addReg(AArch64::XZR)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ }
+ return;
+ }
- // Decide whether we're doing addition or subtraction
- unsigned LowOp, HighOp;
- if (NumBytes >= 0) {
- LowOp = AArch64::ADDxxi_lsl0_s;
- HighOp = AArch64::ADDxxi_lsl12_s;
- } else {
- LowOp = AArch64::SUBxxi_lsl0_s;
- HighOp = AArch64::SUBxxi_lsl12_s;
- NumBytes = abs64(NumBytes);
+ // Copy a DDDD register quad by copying the individual sub-registers.
+ if (AArch64::DDDDRegClass.contains(DestReg) &&
+ AArch64::DDDDRegClass.contains(SrcReg)) {
+ static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
+ AArch64::dsub2, AArch64::dsub3 };
+ copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
+ Indices);
+ return;
}
- // If we're here, at the very least a move needs to be produced, which just
- // happens to be materializable by an ADD.
- if ((NumBytes & 0xfff) || NumBytes == 0) {
- BuildMI(MBB, MBBI, dl, TII.get(LowOp), DstReg)
- .addReg(SrcReg, RegState::Kill)
- .addImm(NumBytes & 0xfff)
- .setMIFlag(MIFlags);
+ // Copy a DDD register triple by copying the individual sub-registers.
+ if (AArch64::DDDRegClass.contains(DestReg) &&
+ AArch64::DDDRegClass.contains(SrcReg)) {
+ static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
+ AArch64::dsub2 };
+ copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
+ Indices);
+ return;
+ }
- // Next update should use the register we've just defined.
- SrcReg = DstReg;
+ // Copy a DD register pair by copying the individual sub-registers.
+ if (AArch64::DDRegClass.contains(DestReg) &&
+ AArch64::DDRegClass.contains(SrcReg)) {
+ static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1 };
+ copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
+ Indices);
+ return;
}
- if (NumBytes & 0xfff000) {
- BuildMI(MBB, MBBI, dl, TII.get(HighOp), DstReg)
- .addReg(SrcReg, RegState::Kill)
- .addImm(NumBytes >> 12)
- .setMIFlag(MIFlags);
+ // Copy a QQQQ register quad by copying the individual sub-registers.
+ if (AArch64::QQQQRegClass.contains(DestReg) &&
+ AArch64::QQQQRegClass.contains(SrcReg)) {
+ static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
+ AArch64::qsub2, AArch64::qsub3 };
+ copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
+ Indices);
+ return;
}
-}
-void llvm::emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
- DebugLoc dl, const TargetInstrInfo &TII,
- unsigned ScratchReg, int64_t NumBytes,
- MachineInstr::MIFlag MIFlags) {
- emitRegUpdate(MBB, MI, dl, TII, AArch64::XSP, AArch64::XSP, AArch64::X16,
- NumBytes, MIFlags);
-}
+ // Copy a QQQ register triple by copying the individual sub-registers.
+ if (AArch64::QQQRegClass.contains(DestReg) &&
+ AArch64::QQQRegClass.contains(SrcReg)) {
+ static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
+ AArch64::qsub2 };
+ copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
+ Indices);
+ return;
+ }
+ // Copy a QQ register pair by copying the individual sub-registers.
+ if (AArch64::QQRegClass.contains(DestReg) &&
+ AArch64::QQRegClass.contains(SrcReg)) {
+ static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1 };
+ copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
+ Indices);
+ return;
+ }
-namespace {
- struct LDTLSCleanup : public MachineFunctionPass {
- static char ID;
- LDTLSCleanup() : MachineFunctionPass(ID) {}
+ if (AArch64::FPR128RegClass.contains(DestReg) &&
+ AArch64::FPR128RegClass.contains(SrcReg)) {
+ if(Subtarget.hasNEON()) {
+ BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
+ .addReg(SrcReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ } else {
+ BuildMI(MBB, I, DL, get(AArch64::STRQpre))
+ .addReg(AArch64::SP, RegState::Define)
+ .addReg(SrcReg, getKillRegState(KillSrc))
+ .addReg(AArch64::SP)
+ .addImm(-16);
+ BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
+ .addReg(AArch64::SP, RegState::Define)
+ .addReg(DestReg, RegState::Define)
+ .addReg(AArch64::SP)
+ .addImm(16);
+ }
+ return;
+ }
- virtual bool runOnMachineFunction(MachineFunction &MF) {
- AArch64MachineFunctionInfo* MFI
- = MF.getInfo<AArch64MachineFunctionInfo>();
- if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
- // No point folding accesses if there isn't at least two.
- return false;
- }
+ if (AArch64::FPR64RegClass.contains(DestReg) &&
+ AArch64::FPR64RegClass.contains(SrcReg)) {
+ if(Subtarget.hasNEON()) {
+ DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
+ &AArch64::FPR128RegClass);
+ SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
+ &AArch64::FPR128RegClass);
+ BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
+ .addReg(SrcReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ } else {
+ BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ }
+ return;
+ }
- MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>();
- return VisitNode(DT->getRootNode(), 0);
- }
-
- // Visit the dominator subtree rooted at Node in pre-order.
- // If TLSBaseAddrReg is non-null, then use that to replace any
- // TLS_base_addr instructions. Otherwise, create the register
- // when the first such instruction is seen, and then use it
- // as we encounter more instructions.
- bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) {
- MachineBasicBlock *BB = Node->getBlock();
- bool Changed = false;
-
- // Traverse the current block.
- for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
- ++I) {
- switch (I->getOpcode()) {
- case AArch64::TLSDESC_BLRx:
- // Make sure it's a local dynamic access.
- if (!I->getOperand(1).isSymbol() ||
- strcmp(I->getOperand(1).getSymbolName(), "_TLS_MODULE_BASE_"))
- break;
-
- if (TLSBaseAddrReg)
- I = ReplaceTLSBaseAddrCall(I, TLSBaseAddrReg);
- else
- I = SetRegister(I, &TLSBaseAddrReg);
- Changed = true;
- break;
- default:
- break;
- }
- }
+ if (AArch64::FPR32RegClass.contains(DestReg) &&
+ AArch64::FPR32RegClass.contains(SrcReg)) {
+ if(Subtarget.hasNEON()) {
+ DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
+ &AArch64::FPR128RegClass);
+ SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
+ &AArch64::FPR128RegClass);
+ BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
+ .addReg(SrcReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ } else {
+ BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ }
+ return;
+ }
- // Visit the children of this block in the dominator tree.
- for (MachineDomTreeNode::iterator I = Node->begin(), E = Node->end();
- I != E; ++I) {
- Changed |= VisitNode(*I, TLSBaseAddrReg);
- }
+ if (AArch64::FPR16RegClass.contains(DestReg) &&
+ AArch64::FPR16RegClass.contains(SrcReg)) {
+ if(Subtarget.hasNEON()) {
+ DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
+ &AArch64::FPR128RegClass);
+ SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
+ &AArch64::FPR128RegClass);
+ BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
+ .addReg(SrcReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ } else {
+ DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
+ &AArch64::FPR32RegClass);
+ SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
+ &AArch64::FPR32RegClass);
+ BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ }
+ return;
+ }
- return Changed;
+ if (AArch64::FPR8RegClass.contains(DestReg) &&
+ AArch64::FPR8RegClass.contains(SrcReg)) {
+ if(Subtarget.hasNEON()) {
+ DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
+ &AArch64::FPR128RegClass);
+ SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
+ &AArch64::FPR128RegClass);
+ BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
+ .addReg(SrcReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ } else {
+ DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
+ &AArch64::FPR32RegClass);
+ SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
+ &AArch64::FPR32RegClass);
+ BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
}
+ return;
+ }
+
+ // Copies between GPR64 and FPR64.
+ if (AArch64::FPR64RegClass.contains(DestReg) &&
+ AArch64::GPR64RegClass.contains(SrcReg)) {
+ BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
+ }
+ if (AArch64::GPR64RegClass.contains(DestReg) &&
+ AArch64::FPR64RegClass.contains(SrcReg)) {
+ BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
+ }
+ // Copies between GPR32 and FPR32.
+ if (AArch64::FPR32RegClass.contains(DestReg) &&
+ AArch64::GPR32RegClass.contains(SrcReg)) {
+ BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
+ }
+ if (AArch64::GPR32RegClass.contains(DestReg) &&
+ AArch64::FPR32RegClass.contains(SrcReg)) {
+ BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
+ }
- // Replace the TLS_base_addr instruction I with a copy from
- // TLSBaseAddrReg, returning the new instruction.
- MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr *I,
- unsigned TLSBaseAddrReg) {
- MachineFunction *MF = I->getParent()->getParent();
- const AArch64TargetMachine *TM =
- static_cast<const AArch64TargetMachine *>(&MF->getTarget());
- const AArch64InstrInfo *TII = TM->getInstrInfo();
+ if (DestReg == AArch64::NZCV) {
+ assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
+ BuildMI(MBB, I, DL, get(AArch64::MSR))
+ .addImm(AArch64SysReg::NZCV)
+ .addReg(SrcReg, getKillRegState(KillSrc))
+ .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
+ return;
+ }
- // Insert a Copy from TLSBaseAddrReg to x0, which is where the rest of the
- // code sequence assumes the address will be.
- MachineInstr *Copy = BuildMI(*I->getParent(), I, I->getDebugLoc(),
- TII->get(TargetOpcode::COPY),
- AArch64::X0)
- .addReg(TLSBaseAddrReg);
+ if (SrcReg == AArch64::NZCV) {
+ assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
+ BuildMI(MBB, I, DL, get(AArch64::MRS))
+ .addReg(DestReg)
+ .addImm(AArch64SysReg::NZCV)
+ .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
+ return;
+ }
- // Erase the TLS_base_addr instruction.
- I->eraseFromParent();
+ llvm_unreachable("unimplemented reg-to-reg copy");
+}
- return Copy;
+void AArch64InstrInfo::storeRegToStackSlot(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
+ bool isKill, int FI, const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ DebugLoc DL;
+ if (MBBI != MBB.end())
+ DL = MBBI->getDebugLoc();
+ MachineFunction &MF = *MBB.getParent();
+ MachineFrameInfo &MFI = *MF.getFrameInfo();
+ unsigned Align = MFI.getObjectAlignment(FI);
+
+ MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
+ MachineMemOperand *MMO = MF.getMachineMemOperand(
+ PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
+ unsigned Opc = 0;
+ bool Offset = true;
+ switch (RC->getSize()) {
+ case 1:
+ if (AArch64::FPR8RegClass.hasSubClassEq(RC))
+ Opc = AArch64::STRBui;
+ break;
+ case 2:
+ if (AArch64::FPR16RegClass.hasSubClassEq(RC))
+ Opc = AArch64::STRHui;
+ break;
+ case 4:
+ if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
+ Opc = AArch64::STRWui;
+ if (TargetRegisterInfo::isVirtualRegister(SrcReg))
+ MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
+ else
+ assert(SrcReg != AArch64::WSP);
+ } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
+ Opc = AArch64::STRSui;
+ break;
+ case 8:
+ if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
+ Opc = AArch64::STRXui;
+ if (TargetRegisterInfo::isVirtualRegister(SrcReg))
+ MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
+ else
+ assert(SrcReg != AArch64::SP);
+ } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
+ Opc = AArch64::STRDui;
+ break;
+ case 16:
+ if (AArch64::FPR128RegClass.hasSubClassEq(RC))
+ Opc = AArch64::STRQui;
+ else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
+ assert(Subtarget.hasNEON() &&
+ "Unexpected register store without NEON");
+ Opc = AArch64::ST1Twov1d, Offset = false;
+ }
+ break;
+ case 24:
+ if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
+ assert(Subtarget.hasNEON() &&
+ "Unexpected register store without NEON");
+ Opc = AArch64::ST1Threev1d, Offset = false;
+ }
+ break;
+ case 32:
+ if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
+ assert(Subtarget.hasNEON() &&
+ "Unexpected register store without NEON");
+ Opc = AArch64::ST1Fourv1d, Offset = false;
+ } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
+ assert(Subtarget.hasNEON() &&
+ "Unexpected register store without NEON");
+ Opc = AArch64::ST1Twov2d, Offset = false;
}
+ break;
+ case 48:
+ if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
+ assert(Subtarget.hasNEON() &&
+ "Unexpected register store without NEON");
+ Opc = AArch64::ST1Threev2d, Offset = false;
+ }
+ break;
+ case 64:
+ if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
+ assert(Subtarget.hasNEON() &&
+ "Unexpected register store without NEON");
+ Opc = AArch64::ST1Fourv2d, Offset = false;
+ }
+ break;
+ }
+ assert(Opc && "Unknown register class");
- // Create a virtal register in *TLSBaseAddrReg, and populate it by
- // inserting a copy instruction after I. Returns the new instruction.
- MachineInstr *SetRegister(MachineInstr *I, unsigned *TLSBaseAddrReg) {
- MachineFunction *MF = I->getParent()->getParent();
- const AArch64TargetMachine *TM =
- static_cast<const AArch64TargetMachine *>(&MF->getTarget());
- const AArch64InstrInfo *TII = TM->getInstrInfo();
+ const MachineInstrBuilder &MI = BuildMI(MBB, MBBI, DL, get(Opc))
+ .addReg(SrcReg, getKillRegState(isKill))
+ .addFrameIndex(FI);
- // Create a virtual register for the TLS base address.
- MachineRegisterInfo &RegInfo = MF->getRegInfo();
- *TLSBaseAddrReg = RegInfo.createVirtualRegister(&AArch64::GPR64RegClass);
+ if (Offset)
+ MI.addImm(0);
+ MI.addMemOperand(MMO);
+}
- // Insert a copy from X0 to TLSBaseAddrReg for later.
- MachineInstr *Next = I->getNextNode();
- MachineInstr *Copy = BuildMI(*I->getParent(), Next, I->getDebugLoc(),
- TII->get(TargetOpcode::COPY),
- *TLSBaseAddrReg)
- .addReg(AArch64::X0);
+void AArch64InstrInfo::loadRegFromStackSlot(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
+ int FI, const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ DebugLoc DL;
+ if (MBBI != MBB.end())
+ DL = MBBI->getDebugLoc();
+ MachineFunction &MF = *MBB.getParent();
+ MachineFrameInfo &MFI = *MF.getFrameInfo();
+ unsigned Align = MFI.getObjectAlignment(FI);
+ MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
+ MachineMemOperand *MMO = MF.getMachineMemOperand(
+ PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);
- return Copy;
+ unsigned Opc = 0;
+ bool Offset = true;
+ switch (RC->getSize()) {
+ case 1:
+ if (AArch64::FPR8RegClass.hasSubClassEq(RC))
+ Opc = AArch64::LDRBui;
+ break;
+ case 2:
+ if (AArch64::FPR16RegClass.hasSubClassEq(RC))
+ Opc = AArch64::LDRHui;
+ break;
+ case 4:
+ if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
+ Opc = AArch64::LDRWui;
+ if (TargetRegisterInfo::isVirtualRegister(DestReg))
+ MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
+ else
+ assert(DestReg != AArch64::WSP);
+ } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
+ Opc = AArch64::LDRSui;
+ break;
+ case 8:
+ if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
+ Opc = AArch64::LDRXui;
+ if (TargetRegisterInfo::isVirtualRegister(DestReg))
+ MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
+ else
+ assert(DestReg != AArch64::SP);
+ } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
+ Opc = AArch64::LDRDui;
+ break;
+ case 16:
+ if (AArch64::FPR128RegClass.hasSubClassEq(RC))
+ Opc = AArch64::LDRQui;
+ else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
+ assert(Subtarget.hasNEON() &&
+ "Unexpected register load without NEON");
+ Opc = AArch64::LD1Twov1d, Offset = false;
+ }
+ break;
+ case 24:
+ if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
+ assert(Subtarget.hasNEON() &&
+ "Unexpected register load without NEON");
+ Opc = AArch64::LD1Threev1d, Offset = false;
+ }
+ break;
+ case 32:
+ if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
+ assert(Subtarget.hasNEON() &&
+ "Unexpected register load without NEON");
+ Opc = AArch64::LD1Fourv1d, Offset = false;
+ } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
+ assert(Subtarget.hasNEON() &&
+ "Unexpected register load without NEON");
+ Opc = AArch64::LD1Twov2d, Offset = false;
}
+ break;
+ case 48:
+ if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
+ assert(Subtarget.hasNEON() &&
+ "Unexpected register load without NEON");
+ Opc = AArch64::LD1Threev2d, Offset = false;
+ }
+ break;
+ case 64:
+ if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
+ assert(Subtarget.hasNEON() &&
+ "Unexpected register load without NEON");
+ Opc = AArch64::LD1Fourv2d, Offset = false;
+ }
+ break;
+ }
+ assert(Opc && "Unknown register class");
+
+ const MachineInstrBuilder &MI = BuildMI(MBB, MBBI, DL, get(Opc))
+ .addReg(DestReg, getDefRegState(true))
+ .addFrameIndex(FI);
+ if (Offset)
+ MI.addImm(0);
+ MI.addMemOperand(MMO);
+}
- virtual const char *getPassName() const {
- return "Local Dynamic TLS Access Clean-up";
+void llvm::emitFrameOffset(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg, int Offset,
+ const TargetInstrInfo *TII,
+ MachineInstr::MIFlag Flag, bool SetNZCV) {
+ if (DestReg == SrcReg && Offset == 0)
+ return;
+
+ bool isSub = Offset < 0;
+ if (isSub)
+ Offset = -Offset;
+
+ // FIXME: If the offset won't fit in 24-bits, compute the offset into a
+ // scratch register. If DestReg is a virtual register, use it as the
+ // scratch register; otherwise, create a new virtual register (to be
+ // replaced by the scavenger at the end of PEI). That case can be optimized
+ // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
+ // register can be loaded with offset%8 and the add/sub can use an extending
+ // instruction with LSL#3.
+ // Currently the function handles any offsets but generates a poor sequence
+ // of code.
+ // assert(Offset < (1 << 24) && "unimplemented reg plus immediate");
+
+ unsigned Opc;
+ if (SetNZCV)
+ Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
+ else
+ Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
+ const unsigned MaxEncoding = 0xfff;
+ const unsigned ShiftSize = 12;
+ const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
+ while (((unsigned)Offset) >= (1 << ShiftSize)) {
+ unsigned ThisVal;
+ if (((unsigned)Offset) > MaxEncodableValue) {
+ ThisVal = MaxEncodableValue;
+ } else {
+ ThisVal = Offset & MaxEncodableValue;
}
+ assert((ThisVal >> ShiftSize) <= MaxEncoding &&
+ "Encoding cannot handle value that big");
+ BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
+ .addReg(SrcReg)
+ .addImm(ThisVal >> ShiftSize)
+ .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
+ .setMIFlag(Flag);
+
+ SrcReg = DestReg;
+ Offset -= ThisVal;
+ if (Offset == 0)
+ return;
+ }
+ BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
+ .addReg(SrcReg)
+ .addImm(Offset)
+ .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
+ .setMIFlag(Flag);
+}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesCFG();
- AU.addRequired<MachineDominatorTree>();
- MachineFunctionPass::getAnalysisUsage(AU);
+MachineInstr *
+AArch64InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const {
+ // This is a bit of a hack. Consider this instruction:
+ //
+ // %vreg0<def> = COPY %SP; GPR64all:%vreg0
+ //
+ // We explicitly chose GPR64all for the virtual register so such a copy might
+ // be eliminated by RegisterCoalescer. However, that may not be possible, and
+ // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
+ // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
+ //
+ // To prevent that, we are going to constrain the %vreg0 register class here.
+ //
+ // <rdar://problem/11522048>
+ //
+ if (MI->isCopy()) {
+ unsigned DstReg = MI->getOperand(0).getReg();
+ unsigned SrcReg = MI->getOperand(1).getReg();
+ if (SrcReg == AArch64::SP &&
+ TargetRegisterInfo::isVirtualRegister(DstReg)) {
+ MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
+ return nullptr;
}
- };
+ if (DstReg == AArch64::SP &&
+ TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+ MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
+ return nullptr;
+ }
+ }
+
+ // Cannot fold.
+ return nullptr;
}
-char LDTLSCleanup::ID = 0;
-FunctionPass*
-llvm::createAArch64CleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); }
+int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
+ bool *OutUseUnscaledOp,
+ unsigned *OutUnscaledOp,
+ int *EmittableOffset) {
+ int Scale = 1;
+ bool IsSigned = false;
+ // The ImmIdx should be changed case by case if it is not 2.
+ unsigned ImmIdx = 2;
+ unsigned UnscaledOp = 0;
+ // Set output values in case of early exit.
+ if (EmittableOffset)
+ *EmittableOffset = 0;
+ if (OutUseUnscaledOp)
+ *OutUseUnscaledOp = false;
+ if (OutUnscaledOp)
+ *OutUnscaledOp = 0;
+ switch (MI.getOpcode()) {
+ default:
+ llvm_unreachable("unhandled opcode in rewriteAArch64FrameIndex");
+ // Vector spills/fills can't take an immediate offset.
+ case AArch64::LD1Twov2d:
+ case AArch64::LD1Threev2d:
+ case AArch64::LD1Fourv2d:
+ case AArch64::LD1Twov1d:
+ case AArch64::LD1Threev1d:
+ case AArch64::LD1Fourv1d:
+ case AArch64::ST1Twov2d:
+ case AArch64::ST1Threev2d:
+ case AArch64::ST1Fourv2d:
+ case AArch64::ST1Twov1d:
+ case AArch64::ST1Threev1d:
+ case AArch64::ST1Fourv1d:
+ return AArch64FrameOffsetCannotUpdate;
+ case AArch64::PRFMui:
+ Scale = 8;
+ UnscaledOp = AArch64::PRFUMi;
+ break;
+ case AArch64::LDRXui:
+ Scale = 8;
+ UnscaledOp = AArch64::LDURXi;
+ break;
+ case AArch64::LDRWui:
+ Scale = 4;
+ UnscaledOp = AArch64::LDURWi;
+ break;
+ case AArch64::LDRBui:
+ Scale = 1;
+ UnscaledOp = AArch64::LDURBi;
+ break;
+ case AArch64::LDRHui:
+ Scale = 2;
+ UnscaledOp = AArch64::LDURHi;
+ break;
+ case AArch64::LDRSui:
+ Scale = 4;
+ UnscaledOp = AArch64::LDURSi;
+ break;
+ case AArch64::LDRDui:
+ Scale = 8;
+ UnscaledOp = AArch64::LDURDi;
+ break;
+ case AArch64::LDRQui:
+ Scale = 16;
+ UnscaledOp = AArch64::LDURQi;
+ break;
+ case AArch64::LDRBBui:
+ Scale = 1;
+ UnscaledOp = AArch64::LDURBBi;
+ break;
+ case AArch64::LDRHHui:
+ Scale = 2;
+ UnscaledOp = AArch64::LDURHHi;
+ break;
+ case AArch64::LDRSBXui:
+ Scale = 1;
+ UnscaledOp = AArch64::LDURSBXi;
+ break;
+ case AArch64::LDRSBWui:
+ Scale = 1;
+ UnscaledOp = AArch64::LDURSBWi;
+ break;
+ case AArch64::LDRSHXui:
+ Scale = 2;
+ UnscaledOp = AArch64::LDURSHXi;
+ break;
+ case AArch64::LDRSHWui:
+ Scale = 2;
+ UnscaledOp = AArch64::LDURSHWi;
+ break;
+ case AArch64::LDRSWui:
+ Scale = 4;
+ UnscaledOp = AArch64::LDURSWi;
+ break;
+
+ case AArch64::STRXui:
+ Scale = 8;
+ UnscaledOp = AArch64::STURXi;
+ break;
+ case AArch64::STRWui:
+ Scale = 4;
+ UnscaledOp = AArch64::STURWi;
+ break;
+ case AArch64::STRBui:
+ Scale = 1;
+ UnscaledOp = AArch64::STURBi;
+ break;
+ case AArch64::STRHui:
+ Scale = 2;
+ UnscaledOp = AArch64::STURHi;
+ break;
+ case AArch64::STRSui:
+ Scale = 4;
+ UnscaledOp = AArch64::STURSi;
+ break;
+ case AArch64::STRDui:
+ Scale = 8;
+ UnscaledOp = AArch64::STURDi;
+ break;
+ case AArch64::STRQui:
+ Scale = 16;
+ UnscaledOp = AArch64::STURQi;
+ break;
+ case AArch64::STRBBui:
+ Scale = 1;
+ UnscaledOp = AArch64::STURBBi;
+ break;
+ case AArch64::STRHHui:
+ Scale = 2;
+ UnscaledOp = AArch64::STURHHi;
+ break;
+
+ case AArch64::LDPXi:
+ case AArch64::LDPDi:
+ case AArch64::STPXi:
+ case AArch64::STPDi:
+ IsSigned = true;
+ Scale = 8;
+ break;
+ case AArch64::LDPQi:
+ case AArch64::STPQi:
+ IsSigned = true;
+ Scale = 16;
+ break;
+ case AArch64::LDPWi:
+ case AArch64::LDPSi:
+ case AArch64::STPWi:
+ case AArch64::STPSi:
+ IsSigned = true;
+ Scale = 4;
+ break;
+
+ case AArch64::LDURXi:
+ case AArch64::LDURWi:
+ case AArch64::LDURBi:
+ case AArch64::LDURHi:
+ case AArch64::LDURSi:
+ case AArch64::LDURDi:
+ case AArch64::LDURQi:
+ case AArch64::LDURHHi:
+ case AArch64::LDURBBi:
+ case AArch64::LDURSBXi:
+ case AArch64::LDURSBWi:
+ case AArch64::LDURSHXi:
+ case AArch64::LDURSHWi:
+ case AArch64::LDURSWi:
+ case AArch64::STURXi:
+ case AArch64::STURWi:
+ case AArch64::STURBi:
+ case AArch64::STURHi:
+ case AArch64::STURSi:
+ case AArch64::STURDi:
+ case AArch64::STURQi:
+ case AArch64::STURBBi:
+ case AArch64::STURHHi:
+ Scale = 1;
+ break;
+ }
+
+ Offset += MI.getOperand(ImmIdx).getImm() * Scale;
+
+ bool useUnscaledOp = false;
+ // If the offset doesn't match the scale, we rewrite the instruction to
+ // use the unscaled instruction instead. Likewise, if we have a negative
+ // offset (and have an unscaled op to use).
+ if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
+ useUnscaledOp = true;
+
+ // Use an unscaled addressing mode if the instruction has a negative offset
+ // (or if the instruction is already using an unscaled addressing mode).
+ unsigned MaskBits;
+ if (IsSigned) {
+ // ldp/stp instructions.
+ MaskBits = 7;
+ Offset /= Scale;
+ } else if (UnscaledOp == 0 || useUnscaledOp) {
+ MaskBits = 9;
+ IsSigned = true;
+ Scale = 1;
+ } else {
+ MaskBits = 12;
+ IsSigned = false;
+ Offset /= Scale;
+ }
+
+ // Attempt to fold address computation.
+ int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
+ int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
+ if (Offset >= MinOff && Offset <= MaxOff) {
+ if (EmittableOffset)
+ *EmittableOffset = Offset;
+ Offset = 0;
+ } else {
+ int NewOff = Offset < 0 ? MinOff : MaxOff;
+ if (EmittableOffset)
+ *EmittableOffset = NewOff;
+ Offset = (Offset - NewOff) * Scale;
+ }
+ if (OutUseUnscaledOp)
+ *OutUseUnscaledOp = useUnscaledOp;
+ if (OutUnscaledOp)
+ *OutUnscaledOp = UnscaledOp;
+ return AArch64FrameOffsetCanUpdate |
+ (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
+}
+
+bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
+ unsigned FrameReg, int &Offset,
+ const AArch64InstrInfo *TII) {
+ unsigned Opcode = MI.getOpcode();
+ unsigned ImmIdx = FrameRegIdx + 1;
+
+ if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
+ Offset += MI.getOperand(ImmIdx).getImm();
+ emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
+ MI.getOperand(0).getReg(), FrameReg, Offset, TII,
+ MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
+ MI.eraseFromParent();
+ Offset = 0;
+ return true;
+ }
+
+ int NewOffset;
+ unsigned UnscaledOp;
+ bool UseUnscaledOp;
+ int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
+ &UnscaledOp, &NewOffset);
+ if (Status & AArch64FrameOffsetCanUpdate) {
+ if (Status & AArch64FrameOffsetIsLegal)
+ // Replace the FrameIndex with FrameReg.
+ MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
+ if (UseUnscaledOp)
+ MI.setDesc(TII->get(UnscaledOp));
+
+ MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
+ return Offset == 0;
+ }
+
+ return false;
+}
+
+void AArch64InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
+ NopInst.setOpcode(AArch64::HINT);
+ NopInst.addOperand(MCOperand::CreateImm(0));
+}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index 620ecc9..f70b82b 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -11,11 +11,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_TARGET_AARCH64INSTRINFO_H
-#define LLVM_TARGET_AARCH64INSTRINFO_H
+#ifndef LLVM_TARGET_AArch64INSTRINFO_H
+#define LLVM_TARGET_AArch64INSTRINFO_H
-#include "llvm/Target/TargetInstrInfo.h"
+#include "AArch64.h"
#include "AArch64RegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
#define GET_INSTRINFO_HEADER
#include "AArch64GenInstrInfo.inc"
@@ -23,86 +24,206 @@
namespace llvm {
class AArch64Subtarget;
+class AArch64TargetMachine;
class AArch64InstrInfo : public AArch64GenInstrInfo {
+ // Reserve bits in the MachineMemOperand target hint flags, starting at 1.
+ // They will be shifted into MOTargetHintStart when accessed.
+ enum TargetMemOperandFlags {
+ MOSuppressPair = 1
+ };
+
const AArch64RegisterInfo RI;
const AArch64Subtarget &Subtarget;
+
public:
- explicit AArch64InstrInfo(const AArch64Subtarget &TM);
+ explicit AArch64InstrInfo(const AArch64Subtarget &STI);
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
- ///
- const TargetRegisterInfo &getRegisterInfo() const { return RI; }
+ const AArch64RegisterInfo &getRegisterInfo() const { return RI; }
+
+ unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
+
+ bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
+ unsigned &DstReg, unsigned &SubIdx) const override;
+
+ unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
+ unsigned isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
+
+ /// Returns true if there is a shiftable register and the shift value
+ /// is non-zero.
+ bool hasShiftedReg(const MachineInstr *MI) const;
+
+ /// Returns true if there is an extendable register and the extending
+ /// value is non-zero.
+ bool hasExtendedReg(const MachineInstr *MI) const;
+
+ /// \brief Does this instruction set its full destination register to zero?
+ bool isGPRZero(const MachineInstr *MI) const;
+
+ /// \brief Does this instruction rename a GPR without modifying bits?
+ bool isGPRCopy(const MachineInstr *MI) const;
+
+ /// \brief Does this instruction rename an FPR without modifying bits?
+ bool isFPRCopy(const MachineInstr *MI) const;
+
+ /// Return true if this load/store scales or extends its register offset.
+ /// This refers to scaling a dynamic index as opposed to scaled immediates.
+ /// MI should be a memory op that allows scaled addressing.
+ bool isScaledAddr(const MachineInstr *MI) const;
+
+ /// Return true if pairing the given load or store is hinted to be
+ /// unprofitable.
+ bool isLdStPairSuppressed(const MachineInstr *MI) const;
+
+ /// Hint that pairing the given load or store is unprofitable.
+ void suppressLdStPair(MachineInstr *MI) const;
+
+ bool getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const override;
- const AArch64Subtarget &getSubTarget() const { return Subtarget; }
+ bool enableClusterLoads() const override { return true; }
- void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const;
+ bool shouldClusterLoads(MachineInstr *FirstLdSt, MachineInstr *SecondLdSt,
+ unsigned NumLoads) const override;
+
+ bool shouldScheduleAdjacent(MachineInstr *First,
+ MachineInstr *Second) const override;
+
+ MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx,
+ uint64_t Offset, const MDNode *MDPtr,
+ DebugLoc DL) const;
+ void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ DebugLoc DL, unsigned DestReg, unsigned SrcReg,
+ bool KillSrc, unsigned Opcode,
+ llvm::ArrayRef<unsigned> Indices) const;
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ DebugLoc DL, unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned SrcReg, bool isKill, int FrameIndex,
+ MachineBasicBlock::iterator MBBI, unsigned SrcReg,
+ bool isKill, int FrameIndex,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo *TRI) const override;
+
void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, int FrameIdx,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
+ MachineBasicBlock::iterator MBBI, unsigned DestReg,
+ int FrameIndex, const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+
+ MachineInstr *
+ foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const override;
bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify = false) const;
+ bool AllowModify = false) const override;
+ unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const;
- unsigned RemoveBranch(MachineBasicBlock &MBB) const;
- bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
-
- bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;
-
- /// Look through the instructions in this function and work out the largest
- /// the stack frame can be while maintaining the ability to address local
- /// slots with no complexities.
- unsigned estimateRSStackLimit(MachineFunction &MF) const;
-
- /// getAddressConstraints - For loads and stores (and PRFMs) taking an
- /// immediate offset, this function determines the constraints required for
- /// the immediate. It must satisfy:
- /// + MinOffset <= imm <= MaxOffset
- /// + imm % OffsetScale == 0
- void getAddressConstraints(const MachineInstr &MI, int &AccessScale,
- int &MinOffset, int &MaxOffset) const;
-
-
- unsigned getInstSizeInBytes(const MachineInstr &MI) const;
-
- unsigned getInstBundleLength(const MachineInstr &MI) const;
-
+ DebugLoc DL) const override;
+ bool
+ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
+ bool canInsertSelect(const MachineBasicBlock &,
+ const SmallVectorImpl<MachineOperand> &Cond, unsigned,
+ unsigned, int &, int &, int &) const override;
+ void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ DebugLoc DL, unsigned DstReg,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ unsigned TrueReg, unsigned FalseReg) const override;
+ void getNoopForMachoTarget(MCInst &NopInst) const override;
+
+ /// analyzeCompare - For a comparison instruction, return the source registers
+ /// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
+ /// Return true if the comparison instruction can be analyzed.
+ bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
+ unsigned &SrcReg2, int &CmpMask,
+ int &CmpValue) const override;
+ /// optimizeCompareInstr - Convert the instruction supplying the argument to
+ /// the comparison into one that sets the zero bit in the flags register.
+ bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
+ unsigned SrcReg2, int CmpMask, int CmpValue,
+ const MachineRegisterInfo *MRI) const override;
+
+private:
+ void instantiateCondBranch(MachineBasicBlock &MBB, DebugLoc DL,
+ MachineBasicBlock *TBB,
+ const SmallVectorImpl<MachineOperand> &Cond) const;
};
-bool rewriteA64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
- unsigned FrameReg, int &Offset,
- const AArch64InstrInfo &TII);
-
+/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
+/// plus Offset. This is intended to be used from within the prolog/epilog
+/// insertion (PEI) pass, where a virtual scratch register may be allocated
+/// if necessary, to be replaced by the scavenger at the end of PEI.
+void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ DebugLoc DL, unsigned DestReg, unsigned SrcReg, int Offset,
+ const TargetInstrInfo *TII,
+ MachineInstr::MIFlag = MachineInstr::NoFlags,
+ bool SetNZCV = false);
+
+/// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the
+/// FP. Return false if the offset could not be handled directly in MI, and
+/// return the left-over portion by reference.
+bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
+ unsigned FrameReg, int &Offset,
+ const AArch64InstrInfo *TII);
+
+/// \brief Used to report the frame offset status in isAArch64FrameOffsetLegal.
+enum AArch64FrameOffsetStatus {
+ AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
+ AArch64FrameOffsetIsLegal = 0x1, ///< Offset is legal.
+ AArch64FrameOffsetCanUpdate = 0x2 ///< Offset can apply, at least partly.
+};
-void emitRegUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
- DebugLoc dl, const TargetInstrInfo &TII,
- unsigned DstReg, unsigned SrcReg, unsigned ScratchReg,
- int64_t NumBytes,
- MachineInstr::MIFlag MIFlags = MachineInstr::NoFlags);
+/// \brief Check if the @p Offset is a valid frame offset for @p MI.
+/// The returned value reports the validity of the frame offset for @p MI.
+/// It uses the values defined by AArch64FrameOffsetStatus for that.
+/// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to
+/// use an offset.
+/// If result & AArch64FrameOffsetIsLegal, @p Offset can completely be
+/// rewritten in @p MI.
+/// If result & AArch64FrameOffsetCanUpdate, @p Offset contains the
+/// amount that exceeds the limit of the legal offset.
+/// If set, @p OutUseUnscaledOp will contain whether @p MI should be
+/// turned into an unscaled operator, whose opcode is in @p OutUnscaledOp.
+/// If set, @p EmittableOffset contains the amount that can be set in @p MI
+/// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that
+/// is a legal offset.
+int isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
+ bool *OutUseUnscaledOp = nullptr,
+ unsigned *OutUnscaledOp = nullptr,
+ int *EmittableOffset = nullptr);
+
+static inline bool isUncondBranchOpcode(int Opc) { return Opc == AArch64::B; }
+
+static inline bool isCondBranchOpcode(int Opc) {
+ switch (Opc) {
+ case AArch64::Bcc:
+ case AArch64::CBZW:
+ case AArch64::CBZX:
+ case AArch64::CBNZW:
+ case AArch64::CBNZX:
+ case AArch64::TBZW:
+ case AArch64::TBZX:
+ case AArch64::TBNZW:
+ case AArch64::TBNZX:
+ return true;
+ default:
+ return false;
+ }
+}
-void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
- DebugLoc dl, const TargetInstrInfo &TII,
- unsigned ScratchReg, int64_t NumBytes,
- MachineInstr::MIFlag MIFlags = MachineInstr::NoFlags);
+static inline bool isIndirectBranchOpcode(int Opc) { return Opc == AArch64::BR; }
-}
+} // end namespace llvm
#endif
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 8e5a4d3..0ba069e 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -1,4 +1,4 @@
-//===----- AArch64InstrInfo.td - AArch64 Instruction Info ----*- tablegen -*-=//
+//=- AArch64InstrInfo.td - Describe the AArch64 Instructions -*- tablegen -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file describes the AArch64 scalar instructions in TableGen format.
+// AArch64 Instruction definitions.
//
//===----------------------------------------------------------------------===//
@@ -19,5176 +19,5272 @@ def HasFPARMv8 : Predicate<"Subtarget->hasFPARMv8()">,
def HasNEON : Predicate<"Subtarget->hasNEON()">,
AssemblerPredicate<"FeatureNEON", "neon">;
def HasCrypto : Predicate<"Subtarget->hasCrypto()">,
- AssemblerPredicate<"FeatureCrypto","crypto">;
-
-// Use fused MAC if more precision in FP computation is allowed.
-def UseFusedMAC : Predicate<"(TM.Options.AllowFPOpFusion =="
- " FPOpFusion::Fast)">;
-include "AArch64InstrFormats.td"
+ AssemblerPredicate<"FeatureCrypto", "crypto">;
+def HasCRC : Predicate<"Subtarget->hasCRC()">,
+ AssemblerPredicate<"FeatureCRC", "crc">;
+def IsLE : Predicate<"Subtarget->isLittleEndian()">;
+def IsBE : Predicate<"!Subtarget->isLittleEndian()">;
//===----------------------------------------------------------------------===//
-// Target-specific ISD nodes and profiles
-//===----------------------------------------------------------------------===//
-
-def SDT_A64ret : SDTypeProfile<0, 0, []>;
-def A64ret : SDNode<"AArch64ISD::Ret", SDT_A64ret, [SDNPHasChain,
- SDNPOptInGlue,
- SDNPVariadic]>;
-
-// (ins NZCV, Condition, Dest)
-def SDT_A64br_cc : SDTypeProfile<0, 3, [SDTCisVT<0, i32>]>;
-def A64br_cc : SDNode<"AArch64ISD::BR_CC", SDT_A64br_cc, [SDNPHasChain]>;
-
-// (outs Result), (ins NZCV, IfTrue, IfFalse, Condition)
-def SDT_A64select_cc : SDTypeProfile<1, 4, [SDTCisVT<1, i32>,
- SDTCisSameAs<0, 2>,
- SDTCisSameAs<2, 3>]>;
-def A64select_cc : SDNode<"AArch64ISD::SELECT_CC", SDT_A64select_cc>;
-
-// (outs NZCV), (ins LHS, RHS, Condition)
-def SDT_A64setcc : SDTypeProfile<1, 3, [SDTCisVT<0, i32>,
- SDTCisSameAs<1, 2>]>;
-def A64setcc : SDNode<"AArch64ISD::SETCC", SDT_A64setcc>;
-
-
-// (outs GPR64), (ins)
-def A64threadpointer : SDNode<"AArch64ISD::THREAD_POINTER", SDTPtrLeaf>;
-
-// A64 compares don't care about the cond really (they set all flags) so a
-// simple binary operator is useful.
-def A64cmp : PatFrag<(ops node:$lhs, node:$rhs),
- (A64setcc node:$lhs, node:$rhs, cond)>;
-
-
-// When matching a notional (CMP op1, (sub 0, op2)), we'd like to use a CMN
-// instruction on the grounds that "op1 - (-op2) == op1 + op2". However, the C
-// and V flags can be set differently by this operation. It comes down to
-// whether "SInt(~op2)+1 == SInt(~op2+1)" (and the same for UInt). If they are
-// then everything is fine. If not then the optimization is wrong. Thus general
-// comparisons are only valid if op2 != 0.
-
-// So, finally, the only LLVM-native comparisons that don't mention C and V are
-// SETEQ and SETNE. They're the only ones we can safely use CMN for in the
-// absence of information about op2.
-def equality_cond : PatLeaf<(cond), [{
- return N->get() == ISD::SETEQ || N->get() == ISD::SETNE;
-}]>;
-
-def A64cmn : PatFrag<(ops node:$lhs, node:$rhs),
- (A64setcc node:$lhs, (sub 0, node:$rhs), equality_cond)>;
-
-// There are two layers of indirection here, driven by the following
-// considerations.
-// + TableGen does not know CodeModel or Reloc so that decision should be
-// made for a variable/address at ISelLowering.
-// + The output of ISelLowering should be selectable (hence the Wrapper,
-// rather than a bare target opcode)
-def SDTAArch64WrapperLarge : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>,
- SDTCisSameAs<0, 2>,
- SDTCisSameAs<0, 3>,
- SDTCisSameAs<0, 4>,
- SDTCisPtrTy<0>]>;
-
-def A64WrapperLarge :SDNode<"AArch64ISD::WrapperLarge", SDTAArch64WrapperLarge>;
-
-def SDTAArch64WrapperSmall : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
- SDTCisSameAs<1, 2>,
- SDTCisVT<3, i32>,
- SDTCisPtrTy<0>]>;
-
-def A64WrapperSmall :SDNode<"AArch64ISD::WrapperSmall", SDTAArch64WrapperSmall>;
-
-
-def SDTAArch64GOTLoad : SDTypeProfile<1, 1, [SDTCisPtrTy<0>, SDTCisPtrTy<1>]>;
-def A64GOTLoad : SDNode<"AArch64ISD::GOTLoad", SDTAArch64GOTLoad,
- [SDNPHasChain]>;
-
-
-// (A64BFI LHS, RHS, LSB, Width)
-def SDTA64BFI : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>,
- SDTCisSameAs<1, 2>,
- SDTCisVT<3, i64>,
- SDTCisVT<4, i64>]>;
-
-def A64Bfi : SDNode<"AArch64ISD::BFI", SDTA64BFI>;
-
-// (A64EXTR HiReg, LoReg, LSB)
-def SDTA64EXTR : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
- SDTCisVT<3, i64>]>;
-def A64Extr : SDNode<"AArch64ISD::EXTR", SDTA64EXTR>;
-
-// (A64[SU]BFX Field, ImmR, ImmS).
+// AArch64-specific DAG Nodes.
//
-// Note that ImmR and ImmS are already encoded for the actual instructions. The
-// more natural LSB and Width mix together to form ImmR and ImmS, something
-// which TableGen can't handle.
-def SDTA64BFX : SDTypeProfile<1, 3, [SDTCisVT<2, i64>, SDTCisVT<3, i64>]>;
-def A64Sbfx : SDNode<"AArch64ISD::SBFX", SDTA64BFX>;
-def A64Ubfx : SDNode<"AArch64ISD::UBFX", SDTA64BFX>;
+// SDTBinaryArithWithFlagsOut - RES1, FLAGS = op LHS, RHS
+def SDTBinaryArithWithFlagsOut : SDTypeProfile<2, 2,
+ [SDTCisSameAs<0, 2>,
+ SDTCisSameAs<0, 3>,
+ SDTCisInt<0>, SDTCisVT<1, i32>]>;
+
+// SDTBinaryArithWithFlagsIn - RES1, FLAGS = op LHS, RHS, FLAGS
+def SDTBinaryArithWithFlagsIn : SDTypeProfile<1, 3,
+ [SDTCisSameAs<0, 1>,
+ SDTCisSameAs<0, 2>,
+ SDTCisInt<0>,
+ SDTCisVT<3, i32>]>;
+
+// SDTBinaryArithWithFlagsInOut - RES1, FLAGS = op LHS, RHS, FLAGS
+def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
+ [SDTCisSameAs<0, 2>,
+ SDTCisSameAs<0, 3>,
+ SDTCisInt<0>,
+ SDTCisVT<1, i32>,
+ SDTCisVT<4, i32>]>;
+
+def SDT_AArch64Brcond : SDTypeProfile<0, 3,
+ [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>,
+ SDTCisVT<2, i32>]>;
+def SDT_AArch64cbz : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisVT<1, OtherVT>]>;
+def SDT_AArch64tbz : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>,
+ SDTCisVT<2, OtherVT>]>;
+
+
+def SDT_AArch64CSel : SDTypeProfile<1, 4,
+ [SDTCisSameAs<0, 1>,
+ SDTCisSameAs<0, 2>,
+ SDTCisInt<3>,
+ SDTCisVT<4, i32>]>;
+def SDT_AArch64FCmp : SDTypeProfile<0, 2,
+ [SDTCisFP<0>,
+ SDTCisSameAs<0, 1>]>;
+def SDT_AArch64Dup : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
+def SDT_AArch64DupLane : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<2>]>;
+def SDT_AArch64Zip : SDTypeProfile<1, 2, [SDTCisVec<0>,
+ SDTCisSameAs<0, 1>,
+ SDTCisSameAs<0, 2>]>;
+def SDT_AArch64MOVIedit : SDTypeProfile<1, 1, [SDTCisInt<1>]>;
+def SDT_AArch64MOVIshift : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
+def SDT_AArch64vecimm : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
+ SDTCisInt<2>, SDTCisInt<3>]>;
+def SDT_AArch64UnaryVec: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
+def SDT_AArch64ExtVec: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>, SDTCisInt<3>]>;
+def SDT_AArch64vshift : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>, SDTCisInt<2>]>;
+
+def SDT_AArch64unvec : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
+def SDT_AArch64fcmpz : SDTypeProfile<1, 1, []>;
+def SDT_AArch64fcmp : SDTypeProfile<1, 2, [SDTCisSameAs<1,2>]>;
+def SDT_AArch64binvec : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>]>;
+def SDT_AArch64trivec : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>,
+ SDTCisSameAs<0,3>]>;
+def SDT_AArch64TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>]>;
+def SDT_AArch64PREFETCH : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>]>;
+
+def SDT_AArch64ITOF : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>;
+
+def SDT_AArch64TLSDescCall : SDTypeProfile<0, -2, [SDTCisPtrTy<0>,
+ SDTCisPtrTy<1>]>;
+def SDT_AArch64WrapperLarge : SDTypeProfile<1, 4,
+ [SDTCisVT<0, i64>, SDTCisVT<1, i32>,
+ SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>,
+ SDTCisSameAs<1, 4>]>;
+
+
+// Node definitions.
+def AArch64adrp : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>;
+def AArch64addlow : SDNode<"AArch64ISD::ADDlow", SDTIntBinOp, []>;
+def AArch64LOADgot : SDNode<"AArch64ISD::LOADgot", SDTIntUnaryOp>;
+def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START",
+ SDCallSeqStart<[ SDTCisVT<0, i32> ]>,
+ [SDNPHasChain, SDNPOutGlue]>;
+def AArch64callseq_end : SDNode<"ISD::CALLSEQ_END",
+ SDCallSeqEnd<[ SDTCisVT<0, i32>,
+ SDTCisVT<1, i32> ]>,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+def AArch64call : SDNode<"AArch64ISD::CALL",
+ SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
+ SDNPVariadic]>;
+def AArch64brcond : SDNode<"AArch64ISD::BRCOND", SDT_AArch64Brcond,
+ [SDNPHasChain]>;
+def AArch64cbz : SDNode<"AArch64ISD::CBZ", SDT_AArch64cbz,
+ [SDNPHasChain]>;
+def AArch64cbnz : SDNode<"AArch64ISD::CBNZ", SDT_AArch64cbz,
+ [SDNPHasChain]>;
+def AArch64tbz : SDNode<"AArch64ISD::TBZ", SDT_AArch64tbz,
+ [SDNPHasChain]>;
+def AArch64tbnz : SDNode<"AArch64ISD::TBNZ", SDT_AArch64tbz,
+ [SDNPHasChain]>;
+
+
+def AArch64csel : SDNode<"AArch64ISD::CSEL", SDT_AArch64CSel>;
+def AArch64csinv : SDNode<"AArch64ISD::CSINV", SDT_AArch64CSel>;
+def AArch64csneg : SDNode<"AArch64ISD::CSNEG", SDT_AArch64CSel>;
+def AArch64csinc : SDNode<"AArch64ISD::CSINC", SDT_AArch64CSel>;
+def AArch64retflag : SDNode<"AArch64ISD::RET_FLAG", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
+def AArch64adc : SDNode<"AArch64ISD::ADC", SDTBinaryArithWithFlagsIn >;
+def AArch64sbc : SDNode<"AArch64ISD::SBC", SDTBinaryArithWithFlagsIn>;
+def AArch64add_flag : SDNode<"AArch64ISD::ADDS", SDTBinaryArithWithFlagsOut,
+ [SDNPCommutative]>;
+def AArch64sub_flag : SDNode<"AArch64ISD::SUBS", SDTBinaryArithWithFlagsOut>;
+def AArch64and_flag : SDNode<"AArch64ISD::ANDS", SDTBinaryArithWithFlagsOut,
+ [SDNPCommutative]>;
+def AArch64adc_flag : SDNode<"AArch64ISD::ADCS", SDTBinaryArithWithFlagsInOut>;
+def AArch64sbc_flag : SDNode<"AArch64ISD::SBCS", SDTBinaryArithWithFlagsInOut>;
+
+def AArch64threadpointer : SDNode<"AArch64ISD::THREAD_POINTER", SDTPtrLeaf>;
+
+def AArch64fcmp : SDNode<"AArch64ISD::FCMP", SDT_AArch64FCmp>;
+
+def AArch64fmax : SDNode<"AArch64ISD::FMAX", SDTFPBinOp>;
+def AArch64fmin : SDNode<"AArch64ISD::FMIN", SDTFPBinOp>;
+
+def AArch64dup : SDNode<"AArch64ISD::DUP", SDT_AArch64Dup>;
+def AArch64duplane8 : SDNode<"AArch64ISD::DUPLANE8", SDT_AArch64DupLane>;
+def AArch64duplane16 : SDNode<"AArch64ISD::DUPLANE16", SDT_AArch64DupLane>;
+def AArch64duplane32 : SDNode<"AArch64ISD::DUPLANE32", SDT_AArch64DupLane>;
+def AArch64duplane64 : SDNode<"AArch64ISD::DUPLANE64", SDT_AArch64DupLane>;
+
+def AArch64zip1 : SDNode<"AArch64ISD::ZIP1", SDT_AArch64Zip>;
+def AArch64zip2 : SDNode<"AArch64ISD::ZIP2", SDT_AArch64Zip>;
+def AArch64uzp1 : SDNode<"AArch64ISD::UZP1", SDT_AArch64Zip>;
+def AArch64uzp2 : SDNode<"AArch64ISD::UZP2", SDT_AArch64Zip>;
+def AArch64trn1 : SDNode<"AArch64ISD::TRN1", SDT_AArch64Zip>;
+def AArch64trn2 : SDNode<"AArch64ISD::TRN2", SDT_AArch64Zip>;
+
+def AArch64movi_edit : SDNode<"AArch64ISD::MOVIedit", SDT_AArch64MOVIedit>;
+def AArch64movi_shift : SDNode<"AArch64ISD::MOVIshift", SDT_AArch64MOVIshift>;
+def AArch64movi_msl : SDNode<"AArch64ISD::MOVImsl", SDT_AArch64MOVIshift>;
+def AArch64mvni_shift : SDNode<"AArch64ISD::MVNIshift", SDT_AArch64MOVIshift>;
+def AArch64mvni_msl : SDNode<"AArch64ISD::MVNImsl", SDT_AArch64MOVIshift>;
+def AArch64movi : SDNode<"AArch64ISD::MOVI", SDT_AArch64MOVIedit>;
+def AArch64fmov : SDNode<"AArch64ISD::FMOV", SDT_AArch64MOVIedit>;
+
+def AArch64rev16 : SDNode<"AArch64ISD::REV16", SDT_AArch64UnaryVec>;
+def AArch64rev32 : SDNode<"AArch64ISD::REV32", SDT_AArch64UnaryVec>;
+def AArch64rev64 : SDNode<"AArch64ISD::REV64", SDT_AArch64UnaryVec>;
+def AArch64ext : SDNode<"AArch64ISD::EXT", SDT_AArch64ExtVec>;
+
+def AArch64vashr : SDNode<"AArch64ISD::VASHR", SDT_AArch64vshift>;
+def AArch64vlshr : SDNode<"AArch64ISD::VLSHR", SDT_AArch64vshift>;
+def AArch64vshl : SDNode<"AArch64ISD::VSHL", SDT_AArch64vshift>;
+def AArch64sqshli : SDNode<"AArch64ISD::SQSHL_I", SDT_AArch64vshift>;
+def AArch64uqshli : SDNode<"AArch64ISD::UQSHL_I", SDT_AArch64vshift>;
+def AArch64sqshlui : SDNode<"AArch64ISD::SQSHLU_I", SDT_AArch64vshift>;
+def AArch64srshri : SDNode<"AArch64ISD::SRSHR_I", SDT_AArch64vshift>;
+def AArch64urshri : SDNode<"AArch64ISD::URSHR_I", SDT_AArch64vshift>;
+
+def AArch64not: SDNode<"AArch64ISD::NOT", SDT_AArch64unvec>;
+def AArch64bit: SDNode<"AArch64ISD::BIT", SDT_AArch64trivec>;
+def AArch64bsl: SDNode<"AArch64ISD::BSL", SDT_AArch64trivec>;
+
+def AArch64cmeq: SDNode<"AArch64ISD::CMEQ", SDT_AArch64binvec>;
+def AArch64cmge: SDNode<"AArch64ISD::CMGE", SDT_AArch64binvec>;
+def AArch64cmgt: SDNode<"AArch64ISD::CMGT", SDT_AArch64binvec>;
+def AArch64cmhi: SDNode<"AArch64ISD::CMHI", SDT_AArch64binvec>;
+def AArch64cmhs: SDNode<"AArch64ISD::CMHS", SDT_AArch64binvec>;
+
+def AArch64fcmeq: SDNode<"AArch64ISD::FCMEQ", SDT_AArch64fcmp>;
+def AArch64fcmge: SDNode<"AArch64ISD::FCMGE", SDT_AArch64fcmp>;
+def AArch64fcmgt: SDNode<"AArch64ISD::FCMGT", SDT_AArch64fcmp>;
+
+def AArch64cmeqz: SDNode<"AArch64ISD::CMEQz", SDT_AArch64unvec>;
+def AArch64cmgez: SDNode<"AArch64ISD::CMGEz", SDT_AArch64unvec>;
+def AArch64cmgtz: SDNode<"AArch64ISD::CMGTz", SDT_AArch64unvec>;
+def AArch64cmlez: SDNode<"AArch64ISD::CMLEz", SDT_AArch64unvec>;
+def AArch64cmltz: SDNode<"AArch64ISD::CMLTz", SDT_AArch64unvec>;
+def AArch64cmtst : PatFrag<(ops node:$LHS, node:$RHS),
+ (AArch64not (AArch64cmeqz (and node:$LHS, node:$RHS)))>;
+
+def AArch64fcmeqz: SDNode<"AArch64ISD::FCMEQz", SDT_AArch64fcmpz>;
+def AArch64fcmgez: SDNode<"AArch64ISD::FCMGEz", SDT_AArch64fcmpz>;
+def AArch64fcmgtz: SDNode<"AArch64ISD::FCMGTz", SDT_AArch64fcmpz>;
+def AArch64fcmlez: SDNode<"AArch64ISD::FCMLEz", SDT_AArch64fcmpz>;
+def AArch64fcmltz: SDNode<"AArch64ISD::FCMLTz", SDT_AArch64fcmpz>;
+
+def AArch64bici: SDNode<"AArch64ISD::BICi", SDT_AArch64vecimm>;
+def AArch64orri: SDNode<"AArch64ISD::ORRi", SDT_AArch64vecimm>;
+
+def AArch64neg : SDNode<"AArch64ISD::NEG", SDT_AArch64unvec>;
+
+def AArch64tcret: SDNode<"AArch64ISD::TC_RETURN", SDT_AArch64TCRET,
+ [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
+
+def AArch64Prefetch : SDNode<"AArch64ISD::PREFETCH", SDT_AArch64PREFETCH,
+ [SDNPHasChain, SDNPSideEffect]>;
+
+def AArch64sitof: SDNode<"AArch64ISD::SITOF", SDT_AArch64ITOF>;
+def AArch64uitof: SDNode<"AArch64ISD::UITOF", SDT_AArch64ITOF>;
+
+def AArch64tlsdesc_call : SDNode<"AArch64ISD::TLSDESC_CALL",
+ SDT_AArch64TLSDescCall,
+ [SDNPInGlue, SDNPOutGlue, SDNPHasChain,
+ SDNPVariadic]>;
+
+def AArch64WrapperLarge : SDNode<"AArch64ISD::WrapperLarge",
+ SDT_AArch64WrapperLarge>;
-class BinOpFrag<dag res> : PatFrag<(ops node:$LHS, node:$RHS), res>;
//===----------------------------------------------------------------------===//
-// Call sequence pseudo-instructions
-//===----------------------------------------------------------------------===//
-
-
-def SDT_AArch64Call : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>;
-def AArch64Call : SDNode<"AArch64ISD::Call", SDT_AArch64Call,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, SDNPVariadic]>;
-def AArch64tcret : SDNode<"AArch64ISD::TC_RETURN", SDT_AArch64Call,
- [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
+//===----------------------------------------------------------------------===//
-// The TLSDESCCALL node is a variant call which goes to an indirectly calculated
-// destination but needs a relocation against a fixed symbol. As such it has two
-// certain operands: the callee and the relocated variable.
+// AArch64 Instruction Predicate Definitions.
//
-// The TLS ABI only allows it to be selected to a BLR instructin (with
-// appropriate relocation).
-def SDTTLSDescCall : SDTypeProfile<0, -2, [SDTCisPtrTy<0>, SDTCisPtrTy<1>]>;
-
-def A64tlsdesc_blr : SDNode<"AArch64ISD::TLSDESCCALL", SDTTLSDescCall,
- [SDNPInGlue, SDNPOutGlue, SDNPHasChain,
- SDNPVariadic]>;
-
-
-def SDT_AArch64CallSeqStart : SDCallSeqStart<[ SDTCisPtrTy<0> ]>;
-def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_AArch64CallSeqStart,
- [SDNPHasChain, SDNPOutGlue]>;
-
-def SDT_AArch64CallSeqEnd : SDCallSeqEnd<[ SDTCisPtrTy<0>, SDTCisPtrTy<1> ]>;
-def AArch64callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_AArch64CallSeqEnd,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
-
-
-
-// These pseudo-instructions have special semantics by virtue of being passed to
-// the InstrInfo constructor. CALLSEQ_START/CALLSEQ_END are produced by
-// LowerCall to (in our case) tell the back-end about stack adjustments for
-// arguments passed on the stack. Here we select those markers to
-// pseudo-instructions which explicitly set the stack, and finally in the
-// RegisterInfo we convert them to a true stack adjustment.
-let Defs = [XSP], Uses = [XSP] in {
- def ADJCALLSTACKDOWN : PseudoInst<(outs), (ins i64imm:$amt),
- [(AArch64callseq_start timm:$amt)]>;
+def HasZCZ : Predicate<"Subtarget->hasZeroCycleZeroing()">;
+def NoZCZ : Predicate<"!Subtarget->hasZeroCycleZeroing()">;
+def IsDarwin : Predicate<"Subtarget->isTargetDarwin()">;
+def IsNotDarwin: Predicate<"!Subtarget->isTargetDarwin()">;
+def ForCodeSize : Predicate<"ForCodeSize">;
+def NotForCodeSize : Predicate<"!ForCodeSize">;
- def ADJCALLSTACKUP : PseudoInst<(outs), (ins i64imm:$amt1, i64imm:$amt2),
- [(AArch64callseq_end timm:$amt1, timm:$amt2)]>;
-}
+include "AArch64InstrFormats.td"
//===----------------------------------------------------------------------===//
-// Atomic operation pseudo-instructions
-//===----------------------------------------------------------------------===//
-
-// These get selected from C++ code as a pretty much direct translation from the
-// generic DAG nodes. The one exception is the AtomicOrdering is added as an
-// operand so that the eventual lowering can make use of it and choose
-// acquire/release operations when required.
-
-let usesCustomInserter = 1, hasCtrlDep = 1, mayLoad = 1, mayStore = 1 in {
-multiclass AtomicSizes {
- def _I8 : PseudoInst<(outs GPR32:$dst),
- (ins GPR64xsp:$ptr, GPR32:$incr, i32imm:$ordering), []>;
- def _I16 : PseudoInst<(outs GPR32:$dst),
- (ins GPR64xsp:$ptr, GPR32:$incr, i32imm:$ordering), []>;
- def _I32 : PseudoInst<(outs GPR32:$dst),
- (ins GPR64xsp:$ptr, GPR32:$incr, i32imm:$ordering), []>;
- def _I64 : PseudoInst<(outs GPR64:$dst),
- (ins GPR64xsp:$ptr, GPR64:$incr, i32imm:$ordering), []>;
-}
-}
-
-defm ATOMIC_LOAD_ADD : AtomicSizes;
-defm ATOMIC_LOAD_SUB : AtomicSizes;
-defm ATOMIC_LOAD_AND : AtomicSizes;
-defm ATOMIC_LOAD_OR : AtomicSizes;
-defm ATOMIC_LOAD_XOR : AtomicSizes;
-defm ATOMIC_LOAD_NAND : AtomicSizes;
-defm ATOMIC_SWAP : AtomicSizes;
-let Defs = [NZCV] in {
- // These operations need a CMP to calculate the correct value
- defm ATOMIC_LOAD_MIN : AtomicSizes;
- defm ATOMIC_LOAD_MAX : AtomicSizes;
- defm ATOMIC_LOAD_UMIN : AtomicSizes;
- defm ATOMIC_LOAD_UMAX : AtomicSizes;
-}
-
-class AtomicCmpSwap<RegisterClass GPRData>
- : PseudoInst<(outs GPRData:$dst),
- (ins GPR64xsp:$ptr, GPRData:$old, GPRData:$new,
- i32imm:$ordering), []> {
- let usesCustomInserter = 1;
- let hasCtrlDep = 1;
- let mayLoad = 1;
- let mayStore = 1;
- let Defs = [NZCV];
-}
-
-def ATOMIC_CMP_SWAP_I8 : AtomicCmpSwap<GPR32>;
-def ATOMIC_CMP_SWAP_I16 : AtomicCmpSwap<GPR32>;
-def ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap<GPR32>;
-def ATOMIC_CMP_SWAP_I64 : AtomicCmpSwap<GPR64>;
//===----------------------------------------------------------------------===//
-// Add-subtract (extended register) instructions
+// Miscellaneous instructions.
//===----------------------------------------------------------------------===//
-// Contains: ADD, ADDS, SUB, SUBS + aliases CMN, CMP
-
-// The RHS of these operations is conceptually a sign/zero-extended
-// register, optionally shifted left by 1-4. The extension can be a
-// NOP (e.g. "sxtx" sign-extending a 64-bit register to 64-bits) but
-// must be specified with one exception:
-
-// If one of the registers is sp/wsp then LSL is an alias for UXTW in
-// 32-bit instructions and UXTX in 64-bit versions, the shift amount
-// is not optional in that case (but can explicitly be 0), and the
-// entire suffix can be skipped (e.g. "add sp, x3, x2").
-
-multiclass extend_operands<string PREFIX, string Diag> {
- def _asmoperand : AsmOperandClass {
- let Name = PREFIX;
- let RenderMethod = "addRegExtendOperands";
- let PredicateMethod = "isRegExtend<A64SE::" # PREFIX # ">";
- let DiagnosticType = "AddSubRegExtend" # Diag;
- }
-
- def _operand : Operand<i64>,
- ImmLeaf<i64, [{ return Imm >= 0 && Imm <= 4; }]> {
- let PrintMethod = "printRegExtendOperand<A64SE::" # PREFIX # ">";
- let DecoderMethod = "DecodeRegExtendOperand";
- let ParserMatchClass = !cast<AsmOperandClass>(PREFIX # "_asmoperand");
- }
-}
-defm UXTB : extend_operands<"UXTB", "Small">;
-defm UXTH : extend_operands<"UXTH", "Small">;
-defm UXTW : extend_operands<"UXTW", "Small">;
-defm UXTX : extend_operands<"UXTX", "Large">;
-defm SXTB : extend_operands<"SXTB", "Small">;
-defm SXTH : extend_operands<"SXTH", "Small">;
-defm SXTW : extend_operands<"SXTW", "Small">;
-defm SXTX : extend_operands<"SXTX", "Large">;
-
-def LSL_extasmoperand : AsmOperandClass {
- let Name = "RegExtendLSL";
- let RenderMethod = "addRegExtendOperands";
- let DiagnosticType = "AddSubRegExtendLarge";
-}
-
-def LSL_extoperand : Operand<i64> {
- let ParserMatchClass = LSL_extasmoperand;
-}
-
-
-// The patterns for various sign-extensions are a little ugly and
-// non-uniform because everything has already been promoted to the
-// legal i64 and i32 types. We'll wrap the various variants up in a
-// class for use later.
-class extend_types {
- dag uxtb; dag uxth; dag uxtw; dag uxtx;
- dag sxtb; dag sxth; dag sxtw; dag sxtx;
- ValueType ty;
- RegisterClass GPR;
-}
-
-def extends_to_i64 : extend_types {
- let uxtb = (and (anyext i32:$Rm), 255);
- let uxth = (and (anyext i32:$Rm), 65535);
- let uxtw = (zext i32:$Rm);
- let uxtx = (i64 $Rm);
-
- let sxtb = (sext_inreg (anyext i32:$Rm), i8);
- let sxth = (sext_inreg (anyext i32:$Rm), i16);
- let sxtw = (sext i32:$Rm);
- let sxtx = (i64 $Rm);
-
- let ty = i64;
- let GPR = GPR64xsp;
-}
-
-
-def extends_to_i32 : extend_types {
- let uxtb = (and i32:$Rm, 255);
- let uxth = (and i32:$Rm, 65535);
- let uxtw = (i32 i32:$Rm);
- let uxtx = (i32 i32:$Rm);
-
- let sxtb = (sext_inreg i32:$Rm, i8);
- let sxth = (sext_inreg i32:$Rm, i16);
- let sxtw = (i32 i32:$Rm);
- let sxtx = (i32 i32:$Rm);
-
- let ty = i32;
- let GPR = GPR32wsp;
-}
-
-// Now, six of the extensions supported are easy and uniform: if the source size
-// is 32-bits or less, then Rm is always a 32-bit register. We'll instantiate
-// those instructions in one block.
-
-// The uxtx/sxtx could potentially be merged in, but three facts dissuaded me:
-// + It would break the naming scheme: either ADDxx_uxtx or ADDww_uxtx would
-// be impossible.
-// + Patterns are very different as well.
-// + Passing different registers would be ugly (more fields in extend_types
-// would probably be the best option).
-multiclass addsub_exts<bit sf, bit op, bit S, string asmop,
- SDPatternOperator opfrag,
- dag outs, extend_types exts> {
- def w_uxtb : A64I_addsubext<sf, op, S, 0b00, 0b000,
- outs, (ins exts.GPR:$Rn, GPR32:$Rm, UXTB_operand:$Imm3),
- !strconcat(asmop, "$Rn, $Rm, $Imm3"),
- [(opfrag exts.ty:$Rn, (shl exts.uxtb, UXTB_operand:$Imm3))],
- NoItinerary>;
- def w_uxth : A64I_addsubext<sf, op, S, 0b00, 0b001,
- outs, (ins exts.GPR:$Rn, GPR32:$Rm, UXTH_operand:$Imm3),
- !strconcat(asmop, "$Rn, $Rm, $Imm3"),
- [(opfrag exts.ty:$Rn, (shl exts.uxth, UXTH_operand:$Imm3))],
- NoItinerary>;
- def w_uxtw : A64I_addsubext<sf, op, S, 0b00, 0b010,
- outs, (ins exts.GPR:$Rn, GPR32:$Rm, UXTW_operand:$Imm3),
- !strconcat(asmop, "$Rn, $Rm, $Imm3"),
- [(opfrag exts.ty:$Rn, (shl exts.uxtw, UXTW_operand:$Imm3))],
- NoItinerary>;
-
- def w_sxtb : A64I_addsubext<sf, op, S, 0b00, 0b100,
- outs, (ins exts.GPR:$Rn, GPR32:$Rm, SXTB_operand:$Imm3),
- !strconcat(asmop, "$Rn, $Rm, $Imm3"),
- [(opfrag exts.ty:$Rn, (shl exts.sxtb, SXTB_operand:$Imm3))],
- NoItinerary>;
- def w_sxth : A64I_addsubext<sf, op, S, 0b00, 0b101,
- outs, (ins exts.GPR:$Rn, GPR32:$Rm, SXTH_operand:$Imm3),
- !strconcat(asmop, "$Rn, $Rm, $Imm3"),
- [(opfrag exts.ty:$Rn, (shl exts.sxth, SXTH_operand:$Imm3))],
- NoItinerary>;
- def w_sxtw : A64I_addsubext<sf, op, S, 0b00, 0b110,
- outs, (ins exts.GPR:$Rn, GPR32:$Rm, SXTW_operand:$Imm3),
- !strconcat(asmop, "$Rn, $Rm, $Imm3"),
- [(opfrag exts.ty:$Rn, (shl exts.sxtw, SXTW_operand:$Imm3))],
- NoItinerary>;
-}
-
-// These two could be merge in with the above, but their patterns aren't really
-// necessary and the naming-scheme would necessarily break:
-multiclass addsub_xxtx<bit op, bit S, string asmop, SDPatternOperator opfrag,
- dag outs> {
- def x_uxtx : A64I_addsubext<0b1, op, S, 0b00, 0b011,
- outs,
- (ins GPR64xsp:$Rn, GPR64:$Rm, UXTX_operand:$Imm3),
- !strconcat(asmop, "$Rn, $Rm, $Imm3"),
- [(opfrag i64:$Rn, (shl i64:$Rm, UXTX_operand:$Imm3))],
- NoItinerary>;
-
- def x_sxtx : A64I_addsubext<0b1, op, S, 0b00, 0b111,
- outs,
- (ins GPR64xsp:$Rn, GPR64:$Rm, SXTX_operand:$Imm3),
- !strconcat(asmop, "$Rn, $Rm, $Imm3"),
- [/* No Pattern: same as uxtx */],
- NoItinerary>;
-}
-
-multiclass addsub_wxtx<bit op, bit S, string asmop, dag outs> {
- def w_uxtx : A64I_addsubext<0b0, op, S, 0b00, 0b011,
- outs,
- (ins GPR32wsp:$Rn, GPR32:$Rm, UXTX_operand:$Imm3),
- !strconcat(asmop, "$Rn, $Rm, $Imm3"),
- [/* No pattern: probably same as uxtw */],
- NoItinerary>;
-
- def w_sxtx : A64I_addsubext<0b0, op, S, 0b00, 0b111,
- outs,
- (ins GPR32wsp:$Rn, GPR32:$Rm, SXTX_operand:$Imm3),
- !strconcat(asmop, "$Rn, $Rm, $Imm3"),
- [/* No Pattern: probably same as uxtw */],
- NoItinerary>;
-}
-
-class SetRD<RegisterClass RC, SDPatternOperator op>
- : PatFrag<(ops node:$lhs, node:$rhs), (set RC:$Rd, (op node:$lhs, node:$rhs))>;
-class SetNZCV<SDPatternOperator op>
- : PatFrag<(ops node:$lhs, node:$rhs), (set NZCV, (op node:$lhs, node:$rhs))>;
-
-defm ADDxx :addsub_exts<0b1, 0b0, 0b0, "add\t$Rd, ", SetRD<GPR64xsp, add>,
- (outs GPR64xsp:$Rd), extends_to_i64>,
- addsub_xxtx< 0b0, 0b0, "add\t$Rd, ", SetRD<GPR64xsp, add>,
- (outs GPR64xsp:$Rd)>;
-defm ADDww :addsub_exts<0b0, 0b0, 0b0, "add\t$Rd, ", SetRD<GPR32wsp, add>,
- (outs GPR32wsp:$Rd), extends_to_i32>,
- addsub_wxtx< 0b0, 0b0, "add\t$Rd, ",
- (outs GPR32wsp:$Rd)>;
-defm SUBxx :addsub_exts<0b1, 0b1, 0b0, "sub\t$Rd, ", SetRD<GPR64xsp, sub>,
- (outs GPR64xsp:$Rd), extends_to_i64>,
- addsub_xxtx< 0b1, 0b0, "sub\t$Rd, ", SetRD<GPR64xsp, sub>,
- (outs GPR64xsp:$Rd)>;
-defm SUBww :addsub_exts<0b0, 0b1, 0b0, "sub\t$Rd, ", SetRD<GPR32wsp, sub>,
- (outs GPR32wsp:$Rd), extends_to_i32>,
- addsub_wxtx< 0b1, 0b0, "sub\t$Rd, ",
- (outs GPR32wsp:$Rd)>;
-
-let Defs = [NZCV] in {
-defm ADDSxx :addsub_exts<0b1, 0b0, 0b1, "adds\t$Rd, ", SetRD<GPR64, addc>,
- (outs GPR64:$Rd), extends_to_i64>,
- addsub_xxtx< 0b0, 0b1, "adds\t$Rd, ", SetRD<GPR64, addc>,
- (outs GPR64:$Rd)>;
-defm ADDSww :addsub_exts<0b0, 0b0, 0b1, "adds\t$Rd, ", SetRD<GPR32, addc>,
- (outs GPR32:$Rd), extends_to_i32>,
- addsub_wxtx< 0b0, 0b1, "adds\t$Rd, ",
- (outs GPR32:$Rd)>;
-defm SUBSxx :addsub_exts<0b1, 0b1, 0b1, "subs\t$Rd, ", SetRD<GPR64, subc>,
- (outs GPR64:$Rd), extends_to_i64>,
- addsub_xxtx< 0b1, 0b1, "subs\t$Rd, ", SetRD<GPR64, subc>,
- (outs GPR64:$Rd)>;
-defm SUBSww :addsub_exts<0b0, 0b1, 0b1, "subs\t$Rd, ", SetRD<GPR32, subc>,
- (outs GPR32:$Rd), extends_to_i32>,
- addsub_wxtx< 0b1, 0b1, "subs\t$Rd, ",
- (outs GPR32:$Rd)>;
-
-
-let Rd = 0b11111, isCompare = 1 in {
-defm CMNx : addsub_exts<0b1, 0b0, 0b1, "cmn\t", SetNZCV<A64cmn>,
- (outs), extends_to_i64>,
- addsub_xxtx< 0b0, 0b1, "cmn\t", SetNZCV<A64cmn>, (outs)>;
-defm CMNw : addsub_exts<0b0, 0b0, 0b1, "cmn\t", SetNZCV<A64cmn>,
- (outs), extends_to_i32>,
- addsub_wxtx< 0b0, 0b1, "cmn\t", (outs)>;
-defm CMPx : addsub_exts<0b1, 0b1, 0b1, "cmp\t", SetNZCV<A64cmp>,
- (outs), extends_to_i64>,
- addsub_xxtx< 0b1, 0b1, "cmp\t", SetNZCV<A64cmp>, (outs)>;
-defm CMPw : addsub_exts<0b0, 0b1, 0b1, "cmp\t", SetNZCV<A64cmp>,
- (outs), extends_to_i32>,
- addsub_wxtx< 0b1, 0b1, "cmp\t", (outs)>;
-}
-}
-
-// Now patterns for the operation without a shift being needed. No patterns are
-// created for uxtx/sxtx since they're non-uniform and it's expected that
-// add/sub (shifted register) will handle those cases anyway.
-multiclass addsubext_noshift_patterns<string prefix, SDPatternOperator nodeop,
- extend_types exts> {
- def : Pat<(nodeop exts.ty:$Rn, exts.uxtb),
- (!cast<Instruction>(prefix # "w_uxtb") $Rn, $Rm, 0)>;
- def : Pat<(nodeop exts.ty:$Rn, exts.uxth),
- (!cast<Instruction>(prefix # "w_uxth") $Rn, $Rm, 0)>;
- def : Pat<(nodeop exts.ty:$Rn, exts.uxtw),
- (!cast<Instruction>(prefix # "w_uxtw") $Rn, $Rm, 0)>;
-
- def : Pat<(nodeop exts.ty:$Rn, exts.sxtb),
- (!cast<Instruction>(prefix # "w_sxtb") $Rn, $Rm, 0)>;
- def : Pat<(nodeop exts.ty:$Rn, exts.sxth),
- (!cast<Instruction>(prefix # "w_sxth") $Rn, $Rm, 0)>;
- def : Pat<(nodeop exts.ty:$Rn, exts.sxtw),
- (!cast<Instruction>(prefix # "w_sxtw") $Rn, $Rm, 0)>;
-}
-
-defm : addsubext_noshift_patterns<"ADDxx", add, extends_to_i64>;
-defm : addsubext_noshift_patterns<"ADDww", add, extends_to_i32>;
-defm : addsubext_noshift_patterns<"SUBxx", sub, extends_to_i64>;
-defm : addsubext_noshift_patterns<"SUBww", sub, extends_to_i32>;
-
-defm : addsubext_noshift_patterns<"CMNx", A64cmn, extends_to_i64>;
-defm : addsubext_noshift_patterns<"CMNw", A64cmn, extends_to_i32>;
-defm : addsubext_noshift_patterns<"CMPx", A64cmp, extends_to_i64>;
-defm : addsubext_noshift_patterns<"CMPw", A64cmp, extends_to_i32>;
-
-// An extend of "lsl #imm" is valid if and only if one of Rn and Rd is
-// sp/wsp. It is synonymous with uxtx/uxtw depending on the size of the
-// operation. Also permitted in this case is complete omission of the argument,
-// which implies "lsl #0".
-multiclass lsl_aliases<string asmop, Instruction inst, RegisterClass GPR_Rd,
- RegisterClass GPR_Rn, RegisterClass GPR_Rm> {
- def : InstAlias<!strconcat(asmop, " $Rd, $Rn, $Rm"),
- (inst GPR_Rd:$Rd, GPR_Rn:$Rn, GPR_Rm:$Rm, 0)>;
-
- def : InstAlias<!strconcat(asmop, " $Rd, $Rn, $Rm, $LSL"),
- (inst GPR_Rd:$Rd, GPR_Rn:$Rn, GPR_Rm:$Rm, LSL_extoperand:$LSL)>;
-
-}
-
-defm : lsl_aliases<"add", ADDxxx_uxtx, Rxsp, GPR64xsp, GPR64>;
-defm : lsl_aliases<"add", ADDxxx_uxtx, GPR64xsp, Rxsp, GPR64>;
-defm : lsl_aliases<"add", ADDwww_uxtw, Rwsp, GPR32wsp, GPR32>;
-defm : lsl_aliases<"add", ADDwww_uxtw, GPR32wsp, Rwsp, GPR32>;
-defm : lsl_aliases<"sub", SUBxxx_uxtx, Rxsp, GPR64xsp, GPR64>;
-defm : lsl_aliases<"sub", SUBxxx_uxtx, GPR64xsp, Rxsp, GPR64>;
-defm : lsl_aliases<"sub", SUBwww_uxtw, Rwsp, GPR32wsp, GPR32>;
-defm : lsl_aliases<"sub", SUBwww_uxtw, GPR32wsp, Rwsp, GPR32>;
-
-// Rd cannot be sp for flag-setting variants so only half of the aliases are
-// needed.
-defm : lsl_aliases<"adds", ADDSxxx_uxtx, GPR64, Rxsp, GPR64>;
-defm : lsl_aliases<"adds", ADDSwww_uxtw, GPR32, Rwsp, GPR32>;
-defm : lsl_aliases<"subs", SUBSxxx_uxtx, GPR64, Rxsp, GPR64>;
-defm : lsl_aliases<"subs", SUBSwww_uxtw, GPR32, Rwsp, GPR32>;
-
-// CMP unfortunately has to be different because the instruction doesn't have a
-// dest register.
-multiclass cmp_lsl_aliases<string asmop, Instruction inst,
- RegisterClass GPR_Rn, RegisterClass GPR_Rm> {
- def : InstAlias<!strconcat(asmop, " $Rn, $Rm"),
- (inst GPR_Rn:$Rn, GPR_Rm:$Rm, 0)>;
-
- def : InstAlias<!strconcat(asmop, " $Rn, $Rm, $LSL"),
- (inst GPR_Rn:$Rn, GPR_Rm:$Rm, LSL_extoperand:$LSL)>;
-}
-
-defm : cmp_lsl_aliases<"cmp", CMPxx_uxtx, Rxsp, GPR64>;
-defm : cmp_lsl_aliases<"cmp", CMPww_uxtw, Rwsp, GPR32>;
-defm : cmp_lsl_aliases<"cmn", CMNxx_uxtx, Rxsp, GPR64>;
-defm : cmp_lsl_aliases<"cmn", CMNww_uxtw, Rwsp, GPR32>;
+let Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1 in {
+def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt),
+ [(AArch64callseq_start timm:$amt)]>;
+def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
+ [(AArch64callseq_end timm:$amt1, timm:$amt2)]>;
+} // Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1
+
+let isReMaterializable = 1, isCodeGenOnly = 1 in {
+// FIXME: The following pseudo instructions are only needed because remat
+// cannot handle multiple instructions. When that changes, they can be
+// removed, along with the AArch64Wrapper node.
+
+let AddedComplexity = 10 in
+def LOADgot : Pseudo<(outs GPR64:$dst), (ins i64imm:$addr),
+ [(set GPR64:$dst, (AArch64LOADgot tglobaladdr:$addr))]>,
+ Sched<[WriteLDAdr]>;
+
+// The MOVaddr instruction should match only when the add is not folded
+// into a load or store address.
+def MOVaddr
+ : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
+ [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaladdr:$hi),
+ tglobaladdr:$low))]>,
+ Sched<[WriteAdrAdr]>;
+def MOVaddrJT
+ : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
+ [(set GPR64:$dst, (AArch64addlow (AArch64adrp tjumptable:$hi),
+ tjumptable:$low))]>,
+ Sched<[WriteAdrAdr]>;
+def MOVaddrCP
+ : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
+ [(set GPR64:$dst, (AArch64addlow (AArch64adrp tconstpool:$hi),
+ tconstpool:$low))]>,
+ Sched<[WriteAdrAdr]>;
+def MOVaddrBA
+ : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
+ [(set GPR64:$dst, (AArch64addlow (AArch64adrp tblockaddress:$hi),
+ tblockaddress:$low))]>,
+ Sched<[WriteAdrAdr]>;
+def MOVaddrTLS
+ : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
+ [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaltlsaddr:$hi),
+ tglobaltlsaddr:$low))]>,
+ Sched<[WriteAdrAdr]>;
+def MOVaddrEXT
+ : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
+ [(set GPR64:$dst, (AArch64addlow (AArch64adrp texternalsym:$hi),
+ texternalsym:$low))]>,
+ Sched<[WriteAdrAdr]>;
+
+} // isReMaterializable, isCodeGenOnly
+
+def : Pat<(AArch64LOADgot tglobaltlsaddr:$addr),
+ (LOADgot tglobaltlsaddr:$addr)>;
+
+def : Pat<(AArch64LOADgot texternalsym:$addr),
+ (LOADgot texternalsym:$addr)>;
+
+def : Pat<(AArch64LOADgot tconstpool:$addr),
+ (LOADgot tconstpool:$addr)>;
//===----------------------------------------------------------------------===//
-// Add-subtract (immediate) instructions
+// System instructions.
//===----------------------------------------------------------------------===//
-// Contains: ADD, ADDS, SUB, SUBS + aliases CMN, CMP, MOV
-
-// These instructions accept a 12-bit unsigned immediate, optionally shifted
-// left by 12 bits. Official assembly format specifies a 12 bit immediate with
-// one of "", "LSL #0", "LSL #12" supplementary operands.
-
-// There are surprisingly few ways to make this work with TableGen, so this
-// implementation has separate instructions for the "LSL #0" and "LSL #12"
-// variants.
-
-// If the MCInst retained a single combined immediate (which could be 0x123000,
-// for example) then both components (imm & shift) would have to be delegated to
-// a single assembly operand. This would entail a separate operand parser
-// (because the LSL would have to live in the same AArch64Operand as the
-// immediate to be accessible); assembly parsing is rather complex and
-// error-prone C++ code.
-//
-// By splitting the immediate, we can delegate handling this optional operand to
-// an InstAlias. Supporting functions to generate the correct MCInst are still
-// required, but these are essentially trivial and parsing can remain generic.
-//
-// Rejected plans with rationale:
-// ------------------------------
-//
-// In an ideal world you'de have two first class immediate operands (in
-// InOperandList, specifying imm12 and shift). Unfortunately this is not
-// selectable by any means I could discover.
-//
-// An Instruction with two MCOperands hidden behind a single entry in
-// InOperandList (expanded by ComplexPatterns and MIOperandInfo) was functional,
-// but required more C++ code to handle encoding/decoding. Parsing (the intended
-// main beneficiary) ended up equally complex because of the optional nature of
-// "LSL #0".
-//
-// Attempting to circumvent the need for a custom OperandParser above by giving
-// InstAliases without the "lsl #0" failed. add/sub could be accommodated but
-// the cmp/cmn aliases didn't use the MIOperandInfo to determine how operands
-// should be parsed: there was no way to accommodate an "lsl #12".
-
-let ParserMethod = "ParseImmWithLSLOperand",
- RenderMethod = "addImmWithLSLOperands" in {
- // Derived PredicateMethod fields are different for each
- def addsubimm_lsl0_asmoperand : AsmOperandClass {
- let Name = "AddSubImmLSL0";
- // If an error is reported against this operand, instruction could also be a
- // register variant.
- let DiagnosticType = "AddSubSecondSource";
- }
-
- def addsubimm_lsl12_asmoperand : AsmOperandClass {
- let Name = "AddSubImmLSL12";
- let DiagnosticType = "AddSubSecondSource";
- }
-}
-
-def shr_12_XFORM : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getSExtValue() >> 12, MVT::i32);
-}]>;
-
-def shr_12_neg_XFORM : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant((-N->getSExtValue()) >> 12, MVT::i32);
-}]>;
-def neg_XFORM : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(-N->getSExtValue(), MVT::i32);
-}]>;
+def HINT : HintI<"hint">;
+def : InstAlias<"nop", (HINT 0b000)>;
+def : InstAlias<"yield",(HINT 0b001)>;
+def : InstAlias<"wfe", (HINT 0b010)>;
+def : InstAlias<"wfi", (HINT 0b011)>;
+def : InstAlias<"sev", (HINT 0b100)>;
+def : InstAlias<"sevl", (HINT 0b101)>;
+// As far as LLVM is concerned this writes to the system's exclusive monitors.
+let mayLoad = 1, mayStore = 1 in
+def CLREX : CRmSystemI<imm0_15, 0b010, "clrex">;
-multiclass addsub_imm_operands<ValueType ty> {
- let PrintMethod = "printAddSubImmLSL0Operand",
- EncoderMethod = "getAddSubImmOpValue",
- ParserMatchClass = addsubimm_lsl0_asmoperand in {
- def _posimm_lsl0 : Operand<ty>,
- ImmLeaf<ty, [{ return Imm >= 0 && (Imm & ~0xfff) == 0; }]>;
- def _negimm_lsl0 : Operand<ty>,
- ImmLeaf<ty, [{ return Imm < 0 && (-Imm & ~0xfff) == 0; }],
- neg_XFORM>;
- }
+// NOTE: ideally, this would have mayStore = 0, mayLoad = 0, but we cannot
+// model patterns with sufficiently fine granularity.
+let mayLoad = ?, mayStore = ? in {
+def DMB : CRmSystemI<barrier_op, 0b101, "dmb",
+ [(int_aarch64_dmb (i32 imm32_0_15:$CRm))]>;
- let PrintMethod = "printAddSubImmLSL12Operand",
- EncoderMethod = "getAddSubImmOpValue",
- ParserMatchClass = addsubimm_lsl12_asmoperand in {
- def _posimm_lsl12 : Operand<ty>,
- ImmLeaf<ty, [{ return Imm >= 0 && (Imm & ~0xfff000) == 0; }],
- shr_12_XFORM>;
+def DSB : CRmSystemI<barrier_op, 0b100, "dsb",
+ [(int_aarch64_dsb (i32 imm32_0_15:$CRm))]>;
- def _negimm_lsl12 : Operand<ty>,
- ImmLeaf<ty, [{ return Imm < 0 && (-Imm & ~0xfff000) == 0; }],
- shr_12_neg_XFORM>;
- }
+def ISB : CRmSystemI<barrier_op, 0b110, "isb",
+ [(int_aarch64_isb (i32 imm32_0_15:$CRm))]>;
}
-// The add operands don't need any transformation
-defm addsubimm_operand_i32 : addsub_imm_operands<i32>;
-defm addsubimm_operand_i64 : addsub_imm_operands<i64>;
-
-multiclass addsubimm_varieties<string prefix, bit sf, bit op, bits<2> shift,
- string asmop, string cmpasmop,
- Operand imm_operand, Operand cmp_imm_operand,
- RegisterClass GPR, RegisterClass GPRsp,
- AArch64Reg ZR, ValueType Ty> {
- // All registers for non-S variants allow SP
- def _s : A64I_addsubimm<sf, op, 0b0, shift,
- (outs GPRsp:$Rd),
- (ins GPRsp:$Rn, imm_operand:$Imm12),
- !strconcat(asmop, "\t$Rd, $Rn, $Imm12"),
- [(set Ty:$Rd, (add Ty:$Rn, imm_operand:$Imm12))],
- NoItinerary>;
-
-
- // S variants can read SP but would write to ZR
- def _S : A64I_addsubimm<sf, op, 0b1, shift,
- (outs GPR:$Rd),
- (ins GPRsp:$Rn, imm_operand:$Imm12),
- !strconcat(asmop, "s\t$Rd, $Rn, $Imm12"),
- [(set Ty:$Rd, (addc Ty:$Rn, imm_operand:$Imm12))],
- NoItinerary> {
- let Defs = [NZCV];
- }
+def : InstAlias<"clrex", (CLREX 0xf)>;
+def : InstAlias<"isb", (ISB 0xf)>;
- // Note that the pattern here for ADDS is subtle. Canonically CMP
- // a, b becomes SUBS a, b. If b < 0 then this is equivalent to
- // ADDS a, (-b). This is not true in general.
- def _cmp : A64I_addsubimm<sf, op, 0b1, shift,
- (outs), (ins GPRsp:$Rn, imm_operand:$Imm12),
- !strconcat(cmpasmop, " $Rn, $Imm12"),
- [(set NZCV,
- (A64cmp Ty:$Rn, cmp_imm_operand:$Imm12))],
- NoItinerary> {
- let Rd = 0b11111;
- let Defs = [NZCV];
- let isCompare = 1;
- }
-}
+def MRS : MRSI;
+def MSR : MSRI;
+def MSRpstate: MSRpstateI;
+// The thread pointer (on Linux, at least, where this has been implemented) is
+// TPIDR_EL0.
+def : Pat<(AArch64threadpointer), (MRS 0xde82)>;
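+// The 0xde82 immediate is the packed (op0:op1:CRn:CRm:op2) system-register
+// operand used by MRS here; assuming the usual 2:3:4:4:3 bit layout it decodes
+// as op0=3, op1=3, CRn=13, CRm=0, op2=2, i.e. TPIDR_EL0:
+//   (3 << 14) | (3 << 11) | (13 << 7) | (0 << 3) | 2 == 0xde82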
-multiclass addsubimm_shifts<string prefix, bit sf, bit op,
- string asmop, string cmpasmop, string operand, string cmpoperand,
- RegisterClass GPR, RegisterClass GPRsp, AArch64Reg ZR,
- ValueType Ty> {
- defm _lsl0 : addsubimm_varieties<prefix # "_lsl0", sf, op, 0b00,
- asmop, cmpasmop,
- !cast<Operand>(operand # "_lsl0"),
- !cast<Operand>(cmpoperand # "_lsl0"),
- GPR, GPRsp, ZR, Ty>;
-
- defm _lsl12 : addsubimm_varieties<prefix # "_lsl12", sf, op, 0b01,
- asmop, cmpasmop,
- !cast<Operand>(operand # "_lsl12"),
- !cast<Operand>(cmpoperand # "_lsl12"),
- GPR, GPRsp, ZR, Ty>;
-}
+// Generic system instructions
+def SYSxt : SystemXtI<0, "sys">;
+def SYSLxt : SystemLXtI<1, "sysl">;
-defm ADDwwi : addsubimm_shifts<"ADDwi", 0b0, 0b0, "add", "cmn",
- "addsubimm_operand_i32_posimm",
- "addsubimm_operand_i32_negimm",
- GPR32, GPR32wsp, WZR, i32>;
-defm ADDxxi : addsubimm_shifts<"ADDxi", 0b1, 0b0, "add", "cmn",
- "addsubimm_operand_i64_posimm",
- "addsubimm_operand_i64_negimm",
- GPR64, GPR64xsp, XZR, i64>;
-defm SUBwwi : addsubimm_shifts<"SUBwi", 0b0, 0b1, "sub", "cmp",
- "addsubimm_operand_i32_negimm",
- "addsubimm_operand_i32_posimm",
- GPR32, GPR32wsp, WZR, i32>;
-defm SUBxxi : addsubimm_shifts<"SUBxi", 0b1, 0b1, "sub", "cmp",
- "addsubimm_operand_i64_negimm",
- "addsubimm_operand_i64_posimm",
- GPR64, GPR64xsp, XZR, i64>;
-
-multiclass MOVsp<RegisterClass GPRsp, RegisterClass SP, Instruction addop> {
- def _fromsp : InstAlias<"mov $Rd, $Rn",
- (addop GPRsp:$Rd, SP:$Rn, 0),
- 0b1>;
-
- def _tosp : InstAlias<"mov $Rd, $Rn",
- (addop SP:$Rd, GPRsp:$Rn, 0),
- 0b1>;
-}
-
-// Recall Rxsp is a RegisterClass containing *just* xsp.
-defm MOVxx : MOVsp<GPR64xsp, Rxsp, ADDxxi_lsl0_s>;
-defm MOVww : MOVsp<GPR32wsp, Rwsp, ADDwwi_lsl0_s>;
+def : InstAlias<"sys $op1, $Cn, $Cm, $op2",
+ (SYSxt imm0_7:$op1, sys_cr_op:$Cn,
+ sys_cr_op:$Cm, imm0_7:$op2, XZR)>;
//===----------------------------------------------------------------------===//
-// Add-subtract (shifted register) instructions
+// Move immediate instructions.
//===----------------------------------------------------------------------===//
-// Contains: ADD, ADDS, SUB, SUBS + aliases CMN, CMP, NEG, NEGS
-
-//===-------------------------------
-// 1. The "shifed register" operands. Shared with logical insts.
-//===-------------------------------
-
-multiclass shift_operands<string prefix, string form> {
- def _asmoperand_i32 : AsmOperandClass {
- let Name = "Shift" # form # "i32";
- let RenderMethod = "addShiftOperands";
- let PredicateMethod = "isShift<A64SE::" # form # ", false>";
- let DiagnosticType = "AddSubRegShift32";
- }
-
- // Note that the operand type is intentionally i64 because the DAGCombiner
- // puts these into a canonical form.
- def _i32 : Operand<i64>, ImmLeaf<i64, [{ return Imm >= 0 && Imm <= 31; }]> {
- let ParserMatchClass
- = !cast<AsmOperandClass>(prefix # "_asmoperand_i32");
- let PrintMethod = "printShiftOperand<A64SE::" # form # ">";
- let DecoderMethod = "Decode32BitShiftOperand";
- }
-
- def _asmoperand_i64 : AsmOperandClass {
- let Name = "Shift" # form # "i64";
- let RenderMethod = "addShiftOperands";
- let PredicateMethod = "isShift<A64SE::" # form # ", true>";
- let DiagnosticType = "AddSubRegShift64";
- }
-
- def _i64 : Operand<i64>, ImmLeaf<i64, [{ return Imm >= 0 && Imm <= 63; }]> {
- let ParserMatchClass
- = !cast<AsmOperandClass>(prefix # "_asmoperand_i64");
- let PrintMethod = "printShiftOperand<A64SE::" # form # ">";
- }
-}
-defm lsl_operand : shift_operands<"lsl_operand", "LSL">;
-defm lsr_operand : shift_operands<"lsr_operand", "LSR">;
-defm asr_operand : shift_operands<"asr_operand", "ASR">;
-
-// Not used for add/sub, but defined here for completeness. The "logical
-// (shifted register)" instructions *do* have an ROR variant.
-defm ror_operand : shift_operands<"ror_operand", "ROR">;
-
-//===-------------------------------
-// 2. The basic 3.5-operand ADD/SUB/ADDS/SUBS instructions.
-//===-------------------------------
-
-// N.b. the commutable parameter is just !N. It will be first against the wall
-// when the revolution comes.
-multiclass addsub_shifts<string prefix, bit sf, bit op, bit s, bit commutable,
- string asmop, SDPatternOperator opfrag, ValueType ty,
- RegisterClass GPR, list<Register> defs> {
- let isCommutable = commutable, Defs = defs in {
- def _lsl : A64I_addsubshift<sf, op, s, 0b00,
- (outs GPR:$Rd),
- (ins GPR:$Rn, GPR:$Rm,
- !cast<Operand>("lsl_operand_" # ty):$Imm6),
- !strconcat(asmop, "\t$Rd, $Rn, $Rm, $Imm6"),
- [(set GPR:$Rd, (opfrag ty:$Rn, (shl ty:$Rm,
- !cast<Operand>("lsl_operand_" # ty):$Imm6))
- )],
- NoItinerary>;
-
- def _lsr : A64I_addsubshift<sf, op, s, 0b01,
- (outs GPR:$Rd),
- (ins GPR:$Rn, GPR:$Rm,
- !cast<Operand>("lsr_operand_" # ty):$Imm6),
- !strconcat(asmop, "\t$Rd, $Rn, $Rm, $Imm6"),
- [(set ty:$Rd, (opfrag ty:$Rn, (srl ty:$Rm,
- !cast<Operand>("lsr_operand_" # ty):$Imm6))
- )],
- NoItinerary>;
-
- def _asr : A64I_addsubshift<sf, op, s, 0b10,
- (outs GPR:$Rd),
- (ins GPR:$Rn, GPR:$Rm,
- !cast<Operand>("asr_operand_" # ty):$Imm6),
- !strconcat(asmop, "\t$Rd, $Rn, $Rm, $Imm6"),
- [(set ty:$Rd, (opfrag ty:$Rn, (sra ty:$Rm,
- !cast<Operand>("asr_operand_" # ty):$Imm6))
- )],
- NoItinerary>;
- }
+defm MOVK : InsertImmediate<0b11, "movk">;
+defm MOVN : MoveImmediate<0b00, "movn">;
- def _noshift
- : InstAlias<!strconcat(asmop, " $Rd, $Rn, $Rm"),
- (!cast<Instruction>(prefix # "_lsl") GPR:$Rd, GPR:$Rn,
- GPR:$Rm, 0)>;
+let PostEncoderMethod = "fixMOVZ" in
+defm MOVZ : MoveImmediate<0b10, "movz">;
- def : Pat<(opfrag ty:$Rn, ty:$Rm),
- (!cast<Instruction>(prefix # "_lsl") $Rn, $Rm, 0)>;
-}
+// First group of aliases covers an implicit "lsl #0".
+def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, imm0_65535:$imm, 0)>;
+def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, imm0_65535:$imm, 0)>;
+def : InstAlias<"movn $dst, $imm", (MOVNWi GPR32:$dst, imm0_65535:$imm, 0)>;
+def : InstAlias<"movn $dst, $imm", (MOVNXi GPR64:$dst, imm0_65535:$imm, 0)>;
+def : InstAlias<"movz $dst, $imm", (MOVZWi GPR32:$dst, imm0_65535:$imm, 0)>;
+def : InstAlias<"movz $dst, $imm", (MOVZXi GPR64:$dst, imm0_65535:$imm, 0)>;
-multiclass addsub_sizes<string prefix, bit op, bit s, bit commutable,
- string asmop, SDPatternOperator opfrag,
- list<Register> defs> {
- defm xxx : addsub_shifts<prefix # "xxx", 0b1, op, s,
- commutable, asmop, opfrag, i64, GPR64, defs>;
- defm www : addsub_shifts<prefix # "www", 0b0, op, s,
- commutable, asmop, opfrag, i32, GPR32, defs>;
-}
+// Next, we have various ELF relocations with the ":XYZ_g0:sym" syntax.
+def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g3:$sym, 48)>;
+def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g2:$sym, 32)>;
+def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g1:$sym, 16)>;
+def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g0:$sym, 0)>;
+def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g3:$sym, 48)>;
+def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g2:$sym, 32)>;
+def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g1:$sym, 16)>;
+def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g0:$sym, 0)>;
-defm ADD : addsub_sizes<"ADD", 0b0, 0b0, 0b1, "add", add, []>;
-defm SUB : addsub_sizes<"SUB", 0b1, 0b0, 0b0, "sub", sub, []>;
+def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g3:$sym, 48)>;
+def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g2:$sym, 32)>;
+def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g1:$sym, 16)>;
+def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g0:$sym, 0)>;
-defm ADDS : addsub_sizes<"ADDS", 0b0, 0b1, 0b1, "adds", addc, [NZCV]>;
-defm SUBS : addsub_sizes<"SUBS", 0b1, 0b1, 0b0, "subs", subc, [NZCV]>;
+def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movz_symbol_g1:$sym, 16)>;
+def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movz_symbol_g0:$sym, 0)>;
-//===-------------------------------
-// 1. The NEG/NEGS aliases
-//===-------------------------------
+def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movz_symbol_g1:$sym, 16)>;
+def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movz_symbol_g0:$sym, 0)>;
-multiclass neg_alias<Instruction INST, RegisterClass GPR, Register ZR,
- ValueType ty, Operand shift_operand, SDNode shiftop> {
- def : InstAlias<"neg $Rd, $Rm, $Imm6",
- (INST GPR:$Rd, ZR, GPR:$Rm, shift_operand:$Imm6)>;
+def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movk_symbol_g1:$sym, 16)>;
+def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movk_symbol_g0:$sym, 0)>;
- def : Pat<(sub 0, (shiftop ty:$Rm, shift_operand:$Imm6)),
- (INST ZR, $Rm, shift_operand:$Imm6)>;
-}
-
-defm : neg_alias<SUBwww_lsl, GPR32, WZR, i32, lsl_operand_i32, shl>;
-defm : neg_alias<SUBwww_lsr, GPR32, WZR, i32, lsr_operand_i32, srl>;
-defm : neg_alias<SUBwww_asr, GPR32, WZR, i32, asr_operand_i32, sra>;
-def : InstAlias<"neg $Rd, $Rm", (SUBwww_lsl GPR32:$Rd, WZR, GPR32:$Rm, 0)>;
-def : Pat<(sub 0, i32:$Rm), (SUBwww_lsl WZR, $Rm, 0)>;
-
-defm : neg_alias<SUBxxx_lsl, GPR64, XZR, i64, lsl_operand_i64, shl>;
-defm : neg_alias<SUBxxx_lsr, GPR64, XZR, i64, lsr_operand_i64, srl>;
-defm : neg_alias<SUBxxx_asr, GPR64, XZR, i64, asr_operand_i64, sra>;
-def : InstAlias<"neg $Rd, $Rm", (SUBxxx_lsl GPR64:$Rd, XZR, GPR64:$Rm, 0)>;
-def : Pat<(sub 0, i64:$Rm), (SUBxxx_lsl XZR, $Rm, 0)>;
-
-// NEGS doesn't get any patterns yet: defining multiple outputs means C++ has to
-// be involved.
-class negs_alias<Instruction INST, RegisterClass GPR,
- Register ZR, Operand shift_operand, SDNode shiftop>
- : InstAlias<"negs $Rd, $Rm, $Imm6",
- (INST GPR:$Rd, ZR, GPR:$Rm, shift_operand:$Imm6)>;
-
-def : negs_alias<SUBSwww_lsl, GPR32, WZR, lsl_operand_i32, shl>;
-def : negs_alias<SUBSwww_lsr, GPR32, WZR, lsr_operand_i32, srl>;
-def : negs_alias<SUBSwww_asr, GPR32, WZR, asr_operand_i32, sra>;
-def : InstAlias<"negs $Rd, $Rm", (SUBSwww_lsl GPR32:$Rd, WZR, GPR32:$Rm, 0)>;
-
-def : negs_alias<SUBSxxx_lsl, GPR64, XZR, lsl_operand_i64, shl>;
-def : negs_alias<SUBSxxx_lsr, GPR64, XZR, lsr_operand_i64, srl>;
-def : negs_alias<SUBSxxx_asr, GPR64, XZR, asr_operand_i64, sra>;
-def : InstAlias<"negs $Rd, $Rm", (SUBSxxx_lsl GPR64:$Rd, XZR, GPR64:$Rm, 0)>;
-
-//===-------------------------------
-// 1. The CMP/CMN aliases
-//===-------------------------------
-
-multiclass cmp_shifts<string prefix, bit sf, bit op, bit commutable,
- string asmop, SDPatternOperator opfrag, ValueType ty,
- RegisterClass GPR> {
- let isCommutable = commutable, Rd = 0b11111, Defs = [NZCV] in {
- def _lsl : A64I_addsubshift<sf, op, 0b1, 0b00,
- (outs),
- (ins GPR:$Rn, GPR:$Rm,
- !cast<Operand>("lsl_operand_" # ty):$Imm6),
- !strconcat(asmop, "\t$Rn, $Rm, $Imm6"),
- [(set NZCV, (opfrag ty:$Rn, (shl ty:$Rm,
- !cast<Operand>("lsl_operand_" # ty):$Imm6))
- )],
- NoItinerary>;
-
- def _lsr : A64I_addsubshift<sf, op, 0b1, 0b01,
- (outs),
- (ins GPR:$Rn, GPR:$Rm,
- !cast<Operand>("lsr_operand_" # ty):$Imm6),
- !strconcat(asmop, "\t$Rn, $Rm, $Imm6"),
- [(set NZCV, (opfrag ty:$Rn, (srl ty:$Rm,
- !cast<Operand>("lsr_operand_" # ty):$Imm6))
- )],
- NoItinerary>;
-
- def _asr : A64I_addsubshift<sf, op, 0b1, 0b10,
- (outs),
- (ins GPR:$Rn, GPR:$Rm,
- !cast<Operand>("asr_operand_" # ty):$Imm6),
- !strconcat(asmop, "\t$Rn, $Rm, $Imm6"),
- [(set NZCV, (opfrag ty:$Rn, (sra ty:$Rm,
- !cast<Operand>("asr_operand_" # ty):$Imm6))
- )],
- NoItinerary>;
+// Final group of aliases covers true "mov $Rd, $imm" cases.
+multiclass movw_mov_alias<string basename,Instruction INST, RegisterClass GPR,
+ int width, int shift> {
+ def _asmoperand : AsmOperandClass {
+ let Name = basename # width # "_lsl" # shift # "MovAlias";
+ let PredicateMethod = "is" # basename # "MovAlias<" # width # ", "
+ # shift # ">";
+ let RenderMethod = "add" # basename # "MovAliasOperands<" # shift # ">";
}
- def _noshift
- : InstAlias<!strconcat(asmop, " $Rn, $Rm"),
- (!cast<Instruction>(prefix # "_lsl") GPR:$Rn, GPR:$Rm, 0)>;
-
- def : Pat<(opfrag ty:$Rn, ty:$Rm),
- (!cast<Instruction>(prefix # "_lsl") $Rn, $Rm, 0)>;
-}
-
-defm CMPww : cmp_shifts<"CMPww", 0b0, 0b1, 0b0, "cmp", A64cmp, i32, GPR32>;
-defm CMPxx : cmp_shifts<"CMPxx", 0b1, 0b1, 0b0, "cmp", A64cmp, i64, GPR64>;
-
-defm CMNww : cmp_shifts<"CMNww", 0b0, 0b0, 0b1, "cmn", A64cmn, i32, GPR32>;
-defm CMNxx : cmp_shifts<"CMNxx", 0b1, 0b0, 0b1, "cmn", A64cmn, i64, GPR64>;
-
-//===----------------------------------------------------------------------===//
-// Add-subtract (with carry) instructions
-//===----------------------------------------------------------------------===//
-// Contains: ADC, ADCS, SBC, SBCS + aliases NGC, NGCS
-
-multiclass A64I_addsubcarrySizes<bit op, bit s, string asmop> {
- let Uses = [NZCV] in {
- def www : A64I_addsubcarry<0b0, op, s, 0b000000,
- (outs GPR32:$Rd), (ins GPR32:$Rn, GPR32:$Rm),
- !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
- [], NoItinerary>;
-
- def xxx : A64I_addsubcarry<0b1, op, s, 0b000000,
- (outs GPR64:$Rd), (ins GPR64:$Rn, GPR64:$Rm),
- !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
- [], NoItinerary>;
+ def _movimm : Operand<i32> {
+ let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_asmoperand");
}
-}
-
-let isCommutable = 1 in {
- defm ADC : A64I_addsubcarrySizes<0b0, 0b0, "adc">;
-}
-defm SBC : A64I_addsubcarrySizes<0b1, 0b0, "sbc">;
-
-let Defs = [NZCV] in {
- let isCommutable = 1 in {
- defm ADCS : A64I_addsubcarrySizes<0b0, 0b1, "adcs">;
- }
+ def : InstAlias<"mov $Rd, $imm",
+ (INST GPR:$Rd, !cast<Operand>(NAME # "_movimm"):$imm, shift)>;
+}
+
+defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 0>;
+defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 16>;
+
+defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 0>;
+defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 16>;
+defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 32>;
+defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 48>;
+
+defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 0>;
+defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 16>;
+
+defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 0>;
+defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 16>;
+defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 32>;
+defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 48>;
+
+let isReMaterializable = 1, isCodeGenOnly = 1, isMoveImm = 1,
+ isAsCheapAsAMove = 1 in {
+// FIXME: The following pseudo instructions are only needed because remat
+// cannot handle multiple instructions. When that changes, we can select
+// directly to the real instructions and get rid of these pseudos.
+
+def MOVi32imm
+ : Pseudo<(outs GPR32:$dst), (ins i32imm:$src),
+ [(set GPR32:$dst, imm:$src)]>,
+ Sched<[WriteImm]>;
+def MOVi64imm
+ : Pseudo<(outs GPR64:$dst), (ins i64imm:$src),
+ [(set GPR64:$dst, imm:$src)]>,
+ Sched<[WriteImm]>;
+} // isReMaterializable, isCodeGenOnly
+
+// If possible, we want to use MOVi32imm even for 64-bit moves. This gives the
+// eventual expansion code fewer bits to worry about getting right. Marshalling
+// the types is a little tricky though:
+def i64imm_32bit : ImmLeaf<i64, [{
+ return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm);
+}]>;
- defm SBCS : A64I_addsubcarrySizes<0b1, 0b1, "sbcs">;
-}
+def trunc_imm : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(N->getZExtValue(), MVT::i32);
+}]>;
-def : InstAlias<"ngc $Rd, $Rm", (SBCwww GPR32:$Rd, WZR, GPR32:$Rm)>;
-def : InstAlias<"ngc $Rd, $Rm", (SBCxxx GPR64:$Rd, XZR, GPR64:$Rm)>;
-def : InstAlias<"ngcs $Rd, $Rm", (SBCSwww GPR32:$Rd, WZR, GPR32:$Rm)>;
-def : InstAlias<"ngcs $Rd, $Rm", (SBCSxxx GPR64:$Rd, XZR, GPR64:$Rm)>;
+def : Pat<(i64 i64imm_32bit:$src),
+ (SUBREG_TO_REG (i64 0), (MOVi32imm (trunc_imm imm:$src)), sub_32)>;
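+// In words: a 64-bit immediate qualifies when its top 32 bits are zero, so it
+// can be built with the 32-bit pseudo and widened with SUBREG_TO_REG (which
+// records that the upper half is already zero). For example, 0x0000000089abcdef
+// matches i64imm_32bit, while 0xffffffff89abcdef does not, even though the
+// latter is "small" when read as a signed value.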
+
+// Deal with the various forms of (ELF) large addressing with MOVZ/MOVK
+// sequences.
+def : Pat<(AArch64WrapperLarge tglobaladdr:$g3, tglobaladdr:$g2,
+ tglobaladdr:$g1, tglobaladdr:$g0),
+ (MOVKXi (MOVKXi (MOVKXi (MOVZXi tglobaladdr:$g3, 48),
+ tglobaladdr:$g2, 32),
+ tglobaladdr:$g1, 16),
+ tglobaladdr:$g0, 0)>;
+
+def : Pat<(AArch64WrapperLarge tblockaddress:$g3, tblockaddress:$g2,
+ tblockaddress:$g1, tblockaddress:$g0),
+ (MOVKXi (MOVKXi (MOVKXi (MOVZXi tblockaddress:$g3, 48),
+ tblockaddress:$g2, 32),
+ tblockaddress:$g1, 16),
+ tblockaddress:$g0, 0)>;
+
+def : Pat<(AArch64WrapperLarge tconstpool:$g3, tconstpool:$g2,
+ tconstpool:$g1, tconstpool:$g0),
+ (MOVKXi (MOVKXi (MOVKXi (MOVZXi tconstpool:$g3, 48),
+ tconstpool:$g2, 32),
+ tconstpool:$g1, 16),
+ tconstpool:$g0, 0)>;
+
+def : Pat<(AArch64WrapperLarge tjumptable:$g3, tjumptable:$g2,
+ tjumptable:$g1, tjumptable:$g0),
+ (MOVKXi (MOVKXi (MOVKXi (MOVZXi tjumptable:$g3, 48),
+ tjumptable:$g2, 32),
+ tjumptable:$g1, 16),
+ tjumptable:$g0, 0)>;
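+// Each WrapperLarge pattern above emits the classic four-instruction sequence:
+// movz seeds bits [63:48] and three movk instructions patch in the remaining
+// 16-bit chunks (for symbols, the gN operands become the ELF relocations
+// described earlier). A sketch of the equivalent arithmetic on literal chunks:
+//
+//   #include <cstdint>
+//   uint64_t movz_movk_chain(uint16_t g3, uint16_t g2, uint16_t g1, uint16_t g0) {
+//     uint64_t v = (uint64_t)g3 << 48;                            // movz x, #g3, lsl #48
+//     v = (v & ~(UINT64_C(0xffff) << 32)) | ((uint64_t)g2 << 32); // movk x, #g2, lsl #32
+//     v = (v & ~(UINT64_C(0xffff) << 16)) | ((uint64_t)g1 << 16); // movk x, #g1, lsl #16
+//     v = (v & ~UINT64_C(0xffff))         | (uint64_t)g0;         // movk x, #g0
+//     return v;
+//   }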
-// Note that adde and sube can form a chain longer than two (e.g. for 256-bit
-// addition). So the flag-setting instructions are appropriate.
-def : Pat<(adde i32:$Rn, i32:$Rm), (ADCSwww $Rn, $Rm)>;
-def : Pat<(adde i64:$Rn, i64:$Rm), (ADCSxxx $Rn, $Rm)>;
-def : Pat<(sube i32:$Rn, i32:$Rm), (SBCSwww $Rn, $Rm)>;
-def : Pat<(sube i64:$Rn, i64:$Rm), (SBCSxxx $Rn, $Rm)>;
//===----------------------------------------------------------------------===//
-// Bitfield
+// Arithmetic instructions.
//===----------------------------------------------------------------------===//
-// Contains: SBFM, BFM, UBFM, [SU]XT[BHW], ASR, LSR, LSL, SBFI[ZX], BFI, BFXIL,
-// UBFIZ, UBFX
-
-// Because of the rather complicated nearly-overlapping aliases, the decoding of
-// this range of instructions is handled manually. The architectural
-// instructions are BFM, SBFM and UBFM but a disassembler should never produce
-// these.
-//
-// In the end, the best option was to use BFM instructions for decoding under
-// almost all circumstances, but to create aliasing *Instructions* for each of
-// the canonical forms and specify a completely custom decoder which would
-// substitute the correct MCInst as needed.
-//
-// This also simplifies instruction selection, parsing etc because the MCInsts
-// have a shape that's closer to their use in code.
-
-//===-------------------------------
-// 1. The architectural BFM instructions
-//===-------------------------------
-
-def uimm5_asmoperand : AsmOperandClass {
- let Name = "UImm5";
- let PredicateMethod = "isUImm<5>";
- let RenderMethod = "addImmOperands";
- let DiagnosticType = "UImm5";
-}
-
-def uimm6_asmoperand : AsmOperandClass {
- let Name = "UImm6";
- let PredicateMethod = "isUImm<6>";
- let RenderMethod = "addImmOperands";
- let DiagnosticType = "UImm6";
-}
-
-def bitfield32_imm : Operand<i64>,
- ImmLeaf<i64, [{ return Imm >= 0 && Imm < 32; }]> {
- let ParserMatchClass = uimm5_asmoperand;
-
- let DecoderMethod = "DecodeBitfield32ImmOperand";
-}
-
-
-def bitfield64_imm : Operand<i64>,
- ImmLeaf<i64, [{ return Imm >= 0 && Imm < 64; }]> {
- let ParserMatchClass = uimm6_asmoperand;
-
- // Default decoder works in 64-bit case: the 6-bit field can take any value.
-}
-
-multiclass A64I_bitfieldSizes<bits<2> opc, string asmop> {
- def wwii : A64I_bitfield<0b0, opc, 0b0, (outs GPR32:$Rd),
- (ins GPR32:$Rn, bitfield32_imm:$ImmR, bitfield32_imm:$ImmS),
- !strconcat(asmop, "\t$Rd, $Rn, $ImmR, $ImmS"),
- [], NoItinerary> {
- let DecoderMethod = "DecodeBitfieldInstruction";
- }
-
- def xxii : A64I_bitfield<0b1, opc, 0b1, (outs GPR64:$Rd),
- (ins GPR64:$Rn, bitfield64_imm:$ImmR, bitfield64_imm:$ImmS),
- !strconcat(asmop, "\t$Rd, $Rn, $ImmR, $ImmS"),
- [], NoItinerary> {
- let DecoderMethod = "DecodeBitfieldInstruction";
- }
-}
-
-defm SBFM : A64I_bitfieldSizes<0b00, "sbfm">;
-defm UBFM : A64I_bitfieldSizes<0b10, "ubfm">;
-
-// BFM instructions modify the destination register rather than defining it
-// completely.
-def BFMwwii :
- A64I_bitfield<0b0, 0b01, 0b0, (outs GPR32:$Rd),
- (ins GPR32:$src, GPR32:$Rn, bitfield32_imm:$ImmR, bitfield32_imm:$ImmS),
- "bfm\t$Rd, $Rn, $ImmR, $ImmS", [], NoItinerary> {
- let DecoderMethod = "DecodeBitfieldInstruction";
- let Constraints = "$src = $Rd";
-}
-
-def BFMxxii :
- A64I_bitfield<0b1, 0b01, 0b1, (outs GPR64:$Rd),
- (ins GPR64:$src, GPR64:$Rn, bitfield64_imm:$ImmR, bitfield64_imm:$ImmS),
- "bfm\t$Rd, $Rn, $ImmR, $ImmS", [], NoItinerary> {
- let DecoderMethod = "DecodeBitfieldInstruction";
- let Constraints = "$src = $Rd";
-}
-
-
-//===-------------------------------
-// 2. Extend aliases to 64-bit dest
-//===-------------------------------
-
-// Unfortunately the extensions that end up as 64-bits cannot be handled by an
-// instruction alias: their syntax is (for example) "SXTB x0, w0", which needs
-// to be mapped to "SBFM x0, x0, #0, 7" (changing the class of Rn). InstAlias is
-// not capable of such a map as far as I'm aware
-
-// Note that these instructions are strictly more specific than the
-// BFM ones (in ImmR) so they can handle their own decoding.
-class A64I_bf_ext<bit sf, bits<2> opc, RegisterClass GPRDest, ValueType dty,
- string asmop, bits<6> imms, dag pattern>
- : A64I_bitfield<sf, opc, sf,
- (outs GPRDest:$Rd), (ins GPR32:$Rn),
- !strconcat(asmop, "\t$Rd, $Rn"),
- [(set dty:$Rd, pattern)], NoItinerary> {
- let ImmR = 0b000000;
- let ImmS = imms;
-}
-
-// Signed extensions
-def SXTBxw : A64I_bf_ext<0b1, 0b00, GPR64, i64, "sxtb", 7,
- (sext_inreg (anyext i32:$Rn), i8)>;
-def SXTBww : A64I_bf_ext<0b0, 0b00, GPR32, i32, "sxtb", 7,
- (sext_inreg i32:$Rn, i8)>;
-def SXTHxw : A64I_bf_ext<0b1, 0b00, GPR64, i64, "sxth", 15,
- (sext_inreg (anyext i32:$Rn), i16)>;
-def SXTHww : A64I_bf_ext<0b0, 0b00, GPR32, i32, "sxth", 15,
- (sext_inreg i32:$Rn, i16)>;
-def SXTWxw : A64I_bf_ext<0b1, 0b00, GPR64, i64, "sxtw", 31, (sext i32:$Rn)>;
-
-// Unsigned extensions
-def UXTBww : A64I_bf_ext<0b0, 0b10, GPR32, i32, "uxtb", 7,
- (and i32:$Rn, 255)>;
-def UXTHww : A64I_bf_ext<0b0, 0b10, GPR32, i32, "uxth", 15,
- (and i32:$Rn, 65535)>;
-
-// The 64-bit unsigned variants are not strictly architectural but recommended
-// for consistency.
-let isAsmParserOnly = 1 in {
- def UXTBxw : A64I_bf_ext<0b0, 0b10, GPR64, i64, "uxtb", 7,
- (and (anyext i32:$Rn), 255)>;
- def UXTHxw : A64I_bf_ext<0b0, 0b10, GPR64, i64, "uxth", 15,
- (and (anyext i32:$Rn), 65535)>;
-}
-
-// Extra patterns for when the source register is actually 64-bits
-// too. There's no architectural difference here, it's just LLVM
-// shenanigans. There's no need for equivalent zero-extension patterns
-// because they'll already be caught by logical (immediate) matching.
-def : Pat<(sext_inreg i64:$Rn, i8),
- (SXTBxw (EXTRACT_SUBREG $Rn, sub_32))>;
-def : Pat<(sext_inreg i64:$Rn, i16),
- (SXTHxw (EXTRACT_SUBREG $Rn, sub_32))>;
-def : Pat<(sext_inreg i64:$Rn, i32),
- (SXTWxw (EXTRACT_SUBREG $Rn, sub_32))>;
-
-
-//===-------------------------------
-// 3. Aliases for ASR and LSR (the simple shifts)
-//===-------------------------------
-
-// These also handle their own decoding because ImmS being set makes
-// them take precedence over BFM.
-multiclass A64I_shift<bits<2> opc, string asmop, SDNode opnode> {
- def wwi : A64I_bitfield<0b0, opc, 0b0,
- (outs GPR32:$Rd), (ins GPR32:$Rn, bitfield32_imm:$ImmR),
- !strconcat(asmop, "\t$Rd, $Rn, $ImmR"),
- [(set i32:$Rd, (opnode i32:$Rn, bitfield32_imm:$ImmR))],
- NoItinerary> {
- let ImmS = 31;
- }
-
- def xxi : A64I_bitfield<0b1, opc, 0b1,
- (outs GPR64:$Rd), (ins GPR64:$Rn, bitfield64_imm:$ImmR),
- !strconcat(asmop, "\t$Rd, $Rn, $ImmR"),
- [(set i64:$Rd, (opnode i64:$Rn, bitfield64_imm:$ImmR))],
- NoItinerary> {
- let ImmS = 63;
- }
-
-}
-
-defm ASR : A64I_shift<0b00, "asr", sra>;
-defm LSR : A64I_shift<0b10, "lsr", srl>;
-
-//===-------------------------------
-// 4. Aliases for LSL
-//===-------------------------------
-
-// Unfortunately LSL and subsequent aliases are much more complicated. We need
-// to be able to say certain output instruction fields depend in a complex
-// manner on combinations of input assembly fields.
-//
-// MIOperandInfo *might* have been able to do it, but at the cost of
-// significantly more C++ code.
-
-// N.b. contrary to usual practice these operands store the shift rather than
-// the machine bits in an MCInst. The complexity overhead of consistency
-// outweighed the benefits in this case (custom asmparser, printer and selection
-// vs custom encoder).
-def bitfield32_lsl_imm : Operand<i64>,
- ImmLeaf<i64, [{ return Imm >= 0 && Imm <= 31; }]> {
- let ParserMatchClass = uimm5_asmoperand;
- let EncoderMethod = "getBitfield32LSLOpValue";
-}
-
-def bitfield64_lsl_imm : Operand<i64>,
- ImmLeaf<i64, [{ return Imm >= 0 && Imm <= 63; }]> {
- let ParserMatchClass = uimm6_asmoperand;
- let EncoderMethod = "getBitfield64LSLOpValue";
-}
-
-class A64I_bitfield_lsl<bit sf, RegisterClass GPR, ValueType ty,
- Operand operand>
- : A64I_bitfield<sf, 0b10, sf, (outs GPR:$Rd), (ins GPR:$Rn, operand:$FullImm),
- "lsl\t$Rd, $Rn, $FullImm",
- [(set ty:$Rd, (shl ty:$Rn, operand:$FullImm))],
- NoItinerary> {
- bits<12> FullImm;
- let ImmR = FullImm{5-0};
- let ImmS = FullImm{11-6};
-
- // No disassembler allowed because it would overlap with BFM which does the
- // actual work.
- let isAsmParserOnly = 1;
-}
-
-def LSLwwi : A64I_bitfield_lsl<0b0, GPR32, i32, bitfield32_lsl_imm>;
-def LSLxxi : A64I_bitfield_lsl<0b1, GPR64, i64, bitfield64_lsl_imm>;
-
-//===-------------------------------
-// 5. Aliases for bitfield extract instructions
-//===-------------------------------
-
-def bfx32_width_asmoperand : AsmOperandClass {
- let Name = "BFX32Width";
- let PredicateMethod = "isBitfieldWidth<32>";
- let RenderMethod = "addBFXWidthOperands";
- let DiagnosticType = "Width32";
-}
-
-def bfx32_width : Operand<i64>, ImmLeaf<i64, [{ return true; }]> {
- let PrintMethod = "printBFXWidthOperand";
- let ParserMatchClass = bfx32_width_asmoperand;
-}
-
-def bfx64_width_asmoperand : AsmOperandClass {
- let Name = "BFX64Width";
- let PredicateMethod = "isBitfieldWidth<64>";
- let RenderMethod = "addBFXWidthOperands";
- let DiagnosticType = "Width64";
-}
-
-def bfx64_width : Operand<i64> {
- let PrintMethod = "printBFXWidthOperand";
- let ParserMatchClass = bfx64_width_asmoperand;
-}
-
-
-multiclass A64I_bitfield_extract<bits<2> opc, string asmop, SDNode op> {
- def wwii : A64I_bitfield<0b0, opc, 0b0, (outs GPR32:$Rd),
- (ins GPR32:$Rn, bitfield32_imm:$ImmR, bfx32_width:$ImmS),
- !strconcat(asmop, "\t$Rd, $Rn, $ImmR, $ImmS"),
- [(set i32:$Rd, (op i32:$Rn, imm:$ImmR, imm:$ImmS))],
- NoItinerary> {
- // As above, no disassembler allowed.
- let isAsmParserOnly = 1;
- }
-
- def xxii : A64I_bitfield<0b1, opc, 0b1, (outs GPR64:$Rd),
- (ins GPR64:$Rn, bitfield64_imm:$ImmR, bfx64_width:$ImmS),
- !strconcat(asmop, "\t$Rd, $Rn, $ImmR, $ImmS"),
- [(set i64:$Rd, (op i64:$Rn, imm:$ImmR, imm:$ImmS))],
- NoItinerary> {
- // As above, no disassembler allowed.
- let isAsmParserOnly = 1;
- }
-}
-
-defm SBFX : A64I_bitfield_extract<0b00, "sbfx", A64Sbfx>;
-defm UBFX : A64I_bitfield_extract<0b10, "ubfx", A64Ubfx>;
-
-// Again, variants based on BFM modify Rd so need it as an input too.
-def BFXILwwii : A64I_bitfield<0b0, 0b01, 0b0, (outs GPR32:$Rd),
- (ins GPR32:$src, GPR32:$Rn, bitfield32_imm:$ImmR, bfx32_width:$ImmS),
- "bfxil\t$Rd, $Rn, $ImmR, $ImmS", [], NoItinerary> {
- // As above, no disassembler allowed.
- let isAsmParserOnly = 1;
- let Constraints = "$src = $Rd";
-}
-
-def BFXILxxii : A64I_bitfield<0b1, 0b01, 0b1, (outs GPR64:$Rd),
- (ins GPR64:$src, GPR64:$Rn, bitfield64_imm:$ImmR, bfx64_width:$ImmS),
- "bfxil\t$Rd, $Rn, $ImmR, $ImmS", [], NoItinerary> {
- // As above, no disassembler allowed.
- let isAsmParserOnly = 1;
- let Constraints = "$src = $Rd";
-}
-
-// SBFX instructions can do a 1-instruction sign-extension of boolean values.
-def : Pat<(sext_inreg i64:$Rn, i1), (SBFXxxii $Rn, 0, 0)>;
-def : Pat<(sext_inreg i32:$Rn, i1), (SBFXwwii $Rn, 0, 0)>;
-def : Pat<(i64 (sext_inreg (anyext i32:$Rn), i1)),
- (SBFXxxii (SUBREG_TO_REG (i64 0), $Rn, sub_32), 0, 0)>;
-
-// UBFX makes sense as an implementation of a 64-bit zero-extension too. Could
-// use either 64-bit or 32-bit variant, but 32-bit might be more efficient.
-def : Pat<(i64 (zext i32:$Rn)), (SUBREG_TO_REG (i64 0), (UBFXwwii $Rn, 0, 31),
- sub_32)>;
-
-//===-------------------------------
-// 6. Aliases for bitfield insert instructions
-//===-------------------------------
-
-def bfi32_lsb_asmoperand : AsmOperandClass {
- let Name = "BFI32LSB";
- let PredicateMethod = "isUImm<5>";
- let RenderMethod = "addBFILSBOperands<32>";
- let DiagnosticType = "UImm5";
-}
-
-def bfi32_lsb : Operand<i64>,
- ImmLeaf<i64, [{ return Imm >= 0 && Imm <= 31; }]> {
- let PrintMethod = "printBFILSBOperand<32>";
- let ParserMatchClass = bfi32_lsb_asmoperand;
-}
-
-def bfi64_lsb_asmoperand : AsmOperandClass {
- let Name = "BFI64LSB";
- let PredicateMethod = "isUImm<6>";
- let RenderMethod = "addBFILSBOperands<64>";
- let DiagnosticType = "UImm6";
-}
-
-def bfi64_lsb : Operand<i64>,
- ImmLeaf<i64, [{ return Imm >= 0 && Imm <= 63; }]> {
- let PrintMethod = "printBFILSBOperand<64>";
- let ParserMatchClass = bfi64_lsb_asmoperand;
-}
-
-// Width verification is performed during conversion so width operand can be
-// shared between 32/64-bit cases. Still needed for the print method though
-// because ImmR encodes "width - 1".
-def bfi32_width_asmoperand : AsmOperandClass {
- let Name = "BFI32Width";
- let PredicateMethod = "isBitfieldWidth<32>";
- let RenderMethod = "addBFIWidthOperands";
- let DiagnosticType = "Width32";
-}
-
-def bfi32_width : Operand<i64>,
- ImmLeaf<i64, [{ return Imm >= 1 && Imm <= 32; }]> {
- let PrintMethod = "printBFIWidthOperand";
- let ParserMatchClass = bfi32_width_asmoperand;
-}
-
-def bfi64_width_asmoperand : AsmOperandClass {
- let Name = "BFI64Width";
- let PredicateMethod = "isBitfieldWidth<64>";
- let RenderMethod = "addBFIWidthOperands";
- let DiagnosticType = "Width64";
-}
-def bfi64_width : Operand<i64>,
- ImmLeaf<i64, [{ return Imm >= 1 && Imm <= 64; }]> {
- let PrintMethod = "printBFIWidthOperand";
- let ParserMatchClass = bfi64_width_asmoperand;
-}
-
-multiclass A64I_bitfield_insert<bits<2> opc, string asmop> {
- def wwii : A64I_bitfield<0b0, opc, 0b0, (outs GPR32:$Rd),
- (ins GPR32:$Rn, bfi32_lsb:$ImmR, bfi32_width:$ImmS),
- !strconcat(asmop, "\t$Rd, $Rn, $ImmR, $ImmS"),
- [], NoItinerary> {
- // As above, no disassembler allowed.
- let isAsmParserOnly = 1;
- }
-
- def xxii : A64I_bitfield<0b1, opc, 0b1, (outs GPR64:$Rd),
- (ins GPR64:$Rn, bfi64_lsb:$ImmR, bfi64_width:$ImmS),
- !strconcat(asmop, "\t$Rd, $Rn, $ImmR, $ImmS"),
- [], NoItinerary> {
- // As above, no disassembler allowed.
- let isAsmParserOnly = 1;
- }
-}
-
-defm SBFIZ : A64I_bitfield_insert<0b00, "sbfiz">;
-defm UBFIZ : A64I_bitfield_insert<0b10, "ubfiz">;
+// Add/subtract with carry.
+defm ADC : AddSubCarry<0, "adc", "adcs", AArch64adc, AArch64adc_flag>;
+defm SBC : AddSubCarry<1, "sbc", "sbcs", AArch64sbc, AArch64sbc_flag>;
+
+def : InstAlias<"ngc $dst, $src", (SBCWr GPR32:$dst, WZR, GPR32:$src)>;
+def : InstAlias<"ngc $dst, $src", (SBCXr GPR64:$dst, XZR, GPR64:$src)>;
+def : InstAlias<"ngcs $dst, $src", (SBCSWr GPR32:$dst, WZR, GPR32:$src)>;
+def : InstAlias<"ngcs $dst, $src", (SBCSXr GPR64:$dst, XZR, GPR64:$src)>;
+
+// Add/subtract
+defm ADD : AddSub<0, "add", add>;
+defm SUB : AddSub<1, "sub">;
+
+def : InstAlias<"mov $dst, $src",
+ (ADDWri GPR32sponly:$dst, GPR32sp:$src, 0, 0)>;
+def : InstAlias<"mov $dst, $src",
+ (ADDWri GPR32sp:$dst, GPR32sponly:$src, 0, 0)>;
+def : InstAlias<"mov $dst, $src",
+ (ADDXri GPR64sponly:$dst, GPR64sp:$src, 0, 0)>;
+def : InstAlias<"mov $dst, $src",
+ (ADDXri GPR64sp:$dst, GPR64sponly:$src, 0, 0)>;
+
+defm ADDS : AddSubS<0, "adds", AArch64add_flag, "cmn">;
+defm SUBS : AddSubS<1, "subs", AArch64sub_flag, "cmp">;
+
+// Use SUBS instead of SUB to enable CSE between SUBS and SUB.
+def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),
+ (SUBSWri GPR32sp:$Rn, addsub_shifted_imm32:$imm)>;
+def : Pat<(sub GPR64sp:$Rn, addsub_shifted_imm64:$imm),
+ (SUBSXri GPR64sp:$Rn, addsub_shifted_imm64:$imm)>;
+def : Pat<(sub GPR32:$Rn, GPR32:$Rm),
+ (SUBSWrr GPR32:$Rn, GPR32:$Rm)>;
+def : Pat<(sub GPR64:$Rn, GPR64:$Rm),
+ (SUBSXrr GPR64:$Rn, GPR64:$Rm)>;
+def : Pat<(sub GPR32:$Rn, arith_shifted_reg32:$Rm),
+ (SUBSWrs GPR32:$Rn, arith_shifted_reg32:$Rm)>;
+def : Pat<(sub GPR64:$Rn, arith_shifted_reg64:$Rm),
+ (SUBSXrs GPR64:$Rn, arith_shifted_reg64:$Rm)>;
+def : Pat<(sub GPR32sp:$R2, arith_extended_reg32<i32>:$R3),
+ (SUBSWrx GPR32sp:$R2, arith_extended_reg32<i32>:$R3)>;
+def : Pat<(sub GPR64sp:$R2, arith_extended_reg32to64<i64>:$R3),
+ (SUBSXrx GPR64sp:$R2, arith_extended_reg32to64<i64>:$R3)>;
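+// The idea: with plain subtraction also selected as SUBS, a later compare of
+// the same operands becomes an identical machine instruction and can be
+// removed by CSE; a SUB and a SUBS would not merge. A small illustrative C++
+// case (the commented assembly is only the intended outcome, not a guarantee):
+//
+//   int f(int a, int b) {
+//     int d = a - b;           // subs w8, w0, w1
+//     return (a > b) ? d : 0;  // the compare can reuse that same subs
+//   }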
+
+// Because of the immediate format for add/sub-imm instructions, the
+// expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
+// These patterns capture that transformation.
+let AddedComplexity = 1 in {
+def : Pat<(add GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
+ (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
+def : Pat<(add GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
+ (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
+def : Pat<(sub GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
+ (ADDWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
+def : Pat<(sub GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
+ (ADDXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
+}
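+// neg_addsub_shifted_imm only accepts constants whose *negation* fits the
+// 12-bit (optionally "LSL #12") add/sub immediate format, e.g. (add x, -5)
+// is selected as a subtract-immediate of 5. A rough sketch of that legality
+// check, with an illustrative name only:
+//
+//   #include <cstdint>
+//   bool isNegatedAddSubImm(int64_t imm) {
+//     if (imm >= 0) return false;
+//     uint64_t v = 0 - static_cast<uint64_t>(imm);   // well-defined negation
+//     return (v & ~UINT64_C(0xfff)) == 0 ||          // fits imm12
+//            (v & ~(UINT64_C(0xfff) << 12)) == 0;    // fits imm12, LSL #12
+//   }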
+
+// Because of the immediate format for add/sub-imm instructions, the
+// expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
+// These patterns capture that transformation.
+let AddedComplexity = 1 in {
+def : Pat<(AArch64add_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
+ (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
+def : Pat<(AArch64add_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
+ (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
+def : Pat<(AArch64sub_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
+ (ADDSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
+def : Pat<(AArch64sub_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
+ (ADDSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
+}
+
+def : InstAlias<"neg $dst, $src", (SUBWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
+def : InstAlias<"neg $dst, $src", (SUBXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
+def : InstAlias<"neg $dst, $src$shift",
+ (SUBWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
+def : InstAlias<"neg $dst, $src$shift",
+ (SUBXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
+
+def : InstAlias<"negs $dst, $src", (SUBSWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
+def : InstAlias<"negs $dst, $src", (SUBSXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
+def : InstAlias<"negs $dst, $src$shift",
+ (SUBSWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
+def : InstAlias<"negs $dst, $src$shift",
+ (SUBSXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
+
+
+// Unsigned/Signed divide
+defm UDIV : Div<0, "udiv", udiv>;
+defm SDIV : Div<1, "sdiv", sdiv>;
+let isCodeGenOnly = 1 in {
+defm UDIV_Int : Div<0, "udiv", int_aarch64_udiv>;
+defm SDIV_Int : Div<1, "sdiv", int_aarch64_sdiv>;
+}
+
+// Variable shift
+defm ASRV : Shift<0b10, "asr", sra>;
+defm LSLV : Shift<0b00, "lsl", shl>;
+defm LSRV : Shift<0b01, "lsr", srl>;
+defm RORV : Shift<0b11, "ror", rotr>;
+
+def : ShiftAlias<"asrv", ASRVWr, GPR32>;
+def : ShiftAlias<"asrv", ASRVXr, GPR64>;
+def : ShiftAlias<"lslv", LSLVWr, GPR32>;
+def : ShiftAlias<"lslv", LSLVXr, GPR64>;
+def : ShiftAlias<"lsrv", LSRVWr, GPR32>;
+def : ShiftAlias<"lsrv", LSRVXr, GPR64>;
+def : ShiftAlias<"rorv", RORVWr, GPR32>;
+def : ShiftAlias<"rorv", RORVXr, GPR64>;
+
+// Multiply-add
+let AddedComplexity = 7 in {
+defm MADD : MulAccum<0, "madd", add>;
+defm MSUB : MulAccum<1, "msub", sub>;
+
+def : Pat<(i32 (mul GPR32:$Rn, GPR32:$Rm)),
+ (MADDWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
+def : Pat<(i64 (mul GPR64:$Rn, GPR64:$Rm)),
+ (MADDXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
+
+def : Pat<(i32 (ineg (mul GPR32:$Rn, GPR32:$Rm))),
+ (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
+def : Pat<(i64 (ineg (mul GPR64:$Rn, GPR64:$Rm))),
+ (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
+} // AddedComplexity = 7
+
+let AddedComplexity = 5 in {
+def SMADDLrrr : WideMulAccum<0, 0b001, "smaddl", add, sext>;
+def SMSUBLrrr : WideMulAccum<1, 0b001, "smsubl", sub, sext>;
+def UMADDLrrr : WideMulAccum<0, 0b101, "umaddl", add, zext>;
+def UMSUBLrrr : WideMulAccum<1, 0b101, "umsubl", sub, zext>;
+
+def : Pat<(i64 (mul (sext GPR32:$Rn), (sext GPR32:$Rm))),
+ (SMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
+def : Pat<(i64 (mul (zext GPR32:$Rn), (zext GPR32:$Rm))),
+ (UMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
+
+def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (sext GPR32:$Rm)))),
+ (SMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
+def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (zext GPR32:$Rm)))),
+ (UMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
+} // AddedComplexity = 5
+
+def : MulAccumWAlias<"mul", MADDWrrr>;
+def : MulAccumXAlias<"mul", MADDXrrr>;
+def : MulAccumWAlias<"mneg", MSUBWrrr>;
+def : MulAccumXAlias<"mneg", MSUBXrrr>;
+def : WideMulAccumAlias<"smull", SMADDLrrr>;
+def : WideMulAccumAlias<"smnegl", SMSUBLrrr>;
+def : WideMulAccumAlias<"umull", UMADDLrrr>;
+def : WideMulAccumAlias<"umnegl", UMSUBLrrr>;
+
+// Multiply-high
+def SMULHrr : MulHi<0b010, "smulh", mulhs>;
+def UMULHrr : MulHi<0b110, "umulh", mulhu>;
+
+// CRC32
+def CRC32Brr : BaseCRC32<0, 0b00, 0, GPR32, int_aarch64_crc32b, "crc32b">;
+def CRC32Hrr : BaseCRC32<0, 0b01, 0, GPR32, int_aarch64_crc32h, "crc32h">;
+def CRC32Wrr : BaseCRC32<0, 0b10, 0, GPR32, int_aarch64_crc32w, "crc32w">;
+def CRC32Xrr : BaseCRC32<1, 0b11, 0, GPR64, int_aarch64_crc32x, "crc32x">;
+
+def CRC32CBrr : BaseCRC32<0, 0b00, 1, GPR32, int_aarch64_crc32cb, "crc32cb">;
+def CRC32CHrr : BaseCRC32<0, 0b01, 1, GPR32, int_aarch64_crc32ch, "crc32ch">;
+def CRC32CWrr : BaseCRC32<0, 0b10, 1, GPR32, int_aarch64_crc32cw, "crc32cw">;
+def CRC32CXrr : BaseCRC32<1, 0b11, 1, GPR64, int_aarch64_crc32cx, "crc32cx">;
-def BFIwwii : A64I_bitfield<0b0, 0b01, 0b0, (outs GPR32:$Rd),
- (ins GPR32:$src, GPR32:$Rn, bfi32_lsb:$ImmR, bfi32_width:$ImmS),
- "bfi\t$Rd, $Rn, $ImmR, $ImmS", [], NoItinerary> {
- // As above, no disassembler allowed.
- let isAsmParserOnly = 1;
- let Constraints = "$src = $Rd";
-}
-
-def BFIxxii : A64I_bitfield<0b1, 0b01, 0b1, (outs GPR64:$Rd),
- (ins GPR64:$src, GPR64:$Rn, bfi64_lsb:$ImmR, bfi64_width:$ImmS),
- "bfi\t$Rd, $Rn, $ImmR, $ImmS", [], NoItinerary> {
- // As above, no disassembler allowed.
- let isAsmParserOnly = 1;
- let Constraints = "$src = $Rd";
-}
-
//===----------------------------------------------------------------------===//
-// Compare and branch (immediate)
+// Logical instructions.
//===----------------------------------------------------------------------===//
-// Contains: CBZ, CBNZ
-
-class label_asmoperand<int width, int scale> : AsmOperandClass {
- let Name = "Label" # width # "_" # scale;
- let PredicateMethod = "isLabel<" # width # "," # scale # ">";
- let RenderMethod = "addLabelOperands<" # width # ", " # scale # ">";
- let DiagnosticType = "Label";
-}
-def label_wid19_scal4_asmoperand : label_asmoperand<19, 4>;
+// (immediate)
+defm ANDS : LogicalImmS<0b11, "ands", AArch64and_flag, "bics">;
+defm AND : LogicalImm<0b00, "and", and, "bic">;
+defm EOR : LogicalImm<0b10, "eor", xor, "eon">;
+defm ORR : LogicalImm<0b01, "orr", or, "orn">;
+
+// FIXME: these aliases *are* canonical sometimes (when movz can't be
+// used). Actually, it seems to be working right now, but putting logical_immXX
+// here is a bit dodgy on the AsmParser side too.
+def : InstAlias<"mov $dst, $imm", (ORRWri GPR32sp:$dst, WZR,
+ logical_imm32:$imm), 0>;
+def : InstAlias<"mov $dst, $imm", (ORRXri GPR64sp:$dst, XZR,
+ logical_imm64:$imm), 0>;
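+// For instance, 0x00ffff00 is a valid 32-bit logical immediate (a contiguous
+// run of 16 ones), but neither it nor its inverse is a single shifted 16-bit
+// chunk, so "mov w0, #0x00ffff00" can only be encoded as
+// "orr w0, wzr, #0x00ffff00"; that is one of the cases the FIXME above means.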
+
+
+// (register)
+defm ANDS : LogicalRegS<0b11, 0, "ands", AArch64and_flag>;
+defm BICS : LogicalRegS<0b11, 1, "bics",
+ BinOpFrag<(AArch64and_flag node:$LHS, (not node:$RHS))>>;
+defm AND : LogicalReg<0b00, 0, "and", and>;
+defm BIC : LogicalReg<0b00, 1, "bic",
+ BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
+defm EON : LogicalReg<0b10, 1, "eon",
+ BinOpFrag<(xor node:$LHS, (not node:$RHS))>>;
+defm EOR : LogicalReg<0b10, 0, "eor", xor>;
+defm ORN : LogicalReg<0b01, 1, "orn",
+ BinOpFrag<(or node:$LHS, (not node:$RHS))>>;
+defm ORR : LogicalReg<0b01, 0, "orr", or>;
+
+def : InstAlias<"mov $dst, $src", (ORRWrs GPR32:$dst, WZR, GPR32:$src, 0), 2>;
+def : InstAlias<"mov $dst, $src", (ORRXrs GPR64:$dst, XZR, GPR64:$src, 0), 2>;
+
+def : InstAlias<"mvn $Wd, $Wm", (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, 0), 3>;
+def : InstAlias<"mvn $Xd, $Xm", (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, 0), 3>;
+
+def : InstAlias<"mvn $Wd, $Wm$sh",
+ (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, logical_shift32:$sh), 2>;
+def : InstAlias<"mvn $Xd, $Xm$sh",
+ (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, logical_shift64:$sh), 2>;
+
+def : InstAlias<"tst $src1, $src2",
+ (ANDSWri WZR, GPR32:$src1, logical_imm32:$src2), 2>;
+def : InstAlias<"tst $src1, $src2",
+ (ANDSXri XZR, GPR64:$src1, logical_imm64:$src2), 2>;
+
+def : InstAlias<"tst $src1, $src2",
+ (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, 0), 3>;
+def : InstAlias<"tst $src1, $src2",
+ (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, 0), 3>;
+
+def : InstAlias<"tst $src1, $src2$sh",
+ (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, logical_shift32:$sh), 2>;
+def : InstAlias<"tst $src1, $src2$sh",
+ (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, logical_shift64:$sh), 2>;
+
+
+def : Pat<(not GPR32:$Wm), (ORNWrr WZR, GPR32:$Wm)>;
+def : Pat<(not GPR64:$Xm), (ORNXrr XZR, GPR64:$Xm)>;
-// All conditional immediate branches are the same really: 19 signed bits scaled
-// by the instruction-size (4).
-def bcc_target : Operand<OtherVT> {
- // This label is a 19-bit offset from PC, scaled by the instruction-width: 4.
- let ParserMatchClass = label_wid19_scal4_asmoperand;
- let PrintMethod = "printLabelOperand<19, 4>";
- let EncoderMethod = "getLabelOpValue<AArch64::fixup_a64_condbr>";
- let OperandType = "OPERAND_PCREL";
-}
-
-multiclass cmpbr_sizes<bit op, string asmop, ImmLeaf SETOP> {
- let isBranch = 1, isTerminator = 1 in {
- def x : A64I_cmpbr<0b1, op,
- (outs),
- (ins GPR64:$Rt, bcc_target:$Label),
- !strconcat(asmop,"\t$Rt, $Label"),
- [(A64br_cc (A64cmp i64:$Rt, 0), SETOP, bb:$Label)],
- NoItinerary>;
-
- def w : A64I_cmpbr<0b0, op,
- (outs),
- (ins GPR32:$Rt, bcc_target:$Label),
- !strconcat(asmop,"\t$Rt, $Label"),
- [(A64br_cc (A64cmp i32:$Rt, 0), SETOP, bb:$Label)],
- NoItinerary>;
- }
-}
-
-defm CBZ : cmpbr_sizes<0b0, "cbz", ImmLeaf<i32, [{
- return Imm == A64CC::EQ;
-}]> >;
-defm CBNZ : cmpbr_sizes<0b1, "cbnz", ImmLeaf<i32, [{
- return Imm == A64CC::NE;
-}]> >;
//===----------------------------------------------------------------------===//
-// Conditional branch (immediate) instructions
+// One operand data processing instructions.
//===----------------------------------------------------------------------===//
-// Contains: B.cc
-def cond_code_asmoperand : AsmOperandClass {
- let Name = "CondCode";
- let DiagnosticType = "CondCode";
-}
-
-def cond_code : Operand<i32>, ImmLeaf<i32, [{
- return Imm >= 0 && Imm <= 15;
-}]> {
- let PrintMethod = "printCondCodeOperand";
- let ParserMatchClass = cond_code_asmoperand;
-}
-
-def Bcc : A64I_condbr<0b0, 0b0, (outs),
- (ins cond_code:$Cond, bcc_target:$Label),
- "b.$Cond $Label", [(A64br_cc NZCV, (i32 imm:$Cond), bb:$Label)],
- NoItinerary> {
- let Uses = [NZCV];
- let isBranch = 1;
- let isTerminator = 1;
-}
+defm CLS : OneOperandData<0b101, "cls">;
+defm CLZ : OneOperandData<0b100, "clz", ctlz>;
+defm RBIT : OneOperandData<0b000, "rbit">;
+
+def : Pat<(int_aarch64_rbit GPR32:$Rn), (RBITWr $Rn)>;
+def : Pat<(int_aarch64_rbit GPR64:$Rn), (RBITXr $Rn)>;
+
+def REV16Wr : OneWRegData<0b001, "rev16",
+ UnOpFrag<(rotr (bswap node:$LHS), (i64 16))>>;
+def REV16Xr : OneXRegData<0b001, "rev16", null_frag>;
+
+def : Pat<(cttz GPR32:$Rn),
+ (CLZWr (RBITWr GPR32:$Rn))>;
+def : Pat<(cttz GPR64:$Rn),
+ (CLZXr (RBITXr GPR64:$Rn))>;
+def : Pat<(ctlz (or (shl (xor (sra GPR32:$Rn, (i64 31)), GPR32:$Rn), (i64 1)),
+ (i32 1))),
+ (CLSWr GPR32:$Rn)>;
+def : Pat<(ctlz (or (shl (xor (sra GPR64:$Rn, (i64 63)), GPR64:$Rn), (i64 1)),
+ (i64 1))),
+ (CLSXr GPR64:$Rn)>;
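+// The cttz lowering uses the identity cttz(x) == clz(rbit(x)): bit-reversal
+// moves the lowest set bit to the top, where clz counts the zeros in front of
+// it. The cls patterns match the usual ctlz-based expansion of "count leading
+// sign bits": cls(x) == clz(((x ^ (x >> (N-1))) << 1) | 1), with N the
+// register width and >> an arithmetic shift, which is exactly the dag above.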
+
+// Unlike the other one-operand instructions, the instructions with the "rev"
+// mnemonic do *not* just differ in the size bit, but actually use different
+// opcode bits for the different sizes.
+def REVWr : OneWRegData<0b010, "rev", bswap>;
+def REVXr : OneXRegData<0b011, "rev", bswap>;
+def REV32Xr : OneXRegData<0b010, "rev32",
+ UnOpFrag<(rotr (bswap node:$LHS), (i64 32))>>;
+
+// The bswap commutes with the rotr so we want a pattern for both possible
+// orders.
+def : Pat<(bswap (rotr GPR32:$Rn, (i64 16))), (REV16Wr GPR32:$Rn)>;
+def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;
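+// Worked example of the bswap/rotr equivalences used above:
+//   32-bit: x = 0x00112233, bswap(x) = 0x33221100,
+//           rotr(bswap(x), 16) = 0x11003322 == rev16(x)
+//   64-bit: x = 0x0011223344556677, bswap(x) = 0x7766554433221100,
+//           rotr(bswap(x), 32) = 0x3322110077665544 == rev32(x)
+// (This is also why REV16Xr gets null_frag above: rotating a 64-bit bswap by
+// 16 does not reproduce a per-halfword byte swap.)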
//===----------------------------------------------------------------------===//
-// Conditional compare (immediate) instructions
+// Bitfield immediate extraction instruction.
//===----------------------------------------------------------------------===//
-// Contains: CCMN, CCMP
-
-def uimm4_asmoperand : AsmOperandClass {
- let Name = "UImm4";
- let PredicateMethod = "isUImm<4>";
- let RenderMethod = "addImmOperands";
- let DiagnosticType = "UImm4";
-}
-
-def uimm4 : Operand<i32> {
- let ParserMatchClass = uimm4_asmoperand;
-}
-
-def uimm5 : Operand<i32> {
- let ParserMatchClass = uimm5_asmoperand;
-}
-
-// The only difference between this operand and the one for instructions like
-// B.cc is that it's parsed manually. The other get parsed implicitly as part of
-// the mnemonic handling.
-def cond_code_op_asmoperand : AsmOperandClass {
- let Name = "CondCodeOp";
- let RenderMethod = "addCondCodeOperands";
- let PredicateMethod = "isCondCode";
- let ParserMethod = "ParseCondCodeOperand";
- let DiagnosticType = "CondCode";
-}
-
-def cond_code_op : Operand<i32> {
- let PrintMethod = "printCondCodeOperand";
- let ParserMatchClass = cond_code_op_asmoperand;
-}
-
-class A64I_condcmpimmImpl<bit sf, bit op, RegisterClass GPR, string asmop>
- : A64I_condcmpimm<sf, op, 0b0, 0b0, 0b1, (outs),
- (ins GPR:$Rn, uimm5:$UImm5, uimm4:$NZCVImm, cond_code_op:$Cond),
- !strconcat(asmop, "\t$Rn, $UImm5, $NZCVImm, $Cond"),
- [], NoItinerary> {
- let Defs = [NZCV];
-}
-
-def CCMNwi : A64I_condcmpimmImpl<0b0, 0b0, GPR32, "ccmn">;
-def CCMNxi : A64I_condcmpimmImpl<0b1, 0b0, GPR64, "ccmn">;
-def CCMPwi : A64I_condcmpimmImpl<0b0, 0b1, GPR32, "ccmp">;
-def CCMPxi : A64I_condcmpimmImpl<0b1, 0b1, GPR64, "ccmp">;
+let neverHasSideEffects = 1 in
+defm EXTR : ExtractImm<"extr">;
+def : InstAlias<"ror $dst, $src, $shift",
+ (EXTRWrri GPR32:$dst, GPR32:$src, GPR32:$src, imm0_31:$shift)>;
+def : InstAlias<"ror $dst, $src, $shift",
+ (EXTRXrri GPR64:$dst, GPR64:$src, GPR64:$src, imm0_63:$shift)>;
+
+def : Pat<(rotr GPR32:$Rn, (i64 imm0_31:$imm)),
+ (EXTRWrri GPR32:$Rn, GPR32:$Rn, imm0_31:$imm)>;
+def : Pat<(rotr GPR64:$Rn, (i64 imm0_63:$imm)),
+ (EXTRXrri GPR64:$Rn, GPR64:$Rn, imm0_63:$imm)>;
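+// The ror alias and patterns rely on the identity rotr(x, s) == extr(x, x, s):
+// extracting a register-width field starting at bit s of the value formed by
+// concatenating the register with itself is exactly a rotate right. A tiny
+// illustrative sketch for the 32-bit case:
+//
+//   #include <cstdint>
+//   uint32_t ror32(uint32_t x, unsigned s) {  // 1 <= s <= 31; s == 0 is the identity
+//     return (x >> s) | (x << (32 - s));      // what "extr w0, w1, w1, #s" computes
+//   }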
//===----------------------------------------------------------------------===//
-// Conditional compare (register) instructions
+// Other bitfield immediate instructions.
//===----------------------------------------------------------------------===//
-// Contains: CCMN, CCMP
-
-class A64I_condcmpregImpl<bit sf, bit op, RegisterClass GPR, string asmop>
- : A64I_condcmpreg<sf, op, 0b0, 0b0, 0b1,
- (outs),
- (ins GPR:$Rn, GPR:$Rm, uimm4:$NZCVImm, cond_code_op:$Cond),
- !strconcat(asmop, "\t$Rn, $Rm, $NZCVImm, $Cond"),
- [], NoItinerary> {
- let Defs = [NZCV];
+let neverHasSideEffects = 1 in {
+defm BFM : BitfieldImmWith2RegArgs<0b01, "bfm">;
+defm SBFM : BitfieldImm<0b00, "sbfm">;
+defm UBFM : BitfieldImm<0b10, "ubfm">;
}
-def CCMNww : A64I_condcmpregImpl<0b0, 0b0, GPR32, "ccmn">;
-def CCMNxx : A64I_condcmpregImpl<0b1, 0b0, GPR64, "ccmn">;
-def CCMPww : A64I_condcmpregImpl<0b0, 0b1, GPR32, "ccmp">;
-def CCMPxx : A64I_condcmpregImpl<0b1, 0b1, GPR64, "ccmp">;
-
-//===----------------------------------------------------------------------===//
-// Conditional select instructions
-//===----------------------------------------------------------------------===//
-// Contains: CSEL, CSINC, CSINV, CSNEG + aliases CSET, CSETM, CINC, CINV, CNEG
-
-// Condition code which is encoded as the inversion (semantically rather than
-// bitwise) in the instruction.
-def inv_cond_code_op_asmoperand : AsmOperandClass {
- let Name = "InvCondCodeOp";
- let RenderMethod = "addInvCondCodeOperands";
- let PredicateMethod = "isCondCode";
- let ParserMethod = "ParseCondCodeOperand";
- let DiagnosticType = "CondCode";
-}
+def i32shift_a : Operand<i64>, SDNodeXForm<imm, [{
+ uint64_t enc = (32 - N->getZExtValue()) & 0x1f;
+ return CurDAG->getTargetConstant(enc, MVT::i64);
+}]>;
-def inv_cond_code_op : Operand<i32> {
- let ParserMatchClass = inv_cond_code_op_asmoperand;
-}
+def i32shift_b : Operand<i64>, SDNodeXForm<imm, [{
+ uint64_t enc = 31 - N->getZExtValue();
+ return CurDAG->getTargetConstant(enc, MVT::i64);
+}]>;
-// Having a separate operand for the selectable use-case is debatable, but gives
-// consistency with cond_code.
-def inv_cond_XFORM : SDNodeXForm<imm, [{
- A64CC::CondCodes CC = static_cast<A64CC::CondCodes>(N->getZExtValue());
- return CurDAG->getTargetConstant(A64InvertCondCode(CC), MVT::i32);
+// min(7, 31 - shift_amt)
+def i32shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
+ uint64_t enc = 31 - N->getZExtValue();
+ enc = enc > 7 ? 7 : enc;
+ return CurDAG->getTargetConstant(enc, MVT::i64);
}]>;
-def inv_cond_code
- : ImmLeaf<i32, [{ return Imm >= 0 && Imm <= 15; }], inv_cond_XFORM>;
+// min(15, 31 - shift_amt)
+def i32shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
+ uint64_t enc = 31 - N->getZExtValue();
+ enc = enc > 15 ? 15 : enc;
+ return CurDAG->getTargetConstant(enc, MVT::i64);
+}]>;
+def i64shift_a : Operand<i64>, SDNodeXForm<imm, [{
+ uint64_t enc = (64 - N->getZExtValue()) & 0x3f;
+ return CurDAG->getTargetConstant(enc, MVT::i64);
+}]>;
-multiclass A64I_condselSizes<bit op, bits<2> op2, string asmop,
- SDPatternOperator select> {
- let Uses = [NZCV] in {
- def wwwc : A64I_condsel<0b0, op, 0b0, op2,
- (outs GPR32:$Rd),
- (ins GPR32:$Rn, GPR32:$Rm, cond_code_op:$Cond),
- !strconcat(asmop, "\t$Rd, $Rn, $Rm, $Cond"),
- [(set i32:$Rd, (select i32:$Rn, i32:$Rm))],
- NoItinerary>;
+def i64shift_b : Operand<i64>, SDNodeXForm<imm, [{
+ uint64_t enc = 63 - N->getZExtValue();
+ return CurDAG->getTargetConstant(enc, MVT::i64);
+}]>;
+// min(7, 63 - shift_amt)
+def i64shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
+ uint64_t enc = 63 - N->getZExtValue();
+ enc = enc > 7 ? 7 : enc;
+ return CurDAG->getTargetConstant(enc, MVT::i64);
+}]>;
- def xxxc : A64I_condsel<0b1, op, 0b0, op2,
- (outs GPR64:$Rd),
- (ins GPR64:$Rn, GPR64:$Rm, cond_code_op:$Cond),
- !strconcat(asmop, "\t$Rd, $Rn, $Rm, $Cond"),
- [(set i64:$Rd, (select i64:$Rn, i64:$Rm))],
- NoItinerary>;
- }
-}
+// min(15, 63 - shift_amt)
+def i64shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
+ uint64_t enc = 63 - N->getZExtValue();
+ enc = enc > 15 ? 15 : enc;
+ return CurDAG->getTargetConstant(enc, MVT::i64);
+}]>;
-def simple_select
- : PatFrag<(ops node:$lhs, node:$rhs),
- (A64select_cc NZCV, node:$lhs, node:$rhs, (i32 imm:$Cond))>;
-
-class complex_select<SDPatternOperator opnode>
- : PatFrag<(ops node:$lhs, node:$rhs),
- (A64select_cc NZCV, node:$lhs, (opnode node:$rhs), (i32 imm:$Cond))>;
-
-
-defm CSEL : A64I_condselSizes<0b0, 0b00, "csel", simple_select>;
-defm CSINC : A64I_condselSizes<0b0, 0b01, "csinc",
- complex_select<PatFrag<(ops node:$val),
- (add node:$val, 1)>>>;
-defm CSINV : A64I_condselSizes<0b1, 0b00, "csinv", complex_select<not>>;
-defm CSNEG : A64I_condselSizes<0b1, 0b01, "csneg", complex_select<ineg>>;
-
-// Now the instruction aliases, which fit nicely into LLVM's model:
-
-def : InstAlias<"cset $Rd, $Cond",
- (CSINCwwwc GPR32:$Rd, WZR, WZR, inv_cond_code_op:$Cond)>;
-def : InstAlias<"cset $Rd, $Cond",
- (CSINCxxxc GPR64:$Rd, XZR, XZR, inv_cond_code_op:$Cond)>;
-def : InstAlias<"csetm $Rd, $Cond",
- (CSINVwwwc GPR32:$Rd, WZR, WZR, inv_cond_code_op:$Cond)>;
-def : InstAlias<"csetm $Rd, $Cond",
- (CSINVxxxc GPR64:$Rd, XZR, XZR, inv_cond_code_op:$Cond)>;
-def : InstAlias<"cinc $Rd, $Rn, $Cond",
- (CSINCwwwc GPR32:$Rd, GPR32:$Rn, GPR32:$Rn, inv_cond_code_op:$Cond)>;
-def : InstAlias<"cinc $Rd, $Rn, $Cond",
- (CSINCxxxc GPR64:$Rd, GPR64:$Rn, GPR64:$Rn, inv_cond_code_op:$Cond)>;
-def : InstAlias<"cinv $Rd, $Rn, $Cond",
- (CSINVwwwc GPR32:$Rd, GPR32:$Rn, GPR32:$Rn, inv_cond_code_op:$Cond)>;
-def : InstAlias<"cinv $Rd, $Rn, $Cond",
- (CSINVxxxc GPR64:$Rd, GPR64:$Rn, GPR64:$Rn, inv_cond_code_op:$Cond)>;
-def : InstAlias<"cneg $Rd, $Rn, $Cond",
- (CSNEGwwwc GPR32:$Rd, GPR32:$Rn, GPR32:$Rn, inv_cond_code_op:$Cond)>;
-def : InstAlias<"cneg $Rd, $Rn, $Cond",
- (CSNEGxxxc GPR64:$Rd, GPR64:$Rn, GPR64:$Rn, inv_cond_code_op:$Cond)>;
-
-// Finally some helper patterns.
-
-// For CSET (a.k.a. zero-extension of icmp)
-def : Pat<(A64select_cc NZCV, 0, 1, cond_code:$Cond),
- (CSINCwwwc WZR, WZR, cond_code:$Cond)>;
-def : Pat<(A64select_cc NZCV, 1, 0, inv_cond_code:$Cond),
- (CSINCwwwc WZR, WZR, inv_cond_code:$Cond)>;
-
-def : Pat<(A64select_cc NZCV, 0, 1, cond_code:$Cond),
- (CSINCxxxc XZR, XZR, cond_code:$Cond)>;
-def : Pat<(A64select_cc NZCV, 1, 0, inv_cond_code:$Cond),
- (CSINCxxxc XZR, XZR, inv_cond_code:$Cond)>;
-
-// For CSETM (a.k.a. sign-extension of icmp)
-def : Pat<(A64select_cc NZCV, 0, -1, cond_code:$Cond),
- (CSINVwwwc WZR, WZR, cond_code:$Cond)>;
-def : Pat<(A64select_cc NZCV, -1, 0, inv_cond_code:$Cond),
- (CSINVwwwc WZR, WZR, inv_cond_code:$Cond)>;
-
-def : Pat<(A64select_cc NZCV, 0, -1, cond_code:$Cond),
- (CSINVxxxc XZR, XZR, cond_code:$Cond)>;
-def : Pat<(A64select_cc NZCV, -1, 0, inv_cond_code:$Cond),
- (CSINVxxxc XZR, XZR, inv_cond_code:$Cond)>;
-
-// CINC, CINV and CNEG get dealt with automatically, which leaves the issue of
-// commutativity. The instructions are to complex for isCommutable to be used,
-// so we have to create the patterns manually:
-
-// No commutable pattern for CSEL since the commuted version is isomorphic.
-
-// CSINC
-def :Pat<(A64select_cc NZCV, (add i32:$Rm, 1), i32:$Rn, inv_cond_code:$Cond),
- (CSINCwwwc $Rn, $Rm, inv_cond_code:$Cond)>;
-def :Pat<(A64select_cc NZCV, (add i64:$Rm, 1), i64:$Rn, inv_cond_code:$Cond),
- (CSINCxxxc $Rn, $Rm, inv_cond_code:$Cond)>;
-
-// CSINV
-def :Pat<(A64select_cc NZCV, (not i32:$Rm), i32:$Rn, inv_cond_code:$Cond),
- (CSINVwwwc $Rn, $Rm, inv_cond_code:$Cond)>;
-def :Pat<(A64select_cc NZCV, (not i64:$Rm), i64:$Rn, inv_cond_code:$Cond),
- (CSINVxxxc $Rn, $Rm, inv_cond_code:$Cond)>;
-
-// CSNEG
-def :Pat<(A64select_cc NZCV, (ineg i32:$Rm), i32:$Rn, inv_cond_code:$Cond),
- (CSNEGwwwc $Rn, $Rm, inv_cond_code:$Cond)>;
-def :Pat<(A64select_cc NZCV, (ineg i64:$Rm), i64:$Rn, inv_cond_code:$Cond),
- (CSNEGxxxc $Rn, $Rm, inv_cond_code:$Cond)>;
+// min(31, 63 - shift_amt)
+def i64shift_sext_i32 : Operand<i64>, SDNodeXForm<imm, [{
+ uint64_t enc = 63 - N->getZExtValue();
+ enc = enc > 31 ? 31 : enc;
+ return CurDAG->getTargetConstant(enc, MVT::i64);
+}]>;
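+// The clamped variants above are presumably for folding a shift into an 8-,
+// 16- or 32-bit sign-extension (SBFM), where imms may not exceed the width of
+// the extended source minus one.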
-//===----------------------------------------------------------------------===//
-// Data Processing (1 source) instructions
-//===----------------------------------------------------------------------===//
-// Contains: RBIT, REV16, REV, REV32, CLZ, CLS.
-
-// We define an unary operator which always fails. We will use this to
-// define unary operators that cannot be matched.
-
-class A64I_dp_1src_impl<bit sf, bits<6> opcode, string asmop,
- list<dag> patterns, RegisterClass GPRrc,
- InstrItinClass itin>:
- A64I_dp_1src<sf,
- 0,
- 0b00000,
- opcode,
- !strconcat(asmop, "\t$Rd, $Rn"),
- (outs GPRrc:$Rd),
- (ins GPRrc:$Rn),
- patterns,
- itin>;
-
-multiclass A64I_dp_1src <bits<6> opcode, string asmop> {
- let hasSideEffects = 0 in {
- def ww : A64I_dp_1src_impl<0b0, opcode, asmop, [], GPR32, NoItinerary>;
- def xx : A64I_dp_1src_impl<0b1, opcode, asmop, [], GPR64, NoItinerary>;
- }
-}
+def : Pat<(shl GPR32:$Rn, (i64 imm0_31:$imm)),
+ (UBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
+ (i64 (i32shift_b imm0_31:$imm)))>;
+def : Pat<(shl GPR64:$Rn, (i64 imm0_63:$imm)),
+ (UBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
+ (i64 (i64shift_b imm0_63:$imm)))>;
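+// For example, assuming i64shift_a (defined just above this hunk) encodes
+// (64 - shift) mod 64, (shl x, (i64 3)) becomes UBFMXri x, #61, #60, which is
+// the architectural alias for "lsl x, #3".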
-defm RBIT : A64I_dp_1src<0b000000, "rbit">;
-defm CLS : A64I_dp_1src<0b000101, "cls">;
-defm CLZ : A64I_dp_1src<0b000100, "clz">;
-
-def : Pat<(ctlz i32:$Rn), (CLZww $Rn)>;
-def : Pat<(ctlz i64:$Rn), (CLZxx $Rn)>;
-def : Pat<(ctlz_zero_undef i32:$Rn), (CLZww $Rn)>;
-def : Pat<(ctlz_zero_undef i64:$Rn), (CLZxx $Rn)>;
-
-def : Pat<(cttz i32:$Rn), (CLZww (RBITww $Rn))>;
-def : Pat<(cttz i64:$Rn), (CLZxx (RBITxx $Rn))>;
-def : Pat<(cttz_zero_undef i32:$Rn), (CLZww (RBITww $Rn))>;
-def : Pat<(cttz_zero_undef i64:$Rn), (CLZxx (RBITxx $Rn))>;
-
-
-def REVww : A64I_dp_1src_impl<0b0, 0b000010, "rev",
- [(set i32:$Rd, (bswap i32:$Rn))],
- GPR32, NoItinerary>;
-def REVxx : A64I_dp_1src_impl<0b1, 0b000011, "rev",
- [(set i64:$Rd, (bswap i64:$Rn))],
- GPR64, NoItinerary>;
-def REV32xx : A64I_dp_1src_impl<0b1, 0b000010, "rev32",
- [(set i64:$Rd, (bswap (rotr i64:$Rn, (i64 32))))],
- GPR64, NoItinerary>;
-def REV16ww : A64I_dp_1src_impl<0b0, 0b000001, "rev16",
- [(set i32:$Rd, (bswap (rotr i32:$Rn, (i64 16))))],
- GPR32,
- NoItinerary>;
-def REV16xx : A64I_dp_1src_impl<0b1, 0b000001, "rev16", [], GPR64, NoItinerary>;
+let AddedComplexity = 10 in {
+def : Pat<(sra GPR32:$Rn, (i64 imm0_31:$imm)),
+ (SBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
+def : Pat<(sra GPR64:$Rn, (i64 imm0_63:$imm)),
+ (SBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
+}
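+// An arithmetic shift right by #imm is SBFM with immr = imm and imms = 31/63;
+// the sign bit is replicated into the vacated high bits.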
+
+def : InstAlias<"asr $dst, $src, $shift",
+ (SBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
+def : InstAlias<"asr $dst, $src, $shift",
+ (SBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
+def : InstAlias<"sxtb $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
+def : InstAlias<"sxtb $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
+def : InstAlias<"sxth $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
+def : InstAlias<"sxth $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
+def : InstAlias<"sxtw $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
+
+def : Pat<(srl GPR32:$Rn, (i64 imm0_31:$imm)),
+ (UBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
+def : Pat<(srl GPR64:$Rn, (i64 imm0_63:$imm)),
+ (UBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
+
+def : InstAlias<"lsr $dst, $src, $shift",
+ (UBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
+def : InstAlias<"lsr $dst, $src, $shift",
+ (UBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
+def : InstAlias<"uxtb $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
+def : InstAlias<"uxtb $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
+def : InstAlias<"uxth $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
+def : InstAlias<"uxth $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
+def : InstAlias<"uxtw $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
//===----------------------------------------------------------------------===//
-// Data Processing (2 sources) instructions
+// Conditionally set flags instructions.
//===----------------------------------------------------------------------===//
-// Contains: CRC32C?[BHWX], UDIV, SDIV, LSLV, LSRV, ASRV, RORV + aliases LSL,
-// LSR, ASR, ROR
-
-
-class dp_2src_impl<bit sf, bits<6> opcode, string asmop, list<dag> patterns,
- RegisterClass GPRsp,
- InstrItinClass itin>:
- A64I_dp_2src<sf,
- opcode,
- 0,
- !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
- (outs GPRsp:$Rd),
- (ins GPRsp:$Rn, GPRsp:$Rm),
- patterns,
- itin>;
-
-multiclass dp_2src_crc<bit c, string asmop> {
- def B_www : dp_2src_impl<0b0, {0, 1, 0, c, 0, 0},
- !strconcat(asmop, "b"), [], GPR32, NoItinerary>;
- def H_www : dp_2src_impl<0b0, {0, 1, 0, c, 0, 1},
- !strconcat(asmop, "h"), [], GPR32, NoItinerary>;
- def W_www : dp_2src_impl<0b0, {0, 1, 0, c, 1, 0},
- !strconcat(asmop, "w"), [], GPR32, NoItinerary>;
- def X_wwx : A64I_dp_2src<0b1, {0, 1, 0, c, 1, 1}, 0b0,
- !strconcat(asmop, "x\t$Rd, $Rn, $Rm"),
- (outs GPR32:$Rd), (ins GPR32:$Rn, GPR64:$Rm), [],
- NoItinerary>;
-}
-
-multiclass dp_2src_zext <bits<6> opcode, string asmop, SDPatternOperator op> {
- def www : dp_2src_impl<0b0,
- opcode,
- asmop,
- [(set i32:$Rd,
- (op i32:$Rn, (i64 (zext i32:$Rm))))],
- GPR32,
- NoItinerary>;
- def xxx : dp_2src_impl<0b1,
- opcode,
- asmop,
- [(set i64:$Rd, (op i64:$Rn, i64:$Rm))],
- GPR64,
- NoItinerary>;
-}
-
-
-multiclass dp_2src <bits<6> opcode, string asmop, SDPatternOperator op> {
- def www : dp_2src_impl<0b0,
- opcode,
- asmop,
- [(set i32:$Rd, (op i32:$Rn, i32:$Rm))],
- GPR32,
- NoItinerary>;
- def xxx : dp_2src_impl<0b1,
- opcode,
- asmop,
- [(set i64:$Rd, (op i64:$Rn, i64:$Rm))],
- GPR64,
- NoItinerary>;
-}
+defm CCMN : CondSetFlagsImm<0, "ccmn">;
+defm CCMP : CondSetFlagsImm<1, "ccmp">;
-// Here we define the data processing 2 source instructions.
-defm CRC32 : dp_2src_crc<0b0, "crc32">;
-defm CRC32C : dp_2src_crc<0b1, "crc32c">;
-
-defm UDIV : dp_2src<0b000010, "udiv", udiv>;
-defm SDIV : dp_2src<0b000011, "sdiv", sdiv>;
-
-defm LSLV : dp_2src_zext<0b001000, "lsl", shl>;
-defm LSRV : dp_2src_zext<0b001001, "lsr", srl>;
-defm ASRV : dp_2src_zext<0b001010, "asr", sra>;
-defm RORV : dp_2src_zext<0b001011, "ror", rotr>;
-
-// Extra patterns for an incoming 64-bit value for a 32-bit
-// operation. Since the LLVM operations are undefined (as in C) if the
-// RHS is out of range, it's perfectly permissible to discard the high
-// bits of the GPR64.
-def : Pat<(shl i32:$Rn, i64:$Rm),
- (LSLVwww $Rn, (EXTRACT_SUBREG $Rm, sub_32))>;
-def : Pat<(srl i32:$Rn, i64:$Rm),
- (LSRVwww $Rn, (EXTRACT_SUBREG $Rm, sub_32))>;
-def : Pat<(sra i32:$Rn, i64:$Rm),
- (ASRVwww $Rn, (EXTRACT_SUBREG $Rm, sub_32))>;
-def : Pat<(rotr i32:$Rn, i64:$Rm),
- (RORVwww $Rn, (EXTRACT_SUBREG $Rm, sub_32))>;
-
-// Here we define the aliases for the data processing 2 source instructions.
-def LSL_mnemonic : MnemonicAlias<"lslv", "lsl">;
-def LSR_mnemonic : MnemonicAlias<"lsrv", "lsr">;
-def ASR_menmonic : MnemonicAlias<"asrv", "asr">;
-def ROR_menmonic : MnemonicAlias<"rorv", "ror">;
+defm CCMN : CondSetFlagsReg<0, "ccmn">;
+defm CCMP : CondSetFlagsReg<1, "ccmp">;
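+// When the condition holds, ccmn/ccmp set NZCV from comparing Rn with the
+// second operand (added for ccmn, subtracted for ccmp); otherwise NZCV is
+// loaded directly from the 4-bit immediate flags field.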
//===----------------------------------------------------------------------===//
-// Data Processing (3 sources) instructions
+// Conditional select instructions.
//===----------------------------------------------------------------------===//
-// Contains: MADD, MSUB, SMADDL, SMSUBL, SMULH, UMADDL, UMSUBL, UMULH
-// + aliases MUL, MNEG, SMULL, SMNEGL, UMULL, UMNEGL
-
-class A64I_dp3_4operand<bit sf, bits<6> opcode, RegisterClass AccReg,
- ValueType AccTy, RegisterClass SrcReg,
- string asmop, dag pattern>
- : A64I_dp3<sf, opcode,
- (outs AccReg:$Rd), (ins SrcReg:$Rn, SrcReg:$Rm, AccReg:$Ra),
- !strconcat(asmop, "\t$Rd, $Rn, $Rm, $Ra"),
- [(set AccTy:$Rd, pattern)], NoItinerary> {
- RegisterClass AccGPR = AccReg;
- RegisterClass SrcGPR = SrcReg;
-}
-
-def MADDwwww : A64I_dp3_4operand<0b0, 0b000000, GPR32, i32, GPR32, "madd",
- (add i32:$Ra, (mul i32:$Rn, i32:$Rm))>;
-def MADDxxxx : A64I_dp3_4operand<0b1, 0b000000, GPR64, i64, GPR64, "madd",
- (add i64:$Ra, (mul i64:$Rn, i64:$Rm))>;
-
-def MSUBwwww : A64I_dp3_4operand<0b0, 0b000001, GPR32, i32, GPR32, "msub",
- (sub i32:$Ra, (mul i32:$Rn, i32:$Rm))>;
-def MSUBxxxx : A64I_dp3_4operand<0b1, 0b000001, GPR64, i64, GPR64, "msub",
- (sub i64:$Ra, (mul i64:$Rn, i64:$Rm))>;
-
-def SMADDLxwwx : A64I_dp3_4operand<0b1, 0b000010, GPR64, i64, GPR32, "smaddl",
- (add i64:$Ra, (mul (i64 (sext i32:$Rn)), (sext i32:$Rm)))>;
-def SMSUBLxwwx : A64I_dp3_4operand<0b1, 0b000011, GPR64, i64, GPR32, "smsubl",
- (sub i64:$Ra, (mul (i64 (sext i32:$Rn)), (sext i32:$Rm)))>;
-
-def UMADDLxwwx : A64I_dp3_4operand<0b1, 0b001010, GPR64, i64, GPR32, "umaddl",
- (add i64:$Ra, (mul (i64 (zext i32:$Rn)), (zext i32:$Rm)))>;
-def UMSUBLxwwx : A64I_dp3_4operand<0b1, 0b001011, GPR64, i64, GPR32, "umsubl",
- (sub i64:$Ra, (mul (i64 (zext i32:$Rn)), (zext i32:$Rm)))>;
-
-let isCommutable = 1, PostEncoderMethod = "fixMulHigh" in {
- def UMULHxxx : A64I_dp3<0b1, 0b001100, (outs GPR64:$Rd),
- (ins GPR64:$Rn, GPR64:$Rm),
- "umulh\t$Rd, $Rn, $Rm",
- [(set i64:$Rd, (mulhu i64:$Rn, i64:$Rm))],
- NoItinerary>;
-
- def SMULHxxx : A64I_dp3<0b1, 0b000100, (outs GPR64:$Rd),
- (ins GPR64:$Rn, GPR64:$Rm),
- "smulh\t$Rd, $Rn, $Rm",
- [(set i64:$Rd, (mulhs i64:$Rn, i64:$Rm))],
- NoItinerary>;
-}
-
-multiclass A64I_dp3_3operand<string asmop, A64I_dp3_4operand INST,
- Register ZR, dag pattern> {
- def : InstAlias<asmop # " $Rd, $Rn, $Rm",
- (INST INST.AccGPR:$Rd, INST.SrcGPR:$Rn, INST.SrcGPR:$Rm, ZR)>;
-
- def : Pat<pattern, (INST $Rn, $Rm, ZR)>;
-}
-
-defm : A64I_dp3_3operand<"mul", MADDwwww, WZR, (mul i32:$Rn, i32:$Rm)>;
-defm : A64I_dp3_3operand<"mul", MADDxxxx, XZR, (mul i64:$Rn, i64:$Rm)>;
-
-defm : A64I_dp3_3operand<"mneg", MSUBwwww, WZR,
- (sub 0, (mul i32:$Rn, i32:$Rm))>;
-defm : A64I_dp3_3operand<"mneg", MSUBxxxx, XZR,
- (sub 0, (mul i64:$Rn, i64:$Rm))>;
-
-defm : A64I_dp3_3operand<"smull", SMADDLxwwx, XZR,
- (mul (i64 (sext i32:$Rn)), (sext i32:$Rm))>;
-defm : A64I_dp3_3operand<"smnegl", SMSUBLxwwx, XZR,
- (sub 0, (mul (i64 (sext i32:$Rn)), (sext i32:$Rm)))>;
-
-defm : A64I_dp3_3operand<"umull", UMADDLxwwx, XZR,
- (mul (i64 (zext i32:$Rn)), (zext i32:$Rm))>;
-defm : A64I_dp3_3operand<"umnegl", UMSUBLxwwx, XZR,
- (sub 0, (mul (i64 (zext i32:$Rn)), (zext i32:$Rm)))>;
-
+defm CSEL : CondSelect<0, 0b00, "csel">;
+
+def inc : PatFrag<(ops node:$in), (add node:$in, 1)>;
+defm CSINC : CondSelectOp<0, 0b01, "csinc", inc>;
+defm CSINV : CondSelectOp<1, 0b00, "csinv", not>;
+defm CSNEG : CondSelectOp<1, 0b01, "csneg", ineg>;
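+// csinc/csinv/csneg return the first source when the condition holds and the
+// incremented, inverted or negated second source otherwise.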
+
+def : Pat<(AArch64csinv GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
+ (CSINVWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
+def : Pat<(AArch64csinv GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
+ (CSINVXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
+def : Pat<(AArch64csneg GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
+ (CSNEGWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
+def : Pat<(AArch64csneg GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
+ (CSNEGXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
+def : Pat<(AArch64csinc GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
+ (CSINCWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
+def : Pat<(AArch64csinc GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
+ (CSINCXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
+
+def : Pat<(AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV),
+ (CSINCWr WZR, WZR, (i32 imm:$cc))>;
+def : Pat<(AArch64csel (i64 0), (i64 1), (i32 imm:$cc), NZCV),
+ (CSINCXr XZR, XZR, (i32 imm:$cc))>;
+def : Pat<(AArch64csel (i32 0), (i32 -1), (i32 imm:$cc), NZCV),
+ (CSINVWr WZR, WZR, (i32 imm:$cc))>;
+def : Pat<(AArch64csel (i64 0), (i64 -1), (i32 imm:$cc), NZCV),
+ (CSINVXr XZR, XZR, (i32 imm:$cc))>;
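+// A select between the constants 0 and 1 (or 0 and -1) needs no register
+// inputs: CSINC/CSINV of WZR/XZR yields 0 when the condition holds and 1
+// (resp. -1) otherwise.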
+
+// The aliased instruction uses the inverse of the condition code given in the
+// alias. The parser already inverts the condition code for these aliases.
+def : InstAlias<"cset $dst, $cc",
+ (CSINCWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
+def : InstAlias<"cset $dst, $cc",
+ (CSINCXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;
+
+def : InstAlias<"csetm $dst, $cc",
+ (CSINVWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
+def : InstAlias<"csetm $dst, $cc",
+ (CSINVXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;
+
+def : InstAlias<"cinc $dst, $src, $cc",
+ (CSINCWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
+def : InstAlias<"cinc $dst, $src, $cc",
+ (CSINCXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
+
+def : InstAlias<"cinv $dst, $src, $cc",
+ (CSINVWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
+def : InstAlias<"cinv $dst, $src, $cc",
+ (CSINVXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
+
+def : InstAlias<"cneg $dst, $src, $cc",
+ (CSNEGWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
+def : InstAlias<"cneg $dst, $src, $cc",
+ (CSNEGXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
//===----------------------------------------------------------------------===//
-// Exception generation
+// PC-relative instructions.
//===----------------------------------------------------------------------===//
-// Contains: SVC, HVC, SMC, BRK, HLT, DCPS1, DCPS2, DCPS3
+let isReMaterializable = 1 in {
+let neverHasSideEffects = 1, mayStore = 0, mayLoad = 0 in {
+def ADR : ADRI<0, "adr", adrlabel, []>;
+} // neverHasSideEffects = 1
-def uimm16_asmoperand : AsmOperandClass {
- let Name = "UImm16";
- let PredicateMethod = "isUImm<16>";
- let RenderMethod = "addImmOperands";
- let DiagnosticType = "UImm16";
-}
+def ADRP : ADRI<1, "adrp", adrplabel,
+ [(set GPR64:$Xd, (AArch64adrp tglobaladdr:$label))]>;
+} // isReMaterializable = 1
-def uimm16 : Operand<i32> {
- let ParserMatchClass = uimm16_asmoperand;
-}
-
-class A64I_exceptImpl<bits<3> opc, bits<2> ll, string asmop>
- : A64I_exception<opc, 0b000, ll, (outs), (ins uimm16:$UImm16),
- !strconcat(asmop, "\t$UImm16"), [], NoItinerary> {
- let isBranch = 1;
- let isTerminator = 1;
-}
-
-def SVCi : A64I_exceptImpl<0b000, 0b01, "svc">;
-def HVCi : A64I_exceptImpl<0b000, 0b10, "hvc">;
-def SMCi : A64I_exceptImpl<0b000, 0b11, "smc">;
-def BRKi : A64I_exceptImpl<0b001, 0b00, "brk">;
-def HLTi : A64I_exceptImpl<0b010, 0b00, "hlt">;
-
-def DCPS1i : A64I_exceptImpl<0b101, 0b01, "dcps1">;
-def DCPS2i : A64I_exceptImpl<0b101, 0b10, "dcps2">;
-def DCPS3i : A64I_exceptImpl<0b101, 0b11, "dcps3">;
-
-// The immediate is optional for the DCPS instructions, defaulting to 0.
-def : InstAlias<"dcps1", (DCPS1i 0)>;
-def : InstAlias<"dcps2", (DCPS2i 0)>;
-def : InstAlias<"dcps3", (DCPS3i 0)>;
+// page address of a constant pool entry, block address
+def : Pat<(AArch64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>;
+def : Pat<(AArch64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>;
//===----------------------------------------------------------------------===//
-// Extract (immediate)
+// Unconditional branch (register) instructions.
//===----------------------------------------------------------------------===//
-// Contains: EXTR + alias ROR
-
-def EXTRwwwi : A64I_extract<0b0, 0b000, 0b0,
- (outs GPR32:$Rd),
- (ins GPR32:$Rn, GPR32:$Rm, bitfield32_imm:$LSB),
- "extr\t$Rd, $Rn, $Rm, $LSB",
- [(set i32:$Rd,
- (A64Extr i32:$Rn, i32:$Rm, imm:$LSB))],
- NoItinerary>;
-def EXTRxxxi : A64I_extract<0b1, 0b000, 0b1,
- (outs GPR64:$Rd),
- (ins GPR64:$Rn, GPR64:$Rm, bitfield64_imm:$LSB),
- "extr\t$Rd, $Rn, $Rm, $LSB",
- [(set i64:$Rd,
- (A64Extr i64:$Rn, i64:$Rm, imm:$LSB))],
- NoItinerary>;
-
-def : InstAlias<"ror $Rd, $Rs, $LSB",
- (EXTRwwwi GPR32:$Rd, GPR32:$Rs, GPR32:$Rs, bitfield32_imm:$LSB)>;
-def : InstAlias<"ror $Rd, $Rs, $LSB",
- (EXTRxxxi GPR64:$Rd, GPR64:$Rs, GPR64:$Rs, bitfield64_imm:$LSB)>;
-
-def : Pat<(rotr i32:$Rn, bitfield32_imm:$LSB),
- (EXTRwwwi $Rn, $Rn, bitfield32_imm:$LSB)>;
-def : Pat<(rotr i64:$Rn, bitfield64_imm:$LSB),
- (EXTRxxxi $Rn, $Rn, bitfield64_imm:$LSB)>;
-//===----------------------------------------------------------------------===//
-// Floating-point compare instructions
-//===----------------------------------------------------------------------===//
-// Contains: FCMP, FCMPE
+let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
+def RET : BranchReg<0b0010, "ret", []>;
+def DRPS : SpecialReturn<0b0101, "drps">;
+def ERET : SpecialReturn<0b0100, "eret">;
+} // isReturn = 1, isTerminator = 1, isBarrier = 1
-def fpzero_asmoperand : AsmOperandClass {
- let Name = "FPZero";
- let ParserMethod = "ParseFPImmOperand";
- let DiagnosticType = "FPZero";
-}
+// Default to the LR register.
+def : InstAlias<"ret", (RET LR)>;
-def fpz32 : Operand<f32>,
- ComplexPattern<f32, 1, "SelectFPZeroOperand", [fpimm]> {
- let ParserMatchClass = fpzero_asmoperand;
- let PrintMethod = "printFPZeroOperand";
- let DecoderMethod = "DecodeFPZeroOperand";
-}
+let isCall = 1, Defs = [LR], Uses = [SP] in {
+def BLR : BranchReg<0b0001, "blr", [(AArch64call GPR64:$Rn)]>;
+} // isCall
-def fpz64 : Operand<f64>,
- ComplexPattern<f64, 1, "SelectFPZeroOperand", [fpimm]> {
- let ParserMatchClass = fpzero_asmoperand;
- let PrintMethod = "printFPZeroOperand";
- let DecoderMethod = "DecodeFPZeroOperand";
-}
+let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
+def BR : BranchReg<0b0000, "br", [(brind GPR64:$Rn)]>;
+} // isBranch, isTerminator, isBarrier, isIndirectBranch
-def fpz64movi : Operand<i64>,
- ComplexPattern<f64, 1, "SelectFPZeroOperand", [fpimm]> {
- let ParserMatchClass = fpzero_asmoperand;
- let PrintMethod = "printFPZeroOperand";
- let DecoderMethod = "DecodeFPZeroOperand";
+// Create a separate pseudo-instruction for codegen to use so that we don't
+// flag lr as used in every function. It'll be restored before the RET by the
+// epilogue if it's legitimately used.
+def RET_ReallyLR : Pseudo<(outs), (ins), [(AArch64retflag)]> {
+ let isTerminator = 1;
+ let isBarrier = 1;
+ let isReturn = 1;
}
-multiclass A64I_fpcmpSignal<bits<2> type, bit imm, dag ins, dag pattern> {
- def _quiet : A64I_fpcmp<0b0, 0b0, type, 0b00, {0b0, imm, 0b0, 0b0, 0b0},
- (outs), ins, "fcmp\t$Rn, $Rm", [pattern],
- NoItinerary> {
- let Defs = [NZCV];
- }
-
- def _sig : A64I_fpcmp<0b0, 0b0, type, 0b00, {0b1, imm, 0b0, 0b0, 0b0},
- (outs), ins, "fcmpe\t$Rn, $Rm", [], NoItinerary> {
- let Defs = [NZCV];
- }
+// This is a directive-like pseudo-instruction. The purpose is to insert an
+// R_AARCH64_TLSDESC_CALL relocation at the offset of the following instruction
+// (which in the usual case is a BLR).
+let hasSideEffects = 1 in
+def TLSDESCCALL : Pseudo<(outs), (ins i64imm:$sym), []> {
+ let AsmString = ".tlsdesccall $sym";
}
-defm FCMPss : A64I_fpcmpSignal<0b00, 0b0, (ins FPR32:$Rn, FPR32:$Rm),
- (set NZCV, (A64cmp f32:$Rn, f32:$Rm))>;
-defm FCMPdd : A64I_fpcmpSignal<0b01, 0b0, (ins FPR64:$Rn, FPR64:$Rm),
- (set NZCV, (A64cmp f64:$Rn, f64:$Rm))>;
-
-// What would be Rm should be written as 0; note that even though it's called
-// "$Rm" here to fit in with the InstrFormats, it's actually an immediate.
-defm FCMPsi : A64I_fpcmpSignal<0b00, 0b1, (ins FPR32:$Rn, fpz32:$Rm),
- (set NZCV, (A64cmp f32:$Rn, fpz32:$Rm))>;
-
-defm FCMPdi : A64I_fpcmpSignal<0b01, 0b1, (ins FPR64:$Rn, fpz64:$Rm),
- (set NZCV, (A64cmp f64:$Rn, fpz64:$Rm))>;
-
+// Pseudo-instruction representing a BLR with attached TLSDESC relocation. It
+// gets expanded to two MCInsts during lowering.
+let isCall = 1, Defs = [LR] in
+def TLSDESC_BLR
+ : Pseudo<(outs), (ins GPR64:$dest, i64imm:$sym),
+ [(AArch64tlsdesc_call GPR64:$dest, tglobaltlsaddr:$sym)]>;
+def : Pat<(AArch64tlsdesc_call GPR64:$dest, texternalsym:$sym),
+ (TLSDESC_BLR GPR64:$dest, texternalsym:$sym)>;
//===----------------------------------------------------------------------===//
-// Floating-point conditional compare instructions
+// Conditional branch (immediate) instruction.
//===----------------------------------------------------------------------===//
-// Contains: FCCMP, FCCMPE
-
-class A64I_fpccmpImpl<bits<2> type, bit op, RegisterClass FPR, string asmop>
- : A64I_fpccmp<0b0, 0b0, type, op,
- (outs),
- (ins FPR:$Rn, FPR:$Rm, uimm4:$NZCVImm, cond_code_op:$Cond),
- !strconcat(asmop, "\t$Rn, $Rm, $NZCVImm, $Cond"),
- [], NoItinerary> {
- let Defs = [NZCV];
-}
-
-def FCCMPss : A64I_fpccmpImpl<0b00, 0b0, FPR32, "fccmp">;
-def FCCMPEss : A64I_fpccmpImpl<0b00, 0b1, FPR32, "fccmpe">;
-def FCCMPdd : A64I_fpccmpImpl<0b01, 0b0, FPR64, "fccmp">;
-def FCCMPEdd : A64I_fpccmpImpl<0b01, 0b1, FPR64, "fccmpe">;
+def Bcc : BranchCond;
//===----------------------------------------------------------------------===//
-// Floating-point conditional select instructions
+// Compare-and-branch instructions.
//===----------------------------------------------------------------------===//
-// Contains: FCSEL
-
-let Uses = [NZCV] in {
- def FCSELsssc : A64I_fpcondsel<0b0, 0b0, 0b00, (outs FPR32:$Rd),
- (ins FPR32:$Rn, FPR32:$Rm, cond_code_op:$Cond),
- "fcsel\t$Rd, $Rn, $Rm, $Cond",
- [(set f32:$Rd,
- (simple_select f32:$Rn, f32:$Rm))],
- NoItinerary>;
-
-
- def FCSELdddc : A64I_fpcondsel<0b0, 0b0, 0b01, (outs FPR64:$Rd),
- (ins FPR64:$Rn, FPR64:$Rm, cond_code_op:$Cond),
- "fcsel\t$Rd, $Rn, $Rm, $Cond",
- [(set f64:$Rd,
- (simple_select f64:$Rn, f64:$Rm))],
- NoItinerary>;
-}
+defm CBZ : CmpBranch<0, "cbz", AArch64cbz>;
+defm CBNZ : CmpBranch<1, "cbnz", AArch64cbnz>;
//===----------------------------------------------------------------------===//
-// Floating-point data-processing (1 source)
+// Test-bit-and-branch instructions.
//===----------------------------------------------------------------------===//
-// Contains: FMOV, FABS, FNEG, FSQRT, FCVT, FRINT[NPMZAXI].
-
-def FPNoUnop : PatFrag<(ops node:$val), (fneg node:$val),
- [{ (void)N; return false; }]>;
-
-// First we do the fairly trivial bunch with uniform "OP s, s" and "OP d, d"
-// syntax. Default to no pattern because most are odd enough not to have one.
-multiclass A64I_fpdp1sizes<bits<6> opcode, string asmstr,
- SDPatternOperator opnode = FPNoUnop> {
- def ss : A64I_fpdp1<0b0, 0b0, 0b00, opcode, (outs FPR32:$Rd), (ins FPR32:$Rn),
- !strconcat(asmstr, "\t$Rd, $Rn"),
- [(set f32:$Rd, (opnode f32:$Rn))],
- NoItinerary>;
-
- def dd : A64I_fpdp1<0b0, 0b0, 0b01, opcode, (outs FPR64:$Rd), (ins FPR64:$Rn),
- !strconcat(asmstr, "\t$Rd, $Rn"),
- [(set f64:$Rd, (opnode f64:$Rn))],
- NoItinerary>;
-}
-
-defm FMOV : A64I_fpdp1sizes<0b000000, "fmov">;
-defm FABS : A64I_fpdp1sizes<0b000001, "fabs", fabs>;
-defm FNEG : A64I_fpdp1sizes<0b000010, "fneg", fneg>;
-defm FSQRT : A64I_fpdp1sizes<0b000011, "fsqrt", fsqrt>;
-
-defm FRINTN : A64I_fpdp1sizes<0b001000, "frintn">;
-defm FRINTP : A64I_fpdp1sizes<0b001001, "frintp", fceil>;
-defm FRINTM : A64I_fpdp1sizes<0b001010, "frintm", ffloor>;
-defm FRINTZ : A64I_fpdp1sizes<0b001011, "frintz", ftrunc>;
-defm FRINTA : A64I_fpdp1sizes<0b001100, "frinta">;
-defm FRINTX : A64I_fpdp1sizes<0b001110, "frintx", frint>;
-defm FRINTI : A64I_fpdp1sizes<0b001111, "frinti", fnearbyint>;
-
-// The FCVT instrucitons have different source and destination register-types,
-// but the fields are uniform everywhere a D-register (say) crops up. Package
-// this information in a Record.
-class FCVTRegType<RegisterClass rc, bits<2> fld, ValueType vt> {
- RegisterClass Class = rc;
- ValueType VT = vt;
- bit t1 = fld{1};
- bit t0 = fld{0};
-}
-
-def FCVT16 : FCVTRegType<FPR16, 0b11, f16>;
-def FCVT32 : FCVTRegType<FPR32, 0b00, f32>;
-def FCVT64 : FCVTRegType<FPR64, 0b01, f64>;
-
-class A64I_fpdp1_fcvt<FCVTRegType DestReg, FCVTRegType SrcReg, SDNode opnode>
- : A64I_fpdp1<0b0, 0b0, {SrcReg.t1, SrcReg.t0},
- {0,0,0,1, DestReg.t1, DestReg.t0},
- (outs DestReg.Class:$Rd), (ins SrcReg.Class:$Rn),
- "fcvt\t$Rd, $Rn",
- [(set DestReg.VT:$Rd, (opnode SrcReg.VT:$Rn))], NoItinerary>;
-
-def FCVTds : A64I_fpdp1_fcvt<FCVT64, FCVT32, fextend>;
-def FCVThs : A64I_fpdp1_fcvt<FCVT16, FCVT32, fround>;
-def FCVTsd : A64I_fpdp1_fcvt<FCVT32, FCVT64, fround>;
-def FCVThd : A64I_fpdp1_fcvt<FCVT16, FCVT64, fround>;
-def FCVTsh : A64I_fpdp1_fcvt<FCVT32, FCVT16, fextend>;
-def FCVTdh : A64I_fpdp1_fcvt<FCVT64, FCVT16, fextend>;
-
+defm TBZ : TestBranch<0, "tbz", AArch64tbz>;
+defm TBNZ : TestBranch<1, "tbnz", AArch64tbnz>;
//===----------------------------------------------------------------------===//
-// Floating-point data-processing (2 sources) instructions
+// Unconditional branch (immediate) instructions.
//===----------------------------------------------------------------------===//
-// Contains: FMUL, FDIV, FADD, FSUB, FMAX, FMIN, FMAXNM, FMINNM, FNMUL
-
-def FPNoBinop : PatFrag<(ops node:$lhs, node:$rhs), (fadd node:$lhs, node:$rhs),
- [{ (void)N; return false; }]>;
-
-multiclass A64I_fpdp2sizes<bits<4> opcode, string asmstr,
- SDPatternOperator opnode> {
- def sss : A64I_fpdp2<0b0, 0b0, 0b00, opcode,
- (outs FPR32:$Rd),
- (ins FPR32:$Rn, FPR32:$Rm),
- !strconcat(asmstr, "\t$Rd, $Rn, $Rm"),
- [(set f32:$Rd, (opnode f32:$Rn, f32:$Rm))],
- NoItinerary>;
-
- def ddd : A64I_fpdp2<0b0, 0b0, 0b01, opcode,
- (outs FPR64:$Rd),
- (ins FPR64:$Rn, FPR64:$Rm),
- !strconcat(asmstr, "\t$Rd, $Rn, $Rm"),
- [(set f64:$Rd, (opnode f64:$Rn, f64:$Rm))],
- NoItinerary>;
-}
-
-let isCommutable = 1 in {
- defm FMUL : A64I_fpdp2sizes<0b0000, "fmul", fmul>;
- defm FADD : A64I_fpdp2sizes<0b0010, "fadd", fadd>;
+let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
+def B : BranchImm<0, "b", [(br bb:$addr)]>;
+} // isBranch, isTerminator, isBarrier
- // No patterns for these.
- defm FMAX : A64I_fpdp2sizes<0b0100, "fmax", FPNoBinop>;
- defm FMIN : A64I_fpdp2sizes<0b0101, "fmin", FPNoBinop>;
- defm FMAXNM : A64I_fpdp2sizes<0b0110, "fmaxnm", FPNoBinop>;
- defm FMINNM : A64I_fpdp2sizes<0b0111, "fminnm", FPNoBinop>;
-
- defm FNMUL : A64I_fpdp2sizes<0b1000, "fnmul",
- PatFrag<(ops node:$lhs, node:$rhs),
- (fneg (fmul node:$lhs, node:$rhs))> >;
-}
-
-defm FDIV : A64I_fpdp2sizes<0b0001, "fdiv", fdiv>;
-defm FSUB : A64I_fpdp2sizes<0b0011, "fsub", fsub>;
+let isCall = 1, Defs = [LR], Uses = [SP] in {
+def BL : CallImm<1, "bl", [(AArch64call tglobaladdr:$addr)]>;
+} // isCall
+def : Pat<(AArch64call texternalsym:$func), (BL texternalsym:$func)>;
//===----------------------------------------------------------------------===//
-// Floating-point data-processing (3 sources) instructions
+// Exception generation instructions.
//===----------------------------------------------------------------------===//
-// Contains: FMADD, FMSUB, FNMADD, FNMSUB
-
-def fmsub : PatFrag<(ops node:$Rn, node:$Rm, node:$Ra),
- (fma (fneg node:$Rn), node:$Rm, node:$Ra)>;
-def fnmadd : PatFrag<(ops node:$Rn, node:$Rm, node:$Ra),
- (fma node:$Rn, node:$Rm, (fneg node:$Ra))>;
-def fnmsub : PatFrag<(ops node:$Rn, node:$Rm, node:$Ra),
- (fma (fneg node:$Rn), node:$Rm, (fneg node:$Ra))>;
-
-class A64I_fpdp3Impl<string asmop, RegisterClass FPR, ValueType VT,
- bits<2> type, bit o1, bit o0, SDPatternOperator fmakind>
- : A64I_fpdp3<0b0, 0b0, type, o1, o0, (outs FPR:$Rd),
- (ins FPR:$Rn, FPR:$Rm, FPR:$Ra),
- !strconcat(asmop,"\t$Rd, $Rn, $Rm, $Ra"),
- [(set VT:$Rd, (fmakind VT:$Rn, VT:$Rm, VT:$Ra))],
- NoItinerary>;
-
-def FMADDssss : A64I_fpdp3Impl<"fmadd", FPR32, f32, 0b00, 0b0, 0b0, fma>;
-def FMSUBssss : A64I_fpdp3Impl<"fmsub", FPR32, f32, 0b00, 0b0, 0b1, fmsub>;
-def FNMADDssss : A64I_fpdp3Impl<"fnmadd", FPR32, f32, 0b00, 0b1, 0b0, fnmadd>;
-def FNMSUBssss : A64I_fpdp3Impl<"fnmsub", FPR32, f32, 0b00, 0b1, 0b1, fnmsub>;
-
-def FMADDdddd : A64I_fpdp3Impl<"fmadd", FPR64, f64, 0b01, 0b0, 0b0, fma>;
-def FMSUBdddd : A64I_fpdp3Impl<"fmsub", FPR64, f64, 0b01, 0b0, 0b1, fmsub>;
-def FNMADDdddd : A64I_fpdp3Impl<"fnmadd", FPR64, f64, 0b01, 0b1, 0b0, fnmadd>;
-def FNMSUBdddd : A64I_fpdp3Impl<"fnmsub", FPR64, f64, 0b01, 0b1, 0b1, fnmsub>;
-
-// Extra patterns for when we're allowed to optimise separate multiplication and
-// addition.
-let Predicates = [HasFPARMv8, UseFusedMAC] in {
-def : Pat<(f32 (fadd FPR32:$Ra, (f32 (fmul FPR32:$Rn, FPR32:$Rm)))),
- (FMADDssss FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
-def : Pat<(f32 (fsub FPR32:$Ra, (f32 (fmul FPR32:$Rn, FPR32:$Rm)))),
- (FMSUBssss FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
-def : Pat<(f32 (fsub (f32 (fmul FPR32:$Rn, FPR32:$Rm)), FPR32:$Ra)),
- (FNMADDssss FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
-def : Pat<(f32 (fsub (f32 (fneg FPR32:$Ra)), (f32 (fmul FPR32:$Rn, FPR32:$Rm)))),
- (FNMSUBssss FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
-
-def : Pat<(f64 (fadd FPR64:$Ra, (f64 (fmul FPR64:$Rn, FPR64:$Rm)))),
- (FMADDdddd FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
-def : Pat<(f64 (fsub FPR64:$Ra, (f64 (fmul FPR64:$Rn, FPR64:$Rm)))),
- (FMSUBdddd FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
-def : Pat<(f64 (fsub (f64 (fmul FPR64:$Rn, FPR64:$Rm)), FPR64:$Ra)),
- (FNMADDdddd FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
-def : Pat<(f64 (fsub (f64 (fneg FPR64:$Ra)), (f64 (fmul FPR64:$Rn, FPR64:$Rm)))),
- (FNMSUBdddd FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
-}
-
+def BRK : ExceptionGeneration<0b001, 0b00, "brk">;
+def DCPS1 : ExceptionGeneration<0b101, 0b01, "dcps1">;
+def DCPS2 : ExceptionGeneration<0b101, 0b10, "dcps2">;
+def DCPS3 : ExceptionGeneration<0b101, 0b11, "dcps3">;
+def HLT : ExceptionGeneration<0b010, 0b00, "hlt">;
+def HVC : ExceptionGeneration<0b000, 0b10, "hvc">;
+def SMC : ExceptionGeneration<0b000, 0b11, "smc">;
+def SVC : ExceptionGeneration<0b000, 0b01, "svc">;
+
+// DCPSn defaults to an immediate operand of zero if unspecified.
+def : InstAlias<"dcps1", (DCPS1 0)>;
+def : InstAlias<"dcps2", (DCPS2 0)>;
+def : InstAlias<"dcps3", (DCPS3 0)>;
//===----------------------------------------------------------------------===//
-// Floating-point <-> fixed-point conversion instructions
+// Load instructions.
//===----------------------------------------------------------------------===//
-// Contains: FCVTZS, FCVTZU, SCVTF, UCVTF
-
-// #1-#32 allowed, encoded as "64 - <specified imm>
-def fixedpos_asmoperand_i32 : AsmOperandClass {
- let Name = "CVTFixedPos32";
- let RenderMethod = "addCVTFixedPosOperands";
- let PredicateMethod = "isCVTFixedPos<32>";
- let DiagnosticType = "CVTFixedPos32";
-}
-
-// Also encoded as "64 - <specified imm>" but #1-#64 allowed.
-def fixedpos_asmoperand_i64 : AsmOperandClass {
- let Name = "CVTFixedPos64";
- let RenderMethod = "addCVTFixedPosOperands";
- let PredicateMethod = "isCVTFixedPos<64>";
- let DiagnosticType = "CVTFixedPos64";
-}
-
-// We need the cartesian product of f32/f64 i32/i64 operands for
-// conversions:
-// + Selection needs to use operands of correct floating type
-// + Assembly parsing and decoding depend on integer width
-class cvtfix_i32_op<ValueType FloatVT>
- : Operand<FloatVT>,
- ComplexPattern<FloatVT, 1, "SelectCVTFixedPosOperand<32>", [fpimm]> {
- let ParserMatchClass = fixedpos_asmoperand_i32;
- let DecoderMethod = "DecodeCVT32FixedPosOperand";
- let PrintMethod = "printCVTFixedPosOperand";
-}
-class cvtfix_i64_op<ValueType FloatVT>
- : Operand<FloatVT>,
- ComplexPattern<FloatVT, 1, "SelectCVTFixedPosOperand<64>", [fpimm]> {
- let ParserMatchClass = fixedpos_asmoperand_i64;
- let PrintMethod = "printCVTFixedPosOperand";
+// Pair (indexed, offset)
+defm LDPW : LoadPairOffset<0b00, 0, GPR32, simm7s4, "ldp">;
+defm LDPX : LoadPairOffset<0b10, 0, GPR64, simm7s8, "ldp">;
+defm LDPS : LoadPairOffset<0b00, 1, FPR32, simm7s4, "ldp">;
+defm LDPD : LoadPairOffset<0b01, 1, FPR64, simm7s8, "ldp">;
+defm LDPQ : LoadPairOffset<0b10, 1, FPR128, simm7s16, "ldp">;
+
+defm LDPSW : LoadPairOffset<0b01, 0, GPR64, simm7s4, "ldpsw">;
+
+// Pair (pre-indexed)
+def LDPWpre : LoadPairPreIdx<0b00, 0, GPR32, simm7s4, "ldp">;
+def LDPXpre : LoadPairPreIdx<0b10, 0, GPR64, simm7s8, "ldp">;
+def LDPSpre : LoadPairPreIdx<0b00, 1, FPR32, simm7s4, "ldp">;
+def LDPDpre : LoadPairPreIdx<0b01, 1, FPR64, simm7s8, "ldp">;
+def LDPQpre : LoadPairPreIdx<0b10, 1, FPR128, simm7s16, "ldp">;
+
+def LDPSWpre : LoadPairPreIdx<0b01, 0, GPR64, simm7s4, "ldpsw">;
+
+// Pair (post-indexed)
+def LDPWpost : LoadPairPostIdx<0b00, 0, GPR32, simm7s4, "ldp">;
+def LDPXpost : LoadPairPostIdx<0b10, 0, GPR64, simm7s8, "ldp">;
+def LDPSpost : LoadPairPostIdx<0b00, 1, FPR32, simm7s4, "ldp">;
+def LDPDpost : LoadPairPostIdx<0b01, 1, FPR64, simm7s8, "ldp">;
+def LDPQpost : LoadPairPostIdx<0b10, 1, FPR128, simm7s16, "ldp">;
+
+def LDPSWpost : LoadPairPostIdx<0b01, 0, GPR64, simm7s4, "ldpsw">;
+
+
+// Pair (no allocate)
+defm LDNPW : LoadPairNoAlloc<0b00, 0, GPR32, simm7s4, "ldnp">;
+defm LDNPX : LoadPairNoAlloc<0b10, 0, GPR64, simm7s8, "ldnp">;
+defm LDNPS : LoadPairNoAlloc<0b00, 1, FPR32, simm7s4, "ldnp">;
+defm LDNPD : LoadPairNoAlloc<0b01, 1, FPR64, simm7s8, "ldnp">;
+defm LDNPQ : LoadPairNoAlloc<0b10, 1, FPR128, simm7s16, "ldnp">;
+
+//---
+// (register offset)
+//---
+
+// Integer
+defm LDRBB : Load8RO<0b00, 0, 0b01, GPR32, "ldrb", i32, zextloadi8>;
+defm LDRHH : Load16RO<0b01, 0, 0b01, GPR32, "ldrh", i32, zextloadi16>;
+defm LDRW : Load32RO<0b10, 0, 0b01, GPR32, "ldr", i32, load>;
+defm LDRX : Load64RO<0b11, 0, 0b01, GPR64, "ldr", i64, load>;
+
+// Floating-point
+defm LDRB : Load8RO<0b00, 1, 0b01, FPR8, "ldr", untyped, load>;
+defm LDRH : Load16RO<0b01, 1, 0b01, FPR16, "ldr", f16, load>;
+defm LDRS : Load32RO<0b10, 1, 0b01, FPR32, "ldr", f32, load>;
+defm LDRD : Load64RO<0b11, 1, 0b01, FPR64, "ldr", f64, load>;
+defm LDRQ : Load128RO<0b00, 1, 0b11, FPR128, "ldr", f128, load>;
+
+// Load sign-extended half-word
+defm LDRSHW : Load16RO<0b01, 0, 0b11, GPR32, "ldrsh", i32, sextloadi16>;
+defm LDRSHX : Load16RO<0b01, 0, 0b10, GPR64, "ldrsh", i64, sextloadi16>;
+
+// Load sign-extended byte
+defm LDRSBW : Load8RO<0b00, 0, 0b11, GPR32, "ldrsb", i32, sextloadi8>;
+defm LDRSBX : Load8RO<0b00, 0, 0b10, GPR64, "ldrsb", i64, sextloadi8>;
+
+// Load sign-extended word
+defm LDRSW : Load32RO<0b10, 0, 0b10, GPR64, "ldrsw", i64, sextloadi32>;
+
+// Pre-fetch.
+defm PRFM : PrefetchRO<0b11, 0, 0b10, "prfm">;
+
+// Regular loads have no alignment requirement, so it is safe to map vector
+// loads with interesting addressing modes directly onto these instructions.
+// FIXME: We could do the same for bitconvert to floating point vectors.
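+// Each pattern below loads the scalar directly into lane 0 of an otherwise
+// undefined vector via INSERT_SUBREG, with one variant per W/X register-offset
+// addressing form.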
+multiclass ScalToVecROLoadPat<ROAddrMode ro, SDPatternOperator loadop,
+ ValueType ScalTy, ValueType VecTy,
+ Instruction LOADW, Instruction LOADX,
+ SubRegIndex sub> {
+ def : Pat<(VecTy (scalar_to_vector (ScalTy
+ (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset))))),
+ (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
+ (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset),
+ sub)>;
+
+ def : Pat<(VecTy (scalar_to_vector (ScalTy
+ (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset))))),
+ (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
+ (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset),
+ sub)>;
}
-// Because of the proliferation of weird operands, it's not really
-// worth going for a multiclass here. Oh well.
-
-class A64I_fptofix<bit sf, bits<2> type, bits<3> opcode,
- RegisterClass GPR, RegisterClass FPR,
- ValueType DstTy, ValueType SrcTy,
- Operand scale_op, string asmop, SDNode cvtop>
- : A64I_fpfixed<sf, 0b0, type, 0b11, opcode,
- (outs GPR:$Rd), (ins FPR:$Rn, scale_op:$Scale),
- !strconcat(asmop, "\t$Rd, $Rn, $Scale"),
- [(set DstTy:$Rd, (cvtop (fmul SrcTy:$Rn, scale_op:$Scale)))],
- NoItinerary>;
-
-def FCVTZSwsi : A64I_fptofix<0b0, 0b00, 0b000, GPR32, FPR32, i32, f32,
- cvtfix_i32_op<f32>, "fcvtzs", fp_to_sint>;
-def FCVTZSxsi : A64I_fptofix<0b1, 0b00, 0b000, GPR64, FPR32, i64, f32,
- cvtfix_i64_op<f32>, "fcvtzs", fp_to_sint>;
-def FCVTZUwsi : A64I_fptofix<0b0, 0b00, 0b001, GPR32, FPR32, i32, f32,
- cvtfix_i32_op<f32>, "fcvtzu", fp_to_uint>;
-def FCVTZUxsi : A64I_fptofix<0b1, 0b00, 0b001, GPR64, FPR32, i64, f32,
- cvtfix_i64_op<f32>, "fcvtzu", fp_to_uint>;
-
-def FCVTZSwdi : A64I_fptofix<0b0, 0b01, 0b000, GPR32, FPR64, i32, f64,
- cvtfix_i32_op<f64>, "fcvtzs", fp_to_sint>;
-def FCVTZSxdi : A64I_fptofix<0b1, 0b01, 0b000, GPR64, FPR64, i64, f64,
- cvtfix_i64_op<f64>, "fcvtzs", fp_to_sint>;
-def FCVTZUwdi : A64I_fptofix<0b0, 0b01, 0b001, GPR32, FPR64, i32, f64,
- cvtfix_i32_op<f64>, "fcvtzu", fp_to_uint>;
-def FCVTZUxdi : A64I_fptofix<0b1, 0b01, 0b001, GPR64, FPR64, i64, f64,
- cvtfix_i64_op<f64>, "fcvtzu", fp_to_uint>;
-
-
-class A64I_fixtofp<bit sf, bits<2> type, bits<3> opcode,
- RegisterClass FPR, RegisterClass GPR,
- ValueType DstTy, ValueType SrcTy,
- Operand scale_op, string asmop, SDNode cvtop>
- : A64I_fpfixed<sf, 0b0, type, 0b00, opcode,
- (outs FPR:$Rd), (ins GPR:$Rn, scale_op:$Scale),
- !strconcat(asmop, "\t$Rd, $Rn, $Scale"),
- [(set DstTy:$Rd, (fdiv (cvtop SrcTy:$Rn), scale_op:$Scale))],
- NoItinerary>;
-
-def SCVTFswi : A64I_fixtofp<0b0, 0b00, 0b010, FPR32, GPR32, f32, i32,
- cvtfix_i32_op<f32>, "scvtf", sint_to_fp>;
-def SCVTFsxi : A64I_fixtofp<0b1, 0b00, 0b010, FPR32, GPR64, f32, i64,
- cvtfix_i64_op<f32>, "scvtf", sint_to_fp>;
-def UCVTFswi : A64I_fixtofp<0b0, 0b00, 0b011, FPR32, GPR32, f32, i32,
- cvtfix_i32_op<f32>, "ucvtf", uint_to_fp>;
-def UCVTFsxi : A64I_fixtofp<0b1, 0b00, 0b011, FPR32, GPR64, f32, i64,
- cvtfix_i64_op<f32>, "ucvtf", uint_to_fp>;
-def SCVTFdwi : A64I_fixtofp<0b0, 0b01, 0b010, FPR64, GPR32, f64, i32,
- cvtfix_i32_op<f64>, "scvtf", sint_to_fp>;
-def SCVTFdxi : A64I_fixtofp<0b1, 0b01, 0b010, FPR64, GPR64, f64, i64,
- cvtfix_i64_op<f64>, "scvtf", sint_to_fp>;
-def UCVTFdwi : A64I_fixtofp<0b0, 0b01, 0b011, FPR64, GPR32, f64, i32,
- cvtfix_i32_op<f64>, "ucvtf", uint_to_fp>;
-def UCVTFdxi : A64I_fixtofp<0b1, 0b01, 0b011, FPR64, GPR64, f64, i64,
- cvtfix_i64_op<f64>, "ucvtf", uint_to_fp>;
-
-//===----------------------------------------------------------------------===//
-// Floating-point <-> integer conversion instructions
-//===----------------------------------------------------------------------===//
-// Contains: FCVTZS, FCVTZU, SCVTF, UCVTF
-
-class A64I_fpintI<bit sf, bits<2> type, bits<2> rmode, bits<3> opcode,
- RegisterClass DestPR, RegisterClass SrcPR, string asmop>
- : A64I_fpint<sf, 0b0, type, rmode, opcode, (outs DestPR:$Rd), (ins SrcPR:$Rn),
- !strconcat(asmop, "\t$Rd, $Rn"), [], NoItinerary>;
-
-multiclass A64I_fptointRM<bits<2> rmode, bit o2, string asmop> {
- def Sws : A64I_fpintI<0b0, 0b00, rmode, {o2, 0, 0},
- GPR32, FPR32, asmop # "s">;
- def Sxs : A64I_fpintI<0b1, 0b00, rmode, {o2, 0, 0},
- GPR64, FPR32, asmop # "s">;
- def Uws : A64I_fpintI<0b0, 0b00, rmode, {o2, 0, 1},
- GPR32, FPR32, asmop # "u">;
- def Uxs : A64I_fpintI<0b1, 0b00, rmode, {o2, 0, 1},
- GPR64, FPR32, asmop # "u">;
-
- def Swd : A64I_fpintI<0b0, 0b01, rmode, {o2, 0, 0},
- GPR32, FPR64, asmop # "s">;
- def Sxd : A64I_fpintI<0b1, 0b01, rmode, {o2, 0, 0},
- GPR64, FPR64, asmop # "s">;
- def Uwd : A64I_fpintI<0b0, 0b01, rmode, {o2, 0, 1},
- GPR32, FPR64, asmop # "u">;
- def Uxd : A64I_fpintI<0b1, 0b01, rmode, {o2, 0, 1},
- GPR64, FPR64, asmop # "u">;
-}
-
-defm FCVTN : A64I_fptointRM<0b00, 0b0, "fcvtn">;
-defm FCVTP : A64I_fptointRM<0b01, 0b0, "fcvtp">;
-defm FCVTM : A64I_fptointRM<0b10, 0b0, "fcvtm">;
-defm FCVTZ : A64I_fptointRM<0b11, 0b0, "fcvtz">;
-defm FCVTA : A64I_fptointRM<0b00, 0b1, "fcvta">;
-
-let Predicates = [HasFPARMv8] in {
-def : Pat<(i32 (fp_to_sint f32:$Rn)), (FCVTZSws $Rn)>;
-def : Pat<(i64 (fp_to_sint f32:$Rn)), (FCVTZSxs $Rn)>;
-def : Pat<(i32 (fp_to_uint f32:$Rn)), (FCVTZUws $Rn)>;
-def : Pat<(i64 (fp_to_uint f32:$Rn)), (FCVTZUxs $Rn)>;
-def : Pat<(i32 (fp_to_sint f64:$Rn)), (FCVTZSwd $Rn)>;
-def : Pat<(i64 (fp_to_sint f64:$Rn)), (FCVTZSxd $Rn)>;
-def : Pat<(i32 (fp_to_uint f64:$Rn)), (FCVTZUwd $Rn)>;
-def : Pat<(i64 (fp_to_uint f64:$Rn)), (FCVTZUxd $Rn)>;
-}
+let AddedComplexity = 10 in {
+defm : ScalToVecROLoadPat<ro8, extloadi8, i32, v8i8, LDRBroW, LDRBroX, bsub>;
+defm : ScalToVecROLoadPat<ro8, extloadi8, i32, v16i8, LDRBroW, LDRBroX, bsub>;
-multiclass A64I_inttofp<bit o0, string asmop> {
- def CVTFsw : A64I_fpintI<0b0, 0b00, 0b00, {0, 1, o0}, FPR32, GPR32, asmop>;
- def CVTFsx : A64I_fpintI<0b1, 0b00, 0b00, {0, 1, o0}, FPR32, GPR64, asmop>;
- def CVTFdw : A64I_fpintI<0b0, 0b01, 0b00, {0, 1, o0}, FPR64, GPR32, asmop>;
- def CVTFdx : A64I_fpintI<0b1, 0b01, 0b00, {0, 1, o0}, FPR64, GPR64, asmop>;
-}
+defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
+defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;
-defm S : A64I_inttofp<0b0, "scvtf">;
-defm U : A64I_inttofp<0b1, "ucvtf">;
-
-let Predicates = [HasFPARMv8] in {
-def : Pat<(f32 (sint_to_fp i32:$Rn)), (SCVTFsw $Rn)>;
-def : Pat<(f32 (sint_to_fp i64:$Rn)), (SCVTFsx $Rn)>;
-def : Pat<(f64 (sint_to_fp i32:$Rn)), (SCVTFdw $Rn)>;
-def : Pat<(f64 (sint_to_fp i64:$Rn)), (SCVTFdx $Rn)>;
-def : Pat<(f32 (uint_to_fp i32:$Rn)), (UCVTFsw $Rn)>;
-def : Pat<(f32 (uint_to_fp i64:$Rn)), (UCVTFsx $Rn)>;
-def : Pat<(f64 (uint_to_fp i32:$Rn)), (UCVTFdw $Rn)>;
-def : Pat<(f64 (uint_to_fp i64:$Rn)), (UCVTFdx $Rn)>;
-}
+defm : ScalToVecROLoadPat<ro32, load, i32, v2i32, LDRSroW, LDRSroX, ssub>;
+defm : ScalToVecROLoadPat<ro32, load, i32, v4i32, LDRSroW, LDRSroX, ssub>;
-def FMOVws : A64I_fpintI<0b0, 0b00, 0b00, 0b110, GPR32, FPR32, "fmov">;
-def FMOVsw : A64I_fpintI<0b0, 0b00, 0b00, 0b111, FPR32, GPR32, "fmov">;
-def FMOVxd : A64I_fpintI<0b1, 0b01, 0b00, 0b110, GPR64, FPR64, "fmov">;
-def FMOVdx : A64I_fpintI<0b1, 0b01, 0b00, 0b111, FPR64, GPR64, "fmov">;
+defm : ScalToVecROLoadPat<ro32, load, f32, v2f32, LDRSroW, LDRSroX, ssub>;
+defm : ScalToVecROLoadPat<ro32, load, f32, v4f32, LDRSroW, LDRSroX, ssub>;
-let Predicates = [HasFPARMv8] in {
-def : Pat<(i32 (bitconvert f32:$Rn)), (FMOVws $Rn)>;
-def : Pat<(f32 (bitconvert i32:$Rn)), (FMOVsw $Rn)>;
-def : Pat<(i64 (bitconvert f64:$Rn)), (FMOVxd $Rn)>;
-def : Pat<(f64 (bitconvert i64:$Rn)), (FMOVdx $Rn)>;
-}
+defm : ScalToVecROLoadPat<ro64, load, i64, v2i64, LDRDroW, LDRDroX, dsub>;
-def lane1_asmoperand : AsmOperandClass {
- let Name = "Lane1";
- let RenderMethod = "addImmOperands";
- let DiagnosticType = "Lane1";
-}
+defm : ScalToVecROLoadPat<ro64, load, f64, v2f64, LDRDroW, LDRDroX, dsub>;
-def lane1 : Operand<i32> {
- let ParserMatchClass = lane1_asmoperand;
- let PrintMethod = "printBareImmOperand";
-}
-let DecoderMethod = "DecodeFMOVLaneInstruction" in {
- def FMOVxv : A64I_fpint<0b1, 0b0, 0b10, 0b01, 0b110,
- (outs GPR64:$Rd), (ins VPR128:$Rn, lane1:$Lane),
- "fmov\t$Rd, $Rn.d[$Lane]", [], NoItinerary>;
+def : Pat <(v1i64 (scalar_to_vector (i64
+ (load (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
+ ro_Wextend64:$extend))))),
+ (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
- def FMOVvx : A64I_fpint<0b1, 0b0, 0b10, 0b01, 0b111,
- (outs VPR128:$Rd), (ins GPR64:$Rn, lane1:$Lane),
- "fmov\t$Rd.d[$Lane], $Rn", [], NoItinerary>;
+def : Pat <(v1i64 (scalar_to_vector (i64
+ (load (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
+ ro_Xextend64:$extend))))),
+ (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
}
-let Predicates = [HasFPARMv8] in {
-def : InstAlias<"fmov $Rd, $Rn.2d[$Lane]",
- (FMOVxv GPR64:$Rd, VPR128:$Rn, lane1:$Lane), 0b0>;
+// Match all load 64 bits width whose type is compatible with FPR64
+multiclass VecROLoadPat<ROAddrMode ro, ValueType VecTy,
+ Instruction LOADW, Instruction LOADX> {
-def : InstAlias<"fmov $Rd.2d[$Lane], $Rn",
- (FMOVvx VPR128:$Rd, GPR64:$Rn, lane1:$Lane), 0b0>;
-}
-
-//===----------------------------------------------------------------------===//
-// Floating-point immediate instructions
-//===----------------------------------------------------------------------===//
-// Contains: FMOV
+ def : Pat<(VecTy (load (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
+ (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
-def fpimm_asmoperand : AsmOperandClass {
- let Name = "FMOVImm";
- let ParserMethod = "ParseFPImmOperand";
- let DiagnosticType = "FPImm";
+ def : Pat<(VecTy (load (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
+ (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}
-// The MCOperand for these instructions are the encoded 8-bit values.
-def SDXF_fpimm : SDNodeXForm<fpimm, [{
- uint32_t Imm8;
- A64Imms::isFPImm(N->getValueAPF(), Imm8);
- return CurDAG->getTargetConstant(Imm8, MVT::i32);
-}]>;
-
-class fmov_operand<ValueType FT>
- : Operand<i32>,
- PatLeaf<(FT fpimm), [{ return A64Imms::isFPImm(N->getValueAPF()); }],
- SDXF_fpimm> {
- let PrintMethod = "printFPImmOperand";
- let ParserMatchClass = fpimm_asmoperand;
+let AddedComplexity = 10 in {
+let Predicates = [IsLE] in {
+ // We must do vector loads with LD1 in big-endian.
+ defm : VecROLoadPat<ro64, v2i32, LDRDroW, LDRDroX>;
+ defm : VecROLoadPat<ro64, v2f32, LDRDroW, LDRDroX>;
+ defm : VecROLoadPat<ro64, v8i8, LDRDroW, LDRDroX>;
+ defm : VecROLoadPat<ro64, v4i16, LDRDroW, LDRDroX>;
+}
+
+defm : VecROLoadPat<ro64, v1i64, LDRDroW, LDRDroX>;
+defm : VecROLoadPat<ro64, v1f64, LDRDroW, LDRDroX>;
+
+// Match all load 128 bits width whose type is compatible with FPR128
+let Predicates = [IsLE] in {
+ // We must do vector loads with LD1 in big-endian.
+ defm : VecROLoadPat<ro128, v2i64, LDRQroW, LDRQroX>;
+ defm : VecROLoadPat<ro128, v2f64, LDRQroW, LDRQroX>;
+ defm : VecROLoadPat<ro128, v4i32, LDRQroW, LDRQroX>;
+ defm : VecROLoadPat<ro128, v4f32, LDRQroW, LDRQroX>;
+ defm : VecROLoadPat<ro128, v8i16, LDRQroW, LDRQroX>;
+ defm : VecROLoadPat<ro128, v16i8, LDRQroW, LDRQroX>;
+}
+} // AddedComplexity = 10
+
+// zextload -> i64
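+// The 32-bit load instructions zero the upper half of the X register, so the
+// result only needs to be retagged as a 64-bit value with SUBREG_TO_REG.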
+multiclass ExtLoadTo64ROPat<ROAddrMode ro, SDPatternOperator loadop,
+ Instruction INSTW, Instruction INSTX> {
+ def : Pat<(i64 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
+ (SUBREG_TO_REG (i64 0),
+ (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
+ sub_32)>;
+
+ def : Pat<(i64 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
+ (SUBREG_TO_REG (i64 0),
+ (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
+ sub_32)>;
}
-def fmov32_operand : fmov_operand<f32>;
-def fmov64_operand : fmov_operand<f64>;
-
-class A64I_fpimm_impl<bits<2> type, RegisterClass Reg, ValueType VT,
- Operand fmov_operand>
- : A64I_fpimm<0b0, 0b0, type, 0b00000,
- (outs Reg:$Rd),
- (ins fmov_operand:$Imm8),
- "fmov\t$Rd, $Imm8",
- [(set VT:$Rd, fmov_operand:$Imm8)],
- NoItinerary>;
-
-def FMOVsi : A64I_fpimm_impl<0b00, FPR32, f32, fmov32_operand>;
-def FMOVdi : A64I_fpimm_impl<0b01, FPR64, f64, fmov64_operand>;
-
-//===----------------------------------------------------------------------===//
-// Load-register (literal) instructions
-//===----------------------------------------------------------------------===//
-// Contains: LDR, LDRSW, PRFM
+let AddedComplexity = 10 in {
+ defm : ExtLoadTo64ROPat<ro8, zextloadi8, LDRBBroW, LDRBBroX>;
+ defm : ExtLoadTo64ROPat<ro16, zextloadi16, LDRHHroW, LDRHHroX>;
+ defm : ExtLoadTo64ROPat<ro32, zextloadi32, LDRWroW, LDRWroX>;
-def ldrlit_label_asmoperand : AsmOperandClass {
- let Name = "LoadLitLabel";
- let RenderMethod = "addLabelOperands<19, 4>";
- let DiagnosticType = "Label";
-}
+ // zextloadi1 -> zextloadi8
+ defm : ExtLoadTo64ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
-def ldrlit_label : Operand<i64> {
- let EncoderMethod = "getLoadLitLabelOpValue";
+ // extload -> zextload
+ defm : ExtLoadTo64ROPat<ro8, extloadi8, LDRBBroW, LDRBBroX>;
+ defm : ExtLoadTo64ROPat<ro16, extloadi16, LDRHHroW, LDRHHroX>;
+ defm : ExtLoadTo64ROPat<ro32, extloadi32, LDRWroW, LDRWroX>;
- // This label is a 19-bit offset from PC, scaled by the instruction-width: 4.
- let PrintMethod = "printLabelOperand<19, 4>";
- let ParserMatchClass = ldrlit_label_asmoperand;
- let OperandType = "OPERAND_PCREL";
+ // extloadi1 -> zextloadi8
+ defm : ExtLoadTo64ROPat<ro8, extloadi1, LDRBBroW, LDRBBroX>;
}
-// Various instructions take an immediate value (which can always be used),
-// where some numbers have a symbolic name to make things easier. These operands
-// and the associated functions abstract away the differences.
-multiclass namedimm<string prefix, string mapper> {
- def _asmoperand : AsmOperandClass {
- let Name = "NamedImm" # prefix;
- let PredicateMethod = "isUImm";
- let RenderMethod = "addImmOperands";
- let ParserMethod = "ParseNamedImmOperand<" # mapper # ">";
- let DiagnosticType = "NamedImm_" # prefix;
- }
- def _op : Operand<i32> {
- let ParserMatchClass = !cast<AsmOperandClass>(prefix # "_asmoperand");
- let PrintMethod = "printNamedImmOperand<" # mapper # ">";
- let DecoderMethod = "DecodeNamedImmOperand<" # mapper # ">";
- }
-}
-
-defm prefetch : namedimm<"prefetch", "A64PRFM::PRFMMapper">;
+// extload/zextloadi1 -> i32
+multiclass ExtLoadTo32ROPat<ROAddrMode ro, SDPatternOperator loadop,
+ Instruction INSTW, Instruction INSTX> {
+ def : Pat<(i32 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
+ (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
-class A64I_LDRlitSimple<bits<2> opc, bit v, RegisterClass OutReg,
- list<dag> patterns = []>
- : A64I_LDRlit<opc, v, (outs OutReg:$Rt), (ins ldrlit_label:$Imm19),
- "ldr\t$Rt, $Imm19", patterns, NoItinerary>;
+ def : Pat<(i32 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
+ (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
-let mayLoad = 1 in {
- def LDRw_lit : A64I_LDRlitSimple<0b00, 0b0, GPR32>;
- def LDRx_lit : A64I_LDRlitSimple<0b01, 0b0, GPR64>;
}
-let Predicates = [HasFPARMv8] in {
-def LDRs_lit : A64I_LDRlitSimple<0b00, 0b1, FPR32>;
-def LDRd_lit : A64I_LDRlitSimple<0b01, 0b1, FPR64>;
+let AddedComplexity = 10 in {
+ // extload -> zextload
+ defm : ExtLoadTo32ROPat<ro8, extloadi8, LDRBBroW, LDRBBroX>;
+ defm : ExtLoadTo32ROPat<ro16, extloadi16, LDRHHroW, LDRHHroX>;
+ defm : ExtLoadTo32ROPat<ro32, extloadi32, LDRWroW, LDRWroX>;
+
+ // zextloadi1 -> zextloadi8
+ defm : ExtLoadTo32ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
+}
+
+//---
+// (unsigned immediate)
+//---
+defm LDRX : LoadUI<0b11, 0, 0b01, GPR64, uimm12s8, "ldr",
+ [(set GPR64:$Rt,
+ (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
+defm LDRW : LoadUI<0b10, 0, 0b01, GPR32, uimm12s4, "ldr",
+ [(set GPR32:$Rt,
+ (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
+defm LDRB : LoadUI<0b00, 1, 0b01, FPR8, uimm12s1, "ldr",
+ [(set FPR8:$Rt,
+ (load (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)))]>;
+defm LDRH : LoadUI<0b01, 1, 0b01, FPR16, uimm12s2, "ldr",
+ [(set (f16 FPR16:$Rt),
+ (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)))]>;
+defm LDRS : LoadUI<0b10, 1, 0b01, FPR32, uimm12s4, "ldr",
+ [(set (f32 FPR32:$Rt),
+ (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
+defm LDRD : LoadUI<0b11, 1, 0b01, FPR64, uimm12s8, "ldr",
+ [(set (f64 FPR64:$Rt),
+ (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
+defm LDRQ : LoadUI<0b00, 1, 0b11, FPR128, uimm12s16, "ldr",
+ [(set (f128 FPR128:$Rt),
+ (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)))]>;
+
+// Regular loads have no alignment requirement, so it is safe to map vector
+// loads with interesting addressing modes directly onto these instructions.
+// FIXME: We could do the same for bitconvert to floating point vectors.
+def : Pat <(v8i8 (scalar_to_vector (i32
+ (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
+ (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
+ (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
+def : Pat <(v16i8 (scalar_to_vector (i32
+ (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
+def : Pat <(v4i16 (scalar_to_vector (i32
+ (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
+ (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
+ (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
+def : Pat <(v8i16 (scalar_to_vector (i32
+ (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
+ (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
+ (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
+def : Pat <(v2i32 (scalar_to_vector (i32
+ (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
+ (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
+ (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
+def : Pat <(v4i32 (scalar_to_vector (i32
+ (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
+ (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
+ (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
+def : Pat <(v1i64 (scalar_to_vector (i64
+ (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
+ (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
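+// (v1i64 occupies the whole D register, so no lane insert through an
+// IMPLICIT_DEF vector is needed here.)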
+def : Pat <(v2i64 (scalar_to_vector (i64
+ (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
+ (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
+ (LDRDui GPR64sp:$Rn, uimm12s8:$offset), dsub)>;
+
+// Match all load 64 bits width whose type is compatible with FPR64
+let Predicates = [IsLE] in {
+ // We must use LD1 to perform vector loads in big-endian.
+ def : Pat<(v2f32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
+ (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
+ def : Pat<(v8i8 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
+ (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
+ def : Pat<(v4i16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
+ (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
+ def : Pat<(v2i32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
+ (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
+}
+def : Pat<(v1f64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
+ (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
+def : Pat<(v1i64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
+ (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
+
+// Match all load 128 bits width whose type is compatible with FPR128
+let Predicates = [IsLE] in {
+ // We must use LD1 to perform vector loads in big-endian.
+ def : Pat<(v4f32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
+ (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
+ def : Pat<(v2f64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
+ (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
+ def : Pat<(v16i8 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
+ (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
+ def : Pat<(v8i16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
+ (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
+ def : Pat<(v4i32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
+ (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
+ def : Pat<(v2i64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
+ (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
+}
+def : Pat<(f128 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
+ (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
+
+defm LDRHH : LoadUI<0b01, 0, 0b01, GPR32, uimm12s2, "ldrh",
+ [(set GPR32:$Rt,
+ (zextloadi16 (am_indexed16 GPR64sp:$Rn,
+ uimm12s2:$offset)))]>;
+defm LDRBB : LoadUI<0b00, 0, 0b01, GPR32, uimm12s1, "ldrb",
+ [(set GPR32:$Rt,
+ (zextloadi8 (am_indexed8 GPR64sp:$Rn,
+ uimm12s1:$offset)))]>;
+// zextload -> i64
+def : Pat<(i64 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
+ (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
+def : Pat<(i64 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
+ (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
+
+// zextloadi1 -> zextloadi8
+def : Pat<(i32 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
+ (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
+def : Pat<(i64 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
+ (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
+
+// extload -> zextload
+def : Pat<(i32 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
+ (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
+def : Pat<(i32 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
+ (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
+def : Pat<(i32 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
+ (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
+def : Pat<(i64 (extloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
+ (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
+def : Pat<(i64 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
+ (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
+def : Pat<(i64 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
+ (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
+def : Pat<(i64 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
+ (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
+
+// load sign-extended half-word
+defm LDRSHW : LoadUI<0b01, 0, 0b11, GPR32, uimm12s2, "ldrsh",
+ [(set GPR32:$Rt,
+ (sextloadi16 (am_indexed16 GPR64sp:$Rn,
+ uimm12s2:$offset)))]>;
+defm LDRSHX : LoadUI<0b01, 0, 0b10, GPR64, uimm12s2, "ldrsh",
+ [(set GPR64:$Rt,
+ (sextloadi16 (am_indexed16 GPR64sp:$Rn,
+ uimm12s2:$offset)))]>;
+
+// load sign-extended byte
+defm LDRSBW : LoadUI<0b00, 0, 0b11, GPR32, uimm12s1, "ldrsb",
+ [(set GPR32:$Rt,
+ (sextloadi8 (am_indexed8 GPR64sp:$Rn,
+ uimm12s1:$offset)))]>;
+defm LDRSBX : LoadUI<0b00, 0, 0b10, GPR64, uimm12s1, "ldrsb",
+ [(set GPR64:$Rt,
+ (sextloadi8 (am_indexed8 GPR64sp:$Rn,
+ uimm12s1:$offset)))]>;
+
+// load sign-extended word
+defm LDRSW : LoadUI<0b10, 0, 0b10, GPR64, uimm12s4, "ldrsw",
+ [(set GPR64:$Rt,
+ (sextloadi32 (am_indexed32 GPR64sp:$Rn,
+ uimm12s4:$offset)))]>;
+
+// load zero-extended word
+def : Pat<(i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
+ (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
+
+// Pre-fetch.
+def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
+ [(AArch64Prefetch imm:$Rt,
+ (am_indexed64 GPR64sp:$Rn,
+ uimm12s8:$offset))]>;
+
+def : InstAlias<"prfm $Rt, [$Rn]", (PRFMui prfop:$Rt, GPR64sp:$Rn, 0)>;
+
+//---
+// (literal)
+def LDRWl : LoadLiteral<0b00, 0, GPR32, "ldr">;
+def LDRXl : LoadLiteral<0b01, 0, GPR64, "ldr">;
+def LDRSl : LoadLiteral<0b00, 1, FPR32, "ldr">;
+def LDRDl : LoadLiteral<0b01, 1, FPR64, "ldr">;
+def LDRQl : LoadLiteral<0b10, 1, FPR128, "ldr">;
+
+// load sign-extended word
+def LDRSWl : LoadLiteral<0b10, 0, GPR64, "ldrsw">;
+
+// prefetch
+def PRFMl : PrefetchLiteral<0b11, 0, "prfm", []>;
+// [(AArch64Prefetch imm:$Rt, tglobaladdr:$label)]>;
+
+//---
+// (unscaled immediate)
+defm LDURX : LoadUnscaled<0b11, 0, 0b01, GPR64, "ldur",
+ [(set GPR64:$Rt,
+ (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
+defm LDURW : LoadUnscaled<0b10, 0, 0b01, GPR32, "ldur",
+ [(set GPR32:$Rt,
+ (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
+defm LDURB : LoadUnscaled<0b00, 1, 0b01, FPR8, "ldur",
+ [(set FPR8:$Rt,
+ (load (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
+defm LDURH : LoadUnscaled<0b01, 1, 0b01, FPR16, "ldur",
+ [(set FPR16:$Rt,
+ (load (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
+defm LDURS : LoadUnscaled<0b10, 1, 0b01, FPR32, "ldur",
+ [(set (f32 FPR32:$Rt),
+ (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
+defm LDURD : LoadUnscaled<0b11, 1, 0b01, FPR64, "ldur",
+ [(set (f64 FPR64:$Rt),
+ (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
+defm LDURQ : LoadUnscaled<0b00, 1, 0b11, FPR128, "ldur",
+ [(set (f128 FPR128:$Rt),
+ (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset)))]>;
+
+defm LDURHH
+ : LoadUnscaled<0b01, 0, 0b01, GPR32, "ldurh",
+ [(set GPR32:$Rt,
+ (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
+defm LDURBB
+ : LoadUnscaled<0b00, 0, 0b01, GPR32, "ldurb",
+ [(set GPR32:$Rt,
+ (zextloadi8 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
+
+// Match all 64-bit-wide loads whose type is compatible with FPR64
+let Predicates = [IsLE] in {
+ def : Pat<(v2f32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
+ (LDURDi GPR64sp:$Rn, simm9:$offset)>;
+ def : Pat<(v2i32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
+ (LDURDi GPR64sp:$Rn, simm9:$offset)>;
+ def : Pat<(v4i16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
+ (LDURDi GPR64sp:$Rn, simm9:$offset)>;
+ def : Pat<(v8i8 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
+ (LDURDi GPR64sp:$Rn, simm9:$offset)>;
+}
+def : Pat<(v1f64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
+ (LDURDi GPR64sp:$Rn, simm9:$offset)>;
+def : Pat<(v1i64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
+ (LDURDi GPR64sp:$Rn, simm9:$offset)>;
+
+// Match all 128-bit-wide loads whose type is compatible with FPR128
+let Predicates = [IsLE] in {
+ def : Pat<(v2f64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
+ (LDURQi GPR64sp:$Rn, simm9:$offset)>;
+ def : Pat<(v2i64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
+ (LDURQi GPR64sp:$Rn, simm9:$offset)>;
+ def : Pat<(v4f32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
+ (LDURQi GPR64sp:$Rn, simm9:$offset)>;
+ def : Pat<(v4i32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
+ (LDURQi GPR64sp:$Rn, simm9:$offset)>;
+ def : Pat<(v8i16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
+ (LDURQi GPR64sp:$Rn, simm9:$offset)>;
+ def : Pat<(v16i8 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
+ (LDURQi GPR64sp:$Rn, simm9:$offset)>;
+}
+
+// anyext -> zext
+def : Pat<(i32 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
+ (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
+def : Pat<(i32 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
+ (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
+def : Pat<(i32 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
+ (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
+def : Pat<(i64 (extloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
+ (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
+def : Pat<(i64 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
+ (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
+def : Pat<(i64 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
+ (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
+def : Pat<(i64 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
+ (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
+// unscaled zext
+def : Pat<(i32 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
+ (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
+def : Pat<(i32 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
+ (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
+def : Pat<(i32 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
+ (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
+def : Pat<(i64 (zextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
+ (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
+def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
+ (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
+def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
+ (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
+def : Pat<(i64 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
+ (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
+
+
+//---
+// LDR mnemonics fall back to LDUR for negative or unaligned offsets.
+
+// Define new assembler match classes as we want to match these only when
+// they don't otherwise match the scaled addressing mode for LDR/STR. Don't
+// associate a DiagnosticType either, as we want the diagnostic for the
+// canonical form (the scaled operand) to take precedence.
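+// For example, "ldr x0, [x1, #-8]" cannot use the unsigned scaled LDR
+// encoding, so the fallback operands below let it match the unscaled LDUR
+// form (LDURXi) instead.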
+class SImm9OffsetOperand<int Width> : AsmOperandClass {
+ let Name = "SImm9OffsetFB" # Width;
+ let PredicateMethod = "isSImm9OffsetFB<" # Width # ">";
+ let RenderMethod = "addImmOperands";
}
-let mayLoad = 1 in {
- let Predicates = [HasFPARMv8] in {
- def LDRq_lit : A64I_LDRlitSimple<0b10, 0b1, FPR128>;
- }
-
-
- def LDRSWx_lit : A64I_LDRlit<0b10, 0b0,
- (outs GPR64:$Rt),
- (ins ldrlit_label:$Imm19),
- "ldrsw\t$Rt, $Imm19",
- [], NoItinerary>;
-
- def PRFM_lit : A64I_LDRlit<0b11, 0b0,
- (outs), (ins prefetch_op:$Rt, ldrlit_label:$Imm19),
- "prfm\t$Rt, $Imm19",
- [], NoItinerary>;
-}
+def SImm9OffsetFB8Operand : SImm9OffsetOperand<8>;
+def SImm9OffsetFB16Operand : SImm9OffsetOperand<16>;
+def SImm9OffsetFB32Operand : SImm9OffsetOperand<32>;
+def SImm9OffsetFB64Operand : SImm9OffsetOperand<64>;
+def SImm9OffsetFB128Operand : SImm9OffsetOperand<128>;
+
+def simm9_offset_fb8 : Operand<i64> {
+ let ParserMatchClass = SImm9OffsetFB8Operand;
+}
+def simm9_offset_fb16 : Operand<i64> {
+ let ParserMatchClass = SImm9OffsetFB16Operand;
+}
+def simm9_offset_fb32 : Operand<i64> {
+ let ParserMatchClass = SImm9OffsetFB32Operand;
+}
+def simm9_offset_fb64 : Operand<i64> {
+ let ParserMatchClass = SImm9OffsetFB64Operand;
+}
+def simm9_offset_fb128 : Operand<i64> {
+ let ParserMatchClass = SImm9OffsetFB128Operand;
+}
+
+def : InstAlias<"ldr $Rt, [$Rn, $offset]",
+ (LDURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
+def : InstAlias<"ldr $Rt, [$Rn, $offset]",
+ (LDURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
+def : InstAlias<"ldr $Rt, [$Rn, $offset]",
+ (LDURBi FPR8:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
+def : InstAlias<"ldr $Rt, [$Rn, $offset]",
+ (LDURHi FPR16:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
+def : InstAlias<"ldr $Rt, [$Rn, $offset]",
+ (LDURSi FPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
+def : InstAlias<"ldr $Rt, [$Rn, $offset]",
+ (LDURDi FPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
+def : InstAlias<"ldr $Rt, [$Rn, $offset]",
+ (LDURQi FPR128:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
+
+// zextload -> i64
+def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
+ (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
+def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
+ (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
+
+// load sign-extended half-word
+defm LDURSHW
+ : LoadUnscaled<0b01, 0, 0b11, GPR32, "ldursh",
+ [(set GPR32:$Rt,
+ (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
+defm LDURSHX
+ : LoadUnscaled<0b01, 0, 0b10, GPR64, "ldursh",
+ [(set GPR64:$Rt,
+ (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
+
+// load sign-extended byte
+defm LDURSBW
+ : LoadUnscaled<0b00, 0, 0b11, GPR32, "ldursb",
+ [(set GPR32:$Rt,
+ (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
+defm LDURSBX
+ : LoadUnscaled<0b00, 0, 0b10, GPR64, "ldursb",
+ [(set GPR64:$Rt,
+ (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
+
+// load sign-extended word
+defm LDURSW
+ : LoadUnscaled<0b10, 0, 0b10, GPR64, "ldursw",
+ [(set GPR64:$Rt,
+ (sextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
+
+// Zero- and sign-extending aliases from the generic LDR* mnemonics to LDUR*.
+def : InstAlias<"ldrb $Rt, [$Rn, $offset]",
+ (LDURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
+def : InstAlias<"ldrh $Rt, [$Rn, $offset]",
+ (LDURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
+def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
+ (LDURSBWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
+def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
+ (LDURSBXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
+def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
+ (LDURSHWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
+def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
+ (LDURSHXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
+def : InstAlias<"ldrsw $Rt, [$Rn, $offset]",
+ (LDURSWi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
+
+// Pre-fetch.
+defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
+ [(AArch64Prefetch imm:$Rt,
+ (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
+
+//---
+// (unscaled immediate, unprivileged)
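+// LDTR* behave like the corresponding LDUR* loads, except that when executed
+// at EL1 the memory access is performed with EL0 (unprivileged) permissions.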
+defm LDTRX : LoadUnprivileged<0b11, 0, 0b01, GPR64, "ldtr">;
+defm LDTRW : LoadUnprivileged<0b10, 0, 0b01, GPR32, "ldtr">;
+
+defm LDTRH : LoadUnprivileged<0b01, 0, 0b01, GPR32, "ldtrh">;
+defm LDTRB : LoadUnprivileged<0b00, 0, 0b01, GPR32, "ldtrb">;
+
+// load sign-extended half-word
+defm LDTRSHW : LoadUnprivileged<0b01, 0, 0b11, GPR32, "ldtrsh">;
+defm LDTRSHX : LoadUnprivileged<0b01, 0, 0b10, GPR64, "ldtrsh">;
+
+// load sign-extended byte
+defm LDTRSBW : LoadUnprivileged<0b00, 0, 0b11, GPR32, "ldtrsb">;
+defm LDTRSBX : LoadUnprivileged<0b00, 0, 0b10, GPR64, "ldtrsb">;
+
+// load sign-extended word
+defm LDTRSW : LoadUnprivileged<0b10, 0, 0b10, GPR64, "ldtrsw">;
+
+//---
+// (immediate pre-indexed)
+def LDRWpre : LoadPreIdx<0b10, 0, 0b01, GPR32, "ldr">;
+def LDRXpre : LoadPreIdx<0b11, 0, 0b01, GPR64, "ldr">;
+def LDRBpre : LoadPreIdx<0b00, 1, 0b01, FPR8, "ldr">;
+def LDRHpre : LoadPreIdx<0b01, 1, 0b01, FPR16, "ldr">;
+def LDRSpre : LoadPreIdx<0b10, 1, 0b01, FPR32, "ldr">;
+def LDRDpre : LoadPreIdx<0b11, 1, 0b01, FPR64, "ldr">;
+def LDRQpre : LoadPreIdx<0b00, 1, 0b11, FPR128, "ldr">;
+
+// load sign-extended half-word
+def LDRSHWpre : LoadPreIdx<0b01, 0, 0b11, GPR32, "ldrsh">;
+def LDRSHXpre : LoadPreIdx<0b01, 0, 0b10, GPR64, "ldrsh">;
+
+// load sign-extended byte
+def LDRSBWpre : LoadPreIdx<0b00, 0, 0b11, GPR32, "ldrsb">;
+def LDRSBXpre : LoadPreIdx<0b00, 0, 0b10, GPR64, "ldrsb">;
+
+// load zero-extended byte
+def LDRBBpre : LoadPreIdx<0b00, 0, 0b01, GPR32, "ldrb">;
+def LDRHHpre : LoadPreIdx<0b01, 0, 0b01, GPR32, "ldrh">;
+
+// load sign-extended word
+def LDRSWpre : LoadPreIdx<0b10, 0, 0b10, GPR64, "ldrsw">;
+
+//---
+// (immediate post-indexed)
+def LDRWpost : LoadPostIdx<0b10, 0, 0b01, GPR32, "ldr">;
+def LDRXpost : LoadPostIdx<0b11, 0, 0b01, GPR64, "ldr">;
+def LDRBpost : LoadPostIdx<0b00, 1, 0b01, FPR8, "ldr">;
+def LDRHpost : LoadPostIdx<0b01, 1, 0b01, FPR16, "ldr">;
+def LDRSpost : LoadPostIdx<0b10, 1, 0b01, FPR32, "ldr">;
+def LDRDpost : LoadPostIdx<0b11, 1, 0b01, FPR64, "ldr">;
+def LDRQpost : LoadPostIdx<0b00, 1, 0b11, FPR128, "ldr">;
+
+// load sign-extended half-word
+def LDRSHWpost : LoadPostIdx<0b01, 0, 0b11, GPR32, "ldrsh">;
+def LDRSHXpost : LoadPostIdx<0b01, 0, 0b10, GPR64, "ldrsh">;
+
+// load sign-extended byte
+def LDRSBWpost : LoadPostIdx<0b00, 0, 0b11, GPR32, "ldrsb">;
+def LDRSBXpost : LoadPostIdx<0b00, 0, 0b10, GPR64, "ldrsb">;
+
+// load zero-extended byte
+def LDRBBpost : LoadPostIdx<0b00, 0, 0b01, GPR32, "ldrb">;
+def LDRHHpost : LoadPostIdx<0b01, 0, 0b01, GPR32, "ldrh">;
+
+// load sign-extended word
+def LDRSWpost : LoadPostIdx<0b10, 0, 0b10, GPR64, "ldrsw">;
//===----------------------------------------------------------------------===//
-// Load-store exclusive instructions
+// Store instructions.
//===----------------------------------------------------------------------===//
-// Contains: STXRB, STXRH, STXR, LDXRB, LDXRH, LDXR. STXP, LDXP, STLXRB,
-// STLXRH, STLXR, LDAXRB, LDAXRH, LDAXR, STLXP, LDAXP, STLRB,
-// STLRH, STLR, LDARB, LDARH, LDAR
-
-// Since these instructions have the undefined register bits set to 1 in
-// their canonical form, we need a post encoder method to set those bits
-// to 1 when encoding these instructions. We do this using the
-// fixLoadStoreExclusive function. This function has template parameters:
-//
-// fixLoadStoreExclusive<int hasRs, int hasRt2>
-//
-// hasRs indicates that the instruction uses the Rs field, so we won't set
-// it to 1 (and the same for Rt2). We don't need template parameters for
-// the other register fiels since Rt and Rn are always used.
-
-// This operand parses a GPR64xsp register, followed by an optional immediate
-// #0.
-def GPR64xsp0_asmoperand : AsmOperandClass {
- let Name = "GPR64xsp0";
- let PredicateMethod = "isWrappedReg";
- let RenderMethod = "addRegOperands";
- let ParserMethod = "ParseLSXAddressOperand";
- // Diagnostics are provided by ParserMethod
-}
-
-def GPR64xsp0 : RegisterOperand<GPR64xsp> {
- let ParserMatchClass = GPR64xsp0_asmoperand;
-}
-
-//===----------------------------------
-// Store-exclusive (releasing & normal)
-//===----------------------------------
-
-class A64I_SRexs_impl<bits<2> size, bits<3> opcode, string asm, dag outs,
- dag ins, list<dag> pat,
- InstrItinClass itin> :
- A64I_LDSTex_stn <size,
- opcode{2}, 0, opcode{1}, opcode{0},
- outs, ins,
- !strconcat(asm, "\t$Rs, $Rt, [$Rn]"),
- pat, itin> {
- let mayStore = 1;
- let PostEncoderMethod = "fixLoadStoreExclusive<1,0>";
- let Constraints = "@earlyclobber $Rs";
-}
-
-multiclass A64I_SRex<string asmstr, bits<3> opcode, string prefix> {
- def _byte: A64I_SRexs_impl<0b00, opcode, !strconcat(asmstr, "b"),
- (outs GPR32:$Rs), (ins GPR32:$Rt, GPR64xsp0:$Rn),
- [], NoItinerary>;
-
- def _hword: A64I_SRexs_impl<0b01, opcode, !strconcat(asmstr, "h"),
- (outs GPR32:$Rs), (ins GPR32:$Rt, GPR64xsp0:$Rn),
- [],NoItinerary>;
-
- def _word: A64I_SRexs_impl<0b10, opcode, asmstr,
- (outs GPR32:$Rs), (ins GPR32:$Rt, GPR64xsp0:$Rn),
- [], NoItinerary>;
- def _dword: A64I_SRexs_impl<0b11, opcode, asmstr,
- (outs GPR32:$Rs), (ins GPR64:$Rt, GPR64xsp0:$Rn),
- [], NoItinerary>;
+// Pair (indexed, offset)
+// FIXME: Use dedicated range-checked addressing mode operand here.
+defm STPW : StorePairOffset<0b00, 0, GPR32, simm7s4, "stp">;
+defm STPX : StorePairOffset<0b10, 0, GPR64, simm7s8, "stp">;
+defm STPS : StorePairOffset<0b00, 1, FPR32, simm7s4, "stp">;
+defm STPD : StorePairOffset<0b01, 1, FPR64, simm7s8, "stp">;
+defm STPQ : StorePairOffset<0b10, 1, FPR128, simm7s16, "stp">;
+
+// Pair (pre-indexed)
+def STPWpre : StorePairPreIdx<0b00, 0, GPR32, simm7s4, "stp">;
+def STPXpre : StorePairPreIdx<0b10, 0, GPR64, simm7s8, "stp">;
+def STPSpre : StorePairPreIdx<0b00, 1, FPR32, simm7s4, "stp">;
+def STPDpre : StorePairPreIdx<0b01, 1, FPR64, simm7s8, "stp">;
+def STPQpre : StorePairPreIdx<0b10, 1, FPR128, simm7s16, "stp">;
+
+// Pair (post-indexed)
+def STPWpost : StorePairPostIdx<0b00, 0, GPR32, simm7s4, "stp">;
+def STPXpost : StorePairPostIdx<0b10, 0, GPR64, simm7s8, "stp">;
+def STPSpost : StorePairPostIdx<0b00, 1, FPR32, simm7s4, "stp">;
+def STPDpost : StorePairPostIdx<0b01, 1, FPR64, simm7s8, "stp">;
+def STPQpost : StorePairPostIdx<0b10, 1, FPR128, simm7s16, "stp">;
+
+// Pair (no allocate)
+defm STNPW : StorePairNoAlloc<0b00, 0, GPR32, simm7s4, "stnp">;
+defm STNPX : StorePairNoAlloc<0b10, 0, GPR64, simm7s8, "stnp">;
+defm STNPS : StorePairNoAlloc<0b00, 1, FPR32, simm7s4, "stnp">;
+defm STNPD : StorePairNoAlloc<0b01, 1, FPR64, simm7s8, "stnp">;
+defm STNPQ : StorePairNoAlloc<0b10, 1, FPR128, simm7s16, "stnp">;
+
+//---
+// (Register offset)
+
+// Integer
+defm STRBB : Store8RO< 0b00, 0, 0b00, GPR32, "strb", i32, truncstorei8>;
+defm STRHH : Store16RO<0b01, 0, 0b00, GPR32, "strh", i32, truncstorei16>;
+defm STRW : Store32RO<0b10, 0, 0b00, GPR32, "str", i32, store>;
+defm STRX : Store64RO<0b11, 0, 0b00, GPR64, "str", i64, store>;
+
+
+// Floating-point
+defm STRB : Store8RO< 0b00, 1, 0b00, FPR8, "str", untyped, store>;
+defm STRH : Store16RO<0b01, 1, 0b00, FPR16, "str", f16, store>;
+defm STRS : Store32RO<0b10, 1, 0b00, FPR32, "str", f32, store>;
+defm STRD : Store64RO<0b11, 1, 0b00, FPR64, "str", f64, store>;
+defm STRQ : Store128RO<0b00, 1, 0b10, FPR128, "str", f128, store>;
+
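+// Truncating stores from a 64-bit register: EXTRACT_SUBREG sub_32 selects the
+// W view of $Rt, and the narrow store then writes only the low 8/16/32 bits.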
+multiclass TruncStoreFrom64ROPat<ROAddrMode ro, SDPatternOperator storeop,
+ Instruction STRW, Instruction STRX> {
+
+ def : Pat<(storeop GPR64:$Rt,
+ (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
+ (STRW (EXTRACT_SUBREG GPR64:$Rt, sub_32),
+ GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
+
+ def : Pat<(storeop GPR64:$Rt,
+ (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
+ (STRX (EXTRACT_SUBREG GPR64:$Rt, sub_32),
+ GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}
-defm STXR : A64I_SRex<"stxr", 0b000, "STXR">;
-defm STLXR : A64I_SRex<"stlxr", 0b001, "STLXR">;
-
-//===----------------------------------
-// Loads
-//===----------------------------------
-
-class A64I_LRexs_impl<bits<2> size, bits<3> opcode, string asm, dag outs,
- dag ins, list<dag> pat,
- InstrItinClass itin> :
- A64I_LDSTex_tn <size,
- opcode{2}, 1, opcode{1}, opcode{0},
- outs, ins,
- !strconcat(asm, "\t$Rt, [$Rn]"),
- pat, itin> {
- let mayLoad = 1;
- let PostEncoderMethod = "fixLoadStoreExclusive<0,0>";
-}
-
-multiclass A64I_LRex<string asmstr, bits<3> opcode> {
- def _byte: A64I_LRexs_impl<0b00, opcode, !strconcat(asmstr, "b"),
- (outs GPR32:$Rt), (ins GPR64xsp0:$Rn),
- [], NoItinerary>;
-
- def _hword: A64I_LRexs_impl<0b01, opcode, !strconcat(asmstr, "h"),
- (outs GPR32:$Rt), (ins GPR64xsp0:$Rn),
- [], NoItinerary>;
-
- def _word: A64I_LRexs_impl<0b10, opcode, asmstr,
- (outs GPR32:$Rt), (ins GPR64xsp0:$Rn),
- [], NoItinerary>;
-
- def _dword: A64I_LRexs_impl<0b11, opcode, asmstr,
- (outs GPR64:$Rt), (ins GPR64xsp0:$Rn),
- [], NoItinerary>;
-}
-
-defm LDXR : A64I_LRex<"ldxr", 0b000>;
-defm LDAXR : A64I_LRex<"ldaxr", 0b001>;
-defm LDAR : A64I_LRex<"ldar", 0b101>;
-
-class acquiring_load<PatFrag base>
- : PatFrag<(ops node:$ptr), (base node:$ptr), [{
- AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
- return Ordering == Acquire || Ordering == SequentiallyConsistent;
-}]>;
-
-def atomic_load_acquire_8 : acquiring_load<atomic_load_8>;
-def atomic_load_acquire_16 : acquiring_load<atomic_load_16>;
-def atomic_load_acquire_32 : acquiring_load<atomic_load_32>;
-def atomic_load_acquire_64 : acquiring_load<atomic_load_64>;
-
-def : Pat<(atomic_load_acquire_8 i64:$Rn), (LDAR_byte $Rn)>;
-def : Pat<(atomic_load_acquire_16 i64:$Rn), (LDAR_hword $Rn)>;
-def : Pat<(atomic_load_acquire_32 i64:$Rn), (LDAR_word $Rn)>;
-def : Pat<(atomic_load_acquire_64 i64:$Rn), (LDAR_dword $Rn)>;
-
-//===----------------------------------
-// Store-release (no exclusivity)
-//===----------------------------------
-
-class A64I_SLexs_impl<bits<2> size, bits<3> opcode, string asm, dag outs,
- dag ins, list<dag> pat,
- InstrItinClass itin> :
- A64I_LDSTex_tn <size,
- opcode{2}, 0, opcode{1}, opcode{0},
- outs, ins,
- !strconcat(asm, "\t$Rt, [$Rn]"),
- pat, itin> {
- let mayStore = 1;
- let PostEncoderMethod = "fixLoadStoreExclusive<0,0>";
-}
-
-class releasing_store<PatFrag base>
- : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
- AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
- return Ordering == Release || Ordering == SequentiallyConsistent;
-}]>;
-
-def atomic_store_release_8 : releasing_store<atomic_store_8>;
-def atomic_store_release_16 : releasing_store<atomic_store_16>;
-def atomic_store_release_32 : releasing_store<atomic_store_32>;
-def atomic_store_release_64 : releasing_store<atomic_store_64>;
-
-multiclass A64I_SLex<string asmstr, bits<3> opcode, string prefix> {
- def _byte: A64I_SLexs_impl<0b00, opcode, !strconcat(asmstr, "b"),
- (outs), (ins GPR32:$Rt, GPR64xsp0:$Rn),
- [(atomic_store_release_8 i64:$Rn, i32:$Rt)],
- NoItinerary>;
-
- def _hword: A64I_SLexs_impl<0b01, opcode, !strconcat(asmstr, "h"),
- (outs), (ins GPR32:$Rt, GPR64xsp0:$Rn),
- [(atomic_store_release_16 i64:$Rn, i32:$Rt)],
- NoItinerary>;
-
- def _word: A64I_SLexs_impl<0b10, opcode, asmstr,
- (outs), (ins GPR32:$Rt, GPR64xsp0:$Rn),
- [(atomic_store_release_32 i64:$Rn, i32:$Rt)],
- NoItinerary>;
-
- def _dword: A64I_SLexs_impl<0b11, opcode, asmstr,
- (outs), (ins GPR64:$Rt, GPR64xsp0:$Rn),
- [(atomic_store_release_64 i64:$Rn, i64:$Rt)],
- NoItinerary>;
-}
-
-defm STLR : A64I_SLex<"stlr", 0b101, "STLR">;
-
-//===----------------------------------
-// Store-exclusive pair (releasing & normal)
-//===----------------------------------
-
-class A64I_SPexs_impl<bits<2> size, bits<3> opcode, string asm, dag outs,
- dag ins, list<dag> pat,
- InstrItinClass itin> :
- A64I_LDSTex_stt2n <size,
- opcode{2}, 0, opcode{1}, opcode{0},
- outs, ins,
- !strconcat(asm, "\t$Rs, $Rt, $Rt2, [$Rn]"),
- pat, itin> {
- let mayStore = 1;
-}
-
-
-multiclass A64I_SPex<string asmstr, bits<3> opcode> {
- def _word: A64I_SPexs_impl<0b10, opcode, asmstr, (outs),
- (ins GPR32:$Rs, GPR32:$Rt, GPR32:$Rt2,
- GPR64xsp0:$Rn),
- [], NoItinerary>;
-
- def _dword: A64I_SPexs_impl<0b11, opcode, asmstr, (outs),
- (ins GPR32:$Rs, GPR64:$Rt, GPR64:$Rt2,
- GPR64xsp0:$Rn),
- [], NoItinerary>;
-}
-
-defm STXP : A64I_SPex<"stxp", 0b010>;
-defm STLXP : A64I_SPex<"stlxp", 0b011>;
-
-//===----------------------------------
-// Load-exclusive pair (acquiring & normal)
-//===----------------------------------
-
-class A64I_LPexs_impl<bits<2> size, bits<3> opcode, string asm, dag outs,
- dag ins, list<dag> pat,
- InstrItinClass itin> :
- A64I_LDSTex_tt2n <size,
- opcode{2}, 1, opcode{1}, opcode{0},
- outs, ins,
- !strconcat(asm, "\t$Rt, $Rt2, [$Rn]"),
- pat, itin>{
- let mayLoad = 1;
- let DecoderMethod = "DecodeLoadPairExclusiveInstruction";
- let PostEncoderMethod = "fixLoadStoreExclusive<0,1>";
+let AddedComplexity = 10 in {
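+  // AddedComplexity makes instruction selection prefer these register-offset
+  // patterns over lower-complexity alternatives when both could match.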
+ // truncstore i64
+ defm : TruncStoreFrom64ROPat<ro8, truncstorei8, STRBBroW, STRBBroX>;
+ defm : TruncStoreFrom64ROPat<ro16, truncstorei16, STRHHroW, STRHHroX>;
+ defm : TruncStoreFrom64ROPat<ro32, truncstorei32, STRWroW, STRWroX>;
}
-multiclass A64I_LPex<string asmstr, bits<3> opcode> {
- def _word: A64I_LPexs_impl<0b10, opcode, asmstr,
- (outs GPR32:$Rt, GPR32:$Rt2),
- (ins GPR64xsp0:$Rn),
- [], NoItinerary>;
+multiclass VecROStorePat<ROAddrMode ro, ValueType VecTy, RegisterClass FPR,
+ Instruction STRW, Instruction STRX> {
+ def : Pat<(store (VecTy FPR:$Rt),
+ (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
+ (STRW FPR:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
- def _dword: A64I_LPexs_impl<0b11, opcode, asmstr,
- (outs GPR64:$Rt, GPR64:$Rt2),
- (ins GPR64xsp0:$Rn),
- [], NoItinerary>;
+ def : Pat<(store (VecTy FPR:$Rt),
+ (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
+ (STRX FPR:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}
-defm LDXP : A64I_LPex<"ldxp", 0b010>;
-defm LDAXP : A64I_LPex<"ldaxp", 0b011>;
+let AddedComplexity = 10 in {
+// Match all 64-bit-wide stores whose type is compatible with FPR64
+let Predicates = [IsLE] in {
+ // We must use ST1 to store vectors in big-endian.
+ defm : VecROStorePat<ro64, v2i32, FPR64, STRDroW, STRDroX>;
+ defm : VecROStorePat<ro64, v2f32, FPR64, STRDroW, STRDroX>;
+ defm : VecROStorePat<ro64, v4i16, FPR64, STRDroW, STRDroX>;
+ defm : VecROStorePat<ro64, v8i8, FPR64, STRDroW, STRDroX>;
+}
+
+defm : VecROStorePat<ro64, v1i64, FPR64, STRDroW, STRDroX>;
+defm : VecROStorePat<ro64, v1f64, FPR64, STRDroW, STRDroX>;
+
+// Match all 128-bit-wide stores whose type is compatible with FPR128
+let Predicates = [IsLE] in {
+ // We must use ST1 to store vectors in big-endian.
+ defm : VecROStorePat<ro128, v2i64, FPR128, STRQroW, STRQroX>;
+ defm : VecROStorePat<ro128, v2f64, FPR128, STRQroW, STRQroX>;
+ defm : VecROStorePat<ro128, v4i32, FPR128, STRQroW, STRQroX>;
+ defm : VecROStorePat<ro128, v4f32, FPR128, STRQroW, STRQroX>;
+ defm : VecROStorePat<ro128, v8i16, FPR128, STRQroW, STRQroX>;
+ defm : VecROStorePat<ro128, v16i8, FPR128, STRQroW, STRQroX>;
+}
+} // AddedComplexity = 10
+
+//---
+// (unsigned immediate)
+defm STRX : StoreUI<0b11, 0, 0b00, GPR64, uimm12s8, "str",
+ [(store GPR64:$Rt,
+ (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
+defm STRW : StoreUI<0b10, 0, 0b00, GPR32, uimm12s4, "str",
+ [(store GPR32:$Rt,
+ (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
+defm STRB : StoreUI<0b00, 1, 0b00, FPR8, uimm12s1, "str",
+ [(store FPR8:$Rt,
+ (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))]>;
+defm STRH : StoreUI<0b01, 1, 0b00, FPR16, uimm12s2, "str",
+ [(store (f16 FPR16:$Rt),
+ (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))]>;
+defm STRS : StoreUI<0b10, 1, 0b00, FPR32, uimm12s4, "str",
+ [(store (f32 FPR32:$Rt),
+ (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
+defm STRD : StoreUI<0b11, 1, 0b00, FPR64, uimm12s8, "str",
+ [(store (f64 FPR64:$Rt),
+ (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
+defm STRQ : StoreUI<0b00, 1, 0b10, FPR128, uimm12s16, "str", []>;
+
+defm STRHH : StoreUI<0b01, 0, 0b00, GPR32, uimm12s2, "strh",
+ [(truncstorei16 GPR32:$Rt,
+ (am_indexed16 GPR64sp:$Rn,
+ uimm12s2:$offset))]>;
+defm STRBB : StoreUI<0b00, 0, 0b00, GPR32, uimm12s1, "strb",
+ [(truncstorei8 GPR32:$Rt,
+ (am_indexed8 GPR64sp:$Rn,
+ uimm12s1:$offset))]>;
+
+// Match all 64-bit-wide stores whose type is compatible with FPR64
+let AddedComplexity = 10 in {
+let Predicates = [IsLE] in {
+ // We must use ST1 to store vectors in big-endian.
+ def : Pat<(store (v2f32 FPR64:$Rt),
+ (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
+ (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
+ def : Pat<(store (v8i8 FPR64:$Rt),
+ (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
+ (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
+ def : Pat<(store (v4i16 FPR64:$Rt),
+ (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
+ (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
+ def : Pat<(store (v2i32 FPR64:$Rt),
+ (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
+ (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
+}
+def : Pat<(store (v1f64 FPR64:$Rt),
+ (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
+ (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
+def : Pat<(store (v1i64 FPR64:$Rt),
+ (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
+ (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
+
+// Match all 128-bit-wide stores whose type is compatible with FPR128
+let Predicates = [IsLE] in {
+ // We must use ST1 to store vectors in big-endian.
+ def : Pat<(store (v4f32 FPR128:$Rt),
+ (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
+ (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
+ def : Pat<(store (v2f64 FPR128:$Rt),
+ (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
+ (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
+ def : Pat<(store (v16i8 FPR128:$Rt),
+ (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
+ (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
+ def : Pat<(store (v8i16 FPR128:$Rt),
+ (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
+ (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
+ def : Pat<(store (v4i32 FPR128:$Rt),
+ (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
+ (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
+ def : Pat<(store (v2i64 FPR128:$Rt),
+ (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
+ (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
+}
+def : Pat<(store (f128 FPR128:$Rt),
+ (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
+ (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
+
+// truncstore i64
+def : Pat<(truncstorei32 GPR64:$Rt,
+ (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)),
+ (STRWui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s4:$offset)>;
+def : Pat<(truncstorei16 GPR64:$Rt,
+ (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
+ (STRHHui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s2:$offset)>;
+def : Pat<(truncstorei8 GPR64:$Rt, (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)),
+ (STRBBui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s1:$offset)>;
+
+} // AddedComplexity = 10
+
+//---
+// (unscaled immediate)
+defm STURX : StoreUnscaled<0b11, 0, 0b00, GPR64, "stur",
+ [(store GPR64:$Rt,
+ (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
+defm STURW : StoreUnscaled<0b10, 0, 0b00, GPR32, "stur",
+ [(store GPR32:$Rt,
+ (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
+defm STURB : StoreUnscaled<0b00, 1, 0b00, FPR8, "stur",
+ [(store FPR8:$Rt,
+ (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
+defm STURH : StoreUnscaled<0b01, 1, 0b00, FPR16, "stur",
+ [(store (f16 FPR16:$Rt),
+ (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
+defm STURS : StoreUnscaled<0b10, 1, 0b00, FPR32, "stur",
+ [(store (f32 FPR32:$Rt),
+ (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
+defm STURD : StoreUnscaled<0b11, 1, 0b00, FPR64, "stur",
+ [(store (f64 FPR64:$Rt),
+ (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
+defm STURQ : StoreUnscaled<0b00, 1, 0b10, FPR128, "stur",
+ [(store (f128 FPR128:$Rt),
+ (am_unscaled128 GPR64sp:$Rn, simm9:$offset))]>;
+defm STURHH : StoreUnscaled<0b01, 0, 0b00, GPR32, "sturh",
+ [(truncstorei16 GPR32:$Rt,
+ (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
+defm STURBB : StoreUnscaled<0b00, 0, 0b00, GPR32, "sturb",
+ [(truncstorei8 GPR32:$Rt,
+ (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
+
+// Match all 64-bit-wide stores whose type is compatible with FPR64
+let Predicates = [IsLE] in {
+ // We must use ST1 to store vectors in big-endian.
+ def : Pat<(store (v2f32 FPR64:$Rt),
+ (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
+ (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
+ def : Pat<(store (v8i8 FPR64:$Rt),
+ (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
+ (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
+ def : Pat<(store (v4i16 FPR64:$Rt),
+ (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
+ (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
+ def : Pat<(store (v2i32 FPR64:$Rt),
+ (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
+ (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
+}
+def : Pat<(store (v1f64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
+ (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
+def : Pat<(store (v1i64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
+ (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
+
+// Match all 128-bit-wide stores whose type is compatible with FPR128
+let Predicates = [IsLE] in {
+ // We must use ST1 to store vectors in big-endian.
+ def : Pat<(store (v4f32 FPR128:$Rt),
+ (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
+ (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
+ def : Pat<(store (v2f64 FPR128:$Rt),
+ (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
+ (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
+ def : Pat<(store (v16i8 FPR128:$Rt),
+ (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
+ (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
+ def : Pat<(store (v8i16 FPR128:$Rt),
+ (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
+ (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
+ def : Pat<(store (v4i32 FPR128:$Rt),
+ (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
+ (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
+ def : Pat<(store (v2i64 FPR128:$Rt),
+ (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
+ (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
+}
+
+// unscaled i64 truncating stores
+def : Pat<(truncstorei32 GPR64:$Rt, (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
+ (STURWi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
+def : Pat<(truncstorei16 GPR64:$Rt, (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
+ (STURHHi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
+def : Pat<(truncstorei8 GPR64:$Rt, (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
+ (STURBBi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
+
+//---
+// STR mnemonics fall back to STUR for negative or unaligned offsets.
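+// (these aliases reuse the simm9_offset_fb* fallback operands defined for the
+// load forms above)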
+def : InstAlias<"str $Rt, [$Rn, $offset]",
+ (STURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
+def : InstAlias<"str $Rt, [$Rn, $offset]",
+ (STURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
+def : InstAlias<"str $Rt, [$Rn, $offset]",
+ (STURBi FPR8:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
+def : InstAlias<"str $Rt, [$Rn, $offset]",
+ (STURHi FPR16:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
+def : InstAlias<"str $Rt, [$Rn, $offset]",
+ (STURSi FPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
+def : InstAlias<"str $Rt, [$Rn, $offset]",
+ (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
+def : InstAlias<"str $Rt, [$Rn, $offset]",
+ (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
+
+def : InstAlias<"strb $Rt, [$Rn, $offset]",
+ (STURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
+def : InstAlias<"strh $Rt, [$Rn, $offset]",
+ (STURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
+
+//---
+// (unscaled immediate, unprivileged)
+defm STTRW : StoreUnprivileged<0b10, 0, 0b00, GPR32, "sttr">;
+defm STTRX : StoreUnprivileged<0b11, 0, 0b00, GPR64, "sttr">;
+
+defm STTRH : StoreUnprivileged<0b01, 0, 0b00, GPR32, "sttrh">;
+defm STTRB : StoreUnprivileged<0b00, 0, 0b00, GPR32, "sttrb">;
+
+//---
+// (immediate pre-indexed)
+def STRWpre : StorePreIdx<0b10, 0, 0b00, GPR32, "str", pre_store, i32>;
+def STRXpre : StorePreIdx<0b11, 0, 0b00, GPR64, "str", pre_store, i64>;
+def STRBpre : StorePreIdx<0b00, 1, 0b00, FPR8, "str", pre_store, untyped>;
+def STRHpre : StorePreIdx<0b01, 1, 0b00, FPR16, "str", pre_store, f16>;
+def STRSpre : StorePreIdx<0b10, 1, 0b00, FPR32, "str", pre_store, f32>;
+def STRDpre : StorePreIdx<0b11, 1, 0b00, FPR64, "str", pre_store, f64>;
+def STRQpre : StorePreIdx<0b00, 1, 0b10, FPR128, "str", pre_store, f128>;
+
+def STRBBpre : StorePreIdx<0b00, 0, 0b00, GPR32, "strb", pre_truncsti8, i32>;
+def STRHHpre : StorePreIdx<0b01, 0, 0b00, GPR32, "strh", pre_truncsti16, i32>;
+
+// truncstore i64
+def : Pat<(pre_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
+ (STRWpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
+ simm9:$off)>;
+def : Pat<(pre_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
+ (STRHHpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
+ simm9:$off)>;
+def : Pat<(pre_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
+ (STRBBpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
+ simm9:$off)>;
+
+def : Pat<(pre_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
+def : Pat<(pre_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
+def : Pat<(pre_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
+def : Pat<(pre_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
+def : Pat<(pre_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
+def : Pat<(pre_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
+
+def : Pat<(pre_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
+def : Pat<(pre_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
+def : Pat<(pre_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
+def : Pat<(pre_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
+def : Pat<(pre_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
+def : Pat<(pre_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
+
+//---
+// (immediate post-indexed)
+def STRWpost : StorePostIdx<0b10, 0, 0b00, GPR32, "str", post_store, i32>;
+def STRXpost : StorePostIdx<0b11, 0, 0b00, GPR64, "str", post_store, i64>;
+def STRBpost : StorePostIdx<0b00, 1, 0b00, FPR8, "str", post_store, untyped>;
+def STRHpost : StorePostIdx<0b01, 1, 0b00, FPR16, "str", post_store, f16>;
+def STRSpost : StorePostIdx<0b10, 1, 0b00, FPR32, "str", post_store, f32>;
+def STRDpost : StorePostIdx<0b11, 1, 0b00, FPR64, "str", post_store, f64>;
+def STRQpost : StorePostIdx<0b00, 1, 0b10, FPR128, "str", post_store, f128>;
+
+def STRBBpost : StorePostIdx<0b00, 0, 0b00, GPR32, "strb", post_truncsti8, i32>;
+def STRHHpost : StorePostIdx<0b01, 0, 0b00, GPR32, "strh", post_truncsti16, i32>;
+
+// truncstore i64
+def : Pat<(post_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
+ (STRWpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
+ simm9:$off)>;
+def : Pat<(post_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
+ (STRHHpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
+ simm9:$off)>;
+def : Pat<(post_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
+ (STRBBpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
+ simm9:$off)>;
+
+def : Pat<(post_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
+def : Pat<(post_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
+def : Pat<(post_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
+def : Pat<(post_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
+def : Pat<(post_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
+def : Pat<(post_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
+
+def : Pat<(post_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
+def : Pat<(post_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
+def : Pat<(post_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
+def : Pat<(post_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
+def : Pat<(post_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
+def : Pat<(post_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
+ (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
//===----------------------------------------------------------------------===//
-// Load-store register (unscaled immediate) instructions
+// Load/store exclusive instructions.
//===----------------------------------------------------------------------===//
-// Contains: LDURB, LDURH, LDRUSB, LDRUSH, LDRUSW, STUR, STURB, STURH and PRFUM
-//
-// and
-//
-//===----------------------------------------------------------------------===//
-// Load-store register (register offset) instructions
-//===----------------------------------------------------------------------===//
-// Contains: LDRB, LDRH, LDRSB, LDRSH, LDRSW, STR, STRB, STRH and PRFM
-//
-// and
-//
-//===----------------------------------------------------------------------===//
-// Load-store register (unsigned immediate) instructions
-//===----------------------------------------------------------------------===//
-// Contains: LDRB, LDRH, LDRSB, LDRSH, LDRSW, STR, STRB, STRH and PRFM
-//
-// and
-//
-//===----------------------------------------------------------------------===//
-// Load-store register (immediate post-indexed) instructions
-//===----------------------------------------------------------------------===//
-// Contains: STRB, STRH, STR, LDRB, LDRH, LDR, LDRSB, LDRSH, LDRSW
-//
-// and
-//
-//===----------------------------------------------------------------------===//
-// Load-store register (immediate pre-indexed) instructions
-//===----------------------------------------------------------------------===//
-// Contains: STRB, STRH, STR, LDRB, LDRH, LDR, LDRSB, LDRSH, LDRSW
-
-// Note that patterns are much later on in a completely separate section (they
-// need ADRPxi to be defined).
-
-//===-------------------------------
-// 1. Various operands needed
-//===-------------------------------
-
-//===-------------------------------
-// 1.1 Unsigned 12-bit immediate operands
-//===-------------------------------
-// The addressing mode for these instructions consists of an unsigned 12-bit
-// immediate which is scaled by the size of the memory access.
-//
-// We represent this in the MC layer by two operands:
-// 1. A base register.
-// 2. A 12-bit immediate: not multiplied by access size, so "LDR x0,[x0,#8]"
-// would have '1' in this field.
-// This means that separate functions are needed for converting representations
-// which *are* aware of the intended access size.
-
-// Anything that creates an MCInst (Decoding, selection and AsmParsing) has to
-// know the access size via some means. An isolated operand does not have this
-// information unless told from here, which means we need separate tablegen
-// Operands for each access size. This multiclass takes care of instantiating
-// the correct template functions in the rest of the backend.
-
-//===-------------------------------
-// 1.1 Unsigned 12-bit immediate operands
-//===-------------------------------
-
-multiclass offsets_uimm12<int MemSize, string prefix> {
- def uimm12_asmoperand : AsmOperandClass {
- let Name = "OffsetUImm12_" # MemSize;
- let PredicateMethod = "isOffsetUImm12<" # MemSize # ">";
- let RenderMethod = "addOffsetUImm12Operands<" # MemSize # ">";
- let DiagnosticType = "LoadStoreUImm12_" # MemSize;
- }
-
- // Pattern is really no more than an ImmLeaf, but predicated on MemSize which
- // complicates things beyond TableGen's ken.
- def uimm12 : Operand<i64>,
- ComplexPattern<i64, 1, "SelectOffsetUImm12<" # MemSize # ">"> {
- let ParserMatchClass
- = !cast<AsmOperandClass>(prefix # uimm12_asmoperand);
-
- let PrintMethod = "printOffsetUImm12Operand<" # MemSize # ">";
- let EncoderMethod = "getOffsetUImm12OpValue<" # MemSize # ">";
- }
-}
-
-defm byte_ : offsets_uimm12<1, "byte_">;
-defm hword_ : offsets_uimm12<2, "hword_">;
-defm word_ : offsets_uimm12<4, "word_">;
-defm dword_ : offsets_uimm12<8, "dword_">;
-defm qword_ : offsets_uimm12<16, "qword_">;
-//===-------------------------------
-// 1.1 Signed 9-bit immediate operands
-//===-------------------------------
+def LDARW : LoadAcquire <0b10, 1, 1, 0, 1, GPR32, "ldar">;
+def LDARX : LoadAcquire <0b11, 1, 1, 0, 1, GPR64, "ldar">;
+def LDARB : LoadAcquire <0b00, 1, 1, 0, 1, GPR32, "ldarb">;
+def LDARH : LoadAcquire <0b01, 1, 1, 0, 1, GPR32, "ldarh">;
-// The MCInst is expected to store the bit-wise encoding of the value,
-// which amounts to lopping off the extended sign bits.
-def SDXF_simm9 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getZExtValue() & 0x1ff, MVT::i32);
-}]>;
-
-def simm9_asmoperand : AsmOperandClass {
- let Name = "SImm9";
- let PredicateMethod = "isSImm<9>";
- let RenderMethod = "addSImmOperands<9>";
- let DiagnosticType = "LoadStoreSImm9";
-}
-
-def simm9 : Operand<i64>,
- ImmLeaf<i64, [{ return Imm >= -0x100 && Imm <= 0xff; }],
- SDXF_simm9> {
- let PrintMethod = "printOffsetSImm9Operand";
- let ParserMatchClass = simm9_asmoperand;
-}
-
-
-//===-------------------------------
-// 1.3 Register offset extensions
-//===-------------------------------
-
-// The assembly-syntax for these addressing-modes is:
-// [<Xn|SP>, <R><m> {, <extend> {<amount>}}]
-//
-// The essential semantics are:
-// + <amount> is a shift: #<log(transfer size)> or #0
-// + <R> can be W or X.
-// + If <R> is W, <extend> can be UXTW or SXTW
-// + If <R> is X, <extend> can be LSL or SXTX
-//
-// The trickiest of those constraints is that Rm can be either GPR32 or GPR64,
-// which will need separate instructions for LLVM type-consistency. We'll also
-// need separate operands, of course.
-multiclass regexts<int MemSize, int RmSize, RegisterClass GPR,
- string Rm, string prefix> {
- def regext_asmoperand : AsmOperandClass {
- let Name = "AddrRegExtend_" # MemSize # "_" # Rm;
- let PredicateMethod = "isAddrRegExtend<" # MemSize # "," # RmSize # ">";
- let RenderMethod = "addAddrRegExtendOperands<" # MemSize # ">";
- let DiagnosticType = "LoadStoreExtend" # RmSize # "_" # MemSize;
- }
-
- def regext : Operand<i64> {
- let PrintMethod
- = "printAddrRegExtendOperand<" # MemSize # ", " # RmSize # ">";
-
- let DecoderMethod = "DecodeAddrRegExtendOperand";
- let ParserMatchClass
- = !cast<AsmOperandClass>(prefix # regext_asmoperand);
- }
-}
+def LDAXRW : LoadExclusive <0b10, 0, 1, 0, 1, GPR32, "ldaxr">;
+def LDAXRX : LoadExclusive <0b11, 0, 1, 0, 1, GPR64, "ldaxr">;
+def LDAXRB : LoadExclusive <0b00, 0, 1, 0, 1, GPR32, "ldaxrb">;
+def LDAXRH : LoadExclusive <0b01, 0, 1, 0, 1, GPR32, "ldaxrh">;
-multiclass regexts_wx<int MemSize, string prefix> {
- // Rm is an X-register if LSL or SXTX are specified as the shift.
- defm Xm_ : regexts<MemSize, 64, GPR64, "Xm", prefix # "Xm_">;
-
- // Rm is a W-register if UXTW or SXTW are specified as the shift.
- defm Wm_ : regexts<MemSize, 32, GPR32, "Wm", prefix # "Wm_">;
-}
+def LDXRW : LoadExclusive <0b10, 0, 1, 0, 0, GPR32, "ldxr">;
+def LDXRX : LoadExclusive <0b11, 0, 1, 0, 0, GPR64, "ldxr">;
+def LDXRB : LoadExclusive <0b00, 0, 1, 0, 0, GPR32, "ldxrb">;
+def LDXRH : LoadExclusive <0b01, 0, 1, 0, 0, GPR32, "ldxrh">;
-defm byte_ : regexts_wx<1, "byte_">;
-defm hword_ : regexts_wx<2, "hword_">;
-defm word_ : regexts_wx<4, "word_">;
-defm dword_ : regexts_wx<8, "dword_">;
-defm qword_ : regexts_wx<16, "qword_">;
+def STLRW : StoreRelease <0b10, 1, 0, 0, 1, GPR32, "stlr">;
+def STLRX : StoreRelease <0b11, 1, 0, 0, 1, GPR64, "stlr">;
+def STLRB : StoreRelease <0b00, 1, 0, 0, 1, GPR32, "stlrb">;
+def STLRH : StoreRelease <0b01, 1, 0, 0, 1, GPR32, "stlrh">;
+def STLXRW : StoreExclusive<0b10, 0, 0, 0, 1, GPR32, "stlxr">;
+def STLXRX : StoreExclusive<0b11, 0, 0, 0, 1, GPR64, "stlxr">;
+def STLXRB : StoreExclusive<0b00, 0, 0, 0, 1, GPR32, "stlxrb">;
+def STLXRH : StoreExclusive<0b01, 0, 0, 0, 1, GPR32, "stlxrh">;
-//===------------------------------
-// 2. The instructions themselves.
-//===------------------------------
+def STXRW : StoreExclusive<0b10, 0, 0, 0, 0, GPR32, "stxr">;
+def STXRX : StoreExclusive<0b11, 0, 0, 0, 0, GPR64, "stxr">;
+def STXRB : StoreExclusive<0b00, 0, 0, 0, 0, GPR32, "stxrb">;
+def STXRH : StoreExclusive<0b01, 0, 0, 0, 0, GPR32, "stxrh">;
-// We have the following instructions to implement:
-// | | B | H | W | X |
-// |-----------------+-------+-------+-------+--------|
-// | unsigned str | STRB | STRH | STR | STR |
-// | unsigned ldr | LDRB | LDRH | LDR | LDR |
-// | signed ldr to W | LDRSB | LDRSH | - | - |
-// | signed ldr to X | LDRSB | LDRSH | LDRSW | (PRFM) |
+def LDAXPW : LoadExclusivePair<0b10, 0, 1, 1, 1, GPR32, "ldaxp">;
+def LDAXPX : LoadExclusivePair<0b11, 0, 1, 1, 1, GPR64, "ldaxp">;
-// This will instantiate the LDR/STR instructions you'd expect to use for an
-// unsigned datatype (first two rows above) or floating-point register, which is
-// reasonably uniform across all access sizes.
+def LDXPW : LoadExclusivePair<0b10, 0, 1, 1, 0, GPR32, "ldxp">;
+def LDXPX : LoadExclusivePair<0b11, 0, 1, 1, 0, GPR64, "ldxp">;
+def STLXPW : StoreExclusivePair<0b10, 0, 0, 1, 1, GPR32, "stlxp">;
+def STLXPX : StoreExclusivePair<0b11, 0, 0, 1, 1, GPR64, "stlxp">;
-//===------------------------------
-// 2.1 Regular instructions
-//===------------------------------
-
-// This class covers the basic unsigned or irrelevantly-signed loads and stores,
-// to general-purpose and floating-point registers.
-
-class AddrParams<string prefix> {
- Operand uimm12 = !cast<Operand>(prefix # "_uimm12");
-
- Operand regextWm = !cast<Operand>(prefix # "_Wm_regext");
- Operand regextXm = !cast<Operand>(prefix # "_Xm_regext");
-}
-
-def byte_addrparams : AddrParams<"byte">;
-def hword_addrparams : AddrParams<"hword">;
-def word_addrparams : AddrParams<"word">;
-def dword_addrparams : AddrParams<"dword">;
-def qword_addrparams : AddrParams<"qword">;
-
-multiclass A64I_LDRSTR_unsigned<string prefix, bits<2> size, bit v,
- bit high_opc, string asmsuffix,
- RegisterClass GPR, AddrParams params> {
- // Unsigned immediate
- def _STR : A64I_LSunsigimm<size, v, {high_opc, 0b0},
- (outs), (ins GPR:$Rt, GPR64xsp:$Rn, params.uimm12:$UImm12),
- "str" # asmsuffix # "\t$Rt, [$Rn, $UImm12]",
- [], NoItinerary> {
- let mayStore = 1;
- }
- def : InstAlias<"str" # asmsuffix # " $Rt, [$Rn]",
- (!cast<Instruction>(prefix # "_STR") GPR:$Rt, GPR64xsp:$Rn, 0)>;
-
- def _LDR : A64I_LSunsigimm<size, v, {high_opc, 0b1},
- (outs GPR:$Rt), (ins GPR64xsp:$Rn, params.uimm12:$UImm12),
- "ldr" # asmsuffix # "\t$Rt, [$Rn, $UImm12]",
- [], NoItinerary> {
- let mayLoad = 1;
- }
- def : InstAlias<"ldr" # asmsuffix # " $Rt, [$Rn]",
- (!cast<Instruction>(prefix # "_LDR") GPR:$Rt, GPR64xsp:$Rn, 0)>;
-
- // Register offset (four of these: load/store and Wm/Xm).
- let mayLoad = 1 in {
- def _Wm_RegOffset_LDR : A64I_LSregoff<size, v, {high_opc, 0b1}, 0b0,
- (outs GPR:$Rt),
- (ins GPR64xsp:$Rn, GPR32:$Rm, params.regextWm:$Ext),
- "ldr" # asmsuffix # "\t$Rt, [$Rn, $Rm, $Ext]",
- [], NoItinerary>;
-
- def _Xm_RegOffset_LDR : A64I_LSregoff<size, v, {high_opc, 0b1}, 0b1,
- (outs GPR:$Rt),
- (ins GPR64xsp:$Rn, GPR64:$Rm, params.regextXm:$Ext),
- "ldr" # asmsuffix # "\t$Rt, [$Rn, $Rm, $Ext]",
- [], NoItinerary>;
- }
- def : InstAlias<"ldr" # asmsuffix # " $Rt, [$Rn, $Rm]",
- (!cast<Instruction>(prefix # "_Xm_RegOffset_LDR") GPR:$Rt, GPR64xsp:$Rn,
- GPR64:$Rm, 2)>;
-
- let mayStore = 1 in {
- def _Wm_RegOffset_STR : A64I_LSregoff<size, v, {high_opc, 0b0}, 0b0,
- (outs), (ins GPR:$Rt, GPR64xsp:$Rn, GPR32:$Rm,
- params.regextWm:$Ext),
- "str" # asmsuffix # "\t$Rt, [$Rn, $Rm, $Ext]",
- [], NoItinerary>;
-
- def _Xm_RegOffset_STR : A64I_LSregoff<size, v, {high_opc, 0b0}, 0b1,
- (outs), (ins GPR:$Rt, GPR64xsp:$Rn, GPR64:$Rm,
- params.regextXm:$Ext),
- "str" # asmsuffix # "\t$Rt, [$Rn, $Rm, $Ext]",
- [], NoItinerary>;
- }
- def : InstAlias<"str" # asmsuffix # " $Rt, [$Rn, $Rm]",
- (!cast<Instruction>(prefix # "_Xm_RegOffset_STR") GPR:$Rt, GPR64xsp:$Rn,
- GPR64:$Rm, 2)>;
-
- // Unaligned immediate
- def _STUR : A64I_LSunalimm<size, v, {high_opc, 0b0},
- (outs), (ins GPR:$Rt, GPR64xsp:$Rn, simm9:$SImm9),
- "stur" # asmsuffix # "\t$Rt, [$Rn, $SImm9]",
- [], NoItinerary> {
- let mayStore = 1;
- }
- def : InstAlias<"stur" # asmsuffix # " $Rt, [$Rn]",
- (!cast<Instruction>(prefix # "_STUR") GPR:$Rt, GPR64xsp:$Rn, 0)>;
-
- def _LDUR : A64I_LSunalimm<size, v, {high_opc, 0b1},
- (outs GPR:$Rt), (ins GPR64xsp:$Rn, simm9:$SImm9),
- "ldur" # asmsuffix # "\t$Rt, [$Rn, $SImm9]",
- [], NoItinerary> {
- let mayLoad = 1;
- }
- def : InstAlias<"ldur" # asmsuffix # " $Rt, [$Rn]",
- (!cast<Instruction>(prefix # "_LDUR") GPR:$Rt, GPR64xsp:$Rn, 0)>;
-
- // Post-indexed
- def _PostInd_STR : A64I_LSpostind<size, v, {high_opc, 0b0},
- (outs GPR64xsp:$Rn_wb),
- (ins GPR:$Rt, GPR64xsp:$Rn, simm9:$SImm9),
- "str" # asmsuffix # "\t$Rt, [$Rn], $SImm9",
- [], NoItinerary> {
- let Constraints = "$Rn = $Rn_wb";
- let mayStore = 1;
-
- // Decoder only needed for unpredictability checking (FIXME).
- let DecoderMethod = "DecodeSingleIndexedInstruction";
- }
-
- def _PostInd_LDR : A64I_LSpostind<size, v, {high_opc, 0b1},
- (outs GPR:$Rt, GPR64xsp:$Rn_wb),
- (ins GPR64xsp:$Rn, simm9:$SImm9),
- "ldr" # asmsuffix # "\t$Rt, [$Rn], $SImm9",
- [], NoItinerary> {
- let mayLoad = 1;
- let Constraints = "$Rn = $Rn_wb";
- let DecoderMethod = "DecodeSingleIndexedInstruction";
- }
-
- // Pre-indexed
- def _PreInd_STR : A64I_LSpreind<size, v, {high_opc, 0b0},
- (outs GPR64xsp:$Rn_wb),
- (ins GPR:$Rt, GPR64xsp:$Rn, simm9:$SImm9),
- "str" # asmsuffix # "\t$Rt, [$Rn, $SImm9]!",
- [], NoItinerary> {
- let Constraints = "$Rn = $Rn_wb";
- let mayStore = 1;
-
- // Decoder only needed for unpredictability checking (FIXME).
- let DecoderMethod = "DecodeSingleIndexedInstruction";
- }
-
- def _PreInd_LDR : A64I_LSpreind<size, v, {high_opc, 0b1},
- (outs GPR:$Rt, GPR64xsp:$Rn_wb),
- (ins GPR64xsp:$Rn, simm9:$SImm9),
- "ldr" # asmsuffix # "\t$Rt, [$Rn, $SImm9]!",
- [], NoItinerary> {
- let mayLoad = 1;
- let Constraints = "$Rn = $Rn_wb";
- let DecoderMethod = "DecodeSingleIndexedInstruction";
- }
-
-}
-
-// STRB/LDRB: First define the instructions
-defm LS8
- : A64I_LDRSTR_unsigned<"LS8", 0b00, 0b0, 0b0, "b", GPR32, byte_addrparams>;
-
-// STRH/LDRH
-defm LS16
- : A64I_LDRSTR_unsigned<"LS16", 0b01, 0b0, 0b0, "h", GPR32, hword_addrparams>;
-
-
-// STR/LDR to/from a W register
-defm LS32
- : A64I_LDRSTR_unsigned<"LS32", 0b10, 0b0, 0b0, "", GPR32, word_addrparams>;
-
-// STR/LDR to/from an X register
-defm LS64
- : A64I_LDRSTR_unsigned<"LS64", 0b11, 0b0, 0b0, "", GPR64, dword_addrparams>;
-
-let Predicates = [HasFPARMv8] in {
-// STR/LDR to/from a B register
-defm LSFP8
- : A64I_LDRSTR_unsigned<"LSFP8", 0b00, 0b1, 0b0, "", FPR8, byte_addrparams>;
-
-// STR/LDR to/from an H register
-defm LSFP16
- : A64I_LDRSTR_unsigned<"LSFP16", 0b01, 0b1, 0b0, "", FPR16, hword_addrparams>;
-
-// STR/LDR to/from an S register
-defm LSFP32
- : A64I_LDRSTR_unsigned<"LSFP32", 0b10, 0b1, 0b0, "", FPR32, word_addrparams>;
-// STR/LDR to/from a D register
-defm LSFP64
- : A64I_LDRSTR_unsigned<"LSFP64", 0b11, 0b1, 0b0, "", FPR64, dword_addrparams>;
-// STR/LDR to/from a Q register
-defm LSFP128
- : A64I_LDRSTR_unsigned<"LSFP128", 0b00, 0b1, 0b1, "", FPR128,
- qword_addrparams>;
-}
-
-//===------------------------------
-// 2.3 Signed loads
-//===------------------------------
-
-// Byte and half-word signed loads can both go into either an X or a W register,
-// so it's worth factoring out. Signed word loads don't fit because there is no
-// W version.
-multiclass A64I_LDR_signed<bits<2> size, string asmopcode, AddrParams params,
- string prefix> {
- // Unsigned offset
- def w : A64I_LSunsigimm<size, 0b0, 0b11,
- (outs GPR32:$Rt),
- (ins GPR64xsp:$Rn, params.uimm12:$UImm12),
- "ldrs" # asmopcode # "\t$Rt, [$Rn, $UImm12]",
- [], NoItinerary> {
- let mayLoad = 1;
- }
- def : InstAlias<"ldrs" # asmopcode # " $Rt, [$Rn]",
- (!cast<Instruction>(prefix # w) GPR32:$Rt, GPR64xsp:$Rn, 0)>;
-
- def x : A64I_LSunsigimm<size, 0b0, 0b10,
- (outs GPR64:$Rt),
- (ins GPR64xsp:$Rn, params.uimm12:$UImm12),
- "ldrs" # asmopcode # "\t$Rt, [$Rn, $UImm12]",
- [], NoItinerary> {
- let mayLoad = 1;
- }
- def : InstAlias<"ldrs" # asmopcode # " $Rt, [$Rn]",
- (!cast<Instruction>(prefix # x) GPR64:$Rt, GPR64xsp:$Rn, 0)>;
-
- // Register offset
- let mayLoad = 1 in {
- def w_Wm_RegOffset : A64I_LSregoff<size, 0b0, 0b11, 0b0,
- (outs GPR32:$Rt),
- (ins GPR64xsp:$Rn, GPR32:$Rm, params.regextWm:$Ext),
- "ldrs" # asmopcode # "\t$Rt, [$Rn, $Rm, $Ext]",
- [], NoItinerary>;
-
- def w_Xm_RegOffset : A64I_LSregoff<size, 0b0, 0b11, 0b1,
- (outs GPR32:$Rt),
- (ins GPR64xsp:$Rn, GPR64:$Rm, params.regextXm:$Ext),
- "ldrs" # asmopcode # "\t$Rt, [$Rn, $Rm, $Ext]",
- [], NoItinerary>;
-
- def x_Wm_RegOffset : A64I_LSregoff<size, 0b0, 0b10, 0b0,
- (outs GPR64:$Rt),
- (ins GPR64xsp:$Rn, GPR32:$Rm, params.regextWm:$Ext),
- "ldrs" # asmopcode # "\t$Rt, [$Rn, $Rm, $Ext]",
- [], NoItinerary>;
-
- def x_Xm_RegOffset : A64I_LSregoff<size, 0b0, 0b10, 0b1,
- (outs GPR64:$Rt),
- (ins GPR64xsp:$Rn, GPR64:$Rm, params.regextXm:$Ext),
- "ldrs" # asmopcode # "\t$Rt, [$Rn, $Rm, $Ext]",
- [], NoItinerary>;
- }
- def : InstAlias<"ldrs" # asmopcode # " $Rt, [$Rn, $Rm]",
- (!cast<Instruction>(prefix # "w_Xm_RegOffset") GPR32:$Rt, GPR64xsp:$Rn,
- GPR64:$Rm, 2)>;
-
- def : InstAlias<"ldrs" # asmopcode # " $Rt, [$Rn, $Rm]",
- (!cast<Instruction>(prefix # "x_Xm_RegOffset") GPR64:$Rt, GPR64xsp:$Rn,
- GPR64:$Rm, 2)>;
-
-
- let mayLoad = 1 in {
- // Unaligned offset
- def w_U : A64I_LSunalimm<size, 0b0, 0b11,
- (outs GPR32:$Rt),
- (ins GPR64xsp:$Rn, simm9:$SImm9),
- "ldurs" # asmopcode # "\t$Rt, [$Rn, $SImm9]",
- [], NoItinerary>;
-
- def x_U : A64I_LSunalimm<size, 0b0, 0b10,
- (outs GPR64:$Rt),
- (ins GPR64xsp:$Rn, simm9:$SImm9),
- "ldurs" # asmopcode # "\t$Rt, [$Rn, $SImm9]",
- [], NoItinerary>;
-
-
- // Post-indexed
- def w_PostInd : A64I_LSpostind<size, 0b0, 0b11,
- (outs GPR32:$Rt, GPR64xsp:$Rn_wb),
- (ins GPR64xsp:$Rn, simm9:$SImm9),
- "ldrs" # asmopcode # "\t$Rt, [$Rn], $SImm9",
- [], NoItinerary> {
- let Constraints = "$Rn = $Rn_wb";
- let DecoderMethod = "DecodeSingleIndexedInstruction";
- }
-
- def x_PostInd : A64I_LSpostind<size, 0b0, 0b10,
- (outs GPR64:$Rt, GPR64xsp:$Rn_wb),
- (ins GPR64xsp:$Rn, simm9:$SImm9),
- "ldrs" # asmopcode # "\t$Rt, [$Rn], $SImm9",
- [], NoItinerary> {
- let Constraints = "$Rn = $Rn_wb";
- let DecoderMethod = "DecodeSingleIndexedInstruction";
- }
-
- // Pre-indexed
- def w_PreInd : A64I_LSpreind<size, 0b0, 0b11,
- (outs GPR32:$Rt, GPR64xsp:$Rn_wb),
- (ins GPR64xsp:$Rn, simm9:$SImm9),
- "ldrs" # asmopcode # "\t$Rt, [$Rn, $SImm9]!",
- [], NoItinerary> {
- let Constraints = "$Rn = $Rn_wb";
- let DecoderMethod = "DecodeSingleIndexedInstruction";
- }
-
- def x_PreInd : A64I_LSpreind<size, 0b0, 0b10,
- (outs GPR64:$Rt, GPR64xsp:$Rn_wb),
- (ins GPR64xsp:$Rn, simm9:$SImm9),
- "ldrs" # asmopcode # "\t$Rt, [$Rn, $SImm9]!",
- [], NoItinerary> {
- let Constraints = "$Rn = $Rn_wb";
- let DecoderMethod = "DecodeSingleIndexedInstruction";
- }
- } // let mayLoad = 1
-}
-
-// LDRSB
-defm LDRSB : A64I_LDR_signed<0b00, "b", byte_addrparams, "LDRSB">;
-// LDRSH
-defm LDRSH : A64I_LDR_signed<0b01, "h", hword_addrparams, "LDRSH">;
-
-// LDRSW: load a 32-bit register, sign-extending to 64-bits.
-def LDRSWx
- : A64I_LSunsigimm<0b10, 0b0, 0b10,
- (outs GPR64:$Rt),
- (ins GPR64xsp:$Rn, word_uimm12:$UImm12),
- "ldrsw\t$Rt, [$Rn, $UImm12]",
- [], NoItinerary> {
- let mayLoad = 1;
-}
-def : InstAlias<"ldrsw $Rt, [$Rn]", (LDRSWx GPR64:$Rt, GPR64xsp:$Rn, 0)>;
-
-let mayLoad = 1 in {
- def LDRSWx_Wm_RegOffset : A64I_LSregoff<0b10, 0b0, 0b10, 0b0,
- (outs GPR64:$Rt),
- (ins GPR64xsp:$Rn, GPR32:$Rm, word_Wm_regext:$Ext),
- "ldrsw\t$Rt, [$Rn, $Rm, $Ext]",
- [], NoItinerary>;
-
- def LDRSWx_Xm_RegOffset : A64I_LSregoff<0b10, 0b0, 0b10, 0b1,
- (outs GPR64:$Rt),
- (ins GPR64xsp:$Rn, GPR64:$Rm, word_Xm_regext:$Ext),
- "ldrsw\t$Rt, [$Rn, $Rm, $Ext]",
- [], NoItinerary>;
-}
-def : InstAlias<"ldrsw $Rt, [$Rn, $Rm]",
- (LDRSWx_Xm_RegOffset GPR64:$Rt, GPR64xsp:$Rn, GPR64:$Rm, 2)>;
-
-
-def LDURSWx
- : A64I_LSunalimm<0b10, 0b0, 0b10,
- (outs GPR64:$Rt),
- (ins GPR64xsp:$Rn, simm9:$SImm9),
- "ldursw\t$Rt, [$Rn, $SImm9]",
- [], NoItinerary> {
- let mayLoad = 1;
-}
-def : InstAlias<"ldursw $Rt, [$Rn]", (LDURSWx GPR64:$Rt, GPR64xsp:$Rn, 0)>;
-
-def LDRSWx_PostInd
- : A64I_LSpostind<0b10, 0b0, 0b10,
- (outs GPR64:$Rt, GPR64xsp:$Rn_wb),
- (ins GPR64xsp:$Rn, simm9:$SImm9),
- "ldrsw\t$Rt, [$Rn], $SImm9",
- [], NoItinerary> {
- let mayLoad = 1;
- let Constraints = "$Rn = $Rn_wb";
- let DecoderMethod = "DecodeSingleIndexedInstruction";
-}
-
-def LDRSWx_PreInd : A64I_LSpreind<0b10, 0b0, 0b10,
- (outs GPR64:$Rt, GPR64xsp:$Rn_wb),
- (ins GPR64xsp:$Rn, simm9:$SImm9),
- "ldrsw\t$Rt, [$Rn, $SImm9]!",
- [], NoItinerary> {
- let mayLoad = 1;
- let Constraints = "$Rn = $Rn_wb";
- let DecoderMethod = "DecodeSingleIndexedInstruction";
-}
-
-//===------------------------------
-// 2.4 Prefetch operations
-//===------------------------------
-
-def PRFM : A64I_LSunsigimm<0b11, 0b0, 0b10, (outs),
- (ins prefetch_op:$Rt, GPR64xsp:$Rn, dword_uimm12:$UImm12),
- "prfm\t$Rt, [$Rn, $UImm12]",
- [], NoItinerary> {
- let mayLoad = 1;
-}
-def : InstAlias<"prfm $Rt, [$Rn]",
- (PRFM prefetch_op:$Rt, GPR64xsp:$Rn, 0)>;
-
-let mayLoad = 1 in {
- def PRFM_Wm_RegOffset : A64I_LSregoff<0b11, 0b0, 0b10, 0b0, (outs),
- (ins prefetch_op:$Rt, GPR64xsp:$Rn,
- GPR32:$Rm, dword_Wm_regext:$Ext),
- "prfm\t$Rt, [$Rn, $Rm, $Ext]",
- [], NoItinerary>;
- def PRFM_Xm_RegOffset : A64I_LSregoff<0b11, 0b0, 0b10, 0b1, (outs),
- (ins prefetch_op:$Rt, GPR64xsp:$Rn,
- GPR64:$Rm, dword_Xm_regext:$Ext),
- "prfm\t$Rt, [$Rn, $Rm, $Ext]",
- [], NoItinerary>;
-}
-
-def : InstAlias<"prfm $Rt, [$Rn, $Rm]",
- (PRFM_Xm_RegOffset prefetch_op:$Rt, GPR64xsp:$Rn,
- GPR64:$Rm, 2)>;
-
-
-def PRFUM : A64I_LSunalimm<0b11, 0b0, 0b10, (outs),
- (ins prefetch_op:$Rt, GPR64xsp:$Rn, simm9:$SImm9),
- "prfum\t$Rt, [$Rn, $SImm9]",
- [], NoItinerary> {
- let mayLoad = 1;
-}
-def : InstAlias<"prfum $Rt, [$Rn]",
- (PRFUM prefetch_op:$Rt, GPR64xsp:$Rn, 0)>;
+def STXPW : StoreExclusivePair<0b10, 0, 0, 1, 0, GPR32, "stxp">;
+def STXPX : StoreExclusivePair<0b11, 0, 0, 1, 0, GPR64, "stxp">;
//===----------------------------------------------------------------------===//
-// Load-store register (unprivileged) instructions
+// Scaled floating point to integer conversion instructions.
//===----------------------------------------------------------------------===//
-// Contains: LDTRB, LDTRH, LDTRSB, LDTRSH, LDTRSW, STTR, STTRB and STTRH
-
-// These instructions very much mirror the "unscaled immediate" loads, but since
-// there are no floating-point variants we need to split them out into their own
-// section to avoid instantiation of "ldtr d0, [sp]" etc.
-
-multiclass A64I_LDTRSTTR<bits<2> size, string asmsuffix, RegisterClass GPR,
- string prefix> {
- def _UnPriv_STR : A64I_LSunpriv<size, 0b0, 0b00,
- (outs), (ins GPR:$Rt, GPR64xsp:$Rn, simm9:$SImm9),
- "sttr" # asmsuffix # "\t$Rt, [$Rn, $SImm9]",
- [], NoItinerary> {
- let mayStore = 1;
- }
-
- def : InstAlias<"sttr" # asmsuffix # " $Rt, [$Rn]",
- (!cast<Instruction>(prefix # "_UnPriv_STR") GPR:$Rt, GPR64xsp:$Rn, 0)>;
-
- def _UnPriv_LDR : A64I_LSunpriv<size, 0b0, 0b01,
- (outs GPR:$Rt), (ins GPR64xsp:$Rn, simm9:$SImm9),
- "ldtr" # asmsuffix # "\t$Rt, [$Rn, $SImm9]",
- [], NoItinerary> {
- let mayLoad = 1;
- }
- def : InstAlias<"ldtr" # asmsuffix # " $Rt, [$Rn]",
- (!cast<Instruction>(prefix # "_UnPriv_LDR") GPR:$Rt, GPR64xsp:$Rn, 0)>;
-
-}
-
-// STTRB/LDTRB: First define the instructions
-defm LS8 : A64I_LDTRSTTR<0b00, "b", GPR32, "LS8">;
-
-// STTRH/LDTRH
-defm LS16 : A64I_LDTRSTTR<0b01, "h", GPR32, "LS16">;
-
-// STTR/LDTR to/from a W register
-defm LS32 : A64I_LDTRSTTR<0b10, "", GPR32, "LS32">;
-
-// STTR/LDTR to/from an X register
-defm LS64 : A64I_LDTRSTTR<0b11, "", GPR64, "LS64">;
-
-// Now a class for the signed instructions that can go to either 32 or 64
-// bits...
-multiclass A64I_LDTR_signed<bits<2> size, string asmopcode, string prefix> {
- let mayLoad = 1 in {
- def w : A64I_LSunpriv<size, 0b0, 0b11,
- (outs GPR32:$Rt),
- (ins GPR64xsp:$Rn, simm9:$SImm9),
- "ldtrs" # asmopcode # "\t$Rt, [$Rn, $SImm9]",
- [], NoItinerary>;
-
- def x : A64I_LSunpriv<size, 0b0, 0b10,
- (outs GPR64:$Rt),
- (ins GPR64xsp:$Rn, simm9:$SImm9),
- "ldtrs" # asmopcode # "\t$Rt, [$Rn, $SImm9]",
- [], NoItinerary>;
- }
-
- def : InstAlias<"ldtrs" # asmopcode # " $Rt, [$Rn]",
- (!cast<Instruction>(prefix # "w") GPR32:$Rt, GPR64xsp:$Rn, 0)>;
-
- def : InstAlias<"ldtrs" # asmopcode # " $Rt, [$Rn]",
- (!cast<Instruction>(prefix # "x") GPR64:$Rt, GPR64xsp:$Rn, 0)>;
-
-}
-
-// LDTRSB
-defm LDTRSB : A64I_LDTR_signed<0b00, "b", "LDTRSB">;
-// LDTRSH
-defm LDTRSH : A64I_LDTR_signed<0b01, "h", "LDTRSH">;
-
-// And finally LDTRSW which only goes to 64 bits.
-def LDTRSWx : A64I_LSunpriv<0b10, 0b0, 0b10,
- (outs GPR64:$Rt),
- (ins GPR64xsp:$Rn, simm9:$SImm9),
- "ldtrsw\t$Rt, [$Rn, $SImm9]",
- [], NoItinerary> {
- let mayLoad = 1;
+defm FCVTAS : FPToIntegerUnscaled<0b00, 0b100, "fcvtas", int_aarch64_neon_fcvtas>;
+defm FCVTAU : FPToIntegerUnscaled<0b00, 0b101, "fcvtau", int_aarch64_neon_fcvtau>;
+defm FCVTMS : FPToIntegerUnscaled<0b10, 0b000, "fcvtms", int_aarch64_neon_fcvtms>;
+defm FCVTMU : FPToIntegerUnscaled<0b10, 0b001, "fcvtmu", int_aarch64_neon_fcvtmu>;
+defm FCVTNS : FPToIntegerUnscaled<0b00, 0b000, "fcvtns", int_aarch64_neon_fcvtns>;
+defm FCVTNU : FPToIntegerUnscaled<0b00, 0b001, "fcvtnu", int_aarch64_neon_fcvtnu>;
+defm FCVTPS : FPToIntegerUnscaled<0b01, 0b000, "fcvtps", int_aarch64_neon_fcvtps>;
+defm FCVTPU : FPToIntegerUnscaled<0b01, 0b001, "fcvtpu", int_aarch64_neon_fcvtpu>;
+defm FCVTZS : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", fp_to_sint>;
+defm FCVTZU : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", fp_to_uint>;
+defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", fp_to_sint>;
+defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", fp_to_uint>;
+let isCodeGenOnly = 1 in {
+defm FCVTZS_Int : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", int_aarch64_neon_fcvtzs>;
+defm FCVTZU_Int : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", int_aarch64_neon_fcvtzu>;
+defm FCVTZS_Int : FPToIntegerScaled<0b11, 0b000, "fcvtzs", int_aarch64_neon_fcvtzs>;
+defm FCVTZU_Int : FPToIntegerScaled<0b11, 0b001, "fcvtzu", int_aarch64_neon_fcvtzu>;
}
-def : InstAlias<"ldtrsw $Rt, [$Rn]", (LDTRSWx GPR64:$Rt, GPR64xsp:$Rn, 0)>;
//===----------------------------------------------------------------------===//
-// Load-store register pair (offset) instructions
-//===----------------------------------------------------------------------===//
-//
-// and
-//
-//===----------------------------------------------------------------------===//
-// Load-store register pair (post-indexed) instructions
-//===----------------------------------------------------------------------===//
-// Contains: STP, LDP, LDPSW
-//
-// and
-//
+// Scaled integer to floating point conversion instructions.
//===----------------------------------------------------------------------===//
-// Load-store register pair (pre-indexed) instructions
-//===----------------------------------------------------------------------===//
-// Contains: STP, LDP, LDPSW
-//
-// and
-//
-//===----------------------------------------------------------------------===//
-// Load-store non-temporal register pair (offset) instructions
-//===----------------------------------------------------------------------===//
-// Contains: STNP, LDNP
-
-
-// Anything that creates an MCInst (Decoding, selection and AsmParsing) has to
-// know the access size via some means. An isolated operand does not have this
-// information unless told from here, which means we need separate tablegen
-// Operands for each access size. This multiclass takes care of instantiating
-// the correct template functions in the rest of the backend.
-
-multiclass offsets_simm7<string MemSize, string prefix> {
- // The bare signed 7-bit immediate is used in post-indexed instructions, but
-  // because of the scaling performed, a generic "simm7" operand isn't
- // appropriate here either.
- def simm7_asmoperand : AsmOperandClass {
- let Name = "SImm7_Scaled" # MemSize;
- let PredicateMethod = "isSImm7Scaled<" # MemSize # ">";
- let RenderMethod = "addSImm7ScaledOperands<" # MemSize # ">";
- let DiagnosticType = "LoadStoreSImm7_" # MemSize;
- }
-
- def simm7 : Operand<i64> {
- let PrintMethod = "printSImm7ScaledOperand<" # MemSize # ">";
- let ParserMatchClass = !cast<AsmOperandClass>(prefix # "simm7_asmoperand");
- }
-}
-
-defm word_ : offsets_simm7<"4", "word_">;
-defm dword_ : offsets_simm7<"8", "dword_">;
-defm qword_ : offsets_simm7<"16", "qword_">;
-
-multiclass A64I_LSPsimple<bits<2> opc, bit v, RegisterClass SomeReg,
- Operand simm7, string prefix> {
- def _STR : A64I_LSPoffset<opc, v, 0b0, (outs),
- (ins SomeReg:$Rt, SomeReg:$Rt2, GPR64xsp:$Rn, simm7:$SImm7),
- "stp\t$Rt, $Rt2, [$Rn, $SImm7]", [], NoItinerary> {
- let mayStore = 1;
- let DecoderMethod = "DecodeLDSTPairInstruction";
- }
- def : InstAlias<"stp $Rt, $Rt2, [$Rn]",
- (!cast<Instruction>(prefix # "_STR") SomeReg:$Rt,
- SomeReg:$Rt2, GPR64xsp:$Rn, 0)>;
-
- def _LDR : A64I_LSPoffset<opc, v, 0b1,
- (outs SomeReg:$Rt, SomeReg:$Rt2),
- (ins GPR64xsp:$Rn, simm7:$SImm7),
- "ldp\t$Rt, $Rt2, [$Rn, $SImm7]", [], NoItinerary> {
- let mayLoad = 1;
- let DecoderMethod = "DecodeLDSTPairInstruction";
- }
- def : InstAlias<"ldp $Rt, $Rt2, [$Rn]",
- (!cast<Instruction>(prefix # "_LDR") SomeReg:$Rt,
- SomeReg:$Rt2, GPR64xsp:$Rn, 0)>;
-
- def _PostInd_STR : A64I_LSPpostind<opc, v, 0b0,
- (outs GPR64xsp:$Rn_wb),
- (ins SomeReg:$Rt, SomeReg:$Rt2,
- GPR64xsp:$Rn,
- simm7:$SImm7),
- "stp\t$Rt, $Rt2, [$Rn], $SImm7",
- [], NoItinerary> {
- let mayStore = 1;
- let Constraints = "$Rn = $Rn_wb";
-
- // Decoder only needed for unpredictability checking (FIXME).
- let DecoderMethod = "DecodeLDSTPairInstruction";
- }
-
- def _PostInd_LDR : A64I_LSPpostind<opc, v, 0b1,
- (outs SomeReg:$Rt, SomeReg:$Rt2, GPR64xsp:$Rn_wb),
- (ins GPR64xsp:$Rn, simm7:$SImm7),
- "ldp\t$Rt, $Rt2, [$Rn], $SImm7",
- [], NoItinerary> {
- let mayLoad = 1;
- let Constraints = "$Rn = $Rn_wb";
- let DecoderMethod = "DecodeLDSTPairInstruction";
- }
-
- def _PreInd_STR : A64I_LSPpreind<opc, v, 0b0, (outs GPR64xsp:$Rn_wb),
- (ins SomeReg:$Rt, SomeReg:$Rt2, GPR64xsp:$Rn, simm7:$SImm7),
- "stp\t$Rt, $Rt2, [$Rn, $SImm7]!",
- [], NoItinerary> {
- let mayStore = 1;
- let Constraints = "$Rn = $Rn_wb";
- let DecoderMethod = "DecodeLDSTPairInstruction";
- }
-
- def _PreInd_LDR : A64I_LSPpreind<opc, v, 0b1,
- (outs SomeReg:$Rt, SomeReg:$Rt2, GPR64xsp:$Rn_wb),
- (ins GPR64xsp:$Rn, simm7:$SImm7),
- "ldp\t$Rt, $Rt2, [$Rn, $SImm7]!",
- [], NoItinerary> {
- let mayLoad = 1;
- let Constraints = "$Rn = $Rn_wb";
- let DecoderMethod = "DecodeLDSTPairInstruction";
- }
-
- def _NonTemp_STR : A64I_LSPnontemp<opc, v, 0b0, (outs),
- (ins SomeReg:$Rt, SomeReg:$Rt2, GPR64xsp:$Rn, simm7:$SImm7),
- "stnp\t$Rt, $Rt2, [$Rn, $SImm7]", [], NoItinerary> {
- let mayStore = 1;
- let DecoderMethod = "DecodeLDSTPairInstruction";
- }
- def : InstAlias<"stnp $Rt, $Rt2, [$Rn]",
- (!cast<Instruction>(prefix # "_NonTemp_STR") SomeReg:$Rt,
- SomeReg:$Rt2, GPR64xsp:$Rn, 0)>;
-
- def _NonTemp_LDR : A64I_LSPnontemp<opc, v, 0b1,
- (outs SomeReg:$Rt, SomeReg:$Rt2),
- (ins GPR64xsp:$Rn, simm7:$SImm7),
- "ldnp\t$Rt, $Rt2, [$Rn, $SImm7]", [], NoItinerary> {
- let mayLoad = 1;
- let DecoderMethod = "DecodeLDSTPairInstruction";
- }
- def : InstAlias<"ldnp $Rt, $Rt2, [$Rn]",
- (!cast<Instruction>(prefix # "_NonTemp_LDR") SomeReg:$Rt,
- SomeReg:$Rt2, GPR64xsp:$Rn, 0)>;
-
-}
-
-defm LSPair32 : A64I_LSPsimple<0b00, 0b0, GPR32, word_simm7, "LSPair32">;
-defm LSPair64 : A64I_LSPsimple<0b10, 0b0, GPR64, dword_simm7, "LSPair64">;
-
-let Predicates = [HasFPARMv8] in {
-defm LSFPPair32 : A64I_LSPsimple<0b00, 0b1, FPR32, word_simm7, "LSFPPair32">;
-defm LSFPPair64 : A64I_LSPsimple<0b01, 0b1, FPR64, dword_simm7, "LSFPPair64">;
-defm LSFPPair128 : A64I_LSPsimple<0b10, 0b1, FPR128, qword_simm7,
- "LSFPPair128">;
-}
-
-
-def LDPSWx : A64I_LSPoffset<0b01, 0b0, 0b1,
- (outs GPR64:$Rt, GPR64:$Rt2),
- (ins GPR64xsp:$Rn, word_simm7:$SImm7),
- "ldpsw\t$Rt, $Rt2, [$Rn, $SImm7]", [], NoItinerary> {
- let mayLoad = 1;
- let DecoderMethod = "DecodeLDSTPairInstruction";
-}
-def : InstAlias<"ldpsw $Rt, $Rt2, [$Rn]",
- (LDPSWx GPR64:$Rt, GPR64:$Rt2, GPR64xsp:$Rn, 0)>;
-
-def LDPSWx_PostInd : A64I_LSPpostind<0b01, 0b0, 0b1,
- (outs GPR64:$Rt, GPR64:$Rt2, GPR64:$Rn_wb),
- (ins GPR64xsp:$Rn, word_simm7:$SImm7),
- "ldpsw\t$Rt, $Rt2, [$Rn], $SImm7",
- [], NoItinerary> {
- let mayLoad = 1;
- let Constraints = "$Rn = $Rn_wb";
- let DecoderMethod = "DecodeLDSTPairInstruction";
-}
-
-def LDPSWx_PreInd : A64I_LSPpreind<0b01, 0b0, 0b1,
- (outs GPR64:$Rt, GPR64:$Rt2, GPR64:$Rn_wb),
- (ins GPR64xsp:$Rn, word_simm7:$SImm7),
- "ldpsw\t$Rt, $Rt2, [$Rn, $SImm7]!",
- [], NoItinerary> {
- let mayLoad = 1;
- let Constraints = "$Rn = $Rn_wb";
- let DecoderMethod = "DecodeLDSTPairInstruction";
-}
+defm SCVTF : IntegerToFP<0, "scvtf", sint_to_fp>;
+defm UCVTF : IntegerToFP<1, "ucvtf", uint_to_fp>;
//===----------------------------------------------------------------------===//
-// Logical (immediate) instructions
+// Unscaled integer to floating point conversion instruction.
//===----------------------------------------------------------------------===//
-// Contains: AND, ORR, EOR, ANDS, + aliases TST, MOV
-
-multiclass logical_imm_operands<string prefix, string note,
- int size, ValueType VT> {
- def _asmoperand : AsmOperandClass {
- let Name = "LogicalImm" # note # size;
- let PredicateMethod = "isLogicalImm" # note # "<" # size # ">";
- let RenderMethod = "addLogicalImmOperands<" # size # ">";
- let DiagnosticType = "LogicalSecondSource";
- }
- def _operand
- : Operand<VT>, ComplexPattern<VT, 1, "SelectLogicalImm", [imm]> {
- let ParserMatchClass = !cast<AsmOperandClass>(prefix # "_asmoperand");
- let PrintMethod = "printLogicalImmOperand<" # size # ">";
- let DecoderMethod = "DecodeLogicalImmOperand<" # size # ">";
- }
-}
+defm FMOV : UnscaledConversion<"fmov">;
-defm logical_imm32 : logical_imm_operands<"logical_imm32", "", 32, i32>;
-defm logical_imm64 : logical_imm_operands<"logical_imm64", "", 64, i64>;
-
-// The mov versions only differ in assembly parsing, where they
-// exclude values representable with either MOVZ or MOVN.
-defm logical_imm32_mov
- : logical_imm_operands<"logical_imm32_mov", "MOV", 32, i32>;
-defm logical_imm64_mov
- : logical_imm_operands<"logical_imm64_mov", "MOV", 64, i64>;
-
-
-multiclass A64I_logimmSizes<bits<2> opc, string asmop, SDNode opnode> {
- def wwi : A64I_logicalimm<0b0, opc, (outs GPR32wsp:$Rd),
- (ins GPR32:$Rn, logical_imm32_operand:$Imm),
- !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
- [(set i32:$Rd,
- (opnode i32:$Rn, logical_imm32_operand:$Imm))],
- NoItinerary>;
-
- def xxi : A64I_logicalimm<0b1, opc, (outs GPR64xsp:$Rd),
- (ins GPR64:$Rn, logical_imm64_operand:$Imm),
- !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
- [(set i64:$Rd,
- (opnode i64:$Rn, logical_imm64_operand:$Imm))],
- NoItinerary>;
-}
-
-defm AND : A64I_logimmSizes<0b00, "and", and>;
-defm ORR : A64I_logimmSizes<0b01, "orr", or>;
-defm EOR : A64I_logimmSizes<0b10, "eor", xor>;
-
-let Defs = [NZCV] in {
- def ANDSwwi : A64I_logicalimm<0b0, 0b11, (outs GPR32:$Rd),
- (ins GPR32:$Rn, logical_imm32_operand:$Imm),
- "ands\t$Rd, $Rn, $Imm",
- [], NoItinerary>;
-
- def ANDSxxi : A64I_logicalimm<0b1, 0b11, (outs GPR64:$Rd),
- (ins GPR64:$Rn, logical_imm64_operand:$Imm),
- "ands\t$Rd, $Rn, $Imm",
- [], NoItinerary>;
-}
-
-
-def : InstAlias<"tst $Rn, $Imm",
- (ANDSwwi WZR, GPR32:$Rn, logical_imm32_operand:$Imm)>;
-def : InstAlias<"tst $Rn, $Imm",
- (ANDSxxi XZR, GPR64:$Rn, logical_imm64_operand:$Imm)>;
-def : InstAlias<"mov $Rd, $Imm",
- (ORRwwi GPR32wsp:$Rd, WZR, logical_imm32_mov_operand:$Imm)>;
-def : InstAlias<"mov $Rd, $Imm",
- (ORRxxi GPR64xsp:$Rd, XZR, logical_imm64_mov_operand:$Imm)>;
+def : Pat<(f32 (fpimm0)), (FMOVWSr WZR)>, Requires<[NoZCZ]>;
+def : Pat<(f64 (fpimm0)), (FMOVXDr XZR)>, Requires<[NoZCZ]>;
//===----------------------------------------------------------------------===//
-// Logical (shifted register) instructions
+// Floating point conversion instruction.
//===----------------------------------------------------------------------===//
-// Contains: AND, BIC, ORR, ORN, EOR, EON, ANDS, BICS + aliases TST, MVN, MOV
-
-// Operand for optimizing (icmp (and LHS, RHS), 0, SomeCode). In theory "ANDS"
-// behaves differently for unsigned comparisons, so we defensively only allow
-// signed or n/a as the operand. In practice "unsigned greater than 0" is "not
-// equal to 0" and LLVM gives us this.
-def signed_cond : PatLeaf<(cond), [{
- return !isUnsignedIntSetCC(N->get());
-}]>;
-
-// These instructions share their "shift" operands with add/sub (shifted
-// register instructions). They are defined there.
-
-// N.b. the commutable parameter is just !N. It will be first against the wall
-// when the revolution comes.
-multiclass logical_shifts<string prefix, bit sf, bits<2> opc,
- bit N, bit commutable,
- string asmop, SDPatternOperator opfrag, ValueType ty,
- RegisterClass GPR, list<Register> defs> {
- let isCommutable = commutable, Defs = defs in {
- def _lsl : A64I_logicalshift<sf, opc, 0b00, N,
- (outs GPR:$Rd),
- (ins GPR:$Rn, GPR:$Rm,
- !cast<Operand>("lsl_operand_" # ty):$Imm6),
- !strconcat(asmop, "\t$Rd, $Rn, $Rm, $Imm6"),
- [(set ty:$Rd, (opfrag ty:$Rn, (shl ty:$Rm,
- !cast<Operand>("lsl_operand_" # ty):$Imm6))
- )],
- NoItinerary>;
-
- def _lsr : A64I_logicalshift<sf, opc, 0b01, N,
- (outs GPR:$Rd),
- (ins GPR:$Rn, GPR:$Rm,
- !cast<Operand>("lsr_operand_" # ty):$Imm6),
- !strconcat(asmop, "\t$Rd, $Rn, $Rm, $Imm6"),
- [(set ty:$Rd, (opfrag ty:$Rn, (srl ty:$Rm,
- !cast<Operand>("lsr_operand_" # ty):$Imm6))
- )],
- NoItinerary>;
-
- def _asr : A64I_logicalshift<sf, opc, 0b10, N,
- (outs GPR:$Rd),
- (ins GPR:$Rn, GPR:$Rm,
- !cast<Operand>("asr_operand_" # ty):$Imm6),
- !strconcat(asmop, "\t$Rd, $Rn, $Rm, $Imm6"),
- [(set ty:$Rd, (opfrag ty:$Rn, (sra ty:$Rm,
- !cast<Operand>("asr_operand_" # ty):$Imm6))
- )],
- NoItinerary>;
-
- def _ror : A64I_logicalshift<sf, opc, 0b11, N,
- (outs GPR:$Rd),
- (ins GPR:$Rn, GPR:$Rm,
- !cast<Operand>("ror_operand_" # ty):$Imm6),
- !strconcat(asmop, "\t$Rd, $Rn, $Rm, $Imm6"),
- [(set ty:$Rd, (opfrag ty:$Rn, (rotr ty:$Rm,
- !cast<Operand>("ror_operand_" # ty):$Imm6))
- )],
- NoItinerary>;
- }
-
- def _noshift
- : InstAlias<!strconcat(asmop, " $Rd, $Rn, $Rm"),
- (!cast<Instruction>(prefix # "_lsl") GPR:$Rd, GPR:$Rn,
- GPR:$Rm, 0)>;
-
- def : Pat<(opfrag ty:$Rn, ty:$Rm),
- (!cast<Instruction>(prefix # "_lsl") $Rn, $Rm, 0)>;
-}
-
-multiclass logical_sizes<string prefix, bits<2> opc, bit N, bit commutable,
- string asmop, SDPatternOperator opfrag,
- list<Register> defs> {
- defm xxx : logical_shifts<prefix # "xxx", 0b1, opc, N,
- commutable, asmop, opfrag, i64, GPR64, defs>;
- defm www : logical_shifts<prefix # "www", 0b0, opc, N,
- commutable, asmop, opfrag, i32, GPR32, defs>;
-}
-
-
-defm AND : logical_sizes<"AND", 0b00, 0b0, 0b1, "and", and, []>;
-defm ORR : logical_sizes<"ORR", 0b01, 0b0, 0b1, "orr", or, []>;
-defm EOR : logical_sizes<"EOR", 0b10, 0b0, 0b1, "eor", xor, []>;
-defm ANDS : logical_sizes<"ANDS", 0b11, 0b0, 0b1, "ands",
- PatFrag<(ops node:$lhs, node:$rhs), (and node:$lhs, node:$rhs),
- [{ (void)N; return false; }]>,
- [NZCV]>;
-
-defm BIC : logical_sizes<"BIC", 0b00, 0b1, 0b0, "bic",
- PatFrag<(ops node:$lhs, node:$rhs),
- (and node:$lhs, (not node:$rhs))>, []>;
-defm ORN : logical_sizes<"ORN", 0b01, 0b1, 0b0, "orn",
- PatFrag<(ops node:$lhs, node:$rhs),
- (or node:$lhs, (not node:$rhs))>, []>;
-defm EON : logical_sizes<"EON", 0b10, 0b1, 0b0, "eon",
- PatFrag<(ops node:$lhs, node:$rhs),
- (xor node:$lhs, (not node:$rhs))>, []>;
-defm BICS : logical_sizes<"BICS", 0b11, 0b1, 0b0, "bics",
- PatFrag<(ops node:$lhs, node:$rhs),
- (and node:$lhs, (not node:$rhs)),
- [{ (void)N; return false; }]>,
- [NZCV]>;
-
-multiclass tst_shifts<string prefix, bit sf, ValueType ty, RegisterClass GPR> {
- let isCommutable = 1, Rd = 0b11111, Defs = [NZCV] in {
- def _lsl : A64I_logicalshift<sf, 0b11, 0b00, 0b0,
- (outs),
- (ins GPR:$Rn, GPR:$Rm,
- !cast<Operand>("lsl_operand_" # ty):$Imm6),
- "tst\t$Rn, $Rm, $Imm6",
- [(set NZCV, (A64setcc (and ty:$Rn, (shl ty:$Rm,
- !cast<Operand>("lsl_operand_" # ty):$Imm6)),
- 0, signed_cond))],
- NoItinerary>;
-
-
- def _lsr : A64I_logicalshift<sf, 0b11, 0b01, 0b0,
- (outs),
- (ins GPR:$Rn, GPR:$Rm,
- !cast<Operand>("lsr_operand_" # ty):$Imm6),
- "tst\t$Rn, $Rm, $Imm6",
- [(set NZCV, (A64setcc (and ty:$Rn, (srl ty:$Rm,
- !cast<Operand>("lsr_operand_" # ty):$Imm6)),
- 0, signed_cond))],
- NoItinerary>;
-
- def _asr : A64I_logicalshift<sf, 0b11, 0b10, 0b0,
- (outs),
- (ins GPR:$Rn, GPR:$Rm,
- !cast<Operand>("asr_operand_" # ty):$Imm6),
- "tst\t$Rn, $Rm, $Imm6",
- [(set NZCV, (A64setcc (and ty:$Rn, (sra ty:$Rm,
- !cast<Operand>("asr_operand_" # ty):$Imm6)),
- 0, signed_cond))],
- NoItinerary>;
-
- def _ror : A64I_logicalshift<sf, 0b11, 0b11, 0b0,
- (outs),
- (ins GPR:$Rn, GPR:$Rm,
- !cast<Operand>("ror_operand_" # ty):$Imm6),
- "tst\t$Rn, $Rm, $Imm6",
- [(set NZCV, (A64setcc (and ty:$Rn, (rotr ty:$Rm,
- !cast<Operand>("ror_operand_" # ty):$Imm6)),
- 0, signed_cond))],
- NoItinerary>;
- }
-
- def _noshift : InstAlias<"tst $Rn, $Rm",
- (!cast<Instruction>(prefix # "_lsl") GPR:$Rn, GPR:$Rm, 0)>;
-
- def : Pat<(A64setcc (and ty:$Rn, ty:$Rm), 0, signed_cond),
- (!cast<Instruction>(prefix # "_lsl") $Rn, $Rm, 0)>;
-}
-
-defm TSTxx : tst_shifts<"TSTxx", 0b1, i64, GPR64>;
-defm TSTww : tst_shifts<"TSTww", 0b0, i32, GPR32>;
-
-
-multiclass mvn_shifts<string prefix, bit sf, ValueType ty, RegisterClass GPR> {
- let isCommutable = 0, Rn = 0b11111 in {
- def _lsl : A64I_logicalshift<sf, 0b01, 0b00, 0b1,
- (outs GPR:$Rd),
- (ins GPR:$Rm,
- !cast<Operand>("lsl_operand_" # ty):$Imm6),
- "mvn\t$Rd, $Rm, $Imm6",
- [(set ty:$Rd, (not (shl ty:$Rm,
- !cast<Operand>("lsl_operand_" # ty):$Imm6)))],
- NoItinerary>;
-
-
- def _lsr : A64I_logicalshift<sf, 0b01, 0b01, 0b1,
- (outs GPR:$Rd),
- (ins GPR:$Rm,
- !cast<Operand>("lsr_operand_" # ty):$Imm6),
- "mvn\t$Rd, $Rm, $Imm6",
- [(set ty:$Rd, (not (srl ty:$Rm,
- !cast<Operand>("lsr_operand_" # ty):$Imm6)))],
- NoItinerary>;
-
- def _asr : A64I_logicalshift<sf, 0b01, 0b10, 0b1,
- (outs GPR:$Rd),
- (ins GPR:$Rm,
- !cast<Operand>("asr_operand_" # ty):$Imm6),
- "mvn\t$Rd, $Rm, $Imm6",
- [(set ty:$Rd, (not (sra ty:$Rm,
- !cast<Operand>("asr_operand_" # ty):$Imm6)))],
- NoItinerary>;
-
- def _ror : A64I_logicalshift<sf, 0b01, 0b11, 0b1,
- (outs GPR:$Rd),
- (ins GPR:$Rm,
- !cast<Operand>("ror_operand_" # ty):$Imm6),
- "mvn\t$Rd, $Rm, $Imm6",
- [(set ty:$Rd, (not (rotr ty:$Rm,
- !cast<Operand>("lsl_operand_" # ty):$Imm6)))],
- NoItinerary>;
- }
-
- def _noshift : InstAlias<"mvn $Rn, $Rm",
- (!cast<Instruction>(prefix # "_lsl") GPR:$Rn, GPR:$Rm, 0)>;
-
- def : Pat<(not ty:$Rm),
- (!cast<Instruction>(prefix # "_lsl") $Rm, 0)>;
-}
-
-defm MVNxx : mvn_shifts<"MVNxx", 0b1, i64, GPR64>;
-defm MVNww : mvn_shifts<"MVNww", 0b0, i32, GPR32>;
-
-def MOVxx :InstAlias<"mov $Rd, $Rm", (ORRxxx_lsl GPR64:$Rd, XZR, GPR64:$Rm, 0)>;
-def MOVww :InstAlias<"mov $Rd, $Rm", (ORRwww_lsl GPR32:$Rd, WZR, GPR32:$Rm, 0)>;
+defm FCVT : FPConversion<"fcvt">;
//===----------------------------------------------------------------------===//
-// Move wide (immediate) instructions
+// Floating point single operand instructions.
//===----------------------------------------------------------------------===//
-// Contains: MOVN, MOVZ, MOVK + MOV aliases
-
-// A wide variety of different relocations are needed for variants of these
-// instructions, so it turns out that we need a different operand for all of
-// them.
-multiclass movw_operands<string prefix, string instname, int width> {
- def _imm_asmoperand : AsmOperandClass {
- let Name = instname # width # "Shifted" # shift;
- let PredicateMethod = "is" # instname # width # "Imm";
- let RenderMethod = "addMoveWideImmOperands";
- let ParserMethod = "ParseImmWithLSLOperand";
- let DiagnosticType = "MOVWUImm16";
- }
- def _imm : Operand<i64> {
- let ParserMatchClass = !cast<AsmOperandClass>(prefix # "_imm_asmoperand");
- let PrintMethod = "printMoveWideImmOperand";
- let EncoderMethod = "getMoveWideImmOpValue";
- let DecoderMethod = "DecodeMoveWideImmOperand<" # width # ">";
+defm FABS : SingleOperandFPData<0b0001, "fabs", fabs>;
+defm FMOV : SingleOperandFPData<0b0000, "fmov">;
+defm FNEG : SingleOperandFPData<0b0010, "fneg", fneg>;
+defm FRINTA : SingleOperandFPData<0b1100, "frinta", frnd>;
+defm FRINTI : SingleOperandFPData<0b1111, "frinti", fnearbyint>;
+defm FRINTM : SingleOperandFPData<0b1010, "frintm", ffloor>;
+defm FRINTN : SingleOperandFPData<0b1000, "frintn", int_aarch64_neon_frintn>;
+defm FRINTP : SingleOperandFPData<0b1001, "frintp", fceil>;
- let MIOperandInfo = (ops uimm16:$UImm16, imm:$Shift);
- }
-}
-
-defm movn32 : movw_operands<"movn32", "MOVN", 32>;
-defm movn64 : movw_operands<"movn64", "MOVN", 64>;
-defm movz32 : movw_operands<"movz32", "MOVZ", 32>;
-defm movz64 : movw_operands<"movz64", "MOVZ", 64>;
-defm movk32 : movw_operands<"movk32", "MOVK", 32>;
-defm movk64 : movw_operands<"movk64", "MOVK", 64>;
-
-multiclass A64I_movwSizes<bits<2> opc, string asmop, dag ins32bit,
- dag ins64bit> {
-
- def wii : A64I_movw<0b0, opc, (outs GPR32:$Rd), ins32bit,
- !strconcat(asmop, "\t$Rd, $FullImm"),
- [], NoItinerary> {
- bits<18> FullImm;
- let UImm16 = FullImm{15-0};
- let Shift = FullImm{17-16};
- }
-
- def xii : A64I_movw<0b1, opc, (outs GPR64:$Rd), ins64bit,
- !strconcat(asmop, "\t$Rd, $FullImm"),
- [], NoItinerary> {
- bits<18> FullImm;
- let UImm16 = FullImm{15-0};
- let Shift = FullImm{17-16};
- }
-}
+def : Pat<(v1f64 (int_aarch64_neon_frintn (v1f64 FPR64:$Rn))),
+ (FRINTNDr FPR64:$Rn)>;
-let isMoveImm = 1, isReMaterializable = 1,
- isAsCheapAsAMove = 1, hasSideEffects = 0 in {
- defm MOVN : A64I_movwSizes<0b00, "movn",
- (ins movn32_imm:$FullImm),
- (ins movn64_imm:$FullImm)>;
-
- // Some relocations are able to convert between a MOVZ and a MOVN. If these
- // are applied the instruction must be emitted with the corresponding bits as
- // 0, which means a MOVZ needs to override that bit from the default.
- let PostEncoderMethod = "fixMOVZ" in
- defm MOVZ : A64I_movwSizes<0b10, "movz",
- (ins movz32_imm:$FullImm),
- (ins movz64_imm:$FullImm)>;
+// FRINTX is inserted to set the flags as required by FENV_ACCESS ON behavior
+// in the C spec. Setting hasSideEffects ensures it is not DCE'd.
+// <rdar://problem/13715968>
+// TODO: We should really model the FPSR flags correctly. This is really ugly.
+let hasSideEffects = 1 in {
+defm FRINTX : SingleOperandFPData<0b1110, "frintx", frint>;
}
-let Constraints = "$src = $Rd" in
-defm MOVK : A64I_movwSizes<0b11, "movk",
- (ins GPR32:$src, movk32_imm:$FullImm),
- (ins GPR64:$src, movk64_imm:$FullImm)>;
+defm FRINTZ : SingleOperandFPData<0b1011, "frintz", ftrunc>;
-
-// And now the "MOV" aliases. These also need their own operands because what
-// they accept is completely different to what the base instructions accept.
-multiclass movalias_operand<string prefix, string basename,
- string immpredicate, int width> {
- def _asmoperand : AsmOperandClass {
- let Name = basename # width # "MovAlias";
- let PredicateMethod
- = "isMoveWideMovAlias<" # width # ", A64Imms::" # immpredicate # ">";
- let RenderMethod
- = "addMoveWideMovAliasOperands<" # width # ", "
- # "A64Imms::" # immpredicate # ">";
- }
-
- def _movimm : Operand<i64> {
- let ParserMatchClass = !cast<AsmOperandClass>(prefix # "_asmoperand");
-
- let MIOperandInfo = (ops uimm16:$UImm16, imm:$Shift);
- }
+let SchedRW = [WriteFDiv] in {
+defm FSQRT : SingleOperandFPData<0b0011, "fsqrt", fsqrt>;
}
-defm movz32 : movalias_operand<"movz32", "MOVZ", "isMOVZImm", 32>;
-defm movz64 : movalias_operand<"movz64", "MOVZ", "isMOVZImm", 64>;
-defm movn32 : movalias_operand<"movn32", "MOVN", "isOnlyMOVNImm", 32>;
-defm movn64 : movalias_operand<"movn64", "MOVN", "isOnlyMOVNImm", 64>;
-
-// FIXME: these are officially canonical aliases, but TableGen is too limited to
-// print them at the moment. I believe in this case an "AliasPredicate" method
-// will need to be implemented to allow it, as well as the more generally
-// useful handling of non-register, non-constant operands.
-class movalias<Instruction INST, RegisterClass GPR, Operand operand>
- : InstAlias<"mov $Rd, $FullImm", (INST GPR:$Rd, operand:$FullImm)>;
-
-def : movalias<MOVZwii, GPR32, movz32_movimm>;
-def : movalias<MOVZxii, GPR64, movz64_movimm>;
-def : movalias<MOVNwii, GPR32, movn32_movimm>;
-def : movalias<MOVNxii, GPR64, movn64_movimm>;
-
-def movw_addressref_g0 : ComplexPattern<i64, 2, "SelectMOVWAddressRef<0>">;
-def movw_addressref_g1 : ComplexPattern<i64, 2, "SelectMOVWAddressRef<1>">;
-def movw_addressref_g2 : ComplexPattern<i64, 2, "SelectMOVWAddressRef<2>">;
-def movw_addressref_g3 : ComplexPattern<i64, 2, "SelectMOVWAddressRef<3>">;
-
-def : Pat<(A64WrapperLarge movw_addressref_g3:$G3, movw_addressref_g2:$G2,
- movw_addressref_g1:$G1, movw_addressref_g0:$G0),
- (MOVKxii (MOVKxii (MOVKxii (MOVZxii movw_addressref_g3:$G3),
- movw_addressref_g2:$G2),
- movw_addressref_g1:$G1),
- movw_addressref_g0:$G0)>;
-
//===----------------------------------------------------------------------===//
-// PC-relative addressing instructions
+// Floating point two operand instructions.
//===----------------------------------------------------------------------===//
-// Contains: ADR, ADRP
-def adr_label : Operand<i64> {
- let EncoderMethod = "getLabelOpValue<AArch64::fixup_a64_adr_prel>";
-
- // This label is a 21-bit offset from PC, unscaled
- let PrintMethod = "printLabelOperand<21, 1>";
- let ParserMatchClass = label_asmoperand<21, 1>;
- let OperandType = "OPERAND_PCREL";
-}
-
-def adrp_label_asmoperand : AsmOperandClass {
- let Name = "AdrpLabel";
- let RenderMethod = "addLabelOperands<21, 4096>";
- let DiagnosticType = "Label";
-}
-
-def adrp_label : Operand<i64> {
- let EncoderMethod = "getAdrpLabelOpValue";
-
- // This label is a 21-bit offset from PC, scaled by the page-size: 4096.
- let PrintMethod = "printLabelOperand<21, 4096>";
- let ParserMatchClass = adrp_label_asmoperand;
- let OperandType = "OPERAND_PCREL";
-}
-
-let hasSideEffects = 0 in {
- def ADRxi : A64I_PCADR<0b0, (outs GPR64:$Rd), (ins adr_label:$Label),
- "adr\t$Rd, $Label", [], NoItinerary>;
-
- def ADRPxi : A64I_PCADR<0b1, (outs GPR64:$Rd), (ins adrp_label:$Label),
- "adrp\t$Rd, $Label", [], NoItinerary>;
-}
+defm FADD : TwoOperandFPData<0b0010, "fadd", fadd>;
+let SchedRW = [WriteFDiv] in {
+defm FDIV : TwoOperandFPData<0b0001, "fdiv", fdiv>;
+}
+defm FMAXNM : TwoOperandFPData<0b0110, "fmaxnm", int_aarch64_neon_fmaxnm>;
+defm FMAX : TwoOperandFPData<0b0100, "fmax", AArch64fmax>;
+defm FMINNM : TwoOperandFPData<0b0111, "fminnm", int_aarch64_neon_fminnm>;
+defm FMIN : TwoOperandFPData<0b0101, "fmin", AArch64fmin>;
+let SchedRW = [WriteFMul] in {
+defm FMUL : TwoOperandFPData<0b0000, "fmul", fmul>;
+defm FNMUL : TwoOperandFPDataNeg<0b1000, "fnmul", fmul>;
+}
+defm FSUB : TwoOperandFPData<0b0011, "fsub", fsub>;
+
+def : Pat<(v1f64 (AArch64fmax (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
+ (FMAXDrr FPR64:$Rn, FPR64:$Rm)>;
+def : Pat<(v1f64 (AArch64fmin (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
+ (FMINDrr FPR64:$Rn, FPR64:$Rm)>;
+def : Pat<(v1f64 (int_aarch64_neon_fmaxnm (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
+ (FMAXNMDrr FPR64:$Rn, FPR64:$Rm)>;
+def : Pat<(v1f64 (int_aarch64_neon_fminnm (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
+ (FMINNMDrr FPR64:$Rn, FPR64:$Rm)>;
//===----------------------------------------------------------------------===//
-// System instructions
+// Floating point three operand instructions.
//===----------------------------------------------------------------------===//
-// Contains: HINT, CLREX, DSB, DMB, ISB, MSR, SYS, SYSL, MRS
-// + aliases IC, DC, AT, TLBI, NOP, YIELD, WFE, WFI, SEV, SEVL
-
-// Op1 and Op2 fields are sometimes simple 3-bit unsigned immediate values.
-def uimm3_asmoperand : AsmOperandClass {
- let Name = "UImm3";
- let PredicateMethod = "isUImm<3>";
- let RenderMethod = "addImmOperands";
- let DiagnosticType = "UImm3";
-}
-
-def uimm3 : Operand<i32> {
- let ParserMatchClass = uimm3_asmoperand;
-}
-
-// The HINT alias can accept a simple unsigned 7-bit immediate.
-def uimm7_asmoperand : AsmOperandClass {
- let Name = "UImm7";
- let PredicateMethod = "isUImm<7>";
- let RenderMethod = "addImmOperands";
- let DiagnosticType = "UImm7";
-}
-def uimm7 : Operand<i32> {
- let ParserMatchClass = uimm7_asmoperand;
-}
-
-// Multiclass namedimm is defined with the prefetch operands. Most of these fit
-// into the NamedImmMapper scheme well: they either accept a named operand or
-// any immediate under a particular value (which may be 0, implying no immediate
-// is allowed).
-defm dbarrier : namedimm<"dbarrier", "A64DB::DBarrierMapper">;
-defm isb : namedimm<"isb", "A64ISB::ISBMapper">;
-defm ic : namedimm<"ic", "A64IC::ICMapper">;
-defm dc : namedimm<"dc", "A64DC::DCMapper">;
-defm at : namedimm<"at", "A64AT::ATMapper">;
-defm tlbi : namedimm<"tlbi", "A64TLBI::TLBIMapper">;
-
-// However, MRS and MSR are more complicated for a few reasons:
-// * There are ~1000 generic names S3_<op1>_<CRn>_<CRm>_<Op2> which have an
-// implementation-defined effect
-// * Most registers are shared, but some are read-only or write-only.
-// * There is a variant of MSR which accepts the same register name (SPSel),
-// but which would have a different encoding.
-
-// In principle these could be resolved with more complicated subclasses of
-// NamedImmMapper; however, that imposes an overhead on other "named
-// immediates", both in concrete terms (virtual tables) and in unnecessary
-// abstraction.
-
-// The solution adopted here is to take the MRS/MSR Mappers out of the usual
-// hierarchy (they're not derived from NamedImmMapper) and to add logic for
-// their special situation.
-def mrs_asmoperand : AsmOperandClass {
- let Name = "MRS";
- let ParserMethod = "ParseSysRegOperand";
- let DiagnosticType = "MRS";
-}
-
-def mrs_op : Operand<i32> {
- let ParserMatchClass = mrs_asmoperand;
- let PrintMethod = "printMRSOperand";
- let DecoderMethod = "DecodeMRSOperand";
-}
-
-def msr_asmoperand : AsmOperandClass {
- let Name = "MSRWithReg";
-
- // Note that SPSel is valid for both this and the pstate operands, but with
- // different immediate encodings. This is why these operands provide a string
- // AArch64Operand rather than an immediate. The overlap is small enough that
- // it could be resolved with hackery now, but who can say in future?
- let ParserMethod = "ParseSysRegOperand";
- let DiagnosticType = "MSR";
-}
-
-def msr_op : Operand<i32> {
- let ParserMatchClass = msr_asmoperand;
- let PrintMethod = "printMSROperand";
- let DecoderMethod = "DecodeMSROperand";
-}
-
-def pstate_asmoperand : AsmOperandClass {
- let Name = "MSRPState";
- // See comment above about parser.
- let ParserMethod = "ParseSysRegOperand";
- let DiagnosticType = "MSR";
-}
-
-def pstate_op : Operand<i32> {
- let ParserMatchClass = pstate_asmoperand;
- let PrintMethod = "printNamedImmOperand<A64PState::PStateMapper>";
- let DecoderMethod = "DecodeNamedImmOperand<A64PState::PStateMapper>";
-}
-
-// When <CRn> is specified, an assembler should accept something like "C4", not
-// the usual "#4" immediate.
-def CRx_asmoperand : AsmOperandClass {
- let Name = "CRx";
- let PredicateMethod = "isUImm<4>";
- let RenderMethod = "addImmOperands";
- let ParserMethod = "ParseCRxOperand";
- // Diagnostics are handled in all cases by ParseCRxOperand.
-}
-
-def CRx : Operand<i32> {
- let ParserMatchClass = CRx_asmoperand;
- let PrintMethod = "printCRxOperand";
-}
-
-
-// Finally, we can start defining the instructions.
-
-// HINT is straightforward, with a few aliases.
-def HINTi : A64I_system<0b0, (outs), (ins uimm7:$UImm7), "hint\t$UImm7",
- [], NoItinerary> {
- bits<7> UImm7;
- let CRm = UImm7{6-3};
- let Op2 = UImm7{2-0};
-
- let Op0 = 0b00;
- let Op1 = 0b011;
- let CRn = 0b0010;
- let Rt = 0b11111;
-}
-
-def : InstAlias<"nop", (HINTi 0)>;
-def : InstAlias<"yield", (HINTi 1)>;
-def : InstAlias<"wfe", (HINTi 2)>;
-def : InstAlias<"wfi", (HINTi 3)>;
-def : InstAlias<"sev", (HINTi 4)>;
-def : InstAlias<"sevl", (HINTi 5)>;
-
-// Quite a few instructions then follow a similar pattern of fixing common
-// fields in the bitpattern, so we'll define a helper-class for them.
-class simple_sys<bits<2> op0, bits<3> op1, bits<4> crn, bits<3> op2,
- Operand operand, string asmop>
- : A64I_system<0b0, (outs), (ins operand:$CRm), !strconcat(asmop, "\t$CRm"),
- [], NoItinerary> {
- let Op0 = op0;
- let Op1 = op1;
- let CRn = crn;
- let Op2 = op2;
- let Rt = 0b11111;
-}
+defm FMADD : ThreeOperandFPData<0, 0, "fmadd", fma>;
+defm FMSUB : ThreeOperandFPData<0, 1, "fmsub",
+ TriOpFrag<(fma node:$LHS, (fneg node:$MHS), node:$RHS)> >;
+defm FNMADD : ThreeOperandFPData<1, 0, "fnmadd",
+ TriOpFrag<(fneg (fma node:$LHS, node:$MHS, node:$RHS))> >;
+defm FNMSUB : ThreeOperandFPData<1, 1, "fnmsub",
+ TriOpFrag<(fma node:$LHS, node:$MHS, (fneg node:$RHS))> >;
+// The following def pats catch the case where the LHS of an FMA is negated.
+// The TriOpFrag above catches the case where the middle operand is negated.
-def CLREXi : simple_sys<0b00, 0b011, 0b0011, 0b010, uimm4, "clrex">;
-def DSBi : simple_sys<0b00, 0b011, 0b0011, 0b100, dbarrier_op, "dsb">;
-def DMBi : simple_sys<0b00, 0b011, 0b0011, 0b101, dbarrier_op, "dmb">;
-def ISBi : simple_sys<0b00, 0b011, 0b0011, 0b110, isb_op, "isb">;
+// N.b. FMSUB etc have the accumulator at the *end* of (outs), unlike
+// the NEON variant.
+def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, FPR32:$Ra)),
+ (FMSUBSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
-def : InstAlias<"clrex", (CLREXi 0b1111)>;
-def : InstAlias<"isb", (ISBi 0b1111)>;
+def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, FPR64:$Ra)),
+ (FMSUBDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
-// (DMBi 0xb) is a "DMB ISH" instruction, appropriate for Linux SMP
-// configurations at least.
-def : Pat<(atomic_fence imm, imm), (DMBi 0xb)>;
+// We handled -(a + b*c) for FNMADD above, now it's time for "(-a) + (-b)*c" and
+// "(-a) + b*(-c)".
+def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, (fneg FPR32:$Ra))),
+ (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
-// Any SYS bitpattern can be represented with a complex and opaque "SYS"
-// instruction.
-def SYSiccix : A64I_system<0b0, (outs),
- (ins uimm3:$Op1, CRx:$CRn, CRx:$CRm,
- uimm3:$Op2, GPR64:$Rt),
- "sys\t$Op1, $CRn, $CRm, $Op2, $Rt",
- [], NoItinerary> {
- let Op0 = 0b01;
-}
-
-// You can skip the Xt argument whether it makes sense or not for the generic
-// SYS instruction.
-def : InstAlias<"sys $Op1, $CRn, $CRm, $Op2",
- (SYSiccix uimm3:$Op1, CRx:$CRn, CRx:$CRm, uimm3:$Op2, XZR)>;
-
-
-// But many have aliases, which obviously don't fit into
-class SYSalias<dag ins, string asmstring>
- : A64I_system<0b0, (outs), ins, asmstring, [], NoItinerary> {
- let isAsmParserOnly = 1;
-
- bits<14> SysOp;
- let Op0 = 0b01;
- let Op1 = SysOp{13-11};
- let CRn = SysOp{10-7};
- let CRm = SysOp{6-3};
- let Op2 = SysOp{2-0};
-}
-
-def ICix : SYSalias<(ins ic_op:$SysOp, GPR64:$Rt), "ic\t$SysOp, $Rt">;
-
-def ICi : SYSalias<(ins ic_op:$SysOp), "ic\t$SysOp"> {
- let Rt = 0b11111;
-}
-
-def DCix : SYSalias<(ins dc_op:$SysOp, GPR64:$Rt), "dc\t$SysOp, $Rt">;
-def ATix : SYSalias<(ins at_op:$SysOp, GPR64:$Rt), "at\t$SysOp, $Rt">;
-
-def TLBIix : SYSalias<(ins tlbi_op:$SysOp, GPR64:$Rt), "tlbi\t$SysOp, $Rt">;
-
-def TLBIi : SYSalias<(ins tlbi_op:$SysOp), "tlbi\t$SysOp"> {
- let Rt = 0b11111;
-}
+def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, (fneg FPR64:$Ra))),
+ (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
+def : Pat<(f32 (fma FPR32:$Rn, (fneg FPR32:$Rm), (fneg FPR32:$Ra))),
+ (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
-def SYSLxicci : A64I_system<0b1, (outs GPR64:$Rt),
- (ins uimm3:$Op1, CRx:$CRn, CRx:$CRm, uimm3:$Op2),
- "sysl\t$Rt, $Op1, $CRn, $CRm, $Op2",
- [], NoItinerary> {
- let Op0 = 0b01;
-}
-
-// The instructions themselves are rather simple for MSR and MRS.
-def MSRix : A64I_system<0b0, (outs), (ins msr_op:$SysReg, GPR64:$Rt),
- "msr\t$SysReg, $Rt", [], NoItinerary> {
- bits<16> SysReg;
- let Op0 = SysReg{15-14};
- let Op1 = SysReg{13-11};
- let CRn = SysReg{10-7};
- let CRm = SysReg{6-3};
- let Op2 = SysReg{2-0};
-}
-
-def MRSxi : A64I_system<0b1, (outs GPR64:$Rt), (ins mrs_op:$SysReg),
- "mrs\t$Rt, $SysReg", [], NoItinerary> {
- bits<16> SysReg;
- let Op0 = SysReg{15-14};
- let Op1 = SysReg{13-11};
- let CRn = SysReg{10-7};
- let CRm = SysReg{6-3};
- let Op2 = SysReg{2-0};
-}
-
-def MSRii : A64I_system<0b0, (outs), (ins pstate_op:$PState, uimm4:$CRm),
- "msr\t$PState, $CRm", [], NoItinerary> {
- bits<6> PState;
-
- let Op0 = 0b00;
- let Op1 = PState{5-3};
- let CRn = 0b0100;
- let Op2 = PState{2-0};
- let Rt = 0b11111;
-}
+def : Pat<(f64 (fma FPR64:$Rn, (fneg FPR64:$Rm), (fneg FPR64:$Ra))),
+ (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
//===----------------------------------------------------------------------===//
-// Test & branch (immediate) instructions
+// Floating point comparison instructions.
//===----------------------------------------------------------------------===//
-// Contains: TBZ, TBNZ
-
-// The bit to test is a simple unsigned 6-bit immediate in the X-register
-// versions.
-def uimm6 : Operand<i64> {
- let ParserMatchClass = uimm6_asmoperand;
-}
-def label_wid14_scal4_asmoperand : label_asmoperand<14, 4>;
-
-def tbimm_target : Operand<OtherVT> {
- let EncoderMethod = "getLabelOpValue<AArch64::fixup_a64_tstbr>";
-
- // This label is a 14-bit offset from PC, scaled by the instruction-width: 4.
- let PrintMethod = "printLabelOperand<14, 4>";
- let ParserMatchClass = label_wid14_scal4_asmoperand;
-
- let OperandType = "OPERAND_PCREL";
-}
-
-def A64eq : ImmLeaf<i32, [{ return Imm == A64CC::EQ; }]>;
-def A64ne : ImmLeaf<i32, [{ return Imm == A64CC::NE; }]>;
-
-// These instructions correspond to patterns involving "and" with a power of
-// two, which we need to be able to select.
-def tstb64_pat : ComplexPattern<i64, 1, "SelectTSTBOperand<64>">;
-def tstb32_pat : ComplexPattern<i32, 1, "SelectTSTBOperand<32>">;
-
-let isBranch = 1, isTerminator = 1 in {
- def TBZxii : A64I_TBimm<0b0, (outs),
- (ins GPR64:$Rt, uimm6:$Imm, tbimm_target:$Label),
- "tbz\t$Rt, $Imm, $Label",
- [(A64br_cc (A64cmp (and i64:$Rt, tstb64_pat:$Imm), 0),
- A64eq, bb:$Label)],
- NoItinerary>;
-
- def TBNZxii : A64I_TBimm<0b1, (outs),
- (ins GPR64:$Rt, uimm6:$Imm, tbimm_target:$Label),
- "tbnz\t$Rt, $Imm, $Label",
- [(A64br_cc (A64cmp (and i64:$Rt, tstb64_pat:$Imm), 0),
- A64ne, bb:$Label)],
- NoItinerary>;
-
-
- // Note, these instructions overlap with the above 64-bit patterns. This is
-  // intentional: "tbz x3, #1, somewhere" and "tbz w3, #1, somewhere" would both
- // do the same thing and are both permitted assembly. They also both have
- // sensible DAG patterns.
- def TBZwii : A64I_TBimm<0b0, (outs),
- (ins GPR32:$Rt, uimm5:$Imm, tbimm_target:$Label),
- "tbz\t$Rt, $Imm, $Label",
- [(A64br_cc (A64cmp (and i32:$Rt, tstb32_pat:$Imm), 0),
- A64eq, bb:$Label)],
- NoItinerary> {
- let Imm{5} = 0b0;
- }
-
- def TBNZwii : A64I_TBimm<0b1, (outs),
- (ins GPR32:$Rt, uimm5:$Imm, tbimm_target:$Label),
- "tbnz\t$Rt, $Imm, $Label",
- [(A64br_cc (A64cmp (and i32:$Rt, tstb32_pat:$Imm), 0),
- A64ne, bb:$Label)],
- NoItinerary> {
- let Imm{5} = 0b0;
- }
-}
+defm FCMPE : FPComparison<1, "fcmpe">;
+defm FCMP : FPComparison<0, "fcmp", AArch64fcmp>;
//===----------------------------------------------------------------------===//
-// Unconditional branch (immediate) instructions
+// Floating point conditional comparison instructions.
//===----------------------------------------------------------------------===//
-// Contains: B, BL
-
-def label_wid26_scal4_asmoperand : label_asmoperand<26, 4>;
-
-def bimm_target : Operand<OtherVT> {
- let EncoderMethod = "getLabelOpValue<AArch64::fixup_a64_uncondbr>";
-
- // This label is a 26-bit offset from PC, scaled by the instruction-width: 4.
- let PrintMethod = "printLabelOperand<26, 4>";
- let ParserMatchClass = label_wid26_scal4_asmoperand;
-
- let OperandType = "OPERAND_PCREL";
-}
-
-def blimm_target : Operand<i64> {
- let EncoderMethod = "getLabelOpValue<AArch64::fixup_a64_call>";
-
- // This label is a 26-bit offset from PC, scaled by the instruction-width: 4.
- let PrintMethod = "printLabelOperand<26, 4>";
- let ParserMatchClass = label_wid26_scal4_asmoperand;
- let OperandType = "OPERAND_PCREL";
-}
+defm FCCMPE : FPCondComparison<1, "fccmpe">;
+defm FCCMP : FPCondComparison<0, "fccmp">;
-class A64I_BimmImpl<bit op, string asmop, list<dag> patterns, Operand lbl_type>
- : A64I_Bimm<op, (outs), (ins lbl_type:$Label),
- !strconcat(asmop, "\t$Label"), patterns,
- NoItinerary>;
+//===----------------------------------------------------------------------===//
+// Floating point conditional select instruction.
+//===----------------------------------------------------------------------===//
-let isBranch = 1 in {
- def Bimm : A64I_BimmImpl<0b0, "b", [(br bb:$Label)], bimm_target> {
- let isTerminator = 1;
- let isBarrier = 1;
- }
+defm FCSEL : FPCondSelect<"fcsel">;
- def BLimm : A64I_BimmImpl<0b1, "bl",
- [(AArch64Call tglobaladdr:$Label)], blimm_target> {
- let isCall = 1;
- let Defs = [X30];
- }
+// CSEL instructions providing f128 types need to be handled by a
+// pseudo-instruction since the eventual code will need to introduce basic
+// blocks and control flow.
+def F128CSEL : Pseudo<(outs FPR128:$Rd),
+ (ins FPR128:$Rn, FPR128:$Rm, ccode:$cond),
+ [(set (f128 FPR128:$Rd),
+ (AArch64csel FPR128:$Rn, FPR128:$Rm,
+ (i32 imm:$cond), NZCV))]> {
+ let Uses = [NZCV];
+ let usesCustomInserter = 1;
}
-def : Pat<(AArch64Call texternalsym:$Label), (BLimm texternalsym:$Label)>;
//===----------------------------------------------------------------------===//
-// Unconditional branch (register) instructions
+// Floating point immediate move.
//===----------------------------------------------------------------------===//
-// Contains: BR, BLR, RET, ERET, DRPS.
-
-// Most of the notional opcode fields in the A64I_Breg format are fixed in A64
-// at the moment.
-class A64I_BregImpl<bits<4> opc,
- dag outs, dag ins, string asmstr, list<dag> patterns,
- InstrItinClass itin = NoItinerary>
- : A64I_Breg<opc, 0b11111, 0b000000, 0b00000,
- outs, ins, asmstr, patterns, itin> {
- let isBranch = 1;
- let isIndirectBranch = 1;
-}
-
-// Note that these are not marked isCall or isReturn because as far as LLVM is
-// concerned they're not. "ret" is just another jump unless it has been selected
-// by LLVM as the function's return.
-
-let isBranch = 1 in {
- def BRx : A64I_BregImpl<0b0000,(outs), (ins GPR64:$Rn),
- "br\t$Rn", [(brind i64:$Rn)]> {
- let isBarrier = 1;
- let isTerminator = 1;
- }
- def BLRx : A64I_BregImpl<0b0001, (outs), (ins GPR64:$Rn),
- "blr\t$Rn", [(AArch64Call i64:$Rn)]> {
- let isBarrier = 0;
- let isCall = 1;
- let Defs = [X30];
- }
-
- def RETx : A64I_BregImpl<0b0010, (outs), (ins GPR64:$Rn),
- "ret\t$Rn", []> {
- let isBarrier = 1;
- let isTerminator = 1;
- let isReturn = 1;
- }
-
- // Create a separate pseudo-instruction for codegen to use so that we don't
- // flag x30 as used in every function. It'll be restored before the RET by the
- // epilogue if it's legitimately used.
- def RET : A64PseudoExpand<(outs), (ins), [(A64ret)], (RETx (ops X30))> {
- let isTerminator = 1;
- let isBarrier = 1;
- let isReturn = 1;
- }
-
- def ERET : A64I_BregImpl<0b0100, (outs), (ins), "eret", []> {
- let Rn = 0b11111;
- let isBarrier = 1;
- let isTerminator = 1;
- let isReturn = 1;
- }
-
- def DRPS : A64I_BregImpl<0b0101, (outs), (ins), "drps", []> {
- let Rn = 0b11111;
- let isBarrier = 1;
- }
+let isReMaterializable = 1 in {
+defm FMOV : FPMoveImmediate<"fmov">;
}
-def RETAlias : InstAlias<"ret", (RETx X30)>;
-
-
//===----------------------------------------------------------------------===//
-// Address generation patterns
+// Advanced SIMD two vector instructions.
//===----------------------------------------------------------------------===//
-// Primary method of address generation for the small/absolute memory model is
-// an ADRP/ADR pair:
-// ADRP x0, some_variable
-// ADD x0, x0, #:lo12:some_variable
-//
-// The load/store elision of the ADD is accomplished when selecting
-// addressing-modes. This just mops up the cases where that doesn't work and we
-// really need an address in some register.
-
-// This wrapper applies a LO12 modifier to the address. Otherwise we could just
-// use the same address.
-
-class ADRP_ADD<SDNode Wrapper, SDNode addrop>
- : Pat<(Wrapper addrop:$Hi, addrop:$Lo12, (i32 imm)),
- (ADDxxi_lsl0_s (ADRPxi addrop:$Hi), addrop:$Lo12)>;
-
-def : ADRP_ADD<A64WrapperSmall, tblockaddress>;
-def : ADRP_ADD<A64WrapperSmall, texternalsym>;
-def : ADRP_ADD<A64WrapperSmall, tglobaladdr>;
-def : ADRP_ADD<A64WrapperSmall, tglobaltlsaddr>;
-def : ADRP_ADD<A64WrapperSmall, tjumptable>;
+defm ABS : SIMDTwoVectorBHSD<0, 0b01011, "abs", int_aarch64_neon_abs>;
+defm CLS : SIMDTwoVectorBHS<0, 0b00100, "cls", int_aarch64_neon_cls>;
+defm CLZ : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>;
+defm CMEQ : SIMDCmpTwoVector<0, 0b01001, "cmeq", AArch64cmeqz>;
+defm CMGE : SIMDCmpTwoVector<1, 0b01000, "cmge", AArch64cmgez>;
+defm CMGT : SIMDCmpTwoVector<0, 0b01000, "cmgt", AArch64cmgtz>;
+defm CMLE : SIMDCmpTwoVector<1, 0b01001, "cmle", AArch64cmlez>;
+defm CMLT : SIMDCmpTwoVector<0, 0b01010, "cmlt", AArch64cmltz>;
+defm CNT : SIMDTwoVectorB<0, 0b00, 0b00101, "cnt", ctpop>;
+defm FABS : SIMDTwoVectorFP<0, 1, 0b01111, "fabs", fabs>;
+
+defm FCMEQ : SIMDFPCmpTwoVector<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
+defm FCMGE : SIMDFPCmpTwoVector<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
+defm FCMGT : SIMDFPCmpTwoVector<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
+defm FCMLE : SIMDFPCmpTwoVector<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
+defm FCMLT : SIMDFPCmpTwoVector<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
+defm FCVTAS : SIMDTwoVectorFPToInt<0,0,0b11100, "fcvtas",int_aarch64_neon_fcvtas>;
+defm FCVTAU : SIMDTwoVectorFPToInt<1,0,0b11100, "fcvtau",int_aarch64_neon_fcvtau>;
+defm FCVTL : SIMDFPWidenTwoVector<0, 0, 0b10111, "fcvtl">;
+def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (v4i16 V64:$Rn))),
+ (FCVTLv4i16 V64:$Rn)>;
+def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (extract_subvector (v8i16 V128:$Rn),
+ (i64 4)))),
+ (FCVTLv8i16 V128:$Rn)>;
+def : Pat<(v2f64 (fextend (v2f32 V64:$Rn))), (FCVTLv2i32 V64:$Rn)>;
+def : Pat<(v2f64 (fextend (v2f32 (extract_subvector (v4f32 V128:$Rn),
+ (i64 2))))),
+ (FCVTLv4i32 V128:$Rn)>;
+
+defm FCVTMS : SIMDTwoVectorFPToInt<0,0,0b11011, "fcvtms",int_aarch64_neon_fcvtms>;
+defm FCVTMU : SIMDTwoVectorFPToInt<1,0,0b11011, "fcvtmu",int_aarch64_neon_fcvtmu>;
+defm FCVTNS : SIMDTwoVectorFPToInt<0,0,0b11010, "fcvtns",int_aarch64_neon_fcvtns>;
+defm FCVTNU : SIMDTwoVectorFPToInt<1,0,0b11010, "fcvtnu",int_aarch64_neon_fcvtnu>;
+defm FCVTN : SIMDFPNarrowTwoVector<0, 0, 0b10110, "fcvtn">;
+def : Pat<(v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn))),
+ (FCVTNv4i16 V128:$Rn)>;
+def : Pat<(concat_vectors V64:$Rd,
+ (v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn)))),
+ (FCVTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
+def : Pat<(v2f32 (fround (v2f64 V128:$Rn))), (FCVTNv2i32 V128:$Rn)>;
+def : Pat<(concat_vectors V64:$Rd, (v2f32 (fround (v2f64 V128:$Rn)))),
+ (FCVTNv4i32 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
+defm FCVTPS : SIMDTwoVectorFPToInt<0,1,0b11010, "fcvtps",int_aarch64_neon_fcvtps>;
+defm FCVTPU : SIMDTwoVectorFPToInt<1,1,0b11010, "fcvtpu",int_aarch64_neon_fcvtpu>;
+defm FCVTXN : SIMDFPInexactCvtTwoVector<1, 0, 0b10110, "fcvtxn",
+ int_aarch64_neon_fcvtxn>;
+defm FCVTZS : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs", fp_to_sint>;
+defm FCVTZU : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu", fp_to_uint>;
+let isCodeGenOnly = 1 in {
+defm FCVTZS_Int : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs",
+ int_aarch64_neon_fcvtzs>;
+defm FCVTZU_Int : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu",
+ int_aarch64_neon_fcvtzu>;
+}
+defm FNEG : SIMDTwoVectorFP<1, 1, 0b01111, "fneg", fneg>;
+defm FRECPE : SIMDTwoVectorFP<0, 1, 0b11101, "frecpe", int_aarch64_neon_frecpe>;
+defm FRINTA : SIMDTwoVectorFP<1, 0, 0b11000, "frinta", frnd>;
+defm FRINTI : SIMDTwoVectorFP<1, 1, 0b11001, "frinti", fnearbyint>;
+defm FRINTM : SIMDTwoVectorFP<0, 0, 0b11001, "frintm", ffloor>;
+defm FRINTN : SIMDTwoVectorFP<0, 0, 0b11000, "frintn", int_aarch64_neon_frintn>;
+defm FRINTP : SIMDTwoVectorFP<0, 1, 0b11000, "frintp", fceil>;
+defm FRINTX : SIMDTwoVectorFP<1, 0, 0b11001, "frintx", frint>;
+defm FRINTZ : SIMDTwoVectorFP<0, 1, 0b11001, "frintz", ftrunc>;
+defm FRSQRTE: SIMDTwoVectorFP<1, 1, 0b11101, "frsqrte", int_aarch64_neon_frsqrte>;
+defm FSQRT : SIMDTwoVectorFP<1, 1, 0b11111, "fsqrt", fsqrt>;
+defm NEG : SIMDTwoVectorBHSD<1, 0b01011, "neg",
+ UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
+defm NOT : SIMDTwoVectorB<1, 0b00, 0b00101, "not", vnot>;
+// Aliases for MVN -> NOT.
+def : InstAlias<"mvn{ $Vd.8b, $Vn.8b|.8b $Vd, $Vn}",
+ (NOTv8i8 V64:$Vd, V64:$Vn)>;
+def : InstAlias<"mvn{ $Vd.16b, $Vn.16b|.16b $Vd, $Vn}",
+ (NOTv16i8 V128:$Vd, V128:$Vn)>;
+
+def : Pat<(AArch64neg (v8i8 V64:$Rn)), (NEGv8i8 V64:$Rn)>;
+def : Pat<(AArch64neg (v16i8 V128:$Rn)), (NEGv16i8 V128:$Rn)>;
+def : Pat<(AArch64neg (v4i16 V64:$Rn)), (NEGv4i16 V64:$Rn)>;
+def : Pat<(AArch64neg (v8i16 V128:$Rn)), (NEGv8i16 V128:$Rn)>;
+def : Pat<(AArch64neg (v2i32 V64:$Rn)), (NEGv2i32 V64:$Rn)>;
+def : Pat<(AArch64neg (v4i32 V128:$Rn)), (NEGv4i32 V128:$Rn)>;
+def : Pat<(AArch64neg (v2i64 V128:$Rn)), (NEGv2i64 V128:$Rn)>;
+
+def : Pat<(AArch64not (v8i8 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
+def : Pat<(AArch64not (v16i8 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
+def : Pat<(AArch64not (v4i16 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
+def : Pat<(AArch64not (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
+def : Pat<(AArch64not (v2i32 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
+def : Pat<(AArch64not (v1i64 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
+def : Pat<(AArch64not (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
+def : Pat<(AArch64not (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
+
+def : Pat<(vnot (v4i16 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
+def : Pat<(vnot (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
+def : Pat<(vnot (v2i32 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
+def : Pat<(vnot (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
+def : Pat<(vnot (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
+
+defm RBIT : SIMDTwoVectorB<1, 0b01, 0b00101, "rbit", int_aarch64_neon_rbit>;
+defm REV16 : SIMDTwoVectorB<0, 0b00, 0b00001, "rev16", AArch64rev16>;
+defm REV32 : SIMDTwoVectorBH<1, 0b00000, "rev32", AArch64rev32>;
+defm REV64 : SIMDTwoVectorBHS<0, 0b00000, "rev64", AArch64rev64>;
+defm SADALP : SIMDLongTwoVectorTied<0, 0b00110, "sadalp",
+ BinOpFrag<(add node:$LHS, (int_aarch64_neon_saddlp node:$RHS))> >;
+defm SADDLP : SIMDLongTwoVector<0, 0b00010, "saddlp", int_aarch64_neon_saddlp>;
+defm SCVTF : SIMDTwoVectorIntToFP<0, 0, 0b11101, "scvtf", sint_to_fp>;
+defm SHLL : SIMDVectorLShiftLongBySizeBHS;
+defm SQABS : SIMDTwoVectorBHSD<0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
+defm SQNEG : SIMDTwoVectorBHSD<1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
+defm SQXTN : SIMDMixedTwoVector<0, 0b10100, "sqxtn", int_aarch64_neon_sqxtn>;
+defm SQXTUN : SIMDMixedTwoVector<1, 0b10010, "sqxtun", int_aarch64_neon_sqxtun>;
+defm SUQADD : SIMDTwoVectorBHSDTied<0, 0b00011, "suqadd",int_aarch64_neon_suqadd>;
+defm UADALP : SIMDLongTwoVectorTied<1, 0b00110, "uadalp",
+ BinOpFrag<(add node:$LHS, (int_aarch64_neon_uaddlp node:$RHS))> >;
+defm UADDLP : SIMDLongTwoVector<1, 0b00010, "uaddlp",
+ int_aarch64_neon_uaddlp>;
+defm UCVTF : SIMDTwoVectorIntToFP<1, 0, 0b11101, "ucvtf", uint_to_fp>;
+defm UQXTN : SIMDMixedTwoVector<1, 0b10100, "uqxtn", int_aarch64_neon_uqxtn>;
+defm URECPE : SIMDTwoVectorS<0, 1, 0b11100, "urecpe", int_aarch64_neon_urecpe>;
+defm URSQRTE: SIMDTwoVectorS<1, 1, 0b11100, "ursqrte", int_aarch64_neon_ursqrte>;
+defm USQADD : SIMDTwoVectorBHSDTied<1, 0b00011, "usqadd",int_aarch64_neon_usqadd>;
+defm XTN : SIMDMixedTwoVector<0, 0b10010, "xtn", trunc>;
+
+def : Pat<(v2f32 (AArch64rev64 V64:$Rn)), (REV64v2i32 V64:$Rn)>;
+def : Pat<(v4f32 (AArch64rev64 V128:$Rn)), (REV64v4i32 V128:$Rn)>;
+
+// Patterns for vector long shift (by element width). These need to match all
+// three of zext, sext and anyext so it's easier to pull the patterns out of the
+// definition.
+multiclass SIMDVectorLShiftLongBySizeBHSPats<SDPatternOperator ext> {
+ def : Pat<(AArch64vshl (v8i16 (ext (v8i8 V64:$Rn))), (i32 8)),
+ (SHLLv8i8 V64:$Rn)>;
+ def : Pat<(AArch64vshl (v8i16 (ext (extract_high_v16i8 V128:$Rn))), (i32 8)),
+ (SHLLv16i8 V128:$Rn)>;
+ def : Pat<(AArch64vshl (v4i32 (ext (v4i16 V64:$Rn))), (i32 16)),
+ (SHLLv4i16 V64:$Rn)>;
+ def : Pat<(AArch64vshl (v4i32 (ext (extract_high_v8i16 V128:$Rn))), (i32 16)),
+ (SHLLv8i16 V128:$Rn)>;
+ def : Pat<(AArch64vshl (v2i64 (ext (v2i32 V64:$Rn))), (i32 32)),
+ (SHLLv2i32 V64:$Rn)>;
+ def : Pat<(AArch64vshl (v2i64 (ext (extract_high_v4i32 V128:$Rn))), (i32 32)),
+ (SHLLv4i32 V128:$Rn)>;
+}
+
+defm : SIMDVectorLShiftLongBySizeBHSPats<anyext>;
+defm : SIMDVectorLShiftLongBySizeBHSPats<zext>;
+defm : SIMDVectorLShiftLongBySizeBHSPats<sext>;
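A scalar C++ sketch of what the v8i8 form of these patterns computes (function name and fixed lane count are illustrative only, not part of the backend):

    #include <cstdint>

    // Model of SHLLv8i8 (shll v.8h, v.8b, #8): widen each byte lane to
    // 16 bits, then shift left by the source lane width.
    void shll_8b(const uint8_t src[8], uint16_t dst[8]) {
      for (int i = 0; i < 8; ++i)
        // Whether the widening is a zext, sext or anyext does not matter:
        // the extension bits are shifted out of the 16-bit result, which is
        // why one instruction covers all three patterns above.
        dst[i] = static_cast<uint16_t>(src[i]) << 8;
    }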
//===----------------------------------------------------------------------===//
-// GOT access patterns
+// Advanced SIMD three vector instructions.
//===----------------------------------------------------------------------===//
-class GOTLoadSmall<SDNode addrfrag>
- : Pat<(A64GOTLoad (A64WrapperSmall addrfrag:$Hi, addrfrag:$Lo12, 8)),
- (LS64_LDR (ADRPxi addrfrag:$Hi), addrfrag:$Lo12)>;
-
-def : GOTLoadSmall<texternalsym>;
-def : GOTLoadSmall<tglobaladdr>;
-def : GOTLoadSmall<tglobaltlsaddr>;
+defm ADD : SIMDThreeSameVector<0, 0b10000, "add", add>;
+defm ADDP : SIMDThreeSameVector<0, 0b10111, "addp", int_aarch64_neon_addp>;
+defm CMEQ : SIMDThreeSameVector<1, 0b10001, "cmeq", AArch64cmeq>;
+defm CMGE : SIMDThreeSameVector<0, 0b00111, "cmge", AArch64cmge>;
+defm CMGT : SIMDThreeSameVector<0, 0b00110, "cmgt", AArch64cmgt>;
+defm CMHI : SIMDThreeSameVector<1, 0b00110, "cmhi", AArch64cmhi>;
+defm CMHS : SIMDThreeSameVector<1, 0b00111, "cmhs", AArch64cmhs>;
+defm CMTST : SIMDThreeSameVector<0, 0b10001, "cmtst", AArch64cmtst>;
+defm FABD : SIMDThreeSameVectorFP<1,1,0b11010,"fabd", int_aarch64_neon_fabd>;
+defm FACGE : SIMDThreeSameVectorFPCmp<1,0,0b11101,"facge",int_aarch64_neon_facge>;
+defm FACGT : SIMDThreeSameVectorFPCmp<1,1,0b11101,"facgt",int_aarch64_neon_facgt>;
+defm FADDP : SIMDThreeSameVectorFP<1,0,0b11010,"faddp",int_aarch64_neon_addp>;
+defm FADD : SIMDThreeSameVectorFP<0,0,0b11010,"fadd", fadd>;
+defm FCMEQ : SIMDThreeSameVectorFPCmp<0, 0, 0b11100, "fcmeq", AArch64fcmeq>;
+defm FCMGE : SIMDThreeSameVectorFPCmp<1, 0, 0b11100, "fcmge", AArch64fcmge>;
+defm FCMGT : SIMDThreeSameVectorFPCmp<1, 1, 0b11100, "fcmgt", AArch64fcmgt>;
+defm FDIV : SIMDThreeSameVectorFP<1,0,0b11111,"fdiv", fdiv>;
+defm FMAXNMP : SIMDThreeSameVectorFP<1,0,0b11000,"fmaxnmp", int_aarch64_neon_fmaxnmp>;
+defm FMAXNM : SIMDThreeSameVectorFP<0,0,0b11000,"fmaxnm", int_aarch64_neon_fmaxnm>;
+defm FMAXP : SIMDThreeSameVectorFP<1,0,0b11110,"fmaxp", int_aarch64_neon_fmaxp>;
+defm FMAX : SIMDThreeSameVectorFP<0,0,0b11110,"fmax", AArch64fmax>;
+defm FMINNMP : SIMDThreeSameVectorFP<1,1,0b11000,"fminnmp", int_aarch64_neon_fminnmp>;
+defm FMINNM : SIMDThreeSameVectorFP<0,1,0b11000,"fminnm", int_aarch64_neon_fminnm>;
+defm FMINP : SIMDThreeSameVectorFP<1,1,0b11110,"fminp", int_aarch64_neon_fminp>;
+defm FMIN : SIMDThreeSameVectorFP<0,1,0b11110,"fmin", AArch64fmin>;
+
+// NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the
+// instruction expects the addend first, while the fma intrinsic puts it last.
+defm FMLA : SIMDThreeSameVectorFPTied<0, 0, 0b11001, "fmla",
+ TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
+defm FMLS : SIMDThreeSameVectorFPTied<0, 1, 0b11001, "fmls",
+ TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
+
+// The following def pats catch the case where the LHS of an FMA is negated.
+// The TriOpFrag above catches the case where the middle operand is negated.
+def : Pat<(v2f32 (fma (fneg V64:$Rn), V64:$Rm, V64:$Rd)),
+ (FMLSv2f32 V64:$Rd, V64:$Rn, V64:$Rm)>;
+
+def : Pat<(v4f32 (fma (fneg V128:$Rn), V128:$Rm, V128:$Rd)),
+ (FMLSv4f32 V128:$Rd, V128:$Rn, V128:$Rm)>;
+
+def : Pat<(v2f64 (fma (fneg V128:$Rn), V128:$Rm, V128:$Rd)),
+ (FMLSv2f64 V128:$Rd, V128:$Rn, V128:$Rm)>;
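A scalar C++ sketch of the operand ordering described in the notes above (illustrative only):

    #include <cmath>

    // std::fma(a, b, c) computes a*b + c with the addend last, while the
    // tied FMLA/FMLS destination register is the addend, hence the swapped
    // operands in the PatFrags.
    float fmla(float acc, float n, float m) { return std::fma(n, m, acc); }
    // acc - n*m: the negation can sit on either multiplicand, which is why
    // both the fneg-on-middle-operand and fneg-on-LHS forms select FMLS.
    float fmls(float acc, float n, float m) { return std::fma(-n, m, acc); }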
+
+defm FMULX : SIMDThreeSameVectorFP<0,0,0b11011,"fmulx", int_aarch64_neon_fmulx>;
+defm FMUL : SIMDThreeSameVectorFP<1,0,0b11011,"fmul", fmul>;
+defm FRECPS : SIMDThreeSameVectorFP<0,0,0b11111,"frecps", int_aarch64_neon_frecps>;
+defm FRSQRTS : SIMDThreeSameVectorFP<0,1,0b11111,"frsqrts", int_aarch64_neon_frsqrts>;
+defm FSUB : SIMDThreeSameVectorFP<0,1,0b11010,"fsub", fsub>;
+defm MLA : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla",
+ TriOpFrag<(add node:$LHS, (mul node:$MHS, node:$RHS))> >;
+defm MLS : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls",
+ TriOpFrag<(sub node:$LHS, (mul node:$MHS, node:$RHS))> >;
+defm MUL : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>;
+defm PMUL : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>;
+defm SABA : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
+ TriOpFrag<(add node:$LHS, (int_aarch64_neon_sabd node:$MHS, node:$RHS))> >;
+defm SABD : SIMDThreeSameVectorBHS<0,0b01110,"sabd", int_aarch64_neon_sabd>;
+defm SHADD : SIMDThreeSameVectorBHS<0,0b00000,"shadd", int_aarch64_neon_shadd>;
+defm SHSUB : SIMDThreeSameVectorBHS<0,0b00100,"shsub", int_aarch64_neon_shsub>;
+defm SMAXP : SIMDThreeSameVectorBHS<0,0b10100,"smaxp", int_aarch64_neon_smaxp>;
+defm SMAX : SIMDThreeSameVectorBHS<0,0b01100,"smax", int_aarch64_neon_smax>;
+defm SMINP : SIMDThreeSameVectorBHS<0,0b10101,"sminp", int_aarch64_neon_sminp>;
+defm SMIN : SIMDThreeSameVectorBHS<0,0b01101,"smin", int_aarch64_neon_smin>;
+defm SQADD : SIMDThreeSameVector<0,0b00001,"sqadd", int_aarch64_neon_sqadd>;
+defm SQDMULH : SIMDThreeSameVectorHS<0,0b10110,"sqdmulh",int_aarch64_neon_sqdmulh>;
+defm SQRDMULH : SIMDThreeSameVectorHS<1,0b10110,"sqrdmulh",int_aarch64_neon_sqrdmulh>;
+defm SQRSHL : SIMDThreeSameVector<0,0b01011,"sqrshl", int_aarch64_neon_sqrshl>;
+defm SQSHL : SIMDThreeSameVector<0,0b01001,"sqshl", int_aarch64_neon_sqshl>;
+defm SQSUB : SIMDThreeSameVector<0,0b00101,"sqsub", int_aarch64_neon_sqsub>;
+defm SRHADD : SIMDThreeSameVectorBHS<0,0b00010,"srhadd",int_aarch64_neon_srhadd>;
+defm SRSHL : SIMDThreeSameVector<0,0b01010,"srshl", int_aarch64_neon_srshl>;
+defm SSHL : SIMDThreeSameVector<0,0b01000,"sshl", int_aarch64_neon_sshl>;
+defm SUB : SIMDThreeSameVector<1,0b10000,"sub", sub>;
+defm UABA : SIMDThreeSameVectorBHSTied<1, 0b01111, "uaba",
+ TriOpFrag<(add node:$LHS, (int_aarch64_neon_uabd node:$MHS, node:$RHS))> >;
+defm UABD : SIMDThreeSameVectorBHS<1,0b01110,"uabd", int_aarch64_neon_uabd>;
+defm UHADD : SIMDThreeSameVectorBHS<1,0b00000,"uhadd", int_aarch64_neon_uhadd>;
+defm UHSUB : SIMDThreeSameVectorBHS<1,0b00100,"uhsub", int_aarch64_neon_uhsub>;
+defm UMAXP : SIMDThreeSameVectorBHS<1,0b10100,"umaxp", int_aarch64_neon_umaxp>;
+defm UMAX : SIMDThreeSameVectorBHS<1,0b01100,"umax", int_aarch64_neon_umax>;
+defm UMINP : SIMDThreeSameVectorBHS<1,0b10101,"uminp", int_aarch64_neon_uminp>;
+defm UMIN : SIMDThreeSameVectorBHS<1,0b01101,"umin", int_aarch64_neon_umin>;
+defm UQADD : SIMDThreeSameVector<1,0b00001,"uqadd", int_aarch64_neon_uqadd>;
+defm UQRSHL : SIMDThreeSameVector<1,0b01011,"uqrshl", int_aarch64_neon_uqrshl>;
+defm UQSHL : SIMDThreeSameVector<1,0b01001,"uqshl", int_aarch64_neon_uqshl>;
+defm UQSUB : SIMDThreeSameVector<1,0b00101,"uqsub", int_aarch64_neon_uqsub>;
+defm URHADD : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", int_aarch64_neon_urhadd>;
+defm URSHL : SIMDThreeSameVector<1,0b01010,"urshl", int_aarch64_neon_urshl>;
+defm USHL : SIMDThreeSameVector<1,0b01000,"ushl", int_aarch64_neon_ushl>;
+
+defm AND : SIMDLogicalThreeVector<0, 0b00, "and", and>;
+defm BIC : SIMDLogicalThreeVector<0, 0b01, "bic",
+ BinOpFrag<(and node:$LHS, (vnot node:$RHS))> >;
+defm BIF : SIMDLogicalThreeVector<1, 0b11, "bif">;
+defm BIT : SIMDLogicalThreeVectorTied<1, 0b10, "bit", AArch64bit>;
+defm BSL : SIMDLogicalThreeVectorTied<1, 0b01, "bsl",
+ TriOpFrag<(or (and node:$LHS, node:$MHS), (and (vnot node:$LHS), node:$RHS))>>;
+defm EOR : SIMDLogicalThreeVector<1, 0b00, "eor", xor>;
+defm ORN : SIMDLogicalThreeVector<0, 0b11, "orn",
+ BinOpFrag<(or node:$LHS, (vnot node:$RHS))> >;
+defm ORR : SIMDLogicalThreeVector<0, 0b10, "orr", or>;
+
+def : Pat<(AArch64bsl (v8i8 V64:$Rd), V64:$Rn, V64:$Rm),
+ (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
+def : Pat<(AArch64bsl (v4i16 V64:$Rd), V64:$Rn, V64:$Rm),
+ (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
+def : Pat<(AArch64bsl (v2i32 V64:$Rd), V64:$Rn, V64:$Rm),
+ (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
+def : Pat<(AArch64bsl (v1i64 V64:$Rd), V64:$Rn, V64:$Rm),
+ (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
+
+def : Pat<(AArch64bsl (v16i8 V128:$Rd), V128:$Rn, V128:$Rm),
+ (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
+def : Pat<(AArch64bsl (v8i16 V128:$Rd), V128:$Rn, V128:$Rm),
+ (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
+def : Pat<(AArch64bsl (v4i32 V128:$Rd), V128:$Rn, V128:$Rm),
+ (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
+def : Pat<(AArch64bsl (v2i64 V128:$Rd), V128:$Rn, V128:$Rm),
+ (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
+
+def : InstAlias<"mov{\t$dst.16b, $src.16b|.16b\t$dst, $src}",
+ (ORRv16i8 V128:$dst, V128:$src, V128:$src), 1>;
+def : InstAlias<"mov{\t$dst.8h, $src.8h|.8h\t$dst, $src}",
+ (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
+def : InstAlias<"mov{\t$dst.4s, $src.4s|.4s\t$dst, $src}",
+ (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
+def : InstAlias<"mov{\t$dst.2d, $src.2d|.2d\t$dst, $src}",
+ (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
+
+def : InstAlias<"mov{\t$dst.8b, $src.8b|.8b\t$dst, $src}",
+ (ORRv8i8 V64:$dst, V64:$src, V64:$src), 1>;
+def : InstAlias<"mov{\t$dst.4h, $src.4h|.4h\t$dst, $src}",
+ (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
+def : InstAlias<"mov{\t$dst.2s, $src.2s|.2s\t$dst, $src}",
+ (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
+def : InstAlias<"mov{\t$dst.1d, $src.1d|.1d\t$dst, $src}",
+ (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
+
+def : InstAlias<"{cmls\t$dst.8b, $src1.8b, $src2.8b" #
+ "|cmls.8b\t$dst, $src1, $src2}",
+ (CMHSv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
+def : InstAlias<"{cmls\t$dst.16b, $src1.16b, $src2.16b" #
+ "|cmls.16b\t$dst, $src1, $src2}",
+ (CMHSv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
+def : InstAlias<"{cmls\t$dst.4h, $src1.4h, $src2.4h" #
+ "|cmls.4h\t$dst, $src1, $src2}",
+ (CMHSv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
+def : InstAlias<"{cmls\t$dst.8h, $src1.8h, $src2.8h" #
+ "|cmls.8h\t$dst, $src1, $src2}",
+ (CMHSv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
+def : InstAlias<"{cmls\t$dst.2s, $src1.2s, $src2.2s" #
+ "|cmls.2s\t$dst, $src1, $src2}",
+ (CMHSv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
+def : InstAlias<"{cmls\t$dst.4s, $src1.4s, $src2.4s" #
+ "|cmls.4s\t$dst, $src1, $src2}",
+ (CMHSv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
+def : InstAlias<"{cmls\t$dst.2d, $src1.2d, $src2.2d" #
+ "|cmls.2d\t$dst, $src1, $src2}",
+ (CMHSv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
+
+def : InstAlias<"{cmlo\t$dst.8b, $src1.8b, $src2.8b" #
+ "|cmlo.8b\t$dst, $src1, $src2}",
+ (CMHIv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
+def : InstAlias<"{cmlo\t$dst.16b, $src1.16b, $src2.16b" #
+ "|cmlo.16b\t$dst, $src1, $src2}",
+ (CMHIv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
+def : InstAlias<"{cmlo\t$dst.4h, $src1.4h, $src2.4h" #
+ "|cmlo.4h\t$dst, $src1, $src2}",
+ (CMHIv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
+def : InstAlias<"{cmlo\t$dst.8h, $src1.8h, $src2.8h" #
+ "|cmlo.8h\t$dst, $src1, $src2}",
+ (CMHIv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
+def : InstAlias<"{cmlo\t$dst.2s, $src1.2s, $src2.2s" #
+ "|cmlo.2s\t$dst, $src1, $src2}",
+ (CMHIv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
+def : InstAlias<"{cmlo\t$dst.4s, $src1.4s, $src2.4s" #
+ "|cmlo.4s\t$dst, $src1, $src2}",
+ (CMHIv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
+def : InstAlias<"{cmlo\t$dst.2d, $src1.2d, $src2.2d" #
+ "|cmlo.2d\t$dst, $src1, $src2}",
+ (CMHIv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
+
+def : InstAlias<"{cmle\t$dst.8b, $src1.8b, $src2.8b" #
+ "|cmle.8b\t$dst, $src1, $src2}",
+ (CMGEv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
+def : InstAlias<"{cmle\t$dst.16b, $src1.16b, $src2.16b" #
+ "|cmle.16b\t$dst, $src1, $src2}",
+ (CMGEv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
+def : InstAlias<"{cmle\t$dst.4h, $src1.4h, $src2.4h" #
+ "|cmle.4h\t$dst, $src1, $src2}",
+ (CMGEv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
+def : InstAlias<"{cmle\t$dst.8h, $src1.8h, $src2.8h" #
+ "|cmle.8h\t$dst, $src1, $src2}",
+ (CMGEv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
+def : InstAlias<"{cmle\t$dst.2s, $src1.2s, $src2.2s" #
+ "|cmle.2s\t$dst, $src1, $src2}",
+ (CMGEv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
+def : InstAlias<"{cmle\t$dst.4s, $src1.4s, $src2.4s" #
+ "|cmle.4s\t$dst, $src1, $src2}",
+ (CMGEv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
+def : InstAlias<"{cmle\t$dst.2d, $src1.2d, $src2.2d" #
+ "|cmle.2d\t$dst, $src1, $src2}",
+ (CMGEv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
+
+def : InstAlias<"{cmlt\t$dst.8b, $src1.8b, $src2.8b" #
+ "|cmlt.8b\t$dst, $src1, $src2}",
+ (CMGTv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
+def : InstAlias<"{cmlt\t$dst.16b, $src1.16b, $src2.16b" #
+ "|cmlt.16b\t$dst, $src1, $src2}",
+ (CMGTv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
+def : InstAlias<"{cmlt\t$dst.4h, $src1.4h, $src2.4h" #
+ "|cmlt.4h\t$dst, $src1, $src2}",
+ (CMGTv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
+def : InstAlias<"{cmlt\t$dst.8h, $src1.8h, $src2.8h" #
+ "|cmlt.8h\t$dst, $src1, $src2}",
+ (CMGTv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
+def : InstAlias<"{cmlt\t$dst.2s, $src1.2s, $src2.2s" #
+ "|cmlt.2s\t$dst, $src1, $src2}",
+ (CMGTv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
+def : InstAlias<"{cmlt\t$dst.4s, $src1.4s, $src2.4s" #
+ "|cmlt.4s\t$dst, $src1, $src2}",
+ (CMGTv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
+def : InstAlias<"{cmlt\t$dst.2d, $src1.2d, $src2.2d" #
+ "|cmlt.2d\t$dst, $src1, $src2}",
+ (CMGTv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
+
+def : InstAlias<"{fcmle\t$dst.2s, $src1.2s, $src2.2s" #
+ "|fcmle.2s\t$dst, $src1, $src2}",
+ (FCMGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
+def : InstAlias<"{fcmle\t$dst.4s, $src1.4s, $src2.4s" #
+ "|fcmle.4s\t$dst, $src1, $src2}",
+ (FCMGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
+def : InstAlias<"{fcmle\t$dst.2d, $src1.2d, $src2.2d" #
+ "|fcmle.2d\t$dst, $src1, $src2}",
+ (FCMGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
+
+def : InstAlias<"{fcmlt\t$dst.2s, $src1.2s, $src2.2s" #
+ "|fcmlt.2s\t$dst, $src1, $src2}",
+ (FCMGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
+def : InstAlias<"{fcmlt\t$dst.4s, $src1.4s, $src2.4s" #
+ "|fcmlt.4s\t$dst, $src1, $src2}",
+ (FCMGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
+def : InstAlias<"{fcmlt\t$dst.2d, $src1.2d, $src2.2d" #
+ "|fcmlt.2d\t$dst, $src1, $src2}",
+ (FCMGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
+
+def : InstAlias<"{facle\t$dst.2s, $src1.2s, $src2.2s" #
+ "|facle.2s\t$dst, $src1, $src2}",
+ (FACGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
+def : InstAlias<"{facle\t$dst.4s, $src1.4s, $src2.4s" #
+ "|facle.4s\t$dst, $src1, $src2}",
+ (FACGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
+def : InstAlias<"{facle\t$dst.2d, $src1.2d, $src2.2d" #
+ "|facle.2d\t$dst, $src1, $src2}",
+ (FACGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
+
+def : InstAlias<"{faclt\t$dst.2s, $src1.2s, $src2.2s" #
+ "|faclt.2s\t$dst, $src1, $src2}",
+ (FACGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
+def : InstAlias<"{faclt\t$dst.4s, $src1.4s, $src2.4s" #
+ "|faclt.4s\t$dst, $src1, $src2}",
+ (FACGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
+def : InstAlias<"{faclt\t$dst.2d, $src1.2d, $src2.2d" #
+ "|faclt.2d\t$dst, $src1, $src2}",
+ (FACGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
//===----------------------------------------------------------------------===//
-// Tail call handling
+// Advanced SIMD three scalar instructions.
//===----------------------------------------------------------------------===//
-let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [XSP] in {
- def TC_RETURNdi
- : PseudoInst<(outs), (ins i64imm:$dst, i32imm:$FPDiff),
- [(AArch64tcret tglobaladdr:$dst, (i32 timm:$FPDiff))]>;
-
- def TC_RETURNxi
- : PseudoInst<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff),
- [(AArch64tcret i64:$dst, (i32 timm:$FPDiff))]>;
-}
-
-let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
- Uses = [XSP] in {
- def TAIL_Bimm : A64PseudoExpand<(outs), (ins bimm_target:$Label), [],
- (Bimm bimm_target:$Label)>;
-
- def TAIL_BRx : A64PseudoExpand<(outs), (ins tcGPR64:$Rd), [],
- (BRx GPR64:$Rd)>;
-}
-
-
-def : Pat<(AArch64tcret texternalsym:$dst, (i32 timm:$FPDiff)),
- (TC_RETURNdi texternalsym:$dst, imm:$FPDiff)>;
+defm ADD : SIMDThreeScalarD<0, 0b10000, "add", add>;
+defm CMEQ : SIMDThreeScalarD<1, 0b10001, "cmeq", AArch64cmeq>;
+defm CMGE : SIMDThreeScalarD<0, 0b00111, "cmge", AArch64cmge>;
+defm CMGT : SIMDThreeScalarD<0, 0b00110, "cmgt", AArch64cmgt>;
+defm CMHI : SIMDThreeScalarD<1, 0b00110, "cmhi", AArch64cmhi>;
+defm CMHS : SIMDThreeScalarD<1, 0b00111, "cmhs", AArch64cmhs>;
+defm CMTST : SIMDThreeScalarD<0, 0b10001, "cmtst", AArch64cmtst>;
+defm FABD : SIMDThreeScalarSD<1, 1, 0b11010, "fabd", int_aarch64_sisd_fabd>;
+def : Pat<(v1f64 (int_aarch64_neon_fabd (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
+ (FABD64 FPR64:$Rn, FPR64:$Rm)>;
+defm FACGE : SIMDThreeScalarFPCmp<1, 0, 0b11101, "facge",
+ int_aarch64_neon_facge>;
+defm FACGT : SIMDThreeScalarFPCmp<1, 1, 0b11101, "facgt",
+ int_aarch64_neon_facgt>;
+defm FCMEQ : SIMDThreeScalarFPCmp<0, 0, 0b11100, "fcmeq", AArch64fcmeq>;
+defm FCMGE : SIMDThreeScalarFPCmp<1, 0, 0b11100, "fcmge", AArch64fcmge>;
+defm FCMGT : SIMDThreeScalarFPCmp<1, 1, 0b11100, "fcmgt", AArch64fcmgt>;
+defm FMULX : SIMDThreeScalarSD<0, 0, 0b11011, "fmulx", int_aarch64_neon_fmulx>;
+defm FRECPS : SIMDThreeScalarSD<0, 0, 0b11111, "frecps", int_aarch64_neon_frecps>;
+defm FRSQRTS : SIMDThreeScalarSD<0, 1, 0b11111, "frsqrts", int_aarch64_neon_frsqrts>;
+defm SQADD : SIMDThreeScalarBHSD<0, 0b00001, "sqadd", int_aarch64_neon_sqadd>;
+defm SQDMULH : SIMDThreeScalarHS< 0, 0b10110, "sqdmulh", int_aarch64_neon_sqdmulh>;
+defm SQRDMULH : SIMDThreeScalarHS< 1, 0b10110, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
+defm SQRSHL : SIMDThreeScalarBHSD<0, 0b01011, "sqrshl",int_aarch64_neon_sqrshl>;
+defm SQSHL : SIMDThreeScalarBHSD<0, 0b01001, "sqshl", int_aarch64_neon_sqshl>;
+defm SQSUB : SIMDThreeScalarBHSD<0, 0b00101, "sqsub", int_aarch64_neon_sqsub>;
+defm SRSHL : SIMDThreeScalarD< 0, 0b01010, "srshl", int_aarch64_neon_srshl>;
+defm SSHL : SIMDThreeScalarD< 0, 0b01000, "sshl", int_aarch64_neon_sshl>;
+defm SUB : SIMDThreeScalarD< 1, 0b10000, "sub", sub>;
+defm UQADD : SIMDThreeScalarBHSD<1, 0b00001, "uqadd", int_aarch64_neon_uqadd>;
+defm UQRSHL : SIMDThreeScalarBHSD<1, 0b01011, "uqrshl",int_aarch64_neon_uqrshl>;
+defm UQSHL : SIMDThreeScalarBHSD<1, 0b01001, "uqshl", int_aarch64_neon_uqshl>;
+defm UQSUB : SIMDThreeScalarBHSD<1, 0b00101, "uqsub", int_aarch64_neon_uqsub>;
+defm URSHL : SIMDThreeScalarD< 1, 0b01010, "urshl", int_aarch64_neon_urshl>;
+defm USHL : SIMDThreeScalarD< 1, 0b01000, "ushl", int_aarch64_neon_ushl>;
+
+def : InstAlias<"cmls $dst, $src1, $src2",
+ (CMHSv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
+def : InstAlias<"cmle $dst, $src1, $src2",
+ (CMGEv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
+def : InstAlias<"cmlo $dst, $src1, $src2",
+ (CMHIv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
+def : InstAlias<"cmlt $dst, $src1, $src2",
+ (CMGTv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
+def : InstAlias<"fcmle $dst, $src1, $src2",
+ (FCMGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
+def : InstAlias<"fcmle $dst, $src1, $src2",
+ (FCMGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
+def : InstAlias<"fcmlt $dst, $src1, $src2",
+ (FCMGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
+def : InstAlias<"fcmlt $dst, $src1, $src2",
+ (FCMGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
+def : InstAlias<"facle $dst, $src1, $src2",
+ (FACGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
+def : InstAlias<"facle $dst, $src1, $src2",
+ (FACGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
+def : InstAlias<"faclt $dst, $src1, $src2",
+ (FACGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
+def : InstAlias<"faclt $dst, $src1, $src2",
+ (FACGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
//===----------------------------------------------------------------------===//
-// Thread local storage
+// Advanced SIMD three scalar instructions (mixed operands).
//===----------------------------------------------------------------------===//
+defm SQDMULL : SIMDThreeScalarMixedHS<0, 0b11010, "sqdmull",
+ int_aarch64_neon_sqdmulls_scalar>;
+defm SQDMLAL : SIMDThreeScalarMixedTiedHS<0, 0b10010, "sqdmlal">;
+defm SQDMLSL : SIMDThreeScalarMixedTiedHS<0, 0b10110, "sqdmlsl">;
+
+def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
+ (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
+ (i32 FPR32:$Rm))))),
+ (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
+def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
+ (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
+ (i32 FPR32:$Rm))))),
+ (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
-// This is a pseudo-instruction representing the ".tlsdesccall" directive in
-// assembly. Its effect is to insert an R_AARCH64_TLSDESC_CALL relocation at the
-// current location. It should always be immediately followed by a BLR
-// instruction, and is intended solely for relaxation by the linker.
-
-def : Pat<(A64threadpointer), (MRSxi 0xde82)>;
-
-def TLSDESCCALL : PseudoInst<(outs), (ins i64imm:$Lbl), []> {
- let hasSideEffects = 1;
-}
-
-def TLSDESC_BLRx : PseudoInst<(outs), (ins GPR64:$Rn, i64imm:$Var),
- [(A64tlsdesc_blr i64:$Rn, tglobaltlsaddr:$Var)]> {
- let isCall = 1;
- let Defs = [X30];
-}
+//===----------------------------------------------------------------------===//
+// Advanced SIMD two scalar instructions.
+//===----------------------------------------------------------------------===//
-def : Pat<(A64tlsdesc_blr i64:$Rn, texternalsym:$Var),
- (TLSDESC_BLRx $Rn, texternalsym:$Var)>;
+defm ABS : SIMDTwoScalarD< 0, 0b01011, "abs", int_aarch64_neon_abs>;
+defm CMEQ : SIMDCmpTwoScalarD< 0, 0b01001, "cmeq", AArch64cmeqz>;
+defm CMGE : SIMDCmpTwoScalarD< 1, 0b01000, "cmge", AArch64cmgez>;
+defm CMGT : SIMDCmpTwoScalarD< 0, 0b01000, "cmgt", AArch64cmgtz>;
+defm CMLE : SIMDCmpTwoScalarD< 1, 0b01001, "cmle", AArch64cmlez>;
+defm CMLT : SIMDCmpTwoScalarD< 0, 0b01010, "cmlt", AArch64cmltz>;
+defm FCMEQ : SIMDCmpTwoScalarSD<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
+defm FCMGE : SIMDCmpTwoScalarSD<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
+defm FCMGT : SIMDCmpTwoScalarSD<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
+defm FCMLE : SIMDCmpTwoScalarSD<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
+defm FCMLT : SIMDCmpTwoScalarSD<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
+defm FCVTAS : SIMDTwoScalarSD< 0, 0, 0b11100, "fcvtas">;
+defm FCVTAU : SIMDTwoScalarSD< 1, 0, 0b11100, "fcvtau">;
+defm FCVTMS : SIMDTwoScalarSD< 0, 0, 0b11011, "fcvtms">;
+defm FCVTMU : SIMDTwoScalarSD< 1, 0, 0b11011, "fcvtmu">;
+defm FCVTNS : SIMDTwoScalarSD< 0, 0, 0b11010, "fcvtns">;
+defm FCVTNU : SIMDTwoScalarSD< 1, 0, 0b11010, "fcvtnu">;
+defm FCVTPS : SIMDTwoScalarSD< 0, 1, 0b11010, "fcvtps">;
+defm FCVTPU : SIMDTwoScalarSD< 1, 1, 0b11010, "fcvtpu">;
+def FCVTXNv1i64 : SIMDInexactCvtTwoScalar<0b10110, "fcvtxn">;
+defm FCVTZS : SIMDTwoScalarSD< 0, 1, 0b11011, "fcvtzs">;
+defm FCVTZU : SIMDTwoScalarSD< 1, 1, 0b11011, "fcvtzu">;
+defm FRECPE : SIMDTwoScalarSD< 0, 1, 0b11101, "frecpe">;
+defm FRECPX : SIMDTwoScalarSD< 0, 1, 0b11111, "frecpx">;
+defm FRSQRTE : SIMDTwoScalarSD< 1, 1, 0b11101, "frsqrte">;
+defm NEG : SIMDTwoScalarD< 1, 0b01011, "neg",
+ UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
+defm SCVTF : SIMDTwoScalarCVTSD< 0, 0, 0b11101, "scvtf", AArch64sitof>;
+defm SQABS : SIMDTwoScalarBHSD< 0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
+defm SQNEG : SIMDTwoScalarBHSD< 1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
+defm SQXTN : SIMDTwoScalarMixedBHS< 0, 0b10100, "sqxtn", int_aarch64_neon_scalar_sqxtn>;
+defm SQXTUN : SIMDTwoScalarMixedBHS< 1, 0b10010, "sqxtun", int_aarch64_neon_scalar_sqxtun>;
+defm SUQADD : SIMDTwoScalarBHSDTied< 0, 0b00011, "suqadd",
+ int_aarch64_neon_suqadd>;
+defm UCVTF : SIMDTwoScalarCVTSD< 1, 0, 0b11101, "ucvtf", AArch64uitof>;
+defm UQXTN : SIMDTwoScalarMixedBHS<1, 0b10100, "uqxtn", int_aarch64_neon_scalar_uqxtn>;
+defm USQADD : SIMDTwoScalarBHSDTied< 1, 0b00011, "usqadd",
+ int_aarch64_neon_usqadd>;
+
+def : Pat<(AArch64neg (v1i64 V64:$Rn)), (NEGv1i64 V64:$Rn)>;
+
+def : Pat<(v1i64 (int_aarch64_neon_fcvtas (v1f64 FPR64:$Rn))),
+ (FCVTASv1i64 FPR64:$Rn)>;
+def : Pat<(v1i64 (int_aarch64_neon_fcvtau (v1f64 FPR64:$Rn))),
+ (FCVTAUv1i64 FPR64:$Rn)>;
+def : Pat<(v1i64 (int_aarch64_neon_fcvtms (v1f64 FPR64:$Rn))),
+ (FCVTMSv1i64 FPR64:$Rn)>;
+def : Pat<(v1i64 (int_aarch64_neon_fcvtmu (v1f64 FPR64:$Rn))),
+ (FCVTMUv1i64 FPR64:$Rn)>;
+def : Pat<(v1i64 (int_aarch64_neon_fcvtns (v1f64 FPR64:$Rn))),
+ (FCVTNSv1i64 FPR64:$Rn)>;
+def : Pat<(v1i64 (int_aarch64_neon_fcvtnu (v1f64 FPR64:$Rn))),
+ (FCVTNUv1i64 FPR64:$Rn)>;
+def : Pat<(v1i64 (int_aarch64_neon_fcvtps (v1f64 FPR64:$Rn))),
+ (FCVTPSv1i64 FPR64:$Rn)>;
+def : Pat<(v1i64 (int_aarch64_neon_fcvtpu (v1f64 FPR64:$Rn))),
+ (FCVTPUv1i64 FPR64:$Rn)>;
+
+def : Pat<(f32 (int_aarch64_neon_frecpe (f32 FPR32:$Rn))),
+ (FRECPEv1i32 FPR32:$Rn)>;
+def : Pat<(f64 (int_aarch64_neon_frecpe (f64 FPR64:$Rn))),
+ (FRECPEv1i64 FPR64:$Rn)>;
+def : Pat<(v1f64 (int_aarch64_neon_frecpe (v1f64 FPR64:$Rn))),
+ (FRECPEv1i64 FPR64:$Rn)>;
+
+def : Pat<(f32 (int_aarch64_neon_frecpx (f32 FPR32:$Rn))),
+ (FRECPXv1i32 FPR32:$Rn)>;
+def : Pat<(f64 (int_aarch64_neon_frecpx (f64 FPR64:$Rn))),
+ (FRECPXv1i64 FPR64:$Rn)>;
+
+def : Pat<(f32 (int_aarch64_neon_frsqrte (f32 FPR32:$Rn))),
+ (FRSQRTEv1i32 FPR32:$Rn)>;
+def : Pat<(f64 (int_aarch64_neon_frsqrte (f64 FPR64:$Rn))),
+ (FRSQRTEv1i64 FPR64:$Rn)>;
+def : Pat<(v1f64 (int_aarch64_neon_frsqrte (v1f64 FPR64:$Rn))),
+ (FRSQRTEv1i64 FPR64:$Rn)>;
+
+// If an integer is about to be converted to a floating point value,
+// just load it on the floating point unit.
+// Here are the patterns for 8 and 16-bits to float.
+// 8-bits -> float.
+multiclass UIntToFPROLoadPat<ValueType DstTy, ValueType SrcTy,
+ SDPatternOperator loadop, Instruction UCVTF,
+ ROAddrMode ro, Instruction LDRW, Instruction LDRX,
+ SubRegIndex sub> {
+ def : Pat<(DstTy (uint_to_fp (SrcTy
+ (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm,
+ ro.Wext:$extend))))),
+ (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
+ (LDRW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
+ sub))>;
+
+ def : Pat<(DstTy (uint_to_fp (SrcTy
+ (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm,
+ ro.Wext:$extend))))),
+ (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
+ (LDRX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
+ sub))>;
+}
+
+defm : UIntToFPROLoadPat<f32, i32, zextloadi8,
+ UCVTFv1i32, ro8, LDRBroW, LDRBroX, bsub>;
+def : Pat <(f32 (uint_to_fp (i32
+ (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
+ (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
+ (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
+def : Pat <(f32 (uint_to_fp (i32
+ (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
+ (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
+ (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
+// 16-bits -> float.
+defm : UIntToFPROLoadPat<f32, i32, zextloadi16,
+ UCVTFv1i32, ro16, LDRHroW, LDRHroX, hsub>;
+def : Pat <(f32 (uint_to_fp (i32
+ (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
+ (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
+ (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
+def : Pat <(f32 (uint_to_fp (i32
+ (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
+ (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
+ (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
+// The 32-bit case is handled in a target-specific DAG combine:
+// performIntToFpCombine.
+// Converting a 64-bit integer to a 32-bit float is not possible with
+// UCVTF on floating point registers (both source and destination
+// must have the same size).
+
+// Here are the patterns for 8, 16, 32, and 64-bits to double.
+// 8-bits -> double.
+defm : UIntToFPROLoadPat<f64, i32, zextloadi8,
+ UCVTFv1i64, ro8, LDRBroW, LDRBroX, bsub>;
+def : Pat <(f64 (uint_to_fp (i32
+ (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
+ (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+ (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
+def : Pat <(f64 (uint_to_fp (i32
+ (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
+ (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+ (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
+// 16-bits -> double.
+defm : UIntToFPROLoadPat<f64, i32, zextloadi16,
+ UCVTFv1i64, ro16, LDRHroW, LDRHroX, hsub>;
+def : Pat <(f64 (uint_to_fp (i32
+ (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
+ (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+ (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
+def : Pat <(f64 (uint_to_fp (i32
+ (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
+ (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+ (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
+// 32-bits -> double.
+defm : UIntToFPROLoadPat<f64, i32, load,
+ UCVTFv1i64, ro32, LDRSroW, LDRSroX, ssub>;
+def : Pat <(f64 (uint_to_fp (i32
+ (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
+ (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+ (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub))>;
+def : Pat <(f64 (uint_to_fp (i32
+ (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))),
+ (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+ (LDURSi GPR64sp:$Rn, simm9:$offset), ssub))>;
+// The 64-bit -> double case is handled in a target-specific DAG combine:
+// performIntToFpCombine.
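For reference, a C++ sketch of the source shape these load-and-convert patterns target (whether a particular compile actually selects this form depends on the surrounding code):

    #include <cstdint>

    // A small unsigned integer loaded from memory and converted straight to
    // floating point: the byte/halfword can be loaded directly into a SIMD/FP
    // register and converted there with ucvtf, avoiding a separate integer
    // load plus a GPR-to-FPR transfer.
    float byte_to_float(const uint8_t *p, long i)    { return static_cast<float>(p[i]); }
    double half_to_double(const uint16_t *p, long i) { return static_cast<double>(p[i]); }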
//===----------------------------------------------------------------------===//
-// Bitfield patterns
+// Advanced SIMD three different-sized vector instructions.
//===----------------------------------------------------------------------===//
-def bfi32_lsb_to_immr : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant((32 - N->getZExtValue()) % 32, MVT::i64);
+defm ADDHN : SIMDNarrowThreeVectorBHS<0,0b0100,"addhn", int_aarch64_neon_addhn>;
+defm SUBHN : SIMDNarrowThreeVectorBHS<0,0b0110,"subhn", int_aarch64_neon_subhn>;
+defm RADDHN : SIMDNarrowThreeVectorBHS<1,0b0100,"raddhn",int_aarch64_neon_raddhn>;
+defm RSUBHN : SIMDNarrowThreeVectorBHS<1,0b0110,"rsubhn",int_aarch64_neon_rsubhn>;
+defm PMULL : SIMDDifferentThreeVectorBD<0,0b1110,"pmull",int_aarch64_neon_pmull>;
+defm SABAL : SIMDLongThreeVectorTiedBHSabal<0,0b0101,"sabal",
+ int_aarch64_neon_sabd>;
+defm SABDL : SIMDLongThreeVectorBHSabdl<0, 0b0111, "sabdl",
+ int_aarch64_neon_sabd>;
+defm SADDL : SIMDLongThreeVectorBHS< 0, 0b0000, "saddl",
+ BinOpFrag<(add (sext node:$LHS), (sext node:$RHS))>>;
+defm SADDW : SIMDWideThreeVectorBHS< 0, 0b0001, "saddw",
+ BinOpFrag<(add node:$LHS, (sext node:$RHS))>>;
+defm SMLAL : SIMDLongThreeVectorTiedBHS<0, 0b1000, "smlal",
+ TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
+defm SMLSL : SIMDLongThreeVectorTiedBHS<0, 0b1010, "smlsl",
+ TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
+defm SMULL : SIMDLongThreeVectorBHS<0, 0b1100, "smull", int_aarch64_neon_smull>;
+defm SQDMLAL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1001, "sqdmlal",
+ int_aarch64_neon_sqadd>;
+defm SQDMLSL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1011, "sqdmlsl",
+ int_aarch64_neon_sqsub>;
+defm SQDMULL : SIMDLongThreeVectorHS<0, 0b1101, "sqdmull",
+ int_aarch64_neon_sqdmull>;
+defm SSUBL : SIMDLongThreeVectorBHS<0, 0b0010, "ssubl",
+ BinOpFrag<(sub (sext node:$LHS), (sext node:$RHS))>>;
+defm SSUBW : SIMDWideThreeVectorBHS<0, 0b0011, "ssubw",
+ BinOpFrag<(sub node:$LHS, (sext node:$RHS))>>;
+defm UABAL : SIMDLongThreeVectorTiedBHSabal<1, 0b0101, "uabal",
+ int_aarch64_neon_uabd>;
+defm UABDL : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
+ int_aarch64_neon_uabd>;
+defm UADDL : SIMDLongThreeVectorBHS<1, 0b0000, "uaddl",
+ BinOpFrag<(add (zext node:$LHS), (zext node:$RHS))>>;
+defm UADDW : SIMDWideThreeVectorBHS<1, 0b0001, "uaddw",
+ BinOpFrag<(add node:$LHS, (zext node:$RHS))>>;
+defm UMLAL : SIMDLongThreeVectorTiedBHS<1, 0b1000, "umlal",
+ TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
+defm UMLSL : SIMDLongThreeVectorTiedBHS<1, 0b1010, "umlsl",
+ TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
+defm UMULL : SIMDLongThreeVectorBHS<1, 0b1100, "umull", int_aarch64_neon_umull>;
+defm USUBL : SIMDLongThreeVectorBHS<1, 0b0010, "usubl",
+ BinOpFrag<(sub (zext node:$LHS), (zext node:$RHS))>>;
+defm USUBW : SIMDWideThreeVectorBHS< 1, 0b0011, "usubw",
+ BinOpFrag<(sub node:$LHS, (zext node:$RHS))>>;
+
+// Patterns for 64-bit pmull
+def : Pat<(int_aarch64_neon_pmull64 V64:$Rn, V64:$Rm),
+ (PMULLv1i64 V64:$Rn, V64:$Rm)>;
+def : Pat<(int_aarch64_neon_pmull64 (vector_extract (v2i64 V128:$Rn), (i64 1)),
+ (vector_extract (v2i64 V128:$Rm), (i64 1))),
+ (PMULLv2i64 V128:$Rn, V128:$Rm)>;
+
+// CodeGen patterns for addhn and subhn instructions, which can actually be
+// written in LLVM IR without too much difficulty.
+
+// ADDHN
+def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm), (i32 8))))),
+ (ADDHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
+def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
+ (i32 16))))),
+ (ADDHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
+def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
+ (i32 32))))),
+ (ADDHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
+def : Pat<(concat_vectors (v8i8 V64:$Rd),
+ (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm),
+ (i32 8))))),
+ (ADDHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
+ V128:$Rn, V128:$Rm)>;
+def : Pat<(concat_vectors (v4i16 V64:$Rd),
+ (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
+ (i32 16))))),
+ (ADDHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
+ V128:$Rn, V128:$Rm)>;
+def : Pat<(concat_vectors (v2i32 V64:$Rd),
+ (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
+ (i32 32))))),
+ (ADDHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
+ V128:$Rn, V128:$Rm)>;
+
+// SUBHN
+def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm), (i32 8))))),
+ (SUBHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
+def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
+ (i32 16))))),
+ (SUBHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
+def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
+ (i32 32))))),
+ (SUBHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
+def : Pat<(concat_vectors (v8i8 V64:$Rd),
+ (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
+ (i32 8))))),
+ (SUBHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
+ V128:$Rn, V128:$Rm)>;
+def : Pat<(concat_vectors (v4i16 V64:$Rd),
+ (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
+ (i32 16))))),
+ (SUBHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
+ V128:$Rn, V128:$Rm)>;
+def : Pat<(concat_vectors (v2i32 V64:$Rd),
+ (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
+ (i32 32))))),
+ (SUBHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
+ V128:$Rn, V128:$Rm)>;
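The IR above reduces to a simple per-lane computation; a scalar C++ sketch of the 8h-to-8b case (names illustrative):

    #include <cstdint>

    // addhn/subhn: do the 16-bit (wrapping) add or subtract, then keep only
    // the high byte of each lane.
    void addhn_8h(const uint16_t a[8], const uint16_t b[8], uint8_t d[8]) {
      for (int i = 0; i < 8; ++i)
        d[i] = static_cast<uint8_t>(static_cast<uint16_t>(a[i] + b[i]) >> 8);
    }
    void subhn_8h(const uint16_t a[8], const uint16_t b[8], uint8_t d[8]) {
      for (int i = 0; i < 8; ++i)
        d[i] = static_cast<uint8_t>(static_cast<uint16_t>(a[i] - b[i]) >> 8);
    }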
+
+//----------------------------------------------------------------------------
+// AdvSIMD bitwise extract from vector instruction.
+//----------------------------------------------------------------------------
+
+defm EXT : SIMDBitwiseExtract<"ext">;
+
+def : Pat<(v4i16 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
+ (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
+def : Pat<(v8i16 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
+ (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
+def : Pat<(v2i32 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
+ (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
+def : Pat<(v2f32 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
+ (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
+def : Pat<(v4i32 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
+ (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
+def : Pat<(v4f32 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
+ (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
+def : Pat<(v2i64 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
+ (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
+def : Pat<(v2f64 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
+ (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
+
+// We use EXT to handle extract_subvector to copy the upper 64 bits of a
+// 128-bit vector.
+def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 8))),
+ (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
+def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 4))),
+ (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
+def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 2))),
+ (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
+def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 1))),
+ (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
+def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 2))),
+ (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
+def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 1))),
+ (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
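A byte-level sketch of why EXT with a repeated source and immediate 8 yields the upper half (C++, illustrative):

    #include <cstdint>
    #include <cstring>

    // ext selects 16 consecutive bytes from the concatenation Vn:Vm starting
    // at the immediate. With Vm == Vn and #8, bytes 8..15 of Vn land in the
    // low 64 bits of the result, which the dsub extract then picks out.
    void extract_high_half(const uint8_t v[16], uint8_t hi[8]) {
      std::memcpy(hi, v + 8, 8);
    }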
+
+
+//----------------------------------------------------------------------------
+// AdvSIMD zip vector
+//----------------------------------------------------------------------------
+
+defm TRN1 : SIMDZipVector<0b010, "trn1", AArch64trn1>;
+defm TRN2 : SIMDZipVector<0b110, "trn2", AArch64trn2>;
+defm UZP1 : SIMDZipVector<0b001, "uzp1", AArch64uzp1>;
+defm UZP2 : SIMDZipVector<0b101, "uzp2", AArch64uzp2>;
+defm ZIP1 : SIMDZipVector<0b011, "zip1", AArch64zip1>;
+defm ZIP2 : SIMDZipVector<0b111, "zip2", AArch64zip2>;
+
+//----------------------------------------------------------------------------
+// AdvSIMD TBL/TBX instructions
+//----------------------------------------------------------------------------
+
+defm TBL : SIMDTableLookup< 0, "tbl">;
+defm TBX : SIMDTableLookupTied<1, "tbx">;
+
+def : Pat<(v8i8 (int_aarch64_neon_tbl1 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
+ (TBLv8i8One VecListOne128:$Rn, V64:$Ri)>;
+def : Pat<(v16i8 (int_aarch64_neon_tbl1 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
+ (TBLv16i8One V128:$Ri, V128:$Rn)>;
+
+def : Pat<(v8i8 (int_aarch64_neon_tbx1 (v8i8 V64:$Rd),
+ (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
+ (TBXv8i8One V64:$Rd, VecListOne128:$Rn, V64:$Ri)>;
+def : Pat<(v16i8 (int_aarch64_neon_tbx1 (v16i8 V128:$Rd),
+ (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
+ (TBXv16i8One V128:$Rd, V128:$Ri, V128:$Rn)>;
+
+
+//----------------------------------------------------------------------------
+// AdvSIMD scalar CPY instruction
+//----------------------------------------------------------------------------
+
+defm CPY : SIMDScalarCPY<"cpy">;
+
+//----------------------------------------------------------------------------
+// AdvSIMD scalar pairwise instructions
+//----------------------------------------------------------------------------
+
+defm ADDP : SIMDPairwiseScalarD<0, 0b11011, "addp">;
+defm FADDP : SIMDPairwiseScalarSD<1, 0, 0b01101, "faddp">;
+defm FMAXNMP : SIMDPairwiseScalarSD<1, 0, 0b01100, "fmaxnmp">;
+defm FMAXP : SIMDPairwiseScalarSD<1, 0, 0b01111, "fmaxp">;
+defm FMINNMP : SIMDPairwiseScalarSD<1, 1, 0b01100, "fminnmp">;
+defm FMINP : SIMDPairwiseScalarSD<1, 1, 0b01111, "fminp">;
+def : Pat<(i64 (int_aarch64_neon_saddv (v2i64 V128:$Rn))),
+ (ADDPv2i64p V128:$Rn)>;
+def : Pat<(i64 (int_aarch64_neon_uaddv (v2i64 V128:$Rn))),
+ (ADDPv2i64p V128:$Rn)>;
+def : Pat<(f32 (int_aarch64_neon_faddv (v2f32 V64:$Rn))),
+ (FADDPv2i32p V64:$Rn)>;
+def : Pat<(f32 (int_aarch64_neon_faddv (v4f32 V128:$Rn))),
+ (FADDPv2i32p (EXTRACT_SUBREG (FADDPv4f32 V128:$Rn, V128:$Rn), dsub))>;
+def : Pat<(f64 (int_aarch64_neon_faddv (v2f64 V128:$Rn))),
+ (FADDPv2i64p V128:$Rn)>;
+def : Pat<(f32 (int_aarch64_neon_fmaxnmv (v2f32 V64:$Rn))),
+ (FMAXNMPv2i32p V64:$Rn)>;
+def : Pat<(f64 (int_aarch64_neon_fmaxnmv (v2f64 V128:$Rn))),
+ (FMAXNMPv2i64p V128:$Rn)>;
+def : Pat<(f32 (int_aarch64_neon_fmaxv (v2f32 V64:$Rn))),
+ (FMAXPv2i32p V64:$Rn)>;
+def : Pat<(f64 (int_aarch64_neon_fmaxv (v2f64 V128:$Rn))),
+ (FMAXPv2i64p V128:$Rn)>;
+def : Pat<(f32 (int_aarch64_neon_fminnmv (v2f32 V64:$Rn))),
+ (FMINNMPv2i32p V64:$Rn)>;
+def : Pat<(f64 (int_aarch64_neon_fminnmv (v2f64 V128:$Rn))),
+ (FMINNMPv2i64p V128:$Rn)>;
+def : Pat<(f32 (int_aarch64_neon_fminv (v2f32 V64:$Rn))),
+ (FMINPv2i32p V64:$Rn)>;
+def : Pat<(f64 (int_aarch64_neon_fminv (v2f64 V128:$Rn))),
+ (FMINPv2i64p V128:$Rn)>;
+
+//----------------------------------------------------------------------------
+// AdvSIMD INS/DUP instructions
+//----------------------------------------------------------------------------
+
+def DUPv8i8gpr : SIMDDupFromMain<0, 0b00001, ".8b", v8i8, V64, GPR32>;
+def DUPv16i8gpr : SIMDDupFromMain<1, 0b00001, ".16b", v16i8, V128, GPR32>;
+def DUPv4i16gpr : SIMDDupFromMain<0, 0b00010, ".4h", v4i16, V64, GPR32>;
+def DUPv8i16gpr : SIMDDupFromMain<1, 0b00010, ".8h", v8i16, V128, GPR32>;
+def DUPv2i32gpr : SIMDDupFromMain<0, 0b00100, ".2s", v2i32, V64, GPR32>;
+def DUPv4i32gpr : SIMDDupFromMain<1, 0b00100, ".4s", v4i32, V128, GPR32>;
+def DUPv2i64gpr : SIMDDupFromMain<1, 0b01000, ".2d", v2i64, V128, GPR64>;
+
+def DUPv2i64lane : SIMDDup64FromElement;
+def DUPv2i32lane : SIMDDup32FromElement<0, ".2s", v2i32, V64>;
+def DUPv4i32lane : SIMDDup32FromElement<1, ".4s", v4i32, V128>;
+def DUPv4i16lane : SIMDDup16FromElement<0, ".4h", v4i16, V64>;
+def DUPv8i16lane : SIMDDup16FromElement<1, ".8h", v8i16, V128>;
+def DUPv8i8lane : SIMDDup8FromElement <0, ".8b", v8i8, V64>;
+def DUPv16i8lane : SIMDDup8FromElement <1, ".16b", v16i8, V128>;
+
+def : Pat<(v2f32 (AArch64dup (f32 FPR32:$Rn))),
+ (v2f32 (DUPv2i32lane
+ (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
+ (i64 0)))>;
+def : Pat<(v4f32 (AArch64dup (f32 FPR32:$Rn))),
+ (v4f32 (DUPv4i32lane
+ (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
+ (i64 0)))>;
+def : Pat<(v2f64 (AArch64dup (f64 FPR64:$Rn))),
+ (v2f64 (DUPv2i64lane
+ (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rn, dsub),
+ (i64 0)))>;
+
+def : Pat<(v2f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
+ (DUPv2i32lane V128:$Rn, VectorIndexS:$imm)>;
+def : Pat<(v4f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
+ (DUPv4i32lane V128:$Rn, VectorIndexS:$imm)>;
+def : Pat<(v2f64 (AArch64duplane64 (v2f64 V128:$Rn), VectorIndexD:$imm)),
+ (DUPv2i64lane V128:$Rn, VectorIndexD:$imm)>;
+
+// If there's an (AArch64dup (vector_extract ...) ...), we can use a duplane
+// instruction even if the types don't match: we just have to remap the lane
+// carefully. N.b. this trick only applies to truncations.
+def VecIndex_x2 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(2 * N->getZExtValue(), MVT::i64);
}]>;
-
-def bfi64_lsb_to_immr : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant((64 - N->getZExtValue()) % 64, MVT::i64);
+def VecIndex_x4 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(4 * N->getZExtValue(), MVT::i64);
}]>;
-
-def bfi_width_to_imms : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getZExtValue() - 1, MVT::i64);
+def VecIndex_x8 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(8 * N->getZExtValue(), MVT::i64);
}]>;
+multiclass DUPWithTruncPats<ValueType ResVT, ValueType Src64VT,
+ ValueType Src128VT, ValueType ScalVT,
+ Instruction DUP, SDNodeXForm IdxXFORM> {
+ def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src128VT V128:$Rn),
+ imm:$idx)))),
+ (DUP V128:$Rn, (IdxXFORM imm:$idx))>;
+
+ def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src64VT V64:$Rn),
+ imm:$idx)))),
+ (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
+}
+
+defm : DUPWithTruncPats<v8i8, v4i16, v8i16, i32, DUPv8i8lane, VecIndex_x2>;
+defm : DUPWithTruncPats<v8i8, v2i32, v4i32, i32, DUPv8i8lane, VecIndex_x4>;
+defm : DUPWithTruncPats<v4i16, v2i32, v4i32, i32, DUPv4i16lane, VecIndex_x2>;
+
+defm : DUPWithTruncPats<v16i8, v4i16, v8i16, i32, DUPv16i8lane, VecIndex_x2>;
+defm : DUPWithTruncPats<v16i8, v2i32, v4i32, i32, DUPv16i8lane, VecIndex_x4>;
+defm : DUPWithTruncPats<v8i16, v2i32, v4i32, i32, DUPv8i16lane, VecIndex_x2>;
+
+multiclass DUPWithTrunci64Pats<ValueType ResVT, Instruction DUP,
+ SDNodeXForm IdxXFORM> {
+ def : Pat<(ResVT (AArch64dup (i32 (trunc (vector_extract (v2i64 V128:$Rn),
+ imm:$idx))))),
+ (DUP V128:$Rn, (IdxXFORM imm:$idx))>;
+
+ def : Pat<(ResVT (AArch64dup (i32 (trunc (vector_extract (v1i64 V64:$Rn),
+ imm:$idx))))),
+ (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
+}
+
+defm : DUPWithTrunci64Pats<v8i8, DUPv8i8lane, VecIndex_x8>;
+defm : DUPWithTrunci64Pats<v4i16, DUPv4i16lane, VecIndex_x4>;
+defm : DUPWithTrunci64Pats<v2i32, DUPv2i32lane, VecIndex_x2>;
+
+defm : DUPWithTrunci64Pats<v16i8, DUPv16i8lane, VecIndex_x8>;
+defm : DUPWithTrunci64Pats<v8i16, DUPv8i16lane, VecIndex_x4>;
+defm : DUPWithTrunci64Pats<v4i32, DUPv4i32lane, VecIndex_x2>;
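A C++ sketch of the lane remap for the halfword-to-byte case (assumes the usual little-endian lane layout; names illustrative):

    #include <cstdint>
    #include <cstring>

    // Duplicating the truncated 16-bit lane 'idx' as bytes is the same as
    // duplicating byte lane 2*idx of the same register: the low byte of
    // h-lane idx is b-lane 2*idx, and truncation only keeps that low byte.
    void dup_trunc_h_to_b(const uint16_t src[8], unsigned idx, uint8_t dst[8]) {
      uint8_t bytes[16];
      std::memcpy(bytes, src, sizeof bytes);
      for (int i = 0; i < 8; ++i)
        dst[i] = bytes[2 * idx];   // == static_cast<uint8_t>(src[idx])
    }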
+
+// SMOV and UMOV definitions, with some extra patterns for convenience
+defm SMOV : SMov;
+defm UMOV : UMov;
+
+def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
+ (i32 (SMOVvi8to32 V128:$Rn, VectorIndexB:$idx))>;
+def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
+ (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
+def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
+ (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
+def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
+ (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
+def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
+ (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
+def : Pat<(sext (i32 (vector_extract (v4i32 V128:$Rn), VectorIndexS:$idx))),
+ (i64 (SMOVvi32to64 V128:$Rn, VectorIndexS:$idx))>;
+
+// Extracting i8 or i16 elements will have the zero-extend transformed to
+// an 'and' mask by type legalization since neither i8 nor i16 are legal types
+// for AArch64. Match these patterns here since UMOV already zeroes out the high
+// bits of the destination register.
+def : Pat<(and (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx),
+ (i32 0xff)),
+ (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx))>;
+def : Pat<(and (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),
+ (i32 0xffff)),
+ (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx))>;
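A C++ sketch of the DAG shape after type legalization that these two patterns fold (illustrative):

    #include <cstdint>

    // Extracting a u8 lane becomes an i32 extract followed by an 'and' mask;
    // umov already zero-extends the lane into the 32-bit register, so the
    // mask costs nothing extra.
    uint32_t get_byte_lane(const uint8_t v[16], unsigned idx) {
      uint32_t extracted = v[idx];   // umov wN, vM.b[idx]
      return extracted & 0xff;       // folded by the pattern above
    }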
+
+defm INS : SIMDIns;
+
+def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
+ (SUBREG_TO_REG (i32 0),
+ (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
+def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
+ (SUBREG_TO_REG (i32 0),
+ (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
+
+def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
+ (SUBREG_TO_REG (i32 0),
+ (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
+def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
+ (SUBREG_TO_REG (i32 0),
+ (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
+
+def : Pat<(v2i32 (scalar_to_vector (i32 FPR32:$Rn))),
+ (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
+ (i32 FPR32:$Rn), ssub))>;
+def : Pat<(v4i32 (scalar_to_vector (i32 FPR32:$Rn))),
+ (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
+ (i32 FPR32:$Rn), ssub))>;
+def : Pat<(v2i64 (scalar_to_vector (i64 FPR64:$Rn))),
+ (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
+ (i64 FPR64:$Rn), dsub))>;
+
+def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
+ (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
+def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
+ (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
+def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$Rn))),
+ (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub)>;
+
+def : Pat<(v2f32 (vector_insert (v2f32 V64:$Rn),
+ (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
+ (EXTRACT_SUBREG
+ (INSvi32lane
+ (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), V64:$Rn, dsub)),
+ VectorIndexS:$imm,
+ (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
+ (i64 0)),
+ dsub)>;
+def : Pat<(v4f32 (vector_insert (v4f32 V128:$Rn),
+ (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
+ (INSvi32lane
+ V128:$Rn, VectorIndexS:$imm,
+ (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
+ (i64 0))>;
+def : Pat<(v2f64 (vector_insert (v2f64 V128:$Rn),
+ (f64 FPR64:$Rm), (i64 VectorIndexD:$imm))),
+ (INSvi64lane
+ V128:$Rn, VectorIndexD:$imm,
+ (v2f64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rm, dsub)),
+ (i64 0))>;
+
+// Copy an element at a constant index in one vector into a constant indexed
+// element of another.
+// FIXME refactor to a shared class/def parameterized on vector type, vector
+// index type and INS extension
+def : Pat<(v16i8 (int_aarch64_neon_vcopy_lane
+ (v16i8 V128:$Vd), VectorIndexB:$idx, (v16i8 V128:$Vs),
+ VectorIndexB:$idx2)),
+ (v16i8 (INSvi8lane
+ V128:$Vd, VectorIndexB:$idx, V128:$Vs, VectorIndexB:$idx2)
+ )>;
+def : Pat<(v8i16 (int_aarch64_neon_vcopy_lane
+ (v8i16 V128:$Vd), VectorIndexH:$idx, (v8i16 V128:$Vs),
+ VectorIndexH:$idx2)),
+ (v8i16 (INSvi16lane
+ V128:$Vd, VectorIndexH:$idx, V128:$Vs, VectorIndexH:$idx2)
+ )>;
+def : Pat<(v4i32 (int_aarch64_neon_vcopy_lane
+ (v4i32 V128:$Vd), VectorIndexS:$idx, (v4i32 V128:$Vs),
+ VectorIndexS:$idx2)),
+ (v4i32 (INSvi32lane
+ V128:$Vd, VectorIndexS:$idx, V128:$Vs, VectorIndexS:$idx2)
+ )>;
+def : Pat<(v2i64 (int_aarch64_neon_vcopy_lane
+ (v2i64 V128:$Vd), VectorIndexD:$idx, (v2i64 V128:$Vs),
+ VectorIndexD:$idx2)),
+ (v2i64 (INSvi64lane
+ V128:$Vd, VectorIndexD:$idx, V128:$Vs, VectorIndexD:$idx2)
+ )>;
+
+multiclass Neon_INS_elt_pattern<ValueType VT128, ValueType VT64,
+ ValueType VTScal, Instruction INS> {
+ def : Pat<(VT128 (vector_insert V128:$src,
+ (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
+ imm:$Immd)),
+ (INS V128:$src, imm:$Immd, V128:$Rn, imm:$Immn)>;
+
+ def : Pat<(VT128 (vector_insert V128:$src,
+ (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
+ imm:$Immd)),
+ (INS V128:$src, imm:$Immd,
+ (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn)>;
+
+ def : Pat<(VT64 (vector_insert V64:$src,
+ (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
+ imm:$Immd)),
+ (EXTRACT_SUBREG (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub),
+ imm:$Immd, V128:$Rn, imm:$Immn),
+ dsub)>;
+
+ def : Pat<(VT64 (vector_insert V64:$src,
+ (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
+ imm:$Immd)),
+ (EXTRACT_SUBREG
+ (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub), imm:$Immd,
+ (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn),
+ dsub)>;
+}
+
+defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, INSvi32lane>;
+defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;
+defm : Neon_INS_elt_pattern<v16i8, v8i8, i32, INSvi8lane>;
+defm : Neon_INS_elt_pattern<v8i16, v4i16, i32, INSvi16lane>;
+defm : Neon_INS_elt_pattern<v4i32, v2i32, i32, INSvi32lane>;
+defm : Neon_INS_elt_pattern<v2i64, v1i64, i64, INSvi32lane>;
+
+
+// Floating point vector extractions are codegen'd as a sequence of
+// subregister extractions, possibly fed by an INS if the lane number is
+// anything other than zero.
+def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
+ (f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
+def : Pat<(vector_extract (v4f32 V128:$Rn), 0),
+ (f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
+def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
+ (f64 (EXTRACT_SUBREG
+ (INSvi64lane (v2f64 (IMPLICIT_DEF)), 0,
+ V128:$Rn, VectorIndexD:$idx),
+ dsub))>;
+def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
+ (f32 (EXTRACT_SUBREG
+ (INSvi32lane (v4f32 (IMPLICIT_DEF)), 0,
+ V128:$Rn, VectorIndexS:$idx),
+ ssub))>;
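A minimal C++ illustration of the two cases (lane 0 versus any other lane):

    // Lane 0 of a float vector already lives in the s/d subregister, so
    // extracting it is a plain subregister copy; any other lane is first
    // moved down to lane 0 (the INS above) and then read the same way.
    float get_lane(const float v[4], unsigned idx) {
      return v[idx];
    }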
+
+// All concat_vectors operations are canonicalised to act on i64 vectors for
+// AArch64. In the general case we need an instruction, which may just as well
+// be INS.
+class ConcatPat<ValueType DstTy, ValueType SrcTy>
+ : Pat<(DstTy (concat_vectors (SrcTy V64:$Rd), V64:$Rn)),
+ (INSvi64lane (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), 1,
+ (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub), 0)>;
+
+def : ConcatPat<v2i64, v1i64>;
+def : ConcatPat<v2f64, v1f64>;
+def : ConcatPat<v4i32, v2i32>;
+def : ConcatPat<v4f32, v2f32>;
+def : ConcatPat<v8i16, v4i16>;
+def : ConcatPat<v16i8, v8i8>;
+
+// If the high lanes are undef, though, we can just ignore them:
+class ConcatUndefPat<ValueType DstTy, ValueType SrcTy>
+ : Pat<(DstTy (concat_vectors (SrcTy V64:$Rn), undef)),
+ (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub)>;
+
+def : ConcatUndefPat<v2i64, v1i64>;
+def : ConcatUndefPat<v2f64, v1f64>;
+def : ConcatUndefPat<v4i32, v2i32>;
+def : ConcatUndefPat<v4f32, v2f32>;
+def : ConcatUndefPat<v8i16, v4i16>;
+def : ConcatUndefPat<v16i8, v8i8>;
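A per-lane C++ view of ConcatPat and ConcatUndefPat for the 2x64-bit case (illustrative):

    #include <cstdint>

    // The destination's low lane comes from Rd and the high lane from Rn;
    // when the high input is undef (ConcatUndefPat) only the subregister
    // insert of Rd is needed and the high lane can be left alone.
    void concat_1i64(uint64_t rd, uint64_t rn, uint64_t out[2]) {
      out[0] = rd;
      out[1] = rn;
    }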
+
+//----------------------------------------------------------------------------
+// AdvSIMD across lanes instructions
+//----------------------------------------------------------------------------
+
+defm ADDV : SIMDAcrossLanesBHS<0, 0b11011, "addv">;
+defm SMAXV : SIMDAcrossLanesBHS<0, 0b01010, "smaxv">;
+defm SMINV : SIMDAcrossLanesBHS<0, 0b11010, "sminv">;
+defm UMAXV : SIMDAcrossLanesBHS<1, 0b01010, "umaxv">;
+defm UMINV : SIMDAcrossLanesBHS<1, 0b11010, "uminv">;
+defm SADDLV : SIMDAcrossLanesHSD<0, 0b00011, "saddlv">;
+defm UADDLV : SIMDAcrossLanesHSD<1, 0b00011, "uaddlv">;
+defm FMAXNMV : SIMDAcrossLanesS<0b01100, 0, "fmaxnmv", int_aarch64_neon_fmaxnmv>;
+defm FMAXV : SIMDAcrossLanesS<0b01111, 0, "fmaxv", int_aarch64_neon_fmaxv>;
+defm FMINNMV : SIMDAcrossLanesS<0b01100, 1, "fminnmv", int_aarch64_neon_fminnmv>;
+defm FMINV : SIMDAcrossLanesS<0b01111, 1, "fminv", int_aarch64_neon_fminv>;
+
+multiclass SIMDAcrossLanesSignedIntrinsic<string baseOpc, Intrinsic intOp> {
+// If there is a sign extension after this intrinsic, consume it, as SMOV
+// already performed it.
+ def : Pat<(i32 (sext_inreg (i32 (intOp (v8i8 V64:$Rn))), i8)),
+ (i32 (SMOVvi8to32
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
+ (i64 0)))>;
+ def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
+ (i32 (SMOVvi8to32
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
+ (i64 0)))>;
+// If there is a sign extension after this intrinsic, consume it, as SMOV
+// already performed it.
+def : Pat<(i32 (sext_inreg (i32 (intOp (v16i8 V128:$Rn))), i8)),
+ (i32 (SMOVvi8to32
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
+ (i64 0)))>;
+def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
+ (i32 (SMOVvi8to32
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
+ (i64 0)))>;
+// If there is a sign extension after this intrinsic, consume it, as SMOV
+// already performed it.
+def : Pat<(i32 (sext_inreg (i32 (intOp (v4i16 V64:$Rn))), i16)),
+ (i32 (SMOVvi16to32
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
+ (i64 0)))>;
+def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
+ (i32 (SMOVvi16to32
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
+ (i64 0)))>;
+// If there is a sign extension after this intrinsic, consume it, as SMOV
+// already performed it.
+def : Pat<(i32 (sext_inreg (i32 (intOp (v8i16 V128:$Rn))), i16)),
+ (i32 (SMOVvi16to32
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
+ (i64 0)))>;
+def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
+ (i32 (SMOVvi16to32
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
+ (i64 0)))>;
+
+def : Pat<(i32 (intOp (v4i32 V128:$Rn))),
+ (i32 (EXTRACT_SUBREG
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub),
+ ssub))>;
+}
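+
+// Hedged sketch of the sext-absorbing case (assumed C source, not from this
+// patch): vaddv_s8 returns an int8_t, so widening the result is a sign
+// extension that the SMOV in the patterns above already performs.
+//   #include <arm_neon.h>
+//   int32_t sum_s8(int8x8_t v) { return (int32_t)vaddv_s8(v); }
+// No separate SXTB is expected after the across-lanes ADDV/SMOV sequence.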
+
+multiclass SIMDAcrossLanesUnsignedIntrinsic<string baseOpc, Intrinsic intOp> {
+// If there is a masking operation keeping only what has been actually
+// generated, consume it.
+ def : Pat<(i32 (and (i32 (intOp (v8i8 V64:$Rn))), maski8_or_more)),
+ (i32 (EXTRACT_SUBREG
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
+ ssub))>;
+ def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
+ (i32 (EXTRACT_SUBREG
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
+ ssub))>;
+// If there is a masking operation keeping only what has been actually
+// generated, consume it.
+def : Pat<(i32 (and (i32 (intOp (v16i8 V128:$Rn))), maski8_or_more)),
+ (i32 (EXTRACT_SUBREG
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
+ ssub))>;
+def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
+ (i32 (EXTRACT_SUBREG
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
+ ssub))>;
+
+// If there is a masking operation keeping only what has been actually
+// generated, consume it.
+def : Pat<(i32 (and (i32 (intOp (v4i16 V64:$Rn))), maski16_or_more)),
+ (i32 (EXTRACT_SUBREG
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
+ ssub))>;
+def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
+ (i32 (EXTRACT_SUBREG
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
+ ssub))>;
+// If there is a masking operation keeping only what has been actually
+// generated, consume it.
+def : Pat<(i32 (and (i32 (intOp (v8i16 V128:$Rn))), maski16_or_more)),
+ (i32 (EXTRACT_SUBREG
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
+ ssub))>;
+def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
+ (i32 (EXTRACT_SUBREG
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
+ ssub))>;
+
+def : Pat<(i32 (intOp (v4i32 V128:$Rn))),
+ (i32 (EXTRACT_SUBREG
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub),
+ ssub))>;
+
+}
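+
+// Hedged sketch of the mask-absorbing case (assumed C source, not from this
+// patch): the underlying i32 intrinsic result is masked back down to the
+// narrow element width, and that AND is what the patterns above consume.
+//   #include <arm_neon.h>
+//   uint32_t sum_u8(uint8x8_t v) { return vaddv_u8(v); }
+// The implicit narrowing of the result to uint8_t shows up as an
+// "and ..., 255" in the IR; no separate UXTB/AND is expected after UADDV.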
+
+multiclass SIMDAcrossLanesSignedLongIntrinsic<string baseOpc, Intrinsic intOp> {
+ def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
+ (i32 (SMOVvi16to32
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
+ (i64 0)))>;
+def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
+ (i32 (SMOVvi16to32
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
+ (i64 0)))>;
+
+def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
+ (i32 (EXTRACT_SUBREG
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
+ ssub))>;
+def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
+ (i32 (EXTRACT_SUBREG
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
+ ssub))>;
+
+def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
+ (i64 (EXTRACT_SUBREG
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
+ dsub))>;
+}
+
+multiclass SIMDAcrossLanesUnsignedLongIntrinsic<string baseOpc,
+ Intrinsic intOp> {
+ def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
+ (i32 (EXTRACT_SUBREG
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
+ ssub))>;
+def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
+ (i32 (EXTRACT_SUBREG
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
+ ssub))>;
+
+def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
+ (i32 (EXTRACT_SUBREG
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
+ ssub))>;
+def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
+ (i32 (EXTRACT_SUBREG
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
+ ssub))>;
+
+def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
+ (i64 (EXTRACT_SUBREG
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
+ dsub))>;
+}
+
+defm : SIMDAcrossLanesSignedIntrinsic<"ADDV", int_aarch64_neon_saddv>;
+// vaddv_[su]32 is special: it lowers to ADDP Vd.2S, Vn.2S, Vn.2S (Vn == Vm)
+// and the result is read back from Vd.s[0].
+def : Pat<(i32 (int_aarch64_neon_saddv (v2i32 V64:$Rn))),
+ (EXTRACT_SUBREG (ADDPv2i32 V64:$Rn, V64:$Rn), ssub)>;
+
+defm : SIMDAcrossLanesUnsignedIntrinsic<"ADDV", int_aarch64_neon_uaddv>;
+// vaddv_[su]32 is special: it lowers to ADDP Vd.2S, Vn.2S, Vn.2S (Vn == Vm)
+// and the result is read back from Vd.s[0].
+def : Pat<(i32 (int_aarch64_neon_uaddv (v2i32 V64:$Rn))),
+ (EXTRACT_SUBREG (ADDPv2i32 V64:$Rn, V64:$Rn), ssub)>;
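+
+// Hedged sketch (assumed C source, not from this patch) of the two-lane
+// special case handled just above:
+//   #include <arm_neon.h>
+//   uint32_t sum2(uint32x2_t v) { return vaddv_u32(v); }
+// There is no across-lanes ADDV for two 32-bit lanes, so the expected code is
+// "addp v0.2s, v0.2s, v0.2s" followed by reading lane 0 with a plain FMOV.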
+
+defm : SIMDAcrossLanesSignedIntrinsic<"SMAXV", int_aarch64_neon_smaxv>;
+def : Pat<(i32 (int_aarch64_neon_smaxv (v2i32 V64:$Rn))),
+ (EXTRACT_SUBREG (SMAXPv2i32 V64:$Rn, V64:$Rn), ssub)>;
+
+defm : SIMDAcrossLanesSignedIntrinsic<"SMINV", int_aarch64_neon_sminv>;
+def : Pat<(i32 (int_aarch64_neon_sminv (v2i32 V64:$Rn))),
+ (EXTRACT_SUBREG (SMINPv2i32 V64:$Rn, V64:$Rn), ssub)>;
+
+defm : SIMDAcrossLanesUnsignedIntrinsic<"UMAXV", int_aarch64_neon_umaxv>;
+def : Pat<(i32 (int_aarch64_neon_umaxv (v2i32 V64:$Rn))),
+ (EXTRACT_SUBREG (UMAXPv2i32 V64:$Rn, V64:$Rn), ssub)>;
+
+defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", int_aarch64_neon_uminv>;
+def : Pat<(i32 (int_aarch64_neon_uminv (v2i32 V64:$Rn))),
+ (EXTRACT_SUBREG (UMINPv2i32 V64:$Rn, V64:$Rn), ssub)>;
+
+defm : SIMDAcrossLanesSignedLongIntrinsic<"SADDLV", int_aarch64_neon_saddlv>;
+defm : SIMDAcrossLanesUnsignedLongIntrinsic<"UADDLV", int_aarch64_neon_uaddlv>;
+
+// The vaddlv_s32 intrinsic gets mapped to SADDLP.
+def : Pat<(i64 (int_aarch64_neon_saddlv (v2i32 V64:$Rn))),
+ (i64 (EXTRACT_SUBREG
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (SADDLPv2i32_v1i64 V64:$Rn), dsub),
+ dsub))>;
+// The vaddlv_u32 intrinsic gets mapped to UADDLP.
+def : Pat<(i64 (int_aarch64_neon_uaddlv (v2i32 V64:$Rn))),
+ (i64 (EXTRACT_SUBREG
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
+ (UADDLPv2i32_v1i64 V64:$Rn), dsub),
+ dsub))>;
+
+//------------------------------------------------------------------------------
+// AdvSIMD modified immediate instructions
+//------------------------------------------------------------------------------
+
+// AdvSIMD BIC
+defm BIC : SIMDModifiedImmVectorShiftTied<1, 0b11, 0b01, "bic", AArch64bici>;
+// AdvSIMD ORR
+defm ORR : SIMDModifiedImmVectorShiftTied<0, 0b11, 0b01, "orr", AArch64orri>;
+
+def : InstAlias<"bic $Vd.4h, $imm", (BICv4i16 V64:$Vd, imm0_255:$imm, 0)>;
+def : InstAlias<"bic $Vd.8h, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
+def : InstAlias<"bic $Vd.2s, $imm", (BICv2i32 V64:$Vd, imm0_255:$imm, 0)>;
+def : InstAlias<"bic $Vd.4s, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;
+
+def : InstAlias<"bic.4h $Vd, $imm", (BICv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
+def : InstAlias<"bic.8h $Vd, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
+def : InstAlias<"bic.2s $Vd, $imm", (BICv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
+def : InstAlias<"bic.4s $Vd, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
+
+def : InstAlias<"orr $Vd.4h, $imm", (ORRv4i16 V64:$Vd, imm0_255:$imm, 0)>;
+def : InstAlias<"orr $Vd.8h, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
+def : InstAlias<"orr $Vd.2s, $imm", (ORRv2i32 V64:$Vd, imm0_255:$imm, 0)>;
+def : InstAlias<"orr $Vd.4s, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;
+
+def : InstAlias<"orr.4h $Vd, $imm", (ORRv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
+def : InstAlias<"orr.8h $Vd, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
+def : InstAlias<"orr.2s $Vd, $imm", (ORRv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
+def : InstAlias<"orr.4s $Vd, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
+
+// AdvSIMD FMOV
+def FMOVv2f64_ns : SIMDModifiedImmVectorNoShift<1, 1, 0b1111, V128, fpimm8,
+ "fmov", ".2d",
+ [(set (v2f64 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
+def FMOVv2f32_ns : SIMDModifiedImmVectorNoShift<0, 0, 0b1111, V64, fpimm8,
+ "fmov", ".2s",
+ [(set (v2f32 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
+def FMOVv4f32_ns : SIMDModifiedImmVectorNoShift<1, 0, 0b1111, V128, fpimm8,
+ "fmov", ".4s",
+ [(set (v4f32 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
+
+// AdvSIMD MOVI
+
+// EDIT byte mask: scalar
+let isReMaterializable = 1, isAsCheapAsAMove = 1 in
+def MOVID : SIMDModifiedImmScalarNoShift<0, 1, 0b1110, "movi",
+ [(set FPR64:$Rd, simdimmtype10:$imm8)]>;
+// The movi_edit node has the immediate value already encoded, so we use
+// a plain imm0_255 here.
+def : Pat<(f64 (AArch64movi_edit imm0_255:$shift)),
+ (MOVID imm0_255:$shift)>;
+
+def : Pat<(v1i64 immAllZerosV), (MOVID (i32 0))>;
+def : Pat<(v2i32 immAllZerosV), (MOVID (i32 0))>;
+def : Pat<(v4i16 immAllZerosV), (MOVID (i32 0))>;
+def : Pat<(v8i8 immAllZerosV), (MOVID (i32 0))>;
+
+def : Pat<(v1i64 immAllOnesV), (MOVID (i32 255))>;
+def : Pat<(v2i32 immAllOnesV), (MOVID (i32 255))>;
+def : Pat<(v4i16 immAllOnesV), (MOVID (i32 255))>;
+def : Pat<(v8i8 immAllOnesV), (MOVID (i32 255))>;
+
+// EDIT byte mask: 2d
+
+// The movi_edit node has the immediate value already encoded, so we use
+// a plain imm0_255 in the pattern
+let isReMaterializable = 1, isAsCheapAsAMove = 1 in
+def MOVIv2d_ns : SIMDModifiedImmVectorNoShift<1, 1, 0b1110, V128,
+ simdimmtype10,
+ "movi", ".2d",
+ [(set (v2i64 V128:$Rd), (AArch64movi_edit imm0_255:$imm8))]>;
+
+
+// Use movi.2d to materialize 0.0 if the HW does zero-cycle zeroing.
+// Complexity is added to break a tie with a plain MOVI.
+let AddedComplexity = 1 in {
+def : Pat<(f32 fpimm0),
+ (f32 (EXTRACT_SUBREG (v2i64 (MOVIv2d_ns (i32 0))), ssub))>,
+ Requires<[HasZCZ]>;
+def : Pat<(f64 fpimm0),
+ (f64 (EXTRACT_SUBREG (v2i64 (MOVIv2d_ns (i32 0))), dsub))>,
+ Requires<[HasZCZ]>;
+}
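+
+// Hedged sketch (assumed C source, not from this patch) of what the HasZCZ
+// patterns above are for; on a subtarget with zero-cycle zeroing (e.g.
+// Cyclone) both functions are expected to materialise their result with
+// "movi v0.2d, #0" rather than an FMOV from WZR/XZR:
+//   float  fzero(void) { return 0.0f; }
+//   double dzero(void) { return 0.0; }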
+
+def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
+def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
+def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
+def : Pat<(v16i8 immAllZerosV), (MOVIv2d_ns (i32 0))>;
+
+def : Pat<(v2i64 immAllOnesV), (MOVIv2d_ns (i32 255))>;
+def : Pat<(v4i32 immAllOnesV), (MOVIv2d_ns (i32 255))>;
+def : Pat<(v8i16 immAllOnesV), (MOVIv2d_ns (i32 255))>;
+def : Pat<(v16i8 immAllOnesV), (MOVIv2d_ns (i32 255))>;
+
+def : Pat<(v2f64 (AArch64dup (f64 fpimm0))), (MOVIv2d_ns (i32 0))>;
+def : Pat<(v4f32 (AArch64dup (f32 fpimm0))), (MOVIv2d_ns (i32 0))>;
+
+// EDIT per word & halfword: 2s, 4h, 4s, & 8h
+defm MOVI : SIMDModifiedImmVectorShift<0, 0b10, 0b00, "movi">;
+
+def : InstAlias<"movi $Vd.4h, $imm", (MOVIv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
+def : InstAlias<"movi $Vd.8h, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
+def : InstAlias<"movi $Vd.2s, $imm", (MOVIv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
+def : InstAlias<"movi $Vd.4s, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
+
+def : InstAlias<"movi.4h $Vd, $imm", (MOVIv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
+def : InstAlias<"movi.8h $Vd, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
+def : InstAlias<"movi.2s $Vd, $imm", (MOVIv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
+def : InstAlias<"movi.4s $Vd, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
+
+def : Pat<(v2i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
+ (MOVIv2i32 imm0_255:$imm8, imm:$shift)>;
+def : Pat<(v4i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
+ (MOVIv4i32 imm0_255:$imm8, imm:$shift)>;
+def : Pat<(v4i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
+ (MOVIv4i16 imm0_255:$imm8, imm:$shift)>;
+def : Pat<(v8i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
+ (MOVIv8i16 imm0_255:$imm8, imm:$shift)>;
+
+// EDIT per word: 2s & 4s with MSL shifter
+def MOVIv2s_msl : SIMDModifiedImmMoveMSL<0, 0, {1,1,0,?}, V64, "movi", ".2s",
+ [(set (v2i32 V64:$Rd),
+ (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
+def MOVIv4s_msl : SIMDModifiedImmMoveMSL<1, 0, {1,1,0,?}, V128, "movi", ".4s",
+ [(set (v4i32 V128:$Rd),
+ (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
+
+// Per byte: 8b & 16b
+def MOVIv8b_ns : SIMDModifiedImmVectorNoShift<0, 0, 0b1110, V64, imm0_255,
+ "movi", ".8b",
+ [(set (v8i8 V64:$Rd), (AArch64movi imm0_255:$imm8))]>;
+def MOVIv16b_ns : SIMDModifiedImmVectorNoShift<1, 0, 0b1110, V128, imm0_255,
+ "movi", ".16b",
+ [(set (v16i8 V128:$Rd), (AArch64movi imm0_255:$imm8))]>;
+
+// AdvSIMD MVNI
+
+// EDIT per word & halfword: 2s, 4h, 4s, & 8h
+defm MVNI : SIMDModifiedImmVectorShift<1, 0b10, 0b00, "mvni">;
+
+def : InstAlias<"mvni $Vd.4h, $imm", (MVNIv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
+def : InstAlias<"mvni $Vd.8h, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
+def : InstAlias<"mvni $Vd.2s, $imm", (MVNIv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
+def : InstAlias<"mvni $Vd.4s, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
+
+def : InstAlias<"mvni.4h $Vd, $imm", (MVNIv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
+def : InstAlias<"mvni.8h $Vd, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
+def : InstAlias<"mvni.2s $Vd, $imm", (MVNIv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
+def : InstAlias<"mvni.4s $Vd, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
+
+def : Pat<(v2i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
+ (MVNIv2i32 imm0_255:$imm8, imm:$shift)>;
+def : Pat<(v4i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
+ (MVNIv4i32 imm0_255:$imm8, imm:$shift)>;
+def : Pat<(v4i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
+ (MVNIv4i16 imm0_255:$imm8, imm:$shift)>;
+def : Pat<(v8i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
+ (MVNIv8i16 imm0_255:$imm8, imm:$shift)>;
+
+// EDIT per word: 2s & 4s with MSL shifter
+def MVNIv2s_msl : SIMDModifiedImmMoveMSL<0, 1, {1,1,0,?}, V64, "mvni", ".2s",
+ [(set (v2i32 V64:$Rd),
+ (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
+def MVNIv4s_msl : SIMDModifiedImmMoveMSL<1, 1, {1,1,0,?}, V128, "mvni", ".4s",
+ [(set (v4i32 V128:$Rd),
+ (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
+
+//----------------------------------------------------------------------------
+// AdvSIMD indexed element
+//----------------------------------------------------------------------------
+
+let neverHasSideEffects = 1 in {
+ defm FMLA : SIMDFPIndexedSDTied<0, 0b0001, "fmla">;
+ defm FMLS : SIMDFPIndexedSDTied<0, 0b0101, "fmls">;
+}
+
+// NOTE: Operands are reordered in the FMLA/FMLS PatFrags because the
+// instruction expects the addend first, while the intrinsic expects it last.
+
+// On the other hand, there are quite a few valid combinatorial options due to
+// the commutativity of multiplication and the fact that (-x) * y = x * (-y).
+defm : SIMDFPIndexedSDTiedPatterns<"FMLA",
+ TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)>>;
+defm : SIMDFPIndexedSDTiedPatterns<"FMLA",
+ TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)>>;
+
+defm : SIMDFPIndexedSDTiedPatterns<"FMLS",
+ TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
+defm : SIMDFPIndexedSDTiedPatterns<"FMLS",
+ TriOpFrag<(fma node:$RHS, (fneg node:$MHS), node:$LHS)> >;
+defm : SIMDFPIndexedSDTiedPatterns<"FMLS",
+ TriOpFrag<(fma (fneg node:$RHS), node:$MHS, node:$LHS)> >;
+defm : SIMDFPIndexedSDTiedPatterns<"FMLS",
+ TriOpFrag<(fma (fneg node:$MHS), node:$RHS, node:$LHS)> >;
+
+multiclass FMLSIndexedAfterNegPatterns<SDPatternOperator OpNode> {
+ // 3 variants for the .2s version: DUPLANE from 128-bit, DUPLANE from 64-bit
+ // and DUP scalar.
+ def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
+ (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
+ VectorIndexS:$idx))),
+ (FMLSv2i32_indexed V64:$Rd, V64:$Rn, V128:$Rm, VectorIndexS:$idx)>;
+ def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
+ (v2f32 (AArch64duplane32
+ (v4f32 (insert_subvector undef,
+ (v2f32 (fneg V64:$Rm)),
+ (i32 0))),
+ VectorIndexS:$idx)))),
+ (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
+ (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
+ VectorIndexS:$idx)>;
+ def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
+ (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
+ (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
+ (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
+
+ // 3 variants for the .4s version: DUPLANE from 128-bit, DUPLANE from 64-bit
+ // and DUP scalar.
+ def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
+ (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
+ VectorIndexS:$idx))),
+ (FMLSv4i32_indexed V128:$Rd, V128:$Rn, V128:$Rm,
+ VectorIndexS:$idx)>;
+ def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
+ (v4f32 (AArch64duplane32
+ (v4f32 (insert_subvector undef,
+ (v2f32 (fneg V64:$Rm)),
+ (i32 0))),
+ VectorIndexS:$idx)))),
+ (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
+ (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
+ VectorIndexS:$idx)>;
+ def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
+ (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
+ (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
+ (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
+
+ // 2 variants for the .2d version: DUPLANE from 128-bit, and DUP scalar
+ // (DUPLANE from 64-bit would be trivial).
+ def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
+ (AArch64duplane64 (v2f64 (fneg V128:$Rm)),
+ VectorIndexD:$idx))),
+ (FMLSv2i64_indexed
+ V128:$Rd, V128:$Rn, V128:$Rm, VectorIndexS:$idx)>;
+ def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
+ (AArch64dup (f64 (fneg FPR64Op:$Rm))))),
+ (FMLSv2i64_indexed V128:$Rd, V128:$Rn,
+ (SUBREG_TO_REG (i32 0), FPR64Op:$Rm, dsub), (i64 0))>;
+
+ // 2 variants for 32-bit scalar version: extract from .2s or from .4s
+ def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
+ (vector_extract (v4f32 (fneg V128:$Rm)),
+ VectorIndexS:$idx))),
+ (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
+ V128:$Rm, VectorIndexS:$idx)>;
+ def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
+ (vector_extract (v2f32 (fneg V64:$Rm)),
+ VectorIndexS:$idx))),
+ (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
+ (SUBREG_TO_REG (i32 0), V64:$Rm, dsub), VectorIndexS:$idx)>;
+
+ // 1 variant for 64-bit scalar version: extract from .1d or from .2d
+ def : Pat<(f64 (OpNode (f64 FPR64:$Rd), (f64 FPR64:$Rn),
+ (vector_extract (v2f64 (fneg V128:$Rm)),
+ VectorIndexS:$idx))),
+ (FMLSv1i64_indexed FPR64:$Rd, FPR64:$Rn,
+ V128:$Rm, VectorIndexS:$idx)>;
+}
+
+defm : FMLSIndexedAfterNegPatterns<
+ TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
+defm : FMLSIndexedAfterNegPatterns<
+ TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)> >;
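+
+// Hedged sketch (assumed C source, not from this patch): vfms_laneq_f32 is
+// expected to reach ISel as an fma with one operand negated, and whichever
+// way the operands end up ordered after combining, one of the FMLS indexed
+// patterns above should match and select "fmls v0.2s, v1.2s, v2.s[1]".
+//   #include <arm_neon.h>
+//   float32x2_t f(float32x2_t acc, float32x2_t a, float32x4_t b) {
+//     return vfms_laneq_f32(acc, a, b, 1);   // acc - a * b[1]
+//   }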
+
+defm FMULX : SIMDFPIndexedSD<1, 0b1001, "fmulx", int_aarch64_neon_fmulx>;
+defm FMUL : SIMDFPIndexedSD<0, 0b1001, "fmul", fmul>;
+
+def : Pat<(v2f32 (fmul V64:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
+ (FMULv2i32_indexed V64:$Rn,
+ (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
+ (i64 0))>;
+def : Pat<(v4f32 (fmul V128:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
+ (FMULv4i32_indexed V128:$Rn,
+ (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
+ (i64 0))>;
+def : Pat<(v2f64 (fmul V128:$Rn, (AArch64dup (f64 FPR64:$Rm)))),
+ (FMULv2i64_indexed V128:$Rn,
+ (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rm, dsub),
+ (i64 0))>;
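+
+// Hedged sketch (assumed C source, not from this patch) of the dup-scalar
+// multiplies handled just above; the by-element form avoids an explicit DUP:
+//   #include <arm_neon.h>
+//   float32x2_t scale(float32x2_t v, float s) { return vmul_n_f32(v, s); }
+// Expected selection: "fmul v0.2s, v0.2s, v1.s[0]" via FMULv2i32_indexed.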
+
+defm SQDMULH : SIMDIndexedHS<0, 0b1100, "sqdmulh", int_aarch64_neon_sqdmulh>;
+defm SQRDMULH : SIMDIndexedHS<0, 0b1101, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
+defm MLA : SIMDVectorIndexedHSTied<1, 0b0000, "mla",
+ TriOpFrag<(add node:$LHS, (mul node:$MHS, node:$RHS))>>;
+defm MLS : SIMDVectorIndexedHSTied<1, 0b0100, "mls",
+ TriOpFrag<(sub node:$LHS, (mul node:$MHS, node:$RHS))>>;
+defm MUL : SIMDVectorIndexedHS<0, 0b1000, "mul", mul>;
+defm SMLAL : SIMDVectorIndexedLongSDTied<0, 0b0010, "smlal",
+ TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
+defm SMLSL : SIMDVectorIndexedLongSDTied<0, 0b0110, "smlsl",
+ TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
+defm SMULL : SIMDVectorIndexedLongSD<0, 0b1010, "smull",
+ int_aarch64_neon_smull>;
+defm SQDMLAL : SIMDIndexedLongSQDMLXSDTied<0, 0b0011, "sqdmlal",
+ int_aarch64_neon_sqadd>;
+defm SQDMLSL : SIMDIndexedLongSQDMLXSDTied<0, 0b0111, "sqdmlsl",
+ int_aarch64_neon_sqsub>;
+defm SQDMULL : SIMDIndexedLongSD<0, 0b1011, "sqdmull", int_aarch64_neon_sqdmull>;
+defm UMLAL : SIMDVectorIndexedLongSDTied<1, 0b0010, "umlal",
+ TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
+defm UMLSL : SIMDVectorIndexedLongSDTied<1, 0b0110, "umlsl",
+ TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
+defm UMULL : SIMDVectorIndexedLongSD<1, 0b1010, "umull",
+ int_aarch64_neon_umull>;
+
+// A scalar sqdmull with the second operand being a vector lane can be
+// handled directly with the indexed instruction encoding.
+def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
+ (vector_extract (v4i32 V128:$Vm),
+ VectorIndexS:$idx)),
+ (SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
+
+//----------------------------------------------------------------------------
+// AdvSIMD scalar shift instructions
+//----------------------------------------------------------------------------
+defm FCVTZS : SIMDScalarRShiftSD<0, 0b11111, "fcvtzs">;
+defm FCVTZU : SIMDScalarRShiftSD<1, 0b11111, "fcvtzu">;
+defm SCVTF : SIMDScalarRShiftSD<0, 0b11100, "scvtf">;
+defm UCVTF : SIMDScalarRShiftSD<1, 0b11100, "ucvtf">;
+// Codegen patterns for the above. We don't put these directly on the
+// instructions because TableGen's type inference can't handle the truth.
+// Having the same base pattern for fp <--> int totally freaks it out.
+def : Pat<(int_aarch64_neon_vcvtfp2fxs FPR32:$Rn, vecshiftR32:$imm),
+ (FCVTZSs FPR32:$Rn, vecshiftR32:$imm)>;
+def : Pat<(int_aarch64_neon_vcvtfp2fxu FPR32:$Rn, vecshiftR32:$imm),
+ (FCVTZUs FPR32:$Rn, vecshiftR32:$imm)>;
+def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f64 FPR64:$Rn), vecshiftR64:$imm)),
+ (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
+def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f64 FPR64:$Rn), vecshiftR64:$imm)),
+ (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
+def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxs (v1f64 FPR64:$Rn),
+ vecshiftR64:$imm)),
+ (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
+def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxu (v1f64 FPR64:$Rn),
+ vecshiftR64:$imm)),
+ (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
+def : Pat<(int_aarch64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR32:$imm),
+ (SCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
+def : Pat<(int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR32:$imm),
+ (UCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
+def : Pat<(f64 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
+ (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
+def : Pat<(f64 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
+ (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
+def : Pat<(v1f64 (int_aarch64_neon_vcvtfxs2fp (v1i64 FPR64:$Rn),
+ vecshiftR64:$imm)),
+ (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
+def : Pat<(v1f64 (int_aarch64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
+ vecshiftR64:$imm)),
+ (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
+
+defm SHL : SIMDScalarLShiftD< 0, 0b01010, "shl", AArch64vshl>;
+defm SLI : SIMDScalarLShiftDTied<1, 0b01010, "sli">;
+defm SQRSHRN : SIMDScalarRShiftBHS< 0, 0b10011, "sqrshrn",
+ int_aarch64_neon_sqrshrn>;
+defm SQRSHRUN : SIMDScalarRShiftBHS< 1, 0b10001, "sqrshrun",
+ int_aarch64_neon_sqrshrun>;
+defm SQSHLU : SIMDScalarLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
+defm SQSHL : SIMDScalarLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
+defm SQSHRN : SIMDScalarRShiftBHS< 0, 0b10010, "sqshrn",
+ int_aarch64_neon_sqshrn>;
+defm SQSHRUN : SIMDScalarRShiftBHS< 1, 0b10000, "sqshrun",
+ int_aarch64_neon_sqshrun>;
+defm SRI : SIMDScalarRShiftDTied< 1, 0b01000, "sri">;
+defm SRSHR : SIMDScalarRShiftD< 0, 0b00100, "srshr", AArch64srshri>;
+defm SRSRA : SIMDScalarRShiftDTied< 0, 0b00110, "srsra",
+ TriOpFrag<(add node:$LHS,
+ (AArch64srshri node:$MHS, node:$RHS))>>;
+defm SSHR : SIMDScalarRShiftD< 0, 0b00000, "sshr", AArch64vashr>;
+defm SSRA : SIMDScalarRShiftDTied< 0, 0b00010, "ssra",
+ TriOpFrag<(add node:$LHS,
+ (AArch64vashr node:$MHS, node:$RHS))>>;
+defm UQRSHRN : SIMDScalarRShiftBHS< 1, 0b10011, "uqrshrn",
+ int_aarch64_neon_uqrshrn>;
+defm UQSHL : SIMDScalarLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
+defm UQSHRN : SIMDScalarRShiftBHS< 1, 0b10010, "uqshrn",
+ int_aarch64_neon_uqshrn>;
+defm URSHR : SIMDScalarRShiftD< 1, 0b00100, "urshr", AArch64urshri>;
+defm URSRA : SIMDScalarRShiftDTied< 1, 0b00110, "ursra",
+ TriOpFrag<(add node:$LHS,
+ (AArch64urshri node:$MHS, node:$RHS))>>;
+defm USHR : SIMDScalarRShiftD< 1, 0b00000, "ushr", AArch64vlshr>;
+defm USRA : SIMDScalarRShiftDTied< 1, 0b00010, "usra",
+ TriOpFrag<(add node:$LHS,
+ (AArch64vlshr node:$MHS, node:$RHS))>>;
+
+//----------------------------------------------------------------------------
+// AdvSIMD vector shift instructions
+//----------------------------------------------------------------------------
+defm FCVTZS:SIMDVectorRShiftSD<0, 0b11111, "fcvtzs", int_aarch64_neon_vcvtfp2fxs>;
+defm FCVTZU:SIMDVectorRShiftSD<1, 0b11111, "fcvtzu", int_aarch64_neon_vcvtfp2fxu>;
+defm SCVTF: SIMDVectorRShiftSDToFP<0, 0b11100, "scvtf",
+ int_aarch64_neon_vcvtfxs2fp>;
+defm RSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10001, "rshrn",
+ int_aarch64_neon_rshrn>;
+defm SHL : SIMDVectorLShiftBHSD<0, 0b01010, "shl", AArch64vshl>;
+defm SHRN : SIMDVectorRShiftNarrowBHS<0, 0b10000, "shrn",
+ BinOpFrag<(trunc (AArch64vashr node:$LHS, node:$RHS))>>;
+defm SLI : SIMDVectorLShiftBHSDTied<1, 0b01010, "sli", int_aarch64_neon_vsli>;
+def : Pat<(v1i64 (int_aarch64_neon_vsli (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
+ (i32 vecshiftL64:$imm))),
+ (SLId FPR64:$Rd, FPR64:$Rn, vecshiftL64:$imm)>;
+defm SQRSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10011, "sqrshrn",
+ int_aarch64_neon_sqrshrn>;
+defm SQRSHRUN: SIMDVectorRShiftNarrowBHS<1, 0b10001, "sqrshrun",
+ int_aarch64_neon_sqrshrun>;
+defm SQSHLU : SIMDVectorLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
+defm SQSHL : SIMDVectorLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
+defm SQSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10010, "sqshrn",
+ int_aarch64_neon_sqshrn>;
+defm SQSHRUN : SIMDVectorRShiftNarrowBHS<1, 0b10000, "sqshrun",
+ int_aarch64_neon_sqshrun>;
+defm SRI : SIMDVectorRShiftBHSDTied<1, 0b01000, "sri", int_aarch64_neon_vsri>;
+def : Pat<(v1i64 (int_aarch64_neon_vsri (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
+ (i32 vecshiftR64:$imm))),
+ (SRId FPR64:$Rd, FPR64:$Rn, vecshiftR64:$imm)>;
+defm SRSHR : SIMDVectorRShiftBHSD<0, 0b00100, "srshr", AArch64srshri>;
+defm SRSRA : SIMDVectorRShiftBHSDTied<0, 0b00110, "srsra",
+ TriOpFrag<(add node:$LHS,
+ (AArch64srshri node:$MHS, node:$RHS))> >;
+defm SSHLL : SIMDVectorLShiftLongBHSD<0, 0b10100, "sshll",
+ BinOpFrag<(AArch64vshl (sext node:$LHS), node:$RHS)>>;
+
+defm SSHR : SIMDVectorRShiftBHSD<0, 0b00000, "sshr", AArch64vashr>;
+defm SSRA : SIMDVectorRShiftBHSDTied<0, 0b00010, "ssra",
+ TriOpFrag<(add node:$LHS, (AArch64vashr node:$MHS, node:$RHS))>>;
+defm UCVTF : SIMDVectorRShiftSDToFP<1, 0b11100, "ucvtf",
+ int_aarch64_neon_vcvtfxu2fp>;
+defm UQRSHRN : SIMDVectorRShiftNarrowBHS<1, 0b10011, "uqrshrn",
+ int_aarch64_neon_uqrshrn>;
+defm UQSHL : SIMDVectorLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
+defm UQSHRN : SIMDVectorRShiftNarrowBHS<1, 0b10010, "uqshrn",
+ int_aarch64_neon_uqshrn>;
+defm URSHR : SIMDVectorRShiftBHSD<1, 0b00100, "urshr", AArch64urshri>;
+defm URSRA : SIMDVectorRShiftBHSDTied<1, 0b00110, "ursra",
+ TriOpFrag<(add node:$LHS,
+ (AArch64urshri node:$MHS, node:$RHS))> >;
+defm USHLL : SIMDVectorLShiftLongBHSD<1, 0b10100, "ushll",
+ BinOpFrag<(AArch64vshl (zext node:$LHS), node:$RHS)>>;
+defm USHR : SIMDVectorRShiftBHSD<1, 0b00000, "ushr", AArch64vlshr>;
+defm USRA : SIMDVectorRShiftBHSDTied<1, 0b00010, "usra",
+ TriOpFrag<(add node:$LHS, (AArch64vlshr node:$MHS, node:$RHS))> >;
+
+// SHRN patterns for when a logical right shift was used instead of arithmetic
+// (the immediate guarantees no sign bits actually end up in the result so it
+// doesn't matter).
+def : Pat<(v8i8 (trunc (AArch64vlshr (v8i16 V128:$Rn), vecshiftR16Narrow:$imm))),
+ (SHRNv8i8_shift V128:$Rn, vecshiftR16Narrow:$imm)>;
+def : Pat<(v4i16 (trunc (AArch64vlshr (v4i32 V128:$Rn), vecshiftR32Narrow:$imm))),
+ (SHRNv4i16_shift V128:$Rn, vecshiftR32Narrow:$imm)>;
+def : Pat<(v2i32 (trunc (AArch64vlshr (v2i64 V128:$Rn), vecshiftR64Narrow:$imm))),
+ (SHRNv2i32_shift V128:$Rn, vecshiftR64Narrow:$imm)>;
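+
+// Hedged sketch (assumed C source, not from this patch): for narrowing shifts
+// of at most the destination element width, a logical and an arithmetic shift
+// produce the same low bits, so SHRN is usable for both.
+//   #include <arm_neon.h>
+//   uint8x8_t narrow(uint16x8_t v) { return vshrn_n_u16(v, 5); }
+// Expected selection: "shrn v0.8b, v0.8h, #5" via the pattern above.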
+
+def : Pat<(v16i8 (concat_vectors (v8i8 V64:$Rd),
+ (trunc (AArch64vlshr (v8i16 V128:$Rn),
+ vecshiftR16Narrow:$imm)))),
+ (SHRNv16i8_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
+ V128:$Rn, vecshiftR16Narrow:$imm)>;
+def : Pat<(v8i16 (concat_vectors (v4i16 V64:$Rd),
+ (trunc (AArch64vlshr (v4i32 V128:$Rn),
+ vecshiftR32Narrow:$imm)))),
+ (SHRNv8i16_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
+ V128:$Rn, vecshiftR32Narrow:$imm)>;
+def : Pat<(v4i32 (concat_vectors (v2i32 V64:$Rd),
+ (trunc (AArch64vlshr (v2i64 V128:$Rn),
+ vecshiftR64Narrow:$imm)))),
+ (SHRNv4i32_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
+ V128:$Rn, vecshiftR32Narrow:$imm)>;
+
+// Vector sign and zero extensions are implemented with SSHLL and USHLL.
+// Anyexts are implemented as zexts.
+def : Pat<(v8i16 (sext (v8i8 V64:$Rn))), (SSHLLv8i8_shift V64:$Rn, (i32 0))>;
+def : Pat<(v8i16 (zext (v8i8 V64:$Rn))), (USHLLv8i8_shift V64:$Rn, (i32 0))>;
+def : Pat<(v8i16 (anyext (v8i8 V64:$Rn))), (USHLLv8i8_shift V64:$Rn, (i32 0))>;
+def : Pat<(v4i32 (sext (v4i16 V64:$Rn))), (SSHLLv4i16_shift V64:$Rn, (i32 0))>;
+def : Pat<(v4i32 (zext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
+def : Pat<(v4i32 (anyext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
+def : Pat<(v2i64 (sext (v2i32 V64:$Rn))), (SSHLLv2i32_shift V64:$Rn, (i32 0))>;
+def : Pat<(v2i64 (zext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
+def : Pat<(v2i64 (anyext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
+// Also match an extend from the upper half of a 128-bit source register.
+def : Pat<(v8i16 (anyext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
+ (USHLLv16i8_shift V128:$Rn, (i32 0))>;
+def : Pat<(v8i16 (zext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
+ (USHLLv16i8_shift V128:$Rn, (i32 0))>;
+def : Pat<(v8i16 (sext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
+ (SSHLLv16i8_shift V128:$Rn, (i32 0))>;
+def : Pat<(v4i32 (anyext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
+ (USHLLv8i16_shift V128:$Rn, (i32 0))>;
+def : Pat<(v4i32 (zext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
+ (USHLLv8i16_shift V128:$Rn, (i32 0))>;
+def : Pat<(v4i32 (sext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
+ (SSHLLv8i16_shift V128:$Rn, (i32 0))>;
+def : Pat<(v2i64 (anyext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
+ (USHLLv4i32_shift V128:$Rn, (i32 0))>;
+def : Pat<(v2i64 (zext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
+ (USHLLv4i32_shift V128:$Rn, (i32 0))>;
+def : Pat<(v2i64 (sext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
+ (SSHLLv4i32_shift V128:$Rn, (i32 0))>;
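+
+// Hedged sketch (assumed C source, not from this patch) of the widening moves
+// covered by the zero-shift patterns above:
+//   #include <arm_neon.h>
+//   int16x8_t  widen_s(int8x8_t v)  { return vmovl_s8(v); }
+//   uint16x8_t widen_u(uint8x8_t v) { return vmovl_u8(v); }
+// Expected selection: "sshll v0.8h, v0.8b, #0" and "ushll v0.8h, v0.8b, #0".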
+
+// Vector shift sxtl aliases
+def : InstAlias<"sxtl.8h $dst, $src1",
+ (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
+def : InstAlias<"sxtl $dst.8h, $src1.8b",
+ (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
+def : InstAlias<"sxtl.4s $dst, $src1",
+ (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
+def : InstAlias<"sxtl $dst.4s, $src1.4h",
+ (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
+def : InstAlias<"sxtl.2d $dst, $src1",
+ (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
+def : InstAlias<"sxtl $dst.2d, $src1.2s",
+ (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
+
+// Vector shift sxtl2 aliases
+def : InstAlias<"sxtl2.8h $dst, $src1",
+ (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
+def : InstAlias<"sxtl2 $dst.8h, $src1.16b",
+ (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
+def : InstAlias<"sxtl2.4s $dst, $src1",
+ (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
+def : InstAlias<"sxtl2 $dst.4s, $src1.8h",
+ (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
+def : InstAlias<"sxtl2.2d $dst, $src1",
+ (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
+def : InstAlias<"sxtl2 $dst.2d, $src1.4s",
+ (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
+
+// Vector shift uxtl aliases
+def : InstAlias<"uxtl.8h $dst, $src1",
+ (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
+def : InstAlias<"uxtl $dst.8h, $src1.8b",
+ (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
+def : InstAlias<"uxtl.4s $dst, $src1",
+ (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
+def : InstAlias<"uxtl $dst.4s, $src1.4h",
+ (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
+def : InstAlias<"uxtl.2d $dst, $src1",
+ (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
+def : InstAlias<"uxtl $dst.2d, $src1.2s",
+ (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
+
+// Vector shift uxtl2 aliases
+def : InstAlias<"uxtl2.8h $dst, $src1",
+ (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
+def : InstAlias<"uxtl2 $dst.8h, $src1.16b",
+ (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
+def : InstAlias<"uxtl2.4s $dst, $src1",
+ (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
+def : InstAlias<"uxtl2 $dst.4s, $src1.8h",
+ (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
+def : InstAlias<"uxtl2.2d $dst, $src1",
+ (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
+def : InstAlias<"uxtl2 $dst.2d, $src1.4s",
+ (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
+
+// If an integer is about to be converted to a floating point value,
+// just load it on the floating point unit.
+// These patterns are more complex because floating point loads do not
+// support sign extension.
+// The sign extension has to be explicitly added and is only supported for
+// one step: byte-to-half, half-to-word, word-to-doubleword.
+// SCVTF GPR -> FPR is 9 cycles.
+// SCVTF FPR -> FPR is 4 cycles.
+// (sign extension with lengthening) SXTL FPR -> FPR is 2 cycles.
+// Therefore, we can do 2 sign extensions and one SCVTF FPR -> FPR
+// and still be faster.
+// However, this is not good for code size.
+// 8-bit -> float: 2 size step-ups.
+class SExtLoadi8CVTf32Pat<dag addrmode, dag INST>
+ : Pat<(f32 (sint_to_fp (i32 (sextloadi8 addrmode)))),
+ (SCVTFv1i32 (f32 (EXTRACT_SUBREG
+ (SSHLLv4i16_shift
+ (f64
+ (EXTRACT_SUBREG
+ (SSHLLv8i8_shift
+ (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+ INST,
+ bsub),
+ 0),
+ dsub)),
+ 0),
+ ssub)))>, Requires<[NotForCodeSize]>;
+
+def : SExtLoadi8CVTf32Pat<(ro8.Wpat GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext),
+ (LDRBroW GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext)>;
+def : SExtLoadi8CVTf32Pat<(ro8.Xpat GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext),
+ (LDRBroX GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext)>;
+def : SExtLoadi8CVTf32Pat<(am_indexed8 GPR64sp:$Rn, uimm12s1:$offset),
+ (LDRBui GPR64sp:$Rn, uimm12s1:$offset)>;
+def : SExtLoadi8CVTf32Pat<(am_unscaled8 GPR64sp:$Rn, simm9:$offset),
+ (LDURBi GPR64sp:$Rn, simm9:$offset)>;
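+
+// Hedged sketch (assumed C source, not from this patch) of the 8-bit case
+// handled above; when not optimising for size, the conversion is expected to
+// stay on the FP/SIMD unit:
+//   float cvt(const signed char *p) { return (float)*p; }
+// roughly "ldr b0, [x0]; sshll v0.8h, v0.8b, #0; sshll v0.4s, v0.4h, #0;
+// scvtf s0, s0" instead of a load into a GPR followed by SCVTF GPR -> FPR.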
+
+// 16-bits -> float. 1 size step-up.
+class SExtLoadi16CVTf32Pat<dag addrmode, dag INST>
+ : Pat<(f32 (sint_to_fp (i32 (sextloadi16 addrmode)))),
+ (SCVTFv1i32 (f32 (EXTRACT_SUBREG
+ (SSHLLv4i16_shift
+ (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+ INST,
+ hsub),
+ 0),
+ ssub)))>, Requires<[NotForCodeSize]>;
+
+def : SExtLoadi16CVTf32Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
+ (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
+def : SExtLoadi16CVTf32Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
+ (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
+def : SExtLoadi16CVTf32Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
+ (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
+def : SExtLoadi16CVTf32Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
+ (LDURHi GPR64sp:$Rn, simm9:$offset)>;
+
+// 32-bit to 32-bit conversions are handled in the target-specific dag combine
+// performIntToFpCombine.
+// 64-bit integer to 32-bit floating point is not possible with
+// SCVTF on floating point registers (both source and destination
+// must have the same size).
+
+// Here are the patterns for 8, 16, 32, and 64-bit to double.
+// 8-bit -> double: 3 size step-ups would be needed, so we give up.
+// 16-bit -> double: 2 size step-ups.
+class SExtLoadi16CVTf64Pat<dag addrmode, dag INST>
+ : Pat <(f64 (sint_to_fp (i32 (sextloadi16 addrmode)))),
+ (SCVTFv1i64 (f64 (EXTRACT_SUBREG
+ (SSHLLv2i32_shift
+ (f64
+ (EXTRACT_SUBREG
+ (SSHLLv4i16_shift
+ (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+ INST,
+ hsub),
+ 0),
+ dsub)),
+ 0),
+ dsub)))>, Requires<[NotForCodeSize]>;
+
+def : SExtLoadi16CVTf64Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
+ (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
+def : SExtLoadi16CVTf64Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
+ (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
+def : SExtLoadi16CVTf64Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
+ (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
+def : SExtLoadi16CVTf64Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
+ (LDURHi GPR64sp:$Rn, simm9:$offset)>;
+// 32-bits -> double. 1 size step-up.
+class SExtLoadi32CVTf64Pat<dag addrmode, dag INST>
+ : Pat <(f64 (sint_to_fp (i32 (load addrmode)))),
+ (SCVTFv1i64 (f64 (EXTRACT_SUBREG
+ (SSHLLv2i32_shift
+ (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+ INST,
+ ssub),
+ 0),
+ dsub)))>, Requires<[NotForCodeSize]>;
+
+def : SExtLoadi32CVTf64Pat<(ro32.Wpat GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext),
+ (LDRSroW GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext)>;
+def : SExtLoadi32CVTf64Pat<(ro32.Xpat GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext),
+ (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext)>;
+def : SExtLoadi32CVTf64Pat<(am_indexed32 GPR64sp:$Rn, uimm12s4:$offset),
+ (LDRSui GPR64sp:$Rn, uimm12s4:$offset)>;
+def : SExtLoadi32CVTf64Pat<(am_unscaled32 GPR64sp:$Rn, simm9:$offset),
+ (LDURSi GPR64sp:$Rn, simm9:$offset)>;
+
+// 64-bit -> double is handled in the target-specific dag combine
+// performIntToFpCombine.
+
+
+//----------------------------------------------------------------------------
+// AdvSIMD Load-Store Structure
+//----------------------------------------------------------------------------
+defm LD1 : SIMDLd1Multiple<"ld1">;
+defm LD2 : SIMDLd2Multiple<"ld2">;
+defm LD3 : SIMDLd3Multiple<"ld3">;
+defm LD4 : SIMDLd4Multiple<"ld4">;
+
+defm ST1 : SIMDSt1Multiple<"st1">;
+defm ST2 : SIMDSt2Multiple<"st2">;
+defm ST3 : SIMDSt3Multiple<"st3">;
+defm ST4 : SIMDSt4Multiple<"st4">;
+
+class Ld1Pat<ValueType ty, Instruction INST>
+ : Pat<(ty (load GPR64sp:$Rn)), (INST GPR64sp:$Rn)>;
+
+def : Ld1Pat<v16i8, LD1Onev16b>;
+def : Ld1Pat<v8i16, LD1Onev8h>;
+def : Ld1Pat<v4i32, LD1Onev4s>;
+def : Ld1Pat<v2i64, LD1Onev2d>;
+def : Ld1Pat<v8i8, LD1Onev8b>;
+def : Ld1Pat<v4i16, LD1Onev4h>;
+def : Ld1Pat<v2i32, LD1Onev2s>;
+def : Ld1Pat<v1i64, LD1Onev1d>;
+
+class St1Pat<ValueType ty, Instruction INST>
+ : Pat<(store ty:$Vt, GPR64sp:$Rn),
+ (INST ty:$Vt, GPR64sp:$Rn)>;
+
+def : St1Pat<v16i8, ST1Onev16b>;
+def : St1Pat<v8i16, ST1Onev8h>;
+def : St1Pat<v4i32, ST1Onev4s>;
+def : St1Pat<v2i64, ST1Onev2d>;
+def : St1Pat<v8i8, ST1Onev8b>;
+def : St1Pat<v4i16, ST1Onev4h>;
+def : St1Pat<v2i32, ST1Onev2s>;
+def : St1Pat<v1i64, ST1Onev1d>;
+
+//---
+// Single-element
+//---
+
+defm LD1R : SIMDLdR<0, 0b110, 0, "ld1r", "One", 1, 2, 4, 8>;
+defm LD2R : SIMDLdR<1, 0b110, 0, "ld2r", "Two", 2, 4, 8, 16>;
+defm LD3R : SIMDLdR<0, 0b111, 0, "ld3r", "Three", 3, 6, 12, 24>;
+defm LD4R : SIMDLdR<1, 0b111, 0, "ld4r", "Four", 4, 8, 16, 32>;
+let mayLoad = 1, neverHasSideEffects = 1 in {
+defm LD1 : SIMDLdSingleBTied<0, 0b000, "ld1", VecListOneb, GPR64pi1>;
+defm LD1 : SIMDLdSingleHTied<0, 0b010, 0, "ld1", VecListOneh, GPR64pi2>;
+defm LD1 : SIMDLdSingleSTied<0, 0b100, 0b00, "ld1", VecListOnes, GPR64pi4>;
+defm LD1 : SIMDLdSingleDTied<0, 0b100, 0b01, "ld1", VecListOned, GPR64pi8>;
+defm LD2 : SIMDLdSingleBTied<1, 0b000, "ld2", VecListTwob, GPR64pi2>;
+defm LD2 : SIMDLdSingleHTied<1, 0b010, 0, "ld2", VecListTwoh, GPR64pi4>;
+defm LD2 : SIMDLdSingleSTied<1, 0b100, 0b00, "ld2", VecListTwos, GPR64pi8>;
+defm LD2 : SIMDLdSingleDTied<1, 0b100, 0b01, "ld2", VecListTwod, GPR64pi16>;
+defm LD3 : SIMDLdSingleBTied<0, 0b001, "ld3", VecListThreeb, GPR64pi3>;
+defm LD3 : SIMDLdSingleHTied<0, 0b011, 0, "ld3", VecListThreeh, GPR64pi6>;
+defm LD3 : SIMDLdSingleSTied<0, 0b101, 0b00, "ld3", VecListThrees, GPR64pi12>;
+defm LD3 : SIMDLdSingleDTied<0, 0b101, 0b01, "ld3", VecListThreed, GPR64pi24>;
+defm LD4 : SIMDLdSingleBTied<1, 0b001, "ld4", VecListFourb, GPR64pi4>;
+defm LD4 : SIMDLdSingleHTied<1, 0b011, 0, "ld4", VecListFourh, GPR64pi8>;
+defm LD4 : SIMDLdSingleSTied<1, 0b101, 0b00, "ld4", VecListFours, GPR64pi16>;
+defm LD4 : SIMDLdSingleDTied<1, 0b101, 0b01, "ld4", VecListFourd, GPR64pi32>;
+}
+
+def : Pat<(v8i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
+ (LD1Rv8b GPR64sp:$Rn)>;
+def : Pat<(v16i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
+ (LD1Rv16b GPR64sp:$Rn)>;
+def : Pat<(v4i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
+ (LD1Rv4h GPR64sp:$Rn)>;
+def : Pat<(v8i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
+ (LD1Rv8h GPR64sp:$Rn)>;
+def : Pat<(v2i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
+ (LD1Rv2s GPR64sp:$Rn)>;
+def : Pat<(v4i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
+ (LD1Rv4s GPR64sp:$Rn)>;
+def : Pat<(v2i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
+ (LD1Rv2d GPR64sp:$Rn)>;
+def : Pat<(v1i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
+ (LD1Rv1d GPR64sp:$Rn)>;
+// Grab the floating point version too
+def : Pat<(v2f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
+ (LD1Rv2s GPR64sp:$Rn)>;
+def : Pat<(v4f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
+ (LD1Rv4s GPR64sp:$Rn)>;
+def : Pat<(v2f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
+ (LD1Rv2d GPR64sp:$Rn)>;
+def : Pat<(v1f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
+ (LD1Rv1d GPR64sp:$Rn)>;
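+
+// Hedged sketch (assumed C source, not from this patch) of the dup-of-load
+// patterns above:
+//   #include <arm_neon.h>
+//   float32x4_t splat(const float *p) { return vld1q_dup_f32(p); }
+// Expected selection: a single "ld1r { v0.4s }, [x0]" instead of a scalar
+// load followed by a DUP.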
+
+class Ld1Lane128Pat<SDPatternOperator scalar_load, Operand VecIndex,
+ ValueType VTy, ValueType STy, Instruction LD1>
+ : Pat<(vector_insert (VTy VecListOne128:$Rd),
+ (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
+ (LD1 VecListOne128:$Rd, VecIndex:$idx, GPR64sp:$Rn)>;
+
+def : Ld1Lane128Pat<extloadi8, VectorIndexB, v16i8, i32, LD1i8>;
+def : Ld1Lane128Pat<extloadi16, VectorIndexH, v8i16, i32, LD1i16>;
+def : Ld1Lane128Pat<load, VectorIndexS, v4i32, i32, LD1i32>;
+def : Ld1Lane128Pat<load, VectorIndexS, v4f32, f32, LD1i32>;
+def : Ld1Lane128Pat<load, VectorIndexD, v2i64, i64, LD1i64>;
+def : Ld1Lane128Pat<load, VectorIndexD, v2f64, f64, LD1i64>;
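+
+// Hedged sketch (assumed C source, not from this patch) of Ld1Lane128Pat:
+//   #include <arm_neon.h>
+//   int32x4_t load_lane(const int32_t *p, int32x4_t v) {
+//     return vld1q_lane_s32(p, v, 2);
+//   }
+// Expected selection: "ld1 { v0.s }[2], [x0]" rather than an LDR plus INS.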
+
+class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
+ ValueType VTy, ValueType STy, Instruction LD1>
+ : Pat<(vector_insert (VTy VecListOne64:$Rd),
+ (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
+ (EXTRACT_SUBREG
+ (LD1 (SUBREG_TO_REG (i32 0), VecListOne64:$Rd, dsub),
+ VecIndex:$idx, GPR64sp:$Rn),
+ dsub)>;
+
+def : Ld1Lane64Pat<extloadi8, VectorIndexB, v8i8, i32, LD1i8>;
+def : Ld1Lane64Pat<extloadi16, VectorIndexH, v4i16, i32, LD1i16>;
+def : Ld1Lane64Pat<load, VectorIndexS, v2i32, i32, LD1i32>;
+def : Ld1Lane64Pat<load, VectorIndexS, v2f32, f32, LD1i32>;
+
+
+defm LD1 : SIMDLdSt1SingleAliases<"ld1">;
+defm LD2 : SIMDLdSt2SingleAliases<"ld2">;
+defm LD3 : SIMDLdSt3SingleAliases<"ld3">;
+defm LD4 : SIMDLdSt4SingleAliases<"ld4">;
+
+// Stores
+defm ST1 : SIMDStSingleB<0, 0b000, "st1", VecListOneb, GPR64pi1>;
+defm ST1 : SIMDStSingleH<0, 0b010, 0, "st1", VecListOneh, GPR64pi2>;
+defm ST1 : SIMDStSingleS<0, 0b100, 0b00, "st1", VecListOnes, GPR64pi4>;
+defm ST1 : SIMDStSingleD<0, 0b100, 0b01, "st1", VecListOned, GPR64pi8>;
+
+let AddedComplexity = 15 in
+class St1Lane128Pat<SDPatternOperator scalar_store, Operand VecIndex,
+ ValueType VTy, ValueType STy, Instruction ST1>
+ : Pat<(scalar_store
+ (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
+ GPR64sp:$Rn),
+ (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn)>;
+
+def : St1Lane128Pat<truncstorei8, VectorIndexB, v16i8, i32, ST1i8>;
+def : St1Lane128Pat<truncstorei16, VectorIndexH, v8i16, i32, ST1i16>;
+def : St1Lane128Pat<store, VectorIndexS, v4i32, i32, ST1i32>;
+def : St1Lane128Pat<store, VectorIndexS, v4f32, f32, ST1i32>;
+def : St1Lane128Pat<store, VectorIndexD, v2i64, i64, ST1i64>;
+def : St1Lane128Pat<store, VectorIndexD, v2f64, f64, ST1i64>;
+
+let AddedComplexity = 15 in
+class St1Lane64Pat<SDPatternOperator scalar_store, Operand VecIndex,
+ ValueType VTy, ValueType STy, Instruction ST1>
+ : Pat<(scalar_store
+ (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
+ GPR64sp:$Rn),
+ (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
+ VecIndex:$idx, GPR64sp:$Rn)>;
+
+def : St1Lane64Pat<truncstorei8, VectorIndexB, v8i8, i32, ST1i8>;
+def : St1Lane64Pat<truncstorei16, VectorIndexH, v4i16, i32, ST1i16>;
+def : St1Lane64Pat<store, VectorIndexS, v2i32, i32, ST1i32>;
+def : St1Lane64Pat<store, VectorIndexS, v2f32, f32, ST1i32>;
+
+multiclass St1LanePost64Pat<SDPatternOperator scalar_store, Operand VecIndex,
+ ValueType VTy, ValueType STy, Instruction ST1,
+ int offset> {
+ def : Pat<(scalar_store
+ (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
+ GPR64sp:$Rn, offset),
+ (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
+ VecIndex:$idx, GPR64sp:$Rn, XZR)>;
+
+ def : Pat<(scalar_store
+ (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
+ GPR64sp:$Rn, GPR64:$Rm),
+ (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
+ VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
+}
+
+defm : St1LanePost64Pat<post_truncsti8, VectorIndexB, v8i8, i32, ST1i8_POST, 1>;
+defm : St1LanePost64Pat<post_truncsti16, VectorIndexH, v4i16, i32, ST1i16_POST,
+ 2>;
+defm : St1LanePost64Pat<post_store, VectorIndexS, v2i32, i32, ST1i32_POST, 4>;
+defm : St1LanePost64Pat<post_store, VectorIndexS, v2f32, f32, ST1i32_POST, 4>;
+defm : St1LanePost64Pat<post_store, VectorIndexD, v1i64, i64, ST1i64_POST, 8>;
+defm : St1LanePost64Pat<post_store, VectorIndexD, v1f64, f64, ST1i64_POST, 8>;
+
+multiclass St1LanePost128Pat<SDPatternOperator scalar_store, Operand VecIndex,
+ ValueType VTy, ValueType STy, Instruction ST1,
+ int offset> {
+ def : Pat<(scalar_store
+ (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
+ GPR64sp:$Rn, offset),
+ (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, XZR)>;
+
+ def : Pat<(scalar_store
+ (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
+ GPR64sp:$Rn, GPR64:$Rm),
+ (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
+}
+
+defm : St1LanePost128Pat<post_truncsti8, VectorIndexB, v16i8, i32, ST1i8_POST,
+ 1>;
+defm : St1LanePost128Pat<post_truncsti16, VectorIndexH, v8i16, i32, ST1i16_POST,
+ 2>;
+defm : St1LanePost128Pat<post_store, VectorIndexS, v4i32, i32, ST1i32_POST, 4>;
+defm : St1LanePost128Pat<post_store, VectorIndexS, v4f32, f32, ST1i32_POST, 4>;
+defm : St1LanePost128Pat<post_store, VectorIndexD, v2i64, i64, ST1i64_POST, 8>;
+defm : St1LanePost128Pat<post_store, VectorIndexD, v2f64, f64, ST1i64_POST, 8>;
+
+let mayStore = 1, neverHasSideEffects = 1 in {
+defm ST2 : SIMDStSingleB<1, 0b000, "st2", VecListTwob, GPR64pi2>;
+defm ST2 : SIMDStSingleH<1, 0b010, 0, "st2", VecListTwoh, GPR64pi4>;
+defm ST2 : SIMDStSingleS<1, 0b100, 0b00, "st2", VecListTwos, GPR64pi8>;
+defm ST2 : SIMDStSingleD<1, 0b100, 0b01, "st2", VecListTwod, GPR64pi16>;
+defm ST3 : SIMDStSingleB<0, 0b001, "st3", VecListThreeb, GPR64pi3>;
+defm ST3 : SIMDStSingleH<0, 0b011, 0, "st3", VecListThreeh, GPR64pi6>;
+defm ST3 : SIMDStSingleS<0, 0b101, 0b00, "st3", VecListThrees, GPR64pi12>;
+defm ST3 : SIMDStSingleD<0, 0b101, 0b01, "st3", VecListThreed, GPR64pi24>;
+defm ST4 : SIMDStSingleB<1, 0b001, "st4", VecListFourb, GPR64pi4>;
+defm ST4 : SIMDStSingleH<1, 0b011, 0, "st4", VecListFourh, GPR64pi8>;
+defm ST4 : SIMDStSingleS<1, 0b101, 0b00, "st4", VecListFours, GPR64pi16>;
+defm ST4 : SIMDStSingleD<1, 0b101, 0b01, "st4", VecListFourd, GPR64pi32>;
+}
+
+defm ST1 : SIMDLdSt1SingleAliases<"st1">;
+defm ST2 : SIMDLdSt2SingleAliases<"st2">;
+defm ST3 : SIMDLdSt3SingleAliases<"st3">;
+defm ST4 : SIMDLdSt4SingleAliases<"st4">;
+
+//----------------------------------------------------------------------------
+// Crypto extensions
+//----------------------------------------------------------------------------
+
+def AESErr : AESTiedInst<0b0100, "aese", int_aarch64_crypto_aese>;
+def AESDrr : AESTiedInst<0b0101, "aesd", int_aarch64_crypto_aesd>;
+def AESMCrr : AESInst< 0b0110, "aesmc", int_aarch64_crypto_aesmc>;
+def AESIMCrr : AESInst< 0b0111, "aesimc", int_aarch64_crypto_aesimc>;
+
+def SHA1Crrr : SHATiedInstQSV<0b000, "sha1c", int_aarch64_crypto_sha1c>;
+def SHA1Prrr : SHATiedInstQSV<0b001, "sha1p", int_aarch64_crypto_sha1p>;
+def SHA1Mrrr : SHATiedInstQSV<0b010, "sha1m", int_aarch64_crypto_sha1m>;
+def SHA1SU0rrr : SHATiedInstVVV<0b011, "sha1su0", int_aarch64_crypto_sha1su0>;
+def SHA256Hrrr : SHATiedInstQQV<0b100, "sha256h", int_aarch64_crypto_sha256h>;
+def SHA256H2rrr : SHATiedInstQQV<0b101, "sha256h2",int_aarch64_crypto_sha256h2>;
+def SHA256SU1rrr :SHATiedInstVVV<0b110, "sha256su1",int_aarch64_crypto_sha256su1>;
+
+def SHA1Hrr : SHAInstSS< 0b0000, "sha1h", int_aarch64_crypto_sha1h>;
+def SHA1SU1rr : SHATiedInstVV<0b0001, "sha1su1", int_aarch64_crypto_sha1su1>;
+def SHA256SU0rr : SHATiedInstVV<0b0010, "sha256su0",int_aarch64_crypto_sha256su0>;
+
+//----------------------------------------------------------------------------
+// Compiler-pseudos
+//----------------------------------------------------------------------------
+// FIXME: Like for X86, these should go in their own separate .td file.
+
+// Any instruction that defines a 32-bit result zeroes the high half of the
+// register. Truncate can be lowered to EXTRACT_SUBREG and CopyFromReg may
+// be copying from a truncate, so neither can be relied on; but any other
+// 32-bit operation will zero-extend up to 64 bits.
+// FIXME: X86 also checks for CMOV here. Do we need something similar?
+def def32 : PatLeaf<(i32 GPR32:$src), [{
+ return N->getOpcode() != ISD::TRUNCATE &&
+ N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
+ N->getOpcode() != ISD::CopyFromReg;
+}]>;
-// The simpler patterns deal with cases where no AND mask is actually needed
-// (either all bits are used or the low 32 bits are used).
-let AddedComplexity = 10 in {
-
-def : Pat<(A64Bfi i64:$src, i64:$Rn, imm:$ImmR, imm:$ImmS),
- (BFIxxii $src, $Rn,
- (bfi64_lsb_to_immr (i64 imm:$ImmR)),
- (bfi_width_to_imms (i64 imm:$ImmS)))>;
-
-def : Pat<(A64Bfi i32:$src, i32:$Rn, imm:$ImmR, imm:$ImmS),
- (BFIwwii $src, $Rn,
- (bfi32_lsb_to_immr (i64 imm:$ImmR)),
- (bfi_width_to_imms (i64 imm:$ImmS)))>;
-
-
-def : Pat<(and (A64Bfi i64:$src, i64:$Rn, imm:$ImmR, imm:$ImmS),
- (i64 4294967295)),
- (SUBREG_TO_REG (i64 0),
- (BFIwwii (EXTRACT_SUBREG $src, sub_32),
- (EXTRACT_SUBREG $Rn, sub_32),
- (bfi32_lsb_to_immr (i64 imm:$ImmR)),
- (bfi_width_to_imms (i64 imm:$ImmS))),
- sub_32)>;
-
-}
-
-//===----------------------------------------------------------------------===//
-// Miscellaneous patterns
-//===----------------------------------------------------------------------===//
-
-// Truncation from 64 to 32-bits just involves renaming your register.
-def : Pat<(i32 (trunc i64:$val)), (EXTRACT_SUBREG $val, sub_32)>;
-
-// Similarly, extension where we don't care about the high bits is
-// just a rename.
-def : Pat<(i64 (anyext i32:$val)),
- (INSERT_SUBREG (IMPLICIT_DEF), $val, sub_32)>;
-
-// SELECT instructions providing f128 types need to be handled by a
-// pseudo-instruction since the eventual code will need to introduce basic
-// blocks and control flow.
-def F128CSEL : PseudoInst<(outs FPR128:$Rd),
- (ins FPR128:$Rn, FPR128:$Rm, cond_code_op:$Cond),
- [(set f128:$Rd, (simple_select f128:$Rn, f128:$Rm))]> {
- let Uses = [NZCV];
- let usesCustomInserter = 1;
-}
-
-//===----------------------------------------------------------------------===//
-// Load/store patterns
-//===----------------------------------------------------------------------===//
-
-// There are lots of patterns here, because we need to allow at least three
-// parameters to vary independently.
-// 1. Instruction: "ldrb w9, [sp]", "ldrh w9, [sp]", ...
-// 2. LLVM source: zextloadi8, anyextloadi8, ...
-// 3. Address-generation: A64Wrapper, (add BASE, OFFSET), ...
+// In the case of a 32-bit def that is known to implicitly zero-extend,
+// we can use a SUBREG_TO_REG.
+def : Pat<(i64 (zext def32:$src)), (SUBREG_TO_REG (i64 0), GPR32:$src, sub_32)>;
+
+// For an anyext, we don't care what the high bits are, so we can perform an
+// INSERT_SUBREG into an IMPLICIT_DEF.
+def : Pat<(i64 (anyext GPR32:$src)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;
+
+// When we need to explicitly zero-extend, we use an unsigned bitfield move
+// instruction (UBFM) on the enclosing super-reg.
+def : Pat<(i64 (zext GPR32:$src)),
+ (UBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32), 0, 31)>;
+
+// To sign extend, we use a signed bitfield move instruction (SBFM) on the
+// containing super-reg.
+def : Pat<(i64 (sext GPR32:$src)),
+ (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32), 0, 31)>;
+def : Pat<(i64 (sext_inreg GPR64:$src, i32)), (SBFMXri GPR64:$src, 0, 31)>;
+def : Pat<(i64 (sext_inreg GPR64:$src, i16)), (SBFMXri GPR64:$src, 0, 15)>;
+def : Pat<(i64 (sext_inreg GPR64:$src, i8)), (SBFMXri GPR64:$src, 0, 7)>;
+def : Pat<(i64 (sext_inreg GPR64:$src, i1)), (SBFMXri GPR64:$src, 0, 0)>;
+def : Pat<(i32 (sext_inreg GPR32:$src, i16)), (SBFMWri GPR32:$src, 0, 15)>;
+def : Pat<(i32 (sext_inreg GPR32:$src, i8)), (SBFMWri GPR32:$src, 0, 7)>;
+def : Pat<(i32 (sext_inreg GPR32:$src, i1)), (SBFMWri GPR32:$src, 0, 0)>;
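+
+// Hedged sketch (assumed C source, not from this patch): the def32-based zext
+// pattern makes the widening in zext_add free (the 32-bit ADD already zeroed
+// the upper half), while sext is expected to become "sxtw x0, w0", the alias
+// of the SBFMXri ..., 0, 31 used above.
+//   unsigned long long zext_add(unsigned a, unsigned b) { return a + b; }
+//   long long sext(int a) { return (long long)a; }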
+
+def : Pat<(shl (sext_inreg GPR32:$Rn, i8), (i64 imm0_31:$imm)),
+ (SBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
+ (i64 (i32shift_sext_i8 imm0_31:$imm)))>;
+def : Pat<(shl (sext_inreg GPR64:$Rn, i8), (i64 imm0_63:$imm)),
+ (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
+ (i64 (i64shift_sext_i8 imm0_63:$imm)))>;
+
+def : Pat<(shl (sext_inreg GPR32:$Rn, i16), (i64 imm0_31:$imm)),
+ (SBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
+ (i64 (i32shift_sext_i16 imm0_31:$imm)))>;
+def : Pat<(shl (sext_inreg GPR64:$Rn, i16), (i64 imm0_63:$imm)),
+ (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
+ (i64 (i64shift_sext_i16 imm0_63:$imm)))>;
+
+def : Pat<(shl (i64 (sext GPR32:$Rn)), (i64 imm0_63:$imm)),
+ (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
+ (i64 (i64shift_a imm0_63:$imm)),
+ (i64 (i64shift_sext_i32 imm0_63:$imm)))>;
+
+// sra patterns have an AddedComplexity of 10, so make sure we have a higher
+// AddedComplexity for the following patterns since we want to match sext + sra
+// patterns before we attempt to match a single sra node.
+let AddedComplexity = 20 in {
+// We support all sext + sra combinations that preserve at least one bit of the
+// original value being sign extended, i.e. shifts of up to bitwidth-1 bits.
+def : Pat<(sra (sext_inreg GPR32:$Rn, i8), (i64 imm0_7:$imm)),
+ (SBFMWri GPR32:$Rn, (i64 imm0_7:$imm), 7)>;
+def : Pat<(sra (sext_inreg GPR64:$Rn, i8), (i64 imm0_7:$imm)),
+ (SBFMXri GPR64:$Rn, (i64 imm0_7:$imm), 7)>;
+
+def : Pat<(sra (sext_inreg GPR32:$Rn, i16), (i64 imm0_15:$imm)),
+ (SBFMWri GPR32:$Rn, (i64 imm0_15:$imm), 15)>;
+def : Pat<(sra (sext_inreg GPR64:$Rn, i16), (i64 imm0_15:$imm)),
+ (SBFMXri GPR64:$Rn, (i64 imm0_15:$imm), 15)>;
+
+def : Pat<(sra (i64 (sext GPR32:$Rn)), (i64 imm0_31:$imm)),
+ (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
+ (i64 imm0_31:$imm), 31)>;
+} // AddedComplexity = 20
+
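A minimal C sketch (illustrative only, not part of the .td file) of the sext + sra shape the higher-AddedComplexity patterns above are intended to catch, so that the extend and the shift fold into a single SBFM:

#include <stdint.h>

/* Sign-extend an i8 value and immediately shift it right arithmetically by a
   constant; roughly the (sra (sext_inreg ...)) DAG shape matched above. */
int32_t sext8_then_asr3(int8_t b) { return ((int32_t)b) >> 3; }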
+// To truncate, we can simply extract from a subregister.
+def : Pat<(i32 (trunc GPR64sp:$src)),
+ (i32 (EXTRACT_SUBREG GPR64sp:$src, sub_32))>;
+
+// __builtin_trap() uses the BRK instruction on AArch64.
+def : Pat<(trap), (BRK 1)>;
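For reference, __builtin_trap() in C is what produces the (trap) node, so a call like the one below ends up as a single BRK with immediate 1 per the pattern above (illustrative snippet, not part of the .td file):

void abort_now(void) { __builtin_trap(); }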
+
+// Conversions within AdvSIMD types in the same register size are free.
+// But because we need a consistent lane ordering, in big endian many
+// conversions require one or more REV instructions.
//
-// The biggest problem turns out to be the address-generation variable. At the
-// point of instantiation we need to produce two DAGs, one for the pattern and
-// one for the instruction. Doing this at the lowest level of classes doesn't
-// work.
+// Consider a simple memory load followed by a bitconvert then a store.
+// v0 = load v2i32
+// v1 = BITCAST v2i32 v0 to v4i16
+// store v4i16 v1
//
-// Consider the simple uimm12 addressing mode, and the desire to match both (add
-// GPR64xsp:$Rn, uimm12:$Offset) and GPR64xsp:$Rn, particularly on the
-// instruction side. We'd need to insert either "GPR64xsp" and "uimm12" or
-// "GPR64xsp" and "0" into an unknown dag. !subst is not capable of this
-// operation, and PatFrags are for selection not output.
+// In big endian mode every memory access has an implicit byte swap. LDR and
+// STR do a 64-bit byte swap, whereas LD1/ST1 do a byte swap per lane - that
+// is, they treat the vector as a sequence of elements to be byte-swapped.
+// The two pairs of instructions are fundamentally incompatible. We've decided
+// to use only LD1/ST1, to simplify the compiler implementation.
//
-// As a result, the address-generation patterns are the final
-// instantiations. However, we do still need to vary the operand for the address
-// further down (At the point we're deciding A64WrapperSmall, we don't know
-// the memory width of the operation).
-
-//===------------------------------
-// 1. Basic infrastructural defs
-//===------------------------------
-
-// First, some simple classes for !foreach and !subst to use:
-class Decls {
- dag pattern;
-}
-
-def decls : Decls;
-def ALIGN;
-def INST;
-def OFFSET;
-def SHIFT;
-
-// You can't use !subst on an actual immediate, but you *can* use it on an
-// operand record that happens to match a single immediate. So we do.
-def imm_eq0 : ImmLeaf<i64, [{ return Imm == 0; }]>;
-def imm_eq1 : ImmLeaf<i64, [{ return Imm == 1; }]>;
-def imm_eq2 : ImmLeaf<i64, [{ return Imm == 2; }]>;
-def imm_eq3 : ImmLeaf<i64, [{ return Imm == 3; }]>;
-def imm_eq4 : ImmLeaf<i64, [{ return Imm == 4; }]>;
-
-// If the low bits of a pointer are known to be 0 then an "or" is just as good
-// as addition for computing an offset. This fragment forwards that check for
-// TableGen's use.
-def add_like_or : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),
-[{
- return CurDAG->isBaseWithConstantOffset(SDValue(N, 0));
-}]>;
-
-// Load/store (unsigned immediate) operations with relocations against global
-// symbols (for lo12) are only valid if those symbols have correct alignment
-// (since the immediate offset is divided by the access scale, it can't have a
-// remainder).
+// LD1/ST1 perform the equivalent of a sequence of LDR/STR + REV. This makes
+// the original code sequence:
+// v0 = load v2i32
+// v1 = REV v2i32 (implicit)
+// v2 = BITCAST v2i32 v1 to v4i16
+// v3 = REV v4i16 v2 (implicit)
+// store v4i16 v3
//
-// The guaranteed alignment is provided as part of the WrapperSmall
-// operation, and checked against one of these.
-def any_align : ImmLeaf<i32, [{ (void)Imm; return true; }]>;
-def min_align2 : ImmLeaf<i32, [{ return Imm >= 2; }]>;
-def min_align4 : ImmLeaf<i32, [{ return Imm >= 4; }]>;
-def min_align8 : ImmLeaf<i32, [{ return Imm >= 8; }]>;
-def min_align16 : ImmLeaf<i32, [{ return Imm >= 16; }]>;
-
-// "Normal" load/store instructions can be used on atomic operations, provided
-// the ordering parameter is at most "monotonic". Anything above that needs
-// special handling with acquire/release instructions.
-class simple_load<PatFrag base>
- : PatFrag<(ops node:$ptr), (base node:$ptr), [{
- return cast<AtomicSDNode>(N)->getOrdering() <= Monotonic;
-}]>;
-
-def atomic_load_simple_i8 : simple_load<atomic_load_8>;
-def atomic_load_simple_i16 : simple_load<atomic_load_16>;
-def atomic_load_simple_i32 : simple_load<atomic_load_32>;
-def atomic_load_simple_i64 : simple_load<atomic_load_64>;
-
-class simple_store<PatFrag base>
- : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
- return cast<AtomicSDNode>(N)->getOrdering() <= Monotonic;
-}]>;
-
-def atomic_store_simple_i8 : simple_store<atomic_store_8>;
-def atomic_store_simple_i16 : simple_store<atomic_store_16>;
-def atomic_store_simple_i32 : simple_store<atomic_store_32>;
-def atomic_store_simple_i64 : simple_store<atomic_store_64>;
-
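As a source-level illustration of the "at most monotonic" condition above (a hedged C11 sketch, not part of the .td file), a relaxed atomic access maps to LLVM's monotonic ordering, so these PatFrags let it select an ordinary load or store:

#include <stdatomic.h>

int  load_relaxed(_Atomic int *p)         { return atomic_load_explicit(p, memory_order_relaxed); }
void store_relaxed(_Atomic int *p, int v) { atomic_store_explicit(p, v, memory_order_relaxed); }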
-//===------------------------------
-// 2. UImm12 and SImm9
-//===------------------------------
-
-// These instructions have two operands providing the address so they can be
-// treated similarly for most purposes.
-
-//===------------------------------
-// 2.1 Base patterns covering extend/truncate semantics
-//===------------------------------
-
-// Atomic patterns can be shared between integer operations of all sizes; a
-// quick multiclass here allows reuse.
-multiclass ls_atomic_pats<Instruction LOAD, Instruction STORE, dag Base,
- dag Offset, dag address, ValueType transty,
- ValueType sty> {
- def : Pat<(!cast<PatFrag>("atomic_load_simple_" # sty) address),
- (LOAD Base, Offset)>;
-
- def : Pat<(!cast<PatFrag>("atomic_store_simple_" # sty) address, transty:$Rt),
- (STORE $Rt, Base, Offset)>;
-}
-
-// Instructions accessing a memory chunk smaller than a register (or, in a
-// pinch, the same size) have a characteristic set of patterns they want to
-// match: extending loads and truncating stores. This class deals with the
-// sign-neutral version of those patterns.
+// But this is now broken - the value stored is different to the value loaded
+// due to lane reordering. To fix this, on every BITCAST we must perform two
+// other REVs:
+// v0 = load v2i32
+// v1 = REV v2i32 (implicit)
+// v2 = REV v2i32
+// v3 = BITCAST v2i32 v2 to v4i16
+// v4 = REV v4i16
+// v5 = REV v4i16 v4 (implicit)
+// store v4i16 v5
//
-// It will be instantiated across multiple addressing-modes.
-multiclass ls_small_pats<Instruction LOAD, Instruction STORE,
- dag Base, dag Offset,
- dag address, ValueType sty>
- : ls_atomic_pats<LOAD, STORE, Base, Offset, address, i32, sty> {
- def : Pat<(!cast<SDNode>(zextload # sty) address), (LOAD Base, Offset)>;
-
- def : Pat<(!cast<SDNode>(extload # sty) address), (LOAD Base, Offset)>;
-
- // For zero-extension to 64-bits we have to tell LLVM that the whole 64-bit
- // register was actually set.
- def : Pat<(i64 (!cast<SDNode>(zextload # sty) address)),
- (SUBREG_TO_REG (i64 0), (LOAD Base, Offset), sub_32)>;
-
- def : Pat<(i64 (!cast<SDNode>(extload # sty) address)),
- (SUBREG_TO_REG (i64 0), (LOAD Base, Offset), sub_32)>;
-
- def : Pat<(!cast<SDNode>(truncstore # sty) i32:$Rt, address),
- (STORE $Rt, Base, Offset)>;
-
- // For truncating store from 64-bits, we have to manually tell LLVM to
- // ignore the high bits of the x register.
- def : Pat<(!cast<SDNode>(truncstore # sty) i64:$Rt, address),
- (STORE (EXTRACT_SUBREG $Rt, sub_32), Base, Offset)>;
-}
-
-// Next come patterns for sign-extending loads.
-multiclass load_signed_pats<string T, string U, dag Base, dag Offset,
- dag address, ValueType sty> {
- def : Pat<(i32 (!cast<SDNode>("sextload" # sty) address)),
- (!cast<Instruction>("LDRS" # T # "w" # U) Base, Offset)>;
-
- def : Pat<(i64 (!cast<SDNode>("sextload" # sty) address)),
- (!cast<Instruction>("LDRS" # T # "x" # U) Base, Offset)>;
-
-}
-
-// and finally "natural-width" loads and stores come next.
-multiclass ls_neutral_pats<Instruction LOAD, Instruction STORE, dag Base,
- dag Offset, dag address, ValueType sty> {
- def : Pat<(sty (load address)), (LOAD Base, Offset)>;
- def : Pat<(store sty:$Rt, address), (STORE $Rt, Base, Offset)>;
-}
-
-// Integer operations also get atomic instructions to select for.
-multiclass ls_int_neutral_pats<Instruction LOAD, Instruction STORE, dag Base,
- dag Offset, dag address, ValueType sty>
- : ls_neutral_pats<LOAD, STORE, Base, Offset, address, sty>,
- ls_atomic_pats<LOAD, STORE, Base, Offset, address, sty, sty>;
-
-//===------------------------------
-// 2.2. Addressing-mode instantiations
-//===------------------------------
-
-multiclass uimm12_pats<dag address, dag Base, dag Offset> {
- defm : ls_small_pats<LS8_LDR, LS8_STR, Base,
- !foreach(decls.pattern, Offset,
- !subst(OFFSET, byte_uimm12, decls.pattern)),
- !foreach(decls.pattern, address,
- !subst(OFFSET, byte_uimm12,
- !subst(ALIGN, any_align, decls.pattern))),
- i8>;
- defm : ls_small_pats<LS16_LDR, LS16_STR, Base,
- !foreach(decls.pattern, Offset,
- !subst(OFFSET, hword_uimm12, decls.pattern)),
- !foreach(decls.pattern, address,
- !subst(OFFSET, hword_uimm12,
- !subst(ALIGN, min_align2, decls.pattern))),
- i16>;
- defm : ls_small_pats<LS32_LDR, LS32_STR, Base,
- !foreach(decls.pattern, Offset,
- !subst(OFFSET, word_uimm12, decls.pattern)),
- !foreach(decls.pattern, address,
- !subst(OFFSET, word_uimm12,
- !subst(ALIGN, min_align4, decls.pattern))),
- i32>;
-
- defm : ls_int_neutral_pats<LS32_LDR, LS32_STR, Base,
- !foreach(decls.pattern, Offset,
- !subst(OFFSET, word_uimm12, decls.pattern)),
- !foreach(decls.pattern, address,
- !subst(OFFSET, word_uimm12,
- !subst(ALIGN, min_align4, decls.pattern))),
- i32>;
-
- defm : ls_int_neutral_pats<LS64_LDR, LS64_STR, Base,
- !foreach(decls.pattern, Offset,
- !subst(OFFSET, dword_uimm12, decls.pattern)),
- !foreach(decls.pattern, address,
- !subst(OFFSET, dword_uimm12,
- !subst(ALIGN, min_align8, decls.pattern))),
- i64>;
-
- defm : ls_neutral_pats<LSFP16_LDR, LSFP16_STR, Base,
- !foreach(decls.pattern, Offset,
- !subst(OFFSET, hword_uimm12, decls.pattern)),
- !foreach(decls.pattern, address,
- !subst(OFFSET, hword_uimm12,
- !subst(ALIGN, min_align2, decls.pattern))),
- f16>;
-
- defm : ls_neutral_pats<LSFP32_LDR, LSFP32_STR, Base,
- !foreach(decls.pattern, Offset,
- !subst(OFFSET, word_uimm12, decls.pattern)),
- !foreach(decls.pattern, address,
- !subst(OFFSET, word_uimm12,
- !subst(ALIGN, min_align4, decls.pattern))),
- f32>;
-
- defm : ls_neutral_pats<LSFP64_LDR, LSFP64_STR, Base,
- !foreach(decls.pattern, Offset,
- !subst(OFFSET, dword_uimm12, decls.pattern)),
- !foreach(decls.pattern, address,
- !subst(OFFSET, dword_uimm12,
- !subst(ALIGN, min_align8, decls.pattern))),
- f64>;
-
- defm : ls_neutral_pats<LSFP128_LDR, LSFP128_STR, Base,
- !foreach(decls.pattern, Offset,
- !subst(OFFSET, qword_uimm12, decls.pattern)),
- !foreach(decls.pattern, address,
- !subst(OFFSET, qword_uimm12,
- !subst(ALIGN, min_align16, decls.pattern))),
- f128>;
-
- defm : load_signed_pats<"B", "", Base,
- !foreach(decls.pattern, Offset,
- !subst(OFFSET, byte_uimm12, decls.pattern)),
- !foreach(decls.pattern, address,
- !subst(OFFSET, byte_uimm12,
- !subst(ALIGN, any_align, decls.pattern))),
- i8>;
-
- defm : load_signed_pats<"H", "", Base,
- !foreach(decls.pattern, Offset,
- !subst(OFFSET, hword_uimm12, decls.pattern)),
- !foreach(decls.pattern, address,
- !subst(OFFSET, hword_uimm12,
- !subst(ALIGN, min_align2, decls.pattern))),
- i16>;
-
- def : Pat<(sextloadi32 !foreach(decls.pattern, address,
- !subst(OFFSET, word_uimm12,
- !subst(ALIGN, min_align4, decls.pattern)))),
- (LDRSWx Base, !foreach(decls.pattern, Offset,
- !subst(OFFSET, word_uimm12, decls.pattern)))>;
-}
-
-// Straightforward patterns of last resort: a pointer with or without an
-// appropriate offset.
-defm : uimm12_pats<(i64 i64:$Rn), (i64 i64:$Rn), (i64 0)>;
-defm : uimm12_pats<(add i64:$Rn, OFFSET:$UImm12),
- (i64 i64:$Rn), (i64 OFFSET:$UImm12)>;
-
-// The offset could be hidden behind an "or", of course:
-defm : uimm12_pats<(add_like_or i64:$Rn, OFFSET:$UImm12),
- (i64 i64:$Rn), (i64 OFFSET:$UImm12)>;
-
-// Global addresses under the small-absolute model should use these
-// instructions. There are ELF relocations specifically for it.
-defm : uimm12_pats<(A64WrapperSmall tglobaladdr:$Hi, tglobaladdr:$Lo12, ALIGN),
- (ADRPxi tglobaladdr:$Hi), (i64 tglobaladdr:$Lo12)>;
-
-defm : uimm12_pats<(A64WrapperSmall tglobaltlsaddr:$Hi, tglobaltlsaddr:$Lo12,
- ALIGN),
- (ADRPxi tglobaltlsaddr:$Hi), (i64 tglobaltlsaddr:$Lo12)>;
-
-// External symbols that make it this far should also get standard relocations.
-defm : uimm12_pats<(A64WrapperSmall texternalsym:$Hi, texternalsym:$Lo12,
- ALIGN),
- (ADRPxi texternalsym:$Hi), (i64 texternalsym:$Lo12)>;
-
-defm : uimm12_pats<(A64WrapperSmall tconstpool:$Hi, tconstpool:$Lo12, ALIGN),
- (ADRPxi tconstpool:$Hi), (i64 tconstpool:$Lo12)>;
-
-// We also want to use uimm12 instructions for local variables at the moment.
-def tframeindex_XFORM : SDNodeXForm<frameindex, [{
- int FI = cast<FrameIndexSDNode>(N)->getIndex();
- return CurDAG->getTargetFrameIndex(FI, MVT::i64);
-}]>;
-
-defm : uimm12_pats<(i64 frameindex:$Rn),
- (tframeindex_XFORM tframeindex:$Rn), (i64 0)>;
-
-// These can be much simpler than uimm12 because we don't have to change the
-// operand type (e.g. LDURB and LDURH take the same operands).
-multiclass simm9_pats<dag address, dag Base, dag Offset> {
- defm : ls_small_pats<LS8_LDUR, LS8_STUR, Base, Offset, address, i8>;
- defm : ls_small_pats<LS16_LDUR, LS16_STUR, Base, Offset, address, i16>;
-
- defm : ls_int_neutral_pats<LS32_LDUR, LS32_STUR, Base, Offset, address, i32>;
- defm : ls_int_neutral_pats<LS64_LDUR, LS64_STUR, Base, Offset, address, i64>;
-
- defm : ls_neutral_pats<LSFP16_LDUR, LSFP16_STUR, Base, Offset, address, f16>;
- defm : ls_neutral_pats<LSFP32_LDUR, LSFP32_STUR, Base, Offset, address, f32>;
- defm : ls_neutral_pats<LSFP64_LDUR, LSFP64_STUR, Base, Offset, address, f64>;
- defm : ls_neutral_pats<LSFP128_LDUR, LSFP128_STUR, Base, Offset, address,
- f128>;
-
- def : Pat<(i64 (zextloadi32 address)),
- (SUBREG_TO_REG (i64 0), (LS32_LDUR Base, Offset), sub_32)>;
-
- def : Pat<(truncstorei32 i64:$Rt, address),
- (LS32_STUR (EXTRACT_SUBREG $Rt, sub_32), Base, Offset)>;
-
- defm : load_signed_pats<"B", "_U", Base, Offset, address, i8>;
- defm : load_signed_pats<"H", "_U", Base, Offset, address, i16>;
- def : Pat<(sextloadi32 address), (LDURSWx Base, Offset)>;
-}
-
-defm : simm9_pats<(add i64:$Rn, simm9:$SImm9),
- (i64 $Rn), (SDXF_simm9 simm9:$SImm9)>;
-
-defm : simm9_pats<(add_like_or i64:$Rn, simm9:$SImm9),
- (i64 $Rn), (SDXF_simm9 simm9:$SImm9)>;
-
-
-//===------------------------------
-// 3. Register offset patterns
-//===------------------------------
-
-// Atomic patterns can be shared between integer operations of all sizes; a
-// quick multiclass here allows reuse.
-multiclass ro_atomic_pats<Instruction LOAD, Instruction STORE, dag Base,
- dag Offset, dag Extend, dag address,
- ValueType transty, ValueType sty> {
- def : Pat<(!cast<PatFrag>("atomic_load_simple_" # sty) address),
- (LOAD Base, Offset, Extend)>;
-
- def : Pat<(!cast<PatFrag>("atomic_store_simple_" # sty) address, transty:$Rt),
- (STORE $Rt, Base, Offset, Extend)>;
-}
-
-// The register-offset instructions take three operands to form the address,
-// and have an annoying split between instructions where Rm is 32-bit and
-// 64-bit, so we need a special hierarchy to describe them. Other than that, the
-// same operations should be supported as for simm9 and uimm12 addressing.
-
-multiclass ro_small_pats<Instruction LOAD, Instruction STORE,
- dag Base, dag Offset, dag Extend,
- dag address, ValueType sty>
- : ro_atomic_pats<LOAD, STORE, Base, Offset, Extend, address, i32, sty> {
- def : Pat<(!cast<SDNode>(zextload # sty) address),
- (LOAD Base, Offset, Extend)>;
-
- def : Pat<(!cast<SDNode>(extload # sty) address),
- (LOAD Base, Offset, Extend)>;
-
- // For zero-extension to 64-bits we have to tell LLVM that the whole 64-bit
- // register was actually set.
- def : Pat<(i64 (!cast<SDNode>(zextload # sty) address)),
- (SUBREG_TO_REG (i64 0), (LOAD Base, Offset, Extend), sub_32)>;
-
- def : Pat<(i64 (!cast<SDNode>(extload # sty) address)),
- (SUBREG_TO_REG (i64 0), (LOAD Base, Offset, Extend), sub_32)>;
-
- def : Pat<(!cast<SDNode>(truncstore # sty) i32:$Rt, address),
- (STORE $Rt, Base, Offset, Extend)>;
-
- // For truncating store from 64-bits, we have to manually tell LLVM to
- // ignore the high bits of the x register.
- def : Pat<(!cast<SDNode>(truncstore # sty) i64:$Rt, address),
- (STORE (EXTRACT_SUBREG $Rt, sub_32), Base, Offset, Extend)>;
-
-}
-
-// Next come patterns for sign-extending loads.
-multiclass ro_signed_pats<string T, string Rm, dag Base, dag Offset, dag Extend,
- dag address, ValueType sty> {
- def : Pat<(i32 (!cast<SDNode>("sextload" # sty) address)),
- (!cast<Instruction>("LDRS" # T # "w_" # Rm # "_RegOffset")
- Base, Offset, Extend)>;
-
- def : Pat<(i64 (!cast<SDNode>("sextload" # sty) address)),
- (!cast<Instruction>("LDRS" # T # "x_" # Rm # "_RegOffset")
- Base, Offset, Extend)>;
-}
-
-// and finally "natural-width" loads and stores come next.
-multiclass ro_neutral_pats<Instruction LOAD, Instruction STORE,
- dag Base, dag Offset, dag Extend, dag address,
- ValueType sty> {
- def : Pat<(sty (load address)), (LOAD Base, Offset, Extend)>;
- def : Pat<(store sty:$Rt, address),
- (STORE $Rt, Base, Offset, Extend)>;
-}
-
-multiclass ro_int_neutral_pats<Instruction LOAD, Instruction STORE,
- dag Base, dag Offset, dag Extend, dag address,
- ValueType sty>
- : ro_neutral_pats<LOAD, STORE, Base, Offset, Extend, address, sty>,
- ro_atomic_pats<LOAD, STORE, Base, Offset, Extend, address, sty, sty>;
-
-multiclass regoff_pats<string Rm, dag address, dag Base, dag Offset,
- dag Extend> {
- defm : ro_small_pats<!cast<Instruction>("LS8_" # Rm # "_RegOffset_LDR"),
- !cast<Instruction>("LS8_" # Rm # "_RegOffset_STR"),
- Base, Offset, Extend,
- !foreach(decls.pattern, address,
- !subst(SHIFT, imm_eq0, decls.pattern)),
- i8>;
- defm : ro_small_pats<!cast<Instruction>("LS16_" # Rm # "_RegOffset_LDR"),
- !cast<Instruction>("LS16_" # Rm # "_RegOffset_STR"),
- Base, Offset, Extend,
- !foreach(decls.pattern, address,
- !subst(SHIFT, imm_eq1, decls.pattern)),
- i16>;
- defm : ro_small_pats<!cast<Instruction>("LS32_" # Rm # "_RegOffset_LDR"),
- !cast<Instruction>("LS32_" # Rm # "_RegOffset_STR"),
- Base, Offset, Extend,
- !foreach(decls.pattern, address,
- !subst(SHIFT, imm_eq2, decls.pattern)),
- i32>;
-
- defm : ro_int_neutral_pats<
- !cast<Instruction>("LS32_" # Rm # "_RegOffset_LDR"),
- !cast<Instruction>("LS32_" # Rm # "_RegOffset_STR"),
- Base, Offset, Extend,
- !foreach(decls.pattern, address,
- !subst(SHIFT, imm_eq2, decls.pattern)),
- i32>;
-
- defm : ro_int_neutral_pats<
- !cast<Instruction>("LS64_" # Rm # "_RegOffset_LDR"),
- !cast<Instruction>("LS64_" # Rm # "_RegOffset_STR"),
- Base, Offset, Extend,
- !foreach(decls.pattern, address,
- !subst(SHIFT, imm_eq3, decls.pattern)),
- i64>;
-
- defm : ro_neutral_pats<!cast<Instruction>("LSFP16_" # Rm # "_RegOffset_LDR"),
- !cast<Instruction>("LSFP16_" # Rm # "_RegOffset_STR"),
- Base, Offset, Extend,
- !foreach(decls.pattern, address,
- !subst(SHIFT, imm_eq1, decls.pattern)),
- f16>;
-
- defm : ro_neutral_pats<!cast<Instruction>("LSFP32_" # Rm # "_RegOffset_LDR"),
- !cast<Instruction>("LSFP32_" # Rm # "_RegOffset_STR"),
- Base, Offset, Extend,
- !foreach(decls.pattern, address,
- !subst(SHIFT, imm_eq2, decls.pattern)),
- f32>;
-
- defm : ro_neutral_pats<!cast<Instruction>("LSFP64_" # Rm # "_RegOffset_LDR"),
- !cast<Instruction>("LSFP64_" # Rm # "_RegOffset_STR"),
- Base, Offset, Extend,
- !foreach(decls.pattern, address,
- !subst(SHIFT, imm_eq3, decls.pattern)),
- f64>;
-
- defm : ro_neutral_pats<!cast<Instruction>("LSFP128_" # Rm # "_RegOffset_LDR"),
- !cast<Instruction>("LSFP128_" # Rm # "_RegOffset_STR"),
- Base, Offset, Extend,
- !foreach(decls.pattern, address,
- !subst(SHIFT, imm_eq4, decls.pattern)),
- f128>;
-
- defm : ro_signed_pats<"B", Rm, Base, Offset, Extend,
- !foreach(decls.pattern, address,
- !subst(SHIFT, imm_eq0, decls.pattern)),
- i8>;
-
- defm : ro_signed_pats<"H", Rm, Base, Offset, Extend,
- !foreach(decls.pattern, address,
- !subst(SHIFT, imm_eq1, decls.pattern)),
- i16>;
-
- def : Pat<(sextloadi32 !foreach(decls.pattern, address,
- !subst(SHIFT, imm_eq2, decls.pattern))),
- (!cast<Instruction>("LDRSWx_" # Rm # "_RegOffset")
- Base, Offset, Extend)>;
-}
-
-
-// Finally we're in a position to tell LLVM exactly what addresses are reachable
-// using register-offset instructions. Essentially a base plus a possibly
-// extended, possibly shifted (by access size) offset.
-
-defm : regoff_pats<"Wm", (add i64:$Rn, (sext i32:$Rm)),
- (i64 i64:$Rn), (i32 i32:$Rm), (i64 6)>;
-
-defm : regoff_pats<"Wm", (add i64:$Rn, (shl (sext i32:$Rm), SHIFT)),
- (i64 i64:$Rn), (i32 i32:$Rm), (i64 7)>;
-
-defm : regoff_pats<"Wm", (add i64:$Rn, (zext i32:$Rm)),
- (i64 i64:$Rn), (i32 i32:$Rm), (i64 2)>;
-
-defm : regoff_pats<"Wm", (add i64:$Rn, (shl (zext i32:$Rm), SHIFT)),
- (i64 i64:$Rn), (i32 i32:$Rm), (i64 3)>;
-
-defm : regoff_pats<"Xm", (add i64:$Rn, i64:$Rm),
- (i64 i64:$Rn), (i64 i64:$Rm), (i64 2)>;
-
-defm : regoff_pats<"Xm", (add i64:$Rn, (shl i64:$Rm, SHIFT)),
- (i64 i64:$Rn), (i64 i64:$Rm), (i64 3)>;
-
-//===----------------------------------------------------------------------===//
-// Advanced SIMD (NEON) Support
+// This means an extra two instructions, but actually in most cases the two REV
+// instructions can be combined into one. For example:
+// (REV64_2s (REV64_4h X)) === (REV32_4h X)
+//
+// There is also no 128-bit REV instruction. This must be synthesized with an
+// EXT instruction.
//
+// Most bitconverts require some sort of conversion. The only exceptions are:
+// a) Identity conversions - vNfX <-> vNiX
+// b) Single-lane-to-scalar - v1fX <-> fX or v1iX <-> iX
+//
+
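The claim above that the two REVs usually combine into one can be sanity-checked with a small C model of the lane permutations (illustrative only, not part of the .td file; the helper names are made up). It verifies that REV64.4h followed by REV64.2s permutes 16-bit lanes exactly like a single REV32.4h:

#include <assert.h>
#include <stdint.h>

/* Model each REV as a permutation of the four 16-bit lanes of a 64-bit value. */
static void rev64_4h(uint16_t h[4]) {   /* reverse halfwords within 64 bits */
  uint16_t t[4] = { h[3], h[2], h[1], h[0] };
  for (int i = 0; i < 4; i++) h[i] = t[i];
}
static void rev64_2s(uint16_t h[4]) {   /* reverse 32-bit words within 64 bits */
  uint16_t t[4] = { h[2], h[3], h[0], h[1] };
  for (int i = 0; i < 4; i++) h[i] = t[i];
}
static void rev32_4h(uint16_t h[4]) {   /* reverse halfwords within each word */
  uint16_t t[4] = { h[1], h[0], h[3], h[2] };
  for (int i = 0; i < 4; i++) h[i] = t[i];
}

int main(void) {
  uint16_t a[4] = { 10, 11, 12, 13 }, b[4] = { 10, 11, 12, 13 };
  rev64_4h(a); rev64_2s(a);             /* (REV64_2s (REV64_4h X)) */
  rev32_4h(b);                          /* (REV32_4h X)            */
  for (int i = 0; i < 4; i++) assert(a[i] == b[i]);
  return 0;
}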
+let Predicates = [IsLE] in {
+def : Pat<(v8i8 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
+def : Pat<(v4i16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
+def : Pat<(v2i32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
+def : Pat<(v2f32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
+
+def : Pat<(i64 (bitconvert (v8i8 V64:$Vn))),
+ (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
+def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
+ (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
+def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
+ (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
+def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
+ (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
+def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
+ (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
+}
+let Predicates = [IsBE] in {
+def : Pat<(v8i8 (bitconvert GPR64:$Xn)),
+ (REV64v8i8 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
+def : Pat<(v4i16 (bitconvert GPR64:$Xn)),
+ (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
+def : Pat<(v2i32 (bitconvert GPR64:$Xn)),
+ (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
+def : Pat<(v2f32 (bitconvert GPR64:$Xn)),
+ (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
+
+def : Pat<(i64 (bitconvert (v8i8 V64:$Vn))),
+ (REV64v8i8 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
+def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
+ (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
+def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
+ (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
+def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
+ (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
+}
+def : Pat<(v1i64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
+def : Pat<(v1f64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
+def : Pat<(i64 (bitconvert (v1i64 V64:$Vn))),
+ (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
+def : Pat<(v1i64 (scalar_to_vector GPR64:$Xn)),
+ (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
+def : Pat<(v1f64 (scalar_to_vector GPR64:$Xn)),
+ (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
+def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Xn))), (v1f64 FPR64:$Xn)>;
+
+def : Pat<(f32 (bitconvert (i32 GPR32:$Xn))),
+ (COPY_TO_REGCLASS GPR32:$Xn, FPR32)>;
+def : Pat<(i32 (bitconvert (f32 FPR32:$Xn))),
+ (COPY_TO_REGCLASS FPR32:$Xn, GPR32)>;
+def : Pat<(f64 (bitconvert (i64 GPR64:$Xn))),
+ (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
+def : Pat<(i64 (bitconvert (f64 FPR64:$Xn))),
+ (COPY_TO_REGCLASS FPR64:$Xn, GPR64)>;
+def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
+ (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
+
+let Predicates = [IsLE] in {
+def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
+def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
+def : Pat<(v1i64 (bitconvert (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;
+def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
+}
+let Predicates = [IsBE] in {
+def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))),
+ (v1i64 (REV64v2i32 FPR64:$src))>;
+def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))),
+ (v1i64 (REV64v4i16 FPR64:$src))>;
+def : Pat<(v1i64 (bitconvert (v8i8 FPR64:$src))),
+ (v1i64 (REV64v8i8 FPR64:$src))>;
+def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))),
+ (v1i64 (REV64v2i32 FPR64:$src))>;
+}
+def : Pat<(v1i64 (bitconvert (v1f64 FPR64:$src))), (v1i64 FPR64:$src)>;
+def : Pat<(v1i64 (bitconvert (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
+
+let Predicates = [IsLE] in {
+def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))), (v2i32 FPR64:$src)>;
+def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
+def : Pat<(v2i32 (bitconvert (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
+def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
+def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))), (v2i32 FPR64:$src)>;
+}
+let Predicates = [IsBE] in {
+def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))),
+ (v2i32 (REV64v2i32 FPR64:$src))>;
+def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))),
+ (v2i32 (REV32v4i16 FPR64:$src))>;
+def : Pat<(v2i32 (bitconvert (v8i8 FPR64:$src))),
+ (v2i32 (REV32v8i8 FPR64:$src))>;
+def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))),
+ (v2i32 (REV64v2i32 FPR64:$src))>;
+def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))),
+ (v2i32 (REV64v2i32 FPR64:$src))>;
+}
+def : Pat<(v2i32 (bitconvert (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
+
+let Predicates = [IsLE] in {
+def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))), (v4i16 FPR64:$src)>;
+def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
+def : Pat<(v4i16 (bitconvert (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
+def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
+def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
+def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))), (v4i16 FPR64:$src)>;
+}
+let Predicates = [IsBE] in {
+def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))),
+ (v4i16 (REV64v4i16 FPR64:$src))>;
+def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))),
+ (v4i16 (REV32v4i16 FPR64:$src))>;
+def : Pat<(v4i16 (bitconvert (v8i8 FPR64:$src))),
+ (v4i16 (REV16v8i8 FPR64:$src))>;
+def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))),
+ (v4i16 (REV64v4i16 FPR64:$src))>;
+def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))),
+ (v4i16 (REV32v4i16 FPR64:$src))>;
+def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))),
+ (v4i16 (REV64v4i16 FPR64:$src))>;
+}
+
+let Predicates = [IsLE] in {
+def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))), (v8i8 FPR64:$src)>;
+def : Pat<(v8i8 (bitconvert (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
+def : Pat<(v8i8 (bitconvert (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
+def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
+def : Pat<(v8i8 (bitconvert (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
+def : Pat<(v8i8 (bitconvert (v1f64 FPR64:$src))), (v8i8 FPR64:$src)>;
+}
+let Predicates = [IsBE] in {
+def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))),
+ (v8i8 (REV64v8i8 FPR64:$src))>;
+def : Pat<(v8i8 (bitconvert (v2i32 FPR64:$src))),
+ (v8i8 (REV32v8i8 FPR64:$src))>;
+def : Pat<(v8i8 (bitconvert (v4i16 FPR64:$src))),
+ (v8i8 (REV16v8i8 FPR64:$src))>;
+def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))),
+ (v8i8 (REV64v8i8 FPR64:$src))>;
+def : Pat<(v8i8 (bitconvert (v2f32 FPR64:$src))),
+ (v8i8 (REV32v8i8 FPR64:$src))>;
+def : Pat<(v8i8 (bitconvert (v1f64 FPR64:$src))),
+ (v8i8 (REV64v8i8 FPR64:$src))>;
+}
+
+let Predicates = [IsLE] in {
+def : Pat<(f64 (bitconvert (v2i32 FPR64:$src))), (f64 FPR64:$src)>;
+def : Pat<(f64 (bitconvert (v4i16 FPR64:$src))), (f64 FPR64:$src)>;
+def : Pat<(f64 (bitconvert (v2f32 FPR64:$src))), (f64 FPR64:$src)>;
+def : Pat<(f64 (bitconvert (v8i8 FPR64:$src))), (f64 FPR64:$src)>;
+}
+let Predicates = [IsBE] in {
+def : Pat<(f64 (bitconvert (v2i32 FPR64:$src))),
+ (f64 (REV64v2i32 FPR64:$src))>;
+def : Pat<(f64 (bitconvert (v4i16 FPR64:$src))),
+ (f64 (REV64v4i16 FPR64:$src))>;
+def : Pat<(f64 (bitconvert (v2f32 FPR64:$src))),
+ (f64 (REV64v2i32 FPR64:$src))>;
+def : Pat<(f64 (bitconvert (v8i8 FPR64:$src))),
+ (f64 (REV64v8i8 FPR64:$src))>;
+}
+def : Pat<(f64 (bitconvert (v1i64 FPR64:$src))), (f64 FPR64:$src)>;
+def : Pat<(f64 (bitconvert (v1f64 FPR64:$src))), (f64 FPR64:$src)>;
+
+let Predicates = [IsLE] in {
+def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))), (v1f64 FPR64:$src)>;
+def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))), (v1f64 FPR64:$src)>;
+def : Pat<(v1f64 (bitconvert (v8i8 FPR64:$src))), (v1f64 FPR64:$src)>;
+def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
+}
+let Predicates = [IsBE] in {
+def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))),
+ (v1f64 (REV64v2i32 FPR64:$src))>;
+def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))),
+ (v1f64 (REV64v4i16 FPR64:$src))>;
+def : Pat<(v1f64 (bitconvert (v8i8 FPR64:$src))),
+ (v1f64 (REV64v8i8 FPR64:$src))>;
+def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))),
+ (v1f64 (REV64v2i32 FPR64:$src))>;
+}
+def : Pat<(v1f64 (bitconvert (v1i64 FPR64:$src))), (v1f64 FPR64:$src)>;
+def : Pat<(v1f64 (bitconvert (f64 FPR64:$src))), (v1f64 FPR64:$src)>;
+
+let Predicates = [IsLE] in {
+def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))), (v2f32 FPR64:$src)>;
+def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))), (v2f32 FPR64:$src)>;
+def : Pat<(v2f32 (bitconvert (v8i8 FPR64:$src))), (v2f32 FPR64:$src)>;
+def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))), (v2f32 FPR64:$src)>;
+def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
+}
+let Predicates = [IsBE] in {
+def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))),
+ (v2f32 (REV64v2i32 FPR64:$src))>;
+def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))),
+ (v2f32 (REV32v4i16 FPR64:$src))>;
+def : Pat<(v2f32 (bitconvert (v8i8 FPR64:$src))),
+ (v2f32 (REV32v8i8 FPR64:$src))>;
+def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))),
+ (v2f32 (REV64v2i32 FPR64:$src))>;
+def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))),
+ (v2f32 (REV64v2i32 FPR64:$src))>;
+}
+def : Pat<(v2f32 (bitconvert (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
+
+let Predicates = [IsLE] in {
+def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))), (f128 FPR128:$src)>;
+def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))), (f128 FPR128:$src)>;
+def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))), (f128 FPR128:$src)>;
+def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))), (f128 FPR128:$src)>;
+def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))), (f128 FPR128:$src)>;
+def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))), (f128 FPR128:$src)>;
+}
+let Predicates = [IsBE] in {
+def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))),
+ (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
+def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))),
+ (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
+ (REV64v4i32 FPR128:$src), (i32 8)))>;
+def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))),
+ (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
+ (REV64v8i16 FPR128:$src), (i32 8)))>;
+def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))),
+ (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
+def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))),
+ (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
+ (REV64v4i32 FPR128:$src), (i32 8)))>;
+def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))),
+ (f128 (EXTv16i8 (REV64v16i8 FPR128:$src),
+ (REV64v16i8 FPR128:$src), (i32 8)))>;
+}
+
+let Predicates = [IsLE] in {
+def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))), (v2f64 FPR128:$src)>;
+def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
+def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
+def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
+def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
+}
+let Predicates = [IsBE] in {
+def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))),
+ (v2f64 (EXTv16i8 FPR128:$src,
+ FPR128:$src, (i32 8)))>;
+def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))),
+ (v2f64 (REV64v4i32 FPR128:$src))>;
+def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))),
+ (v2f64 (REV64v8i16 FPR128:$src))>;
+def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))),
+ (v2f64 (REV64v16i8 FPR128:$src))>;
+def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))),
+ (v2f64 (REV64v4i32 FPR128:$src))>;
+}
+def : Pat<(v2f64 (bitconvert (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;
+
+let Predicates = [IsLE] in {
+def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))), (v4f32 FPR128:$src)>;
+def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
+def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
+def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
+def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
+}
+let Predicates = [IsBE] in {
+def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))),
+ (v4f32 (EXTv16i8 (REV64v4i32 FPR128:$src),
+ (REV64v4i32 FPR128:$src), (i32 8)))>;
+def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))),
+ (v4f32 (REV32v8i16 FPR128:$src))>;
+def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))),
+ (v4f32 (REV32v16i8 FPR128:$src))>;
+def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))),
+ (v4f32 (REV64v4i32 FPR128:$src))>;
+def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))),
+ (v4f32 (REV64v4i32 FPR128:$src))>;
+}
+def : Pat<(v4f32 (bitconvert (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
+
+let Predicates = [IsLE] in {
+def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))), (v2i64 FPR128:$src)>;
+def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
+def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
+def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
+def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
+}
+let Predicates = [IsBE] in {
+def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))),
+ (v2i64 (EXTv16i8 FPR128:$src,
+ FPR128:$src, (i32 8)))>;
+def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))),
+ (v2i64 (REV64v4i32 FPR128:$src))>;
+def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))),
+ (v2i64 (REV64v8i16 FPR128:$src))>;
+def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))),
+ (v2i64 (REV64v16i8 FPR128:$src))>;
+def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))),
+ (v2i64 (REV64v4i32 FPR128:$src))>;
+}
+def : Pat<(v2i64 (bitconvert (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
+
+let Predicates = [IsLE] in {
+def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))), (v4i32 FPR128:$src)>;
+def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
+def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
+def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
+def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
+}
+let Predicates = [IsBE] in {
+def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))),
+ (v4i32 (EXTv16i8 (REV64v4i32 FPR128:$src),
+ (REV64v4i32 FPR128:$src),
+ (i32 8)))>;
+def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))),
+ (v4i32 (REV64v4i32 FPR128:$src))>;
+def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))),
+ (v4i32 (REV32v8i16 FPR128:$src))>;
+def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))),
+ (v4i32 (REV32v16i8 FPR128:$src))>;
+def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))),
+ (v4i32 (REV64v4i32 FPR128:$src))>;
+}
+def : Pat<(v4i32 (bitconvert (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
+
+let Predicates = [IsLE] in {
+def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))), (v8i16 FPR128:$src)>;
+def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
+def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
+def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
+def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
+def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
+}
+let Predicates = [IsBE] in {
+def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))),
+ (v8i16 (EXTv16i8 (REV64v8i16 FPR128:$src),
+ (REV64v8i16 FPR128:$src),
+ (i32 8)))>;
+def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))),
+ (v8i16 (REV64v8i16 FPR128:$src))>;
+def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))),
+ (v8i16 (REV32v8i16 FPR128:$src))>;
+def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))),
+ (v8i16 (REV16v16i8 FPR128:$src))>;
+def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))),
+ (v8i16 (REV64v8i16 FPR128:$src))>;
+def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))),
+ (v8i16 (REV32v8i16 FPR128:$src))>;
+}
+
+let Predicates = [IsLE] in {
+def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))), (v16i8 FPR128:$src)>;
+def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
+def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
+def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
+def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
+def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
+}
+let Predicates = [IsBE] in {
+def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))),
+ (v16i8 (EXTv16i8 (REV64v16i8 FPR128:$src),
+ (REV64v16i8 FPR128:$src),
+ (i32 8)))>;
+def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))),
+ (v16i8 (REV64v16i8 FPR128:$src))>;
+def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))),
+ (v16i8 (REV32v16i8 FPR128:$src))>;
+def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))),
+ (v16i8 (REV16v16i8 FPR128:$src))>;
+def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))),
+ (v16i8 (REV64v16i8 FPR128:$src))>;
+def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))),
+ (v16i8 (REV32v16i8 FPR128:$src))>;
+}
+
+def : Pat<(v8i8 (extract_subvector (v16i8 FPR128:$Rn), (i64 1))),
+ (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
+def : Pat<(v4i16 (extract_subvector (v8i16 FPR128:$Rn), (i64 1))),
+ (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
+def : Pat<(v2i32 (extract_subvector (v4i32 FPR128:$Rn), (i64 1))),
+ (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
+def : Pat<(v1i64 (extract_subvector (v2i64 FPR128:$Rn), (i64 1))),
+ (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
+
+// A 64-bit subvector insert to the first 128-bit vector position
+// is a subregister copy that needs no instruction.
+def : Pat<(insert_subvector undef, (v1i64 FPR64:$src), (i32 0)),
+ (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
+def : Pat<(insert_subvector undef, (v1f64 FPR64:$src), (i32 0)),
+ (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
+def : Pat<(insert_subvector undef, (v2i32 FPR64:$src), (i32 0)),
+ (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
+def : Pat<(insert_subvector undef, (v2f32 FPR64:$src), (i32 0)),
+ (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
+def : Pat<(insert_subvector undef, (v4i16 FPR64:$src), (i32 0)),
+ (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
+def : Pat<(insert_subvector undef, (v8i8 FPR64:$src), (i32 0)),
+ (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
+
+// Use pair-wise add instructions when summing up the lanes for v2f64, v2i64
+// or v2f32.
+def : Pat<(i64 (add (vector_extract (v2i64 FPR128:$Rn), (i64 0)),
+ (vector_extract (v2i64 FPR128:$Rn), (i64 1)))),
+ (i64 (ADDPv2i64p (v2i64 FPR128:$Rn)))>;
+def : Pat<(f64 (fadd (vector_extract (v2f64 FPR128:$Rn), (i64 0)),
+ (vector_extract (v2f64 FPR128:$Rn), (i64 1)))),
+ (f64 (FADDPv2i64p (v2f64 FPR128:$Rn)))>;
+// vector_extract on 64-bit vectors gets promoted to a 128-bit vector,
+// so we match on v4f32 here, not v2f32. This will also catch adding
+// the low two lanes of a true v4f32 vector.
+def : Pat<(fadd (vector_extract (v4f32 FPR128:$Rn), (i64 0)),
+ (vector_extract (v4f32 FPR128:$Rn), (i64 1))),
+ (f32 (FADDPv2i32p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
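A short illustrative snippet using the Clang/GCC vector extension (not part of the .td file; the type name is made up) showing the extract-both-lanes-and-add shape that the ADDP/FADDP patterns above match, so a single pairwise add is selected instead of two lane moves and an add:

typedef double v2f64_t __attribute__((vector_size(16)));

double sum_lanes(v2f64_t v) { return v[0] + v[1]; }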
+
+// Scalar 64-bit shifts in FPR64 registers.
+def : Pat<(i64 (int_aarch64_neon_sshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
+ (SSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
+def : Pat<(i64 (int_aarch64_neon_ushl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
+ (USHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
+def : Pat<(i64 (int_aarch64_neon_srshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
+ (SRSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
+def : Pat<(i64 (int_aarch64_neon_urshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
+ (URSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
+
+// Tail call return handling. These are all compiler pseudo-instructions,
+// so no encoding information or anything like that.
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
+ def TCRETURNdi : Pseudo<(outs), (ins i64imm:$dst, i32imm:$FPDiff),[]>;
+ def TCRETURNri : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff), []>;
+}
+
+def : Pat<(AArch64tcret tcGPR64:$dst, (i32 timm:$FPDiff)),
+ (TCRETURNri tcGPR64:$dst, imm:$FPDiff)>;
+def : Pat<(AArch64tcret tglobaladdr:$dst, (i32 timm:$FPDiff)),
+ (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
+def : Pat<(AArch64tcret texternalsym:$dst, (i32 timm:$FPDiff)),
+ (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
-include "AArch64InstrNEON.td"
+include "AArch64InstrAtomics.td"
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64InstrNEON.td b/contrib/llvm/lib/Target/AArch64/AArch64InstrNEON.td
deleted file mode 100644
index d71749d..0000000
--- a/contrib/llvm/lib/Target/AArch64/AArch64InstrNEON.td
+++ /dev/null
@@ -1,8671 +0,0 @@
-//===-- AArch64InstrNEON.td - NEON support for AArch64 -----*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the AArch64 NEON instruction set.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// NEON-specific DAG Nodes.
-//===----------------------------------------------------------------------===//
-def Neon_bsl : SDNode<"AArch64ISD::NEON_BSL", SDTypeProfile<1, 3,
- [SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
- SDTCisSameAs<0, 3>]>>;
-
-// (outs Result), (ins Imm, OpCmode)
-def SDT_Neon_movi : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
-
-def Neon_movi : SDNode<"AArch64ISD::NEON_MOVIMM", SDT_Neon_movi>;
-
-def Neon_mvni : SDNode<"AArch64ISD::NEON_MVNIMM", SDT_Neon_movi>;
-
-// (outs Result), (ins Imm)
-def Neon_fmovi : SDNode<"AArch64ISD::NEON_FMOVIMM", SDTypeProfile<1, 1,
- [SDTCisVec<0>, SDTCisVT<1, i32>]>>;
-
-// (outs Result), (ins LHS, RHS, CondCode)
-def Neon_cmp : SDNode<"AArch64ISD::NEON_CMP", SDTypeProfile<1, 3,
- [SDTCisVec<0>, SDTCisSameAs<1, 2>]>>;
-
-// (outs Result), (ins LHS, 0/0.0 constant, CondCode)
-def Neon_cmpz : SDNode<"AArch64ISD::NEON_CMPZ", SDTypeProfile<1, 3,
- [SDTCisVec<0>, SDTCisVec<1>]>>;
-
-// (outs Result), (ins LHS, RHS)
-def Neon_tst : SDNode<"AArch64ISD::NEON_TST", SDTypeProfile<1, 2,
- [SDTCisVec<0>, SDTCisSameAs<1, 2>]>>;
-
-def SDTARMVSH : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
- SDTCisVT<2, i32>]>;
-def Neon_sqrshlImm : SDNode<"AArch64ISD::NEON_QSHLs", SDTARMVSH>;
-def Neon_uqrshlImm : SDNode<"AArch64ISD::NEON_QSHLu", SDTARMVSH>;
-
-def SDTPERMUTE : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
- SDTCisSameAs<0, 2>]>;
-def Neon_uzp1 : SDNode<"AArch64ISD::NEON_UZP1", SDTPERMUTE>;
-def Neon_uzp2 : SDNode<"AArch64ISD::NEON_UZP2", SDTPERMUTE>;
-def Neon_zip1 : SDNode<"AArch64ISD::NEON_ZIP1", SDTPERMUTE>;
-def Neon_zip2 : SDNode<"AArch64ISD::NEON_ZIP2", SDTPERMUTE>;
-def Neon_trn1 : SDNode<"AArch64ISD::NEON_TRN1", SDTPERMUTE>;
-def Neon_trn2 : SDNode<"AArch64ISD::NEON_TRN2", SDTPERMUTE>;
-
-def SDTVSHUF : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0, 1>]>;
-def Neon_rev64 : SDNode<"AArch64ISD::NEON_REV64", SDTVSHUF>;
-def Neon_rev32 : SDNode<"AArch64ISD::NEON_REV32", SDTVSHUF>;
-def Neon_rev16 : SDNode<"AArch64ISD::NEON_REV16", SDTVSHUF>;
-def Neon_vdup : SDNode<"AArch64ISD::NEON_VDUP", SDTypeProfile<1, 1,
- [SDTCisVec<0>]>>;
-def Neon_vduplane : SDNode<"AArch64ISD::NEON_VDUPLANE", SDTypeProfile<1, 2,
- [SDTCisVec<0>, SDTCisVec<1>, SDTCisVT<2, i64>]>>;
-def Neon_vextract : SDNode<"AArch64ISD::NEON_VEXTRACT", SDTypeProfile<1, 3,
- [SDTCisVec<0>, SDTCisSameAs<0, 1>,
- SDTCisSameAs<0, 2>, SDTCisVT<3, i64>]>>;
-
-def SDT_assertext : SDTypeProfile<1, 1,
- [SDTCisInt<0>, SDTCisInt<1>, SDTCisSameAs<1, 0>]>;
-def assertsext : SDNode<"ISD::AssertSext", SDT_assertext>;
-def assertzext : SDNode<"ISD::AssertZext", SDT_assertext>;
-
-//===----------------------------------------------------------------------===//
-// Multiclasses
-//===----------------------------------------------------------------------===//
-
-multiclass NeonI_3VSame_B_sizes<bit u, bits<2> size, bits<5> opcode,
- string asmop, SDPatternOperator opnode8B,
- SDPatternOperator opnode16B,
- bit Commutable = 0> {
- let isCommutable = Commutable in {
- def _8B : NeonI_3VSame<0b0, u, size, opcode,
- (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
- asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
- [(set (v8i8 VPR64:$Rd),
- (v8i8 (opnode8B (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
- NoItinerary>;
-
- def _16B : NeonI_3VSame<0b1, u, size, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
- asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
- [(set (v16i8 VPR128:$Rd),
- (v16i8 (opnode16B (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
- NoItinerary>;
- }
-
-}
-
-multiclass NeonI_3VSame_HS_sizes<bit u, bits<5> opcode,
- string asmop, SDPatternOperator opnode,
- bit Commutable = 0> {
- let isCommutable = Commutable in {
- def _4H : NeonI_3VSame<0b0, u, 0b01, opcode,
- (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
- asmop # "\t$Rd.4h, $Rn.4h, $Rm.4h",
- [(set (v4i16 VPR64:$Rd),
- (v4i16 (opnode (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))))],
- NoItinerary>;
-
- def _8H : NeonI_3VSame<0b1, u, 0b01, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
- asmop # "\t$Rd.8h, $Rn.8h, $Rm.8h",
- [(set (v8i16 VPR128:$Rd),
- (v8i16 (opnode (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))))],
- NoItinerary>;
-
- def _2S : NeonI_3VSame<0b0, u, 0b10, opcode,
- (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
- asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
- [(set (v2i32 VPR64:$Rd),
- (v2i32 (opnode (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))))],
- NoItinerary>;
-
- def _4S : NeonI_3VSame<0b1, u, 0b10, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
- asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
- [(set (v4i32 VPR128:$Rd),
- (v4i32 (opnode (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))))],
- NoItinerary>;
- }
-}
-multiclass NeonI_3VSame_BHS_sizes<bit u, bits<5> opcode,
- string asmop, SDPatternOperator opnode,
- bit Commutable = 0>
- : NeonI_3VSame_HS_sizes<u, opcode, asmop, opnode, Commutable> {
- let isCommutable = Commutable in {
- def _8B : NeonI_3VSame<0b0, u, 0b00, opcode,
- (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
- asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
- [(set (v8i8 VPR64:$Rd),
- (v8i8 (opnode (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
- NoItinerary>;
-
- def _16B : NeonI_3VSame<0b1, u, 0b00, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
- asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
- [(set (v16i8 VPR128:$Rd),
- (v16i8 (opnode (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
- NoItinerary>;
- }
-}
-
-multiclass NeonI_3VSame_BHSD_sizes<bit u, bits<5> opcode,
- string asmop, SDPatternOperator opnode,
- bit Commutable = 0>
- : NeonI_3VSame_BHS_sizes<u, opcode, asmop, opnode, Commutable> {
- let isCommutable = Commutable in {
- def _2D : NeonI_3VSame<0b1, u, 0b11, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
- asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
- [(set (v2i64 VPR128:$Rd),
- (v2i64 (opnode (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))))],
- NoItinerary>;
- }
-}
-
-// Multiclass NeonI_3VSame_SD_sizes: Operand types are floating point types,
-// but Result types can be integer or floating point types.
-multiclass NeonI_3VSame_SD_sizes<bit u, bit size, bits<5> opcode,
- string asmop, SDPatternOperator opnode2S,
- SDPatternOperator opnode4S,
- SDPatternOperator opnode2D,
- ValueType ResTy2S, ValueType ResTy4S,
- ValueType ResTy2D, bit Commutable = 0> {
- let isCommutable = Commutable in {
- def _2S : NeonI_3VSame<0b0, u, {size, 0b0}, opcode,
- (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
- asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
- [(set (ResTy2S VPR64:$Rd),
- (ResTy2S (opnode2S (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))))],
- NoItinerary>;
-
- def _4S : NeonI_3VSame<0b1, u, {size, 0b0}, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
- asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
- [(set (ResTy4S VPR128:$Rd),
- (ResTy4S (opnode4S (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))))],
- NoItinerary>;
-
- def _2D : NeonI_3VSame<0b1, u, {size, 0b1}, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
- asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
- [(set (ResTy2D VPR128:$Rd),
- (ResTy2D (opnode2D (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))))],
- NoItinerary>;
- }
-}
-
-//===----------------------------------------------------------------------===//
-// Instruction Definitions
-//===----------------------------------------------------------------------===//
-
-// Vector Arithmetic Instructions
-
-// Vector Add (Integer and Floating-Point)
-
-defm ADDvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b10000, "add", add, 1>;
-defm FADDvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11010, "fadd", fadd, fadd, fadd,
- v2f32, v4f32, v2f64, 1>;
-
-// Vector Sub (Integer and Floating-Point)
-
-defm SUBvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b10000, "sub", sub, 0>;
-defm FSUBvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11010, "fsub", fsub, fsub, fsub,
- v2f32, v4f32, v2f64, 0>;
-
-// Vector Multiply (Integer and Floating-Point)
-
-defm MULvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10011, "mul", mul, 1>;
-defm FMULvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11011, "fmul", fmul, fmul, fmul,
- v2f32, v4f32, v2f64, 1>;
-
-// Vector Multiply (Polynomial)
-
-defm PMULvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b10011, "pmul",
- int_arm_neon_vmulp, int_arm_neon_vmulp, 1>;
-
-// Vector Multiply-accumulate and Multiply-subtract (Integer)
-
-// class NeonI_3VSame_Constraint_impl: NeonI_3VSame with no data type and a
-// constraint tying two operands ($src = $Rd).
-class NeonI_3VSame_Constraint_impl<string asmop, string asmlane,
- RegisterOperand VPRC, ValueType OpTy, bit q, bit u, bits<2> size,
- bits<5> opcode, SDPatternOperator opnode>
- : NeonI_3VSame<q, u, size, opcode,
- (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, VPRC:$Rm),
- asmop # "\t$Rd" # asmlane # ", $Rn" # asmlane # ", $Rm" # asmlane,
- [(set (OpTy VPRC:$Rd),
- (OpTy (opnode (OpTy VPRC:$src), (OpTy VPRC:$Rn), (OpTy VPRC:$Rm))))],
- NoItinerary> {
- let Constraints = "$src = $Rd";
-}
-
-def Neon_mla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
- (add node:$Ra, (mul node:$Rn, node:$Rm))>;
-
-def Neon_mls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
- (sub node:$Ra, (mul node:$Rn, node:$Rm))>;
-
-
-def MLAvvv_8B: NeonI_3VSame_Constraint_impl<"mla", ".8b", VPR64, v8i8,
- 0b0, 0b0, 0b00, 0b10010, Neon_mla>;
-def MLAvvv_16B: NeonI_3VSame_Constraint_impl<"mla", ".16b", VPR128, v16i8,
- 0b1, 0b0, 0b00, 0b10010, Neon_mla>;
-def MLAvvv_4H: NeonI_3VSame_Constraint_impl<"mla", ".4h", VPR64, v4i16,
- 0b0, 0b0, 0b01, 0b10010, Neon_mla>;
-def MLAvvv_8H: NeonI_3VSame_Constraint_impl<"mla", ".8h", VPR128, v8i16,
- 0b1, 0b0, 0b01, 0b10010, Neon_mla>;
-def MLAvvv_2S: NeonI_3VSame_Constraint_impl<"mla", ".2s", VPR64, v2i32,
- 0b0, 0b0, 0b10, 0b10010, Neon_mla>;
-def MLAvvv_4S: NeonI_3VSame_Constraint_impl<"mla", ".4s", VPR128, v4i32,
- 0b1, 0b0, 0b10, 0b10010, Neon_mla>;
-
-def MLSvvv_8B: NeonI_3VSame_Constraint_impl<"mls", ".8b", VPR64, v8i8,
- 0b0, 0b1, 0b00, 0b10010, Neon_mls>;
-def MLSvvv_16B: NeonI_3VSame_Constraint_impl<"mls", ".16b", VPR128, v16i8,
- 0b1, 0b1, 0b00, 0b10010, Neon_mls>;
-def MLSvvv_4H: NeonI_3VSame_Constraint_impl<"mls", ".4h", VPR64, v4i16,
- 0b0, 0b1, 0b01, 0b10010, Neon_mls>;
-def MLSvvv_8H: NeonI_3VSame_Constraint_impl<"mls", ".8h", VPR128, v8i16,
- 0b1, 0b1, 0b01, 0b10010, Neon_mls>;
-def MLSvvv_2S: NeonI_3VSame_Constraint_impl<"mls", ".2s", VPR64, v2i32,
- 0b0, 0b1, 0b10, 0b10010, Neon_mls>;
-def MLSvvv_4S: NeonI_3VSame_Constraint_impl<"mls", ".4s", VPR128, v4i32,
- 0b1, 0b1, 0b10, 0b10010, Neon_mls>;
-
-// Vector Multiply-accumulate and Multiply-subtract (Floating Point)
-
-def Neon_fmla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
- (fadd node:$Ra, (fmul node:$Rn, node:$Rm))>;
-
-def Neon_fmls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
- (fsub node:$Ra, (fmul node:$Rn, node:$Rm))>;
-
-let Predicates = [HasNEON, UseFusedMAC] in {
-def FMLAvvv_2S: NeonI_3VSame_Constraint_impl<"fmla", ".2s", VPR64, v2f32,
- 0b0, 0b0, 0b00, 0b11001, Neon_fmla>;
-def FMLAvvv_4S: NeonI_3VSame_Constraint_impl<"fmla", ".4s", VPR128, v4f32,
- 0b1, 0b0, 0b00, 0b11001, Neon_fmla>;
-def FMLAvvv_2D: NeonI_3VSame_Constraint_impl<"fmla", ".2d", VPR128, v2f64,
- 0b1, 0b0, 0b01, 0b11001, Neon_fmla>;
-
-def FMLSvvv_2S: NeonI_3VSame_Constraint_impl<"fmls", ".2s", VPR64, v2f32,
- 0b0, 0b0, 0b10, 0b11001, Neon_fmls>;
-def FMLSvvv_4S: NeonI_3VSame_Constraint_impl<"fmls", ".4s", VPR128, v4f32,
- 0b1, 0b0, 0b10, 0b11001, Neon_fmls>;
-def FMLSvvv_2D: NeonI_3VSame_Constraint_impl<"fmls", ".2d", VPR128, v2f64,
- 0b1, 0b0, 0b11, 0b11001, Neon_fmls>;
-}
-
-// We're also allowed to match the fma instruction regardless of compile
-// options.
-def : Pat<(v2f32 (fma VPR64:$Rn, VPR64:$Rm, VPR64:$Ra)),
- (FMLAvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
-def : Pat<(v4f32 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
- (FMLAvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
-def : Pat<(v2f64 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
- (FMLAvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
-
-def : Pat<(v2f32 (fma (fneg VPR64:$Rn), VPR64:$Rm, VPR64:$Ra)),
- (FMLSvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
-def : Pat<(v4f32 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
- (FMLSvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
-def : Pat<(v2f64 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
- (FMLSvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
-
-// Vector Divide (Floating-Point)
-
-defm FDIVvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11111, "fdiv", fdiv, fdiv, fdiv,
- v2f32, v4f32, v2f64, 0>;
-
-// Vector Bitwise Operations
-
-// Vector Bitwise AND
-
-defm ANDvvv : NeonI_3VSame_B_sizes<0b0, 0b00, 0b00011, "and", and, and, 1>;
-
-// Vector Bitwise Exclusive OR
-
-defm EORvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b00011, "eor", xor, xor, 1>;
-
-// Vector Bitwise OR
-
-defm ORRvvv : NeonI_3VSame_B_sizes<0b0, 0b10, 0b00011, "orr", or, or, 1>;
-
-// ORR disassembled as MOV if Vn==Vm
-
-// Vector Move - register
-// Alias for ORR if Vn=Vm.
-// FIXME: This is actually the preferred syntax but TableGen can't deal with
-// custom printing of aliases.
-def : NeonInstAlias<"mov $Rd.8b, $Rn.8b",
- (ORRvvv_8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rn), 0>;
-def : NeonInstAlias<"mov $Rd.16b, $Rn.16b",
- (ORRvvv_16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rn), 0>;
-
-// The MOVI instruction takes two immediate operands. The first is the
-// immediate encoding, while the second is the cmode. A cmode of 14, or
-// 0b1110, produces a MOVI operation, rather than a MVNI, ORR, or BIC.
-def Neon_AllZero : PatFrag<(ops), (Neon_movi (i32 0), (i32 14))>;
-def Neon_AllOne : PatFrag<(ops), (Neon_movi (i32 255), (i32 14))>;
-
-def Neon_not8B : PatFrag<(ops node:$in),
- (xor node:$in, (bitconvert (v8i8 Neon_AllOne)))>;
-def Neon_not16B : PatFrag<(ops node:$in),
- (xor node:$in, (bitconvert (v16i8 Neon_AllOne)))>;
-
-def Neon_orn8B : PatFrag<(ops node:$Rn, node:$Rm),
- (or node:$Rn, (Neon_not8B node:$Rm))>;
-
-def Neon_orn16B : PatFrag<(ops node:$Rn, node:$Rm),
- (or node:$Rn, (Neon_not16B node:$Rm))>;
-
-def Neon_bic8B : PatFrag<(ops node:$Rn, node:$Rm),
- (and node:$Rn, (Neon_not8B node:$Rm))>;
-
-def Neon_bic16B : PatFrag<(ops node:$Rn, node:$Rm),
- (and node:$Rn, (Neon_not16B node:$Rm))>;
-
-
-// Vector Bitwise OR NOT - register
-
-defm ORNvvv : NeonI_3VSame_B_sizes<0b0, 0b11, 0b00011, "orn",
- Neon_orn8B, Neon_orn16B, 0>;
-
-// Vector Bitwise Bit Clear (AND NOT) - register
-
-defm BICvvv : NeonI_3VSame_B_sizes<0b0, 0b01, 0b00011, "bic",
- Neon_bic8B, Neon_bic16B, 0>;
-
-multiclass Neon_bitwise2V_patterns<SDPatternOperator opnode8B,
- SDPatternOperator opnode16B,
- Instruction INST8B,
- Instruction INST16B> {
- def : Pat<(v2i32 (opnode8B VPR64:$Rn, VPR64:$Rm)),
- (INST8B VPR64:$Rn, VPR64:$Rm)>;
- def : Pat<(v4i16 (opnode8B VPR64:$Rn, VPR64:$Rm)),
- (INST8B VPR64:$Rn, VPR64:$Rm)>;
- def : Pat<(v1i64 (opnode8B VPR64:$Rn, VPR64:$Rm)),
- (INST8B VPR64:$Rn, VPR64:$Rm)>;
- def : Pat<(v4i32 (opnode16B VPR128:$Rn, VPR128:$Rm)),
- (INST16B VPR128:$Rn, VPR128:$Rm)>;
- def : Pat<(v8i16 (opnode16B VPR128:$Rn, VPR128:$Rm)),
- (INST16B VPR128:$Rn, VPR128:$Rm)>;
- def : Pat<(v2i64 (opnode16B VPR128:$Rn, VPR128:$Rm)),
- (INST16B VPR128:$Rn, VPR128:$Rm)>;
-}
-
-// Additional patterns for bitwise instructions AND, EOR, ORR, BIC, ORN
-defm : Neon_bitwise2V_patterns<and, and, ANDvvv_8B, ANDvvv_16B>;
-defm : Neon_bitwise2V_patterns<or, or, ORRvvv_8B, ORRvvv_16B>;
-defm : Neon_bitwise2V_patterns<xor, xor, EORvvv_8B, EORvvv_16B>;
-defm : Neon_bitwise2V_patterns<Neon_bic8B, Neon_bic16B, BICvvv_8B, BICvvv_16B>;
-defm : Neon_bitwise2V_patterns<Neon_orn8B, Neon_orn16B, ORNvvv_8B, ORNvvv_16B>;
-
-// Vector Bitwise Select
-def BSLvvv_8B : NeonI_3VSame_Constraint_impl<"bsl", ".8b", VPR64, v8i8,
- 0b0, 0b1, 0b01, 0b00011, Neon_bsl>;
-
-def BSLvvv_16B : NeonI_3VSame_Constraint_impl<"bsl", ".16b", VPR128, v16i8,
- 0b1, 0b1, 0b01, 0b00011, Neon_bsl>;
-
-multiclass Neon_bitwise3V_patterns<SDPatternOperator opnode,
- Instruction INST8B,
- Instruction INST16B> {
- // Disassociate type from instruction definition
- def : Pat<(v2i32 (opnode VPR64:$src,VPR64:$Rn, VPR64:$Rm)),
- (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
- def : Pat<(v4i16 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)),
- (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
- def : Pat<(v1i64 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)),
- (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
- def : Pat<(v4i32 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
- (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
- def : Pat<(v8i16 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
- (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
- def : Pat<(v2i64 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
- (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
-
- // Allow matching the BSL instruction pattern with a non-constant operand.
- def : Pat<(v8i8 (or (and VPR64:$Rn, VPR64:$Rd),
- (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
- (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
- def : Pat<(v4i16 (or (and VPR64:$Rn, VPR64:$Rd),
- (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
- (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
- def : Pat<(v2i32 (or (and VPR64:$Rn, VPR64:$Rd),
- (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
- (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
- def : Pat<(v1i64 (or (and VPR64:$Rn, VPR64:$Rd),
- (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
- (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
- def : Pat<(v16i8 (or (and VPR128:$Rn, VPR128:$Rd),
- (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
- (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
- def : Pat<(v8i16 (or (and VPR128:$Rn, VPR128:$Rd),
- (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
- (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
- def : Pat<(v4i32 (or (and VPR128:$Rn, VPR128:$Rd),
- (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
- (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
- def : Pat<(v2i64 (or (and VPR128:$Rn, VPR128:$Rd),
- (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
- (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
-
- // Allow matching the llvm.arm.* intrinsics.
- def : Pat<(v8i8 (int_arm_neon_vbsl (v8i8 VPR64:$src),
- (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))),
- (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
- def : Pat<(v4i16 (int_arm_neon_vbsl (v4i16 VPR64:$src),
- (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))),
- (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
- def : Pat<(v2i32 (int_arm_neon_vbsl (v2i32 VPR64:$src),
- (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))),
- (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
- def : Pat<(v1i64 (int_arm_neon_vbsl (v1i64 VPR64:$src),
- (v1i64 VPR64:$Rn), (v1i64 VPR64:$Rm))),
- (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
- def : Pat<(v2f32 (int_arm_neon_vbsl (v2f32 VPR64:$src),
- (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))),
- (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
- def : Pat<(v1f64 (int_arm_neon_vbsl (v1f64 VPR64:$src),
- (v1f64 VPR64:$Rn), (v1f64 VPR64:$Rm))),
- (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
- def : Pat<(v16i8 (int_arm_neon_vbsl (v16i8 VPR128:$src),
- (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))),
- (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
- def : Pat<(v8i16 (int_arm_neon_vbsl (v8i16 VPR128:$src),
- (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))),
- (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
- def : Pat<(v4i32 (int_arm_neon_vbsl (v4i32 VPR128:$src),
- (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))),
- (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
- def : Pat<(v2i64 (int_arm_neon_vbsl (v2i64 VPR128:$src),
- (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))),
- (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
- def : Pat<(v4f32 (int_arm_neon_vbsl (v4f32 VPR128:$src),
- (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))),
- (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
- def : Pat<(v2f64 (int_arm_neon_vbsl (v2f64 VPR128:$src),
- (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))),
- (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
-}
-
-// Additional patterns for bitwise instruction BSL
-defm : Neon_bitwise3V_patterns<Neon_bsl, BSLvvv_8B, BSLvvv_16B>;
-
-def Neon_NoBSLop : PatFrag<(ops node:$src, node:$Rn, node:$Rm),
- (Neon_bsl node:$src, node:$Rn, node:$Rm),
- [{ (void)N; return false; }]>;
-
-// Vector Bitwise Insert if True
-
-def BITvvv_8B : NeonI_3VSame_Constraint_impl<"bit", ".8b", VPR64, v8i8,
- 0b0, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
-def BITvvv_16B : NeonI_3VSame_Constraint_impl<"bit", ".16b", VPR128, v16i8,
- 0b1, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
-
-// Vector Bitwise Insert if False
-
-def BIFvvv_8B : NeonI_3VSame_Constraint_impl<"bif", ".8b", VPR64, v8i8,
- 0b0, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
-def BIFvvv_16B : NeonI_3VSame_Constraint_impl<"bif", ".16b", VPR128, v16i8,
- 0b1, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
-
-// Vector Absolute Difference and Accumulate (Signed, Unsigned)
-
-def Neon_uaba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
- (add node:$Ra, (int_arm_neon_vabdu node:$Rn, node:$Rm))>;
-def Neon_saba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
- (add node:$Ra, (int_arm_neon_vabds node:$Rn, node:$Rm))>;
-
-// Vector Absolute Difference and Accumulate (Unsigned)
-def UABAvvv_8B : NeonI_3VSame_Constraint_impl<"uaba", ".8b", VPR64, v8i8,
- 0b0, 0b1, 0b00, 0b01111, Neon_uaba>;
-def UABAvvv_16B : NeonI_3VSame_Constraint_impl<"uaba", ".16b", VPR128, v16i8,
- 0b1, 0b1, 0b00, 0b01111, Neon_uaba>;
-def UABAvvv_4H : NeonI_3VSame_Constraint_impl<"uaba", ".4h", VPR64, v4i16,
- 0b0, 0b1, 0b01, 0b01111, Neon_uaba>;
-def UABAvvv_8H : NeonI_3VSame_Constraint_impl<"uaba", ".8h", VPR128, v8i16,
- 0b1, 0b1, 0b01, 0b01111, Neon_uaba>;
-def UABAvvv_2S : NeonI_3VSame_Constraint_impl<"uaba", ".2s", VPR64, v2i32,
- 0b0, 0b1, 0b10, 0b01111, Neon_uaba>;
-def UABAvvv_4S : NeonI_3VSame_Constraint_impl<"uaba", ".4s", VPR128, v4i32,
- 0b1, 0b1, 0b10, 0b01111, Neon_uaba>;
-
-// Vector Absolute Difference and Accumulate (Signed)
-def SABAvvv_8B : NeonI_3VSame_Constraint_impl<"saba", ".8b", VPR64, v8i8,
- 0b0, 0b0, 0b00, 0b01111, Neon_saba>;
-def SABAvvv_16B : NeonI_3VSame_Constraint_impl<"saba", ".16b", VPR128, v16i8,
- 0b1, 0b0, 0b00, 0b01111, Neon_saba>;
-def SABAvvv_4H : NeonI_3VSame_Constraint_impl<"saba", ".4h", VPR64, v4i16,
- 0b0, 0b0, 0b01, 0b01111, Neon_saba>;
-def SABAvvv_8H : NeonI_3VSame_Constraint_impl<"saba", ".8h", VPR128, v8i16,
- 0b1, 0b0, 0b01, 0b01111, Neon_saba>;
-def SABAvvv_2S : NeonI_3VSame_Constraint_impl<"saba", ".2s", VPR64, v2i32,
- 0b0, 0b0, 0b10, 0b01111, Neon_saba>;
-def SABAvvv_4S : NeonI_3VSame_Constraint_impl<"saba", ".4s", VPR128, v4i32,
- 0b1, 0b0, 0b10, 0b01111, Neon_saba>;
-
-
-// Vector Absolute Difference (Signed, Unsigned)
-defm UABDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01110, "uabd", int_arm_neon_vabdu, 0>;
-defm SABDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01110, "sabd", int_arm_neon_vabds, 0>;
-
-// Vector Absolute Difference (Floating Point)
-defm FABDvvv: NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11010, "fabd",
- int_arm_neon_vabds, int_arm_neon_vabds,
- int_arm_neon_vabds, v2f32, v4f32, v2f64, 0>;
-
-// Vector Reciprocal Step (Floating Point)
-defm FRECPSvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11111, "frecps",
- int_arm_neon_vrecps, int_arm_neon_vrecps,
- int_arm_neon_vrecps,
- v2f32, v4f32, v2f64, 0>;
-
-// Vector Reciprocal Square Root Step (Floating Point)
-defm FRSQRTSvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11111, "frsqrts",
- int_arm_neon_vrsqrts,
- int_arm_neon_vrsqrts,
- int_arm_neon_vrsqrts,
- v2f32, v4f32, v2f64, 0>;
-
-// Vector Comparisons
-
-def Neon_cmeq : PatFrag<(ops node:$lhs, node:$rhs),
- (Neon_cmp node:$lhs, node:$rhs, SETEQ)>;
-def Neon_cmphs : PatFrag<(ops node:$lhs, node:$rhs),
- (Neon_cmp node:$lhs, node:$rhs, SETUGE)>;
-def Neon_cmge : PatFrag<(ops node:$lhs, node:$rhs),
- (Neon_cmp node:$lhs, node:$rhs, SETGE)>;
-def Neon_cmhi : PatFrag<(ops node:$lhs, node:$rhs),
- (Neon_cmp node:$lhs, node:$rhs, SETUGT)>;
-def Neon_cmgt : PatFrag<(ops node:$lhs, node:$rhs),
- (Neon_cmp node:$lhs, node:$rhs, SETGT)>;
-
-// NeonI_compare_aliases class: swaps the register operands to implement
-// comparison aliases, e.g., CMLE is an alias for CMGE with the operands reversed.
-class NeonI_compare_aliases<string asmop, string asmlane,
- Instruction inst, RegisterOperand VPRC>
- : NeonInstAlias<asmop # "\t$Rd" # asmlane #", $Rn" # asmlane #
- ", $Rm" # asmlane,
- (inst VPRC:$Rd, VPRC:$Rm, VPRC:$Rn), 0b0>;
-
-// Vector Comparisons (Integer)
-
-// Vector Compare Mask Equal (Integer)
-let isCommutable = 1 in {
-defm CMEQvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b10001, "cmeq", Neon_cmeq, 0>;
-}
-
-// Vector Compare Mask Higher or Same (Unsigned Integer)
-defm CMHSvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00111, "cmhs", Neon_cmphs, 0>;
-
-// Vector Compare Mask Greater Than or Equal (Integer)
-defm CMGEvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00111, "cmge", Neon_cmge, 0>;
-
-// Vector Compare Mask Higher (Unsigned Integer)
-defm CMHIvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00110, "cmhi", Neon_cmhi, 0>;
-
-// Vector Compare Mask Greater Than (Integer)
-defm CMGTvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00110, "cmgt", Neon_cmgt, 0>;
-
-// Vector Compare Mask Bitwise Test (Integer)
-defm CMTSTvvv: NeonI_3VSame_BHSD_sizes<0b0, 0b10001, "cmtst", Neon_tst, 0>;
-
-// Vector Compare Mask Less or Same (Unsigned Integer)
-// CMLS is an alias for CMHS with the operands reversed.
-def CMLSvvv_8B : NeonI_compare_aliases<"cmls", ".8b", CMHSvvv_8B, VPR64>;
-def CMLSvvv_16B : NeonI_compare_aliases<"cmls", ".16b", CMHSvvv_16B, VPR128>;
-def CMLSvvv_4H : NeonI_compare_aliases<"cmls", ".4h", CMHSvvv_4H, VPR64>;
-def CMLSvvv_8H : NeonI_compare_aliases<"cmls", ".8h", CMHSvvv_8H, VPR128>;
-def CMLSvvv_2S : NeonI_compare_aliases<"cmls", ".2s", CMHSvvv_2S, VPR64>;
-def CMLSvvv_4S : NeonI_compare_aliases<"cmls", ".4s", CMHSvvv_4S, VPR128>;
-def CMLSvvv_2D : NeonI_compare_aliases<"cmls", ".2d", CMHSvvv_2D, VPR128>;
-
-// Vector Compare Mask Less Than or Equal (Integer)
-// CMLE is an alias for CMGE with the operands reversed.
-def CMLEvvv_8B : NeonI_compare_aliases<"cmle", ".8b", CMGEvvv_8B, VPR64>;
-def CMLEvvv_16B : NeonI_compare_aliases<"cmle", ".16b", CMGEvvv_16B, VPR128>;
-def CMLEvvv_4H : NeonI_compare_aliases<"cmle", ".4h", CMGEvvv_4H, VPR64>;
-def CMLEvvv_8H : NeonI_compare_aliases<"cmle", ".8h", CMGEvvv_8H, VPR128>;
-def CMLEvvv_2S : NeonI_compare_aliases<"cmle", ".2s", CMGEvvv_2S, VPR64>;
-def CMLEvvv_4S : NeonI_compare_aliases<"cmle", ".4s", CMGEvvv_4S, VPR128>;
-def CMLEvvv_2D : NeonI_compare_aliases<"cmle", ".2d", CMGEvvv_2D, VPR128>;
-
-// Vector Compare Mask Lower (Unsigned Integer)
-// CMLO is an alias for CMHI with the operands reversed.
-def CMLOvvv_8B : NeonI_compare_aliases<"cmlo", ".8b", CMHIvvv_8B, VPR64>;
-def CMLOvvv_16B : NeonI_compare_aliases<"cmlo", ".16b", CMHIvvv_16B, VPR128>;
-def CMLOvvv_4H : NeonI_compare_aliases<"cmlo", ".4h", CMHIvvv_4H, VPR64>;
-def CMLOvvv_8H : NeonI_compare_aliases<"cmlo", ".8h", CMHIvvv_8H, VPR128>;
-def CMLOvvv_2S : NeonI_compare_aliases<"cmlo", ".2s", CMHIvvv_2S, VPR64>;
-def CMLOvvv_4S : NeonI_compare_aliases<"cmlo", ".4s", CMHIvvv_4S, VPR128>;
-def CMLOvvv_2D : NeonI_compare_aliases<"cmlo", ".2d", CMHIvvv_2D, VPR128>;
-
-// Vector Compare Mask Less Than (Integer)
-// CMLT is an alias for CMGT with the operands reversed.
-def CMLTvvv_8B : NeonI_compare_aliases<"cmlt", ".8b", CMGTvvv_8B, VPR64>;
-def CMLTvvv_16B : NeonI_compare_aliases<"cmlt", ".16b", CMGTvvv_16B, VPR128>;
-def CMLTvvv_4H : NeonI_compare_aliases<"cmlt", ".4h", CMGTvvv_4H, VPR64>;
-def CMLTvvv_8H : NeonI_compare_aliases<"cmlt", ".8h", CMGTvvv_8H, VPR128>;
-def CMLTvvv_2S : NeonI_compare_aliases<"cmlt", ".2s", CMGTvvv_2S, VPR64>;
-def CMLTvvv_4S : NeonI_compare_aliases<"cmlt", ".4s", CMGTvvv_4S, VPR128>;
-def CMLTvvv_2D : NeonI_compare_aliases<"cmlt", ".2d", CMGTvvv_2D, VPR128>;
-
-
-def neon_uimm0_asmoperand : AsmOperandClass
-{
- let Name = "UImm0";
- let PredicateMethod = "isUImm<0>";
- let RenderMethod = "addImmOperands";
-}
-
-def neon_uimm0 : Operand<i32>, ImmLeaf<i32, [{return Imm == 0;}]> {
- let ParserMatchClass = neon_uimm0_asmoperand;
- let PrintMethod = "printNeonUImm0Operand";
-
-}
-
-multiclass NeonI_cmpz_sizes<bit u, bits<5> opcode, string asmop, CondCode CC>
-{
- def _8B : NeonI_2VMisc<0b0, u, 0b00, opcode,
- (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
- asmop # "\t$Rd.8b, $Rn.8b, $Imm",
- [(set (v8i8 VPR64:$Rd),
- (v8i8 (Neon_cmpz (v8i8 VPR64:$Rn), (i32 imm:$Imm), CC)))],
- NoItinerary>;
-
- def _16B : NeonI_2VMisc<0b1, u, 0b00, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
- asmop # "\t$Rd.16b, $Rn.16b, $Imm",
- [(set (v16i8 VPR128:$Rd),
- (v16i8 (Neon_cmpz (v16i8 VPR128:$Rn), (i32 imm:$Imm), CC)))],
- NoItinerary>;
-
- def _4H : NeonI_2VMisc<0b0, u, 0b01, opcode,
- (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
- asmop # "\t$Rd.4h, $Rn.4h, $Imm",
- [(set (v4i16 VPR64:$Rd),
- (v4i16 (Neon_cmpz (v4i16 VPR64:$Rn), (i32 imm:$Imm), CC)))],
- NoItinerary>;
-
- def _8H : NeonI_2VMisc<0b1, u, 0b01, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
- asmop # "\t$Rd.8h, $Rn.8h, $Imm",
- [(set (v8i16 VPR128:$Rd),
- (v8i16 (Neon_cmpz (v8i16 VPR128:$Rn), (i32 imm:$Imm), CC)))],
- NoItinerary>;
-
- def _2S : NeonI_2VMisc<0b0, u, 0b10, opcode,
- (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
- asmop # "\t$Rd.2s, $Rn.2s, $Imm",
- [(set (v2i32 VPR64:$Rd),
- (v2i32 (Neon_cmpz (v2i32 VPR64:$Rn), (i32 imm:$Imm), CC)))],
- NoItinerary>;
-
- def _4S : NeonI_2VMisc<0b1, u, 0b10, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
- asmop # "\t$Rd.4s, $Rn.4s, $Imm",
- [(set (v4i32 VPR128:$Rd),
- (v4i32 (Neon_cmpz (v4i32 VPR128:$Rn), (i32 imm:$Imm), CC)))],
- NoItinerary>;
-
- def _2D : NeonI_2VMisc<0b1, u, 0b11, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
- asmop # "\t$Rd.2d, $Rn.2d, $Imm",
- [(set (v2i64 VPR128:$Rd),
- (v2i64 (Neon_cmpz (v2i64 VPR128:$Rn), (i32 imm:$Imm), CC)))],
- NoItinerary>;
-}
-
-// Vector Compare Mask Equal to Zero (Integer)
-defm CMEQvvi : NeonI_cmpz_sizes<0b0, 0b01001, "cmeq", SETEQ>;
-
-// Vector Compare Mask Greater Than or Equal to Zero (Signed Integer)
-defm CMGEvvi : NeonI_cmpz_sizes<0b1, 0b01000, "cmge", SETGE>;
-
-// Vector Compare Mask Greater Than Zero (Signed Integer)
-defm CMGTvvi : NeonI_cmpz_sizes<0b0, 0b01000, "cmgt", SETGT>;
-
-// Vector Compare Mask Less Than or Equal To Zero (Signed Integer)
-defm CMLEvvi : NeonI_cmpz_sizes<0b1, 0b01001, "cmle", SETLE>;
-
-// Vector Compare Mask Less Than Zero (Signed Integer)
-defm CMLTvvi : NeonI_cmpz_sizes<0b0, 0b01010, "cmlt", SETLT>;
-
-// Vector Comparisons (Floating Point)
-
-// Vector Compare Mask Equal (Floating Point)
-let isCommutable = 1 in {
-defm FCMEQvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11100, "fcmeq", Neon_cmeq,
- Neon_cmeq, Neon_cmeq,
- v2i32, v4i32, v2i64, 0>;
-}
-
-// Vector Compare Mask Greater Than Or Equal (Floating Point)
-defm FCMGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11100, "fcmge", Neon_cmge,
- Neon_cmge, Neon_cmge,
- v2i32, v4i32, v2i64, 0>;
-
-// Vector Compare Mask Greater Than (Floating Point)
-defm FCMGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11100, "fcmgt", Neon_cmgt,
- Neon_cmgt, Neon_cmgt,
- v2i32, v4i32, v2i64, 0>;
-
-// Vector Compare Mask Less Than Or Equal (Floating Point)
-// FCMLE is an alias for FCMGE with the operands reversed.
-def FCMLEvvv_2S : NeonI_compare_aliases<"fcmle", ".2s", FCMGEvvv_2S, VPR64>;
-def FCMLEvvv_4S : NeonI_compare_aliases<"fcmle", ".4s", FCMGEvvv_4S, VPR128>;
-def FCMLEvvv_2D : NeonI_compare_aliases<"fcmle", ".2d", FCMGEvvv_2D, VPR128>;
-
-// Vector Compare Mask Less Than (Floating Point)
-// FCMLT is an alias for FCMGT with the operands reversed.
-def FCMLTvvv_2S : NeonI_compare_aliases<"fcmlt", ".2s", FCMGTvvv_2S, VPR64>;
-def FCMLTvvv_4S : NeonI_compare_aliases<"fcmlt", ".4s", FCMGTvvv_4S, VPR128>;
-def FCMLTvvv_2D : NeonI_compare_aliases<"fcmlt", ".2d", FCMGTvvv_2D, VPR128>;
-
-
-multiclass NeonI_fpcmpz_sizes<bit u, bit size, bits<5> opcode,
- string asmop, CondCode CC>
-{
- def _2S : NeonI_2VMisc<0b0, u, {size, 0b0}, opcode,
- (outs VPR64:$Rd), (ins VPR64:$Rn, fpz32:$FPImm),
- asmop # "\t$Rd.2s, $Rn.2s, $FPImm",
- [(set (v2i32 VPR64:$Rd),
- (v2i32 (Neon_cmpz (v2f32 VPR64:$Rn), (f32 fpimm:$FPImm), CC)))],
- NoItinerary>;
-
- def _4S : NeonI_2VMisc<0b1, u, {size, 0b0}, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn, fpz32:$FPImm),
- asmop # "\t$Rd.4s, $Rn.4s, $FPImm",
- [(set (v4i32 VPR128:$Rd),
- (v4i32 (Neon_cmpz (v4f32 VPR128:$Rn), (f32 fpimm:$FPImm), CC)))],
- NoItinerary>;
-
- def _2D : NeonI_2VMisc<0b1, u, {size, 0b1}, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn, fpz32:$FPImm),
- asmop # "\t$Rd.2d, $Rn.2d, $FPImm",
- [(set (v2i64 VPR128:$Rd),
- (v2i64 (Neon_cmpz (v2f64 VPR128:$Rn), (f32 fpimm:$FPImm), CC)))],
- NoItinerary>;
-}
-
-// Vector Compare Mask Equal to Zero (Floating Point)
-defm FCMEQvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01101, "fcmeq", SETEQ>;
-
-// Vector Compare Mask Greater Than or Equal to Zero (Floating Point)
-defm FCMGEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01100, "fcmge", SETGE>;
-
-// Vector Compare Mask Greater Than Zero (Floating Point)
-defm FCMGTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01100, "fcmgt", SETGT>;
-
-// Vector Compare Mask Less Than or Equal To Zero (Floating Point)
-defm FCMLEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01101, "fcmle", SETLE>;
-
-// Vector Compare Mask Less Than Zero (Floating Point)
-defm FCMLTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01110, "fcmlt", SETLT>;
-
-// Vector Absolute Comparisons (Floating Point)
-
-// Vector Absolute Compare Mask Greater Than Or Equal (Floating Point)
-defm FACGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11101, "facge",
- int_arm_neon_vacged, int_arm_neon_vacgeq,
- int_aarch64_neon_vacgeq,
- v2i32, v4i32, v2i64, 0>;
-
-// Vector Absolute Compare Mask Greater Than (Floating Point)
-defm FACGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11101, "facgt",
- int_arm_neon_vacgtd, int_arm_neon_vacgtq,
- int_aarch64_neon_vacgtq,
- v2i32, v4i32, v2i64, 0>;
-
-// Vector Absolute Compare Mask Less Than Or Equal (Floating Point)
-// FACLE is an alias for FACGE with the operands reversed.
-def FACLEvvv_2S : NeonI_compare_aliases<"facle", ".2s", FACGEvvv_2S, VPR64>;
-def FACLEvvv_4S : NeonI_compare_aliases<"facle", ".4s", FACGEvvv_4S, VPR128>;
-def FACLEvvv_2D : NeonI_compare_aliases<"facle", ".2d", FACGEvvv_2D, VPR128>;
-
-// Vector Absolute Compare Mask Less Than (Floating Point)
-// FACLT is an alias for FACGT with the operands reversed.
-def FACLTvvv_2S : NeonI_compare_aliases<"faclt", ".2s", FACGTvvv_2S, VPR64>;
-def FACLTvvv_4S : NeonI_compare_aliases<"faclt", ".4s", FACGTvvv_4S, VPR128>;
-def FACLTvvv_2D : NeonI_compare_aliases<"faclt", ".2d", FACGTvvv_2D, VPR128>;
-
-// Vector halving add (Integer Signed, Unsigned)
-defm SHADDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b00000, "shadd",
- int_arm_neon_vhadds, 1>;
-defm UHADDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b00000, "uhadd",
- int_arm_neon_vhaddu, 1>;
-
-// Vector halving sub (Integer Signed, Unsigned)
-defm SHSUBvvv : NeonI_3VSame_BHS_sizes<0b0, 0b00100, "shsub",
- int_arm_neon_vhsubs, 0>;
-defm UHSUBvvv : NeonI_3VSame_BHS_sizes<0b1, 0b00100, "uhsub",
- int_arm_neon_vhsubu, 0>;
-
-// Vector rounding halving add (Integer Signed, Unsigned)
-defm SRHADDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b00010, "srhadd",
- int_arm_neon_vrhadds, 1>;
-defm URHADDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b00010, "urhadd",
- int_arm_neon_vrhaddu, 1>;
-
-// Vector Saturating add (Integer Signed, Unsigned)
-defm SQADDvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00001, "sqadd",
- int_arm_neon_vqadds, 1>;
-defm UQADDvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00001, "uqadd",
- int_arm_neon_vqaddu, 1>;
-
-// Vector Saturating sub (Integer Signed, Unsigned)
-defm SQSUBvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00101, "sqsub",
- int_arm_neon_vqsubs, 1>;
-defm UQSUBvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00101, "uqsub",
- int_arm_neon_vqsubu, 1>;
-
-// Vector Shift Left (Signed and Unsigned Integer)
-defm SSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01000, "sshl",
- int_arm_neon_vshifts, 1>;
-defm USHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01000, "ushl",
- int_arm_neon_vshiftu, 1>;
-
-// Vector Saturating Shift Left (Signed and Unsigned Integer)
-defm SQSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01001, "sqshl",
- int_arm_neon_vqshifts, 1>;
-defm UQSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01001, "uqshl",
- int_arm_neon_vqshiftu, 1>;
-
-// Vector Rounding Shift Left (Signed and Unsigned Integer)
-defm SRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01010, "srshl",
- int_arm_neon_vrshifts, 1>;
-defm URSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01010, "urshl",
- int_arm_neon_vrshiftu, 1>;
-
-// Vector Saturating Rounding Shift Left (Signed and Unsigned Integer)
-defm SQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01011, "sqrshl",
- int_arm_neon_vqrshifts, 1>;
-defm UQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01011, "uqrshl",
- int_arm_neon_vqrshiftu, 1>;
-
-// Vector Maximum (Signed and Unsigned Integer)
-defm SMAXvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01100, "smax", int_arm_neon_vmaxs, 1>;
-defm UMAXvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01100, "umax", int_arm_neon_vmaxu, 1>;
-
-// Vector Minimum (Signed and Unsigned Integer)
-defm SMINvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01101, "smin", int_arm_neon_vmins, 1>;
-defm UMINvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01101, "umin", int_arm_neon_vminu, 1>;
-
-// Vector Maximum (Floating Point)
-defm FMAXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11110, "fmax",
- int_arm_neon_vmaxs, int_arm_neon_vmaxs,
- int_arm_neon_vmaxs, v2f32, v4f32, v2f64, 1>;
-
-// Vector Minimum (Floating Point)
-defm FMINvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11110, "fmin",
- int_arm_neon_vmins, int_arm_neon_vmins,
- int_arm_neon_vmins, v2f32, v4f32, v2f64, 1>;
-
-// Vector maxNum (Floating Point) - prefer a number over a quiet NaN
-defm FMAXNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11000, "fmaxnm",
- int_aarch64_neon_vmaxnm,
- int_aarch64_neon_vmaxnm,
- int_aarch64_neon_vmaxnm,
- v2f32, v4f32, v2f64, 1>;
-
-// Vector minNum (Floating Point) - prefer a number over a quiet NaN
-defm FMINNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11000, "fminnm",
- int_aarch64_neon_vminnm,
- int_aarch64_neon_vminnm,
- int_aarch64_neon_vminnm,
- v2f32, v4f32, v2f64, 1>;
-
-// Vector Maximum Pairwise (Signed and Unsigned Integer)
-defm SMAXPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10100, "smaxp", int_arm_neon_vpmaxs, 1>;
-defm UMAXPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10100, "umaxp", int_arm_neon_vpmaxu, 1>;
-
-// Vector Minimum Pairwise (Signed and Unsigned Integer)
-defm SMINPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10101, "sminp", int_arm_neon_vpmins, 1>;
-defm UMINPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10101, "uminp", int_arm_neon_vpminu, 1>;
-
-// Vector Maximum Pairwise (Floating Point)
-defm FMAXPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11110, "fmaxp",
- int_arm_neon_vpmaxs, int_arm_neon_vpmaxs,
- int_arm_neon_vpmaxs, v2f32, v4f32, v2f64, 1>;
-
-// Vector Minimum Pairwise (Floating Point)
-defm FMINPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11110, "fminp",
- int_arm_neon_vpmins, int_arm_neon_vpmins,
- int_arm_neon_vpmins, v2f32, v4f32, v2f64, 1>;
-
-// Vector maxNum Pairwise (Floating Point) - prefer a number over a quiet NaN
-defm FMAXNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11000, "fmaxnmp",
- int_aarch64_neon_vpmaxnm,
- int_aarch64_neon_vpmaxnm,
- int_aarch64_neon_vpmaxnm,
- v2f32, v4f32, v2f64, 1>;
-
-// Vector minNum Pairwise (Floating Point) - prefer a number over a quiet NaN
-defm FMINNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11000, "fminnmp",
- int_aarch64_neon_vpminnm,
- int_aarch64_neon_vpminnm,
- int_aarch64_neon_vpminnm,
- v2f32, v4f32, v2f64, 1>;
-
-// Vector Addition Pairwise (Integer)
-defm ADDP : NeonI_3VSame_BHSD_sizes<0b0, 0b10111, "addp", int_arm_neon_vpadd, 1>;
-
-// Vector Addition Pairwise (Floating Point)
-defm FADDP : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11010, "faddp",
- int_arm_neon_vpadd,
- int_arm_neon_vpadd,
- int_arm_neon_vpadd,
- v2f32, v4f32, v2f64, 1>;
-
-// Vector Saturating Doubling Multiply High
-defm SQDMULHvvv : NeonI_3VSame_HS_sizes<0b0, 0b10110, "sqdmulh",
- int_arm_neon_vqdmulh, 1>;
-
-// Vector Saturating Rounding Doubling Multiply High
-defm SQRDMULHvvv : NeonI_3VSame_HS_sizes<0b1, 0b10110, "sqrdmulh",
- int_arm_neon_vqrdmulh, 1>;
-
-// Vector Multiply Extended (Floating Point)
-defm FMULXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11011, "fmulx",
- int_aarch64_neon_vmulx,
- int_aarch64_neon_vmulx,
- int_aarch64_neon_vmulx,
- v2f32, v4f32, v2f64, 1>;
-
-// Vector Immediate Instructions
-
-multiclass neon_mov_imm_shift_asmoperands<string PREFIX>
-{
- def _asmoperand : AsmOperandClass
- {
- let Name = "NeonMovImmShift" # PREFIX;
- let RenderMethod = "addNeonMovImmShift" # PREFIX # "Operands";
- let PredicateMethod = "isNeonMovImmShift" # PREFIX;
- }
-}
-
-// Definitions of the vector immediate shift operands
-
-// The selectable use-cases extract the shift operation
-// information from the OpCmode fields encoded in the immediate.
-def neon_mod_shift_imm_XFORM : SDNodeXForm<imm, [{
- uint64_t OpCmode = N->getZExtValue();
- unsigned ShiftImm;
- unsigned ShiftOnesIn;
- unsigned HasShift =
- A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
- if (!HasShift) return SDValue();
- return CurDAG->getTargetConstant(ShiftImm, MVT::i32);
-}]>;
-
-// Vector immediate shift operands, which accept LSL and MSL shift
-// operators with a shift amount of 0, 8, 16, or 24 (LSL), 0 or 8 (LSLH),
-// or 8 or 16 (MSL).
-defm neon_mov_imm_LSL : neon_mov_imm_shift_asmoperands<"LSL">;
-defm neon_mov_imm_MSL : neon_mov_imm_shift_asmoperands<"MSL">;
-// LSLH restricts the shift amount to 0 or 8 (out of 0, 8, 16, 24).
-defm neon_mov_imm_LSLH : neon_mov_imm_shift_asmoperands<"LSLH">;
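
For illustration, a small sketch of the difference between the two shift kinds accepted by these operands, assuming the usual AdvSIMD semantics (LSL shifts in zeros, MSL shifts in ones):

  #include <cstdint>

  // Expand an 8-bit MOVI/MVNI immediate into a 32-bit element for the two
  // shift operators described above.
  uint32_t expandLsl(uint8_t Imm8, unsigned Shift) {   // Shift in {0, 8, 16, 24}
    return static_cast<uint32_t>(Imm8) << Shift;
  }
  uint32_t expandMsl(uint8_t Imm8, unsigned Shift) {   // Shift in {8, 16}
    return (static_cast<uint32_t>(Imm8) << Shift) | ((1u << Shift) - 1);
  }
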
-
-multiclass neon_mov_imm_shift_operands<string PREFIX,
- string HALF, string ISHALF, code pred>
-{
- def _operand : Operand<i32>, ImmLeaf<i32, pred, neon_mod_shift_imm_XFORM>
- {
- let PrintMethod =
- "printNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
- let DecoderMethod =
- "DecodeNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
- let ParserMatchClass =
- !cast<AsmOperandClass>("neon_mov_imm_" # PREFIX # HALF # "_asmoperand");
- }
-}
-
-defm neon_mov_imm_LSL : neon_mov_imm_shift_operands<"LSL", "", "false", [{
- unsigned ShiftImm;
- unsigned ShiftOnesIn;
- unsigned HasShift =
- A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
- return (HasShift && !ShiftOnesIn);
-}]>;
-
-defm neon_mov_imm_MSL : neon_mov_imm_shift_operands<"MSL", "", "false", [{
- unsigned ShiftImm;
- unsigned ShiftOnesIn;
- unsigned HasShift =
- A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
- return (HasShift && ShiftOnesIn);
-}]>;
-
-defm neon_mov_imm_LSLH : neon_mov_imm_shift_operands<"LSL", "H", "true", [{
- unsigned ShiftImm;
- unsigned ShiftOnesIn;
- unsigned HasShift =
- A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
- return (HasShift && !ShiftOnesIn);
-}]>;
-
-def neon_uimm1_asmoperand : AsmOperandClass
-{
- let Name = "UImm1";
- let PredicateMethod = "isUImm<1>";
- let RenderMethod = "addImmOperands";
-}
-
-def neon_uimm2_asmoperand : AsmOperandClass
-{
- let Name = "UImm2";
- let PredicateMethod = "isUImm<2>";
- let RenderMethod = "addImmOperands";
-}
-
-def neon_uimm8_asmoperand : AsmOperandClass
-{
- let Name = "UImm8";
- let PredicateMethod = "isUImm<8>";
- let RenderMethod = "addImmOperands";
-}
-
-def neon_uimm8 : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
- let ParserMatchClass = neon_uimm8_asmoperand;
- let PrintMethod = "printUImmHexOperand";
-}
-
-def neon_uimm64_mask_asmoperand : AsmOperandClass
-{
- let Name = "NeonUImm64Mask";
- let PredicateMethod = "isNeonUImm64Mask";
- let RenderMethod = "addNeonUImm64MaskOperands";
-}
-
-// The MCOperand for a 64-bit bytemask, in which each byte is either 0x00
-// or 0xff, is encoded as an unsigned 8-bit value.
-def neon_uimm64_mask : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
- let ParserMatchClass = neon_uimm64_mask_asmoperand;
- let PrintMethod = "printNeonUImm64MaskOperand";
-}
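
A minimal sketch of that bytemask encoding, assuming the ARM ARM cmode == 0b1110, op == 1 convention: bit i of the 8-bit operand selects whether byte i of the 64-bit constant is 0xff or 0x00.

  #include <cstdint>

  // Expand the unsigned 8-bit operand back into the 64-bit bytemask.
  uint64_t expandUImm64Mask(uint8_t Imm8) {
    uint64_t Result = 0;
    for (int Bit = 0; Bit < 8; ++Bit)
      if (Imm8 & (1u << Bit))
        Result |= 0xffull << (8 * Bit);
    return Result; // e.g. 0x55 -> 0x00ff00ff00ff00ff
  }
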
-
-multiclass NeonI_mov_imm_lsl_sizes<string asmop, bit op,
- SDPatternOperator opnode>
-{
- // shift zeros, per word
- def _2S : NeonI_1VModImm<0b0, op,
- (outs VPR64:$Rd),
- (ins neon_uimm8:$Imm,
- neon_mov_imm_LSL_operand:$Simm),
- !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
- [(set (v2i32 VPR64:$Rd),
- (v2i32 (opnode (timm:$Imm),
- (neon_mov_imm_LSL_operand:$Simm))))],
- NoItinerary> {
- bits<2> Simm;
- let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
- }
-
- def _4S : NeonI_1VModImm<0b1, op,
- (outs VPR128:$Rd),
- (ins neon_uimm8:$Imm,
- neon_mov_imm_LSL_operand:$Simm),
- !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
- [(set (v4i32 VPR128:$Rd),
- (v4i32 (opnode (timm:$Imm),
- (neon_mov_imm_LSL_operand:$Simm))))],
- NoItinerary> {
- bits<2> Simm;
- let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
- }
-
- // shift zeros, per halfword
- def _4H : NeonI_1VModImm<0b0, op,
- (outs VPR64:$Rd),
- (ins neon_uimm8:$Imm,
- neon_mov_imm_LSLH_operand:$Simm),
- !strconcat(asmop, "\t$Rd.4h, $Imm$Simm"),
- [(set (v4i16 VPR64:$Rd),
- (v4i16 (opnode (timm:$Imm),
- (neon_mov_imm_LSLH_operand:$Simm))))],
- NoItinerary> {
- bit Simm;
- let cmode = {0b1, 0b0, Simm, 0b0};
- }
-
- def _8H : NeonI_1VModImm<0b1, op,
- (outs VPR128:$Rd),
- (ins neon_uimm8:$Imm,
- neon_mov_imm_LSLH_operand:$Simm),
- !strconcat(asmop, "\t$Rd.8h, $Imm$Simm"),
- [(set (v8i16 VPR128:$Rd),
- (v8i16 (opnode (timm:$Imm),
- (neon_mov_imm_LSLH_operand:$Simm))))],
- NoItinerary> {
- bit Simm;
- let cmode = {0b1, 0b0, Simm, 0b0};
- }
-}
-
-multiclass NeonI_mov_imm_with_constraint_lsl_sizes<string asmop, bit op,
- SDPatternOperator opnode,
- SDPatternOperator neonopnode>
-{
- let Constraints = "$src = $Rd" in {
- // shift zeros, per word
- def _2S : NeonI_1VModImm<0b0, op,
- (outs VPR64:$Rd),
- (ins VPR64:$src, neon_uimm8:$Imm,
- neon_mov_imm_LSL_operand:$Simm),
- !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
- [(set (v2i32 VPR64:$Rd),
- (v2i32 (opnode (v2i32 VPR64:$src),
- (v2i32 (bitconvert (v2i32 (neonopnode timm:$Imm,
- neon_mov_imm_LSL_operand:$Simm)))))))],
- NoItinerary> {
- bits<2> Simm;
- let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
- }
-
- def _4S : NeonI_1VModImm<0b1, op,
- (outs VPR128:$Rd),
- (ins VPR128:$src, neon_uimm8:$Imm,
- neon_mov_imm_LSL_operand:$Simm),
- !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
- [(set (v4i32 VPR128:$Rd),
- (v4i32 (opnode (v4i32 VPR128:$src),
- (v4i32 (bitconvert (v4i32 (neonopnode timm:$Imm,
- neon_mov_imm_LSL_operand:$Simm)))))))],
- NoItinerary> {
- bits<2> Simm;
- let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
- }
-
- // shift zeros, per halfword
- def _4H : NeonI_1VModImm<0b0, op,
- (outs VPR64:$Rd),
- (ins VPR64:$src, neon_uimm8:$Imm,
- neon_mov_imm_LSLH_operand:$Simm),
- !strconcat(asmop, "\t$Rd.4h, $Imm$Simm"),
- [(set (v4i16 VPR64:$Rd),
- (v4i16 (opnode (v4i16 VPR64:$src),
- (v4i16 (bitconvert (v4i16 (neonopnode timm:$Imm,
- neon_mov_imm_LSL_operand:$Simm)))))))],
- NoItinerary> {
- bit Simm;
- let cmode = {0b1, 0b0, Simm, 0b1};
- }
-
- def _8H : NeonI_1VModImm<0b1, op,
- (outs VPR128:$Rd),
- (ins VPR128:$src, neon_uimm8:$Imm,
- neon_mov_imm_LSLH_operand:$Simm),
- !strconcat(asmop, "\t$Rd.8h, $Imm$Simm"),
- [(set (v8i16 VPR128:$Rd),
- (v8i16 (opnode (v8i16 VPR128:$src),
- (v8i16 (bitconvert (v8i16 (neonopnode timm:$Imm,
- neon_mov_imm_LSL_operand:$Simm)))))))],
- NoItinerary> {
- bit Simm;
- let cmode = {0b1, 0b0, Simm, 0b1};
- }
- }
-}
-
-multiclass NeonI_mov_imm_msl_sizes<string asmop, bit op,
- SDPatternOperator opnode>
-{
- // shift ones, per word
- def _2S : NeonI_1VModImm<0b0, op,
- (outs VPR64:$Rd),
- (ins neon_uimm8:$Imm,
- neon_mov_imm_MSL_operand:$Simm),
- !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
- [(set (v2i32 VPR64:$Rd),
- (v2i32 (opnode (timm:$Imm),
- (neon_mov_imm_MSL_operand:$Simm))))],
- NoItinerary> {
- bit Simm;
- let cmode = {0b1, 0b1, 0b0, Simm};
- }
-
- def _4S : NeonI_1VModImm<0b1, op,
- (outs VPR128:$Rd),
- (ins neon_uimm8:$Imm,
- neon_mov_imm_MSL_operand:$Simm),
- !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
- [(set (v4i32 VPR128:$Rd),
- (v4i32 (opnode (timm:$Imm),
- (neon_mov_imm_MSL_operand:$Simm))))],
- NoItinerary> {
- bit Simm;
- let cmode = {0b1, 0b1, 0b0, Simm};
- }
-}
-
-// Vector Move Immediate Shifted
-let isReMaterializable = 1 in {
-defm MOVIvi_lsl : NeonI_mov_imm_lsl_sizes<"movi", 0b0, Neon_movi>;
-}
-
-// Vector Move Inverted Immediate Shifted
-let isReMaterializable = 1 in {
-defm MVNIvi_lsl : NeonI_mov_imm_lsl_sizes<"mvni", 0b1, Neon_mvni>;
-}
-
-// Vector Bitwise Bit Clear (AND NOT) - immediate
-let isReMaterializable = 1 in {
-defm BICvi_lsl : NeonI_mov_imm_with_constraint_lsl_sizes<"bic", 0b1,
- and, Neon_mvni>;
-}
-
-// Vector Bitwise OR - immediate
-
-let isReMaterializable = 1 in {
-defm ORRvi_lsl : NeonI_mov_imm_with_constraint_lsl_sizes<"orr", 0b0,
- or, Neon_movi>;
-}
-
-// Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
-// LowerBUILD_VECTOR favors lowering MOVI over MVNI.
-// Selecting BIC immediate instructions therefore requires additional patterns
-// to transform Neon_movi operands into BIC immediate operands.
-
-def neon_mov_imm_LSLH_transform_XFORM : SDNodeXForm<imm, [{
- uint64_t OpCmode = N->getZExtValue();
- unsigned ShiftImm;
- unsigned ShiftOnesIn;
- (void)A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
- // LSLH restricts the shift amount to 0 or 8, which are encoded as 0 and 1.
- // Transform the encoded shift amount 0 to 1 and 1 to 0.
- return CurDAG->getTargetConstant(!ShiftImm, MVT::i32);
-}]>;
-
-def neon_mov_imm_LSLH_transform_operand
- : ImmLeaf<i32, [{
- unsigned ShiftImm;
- unsigned ShiftOnesIn;
- unsigned HasShift =
- A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
- return (HasShift && !ShiftOnesIn); }],
- neon_mov_imm_LSLH_transform_XFORM>;
-
-// Transform (and A, (4h Neon_movi 0xff)) -> BIC 4h (A, 0x00, LSL 8)
-// Transform (and A, (4h Neon_movi 0xff LSL #8)) -> BIC 4h (A, 0x00)
-def : Pat<(v4i16 (and VPR64:$src,
- (v4i16 (Neon_movi 255, neon_mov_imm_LSLH_transform_operand:$Simm)))),
- (BICvi_lsl_4H VPR64:$src, 0,
- neon_mov_imm_LSLH_transform_operand:$Simm)>;
-
-// Transform (and A, (8h Neon_movi 0xff)) -> BIC 8h (A, 0x00, LSL 8)
-// Transform (and A, (8h Neon_movi 0xff LSL #8)) -> BIC 8h (A, 0x00)
-def : Pat<(v8i16 (and VPR128:$src,
- (v8i16 (Neon_movi 255, neon_mov_imm_LSLH_transform_operand:$Simm)))),
- (BICvi_lsl_8H VPR128:$src, 0,
- neon_mov_imm_LSLH_transform_operand:$Simm)>;
-
-
-multiclass Neon_bitwiseVi_patterns<SDPatternOperator opnode,
- SDPatternOperator neonopnode,
- Instruction INST4H,
- Instruction INST8H> {
- def : Pat<(v8i8 (opnode VPR64:$src,
- (bitconvert(v4i16 (neonopnode timm:$Imm,
- neon_mov_imm_LSLH_operand:$Simm))))),
- (INST4H VPR64:$src, neon_uimm8:$Imm,
- neon_mov_imm_LSLH_operand:$Simm)>;
- def : Pat<(v1i64 (opnode VPR64:$src,
- (bitconvert(v4i16 (neonopnode timm:$Imm,
- neon_mov_imm_LSLH_operand:$Simm))))),
- (INST4H VPR64:$src, neon_uimm8:$Imm,
- neon_mov_imm_LSLH_operand:$Simm)>;
-
- def : Pat<(v16i8 (opnode VPR128:$src,
- (bitconvert(v8i16 (neonopnode timm:$Imm,
- neon_mov_imm_LSLH_operand:$Simm))))),
- (INST8H VPR128:$src, neon_uimm8:$Imm,
- neon_mov_imm_LSLH_operand:$Simm)>;
- def : Pat<(v4i32 (opnode VPR128:$src,
- (bitconvert(v8i16 (neonopnode timm:$Imm,
- neon_mov_imm_LSLH_operand:$Simm))))),
- (INST8H VPR128:$src, neon_uimm8:$Imm,
- neon_mov_imm_LSLH_operand:$Simm)>;
- def : Pat<(v2i64 (opnode VPR128:$src,
- (bitconvert(v8i16 (neonopnode timm:$Imm,
- neon_mov_imm_LSLH_operand:$Simm))))),
- (INST8H VPR128:$src, neon_uimm8:$Imm,
- neon_mov_imm_LSLH_operand:$Simm)>;
-}
-
-// Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
-defm : Neon_bitwiseVi_patterns<or, Neon_mvni, BICvi_lsl_4H, BICvi_lsl_8H>;
-
-// Additional patterns for Vector Bitwise OR - immediate
-defm : Neon_bitwiseVi_patterns<or, Neon_movi, ORRvi_lsl_4H, ORRvi_lsl_8H>;
-
-
-// Vector Move Immediate Masked
-let isReMaterializable = 1 in {
-defm MOVIvi_msl : NeonI_mov_imm_msl_sizes<"movi", 0b0, Neon_movi>;
-}
-
-// Vector Move Inverted Immediate Masked
-let isReMaterializable = 1 in {
-defm MVNIvi_msl : NeonI_mov_imm_msl_sizes<"mvni", 0b1, Neon_mvni>;
-}
-
-class NeonI_mov_imm_lsl_aliases<string asmop, string asmlane,
- Instruction inst, RegisterOperand VPRC>
- : NeonInstAlias<!strconcat(asmop, "\t$Rd," # asmlane # ", $Imm"),
- (inst VPRC:$Rd, neon_uimm8:$Imm, 0), 0b0>;
-
-// Aliases for Vector Move Immediate Shifted
-def : NeonI_mov_imm_lsl_aliases<"movi", ".2s", MOVIvi_lsl_2S, VPR64>;
-def : NeonI_mov_imm_lsl_aliases<"movi", ".4s", MOVIvi_lsl_4S, VPR128>;
-def : NeonI_mov_imm_lsl_aliases<"movi", ".4h", MOVIvi_lsl_4H, VPR64>;
-def : NeonI_mov_imm_lsl_aliases<"movi", ".8h", MOVIvi_lsl_8H, VPR128>;
-
-// Aliases for Vector Move Inverted Immediate Shifted
-def : NeonI_mov_imm_lsl_aliases<"mvni", ".2s", MVNIvi_lsl_2S, VPR64>;
-def : NeonI_mov_imm_lsl_aliases<"mvni", ".4s", MVNIvi_lsl_4S, VPR128>;
-def : NeonI_mov_imm_lsl_aliases<"mvni", ".4h", MVNIvi_lsl_4H, VPR64>;
-def : NeonI_mov_imm_lsl_aliases<"mvni", ".8h", MVNIvi_lsl_8H, VPR128>;
-
-// Aliases for Vector Bitwise Bit Clear (AND NOT) - immediate
-def : NeonI_mov_imm_lsl_aliases<"bic", ".2s", BICvi_lsl_2S, VPR64>;
-def : NeonI_mov_imm_lsl_aliases<"bic", ".4s", BICvi_lsl_4S, VPR128>;
-def : NeonI_mov_imm_lsl_aliases<"bic", ".4h", BICvi_lsl_4H, VPR64>;
-def : NeonI_mov_imm_lsl_aliases<"bic", ".8h", BICvi_lsl_8H, VPR128>;
-
-// Aliases for Vector Bitwise OR - immediate
-def : NeonI_mov_imm_lsl_aliases<"orr", ".2s", ORRvi_lsl_2S, VPR64>;
-def : NeonI_mov_imm_lsl_aliases<"orr", ".4s", ORRvi_lsl_4S, VPR128>;
-def : NeonI_mov_imm_lsl_aliases<"orr", ".4h", ORRvi_lsl_4H, VPR64>;
-def : NeonI_mov_imm_lsl_aliases<"orr", ".8h", ORRvi_lsl_8H, VPR128>;
-
-// Vector Move Immediate - per byte
-let isReMaterializable = 1 in {
-def MOVIvi_8B : NeonI_1VModImm<0b0, 0b0,
- (outs VPR64:$Rd), (ins neon_uimm8:$Imm),
- "movi\t$Rd.8b, $Imm",
- [(set (v8i8 VPR64:$Rd),
- (v8i8 (Neon_movi (timm:$Imm), (i32 imm))))],
- NoItinerary> {
- let cmode = 0b1110;
-}
-
-def MOVIvi_16B : NeonI_1VModImm<0b1, 0b0,
- (outs VPR128:$Rd), (ins neon_uimm8:$Imm),
- "movi\t$Rd.16b, $Imm",
- [(set (v16i8 VPR128:$Rd),
- (v16i8 (Neon_movi (timm:$Imm), (i32 imm))))],
- NoItinerary> {
- let cmode = 0b1110;
-}
-}
-
-// Vector Move Immediate - bytemask, per double word
-let isReMaterializable = 1 in {
-def MOVIvi_2D : NeonI_1VModImm<0b1, 0b1,
- (outs VPR128:$Rd), (ins neon_uimm64_mask:$Imm),
- "movi\t $Rd.2d, $Imm",
- [(set (v2i64 VPR128:$Rd),
- (v2i64 (Neon_movi (timm:$Imm), (i32 imm))))],
- NoItinerary> {
- let cmode = 0b1110;
-}
-}
-
-// Vector Move Immediate - bytemask, one doubleword
-
-let isReMaterializable = 1 in {
-def MOVIdi : NeonI_1VModImm<0b0, 0b1,
- (outs FPR64:$Rd), (ins neon_uimm64_mask:$Imm),
- "movi\t $Rd, $Imm",
- [(set (v1i64 FPR64:$Rd),
- (v1i64 (Neon_movi (timm:$Imm), (i32 imm))))],
- NoItinerary> {
- let cmode = 0b1110;
-}
-}
-
-// Vector Floating Point Move Immediate
-
-class NeonI_FMOV_impl<string asmlane, RegisterOperand VPRC, ValueType OpTy,
- Operand immOpType, bit q, bit op>
- : NeonI_1VModImm<q, op,
- (outs VPRC:$Rd), (ins immOpType:$Imm),
- "fmov\t$Rd" # asmlane # ", $Imm",
- [(set (OpTy VPRC:$Rd),
- (OpTy (Neon_fmovi (timm:$Imm))))],
- NoItinerary> {
- let cmode = 0b1111;
- }
-
-let isReMaterializable = 1 in {
-def FMOVvi_2S : NeonI_FMOV_impl<".2s", VPR64, v2f32, fmov32_operand, 0b0, 0b0>;
-def FMOVvi_4S : NeonI_FMOV_impl<".4s", VPR128, v4f32, fmov32_operand, 0b1, 0b0>;
-def FMOVvi_2D : NeonI_FMOV_impl<".2d", VPR128, v2f64, fmov64_operand, 0b1, 0b1>;
-}
-
-// Vector Shift (Immediate)
-// Immediate in [0, 63]
-def imm0_63 : Operand<i32> {
- let ParserMatchClass = uimm6_asmoperand;
-}
-
-// Shift Right/Left Immediate - The immh:immb field of these shifts is encoded
-// as follows:
-//
-// Offset Encoding
-// 8 immh:immb<6:3> = '0001xxx', <imm> is encoded in immh:immb<2:0>
-// 16 immh:immb<6:4> = '001xxxx', <imm> is encoded in immh:immb<3:0>
-// 32 immh:immb<6:5> = '01xxxxx', <imm> is encoded in immh:immb<4:0>
-// 64 immh:immb<6> = '1xxxxxx', <imm> is encoded in immh:immb<5:0>
-//
-// The shift right immediate amount, in the range 1 to element bits, is computed
-// as Offset - UInt(immh:immb). The shift left immediate amount, in the range 0
-// to element bits - 1, is computed as UInt(immh:immb) - Offset.
-
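
Read against the ARM ARM AdvSIMD shift-by-immediate formulation (with esize the element width in bits), the table above corresponds to the following arithmetic; this is only a worked sketch, not the backend's actual encoder helpers:

  #include <cassert>

  // Encode a shift amount into the 7-bit immh:immb field, assuming
  // shift-right = 2 * esize - UInt(immh:immb) and
  // shift-left  = UInt(immh:immb) - esize.
  unsigned encodeShiftRight(unsigned EltBits, unsigned Shift) { // 1 .. EltBits
    assert(Shift >= 1 && Shift <= EltBits);
    return 2 * EltBits - Shift;  // e.g. 8-bit elements, #3 -> 0b0001101
  }
  unsigned encodeShiftLeft(unsigned EltBits, unsigned Shift) {  // 0 .. EltBits-1
    assert(Shift < EltBits);
    return EltBits + Shift;      // e.g. 8-bit elements, #3 -> 0b0001011
  }
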
-class shr_imm_asmoperands<string OFFSET> : AsmOperandClass {
- let Name = "ShrImm" # OFFSET;
- let RenderMethod = "addImmOperands";
- let DiagnosticType = "ShrImm" # OFFSET;
-}
-
-class shr_imm<string OFFSET> : Operand<i32> {
- let EncoderMethod = "getShiftRightImm" # OFFSET;
- let DecoderMethod = "DecodeShiftRightImm" # OFFSET;
- let ParserMatchClass =
- !cast<AsmOperandClass>("shr_imm" # OFFSET # "_asmoperand");
-}
-
-def shr_imm8_asmoperand : shr_imm_asmoperands<"8">;
-def shr_imm16_asmoperand : shr_imm_asmoperands<"16">;
-def shr_imm32_asmoperand : shr_imm_asmoperands<"32">;
-def shr_imm64_asmoperand : shr_imm_asmoperands<"64">;
-
-def shr_imm8 : shr_imm<"8">, ImmLeaf<i32, [{return Imm > 0 && Imm <= 8;}]>;
-def shr_imm16 : shr_imm<"16">, ImmLeaf<i32, [{return Imm > 0 && Imm <= 16;}]>;
-def shr_imm32 : shr_imm<"32">, ImmLeaf<i32, [{return Imm > 0 && Imm <= 32;}]>;
-def shr_imm64 : shr_imm<"64">, ImmLeaf<i32, [{return Imm > 0 && Imm <= 64;}]>;
-
-class shl_imm_asmoperands<string OFFSET> : AsmOperandClass {
- let Name = "ShlImm" # OFFSET;
- let RenderMethod = "addImmOperands";
- let DiagnosticType = "ShlImm" # OFFSET;
-}
-
-class shl_imm<string OFFSET> : Operand<i32> {
- let EncoderMethod = "getShiftLeftImm" # OFFSET;
- let DecoderMethod = "DecodeShiftLeftImm" # OFFSET;
- let ParserMatchClass =
- !cast<AsmOperandClass>("shl_imm" # OFFSET # "_asmoperand");
-}
-
-def shl_imm8_asmoperand : shl_imm_asmoperands<"8">;
-def shl_imm16_asmoperand : shl_imm_asmoperands<"16">;
-def shl_imm32_asmoperand : shl_imm_asmoperands<"32">;
-def shl_imm64_asmoperand : shl_imm_asmoperands<"64">;
-
-def shl_imm8 : shl_imm<"8">, ImmLeaf<i32, [{return Imm >= 0 && Imm < 8;}]>;
-def shl_imm16 : shl_imm<"16">, ImmLeaf<i32, [{return Imm >= 0 && Imm < 16;}]>;
-def shl_imm32 : shl_imm<"32">, ImmLeaf<i32, [{return Imm >= 0 && Imm < 32;}]>;
-def shl_imm64 : shl_imm<"64">, ImmLeaf<i32, [{return Imm >= 0 && Imm < 64;}]>;
-
-class N2VShift<bit q, bit u, bits<5> opcode, string asmop, string T,
- RegisterOperand VPRC, ValueType Ty, Operand ImmTy, SDNode OpNode>
- : NeonI_2VShiftImm<q, u, opcode,
- (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
- asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
- [(set (Ty VPRC:$Rd),
- (Ty (OpNode (Ty VPRC:$Rn),
- (Ty (Neon_vdup (i32 ImmTy:$Imm))))))],
- NoItinerary>;
-
-multiclass NeonI_N2VShL<bit u, bits<5> opcode, string asmop> {
- // 64-bit vector types.
- def _8B : N2VShift<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shl_imm8, shl> {
- let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
- }
-
- def _4H : N2VShift<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shl_imm16, shl> {
- let Inst{22-20} = 0b001; // immh:immb = 001xxxx
- }
-
- def _2S : N2VShift<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shl_imm32, shl> {
- let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
- }
-
- // 128-bit vector types.
- def _16B : N2VShift<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shl_imm8, shl> {
- let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
- }
-
- def _8H : N2VShift<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shl_imm16, shl> {
- let Inst{22-20} = 0b001; // immh:immb = 001xxxx
- }
-
- def _4S : N2VShift<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shl_imm32, shl> {
- let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
- }
-
- def _2D : N2VShift<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shl_imm64, shl> {
- let Inst{22} = 0b1; // immh:immb = 1xxxxxx
- }
-}
-
-multiclass NeonI_N2VShR<bit u, bits<5> opcode, string asmop, SDNode OpNode> {
- def _8B : N2VShift<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
- OpNode> {
- let Inst{22-19} = 0b0001;
- }
-
- def _4H : N2VShift<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
- OpNode> {
- let Inst{22-20} = 0b001;
- }
-
- def _2S : N2VShift<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
- OpNode> {
- let Inst{22-21} = 0b01;
- }
-
- def _16B : N2VShift<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
- OpNode> {
- let Inst{22-19} = 0b0001;
- }
-
- def _8H : N2VShift<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
- OpNode> {
- let Inst{22-20} = 0b001;
- }
-
- def _4S : N2VShift<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
- OpNode> {
- let Inst{22-21} = 0b01;
- }
-
- def _2D : N2VShift<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
- OpNode> {
- let Inst{22} = 0b1;
- }
-}
-
-// Shift left
-defm SHLvvi : NeonI_N2VShL<0b0, 0b01010, "shl">;
-
-// Shift right
-defm SSHRvvi : NeonI_N2VShR<0b0, 0b00000, "sshr", sra>;
-defm USHRvvi : NeonI_N2VShR<0b1, 0b00000, "ushr", srl>;
-
-def Neon_High16B : PatFrag<(ops node:$in),
- (extract_subvector (v16i8 node:$in), (iPTR 8))>;
-def Neon_High8H : PatFrag<(ops node:$in),
- (extract_subvector (v8i16 node:$in), (iPTR 4))>;
-def Neon_High4S : PatFrag<(ops node:$in),
- (extract_subvector (v4i32 node:$in), (iPTR 2))>;
-def Neon_High2D : PatFrag<(ops node:$in),
- (extract_subvector (v2i64 node:$in), (iPTR 1))>;
-def Neon_High4float : PatFrag<(ops node:$in),
- (extract_subvector (v4f32 node:$in), (iPTR 2))>;
-def Neon_High2double : PatFrag<(ops node:$in),
- (extract_subvector (v2f64 node:$in), (iPTR 1))>;
-
-def Neon_Low16B : PatFrag<(ops node:$in),
- (v8i8 (extract_subvector (v16i8 node:$in),
- (iPTR 0)))>;
-def Neon_Low8H : PatFrag<(ops node:$in),
- (v4i16 (extract_subvector (v8i16 node:$in),
- (iPTR 0)))>;
-def Neon_Low4S : PatFrag<(ops node:$in),
- (v2i32 (extract_subvector (v4i32 node:$in),
- (iPTR 0)))>;
-def Neon_Low2D : PatFrag<(ops node:$in),
- (v1i64 (extract_subvector (v2i64 node:$in),
- (iPTR 0)))>;
-def Neon_Low4float : PatFrag<(ops node:$in),
- (v2f32 (extract_subvector (v4f32 node:$in),
- (iPTR 0)))>;
-def Neon_Low2double : PatFrag<(ops node:$in),
- (v1f64 (extract_subvector (v2f64 node:$in),
- (iPTR 0)))>;
-
-class N2VShiftLong<bit q, bit u, bits<5> opcode, string asmop, string DestT,
- string SrcT, ValueType DestTy, ValueType SrcTy,
- Operand ImmTy, SDPatternOperator ExtOp>
- : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
- (ins VPR64:$Rn, ImmTy:$Imm),
- asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
- [(set (DestTy VPR128:$Rd),
- (DestTy (shl
- (DestTy (ExtOp (SrcTy VPR64:$Rn))),
- (DestTy (Neon_vdup (i32 ImmTy:$Imm))))))],
- NoItinerary>;
-
-class N2VShiftLongHigh<bit q, bit u, bits<5> opcode, string asmop, string DestT,
- string SrcT, ValueType DestTy, ValueType SrcTy,
- int StartIndex, Operand ImmTy,
- SDPatternOperator ExtOp, PatFrag getTop>
- : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
- (ins VPR128:$Rn, ImmTy:$Imm),
- asmop # "2\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
- [(set (DestTy VPR128:$Rd),
- (DestTy (shl
- (DestTy (ExtOp
- (SrcTy (getTop VPR128:$Rn)))),
- (DestTy (Neon_vdup (i32 ImmTy:$Imm))))))],
- NoItinerary>;
-
-multiclass NeonI_N2VShLL<string prefix, bit u, bits<5> opcode, string asmop,
- SDNode ExtOp> {
- // 64-bit vector types.
- def _8B : N2VShiftLong<0b0, u, opcode, asmop, "8h", "8b", v8i16, v8i8,
- shl_imm8, ExtOp> {
- let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
- }
-
- def _4H : N2VShiftLong<0b0, u, opcode, asmop, "4s", "4h", v4i32, v4i16,
- shl_imm16, ExtOp> {
- let Inst{22-20} = 0b001; // immh:immb = 001xxxx
- }
-
- def _2S : N2VShiftLong<0b0, u, opcode, asmop, "2d", "2s", v2i64, v2i32,
- shl_imm32, ExtOp> {
- let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
- }
-
- // 128-bit vector types
- def _16B : N2VShiftLongHigh<0b1, u, opcode, asmop, "8h", "16b", v8i16, v8i8,
- 8, shl_imm8, ExtOp, Neon_High16B> {
- let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
- }
-
- def _8H : N2VShiftLongHigh<0b1, u, opcode, asmop, "4s", "8h", v4i32, v4i16,
- 4, shl_imm16, ExtOp, Neon_High8H> {
- let Inst{22-20} = 0b001; // immh:immb = 001xxxx
- }
-
- def _4S : N2VShiftLongHigh<0b1, u, opcode, asmop, "2d", "4s", v2i64, v2i32,
- 2, shl_imm32, ExtOp, Neon_High4S> {
- let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
- }
-
- // Use other patterns to match when the immediate is 0.
- def : Pat<(v8i16 (ExtOp (v8i8 VPR64:$Rn))),
- (!cast<Instruction>(prefix # "_8B") VPR64:$Rn, 0)>;
-
- def : Pat<(v4i32 (ExtOp (v4i16 VPR64:$Rn))),
- (!cast<Instruction>(prefix # "_4H") VPR64:$Rn, 0)>;
-
- def : Pat<(v2i64 (ExtOp (v2i32 VPR64:$Rn))),
- (!cast<Instruction>(prefix # "_2S") VPR64:$Rn, 0)>;
-
- def : Pat<(v8i16 (ExtOp (v8i8 (Neon_High16B VPR128:$Rn)))),
- (!cast<Instruction>(prefix # "_16B") VPR128:$Rn, 0)>;
-
- def : Pat<(v4i32 (ExtOp (v4i16 (Neon_High8H VPR128:$Rn)))),
- (!cast<Instruction>(prefix # "_8H") VPR128:$Rn, 0)>;
-
- def : Pat<(v2i64 (ExtOp (v2i32 (Neon_High4S VPR128:$Rn)))),
- (!cast<Instruction>(prefix # "_4S") VPR128:$Rn, 0)>;
-}
-
-// Shift left long
-defm SSHLLvvi : NeonI_N2VShLL<"SSHLLvvi", 0b0, 0b10100, "sshll", sext>;
-defm USHLLvvi : NeonI_N2VShLL<"USHLLvvi", 0b1, 0b10100, "ushll", zext>;
-
-// Rounding/Saturating shift
-class N2VShift_RQ<bit q, bit u, bits<5> opcode, string asmop, string T,
- RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
- SDPatternOperator OpNode>
- : NeonI_2VShiftImm<q, u, opcode,
- (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
- asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
- [(set (Ty VPRC:$Rd), (Ty (OpNode (Ty VPRC:$Rn),
- (i32 ImmTy:$Imm))))],
- NoItinerary>;
-
-// shift right (vector by immediate)
-multiclass NeonI_N2VShR_RQ<bit u, bits<5> opcode, string asmop,
- SDPatternOperator OpNode> {
- def _8B : N2VShift_RQ<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
- OpNode> {
- let Inst{22-19} = 0b0001;
- }
-
- def _4H : N2VShift_RQ<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
- OpNode> {
- let Inst{22-20} = 0b001;
- }
-
- def _2S : N2VShift_RQ<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
- OpNode> {
- let Inst{22-21} = 0b01;
- }
-
- def _16B : N2VShift_RQ<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
- OpNode> {
- let Inst{22-19} = 0b0001;
- }
-
- def _8H : N2VShift_RQ<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
- OpNode> {
- let Inst{22-20} = 0b001;
- }
-
- def _4S : N2VShift_RQ<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
- OpNode> {
- let Inst{22-21} = 0b01;
- }
-
- def _2D : N2VShift_RQ<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
- OpNode> {
- let Inst{22} = 0b1;
- }
-}
-
-multiclass NeonI_N2VShL_Q<bit u, bits<5> opcode, string asmop,
- SDPatternOperator OpNode> {
- // 64-bit vector types.
- def _8B : N2VShift_RQ<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shl_imm8,
- OpNode> {
- let Inst{22-19} = 0b0001;
- }
-
- def _4H : N2VShift_RQ<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shl_imm16,
- OpNode> {
- let Inst{22-20} = 0b001;
- }
-
- def _2S : N2VShift_RQ<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shl_imm32,
- OpNode> {
- let Inst{22-21} = 0b01;
- }
-
- // 128-bit vector types.
- def _16B : N2VShift_RQ<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shl_imm8,
- OpNode> {
- let Inst{22-19} = 0b0001;
- }
-
- def _8H : N2VShift_RQ<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shl_imm16,
- OpNode> {
- let Inst{22-20} = 0b001;
- }
-
- def _4S : N2VShift_RQ<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shl_imm32,
- OpNode> {
- let Inst{22-21} = 0b01;
- }
-
- def _2D : N2VShift_RQ<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shl_imm64,
- OpNode> {
- let Inst{22} = 0b1;
- }
-}
-
-// Rounding shift right
-defm SRSHRvvi : NeonI_N2VShR_RQ<0b0, 0b00100, "srshr",
- int_aarch64_neon_vsrshr>;
-defm URSHRvvi : NeonI_N2VShR_RQ<0b1, 0b00100, "urshr",
- int_aarch64_neon_vurshr>;
-
-// Saturating shift left unsigned
-defm SQSHLUvvi : NeonI_N2VShL_Q<0b1, 0b01100, "sqshlu", int_aarch64_neon_vsqshlu>;
-
-// Saturating shift left
-defm SQSHLvvi : NeonI_N2VShL_Q<0b0, 0b01110, "sqshl", Neon_sqrshlImm>;
-defm UQSHLvvi : NeonI_N2VShL_Q<0b1, 0b01110, "uqshl", Neon_uqrshlImm>;
-
-class N2VShiftAdd<bit q, bit u, bits<5> opcode, string asmop, string T,
- RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
- SDNode OpNode>
- : NeonI_2VShiftImm<q, u, opcode,
- (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
- asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
- [(set (Ty VPRC:$Rd), (Ty (add (Ty VPRC:$src),
- (Ty (OpNode (Ty VPRC:$Rn),
- (Ty (Neon_vdup (i32 ImmTy:$Imm))))))))],
- NoItinerary> {
- let Constraints = "$src = $Rd";
-}
-
-// Shift Right accumulate
-multiclass NeonI_N2VShRAdd<bit u, bits<5> opcode, string asmop, SDNode OpNode> {
- def _8B : N2VShiftAdd<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
- OpNode> {
- let Inst{22-19} = 0b0001;
- }
-
- def _4H : N2VShiftAdd<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
- OpNode> {
- let Inst{22-20} = 0b001;
- }
-
- def _2S : N2VShiftAdd<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
- OpNode> {
- let Inst{22-21} = 0b01;
- }
-
- def _16B : N2VShiftAdd<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
- OpNode> {
- let Inst{22-19} = 0b0001;
- }
-
- def _8H : N2VShiftAdd<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
- OpNode> {
- let Inst{22-20} = 0b001;
- }
-
- def _4S : N2VShiftAdd<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
- OpNode> {
- let Inst{22-21} = 0b01;
- }
-
- def _2D : N2VShiftAdd<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
- OpNode> {
- let Inst{22} = 0b1;
- }
-}
-
-// Shift right and accumulate
-defm SSRAvvi : NeonI_N2VShRAdd<0, 0b00010, "ssra", sra>;
-defm USRAvvi : NeonI_N2VShRAdd<1, 0b00010, "usra", srl>;
-
-// Rounding shift accumulate
-class N2VShiftAdd_R<bit q, bit u, bits<5> opcode, string asmop, string T,
- RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
- SDPatternOperator OpNode>
- : NeonI_2VShiftImm<q, u, opcode,
- (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
- asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
- [(set (Ty VPRC:$Rd), (Ty (add (Ty VPRC:$src),
- (Ty (OpNode (Ty VPRC:$Rn), (i32 ImmTy:$Imm))))))],
- NoItinerary> {
- let Constraints = "$src = $Rd";
-}
-
-multiclass NeonI_N2VShRAdd_R<bit u, bits<5> opcode, string asmop,
- SDPatternOperator OpNode> {
- def _8B : N2VShiftAdd_R<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
- OpNode> {
- let Inst{22-19} = 0b0001;
- }
-
- def _4H : N2VShiftAdd_R<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
- OpNode> {
- let Inst{22-20} = 0b001;
- }
-
- def _2S : N2VShiftAdd_R<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
- OpNode> {
- let Inst{22-21} = 0b01;
- }
-
- def _16B : N2VShiftAdd_R<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
- OpNode> {
- let Inst{22-19} = 0b0001;
- }
-
- def _8H : N2VShiftAdd_R<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
- OpNode> {
- let Inst{22-20} = 0b001;
- }
-
- def _4S : N2VShiftAdd_R<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
- OpNode> {
- let Inst{22-21} = 0b01;
- }
-
- def _2D : N2VShiftAdd_R<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
- OpNode> {
- let Inst{22} = 0b1;
- }
-}
-
-// Rounding shift right and accumulate
-defm SRSRAvvi : NeonI_N2VShRAdd_R<0, 0b00110, "srsra", int_aarch64_neon_vsrshr>;
-defm URSRAvvi : NeonI_N2VShRAdd_R<1, 0b00110, "ursra", int_aarch64_neon_vurshr>;
-
-// Shift insert by immediate
-class N2VShiftIns<bit q, bit u, bits<5> opcode, string asmop, string T,
- RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
- SDPatternOperator OpNode>
- : NeonI_2VShiftImm<q, u, opcode,
- (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
- asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
- [(set (Ty VPRC:$Rd), (Ty (OpNode (Ty VPRC:$src), (Ty VPRC:$Rn),
- (i32 ImmTy:$Imm))))],
- NoItinerary> {
- let Constraints = "$src = $Rd";
-}
-
-// shift left insert (vector by immediate)
-multiclass NeonI_N2VShLIns<bit u, bits<5> opcode, string asmop> {
- def _8B : N2VShiftIns<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shl_imm8,
- int_aarch64_neon_vsli> {
- let Inst{22-19} = 0b0001;
- }
-
- def _4H : N2VShiftIns<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shl_imm16,
- int_aarch64_neon_vsli> {
- let Inst{22-20} = 0b001;
- }
-
- def _2S : N2VShiftIns<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shl_imm32,
- int_aarch64_neon_vsli> {
- let Inst{22-21} = 0b01;
- }
-
- // 128-bit vector types
- def _16B : N2VShiftIns<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shl_imm8,
- int_aarch64_neon_vsli> {
- let Inst{22-19} = 0b0001;
- }
-
- def _8H : N2VShiftIns<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shl_imm16,
- int_aarch64_neon_vsli> {
- let Inst{22-20} = 0b001;
- }
-
- def _4S : N2VShiftIns<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shl_imm32,
- int_aarch64_neon_vsli> {
- let Inst{22-21} = 0b01;
- }
-
- def _2D : N2VShiftIns<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shl_imm64,
- int_aarch64_neon_vsli> {
- let Inst{22} = 0b1;
- }
-}
-
-// shift right insert (vector by immediate)
-multiclass NeonI_N2VShRIns<bit u, bits<5> opcode, string asmop> {
- // 64-bit vector types.
- def _8B : N2VShiftIns<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
- int_aarch64_neon_vsri> {
- let Inst{22-19} = 0b0001;
- }
-
- def _4H : N2VShiftIns<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
- int_aarch64_neon_vsri> {
- let Inst{22-20} = 0b001;
- }
-
- def _2S : N2VShiftIns<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
- int_aarch64_neon_vsri> {
- let Inst{22-21} = 0b01;
- }
-
- // 128-bit vector types
- def _16B : N2VShiftIns<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
- int_aarch64_neon_vsri> {
- let Inst{22-19} = 0b0001;
- }
-
- def _8H : N2VShiftIns<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
- int_aarch64_neon_vsri> {
- let Inst{22-20} = 0b001;
- }
-
- def _4S : N2VShiftIns<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
- int_aarch64_neon_vsri> {
- let Inst{22-21} = 0b01;
- }
-
- def _2D : N2VShiftIns<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
- int_aarch64_neon_vsri> {
- let Inst{22} = 0b1;
- }
-}
-
-// Shift left and insert
-defm SLIvvi : NeonI_N2VShLIns<0b1, 0b01010, "sli">;
-
-// Shift right and insert
-defm SRIvvi : NeonI_N2VShRIns<0b1, 0b01000, "sri">;
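Illustrative C (ACLE <arm_neon.h>, hypothetical helper names) for the shift-and-insert instructions:

#include <arm_neon.h>

/* sli/sri shift the second operand by the immediate and insert it into the
   destination, leaving the destination bits outside the shifted range intact. */
uint32x4_t shl_insert(uint32x4_t d, uint32x4_t v) { return vsliq_n_u32(d, v, 4); }
uint32x4_t shr_insert(uint32x4_t d, uint32x4_t v) { return vsriq_n_u32(d, v, 4); }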
-
-class N2VShR_Narrow<bit q, bit u, bits<5> opcode, string asmop, string DestT,
- string SrcT, Operand ImmTy>
- : NeonI_2VShiftImm<q, u, opcode,
- (outs VPR64:$Rd), (ins VPR128:$Rn, ImmTy:$Imm),
- asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
- [], NoItinerary>;
-
-class N2VShR_Narrow_Hi<bit q, bit u, bits<5> opcode, string asmop, string DestT,
- string SrcT, Operand ImmTy>
- : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
- (ins VPR128:$src, VPR128:$Rn, ImmTy:$Imm),
- asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
- [], NoItinerary> {
- let Constraints = "$src = $Rd";
-}
-
-// Shift right narrow (vector by immediate)
-multiclass NeonI_N2VShR_Narrow<bit u, bits<5> opcode, string asmop> {
- def _8B : N2VShR_Narrow<0b0, u, opcode, asmop, "8b", "8h", shr_imm8> {
- let Inst{22-19} = 0b0001;
- }
-
- def _4H : N2VShR_Narrow<0b0, u, opcode, asmop, "4h", "4s", shr_imm16> {
- let Inst{22-20} = 0b001;
- }
-
- def _2S : N2VShR_Narrow<0b0, u, opcode, asmop, "2s", "2d", shr_imm32> {
- let Inst{22-21} = 0b01;
- }
-
- // Shift Narrow High
- def _16B : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "16b", "8h",
- shr_imm8> {
- let Inst{22-19} = 0b0001;
- }
-
- def _8H : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "8h", "4s",
- shr_imm16> {
- let Inst{22-20} = 0b001;
- }
-
- def _4S : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "4s", "2d",
- shr_imm32> {
- let Inst{22-21} = 0b01;
- }
-}
-
-// Shift right narrow
-defm SHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10000, "shrn">;
-
-// Shift right narrow (prefix Q is saturating, prefix R is rounding)
-defm QSHRUNvvi : NeonI_N2VShR_Narrow<0b1, 0b10000, "sqshrun">;
-defm RSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10001, "rshrn">;
-defm QRSHRUNvvi : NeonI_N2VShR_Narrow<0b1, 0b10001, "sqrshrun">;
-defm SQSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10010, "sqshrn">;
-defm UQSHRNvvi : NeonI_N2VShR_Narrow<0b1, 0b10010, "uqshrn">;
-defm SQRSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10011, "sqrshrn">;
-defm UQRSHRNvvi : NeonI_N2VShR_Narrow<0b1, 0b10011, "uqrshrn">;
-
-def Neon_combine_2D : PatFrag<(ops node:$Rm, node:$Rn),
- (v2i64 (concat_vectors (v1i64 node:$Rm),
- (v1i64 node:$Rn)))>;
-def Neon_combine_8H : PatFrag<(ops node:$Rm, node:$Rn),
- (v8i16 (concat_vectors (v4i16 node:$Rm),
- (v4i16 node:$Rn)))>;
-def Neon_combine_4S : PatFrag<(ops node:$Rm, node:$Rn),
- (v4i32 (concat_vectors (v2i32 node:$Rm),
- (v2i32 node:$Rn)))>;
-def Neon_combine_4f : PatFrag<(ops node:$Rm, node:$Rn),
- (v4f32 (concat_vectors (v2f32 node:$Rm),
- (v2f32 node:$Rn)))>;
-def Neon_combine_2d : PatFrag<(ops node:$Rm, node:$Rn),
- (v2f64 (concat_vectors (v1f64 node:$Rm),
- (v1f64 node:$Rn)))>;
-
-def Neon_lshrImm8H : PatFrag<(ops node:$lhs, node:$rhs),
- (v8i16 (srl (v8i16 node:$lhs),
- (v8i16 (Neon_vdup (i32 node:$rhs)))))>;
-def Neon_lshrImm4S : PatFrag<(ops node:$lhs, node:$rhs),
- (v4i32 (srl (v4i32 node:$lhs),
- (v4i32 (Neon_vdup (i32 node:$rhs)))))>;
-def Neon_lshrImm2D : PatFrag<(ops node:$lhs, node:$rhs),
- (v2i64 (srl (v2i64 node:$lhs),
- (v2i64 (Neon_vdup (i32 node:$rhs)))))>;
-def Neon_ashrImm8H : PatFrag<(ops node:$lhs, node:$rhs),
- (v8i16 (sra (v8i16 node:$lhs),
- (v8i16 (Neon_vdup (i32 node:$rhs)))))>;
-def Neon_ashrImm4S : PatFrag<(ops node:$lhs, node:$rhs),
- (v4i32 (sra (v4i32 node:$lhs),
- (v4i32 (Neon_vdup (i32 node:$rhs)))))>;
-def Neon_ashrImm2D : PatFrag<(ops node:$lhs, node:$rhs),
- (v2i64 (sra (v2i64 node:$lhs),
- (v2i64 (Neon_vdup (i32 node:$rhs)))))>;
-
-// Normal shift right narrow is matched by IR (srl/sra, trunc, concat_vectors)
-multiclass Neon_shiftNarrow_patterns<string shr> {
- def : Pat<(v8i8 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm8H") VPR128:$Rn,
- (i32 shr_imm8:$Imm)))),
- (SHRNvvi_8B VPR128:$Rn, imm:$Imm)>;
- def : Pat<(v4i16 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm4S") VPR128:$Rn,
- (i32 shr_imm16:$Imm)))),
- (SHRNvvi_4H VPR128:$Rn, imm:$Imm)>;
- def : Pat<(v2i32 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm2D") VPR128:$Rn,
- (i32 shr_imm32:$Imm)))),
- (SHRNvvi_2S VPR128:$Rn, imm:$Imm)>;
-
- def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
- (v8i8 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm8H")
- VPR128:$Rn, (i32 shr_imm8:$Imm))))))),
- (SHRNvvi_16B (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
- VPR128:$Rn, imm:$Imm)>;
- def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
- (v4i16 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm4S")
- VPR128:$Rn, (i32 shr_imm16:$Imm))))))),
- (SHRNvvi_8H (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
- VPR128:$Rn, imm:$Imm)>;
- def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
- (v2i32 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm2D")
- VPR128:$Rn, (i32 shr_imm32:$Imm))))))),
- (SHRNvvi_4S (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
- VPR128:$Rn, imm:$Imm)>;
-}
-
-multiclass Neon_shiftNarrow_QR_patterns<SDPatternOperator op, string prefix> {
- def : Pat<(v8i8 (op (v8i16 VPR128:$Rn), shr_imm8:$Imm)),
- (!cast<Instruction>(prefix # "_8B") VPR128:$Rn, imm:$Imm)>;
- def : Pat<(v4i16 (op (v4i32 VPR128:$Rn), shr_imm16:$Imm)),
- (!cast<Instruction>(prefix # "_4H") VPR128:$Rn, imm:$Imm)>;
- def : Pat<(v2i32 (op (v2i64 VPR128:$Rn), shr_imm32:$Imm)),
- (!cast<Instruction>(prefix # "_2S") VPR128:$Rn, imm:$Imm)>;
-
- def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
- (v1i64 (bitconvert (v8i8
- (op (v8i16 VPR128:$Rn), shr_imm8:$Imm))))),
- (!cast<Instruction>(prefix # "_16B")
- (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
- VPR128:$Rn, imm:$Imm)>;
- def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
- (v1i64 (bitconvert (v4i16
- (op (v4i32 VPR128:$Rn), shr_imm16:$Imm))))),
- (!cast<Instruction>(prefix # "_8H")
- (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
- VPR128:$Rn, imm:$Imm)>;
- def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
- (v1i64 (bitconvert (v2i32
- (op (v2i64 VPR128:$Rn), shr_imm32:$Imm))))),
- (!cast<Instruction>(prefix # "_4S")
- (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
- VPR128:$Rn, imm:$Imm)>;
-}
-
-defm : Neon_shiftNarrow_patterns<"lshr">;
-defm : Neon_shiftNarrow_patterns<"ashr">;
-
-defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqshrun, "QSHRUNvvi">;
-defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vrshrn, "RSHRNvvi">;
-defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqrshrun, "QRSHRUNvvi">;
-defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqshrn, "SQSHRNvvi">;
-defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vuqshrn, "UQSHRNvvi">;
-defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqrshrn, "SQRSHRNvvi">;
-defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vuqrshrn, "UQRSHRNvvi">;
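A small C sketch, assuming ACLE <arm_neon.h>, contrasting the plain narrowing shift (matched from IR as above) with a saturating/rounding variant:

#include <arm_neon.h>

/* shrn: truncating shift right narrow; sqrshrun: saturating, rounding,
   unsigned-result shift right narrow. */
int8x8_t  narrow_shr(int16x8_t v)     { return vshrn_n_s16(v, 4); }
uint8x8_t narrow_shr_sat(int16x8_t v) { return vqrshrun_n_s16(v, 4); }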
-
-// Convert between fixed-point and floating-point
-class N2VCvt_Fx<bit q, bit u, bits<5> opcode, string asmop, string T,
- RegisterOperand VPRC, ValueType DestTy, ValueType SrcTy,
- Operand ImmTy, SDPatternOperator IntOp>
- : NeonI_2VShiftImm<q, u, opcode,
- (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
- asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
- [(set (DestTy VPRC:$Rd), (DestTy (IntOp (SrcTy VPRC:$Rn),
- (i32 ImmTy:$Imm))))],
- NoItinerary>;
-
-multiclass NeonI_N2VCvt_Fx2fp<bit u, bits<5> opcode, string asmop,
- SDPatternOperator IntOp> {
- def _2S : N2VCvt_Fx<0, u, opcode, asmop, "2s", VPR64, v2f32, v2i32,
- shr_imm32, IntOp> {
- let Inst{22-21} = 0b01;
- }
-
- def _4S : N2VCvt_Fx<1, u, opcode, asmop, "4s", VPR128, v4f32, v4i32,
- shr_imm32, IntOp> {
- let Inst{22-21} = 0b01;
- }
-
- def _2D : N2VCvt_Fx<1, u, opcode, asmop, "2d", VPR128, v2f64, v2i64,
- shr_imm64, IntOp> {
- let Inst{22} = 0b1;
- }
-}
-
-multiclass NeonI_N2VCvt_Fp2fx<bit u, bits<5> opcode, string asmop,
- SDPatternOperator IntOp> {
- def _2S : N2VCvt_Fx<0, u, opcode, asmop, "2s", VPR64, v2i32, v2f32,
- shr_imm32, IntOp> {
- let Inst{22-21} = 0b01;
- }
-
- def _4S : N2VCvt_Fx<1, u, opcode, asmop, "4s", VPR128, v4i32, v4f32,
- shr_imm32, IntOp> {
- let Inst{22-21} = 0b01;
- }
-
- def _2D : N2VCvt_Fx<1, u, opcode, asmop, "2d", VPR128, v2i64, v2f64,
- shr_imm64, IntOp> {
- let Inst{22} = 0b1;
- }
-}
-
-// Convert fixed-point to floating-point
-defm VCVTxs2f : NeonI_N2VCvt_Fx2fp<0, 0b11100, "scvtf",
- int_arm_neon_vcvtfxs2fp>;
-defm VCVTxu2f : NeonI_N2VCvt_Fx2fp<1, 0b11100, "ucvtf",
- int_arm_neon_vcvtfxu2fp>;
-
-// Convert floating-point to fixed-point
-defm VCVTf2xs : NeonI_N2VCvt_Fp2fx<0, 0b11111, "fcvtzs",
- int_arm_neon_vcvtfp2fxs>;
-defm VCVTf2xu : NeonI_N2VCvt_Fp2fx<1, 0b11111, "fcvtzu",
- int_arm_neon_vcvtfp2fxu>;
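Roughly, in C with ACLE <arm_neon.h> (the fractional-bit count of 16 is an arbitrary example):

#include <arm_neon.h>

/* scvtf/fcvtzs with an immediate treat the integer as Q-format fixed point;
   the immediate gives the number of fractional bits. */
float32x4_t fixed_to_float(int32x4_t q16) { return vcvtq_n_f32_s32(q16, 16); }
int32x4_t   float_to_fixed(float32x4_t f) { return vcvtq_n_s32_f32(f, 16); }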
-
-multiclass Neon_sshll2_0<SDNode ext>
-{
- def _v8i8 : PatFrag<(ops node:$Rn),
- (v8i16 (ext (v8i8 (Neon_High16B node:$Rn))))>;
- def _v4i16 : PatFrag<(ops node:$Rn),
- (v4i32 (ext (v4i16 (Neon_High8H node:$Rn))))>;
- def _v2i32 : PatFrag<(ops node:$Rn),
- (v2i64 (ext (v2i32 (Neon_High4S node:$Rn))))>;
-}
-
-defm NI_sext_high : Neon_sshll2_0<sext>;
-defm NI_zext_high : Neon_sshll2_0<zext>;
-
-
-//===----------------------------------------------------------------------===//
-// Multiclasses for NeonI_Across
-//===----------------------------------------------------------------------===//
-
-// Variant 1
-
-multiclass NeonI_2VAcross_1<bit u, bits<5> opcode,
- string asmop, SDPatternOperator opnode>
-{
- def _1h8b: NeonI_2VAcross<0b0, u, 0b00, opcode,
- (outs FPR16:$Rd), (ins VPR64:$Rn),
- asmop # "\t$Rd, $Rn.8b",
- [(set (v1i16 FPR16:$Rd),
- (v1i16 (opnode (v8i8 VPR64:$Rn))))],
- NoItinerary>;
-
- def _1h16b: NeonI_2VAcross<0b1, u, 0b00, opcode,
- (outs FPR16:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd, $Rn.16b",
- [(set (v1i16 FPR16:$Rd),
- (v1i16 (opnode (v16i8 VPR128:$Rn))))],
- NoItinerary>;
-
- def _1s4h: NeonI_2VAcross<0b0, u, 0b01, opcode,
- (outs FPR32:$Rd), (ins VPR64:$Rn),
- asmop # "\t$Rd, $Rn.4h",
- [(set (v1i32 FPR32:$Rd),
- (v1i32 (opnode (v4i16 VPR64:$Rn))))],
- NoItinerary>;
-
- def _1s8h: NeonI_2VAcross<0b1, u, 0b01, opcode,
- (outs FPR32:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd, $Rn.8h",
- [(set (v1i32 FPR32:$Rd),
- (v1i32 (opnode (v8i16 VPR128:$Rn))))],
- NoItinerary>;
-
- // _1d2s doesn't exist!
-
- def _1d4s: NeonI_2VAcross<0b1, u, 0b10, opcode,
- (outs FPR64:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd, $Rn.4s",
- [(set (v1i64 FPR64:$Rd),
- (v1i64 (opnode (v4i32 VPR128:$Rn))))],
- NoItinerary>;
-}
-
-defm SADDLV : NeonI_2VAcross_1<0b0, 0b00011, "saddlv", int_aarch64_neon_saddlv>;
-defm UADDLV : NeonI_2VAcross_1<0b1, 0b00011, "uaddlv", int_aarch64_neon_uaddlv>;
-
-// Variant 2
-
-multiclass NeonI_2VAcross_2<bit u, bits<5> opcode,
- string asmop, SDPatternOperator opnode>
-{
- def _1b8b: NeonI_2VAcross<0b0, u, 0b00, opcode,
- (outs FPR8:$Rd), (ins VPR64:$Rn),
- asmop # "\t$Rd, $Rn.8b",
- [(set (v1i8 FPR8:$Rd),
- (v1i8 (opnode (v8i8 VPR64:$Rn))))],
- NoItinerary>;
-
- def _1b16b: NeonI_2VAcross<0b1, u, 0b00, opcode,
- (outs FPR8:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd, $Rn.16b",
- [(set (v1i8 FPR8:$Rd),
- (v1i8 (opnode (v16i8 VPR128:$Rn))))],
- NoItinerary>;
-
- def _1h4h: NeonI_2VAcross<0b0, u, 0b01, opcode,
- (outs FPR16:$Rd), (ins VPR64:$Rn),
- asmop # "\t$Rd, $Rn.4h",
- [(set (v1i16 FPR16:$Rd),
- (v1i16 (opnode (v4i16 VPR64:$Rn))))],
- NoItinerary>;
-
- def _1h8h: NeonI_2VAcross<0b1, u, 0b01, opcode,
- (outs FPR16:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd, $Rn.8h",
- [(set (v1i16 FPR16:$Rd),
- (v1i16 (opnode (v8i16 VPR128:$Rn))))],
- NoItinerary>;
-
- // _1s2s doesn't exist!
-
- def _1s4s: NeonI_2VAcross<0b1, u, 0b10, opcode,
- (outs FPR32:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd, $Rn.4s",
- [(set (v1i32 FPR32:$Rd),
- (v1i32 (opnode (v4i32 VPR128:$Rn))))],
- NoItinerary>;
-}
-
-defm SMAXV : NeonI_2VAcross_2<0b0, 0b01010, "smaxv", int_aarch64_neon_smaxv>;
-defm UMAXV : NeonI_2VAcross_2<0b1, 0b01010, "umaxv", int_aarch64_neon_umaxv>;
-
-defm SMINV : NeonI_2VAcross_2<0b0, 0b11010, "sminv", int_aarch64_neon_sminv>;
-defm UMINV : NeonI_2VAcross_2<0b1, 0b11010, "uminv", int_aarch64_neon_uminv>;
-
-defm ADDV : NeonI_2VAcross_2<0b0, 0b11011, "addv", int_aarch64_neon_vaddv>;
-
-// Variant 3
-
-multiclass NeonI_2VAcross_3<bit u, bits<5> opcode, bits<2> size,
- string asmop, SDPatternOperator opnode> {
- def _1s4s: NeonI_2VAcross<0b1, u, size, opcode,
- (outs FPR32:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd, $Rn.4s",
- [(set (v1f32 FPR32:$Rd),
- (v1f32 (opnode (v4f32 VPR128:$Rn))))],
- NoItinerary>;
-}
-
-defm FMAXNMV : NeonI_2VAcross_3<0b1, 0b01100, 0b00, "fmaxnmv",
- int_aarch64_neon_vmaxnmv>;
-defm FMINNMV : NeonI_2VAcross_3<0b1, 0b01100, 0b10, "fminnmv",
- int_aarch64_neon_vminnmv>;
-
-defm FMAXV : NeonI_2VAcross_3<0b1, 0b01111, 0b00, "fmaxv",
- int_aarch64_neon_vmaxv>;
-defm FMINV : NeonI_2VAcross_3<0b1, 0b01111, 0b10, "fminv",
- int_aarch64_neon_vminv>;
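For illustration only, the three across-vector variants in C via the AArch64 ACLE <arm_neon.h> intrinsics:

#include <arm_neon.h>

/* Variant 1 widens while reducing (saddlv), variant 2 keeps the element type
   (addv, smaxv, ...), variant 3 covers the floating-point reductions (fmaxv). */
int16_t   sum_widened(int8x16_t v) { return vaddlvq_s8(v); }
int32_t   sum(int32x4_t v)         { return vaddvq_s32(v); }
float32_t max_lane(float32x4_t v)  { return vmaxvq_f32(v); }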
-
-// The following definitions are for the instruction class (Perm)
-
-class NeonI_Permute<bit q, bits<2> size, bits<3> opcode,
- string asmop, RegisterOperand OpVPR, string OpS,
- SDPatternOperator opnode, ValueType Ty>
- : NeonI_Perm<q, size, opcode,
- (outs OpVPR:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
- asmop # "\t$Rd." # OpS # ", $Rn." # OpS # ", $Rm." # OpS,
- [(set (Ty OpVPR:$Rd),
- (Ty (opnode (Ty OpVPR:$Rn), (Ty OpVPR:$Rm))))],
- NoItinerary>;
-
-multiclass NeonI_Perm_pat<bits<3> opcode, string asmop,
- SDPatternOperator opnode> {
- def _8b : NeonI_Permute<0b0, 0b00, opcode, asmop,
- VPR64, "8b", opnode, v8i8>;
- def _16b : NeonI_Permute<0b1, 0b00, opcode, asmop,
- VPR128, "16b",opnode, v16i8>;
- def _4h : NeonI_Permute<0b0, 0b01, opcode, asmop,
- VPR64, "4h", opnode, v4i16>;
- def _8h : NeonI_Permute<0b1, 0b01, opcode, asmop,
- VPR128, "8h", opnode, v8i16>;
- def _2s : NeonI_Permute<0b0, 0b10, opcode, asmop,
- VPR64, "2s", opnode, v2i32>;
- def _4s : NeonI_Permute<0b1, 0b10, opcode, asmop,
- VPR128, "4s", opnode, v4i32>;
- def _2d : NeonI_Permute<0b1, 0b11, opcode, asmop,
- VPR128, "2d", opnode, v2i64>;
-}
-
-defm UZP1vvv : NeonI_Perm_pat<0b001, "uzp1", Neon_uzp1>;
-defm TRN1vvv : NeonI_Perm_pat<0b010, "trn1", Neon_trn1>;
-defm ZIP1vvv : NeonI_Perm_pat<0b011, "zip1", Neon_zip1>;
-defm UZP2vvv : NeonI_Perm_pat<0b101, "uzp2", Neon_uzp2>;
-defm TRN2vvv : NeonI_Perm_pat<0b110, "trn2", Neon_trn2>;
-defm ZIP2vvv : NeonI_Perm_pat<0b111, "zip2", Neon_zip2>;
-
-multiclass NeonI_Perm_float_pat<string INS, SDPatternOperator opnode> {
- def : Pat<(v2f32 (opnode (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))),
- (!cast<Instruction>(INS # "_2s") VPR64:$Rn, VPR64:$Rm)>;
-
- def : Pat<(v4f32 (opnode (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))),
- (!cast<Instruction>(INS # "_4s") VPR128:$Rn, VPR128:$Rm)>;
-
- def : Pat<(v2f64 (opnode (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))),
- (!cast<Instruction>(INS # "_2d") VPR128:$Rn, VPR128:$Rm)>;
-}
-
-defm : NeonI_Perm_float_pat<"UZP1vvv", Neon_uzp1>;
-defm : NeonI_Perm_float_pat<"UZP2vvv", Neon_uzp2>;
-defm : NeonI_Perm_float_pat<"ZIP1vvv", Neon_zip1>;
-defm : NeonI_Perm_float_pat<"ZIP2vvv", Neon_zip2>;
-defm : NeonI_Perm_float_pat<"TRN1vvv", Neon_trn1>;
-defm : NeonI_Perm_float_pat<"TRN2vvv", Neon_trn2>;
-
-// The following definitions are for the instruction class (3V Diff)
-
-// normal long/long2 pattern
-class NeonI_3VDL<bit q, bit u, bits<2> size, bits<4> opcode,
- string asmop, string ResS, string OpS,
- SDPatternOperator opnode, SDPatternOperator ext,
- RegisterOperand OpVPR,
- ValueType ResTy, ValueType OpTy>
- : NeonI_3VDiff<q, u, size, opcode,
- (outs VPR128:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
- asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
- [(set (ResTy VPR128:$Rd),
- (ResTy (opnode (ResTy (ext (OpTy OpVPR:$Rn))),
- (ResTy (ext (OpTy OpVPR:$Rm))))))],
- NoItinerary>;
-
-multiclass NeonI_3VDL_s<bit u, bits<4> opcode,
- string asmop, SDPatternOperator opnode,
- bit Commutable = 0> {
- let isCommutable = Commutable in {
- def _8h8b : NeonI_3VDL<0b0, u, 0b00, opcode, asmop, "8h", "8b",
- opnode, sext, VPR64, v8i16, v8i8>;
- def _4s4h : NeonI_3VDL<0b0, u, 0b01, opcode, asmop, "4s", "4h",
- opnode, sext, VPR64, v4i32, v4i16>;
- def _2d2s : NeonI_3VDL<0b0, u, 0b10, opcode, asmop, "2d", "2s",
- opnode, sext, VPR64, v2i64, v2i32>;
- }
-}
-
-multiclass NeonI_3VDL2_s<bit u, bits<4> opcode, string asmop,
- SDPatternOperator opnode, bit Commutable = 0> {
- let isCommutable = Commutable in {
- def _8h16b : NeonI_3VDL<0b1, u, 0b00, opcode, asmop, "8h", "16b",
- opnode, NI_sext_high_v8i8, VPR128, v8i16, v16i8>;
- def _4s8h : NeonI_3VDL<0b1, u, 0b01, opcode, asmop, "4s", "8h",
- opnode, NI_sext_high_v4i16, VPR128, v4i32, v8i16>;
- def _2d4s : NeonI_3VDL<0b1, u, 0b10, opcode, asmop, "2d", "4s",
- opnode, NI_sext_high_v2i32, VPR128, v2i64, v4i32>;
- }
-}
-
-multiclass NeonI_3VDL_u<bit u, bits<4> opcode, string asmop,
- SDPatternOperator opnode, bit Commutable = 0> {
- let isCommutable = Commutable in {
- def _8h8b : NeonI_3VDL<0b0, u, 0b00, opcode, asmop, "8h", "8b",
- opnode, zext, VPR64, v8i16, v8i8>;
- def _4s4h : NeonI_3VDL<0b0, u, 0b01, opcode, asmop, "4s", "4h",
- opnode, zext, VPR64, v4i32, v4i16>;
- def _2d2s : NeonI_3VDL<0b0, u, 0b10, opcode, asmop, "2d", "2s",
- opnode, zext, VPR64, v2i64, v2i32>;
- }
-}
-
-multiclass NeonI_3VDL2_u<bit u, bits<4> opcode, string asmop,
- SDPatternOperator opnode, bit Commutable = 0> {
- let isCommutable = Commutable in {
- def _8h16b : NeonI_3VDL<0b1, u, 0b00, opcode, asmop, "8h", "16b",
- opnode, NI_zext_high_v8i8, VPR128, v8i16, v16i8>;
- def _4s8h : NeonI_3VDL<0b1, u, 0b01, opcode, asmop, "4s", "8h",
- opnode, NI_zext_high_v4i16, VPR128, v4i32, v8i16>;
- def _2d4s : NeonI_3VDL<0b1, u, 0b10, opcode, asmop, "2d", "4s",
- opnode, NI_zext_high_v2i32, VPR128, v2i64, v4i32>;
- }
-}
-
-defm SADDLvvv : NeonI_3VDL_s<0b0, 0b0000, "saddl", add, 1>;
-defm UADDLvvv : NeonI_3VDL_u<0b1, 0b0000, "uaddl", add, 1>;
-
-defm SADDL2vvv : NeonI_3VDL2_s<0b0, 0b0000, "saddl2", add, 1>;
-defm UADDL2vvv : NeonI_3VDL2_u<0b1, 0b0000, "uaddl2", add, 1>;
-
-defm SSUBLvvv : NeonI_3VDL_s<0b0, 0b0010, "ssubl", sub, 0>;
-defm USUBLvvv : NeonI_3VDL_u<0b1, 0b0010, "usubl", sub, 0>;
-
-defm SSUBL2vvv : NeonI_3VDL2_s<0b0, 0b0010, "ssubl2", sub, 0>;
-defm USUBL2vvv : NeonI_3VDL2_u<0b1, 0b0010, "usubl2", sub, 0>;
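Illustrative C, assuming the AArch64 ACLE <arm_neon.h> intrinsics, for the long/long2 widening pattern:

#include <arm_neon.h>

/* saddl widens the 64-bit inputs before adding; saddl2 (the _high intrinsics)
   does the same with the upper halves of 128-bit inputs. */
int16x8_t add_long_lo(int8x8_t a, int8x8_t b)   { return vaddl_s8(a, b); }
int16x8_t add_long_hi(int8x16_t a, int8x16_t b) { return vaddl_high_s8(a, b); }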
-
-// normal wide/wide2 pattern
-class NeonI_3VDW<bit q, bit u, bits<2> size, bits<4> opcode,
- string asmop, string ResS, string OpS,
- SDPatternOperator opnode, SDPatternOperator ext,
- RegisterOperand OpVPR,
- ValueType ResTy, ValueType OpTy>
- : NeonI_3VDiff<q, u, size, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn, OpVPR:$Rm),
- asmop # "\t$Rd." # ResS # ", $Rn." # ResS # ", $Rm." # OpS,
- [(set (ResTy VPR128:$Rd),
- (ResTy (opnode (ResTy VPR128:$Rn),
- (ResTy (ext (OpTy OpVPR:$Rm))))))],
- NoItinerary>;
-
-multiclass NeonI_3VDW_s<bit u, bits<4> opcode, string asmop,
- SDPatternOperator opnode> {
- def _8h8b : NeonI_3VDW<0b0, u, 0b00, opcode, asmop, "8h", "8b",
- opnode, sext, VPR64, v8i16, v8i8>;
- def _4s4h : NeonI_3VDW<0b0, u, 0b01, opcode, asmop, "4s", "4h",
- opnode, sext, VPR64, v4i32, v4i16>;
- def _2d2s : NeonI_3VDW<0b0, u, 0b10, opcode, asmop, "2d", "2s",
- opnode, sext, VPR64, v2i64, v2i32>;
-}
-
-defm SADDWvvv : NeonI_3VDW_s<0b0, 0b0001, "saddw", add>;
-defm SSUBWvvv : NeonI_3VDW_s<0b0, 0b0011, "ssubw", sub>;
-
-multiclass NeonI_3VDW2_s<bit u, bits<4> opcode, string asmop,
- SDPatternOperator opnode> {
- def _8h16b : NeonI_3VDW<0b1, u, 0b00, opcode, asmop, "8h", "16b",
- opnode, NI_sext_high_v8i8, VPR128, v8i16, v16i8>;
- def _4s8h : NeonI_3VDW<0b1, u, 0b01, opcode, asmop, "4s", "8h",
- opnode, NI_sext_high_v4i16, VPR128, v4i32, v8i16>;
- def _2d4s : NeonI_3VDW<0b1, u, 0b10, opcode, asmop, "2d", "4s",
- opnode, NI_sext_high_v2i32, VPR128, v2i64, v4i32>;
-}
-
-defm SADDW2vvv : NeonI_3VDW2_s<0b0, 0b0001, "saddw2", add>;
-defm SSUBW2vvv : NeonI_3VDW2_s<0b0, 0b0011, "ssubw2", sub>;
-
-multiclass NeonI_3VDW_u<bit u, bits<4> opcode, string asmop,
- SDPatternOperator opnode> {
- def _8h8b : NeonI_3VDW<0b0, u, 0b00, opcode, asmop, "8h", "8b",
- opnode, zext, VPR64, v8i16, v8i8>;
- def _4s4h : NeonI_3VDW<0b0, u, 0b01, opcode, asmop, "4s", "4h",
- opnode, zext, VPR64, v4i32, v4i16>;
- def _2d2s : NeonI_3VDW<0b0, u, 0b10, opcode, asmop, "2d", "2s",
- opnode, zext, VPR64, v2i64, v2i32>;
-}
-
-defm UADDWvvv : NeonI_3VDW_u<0b1, 0b0001, "uaddw", add>;
-defm USUBWvvv : NeonI_3VDW_u<0b1, 0b0011, "usubw", sub>;
-
-multiclass NeonI_3VDW2_u<bit u, bits<4> opcode, string asmop,
- SDPatternOperator opnode> {
- def _8h16b : NeonI_3VDW<0b1, u, 0b00, opcode, asmop, "8h", "16b",
- opnode, NI_zext_high_v8i8, VPR128, v8i16, v16i8>;
- def _4s8h : NeonI_3VDW<0b1, u, 0b01, opcode, asmop, "4s", "8h",
- opnode, NI_zext_high_v4i16, VPR128, v4i32, v8i16>;
- def _2d4s : NeonI_3VDW<0b1, u, 0b10, opcode, asmop, "2d", "4s",
- opnode, NI_zext_high_v2i32, VPR128, v2i64, v4i32>;
-}
-
-defm UADDW2vvv : NeonI_3VDW2_u<0b1, 0b0001, "uaddw2", add>;
-defm USUBW2vvv : NeonI_3VDW2_u<0b1, 0b0011, "usubw2", sub>;
-
-// Get the high half of each vector element.
-multiclass NeonI_get_high {
- def _8h : PatFrag<(ops node:$Rn),
- (v8i8 (trunc (v8i16 (srl (v8i16 node:$Rn),
- (v8i16 (Neon_vdup (i32 8)))))))>;
- def _4s : PatFrag<(ops node:$Rn),
- (v4i16 (trunc (v4i32 (srl (v4i32 node:$Rn),
- (v4i32 (Neon_vdup (i32 16)))))))>;
- def _2d : PatFrag<(ops node:$Rn),
- (v2i32 (trunc (v2i64 (srl (v2i64 node:$Rn),
- (v2i64 (Neon_vdup (i32 32)))))))>;
-}
-
-defm NI_get_hi : NeonI_get_high;
-
-// pattern for addhn/subhn with 2 operands
-class NeonI_3VDN_addhn_2Op<bit q, bit u, bits<2> size, bits<4> opcode,
- string asmop, string ResS, string OpS,
- SDPatternOperator opnode, SDPatternOperator get_hi,
- ValueType ResTy, ValueType OpTy>
- : NeonI_3VDiff<q, u, size, opcode,
- (outs VPR64:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
- asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
- [(set (ResTy VPR64:$Rd),
- (ResTy (get_hi
- (OpTy (opnode (OpTy VPR128:$Rn),
- (OpTy VPR128:$Rm))))))],
- NoItinerary>;
-
-multiclass NeonI_3VDN_addhn_2Op<bit u, bits<4> opcode, string asmop,
- SDPatternOperator opnode, bit Commutable = 0> {
- let isCommutable = Commutable in {
- def _8b8h : NeonI_3VDN_addhn_2Op<0b0, u, 0b00, opcode, asmop, "8b", "8h",
- opnode, NI_get_hi_8h, v8i8, v8i16>;
- def _4h4s : NeonI_3VDN_addhn_2Op<0b0, u, 0b01, opcode, asmop, "4h", "4s",
- opnode, NI_get_hi_4s, v4i16, v4i32>;
- def _2s2d : NeonI_3VDN_addhn_2Op<0b0, u, 0b10, opcode, asmop, "2s", "2d",
- opnode, NI_get_hi_2d, v2i32, v2i64>;
- }
-}
-
-defm ADDHNvvv : NeonI_3VDN_addhn_2Op<0b0, 0b0100, "addhn", add, 1>;
-defm SUBHNvvv : NeonI_3VDN_addhn_2Op<0b0, 0b0110, "subhn", sub, 0>;
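In C terms (ACLE <arm_neon.h>, illustrative only), the narrowing-high-half add is:

#include <arm_neon.h>

/* addhn adds the wide operands and keeps the high half of each result lane,
   i.e. the trunc(srl(a + b, half-width)) combination built from NI_get_hi. */
int8x8_t add_high_narrow(int16x8_t a, int16x8_t b) { return vaddhn_s16(a, b); }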
-
-// pattern for operation with 2 operands
-class NeonI_3VD_2Op<bit q, bit u, bits<2> size, bits<4> opcode,
- string asmop, string ResS, string OpS,
- SDPatternOperator opnode,
- RegisterOperand ResVPR, RegisterOperand OpVPR,
- ValueType ResTy, ValueType OpTy>
- : NeonI_3VDiff<q, u, size, opcode,
- (outs ResVPR:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
- asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
- [(set (ResTy ResVPR:$Rd),
- (ResTy (opnode (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm))))],
- NoItinerary>;
-
-// normal narrow pattern
-multiclass NeonI_3VDN_2Op<bit u, bits<4> opcode, string asmop,
- SDPatternOperator opnode, bit Commutable = 0> {
- let isCommutable = Commutable in {
- def _8b8h : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8b", "8h",
- opnode, VPR64, VPR128, v8i8, v8i16>;
- def _4h4s : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4h", "4s",
- opnode, VPR64, VPR128, v4i16, v4i32>;
- def _2s2d : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2s", "2d",
- opnode, VPR64, VPR128, v2i32, v2i64>;
- }
-}
-
-defm RADDHNvvv : NeonI_3VDN_2Op<0b1, 0b0100, "raddhn", int_arm_neon_vraddhn, 1>;
-defm RSUBHNvvv : NeonI_3VDN_2Op<0b1, 0b0110, "rsubhn", int_arm_neon_vrsubhn, 0>;
-
-// Pattern for ACLE intrinsics with 3 operands
-class NeonI_3VDN_3Op<bit q, bit u, bits<2> size, bits<4> opcode,
- string asmop, string ResS, string OpS>
- : NeonI_3VDiff<q, u, size, opcode,
- (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn, VPR128:$Rm),
- asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
- [], NoItinerary> {
- let Constraints = "$src = $Rd";
- let neverHasSideEffects = 1;
-}
-
-multiclass NeonI_3VDN_3Op_v1<bit u, bits<4> opcode, string asmop> {
- def _16b8h : NeonI_3VDN_3Op<0b1, u, 0b00, opcode, asmop, "16b", "8h">;
- def _8h4s : NeonI_3VDN_3Op<0b1, u, 0b01, opcode, asmop, "8h", "4s">;
- def _4s2d : NeonI_3VDN_3Op<0b1, u, 0b10, opcode, asmop, "4s", "2d">;
-}
-
-defm ADDHN2vvv : NeonI_3VDN_3Op_v1<0b0, 0b0100, "addhn2">;
-defm SUBHN2vvv : NeonI_3VDN_3Op_v1<0b0, 0b0110, "subhn2">;
-
-defm RADDHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0100, "raddhn2">;
-defm RSUBHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0110, "rsubhn2">;
-
-// Patterns have to be separate because there's a SUBREG_TO_REG in the output
-// part.
-class NarrowHighHalfPat<Instruction INST, ValueType DstTy, ValueType SrcTy,
- SDPatternOperator coreop>
- : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
- (v1i64 (bitconvert (DstTy (coreop (SrcTy VPR128:$Rn),
- (SrcTy VPR128:$Rm)))))),
- (INST (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
- VPR128:$Rn, VPR128:$Rm)>;
-
-// addhn2 patterns
-def : NarrowHighHalfPat<ADDHN2vvv_16b8h, v8i8, v8i16,
- BinOpFrag<(NI_get_hi_8h (add node:$LHS, node:$RHS))>>;
-def : NarrowHighHalfPat<ADDHN2vvv_8h4s, v4i16, v4i32,
- BinOpFrag<(NI_get_hi_4s (add node:$LHS, node:$RHS))>>;
-def : NarrowHighHalfPat<ADDHN2vvv_4s2d, v2i32, v2i64,
- BinOpFrag<(NI_get_hi_2d (add node:$LHS, node:$RHS))>>;
-
-// subhn2 patterns
-def : NarrowHighHalfPat<SUBHN2vvv_16b8h, v8i8, v8i16,
- BinOpFrag<(NI_get_hi_8h (sub node:$LHS, node:$RHS))>>;
-def : NarrowHighHalfPat<SUBHN2vvv_8h4s, v4i16, v4i32,
- BinOpFrag<(NI_get_hi_4s (sub node:$LHS, node:$RHS))>>;
-def : NarrowHighHalfPat<SUBHN2vvv_4s2d, v2i32, v2i64,
- BinOpFrag<(NI_get_hi_2d (sub node:$LHS, node:$RHS))>>;
-
-// raddhn2 patterns
-def : NarrowHighHalfPat<RADDHN2vvv_16b8h, v8i8, v8i16, int_arm_neon_vraddhn>;
-def : NarrowHighHalfPat<RADDHN2vvv_8h4s, v4i16, v4i32, int_arm_neon_vraddhn>;
-def : NarrowHighHalfPat<RADDHN2vvv_4s2d, v2i32, v2i64, int_arm_neon_vraddhn>;
-
-// rsubhn2 patterns
-def : NarrowHighHalfPat<RSUBHN2vvv_16b8h, v8i8, v8i16, int_arm_neon_vrsubhn>;
-def : NarrowHighHalfPat<RSUBHN2vvv_8h4s, v4i16, v4i32, int_arm_neon_vrsubhn>;
-def : NarrowHighHalfPat<RSUBHN2vvv_4s2d, v2i32, v2i64, int_arm_neon_vrsubhn>;
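A sketch of the high ("2") form in C, assuming the AArch64 ACLE _high intrinsics from <arm_neon.h>:

#include <arm_neon.h>

/* addhn2 writes the narrowed sum into the upper half of an existing vector,
   which is why the patterns above need SUBREG_TO_REG to keep $src's low half. */
int8x16_t add_high_narrow2(int8x8_t lo, int16x8_t a, int16x8_t b) {
  return vaddhn_high_s16(lo, a, b);
}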
-
-// Pattern that needs to extend the result
-class NeonI_3VDL_Ext<bit q, bit u, bits<2> size, bits<4> opcode,
- string asmop, string ResS, string OpS,
- SDPatternOperator opnode,
- RegisterOperand OpVPR,
- ValueType ResTy, ValueType OpTy, ValueType OpSTy>
- : NeonI_3VDiff<q, u, size, opcode,
- (outs VPR128:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
- asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
- [(set (ResTy VPR128:$Rd),
- (ResTy (zext (OpSTy (opnode (OpTy OpVPR:$Rn),
- (OpTy OpVPR:$Rm))))))],
- NoItinerary>;
-
-multiclass NeonI_3VDL_zext<bit u, bits<4> opcode, string asmop,
- SDPatternOperator opnode, bit Commutable = 0> {
- let isCommutable = Commutable in {
- def _8h8b : NeonI_3VDL_Ext<0b0, u, 0b00, opcode, asmop, "8h", "8b",
- opnode, VPR64, v8i16, v8i8, v8i8>;
- def _4s4h : NeonI_3VDL_Ext<0b0, u, 0b01, opcode, asmop, "4s", "4h",
- opnode, VPR64, v4i32, v4i16, v4i16>;
- def _2d2s : NeonI_3VDL_Ext<0b0, u, 0b10, opcode, asmop, "2d", "2s",
- opnode, VPR64, v2i64, v2i32, v2i32>;
- }
-}
-
-defm SABDLvvv : NeonI_3VDL_zext<0b0, 0b0111, "sabdl", int_arm_neon_vabds, 1>;
-defm UABDLvvv : NeonI_3VDL_zext<0b1, 0b0111, "uabdl", int_arm_neon_vabdu, 1>;
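Roughly, in C with ACLE <arm_neon.h>:

#include <arm_neon.h>

/* uabdl: lane-wise absolute difference, zero-extended to the wider result
   type, matching the zext(vabdu(Rn, Rm)) pattern selected above. */
uint16x8_t abs_diff_long(uint8x8_t a, uint8x8_t b) { return vabdl_u8(a, b); }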
-
-multiclass NeonI_Op_High<SDPatternOperator op> {
- def _16B : PatFrag<(ops node:$Rn, node:$Rm),
- (op (v8i8 (Neon_High16B node:$Rn)),
- (v8i8 (Neon_High16B node:$Rm)))>;
- def _8H : PatFrag<(ops node:$Rn, node:$Rm),
- (op (v4i16 (Neon_High8H node:$Rn)),
- (v4i16 (Neon_High8H node:$Rm)))>;
- def _4S : PatFrag<(ops node:$Rn, node:$Rm),
- (op (v2i32 (Neon_High4S node:$Rn)),
- (v2i32 (Neon_High4S node:$Rm)))>;
-}
-
-defm NI_sabdl_hi : NeonI_Op_High<int_arm_neon_vabds>;
-defm NI_uabdl_hi : NeonI_Op_High<int_arm_neon_vabdu>;
-defm NI_smull_hi : NeonI_Op_High<int_arm_neon_vmulls>;
-defm NI_umull_hi : NeonI_Op_High<int_arm_neon_vmullu>;
-defm NI_qdmull_hi : NeonI_Op_High<int_arm_neon_vqdmull>;
-defm NI_pmull_hi : NeonI_Op_High<int_arm_neon_vmullp>;
-
-multiclass NeonI_3VDL_Abd_u<bit u, bits<4> opcode, string asmop, string opnode,
- bit Commutable = 0> {
- let isCommutable = Commutable in {
- def _8h8b : NeonI_3VDL_Ext<0b1, u, 0b00, opcode, asmop, "8h", "16b",
- !cast<PatFrag>(opnode # "_16B"),
- VPR128, v8i16, v16i8, v8i8>;
- def _4s4h : NeonI_3VDL_Ext<0b1, u, 0b01, opcode, asmop, "4s", "8h",
- !cast<PatFrag>(opnode # "_8H"),
- VPR128, v4i32, v8i16, v4i16>;
- def _2d2s : NeonI_3VDL_Ext<0b1, u, 0b10, opcode, asmop, "2d", "4s",
- !cast<PatFrag>(opnode # "_4S"),
- VPR128, v2i64, v4i32, v2i32>;
- }
-}
-
-defm SABDL2vvv : NeonI_3VDL_Abd_u<0b0, 0b0111, "sabdl2", "NI_sabdl_hi", 1>;
-defm UABDL2vvv : NeonI_3VDL_Abd_u<0b1, 0b0111, "uabdl2", "NI_uabdl_hi", 1>;
-
-// For patterns that need two operators chained together.
-class NeonI_3VDL_Aba<bit q, bit u, bits<2> size, bits<4> opcode,
- string asmop, string ResS, string OpS,
- SDPatternOperator opnode, SDPatternOperator subop,
- RegisterOperand OpVPR,
- ValueType ResTy, ValueType OpTy, ValueType OpSTy>
- : NeonI_3VDiff<q, u, size, opcode,
- (outs VPR128:$Rd), (ins VPR128:$src, OpVPR:$Rn, OpVPR:$Rm),
- asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
- [(set (ResTy VPR128:$Rd),
- (ResTy (opnode
- (ResTy VPR128:$src),
- (ResTy (zext (OpSTy (subop (OpTy OpVPR:$Rn),
- (OpTy OpVPR:$Rm))))))))],
- NoItinerary> {
- let Constraints = "$src = $Rd";
-}
-
-multiclass NeonI_3VDL_Aba_v1<bit u, bits<4> opcode, string asmop,
- SDPatternOperator opnode, SDPatternOperator subop>{
- def _8h8b : NeonI_3VDL_Aba<0b0, u, 0b00, opcode, asmop, "8h", "8b",
- opnode, subop, VPR64, v8i16, v8i8, v8i8>;
- def _4s4h : NeonI_3VDL_Aba<0b0, u, 0b01, opcode, asmop, "4s", "4h",
- opnode, subop, VPR64, v4i32, v4i16, v4i16>;
- def _2d2s : NeonI_3VDL_Aba<0b0, u, 0b10, opcode, asmop, "2d", "2s",
- opnode, subop, VPR64, v2i64, v2i32, v2i32>;
-}
-
-defm SABALvvv : NeonI_3VDL_Aba_v1<0b0, 0b0101, "sabal",
- add, int_arm_neon_vabds>;
-defm UABALvvv : NeonI_3VDL_Aba_v1<0b1, 0b0101, "uabal",
- add, int_arm_neon_vabdu>;
-
-multiclass NeonI_3VDL2_Aba_v1<bit u, bits<4> opcode, string asmop,
- SDPatternOperator opnode, string subop> {
- def _8h8b : NeonI_3VDL_Aba<0b1, u, 0b00, opcode, asmop, "8h", "16b",
- opnode, !cast<PatFrag>(subop # "_16B"),
- VPR128, v8i16, v16i8, v8i8>;
- def _4s4h : NeonI_3VDL_Aba<0b1, u, 0b01, opcode, asmop, "4s", "8h",
- opnode, !cast<PatFrag>(subop # "_8H"),
- VPR128, v4i32, v8i16, v4i16>;
- def _2d2s : NeonI_3VDL_Aba<0b1, u, 0b10, opcode, asmop, "2d", "4s",
- opnode, !cast<PatFrag>(subop # "_4S"),
- VPR128, v2i64, v4i32, v2i32>;
-}
-
-defm SABAL2vvv : NeonI_3VDL2_Aba_v1<0b0, 0b0101, "sabal2", add,
- "NI_sabdl_hi">;
-defm UABAL2vvv : NeonI_3VDL2_Aba_v1<0b1, 0b0101, "uabal2", add,
- "NI_uabdl_hi">;
-
-// Long pattern with 2 operands
-multiclass NeonI_3VDL_2Op<bit u, bits<4> opcode, string asmop,
- SDPatternOperator opnode, bit Commutable = 0> {
- let isCommutable = Commutable in {
- def _8h8b : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
- opnode, VPR128, VPR64, v8i16, v8i8>;
- def _4s4h : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
- opnode, VPR128, VPR64, v4i32, v4i16>;
- def _2d2s : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
- opnode, VPR128, VPR64, v2i64, v2i32>;
- }
-}
-
-defm SMULLvvv : NeonI_3VDL_2Op<0b0, 0b1100, "smull", int_arm_neon_vmulls, 1>;
-defm UMULLvvv : NeonI_3VDL_2Op<0b1, 0b1100, "umull", int_arm_neon_vmullu, 1>;
-
-class NeonI_3VDL2_2Op_mull<bit q, bit u, bits<2> size, bits<4> opcode,
- string asmop, string ResS, string OpS,
- SDPatternOperator opnode,
- ValueType ResTy, ValueType OpTy>
- : NeonI_3VDiff<q, u, size, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
- asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
- [(set (ResTy VPR128:$Rd),
- (ResTy (opnode (OpTy VPR128:$Rn), (OpTy VPR128:$Rm))))],
- NoItinerary>;
-
-multiclass NeonI_3VDL2_2Op_mull_v1<bit u, bits<4> opcode, string asmop,
- string opnode, bit Commutable = 0> {
- let isCommutable = Commutable in {
- def _8h16b : NeonI_3VDL2_2Op_mull<0b1, u, 0b00, opcode, asmop, "8h", "16b",
- !cast<PatFrag>(opnode # "_16B"),
- v8i16, v16i8>;
- def _4s8h : NeonI_3VDL2_2Op_mull<0b1, u, 0b01, opcode, asmop, "4s", "8h",
- !cast<PatFrag>(opnode # "_8H"),
- v4i32, v8i16>;
- def _2d4s : NeonI_3VDL2_2Op_mull<0b1, u, 0b10, opcode, asmop, "2d", "4s",
- !cast<PatFrag>(opnode # "_4S"),
- v2i64, v4i32>;
- }
-}
-
-defm SMULL2vvv : NeonI_3VDL2_2Op_mull_v1<0b0, 0b1100, "smull2",
- "NI_smull_hi", 1>;
-defm UMULL2vvv : NeonI_3VDL2_2Op_mull_v1<0b1, 0b1100, "umull2",
- "NI_umull_hi", 1>;
-
-// Long pattern with 3 operands
-class NeonI_3VDL_3Op<bit q, bit u, bits<2> size, bits<4> opcode,
- string asmop, string ResS, string OpS,
- SDPatternOperator opnode,
- ValueType ResTy, ValueType OpTy>
- : NeonI_3VDiff<q, u, size, opcode,
- (outs VPR128:$Rd), (ins VPR128:$src, VPR64:$Rn, VPR64:$Rm),
- asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
- [(set (ResTy VPR128:$Rd),
- (ResTy (opnode
- (ResTy VPR128:$src),
- (OpTy VPR64:$Rn), (OpTy VPR64:$Rm))))],
- NoItinerary> {
- let Constraints = "$src = $Rd";
-}
-
-multiclass NeonI_3VDL_3Op_v1<bit u, bits<4> opcode, string asmop,
- SDPatternOperator opnode> {
- def _8h8b : NeonI_3VDL_3Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
- opnode, v8i16, v8i8>;
- def _4s4h : NeonI_3VDL_3Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
- opnode, v4i32, v4i16>;
- def _2d2s : NeonI_3VDL_3Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
- opnode, v2i64, v2i32>;
-}
-
-def Neon_smlal : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
- (add node:$Rd,
- (int_arm_neon_vmulls node:$Rn, node:$Rm))>;
-
-def Neon_umlal : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
- (add node:$Rd,
- (int_arm_neon_vmullu node:$Rn, node:$Rm))>;
-
-def Neon_smlsl : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
- (sub node:$Rd,
- (int_arm_neon_vmulls node:$Rn, node:$Rm))>;
-
-def Neon_umlsl : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
- (sub node:$Rd,
- (int_arm_neon_vmullu node:$Rn, node:$Rm))>;
-
-defm SMLALvvv : NeonI_3VDL_3Op_v1<0b0, 0b1000, "smlal", Neon_smlal>;
-defm UMLALvvv : NeonI_3VDL_3Op_v1<0b1, 0b1000, "umlal", Neon_umlal>;
-
-defm SMLSLvvv : NeonI_3VDL_3Op_v1<0b0, 0b1010, "smlsl", Neon_smlsl>;
-defm UMLSLvvv : NeonI_3VDL_3Op_v1<0b1, 0b1010, "umlsl", Neon_umlsl>;
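In C (ACLE <arm_neon.h>, illustrative), the chained widening multiply-accumulate looks like:

#include <arm_neon.h>

/* smlal is matched as add(acc, smull(a, b)), exactly the Neon_smlal PatFrag
   above; smlsl/umlsl are the corresponding sub forms. */
int32x4_t mul_add_long(int32x4_t acc, int16x4_t a, int16x4_t b) {
  return vmlal_s16(acc, a, b);
}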
-
-class NeonI_3VDL2_3Op_mlas<bit q, bit u, bits<2> size, bits<4> opcode,
- string asmop, string ResS, string OpS,
- SDPatternOperator subop, SDPatternOperator opnode,
- RegisterOperand OpVPR,
- ValueType ResTy, ValueType OpTy>
- : NeonI_3VDiff<q, u, size, opcode,
- (outs VPR128:$Rd), (ins VPR128:$src, OpVPR:$Rn, OpVPR:$Rm),
- asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
- [(set (ResTy VPR128:$Rd),
- (ResTy (subop
- (ResTy VPR128:$src),
- (ResTy (opnode (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm))))))],
- NoItinerary> {
- let Constraints = "$src = $Rd";
-}
-
-multiclass NeonI_3VDL2_3Op_mlas_v1<bit u, bits<4> opcode, string asmop,
- SDPatternOperator subop, string opnode> {
- def _8h16b : NeonI_3VDL2_3Op_mlas<0b1, u, 0b00, opcode, asmop, "8h", "16b",
- subop, !cast<PatFrag>(opnode # "_16B"),
- VPR128, v8i16, v16i8>;
- def _4s8h : NeonI_3VDL2_3Op_mlas<0b1, u, 0b01, opcode, asmop, "4s", "8h",
- subop, !cast<PatFrag>(opnode # "_8H"),
- VPR128, v4i32, v8i16>;
- def _2d4s : NeonI_3VDL2_3Op_mlas<0b1, u, 0b10, opcode, asmop, "2d", "4s",
- subop, !cast<PatFrag>(opnode # "_4S"),
- VPR128, v2i64, v4i32>;
-}
-
-defm SMLAL2vvv : NeonI_3VDL2_3Op_mlas_v1<0b0, 0b1000, "smlal2",
- add, "NI_smull_hi">;
-defm UMLAL2vvv : NeonI_3VDL2_3Op_mlas_v1<0b1, 0b1000, "umlal2",
- add, "NI_umull_hi">;
-
-defm SMLSL2vvv : NeonI_3VDL2_3Op_mlas_v1<0b0, 0b1010, "smlsl2",
- sub, "NI_smull_hi">;
-defm UMLSL2vvv : NeonI_3VDL2_3Op_mlas_v1<0b1, 0b1010, "umlsl2",
- sub, "NI_umull_hi">;
-
-multiclass NeonI_3VDL_qdmlal_3Op_v2<bit u, bits<4> opcode, string asmop,
- SDPatternOperator opnode> {
- def _4s4h : NeonI_3VDL2_3Op_mlas<0b0, u, 0b01, opcode, asmop, "4s", "4h",
- opnode, int_arm_neon_vqdmull,
- VPR64, v4i32, v4i16>;
- def _2d2s : NeonI_3VDL2_3Op_mlas<0b0, u, 0b10, opcode, asmop, "2d", "2s",
- opnode, int_arm_neon_vqdmull,
- VPR64, v2i64, v2i32>;
-}
-
-defm SQDMLALvvv : NeonI_3VDL_qdmlal_3Op_v2<0b0, 0b1001, "sqdmlal",
- int_arm_neon_vqadds>;
-defm SQDMLSLvvv : NeonI_3VDL_qdmlal_3Op_v2<0b0, 0b1011, "sqdmlsl",
- int_arm_neon_vqsubs>;
-
-multiclass NeonI_3VDL_v2<bit u, bits<4> opcode, string asmop,
- SDPatternOperator opnode, bit Commutable = 0> {
- let isCommutable = Commutable in {
- def _4s4h : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
- opnode, VPR128, VPR64, v4i32, v4i16>;
- def _2d2s : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
- opnode, VPR128, VPR64, v2i64, v2i32>;
- }
-}
-
-defm SQDMULLvvv : NeonI_3VDL_v2<0b0, 0b1101, "sqdmull",
- int_arm_neon_vqdmull, 1>;
-
-multiclass NeonI_3VDL2_2Op_mull_v2<bit u, bits<4> opcode, string asmop,
- string opnode, bit Commutable = 0> {
- let isCommutable = Commutable in {
- def _4s8h : NeonI_3VDL2_2Op_mull<0b1, u, 0b01, opcode, asmop, "4s", "8h",
- !cast<PatFrag>(opnode # "_8H"),
- v4i32, v8i16>;
- def _2d4s : NeonI_3VDL2_2Op_mull<0b1, u, 0b10, opcode, asmop, "2d", "4s",
- !cast<PatFrag>(opnode # "_4S"),
- v2i64, v4i32>;
- }
-}
-
-defm SQDMULL2vvv : NeonI_3VDL2_2Op_mull_v2<0b0, 0b1101, "sqdmull2",
- "NI_qdmull_hi", 1>;
-
-multiclass NeonI_3VDL2_3Op_qdmlal_v2<bit u, bits<4> opcode, string asmop,
- SDPatternOperator opnode> {
- def _4s8h : NeonI_3VDL2_3Op_mlas<0b1, u, 0b01, opcode, asmop, "4s", "8h",
- opnode, NI_qdmull_hi_8H,
- VPR128, v4i32, v8i16>;
- def _2d4s : NeonI_3VDL2_3Op_mlas<0b1, u, 0b10, opcode, asmop, "2d", "4s",
- opnode, NI_qdmull_hi_4S,
- VPR128, v2i64, v4i32>;
-}
-
-defm SQDMLAL2vvv : NeonI_3VDL2_3Op_qdmlal_v2<0b0, 0b1001, "sqdmlal2",
- int_arm_neon_vqadds>;
-defm SQDMLSL2vvv : NeonI_3VDL2_3Op_qdmlal_v2<0b0, 0b1011, "sqdmlsl2",
- int_arm_neon_vqsubs>;
-
-multiclass NeonI_3VDL_v3<bit u, bits<4> opcode, string asmop,
- SDPatternOperator opnode, bit Commutable = 0> {
- let isCommutable = Commutable in {
- def _8h8b : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
- opnode, VPR128, VPR64, v8i16, v8i8>;
-
- def _1q1d : NeonI_3VDiff<0b0, u, 0b11, opcode,
- (outs VPR128:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
- asmop # "\t$Rd.1q, $Rn.1d, $Rm.1d",
- [], NoItinerary>;
- }
-}
-
-defm PMULLvvv : NeonI_3VDL_v3<0b0, 0b1110, "pmull", int_arm_neon_vmullp, 1>;
-
-multiclass NeonI_3VDL2_2Op_mull_v3<bit u, bits<4> opcode, string asmop,
- string opnode, bit Commutable = 0> {
- let isCommutable = Commutable in {
- def _8h16b : NeonI_3VDL2_2Op_mull<0b1, u, 0b00, opcode, asmop, "8h", "16b",
- !cast<PatFrag>(opnode # "_16B"),
- v8i16, v16i8>;
-
- def _1q2d : NeonI_3VDiff<0b1, u, 0b11, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
- asmop # "\t$Rd.1q, $Rn.2d, $Rm.2d",
- [], NoItinerary>;
- }
-}
-
-defm PMULL2vvv : NeonI_3VDL2_2Op_mull_v3<0b0, 0b1110, "pmull2", "NI_pmull_hi",
- 1>;
-
-// End of implementation for instruction class (3V Diff)
-
-// The following definitions are for vector load/store of multiple N-element
-// structures (class SIMD lselem).
-
-// ld1: load multiple 1-element structures into 1/2/3/4 registers.
-// ld2/ld3/ld4: load multiple N-element structures into N registers (N = 2, 3, 4).
-// Each structure consists of a sequence of sets of N values.
-// The first element of a structure is placed in the first lane of the first
-// vector, the second element in the first lane of the second vector, and so on.
-// E.g. LD1_3V_2S will load 32-bit elements {A, B, C, D, E, F} sequentially into
-// the list of three 64-bit vectors {BA, DC, FE}.
-// E.g. LD3_2S will load 32-bit elements {A, B, C, D, E, F} into the list of
-// three 64-bit vectors {DA, EB, FC}.
-// Store instructions store multiple structures from N registers in the same
-// way the loads read them.
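A minimal C sketch, assuming ACLE <arm_neon.h>, of the de-interleaving load described above (the pointed-to data stands in for the {A..F} example):

#include <arm_neon.h>

/* ld3 with .2s operands de-interleaves {A,B,C,D,E,F} into the three
   registers {A,D}, {B,E} and {C,F}; st3 performs the inverse. */
int32x2_t first_of_each_triple(const int32_t *p) {
  int32x2x3_t v = vld3_s32(p);   /* v.val[0] = {A, D}, v.val[1] = {B, E}, ... */
  return v.val[0];
}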
-
-
-class NeonI_LDVList<bit q, bits<4> opcode, bits<2> size,
- RegisterOperand VecList, string asmop>
- : NeonI_LdStMult<q, 1, opcode, size,
- (outs VecList:$Rt), (ins GPR64xsp:$Rn),
- asmop # "\t$Rt, [$Rn]",
- [],
- NoItinerary> {
- let mayLoad = 1;
- let neverHasSideEffects = 1;
-}
-
-multiclass LDVList_BHSD<bits<4> opcode, string List, string asmop> {
- def _8B : NeonI_LDVList<0, opcode, 0b00,
- !cast<RegisterOperand>(List # "8B_operand"), asmop>;
-
- def _4H : NeonI_LDVList<0, opcode, 0b01,
- !cast<RegisterOperand>(List # "4H_operand"), asmop>;
-
- def _2S : NeonI_LDVList<0, opcode, 0b10,
- !cast<RegisterOperand>(List # "2S_operand"), asmop>;
-
- def _16B : NeonI_LDVList<1, opcode, 0b00,
- !cast<RegisterOperand>(List # "16B_operand"), asmop>;
-
- def _8H : NeonI_LDVList<1, opcode, 0b01,
- !cast<RegisterOperand>(List # "8H_operand"), asmop>;
-
- def _4S : NeonI_LDVList<1, opcode, 0b10,
- !cast<RegisterOperand>(List # "4S_operand"), asmop>;
-
- def _2D : NeonI_LDVList<1, opcode, 0b11,
- !cast<RegisterOperand>(List # "2D_operand"), asmop>;
-}
-
-// Load multiple N-element structures into N consecutive registers (N = 1,2,3,4)
-defm LD1 : LDVList_BHSD<0b0111, "VOne", "ld1">;
-def LD1_1D : NeonI_LDVList<0, 0b0111, 0b11, VOne1D_operand, "ld1">;
-
-defm LD2 : LDVList_BHSD<0b1000, "VPair", "ld2">;
-
-defm LD3 : LDVList_BHSD<0b0100, "VTriple", "ld3">;
-
-defm LD4 : LDVList_BHSD<0b0000, "VQuad", "ld4">;
-
-// Load multiple 1-element structures into N consecutive registers (N = 2,3,4)
-defm LD1x2 : LDVList_BHSD<0b1010, "VPair", "ld1">;
-def LD1x2_1D : NeonI_LDVList<0, 0b1010, 0b11, VPair1D_operand, "ld1">;
-
-defm LD1x3 : LDVList_BHSD<0b0110, "VTriple", "ld1">;
-def LD1x3_1D : NeonI_LDVList<0, 0b0110, 0b11, VTriple1D_operand, "ld1">;
-
-defm LD1x4 : LDVList_BHSD<0b0010, "VQuad", "ld1">;
-def LD1x4_1D : NeonI_LDVList<0, 0b0010, 0b11, VQuad1D_operand, "ld1">;
-
-class NeonI_STVList<bit q, bits<4> opcode, bits<2> size,
- RegisterOperand VecList, string asmop>
- : NeonI_LdStMult<q, 0, opcode, size,
- (outs), (ins GPR64xsp:$Rn, VecList:$Rt),
- asmop # "\t$Rt, [$Rn]",
- [],
- NoItinerary> {
- let mayStore = 1;
- let neverHasSideEffects = 1;
-}
-
-multiclass STVList_BHSD<bits<4> opcode, string List, string asmop> {
- def _8B : NeonI_STVList<0, opcode, 0b00,
- !cast<RegisterOperand>(List # "8B_operand"), asmop>;
-
- def _4H : NeonI_STVList<0, opcode, 0b01,
- !cast<RegisterOperand>(List # "4H_operand"), asmop>;
-
- def _2S : NeonI_STVList<0, opcode, 0b10,
- !cast<RegisterOperand>(List # "2S_operand"), asmop>;
-
- def _16B : NeonI_STVList<1, opcode, 0b00,
- !cast<RegisterOperand>(List # "16B_operand"), asmop>;
-
- def _8H : NeonI_STVList<1, opcode, 0b01,
- !cast<RegisterOperand>(List # "8H_operand"), asmop>;
-
- def _4S : NeonI_STVList<1, opcode, 0b10,
- !cast<RegisterOperand>(List # "4S_operand"), asmop>;
-
- def _2D : NeonI_STVList<1, opcode, 0b11,
- !cast<RegisterOperand>(List # "2D_operand"), asmop>;
-}
-
-// Store multiple N-element structures from N registers (N = 1,2,3,4)
-defm ST1 : STVList_BHSD<0b0111, "VOne", "st1">;
-def ST1_1D : NeonI_STVList<0, 0b0111, 0b11, VOne1D_operand, "st1">;
-
-defm ST2 : STVList_BHSD<0b1000, "VPair", "st2">;
-
-defm ST3 : STVList_BHSD<0b0100, "VTriple", "st3">;
-
-defm ST4 : STVList_BHSD<0b0000, "VQuad", "st4">;
-
-// Store multiple 1-element structures from N consecutive registers (N = 2,3,4)
-defm ST1x2 : STVList_BHSD<0b1010, "VPair", "st1">;
-def ST1x2_1D : NeonI_STVList<0, 0b1010, 0b11, VPair1D_operand, "st1">;
-
-defm ST1x3 : STVList_BHSD<0b0110, "VTriple", "st1">;
-def ST1x3_1D : NeonI_STVList<0, 0b0110, 0b11, VTriple1D_operand, "st1">;
-
-defm ST1x4 : STVList_BHSD<0b0010, "VQuad", "st1">;
-def ST1x4_1D : NeonI_STVList<0, 0b0010, 0b11, VQuad1D_operand, "st1">;
-
-def : Pat<(v2f64 (load GPR64xsp:$addr)), (LD1_2D GPR64xsp:$addr)>;
-def : Pat<(v2i64 (load GPR64xsp:$addr)), (LD1_2D GPR64xsp:$addr)>;
-
-def : Pat<(v4f32 (load GPR64xsp:$addr)), (LD1_4S GPR64xsp:$addr)>;
-def : Pat<(v4i32 (load GPR64xsp:$addr)), (LD1_4S GPR64xsp:$addr)>;
-
-def : Pat<(v8i16 (load GPR64xsp:$addr)), (LD1_8H GPR64xsp:$addr)>;
-def : Pat<(v16i8 (load GPR64xsp:$addr)), (LD1_16B GPR64xsp:$addr)>;
-
-def : Pat<(v1f64 (load GPR64xsp:$addr)), (LD1_1D GPR64xsp:$addr)>;
-def : Pat<(v1i64 (load GPR64xsp:$addr)), (LD1_1D GPR64xsp:$addr)>;
-
-def : Pat<(v2f32 (load GPR64xsp:$addr)), (LD1_2S GPR64xsp:$addr)>;
-def : Pat<(v2i32 (load GPR64xsp:$addr)), (LD1_2S GPR64xsp:$addr)>;
-
-def : Pat<(v4i16 (load GPR64xsp:$addr)), (LD1_4H GPR64xsp:$addr)>;
-def : Pat<(v8i8 (load GPR64xsp:$addr)), (LD1_8B GPR64xsp:$addr)>;
-
-def : Pat<(store (v2i64 VPR128:$value), GPR64xsp:$addr),
- (ST1_2D GPR64xsp:$addr, VPR128:$value)>;
-def : Pat<(store (v2f64 VPR128:$value), GPR64xsp:$addr),
- (ST1_2D GPR64xsp:$addr, VPR128:$value)>;
-
-def : Pat<(store (v4i32 VPR128:$value), GPR64xsp:$addr),
- (ST1_4S GPR64xsp:$addr, VPR128:$value)>;
-def : Pat<(store (v4f32 VPR128:$value), GPR64xsp:$addr),
- (ST1_4S GPR64xsp:$addr, VPR128:$value)>;
-
-def : Pat<(store (v8i16 VPR128:$value), GPR64xsp:$addr),
- (ST1_8H GPR64xsp:$addr, VPR128:$value)>;
-def : Pat<(store (v16i8 VPR128:$value), GPR64xsp:$addr),
- (ST1_16B GPR64xsp:$addr, VPR128:$value)>;
-
-def : Pat<(store (v1i64 VPR64:$value), GPR64xsp:$addr),
- (ST1_1D GPR64xsp:$addr, VPR64:$value)>;
-def : Pat<(store (v1f64 VPR64:$value), GPR64xsp:$addr),
- (ST1_1D GPR64xsp:$addr, VPR64:$value)>;
-
-def : Pat<(store (v2i32 VPR64:$value), GPR64xsp:$addr),
- (ST1_2S GPR64xsp:$addr, VPR64:$value)>;
-def : Pat<(store (v2f32 VPR64:$value), GPR64xsp:$addr),
- (ST1_2S GPR64xsp:$addr, VPR64:$value)>;
-
-def : Pat<(store (v4i16 VPR64:$value), GPR64xsp:$addr),
- (ST1_4H GPR64xsp:$addr, VPR64:$value)>;
-def : Pat<(store (v8i8 VPR64:$value), GPR64xsp:$addr),
- (ST1_8B GPR64xsp:$addr, VPR64:$value)>;
-
-// End of vector load/store of multiple N-element structures (class SIMD lselem)
-
-// The following definitions are for post-index vector load/store of multiple
-// N-element structures (class SIMD lselem-post).
-def exact1_asmoperand : AsmOperandClass {
- let Name = "Exact1";
- let PredicateMethod = "isExactImm<1>";
- let RenderMethod = "addImmOperands";
-}
-def uimm_exact1 : Operand<i32>, ImmLeaf<i32, [{return Imm == 1;}]> {
- let ParserMatchClass = exact1_asmoperand;
-}
-
-def exact2_asmoperand : AsmOperandClass {
- let Name = "Exact2";
- let PredicateMethod = "isExactImm<2>";
- let RenderMethod = "addImmOperands";
-}
-def uimm_exact2 : Operand<i32>, ImmLeaf<i32, [{return Imm == 2;}]> {
- let ParserMatchClass = exact2_asmoperand;
-}
-
-def exact3_asmoperand : AsmOperandClass {
- let Name = "Exact3";
- let PredicateMethod = "isExactImm<3>";
- let RenderMethod = "addImmOperands";
-}
-def uimm_exact3 : Operand<i32>, ImmLeaf<i32, [{return Imm == 3;}]> {
- let ParserMatchClass = exact3_asmoperand;
-}
-
-def exact4_asmoperand : AsmOperandClass {
- let Name = "Exact4";
- let PredicateMethod = "isExactImm<4>";
- let RenderMethod = "addImmOperands";
-}
-def uimm_exact4 : Operand<i32>, ImmLeaf<i32, [{return Imm == 4;}]> {
- let ParserMatchClass = exact4_asmoperand;
-}
-
-def exact6_asmoperand : AsmOperandClass {
- let Name = "Exact6";
- let PredicateMethod = "isExactImm<6>";
- let RenderMethod = "addImmOperands";
-}
-def uimm_exact6 : Operand<i32>, ImmLeaf<i32, [{return Imm == 6;}]> {
- let ParserMatchClass = exact6_asmoperand;
-}
-
-def exact8_asmoperand : AsmOperandClass {
- let Name = "Exact8";
- let PredicateMethod = "isExactImm<8>";
- let RenderMethod = "addImmOperands";
-}
-def uimm_exact8 : Operand<i32>, ImmLeaf<i32, [{return Imm == 8;}]> {
- let ParserMatchClass = exact8_asmoperand;
-}
-
-def exact12_asmoperand : AsmOperandClass {
- let Name = "Exact12";
- let PredicateMethod = "isExactImm<12>";
- let RenderMethod = "addImmOperands";
-}
-def uimm_exact12 : Operand<i32>, ImmLeaf<i32, [{return Imm == 12;}]> {
- let ParserMatchClass = exact12_asmoperand;
-}
-
-def exact16_asmoperand : AsmOperandClass {
- let Name = "Exact16";
- let PredicateMethod = "isExactImm<16>";
- let RenderMethod = "addImmOperands";
-}
-def uimm_exact16 : Operand<i32>, ImmLeaf<i32, [{return Imm == 16;}]> {
- let ParserMatchClass = exact16_asmoperand;
-}
-
-def exact24_asmoperand : AsmOperandClass {
- let Name = "Exact24";
- let PredicateMethod = "isExactImm<24>";
- let RenderMethod = "addImmOperands";
-}
-def uimm_exact24 : Operand<i32>, ImmLeaf<i32, [{return Imm == 24;}]> {
- let ParserMatchClass = exact24_asmoperand;
-}
-
-def exact32_asmoperand : AsmOperandClass {
- let Name = "Exact32";
- let PredicateMethod = "isExactImm<32>";
- let RenderMethod = "addImmOperands";
-}
-def uimm_exact32 : Operand<i32>, ImmLeaf<i32, [{return Imm == 32;}]> {
- let ParserMatchClass = exact32_asmoperand;
-}
-
-def exact48_asmoperand : AsmOperandClass {
- let Name = "Exact48";
- let PredicateMethod = "isExactImm<48>";
- let RenderMethod = "addImmOperands";
-}
-def uimm_exact48 : Operand<i32>, ImmLeaf<i32, [{return Imm == 48;}]> {
- let ParserMatchClass = exact48_asmoperand;
-}
-
-def exact64_asmoperand : AsmOperandClass {
- let Name = "Exact64";
- let PredicateMethod = "isExactImm<64>";
- let RenderMethod = "addImmOperands";
-}
-def uimm_exact64 : Operand<i32>, ImmLeaf<i32, [{return Imm == 64;}]> {
- let ParserMatchClass = exact64_asmoperand;
-}
-
-multiclass NeonI_LDWB_VList<bit q, bits<4> opcode, bits<2> size,
- RegisterOperand VecList, Operand ImmTy,
- string asmop> {
- let Constraints = "$Rn = $wb", mayLoad = 1, neverHasSideEffects = 1,
- DecoderMethod = "DecodeVLDSTPostInstruction" in {
- def _fixed : NeonI_LdStMult_Post<q, 1, opcode, size,
- (outs VecList:$Rt, GPR64xsp:$wb),
- (ins GPR64xsp:$Rn, ImmTy:$amt),
- asmop # "\t$Rt, [$Rn], $amt",
- [],
- NoItinerary> {
- let Rm = 0b11111;
- }
-
- def _register : NeonI_LdStMult_Post<q, 1, opcode, size,
- (outs VecList:$Rt, GPR64xsp:$wb),
- (ins GPR64xsp:$Rn, GPR64noxzr:$Rm),
- asmop # "\t$Rt, [$Rn], $Rm",
- [],
- NoItinerary>;
- }
-}
-
-multiclass LDWB_VList_BHSD<bits<4> opcode, string List, Operand ImmTy,
- Operand ImmTy2, string asmop> {
- defm _8B : NeonI_LDWB_VList<0, opcode, 0b00,
- !cast<RegisterOperand>(List # "8B_operand"),
- ImmTy, asmop>;
-
- defm _4H : NeonI_LDWB_VList<0, opcode, 0b01,
- !cast<RegisterOperand>(List # "4H_operand"),
- ImmTy, asmop>;
-
- defm _2S : NeonI_LDWB_VList<0, opcode, 0b10,
- !cast<RegisterOperand>(List # "2S_operand"),
- ImmTy, asmop>;
-
- defm _16B : NeonI_LDWB_VList<1, opcode, 0b00,
- !cast<RegisterOperand>(List # "16B_operand"),
- ImmTy2, asmop>;
-
- defm _8H : NeonI_LDWB_VList<1, opcode, 0b01,
- !cast<RegisterOperand>(List # "8H_operand"),
- ImmTy2, asmop>;
-
- defm _4S : NeonI_LDWB_VList<1, opcode, 0b10,
- !cast<RegisterOperand>(List # "4S_operand"),
- ImmTy2, asmop>;
-
- defm _2D : NeonI_LDWB_VList<1, opcode, 0b11,
- !cast<RegisterOperand>(List # "2D_operand"),
- ImmTy2, asmop>;
-}
-
-// Post-index load multiple N-element structures from N registers (N = 1,2,3,4)
-defm LD1WB : LDWB_VList_BHSD<0b0111, "VOne", uimm_exact8, uimm_exact16, "ld1">;
-defm LD1WB_1D : NeonI_LDWB_VList<0, 0b0111, 0b11, VOne1D_operand, uimm_exact8,
- "ld1">;
-
-defm LD2WB : LDWB_VList_BHSD<0b1000, "VPair", uimm_exact16, uimm_exact32, "ld2">;
-
-defm LD3WB : LDWB_VList_BHSD<0b0100, "VTriple", uimm_exact24, uimm_exact48,
- "ld3">;
-
-defm LD4WB : LDWB_VList_BHSD<0b0000, "VQuad", uimm_exact32, uimm_exact64, "ld4">;
-
-// Post-index load multiple 1-element structures from N consecutive registers
-// (N = 2,3,4)
-defm LD1x2WB : LDWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
- "ld1">;
-defm LD1x2WB_1D : NeonI_LDWB_VList<0, 0b1010, 0b11, VPair1D_operand,
- uimm_exact16, "ld1">;
-
-defm LD1x3WB : LDWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
- "ld1">;
-defm LD1x3WB_1D : NeonI_LDWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
- uimm_exact24, "ld1">;
-
-defm LD1x4WB : LDWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
- "ld1">;
-defm LD1x4WB_1D : NeonI_LDWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
- uimm_exact32, "ld1">;
-
-multiclass NeonI_STWB_VList<bit q, bits<4> opcode, bits<2> size,
- RegisterOperand VecList, Operand ImmTy,
- string asmop> {
- let Constraints = "$Rn = $wb", mayStore = 1, neverHasSideEffects = 1,
- DecoderMethod = "DecodeVLDSTPostInstruction" in {
- def _fixed : NeonI_LdStMult_Post<q, 0, opcode, size,
- (outs GPR64xsp:$wb),
- (ins GPR64xsp:$Rn, ImmTy:$amt, VecList:$Rt),
- asmop # "\t$Rt, [$Rn], $amt",
- [],
- NoItinerary> {
- let Rm = 0b11111;
- }
-
- def _register : NeonI_LdStMult_Post<q, 0, opcode, size,
- (outs GPR64xsp:$wb),
- (ins GPR64xsp:$Rn, GPR64noxzr:$Rm, VecList:$Rt),
- asmop # "\t$Rt, [$Rn], $Rm",
- [],
- NoItinerary>;
- }
-}
-
-multiclass STWB_VList_BHSD<bits<4> opcode, string List, Operand ImmTy,
- Operand ImmTy2, string asmop> {
- defm _8B : NeonI_STWB_VList<0, opcode, 0b00,
- !cast<RegisterOperand>(List # "8B_operand"), ImmTy, asmop>;
-
- defm _4H : NeonI_STWB_VList<0, opcode, 0b01,
- !cast<RegisterOperand>(List # "4H_operand"),
- ImmTy, asmop>;
-
- defm _2S : NeonI_STWB_VList<0, opcode, 0b10,
- !cast<RegisterOperand>(List # "2S_operand"),
- ImmTy, asmop>;
-
- defm _16B : NeonI_STWB_VList<1, opcode, 0b00,
- !cast<RegisterOperand>(List # "16B_operand"),
- ImmTy2, asmop>;
-
- defm _8H : NeonI_STWB_VList<1, opcode, 0b01,
- !cast<RegisterOperand>(List # "8H_operand"),
- ImmTy2, asmop>;
-
- defm _4S : NeonI_STWB_VList<1, opcode, 0b10,
- !cast<RegisterOperand>(List # "4S_operand"),
- ImmTy2, asmop>;
-
- defm _2D : NeonI_STWB_VList<1, opcode, 0b11,
- !cast<RegisterOperand>(List # "2D_operand"),
- ImmTy2, asmop>;
-}
-
-// Post-index store multiple N-element structures from N registers (N = 1,2,3,4)
-defm ST1WB : STWB_VList_BHSD<0b0111, "VOne", uimm_exact8, uimm_exact16, "st1">;
-defm ST1WB_1D : NeonI_STWB_VList<0, 0b0111, 0b11, VOne1D_operand, uimm_exact8,
- "st1">;
-
-defm ST2WB : STWB_VList_BHSD<0b1000, "VPair", uimm_exact16, uimm_exact32, "st2">;
-
-defm ST3WB : STWB_VList_BHSD<0b0100, "VTriple", uimm_exact24, uimm_exact48,
- "st3">;
-
-defm ST4WB : STWB_VList_BHSD<0b0000, "VQuad", uimm_exact32, uimm_exact64, "st4">;
-
-// Post-index store multiple 1-element structures from N consecutive registers
-// (N = 2,3,4)
-defm ST1x2WB : STWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
- "st1">;
-defm ST1x2WB_1D : NeonI_STWB_VList<0, 0b1010, 0b11, VPair1D_operand,
- uimm_exact16, "st1">;
-
-defm ST1x3WB : STWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
- "st1">;
-defm ST1x3WB_1D : NeonI_STWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
- uimm_exact24, "st1">;
-
-defm ST1x4WB : STWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
- "st1">;
-defm ST1x4WB_1D : NeonI_STWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
- uimm_exact32, "st1">;
-
-// End of post-index vector load/store multiple N-element structure
-// (class SIMD lselem-post)
-
-// The following are vector load/store single N-element structure instructions
-// (class SIMD lsone).
-def neon_uimm0_bare : Operand<i64>,
- ImmLeaf<i64, [{return Imm == 0;}]> {
- let ParserMatchClass = neon_uimm0_asmoperand;
- let PrintMethod = "printUImmBareOperand";
-}
-
-def neon_uimm1_bare : Operand<i64>,
- ImmLeaf<i64, [{return Imm < 2;}]> {
- let ParserMatchClass = neon_uimm1_asmoperand;
- let PrintMethod = "printUImmBareOperand";
-}
-
-def neon_uimm2_bare : Operand<i64>,
- ImmLeaf<i64, [{return Imm < 4;}]> {
- let ParserMatchClass = neon_uimm2_asmoperand;
- let PrintMethod = "printUImmBareOperand";
-}
-
-def neon_uimm3_bare : Operand<i64>,
- ImmLeaf<i64, [{return Imm < 8;}]> {
- let ParserMatchClass = uimm3_asmoperand;
- let PrintMethod = "printUImmBareOperand";
-}
-
-def neon_uimm4_bare : Operand<i64>,
- ImmLeaf<i64, [{return Imm < 16;}]> {
- let ParserMatchClass = uimm4_asmoperand;
- let PrintMethod = "printUImmBareOperand";
-}
-
-class NeonI_LDN_Dup<bit q, bit r, bits<3> opcode, bits<2> size,
- RegisterOperand VecList, string asmop>
- : NeonI_LdOne_Dup<q, r, opcode, size,
- (outs VecList:$Rt), (ins GPR64xsp:$Rn),
- asmop # "\t$Rt, [$Rn]",
- [],
- NoItinerary> {
- let mayLoad = 1;
- let neverHasSideEffects = 1;
-}
-
-multiclass LDN_Dup_BHSD<bit r, bits<3> opcode, string List, string asmop> {
- def _8B : NeonI_LDN_Dup<0, r, opcode, 0b00,
- !cast<RegisterOperand>(List # "8B_operand"), asmop>;
-
- def _4H : NeonI_LDN_Dup<0, r, opcode, 0b01,
- !cast<RegisterOperand>(List # "4H_operand"), asmop>;
-
- def _2S : NeonI_LDN_Dup<0, r, opcode, 0b10,
- !cast<RegisterOperand>(List # "2S_operand"), asmop>;
-
- def _1D : NeonI_LDN_Dup<0, r, opcode, 0b11,
- !cast<RegisterOperand>(List # "1D_operand"), asmop>;
-
- def _16B : NeonI_LDN_Dup<1, r, opcode, 0b00,
- !cast<RegisterOperand>(List # "16B_operand"), asmop>;
-
- def _8H : NeonI_LDN_Dup<1, r, opcode, 0b01,
- !cast<RegisterOperand>(List # "8H_operand"), asmop>;
-
- def _4S : NeonI_LDN_Dup<1, r, opcode, 0b10,
- !cast<RegisterOperand>(List # "4S_operand"), asmop>;
-
- def _2D : NeonI_LDN_Dup<1, r, opcode, 0b11,
- !cast<RegisterOperand>(List # "2D_operand"), asmop>;
-}
-
-// Load single 1-element structure to all lanes of 1 register
-defm LD1R : LDN_Dup_BHSD<0b0, 0b110, "VOne", "ld1r">;
-
-// Load single N-element structure to all lanes of N consecutive
-// registers (N = 2,3,4)
-defm LD2R : LDN_Dup_BHSD<0b1, 0b110, "VPair", "ld2r">;
-defm LD3R : LDN_Dup_BHSD<0b0, 0b111, "VTriple", "ld3r">;
-defm LD4R : LDN_Dup_BHSD<0b1, 0b111, "VQuad", "ld4r">;
-
-
-class LD1R_pattern <ValueType VTy, ValueType DTy, PatFrag LoadOp,
- Instruction INST>
- : Pat<(VTy (Neon_vdup (DTy (LoadOp GPR64xsp:$Rn)))),
- (VTy (INST GPR64xsp:$Rn))>;
-
-// Match all LD1R instructions
-def : LD1R_pattern<v8i8, i32, extloadi8, LD1R_8B>;
-
-def : LD1R_pattern<v16i8, i32, extloadi8, LD1R_16B>;
-
-def : LD1R_pattern<v4i16, i32, extloadi16, LD1R_4H>;
-
-def : LD1R_pattern<v8i16, i32, extloadi16, LD1R_8H>;
-
-def : LD1R_pattern<v2i32, i32, load, LD1R_2S>;
-def : LD1R_pattern<v2f32, f32, load, LD1R_2S>;
-
-def : LD1R_pattern<v4i32, i32, load, LD1R_4S>;
-def : LD1R_pattern<v4f32, f32, load, LD1R_4S>;
-
-def : LD1R_pattern<v1i64, i64, load, LD1R_1D>;
-def : LD1R_pattern<v1f64, f64, load, LD1R_1D>;
-
-def : LD1R_pattern<v2i64, i64, load, LD1R_2D>;
-def : LD1R_pattern<v2f64, f64, load, LD1R_2D>;
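
As a point of reference, the LD1R patterns above fold a scalar load feeding a
duplicate-to-all-lanes into a single ld1r. A minimal C sketch of the same
operation, assuming an AArch64 target and the ACLE intrinsics from arm_neon.h
(the ld1r lowering is the expected outcome of these patterns, but the exact
instruction chosen is up to the compiler):

    #include <arm_neon.h>
    #include <stdio.h>

    /* Load one float and broadcast it into all four lanes of a 128-bit
       vector; with the patterns above this is expected to become a single
       "ld1r {v.4s}, [x]" rather than a load followed by a separate dup. */
    static float32x4_t splat_from_memory(const float *p) {
        return vld1q_dup_f32(p);
    }

    int main(void) {
        float x = 3.5f;
        float32x4_t v = splat_from_memory(&x);
        printf("%f %f %f %f\n",
               vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1),
               vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3));
        return 0;
    }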
-
-
-multiclass VectorList_Bare_BHSD<string PREFIX, int Count,
- RegisterClass RegList> {
- defm B : VectorList_operands<PREFIX, "B", Count, RegList>;
- defm H : VectorList_operands<PREFIX, "H", Count, RegList>;
- defm S : VectorList_operands<PREFIX, "S", Count, RegList>;
- defm D : VectorList_operands<PREFIX, "D", Count, RegList>;
-}
-
-// Special vector list operand of 128-bit vectors with bare layout.
-// i.e. only show ".b", ".h", ".s", ".d"
-defm VOne : VectorList_Bare_BHSD<"VOne", 1, FPR128>;
-defm VPair : VectorList_Bare_BHSD<"VPair", 2, QPair>;
-defm VTriple : VectorList_Bare_BHSD<"VTriple", 3, QTriple>;
-defm VQuad : VectorList_Bare_BHSD<"VQuad", 4, QQuad>;
-
-class NeonI_LDN_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
- Operand ImmOp, string asmop>
- : NeonI_LdStOne_Lane<1, r, op2_1, op0,
- (outs VList:$Rt),
- (ins GPR64xsp:$Rn, VList:$src, ImmOp:$lane),
- asmop # "\t$Rt[$lane], [$Rn]",
- [],
- NoItinerary> {
- let mayLoad = 1;
- let neverHasSideEffects = 1;
- let hasExtraDefRegAllocReq = 1;
- let Constraints = "$src = $Rt";
-}
-
-multiclass LDN_Lane_BHSD<bit r, bit op0, string List, string asmop> {
- def _B : NeonI_LDN_Lane<r, 0b00, op0,
- !cast<RegisterOperand>(List # "B_operand"),
- neon_uimm4_bare, asmop> {
- let Inst{12-10} = lane{2-0};
- let Inst{30} = lane{3};
- }
-
- def _H : NeonI_LDN_Lane<r, 0b01, op0,
- !cast<RegisterOperand>(List # "H_operand"),
- neon_uimm3_bare, asmop> {
- let Inst{12-10} = {lane{1}, lane{0}, 0b0};
- let Inst{30} = lane{2};
- }
-
- def _S : NeonI_LDN_Lane<r, 0b10, op0,
- !cast<RegisterOperand>(List # "S_operand"),
- neon_uimm2_bare, asmop> {
- let Inst{12-10} = {lane{0}, 0b0, 0b0};
- let Inst{30} = lane{1};
- }
-
- def _D : NeonI_LDN_Lane<r, 0b10, op0,
- !cast<RegisterOperand>(List # "D_operand"),
- neon_uimm1_bare, asmop> {
- let Inst{12-10} = 0b001;
- let Inst{30} = lane{0};
- }
-}
-
-// Load single 1-element structure to one lane of 1 register.
-defm LD1LN : LDN_Lane_BHSD<0b0, 0b0, "VOne", "ld1">;
-
-// Load single N-element structure to one lane of N consecutive registers
-// (N = 2,3,4)
-defm LD2LN : LDN_Lane_BHSD<0b1, 0b0, "VPair", "ld2">;
-defm LD3LN : LDN_Lane_BHSD<0b0, 0b1, "VTriple", "ld3">;
-defm LD4LN : LDN_Lane_BHSD<0b1, 0b1, "VQuad", "ld4">;
-
-multiclass LD1LN_patterns<ValueType VTy, ValueType VTy2, ValueType DTy,
- Operand ImmOp, Operand ImmOp2, PatFrag LoadOp,
- Instruction INST> {
- def : Pat<(VTy (vector_insert (VTy VPR64:$src),
- (DTy (LoadOp GPR64xsp:$Rn)), (ImmOp:$lane))),
- (VTy (EXTRACT_SUBREG
- (INST GPR64xsp:$Rn,
- (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
- ImmOp:$lane),
- sub_64))>;
-
- def : Pat<(VTy2 (vector_insert (VTy2 VPR128:$src),
- (DTy (LoadOp GPR64xsp:$Rn)), (ImmOp2:$lane))),
- (VTy2 (INST GPR64xsp:$Rn, VPR128:$src, ImmOp2:$lane))>;
-}
-
-// Match all LD1LN instructions
-defm : LD1LN_patterns<v8i8, v16i8, i32, neon_uimm3_bare, neon_uimm4_bare,
- extloadi8, LD1LN_B>;
-
-defm : LD1LN_patterns<v4i16, v8i16, i32, neon_uimm2_bare, neon_uimm3_bare,
- extloadi16, LD1LN_H>;
-
-defm : LD1LN_patterns<v2i32, v4i32, i32, neon_uimm1_bare, neon_uimm2_bare,
- load, LD1LN_S>;
-defm : LD1LN_patterns<v2f32, v4f32, f32, neon_uimm1_bare, neon_uimm2_bare,
- load, LD1LN_S>;
-
-defm : LD1LN_patterns<v1i64, v2i64, i64, neon_uimm0_bare, neon_uimm1_bare,
- load, LD1LN_D>;
-defm : LD1LN_patterns<v1f64, v2f64, f64, neon_uimm0_bare, neon_uimm1_bare,
- load, LD1LN_D>;
-
-class NeonI_STN_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
- Operand ImmOp, string asmop>
- : NeonI_LdStOne_Lane<0, r, op2_1, op0,
- (outs), (ins GPR64xsp:$Rn, VList:$Rt, ImmOp:$lane),
- asmop # "\t$Rt[$lane], [$Rn]",
- [],
- NoItinerary> {
- let mayStore = 1;
- let neverHasSideEffects = 1;
- let hasExtraDefRegAllocReq = 1;
-}
-
-multiclass STN_Lane_BHSD<bit r, bit op0, string List, string asmop> {
- def _B : NeonI_STN_Lane<r, 0b00, op0,
- !cast<RegisterOperand>(List # "B_operand"),
- neon_uimm4_bare, asmop> {
- let Inst{12-10} = lane{2-0};
- let Inst{30} = lane{3};
- }
-
- def _H : NeonI_STN_Lane<r, 0b01, op0,
- !cast<RegisterOperand>(List # "H_operand"),
- neon_uimm3_bare, asmop> {
- let Inst{12-10} = {lane{1}, lane{0}, 0b0};
- let Inst{30} = lane{2};
- }
-
- def _S : NeonI_STN_Lane<r, 0b10, op0,
- !cast<RegisterOperand>(List # "S_operand"),
- neon_uimm2_bare, asmop> {
- let Inst{12-10} = {lane{0}, 0b0, 0b0};
- let Inst{30} = lane{1};
- }
-
- def _D : NeonI_STN_Lane<r, 0b10, op0,
- !cast<RegisterOperand>(List # "D_operand"),
- neon_uimm1_bare, asmop>{
- let Inst{12-10} = 0b001;
- let Inst{30} = lane{0};
- }
-}
-
-// Store single 1-element structure from one lane of 1 register.
-defm ST1LN : STN_Lane_BHSD<0b0, 0b0, "VOne", "st1">;
-
-// Store single N-element structure from one lane of N consecutive registers
-// (N = 2,3,4)
-defm ST2LN : STN_Lane_BHSD<0b1, 0b0, "VPair", "st2">;
-defm ST3LN : STN_Lane_BHSD<0b0, 0b1, "VTriple", "st3">;
-defm ST4LN : STN_Lane_BHSD<0b1, 0b1, "VQuad", "st4">;
-
-multiclass ST1LN_patterns<ValueType VTy, ValueType VTy2, ValueType DTy,
- Operand ImmOp, Operand ImmOp2, PatFrag StoreOp,
- Instruction INST> {
- def : Pat<(StoreOp (DTy (vector_extract (VTy VPR64:$Rt), ImmOp:$lane)),
- GPR64xsp:$Rn),
- (INST GPR64xsp:$Rn,
- (SUBREG_TO_REG (i64 0), VPR64:$Rt, sub_64),
- ImmOp:$lane)>;
-
- def : Pat<(StoreOp (DTy (vector_extract (VTy2 VPR128:$Rt), ImmOp2:$lane)),
- GPR64xsp:$Rn),
- (INST GPR64xsp:$Rn, VPR128:$Rt, ImmOp2:$lane)>;
-}
-
-// Match all ST1LN instructions
-defm : ST1LN_patterns<v8i8, v16i8, i32, neon_uimm3_bare, neon_uimm4_bare,
- truncstorei8, ST1LN_B>;
-
-defm : ST1LN_patterns<v4i16, v8i16, i32, neon_uimm2_bare, neon_uimm3_bare,
- truncstorei16, ST1LN_H>;
-
-defm : ST1LN_patterns<v2i32, v4i32, i32, neon_uimm1_bare, neon_uimm2_bare,
- store, ST1LN_S>;
-defm : ST1LN_patterns<v2f32, v4f32, f32, neon_uimm1_bare, neon_uimm2_bare,
- store, ST1LN_S>;
-
-defm : ST1LN_patterns<v1i64, v2i64, i64, neon_uimm0_bare, neon_uimm1_bare,
- store, ST1LN_D>;
-defm : ST1LN_patterns<v1f64, v2f64, f64, neon_uimm0_bare, neon_uimm1_bare,
- store, ST1LN_D>;
-
-// End of vector load/store single N-element structure (class SIMD lsone).
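
For context, the LD1LN/ST1LN patterns in the section above cover inserting a
loaded scalar into one lane of a vector and storing one lane back out. A short
C sketch using the ACLE lane intrinsics (arm_neon.h, AArch64 assumed; lane
numbers must be compile-time constants):

    #include <arm_neon.h>

    /* Load *p into lane 2 of v, leaving the other lanes unchanged; expected
       to map onto "ld1 {v.s}[2], [x]" via the LD1LN patterns above. */
    float32x4_t insert_lane2(const float *p, float32x4_t v) {
        return vld1q_lane_f32(p, v, 2);
    }

    /* Store lane 2 of v to *p; expected to map onto "st1 {v.s}[2], [x]". */
    void extract_lane2(float *p, float32x4_t v) {
        vst1q_lane_f32(p, v, 2);
    }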
-
-
-// The following are post-index load/store single N-element instructions
-// (class SIMD lsone-post)
-
-multiclass NeonI_LDN_WB_Dup<bit q, bit r, bits<3> opcode, bits<2> size,
- RegisterOperand VecList, Operand ImmTy,
- string asmop> {
- let mayLoad = 1, neverHasSideEffects = 1, Constraints = "$wb = $Rn",
- DecoderMethod = "DecodeVLDSTLanePostInstruction" in {
- def _fixed : NeonI_LdOne_Dup_Post<q, r, opcode, size,
- (outs VecList:$Rt, GPR64xsp:$wb),
- (ins GPR64xsp:$Rn, ImmTy:$amt),
- asmop # "\t$Rt, [$Rn], $amt",
- [],
- NoItinerary> {
- let Rm = 0b11111;
- }
-
- def _register : NeonI_LdOne_Dup_Post<q, r, opcode, size,
- (outs VecList:$Rt, GPR64xsp:$wb),
- (ins GPR64xsp:$Rn, GPR64noxzr:$Rm),
- asmop # "\t$Rt, [$Rn], $Rm",
- [],
- NoItinerary>;
- }
-}
-
-multiclass LDWB_Dup_BHSD<bit r, bits<3> opcode, string List, string asmop,
- Operand uimm_b, Operand uimm_h,
- Operand uimm_s, Operand uimm_d> {
- defm _8B : NeonI_LDN_WB_Dup<0, r, opcode, 0b00,
- !cast<RegisterOperand>(List # "8B_operand"),
- uimm_b, asmop>;
-
- defm _4H : NeonI_LDN_WB_Dup<0, r, opcode, 0b01,
- !cast<RegisterOperand>(List # "4H_operand"),
- uimm_h, asmop>;
-
- defm _2S : NeonI_LDN_WB_Dup<0, r, opcode, 0b10,
- !cast<RegisterOperand>(List # "2S_operand"),
- uimm_s, asmop>;
-
- defm _1D : NeonI_LDN_WB_Dup<0, r, opcode, 0b11,
- !cast<RegisterOperand>(List # "1D_operand"),
- uimm_d, asmop>;
-
- defm _16B : NeonI_LDN_WB_Dup<1, r, opcode, 0b00,
- !cast<RegisterOperand>(List # "16B_operand"),
- uimm_b, asmop>;
-
- defm _8H : NeonI_LDN_WB_Dup<1, r, opcode, 0b01,
- !cast<RegisterOperand>(List # "8H_operand"),
- uimm_h, asmop>;
-
- defm _4S : NeonI_LDN_WB_Dup<1, r, opcode, 0b10,
- !cast<RegisterOperand>(List # "4S_operand"),
- uimm_s, asmop>;
-
- defm _2D : NeonI_LDN_WB_Dup<1, r, opcode, 0b11,
- !cast<RegisterOperand>(List # "2D_operand"),
- uimm_d, asmop>;
-}
-
-// Post-index load single 1-element structure to all lanes of 1 register
-defm LD1R_WB : LDWB_Dup_BHSD<0b0, 0b110, "VOne", "ld1r", uimm_exact1,
- uimm_exact2, uimm_exact4, uimm_exact8>;
-
-// Post-index load single N-element structure to all lanes of N consecutive
-// registers (N = 2,3,4)
-defm LD2R_WB : LDWB_Dup_BHSD<0b1, 0b110, "VPair", "ld2r", uimm_exact2,
- uimm_exact4, uimm_exact8, uimm_exact16>;
-defm LD3R_WB : LDWB_Dup_BHSD<0b0, 0b111, "VTriple", "ld3r", uimm_exact3,
- uimm_exact6, uimm_exact12, uimm_exact24>;
-defm LD4R_WB : LDWB_Dup_BHSD<0b1, 0b111, "VQuad", "ld4r", uimm_exact4,
- uimm_exact8, uimm_exact16, uimm_exact32>;
-
-let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1,
- Constraints = "$Rn = $wb, $Rt = $src",
- DecoderMethod = "DecodeVLDSTLanePostInstruction" in {
- class LDN_WBFx_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
- Operand ImmTy, Operand ImmOp, string asmop>
- : NeonI_LdStOne_Lane_Post<1, r, op2_1, op0,
- (outs VList:$Rt, GPR64xsp:$wb),
- (ins GPR64xsp:$Rn, ImmTy:$amt,
- VList:$src, ImmOp:$lane),
- asmop # "\t$Rt[$lane], [$Rn], $amt",
- [],
- NoItinerary> {
- let Rm = 0b11111;
- }
-
- class LDN_WBReg_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
- Operand ImmTy, Operand ImmOp, string asmop>
- : NeonI_LdStOne_Lane_Post<1, r, op2_1, op0,
- (outs VList:$Rt, GPR64xsp:$wb),
- (ins GPR64xsp:$Rn, GPR64noxzr:$Rm,
- VList:$src, ImmOp:$lane),
- asmop # "\t$Rt[$lane], [$Rn], $Rm",
- [],
- NoItinerary>;
-}
-
-multiclass LD_Lane_WB_BHSD<bit r, bit op0, string List, string asmop,
- Operand uimm_b, Operand uimm_h,
- Operand uimm_s, Operand uimm_d> {
- def _B_fixed : LDN_WBFx_Lane<r, 0b00, op0,
- !cast<RegisterOperand>(List # "B_operand"),
- uimm_b, neon_uimm4_bare, asmop> {
- let Inst{12-10} = lane{2-0};
- let Inst{30} = lane{3};
- }
-
- def _B_register : LDN_WBReg_Lane<r, 0b00, op0,
- !cast<RegisterOperand>(List # "B_operand"),
- uimm_b, neon_uimm4_bare, asmop> {
- let Inst{12-10} = lane{2-0};
- let Inst{30} = lane{3};
- }
-
- def _H_fixed : LDN_WBFx_Lane<r, 0b01, op0,
- !cast<RegisterOperand>(List # "H_operand"),
- uimm_h, neon_uimm3_bare, asmop> {
- let Inst{12-10} = {lane{1}, lane{0}, 0b0};
- let Inst{30} = lane{2};
- }
-
- def _H_register : LDN_WBReg_Lane<r, 0b01, op0,
- !cast<RegisterOperand>(List # "H_operand"),
- uimm_h, neon_uimm3_bare, asmop> {
- let Inst{12-10} = {lane{1}, lane{0}, 0b0};
- let Inst{30} = lane{2};
- }
-
- def _S_fixed : LDN_WBFx_Lane<r, 0b10, op0,
- !cast<RegisterOperand>(List # "S_operand"),
- uimm_s, neon_uimm2_bare, asmop> {
- let Inst{12-10} = {lane{0}, 0b0, 0b0};
- let Inst{30} = lane{1};
- }
-
- def _S_register : LDN_WBReg_Lane<r, 0b10, op0,
- !cast<RegisterOperand>(List # "S_operand"),
- uimm_s, neon_uimm2_bare, asmop> {
- let Inst{12-10} = {lane{0}, 0b0, 0b0};
- let Inst{30} = lane{1};
- }
-
- def _D_fixed : LDN_WBFx_Lane<r, 0b10, op0,
- !cast<RegisterOperand>(List # "D_operand"),
- uimm_d, neon_uimm1_bare, asmop> {
- let Inst{12-10} = 0b001;
- let Inst{30} = lane{0};
- }
-
- def _D_register : LDN_WBReg_Lane<r, 0b10, op0,
- !cast<RegisterOperand>(List # "D_operand"),
- uimm_d, neon_uimm1_bare, asmop> {
- let Inst{12-10} = 0b001;
- let Inst{30} = lane{0};
- }
-}
-
-// Post-index load single 1-element structure to one lane of 1 register.
-defm LD1LN_WB : LD_Lane_WB_BHSD<0b0, 0b0, "VOne", "ld1", uimm_exact1,
- uimm_exact2, uimm_exact4, uimm_exact8>;
-
-// Post-index load single N-element structure to one lane of N consecutive
-// registers
-// (N = 2,3,4)
-defm LD2LN_WB : LD_Lane_WB_BHSD<0b1, 0b0, "VPair", "ld2", uimm_exact2,
- uimm_exact4, uimm_exact8, uimm_exact16>;
-defm LD3LN_WB : LD_Lane_WB_BHSD<0b0, 0b1, "VTriple", "ld3", uimm_exact3,
- uimm_exact6, uimm_exact12, uimm_exact24>;
-defm LD4LN_WB : LD_Lane_WB_BHSD<0b1, 0b1, "VQuad", "ld4", uimm_exact4,
- uimm_exact8, uimm_exact16, uimm_exact32>;
-
-let mayStore = 1, neverHasSideEffects = 1,
- hasExtraDefRegAllocReq = 1, Constraints = "$Rn = $wb",
- DecoderMethod = "DecodeVLDSTLanePostInstruction" in {
- class STN_WBFx_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
- Operand ImmTy, Operand ImmOp, string asmop>
- : NeonI_LdStOne_Lane_Post<0, r, op2_1, op0,
- (outs GPR64xsp:$wb),
- (ins GPR64xsp:$Rn, ImmTy:$amt,
- VList:$Rt, ImmOp:$lane),
- asmop # "\t$Rt[$lane], [$Rn], $amt",
- [],
- NoItinerary> {
- let Rm = 0b11111;
- }
-
- class STN_WBReg_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
- Operand ImmTy, Operand ImmOp, string asmop>
- : NeonI_LdStOne_Lane_Post<0, r, op2_1, op0,
- (outs GPR64xsp:$wb),
- (ins GPR64xsp:$Rn, GPR64noxzr:$Rm, VList:$Rt,
- ImmOp:$lane),
- asmop # "\t$Rt[$lane], [$Rn], $Rm",
- [],
- NoItinerary>;
-}
-
-multiclass ST_Lane_WB_BHSD<bit r, bit op0, string List, string asmop,
- Operand uimm_b, Operand uimm_h,
- Operand uimm_s, Operand uimm_d> {
- def _B_fixed : STN_WBFx_Lane<r, 0b00, op0,
- !cast<RegisterOperand>(List # "B_operand"),
- uimm_b, neon_uimm4_bare, asmop> {
- let Inst{12-10} = lane{2-0};
- let Inst{30} = lane{3};
- }
-
- def _B_register : STN_WBReg_Lane<r, 0b00, op0,
- !cast<RegisterOperand>(List # "B_operand"),
- uimm_b, neon_uimm4_bare, asmop> {
- let Inst{12-10} = lane{2-0};
- let Inst{30} = lane{3};
- }
-
- def _H_fixed : STN_WBFx_Lane<r, 0b01, op0,
- !cast<RegisterOperand>(List # "H_operand"),
- uimm_h, neon_uimm3_bare, asmop> {
- let Inst{12-10} = {lane{1}, lane{0}, 0b0};
- let Inst{30} = lane{2};
- }
-
- def _H_register : STN_WBReg_Lane<r, 0b01, op0,
- !cast<RegisterOperand>(List # "H_operand"),
- uimm_h, neon_uimm3_bare, asmop> {
- let Inst{12-10} = {lane{1}, lane{0}, 0b0};
- let Inst{30} = lane{2};
- }
-
- def _S_fixed : STN_WBFx_Lane<r, 0b10, op0,
- !cast<RegisterOperand>(List # "S_operand"),
- uimm_s, neon_uimm2_bare, asmop> {
- let Inst{12-10} = {lane{0}, 0b0, 0b0};
- let Inst{30} = lane{1};
- }
-
- def _S_register : STN_WBReg_Lane<r, 0b10, op0,
- !cast<RegisterOperand>(List # "S_operand"),
- uimm_s, neon_uimm2_bare, asmop> {
- let Inst{12-10} = {lane{0}, 0b0, 0b0};
- let Inst{30} = lane{1};
- }
-
- def _D_fixed : STN_WBFx_Lane<r, 0b10, op0,
- !cast<RegisterOperand>(List # "D_operand"),
- uimm_d, neon_uimm1_bare, asmop> {
- let Inst{12-10} = 0b001;
- let Inst{30} = lane{0};
- }
-
- def _D_register : STN_WBReg_Lane<r, 0b10, op0,
- !cast<RegisterOperand>(List # "D_operand"),
- uimm_d, neon_uimm1_bare, asmop> {
- let Inst{12-10} = 0b001;
- let Inst{30} = lane{0};
- }
-}
-
-// Post-index store single 1-element structure from one lane of 1 register.
-defm ST1LN_WB : ST_Lane_WB_BHSD<0b0, 0b0, "VOne", "st1", uimm_exact1,
- uimm_exact2, uimm_exact4, uimm_exact8>;
-
-// Post-index store single N-element structure from one lane of N consecutive
-// registers (N = 2,3,4)
-defm ST2LN_WB : ST_Lane_WB_BHSD<0b1, 0b0, "VPair", "st2", uimm_exact2,
- uimm_exact4, uimm_exact8, uimm_exact16>;
-defm ST3LN_WB : ST_Lane_WB_BHSD<0b0, 0b1, "VTriple", "st3", uimm_exact3,
- uimm_exact6, uimm_exact12, uimm_exact24>;
-defm ST4LN_WB : ST_Lane_WB_BHSD<0b1, 0b1, "VQuad", "st4", uimm_exact4,
- uimm_exact8, uimm_exact16, uimm_exact32>;
-
-// End of post-index load/store single N-element instructions
-// (class SIMD lsone-post)
-
-// Neon Scalar instruction implementations
-// Scalar Three Same
-
-class NeonI_Scalar3Same_size<bit u, bits<2> size, bits<5> opcode, string asmop,
- RegisterClass FPRC>
- : NeonI_Scalar3Same<u, size, opcode,
- (outs FPRC:$Rd), (ins FPRC:$Rn, FPRC:$Rm),
- !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
- [],
- NoItinerary>;
-
-class NeonI_Scalar3Same_D_size<bit u, bits<5> opcode, string asmop>
- : NeonI_Scalar3Same_size<u, 0b11, opcode, asmop, FPR64>;
-
-multiclass NeonI_Scalar3Same_HS_sizes<bit u, bits<5> opcode, string asmop,
- bit Commutable = 0> {
- let isCommutable = Commutable in {
- def hhh : NeonI_Scalar3Same_size<u, 0b01, opcode, asmop, FPR16>;
- def sss : NeonI_Scalar3Same_size<u, 0b10, opcode, asmop, FPR32>;
- }
-}
-
-multiclass NeonI_Scalar3Same_SD_sizes<bit u, bit size_high, bits<5> opcode,
- string asmop, bit Commutable = 0> {
- let isCommutable = Commutable in {
- def sss : NeonI_Scalar3Same_size<u, {size_high, 0b0}, opcode, asmop, FPR32>;
- def ddd : NeonI_Scalar3Same_size<u, {size_high, 0b1}, opcode, asmop, FPR64>;
- }
-}
-
-multiclass NeonI_Scalar3Same_BHSD_sizes<bit u, bits<5> opcode,
- string asmop, bit Commutable = 0> {
- let isCommutable = Commutable in {
- def bbb : NeonI_Scalar3Same_size<u, 0b00, opcode, asmop, FPR8>;
- def hhh : NeonI_Scalar3Same_size<u, 0b01, opcode, asmop, FPR16>;
- def sss : NeonI_Scalar3Same_size<u, 0b10, opcode, asmop, FPR32>;
- def ddd : NeonI_Scalar3Same_size<u, 0b11, opcode, asmop, FPR64>;
- }
-}
-
-multiclass Neon_Scalar3Same_D_size_patterns<SDPatternOperator opnode,
- Instruction INSTD> {
- def : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
- (INSTD FPR64:$Rn, FPR64:$Rm)>;
-}
-
-multiclass Neon_Scalar3Same_BHSD_size_patterns<SDPatternOperator opnode,
- Instruction INSTB,
- Instruction INSTH,
- Instruction INSTS,
- Instruction INSTD>
- : Neon_Scalar3Same_D_size_patterns<opnode, INSTD> {
- def: Pat<(v1i8 (opnode (v1i8 FPR8:$Rn), (v1i8 FPR8:$Rm))),
- (INSTB FPR8:$Rn, FPR8:$Rm)>;
-
- def: Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
- (INSTH FPR16:$Rn, FPR16:$Rm)>;
-
- def: Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
- (INSTS FPR32:$Rn, FPR32:$Rm)>;
-}
-
-class Neon_Scalar3Same_cmp_D_size_patterns<SDPatternOperator opnode,
- Instruction INSTD>
- : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
- (INSTD FPR64:$Rn, FPR64:$Rm)>;
-
-multiclass Neon_Scalar3Same_HS_size_patterns<SDPatternOperator opnode,
- Instruction INSTH,
- Instruction INSTS> {
- def : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
- (INSTH FPR16:$Rn, FPR16:$Rm)>;
- def : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
- (INSTS FPR32:$Rn, FPR32:$Rm)>;
-}
-
-multiclass Neon_Scalar3Same_SD_size_patterns<SDPatternOperator opnode,
- Instruction INSTS,
- Instruction INSTD> {
- def : Pat<(v1f32 (opnode (v1f32 FPR32:$Rn), (v1f32 FPR32:$Rm))),
- (INSTS FPR32:$Rn, FPR32:$Rm)>;
- def : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
- (INSTD FPR64:$Rn, FPR64:$Rm)>;
-}
-
-multiclass Neon_Scalar3Same_cmp_SD_size_patterns<SDPatternOperator opnode,
- Instruction INSTS,
- Instruction INSTD> {
- def : Pat<(v1i32 (opnode (v1f32 FPR32:$Rn), (v1f32 FPR32:$Rm))),
- (INSTS FPR32:$Rn, FPR32:$Rm)>;
- def : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
- (INSTD FPR64:$Rn, FPR64:$Rm)>;
-}
-
-class Neon_Scalar3Same_cmp_V1_D_size_patterns<CondCode CC,
- Instruction INSTD>
- : Pat<(v1i64 (Neon_cmp (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm), CC)),
- (INSTD FPR64:$Rn, FPR64:$Rm)>;
-
-// Scalar Three Different
-
-class NeonI_Scalar3Diff_size<bit u, bits<2> size, bits<4> opcode, string asmop,
- RegisterClass FPRCD, RegisterClass FPRCS>
- : NeonI_Scalar3Diff<u, size, opcode,
- (outs FPRCD:$Rd), (ins FPRCS:$Rn, FPRCS:$Rm),
- !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
- [],
- NoItinerary>;
-
-multiclass NeonI_Scalar3Diff_HS_size<bit u, bits<4> opcode, string asmop> {
- def shh : NeonI_Scalar3Diff_size<u, 0b01, opcode, asmop, FPR32, FPR16>;
- def dss : NeonI_Scalar3Diff_size<u, 0b10, opcode, asmop, FPR64, FPR32>;
-}
-
-multiclass NeonI_Scalar3Diff_ml_HS_size<bit u, bits<4> opcode, string asmop> {
- let Constraints = "$Src = $Rd" in {
- def shh : NeonI_Scalar3Diff<u, 0b01, opcode,
- (outs FPR32:$Rd), (ins FPR32:$Src, FPR16:$Rn, FPR16:$Rm),
- !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
- [],
- NoItinerary>;
- def dss : NeonI_Scalar3Diff<u, 0b10, opcode,
- (outs FPR64:$Rd), (ins FPR64:$Src, FPR32:$Rn, FPR32:$Rm),
- !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
- [],
- NoItinerary>;
- }
-}
-
-multiclass Neon_Scalar3Diff_HS_size_patterns<SDPatternOperator opnode,
- Instruction INSTH,
- Instruction INSTS> {
- def : Pat<(v1i32 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
- (INSTH FPR16:$Rn, FPR16:$Rm)>;
- def : Pat<(v1i64 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
- (INSTS FPR32:$Rn, FPR32:$Rm)>;
-}
-
-multiclass Neon_Scalar3Diff_ml_HS_size_patterns<SDPatternOperator opnode,
- Instruction INSTH,
- Instruction INSTS> {
- def : Pat<(v1i32 (opnode (v1i32 FPR32:$Src), (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
- (INSTH FPR32:$Src, FPR16:$Rn, FPR16:$Rm)>;
- def : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
- (INSTS FPR64:$Src, FPR32:$Rn, FPR32:$Rm)>;
-}
-
-// Scalar Two Registers Miscellaneous
-
-class NeonI_Scalar2SameMisc_size<bit u, bits<2> size, bits<5> opcode, string asmop,
- RegisterClass FPRCD, RegisterClass FPRCS>
- : NeonI_Scalar2SameMisc<u, size, opcode,
- (outs FPRCD:$Rd), (ins FPRCS:$Rn),
- !strconcat(asmop, "\t$Rd, $Rn"),
- [],
- NoItinerary>;
-
-multiclass NeonI_Scalar2SameMisc_SD_size<bit u, bit size_high, bits<5> opcode,
- string asmop> {
- def ss : NeonI_Scalar2SameMisc_size<u, {size_high, 0b0}, opcode, asmop, FPR32,
- FPR32>;
- def dd : NeonI_Scalar2SameMisc_size<u, {size_high, 0b1}, opcode, asmop, FPR64,
- FPR64>;
-}
-
-multiclass NeonI_Scalar2SameMisc_D_size<bit u, bits<5> opcode, string asmop> {
- def dd : NeonI_Scalar2SameMisc_size<u, 0b11, opcode, asmop, FPR64, FPR64>;
-}
-
-multiclass NeonI_Scalar2SameMisc_BHSD_size<bit u, bits<5> opcode, string asmop>
- : NeonI_Scalar2SameMisc_D_size<u, opcode, asmop> {
- def bb : NeonI_Scalar2SameMisc_size<u, 0b00, opcode, asmop, FPR8, FPR8>;
- def hh : NeonI_Scalar2SameMisc_size<u, 0b01, opcode, asmop, FPR16, FPR16>;
- def ss : NeonI_Scalar2SameMisc_size<u, 0b10, opcode, asmop, FPR32, FPR32>;
-}
-
-class NeonI_Scalar2SameMisc_fcvtxn_D_size<bit u, bits<5> opcode, string asmop>
- : NeonI_Scalar2SameMisc_size<u, 0b01, opcode, asmop, FPR32, FPR64>;
-
-multiclass NeonI_Scalar2SameMisc_narrow_HSD_size<bit u, bits<5> opcode,
- string asmop> {
- def bh : NeonI_Scalar2SameMisc_size<u, 0b00, opcode, asmop, FPR8, FPR16>;
- def hs : NeonI_Scalar2SameMisc_size<u, 0b01, opcode, asmop, FPR16, FPR32>;
- def sd : NeonI_Scalar2SameMisc_size<u, 0b10, opcode, asmop, FPR32, FPR64>;
-}
-
-class NeonI_Scalar2SameMisc_accum_size<bit u, bits<2> size, bits<5> opcode,
- string asmop, RegisterClass FPRC>
- : NeonI_Scalar2SameMisc<u, size, opcode,
- (outs FPRC:$Rd), (ins FPRC:$Src, FPRC:$Rn),
- !strconcat(asmop, "\t$Rd, $Rn"),
- [],
- NoItinerary>;
-
-multiclass NeonI_Scalar2SameMisc_accum_BHSD_size<bit u, bits<5> opcode,
- string asmop> {
-
- let Constraints = "$Src = $Rd" in {
- def bb : NeonI_Scalar2SameMisc_accum_size<u, 0b00, opcode, asmop, FPR8>;
- def hh : NeonI_Scalar2SameMisc_accum_size<u, 0b01, opcode, asmop, FPR16>;
- def ss : NeonI_Scalar2SameMisc_accum_size<u, 0b10, opcode, asmop, FPR32>;
- def dd : NeonI_Scalar2SameMisc_accum_size<u, 0b11, opcode, asmop, FPR64>;
- }
-}
-
-class Neon_Scalar2SameMisc_fcvtxn_D_size_patterns<SDPatternOperator opnode,
- Instruction INSTD>
- : Pat<(v1f32 (opnode (v1f64 FPR64:$Rn))),
- (INSTD FPR64:$Rn)>;
-
-multiclass Neon_Scalar2SameMisc_fcvt_SD_size_patterns<SDPatternOperator opnode,
- Instruction INSTS,
- Instruction INSTD> {
- def : Pat<(v1i32 (opnode (v1f32 FPR32:$Rn))),
- (INSTS FPR32:$Rn)>;
- def : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn))),
- (INSTD FPR64:$Rn)>;
-}
-
-multiclass Neon_Scalar2SameMisc_cvt_SD_size_patterns<SDPatternOperator Sopnode,
- SDPatternOperator Dopnode,
- Instruction INSTS,
- Instruction INSTD> {
- def : Pat<(f32 (Sopnode (v1i32 FPR32:$Rn))),
- (INSTS FPR32:$Rn)>;
- def : Pat<(f64 (Dopnode (v1i64 FPR64:$Rn))),
- (INSTD FPR64:$Rn)>;
-}
-
-multiclass Neon_Scalar2SameMisc_SD_size_patterns<SDPatternOperator opnode,
- Instruction INSTS,
- Instruction INSTD> {
- def : Pat<(v1f32 (opnode (v1f32 FPR32:$Rn))),
- (INSTS FPR32:$Rn)>;
- def : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))),
- (INSTD FPR64:$Rn)>;
-}
-
-class NeonI_Scalar2SameMisc_cmpz_D_size<bit u, bits<5> opcode, string asmop>
- : NeonI_Scalar2SameMisc<u, 0b11, opcode,
- (outs FPR64:$Rd), (ins FPR64:$Rn, neon_uimm0:$Imm),
- !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
- [],
- NoItinerary>;
-
-multiclass NeonI_Scalar2SameMisc_cmpz_SD_size<bit u, bits<5> opcode,
- string asmop> {
- def ssi : NeonI_Scalar2SameMisc<u, 0b10, opcode,
- (outs FPR32:$Rd), (ins FPR32:$Rn, fpz32:$FPImm),
- !strconcat(asmop, "\t$Rd, $Rn, $FPImm"),
- [],
- NoItinerary>;
- def ddi : NeonI_Scalar2SameMisc<u, 0b11, opcode,
- (outs FPR64:$Rd), (ins FPR64:$Rn, fpz32:$FPImm),
- !strconcat(asmop, "\t$Rd, $Rn, $FPImm"),
- [],
- NoItinerary>;
-}
-
-class Neon_Scalar2SameMisc_cmpz_D_size_patterns<SDPatternOperator opnode,
- Instruction INSTD>
- : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn),
- (v1i64 (bitconvert (v8i8 Neon_AllZero))))),
- (INSTD FPR64:$Rn, 0)>;
-
-class Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<CondCode CC,
- Instruction INSTD>
- : Pat<(v1i64 (Neon_cmpz (v1i64 FPR64:$Rn),
- (i32 neon_uimm0:$Imm), CC)),
- (INSTD FPR64:$Rn, neon_uimm0:$Imm)>;
-
-multiclass Neon_Scalar2SameMisc_cmpz_SD_size_patterns<SDPatternOperator opnode,
- Instruction INSTS,
- Instruction INSTD> {
- def : Pat<(v1i32 (opnode (v1f32 FPR32:$Rn),
- (v1f32 (scalar_to_vector (f32 fpz32:$FPImm))))),
- (INSTS FPR32:$Rn, fpz32:$FPImm)>;
- def : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn),
- (v1f32 (scalar_to_vector (f32 fpz32:$FPImm))))),
- (INSTD FPR64:$Rn, fpz32:$FPImm)>;
-}
-
-multiclass Neon_Scalar2SameMisc_D_size_patterns<SDPatternOperator opnode,
- Instruction INSTD> {
- def : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn))),
- (INSTD FPR64:$Rn)>;
-}
-
-multiclass Neon_Scalar2SameMisc_BHSD_size_patterns<SDPatternOperator opnode,
- Instruction INSTB,
- Instruction INSTH,
- Instruction INSTS,
- Instruction INSTD>
- : Neon_Scalar2SameMisc_D_size_patterns<opnode, INSTD> {
- def : Pat<(v1i8 (opnode (v1i8 FPR8:$Rn))),
- (INSTB FPR8:$Rn)>;
- def : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn))),
- (INSTH FPR16:$Rn)>;
- def : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn))),
- (INSTS FPR32:$Rn)>;
-}
-
-multiclass Neon_Scalar2SameMisc_narrow_HSD_size_patterns<
- SDPatternOperator opnode,
- Instruction INSTH,
- Instruction INSTS,
- Instruction INSTD> {
- def : Pat<(v1i8 (opnode (v1i16 FPR16:$Rn))),
- (INSTH FPR16:$Rn)>;
- def : Pat<(v1i16 (opnode (v1i32 FPR32:$Rn))),
- (INSTS FPR32:$Rn)>;
- def : Pat<(v1i32 (opnode (v1i64 FPR64:$Rn))),
- (INSTD FPR64:$Rn)>;
-
-}
-
-multiclass Neon_Scalar2SameMisc_accum_BHSD_size_patterns<
- SDPatternOperator opnode,
- Instruction INSTB,
- Instruction INSTH,
- Instruction INSTS,
- Instruction INSTD> {
- def : Pat<(v1i8 (opnode (v1i8 FPR8:$Src), (v1i8 FPR8:$Rn))),
- (INSTB FPR8:$Src, FPR8:$Rn)>;
- def : Pat<(v1i16 (opnode (v1i16 FPR16:$Src), (v1i16 FPR16:$Rn))),
- (INSTH FPR16:$Src, FPR16:$Rn)>;
- def : Pat<(v1i32 (opnode (v1i32 FPR32:$Src), (v1i32 FPR32:$Rn))),
- (INSTS FPR32:$Src, FPR32:$Rn)>;
- def : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i64 FPR64:$Rn))),
- (INSTD FPR64:$Src, FPR64:$Rn)>;
-}
-
-// Scalar Shift By Immediate
-
-class NeonI_ScalarShiftImm_size<bit u, bits<5> opcode, string asmop,
- RegisterClass FPRC, Operand ImmTy>
- : NeonI_ScalarShiftImm<u, opcode,
- (outs FPRC:$Rd), (ins FPRC:$Rn, ImmTy:$Imm),
- !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
- [], NoItinerary>;
-
-multiclass NeonI_ScalarShiftRightImm_D_size<bit u, bits<5> opcode,
- string asmop> {
- def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shr_imm64> {
- bits<6> Imm;
- let Inst{22} = 0b1; // immh:immb = 1xxxxxx
- let Inst{21-16} = Imm;
- }
-}
-
-multiclass NeonI_ScalarShiftRightImm_BHSD_size<bit u, bits<5> opcode,
- string asmop>
- : NeonI_ScalarShiftRightImm_D_size<u, opcode, asmop> {
- def bbi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR8, shr_imm8> {
- bits<3> Imm;
- let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
- let Inst{18-16} = Imm;
- }
- def hhi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR16, shr_imm16> {
- bits<4> Imm;
- let Inst{22-20} = 0b001; // immh:immb = 001xxxx
- let Inst{19-16} = Imm;
- }
- def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shr_imm32> {
- bits<5> Imm;
- let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
- let Inst{20-16} = Imm;
- }
-}
-
-multiclass NeonI_ScalarShiftLeftImm_D_size<bit u, bits<5> opcode,
- string asmop> {
- def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shl_imm64> {
- bits<6> Imm;
- let Inst{22} = 0b1; // immh:immb = 1xxxxxx
- let Inst{21-16} = Imm;
- }
-}
-
-multiclass NeonI_ScalarShiftLeftImm_BHSD_size<bit u, bits<5> opcode,
- string asmop>
- : NeonI_ScalarShiftLeftImm_D_size<u, opcode, asmop> {
- def bbi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR8, shl_imm8> {
- bits<3> Imm;
- let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
- let Inst{18-16} = Imm;
- }
- def hhi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR16, shl_imm16> {
- bits<4> Imm;
- let Inst{22-20} = 0b001; // immh:immb = 001xxxx
- let Inst{19-16} = Imm;
- }
- def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shl_imm32> {
- bits<5> Imm;
- let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
- let Inst{20-16} = Imm;
- }
-}
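
The immh:immb comments in these shift-immediate multiclasses follow the ARMv8
AdvSIMD encoding: for right shifts the shift amount is
(2 * element_size) - UInt(immh:immb), and for left shifts it is
UInt(immh:immb) - element_size, which is why the leading bits of immh identify
the element size. A small worked check in C, covering only the architectural
arithmetic (it deliberately does not model what the shr_imm*/shl_imm* operand
encoders emit):

    #include <stdio.h>

    /* immh:immb for a right shift by 'shift' of an 'esize'-bit element
       (valid shifts are 1..esize). */
    static unsigned encode_shr(unsigned esize, unsigned shift) {
        return 2 * esize - shift;
    }

    /* immh:immb for a left shift by 'shift' (valid shifts are 0..esize-1). */
    static unsigned encode_shl(unsigned esize, unsigned shift) {
        return esize + shift;
    }

    int main(void) {
        /* sshr d0, d1, #3: 128 - 3 = 125 = 0b1111101, matching "1xxxxxx". */
        printf("sshr #3, D: %u\n", encode_shr(64, 3));
        /* sshr h0, h1, #5: 32 - 5 = 27 = 0b0011011, matching "001xxxx". */
        printf("sshr #5, H: %u\n", encode_shr(16, 5));
        /* shl d0, d1, #3: 64 + 3 = 67 = 0b1000011, matching "1xxxxxx". */
        printf("shl  #3, D: %u\n", encode_shl(64, 3));
        return 0;
    }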
-
-class NeonI_ScalarShiftRightImm_accum_D_size<bit u, bits<5> opcode, string asmop>
- : NeonI_ScalarShiftImm<u, opcode,
- (outs FPR64:$Rd),
- (ins FPR64:$Src, FPR64:$Rn, shr_imm64:$Imm),
- !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
- [], NoItinerary> {
- bits<6> Imm;
- let Inst{22} = 0b1; // immh:immb = 1xxxxxx
- let Inst{21-16} = Imm;
- let Constraints = "$Src = $Rd";
-}
-
-class NeonI_ScalarShiftLeftImm_accum_D_size<bit u, bits<5> opcode, string asmop>
- : NeonI_ScalarShiftImm<u, opcode,
- (outs FPR64:$Rd),
- (ins FPR64:$Src, FPR64:$Rn, shl_imm64:$Imm),
- !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
- [], NoItinerary> {
- bits<6> Imm;
- let Inst{22} = 0b1; // immh:immb = 1xxxxxx
- let Inst{21-16} = Imm;
- let Constraints = "$Src = $Rd";
-}
-
-class NeonI_ScalarShiftImm_narrow_size<bit u, bits<5> opcode, string asmop,
- RegisterClass FPRCD, RegisterClass FPRCS,
- Operand ImmTy>
- : NeonI_ScalarShiftImm<u, opcode,
- (outs FPRCD:$Rd), (ins FPRCS:$Rn, ImmTy:$Imm),
- !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
- [], NoItinerary>;
-
-multiclass NeonI_ScalarShiftImm_narrow_HSD_size<bit u, bits<5> opcode,
- string asmop> {
- def bhi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR8, FPR16,
- shr_imm8> {
- bits<3> Imm;
- let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
- let Inst{18-16} = Imm;
- }
- def hsi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR16, FPR32,
- shr_imm16> {
- bits<4> Imm;
- let Inst{22-20} = 0b001; // immh:immb = 001xxxx
- let Inst{19-16} = Imm;
- }
- def sdi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR32, FPR64,
- shr_imm32> {
- bits<5> Imm;
- let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
- let Inst{20-16} = Imm;
- }
-}
-
-multiclass NeonI_ScalarShiftImm_cvt_SD_size<bit u, bits<5> opcode, string asmop> {
- def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shr_imm32> {
- bits<5> Imm;
- let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
- let Inst{20-16} = Imm;
- }
- def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shr_imm64> {
- bits<6> Imm;
- let Inst{22} = 0b1; // immh:immb = 1xxxxxx
- let Inst{21-16} = Imm;
- }
-}
-
-multiclass Neon_ScalarShiftRImm_D_size_patterns<SDPatternOperator opnode,
- Instruction INSTD> {
- def ddi : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
- (INSTD FPR64:$Rn, imm:$Imm)>;
-}
-
-multiclass Neon_ScalarShiftLImm_D_size_patterns<SDPatternOperator opnode,
- Instruction INSTD> {
- def ddi : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (i32 shl_imm64:$Imm))),
- (INSTD FPR64:$Rn, imm:$Imm)>;
-}
-
-class Neon_ScalarShiftImm_arm_D_size_patterns<SDPatternOperator opnode,
- Instruction INSTD>
- : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn),
- (v1i64 (Neon_vdup (i32 shr_imm64:$Imm))))),
- (INSTD FPR64:$Rn, imm:$Imm)>;
-
-multiclass Neon_ScalarShiftLImm_BHSD_size_patterns<SDPatternOperator opnode,
- Instruction INSTB,
- Instruction INSTH,
- Instruction INSTS,
- Instruction INSTD>
- : Neon_ScalarShiftLImm_D_size_patterns<opnode, INSTD> {
- def bbi : Pat<(v1i8 (opnode (v1i8 FPR8:$Rn), (i32 shl_imm8:$Imm))),
- (INSTB FPR8:$Rn, imm:$Imm)>;
- def hhi : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (i32 shl_imm16:$Imm))),
- (INSTH FPR16:$Rn, imm:$Imm)>;
- def ssi : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (i32 shl_imm32:$Imm))),
- (INSTS FPR32:$Rn, imm:$Imm)>;
-}
-
-class Neon_ScalarShiftLImm_accum_D_size_patterns<SDPatternOperator opnode,
- Instruction INSTD>
- : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i64 FPR64:$Rn),
- (i32 shl_imm64:$Imm))),
- (INSTD FPR64:$Src, FPR64:$Rn, imm:$Imm)>;
-
-class Neon_ScalarShiftRImm_accum_D_size_patterns<SDPatternOperator opnode,
- Instruction INSTD>
- : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i64 FPR64:$Rn),
- (i32 shr_imm64:$Imm))),
- (INSTD FPR64:$Src, FPR64:$Rn, imm:$Imm)>;
-
-multiclass Neon_ScalarShiftImm_narrow_HSD_size_patterns<
- SDPatternOperator opnode,
- Instruction INSTH,
- Instruction INSTS,
- Instruction INSTD> {
- def bhi : Pat<(v1i8 (opnode (v1i16 FPR16:$Rn), (i32 shr_imm16:$Imm))),
- (INSTH FPR16:$Rn, imm:$Imm)>;
- def hsi : Pat<(v1i16 (opnode (v1i32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
- (INSTS FPR32:$Rn, imm:$Imm)>;
- def sdi : Pat<(v1i32 (opnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
- (INSTD FPR64:$Rn, imm:$Imm)>;
-}
-
-multiclass Neon_ScalarShiftImm_scvtf_SD_size_patterns<SDPatternOperator Sopnode,
- SDPatternOperator Dopnode,
- Instruction INSTS,
- Instruction INSTD> {
- def ssi : Pat<(f32 (Sopnode (v1i32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
- (INSTS FPR32:$Rn, imm:$Imm)>;
- def ddi : Pat<(f64 (Dopnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
- (INSTD FPR64:$Rn, imm:$Imm)>;
-}
-
-multiclass Neon_ScalarShiftImm_fcvts_SD_size_patterns<SDPatternOperator Sopnode,
- SDPatternOperator Dopnode,
- Instruction INSTS,
- Instruction INSTD> {
- def ssi : Pat<(v1i32 (Sopnode (v1f32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
- (INSTS FPR32:$Rn, imm:$Imm)>;
- def ddi : Pat<(v1i64 (Dopnode (v1f64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
- (INSTD FPR64:$Rn, imm:$Imm)>;
-}
-
-// Scalar Signed Shift Right (Immediate)
-defm SSHR : NeonI_ScalarShiftRightImm_D_size<0b0, 0b00000, "sshr">;
-defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vshrds_n, SSHRddi>;
-// Pattern to match llvm.arm.* intrinsic.
-def : Neon_ScalarShiftImm_arm_D_size_patterns<sra, SSHRddi>;
-
-// Scalar Unsigned Shift Right (Immediate)
-defm USHR : NeonI_ScalarShiftRightImm_D_size<0b1, 0b00000, "ushr">;
-defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vshrdu_n, USHRddi>;
-// Pattern to match llvm.arm.* intrinsic.
-def : Neon_ScalarShiftImm_arm_D_size_patterns<srl, USHRddi>;
-
-// Scalar Signed Rounding Shift Right (Immediate)
-defm SRSHR : NeonI_ScalarShiftRightImm_D_size<0b0, 0b00100, "srshr">;
-defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vsrshr, SRSHRddi>;
-
-// Scalar Unsigned Rounding Shift Right (Immediate)
-defm URSHR : NeonI_ScalarShiftRightImm_D_size<0b1, 0b00100, "urshr">;
-defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vurshr, URSHRddi>;
-
-// Scalar Signed Shift Right and Accumulate (Immediate)
-def SSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b0, 0b00010, "ssra">;
-def : Neon_ScalarShiftRImm_accum_D_size_patterns
- <int_aarch64_neon_vsrads_n, SSRA>;
-
-// Scalar Unsigned Shift Right and Accumulate (Immediate)
-def USRA : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b00010, "usra">;
-def : Neon_ScalarShiftRImm_accum_D_size_patterns
- <int_aarch64_neon_vsradu_n, USRA>;
-
-// Scalar Signed Rounding Shift Right and Accumulate (Immediate)
-def SRSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b0, 0b00110, "srsra">;
-def : Neon_ScalarShiftRImm_accum_D_size_patterns
- <int_aarch64_neon_vrsrads_n, SRSRA>;
-
-// Scalar Unsigned Rounding Shift Right and Accumulate (Immediate)
-def URSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b00110, "ursra">;
-def : Neon_ScalarShiftRImm_accum_D_size_patterns
- <int_aarch64_neon_vrsradu_n, URSRA>;
-
-// Scalar Shift Left (Immediate)
-defm SHL : NeonI_ScalarShiftLeftImm_D_size<0b0, 0b01010, "shl">;
-defm : Neon_ScalarShiftLImm_D_size_patterns<int_aarch64_neon_vshld_n, SHLddi>;
-// Pattern to match llvm.arm.* intrinsic.
-def : Neon_ScalarShiftImm_arm_D_size_patterns<shl, SHLddi>;
-
-// Signed Saturating Shift Left (Immediate)
-defm SQSHL : NeonI_ScalarShiftLeftImm_BHSD_size<0b0, 0b01110, "sqshl">;
-defm : Neon_ScalarShiftLImm_BHSD_size_patterns<int_aarch64_neon_vqshls_n,
- SQSHLbbi, SQSHLhhi,
- SQSHLssi, SQSHLddi>;
-// Pattern to match llvm.arm.* intrinsic.
-defm : Neon_ScalarShiftLImm_D_size_patterns<Neon_sqrshlImm, SQSHLddi>;
-
-// Unsigned Saturating Shift Left (Immediate)
-defm UQSHL : NeonI_ScalarShiftLeftImm_BHSD_size<0b1, 0b01110, "uqshl">;
-defm : Neon_ScalarShiftLImm_BHSD_size_patterns<int_aarch64_neon_vqshlu_n,
- UQSHLbbi, UQSHLhhi,
- UQSHLssi, UQSHLddi>;
-// Pattern to match llvm.arm.* intrinsic.
-defm : Neon_ScalarShiftLImm_D_size_patterns<Neon_uqrshlImm, UQSHLddi>;
-
-// Signed Saturating Shift Left Unsigned (Immediate)
-defm SQSHLU : NeonI_ScalarShiftLeftImm_BHSD_size<0b1, 0b01100, "sqshlu">;
-defm : Neon_ScalarShiftLImm_BHSD_size_patterns<int_aarch64_neon_vsqshlu,
- SQSHLUbbi, SQSHLUhhi,
- SQSHLUssi, SQSHLUddi>;
-
-// Shift Right And Insert (Immediate)
-def SRI : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b01000, "sri">;
-def : Neon_ScalarShiftRImm_accum_D_size_patterns
- <int_aarch64_neon_vsri, SRI>;
-
-// Shift Left And Insert (Immediate)
-def SLI : NeonI_ScalarShiftLeftImm_accum_D_size<0b1, 0b01010, "sli">;
-def : Neon_ScalarShiftLImm_accum_D_size_patterns
- <int_aarch64_neon_vsli, SLI>;
-
-// Signed Saturating Shift Right Narrow (Immediate)
-defm SQSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b0, 0b10010, "sqshrn">;
-defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqshrn,
- SQSHRNbhi, SQSHRNhsi,
- SQSHRNsdi>;
-
-// Unsigned Saturating Shift Right Narrow (Immediate)
-defm UQSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10010, "uqshrn">;
-defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vuqshrn,
- UQSHRNbhi, UQSHRNhsi,
- UQSHRNsdi>;
-
-// Signed Saturating Rounded Shift Right Narrow (Immediate)
-defm SQRSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b0, 0b10011, "sqrshrn">;
-defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqrshrn,
- SQRSHRNbhi, SQRSHRNhsi,
- SQRSHRNsdi>;
-
-// Unsigned Saturating Rounded Shift Right Narrow (Immediate)
-defm UQRSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10011, "uqrshrn">;
-defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vuqrshrn,
- UQRSHRNbhi, UQRSHRNhsi,
- UQRSHRNsdi>;
-
-// Signed Saturating Shift Right Unsigned Narrow (Immediate)
-defm SQSHRUN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10000, "sqshrun">;
-defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqshrun,
- SQSHRUNbhi, SQSHRUNhsi,
- SQSHRUNsdi>;
-
-// Signed Saturating Rounded Shift Right Unsigned Narrow (Immediate)
-defm SQRSHRUN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10001, "sqrshrun">;
-defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqrshrun,
- SQRSHRUNbhi, SQRSHRUNhsi,
- SQRSHRUNsdi>;
-
-// Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
-defm SCVTF_N : NeonI_ScalarShiftImm_cvt_SD_size<0b0, 0b11100, "scvtf">;
-defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtf32_n_s32,
- int_aarch64_neon_vcvtf64_n_s64,
- SCVTF_Nssi, SCVTF_Nddi>;
-
-// Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
-defm UCVTF_N : NeonI_ScalarShiftImm_cvt_SD_size<0b1, 0b11100, "ucvtf">;
-defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtf32_n_u32,
- int_aarch64_neon_vcvtf64_n_u64,
- UCVTF_Nssi, UCVTF_Nddi>;
-
-// Scalar Floating-point Convert To Signed Fixed-point (Immediate)
-defm FCVTZS_N : NeonI_ScalarShiftImm_cvt_SD_size<0b0, 0b11111, "fcvtzs">;
-defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvts_n_s32_f32,
- int_aarch64_neon_vcvtd_n_s64_f64,
- FCVTZS_Nssi, FCVTZS_Nddi>;
-
-// Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
-defm FCVTZU_N : NeonI_ScalarShiftImm_cvt_SD_size<0b1, 0b11111, "fcvtzu">;
-defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvts_n_u32_f32,
- int_aarch64_neon_vcvtd_n_u64_f64,
- FCVTZU_Nssi, FCVTZU_Nddi>;
-
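
These fixed-point convert patterns pair a conversion with an immediate count
of fraction bits. A hedged C sketch using the ACLE scalar fixed-point convert
intrinsics (arm_neon.h; the vcvts_n_* names are the AArch64 ACLE spellings,
assumed to be available in the toolchain's header):

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        /* Treat 0x18000 as a Q16.16 fixed-point value: expected 1.5f,
           via "scvtf s0, s1, #16" once these patterns apply. */
        float f = vcvts_n_f32_s32(0x18000, 16);

        /* Convert 1.5f back to Q16.16: expected 0x18000,
           via "fcvtzs s0, s1, #16". */
        int32_t q = vcvts_n_s32_f32(1.5f, 16);

        printf("%f 0x%x\n", f, (unsigned)q);
        return 0;
    }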
-// Patterns For Convert Instructions Between v1f64 and v1i64
-class Neon_ScalarShiftImm_cvtf_v1f64_pattern<SDPatternOperator opnode,
- Instruction INST>
- : Pat<(v1f64 (opnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
- (INST FPR64:$Rn, imm:$Imm)>;
-
-class Neon_ScalarShiftImm_fcvt_v1f64_pattern<SDPatternOperator opnode,
- Instruction INST>
- : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
- (INST FPR64:$Rn, imm:$Imm)>;
-
-def : Neon_ScalarShiftImm_cvtf_v1f64_pattern<int_arm_neon_vcvtfxs2fp,
- SCVTF_Nddi>;
-
-def : Neon_ScalarShiftImm_cvtf_v1f64_pattern<int_arm_neon_vcvtfxu2fp,
- UCVTF_Nddi>;
-
-def : Neon_ScalarShiftImm_fcvt_v1f64_pattern<int_arm_neon_vcvtfp2fxs,
- FCVTZS_Nddi>;
-
-def : Neon_ScalarShiftImm_fcvt_v1f64_pattern<int_arm_neon_vcvtfp2fxu,
- FCVTZU_Nddi>;
-
-// Scalar Integer Add
-let isCommutable = 1 in {
-def ADDddd : NeonI_Scalar3Same_D_size<0b0, 0b10000, "add">;
-}
-
-// Scalar Integer Sub
-def SUBddd : NeonI_Scalar3Same_D_size<0b1, 0b10000, "sub">;
-
-// Pattern for Scalar Integer Add and Sub with D register only
-defm : Neon_Scalar3Same_D_size_patterns<add, ADDddd>;
-defm : Neon_Scalar3Same_D_size_patterns<sub, SUBddd>;
-
-// Patterns to match llvm.aarch64.* intrinsic for Scalar Add, Sub
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vaddds, ADDddd>;
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vadddu, ADDddd>;
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vsubds, SUBddd>;
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vsubdu, SUBddd>;
-
-// Scalar Integer Saturating Add (Signed, Unsigned)
-defm SQADD : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00001, "sqadd", 1>;
-defm UQADD : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00001, "uqadd", 1>;
-
-// Scalar Integer Saturating Sub (Signed, Unsigned)
-defm SQSUB : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00101, "sqsub", 0>;
-defm UQSUB : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00101, "uqsub", 0>;
-
-
-// Patterns to match llvm.aarch64.* intrinsic for
-// Scalar Integer Saturating Add, Sub (Signed, Unsigned)
-defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqadds, SQADDbbb,
- SQADDhhh, SQADDsss, SQADDddd>;
-defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqaddu, UQADDbbb,
- UQADDhhh, UQADDsss, UQADDddd>;
-defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqsubs, SQSUBbbb,
- SQSUBhhh, SQSUBsss, SQSUBddd>;
-defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqsubu, UQSUBbbb,
- UQSUBhhh, UQSUBsss, UQSUBddd>;
-
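
As a usage-level illustration of the saturating add/sub forms, the ACLE
exposes AArch64 scalar saturating intrinsics (vqaddb_s8 and friends); a
minimal sketch, assuming those intrinsics are present in arm_neon.h:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        /* 100 + 100 saturates to 127 for a signed byte; expected to use
           "sqadd b0, b1, b2" through the SQADDbbb pattern above. */
        int8_t a = vqaddb_s8(100, 100);

        /* 10 - 200 saturates to 0 for an unsigned byte ("uqsub b0, b1, b2"). */
        uint8_t b = vqsubb_u8(10, 200);

        printf("%d %u\n", a, (unsigned)b);
        return 0;
    }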
-// Scalar Integer Saturating Doubling Multiply Half High
-defm SQDMULH : NeonI_Scalar3Same_HS_sizes<0b0, 0b10110, "sqdmulh", 1>;
-
-// Scalar Integer Saturating Rounding Doubling Multiply Half High
-defm SQRDMULH : NeonI_Scalar3Same_HS_sizes<0b1, 0b10110, "sqrdmulh", 1>;
-
-// Patterns to match llvm.arm.* intrinsic for
-// Scalar Integer Saturating Doubling Multiply Half High and
-// Scalar Integer Saturating Rounding Doubling Multiply Half High
-defm : Neon_Scalar3Same_HS_size_patterns<int_arm_neon_vqdmulh, SQDMULHhhh,
- SQDMULHsss>;
-defm : Neon_Scalar3Same_HS_size_patterns<int_arm_neon_vqrdmulh, SQRDMULHhhh,
- SQRDMULHsss>;
-
-// Scalar Floating-point Multiply Extended
-defm FMULX : NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11011, "fmulx", 1>;
-
-// Scalar Floating-point Reciprocal Step
-defm FRECPS : NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11111, "frecps", 0>;
-
-// Scalar Floating-point Reciprocal Square Root Step
-defm FRSQRTS : NeonI_Scalar3Same_SD_sizes<0b0, 0b1, 0b11111, "frsqrts", 0>;
-
-// Patterns to match llvm.arm.* intrinsic for
-// Scalar Floating-point Reciprocal Step and
-// Scalar Floating-point Reciprocal Square Root Step
-defm : Neon_Scalar3Same_SD_size_patterns<int_arm_neon_vrecps, FRECPSsss,
- FRECPSddd>;
-defm : Neon_Scalar3Same_SD_size_patterns<int_arm_neon_vrsqrts, FRSQRTSsss,
- FRSQRTSddd>;
-
-def : Pat<(v1f64 (fsqrt (v1f64 FPR64:$Rn))), (FSQRTdd FPR64:$Rn)>;
-
-// Patterns to match llvm.aarch64.* intrinsic for
-// Scalar Floating-point Multiply Extended
-multiclass Neon_Scalar3Same_MULX_SD_size_patterns<SDPatternOperator opnode,
- Instruction INSTS,
- Instruction INSTD> {
- def : Pat<(f32 (opnode (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
- (INSTS FPR32:$Rn, FPR32:$Rm)>;
- def : Pat<(f64 (opnode (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
- (INSTD FPR64:$Rn, FPR64:$Rm)>;
-}
-
-defm : Neon_Scalar3Same_MULX_SD_size_patterns<int_aarch64_neon_vmulx,
- FMULXsss,FMULXddd>;
-
-// Scalar Integer Shift Left (Signed, Unsigned)
-def SSHLddd : NeonI_Scalar3Same_D_size<0b0, 0b01000, "sshl">;
-def USHLddd : NeonI_Scalar3Same_D_size<0b1, 0b01000, "ushl">;
-
-// Patterns to match llvm.arm.* intrinsic for
-// Scalar Integer Shift Left (Signed, Unsigned)
-defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vshifts, SSHLddd>;
-defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vshiftu, USHLddd>;
-
-// Patterns to match llvm.aarch64.* intrinsic for
-// Scalar Integer Shift Left (Signed, Unsigned)
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vshlds, SSHLddd>;
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vshldu, USHLddd>;
-
-// Scalar Integer Saturating Shift Left (Signed, Unsigned)
-defm SQSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01001, "sqshl", 0>;
-defm UQSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01001, "uqshl", 0>;
-
-// Patterns to match llvm.aarch64.* intrinsic for
-// Scalar Integer Saturating Shift Left (Signed, Unsigned)
-defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqshls, SQSHLbbb,
- SQSHLhhh, SQSHLsss, SQSHLddd>;
-defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqshlu, UQSHLbbb,
- UQSHLhhh, UQSHLsss, UQSHLddd>;
-
-// Patterns to match llvm.arm.* intrinsic for
-// Scalar Integer Saturating Shift Left (Signed, Unsigned)
-defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqshifts, SQSHLddd>;
-defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqshiftu, UQSHLddd>;
-
-// Scalar Integer Rounding Shift Left (Signed, Unsigned)
-def SRSHLddd: NeonI_Scalar3Same_D_size<0b0, 0b01010, "srshl">;
-def URSHLddd: NeonI_Scalar3Same_D_size<0b1, 0b01010, "urshl">;
-
-// Patterns to match llvm.aarch64.* intrinsic for
-// Scalar Integer Rounding Shift Left (Signed, Unsigned)
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vrshlds, SRSHLddd>;
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vrshldu, URSHLddd>;
-
-// Patterns to match llvm.arm.* intrinsic for
-// Scalar Integer Rounding Shift Left (Signed, Unsigned)
-defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vrshifts, SRSHLddd>;
-defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vrshiftu, URSHLddd>;
-
-// Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
-defm SQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01011, "sqrshl", 0>;
-defm UQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01011, "uqrshl", 0>;
-
-// Patterns to match llvm.aarch64.* intrinsic for
-// Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
-defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqrshls, SQRSHLbbb,
- SQRSHLhhh, SQRSHLsss, SQRSHLddd>;
-defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqrshlu, UQRSHLbbb,
- UQRSHLhhh, UQRSHLsss, UQRSHLddd>;
-
-// Patterns to match llvm.arm.* intrinsic for
-// Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
-defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqrshifts, SQRSHLddd>;
-defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqrshiftu, UQRSHLddd>;
-
-// Signed Saturating Doubling Multiply-Add Long
-defm SQDMLAL : NeonI_Scalar3Diff_ml_HS_size<0b0, 0b1001, "sqdmlal">;
-defm : Neon_Scalar3Diff_ml_HS_size_patterns<int_aarch64_neon_vqdmlal,
- SQDMLALshh, SQDMLALdss>;
-
-// Signed Saturating Doubling Multiply-Subtract Long
-defm SQDMLSL : NeonI_Scalar3Diff_ml_HS_size<0b0, 0b1011, "sqdmlsl">;
-defm : Neon_Scalar3Diff_ml_HS_size_patterns<int_aarch64_neon_vqdmlsl,
- SQDMLSLshh, SQDMLSLdss>;
-
-// Signed Saturating Doubling Multiply Long
-defm SQDMULL : NeonI_Scalar3Diff_HS_size<0b0, 0b1101, "sqdmull">;
-defm : Neon_Scalar3Diff_HS_size_patterns<int_arm_neon_vqdmull,
- SQDMULLshh, SQDMULLdss>;
-
-// Scalar Signed Integer Convert To Floating-point
-defm SCVTF : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11101, "scvtf">;
-defm : Neon_Scalar2SameMisc_cvt_SD_size_patterns<int_aarch64_neon_vcvtf32_s32,
- int_aarch64_neon_vcvtf64_s64,
- SCVTFss, SCVTFdd>;
-
-// Scalar Unsigned Integer Convert To Floating-point
-defm UCVTF : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11101, "ucvtf">;
-defm : Neon_Scalar2SameMisc_cvt_SD_size_patterns<int_aarch64_neon_vcvtf32_u32,
- int_aarch64_neon_vcvtf64_u64,
- UCVTFss, UCVTFdd>;
-
-// Scalar Floating-point Converts
-def FCVTXN : NeonI_Scalar2SameMisc_fcvtxn_D_size<0b1, 0b10110, "fcvtxn">;
-def : Neon_Scalar2SameMisc_fcvtxn_D_size_patterns<int_aarch64_neon_fcvtxn,
- FCVTXN>;
-
-defm FCVTNS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11010, "fcvtns">;
-defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtns,
- FCVTNSss, FCVTNSdd>;
-
-defm FCVTNU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11010, "fcvtnu">;
-defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtnu,
- FCVTNUss, FCVTNUdd>;
-
-defm FCVTMS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11011, "fcvtms">;
-defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtms,
- FCVTMSss, FCVTMSdd>;
-
-defm FCVTMU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11011, "fcvtmu">;
-defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtmu,
- FCVTMUss, FCVTMUdd>;
-
-defm FCVTAS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11100, "fcvtas">;
-defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtas,
- FCVTASss, FCVTASdd>;
-
-defm FCVTAU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11100, "fcvtau">;
-defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtau,
- FCVTAUss, FCVTAUdd>;
-
-defm FCVTPS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11010, "fcvtps">;
-defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtps,
- FCVTPSss, FCVTPSdd>;
-
-defm FCVTPU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11010, "fcvtpu">;
-defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtpu,
- FCVTPUss, FCVTPUdd>;
-
-defm FCVTZS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11011, "fcvtzs">;
-defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtzs,
- FCVTZSss, FCVTZSdd>;
-
-defm FCVTZU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11011, "fcvtzu">;
-defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtzu,
- FCVTZUss, FCVTZUdd>;
-
-// Patterns For Convert Instructions Between v1f64 and v1i64
-class Neon_Scalar2SameMisc_cvtf_v1f64_pattern<SDPatternOperator opnode,
- Instruction INST>
- : Pat<(v1f64 (opnode (v1i64 FPR64:$Rn))), (INST FPR64:$Rn)>;
-
-class Neon_Scalar2SameMisc_fcvt_v1f64_pattern<SDPatternOperator opnode,
- Instruction INST>
- : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn))), (INST FPR64:$Rn)>;
-
-def : Neon_Scalar2SameMisc_cvtf_v1f64_pattern<sint_to_fp, SCVTFdd>;
-def : Neon_Scalar2SameMisc_cvtf_v1f64_pattern<uint_to_fp, UCVTFdd>;
-
-def : Neon_Scalar2SameMisc_fcvt_v1f64_pattern<fp_to_sint, FCVTZSdd>;
-def : Neon_Scalar2SameMisc_fcvt_v1f64_pattern<fp_to_uint, FCVTZUdd>;
-
-// Scalar Floating-point Reciprocal Estimate
-defm FRECPE : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11101, "frecpe">;
-defm : Neon_Scalar2SameMisc_SD_size_patterns<int_arm_neon_vrecpe,
- FRECPEss, FRECPEdd>;
-
-// Scalar Floating-point Reciprocal Exponent
-defm FRECPX : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11111, "frecpx">;
-defm : Neon_Scalar2SameMisc_SD_size_patterns<int_aarch64_neon_vrecpx,
- FRECPXss, FRECPXdd>;
-
-// Scalar Floating-point Reciprocal Square Root Estimate
-defm FRSQRTE: NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11101, "frsqrte">;
-defm : Neon_Scalar2SameMisc_SD_size_patterns<int_arm_neon_vrsqrte,
- FRSQRTEss, FRSQRTEdd>;
-
-// Scalar Floating-point Round
-class Neon_ScalarFloatRound_pattern<SDPatternOperator opnode, Instruction INST>
- : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))), (INST FPR64:$Rn)>;
-
-def : Neon_ScalarFloatRound_pattern<fceil, FRINTPdd>;
-def : Neon_ScalarFloatRound_pattern<ffloor, FRINTMdd>;
-def : Neon_ScalarFloatRound_pattern<ftrunc, FRINTZdd>;
-def : Neon_ScalarFloatRound_pattern<frint, FRINTXdd>;
-def : Neon_ScalarFloatRound_pattern<fnearbyint, FRINTIdd>;
-def : Neon_ScalarFloatRound_pattern<frnd, FRINTAdd>;
-def : Neon_ScalarFloatRound_pattern<int_aarch64_neon_frintn, FRINTNdd>;
-
-// Scalar Integer Compare
-
-// Scalar Compare Bitwise Equal
-def CMEQddd: NeonI_Scalar3Same_D_size<0b1, 0b10001, "cmeq">;
-def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vceq, CMEQddd>;
-
-class Neon_Scalar3Same_cmp_D_size_v1_patterns<SDPatternOperator opnode,
- Instruction INSTD,
- CondCode CC>
- : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm), CC)),
- (INSTD FPR64:$Rn, FPR64:$Rm)>;
-
-def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMEQddd, SETEQ>;
-
-// Scalar Compare Signed Greater Than Or Equal
-def CMGEddd: NeonI_Scalar3Same_D_size<0b0, 0b00111, "cmge">;
-def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vcge, CMGEddd>;
-def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMGEddd, SETGE>;
-
-// Scalar Compare Unsigned Higher Or Same
-def CMHSddd: NeonI_Scalar3Same_D_size<0b1, 0b00111, "cmhs">;
-def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vchs, CMHSddd>;
-def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMHSddd, SETUGE>;
-
-// Scalar Compare Unsigned Higher
-def CMHIddd: NeonI_Scalar3Same_D_size<0b1, 0b00110, "cmhi">;
-def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vchi, CMHIddd>;
-def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMHIddd, SETUGT>;
-
-// Scalar Compare Signed Greater Than
-def CMGTddd: NeonI_Scalar3Same_D_size<0b0, 0b00110, "cmgt">;
-def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vcgt, CMGTddd>;
-def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMGTddd, SETGT>;
-
-// Scalar Compare Bitwise Test Bits
-def CMTSTddd: NeonI_Scalar3Same_D_size<0b0, 0b10001, "cmtst">;
-def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vtstd, CMTSTddd>;
-def : Neon_Scalar3Same_cmp_D_size_patterns<Neon_tst, CMTSTddd>;
-
-// Scalar Compare Bitwise Equal To Zero
-def CMEQddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01001, "cmeq">;
-def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vceq,
- CMEQddi>;
-def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETEQ, CMEQddi>;
-
-// Scalar Compare Signed Greater Than Or Equal To Zero
-def CMGEddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b1, 0b01000, "cmge">;
-def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcge,
- CMGEddi>;
-def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETGE, CMGEddi>;
-
-// Scalar Compare Signed Greater Than Zero
-def CMGTddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01000, "cmgt">;
-def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcgt,
- CMGTddi>;
-def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETGT, CMGTddi>;
-
-// Scalar Compare Signed Less Than Or Equal To Zero
-def CMLEddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b1, 0b01001, "cmle">;
-def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vclez,
- CMLEddi>;
-def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETLE, CMLEddi>;
-
-// Scalar Compare Less Than Zero
-def CMLTddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01010, "cmlt">;
-def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcltz,
- CMLTddi>;
-def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETLT, CMLTddi>;
-
-// Scalar Floating-point Compare
-
-// Scalar Floating-point Compare Mask Equal
-defm FCMEQ: NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11100, "fcmeq">;
-defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vceq,
- FCMEQsss, FCMEQddd>;
-def : Neon_Scalar3Same_cmp_V1_D_size_patterns<SETEQ, FCMEQddd>;
-
-// Scalar Floating-point Compare Mask Equal To Zero
-defm FCMEQZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01101, "fcmeq">;
-defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vceq,
- FCMEQZssi, FCMEQZddi>;
-def : Pat<(v1i64 (Neon_cmpz (v1f64 FPR64:$Rn), (f32 fpz32:$FPImm), SETEQ)),
- (FCMEQZddi FPR64:$Rn, fpz32:$FPImm)>;
-
-// Scalar Floating-point Compare Mask Greater Than Or Equal
-defm FCMGE: NeonI_Scalar3Same_SD_sizes<0b1, 0b0, 0b11100, "fcmge">;
-defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vcge,
- FCMGEsss, FCMGEddd>;
-def : Neon_Scalar3Same_cmp_V1_D_size_patterns<SETGE, FCMGEddd>;
-
-// Scalar Floating-point Compare Mask Greater Than Or Equal To Zero
-defm FCMGEZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b1, 0b01100, "fcmge">;
-defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vcge,
- FCMGEZssi, FCMGEZddi>;
-
-// Scalar Floating-point Compare Mask Greater Than
-defm FCMGT: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11100, "fcmgt">;
-defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vcgt,
- FCMGTsss, FCMGTddd>;
-def : Neon_Scalar3Same_cmp_V1_D_size_patterns<SETGT, FCMGTddd>;
-
-// Scalar Floating-point Compare Mask Greater Than Zero
-defm FCMGTZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01100, "fcmgt">;
-defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vcgt,
- FCMGTZssi, FCMGTZddi>;
-
-// Scalar Floating-point Compare Mask Less Than Or Equal To Zero
-defm FCMLEZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b1, 0b01101, "fcmle">;
-defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vclez,
- FCMLEZssi, FCMLEZddi>;
-
-// Scalar Floating-point Compare Mask Less Than Zero
-defm FCMLTZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01110, "fcmlt">;
-defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vcltz,
- FCMLTZssi, FCMLTZddi>;
-
-// Scalar Floating-point Absolute Compare Mask Greater Than Or Equal
-defm FACGE: NeonI_Scalar3Same_SD_sizes<0b1, 0b0, 0b11101, "facge">;
-defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vcage,
- FACGEsss, FACGEddd>;
-
-// Scalar Floating-point Absolute Compare Mask Greater Than
-defm FACGT: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11101, "facgt">;
-defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vcagt,
- FACGTsss, FACGTddd>;
-
-// Scalar Floating-point Absolute Difference
-defm FABD: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11010, "fabd">;
-defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_vabd,
- FABDsss, FABDddd>;
-
-// Scalar Absolute Value
-defm ABS : NeonI_Scalar2SameMisc_D_size<0b0, 0b01011, "abs">;
-defm : Neon_Scalar2SameMisc_D_size_patterns<int_aarch64_neon_vabs, ABSdd>;
-
-// Scalar Signed Saturating Absolute Value
-defm SQABS : NeonI_Scalar2SameMisc_BHSD_size<0b0, 0b00111, "sqabs">;
-defm : Neon_Scalar2SameMisc_BHSD_size_patterns<int_arm_neon_vqabs,
- SQABSbb, SQABShh, SQABSss, SQABSdd>;
-
-// Scalar Negate
-defm NEG : NeonI_Scalar2SameMisc_D_size<0b1, 0b01011, "neg">;
-defm : Neon_Scalar2SameMisc_D_size_patterns<int_aarch64_neon_vneg, NEGdd>;
-
-// Scalar Signed Saturating Negate
-defm SQNEG : NeonI_Scalar2SameMisc_BHSD_size<0b1, 0b00111, "sqneg">;
-defm : Neon_Scalar2SameMisc_BHSD_size_patterns<int_arm_neon_vqneg,
- SQNEGbb, SQNEGhh, SQNEGss, SQNEGdd>;
-
-// Scalar Signed Saturating Accumulate of Unsigned Value
-defm SUQADD : NeonI_Scalar2SameMisc_accum_BHSD_size<0b0, 0b00011, "suqadd">;
-defm : Neon_Scalar2SameMisc_accum_BHSD_size_patterns<int_aarch64_neon_vuqadd,
- SUQADDbb, SUQADDhh,
- SUQADDss, SUQADDdd>;
-
-// Scalar Unsigned Saturating Accumulate of Signed Value
-defm USQADD : NeonI_Scalar2SameMisc_accum_BHSD_size<0b1, 0b00011, "usqadd">;
-defm : Neon_Scalar2SameMisc_accum_BHSD_size_patterns<int_aarch64_neon_vsqadd,
- USQADDbb, USQADDhh,
- USQADDss, USQADDdd>;
-
-def : Pat<(v1i64 (int_aarch64_neon_suqadd (v1i64 FPR64:$Src),
- (v1i64 FPR64:$Rn))),
- (SUQADDdd FPR64:$Src, FPR64:$Rn)>;
-
-def : Pat<(v1i64 (int_aarch64_neon_usqadd (v1i64 FPR64:$Src),
- (v1i64 FPR64:$Rn))),
- (USQADDdd FPR64:$Src, FPR64:$Rn)>;
-
-def : Pat<(v1i64 (int_arm_neon_vabs (v1i64 FPR64:$Rn))),
- (ABSdd FPR64:$Rn)>;
-
-def : Pat<(v1i64 (int_arm_neon_vqabs (v1i64 FPR64:$Rn))),
- (SQABSdd FPR64:$Rn)>;
-
-def : Pat<(v1i64 (int_arm_neon_vqneg (v1i64 FPR64:$Rn))),
- (SQNEGdd FPR64:$Rn)>;
-
-def : Pat<(v1i64 (sub (v1i64 (bitconvert (v8i8 Neon_AllZero))),
- (v1i64 FPR64:$Rn))),
- (NEGdd FPR64:$Rn)>;
-
-// Scalar Signed Saturating Extract Unsigned Narrow
-defm SQXTUN : NeonI_Scalar2SameMisc_narrow_HSD_size<0b1, 0b10010, "sqxtun">;
-defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovnsu,
- SQXTUNbh, SQXTUNhs,
- SQXTUNsd>;
-
-// Scalar Signed Saturating Extract Narrow
-defm SQXTN : NeonI_Scalar2SameMisc_narrow_HSD_size<0b0, 0b10100, "sqxtn">;
-defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovns,
- SQXTNbh, SQXTNhs,
- SQXTNsd>;
-
-// Scalar Unsigned Saturating Extract Narrow
-defm UQXTN : NeonI_Scalar2SameMisc_narrow_HSD_size<0b1, 0b10100, "uqxtn">;
-defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovnu,
- UQXTNbh, UQXTNhs,
- UQXTNsd>;
-
-// Scalar Reduce Pairwise
-
-multiclass NeonI_ScalarPair_D_sizes<bit u, bit size, bits<5> opcode,
- string asmop, bit Commutable = 0> {
- let isCommutable = Commutable in {
- def _D_2D : NeonI_ScalarPair<u, {size, 0b1}, opcode,
- (outs FPR64:$Rd), (ins VPR128:$Rn),
- !strconcat(asmop, "\t$Rd, $Rn.2d"),
- [],
- NoItinerary>;
- }
-}
-
-multiclass NeonI_ScalarPair_SD_sizes<bit u, bit size, bits<5> opcode,
- string asmop, bit Commutable = 0>
- : NeonI_ScalarPair_D_sizes<u, size, opcode, asmop, Commutable> {
- let isCommutable = Commutable in {
- def _S_2S : NeonI_ScalarPair<u, {size, 0b0}, opcode,
- (outs FPR32:$Rd), (ins VPR64:$Rn),
- !strconcat(asmop, "\t$Rd, $Rn.2s"),
- [],
- NoItinerary>;
- }
-}
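-// Note: these pairwise-reduce forms read an entire vector register
-// (".2d" from VPR128, ".2s" from VPR64) and write a single scalar FPR;
-// the lane arrangement appears only in the assembly string.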
-
-// Scalar Reduce Addition Pairwise (Integer), with a pattern
-// to match the llvm.arm.* intrinsic
-defm ADDPvv : NeonI_ScalarPair_D_sizes<0b0, 0b1, 0b11011, "addp", 0>;
-
-// Pattern to match llvm.aarch64.* intrinsic for
-// Scalar Reduce Addition Pairwise (Integer)
-def : Pat<(v1i64 (int_aarch64_neon_vpadd (v2i64 VPR128:$Rn))),
- (ADDPvv_D_2D VPR128:$Rn)>;
-def : Pat<(v1i64 (int_aarch64_neon_vaddv (v2i64 VPR128:$Rn))),
- (ADDPvv_D_2D VPR128:$Rn)>;
-
-// Scalar Reduce Addition Pairwise (Floating Point)
-defm FADDPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01101, "faddp", 0>;
-
-// Scalar Reduce Maximum Pairwise (Floating Point)
-defm FMAXPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01111, "fmaxp", 0>;
-
-// Scalar Reduce Minimum Pairwise (Floating Point)
-defm FMINPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01111, "fminp", 0>;
-
-// Scalar Reduce maxNum Pairwise (Floating Point)
-defm FMAXNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01100, "fmaxnmp", 0>;
-
-// Scalar Reduce minNum Pairwise (Floating Point)
-defm FMINNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01100, "fminnmp", 0>;
-
-multiclass Neon_ScalarPair_SD_size_patterns<SDPatternOperator opnodeS,
- SDPatternOperator opnodeD,
- Instruction INSTS,
- Instruction INSTD> {
- def : Pat<(v1f32 (opnodeS (v2f32 VPR64:$Rn))),
- (INSTS VPR64:$Rn)>;
- def : Pat<(v1f64 (opnodeD (v2f64 VPR128:$Rn))),
- (INSTD VPR128:$Rn)>;
-}
-
-// Patterns to match llvm.aarch64.* intrinsic for
-// Scalar Reduce Add, Max, Min, MaxNum, MinNum Pairwise (Floating Point)
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfadd,
- int_aarch64_neon_vpfaddq, FADDPvv_S_2S, FADDPvv_D_2D>;
-
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpmax,
- int_aarch64_neon_vpmaxq, FMAXPvv_S_2S, FMAXPvv_D_2D>;
-
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpmin,
- int_aarch64_neon_vpminq, FMINPvv_S_2S, FMINPvv_D_2D>;
-
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfmaxnm,
- int_aarch64_neon_vpfmaxnmq, FMAXNMPvv_S_2S, FMAXNMPvv_D_2D>;
-
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfminnm,
- int_aarch64_neon_vpfminnmq, FMINNMPvv_S_2S, FMINNMPvv_D_2D>;
-
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vaddv,
- int_aarch64_neon_vaddv, FADDPvv_S_2S, FADDPvv_D_2D>;
-
-def : Pat<(v1f32 (int_aarch64_neon_vaddv (v4f32 VPR128:$Rn))),
- (FADDPvv_S_2S (v2f32
- (EXTRACT_SUBREG
- (v4f32 (FADDP_4S (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rn))),
- sub_64)))>;
-
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vmaxv,
- int_aarch64_neon_vmaxv, FMAXPvv_S_2S, FMAXPvv_D_2D>;
-
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vminv,
- int_aarch64_neon_vminv, FMINPvv_S_2S, FMINPvv_D_2D>;
-
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vmaxnmv,
- int_aarch64_neon_vmaxnmv, FMAXNMPvv_S_2S, FMAXNMPvv_D_2D>;
-
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vminnmv,
- int_aarch64_neon_vminnmv, FMINNMPvv_S_2S, FMINNMPvv_D_2D>;
-
-// Scalar by element Arithmetic
-
-class NeonI_ScalarXIndexedElemArith<string asmop, bits<4> opcode,
- string rmlane, bit u, bit szhi, bit szlo,
- RegisterClass ResFPR, RegisterClass OpFPR,
- RegisterOperand OpVPR, Operand OpImm>
- : NeonI_ScalarXIndexedElem<u, szhi, szlo, opcode,
- (outs ResFPR:$Rd),
- (ins OpFPR:$Rn, OpVPR:$MRm, OpImm:$Imm),
- asmop # "\t$Rd, $Rn, $MRm" # rmlane # "[$Imm]",
- [],
- NoItinerary> {
- bits<3> Imm;
- bits<5> MRm;
-}
-
-class NeonI_ScalarXIndexedElemArith_Constraint_Impl<string asmop, bits<4> opcode,
- string rmlane,
- bit u, bit szhi, bit szlo,
- RegisterClass ResFPR,
- RegisterClass OpFPR,
- RegisterOperand OpVPR,
- Operand OpImm>
- : NeonI_ScalarXIndexedElem<u, szhi, szlo, opcode,
- (outs ResFPR:$Rd),
- (ins ResFPR:$src, OpFPR:$Rn, OpVPR:$MRm, OpImm:$Imm),
- asmop # "\t$Rd, $Rn, $MRm" # rmlane # "[$Imm]",
- [],
- NoItinerary> {
- let Constraints = "$src = $Rd";
- bits<3> Imm;
- bits<5> MRm;
-}
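-// Note: the "_Constraint_Impl" variant ties $src to $Rd, so the accumulating
-// by-element forms below (fmla/fmls, sqdmlal/sqdmlsl) read and write the
-// same scalar register.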
-
-// Scalar Floating Point multiply (scalar, by element)
-def FMULssv_4S : NeonI_ScalarXIndexedElemArith<"fmul",
- 0b1001, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
- let Inst{11} = Imm{1}; // h
- let Inst{21} = Imm{0}; // l
- let Inst{20-16} = MRm;
-}
-def FMULddv_2D : NeonI_ScalarXIndexedElemArith<"fmul",
- 0b1001, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
- let Inst{11} = Imm{0}; // h
- let Inst{21} = 0b0; // l
- let Inst{20-16} = MRm;
-}
-
-// Scalar Floating Point multiply extended (scalar, by element)
-def FMULXssv_4S : NeonI_ScalarXIndexedElemArith<"fmulx",
- 0b1001, ".s", 0b1, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
- let Inst{11} = Imm{1}; // h
- let Inst{21} = Imm{0}; // l
- let Inst{20-16} = MRm;
-}
-def FMULXddv_2D : NeonI_ScalarXIndexedElemArith<"fmulx",
- 0b1001, ".d", 0b1, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
- let Inst{11} = Imm{0}; // h
- let Inst{21} = 0b0; // l
- let Inst{20-16} = MRm;
-}
-
-multiclass Neon_ScalarXIndexedElem_MUL_MULX_Patterns<
- SDPatternOperator opnode,
- Instruction INST,
- ValueType ResTy, RegisterClass FPRC, ValueType OpTy, Operand OpImm,
- ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
-
- def : Pat<(ResTy (opnode (ResTy FPRC:$Rn),
- (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)))),
- (ResTy (INST (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
-
- def : Pat<(ResTy (opnode (ResTy FPRC:$Rn),
- (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)))),
- (ResTy (INST (ResTy FPRC:$Rn),
- (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
- OpNImm:$Imm))>;
-
- // swapped operands
- def : Pat<(ResTy (opnode
- (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
- (ResTy FPRC:$Rn))),
- (ResTy (INST (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
-
- def : Pat<(ResTy (opnode
- (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
- (ResTy FPRC:$Rn))),
- (ResTy (INST (ResTy FPRC:$Rn),
- (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
- OpNImm:$Imm))>;
-}
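-// Note: each instantiation of this multiclass emits four patterns: the lane
-// may come from a 128-bit (OpTy) or a 64-bit (OpNTy) vector, with the 64-bit
-// operand widened via SUBREG_TO_REG, and the scalar operand may appear on
-// either side of the multiply.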
-
-// Patterns for Scalar Floating Point multiply (scalar, by element)
-defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<fmul, FMULssv_4S,
- f32, FPR32, v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<fmul, FMULddv_2D,
- f64, FPR64, v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
-
-// Patterns for Scalar Floating Point multiply extended (scalar, by element)
-defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<int_aarch64_neon_vmulx,
- FMULXssv_4S, f32, FPR32, v4f32, neon_uimm2_bare,
- v2f32, v4f32, neon_uimm1_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<int_aarch64_neon_vmulx,
- FMULXddv_2D, f64, FPR64, v2f64, neon_uimm1_bare,
- v1f64, v2f64, neon_uimm0_bare>;
-
-
-// Scalar Floating Point fused multiply-add (scalar, by element)
-def FMLAssv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmla",
- 0b0001, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
- let Inst{11} = Imm{1}; // h
- let Inst{21} = Imm{0}; // l
- let Inst{20-16} = MRm;
-}
-def FMLAddv_2D : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmla",
- 0b0001, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
- let Inst{11} = Imm{0}; // h
- let Inst{21} = 0b0; // l
- let Inst{20-16} = MRm;
-}
-
-// Scalar Floating Point fused multiply-subtract (scalar, by element)
-def FMLSssv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmls",
- 0b0101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
- let Inst{11} = Imm{1}; // h
- let Inst{21} = Imm{0}; // l
- let Inst{20-16} = MRm;
-}
-def FMLSddv_2D : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmls",
- 0b0101, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
- let Inst{11} = Imm{0}; // h
- let Inst{21} = 0b0; // l
- let Inst{20-16} = MRm;
-}
-// We are allowed to match the fma instruction regardless of compile options.
-multiclass Neon_ScalarXIndexedElem_FMA_Patterns<
- Instruction FMLAI, Instruction FMLSI,
- ValueType ResTy, RegisterClass FPRC, ValueType OpTy, Operand OpImm,
- ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
- // fmla
- def : Pat<(ResTy (fma (ResTy FPRC:$Rn),
- (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
- (ResTy FPRC:$Ra))),
- (ResTy (FMLAI (ResTy FPRC:$Ra),
- (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
-
- def : Pat<(ResTy (fma (ResTy FPRC:$Rn),
- (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
- (ResTy FPRC:$Ra))),
- (ResTy (FMLAI (ResTy FPRC:$Ra),
- (ResTy FPRC:$Rn),
- (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
- OpNImm:$Imm))>;
-
- // swapped fmla operands
- def : Pat<(ResTy (fma
- (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
- (ResTy FPRC:$Rn),
- (ResTy FPRC:$Ra))),
- (ResTy (FMLAI (ResTy FPRC:$Ra),
- (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
-
- def : Pat<(ResTy (fma
- (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
- (ResTy FPRC:$Rn),
- (ResTy FPRC:$Ra))),
- (ResTy (FMLAI (ResTy FPRC:$Ra),
- (ResTy FPRC:$Rn),
- (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
- OpNImm:$Imm))>;
-
- // fmls
- def : Pat<(ResTy (fma (ResTy FPRC:$Rn),
- (fneg (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm))),
- (ResTy FPRC:$Ra))),
- (ResTy (FMLSI (ResTy FPRC:$Ra),
- (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
-
- def : Pat<(ResTy (fma (ResTy FPRC:$Rn),
- (fneg (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm))),
- (ResTy FPRC:$Ra))),
- (ResTy (FMLSI (ResTy FPRC:$Ra),
- (ResTy FPRC:$Rn),
- (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
- OpNImm:$Imm))>;
-
- // swapped fmls operands
- def : Pat<(ResTy (fma
- (fneg (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm))),
- (ResTy FPRC:$Rn),
- (ResTy FPRC:$Ra))),
- (ResTy (FMLSI (ResTy FPRC:$Ra),
- (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
-
- def : Pat<(ResTy (fma
- (fneg (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm))),
- (ResTy FPRC:$Rn),
- (ResTy FPRC:$Ra))),
- (ResTy (FMLSI (ResTy FPRC:$Ra),
- (ResTy FPRC:$Rn),
- (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
- OpNImm:$Imm))>;
-}
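-// Note: there is no separate fmls node; the multiply-subtract forms are
-// matched as fma with fneg applied to the by-element multiplicand, again in
-// both operand orders and for both 128-bit and widened 64-bit vectors.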
-
-// Scalar Floating Point fused multiply-add and
-// multiply-subtract (scalar, by element)
-defm : Neon_ScalarXIndexedElem_FMA_Patterns<FMLAssv_4S, FMLSssv_4S,
- f32, FPR32, v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
-defm : Neon_ScalarXIndexedElem_FMA_Patterns<FMLAddv_2D, FMLSddv_2D,
- f64, FPR64, v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
-
-// Scalar Signed saturating doubling multiply long (scalar, by element)
-def SQDMULLshv_4H : NeonI_ScalarXIndexedElemArith<"sqdmull",
- 0b1011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
- let Inst{11} = 0b0; // h
- let Inst{21} = Imm{1}; // l
- let Inst{20} = Imm{0}; // m
- let Inst{19-16} = MRm{3-0};
-}
-def SQDMULLshv_8H : NeonI_ScalarXIndexedElemArith<"sqdmull",
- 0b1011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
- let Inst{11} = Imm{2}; // h
- let Inst{21} = Imm{1}; // l
- let Inst{20} = Imm{0}; // m
- let Inst{19-16} = MRm{3-0};
-}
-def SQDMULLdsv_2S : NeonI_ScalarXIndexedElemArith<"sqdmull",
- 0b1011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
- let Inst{11} = 0b0; // h
- let Inst{21} = Imm{0}; // l
- let Inst{20-16} = MRm;
-}
-def SQDMULLdsv_4S : NeonI_ScalarXIndexedElemArith<"sqdmull",
- 0b1011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
- let Inst{11} = Imm{1}; // h
- let Inst{21} = Imm{0}; // l
- let Inst{20-16} = MRm;
-}
-
-multiclass Neon_ScalarXIndexedElem_MUL_Patterns<
- SDPatternOperator opnode,
- Instruction INST,
- ValueType ResTy, RegisterClass FPRC,
- ValueType OpVTy, ValueType OpTy,
- ValueType VecOpTy, ValueType ExTy, RegisterOperand VPRC, Operand OpImm> {
-
- def : Pat<(ResTy (opnode (OpVTy FPRC:$Rn),
- (OpVTy (scalar_to_vector
- (ExTy (vector_extract (VecOpTy VPRC:$MRm), OpImm:$Imm)))))),
- (ResTy (INST (OpVTy FPRC:$Rn), (VecOpTy VPRC:$MRm), OpImm:$Imm))>;
-
- // swapped operands
- def : Pat<(ResTy (opnode
- (OpVTy (scalar_to_vector
- (ExTy (vector_extract (VecOpTy VPRC:$MRm), OpImm:$Imm)))),
- (OpVTy FPRC:$Rn))),
- (ResTy (INST (OpVTy FPRC:$Rn), (VecOpTy VPRC:$MRm), OpImm:$Imm))>;
-}
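-// Note: the saturating-multiply intrinsics operate on v1ix scalar vectors,
-// so the extracted lane is rebuilt with scalar_to_vector on the pattern
-// side, while the selected instruction consumes the lane directly through
-// $MRm and $Imm.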
-
-
-// Patterns for Scalar Signed saturating doubling
-// multiply long (scalar, by element)
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
- SQDMULLshv_4H, v1i32, FPR16, v1i16, i16, v4i16,
- i32, VPR64Lo, neon_uimm2_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
- SQDMULLshv_8H, v1i32, FPR16, v1i16, i16, v8i16,
- i32, VPR128Lo, neon_uimm3_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
- SQDMULLdsv_2S, v1i64, FPR32, v1i32, i32, v2i32,
- i32, VPR64Lo, neon_uimm1_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
- SQDMULLdsv_4S, v1i64, FPR32, v1i32, i32, v4i32,
- i32, VPR128Lo, neon_uimm2_bare>;
-
-// Scalar Signed saturating doubling multiply-add long (scalar, by element)
-def SQDMLALshv_4H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
- 0b0011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
- let Inst{11} = 0b0; // h
- let Inst{21} = Imm{1}; // l
- let Inst{20} = Imm{0}; // m
- let Inst{19-16} = MRm{3-0};
-}
-def SQDMLALshv_8H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
- 0b0011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
- let Inst{11} = Imm{2}; // h
- let Inst{21} = Imm{1}; // l
- let Inst{20} = Imm{0}; // m
- let Inst{19-16} = MRm{3-0};
-}
-def SQDMLALdsv_2S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
- 0b0011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
- let Inst{11} = 0b0; // h
- let Inst{21} = Imm{0}; // l
- let Inst{20-16} = MRm;
-}
-def SQDMLALdsv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
- 0b0011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
- let Inst{11} = Imm{1}; // h
- let Inst{21} = Imm{0}; // l
- let Inst{20-16} = MRm;
-}
-
-// Scalar Signed saturating doubling
-// multiply-subtract long (scalar, by element)
-def SQDMLSLshv_4H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
- 0b0111, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
- let Inst{11} = 0b0; // h
- let Inst{21} = Imm{1}; // l
- let Inst{20} = Imm{0}; // m
- let Inst{19-16} = MRm{3-0};
-}
-def SQDMLSLshv_8H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
- 0b0111, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
- let Inst{11} = Imm{2}; // h
- let Inst{21} = Imm{1}; // l
- let Inst{20} = Imm{0}; // m
- let Inst{19-16} = MRm{3-0};
-}
-def SQDMLSLdsv_2S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
- 0b0111, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
- let Inst{11} = 0b0; // h
- let Inst{21} = Imm{0}; // l
- let Inst{20-16} = MRm;
-}
-def SQDMLSLdsv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
- 0b0111, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
- let Inst{11} = Imm{1}; // h
- let Inst{21} = Imm{0}; // l
- let Inst{20-16} = MRm;
-}
-
-multiclass Neon_ScalarXIndexedElem_MLAL_Patterns<
- SDPatternOperator opnode,
- SDPatternOperator coreopnode,
- Instruction INST,
- ValueType ResTy, RegisterClass ResFPRC, RegisterClass FPRC,
- ValueType OpTy,
- ValueType OpVTy, ValueType ExTy, RegisterOperand VPRC, Operand OpImm> {
-
- def : Pat<(ResTy (opnode
- (ResTy ResFPRC:$Ra),
- (ResTy (coreopnode (OpTy FPRC:$Rn),
- (OpTy (scalar_to_vector
- (ExTy (vector_extract (OpVTy VPRC:$MRm), OpImm:$Imm)))))))),
- (ResTy (INST (ResTy ResFPRC:$Ra),
- (OpTy FPRC:$Rn), (OpVTy VPRC:$MRm), OpImm:$Imm))>;
-
- // swapped operands
- def : Pat<(ResTy (opnode
- (ResTy ResFPRC:$Ra),
- (ResTy (coreopnode
- (OpTy (scalar_to_vector
- (ExTy (vector_extract (OpVTy VPRC:$MRm), OpImm:$Imm)))),
- (OpTy FPRC:$Rn))))),
- (ResTy (INST (ResTy ResFPRC:$Ra),
- (OpTy FPRC:$Rn), (OpVTy VPRC:$MRm), OpImm:$Imm))>;
-}
-
-// Patterns for Scalar Signed saturating
-// doubling multiply-add long (scalar, by element)
-defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
- int_arm_neon_vqdmull, SQDMLALshv_4H, v1i32, FPR32, FPR16, v1i16, v4i16,
- i32, VPR64Lo, neon_uimm2_bare>;
-defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
- int_arm_neon_vqdmull, SQDMLALshv_8H, v1i32, FPR32, FPR16, v1i16, v8i16,
- i32, VPR128Lo, neon_uimm3_bare>;
-defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
- int_arm_neon_vqdmull, SQDMLALdsv_2S, v1i64, FPR64, FPR32, v1i32, v2i32,
- i32, VPR64Lo, neon_uimm1_bare>;
-defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
- int_arm_neon_vqdmull, SQDMLALdsv_4S, v1i64, FPR64, FPR32, v1i32, v4i32,
- i32, VPR128Lo, neon_uimm2_bare>;
-
-// Patterns for Scalar Signed saturating
-// doubling multiply-sub long (scalar, by element)
-defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
- int_arm_neon_vqdmull, SQDMLSLshv_4H, v1i32, FPR32, FPR16, v1i16, v4i16,
- i32, VPR64Lo, neon_uimm2_bare>;
-defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
- int_arm_neon_vqdmull, SQDMLSLshv_8H, v1i32, FPR32, FPR16, v1i16, v8i16,
- i32, VPR128Lo, neon_uimm3_bare>;
-defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
- int_arm_neon_vqdmull, SQDMLSLdsv_2S, v1i64, FPR64, FPR32, v1i32, v2i32,
- i32, VPR64Lo, neon_uimm1_bare>;
-defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
- int_arm_neon_vqdmull, SQDMLSLdsv_4S, v1i64, FPR64, FPR32, v1i32, v4i32,
- i32, VPR128Lo, neon_uimm2_bare>;
-
-// Scalar general arithmetic operation
-class Neon_Scalar_GeneralMath2D_pattern<SDPatternOperator opnode,
- Instruction INST>
- : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))), (INST FPR64:$Rn)>;
-
-class Neon_Scalar_GeneralMath3D_pattern<SDPatternOperator opnode,
- Instruction INST>
- : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
- (INST FPR64:$Rn, FPR64:$Rm)>;
-
-class Neon_Scalar_GeneralMath4D_pattern<SDPatternOperator opnode,
- Instruction INST>
- : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm),
- (v1f64 FPR64:$Ra))),
- (INST FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
-
-def : Neon_Scalar_GeneralMath3D_pattern<fadd, FADDddd>;
-def : Neon_Scalar_GeneralMath3D_pattern<fmul, FMULddd>;
-def : Neon_Scalar_GeneralMath3D_pattern<fsub, FSUBddd>;
-def : Neon_Scalar_GeneralMath3D_pattern<fdiv, FDIVddd>;
-def : Neon_Scalar_GeneralMath3D_pattern<int_arm_neon_vabds, FABDddd>;
-def : Neon_Scalar_GeneralMath3D_pattern<int_arm_neon_vmaxs, FMAXddd>;
-def : Neon_Scalar_GeneralMath3D_pattern<int_arm_neon_vmins, FMINddd>;
-def : Neon_Scalar_GeneralMath3D_pattern<int_aarch64_neon_vmaxnm, FMAXNMddd>;
-def : Neon_Scalar_GeneralMath3D_pattern<int_aarch64_neon_vminnm, FMINNMddd>;
-
-def : Neon_Scalar_GeneralMath2D_pattern<fabs, FABSdd>;
-def : Neon_Scalar_GeneralMath2D_pattern<fneg, FNEGdd>;
-
-def : Neon_Scalar_GeneralMath4D_pattern<fma, FMADDdddd>;
-def : Neon_Scalar_GeneralMath4D_pattern<fmsub, FMSUBdddd>;
-
-// Scalar Signed saturating doubling multiply returning
-// high half (scalar, by element)
-def SQDMULHhhv_4H : NeonI_ScalarXIndexedElemArith<"sqdmulh",
- 0b1100, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR64Lo, neon_uimm2_bare> {
- let Inst{11} = 0b0; // h
- let Inst{21} = Imm{1}; // l
- let Inst{20} = Imm{0}; // m
- let Inst{19-16} = MRm{3-0};
-}
-def SQDMULHhhv_8H : NeonI_ScalarXIndexedElemArith<"sqdmulh",
- 0b1100, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR128Lo, neon_uimm3_bare> {
- let Inst{11} = Imm{2}; // h
- let Inst{21} = Imm{1}; // l
- let Inst{20} = Imm{0}; // m
- let Inst{19-16} = MRm{3-0};
-}
-def SQDMULHssv_2S : NeonI_ScalarXIndexedElemArith<"sqdmulh",
- 0b1100, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR64, neon_uimm1_bare> {
- let Inst{11} = 0b0; // h
- let Inst{21} = Imm{0}; // l
- let Inst{20-16} = MRm;
-}
-def SQDMULHssv_4S : NeonI_ScalarXIndexedElemArith<"sqdmulh",
- 0b1100, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
- let Inst{11} = Imm{1}; // h
- let Inst{21} = Imm{0}; // l
- let Inst{20-16} = MRm;
-}
-
-// Patterns for Scalar Signed saturating doubling multiply returning
-// high half (scalar, by element)
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
- SQDMULHhhv_4H, v1i16, FPR16, v1i16, i16, v4i16,
- i32, VPR64Lo, neon_uimm2_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
- SQDMULHhhv_8H, v1i16, FPR16, v1i16, i16, v8i16,
- i32, VPR128Lo, neon_uimm3_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
- SQDMULHssv_2S, v1i32, FPR32, v1i32, i32, v2i32,
- i32, VPR64Lo, neon_uimm1_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
- SQDMULHssv_4S, v1i32, FPR32, v1i32, i32, v4i32,
- i32, VPR128Lo, neon_uimm2_bare>;
-
-// Scalar Signed saturating rounding doubling multiply
-// returning high half (scalar, by element)
-def SQRDMULHhhv_4H : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
- 0b1101, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR64Lo, neon_uimm2_bare> {
- let Inst{11} = 0b0; // h
- let Inst{21} = Imm{1}; // l
- let Inst{20} = Imm{0}; // m
- let Inst{19-16} = MRm{3-0};
-}
-def SQRDMULHhhv_8H : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
- 0b1101, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR128Lo, neon_uimm3_bare> {
- let Inst{11} = Imm{2}; // h
- let Inst{21} = Imm{1}; // l
- let Inst{20} = Imm{0}; // m
- let Inst{19-16} = MRm{3-0};
-}
-def SQRDMULHssv_2S : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
- 0b1101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR64, neon_uimm1_bare> {
- let Inst{11} = 0b0; // h
- let Inst{21} = Imm{0}; // l
- let Inst{20-16} = MRm;
-}
-def SQRDMULHssv_4S : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
- 0b1101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
- let Inst{11} = Imm{1}; // h
- let Inst{21} = Imm{0}; // l
- let Inst{20-16} = MRm;
-}
-
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
- SQRDMULHhhv_4H, v1i16, FPR16, v1i16, i16, v4i16, i32,
- VPR64Lo, neon_uimm2_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
- SQRDMULHhhv_8H, v1i16, FPR16, v1i16, i16, v8i16, i32,
- VPR128Lo, neon_uimm3_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
- SQRDMULHssv_2S, v1i32, FPR32, v1i32, i32, v2i32, i32,
- VPR64Lo, neon_uimm1_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
- SQRDMULHssv_4S, v1i32, FPR32, v1i32, i32, v4i32, i32,
- VPR128Lo, neon_uimm2_bare>;
-
-// Scalar Copy - DUP element to scalar
-class NeonI_Scalar_DUP<string asmop, string asmlane,
- RegisterClass ResRC, RegisterOperand VPRC,
- Operand OpImm>
- : NeonI_ScalarCopy<(outs ResRC:$Rd), (ins VPRC:$Rn, OpImm:$Imm),
- asmop # "\t$Rd, $Rn." # asmlane # "[$Imm]",
- [],
- NoItinerary> {
- bits<4> Imm;
-}
-
-def DUPbv_B : NeonI_Scalar_DUP<"dup", "b", FPR8, VPR128, neon_uimm4_bare> {
- let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
-}
-def DUPhv_H : NeonI_Scalar_DUP<"dup", "h", FPR16, VPR128, neon_uimm3_bare> {
- let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
-}
-def DUPsv_S : NeonI_Scalar_DUP<"dup", "s", FPR32, VPR128, neon_uimm2_bare> {
- let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
-}
-def DUPdv_D : NeonI_Scalar_DUP<"dup", "d", FPR64, VPR128, neon_uimm1_bare> {
- let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
-}
-
-multiclass NeonI_Scalar_DUP_Elt_pattern<Instruction DUPI, ValueType ResTy,
- ValueType OpTy, Operand OpImm,
- ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
- def : Pat<(ResTy (vector_extract (OpTy VPR128:$Rn), OpImm:$Imm)),
- (ResTy (DUPI (OpTy VPR128:$Rn), OpImm:$Imm))>;
-
- def : Pat<(ResTy (vector_extract (OpNTy VPR64:$Rn), OpNImm:$Imm)),
- (ResTy (DUPI
- (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
- OpNImm:$Imm))>;
-}
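-// Note: a floating-point lane extract is selected as a scalar DUP; when the
-// source is a 64-bit vector it is first widened to the 128-bit register
-// class with SUBREG_TO_REG.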
-
-// Patterns for vector extract of FP data using scalar DUP instructions
-defm : NeonI_Scalar_DUP_Elt_pattern<DUPsv_S, f32,
- v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
-defm : NeonI_Scalar_DUP_Elt_pattern<DUPdv_D, f64,
- v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
-
-multiclass NeonI_Scalar_DUP_Ext_Vec_pattern<Instruction DUPI,
- ValueType ResTy, ValueType OpTy,Operand OpLImm,
- ValueType NOpTy, ValueType ExTy, Operand OpNImm> {
-
- def : Pat<(ResTy (extract_subvector (OpTy VPR128:$Rn), OpLImm:$Imm)),
- (ResTy (DUPI VPR128:$Rn, OpLImm:$Imm))>;
-
- def : Pat<(ResTy (extract_subvector (NOpTy VPR64:$Rn), OpNImm:$Imm)),
- (ResTy (DUPI
- (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
- OpNImm:$Imm))>;
-}
-
-// Patterns for extracting subvectors of v1ix data using scalar DUP instructions.
-defm : NeonI_Scalar_DUP_Ext_Vec_pattern<DUPbv_B, v1i8, v16i8, neon_uimm4_bare,
- v8i8, v16i8, neon_uimm3_bare>;
-defm : NeonI_Scalar_DUP_Ext_Vec_pattern<DUPhv_H, v1i16, v8i16, neon_uimm3_bare,
- v4i16, v8i16, neon_uimm2_bare>;
-defm : NeonI_Scalar_DUP_Ext_Vec_pattern<DUPsv_S, v1i32, v4i32, neon_uimm2_bare,
- v2i32, v4i32, neon_uimm1_bare>;
-
-multiclass NeonI_Scalar_DUP_Copy_pattern1<Instruction DUPI, ValueType ResTy,
- ValueType OpTy, ValueType ElemTy,
- Operand OpImm, ValueType OpNTy,
- ValueType ExTy, Operand OpNImm> {
-
- def : Pat<(ResTy (vector_insert (ResTy undef),
- (ElemTy (vector_extract (OpTy VPR128:$Rn), OpImm:$Imm)),
- (neon_uimm0_bare:$Imm))),
- (ResTy (DUPI (OpTy VPR128:$Rn), OpImm:$Imm))>;
-
- def : Pat<(ResTy (vector_insert (ResTy undef),
- (ElemTy (vector_extract (OpNTy VPR64:$Rn), OpNImm:$Imm)),
- (OpNImm:$Imm))),
- (ResTy (DUPI
- (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
- OpNImm:$Imm))>;
-}
-
-multiclass NeonI_Scalar_DUP_Copy_pattern2<Instruction DUPI, ValueType ResTy,
- ValueType OpTy, ValueType ElemTy,
- Operand OpImm, ValueType OpNTy,
- ValueType ExTy, Operand OpNImm> {
-
- def : Pat<(ResTy (scalar_to_vector
- (ElemTy (vector_extract (OpTy VPR128:$Rn), OpImm:$Imm)))),
- (ResTy (DUPI (OpTy VPR128:$Rn), OpImm:$Imm))>;
-
- def : Pat<(ResTy (scalar_to_vector
- (ElemTy (vector_extract (OpNTy VPR64:$Rn), OpNImm:$Imm)))),
- (ResTy (DUPI
- (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
- OpNImm:$Imm))>;
-}
-
-// Patterns for vector copy to v1ix and v1fx vectors using scalar DUP
-// instructions.
-defm : NeonI_Scalar_DUP_Copy_pattern1<DUPdv_D,
- v1i64, v2i64, i64, neon_uimm1_bare,
- v1i64, v2i64, neon_uimm0_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern1<DUPsv_S,
- v1i32, v4i32, i32, neon_uimm2_bare,
- v2i32, v4i32, neon_uimm1_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern1<DUPhv_H,
- v1i16, v8i16, i32, neon_uimm3_bare,
- v4i16, v8i16, neon_uimm2_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern1<DUPbv_B,
- v1i8, v16i8, i32, neon_uimm4_bare,
- v8i8, v16i8, neon_uimm3_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern1<DUPdv_D,
- v1f64, v2f64, f64, neon_uimm1_bare,
- v1f64, v2f64, neon_uimm0_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern1<DUPsv_S,
- v1f32, v4f32, f32, neon_uimm2_bare,
- v2f32, v4f32, neon_uimm1_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern2<DUPdv_D,
- v1i64, v2i64, i64, neon_uimm1_bare,
- v1i64, v2i64, neon_uimm0_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern2<DUPsv_S,
- v1i32, v4i32, i32, neon_uimm2_bare,
- v2i32, v4i32, neon_uimm1_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern2<DUPhv_H,
- v1i16, v8i16, i32, neon_uimm3_bare,
- v4i16, v8i16, neon_uimm2_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern2<DUPbv_B,
- v1i8, v16i8, i32, neon_uimm4_bare,
- v8i8, v16i8, neon_uimm3_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern2<DUPdv_D,
- v1f64, v2f64, f64, neon_uimm1_bare,
- v1f64, v2f64, neon_uimm0_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern2<DUPsv_S,
- v1f32, v4f32, f32, neon_uimm2_bare,
- v2f32, v4f32, neon_uimm1_bare>;
-
-multiclass NeonI_Scalar_DUP_alias<string asmop, string asmlane,
- Instruction DUPI, Operand OpImm,
- RegisterClass ResRC> {
- def : NeonInstAlias<!strconcat(asmop, "$Rd, $Rn" # asmlane # "[$Imm]"),
- (DUPI ResRC:$Rd, VPR128:$Rn, OpImm:$Imm), 0b0>;
-}
-
-// Aliases for Scalar copy - DUP element (scalar)
-// FIXME: This is actually the preferred syntax but TableGen can't deal with
-// custom printing of aliases.
-defm : NeonI_Scalar_DUP_alias<"mov", ".b", DUPbv_B, neon_uimm4_bare, FPR8>;
-defm : NeonI_Scalar_DUP_alias<"mov", ".h", DUPhv_H, neon_uimm3_bare, FPR16>;
-defm : NeonI_Scalar_DUP_alias<"mov", ".s", DUPsv_S, neon_uimm2_bare, FPR32>;
-defm : NeonI_Scalar_DUP_alias<"mov", ".d", DUPdv_D, neon_uimm1_bare, FPR64>;
-
-multiclass NeonI_SDUP<PatFrag GetLow, PatFrag GetHigh, ValueType ResTy,
- ValueType OpTy> {
- def : Pat<(ResTy (GetLow VPR128:$Rn)),
- (ResTy (EXTRACT_SUBREG (OpTy VPR128:$Rn), sub_64))>;
- def : Pat<(ResTy (GetHigh VPR128:$Rn)),
- (ResTy (DUPdv_D (OpTy VPR128:$Rn), 1))>;
-}
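-// Note: the low half of a 128-bit vector is just a sub_64 subregister
-// extract, while the high half is materialised with DUPdv_D on lane 1.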
-
-defm : NeonI_SDUP<Neon_Low16B, Neon_High16B, v8i8, v16i8>;
-defm : NeonI_SDUP<Neon_Low8H, Neon_High8H, v4i16, v8i16>;
-defm : NeonI_SDUP<Neon_Low4S, Neon_High4S, v2i32, v4i32>;
-defm : NeonI_SDUP<Neon_Low2D, Neon_High2D, v1i64, v2i64>;
-defm : NeonI_SDUP<Neon_Low4float, Neon_High4float, v2f32, v4f32>;
-defm : NeonI_SDUP<Neon_Low2double, Neon_High2double, v1f64, v2f64>;
-
-//===----------------------------------------------------------------------===//
-// Non-Instruction Patterns
-//===----------------------------------------------------------------------===//
-
-// 64-bit vector bitcasts...
-
-def : Pat<(v1i64 (bitconvert (v8i8 VPR64:$src))), (v1i64 VPR64:$src)>;
-def : Pat<(v2f32 (bitconvert (v8i8 VPR64:$src))), (v2f32 VPR64:$src)>;
-def : Pat<(v2i32 (bitconvert (v8i8 VPR64:$src))), (v2i32 VPR64:$src)>;
-def : Pat<(v4i16 (bitconvert (v8i8 VPR64:$src))), (v4i16 VPR64:$src)>;
-
-def : Pat<(v1i64 (bitconvert (v4i16 VPR64:$src))), (v1i64 VPR64:$src)>;
-def : Pat<(v2i32 (bitconvert (v4i16 VPR64:$src))), (v2i32 VPR64:$src)>;
-def : Pat<(v2f32 (bitconvert (v4i16 VPR64:$src))), (v2f32 VPR64:$src)>;
-def : Pat<(v8i8 (bitconvert (v4i16 VPR64:$src))), (v8i8 VPR64:$src)>;
-
-def : Pat<(v1i64 (bitconvert (v2i32 VPR64:$src))), (v1i64 VPR64:$src)>;
-def : Pat<(v2f32 (bitconvert (v2i32 VPR64:$src))), (v2f32 VPR64:$src)>;
-def : Pat<(v4i16 (bitconvert (v2i32 VPR64:$src))), (v4i16 VPR64:$src)>;
-def : Pat<(v8i8 (bitconvert (v2i32 VPR64:$src))), (v8i8 VPR64:$src)>;
-
-def : Pat<(v1i64 (bitconvert (v2f32 VPR64:$src))), (v1i64 VPR64:$src)>;
-def : Pat<(v2i32 (bitconvert (v2f32 VPR64:$src))), (v2i32 VPR64:$src)>;
-def : Pat<(v4i16 (bitconvert (v2f32 VPR64:$src))), (v4i16 VPR64:$src)>;
-def : Pat<(v8i8 (bitconvert (v2f32 VPR64:$src))), (v8i8 VPR64:$src)>;
-
-def : Pat<(v2f32 (bitconvert (v1i64 VPR64:$src))), (v2f32 VPR64:$src)>;
-def : Pat<(v2i32 (bitconvert (v1i64 VPR64:$src))), (v2i32 VPR64:$src)>;
-def : Pat<(v4i16 (bitconvert (v1i64 VPR64:$src))), (v4i16 VPR64:$src)>;
-def : Pat<(v8i8 (bitconvert (v1i64 VPR64:$src))), (v8i8 VPR64:$src)>;
-
-// ...and 128-bit vector bitcasts...
-
-def : Pat<(v2f64 (bitconvert (v16i8 VPR128:$src))), (v2f64 VPR128:$src)>;
-def : Pat<(v2i64 (bitconvert (v16i8 VPR128:$src))), (v2i64 VPR128:$src)>;
-def : Pat<(v4f32 (bitconvert (v16i8 VPR128:$src))), (v4f32 VPR128:$src)>;
-def : Pat<(v4i32 (bitconvert (v16i8 VPR128:$src))), (v4i32 VPR128:$src)>;
-def : Pat<(v8i16 (bitconvert (v16i8 VPR128:$src))), (v8i16 VPR128:$src)>;
-
-def : Pat<(v2f64 (bitconvert (v8i16 VPR128:$src))), (v2f64 VPR128:$src)>;
-def : Pat<(v2i64 (bitconvert (v8i16 VPR128:$src))), (v2i64 VPR128:$src)>;
-def : Pat<(v4i32 (bitconvert (v8i16 VPR128:$src))), (v4i32 VPR128:$src)>;
-def : Pat<(v4f32 (bitconvert (v8i16 VPR128:$src))), (v4f32 VPR128:$src)>;
-def : Pat<(v16i8 (bitconvert (v8i16 VPR128:$src))), (v16i8 VPR128:$src)>;
-
-def : Pat<(v2f64 (bitconvert (v4i32 VPR128:$src))), (v2f64 VPR128:$src)>;
-def : Pat<(v2i64 (bitconvert (v4i32 VPR128:$src))), (v2i64 VPR128:$src)>;
-def : Pat<(v4f32 (bitconvert (v4i32 VPR128:$src))), (v4f32 VPR128:$src)>;
-def : Pat<(v8i16 (bitconvert (v4i32 VPR128:$src))), (v8i16 VPR128:$src)>;
-def : Pat<(v16i8 (bitconvert (v4i32 VPR128:$src))), (v16i8 VPR128:$src)>;
-
-def : Pat<(v2f64 (bitconvert (v4f32 VPR128:$src))), (v2f64 VPR128:$src)>;
-def : Pat<(v2i64 (bitconvert (v4f32 VPR128:$src))), (v2i64 VPR128:$src)>;
-def : Pat<(v4i32 (bitconvert (v4f32 VPR128:$src))), (v4i32 VPR128:$src)>;
-def : Pat<(v8i16 (bitconvert (v4f32 VPR128:$src))), (v8i16 VPR128:$src)>;
-def : Pat<(v16i8 (bitconvert (v4f32 VPR128:$src))), (v16i8 VPR128:$src)>;
-
-def : Pat<(v2f64 (bitconvert (v2i64 VPR128:$src))), (v2f64 VPR128:$src)>;
-def : Pat<(v4f32 (bitconvert (v2i64 VPR128:$src))), (v4f32 VPR128:$src)>;
-def : Pat<(v4i32 (bitconvert (v2i64 VPR128:$src))), (v4i32 VPR128:$src)>;
-def : Pat<(v8i16 (bitconvert (v2i64 VPR128:$src))), (v8i16 VPR128:$src)>;
-def : Pat<(v16i8 (bitconvert (v2i64 VPR128:$src))), (v16i8 VPR128:$src)>;
-
-def : Pat<(v2i64 (bitconvert (v2f64 VPR128:$src))), (v2i64 VPR128:$src)>;
-def : Pat<(v4f32 (bitconvert (v2f64 VPR128:$src))), (v4f32 VPR128:$src)>;
-def : Pat<(v4i32 (bitconvert (v2f64 VPR128:$src))), (v4i32 VPR128:$src)>;
-def : Pat<(v8i16 (bitconvert (v2f64 VPR128:$src))), (v8i16 VPR128:$src)>;
-def : Pat<(v16i8 (bitconvert (v2f64 VPR128:$src))), (v16i8 VPR128:$src)>;
-
-// ...and scalar bitcasts...
-def : Pat<(f16 (bitconvert (v1i16 FPR16:$src))), (f16 FPR16:$src)>;
-def : Pat<(f32 (bitconvert (v1i32 FPR32:$src))), (f32 FPR32:$src)>;
-def : Pat<(f64 (bitconvert (v1i64 FPR64:$src))), (f64 FPR64:$src)>;
-def : Pat<(f32 (bitconvert (v1f32 FPR32:$src))), (f32 FPR32:$src)>;
-def : Pat<(f64 (bitconvert (v1f64 FPR64:$src))), (f64 FPR64:$src)>;
-
-def : Pat<(i64 (bitconvert (v1i64 FPR64:$src))), (FMOVxd $src)>;
-def : Pat<(i64 (bitconvert (v1f64 FPR64:$src))), (FMOVxd $src)>;
-def : Pat<(i64 (bitconvert (v2i32 FPR64:$src))), (FMOVxd $src)>;
-def : Pat<(i64 (bitconvert (v2f32 FPR64:$src))), (FMOVxd $src)>;
-def : Pat<(i64 (bitconvert (v4i16 FPR64:$src))), (FMOVxd $src)>;
-def : Pat<(i64 (bitconvert (v8i8 FPR64:$src))), (FMOVxd $src)>;
-
-def : Pat<(i32 (bitconvert (v1i32 FPR32:$src))), (FMOVws $src)>;
-
-def : Pat<(v8i8 (bitconvert (v1i64 VPR64:$src))), (v8i8 VPR64:$src)>;
-def : Pat<(v4i16 (bitconvert (v1i64 VPR64:$src))), (v4i16 VPR64:$src)>;
-def : Pat<(v2i32 (bitconvert (v1i64 VPR64:$src))), (v2i32 VPR64:$src)>;
-
-def : Pat<(f64 (bitconvert (v8i8 VPR64:$src))), (f64 VPR64:$src)>;
-def : Pat<(f64 (bitconvert (v4i16 VPR64:$src))), (f64 VPR64:$src)>;
-def : Pat<(f64 (bitconvert (v2i32 VPR64:$src))), (f64 VPR64:$src)>;
-def : Pat<(f64 (bitconvert (v2f32 VPR64:$src))), (f64 VPR64:$src)>;
-def : Pat<(f64 (bitconvert (v1i64 VPR64:$src))), (f64 VPR64:$src)>;
-
-def : Pat<(f128 (bitconvert (v16i8 VPR128:$src))), (f128 VPR128:$src)>;
-def : Pat<(f128 (bitconvert (v8i16 VPR128:$src))), (f128 VPR128:$src)>;
-def : Pat<(f128 (bitconvert (v4i32 VPR128:$src))), (f128 VPR128:$src)>;
-def : Pat<(f128 (bitconvert (v2i64 VPR128:$src))), (f128 VPR128:$src)>;
-def : Pat<(f128 (bitconvert (v4f32 VPR128:$src))), (f128 VPR128:$src)>;
-def : Pat<(f128 (bitconvert (v2f64 VPR128:$src))), (f128 VPR128:$src)>;
-
-def : Pat<(v1i16 (bitconvert (f16 FPR16:$src))), (v1i16 FPR16:$src)>;
-def : Pat<(v1i32 (bitconvert (f32 FPR32:$src))), (v1i32 FPR32:$src)>;
-def : Pat<(v1i64 (bitconvert (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
-def : Pat<(v1f32 (bitconvert (f32 FPR32:$src))), (v1f32 FPR32:$src)>;
-def : Pat<(v1f64 (bitconvert (f64 FPR64:$src))), (v1f64 FPR64:$src)>;
-
-def : Pat<(v1i64 (bitconvert (i64 GPR64:$src))), (FMOVdx $src)>;
-def : Pat<(v1f64 (bitconvert (i64 GPR64:$src))), (FMOVdx $src)>;
-def : Pat<(v2i32 (bitconvert (i64 GPR64:$src))), (FMOVdx $src)>;
-def : Pat<(v2f32 (bitconvert (i64 GPR64:$src))), (FMOVdx $src)>;
-def : Pat<(v4i16 (bitconvert (i64 GPR64:$src))), (FMOVdx $src)>;
-def : Pat<(v8i8 (bitconvert (i64 GPR64:$src))), (FMOVdx $src)>;
-
-def : Pat<(v1i32 (bitconvert (i32 GPR32:$src))), (FMOVsw $src)>;
-
-def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
-def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
-def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
-def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
-def : Pat<(v1i64 (bitconvert (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
-
-def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))), (v16i8 FPR128:$src)>;
-def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))), (v8i16 FPR128:$src)>;
-def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))), (v4i32 FPR128:$src)>;
-def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))), (v2i64 FPR128:$src)>;
-def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))), (v4f32 FPR128:$src)>;
-def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))), (v2f64 FPR128:$src)>;
-
-// Scalar Three Same
-
-def neon_uimm3 : Operand<i64>,
- ImmLeaf<i64, [{return Imm < 8;}]> {
- let ParserMatchClass = uimm3_asmoperand;
- let PrintMethod = "printUImmHexOperand";
-}
-
-def neon_uimm4 : Operand<i64>,
- ImmLeaf<i64, [{return Imm < 16;}]> {
- let ParserMatchClass = uimm4_asmoperand;
- let PrintMethod = "printUImmHexOperand";
-}
-
-// Bitwise Extract
-class NeonI_Extract<bit q, bits<2> op2, string asmop,
- string OpS, RegisterOperand OpVPR, Operand OpImm>
- : NeonI_BitExtract<q, op2, (outs OpVPR:$Rd),
- (ins OpVPR:$Rn, OpVPR:$Rm, OpImm:$Index),
- asmop # "\t$Rd." # OpS # ", $Rn." # OpS #
- ", $Rm." # OpS # ", $Index",
- [],
- NoItinerary>{
- bits<4> Index;
-}
-
-def EXTvvvi_8b : NeonI_Extract<0b0, 0b00, "ext", "8b",
- VPR64, neon_uimm3> {
- let Inst{14-11} = {0b0, Index{2}, Index{1}, Index{0}};
-}
-
-def EXTvvvi_16b: NeonI_Extract<0b1, 0b00, "ext", "16b",
- VPR128, neon_uimm4> {
- let Inst{14-11} = Index;
-}
-
-class NI_Extract<ValueType OpTy, RegisterOperand OpVPR, Instruction INST,
- Operand OpImm>
- : Pat<(OpTy (Neon_vextract (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm),
- (i64 OpImm:$Imm))),
- (INST OpVPR:$Rn, OpVPR:$Rm, OpImm:$Imm)>;
-
-def : NI_Extract<v8i8, VPR64, EXTvvvi_8b, neon_uimm3>;
-def : NI_Extract<v4i16, VPR64, EXTvvvi_8b, neon_uimm3>;
-def : NI_Extract<v2i32, VPR64, EXTvvvi_8b, neon_uimm3>;
-def : NI_Extract<v1i64, VPR64, EXTvvvi_8b, neon_uimm3>;
-def : NI_Extract<v2f32, VPR64, EXTvvvi_8b, neon_uimm3>;
-def : NI_Extract<v1f64, VPR64, EXTvvvi_8b, neon_uimm3>;
-def : NI_Extract<v16i8, VPR128, EXTvvvi_16b, neon_uimm4>;
-def : NI_Extract<v8i16, VPR128, EXTvvvi_16b, neon_uimm4>;
-def : NI_Extract<v4i32, VPR128, EXTvvvi_16b, neon_uimm4>;
-def : NI_Extract<v2i64, VPR128, EXTvvvi_16b, neon_uimm4>;
-def : NI_Extract<v4f32, VPR128, EXTvvvi_16b, neon_uimm4>;
-def : NI_Extract<v2f64, VPR128, EXTvvvi_16b, neon_uimm4>;
-
-// Table lookup
-class NI_TBL<bit q, bits<2> op2, bits<2> len, bit op,
- string asmop, string OpS, RegisterOperand OpVPR,
- RegisterOperand VecList>
- : NeonI_TBL<q, op2, len, op,
- (outs OpVPR:$Rd), (ins VecList:$Rn, OpVPR:$Rm),
- asmop # "\t$Rd." # OpS # ", $Rn, $Rm." # OpS,
- [],
- NoItinerary>;
-
-// The vectors in the lookup table are always 16b
-multiclass NI_TBL_pat<bits<2> len, bit op, string asmop, string List> {
- def _8b : NI_TBL<0, 0b00, len, op, asmop, "8b", VPR64,
- !cast<RegisterOperand>(List # "16B_operand")>;
-
- def _16b : NI_TBL<1, 0b00, len, op, asmop, "16b", VPR128,
- !cast<RegisterOperand>(List # "16B_operand")>;
-}
-
-defm TBL1 : NI_TBL_pat<0b00, 0b0, "tbl", "VOne">;
-defm TBL2 : NI_TBL_pat<0b01, 0b0, "tbl", "VPair">;
-defm TBL3 : NI_TBL_pat<0b10, 0b0, "tbl", "VTriple">;
-defm TBL4 : NI_TBL_pat<0b11, 0b0, "tbl", "VQuad">;
-
-// Table lookup extension
-class NI_TBX<bit q, bits<2> op2, bits<2> len, bit op,
- string asmop, string OpS, RegisterOperand OpVPR,
- RegisterOperand VecList>
- : NeonI_TBL<q, op2, len, op,
- (outs OpVPR:$Rd), (ins OpVPR:$src, VecList:$Rn, OpVPR:$Rm),
- asmop # "\t$Rd." # OpS # ", $Rn, $Rm." # OpS,
- [],
- NoItinerary> {
- let Constraints = "$src = $Rd";
-}
-
-// The vectors in the lookup table are always 16b
-multiclass NI_TBX_pat<bits<2> len, bit op, string asmop, string List> {
- def _8b : NI_TBX<0, 0b00, len, op, asmop, "8b", VPR64,
- !cast<RegisterOperand>(List # "16B_operand")>;
-
- def _16b : NI_TBX<1, 0b00, len, op, asmop, "16b", VPR128,
- !cast<RegisterOperand>(List # "16B_operand")>;
-}
-
-defm TBX1 : NI_TBX_pat<0b00, 0b1, "tbx", "VOne">;
-defm TBX2 : NI_TBX_pat<0b01, 0b1, "tbx", "VPair">;
-defm TBX3 : NI_TBX_pat<0b10, 0b1, "tbx", "VTriple">;
-defm TBX4 : NI_TBX_pat<0b11, 0b1, "tbx", "VQuad">;
-
-class NeonI_INS_main<string asmop, string Res, ValueType ResTy,
- RegisterClass OpGPR, ValueType OpTy, Operand OpImm>
- : NeonI_copy<0b1, 0b0, 0b0011,
- (outs VPR128:$Rd), (ins VPR128:$src, OpGPR:$Rn, OpImm:$Imm),
- asmop # "\t$Rd." # Res # "[$Imm], $Rn",
- [(set (ResTy VPR128:$Rd),
- (ResTy (vector_insert
- (ResTy VPR128:$src),
- (OpTy OpGPR:$Rn),
- (OpImm:$Imm))))],
- NoItinerary> {
- bits<4> Imm;
- let Constraints = "$src = $Rd";
-}
-
-// Insert element (vector, from main)
-def INSbw : NeonI_INS_main<"ins", "b", v16i8, GPR32, i32,
- neon_uimm4_bare> {
- let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
-}
-def INShw : NeonI_INS_main<"ins", "h", v8i16, GPR32, i32,
- neon_uimm3_bare> {
- let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
-}
-def INSsw : NeonI_INS_main<"ins", "s", v4i32, GPR32, i32,
- neon_uimm2_bare> {
- let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
-}
-def INSdx : NeonI_INS_main<"ins", "d", v2i64, GPR64, i64,
- neon_uimm1_bare> {
- let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
-}
-
-def : NeonInstAlias<"mov $Rd.b[$Imm], $Rn",
- (INSbw VPR128:$Rd, GPR32:$Rn, neon_uimm4_bare:$Imm), 0>;
-def : NeonInstAlias<"mov $Rd.h[$Imm], $Rn",
- (INShw VPR128:$Rd, GPR32:$Rn, neon_uimm3_bare:$Imm), 0>;
-def : NeonInstAlias<"mov $Rd.s[$Imm], $Rn",
- (INSsw VPR128:$Rd, GPR32:$Rn, neon_uimm2_bare:$Imm), 0>;
-def : NeonInstAlias<"mov $Rd.d[$Imm], $Rn",
- (INSdx VPR128:$Rd, GPR64:$Rn, neon_uimm1_bare:$Imm), 0>;
-
-class Neon_INS_main_pattern <ValueType ResTy,ValueType ExtResTy,
- RegisterClass OpGPR, ValueType OpTy,
- Operand OpImm, Instruction INS>
- : Pat<(ResTy (vector_insert
- (ResTy VPR64:$src),
- (OpTy OpGPR:$Rn),
- (OpImm:$Imm))),
- (ResTy (EXTRACT_SUBREG
- (ExtResTy (INS (ExtResTy (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
- OpGPR:$Rn, OpImm:$Imm)), sub_64))>;
-
-def INSbw_pattern : Neon_INS_main_pattern<v8i8, v16i8, GPR32, i32,
- neon_uimm3_bare, INSbw>;
-def INShw_pattern : Neon_INS_main_pattern<v4i16, v8i16, GPR32, i32,
- neon_uimm2_bare, INShw>;
-def INSsw_pattern : Neon_INS_main_pattern<v2i32, v4i32, GPR32, i32,
- neon_uimm1_bare, INSsw>;
-def INSdx_pattern : Neon_INS_main_pattern<v1i64, v2i64, GPR64, i64,
- neon_uimm0_bare, INSdx>;
-
-class NeonI_INS_element<string asmop, string Res, Operand ResImm>
- : NeonI_insert<0b1, 0b1,
- (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn,
- ResImm:$Immd, ResImm:$Immn),
- asmop # "\t$Rd." # Res # "[$Immd], $Rn." # Res # "[$Immn]",
- [],
- NoItinerary> {
- let Constraints = "$src = $Rd";
- bits<4> Immd;
- bits<4> Immn;
-}
-
-// Insert element (vector, from element)
-def INSELb : NeonI_INS_element<"ins", "b", neon_uimm4_bare> {
- let Inst{20-16} = {Immd{3}, Immd{2}, Immd{1}, Immd{0}, 0b1};
- let Inst{14-11} = {Immn{3}, Immn{2}, Immn{1}, Immn{0}};
-}
-def INSELh : NeonI_INS_element<"ins", "h", neon_uimm3_bare> {
- let Inst{20-16} = {Immd{2}, Immd{1}, Immd{0}, 0b1, 0b0};
- let Inst{14-11} = {Immn{2}, Immn{1}, Immn{0}, 0b0};
- // bit 11 is unspecified, but should be set to zero.
-}
-def INSELs : NeonI_INS_element<"ins", "s", neon_uimm2_bare> {
- let Inst{20-16} = {Immd{1}, Immd{0}, 0b1, 0b0, 0b0};
- let Inst{14-11} = {Immn{1}, Immn{0}, 0b0, 0b0};
- // bits 11-12 are unspecified, but should be set to zero.
-}
-def INSELd : NeonI_INS_element<"ins", "d", neon_uimm1_bare> {
- let Inst{20-16} = {Immd, 0b1, 0b0, 0b0, 0b0};
- let Inst{14-11} = {Immn{0}, 0b0, 0b0, 0b0};
- // bits 11-13 are unspecified, but should be set to zero.
-}
-
-def : NeonInstAlias<"mov $Rd.b[$Immd], $Rn.b[$Immn]",
- (INSELb VPR128:$Rd, VPR128:$Rn,
- neon_uimm4_bare:$Immd, neon_uimm4_bare:$Immn), 0>;
-def : NeonInstAlias<"mov $Rd.h[$Immd], $Rn.h[$Immn]",
- (INSELh VPR128:$Rd, VPR128:$Rn,
- neon_uimm3_bare:$Immd, neon_uimm3_bare:$Immn), 0>;
-def : NeonInstAlias<"mov $Rd.s[$Immd], $Rn.s[$Immn]",
- (INSELs VPR128:$Rd, VPR128:$Rn,
- neon_uimm2_bare:$Immd, neon_uimm2_bare:$Immn), 0>;
-def : NeonInstAlias<"mov $Rd.d[$Immd], $Rn.d[$Immn]",
- (INSELd VPR128:$Rd, VPR128:$Rn,
- neon_uimm1_bare:$Immd, neon_uimm1_bare:$Immn), 0>;
-
-multiclass Neon_INS_elt_pattern<ValueType ResTy, ValueType NaTy,
- ValueType MidTy, Operand StImm, Operand NaImm,
- Instruction INS> {
-def : Pat<(ResTy (vector_insert
- (ResTy VPR128:$src),
- (MidTy (vector_extract
- (ResTy VPR128:$Rn),
- (StImm:$Immn))),
- (StImm:$Immd))),
- (INS (ResTy VPR128:$src), (ResTy VPR128:$Rn),
- StImm:$Immd, StImm:$Immn)>;
-
-def : Pat <(ResTy (vector_insert
- (ResTy VPR128:$src),
- (MidTy (vector_extract
- (NaTy VPR64:$Rn),
- (NaImm:$Immn))),
- (StImm:$Immd))),
- (INS (ResTy VPR128:$src),
- (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$Rn), sub_64)),
- StImm:$Immd, NaImm:$Immn)>;
-
-def : Pat <(NaTy (vector_insert
- (NaTy VPR64:$src),
- (MidTy (vector_extract
- (ResTy VPR128:$Rn),
- (StImm:$Immn))),
- (NaImm:$Immd))),
- (NaTy (EXTRACT_SUBREG
- (ResTy (INS
- (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
- (ResTy VPR128:$Rn),
- NaImm:$Immd, StImm:$Immn)),
- sub_64))>;
-
-def : Pat <(NaTy (vector_insert
- (NaTy VPR64:$src),
- (MidTy (vector_extract
- (NaTy VPR64:$Rn),
- (NaImm:$Immn))),
- (NaImm:$Immd))),
- (NaTy (EXTRACT_SUBREG
- (ResTy (INS
- (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
- (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$Rn), sub_64)),
- NaImm:$Immd, NaImm:$Immn)),
- sub_64))>;
-}
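-// Note: the four patterns per element type cover every combination of
-// 128-bit and 64-bit source/destination vectors; 64-bit operands are widened
-// with SUBREG_TO_REG and narrow results are recovered with EXTRACT_SUBREG.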
-
-defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, neon_uimm2_bare,
- neon_uimm1_bare, INSELs>;
-defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, neon_uimm1_bare,
- neon_uimm0_bare, INSELd>;
-defm : Neon_INS_elt_pattern<v16i8, v8i8, i32, neon_uimm4_bare,
- neon_uimm3_bare, INSELb>;
-defm : Neon_INS_elt_pattern<v8i16, v4i16, i32, neon_uimm3_bare,
- neon_uimm2_bare, INSELh>;
-defm : Neon_INS_elt_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
- neon_uimm1_bare, INSELs>;
-defm : Neon_INS_elt_pattern<v2i64, v1i64, i64, neon_uimm1_bare,
- neon_uimm0_bare, INSELd>;
-
-multiclass Neon_INS_elt_float_pattern<ValueType ResTy, ValueType NaTy,
- ValueType MidTy,
- RegisterClass OpFPR, Operand ResImm,
- SubRegIndex SubIndex, Instruction INS> {
-def : Pat <(ResTy (vector_insert
- (ResTy VPR128:$src),
- (MidTy OpFPR:$Rn),
- (ResImm:$Imm))),
- (INS (ResTy VPR128:$src),
- (ResTy (SUBREG_TO_REG (i64 0), OpFPR:$Rn, SubIndex)),
- ResImm:$Imm,
- (i64 0))>;
-
-def : Pat <(NaTy (vector_insert
- (NaTy VPR64:$src),
- (MidTy OpFPR:$Rn),
- (ResImm:$Imm))),
- (NaTy (EXTRACT_SUBREG
- (ResTy (INS
- (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
- (ResTy (SUBREG_TO_REG (i64 0), (MidTy OpFPR:$Rn), SubIndex)),
- ResImm:$Imm,
- (i64 0))),
- sub_64))>;
-}
-
-defm : Neon_INS_elt_float_pattern<v4f32, v2f32, f32, FPR32, neon_uimm2_bare,
- sub_32, INSELs>;
-defm : Neon_INS_elt_float_pattern<v2f64, v1f64, f64, FPR64, neon_uimm1_bare,
- sub_64, INSELd>;
-
-class NeonI_SMOV<string asmop, string Res, bit Q,
- ValueType OpTy, ValueType eleTy,
- Operand OpImm, RegisterClass ResGPR, ValueType ResTy>
- : NeonI_copy<Q, 0b0, 0b0101,
- (outs ResGPR:$Rd), (ins VPR128:$Rn, OpImm:$Imm),
- asmop # "\t$Rd, $Rn." # Res # "[$Imm]",
- [(set (ResTy ResGPR:$Rd),
- (ResTy (sext_inreg
- (ResTy (vector_extract
- (OpTy VPR128:$Rn), (OpImm:$Imm))),
- eleTy)))],
- NoItinerary> {
- bits<4> Imm;
-}
-
-// Signed integer move (main, from element)
-def SMOVwb : NeonI_SMOV<"smov", "b", 0b0, v16i8, i8, neon_uimm4_bare,
- GPR32, i32> {
- let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
-}
-def SMOVwh : NeonI_SMOV<"smov", "h", 0b0, v8i16, i16, neon_uimm3_bare,
- GPR32, i32> {
- let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
-}
-def SMOVxb : NeonI_SMOV<"smov", "b", 0b1, v16i8, i8, neon_uimm4_bare,
- GPR64, i64> {
- let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
-}
-def SMOVxh : NeonI_SMOV<"smov", "h", 0b1, v8i16, i16, neon_uimm3_bare,
- GPR64, i64> {
- let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
-}
-def SMOVxs : NeonI_SMOV<"smov", "s", 0b1, v4i32, i32, neon_uimm2_bare,
- GPR64, i64> {
- let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
-}
-
-multiclass Neon_SMOVx_pattern <ValueType StTy, ValueType NaTy,
- ValueType eleTy, Operand StImm, Operand NaImm,
- Instruction SMOVI> {
- def : Pat<(i64 (sext_inreg
- (i64 (anyext
- (i32 (vector_extract
- (StTy VPR128:$Rn), (StImm:$Imm))))),
- eleTy)),
- (SMOVI VPR128:$Rn, StImm:$Imm)>;
-
- def : Pat<(i64 (sext
- (i32 (vector_extract
- (StTy VPR128:$Rn), (StImm:$Imm))))),
- (SMOVI VPR128:$Rn, StImm:$Imm)>;
-
- def : Pat<(i64 (sext_inreg
- (i64 (vector_extract
- (NaTy VPR64:$Rn), (NaImm:$Imm))),
- eleTy)),
- (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
- NaImm:$Imm)>;
-
- def : Pat<(i64 (sext_inreg
- (i64 (anyext
- (i32 (vector_extract
- (NaTy VPR64:$Rn), (NaImm:$Imm))))),
- eleTy)),
- (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
- NaImm:$Imm)>;
-
- def : Pat<(i64 (sext
- (i32 (vector_extract
- (NaTy VPR64:$Rn), (NaImm:$Imm))))),
- (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
- NaImm:$Imm)>;
-}
-
-defm : Neon_SMOVx_pattern<v16i8, v8i8, i8, neon_uimm4_bare,
- neon_uimm3_bare, SMOVxb>;
-defm : Neon_SMOVx_pattern<v8i16, v4i16, i16, neon_uimm3_bare,
- neon_uimm2_bare, SMOVxh>;
-defm : Neon_SMOVx_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
- neon_uimm1_bare, SMOVxs>;
-
-class Neon_SMOVw_pattern <ValueType StTy, ValueType NaTy,
- ValueType eleTy, Operand StImm, Operand NaImm,
- Instruction SMOVI>
- : Pat<(i32 (sext_inreg
- (i32 (vector_extract
- (NaTy VPR64:$Rn), (NaImm:$Imm))),
- eleTy)),
- (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
- NaImm:$Imm)>;
-
-def : Neon_SMOVw_pattern<v16i8, v8i8, i8, neon_uimm4_bare,
- neon_uimm3_bare, SMOVwb>;
-def : Neon_SMOVw_pattern<v8i16, v4i16, i16, neon_uimm3_bare,
- neon_uimm2_bare, SMOVwh>;
-
-class NeonI_UMOV<string asmop, string Res, bit Q,
- ValueType OpTy, Operand OpImm,
- RegisterClass ResGPR, ValueType ResTy>
- : NeonI_copy<Q, 0b0, 0b0111,
- (outs ResGPR:$Rd), (ins VPR128:$Rn, OpImm:$Imm),
- asmop # "\t$Rd, $Rn." # Res # "[$Imm]",
- [(set (ResTy ResGPR:$Rd),
- (ResTy (vector_extract
- (OpTy VPR128:$Rn), (OpImm:$Imm))))],
- NoItinerary> {
- bits<4> Imm;
-}
-
-// Unsigned integer move (main, from element)
-def UMOVwb : NeonI_UMOV<"umov", "b", 0b0, v16i8, neon_uimm4_bare,
- GPR32, i32> {
- let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
-}
-def UMOVwh : NeonI_UMOV<"umov", "h", 0b0, v8i16, neon_uimm3_bare,
- GPR32, i32> {
- let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
-}
-def UMOVws : NeonI_UMOV<"umov", "s", 0b0, v4i32, neon_uimm2_bare,
- GPR32, i32> {
- let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
-}
-def UMOVxd : NeonI_UMOV<"umov", "d", 0b1, v2i64, neon_uimm1_bare,
- GPR64, i64> {
- let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
-}
-
-def : NeonInstAlias<"mov $Rd, $Rn.s[$Imm]",
- (UMOVws GPR32:$Rd, VPR128:$Rn, neon_uimm2_bare:$Imm), 0>;
-def : NeonInstAlias<"mov $Rd, $Rn.d[$Imm]",
- (UMOVxd GPR64:$Rd, VPR128:$Rn, neon_uimm1_bare:$Imm), 0>;
-
-class Neon_UMOV_pattern <ValueType StTy, ValueType NaTy, ValueType ResTy,
- Operand StImm, Operand NaImm,
- Instruction SMOVI>
- : Pat<(ResTy (vector_extract
- (NaTy VPR64:$Rn), NaImm:$Imm)),
- (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
- NaImm:$Imm)>;
-
-def : Neon_UMOV_pattern<v16i8, v8i8, i32, neon_uimm4_bare,
- neon_uimm3_bare, UMOVwb>;
-def : Neon_UMOV_pattern<v8i16, v4i16, i32, neon_uimm3_bare,
- neon_uimm2_bare, UMOVwh>;
-def : Neon_UMOV_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
- neon_uimm1_bare, UMOVws>;
-
-def : Pat<(i32 (and
- (i32 (vector_extract
- (v16i8 VPR128:$Rn), (neon_uimm4_bare:$Imm))),
- 255)),
- (UMOVwb VPR128:$Rn, neon_uimm4_bare:$Imm)>;
-
-def : Pat<(i32 (and
- (i32 (vector_extract
- (v8i16 VPR128:$Rn), (neon_uimm3_bare:$Imm))),
- 65535)),
- (UMOVwh VPR128:$Rn, neon_uimm3_bare:$Imm)>;
-
-def : Pat<(i64 (zext
- (i32 (vector_extract
- (v2i64 VPR128:$Rn), (neon_uimm1_bare:$Imm))))),
- (UMOVxd VPR128:$Rn, neon_uimm1_bare:$Imm)>;
-
-def : Pat<(i32 (and
- (i32 (vector_extract
- (v8i8 VPR64:$Rn), (neon_uimm3_bare:$Imm))),
- 255)),
- (UMOVwb (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
- neon_uimm3_bare:$Imm)>;
-
-def : Pat<(i32 (and
- (i32 (vector_extract
- (v4i16 VPR64:$Rn), (neon_uimm2_bare:$Imm))),
- 65535)),
- (UMOVwh (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
- neon_uimm2_bare:$Imm)>;
-
-def : Pat<(i64 (zext
- (i32 (vector_extract
- (v1i64 VPR64:$Rn), (neon_uimm0_bare:$Imm))))),
- (UMOVxd (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
- neon_uimm0_bare:$Imm)>;
-
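(Editorial note: a hedged C sketch of why the masking patterns above select UMOV. Extracting a narrow lane and masking it to its element width is exactly the zero-extending lane move that umov performs.)

    #include <stdint.h>

    /* The "(and (vector_extract ...) 255)" patterns above match code of this
     * shape: an unsigned read of byte lane 5, with the zero-extension made
     * explicit by the mask. */
    uint32_t extract_lane5_unsigned(const int8_t v[16]) {
      return ((uint32_t)v[5]) & 255u;
    }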
-// Additional copy patterns for scalar types
-def : Pat<(i32 (vector_extract (v1i8 FPR8:$Rn), (i64 0))),
- (UMOVwb (v16i8
- (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8)), (i64 0))>;
-
-def : Pat<(i32 (vector_extract (v1i16 FPR16:$Rn), (i64 0))),
- (UMOVwh (v8i16
- (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16)), (i64 0))>;
-
-def : Pat<(i32 (vector_extract (v1i32 FPR32:$Rn), (i64 0))),
- (FMOVws FPR32:$Rn)>;
-
-def : Pat<(i64 (vector_extract (v1i64 FPR64:$Rn), (i64 0))),
- (FMOVxd FPR64:$Rn)>;
-
-def : Pat<(f64 (vector_extract (v1f64 FPR64:$Rn), (i64 0))),
- (f64 FPR64:$Rn)>;
-
-def : Pat<(f32 (vector_extract (v1f32 FPR32:$Rn), (i64 0))),
- (f32 FPR32:$Rn)>;
-
-def : Pat<(v1i8 (scalar_to_vector GPR32:$Rn)),
- (v1i8 (EXTRACT_SUBREG (v16i8
- (INSbw (v16i8 (IMPLICIT_DEF)), $Rn, (i64 0))),
- sub_8))>;
-
-def : Pat<(v1i16 (scalar_to_vector GPR32:$Rn)),
- (v1i16 (EXTRACT_SUBREG (v8i16
- (INShw (v8i16 (IMPLICIT_DEF)), $Rn, (i64 0))),
- sub_16))>;
-
-def : Pat<(v1i32 (scalar_to_vector GPR32:$src)),
- (FMOVsw $src)>;
-
-def : Pat<(v1i64 (scalar_to_vector GPR64:$src)),
- (FMOVdx $src)>;
-
-def : Pat<(v1f32 (scalar_to_vector (f32 FPR32:$Rn))),
- (v1f32 FPR32:$Rn)>;
-def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Rn))),
- (v1f64 FPR64:$Rn)>;
-
-def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$src))),
- (FMOVdd $src)>;
-
-def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$src))),
- (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)),
- (f64 FPR64:$src), sub_64)>;
-
-class NeonI_DUP_Elt<bit Q, string asmop, string rdlane, string rnlane,
- RegisterOperand ResVPR, Operand OpImm>
- : NeonI_copy<Q, 0b0, 0b0000, (outs ResVPR:$Rd),
- (ins VPR128:$Rn, OpImm:$Imm),
- asmop # "\t$Rd" # rdlane # ", $Rn" # rnlane # "[$Imm]",
- [],
- NoItinerary> {
- bits<4> Imm;
-}
-
-def DUPELT16b : NeonI_DUP_Elt<0b1, "dup", ".16b", ".b", VPR128,
- neon_uimm4_bare> {
- let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
-}
-
-def DUPELT8h : NeonI_DUP_Elt<0b1, "dup", ".8h", ".h", VPR128,
- neon_uimm3_bare> {
- let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
-}
-
-def DUPELT4s : NeonI_DUP_Elt<0b1, "dup", ".4s", ".s", VPR128,
- neon_uimm2_bare> {
- let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
-}
-
-def DUPELT2d : NeonI_DUP_Elt<0b1, "dup", ".2d", ".d", VPR128,
- neon_uimm1_bare> {
- let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
-}
-
-def DUPELT8b : NeonI_DUP_Elt<0b0, "dup", ".8b", ".b", VPR64,
- neon_uimm4_bare> {
- let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
-}
-
-def DUPELT4h : NeonI_DUP_Elt<0b0, "dup", ".4h", ".h", VPR64,
- neon_uimm3_bare> {
- let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
-}
-
-def DUPELT2s : NeonI_DUP_Elt<0b0, "dup", ".2s", ".s", VPR64,
- neon_uimm2_bare> {
- let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
-}
-
-multiclass NeonI_DUP_Elt_pattern<Instruction DUPELT, ValueType ResTy,
- ValueType OpTy,ValueType NaTy,
- ValueType ExTy, Operand OpLImm,
- Operand OpNImm> {
-def : Pat<(ResTy (Neon_vduplane (OpTy VPR128:$Rn), OpLImm:$Imm)),
- (ResTy (DUPELT (OpTy VPR128:$Rn), OpLImm:$Imm))>;
-
-def : Pat<(ResTy (Neon_vduplane
- (NaTy VPR64:$Rn), OpNImm:$Imm)),
- (ResTy (DUPELT
- (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)), OpNImm:$Imm))>;
-}
-defm : NeonI_DUP_Elt_pattern<DUPELT16b, v16i8, v16i8, v8i8, v16i8,
- neon_uimm4_bare, neon_uimm3_bare>;
-defm : NeonI_DUP_Elt_pattern<DUPELT8b, v8i8, v16i8, v8i8, v16i8,
- neon_uimm4_bare, neon_uimm3_bare>;
-defm : NeonI_DUP_Elt_pattern<DUPELT8h, v8i16, v8i16, v4i16, v8i16,
- neon_uimm3_bare, neon_uimm2_bare>;
-defm : NeonI_DUP_Elt_pattern<DUPELT4h, v4i16, v8i16, v4i16, v8i16,
- neon_uimm3_bare, neon_uimm2_bare>;
-defm : NeonI_DUP_Elt_pattern<DUPELT4s, v4i32, v4i32, v2i32, v4i32,
- neon_uimm2_bare, neon_uimm1_bare>;
-defm : NeonI_DUP_Elt_pattern<DUPELT2s, v2i32, v4i32, v2i32, v4i32,
- neon_uimm2_bare, neon_uimm1_bare>;
-defm : NeonI_DUP_Elt_pattern<DUPELT2d, v2i64, v2i64, v1i64, v2i64,
- neon_uimm1_bare, neon_uimm0_bare>;
-defm : NeonI_DUP_Elt_pattern<DUPELT4s, v4f32, v4f32, v2f32, v4f32,
- neon_uimm2_bare, neon_uimm1_bare>;
-defm : NeonI_DUP_Elt_pattern<DUPELT2s, v2f32, v4f32, v2f32, v4f32,
- neon_uimm2_bare, neon_uimm1_bare>;
-defm : NeonI_DUP_Elt_pattern<DUPELT2d, v2f64, v2f64, v1f64, v2f64,
- neon_uimm1_bare, neon_uimm0_bare>;
-
-def : Pat<(v2f32 (Neon_vdup (f32 FPR32:$Rn))),
- (v2f32 (DUPELT2s
- (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
- (i64 0)))>;
-def : Pat<(v4f32 (Neon_vdup (f32 FPR32:$Rn))),
- (v4f32 (DUPELT4s
- (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
- (i64 0)))>;
-def : Pat<(v2f64 (Neon_vdup (f64 FPR64:$Rn))),
- (v2f64 (DUPELT2d
- (SUBREG_TO_REG (i64 0), FPR64:$Rn, sub_64),
- (i64 0)))>;
-
-class NeonI_DUP<bit Q, string asmop, string rdlane,
- RegisterOperand ResVPR, ValueType ResTy,
- RegisterClass OpGPR, ValueType OpTy>
- : NeonI_copy<Q, 0b0, 0b0001, (outs ResVPR:$Rd), (ins OpGPR:$Rn),
- asmop # "\t$Rd" # rdlane # ", $Rn",
- [(set (ResTy ResVPR:$Rd),
- (ResTy (Neon_vdup (OpTy OpGPR:$Rn))))],
- NoItinerary>;
-
-def DUP16b : NeonI_DUP<0b1, "dup", ".16b", VPR128, v16i8, GPR32, i32> {
- let Inst{20-16} = 0b00001;
- // bits 17-20 are unspecified, but should be set to zero.
-}
-
-def DUP8h : NeonI_DUP<0b1, "dup", ".8h", VPR128, v8i16, GPR32, i32> {
- let Inst{20-16} = 0b00010;
- // bits 18-20 are unspecified, but should be set to zero.
-}
-
-def DUP4s : NeonI_DUP<0b1, "dup", ".4s", VPR128, v4i32, GPR32, i32> {
- let Inst{20-16} = 0b00100;
- // bits 19-20 are unspecified, but should be set to zero.
-}
-
-def DUP2d : NeonI_DUP<0b1, "dup", ".2d", VPR128, v2i64, GPR64, i64> {
- let Inst{20-16} = 0b01000;
- // bit 20 is unspecified, but should be set to zero.
-}
-
-def DUP8b : NeonI_DUP<0b0, "dup", ".8b", VPR64, v8i8, GPR32, i32> {
- let Inst{20-16} = 0b00001;
- // bits 17-20 are unspecified, but should be set to zero.
-}
-
-def DUP4h : NeonI_DUP<0b0, "dup", ".4h", VPR64, v4i16, GPR32, i32> {
- let Inst{20-16} = 0b00010;
- // bits 18-20 are unspecified, but should be set to zero.
-}
-
-def DUP2s : NeonI_DUP<0b0, "dup", ".2s", VPR64, v2i32, GPR32, i32> {
- let Inst{20-16} = 0b00100;
- // bits 19-20 are unspecified, but should be set to zero.
-}
-
-// patterns for CONCAT_VECTORS
-multiclass Concat_Vector_Pattern<ValueType ResTy, ValueType OpTy> {
-def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), undef)),
- (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)>;
-def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), (OpTy VPR64:$Rm))),
- (INSELd
- (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
- (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rm, sub_64)),
- (i64 1),
- (i64 0))>;
-def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), (OpTy VPR64:$Rn))),
- (DUPELT2d
- (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
- (i64 0))> ;
-}
-
-defm : Concat_Vector_Pattern<v16i8, v8i8>;
-defm : Concat_Vector_Pattern<v8i16, v4i16>;
-defm : Concat_Vector_Pattern<v4i32, v2i32>;
-defm : Concat_Vector_Pattern<v2i64, v1i64>;
-defm : Concat_Vector_Pattern<v4f32, v2f32>;
-defm : Concat_Vector_Pattern<v2f64, v1f64>;
-
-// patterns for EXTRACT_SUBVECTOR
-def : Pat<(v8i8 (extract_subvector (v16i8 VPR128:$Rn), (i64 0))),
- (v8i8 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
-def : Pat<(v4i16 (extract_subvector (v8i16 VPR128:$Rn), (i64 0))),
- (v4i16 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
-def : Pat<(v2i32 (extract_subvector (v4i32 VPR128:$Rn), (i64 0))),
- (v2i32 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
-def : Pat<(v1i64 (extract_subvector (v2i64 VPR128:$Rn), (i64 0))),
- (v1i64 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
-def : Pat<(v2f32 (extract_subvector (v4f32 VPR128:$Rn), (i64 0))),
- (v2f32 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
-def : Pat<(v1f64 (extract_subvector (v2f64 VPR128:$Rn), (i64 0))),
- (v1f64 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
-
-// The following are for instruction class (3V Elem)
-
-// Variant 1
-
-class NI_2VE<bit q, bit u, bits<2> size, bits<4> opcode,
- string asmop, string ResS, string OpS, string EleOpS,
- Operand OpImm, RegisterOperand ResVPR,
- RegisterOperand OpVPR, RegisterOperand EleOpVPR>
- : NeonI_2VElem<q, u, size, opcode,
- (outs ResVPR:$Rd), (ins ResVPR:$src, OpVPR:$Rn,
- EleOpVPR:$Re, OpImm:$Index),
- asmop # "\t$Rd." # ResS # ", $Rn." # OpS #
- ", $Re." # EleOpS # "[$Index]",
- [],
- NoItinerary> {
- bits<3> Index;
- bits<5> Re;
-
- let Constraints = "$src = $Rd";
-}
-
-multiclass NI_2VE_v1<bit u, bits<4> opcode, string asmop> {
- // vector register class for element is always 128-bit to cover the max index
- def _2s4s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
- neon_uimm2_bare, VPR64, VPR64, VPR128> {
- let Inst{11} = {Index{1}};
- let Inst{21} = {Index{0}};
- let Inst{20-16} = Re;
- }
-
- def _4s4s : NI_2VE<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
- neon_uimm2_bare, VPR128, VPR128, VPR128> {
- let Inst{11} = {Index{1}};
- let Inst{21} = {Index{0}};
- let Inst{20-16} = Re;
- }
-
- // Index operations on 16-bit(H) elements are restricted to using v0-v15.
- def _4h8h : NI_2VE<0b0, u, 0b01, opcode, asmop, "4h", "4h", "h",
- neon_uimm3_bare, VPR64, VPR64, VPR128Lo> {
- let Inst{11} = {Index{2}};
- let Inst{21} = {Index{1}};
- let Inst{20} = {Index{0}};
- let Inst{19-16} = Re{3-0};
- }
-
- def _8h8h : NI_2VE<0b1, u, 0b01, opcode, asmop, "8h", "8h", "h",
- neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
- let Inst{11} = {Index{2}};
- let Inst{21} = {Index{1}};
- let Inst{20} = {Index{0}};
- let Inst{19-16} = Re{3-0};
- }
-}
-
-defm MLAvve : NI_2VE_v1<0b1, 0b0000, "mla">;
-defm MLSvve : NI_2VE_v1<0b1, 0b0100, "mls">;
-
-// Pattern for lane in 128-bit vector
-class NI_2VE_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
- RegisterOperand ResVPR, RegisterOperand OpVPR,
- RegisterOperand EleOpVPR, ValueType ResTy, ValueType OpTy,
- ValueType EleOpTy>
- : Pat<(ResTy (op (ResTy ResVPR:$src), (OpTy OpVPR:$Rn),
- (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
- (INST ResVPR:$src, OpVPR:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
-
-// Pattern for lane in 64-bit vector
-class NI_2VE_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
- RegisterOperand ResVPR, RegisterOperand OpVPR,
- RegisterOperand EleOpVPR, ValueType ResTy, ValueType OpTy,
- ValueType EleOpTy>
- : Pat<(ResTy (op (ResTy ResVPR:$src), (OpTy OpVPR:$Rn),
- (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
- (INST ResVPR:$src, OpVPR:$Rn,
- (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
-
-multiclass NI_2VE_v1_pat<string subop, SDPatternOperator op>
-{
- def : NI_2VE_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
- op, VPR64, VPR64, VPR128, v2i32, v2i32, v4i32>;
-
- def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
- op, VPR128, VPR128, VPR128, v4i32, v4i32, v4i32>;
-
- def : NI_2VE_laneq<!cast<Instruction>(subop # "_4h8h"), neon_uimm3_bare,
- op, VPR64, VPR64, VPR128Lo, v4i16, v4i16, v8i16>;
-
- def : NI_2VE_laneq<!cast<Instruction>(subop # "_8h8h"), neon_uimm3_bare,
- op, VPR128, VPR128, VPR128Lo, v8i16, v8i16, v8i16>;
-
- // Index can only be half of the max value for lane in 64-bit vector
-
- def : NI_2VE_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
- op, VPR64, VPR64, VPR64, v2i32, v2i32, v2i32>;
-
- def : NI_2VE_lane<!cast<Instruction>(subop # "_4h8h"), neon_uimm2_bare,
- op, VPR64, VPR64, VPR64Lo, v4i16, v4i16, v4i16>;
-}
-
-defm MLA_lane_v1 : NI_2VE_v1_pat<"MLAvve", Neon_mla>;
-defm MLS_lane_v1 : NI_2VE_v1_pat<"MLSvve", Neon_mls>;
-
-class NI_2VE_2op<bit q, bit u, bits<2> size, bits<4> opcode,
- string asmop, string ResS, string OpS, string EleOpS,
- Operand OpImm, RegisterOperand ResVPR,
- RegisterOperand OpVPR, RegisterOperand EleOpVPR>
- : NeonI_2VElem<q, u, size, opcode,
- (outs ResVPR:$Rd), (ins OpVPR:$Rn,
- EleOpVPR:$Re, OpImm:$Index),
- asmop # "\t$Rd." # ResS # ", $Rn." # OpS #
- ", $Re." # EleOpS # "[$Index]",
- [],
- NoItinerary> {
- bits<3> Index;
- bits<5> Re;
-}
-
-multiclass NI_2VE_v1_2op<bit u, bits<4> opcode, string asmop> {
- // vector register class for element is always 128-bit to cover the max index
- def _2s4s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
- neon_uimm2_bare, VPR64, VPR64, VPR128> {
- let Inst{11} = {Index{1}};
- let Inst{21} = {Index{0}};
- let Inst{20-16} = Re;
- }
-
- def _4s4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
- neon_uimm2_bare, VPR128, VPR128, VPR128> {
- let Inst{11} = {Index{1}};
- let Inst{21} = {Index{0}};
- let Inst{20-16} = Re;
- }
-
- // Index operations on 16-bit(H) elements are restricted to using v0-v15.
- def _4h8h : NI_2VE_2op<0b0, u, 0b01, opcode, asmop, "4h", "4h", "h",
- neon_uimm3_bare, VPR64, VPR64, VPR128Lo> {
- let Inst{11} = {Index{2}};
- let Inst{21} = {Index{1}};
- let Inst{20} = {Index{0}};
- let Inst{19-16} = Re{3-0};
- }
-
- def _8h8h : NI_2VE_2op<0b1, u, 0b01, opcode, asmop, "8h", "8h", "h",
- neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
- let Inst{11} = {Index{2}};
- let Inst{21} = {Index{1}};
- let Inst{20} = {Index{0}};
- let Inst{19-16} = Re{3-0};
- }
-}
-
-defm MULve : NI_2VE_v1_2op<0b0, 0b1000, "mul">;
-defm SQDMULHve : NI_2VE_v1_2op<0b0, 0b1100, "sqdmulh">;
-defm SQRDMULHve : NI_2VE_v1_2op<0b0, 0b1101, "sqrdmulh">;
-
-// Pattern for lane in 128-bit vector
-class NI_2VE_mul_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
- RegisterOperand OpVPR, RegisterOperand EleOpVPR,
- ValueType ResTy, ValueType OpTy, ValueType EleOpTy>
- : Pat<(ResTy (op (OpTy OpVPR:$Rn),
- (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
- (INST OpVPR:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
-
-// Pattern for lane in 64-bit vector
-class NI_2VE_mul_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
- RegisterOperand OpVPR, RegisterOperand EleOpVPR,
- ValueType ResTy, ValueType OpTy, ValueType EleOpTy>
- : Pat<(ResTy (op (OpTy OpVPR:$Rn),
- (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
- (INST OpVPR:$Rn,
- (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
-
-multiclass NI_2VE_mul_v1_pat<string subop, SDPatternOperator op> {
- def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
- op, VPR64, VPR128, v2i32, v2i32, v4i32>;
-
- def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
- op, VPR128, VPR128, v4i32, v4i32, v4i32>;
-
- def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4h8h"), neon_uimm3_bare,
- op, VPR64, VPR128Lo, v4i16, v4i16, v8i16>;
-
- def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_8h8h"), neon_uimm3_bare,
- op, VPR128, VPR128Lo, v8i16, v8i16, v8i16>;
-
- // Index can only be half of the max value for lane in 64-bit vector
-
- def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
- op, VPR64, VPR64, v2i32, v2i32, v2i32>;
-
- def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4h8h"), neon_uimm2_bare,
- op, VPR64, VPR64Lo, v4i16, v4i16, v4i16>;
-}
-
-defm MUL_lane_v1 : NI_2VE_mul_v1_pat<"MULve", mul>;
-defm SQDMULH_lane_v1 : NI_2VE_mul_v1_pat<"SQDMULHve", int_arm_neon_vqdmulh>;
-defm SQRDMULH_lane_v1 : NI_2VE_mul_v1_pat<"SQRDMULHve", int_arm_neon_vqrdmulh>;
-
-// Variant 2
-
-multiclass NI_2VE_v2_2op<bit u, bits<4> opcode, string asmop> {
- // vector register class for element is always 128-bit to cover the max index
- def _2s4s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
- neon_uimm2_bare, VPR64, VPR64, VPR128> {
- let Inst{11} = {Index{1}};
- let Inst{21} = {Index{0}};
- let Inst{20-16} = Re;
- }
-
- def _4s4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
- neon_uimm2_bare, VPR128, VPR128, VPR128> {
- let Inst{11} = {Index{1}};
- let Inst{21} = {Index{0}};
- let Inst{20-16} = Re;
- }
-
- // _1d2d doesn't exist!
-
- def _2d2d : NI_2VE_2op<0b1, u, 0b11, opcode, asmop, "2d", "2d", "d",
- neon_uimm1_bare, VPR128, VPR128, VPR128> {
- let Inst{11} = {Index{0}};
- let Inst{21} = 0b0;
- let Inst{20-16} = Re;
- }
-}
-
-defm FMULve : NI_2VE_v2_2op<0b0, 0b1001, "fmul">;
-defm FMULXve : NI_2VE_v2_2op<0b1, 0b1001, "fmulx">;
-
-class NI_2VE_mul_lane_2d<Instruction INST, Operand OpImm, SDPatternOperator op,
- RegisterOperand OpVPR, RegisterOperand EleOpVPR,
- ValueType ResTy, ValueType OpTy, ValueType EleOpTy,
- SDPatternOperator coreop>
- : Pat<(ResTy (op (OpTy OpVPR:$Rn),
- (OpTy (coreop (EleOpTy EleOpVPR:$Re), (EleOpTy EleOpVPR:$Re))))),
- (INST OpVPR:$Rn,
- (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), 0)>;
-
-multiclass NI_2VE_mul_v2_pat<string subop, SDPatternOperator op> {
- def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
- op, VPR64, VPR128, v2f32, v2f32, v4f32>;
-
- def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
- op, VPR128, VPR128, v4f32, v4f32, v4f32>;
-
- def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2d2d"), neon_uimm1_bare,
- op, VPR128, VPR128, v2f64, v2f64, v2f64>;
-
- // Index can only be half of the max value for lane in 64-bit vector
-
- def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
- op, VPR64, VPR64, v2f32, v2f32, v2f32>;
-
- def : NI_2VE_mul_lane_2d<!cast<Instruction>(subop # "_2d2d"), neon_uimm1_bare,
- op, VPR128, VPR64, v2f64, v2f64, v1f64,
- BinOpFrag<(Neon_combine_2d node:$LHS, node:$RHS)>>;
-}
-
-defm FMUL_lane_v2 : NI_2VE_mul_v2_pat<"FMULve", fmul>;
-defm FMULX_lane_v2 : NI_2VE_mul_v2_pat<"FMULXve", int_aarch64_neon_vmulx>;
-
-def : Pat<(v2f32 (fmul (v2f32 (Neon_vdup (f32 FPR32:$Re))),
- (v2f32 VPR64:$Rn))),
- (FMULve_2s4s VPR64:$Rn, (SUBREG_TO_REG (i32 0), $Re, sub_32), 0)>;
-
-def : Pat<(v4f32 (fmul (v4f32 (Neon_vdup (f32 FPR32:$Re))),
- (v4f32 VPR128:$Rn))),
- (FMULve_4s4s VPR128:$Rn, (SUBREG_TO_REG (i32 0), $Re, sub_32), 0)>;
-
-def : Pat<(v2f64 (fmul (v2f64 (Neon_vdup (f64 FPR64:$Re))),
- (v2f64 VPR128:$Rn))),
- (FMULve_2d2d VPR128:$Rn, (SUBREG_TO_REG (i64 0), $Re, sub_64), 0)>;
-
-// The following are patterns using fma
-// -ffp-contract=fast generates fma
-
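(Editorial note, an assumption about front-end behaviour rather than part of the deleted source: clang only forms fma nodes from a separate multiply and add when contraction is permitted, e.g. under -ffp-contract=fast; the by-element patterns defined below then select fmla/fmls with a lane operand.)

    /* With -ffp-contract=fast each iteration becomes an fma node; when the
     * multiplier is a splat of one lane, the FMLAvve patterns below can
     * select "fmla v.4s, v.4s, v.s[lane]". */
    void mla_by_lane(float *acc, const float *a, float b_lane, int n) {
      for (int i = 0; i < n; ++i)
        acc[i] = acc[i] + a[i] * b_lane;   /* contracts to fma per element */
    }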
-multiclass NI_2VE_v2<bit u, bits<4> opcode, string asmop> {
- // vector register class for element is always 128-bit to cover the max index
- def _2s4s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
- neon_uimm2_bare, VPR64, VPR64, VPR128> {
- let Inst{11} = {Index{1}};
- let Inst{21} = {Index{0}};
- let Inst{20-16} = Re;
- }
-
- def _4s4s : NI_2VE<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
- neon_uimm2_bare, VPR128, VPR128, VPR128> {
- let Inst{11} = {Index{1}};
- let Inst{21} = {Index{0}};
- let Inst{20-16} = Re;
- }
-
- // _1d2d doesn't exist!
-
- def _2d2d : NI_2VE<0b1, u, 0b11, opcode, asmop, "2d", "2d", "d",
- neon_uimm1_bare, VPR128, VPR128, VPR128> {
- let Inst{11} = {Index{0}};
- let Inst{21} = 0b0;
- let Inst{20-16} = Re;
- }
-}
-
-defm FMLAvve : NI_2VE_v2<0b0, 0b0001, "fmla">;
-defm FMLSvve : NI_2VE_v2<0b0, 0b0101, "fmls">;
-
-// Pattern for lane in 128-bit vector
-class NI_2VEswap_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
- RegisterOperand ResVPR, RegisterOperand OpVPR,
- ValueType ResTy, ValueType OpTy,
- SDPatternOperator coreop>
- : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (i64 OpImm:$Index))),
- (ResTy ResVPR:$src), (ResTy ResVPR:$Rn))),
- (INST ResVPR:$src, ResVPR:$Rn, OpVPR:$Re, OpImm:$Index)>;
-
-// Pattern for lane 0
-class NI_2VEfma_lane0<Instruction INST, SDPatternOperator op,
- RegisterOperand ResVPR, ValueType ResTy>
- : Pat<(ResTy (op (ResTy ResVPR:$Rn),
- (ResTy (Neon_vdup (f32 FPR32:$Re))),
- (ResTy ResVPR:$src))),
- (INST ResVPR:$src, ResVPR:$Rn,
- (SUBREG_TO_REG (i32 0), $Re, sub_32), 0)>;
-
-// Pattern for lane in 64-bit vector
-class NI_2VEswap_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
- RegisterOperand ResVPR, RegisterOperand OpVPR,
- ValueType ResTy, ValueType OpTy,
- SDPatternOperator coreop>
- : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (i64 OpImm:$Index))),
- (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
- (INST ResVPR:$src, ResVPR:$Rn,
- (SUBREG_TO_REG (i64 0), OpVPR:$Re, sub_64), OpImm:$Index)>;
-
-// Pattern for lane in 64-bit vector
-class NI_2VEswap_lane_2d2d<Instruction INST, Operand OpImm,
- SDPatternOperator op,
- RegisterOperand ResVPR, RegisterOperand OpVPR,
- ValueType ResTy, ValueType OpTy,
- SDPatternOperator coreop>
- : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (OpTy OpVPR:$Re))),
- (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
- (INST ResVPR:$src, ResVPR:$Rn,
- (SUBREG_TO_REG (i64 0), OpVPR:$Re, sub_64), 0)>;
-
-
-multiclass NI_2VE_fma_v2_pat<string subop, SDPatternOperator op> {
- def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
- neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
- BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
-
- def : NI_2VEfma_lane0<!cast<Instruction>(subop # "_2s4s"),
- op, VPR64, v2f32>;
-
- def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
- neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
- BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
-
- def : NI_2VEfma_lane0<!cast<Instruction>(subop # "_4s4s"),
- op, VPR128, v4f32>;
-
- def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
- neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
- BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
-
- // Index can only be half of the max value for lane in 64-bit vector
-
- def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
- neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
- BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
-
- def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
- neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
- BinOpFrag<(Neon_combine_2d node:$LHS, node:$RHS)>>;
-}
-
-defm FMLA_lane_v2_s : NI_2VE_fma_v2_pat<"FMLAvve", fma>;
-
-// Pattern for lane 0
-class NI_2VEfms_lane0<Instruction INST, SDPatternOperator op,
- RegisterOperand ResVPR, ValueType ResTy>
- : Pat<(ResTy (op (ResTy (fneg ResVPR:$Rn)),
- (ResTy (Neon_vdup (f32 FPR32:$Re))),
- (ResTy ResVPR:$src))),
- (INST ResVPR:$src, ResVPR:$Rn,
- (SUBREG_TO_REG (i32 0), $Re, sub_32), 0)>;
-
-multiclass NI_2VE_fms_v2_pat<string subop, SDPatternOperator op>
-{
- def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
- neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
- BinOpFrag<(fneg (Neon_vduplane node:$LHS, node:$RHS))>>;
-
- def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
- neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
- BinOpFrag<(Neon_vduplane
- (fneg node:$LHS), node:$RHS)>>;
-
- def : NI_2VEfms_lane0<!cast<Instruction>(subop # "_2s4s"),
- op, VPR64, v2f32>;
-
- def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
- neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
- BinOpFrag<(fneg (Neon_vduplane
- node:$LHS, node:$RHS))>>;
-
- def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
- neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
- BinOpFrag<(Neon_vduplane
- (fneg node:$LHS), node:$RHS)>>;
-
- def : NI_2VEfms_lane0<!cast<Instruction>(subop # "_4s4s"),
- op, VPR128, v4f32>;
-
- def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
- neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
- BinOpFrag<(fneg (Neon_vduplane
- node:$LHS, node:$RHS))>>;
-
- def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
- neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
- BinOpFrag<(Neon_vduplane
- (fneg node:$LHS), node:$RHS)>>;
-
- // Index can only be half of the max value for lane in 64-bit vector
-
- def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
- neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
- BinOpFrag<(fneg (Neon_vduplane
- node:$LHS, node:$RHS))>>;
-
- def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
- neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
- BinOpFrag<(Neon_vduplane
- (fneg node:$LHS), node:$RHS)>>;
-
- def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
- neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
- BinOpFrag<(fneg (Neon_vduplane node:$LHS, node:$RHS))>>;
-
- def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
- neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
- BinOpFrag<(Neon_vduplane (fneg node:$LHS), node:$RHS)>>;
-
- def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
- neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
- BinOpFrag<(fneg (Neon_combine_2d
- node:$LHS, node:$RHS))>>;
-
- def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
- neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
- BinOpFrag<(Neon_combine_2d
- (fneg node:$LHS), (fneg node:$RHS))>>;
-}
-
-defm FMLS_lane_v2_s : NI_2VE_fms_v2_pat<"FMLSvve", fma>;
-
-// Variant 3: Long type
-// E.g. SMLAL : 4S/4H/H (v0-v15), 2D/2S/S
-// SMLAL2: 4S/8H/H (v0-v15), 2D/4S/S
-
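(Editorial note: a rough C model of the widening semantics the long-type variants implement, added for illustration only.)

    #include <stdint.h>

    /* Scalar model of "smlal v0.4s, v1.4h, v2.h[lane]": multiply each signed
     * 16-bit element of the 64-bit source by one 16-bit lane and accumulate
     * into 32-bit result lanes. */
    void smlal_by_lane(int32_t acc[4], const int16_t a[4], int16_t b_lane) {
      for (int i = 0; i < 4; ++i)
        acc[i] += (int32_t)a[i] * (int32_t)b_lane;
    }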
-multiclass NI_2VE_v3<bit u, bits<4> opcode, string asmop> {
- // vector register class for element is always 128-bit to cover the max index
- def _2d2s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2d", "2s", "s",
- neon_uimm2_bare, VPR128, VPR64, VPR128> {
- let Inst{11} = {Index{1}};
- let Inst{21} = {Index{0}};
- let Inst{20-16} = Re;
- }
-
- def _2d4s : NI_2VE<0b1, u, 0b10, opcode, asmop # "2", "2d", "4s", "s",
- neon_uimm2_bare, VPR128, VPR128, VPR128> {
- let Inst{11} = {Index{1}};
- let Inst{21} = {Index{0}};
- let Inst{20-16} = Re;
- }
-
- // Index operations on 16-bit(H) elements are restricted to using v0-v15.
- def _4s8h : NI_2VE<0b1, u, 0b01, opcode, asmop # "2", "4s", "8h", "h",
- neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
- let Inst{11} = {Index{2}};
- let Inst{21} = {Index{1}};
- let Inst{20} = {Index{0}};
- let Inst{19-16} = Re{3-0};
- }
-
- def _4s4h : NI_2VE<0b0, u, 0b01, opcode, asmop, "4s", "4h", "h",
- neon_uimm3_bare, VPR128, VPR64, VPR128Lo> {
- let Inst{11} = {Index{2}};
- let Inst{21} = {Index{1}};
- let Inst{20} = {Index{0}};
- let Inst{19-16} = Re{3-0};
- }
-}
-
-defm SMLALvve : NI_2VE_v3<0b0, 0b0010, "smlal">;
-defm UMLALvve : NI_2VE_v3<0b1, 0b0010, "umlal">;
-defm SMLSLvve : NI_2VE_v3<0b0, 0b0110, "smlsl">;
-defm UMLSLvve : NI_2VE_v3<0b1, 0b0110, "umlsl">;
-defm SQDMLALvve : NI_2VE_v3<0b0, 0b0011, "sqdmlal">;
-defm SQDMLSLvve : NI_2VE_v3<0b0, 0b0111, "sqdmlsl">;
-
-multiclass NI_2VE_v3_2op<bit u, bits<4> opcode, string asmop> {
- // vector register class for element is always 128-bit to cover the max index
- def _2d2s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2d", "2s", "s",
- neon_uimm2_bare, VPR128, VPR64, VPR128> {
- let Inst{11} = {Index{1}};
- let Inst{21} = {Index{0}};
- let Inst{20-16} = Re;
- }
-
- def _2d4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop # "2", "2d", "4s", "s",
- neon_uimm2_bare, VPR128, VPR128, VPR128> {
- let Inst{11} = {Index{1}};
- let Inst{21} = {Index{0}};
- let Inst{20-16} = Re;
- }
-
- // Index operations on 16-bit(H) elements are restricted to using v0-v15.
- def _4s8h : NI_2VE_2op<0b1, u, 0b01, opcode, asmop # "2", "4s", "8h", "h",
- neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
- let Inst{11} = {Index{2}};
- let Inst{21} = {Index{1}};
- let Inst{20} = {Index{0}};
- let Inst{19-16} = Re{3-0};
- }
-
- def _4s4h : NI_2VE_2op<0b0, u, 0b01, opcode, asmop, "4s", "4h", "h",
- neon_uimm3_bare, VPR128, VPR64, VPR128Lo> {
- let Inst{11} = {Index{2}};
- let Inst{21} = {Index{1}};
- let Inst{20} = {Index{0}};
- let Inst{19-16} = Re{3-0};
- }
-}
-
-defm SMULLve : NI_2VE_v3_2op<0b0, 0b1010, "smull">;
-defm UMULLve : NI_2VE_v3_2op<0b1, 0b1010, "umull">;
-defm SQDMULLve : NI_2VE_v3_2op<0b0, 0b1011, "sqdmull">;
-
-def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$src))),
- (FMOVdd $src)>;
-def : Pat<(v1f32 (scalar_to_vector (f32 FPR32:$src))),
- (FMOVss $src)>;
-
-// Pattern for lane in 128-bit vector
-class NI_2VEL2_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
- RegisterOperand EleOpVPR, ValueType ResTy,
- ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
- SDPatternOperator hiop>
- : Pat<(ResTy (op (ResTy VPR128:$src),
- (HalfOpTy (hiop (OpTy VPR128:$Rn))),
- (HalfOpTy (Neon_vduplane
- (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
- (INST VPR128:$src, VPR128:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
-
-// Pattern for lane in 64-bit vector
-class NI_2VEL2_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
- RegisterOperand EleOpVPR, ValueType ResTy,
- ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
- SDPatternOperator hiop>
- : Pat<(ResTy (op (ResTy VPR128:$src),
- (HalfOpTy (hiop (OpTy VPR128:$Rn))),
- (HalfOpTy (Neon_vduplane
- (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
- (INST VPR128:$src, VPR128:$Rn,
- (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
-
-class NI_2VEL2_lane0<Instruction INST, SDPatternOperator op,
- ValueType ResTy, ValueType OpTy, ValueType HalfOpTy,
- SDPatternOperator hiop, Instruction DupInst>
- : Pat<(ResTy (op (ResTy VPR128:$src),
- (HalfOpTy (hiop (OpTy VPR128:$Rn))),
- (HalfOpTy (Neon_vdup (i32 GPR32:$Re))))),
- (INST VPR128:$src, VPR128:$Rn, (DupInst $Re), 0)>;
-
-multiclass NI_2VEL_v3_pat<string subop, SDPatternOperator op> {
- def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
- op, VPR128, VPR64, VPR128Lo, v4i32, v4i16, v8i16>;
-
- def : NI_2VE_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
- op, VPR128, VPR64, VPR128, v2i64, v2i32, v4i32>;
-
- def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
- op, VPR128Lo, v4i32, v8i16, v8i16, v4i16, Neon_High8H>;
-
- def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
- op, VPR128, v2i64, v4i32, v4i32, v2i32, Neon_High4S>;
-
- def : NI_2VEL2_lane0<!cast<Instruction>(subop # "_4s8h"),
- op, v4i32, v8i16, v4i16, Neon_High8H, DUP8h>;
-
- def : NI_2VEL2_lane0<!cast<Instruction>(subop # "_2d4s"),
- op, v2i64, v4i32, v2i32, Neon_High4S, DUP4s>;
-
- // Index can only be half of the max value for lane in 64-bit vector
-
- def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
- op, VPR128, VPR64, VPR64Lo, v4i32, v4i16, v4i16>;
-
- def : NI_2VE_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
- op, VPR128, VPR64, VPR64, v2i64, v2i32, v2i32>;
-
- def : NI_2VEL2_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
- op, VPR64Lo, v4i32, v8i16, v4i16, v4i16, Neon_High8H>;
-
- def : NI_2VEL2_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
- op, VPR64, v2i64, v4i32, v2i32, v2i32, Neon_High4S>;
-}
-
-defm SMLAL_lane_v3 : NI_2VEL_v3_pat<"SMLALvve", Neon_smlal>;
-defm UMLAL_lane_v3 : NI_2VEL_v3_pat<"UMLALvve", Neon_umlal>;
-defm SMLSL_lane_v3 : NI_2VEL_v3_pat<"SMLSLvve", Neon_smlsl>;
-defm UMLSL_lane_v3 : NI_2VEL_v3_pat<"UMLSLvve", Neon_umlsl>;
-
-// Pattern for lane in 128-bit vector
-class NI_2VEL2_mul_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
- RegisterOperand EleOpVPR, ValueType ResTy,
- ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
- SDPatternOperator hiop>
- : Pat<(ResTy (op
- (HalfOpTy (hiop (OpTy VPR128:$Rn))),
- (HalfOpTy (Neon_vduplane
- (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
- (INST VPR128:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
-
-// Pattern for lane in 64-bit vector
-class NI_2VEL2_mul_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
- RegisterOperand EleOpVPR, ValueType ResTy,
- ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
- SDPatternOperator hiop>
- : Pat<(ResTy (op
- (HalfOpTy (hiop (OpTy VPR128:$Rn))),
- (HalfOpTy (Neon_vduplane
- (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
- (INST VPR128:$Rn,
- (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
-
-// Pattern for fixed lane 0
-class NI_2VEL2_mul_lane0<Instruction INST, SDPatternOperator op,
- ValueType ResTy, ValueType OpTy, ValueType HalfOpTy,
- SDPatternOperator hiop, Instruction DupInst>
- : Pat<(ResTy (op
- (HalfOpTy (hiop (OpTy VPR128:$Rn))),
- (HalfOpTy (Neon_vdup (i32 GPR32:$Re))))),
- (INST VPR128:$Rn, (DupInst $Re), 0)>;
-
-multiclass NI_2VEL_mul_v3_pat<string subop, SDPatternOperator op> {
- def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
- op, VPR64, VPR128Lo, v4i32, v4i16, v8i16>;
-
- def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
- op, VPR64, VPR128, v2i64, v2i32, v4i32>;
-
- def : NI_2VEL2_mul_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
- op, VPR128Lo, v4i32, v8i16, v8i16, v4i16, Neon_High8H>;
-
- def : NI_2VEL2_mul_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
- op, VPR128, v2i64, v4i32, v4i32, v2i32, Neon_High4S>;
-
- def : NI_2VEL2_mul_lane0<!cast<Instruction>(subop # "_4s8h"),
- op, v4i32, v8i16, v4i16, Neon_High8H, DUP8h>;
-
- def : NI_2VEL2_mul_lane0<!cast<Instruction>(subop # "_2d4s"),
- op, v2i64, v4i32, v2i32, Neon_High4S, DUP4s>;
-
- // Index can only be half of the max value for lane in 64-bit vector
-
- def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
- op, VPR64, VPR64Lo, v4i32, v4i16, v4i16>;
-
- def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
- op, VPR64, VPR64, v2i64, v2i32, v2i32>;
-
- def : NI_2VEL2_mul_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
- op, VPR64Lo, v4i32, v8i16, v4i16, v4i16, Neon_High8H>;
-
- def : NI_2VEL2_mul_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
- op, VPR64, v2i64, v4i32, v2i32, v2i32, Neon_High4S>;
-}
-
-defm SMULL_lane_v3 : NI_2VEL_mul_v3_pat<"SMULLve", int_arm_neon_vmulls>;
-defm UMULL_lane_v3 : NI_2VEL_mul_v3_pat<"UMULLve", int_arm_neon_vmullu>;
-defm SQDMULL_lane_v3 : NI_2VEL_mul_v3_pat<"SQDMULLve", int_arm_neon_vqdmull>;
-
-multiclass NI_qdma<SDPatternOperator op> {
- def _4s : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
- (op node:$Ra,
- (v4i32 (int_arm_neon_vqdmull node:$Rn, node:$Rm)))>;
-
- def _2d : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
- (op node:$Ra,
- (v2i64 (int_arm_neon_vqdmull node:$Rn, node:$Rm)))>;
-}
-
-defm Neon_qdmlal : NI_qdma<int_arm_neon_vqadds>;
-defm Neon_qdmlsl : NI_qdma<int_arm_neon_vqsubs>;
-
-multiclass NI_2VEL_v3_qdma_pat<string subop, string op> {
- def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
- !cast<PatFrag>(op # "_4s"), VPR128, VPR64, VPR128Lo,
- v4i32, v4i16, v8i16>;
-
- def : NI_2VE_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
- !cast<PatFrag>(op # "_2d"), VPR128, VPR64, VPR128,
- v2i64, v2i32, v4i32>;
-
- def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
- !cast<PatFrag>(op # "_4s"), VPR128Lo,
- v4i32, v8i16, v8i16, v4i16, Neon_High8H>;
-
- def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
- !cast<PatFrag>(op # "_2d"), VPR128,
- v2i64, v4i32, v4i32, v2i32, Neon_High4S>;
-
- def : NI_2VEL2_lane0<!cast<Instruction>(subop # "_4s8h"),
- !cast<PatFrag>(op # "_4s"),
- v4i32, v8i16, v4i16, Neon_High8H, DUP8h>;
-
- def : NI_2VEL2_lane0<!cast<Instruction>(subop # "_2d4s"),
- !cast<PatFrag>(op # "_2d"),
- v2i64, v4i32, v2i32, Neon_High4S, DUP4s>;
-
- // Index can only be half of the max value for lane in 64-bit vector
-
- def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
- !cast<PatFrag>(op # "_4s"), VPR128, VPR64, VPR64Lo,
- v4i32, v4i16, v4i16>;
-
- def : NI_2VE_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
- !cast<PatFrag>(op # "_2d"), VPR128, VPR64, VPR64,
- v2i64, v2i32, v2i32>;
-
- def : NI_2VEL2_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
- !cast<PatFrag>(op # "_4s"), VPR64Lo,
- v4i32, v8i16, v4i16, v4i16, Neon_High8H>;
-
- def : NI_2VEL2_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
- !cast<PatFrag>(op # "_2d"), VPR64,
- v2i64, v4i32, v2i32, v2i32, Neon_High4S>;
-}
-
-defm SQDMLAL_lane_v3 : NI_2VEL_v3_qdma_pat<"SQDMLALvve", "Neon_qdmlal">;
-defm SQDMLSL_lane_v3 : NI_2VEL_v3_qdma_pat<"SQDMLSLvve", "Neon_qdmlsl">;
-
-// End of implementation for instruction class (3V Elem)
-
-class NeonI_REV<string asmop, string Res, bits<2> size, bit Q, bit U,
- bits<5> opcode, RegisterOperand ResVPR, ValueType ResTy,
- SDPatternOperator Neon_Rev>
- : NeonI_2VMisc<Q, U, size, opcode,
- (outs ResVPR:$Rd), (ins ResVPR:$Rn),
- asmop # "\t$Rd." # Res # ", $Rn." # Res,
- [(set (ResTy ResVPR:$Rd),
- (ResTy (Neon_Rev (ResTy ResVPR:$Rn))))],
- NoItinerary> ;
-
-def REV64_16b : NeonI_REV<"rev64", "16b", 0b00, 0b1, 0b0, 0b00000, VPR128,
- v16i8, Neon_rev64>;
-def REV64_8h : NeonI_REV<"rev64", "8h", 0b01, 0b1, 0b0, 0b00000, VPR128,
- v8i16, Neon_rev64>;
-def REV64_4s : NeonI_REV<"rev64", "4s", 0b10, 0b1, 0b0, 0b00000, VPR128,
- v4i32, Neon_rev64>;
-def REV64_8b : NeonI_REV<"rev64", "8b", 0b00, 0b0, 0b0, 0b00000, VPR64,
- v8i8, Neon_rev64>;
-def REV64_4h : NeonI_REV<"rev64", "4h", 0b01, 0b0, 0b0, 0b00000, VPR64,
- v4i16, Neon_rev64>;
-def REV64_2s : NeonI_REV<"rev64", "2s", 0b10, 0b0, 0b0, 0b00000, VPR64,
- v2i32, Neon_rev64>;
-
-def : Pat<(v4f32 (Neon_rev64 (v4f32 VPR128:$Rn))), (REV64_4s VPR128:$Rn)>;
-def : Pat<(v2f32 (Neon_rev64 (v2f32 VPR64:$Rn))), (REV64_2s VPR64:$Rn)>;
-
-def REV32_16b : NeonI_REV<"rev32", "16b", 0b00, 0b1, 0b1, 0b00000, VPR128,
- v16i8, Neon_rev32>;
-def REV32_8h : NeonI_REV<"rev32", "8h", 0b01, 0b1, 0b1, 0b00000, VPR128,
- v8i16, Neon_rev32>;
-def REV32_8b : NeonI_REV<"rev32", "8b", 0b00, 0b0, 0b1, 0b00000, VPR64,
- v8i8, Neon_rev32>;
-def REV32_4h : NeonI_REV<"rev32", "4h", 0b01, 0b0, 0b1, 0b00000, VPR64,
- v4i16, Neon_rev32>;
-
-def REV16_16b : NeonI_REV<"rev16", "16b", 0b00, 0b1, 0b0, 0b00001, VPR128,
- v16i8, Neon_rev16>;
-def REV16_8b : NeonI_REV<"rev16", "8b", 0b00, 0b0, 0b0, 0b00001, VPR64,
- v8i8, Neon_rev16>;
-
-multiclass NeonI_PairwiseAdd<string asmop, bit U, bits<5> opcode,
- SDPatternOperator Neon_Padd> {
- def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.8h, $Rn.16b",
- [(set (v8i16 VPR128:$Rd),
- (v8i16 (Neon_Padd (v16i8 VPR128:$Rn))))],
- NoItinerary>;
-
- def 8b4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
- (outs VPR64:$Rd), (ins VPR64:$Rn),
- asmop # "\t$Rd.4h, $Rn.8b",
- [(set (v4i16 VPR64:$Rd),
- (v4i16 (Neon_Padd (v8i8 VPR64:$Rn))))],
- NoItinerary>;
-
- def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.4s, $Rn.8h",
- [(set (v4i32 VPR128:$Rd),
- (v4i32 (Neon_Padd (v8i16 VPR128:$Rn))))],
- NoItinerary>;
-
- def 4h2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
- (outs VPR64:$Rd), (ins VPR64:$Rn),
- asmop # "\t$Rd.2s, $Rn.4h",
- [(set (v2i32 VPR64:$Rd),
- (v2i32 (Neon_Padd (v4i16 VPR64:$Rn))))],
- NoItinerary>;
-
- def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.2d, $Rn.4s",
- [(set (v2i64 VPR128:$Rd),
- (v2i64 (Neon_Padd (v4i32 VPR128:$Rn))))],
- NoItinerary>;
-
- def 2s1d : NeonI_2VMisc<0b0, U, 0b10, opcode,
- (outs VPR64:$Rd), (ins VPR64:$Rn),
- asmop # "\t$Rd.1d, $Rn.2s",
- [(set (v1i64 VPR64:$Rd),
- (v1i64 (Neon_Padd (v2i32 VPR64:$Rn))))],
- NoItinerary>;
-}
-
-defm SADDLP : NeonI_PairwiseAdd<"saddlp", 0b0, 0b00010,
- int_arm_neon_vpaddls>;
-defm UADDLP : NeonI_PairwiseAdd<"uaddlp", 0b1, 0b00010,
- int_arm_neon_vpaddlu>;
-
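(Editorial note: a minimal C sketch of the pairwise add-long operation bound to int_arm_neon_vpaddls/vpaddlu above.)

    #include <stdint.h>

    /* Scalar model of "saddlp v0.4h, v1.8b": each 16-bit result lane is the
     * sum of two adjacent sign-extended 8-bit source lanes. */
    void saddlp_8b_4h(int16_t out[4], const int8_t in[8]) {
      for (int i = 0; i < 4; ++i)
        out[i] = (int16_t)(in[2 * i] + in[2 * i + 1]);
    }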
-multiclass NeonI_PairwiseAddAcc<string asmop, bit U, bits<5> opcode,
- SDPatternOperator Neon_Padd> {
- let Constraints = "$src = $Rd" in {
- def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
- (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
- asmop # "\t$Rd.8h, $Rn.16b",
- [(set (v8i16 VPR128:$Rd),
- (v8i16 (Neon_Padd
- (v8i16 VPR128:$src), (v16i8 VPR128:$Rn))))],
- NoItinerary>;
-
- def 8b4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
- (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
- asmop # "\t$Rd.4h, $Rn.8b",
- [(set (v4i16 VPR64:$Rd),
- (v4i16 (Neon_Padd
- (v4i16 VPR64:$src), (v8i8 VPR64:$Rn))))],
- NoItinerary>;
-
- def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
- (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
- asmop # "\t$Rd.4s, $Rn.8h",
- [(set (v4i32 VPR128:$Rd),
- (v4i32 (Neon_Padd
- (v4i32 VPR128:$src), (v8i16 VPR128:$Rn))))],
- NoItinerary>;
-
- def 4h2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
- (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
- asmop # "\t$Rd.2s, $Rn.4h",
- [(set (v2i32 VPR64:$Rd),
- (v2i32 (Neon_Padd
- (v2i32 VPR64:$src), (v4i16 VPR64:$Rn))))],
- NoItinerary>;
-
- def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
- (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
- asmop # "\t$Rd.2d, $Rn.4s",
- [(set (v2i64 VPR128:$Rd),
- (v2i64 (Neon_Padd
- (v2i64 VPR128:$src), (v4i32 VPR128:$Rn))))],
- NoItinerary>;
-
- def 2s1d : NeonI_2VMisc<0b0, U, 0b10, opcode,
- (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
- asmop # "\t$Rd.1d, $Rn.2s",
- [(set (v1i64 VPR64:$Rd),
- (v1i64 (Neon_Padd
- (v1i64 VPR64:$src), (v2i32 VPR64:$Rn))))],
- NoItinerary>;
- }
-}
-
-defm SADALP : NeonI_PairwiseAddAcc<"sadalp", 0b0, 0b00110,
- int_arm_neon_vpadals>;
-defm UADALP : NeonI_PairwiseAddAcc<"uadalp", 0b1, 0b00110,
- int_arm_neon_vpadalu>;
-
-multiclass NeonI_2VMisc_BHSDsize_1Arg<string asmop, bit U, bits<5> opcode> {
- def 16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.16b, $Rn.16b",
- [], NoItinerary>;
-
- def 8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.8h, $Rn.8h",
- [], NoItinerary>;
-
- def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.4s, $Rn.4s",
- [], NoItinerary>;
-
- def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.2d, $Rn.2d",
- [], NoItinerary>;
-
- def 8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
- (outs VPR64:$Rd), (ins VPR64:$Rn),
- asmop # "\t$Rd.8b, $Rn.8b",
- [], NoItinerary>;
-
- def 4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
- (outs VPR64:$Rd), (ins VPR64:$Rn),
- asmop # "\t$Rd.4h, $Rn.4h",
- [], NoItinerary>;
-
- def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
- (outs VPR64:$Rd), (ins VPR64:$Rn),
- asmop # "\t$Rd.2s, $Rn.2s",
- [], NoItinerary>;
-}
-
-defm SQABS : NeonI_2VMisc_BHSDsize_1Arg<"sqabs", 0b0, 0b00111>;
-defm SQNEG : NeonI_2VMisc_BHSDsize_1Arg<"sqneg", 0b1, 0b00111>;
-defm ABS : NeonI_2VMisc_BHSDsize_1Arg<"abs", 0b0, 0b01011>;
-defm NEG : NeonI_2VMisc_BHSDsize_1Arg<"neg", 0b1, 0b01011>;
-
-multiclass NeonI_2VMisc_BHSD_1Arg_Pattern<string Prefix,
- SDPatternOperator Neon_Op> {
- def : Pat<(v16i8 (Neon_Op (v16i8 VPR128:$Rn))),
- (v16i8 (!cast<Instruction>(Prefix # 16b) (v16i8 VPR128:$Rn)))>;
-
- def : Pat<(v8i16 (Neon_Op (v8i16 VPR128:$Rn))),
- (v8i16 (!cast<Instruction>(Prefix # 8h) (v8i16 VPR128:$Rn)))>;
-
- def : Pat<(v4i32 (Neon_Op (v4i32 VPR128:$Rn))),
- (v4i32 (!cast<Instruction>(Prefix # 4s) (v4i32 VPR128:$Rn)))>;
-
- def : Pat<(v2i64 (Neon_Op (v2i64 VPR128:$Rn))),
- (v2i64 (!cast<Instruction>(Prefix # 2d) (v2i64 VPR128:$Rn)))>;
-
- def : Pat<(v8i8 (Neon_Op (v8i8 VPR64:$Rn))),
- (v8i8 (!cast<Instruction>(Prefix # 8b) (v8i8 VPR64:$Rn)))>;
-
- def : Pat<(v4i16 (Neon_Op (v4i16 VPR64:$Rn))),
- (v4i16 (!cast<Instruction>(Prefix # 4h) (v4i16 VPR64:$Rn)))>;
-
- def : Pat<(v2i32 (Neon_Op (v2i32 VPR64:$Rn))),
- (v2i32 (!cast<Instruction>(Prefix # 2s) (v2i32 VPR64:$Rn)))>;
-}
-
-defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"SQABS", int_arm_neon_vqabs>;
-defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"SQNEG", int_arm_neon_vqneg>;
-defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"ABS", int_arm_neon_vabs>;
-
-def : Pat<(v16i8 (sub
- (v16i8 Neon_AllZero),
- (v16i8 VPR128:$Rn))),
- (v16i8 (NEG16b (v16i8 VPR128:$Rn)))>;
-def : Pat<(v8i8 (sub
- (v8i8 Neon_AllZero),
- (v8i8 VPR64:$Rn))),
- (v8i8 (NEG8b (v8i8 VPR64:$Rn)))>;
-def : Pat<(v8i16 (sub
- (v8i16 (bitconvert (v16i8 Neon_AllZero))),
- (v8i16 VPR128:$Rn))),
- (v8i16 (NEG8h (v8i16 VPR128:$Rn)))>;
-def : Pat<(v4i16 (sub
- (v4i16 (bitconvert (v8i8 Neon_AllZero))),
- (v4i16 VPR64:$Rn))),
- (v4i16 (NEG4h (v4i16 VPR64:$Rn)))>;
-def : Pat<(v4i32 (sub
- (v4i32 (bitconvert (v16i8 Neon_AllZero))),
- (v4i32 VPR128:$Rn))),
- (v4i32 (NEG4s (v4i32 VPR128:$Rn)))>;
-def : Pat<(v2i32 (sub
- (v2i32 (bitconvert (v8i8 Neon_AllZero))),
- (v2i32 VPR64:$Rn))),
- (v2i32 (NEG2s (v2i32 VPR64:$Rn)))>;
-def : Pat<(v2i64 (sub
- (v2i64 (bitconvert (v16i8 Neon_AllZero))),
- (v2i64 VPR128:$Rn))),
- (v2i64 (NEG2d (v2i64 VPR128:$Rn)))>;
-
-multiclass NeonI_2VMisc_BHSDsize_2Args<string asmop, bit U, bits<5> opcode> {
- let Constraints = "$src = $Rd" in {
- def 16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
- (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
- asmop # "\t$Rd.16b, $Rn.16b",
- [], NoItinerary>;
-
- def 8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
- (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
- asmop # "\t$Rd.8h, $Rn.8h",
- [], NoItinerary>;
-
- def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
- (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
- asmop # "\t$Rd.4s, $Rn.4s",
- [], NoItinerary>;
-
- def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
- (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
- asmop # "\t$Rd.2d, $Rn.2d",
- [], NoItinerary>;
-
- def 8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
- (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
- asmop # "\t$Rd.8b, $Rn.8b",
- [], NoItinerary>;
-
- def 4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
- (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
- asmop # "\t$Rd.4h, $Rn.4h",
- [], NoItinerary>;
-
- def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
- (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
- asmop # "\t$Rd.2s, $Rn.2s",
- [], NoItinerary>;
- }
-}
-
-defm SUQADD : NeonI_2VMisc_BHSDsize_2Args<"suqadd", 0b0, 0b00011>;
-defm USQADD : NeonI_2VMisc_BHSDsize_2Args<"usqadd", 0b1, 0b00011>;
-
-multiclass NeonI_2VMisc_BHSD_2Args_Pattern<string Prefix,
- SDPatternOperator Neon_Op> {
- def : Pat<(v16i8 (Neon_Op (v16i8 VPR128:$src), (v16i8 VPR128:$Rn))),
- (v16i8 (!cast<Instruction>(Prefix # 16b)
- (v16i8 VPR128:$src), (v16i8 VPR128:$Rn)))>;
-
- def : Pat<(v8i16 (Neon_Op (v8i16 VPR128:$src), (v8i16 VPR128:$Rn))),
- (v8i16 (!cast<Instruction>(Prefix # 8h)
- (v8i16 VPR128:$src), (v8i16 VPR128:$Rn)))>;
-
- def : Pat<(v4i32 (Neon_Op (v4i32 VPR128:$src), (v4i32 VPR128:$Rn))),
- (v4i32 (!cast<Instruction>(Prefix # 4s)
- (v4i32 VPR128:$src), (v4i32 VPR128:$Rn)))>;
-
- def : Pat<(v2i64 (Neon_Op (v2i64 VPR128:$src), (v2i64 VPR128:$Rn))),
- (v2i64 (!cast<Instruction>(Prefix # 2d)
- (v2i64 VPR128:$src), (v2i64 VPR128:$Rn)))>;
-
- def : Pat<(v8i8 (Neon_Op (v8i8 VPR64:$src), (v8i8 VPR64:$Rn))),
- (v8i8 (!cast<Instruction>(Prefix # 8b)
- (v8i8 VPR64:$src), (v8i8 VPR64:$Rn)))>;
-
- def : Pat<(v4i16 (Neon_Op (v4i16 VPR64:$src), (v4i16 VPR64:$Rn))),
- (v4i16 (!cast<Instruction>(Prefix # 4h)
- (v4i16 VPR64:$src), (v4i16 VPR64:$Rn)))>;
-
- def : Pat<(v2i32 (Neon_Op (v2i32 VPR64:$src), (v2i32 VPR64:$Rn))),
- (v2i32 (!cast<Instruction>(Prefix # 2s)
- (v2i32 VPR64:$src), (v2i32 VPR64:$Rn)))>;
-}
-
-defm : NeonI_2VMisc_BHSD_2Args_Pattern<"SUQADD", int_aarch64_neon_suqadd>;
-defm : NeonI_2VMisc_BHSD_2Args_Pattern<"USQADD", int_aarch64_neon_usqadd>;
-
-multiclass NeonI_2VMisc_BHSsizes<string asmop, bit U,
- SDPatternOperator Neon_Op> {
- def 16b : NeonI_2VMisc<0b1, U, 0b00, 0b00100,
- (outs VPR128:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.16b, $Rn.16b",
- [(set (v16i8 VPR128:$Rd),
- (v16i8 (Neon_Op (v16i8 VPR128:$Rn))))],
- NoItinerary>;
-
- def 8h : NeonI_2VMisc<0b1, U, 0b01, 0b00100,
- (outs VPR128:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.8h, $Rn.8h",
- [(set (v8i16 VPR128:$Rd),
- (v8i16 (Neon_Op (v8i16 VPR128:$Rn))))],
- NoItinerary>;
-
- def 4s : NeonI_2VMisc<0b1, U, 0b10, 0b00100,
- (outs VPR128:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.4s, $Rn.4s",
- [(set (v4i32 VPR128:$Rd),
- (v4i32 (Neon_Op (v4i32 VPR128:$Rn))))],
- NoItinerary>;
-
- def 8b : NeonI_2VMisc<0b0, U, 0b00, 0b00100,
- (outs VPR64:$Rd), (ins VPR64:$Rn),
- asmop # "\t$Rd.8b, $Rn.8b",
- [(set (v8i8 VPR64:$Rd),
- (v8i8 (Neon_Op (v8i8 VPR64:$Rn))))],
- NoItinerary>;
-
- def 4h : NeonI_2VMisc<0b0, U, 0b01, 0b00100,
- (outs VPR64:$Rd), (ins VPR64:$Rn),
- asmop # "\t$Rd.4h, $Rn.4h",
- [(set (v4i16 VPR64:$Rd),
- (v4i16 (Neon_Op (v4i16 VPR64:$Rn))))],
- NoItinerary>;
-
- def 2s : NeonI_2VMisc<0b0, U, 0b10, 0b00100,
- (outs VPR64:$Rd), (ins VPR64:$Rn),
- asmop # "\t$Rd.2s, $Rn.2s",
- [(set (v2i32 VPR64:$Rd),
- (v2i32 (Neon_Op (v2i32 VPR64:$Rn))))],
- NoItinerary>;
-}
-
-defm CLS : NeonI_2VMisc_BHSsizes<"cls", 0b0, int_arm_neon_vcls>;
-defm CLZ : NeonI_2VMisc_BHSsizes<"clz", 0b1, ctlz>;
-
-multiclass NeonI_2VMisc_Bsize<string asmop, bit U, bits<2> size,
- bits<5> Opcode> {
- def 16b : NeonI_2VMisc<0b1, U, size, Opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.16b, $Rn.16b",
- [], NoItinerary>;
-
- def 8b : NeonI_2VMisc<0b0, U, size, Opcode,
- (outs VPR64:$Rd), (ins VPR64:$Rn),
- asmop # "\t$Rd.8b, $Rn.8b",
- [], NoItinerary>;
-}
-
-defm CNT : NeonI_2VMisc_Bsize<"cnt", 0b0, 0b00, 0b00101>;
-defm NOT : NeonI_2VMisc_Bsize<"not", 0b1, 0b00, 0b00101>;
-defm RBIT : NeonI_2VMisc_Bsize<"rbit", 0b1, 0b01, 0b00101>;
-
-def : NeonInstAlias<"mvn $Rd.16b, $Rn.16b",
- (NOT16b VPR128:$Rd, VPR128:$Rn), 0>;
-def : NeonInstAlias<"mvn $Rd.8b, $Rn.8b",
- (NOT8b VPR64:$Rd, VPR64:$Rn), 0>;
-
-def : Pat<(v16i8 (ctpop (v16i8 VPR128:$Rn))),
- (v16i8 (CNT16b (v16i8 VPR128:$Rn)))>;
-def : Pat<(v8i8 (ctpop (v8i8 VPR64:$Rn))),
- (v8i8 (CNT8b (v8i8 VPR64:$Rn)))>;
-
-def : Pat<(v16i8 (xor
- (v16i8 VPR128:$Rn),
- (v16i8 Neon_AllOne))),
- (v16i8 (NOT16b (v16i8 VPR128:$Rn)))>;
-def : Pat<(v8i8 (xor
- (v8i8 VPR64:$Rn),
- (v8i8 Neon_AllOne))),
- (v8i8 (NOT8b (v8i8 VPR64:$Rn)))>;
-def : Pat<(v8i16 (xor
- (v8i16 VPR128:$Rn),
- (v8i16 (bitconvert (v16i8 Neon_AllOne))))),
- (NOT16b VPR128:$Rn)>;
-def : Pat<(v4i16 (xor
- (v4i16 VPR64:$Rn),
- (v4i16 (bitconvert (v8i8 Neon_AllOne))))),
- (NOT8b VPR64:$Rn)>;
-def : Pat<(v4i32 (xor
- (v4i32 VPR128:$Rn),
- (v4i32 (bitconvert (v16i8 Neon_AllOne))))),
- (NOT16b VPR128:$Rn)>;
-def : Pat<(v2i32 (xor
- (v2i32 VPR64:$Rn),
- (v2i32 (bitconvert (v8i8 Neon_AllOne))))),
- (NOT8b VPR64:$Rn)>;
-def : Pat<(v2i64 (xor
- (v2i64 VPR128:$Rn),
- (v2i64 (bitconvert (v16i8 Neon_AllOne))))),
- (NOT16b VPR128:$Rn)>;
-
-def : Pat<(v16i8 (int_aarch64_neon_rbit (v16i8 VPR128:$Rn))),
- (v16i8 (RBIT16b (v16i8 VPR128:$Rn)))>;
-def : Pat<(v8i8 (int_aarch64_neon_rbit (v8i8 VPR64:$Rn))),
- (v8i8 (RBIT8b (v8i8 VPR64:$Rn)))>;
-
-multiclass NeonI_2VMisc_SDsizes<string asmop, bit U, bits<5> opcode,
- SDPatternOperator Neon_Op> {
- def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.4s, $Rn.4s",
- [(set (v4f32 VPR128:$Rd),
- (v4f32 (Neon_Op (v4f32 VPR128:$Rn))))],
- NoItinerary>;
-
- def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.2d, $Rn.2d",
- [(set (v2f64 VPR128:$Rd),
- (v2f64 (Neon_Op (v2f64 VPR128:$Rn))))],
- NoItinerary>;
-
- def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
- (outs VPR64:$Rd), (ins VPR64:$Rn),
- asmop # "\t$Rd.2s, $Rn.2s",
- [(set (v2f32 VPR64:$Rd),
- (v2f32 (Neon_Op (v2f32 VPR64:$Rn))))],
- NoItinerary>;
-}
-
-defm FABS : NeonI_2VMisc_SDsizes<"fabs", 0b0, 0b01111, fabs>;
-defm FNEG : NeonI_2VMisc_SDsizes<"fneg", 0b1, 0b01111, fneg>;
-
-multiclass NeonI_2VMisc_HSD_Narrow<string asmop, bit U, bits<5> opcode> {
- def 8h8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
- (outs VPR64:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.8b, $Rn.8h",
- [], NoItinerary>;
-
- def 4s4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
- (outs VPR64:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.4h, $Rn.4s",
- [], NoItinerary>;
-
- def 2d2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
- (outs VPR64:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.2s, $Rn.2d",
- [], NoItinerary>;
-
- let Constraints = "$Rd = $src" in {
- def 8h16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
- (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
- asmop # "2\t$Rd.16b, $Rn.8h",
- [], NoItinerary>;
-
- def 4s8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
- (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
- asmop # "2\t$Rd.8h, $Rn.4s",
- [], NoItinerary>;
-
- def 2d4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
- (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
- asmop # "2\t$Rd.4s, $Rn.2d",
- [], NoItinerary>;
- }
-}
-
-defm XTN : NeonI_2VMisc_HSD_Narrow<"xtn", 0b0, 0b10010>;
-defm SQXTUN : NeonI_2VMisc_HSD_Narrow<"sqxtun", 0b1, 0b10010>;
-defm SQXTN : NeonI_2VMisc_HSD_Narrow<"sqxtn", 0b0, 0b10100>;
-defm UQXTN : NeonI_2VMisc_HSD_Narrow<"uqxtn", 0b1, 0b10100>;
-
-multiclass NeonI_2VMisc_Narrow_Patterns<string Prefix,
- SDPatternOperator Neon_Op> {
- def : Pat<(v8i8 (Neon_Op (v8i16 VPR128:$Rn))),
- (v8i8 (!cast<Instruction>(Prefix # 8h8b) (v8i16 VPR128:$Rn)))>;
-
- def : Pat<(v4i16 (Neon_Op (v4i32 VPR128:$Rn))),
- (v4i16 (!cast<Instruction>(Prefix # 4s4h) (v4i32 VPR128:$Rn)))>;
-
- def : Pat<(v2i32 (Neon_Op (v2i64 VPR128:$Rn))),
- (v2i32 (!cast<Instruction>(Prefix # 2d2s) (v2i64 VPR128:$Rn)))>;
-
- def : Pat<(v16i8 (concat_vectors
- (v8i8 VPR64:$src),
- (v8i8 (Neon_Op (v8i16 VPR128:$Rn))))),
- (!cast<Instruction>(Prefix # 8h16b)
- (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
- VPR128:$Rn)>;
-
- def : Pat<(v8i16 (concat_vectors
- (v4i16 VPR64:$src),
- (v4i16 (Neon_Op (v4i32 VPR128:$Rn))))),
- (!cast<Instruction>(Prefix # 4s8h)
- (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
- VPR128:$Rn)>;
-
- def : Pat<(v4i32 (concat_vectors
- (v2i32 VPR64:$src),
- (v2i32 (Neon_Op (v2i64 VPR128:$Rn))))),
- (!cast<Instruction>(Prefix # 2d4s)
- (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
- VPR128:$Rn)>;
-}
-
-defm : NeonI_2VMisc_Narrow_Patterns<"XTN", trunc>;
-defm : NeonI_2VMisc_Narrow_Patterns<"SQXTUN", int_arm_neon_vqmovnsu>;
-defm : NeonI_2VMisc_Narrow_Patterns<"SQXTN", int_arm_neon_vqmovns>;
-defm : NeonI_2VMisc_Narrow_Patterns<"UQXTN", int_arm_neon_vqmovnu>;
-
-multiclass NeonI_2VMisc_SHIFT<string asmop, bit U, bits<5> opcode> {
- let DecoderMethod = "DecodeSHLLInstruction" in {
- def 8b8h : NeonI_2VMisc<0b0, U, 0b00, opcode,
- (outs VPR128:$Rd),
- (ins VPR64:$Rn, uimm_exact8:$Imm),
- asmop # "\t$Rd.8h, $Rn.8b, $Imm",
- [], NoItinerary>;
-
- def 4h4s : NeonI_2VMisc<0b0, U, 0b01, opcode,
- (outs VPR128:$Rd),
- (ins VPR64:$Rn, uimm_exact16:$Imm),
- asmop # "\t$Rd.4s, $Rn.4h, $Imm",
- [], NoItinerary>;
-
- def 2s2d : NeonI_2VMisc<0b0, U, 0b10, opcode,
- (outs VPR128:$Rd),
- (ins VPR64:$Rn, uimm_exact32:$Imm),
- asmop # "\t$Rd.2d, $Rn.2s, $Imm",
- [], NoItinerary>;
-
- def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
- (outs VPR128:$Rd),
- (ins VPR128:$Rn, uimm_exact8:$Imm),
- asmop # "2\t$Rd.8h, $Rn.16b, $Imm",
- [], NoItinerary>;
-
- def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
- (outs VPR128:$Rd),
- (ins VPR128:$Rn, uimm_exact16:$Imm),
- asmop # "2\t$Rd.4s, $Rn.8h, $Imm",
- [], NoItinerary>;
-
- def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
- (outs VPR128:$Rd),
- (ins VPR128:$Rn, uimm_exact32:$Imm),
- asmop # "2\t$Rd.2d, $Rn.4s, $Imm",
- [], NoItinerary>;
- }
-}
-
-defm SHLL : NeonI_2VMisc_SHIFT<"shll", 0b1, 0b10011>;
-
-class NeonI_SHLL_Patterns<ValueType OpTy, ValueType DesTy,
- SDPatternOperator ExtOp, Operand Neon_Imm,
- string suffix>
- : Pat<(DesTy (shl
- (DesTy (ExtOp (OpTy VPR64:$Rn))),
- (DesTy (Neon_vdup
- (i32 Neon_Imm:$Imm))))),
- (!cast<Instruction>("SHLL" # suffix) VPR64:$Rn, Neon_Imm:$Imm)>;
-
-class NeonI_SHLL_High_Patterns<ValueType OpTy, ValueType DesTy,
- SDPatternOperator ExtOp, Operand Neon_Imm,
- string suffix, PatFrag GetHigh>
- : Pat<(DesTy (shl
- (DesTy (ExtOp
- (OpTy (GetHigh VPR128:$Rn)))),
- (DesTy (Neon_vdup
- (i32 Neon_Imm:$Imm))))),
- (!cast<Instruction>("SHLL" # suffix) VPR128:$Rn, Neon_Imm:$Imm)>;
-
-def : NeonI_SHLL_Patterns<v8i8, v8i16, zext, uimm_exact8, "8b8h">;
-def : NeonI_SHLL_Patterns<v8i8, v8i16, sext, uimm_exact8, "8b8h">;
-def : NeonI_SHLL_Patterns<v4i16, v4i32, zext, uimm_exact16, "4h4s">;
-def : NeonI_SHLL_Patterns<v4i16, v4i32, sext, uimm_exact16, "4h4s">;
-def : NeonI_SHLL_Patterns<v2i32, v2i64, zext, uimm_exact32, "2s2d">;
-def : NeonI_SHLL_Patterns<v2i32, v2i64, sext, uimm_exact32, "2s2d">;
-def : NeonI_SHLL_High_Patterns<v8i8, v8i16, zext, uimm_exact8, "16b8h",
- Neon_High16B>;
-def : NeonI_SHLL_High_Patterns<v8i8, v8i16, sext, uimm_exact8, "16b8h",
- Neon_High16B>;
-def : NeonI_SHLL_High_Patterns<v4i16, v4i32, zext, uimm_exact16, "8h4s",
- Neon_High8H>;
-def : NeonI_SHLL_High_Patterns<v4i16, v4i32, sext, uimm_exact16, "8h4s",
- Neon_High8H>;
-def : NeonI_SHLL_High_Patterns<v2i32, v2i64, zext, uimm_exact32, "4s2d",
- Neon_High4S>;
-def : NeonI_SHLL_High_Patterns<v2i32, v2i64, sext, uimm_exact32, "4s2d",
- Neon_High4S>;
-
-multiclass NeonI_2VMisc_SD_Narrow<string asmop, bit U, bits<5> opcode> {
- def 4s4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
- (outs VPR64:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.4h, $Rn.4s",
- [], NoItinerary>;
-
- def 2d2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
- (outs VPR64:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.2s, $Rn.2d",
- [], NoItinerary>;
-
- let Constraints = "$src = $Rd" in {
- def 4s8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
- (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
- asmop # "2\t$Rd.8h, $Rn.4s",
- [], NoItinerary>;
-
- def 2d4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
- (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
- asmop # "2\t$Rd.4s, $Rn.2d",
- [], NoItinerary>;
- }
-}
-
-defm FCVTN : NeonI_2VMisc_SD_Narrow<"fcvtn", 0b0, 0b10110>;
-
-multiclass NeonI_2VMisc_Narrow_Pattern<string prefix,
- SDPatternOperator f32_to_f16_Op,
- SDPatternOperator f64_to_f32_Op> {
-
- def : Pat<(v4i16 (f32_to_f16_Op (v4f32 VPR128:$Rn))),
- (!cast<Instruction>(prefix # "4s4h") (v4f32 VPR128:$Rn))>;
-
- def : Pat<(v8i16 (concat_vectors
- (v4i16 VPR64:$src),
- (v4i16 (f32_to_f16_Op (v4f32 VPR128:$Rn))))),
- (!cast<Instruction>(prefix # "4s8h")
- (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
- (v4f32 VPR128:$Rn))>;
-
- def : Pat<(v2f32 (f64_to_f32_Op (v2f64 VPR128:$Rn))),
- (!cast<Instruction>(prefix # "2d2s") (v2f64 VPR128:$Rn))>;
-
- def : Pat<(v4f32 (concat_vectors
- (v2f32 VPR64:$src),
- (v2f32 (f64_to_f32_Op (v2f64 VPR128:$Rn))))),
- (!cast<Instruction>(prefix # "2d4s")
- (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
- (v2f64 VPR128:$Rn))>;
-}
-
-defm : NeonI_2VMisc_Narrow_Pattern<"FCVTN", int_arm_neon_vcvtfp2hf, fround>;
-
-multiclass NeonI_2VMisc_D_Narrow<string asmop, string prefix, bit U,
- bits<5> opcode> {
- def 2d2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
- (outs VPR64:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.2s, $Rn.2d",
- [], NoItinerary>;
-
- def 2d4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
- (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
- asmop # "2\t$Rd.4s, $Rn.2d",
- [], NoItinerary> {
- let Constraints = "$src = $Rd";
- }
-
- def : Pat<(v2f32 (int_aarch64_neon_fcvtxn (v2f64 VPR128:$Rn))),
- (!cast<Instruction>(prefix # "2d2s") VPR128:$Rn)>;
-
- def : Pat<(v4f32 (concat_vectors
- (v2f32 VPR64:$src),
- (v2f32 (int_aarch64_neon_fcvtxn (v2f64 VPR128:$Rn))))),
- (!cast<Instruction>(prefix # "2d4s")
- (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
- VPR128:$Rn)>;
-}
-
-defm FCVTXN : NeonI_2VMisc_D_Narrow<"fcvtxn","FCVTXN", 0b1, 0b10110>;
-
-def Neon_High4Float : PatFrag<(ops node:$in),
- (extract_subvector (v4f32 node:$in), (iPTR 2))>;
-
-multiclass NeonI_2VMisc_HS_Extend<string asmop, bit U, bits<5> opcode> {
- def 4h4s : NeonI_2VMisc<0b0, U, 0b00, opcode,
- (outs VPR128:$Rd), (ins VPR64:$Rn),
- asmop # "\t$Rd.4s, $Rn.4h",
- [], NoItinerary>;
-
- def 2s2d : NeonI_2VMisc<0b0, U, 0b01, opcode,
- (outs VPR128:$Rd), (ins VPR64:$Rn),
- asmop # "\t$Rd.2d, $Rn.2s",
- [], NoItinerary>;
-
- def 8h4s : NeonI_2VMisc<0b1, U, 0b00, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn),
- asmop # "2\t$Rd.4s, $Rn.8h",
- [], NoItinerary>;
-
- def 4s2d : NeonI_2VMisc<0b1, U, 0b01, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn),
- asmop # "2\t$Rd.2d, $Rn.4s",
- [], NoItinerary>;
-}
-
-defm FCVTL : NeonI_2VMisc_HS_Extend<"fcvtl", 0b0, 0b10111>;
-
-multiclass NeonI_2VMisc_Extend_Pattern<string prefix> {
- def : Pat<(v4f32 (int_arm_neon_vcvthf2fp (v4i16 VPR64:$Rn))),
- (!cast<Instruction>(prefix # "4h4s") VPR64:$Rn)>;
-
- def : Pat<(v4f32 (int_arm_neon_vcvthf2fp
- (v4i16 (Neon_High8H
- (v8i16 VPR128:$Rn))))),
- (!cast<Instruction>(prefix # "8h4s") VPR128:$Rn)>;
-
- def : Pat<(v2f64 (fextend (v2f32 VPR64:$Rn))),
- (!cast<Instruction>(prefix # "2s2d") VPR64:$Rn)>;
-
- def : Pat<(v2f64 (fextend
- (v2f32 (Neon_High4Float
- (v4f32 VPR128:$Rn))))),
- (!cast<Instruction>(prefix # "4s2d") VPR128:$Rn)>;
-}
-
-defm : NeonI_2VMisc_Extend_Pattern<"FCVTL">;
-
-multiclass NeonI_2VMisc_SD_Conv<string asmop, bit Size, bit U, bits<5> opcode,
- ValueType ResTy4s, ValueType OpTy4s,
- ValueType ResTy2d, ValueType OpTy2d,
- ValueType ResTy2s, ValueType OpTy2s,
- SDPatternOperator Neon_Op> {
-
- def 4s : NeonI_2VMisc<0b1, U, {Size, 0b0}, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.4s, $Rn.4s",
- [(set (ResTy4s VPR128:$Rd),
- (ResTy4s (Neon_Op (OpTy4s VPR128:$Rn))))],
- NoItinerary>;
-
- def 2d : NeonI_2VMisc<0b1, U, {Size, 0b1}, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.2d, $Rn.2d",
- [(set (ResTy2d VPR128:$Rd),
- (ResTy2d (Neon_Op (OpTy2d VPR128:$Rn))))],
- NoItinerary>;
-
- def 2s : NeonI_2VMisc<0b0, U, {Size, 0b0}, opcode,
- (outs VPR64:$Rd), (ins VPR64:$Rn),
- asmop # "\t$Rd.2s, $Rn.2s",
- [(set (ResTy2s VPR64:$Rd),
- (ResTy2s (Neon_Op (OpTy2s VPR64:$Rn))))],
- NoItinerary>;
-}
-
-multiclass NeonI_2VMisc_fp_to_int<string asmop, bit Size, bit U,
- bits<5> opcode, SDPatternOperator Neon_Op> {
- defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4i32, v4f32, v2i64,
- v2f64, v2i32, v2f32, Neon_Op>;
-}
-
-defm FCVTNS : NeonI_2VMisc_fp_to_int<"fcvtns", 0b0, 0b0, 0b11010,
- int_aarch64_neon_fcvtns>;
-defm FCVTNU : NeonI_2VMisc_fp_to_int<"fcvtnu", 0b0, 0b1, 0b11010,
- int_aarch64_neon_fcvtnu>;
-defm FCVTPS : NeonI_2VMisc_fp_to_int<"fcvtps", 0b1, 0b0, 0b11010,
- int_aarch64_neon_fcvtps>;
-defm FCVTPU : NeonI_2VMisc_fp_to_int<"fcvtpu", 0b1, 0b1, 0b11010,
- int_aarch64_neon_fcvtpu>;
-defm FCVTMS : NeonI_2VMisc_fp_to_int<"fcvtms", 0b0, 0b0, 0b11011,
- int_aarch64_neon_fcvtms>;
-defm FCVTMU : NeonI_2VMisc_fp_to_int<"fcvtmu", 0b0, 0b1, 0b11011,
- int_aarch64_neon_fcvtmu>;
-defm FCVTZS : NeonI_2VMisc_fp_to_int<"fcvtzs", 0b1, 0b0, 0b11011, fp_to_sint>;
-defm FCVTZU : NeonI_2VMisc_fp_to_int<"fcvtzu", 0b1, 0b1, 0b11011, fp_to_uint>;
-defm FCVTAS : NeonI_2VMisc_fp_to_int<"fcvtas", 0b0, 0b0, 0b11100,
- int_aarch64_neon_fcvtas>;
-defm FCVTAU : NeonI_2VMisc_fp_to_int<"fcvtau", 0b0, 0b1, 0b11100,
- int_aarch64_neon_fcvtau>;
-
-multiclass NeonI_2VMisc_int_to_fp<string asmop, bit Size, bit U,
- bits<5> opcode, SDPatternOperator Neon_Op> {
- defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4f32, v4i32, v2f64,
- v2i64, v2f32, v2i32, Neon_Op>;
-}
-
-defm SCVTF : NeonI_2VMisc_int_to_fp<"scvtf", 0b0, 0b0, 0b11101, sint_to_fp>;
-defm UCVTF : NeonI_2VMisc_int_to_fp<"ucvtf", 0b0, 0b1, 0b11101, uint_to_fp>;
-
-multiclass NeonI_2VMisc_fp_to_fp<string asmop, bit Size, bit U,
- bits<5> opcode, SDPatternOperator Neon_Op> {
- defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4f32, v4f32, v2f64,
- v2f64, v2f32, v2f32, Neon_Op>;
-}
-
-defm FRINTN : NeonI_2VMisc_fp_to_fp<"frintn", 0b0, 0b0, 0b11000,
- int_aarch64_neon_frintn>;
-defm FRINTA : NeonI_2VMisc_fp_to_fp<"frinta", 0b0, 0b1, 0b11000, frnd>;
-defm FRINTP : NeonI_2VMisc_fp_to_fp<"frintp", 0b1, 0b0, 0b11000, fceil>;
-defm FRINTM : NeonI_2VMisc_fp_to_fp<"frintm", 0b0, 0b0, 0b11001, ffloor>;
-defm FRINTX : NeonI_2VMisc_fp_to_fp<"frintx", 0b0, 0b1, 0b11001, frint>;
-defm FRINTZ : NeonI_2VMisc_fp_to_fp<"frintz", 0b1, 0b0, 0b11001, ftrunc>;
-defm FRINTI : NeonI_2VMisc_fp_to_fp<"frinti", 0b1, 0b1, 0b11001, fnearbyint>;
-defm FRECPE : NeonI_2VMisc_fp_to_fp<"frecpe", 0b1, 0b0, 0b11101,
- int_arm_neon_vrecpe>;
-defm FRSQRTE : NeonI_2VMisc_fp_to_fp<"frsqrte", 0b1, 0b1, 0b11101,
- int_arm_neon_vrsqrte>;
-defm FSQRT : NeonI_2VMisc_fp_to_fp<"fsqrt", 0b1, 0b1, 0b11111, fsqrt>;
-
-multiclass NeonI_2VMisc_S_Conv<string asmop, bit Size, bit U,
- bits<5> opcode, SDPatternOperator Neon_Op> {
- def 4s : NeonI_2VMisc<0b1, U, {Size, 0b0}, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.4s, $Rn.4s",
- [(set (v4i32 VPR128:$Rd),
- (v4i32 (Neon_Op (v4i32 VPR128:$Rn))))],
- NoItinerary>;
-
- def 2s : NeonI_2VMisc<0b0, U, {Size, 0b0}, opcode,
- (outs VPR64:$Rd), (ins VPR64:$Rn),
- asmop # "\t$Rd.2s, $Rn.2s",
- [(set (v2i32 VPR64:$Rd),
- (v2i32 (Neon_Op (v2i32 VPR64:$Rn))))],
- NoItinerary>;
-}
-
-defm URECPE : NeonI_2VMisc_S_Conv<"urecpe", 0b1, 0b0, 0b11100,
- int_arm_neon_vrecpe>;
-defm URSQRTE : NeonI_2VMisc_S_Conv<"ursqrte", 0b1, 0b1, 0b11100,
- int_arm_neon_vrsqrte>;
-
-// Crypto Class
-class NeonI_Cryptoaes_2v<bits<2> size, bits<5> opcode,
- string asmop, SDPatternOperator opnode>
- : NeonI_Crypto_AES<size, opcode,
- (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
- asmop # "\t$Rd.16b, $Rn.16b",
- [(set (v16i8 VPR128:$Rd),
- (v16i8 (opnode (v16i8 VPR128:$src),
- (v16i8 VPR128:$Rn))))],
- NoItinerary>{
- let Constraints = "$src = $Rd";
- let Predicates = [HasNEON, HasCrypto];
-}
-
-def AESE : NeonI_Cryptoaes_2v<0b00, 0b00100, "aese", int_arm_neon_aese>;
-def AESD : NeonI_Cryptoaes_2v<0b00, 0b00101, "aesd", int_arm_neon_aesd>;
-
-class NeonI_Cryptoaes<bits<2> size, bits<5> opcode,
- string asmop, SDPatternOperator opnode>
- : NeonI_Crypto_AES<size, opcode,
- (outs VPR128:$Rd), (ins VPR128:$Rn),
- asmop # "\t$Rd.16b, $Rn.16b",
- [(set (v16i8 VPR128:$Rd),
- (v16i8 (opnode (v16i8 VPR128:$Rn))))],
- NoItinerary>;
-
-def AESMC : NeonI_Cryptoaes<0b00, 0b00110, "aesmc", int_arm_neon_aesmc>;
-def AESIMC : NeonI_Cryptoaes<0b00, 0b00111, "aesimc", int_arm_neon_aesimc>;
-
-class NeonI_Cryptosha_vv<bits<2> size, bits<5> opcode,
- string asmop, SDPatternOperator opnode>
- : NeonI_Crypto_SHA<size, opcode,
- (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
- asmop # "\t$Rd.4s, $Rn.4s",
- [(set (v4i32 VPR128:$Rd),
- (v4i32 (opnode (v4i32 VPR128:$src),
- (v4i32 VPR128:$Rn))))],
- NoItinerary> {
- let Constraints = "$src = $Rd";
- let Predicates = [HasNEON, HasCrypto];
-}
-
-def SHA1SU1 : NeonI_Cryptosha_vv<0b00, 0b00001, "sha1su1",
- int_arm_neon_sha1su1>;
-def SHA256SU0 : NeonI_Cryptosha_vv<0b00, 0b00010, "sha256su0",
- int_arm_neon_sha256su0>;
-
-class NeonI_Cryptosha_ss<bits<2> size, bits<5> opcode,
- string asmop, SDPatternOperator opnode>
- : NeonI_Crypto_SHA<size, opcode,
- (outs FPR32:$Rd), (ins FPR32:$Rn),
- asmop # "\t$Rd, $Rn",
- [(set (v1i32 FPR32:$Rd),
- (v1i32 (opnode (v1i32 FPR32:$Rn))))],
- NoItinerary> {
- let Predicates = [HasNEON, HasCrypto];
-}
-
-def SHA1H : NeonI_Cryptosha_ss<0b00, 0b00000, "sha1h", int_arm_neon_sha1h>;
-
-class NeonI_Cryptosha3_vvv<bits<2> size, bits<3> opcode, string asmop,
- SDPatternOperator opnode>
- : NeonI_Crypto_3VSHA<size, opcode,
- (outs VPR128:$Rd),
- (ins VPR128:$src, VPR128:$Rn, VPR128:$Rm),
- asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
- [(set (v4i32 VPR128:$Rd),
- (v4i32 (opnode (v4i32 VPR128:$src),
- (v4i32 VPR128:$Rn),
- (v4i32 VPR128:$Rm))))],
- NoItinerary> {
- let Constraints = "$src = $Rd";
- let Predicates = [HasNEON, HasCrypto];
-}
-
-def SHA1SU0 : NeonI_Cryptosha3_vvv<0b00, 0b011, "sha1su0",
- int_arm_neon_sha1su0>;
-def SHA256SU1 : NeonI_Cryptosha3_vvv<0b00, 0b110, "sha256su1",
- int_arm_neon_sha256su1>;
-
-class NeonI_Cryptosha3_qqv<bits<2> size, bits<3> opcode, string asmop,
- SDPatternOperator opnode>
- : NeonI_Crypto_3VSHA<size, opcode,
- (outs FPR128:$Rd),
- (ins FPR128:$src, FPR128:$Rn, VPR128:$Rm),
- asmop # "\t$Rd, $Rn, $Rm.4s",
- [(set (v4i32 FPR128:$Rd),
- (v4i32 (opnode (v4i32 FPR128:$src),
- (v4i32 FPR128:$Rn),
- (v4i32 VPR128:$Rm))))],
- NoItinerary> {
- let Constraints = "$src = $Rd";
- let Predicates = [HasNEON, HasCrypto];
-}
-
-def SHA256H : NeonI_Cryptosha3_qqv<0b00, 0b100, "sha256h",
- int_arm_neon_sha256h>;
-def SHA256H2 : NeonI_Cryptosha3_qqv<0b00, 0b101, "sha256h2",
- int_arm_neon_sha256h2>;
-
-class NeonI_Cryptosha3_qsv<bits<2> size, bits<3> opcode, string asmop,
- SDPatternOperator opnode>
- : NeonI_Crypto_3VSHA<size, opcode,
- (outs FPR128:$Rd),
- (ins FPR128:$src, FPR32:$Rn, VPR128:$Rm),
- asmop # "\t$Rd, $Rn, $Rm.4s",
- [(set (v4i32 FPR128:$Rd),
- (v4i32 (opnode (v4i32 FPR128:$src),
- (v1i32 FPR32:$Rn),
- (v4i32 VPR128:$Rm))))],
- NoItinerary> {
- let Constraints = "$src = $Rd";
- let Predicates = [HasNEON, HasCrypto];
-}
-
-def SHA1C : NeonI_Cryptosha3_qsv<0b00, 0b000, "sha1c", int_aarch64_neon_sha1c>;
-def SHA1P : NeonI_Cryptosha3_qsv<0b00, 0b001, "sha1p", int_aarch64_neon_sha1p>;
-def SHA1M : NeonI_Cryptosha3_qsv<0b00, 0b010, "sha1m", int_aarch64_neon_sha1m>;
-
-//
-// Patterns for handling half-precision values
-//
-
-// Convert f16 value coming in as i16 value to f32
-def : Pat<(f32 (f16_to_f32 (i32 (and (i32 GPR32:$Rn), 65535)))),
- (FCVTsh (EXTRACT_SUBREG (FMOVsw GPR32:$Rn), sub_16))>;
-def : Pat<(f32 (f16_to_f32 (i32 (assertzext GPR32:$Rn)))),
- (FCVTsh (EXTRACT_SUBREG (FMOVsw GPR32:$Rn), sub_16))>;
-
-def : Pat<(f32 (f16_to_f32 (i32 (assertzext (i32 (
- f32_to_f16 (f32 FPR32:$Rn))))))),
- (f32 FPR32:$Rn)>;
-
-// Patterns for vector extract of half-precision FP value in i16 storage type
-def : Pat<(f32 (f16_to_f32 ( i32 (and (i32 (vector_extract
- (v4i16 VPR64:$Rn), neon_uimm2_bare:$Imm)), 65535)))),
- (FCVTsh (f16 (DUPhv_H
- (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
- neon_uimm2_bare:$Imm)))>;
-
-def : Pat<(f32 (f16_to_f32 ( i32 (and (i32 (vector_extract
- (v8i16 VPR128:$Rn), neon_uimm3_bare:$Imm)), 65535)))),
- (FCVTsh (f16 (DUPhv_H (v8i16 VPR128:$Rn), neon_uimm3_bare:$Imm)))>;
-
-// Patterns for vector insert of half-precision FP value 0 in i16 storage type
-def : Pat<(v8i16 (vector_insert (v8i16 VPR128:$Rn),
- (i32 (assertsext (i32 (fp_to_sint(f32 (f16_to_f32 (i32 0))))))),
- (neon_uimm3_bare:$Imm))),
- (v8i16 (INSELh (v8i16 VPR128:$Rn),
- (v8i16 (SUBREG_TO_REG (i64 0),
- (f16 (EXTRACT_SUBREG (f32 (FMOVsw (i32 WZR))), sub_16)),
- sub_16)),
- neon_uimm3_bare:$Imm, 0))>;
-
-def : Pat<(v4i16 (vector_insert (v4i16 VPR64:$Rn),
- (i32 (assertsext (i32 (fp_to_sint(f32 (f16_to_f32 (i32 0))))))),
- (neon_uimm2_bare:$Imm))),
- (v4i16 (EXTRACT_SUBREG
- (v8i16 (INSELh
- (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
- (v8i16 (SUBREG_TO_REG (i64 0),
- (f16 (EXTRACT_SUBREG (f32 (FMOVsw (i32 WZR))), sub_16)),
- sub_16)),
- neon_uimm2_bare:$Imm, 0)),
- sub_64))>;
-
-// Patterns for vector insert of half-precision FP value in i16 storage type
-def : Pat<(v8i16 (vector_insert (v8i16 VPR128:$Rn),
- (i32 (assertsext (i32 (fp_to_sint
- (f32 (f16_to_f32 (i32 (and (i32 GPR32:$src), 65535)))))))),
- (neon_uimm3_bare:$Imm))),
- (v8i16 (INSELh (v8i16 VPR128:$Rn),
- (v8i16 (SUBREG_TO_REG (i64 0),
- (f16 (EXTRACT_SUBREG (f32 (FMOVsw (i32 GPR32:$src))), sub_16)),
- sub_16)),
- neon_uimm3_bare:$Imm, 0))>;
-
-def : Pat<(v4i16 (vector_insert (v4i16 VPR64:$Rn),
- (i32 (assertsext (i32 (fp_to_sint
- (f32 (f16_to_f32 (i32 (and (i32 GPR32:$src), 65535)))))))),
- (neon_uimm2_bare:$Imm))),
- (v4i16 (EXTRACT_SUBREG
- (v8i16 (INSELh
- (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
- (v8i16 (SUBREG_TO_REG (i64 0),
- (f16 (EXTRACT_SUBREG (f32 (FMOVsw (i32 GPR32:$src))), sub_16)),
- sub_16)),
- neon_uimm2_bare:$Imm, 0)),
- sub_64))>;
-
-def : Pat<(v8i16 (vector_insert (v8i16 VPR128:$Rn),
- (i32 (vector_extract (v8i16 VPR128:$src), neon_uimm3_bare:$Imm2)),
- (neon_uimm3_bare:$Imm1))),
- (v8i16 (INSELh (v8i16 VPR128:$Rn), (v8i16 VPR128:$src),
- neon_uimm3_bare:$Imm1, neon_uimm3_bare:$Imm2))>;
-
-// Patterns for vector copy of half-precision FP value in i16 storage type
-def : Pat<(v8i16 (vector_insert (v8i16 VPR128:$Rn),
- (i32 (assertsext (i32 (fp_to_sint(f32 (f16_to_f32 (i32 (and (i32
- (vector_extract (v8i16 VPR128:$src), neon_uimm3_bare:$Imm2)),
- 65535)))))))),
- (neon_uimm3_bare:$Imm1))),
- (v8i16 (INSELh (v8i16 VPR128:$Rn), (v8i16 VPR128:$src),
- neon_uimm3_bare:$Imm1, neon_uimm3_bare:$Imm2))>;
-
-def : Pat<(v4i16 (vector_insert (v4i16 VPR64:$Rn),
- (i32 (assertsext (i32 (fp_to_sint(f32 (f16_to_f32 (i32 (and (i32
- (vector_extract (v4i16 VPR64:$src), neon_uimm3_bare:$Imm2)),
- 65535)))))))),
- (neon_uimm3_bare:$Imm1))),
- (v4i16 (EXTRACT_SUBREG
- (v8i16 (INSELh
- (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
- (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
- neon_uimm3_bare:$Imm1, neon_uimm3_bare:$Imm2)),
- sub_64))>;
-
-
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/contrib/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
new file mode 100644
index 0000000..3df9c4f
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -0,0 +1,951 @@
+//=- AArch64LoadStoreOptimizer.cpp - AArch64 load/store opt. pass -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a pass that performs load / store related peephole
+// optimizations. This pass should be run after register allocation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AArch64InstrInfo.h"
+#include "MCTargetDesc/AArch64AddressingModes.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/Statistic.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "aarch64-ldst-opt"
+
+/// AArch64LoadStoreOpt - Post-register allocation pass to combine
+/// load / store instructions to form ldp / stp instructions.
+
+STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
+STATISTIC(NumPostFolded, "Number of post-index updates folded");
+STATISTIC(NumPreFolded, "Number of pre-index updates folded");
+STATISTIC(NumUnscaledPairCreated,
+ "Number of load/store pair instructions generated from unscaled ops");
+
+static cl::opt<unsigned> ScanLimit("aarch64-load-store-scan-limit",
+ cl::init(20), cl::Hidden);
+
+// Placeholder while testing unscaled load/store combining
+static cl::opt<bool> EnableAArch64UnscaledMemOp(
+ "aarch64-unscaled-mem-op", cl::Hidden,
+ cl::desc("Allow AArch64 unscaled load/store combining"), cl::init(true));
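+// Illustrative usage (assumed, not part of the pass itself): being a cl::opt,
+// this flag can be toggled on the llc command line, e.g.
+// "llc -aarch64-unscaled-mem-op=false" to disable the unscaled combining.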
+
+namespace {
+struct AArch64LoadStoreOpt : public MachineFunctionPass {
+ static char ID;
+ AArch64LoadStoreOpt() : MachineFunctionPass(ID) {}
+
+ const AArch64InstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+
+ // Scan the instructions looking for a load/store that can be combined
+ // with the current instruction into a load/store pair.
+ // Return the matching instruction if one is found, else MBB->end().
+ // If a matching instruction is found, MergeForward is set to true if the
+ // merge is to remove the first instruction and replace the second with
+ // a pair-wise insn, and false if the reverse is true.
+ MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
+ bool &MergeForward,
+ unsigned Limit);
+ // Merge the two instructions indicated into a single pair-wise instruction.
+ // If MergeForward is true, erase the first instruction and fold its
+ // operation into the second. If false, the reverse. Return the instruction
+ // following the first instruction (which may change during processing).
+ MachineBasicBlock::iterator
+ mergePairedInsns(MachineBasicBlock::iterator I,
+ MachineBasicBlock::iterator Paired, bool MergeForward);
+
+ // Scan the instruction list to find a base register update that can
+ // be combined with the current instruction (a load or store) using
+ // pre or post indexed addressing with writeback. Scan forwards.
+ MachineBasicBlock::iterator
+ findMatchingUpdateInsnForward(MachineBasicBlock::iterator I, unsigned Limit,
+ int Value);
+
+ // Scan the instruction list to find a base register update that can
+ // be combined with the current instruction (a load or store) using
+ // pre or post indexed addressing with writeback. Scan backwards.
+ MachineBasicBlock::iterator
+ findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I, unsigned Limit);
+
+ // Merge a pre-index base register update into a ld/st instruction.
+ MachineBasicBlock::iterator
+ mergePreIdxUpdateInsn(MachineBasicBlock::iterator I,
+ MachineBasicBlock::iterator Update);
+
+ // Merge a post-index base register update into a ld/st instruction.
+ MachineBasicBlock::iterator
+ mergePostIdxUpdateInsn(MachineBasicBlock::iterator I,
+ MachineBasicBlock::iterator Update);
+
+ bool optimizeBlock(MachineBasicBlock &MBB);
+
+ bool runOnMachineFunction(MachineFunction &Fn) override;
+
+ const char *getPassName() const override {
+ return "AArch64 load / store optimization pass";
+ }
+
+private:
+ int getMemSize(MachineInstr *MemMI);
+};
+char AArch64LoadStoreOpt::ID = 0;
+}
+
+static bool isUnscaledLdst(unsigned Opc) {
+ switch (Opc) {
+ default:
+ return false;
+ case AArch64::STURSi:
+ return true;
+ case AArch64::STURDi:
+ return true;
+ case AArch64::STURQi:
+ return true;
+ case AArch64::STURWi:
+ return true;
+ case AArch64::STURXi:
+ return true;
+ case AArch64::LDURSi:
+ return true;
+ case AArch64::LDURDi:
+ return true;
+ case AArch64::LDURQi:
+ return true;
+ case AArch64::LDURWi:
+ return true;
+ case AArch64::LDURXi:
+ return true;
+ }
+}
+
+// Size in bytes of the data moved by a scaled or unscaled load or store
+int AArch64LoadStoreOpt::getMemSize(MachineInstr *MemMI) {
+ switch (MemMI->getOpcode()) {
+ default:
+ llvm_unreachable("Opcode has unknown size!");
+ case AArch64::STRSui:
+ case AArch64::STURSi:
+ return 4;
+ case AArch64::STRDui:
+ case AArch64::STURDi:
+ return 8;
+ case AArch64::STRQui:
+ case AArch64::STURQi:
+ return 16;
+ case AArch64::STRWui:
+ case AArch64::STURWi:
+ return 4;
+ case AArch64::STRXui:
+ case AArch64::STURXi:
+ return 8;
+ case AArch64::LDRSui:
+ case AArch64::LDURSi:
+ return 4;
+ case AArch64::LDRDui:
+ case AArch64::LDURDi:
+ return 8;
+ case AArch64::LDRQui:
+ case AArch64::LDURQi:
+ return 16;
+ case AArch64::LDRWui:
+ case AArch64::LDURWi:
+ return 4;
+ case AArch64::LDRXui:
+ case AArch64::LDURXi:
+ return 8;
+ }
+}
+
+static unsigned getMatchingPairOpcode(unsigned Opc) {
+ switch (Opc) {
+ default:
+ llvm_unreachable("Opcode has no pairwise equivalent!");
+ case AArch64::STRSui:
+ case AArch64::STURSi:
+ return AArch64::STPSi;
+ case AArch64::STRDui:
+ case AArch64::STURDi:
+ return AArch64::STPDi;
+ case AArch64::STRQui:
+ case AArch64::STURQi:
+ return AArch64::STPQi;
+ case AArch64::STRWui:
+ case AArch64::STURWi:
+ return AArch64::STPWi;
+ case AArch64::STRXui:
+ case AArch64::STURXi:
+ return AArch64::STPXi;
+ case AArch64::LDRSui:
+ case AArch64::LDURSi:
+ return AArch64::LDPSi;
+ case AArch64::LDRDui:
+ case AArch64::LDURDi:
+ return AArch64::LDPDi;
+ case AArch64::LDRQui:
+ case AArch64::LDURQi:
+ return AArch64::LDPQi;
+ case AArch64::LDRWui:
+ case AArch64::LDURWi:
+ return AArch64::LDPWi;
+ case AArch64::LDRXui:
+ case AArch64::LDURXi:
+ return AArch64::LDPXi;
+ }
+}
+
+static unsigned getPreIndexedOpcode(unsigned Opc) {
+ switch (Opc) {
+ default:
+ llvm_unreachable("Opcode has no pre-indexed equivalent!");
+ case AArch64::STRSui:
+ return AArch64::STRSpre;
+ case AArch64::STRDui:
+ return AArch64::STRDpre;
+ case AArch64::STRQui:
+ return AArch64::STRQpre;
+ case AArch64::STRWui:
+ return AArch64::STRWpre;
+ case AArch64::STRXui:
+ return AArch64::STRXpre;
+ case AArch64::LDRSui:
+ return AArch64::LDRSpre;
+ case AArch64::LDRDui:
+ return AArch64::LDRDpre;
+ case AArch64::LDRQui:
+ return AArch64::LDRQpre;
+ case AArch64::LDRWui:
+ return AArch64::LDRWpre;
+ case AArch64::LDRXui:
+ return AArch64::LDRXpre;
+ }
+}
+
+static unsigned getPostIndexedOpcode(unsigned Opc) {
+ switch (Opc) {
+ default:
+ llvm_unreachable("Opcode has no post-indexed equivalent!");
+ case AArch64::STRSui:
+ return AArch64::STRSpost;
+ case AArch64::STRDui:
+ return AArch64::STRDpost;
+ case AArch64::STRQui:
+ return AArch64::STRQpost;
+ case AArch64::STRWui:
+ return AArch64::STRWpost;
+ case AArch64::STRXui:
+ return AArch64::STRXpost;
+ case AArch64::LDRSui:
+ return AArch64::LDRSpost;
+ case AArch64::LDRDui:
+ return AArch64::LDRDpost;
+ case AArch64::LDRQui:
+ return AArch64::LDRQpost;
+ case AArch64::LDRWui:
+ return AArch64::LDRWpost;
+ case AArch64::LDRXui:
+ return AArch64::LDRXpost;
+ }
+}
+
+MachineBasicBlock::iterator
+AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
+ MachineBasicBlock::iterator Paired,
+ bool MergeForward) {
+ MachineBasicBlock::iterator NextI = I;
+ ++NextI;
+ // If NextI is the second of the two instructions to be merged, we need
+ // to skip one further. Either way we merge, the iterator is invalidated,
+ // and we don't need to scan the new instruction, as it's a pairwise
+ // instruction, which we're not considering for further action anyway.
+ if (NextI == Paired)
+ ++NextI;
+
+ bool IsUnscaled = isUnscaledLdst(I->getOpcode());
+ int OffsetStride =
+ IsUnscaled && EnableAArch64UnscaledMemOp ? getMemSize(I) : 1;
+
+ unsigned NewOpc = getMatchingPairOpcode(I->getOpcode());
+ // Insert our new paired instruction after whichever of the paired
+ // instructions MergeForward indicates.
+ MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
+ // MergeForward also determines which instruction we copy the base register
+ // operand from, so that the flags stay compatible with the input code.
+ MachineOperand &BaseRegOp =
+ MergeForward ? Paired->getOperand(1) : I->getOperand(1);
+
+ // Which register is Rt and which is Rt2 depends on the offset order.
+ MachineInstr *RtMI, *Rt2MI;
+ if (I->getOperand(2).getImm() ==
+ Paired->getOperand(2).getImm() + OffsetStride) {
+ RtMI = Paired;
+ Rt2MI = I;
+ } else {
+ RtMI = I;
+ Rt2MI = Paired;
+ }
+ // Handle Unscaled
+ int OffsetImm = RtMI->getOperand(2).getImm();
+ if (IsUnscaled && EnableAArch64UnscaledMemOp)
+ OffsetImm /= OffsetStride;
+
+ // Construct the new instruction.
+ MachineInstrBuilder MIB = BuildMI(*I->getParent(), InsertionPoint,
+ I->getDebugLoc(), TII->get(NewOpc))
+ .addOperand(RtMI->getOperand(0))
+ .addOperand(Rt2MI->getOperand(0))
+ .addOperand(BaseRegOp)
+ .addImm(OffsetImm);
+ (void)MIB;
+
+ // FIXME: Do we need/want to copy the mem operands from the source
+ // instructions? Probably. What uses them after this?
+
+ DEBUG(dbgs() << "Creating pair load/store. Replacing instructions:\n ");
+ DEBUG(I->print(dbgs()));
+ DEBUG(dbgs() << " ");
+ DEBUG(Paired->print(dbgs()));
+ DEBUG(dbgs() << " with instruction:\n ");
+ DEBUG(((MachineInstr *)MIB)->print(dbgs()));
+ DEBUG(dbgs() << "\n");
+
+ // Erase the old instructions.
+ I->eraseFromParent();
+ Paired->eraseFromParent();
+
+ return NextI;
+}
+
+/// trackRegDefsUses - Remember what registers the specified instruction uses
+/// and modifies.
+static void trackRegDefsUses(MachineInstr *MI, BitVector &ModifiedRegs,
+ BitVector &UsedRegs,
+ const TargetRegisterInfo *TRI) {
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (MO.isRegMask())
+ ModifiedRegs.setBitsNotInMask(MO.getRegMask());
+
+ if (!MO.isReg())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (MO.isDef()) {
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ ModifiedRegs.set(*AI);
+ } else {
+ assert(MO.isUse() && "Reg operand not a def and not a use?!?");
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ UsedRegs.set(*AI);
+ }
+ }
+}
+
+static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
+ if (!IsUnscaled && (Offset > 63 || Offset < -64))
+ return false;
+ if (IsUnscaled) {
+ // Convert the byte-offset used by unscaled into an "element" offset used
+ // by the scaled pair load/store instructions.
+ int ElemOffset = Offset / OffsetStride;
+ if (ElemOffset > 63 || ElemOffset < -64)
+ return false;
+ }
+ return true;
+}
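+// For example (illustrative): with an 8-byte unscaled access, OffsetStride is
+// 8, so a byte offset of 504 becomes element offset 63 and is pairable, while
+// 512 becomes 64 and is rejected.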
+
+// Do alignment, specialized to power of 2 and for signed ints,
+// avoiding having to do a C-style cast from uint64_t to int when
+// using RoundUpToAlignment from include/llvm/Support/MathExtras.h.
+// FIXME: Move this function to include/MathExtras.h?
+static int alignTo(int Num, int PowOf2) {
+ return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
+}
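+// For example (illustrative): alignTo(7, 4) == 8, alignTo(8, 4) == 8, and
+// alignTo(-5, 4) == -4, i.e. values round up to the next multiple of PowOf2.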
+
+/// findMatchingInsn - Scan the instructions looking for a load/store that can
+/// be combined with the current instruction into a load/store pair.
+MachineBasicBlock::iterator
+AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
+ bool &MergeForward, unsigned Limit) {
+ MachineBasicBlock::iterator E = I->getParent()->end();
+ MachineBasicBlock::iterator MBBI = I;
+ MachineInstr *FirstMI = I;
+ ++MBBI;
+
+ int Opc = FirstMI->getOpcode();
+ bool MayLoad = FirstMI->mayLoad();
+ bool IsUnscaled = isUnscaledLdst(Opc);
+ unsigned Reg = FirstMI->getOperand(0).getReg();
+ unsigned BaseReg = FirstMI->getOperand(1).getReg();
+ int Offset = FirstMI->getOperand(2).getImm();
+
+ // Early exit if the first instruction modifies the base register.
+ // e.g., ldr x0, [x0]
+ // Early exit if the offset is not possible to match. (6 bits of positive
+ // range, plus allow an extra one in case we find a later insn that matches
+ // with Offset-1.)
+ if (FirstMI->modifiesRegister(BaseReg, TRI))
+ return E;
+ int OffsetStride =
+ IsUnscaled && EnableAArch64UnscaledMemOp ? getMemSize(FirstMI) : 1;
+ if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride))
+ return E;
+
+ // Track which registers have been modified and used between the first insn
+ // (inclusive) and the second insn.
+ BitVector ModifiedRegs, UsedRegs;
+ ModifiedRegs.resize(TRI->getNumRegs());
+ UsedRegs.resize(TRI->getNumRegs());
+ for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
+ MachineInstr *MI = MBBI;
+ // Skip DBG_VALUE instructions. Otherwise debug info can affect the
+ // optimization by changing how far we scan.
+ if (MI->isDebugValue())
+ continue;
+
+ // Now that we know this is a real instruction, count it.
+ ++Count;
+
+ if (Opc == MI->getOpcode() && MI->getOperand(2).isImm()) {
+ // If we've found another instruction with the same opcode, check to see
+ // if the base and offset are compatible with our starting instruction.
+ // These instructions all have scaled immediate operands, so we just
+ // check for +1/-1. Make sure to check the new instruction offset is
+ // actually an immediate and not a symbolic reference destined for
+ // a relocation.
+ //
+ // Pairwise instructions have a 7-bit signed offset field. Single insns
+ // have a 12-bit unsigned offset field. To be a valid combine, the
+ // final offset must be in range.
+ unsigned MIBaseReg = MI->getOperand(1).getReg();
+ int MIOffset = MI->getOperand(2).getImm();
+ if (BaseReg == MIBaseReg && ((Offset == MIOffset + OffsetStride) ||
+ (Offset + OffsetStride == MIOffset))) {
+ int MinOffset = Offset < MIOffset ? Offset : MIOffset;
+ // If this is a volatile load/store that otherwise matched, stop looking
+ // as something is going on that we don't have enough information to
+ // safely transform. Similarly, stop if we see a hint to avoid pairs.
+ if (MI->hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
+ return E;
+ // If the resultant immediate offset of merging these instructions
+ // is out of range for a pairwise instruction, bail and keep looking.
+ bool MIIsUnscaled = isUnscaledLdst(MI->getOpcode());
+ if (!inBoundsForPair(MIIsUnscaled, MinOffset, OffsetStride)) {
+ trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
+ continue;
+ }
+ // If the alignment requirements of the paired (scaled) instruction
+ // can't express the offset of the unscaled input, bail and keep
+ // looking.
+ if (IsUnscaled && EnableAArch64UnscaledMemOp &&
+ (alignTo(MinOffset, OffsetStride) != MinOffset)) {
+ trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
+ continue;
+ }
+ // If the destination register of the loads is the same register, bail
+ // and keep looking. A load-pair instruction with both destination
+ // registers the same is UNPREDICTABLE and will result in an exception.
+ if (MayLoad && Reg == MI->getOperand(0).getReg()) {
+ trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
+ continue;
+ }
+
+ // If the Rt of the second instruction was not modified or used between
+ // the two instructions, we can combine the second into the first.
+ if (!ModifiedRegs[MI->getOperand(0).getReg()] &&
+ !UsedRegs[MI->getOperand(0).getReg()]) {
+ MergeForward = false;
+ return MBBI;
+ }
+
+ // Likewise, if the Rt of the first instruction is not modified or used
+ // between the two instructions, we can combine the first into the
+ // second.
+ if (!ModifiedRegs[FirstMI->getOperand(0).getReg()] &&
+ !UsedRegs[FirstMI->getOperand(0).getReg()]) {
+ MergeForward = true;
+ return MBBI;
+ }
+ // Unable to combine these instructions due to interference in between.
+ // Keep looking.
+ }
+ }
+
+ // If the instruction wasn't a matching load or store, but does (or can)
+ // modify memory, stop searching, as we don't have alias analysis or
+ // anything like that to tell us whether the access is tromping on the
+ // locations we care about. The big one we want to catch is calls.
+ //
+ // FIXME: Theoretically, we can do better than that for SP and FP based
+ // references since we can effectively know where those are touching. It's
+ // unclear if it's worth the extra code, though. Most paired instructions
+ // will be sequential, perhaps with a few intervening non-memory related
+ // instructions.
+ if (MI->mayStore() || MI->isCall())
+ return E;
+ // Likewise, if we're matching a store instruction, we don't want to
+ // move across a load, as it may be reading the same location.
+ if (FirstMI->mayStore() && MI->mayLoad())
+ return E;
+
+ // Update modified / uses register lists.
+ trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
+
+ // Otherwise, if the base register is modified, we have no match, so
+ // return early.
+ if (ModifiedRegs[BaseReg])
+ return E;
+ }
+ return E;
+}
+
+MachineBasicBlock::iterator
+AArch64LoadStoreOpt::mergePreIdxUpdateInsn(MachineBasicBlock::iterator I,
+ MachineBasicBlock::iterator Update) {
+ assert((Update->getOpcode() == AArch64::ADDXri ||
+ Update->getOpcode() == AArch64::SUBXri) &&
+ "Unexpected base register update instruction to merge!");
+ MachineBasicBlock::iterator NextI = I;
+ // Return the instruction following the merged instruction, which is
+ // the instruction following our unmerged load. Unless that's the add/sub
+ // instruction we're merging, in which case it's the one after that.
+ if (++NextI == Update)
+ ++NextI;
+
+ int Value = Update->getOperand(2).getImm();
+ assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
+ "Can't merge 1 << 12 offset into pre-indexed load / store");
+ if (Update->getOpcode() == AArch64::SUBXri)
+ Value = -Value;
+
+ unsigned NewOpc = getPreIndexedOpcode(I->getOpcode());
+ MachineInstrBuilder MIB =
+ BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
+ .addOperand(Update->getOperand(0))
+ .addOperand(I->getOperand(0))
+ .addOperand(I->getOperand(1))
+ .addImm(Value);
+ (void)MIB;
+
+ DEBUG(dbgs() << "Creating pre-indexed load/store.");
+ DEBUG(dbgs() << " Replacing instructions:\n ");
+ DEBUG(I->print(dbgs()));
+ DEBUG(dbgs() << " ");
+ DEBUG(Update->print(dbgs()));
+ DEBUG(dbgs() << " with instruction:\n ");
+ DEBUG(((MachineInstr *)MIB)->print(dbgs()));
+ DEBUG(dbgs() << "\n");
+
+ // Erase the old instructions for the block.
+ I->eraseFromParent();
+ Update->eraseFromParent();
+
+ return NextI;
+}
+
+MachineBasicBlock::iterator AArch64LoadStoreOpt::mergePostIdxUpdateInsn(
+ MachineBasicBlock::iterator I, MachineBasicBlock::iterator Update) {
+ assert((Update->getOpcode() == AArch64::ADDXri ||
+ Update->getOpcode() == AArch64::SUBXri) &&
+ "Unexpected base register update instruction to merge!");
+ MachineBasicBlock::iterator NextI = I;
+ // Return the instruction following the merged instruction, which is
+ // the instruction following our unmerged load. Unless that's the add/sub
+ // instruction we're merging, in which case it's the one after that.
+ if (++NextI == Update)
+ ++NextI;
+
+ int Value = Update->getOperand(2).getImm();
+ assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
+ "Can't merge 1 << 12 offset into post-indexed load / store");
+ if (Update->getOpcode() == AArch64::SUBXri)
+ Value = -Value;
+
+ unsigned NewOpc = getPostIndexedOpcode(I->getOpcode());
+ MachineInstrBuilder MIB =
+ BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
+ .addOperand(Update->getOperand(0))
+ .addOperand(I->getOperand(0))
+ .addOperand(I->getOperand(1))
+ .addImm(Value);
+ (void)MIB;
+
+ DEBUG(dbgs() << "Creating post-indexed load/store.");
+ DEBUG(dbgs() << " Replacing instructions:\n ");
+ DEBUG(I->print(dbgs()));
+ DEBUG(dbgs() << " ");
+ DEBUG(Update->print(dbgs()));
+ DEBUG(dbgs() << " with instruction:\n ");
+ DEBUG(((MachineInstr *)MIB)->print(dbgs()));
+ DEBUG(dbgs() << "\n");
+
+ // Erase the old instructions for the block.
+ I->eraseFromParent();
+ Update->eraseFromParent();
+
+ return NextI;
+}
+
+static bool isMatchingUpdateInsn(MachineInstr *MI, unsigned BaseReg,
+ int Offset) {
+ switch (MI->getOpcode()) {
+ default:
+ break;
+ case AArch64::SUBXri:
+ // Negate the offset for a SUB instruction.
+ Offset *= -1;
+ // FALLTHROUGH
+ case AArch64::ADDXri:
+ // Make sure it's a vanilla immediate operand, not a relocation or
+ // anything else we can't handle.
+ if (!MI->getOperand(2).isImm())
+ break;
+ // Watch out for 1 << 12 shifted value.
+ if (AArch64_AM::getShiftValue(MI->getOperand(3).getImm()))
+ break;
+ // If the instruction has the base register as source and dest and the
+ // immediate will fit in a signed 9-bit integer, then we have a match.
+ if (MI->getOperand(0).getReg() == BaseReg &&
+ MI->getOperand(1).getReg() == BaseReg &&
+ MI->getOperand(2).getImm() <= 255 &&
+ MI->getOperand(2).getImm() >= -256) {
+ // If we have a non-zero Offset, we check that it matches the amount
+ // we're adding to the register.
+ if (!Offset || Offset == MI->getOperand(2).getImm())
+ return true;
+ }
+ break;
+ }
+ return false;
+}
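+// For example (illustrative): "add x2, x2, #8" matches BaseReg == X2 with an
+// Offset of 8 (or an Offset of 0), while "add x3, x2, #8" or an add with an
+// "lsl #12" shifted immediate does not.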
+
+MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
+ MachineBasicBlock::iterator I, unsigned Limit, int Value) {
+ MachineBasicBlock::iterator E = I->getParent()->end();
+ MachineInstr *MemMI = I;
+ MachineBasicBlock::iterator MBBI = I;
+ const MachineFunction &MF = *MemMI->getParent()->getParent();
+
+ unsigned DestReg = MemMI->getOperand(0).getReg();
+ unsigned BaseReg = MemMI->getOperand(1).getReg();
+ int Offset = MemMI->getOperand(2).getImm() *
+ TII->getRegClass(MemMI->getDesc(), 0, TRI, MF)->getSize();
+
+ // If the base register overlaps the destination register, we can't
+ // merge the update.
+ if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
+ return E;
+
+ // Scan forward looking for post-index opportunities.
+ // Updating instructions can't be formed if the memory insn already
+ // has an offset other than the value we're looking for.
+ if (Offset != Value)
+ return E;
+
+ // Track which registers have been modified and used between the first insn
+ // (inclusive) and the second insn.
+ BitVector ModifiedRegs, UsedRegs;
+ ModifiedRegs.resize(TRI->getNumRegs());
+ UsedRegs.resize(TRI->getNumRegs());
+ ++MBBI;
+ for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
+ MachineInstr *MI = MBBI;
+ // Skip DBG_VALUE instructions. Otherwise debug info can affect the
+ // optimization by changing how far we scan.
+ if (MI->isDebugValue())
+ continue;
+
+ // Now that we know this is a real instruction, count it.
+ ++Count;
+
+ // If we found a match, return it.
+ if (isMatchingUpdateInsn(MI, BaseReg, Value))
+ return MBBI;
+
+ // Update the status of what the instruction clobbered and used.
+ trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
+
+ // Otherwise, if the base register is used or modified, we have no match, so
+ // return early.
+ if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
+ return E;
+ }
+ return E;
+}
+
+MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
+ MachineBasicBlock::iterator I, unsigned Limit) {
+ MachineBasicBlock::iterator B = I->getParent()->begin();
+ MachineBasicBlock::iterator E = I->getParent()->end();
+ MachineInstr *MemMI = I;
+ MachineBasicBlock::iterator MBBI = I;
+ const MachineFunction &MF = *MemMI->getParent()->getParent();
+
+ unsigned DestReg = MemMI->getOperand(0).getReg();
+ unsigned BaseReg = MemMI->getOperand(1).getReg();
+ int Offset = MemMI->getOperand(2).getImm();
+ unsigned RegSize = TII->getRegClass(MemMI->getDesc(), 0, TRI, MF)->getSize();
+
+ // If the load/store is the first instruction in the block, there's obviously
+ // not any matching update. Ditto if the memory offset isn't zero.
+ if (MBBI == B || Offset != 0)
+ return E;
+ // If the base register overlaps the destination register, we can't
+ // merge the update.
+ if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
+ return E;
+
+ // Track which registers have been modified and used between the first insn
+ // (inclusive) and the second insn.
+ BitVector ModifiedRegs, UsedRegs;
+ ModifiedRegs.resize(TRI->getNumRegs());
+ UsedRegs.resize(TRI->getNumRegs());
+ --MBBI;
+ for (unsigned Count = 0; MBBI != B && Count < Limit; --MBBI) {
+ MachineInstr *MI = MBBI;
+ // Skip DBG_VALUE instructions. Otherwise debug info can affect the
+ // optimization by changing how far we scan.
+ if (MI->isDebugValue())
+ continue;
+
+ // Now that we know this is a real instruction, count it.
+ ++Count;
+
+ // If we found a match, return it.
+ if (isMatchingUpdateInsn(MI, BaseReg, RegSize))
+ return MBBI;
+
+ // Update the status of what the instruction clobbered and used.
+ trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
+
+ // Otherwise, if the base register is used or modified, we have no match, so
+ // return early.
+ if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
+ return E;
+ }
+ return E;
+}
+
+bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB) {
+ bool Modified = false;
+ // Two transformations to do here:
+ // 1) Find loads and stores that can be merged into a single load or store
+ // pair instruction.
+ // e.g.,
+ // ldr x0, [x2]
+ // ldr x1, [x2, #8]
+ // ; becomes
+ // ldp x0, x1, [x2]
+ // 2) Find base register updates that can be merged into the load or store
+ // as a base-reg writeback.
+ // e.g.,
+ // ldr x0, [x2]
+ // add x2, x2, #4
+ // ; becomes
+ // ldr x0, [x2], #4
+
+ for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
+ MBBI != E;) {
+ MachineInstr *MI = MBBI;
+ switch (MI->getOpcode()) {
+ default:
+ // Just move on to the next instruction.
+ ++MBBI;
+ break;
+ case AArch64::STRSui:
+ case AArch64::STRDui:
+ case AArch64::STRQui:
+ case AArch64::STRXui:
+ case AArch64::STRWui:
+ case AArch64::LDRSui:
+ case AArch64::LDRDui:
+ case AArch64::LDRQui:
+ case AArch64::LDRXui:
+ case AArch64::LDRWui:
+ // do the unscaled versions as well
+ case AArch64::STURSi:
+ case AArch64::STURDi:
+ case AArch64::STURQi:
+ case AArch64::STURWi:
+ case AArch64::STURXi:
+ case AArch64::LDURSi:
+ case AArch64::LDURDi:
+ case AArch64::LDURQi:
+ case AArch64::LDURWi:
+ case AArch64::LDURXi: {
+ // If this is a volatile load/store, don't mess with it.
+ if (MI->hasOrderedMemoryRef()) {
+ ++MBBI;
+ break;
+ }
+ // Make sure this is a reg+imm (as opposed to an address reloc).
+ if (!MI->getOperand(2).isImm()) {
+ ++MBBI;
+ break;
+ }
+ // Check if this load/store has a hint to avoid pair formation.
+ // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
+ if (TII->isLdStPairSuppressed(MI)) {
+ ++MBBI;
+ break;
+ }
+ // Look ahead up to ScanLimit instructions for a pairable instruction.
+ bool MergeForward = false;
+ MachineBasicBlock::iterator Paired =
+ findMatchingInsn(MBBI, MergeForward, ScanLimit);
+ if (Paired != E) {
+ // Merge the loads into a pair. Keeping the iterator straight is a
+ // pain, so we let the merge routine tell us what the next instruction
+ // is after it's done mucking about.
+ MBBI = mergePairedInsns(MBBI, Paired, MergeForward);
+
+ Modified = true;
+ ++NumPairCreated;
+ if (isUnscaledLdst(MI->getOpcode()))
+ ++NumUnscaledPairCreated;
+ break;
+ }
+ ++MBBI;
+ break;
+ }
+ // FIXME: Do the other instructions.
+ }
+ }
+
+ for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
+ MBBI != E;) {
+ MachineInstr *MI = MBBI;
+ // Do update merging. It's simpler to keep this separate from the above
+ // switch, though not strictly necessary.
+ int Opc = MI->getOpcode();
+ switch (Opc) {
+ default:
+ // Just move on to the next instruction.
+ ++MBBI;
+ break;
+ case AArch64::STRSui:
+ case AArch64::STRDui:
+ case AArch64::STRQui:
+ case AArch64::STRXui:
+ case AArch64::STRWui:
+ case AArch64::LDRSui:
+ case AArch64::LDRDui:
+ case AArch64::LDRQui:
+ case AArch64::LDRXui:
+ case AArch64::LDRWui:
+ // do the unscaled versions as well
+ case AArch64::STURSi:
+ case AArch64::STURDi:
+ case AArch64::STURQi:
+ case AArch64::STURWi:
+ case AArch64::STURXi:
+ case AArch64::LDURSi:
+ case AArch64::LDURDi:
+ case AArch64::LDURQi:
+ case AArch64::LDURWi:
+ case AArch64::LDURXi: {
+ // Make sure this is a reg+imm (as opposed to an address reloc).
+ if (!MI->getOperand(2).isImm()) {
+ ++MBBI;
+ break;
+ }
+ // Look ahead up to ScanLimit instructions for a mergable instruction.
+ MachineBasicBlock::iterator Update =
+ findMatchingUpdateInsnForward(MBBI, ScanLimit, 0);
+ if (Update != E) {
+ // Merge the update into the ld/st.
+ MBBI = mergePostIdxUpdateInsn(MBBI, Update);
+ Modified = true;
+ ++NumPostFolded;
+ break;
+ }
+ // Don't know how to handle pre/post-index versions, so move to the next
+ // instruction.
+ if (isUnscaledLdst(Opc)) {
+ ++MBBI;
+ break;
+ }
+
+ // Look back to try to find a pre-index instruction. For example,
+ // add x0, x0, #8
+ // ldr x1, [x0]
+ // merged into:
+ // ldr x1, [x0, #8]!
+ Update = findMatchingUpdateInsnBackward(MBBI, ScanLimit);
+ if (Update != E) {
+ // Merge the update into the ld/st.
+ MBBI = mergePreIdxUpdateInsn(MBBI, Update);
+ Modified = true;
+ ++NumPreFolded;
+ break;
+ }
+
+ // Look forward for a base register update that can be merged in as a
+ // pre-index writeback. For example,
+ // ldr x1, [x0, #64]
+ // add x0, x0, #64
+ // merged into:
+ // ldr x1, [x0, #64]!
+
+ // The immediate in the load/store is scaled by the size of the register
+ // being loaded. The immediate in the add we're looking for,
+ // however, is not, so adjust here.
+ int Value = MI->getOperand(2).getImm() *
+ TII->getRegClass(MI->getDesc(), 0, TRI, *(MBB.getParent()))
+ ->getSize();
+ Update = findMatchingUpdateInsnForward(MBBI, ScanLimit, Value);
+ if (Update != E) {
+ // Merge the update into the ld/st.
+ MBBI = mergePreIdxUpdateInsn(MBBI, Update);
+ Modified = true;
+ ++NumPreFolded;
+ break;
+ }
+
+ // Nothing found. Just move to the next instruction.
+ ++MBBI;
+ break;
+ }
+ // FIXME: Do the other instructions.
+ }
+ }
+
+ return Modified;
+}
+
+bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
+ const TargetMachine &TM = Fn.getTarget();
+ TII = static_cast<const AArch64InstrInfo *>(TM.getInstrInfo());
+ TRI = TM.getRegisterInfo();
+
+ bool Modified = false;
+ for (auto &MBB : Fn)
+ Modified |= optimizeBlock(MBB);
+
+ return Modified;
+}
+
+// FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep
+// loads and stores near one another?
+
+/// createAArch64LoadStoreOptimizationPass - returns an instance of the
+/// load / store optimization pass.
+FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() {
+ return new AArch64LoadStoreOpt();
+}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64MCInstLower.cpp b/contrib/llvm/lib/Target/AArch64/AArch64MCInstLower.cpp
index 8cfb968..75a17b9 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64MCInstLower.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64MCInstLower.cpp
@@ -1,4 +1,4 @@
-//===-- AArch64MCInstLower.cpp - Convert AArch64 MachineInstr to an MCInst -==//
+//==-- AArch64MCInstLower.cpp - Convert AArch64 MachineInstr to an MCInst --==//
//
// The LLVM Compiler Infrastructure
//
@@ -12,146 +12,191 @@
//
//===----------------------------------------------------------------------===//
-#include "AArch64AsmPrinter.h"
-#include "AArch64TargetMachine.h"
+#include "AArch64MCInstLower.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "Utils/AArch64BaseInfo.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCContext.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
-#include "llvm/Target/Mangler.h"
-
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Target/TargetMachine.h"
using namespace llvm;
-MCOperand
-AArch64AsmPrinter::lowerSymbolOperand(const MachineOperand &MO,
- const MCSymbol *Sym) const {
- const MCExpr *Expr = 0;
+AArch64MCInstLower::AArch64MCInstLower(MCContext &ctx, Mangler &mang,
+ AsmPrinter &printer)
+ : Ctx(ctx), Printer(printer), TargetTriple(printer.getTargetTriple()) {}
- Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None, OutContext);
+MCSymbol *
+AArch64MCInstLower::GetGlobalAddressSymbol(const MachineOperand &MO) const {
+ return Printer.getSymbol(MO.getGlobal());
+}
- switch (MO.getTargetFlags()) {
- case AArch64II::MO_GOT:
- Expr = AArch64MCExpr::CreateGOT(Expr, OutContext);
- break;
- case AArch64II::MO_GOT_LO12:
- Expr = AArch64MCExpr::CreateGOTLo12(Expr, OutContext);
- break;
- case AArch64II::MO_LO12:
- Expr = AArch64MCExpr::CreateLo12(Expr, OutContext);
- break;
- case AArch64II::MO_DTPREL_G1:
- Expr = AArch64MCExpr::CreateDTPREL_G1(Expr, OutContext);
- break;
- case AArch64II::MO_DTPREL_G0_NC:
- Expr = AArch64MCExpr::CreateDTPREL_G0_NC(Expr, OutContext);
- break;
- case AArch64II::MO_GOTTPREL:
- Expr = AArch64MCExpr::CreateGOTTPREL(Expr, OutContext);
- break;
- case AArch64II::MO_GOTTPREL_LO12:
- Expr = AArch64MCExpr::CreateGOTTPRELLo12(Expr, OutContext);
- break;
- case AArch64II::MO_TLSDESC:
- Expr = AArch64MCExpr::CreateTLSDesc(Expr, OutContext);
- break;
- case AArch64II::MO_TLSDESC_LO12:
- Expr = AArch64MCExpr::CreateTLSDescLo12(Expr, OutContext);
- break;
- case AArch64II::MO_TPREL_G1:
- Expr = AArch64MCExpr::CreateTPREL_G1(Expr, OutContext);
- break;
- case AArch64II::MO_TPREL_G0_NC:
- Expr = AArch64MCExpr::CreateTPREL_G0_NC(Expr, OutContext);
- break;
- case AArch64II::MO_ABS_G3:
- Expr = AArch64MCExpr::CreateABS_G3(Expr, OutContext);
- break;
- case AArch64II::MO_ABS_G2_NC:
- Expr = AArch64MCExpr::CreateABS_G2_NC(Expr, OutContext);
- break;
- case AArch64II::MO_ABS_G1_NC:
- Expr = AArch64MCExpr::CreateABS_G1_NC(Expr, OutContext);
- break;
- case AArch64II::MO_ABS_G0_NC:
- Expr = AArch64MCExpr::CreateABS_G0_NC(Expr, OutContext);
- break;
- case AArch64II::MO_NO_FLAG:
- // Expr is already correct
- break;
- default:
- llvm_unreachable("Unexpected MachineOperand flag");
+MCSymbol *
+AArch64MCInstLower::GetExternalSymbolSymbol(const MachineOperand &MO) const {
+ return Printer.GetExternalSymbolSymbol(MO.getSymbolName());
+}
+
+MCOperand AArch64MCInstLower::lowerSymbolOperandDarwin(const MachineOperand &MO,
+ MCSymbol *Sym) const {
+ // FIXME: We would like an efficient form for this, so we don't have to do a
+ // lot of extra uniquing.
+ MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None;
+ if ((MO.getTargetFlags() & AArch64II::MO_GOT) != 0) {
+ if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_PAGE)
+ RefKind = MCSymbolRefExpr::VK_GOTPAGE;
+ else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) ==
+ AArch64II::MO_PAGEOFF)
+ RefKind = MCSymbolRefExpr::VK_GOTPAGEOFF;
+ else
+ llvm_unreachable("Unexpected target flags with MO_GOT on GV operand");
+ } else if ((MO.getTargetFlags() & AArch64II::MO_TLS) != 0) {
+ if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_PAGE)
+ RefKind = MCSymbolRefExpr::VK_TLVPPAGE;
+ else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) ==
+ AArch64II::MO_PAGEOFF)
+ RefKind = MCSymbolRefExpr::VK_TLVPPAGEOFF;
+ else
+ llvm_unreachable("Unexpected target flags with MO_TLS on GV operand");
+ } else {
+ if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_PAGE)
+ RefKind = MCSymbolRefExpr::VK_PAGE;
+ else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) ==
+ AArch64II::MO_PAGEOFF)
+ RefKind = MCSymbolRefExpr::VK_PAGEOFF;
}
+ const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, RefKind, Ctx);
+ if (!MO.isJTI() && MO.getOffset())
+ Expr = MCBinaryExpr::CreateAdd(
+ Expr, MCConstantExpr::Create(MO.getOffset(), Ctx), Ctx);
+ return MCOperand::CreateExpr(Expr);
+}
+
+MCOperand AArch64MCInstLower::lowerSymbolOperandELF(const MachineOperand &MO,
+ MCSymbol *Sym) const {
+ uint32_t RefFlags = 0;
+ if (MO.getTargetFlags() & AArch64II::MO_GOT)
+ RefFlags |= AArch64MCExpr::VK_GOT;
+ else if (MO.getTargetFlags() & AArch64II::MO_TLS) {
+ TLSModel::Model Model;
+ if (MO.isGlobal()) {
+ const GlobalValue *GV = MO.getGlobal();
+ Model = Printer.TM.getTLSModel(GV);
+ } else {
+ assert(MO.isSymbol() &&
+ StringRef(MO.getSymbolName()) == "_TLS_MODULE_BASE_" &&
+ "unexpected external TLS symbol");
+ Model = TLSModel::GeneralDynamic;
+ }
+ switch (Model) {
+ case TLSModel::InitialExec:
+ RefFlags |= AArch64MCExpr::VK_GOTTPREL;
+ break;
+ case TLSModel::LocalExec:
+ RefFlags |= AArch64MCExpr::VK_TPREL;
+ break;
+ case TLSModel::LocalDynamic:
+ RefFlags |= AArch64MCExpr::VK_DTPREL;
+ break;
+ case TLSModel::GeneralDynamic:
+ RefFlags |= AArch64MCExpr::VK_TLSDESC;
+ break;
+ }
+ } else {
+ // No modifier means this is a generic reference, classified as absolute for
+ // the cases where it matters (:abs_g0: etc).
+ RefFlags |= AArch64MCExpr::VK_ABS;
+ }
+
+ if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_PAGE)
+ RefFlags |= AArch64MCExpr::VK_PAGE;
+ else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) ==
+ AArch64II::MO_PAGEOFF)
+ RefFlags |= AArch64MCExpr::VK_PAGEOFF;
+ else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_G3)
+ RefFlags |= AArch64MCExpr::VK_G3;
+ else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_G2)
+ RefFlags |= AArch64MCExpr::VK_G2;
+ else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_G1)
+ RefFlags |= AArch64MCExpr::VK_G1;
+ else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_G0)
+ RefFlags |= AArch64MCExpr::VK_G0;
+
+ if (MO.getTargetFlags() & AArch64II::MO_NC)
+ RefFlags |= AArch64MCExpr::VK_NC;
+
+ const MCExpr *Expr =
+ MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None, Ctx);
if (!MO.isJTI() && MO.getOffset())
- Expr = MCBinaryExpr::CreateAdd(Expr,
- MCConstantExpr::Create(MO.getOffset(),
- OutContext),
- OutContext);
+ Expr = MCBinaryExpr::CreateAdd(
+ Expr, MCConstantExpr::Create(MO.getOffset(), Ctx), Ctx);
+
+ AArch64MCExpr::VariantKind RefKind;
+ RefKind = static_cast<AArch64MCExpr::VariantKind>(RefFlags);
+ Expr = AArch64MCExpr::Create(Expr, RefKind, Ctx);
return MCOperand::CreateExpr(Expr);
}
-bool AArch64AsmPrinter::lowerOperand(const MachineOperand &MO,
- MCOperand &MCOp) const {
+MCOperand AArch64MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
+ MCSymbol *Sym) const {
+ if (TargetTriple.isOSDarwin())
+ return lowerSymbolOperandDarwin(MO, Sym);
+
+ assert(TargetTriple.isOSBinFormatELF() && "Expect Darwin or ELF target");
+ return lowerSymbolOperandELF(MO, Sym);
+}
+
+bool AArch64MCInstLower::lowerOperand(const MachineOperand &MO,
+ MCOperand &MCOp) const {
switch (MO.getType()) {
- default: llvm_unreachable("unknown operand type");
+ default:
+ llvm_unreachable("unknown operand type");
case MachineOperand::MO_Register:
+ // Ignore all implicit register operands.
if (MO.isImplicit())
return false;
- assert(!MO.getSubReg() && "Subregs should be eliminated!");
MCOp = MCOperand::CreateReg(MO.getReg());
break;
+ case MachineOperand::MO_RegisterMask:
+ // Regmasks are like implicit defs.
+ return false;
case MachineOperand::MO_Immediate:
MCOp = MCOperand::CreateImm(MO.getImm());
break;
- case MachineOperand::MO_FPImmediate: {
- assert(MO.getFPImm()->isZero() && "Only fp imm 0.0 is supported");
- MCOp = MCOperand::CreateFPImm(0.0);
- break;
- }
- case MachineOperand::MO_BlockAddress:
- MCOp = lowerSymbolOperand(MO, GetBlockAddressSymbol(MO.getBlockAddress()));
- break;
- case MachineOperand::MO_ExternalSymbol:
- MCOp = lowerSymbolOperand(MO, GetExternalSymbolSymbol(MO.getSymbolName()));
+ case MachineOperand::MO_MachineBasicBlock:
+ MCOp = MCOperand::CreateExpr(
+ MCSymbolRefExpr::Create(MO.getMBB()->getSymbol(), Ctx));
break;
case MachineOperand::MO_GlobalAddress:
- MCOp = lowerSymbolOperand(MO, getSymbol(MO.getGlobal()));
+ MCOp = LowerSymbolOperand(MO, GetGlobalAddressSymbol(MO));
break;
- case MachineOperand::MO_MachineBasicBlock:
- MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
- MO.getMBB()->getSymbol(), OutContext));
+ case MachineOperand::MO_ExternalSymbol:
+ MCOp = LowerSymbolOperand(MO, GetExternalSymbolSymbol(MO));
break;
case MachineOperand::MO_JumpTableIndex:
- MCOp = lowerSymbolOperand(MO, GetJTISymbol(MO.getIndex()));
+ MCOp = LowerSymbolOperand(MO, Printer.GetJTISymbol(MO.getIndex()));
break;
case MachineOperand::MO_ConstantPoolIndex:
- MCOp = lowerSymbolOperand(MO, GetCPISymbol(MO.getIndex()));
+ MCOp = LowerSymbolOperand(MO, Printer.GetCPISymbol(MO.getIndex()));
+ break;
+ case MachineOperand::MO_BlockAddress:
+ MCOp = LowerSymbolOperand(
+ MO, Printer.GetBlockAddressSymbol(MO.getBlockAddress()));
break;
- case MachineOperand::MO_RegisterMask:
- // Ignore call clobbers
- return false;
-
}
-
return true;
}
-void llvm::LowerAArch64MachineInstrToMCInst(const MachineInstr *MI,
- MCInst &OutMI,
- AArch64AsmPrinter &AP) {
+void AArch64MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
OutMI.setOpcode(MI->getOpcode());
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
-
MCOperand MCOp;
- if (AP.lowerOperand(MO, MCOp))
+ if (lowerOperand(MI->getOperand(i), MCOp))
OutMI.addOperand(MCOp);
}
}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64MCInstLower.h b/contrib/llvm/lib/Target/AArch64/AArch64MCInstLower.h
new file mode 100644
index 0000000..ba50ba9
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64MCInstLower.h
@@ -0,0 +1,52 @@
+//===-- AArch64MCInstLower.h - Lower MachineInstr to MCInst ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef AArch64_MCINSTLOWER_H
+#define AArch64_MCINSTLOWER_H
+
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+class AsmPrinter;
+class MCAsmInfo;
+class MCContext;
+class MCInst;
+class MCOperand;
+class MCSymbol;
+class MachineInstr;
+class MachineModuleInfoMachO;
+class MachineOperand;
+class Mangler;
+
+/// AArch64MCInstLower - This class is used to lower a MachineInstr
+/// into an MCInst.
+class LLVM_LIBRARY_VISIBILITY AArch64MCInstLower {
+ MCContext &Ctx;
+ AsmPrinter &Printer;
+ Triple TargetTriple;
+
+public:
+ AArch64MCInstLower(MCContext &ctx, Mangler &mang, AsmPrinter &printer);
+
+ bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const;
+ void Lower(const MachineInstr *MI, MCInst &OutMI) const;
+
+ MCOperand lowerSymbolOperandDarwin(const MachineOperand &MO,
+ MCSymbol *Sym) const;
+ MCOperand lowerSymbolOperandELF(const MachineOperand &MO,
+ MCSymbol *Sym) const;
+ MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
+
+ MCSymbol *GetGlobalAddressSymbol(const MachineOperand &MO) const;
+ MCSymbol *GetExternalSymbolSymbol(const MachineOperand &MO) const;
+};
+}
+
+#endif
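
A minimal usage sketch for the lowering class declared above (not part of this commit; the helper name and the EmitToStreamer/OutStreamer calls are assumptions based on LLVM 3.5-era AsmPrinter conventions):

#include "AArch64MCInstLower.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/MC/MCInst.h"
using namespace llvm;

// Sketch only: drive the lowering from an asm-printer context. The
// EmitToStreamer/OutStreamer usage is an assumption, not something this
// commit introduces.
static void emitLoweredInstruction(AArch64MCInstLower &MCInstLowering,
                                   AsmPrinter &Printer,
                                   const MachineInstr *MI) {
  MCInst TmpInst;
  // Lower() walks MI's operands and lowers each one through lowerOperand().
  MCInstLowering.Lower(MI, TmpInst);
  Printer.EmitToStreamer(Printer.OutStreamer, TmpInst);
}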
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp b/contrib/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp
deleted file mode 100644
index f45d8f7..0000000
--- a/contrib/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp
+++ /dev/null
@@ -1,18 +0,0 @@
-//===-- AArch64MachineFuctionInfo.cpp - AArch64 machine function info -----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file just contains the anchor for the AArch64MachineFunctionInfo to
-// force vtable emission.
-//
-//===----------------------------------------------------------------------===//
-#include "AArch64MachineFunctionInfo.h"
-
-using namespace llvm;
-
-void AArch64MachineFunctionInfo::anchor() { }
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h b/contrib/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
index 33da54f..7c257ba 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
@@ -1,4 +1,4 @@
-//=- AArch64MachineFuctionInfo.h - AArch64 machine function info -*- C++ -*-==//
+//=- AArch64MachineFuctionInfo.h - AArch64 machine function info --*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -11,17 +11,19 @@
//
//===----------------------------------------------------------------------===//
-#ifndef AARCH64MACHINEFUNCTIONINFO_H
-#define AARCH64MACHINEFUNCTIONINFO_H
+#ifndef AArch64MACHINEFUNCTIONINFO_H
+#define AArch64MACHINEFUNCTIONINFO_H
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/MC/MCLinkerOptimizationHint.h"
namespace llvm {
-/// This class is derived from MachineFunctionInfo and contains private AArch64
-/// target-specific information for each MachineFunction.
-class AArch64MachineFunctionInfo : public MachineFunctionInfo {
- virtual void anchor();
+/// AArch64FunctionInfo - This class is derived from MachineFunctionInfo and
+/// contains private AArch64-specific information for each MachineFunction.
+class AArch64FunctionInfo : public MachineFunctionInfo {
/// Number of bytes of arguments this function has on the stack. If the callee
/// is expected to restore the argument stack this should be a multiple of 16,
@@ -39,111 +41,123 @@ class AArch64MachineFunctionInfo : public MachineFunctionInfo {
/// callee is expected to pop the args.
unsigned ArgumentStackToRestore;
- /// If the stack needs to be adjusted on frame entry in two stages, this
- /// records the size of the first adjustment just prior to storing
- /// callee-saved registers. The callee-saved slots are addressed assuming
- /// SP == <incoming-SP> - InitialStackAdjust.
- unsigned InitialStackAdjust;
+ /// HasStackFrame - True if this function has a stack frame. Set by
+ /// processFunctionBeforeCalleeSavedScan().
+ bool HasStackFrame;
- /// Number of local-dynamic TLS accesses.
- unsigned NumLocalDynamics;
+ /// \brief Amount of stack frame size, not including callee-saved registers.
+ unsigned LocalStackSize;
- /// @see AArch64 Procedure Call Standard, B.3
- ///
- /// The Frame index of the area where LowerFormalArguments puts the
- /// general-purpose registers that might contain variadic parameters.
- int VariadicGPRIdx;
+ /// \brief Number of TLS accesses using the special (combinable)
+ /// _TLS_MODULE_BASE_ symbol.
+ unsigned NumLocalDynamicTLSAccesses;
- /// @see AArch64 Procedure Call Standard, B.3
- ///
- /// The size of the frame object used to store the general-purpose registers
- /// which might contain variadic arguments. This is the offset from
- /// VariadicGPRIdx to what's stored in __gr_top.
- unsigned VariadicGPRSize;
+ /// \brief FrameIndex for start of varargs area for arguments passed on the
+ /// stack.
+ int VarArgsStackIndex;
- /// @see AArch64 Procedure Call Standard, B.3
- ///
- /// The Frame index of the area where LowerFormalArguments puts the
- /// floating-point registers that might contain variadic parameters.
- int VariadicFPRIdx;
+ /// \brief FrameIndex for start of varargs area for arguments passed in
+ /// general purpose registers.
+ int VarArgsGPRIndex;
- /// @see AArch64 Procedure Call Standard, B.3
- ///
- /// The size of the frame object used to store the floating-point registers
- /// which might contain variadic arguments. This is the offset from
- /// VariadicFPRIdx to what's stored in __vr_top.
- unsigned VariadicFPRSize;
+ /// \brief Size of the varargs area for arguments passed in general purpose
+ /// registers.
+ unsigned VarArgsGPRSize;
- /// @see AArch64 Procedure Call Standard, B.3
- ///
- /// The Frame index of an object pointing just past the last known stacked
- /// argument on entry to a variadic function. This goes into the __stack field
- /// of the va_list type.
- int VariadicStackIdx;
+ /// \brief FrameIndex for start of varargs area for arguments passed in
+ /// floating-point registers.
+ int VarArgsFPRIndex;
- /// The offset of the frame pointer from the stack pointer on function
- /// entry. This is expected to be negative.
- int FramePointerOffset;
+ /// \brief Size of the varargs area for arguments passed in floating-point
+ /// registers.
+ unsigned VarArgsFPRSize;
public:
- AArch64MachineFunctionInfo()
- : BytesInStackArgArea(0),
- ArgumentStackToRestore(0),
- InitialStackAdjust(0),
- NumLocalDynamics(0),
- VariadicGPRIdx(0),
- VariadicGPRSize(0),
- VariadicFPRIdx(0),
- VariadicFPRSize(0),
- VariadicStackIdx(0),
- FramePointerOffset(0) {}
-
- explicit AArch64MachineFunctionInfo(MachineFunction &MF)
- : BytesInStackArgArea(0),
- ArgumentStackToRestore(0),
- InitialStackAdjust(0),
- NumLocalDynamics(0),
- VariadicGPRIdx(0),
- VariadicGPRSize(0),
- VariadicFPRIdx(0),
- VariadicFPRSize(0),
- VariadicStackIdx(0),
- FramePointerOffset(0) {}
+ AArch64FunctionInfo()
+ : BytesInStackArgArea(0), ArgumentStackToRestore(0), HasStackFrame(false),
+ NumLocalDynamicTLSAccesses(0), VarArgsStackIndex(0), VarArgsGPRIndex(0),
+ VarArgsGPRSize(0), VarArgsFPRIndex(0), VarArgsFPRSize(0) {}
+
+ explicit AArch64FunctionInfo(MachineFunction &MF)
+ : BytesInStackArgArea(0), ArgumentStackToRestore(0), HasStackFrame(false),
+ NumLocalDynamicTLSAccesses(0), VarArgsStackIndex(0), VarArgsGPRIndex(0),
+ VarArgsGPRSize(0), VarArgsFPRIndex(0), VarArgsFPRSize(0) {
+ (void)MF;
+ }
unsigned getBytesInStackArgArea() const { return BytesInStackArgArea; }
- void setBytesInStackArgArea (unsigned bytes) { BytesInStackArgArea = bytes;}
+ void setBytesInStackArgArea(unsigned bytes) { BytesInStackArgArea = bytes; }
unsigned getArgumentStackToRestore() const { return ArgumentStackToRestore; }
void setArgumentStackToRestore(unsigned bytes) {
ArgumentStackToRestore = bytes;
}
- unsigned getInitialStackAdjust() const { return InitialStackAdjust; }
- void setInitialStackAdjust(unsigned bytes) { InitialStackAdjust = bytes; }
+ bool hasStackFrame() const { return HasStackFrame; }
+ void setHasStackFrame(bool s) { HasStackFrame = s; }
- unsigned getNumLocalDynamicTLSAccesses() const { return NumLocalDynamics; }
- void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamics; }
+ void setLocalStackSize(unsigned Size) { LocalStackSize = Size; }
+ unsigned getLocalStackSize() const { return LocalStackSize; }
- int getVariadicGPRIdx() const { return VariadicGPRIdx; }
- void setVariadicGPRIdx(int Idx) { VariadicGPRIdx = Idx; }
+ void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamicTLSAccesses; }
+ unsigned getNumLocalDynamicTLSAccesses() const {
+ return NumLocalDynamicTLSAccesses;
+ }
- unsigned getVariadicGPRSize() const { return VariadicGPRSize; }
- void setVariadicGPRSize(unsigned Size) { VariadicGPRSize = Size; }
+ int getVarArgsStackIndex() const { return VarArgsStackIndex; }
+ void setVarArgsStackIndex(int Index) { VarArgsStackIndex = Index; }
- int getVariadicFPRIdx() const { return VariadicFPRIdx; }
- void setVariadicFPRIdx(int Idx) { VariadicFPRIdx = Idx; }
+ int getVarArgsGPRIndex() const { return VarArgsGPRIndex; }
+ void setVarArgsGPRIndex(int Index) { VarArgsGPRIndex = Index; }
- unsigned getVariadicFPRSize() const { return VariadicFPRSize; }
- void setVariadicFPRSize(unsigned Size) { VariadicFPRSize = Size; }
+ unsigned getVarArgsGPRSize() const { return VarArgsGPRSize; }
+ void setVarArgsGPRSize(unsigned Size) { VarArgsGPRSize = Size; }
- int getVariadicStackIdx() const { return VariadicStackIdx; }
- void setVariadicStackIdx(int Idx) { VariadicStackIdx = Idx; }
+ int getVarArgsFPRIndex() const { return VarArgsFPRIndex; }
+ void setVarArgsFPRIndex(int Index) { VarArgsFPRIndex = Index; }
- int getFramePointerOffset() const { return FramePointerOffset; }
- void setFramePointerOffset(int Idx) { FramePointerOffset = Idx; }
+ unsigned getVarArgsFPRSize() const { return VarArgsFPRSize; }
+ void setVarArgsFPRSize(unsigned Size) { VarArgsFPRSize = Size; }
-};
+ typedef SmallPtrSet<const MachineInstr *, 16> SetOfInstructions;
+
+ const SetOfInstructions &getLOHRelated() const { return LOHRelated; }
+
+ // Shortcuts for LOH related types.
+ class MILOHDirective {
+ MCLOHType Kind;
+ /// Arguments of this directive. Order matters.
+ SmallVector<const MachineInstr *, 3> Args;
+
+ public:
+ typedef SmallVectorImpl<const MachineInstr *> LOHArgs;
+
+ MILOHDirective(MCLOHType Kind, const LOHArgs &Args)
+ : Kind(Kind), Args(Args.begin(), Args.end()) {
+ assert(isValidMCLOHType(Kind) && "Invalid LOH directive type!");
+ }
+
+ MCLOHType getKind() const { return Kind; }
+ const LOHArgs &getArgs() const { return Args; }
+ };
+
+ typedef MILOHDirective::LOHArgs MILOHArgs;
+ typedef SmallVector<MILOHDirective, 32> MILOHContainer;
+
+ const MILOHContainer &getLOHContainer() const { return LOHContainerSet; }
+
+ /// Add a LOH directive of this @p Kind and this @p Args.
+ void addLOHDirective(MCLOHType Kind, const MILOHArgs &Args) {
+ LOHContainerSet.push_back(MILOHDirective(Kind, Args));
+ LOHRelated.insert(Args.begin(), Args.end());
+ }
+
+private:
+ // Hold the lists of LOHs.
+ MILOHContainer LOHContainerSet;
+ SetOfInstructions LOHRelated;
+};
} // End llvm namespace
-#endif
+#endif // AArch64MACHINEFUNCTIONINFO_H
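
The LOH (Linker Optimization Hint) bookkeeping added to AArch64FunctionInfo above is easiest to see from the caller's side. A minimal sketch, assuming the MCLOH_AdrpAdd kind from llvm/MC/MCLinkerOptimizationHint.h; the helper name and instruction arguments are illustrative, not part of this commit:

#include "AArch64MachineFunctionInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFunction.h"
using namespace llvm;

// Sketch only: record an ADRP/ADD pair as a linker optimization hint.
static void recordAdrpAddPair(MachineFunction &MF, const MachineInstr *Adrp,
                              const MachineInstr *Add) {
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  SmallVector<const MachineInstr *, 2> Args;
  Args.push_back(Adrp);
  Args.push_back(Add);
  // addLOHDirective copies Args into a MILOHDirective and inserts both
  // instructions into the LOHRelated set.
  AFI->addLOHDirective(MCLOH_AdrpAdd, Args);
}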
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64PerfectShuffle.h b/contrib/llvm/lib/Target/AArch64/AArch64PerfectShuffle.h
new file mode 100644
index 0000000..b22fa24
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64PerfectShuffle.h
@@ -0,0 +1,6586 @@
+//===-- AArch64PerfectShuffle.h - AdvSIMD Perfect Shuffle Table -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file, which was autogenerated by llvm-PerfectShuffle, contains data
+// for the optimal way to build a perfect shuffle using AdvSIMD instructions.
+//
+//===----------------------------------------------------------------------===//
+
+// 31 entries have cost 0
+// 242 entries have cost 1
+// 1447 entries have cost 2
+// 3602 entries have cost 3
+// 1237 entries have cost 4
+// 2 entries have cost 5
+
+// This table is 6561*4 = 26244 bytes in size.
+static const unsigned PerfectShuffleTable[6561+1] = {
+ 135053414U, // <0,0,0,0>: Cost 1 vdup0 LHS
+ 1543503974U, // <0,0,0,1>: Cost 2 vext2 <0,0,0,0>, LHS
+ 2618572962U, // <0,0,0,2>: Cost 3 vext2 <0,2,0,0>, <0,2,0,0>
+ 2568054923U, // <0,0,0,3>: Cost 3 vext1 <3,0,0,0>, <3,0,0,0>
+ 1476398390U, // <0,0,0,4>: Cost 2 vext1 <0,0,0,0>, RHS
+ 2550140624U, // <0,0,0,5>: Cost 3 vext1 <0,0,0,0>, <5,1,7,3>
+ 2550141434U, // <0,0,0,6>: Cost 3 vext1 <0,0,0,0>, <6,2,7,3>
+ 2591945711U, // <0,0,0,7>: Cost 3 vext1 <7,0,0,0>, <7,0,0,0>
+ 135053414U, // <0,0,0,u>: Cost 1 vdup0 LHS
+ 2886516736U, // <0,0,1,0>: Cost 3 vzipl LHS, <0,0,0,0>
+ 1812775014U, // <0,0,1,1>: Cost 2 vzipl LHS, LHS
+ 1618133094U, // <0,0,1,2>: Cost 2 vext3 <1,2,3,0>, LHS
+ 2625209292U, // <0,0,1,3>: Cost 3 vext2 <1,3,0,0>, <1,3,0,0>
+ 2886558034U, // <0,0,1,4>: Cost 3 vzipl LHS, <0,4,1,5>
+ 2617246864U, // <0,0,1,5>: Cost 3 vext2 <0,0,0,0>, <1,5,3,7>
+ 3659723031U, // <0,0,1,6>: Cost 4 vext1 <6,0,0,1>, <6,0,0,1>
+ 2591953904U, // <0,0,1,7>: Cost 3 vext1 <7,0,0,1>, <7,0,0,1>
+ 1812775581U, // <0,0,1,u>: Cost 2 vzipl LHS, LHS
+ 3020734464U, // <0,0,2,0>: Cost 3 vtrnl LHS, <0,0,0,0>
+ 3020734474U, // <0,0,2,1>: Cost 3 vtrnl LHS, <0,0,1,1>
+ 1946992742U, // <0,0,2,2>: Cost 2 vtrnl LHS, LHS
+ 2631181989U, // <0,0,2,3>: Cost 3 vext2 <2,3,0,0>, <2,3,0,0>
+ 3020734668U, // <0,0,2,4>: Cost 3 vtrnl LHS, <0,2,4,6>
+ 3826550569U, // <0,0,2,5>: Cost 4 vuzpl <0,2,0,2>, <2,4,5,6>
+ 2617247674U, // <0,0,2,6>: Cost 3 vext2 <0,0,0,0>, <2,6,3,7>
+ 2591962097U, // <0,0,2,7>: Cost 3 vext1 <7,0,0,2>, <7,0,0,2>
+ 1946992796U, // <0,0,2,u>: Cost 2 vtrnl LHS, LHS
+ 2635163787U, // <0,0,3,0>: Cost 3 vext2 <3,0,0,0>, <3,0,0,0>
+ 2686419196U, // <0,0,3,1>: Cost 3 vext3 <0,3,1,0>, <0,3,1,0>
+ 2686492933U, // <0,0,3,2>: Cost 3 vext3 <0,3,2,0>, <0,3,2,0>
+ 2617248156U, // <0,0,3,3>: Cost 3 vext2 <0,0,0,0>, <3,3,3,3>
+ 2617248258U, // <0,0,3,4>: Cost 3 vext2 <0,0,0,0>, <3,4,5,6>
+ 3826551298U, // <0,0,3,5>: Cost 4 vuzpl <0,2,0,2>, <3,4,5,6>
+ 3690990200U, // <0,0,3,6>: Cost 4 vext2 <0,0,0,0>, <3,6,0,7>
+ 3713551042U, // <0,0,3,7>: Cost 4 vext2 <3,7,0,0>, <3,7,0,0>
+ 2635163787U, // <0,0,3,u>: Cost 3 vext2 <3,0,0,0>, <3,0,0,0>
+ 2617248658U, // <0,0,4,0>: Cost 3 vext2 <0,0,0,0>, <4,0,5,1>
+ 2888450150U, // <0,0,4,1>: Cost 3 vzipl <0,4,1,5>, LHS
+ 3021570150U, // <0,0,4,2>: Cost 3 vtrnl <0,2,4,6>, LHS
+ 3641829519U, // <0,0,4,3>: Cost 4 vext1 <3,0,0,4>, <3,0,0,4>
+ 3021570252U, // <0,0,4,4>: Cost 3 vtrnl <0,2,4,6>, <0,2,4,6>
+ 1543507254U, // <0,0,4,5>: Cost 2 vext2 <0,0,0,0>, RHS
+ 2752810294U, // <0,0,4,6>: Cost 3 vuzpl <0,2,0,2>, RHS
+ 3786998152U, // <0,0,4,7>: Cost 4 vext3 <4,7,5,0>, <0,4,7,5>
+ 1543507497U, // <0,0,4,u>: Cost 2 vext2 <0,0,0,0>, RHS
+ 2684354972U, // <0,0,5,0>: Cost 3 vext3 <0,0,0,0>, <0,5,0,7>
+ 2617249488U, // <0,0,5,1>: Cost 3 vext2 <0,0,0,0>, <5,1,7,3>
+ 3765617070U, // <0,0,5,2>: Cost 4 vext3 <1,2,3,0>, <0,5,2,7>
+ 3635865780U, // <0,0,5,3>: Cost 4 vext1 <2,0,0,5>, <3,0,4,5>
+ 2617249734U, // <0,0,5,4>: Cost 3 vext2 <0,0,0,0>, <5,4,7,6>
+ 2617249796U, // <0,0,5,5>: Cost 3 vext2 <0,0,0,0>, <5,5,5,5>
+ 2718712274U, // <0,0,5,6>: Cost 3 vext3 <5,6,7,0>, <0,5,6,7>
+ 2617249960U, // <0,0,5,7>: Cost 3 vext2 <0,0,0,0>, <5,7,5,7>
+ 2720039396U, // <0,0,5,u>: Cost 3 vext3 <5,u,7,0>, <0,5,u,7>
+ 2684355053U, // <0,0,6,0>: Cost 3 vext3 <0,0,0,0>, <0,6,0,7>
+ 3963609190U, // <0,0,6,1>: Cost 4 vzipl <0,6,2,7>, LHS
+ 2617250298U, // <0,0,6,2>: Cost 3 vext2 <0,0,0,0>, <6,2,7,3>
+ 3796435464U, // <0,0,6,3>: Cost 4 vext3 <6,3,7,0>, <0,6,3,7>
+ 3659762998U, // <0,0,6,4>: Cost 4 vext1 <6,0,0,6>, RHS
+ 3659763810U, // <0,0,6,5>: Cost 4 vext1 <6,0,0,6>, <5,6,7,0>
+ 2617250616U, // <0,0,6,6>: Cost 3 vext2 <0,0,0,0>, <6,6,6,6>
+ 2657727309U, // <0,0,6,7>: Cost 3 vext2 <6,7,0,0>, <6,7,0,0>
+ 2658390942U, // <0,0,6,u>: Cost 3 vext2 <6,u,0,0>, <6,u,0,0>
+ 2659054575U, // <0,0,7,0>: Cost 3 vext2 <7,0,0,0>, <7,0,0,0>
+ 3635880854U, // <0,0,7,1>: Cost 4 vext1 <2,0,0,7>, <1,2,3,0>
+ 3635881401U, // <0,0,7,2>: Cost 4 vext1 <2,0,0,7>, <2,0,0,7>
+ 3734787298U, // <0,0,7,3>: Cost 4 vext2 <7,3,0,0>, <7,3,0,0>
+ 2617251174U, // <0,0,7,4>: Cost 3 vext2 <0,0,0,0>, <7,4,5,6>
+ 3659772002U, // <0,0,7,5>: Cost 4 vext1 <6,0,0,7>, <5,6,7,0>
+ 3659772189U, // <0,0,7,6>: Cost 4 vext1 <6,0,0,7>, <6,0,0,7>
+ 2617251436U, // <0,0,7,7>: Cost 3 vext2 <0,0,0,0>, <7,7,7,7>
+ 2659054575U, // <0,0,7,u>: Cost 3 vext2 <7,0,0,0>, <7,0,0,0>
+ 135053414U, // <0,0,u,0>: Cost 1 vdup0 LHS
+ 1817419878U, // <0,0,u,1>: Cost 2 vzipl LHS, LHS
+ 1947435110U, // <0,0,u,2>: Cost 2 vtrnl LHS, LHS
+ 2568120467U, // <0,0,u,3>: Cost 3 vext1 <3,0,0,u>, <3,0,0,u>
+ 1476463926U, // <0,0,u,4>: Cost 2 vext1 <0,0,0,u>, RHS
+ 1543510170U, // <0,0,u,5>: Cost 2 vext2 <0,0,0,0>, RHS
+ 2752813210U, // <0,0,u,6>: Cost 3 vuzpl <0,2,0,2>, RHS
+ 2592011255U, // <0,0,u,7>: Cost 3 vext1 <7,0,0,u>, <7,0,0,u>
+ 135053414U, // <0,0,u,u>: Cost 1 vdup0 LHS
+ 2618581002U, // <0,1,0,0>: Cost 3 vext2 <0,2,0,1>, <0,0,1,1>
+ 1557446758U, // <0,1,0,1>: Cost 2 vext2 <2,3,0,1>, LHS
+ 2618581155U, // <0,1,0,2>: Cost 3 vext2 <0,2,0,1>, <0,2,0,1>
+ 2690548468U, // <0,1,0,3>: Cost 3 vext3 <1,0,3,0>, <1,0,3,0>
+ 2626543954U, // <0,1,0,4>: Cost 3 vext2 <1,5,0,1>, <0,4,1,5>
+ 4094985216U, // <0,1,0,5>: Cost 4 vtrnl <0,2,0,2>, <1,3,5,7>
+ 2592019278U, // <0,1,0,6>: Cost 3 vext1 <7,0,1,0>, <6,7,0,1>
+ 2592019448U, // <0,1,0,7>: Cost 3 vext1 <7,0,1,0>, <7,0,1,0>
+ 1557447325U, // <0,1,0,u>: Cost 2 vext2 <2,3,0,1>, LHS
+ 1476476938U, // <0,1,1,0>: Cost 2 vext1 <0,0,1,1>, <0,0,1,1>
+ 2886517556U, // <0,1,1,1>: Cost 3 vzipl LHS, <1,1,1,1>
+ 2886517654U, // <0,1,1,2>: Cost 3 vzipl LHS, <1,2,3,0>
+ 2886517720U, // <0,1,1,3>: Cost 3 vzipl LHS, <1,3,1,3>
+ 1476480310U, // <0,1,1,4>: Cost 2 vext1 <0,0,1,1>, RHS
+ 2886558864U, // <0,1,1,5>: Cost 3 vzipl LHS, <1,5,3,7>
+ 2550223354U, // <0,1,1,6>: Cost 3 vext1 <0,0,1,1>, <6,2,7,3>
+ 2550223856U, // <0,1,1,7>: Cost 3 vext1 <0,0,1,1>, <7,0,0,1>
+ 1476482862U, // <0,1,1,u>: Cost 2 vext1 <0,0,1,1>, LHS
+ 1494401126U, // <0,1,2,0>: Cost 2 vext1 <3,0,1,2>, LHS
+ 3020735284U, // <0,1,2,1>: Cost 3 vtrnl LHS, <1,1,1,1>
+ 2562172349U, // <0,1,2,2>: Cost 3 vext1 <2,0,1,2>, <2,0,1,2>
+ 835584U, // <0,1,2,3>: Cost 0 copy LHS
+ 1494404406U, // <0,1,2,4>: Cost 2 vext1 <3,0,1,2>, RHS
+ 3020735488U, // <0,1,2,5>: Cost 3 vtrnl LHS, <1,3,5,7>
+ 2631190458U, // <0,1,2,6>: Cost 3 vext2 <2,3,0,1>, <2,6,3,7>
+ 1518294010U, // <0,1,2,7>: Cost 2 vext1 <7,0,1,2>, <7,0,1,2>
+ 835584U, // <0,1,2,u>: Cost 0 copy LHS
+ 2692318156U, // <0,1,3,0>: Cost 3 vext3 <1,3,0,0>, <1,3,0,0>
+ 2691875800U, // <0,1,3,1>: Cost 3 vext3 <1,2,3,0>, <1,3,1,3>
+ 2691875806U, // <0,1,3,2>: Cost 3 vext3 <1,2,3,0>, <1,3,2,0>
+ 2692539367U, // <0,1,3,3>: Cost 3 vext3 <1,3,3,0>, <1,3,3,0>
+ 2562182454U, // <0,1,3,4>: Cost 3 vext1 <2,0,1,3>, RHS
+ 2691875840U, // <0,1,3,5>: Cost 3 vext3 <1,2,3,0>, <1,3,5,7>
+ 2692760578U, // <0,1,3,6>: Cost 3 vext3 <1,3,6,0>, <1,3,6,0>
+ 2639817411U, // <0,1,3,7>: Cost 3 vext2 <3,7,0,1>, <3,7,0,1>
+ 2691875863U, // <0,1,3,u>: Cost 3 vext3 <1,2,3,0>, <1,3,u,3>
+ 2568159334U, // <0,1,4,0>: Cost 3 vext1 <3,0,1,4>, LHS
+ 4095312692U, // <0,1,4,1>: Cost 4 vtrnl <0,2,4,6>, <1,1,1,1>
+ 2568160934U, // <0,1,4,2>: Cost 3 vext1 <3,0,1,4>, <2,3,0,1>
+ 2568161432U, // <0,1,4,3>: Cost 3 vext1 <3,0,1,4>, <3,0,1,4>
+ 2568162614U, // <0,1,4,4>: Cost 3 vext1 <3,0,1,4>, RHS
+ 1557450038U, // <0,1,4,5>: Cost 2 vext2 <2,3,0,1>, RHS
+ 2754235702U, // <0,1,4,6>: Cost 3 vuzpl <0,4,1,5>, RHS
+ 2592052220U, // <0,1,4,7>: Cost 3 vext1 <7,0,1,4>, <7,0,1,4>
+ 1557450281U, // <0,1,4,u>: Cost 2 vext2 <2,3,0,1>, RHS
+ 3765617775U, // <0,1,5,0>: Cost 4 vext3 <1,2,3,0>, <1,5,0,1>
+ 2647781007U, // <0,1,5,1>: Cost 3 vext2 <5,1,0,1>, <5,1,0,1>
+ 3704934138U, // <0,1,5,2>: Cost 4 vext2 <2,3,0,1>, <5,2,3,0>
+ 2691875984U, // <0,1,5,3>: Cost 3 vext3 <1,2,3,0>, <1,5,3,7>
+ 2657734598U, // <0,1,5,4>: Cost 3 vext2 <6,7,0,1>, <5,4,7,6>
+ 2650435539U, // <0,1,5,5>: Cost 3 vext2 <5,5,0,1>, <5,5,0,1>
+ 2651099172U, // <0,1,5,6>: Cost 3 vext2 <5,6,0,1>, <5,6,0,1>
+ 2651762805U, // <0,1,5,7>: Cost 3 vext2 <5,7,0,1>, <5,7,0,1>
+ 2691876029U, // <0,1,5,u>: Cost 3 vext3 <1,2,3,0>, <1,5,u,7>
+ 2592063590U, // <0,1,6,0>: Cost 3 vext1 <7,0,1,6>, LHS
+ 3765617871U, // <0,1,6,1>: Cost 4 vext3 <1,2,3,0>, <1,6,1,7>
+ 2654417337U, // <0,1,6,2>: Cost 3 vext2 <6,2,0,1>, <6,2,0,1>
+ 3765617889U, // <0,1,6,3>: Cost 4 vext3 <1,2,3,0>, <1,6,3,7>
+ 2592066870U, // <0,1,6,4>: Cost 3 vext1 <7,0,1,6>, RHS
+ 3765617907U, // <0,1,6,5>: Cost 4 vext3 <1,2,3,0>, <1,6,5,7>
+ 2657071869U, // <0,1,6,6>: Cost 3 vext2 <6,6,0,1>, <6,6,0,1>
+ 1583993678U, // <0,1,6,7>: Cost 2 vext2 <6,7,0,1>, <6,7,0,1>
+ 1584657311U, // <0,1,6,u>: Cost 2 vext2 <6,u,0,1>, <6,u,0,1>
+ 2657735672U, // <0,1,7,0>: Cost 3 vext2 <6,7,0,1>, <7,0,1,0>
+ 2657735808U, // <0,1,7,1>: Cost 3 vext2 <6,7,0,1>, <7,1,7,1>
+ 2631193772U, // <0,1,7,2>: Cost 3 vext2 <2,3,0,1>, <7,2,3,0>
+ 2661053667U, // <0,1,7,3>: Cost 3 vext2 <7,3,0,1>, <7,3,0,1>
+ 2657736038U, // <0,1,7,4>: Cost 3 vext2 <6,7,0,1>, <7,4,5,6>
+ 3721524621U, // <0,1,7,5>: Cost 4 vext2 <5,1,0,1>, <7,5,1,0>
+ 2657736158U, // <0,1,7,6>: Cost 3 vext2 <6,7,0,1>, <7,6,1,0>
+ 2657736300U, // <0,1,7,7>: Cost 3 vext2 <6,7,0,1>, <7,7,7,7>
+ 2657736322U, // <0,1,7,u>: Cost 3 vext2 <6,7,0,1>, <7,u,1,2>
+ 1494450278U, // <0,1,u,0>: Cost 2 vext1 <3,0,1,u>, LHS
+ 1557452590U, // <0,1,u,1>: Cost 2 vext2 <2,3,0,1>, LHS
+ 2754238254U, // <0,1,u,2>: Cost 3 vuzpl <0,4,1,5>, LHS
+ 835584U, // <0,1,u,3>: Cost 0 copy LHS
+ 1494453558U, // <0,1,u,4>: Cost 2 vext1 <3,0,1,u>, RHS
+ 1557452954U, // <0,1,u,5>: Cost 2 vext2 <2,3,0,1>, RHS
+ 2754238618U, // <0,1,u,6>: Cost 3 vuzpl <0,4,1,5>, RHS
+ 1518343168U, // <0,1,u,7>: Cost 2 vext1 <7,0,1,u>, <7,0,1,u>
+ 835584U, // <0,1,u,u>: Cost 0 copy LHS
+ 2752299008U, // <0,2,0,0>: Cost 3 vuzpl LHS, <0,0,0,0>
+ 1544847462U, // <0,2,0,1>: Cost 2 vext2 <0,2,0,2>, LHS
+ 1678557286U, // <0,2,0,2>: Cost 2 vuzpl LHS, LHS
+ 2696521165U, // <0,2,0,3>: Cost 3 vext3 <2,0,3,0>, <2,0,3,0>
+ 2752340172U, // <0,2,0,4>: Cost 3 vuzpl LHS, <0,2,4,6>
+ 2691876326U, // <0,2,0,5>: Cost 3 vext3 <1,2,3,0>, <2,0,5,7>
+ 2618589695U, // <0,2,0,6>: Cost 3 vext2 <0,2,0,2>, <0,6,2,7>
+ 2592093185U, // <0,2,0,7>: Cost 3 vext1 <7,0,2,0>, <7,0,2,0>
+ 1678557340U, // <0,2,0,u>: Cost 2 vuzpl LHS, LHS
+ 2618589942U, // <0,2,1,0>: Cost 3 vext2 <0,2,0,2>, <1,0,3,2>
+ 2752299828U, // <0,2,1,1>: Cost 3 vuzpl LHS, <1,1,1,1>
+ 2886518376U, // <0,2,1,2>: Cost 3 vzipl LHS, <2,2,2,2>
+ 2752299766U, // <0,2,1,3>: Cost 3 vuzpl LHS, <1,0,3,2>
+ 2550295862U, // <0,2,1,4>: Cost 3 vext1 <0,0,2,1>, RHS
+ 2752340992U, // <0,2,1,5>: Cost 3 vuzpl LHS, <1,3,5,7>
+ 2886559674U, // <0,2,1,6>: Cost 3 vzipl LHS, <2,6,3,7>
+ 3934208106U, // <0,2,1,7>: Cost 4 vuzpr <7,0,1,2>, <0,1,2,7>
+ 2752340771U, // <0,2,1,u>: Cost 3 vuzpl LHS, <1,0,u,2>
+ 1476558868U, // <0,2,2,0>: Cost 2 vext1 <0,0,2,2>, <0,0,2,2>
+ 2226628029U, // <0,2,2,1>: Cost 3 vrev <2,0,1,2>
+ 2752300648U, // <0,2,2,2>: Cost 3 vuzpl LHS, <2,2,2,2>
+ 3020736114U, // <0,2,2,3>: Cost 3 vtrnl LHS, <2,2,3,3>
+ 1476562230U, // <0,2,2,4>: Cost 2 vext1 <0,0,2,2>, RHS
+ 2550304464U, // <0,2,2,5>: Cost 3 vext1 <0,0,2,2>, <5,1,7,3>
+ 2618591162U, // <0,2,2,6>: Cost 3 vext2 <0,2,0,2>, <2,6,3,7>
+ 2550305777U, // <0,2,2,7>: Cost 3 vext1 <0,0,2,2>, <7,0,0,2>
+ 1476564782U, // <0,2,2,u>: Cost 2 vext1 <0,0,2,2>, LHS
+ 2618591382U, // <0,2,3,0>: Cost 3 vext2 <0,2,0,2>, <3,0,1,2>
+ 2752301206U, // <0,2,3,1>: Cost 3 vuzpl LHS, <3,0,1,2>
+ 3826043121U, // <0,2,3,2>: Cost 4 vuzpl LHS, <3,1,2,3>
+ 2752301468U, // <0,2,3,3>: Cost 3 vuzpl LHS, <3,3,3,3>
+ 2618591746U, // <0,2,3,4>: Cost 3 vext2 <0,2,0,2>, <3,4,5,6>
+ 2752301570U, // <0,2,3,5>: Cost 3 vuzpl LHS, <3,4,5,6>
+ 3830688102U, // <0,2,3,6>: Cost 4 vuzpl LHS, <3,2,6,3>
+ 2698807012U, // <0,2,3,7>: Cost 3 vext3 <2,3,7,0>, <2,3,7,0>
+ 2752301269U, // <0,2,3,u>: Cost 3 vuzpl LHS, <3,0,u,2>
+ 2562261094U, // <0,2,4,0>: Cost 3 vext1 <2,0,2,4>, LHS
+ 4095313828U, // <0,2,4,1>: Cost 4 vtrnl <0,2,4,6>, <2,6,1,3>
+ 2226718152U, // <0,2,4,2>: Cost 3 vrev <2,0,2,4>
+ 2568235169U, // <0,2,4,3>: Cost 3 vext1 <3,0,2,4>, <3,0,2,4>
+ 2562264374U, // <0,2,4,4>: Cost 3 vext1 <2,0,2,4>, RHS
+ 1544850742U, // <0,2,4,5>: Cost 2 vext2 <0,2,0,2>, RHS
+ 1678560566U, // <0,2,4,6>: Cost 2 vuzpl LHS, RHS
+ 2592125957U, // <0,2,4,7>: Cost 3 vext1 <7,0,2,4>, <7,0,2,4>
+ 1678560584U, // <0,2,4,u>: Cost 2 vuzpl LHS, RHS
+ 2691876686U, // <0,2,5,0>: Cost 3 vext3 <1,2,3,0>, <2,5,0,7>
+ 2618592976U, // <0,2,5,1>: Cost 3 vext2 <0,2,0,2>, <5,1,7,3>
+ 3765618528U, // <0,2,5,2>: Cost 4 vext3 <1,2,3,0>, <2,5,2,7>
+ 3765618536U, // <0,2,5,3>: Cost 4 vext3 <1,2,3,0>, <2,5,3,6>
+ 2618593222U, // <0,2,5,4>: Cost 3 vext2 <0,2,0,2>, <5,4,7,6>
+ 2752303108U, // <0,2,5,5>: Cost 3 vuzpl LHS, <5,5,5,5>
+ 2618593378U, // <0,2,5,6>: Cost 3 vext2 <0,2,0,2>, <5,6,7,0>
+ 2824785206U, // <0,2,5,7>: Cost 3 vuzpr <1,0,3,2>, RHS
+ 2824785207U, // <0,2,5,u>: Cost 3 vuzpr <1,0,3,2>, RHS
+ 2752303950U, // <0,2,6,0>: Cost 3 vuzpl LHS, <6,7,0,1>
+ 3830690081U, // <0,2,6,1>: Cost 4 vuzpl LHS, <6,0,1,2>
+ 2618593786U, // <0,2,6,2>: Cost 3 vext2 <0,2,0,2>, <6,2,7,3>
+ 2691876794U, // <0,2,6,3>: Cost 3 vext3 <1,2,3,0>, <2,6,3,7>
+ 2752303990U, // <0,2,6,4>: Cost 3 vuzpl LHS, <6,7,4,5>
+ 3830690445U, // <0,2,6,5>: Cost 4 vuzpl LHS, <6,4,5,6>
+ 2752303928U, // <0,2,6,6>: Cost 3 vuzpl LHS, <6,6,6,6>
+ 2657743695U, // <0,2,6,7>: Cost 3 vext2 <6,7,0,2>, <6,7,0,2>
+ 2691876839U, // <0,2,6,u>: Cost 3 vext3 <1,2,3,0>, <2,6,u,7>
+ 2659070961U, // <0,2,7,0>: Cost 3 vext2 <7,0,0,2>, <7,0,0,2>
+ 2659734594U, // <0,2,7,1>: Cost 3 vext2 <7,1,0,2>, <7,1,0,2>
+ 3734140051U, // <0,2,7,2>: Cost 4 vext2 <7,2,0,2>, <7,2,0,2>
+ 2701166596U, // <0,2,7,3>: Cost 3 vext3 <2,7,3,0>, <2,7,3,0>
+ 2662389094U, // <0,2,7,4>: Cost 3 vext2 <7,5,0,2>, <7,4,5,6>
+ 2662389126U, // <0,2,7,5>: Cost 3 vext2 <7,5,0,2>, <7,5,0,2>
+ 3736794583U, // <0,2,7,6>: Cost 4 vext2 <7,6,0,2>, <7,6,0,2>
+ 2752304748U, // <0,2,7,7>: Cost 3 vuzpl LHS, <7,7,7,7>
+ 2659070961U, // <0,2,7,u>: Cost 3 vext2 <7,0,0,2>, <7,0,0,2>
+ 1476608026U, // <0,2,u,0>: Cost 2 vext1 <0,0,2,u>, <0,0,2,u>
+ 1544853294U, // <0,2,u,1>: Cost 2 vext2 <0,2,0,2>, LHS
+ 1678563118U, // <0,2,u,2>: Cost 2 vuzpl LHS, LHS
+ 3021178482U, // <0,2,u,3>: Cost 3 vtrnl LHS, <2,2,3,3>
+ 1476611382U, // <0,2,u,4>: Cost 2 vext1 <0,0,2,u>, RHS
+ 1544853658U, // <0,2,u,5>: Cost 2 vext2 <0,2,0,2>, RHS
+ 1678563482U, // <0,2,u,6>: Cost 2 vuzpl LHS, RHS
+ 2824785449U, // <0,2,u,7>: Cost 3 vuzpr <1,0,3,2>, RHS
+ 1678563172U, // <0,2,u,u>: Cost 2 vuzpl LHS, LHS
+ 2556329984U, // <0,3,0,0>: Cost 3 vext1 <1,0,3,0>, <0,0,0,0>
+ 2686421142U, // <0,3,0,1>: Cost 3 vext3 <0,3,1,0>, <3,0,1,2>
+ 2562303437U, // <0,3,0,2>: Cost 3 vext1 <2,0,3,0>, <2,0,3,0>
+ 4094986652U, // <0,3,0,3>: Cost 4 vtrnl <0,2,0,2>, <3,3,3,3>
+ 2556333366U, // <0,3,0,4>: Cost 3 vext1 <1,0,3,0>, RHS
+ 4094986754U, // <0,3,0,5>: Cost 4 vtrnl <0,2,0,2>, <3,4,5,6>
+ 3798796488U, // <0,3,0,6>: Cost 4 vext3 <6,7,3,0>, <3,0,6,7>
+ 3776530634U, // <0,3,0,7>: Cost 4 vext3 <3,0,7,0>, <3,0,7,0>
+ 2556335918U, // <0,3,0,u>: Cost 3 vext1 <1,0,3,0>, LHS
+ 2886518934U, // <0,3,1,0>: Cost 3 vzipl LHS, <3,0,1,2>
+ 2556338933U, // <0,3,1,1>: Cost 3 vext1 <1,0,3,1>, <1,0,3,1>
+ 2691877105U, // <0,3,1,2>: Cost 3 vext3 <1,2,3,0>, <3,1,2,3>
+ 2886519196U, // <0,3,1,3>: Cost 3 vzipl LHS, <3,3,3,3>
+ 2886519298U, // <0,3,1,4>: Cost 3 vzipl LHS, <3,4,5,6>
+ 4095740418U, // <0,3,1,5>: Cost 4 vtrnl <0,3,1,4>, <3,4,5,6>
+ 3659944242U, // <0,3,1,6>: Cost 4 vext1 <6,0,3,1>, <6,0,3,1>
+ 3769600286U, // <0,3,1,7>: Cost 4 vext3 <1,u,3,0>, <3,1,7,3>
+ 2886519582U, // <0,3,1,u>: Cost 3 vzipl LHS, <3,u,1,2>
+ 1482604646U, // <0,3,2,0>: Cost 2 vext1 <1,0,3,2>, LHS
+ 1482605302U, // <0,3,2,1>: Cost 2 vext1 <1,0,3,2>, <1,0,3,2>
+ 2556348008U, // <0,3,2,2>: Cost 3 vext1 <1,0,3,2>, <2,2,2,2>
+ 3020736924U, // <0,3,2,3>: Cost 3 vtrnl LHS, <3,3,3,3>
+ 1482607926U, // <0,3,2,4>: Cost 2 vext1 <1,0,3,2>, RHS
+ 3020737026U, // <0,3,2,5>: Cost 3 vtrnl LHS, <3,4,5,6>
+ 2598154746U, // <0,3,2,6>: Cost 3 vext1 <u,0,3,2>, <6,2,7,3>
+ 2598155258U, // <0,3,2,7>: Cost 3 vext1 <u,0,3,2>, <7,0,1,2>
+ 1482610478U, // <0,3,2,u>: Cost 2 vext1 <1,0,3,2>, LHS
+ 3692341398U, // <0,3,3,0>: Cost 4 vext2 <0,2,0,3>, <3,0,1,2>
+ 2635851999U, // <0,3,3,1>: Cost 3 vext2 <3,1,0,3>, <3,1,0,3>
+ 3636069840U, // <0,3,3,2>: Cost 4 vext1 <2,0,3,3>, <2,0,3,3>
+ 2691877276U, // <0,3,3,3>: Cost 3 vext3 <1,2,3,0>, <3,3,3,3>
+ 3961522690U, // <0,3,3,4>: Cost 4 vzipl <0,3,1,4>, <3,4,5,6>
+ 3826797058U, // <0,3,3,5>: Cost 4 vuzpl <0,2,3,5>, <3,4,5,6>
+ 3703622282U, // <0,3,3,6>: Cost 4 vext2 <2,1,0,3>, <3,6,2,7>
+ 3769600452U, // <0,3,3,7>: Cost 4 vext3 <1,u,3,0>, <3,3,7,7>
+ 2640497430U, // <0,3,3,u>: Cost 3 vext2 <3,u,0,3>, <3,u,0,3>
+ 3962194070U, // <0,3,4,0>: Cost 4 vzipl <0,4,1,5>, <3,0,1,2>
+ 2232617112U, // <0,3,4,1>: Cost 3 vrev <3,0,1,4>
+ 2232690849U, // <0,3,4,2>: Cost 3 vrev <3,0,2,4>
+ 4095314332U, // <0,3,4,3>: Cost 4 vtrnl <0,2,4,6>, <3,3,3,3>
+ 3962194434U, // <0,3,4,4>: Cost 4 vzipl <0,4,1,5>, <3,4,5,6>
+ 2691877378U, // <0,3,4,5>: Cost 3 vext3 <1,2,3,0>, <3,4,5,6>
+ 3826765110U, // <0,3,4,6>: Cost 4 vuzpl <0,2,3,1>, RHS
+ 3665941518U, // <0,3,4,7>: Cost 4 vext1 <7,0,3,4>, <7,0,3,4>
+ 2691877405U, // <0,3,4,u>: Cost 3 vext3 <1,2,3,0>, <3,4,u,6>
+ 3630112870U, // <0,3,5,0>: Cost 4 vext1 <1,0,3,5>, LHS
+ 3630113526U, // <0,3,5,1>: Cost 4 vext1 <1,0,3,5>, <1,0,3,2>
+ 4035199734U, // <0,3,5,2>: Cost 4 vzipr <1,4,0,5>, <1,0,3,2>
+ 3769600578U, // <0,3,5,3>: Cost 4 vext3 <1,u,3,0>, <3,5,3,7>
+ 2232846516U, // <0,3,5,4>: Cost 3 vrev <3,0,4,5>
+ 3779037780U, // <0,3,5,5>: Cost 4 vext3 <3,4,5,0>, <3,5,5,7>
+ 2718714461U, // <0,3,5,6>: Cost 3 vext3 <5,6,7,0>, <3,5,6,7>
+ 2706106975U, // <0,3,5,7>: Cost 3 vext3 <3,5,7,0>, <3,5,7,0>
+ 2233141464U, // <0,3,5,u>: Cost 3 vrev <3,0,u,5>
+ 2691877496U, // <0,3,6,0>: Cost 3 vext3 <1,2,3,0>, <3,6,0,7>
+ 3727511914U, // <0,3,6,1>: Cost 4 vext2 <6,1,0,3>, <6,1,0,3>
+ 3765619338U, // <0,3,6,2>: Cost 4 vext3 <1,2,3,0>, <3,6,2,7>
+ 3765619347U, // <0,3,6,3>: Cost 4 vext3 <1,2,3,0>, <3,6,3,7>
+ 3765987996U, // <0,3,6,4>: Cost 4 vext3 <1,2,u,0>, <3,6,4,7>
+ 3306670270U, // <0,3,6,5>: Cost 4 vrev <3,0,5,6>
+ 3792456365U, // <0,3,6,6>: Cost 4 vext3 <5,6,7,0>, <3,6,6,6>
+ 2706770608U, // <0,3,6,7>: Cost 3 vext3 <3,6,7,0>, <3,6,7,0>
+ 2706844345U, // <0,3,6,u>: Cost 3 vext3 <3,6,u,0>, <3,6,u,0>
+ 3769600707U, // <0,3,7,0>: Cost 4 vext3 <1,u,3,0>, <3,7,0,1>
+ 2659742787U, // <0,3,7,1>: Cost 3 vext2 <7,1,0,3>, <7,1,0,3>
+ 3636102612U, // <0,3,7,2>: Cost 4 vext1 <2,0,3,7>, <2,0,3,7>
+ 3769600740U, // <0,3,7,3>: Cost 4 vext3 <1,u,3,0>, <3,7,3,7>
+ 3769600747U, // <0,3,7,4>: Cost 4 vext3 <1,u,3,0>, <3,7,4,5>
+ 3769600758U, // <0,3,7,5>: Cost 4 vext3 <1,u,3,0>, <3,7,5,7>
+ 3659993400U, // <0,3,7,6>: Cost 4 vext1 <6,0,3,7>, <6,0,3,7>
+ 3781176065U, // <0,3,7,7>: Cost 4 vext3 <3,7,7,0>, <3,7,7,0>
+ 2664388218U, // <0,3,7,u>: Cost 3 vext2 <7,u,0,3>, <7,u,0,3>
+ 1482653798U, // <0,3,u,0>: Cost 2 vext1 <1,0,3,u>, LHS
+ 1482654460U, // <0,3,u,1>: Cost 2 vext1 <1,0,3,u>, <1,0,3,u>
+ 2556397160U, // <0,3,u,2>: Cost 3 vext1 <1,0,3,u>, <2,2,2,2>
+ 3021179292U, // <0,3,u,3>: Cost 3 vtrnl LHS, <3,3,3,3>
+ 1482657078U, // <0,3,u,4>: Cost 2 vext1 <1,0,3,u>, RHS
+ 3021179394U, // <0,3,u,5>: Cost 3 vtrnl LHS, <3,4,5,6>
+ 2598203898U, // <0,3,u,6>: Cost 3 vext1 <u,0,3,u>, <6,2,7,3>
+ 2708097874U, // <0,3,u,7>: Cost 3 vext3 <3,u,7,0>, <3,u,7,0>
+ 1482659630U, // <0,3,u,u>: Cost 2 vext1 <1,0,3,u>, LHS
+ 2617278468U, // <0,4,0,0>: Cost 3 vext2 <0,0,0,4>, <0,0,0,4>
+ 2618605670U, // <0,4,0,1>: Cost 3 vext2 <0,2,0,4>, LHS
+ 2618605734U, // <0,4,0,2>: Cost 3 vext2 <0,2,0,4>, <0,2,0,4>
+ 3642091695U, // <0,4,0,3>: Cost 4 vext1 <3,0,4,0>, <3,0,4,0>
+ 2753134796U, // <0,4,0,4>: Cost 3 vuzpl <0,2,4,6>, <0,2,4,6>
+ 2718714770U, // <0,4,0,5>: Cost 3 vext3 <5,6,7,0>, <4,0,5,1>
+ 3021245750U, // <0,4,0,6>: Cost 3 vtrnl <0,2,0,2>, RHS
+ 3665982483U, // <0,4,0,7>: Cost 4 vext1 <7,0,4,0>, <7,0,4,0>
+ 3021245768U, // <0,4,0,u>: Cost 3 vtrnl <0,2,0,2>, RHS
+ 2568355942U, // <0,4,1,0>: Cost 3 vext1 <3,0,4,1>, LHS
+ 3692348212U, // <0,4,1,1>: Cost 4 vext2 <0,2,0,4>, <1,1,1,1>
+ 3692348310U, // <0,4,1,2>: Cost 4 vext2 <0,2,0,4>, <1,2,3,0>
+ 2568358064U, // <0,4,1,3>: Cost 3 vext1 <3,0,4,1>, <3,0,4,1>
+ 2568359222U, // <0,4,1,4>: Cost 3 vext1 <3,0,4,1>, RHS
+ 1812778294U, // <0,4,1,5>: Cost 2 vzipl LHS, RHS
+ 3022671158U, // <0,4,1,6>: Cost 3 vtrnl <0,4,1,5>, RHS
+ 2592248852U, // <0,4,1,7>: Cost 3 vext1 <7,0,4,1>, <7,0,4,1>
+ 1812778537U, // <0,4,1,u>: Cost 2 vzipl LHS, RHS
+ 2568364134U, // <0,4,2,0>: Cost 3 vext1 <3,0,4,2>, LHS
+ 2238573423U, // <0,4,2,1>: Cost 3 vrev <4,0,1,2>
+ 3692349032U, // <0,4,2,2>: Cost 4 vext2 <0,2,0,4>, <2,2,2,2>
+ 2631214761U, // <0,4,2,3>: Cost 3 vext2 <2,3,0,4>, <2,3,0,4>
+ 2568367414U, // <0,4,2,4>: Cost 3 vext1 <3,0,4,2>, RHS
+ 2887028022U, // <0,4,2,5>: Cost 3 vzipl <0,2,0,2>, RHS
+ 1946996022U, // <0,4,2,6>: Cost 2 vtrnl LHS, RHS
+ 2592257045U, // <0,4,2,7>: Cost 3 vext1 <7,0,4,2>, <7,0,4,2>
+ 1946996040U, // <0,4,2,u>: Cost 2 vtrnl LHS, RHS
+ 3692349590U, // <0,4,3,0>: Cost 4 vext2 <0,2,0,4>, <3,0,1,2>
+ 3826878614U, // <0,4,3,1>: Cost 4 vuzpl <0,2,4,6>, <3,0,1,2>
+ 3826878625U, // <0,4,3,2>: Cost 4 vuzpl <0,2,4,6>, <3,0,2,4>
+ 3692349852U, // <0,4,3,3>: Cost 4 vext2 <0,2,0,4>, <3,3,3,3>
+ 3692349954U, // <0,4,3,4>: Cost 4 vext2 <0,2,0,4>, <3,4,5,6>
+ 3826878978U, // <0,4,3,5>: Cost 4 vuzpl <0,2,4,6>, <3,4,5,6>
+ 4095200566U, // <0,4,3,6>: Cost 4 vtrnl <0,2,3,1>, RHS
+ 3713583814U, // <0,4,3,7>: Cost 4 vext2 <3,7,0,4>, <3,7,0,4>
+ 3692350238U, // <0,4,3,u>: Cost 4 vext2 <0,2,0,4>, <3,u,1,2>
+ 2550464552U, // <0,4,4,0>: Cost 3 vext1 <0,0,4,4>, <0,0,4,4>
+ 3962194914U, // <0,4,4,1>: Cost 4 vzipl <0,4,1,5>, <4,1,5,0>
+ 3693677631U, // <0,4,4,2>: Cost 4 vext2 <0,4,0,4>, <4,2,6,3>
+ 3642124467U, // <0,4,4,3>: Cost 4 vext1 <3,0,4,4>, <3,0,4,4>
+ 2718715088U, // <0,4,4,4>: Cost 3 vext3 <5,6,7,0>, <4,4,4,4>
+ 2618608950U, // <0,4,4,5>: Cost 3 vext2 <0,2,0,4>, RHS
+ 2753137974U, // <0,4,4,6>: Cost 3 vuzpl <0,2,4,6>, RHS
+ 3666015255U, // <0,4,4,7>: Cost 4 vext1 <7,0,4,4>, <7,0,4,4>
+ 2618609193U, // <0,4,4,u>: Cost 3 vext2 <0,2,0,4>, RHS
+ 2568388710U, // <0,4,5,0>: Cost 3 vext1 <3,0,4,5>, LHS
+ 2568389526U, // <0,4,5,1>: Cost 3 vext1 <3,0,4,5>, <1,2,3,0>
+ 3636159963U, // <0,4,5,2>: Cost 4 vext1 <2,0,4,5>, <2,0,4,5>
+ 2568390836U, // <0,4,5,3>: Cost 3 vext1 <3,0,4,5>, <3,0,4,5>
+ 2568391990U, // <0,4,5,4>: Cost 3 vext1 <3,0,4,5>, RHS
+ 2718715180U, // <0,4,5,5>: Cost 3 vext3 <5,6,7,0>, <4,5,5,6>
+ 1618136374U, // <0,4,5,6>: Cost 2 vext3 <1,2,3,0>, RHS
+ 2592281624U, // <0,4,5,7>: Cost 3 vext1 <7,0,4,5>, <7,0,4,5>
+ 1618136392U, // <0,4,5,u>: Cost 2 vext3 <1,2,3,0>, RHS
+ 2550480938U, // <0,4,6,0>: Cost 3 vext1 <0,0,4,6>, <0,0,4,6>
+ 3826880801U, // <0,4,6,1>: Cost 4 vuzpl <0,2,4,6>, <6,0,1,2>
+ 2562426332U, // <0,4,6,2>: Cost 3 vext1 <2,0,4,6>, <2,0,4,6>
+ 3786190181U, // <0,4,6,3>: Cost 4 vext3 <4,6,3,0>, <4,6,3,0>
+ 2718715252U, // <0,4,6,4>: Cost 3 vext3 <5,6,7,0>, <4,6,4,6>
+ 3826881165U, // <0,4,6,5>: Cost 4 vuzpl <0,2,4,6>, <6,4,5,6>
+ 2712669568U, // <0,4,6,6>: Cost 3 vext3 <4,6,6,0>, <4,6,6,0>
+ 2657760081U, // <0,4,6,7>: Cost 3 vext2 <6,7,0,4>, <6,7,0,4>
+ 2718715284U, // <0,4,6,u>: Cost 3 vext3 <5,6,7,0>, <4,6,u,2>
+ 3654090854U, // <0,4,7,0>: Cost 4 vext1 <5,0,4,7>, LHS
+ 3934229326U, // <0,4,7,1>: Cost 4 vuzpr <7,0,1,4>, <6,7,0,1>
+ 3734156437U, // <0,4,7,2>: Cost 4 vext2 <7,2,0,4>, <7,2,0,4>
+ 3734820070U, // <0,4,7,3>: Cost 4 vext2 <7,3,0,4>, <7,3,0,4>
+ 3654094134U, // <0,4,7,4>: Cost 4 vext1 <5,0,4,7>, RHS
+ 2713259464U, // <0,4,7,5>: Cost 3 vext3 <4,7,5,0>, <4,7,5,0>
+ 2713333201U, // <0,4,7,6>: Cost 3 vext3 <4,7,6,0>, <4,7,6,0>
+ 3654095866U, // <0,4,7,7>: Cost 4 vext1 <5,0,4,7>, <7,0,1,2>
+ 2713259464U, // <0,4,7,u>: Cost 3 vext3 <4,7,5,0>, <4,7,5,0>
+ 2568413286U, // <0,4,u,0>: Cost 3 vext1 <3,0,4,u>, LHS
+ 2618611502U, // <0,4,u,1>: Cost 3 vext2 <0,2,0,4>, LHS
+ 2753140526U, // <0,4,u,2>: Cost 3 vuzpl <0,2,4,6>, LHS
+ 2568415415U, // <0,4,u,3>: Cost 3 vext1 <3,0,4,u>, <3,0,4,u>
+ 2568416566U, // <0,4,u,4>: Cost 3 vext1 <3,0,4,u>, RHS
+ 1817423158U, // <0,4,u,5>: Cost 2 vzipl LHS, RHS
+ 1947438390U, // <0,4,u,6>: Cost 2 vtrnl LHS, RHS
+ 2592306203U, // <0,4,u,7>: Cost 3 vext1 <7,0,4,u>, <7,0,4,u>
+ 1947438408U, // <0,4,u,u>: Cost 2 vtrnl LHS, RHS
+ 3630219264U, // <0,5,0,0>: Cost 4 vext1 <1,0,5,0>, <0,0,0,0>
+ 2625912934U, // <0,5,0,1>: Cost 3 vext2 <1,4,0,5>, LHS
+ 3692355748U, // <0,5,0,2>: Cost 4 vext2 <0,2,0,5>, <0,2,0,2>
+ 3693019384U, // <0,5,0,3>: Cost 4 vext2 <0,3,0,5>, <0,3,0,5>
+ 3630222646U, // <0,5,0,4>: Cost 4 vext1 <1,0,5,0>, RHS
+ 3699655062U, // <0,5,0,5>: Cost 4 vext2 <1,4,0,5>, <0,5,0,1>
+ 2718715508U, // <0,5,0,6>: Cost 3 vext3 <5,6,7,0>, <5,0,6,1>
+ 3087011126U, // <0,5,0,7>: Cost 3 vtrnr <0,0,0,0>, RHS
+ 2625913501U, // <0,5,0,u>: Cost 3 vext2 <1,4,0,5>, LHS
+ 1500659814U, // <0,5,1,0>: Cost 2 vext1 <4,0,5,1>, LHS
+ 2886520528U, // <0,5,1,1>: Cost 3 vzipl LHS, <5,1,7,3>
+ 2574403176U, // <0,5,1,2>: Cost 3 vext1 <4,0,5,1>, <2,2,2,2>
+ 2574403734U, // <0,5,1,3>: Cost 3 vext1 <4,0,5,1>, <3,0,1,2>
+ 1500662674U, // <0,5,1,4>: Cost 2 vext1 <4,0,5,1>, <4,0,5,1>
+ 2886520836U, // <0,5,1,5>: Cost 3 vzipl LHS, <5,5,5,5>
+ 2886520930U, // <0,5,1,6>: Cost 3 vzipl LHS, <5,6,7,0>
+ 2718715600U, // <0,5,1,7>: Cost 3 vext3 <5,6,7,0>, <5,1,7,3>
+ 1500665646U, // <0,5,1,u>: Cost 2 vext1 <4,0,5,1>, LHS
+ 2556493926U, // <0,5,2,0>: Cost 3 vext1 <1,0,5,2>, LHS
+ 2244546120U, // <0,5,2,1>: Cost 3 vrev <5,0,1,2>
+ 3692357256U, // <0,5,2,2>: Cost 4 vext2 <0,2,0,5>, <2,2,5,7>
+ 2568439994U, // <0,5,2,3>: Cost 3 vext1 <3,0,5,2>, <3,0,5,2>
+ 2556497206U, // <0,5,2,4>: Cost 3 vext1 <1,0,5,2>, RHS
+ 3020738564U, // <0,5,2,5>: Cost 3 vtrnl LHS, <5,5,5,5>
+ 4027877161U, // <0,5,2,6>: Cost 4 vzipr <0,2,0,2>, <2,4,5,6>
+ 3093220662U, // <0,5,2,7>: Cost 3 vtrnr <1,0,3,2>, RHS
+ 3093220663U, // <0,5,2,u>: Cost 3 vtrnr <1,0,3,2>, RHS
+ 3699656854U, // <0,5,3,0>: Cost 4 vext2 <1,4,0,5>, <3,0,1,2>
+ 3699656927U, // <0,5,3,1>: Cost 4 vext2 <1,4,0,5>, <3,1,0,3>
+ 3699657006U, // <0,5,3,2>: Cost 4 vext2 <1,4,0,5>, <3,2,0,1>
+ 3699657116U, // <0,5,3,3>: Cost 4 vext2 <1,4,0,5>, <3,3,3,3>
+ 2637859284U, // <0,5,3,4>: Cost 3 vext2 <3,4,0,5>, <3,4,0,5>
+ 3790319453U, // <0,5,3,5>: Cost 4 vext3 <5,3,5,0>, <5,3,5,0>
+ 3699657354U, // <0,5,3,6>: Cost 4 vext2 <1,4,0,5>, <3,6,2,7>
+ 2716725103U, // <0,5,3,7>: Cost 3 vext3 <5,3,7,0>, <5,3,7,0>
+ 2716798840U, // <0,5,3,u>: Cost 3 vext3 <5,3,u,0>, <5,3,u,0>
+ 2661747602U, // <0,5,4,0>: Cost 3 vext2 <7,4,0,5>, <4,0,5,1>
+ 3630252810U, // <0,5,4,1>: Cost 4 vext1 <1,0,5,4>, <1,0,5,4>
+ 3636225507U, // <0,5,4,2>: Cost 4 vext1 <2,0,5,4>, <2,0,5,4>
+ 3716910172U, // <0,5,4,3>: Cost 4 vext2 <4,3,0,5>, <4,3,0,5>
+ 3962195892U, // <0,5,4,4>: Cost 4 vzipl <0,4,1,5>, <5,4,5,6>
+ 2625916214U, // <0,5,4,5>: Cost 3 vext2 <1,4,0,5>, RHS
+ 3718901071U, // <0,5,4,6>: Cost 4 vext2 <4,6,0,5>, <4,6,0,5>
+ 2718715846U, // <0,5,4,7>: Cost 3 vext3 <5,6,7,0>, <5,4,7,6>
+ 2625916457U, // <0,5,4,u>: Cost 3 vext2 <1,4,0,5>, RHS
+ 3791278034U, // <0,5,5,0>: Cost 4 vext3 <5,5,0,0>, <5,5,0,0>
+ 3791351771U, // <0,5,5,1>: Cost 4 vext3 <5,5,1,0>, <5,5,1,0>
+ 3318386260U, // <0,5,5,2>: Cost 4 vrev <5,0,2,5>
+ 3791499245U, // <0,5,5,3>: Cost 4 vext3 <5,5,3,0>, <5,5,3,0>
+ 3318533734U, // <0,5,5,4>: Cost 4 vrev <5,0,4,5>
+ 2718715908U, // <0,5,5,5>: Cost 3 vext3 <5,6,7,0>, <5,5,5,5>
+ 2657767522U, // <0,5,5,6>: Cost 3 vext2 <6,7,0,5>, <5,6,7,0>
+ 2718715928U, // <0,5,5,7>: Cost 3 vext3 <5,6,7,0>, <5,5,7,7>
+ 2718715937U, // <0,5,5,u>: Cost 3 vext3 <5,6,7,0>, <5,5,u,7>
+ 2592358502U, // <0,5,6,0>: Cost 3 vext1 <7,0,5,6>, LHS
+ 3792015404U, // <0,5,6,1>: Cost 4 vext3 <5,6,1,0>, <5,6,1,0>
+ 3731509754U, // <0,5,6,2>: Cost 4 vext2 <6,7,0,5>, <6,2,7,3>
+ 3785748546U, // <0,5,6,3>: Cost 4 vext3 <4,5,6,0>, <5,6,3,4>
+ 2592361782U, // <0,5,6,4>: Cost 3 vext1 <7,0,5,6>, RHS
+ 2592362594U, // <0,5,6,5>: Cost 3 vext1 <7,0,5,6>, <5,6,7,0>
+ 3785748576U, // <0,5,6,6>: Cost 4 vext3 <4,5,6,0>, <5,6,6,7>
+ 1644974178U, // <0,5,6,7>: Cost 2 vext3 <5,6,7,0>, <5,6,7,0>
+ 1645047915U, // <0,5,6,u>: Cost 2 vext3 <5,6,u,0>, <5,6,u,0>
+ 2562506854U, // <0,5,7,0>: Cost 3 vext1 <2,0,5,7>, LHS
+ 2562507670U, // <0,5,7,1>: Cost 3 vext1 <2,0,5,7>, <1,2,3,0>
+ 2562508262U, // <0,5,7,2>: Cost 3 vext1 <2,0,5,7>, <2,0,5,7>
+ 3636250774U, // <0,5,7,3>: Cost 4 vext1 <2,0,5,7>, <3,0,1,2>
+ 2562510134U, // <0,5,7,4>: Cost 3 vext1 <2,0,5,7>, RHS
+ 2718716072U, // <0,5,7,5>: Cost 3 vext3 <5,6,7,0>, <5,7,5,7>
+ 2718716074U, // <0,5,7,6>: Cost 3 vext3 <5,6,7,0>, <5,7,6,0>
+ 2719379635U, // <0,5,7,7>: Cost 3 vext3 <5,7,7,0>, <5,7,7,0>
+ 2562512686U, // <0,5,7,u>: Cost 3 vext1 <2,0,5,7>, LHS
+ 1500717158U, // <0,5,u,0>: Cost 2 vext1 <4,0,5,u>, LHS
+ 2625918766U, // <0,5,u,1>: Cost 3 vext2 <1,4,0,5>, LHS
+ 2719674583U, // <0,5,u,2>: Cost 3 vext3 <5,u,2,0>, <5,u,2,0>
+ 2568489152U, // <0,5,u,3>: Cost 3 vext1 <3,0,5,u>, <3,0,5,u>
+ 1500720025U, // <0,5,u,4>: Cost 2 vext1 <4,0,5,u>, <4,0,5,u>
+ 2625919130U, // <0,5,u,5>: Cost 3 vext2 <1,4,0,5>, RHS
+ 2586407243U, // <0,5,u,6>: Cost 3 vext1 <6,0,5,u>, <6,0,5,u>
+ 1646301444U, // <0,5,u,7>: Cost 2 vext3 <5,u,7,0>, <5,u,7,0>
+ 1646375181U, // <0,5,u,u>: Cost 2 vext3 <5,u,u,0>, <5,u,u,0>
+ 2586411110U, // <0,6,0,0>: Cost 3 vext1 <6,0,6,0>, LHS
+ 2619949158U, // <0,6,0,1>: Cost 3 vext2 <0,4,0,6>, LHS
+ 2619949220U, // <0,6,0,2>: Cost 3 vext2 <0,4,0,6>, <0,2,0,2>
+ 3785748789U, // <0,6,0,3>: Cost 4 vext3 <4,5,6,0>, <6,0,3,4>
+ 2619949386U, // <0,6,0,4>: Cost 3 vext2 <0,4,0,6>, <0,4,0,6>
+ 2586415202U, // <0,6,0,5>: Cost 3 vext1 <6,0,6,0>, <5,6,7,0>
+ 2586415436U, // <0,6,0,6>: Cost 3 vext1 <6,0,6,0>, <6,0,6,0>
+ 2952793398U, // <0,6,0,7>: Cost 3 vzipr <0,0,0,0>, RHS
+ 2619949725U, // <0,6,0,u>: Cost 3 vext2 <0,4,0,6>, LHS
+ 2562531430U, // <0,6,1,0>: Cost 3 vext1 <2,0,6,1>, LHS
+ 3693691700U, // <0,6,1,1>: Cost 4 vext2 <0,4,0,6>, <1,1,1,1>
+ 2886521338U, // <0,6,1,2>: Cost 3 vzipl LHS, <6,2,7,3>
+ 3693691864U, // <0,6,1,3>: Cost 4 vext2 <0,4,0,6>, <1,3,1,3>
+ 2562534710U, // <0,6,1,4>: Cost 3 vext1 <2,0,6,1>, RHS
+ 2580450932U, // <0,6,1,5>: Cost 3 vext1 <5,0,6,1>, <5,0,6,1>
+ 2886521656U, // <0,6,1,6>: Cost 3 vzipl LHS, <6,6,6,6>
+ 2966736182U, // <0,6,1,7>: Cost 3 vzipr <2,3,0,1>, RHS
+ 2966736183U, // <0,6,1,u>: Cost 3 vzipr <2,3,0,1>, RHS
+ 1500741734U, // <0,6,2,0>: Cost 2 vext1 <4,0,6,2>, LHS
+ 2250518817U, // <0,6,2,1>: Cost 3 vrev <6,0,1,2>
+ 2574485096U, // <0,6,2,2>: Cost 3 vext1 <4,0,6,2>, <2,2,2,2>
+ 2631894694U, // <0,6,2,3>: Cost 3 vext2 <2,4,0,6>, <2,3,0,1>
+ 1500744604U, // <0,6,2,4>: Cost 2 vext1 <4,0,6,2>, <4,0,6,2>
+ 2574487248U, // <0,6,2,5>: Cost 3 vext1 <4,0,6,2>, <5,1,7,3>
+ 3020739384U, // <0,6,2,6>: Cost 3 vtrnl LHS, <6,6,6,6>
+ 2954136886U, // <0,6,2,7>: Cost 3 vzipr <0,2,0,2>, RHS
+ 1500747566U, // <0,6,2,u>: Cost 2 vext1 <4,0,6,2>, LHS
+ 3693693078U, // <0,6,3,0>: Cost 4 vext2 <0,4,0,6>, <3,0,1,2>
+ 3705637136U, // <0,6,3,1>: Cost 4 vext2 <2,4,0,6>, <3,1,5,7>
+ 3705637192U, // <0,6,3,2>: Cost 4 vext2 <2,4,0,6>, <3,2,3,0>
+ 3693693340U, // <0,6,3,3>: Cost 4 vext2 <0,4,0,6>, <3,3,3,3>
+ 2637867477U, // <0,6,3,4>: Cost 3 vext2 <3,4,0,6>, <3,4,0,6>
+ 3705637424U, // <0,6,3,5>: Cost 4 vext2 <2,4,0,6>, <3,5,1,7>
+ 3666154056U, // <0,6,3,6>: Cost 4 vext1 <7,0,6,3>, <6,3,7,0>
+ 2722697800U, // <0,6,3,7>: Cost 3 vext3 <6,3,7,0>, <6,3,7,0>
+ 2722771537U, // <0,6,3,u>: Cost 3 vext3 <6,3,u,0>, <6,3,u,0>
+ 2562556006U, // <0,6,4,0>: Cost 3 vext1 <2,0,6,4>, LHS
+ 4095316257U, // <0,6,4,1>: Cost 4 vtrnl <0,2,4,6>, <6,0,1,2>
+ 2562557420U, // <0,6,4,2>: Cost 3 vext1 <2,0,6,4>, <2,0,6,4>
+ 3636299926U, // <0,6,4,3>: Cost 4 vext1 <2,0,6,4>, <3,0,1,2>
+ 2562559286U, // <0,6,4,4>: Cost 3 vext1 <2,0,6,4>, RHS
+ 2619952438U, // <0,6,4,5>: Cost 3 vext2 <0,4,0,6>, RHS
+ 2723287696U, // <0,6,4,6>: Cost 3 vext3 <6,4,6,0>, <6,4,6,0>
+ 4027895094U, // <0,6,4,7>: Cost 4 vzipr <0,2,0,4>, RHS
+ 2619952681U, // <0,6,4,u>: Cost 3 vext2 <0,4,0,6>, RHS
+ 2718716594U, // <0,6,5,0>: Cost 3 vext3 <5,6,7,0>, <6,5,0,7>
+ 3648250774U, // <0,6,5,1>: Cost 4 vext1 <4,0,6,5>, <1,2,3,0>
+ 3792458436U, // <0,6,5,2>: Cost 4 vext3 <5,6,7,0>, <6,5,2,7>
+ 3705638767U, // <0,6,5,3>: Cost 5 vext2 <2,4,0,6>, <5,3,7,0>
+ 3648252831U, // <0,6,5,4>: Cost 4 vext1 <4,0,6,5>, <4,0,6,5>
+ 3797619416U, // <0,6,5,5>: Cost 4 vext3 <6,5,5,0>, <6,5,5,0>
+ 3792458472U, // <0,6,5,6>: Cost 4 vext3 <5,6,7,0>, <6,5,6,7>
+ 4035202358U, // <0,6,5,7>: Cost 4 vzipr <1,4,0,5>, RHS
+ 2718716594U, // <0,6,5,u>: Cost 3 vext3 <5,6,7,0>, <6,5,0,7>
+ 3786412796U, // <0,6,6,0>: Cost 4 vext3 <4,6,6,0>, <6,6,0,0>
+ 3792458504U, // <0,6,6,1>: Cost 4 vext3 <5,6,7,0>, <6,6,1,3>
+ 3728200126U, // <0,6,6,2>: Cost 4 vext2 <6,2,0,6>, <6,2,0,6>
+ 3798135575U, // <0,6,6,3>: Cost 4 vext3 <6,6,3,0>, <6,6,3,0>
+ 3786412836U, // <0,6,6,4>: Cost 4 vext3 <4,6,6,0>, <6,6,4,4>
+ 3792458543U, // <0,6,6,5>: Cost 4 vext3 <5,6,7,0>, <6,6,5,6>
+ 2718716728U, // <0,6,6,6>: Cost 3 vext3 <5,6,7,0>, <6,6,6,6>
+ 2718716738U, // <0,6,6,7>: Cost 3 vext3 <5,6,7,0>, <6,6,7,7>
+ 2718716747U, // <0,6,6,u>: Cost 3 vext3 <5,6,7,0>, <6,6,u,7>
+ 2718716750U, // <0,6,7,0>: Cost 3 vext3 <5,6,7,0>, <6,7,0,1>
+ 2724909910U, // <0,6,7,1>: Cost 3 vext3 <6,7,1,0>, <6,7,1,0>
+ 3636323823U, // <0,6,7,2>: Cost 4 vext1 <2,0,6,7>, <2,0,6,7>
+ 2725057384U, // <0,6,7,3>: Cost 3 vext3 <6,7,3,0>, <6,7,3,0>
+ 2718716790U, // <0,6,7,4>: Cost 3 vext3 <5,6,7,0>, <6,7,4,5>
+ 2718716800U, // <0,6,7,5>: Cost 3 vext3 <5,6,7,0>, <6,7,5,6>
+ 3792458629U, // <0,6,7,6>: Cost 4 vext3 <5,6,7,0>, <6,7,6,2>
+ 2725352332U, // <0,6,7,7>: Cost 3 vext3 <6,7,7,0>, <6,7,7,0>
+ 2718716822U, // <0,6,7,u>: Cost 3 vext3 <5,6,7,0>, <6,7,u,1>
+ 1500790886U, // <0,6,u,0>: Cost 2 vext1 <4,0,6,u>, LHS
+ 2619954990U, // <0,6,u,1>: Cost 3 vext2 <0,4,0,6>, LHS
+ 2562590192U, // <0,6,u,2>: Cost 3 vext1 <2,0,6,u>, <2,0,6,u>
+ 2725721017U, // <0,6,u,3>: Cost 3 vext3 <6,u,3,0>, <6,u,3,0>
+ 1500793762U, // <0,6,u,4>: Cost 2 vext1 <4,0,6,u>, <4,0,6,u>
+ 2619955354U, // <0,6,u,5>: Cost 3 vext2 <0,4,0,6>, RHS
+ 2725942228U, // <0,6,u,6>: Cost 3 vext3 <6,u,6,0>, <6,u,6,0>
+ 2954186038U, // <0,6,u,7>: Cost 3 vzipr <0,2,0,u>, RHS
+ 1500796718U, // <0,6,u,u>: Cost 2 vext1 <4,0,6,u>, LHS
+ 2256401391U, // <0,7,0,0>: Cost 3 vrev <7,0,0,0>
+ 2632564838U, // <0,7,0,1>: Cost 3 vext2 <2,5,0,7>, LHS
+ 2256548865U, // <0,7,0,2>: Cost 3 vrev <7,0,2,0>
+ 3700998396U, // <0,7,0,3>: Cost 4 vext2 <1,6,0,7>, <0,3,1,0>
+ 2718716952U, // <0,7,0,4>: Cost 3 vext3 <5,6,7,0>, <7,0,4,5>
+ 2718716962U, // <0,7,0,5>: Cost 3 vext3 <5,6,7,0>, <7,0,5,6>
+ 2621284845U, // <0,7,0,6>: Cost 3 vext2 <0,6,0,7>, <0,6,0,7>
+ 3904685542U, // <0,7,0,7>: Cost 4 vuzpr <2,0,5,7>, <2,0,5,7>
+ 2632565405U, // <0,7,0,u>: Cost 3 vext2 <2,5,0,7>, LHS
+ 2256409584U, // <0,7,1,0>: Cost 3 vrev <7,0,0,1>
+ 3706307380U, // <0,7,1,1>: Cost 4 vext2 <2,5,0,7>, <1,1,1,1>
+ 2632565654U, // <0,7,1,2>: Cost 3 vext2 <2,5,0,7>, <1,2,3,0>
+ 3769603168U, // <0,7,1,3>: Cost 4 vext3 <1,u,3,0>, <7,1,3,5>
+ 2256704532U, // <0,7,1,4>: Cost 3 vrev <7,0,4,1>
+ 3769603184U, // <0,7,1,5>: Cost 4 vext3 <1,u,3,0>, <7,1,5,3>
+ 3700999366U, // <0,7,1,6>: Cost 4 vext2 <1,6,0,7>, <1,6,0,7>
+ 2886522476U, // <0,7,1,7>: Cost 3 vzipl LHS, <7,7,7,7>
+ 2256999480U, // <0,7,1,u>: Cost 3 vrev <7,0,u,1>
+ 2586501222U, // <0,7,2,0>: Cost 3 vext1 <6,0,7,2>, LHS
+ 1182749690U, // <0,7,2,1>: Cost 2 vrev <7,0,1,2>
+ 3636356595U, // <0,7,2,2>: Cost 4 vext1 <2,0,7,2>, <2,0,7,2>
+ 2727711916U, // <0,7,2,3>: Cost 3 vext3 <7,2,3,0>, <7,2,3,0>
+ 2586504502U, // <0,7,2,4>: Cost 3 vext1 <6,0,7,2>, RHS
+ 2632566606U, // <0,7,2,5>: Cost 3 vext2 <2,5,0,7>, <2,5,0,7>
+ 2586505559U, // <0,7,2,6>: Cost 3 vext1 <6,0,7,2>, <6,0,7,2>
+ 3020740204U, // <0,7,2,7>: Cost 3 vtrnl LHS, <7,7,7,7>
+ 1183265849U, // <0,7,2,u>: Cost 2 vrev <7,0,u,2>
+ 3701000342U, // <0,7,3,0>: Cost 4 vext2 <1,6,0,7>, <3,0,1,2>
+ 3706308849U, // <0,7,3,1>: Cost 4 vext2 <2,5,0,7>, <3,1,2,3>
+ 3330315268U, // <0,7,3,2>: Cost 4 vrev <7,0,2,3>
+ 3706309020U, // <0,7,3,3>: Cost 4 vext2 <2,5,0,7>, <3,3,3,3>
+ 3706309122U, // <0,7,3,4>: Cost 4 vext2 <2,5,0,7>, <3,4,5,6>
+ 3712281127U, // <0,7,3,5>: Cost 4 vext2 <3,5,0,7>, <3,5,0,7>
+ 2639202936U, // <0,7,3,6>: Cost 3 vext2 <3,6,0,7>, <3,6,0,7>
+ 3802412321U, // <0,7,3,7>: Cost 4 vext3 <7,3,7,0>, <7,3,7,0>
+ 2640530202U, // <0,7,3,u>: Cost 3 vext2 <3,u,0,7>, <3,u,0,7>
+ 3654287462U, // <0,7,4,0>: Cost 4 vext1 <5,0,7,4>, LHS
+ 2256507900U, // <0,7,4,1>: Cost 3 vrev <7,0,1,4>
+ 2256581637U, // <0,7,4,2>: Cost 3 vrev <7,0,2,4>
+ 3660262008U, // <0,7,4,3>: Cost 4 vext1 <6,0,7,4>, <3,6,0,7>
+ 3786413405U, // <0,7,4,4>: Cost 4 vext3 <4,6,6,0>, <7,4,4,6>
+ 2632568118U, // <0,7,4,5>: Cost 3 vext2 <2,5,0,7>, RHS
+ 3718917457U, // <0,7,4,6>: Cost 4 vext2 <4,6,0,7>, <4,6,0,7>
+ 3787003255U, // <0,7,4,7>: Cost 4 vext3 <4,7,5,0>, <7,4,7,5>
+ 2632568361U, // <0,7,4,u>: Cost 3 vext2 <2,5,0,7>, RHS
+ 3706310268U, // <0,7,5,0>: Cost 4 vext2 <2,5,0,7>, <5,0,7,0>
+ 3792459156U, // <0,7,5,1>: Cost 4 vext3 <5,6,7,0>, <7,5,1,7>
+ 3330331654U, // <0,7,5,2>: Cost 4 vrev <7,0,2,5>
+ 3722899255U, // <0,7,5,3>: Cost 4 vext2 <5,3,0,7>, <5,3,0,7>
+ 2256737304U, // <0,7,5,4>: Cost 3 vrev <7,0,4,5>
+ 3724226521U, // <0,7,5,5>: Cost 4 vext2 <5,5,0,7>, <5,5,0,7>
+ 2718717377U, // <0,7,5,6>: Cost 3 vext3 <5,6,7,0>, <7,5,6,7>
+ 2729997763U, // <0,7,5,7>: Cost 3 vext3 <7,5,7,0>, <7,5,7,0>
+ 2720044499U, // <0,7,5,u>: Cost 3 vext3 <5,u,7,0>, <7,5,u,7>
+ 3712946517U, // <0,7,6,0>: Cost 4 vext2 <3,6,0,7>, <6,0,7,0>
+ 2256524286U, // <0,7,6,1>: Cost 3 vrev <7,0,1,6>
+ 3792459246U, // <0,7,6,2>: Cost 4 vext3 <5,6,7,0>, <7,6,2,7>
+ 3796440567U, // <0,7,6,3>: Cost 4 vext3 <6,3,7,0>, <7,6,3,7>
+ 3654307126U, // <0,7,6,4>: Cost 4 vext1 <5,0,7,6>, RHS
+ 2656457394U, // <0,7,6,5>: Cost 3 vext2 <6,5,0,7>, <6,5,0,7>
+ 3792459281U, // <0,7,6,6>: Cost 4 vext3 <5,6,7,0>, <7,6,6,6>
+ 2730661396U, // <0,7,6,7>: Cost 3 vext3 <7,6,7,0>, <7,6,7,0>
+ 2658448293U, // <0,7,6,u>: Cost 3 vext2 <6,u,0,7>, <6,u,0,7>
+ 3787003431U, // <0,7,7,0>: Cost 4 vext3 <4,7,5,0>, <7,7,0,1>
+ 3654312854U, // <0,7,7,1>: Cost 4 vext1 <5,0,7,7>, <1,2,3,0>
+ 3654313446U, // <0,7,7,2>: Cost 4 vext1 <5,0,7,7>, <2,0,5,7>
+ 3804771905U, // <0,7,7,3>: Cost 4 vext3 <7,7,3,0>, <7,7,3,0>
+ 3654315318U, // <0,7,7,4>: Cost 4 vext1 <5,0,7,7>, RHS
+ 3654315651U, // <0,7,7,5>: Cost 4 vext1 <5,0,7,7>, <5,0,7,7>
+ 3660288348U, // <0,7,7,6>: Cost 4 vext1 <6,0,7,7>, <6,0,7,7>
+ 2718717548U, // <0,7,7,7>: Cost 3 vext3 <5,6,7,0>, <7,7,7,7>
+ 2664420990U, // <0,7,7,u>: Cost 3 vext2 <7,u,0,7>, <7,u,0,7>
+ 2256466935U, // <0,7,u,0>: Cost 3 vrev <7,0,0,u>
+ 1182798848U, // <0,7,u,1>: Cost 2 vrev <7,0,1,u>
+ 2256614409U, // <0,7,u,2>: Cost 3 vrev <7,0,2,u>
+ 2731693714U, // <0,7,u,3>: Cost 3 vext3 <7,u,3,0>, <7,u,3,0>
+ 2256761883U, // <0,7,u,4>: Cost 3 vrev <7,0,4,u>
+ 2632571034U, // <0,7,u,5>: Cost 3 vext2 <2,5,0,7>, RHS
+ 2669066421U, // <0,7,u,6>: Cost 3 vext2 <u,6,0,7>, <u,6,0,7>
+ 2731988662U, // <0,7,u,7>: Cost 3 vext3 <7,u,7,0>, <7,u,7,0>
+ 1183315007U, // <0,7,u,u>: Cost 2 vrev <7,0,u,u>
+ 135053414U, // <0,u,0,0>: Cost 1 vdup0 LHS
+ 1544896614U, // <0,u,0,1>: Cost 2 vext2 <0,2,0,u>, LHS
+ 1678999654U, // <0,u,0,2>: Cost 2 vuzpl LHS, LHS
+ 2691880677U, // <0,u,0,3>: Cost 3 vext3 <1,2,3,0>, <u,0,3,2>
+ 1476988214U, // <0,u,0,4>: Cost 2 vext1 <0,0,u,0>, RHS
+ 2718791419U, // <0,u,0,5>: Cost 3 vext3 <5,6,u,0>, <u,0,5,6>
+ 3021248666U, // <0,u,0,6>: Cost 3 vtrnl <0,2,0,2>, RHS
+ 2592535607U, // <0,u,0,7>: Cost 3 vext1 <7,0,u,0>, <7,0,u,0>
+ 135053414U, // <0,u,0,u>: Cost 1 vdup0 LHS
+ 1476993097U, // <0,u,1,0>: Cost 2 vext1 <0,0,u,1>, <0,0,u,1>
+ 1812780846U, // <0,u,1,1>: Cost 2 vzipl LHS, LHS
+ 1618138926U, // <0,u,1,2>: Cost 2 vext3 <1,2,3,0>, LHS
+ 2752742134U, // <0,u,1,3>: Cost 3 vuzpl LHS, <1,0,3,2>
+ 1476996406U, // <0,u,1,4>: Cost 2 vext1 <0,0,u,1>, RHS
+ 1812781210U, // <0,u,1,5>: Cost 2 vzipl LHS, RHS
+ 2887006416U, // <0,u,1,6>: Cost 3 vzipl LHS, <u,6,3,7>
+ 2966736200U, // <0,u,1,7>: Cost 3 vzipr <2,3,0,1>, RHS
+ 1812781413U, // <0,u,1,u>: Cost 2 vzipl LHS, LHS
+ 1482973286U, // <0,u,2,0>: Cost 2 vext1 <1,0,u,2>, LHS
+ 1482973987U, // <0,u,2,1>: Cost 2 vext1 <1,0,u,2>, <1,0,u,2>
+ 1946998574U, // <0,u,2,2>: Cost 2 vtrnl LHS, LHS
+ 835584U, // <0,u,2,3>: Cost 0 copy LHS
+ 1482976566U, // <0,u,2,4>: Cost 2 vext1 <1,0,u,2>, RHS
+ 3020781631U, // <0,u,2,5>: Cost 3 vtrnl LHS, <u,4,5,6>
+ 1946998938U, // <0,u,2,6>: Cost 2 vtrnl LHS, RHS
+ 1518810169U, // <0,u,2,7>: Cost 2 vext1 <7,0,u,2>, <7,0,u,2>
+ 835584U, // <0,u,2,u>: Cost 0 copy LHS
+ 2618640534U, // <0,u,3,0>: Cost 3 vext2 <0,2,0,u>, <3,0,1,2>
+ 2752743574U, // <0,u,3,1>: Cost 3 vuzpl LHS, <3,0,1,2>
+ 2636556597U, // <0,u,3,2>: Cost 3 vext2 <3,2,0,u>, <3,2,0,u>
+ 2752743836U, // <0,u,3,3>: Cost 3 vuzpl LHS, <3,3,3,3>
+ 2618640898U, // <0,u,3,4>: Cost 3 vext2 <0,2,0,u>, <3,4,5,6>
+ 2752743938U, // <0,u,3,5>: Cost 3 vuzpl LHS, <3,4,5,6>
+ 2639202936U, // <0,u,3,6>: Cost 3 vext2 <3,6,0,7>, <3,6,0,7>
+ 2639874762U, // <0,u,3,7>: Cost 3 vext2 <3,7,0,u>, <3,7,0,u>
+ 2752743637U, // <0,u,3,u>: Cost 3 vuzpl LHS, <3,0,u,2>
+ 2562703462U, // <0,u,4,0>: Cost 3 vext1 <2,0,u,4>, LHS
+ 2888455982U, // <0,u,4,1>: Cost 3 vzipl <0,4,1,5>, LHS
+ 3021575982U, // <0,u,4,2>: Cost 3 vtrnl <0,2,4,6>, LHS
+ 2568677591U, // <0,u,4,3>: Cost 3 vext1 <3,0,u,4>, <3,0,u,4>
+ 2562706742U, // <0,u,4,4>: Cost 3 vext1 <2,0,u,4>, RHS
+ 1544899894U, // <0,u,4,5>: Cost 2 vext2 <0,2,0,u>, RHS
+ 1679002934U, // <0,u,4,6>: Cost 2 vuzpl LHS, RHS
+ 2718718033U, // <0,u,4,7>: Cost 3 vext3 <5,6,7,0>, <u,4,7,6>
+ 1679002952U, // <0,u,4,u>: Cost 2 vuzpl LHS, RHS
+ 2568683622U, // <0,u,5,0>: Cost 3 vext1 <3,0,u,5>, LHS
+ 2568684438U, // <0,u,5,1>: Cost 3 vext1 <3,0,u,5>, <1,2,3,0>
+ 3765622902U, // <0,u,5,2>: Cost 4 vext3 <1,2,3,0>, <u,5,2,7>
+ 2691881087U, // <0,u,5,3>: Cost 3 vext3 <1,2,3,0>, <u,5,3,7>
+ 2568686902U, // <0,u,5,4>: Cost 3 vext1 <3,0,u,5>, RHS
+ 2650492890U, // <0,u,5,5>: Cost 3 vext2 <5,5,0,u>, <5,5,0,u>
+ 1618139290U, // <0,u,5,6>: Cost 2 vext3 <1,2,3,0>, RHS
+ 2824834358U, // <0,u,5,7>: Cost 3 vuzpr <1,0,3,u>, RHS
+ 1618139308U, // <0,u,5,u>: Cost 2 vext3 <1,2,3,0>, RHS
+ 2592579686U, // <0,u,6,0>: Cost 3 vext1 <7,0,u,6>, LHS
+ 2262496983U, // <0,u,6,1>: Cost 3 vrev <u,0,1,6>
+ 2654474688U, // <0,u,6,2>: Cost 3 vext2 <6,2,0,u>, <6,2,0,u>
+ 2691881168U, // <0,u,6,3>: Cost 3 vext3 <1,2,3,0>, <u,6,3,7>
+ 2592582966U, // <0,u,6,4>: Cost 3 vext1 <7,0,u,6>, RHS
+ 2656465587U, // <0,u,6,5>: Cost 3 vext2 <6,5,0,u>, <6,5,0,u>
+ 2657129220U, // <0,u,6,6>: Cost 3 vext2 <6,6,0,u>, <6,6,0,u>
+ 1584051029U, // <0,u,6,7>: Cost 2 vext2 <6,7,0,u>, <6,7,0,u>
+ 1584714662U, // <0,u,6,u>: Cost 2 vext2 <6,u,0,u>, <6,u,0,u>
+ 2562728038U, // <0,u,7,0>: Cost 3 vext1 <2,0,u,7>, LHS
+ 2562728854U, // <0,u,7,1>: Cost 3 vext1 <2,0,u,7>, <1,2,3,0>
+ 2562729473U, // <0,u,7,2>: Cost 3 vext1 <2,0,u,7>, <2,0,u,7>
+ 2661111018U, // <0,u,7,3>: Cost 3 vext2 <7,3,0,u>, <7,3,0,u>
+ 2562731318U, // <0,u,7,4>: Cost 3 vext1 <2,0,u,7>, RHS
+ 2718718258U, // <0,u,7,5>: Cost 3 vext3 <5,6,7,0>, <u,7,5,6>
+ 2586620261U, // <0,u,7,6>: Cost 3 vext1 <6,0,u,7>, <6,0,u,7>
+ 2657793644U, // <0,u,7,7>: Cost 3 vext2 <6,7,0,u>, <7,7,7,7>
+ 2562733870U, // <0,u,7,u>: Cost 3 vext1 <2,0,u,7>, LHS
+ 135053414U, // <0,u,u,0>: Cost 1 vdup0 LHS
+ 1544902446U, // <0,u,u,1>: Cost 2 vext2 <0,2,0,u>, LHS
+ 1679005486U, // <0,u,u,2>: Cost 2 vuzpl LHS, LHS
+ 835584U, // <0,u,u,3>: Cost 0 copy LHS
+ 1483025718U, // <0,u,u,4>: Cost 2 vext1 <1,0,u,u>, RHS
+ 1544902810U, // <0,u,u,5>: Cost 2 vext2 <0,2,0,u>, RHS
+ 1679005850U, // <0,u,u,6>: Cost 2 vuzpl LHS, RHS
+ 1518859327U, // <0,u,u,7>: Cost 2 vext1 <7,0,u,u>, <7,0,u,u>
+ 835584U, // <0,u,u,u>: Cost 0 copy LHS
+ 2689744896U, // <1,0,0,0>: Cost 3 vext3 <0,u,1,1>, <0,0,0,0>
+ 1610694666U, // <1,0,0,1>: Cost 2 vext3 <0,0,1,1>, <0,0,1,1>
+ 2689744916U, // <1,0,0,2>: Cost 3 vext3 <0,u,1,1>, <0,0,2,2>
+ 2619310332U, // <1,0,0,3>: Cost 3 vext2 <0,3,1,0>, <0,3,1,0>
+ 2684657701U, // <1,0,0,4>: Cost 3 vext3 <0,0,4,1>, <0,0,4,1>
+ 2620637598U, // <1,0,0,5>: Cost 3 vext2 <0,5,1,0>, <0,5,1,0>
+ 3708977654U, // <1,0,0,6>: Cost 4 vext2 <3,0,1,0>, <0,6,1,7>
+ 3666351168U, // <1,0,0,7>: Cost 4 vext1 <7,1,0,0>, <7,1,0,0>
+ 1611210825U, // <1,0,0,u>: Cost 2 vext3 <0,0,u,1>, <0,0,u,1>
+ 2556780646U, // <1,0,1,0>: Cost 3 vext1 <1,1,0,1>, LHS
+ 2556781355U, // <1,0,1,1>: Cost 3 vext1 <1,1,0,1>, <1,1,0,1>
+ 1616003174U, // <1,0,1,2>: Cost 2 vext3 <0,u,1,1>, LHS
+ 3693052888U, // <1,0,1,3>: Cost 4 vext2 <0,3,1,0>, <1,3,1,3>
+ 2556783926U, // <1,0,1,4>: Cost 3 vext1 <1,1,0,1>, RHS
+ 2580672143U, // <1,0,1,5>: Cost 3 vext1 <5,1,0,1>, <5,1,0,1>
+ 2724839566U, // <1,0,1,6>: Cost 3 vext3 <6,7,0,1>, <0,1,6,7>
+ 3654415354U, // <1,0,1,7>: Cost 4 vext1 <5,1,0,1>, <7,0,1,2>
+ 1616003228U, // <1,0,1,u>: Cost 2 vext3 <0,u,1,1>, LHS
+ 2685690019U, // <1,0,2,0>: Cost 3 vext3 <0,2,0,1>, <0,2,0,1>
+ 2685763756U, // <1,0,2,1>: Cost 3 vext3 <0,2,1,1>, <0,2,1,1>
+ 2698297524U, // <1,0,2,2>: Cost 3 vext3 <2,3,0,1>, <0,2,2,0>
+ 2685911230U, // <1,0,2,3>: Cost 3 vext3 <0,2,3,1>, <0,2,3,1>
+ 2689745100U, // <1,0,2,4>: Cost 3 vext3 <0,u,1,1>, <0,2,4,6>
+ 3764814038U, // <1,0,2,5>: Cost 4 vext3 <1,1,1,1>, <0,2,5,7>
+ 2724839640U, // <1,0,2,6>: Cost 3 vext3 <6,7,0,1>, <0,2,6,0>
+ 2592625658U, // <1,0,2,7>: Cost 3 vext1 <7,1,0,2>, <7,0,1,2>
+ 2686279915U, // <1,0,2,u>: Cost 3 vext3 <0,2,u,1>, <0,2,u,1>
+ 3087843328U, // <1,0,3,0>: Cost 3 vtrnr LHS, <0,0,0,0>
+ 3087843338U, // <1,0,3,1>: Cost 3 vtrnr LHS, <0,0,1,1>
+ 67944550U, // <1,0,3,2>: Cost 1 vrev LHS
+ 2568743135U, // <1,0,3,3>: Cost 3 vext1 <3,1,0,3>, <3,1,0,3>
+ 2562772278U, // <1,0,3,4>: Cost 3 vext1 <2,1,0,3>, RHS
+ 4099850454U, // <1,0,3,5>: Cost 4 vtrnl <1,0,3,2>, <0,2,5,7>
+ 3704998538U, // <1,0,3,6>: Cost 4 vext2 <2,3,1,0>, <3,6,2,7>
+ 2592633923U, // <1,0,3,7>: Cost 3 vext1 <7,1,0,3>, <7,1,0,3>
+ 68386972U, // <1,0,3,u>: Cost 1 vrev LHS
+ 2620640146U, // <1,0,4,0>: Cost 3 vext2 <0,5,1,0>, <4,0,5,1>
+ 2689745234U, // <1,0,4,1>: Cost 3 vext3 <0,u,1,1>, <0,4,1,5>
+ 2689745244U, // <1,0,4,2>: Cost 3 vext3 <0,u,1,1>, <0,4,2,6>
+ 3760980320U, // <1,0,4,3>: Cost 4 vext3 <0,4,3,1>, <0,4,3,1>
+ 3761054057U, // <1,0,4,4>: Cost 4 vext3 <0,4,4,1>, <0,4,4,1>
+ 2619313462U, // <1,0,4,5>: Cost 3 vext2 <0,3,1,0>, RHS
+ 3761201531U, // <1,0,4,6>: Cost 4 vext3 <0,4,6,1>, <0,4,6,1>
+ 3666383940U, // <1,0,4,7>: Cost 4 vext1 <7,1,0,4>, <7,1,0,4>
+ 2619313705U, // <1,0,4,u>: Cost 3 vext2 <0,3,1,0>, RHS
+ 4029300736U, // <1,0,5,0>: Cost 4 vzipr <0,4,1,5>, <0,0,0,0>
+ 2895249510U, // <1,0,5,1>: Cost 3 vzipl <1,5,3,7>, LHS
+ 3028287590U, // <1,0,5,2>: Cost 3 vtrnl <1,3,5,7>, LHS
+ 3642501345U, // <1,0,5,3>: Cost 4 vext1 <3,1,0,5>, <3,1,0,5>
+ 2215592058U, // <1,0,5,4>: Cost 3 vrev <0,1,4,5>
+ 3724242907U, // <1,0,5,5>: Cost 4 vext2 <5,5,1,0>, <5,5,1,0>
+ 3724906540U, // <1,0,5,6>: Cost 4 vext2 <5,6,1,0>, <5,6,1,0>
+ 3911118134U, // <1,0,5,7>: Cost 4 vuzpr <3,1,3,0>, RHS
+ 3028287644U, // <1,0,5,u>: Cost 3 vtrnl <1,3,5,7>, LHS
+ 3762086375U, // <1,0,6,0>: Cost 4 vext3 <0,6,0,1>, <0,6,0,1>
+ 2698297846U, // <1,0,6,1>: Cost 3 vext3 <2,3,0,1>, <0,6,1,7>
+ 3760022015U, // <1,0,6,2>: Cost 4 vext3 <0,2,u,1>, <0,6,2,7>
+ 3642509538U, // <1,0,6,3>: Cost 4 vext1 <3,1,0,6>, <3,1,0,6>
+ 3762381323U, // <1,0,6,4>: Cost 4 vext3 <0,6,4,1>, <0,6,4,1>
+ 3730215604U, // <1,0,6,5>: Cost 4 vext2 <6,5,1,0>, <6,5,1,0>
+ 3730879237U, // <1,0,6,6>: Cost 4 vext2 <6,6,1,0>, <6,6,1,0>
+ 2657801046U, // <1,0,6,7>: Cost 3 vext2 <6,7,1,0>, <6,7,1,0>
+ 2658464679U, // <1,0,6,u>: Cost 3 vext2 <6,u,1,0>, <6,u,1,0>
+ 2659128312U, // <1,0,7,0>: Cost 3 vext2 <7,0,1,0>, <7,0,1,0>
+ 4047898278U, // <1,0,7,1>: Cost 4 vzipr <3,5,1,7>, <2,3,0,1>
+ 2215460970U, // <1,0,7,2>: Cost 3 vrev <0,1,2,7>
+ 3734861035U, // <1,0,7,3>: Cost 4 vext2 <7,3,1,0>, <7,3,1,0>
+ 3731543398U, // <1,0,7,4>: Cost 4 vext2 <6,7,1,0>, <7,4,5,6>
+ 3736188301U, // <1,0,7,5>: Cost 4 vext2 <7,5,1,0>, <7,5,1,0>
+ 2663110110U, // <1,0,7,6>: Cost 3 vext2 <7,6,1,0>, <7,6,1,0>
+ 3731543660U, // <1,0,7,7>: Cost 4 vext2 <6,7,1,0>, <7,7,7,7>
+ 2664437376U, // <1,0,7,u>: Cost 3 vext2 <7,u,1,0>, <7,u,1,0>
+ 3087884288U, // <1,0,u,0>: Cost 3 vtrnr LHS, <0,0,0,0>
+ 1616003730U, // <1,0,u,1>: Cost 2 vext3 <0,u,1,1>, <0,u,1,1>
+ 67985515U, // <1,0,u,2>: Cost 1 vrev LHS
+ 2689893028U, // <1,0,u,3>: Cost 3 vext3 <0,u,3,1>, <0,u,3,1>
+ 2689745586U, // <1,0,u,4>: Cost 3 vext3 <0,u,1,1>, <0,u,4,6>
+ 2619316378U, // <1,0,u,5>: Cost 3 vext2 <0,3,1,0>, RHS
+ 2669082807U, // <1,0,u,6>: Cost 3 vext2 <u,6,1,0>, <u,6,1,0>
+ 2592674888U, // <1,0,u,7>: Cost 3 vext1 <7,1,0,u>, <7,1,0,u>
+ 68427937U, // <1,0,u,u>: Cost 1 vrev LHS
+ 1543585802U, // <1,1,0,0>: Cost 2 vext2 <0,0,1,1>, <0,0,1,1>
+ 1548894310U, // <1,1,0,1>: Cost 2 vext2 <0,u,1,1>, LHS
+ 2618654892U, // <1,1,0,2>: Cost 3 vext2 <0,2,1,1>, <0,2,1,1>
+ 2689745654U, // <1,1,0,3>: Cost 3 vext3 <0,u,1,1>, <1,0,3,2>
+ 2622636370U, // <1,1,0,4>: Cost 3 vext2 <0,u,1,1>, <0,4,1,5>
+ 2620645791U, // <1,1,0,5>: Cost 3 vext2 <0,5,1,1>, <0,5,1,1>
+ 3696378367U, // <1,1,0,6>: Cost 4 vext2 <0,u,1,1>, <0,6,2,7>
+ 3666424905U, // <1,1,0,7>: Cost 4 vext1 <7,1,1,0>, <7,1,1,0>
+ 1548894866U, // <1,1,0,u>: Cost 2 vext2 <0,u,1,1>, <0,u,1,1>
+ 1483112550U, // <1,1,1,0>: Cost 2 vext1 <1,1,1,1>, LHS
+ 202162278U, // <1,1,1,1>: Cost 1 vdup1 LHS
+ 2622636950U, // <1,1,1,2>: Cost 3 vext2 <0,u,1,1>, <1,2,3,0>
+ 2622637016U, // <1,1,1,3>: Cost 3 vext2 <0,u,1,1>, <1,3,1,3>
+ 1483115830U, // <1,1,1,4>: Cost 2 vext1 <1,1,1,1>, RHS
+ 2622637200U, // <1,1,1,5>: Cost 3 vext2 <0,u,1,1>, <1,5,3,7>
+ 2622637263U, // <1,1,1,6>: Cost 3 vext2 <0,u,1,1>, <1,6,1,7>
+ 2592691274U, // <1,1,1,7>: Cost 3 vext1 <7,1,1,1>, <7,1,1,1>
+ 202162278U, // <1,1,1,u>: Cost 1 vdup1 LHS
+ 2550890588U, // <1,1,2,0>: Cost 3 vext1 <0,1,1,2>, <0,1,1,2>
+ 2617329183U, // <1,1,2,1>: Cost 3 vext2 <0,0,1,1>, <2,1,3,1>
+ 2622637672U, // <1,1,2,2>: Cost 3 vext2 <0,u,1,1>, <2,2,2,2>
+ 2622637734U, // <1,1,2,3>: Cost 3 vext2 <0,u,1,1>, <2,3,0,1>
+ 2550893878U, // <1,1,2,4>: Cost 3 vext1 <0,1,1,2>, RHS
+ 3696379744U, // <1,1,2,5>: Cost 4 vext2 <0,u,1,1>, <2,5,2,7>
+ 2622638010U, // <1,1,2,6>: Cost 3 vext2 <0,u,1,1>, <2,6,3,7>
+ 3804554170U, // <1,1,2,7>: Cost 4 vext3 <7,7,0,1>, <1,2,7,0>
+ 2622638139U, // <1,1,2,u>: Cost 3 vext2 <0,u,1,1>, <2,u,0,1>
+ 2622638230U, // <1,1,3,0>: Cost 3 vext2 <0,u,1,1>, <3,0,1,2>
+ 3087844148U, // <1,1,3,1>: Cost 3 vtrnr LHS, <1,1,1,1>
+ 4161585244U, // <1,1,3,2>: Cost 4 vtrnr LHS, <0,1,1,2>
+ 2014101606U, // <1,1,3,3>: Cost 2 vtrnr LHS, LHS
+ 2622638594U, // <1,1,3,4>: Cost 3 vext2 <0,u,1,1>, <3,4,5,6>
+ 2689745920U, // <1,1,3,5>: Cost 3 vext3 <0,u,1,1>, <1,3,5,7>
+ 3763487753U, // <1,1,3,6>: Cost 4 vext3 <0,u,1,1>, <1,3,6,7>
+ 2592707660U, // <1,1,3,7>: Cost 3 vext1 <7,1,1,3>, <7,1,1,3>
+ 2014101611U, // <1,1,3,u>: Cost 2 vtrnr LHS, LHS
+ 2556878950U, // <1,1,4,0>: Cost 3 vext1 <1,1,1,4>, LHS
+ 2221335351U, // <1,1,4,1>: Cost 3 vrev <1,1,1,4>
+ 3696380988U, // <1,1,4,2>: Cost 4 vext2 <0,u,1,1>, <4,2,6,0>
+ 3763487805U, // <1,1,4,3>: Cost 4 vext3 <0,u,1,1>, <1,4,3,5>
+ 2556882230U, // <1,1,4,4>: Cost 3 vext1 <1,1,1,4>, RHS
+ 1548897590U, // <1,1,4,5>: Cost 2 vext2 <0,u,1,1>, RHS
+ 2758184246U, // <1,1,4,6>: Cost 3 vuzpl <1,1,1,1>, RHS
+ 3666457677U, // <1,1,4,7>: Cost 4 vext1 <7,1,1,4>, <7,1,1,4>
+ 1548897833U, // <1,1,4,u>: Cost 2 vext2 <0,u,1,1>, RHS
+ 2693653615U, // <1,1,5,0>: Cost 3 vext3 <1,5,0,1>, <1,5,0,1>
+ 2617331408U, // <1,1,5,1>: Cost 3 vext2 <0,0,1,1>, <5,1,7,3>
+ 4029302934U, // <1,1,5,2>: Cost 4 vzipr <0,4,1,5>, <3,0,1,2>
+ 2689746064U, // <1,1,5,3>: Cost 3 vext3 <0,u,1,1>, <1,5,3,7>
+ 2221564755U, // <1,1,5,4>: Cost 3 vrev <1,1,4,5>
+ 2955559250U, // <1,1,5,5>: Cost 3 vzipr <0,4,1,5>, <0,4,1,5>
+ 2617331810U, // <1,1,5,6>: Cost 3 vext2 <0,0,1,1>, <5,6,7,0>
+ 2825293110U, // <1,1,5,7>: Cost 3 vuzpr <1,1,1,1>, RHS
+ 2689746109U, // <1,1,5,u>: Cost 3 vext3 <0,u,1,1>, <1,5,u,7>
+ 3696382241U, // <1,1,6,0>: Cost 4 vext2 <0,u,1,1>, <6,0,1,2>
+ 2689746127U, // <1,1,6,1>: Cost 3 vext3 <0,u,1,1>, <1,6,1,7>
+ 2617332218U, // <1,1,6,2>: Cost 3 vext2 <0,0,1,1>, <6,2,7,3>
+ 3763487969U, // <1,1,6,3>: Cost 4 vext3 <0,u,1,1>, <1,6,3,7>
+ 3696382605U, // <1,1,6,4>: Cost 4 vext2 <0,u,1,1>, <6,4,5,6>
+ 4029309266U, // <1,1,6,5>: Cost 4 vzipr <0,4,1,6>, <0,4,1,5>
+ 2617332536U, // <1,1,6,6>: Cost 3 vext2 <0,0,1,1>, <6,6,6,6>
+ 2724840702U, // <1,1,6,7>: Cost 3 vext3 <6,7,0,1>, <1,6,7,0>
+ 2725504263U, // <1,1,6,u>: Cost 3 vext3 <6,u,0,1>, <1,6,u,0>
+ 2617332720U, // <1,1,7,0>: Cost 3 vext2 <0,0,1,1>, <7,0,0,1>
+ 2659800138U, // <1,1,7,1>: Cost 3 vext2 <7,1,1,1>, <7,1,1,1>
+ 3691074717U, // <1,1,7,2>: Cost 4 vext2 <0,0,1,1>, <7,2,1,3>
+ 4167811174U, // <1,1,7,3>: Cost 4 vtrnr <1,1,5,7>, LHS
+ 2617333094U, // <1,1,7,4>: Cost 3 vext2 <0,0,1,1>, <7,4,5,6>
+ 3295396702U, // <1,1,7,5>: Cost 4 vrev <1,1,5,7>
+ 3803891014U, // <1,1,7,6>: Cost 4 vext3 <7,6,0,1>, <1,7,6,0>
+ 2617333356U, // <1,1,7,7>: Cost 3 vext2 <0,0,1,1>, <7,7,7,7>
+ 2659800138U, // <1,1,7,u>: Cost 3 vext2 <7,1,1,1>, <7,1,1,1>
+ 1483112550U, // <1,1,u,0>: Cost 2 vext1 <1,1,1,1>, LHS
+ 202162278U, // <1,1,u,1>: Cost 1 vdup1 LHS
+ 2622642056U, // <1,1,u,2>: Cost 3 vext2 <0,u,1,1>, <u,2,3,3>
+ 2014142566U, // <1,1,u,3>: Cost 2 vtrnr LHS, LHS
+ 1483115830U, // <1,1,u,4>: Cost 2 vext1 <1,1,1,1>, RHS
+ 1548900506U, // <1,1,u,5>: Cost 2 vext2 <0,u,1,1>, RHS
+ 2622642384U, // <1,1,u,6>: Cost 3 vext2 <0,u,1,1>, <u,6,3,7>
+ 2825293353U, // <1,1,u,7>: Cost 3 vuzpr <1,1,1,1>, RHS
+ 202162278U, // <1,1,u,u>: Cost 1 vdup1 LHS
+ 2635251712U, // <1,2,0,0>: Cost 3 vext2 <3,0,1,2>, <0,0,0,0>
+ 1561509990U, // <1,2,0,1>: Cost 2 vext2 <3,0,1,2>, LHS
+ 2618663085U, // <1,2,0,2>: Cost 3 vext2 <0,2,1,2>, <0,2,1,2>
+ 2696529358U, // <1,2,0,3>: Cost 3 vext3 <2,0,3,1>, <2,0,3,1>
+ 2635252050U, // <1,2,0,4>: Cost 3 vext2 <3,0,1,2>, <0,4,1,5>
+ 3769533926U, // <1,2,0,5>: Cost 4 vext3 <1,u,2,1>, <2,0,5,7>
+ 2621317617U, // <1,2,0,6>: Cost 3 vext2 <0,6,1,2>, <0,6,1,2>
+ 2659140170U, // <1,2,0,7>: Cost 3 vext2 <7,0,1,2>, <0,7,2,1>
+ 1561510557U, // <1,2,0,u>: Cost 2 vext2 <3,0,1,2>, LHS
+ 2623308516U, // <1,2,1,0>: Cost 3 vext2 <1,0,1,2>, <1,0,1,2>
+ 2635252532U, // <1,2,1,1>: Cost 3 vext2 <3,0,1,2>, <1,1,1,1>
+ 2631271318U, // <1,2,1,2>: Cost 3 vext2 <2,3,1,2>, <1,2,3,0>
+ 2958180454U, // <1,2,1,3>: Cost 3 vzipr <0,u,1,1>, LHS
+ 2550959414U, // <1,2,1,4>: Cost 3 vext1 <0,1,2,1>, RHS
+ 2635252880U, // <1,2,1,5>: Cost 3 vext2 <3,0,1,2>, <1,5,3,7>
+ 2635252952U, // <1,2,1,6>: Cost 3 vext2 <3,0,1,2>, <1,6,2,7>
+ 3732882731U, // <1,2,1,7>: Cost 4 vext2 <7,0,1,2>, <1,7,3,0>
+ 2958180459U, // <1,2,1,u>: Cost 3 vzipr <0,u,1,1>, LHS
+ 2629281213U, // <1,2,2,0>: Cost 3 vext2 <2,0,1,2>, <2,0,1,2>
+ 2635253280U, // <1,2,2,1>: Cost 3 vext2 <3,0,1,2>, <2,1,3,2>
+ 2618664552U, // <1,2,2,2>: Cost 3 vext2 <0,2,1,2>, <2,2,2,2>
+ 2689746546U, // <1,2,2,3>: Cost 3 vext3 <0,u,1,1>, <2,2,3,3>
+ 3764815485U, // <1,2,2,4>: Cost 4 vext3 <1,1,1,1>, <2,2,4,5>
+ 3760023176U, // <1,2,2,5>: Cost 4 vext3 <0,2,u,1>, <2,2,5,7>
+ 2635253690U, // <1,2,2,6>: Cost 3 vext2 <3,0,1,2>, <2,6,3,7>
+ 2659141610U, // <1,2,2,7>: Cost 3 vext2 <7,0,1,2>, <2,7,0,1>
+ 2689746591U, // <1,2,2,u>: Cost 3 vext3 <0,u,1,1>, <2,2,u,3>
+ 403488870U, // <1,2,3,0>: Cost 1 vext1 LHS, LHS
+ 1477231350U, // <1,2,3,1>: Cost 2 vext1 LHS, <1,0,3,2>
+ 1477232232U, // <1,2,3,2>: Cost 2 vext1 LHS, <2,2,2,2>
+ 1477233052U, // <1,2,3,3>: Cost 2 vext1 LHS, <3,3,3,3>
+ 403492150U, // <1,2,3,4>: Cost 1 vext1 LHS, RHS
+ 1525010128U, // <1,2,3,5>: Cost 2 vext1 LHS, <5,1,7,3>
+ 1525010938U, // <1,2,3,6>: Cost 2 vext1 LHS, <6,2,7,3>
+ 1525011450U, // <1,2,3,7>: Cost 2 vext1 LHS, <7,0,1,2>
+ 403494702U, // <1,2,3,u>: Cost 1 vext1 LHS, LHS
+ 2641226607U, // <1,2,4,0>: Cost 3 vext2 <4,0,1,2>, <4,0,1,2>
+ 3624723446U, // <1,2,4,1>: Cost 4 vext1 <0,1,2,4>, <1,3,4,6>
+ 3301123609U, // <1,2,4,2>: Cost 4 vrev <2,1,2,4>
+ 2598759198U, // <1,2,4,3>: Cost 3 vext1 <u,1,2,4>, <3,u,1,2>
+ 2659142864U, // <1,2,4,4>: Cost 3 vext2 <7,0,1,2>, <4,4,4,4>
+ 1561513270U, // <1,2,4,5>: Cost 2 vext2 <3,0,1,2>, RHS
+ 2659143028U, // <1,2,4,6>: Cost 3 vext2 <7,0,1,2>, <4,6,4,6>
+ 2659143112U, // <1,2,4,7>: Cost 3 vext2 <7,0,1,2>, <4,7,5,0>
+ 1561513513U, // <1,2,4,u>: Cost 2 vext2 <3,0,1,2>, RHS
+ 2550988902U, // <1,2,5,0>: Cost 3 vext1 <0,1,2,5>, LHS
+ 2550989824U, // <1,2,5,1>: Cost 3 vext1 <0,1,2,5>, <1,3,5,7>
+ 3624732264U, // <1,2,5,2>: Cost 4 vext1 <0,1,2,5>, <2,2,2,2>
+ 2955559014U, // <1,2,5,3>: Cost 3 vzipr <0,4,1,5>, LHS
+ 2550992182U, // <1,2,5,4>: Cost 3 vext1 <0,1,2,5>, RHS
+ 2659143684U, // <1,2,5,5>: Cost 3 vext2 <7,0,1,2>, <5,5,5,5>
+ 2659143778U, // <1,2,5,6>: Cost 3 vext2 <7,0,1,2>, <5,6,7,0>
+ 2659143848U, // <1,2,5,7>: Cost 3 vext2 <7,0,1,2>, <5,7,5,7>
+ 2550994734U, // <1,2,5,u>: Cost 3 vext1 <0,1,2,5>, LHS
+ 2700289945U, // <1,2,6,0>: Cost 3 vext3 <2,6,0,1>, <2,6,0,1>
+ 2635256232U, // <1,2,6,1>: Cost 3 vext2 <3,0,1,2>, <6,1,7,2>
+ 2659144186U, // <1,2,6,2>: Cost 3 vext2 <7,0,1,2>, <6,2,7,3>
+ 2689746874U, // <1,2,6,3>: Cost 3 vext3 <0,u,1,1>, <2,6,3,7>
+ 3763488705U, // <1,2,6,4>: Cost 4 vext3 <0,u,1,1>, <2,6,4,5>
+ 3763488716U, // <1,2,6,5>: Cost 4 vext3 <0,u,1,1>, <2,6,5,7>
+ 2659144504U, // <1,2,6,6>: Cost 3 vext2 <7,0,1,2>, <6,6,6,6>
+ 2657817432U, // <1,2,6,7>: Cost 3 vext2 <6,7,1,2>, <6,7,1,2>
+ 2689746919U, // <1,2,6,u>: Cost 3 vext3 <0,u,1,1>, <2,6,u,7>
+ 1585402874U, // <1,2,7,0>: Cost 2 vext2 <7,0,1,2>, <7,0,1,2>
+ 2659144770U, // <1,2,7,1>: Cost 3 vext2 <7,0,1,2>, <7,1,0,2>
+ 3708998858U, // <1,2,7,2>: Cost 4 vext2 <3,0,1,2>, <7,2,6,3>
+ 2635257059U, // <1,2,7,3>: Cost 3 vext2 <3,0,1,2>, <7,3,0,1>
+ 2659145062U, // <1,2,7,4>: Cost 3 vext2 <7,0,1,2>, <7,4,5,6>
+ 3732886916U, // <1,2,7,5>: Cost 4 vext2 <7,0,1,2>, <7,5,0,0>
+ 3732886998U, // <1,2,7,6>: Cost 4 vext2 <7,0,1,2>, <7,6,0,1>
+ 2659145255U, // <1,2,7,7>: Cost 3 vext2 <7,0,1,2>, <7,7,0,1>
+ 1590711938U, // <1,2,7,u>: Cost 2 vext2 <7,u,1,2>, <7,u,1,2>
+ 403529835U, // <1,2,u,0>: Cost 1 vext1 LHS, LHS
+ 1477272310U, // <1,2,u,1>: Cost 2 vext1 LHS, <1,0,3,2>
+ 1477273192U, // <1,2,u,2>: Cost 2 vext1 LHS, <2,2,2,2>
+ 1477273750U, // <1,2,u,3>: Cost 2 vext1 LHS, <3,0,1,2>
+ 403533110U, // <1,2,u,4>: Cost 1 vext1 LHS, RHS
+ 1561516186U, // <1,2,u,5>: Cost 2 vext2 <3,0,1,2>, RHS
+ 1525051898U, // <1,2,u,6>: Cost 2 vext1 LHS, <6,2,7,3>
+ 1525052410U, // <1,2,u,7>: Cost 2 vext1 LHS, <7,0,1,2>
+ 403535662U, // <1,2,u,u>: Cost 1 vext1 LHS, LHS
+ 2819407872U, // <1,3,0,0>: Cost 3 vuzpr LHS, <0,0,0,0>
+ 1551564902U, // <1,3,0,1>: Cost 2 vext2 <1,3,1,3>, LHS
+ 2819408630U, // <1,3,0,2>: Cost 3 vuzpr LHS, <1,0,3,2>
+ 2619334911U, // <1,3,0,3>: Cost 3 vext2 <0,3,1,3>, <0,3,1,3>
+ 2625306962U, // <1,3,0,4>: Cost 3 vext2 <1,3,1,3>, <0,4,1,5>
+ 3832725879U, // <1,3,0,5>: Cost 4 vuzpl <1,2,3,0>, <0,4,5,6>
+ 3699048959U, // <1,3,0,6>: Cost 4 vext2 <1,3,1,3>, <0,6,2,7>
+ 3776538827U, // <1,3,0,7>: Cost 4 vext3 <3,0,7,1>, <3,0,7,1>
+ 1551565469U, // <1,3,0,u>: Cost 2 vext2 <1,3,1,3>, LHS
+ 2618671862U, // <1,3,1,0>: Cost 3 vext2 <0,2,1,3>, <1,0,3,2>
+ 2819408692U, // <1,3,1,1>: Cost 3 vuzpr LHS, <1,1,1,1>
+ 2624643975U, // <1,3,1,2>: Cost 3 vext2 <1,2,1,3>, <1,2,1,3>
+ 1745666150U, // <1,3,1,3>: Cost 2 vuzpr LHS, LHS
+ 2557005110U, // <1,3,1,4>: Cost 3 vext1 <1,1,3,1>, RHS
+ 2625307792U, // <1,3,1,5>: Cost 3 vext2 <1,3,1,3>, <1,5,3,7>
+ 3698386127U, // <1,3,1,6>: Cost 4 vext2 <1,2,1,3>, <1,6,1,7>
+ 2592838748U, // <1,3,1,7>: Cost 3 vext1 <7,1,3,1>, <7,1,3,1>
+ 1745666155U, // <1,3,1,u>: Cost 2 vuzpr LHS, LHS
+ 2819408790U, // <1,3,2,0>: Cost 3 vuzpr LHS, <1,2,3,0>
+ 2625308193U, // <1,3,2,1>: Cost 3 vext2 <1,3,1,3>, <2,1,3,3>
+ 2819408036U, // <1,3,2,2>: Cost 3 vuzpr LHS, <0,2,0,2>
+ 2819851890U, // <1,3,2,3>: Cost 3 vuzpr LHS, <2,2,3,3>
+ 2819408794U, // <1,3,2,4>: Cost 3 vuzpr LHS, <1,2,3,4>
+ 3893149890U, // <1,3,2,5>: Cost 4 vuzpr LHS, <0,2,3,5>
+ 2819408076U, // <1,3,2,6>: Cost 3 vuzpr LHS, <0,2,4,6>
+ 3772041583U, // <1,3,2,7>: Cost 4 vext3 <2,3,0,1>, <3,2,7,3>
+ 2819408042U, // <1,3,2,u>: Cost 3 vuzpr LHS, <0,2,0,u>
+ 1483276390U, // <1,3,3,0>: Cost 2 vext1 <1,1,3,3>, LHS
+ 1483277128U, // <1,3,3,1>: Cost 2 vext1 <1,1,3,3>, <1,1,3,3>
+ 2557019752U, // <1,3,3,2>: Cost 3 vext1 <1,1,3,3>, <2,2,2,2>
+ 2819408856U, // <1,3,3,3>: Cost 3 vuzpr LHS, <1,3,1,3>
+ 1483279670U, // <1,3,3,4>: Cost 2 vext1 <1,1,3,3>, RHS
+ 2819409614U, // <1,3,3,5>: Cost 3 vuzpr LHS, <2,3,4,5>
+ 2598826490U, // <1,3,3,6>: Cost 3 vext1 <u,1,3,3>, <6,2,7,3>
+ 3087844352U, // <1,3,3,7>: Cost 3 vtrnr LHS, <1,3,5,7>
+ 1483282222U, // <1,3,3,u>: Cost 2 vext1 <1,1,3,3>, LHS
+ 2568970342U, // <1,3,4,0>: Cost 3 vext1 <3,1,3,4>, LHS
+ 2568971224U, // <1,3,4,1>: Cost 3 vext1 <3,1,3,4>, <1,3,1,3>
+ 3832761290U, // <1,3,4,2>: Cost 4 vuzpl <1,2,3,4>, <4,1,2,3>
+ 2233428219U, // <1,3,4,3>: Cost 3 vrev <3,1,3,4>
+ 2568973622U, // <1,3,4,4>: Cost 3 vext1 <3,1,3,4>, RHS
+ 1551568182U, // <1,3,4,5>: Cost 2 vext2 <1,3,1,3>, RHS
+ 2819410434U, // <1,3,4,6>: Cost 3 vuzpr LHS, <3,4,5,6>
+ 3666605151U, // <1,3,4,7>: Cost 4 vext1 <7,1,3,4>, <7,1,3,4>
+ 1551568425U, // <1,3,4,u>: Cost 2 vext2 <1,3,1,3>, RHS
+ 2563006566U, // <1,3,5,0>: Cost 3 vext1 <2,1,3,5>, LHS
+ 2568979456U, // <1,3,5,1>: Cost 3 vext1 <3,1,3,5>, <1,3,5,7>
+ 2563008035U, // <1,3,5,2>: Cost 3 vext1 <2,1,3,5>, <2,1,3,5>
+ 2233436412U, // <1,3,5,3>: Cost 3 vrev <3,1,3,5>
+ 2563009846U, // <1,3,5,4>: Cost 3 vext1 <2,1,3,5>, RHS
+ 2867187716U, // <1,3,5,5>: Cost 3 vuzpr LHS, <5,5,5,5>
+ 2655834214U, // <1,3,5,6>: Cost 3 vext2 <6,4,1,3>, <5,6,7,4>
+ 1745669430U, // <1,3,5,7>: Cost 2 vuzpr LHS, RHS
+ 1745669431U, // <1,3,5,u>: Cost 2 vuzpr LHS, RHS
+ 2867187810U, // <1,3,6,0>: Cost 3 vuzpr LHS, <5,6,7,0>
+ 3699052931U, // <1,3,6,1>: Cost 4 vext2 <1,3,1,3>, <6,1,3,1>
+ 2654507460U, // <1,3,6,2>: Cost 3 vext2 <6,2,1,3>, <6,2,1,3>
+ 3766291091U, // <1,3,6,3>: Cost 4 vext3 <1,3,3,1>, <3,6,3,7>
+ 2655834726U, // <1,3,6,4>: Cost 3 vext2 <6,4,1,3>, <6,4,1,3>
+ 3923384562U, // <1,3,6,5>: Cost 4 vuzpr <5,1,7,3>, <u,6,7,5>
+ 2657161992U, // <1,3,6,6>: Cost 3 vext2 <6,6,1,3>, <6,6,1,3>
+ 2819852218U, // <1,3,6,7>: Cost 3 vuzpr LHS, <2,6,3,7>
+ 2819852219U, // <1,3,6,u>: Cost 3 vuzpr LHS, <2,6,3,u>
+ 2706926275U, // <1,3,7,0>: Cost 3 vext3 <3,7,0,1>, <3,7,0,1>
+ 2659816524U, // <1,3,7,1>: Cost 3 vext2 <7,1,1,3>, <7,1,1,3>
+ 3636766245U, // <1,3,7,2>: Cost 4 vext1 <2,1,3,7>, <2,1,3,7>
+ 2867187903U, // <1,3,7,3>: Cost 3 vuzpr LHS, <5,7,u,3>
+ 2625312102U, // <1,3,7,4>: Cost 3 vext2 <1,3,1,3>, <7,4,5,6>
+ 2867188598U, // <1,3,7,5>: Cost 3 vuzpr LHS, <6,7,4,5>
+ 3728250344U, // <1,3,7,6>: Cost 4 vext2 <6,2,1,3>, <7,6,2,1>
+ 2867187880U, // <1,3,7,7>: Cost 3 vuzpr LHS, <5,7,5,7>
+ 2707516171U, // <1,3,7,u>: Cost 3 vext3 <3,7,u,1>, <3,7,u,1>
+ 1483317350U, // <1,3,u,0>: Cost 2 vext1 <1,1,3,u>, LHS
+ 1483318093U, // <1,3,u,1>: Cost 2 vext1 <1,1,3,u>, <1,1,3,u>
+ 2819410718U, // <1,3,u,2>: Cost 3 vuzpr LHS, <3,u,1,2>
+ 1745666717U, // <1,3,u,3>: Cost 2 vuzpr LHS, LHS
+ 1483320630U, // <1,3,u,4>: Cost 2 vext1 <1,1,3,u>, RHS
+ 1551571098U, // <1,3,u,5>: Cost 2 vext2 <1,3,1,3>, RHS
+ 2819410758U, // <1,3,u,6>: Cost 3 vuzpr LHS, <3,u,5,6>
+ 1745669673U, // <1,3,u,7>: Cost 2 vuzpr LHS, RHS
+ 1745666722U, // <1,3,u,u>: Cost 2 vuzpr LHS, LHS
+ 2617352205U, // <1,4,0,0>: Cost 3 vext2 <0,0,1,4>, <0,0,1,4>
+ 2619342950U, // <1,4,0,1>: Cost 3 vext2 <0,3,1,4>, LHS
+ 3692421295U, // <1,4,0,2>: Cost 4 vext2 <0,2,1,4>, <0,2,1,4>
+ 2619343104U, // <1,4,0,3>: Cost 3 vext2 <0,3,1,4>, <0,3,1,4>
+ 2617352530U, // <1,4,0,4>: Cost 3 vext2 <0,0,1,4>, <0,4,1,5>
+ 1634880402U, // <1,4,0,5>: Cost 2 vext3 <4,0,5,1>, <4,0,5,1>
+ 2713930652U, // <1,4,0,6>: Cost 3 vext3 <4,u,5,1>, <4,0,6,2>
+ 3732898396U, // <1,4,0,7>: Cost 4 vext2 <7,0,1,4>, <0,7,4,1>
+ 1635101613U, // <1,4,0,u>: Cost 2 vext3 <4,0,u,1>, <4,0,u,1>
+ 3693085430U, // <1,4,1,0>: Cost 4 vext2 <0,3,1,4>, <1,0,3,2>
+ 2623988535U, // <1,4,1,1>: Cost 3 vext2 <1,1,1,4>, <1,1,1,4>
+ 3693085590U, // <1,4,1,2>: Cost 4 vext2 <0,3,1,4>, <1,2,3,0>
+ 3692422134U, // <1,4,1,3>: Cost 4 vext2 <0,2,1,4>, <1,3,4,6>
+ 3693085726U, // <1,4,1,4>: Cost 4 vext2 <0,3,1,4>, <1,4,0,1>
+ 2892401974U, // <1,4,1,5>: Cost 3 vzipl <1,1,1,1>, RHS
+ 3026619702U, // <1,4,1,6>: Cost 3 vtrnl <1,1,1,1>, RHS
+ 3800206324U, // <1,4,1,7>: Cost 4 vext3 <7,0,4,1>, <4,1,7,0>
+ 2892402217U, // <1,4,1,u>: Cost 3 vzipl <1,1,1,1>, RHS
+ 3966978927U, // <1,4,2,0>: Cost 4 vzipl <1,2,3,4>, <4,0,1,2>
+ 3966979018U, // <1,4,2,1>: Cost 4 vzipl <1,2,3,4>, <4,1,2,3>
+ 3693086312U, // <1,4,2,2>: Cost 4 vext2 <0,3,1,4>, <2,2,2,2>
+ 2635269798U, // <1,4,2,3>: Cost 3 vext2 <3,0,1,4>, <2,3,0,1>
+ 3966979280U, // <1,4,2,4>: Cost 4 vzipl <1,2,3,4>, <4,4,4,4>
+ 2893204790U, // <1,4,2,5>: Cost 3 vzipl <1,2,3,0>, RHS
+ 3693086650U, // <1,4,2,6>: Cost 4 vext2 <0,3,1,4>, <2,6,3,7>
+ 3666662502U, // <1,4,2,7>: Cost 4 vext1 <7,1,4,2>, <7,1,4,2>
+ 2893205033U, // <1,4,2,u>: Cost 3 vzipl <1,2,3,0>, RHS
+ 2563063910U, // <1,4,3,0>: Cost 3 vext1 <2,1,4,3>, LHS
+ 2563064730U, // <1,4,3,1>: Cost 3 vext1 <2,1,4,3>, <1,2,3,4>
+ 2563065386U, // <1,4,3,2>: Cost 3 vext1 <2,1,4,3>, <2,1,4,3>
+ 3693087132U, // <1,4,3,3>: Cost 4 vext2 <0,3,1,4>, <3,3,3,3>
+ 2619345410U, // <1,4,3,4>: Cost 3 vext2 <0,3,1,4>, <3,4,5,6>
+ 3087843666U, // <1,4,3,5>: Cost 3 vtrnr LHS, <0,4,1,5>
+ 3087843676U, // <1,4,3,6>: Cost 3 vtrnr LHS, <0,4,2,6>
+ 3666670695U, // <1,4,3,7>: Cost 4 vext1 <7,1,4,3>, <7,1,4,3>
+ 3087843669U, // <1,4,3,u>: Cost 3 vtrnr LHS, <0,4,1,u>
+ 2620672914U, // <1,4,4,0>: Cost 3 vext2 <0,5,1,4>, <4,0,5,1>
+ 3630842706U, // <1,4,4,1>: Cost 4 vext1 <1,1,4,4>, <1,1,4,4>
+ 3313069003U, // <1,4,4,2>: Cost 4 vrev <4,1,2,4>
+ 3642788100U, // <1,4,4,3>: Cost 4 vext1 <3,1,4,4>, <3,1,4,4>
+ 2713930960U, // <1,4,4,4>: Cost 3 vext3 <4,u,5,1>, <4,4,4,4>
+ 2619346230U, // <1,4,4,5>: Cost 3 vext2 <0,3,1,4>, RHS
+ 2713930980U, // <1,4,4,6>: Cost 3 vext3 <4,u,5,1>, <4,4,6,6>
+ 3736882642U, // <1,4,4,7>: Cost 4 vext2 <7,6,1,4>, <4,7,6,1>
+ 2619346473U, // <1,4,4,u>: Cost 3 vext2 <0,3,1,4>, RHS
+ 2557108326U, // <1,4,5,0>: Cost 3 vext1 <1,1,4,5>, LHS
+ 2557109075U, // <1,4,5,1>: Cost 3 vext1 <1,1,4,5>, <1,1,4,5>
+ 2598913774U, // <1,4,5,2>: Cost 3 vext1 <u,1,4,5>, <2,3,u,1>
+ 3630852246U, // <1,4,5,3>: Cost 4 vext1 <1,1,4,5>, <3,0,1,2>
+ 2557111606U, // <1,4,5,4>: Cost 3 vext1 <1,1,4,5>, RHS
+ 2895252790U, // <1,4,5,5>: Cost 3 vzipl <1,5,3,7>, RHS
+ 1616006454U, // <1,4,5,6>: Cost 2 vext3 <0,u,1,1>, RHS
+ 3899059510U, // <1,4,5,7>: Cost 4 vuzpr <1,1,1,4>, RHS
+ 1616006472U, // <1,4,5,u>: Cost 2 vext3 <0,u,1,1>, RHS
+ 2557116518U, // <1,4,6,0>: Cost 3 vext1 <1,1,4,6>, LHS
+ 2557117236U, // <1,4,6,1>: Cost 3 vext1 <1,1,4,6>, <1,1,1,1>
+ 3630859880U, // <1,4,6,2>: Cost 4 vext1 <1,1,4,6>, <2,2,2,2>
+ 2569062550U, // <1,4,6,3>: Cost 3 vext1 <3,1,4,6>, <3,0,1,2>
+ 2557119798U, // <1,4,6,4>: Cost 3 vext1 <1,1,4,6>, RHS
+ 3763490174U, // <1,4,6,5>: Cost 4 vext3 <0,u,1,1>, <4,6,5,7>
+ 3763490183U, // <1,4,6,6>: Cost 4 vext3 <0,u,1,1>, <4,6,6,7>
+ 2712751498U, // <1,4,6,7>: Cost 3 vext3 <4,6,7,1>, <4,6,7,1>
+ 2557122350U, // <1,4,6,u>: Cost 3 vext1 <1,1,4,6>, LHS
+ 2659161084U, // <1,4,7,0>: Cost 3 vext2 <7,0,1,4>, <7,0,1,4>
+ 3732903040U, // <1,4,7,1>: Cost 4 vext2 <7,0,1,4>, <7,1,7,1>
+ 3734230174U, // <1,4,7,2>: Cost 4 vext2 <7,2,1,4>, <7,2,1,4>
+ 3734893807U, // <1,4,7,3>: Cost 4 vext2 <7,3,1,4>, <7,3,1,4>
+ 3660729654U, // <1,4,7,4>: Cost 4 vext1 <6,1,4,7>, RHS
+ 3786493384U, // <1,4,7,5>: Cost 4 vext3 <4,6,7,1>, <4,7,5,0>
+ 2713341394U, // <1,4,7,6>: Cost 3 vext3 <4,7,6,1>, <4,7,6,1>
+ 3660731386U, // <1,4,7,7>: Cost 4 vext1 <6,1,4,7>, <7,0,1,2>
+ 2664470148U, // <1,4,7,u>: Cost 3 vext2 <7,u,1,4>, <7,u,1,4>
+ 2557132902U, // <1,4,u,0>: Cost 3 vext1 <1,1,4,u>, LHS
+ 2619348782U, // <1,4,u,1>: Cost 3 vext2 <0,3,1,4>, LHS
+ 2563106351U, // <1,4,u,2>: Cost 3 vext1 <2,1,4,u>, <2,1,4,u>
+ 2713783816U, // <1,4,u,3>: Cost 3 vext3 <4,u,3,1>, <4,u,3,1>
+ 2622666815U, // <1,4,u,4>: Cost 3 vext2 <0,u,1,4>, <u,4,5,6>
+ 1640189466U, // <1,4,u,5>: Cost 2 vext3 <4,u,5,1>, <4,u,5,1>
+ 1616006697U, // <1,4,u,6>: Cost 2 vext3 <0,u,1,1>, RHS
+ 2712751498U, // <1,4,u,7>: Cost 3 vext3 <4,6,7,1>, <4,6,7,1>
+ 1616006715U, // <1,4,u,u>: Cost 2 vext3 <0,u,1,1>, RHS
+ 2620014592U, // <1,5,0,0>: Cost 3 vext2 <0,4,1,5>, <0,0,0,0>
+ 1546272870U, // <1,5,0,1>: Cost 2 vext2 <0,4,1,5>, LHS
+ 2618687664U, // <1,5,0,2>: Cost 3 vext2 <0,2,1,5>, <0,2,1,5>
+ 3693093120U, // <1,5,0,3>: Cost 4 vext2 <0,3,1,5>, <0,3,1,4>
+ 1546273106U, // <1,5,0,4>: Cost 2 vext2 <0,4,1,5>, <0,4,1,5>
+ 2620678563U, // <1,5,0,5>: Cost 3 vext2 <0,5,1,5>, <0,5,1,5>
+ 2714668660U, // <1,5,0,6>: Cost 3 vext3 <5,0,6,1>, <5,0,6,1>
+ 3772042877U, // <1,5,0,7>: Cost 4 vext3 <2,3,0,1>, <5,0,7,1>
+ 1546273437U, // <1,5,0,u>: Cost 2 vext2 <0,4,1,5>, LHS
+ 2620015350U, // <1,5,1,0>: Cost 3 vext2 <0,4,1,5>, <1,0,3,2>
+ 2620015412U, // <1,5,1,1>: Cost 3 vext2 <0,4,1,5>, <1,1,1,1>
+ 2620015510U, // <1,5,1,2>: Cost 3 vext2 <0,4,1,5>, <1,2,3,0>
+ 2618688512U, // <1,5,1,3>: Cost 3 vext2 <0,2,1,5>, <1,3,5,7>
+ 2620015677U, // <1,5,1,4>: Cost 3 vext2 <0,4,1,5>, <1,4,3,5>
+ 2620015727U, // <1,5,1,5>: Cost 3 vext2 <0,4,1,5>, <1,5,0,1>
+ 2620015859U, // <1,5,1,6>: Cost 3 vext2 <0,4,1,5>, <1,6,5,7>
+ 3093728566U, // <1,5,1,7>: Cost 3 vtrnr <1,1,1,1>, RHS
+ 2620015981U, // <1,5,1,u>: Cost 3 vext2 <0,4,1,5>, <1,u,1,3>
+ 3692430816U, // <1,5,2,0>: Cost 4 vext2 <0,2,1,5>, <2,0,5,1>
+ 2620016163U, // <1,5,2,1>: Cost 3 vext2 <0,4,1,5>, <2,1,3,5>
+ 2620016232U, // <1,5,2,2>: Cost 3 vext2 <0,4,1,5>, <2,2,2,2>
+ 2620016294U, // <1,5,2,3>: Cost 3 vext2 <0,4,1,5>, <2,3,0,1>
+ 3693758221U, // <1,5,2,4>: Cost 4 vext2 <0,4,1,5>, <2,4,2,5>
+ 3692431209U, // <1,5,2,5>: Cost 4 vext2 <0,2,1,5>, <2,5,3,7>
+ 2620016570U, // <1,5,2,6>: Cost 3 vext2 <0,4,1,5>, <2,6,3,7>
+ 4173598006U, // <1,5,2,7>: Cost 4 vtrnr <2,1,3,2>, RHS
+ 2620016699U, // <1,5,2,u>: Cost 3 vext2 <0,4,1,5>, <2,u,0,1>
+ 2620016790U, // <1,5,3,0>: Cost 3 vext2 <0,4,1,5>, <3,0,1,2>
+ 2569110672U, // <1,5,3,1>: Cost 3 vext1 <3,1,5,3>, <1,5,3,7>
+ 3693758785U, // <1,5,3,2>: Cost 4 vext2 <0,4,1,5>, <3,2,2,2>
+ 2620017052U, // <1,5,3,3>: Cost 3 vext2 <0,4,1,5>, <3,3,3,3>
+ 2620017154U, // <1,5,3,4>: Cost 3 vext2 <0,4,1,5>, <3,4,5,6>
+ 3135623172U, // <1,5,3,5>: Cost 3 vtrnr LHS, <5,5,5,5>
+ 4161587048U, // <1,5,3,6>: Cost 4 vtrnr LHS, <2,5,3,6>
+ 2014104886U, // <1,5,3,7>: Cost 2 vtrnr LHS, RHS
+ 2014104887U, // <1,5,3,u>: Cost 2 vtrnr LHS, RHS
+ 2620017554U, // <1,5,4,0>: Cost 3 vext2 <0,4,1,5>, <4,0,5,1>
+ 2620017634U, // <1,5,4,1>: Cost 3 vext2 <0,4,1,5>, <4,1,5,0>
+ 3693759551U, // <1,5,4,2>: Cost 4 vext2 <0,4,1,5>, <4,2,6,3>
+ 3642861837U, // <1,5,4,3>: Cost 4 vext1 <3,1,5,4>, <3,1,5,4>
+ 2575092710U, // <1,5,4,4>: Cost 3 vext1 <4,1,5,4>, <4,1,5,4>
+ 1546276150U, // <1,5,4,5>: Cost 2 vext2 <0,4,1,5>, RHS
+ 2759855414U, // <1,5,4,6>: Cost 3 vuzpl <1,3,5,7>, RHS
+ 2713931718U, // <1,5,4,7>: Cost 3 vext3 <4,u,5,1>, <5,4,7,6>
+ 1546276393U, // <1,5,4,u>: Cost 2 vext2 <0,4,1,5>, RHS
+ 2557182054U, // <1,5,5,0>: Cost 3 vext1 <1,1,5,5>, LHS
+ 2557182812U, // <1,5,5,1>: Cost 3 vext1 <1,1,5,5>, <1,1,5,5>
+ 3630925347U, // <1,5,5,2>: Cost 4 vext1 <1,1,5,5>, <2,1,3,5>
+ 4029301675U, // <1,5,5,3>: Cost 4 vzipr <0,4,1,5>, <1,2,5,3>
+ 2557185334U, // <1,5,5,4>: Cost 3 vext1 <1,1,5,5>, RHS
+ 2713931780U, // <1,5,5,5>: Cost 3 vext3 <4,u,5,1>, <5,5,5,5>
+ 2667794530U, // <1,5,5,6>: Cost 3 vext2 <u,4,1,5>, <5,6,7,0>
+ 2713931800U, // <1,5,5,7>: Cost 3 vext3 <4,u,5,1>, <5,5,7,7>
+ 2557187886U, // <1,5,5,u>: Cost 3 vext1 <1,1,5,5>, LHS
+ 2718208036U, // <1,5,6,0>: Cost 3 vext3 <5,6,0,1>, <5,6,0,1>
+ 2620019115U, // <1,5,6,1>: Cost 3 vext2 <0,4,1,5>, <6,1,7,5>
+ 2667794938U, // <1,5,6,2>: Cost 3 vext2 <u,4,1,5>, <6,2,7,3>
+ 3787673666U, // <1,5,6,3>: Cost 4 vext3 <4,u,5,1>, <5,6,3,4>
+ 3693761165U, // <1,5,6,4>: Cost 4 vext2 <0,4,1,5>, <6,4,5,6>
+ 3319279297U, // <1,5,6,5>: Cost 4 vrev <5,1,5,6>
+ 2667795256U, // <1,5,6,6>: Cost 3 vext2 <u,4,1,5>, <6,6,6,6>
+ 2713931874U, // <1,5,6,7>: Cost 3 vext3 <4,u,5,1>, <5,6,7,0>
+ 2713931883U, // <1,5,6,u>: Cost 3 vext3 <4,u,5,1>, <5,6,u,0>
+ 2557198438U, // <1,5,7,0>: Cost 3 vext1 <1,1,5,7>, LHS
+ 2557199156U, // <1,5,7,1>: Cost 3 vext1 <1,1,5,7>, <1,1,1,1>
+ 2569143974U, // <1,5,7,2>: Cost 3 vext1 <3,1,5,7>, <2,3,0,1>
+ 2569144592U, // <1,5,7,3>: Cost 3 vext1 <3,1,5,7>, <3,1,5,7>
+ 2557201718U, // <1,5,7,4>: Cost 3 vext1 <1,1,5,7>, RHS
+ 2713931944U, // <1,5,7,5>: Cost 3 vext3 <4,u,5,1>, <5,7,5,7>
+ 3787673770U, // <1,5,7,6>: Cost 4 vext3 <4,u,5,1>, <5,7,6,0>
+ 2719387828U, // <1,5,7,7>: Cost 3 vext3 <5,7,7,1>, <5,7,7,1>
+ 2557204270U, // <1,5,7,u>: Cost 3 vext1 <1,1,5,7>, LHS
+ 2620020435U, // <1,5,u,0>: Cost 3 vext2 <0,4,1,5>, <u,0,1,2>
+ 1546278702U, // <1,5,u,1>: Cost 2 vext2 <0,4,1,5>, LHS
+ 2620020616U, // <1,5,u,2>: Cost 3 vext2 <0,4,1,5>, <u,2,3,3>
+ 2620020668U, // <1,5,u,3>: Cost 3 vext2 <0,4,1,5>, <u,3,0,1>
+ 1594054682U, // <1,5,u,4>: Cost 2 vext2 <u,4,1,5>, <u,4,1,5>
+ 1546279066U, // <1,5,u,5>: Cost 2 vext2 <0,4,1,5>, RHS
+ 2620020944U, // <1,5,u,6>: Cost 3 vext2 <0,4,1,5>, <u,6,3,7>
+ 2014145846U, // <1,5,u,7>: Cost 2 vtrnr LHS, RHS
+ 2014145847U, // <1,5,u,u>: Cost 2 vtrnr LHS, RHS
+ 3692437504U, // <1,6,0,0>: Cost 4 vext2 <0,2,1,6>, <0,0,0,0>
+ 2618695782U, // <1,6,0,1>: Cost 3 vext2 <0,2,1,6>, LHS
+ 2618695857U, // <1,6,0,2>: Cost 3 vext2 <0,2,1,6>, <0,2,1,6>
+ 3794161970U, // <1,6,0,3>: Cost 4 vext3 <6,0,3,1>, <6,0,3,1>
+ 2620023122U, // <1,6,0,4>: Cost 3 vext2 <0,4,1,6>, <0,4,1,5>
+ 2620686756U, // <1,6,0,5>: Cost 3 vext2 <0,5,1,6>, <0,5,1,6>
+ 2621350389U, // <1,6,0,6>: Cost 3 vext2 <0,6,1,6>, <0,6,1,6>
+ 4028599606U, // <1,6,0,7>: Cost 4 vzipr <0,3,1,0>, RHS
+ 2618696349U, // <1,6,0,u>: Cost 3 vext2 <0,2,1,6>, LHS
+ 3692438262U, // <1,6,1,0>: Cost 4 vext2 <0,2,1,6>, <1,0,3,2>
+ 2625995572U, // <1,6,1,1>: Cost 3 vext2 <1,4,1,6>, <1,1,1,1>
+ 3692438422U, // <1,6,1,2>: Cost 4 vext2 <0,2,1,6>, <1,2,3,0>
+ 3692438488U, // <1,6,1,3>: Cost 4 vext2 <0,2,1,6>, <1,3,1,3>
+ 2625995820U, // <1,6,1,4>: Cost 3 vext2 <1,4,1,6>, <1,4,1,6>
+ 3692438672U, // <1,6,1,5>: Cost 4 vext2 <0,2,1,6>, <1,5,3,7>
+ 3692438720U, // <1,6,1,6>: Cost 4 vext2 <0,2,1,6>, <1,6,0,1>
+ 2958183734U, // <1,6,1,7>: Cost 3 vzipr <0,u,1,1>, RHS
+ 2958183735U, // <1,6,1,u>: Cost 3 vzipr <0,u,1,1>, RHS
+ 2721526201U, // <1,6,2,0>: Cost 3 vext3 <6,2,0,1>, <6,2,0,1>
+ 3692439097U, // <1,6,2,1>: Cost 4 vext2 <0,2,1,6>, <2,1,6,0>
+ 3692439144U, // <1,6,2,2>: Cost 4 vext2 <0,2,1,6>, <2,2,2,2>
+ 3692439206U, // <1,6,2,3>: Cost 4 vext2 <0,2,1,6>, <2,3,0,1>
+ 3636948278U, // <1,6,2,4>: Cost 4 vext1 <2,1,6,2>, RHS
+ 3787674092U, // <1,6,2,5>: Cost 4 vext3 <4,u,5,1>, <6,2,5,7>
+ 2618697658U, // <1,6,2,6>: Cost 3 vext2 <0,2,1,6>, <2,6,3,7>
+ 2970799414U, // <1,6,2,7>: Cost 3 vzipr <3,0,1,2>, RHS
+ 2970799415U, // <1,6,2,u>: Cost 3 vzipr <3,0,1,2>, RHS
+ 2563211366U, // <1,6,3,0>: Cost 3 vext1 <2,1,6,3>, LHS
+ 3699738854U, // <1,6,3,1>: Cost 4 vext2 <1,4,1,6>, <3,1,1,1>
+ 2563212860U, // <1,6,3,2>: Cost 3 vext1 <2,1,6,3>, <2,1,6,3>
+ 3692439964U, // <1,6,3,3>: Cost 4 vext2 <0,2,1,6>, <3,3,3,3>
+ 2563214646U, // <1,6,3,4>: Cost 3 vext1 <2,1,6,3>, RHS
+ 4191820018U, // <1,6,3,5>: Cost 4 vtrnr <5,1,7,3>, <u,6,7,5>
+ 2587103648U, // <1,6,3,6>: Cost 3 vext1 <6,1,6,3>, <6,1,6,3>
+ 3087845306U, // <1,6,3,7>: Cost 3 vtrnr LHS, <2,6,3,7>
+ 3087845307U, // <1,6,3,u>: Cost 3 vtrnr LHS, <2,6,3,u>
+ 3693767570U, // <1,6,4,0>: Cost 4 vext2 <0,4,1,6>, <4,0,5,1>
+ 3693767650U, // <1,6,4,1>: Cost 4 vext2 <0,4,1,6>, <4,1,5,0>
+ 3636962877U, // <1,6,4,2>: Cost 4 vext1 <2,1,6,4>, <2,1,6,4>
+ 3325088134U, // <1,6,4,3>: Cost 4 vrev <6,1,3,4>
+ 3693767898U, // <1,6,4,4>: Cost 4 vext2 <0,4,1,6>, <4,4,5,5>
+ 2618699062U, // <1,6,4,5>: Cost 3 vext2 <0,2,1,6>, RHS
+ 3833670966U, // <1,6,4,6>: Cost 4 vuzpl <1,3,6,7>, RHS
+ 4028632374U, // <1,6,4,7>: Cost 4 vzipr <0,3,1,4>, RHS
+ 2618699305U, // <1,6,4,u>: Cost 3 vext2 <0,2,1,6>, RHS
+ 3693768264U, // <1,6,5,0>: Cost 4 vext2 <0,4,1,6>, <5,0,1,2>
+ 3630998373U, // <1,6,5,1>: Cost 4 vext1 <1,1,6,5>, <1,1,6,5>
+ 3636971070U, // <1,6,5,2>: Cost 4 vext1 <2,1,6,5>, <2,1,6,5>
+ 3642943767U, // <1,6,5,3>: Cost 4 vext1 <3,1,6,5>, <3,1,6,5>
+ 3693768628U, // <1,6,5,4>: Cost 4 vext2 <0,4,1,6>, <5,4,5,6>
+ 3732918276U, // <1,6,5,5>: Cost 4 vext2 <7,0,1,6>, <5,5,5,5>
+ 2620690530U, // <1,6,5,6>: Cost 3 vext2 <0,5,1,6>, <5,6,7,0>
+ 2955562294U, // <1,6,5,7>: Cost 3 vzipr <0,4,1,5>, RHS
+ 2955562295U, // <1,6,5,u>: Cost 3 vzipr <0,4,1,5>, RHS
+ 2724180733U, // <1,6,6,0>: Cost 3 vext3 <6,6,0,1>, <6,6,0,1>
+ 3631006566U, // <1,6,6,1>: Cost 4 vext1 <1,1,6,6>, <1,1,6,6>
+ 3631007674U, // <1,6,6,2>: Cost 4 vext1 <1,1,6,6>, <2,6,3,7>
+ 3692442184U, // <1,6,6,3>: Cost 4 vext2 <0,2,1,6>, <6,3,7,0>
+ 3631009078U, // <1,6,6,4>: Cost 4 vext1 <1,1,6,6>, RHS
+ 3787674416U, // <1,6,6,5>: Cost 4 vext3 <4,u,5,1>, <6,6,5,7>
+ 2713932600U, // <1,6,6,6>: Cost 3 vext3 <4,u,5,1>, <6,6,6,6>
+ 2713932610U, // <1,6,6,7>: Cost 3 vext3 <4,u,5,1>, <6,6,7,7>
+ 2713932619U, // <1,6,6,u>: Cost 3 vext3 <4,u,5,1>, <6,6,u,7>
+ 1651102542U, // <1,6,7,0>: Cost 2 vext3 <6,7,0,1>, <6,7,0,1>
+ 2724918103U, // <1,6,7,1>: Cost 3 vext3 <6,7,1,1>, <6,7,1,1>
+ 2698302306U, // <1,6,7,2>: Cost 3 vext3 <2,3,0,1>, <6,7,2,3>
+ 3642960153U, // <1,6,7,3>: Cost 4 vext1 <3,1,6,7>, <3,1,6,7>
+ 2713932662U, // <1,6,7,4>: Cost 3 vext3 <4,u,5,1>, <6,7,4,5>
+ 2725213051U, // <1,6,7,5>: Cost 3 vext3 <6,7,5,1>, <6,7,5,1>
+ 2724844426U, // <1,6,7,6>: Cost 3 vext3 <6,7,0,1>, <6,7,6,7>
+ 4035956022U, // <1,6,7,7>: Cost 4 vzipr <1,5,1,7>, RHS
+ 1651692438U, // <1,6,7,u>: Cost 2 vext3 <6,7,u,1>, <6,7,u,1>
+ 1651766175U, // <1,6,u,0>: Cost 2 vext3 <6,u,0,1>, <6,u,0,1>
+ 2618701614U, // <1,6,u,1>: Cost 3 vext2 <0,2,1,6>, LHS
+ 3135663508U, // <1,6,u,2>: Cost 3 vtrnr LHS, <4,6,u,2>
+ 3692443580U, // <1,6,u,3>: Cost 4 vext2 <0,2,1,6>, <u,3,0,1>
+ 2713932743U, // <1,6,u,4>: Cost 3 vext3 <4,u,5,1>, <6,u,4,5>
+ 2618701978U, // <1,6,u,5>: Cost 3 vext2 <0,2,1,6>, RHS
+ 2622683344U, // <1,6,u,6>: Cost 3 vext2 <0,u,1,6>, <u,6,3,7>
+ 3087886266U, // <1,6,u,7>: Cost 3 vtrnr LHS, <2,6,3,7>
+ 1652356071U, // <1,6,u,u>: Cost 2 vext3 <6,u,u,1>, <6,u,u,1>
+ 2726171632U, // <1,7,0,0>: Cost 3 vext3 <7,0,0,1>, <7,0,0,1>
+ 2626666598U, // <1,7,0,1>: Cost 3 vext2 <1,5,1,7>, LHS
+ 3695100067U, // <1,7,0,2>: Cost 4 vext2 <0,6,1,7>, <0,2,0,1>
+ 3707044102U, // <1,7,0,3>: Cost 4 vext2 <2,6,1,7>, <0,3,2,1>
+ 2726466580U, // <1,7,0,4>: Cost 3 vext3 <7,0,4,1>, <7,0,4,1>
+ 3654921933U, // <1,7,0,5>: Cost 4 vext1 <5,1,7,0>, <5,1,7,0>
+ 2621358582U, // <1,7,0,6>: Cost 3 vext2 <0,6,1,7>, <0,6,1,7>
+ 2622022215U, // <1,7,0,7>: Cost 3 vext2 <0,7,1,7>, <0,7,1,7>
+ 2626667165U, // <1,7,0,u>: Cost 3 vext2 <1,5,1,7>, LHS
+ 2593128550U, // <1,7,1,0>: Cost 3 vext1 <7,1,7,1>, LHS
+ 2626667316U, // <1,7,1,1>: Cost 3 vext2 <1,5,1,7>, <1,1,1,1>
+ 3700409238U, // <1,7,1,2>: Cost 4 vext2 <1,5,1,7>, <1,2,3,0>
+ 2257294428U, // <1,7,1,3>: Cost 3 vrev <7,1,3,1>
+ 2593131830U, // <1,7,1,4>: Cost 3 vext1 <7,1,7,1>, RHS
+ 2626667646U, // <1,7,1,5>: Cost 3 vext2 <1,5,1,7>, <1,5,1,7>
+ 2627331279U, // <1,7,1,6>: Cost 3 vext2 <1,6,1,7>, <1,6,1,7>
+ 2593133696U, // <1,7,1,7>: Cost 3 vext1 <7,1,7,1>, <7,1,7,1>
+ 2628658545U, // <1,7,1,u>: Cost 3 vext2 <1,u,1,7>, <1,u,1,7>
+ 2587164774U, // <1,7,2,0>: Cost 3 vext1 <6,1,7,2>, LHS
+ 3701073445U, // <1,7,2,1>: Cost 4 vext2 <1,6,1,7>, <2,1,3,7>
+ 3700409960U, // <1,7,2,2>: Cost 4 vext2 <1,5,1,7>, <2,2,2,2>
+ 2638612134U, // <1,7,2,3>: Cost 3 vext2 <3,5,1,7>, <2,3,0,1>
+ 2587168054U, // <1,7,2,4>: Cost 3 vext1 <6,1,7,2>, RHS
+ 3706382167U, // <1,7,2,5>: Cost 4 vext2 <2,5,1,7>, <2,5,1,7>
+ 2587169192U, // <1,7,2,6>: Cost 3 vext1 <6,1,7,2>, <6,1,7,2>
+ 3660911610U, // <1,7,2,7>: Cost 4 vext1 <6,1,7,2>, <7,0,1,2>
+ 2587170606U, // <1,7,2,u>: Cost 3 vext1 <6,1,7,2>, LHS
+ 1507459174U, // <1,7,3,0>: Cost 2 vext1 <5,1,7,3>, LHS
+ 2569257984U, // <1,7,3,1>: Cost 3 vext1 <3,1,7,3>, <1,3,5,7>
+ 2581202536U, // <1,7,3,2>: Cost 3 vext1 <5,1,7,3>, <2,2,2,2>
+ 2569259294U, // <1,7,3,3>: Cost 3 vext1 <3,1,7,3>, <3,1,7,3>
+ 1507462454U, // <1,7,3,4>: Cost 2 vext1 <5,1,7,3>, RHS
+ 1507462864U, // <1,7,3,5>: Cost 2 vext1 <5,1,7,3>, <5,1,7,3>
+ 2581205498U, // <1,7,3,6>: Cost 3 vext1 <5,1,7,3>, <6,2,7,3>
+ 2581206010U, // <1,7,3,7>: Cost 3 vext1 <5,1,7,3>, <7,0,1,2>
+ 1507465006U, // <1,7,3,u>: Cost 2 vext1 <5,1,7,3>, LHS
+ 2728826164U, // <1,7,4,0>: Cost 3 vext3 <7,4,0,1>, <7,4,0,1>
+ 3654951732U, // <1,7,4,1>: Cost 4 vext1 <5,1,7,4>, <1,1,1,1>
+ 3330987094U, // <1,7,4,2>: Cost 4 vrev <7,1,2,4>
+ 3331060831U, // <1,7,4,3>: Cost 4 vrev <7,1,3,4>
+ 3787674971U, // <1,7,4,4>: Cost 4 vext3 <4,u,5,1>, <7,4,4,4>
+ 2626669878U, // <1,7,4,5>: Cost 3 vext2 <1,5,1,7>, RHS
+ 3785979241U, // <1,7,4,6>: Cost 4 vext3 <4,6,0,1>, <7,4,6,0>
+ 3787085176U, // <1,7,4,7>: Cost 4 vext3 <4,7,6,1>, <7,4,7,6>
+ 2626670121U, // <1,7,4,u>: Cost 3 vext2 <1,5,1,7>, RHS
+ 2569273446U, // <1,7,5,0>: Cost 3 vext1 <3,1,7,5>, LHS
+ 2569274368U, // <1,7,5,1>: Cost 3 vext1 <3,1,7,5>, <1,3,5,7>
+ 3643016808U, // <1,7,5,2>: Cost 4 vext1 <3,1,7,5>, <2,2,2,2>
+ 2569275680U, // <1,7,5,3>: Cost 3 vext1 <3,1,7,5>, <3,1,7,5>
+ 2569276726U, // <1,7,5,4>: Cost 3 vext1 <3,1,7,5>, RHS
+ 4102034790U, // <1,7,5,5>: Cost 4 vtrnl <1,3,5,7>, <7,4,5,6>
+ 2651222067U, // <1,7,5,6>: Cost 3 vext2 <5,6,1,7>, <5,6,1,7>
+ 3899378998U, // <1,7,5,7>: Cost 4 vuzpr <1,1,5,7>, RHS
+ 2569279278U, // <1,7,5,u>: Cost 3 vext1 <3,1,7,5>, LHS
+ 2730153430U, // <1,7,6,0>: Cost 3 vext3 <7,6,0,1>, <7,6,0,1>
+ 2724845022U, // <1,7,6,1>: Cost 3 vext3 <6,7,0,1>, <7,6,1,0>
+ 3643025338U, // <1,7,6,2>: Cost 4 vext1 <3,1,7,6>, <2,6,3,7>
+ 3643025697U, // <1,7,6,3>: Cost 4 vext1 <3,1,7,6>, <3,1,7,6>
+ 3643026742U, // <1,7,6,4>: Cost 4 vext1 <3,1,7,6>, RHS
+ 3654971091U, // <1,7,6,5>: Cost 4 vext1 <5,1,7,6>, <5,1,7,6>
+ 3787675153U, // <1,7,6,6>: Cost 4 vext3 <4,u,5,1>, <7,6,6,6>
+ 2724845076U, // <1,7,6,7>: Cost 3 vext3 <6,7,0,1>, <7,6,7,0>
+ 2725508637U, // <1,7,6,u>: Cost 3 vext3 <6,u,0,1>, <7,6,u,0>
+ 2730817063U, // <1,7,7,0>: Cost 3 vext3 <7,7,0,1>, <7,7,0,1>
+ 3631088436U, // <1,7,7,1>: Cost 4 vext1 <1,1,7,7>, <1,1,1,1>
+ 3660949158U, // <1,7,7,2>: Cost 4 vext1 <6,1,7,7>, <2,3,0,1>
+ 3801904705U, // <1,7,7,3>: Cost 4 vext3 <7,3,0,1>, <7,7,3,0>
+ 3631090998U, // <1,7,7,4>: Cost 4 vext1 <1,1,7,7>, RHS
+ 2662503828U, // <1,7,7,5>: Cost 3 vext2 <7,5,1,7>, <7,5,1,7>
+ 3660951981U, // <1,7,7,6>: Cost 4 vext1 <6,1,7,7>, <6,1,7,7>
+ 2713933420U, // <1,7,7,7>: Cost 3 vext3 <4,u,5,1>, <7,7,7,7>
+ 2731406959U, // <1,7,7,u>: Cost 3 vext3 <7,7,u,1>, <7,7,u,1>
+ 1507500134U, // <1,7,u,0>: Cost 2 vext1 <5,1,7,u>, LHS
+ 2626672430U, // <1,7,u,1>: Cost 3 vext2 <1,5,1,7>, LHS
+ 2581243496U, // <1,7,u,2>: Cost 3 vext1 <5,1,7,u>, <2,2,2,2>
+ 2569300259U, // <1,7,u,3>: Cost 3 vext1 <3,1,7,u>, <3,1,7,u>
+ 1507503414U, // <1,7,u,4>: Cost 2 vext1 <5,1,7,u>, RHS
+ 1507503829U, // <1,7,u,5>: Cost 2 vext1 <5,1,7,u>, <5,1,7,u>
+ 2581246458U, // <1,7,u,6>: Cost 3 vext1 <5,1,7,u>, <6,2,7,3>
+ 2581246970U, // <1,7,u,7>: Cost 3 vext1 <5,1,7,u>, <7,0,1,2>
+ 1507505966U, // <1,7,u,u>: Cost 2 vext1 <5,1,7,u>, LHS
+ 1543643153U, // <1,u,0,0>: Cost 2 vext2 <0,0,1,u>, <0,0,1,u>
+ 1546297446U, // <1,u,0,1>: Cost 2 vext2 <0,4,1,u>, LHS
+ 2819448852U, // <1,u,0,2>: Cost 3 vuzpr LHS, <0,0,2,2>
+ 2619375876U, // <1,u,0,3>: Cost 3 vext2 <0,3,1,u>, <0,3,1,u>
+ 1546297685U, // <1,u,0,4>: Cost 2 vext2 <0,4,1,u>, <0,4,1,u>
+ 1658771190U, // <1,u,0,5>: Cost 2 vext3 <u,0,5,1>, <u,0,5,1>
+ 2736789248U, // <1,u,0,6>: Cost 3 vext3 <u,7,0,1>, <u,0,6,2>
+ 2659189376U, // <1,u,0,7>: Cost 3 vext2 <7,0,1,u>, <0,7,u,1>
+ 1546298013U, // <1,u,0,u>: Cost 2 vext2 <0,4,1,u>, LHS
+ 1483112550U, // <1,u,1,0>: Cost 2 vext1 <1,1,1,1>, LHS
+ 202162278U, // <1,u,1,1>: Cost 1 vdup1 LHS
+ 1616009006U, // <1,u,1,2>: Cost 2 vext3 <0,u,1,1>, LHS
+ 1745707110U, // <1,u,1,3>: Cost 2 vuzpr LHS, LHS
+ 1483115830U, // <1,u,1,4>: Cost 2 vext1 <1,1,1,1>, RHS
+ 2620040336U, // <1,u,1,5>: Cost 3 vext2 <0,4,1,u>, <1,5,3,7>
+ 3026622618U, // <1,u,1,6>: Cost 3 vtrnl <1,1,1,1>, RHS
+ 2958183752U, // <1,u,1,7>: Cost 3 vzipr <0,u,1,1>, RHS
+ 202162278U, // <1,u,1,u>: Cost 1 vdup1 LHS
+ 2819449750U, // <1,u,2,0>: Cost 3 vuzpr LHS, <1,2,3,0>
+ 2893207342U, // <1,u,2,1>: Cost 3 vzipl <1,2,3,0>, LHS
+ 2819448996U, // <1,u,2,2>: Cost 3 vuzpr LHS, <0,2,0,2>
+ 2819450482U, // <1,u,2,3>: Cost 3 vuzpr LHS, <2,2,3,3>
+ 2819449754U, // <1,u,2,4>: Cost 3 vuzpr LHS, <1,2,3,4>
+ 2893207706U, // <1,u,2,5>: Cost 3 vzipl <1,2,3,0>, RHS
+ 2819449036U, // <1,u,2,6>: Cost 3 vuzpr LHS, <0,2,4,6>
+ 2970799432U, // <1,u,2,7>: Cost 3 vzipr <3,0,1,2>, RHS
+ 2819449002U, // <1,u,2,u>: Cost 3 vuzpr LHS, <0,2,0,u>
+ 403931292U, // <1,u,3,0>: Cost 1 vext1 LHS, LHS
+ 1477673718U, // <1,u,3,1>: Cost 2 vext1 LHS, <1,0,3,2>
+ 115726126U, // <1,u,3,2>: Cost 1 vrev LHS
+ 2014102173U, // <1,u,3,3>: Cost 2 vtrnr LHS, LHS
+ 403934518U, // <1,u,3,4>: Cost 1 vext1 LHS, RHS
+ 1507536601U, // <1,u,3,5>: Cost 2 vext1 <5,1,u,3>, <5,1,u,3>
+ 1525453306U, // <1,u,3,6>: Cost 2 vext1 LHS, <6,2,7,3>
+ 2014105129U, // <1,u,3,7>: Cost 2 vtrnr LHS, RHS
+ 403937070U, // <1,u,3,u>: Cost 1 vext1 LHS, LHS
+ 2620042157U, // <1,u,4,0>: Cost 3 vext2 <0,4,1,u>, <4,0,u,1>
+ 2620042237U, // <1,u,4,1>: Cost 3 vext2 <0,4,1,u>, <4,1,u,0>
+ 2263217967U, // <1,u,4,2>: Cost 3 vrev <u,1,2,4>
+ 2569341224U, // <1,u,4,3>: Cost 3 vext1 <3,1,u,4>, <3,1,u,4>
+ 2569342262U, // <1,u,4,4>: Cost 3 vext1 <3,1,u,4>, RHS
+ 1546300726U, // <1,u,4,5>: Cost 2 vext2 <0,4,1,u>, RHS
+ 2819449180U, // <1,u,4,6>: Cost 3 vuzpr LHS, <0,4,2,6>
+ 2724845649U, // <1,u,4,7>: Cost 3 vext3 <6,7,0,1>, <u,4,7,6>
+ 1546300969U, // <1,u,4,u>: Cost 2 vext2 <0,4,1,u>, RHS
+ 2551431270U, // <1,u,5,0>: Cost 3 vext1 <0,1,u,5>, LHS
+ 2551432192U, // <1,u,5,1>: Cost 3 vext1 <0,1,u,5>, <1,3,5,7>
+ 3028293422U, // <1,u,5,2>: Cost 3 vtrnl <1,3,5,7>, LHS
+ 2955559068U, // <1,u,5,3>: Cost 3 vzipr <0,4,1,5>, LHS
+ 2551434550U, // <1,u,5,4>: Cost 3 vext1 <0,1,u,5>, RHS
+ 2895255706U, // <1,u,5,5>: Cost 3 vzipl <1,5,3,7>, RHS
+ 1616009370U, // <1,u,5,6>: Cost 2 vext3 <0,u,1,1>, RHS
+ 1745710390U, // <1,u,5,7>: Cost 2 vuzpr LHS, RHS
+ 1745710391U, // <1,u,5,u>: Cost 2 vuzpr LHS, RHS
+ 2653221159U, // <1,u,6,0>: Cost 3 vext2 <6,0,1,u>, <6,0,1,u>
+ 2725509303U, // <1,u,6,1>: Cost 3 vext3 <6,u,0,1>, <u,6,1,0>
+ 2659193338U, // <1,u,6,2>: Cost 3 vext2 <7,0,1,u>, <6,2,7,3>
+ 2689751248U, // <1,u,6,3>: Cost 3 vext3 <0,u,1,1>, <u,6,3,7>
+ 2867228774U, // <1,u,6,4>: Cost 3 vuzpr LHS, <5,6,7,4>
+ 3764820194U, // <1,u,6,5>: Cost 4 vext3 <1,1,1,1>, <u,6,5,7>
+ 2657202957U, // <1,u,6,6>: Cost 3 vext2 <6,6,1,u>, <6,6,1,u>
+ 2819450810U, // <1,u,6,7>: Cost 3 vuzpr LHS, <2,6,3,7>
+ 2819450811U, // <1,u,6,u>: Cost 3 vuzpr LHS, <2,6,3,u>
+ 1585452032U, // <1,u,7,0>: Cost 2 vext2 <7,0,1,u>, <7,0,1,u>
+ 2557420340U, // <1,u,7,1>: Cost 3 vext1 <1,1,u,7>, <1,1,1,1>
+ 2569365158U, // <1,u,7,2>: Cost 3 vext1 <3,1,u,7>, <2,3,0,1>
+ 2569365803U, // <1,u,7,3>: Cost 3 vext1 <3,1,u,7>, <3,1,u,7>
+ 2557422902U, // <1,u,7,4>: Cost 3 vext1 <1,1,u,7>, RHS
+ 2662512021U, // <1,u,7,5>: Cost 3 vext2 <7,5,1,u>, <7,5,1,u>
+ 2724845884U, // <1,u,7,6>: Cost 3 vext3 <6,7,0,1>, <u,7,6,7>
+ 2659194476U, // <1,u,7,7>: Cost 3 vext2 <7,0,1,u>, <7,7,7,7>
+ 1590761096U, // <1,u,7,u>: Cost 2 vext2 <7,u,1,u>, <7,u,1,u>
+ 403972257U, // <1,u,u,0>: Cost 1 vext1 LHS, LHS
+ 202162278U, // <1,u,u,1>: Cost 1 vdup1 LHS
+ 115767091U, // <1,u,u,2>: Cost 1 vrev LHS
+ 1745707677U, // <1,u,u,3>: Cost 2 vuzpr LHS, LHS
+ 403975478U, // <1,u,u,4>: Cost 1 vext1 LHS, RHS
+ 1546303642U, // <1,u,u,5>: Cost 2 vext2 <0,4,1,u>, RHS
+ 1616009613U, // <1,u,u,6>: Cost 2 vext3 <0,u,1,1>, RHS
+ 1745710633U, // <1,u,u,7>: Cost 2 vuzpr LHS, RHS
+ 403978030U, // <1,u,u,u>: Cost 1 vext1 LHS, LHS
+ 2551463936U, // <2,0,0,0>: Cost 3 vext1 <0,2,0,0>, <0,0,0,0>
+ 2685698058U, // <2,0,0,1>: Cost 3 vext3 <0,2,0,2>, <0,0,1,1>
+ 1610776596U, // <2,0,0,2>: Cost 2 vext3 <0,0,2,2>, <0,0,2,2>
+ 2619384069U, // <2,0,0,3>: Cost 3 vext2 <0,3,2,0>, <0,3,2,0>
+ 2551467318U, // <2,0,0,4>: Cost 3 vext1 <0,2,0,0>, RHS
+ 3899836596U, // <2,0,0,5>: Cost 4 vuzpr <1,2,3,0>, <3,0,4,5>
+ 2621374968U, // <2,0,0,6>: Cost 3 vext2 <0,6,2,0>, <0,6,2,0>
+ 4168271334U, // <2,0,0,7>: Cost 4 vtrnr <1,2,3,0>, <2,0,5,7>
+ 1611219018U, // <2,0,0,u>: Cost 2 vext3 <0,0,u,2>, <0,0,u,2>
+ 2551472138U, // <2,0,1,0>: Cost 3 vext1 <0,2,0,1>, <0,0,1,1>
+ 2690564186U, // <2,0,1,1>: Cost 3 vext3 <1,0,3,2>, <0,1,1,0>
+ 1611956326U, // <2,0,1,2>: Cost 2 vext3 <0,2,0,2>, LHS
+ 2826092646U, // <2,0,1,3>: Cost 3 vuzpr <1,2,3,0>, LHS
+ 2551475510U, // <2,0,1,4>: Cost 3 vext1 <0,2,0,1>, RHS
+ 3692463248U, // <2,0,1,5>: Cost 4 vext2 <0,2,2,0>, <1,5,3,7>
+ 2587308473U, // <2,0,1,6>: Cost 3 vext1 <6,2,0,1>, <6,2,0,1>
+ 3661050874U, // <2,0,1,7>: Cost 4 vext1 <6,2,0,1>, <7,0,1,2>
+ 1611956380U, // <2,0,1,u>: Cost 2 vext3 <0,2,0,2>, LHS
+ 1477738598U, // <2,0,2,0>: Cost 2 vext1 <0,2,0,2>, LHS
+ 2551481078U, // <2,0,2,1>: Cost 3 vext1 <0,2,0,2>, <1,0,3,2>
+ 2551481796U, // <2,0,2,2>: Cost 3 vext1 <0,2,0,2>, <2,0,2,0>
+ 2551482518U, // <2,0,2,3>: Cost 3 vext1 <0,2,0,2>, <3,0,1,2>
+ 1477741878U, // <2,0,2,4>: Cost 2 vext1 <0,2,0,2>, RHS
+ 2551484112U, // <2,0,2,5>: Cost 3 vext1 <0,2,0,2>, <5,1,7,3>
+ 2551484759U, // <2,0,2,6>: Cost 3 vext1 <0,2,0,2>, <6,0,7,2>
+ 2551485434U, // <2,0,2,7>: Cost 3 vext1 <0,2,0,2>, <7,0,1,2>
+ 1477744430U, // <2,0,2,u>: Cost 2 vext1 <0,2,0,2>, LHS
+ 2953625600U, // <2,0,3,0>: Cost 3 vzipr LHS, <0,0,0,0>
+ 2953627302U, // <2,0,3,1>: Cost 3 vzipr LHS, <2,3,0,1>
+ 2953625764U, // <2,0,3,2>: Cost 3 vzipr LHS, <0,2,0,2>
+ 4027369695U, // <2,0,3,3>: Cost 4 vzipr LHS, <3,1,0,3>
+ 3625233718U, // <2,0,3,4>: Cost 4 vext1 <0,2,0,3>, RHS
+ 3899836110U, // <2,0,3,5>: Cost 4 vuzpr <1,2,3,0>, <2,3,4,5>
+ 4032012618U, // <2,0,3,6>: Cost 4 vzipr LHS, <0,4,0,6>
+ 3899835392U, // <2,0,3,7>: Cost 4 vuzpr <1,2,3,0>, <1,3,5,7>
+ 2953625770U, // <2,0,3,u>: Cost 3 vzipr LHS, <0,2,0,u>
+ 2551496806U, // <2,0,4,0>: Cost 3 vext1 <0,2,0,4>, LHS
+ 2685698386U, // <2,0,4,1>: Cost 3 vext3 <0,2,0,2>, <0,4,1,5>
+ 2685698396U, // <2,0,4,2>: Cost 3 vext3 <0,2,0,2>, <0,4,2,6>
+ 3625240726U, // <2,0,4,3>: Cost 4 vext1 <0,2,0,4>, <3,0,1,2>
+ 2551500086U, // <2,0,4,4>: Cost 3 vext1 <0,2,0,4>, RHS
+ 2618723638U, // <2,0,4,5>: Cost 3 vext2 <0,2,2,0>, RHS
+ 2765409590U, // <2,0,4,6>: Cost 3 vuzpl <2,3,0,1>, RHS
+ 3799990664U, // <2,0,4,7>: Cost 4 vext3 <7,0,1,2>, <0,4,7,5>
+ 2685698450U, // <2,0,4,u>: Cost 3 vext3 <0,2,0,2>, <0,4,u,6>
+ 3625246822U, // <2,0,5,0>: Cost 4 vext1 <0,2,0,5>, LHS
+ 3289776304U, // <2,0,5,1>: Cost 4 vrev <0,2,1,5>
+ 2690564526U, // <2,0,5,2>: Cost 3 vext3 <1,0,3,2>, <0,5,2,7>
+ 3289923778U, // <2,0,5,3>: Cost 4 vrev <0,2,3,5>
+ 2216255691U, // <2,0,5,4>: Cost 3 vrev <0,2,4,5>
+ 3726307332U, // <2,0,5,5>: Cost 4 vext2 <5,u,2,0>, <5,5,5,5>
+ 3726307426U, // <2,0,5,6>: Cost 4 vext2 <5,u,2,0>, <5,6,7,0>
+ 2826095926U, // <2,0,5,7>: Cost 3 vuzpr <1,2,3,0>, RHS
+ 2216550639U, // <2,0,5,u>: Cost 3 vrev <0,2,u,5>
+ 4162420736U, // <2,0,6,0>: Cost 4 vtrnr <0,2,4,6>, <0,0,0,0>
+ 2901885030U, // <2,0,6,1>: Cost 3 vzipl <2,6,3,7>, LHS
+ 2685698559U, // <2,0,6,2>: Cost 3 vext3 <0,2,0,2>, <0,6,2,7>
+ 3643173171U, // <2,0,6,3>: Cost 4 vext1 <3,2,0,6>, <3,2,0,6>
+ 2216263884U, // <2,0,6,4>: Cost 3 vrev <0,2,4,6>
+ 3730289341U, // <2,0,6,5>: Cost 4 vext2 <6,5,2,0>, <6,5,2,0>
+ 3726308152U, // <2,0,6,6>: Cost 4 vext2 <5,u,2,0>, <6,6,6,6>
+ 3899836346U, // <2,0,6,7>: Cost 4 vuzpr <1,2,3,0>, <2,6,3,7>
+ 2216558832U, // <2,0,6,u>: Cost 3 vrev <0,2,u,6>
+ 2659202049U, // <2,0,7,0>: Cost 3 vext2 <7,0,2,0>, <7,0,2,0>
+ 3726308437U, // <2,0,7,1>: Cost 4 vext2 <5,u,2,0>, <7,1,2,3>
+ 2726249034U, // <2,0,7,2>: Cost 3 vext3 <7,0,1,2>, <0,7,2,1>
+ 3734934772U, // <2,0,7,3>: Cost 4 vext2 <7,3,2,0>, <7,3,2,0>
+ 3726308710U, // <2,0,7,4>: Cost 4 vext2 <5,u,2,0>, <7,4,5,6>
+ 3726308814U, // <2,0,7,5>: Cost 4 vext2 <5,u,2,0>, <7,5,u,2>
+ 3736925671U, // <2,0,7,6>: Cost 4 vext2 <7,6,2,0>, <7,6,2,0>
+ 3726308972U, // <2,0,7,7>: Cost 4 vext2 <5,u,2,0>, <7,7,7,7>
+ 2659202049U, // <2,0,7,u>: Cost 3 vext2 <7,0,2,0>, <7,0,2,0>
+ 1477787750U, // <2,0,u,0>: Cost 2 vext1 <0,2,0,u>, LHS
+ 2953668262U, // <2,0,u,1>: Cost 3 vzipr LHS, <2,3,0,1>
+ 1611956893U, // <2,0,u,2>: Cost 2 vext3 <0,2,0,2>, LHS
+ 2551531670U, // <2,0,u,3>: Cost 3 vext1 <0,2,0,u>, <3,0,1,2>
+ 1477791030U, // <2,0,u,4>: Cost 2 vext1 <0,2,0,u>, RHS
+ 2618726554U, // <2,0,u,5>: Cost 3 vext2 <0,2,2,0>, RHS
+ 2765412506U, // <2,0,u,6>: Cost 3 vuzpl <2,3,0,1>, RHS
+ 2826096169U, // <2,0,u,7>: Cost 3 vuzpr <1,2,3,0>, RHS
+ 1611956947U, // <2,0,u,u>: Cost 2 vext3 <0,2,0,2>, LHS
+ 2569453670U, // <2,1,0,0>: Cost 3 vext1 <3,2,1,0>, LHS
+ 2619392102U, // <2,1,0,1>: Cost 3 vext2 <0,3,2,1>, LHS
+ 3759440619U, // <2,1,0,2>: Cost 4 vext3 <0,2,0,2>, <1,0,2,0>
+ 1616823030U, // <2,1,0,3>: Cost 2 vext3 <1,0,3,2>, <1,0,3,2>
+ 2569456950U, // <2,1,0,4>: Cost 3 vext1 <3,2,1,0>, RHS
+ 2690712328U, // <2,1,0,5>: Cost 3 vext3 <1,0,5,2>, <1,0,5,2>
+ 3661115841U, // <2,1,0,6>: Cost 4 vext1 <6,2,1,0>, <6,2,1,0>
+ 2622046794U, // <2,1,0,7>: Cost 3 vext2 <0,7,2,1>, <0,7,2,1>
+ 1617191715U, // <2,1,0,u>: Cost 2 vext3 <1,0,u,2>, <1,0,u,2>
+ 2551545958U, // <2,1,1,0>: Cost 3 vext1 <0,2,1,1>, LHS
+ 2685698868U, // <2,1,1,1>: Cost 3 vext3 <0,2,0,2>, <1,1,1,1>
+ 2628682646U, // <2,1,1,2>: Cost 3 vext2 <1,u,2,1>, <1,2,3,0>
+ 2685698888U, // <2,1,1,3>: Cost 3 vext3 <0,2,0,2>, <1,1,3,3>
+ 2551549238U, // <2,1,1,4>: Cost 3 vext1 <0,2,1,1>, RHS
+ 3693134992U, // <2,1,1,5>: Cost 4 vext2 <0,3,2,1>, <1,5,3,7>
+ 3661124034U, // <2,1,1,6>: Cost 4 vext1 <6,2,1,1>, <6,2,1,1>
+ 3625292794U, // <2,1,1,7>: Cost 4 vext1 <0,2,1,1>, <7,0,1,2>
+ 2685698933U, // <2,1,1,u>: Cost 3 vext3 <0,2,0,2>, <1,1,u,3>
+ 2551554150U, // <2,1,2,0>: Cost 3 vext1 <0,2,1,2>, LHS
+ 3893649571U, // <2,1,2,1>: Cost 4 vuzpr <0,2,0,1>, <0,2,0,1>
+ 2551555688U, // <2,1,2,2>: Cost 3 vext1 <0,2,1,2>, <2,2,2,2>
+ 2685698966U, // <2,1,2,3>: Cost 3 vext3 <0,2,0,2>, <1,2,3,0>
+ 2551557430U, // <2,1,2,4>: Cost 3 vext1 <0,2,1,2>, RHS
+ 3763422123U, // <2,1,2,5>: Cost 4 vext3 <0,u,0,2>, <1,2,5,3>
+ 3693135802U, // <2,1,2,6>: Cost 4 vext2 <0,3,2,1>, <2,6,3,7>
+ 2726249402U, // <2,1,2,7>: Cost 3 vext3 <7,0,1,2>, <1,2,7,0>
+ 2685699011U, // <2,1,2,u>: Cost 3 vext3 <0,2,0,2>, <1,2,u,0>
+ 2551562342U, // <2,1,3,0>: Cost 3 vext1 <0,2,1,3>, LHS
+ 2953625610U, // <2,1,3,1>: Cost 3 vzipr LHS, <0,0,1,1>
+ 2953627798U, // <2,1,3,2>: Cost 3 vzipr LHS, <3,0,1,2>
+ 2953626584U, // <2,1,3,3>: Cost 3 vzipr LHS, <1,3,1,3>
+ 2551565622U, // <2,1,3,4>: Cost 3 vext1 <0,2,1,3>, RHS
+ 2953625938U, // <2,1,3,5>: Cost 3 vzipr LHS, <0,4,1,5>
+ 2587398596U, // <2,1,3,6>: Cost 3 vext1 <6,2,1,3>, <6,2,1,3>
+ 4032013519U, // <2,1,3,7>: Cost 4 vzipr LHS, <1,6,1,7>
+ 2953625617U, // <2,1,3,u>: Cost 3 vzipr LHS, <0,0,1,u>
+ 2690565154U, // <2,1,4,0>: Cost 3 vext3 <1,0,3,2>, <1,4,0,5>
+ 3625313270U, // <2,1,4,1>: Cost 4 vext1 <0,2,1,4>, <1,3,4,6>
+ 3771532340U, // <2,1,4,2>: Cost 4 vext3 <2,2,2,2>, <1,4,2,5>
+ 1148404634U, // <2,1,4,3>: Cost 2 vrev <1,2,3,4>
+ 3625315638U, // <2,1,4,4>: Cost 4 vext1 <0,2,1,4>, RHS
+ 2619395382U, // <2,1,4,5>: Cost 3 vext2 <0,3,2,1>, RHS
+ 3837242678U, // <2,1,4,6>: Cost 4 vuzpl <2,0,1,2>, RHS
+ 3799991394U, // <2,1,4,7>: Cost 4 vext3 <7,0,1,2>, <1,4,7,6>
+ 1148773319U, // <2,1,4,u>: Cost 2 vrev <1,2,u,4>
+ 2551578726U, // <2,1,5,0>: Cost 3 vext1 <0,2,1,5>, LHS
+ 2551579648U, // <2,1,5,1>: Cost 3 vext1 <0,2,1,5>, <1,3,5,7>
+ 3625321952U, // <2,1,5,2>: Cost 4 vext1 <0,2,1,5>, <2,0,5,1>
+ 2685699216U, // <2,1,5,3>: Cost 3 vext3 <0,2,0,2>, <1,5,3,7>
+ 2551582006U, // <2,1,5,4>: Cost 3 vext1 <0,2,1,5>, RHS
+ 3740913668U, // <2,1,5,5>: Cost 4 vext2 <u,3,2,1>, <5,5,5,5>
+ 3661156806U, // <2,1,5,6>: Cost 4 vext1 <6,2,1,5>, <6,2,1,5>
+ 3893652790U, // <2,1,5,7>: Cost 4 vuzpr <0,2,0,1>, RHS
+ 2685699261U, // <2,1,5,u>: Cost 3 vext3 <0,2,0,2>, <1,5,u,7>
+ 2551586918U, // <2,1,6,0>: Cost 3 vext1 <0,2,1,6>, LHS
+ 3625329398U, // <2,1,6,1>: Cost 4 vext1 <0,2,1,6>, <1,0,3,2>
+ 2551588794U, // <2,1,6,2>: Cost 3 vext1 <0,2,1,6>, <2,6,3,7>
+ 3088679014U, // <2,1,6,3>: Cost 3 vtrnr <0,2,4,6>, LHS
+ 2551590198U, // <2,1,6,4>: Cost 3 vext1 <0,2,1,6>, RHS
+ 4029382994U, // <2,1,6,5>: Cost 4 vzipr <0,4,2,6>, <0,4,1,5>
+ 3625333560U, // <2,1,6,6>: Cost 4 vext1 <0,2,1,6>, <6,6,6,6>
+ 3731624800U, // <2,1,6,7>: Cost 4 vext2 <6,7,2,1>, <6,7,2,1>
+ 2551592750U, // <2,1,6,u>: Cost 3 vext1 <0,2,1,6>, LHS
+ 2622051322U, // <2,1,7,0>: Cost 3 vext2 <0,7,2,1>, <7,0,1,2>
+ 3733615699U, // <2,1,7,1>: Cost 4 vext2 <7,1,2,1>, <7,1,2,1>
+ 3795125538U, // <2,1,7,2>: Cost 4 vext3 <6,1,7,2>, <1,7,2,0>
+ 2222171037U, // <2,1,7,3>: Cost 3 vrev <1,2,3,7>
+ 3740915046U, // <2,1,7,4>: Cost 4 vext2 <u,3,2,1>, <7,4,5,6>
+ 3296060335U, // <2,1,7,5>: Cost 4 vrev <1,2,5,7>
+ 3736933864U, // <2,1,7,6>: Cost 4 vext2 <7,6,2,1>, <7,6,2,1>
+ 3805300055U, // <2,1,7,7>: Cost 4 vext3 <7,u,1,2>, <1,7,7,u>
+ 2669827714U, // <2,1,7,u>: Cost 3 vext2 <u,7,2,1>, <7,u,1,2>
+ 2551603302U, // <2,1,u,0>: Cost 3 vext1 <0,2,1,u>, LHS
+ 2953666570U, // <2,1,u,1>: Cost 3 vzipr LHS, <0,0,1,1>
+ 2953668758U, // <2,1,u,2>: Cost 3 vzipr LHS, <3,0,1,2>
+ 1148437406U, // <2,1,u,3>: Cost 2 vrev <1,2,3,u>
+ 2551606582U, // <2,1,u,4>: Cost 3 vext1 <0,2,1,u>, RHS
+ 2953666898U, // <2,1,u,5>: Cost 3 vzipr LHS, <0,4,1,5>
+ 2587398596U, // <2,1,u,6>: Cost 3 vext1 <6,2,1,3>, <6,2,1,3>
+ 2669828370U, // <2,1,u,7>: Cost 3 vext2 <u,7,2,1>, <u,7,2,1>
+ 1148806091U, // <2,1,u,u>: Cost 2 vrev <1,2,u,u>
+ 1543667732U, // <2,2,0,0>: Cost 2 vext2 <0,0,2,2>, <0,0,2,2>
+ 1548976230U, // <2,2,0,1>: Cost 2 vext2 <0,u,2,2>, LHS
+ 2685699524U, // <2,2,0,2>: Cost 3 vext3 <0,2,0,2>, <2,0,2,0>
+ 2685699535U, // <2,2,0,3>: Cost 3 vext3 <0,2,0,2>, <2,0,3,2>
+ 2551614774U, // <2,2,0,4>: Cost 3 vext1 <0,2,2,0>, RHS
+ 3704422830U, // <2,2,0,5>: Cost 4 vext2 <2,2,2,2>, <0,5,2,7>
+ 3893657642U, // <2,2,0,6>: Cost 4 vuzpr <0,2,0,2>, <0,0,4,6>
+ 3770574323U, // <2,2,0,7>: Cost 4 vext3 <2,0,7,2>, <2,0,7,2>
+ 1548976796U, // <2,2,0,u>: Cost 2 vext2 <0,u,2,2>, <0,u,2,2>
+ 2622718710U, // <2,2,1,0>: Cost 3 vext2 <0,u,2,2>, <1,0,3,2>
+ 2622718772U, // <2,2,1,1>: Cost 3 vext2 <0,u,2,2>, <1,1,1,1>
+ 2622718870U, // <2,2,1,2>: Cost 3 vext2 <0,u,2,2>, <1,2,3,0>
+ 2819915878U, // <2,2,1,3>: Cost 3 vuzpr <0,2,0,2>, LHS
+ 3625364790U, // <2,2,1,4>: Cost 4 vext1 <0,2,2,1>, RHS
+ 2622719120U, // <2,2,1,5>: Cost 3 vext2 <0,u,2,2>, <1,5,3,7>
+ 3760031292U, // <2,2,1,6>: Cost 4 vext3 <0,2,u,2>, <2,1,6,3>
+ 3667170468U, // <2,2,1,7>: Cost 4 vext1 <7,2,2,1>, <7,2,2,1>
+ 2819915883U, // <2,2,1,u>: Cost 3 vuzpr <0,2,0,2>, LHS
+ 1489829990U, // <2,2,2,0>: Cost 2 vext1 <2,2,2,2>, LHS
+ 2563572470U, // <2,2,2,1>: Cost 3 vext1 <2,2,2,2>, <1,0,3,2>
+ 269271142U, // <2,2,2,2>: Cost 1 vdup2 LHS
+ 2685699698U, // <2,2,2,3>: Cost 3 vext3 <0,2,0,2>, <2,2,3,3>
+ 1489833270U, // <2,2,2,4>: Cost 2 vext1 <2,2,2,2>, RHS
+ 2685699720U, // <2,2,2,5>: Cost 3 vext3 <0,2,0,2>, <2,2,5,7>
+ 2622719930U, // <2,2,2,6>: Cost 3 vext2 <0,u,2,2>, <2,6,3,7>
+ 2593436837U, // <2,2,2,7>: Cost 3 vext1 <7,2,2,2>, <7,2,2,2>
+ 269271142U, // <2,2,2,u>: Cost 1 vdup2 LHS
+ 2685699750U, // <2,2,3,0>: Cost 3 vext3 <0,2,0,2>, <2,3,0,1>
+ 2690565806U, // <2,2,3,1>: Cost 3 vext3 <1,0,3,2>, <2,3,1,0>
+ 2953627240U, // <2,2,3,2>: Cost 3 vzipr LHS, <2,2,2,2>
+ 1879883878U, // <2,2,3,3>: Cost 2 vzipr LHS, LHS
+ 2685699790U, // <2,2,3,4>: Cost 3 vext3 <0,2,0,2>, <2,3,4,5>
+ 3893659342U, // <2,2,3,5>: Cost 4 vuzpr <0,2,0,2>, <2,3,4,5>
+ 2958270812U, // <2,2,3,6>: Cost 3 vzipr LHS, <0,4,2,6>
+ 2593445030U, // <2,2,3,7>: Cost 3 vext1 <7,2,2,3>, <7,2,2,3>
+ 1879883883U, // <2,2,3,u>: Cost 2 vzipr LHS, LHS
+ 2551644262U, // <2,2,4,0>: Cost 3 vext1 <0,2,2,4>, LHS
+ 3625386742U, // <2,2,4,1>: Cost 4 vext1 <0,2,2,4>, <1,0,3,2>
+ 2551645902U, // <2,2,4,2>: Cost 3 vext1 <0,2,2,4>, <2,3,4,5>
+ 3759441686U, // <2,2,4,3>: Cost 4 vext3 <0,2,0,2>, <2,4,3,5>
+ 2551647542U, // <2,2,4,4>: Cost 3 vext1 <0,2,2,4>, RHS
+ 1548979510U, // <2,2,4,5>: Cost 2 vext2 <0,u,2,2>, RHS
+ 2764901686U, // <2,2,4,6>: Cost 3 vuzpl <2,2,2,2>, RHS
+ 3667195047U, // <2,2,4,7>: Cost 4 vext1 <7,2,2,4>, <7,2,2,4>
+ 1548979753U, // <2,2,4,u>: Cost 2 vext2 <0,u,2,2>, RHS
+ 3696463432U, // <2,2,5,0>: Cost 4 vext2 <0,u,2,2>, <5,0,1,2>
+ 2617413328U, // <2,2,5,1>: Cost 3 vext2 <0,0,2,2>, <5,1,7,3>
+ 2685699936U, // <2,2,5,2>: Cost 3 vext3 <0,2,0,2>, <2,5,2,7>
+ 4027383910U, // <2,2,5,3>: Cost 4 vzipr <0,1,2,5>, LHS
+ 2228201085U, // <2,2,5,4>: Cost 3 vrev <2,2,4,5>
+ 2617413636U, // <2,2,5,5>: Cost 3 vext2 <0,0,2,2>, <5,5,5,5>
+ 2617413730U, // <2,2,5,6>: Cost 3 vext2 <0,0,2,2>, <5,6,7,0>
+ 2819919158U, // <2,2,5,7>: Cost 3 vuzpr <0,2,0,2>, RHS
+ 2819919159U, // <2,2,5,u>: Cost 3 vuzpr <0,2,0,2>, RHS
+ 3625402554U, // <2,2,6,0>: Cost 4 vext1 <0,2,2,6>, <0,2,2,6>
+ 3760031652U, // <2,2,6,1>: Cost 4 vext3 <0,2,u,2>, <2,6,1,3>
+ 2617414138U, // <2,2,6,2>: Cost 3 vext2 <0,0,2,2>, <6,2,7,3>
+ 2685700026U, // <2,2,6,3>: Cost 3 vext3 <0,2,0,2>, <2,6,3,7>
+ 3625405750U, // <2,2,6,4>: Cost 4 vext1 <0,2,2,6>, RHS
+ 3760031692U, // <2,2,6,5>: Cost 4 vext3 <0,2,u,2>, <2,6,5,7>
+ 3088679116U, // <2,2,6,6>: Cost 3 vtrnr <0,2,4,6>, <0,2,4,6>
+ 2657891169U, // <2,2,6,7>: Cost 3 vext2 <6,7,2,2>, <6,7,2,2>
+ 2685700071U, // <2,2,6,u>: Cost 3 vext3 <0,2,0,2>, <2,6,u,7>
+ 2726250474U, // <2,2,7,0>: Cost 3 vext3 <7,0,1,2>, <2,7,0,1>
+ 3704427616U, // <2,2,7,1>: Cost 4 vext2 <2,2,2,2>, <7,1,3,5>
+ 2660545701U, // <2,2,7,2>: Cost 3 vext2 <7,2,2,2>, <7,2,2,2>
+ 4030718054U, // <2,2,7,3>: Cost 4 vzipr <0,6,2,7>, LHS
+ 2617415014U, // <2,2,7,4>: Cost 3 vext2 <0,0,2,2>, <7,4,5,6>
+ 3302033032U, // <2,2,7,5>: Cost 4 vrev <2,2,5,7>
+ 3661246929U, // <2,2,7,6>: Cost 4 vext1 <6,2,2,7>, <6,2,2,7>
+ 2617415276U, // <2,2,7,7>: Cost 3 vext2 <0,0,2,2>, <7,7,7,7>
+ 2731558962U, // <2,2,7,u>: Cost 3 vext3 <7,u,1,2>, <2,7,u,1>
+ 1489829990U, // <2,2,u,0>: Cost 2 vext1 <2,2,2,2>, LHS
+ 1548982062U, // <2,2,u,1>: Cost 2 vext2 <0,u,2,2>, LHS
+ 269271142U, // <2,2,u,2>: Cost 1 vdup2 LHS
+ 1879924838U, // <2,2,u,3>: Cost 2 vzipr LHS, LHS
+ 1489833270U, // <2,2,u,4>: Cost 2 vext1 <2,2,2,2>, RHS
+ 1548982426U, // <2,2,u,5>: Cost 2 vext2 <0,u,2,2>, RHS
+ 2953666908U, // <2,2,u,6>: Cost 3 vzipr LHS, <0,4,2,6>
+ 2819919401U, // <2,2,u,7>: Cost 3 vuzpr <0,2,0,2>, RHS
+ 269271142U, // <2,2,u,u>: Cost 1 vdup2 LHS
+ 1544339456U, // <2,3,0,0>: Cost 2 vext2 LHS, <0,0,0,0>
+ 470597734U, // <2,3,0,1>: Cost 1 vext2 LHS, LHS
+ 1548984484U, // <2,3,0,2>: Cost 2 vext2 LHS, <0,2,0,2>
+ 2619408648U, // <2,3,0,3>: Cost 3 vext2 <0,3,2,3>, <0,3,2,3>
+ 1548984658U, // <2,3,0,4>: Cost 2 vext2 LHS, <0,4,1,5>
+ 2665857454U, // <2,3,0,5>: Cost 3 vext2 LHS, <0,5,2,7>
+ 2622726655U, // <2,3,0,6>: Cost 3 vext2 LHS, <0,6,2,7>
+ 2593494188U, // <2,3,0,7>: Cost 3 vext1 <7,2,3,0>, <7,2,3,0>
+ 470598301U, // <2,3,0,u>: Cost 1 vext2 LHS, LHS
+ 1544340214U, // <2,3,1,0>: Cost 2 vext2 LHS, <1,0,3,2>
+ 1544340276U, // <2,3,1,1>: Cost 2 vext2 LHS, <1,1,1,1>
+ 1544340374U, // <2,3,1,2>: Cost 2 vext2 LHS, <1,2,3,0>
+ 1548985304U, // <2,3,1,3>: Cost 2 vext2 LHS, <1,3,1,3>
+ 2551696694U, // <2,3,1,4>: Cost 3 vext1 <0,2,3,1>, RHS
+ 1548985488U, // <2,3,1,5>: Cost 2 vext2 LHS, <1,5,3,7>
+ 2622727375U, // <2,3,1,6>: Cost 3 vext2 LHS, <1,6,1,7>
+ 2665858347U, // <2,3,1,7>: Cost 3 vext2 LHS, <1,7,3,0>
+ 1548985709U, // <2,3,1,u>: Cost 2 vext2 LHS, <1,u,1,3>
+ 2622727613U, // <2,3,2,0>: Cost 3 vext2 LHS, <2,0,1,2>
+ 2622727711U, // <2,3,2,1>: Cost 3 vext2 LHS, <2,1,3,1>
+ 1544341096U, // <2,3,2,2>: Cost 2 vext2 LHS, <2,2,2,2>
+ 1544341158U, // <2,3,2,3>: Cost 2 vext2 LHS, <2,3,0,1>
+ 2622727958U, // <2,3,2,4>: Cost 3 vext2 LHS, <2,4,3,5>
+ 2622728032U, // <2,3,2,5>: Cost 3 vext2 LHS, <2,5,2,7>
+ 1548986298U, // <2,3,2,6>: Cost 2 vext2 LHS, <2,6,3,7>
+ 2665859050U, // <2,3,2,7>: Cost 3 vext2 LHS, <2,7,0,1>
+ 1548986427U, // <2,3,2,u>: Cost 2 vext2 LHS, <2,u,0,1>
+ 1548986518U, // <2,3,3,0>: Cost 2 vext2 LHS, <3,0,1,2>
+ 2622728415U, // <2,3,3,1>: Cost 3 vext2 LHS, <3,1,0,3>
+ 1489913458U, // <2,3,3,2>: Cost 2 vext1 <2,2,3,3>, <2,2,3,3>
+ 1544341916U, // <2,3,3,3>: Cost 2 vext2 LHS, <3,3,3,3>
+ 1548986882U, // <2,3,3,4>: Cost 2 vext2 LHS, <3,4,5,6>
+ 2665859632U, // <2,3,3,5>: Cost 3 vext2 LHS, <3,5,1,7>
+ 2234304870U, // <2,3,3,6>: Cost 3 vrev <3,2,6,3>
+ 2958271632U, // <2,3,3,7>: Cost 3 vzipr LHS, <1,5,3,7>
+ 1548987166U, // <2,3,3,u>: Cost 2 vext2 LHS, <3,u,1,2>
+ 1483948134U, // <2,3,4,0>: Cost 2 vext1 <1,2,3,4>, LHS
+ 1483948954U, // <2,3,4,1>: Cost 2 vext1 <1,2,3,4>, <1,2,3,4>
+ 2622729276U, // <2,3,4,2>: Cost 3 vext2 LHS, <4,2,6,0>
+ 2557692054U, // <2,3,4,3>: Cost 3 vext1 <1,2,3,4>, <3,0,1,2>
+ 1483951414U, // <2,3,4,4>: Cost 2 vext1 <1,2,3,4>, RHS
+ 470601014U, // <2,3,4,5>: Cost 1 vext2 LHS, RHS
+ 1592118644U, // <2,3,4,6>: Cost 2 vext2 LHS, <4,6,4,6>
+ 2593526960U, // <2,3,4,7>: Cost 3 vext1 <7,2,3,4>, <7,2,3,4>
+ 470601257U, // <2,3,4,u>: Cost 1 vext2 LHS, RHS
+ 2551726182U, // <2,3,5,0>: Cost 3 vext1 <0,2,3,5>, LHS
+ 1592118992U, // <2,3,5,1>: Cost 2 vext2 LHS, <5,1,7,3>
+ 2665860862U, // <2,3,5,2>: Cost 3 vext2 LHS, <5,2,3,4>
+ 2551728642U, // <2,3,5,3>: Cost 3 vext1 <0,2,3,5>, <3,4,5,6>
+ 1592119238U, // <2,3,5,4>: Cost 2 vext2 LHS, <5,4,7,6>
+ 1592119300U, // <2,3,5,5>: Cost 2 vext2 LHS, <5,5,5,5>
+ 1592119394U, // <2,3,5,6>: Cost 2 vext2 LHS, <5,6,7,0>
+ 1592119464U, // <2,3,5,7>: Cost 2 vext2 LHS, <5,7,5,7>
+ 1592119545U, // <2,3,5,u>: Cost 2 vext2 LHS, <5,u,5,7>
+ 2622730529U, // <2,3,6,0>: Cost 3 vext2 LHS, <6,0,1,2>
+ 2557707164U, // <2,3,6,1>: Cost 3 vext1 <1,2,3,6>, <1,2,3,6>
+ 1592119802U, // <2,3,6,2>: Cost 2 vext2 LHS, <6,2,7,3>
+ 2665861682U, // <2,3,6,3>: Cost 3 vext2 LHS, <6,3,4,5>
+ 2622730893U, // <2,3,6,4>: Cost 3 vext2 LHS, <6,4,5,6>
+ 2665861810U, // <2,3,6,5>: Cost 3 vext2 LHS, <6,5,0,7>
+ 1592120120U, // <2,3,6,6>: Cost 2 vext2 LHS, <6,6,6,6>
+ 1592120142U, // <2,3,6,7>: Cost 2 vext2 LHS, <6,7,0,1>
+ 1592120223U, // <2,3,6,u>: Cost 2 vext2 LHS, <6,u,0,1>
+ 1592120314U, // <2,3,7,0>: Cost 2 vext2 LHS, <7,0,1,2>
+ 2659890261U, // <2,3,7,1>: Cost 3 vext2 <7,1,2,3>, <7,1,2,3>
+ 2660553894U, // <2,3,7,2>: Cost 3 vext2 <7,2,2,3>, <7,2,2,3>
+ 2665862371U, // <2,3,7,3>: Cost 3 vext2 LHS, <7,3,0,1>
+ 1592120678U, // <2,3,7,4>: Cost 2 vext2 LHS, <7,4,5,6>
+ 2665862534U, // <2,3,7,5>: Cost 3 vext2 LHS, <7,5,0,2>
+ 2665862614U, // <2,3,7,6>: Cost 3 vext2 LHS, <7,6,0,1>
+ 1592120940U, // <2,3,7,7>: Cost 2 vext2 LHS, <7,7,7,7>
+ 1592120962U, // <2,3,7,u>: Cost 2 vext2 LHS, <7,u,1,2>
+ 1548990163U, // <2,3,u,0>: Cost 2 vext2 LHS, <u,0,1,2>
+ 470603566U, // <2,3,u,1>: Cost 1 vext2 LHS, LHS
+ 1548990341U, // <2,3,u,2>: Cost 2 vext2 LHS, <u,2,3,0>
+ 1548990396U, // <2,3,u,3>: Cost 2 vext2 LHS, <u,3,0,1>
+ 1548990527U, // <2,3,u,4>: Cost 2 vext2 LHS, <u,4,5,6>
+ 470603930U, // <2,3,u,5>: Cost 1 vext2 LHS, RHS
+ 1548990672U, // <2,3,u,6>: Cost 2 vext2 LHS, <u,6,3,7>
+ 1592121600U, // <2,3,u,7>: Cost 2 vext2 LHS, <u,7,0,1>
+ 470604133U, // <2,3,u,u>: Cost 1 vext2 LHS, LHS
+ 2617425942U, // <2,4,0,0>: Cost 3 vext2 <0,0,2,4>, <0,0,2,4>
+ 2618753126U, // <2,4,0,1>: Cost 3 vext2 <0,2,2,4>, LHS
+ 2618753208U, // <2,4,0,2>: Cost 3 vext2 <0,2,2,4>, <0,2,2,4>
+ 2619416841U, // <2,4,0,3>: Cost 3 vext2 <0,3,2,4>, <0,3,2,4>
+ 2587593628U, // <2,4,0,4>: Cost 3 vext1 <6,2,4,0>, <4,0,6,2>
+ 2712832914U, // <2,4,0,5>: Cost 3 vext3 <4,6,u,2>, <4,0,5,1>
+ 1634962332U, // <2,4,0,6>: Cost 2 vext3 <4,0,6,2>, <4,0,6,2>
+ 3799993252U, // <2,4,0,7>: Cost 4 vext3 <7,0,1,2>, <4,0,7,1>
+ 1634962332U, // <2,4,0,u>: Cost 2 vext3 <4,0,6,2>, <4,0,6,2>
+ 2619417334U, // <2,4,1,0>: Cost 3 vext2 <0,3,2,4>, <1,0,3,2>
+ 3692495668U, // <2,4,1,1>: Cost 4 vext2 <0,2,2,4>, <1,1,1,1>
+ 2625389466U, // <2,4,1,2>: Cost 3 vext2 <1,3,2,4>, <1,2,3,4>
+ 2826125414U, // <2,4,1,3>: Cost 3 vuzpr <1,2,3,4>, LHS
+ 3699794995U, // <2,4,1,4>: Cost 4 vext2 <1,4,2,4>, <1,4,2,4>
+ 3692496016U, // <2,4,1,5>: Cost 4 vext2 <0,2,2,4>, <1,5,3,7>
+ 3763424238U, // <2,4,1,6>: Cost 4 vext3 <0,u,0,2>, <4,1,6,3>
+ 3667317942U, // <2,4,1,7>: Cost 4 vext1 <7,2,4,1>, <7,2,4,1>
+ 2826125419U, // <2,4,1,u>: Cost 3 vuzpr <1,2,3,4>, LHS
+ 2629371336U, // <2,4,2,0>: Cost 3 vext2 <2,0,2,4>, <2,0,2,4>
+ 3699131946U, // <2,4,2,1>: Cost 4 vext2 <1,3,2,4>, <2,1,4,3>
+ 2630698602U, // <2,4,2,2>: Cost 3 vext2 <2,2,2,4>, <2,2,2,4>
+ 2618754766U, // <2,4,2,3>: Cost 3 vext2 <0,2,2,4>, <2,3,4,5>
+ 2826126234U, // <2,4,2,4>: Cost 3 vuzpr <1,2,3,4>, <1,2,3,4>
+ 2899119414U, // <2,4,2,5>: Cost 3 vzipl <2,2,2,2>, RHS
+ 3033337142U, // <2,4,2,6>: Cost 3 vtrnl <2,2,2,2>, RHS
+ 3800214597U, // <2,4,2,7>: Cost 4 vext3 <7,0,4,2>, <4,2,7,0>
+ 2899119657U, // <2,4,2,u>: Cost 3 vzipl <2,2,2,2>, RHS
+ 2635344033U, // <2,4,3,0>: Cost 3 vext2 <3,0,2,4>, <3,0,2,4>
+ 4032012325U, // <2,4,3,1>: Cost 4 vzipr LHS, <0,0,4,1>
+ 3692497228U, // <2,4,3,2>: Cost 4 vext2 <0,2,2,4>, <3,2,3,4>
+ 3692497308U, // <2,4,3,3>: Cost 4 vext2 <0,2,2,4>, <3,3,3,3>
+ 3001404624U, // <2,4,3,4>: Cost 3 vzipr LHS, <4,4,4,4>
+ 2953627342U, // <2,4,3,5>: Cost 3 vzipr LHS, <2,3,4,5>
+ 2953625804U, // <2,4,3,6>: Cost 3 vzipr LHS, <0,2,4,6>
+ 3899868160U, // <2,4,3,7>: Cost 4 vuzpr <1,2,3,4>, <1,3,5,7>
+ 2953625806U, // <2,4,3,u>: Cost 3 vzipr LHS, <0,2,4,u>
+ 2710916266U, // <2,4,4,0>: Cost 3 vext3 <4,4,0,2>, <4,4,0,2>
+ 3899869648U, // <2,4,4,1>: Cost 4 vuzpr <1,2,3,4>, <3,4,0,1>
+ 3899869658U, // <2,4,4,2>: Cost 4 vuzpr <1,2,3,4>, <3,4,1,2>
+ 3899868930U, // <2,4,4,3>: Cost 4 vuzpr <1,2,3,4>, <2,4,1,3>
+ 2712833232U, // <2,4,4,4>: Cost 3 vext3 <4,6,u,2>, <4,4,4,4>
+ 2618756406U, // <2,4,4,5>: Cost 3 vext2 <0,2,2,4>, RHS
+ 2765737270U, // <2,4,4,6>: Cost 3 vuzpl <2,3,4,5>, RHS
+ 4168304426U, // <2,4,4,7>: Cost 4 vtrnr <1,2,3,4>, <2,4,5,7>
+ 2618756649U, // <2,4,4,u>: Cost 3 vext2 <0,2,2,4>, RHS
+ 2551800011U, // <2,4,5,0>: Cost 3 vext1 <0,2,4,5>, <0,2,4,5>
+ 2569716470U, // <2,4,5,1>: Cost 3 vext1 <3,2,4,5>, <1,0,3,2>
+ 2563745405U, // <2,4,5,2>: Cost 3 vext1 <2,2,4,5>, <2,2,4,5>
+ 2569718102U, // <2,4,5,3>: Cost 3 vext1 <3,2,4,5>, <3,2,4,5>
+ 2551803190U, // <2,4,5,4>: Cost 3 vext1 <0,2,4,5>, RHS
+ 3625545732U, // <2,4,5,5>: Cost 4 vext1 <0,2,4,5>, <5,5,5,5>
+ 1611959606U, // <2,4,5,6>: Cost 2 vext3 <0,2,0,2>, RHS
+ 2826128694U, // <2,4,5,7>: Cost 3 vuzpr <1,2,3,4>, RHS
+ 1611959624U, // <2,4,5,u>: Cost 2 vext3 <0,2,0,2>, RHS
+ 1478066278U, // <2,4,6,0>: Cost 2 vext1 <0,2,4,6>, LHS
+ 2551808758U, // <2,4,6,1>: Cost 3 vext1 <0,2,4,6>, <1,0,3,2>
+ 2551809516U, // <2,4,6,2>: Cost 3 vext1 <0,2,4,6>, <2,0,6,4>
+ 2551810198U, // <2,4,6,3>: Cost 3 vext1 <0,2,4,6>, <3,0,1,2>
+ 1478069558U, // <2,4,6,4>: Cost 2 vext1 <0,2,4,6>, RHS
+ 2901888310U, // <2,4,6,5>: Cost 3 vzipl <2,6,3,7>, RHS
+ 2551812920U, // <2,4,6,6>: Cost 3 vext1 <0,2,4,6>, <6,6,6,6>
+ 2726251914U, // <2,4,6,7>: Cost 3 vext3 <7,0,1,2>, <4,6,7,1>
+ 1478072110U, // <2,4,6,u>: Cost 2 vext1 <0,2,4,6>, LHS
+ 2659234821U, // <2,4,7,0>: Cost 3 vext2 <7,0,2,4>, <7,0,2,4>
+ 3786722726U, // <2,4,7,1>: Cost 4 vext3 <4,7,1,2>, <4,7,1,2>
+ 3734303911U, // <2,4,7,2>: Cost 4 vext2 <7,2,2,4>, <7,2,2,4>
+ 3734967544U, // <2,4,7,3>: Cost 4 vext2 <7,3,2,4>, <7,3,2,4>
+ 3727005030U, // <2,4,7,4>: Cost 4 vext2 <6,0,2,4>, <7,4,5,6>
+ 2726251976U, // <2,4,7,5>: Cost 3 vext3 <7,0,1,2>, <4,7,5,0>
+ 2726251986U, // <2,4,7,6>: Cost 3 vext3 <7,0,1,2>, <4,7,6,1>
+ 3727005292U, // <2,4,7,7>: Cost 4 vext2 <6,0,2,4>, <7,7,7,7>
+ 2659234821U, // <2,4,7,u>: Cost 3 vext2 <7,0,2,4>, <7,0,2,4>
+ 1478082662U, // <2,4,u,0>: Cost 2 vext1 <0,2,4,u>, LHS
+ 2618758958U, // <2,4,u,1>: Cost 3 vext2 <0,2,2,4>, LHS
+ 2551826024U, // <2,4,u,2>: Cost 3 vext1 <0,2,4,u>, <2,2,2,2>
+ 2551826582U, // <2,4,u,3>: Cost 3 vext1 <0,2,4,u>, <3,0,1,2>
+ 1478085942U, // <2,4,u,4>: Cost 2 vext1 <0,2,4,u>, RHS
+ 2953668302U, // <2,4,u,5>: Cost 3 vzipr LHS, <2,3,4,5>
+ 1611959849U, // <2,4,u,6>: Cost 2 vext3 <0,2,0,2>, RHS
+ 2826128937U, // <2,4,u,7>: Cost 3 vuzpr <1,2,3,4>, RHS
+ 1611959867U, // <2,4,u,u>: Cost 2 vext3 <0,2,0,2>, RHS
+ 3691839488U, // <2,5,0,0>: Cost 4 vext2 <0,1,2,5>, <0,0,0,0>
+ 2618097766U, // <2,5,0,1>: Cost 3 vext2 <0,1,2,5>, LHS
+ 2620088484U, // <2,5,0,2>: Cost 3 vext2 <0,4,2,5>, <0,2,0,2>
+ 2619425034U, // <2,5,0,3>: Cost 3 vext2 <0,3,2,5>, <0,3,2,5>
+ 2620088667U, // <2,5,0,4>: Cost 3 vext2 <0,4,2,5>, <0,4,2,5>
+ 2620752300U, // <2,5,0,5>: Cost 3 vext2 <0,5,2,5>, <0,5,2,5>
+ 3693830655U, // <2,5,0,6>: Cost 4 vext2 <0,4,2,5>, <0,6,2,7>
+ 3094531382U, // <2,5,0,7>: Cost 3 vtrnr <1,2,3,0>, RHS
+ 2618098333U, // <2,5,0,u>: Cost 3 vext2 <0,1,2,5>, LHS
+ 3691840246U, // <2,5,1,0>: Cost 4 vext2 <0,1,2,5>, <1,0,3,2>
+ 3691840308U, // <2,5,1,1>: Cost 4 vext2 <0,1,2,5>, <1,1,1,1>
+ 2626061206U, // <2,5,1,2>: Cost 3 vext2 <1,4,2,5>, <1,2,3,0>
+ 2618098688U, // <2,5,1,3>: Cost 3 vext2 <0,1,2,5>, <1,3,5,7>
+ 2626061364U, // <2,5,1,4>: Cost 3 vext2 <1,4,2,5>, <1,4,2,5>
+ 3691840656U, // <2,5,1,5>: Cost 4 vext2 <0,1,2,5>, <1,5,3,7>
+ 3789082310U, // <2,5,1,6>: Cost 4 vext3 <5,1,6,2>, <5,1,6,2>
+ 2712833744U, // <2,5,1,7>: Cost 3 vext3 <4,6,u,2>, <5,1,7,3>
+ 2628715896U, // <2,5,1,u>: Cost 3 vext2 <1,u,2,5>, <1,u,2,5>
+ 3693831613U, // <2,5,2,0>: Cost 4 vext2 <0,4,2,5>, <2,0,1,2>
+ 4026698642U, // <2,5,2,1>: Cost 4 vzipr <0,0,2,2>, <4,0,5,1>
+ 2632033896U, // <2,5,2,2>: Cost 3 vext2 <2,4,2,5>, <2,2,2,2>
+ 3691841190U, // <2,5,2,3>: Cost 4 vext2 <0,1,2,5>, <2,3,0,1>
+ 2632034061U, // <2,5,2,4>: Cost 3 vext2 <2,4,2,5>, <2,4,2,5>
+ 3691841352U, // <2,5,2,5>: Cost 4 vext2 <0,1,2,5>, <2,5,0,1>
+ 3691841466U, // <2,5,2,6>: Cost 4 vext2 <0,1,2,5>, <2,6,3,7>
+ 3088354614U, // <2,5,2,7>: Cost 3 vtrnr <0,2,0,2>, RHS
+ 3088354615U, // <2,5,2,u>: Cost 3 vtrnr <0,2,0,2>, RHS
+ 2557829222U, // <2,5,3,0>: Cost 3 vext1 <1,2,5,3>, LHS
+ 2557830059U, // <2,5,3,1>: Cost 3 vext1 <1,2,5,3>, <1,2,5,3>
+ 2575746766U, // <2,5,3,2>: Cost 3 vext1 <4,2,5,3>, <2,3,4,5>
+ 3691841948U, // <2,5,3,3>: Cost 4 vext2 <0,1,2,5>, <3,3,3,3>
+ 2619427330U, // <2,5,3,4>: Cost 3 vext2 <0,3,2,5>, <3,4,5,6>
+ 2581720847U, // <2,5,3,5>: Cost 3 vext1 <5,2,5,3>, <5,2,5,3>
+ 2953628162U, // <2,5,3,6>: Cost 3 vzipr LHS, <3,4,5,6>
+ 2953626624U, // <2,5,3,7>: Cost 3 vzipr LHS, <1,3,5,7>
+ 2953626625U, // <2,5,3,u>: Cost 3 vzipr LHS, <1,3,5,u>
+ 2569781350U, // <2,5,4,0>: Cost 3 vext1 <3,2,5,4>, LHS
+ 3631580076U, // <2,5,4,1>: Cost 4 vext1 <1,2,5,4>, <1,2,5,4>
+ 2569782990U, // <2,5,4,2>: Cost 3 vext1 <3,2,5,4>, <2,3,4,5>
+ 2569783646U, // <2,5,4,3>: Cost 3 vext1 <3,2,5,4>, <3,2,5,4>
+ 2569784630U, // <2,5,4,4>: Cost 3 vext1 <3,2,5,4>, RHS
+ 2618101046U, // <2,5,4,5>: Cost 3 vext2 <0,1,2,5>, RHS
+ 3893905922U, // <2,5,4,6>: Cost 4 vuzpr <0,2,3,5>, <3,4,5,6>
+ 3094564150U, // <2,5,4,7>: Cost 3 vtrnr <1,2,3,4>, RHS
+ 2618101289U, // <2,5,4,u>: Cost 3 vext2 <0,1,2,5>, RHS
+ 2551873638U, // <2,5,5,0>: Cost 3 vext1 <0,2,5,5>, LHS
+ 3637560320U, // <2,5,5,1>: Cost 4 vext1 <2,2,5,5>, <1,3,5,7>
+ 3637560966U, // <2,5,5,2>: Cost 4 vext1 <2,2,5,5>, <2,2,5,5>
+ 3723030343U, // <2,5,5,3>: Cost 4 vext2 <5,3,2,5>, <5,3,2,5>
+ 2551876918U, // <2,5,5,4>: Cost 3 vext1 <0,2,5,5>, RHS
+ 2712834052U, // <2,5,5,5>: Cost 3 vext3 <4,6,u,2>, <5,5,5,5>
+ 4028713474U, // <2,5,5,6>: Cost 4 vzipr <0,3,2,5>, <3,4,5,6>
+ 2712834072U, // <2,5,5,7>: Cost 3 vext3 <4,6,u,2>, <5,5,7,7>
+ 2712834081U, // <2,5,5,u>: Cost 3 vext3 <4,6,u,2>, <5,5,u,7>
+ 2575769702U, // <2,5,6,0>: Cost 3 vext1 <4,2,5,6>, LHS
+ 3631596462U, // <2,5,6,1>: Cost 4 vext1 <1,2,5,6>, <1,2,5,6>
+ 2655924730U, // <2,5,6,2>: Cost 3 vext2 <6,4,2,5>, <6,2,7,3>
+ 3643541856U, // <2,5,6,3>: Cost 4 vext1 <3,2,5,6>, <3,2,5,6>
+ 2655924849U, // <2,5,6,4>: Cost 3 vext2 <6,4,2,5>, <6,4,2,5>
+ 3787755607U, // <2,5,6,5>: Cost 4 vext3 <4,u,6,2>, <5,6,5,7>
+ 4029385218U, // <2,5,6,6>: Cost 4 vzipr <0,4,2,6>, <3,4,5,6>
+ 3088682294U, // <2,5,6,7>: Cost 3 vtrnr <0,2,4,6>, RHS
+ 3088682295U, // <2,5,6,u>: Cost 3 vtrnr <0,2,4,6>, RHS
+ 2563833958U, // <2,5,7,0>: Cost 3 vext1 <2,2,5,7>, LHS
+ 2551890678U, // <2,5,7,1>: Cost 3 vext1 <0,2,5,7>, <1,0,3,2>
+ 2563835528U, // <2,5,7,2>: Cost 3 vext1 <2,2,5,7>, <2,2,5,7>
+ 3637577878U, // <2,5,7,3>: Cost 4 vext1 <2,2,5,7>, <3,0,1,2>
+ 2563837238U, // <2,5,7,4>: Cost 3 vext1 <2,2,5,7>, RHS
+ 2712834216U, // <2,5,7,5>: Cost 3 vext3 <4,6,u,2>, <5,7,5,7>
+ 2712834220U, // <2,5,7,6>: Cost 3 vext3 <4,6,u,2>, <5,7,6,2>
+ 4174449974U, // <2,5,7,7>: Cost 4 vtrnr <2,2,5,7>, RHS
+ 2563839790U, // <2,5,7,u>: Cost 3 vext1 <2,2,5,7>, LHS
+ 2563842150U, // <2,5,u,0>: Cost 3 vext1 <2,2,5,u>, LHS
+ 2618103598U, // <2,5,u,1>: Cost 3 vext2 <0,1,2,5>, LHS
+ 2563843721U, // <2,5,u,2>: Cost 3 vext1 <2,2,5,u>, <2,2,5,u>
+ 2569816418U, // <2,5,u,3>: Cost 3 vext1 <3,2,5,u>, <3,2,5,u>
+ 2622748735U, // <2,5,u,4>: Cost 3 vext2 <0,u,2,5>, <u,4,5,6>
+ 2618103962U, // <2,5,u,5>: Cost 3 vext2 <0,1,2,5>, RHS
+ 2953669122U, // <2,5,u,6>: Cost 3 vzipr LHS, <3,4,5,6>
+ 2953667584U, // <2,5,u,7>: Cost 3 vzipr LHS, <1,3,5,7>
+ 2618104165U, // <2,5,u,u>: Cost 3 vext2 <0,1,2,5>, LHS
+ 2620096512U, // <2,6,0,0>: Cost 3 vext2 <0,4,2,6>, <0,0,0,0>
+ 1546354790U, // <2,6,0,1>: Cost 2 vext2 <0,4,2,6>, LHS
+ 2620096676U, // <2,6,0,2>: Cost 3 vext2 <0,4,2,6>, <0,2,0,2>
+ 3693838588U, // <2,6,0,3>: Cost 4 vext2 <0,4,2,6>, <0,3,1,0>
+ 1546355036U, // <2,6,0,4>: Cost 2 vext2 <0,4,2,6>, <0,4,2,6>
+ 3694502317U, // <2,6,0,5>: Cost 4 vext2 <0,5,2,6>, <0,5,2,6>
+ 2551911246U, // <2,6,0,6>: Cost 3 vext1 <0,2,6,0>, <6,7,0,1>
+ 2720723287U, // <2,6,0,7>: Cost 3 vext3 <6,0,7,2>, <6,0,7,2>
+ 1546355357U, // <2,6,0,u>: Cost 2 vext2 <0,4,2,6>, LHS
+ 2620097270U, // <2,6,1,0>: Cost 3 vext2 <0,4,2,6>, <1,0,3,2>
+ 2620097332U, // <2,6,1,1>: Cost 3 vext2 <0,4,2,6>, <1,1,1,1>
+ 2620097430U, // <2,6,1,2>: Cost 3 vext2 <0,4,2,6>, <1,2,3,0>
+ 2820243558U, // <2,6,1,3>: Cost 3 vuzpr <0,2,4,6>, LHS
+ 2620097598U, // <2,6,1,4>: Cost 3 vext2 <0,4,2,6>, <1,4,3,6>
+ 2620097680U, // <2,6,1,5>: Cost 3 vext2 <0,4,2,6>, <1,5,3,7>
+ 3693839585U, // <2,6,1,6>: Cost 4 vext2 <0,4,2,6>, <1,6,3,7>
+ 2721386920U, // <2,6,1,7>: Cost 3 vext3 <6,1,7,2>, <6,1,7,2>
+ 2820243563U, // <2,6,1,u>: Cost 3 vuzpr <0,2,4,6>, LHS
+ 2714014137U, // <2,6,2,0>: Cost 3 vext3 <4,u,6,2>, <6,2,0,1>
+ 2712834500U, // <2,6,2,1>: Cost 3 vext3 <4,6,u,2>, <6,2,1,3>
+ 2620098152U, // <2,6,2,2>: Cost 3 vext2 <0,4,2,6>, <2,2,2,2>
+ 2620098214U, // <2,6,2,3>: Cost 3 vext2 <0,4,2,6>, <2,3,0,1>
+ 2632042254U, // <2,6,2,4>: Cost 3 vext2 <2,4,2,6>, <2,4,2,6>
+ 2712834540U, // <2,6,2,5>: Cost 3 vext3 <4,6,u,2>, <6,2,5,7>
+ 2820243660U, // <2,6,2,6>: Cost 3 vuzpr <0,2,4,6>, <0,2,4,6>
+ 2958265654U, // <2,6,2,7>: Cost 3 vzipr <0,u,2,2>, RHS
+ 2620098619U, // <2,6,2,u>: Cost 3 vext2 <0,4,2,6>, <2,u,0,1>
+ 2620098710U, // <2,6,3,0>: Cost 3 vext2 <0,4,2,6>, <3,0,1,2>
+ 3893986982U, // <2,6,3,1>: Cost 4 vuzpr <0,2,4,6>, <2,3,0,1>
+ 2569848762U, // <2,6,3,2>: Cost 3 vext1 <3,2,6,3>, <2,6,3,7>
+ 2620098972U, // <2,6,3,3>: Cost 3 vext2 <0,4,2,6>, <3,3,3,3>
+ 2620099074U, // <2,6,3,4>: Cost 3 vext2 <0,4,2,6>, <3,4,5,6>
+ 3893987022U, // <2,6,3,5>: Cost 4 vuzpr <0,2,4,6>, <2,3,4,5>
+ 3001404644U, // <2,6,3,6>: Cost 3 vzipr LHS, <4,4,6,6>
+ 1879887158U, // <2,6,3,7>: Cost 2 vzipr LHS, RHS
+ 1879887159U, // <2,6,3,u>: Cost 2 vzipr LHS, RHS
+ 2620099484U, // <2,6,4,0>: Cost 3 vext2 <0,4,2,6>, <4,0,6,2>
+ 2620099566U, // <2,6,4,1>: Cost 3 vext2 <0,4,2,6>, <4,1,6,3>
+ 2620099644U, // <2,6,4,2>: Cost 3 vext2 <0,4,2,6>, <4,2,6,0>
+ 3643599207U, // <2,6,4,3>: Cost 4 vext1 <3,2,6,4>, <3,2,6,4>
+ 2575830080U, // <2,6,4,4>: Cost 3 vext1 <4,2,6,4>, <4,2,6,4>
+ 1546358070U, // <2,6,4,5>: Cost 2 vext2 <0,4,2,6>, RHS
+ 2667875700U, // <2,6,4,6>: Cost 3 vext2 <u,4,2,6>, <4,6,4,6>
+ 4028042550U, // <2,6,4,7>: Cost 4 vzipr <0,2,2,4>, RHS
+ 1546358313U, // <2,6,4,u>: Cost 2 vext2 <0,4,2,6>, RHS
+ 3693841992U, // <2,6,5,0>: Cost 4 vext2 <0,4,2,6>, <5,0,1,2>
+ 2667876048U, // <2,6,5,1>: Cost 3 vext2 <u,4,2,6>, <5,1,7,3>
+ 2712834756U, // <2,6,5,2>: Cost 3 vext3 <4,6,u,2>, <6,5,2,7>
+ 3643607400U, // <2,6,5,3>: Cost 4 vext1 <3,2,6,5>, <3,2,6,5>
+ 2252091873U, // <2,6,5,4>: Cost 3 vrev <6,2,4,5>
+ 2667876356U, // <2,6,5,5>: Cost 3 vext2 <u,4,2,6>, <5,5,5,5>
+ 2667876450U, // <2,6,5,6>: Cost 3 vext2 <u,4,2,6>, <5,6,7,0>
+ 2820246838U, // <2,6,5,7>: Cost 3 vuzpr <0,2,4,6>, RHS
+ 2820246839U, // <2,6,5,u>: Cost 3 vuzpr <0,2,4,6>, RHS
+ 2563899494U, // <2,6,6,0>: Cost 3 vext1 <2,2,6,6>, LHS
+ 3893988683U, // <2,6,6,1>: Cost 4 vuzpr <0,2,4,6>, <4,6,0,1>
+ 2563901072U, // <2,6,6,2>: Cost 3 vext1 <2,2,6,6>, <2,2,6,6>
+ 3893987236U, // <2,6,6,3>: Cost 4 vuzpr <0,2,4,6>, <2,6,1,3>
+ 2563902774U, // <2,6,6,4>: Cost 3 vext1 <2,2,6,6>, RHS
+ 3893988723U, // <2,6,6,5>: Cost 4 vuzpr <0,2,4,6>, <4,6,4,5>
+ 2712834872U, // <2,6,6,6>: Cost 3 vext3 <4,6,u,2>, <6,6,6,6>
+ 2955644214U, // <2,6,6,7>: Cost 3 vzipr <0,4,2,6>, RHS
+ 2955644215U, // <2,6,6,u>: Cost 3 vzipr <0,4,2,6>, RHS
+ 2712834894U, // <2,6,7,0>: Cost 3 vext3 <4,6,u,2>, <6,7,0,1>
+ 2724926296U, // <2,6,7,1>: Cost 3 vext3 <6,7,1,2>, <6,7,1,2>
+ 2725000033U, // <2,6,7,2>: Cost 3 vext3 <6,7,2,2>, <6,7,2,2>
+ 2702365544U, // <2,6,7,3>: Cost 3 vext3 <3,0,1,2>, <6,7,3,0>
+ 2712834934U, // <2,6,7,4>: Cost 3 vext3 <4,6,u,2>, <6,7,4,5>
+ 3776107393U, // <2,6,7,5>: Cost 4 vext3 <3,0,1,2>, <6,7,5,7>
+ 2725294981U, // <2,6,7,6>: Cost 3 vext3 <6,7,6,2>, <6,7,6,2>
+ 2726253452U, // <2,6,7,7>: Cost 3 vext3 <7,0,1,2>, <6,7,7,0>
+ 2712834966U, // <2,6,7,u>: Cost 3 vext3 <4,6,u,2>, <6,7,u,1>
+ 2620102355U, // <2,6,u,0>: Cost 3 vext2 <0,4,2,6>, <u,0,1,2>
+ 1546360622U, // <2,6,u,1>: Cost 2 vext2 <0,4,2,6>, LHS
+ 2620102536U, // <2,6,u,2>: Cost 3 vext2 <0,4,2,6>, <u,2,3,3>
+ 2820244125U, // <2,6,u,3>: Cost 3 vuzpr <0,2,4,6>, LHS
+ 1594136612U, // <2,6,u,4>: Cost 2 vext2 <u,4,2,6>, <u,4,2,6>
+ 1546360986U, // <2,6,u,5>: Cost 2 vext2 <0,4,2,6>, RHS
+ 2620102864U, // <2,6,u,6>: Cost 3 vext2 <0,4,2,6>, <u,6,3,7>
+ 1879928118U, // <2,6,u,7>: Cost 2 vzipr LHS, RHS
+ 1879928119U, // <2,6,u,u>: Cost 2 vzipr LHS, RHS
+ 2726179825U, // <2,7,0,0>: Cost 3 vext3 <7,0,0,2>, <7,0,0,2>
+ 1652511738U, // <2,7,0,1>: Cost 2 vext3 <7,0,1,2>, <7,0,1,2>
+ 2621431972U, // <2,7,0,2>: Cost 3 vext2 <0,6,2,7>, <0,2,0,2>
+ 2257949868U, // <2,7,0,3>: Cost 3 vrev <7,2,3,0>
+ 2726474773U, // <2,7,0,4>: Cost 3 vext3 <7,0,4,2>, <7,0,4,2>
+ 2620768686U, // <2,7,0,5>: Cost 3 vext2 <0,5,2,7>, <0,5,2,7>
+ 2621432319U, // <2,7,0,6>: Cost 3 vext2 <0,6,2,7>, <0,6,2,7>
+ 2599760953U, // <2,7,0,7>: Cost 3 vext1 <u,2,7,0>, <7,0,u,2>
+ 1653027897U, // <2,7,0,u>: Cost 2 vext3 <7,0,u,2>, <7,0,u,2>
+ 2639348470U, // <2,7,1,0>: Cost 3 vext2 <3,6,2,7>, <1,0,3,2>
+ 3695174452U, // <2,7,1,1>: Cost 4 vext2 <0,6,2,7>, <1,1,1,1>
+ 3695174550U, // <2,7,1,2>: Cost 4 vext2 <0,6,2,7>, <1,2,3,0>
+ 3694511104U, // <2,7,1,3>: Cost 4 vext2 <0,5,2,7>, <1,3,5,7>
+ 3713090594U, // <2,7,1,4>: Cost 4 vext2 <3,6,2,7>, <1,4,0,5>
+ 3693184144U, // <2,7,1,5>: Cost 4 vext2 <0,3,2,7>, <1,5,3,7>
+ 2627405016U, // <2,7,1,6>: Cost 3 vext2 <1,6,2,7>, <1,6,2,7>
+ 3799995519U, // <2,7,1,7>: Cost 4 vext3 <7,0,1,2>, <7,1,7,0>
+ 2639348470U, // <2,7,1,u>: Cost 3 vext2 <3,6,2,7>, <1,0,3,2>
+ 3695175101U, // <2,7,2,0>: Cost 4 vext2 <0,6,2,7>, <2,0,1,2>
+ 3643655168U, // <2,7,2,1>: Cost 4 vext1 <3,2,7,2>, <1,3,5,7>
+ 2257892517U, // <2,7,2,2>: Cost 3 vrev <7,2,2,2>
+ 3695175334U, // <2,7,2,3>: Cost 4 vext2 <0,6,2,7>, <2,3,0,1>
+ 3695175465U, // <2,7,2,4>: Cost 4 vext2 <0,6,2,7>, <2,4,5,6>
+ 2632714080U, // <2,7,2,5>: Cost 3 vext2 <2,5,2,7>, <2,5,2,7>
+ 2633377713U, // <2,7,2,6>: Cost 3 vext2 <2,6,2,7>, <2,6,2,7>
+ 3695175658U, // <2,7,2,7>: Cost 4 vext2 <0,6,2,7>, <2,7,0,1>
+ 2634704979U, // <2,7,2,u>: Cost 3 vext2 <2,u,2,7>, <2,u,2,7>
+ 1514094694U, // <2,7,3,0>: Cost 2 vext1 <6,2,7,3>, LHS
+ 2569921680U, // <2,7,3,1>: Cost 3 vext1 <3,2,7,3>, <1,5,3,7>
+ 2587838056U, // <2,7,3,2>: Cost 3 vext1 <6,2,7,3>, <2,2,2,2>
+ 2569922927U, // <2,7,3,3>: Cost 3 vext1 <3,2,7,3>, <3,2,7,3>
+ 1514097974U, // <2,7,3,4>: Cost 2 vext1 <6,2,7,3>, RHS
+ 2581868321U, // <2,7,3,5>: Cost 3 vext1 <5,2,7,3>, <5,2,7,3>
+ 1514099194U, // <2,7,3,6>: Cost 2 vext1 <6,2,7,3>, <6,2,7,3>
+ 2587841530U, // <2,7,3,7>: Cost 3 vext1 <6,2,7,3>, <7,0,1,2>
+ 1514100526U, // <2,7,3,u>: Cost 2 vext1 <6,2,7,3>, LHS
+ 2708706617U, // <2,7,4,0>: Cost 3 vext3 <4,0,6,2>, <7,4,0,6>
+ 3649643418U, // <2,7,4,1>: Cost 4 vext1 <4,2,7,4>, <1,2,3,4>
+ 3649644330U, // <2,7,4,2>: Cost 4 vext1 <4,2,7,4>, <2,4,5,7>
+ 2257982640U, // <2,7,4,3>: Cost 3 vrev <7,2,3,4>
+ 3649645641U, // <2,7,4,4>: Cost 4 vext1 <4,2,7,4>, <4,2,7,4>
+ 2621435190U, // <2,7,4,5>: Cost 3 vext2 <0,6,2,7>, RHS
+ 2712835441U, // <2,7,4,6>: Cost 3 vext3 <4,6,u,2>, <7,4,6,u>
+ 3799995762U, // <2,7,4,7>: Cost 4 vext3 <7,0,1,2>, <7,4,7,0>
+ 2621435433U, // <2,7,4,u>: Cost 3 vext2 <0,6,2,7>, RHS
+ 2729497990U, // <2,7,5,0>: Cost 3 vext3 <7,5,0,2>, <7,5,0,2>
+ 3643679744U, // <2,7,5,1>: Cost 4 vext1 <3,2,7,5>, <1,3,5,7>
+ 3637708424U, // <2,7,5,2>: Cost 4 vext1 <2,2,7,5>, <2,2,5,7>
+ 3643681137U, // <2,7,5,3>: Cost 4 vext1 <3,2,7,5>, <3,2,7,5>
+ 2599800118U, // <2,7,5,4>: Cost 3 vext1 <u,2,7,5>, RHS
+ 3786577334U, // <2,7,5,5>: Cost 4 vext3 <4,6,u,2>, <7,5,5,5>
+ 3786577345U, // <2,7,5,6>: Cost 4 vext3 <4,6,u,2>, <7,5,6,7>
+ 2599802214U, // <2,7,5,7>: Cost 3 vext1 <u,2,7,5>, <7,4,5,6>
+ 2599802670U, // <2,7,5,u>: Cost 3 vext1 <u,2,7,5>, LHS
+ 2581889126U, // <2,7,6,0>: Cost 3 vext1 <5,2,7,6>, LHS
+ 3643687936U, // <2,7,6,1>: Cost 4 vext1 <3,2,7,6>, <1,3,5,7>
+ 2663240186U, // <2,7,6,2>: Cost 3 vext2 <7,6,2,7>, <6,2,7,3>
+ 3643689330U, // <2,7,6,3>: Cost 4 vext1 <3,2,7,6>, <3,2,7,6>
+ 2581892406U, // <2,7,6,4>: Cost 3 vext1 <5,2,7,6>, RHS
+ 2581892900U, // <2,7,6,5>: Cost 3 vext1 <5,2,7,6>, <5,2,7,6>
+ 2587865597U, // <2,7,6,6>: Cost 3 vext1 <6,2,7,6>, <6,2,7,6>
+ 3786577428U, // <2,7,6,7>: Cost 4 vext3 <4,6,u,2>, <7,6,7,0>
+ 2581894958U, // <2,7,6,u>: Cost 3 vext1 <5,2,7,6>, LHS
+ 2726254119U, // <2,7,7,0>: Cost 3 vext3 <7,0,1,2>, <7,7,0,1>
+ 3804640817U, // <2,7,7,1>: Cost 4 vext3 <7,7,1,2>, <7,7,1,2>
+ 3637724826U, // <2,7,7,2>: Cost 4 vext1 <2,2,7,7>, <2,2,7,7>
+ 3734992123U, // <2,7,7,3>: Cost 4 vext2 <7,3,2,7>, <7,3,2,7>
+ 2552040758U, // <2,7,7,4>: Cost 3 vext1 <0,2,7,7>, RHS
+ 3799995992U, // <2,7,7,5>: Cost 4 vext3 <7,0,1,2>, <7,7,5,5>
+ 2663241198U, // <2,7,7,6>: Cost 3 vext2 <7,6,2,7>, <7,6,2,7>
+ 2712835692U, // <2,7,7,7>: Cost 3 vext3 <4,6,u,2>, <7,7,7,7>
+ 2731562607U, // <2,7,7,u>: Cost 3 vext3 <7,u,1,2>, <7,7,u,1>
+ 1514135654U, // <2,7,u,0>: Cost 2 vext1 <6,2,7,u>, LHS
+ 1657820802U, // <2,7,u,1>: Cost 2 vext3 <7,u,1,2>, <7,u,1,2>
+ 2587879016U, // <2,7,u,2>: Cost 3 vext1 <6,2,7,u>, <2,2,2,2>
+ 2569963892U, // <2,7,u,3>: Cost 3 vext1 <3,2,7,u>, <3,2,7,u>
+ 1514138934U, // <2,7,u,4>: Cost 2 vext1 <6,2,7,u>, RHS
+ 2621438106U, // <2,7,u,5>: Cost 3 vext2 <0,6,2,7>, RHS
+ 1514140159U, // <2,7,u,6>: Cost 2 vext1 <6,2,7,u>, <6,2,7,u>
+ 2587882490U, // <2,7,u,7>: Cost 3 vext1 <6,2,7,u>, <7,0,1,2>
+ 1514141486U, // <2,7,u,u>: Cost 2 vext1 <6,2,7,u>, LHS
+ 1544380416U, // <2,u,0,0>: Cost 2 vext2 LHS, <0,0,0,0>
+ 470638699U, // <2,u,0,1>: Cost 1 vext2 LHS, LHS
+ 1544380580U, // <2,u,0,2>: Cost 2 vext2 LHS, <0,2,0,2>
+ 1658631909U, // <2,u,0,3>: Cost 2 vext3 <u,0,3,2>, <u,0,3,2>
+ 1544380754U, // <2,u,0,4>: Cost 2 vext2 LHS, <0,4,1,5>
+ 2665898414U, // <2,u,0,5>: Cost 3 vext2 LHS, <0,5,2,7>
+ 1658853120U, // <2,u,0,6>: Cost 2 vext3 <u,0,6,2>, <u,0,6,2>
+ 3094531625U, // <2,u,0,7>: Cost 3 vtrnr <1,2,3,0>, RHS
+ 470639261U, // <2,u,0,u>: Cost 1 vext2 LHS, LHS
+ 1544381174U, // <2,u,1,0>: Cost 2 vext2 LHS, <1,0,3,2>
+ 1544381236U, // <2,u,1,1>: Cost 2 vext2 LHS, <1,1,1,1>
+ 1544381334U, // <2,u,1,2>: Cost 2 vext2 LHS, <1,2,3,0>
+ 1544381400U, // <2,u,1,3>: Cost 2 vext2 LHS, <1,3,1,3>
+ 2618123325U, // <2,u,1,4>: Cost 3 vext2 LHS, <1,4,3,5>
+ 1544381584U, // <2,u,1,5>: Cost 2 vext2 LHS, <1,5,3,7>
+ 2618123489U, // <2,u,1,6>: Cost 3 vext2 LHS, <1,6,3,7>
+ 2726254427U, // <2,u,1,7>: Cost 3 vext3 <7,0,1,2>, <u,1,7,3>
+ 1544381823U, // <2,u,1,u>: Cost 2 vext2 LHS, <1,u,3,3>
+ 1478328422U, // <2,u,2,0>: Cost 2 vext1 <0,2,u,2>, LHS
+ 2618123807U, // <2,u,2,1>: Cost 3 vext2 LHS, <2,1,3,1>
+ 269271142U, // <2,u,2,2>: Cost 1 vdup2 LHS
+ 1544382118U, // <2,u,2,3>: Cost 2 vext2 LHS, <2,3,0,1>
+ 1478331702U, // <2,u,2,4>: Cost 2 vext1 <0,2,u,2>, RHS
+ 2618124136U, // <2,u,2,5>: Cost 3 vext2 LHS, <2,5,3,6>
+ 1544382394U, // <2,u,2,6>: Cost 2 vext2 LHS, <2,6,3,7>
+ 3088354857U, // <2,u,2,7>: Cost 3 vtrnr <0,2,0,2>, RHS
+ 269271142U, // <2,u,2,u>: Cost 1 vdup2 LHS
+ 1544382614U, // <2,u,3,0>: Cost 2 vext2 LHS, <3,0,1,2>
+ 2953627374U, // <2,u,3,1>: Cost 3 vzipr LHS, <2,3,u,1>
+ 1490282143U, // <2,u,3,2>: Cost 2 vext1 <2,2,u,3>, <2,2,u,3>
+ 1879883932U, // <2,u,3,3>: Cost 2 vzipr LHS, LHS
+ 1544382978U, // <2,u,3,4>: Cost 2 vext2 LHS, <3,4,5,6>
+ 2953627378U, // <2,u,3,5>: Cost 3 vzipr LHS, <2,3,u,5>
+ 1514172931U, // <2,u,3,6>: Cost 2 vext1 <6,2,u,3>, <6,2,u,3>
+ 1879887176U, // <2,u,3,7>: Cost 2 vzipr LHS, RHS
+ 1879883937U, // <2,u,3,u>: Cost 2 vzipr LHS, LHS
+ 1484316774U, // <2,u,4,0>: Cost 2 vext1 <1,2,u,4>, LHS
+ 1484317639U, // <2,u,4,1>: Cost 2 vext1 <1,2,u,4>, <1,2,u,4>
+ 2552088270U, // <2,u,4,2>: Cost 3 vext1 <0,2,u,4>, <2,3,4,5>
+ 1190213513U, // <2,u,4,3>: Cost 2 vrev <u,2,3,4>
+ 1484320054U, // <2,u,4,4>: Cost 2 vext1 <1,2,u,4>, RHS
+ 470641974U, // <2,u,4,5>: Cost 1 vext2 LHS, RHS
+ 1592159604U, // <2,u,4,6>: Cost 2 vext2 LHS, <4,6,4,6>
+ 3094564393U, // <2,u,4,7>: Cost 3 vtrnr <1,2,3,4>, RHS
+ 470642217U, // <2,u,4,u>: Cost 1 vext2 LHS, RHS
+ 2552094959U, // <2,u,5,0>: Cost 3 vext1 <0,2,u,5>, <0,2,u,5>
+ 1592159952U, // <2,u,5,1>: Cost 2 vext2 LHS, <5,1,7,3>
+ 2564040353U, // <2,u,5,2>: Cost 3 vext1 <2,2,u,5>, <2,2,u,5>
+ 2690275455U, // <2,u,5,3>: Cost 3 vext3 <0,u,u,2>, <u,5,3,7>
+ 1592160198U, // <2,u,5,4>: Cost 2 vext2 LHS, <5,4,7,6>
+ 1592160260U, // <2,u,5,5>: Cost 2 vext2 LHS, <5,5,5,5>
+ 1611962522U, // <2,u,5,6>: Cost 2 vext3 <0,2,0,2>, RHS
+ 1592160424U, // <2,u,5,7>: Cost 2 vext2 LHS, <5,7,5,7>
+ 1611962540U, // <2,u,5,u>: Cost 2 vext3 <0,2,0,2>, RHS
+ 1478361190U, // <2,u,6,0>: Cost 2 vext1 <0,2,u,6>, LHS
+ 2552103670U, // <2,u,6,1>: Cost 3 vext1 <0,2,u,6>, <1,0,3,2>
+ 1592160762U, // <2,u,6,2>: Cost 2 vext2 LHS, <6,2,7,3>
+ 2685704400U, // <2,u,6,3>: Cost 3 vext3 <0,2,0,2>, <u,6,3,7>
+ 1478364470U, // <2,u,6,4>: Cost 2 vext1 <0,2,u,6>, RHS
+ 2901891226U, // <2,u,6,5>: Cost 3 vzipl <2,6,3,7>, RHS
+ 1592161080U, // <2,u,6,6>: Cost 2 vext2 LHS, <6,6,6,6>
+ 1592161102U, // <2,u,6,7>: Cost 2 vext2 LHS, <6,7,0,1>
+ 1478367022U, // <2,u,6,u>: Cost 2 vext1 <0,2,u,6>, LHS
+ 1592161274U, // <2,u,7,0>: Cost 2 vext2 LHS, <7,0,1,2>
+ 2659931226U, // <2,u,7,1>: Cost 3 vext2 <7,1,2,u>, <7,1,2,u>
+ 2564056739U, // <2,u,7,2>: Cost 3 vext1 <2,2,u,7>, <2,2,u,7>
+ 2665903331U, // <2,u,7,3>: Cost 3 vext2 LHS, <7,3,0,1>
+ 1592161638U, // <2,u,7,4>: Cost 2 vext2 LHS, <7,4,5,6>
+ 2665903494U, // <2,u,7,5>: Cost 3 vext2 LHS, <7,5,0,2>
+ 2587947527U, // <2,u,7,6>: Cost 3 vext1 <6,2,u,7>, <6,2,u,7>
+ 1592161900U, // <2,u,7,7>: Cost 2 vext2 LHS, <7,7,7,7>
+ 1592161922U, // <2,u,7,u>: Cost 2 vext2 LHS, <7,u,1,2>
+ 1478377574U, // <2,u,u,0>: Cost 2 vext1 <0,2,u,u>, LHS
+ 470644526U, // <2,u,u,1>: Cost 1 vext2 LHS, LHS
+ 269271142U, // <2,u,u,2>: Cost 1 vdup2 LHS
+ 1879924892U, // <2,u,u,3>: Cost 2 vzipr LHS, LHS
+ 1478380854U, // <2,u,u,4>: Cost 2 vext1 <0,2,u,u>, RHS
+ 470644890U, // <2,u,u,5>: Cost 1 vext2 LHS, RHS
+ 1611962765U, // <2,u,u,6>: Cost 2 vext3 <0,2,0,2>, RHS
+ 1879928136U, // <2,u,u,7>: Cost 2 vzipr LHS, RHS
+ 470645093U, // <2,u,u,u>: Cost 1 vext2 LHS, LHS
+ 1611448320U, // <3,0,0,0>: Cost 2 vext3 LHS, <0,0,0,0>
+ 1611890698U, // <3,0,0,1>: Cost 2 vext3 LHS, <0,0,1,1>
+ 1611890708U, // <3,0,0,2>: Cost 2 vext3 LHS, <0,0,2,2>
+ 3763576860U, // <3,0,0,3>: Cost 4 vext3 LHS, <0,0,3,1>
+ 2689835045U, // <3,0,0,4>: Cost 3 vext3 LHS, <0,0,4,1>
+ 3698508206U, // <3,0,0,5>: Cost 4 vext2 <1,2,3,0>, <0,5,2,7>
+ 3763576887U, // <3,0,0,6>: Cost 4 vext3 LHS, <0,0,6,1>
+ 3667678434U, // <3,0,0,7>: Cost 4 vext1 <7,3,0,0>, <7,3,0,0>
+ 1616093258U, // <3,0,0,u>: Cost 2 vext3 LHS, <0,0,u,2>
+ 1490337894U, // <3,0,1,0>: Cost 2 vext1 <2,3,0,1>, LHS
+ 2685632602U, // <3,0,1,1>: Cost 3 vext3 LHS, <0,1,1,0>
+ 537706598U, // <3,0,1,2>: Cost 1 vext3 LHS, LHS
+ 2624766936U, // <3,0,1,3>: Cost 3 vext2 <1,2,3,0>, <1,3,1,3>
+ 1490341174U, // <3,0,1,4>: Cost 2 vext1 <2,3,0,1>, RHS
+ 2624767120U, // <3,0,1,5>: Cost 3 vext2 <1,2,3,0>, <1,5,3,7>
+ 2732966030U, // <3,0,1,6>: Cost 3 vext3 LHS, <0,1,6,7>
+ 2593944803U, // <3,0,1,7>: Cost 3 vext1 <7,3,0,1>, <7,3,0,1>
+ 537706652U, // <3,0,1,u>: Cost 1 vext3 LHS, LHS
+ 1611890852U, // <3,0,2,0>: Cost 2 vext3 LHS, <0,2,0,2>
+ 2685632684U, // <3,0,2,1>: Cost 3 vext3 LHS, <0,2,1,1>
+ 2685632692U, // <3,0,2,2>: Cost 3 vext3 LHS, <0,2,2,0>
+ 2685632702U, // <3,0,2,3>: Cost 3 vext3 LHS, <0,2,3,1>
+ 1611890892U, // <3,0,2,4>: Cost 2 vext3 LHS, <0,2,4,6>
+ 2732966102U, // <3,0,2,5>: Cost 3 vext3 LHS, <0,2,5,7>
+ 2624767930U, // <3,0,2,6>: Cost 3 vext2 <1,2,3,0>, <2,6,3,7>
+ 2685632744U, // <3,0,2,7>: Cost 3 vext3 LHS, <0,2,7,7>
+ 1611890924U, // <3,0,2,u>: Cost 2 vext3 LHS, <0,2,u,2>
+ 2624768150U, // <3,0,3,0>: Cost 3 vext2 <1,2,3,0>, <3,0,1,2>
+ 2685632764U, // <3,0,3,1>: Cost 3 vext3 LHS, <0,3,1,0>
+ 2685632774U, // <3,0,3,2>: Cost 3 vext3 LHS, <0,3,2,1>
+ 2624768412U, // <3,0,3,3>: Cost 3 vext2 <1,2,3,0>, <3,3,3,3>
+ 2624768514U, // <3,0,3,4>: Cost 3 vext2 <1,2,3,0>, <3,4,5,6>
+ 3702491714U, // <3,0,3,5>: Cost 4 vext2 <1,u,3,0>, <3,5,3,7>
+ 2624768632U, // <3,0,3,6>: Cost 3 vext2 <1,2,3,0>, <3,6,0,7>
+ 3702491843U, // <3,0,3,7>: Cost 4 vext2 <1,u,3,0>, <3,7,0,1>
+ 2686959934U, // <3,0,3,u>: Cost 3 vext3 <0,3,u,3>, <0,3,u,3>
+ 2689835336U, // <3,0,4,0>: Cost 3 vext3 LHS, <0,4,0,4>
+ 1611891026U, // <3,0,4,1>: Cost 2 vext3 LHS, <0,4,1,5>
+ 1611891036U, // <3,0,4,2>: Cost 2 vext3 LHS, <0,4,2,6>
+ 3763577184U, // <3,0,4,3>: Cost 4 vext3 LHS, <0,4,3,1>
+ 2689835374U, // <3,0,4,4>: Cost 3 vext3 LHS, <0,4,4,6>
+ 1551027510U, // <3,0,4,5>: Cost 2 vext2 <1,2,3,0>, RHS
+ 2666573172U, // <3,0,4,6>: Cost 3 vext2 <u,2,3,0>, <4,6,4,6>
+ 3667711206U, // <3,0,4,7>: Cost 4 vext1 <7,3,0,4>, <7,3,0,4>
+ 1616093586U, // <3,0,4,u>: Cost 2 vext3 LHS, <0,4,u,6>
+ 2685190556U, // <3,0,5,0>: Cost 3 vext3 LHS, <0,5,0,7>
+ 2666573520U, // <3,0,5,1>: Cost 3 vext2 <u,2,3,0>, <5,1,7,3>
+ 3040886886U, // <3,0,5,2>: Cost 3 vtrnl <3,4,5,6>, LHS
+ 3625912834U, // <3,0,5,3>: Cost 4 vext1 <0,3,0,5>, <3,4,5,6>
+ 2666573766U, // <3,0,5,4>: Cost 3 vext2 <u,2,3,0>, <5,4,7,6>
+ 2666573828U, // <3,0,5,5>: Cost 3 vext2 <u,2,3,0>, <5,5,5,5>
+ 2732966354U, // <3,0,5,6>: Cost 3 vext3 LHS, <0,5,6,7>
+ 2666573992U, // <3,0,5,7>: Cost 3 vext2 <u,2,3,0>, <5,7,5,7>
+ 3040886940U, // <3,0,5,u>: Cost 3 vtrnl <3,4,5,6>, LHS
+ 2685190637U, // <3,0,6,0>: Cost 3 vext3 LHS, <0,6,0,7>
+ 2732966390U, // <3,0,6,1>: Cost 3 vext3 LHS, <0,6,1,7>
+ 2689835519U, // <3,0,6,2>: Cost 3 vext3 LHS, <0,6,2,7>
+ 3667724438U, // <3,0,6,3>: Cost 4 vext1 <7,3,0,6>, <3,0,1,2>
+ 3763577355U, // <3,0,6,4>: Cost 4 vext3 LHS, <0,6,4,1>
+ 3806708243U, // <3,0,6,5>: Cost 4 vext3 LHS, <0,6,5,0>
+ 2666574648U, // <3,0,6,6>: Cost 3 vext2 <u,2,3,0>, <6,6,6,6>
+ 2657948520U, // <3,0,6,7>: Cost 3 vext2 <6,7,3,0>, <6,7,3,0>
+ 2689835573U, // <3,0,6,u>: Cost 3 vext3 LHS, <0,6,u,7>
+ 2666574842U, // <3,0,7,0>: Cost 3 vext2 <u,2,3,0>, <7,0,1,2>
+ 2685633095U, // <3,0,7,1>: Cost 3 vext3 LHS, <0,7,1,7>
+ 2660603052U, // <3,0,7,2>: Cost 3 vext2 <7,2,3,0>, <7,2,3,0>
+ 3643844997U, // <3,0,7,3>: Cost 4 vext1 <3,3,0,7>, <3,3,0,7>
+ 2666575206U, // <3,0,7,4>: Cost 3 vext2 <u,2,3,0>, <7,4,5,6>
+ 3655790391U, // <3,0,7,5>: Cost 4 vext1 <5,3,0,7>, <5,3,0,7>
+ 3731690968U, // <3,0,7,6>: Cost 4 vext2 <6,7,3,0>, <7,6,0,3>
+ 2666575468U, // <3,0,7,7>: Cost 3 vext2 <u,2,3,0>, <7,7,7,7>
+ 2664584850U, // <3,0,7,u>: Cost 3 vext2 <7,u,3,0>, <7,u,3,0>
+ 1616093834U, // <3,0,u,0>: Cost 2 vext3 LHS, <0,u,0,2>
+ 1611891346U, // <3,0,u,1>: Cost 2 vext3 LHS, <0,u,1,1>
+ 537707165U, // <3,0,u,2>: Cost 1 vext3 LHS, LHS
+ 2689835684U, // <3,0,u,3>: Cost 3 vext3 LHS, <0,u,3,1>
+ 1616093874U, // <3,0,u,4>: Cost 2 vext3 LHS, <0,u,4,6>
+ 1551030426U, // <3,0,u,5>: Cost 2 vext2 <1,2,3,0>, RHS
+ 2624772304U, // <3,0,u,6>: Cost 3 vext2 <1,2,3,0>, <u,6,3,7>
+ 2594002154U, // <3,0,u,7>: Cost 3 vext1 <7,3,0,u>, <7,3,0,u>
+ 537707219U, // <3,0,u,u>: Cost 1 vext3 LHS, LHS
+ 2552201318U, // <3,1,0,0>: Cost 3 vext1 <0,3,1,0>, LHS
+ 2618802278U, // <3,1,0,1>: Cost 3 vext2 <0,2,3,1>, LHS
+ 2618802366U, // <3,1,0,2>: Cost 3 vext2 <0,2,3,1>, <0,2,3,1>
+ 1611449078U, // <3,1,0,3>: Cost 2 vext3 LHS, <1,0,3,2>
+ 2552204598U, // <3,1,0,4>: Cost 3 vext1 <0,3,1,0>, RHS
+ 2732966663U, // <3,1,0,5>: Cost 3 vext3 LHS, <1,0,5,1>
+ 3906258396U, // <3,1,0,6>: Cost 4 vuzpr <2,3,0,1>, <2,0,4,6>
+ 3667752171U, // <3,1,0,7>: Cost 4 vext1 <7,3,1,0>, <7,3,1,0>
+ 1611891491U, // <3,1,0,u>: Cost 2 vext3 LHS, <1,0,u,2>
+ 2689835819U, // <3,1,1,0>: Cost 3 vext3 LHS, <1,1,0,1>
+ 1611449140U, // <3,1,1,1>: Cost 2 vext3 LHS, <1,1,1,1>
+ 2624775063U, // <3,1,1,2>: Cost 3 vext2 <1,2,3,1>, <1,2,3,1>
+ 1611891528U, // <3,1,1,3>: Cost 2 vext3 LHS, <1,1,3,3>
+ 2689835859U, // <3,1,1,4>: Cost 3 vext3 LHS, <1,1,4,5>
+ 2689835868U, // <3,1,1,5>: Cost 3 vext3 LHS, <1,1,5,5>
+ 3763577701U, // <3,1,1,6>: Cost 4 vext3 LHS, <1,1,6,5>
+ 3765273452U, // <3,1,1,7>: Cost 4 vext3 <1,1,7,3>, <1,1,7,3>
+ 1611891573U, // <3,1,1,u>: Cost 2 vext3 LHS, <1,1,u,3>
+ 2629420494U, // <3,1,2,0>: Cost 3 vext2 <2,0,3,1>, <2,0,3,1>
+ 2689835911U, // <3,1,2,1>: Cost 3 vext3 LHS, <1,2,1,3>
+ 2564163248U, // <3,1,2,2>: Cost 3 vext1 <2,3,1,2>, <2,3,1,2>
+ 1611449238U, // <3,1,2,3>: Cost 2 vext3 LHS, <1,2,3,0>
+ 2564164918U, // <3,1,2,4>: Cost 3 vext1 <2,3,1,2>, RHS
+ 2689835947U, // <3,1,2,5>: Cost 3 vext3 LHS, <1,2,5,3>
+ 3692545978U, // <3,1,2,6>: Cost 4 vext2 <0,2,3,1>, <2,6,3,7>
+ 2732966842U, // <3,1,2,7>: Cost 3 vext3 LHS, <1,2,7,0>
+ 1611891651U, // <3,1,2,u>: Cost 2 vext3 LHS, <1,2,u,0>
+ 1484456038U, // <3,1,3,0>: Cost 2 vext1 <1,3,1,3>, LHS
+ 1611891672U, // <3,1,3,1>: Cost 2 vext3 LHS, <1,3,1,3>
+ 2685633502U, // <3,1,3,2>: Cost 3 vext3 LHS, <1,3,2,0>
+ 2685633512U, // <3,1,3,3>: Cost 3 vext3 LHS, <1,3,3,1>
+ 1484459318U, // <3,1,3,4>: Cost 2 vext1 <1,3,1,3>, RHS
+ 1611891712U, // <3,1,3,5>: Cost 2 vext3 LHS, <1,3,5,7>
+ 2689836041U, // <3,1,3,6>: Cost 3 vext3 LHS, <1,3,6,7>
+ 2733409294U, // <3,1,3,7>: Cost 3 vext3 LHS, <1,3,7,3>
+ 1611891735U, // <3,1,3,u>: Cost 2 vext3 LHS, <1,3,u,3>
+ 2552234086U, // <3,1,4,0>: Cost 3 vext1 <0,3,1,4>, LHS
+ 2732966955U, // <3,1,4,1>: Cost 3 vext3 LHS, <1,4,1,5>
+ 2732966964U, // <3,1,4,2>: Cost 3 vext3 LHS, <1,4,2,5>
+ 2685633597U, // <3,1,4,3>: Cost 3 vext3 LHS, <1,4,3,5>
+ 2552237366U, // <3,1,4,4>: Cost 3 vext1 <0,3,1,4>, RHS
+ 2618805558U, // <3,1,4,5>: Cost 3 vext2 <0,2,3,1>, RHS
+ 2769472822U, // <3,1,4,6>: Cost 3 vuzpl <3,0,1,2>, RHS
+ 3667784943U, // <3,1,4,7>: Cost 4 vext1 <7,3,1,4>, <7,3,1,4>
+ 2685633642U, // <3,1,4,u>: Cost 3 vext3 LHS, <1,4,u,5>
+ 2689836143U, // <3,1,5,0>: Cost 3 vext3 LHS, <1,5,0,1>
+ 2564187280U, // <3,1,5,1>: Cost 3 vext1 <2,3,1,5>, <1,5,3,7>
+ 2564187827U, // <3,1,5,2>: Cost 3 vext1 <2,3,1,5>, <2,3,1,5>
+ 1611891856U, // <3,1,5,3>: Cost 2 vext3 LHS, <1,5,3,7>
+ 2689836183U, // <3,1,5,4>: Cost 3 vext3 LHS, <1,5,4,5>
+ 3759375522U, // <3,1,5,5>: Cost 4 vext3 LHS, <1,5,5,7>
+ 3720417378U, // <3,1,5,6>: Cost 4 vext2 <4,u,3,1>, <5,6,7,0>
+ 2832518454U, // <3,1,5,7>: Cost 3 vuzpr <2,3,0,1>, RHS
+ 1611891901U, // <3,1,5,u>: Cost 2 vext3 LHS, <1,5,u,7>
+ 3763578048U, // <3,1,6,0>: Cost 4 vext3 LHS, <1,6,0,1>
+ 2689836239U, // <3,1,6,1>: Cost 3 vext3 LHS, <1,6,1,7>
+ 2732967128U, // <3,1,6,2>: Cost 3 vext3 LHS, <1,6,2,7>
+ 2685633761U, // <3,1,6,3>: Cost 3 vext3 LHS, <1,6,3,7>
+ 3763578088U, // <3,1,6,4>: Cost 4 vext3 LHS, <1,6,4,5>
+ 2689836275U, // <3,1,6,5>: Cost 3 vext3 LHS, <1,6,5,7>
+ 3763578108U, // <3,1,6,6>: Cost 4 vext3 LHS, <1,6,6,7>
+ 2732967166U, // <3,1,6,7>: Cost 3 vext3 LHS, <1,6,7,0>
+ 2685633806U, // <3,1,6,u>: Cost 3 vext3 LHS, <1,6,u,7>
+ 3631972454U, // <3,1,7,0>: Cost 4 vext1 <1,3,1,7>, LHS
+ 2659947612U, // <3,1,7,1>: Cost 3 vext2 <7,1,3,1>, <7,1,3,1>
+ 4036102294U, // <3,1,7,2>: Cost 4 vzipr <1,5,3,7>, <3,0,1,2>
+ 3095396454U, // <3,1,7,3>: Cost 3 vtrnr <1,3,5,7>, LHS
+ 3631975734U, // <3,1,7,4>: Cost 4 vext1 <1,3,1,7>, RHS
+ 2222982144U, // <3,1,7,5>: Cost 3 vrev <1,3,5,7>
+ 3296797705U, // <3,1,7,6>: Cost 4 vrev <1,3,6,7>
+ 3720418924U, // <3,1,7,7>: Cost 4 vext2 <4,u,3,1>, <7,7,7,7>
+ 3095396459U, // <3,1,7,u>: Cost 3 vtrnr <1,3,5,7>, LHS
+ 1484496998U, // <3,1,u,0>: Cost 2 vext1 <1,3,1,u>, LHS
+ 1611892077U, // <3,1,u,1>: Cost 2 vext3 LHS, <1,u,1,3>
+ 2685633907U, // <3,1,u,2>: Cost 3 vext3 LHS, <1,u,2,0>
+ 1611892092U, // <3,1,u,3>: Cost 2 vext3 LHS, <1,u,3,0>
+ 1484500278U, // <3,1,u,4>: Cost 2 vext1 <1,3,1,u>, RHS
+ 1611892117U, // <3,1,u,5>: Cost 2 vext3 LHS, <1,u,5,7>
+ 2685633950U, // <3,1,u,6>: Cost 3 vext3 LHS, <1,u,6,7>
+ 2832518697U, // <3,1,u,7>: Cost 3 vuzpr <2,3,0,1>, RHS
+ 1611892140U, // <3,1,u,u>: Cost 2 vext3 LHS, <1,u,u,3>
+ 2623455232U, // <3,2,0,0>: Cost 3 vext2 <1,0,3,2>, <0,0,0,0>
+ 1549713510U, // <3,2,0,1>: Cost 2 vext2 <1,0,3,2>, LHS
+ 2689836484U, // <3,2,0,2>: Cost 3 vext3 LHS, <2,0,2,0>
+ 2685633997U, // <3,2,0,3>: Cost 3 vext3 LHS, <2,0,3,0>
+ 2623455570U, // <3,2,0,4>: Cost 3 vext2 <1,0,3,2>, <0,4,1,5>
+ 2732967398U, // <3,2,0,5>: Cost 3 vext3 LHS, <2,0,5,7>
+ 2689836524U, // <3,2,0,6>: Cost 3 vext3 LHS, <2,0,6,4>
+ 2229044964U, // <3,2,0,7>: Cost 3 vrev <2,3,7,0>
+ 1549714077U, // <3,2,0,u>: Cost 2 vext2 <1,0,3,2>, LHS
+ 1549714166U, // <3,2,1,0>: Cost 2 vext2 <1,0,3,2>, <1,0,3,2>
+ 2623456052U, // <3,2,1,1>: Cost 3 vext2 <1,0,3,2>, <1,1,1,1>
+ 2623456150U, // <3,2,1,2>: Cost 3 vext2 <1,0,3,2>, <1,2,3,0>
+ 2685634079U, // <3,2,1,3>: Cost 3 vext3 LHS, <2,1,3,1>
+ 2552286518U, // <3,2,1,4>: Cost 3 vext1 <0,3,2,1>, RHS
+ 2623456400U, // <3,2,1,5>: Cost 3 vext2 <1,0,3,2>, <1,5,3,7>
+ 2689836604U, // <3,2,1,6>: Cost 3 vext3 LHS, <2,1,6,3>
+ 3667834101U, // <3,2,1,7>: Cost 4 vext1 <7,3,2,1>, <7,3,2,1>
+ 1155385070U, // <3,2,1,u>: Cost 2 vrev <2,3,u,1>
+ 2689836629U, // <3,2,2,0>: Cost 3 vext3 LHS, <2,2,0,1>
+ 2689836640U, // <3,2,2,1>: Cost 3 vext3 LHS, <2,2,1,3>
+ 1611449960U, // <3,2,2,2>: Cost 2 vext3 LHS, <2,2,2,2>
+ 1611892338U, // <3,2,2,3>: Cost 2 vext3 LHS, <2,2,3,3>
+ 2689836669U, // <3,2,2,4>: Cost 3 vext3 LHS, <2,2,4,5>
+ 2689836680U, // <3,2,2,5>: Cost 3 vext3 LHS, <2,2,5,7>
+ 2689836688U, // <3,2,2,6>: Cost 3 vext3 LHS, <2,2,6,6>
+ 3763578518U, // <3,2,2,7>: Cost 4 vext3 LHS, <2,2,7,3>
+ 1611892383U, // <3,2,2,u>: Cost 2 vext3 LHS, <2,2,u,3>
+ 1611450022U, // <3,2,3,0>: Cost 2 vext3 LHS, <2,3,0,1>
+ 2685191854U, // <3,2,3,1>: Cost 3 vext3 LHS, <2,3,1,0>
+ 2685191865U, // <3,2,3,2>: Cost 3 vext3 LHS, <2,3,2,2>
+ 2685191875U, // <3,2,3,3>: Cost 3 vext3 LHS, <2,3,3,3>
+ 1611450062U, // <3,2,3,4>: Cost 2 vext3 LHS, <2,3,4,5>
+ 2732967635U, // <3,2,3,5>: Cost 3 vext3 LHS, <2,3,5,1>
+ 2732967645U, // <3,2,3,6>: Cost 3 vext3 LHS, <2,3,6,2>
+ 2732967652U, // <3,2,3,7>: Cost 3 vext3 LHS, <2,3,7,0>
+ 1611450094U, // <3,2,3,u>: Cost 2 vext3 LHS, <2,3,u,1>
+ 2558279782U, // <3,2,4,0>: Cost 3 vext1 <1,3,2,4>, LHS
+ 2558280602U, // <3,2,4,1>: Cost 3 vext1 <1,3,2,4>, <1,2,3,4>
+ 2732967692U, // <3,2,4,2>: Cost 3 vext3 LHS, <2,4,2,4>
+ 2685634326U, // <3,2,4,3>: Cost 3 vext3 LHS, <2,4,3,5>
+ 2558283062U, // <3,2,4,4>: Cost 3 vext1 <1,3,2,4>, RHS
+ 1549716790U, // <3,2,4,5>: Cost 2 vext2 <1,0,3,2>, RHS
+ 2689836844U, // <3,2,4,6>: Cost 3 vext3 LHS, <2,4,6,0>
+ 2229077736U, // <3,2,4,7>: Cost 3 vrev <2,3,7,4>
+ 1549717033U, // <3,2,4,u>: Cost 2 vext2 <1,0,3,2>, RHS
+ 2552316006U, // <3,2,5,0>: Cost 3 vext1 <0,3,2,5>, LHS
+ 2228643507U, // <3,2,5,1>: Cost 3 vrev <2,3,1,5>
+ 2689836896U, // <3,2,5,2>: Cost 3 vext3 LHS, <2,5,2,7>
+ 2685634408U, // <3,2,5,3>: Cost 3 vext3 LHS, <2,5,3,6>
+ 1155122894U, // <3,2,5,4>: Cost 2 vrev <2,3,4,5>
+ 2665263108U, // <3,2,5,5>: Cost 3 vext2 <u,0,3,2>, <5,5,5,5>
+ 2689836932U, // <3,2,5,6>: Cost 3 vext3 LHS, <2,5,6,7>
+ 2665263272U, // <3,2,5,7>: Cost 3 vext2 <u,0,3,2>, <5,7,5,7>
+ 1155417842U, // <3,2,5,u>: Cost 2 vrev <2,3,u,5>
+ 2689836953U, // <3,2,6,0>: Cost 3 vext3 LHS, <2,6,0,1>
+ 2689836964U, // <3,2,6,1>: Cost 3 vext3 LHS, <2,6,1,3>
+ 2689836976U, // <3,2,6,2>: Cost 3 vext3 LHS, <2,6,2,6>
+ 1611892666U, // <3,2,6,3>: Cost 2 vext3 LHS, <2,6,3,7>
+ 2689836993U, // <3,2,6,4>: Cost 3 vext3 LHS, <2,6,4,5>
+ 2689837004U, // <3,2,6,5>: Cost 3 vext3 LHS, <2,6,5,7>
+ 2689837013U, // <3,2,6,6>: Cost 3 vext3 LHS, <2,6,6,7>
+ 2665263950U, // <3,2,6,7>: Cost 3 vext2 <u,0,3,2>, <6,7,0,1>
+ 1611892711U, // <3,2,6,u>: Cost 2 vext3 LHS, <2,6,u,7>
+ 2665264122U, // <3,2,7,0>: Cost 3 vext2 <u,0,3,2>, <7,0,1,2>
+ 2623460419U, // <3,2,7,1>: Cost 3 vext2 <1,0,3,2>, <7,1,0,3>
+ 4169138340U, // <3,2,7,2>: Cost 4 vtrnr <1,3,5,7>, <0,2,0,2>
+ 2962358374U, // <3,2,7,3>: Cost 3 vzipr <1,5,3,7>, LHS
+ 2665264486U, // <3,2,7,4>: Cost 3 vext2 <u,0,3,2>, <7,4,5,6>
+ 2228954841U, // <3,2,7,5>: Cost 3 vrev <2,3,5,7>
+ 2229028578U, // <3,2,7,6>: Cost 3 vrev <2,3,6,7>
+ 2665264748U, // <3,2,7,7>: Cost 3 vext2 <u,0,3,2>, <7,7,7,7>
+ 2962358379U, // <3,2,7,u>: Cost 3 vzipr <1,5,3,7>, LHS
+ 1611892795U, // <3,2,u,0>: Cost 2 vext3 LHS, <2,u,0,1>
+ 1549719342U, // <3,2,u,1>: Cost 2 vext2 <1,0,3,2>, LHS
+ 1611449960U, // <3,2,u,2>: Cost 2 vext3 LHS, <2,2,2,2>
+ 1611892824U, // <3,2,u,3>: Cost 2 vext3 LHS, <2,u,3,3>
+ 1611892835U, // <3,2,u,4>: Cost 2 vext3 LHS, <2,u,4,5>
+ 1549719706U, // <3,2,u,5>: Cost 2 vext2 <1,0,3,2>, RHS
+ 2689837168U, // <3,2,u,6>: Cost 3 vext3 LHS, <2,u,6,0>
+ 2665265408U, // <3,2,u,7>: Cost 3 vext2 <u,0,3,2>, <u,7,0,1>
+ 1611892867U, // <3,2,u,u>: Cost 2 vext3 LHS, <2,u,u,1>
+ 2685192331U, // <3,3,0,0>: Cost 3 vext3 LHS, <3,0,0,0>
+ 1611450518U, // <3,3,0,1>: Cost 2 vext3 LHS, <3,0,1,2>
+ 2685634717U, // <3,3,0,2>: Cost 3 vext3 LHS, <3,0,2,0>
+ 2564294806U, // <3,3,0,3>: Cost 3 vext1 <2,3,3,0>, <3,0,1,2>
+ 2685634736U, // <3,3,0,4>: Cost 3 vext3 LHS, <3,0,4,1>
+ 2732968122U, // <3,3,0,5>: Cost 3 vext3 LHS, <3,0,5,2>
+ 3763579075U, // <3,3,0,6>: Cost 4 vext3 LHS, <3,0,6,2>
+ 4034053264U, // <3,3,0,7>: Cost 4 vzipr <1,2,3,0>, <1,5,3,7>
+ 1611450581U, // <3,3,0,u>: Cost 2 vext3 LHS, <3,0,u,2>
+ 2685192415U, // <3,3,1,0>: Cost 3 vext3 LHS, <3,1,0,3>
+ 1550385992U, // <3,3,1,1>: Cost 2 vext2 <1,1,3,3>, <1,1,3,3>
+ 2685192433U, // <3,3,1,2>: Cost 3 vext3 LHS, <3,1,2,3>
+ 2685634808U, // <3,3,1,3>: Cost 3 vext3 LHS, <3,1,3,1>
+ 2558332214U, // <3,3,1,4>: Cost 3 vext1 <1,3,3,1>, RHS
+ 2685634828U, // <3,3,1,5>: Cost 3 vext3 LHS, <3,1,5,3>
+ 3759376661U, // <3,3,1,6>: Cost 4 vext3 LHS, <3,1,6,3>
+ 2703477022U, // <3,3,1,7>: Cost 3 vext3 <3,1,7,3>, <3,1,7,3>
+ 1555031423U, // <3,3,1,u>: Cost 2 vext2 <1,u,3,3>, <1,u,3,3>
+ 2564309094U, // <3,3,2,0>: Cost 3 vext1 <2,3,3,2>, LHS
+ 2630100513U, // <3,3,2,1>: Cost 3 vext2 <2,1,3,3>, <2,1,3,3>
+ 1557022322U, // <3,3,2,2>: Cost 2 vext2 <2,2,3,3>, <2,2,3,3>
+ 2685192520U, // <3,3,2,3>: Cost 3 vext3 LHS, <3,2,3,0>
+ 2564312374U, // <3,3,2,4>: Cost 3 vext1 <2,3,3,2>, RHS
+ 2732968286U, // <3,3,2,5>: Cost 3 vext3 LHS, <3,2,5,4>
+ 2685634918U, // <3,3,2,6>: Cost 3 vext3 LHS, <3,2,6,3>
+ 2704140655U, // <3,3,2,7>: Cost 3 vext3 <3,2,7,3>, <3,2,7,3>
+ 1561004120U, // <3,3,2,u>: Cost 2 vext2 <2,u,3,3>, <2,u,3,3>
+ 1496547430U, // <3,3,3,0>: Cost 2 vext1 <3,3,3,3>, LHS
+ 2624129256U, // <3,3,3,1>: Cost 3 vext2 <1,1,3,3>, <3,1,1,3>
+ 2630764866U, // <3,3,3,2>: Cost 3 vext2 <2,2,3,3>, <3,2,2,3>
+ 336380006U, // <3,3,3,3>: Cost 1 vdup3 LHS
+ 1496550710U, // <3,3,3,4>: Cost 2 vext1 <3,3,3,3>, RHS
+ 2732968368U, // <3,3,3,5>: Cost 3 vext3 LHS, <3,3,5,5>
+ 2624129683U, // <3,3,3,6>: Cost 3 vext2 <1,1,3,3>, <3,6,3,7>
+ 2594182400U, // <3,3,3,7>: Cost 3 vext1 <7,3,3,3>, <7,3,3,3>
+ 336380006U, // <3,3,3,u>: Cost 1 vdup3 LHS
+ 2558353510U, // <3,3,4,0>: Cost 3 vext1 <1,3,3,4>, LHS
+ 2558354411U, // <3,3,4,1>: Cost 3 vext1 <1,3,3,4>, <1,3,3,4>
+ 2564327108U, // <3,3,4,2>: Cost 3 vext1 <2,3,3,4>, <2,3,3,4>
+ 2564327938U, // <3,3,4,3>: Cost 3 vext1 <2,3,3,4>, <3,4,5,6>
+ 2960343962U, // <3,3,4,4>: Cost 3 vzipr <1,2,3,4>, <1,2,3,4>
+ 1611893250U, // <3,3,4,5>: Cost 2 vext3 LHS, <3,4,5,6>
+ 2771619126U, // <3,3,4,6>: Cost 3 vuzpl <3,3,3,3>, RHS
+ 4034086032U, // <3,3,4,7>: Cost 4 vzipr <1,2,3,4>, <1,5,3,7>
+ 1611893277U, // <3,3,4,u>: Cost 2 vext3 LHS, <3,4,u,6>
+ 2558361702U, // <3,3,5,0>: Cost 3 vext1 <1,3,3,5>, LHS
+ 2558362604U, // <3,3,5,1>: Cost 3 vext1 <1,3,3,5>, <1,3,3,5>
+ 2558363342U, // <3,3,5,2>: Cost 3 vext1 <1,3,3,5>, <2,3,4,5>
+ 2732968512U, // <3,3,5,3>: Cost 3 vext3 LHS, <3,5,3,5>
+ 2558364982U, // <3,3,5,4>: Cost 3 vext1 <1,3,3,5>, RHS
+ 3101279950U, // <3,3,5,5>: Cost 3 vtrnr <2,3,4,5>, <2,3,4,5>
+ 2665934946U, // <3,3,5,6>: Cost 3 vext2 <u,1,3,3>, <5,6,7,0>
+ 2826636598U, // <3,3,5,7>: Cost 3 vuzpr <1,3,1,3>, RHS
+ 2826636599U, // <3,3,5,u>: Cost 3 vuzpr <1,3,1,3>, RHS
+ 2732968568U, // <3,3,6,0>: Cost 3 vext3 LHS, <3,6,0,7>
+ 3763579521U, // <3,3,6,1>: Cost 4 vext3 LHS, <3,6,1,7>
+ 2732968586U, // <3,3,6,2>: Cost 3 vext3 LHS, <3,6,2,7>
+ 2732968595U, // <3,3,6,3>: Cost 3 vext3 LHS, <3,6,3,7>
+ 2732968604U, // <3,3,6,4>: Cost 3 vext3 LHS, <3,6,4,7>
+ 3763579557U, // <3,3,6,5>: Cost 4 vext3 LHS, <3,6,5,7>
+ 2732968621U, // <3,3,6,6>: Cost 3 vext3 LHS, <3,6,6,6>
+ 2657973099U, // <3,3,6,7>: Cost 3 vext2 <6,7,3,3>, <6,7,3,3>
+ 2658636732U, // <3,3,6,u>: Cost 3 vext2 <6,u,3,3>, <6,u,3,3>
+ 2558378086U, // <3,3,7,0>: Cost 3 vext1 <1,3,3,7>, LHS
+ 2558378990U, // <3,3,7,1>: Cost 3 vext1 <1,3,3,7>, <1,3,3,7>
+ 2564351687U, // <3,3,7,2>: Cost 3 vext1 <2,3,3,7>, <2,3,3,7>
+ 2661291264U, // <3,3,7,3>: Cost 3 vext2 <7,3,3,3>, <7,3,3,3>
+ 2558381366U, // <3,3,7,4>: Cost 3 vext1 <1,3,3,7>, RHS
+ 2732968694U, // <3,3,7,5>: Cost 3 vext3 LHS, <3,7,5,7>
+ 3781126907U, // <3,3,7,6>: Cost 4 vext3 <3,7,6,3>, <3,7,6,3>
+ 3095397376U, // <3,3,7,7>: Cost 3 vtrnr <1,3,5,7>, <1,3,5,7>
+ 2558383918U, // <3,3,7,u>: Cost 3 vext1 <1,3,3,7>, LHS
+ 1496547430U, // <3,3,u,0>: Cost 2 vext1 <3,3,3,3>, LHS
+ 1611893534U, // <3,3,u,1>: Cost 2 vext3 LHS, <3,u,1,2>
+ 1592858504U, // <3,3,u,2>: Cost 2 vext2 <u,2,3,3>, <u,2,3,3>
+ 336380006U, // <3,3,u,3>: Cost 1 vdup3 LHS
+ 1496550710U, // <3,3,u,4>: Cost 2 vext1 <3,3,3,3>, RHS
+ 1611893574U, // <3,3,u,5>: Cost 2 vext3 LHS, <3,u,5,6>
+ 2690280268U, // <3,3,u,6>: Cost 3 vext3 LHS, <3,u,6,3>
+ 2826636841U, // <3,3,u,7>: Cost 3 vuzpr <1,3,1,3>, RHS
+ 336380006U, // <3,3,u,u>: Cost 1 vdup3 LHS
+ 2624798720U, // <3,4,0,0>: Cost 3 vext2 <1,2,3,4>, <0,0,0,0>
+ 1551056998U, // <3,4,0,1>: Cost 2 vext2 <1,2,3,4>, LHS
+ 2624798884U, // <3,4,0,2>: Cost 3 vext2 <1,2,3,4>, <0,2,0,2>
+ 3693232384U, // <3,4,0,3>: Cost 4 vext2 <0,3,3,4>, <0,3,1,4>
+ 2624799058U, // <3,4,0,4>: Cost 3 vext2 <1,2,3,4>, <0,4,1,5>
+ 1659227026U, // <3,4,0,5>: Cost 2 vext3 LHS, <4,0,5,1>
+ 1659227036U, // <3,4,0,6>: Cost 2 vext3 LHS, <4,0,6,2>
+ 3667973382U, // <3,4,0,7>: Cost 4 vext1 <7,3,4,0>, <7,3,4,0>
+ 1551057565U, // <3,4,0,u>: Cost 2 vext2 <1,2,3,4>, LHS
+ 2624799478U, // <3,4,1,0>: Cost 3 vext2 <1,2,3,4>, <1,0,3,2>
+ 2624799540U, // <3,4,1,1>: Cost 3 vext2 <1,2,3,4>, <1,1,1,1>
+ 1551057818U, // <3,4,1,2>: Cost 2 vext2 <1,2,3,4>, <1,2,3,4>
+ 2624799704U, // <3,4,1,3>: Cost 3 vext2 <1,2,3,4>, <1,3,1,3>
+ 2564377910U, // <3,4,1,4>: Cost 3 vext1 <2,3,4,1>, RHS
+ 2689838050U, // <3,4,1,5>: Cost 3 vext3 LHS, <4,1,5,0>
+ 2689838062U, // <3,4,1,6>: Cost 3 vext3 LHS, <4,1,6,3>
+ 2628117807U, // <3,4,1,7>: Cost 3 vext2 <1,7,3,4>, <1,7,3,4>
+ 1555039616U, // <3,4,1,u>: Cost 2 vext2 <1,u,3,4>, <1,u,3,4>
+ 3626180710U, // <3,4,2,0>: Cost 4 vext1 <0,3,4,2>, LHS
+ 2624800298U, // <3,4,2,1>: Cost 3 vext2 <1,2,3,4>, <2,1,4,3>
+ 2624800360U, // <3,4,2,2>: Cost 3 vext2 <1,2,3,4>, <2,2,2,2>
+ 2624800422U, // <3,4,2,3>: Cost 3 vext2 <1,2,3,4>, <2,3,0,1>
+ 2624800514U, // <3,4,2,4>: Cost 3 vext2 <1,2,3,4>, <2,4,1,3>
+ 2709965878U, // <3,4,2,5>: Cost 3 vext3 <4,2,5,3>, <4,2,5,3>
+ 2689838140U, // <3,4,2,6>: Cost 3 vext3 LHS, <4,2,6,0>
+ 2634090504U, // <3,4,2,7>: Cost 3 vext2 <2,7,3,4>, <2,7,3,4>
+ 2689838158U, // <3,4,2,u>: Cost 3 vext3 LHS, <4,2,u,0>
+ 2624800918U, // <3,4,3,0>: Cost 3 vext2 <1,2,3,4>, <3,0,1,2>
+ 2636081403U, // <3,4,3,1>: Cost 3 vext2 <3,1,3,4>, <3,1,3,4>
+ 2636745036U, // <3,4,3,2>: Cost 3 vext2 <3,2,3,4>, <3,2,3,4>
+ 2624801180U, // <3,4,3,3>: Cost 3 vext2 <1,2,3,4>, <3,3,3,3>
+ 2624801232U, // <3,4,3,4>: Cost 3 vext2 <1,2,3,4>, <3,4,0,1>
+ 2905836854U, // <3,4,3,5>: Cost 3 vzipl <3,3,3,3>, RHS
+ 3040054582U, // <3,4,3,6>: Cost 3 vtrnl <3,3,3,3>, RHS
+ 3702524611U, // <3,4,3,7>: Cost 4 vext2 <1,u,3,4>, <3,7,0,1>
+ 2624801566U, // <3,4,3,u>: Cost 3 vext2 <1,2,3,4>, <3,u,1,2>
+ 2564399206U, // <3,4,4,0>: Cost 3 vext1 <2,3,4,4>, LHS
+ 2564400026U, // <3,4,4,1>: Cost 3 vext1 <2,3,4,4>, <1,2,3,4>
+ 2564400845U, // <3,4,4,2>: Cost 3 vext1 <2,3,4,4>, <2,3,4,4>
+ 2570373542U, // <3,4,4,3>: Cost 3 vext1 <3,3,4,4>, <3,3,4,4>
+ 1659227344U, // <3,4,4,4>: Cost 2 vext3 LHS, <4,4,4,4>
+ 1551060278U, // <3,4,4,5>: Cost 2 vext2 <1,2,3,4>, RHS
+ 1659227364U, // <3,4,4,6>: Cost 2 vext3 LHS, <4,4,6,6>
+ 3668006154U, // <3,4,4,7>: Cost 4 vext1 <7,3,4,4>, <7,3,4,4>
+ 1551060521U, // <3,4,4,u>: Cost 2 vext2 <1,2,3,4>, RHS
+ 1490665574U, // <3,4,5,0>: Cost 2 vext1 <2,3,4,5>, LHS
+ 2689838341U, // <3,4,5,1>: Cost 3 vext3 LHS, <4,5,1,3>
+ 1490667214U, // <3,4,5,2>: Cost 2 vext1 <2,3,4,5>, <2,3,4,5>
+ 2564409494U, // <3,4,5,3>: Cost 3 vext1 <2,3,4,5>, <3,0,1,2>
+ 1490668854U, // <3,4,5,4>: Cost 2 vext1 <2,3,4,5>, RHS
+ 2689838381U, // <3,4,5,5>: Cost 3 vext3 LHS, <4,5,5,7>
+ 537709878U, // <3,4,5,6>: Cost 1 vext3 LHS, RHS
+ 2594272523U, // <3,4,5,7>: Cost 3 vext1 <7,3,4,5>, <7,3,4,5>
+ 537709896U, // <3,4,5,u>: Cost 1 vext3 LHS, RHS
+ 2689838411U, // <3,4,6,0>: Cost 3 vext3 LHS, <4,6,0,1>
+ 2558444534U, // <3,4,6,1>: Cost 3 vext1 <1,3,4,6>, <1,3,4,6>
+ 2666607098U, // <3,4,6,2>: Cost 3 vext2 <u,2,3,4>, <6,2,7,3>
+ 2558446082U, // <3,4,6,3>: Cost 3 vext1 <1,3,4,6>, <3,4,5,6>
+ 1659227508U, // <3,4,6,4>: Cost 2 vext3 LHS, <4,6,4,6>
+ 2689838462U, // <3,4,6,5>: Cost 3 vext3 LHS, <4,6,5,7>
+ 2689838471U, // <3,4,6,6>: Cost 3 vext3 LHS, <4,6,6,7>
+ 2657981292U, // <3,4,6,7>: Cost 3 vext2 <6,7,3,4>, <6,7,3,4>
+ 1659227540U, // <3,4,6,u>: Cost 2 vext3 LHS, <4,6,u,2>
+ 2666607610U, // <3,4,7,0>: Cost 3 vext2 <u,2,3,4>, <7,0,1,2>
+ 3702527072U, // <3,4,7,1>: Cost 4 vext2 <1,u,3,4>, <7,1,3,5>
+ 2660635824U, // <3,4,7,2>: Cost 3 vext2 <7,2,3,4>, <7,2,3,4>
+ 3644139945U, // <3,4,7,3>: Cost 4 vext1 <3,3,4,7>, <3,3,4,7>
+ 2666607974U, // <3,4,7,4>: Cost 3 vext2 <u,2,3,4>, <7,4,5,6>
+ 2732969416U, // <3,4,7,5>: Cost 3 vext3 LHS, <4,7,5,0>
+ 2732969425U, // <3,4,7,6>: Cost 3 vext3 LHS, <4,7,6,0>
+ 2666608236U, // <3,4,7,7>: Cost 3 vext2 <u,2,3,4>, <7,7,7,7>
+ 2664617622U, // <3,4,7,u>: Cost 3 vext2 <7,u,3,4>, <7,u,3,4>
+ 1490690150U, // <3,4,u,0>: Cost 2 vext1 <2,3,4,u>, LHS
+ 1551062830U, // <3,4,u,1>: Cost 2 vext2 <1,2,3,4>, LHS
+ 1490691793U, // <3,4,u,2>: Cost 2 vext1 <2,3,4,u>, <2,3,4,u>
+ 2624804796U, // <3,4,u,3>: Cost 3 vext2 <1,2,3,4>, <u,3,0,1>
+ 1490693430U, // <3,4,u,4>: Cost 2 vext1 <2,3,4,u>, RHS
+ 1551063194U, // <3,4,u,5>: Cost 2 vext2 <1,2,3,4>, RHS
+ 537710121U, // <3,4,u,6>: Cost 1 vext3 LHS, RHS
+ 2594297102U, // <3,4,u,7>: Cost 3 vext1 <7,3,4,u>, <7,3,4,u>
+ 537710139U, // <3,4,u,u>: Cost 1 vext3 LHS, RHS
+ 3692576768U, // <3,5,0,0>: Cost 4 vext2 <0,2,3,5>, <0,0,0,0>
+ 2618835046U, // <3,5,0,1>: Cost 3 vext2 <0,2,3,5>, LHS
+ 2618835138U, // <3,5,0,2>: Cost 3 vext2 <0,2,3,5>, <0,2,3,5>
+ 3692577024U, // <3,5,0,3>: Cost 4 vext2 <0,2,3,5>, <0,3,1,4>
+ 2689838690U, // <3,5,0,4>: Cost 3 vext3 LHS, <5,0,4,1>
+ 2732969579U, // <3,5,0,5>: Cost 3 vext3 LHS, <5,0,5,1>
+ 2732969588U, // <3,5,0,6>: Cost 3 vext3 LHS, <5,0,6,1>
+ 2246963055U, // <3,5,0,7>: Cost 3 vrev <5,3,7,0>
+ 2618835613U, // <3,5,0,u>: Cost 3 vext2 <0,2,3,5>, LHS
+ 2594308198U, // <3,5,1,0>: Cost 3 vext1 <7,3,5,1>, LHS
+ 3692577588U, // <3,5,1,1>: Cost 4 vext2 <0,2,3,5>, <1,1,1,1>
+ 2624807835U, // <3,5,1,2>: Cost 3 vext2 <1,2,3,5>, <1,2,3,5>
+ 2625471468U, // <3,5,1,3>: Cost 3 vext2 <1,3,3,5>, <1,3,3,5>
+ 2626135101U, // <3,5,1,4>: Cost 3 vext2 <1,4,3,5>, <1,4,3,5>
+ 2594311888U, // <3,5,1,5>: Cost 3 vext1 <7,3,5,1>, <5,1,7,3>
+ 3699877107U, // <3,5,1,6>: Cost 4 vext2 <1,4,3,5>, <1,6,5,7>
+ 1641680592U, // <3,5,1,7>: Cost 2 vext3 <5,1,7,3>, <5,1,7,3>
+ 1641754329U, // <3,5,1,u>: Cost 2 vext3 <5,1,u,3>, <5,1,u,3>
+ 3692578274U, // <3,5,2,0>: Cost 4 vext2 <0,2,3,5>, <2,0,5,3>
+ 2630116899U, // <3,5,2,1>: Cost 3 vext2 <2,1,3,5>, <2,1,3,5>
+ 3692578408U, // <3,5,2,2>: Cost 4 vext2 <0,2,3,5>, <2,2,2,2>
+ 2625472206U, // <3,5,2,3>: Cost 3 vext2 <1,3,3,5>, <2,3,4,5>
+ 2632107798U, // <3,5,2,4>: Cost 3 vext2 <2,4,3,5>, <2,4,3,5>
+ 2715938575U, // <3,5,2,5>: Cost 3 vext3 <5,2,5,3>, <5,2,5,3>
+ 3692578746U, // <3,5,2,6>: Cost 4 vext2 <0,2,3,5>, <2,6,3,7>
+ 2716086049U, // <3,5,2,7>: Cost 3 vext3 <5,2,7,3>, <5,2,7,3>
+ 2634762330U, // <3,5,2,u>: Cost 3 vext2 <2,u,3,5>, <2,u,3,5>
+ 3692578966U, // <3,5,3,0>: Cost 4 vext2 <0,2,3,5>, <3,0,1,2>
+ 2636089596U, // <3,5,3,1>: Cost 3 vext2 <3,1,3,5>, <3,1,3,5>
+ 3699214668U, // <3,5,3,2>: Cost 4 vext2 <1,3,3,5>, <3,2,3,4>
+ 2638080412U, // <3,5,3,3>: Cost 3 vext2 <3,4,3,5>, <3,3,3,3>
+ 2618837506U, // <3,5,3,4>: Cost 3 vext2 <0,2,3,5>, <3,4,5,6>
+ 2832844494U, // <3,5,3,5>: Cost 3 vuzpr <2,3,4,5>, <2,3,4,5>
+ 4033415682U, // <3,5,3,6>: Cost 4 vzipr <1,1,3,3>, <3,4,5,6>
+ 3095072054U, // <3,5,3,7>: Cost 3 vtrnr <1,3,1,3>, RHS
+ 3095072055U, // <3,5,3,u>: Cost 3 vtrnr <1,3,1,3>, RHS
+ 2600304742U, // <3,5,4,0>: Cost 3 vext1 <u,3,5,4>, LHS
+ 3763580815U, // <3,5,4,1>: Cost 4 vext3 LHS, <5,4,1,5>
+ 2564474582U, // <3,5,4,2>: Cost 3 vext1 <2,3,5,4>, <2,3,5,4>
+ 3699879044U, // <3,5,4,3>: Cost 4 vext2 <1,4,3,5>, <4,3,5,0>
+ 2600308022U, // <3,5,4,4>: Cost 3 vext1 <u,3,5,4>, RHS
+ 2618838326U, // <3,5,4,5>: Cost 3 vext2 <0,2,3,5>, RHS
+ 2772454710U, // <3,5,4,6>: Cost 3 vuzpl <3,4,5,6>, RHS
+ 1659228102U, // <3,5,4,7>: Cost 2 vext3 LHS, <5,4,7,6>
+ 1659228111U, // <3,5,4,u>: Cost 2 vext3 LHS, <5,4,u,6>
+ 2570453094U, // <3,5,5,0>: Cost 3 vext1 <3,3,5,5>, LHS
+ 2624810704U, // <3,5,5,1>: Cost 3 vext2 <1,2,3,5>, <5,1,7,3>
+ 2570454734U, // <3,5,5,2>: Cost 3 vext1 <3,3,5,5>, <2,3,4,5>
+ 2570455472U, // <3,5,5,3>: Cost 3 vext1 <3,3,5,5>, <3,3,5,5>
+ 2570456374U, // <3,5,5,4>: Cost 3 vext1 <3,3,5,5>, RHS
+ 1659228164U, // <3,5,5,5>: Cost 2 vext3 LHS, <5,5,5,5>
+ 2732969998U, // <3,5,5,6>: Cost 3 vext3 LHS, <5,5,6,6>
+ 1659228184U, // <3,5,5,7>: Cost 2 vext3 LHS, <5,5,7,7>
+ 1659228193U, // <3,5,5,u>: Cost 2 vext3 LHS, <5,5,u,7>
+ 2732970020U, // <3,5,6,0>: Cost 3 vext3 LHS, <5,6,0,1>
+ 2732970035U, // <3,5,6,1>: Cost 3 vext3 LHS, <5,6,1,7>
+ 2564490968U, // <3,5,6,2>: Cost 3 vext1 <2,3,5,6>, <2,3,5,6>
+ 2732970050U, // <3,5,6,3>: Cost 3 vext3 LHS, <5,6,3,4>
+ 2732970060U, // <3,5,6,4>: Cost 3 vext3 LHS, <5,6,4,5>
+ 2732970071U, // <3,5,6,5>: Cost 3 vext3 LHS, <5,6,5,7>
+ 2732970080U, // <3,5,6,6>: Cost 3 vext3 LHS, <5,6,6,7>
+ 1659228258U, // <3,5,6,7>: Cost 2 vext3 LHS, <5,6,7,0>
+ 1659228267U, // <3,5,6,u>: Cost 2 vext3 LHS, <5,6,u,0>
+ 1484783718U, // <3,5,7,0>: Cost 2 vext1 <1,3,5,7>, LHS
+ 1484784640U, // <3,5,7,1>: Cost 2 vext1 <1,3,5,7>, <1,3,5,7>
+ 2558527080U, // <3,5,7,2>: Cost 3 vext1 <1,3,5,7>, <2,2,2,2>
+ 2558527638U, // <3,5,7,3>: Cost 3 vext1 <1,3,5,7>, <3,0,1,2>
+ 1484786998U, // <3,5,7,4>: Cost 2 vext1 <1,3,5,7>, RHS
+ 1659228328U, // <3,5,7,5>: Cost 2 vext3 LHS, <5,7,5,7>
+ 2732970154U, // <3,5,7,6>: Cost 3 vext3 LHS, <5,7,6,0>
+ 2558531180U, // <3,5,7,7>: Cost 3 vext1 <1,3,5,7>, <7,7,7,7>
+ 1484789550U, // <3,5,7,u>: Cost 2 vext1 <1,3,5,7>, LHS
+ 1484791910U, // <3,5,u,0>: Cost 2 vext1 <1,3,5,u>, LHS
+ 1484792833U, // <3,5,u,1>: Cost 2 vext1 <1,3,5,u>, <1,3,5,u>
+ 2558535272U, // <3,5,u,2>: Cost 3 vext1 <1,3,5,u>, <2,2,2,2>
+ 2558535830U, // <3,5,u,3>: Cost 3 vext1 <1,3,5,u>, <3,0,1,2>
+ 1484795190U, // <3,5,u,4>: Cost 2 vext1 <1,3,5,u>, RHS
+ 1659228409U, // <3,5,u,5>: Cost 2 vext3 LHS, <5,u,5,7>
+ 2772457626U, // <3,5,u,6>: Cost 3 vuzpl <3,4,5,6>, RHS
+ 1646326023U, // <3,5,u,7>: Cost 2 vext3 <5,u,7,3>, <5,u,7,3>
+ 1484797742U, // <3,5,u,u>: Cost 2 vext1 <1,3,5,u>, LHS
+ 2558541926U, // <3,6,0,0>: Cost 3 vext1 <1,3,6,0>, LHS
+ 2689839393U, // <3,6,0,1>: Cost 3 vext3 LHS, <6,0,1,2>
+ 2689839404U, // <3,6,0,2>: Cost 3 vext3 LHS, <6,0,2,4>
+ 3706519808U, // <3,6,0,3>: Cost 4 vext2 <2,5,3,6>, <0,3,1,4>
+ 2689839420U, // <3,6,0,4>: Cost 3 vext3 LHS, <6,0,4,2>
+ 2732970314U, // <3,6,0,5>: Cost 3 vext3 LHS, <6,0,5,7>
+ 2732970316U, // <3,6,0,6>: Cost 3 vext3 LHS, <6,0,6,0>
+ 2960313654U, // <3,6,0,7>: Cost 3 vzipr <1,2,3,0>, RHS
+ 2689839456U, // <3,6,0,u>: Cost 3 vext3 LHS, <6,0,u,2>
+ 3763581290U, // <3,6,1,0>: Cost 4 vext3 LHS, <6,1,0,3>
+ 3763581297U, // <3,6,1,1>: Cost 4 vext3 LHS, <6,1,1,1>
+ 2624816028U, // <3,6,1,2>: Cost 3 vext2 <1,2,3,6>, <1,2,3,6>
+ 3763581315U, // <3,6,1,3>: Cost 4 vext3 LHS, <6,1,3,1>
+ 2626143294U, // <3,6,1,4>: Cost 3 vext2 <1,4,3,6>, <1,4,3,6>
+ 3763581335U, // <3,6,1,5>: Cost 4 vext3 LHS, <6,1,5,3>
+ 2721321376U, // <3,6,1,6>: Cost 3 vext3 <6,1,6,3>, <6,1,6,3>
+ 2721395113U, // <3,6,1,7>: Cost 3 vext3 <6,1,7,3>, <6,1,7,3>
+ 2628797826U, // <3,6,1,u>: Cost 3 vext2 <1,u,3,6>, <1,u,3,6>
+ 2594390118U, // <3,6,2,0>: Cost 3 vext1 <7,3,6,2>, LHS
+ 2721616324U, // <3,6,2,1>: Cost 3 vext3 <6,2,1,3>, <6,2,1,3>
+ 2630788725U, // <3,6,2,2>: Cost 3 vext2 <2,2,3,6>, <2,2,3,6>
+ 3763581395U, // <3,6,2,3>: Cost 4 vext3 LHS, <6,2,3,0>
+ 2632115991U, // <3,6,2,4>: Cost 3 vext2 <2,4,3,6>, <2,4,3,6>
+ 2632779624U, // <3,6,2,5>: Cost 3 vext2 <2,5,3,6>, <2,5,3,6>
+ 2594394618U, // <3,6,2,6>: Cost 3 vext1 <7,3,6,2>, <6,2,7,3>
+ 1648316922U, // <3,6,2,7>: Cost 2 vext3 <6,2,7,3>, <6,2,7,3>
+ 1648390659U, // <3,6,2,u>: Cost 2 vext3 <6,2,u,3>, <6,2,u,3>
+ 3693914262U, // <3,6,3,0>: Cost 4 vext2 <0,4,3,6>, <3,0,1,2>
+ 3638281176U, // <3,6,3,1>: Cost 4 vext1 <2,3,6,3>, <1,3,1,3>
+ 3696568678U, // <3,6,3,2>: Cost 4 vext2 <0,u,3,6>, <3,2,6,3>
+ 2638088604U, // <3,6,3,3>: Cost 3 vext2 <3,4,3,6>, <3,3,3,3>
+ 2632780290U, // <3,6,3,4>: Cost 3 vext2 <2,5,3,6>, <3,4,5,6>
+ 3712494145U, // <3,6,3,5>: Cost 4 vext2 <3,5,3,6>, <3,5,3,6>
+ 3698559612U, // <3,6,3,6>: Cost 4 vext2 <1,2,3,6>, <3,6,1,2>
+ 2959674678U, // <3,6,3,7>: Cost 3 vzipr <1,1,3,3>, RHS
+ 2959674679U, // <3,6,3,u>: Cost 3 vzipr <1,1,3,3>, RHS
+ 3763581536U, // <3,6,4,0>: Cost 4 vext3 LHS, <6,4,0,6>
+ 2722943590U, // <3,6,4,1>: Cost 3 vext3 <6,4,1,3>, <6,4,1,3>
+ 2732970609U, // <3,6,4,2>: Cost 3 vext3 LHS, <6,4,2,5>
+ 3698560147U, // <3,6,4,3>: Cost 4 vext2 <1,2,3,6>, <4,3,6,6>
+ 2732970628U, // <3,6,4,4>: Cost 3 vext3 LHS, <6,4,4,6>
+ 2689839757U, // <3,6,4,5>: Cost 3 vext3 LHS, <6,4,5,6>
+ 2732970640U, // <3,6,4,6>: Cost 3 vext3 LHS, <6,4,6,0>
+ 2960346422U, // <3,6,4,7>: Cost 3 vzipr <1,2,3,4>, RHS
+ 2689839784U, // <3,6,4,u>: Cost 3 vext3 LHS, <6,4,u,6>
+ 2576498790U, // <3,6,5,0>: Cost 3 vext1 <4,3,6,5>, LHS
+ 3650241270U, // <3,6,5,1>: Cost 4 vext1 <4,3,6,5>, <1,0,3,2>
+ 2732970692U, // <3,6,5,2>: Cost 3 vext3 LHS, <6,5,2,7>
+ 2576501250U, // <3,6,5,3>: Cost 3 vext1 <4,3,6,5>, <3,4,5,6>
+ 2576501906U, // <3,6,5,4>: Cost 3 vext1 <4,3,6,5>, <4,3,6,5>
+ 3650244622U, // <3,6,5,5>: Cost 4 vext1 <4,3,6,5>, <5,5,6,6>
+ 4114633528U, // <3,6,5,6>: Cost 4 vtrnl <3,4,5,6>, <6,6,6,6>
+ 2732970735U, // <3,6,5,7>: Cost 3 vext3 LHS, <6,5,7,5>
+ 2576504622U, // <3,6,5,u>: Cost 3 vext1 <4,3,6,5>, LHS
+ 2732970749U, // <3,6,6,0>: Cost 3 vext3 LHS, <6,6,0,1>
+ 2724270856U, // <3,6,6,1>: Cost 3 vext3 <6,6,1,3>, <6,6,1,3>
+ 2624819706U, // <3,6,6,2>: Cost 3 vext2 <1,2,3,6>, <6,2,7,3>
+ 3656223234U, // <3,6,6,3>: Cost 4 vext1 <5,3,6,6>, <3,4,5,6>
+ 2732970788U, // <3,6,6,4>: Cost 3 vext3 LHS, <6,6,4,4>
+ 2732970800U, // <3,6,6,5>: Cost 3 vext3 LHS, <6,6,5,7>
+ 1659228984U, // <3,6,6,6>: Cost 2 vext3 LHS, <6,6,6,6>
+ 1659228994U, // <3,6,6,7>: Cost 2 vext3 LHS, <6,6,7,7>
+ 1659229003U, // <3,6,6,u>: Cost 2 vext3 LHS, <6,6,u,7>
+ 1659229006U, // <3,6,7,0>: Cost 2 vext3 LHS, <6,7,0,1>
+ 2558600201U, // <3,6,7,1>: Cost 3 vext1 <1,3,6,7>, <1,3,6,7>
+ 2558601146U, // <3,6,7,2>: Cost 3 vext1 <1,3,6,7>, <2,6,3,7>
+ 2725081963U, // <3,6,7,3>: Cost 3 vext3 <6,7,3,3>, <6,7,3,3>
+ 1659229046U, // <3,6,7,4>: Cost 2 vext3 LHS, <6,7,4,5>
+ 2715423611U, // <3,6,7,5>: Cost 3 vext3 <5,1,7,3>, <6,7,5,1>
+ 2722059141U, // <3,6,7,6>: Cost 3 vext3 <6,2,7,3>, <6,7,6,2>
+ 2962361654U, // <3,6,7,7>: Cost 3 vzipr <1,5,3,7>, RHS
+ 1659229078U, // <3,6,7,u>: Cost 2 vext3 LHS, <6,7,u,1>
+ 1659229087U, // <3,6,u,0>: Cost 2 vext3 LHS, <6,u,0,1>
+ 2689840041U, // <3,6,u,1>: Cost 3 vext3 LHS, <6,u,1,2>
+ 2558609339U, // <3,6,u,2>: Cost 3 vext1 <1,3,6,u>, <2,6,3,u>
+ 2576525853U, // <3,6,u,3>: Cost 3 vext1 <4,3,6,u>, <3,4,u,6>
+ 1659229127U, // <3,6,u,4>: Cost 2 vext3 LHS, <6,u,4,5>
+ 2689840081U, // <3,6,u,5>: Cost 3 vext3 LHS, <6,u,5,6>
+ 1659228984U, // <3,6,u,6>: Cost 2 vext3 LHS, <6,6,6,6>
+ 1652298720U, // <3,6,u,7>: Cost 2 vext3 <6,u,7,3>, <6,u,7,3>
+ 1659229159U, // <3,6,u,u>: Cost 2 vext3 LHS, <6,u,u,1>
+ 2626813952U, // <3,7,0,0>: Cost 3 vext2 <1,5,3,7>, <0,0,0,0>
+ 1553072230U, // <3,7,0,1>: Cost 2 vext2 <1,5,3,7>, LHS
+ 2626814116U, // <3,7,0,2>: Cost 3 vext2 <1,5,3,7>, <0,2,0,2>
+ 3700556028U, // <3,7,0,3>: Cost 4 vext2 <1,5,3,7>, <0,3,1,0>
+ 2626814290U, // <3,7,0,4>: Cost 3 vext2 <1,5,3,7>, <0,4,1,5>
+ 2582507375U, // <3,7,0,5>: Cost 3 vext1 <5,3,7,0>, <5,3,7,0>
+ 2588480072U, // <3,7,0,6>: Cost 3 vext1 <6,3,7,0>, <6,3,7,0>
+ 2732971055U, // <3,7,0,7>: Cost 3 vext3 LHS, <7,0,7,1>
+ 1553072797U, // <3,7,0,u>: Cost 2 vext2 <1,5,3,7>, LHS
+ 2626814710U, // <3,7,1,0>: Cost 3 vext2 <1,5,3,7>, <1,0,3,2>
+ 2626814772U, // <3,7,1,1>: Cost 3 vext2 <1,5,3,7>, <1,1,1,1>
+ 2626814870U, // <3,7,1,2>: Cost 3 vext2 <1,5,3,7>, <1,2,3,0>
+ 2625487854U, // <3,7,1,3>: Cost 3 vext2 <1,3,3,7>, <1,3,3,7>
+ 2582514998U, // <3,7,1,4>: Cost 3 vext1 <5,3,7,1>, RHS
+ 1553073296U, // <3,7,1,5>: Cost 2 vext2 <1,5,3,7>, <1,5,3,7>
+ 2627478753U, // <3,7,1,6>: Cost 3 vext2 <1,6,3,7>, <1,6,3,7>
+ 2727367810U, // <3,7,1,7>: Cost 3 vext3 <7,1,7,3>, <7,1,7,3>
+ 1555064195U, // <3,7,1,u>: Cost 2 vext2 <1,u,3,7>, <1,u,3,7>
+ 2588491878U, // <3,7,2,0>: Cost 3 vext1 <6,3,7,2>, LHS
+ 3700557318U, // <3,7,2,1>: Cost 4 vext2 <1,5,3,7>, <2,1,0,3>
+ 2626815592U, // <3,7,2,2>: Cost 3 vext2 <1,5,3,7>, <2,2,2,2>
+ 2626815654U, // <3,7,2,3>: Cost 3 vext2 <1,5,3,7>, <2,3,0,1>
+ 2588495158U, // <3,7,2,4>: Cost 3 vext1 <6,3,7,2>, RHS
+ 2632787817U, // <3,7,2,5>: Cost 3 vext2 <2,5,3,7>, <2,5,3,7>
+ 1559709626U, // <3,7,2,6>: Cost 2 vext2 <2,6,3,7>, <2,6,3,7>
+ 2728031443U, // <3,7,2,7>: Cost 3 vext3 <7,2,7,3>, <7,2,7,3>
+ 1561036892U, // <3,7,2,u>: Cost 2 vext2 <2,u,3,7>, <2,u,3,7>
+ 2626816150U, // <3,7,3,0>: Cost 3 vext2 <1,5,3,7>, <3,0,1,2>
+ 2626816268U, // <3,7,3,1>: Cost 3 vext2 <1,5,3,7>, <3,1,5,3>
+ 2633451878U, // <3,7,3,2>: Cost 3 vext2 <2,6,3,7>, <3,2,6,3>
+ 2626816412U, // <3,7,3,3>: Cost 3 vext2 <1,5,3,7>, <3,3,3,3>
+ 2626816514U, // <3,7,3,4>: Cost 3 vext2 <1,5,3,7>, <3,4,5,6>
+ 2638760514U, // <3,7,3,5>: Cost 3 vext2 <3,5,3,7>, <3,5,3,7>
+ 2639424147U, // <3,7,3,6>: Cost 3 vext2 <3,6,3,7>, <3,6,3,7>
+ 2826961920U, // <3,7,3,7>: Cost 3 vuzpr <1,3,5,7>, <1,3,5,7>
+ 2626816798U, // <3,7,3,u>: Cost 3 vext2 <1,5,3,7>, <3,u,1,2>
+ 2582536294U, // <3,7,4,0>: Cost 3 vext1 <5,3,7,4>, LHS
+ 2582537360U, // <3,7,4,1>: Cost 3 vext1 <5,3,7,4>, <1,5,3,7>
+ 2588510138U, // <3,7,4,2>: Cost 3 vext1 <6,3,7,4>, <2,6,3,7>
+ 3700558996U, // <3,7,4,3>: Cost 4 vext2 <1,5,3,7>, <4,3,6,7>
+ 2582539574U, // <3,7,4,4>: Cost 3 vext1 <5,3,7,4>, RHS
+ 1553075510U, // <3,7,4,5>: Cost 2 vext2 <1,5,3,7>, RHS
+ 2588512844U, // <3,7,4,6>: Cost 3 vext1 <6,3,7,4>, <6,3,7,4>
+ 2564625766U, // <3,7,4,7>: Cost 3 vext1 <2,3,7,4>, <7,4,5,6>
+ 1553075753U, // <3,7,4,u>: Cost 2 vext2 <1,5,3,7>, RHS
+ 2732971398U, // <3,7,5,0>: Cost 3 vext3 LHS, <7,5,0,2>
+ 2626817744U, // <3,7,5,1>: Cost 3 vext2 <1,5,3,7>, <5,1,7,3>
+ 3700559649U, // <3,7,5,2>: Cost 4 vext2 <1,5,3,7>, <5,2,7,3>
+ 2626817903U, // <3,7,5,3>: Cost 3 vext2 <1,5,3,7>, <5,3,7,0>
+ 2258728203U, // <3,7,5,4>: Cost 3 vrev <7,3,4,5>
+ 2732971446U, // <3,7,5,5>: Cost 3 vext3 LHS, <7,5,5,5>
+ 2732971457U, // <3,7,5,6>: Cost 3 vext3 LHS, <7,5,6,7>
+ 2826964278U, // <3,7,5,7>: Cost 3 vuzpr <1,3,5,7>, RHS
+ 2826964279U, // <3,7,5,u>: Cost 3 vuzpr <1,3,5,7>, RHS
+ 2732971478U, // <3,7,6,0>: Cost 3 vext3 LHS, <7,6,0,1>
+ 2732971486U, // <3,7,6,1>: Cost 3 vext3 LHS, <7,6,1,0>
+ 2633454074U, // <3,7,6,2>: Cost 3 vext2 <2,6,3,7>, <6,2,7,3>
+ 2633454152U, // <3,7,6,3>: Cost 3 vext2 <2,6,3,7>, <6,3,7,0>
+ 2732971518U, // <3,7,6,4>: Cost 3 vext3 LHS, <7,6,4,5>
+ 2732971526U, // <3,7,6,5>: Cost 3 vext3 LHS, <7,6,5,4>
+ 2732971537U, // <3,7,6,6>: Cost 3 vext3 LHS, <7,6,6,6>
+ 2732971540U, // <3,7,6,7>: Cost 3 vext3 LHS, <7,6,7,0>
+ 2726041124U, // <3,7,6,u>: Cost 3 vext3 <6,u,7,3>, <7,6,u,7>
+ 2570616934U, // <3,7,7,0>: Cost 3 vext1 <3,3,7,7>, LHS
+ 2570617856U, // <3,7,7,1>: Cost 3 vext1 <3,3,7,7>, <1,3,5,7>
+ 2564646635U, // <3,7,7,2>: Cost 3 vext1 <2,3,7,7>, <2,3,7,7>
+ 2570619332U, // <3,7,7,3>: Cost 3 vext1 <3,3,7,7>, <3,3,7,7>
+ 2570620214U, // <3,7,7,4>: Cost 3 vext1 <3,3,7,7>, RHS
+ 2582564726U, // <3,7,7,5>: Cost 3 vext1 <5,3,7,7>, <5,3,7,7>
+ 2588537423U, // <3,7,7,6>: Cost 3 vext1 <6,3,7,7>, <6,3,7,7>
+ 1659229804U, // <3,7,7,7>: Cost 2 vext3 LHS, <7,7,7,7>
+ 1659229804U, // <3,7,7,u>: Cost 2 vext3 LHS, <7,7,7,7>
+ 2626819795U, // <3,7,u,0>: Cost 3 vext2 <1,5,3,7>, <u,0,1,2>
+ 1553078062U, // <3,7,u,1>: Cost 2 vext2 <1,5,3,7>, LHS
+ 2626819973U, // <3,7,u,2>: Cost 3 vext2 <1,5,3,7>, <u,2,3,0>
+ 2826961565U, // <3,7,u,3>: Cost 3 vuzpr <1,3,5,7>, LHS
+ 2626820159U, // <3,7,u,4>: Cost 3 vext2 <1,5,3,7>, <u,4,5,6>
+ 1553078426U, // <3,7,u,5>: Cost 2 vext2 <1,5,3,7>, RHS
+ 1595545808U, // <3,7,u,6>: Cost 2 vext2 <u,6,3,7>, <u,6,3,7>
+ 1659229804U, // <3,7,u,7>: Cost 2 vext3 LHS, <7,7,7,7>
+ 1553078629U, // <3,7,u,u>: Cost 2 vext2 <1,5,3,7>, LHS
+ 1611448320U, // <3,u,0,0>: Cost 2 vext3 LHS, <0,0,0,0>
+ 1611896531U, // <3,u,0,1>: Cost 2 vext3 LHS, <u,0,1,2>
+ 1659672284U, // <3,u,0,2>: Cost 2 vext3 LHS, <u,0,2,2>
+ 1616099045U, // <3,u,0,3>: Cost 2 vext3 LHS, <u,0,3,2>
+ 2685638381U, // <3,u,0,4>: Cost 3 vext3 LHS, <u,0,4,1>
+ 1663874806U, // <3,u,0,5>: Cost 2 vext3 LHS, <u,0,5,1>
+ 1663874816U, // <3,u,0,6>: Cost 2 vext3 LHS, <u,0,6,2>
+ 2960313672U, // <3,u,0,7>: Cost 3 vzipr <1,2,3,0>, RHS
+ 1611896594U, // <3,u,0,u>: Cost 2 vext3 LHS, <u,0,u,2>
+ 1549763324U, // <3,u,1,0>: Cost 2 vext2 <1,0,3,u>, <1,0,3,u>
+ 1550426957U, // <3,u,1,1>: Cost 2 vext2 <1,1,3,u>, <1,1,3,u>
+ 537712430U, // <3,u,1,2>: Cost 1 vext3 LHS, LHS
+ 1616541495U, // <3,u,1,3>: Cost 2 vext3 LHS, <u,1,3,3>
+ 1490930998U, // <3,u,1,4>: Cost 2 vext1 <2,3,u,1>, RHS
+ 1553081489U, // <3,u,1,5>: Cost 2 vext2 <1,5,3,u>, <1,5,3,u>
+ 2627486946U, // <3,u,1,6>: Cost 3 vext2 <1,6,3,u>, <1,6,3,u>
+ 1659230043U, // <3,u,1,7>: Cost 2 vext3 LHS, <u,1,7,3>
+ 537712484U, // <3,u,1,u>: Cost 1 vext3 LHS, LHS
+ 1611890852U, // <3,u,2,0>: Cost 2 vext3 LHS, <0,2,0,2>
+ 2624833102U, // <3,u,2,1>: Cost 3 vext2 <1,2,3,u>, <2,1,u,3>
+ 1557063287U, // <3,u,2,2>: Cost 2 vext2 <2,2,3,u>, <2,2,3,u>
+ 1616099205U, // <3,u,2,3>: Cost 2 vext3 LHS, <u,2,3,0>
+ 1611890892U, // <3,u,2,4>: Cost 2 vext3 LHS, <0,2,4,6>
+ 2689841054U, // <3,u,2,5>: Cost 3 vext3 LHS, <u,2,5,7>
+ 1559717819U, // <3,u,2,6>: Cost 2 vext2 <2,6,3,u>, <2,6,3,u>
+ 1659230124U, // <3,u,2,7>: Cost 2 vext3 LHS, <u,2,7,3>
+ 1616541618U, // <3,u,2,u>: Cost 2 vext3 LHS, <u,2,u,0>
+ 1611896764U, // <3,u,3,0>: Cost 2 vext3 LHS, <u,3,0,1>
+ 1484973079U, // <3,u,3,1>: Cost 2 vext1 <1,3,u,3>, <1,3,u,3>
+ 2685638607U, // <3,u,3,2>: Cost 3 vext3 LHS, <u,3,2,2>
+ 336380006U, // <3,u,3,3>: Cost 1 vdup3 LHS
+ 1611896804U, // <3,u,3,4>: Cost 2 vext3 LHS, <u,3,4,5>
+ 1616541679U, // <3,u,3,5>: Cost 2 vext3 LHS, <u,3,5,7>
+ 2690283512U, // <3,u,3,6>: Cost 3 vext3 LHS, <u,3,6,7>
+ 2959674696U, // <3,u,3,7>: Cost 3 vzipr <1,1,3,3>, RHS
+ 336380006U, // <3,u,3,u>: Cost 1 vdup3 LHS
+ 2558722150U, // <3,u,4,0>: Cost 3 vext1 <1,3,u,4>, LHS
+ 1659672602U, // <3,u,4,1>: Cost 2 vext3 LHS, <u,4,1,5>
+ 1659672612U, // <3,u,4,2>: Cost 2 vext3 LHS, <u,4,2,6>
+ 2689841196U, // <3,u,4,3>: Cost 3 vext3 LHS, <u,4,3,5>
+ 1659227344U, // <3,u,4,4>: Cost 2 vext3 LHS, <4,4,4,4>
+ 1611896895U, // <3,u,4,5>: Cost 2 vext3 LHS, <u,4,5,6>
+ 1663875144U, // <3,u,4,6>: Cost 2 vext3 LHS, <u,4,6,6>
+ 1659230289U, // <3,u,4,7>: Cost 2 vext3 LHS, <u,4,7,6>
+ 1611896922U, // <3,u,4,u>: Cost 2 vext3 LHS, <u,4,u,6>
+ 1490960486U, // <3,u,5,0>: Cost 2 vext1 <2,3,u,5>, LHS
+ 2689841261U, // <3,u,5,1>: Cost 3 vext3 LHS, <u,5,1,7>
+ 1490962162U, // <3,u,5,2>: Cost 2 vext1 <2,3,u,5>, <2,3,u,5>
+ 1616541823U, // <3,u,5,3>: Cost 2 vext3 LHS, <u,5,3,7>
+ 1490963766U, // <3,u,5,4>: Cost 2 vext1 <2,3,u,5>, RHS
+ 1659228164U, // <3,u,5,5>: Cost 2 vext3 LHS, <5,5,5,5>
+ 537712794U, // <3,u,5,6>: Cost 1 vext3 LHS, RHS
+ 1659230371U, // <3,u,5,7>: Cost 2 vext3 LHS, <u,5,7,7>
+ 537712812U, // <3,u,5,u>: Cost 1 vext3 LHS, RHS
+ 2689841327U, // <3,u,6,0>: Cost 3 vext3 LHS, <u,6,0,1>
+ 2558739482U, // <3,u,6,1>: Cost 3 vext1 <1,3,u,6>, <1,3,u,6>
+ 2689841351U, // <3,u,6,2>: Cost 3 vext3 LHS, <u,6,2,7>
+ 1616099536U, // <3,u,6,3>: Cost 2 vext3 LHS, <u,6,3,7>
+ 1659227508U, // <3,u,6,4>: Cost 2 vext3 LHS, <4,6,4,6>
+ 2690283746U, // <3,u,6,5>: Cost 3 vext3 LHS, <u,6,5,7>
+ 1659228984U, // <3,u,6,6>: Cost 2 vext3 LHS, <6,6,6,6>
+ 1659230445U, // <3,u,6,7>: Cost 2 vext3 LHS, <u,6,7,0>
+ 1616099581U, // <3,u,6,u>: Cost 2 vext3 LHS, <u,6,u,7>
+ 1485004902U, // <3,u,7,0>: Cost 2 vext1 <1,3,u,7>, LHS
+ 1485005851U, // <3,u,7,1>: Cost 2 vext1 <1,3,u,7>, <1,3,u,7>
+ 2558748264U, // <3,u,7,2>: Cost 3 vext1 <1,3,u,7>, <2,2,2,2>
+ 3095397021U, // <3,u,7,3>: Cost 3 vtrnr <1,3,5,7>, LHS
+ 1485008182U, // <3,u,7,4>: Cost 2 vext1 <1,3,u,7>, RHS
+ 1659228328U, // <3,u,7,5>: Cost 2 vext3 LHS, <5,7,5,7>
+ 2722060599U, // <3,u,7,6>: Cost 3 vext3 <6,2,7,3>, <u,7,6,2>
+ 1659229804U, // <3,u,7,7>: Cost 2 vext3 LHS, <7,7,7,7>
+ 1485010734U, // <3,u,7,u>: Cost 2 vext1 <1,3,u,7>, LHS
+ 1616099665U, // <3,u,u,0>: Cost 2 vext3 LHS, <u,u,0,1>
+ 1611897179U, // <3,u,u,1>: Cost 2 vext3 LHS, <u,u,1,2>
+ 537712997U, // <3,u,u,2>: Cost 1 vext3 LHS, LHS
+ 336380006U, // <3,u,u,3>: Cost 1 vdup3 LHS
+ 1616099705U, // <3,u,u,4>: Cost 2 vext3 LHS, <u,u,4,5>
+ 1611897219U, // <3,u,u,5>: Cost 2 vext3 LHS, <u,u,5,6>
+ 537713037U, // <3,u,u,6>: Cost 1 vext3 LHS, RHS
+ 1659230607U, // <3,u,u,7>: Cost 2 vext3 LHS, <u,u,7,0>
+ 537713051U, // <3,u,u,u>: Cost 1 vext3 LHS, LHS
+ 2691907584U, // <4,0,0,0>: Cost 3 vext3 <1,2,3,4>, <0,0,0,0>
+ 2691907594U, // <4,0,0,1>: Cost 3 vext3 <1,2,3,4>, <0,0,1,1>
+ 2691907604U, // <4,0,0,2>: Cost 3 vext3 <1,2,3,4>, <0,0,2,2>
+ 3709862144U, // <4,0,0,3>: Cost 4 vext2 <3,1,4,0>, <0,3,1,4>
+ 2684682280U, // <4,0,0,4>: Cost 3 vext3 <0,0,4,4>, <0,0,4,4>
+ 3694600633U, // <4,0,0,5>: Cost 4 vext2 <0,5,4,0>, <0,5,4,0>
+ 3291431290U, // <4,0,0,6>: Cost 4 vrev <0,4,6,0>
+ 3668342067U, // <4,0,0,7>: Cost 4 vext1 <7,4,0,0>, <7,4,0,0>
+ 2691907657U, // <4,0,0,u>: Cost 3 vext3 <1,2,3,4>, <0,0,u,1>
+ 2570715238U, // <4,0,1,0>: Cost 3 vext1 <3,4,0,1>, LHS
+ 2570716058U, // <4,0,1,1>: Cost 3 vext1 <3,4,0,1>, <1,2,3,4>
+ 1618165862U, // <4,0,1,2>: Cost 2 vext3 <1,2,3,4>, LHS
+ 2570717648U, // <4,0,1,3>: Cost 3 vext1 <3,4,0,1>, <3,4,0,1>
+ 2570718518U, // <4,0,1,4>: Cost 3 vext1 <3,4,0,1>, RHS
+ 2594607206U, // <4,0,1,5>: Cost 3 vext1 <7,4,0,1>, <5,6,7,4>
+ 3662377563U, // <4,0,1,6>: Cost 4 vext1 <6,4,0,1>, <6,4,0,1>
+ 2594608436U, // <4,0,1,7>: Cost 3 vext1 <7,4,0,1>, <7,4,0,1>
+ 1618165916U, // <4,0,1,u>: Cost 2 vext3 <1,2,3,4>, LHS
+ 2685714598U, // <4,0,2,0>: Cost 3 vext3 <0,2,0,4>, <0,2,0,4>
+ 3759530159U, // <4,0,2,1>: Cost 4 vext3 <0,2,1,4>, <0,2,1,4>
+ 2685862072U, // <4,0,2,2>: Cost 3 vext3 <0,2,2,4>, <0,2,2,4>
+ 2631476937U, // <4,0,2,3>: Cost 3 vext2 <2,3,4,0>, <2,3,4,0>
+ 2685714636U, // <4,0,2,4>: Cost 3 vext3 <0,2,0,4>, <0,2,4,6>
+ 3765649622U, // <4,0,2,5>: Cost 4 vext3 <1,2,3,4>, <0,2,5,7>
+ 2686157020U, // <4,0,2,6>: Cost 3 vext3 <0,2,6,4>, <0,2,6,4>
+ 3668358453U, // <4,0,2,7>: Cost 4 vext1 <7,4,0,2>, <7,4,0,2>
+ 2686304494U, // <4,0,2,u>: Cost 3 vext3 <0,2,u,4>, <0,2,u,4>
+ 3632529510U, // <4,0,3,0>: Cost 4 vext1 <1,4,0,3>, LHS
+ 2686451968U, // <4,0,3,1>: Cost 3 vext3 <0,3,1,4>, <0,3,1,4>
+ 2686525705U, // <4,0,3,2>: Cost 3 vext3 <0,3,2,4>, <0,3,2,4>
+ 3760341266U, // <4,0,3,3>: Cost 4 vext3 <0,3,3,4>, <0,3,3,4>
+ 3632532790U, // <4,0,3,4>: Cost 4 vext1 <1,4,0,3>, RHS
+ 3913254606U, // <4,0,3,5>: Cost 4 vuzpr <3,4,5,0>, <2,3,4,5>
+ 3705219740U, // <4,0,3,6>: Cost 4 vext2 <2,3,4,0>, <3,6,4,7>
+ 3713845990U, // <4,0,3,7>: Cost 4 vext2 <3,7,4,0>, <3,7,4,0>
+ 2686451968U, // <4,0,3,u>: Cost 3 vext3 <0,3,1,4>, <0,3,1,4>
+ 2552823910U, // <4,0,4,0>: Cost 3 vext1 <0,4,0,4>, LHS
+ 2691907922U, // <4,0,4,1>: Cost 3 vext3 <1,2,3,4>, <0,4,1,5>
+ 2691907932U, // <4,0,4,2>: Cost 3 vext3 <1,2,3,4>, <0,4,2,6>
+ 3626567830U, // <4,0,4,3>: Cost 4 vext1 <0,4,0,4>, <3,0,1,2>
+ 2552827190U, // <4,0,4,4>: Cost 3 vext1 <0,4,0,4>, RHS
+ 2631478582U, // <4,0,4,5>: Cost 3 vext2 <2,3,4,0>, RHS
+ 3626570017U, // <4,0,4,6>: Cost 4 vext1 <0,4,0,4>, <6,0,1,2>
+ 3668374839U, // <4,0,4,7>: Cost 4 vext1 <7,4,0,4>, <7,4,0,4>
+ 2552829742U, // <4,0,4,u>: Cost 3 vext1 <0,4,0,4>, LHS
+ 2558804070U, // <4,0,5,0>: Cost 3 vext1 <1,4,0,5>, LHS
+ 1839644774U, // <4,0,5,1>: Cost 2 vzipl RHS, LHS
+ 2913386660U, // <4,0,5,2>: Cost 3 vzipl RHS, <0,2,0,2>
+ 2570750420U, // <4,0,5,3>: Cost 3 vext1 <3,4,0,5>, <3,4,0,5>
+ 2558807350U, // <4,0,5,4>: Cost 3 vext1 <1,4,0,5>, RHS
+ 3987128750U, // <4,0,5,5>: Cost 4 vzipl RHS, <0,5,2,7>
+ 3987128822U, // <4,0,5,6>: Cost 4 vzipl RHS, <0,6,1,7>
+ 2594641208U, // <4,0,5,7>: Cost 3 vext1 <7,4,0,5>, <7,4,0,5>
+ 1839645341U, // <4,0,5,u>: Cost 2 vzipl RHS, LHS
+ 2552840294U, // <4,0,6,0>: Cost 3 vext1 <0,4,0,6>, LHS
+ 3047604234U, // <4,0,6,1>: Cost 3 vtrnl RHS, <0,0,1,1>
+ 1973862502U, // <4,0,6,2>: Cost 2 vtrnl RHS, LHS
+ 2570758613U, // <4,0,6,3>: Cost 3 vext1 <3,4,0,6>, <3,4,0,6>
+ 2552843574U, // <4,0,6,4>: Cost 3 vext1 <0,4,0,6>, RHS
+ 2217664887U, // <4,0,6,5>: Cost 3 vrev <0,4,5,6>
+ 3662418528U, // <4,0,6,6>: Cost 4 vext1 <6,4,0,6>, <6,4,0,6>
+ 2658022257U, // <4,0,6,7>: Cost 3 vext2 <6,7,4,0>, <6,7,4,0>
+ 1973862556U, // <4,0,6,u>: Cost 2 vtrnl RHS, LHS
+ 3731764218U, // <4,0,7,0>: Cost 4 vext2 <6,7,4,0>, <7,0,1,2>
+ 3988324454U, // <4,0,7,1>: Cost 4 vzipl <4,7,5,0>, LHS
+ 4122034278U, // <4,0,7,2>: Cost 4 vtrnl <4,6,7,1>, LHS
+ 3735082246U, // <4,0,7,3>: Cost 4 vext2 <7,3,4,0>, <7,3,4,0>
+ 3731764536U, // <4,0,7,4>: Cost 4 vext2 <6,7,4,0>, <7,4,0,5>
+ 3937145718U, // <4,0,7,5>: Cost 4 vuzpr <7,4,5,0>, <6,7,4,5>
+ 3737073145U, // <4,0,7,6>: Cost 4 vext2 <7,6,4,0>, <7,6,4,0>
+ 3731764844U, // <4,0,7,7>: Cost 4 vext2 <6,7,4,0>, <7,7,7,7>
+ 4122034332U, // <4,0,7,u>: Cost 4 vtrnl <4,6,7,1>, LHS
+ 2552856678U, // <4,0,u,0>: Cost 3 vext1 <0,4,0,u>, LHS
+ 1841635430U, // <4,0,u,1>: Cost 2 vzipl RHS, LHS
+ 1618166429U, // <4,0,u,2>: Cost 2 vext3 <1,2,3,4>, LHS
+ 2570774999U, // <4,0,u,3>: Cost 3 vext1 <3,4,0,u>, <3,4,0,u>
+ 2552859958U, // <4,0,u,4>: Cost 3 vext1 <0,4,0,u>, RHS
+ 2631481498U, // <4,0,u,5>: Cost 3 vext2 <2,3,4,0>, RHS
+ 2686157020U, // <4,0,u,6>: Cost 3 vext3 <0,2,6,4>, <0,2,6,4>
+ 2594665787U, // <4,0,u,7>: Cost 3 vext1 <7,4,0,u>, <7,4,0,u>
+ 1618166483U, // <4,0,u,u>: Cost 2 vext3 <1,2,3,4>, LHS
+ 2617548837U, // <4,1,0,0>: Cost 3 vext2 <0,0,4,1>, <0,0,4,1>
+ 2622857318U, // <4,1,0,1>: Cost 3 vext2 <0,u,4,1>, LHS
+ 3693281484U, // <4,1,0,2>: Cost 4 vext2 <0,3,4,1>, <0,2,4,6>
+ 2691908342U, // <4,1,0,3>: Cost 3 vext3 <1,2,3,4>, <1,0,3,2>
+ 2622857554U, // <4,1,0,4>: Cost 3 vext2 <0,u,4,1>, <0,4,1,5>
+ 3764470538U, // <4,1,0,5>: Cost 4 vext3 <1,0,5,4>, <1,0,5,4>
+ 3695272459U, // <4,1,0,6>: Cost 4 vext2 <0,6,4,1>, <0,6,4,1>
+ 3733094980U, // <4,1,0,7>: Cost 4 vext2 <7,0,4,1>, <0,7,1,4>
+ 2622857885U, // <4,1,0,u>: Cost 3 vext2 <0,u,4,1>, LHS
+ 3696599798U, // <4,1,1,0>: Cost 4 vext2 <0,u,4,1>, <1,0,3,2>
+ 2691097399U, // <4,1,1,1>: Cost 3 vext3 <1,1,1,4>, <1,1,1,4>
+ 2631484314U, // <4,1,1,2>: Cost 3 vext2 <2,3,4,1>, <1,2,3,4>
+ 2691908424U, // <4,1,1,3>: Cost 3 vext3 <1,2,3,4>, <1,1,3,3>
+ 3696600125U, // <4,1,1,4>: Cost 4 vext2 <0,u,4,1>, <1,4,3,5>
+ 3696600175U, // <4,1,1,5>: Cost 4 vext2 <0,u,4,1>, <1,5,0,1>
+ 3696600307U, // <4,1,1,6>: Cost 4 vext2 <0,u,4,1>, <1,6,5,7>
+ 3668423997U, // <4,1,1,7>: Cost 4 vext1 <7,4,1,1>, <7,4,1,1>
+ 2691908469U, // <4,1,1,u>: Cost 3 vext3 <1,2,3,4>, <1,1,u,3>
+ 2570797158U, // <4,1,2,0>: Cost 3 vext1 <3,4,1,2>, LHS
+ 2570797978U, // <4,1,2,1>: Cost 3 vext1 <3,4,1,2>, <1,2,3,4>
+ 3696600680U, // <4,1,2,2>: Cost 4 vext2 <0,u,4,1>, <2,2,2,2>
+ 1618166682U, // <4,1,2,3>: Cost 2 vext3 <1,2,3,4>, <1,2,3,4>
+ 2570800438U, // <4,1,2,4>: Cost 3 vext1 <3,4,1,2>, RHS
+ 3765650347U, // <4,1,2,5>: Cost 4 vext3 <1,2,3,4>, <1,2,5,3>
+ 3696601018U, // <4,1,2,6>: Cost 4 vext2 <0,u,4,1>, <2,6,3,7>
+ 3668432190U, // <4,1,2,7>: Cost 4 vext1 <7,4,1,2>, <7,4,1,2>
+ 1618535367U, // <4,1,2,u>: Cost 2 vext3 <1,2,u,4>, <1,2,u,4>
+ 2564833382U, // <4,1,3,0>: Cost 3 vext1 <2,4,1,3>, LHS
+ 2691908568U, // <4,1,3,1>: Cost 3 vext3 <1,2,3,4>, <1,3,1,3>
+ 2691908578U, // <4,1,3,2>: Cost 3 vext3 <1,2,3,4>, <1,3,2,4>
+ 2692572139U, // <4,1,3,3>: Cost 3 vext3 <1,3,3,4>, <1,3,3,4>
+ 2564836662U, // <4,1,3,4>: Cost 3 vext1 <2,4,1,3>, RHS
+ 2691908608U, // <4,1,3,5>: Cost 3 vext3 <1,2,3,4>, <1,3,5,7>
+ 2588725862U, // <4,1,3,6>: Cost 3 vext1 <6,4,1,3>, <6,4,1,3>
+ 3662468090U, // <4,1,3,7>: Cost 4 vext1 <6,4,1,3>, <7,0,1,2>
+ 2691908631U, // <4,1,3,u>: Cost 3 vext3 <1,2,3,4>, <1,3,u,3>
+ 3760194590U, // <4,1,4,0>: Cost 4 vext3 <0,3,1,4>, <1,4,0,1>
+ 3693947874U, // <4,1,4,1>: Cost 4 vext2 <0,4,4,1>, <4,1,5,0>
+ 3765650484U, // <4,1,4,2>: Cost 4 vext3 <1,2,3,4>, <1,4,2,5>
+ 3113877606U, // <4,1,4,3>: Cost 3 vtrnr <4,4,4,4>, LHS
+ 3760194630U, // <4,1,4,4>: Cost 4 vext3 <0,3,1,4>, <1,4,4,5>
+ 2622860598U, // <4,1,4,5>: Cost 3 vext2 <0,u,4,1>, RHS
+ 3297436759U, // <4,1,4,6>: Cost 4 vrev <1,4,6,4>
+ 3800007772U, // <4,1,4,7>: Cost 4 vext3 <7,0,1,4>, <1,4,7,0>
+ 2622860841U, // <4,1,4,u>: Cost 3 vext2 <0,u,4,1>, RHS
+ 1479164006U, // <4,1,5,0>: Cost 2 vext1 <0,4,1,5>, LHS
+ 2552906486U, // <4,1,5,1>: Cost 3 vext1 <0,4,1,5>, <1,0,3,2>
+ 2552907299U, // <4,1,5,2>: Cost 3 vext1 <0,4,1,5>, <2,1,3,5>
+ 2552907926U, // <4,1,5,3>: Cost 3 vext1 <0,4,1,5>, <3,0,1,2>
+ 1479167286U, // <4,1,5,4>: Cost 2 vext1 <0,4,1,5>, RHS
+ 2913387664U, // <4,1,5,5>: Cost 3 vzipl RHS, <1,5,3,7>
+ 2600686074U, // <4,1,5,6>: Cost 3 vext1 <u,4,1,5>, <6,2,7,3>
+ 2600686586U, // <4,1,5,7>: Cost 3 vext1 <u,4,1,5>, <7,0,1,2>
+ 1479169838U, // <4,1,5,u>: Cost 2 vext1 <0,4,1,5>, LHS
+ 2552914022U, // <4,1,6,0>: Cost 3 vext1 <0,4,1,6>, LHS
+ 2558886708U, // <4,1,6,1>: Cost 3 vext1 <1,4,1,6>, <1,1,1,1>
+ 4028205206U, // <4,1,6,2>: Cost 4 vzipr <0,2,4,6>, <3,0,1,2>
+ 3089858662U, // <4,1,6,3>: Cost 3 vtrnr <0,4,2,6>, LHS
+ 2552917302U, // <4,1,6,4>: Cost 3 vext1 <0,4,1,6>, RHS
+ 2223637584U, // <4,1,6,5>: Cost 3 vrev <1,4,5,6>
+ 4121347081U, // <4,1,6,6>: Cost 4 vtrnl RHS, <1,3,6,7>
+ 3721155406U, // <4,1,6,7>: Cost 4 vext2 <5,0,4,1>, <6,7,0,1>
+ 2552919854U, // <4,1,6,u>: Cost 3 vext1 <0,4,1,6>, LHS
+ 2659357716U, // <4,1,7,0>: Cost 3 vext2 <7,0,4,1>, <7,0,4,1>
+ 3733763173U, // <4,1,7,1>: Cost 4 vext2 <7,1,4,1>, <7,1,4,1>
+ 3734426806U, // <4,1,7,2>: Cost 4 vext2 <7,2,4,1>, <7,2,4,1>
+ 2695226671U, // <4,1,7,3>: Cost 3 vext3 <1,7,3,4>, <1,7,3,4>
+ 3721155942U, // <4,1,7,4>: Cost 4 vext2 <5,0,4,1>, <7,4,5,6>
+ 3721155976U, // <4,1,7,5>: Cost 4 vext2 <5,0,4,1>, <7,5,0,4>
+ 3662500458U, // <4,1,7,6>: Cost 4 vext1 <6,4,1,7>, <6,4,1,7>
+ 3721156204U, // <4,1,7,7>: Cost 4 vext2 <5,0,4,1>, <7,7,7,7>
+ 2659357716U, // <4,1,7,u>: Cost 3 vext2 <7,0,4,1>, <7,0,4,1>
+ 1479188582U, // <4,1,u,0>: Cost 2 vext1 <0,4,1,u>, LHS
+ 2552931062U, // <4,1,u,1>: Cost 3 vext1 <0,4,1,u>, <1,0,3,2>
+ 2552931944U, // <4,1,u,2>: Cost 3 vext1 <0,4,1,u>, <2,2,2,2>
+ 1622148480U, // <4,1,u,3>: Cost 2 vext3 <1,u,3,4>, <1,u,3,4>
+ 1479191862U, // <4,1,u,4>: Cost 2 vext1 <0,4,1,u>, RHS
+ 2622863514U, // <4,1,u,5>: Cost 3 vext2 <0,u,4,1>, RHS
+ 2588725862U, // <4,1,u,6>: Cost 3 vext1 <6,4,1,3>, <6,4,1,3>
+ 2600686586U, // <4,1,u,7>: Cost 3 vext1 <u,4,1,5>, <7,0,1,2>
+ 1479194414U, // <4,1,u,u>: Cost 2 vext1 <0,4,1,u>, LHS
+ 2617557030U, // <4,2,0,0>: Cost 3 vext2 <0,0,4,2>, <0,0,4,2>
+ 2622865510U, // <4,2,0,1>: Cost 3 vext2 <0,u,4,2>, LHS
+ 2622865612U, // <4,2,0,2>: Cost 3 vext2 <0,u,4,2>, <0,2,4,6>
+ 3693289753U, // <4,2,0,3>: Cost 4 vext2 <0,3,4,2>, <0,3,4,2>
+ 2635473244U, // <4,2,0,4>: Cost 3 vext2 <3,0,4,2>, <0,4,2,6>
+ 3765650918U, // <4,2,0,5>: Cost 4 vext3 <1,2,3,4>, <2,0,5,7>
+ 2696775148U, // <4,2,0,6>: Cost 3 vext3 <2,0,6,4>, <2,0,6,4>
+ 3695944285U, // <4,2,0,7>: Cost 4 vext2 <0,7,4,2>, <0,7,4,2>
+ 2622866077U, // <4,2,0,u>: Cost 3 vext2 <0,u,4,2>, LHS
+ 3696607990U, // <4,2,1,0>: Cost 4 vext2 <0,u,4,2>, <1,0,3,2>
+ 3696608052U, // <4,2,1,1>: Cost 4 vext2 <0,u,4,2>, <1,1,1,1>
+ 3696608150U, // <4,2,1,2>: Cost 4 vext2 <0,u,4,2>, <1,2,3,0>
+ 3895574630U, // <4,2,1,3>: Cost 4 vuzpr <0,4,u,2>, LHS
+ 2691909162U, // <4,2,1,4>: Cost 3 vext3 <1,2,3,4>, <2,1,4,3>
+ 3696608400U, // <4,2,1,5>: Cost 4 vext2 <0,u,4,2>, <1,5,3,7>
+ 3760784956U, // <4,2,1,6>: Cost 4 vext3 <0,4,0,4>, <2,1,6,3>
+ 3773908549U, // <4,2,1,7>: Cost 5 vext3 <2,5,7,4>, <2,1,7,3>
+ 2691909162U, // <4,2,1,u>: Cost 3 vext3 <1,2,3,4>, <2,1,4,3>
+ 3696608748U, // <4,2,2,0>: Cost 4 vext2 <0,u,4,2>, <2,0,6,4>
+ 3696608828U, // <4,2,2,1>: Cost 4 vext2 <0,u,4,2>, <2,1,6,3>
+ 2691909224U, // <4,2,2,2>: Cost 3 vext3 <1,2,3,4>, <2,2,2,2>
+ 2691909234U, // <4,2,2,3>: Cost 3 vext3 <1,2,3,4>, <2,2,3,3>
+ 3759605368U, // <4,2,2,4>: Cost 4 vext3 <0,2,2,4>, <2,2,4,0>
+ 3696609156U, // <4,2,2,5>: Cost 4 vext2 <0,u,4,2>, <2,5,6,7>
+ 3760785040U, // <4,2,2,6>: Cost 4 vext3 <0,4,0,4>, <2,2,6,6>
+ 3668505927U, // <4,2,2,7>: Cost 4 vext1 <7,4,2,2>, <7,4,2,2>
+ 2691909279U, // <4,2,2,u>: Cost 3 vext3 <1,2,3,4>, <2,2,u,3>
+ 2691909286U, // <4,2,3,0>: Cost 3 vext3 <1,2,3,4>, <2,3,0,1>
+ 3764840111U, // <4,2,3,1>: Cost 4 vext3 <1,1,1,4>, <2,3,1,1>
+ 3765651129U, // <4,2,3,2>: Cost 4 vext3 <1,2,3,4>, <2,3,2,2>
+ 2698544836U, // <4,2,3,3>: Cost 3 vext3 <2,3,3,4>, <2,3,3,4>
+ 2685863630U, // <4,2,3,4>: Cost 3 vext3 <0,2,2,4>, <2,3,4,5>
+ 2698692310U, // <4,2,3,5>: Cost 3 vext3 <2,3,5,4>, <2,3,5,4>
+ 3772507871U, // <4,2,3,6>: Cost 4 vext3 <2,3,6,4>, <2,3,6,4>
+ 2698839784U, // <4,2,3,7>: Cost 3 vext3 <2,3,7,4>, <2,3,7,4>
+ 2691909358U, // <4,2,3,u>: Cost 3 vext3 <1,2,3,4>, <2,3,u,1>
+ 2564915302U, // <4,2,4,0>: Cost 3 vext1 <2,4,2,4>, LHS
+ 2564916122U, // <4,2,4,1>: Cost 3 vext1 <2,4,2,4>, <1,2,3,4>
+ 2564917004U, // <4,2,4,2>: Cost 3 vext1 <2,4,2,4>, <2,4,2,4>
+ 2699208469U, // <4,2,4,3>: Cost 3 vext3 <2,4,3,4>, <2,4,3,4>
+ 2564918582U, // <4,2,4,4>: Cost 3 vext1 <2,4,2,4>, RHS
+ 2622868790U, // <4,2,4,5>: Cost 3 vext2 <0,u,4,2>, RHS
+ 2229667632U, // <4,2,4,6>: Cost 3 vrev <2,4,6,4>
+ 3800082229U, // <4,2,4,7>: Cost 4 vext3 <7,0,2,4>, <2,4,7,0>
+ 2622869033U, // <4,2,4,u>: Cost 3 vext2 <0,u,4,2>, RHS
+ 2552979558U, // <4,2,5,0>: Cost 3 vext1 <0,4,2,5>, LHS
+ 2558952342U, // <4,2,5,1>: Cost 3 vext1 <1,4,2,5>, <1,2,3,0>
+ 2564925032U, // <4,2,5,2>: Cost 3 vext1 <2,4,2,5>, <2,2,2,2>
+ 2967060582U, // <4,2,5,3>: Cost 3 vzipr <2,3,4,5>, LHS
+ 2552982838U, // <4,2,5,4>: Cost 3 vext1 <0,4,2,5>, RHS
+ 3987130190U, // <4,2,5,5>: Cost 4 vzipl RHS, <2,5,0,7>
+ 2913388474U, // <4,2,5,6>: Cost 3 vzipl RHS, <2,6,3,7>
+ 3895577910U, // <4,2,5,7>: Cost 4 vuzpr <0,4,u,2>, RHS
+ 2552985390U, // <4,2,5,u>: Cost 3 vext1 <0,4,2,5>, LHS
+ 1479245926U, // <4,2,6,0>: Cost 2 vext1 <0,4,2,6>, LHS
+ 2552988406U, // <4,2,6,1>: Cost 3 vext1 <0,4,2,6>, <1,0,3,2>
+ 2552989288U, // <4,2,6,2>: Cost 3 vext1 <0,4,2,6>, <2,2,2,2>
+ 2954461286U, // <4,2,6,3>: Cost 3 vzipr <0,2,4,6>, LHS
+ 1479249206U, // <4,2,6,4>: Cost 2 vext1 <0,4,2,6>, RHS
+ 2229610281U, // <4,2,6,5>: Cost 3 vrev <2,4,5,6>
+ 2600767994U, // <4,2,6,6>: Cost 3 vext1 <u,4,2,6>, <6,2,7,3>
+ 2600768506U, // <4,2,6,7>: Cost 3 vext1 <u,4,2,6>, <7,0,1,2>
+ 1479251758U, // <4,2,6,u>: Cost 2 vext1 <0,4,2,6>, LHS
+ 2659365909U, // <4,2,7,0>: Cost 3 vext2 <7,0,4,2>, <7,0,4,2>
+ 3733771366U, // <4,2,7,1>: Cost 4 vext2 <7,1,4,2>, <7,1,4,2>
+ 3734434999U, // <4,2,7,2>: Cost 4 vext2 <7,2,4,2>, <7,2,4,2>
+ 2701199368U, // <4,2,7,3>: Cost 3 vext3 <2,7,3,4>, <2,7,3,4>
+ 4175774618U, // <4,2,7,4>: Cost 4 vtrnr <2,4,5,7>, <1,2,3,4>
+ 3303360298U, // <4,2,7,5>: Cost 4 vrev <2,4,5,7>
+ 3727136217U, // <4,2,7,6>: Cost 4 vext2 <6,0,4,2>, <7,6,0,4>
+ 3727136364U, // <4,2,7,7>: Cost 4 vext2 <6,0,4,2>, <7,7,7,7>
+ 2659365909U, // <4,2,7,u>: Cost 3 vext2 <7,0,4,2>, <7,0,4,2>
+ 1479262310U, // <4,2,u,0>: Cost 2 vext1 <0,4,2,u>, LHS
+ 2553004790U, // <4,2,u,1>: Cost 3 vext1 <0,4,2,u>, <1,0,3,2>
+ 2553005672U, // <4,2,u,2>: Cost 3 vext1 <0,4,2,u>, <2,2,2,2>
+ 2954477670U, // <4,2,u,3>: Cost 3 vzipr <0,2,4,u>, LHS
+ 1479265590U, // <4,2,u,4>: Cost 2 vext1 <0,4,2,u>, RHS
+ 2622871706U, // <4,2,u,5>: Cost 3 vext2 <0,u,4,2>, RHS
+ 2229700404U, // <4,2,u,6>: Cost 3 vrev <2,4,6,u>
+ 2600784890U, // <4,2,u,7>: Cost 3 vext1 <u,4,2,u>, <7,0,1,2>
+ 1479268142U, // <4,2,u,u>: Cost 2 vext1 <0,4,2,u>, LHS
+ 3765651595U, // <4,3,0,0>: Cost 4 vext3 <1,2,3,4>, <3,0,0,0>
+ 2691909782U, // <4,3,0,1>: Cost 3 vext3 <1,2,3,4>, <3,0,1,2>
+ 2702452897U, // <4,3,0,2>: Cost 3 vext3 <3,0,2,4>, <3,0,2,4>
+ 3693297946U, // <4,3,0,3>: Cost 4 vext2 <0,3,4,3>, <0,3,4,3>
+ 3760711856U, // <4,3,0,4>: Cost 4 vext3 <0,3,u,4>, <3,0,4,1>
+ 2235533820U, // <4,3,0,5>: Cost 3 vrev <3,4,5,0>
+ 3309349381U, // <4,3,0,6>: Cost 4 vrev <3,4,6,0>
+ 3668563278U, // <4,3,0,7>: Cost 4 vext1 <7,4,3,0>, <7,4,3,0>
+ 2691909845U, // <4,3,0,u>: Cost 3 vext3 <1,2,3,4>, <3,0,u,2>
+ 2235173328U, // <4,3,1,0>: Cost 3 vrev <3,4,0,1>
+ 3764840678U, // <4,3,1,1>: Cost 4 vext3 <1,1,1,4>, <3,1,1,1>
+ 2630173594U, // <4,3,1,2>: Cost 3 vext2 <2,1,4,3>, <1,2,3,4>
+ 2703190267U, // <4,3,1,3>: Cost 3 vext3 <3,1,3,4>, <3,1,3,4>
+ 3760195840U, // <4,3,1,4>: Cost 4 vext3 <0,3,1,4>, <3,1,4,0>
+ 3765651724U, // <4,3,1,5>: Cost 4 vext3 <1,2,3,4>, <3,1,5,3>
+ 3309357574U, // <4,3,1,6>: Cost 4 vrev <3,4,6,1>
+ 3769633054U, // <4,3,1,7>: Cost 4 vext3 <1,u,3,4>, <3,1,7,3>
+ 2703558952U, // <4,3,1,u>: Cost 3 vext3 <3,1,u,4>, <3,1,u,4>
+ 3626770534U, // <4,3,2,0>: Cost 4 vext1 <0,4,3,2>, LHS
+ 2630174250U, // <4,3,2,1>: Cost 3 vext2 <2,1,4,3>, <2,1,4,3>
+ 3765651777U, // <4,3,2,2>: Cost 4 vext3 <1,2,3,4>, <3,2,2,2>
+ 2703853900U, // <4,3,2,3>: Cost 3 vext3 <3,2,3,4>, <3,2,3,4>
+ 3626773814U, // <4,3,2,4>: Cost 4 vext1 <0,4,3,2>, RHS
+ 2704001374U, // <4,3,2,5>: Cost 3 vext3 <3,2,5,4>, <3,2,5,4>
+ 3765651814U, // <4,3,2,6>: Cost 4 vext3 <1,2,3,4>, <3,2,6,3>
+ 3769633135U, // <4,3,2,7>: Cost 4 vext3 <1,u,3,4>, <3,2,7,3>
+ 2634819681U, // <4,3,2,u>: Cost 3 vext2 <2,u,4,3>, <2,u,4,3>
+ 3765651839U, // <4,3,3,0>: Cost 4 vext3 <1,2,3,4>, <3,3,0,1>
+ 3765651848U, // <4,3,3,1>: Cost 4 vext3 <1,2,3,4>, <3,3,1,1>
+ 3710552404U, // <4,3,3,2>: Cost 4 vext2 <3,2,4,3>, <3,2,4,3>
+ 2691910044U, // <4,3,3,3>: Cost 3 vext3 <1,2,3,4>, <3,3,3,3>
+ 2704591270U, // <4,3,3,4>: Cost 3 vext3 <3,3,4,4>, <3,3,4,4>
+ 3769633202U, // <4,3,3,5>: Cost 4 vext3 <1,u,3,4>, <3,3,5,7>
+ 3703917212U, // <4,3,3,6>: Cost 4 vext2 <2,1,4,3>, <3,6,4,7>
+ 3769633220U, // <4,3,3,7>: Cost 4 vext3 <1,u,3,4>, <3,3,7,7>
+ 2691910044U, // <4,3,3,u>: Cost 3 vext3 <1,2,3,4>, <3,3,3,3>
+ 2691910096U, // <4,3,4,0>: Cost 3 vext3 <1,2,3,4>, <3,4,0,1>
+ 2691910106U, // <4,3,4,1>: Cost 3 vext3 <1,2,3,4>, <3,4,1,2>
+ 2564990741U, // <4,3,4,2>: Cost 3 vext1 <2,4,3,4>, <2,4,3,4>
+ 3765651946U, // <4,3,4,3>: Cost 4 vext3 <1,2,3,4>, <3,4,3,0>
+ 2691910136U, // <4,3,4,4>: Cost 3 vext3 <1,2,3,4>, <3,4,4,5>
+ 2686454274U, // <4,3,4,5>: Cost 3 vext3 <0,3,1,4>, <3,4,5,6>
+ 2235640329U, // <4,3,4,6>: Cost 3 vrev <3,4,6,4>
+ 3801483792U, // <4,3,4,7>: Cost 4 vext3 <7,2,3,4>, <3,4,7,2>
+ 2691910168U, // <4,3,4,u>: Cost 3 vext3 <1,2,3,4>, <3,4,u,1>
+ 2559025254U, // <4,3,5,0>: Cost 3 vext1 <1,4,3,5>, LHS
+ 2559026237U, // <4,3,5,1>: Cost 3 vext1 <1,4,3,5>, <1,4,3,5>
+ 2564998862U, // <4,3,5,2>: Cost 3 vext1 <2,4,3,5>, <2,3,4,5>
+ 2570971548U, // <4,3,5,3>: Cost 3 vext1 <3,4,3,5>, <3,3,3,3>
+ 2559028534U, // <4,3,5,4>: Cost 3 vext1 <1,4,3,5>, RHS
+ 4163519477U, // <4,3,5,5>: Cost 4 vtrnr <0,4,1,5>, <1,3,4,5>
+ 3309390346U, // <4,3,5,6>: Cost 4 vrev <3,4,6,5>
+ 2706139747U, // <4,3,5,7>: Cost 3 vext3 <3,5,7,4>, <3,5,7,4>
+ 2559031086U, // <4,3,5,u>: Cost 3 vext1 <1,4,3,5>, LHS
+ 2559033446U, // <4,3,6,0>: Cost 3 vext1 <1,4,3,6>, LHS
+ 2559034430U, // <4,3,6,1>: Cost 3 vext1 <1,4,3,6>, <1,4,3,6>
+ 2565007127U, // <4,3,6,2>: Cost 3 vext1 <2,4,3,6>, <2,4,3,6>
+ 2570979740U, // <4,3,6,3>: Cost 3 vext1 <3,4,3,6>, <3,3,3,3>
+ 2559036726U, // <4,3,6,4>: Cost 3 vext1 <1,4,3,6>, RHS
+ 1161841154U, // <4,3,6,5>: Cost 2 vrev <3,4,5,6>
+ 4028203932U, // <4,3,6,6>: Cost 4 vzipr <0,2,4,6>, <1,2,3,6>
+ 2706803380U, // <4,3,6,7>: Cost 3 vext3 <3,6,7,4>, <3,6,7,4>
+ 1162062365U, // <4,3,6,u>: Cost 2 vrev <3,4,u,6>
+ 3769633475U, // <4,3,7,0>: Cost 4 vext3 <1,u,3,4>, <3,7,0,1>
+ 3769633488U, // <4,3,7,1>: Cost 4 vext3 <1,u,3,4>, <3,7,1,5>
+ 3638757144U, // <4,3,7,2>: Cost 4 vext1 <2,4,3,7>, <2,4,3,7>
+ 3769633508U, // <4,3,7,3>: Cost 4 vext3 <1,u,3,4>, <3,7,3,7>
+ 3769633515U, // <4,3,7,4>: Cost 4 vext3 <1,u,3,4>, <3,7,4,5>
+ 3769633526U, // <4,3,7,5>: Cost 4 vext3 <1,u,3,4>, <3,7,5,7>
+ 3662647932U, // <4,3,7,6>: Cost 4 vext1 <6,4,3,7>, <6,4,3,7>
+ 3781208837U, // <4,3,7,7>: Cost 4 vext3 <3,7,7,4>, <3,7,7,4>
+ 3769633547U, // <4,3,7,u>: Cost 4 vext3 <1,u,3,4>, <3,7,u,1>
+ 2559049830U, // <4,3,u,0>: Cost 3 vext1 <1,4,3,u>, LHS
+ 2691910430U, // <4,3,u,1>: Cost 3 vext3 <1,2,3,4>, <3,u,1,2>
+ 2565023513U, // <4,3,u,2>: Cost 3 vext1 <2,4,3,u>, <2,4,3,u>
+ 2707835698U, // <4,3,u,3>: Cost 3 vext3 <3,u,3,4>, <3,u,3,4>
+ 2559053110U, // <4,3,u,4>: Cost 3 vext1 <1,4,3,u>, RHS
+ 1161857540U, // <4,3,u,5>: Cost 2 vrev <3,4,5,u>
+ 2235673101U, // <4,3,u,6>: Cost 3 vrev <3,4,6,u>
+ 2708130646U, // <4,3,u,7>: Cost 3 vext3 <3,u,7,4>, <3,u,7,4>
+ 1162078751U, // <4,3,u,u>: Cost 2 vrev <3,4,u,u>
+ 2617573416U, // <4,4,0,0>: Cost 3 vext2 <0,0,4,4>, <0,0,4,4>
+ 1570373734U, // <4,4,0,1>: Cost 2 vext2 <4,4,4,4>, LHS
+ 2779676774U, // <4,4,0,2>: Cost 3 vuzpl <4,6,4,6>, LHS
+ 3760196480U, // <4,4,0,3>: Cost 4 vext3 <0,3,1,4>, <4,0,3,1>
+ 2576977100U, // <4,4,0,4>: Cost 3 vext1 <4,4,4,0>, <4,4,4,0>
+ 2718747538U, // <4,4,0,5>: Cost 3 vext3 <5,6,7,4>, <4,0,5,1>
+ 2718747548U, // <4,4,0,6>: Cost 3 vext3 <5,6,7,4>, <4,0,6,2>
+ 3668637015U, // <4,4,0,7>: Cost 4 vext1 <7,4,4,0>, <7,4,4,0>
+ 1570374301U, // <4,4,0,u>: Cost 2 vext2 <4,4,4,4>, LHS
+ 2644116214U, // <4,4,1,0>: Cost 3 vext2 <4,4,4,4>, <1,0,3,2>
+ 2644116276U, // <4,4,1,1>: Cost 3 vext2 <4,4,4,4>, <1,1,1,1>
+ 2691910602U, // <4,4,1,2>: Cost 3 vext3 <1,2,3,4>, <4,1,2,3>
+ 2644116440U, // <4,4,1,3>: Cost 3 vext2 <4,4,4,4>, <1,3,1,3>
+ 2711227356U, // <4,4,1,4>: Cost 3 vext3 <4,4,4,4>, <4,1,4,3>
+ 2709310438U, // <4,4,1,5>: Cost 3 vext3 <4,1,5,4>, <4,1,5,4>
+ 3765652462U, // <4,4,1,6>: Cost 4 vext3 <1,2,3,4>, <4,1,6,3>
+ 3768970231U, // <4,4,1,7>: Cost 4 vext3 <1,7,3,4>, <4,1,7,3>
+ 2695891968U, // <4,4,1,u>: Cost 3 vext3 <1,u,3,4>, <4,1,u,3>
+ 3703260634U, // <4,4,2,0>: Cost 4 vext2 <2,0,4,4>, <2,0,4,4>
+ 3765652499U, // <4,4,2,1>: Cost 4 vext3 <1,2,3,4>, <4,2,1,4>
+ 2644117096U, // <4,4,2,2>: Cost 3 vext2 <4,4,4,4>, <2,2,2,2>
+ 2631509709U, // <4,4,2,3>: Cost 3 vext2 <2,3,4,4>, <2,3,4,4>
+ 2644117269U, // <4,4,2,4>: Cost 3 vext2 <4,4,4,4>, <2,4,3,4>
+ 3705251698U, // <4,4,2,5>: Cost 4 vext2 <2,3,4,4>, <2,5,4,7>
+ 2710047808U, // <4,4,2,6>: Cost 3 vext3 <4,2,6,4>, <4,2,6,4>
+ 3783863369U, // <4,4,2,7>: Cost 4 vext3 <4,2,7,4>, <4,2,7,4>
+ 2634827874U, // <4,4,2,u>: Cost 3 vext2 <2,u,4,4>, <2,u,4,4>
+ 2644117654U, // <4,4,3,0>: Cost 3 vext2 <4,4,4,4>, <3,0,1,2>
+ 3638797210U, // <4,4,3,1>: Cost 4 vext1 <2,4,4,3>, <1,2,3,4>
+ 3638798082U, // <4,4,3,2>: Cost 4 vext1 <2,4,4,3>, <2,4,1,3>
+ 2637482406U, // <4,4,3,3>: Cost 3 vext2 <3,3,4,4>, <3,3,4,4>
+ 2638146039U, // <4,4,3,4>: Cost 3 vext2 <3,4,4,4>, <3,4,4,4>
+ 3913287374U, // <4,4,3,5>: Cost 4 vuzpr <3,4,5,4>, <2,3,4,5>
+ 3765652625U, // <4,4,3,6>: Cost 4 vext3 <1,2,3,4>, <4,3,6,4>
+ 3713878762U, // <4,4,3,7>: Cost 4 vext2 <3,7,4,4>, <3,7,4,4>
+ 2637482406U, // <4,4,3,u>: Cost 3 vext2 <3,3,4,4>, <3,3,4,4>
+ 1503264870U, // <4,4,4,0>: Cost 2 vext1 <4,4,4,4>, LHS
+ 2577007514U, // <4,4,4,1>: Cost 3 vext1 <4,4,4,4>, <1,2,3,4>
+ 2577008232U, // <4,4,4,2>: Cost 3 vext1 <4,4,4,4>, <2,2,2,2>
+ 2571037175U, // <4,4,4,3>: Cost 3 vext1 <3,4,4,4>, <3,4,4,4>
+ 161926454U, // <4,4,4,4>: Cost 1 vdup0 RHS
+ 1570377014U, // <4,4,4,5>: Cost 2 vext2 <4,4,4,4>, RHS
+ 2779680054U, // <4,4,4,6>: Cost 3 vuzpl <4,6,4,6>, RHS
+ 2594927963U, // <4,4,4,7>: Cost 3 vext1 <7,4,4,4>, <7,4,4,4>
+ 161926454U, // <4,4,4,u>: Cost 1 vdup0 RHS
+ 2571042918U, // <4,4,5,0>: Cost 3 vext1 <3,4,4,5>, LHS
+ 2571043738U, // <4,4,5,1>: Cost 3 vext1 <3,4,4,5>, <1,2,3,4>
+ 3638814495U, // <4,4,5,2>: Cost 4 vext1 <2,4,4,5>, <2,4,4,5>
+ 2571045368U, // <4,4,5,3>: Cost 3 vext1 <3,4,4,5>, <3,4,4,5>
+ 2571046198U, // <4,4,5,4>: Cost 3 vext1 <3,4,4,5>, RHS
+ 1839648054U, // <4,4,5,5>: Cost 2 vzipl RHS, RHS
+ 1618169142U, // <4,4,5,6>: Cost 2 vext3 <1,2,3,4>, RHS
+ 2594936156U, // <4,4,5,7>: Cost 3 vext1 <7,4,4,5>, <7,4,4,5>
+ 1618169160U, // <4,4,5,u>: Cost 2 vext3 <1,2,3,4>, RHS
+ 2553135206U, // <4,4,6,0>: Cost 3 vext1 <0,4,4,6>, LHS
+ 3626877686U, // <4,4,6,1>: Cost 4 vext1 <0,4,4,6>, <1,0,3,2>
+ 2565080782U, // <4,4,6,2>: Cost 3 vext1 <2,4,4,6>, <2,3,4,5>
+ 2571053561U, // <4,4,6,3>: Cost 3 vext1 <3,4,4,6>, <3,4,4,6>
+ 2553138486U, // <4,4,6,4>: Cost 3 vext1 <0,4,4,6>, RHS
+ 2241555675U, // <4,4,6,5>: Cost 3 vrev <4,4,5,6>
+ 1973865782U, // <4,4,6,6>: Cost 2 vtrnl RHS, RHS
+ 2658055029U, // <4,4,6,7>: Cost 3 vext2 <6,7,4,4>, <6,7,4,4>
+ 1973865800U, // <4,4,6,u>: Cost 2 vtrnl RHS, RHS
+ 2644120570U, // <4,4,7,0>: Cost 3 vext2 <4,4,4,4>, <7,0,1,2>
+ 3638829978U, // <4,4,7,1>: Cost 4 vext1 <2,4,4,7>, <1,2,3,4>
+ 3638830881U, // <4,4,7,2>: Cost 4 vext1 <2,4,4,7>, <2,4,4,7>
+ 3735115018U, // <4,4,7,3>: Cost 4 vext2 <7,3,4,4>, <7,3,4,4>
+ 2662036827U, // <4,4,7,4>: Cost 3 vext2 <7,4,4,4>, <7,4,4,4>
+ 2713292236U, // <4,4,7,5>: Cost 3 vext3 <4,7,5,4>, <4,7,5,4>
+ 2713365973U, // <4,4,7,6>: Cost 3 vext3 <4,7,6,4>, <4,7,6,4>
+ 2644121196U, // <4,4,7,7>: Cost 3 vext2 <4,4,4,4>, <7,7,7,7>
+ 2662036827U, // <4,4,7,u>: Cost 3 vext2 <7,4,4,4>, <7,4,4,4>
+ 1503297638U, // <4,4,u,0>: Cost 2 vext1 <4,4,4,u>, LHS
+ 1570379566U, // <4,4,u,1>: Cost 2 vext2 <4,4,4,4>, LHS
+ 2779682606U, // <4,4,u,2>: Cost 3 vuzpl <4,6,4,6>, LHS
+ 2571069947U, // <4,4,u,3>: Cost 3 vext1 <3,4,4,u>, <3,4,4,u>
+ 161926454U, // <4,4,u,4>: Cost 1 vdup0 RHS
+ 1841638710U, // <4,4,u,5>: Cost 2 vzipl RHS, RHS
+ 1618169385U, // <4,4,u,6>: Cost 2 vext3 <1,2,3,4>, RHS
+ 2594960735U, // <4,4,u,7>: Cost 3 vext1 <7,4,4,u>, <7,4,4,u>
+ 161926454U, // <4,4,u,u>: Cost 1 vdup0 RHS
+ 2631516160U, // <4,5,0,0>: Cost 3 vext2 <2,3,4,5>, <0,0,0,0>
+ 1557774438U, // <4,5,0,1>: Cost 2 vext2 <2,3,4,5>, LHS
+ 2618908875U, // <4,5,0,2>: Cost 3 vext2 <0,2,4,5>, <0,2,4,5>
+ 2571078140U, // <4,5,0,3>: Cost 3 vext1 <3,4,5,0>, <3,4,5,0>
+ 2626871634U, // <4,5,0,4>: Cost 3 vext2 <1,5,4,5>, <0,4,1,5>
+ 3705258414U, // <4,5,0,5>: Cost 4 vext2 <2,3,4,5>, <0,5,2,7>
+ 2594968438U, // <4,5,0,6>: Cost 3 vext1 <7,4,5,0>, <6,7,4,5>
+ 2594968928U, // <4,5,0,7>: Cost 3 vext1 <7,4,5,0>, <7,4,5,0>
+ 1557775005U, // <4,5,0,u>: Cost 2 vext2 <2,3,4,5>, LHS
+ 2631516918U, // <4,5,1,0>: Cost 3 vext2 <2,3,4,5>, <1,0,3,2>
+ 2624217939U, // <4,5,1,1>: Cost 3 vext2 <1,1,4,5>, <1,1,4,5>
+ 2631517078U, // <4,5,1,2>: Cost 3 vext2 <2,3,4,5>, <1,2,3,0>
+ 2821341286U, // <4,5,1,3>: Cost 3 vuzpr <0,4,1,5>, LHS
+ 3895086054U, // <4,5,1,4>: Cost 4 vuzpr <0,4,1,5>, <4,1,5,4>
+ 2626872471U, // <4,5,1,5>: Cost 3 vext2 <1,5,4,5>, <1,5,4,5>
+ 3895083131U, // <4,5,1,6>: Cost 4 vuzpr <0,4,1,5>, <0,1,4,6>
+ 2718748368U, // <4,5,1,7>: Cost 3 vext3 <5,6,7,4>, <5,1,7,3>
+ 2821341291U, // <4,5,1,u>: Cost 3 vuzpr <0,4,1,5>, LHS
+ 2571092070U, // <4,5,2,0>: Cost 3 vext1 <3,4,5,2>, LHS
+ 3699287585U, // <4,5,2,1>: Cost 4 vext2 <1,3,4,5>, <2,1,3,3>
+ 2630854269U, // <4,5,2,2>: Cost 3 vext2 <2,2,4,5>, <2,2,4,5>
+ 1557776078U, // <4,5,2,3>: Cost 2 vext2 <2,3,4,5>, <2,3,4,5>
+ 2631517974U, // <4,5,2,4>: Cost 3 vext2 <2,3,4,5>, <2,4,3,5>
+ 3692652384U, // <4,5,2,5>: Cost 4 vext2 <0,2,4,5>, <2,5,2,7>
+ 2631518138U, // <4,5,2,6>: Cost 3 vext2 <2,3,4,5>, <2,6,3,7>
+ 4164013366U, // <4,5,2,7>: Cost 4 vtrnr <0,4,u,2>, RHS
+ 1561094243U, // <4,5,2,u>: Cost 2 vext2 <2,u,4,5>, <2,u,4,5>
+ 2631518358U, // <4,5,3,0>: Cost 3 vext2 <2,3,4,5>, <3,0,1,2>
+ 3895084710U, // <4,5,3,1>: Cost 4 vuzpr <0,4,1,5>, <2,3,0,1>
+ 2631518540U, // <4,5,3,2>: Cost 3 vext2 <2,3,4,5>, <3,2,3,4>
+ 2631518620U, // <4,5,3,3>: Cost 3 vext2 <2,3,4,5>, <3,3,3,3>
+ 2631518716U, // <4,5,3,4>: Cost 3 vext2 <2,3,4,5>, <3,4,5,0>
+ 2631518784U, // <4,5,3,5>: Cost 3 vext2 <2,3,4,5>, <3,5,3,5>
+ 2658060980U, // <4,5,3,6>: Cost 3 vext2 <6,7,4,5>, <3,6,7,4>
+ 2640145131U, // <4,5,3,7>: Cost 3 vext2 <3,7,4,5>, <3,7,4,5>
+ 2631519006U, // <4,5,3,u>: Cost 3 vext2 <2,3,4,5>, <3,u,1,2>
+ 2571108454U, // <4,5,4,0>: Cost 3 vext1 <3,4,5,4>, LHS
+ 3632907342U, // <4,5,4,1>: Cost 4 vext1 <1,4,5,4>, <1,4,5,4>
+ 2571110094U, // <4,5,4,2>: Cost 3 vext1 <3,4,5,4>, <2,3,4,5>
+ 2571110912U, // <4,5,4,3>: Cost 3 vext1 <3,4,5,4>, <3,4,5,4>
+ 2571111734U, // <4,5,4,4>: Cost 3 vext1 <3,4,5,4>, RHS
+ 1557777718U, // <4,5,4,5>: Cost 2 vext2 <2,3,4,5>, RHS
+ 2645454195U, // <4,5,4,6>: Cost 3 vext2 <4,6,4,5>, <4,6,4,5>
+ 2718748614U, // <4,5,4,7>: Cost 3 vext3 <5,6,7,4>, <5,4,7,6>
+ 1557777961U, // <4,5,4,u>: Cost 2 vext2 <2,3,4,5>, RHS
+ 1503346790U, // <4,5,5,0>: Cost 2 vext1 <4,4,5,5>, LHS
+ 2913398480U, // <4,5,5,1>: Cost 3 vzipl RHS, <5,1,7,3>
+ 2631519998U, // <4,5,5,2>: Cost 3 vext2 <2,3,4,5>, <5,2,3,4>
+ 2577090710U, // <4,5,5,3>: Cost 3 vext1 <4,4,5,5>, <3,0,1,2>
+ 1503349978U, // <4,5,5,4>: Cost 2 vext1 <4,4,5,5>, <4,4,5,5>
+ 2631520260U, // <4,5,5,5>: Cost 3 vext2 <2,3,4,5>, <5,5,5,5>
+ 2913390690U, // <4,5,5,6>: Cost 3 vzipl RHS, <5,6,7,0>
+ 2821344566U, // <4,5,5,7>: Cost 3 vuzpr <0,4,1,5>, RHS
+ 1503352622U, // <4,5,5,u>: Cost 2 vext1 <4,4,5,5>, LHS
+ 1497383014U, // <4,5,6,0>: Cost 2 vext1 <3,4,5,6>, LHS
+ 2559181904U, // <4,5,6,1>: Cost 3 vext1 <1,4,5,6>, <1,4,5,6>
+ 2565154601U, // <4,5,6,2>: Cost 3 vext1 <2,4,5,6>, <2,4,5,6>
+ 1497385474U, // <4,5,6,3>: Cost 2 vext1 <3,4,5,6>, <3,4,5,6>
+ 1497386294U, // <4,5,6,4>: Cost 2 vext1 <3,4,5,6>, RHS
+ 3047608324U, // <4,5,6,5>: Cost 3 vtrnl RHS, <5,5,5,5>
+ 2571129656U, // <4,5,6,6>: Cost 3 vext1 <3,4,5,6>, <6,6,6,6>
+ 27705344U, // <4,5,6,7>: Cost 0 copy RHS
+ 27705344U, // <4,5,6,u>: Cost 0 copy RHS
+ 2565161062U, // <4,5,7,0>: Cost 3 vext1 <2,4,5,7>, LHS
+ 2565161882U, // <4,5,7,1>: Cost 3 vext1 <2,4,5,7>, <1,2,3,4>
+ 2565162794U, // <4,5,7,2>: Cost 3 vext1 <2,4,5,7>, <2,4,5,7>
+ 2661381387U, // <4,5,7,3>: Cost 3 vext2 <7,3,4,5>, <7,3,4,5>
+ 2565164342U, // <4,5,7,4>: Cost 3 vext1 <2,4,5,7>, RHS
+ 2718748840U, // <4,5,7,5>: Cost 3 vext3 <5,6,7,4>, <5,7,5,7>
+ 2718748846U, // <4,5,7,6>: Cost 3 vext3 <5,6,7,4>, <5,7,6,4>
+ 2719412407U, // <4,5,7,7>: Cost 3 vext3 <5,7,7,4>, <5,7,7,4>
+ 2565166894U, // <4,5,7,u>: Cost 3 vext1 <2,4,5,7>, LHS
+ 1497399398U, // <4,5,u,0>: Cost 2 vext1 <3,4,5,u>, LHS
+ 1557780270U, // <4,5,u,1>: Cost 2 vext2 <2,3,4,5>, LHS
+ 2631522181U, // <4,5,u,2>: Cost 3 vext2 <2,3,4,5>, <u,2,3,0>
+ 1497401860U, // <4,5,u,3>: Cost 2 vext1 <3,4,5,u>, <3,4,5,u>
+ 1497402678U, // <4,5,u,4>: Cost 2 vext1 <3,4,5,u>, RHS
+ 1557780634U, // <4,5,u,5>: Cost 2 vext2 <2,3,4,5>, RHS
+ 2631522512U, // <4,5,u,6>: Cost 3 vext2 <2,3,4,5>, <u,6,3,7>
+ 27705344U, // <4,5,u,7>: Cost 0 copy RHS
+ 27705344U, // <4,5,u,u>: Cost 0 copy RHS
+ 2618916864U, // <4,6,0,0>: Cost 3 vext2 <0,2,4,6>, <0,0,0,0>
+ 1545175142U, // <4,6,0,1>: Cost 2 vext2 <0,2,4,6>, LHS
+ 1545175244U, // <4,6,0,2>: Cost 2 vext2 <0,2,4,6>, <0,2,4,6>
+ 3692658940U, // <4,6,0,3>: Cost 4 vext2 <0,2,4,6>, <0,3,1,0>
+ 2618917202U, // <4,6,0,4>: Cost 3 vext2 <0,2,4,6>, <0,4,1,5>
+ 3852910806U, // <4,6,0,5>: Cost 4 vuzpl RHS, <0,2,5,7>
+ 2253525648U, // <4,6,0,6>: Cost 3 vrev <6,4,6,0>
+ 4040764726U, // <4,6,0,7>: Cost 4 vzipr <2,3,4,0>, RHS
+ 1545175709U, // <4,6,0,u>: Cost 2 vext2 <0,2,4,6>, LHS
+ 2618917622U, // <4,6,1,0>: Cost 3 vext2 <0,2,4,6>, <1,0,3,2>
+ 2618917684U, // <4,6,1,1>: Cost 3 vext2 <0,2,4,6>, <1,1,1,1>
+ 2618917782U, // <4,6,1,2>: Cost 3 vext2 <0,2,4,6>, <1,2,3,0>
+ 2618917848U, // <4,6,1,3>: Cost 3 vext2 <0,2,4,6>, <1,3,1,3>
+ 3692659773U, // <4,6,1,4>: Cost 4 vext2 <0,2,4,6>, <1,4,3,5>
+ 2618918032U, // <4,6,1,5>: Cost 3 vext2 <0,2,4,6>, <1,5,3,7>
+ 3692659937U, // <4,6,1,6>: Cost 4 vext2 <0,2,4,6>, <1,6,3,7>
+ 4032146742U, // <4,6,1,7>: Cost 4 vzipr <0,u,4,1>, RHS
+ 2618918253U, // <4,6,1,u>: Cost 3 vext2 <0,2,4,6>, <1,u,1,3>
+ 2618918380U, // <4,6,2,0>: Cost 3 vext2 <0,2,4,6>, <2,0,6,4>
+ 2618918460U, // <4,6,2,1>: Cost 3 vext2 <0,2,4,6>, <2,1,6,3>
+ 2618918504U, // <4,6,2,2>: Cost 3 vext2 <0,2,4,6>, <2,2,2,2>
+ 2618918566U, // <4,6,2,3>: Cost 3 vext2 <0,2,4,6>, <2,3,0,1>
+ 2618918679U, // <4,6,2,4>: Cost 3 vext2 <0,2,4,6>, <2,4,3,6>
+ 2618918788U, // <4,6,2,5>: Cost 3 vext2 <0,2,4,6>, <2,5,6,7>
+ 2618918842U, // <4,6,2,6>: Cost 3 vext2 <0,2,4,6>, <2,6,3,7>
+ 2718749178U, // <4,6,2,7>: Cost 3 vext3 <5,6,7,4>, <6,2,7,3>
+ 2618918971U, // <4,6,2,u>: Cost 3 vext2 <0,2,4,6>, <2,u,0,1>
+ 2618919062U, // <4,6,3,0>: Cost 3 vext2 <0,2,4,6>, <3,0,1,2>
+ 2636171526U, // <4,6,3,1>: Cost 3 vext2 <3,1,4,6>, <3,1,4,6>
+ 3692661057U, // <4,6,3,2>: Cost 4 vext2 <0,2,4,6>, <3,2,2,2>
+ 2618919324U, // <4,6,3,3>: Cost 3 vext2 <0,2,4,6>, <3,3,3,3>
+ 2618919426U, // <4,6,3,4>: Cost 3 vext2 <0,2,4,6>, <3,4,5,6>
+ 2638826058U, // <4,6,3,5>: Cost 3 vext2 <3,5,4,6>, <3,5,4,6>
+ 3913303030U, // <4,6,3,6>: Cost 4 vuzpr <3,4,5,6>, <1,3,4,6>
+ 2722730572U, // <4,6,3,7>: Cost 3 vext3 <6,3,7,4>, <6,3,7,4>
+ 2618919710U, // <4,6,3,u>: Cost 3 vext2 <0,2,4,6>, <3,u,1,2>
+ 2565210214U, // <4,6,4,0>: Cost 3 vext1 <2,4,6,4>, LHS
+ 2718749286U, // <4,6,4,1>: Cost 3 vext3 <5,6,7,4>, <6,4,1,3>
+ 2565211952U, // <4,6,4,2>: Cost 3 vext1 <2,4,6,4>, <2,4,6,4>
+ 2571184649U, // <4,6,4,3>: Cost 3 vext1 <3,4,6,4>, <3,4,6,4>
+ 2565213494U, // <4,6,4,4>: Cost 3 vext1 <2,4,6,4>, RHS
+ 1545178422U, // <4,6,4,5>: Cost 2 vext2 <0,2,4,6>, RHS
+ 1705430326U, // <4,6,4,6>: Cost 2 vuzpl RHS, RHS
+ 2595075437U, // <4,6,4,7>: Cost 3 vext1 <7,4,6,4>, <7,4,6,4>
+ 1545178665U, // <4,6,4,u>: Cost 2 vext2 <0,2,4,6>, RHS
+ 2565218406U, // <4,6,5,0>: Cost 3 vext1 <2,4,6,5>, LHS
+ 2645462736U, // <4,6,5,1>: Cost 3 vext2 <4,6,4,6>, <5,1,7,3>
+ 2913399290U, // <4,6,5,2>: Cost 3 vzipl RHS, <6,2,7,3>
+ 3913305394U, // <4,6,5,3>: Cost 4 vuzpr <3,4,5,6>, <4,5,6,3>
+ 2645462982U, // <4,6,5,4>: Cost 3 vext2 <4,6,4,6>, <5,4,7,6>
+ 2779172868U, // <4,6,5,5>: Cost 3 vuzpl RHS, <5,5,5,5>
+ 2913391416U, // <4,6,5,6>: Cost 3 vzipl RHS, <6,6,6,6>
+ 2821426486U, // <4,6,5,7>: Cost 3 vuzpr <0,4,2,6>, RHS
+ 2821426487U, // <4,6,5,u>: Cost 3 vuzpr <0,4,2,6>, RHS
+ 1503428710U, // <4,6,6,0>: Cost 2 vext1 <4,4,6,6>, LHS
+ 2577171190U, // <4,6,6,1>: Cost 3 vext1 <4,4,6,6>, <1,0,3,2>
+ 2645463546U, // <4,6,6,2>: Cost 3 vext2 <4,6,4,6>, <6,2,7,3>
+ 2577172630U, // <4,6,6,3>: Cost 3 vext1 <4,4,6,6>, <3,0,1,2>
+ 1503431908U, // <4,6,6,4>: Cost 2 vext1 <4,4,6,6>, <4,4,6,6>
+ 2253501069U, // <4,6,6,5>: Cost 3 vrev <6,4,5,6>
+ 2618921784U, // <4,6,6,6>: Cost 3 vext2 <0,2,4,6>, <6,6,6,6>
+ 2954464566U, // <4,6,6,7>: Cost 3 vzipr <0,2,4,6>, RHS
+ 1503434542U, // <4,6,6,u>: Cost 2 vext1 <4,4,6,6>, LHS
+ 2645464058U, // <4,6,7,0>: Cost 3 vext2 <4,6,4,6>, <7,0,1,2>
+ 2779173882U, // <4,6,7,1>: Cost 3 vuzpl RHS, <7,0,1,2>
+ 3638978355U, // <4,6,7,2>: Cost 4 vext1 <2,4,6,7>, <2,4,6,7>
+ 2725090156U, // <4,6,7,3>: Cost 3 vext3 <6,7,3,4>, <6,7,3,4>
+ 2645464422U, // <4,6,7,4>: Cost 3 vext2 <4,6,4,6>, <7,4,5,6>
+ 2779174246U, // <4,6,7,5>: Cost 3 vuzpl RHS, <7,4,5,6>
+ 3852915914U, // <4,6,7,6>: Cost 4 vuzpl RHS, <7,2,6,3>
+ 2779174508U, // <4,6,7,7>: Cost 3 vuzpl RHS, <7,7,7,7>
+ 2779173945U, // <4,6,7,u>: Cost 3 vuzpl RHS, <7,0,u,2>
+ 1503445094U, // <4,6,u,0>: Cost 2 vext1 <4,4,6,u>, LHS
+ 1545180974U, // <4,6,u,1>: Cost 2 vext2 <0,2,4,6>, LHS
+ 1705432878U, // <4,6,u,2>: Cost 2 vuzpl RHS, LHS
+ 2618922940U, // <4,6,u,3>: Cost 3 vext2 <0,2,4,6>, <u,3,0,1>
+ 1503448294U, // <4,6,u,4>: Cost 2 vext1 <4,4,6,u>, <4,4,6,u>
+ 1545181338U, // <4,6,u,5>: Cost 2 vext2 <0,2,4,6>, RHS
+ 1705433242U, // <4,6,u,6>: Cost 2 vuzpl RHS, RHS
+ 2954480950U, // <4,6,u,7>: Cost 3 vzipr <0,2,4,u>, RHS
+ 1545181541U, // <4,6,u,u>: Cost 2 vext2 <0,2,4,6>, LHS
+ 3706601472U, // <4,7,0,0>: Cost 4 vext2 <2,5,4,7>, <0,0,0,0>
+ 2632859750U, // <4,7,0,1>: Cost 3 vext2 <2,5,4,7>, LHS
+ 2726343685U, // <4,7,0,2>: Cost 3 vext3 <7,0,2,4>, <7,0,2,4>
+ 3701293312U, // <4,7,0,3>: Cost 4 vext2 <1,6,4,7>, <0,3,1,4>
+ 3706601810U, // <4,7,0,4>: Cost 4 vext2 <2,5,4,7>, <0,4,1,5>
+ 2259424608U, // <4,7,0,5>: Cost 3 vrev <7,4,5,0>
+ 3695321617U, // <4,7,0,6>: Cost 4 vext2 <0,6,4,7>, <0,6,4,7>
+ 3800454194U, // <4,7,0,7>: Cost 4 vext3 <7,0,7,4>, <7,0,7,4>
+ 2632860317U, // <4,7,0,u>: Cost 3 vext2 <2,5,4,7>, LHS
+ 2259064116U, // <4,7,1,0>: Cost 3 vrev <7,4,0,1>
+ 3700630324U, // <4,7,1,1>: Cost 4 vext2 <1,5,4,7>, <1,1,1,1>
+ 2632860570U, // <4,7,1,2>: Cost 3 vext2 <2,5,4,7>, <1,2,3,4>
+ 3769635936U, // <4,7,1,3>: Cost 4 vext3 <1,u,3,4>, <7,1,3,5>
+ 3656920374U, // <4,7,1,4>: Cost 4 vext1 <5,4,7,1>, RHS
+ 3700630681U, // <4,7,1,5>: Cost 4 vext2 <1,5,4,7>, <1,5,4,7>
+ 3701294314U, // <4,7,1,6>: Cost 4 vext2 <1,6,4,7>, <1,6,4,7>
+ 3793818754U, // <4,7,1,7>: Cost 4 vext3 <5,u,7,4>, <7,1,7,3>
+ 2259654012U, // <4,7,1,u>: Cost 3 vrev <7,4,u,1>
+ 3656925286U, // <4,7,2,0>: Cost 4 vext1 <5,4,7,2>, LHS
+ 3706603050U, // <4,7,2,1>: Cost 4 vext2 <2,5,4,7>, <2,1,4,3>
+ 3706603112U, // <4,7,2,2>: Cost 4 vext2 <2,5,4,7>, <2,2,2,2>
+ 2727744688U, // <4,7,2,3>: Cost 3 vext3 <7,2,3,4>, <7,2,3,4>
+ 3705939745U, // <4,7,2,4>: Cost 4 vext2 <2,4,4,7>, <2,4,4,7>
+ 2632861554U, // <4,7,2,5>: Cost 3 vext2 <2,5,4,7>, <2,5,4,7>
+ 3706603450U, // <4,7,2,6>: Cost 4 vext2 <2,5,4,7>, <2,6,3,7>
+ 3792491731U, // <4,7,2,7>: Cost 4 vext3 <5,6,7,4>, <7,2,7,3>
+ 2634852453U, // <4,7,2,u>: Cost 3 vext2 <2,u,4,7>, <2,u,4,7>
+ 3706603670U, // <4,7,3,0>: Cost 4 vext2 <2,5,4,7>, <3,0,1,2>
+ 3662906266U, // <4,7,3,1>: Cost 4 vext1 <6,4,7,3>, <1,2,3,4>
+ 3725183326U, // <4,7,3,2>: Cost 4 vext2 <5,6,4,7>, <3,2,5,4>
+ 3706603932U, // <4,7,3,3>: Cost 4 vext2 <2,5,4,7>, <3,3,3,3>
+ 3701295618U, // <4,7,3,4>: Cost 4 vext2 <1,6,4,7>, <3,4,5,6>
+ 2638834251U, // <4,7,3,5>: Cost 3 vext2 <3,5,4,7>, <3,5,4,7>
+ 2639497884U, // <4,7,3,6>: Cost 3 vext2 <3,6,4,7>, <3,6,4,7>
+ 3802445093U, // <4,7,3,7>: Cost 4 vext3 <7,3,7,4>, <7,3,7,4>
+ 2640825150U, // <4,7,3,u>: Cost 3 vext2 <3,u,4,7>, <3,u,4,7>
+ 2718750004U, // <4,7,4,0>: Cost 3 vext3 <5,6,7,4>, <7,4,0,1>
+ 3706604490U, // <4,7,4,1>: Cost 4 vext2 <2,5,4,7>, <4,1,2,3>
+ 3656943474U, // <4,7,4,2>: Cost 4 vext1 <5,4,7,4>, <2,5,4,7>
+ 3779884371U, // <4,7,4,3>: Cost 4 vext3 <3,5,7,4>, <7,4,3,5>
+ 2259383643U, // <4,7,4,4>: Cost 3 vrev <7,4,4,4>
+ 2632863030U, // <4,7,4,5>: Cost 3 vext2 <2,5,4,7>, RHS
+ 2259531117U, // <4,7,4,6>: Cost 3 vrev <7,4,6,4>
+ 3907340074U, // <4,7,4,7>: Cost 4 vuzpr <2,4,5,7>, <2,4,5,7>
+ 2632863273U, // <4,7,4,u>: Cost 3 vext2 <2,5,4,7>, RHS
+ 2913391610U, // <4,7,5,0>: Cost 3 vzipl RHS, <7,0,1,2>
+ 3645006848U, // <4,7,5,1>: Cost 4 vext1 <3,4,7,5>, <1,3,5,7>
+ 2589181646U, // <4,7,5,2>: Cost 3 vext1 <6,4,7,5>, <2,3,4,5>
+ 3645008403U, // <4,7,5,3>: Cost 4 vext1 <3,4,7,5>, <3,4,7,5>
+ 2913391974U, // <4,7,5,4>: Cost 3 vzipl RHS, <7,4,5,6>
+ 2583211973U, // <4,7,5,5>: Cost 3 vext1 <5,4,7,5>, <5,4,7,5>
+ 2589184670U, // <4,7,5,6>: Cost 3 vext1 <6,4,7,5>, <6,4,7,5>
+ 2913392236U, // <4,7,5,7>: Cost 3 vzipl RHS, <7,7,7,7>
+ 2913392258U, // <4,7,5,u>: Cost 3 vzipl RHS, <7,u,1,2>
+ 1509474406U, // <4,7,6,0>: Cost 2 vext1 <5,4,7,6>, LHS
+ 3047609338U, // <4,7,6,1>: Cost 3 vtrnl RHS, <7,0,1,2>
+ 2583217768U, // <4,7,6,2>: Cost 3 vext1 <5,4,7,6>, <2,2,2,2>
+ 2583218326U, // <4,7,6,3>: Cost 3 vext1 <5,4,7,6>, <3,0,1,2>
+ 1509477686U, // <4,7,6,4>: Cost 2 vext1 <5,4,7,6>, RHS
+ 1509478342U, // <4,7,6,5>: Cost 2 vext1 <5,4,7,6>, <5,4,7,6>
+ 2583220730U, // <4,7,6,6>: Cost 3 vext1 <5,4,7,6>, <6,2,7,3>
+ 3047609964U, // <4,7,6,7>: Cost 3 vtrnl RHS, <7,7,7,7>
+ 1509480238U, // <4,7,6,u>: Cost 2 vext1 <5,4,7,6>, LHS
+ 3650994278U, // <4,7,7,0>: Cost 4 vext1 <4,4,7,7>, LHS
+ 3650995098U, // <4,7,7,1>: Cost 4 vext1 <4,4,7,7>, <1,2,3,4>
+ 3650996010U, // <4,7,7,2>: Cost 4 vext1 <4,4,7,7>, <2,4,5,7>
+ 3804804677U, // <4,7,7,3>: Cost 4 vext3 <7,7,3,4>, <7,7,3,4>
+ 3650997486U, // <4,7,7,4>: Cost 4 vext1 <4,4,7,7>, <4,4,7,7>
+ 2662725039U, // <4,7,7,5>: Cost 3 vext2 <7,5,4,7>, <7,5,4,7>
+ 3662942880U, // <4,7,7,6>: Cost 4 vext1 <6,4,7,7>, <6,4,7,7>
+ 2718750316U, // <4,7,7,7>: Cost 3 vext3 <5,6,7,4>, <7,7,7,7>
+ 2664715938U, // <4,7,7,u>: Cost 3 vext2 <7,u,4,7>, <7,u,4,7>
+ 1509490790U, // <4,7,u,0>: Cost 2 vext1 <5,4,7,u>, LHS
+ 2632865582U, // <4,7,u,1>: Cost 3 vext2 <2,5,4,7>, LHS
+ 2583234152U, // <4,7,u,2>: Cost 3 vext1 <5,4,7,u>, <2,2,2,2>
+ 2583234710U, // <4,7,u,3>: Cost 3 vext1 <5,4,7,u>, <3,0,1,2>
+ 1509494070U, // <4,7,u,4>: Cost 2 vext1 <5,4,7,u>, RHS
+ 1509494728U, // <4,7,u,5>: Cost 2 vext1 <5,4,7,u>, <5,4,7,u>
+ 2583237114U, // <4,7,u,6>: Cost 3 vext1 <5,4,7,u>, <6,2,7,3>
+ 3047757420U, // <4,7,u,7>: Cost 3 vtrnl RHS, <7,7,7,7>
+ 1509496622U, // <4,7,u,u>: Cost 2 vext1 <5,4,7,u>, LHS
+ 2618933248U, // <4,u,0,0>: Cost 3 vext2 <0,2,4,u>, <0,0,0,0>
+ 1545191526U, // <4,u,0,1>: Cost 2 vext2 <0,2,4,u>, LHS
+ 1545191630U, // <4,u,0,2>: Cost 2 vext2 <0,2,4,u>, <0,2,4,u>
+ 2691913445U, // <4,u,0,3>: Cost 3 vext3 <1,2,3,4>, <u,0,3,2>
+ 2618933586U, // <4,u,0,4>: Cost 3 vext2 <0,2,4,u>, <0,4,1,5>
+ 2265397305U, // <4,u,0,5>: Cost 3 vrev <u,4,5,0>
+ 2595189625U, // <4,u,0,6>: Cost 3 vext1 <7,4,u,0>, <6,7,4,u>
+ 2595190139U, // <4,u,0,7>: Cost 3 vext1 <7,4,u,0>, <7,4,u,0>
+ 1545192093U, // <4,u,0,u>: Cost 2 vext2 <0,2,4,u>, LHS
+ 2618934006U, // <4,u,1,0>: Cost 3 vext2 <0,2,4,u>, <1,0,3,2>
+ 2618934068U, // <4,u,1,1>: Cost 3 vext2 <0,2,4,u>, <1,1,1,1>
+ 1618171694U, // <4,u,1,2>: Cost 2 vext3 <1,2,3,4>, LHS
+ 2618934232U, // <4,u,1,3>: Cost 3 vext2 <0,2,4,u>, <1,3,1,3>
+ 2695894848U, // <4,u,1,4>: Cost 3 vext3 <1,u,3,4>, <u,1,4,3>
+ 2618934416U, // <4,u,1,5>: Cost 3 vext2 <0,2,4,u>, <1,5,3,7>
+ 3692676321U, // <4,u,1,6>: Cost 4 vext2 <0,2,4,u>, <1,6,3,7>
+ 2718750555U, // <4,u,1,7>: Cost 3 vext3 <5,6,7,4>, <u,1,7,3>
+ 1618171748U, // <4,u,1,u>: Cost 2 vext3 <1,2,3,4>, LHS
+ 2553397350U, // <4,u,2,0>: Cost 3 vext1 <0,4,u,2>, LHS
+ 2630215215U, // <4,u,2,1>: Cost 3 vext2 <2,1,4,u>, <2,1,4,u>
+ 2618934888U, // <4,u,2,2>: Cost 3 vext2 <0,2,4,u>, <2,2,2,2>
+ 1557800657U, // <4,u,2,3>: Cost 2 vext2 <2,3,4,u>, <2,3,4,u>
+ 2618935065U, // <4,u,2,4>: Cost 3 vext2 <0,2,4,u>, <2,4,3,u>
+ 2733864859U, // <4,u,2,5>: Cost 3 vext3 <u,2,5,4>, <u,2,5,4>
+ 2618935226U, // <4,u,2,6>: Cost 3 vext2 <0,2,4,u>, <2,6,3,7>
+ 2718750636U, // <4,u,2,7>: Cost 3 vext3 <5,6,7,4>, <u,2,7,3>
+ 1561118822U, // <4,u,2,u>: Cost 2 vext2 <2,u,4,u>, <2,u,4,u>
+ 2618935446U, // <4,u,3,0>: Cost 3 vext2 <0,2,4,u>, <3,0,1,2>
+ 2779318422U, // <4,u,3,1>: Cost 3 vuzpl RHS, <3,0,1,2>
+ 2636851545U, // <4,u,3,2>: Cost 3 vext2 <3,2,4,u>, <3,2,4,u>
+ 2618935708U, // <4,u,3,3>: Cost 3 vext2 <0,2,4,u>, <3,3,3,3>
+ 2618935810U, // <4,u,3,4>: Cost 3 vext2 <0,2,4,u>, <3,4,5,6>
+ 2691913711U, // <4,u,3,5>: Cost 3 vext3 <1,2,3,4>, <u,3,5,7>
+ 2588725862U, // <4,u,3,6>: Cost 3 vext1 <6,4,1,3>, <6,4,1,3>
+ 2640169710U, // <4,u,3,7>: Cost 3 vext2 <3,7,4,u>, <3,7,4,u>
+ 2618936094U, // <4,u,3,u>: Cost 3 vext2 <0,2,4,u>, <3,u,1,2>
+ 1503559782U, // <4,u,4,0>: Cost 2 vext1 <4,4,u,4>, LHS
+ 2692282391U, // <4,u,4,1>: Cost 3 vext3 <1,2,u,4>, <u,4,1,2>
+ 2565359426U, // <4,u,4,2>: Cost 3 vext1 <2,4,u,4>, <2,4,u,4>
+ 2571332123U, // <4,u,4,3>: Cost 3 vext1 <3,4,u,4>, <3,4,u,4>
+ 161926454U, // <4,u,4,4>: Cost 1 vdup0 RHS
+ 1545194806U, // <4,u,4,5>: Cost 2 vext2 <0,2,4,u>, RHS
+ 1705577782U, // <4,u,4,6>: Cost 2 vuzpl RHS, RHS
+ 2718750801U, // <4,u,4,7>: Cost 3 vext3 <5,6,7,4>, <u,4,7,6>
+ 161926454U, // <4,u,4,u>: Cost 1 vdup0 RHS
+ 1479164006U, // <4,u,5,0>: Cost 2 vext1 <0,4,1,5>, LHS
+ 1839650606U, // <4,u,5,1>: Cost 2 vzipl RHS, LHS
+ 2565367502U, // <4,u,5,2>: Cost 3 vext1 <2,4,u,5>, <2,3,4,5>
+ 3089777309U, // <4,u,5,3>: Cost 3 vtrnr <0,4,1,5>, LHS
+ 1479167286U, // <4,u,5,4>: Cost 2 vext1 <0,4,1,5>, RHS
+ 1839650970U, // <4,u,5,5>: Cost 2 vzipl RHS, RHS
+ 1618172058U, // <4,u,5,6>: Cost 2 vext3 <1,2,3,4>, RHS
+ 3089780265U, // <4,u,5,7>: Cost 3 vtrnr <0,4,1,5>, RHS
+ 1618172076U, // <4,u,5,u>: Cost 2 vext3 <1,2,3,4>, RHS
+ 1479688294U, // <4,u,6,0>: Cost 2 vext1 <0,4,u,6>, LHS
+ 2553430774U, // <4,u,6,1>: Cost 3 vext1 <0,4,u,6>, <1,0,3,2>
+ 1973868334U, // <4,u,6,2>: Cost 2 vtrnl RHS, LHS
+ 1497606685U, // <4,u,6,3>: Cost 2 vext1 <3,4,u,6>, <3,4,u,6>
+ 1479691574U, // <4,u,6,4>: Cost 2 vext1 <0,4,u,6>, RHS
+ 1509552079U, // <4,u,6,5>: Cost 2 vext1 <5,4,u,6>, <5,4,u,6>
+ 1973868698U, // <4,u,6,6>: Cost 2 vtrnl RHS, RHS
+ 27705344U, // <4,u,6,7>: Cost 0 copy RHS
+ 27705344U, // <4,u,6,u>: Cost 0 copy RHS
+ 2565382246U, // <4,u,7,0>: Cost 3 vext1 <2,4,u,7>, LHS
+ 2565383066U, // <4,u,7,1>: Cost 3 vext1 <2,4,u,7>, <1,2,3,4>
+ 2565384005U, // <4,u,7,2>: Cost 3 vext1 <2,4,u,7>, <2,4,u,7>
+ 2661405966U, // <4,u,7,3>: Cost 3 vext2 <7,3,4,u>, <7,3,4,u>
+ 2565385526U, // <4,u,7,4>: Cost 3 vext1 <2,4,u,7>, RHS
+ 2779321702U, // <4,u,7,5>: Cost 3 vuzpl RHS, <7,4,5,6>
+ 2589274793U, // <4,u,7,6>: Cost 3 vext1 <6,4,u,7>, <6,4,u,7>
+ 2779321964U, // <4,u,7,7>: Cost 3 vuzpl RHS, <7,7,7,7>
+ 2565388078U, // <4,u,7,u>: Cost 3 vext1 <2,4,u,7>, LHS
+ 1479704678U, // <4,u,u,0>: Cost 2 vext1 <0,4,u,u>, LHS
+ 1545197358U, // <4,u,u,1>: Cost 2 vext2 <0,2,4,u>, LHS
+ 1618172261U, // <4,u,u,2>: Cost 2 vext3 <1,2,3,4>, LHS
+ 1497623071U, // <4,u,u,3>: Cost 2 vext1 <3,4,u,u>, <3,4,u,u>
+ 161926454U, // <4,u,u,4>: Cost 1 vdup0 RHS
+ 1545197722U, // <4,u,u,5>: Cost 2 vext2 <0,2,4,u>, RHS
+ 1618172301U, // <4,u,u,6>: Cost 2 vext3 <1,2,3,4>, RHS
+ 27705344U, // <4,u,u,7>: Cost 0 copy RHS
+ 27705344U, // <4,u,u,u>: Cost 0 copy RHS
+ 2687123456U, // <5,0,0,0>: Cost 3 vext3 <0,4,1,5>, <0,0,0,0>
+ 2687123466U, // <5,0,0,1>: Cost 3 vext3 <0,4,1,5>, <0,0,1,1>
+ 2687123476U, // <5,0,0,2>: Cost 3 vext3 <0,4,1,5>, <0,0,2,2>
+ 3710599434U, // <5,0,0,3>: Cost 4 vext2 <3,2,5,0>, <0,3,2,5>
+ 2642166098U, // <5,0,0,4>: Cost 3 vext2 <4,1,5,0>, <0,4,1,5>
+ 3657060306U, // <5,0,0,5>: Cost 4 vext1 <5,5,0,0>, <5,5,0,0>
+ 3292094923U, // <5,0,0,6>: Cost 4 vrev <0,5,6,0>
+ 3669005700U, // <5,0,0,7>: Cost 4 vext1 <7,5,0,0>, <7,5,0,0>
+ 2687123530U, // <5,0,0,u>: Cost 3 vext3 <0,4,1,5>, <0,0,u,2>
+ 2559434854U, // <5,0,1,0>: Cost 3 vext1 <1,5,0,1>, LHS
+ 2559435887U, // <5,0,1,1>: Cost 3 vext1 <1,5,0,1>, <1,5,0,1>
+ 1613381734U, // <5,0,1,2>: Cost 2 vext3 <0,4,1,5>, LHS
+ 3698656256U, // <5,0,1,3>: Cost 4 vext2 <1,2,5,0>, <1,3,5,7>
+ 2559438134U, // <5,0,1,4>: Cost 3 vext1 <1,5,0,1>, RHS
+ 2583326675U, // <5,0,1,5>: Cost 3 vext1 <5,5,0,1>, <5,5,0,1>
+ 3715908851U, // <5,0,1,6>: Cost 4 vext2 <4,1,5,0>, <1,6,5,7>
+ 3657069562U, // <5,0,1,7>: Cost 4 vext1 <5,5,0,1>, <7,0,1,2>
+ 1613381788U, // <5,0,1,u>: Cost 2 vext3 <0,4,1,5>, LHS
+ 2686017700U, // <5,0,2,0>: Cost 3 vext3 <0,2,4,5>, <0,2,0,2>
+ 2685796528U, // <5,0,2,1>: Cost 3 vext3 <0,2,1,5>, <0,2,1,5>
+ 2698625208U, // <5,0,2,2>: Cost 3 vext3 <2,3,4,5>, <0,2,2,4>
+ 2685944002U, // <5,0,2,3>: Cost 3 vext3 <0,2,3,5>, <0,2,3,5>
+ 2686017739U, // <5,0,2,4>: Cost 3 vext3 <0,2,4,5>, <0,2,4,5>
+ 2686091476U, // <5,0,2,5>: Cost 3 vext3 <0,2,5,5>, <0,2,5,5>
+ 2725167324U, // <5,0,2,6>: Cost 3 vext3 <6,7,4,5>, <0,2,6,4>
+ 2595280230U, // <5,0,2,7>: Cost 3 vext1 <7,5,0,2>, <7,4,5,6>
+ 2686312687U, // <5,0,2,u>: Cost 3 vext3 <0,2,u,5>, <0,2,u,5>
+ 3760128248U, // <5,0,3,0>: Cost 4 vext3 <0,3,0,5>, <0,3,0,5>
+ 3759685888U, // <5,0,3,1>: Cost 4 vext3 <0,2,3,5>, <0,3,1,4>
+ 2686533898U, // <5,0,3,2>: Cost 3 vext3 <0,3,2,5>, <0,3,2,5>
+ 3760349459U, // <5,0,3,3>: Cost 4 vext3 <0,3,3,5>, <0,3,3,5>
+ 2638187004U, // <5,0,3,4>: Cost 3 vext2 <3,4,5,0>, <3,4,5,0>
+ 3776348452U, // <5,0,3,5>: Cost 4 vext3 <3,0,4,5>, <0,3,5,4>
+ 3713256094U, // <5,0,3,6>: Cost 4 vext2 <3,6,5,0>, <3,6,5,0>
+ 3914064896U, // <5,0,3,7>: Cost 4 vuzpr <3,5,7,0>, <1,3,5,7>
+ 2686976320U, // <5,0,3,u>: Cost 3 vext3 <0,3,u,5>, <0,3,u,5>
+ 2559459430U, // <5,0,4,0>: Cost 3 vext1 <1,5,0,4>, LHS
+ 1613381970U, // <5,0,4,1>: Cost 2 vext3 <0,4,1,5>, <0,4,1,5>
+ 2687123804U, // <5,0,4,2>: Cost 3 vext3 <0,4,1,5>, <0,4,2,6>
+ 3761013092U, // <5,0,4,3>: Cost 4 vext3 <0,4,3,5>, <0,4,3,5>
+ 2559462710U, // <5,0,4,4>: Cost 3 vext1 <1,5,0,4>, RHS
+ 2638187830U, // <5,0,4,5>: Cost 3 vext2 <3,4,5,0>, RHS
+ 3761234303U, // <5,0,4,6>: Cost 4 vext3 <0,4,6,5>, <0,4,6,5>
+ 2646150600U, // <5,0,4,7>: Cost 3 vext2 <4,7,5,0>, <4,7,5,0>
+ 1613381970U, // <5,0,4,u>: Cost 2 vext3 <0,4,1,5>, <0,4,1,5>
+ 3766763926U, // <5,0,5,0>: Cost 4 vext3 <1,4,0,5>, <0,5,0,1>
+ 2919268454U, // <5,0,5,1>: Cost 3 vzipl <5,5,5,5>, LHS
+ 3053486182U, // <5,0,5,2>: Cost 3 vtrnl <5,5,5,5>, LHS
+ 3723210589U, // <5,0,5,3>: Cost 4 vext2 <5,3,5,0>, <5,3,5,0>
+ 3766763966U, // <5,0,5,4>: Cost 4 vext3 <1,4,0,5>, <0,5,4,5>
+ 2650796031U, // <5,0,5,5>: Cost 3 vext2 <5,5,5,0>, <5,5,5,0>
+ 3719893090U, // <5,0,5,6>: Cost 4 vext2 <4,7,5,0>, <5,6,7,0>
+ 3914067254U, // <5,0,5,7>: Cost 4 vuzpr <3,5,7,0>, RHS
+ 2919269021U, // <5,0,5,u>: Cost 3 vzipl <5,5,5,5>, LHS
+ 4047519744U, // <5,0,6,0>: Cost 4 vzipr <3,4,5,6>, <0,0,0,0>
+ 2920038502U, // <5,0,6,1>: Cost 3 vzipl <5,6,7,0>, LHS
+ 3759759871U, // <5,0,6,2>: Cost 4 vext3 <0,2,4,5>, <0,6,2,7>
+ 3645164070U, // <5,0,6,3>: Cost 4 vext1 <3,5,0,6>, <3,5,0,6>
+ 3762414095U, // <5,0,6,4>: Cost 4 vext3 <0,6,4,5>, <0,6,4,5>
+ 3993780690U, // <5,0,6,5>: Cost 4 vzipl <5,6,7,0>, <0,5,6,7>
+ 3719893816U, // <5,0,6,6>: Cost 4 vext2 <4,7,5,0>, <6,6,6,6>
+ 2662077302U, // <5,0,6,7>: Cost 3 vext2 <7,4,5,0>, <6,7,4,5>
+ 2920039069U, // <5,0,6,u>: Cost 3 vzipl <5,6,7,0>, LHS
+ 2565455974U, // <5,0,7,0>: Cost 3 vext1 <2,5,0,7>, LHS
+ 2565456790U, // <5,0,7,1>: Cost 3 vext1 <2,5,0,7>, <1,2,3,0>
+ 2565457742U, // <5,0,7,2>: Cost 3 vext1 <2,5,0,7>, <2,5,0,7>
+ 3639199894U, // <5,0,7,3>: Cost 4 vext1 <2,5,0,7>, <3,0,1,2>
+ 2565459254U, // <5,0,7,4>: Cost 3 vext1 <2,5,0,7>, RHS
+ 2589347938U, // <5,0,7,5>: Cost 3 vext1 <6,5,0,7>, <5,6,7,0>
+ 2589348530U, // <5,0,7,6>: Cost 3 vext1 <6,5,0,7>, <6,5,0,7>
+ 4188456422U, // <5,0,7,7>: Cost 4 vtrnr RHS, <2,0,5,7>
+ 2565461806U, // <5,0,7,u>: Cost 3 vext1 <2,5,0,7>, LHS
+ 2687124106U, // <5,0,u,0>: Cost 3 vext3 <0,4,1,5>, <0,u,0,2>
+ 1616036502U, // <5,0,u,1>: Cost 2 vext3 <0,u,1,5>, <0,u,1,5>
+ 1613382301U, // <5,0,u,2>: Cost 2 vext3 <0,4,1,5>, LHS
+ 2689925800U, // <5,0,u,3>: Cost 3 vext3 <0,u,3,5>, <0,u,3,5>
+ 2687124146U, // <5,0,u,4>: Cost 3 vext3 <0,4,1,5>, <0,u,4,6>
+ 2638190746U, // <5,0,u,5>: Cost 3 vext2 <3,4,5,0>, RHS
+ 2589356723U, // <5,0,u,6>: Cost 3 vext1 <6,5,0,u>, <6,5,0,u>
+ 2595280230U, // <5,0,u,7>: Cost 3 vext1 <7,5,0,2>, <7,4,5,6>
+ 1613382355U, // <5,0,u,u>: Cost 2 vext3 <0,4,1,5>, LHS
+ 2646818816U, // <5,1,0,0>: Cost 3 vext2 <4,u,5,1>, <0,0,0,0>
+ 1573077094U, // <5,1,0,1>: Cost 2 vext2 <4,u,5,1>, LHS
+ 2646818980U, // <5,1,0,2>: Cost 3 vext2 <4,u,5,1>, <0,2,0,2>
+ 2687124214U, // <5,1,0,3>: Cost 3 vext3 <0,4,1,5>, <1,0,3,2>
+ 2641510738U, // <5,1,0,4>: Cost 3 vext2 <4,0,5,1>, <0,4,1,5>
+ 2641510814U, // <5,1,0,5>: Cost 3 vext2 <4,0,5,1>, <0,5,1,0>
+ 3720561142U, // <5,1,0,6>: Cost 4 vext2 <4,u,5,1>, <0,6,1,7>
+ 3298141357U, // <5,1,0,7>: Cost 4 vrev <1,5,7,0>
+ 1573077661U, // <5,1,0,u>: Cost 2 vext2 <4,u,5,1>, LHS
+ 2223891567U, // <5,1,1,0>: Cost 3 vrev <1,5,0,1>
+ 2687124276U, // <5,1,1,1>: Cost 3 vext3 <0,4,1,5>, <1,1,1,1>
+ 2646819734U, // <5,1,1,2>: Cost 3 vext2 <4,u,5,1>, <1,2,3,0>
+ 2687124296U, // <5,1,1,3>: Cost 3 vext3 <0,4,1,5>, <1,1,3,3>
+ 2691326803U, // <5,1,1,4>: Cost 3 vext3 <1,1,4,5>, <1,1,4,5>
+ 2691400540U, // <5,1,1,5>: Cost 3 vext3 <1,1,5,5>, <1,1,5,5>
+ 3765216101U, // <5,1,1,6>: Cost 4 vext3 <1,1,6,5>, <1,1,6,5>
+ 3765289838U, // <5,1,1,7>: Cost 4 vext3 <1,1,7,5>, <1,1,7,5>
+ 2687124341U, // <5,1,1,u>: Cost 3 vext3 <0,4,1,5>, <1,1,u,3>
+ 3297641584U, // <5,1,2,0>: Cost 4 vrev <1,5,0,2>
+ 3763520391U, // <5,1,2,1>: Cost 4 vext3 <0,u,1,5>, <1,2,1,3>
+ 2646820456U, // <5,1,2,2>: Cost 3 vext2 <4,u,5,1>, <2,2,2,2>
+ 2687124374U, // <5,1,2,3>: Cost 3 vext3 <0,4,1,5>, <1,2,3,0>
+ 2691990436U, // <5,1,2,4>: Cost 3 vext3 <1,2,4,5>, <1,2,4,5>
+ 2687124395U, // <5,1,2,5>: Cost 3 vext3 <0,4,1,5>, <1,2,5,3>
+ 2646820794U, // <5,1,2,6>: Cost 3 vext2 <4,u,5,1>, <2,6,3,7>
+ 3808199610U, // <5,1,2,7>: Cost 4 vext3 <u,3,4,5>, <1,2,7,0>
+ 2687124419U, // <5,1,2,u>: Cost 3 vext3 <0,4,1,5>, <1,2,u,0>
+ 2577440870U, // <5,1,3,0>: Cost 3 vext1 <4,5,1,3>, LHS
+ 2687124440U, // <5,1,3,1>: Cost 3 vext3 <0,4,1,5>, <1,3,1,3>
+ 3759686627U, // <5,1,3,2>: Cost 4 vext3 <0,2,3,5>, <1,3,2,5>
+ 2692580332U, // <5,1,3,3>: Cost 3 vext3 <1,3,3,5>, <1,3,3,5>
+ 2687124469U, // <5,1,3,4>: Cost 3 vext3 <0,4,1,5>, <1,3,4,5>
+ 2685207552U, // <5,1,3,5>: Cost 3 vext3 <0,1,2,5>, <1,3,5,7>
+ 3760866313U, // <5,1,3,6>: Cost 4 vext3 <0,4,1,5>, <1,3,6,7>
+ 2692875280U, // <5,1,3,7>: Cost 3 vext3 <1,3,7,5>, <1,3,7,5>
+ 2687124503U, // <5,1,3,u>: Cost 3 vext3 <0,4,1,5>, <1,3,u,3>
+ 1567771538U, // <5,1,4,0>: Cost 2 vext2 <4,0,5,1>, <4,0,5,1>
+ 2693096491U, // <5,1,4,1>: Cost 3 vext3 <1,4,1,5>, <1,4,1,5>
+ 2693170228U, // <5,1,4,2>: Cost 3 vext3 <1,4,2,5>, <1,4,2,5>
+ 2687124541U, // <5,1,4,3>: Cost 3 vext3 <0,4,1,5>, <1,4,3,5>
+ 2646822096U, // <5,1,4,4>: Cost 3 vext2 <4,u,5,1>, <4,4,4,4>
+ 1573080374U, // <5,1,4,5>: Cost 2 vext2 <4,u,5,1>, RHS
+ 2646822260U, // <5,1,4,6>: Cost 3 vext2 <4,u,5,1>, <4,6,4,6>
+ 3298174129U, // <5,1,4,7>: Cost 4 vrev <1,5,7,4>
+ 1573080602U, // <5,1,4,u>: Cost 2 vext2 <4,u,5,1>, <4,u,5,1>
+ 2687124591U, // <5,1,5,0>: Cost 3 vext3 <0,4,1,5>, <1,5,0,1>
+ 2646822543U, // <5,1,5,1>: Cost 3 vext2 <4,u,5,1>, <5,1,0,1>
+ 3760866433U, // <5,1,5,2>: Cost 4 vext3 <0,4,1,5>, <1,5,2,1>
+ 2687124624U, // <5,1,5,3>: Cost 3 vext3 <0,4,1,5>, <1,5,3,7>
+ 2687124631U, // <5,1,5,4>: Cost 3 vext3 <0,4,1,5>, <1,5,4,5>
+ 2646822916U, // <5,1,5,5>: Cost 3 vext2 <4,u,5,1>, <5,5,5,5>
+ 2646823010U, // <5,1,5,6>: Cost 3 vext2 <4,u,5,1>, <5,6,7,0>
+ 2646823080U, // <5,1,5,7>: Cost 3 vext2 <4,u,5,1>, <5,7,5,7>
+ 2687124663U, // <5,1,5,u>: Cost 3 vext3 <0,4,1,5>, <1,5,u,1>
+ 2553577574U, // <5,1,6,0>: Cost 3 vext1 <0,5,1,6>, LHS
+ 3763520719U, // <5,1,6,1>: Cost 4 vext3 <0,u,1,5>, <1,6,1,7>
+ 2646823418U, // <5,1,6,2>: Cost 3 vext2 <4,u,5,1>, <6,2,7,3>
+ 3760866529U, // <5,1,6,3>: Cost 4 vext3 <0,4,1,5>, <1,6,3,7>
+ 2553580854U, // <5,1,6,4>: Cost 3 vext1 <0,5,1,6>, RHS
+ 2687124723U, // <5,1,6,5>: Cost 3 vext3 <0,4,1,5>, <1,6,5,7>
+ 2646823736U, // <5,1,6,6>: Cost 3 vext2 <4,u,5,1>, <6,6,6,6>
+ 2646823758U, // <5,1,6,7>: Cost 3 vext2 <4,u,5,1>, <6,7,0,1>
+ 2646823839U, // <5,1,6,u>: Cost 3 vext2 <4,u,5,1>, <6,u,0,1>
+ 2559557734U, // <5,1,7,0>: Cost 3 vext1 <1,5,1,7>, LHS
+ 2559558452U, // <5,1,7,1>: Cost 3 vext1 <1,5,1,7>, <1,1,1,1>
+ 2571503270U, // <5,1,7,2>: Cost 3 vext1 <3,5,1,7>, <2,3,0,1>
+ 2040971366U, // <5,1,7,3>: Cost 2 vtrnr RHS, LHS
+ 2559561014U, // <5,1,7,4>: Cost 3 vext1 <1,5,1,7>, RHS
+ 2595393232U, // <5,1,7,5>: Cost 3 vext1 <7,5,1,7>, <5,1,7,3>
+ 4188455035U, // <5,1,7,6>: Cost 4 vtrnr RHS, <0,1,4,6>
+ 2646824556U, // <5,1,7,7>: Cost 3 vext2 <4,u,5,1>, <7,7,7,7>
+ 2040971371U, // <5,1,7,u>: Cost 2 vtrnr RHS, LHS
+ 1591662326U, // <5,1,u,0>: Cost 2 vext2 <u,0,5,1>, <u,0,5,1>
+ 1573082926U, // <5,1,u,1>: Cost 2 vext2 <4,u,5,1>, LHS
+ 2695824760U, // <5,1,u,2>: Cost 3 vext3 <1,u,2,5>, <1,u,2,5>
+ 2040979558U, // <5,1,u,3>: Cost 2 vtrnr RHS, LHS
+ 2687124874U, // <5,1,u,4>: Cost 3 vext3 <0,4,1,5>, <1,u,4,5>
+ 1573083290U, // <5,1,u,5>: Cost 2 vext2 <4,u,5,1>, RHS
+ 2646825168U, // <5,1,u,6>: Cost 3 vext2 <4,u,5,1>, <u,6,3,7>
+ 2646825216U, // <5,1,u,7>: Cost 3 vext2 <4,u,5,1>, <u,7,0,1>
+ 2040979563U, // <5,1,u,u>: Cost 2 vtrnr RHS, LHS
+ 3702652928U, // <5,2,0,0>: Cost 4 vext2 <1,u,5,2>, <0,0,0,0>
+ 2628911206U, // <5,2,0,1>: Cost 3 vext2 <1,u,5,2>, LHS
+ 2641518756U, // <5,2,0,2>: Cost 3 vext2 <4,0,5,2>, <0,2,0,2>
+ 3759760847U, // <5,2,0,3>: Cost 4 vext3 <0,2,4,5>, <2,0,3,2>
+ 3760866775U, // <5,2,0,4>: Cost 4 vext3 <0,4,1,5>, <2,0,4,1>
+ 3759539680U, // <5,2,0,5>: Cost 4 vext3 <0,2,1,5>, <2,0,5,1>
+ 3760866796U, // <5,2,0,6>: Cost 4 vext3 <0,4,1,5>, <2,0,6,4>
+ 3304114054U, // <5,2,0,7>: Cost 4 vrev <2,5,7,0>
+ 2628911773U, // <5,2,0,u>: Cost 3 vext2 <1,u,5,2>, LHS
+ 2623603464U, // <5,2,1,0>: Cost 3 vext2 <1,0,5,2>, <1,0,5,2>
+ 3698008921U, // <5,2,1,1>: Cost 4 vext2 <1,1,5,2>, <1,1,5,2>
+ 3633325603U, // <5,2,1,2>: Cost 4 vext1 <1,5,2,1>, <2,1,3,5>
+ 2687125027U, // <5,2,1,3>: Cost 3 vext3 <0,4,1,5>, <2,1,3,5>
+ 3633327414U, // <5,2,1,4>: Cost 4 vext1 <1,5,2,1>, RHS
+ 3759539760U, // <5,2,1,5>: Cost 4 vext3 <0,2,1,5>, <2,1,5,0>
+ 3760866876U, // <5,2,1,6>: Cost 4 vext3 <0,4,1,5>, <2,1,6,3>
+ 3304122247U, // <5,2,1,7>: Cost 4 vrev <2,5,7,1>
+ 2687125072U, // <5,2,1,u>: Cost 3 vext3 <0,4,1,5>, <2,1,u,5>
+ 3633332326U, // <5,2,2,0>: Cost 4 vext1 <1,5,2,2>, LHS
+ 3759760992U, // <5,2,2,1>: Cost 4 vext3 <0,2,4,5>, <2,2,1,3>
+ 2687125096U, // <5,2,2,2>: Cost 3 vext3 <0,4,1,5>, <2,2,2,2>
+ 2687125106U, // <5,2,2,3>: Cost 3 vext3 <0,4,1,5>, <2,2,3,3>
+ 2697963133U, // <5,2,2,4>: Cost 3 vext3 <2,2,4,5>, <2,2,4,5>
+ 3759466120U, // <5,2,2,5>: Cost 4 vext3 <0,2,0,5>, <2,2,5,7>
+ 3760866960U, // <5,2,2,6>: Cost 4 vext3 <0,4,1,5>, <2,2,6,6>
+ 3771926168U, // <5,2,2,7>: Cost 4 vext3 <2,2,7,5>, <2,2,7,5>
+ 2687125151U, // <5,2,2,u>: Cost 3 vext3 <0,4,1,5>, <2,2,u,3>
+ 2687125158U, // <5,2,3,0>: Cost 3 vext3 <0,4,1,5>, <2,3,0,1>
+ 2698405555U, // <5,2,3,1>: Cost 3 vext3 <2,3,1,5>, <2,3,1,5>
+ 2577516238U, // <5,2,3,2>: Cost 3 vext1 <4,5,2,3>, <2,3,4,5>
+ 3759687365U, // <5,2,3,3>: Cost 4 vext3 <0,2,3,5>, <2,3,3,5>
+ 1624884942U, // <5,2,3,4>: Cost 2 vext3 <2,3,4,5>, <2,3,4,5>
+ 2698700503U, // <5,2,3,5>: Cost 3 vext3 <2,3,5,5>, <2,3,5,5>
+ 3772368608U, // <5,2,3,6>: Cost 4 vext3 <2,3,4,5>, <2,3,6,5>
+ 3702655716U, // <5,2,3,7>: Cost 4 vext2 <1,u,5,2>, <3,7,3,7>
+ 1625179890U, // <5,2,3,u>: Cost 2 vext3 <2,3,u,5>, <2,3,u,5>
+ 2641521555U, // <5,2,4,0>: Cost 3 vext2 <4,0,5,2>, <4,0,5,2>
+ 3772368642U, // <5,2,4,1>: Cost 4 vext3 <2,3,4,5>, <2,4,1,3>
+ 2699142925U, // <5,2,4,2>: Cost 3 vext3 <2,4,2,5>, <2,4,2,5>
+ 2698626838U, // <5,2,4,3>: Cost 3 vext3 <2,3,4,5>, <2,4,3,5>
+ 2698626848U, // <5,2,4,4>: Cost 3 vext3 <2,3,4,5>, <2,4,4,6>
+ 2628914486U, // <5,2,4,5>: Cost 3 vext2 <1,u,5,2>, RHS
+ 2645503353U, // <5,2,4,6>: Cost 3 vext2 <4,6,5,2>, <4,6,5,2>
+ 3304146826U, // <5,2,4,7>: Cost 4 vrev <2,5,7,4>
+ 2628914729U, // <5,2,4,u>: Cost 3 vext2 <1,u,5,2>, RHS
+ 2553643110U, // <5,2,5,0>: Cost 3 vext1 <0,5,2,5>, LHS
+ 3758950227U, // <5,2,5,1>: Cost 4 vext3 <0,1,2,5>, <2,5,1,3>
+ 3759761248U, // <5,2,5,2>: Cost 4 vext3 <0,2,4,5>, <2,5,2,7>
+ 2982396006U, // <5,2,5,3>: Cost 3 vzipr <4,u,5,5>, LHS
+ 2553646390U, // <5,2,5,4>: Cost 3 vext1 <0,5,2,5>, RHS
+ 2553647108U, // <5,2,5,5>: Cost 3 vext1 <0,5,2,5>, <5,5,5,5>
+ 3760867204U, // <5,2,5,6>: Cost 4 vext3 <0,4,1,5>, <2,5,6,7>
+ 3702657141U, // <5,2,5,7>: Cost 4 vext2 <1,u,5,2>, <5,7,0,1>
+ 2982396011U, // <5,2,5,u>: Cost 3 vzipr <4,u,5,5>, LHS
+ 3627393126U, // <5,2,6,0>: Cost 4 vext1 <0,5,2,6>, LHS
+ 3760867236U, // <5,2,6,1>: Cost 4 vext3 <0,4,1,5>, <2,6,1,3>
+ 2645504506U, // <5,2,6,2>: Cost 3 vext2 <4,6,5,2>, <6,2,7,3>
+ 2687125434U, // <5,2,6,3>: Cost 3 vext3 <0,4,1,5>, <2,6,3,7>
+ 2700617665U, // <5,2,6,4>: Cost 3 vext3 <2,6,4,5>, <2,6,4,5>
+ 3760867276U, // <5,2,6,5>: Cost 4 vext3 <0,4,1,5>, <2,6,5,7>
+ 3763521493U, // <5,2,6,6>: Cost 4 vext3 <0,u,1,5>, <2,6,6,7>
+ 3719246670U, // <5,2,6,7>: Cost 4 vext2 <4,6,5,2>, <6,7,0,1>
+ 2687125479U, // <5,2,6,u>: Cost 3 vext3 <0,4,1,5>, <2,6,u,7>
+ 2565603430U, // <5,2,7,0>: Cost 3 vext1 <2,5,2,7>, LHS
+ 2553660150U, // <5,2,7,1>: Cost 3 vext1 <0,5,2,7>, <1,0,3,2>
+ 2565605216U, // <5,2,7,2>: Cost 3 vext1 <2,5,2,7>, <2,5,2,7>
+ 2961178726U, // <5,2,7,3>: Cost 3 vzipr <1,3,5,7>, LHS
+ 2565606710U, // <5,2,7,4>: Cost 3 vext1 <2,5,2,7>, RHS
+ 4034920552U, // <5,2,7,5>: Cost 4 vzipr <1,3,5,7>, <0,1,2,5>
+ 3114713292U, // <5,2,7,6>: Cost 3 vtrnr RHS, <0,2,4,6>
+ 3702658668U, // <5,2,7,7>: Cost 4 vext2 <1,u,5,2>, <7,7,7,7>
+ 2961178731U, // <5,2,7,u>: Cost 3 vzipr <1,3,5,7>, LHS
+ 2687125563U, // <5,2,u,0>: Cost 3 vext3 <0,4,1,5>, <2,u,0,1>
+ 2628917038U, // <5,2,u,1>: Cost 3 vext2 <1,u,5,2>, LHS
+ 2565613409U, // <5,2,u,2>: Cost 3 vext1 <2,5,2,u>, <2,5,2,u>
+ 2687125592U, // <5,2,u,3>: Cost 3 vext3 <0,4,1,5>, <2,u,3,3>
+ 1628203107U, // <5,2,u,4>: Cost 2 vext3 <2,u,4,5>, <2,u,4,5>
+ 2628917402U, // <5,2,u,5>: Cost 3 vext2 <1,u,5,2>, RHS
+ 2702092405U, // <5,2,u,6>: Cost 3 vext3 <2,u,6,5>, <2,u,6,5>
+ 3304179598U, // <5,2,u,7>: Cost 4 vrev <2,5,7,u>
+ 1628498055U, // <5,2,u,u>: Cost 2 vext3 <2,u,u,5>, <2,u,u,5>
+ 3760867467U, // <5,3,0,0>: Cost 4 vext3 <0,4,1,5>, <3,0,0,0>
+ 2687125654U, // <5,3,0,1>: Cost 3 vext3 <0,4,1,5>, <3,0,1,2>
+ 3759761565U, // <5,3,0,2>: Cost 4 vext3 <0,2,4,5>, <3,0,2,0>
+ 3633391766U, // <5,3,0,3>: Cost 4 vext1 <1,5,3,0>, <3,0,1,2>
+ 2687125680U, // <5,3,0,4>: Cost 3 vext3 <0,4,1,5>, <3,0,4,1>
+ 3760277690U, // <5,3,0,5>: Cost 4 vext3 <0,3,2,5>, <3,0,5,2>
+ 3310013014U, // <5,3,0,6>: Cost 4 vrev <3,5,6,0>
+ 2236344927U, // <5,3,0,7>: Cost 3 vrev <3,5,7,0>
+ 2687125717U, // <5,3,0,u>: Cost 3 vext3 <0,4,1,5>, <3,0,u,2>
+ 3760867551U, // <5,3,1,0>: Cost 4 vext3 <0,4,1,5>, <3,1,0,3>
+ 3760867558U, // <5,3,1,1>: Cost 4 vext3 <0,4,1,5>, <3,1,1,1>
+ 2624938923U, // <5,3,1,2>: Cost 3 vext2 <1,2,5,3>, <1,2,5,3>
+ 2703198460U, // <5,3,1,3>: Cost 3 vext3 <3,1,3,5>, <3,1,3,5>
+ 3760867587U, // <5,3,1,4>: Cost 4 vext3 <0,4,1,5>, <3,1,4,3>
+ 2636219536U, // <5,3,1,5>: Cost 3 vext2 <3,1,5,3>, <1,5,3,7>
+ 3698681075U, // <5,3,1,6>: Cost 4 vext2 <1,2,5,3>, <1,6,5,7>
+ 2703493408U, // <5,3,1,7>: Cost 3 vext3 <3,1,7,5>, <3,1,7,5>
+ 2628920721U, // <5,3,1,u>: Cost 3 vext2 <1,u,5,3>, <1,u,5,3>
+ 3766765870U, // <5,3,2,0>: Cost 4 vext3 <1,4,0,5>, <3,2,0,1>
+ 3698681379U, // <5,3,2,1>: Cost 4 vext2 <1,2,5,3>, <2,1,3,5>
+ 3760867649U, // <5,3,2,2>: Cost 4 vext3 <0,4,1,5>, <3,2,2,2>
+ 2698627404U, // <5,3,2,3>: Cost 3 vext3 <2,3,4,5>, <3,2,3,4>
+ 2703935830U, // <5,3,2,4>: Cost 3 vext3 <3,2,4,5>, <3,2,4,5>
+ 2698627422U, // <5,3,2,5>: Cost 3 vext3 <2,3,4,5>, <3,2,5,4>
+ 3760867686U, // <5,3,2,6>: Cost 4 vext3 <0,4,1,5>, <3,2,6,3>
+ 3769788783U, // <5,3,2,7>: Cost 4 vext3 <1,u,5,5>, <3,2,7,3>
+ 2701945209U, // <5,3,2,u>: Cost 3 vext3 <2,u,4,5>, <3,2,u,4>
+ 3760867711U, // <5,3,3,0>: Cost 4 vext3 <0,4,1,5>, <3,3,0,1>
+ 2636220684U, // <5,3,3,1>: Cost 3 vext2 <3,1,5,3>, <3,1,5,3>
+ 3772369298U, // <5,3,3,2>: Cost 4 vext3 <2,3,4,5>, <3,3,2,2>
+ 2687125916U, // <5,3,3,3>: Cost 3 vext3 <0,4,1,5>, <3,3,3,3>
+ 2704599463U, // <5,3,3,4>: Cost 3 vext3 <3,3,4,5>, <3,3,4,5>
+ 2704673200U, // <5,3,3,5>: Cost 3 vext3 <3,3,5,5>, <3,3,5,5>
+ 3709962935U, // <5,3,3,6>: Cost 4 vext2 <3,1,5,3>, <3,6,7,7>
+ 3772369346U, // <5,3,3,7>: Cost 4 vext3 <2,3,4,5>, <3,3,7,5>
+ 2704894411U, // <5,3,3,u>: Cost 3 vext3 <3,3,u,5>, <3,3,u,5>
+ 2704968148U, // <5,3,4,0>: Cost 3 vext3 <3,4,0,5>, <3,4,0,5>
+ 3698682850U, // <5,3,4,1>: Cost 4 vext2 <1,2,5,3>, <4,1,5,0>
+ 2642857014U, // <5,3,4,2>: Cost 3 vext2 <4,2,5,3>, <4,2,5,3>
+ 2705189359U, // <5,3,4,3>: Cost 3 vext3 <3,4,3,5>, <3,4,3,5>
+ 2705263096U, // <5,3,4,4>: Cost 3 vext3 <3,4,4,5>, <3,4,4,5>
+ 2685946370U, // <5,3,4,5>: Cost 3 vext3 <0,2,3,5>, <3,4,5,6>
+ 3779152394U, // <5,3,4,6>: Cost 4 vext3 <3,4,6,5>, <3,4,6,5>
+ 2236377699U, // <5,3,4,7>: Cost 3 vrev <3,5,7,4>
+ 2687126045U, // <5,3,4,u>: Cost 3 vext3 <0,4,1,5>, <3,4,u,6>
+ 2571632742U, // <5,3,5,0>: Cost 3 vext1 <3,5,3,5>, LHS
+ 2559689870U, // <5,3,5,1>: Cost 3 vext1 <1,5,3,5>, <1,5,3,5>
+ 2571634382U, // <5,3,5,2>: Cost 3 vext1 <3,5,3,5>, <2,3,4,5>
+ 2571635264U, // <5,3,5,3>: Cost 3 vext1 <3,5,3,5>, <3,5,3,5>
+ 2571636022U, // <5,3,5,4>: Cost 3 vext1 <3,5,3,5>, RHS
+ 2559692804U, // <5,3,5,5>: Cost 3 vext1 <1,5,3,5>, <5,5,5,5>
+ 3720581218U, // <5,3,5,6>: Cost 4 vext2 <4,u,5,3>, <5,6,7,0>
+ 2236385892U, // <5,3,5,7>: Cost 3 vrev <3,5,7,5>
+ 2571638574U, // <5,3,5,u>: Cost 3 vext1 <3,5,3,5>, LHS
+ 2565668966U, // <5,3,6,0>: Cost 3 vext1 <2,5,3,6>, LHS
+ 3633439887U, // <5,3,6,1>: Cost 4 vext1 <1,5,3,6>, <1,5,3,6>
+ 2565670760U, // <5,3,6,2>: Cost 3 vext1 <2,5,3,6>, <2,5,3,6>
+ 2565671426U, // <5,3,6,3>: Cost 3 vext1 <2,5,3,6>, <3,4,5,6>
+ 2565672246U, // <5,3,6,4>: Cost 3 vext1 <2,5,3,6>, RHS
+ 3639414630U, // <5,3,6,5>: Cost 4 vext1 <2,5,3,6>, <5,3,6,0>
+ 4047521640U, // <5,3,6,6>: Cost 4 vzipr <3,4,5,6>, <2,5,3,6>
+ 2725169844U, // <5,3,6,7>: Cost 3 vext3 <6,7,4,5>, <3,6,7,4>
+ 2565674798U, // <5,3,6,u>: Cost 3 vext1 <2,5,3,6>, LHS
+ 1485963366U, // <5,3,7,0>: Cost 2 vext1 <1,5,3,7>, LHS
+ 1485964432U, // <5,3,7,1>: Cost 2 vext1 <1,5,3,7>, <1,5,3,7>
+ 2559706728U, // <5,3,7,2>: Cost 3 vext1 <1,5,3,7>, <2,2,2,2>
+ 2559707286U, // <5,3,7,3>: Cost 3 vext1 <1,5,3,7>, <3,0,1,2>
+ 1485966646U, // <5,3,7,4>: Cost 2 vext1 <1,5,3,7>, RHS
+ 2559708880U, // <5,3,7,5>: Cost 3 vext1 <1,5,3,7>, <5,1,7,3>
+ 2601513466U, // <5,3,7,6>: Cost 3 vext1 <u,5,3,7>, <6,2,7,3>
+ 3114714112U, // <5,3,7,7>: Cost 3 vtrnr RHS, <1,3,5,7>
+ 1485969198U, // <5,3,7,u>: Cost 2 vext1 <1,5,3,7>, LHS
+ 1485971558U, // <5,3,u,0>: Cost 2 vext1 <1,5,3,u>, LHS
+ 1485972625U, // <5,3,u,1>: Cost 2 vext1 <1,5,3,u>, <1,5,3,u>
+ 2559714920U, // <5,3,u,2>: Cost 3 vext1 <1,5,3,u>, <2,2,2,2>
+ 2559715478U, // <5,3,u,3>: Cost 3 vext1 <1,5,3,u>, <3,0,1,2>
+ 1485974838U, // <5,3,u,4>: Cost 2 vext1 <1,5,3,u>, RHS
+ 2687126342U, // <5,3,u,5>: Cost 3 vext3 <0,4,1,5>, <3,u,5,6>
+ 2601521658U, // <5,3,u,6>: Cost 3 vext1 <u,5,3,u>, <6,2,7,3>
+ 2236410471U, // <5,3,u,7>: Cost 3 vrev <3,5,7,u>
+ 1485977390U, // <5,3,u,u>: Cost 2 vext1 <1,5,3,u>, LHS
+ 3627491430U, // <5,4,0,0>: Cost 4 vext1 <0,5,4,0>, LHS
+ 2636890214U, // <5,4,0,1>: Cost 3 vext2 <3,2,5,4>, LHS
+ 3703333028U, // <5,4,0,2>: Cost 4 vext2 <2,0,5,4>, <0,2,0,2>
+ 3782249348U, // <5,4,0,3>: Cost 4 vext3 <4,0,3,5>, <4,0,3,5>
+ 2642198866U, // <5,4,0,4>: Cost 3 vext2 <4,1,5,4>, <0,4,1,5>
+ 2687126418U, // <5,4,0,5>: Cost 3 vext3 <0,4,1,5>, <4,0,5,1>
+ 2242243887U, // <5,4,0,6>: Cost 3 vrev <4,5,6,0>
+ 3316059448U, // <5,4,0,7>: Cost 4 vrev <4,5,7,0>
+ 2636890781U, // <5,4,0,u>: Cost 3 vext2 <3,2,5,4>, LHS
+ 2241809658U, // <5,4,1,0>: Cost 3 vrev <4,5,0,1>
+ 3698025307U, // <5,4,1,1>: Cost 4 vext2 <1,1,5,4>, <1,1,5,4>
+ 3698688940U, // <5,4,1,2>: Cost 4 vext2 <1,2,5,4>, <1,2,5,4>
+ 3698689024U, // <5,4,1,3>: Cost 4 vext2 <1,2,5,4>, <1,3,5,7>
+ 3700016206U, // <5,4,1,4>: Cost 4 vext2 <1,4,5,4>, <1,4,5,4>
+ 2687126498U, // <5,4,1,5>: Cost 3 vext3 <0,4,1,5>, <4,1,5,0>
+ 3760868336U, // <5,4,1,6>: Cost 4 vext3 <0,4,1,5>, <4,1,6,5>
+ 3316067641U, // <5,4,1,7>: Cost 4 vrev <4,5,7,1>
+ 2242399554U, // <5,4,1,u>: Cost 3 vrev <4,5,u,1>
+ 3703334371U, // <5,4,2,0>: Cost 4 vext2 <2,0,5,4>, <2,0,5,4>
+ 3703998004U, // <5,4,2,1>: Cost 4 vext2 <2,1,5,4>, <2,1,5,4>
+ 3704661637U, // <5,4,2,2>: Cost 4 vext2 <2,2,5,4>, <2,2,5,4>
+ 2636891854U, // <5,4,2,3>: Cost 3 vext2 <3,2,5,4>, <2,3,4,5>
+ 3705988903U, // <5,4,2,4>: Cost 4 vext2 <2,4,5,4>, <2,4,5,4>
+ 2698628150U, // <5,4,2,5>: Cost 3 vext3 <2,3,4,5>, <4,2,5,3>
+ 3760868415U, // <5,4,2,6>: Cost 4 vext3 <0,4,1,5>, <4,2,6,3>
+ 3783871562U, // <5,4,2,7>: Cost 4 vext3 <4,2,7,5>, <4,2,7,5>
+ 2666752099U, // <5,4,2,u>: Cost 3 vext2 <u,2,5,4>, <2,u,4,5>
+ 3639459942U, // <5,4,3,0>: Cost 4 vext1 <2,5,4,3>, LHS
+ 3709970701U, // <5,4,3,1>: Cost 4 vext2 <3,1,5,4>, <3,1,5,4>
+ 2636892510U, // <5,4,3,2>: Cost 3 vext2 <3,2,5,4>, <3,2,5,4>
+ 3710634396U, // <5,4,3,3>: Cost 4 vext2 <3,2,5,4>, <3,3,3,3>
+ 2638219776U, // <5,4,3,4>: Cost 3 vext2 <3,4,5,4>, <3,4,5,4>
+ 3766987908U, // <5,4,3,5>: Cost 4 vext3 <1,4,3,5>, <4,3,5,0>
+ 2710719634U, // <5,4,3,6>: Cost 3 vext3 <4,3,6,5>, <4,3,6,5>
+ 3914097664U, // <5,4,3,7>: Cost 4 vuzpr <3,5,7,4>, <1,3,5,7>
+ 2640874308U, // <5,4,3,u>: Cost 3 vext2 <3,u,5,4>, <3,u,5,4>
+ 2583642214U, // <5,4,4,0>: Cost 3 vext1 <5,5,4,4>, LHS
+ 2642201574U, // <5,4,4,1>: Cost 3 vext2 <4,1,5,4>, <4,1,5,4>
+ 3710635062U, // <5,4,4,2>: Cost 4 vext2 <3,2,5,4>, <4,2,5,3>
+ 3717270664U, // <5,4,4,3>: Cost 4 vext2 <4,3,5,4>, <4,3,5,4>
+ 2713963728U, // <5,4,4,4>: Cost 3 vext3 <4,u,5,5>, <4,4,4,4>
+ 1637567706U, // <5,4,4,5>: Cost 2 vext3 <4,4,5,5>, <4,4,5,5>
+ 2242276659U, // <5,4,4,6>: Cost 3 vrev <4,5,6,4>
+ 2646183372U, // <5,4,4,7>: Cost 3 vext2 <4,7,5,4>, <4,7,5,4>
+ 1637788917U, // <5,4,4,u>: Cost 2 vext3 <4,4,u,5>, <4,4,u,5>
+ 2559762534U, // <5,4,5,0>: Cost 3 vext1 <1,5,4,5>, LHS
+ 2559763607U, // <5,4,5,1>: Cost 3 vext1 <1,5,4,5>, <1,5,4,5>
+ 2698628366U, // <5,4,5,2>: Cost 3 vext3 <2,3,4,5>, <4,5,2,3>
+ 3633506454U, // <5,4,5,3>: Cost 4 vext1 <1,5,4,5>, <3,0,1,2>
+ 2559765814U, // <5,4,5,4>: Cost 3 vext1 <1,5,4,5>, RHS
+ 2583654395U, // <5,4,5,5>: Cost 3 vext1 <5,5,4,5>, <5,5,4,5>
+ 1613385014U, // <5,4,5,6>: Cost 2 vext3 <0,4,1,5>, RHS
+ 3901639990U, // <5,4,5,7>: Cost 4 vuzpr <1,5,0,4>, RHS
+ 1613385032U, // <5,4,5,u>: Cost 2 vext3 <0,4,1,5>, RHS
+ 2559770726U, // <5,4,6,0>: Cost 3 vext1 <1,5,4,6>, LHS
+ 2559771648U, // <5,4,6,1>: Cost 3 vext1 <1,5,4,6>, <1,3,5,7>
+ 3633514088U, // <5,4,6,2>: Cost 4 vext1 <1,5,4,6>, <2,2,2,2>
+ 2571717122U, // <5,4,6,3>: Cost 3 vext1 <3,5,4,6>, <3,4,5,6>
+ 2559774006U, // <5,4,6,4>: Cost 3 vext1 <1,5,4,6>, RHS
+ 2712636796U, // <5,4,6,5>: Cost 3 vext3 <4,6,5,5>, <4,6,5,5>
+ 3760868743U, // <5,4,6,6>: Cost 4 vext3 <0,4,1,5>, <4,6,6,7>
+ 2712784270U, // <5,4,6,7>: Cost 3 vext3 <4,6,7,5>, <4,6,7,5>
+ 2559776558U, // <5,4,6,u>: Cost 3 vext1 <1,5,4,6>, LHS
+ 2565750886U, // <5,4,7,0>: Cost 3 vext1 <2,5,4,7>, LHS
+ 2565751706U, // <5,4,7,1>: Cost 3 vext1 <2,5,4,7>, <1,2,3,4>
+ 2565752690U, // <5,4,7,2>: Cost 3 vext1 <2,5,4,7>, <2,5,4,7>
+ 2571725387U, // <5,4,7,3>: Cost 3 vext1 <3,5,4,7>, <3,5,4,7>
+ 2565754166U, // <5,4,7,4>: Cost 3 vext1 <2,5,4,7>, RHS
+ 3114713426U, // <5,4,7,5>: Cost 3 vtrnr RHS, <0,4,1,5>
+ 94817590U, // <5,4,7,6>: Cost 1 vrev RHS
+ 2595616175U, // <5,4,7,7>: Cost 3 vext1 <7,5,4,7>, <7,5,4,7>
+ 94965064U, // <5,4,7,u>: Cost 1 vrev RHS
+ 2559787110U, // <5,4,u,0>: Cost 3 vext1 <1,5,4,u>, LHS
+ 2559788186U, // <5,4,u,1>: Cost 3 vext1 <1,5,4,u>, <1,5,4,u>
+ 2242014483U, // <5,4,u,2>: Cost 3 vrev <4,5,2,u>
+ 2667419628U, // <5,4,u,3>: Cost 3 vext2 <u,3,5,4>, <u,3,5,4>
+ 2559790390U, // <5,4,u,4>: Cost 3 vext1 <1,5,4,u>, RHS
+ 1640222238U, // <5,4,u,5>: Cost 2 vext3 <4,u,5,5>, <4,u,5,5>
+ 94825783U, // <5,4,u,6>: Cost 1 vrev RHS
+ 2714111536U, // <5,4,u,7>: Cost 3 vext3 <4,u,7,5>, <4,u,7,5>
+ 94973257U, // <5,4,u,u>: Cost 1 vrev RHS
+ 2646851584U, // <5,5,0,0>: Cost 3 vext2 <4,u,5,5>, <0,0,0,0>
+ 1573109862U, // <5,5,0,1>: Cost 2 vext2 <4,u,5,5>, LHS
+ 2646851748U, // <5,5,0,2>: Cost 3 vext2 <4,u,5,5>, <0,2,0,2>
+ 3760279130U, // <5,5,0,3>: Cost 4 vext3 <0,3,2,5>, <5,0,3,2>
+ 2687127138U, // <5,5,0,4>: Cost 3 vext3 <0,4,1,5>, <5,0,4,1>
+ 2248142847U, // <5,5,0,5>: Cost 3 vrev <5,5,5,0>
+ 3720593910U, // <5,5,0,6>: Cost 4 vext2 <4,u,5,5>, <0,6,1,7>
+ 4182502710U, // <5,5,0,7>: Cost 4 vtrnr <3,5,7,0>, RHS
+ 1573110429U, // <5,5,0,u>: Cost 2 vext2 <4,u,5,5>, LHS
+ 2646852342U, // <5,5,1,0>: Cost 3 vext2 <4,u,5,5>, <1,0,3,2>
+ 2624291676U, // <5,5,1,1>: Cost 3 vext2 <1,1,5,5>, <1,1,5,5>
+ 2646852502U, // <5,5,1,2>: Cost 3 vext2 <4,u,5,5>, <1,2,3,0>
+ 2646852568U, // <5,5,1,3>: Cost 3 vext2 <4,u,5,5>, <1,3,1,3>
+ 2715217591U, // <5,5,1,4>: Cost 3 vext3 <5,1,4,5>, <5,1,4,5>
+ 2628936848U, // <5,5,1,5>: Cost 3 vext2 <1,u,5,5>, <1,5,3,7>
+ 3698033907U, // <5,5,1,6>: Cost 4 vext2 <1,1,5,5>, <1,6,5,7>
+ 2713964240U, // <5,5,1,7>: Cost 3 vext3 <4,u,5,5>, <5,1,7,3>
+ 2628937107U, // <5,5,1,u>: Cost 3 vext2 <1,u,5,5>, <1,u,5,5>
+ 3645497446U, // <5,5,2,0>: Cost 4 vext1 <3,5,5,2>, LHS
+ 3760869099U, // <5,5,2,1>: Cost 4 vext3 <0,4,1,5>, <5,2,1,3>
+ 2646853224U, // <5,5,2,2>: Cost 3 vext2 <4,u,5,5>, <2,2,2,2>
+ 2698628862U, // <5,5,2,3>: Cost 3 vext3 <2,3,4,5>, <5,2,3,4>
+ 3772370694U, // <5,5,2,4>: Cost 4 vext3 <2,3,4,5>, <5,2,4,3>
+ 2713964303U, // <5,5,2,5>: Cost 3 vext3 <4,u,5,5>, <5,2,5,3>
+ 2646853562U, // <5,5,2,6>: Cost 3 vext2 <4,u,5,5>, <2,6,3,7>
+ 4038198272U, // <5,5,2,7>: Cost 4 vzipr <1,u,5,2>, <1,3,5,7>
+ 2701946667U, // <5,5,2,u>: Cost 3 vext3 <2,u,4,5>, <5,2,u,4>
+ 2646853782U, // <5,5,3,0>: Cost 3 vext2 <4,u,5,5>, <3,0,1,2>
+ 3698034922U, // <5,5,3,1>: Cost 4 vext2 <1,1,5,5>, <3,1,1,5>
+ 3702679919U, // <5,5,3,2>: Cost 4 vext2 <1,u,5,5>, <3,2,7,3>
+ 2637564336U, // <5,5,3,3>: Cost 3 vext2 <3,3,5,5>, <3,3,5,5>
+ 2646854146U, // <5,5,3,4>: Cost 3 vext2 <4,u,5,5>, <3,4,5,6>
+ 2638891602U, // <5,5,3,5>: Cost 3 vext2 <3,5,5,5>, <3,5,5,5>
+ 3702680247U, // <5,5,3,6>: Cost 4 vext2 <1,u,5,5>, <3,6,7,7>
+ 3702680259U, // <5,5,3,7>: Cost 4 vext2 <1,u,5,5>, <3,7,0,1>
+ 2646854430U, // <5,5,3,u>: Cost 3 vext2 <4,u,5,5>, <3,u,1,2>
+ 2646854546U, // <5,5,4,0>: Cost 3 vext2 <4,u,5,5>, <4,0,5,1>
+ 2642209767U, // <5,5,4,1>: Cost 3 vext2 <4,1,5,5>, <4,1,5,5>
+ 3711306806U, // <5,5,4,2>: Cost 4 vext2 <3,3,5,5>, <4,2,5,3>
+ 3645516369U, // <5,5,4,3>: Cost 4 vext1 <3,5,5,4>, <3,5,5,4>
+ 1570458842U, // <5,5,4,4>: Cost 2 vext2 <4,4,5,5>, <4,4,5,5>
+ 1573113142U, // <5,5,4,5>: Cost 2 vext2 <4,u,5,5>, RHS
+ 2645527932U, // <5,5,4,6>: Cost 3 vext2 <4,6,5,5>, <4,6,5,5>
+ 2713964486U, // <5,5,4,7>: Cost 3 vext3 <4,u,5,5>, <5,4,7,6>
+ 1573113374U, // <5,5,4,u>: Cost 2 vext2 <4,u,5,5>, <4,u,5,5>
+ 1509982310U, // <5,5,5,0>: Cost 2 vext1 <5,5,5,5>, LHS
+ 2646855376U, // <5,5,5,1>: Cost 3 vext2 <4,u,5,5>, <5,1,7,3>
+ 2583725672U, // <5,5,5,2>: Cost 3 vext1 <5,5,5,5>, <2,2,2,2>
+ 2583726230U, // <5,5,5,3>: Cost 3 vext1 <5,5,5,5>, <3,0,1,2>
+ 1509985590U, // <5,5,5,4>: Cost 2 vext1 <5,5,5,5>, RHS
+ 229035318U, // <5,5,5,5>: Cost 1 vdup1 RHS
+ 2646855778U, // <5,5,5,6>: Cost 3 vext2 <4,u,5,5>, <5,6,7,0>
+ 2646855848U, // <5,5,5,7>: Cost 3 vext2 <4,u,5,5>, <5,7,5,7>
+ 229035318U, // <5,5,5,u>: Cost 1 vdup1 RHS
+ 2577760358U, // <5,5,6,0>: Cost 3 vext1 <4,5,5,6>, LHS
+ 3633587361U, // <5,5,6,1>: Cost 4 vext1 <1,5,5,6>, <1,5,5,6>
+ 2646856186U, // <5,5,6,2>: Cost 3 vext2 <4,u,5,5>, <6,2,7,3>
+ 3633588738U, // <5,5,6,3>: Cost 4 vext1 <1,5,5,6>, <3,4,5,6>
+ 2718535756U, // <5,5,6,4>: Cost 3 vext3 <5,6,4,5>, <5,6,4,5>
+ 2644202223U, // <5,5,6,5>: Cost 3 vext2 <4,4,5,5>, <6,5,7,5>
+ 2973780482U, // <5,5,6,6>: Cost 3 vzipr <3,4,5,6>, <3,4,5,6>
+ 2646856526U, // <5,5,6,7>: Cost 3 vext2 <4,u,5,5>, <6,7,0,1>
+ 2646856607U, // <5,5,6,u>: Cost 3 vext2 <4,u,5,5>, <6,u,0,1>
+ 2571796582U, // <5,5,7,0>: Cost 3 vext1 <3,5,5,7>, LHS
+ 3633595392U, // <5,5,7,1>: Cost 4 vext1 <1,5,5,7>, <1,3,5,7>
+ 2571798222U, // <5,5,7,2>: Cost 3 vext1 <3,5,5,7>, <2,3,4,5>
+ 2571799124U, // <5,5,7,3>: Cost 3 vext1 <3,5,5,7>, <3,5,5,7>
+ 2571799862U, // <5,5,7,4>: Cost 3 vext1 <3,5,5,7>, RHS
+ 3114717188U, // <5,5,7,5>: Cost 3 vtrnr RHS, <5,5,5,5>
+ 4034923010U, // <5,5,7,6>: Cost 4 vzipr <1,3,5,7>, <3,4,5,6>
+ 2040974646U, // <5,5,7,7>: Cost 2 vtrnr RHS, RHS
+ 2040974647U, // <5,5,7,u>: Cost 2 vtrnr RHS, RHS
+ 1509982310U, // <5,5,u,0>: Cost 2 vext1 <5,5,5,5>, LHS
+ 1573115694U, // <5,5,u,1>: Cost 2 vext2 <4,u,5,5>, LHS
+ 2571806414U, // <5,5,u,2>: Cost 3 vext1 <3,5,5,u>, <2,3,4,5>
+ 2571807317U, // <5,5,u,3>: Cost 3 vext1 <3,5,5,u>, <3,5,5,u>
+ 1509985590U, // <5,5,u,4>: Cost 2 vext1 <5,5,5,5>, RHS
+ 229035318U, // <5,5,u,5>: Cost 1 vdup1 RHS
+ 2646857936U, // <5,5,u,6>: Cost 3 vext2 <4,u,5,5>, <u,6,3,7>
+ 2040982838U, // <5,5,u,7>: Cost 2 vtrnr RHS, RHS
+ 229035318U, // <5,5,u,u>: Cost 1 vdup1 RHS
+ 2638233600U, // <5,6,0,0>: Cost 3 vext2 <3,4,5,6>, <0,0,0,0>
+ 1564491878U, // <5,6,0,1>: Cost 2 vext2 <3,4,5,6>, LHS
+ 2632261796U, // <5,6,0,2>: Cost 3 vext2 <2,4,5,6>, <0,2,0,2>
+ 2638233856U, // <5,6,0,3>: Cost 3 vext2 <3,4,5,6>, <0,3,1,4>
+ 2638233938U, // <5,6,0,4>: Cost 3 vext2 <3,4,5,6>, <0,4,1,5>
+ 3706003885U, // <5,6,0,5>: Cost 4 vext2 <2,4,5,6>, <0,5,2,6>
+ 3706003967U, // <5,6,0,6>: Cost 4 vext2 <2,4,5,6>, <0,6,2,7>
+ 4047473974U, // <5,6,0,7>: Cost 4 vzipr <3,4,5,0>, RHS
+ 1564492445U, // <5,6,0,u>: Cost 2 vext2 <3,4,5,6>, LHS
+ 2638234358U, // <5,6,1,0>: Cost 3 vext2 <3,4,5,6>, <1,0,3,2>
+ 2638234420U, // <5,6,1,1>: Cost 3 vext2 <3,4,5,6>, <1,1,1,1>
+ 2638234518U, // <5,6,1,2>: Cost 3 vext2 <3,4,5,6>, <1,2,3,0>
+ 2638234584U, // <5,6,1,3>: Cost 3 vext2 <3,4,5,6>, <1,3,1,3>
+ 2626290768U, // <5,6,1,4>: Cost 3 vext2 <1,4,5,6>, <1,4,5,6>
+ 2638234768U, // <5,6,1,5>: Cost 3 vext2 <3,4,5,6>, <1,5,3,7>
+ 3700032719U, // <5,6,1,6>: Cost 4 vext2 <1,4,5,6>, <1,6,1,7>
+ 2982366518U, // <5,6,1,7>: Cost 3 vzipr <4,u,5,1>, RHS
+ 2628945300U, // <5,6,1,u>: Cost 3 vext2 <1,u,5,6>, <1,u,5,6>
+ 3706004925U, // <5,6,2,0>: Cost 4 vext2 <2,4,5,6>, <2,0,1,2>
+ 3711976966U, // <5,6,2,1>: Cost 4 vext2 <3,4,5,6>, <2,1,0,3>
+ 2638235240U, // <5,6,2,2>: Cost 3 vext2 <3,4,5,6>, <2,2,2,2>
+ 2638235302U, // <5,6,2,3>: Cost 3 vext2 <3,4,5,6>, <2,3,0,1>
+ 2632263465U, // <5,6,2,4>: Cost 3 vext2 <2,4,5,6>, <2,4,5,6>
+ 2638235496U, // <5,6,2,5>: Cost 3 vext2 <3,4,5,6>, <2,5,3,6>
+ 2638235578U, // <5,6,2,6>: Cost 3 vext2 <3,4,5,6>, <2,6,3,7>
+ 2713965050U, // <5,6,2,7>: Cost 3 vext3 <4,u,5,5>, <6,2,7,3>
+ 2634917997U, // <5,6,2,u>: Cost 3 vext2 <2,u,5,6>, <2,u,5,6>
+ 2638235798U, // <5,6,3,0>: Cost 3 vext2 <3,4,5,6>, <3,0,1,2>
+ 3711977695U, // <5,6,3,1>: Cost 4 vext2 <3,4,5,6>, <3,1,0,3>
+ 3710650720U, // <5,6,3,2>: Cost 4 vext2 <3,2,5,6>, <3,2,5,6>
+ 2638236060U, // <5,6,3,3>: Cost 3 vext2 <3,4,5,6>, <3,3,3,3>
+ 1564494338U, // <5,6,3,4>: Cost 2 vext2 <3,4,5,6>, <3,4,5,6>
+ 2638236234U, // <5,6,3,5>: Cost 3 vext2 <3,4,5,6>, <3,5,4,6>
+ 3711978104U, // <5,6,3,6>: Cost 4 vext2 <3,4,5,6>, <3,6,0,7>
+ 4034227510U, // <5,6,3,7>: Cost 4 vzipr <1,2,5,3>, RHS
+ 1567148870U, // <5,6,3,u>: Cost 2 vext2 <3,u,5,6>, <3,u,5,6>
+ 2577817702U, // <5,6,4,0>: Cost 3 vext1 <4,5,6,4>, LHS
+ 3700034544U, // <5,6,4,1>: Cost 4 vext2 <1,4,5,6>, <4,1,6,5>
+ 2723033713U, // <5,6,4,2>: Cost 3 vext3 <6,4,2,5>, <6,4,2,5>
+ 2638236818U, // <5,6,4,3>: Cost 3 vext2 <3,4,5,6>, <4,3,6,5>
+ 2644208859U, // <5,6,4,4>: Cost 3 vext2 <4,4,5,6>, <4,4,5,6>
+ 1564495158U, // <5,6,4,5>: Cost 2 vext2 <3,4,5,6>, RHS
+ 2645536125U, // <5,6,4,6>: Cost 3 vext2 <4,6,5,6>, <4,6,5,6>
+ 2723402398U, // <5,6,4,7>: Cost 3 vext3 <6,4,7,5>, <6,4,7,5>
+ 1564495401U, // <5,6,4,u>: Cost 2 vext2 <3,4,5,6>, RHS
+ 2577825894U, // <5,6,5,0>: Cost 3 vext1 <4,5,6,5>, LHS
+ 2662125264U, // <5,6,5,1>: Cost 3 vext2 <7,4,5,6>, <5,1,7,3>
+ 3775836867U, // <5,6,5,2>: Cost 4 vext3 <2,u,6,5>, <6,5,2,6>
+ 3711979343U, // <5,6,5,3>: Cost 4 vext2 <3,4,5,6>, <5,3,3,4>
+ 2650181556U, // <5,6,5,4>: Cost 3 vext2 <5,4,5,6>, <5,4,5,6>
+ 2662125572U, // <5,6,5,5>: Cost 3 vext2 <7,4,5,6>, <5,5,5,5>
+ 2638237732U, // <5,6,5,6>: Cost 3 vext2 <3,4,5,6>, <5,6,0,1>
+ 2982399286U, // <5,6,5,7>: Cost 3 vzipr <4,u,5,5>, RHS
+ 2982399287U, // <5,6,5,u>: Cost 3 vzipr <4,u,5,5>, RHS
+ 2583806054U, // <5,6,6,0>: Cost 3 vext1 <5,5,6,6>, LHS
+ 3711979910U, // <5,6,6,1>: Cost 4 vext2 <3,4,5,6>, <6,1,3,4>
+ 2662126074U, // <5,6,6,2>: Cost 3 vext2 <7,4,5,6>, <6,2,7,3>
+ 2583808514U, // <5,6,6,3>: Cost 3 vext1 <5,5,6,6>, <3,4,5,6>
+ 2583809334U, // <5,6,6,4>: Cost 3 vext1 <5,5,6,6>, RHS
+ 2583810062U, // <5,6,6,5>: Cost 3 vext1 <5,5,6,6>, <5,5,6,6>
+ 2638238520U, // <5,6,6,6>: Cost 3 vext2 <3,4,5,6>, <6,6,6,6>
+ 2973781302U, // <5,6,6,7>: Cost 3 vzipr <3,4,5,6>, RHS
+ 2973781303U, // <5,6,6,u>: Cost 3 vzipr <3,4,5,6>, RHS
+ 430358630U, // <5,6,7,0>: Cost 1 vext1 RHS, LHS
+ 1504101110U, // <5,6,7,1>: Cost 2 vext1 RHS, <1,0,3,2>
+ 1504101992U, // <5,6,7,2>: Cost 2 vext1 RHS, <2,2,2,2>
+ 1504102550U, // <5,6,7,3>: Cost 2 vext1 RHS, <3,0,1,2>
+ 430361910U, // <5,6,7,4>: Cost 1 vext1 RHS, RHS
+ 1504104390U, // <5,6,7,5>: Cost 2 vext1 RHS, <5,4,7,6>
+ 1504105272U, // <5,6,7,6>: Cost 2 vext1 RHS, <6,6,6,6>
+ 1504106092U, // <5,6,7,7>: Cost 2 vext1 RHS, <7,7,7,7>
+ 430364462U, // <5,6,7,u>: Cost 1 vext1 RHS, LHS
+ 430366822U, // <5,6,u,0>: Cost 1 vext1 RHS, LHS
+ 1564497710U, // <5,6,u,1>: Cost 2 vext2 <3,4,5,6>, LHS
+ 1504110184U, // <5,6,u,2>: Cost 2 vext1 RHS, <2,2,2,2>
+ 1504110742U, // <5,6,u,3>: Cost 2 vext1 RHS, <3,0,1,2>
+ 430370103U, // <5,6,u,4>: Cost 1 vext1 RHS, RHS
+ 1564498074U, // <5,6,u,5>: Cost 2 vext2 <3,4,5,6>, RHS
+ 1504113146U, // <5,6,u,6>: Cost 2 vext1 RHS, <6,2,7,3>
+ 1504113658U, // <5,6,u,7>: Cost 2 vext1 RHS, <7,0,1,2>
+ 430372654U, // <5,6,u,u>: Cost 1 vext1 RHS, LHS
+ 2625634304U, // <5,7,0,0>: Cost 3 vext2 <1,3,5,7>, <0,0,0,0>
+ 1551892582U, // <5,7,0,1>: Cost 2 vext2 <1,3,5,7>, LHS
+ 2625634468U, // <5,7,0,2>: Cost 3 vext2 <1,3,5,7>, <0,2,0,2>
+ 2571889247U, // <5,7,0,3>: Cost 3 vext1 <3,5,7,0>, <3,5,7,0>
+ 2625634642U, // <5,7,0,4>: Cost 3 vext2 <1,3,5,7>, <0,4,1,5>
+ 2595778728U, // <5,7,0,5>: Cost 3 vext1 <7,5,7,0>, <5,7,5,7>
+ 3699376639U, // <5,7,0,6>: Cost 4 vext2 <1,3,5,7>, <0,6,2,7>
+ 2260235715U, // <5,7,0,7>: Cost 3 vrev <7,5,7,0>
+ 1551893149U, // <5,7,0,u>: Cost 2 vext2 <1,3,5,7>, LHS
+ 2625635062U, // <5,7,1,0>: Cost 3 vext2 <1,3,5,7>, <1,0,3,2>
+ 2624308020U, // <5,7,1,1>: Cost 3 vext2 <1,1,5,7>, <1,1,1,1>
+ 2625635222U, // <5,7,1,2>: Cost 3 vext2 <1,3,5,7>, <1,2,3,0>
+ 1551893504U, // <5,7,1,3>: Cost 2 vext2 <1,3,5,7>, <1,3,5,7>
+ 2571898166U, // <5,7,1,4>: Cost 3 vext1 <3,5,7,1>, RHS
+ 2625635472U, // <5,7,1,5>: Cost 3 vext2 <1,3,5,7>, <1,5,3,7>
+ 2627626227U, // <5,7,1,6>: Cost 3 vext2 <1,6,5,7>, <1,6,5,7>
+ 3702031684U, // <5,7,1,7>: Cost 4 vext2 <1,7,5,7>, <1,7,5,7>
+ 1555211669U, // <5,7,1,u>: Cost 2 vext2 <1,u,5,7>, <1,u,5,7>
+ 2629617126U, // <5,7,2,0>: Cost 3 vext2 <2,0,5,7>, <2,0,5,7>
+ 3699377670U, // <5,7,2,1>: Cost 4 vext2 <1,3,5,7>, <2,1,0,3>
+ 2625635944U, // <5,7,2,2>: Cost 3 vext2 <1,3,5,7>, <2,2,2,2>
+ 2625636006U, // <5,7,2,3>: Cost 3 vext2 <1,3,5,7>, <2,3,0,1>
+ 2632271658U, // <5,7,2,4>: Cost 3 vext2 <2,4,5,7>, <2,4,5,7>
+ 2625636201U, // <5,7,2,5>: Cost 3 vext2 <1,3,5,7>, <2,5,3,7>
+ 2625636282U, // <5,7,2,6>: Cost 3 vext2 <1,3,5,7>, <2,6,3,7>
+ 3708004381U, // <5,7,2,7>: Cost 4 vext2 <2,7,5,7>, <2,7,5,7>
+ 2625636411U, // <5,7,2,u>: Cost 3 vext2 <1,3,5,7>, <2,u,0,1>
+ 2625636502U, // <5,7,3,0>: Cost 3 vext2 <1,3,5,7>, <3,0,1,2>
+ 2625636604U, // <5,7,3,1>: Cost 3 vext2 <1,3,5,7>, <3,1,3,5>
+ 3699378478U, // <5,7,3,2>: Cost 4 vext2 <1,3,5,7>, <3,2,0,1>
+ 2625636764U, // <5,7,3,3>: Cost 3 vext2 <1,3,5,7>, <3,3,3,3>
+ 2625636866U, // <5,7,3,4>: Cost 3 vext2 <1,3,5,7>, <3,4,5,6>
+ 2625636959U, // <5,7,3,5>: Cost 3 vext2 <1,3,5,7>, <3,5,7,0>
+ 3699378808U, // <5,7,3,6>: Cost 4 vext2 <1,3,5,7>, <3,6,0,7>
+ 2640235254U, // <5,7,3,7>: Cost 3 vext2 <3,7,5,7>, <3,7,5,7>
+ 2625637150U, // <5,7,3,u>: Cost 3 vext2 <1,3,5,7>, <3,u,1,2>
+ 2571919462U, // <5,7,4,0>: Cost 3 vext1 <3,5,7,4>, LHS
+ 2571920384U, // <5,7,4,1>: Cost 3 vext1 <3,5,7,4>, <1,3,5,7>
+ 3699379260U, // <5,7,4,2>: Cost 4 vext2 <1,3,5,7>, <4,2,6,0>
+ 2571922019U, // <5,7,4,3>: Cost 3 vext1 <3,5,7,4>, <3,5,7,4>
+ 2571922742U, // <5,7,4,4>: Cost 3 vext1 <3,5,7,4>, RHS
+ 1551895862U, // <5,7,4,5>: Cost 2 vext2 <1,3,5,7>, RHS
+ 2846277980U, // <5,7,4,6>: Cost 3 vuzpr RHS, <0,4,2,6>
+ 2646207951U, // <5,7,4,7>: Cost 3 vext2 <4,7,5,7>, <4,7,5,7>
+ 1551896105U, // <5,7,4,u>: Cost 2 vext2 <1,3,5,7>, RHS
+ 2583871590U, // <5,7,5,0>: Cost 3 vext1 <5,5,7,5>, LHS
+ 2652180176U, // <5,7,5,1>: Cost 3 vext2 <5,7,5,7>, <5,1,7,3>
+ 2625638177U, // <5,7,5,2>: Cost 3 vext2 <1,3,5,7>, <5,2,7,3>
+ 2625638262U, // <5,7,5,3>: Cost 3 vext2 <1,3,5,7>, <5,3,7,7>
+ 2583874870U, // <5,7,5,4>: Cost 3 vext1 <5,5,7,5>, RHS
+ 2846281732U, // <5,7,5,5>: Cost 3 vuzpr RHS, <5,5,5,5>
+ 2651517015U, // <5,7,5,6>: Cost 3 vext2 <5,6,5,7>, <5,6,5,7>
+ 1772539190U, // <5,7,5,7>: Cost 2 vuzpr RHS, RHS
+ 1772539191U, // <5,7,5,u>: Cost 2 vuzpr RHS, RHS
+ 2846281826U, // <5,7,6,0>: Cost 3 vuzpr RHS, <5,6,7,0>
+ 3699380615U, // <5,7,6,1>: Cost 4 vext2 <1,3,5,7>, <6,1,3,5>
+ 2846281108U, // <5,7,6,2>: Cost 3 vuzpr RHS, <4,6,u,2>
+ 2589854210U, // <5,7,6,3>: Cost 3 vext1 <6,5,7,6>, <3,4,5,6>
+ 2846281830U, // <5,7,6,4>: Cost 3 vuzpr RHS, <5,6,7,4>
+ 2725467658U, // <5,7,6,5>: Cost 3 vext3 <6,7,u,5>, <7,6,5,u>
+ 2846281076U, // <5,7,6,6>: Cost 3 vuzpr RHS, <4,6,4,6>
+ 2846279610U, // <5,7,6,7>: Cost 3 vuzpr RHS, <2,6,3,7>
+ 2846279611U, // <5,7,6,u>: Cost 3 vuzpr RHS, <2,6,3,u>
+ 1510146150U, // <5,7,7,0>: Cost 2 vext1 <5,5,7,7>, LHS
+ 2846282574U, // <5,7,7,1>: Cost 3 vuzpr RHS, <6,7,0,1>
+ 2583889512U, // <5,7,7,2>: Cost 3 vext1 <5,5,7,7>, <2,2,2,2>
+ 2846281919U, // <5,7,7,3>: Cost 3 vuzpr RHS, <5,7,u,3>
+ 1510149430U, // <5,7,7,4>: Cost 2 vext1 <5,5,7,7>, RHS
+ 1510150168U, // <5,7,7,5>: Cost 2 vext1 <5,5,7,7>, <5,5,7,7>
+ 2583892474U, // <5,7,7,6>: Cost 3 vext1 <5,5,7,7>, <6,2,7,3>
+ 2625640044U, // <5,7,7,7>: Cost 3 vext2 <1,3,5,7>, <7,7,7,7>
+ 1510151982U, // <5,7,7,u>: Cost 2 vext1 <5,5,7,7>, LHS
+ 1510154342U, // <5,7,u,0>: Cost 2 vext1 <5,5,7,u>, LHS
+ 1551898414U, // <5,7,u,1>: Cost 2 vext2 <1,3,5,7>, LHS
+ 2625640325U, // <5,7,u,2>: Cost 3 vext2 <1,3,5,7>, <u,2,3,0>
+ 1772536477U, // <5,7,u,3>: Cost 2 vuzpr RHS, LHS
+ 1510157622U, // <5,7,u,4>: Cost 2 vext1 <5,5,7,u>, RHS
+ 1551898778U, // <5,7,u,5>: Cost 2 vext2 <1,3,5,7>, RHS
+ 2625640656U, // <5,7,u,6>: Cost 3 vext2 <1,3,5,7>, <u,6,3,7>
+ 1772539433U, // <5,7,u,7>: Cost 2 vuzpr RHS, RHS
+ 1551898981U, // <5,7,u,u>: Cost 2 vext2 <1,3,5,7>, LHS
+ 2625642496U, // <5,u,0,0>: Cost 3 vext2 <1,3,5,u>, <0,0,0,0>
+ 1551900774U, // <5,u,0,1>: Cost 2 vext2 <1,3,5,u>, LHS
+ 2625642660U, // <5,u,0,2>: Cost 3 vext2 <1,3,5,u>, <0,2,0,2>
+ 2698630885U, // <5,u,0,3>: Cost 3 vext3 <2,3,4,5>, <u,0,3,2>
+ 2687129325U, // <5,u,0,4>: Cost 3 vext3 <0,4,1,5>, <u,0,4,1>
+ 2689783542U, // <5,u,0,5>: Cost 3 vext3 <0,u,1,5>, <u,0,5,1>
+ 2266134675U, // <5,u,0,6>: Cost 3 vrev <u,5,6,0>
+ 2595853772U, // <5,u,0,7>: Cost 3 vext1 <7,5,u,0>, <7,5,u,0>
+ 1551901341U, // <5,u,0,u>: Cost 2 vext2 <1,3,5,u>, LHS
+ 2625643254U, // <5,u,1,0>: Cost 3 vext2 <1,3,5,u>, <1,0,3,2>
+ 2625643316U, // <5,u,1,1>: Cost 3 vext2 <1,3,5,u>, <1,1,1,1>
+ 1613387566U, // <5,u,1,2>: Cost 2 vext3 <0,4,1,5>, LHS
+ 1551901697U, // <5,u,1,3>: Cost 2 vext2 <1,3,5,u>, <1,3,5,u>
+ 2626307154U, // <5,u,1,4>: Cost 3 vext2 <1,4,5,u>, <1,4,5,u>
+ 2689783622U, // <5,u,1,5>: Cost 3 vext3 <0,u,1,5>, <u,1,5,0>
+ 2627634420U, // <5,u,1,6>: Cost 3 vext2 <1,6,5,u>, <1,6,5,u>
+ 2982366536U, // <5,u,1,7>: Cost 3 vzipr <4,u,5,1>, RHS
+ 1613387620U, // <5,u,1,u>: Cost 2 vext3 <0,4,1,5>, LHS
+ 2846286742U, // <5,u,2,0>: Cost 3 vuzpr RHS, <1,2,3,0>
+ 2685796528U, // <5,u,2,1>: Cost 3 vext3 <0,2,1,5>, <0,2,1,5>
+ 2625644136U, // <5,u,2,2>: Cost 3 vext2 <1,3,5,u>, <2,2,2,2>
+ 2687129480U, // <5,u,2,3>: Cost 3 vext3 <0,4,1,5>, <u,2,3,3>
+ 2632279851U, // <5,u,2,4>: Cost 3 vext2 <2,4,5,u>, <2,4,5,u>
+ 2625644394U, // <5,u,2,5>: Cost 3 vext2 <1,3,5,u>, <2,5,3,u>
+ 2625644474U, // <5,u,2,6>: Cost 3 vext2 <1,3,5,u>, <2,6,3,7>
+ 2713966508U, // <5,u,2,7>: Cost 3 vext3 <4,u,5,5>, <u,2,7,3>
+ 2625644603U, // <5,u,2,u>: Cost 3 vext2 <1,3,5,u>, <2,u,0,1>
+ 2687129532U, // <5,u,3,0>: Cost 3 vext3 <0,4,1,5>, <u,3,0,1>
+ 2636261649U, // <5,u,3,1>: Cost 3 vext2 <3,1,5,u>, <3,1,5,u>
+ 2636925282U, // <5,u,3,2>: Cost 3 vext2 <3,2,5,u>, <3,2,5,u>
+ 2625644956U, // <5,u,3,3>: Cost 3 vext2 <1,3,5,u>, <3,3,3,3>
+ 1564510724U, // <5,u,3,4>: Cost 2 vext2 <3,4,5,u>, <3,4,5,u>
+ 2625645160U, // <5,u,3,5>: Cost 3 vext2 <1,3,5,u>, <3,5,u,0>
+ 2734610422U, // <5,u,3,6>: Cost 3 vext3 <u,3,6,5>, <u,3,6,5>
+ 2640243447U, // <5,u,3,7>: Cost 3 vext2 <3,7,5,u>, <3,7,5,u>
+ 1567165256U, // <5,u,3,u>: Cost 2 vext2 <3,u,5,u>, <3,u,5,u>
+ 1567828889U, // <5,u,4,0>: Cost 2 vext2 <4,0,5,u>, <4,0,5,u>
+ 1661163546U, // <5,u,4,1>: Cost 2 vext3 <u,4,1,5>, <u,4,1,5>
+ 2734463012U, // <5,u,4,2>: Cost 3 vext3 <u,3,4,5>, <u,4,2,6>
+ 2698631212U, // <5,u,4,3>: Cost 3 vext3 <2,3,4,5>, <u,4,3,5>
+ 1570458842U, // <5,u,4,4>: Cost 2 vext2 <4,4,5,5>, <4,4,5,5>
+ 1551904054U, // <5,u,4,5>: Cost 2 vext2 <1,3,5,u>, RHS
+ 2846286172U, // <5,u,4,6>: Cost 3 vuzpr RHS, <0,4,2,6>
+ 2646216144U, // <5,u,4,7>: Cost 3 vext2 <4,7,5,u>, <4,7,5,u>
+ 1551904297U, // <5,u,4,u>: Cost 2 vext2 <1,3,5,u>, RHS
+ 1509982310U, // <5,u,5,0>: Cost 2 vext1 <5,5,5,5>, LHS
+ 2560058555U, // <5,u,5,1>: Cost 3 vext1 <1,5,u,5>, <1,5,u,5>
+ 2698926194U, // <5,u,5,2>: Cost 3 vext3 <2,3,u,5>, <u,5,2,3>
+ 2698631295U, // <5,u,5,3>: Cost 3 vext3 <2,3,4,5>, <u,5,3,7>
+ 1509985590U, // <5,u,5,4>: Cost 2 vext1 <5,5,5,5>, RHS
+ 229035318U, // <5,u,5,5>: Cost 1 vdup1 RHS
+ 1613387930U, // <5,u,5,6>: Cost 2 vext3 <0,4,1,5>, RHS
+ 1772547382U, // <5,u,5,7>: Cost 2 vuzpr RHS, RHS
+ 229035318U, // <5,u,5,u>: Cost 1 vdup1 RHS
+ 2566037606U, // <5,u,6,0>: Cost 3 vext1 <2,5,u,6>, LHS
+ 2920044334U, // <5,u,6,1>: Cost 3 vzipl <5,6,7,0>, LHS
+ 2566039445U, // <5,u,6,2>: Cost 3 vext1 <2,5,u,6>, <2,5,u,6>
+ 2687129808U, // <5,u,6,3>: Cost 3 vext3 <0,4,1,5>, <u,6,3,7>
+ 2566040886U, // <5,u,6,4>: Cost 3 vext1 <2,5,u,6>, RHS
+ 2920044698U, // <5,u,6,5>: Cost 3 vzipl <5,6,7,0>, RHS
+ 2846289268U, // <5,u,6,6>: Cost 3 vuzpr RHS, <4,6,4,6>
+ 2973781320U, // <5,u,6,7>: Cost 3 vzipr <3,4,5,6>, RHS
+ 2687129853U, // <5,u,6,u>: Cost 3 vext3 <0,4,1,5>, <u,6,u,7>
+ 430506086U, // <5,u,7,0>: Cost 1 vext1 RHS, LHS
+ 1486333117U, // <5,u,7,1>: Cost 2 vext1 <1,5,u,7>, <1,5,u,7>
+ 1504249448U, // <5,u,7,2>: Cost 2 vext1 RHS, <2,2,2,2>
+ 2040971933U, // <5,u,7,3>: Cost 2 vtrnr RHS, LHS
+ 430509384U, // <5,u,7,4>: Cost 1 vext1 RHS, RHS
+ 1504251600U, // <5,u,7,5>: Cost 2 vext1 RHS, <5,1,7,3>
+ 118708378U, // <5,u,7,6>: Cost 1 vrev RHS
+ 2040974889U, // <5,u,7,7>: Cost 2 vtrnr RHS, RHS
+ 430511918U, // <5,u,7,u>: Cost 1 vext1 RHS, LHS
+ 430514278U, // <5,u,u,0>: Cost 1 vext1 RHS, LHS
+ 1551906606U, // <5,u,u,1>: Cost 2 vext2 <1,3,5,u>, LHS
+ 1613388133U, // <5,u,u,2>: Cost 2 vext3 <0,4,1,5>, LHS
+ 1772544669U, // <5,u,u,3>: Cost 2 vuzpr RHS, LHS
+ 430517577U, // <5,u,u,4>: Cost 1 vext1 RHS, RHS
+ 229035318U, // <5,u,u,5>: Cost 1 vdup1 RHS
+ 118716571U, // <5,u,u,6>: Cost 1 vrev RHS
+ 1772547625U, // <5,u,u,7>: Cost 2 vuzpr RHS, RHS
+ 430520110U, // <5,u,u,u>: Cost 1 vext1 RHS, LHS
+ 2686025728U, // <6,0,0,0>: Cost 3 vext3 <0,2,4,6>, <0,0,0,0>
+ 2686025738U, // <6,0,0,1>: Cost 3 vext3 <0,2,4,6>, <0,0,1,1>
+ 2686025748U, // <6,0,0,2>: Cost 3 vext3 <0,2,4,6>, <0,0,2,2>
+ 3779084320U, // <6,0,0,3>: Cost 4 vext3 <3,4,5,6>, <0,0,3,5>
+ 2642903388U, // <6,0,0,4>: Cost 3 vext2 <4,2,6,0>, <0,4,2,6>
+ 3657723939U, // <6,0,0,5>: Cost 4 vext1 <5,6,0,0>, <5,6,0,0>
+ 3926676514U, // <6,0,0,6>: Cost 4 vuzpr <5,6,7,0>, <7,0,5,6>
+ 3926675786U, // <6,0,0,7>: Cost 4 vuzpr <5,6,7,0>, <6,0,5,7>
+ 2686025802U, // <6,0,0,u>: Cost 3 vext3 <0,2,4,6>, <0,0,u,2>
+ 2566070374U, // <6,0,1,0>: Cost 3 vext1 <2,6,0,1>, LHS
+ 3759767642U, // <6,0,1,1>: Cost 4 vext3 <0,2,4,6>, <0,1,1,0>
+ 1612284006U, // <6,0,1,2>: Cost 2 vext3 <0,2,4,6>, LHS
+ 2583988738U, // <6,0,1,3>: Cost 3 vext1 <5,6,0,1>, <3,4,5,6>
+ 2566073654U, // <6,0,1,4>: Cost 3 vext1 <2,6,0,1>, RHS
+ 2583990308U, // <6,0,1,5>: Cost 3 vext1 <5,6,0,1>, <5,6,0,1>
+ 2589963005U, // <6,0,1,6>: Cost 3 vext1 <6,6,0,1>, <6,6,0,1>
+ 2595935702U, // <6,0,1,7>: Cost 3 vext1 <7,6,0,1>, <7,6,0,1>
+ 1612284060U, // <6,0,1,u>: Cost 2 vext3 <0,2,4,6>, LHS
+ 2686025892U, // <6,0,2,0>: Cost 3 vext3 <0,2,4,6>, <0,2,0,2>
+ 2685804721U, // <6,0,2,1>: Cost 3 vext3 <0,2,1,6>, <0,2,1,6>
+ 3759620282U, // <6,0,2,2>: Cost 4 vext3 <0,2,2,6>, <0,2,2,6>
+ 2705342658U, // <6,0,2,3>: Cost 3 vext3 <3,4,5,6>, <0,2,3,5>
+ 1612284108U, // <6,0,2,4>: Cost 2 vext3 <0,2,4,6>, <0,2,4,6>
+ 3706029956U, // <6,0,2,5>: Cost 4 vext2 <2,4,6,0>, <2,5,6,7>
+ 2686173406U, // <6,0,2,6>: Cost 3 vext3 <0,2,6,6>, <0,2,6,6>
+ 3651769338U, // <6,0,2,7>: Cost 4 vext1 <4,6,0,2>, <7,0,1,2>
+ 1612579056U, // <6,0,2,u>: Cost 2 vext3 <0,2,u,6>, <0,2,u,6>
+ 3706030230U, // <6,0,3,0>: Cost 4 vext2 <2,4,6,0>, <3,0,1,2>
+ 2705342720U, // <6,0,3,1>: Cost 3 vext3 <3,4,5,6>, <0,3,1,4>
+ 2705342730U, // <6,0,3,2>: Cost 3 vext3 <3,4,5,6>, <0,3,2,5>
+ 3706030492U, // <6,0,3,3>: Cost 4 vext2 <2,4,6,0>, <3,3,3,3>
+ 2644896258U, // <6,0,3,4>: Cost 3 vext2 <4,5,6,0>, <3,4,5,6>
+ 3718638154U, // <6,0,3,5>: Cost 4 vext2 <4,5,6,0>, <3,5,4,6>
+ 3729918619U, // <6,0,3,6>: Cost 4 vext2 <6,4,6,0>, <3,6,4,6>
+ 3926672384U, // <6,0,3,7>: Cost 4 vuzpr <5,6,7,0>, <1,3,5,7>
+ 2705342784U, // <6,0,3,u>: Cost 3 vext3 <3,4,5,6>, <0,3,u,5>
+ 2687058250U, // <6,0,4,0>: Cost 3 vext3 <0,4,0,6>, <0,4,0,6>
+ 2686026066U, // <6,0,4,1>: Cost 3 vext3 <0,2,4,6>, <0,4,1,5>
+ 1613463900U, // <6,0,4,2>: Cost 2 vext3 <0,4,2,6>, <0,4,2,6>
+ 3761021285U, // <6,0,4,3>: Cost 4 vext3 <0,4,3,6>, <0,4,3,6>
+ 2687353198U, // <6,0,4,4>: Cost 3 vext3 <0,4,4,6>, <0,4,4,6>
+ 2632289590U, // <6,0,4,5>: Cost 3 vext2 <2,4,6,0>, RHS
+ 2645560704U, // <6,0,4,6>: Cost 3 vext2 <4,6,6,0>, <4,6,6,0>
+ 2646224337U, // <6,0,4,7>: Cost 3 vext2 <4,7,6,0>, <4,7,6,0>
+ 1613906322U, // <6,0,4,u>: Cost 2 vext3 <0,4,u,6>, <0,4,u,6>
+ 3651788902U, // <6,0,5,0>: Cost 4 vext1 <4,6,0,5>, LHS
+ 2687795620U, // <6,0,5,1>: Cost 3 vext3 <0,5,1,6>, <0,5,1,6>
+ 3761611181U, // <6,0,5,2>: Cost 4 vext3 <0,5,2,6>, <0,5,2,6>
+ 3723284326U, // <6,0,5,3>: Cost 4 vext2 <5,3,6,0>, <5,3,6,0>
+ 2646224838U, // <6,0,5,4>: Cost 3 vext2 <4,7,6,0>, <5,4,7,6>
+ 3718639630U, // <6,0,5,5>: Cost 4 vext2 <4,5,6,0>, <5,5,6,6>
+ 2652196962U, // <6,0,5,6>: Cost 3 vext2 <5,7,6,0>, <5,6,7,0>
+ 2852932918U, // <6,0,5,7>: Cost 3 vuzpr <5,6,7,0>, RHS
+ 2852932919U, // <6,0,5,u>: Cost 3 vuzpr <5,6,7,0>, RHS
+ 2852933730U, // <6,0,6,0>: Cost 3 vuzpr <5,6,7,0>, <5,6,7,0>
+ 2925985894U, // <6,0,6,1>: Cost 3 vzipl <6,6,6,6>, LHS
+ 3060203622U, // <6,0,6,2>: Cost 3 vtrnl <6,6,6,6>, LHS
+ 3718640178U, // <6,0,6,3>: Cost 4 vext2 <4,5,6,0>, <6,3,4,5>
+ 2656178832U, // <6,0,6,4>: Cost 3 vext2 <6,4,6,0>, <6,4,6,0>
+ 3725939378U, // <6,0,6,5>: Cost 4 vext2 <5,7,6,0>, <6,5,0,7>
+ 2657506098U, // <6,0,6,6>: Cost 3 vext2 <6,6,6,0>, <6,6,6,0>
+ 2619020110U, // <6,0,6,7>: Cost 3 vext2 <0,2,6,0>, <6,7,0,1>
+ 2925986461U, // <6,0,6,u>: Cost 3 vzipl <6,6,6,6>, LHS
+ 2572091494U, // <6,0,7,0>: Cost 3 vext1 <3,6,0,7>, LHS
+ 2572092310U, // <6,0,7,1>: Cost 3 vext1 <3,6,0,7>, <1,2,3,0>
+ 2980495524U, // <6,0,7,2>: Cost 3 vzipr RHS, <0,2,0,2>
+ 2572094072U, // <6,0,7,3>: Cost 3 vext1 <3,6,0,7>, <3,6,0,7>
+ 2572094774U, // <6,0,7,4>: Cost 3 vext1 <3,6,0,7>, RHS
+ 4054238242U, // <6,0,7,5>: Cost 4 vzipr RHS, <1,4,0,5>
+ 3645837653U, // <6,0,7,6>: Cost 4 vext1 <3,6,0,7>, <6,0,7,0>
+ 4054239054U, // <6,0,7,7>: Cost 4 vzipr RHS, <2,5,0,7>
+ 2572097326U, // <6,0,7,u>: Cost 3 vext1 <3,6,0,7>, LHS
+ 2686026378U, // <6,0,u,0>: Cost 3 vext3 <0,2,4,6>, <0,u,0,2>
+ 2686026386U, // <6,0,u,1>: Cost 3 vext3 <0,2,4,6>, <0,u,1,1>
+ 1612284573U, // <6,0,u,2>: Cost 2 vext3 <0,2,4,6>, LHS
+ 2705343144U, // <6,0,u,3>: Cost 3 vext3 <3,4,5,6>, <0,u,3,5>
+ 1616265906U, // <6,0,u,4>: Cost 2 vext3 <0,u,4,6>, <0,u,4,6>
+ 2632292506U, // <6,0,u,5>: Cost 3 vext2 <2,4,6,0>, RHS
+ 2590020356U, // <6,0,u,6>: Cost 3 vext1 <6,6,0,u>, <6,6,0,u>
+ 2852933161U, // <6,0,u,7>: Cost 3 vuzpr <5,6,7,0>, RHS
+ 1612284627U, // <6,0,u,u>: Cost 2 vext3 <0,2,4,6>, LHS
+ 2595995750U, // <6,1,0,0>: Cost 3 vext1 <7,6,1,0>, LHS
+ 2646229094U, // <6,1,0,1>: Cost 3 vext2 <4,7,6,1>, LHS
+ 3694092492U, // <6,1,0,2>: Cost 4 vext2 <0,4,6,1>, <0,2,4,6>
+ 2686026486U, // <6,1,0,3>: Cost 3 vext3 <0,2,4,6>, <1,0,3,2>
+ 2595999030U, // <6,1,0,4>: Cost 3 vext1 <7,6,1,0>, RHS
+ 3767730952U, // <6,1,0,5>: Cost 4 vext3 <1,5,4,6>, <1,0,5,2>
+ 2596000590U, // <6,1,0,6>: Cost 3 vext1 <7,6,1,0>, <6,7,0,1>
+ 2596001246U, // <6,1,0,7>: Cost 3 vext1 <7,6,1,0>, <7,6,1,0>
+ 2686026531U, // <6,1,0,u>: Cost 3 vext3 <0,2,4,6>, <1,0,u,2>
+ 3763602219U, // <6,1,1,0>: Cost 4 vext3 <0,u,2,6>, <1,1,0,1>
+ 2686026548U, // <6,1,1,1>: Cost 3 vext3 <0,2,4,6>, <1,1,1,1>
+ 3764929346U, // <6,1,1,2>: Cost 4 vext3 <1,1,2,6>, <1,1,2,6>
+ 2686026568U, // <6,1,1,3>: Cost 3 vext3 <0,2,4,6>, <1,1,3,3>
+ 2691334996U, // <6,1,1,4>: Cost 3 vext3 <1,1,4,6>, <1,1,4,6>
+ 3760874332U, // <6,1,1,5>: Cost 4 vext3 <0,4,1,6>, <1,1,5,5>
+ 3765224294U, // <6,1,1,6>: Cost 4 vext3 <1,1,6,6>, <1,1,6,6>
+ 3669751263U, // <6,1,1,7>: Cost 4 vext1 <7,6,1,1>, <7,6,1,1>
+ 2686026613U, // <6,1,1,u>: Cost 3 vext3 <0,2,4,6>, <1,1,u,3>
+ 2554208358U, // <6,1,2,0>: Cost 3 vext1 <0,6,1,2>, LHS
+ 3763602311U, // <6,1,2,1>: Cost 4 vext3 <0,u,2,6>, <1,2,1,3>
+ 3639895971U, // <6,1,2,2>: Cost 4 vext1 <2,6,1,2>, <2,6,1,2>
+ 2686026646U, // <6,1,2,3>: Cost 3 vext3 <0,2,4,6>, <1,2,3,0>
+ 2554211638U, // <6,1,2,4>: Cost 3 vext1 <0,6,1,2>, RHS
+ 3760874411U, // <6,1,2,5>: Cost 4 vext3 <0,4,1,6>, <1,2,5,3>
+ 2554212858U, // <6,1,2,6>: Cost 3 vext1 <0,6,1,2>, <6,2,7,3>
+ 3802973114U, // <6,1,2,7>: Cost 4 vext3 <7,4,5,6>, <1,2,7,0>
+ 2686026691U, // <6,1,2,u>: Cost 3 vext3 <0,2,4,6>, <1,2,u,0>
+ 2566160486U, // <6,1,3,0>: Cost 3 vext1 <2,6,1,3>, LHS
+ 2686026712U, // <6,1,3,1>: Cost 3 vext3 <0,2,4,6>, <1,3,1,3>
+ 2686026724U, // <6,1,3,2>: Cost 3 vext3 <0,2,4,6>, <1,3,2,6>
+ 3759768552U, // <6,1,3,3>: Cost 4 vext3 <0,2,4,6>, <1,3,3,1>
+ 2692662262U, // <6,1,3,4>: Cost 3 vext3 <1,3,4,6>, <1,3,4,6>
+ 2686026752U, // <6,1,3,5>: Cost 3 vext3 <0,2,4,6>, <1,3,5,7>
+ 2590053128U, // <6,1,3,6>: Cost 3 vext1 <6,6,1,3>, <6,6,1,3>
+ 3663795194U, // <6,1,3,7>: Cost 4 vext1 <6,6,1,3>, <7,0,1,2>
+ 2686026775U, // <6,1,3,u>: Cost 3 vext3 <0,2,4,6>, <1,3,u,3>
+ 2641587099U, // <6,1,4,0>: Cost 3 vext2 <4,0,6,1>, <4,0,6,1>
+ 2693104684U, // <6,1,4,1>: Cost 3 vext3 <1,4,1,6>, <1,4,1,6>
+ 3639912357U, // <6,1,4,2>: Cost 4 vext1 <2,6,1,4>, <2,6,1,4>
+ 2687206462U, // <6,1,4,3>: Cost 3 vext3 <0,4,2,6>, <1,4,3,6>
+ 3633941814U, // <6,1,4,4>: Cost 4 vext1 <1,6,1,4>, RHS
+ 2693399632U, // <6,1,4,5>: Cost 3 vext3 <1,4,5,6>, <1,4,5,6>
+ 3765077075U, // <6,1,4,6>: Cost 4 vext3 <1,1,4,6>, <1,4,6,0>
+ 2646232530U, // <6,1,4,7>: Cost 3 vext2 <4,7,6,1>, <4,7,6,1>
+ 2687206507U, // <6,1,4,u>: Cost 3 vext3 <0,4,2,6>, <1,4,u,6>
+ 2647559796U, // <6,1,5,0>: Cost 3 vext2 <5,0,6,1>, <5,0,6,1>
+ 3765077118U, // <6,1,5,1>: Cost 4 vext3 <1,1,4,6>, <1,5,1,7>
+ 3767583878U, // <6,1,5,2>: Cost 4 vext3 <1,5,2,6>, <1,5,2,6>
+ 2686026896U, // <6,1,5,3>: Cost 3 vext3 <0,2,4,6>, <1,5,3,7>
+ 2693989528U, // <6,1,5,4>: Cost 3 vext3 <1,5,4,6>, <1,5,4,6>
+ 3767805089U, // <6,1,5,5>: Cost 4 vext3 <1,5,5,6>, <1,5,5,6>
+ 2652868706U, // <6,1,5,6>: Cost 3 vext2 <5,u,6,1>, <5,6,7,0>
+ 3908250934U, // <6,1,5,7>: Cost 4 vuzpr <2,6,0,1>, RHS
+ 2686026941U, // <6,1,5,u>: Cost 3 vext3 <0,2,4,6>, <1,5,u,7>
+ 2554241126U, // <6,1,6,0>: Cost 3 vext1 <0,6,1,6>, LHS
+ 3763602639U, // <6,1,6,1>: Cost 4 vext3 <0,u,2,6>, <1,6,1,7>
+ 3759547607U, // <6,1,6,2>: Cost 4 vext3 <0,2,1,6>, <1,6,2,6>
+ 3115221094U, // <6,1,6,3>: Cost 3 vtrnr <4,6,4,6>, LHS
+ 2554244406U, // <6,1,6,4>: Cost 3 vext1 <0,6,1,6>, RHS
+ 3760874739U, // <6,1,6,5>: Cost 4 vext3 <0,4,1,6>, <1,6,5,7>
+ 2554245944U, // <6,1,6,6>: Cost 3 vext1 <0,6,1,6>, <6,6,6,6>
+ 3719975758U, // <6,1,6,7>: Cost 4 vext2 <4,7,6,1>, <6,7,0,1>
+ 3115221099U, // <6,1,6,u>: Cost 3 vtrnr <4,6,4,6>, LHS
+ 2560221286U, // <6,1,7,0>: Cost 3 vext1 <1,6,1,7>, LHS
+ 2560222415U, // <6,1,7,1>: Cost 3 vext1 <1,6,1,7>, <1,6,1,7>
+ 2980497558U, // <6,1,7,2>: Cost 3 vzipr RHS, <3,0,1,2>
+ 3103211622U, // <6,1,7,3>: Cost 3 vtrnr <2,6,3,7>, LHS
+ 2560224566U, // <6,1,7,4>: Cost 3 vext1 <1,6,1,7>, RHS
+ 2980495698U, // <6,1,7,5>: Cost 3 vzipr RHS, <0,4,1,5>
+ 3633967526U, // <6,1,7,6>: Cost 4 vext1 <1,6,1,7>, <6,1,7,0>
+ 4054237686U, // <6,1,7,7>: Cost 4 vzipr RHS, <0,6,1,7>
+ 2560227118U, // <6,1,7,u>: Cost 3 vext1 <1,6,1,7>, LHS
+ 2560229478U, // <6,1,u,0>: Cost 3 vext1 <1,6,1,u>, LHS
+ 2686027117U, // <6,1,u,1>: Cost 3 vext3 <0,2,4,6>, <1,u,1,3>
+ 2686027129U, // <6,1,u,2>: Cost 3 vext3 <0,2,4,6>, <1,u,2,6>
+ 2686027132U, // <6,1,u,3>: Cost 3 vext3 <0,2,4,6>, <1,u,3,0>
+ 2687206795U, // <6,1,u,4>: Cost 3 vext3 <0,4,2,6>, <1,u,4,6>
+ 2686027157U, // <6,1,u,5>: Cost 3 vext3 <0,2,4,6>, <1,u,5,7>
+ 2590094093U, // <6,1,u,6>: Cost 3 vext1 <6,6,1,u>, <6,6,1,u>
+ 2596066790U, // <6,1,u,7>: Cost 3 vext1 <7,6,1,u>, <7,6,1,u>
+ 2686027177U, // <6,1,u,u>: Cost 3 vext3 <0,2,4,6>, <1,u,u,0>
+ 2646900736U, // <6,2,0,0>: Cost 3 vext2 <4,u,6,2>, <0,0,0,0>
+ 1573159014U, // <6,2,0,1>: Cost 2 vext2 <4,u,6,2>, LHS
+ 2646900900U, // <6,2,0,2>: Cost 3 vext2 <4,u,6,2>, <0,2,0,2>
+ 3759769037U, // <6,2,0,3>: Cost 4 vext3 <0,2,4,6>, <2,0,3,0>
+ 2641592668U, // <6,2,0,4>: Cost 3 vext2 <4,0,6,2>, <0,4,2,6>
+ 3779085794U, // <6,2,0,5>: Cost 4 vext3 <3,4,5,6>, <2,0,5,3>
+ 2686027244U, // <6,2,0,6>: Cost 3 vext3 <0,2,4,6>, <2,0,6,4>
+ 3669816807U, // <6,2,0,7>: Cost 4 vext1 <7,6,2,0>, <7,6,2,0>
+ 1573159581U, // <6,2,0,u>: Cost 2 vext2 <4,u,6,2>, LHS
+ 2230527897U, // <6,2,1,0>: Cost 3 vrev <2,6,0,1>
+ 2646901556U, // <6,2,1,1>: Cost 3 vext2 <4,u,6,2>, <1,1,1,1>
+ 2646901654U, // <6,2,1,2>: Cost 3 vext2 <4,u,6,2>, <1,2,3,0>
+ 2847047782U, // <6,2,1,3>: Cost 3 vuzpr <4,6,u,2>, LHS
+ 3771049517U, // <6,2,1,4>: Cost 4 vext3 <2,1,4,6>, <2,1,4,6>
+ 2646901904U, // <6,2,1,5>: Cost 3 vext2 <4,u,6,2>, <1,5,3,7>
+ 2686027324U, // <6,2,1,6>: Cost 3 vext3 <0,2,4,6>, <2,1,6,3>
+ 3669825000U, // <6,2,1,7>: Cost 4 vext1 <7,6,2,1>, <7,6,2,1>
+ 2231117793U, // <6,2,1,u>: Cost 3 vrev <2,6,u,1>
+ 3763603029U, // <6,2,2,0>: Cost 4 vext3 <0,u,2,6>, <2,2,0,1>
+ 3759769184U, // <6,2,2,1>: Cost 4 vext3 <0,2,4,6>, <2,2,1,3>
+ 2686027368U, // <6,2,2,2>: Cost 3 vext3 <0,2,4,6>, <2,2,2,2>
+ 2686027378U, // <6,2,2,3>: Cost 3 vext3 <0,2,4,6>, <2,2,3,3>
+ 2697971326U, // <6,2,2,4>: Cost 3 vext3 <2,2,4,6>, <2,2,4,6>
+ 3759769224U, // <6,2,2,5>: Cost 4 vext3 <0,2,4,6>, <2,2,5,7>
+ 2698118800U, // <6,2,2,6>: Cost 3 vext3 <2,2,6,6>, <2,2,6,6>
+ 3920794092U, // <6,2,2,7>: Cost 4 vuzpr <4,6,u,2>, <6,2,5,7>
+ 2686027423U, // <6,2,2,u>: Cost 3 vext3 <0,2,4,6>, <2,2,u,3>
+ 2686027430U, // <6,2,3,0>: Cost 3 vext3 <0,2,4,6>, <2,3,0,1>
+ 3759769262U, // <6,2,3,1>: Cost 4 vext3 <0,2,4,6>, <2,3,1,0>
+ 2698487485U, // <6,2,3,2>: Cost 3 vext3 <2,3,2,6>, <2,3,2,6>
+ 2705344196U, // <6,2,3,3>: Cost 3 vext3 <3,4,5,6>, <2,3,3,4>
+ 2686027470U, // <6,2,3,4>: Cost 3 vext3 <0,2,4,6>, <2,3,4,5>
+ 2698708696U, // <6,2,3,5>: Cost 3 vext3 <2,3,5,6>, <2,3,5,6>
+ 2724660961U, // <6,2,3,6>: Cost 3 vext3 <6,6,6,6>, <2,3,6,6>
+ 2729232104U, // <6,2,3,7>: Cost 3 vext3 <7,4,5,6>, <2,3,7,4>
+ 2686027502U, // <6,2,3,u>: Cost 3 vext3 <0,2,4,6>, <2,3,u,1>
+ 1567853468U, // <6,2,4,0>: Cost 2 vext2 <4,0,6,2>, <4,0,6,2>
+ 3759769351U, // <6,2,4,1>: Cost 4 vext3 <0,2,4,6>, <2,4,1,u>
+ 2699151118U, // <6,2,4,2>: Cost 3 vext3 <2,4,2,6>, <2,4,2,6>
+ 2686027543U, // <6,2,4,3>: Cost 3 vext3 <0,2,4,6>, <2,4,3,6>
+ 2699298592U, // <6,2,4,4>: Cost 3 vext3 <2,4,4,6>, <2,4,4,6>
+ 1573162294U, // <6,2,4,5>: Cost 2 vext2 <4,u,6,2>, RHS
+ 2686027564U, // <6,2,4,6>: Cost 3 vext3 <0,2,4,6>, <2,4,6,0>
+ 3719982547U, // <6,2,4,7>: Cost 4 vext2 <4,7,6,2>, <4,7,6,2>
+ 1573162532U, // <6,2,4,u>: Cost 2 vext2 <4,u,6,2>, <4,u,6,2>
+ 3779086154U, // <6,2,5,0>: Cost 4 vext3 <3,4,5,6>, <2,5,0,3>
+ 2646904528U, // <6,2,5,1>: Cost 3 vext2 <4,u,6,2>, <5,1,7,3>
+ 3759769440U, // <6,2,5,2>: Cost 4 vext3 <0,2,4,6>, <2,5,2,7>
+ 2699888488U, // <6,2,5,3>: Cost 3 vext3 <2,5,3,6>, <2,5,3,6>
+ 2230855617U, // <6,2,5,4>: Cost 3 vrev <2,6,4,5>
+ 2646904836U, // <6,2,5,5>: Cost 3 vext2 <4,u,6,2>, <5,5,5,5>
+ 2646904930U, // <6,2,5,6>: Cost 3 vext2 <4,u,6,2>, <5,6,7,0>
+ 2847051062U, // <6,2,5,7>: Cost 3 vuzpr <4,6,u,2>, RHS
+ 2700257173U, // <6,2,5,u>: Cost 3 vext3 <2,5,u,6>, <2,5,u,6>
+ 2687207321U, // <6,2,6,0>: Cost 3 vext3 <0,4,2,6>, <2,6,0,1>
+ 2686027684U, // <6,2,6,1>: Cost 3 vext3 <0,2,4,6>, <2,6,1,3>
+ 2566260656U, // <6,2,6,2>: Cost 3 vext1 <2,6,2,6>, <2,6,2,6>
+ 2685806522U, // <6,2,6,3>: Cost 3 vext3 <0,2,1,6>, <2,6,3,7>
+ 2687207361U, // <6,2,6,4>: Cost 3 vext3 <0,4,2,6>, <2,6,4,5>
+ 2686027724U, // <6,2,6,5>: Cost 3 vext3 <0,2,4,6>, <2,6,5,7>
+ 2646905656U, // <6,2,6,6>: Cost 3 vext2 <4,u,6,2>, <6,6,6,6>
+ 2646905678U, // <6,2,6,7>: Cost 3 vext2 <4,u,6,2>, <6,7,0,1>
+ 2686027751U, // <6,2,6,u>: Cost 3 vext3 <0,2,4,6>, <2,6,u,7>
+ 2554323046U, // <6,2,7,0>: Cost 3 vext1 <0,6,2,7>, LHS
+ 2572239606U, // <6,2,7,1>: Cost 3 vext1 <3,6,2,7>, <1,0,3,2>
+ 2566268849U, // <6,2,7,2>: Cost 3 vext1 <2,6,2,7>, <2,6,2,7>
+ 1906753638U, // <6,2,7,3>: Cost 2 vzipr RHS, LHS
+ 2554326326U, // <6,2,7,4>: Cost 3 vext1 <0,6,2,7>, RHS
+ 3304687564U, // <6,2,7,5>: Cost 4 vrev <2,6,5,7>
+ 2980495708U, // <6,2,7,6>: Cost 3 vzipr RHS, <0,4,2,6>
+ 2646906476U, // <6,2,7,7>: Cost 3 vext2 <4,u,6,2>, <7,7,7,7>
+ 1906753643U, // <6,2,7,u>: Cost 2 vzipr RHS, LHS
+ 1591744256U, // <6,2,u,0>: Cost 2 vext2 <u,0,6,2>, <u,0,6,2>
+ 1573164846U, // <6,2,u,1>: Cost 2 vext2 <4,u,6,2>, LHS
+ 2701805650U, // <6,2,u,2>: Cost 3 vext3 <2,u,2,6>, <2,u,2,6>
+ 1906761830U, // <6,2,u,3>: Cost 2 vzipr RHS, LHS
+ 2686027875U, // <6,2,u,4>: Cost 3 vext3 <0,2,4,6>, <2,u,4,5>
+ 1573165210U, // <6,2,u,5>: Cost 2 vext2 <4,u,6,2>, RHS
+ 2686322800U, // <6,2,u,6>: Cost 3 vext3 <0,2,u,6>, <2,u,6,0>
+ 2847051305U, // <6,2,u,7>: Cost 3 vuzpr <4,6,u,2>, RHS
+ 1906761835U, // <6,2,u,u>: Cost 2 vzipr RHS, LHS
+ 3759769739U, // <6,3,0,0>: Cost 4 vext3 <0,2,4,6>, <3,0,0,0>
+ 2686027926U, // <6,3,0,1>: Cost 3 vext3 <0,2,4,6>, <3,0,1,2>
+ 2686027937U, // <6,3,0,2>: Cost 3 vext3 <0,2,4,6>, <3,0,2,4>
+ 3640027286U, // <6,3,0,3>: Cost 4 vext1 <2,6,3,0>, <3,0,1,2>
+ 2687207601U, // <6,3,0,4>: Cost 3 vext3 <0,4,2,6>, <3,0,4,2>
+ 2705344698U, // <6,3,0,5>: Cost 3 vext3 <3,4,5,6>, <3,0,5,2>
+ 3663917847U, // <6,3,0,6>: Cost 4 vext1 <6,6,3,0>, <6,6,3,0>
+ 2237008560U, // <6,3,0,7>: Cost 3 vrev <3,6,7,0>
+ 2686027989U, // <6,3,0,u>: Cost 3 vext3 <0,2,4,6>, <3,0,u,2>
+ 3759769823U, // <6,3,1,0>: Cost 4 vext3 <0,2,4,6>, <3,1,0,3>
+ 3759769830U, // <6,3,1,1>: Cost 4 vext3 <0,2,4,6>, <3,1,1,1>
+ 3759769841U, // <6,3,1,2>: Cost 4 vext3 <0,2,4,6>, <3,1,2,3>
+ 3759769848U, // <6,3,1,3>: Cost 4 vext3 <0,2,4,6>, <3,1,3,1>
+ 2703280390U, // <6,3,1,4>: Cost 3 vext3 <3,1,4,6>, <3,1,4,6>
+ 3759769868U, // <6,3,1,5>: Cost 4 vext3 <0,2,4,6>, <3,1,5,3>
+ 3704063194U, // <6,3,1,6>: Cost 4 vext2 <2,1,6,3>, <1,6,3,0>
+ 3767732510U, // <6,3,1,7>: Cost 4 vext3 <1,5,4,6>, <3,1,7,3>
+ 2703280390U, // <6,3,1,u>: Cost 3 vext3 <3,1,4,6>, <3,1,4,6>
+ 3704063468U, // <6,3,2,0>: Cost 4 vext2 <2,1,6,3>, <2,0,6,4>
+ 2630321724U, // <6,3,2,1>: Cost 3 vext2 <2,1,6,3>, <2,1,6,3>
+ 3759769921U, // <6,3,2,2>: Cost 4 vext3 <0,2,4,6>, <3,2,2,2>
+ 3759769928U, // <6,3,2,3>: Cost 4 vext3 <0,2,4,6>, <3,2,3,0>
+ 3704063767U, // <6,3,2,4>: Cost 4 vext2 <2,1,6,3>, <2,4,3,6>
+ 3704063876U, // <6,3,2,5>: Cost 4 vext2 <2,1,6,3>, <2,5,6,7>
+ 2636957626U, // <6,3,2,6>: Cost 3 vext2 <3,2,6,3>, <2,6,3,7>
+ 3777907058U, // <6,3,2,7>: Cost 4 vext3 <3,2,7,6>, <3,2,7,6>
+ 2630321724U, // <6,3,2,u>: Cost 3 vext2 <2,1,6,3>, <2,1,6,3>
+ 3759769983U, // <6,3,3,0>: Cost 4 vext3 <0,2,4,6>, <3,3,0,1>
+ 3710036245U, // <6,3,3,1>: Cost 4 vext2 <3,1,6,3>, <3,1,6,3>
+ 2636958054U, // <6,3,3,2>: Cost 3 vext2 <3,2,6,3>, <3,2,6,3>
+ 2686028188U, // <6,3,3,3>: Cost 3 vext3 <0,2,4,6>, <3,3,3,3>
+ 2704607656U, // <6,3,3,4>: Cost 3 vext3 <3,3,4,6>, <3,3,4,6>
+ 3773041072U, // <6,3,3,5>: Cost 4 vext3 <2,4,4,6>, <3,3,5,5>
+ 3711363731U, // <6,3,3,6>: Cost 4 vext2 <3,3,6,3>, <3,6,3,7>
+ 3767732676U, // <6,3,3,7>: Cost 4 vext3 <1,5,4,6>, <3,3,7,7>
+ 2707999179U, // <6,3,3,u>: Cost 3 vext3 <3,u,5,6>, <3,3,u,5>
+ 2584232038U, // <6,3,4,0>: Cost 3 vext1 <5,6,3,4>, LHS
+ 2642267118U, // <6,3,4,1>: Cost 3 vext2 <4,1,6,3>, <4,1,6,3>
+ 2642930751U, // <6,3,4,2>: Cost 3 vext2 <4,2,6,3>, <4,2,6,3>
+ 2705197552U, // <6,3,4,3>: Cost 3 vext3 <3,4,3,6>, <3,4,3,6>
+ 2584235318U, // <6,3,4,4>: Cost 3 vext1 <5,6,3,4>, RHS
+ 1631603202U, // <6,3,4,5>: Cost 2 vext3 <3,4,5,6>, <3,4,5,6>
+ 2654211444U, // <6,3,4,6>: Cost 3 vext2 <6,1,6,3>, <4,6,4,6>
+ 2237041332U, // <6,3,4,7>: Cost 3 vrev <3,6,7,4>
+ 1631824413U, // <6,3,4,u>: Cost 2 vext3 <3,4,u,6>, <3,4,u,6>
+ 3640066150U, // <6,3,5,0>: Cost 4 vext1 <2,6,3,5>, LHS
+ 3772746288U, // <6,3,5,1>: Cost 4 vext3 <2,4,0,6>, <3,5,1,7>
+ 3640067790U, // <6,3,5,2>: Cost 4 vext1 <2,6,3,5>, <2,3,4,5>
+ 3773041216U, // <6,3,5,3>: Cost 4 vext3 <2,4,4,6>, <3,5,3,5>
+ 2705934922U, // <6,3,5,4>: Cost 3 vext3 <3,5,4,6>, <3,5,4,6>
+ 3773041236U, // <6,3,5,5>: Cost 4 vext3 <2,4,4,6>, <3,5,5,7>
+ 3779086940U, // <6,3,5,6>: Cost 4 vext3 <3,4,5,6>, <3,5,6,6>
+ 3767732831U, // <6,3,5,7>: Cost 4 vext3 <1,5,4,6>, <3,5,7,0>
+ 2706229870U, // <6,3,5,u>: Cost 3 vext3 <3,5,u,6>, <3,5,u,6>
+ 2602164326U, // <6,3,6,0>: Cost 3 vext1 <u,6,3,6>, LHS
+ 2654212512U, // <6,3,6,1>: Cost 3 vext2 <6,1,6,3>, <6,1,6,3>
+ 2566334393U, // <6,3,6,2>: Cost 3 vext1 <2,6,3,6>, <2,6,3,6>
+ 3704066588U, // <6,3,6,3>: Cost 4 vext2 <2,1,6,3>, <6,3,2,1>
+ 2602167524U, // <6,3,6,4>: Cost 3 vext1 <u,6,3,6>, <4,4,6,6>
+ 3710702321U, // <6,3,6,5>: Cost 4 vext2 <3,2,6,3>, <6,5,7,7>
+ 2724661933U, // <6,3,6,6>: Cost 3 vext3 <6,6,6,6>, <3,6,6,6>
+ 3710702465U, // <6,3,6,7>: Cost 4 vext2 <3,2,6,3>, <6,7,5,7>
+ 2602170158U, // <6,3,6,u>: Cost 3 vext1 <u,6,3,6>, LHS
+ 1492598886U, // <6,3,7,0>: Cost 2 vext1 <2,6,3,7>, LHS
+ 2560369889U, // <6,3,7,1>: Cost 3 vext1 <1,6,3,7>, <1,6,3,7>
+ 1492600762U, // <6,3,7,2>: Cost 2 vext1 <2,6,3,7>, <2,6,3,7>
+ 2566342806U, // <6,3,7,3>: Cost 3 vext1 <2,6,3,7>, <3,0,1,2>
+ 1492602166U, // <6,3,7,4>: Cost 2 vext1 <2,6,3,7>, RHS
+ 2602176208U, // <6,3,7,5>: Cost 3 vext1 <u,6,3,7>, <5,1,7,3>
+ 2566345210U, // <6,3,7,6>: Cost 3 vext1 <2,6,3,7>, <6,2,7,3>
+ 2980496528U, // <6,3,7,7>: Cost 3 vzipr RHS, <1,5,3,7>
+ 1492604718U, // <6,3,7,u>: Cost 2 vext1 <2,6,3,7>, LHS
+ 1492607078U, // <6,3,u,0>: Cost 2 vext1 <2,6,3,u>, LHS
+ 2686028574U, // <6,3,u,1>: Cost 3 vext3 <0,2,4,6>, <3,u,1,2>
+ 1492608955U, // <6,3,u,2>: Cost 2 vext1 <2,6,3,u>, <2,6,3,u>
+ 2566350998U, // <6,3,u,3>: Cost 3 vext1 <2,6,3,u>, <3,0,1,2>
+ 1492610358U, // <6,3,u,4>: Cost 2 vext1 <2,6,3,u>, RHS
+ 1634257734U, // <6,3,u,5>: Cost 2 vext3 <3,u,5,6>, <3,u,5,6>
+ 2566353489U, // <6,3,u,6>: Cost 3 vext1 <2,6,3,u>, <6,3,u,0>
+ 2980504720U, // <6,3,u,7>: Cost 3 vzipr RHS, <1,5,3,7>
+ 1492612910U, // <6,3,u,u>: Cost 2 vext1 <2,6,3,u>, LHS
+ 3703406592U, // <6,4,0,0>: Cost 4 vext2 <2,0,6,4>, <0,0,0,0>
+ 2629664870U, // <6,4,0,1>: Cost 3 vext2 <2,0,6,4>, LHS
+ 2629664972U, // <6,4,0,2>: Cost 3 vext2 <2,0,6,4>, <0,2,4,6>
+ 3779087232U, // <6,4,0,3>: Cost 4 vext3 <3,4,5,6>, <4,0,3,1>
+ 2642936156U, // <6,4,0,4>: Cost 3 vext2 <4,2,6,4>, <0,4,2,6>
+ 2712570770U, // <6,4,0,5>: Cost 3 vext3 <4,6,4,6>, <4,0,5,1>
+ 2687208348U, // <6,4,0,6>: Cost 3 vext3 <0,4,2,6>, <4,0,6,2>
+ 3316723081U, // <6,4,0,7>: Cost 4 vrev <4,6,7,0>
+ 2629665437U, // <6,4,0,u>: Cost 3 vext2 <2,0,6,4>, LHS
+ 2242473291U, // <6,4,1,0>: Cost 3 vrev <4,6,0,1>
+ 3700089652U, // <6,4,1,1>: Cost 4 vext2 <1,4,6,4>, <1,1,1,1>
+ 3703407510U, // <6,4,1,2>: Cost 4 vext2 <2,0,6,4>, <1,2,3,0>
+ 2852962406U, // <6,4,1,3>: Cost 3 vuzpr <5,6,7,4>, LHS
+ 3628166454U, // <6,4,1,4>: Cost 4 vext1 <0,6,4,1>, RHS
+ 3760876514U, // <6,4,1,5>: Cost 4 vext3 <0,4,1,6>, <4,1,5,0>
+ 2687208430U, // <6,4,1,6>: Cost 3 vext3 <0,4,2,6>, <4,1,6,3>
+ 3316731274U, // <6,4,1,7>: Cost 4 vrev <4,6,7,1>
+ 2243063187U, // <6,4,1,u>: Cost 3 vrev <4,6,u,1>
+ 2629666284U, // <6,4,2,0>: Cost 3 vext2 <2,0,6,4>, <2,0,6,4>
+ 3703408188U, // <6,4,2,1>: Cost 4 vext2 <2,0,6,4>, <2,1,6,3>
+ 3703408232U, // <6,4,2,2>: Cost 4 vext2 <2,0,6,4>, <2,2,2,2>
+ 3703408294U, // <6,4,2,3>: Cost 4 vext2 <2,0,6,4>, <2,3,0,1>
+ 2632320816U, // <6,4,2,4>: Cost 3 vext2 <2,4,6,4>, <2,4,6,4>
+ 2923384118U, // <6,4,2,5>: Cost 3 vzipl <6,2,7,3>, RHS
+ 2687208508U, // <6,4,2,6>: Cost 3 vext3 <0,4,2,6>, <4,2,6,0>
+ 3760950341U, // <6,4,2,7>: Cost 4 vext3 <0,4,2,6>, <4,2,7,0>
+ 2634975348U, // <6,4,2,u>: Cost 3 vext2 <2,u,6,4>, <2,u,6,4>
+ 3703408790U, // <6,4,3,0>: Cost 4 vext2 <2,0,6,4>, <3,0,1,2>
+ 3316305238U, // <6,4,3,1>: Cost 4 vrev <4,6,1,3>
+ 3703408947U, // <6,4,3,2>: Cost 4 vext2 <2,0,6,4>, <3,2,0,6>
+ 3703409052U, // <6,4,3,3>: Cost 4 vext2 <2,0,6,4>, <3,3,3,3>
+ 2644929026U, // <6,4,3,4>: Cost 3 vext2 <4,5,6,4>, <3,4,5,6>
+ 3718670922U, // <6,4,3,5>: Cost 4 vext2 <4,5,6,4>, <3,5,4,6>
+ 2705345682U, // <6,4,3,6>: Cost 3 vext3 <3,4,5,6>, <4,3,6,5>
+ 3926705152U, // <6,4,3,7>: Cost 4 vuzpr <5,6,7,4>, <1,3,5,7>
+ 2668817222U, // <6,4,3,u>: Cost 3 vext2 <u,5,6,4>, <3,u,5,6>
+ 2590277734U, // <6,4,4,0>: Cost 3 vext1 <6,6,4,4>, LHS
+ 3716017135U, // <6,4,4,1>: Cost 4 vext2 <4,1,6,4>, <4,1,6,4>
+ 2642938944U, // <6,4,4,2>: Cost 3 vext2 <4,2,6,4>, <4,2,6,4>
+ 3717344401U, // <6,4,4,3>: Cost 4 vext2 <4,3,6,4>, <4,3,6,4>
+ 2712571088U, // <6,4,4,4>: Cost 3 vext3 <4,6,4,6>, <4,4,4,4>
+ 2629668150U, // <6,4,4,5>: Cost 3 vext2 <2,0,6,4>, RHS
+ 1637649636U, // <6,4,4,6>: Cost 2 vext3 <4,4,6,6>, <4,4,6,6>
+ 2646257109U, // <6,4,4,7>: Cost 3 vext2 <4,7,6,4>, <4,7,6,4>
+ 1637649636U, // <6,4,4,u>: Cost 2 vext3 <4,4,6,6>, <4,4,6,6>
+ 2566398054U, // <6,4,5,0>: Cost 3 vext1 <2,6,4,5>, LHS
+ 3760876805U, // <6,4,5,1>: Cost 4 vext3 <0,4,1,6>, <4,5,1,3>
+ 2566399937U, // <6,4,5,2>: Cost 3 vext1 <2,6,4,5>, <2,6,4,5>
+ 2584316418U, // <6,4,5,3>: Cost 3 vext1 <5,6,4,5>, <3,4,5,6>
+ 2566401334U, // <6,4,5,4>: Cost 3 vext1 <2,6,4,5>, RHS
+ 2584318028U, // <6,4,5,5>: Cost 3 vext1 <5,6,4,5>, <5,6,4,5>
+ 1612287286U, // <6,4,5,6>: Cost 2 vext3 <0,2,4,6>, RHS
+ 2852965686U, // <6,4,5,7>: Cost 3 vuzpr <5,6,7,4>, RHS
+ 1612287304U, // <6,4,5,u>: Cost 2 vext3 <0,2,4,6>, RHS
+ 1504608358U, // <6,4,6,0>: Cost 2 vext1 <4,6,4,6>, LHS
+ 2578350838U, // <6,4,6,1>: Cost 3 vext1 <4,6,4,6>, <1,0,3,2>
+ 2578351720U, // <6,4,6,2>: Cost 3 vext1 <4,6,4,6>, <2,2,2,2>
+ 2578352278U, // <6,4,6,3>: Cost 3 vext1 <4,6,4,6>, <3,0,1,2>
+ 1504611638U, // <6,4,6,4>: Cost 2 vext1 <4,6,4,6>, RHS
+ 2578353872U, // <6,4,6,5>: Cost 3 vext1 <4,6,4,6>, <5,1,7,3>
+ 2578354682U, // <6,4,6,6>: Cost 3 vext1 <4,6,4,6>, <6,2,7,3>
+ 2578355194U, // <6,4,6,7>: Cost 3 vext1 <4,6,4,6>, <7,0,1,2>
+ 1504614190U, // <6,4,6,u>: Cost 2 vext1 <4,6,4,6>, LHS
+ 2572386406U, // <6,4,7,0>: Cost 3 vext1 <3,6,4,7>, LHS
+ 2572387226U, // <6,4,7,1>: Cost 3 vext1 <3,6,4,7>, <1,2,3,4>
+ 3640157902U, // <6,4,7,2>: Cost 4 vext1 <2,6,4,7>, <2,3,4,5>
+ 2572389020U, // <6,4,7,3>: Cost 3 vext1 <3,6,4,7>, <3,6,4,7>
+ 2572389686U, // <6,4,7,4>: Cost 3 vext1 <3,6,4,7>, RHS
+ 2980497102U, // <6,4,7,5>: Cost 3 vzipr RHS, <2,3,4,5>
+ 2980495564U, // <6,4,7,6>: Cost 3 vzipr RHS, <0,2,4,6>
+ 4054239090U, // <6,4,7,7>: Cost 4 vzipr RHS, <2,5,4,7>
+ 2572392238U, // <6,4,7,u>: Cost 3 vext1 <3,6,4,7>, LHS
+ 1504608358U, // <6,4,u,0>: Cost 2 vext1 <4,6,4,6>, LHS
+ 2629670702U, // <6,4,u,1>: Cost 3 vext2 <2,0,6,4>, LHS
+ 2566424516U, // <6,4,u,2>: Cost 3 vext1 <2,6,4,u>, <2,6,4,u>
+ 2584340994U, // <6,4,u,3>: Cost 3 vext1 <5,6,4,u>, <3,4,5,6>
+ 1640156694U, // <6,4,u,4>: Cost 2 vext3 <4,u,4,6>, <4,u,4,6>
+ 2629671066U, // <6,4,u,5>: Cost 3 vext2 <2,0,6,4>, RHS
+ 1612287529U, // <6,4,u,6>: Cost 2 vext3 <0,2,4,6>, RHS
+ 2852965929U, // <6,4,u,7>: Cost 3 vuzpr <5,6,7,4>, RHS
+ 1612287547U, // <6,4,u,u>: Cost 2 vext3 <0,2,4,6>, RHS
+ 3708723200U, // <6,5,0,0>: Cost 4 vext2 <2,u,6,5>, <0,0,0,0>
+ 2634981478U, // <6,5,0,1>: Cost 3 vext2 <2,u,6,5>, LHS
+ 3694125260U, // <6,5,0,2>: Cost 4 vext2 <0,4,6,5>, <0,2,4,6>
+ 3779087962U, // <6,5,0,3>: Cost 4 vext3 <3,4,5,6>, <5,0,3,2>
+ 3760877154U, // <6,5,0,4>: Cost 4 vext3 <0,4,1,6>, <5,0,4,1>
+ 4195110916U, // <6,5,0,5>: Cost 4 vtrnr <5,6,7,0>, <5,5,5,5>
+ 3696779775U, // <6,5,0,6>: Cost 4 vext2 <0,u,6,5>, <0,6,2,7>
+ 1175212130U, // <6,5,0,7>: Cost 2 vrev <5,6,7,0>
+ 1175285867U, // <6,5,0,u>: Cost 2 vrev <5,6,u,0>
+ 2248445988U, // <6,5,1,0>: Cost 3 vrev <5,6,0,1>
+ 3698107237U, // <6,5,1,1>: Cost 4 vext2 <1,1,6,5>, <1,1,6,5>
+ 3708724118U, // <6,5,1,2>: Cost 4 vext2 <2,u,6,5>, <1,2,3,0>
+ 3908575334U, // <6,5,1,3>: Cost 4 vuzpr <2,6,4,5>, LHS
+ 3716023376U, // <6,5,1,4>: Cost 4 vext2 <4,1,6,5>, <1,4,5,6>
+ 3708724368U, // <6,5,1,5>: Cost 4 vext2 <2,u,6,5>, <1,5,3,7>
+ 3767733960U, // <6,5,1,6>: Cost 4 vext3 <1,5,4,6>, <5,1,6,4>
+ 2712571600U, // <6,5,1,7>: Cost 3 vext3 <4,6,4,6>, <5,1,7,3>
+ 2712571609U, // <6,5,1,u>: Cost 3 vext3 <4,6,4,6>, <5,1,u,3>
+ 2578391142U, // <6,5,2,0>: Cost 3 vext1 <4,6,5,2>, LHS
+ 3704079934U, // <6,5,2,1>: Cost 4 vext2 <2,1,6,5>, <2,1,6,5>
+ 3708724840U, // <6,5,2,2>: Cost 4 vext2 <2,u,6,5>, <2,2,2,2>
+ 3705407182U, // <6,5,2,3>: Cost 4 vext2 <2,3,6,5>, <2,3,4,5>
+ 2578394422U, // <6,5,2,4>: Cost 3 vext1 <4,6,5,2>, RHS
+ 3717351272U, // <6,5,2,5>: Cost 4 vext2 <4,3,6,5>, <2,5,3,6>
+ 2634983354U, // <6,5,2,6>: Cost 3 vext2 <2,u,6,5>, <2,6,3,7>
+ 3115486518U, // <6,5,2,7>: Cost 3 vtrnr <4,6,u,2>, RHS
+ 2634983541U, // <6,5,2,u>: Cost 3 vext2 <2,u,6,5>, <2,u,6,5>
+ 3708725398U, // <6,5,3,0>: Cost 4 vext2 <2,u,6,5>, <3,0,1,2>
+ 3710052631U, // <6,5,3,1>: Cost 4 vext2 <3,1,6,5>, <3,1,6,5>
+ 3708725606U, // <6,5,3,2>: Cost 4 vext2 <2,u,6,5>, <3,2,6,3>
+ 3708725660U, // <6,5,3,3>: Cost 4 vext2 <2,u,6,5>, <3,3,3,3>
+ 2643610114U, // <6,5,3,4>: Cost 3 vext2 <4,3,6,5>, <3,4,5,6>
+ 3717352010U, // <6,5,3,5>: Cost 4 vext2 <4,3,6,5>, <3,5,4,6>
+ 3773632358U, // <6,5,3,6>: Cost 4 vext3 <2,5,3,6>, <5,3,6,0>
+ 2248978533U, // <6,5,3,7>: Cost 3 vrev <5,6,7,3>
+ 2249052270U, // <6,5,3,u>: Cost 3 vrev <5,6,u,3>
+ 2596323430U, // <6,5,4,0>: Cost 3 vext1 <7,6,5,4>, LHS
+ 3716025328U, // <6,5,4,1>: Cost 4 vext2 <4,1,6,5>, <4,1,6,5>
+ 3716688961U, // <6,5,4,2>: Cost 4 vext2 <4,2,6,5>, <4,2,6,5>
+ 2643610770U, // <6,5,4,3>: Cost 3 vext2 <4,3,6,5>, <4,3,6,5>
+ 2596326710U, // <6,5,4,4>: Cost 3 vext1 <7,6,5,4>, RHS
+ 2634984758U, // <6,5,4,5>: Cost 3 vext2 <2,u,6,5>, RHS
+ 3767734199U, // <6,5,4,6>: Cost 4 vext3 <1,5,4,6>, <5,4,6,0>
+ 1643696070U, // <6,5,4,7>: Cost 2 vext3 <5,4,7,6>, <5,4,7,6>
+ 1643769807U, // <6,5,4,u>: Cost 2 vext3 <5,4,u,6>, <5,4,u,6>
+ 2578415718U, // <6,5,5,0>: Cost 3 vext1 <4,6,5,5>, LHS
+ 3652158198U, // <6,5,5,1>: Cost 4 vext1 <4,6,5,5>, <1,0,3,2>
+ 3652159080U, // <6,5,5,2>: Cost 4 vext1 <4,6,5,5>, <2,2,2,2>
+ 3652159638U, // <6,5,5,3>: Cost 4 vext1 <4,6,5,5>, <3,0,1,2>
+ 2578418998U, // <6,5,5,4>: Cost 3 vext1 <4,6,5,5>, RHS
+ 2712571908U, // <6,5,5,5>: Cost 3 vext3 <4,6,4,6>, <5,5,5,5>
+ 2718027790U, // <6,5,5,6>: Cost 3 vext3 <5,5,6,6>, <5,5,6,6>
+ 2712571928U, // <6,5,5,7>: Cost 3 vext3 <4,6,4,6>, <5,5,7,7>
+ 2712571937U, // <6,5,5,u>: Cost 3 vext3 <4,6,4,6>, <5,5,u,7>
+ 2705346596U, // <6,5,6,0>: Cost 3 vext3 <3,4,5,6>, <5,6,0,1>
+ 3767144496U, // <6,5,6,1>: Cost 4 vext3 <1,4,5,6>, <5,6,1,4>
+ 3773116473U, // <6,5,6,2>: Cost 4 vext3 <2,4,5,6>, <5,6,2,4>
+ 2705346626U, // <6,5,6,3>: Cost 3 vext3 <3,4,5,6>, <5,6,3,4>
+ 2705346636U, // <6,5,6,4>: Cost 3 vext3 <3,4,5,6>, <5,6,4,5>
+ 3908577217U, // <6,5,6,5>: Cost 4 vuzpr <2,6,4,5>, <2,6,4,5>
+ 2578428728U, // <6,5,6,6>: Cost 3 vext1 <4,6,5,6>, <6,6,6,6>
+ 2712572002U, // <6,5,6,7>: Cost 3 vext3 <4,6,4,6>, <5,6,7,0>
+ 2705346668U, // <6,5,6,u>: Cost 3 vext3 <3,4,5,6>, <5,6,u,1>
+ 2560516198U, // <6,5,7,0>: Cost 3 vext1 <1,6,5,7>, LHS
+ 2560517363U, // <6,5,7,1>: Cost 3 vext1 <1,6,5,7>, <1,6,5,7>
+ 2566490060U, // <6,5,7,2>: Cost 3 vext1 <2,6,5,7>, <2,6,5,7>
+ 3634260118U, // <6,5,7,3>: Cost 4 vext1 <1,6,5,7>, <3,0,1,2>
+ 2560519478U, // <6,5,7,4>: Cost 3 vext1 <1,6,5,7>, RHS
+ 2980498650U, // <6,5,7,5>: Cost 3 vzipr RHS, <4,4,5,5>
+ 2980497922U, // <6,5,7,6>: Cost 3 vzipr RHS, <3,4,5,6>
+ 3103214902U, // <6,5,7,7>: Cost 3 vtrnr <2,6,3,7>, RHS
+ 2560522030U, // <6,5,7,u>: Cost 3 vext1 <1,6,5,7>, LHS
+ 2560524390U, // <6,5,u,0>: Cost 3 vext1 <1,6,5,u>, LHS
+ 2560525556U, // <6,5,u,1>: Cost 3 vext1 <1,6,5,u>, <1,6,5,u>
+ 2566498253U, // <6,5,u,2>: Cost 3 vext1 <2,6,5,u>, <2,6,5,u>
+ 2646931439U, // <6,5,u,3>: Cost 3 vext2 <4,u,6,5>, <u,3,5,7>
+ 2560527670U, // <6,5,u,4>: Cost 3 vext1 <1,6,5,u>, RHS
+ 2634987674U, // <6,5,u,5>: Cost 3 vext2 <2,u,6,5>, RHS
+ 2980506114U, // <6,5,u,6>: Cost 3 vzipr RHS, <3,4,5,6>
+ 1175277674U, // <6,5,u,7>: Cost 2 vrev <5,6,7,u>
+ 1175351411U, // <6,5,u,u>: Cost 2 vrev <5,6,u,u>
+ 2578448486U, // <6,6,0,0>: Cost 3 vext1 <4,6,6,0>, LHS
+ 1573191782U, // <6,6,0,1>: Cost 2 vext2 <4,u,6,6>, LHS
+ 2686030124U, // <6,6,0,2>: Cost 3 vext3 <0,2,4,6>, <6,0,2,4>
+ 3779088690U, // <6,6,0,3>: Cost 4 vext3 <3,4,5,6>, <6,0,3,1>
+ 2687209788U, // <6,6,0,4>: Cost 3 vext3 <0,4,2,6>, <6,0,4,2>
+ 3652194000U, // <6,6,0,5>: Cost 4 vext1 <4,6,6,0>, <5,1,7,3>
+ 2254852914U, // <6,6,0,6>: Cost 3 vrev <6,6,6,0>
+ 4041575734U, // <6,6,0,7>: Cost 4 vzipr <2,4,6,0>, RHS
+ 1573192349U, // <6,6,0,u>: Cost 2 vext2 <4,u,6,6>, LHS
+ 2646934262U, // <6,6,1,0>: Cost 3 vext2 <4,u,6,6>, <1,0,3,2>
+ 2646934324U, // <6,6,1,1>: Cost 3 vext2 <4,u,6,6>, <1,1,1,1>
+ 2646934422U, // <6,6,1,2>: Cost 3 vext2 <4,u,6,6>, <1,2,3,0>
+ 2846785638U, // <6,6,1,3>: Cost 3 vuzpr <4,6,4,6>, LHS
+ 3760951694U, // <6,6,1,4>: Cost 4 vext3 <0,4,2,6>, <6,1,4,3>
+ 2646934672U, // <6,6,1,5>: Cost 3 vext2 <4,u,6,6>, <1,5,3,7>
+ 2712572320U, // <6,6,1,6>: Cost 3 vext3 <4,6,4,6>, <6,1,6,3>
+ 3775549865U, // <6,6,1,7>: Cost 4 vext3 <2,u,2,6>, <6,1,7,3>
+ 2846785643U, // <6,6,1,u>: Cost 3 vuzpr <4,6,4,6>, LHS
+ 3759772094U, // <6,6,2,0>: Cost 4 vext3 <0,2,4,6>, <6,2,0,6>
+ 3704751676U, // <6,6,2,1>: Cost 4 vext2 <2,2,6,6>, <2,1,6,3>
+ 2631009936U, // <6,6,2,2>: Cost 3 vext2 <2,2,6,6>, <2,2,6,6>
+ 2646935206U, // <6,6,2,3>: Cost 3 vext2 <4,u,6,6>, <2,3,0,1>
+ 3759772127U, // <6,6,2,4>: Cost 4 vext3 <0,2,4,6>, <6,2,4,3>
+ 3704752004U, // <6,6,2,5>: Cost 4 vext2 <2,2,6,6>, <2,5,6,7>
+ 2646935482U, // <6,6,2,6>: Cost 3 vext2 <4,u,6,6>, <2,6,3,7>
+ 2712572410U, // <6,6,2,7>: Cost 3 vext3 <4,6,4,6>, <6,2,7,3>
+ 2712572419U, // <6,6,2,u>: Cost 3 vext3 <4,6,4,6>, <6,2,u,3>
+ 2646935702U, // <6,6,3,0>: Cost 3 vext2 <4,u,6,6>, <3,0,1,2>
+ 3777024534U, // <6,6,3,1>: Cost 4 vext3 <3,1,4,6>, <6,3,1,4>
+ 3704752453U, // <6,6,3,2>: Cost 4 vext2 <2,2,6,6>, <3,2,2,6>
+ 2646935964U, // <6,6,3,3>: Cost 3 vext2 <4,u,6,6>, <3,3,3,3>
+ 2705347122U, // <6,6,3,4>: Cost 3 vext3 <3,4,5,6>, <6,3,4,5>
+ 3779678778U, // <6,6,3,5>: Cost 4 vext3 <3,5,4,6>, <6,3,5,4>
+ 2657553069U, // <6,6,3,6>: Cost 3 vext2 <6,6,6,6>, <3,6,6,6>
+ 4039609654U, // <6,6,3,7>: Cost 4 vzipr <2,1,6,3>, RHS
+ 2708001366U, // <6,6,3,u>: Cost 3 vext3 <3,u,5,6>, <6,3,u,5>
+ 2578481254U, // <6,6,4,0>: Cost 3 vext1 <4,6,6,4>, LHS
+ 3652223734U, // <6,6,4,1>: Cost 4 vext1 <4,6,6,4>, <1,0,3,2>
+ 3760951922U, // <6,6,4,2>: Cost 4 vext3 <0,4,2,6>, <6,4,2,6>
+ 3779089019U, // <6,6,4,3>: Cost 4 vext3 <3,4,5,6>, <6,4,3,6>
+ 1570540772U, // <6,6,4,4>: Cost 2 vext2 <4,4,6,6>, <4,4,6,6>
+ 1573195062U, // <6,6,4,5>: Cost 2 vext2 <4,u,6,6>, RHS
+ 2712572560U, // <6,6,4,6>: Cost 3 vext3 <4,6,4,6>, <6,4,6,0>
+ 2723410591U, // <6,6,4,7>: Cost 3 vext3 <6,4,7,6>, <6,4,7,6>
+ 1573195304U, // <6,6,4,u>: Cost 2 vext2 <4,u,6,6>, <4,u,6,6>
+ 3640287334U, // <6,6,5,0>: Cost 4 vext1 <2,6,6,5>, LHS
+ 2646937296U, // <6,6,5,1>: Cost 3 vext2 <4,u,6,6>, <5,1,7,3>
+ 3640289235U, // <6,6,5,2>: Cost 4 vext1 <2,6,6,5>, <2,6,6,5>
+ 3720679279U, // <6,6,5,3>: Cost 4 vext2 <4,u,6,6>, <5,3,7,0>
+ 2646937542U, // <6,6,5,4>: Cost 3 vext2 <4,u,6,6>, <5,4,7,6>
+ 2646937604U, // <6,6,5,5>: Cost 3 vext2 <4,u,6,6>, <5,5,5,5>
+ 2646937698U, // <6,6,5,6>: Cost 3 vext2 <4,u,6,6>, <5,6,7,0>
+ 2846788918U, // <6,6,5,7>: Cost 3 vuzpr <4,6,4,6>, RHS
+ 2846788919U, // <6,6,5,u>: Cost 3 vuzpr <4,6,4,6>, RHS
+ 1516699750U, // <6,6,6,0>: Cost 2 vext1 <6,6,6,6>, LHS
+ 2590442230U, // <6,6,6,1>: Cost 3 vext1 <6,6,6,6>, <1,0,3,2>
+ 2646938106U, // <6,6,6,2>: Cost 3 vext2 <4,u,6,6>, <6,2,7,3>
+ 2590443670U, // <6,6,6,3>: Cost 3 vext1 <6,6,6,6>, <3,0,1,2>
+ 1516703030U, // <6,6,6,4>: Cost 2 vext1 <6,6,6,6>, RHS
+ 2590445264U, // <6,6,6,5>: Cost 3 vext1 <6,6,6,6>, <5,1,7,3>
+ 296144182U, // <6,6,6,6>: Cost 1 vdup2 RHS
+ 2712572738U, // <6,6,6,7>: Cost 3 vext3 <4,6,4,6>, <6,6,7,7>
+ 296144182U, // <6,6,6,u>: Cost 1 vdup2 RHS
+ 2566561894U, // <6,6,7,0>: Cost 3 vext1 <2,6,6,7>, LHS
+ 3634332924U, // <6,6,7,1>: Cost 4 vext1 <1,6,6,7>, <1,6,6,7>
+ 2566563797U, // <6,6,7,2>: Cost 3 vext1 <2,6,6,7>, <2,6,6,7>
+ 2584480258U, // <6,6,7,3>: Cost 3 vext1 <5,6,6,7>, <3,4,5,6>
+ 2566565174U, // <6,6,7,4>: Cost 3 vext1 <2,6,6,7>, RHS
+ 2717438846U, // <6,6,7,5>: Cost 3 vext3 <5,4,7,6>, <6,7,5,4>
+ 2980500280U, // <6,6,7,6>: Cost 3 vzipr RHS, <6,6,6,6>
+ 1906756918U, // <6,6,7,7>: Cost 2 vzipr RHS, RHS
+ 1906756919U, // <6,6,7,u>: Cost 2 vzipr RHS, RHS
+ 1516699750U, // <6,6,u,0>: Cost 2 vext1 <6,6,6,6>, LHS
+ 1573197614U, // <6,6,u,1>: Cost 2 vext2 <4,u,6,6>, LHS
+ 2566571990U, // <6,6,u,2>: Cost 3 vext1 <2,6,6,u>, <2,6,6,u>
+ 2846786205U, // <6,6,u,3>: Cost 3 vuzpr <4,6,4,6>, LHS
+ 1516703030U, // <6,6,u,4>: Cost 2 vext1 <6,6,6,6>, RHS
+ 1573197978U, // <6,6,u,5>: Cost 2 vext2 <4,u,6,6>, RHS
+ 296144182U, // <6,6,u,6>: Cost 1 vdup2 RHS
+ 1906765110U, // <6,6,u,7>: Cost 2 vzipr RHS, RHS
+ 296144182U, // <6,6,u,u>: Cost 1 vdup2 RHS
+ 1571209216U, // <6,7,0,0>: Cost 2 vext2 RHS, <0,0,0,0>
+ 497467494U, // <6,7,0,1>: Cost 1 vext2 RHS, LHS
+ 1571209380U, // <6,7,0,2>: Cost 2 vext2 RHS, <0,2,0,2>
+ 2644951292U, // <6,7,0,3>: Cost 3 vext2 RHS, <0,3,1,0>
+ 1571209554U, // <6,7,0,4>: Cost 2 vext2 RHS, <0,4,1,5>
+ 1510756450U, // <6,7,0,5>: Cost 2 vext1 <5,6,7,0>, <5,6,7,0>
+ 2644951542U, // <6,7,0,6>: Cost 3 vext2 RHS, <0,6,1,7>
+ 2584499194U, // <6,7,0,7>: Cost 3 vext1 <5,6,7,0>, <7,0,1,2>
+ 497468061U, // <6,7,0,u>: Cost 1 vext2 RHS, LHS
+ 1571209974U, // <6,7,1,0>: Cost 2 vext2 RHS, <1,0,3,2>
+ 1571210036U, // <6,7,1,1>: Cost 2 vext2 RHS, <1,1,1,1>
+ 1571210134U, // <6,7,1,2>: Cost 2 vext2 RHS, <1,2,3,0>
+ 1571210200U, // <6,7,1,3>: Cost 2 vext2 RHS, <1,3,1,3>
+ 2644952098U, // <6,7,1,4>: Cost 3 vext2 RHS, <1,4,0,5>
+ 1571210384U, // <6,7,1,5>: Cost 2 vext2 RHS, <1,5,3,7>
+ 2644952271U, // <6,7,1,6>: Cost 3 vext2 RHS, <1,6,1,7>
+ 2578535418U, // <6,7,1,7>: Cost 3 vext1 <4,6,7,1>, <7,0,1,2>
+ 1571210605U, // <6,7,1,u>: Cost 2 vext2 RHS, <1,u,1,3>
+ 2644952509U, // <6,7,2,0>: Cost 3 vext2 RHS, <2,0,1,2>
+ 2644952582U, // <6,7,2,1>: Cost 3 vext2 RHS, <2,1,0,3>
+ 1571210856U, // <6,7,2,2>: Cost 2 vext2 RHS, <2,2,2,2>
+ 1571210918U, // <6,7,2,3>: Cost 2 vext2 RHS, <2,3,0,1>
+ 2644952828U, // <6,7,2,4>: Cost 3 vext2 RHS, <2,4,0,6>
+ 2633009028U, // <6,7,2,5>: Cost 3 vext2 <2,5,6,7>, <2,5,6,7>
+ 1571211194U, // <6,7,2,6>: Cost 2 vext2 RHS, <2,6,3,7>
+ 2668840938U, // <6,7,2,7>: Cost 3 vext2 RHS, <2,7,0,1>
+ 1571211323U, // <6,7,2,u>: Cost 2 vext2 RHS, <2,u,0,1>
+ 1571211414U, // <6,7,3,0>: Cost 2 vext2 RHS, <3,0,1,2>
+ 2644953311U, // <6,7,3,1>: Cost 3 vext2 RHS, <3,1,0,3>
+ 2644953390U, // <6,7,3,2>: Cost 3 vext2 RHS, <3,2,0,1>
+ 1571211676U, // <6,7,3,3>: Cost 2 vext2 RHS, <3,3,3,3>
+ 1571211778U, // <6,7,3,4>: Cost 2 vext2 RHS, <3,4,5,6>
+ 2644953648U, // <6,7,3,5>: Cost 3 vext2 RHS, <3,5,1,7>
+ 2644953720U, // <6,7,3,6>: Cost 3 vext2 RHS, <3,6,0,7>
+ 2644953795U, // <6,7,3,7>: Cost 3 vext2 RHS, <3,7,0,1>
+ 1571212062U, // <6,7,3,u>: Cost 2 vext2 RHS, <3,u,1,2>
+ 1573202834U, // <6,7,4,0>: Cost 2 vext2 RHS, <4,0,5,1>
+ 2644954058U, // <6,7,4,1>: Cost 3 vext2 RHS, <4,1,2,3>
+ 2644954166U, // <6,7,4,2>: Cost 3 vext2 RHS, <4,2,5,3>
+ 2644954258U, // <6,7,4,3>: Cost 3 vext2 RHS, <4,3,6,5>
+ 1571212496U, // <6,7,4,4>: Cost 2 vext2 RHS, <4,4,4,4>
+ 497470774U, // <6,7,4,5>: Cost 1 vext2 RHS, RHS
+ 1573203316U, // <6,7,4,6>: Cost 2 vext2 RHS, <4,6,4,6>
+ 2646281688U, // <6,7,4,7>: Cost 3 vext2 <4,7,6,7>, <4,7,6,7>
+ 497471017U, // <6,7,4,u>: Cost 1 vext2 RHS, RHS
+ 2644954696U, // <6,7,5,0>: Cost 3 vext2 RHS, <5,0,1,2>
+ 1573203664U, // <6,7,5,1>: Cost 2 vext2 RHS, <5,1,7,3>
+ 2644954878U, // <6,7,5,2>: Cost 3 vext2 RHS, <5,2,3,4>
+ 2644954991U, // <6,7,5,3>: Cost 3 vext2 RHS, <5,3,7,0>
+ 1571213254U, // <6,7,5,4>: Cost 2 vext2 RHS, <5,4,7,6>
+ 1571213316U, // <6,7,5,5>: Cost 2 vext2 RHS, <5,5,5,5>
+ 1571213410U, // <6,7,5,6>: Cost 2 vext2 RHS, <5,6,7,0>
+ 1573204136U, // <6,7,5,7>: Cost 2 vext2 RHS, <5,7,5,7>
+ 1573204217U, // <6,7,5,u>: Cost 2 vext2 RHS, <5,u,5,7>
+ 2644955425U, // <6,7,6,0>: Cost 3 vext2 RHS, <6,0,1,2>
+ 2644955561U, // <6,7,6,1>: Cost 3 vext2 RHS, <6,1,7,3>
+ 1573204474U, // <6,7,6,2>: Cost 2 vext2 RHS, <6,2,7,3>
+ 2644955698U, // <6,7,6,3>: Cost 3 vext2 RHS, <6,3,4,5>
+ 2644955789U, // <6,7,6,4>: Cost 3 vext2 RHS, <6,4,5,6>
+ 2644955889U, // <6,7,6,5>: Cost 3 vext2 RHS, <6,5,7,7>
+ 1571214136U, // <6,7,6,6>: Cost 2 vext2 RHS, <6,6,6,6>
+ 1571214158U, // <6,7,6,7>: Cost 2 vext2 RHS, <6,7,0,1>
+ 1573204895U, // <6,7,6,u>: Cost 2 vext2 RHS, <6,u,0,1>
+ 1573204986U, // <6,7,7,0>: Cost 2 vext2 RHS, <7,0,1,2>
+ 2572608656U, // <6,7,7,1>: Cost 3 vext1 <3,6,7,7>, <1,5,3,7>
+ 2644956362U, // <6,7,7,2>: Cost 3 vext2 RHS, <7,2,6,3>
+ 2572610231U, // <6,7,7,3>: Cost 3 vext1 <3,6,7,7>, <3,6,7,7>
+ 1573205350U, // <6,7,7,4>: Cost 2 vext2 RHS, <7,4,5,6>
+ 2646947220U, // <6,7,7,5>: Cost 3 vext2 RHS, <7,5,1,7>
+ 1516786498U, // <6,7,7,6>: Cost 2 vext1 <6,6,7,7>, <6,6,7,7>
+ 1571214956U, // <6,7,7,7>: Cost 2 vext2 RHS, <7,7,7,7>
+ 1573205634U, // <6,7,7,u>: Cost 2 vext2 RHS, <7,u,1,2>
+ 1571215059U, // <6,7,u,0>: Cost 2 vext2 RHS, <u,0,1,2>
+ 497473326U, // <6,7,u,1>: Cost 1 vext2 RHS, LHS
+ 1571215237U, // <6,7,u,2>: Cost 2 vext2 RHS, <u,2,3,0>
+ 1571215292U, // <6,7,u,3>: Cost 2 vext2 RHS, <u,3,0,1>
+ 1571215423U, // <6,7,u,4>: Cost 2 vext2 RHS, <u,4,5,6>
+ 497473690U, // <6,7,u,5>: Cost 1 vext2 RHS, RHS
+ 1571215568U, // <6,7,u,6>: Cost 2 vext2 RHS, <u,6,3,7>
+ 1573206272U, // <6,7,u,7>: Cost 2 vext2 RHS, <u,7,0,1>
+ 497473893U, // <6,7,u,u>: Cost 1 vext2 RHS, LHS
+ 1571217408U, // <6,u,0,0>: Cost 2 vext2 RHS, <0,0,0,0>
+ 497475686U, // <6,u,0,1>: Cost 1 vext2 RHS, LHS
+ 1571217572U, // <6,u,0,2>: Cost 2 vext2 RHS, <0,2,0,2>
+ 2689865445U, // <6,u,0,3>: Cost 3 vext3 <0,u,2,6>, <u,0,3,2>
+ 1571217746U, // <6,u,0,4>: Cost 2 vext2 RHS, <0,4,1,5>
+ 1510830187U, // <6,u,0,5>: Cost 2 vext1 <5,6,u,0>, <5,6,u,0>
+ 2644959734U, // <6,u,0,6>: Cost 3 vext2 RHS, <0,6,1,7>
+ 1193130221U, // <6,u,0,7>: Cost 2 vrev <u,6,7,0>
+ 497476253U, // <6,u,0,u>: Cost 1 vext2 RHS, LHS
+ 1571218166U, // <6,u,1,0>: Cost 2 vext2 RHS, <1,0,3,2>
+ 1571218228U, // <6,u,1,1>: Cost 2 vext2 RHS, <1,1,1,1>
+ 1612289838U, // <6,u,1,2>: Cost 2 vext3 <0,2,4,6>, LHS
+ 1571218392U, // <6,u,1,3>: Cost 2 vext2 RHS, <1,3,1,3>
+ 2566663478U, // <6,u,1,4>: Cost 3 vext1 <2,6,u,1>, RHS
+ 1571218576U, // <6,u,1,5>: Cost 2 vext2 RHS, <1,5,3,7>
+ 2644960463U, // <6,u,1,6>: Cost 3 vext2 RHS, <1,6,1,7>
+ 2717439835U, // <6,u,1,7>: Cost 3 vext3 <5,4,7,6>, <u,1,7,3>
+ 1612289892U, // <6,u,1,u>: Cost 2 vext3 <0,2,4,6>, LHS
+ 1504870502U, // <6,u,2,0>: Cost 2 vext1 <4,6,u,2>, LHS
+ 2644960774U, // <6,u,2,1>: Cost 3 vext2 RHS, <2,1,0,3>
+ 1571219048U, // <6,u,2,2>: Cost 2 vext2 RHS, <2,2,2,2>
+ 1571219110U, // <6,u,2,3>: Cost 2 vext2 RHS, <2,3,0,1>
+ 1504873782U, // <6,u,2,4>: Cost 2 vext1 <4,6,u,2>, RHS
+ 2633017221U, // <6,u,2,5>: Cost 3 vext2 <2,5,6,u>, <2,5,6,u>
+ 1571219386U, // <6,u,2,6>: Cost 2 vext2 RHS, <2,6,3,7>
+ 2712573868U, // <6,u,2,7>: Cost 3 vext3 <4,6,4,6>, <u,2,7,3>
+ 1571219515U, // <6,u,2,u>: Cost 2 vext2 RHS, <2,u,0,1>
+ 1571219606U, // <6,u,3,0>: Cost 2 vext2 RHS, <3,0,1,2>
+ 2644961503U, // <6,u,3,1>: Cost 3 vext2 RHS, <3,1,0,3>
+ 2566678499U, // <6,u,3,2>: Cost 3 vext1 <2,6,u,3>, <2,6,u,3>
+ 1571219868U, // <6,u,3,3>: Cost 2 vext2 RHS, <3,3,3,3>
+ 1571219970U, // <6,u,3,4>: Cost 2 vext2 RHS, <3,4,5,6>
+ 2689865711U, // <6,u,3,5>: Cost 3 vext3 <0,u,2,6>, <u,3,5,7>
+ 2708002806U, // <6,u,3,6>: Cost 3 vext3 <3,u,5,6>, <u,3,6,5>
+ 2644961987U, // <6,u,3,7>: Cost 3 vext2 RHS, <3,7,0,1>
+ 1571220254U, // <6,u,3,u>: Cost 2 vext2 RHS, <3,u,1,2>
+ 1571220370U, // <6,u,4,0>: Cost 2 vext2 RHS, <4,0,5,1>
+ 2644962250U, // <6,u,4,1>: Cost 3 vext2 RHS, <4,1,2,3>
+ 1661245476U, // <6,u,4,2>: Cost 2 vext3 <u,4,2,6>, <u,4,2,6>
+ 2686031917U, // <6,u,4,3>: Cost 3 vext3 <0,2,4,6>, <u,4,3,6>
+ 1571220688U, // <6,u,4,4>: Cost 2 vext2 RHS, <4,4,4,4>
+ 497478967U, // <6,u,4,5>: Cost 1 vext2 RHS, RHS
+ 1571220852U, // <6,u,4,6>: Cost 2 vext2 RHS, <4,6,4,6>
+ 1661614161U, // <6,u,4,7>: Cost 2 vext3 <u,4,7,6>, <u,4,7,6>
+ 497479209U, // <6,u,4,u>: Cost 1 vext2 RHS, RHS
+ 2566692966U, // <6,u,5,0>: Cost 3 vext1 <2,6,u,5>, LHS
+ 1571221200U, // <6,u,5,1>: Cost 2 vext2 RHS, <5,1,7,3>
+ 2566694885U, // <6,u,5,2>: Cost 3 vext1 <2,6,u,5>, <2,6,u,5>
+ 2689865855U, // <6,u,5,3>: Cost 3 vext3 <0,u,2,6>, <u,5,3,7>
+ 1571221446U, // <6,u,5,4>: Cost 2 vext2 RHS, <5,4,7,6>
+ 1571221508U, // <6,u,5,5>: Cost 2 vext2 RHS, <5,5,5,5>
+ 1612290202U, // <6,u,5,6>: Cost 2 vext3 <0,2,4,6>, RHS
+ 1571221672U, // <6,u,5,7>: Cost 2 vext2 RHS, <5,7,5,7>
+ 1612290220U, // <6,u,5,u>: Cost 2 vext3 <0,2,4,6>, RHS
+ 1504903270U, // <6,u,6,0>: Cost 2 vext1 <4,6,u,6>, LHS
+ 2644963752U, // <6,u,6,1>: Cost 3 vext2 RHS, <6,1,7,2>
+ 1571222010U, // <6,u,6,2>: Cost 2 vext2 RHS, <6,2,7,3>
+ 2686032080U, // <6,u,6,3>: Cost 3 vext3 <0,2,4,6>, <u,6,3,7>
+ 1504906550U, // <6,u,6,4>: Cost 2 vext1 <4,6,u,6>, RHS
+ 2644964079U, // <6,u,6,5>: Cost 3 vext2 RHS, <6,5,7,5>
+ 296144182U, // <6,u,6,6>: Cost 1 vdup2 RHS
+ 1571222350U, // <6,u,6,7>: Cost 2 vext2 RHS, <6,7,0,1>
+ 296144182U, // <6,u,6,u>: Cost 1 vdup2 RHS
+ 1492967526U, // <6,u,7,0>: Cost 2 vext1 <2,6,u,7>, LHS
+ 2560738574U, // <6,u,7,1>: Cost 3 vext1 <1,6,u,7>, <1,6,u,7>
+ 1492969447U, // <6,u,7,2>: Cost 2 vext1 <2,6,u,7>, <2,6,u,7>
+ 1906753692U, // <6,u,7,3>: Cost 2 vzipr RHS, LHS
+ 1492970806U, // <6,u,7,4>: Cost 2 vext1 <2,6,u,7>, RHS
+ 2980495761U, // <6,u,7,5>: Cost 3 vzipr RHS, <0,4,u,5>
+ 1516860235U, // <6,u,7,6>: Cost 2 vext1 <6,6,u,7>, <6,6,u,7>
+ 1906756936U, // <6,u,7,7>: Cost 2 vzipr RHS, RHS
+ 1492973358U, // <6,u,7,u>: Cost 2 vext1 <2,6,u,7>, LHS
+ 1492975718U, // <6,u,u,0>: Cost 2 vext1 <2,6,u,u>, LHS
+ 497481518U, // <6,u,u,1>: Cost 1 vext2 RHS, LHS
+ 1612290405U, // <6,u,u,2>: Cost 2 vext3 <0,2,4,6>, LHS
+ 1571223484U, // <6,u,u,3>: Cost 2 vext2 RHS, <u,3,0,1>
+ 1492978998U, // <6,u,u,4>: Cost 2 vext1 <2,6,u,u>, RHS
+ 497481882U, // <6,u,u,5>: Cost 1 vext2 RHS, RHS
+ 296144182U, // <6,u,u,6>: Cost 1 vdup2 RHS
+ 1906765128U, // <6,u,u,7>: Cost 2 vzipr RHS, RHS
+ 497482085U, // <6,u,u,u>: Cost 1 vext2 RHS, LHS
+ 1638318080U, // <7,0,0,0>: Cost 2 vext3 RHS, <0,0,0,0>
+ 1638318090U, // <7,0,0,1>: Cost 2 vext3 RHS, <0,0,1,1>
+ 1638318100U, // <7,0,0,2>: Cost 2 vext3 RHS, <0,0,2,2>
+ 3646442178U, // <7,0,0,3>: Cost 4 vext1 <3,7,0,0>, <3,7,0,0>
+ 2712059941U, // <7,0,0,4>: Cost 3 vext3 RHS, <0,0,4,1>
+ 2651603364U, // <7,0,0,5>: Cost 3 vext2 <5,6,7,0>, <0,5,1,6>
+ 2590618445U, // <7,0,0,6>: Cost 3 vext1 <6,7,0,0>, <6,7,0,0>
+ 3785801798U, // <7,0,0,7>: Cost 4 vext3 RHS, <0,0,7,7>
+ 1638318153U, // <7,0,0,u>: Cost 2 vext3 RHS, <0,0,u,1>
+ 1516879974U, // <7,0,1,0>: Cost 2 vext1 <6,7,0,1>, LHS
+ 2693922911U, // <7,0,1,1>: Cost 3 vext3 <1,5,3,7>, <0,1,1,5>
+ 564576358U, // <7,0,1,2>: Cost 1 vext3 RHS, LHS
+ 2638996480U, // <7,0,1,3>: Cost 3 vext2 <3,5,7,0>, <1,3,5,7>
+ 1516883254U, // <7,0,1,4>: Cost 2 vext1 <6,7,0,1>, RHS
+ 2649613456U, // <7,0,1,5>: Cost 3 vext2 <5,3,7,0>, <1,5,3,7>
+ 1516884814U, // <7,0,1,6>: Cost 2 vext1 <6,7,0,1>, <6,7,0,1>
+ 2590626808U, // <7,0,1,7>: Cost 3 vext1 <6,7,0,1>, <7,0,1,0>
+ 564576412U, // <7,0,1,u>: Cost 1 vext3 RHS, LHS
+ 1638318244U, // <7,0,2,0>: Cost 2 vext3 RHS, <0,2,0,2>
+ 2692743344U, // <7,0,2,1>: Cost 3 vext3 <1,3,5,7>, <0,2,1,5>
+ 2712060084U, // <7,0,2,2>: Cost 3 vext3 RHS, <0,2,2,0>
+ 2712060094U, // <7,0,2,3>: Cost 3 vext3 RHS, <0,2,3,1>
+ 1638318284U, // <7,0,2,4>: Cost 2 vext3 RHS, <0,2,4,6>
+ 2712060118U, // <7,0,2,5>: Cost 3 vext3 RHS, <0,2,5,7>
+ 2651604922U, // <7,0,2,6>: Cost 3 vext2 <5,6,7,0>, <2,6,3,7>
+ 2686255336U, // <7,0,2,7>: Cost 3 vext3 <0,2,7,7>, <0,2,7,7>
+ 1638318316U, // <7,0,2,u>: Cost 2 vext3 RHS, <0,2,u,2>
+ 2651605142U, // <7,0,3,0>: Cost 3 vext2 <5,6,7,0>, <3,0,1,2>
+ 2712060156U, // <7,0,3,1>: Cost 3 vext3 RHS, <0,3,1,0>
+ 2712060165U, // <7,0,3,2>: Cost 3 vext3 RHS, <0,3,2,0>
+ 2651605404U, // <7,0,3,3>: Cost 3 vext2 <5,6,7,0>, <3,3,3,3>
+ 2651605506U, // <7,0,3,4>: Cost 3 vext2 <5,6,7,0>, <3,4,5,6>
+ 2638998111U, // <7,0,3,5>: Cost 3 vext2 <3,5,7,0>, <3,5,7,0>
+ 2639661744U, // <7,0,3,6>: Cost 3 vext2 <3,6,7,0>, <3,6,7,0>
+ 3712740068U, // <7,0,3,7>: Cost 4 vext2 <3,5,7,0>, <3,7,3,7>
+ 2640989010U, // <7,0,3,u>: Cost 3 vext2 <3,u,7,0>, <3,u,7,0>
+ 2712060232U, // <7,0,4,0>: Cost 3 vext3 RHS, <0,4,0,4>
+ 1638318418U, // <7,0,4,1>: Cost 2 vext3 RHS, <0,4,1,5>
+ 1638318428U, // <7,0,4,2>: Cost 2 vext3 RHS, <0,4,2,6>
+ 3646474950U, // <7,0,4,3>: Cost 4 vext1 <3,7,0,4>, <3,7,0,4>
+ 2712060270U, // <7,0,4,4>: Cost 3 vext3 RHS, <0,4,4,6>
+ 1577864502U, // <7,0,4,5>: Cost 2 vext2 <5,6,7,0>, RHS
+ 2651606388U, // <7,0,4,6>: Cost 3 vext2 <5,6,7,0>, <4,6,4,6>
+ 3787792776U, // <7,0,4,7>: Cost 4 vext3 RHS, <0,4,7,5>
+ 1638318481U, // <7,0,4,u>: Cost 2 vext3 RHS, <0,4,u,5>
+ 2590654566U, // <7,0,5,0>: Cost 3 vext1 <6,7,0,5>, LHS
+ 2651606736U, // <7,0,5,1>: Cost 3 vext2 <5,6,7,0>, <5,1,7,3>
+ 2712060334U, // <7,0,5,2>: Cost 3 vext3 RHS, <0,5,2,7>
+ 2649616239U, // <7,0,5,3>: Cost 3 vext2 <5,3,7,0>, <5,3,7,0>
+ 2651606982U, // <7,0,5,4>: Cost 3 vext2 <5,6,7,0>, <5,4,7,6>
+ 2651607044U, // <7,0,5,5>: Cost 3 vext2 <5,6,7,0>, <5,5,5,5>
+ 1577865314U, // <7,0,5,6>: Cost 2 vext2 <5,6,7,0>, <5,6,7,0>
+ 2651607208U, // <7,0,5,7>: Cost 3 vext2 <5,6,7,0>, <5,7,5,7>
+ 1579192580U, // <7,0,5,u>: Cost 2 vext2 <5,u,7,0>, <5,u,7,0>
+ 2688393709U, // <7,0,6,0>: Cost 3 vext3 <0,6,0,7>, <0,6,0,7>
+ 2712060406U, // <7,0,6,1>: Cost 3 vext3 RHS, <0,6,1,7>
+ 2688541183U, // <7,0,6,2>: Cost 3 vext3 <0,6,2,7>, <0,6,2,7>
+ 2655588936U, // <7,0,6,3>: Cost 3 vext2 <6,3,7,0>, <6,3,7,0>
+ 3762430481U, // <7,0,6,4>: Cost 4 vext3 <0,6,4,7>, <0,6,4,7>
+ 2651607730U, // <7,0,6,5>: Cost 3 vext2 <5,6,7,0>, <6,5,0,7>
+ 2651607864U, // <7,0,6,6>: Cost 3 vext2 <5,6,7,0>, <6,6,6,6>
+ 2651607886U, // <7,0,6,7>: Cost 3 vext2 <5,6,7,0>, <6,7,0,1>
+ 2688983605U, // <7,0,6,u>: Cost 3 vext3 <0,6,u,7>, <0,6,u,7>
+ 2651608058U, // <7,0,7,0>: Cost 3 vext2 <5,6,7,0>, <7,0,1,2>
+ 2932703334U, // <7,0,7,1>: Cost 3 vzipl <7,7,7,7>, LHS
+ 3066921062U, // <7,0,7,2>: Cost 3 vtrnl <7,7,7,7>, LHS
+ 3712742678U, // <7,0,7,3>: Cost 4 vext2 <3,5,7,0>, <7,3,5,7>
+ 2651608422U, // <7,0,7,4>: Cost 3 vext2 <5,6,7,0>, <7,4,5,6>
+ 2651608513U, // <7,0,7,5>: Cost 3 vext2 <5,6,7,0>, <7,5,6,7>
+ 2663552532U, // <7,0,7,6>: Cost 3 vext2 <7,6,7,0>, <7,6,7,0>
+ 2651608684U, // <7,0,7,7>: Cost 3 vext2 <5,6,7,0>, <7,7,7,7>
+ 2651608706U, // <7,0,7,u>: Cost 3 vext2 <5,6,7,0>, <7,u,1,2>
+ 1638318730U, // <7,0,u,0>: Cost 2 vext3 RHS, <0,u,0,2>
+ 1638318738U, // <7,0,u,1>: Cost 2 vext3 RHS, <0,u,1,1>
+ 564576925U, // <7,0,u,2>: Cost 1 vext3 RHS, LHS
+ 2572765898U, // <7,0,u,3>: Cost 3 vext1 <3,7,0,u>, <3,7,0,u>
+ 1638318770U, // <7,0,u,4>: Cost 2 vext3 RHS, <0,u,4,6>
+ 1577867418U, // <7,0,u,5>: Cost 2 vext2 <5,6,7,0>, RHS
+ 1516942165U, // <7,0,u,6>: Cost 2 vext1 <6,7,0,u>, <6,7,0,u>
+ 2651609344U, // <7,0,u,7>: Cost 3 vext2 <5,6,7,0>, <u,7,0,1>
+ 564576979U, // <7,0,u,u>: Cost 1 vext3 RHS, LHS
+ 2590687334U, // <7,1,0,0>: Cost 3 vext1 <6,7,1,0>, LHS
+ 2639003750U, // <7,1,0,1>: Cost 3 vext2 <3,5,7,1>, LHS
+ 2793357414U, // <7,1,0,2>: Cost 3 vuzpl <7,0,1,2>, LHS
+ 1638318838U, // <7,1,0,3>: Cost 2 vext3 RHS, <1,0,3,2>
+ 2590690614U, // <7,1,0,4>: Cost 3 vext1 <6,7,1,0>, RHS
+ 2712060679U, // <7,1,0,5>: Cost 3 vext3 RHS, <1,0,5,1>
+ 2590692182U, // <7,1,0,6>: Cost 3 vext1 <6,7,1,0>, <6,7,1,0>
+ 3785802521U, // <7,1,0,7>: Cost 4 vext3 RHS, <1,0,7,1>
+ 1638318883U, // <7,1,0,u>: Cost 2 vext3 RHS, <1,0,u,2>
+ 2712060715U, // <7,1,1,0>: Cost 3 vext3 RHS, <1,1,0,1>
+ 1638318900U, // <7,1,1,1>: Cost 2 vext3 RHS, <1,1,1,1>
+ 3774300994U, // <7,1,1,2>: Cost 4 vext3 <2,6,3,7>, <1,1,2,6>
+ 1638318920U, // <7,1,1,3>: Cost 2 vext3 RHS, <1,1,3,3>
+ 2712060755U, // <7,1,1,4>: Cost 3 vext3 RHS, <1,1,4,5>
+ 2691416926U, // <7,1,1,5>: Cost 3 vext3 <1,1,5,7>, <1,1,5,7>
+ 2590700375U, // <7,1,1,6>: Cost 3 vext1 <6,7,1,1>, <6,7,1,1>
+ 3765158766U, // <7,1,1,7>: Cost 4 vext3 <1,1,5,7>, <1,1,7,5>
+ 1638318965U, // <7,1,1,u>: Cost 2 vext3 RHS, <1,1,u,3>
+ 2712060796U, // <7,1,2,0>: Cost 3 vext3 RHS, <1,2,0,1>
+ 2712060807U, // <7,1,2,1>: Cost 3 vext3 RHS, <1,2,1,3>
+ 3712747112U, // <7,1,2,2>: Cost 4 vext2 <3,5,7,1>, <2,2,2,2>
+ 1638318998U, // <7,1,2,3>: Cost 2 vext3 RHS, <1,2,3,0>
+ 2712060836U, // <7,1,2,4>: Cost 3 vext3 RHS, <1,2,4,5>
+ 2712060843U, // <7,1,2,5>: Cost 3 vext3 RHS, <1,2,5,3>
+ 2590708568U, // <7,1,2,6>: Cost 3 vext1 <6,7,1,2>, <6,7,1,2>
+ 2735948730U, // <7,1,2,7>: Cost 3 vext3 RHS, <1,2,7,0>
+ 1638319043U, // <7,1,2,u>: Cost 2 vext3 RHS, <1,2,u,0>
+ 2712060876U, // <7,1,3,0>: Cost 3 vext3 RHS, <1,3,0,0>
+ 1638319064U, // <7,1,3,1>: Cost 2 vext3 RHS, <1,3,1,3>
+ 2712060894U, // <7,1,3,2>: Cost 3 vext3 RHS, <1,3,2,0>
+ 2692596718U, // <7,1,3,3>: Cost 3 vext3 <1,3,3,7>, <1,3,3,7>
+ 2712060917U, // <7,1,3,4>: Cost 3 vext3 RHS, <1,3,4,5>
+ 1619002368U, // <7,1,3,5>: Cost 2 vext3 <1,3,5,7>, <1,3,5,7>
+ 2692817929U, // <7,1,3,6>: Cost 3 vext3 <1,3,6,7>, <1,3,6,7>
+ 2735948814U, // <7,1,3,7>: Cost 3 vext3 RHS, <1,3,7,3>
+ 1619223579U, // <7,1,3,u>: Cost 2 vext3 <1,3,u,7>, <1,3,u,7>
+ 2712060962U, // <7,1,4,0>: Cost 3 vext3 RHS, <1,4,0,5>
+ 2712060971U, // <7,1,4,1>: Cost 3 vext3 RHS, <1,4,1,5>
+ 2712060980U, // <7,1,4,2>: Cost 3 vext3 RHS, <1,4,2,5>
+ 2712060989U, // <7,1,4,3>: Cost 3 vext3 RHS, <1,4,3,5>
+ 3785802822U, // <7,1,4,4>: Cost 4 vext3 RHS, <1,4,4,5>
+ 2639007030U, // <7,1,4,5>: Cost 3 vext2 <3,5,7,1>, RHS
+ 2645642634U, // <7,1,4,6>: Cost 3 vext2 <4,6,7,1>, <4,6,7,1>
+ 3719384520U, // <7,1,4,7>: Cost 4 vext2 <4,6,7,1>, <4,7,5,0>
+ 2639007273U, // <7,1,4,u>: Cost 3 vext2 <3,5,7,1>, RHS
+ 2572812390U, // <7,1,5,0>: Cost 3 vext1 <3,7,1,5>, LHS
+ 2693776510U, // <7,1,5,1>: Cost 3 vext3 <1,5,1,7>, <1,5,1,7>
+ 3774301318U, // <7,1,5,2>: Cost 4 vext3 <2,6,3,7>, <1,5,2,6>
+ 1620182160U, // <7,1,5,3>: Cost 2 vext3 <1,5,3,7>, <1,5,3,7>
+ 2572815670U, // <7,1,5,4>: Cost 3 vext1 <3,7,1,5>, RHS
+ 3766486178U, // <7,1,5,5>: Cost 4 vext3 <1,3,5,7>, <1,5,5,7>
+ 2651615331U, // <7,1,5,6>: Cost 3 vext2 <5,6,7,1>, <5,6,7,1>
+ 2652278964U, // <7,1,5,7>: Cost 3 vext2 <5,7,7,1>, <5,7,7,1>
+ 1620550845U, // <7,1,5,u>: Cost 2 vext3 <1,5,u,7>, <1,5,u,7>
+ 3768108230U, // <7,1,6,0>: Cost 4 vext3 <1,6,0,7>, <1,6,0,7>
+ 2694440143U, // <7,1,6,1>: Cost 3 vext3 <1,6,1,7>, <1,6,1,7>
+ 2712061144U, // <7,1,6,2>: Cost 3 vext3 RHS, <1,6,2,7>
+ 2694587617U, // <7,1,6,3>: Cost 3 vext3 <1,6,3,7>, <1,6,3,7>
+ 3768403178U, // <7,1,6,4>: Cost 4 vext3 <1,6,4,7>, <1,6,4,7>
+ 2694735091U, // <7,1,6,5>: Cost 3 vext3 <1,6,5,7>, <1,6,5,7>
+ 3768550652U, // <7,1,6,6>: Cost 4 vext3 <1,6,6,7>, <1,6,6,7>
+ 2652279630U, // <7,1,6,7>: Cost 3 vext2 <5,7,7,1>, <6,7,0,1>
+ 2694956302U, // <7,1,6,u>: Cost 3 vext3 <1,6,u,7>, <1,6,u,7>
+ 2645644282U, // <7,1,7,0>: Cost 3 vext2 <4,6,7,1>, <7,0,1,2>
+ 2859062094U, // <7,1,7,1>: Cost 3 vuzpr <6,7,0,1>, <6,7,0,1>
+ 3779462437U, // <7,1,7,2>: Cost 4 vext3 <3,5,1,7>, <1,7,2,3>
+ 3121938534U, // <7,1,7,3>: Cost 3 vtrnr <5,7,5,7>, LHS
+ 2554916150U, // <7,1,7,4>: Cost 3 vext1 <0,7,1,7>, RHS
+ 3769140548U, // <7,1,7,5>: Cost 4 vext3 <1,7,5,7>, <1,7,5,7>
+ 3726022164U, // <7,1,7,6>: Cost 4 vext2 <5,7,7,1>, <7,6,7,0>
+ 2554918508U, // <7,1,7,7>: Cost 3 vext1 <0,7,1,7>, <7,7,7,7>
+ 3121938539U, // <7,1,7,u>: Cost 3 vtrnr <5,7,5,7>, LHS
+ 2572836966U, // <7,1,u,0>: Cost 3 vext1 <3,7,1,u>, LHS
+ 1638319469U, // <7,1,u,1>: Cost 2 vext3 RHS, <1,u,1,3>
+ 2712061299U, // <7,1,u,2>: Cost 3 vext3 RHS, <1,u,2,0>
+ 1622173059U, // <7,1,u,3>: Cost 2 vext3 <1,u,3,7>, <1,u,3,7>
+ 2572840246U, // <7,1,u,4>: Cost 3 vext1 <3,7,1,u>, RHS
+ 1622320533U, // <7,1,u,5>: Cost 2 vext3 <1,u,5,7>, <1,u,5,7>
+ 2696136094U, // <7,1,u,6>: Cost 3 vext3 <1,u,6,7>, <1,u,6,7>
+ 2859060777U, // <7,1,u,7>: Cost 3 vuzpr <6,7,0,1>, RHS
+ 1622541744U, // <7,1,u,u>: Cost 2 vext3 <1,u,u,7>, <1,u,u,7>
+ 2712061364U, // <7,2,0,0>: Cost 3 vext3 RHS, <2,0,0,2>
+ 2712061373U, // <7,2,0,1>: Cost 3 vext3 RHS, <2,0,1,2>
+ 2712061380U, // <7,2,0,2>: Cost 3 vext3 RHS, <2,0,2,0>
+ 2712061389U, // <7,2,0,3>: Cost 3 vext3 RHS, <2,0,3,0>
+ 2712061404U, // <7,2,0,4>: Cost 3 vext3 RHS, <2,0,4,6>
+ 2696725990U, // <7,2,0,5>: Cost 3 vext3 <2,0,5,7>, <2,0,5,7>
+ 2712061417U, // <7,2,0,6>: Cost 3 vext3 RHS, <2,0,6,1>
+ 3785803251U, // <7,2,0,7>: Cost 4 vext3 RHS, <2,0,7,2>
+ 2696947201U, // <7,2,0,u>: Cost 3 vext3 <2,0,u,7>, <2,0,u,7>
+ 2712061446U, // <7,2,1,0>: Cost 3 vext3 RHS, <2,1,0,3>
+ 3785803276U, // <7,2,1,1>: Cost 4 vext3 RHS, <2,1,1,0>
+ 3785803285U, // <7,2,1,2>: Cost 4 vext3 RHS, <2,1,2,0>
+ 2712061471U, // <7,2,1,3>: Cost 3 vext3 RHS, <2,1,3,1>
+ 2712061482U, // <7,2,1,4>: Cost 3 vext3 RHS, <2,1,4,3>
+ 3766486576U, // <7,2,1,5>: Cost 4 vext3 <1,3,5,7>, <2,1,5,0>
+ 2712061500U, // <7,2,1,6>: Cost 3 vext3 RHS, <2,1,6,3>
+ 2602718850U, // <7,2,1,7>: Cost 3 vext1 <u,7,2,1>, <7,u,1,2>
+ 2712061516U, // <7,2,1,u>: Cost 3 vext3 RHS, <2,1,u,1>
+ 2712061525U, // <7,2,2,0>: Cost 3 vext3 RHS, <2,2,0,1>
+ 2712061536U, // <7,2,2,1>: Cost 3 vext3 RHS, <2,2,1,3>
+ 1638319720U, // <7,2,2,2>: Cost 2 vext3 RHS, <2,2,2,2>
+ 1638319730U, // <7,2,2,3>: Cost 2 vext3 RHS, <2,2,3,3>
+ 2712061565U, // <7,2,2,4>: Cost 3 vext3 RHS, <2,2,4,5>
+ 2698053256U, // <7,2,2,5>: Cost 3 vext3 <2,2,5,7>, <2,2,5,7>
+ 2712061584U, // <7,2,2,6>: Cost 3 vext3 RHS, <2,2,6,6>
+ 3771795096U, // <7,2,2,7>: Cost 4 vext3 <2,2,5,7>, <2,2,7,5>
+ 1638319775U, // <7,2,2,u>: Cost 2 vext3 RHS, <2,2,u,3>
+ 1638319782U, // <7,2,3,0>: Cost 2 vext3 RHS, <2,3,0,1>
+ 2693924531U, // <7,2,3,1>: Cost 3 vext3 <1,5,3,7>, <2,3,1,5>
+ 2700560061U, // <7,2,3,2>: Cost 3 vext3 <2,6,3,7>, <2,3,2,6>
+ 2693924551U, // <7,2,3,3>: Cost 3 vext3 <1,5,3,7>, <2,3,3,7>
+ 1638319822U, // <7,2,3,4>: Cost 2 vext3 RHS, <2,3,4,5>
+ 2698716889U, // <7,2,3,5>: Cost 3 vext3 <2,3,5,7>, <2,3,5,7>
+ 2712061665U, // <7,2,3,6>: Cost 3 vext3 RHS, <2,3,6,6>
+ 2735949540U, // <7,2,3,7>: Cost 3 vext3 RHS, <2,3,7,0>
+ 1638319854U, // <7,2,3,u>: Cost 2 vext3 RHS, <2,3,u,1>
+ 2712061692U, // <7,2,4,0>: Cost 3 vext3 RHS, <2,4,0,6>
+ 2712061698U, // <7,2,4,1>: Cost 3 vext3 RHS, <2,4,1,3>
+ 2712061708U, // <7,2,4,2>: Cost 3 vext3 RHS, <2,4,2,4>
+ 2712061718U, // <7,2,4,3>: Cost 3 vext3 RHS, <2,4,3,5>
+ 2712061728U, // <7,2,4,4>: Cost 3 vext3 RHS, <2,4,4,6>
+ 2699380522U, // <7,2,4,5>: Cost 3 vext3 <2,4,5,7>, <2,4,5,7>
+ 2712061740U, // <7,2,4,6>: Cost 3 vext3 RHS, <2,4,6,0>
+ 3809691445U, // <7,2,4,7>: Cost 4 vext3 RHS, <2,4,7,0>
+ 2699601733U, // <7,2,4,u>: Cost 3 vext3 <2,4,u,7>, <2,4,u,7>
+ 2699675470U, // <7,2,5,0>: Cost 3 vext3 <2,5,0,7>, <2,5,0,7>
+ 3766486867U, // <7,2,5,1>: Cost 4 vext3 <1,3,5,7>, <2,5,1,3>
+ 2699822944U, // <7,2,5,2>: Cost 3 vext3 <2,5,2,7>, <2,5,2,7>
+ 2692745065U, // <7,2,5,3>: Cost 3 vext3 <1,3,5,7>, <2,5,3,7>
+ 2699970418U, // <7,2,5,4>: Cost 3 vext3 <2,5,4,7>, <2,5,4,7>
+ 3766486907U, // <7,2,5,5>: Cost 4 vext3 <1,3,5,7>, <2,5,5,7>
+ 2700117892U, // <7,2,5,6>: Cost 3 vext3 <2,5,6,7>, <2,5,6,7>
+ 3771795334U, // <7,2,5,7>: Cost 4 vext3 <2,2,5,7>, <2,5,7,0>
+ 2692745110U, // <7,2,5,u>: Cost 3 vext3 <1,3,5,7>, <2,5,u,7>
+ 2572894310U, // <7,2,6,0>: Cost 3 vext1 <3,7,2,6>, LHS
+ 2712061860U, // <7,2,6,1>: Cost 3 vext3 RHS, <2,6,1,3>
+ 2700486577U, // <7,2,6,2>: Cost 3 vext3 <2,6,2,7>, <2,6,2,7>
+ 1626818490U, // <7,2,6,3>: Cost 2 vext3 <2,6,3,7>, <2,6,3,7>
+ 2572897590U, // <7,2,6,4>: Cost 3 vext1 <3,7,2,6>, RHS
+ 2700707788U, // <7,2,6,5>: Cost 3 vext3 <2,6,5,7>, <2,6,5,7>
+ 2700781525U, // <7,2,6,6>: Cost 3 vext3 <2,6,6,7>, <2,6,6,7>
+ 3774597086U, // <7,2,6,7>: Cost 4 vext3 <2,6,7,7>, <2,6,7,7>
+ 1627187175U, // <7,2,6,u>: Cost 2 vext3 <2,6,u,7>, <2,6,u,7>
+ 2735949802U, // <7,2,7,0>: Cost 3 vext3 RHS, <2,7,0,1>
+ 3780200434U, // <7,2,7,1>: Cost 4 vext3 <3,6,2,7>, <2,7,1,0>
+ 3773564928U, // <7,2,7,2>: Cost 4 vext3 <2,5,2,7>, <2,7,2,5>
+ 2986541158U, // <7,2,7,3>: Cost 3 vzipr <5,5,7,7>, LHS
+ 2554989878U, // <7,2,7,4>: Cost 3 vext1 <0,7,2,7>, RHS
+ 3775113245U, // <7,2,7,5>: Cost 4 vext3 <2,7,5,7>, <2,7,5,7>
+ 4060283228U, // <7,2,7,6>: Cost 4 vzipr <5,5,7,7>, <0,4,2,6>
+ 2554992236U, // <7,2,7,7>: Cost 3 vext1 <0,7,2,7>, <7,7,7,7>
+ 2986541163U, // <7,2,7,u>: Cost 3 vzipr <5,5,7,7>, LHS
+ 1638320187U, // <7,2,u,0>: Cost 2 vext3 RHS, <2,u,0,1>
+ 2693924936U, // <7,2,u,1>: Cost 3 vext3 <1,5,3,7>, <2,u,1,5>
+ 1638319720U, // <7,2,u,2>: Cost 2 vext3 RHS, <2,2,2,2>
+ 1628145756U, // <7,2,u,3>: Cost 2 vext3 <2,u,3,7>, <2,u,3,7>
+ 1638320227U, // <7,2,u,4>: Cost 2 vext3 RHS, <2,u,4,5>
+ 2702035054U, // <7,2,u,5>: Cost 3 vext3 <2,u,5,7>, <2,u,5,7>
+ 2702108791U, // <7,2,u,6>: Cost 3 vext3 <2,u,6,7>, <2,u,6,7>
+ 2735949945U, // <7,2,u,7>: Cost 3 vext3 RHS, <2,u,7,0>
+ 1628514441U, // <7,2,u,u>: Cost 2 vext3 <2,u,u,7>, <2,u,u,7>
+ 2712062091U, // <7,3,0,0>: Cost 3 vext3 RHS, <3,0,0,0>
+ 1638320278U, // <7,3,0,1>: Cost 2 vext3 RHS, <3,0,1,2>
+ 2712062109U, // <7,3,0,2>: Cost 3 vext3 RHS, <3,0,2,0>
+ 2590836886U, // <7,3,0,3>: Cost 3 vext1 <6,7,3,0>, <3,0,1,2>
+ 2712062128U, // <7,3,0,4>: Cost 3 vext3 RHS, <3,0,4,1>
+ 2712062138U, // <7,3,0,5>: Cost 3 vext3 RHS, <3,0,5,2>
+ 2590839656U, // <7,3,0,6>: Cost 3 vext1 <6,7,3,0>, <6,7,3,0>
+ 3311414017U, // <7,3,0,7>: Cost 4 vrev <3,7,7,0>
+ 1638320341U, // <7,3,0,u>: Cost 2 vext3 RHS, <3,0,u,2>
+ 2237164227U, // <7,3,1,0>: Cost 3 vrev <3,7,0,1>
+ 2712062182U, // <7,3,1,1>: Cost 3 vext3 RHS, <3,1,1,1>
+ 2712062193U, // <7,3,1,2>: Cost 3 vext3 RHS, <3,1,2,3>
+ 2692745468U, // <7,3,1,3>: Cost 3 vext3 <1,3,5,7>, <3,1,3,5>
+ 2712062214U, // <7,3,1,4>: Cost 3 vext3 RHS, <3,1,4,6>
+ 2693925132U, // <7,3,1,5>: Cost 3 vext3 <1,5,3,7>, <3,1,5,3>
+ 3768183059U, // <7,3,1,6>: Cost 4 vext3 <1,6,1,7>, <3,1,6,1>
+ 2692745504U, // <7,3,1,7>: Cost 3 vext3 <1,3,5,7>, <3,1,7,5>
+ 2696063273U, // <7,3,1,u>: Cost 3 vext3 <1,u,5,7>, <3,1,u,5>
+ 2712062254U, // <7,3,2,0>: Cost 3 vext3 RHS, <3,2,0,1>
+ 2712062262U, // <7,3,2,1>: Cost 3 vext3 RHS, <3,2,1,0>
+ 2712062273U, // <7,3,2,2>: Cost 3 vext3 RHS, <3,2,2,2>
+ 2712062280U, // <7,3,2,3>: Cost 3 vext3 RHS, <3,2,3,0>
+ 2712062294U, // <7,3,2,4>: Cost 3 vext3 RHS, <3,2,4,5>
+ 2712062302U, // <7,3,2,5>: Cost 3 vext3 RHS, <3,2,5,4>
+ 2700560742U, // <7,3,2,6>: Cost 3 vext3 <2,6,3,7>, <3,2,6,3>
+ 2712062319U, // <7,3,2,7>: Cost 3 vext3 RHS, <3,2,7,3>
+ 2712062325U, // <7,3,2,u>: Cost 3 vext3 RHS, <3,2,u,0>
+ 2712062335U, // <7,3,3,0>: Cost 3 vext3 RHS, <3,3,0,1>
+ 2636368158U, // <7,3,3,1>: Cost 3 vext2 <3,1,7,3>, <3,1,7,3>
+ 2637031791U, // <7,3,3,2>: Cost 3 vext2 <3,2,7,3>, <3,2,7,3>
+ 1638320540U, // <7,3,3,3>: Cost 2 vext3 RHS, <3,3,3,3>
+ 2712062374U, // <7,3,3,4>: Cost 3 vext3 RHS, <3,3,4,4>
+ 2704689586U, // <7,3,3,5>: Cost 3 vext3 <3,3,5,7>, <3,3,5,7>
+ 2590864235U, // <7,3,3,6>: Cost 3 vext1 <6,7,3,3>, <6,7,3,3>
+ 2704837060U, // <7,3,3,7>: Cost 3 vext3 <3,3,7,7>, <3,3,7,7>
+ 1638320540U, // <7,3,3,u>: Cost 2 vext3 RHS, <3,3,3,3>
+ 2712062416U, // <7,3,4,0>: Cost 3 vext3 RHS, <3,4,0,1>
+ 2712062426U, // <7,3,4,1>: Cost 3 vext3 RHS, <3,4,1,2>
+ 2566981640U, // <7,3,4,2>: Cost 3 vext1 <2,7,3,4>, <2,7,3,4>
+ 2712062447U, // <7,3,4,3>: Cost 3 vext3 RHS, <3,4,3,5>
+ 2712062456U, // <7,3,4,4>: Cost 3 vext3 RHS, <3,4,4,5>
+ 1638320642U, // <7,3,4,5>: Cost 2 vext3 RHS, <3,4,5,6>
+ 2648313204U, // <7,3,4,6>: Cost 3 vext2 <5,1,7,3>, <4,6,4,6>
+ 3311446789U, // <7,3,4,7>: Cost 4 vrev <3,7,7,4>
+ 1638320669U, // <7,3,4,u>: Cost 2 vext3 RHS, <3,4,u,6>
+ 2602819686U, // <7,3,5,0>: Cost 3 vext1 <u,7,3,5>, LHS
+ 1574571728U, // <7,3,5,1>: Cost 2 vext2 <5,1,7,3>, <5,1,7,3>
+ 2648977185U, // <7,3,5,2>: Cost 3 vext2 <5,2,7,3>, <5,2,7,3>
+ 2705869378U, // <7,3,5,3>: Cost 3 vext3 <3,5,3,7>, <3,5,3,7>
+ 2237491947U, // <7,3,5,4>: Cost 3 vrev <3,7,4,5>
+ 2706016852U, // <7,3,5,5>: Cost 3 vext3 <3,5,5,7>, <3,5,5,7>
+ 2648313954U, // <7,3,5,6>: Cost 3 vext2 <5,1,7,3>, <5,6,7,0>
+ 2692745823U, // <7,3,5,7>: Cost 3 vext3 <1,3,5,7>, <3,5,7,0>
+ 1579217159U, // <7,3,5,u>: Cost 2 vext2 <5,u,7,3>, <5,u,7,3>
+ 2706311800U, // <7,3,6,0>: Cost 3 vext3 <3,6,0,7>, <3,6,0,7>
+ 2654286249U, // <7,3,6,1>: Cost 3 vext2 <6,1,7,3>, <6,1,7,3>
+ 1581208058U, // <7,3,6,2>: Cost 2 vext2 <6,2,7,3>, <6,2,7,3>
+ 2706533011U, // <7,3,6,3>: Cost 3 vext3 <3,6,3,7>, <3,6,3,7>
+ 2706606748U, // <7,3,6,4>: Cost 3 vext3 <3,6,4,7>, <3,6,4,7>
+ 3780422309U, // <7,3,6,5>: Cost 4 vext3 <3,6,5,7>, <3,6,5,7>
+ 2712062637U, // <7,3,6,6>: Cost 3 vext3 RHS, <3,6,6,6>
+ 2706827959U, // <7,3,6,7>: Cost 3 vext3 <3,6,7,7>, <3,6,7,7>
+ 1585189856U, // <7,3,6,u>: Cost 2 vext2 <6,u,7,3>, <6,u,7,3>
+ 2693925571U, // <7,3,7,0>: Cost 3 vext3 <1,5,3,7>, <3,7,0,1>
+ 2693925584U, // <7,3,7,1>: Cost 3 vext3 <1,5,3,7>, <3,7,1,5>
+ 2700561114U, // <7,3,7,2>: Cost 3 vext3 <2,6,3,7>, <3,7,2,6>
+ 2572978916U, // <7,3,7,3>: Cost 3 vext1 <3,7,3,7>, <3,7,3,7>
+ 2693925611U, // <7,3,7,4>: Cost 3 vext3 <1,5,3,7>, <3,7,4,5>
+ 2707344118U, // <7,3,7,5>: Cost 3 vext3 <3,7,5,7>, <3,7,5,7>
+ 2654950894U, // <7,3,7,6>: Cost 3 vext2 <6,2,7,3>, <7,6,2,7>
+ 2648315500U, // <7,3,7,7>: Cost 3 vext2 <5,1,7,3>, <7,7,7,7>
+ 2693925643U, // <7,3,7,u>: Cost 3 vext3 <1,5,3,7>, <3,7,u,1>
+ 2237221578U, // <7,3,u,0>: Cost 3 vrev <3,7,0,u>
+ 1638320926U, // <7,3,u,1>: Cost 2 vext3 RHS, <3,u,1,2>
+ 1593153452U, // <7,3,u,2>: Cost 2 vext2 <u,2,7,3>, <u,2,7,3>
+ 1638320540U, // <7,3,u,3>: Cost 2 vext3 RHS, <3,3,3,3>
+ 2237516526U, // <7,3,u,4>: Cost 3 vrev <3,7,4,u>
+ 1638320966U, // <7,3,u,5>: Cost 2 vext3 RHS, <3,u,5,6>
+ 2712062796U, // <7,3,u,6>: Cost 3 vext3 RHS, <3,u,6,3>
+ 2692967250U, // <7,3,u,7>: Cost 3 vext3 <1,3,u,7>, <3,u,7,0>
+ 1638320989U, // <7,3,u,u>: Cost 2 vext3 RHS, <3,u,u,2>
+ 2651635712U, // <7,4,0,0>: Cost 3 vext2 <5,6,7,4>, <0,0,0,0>
+ 1577893990U, // <7,4,0,1>: Cost 2 vext2 <5,6,7,4>, LHS
+ 2651635876U, // <7,4,0,2>: Cost 3 vext2 <5,6,7,4>, <0,2,0,2>
+ 3785804672U, // <7,4,0,3>: Cost 4 vext3 RHS, <4,0,3,1>
+ 2651636050U, // <7,4,0,4>: Cost 3 vext2 <5,6,7,4>, <0,4,1,5>
+ 1638468498U, // <7,4,0,5>: Cost 2 vext3 RHS, <4,0,5,1>
+ 1638468508U, // <7,4,0,6>: Cost 2 vext3 RHS, <4,0,6,2>
+ 3787795364U, // <7,4,0,7>: Cost 4 vext3 RHS, <4,0,7,1>
+ 1640459181U, // <7,4,0,u>: Cost 2 vext3 RHS, <4,0,u,1>
+ 2651636470U, // <7,4,1,0>: Cost 3 vext2 <5,6,7,4>, <1,0,3,2>
+ 2651636532U, // <7,4,1,1>: Cost 3 vext2 <5,6,7,4>, <1,1,1,1>
+ 2712062922U, // <7,4,1,2>: Cost 3 vext3 RHS, <4,1,2,3>
+ 2639029248U, // <7,4,1,3>: Cost 3 vext2 <3,5,7,4>, <1,3,5,7>
+ 2712062940U, // <7,4,1,4>: Cost 3 vext3 RHS, <4,1,4,3>
+ 2712062946U, // <7,4,1,5>: Cost 3 vext3 RHS, <4,1,5,0>
+ 2712062958U, // <7,4,1,6>: Cost 3 vext3 RHS, <4,1,6,3>
+ 3785804791U, // <7,4,1,7>: Cost 4 vext3 RHS, <4,1,7,3>
+ 2712062973U, // <7,4,1,u>: Cost 3 vext3 RHS, <4,1,u,0>
+ 3785804807U, // <7,4,2,0>: Cost 4 vext3 RHS, <4,2,0,1>
+ 3785804818U, // <7,4,2,1>: Cost 4 vext3 RHS, <4,2,1,3>
+ 2651637352U, // <7,4,2,2>: Cost 3 vext2 <5,6,7,4>, <2,2,2,2>
+ 2651637414U, // <7,4,2,3>: Cost 3 vext2 <5,6,7,4>, <2,3,0,1>
+ 3716753194U, // <7,4,2,4>: Cost 4 vext2 <4,2,7,4>, <2,4,5,7>
+ 2712063030U, // <7,4,2,5>: Cost 3 vext3 RHS, <4,2,5,3>
+ 2712063036U, // <7,4,2,6>: Cost 3 vext3 RHS, <4,2,6,0>
+ 3773123658U, // <7,4,2,7>: Cost 4 vext3 <2,4,5,7>, <4,2,7,5>
+ 2712063054U, // <7,4,2,u>: Cost 3 vext3 RHS, <4,2,u,0>
+ 2651637910U, // <7,4,3,0>: Cost 3 vext2 <5,6,7,4>, <3,0,1,2>
+ 3712772348U, // <7,4,3,1>: Cost 4 vext2 <3,5,7,4>, <3,1,3,5>
+ 3785804906U, // <7,4,3,2>: Cost 4 vext3 RHS, <4,3,2,1>
+ 2651638172U, // <7,4,3,3>: Cost 3 vext2 <5,6,7,4>, <3,3,3,3>
+ 2651638274U, // <7,4,3,4>: Cost 3 vext2 <5,6,7,4>, <3,4,5,6>
+ 2639030883U, // <7,4,3,5>: Cost 3 vext2 <3,5,7,4>, <3,5,7,4>
+ 2712063122U, // <7,4,3,6>: Cost 3 vext3 RHS, <4,3,6,5>
+ 3712772836U, // <7,4,3,7>: Cost 4 vext2 <3,5,7,4>, <3,7,3,7>
+ 2641021782U, // <7,4,3,u>: Cost 3 vext2 <3,u,7,4>, <3,u,7,4>
+ 2714053802U, // <7,4,4,0>: Cost 3 vext3 RHS, <4,4,0,2>
+ 3785804978U, // <7,4,4,1>: Cost 4 vext3 RHS, <4,4,1,1>
+ 3716754505U, // <7,4,4,2>: Cost 4 vext2 <4,2,7,4>, <4,2,7,4>
+ 3785804998U, // <7,4,4,3>: Cost 4 vext3 RHS, <4,4,3,3>
+ 1638321360U, // <7,4,4,4>: Cost 2 vext3 RHS, <4,4,4,4>
+ 1638468826U, // <7,4,4,5>: Cost 2 vext3 RHS, <4,4,5,5>
+ 1638468836U, // <7,4,4,6>: Cost 2 vext3 RHS, <4,4,6,6>
+ 3785215214U, // <7,4,4,7>: Cost 4 vext3 <4,4,7,7>, <4,4,7,7>
+ 1640459509U, // <7,4,4,u>: Cost 2 vext3 RHS, <4,4,u,5>
+ 1517207654U, // <7,4,5,0>: Cost 2 vext1 <6,7,4,5>, LHS
+ 2573034640U, // <7,4,5,1>: Cost 3 vext1 <3,7,4,5>, <1,5,3,7>
+ 2712063246U, // <7,4,5,2>: Cost 3 vext3 RHS, <4,5,2,3>
+ 2573036267U, // <7,4,5,3>: Cost 3 vext1 <3,7,4,5>, <3,7,4,5>
+ 1517210934U, // <7,4,5,4>: Cost 2 vext1 <6,7,4,5>, RHS
+ 2711989549U, // <7,4,5,5>: Cost 3 vext3 <4,5,5,7>, <4,5,5,7>
+ 564579638U, // <7,4,5,6>: Cost 1 vext3 RHS, RHS
+ 2651639976U, // <7,4,5,7>: Cost 3 vext2 <5,6,7,4>, <5,7,5,7>
+ 564579656U, // <7,4,5,u>: Cost 1 vext3 RHS, RHS
+ 2712063307U, // <7,4,6,0>: Cost 3 vext3 RHS, <4,6,0,1>
+ 3767668056U, // <7,4,6,1>: Cost 4 vext3 <1,5,3,7>, <4,6,1,5>
+ 2651640314U, // <7,4,6,2>: Cost 3 vext2 <5,6,7,4>, <6,2,7,3>
+ 2655621708U, // <7,4,6,3>: Cost 3 vext2 <6,3,7,4>, <6,3,7,4>
+ 1638468980U, // <7,4,6,4>: Cost 2 vext3 RHS, <4,6,4,6>
+ 2712063358U, // <7,4,6,5>: Cost 3 vext3 RHS, <4,6,5,7>
+ 2712063367U, // <7,4,6,6>: Cost 3 vext3 RHS, <4,6,6,7>
+ 2712210826U, // <7,4,6,7>: Cost 3 vext3 RHS, <4,6,7,1>
+ 1638469012U, // <7,4,6,u>: Cost 2 vext3 RHS, <4,6,u,2>
+ 2651640826U, // <7,4,7,0>: Cost 3 vext2 <5,6,7,4>, <7,0,1,2>
+ 3773713830U, // <7,4,7,1>: Cost 4 vext3 <2,5,4,7>, <4,7,1,2>
+ 3773713842U, // <7,4,7,2>: Cost 4 vext3 <2,5,4,7>, <4,7,2,5>
+ 3780349372U, // <7,4,7,3>: Cost 4 vext3 <3,6,4,7>, <4,7,3,6>
+ 2651641140U, // <7,4,7,4>: Cost 3 vext2 <5,6,7,4>, <7,4,0,1>
+ 2712210888U, // <7,4,7,5>: Cost 3 vext3 RHS, <4,7,5,0>
+ 2712210898U, // <7,4,7,6>: Cost 3 vext3 RHS, <4,7,6,1>
+ 2651641452U, // <7,4,7,7>: Cost 3 vext2 <5,6,7,4>, <7,7,7,7>
+ 2713538026U, // <7,4,7,u>: Cost 3 vext3 <4,7,u,7>, <4,7,u,7>
+ 1517232230U, // <7,4,u,0>: Cost 2 vext1 <6,7,4,u>, LHS
+ 1577899822U, // <7,4,u,1>: Cost 2 vext2 <5,6,7,4>, LHS
+ 2712063489U, // <7,4,u,2>: Cost 3 vext3 RHS, <4,u,2,3>
+ 2573060846U, // <7,4,u,3>: Cost 3 vext1 <3,7,4,u>, <3,7,4,u>
+ 1640312342U, // <7,4,u,4>: Cost 2 vext3 RHS, <4,u,4,6>
+ 1638469146U, // <7,4,u,5>: Cost 2 vext3 RHS, <4,u,5,1>
+ 564579881U, // <7,4,u,6>: Cost 1 vext3 RHS, RHS
+ 2714054192U, // <7,4,u,7>: Cost 3 vext3 RHS, <4,u,7,5>
+ 564579899U, // <7,4,u,u>: Cost 1 vext3 RHS, RHS
+ 2579038310U, // <7,5,0,0>: Cost 3 vext1 <4,7,5,0>, LHS
+ 2636382310U, // <7,5,0,1>: Cost 3 vext2 <3,1,7,5>, LHS
+ 2796339302U, // <7,5,0,2>: Cost 3 vuzpl <7,4,5,6>, LHS
+ 3646810719U, // <7,5,0,3>: Cost 4 vext1 <3,7,5,0>, <3,5,7,0>
+ 2712063586U, // <7,5,0,4>: Cost 3 vext3 RHS, <5,0,4,1>
+ 2735951467U, // <7,5,0,5>: Cost 3 vext3 RHS, <5,0,5,1>
+ 2735951476U, // <7,5,0,6>: Cost 3 vext3 RHS, <5,0,6,1>
+ 2579043322U, // <7,5,0,7>: Cost 3 vext1 <4,7,5,0>, <7,0,1,2>
+ 2636382877U, // <7,5,0,u>: Cost 3 vext2 <3,1,7,5>, LHS
+ 2712211087U, // <7,5,1,0>: Cost 3 vext3 RHS, <5,1,0,1>
+ 3698180916U, // <7,5,1,1>: Cost 4 vext2 <1,1,7,5>, <1,1,1,1>
+ 3710124950U, // <7,5,1,2>: Cost 4 vext2 <3,1,7,5>, <1,2,3,0>
+ 2636383232U, // <7,5,1,3>: Cost 3 vext2 <3,1,7,5>, <1,3,5,7>
+ 2712211127U, // <7,5,1,4>: Cost 3 vext3 RHS, <5,1,4,5>
+ 2590994128U, // <7,5,1,5>: Cost 3 vext1 <6,7,5,1>, <5,1,7,3>
+ 2590995323U, // <7,5,1,6>: Cost 3 vext1 <6,7,5,1>, <6,7,5,1>
+ 1638469328U, // <7,5,1,7>: Cost 2 vext3 RHS, <5,1,7,3>
+ 1638469337U, // <7,5,1,u>: Cost 2 vext3 RHS, <5,1,u,3>
+ 3785805536U, // <7,5,2,0>: Cost 4 vext3 RHS, <5,2,0,1>
+ 3785805544U, // <7,5,2,1>: Cost 4 vext3 RHS, <5,2,1,0>
+ 3704817288U, // <7,5,2,2>: Cost 4 vext2 <2,2,7,5>, <2,2,5,7>
+ 2712063742U, // <7,5,2,3>: Cost 3 vext3 RHS, <5,2,3,4>
+ 3716761386U, // <7,5,2,4>: Cost 4 vext2 <4,2,7,5>, <2,4,5,7>
+ 2714054415U, // <7,5,2,5>: Cost 3 vext3 RHS, <5,2,5,3>
+ 3774304024U, // <7,5,2,6>: Cost 4 vext3 <2,6,3,7>, <5,2,6,3>
+ 2712063777U, // <7,5,2,7>: Cost 3 vext3 RHS, <5,2,7,3>
+ 2712063787U, // <7,5,2,u>: Cost 3 vext3 RHS, <5,2,u,4>
+ 3634888806U, // <7,5,3,0>: Cost 4 vext1 <1,7,5,3>, LHS
+ 2636384544U, // <7,5,3,1>: Cost 3 vext2 <3,1,7,5>, <3,1,7,5>
+ 3710790001U, // <7,5,3,2>: Cost 4 vext2 <3,2,7,5>, <3,2,7,5>
+ 3710126492U, // <7,5,3,3>: Cost 4 vext2 <3,1,7,5>, <3,3,3,3>
+ 3634892086U, // <7,5,3,4>: Cost 4 vext1 <1,7,5,3>, RHS
+ 2639039076U, // <7,5,3,5>: Cost 3 vext2 <3,5,7,5>, <3,5,7,5>
+ 3713444533U, // <7,5,3,6>: Cost 4 vext2 <3,6,7,5>, <3,6,7,5>
+ 2693926767U, // <7,5,3,7>: Cost 3 vext3 <1,5,3,7>, <5,3,7,0>
+ 2712063864U, // <7,5,3,u>: Cost 3 vext3 RHS, <5,3,u,0>
+ 2579071078U, // <7,5,4,0>: Cost 3 vext1 <4,7,5,4>, LHS
+ 3646841856U, // <7,5,4,1>: Cost 4 vext1 <3,7,5,4>, <1,3,5,7>
+ 3716762698U, // <7,5,4,2>: Cost 4 vext2 <4,2,7,5>, <4,2,7,5>
+ 3646843491U, // <7,5,4,3>: Cost 4 vext1 <3,7,5,4>, <3,5,7,4>
+ 2579074358U, // <7,5,4,4>: Cost 3 vext1 <4,7,5,4>, RHS
+ 2636385590U, // <7,5,4,5>: Cost 3 vext2 <3,1,7,5>, RHS
+ 2645675406U, // <7,5,4,6>: Cost 3 vext2 <4,6,7,5>, <4,6,7,5>
+ 1638322118U, // <7,5,4,7>: Cost 2 vext3 RHS, <5,4,7,6>
+ 1638469583U, // <7,5,4,u>: Cost 2 vext3 RHS, <5,4,u,6>
+ 2714054611U, // <7,5,5,0>: Cost 3 vext3 RHS, <5,5,0,1>
+ 2652974800U, // <7,5,5,1>: Cost 3 vext2 <5,u,7,5>, <5,1,7,3>
+ 3710127905U, // <7,5,5,2>: Cost 4 vext2 <3,1,7,5>, <5,2,7,3>
+ 3785805808U, // <7,5,5,3>: Cost 4 vext3 RHS, <5,5,3,3>
+ 2712211450U, // <7,5,5,4>: Cost 3 vext3 RHS, <5,5,4,4>
+ 1638322180U, // <7,5,5,5>: Cost 2 vext3 RHS, <5,5,5,5>
+ 2712064014U, // <7,5,5,6>: Cost 3 vext3 RHS, <5,5,6,6>
+ 1638469656U, // <7,5,5,7>: Cost 2 vext3 RHS, <5,5,7,7>
+ 1638469665U, // <7,5,5,u>: Cost 2 vext3 RHS, <5,5,u,7>
+ 2712064036U, // <7,5,6,0>: Cost 3 vext3 RHS, <5,6,0,1>
+ 2714054707U, // <7,5,6,1>: Cost 3 vext3 RHS, <5,6,1,7>
+ 3785805879U, // <7,5,6,2>: Cost 4 vext3 RHS, <5,6,2,2>
+ 2712064066U, // <7,5,6,3>: Cost 3 vext3 RHS, <5,6,3,4>
+ 2712064076U, // <7,5,6,4>: Cost 3 vext3 RHS, <5,6,4,5>
+ 2714054743U, // <7,5,6,5>: Cost 3 vext3 RHS, <5,6,5,7>
+ 2712064096U, // <7,5,6,6>: Cost 3 vext3 RHS, <5,6,6,7>
+ 1638322274U, // <7,5,6,7>: Cost 2 vext3 RHS, <5,6,7,0>
+ 1638469739U, // <7,5,6,u>: Cost 2 vext3 RHS, <5,6,u,0>
+ 1511325798U, // <7,5,7,0>: Cost 2 vext1 <5,7,5,7>, LHS
+ 2692747392U, // <7,5,7,1>: Cost 3 vext3 <1,3,5,7>, <5,7,1,3>
+ 2585069160U, // <7,5,7,2>: Cost 3 vext1 <5,7,5,7>, <2,2,2,2>
+ 2573126390U, // <7,5,7,3>: Cost 3 vext1 <3,7,5,7>, <3,7,5,7>
+ 1511329078U, // <7,5,7,4>: Cost 2 vext1 <5,7,5,7>, RHS
+ 1638469800U, // <7,5,7,5>: Cost 2 vext3 RHS, <5,7,5,7>
+ 2712211626U, // <7,5,7,6>: Cost 3 vext3 RHS, <5,7,6,0>
+ 2712211636U, // <7,5,7,7>: Cost 3 vext3 RHS, <5,7,7,1>
+ 1638469823U, // <7,5,7,u>: Cost 2 vext3 RHS, <5,7,u,3>
+ 1511333990U, // <7,5,u,0>: Cost 2 vext1 <5,7,5,u>, LHS
+ 2636388142U, // <7,5,u,1>: Cost 3 vext2 <3,1,7,5>, LHS
+ 2712211671U, // <7,5,u,2>: Cost 3 vext3 RHS, <5,u,2,0>
+ 2573134583U, // <7,5,u,3>: Cost 3 vext1 <3,7,5,u>, <3,7,5,u>
+ 1511337270U, // <7,5,u,4>: Cost 2 vext1 <5,7,5,u>, RHS
+ 1638469881U, // <7,5,u,5>: Cost 2 vext3 RHS, <5,u,5,7>
+ 2712064258U, // <7,5,u,6>: Cost 3 vext3 RHS, <5,u,6,7>
+ 1638469892U, // <7,5,u,7>: Cost 2 vext3 RHS, <5,u,7,0>
+ 1638469904U, // <7,5,u,u>: Cost 2 vext3 RHS, <5,u,u,3>
+ 2650324992U, // <7,6,0,0>: Cost 3 vext2 <5,4,7,6>, <0,0,0,0>
+ 1576583270U, // <7,6,0,1>: Cost 2 vext2 <5,4,7,6>, LHS
+ 2712064300U, // <7,6,0,2>: Cost 3 vext3 RHS, <6,0,2,4>
+ 2255295336U, // <7,6,0,3>: Cost 3 vrev <6,7,3,0>
+ 2712064316U, // <7,6,0,4>: Cost 3 vext3 RHS, <6,0,4,2>
+ 2585088098U, // <7,6,0,5>: Cost 3 vext1 <5,7,6,0>, <5,6,7,0>
+ 2735952204U, // <7,6,0,6>: Cost 3 vext3 RHS, <6,0,6,0>
+ 2712211799U, // <7,6,0,7>: Cost 3 vext3 RHS, <6,0,7,2>
+ 1576583837U, // <7,6,0,u>: Cost 2 vext2 <5,4,7,6>, LHS
+ 1181340494U, // <7,6,1,0>: Cost 2 vrev <6,7,0,1>
+ 2650325812U, // <7,6,1,1>: Cost 3 vext2 <5,4,7,6>, <1,1,1,1>
+ 2650325910U, // <7,6,1,2>: Cost 3 vext2 <5,4,7,6>, <1,2,3,0>
+ 2650325976U, // <7,6,1,3>: Cost 3 vext2 <5,4,7,6>, <1,3,1,3>
+ 2579123510U, // <7,6,1,4>: Cost 3 vext1 <4,7,6,1>, RHS
+ 2650326160U, // <7,6,1,5>: Cost 3 vext2 <5,4,7,6>, <1,5,3,7>
+ 2714055072U, // <7,6,1,6>: Cost 3 vext3 RHS, <6,1,6,3>
+ 2712064425U, // <7,6,1,7>: Cost 3 vext3 RHS, <6,1,7,3>
+ 1181930390U, // <7,6,1,u>: Cost 2 vrev <6,7,u,1>
+ 2712211897U, // <7,6,2,0>: Cost 3 vext3 RHS, <6,2,0,1>
+ 2714055108U, // <7,6,2,1>: Cost 3 vext3 RHS, <6,2,1,3>
+ 2650326632U, // <7,6,2,2>: Cost 3 vext2 <5,4,7,6>, <2,2,2,2>
+ 2650326694U, // <7,6,2,3>: Cost 3 vext2 <5,4,7,6>, <2,3,0,1>
+ 2714055137U, // <7,6,2,4>: Cost 3 vext3 RHS, <6,2,4,5>
+ 2714055148U, // <7,6,2,5>: Cost 3 vext3 RHS, <6,2,5,7>
+ 2650326970U, // <7,6,2,6>: Cost 3 vext2 <5,4,7,6>, <2,6,3,7>
+ 1638470138U, // <7,6,2,7>: Cost 2 vext3 RHS, <6,2,7,3>
+ 1638470147U, // <7,6,2,u>: Cost 2 vext3 RHS, <6,2,u,3>
+ 2650327190U, // <7,6,3,0>: Cost 3 vext2 <5,4,7,6>, <3,0,1,2>
+ 2255172441U, // <7,6,3,1>: Cost 3 vrev <6,7,1,3>
+ 2255246178U, // <7,6,3,2>: Cost 3 vrev <6,7,2,3>
+ 2650327452U, // <7,6,3,3>: Cost 3 vext2 <5,4,7,6>, <3,3,3,3>
+ 2712064562U, // <7,6,3,4>: Cost 3 vext3 RHS, <6,3,4,5>
+ 2650327627U, // <7,6,3,5>: Cost 3 vext2 <5,4,7,6>, <3,5,4,7>
+ 3713452726U, // <7,6,3,6>: Cost 4 vext2 <3,6,7,6>, <3,6,7,6>
+ 2700563016U, // <7,6,3,7>: Cost 3 vext3 <2,6,3,7>, <6,3,7,0>
+ 2712064593U, // <7,6,3,u>: Cost 3 vext3 RHS, <6,3,u,0>
+ 2650327954U, // <7,6,4,0>: Cost 3 vext2 <5,4,7,6>, <4,0,5,1>
+ 2735952486U, // <7,6,4,1>: Cost 3 vext3 RHS, <6,4,1,3>
+ 2735952497U, // <7,6,4,2>: Cost 3 vext3 RHS, <6,4,2,5>
+ 2255328108U, // <7,6,4,3>: Cost 3 vrev <6,7,3,4>
+ 2712212100U, // <7,6,4,4>: Cost 3 vext3 RHS, <6,4,4,6>
+ 1576586550U, // <7,6,4,5>: Cost 2 vext2 <5,4,7,6>, RHS
+ 2714055312U, // <7,6,4,6>: Cost 3 vext3 RHS, <6,4,6,0>
+ 2712212126U, // <7,6,4,7>: Cost 3 vext3 RHS, <6,4,7,5>
+ 1576586793U, // <7,6,4,u>: Cost 2 vext2 <5,4,7,6>, RHS
+ 2579152998U, // <7,6,5,0>: Cost 3 vext1 <4,7,6,5>, LHS
+ 2650328784U, // <7,6,5,1>: Cost 3 vext2 <5,4,7,6>, <5,1,7,3>
+ 2714055364U, // <7,6,5,2>: Cost 3 vext3 RHS, <6,5,2,7>
+ 3785806538U, // <7,6,5,3>: Cost 4 vext3 RHS, <6,5,3,4>
+ 1576587206U, // <7,6,5,4>: Cost 2 vext2 <5,4,7,6>, <5,4,7,6>
+ 2650329092U, // <7,6,5,5>: Cost 3 vext2 <5,4,7,6>, <5,5,5,5>
+ 2650329186U, // <7,6,5,6>: Cost 3 vext2 <5,4,7,6>, <5,6,7,0>
+ 2712064753U, // <7,6,5,7>: Cost 3 vext3 RHS, <6,5,7,7>
+ 1181963162U, // <7,6,5,u>: Cost 2 vrev <6,7,u,5>
+ 2714055421U, // <7,6,6,0>: Cost 3 vext3 RHS, <6,6,0,1>
+ 2714055432U, // <7,6,6,1>: Cost 3 vext3 RHS, <6,6,1,3>
+ 2650329594U, // <7,6,6,2>: Cost 3 vext2 <5,4,7,6>, <6,2,7,3>
+ 3785806619U, // <7,6,6,3>: Cost 4 vext3 RHS, <6,6,3,4>
+ 2712212260U, // <7,6,6,4>: Cost 3 vext3 RHS, <6,6,4,4>
+ 2714055472U, // <7,6,6,5>: Cost 3 vext3 RHS, <6,6,5,7>
+ 1638323000U, // <7,6,6,6>: Cost 2 vext3 RHS, <6,6,6,6>
+ 1638470466U, // <7,6,6,7>: Cost 2 vext3 RHS, <6,6,7,7>
+ 1638470475U, // <7,6,6,u>: Cost 2 vext3 RHS, <6,6,u,7>
+ 1638323022U, // <7,6,7,0>: Cost 2 vext3 RHS, <6,7,0,1>
+ 2712064854U, // <7,6,7,1>: Cost 3 vext3 RHS, <6,7,1,0>
+ 2712064865U, // <7,6,7,2>: Cost 3 vext3 RHS, <6,7,2,2>
+ 2712064872U, // <7,6,7,3>: Cost 3 vext3 RHS, <6,7,3,0>
+ 1638323062U, // <7,6,7,4>: Cost 2 vext3 RHS, <6,7,4,5>
+ 2712064894U, // <7,6,7,5>: Cost 3 vext3 RHS, <6,7,5,4>
+ 2712064905U, // <7,6,7,6>: Cost 3 vext3 RHS, <6,7,6,6>
+ 2712064915U, // <7,6,7,7>: Cost 3 vext3 RHS, <6,7,7,7>
+ 1638323094U, // <7,6,7,u>: Cost 2 vext3 RHS, <6,7,u,1>
+ 1638470559U, // <7,6,u,0>: Cost 2 vext3 RHS, <6,u,0,1>
+ 1576589102U, // <7,6,u,1>: Cost 2 vext2 <5,4,7,6>, LHS
+ 2712212402U, // <7,6,u,2>: Cost 3 vext3 RHS, <6,u,2,2>
+ 2712212409U, // <7,6,u,3>: Cost 3 vext3 RHS, <6,u,3,0>
+ 1638470599U, // <7,6,u,4>: Cost 2 vext3 RHS, <6,u,4,5>
+ 1576589466U, // <7,6,u,5>: Cost 2 vext2 <5,4,7,6>, RHS
+ 1638323000U, // <7,6,u,6>: Cost 2 vext3 RHS, <6,6,6,6>
+ 1638470624U, // <7,6,u,7>: Cost 2 vext3 RHS, <6,u,7,3>
+ 1638470631U, // <7,6,u,u>: Cost 2 vext3 RHS, <6,u,u,1>
+ 2712065007U, // <7,7,0,0>: Cost 3 vext3 RHS, <7,0,0,0>
+ 1638323194U, // <7,7,0,1>: Cost 2 vext3 RHS, <7,0,1,2>
+ 2712065025U, // <7,7,0,2>: Cost 3 vext3 RHS, <7,0,2,0>
+ 3646958337U, // <7,7,0,3>: Cost 4 vext1 <3,7,7,0>, <3,7,7,0>
+ 2712065044U, // <7,7,0,4>: Cost 3 vext3 RHS, <7,0,4,1>
+ 2585161907U, // <7,7,0,5>: Cost 3 vext1 <5,7,7,0>, <5,7,7,0>
+ 2591134604U, // <7,7,0,6>: Cost 3 vext1 <6,7,7,0>, <6,7,7,0>
+ 2591134714U, // <7,7,0,7>: Cost 3 vext1 <6,7,7,0>, <7,0,1,2>
+ 1638323257U, // <7,7,0,u>: Cost 2 vext3 RHS, <7,0,u,2>
+ 2712065091U, // <7,7,1,0>: Cost 3 vext3 RHS, <7,1,0,3>
+ 2712065098U, // <7,7,1,1>: Cost 3 vext3 RHS, <7,1,1,1>
+ 2712065109U, // <7,7,1,2>: Cost 3 vext3 RHS, <7,1,2,3>
+ 2692748384U, // <7,7,1,3>: Cost 3 vext3 <1,3,5,7>, <7,1,3,5>
+ 2585169206U, // <7,7,1,4>: Cost 3 vext1 <5,7,7,1>, RHS
+ 2693928048U, // <7,7,1,5>: Cost 3 vext3 <1,5,3,7>, <7,1,5,3>
+ 2585170766U, // <7,7,1,6>: Cost 3 vext1 <5,7,7,1>, <6,7,0,1>
+ 2735953024U, // <7,7,1,7>: Cost 3 vext3 RHS, <7,1,7,1>
+ 2695918731U, // <7,7,1,u>: Cost 3 vext3 <1,u,3,7>, <7,1,u,3>
+ 3770471574U, // <7,7,2,0>: Cost 4 vext3 <2,0,5,7>, <7,2,0,5>
+ 3785807002U, // <7,7,2,1>: Cost 4 vext3 RHS, <7,2,1,0>
+ 2712065189U, // <7,7,2,2>: Cost 3 vext3 RHS, <7,2,2,2>
+ 2712065196U, // <7,7,2,3>: Cost 3 vext3 RHS, <7,2,3,0>
+ 3773125818U, // <7,7,2,4>: Cost 4 vext3 <2,4,5,7>, <7,2,4,5>
+ 3766490305U, // <7,7,2,5>: Cost 4 vext3 <1,3,5,7>, <7,2,5,3>
+ 2700563658U, // <7,7,2,6>: Cost 3 vext3 <2,6,3,7>, <7,2,6,3>
+ 2735953107U, // <7,7,2,7>: Cost 3 vext3 RHS, <7,2,7,3>
+ 2701890780U, // <7,7,2,u>: Cost 3 vext3 <2,u,3,7>, <7,2,u,3>
+ 2712065251U, // <7,7,3,0>: Cost 3 vext3 RHS, <7,3,0,1>
+ 3766490350U, // <7,7,3,1>: Cost 4 vext3 <1,3,5,7>, <7,3,1,3>
+ 3774305530U, // <7,7,3,2>: Cost 4 vext3 <2,6,3,7>, <7,3,2,6>
+ 2637728196U, // <7,7,3,3>: Cost 3 vext2 <3,3,7,7>, <3,3,7,7>
+ 2712065291U, // <7,7,3,4>: Cost 3 vext3 RHS, <7,3,4,5>
+ 2585186486U, // <7,7,3,5>: Cost 3 vext1 <5,7,7,3>, <5,7,7,3>
+ 2639719095U, // <7,7,3,6>: Cost 3 vext2 <3,6,7,7>, <3,6,7,7>
+ 2640382728U, // <7,7,3,7>: Cost 3 vext2 <3,7,7,7>, <3,7,7,7>
+ 2641046361U, // <7,7,3,u>: Cost 3 vext2 <3,u,7,7>, <3,u,7,7>
+ 2712212792U, // <7,7,4,0>: Cost 3 vext3 RHS, <7,4,0,5>
+ 3646989312U, // <7,7,4,1>: Cost 4 vext1 <3,7,7,4>, <1,3,5,7>
+ 3785807176U, // <7,7,4,2>: Cost 4 vext3 RHS, <7,4,2,3>
+ 3646991109U, // <7,7,4,3>: Cost 4 vext1 <3,7,7,4>, <3,7,7,4>
+ 2712065371U, // <7,7,4,4>: Cost 3 vext3 RHS, <7,4,4,4>
+ 1638323558U, // <7,7,4,5>: Cost 2 vext3 RHS, <7,4,5,6>
+ 2712212845U, // <7,7,4,6>: Cost 3 vext3 RHS, <7,4,6,4>
+ 2591167846U, // <7,7,4,7>: Cost 3 vext1 <6,7,7,4>, <7,4,5,6>
+ 1638323585U, // <7,7,4,u>: Cost 2 vext3 RHS, <7,4,u,6>
+ 2585198694U, // <7,7,5,0>: Cost 3 vext1 <5,7,7,5>, LHS
+ 2712212884U, // <7,7,5,1>: Cost 3 vext3 RHS, <7,5,1,7>
+ 3711471393U, // <7,7,5,2>: Cost 4 vext2 <3,3,7,7>, <5,2,7,3>
+ 2649673590U, // <7,7,5,3>: Cost 3 vext2 <5,3,7,7>, <5,3,7,7>
+ 2712065455U, // <7,7,5,4>: Cost 3 vext3 RHS, <7,5,4,7>
+ 1577259032U, // <7,7,5,5>: Cost 2 vext2 <5,5,7,7>, <5,5,7,7>
+ 2712065473U, // <7,7,5,6>: Cost 3 vext3 RHS, <7,5,6,7>
+ 2712212936U, // <7,7,5,7>: Cost 3 vext3 RHS, <7,5,7,5>
+ 1579249931U, // <7,7,5,u>: Cost 2 vext2 <5,u,7,7>, <5,u,7,7>
+ 2591178854U, // <7,7,6,0>: Cost 3 vext1 <6,7,7,6>, LHS
+ 2735953374U, // <7,7,6,1>: Cost 3 vext3 RHS, <7,6,1,0>
+ 2712212974U, // <7,7,6,2>: Cost 3 vext3 RHS, <7,6,2,7>
+ 2655646287U, // <7,7,6,3>: Cost 3 vext2 <6,3,7,7>, <6,3,7,7>
+ 2591182134U, // <7,7,6,4>: Cost 3 vext1 <6,7,7,6>, RHS
+ 2656973553U, // <7,7,6,5>: Cost 3 vext2 <6,5,7,7>, <6,5,7,7>
+ 1583895362U, // <7,7,6,6>: Cost 2 vext2 <6,6,7,7>, <6,6,7,7>
+ 2712065556U, // <7,7,6,7>: Cost 3 vext3 RHS, <7,6,7,0>
+ 1585222628U, // <7,7,6,u>: Cost 2 vext2 <6,u,7,7>, <6,u,7,7>
+ 1523417190U, // <7,7,7,0>: Cost 2 vext1 <7,7,7,7>, LHS
+ 2597159670U, // <7,7,7,1>: Cost 3 vext1 <7,7,7,7>, <1,0,3,2>
+ 2597160552U, // <7,7,7,2>: Cost 3 vext1 <7,7,7,7>, <2,2,2,2>
+ 2597161110U, // <7,7,7,3>: Cost 3 vext1 <7,7,7,7>, <3,0,1,2>
+ 1523420470U, // <7,7,7,4>: Cost 2 vext1 <7,7,7,7>, RHS
+ 2651002296U, // <7,7,7,5>: Cost 3 vext2 <5,5,7,7>, <7,5,5,7>
+ 2657637906U, // <7,7,7,6>: Cost 3 vext2 <6,6,7,7>, <7,6,6,7>
+ 363253046U, // <7,7,7,7>: Cost 1 vdup3 RHS
+ 363253046U, // <7,7,7,u>: Cost 1 vdup3 RHS
+ 1523417190U, // <7,7,u,0>: Cost 2 vext1 <7,7,7,7>, LHS
+ 1638471298U, // <7,7,u,1>: Cost 2 vext3 RHS, <7,u,1,2>
+ 2712213132U, // <7,7,u,2>: Cost 3 vext3 RHS, <7,u,2,3>
+ 2712213138U, // <7,7,u,3>: Cost 3 vext3 RHS, <7,u,3,0>
+ 1523420470U, // <7,7,u,4>: Cost 2 vext1 <7,7,7,7>, RHS
+ 1638471338U, // <7,7,u,5>: Cost 2 vext3 RHS, <7,u,5,6>
+ 1595840756U, // <7,7,u,6>: Cost 2 vext2 <u,6,7,7>, <u,6,7,7>
+ 363253046U, // <7,7,u,7>: Cost 1 vdup3 RHS
+ 363253046U, // <7,7,u,u>: Cost 1 vdup3 RHS
+ 1638318080U, // <7,u,0,0>: Cost 2 vext3 RHS, <0,0,0,0>
+ 1638323923U, // <7,u,0,1>: Cost 2 vext3 RHS, <u,0,1,2>
+ 1662211804U, // <7,u,0,2>: Cost 2 vext3 RHS, <u,0,2,2>
+ 1638323941U, // <7,u,0,3>: Cost 2 vext3 RHS, <u,0,3,2>
+ 2712065773U, // <7,u,0,4>: Cost 3 vext3 RHS, <u,0,4,1>
+ 1662359286U, // <7,u,0,5>: Cost 2 vext3 RHS, <u,0,5,1>
+ 1662359296U, // <7,u,0,6>: Cost 2 vext3 RHS, <u,0,6,2>
+ 2987150664U, // <7,u,0,7>: Cost 3 vzipr <5,6,7,0>, RHS
+ 1638323986U, // <7,u,0,u>: Cost 2 vext3 RHS, <u,0,u,2>
+ 1517469798U, // <7,u,1,0>: Cost 2 vext1 <6,7,u,1>, LHS
+ 1638318900U, // <7,u,1,1>: Cost 2 vext3 RHS, <1,1,1,1>
+ 564582190U, // <7,u,1,2>: Cost 1 vext3 RHS, LHS
+ 1638324023U, // <7,u,1,3>: Cost 2 vext3 RHS, <u,1,3,3>
+ 1517473078U, // <7,u,1,4>: Cost 2 vext1 <6,7,u,1>, RHS
+ 2693928777U, // <7,u,1,5>: Cost 3 vext3 <1,5,3,7>, <u,1,5,3>
+ 1517474710U, // <7,u,1,6>: Cost 2 vext1 <6,7,u,1>, <6,7,u,1>
+ 1640462171U, // <7,u,1,7>: Cost 2 vext3 RHS, <u,1,7,3>
+ 564582244U, // <7,u,1,u>: Cost 1 vext3 RHS, LHS
+ 1638318244U, // <7,u,2,0>: Cost 2 vext3 RHS, <0,2,0,2>
+ 2712065907U, // <7,u,2,1>: Cost 3 vext3 RHS, <u,2,1,0>
+ 1638319720U, // <7,u,2,2>: Cost 2 vext3 RHS, <2,2,2,2>
+ 1638324101U, // <7,u,2,3>: Cost 2 vext3 RHS, <u,2,3,0>
+ 1638318284U, // <7,u,2,4>: Cost 2 vext3 RHS, <0,2,4,6>
+ 2712065947U, // <7,u,2,5>: Cost 3 vext3 RHS, <u,2,5,4>
+ 2700564387U, // <7,u,2,6>: Cost 3 vext3 <2,6,3,7>, <u,2,6,3>
+ 1640314796U, // <7,u,2,7>: Cost 2 vext3 RHS, <u,2,7,3>
+ 1638324146U, // <7,u,2,u>: Cost 2 vext3 RHS, <u,2,u,0>
+ 1638324156U, // <7,u,3,0>: Cost 2 vext3 RHS, <u,3,0,1>
+ 1638319064U, // <7,u,3,1>: Cost 2 vext3 RHS, <1,3,1,3>
+ 2700564435U, // <7,u,3,2>: Cost 3 vext3 <2,6,3,7>, <u,3,2,6>
+ 1638320540U, // <7,u,3,3>: Cost 2 vext3 RHS, <3,3,3,3>
+ 1638324196U, // <7,u,3,4>: Cost 2 vext3 RHS, <u,3,4,5>
+ 1638324207U, // <7,u,3,5>: Cost 2 vext3 RHS, <u,3,5,7>
+ 2700564472U, // <7,u,3,6>: Cost 3 vext3 <2,6,3,7>, <u,3,6,7>
+ 2695919610U, // <7,u,3,7>: Cost 3 vext3 <1,u,3,7>, <u,3,7,0>
+ 1638324228U, // <7,u,3,u>: Cost 2 vext3 RHS, <u,3,u,1>
+ 2712066061U, // <7,u,4,0>: Cost 3 vext3 RHS, <u,4,0,1>
+ 1662212122U, // <7,u,4,1>: Cost 2 vext3 RHS, <u,4,1,5>
+ 1662212132U, // <7,u,4,2>: Cost 2 vext3 RHS, <u,4,2,6>
+ 2712066092U, // <7,u,4,3>: Cost 3 vext3 RHS, <u,4,3,5>
+ 1638321360U, // <7,u,4,4>: Cost 2 vext3 RHS, <4,4,4,4>
+ 1638324287U, // <7,u,4,5>: Cost 2 vext3 RHS, <u,4,5,6>
+ 1662359624U, // <7,u,4,6>: Cost 2 vext3 RHS, <u,4,6,6>
+ 1640314961U, // <7,u,4,7>: Cost 2 vext3 RHS, <u,4,7,6>
+ 1638324314U, // <7,u,4,u>: Cost 2 vext3 RHS, <u,4,u,6>
+ 1517502566U, // <7,u,5,0>: Cost 2 vext1 <6,7,u,5>, LHS
+ 1574612693U, // <7,u,5,1>: Cost 2 vext2 <5,1,7,u>, <5,1,7,u>
+ 2712066162U, // <7,u,5,2>: Cost 3 vext3 RHS, <u,5,2,3>
+ 1638324351U, // <7,u,5,3>: Cost 2 vext3 RHS, <u,5,3,7>
+ 1576603592U, // <7,u,5,4>: Cost 2 vext2 <5,4,7,u>, <5,4,7,u>
+ 1577267225U, // <7,u,5,5>: Cost 2 vext2 <5,5,7,u>, <5,5,7,u>
+ 564582554U, // <7,u,5,6>: Cost 1 vext3 RHS, RHS
+ 1640462499U, // <7,u,5,7>: Cost 2 vext3 RHS, <u,5,7,7>
+ 564582572U, // <7,u,5,u>: Cost 1 vext3 RHS, RHS
+ 2712066223U, // <7,u,6,0>: Cost 3 vext3 RHS, <u,6,0,1>
+ 2712066238U, // <7,u,6,1>: Cost 3 vext3 RHS, <u,6,1,7>
+ 1581249023U, // <7,u,6,2>: Cost 2 vext2 <6,2,7,u>, <6,2,7,u>
+ 1638324432U, // <7,u,6,3>: Cost 2 vext3 RHS, <u,6,3,7>
+ 1638468980U, // <7,u,6,4>: Cost 2 vext3 RHS, <4,6,4,6>
+ 2712066274U, // <7,u,6,5>: Cost 3 vext3 RHS, <u,6,5,7>
+ 1583903555U, // <7,u,6,6>: Cost 2 vext2 <6,6,7,u>, <6,6,7,u>
+ 1640315117U, // <7,u,6,7>: Cost 2 vext3 RHS, <u,6,7,0>
+ 1638324477U, // <7,u,6,u>: Cost 2 vext3 RHS, <u,6,u,7>
+ 1638471936U, // <7,u,7,0>: Cost 2 vext3 RHS, <u,7,0,1>
+ 2692970763U, // <7,u,7,1>: Cost 3 vext3 <1,3,u,7>, <u,7,1,3>
+ 2700933399U, // <7,u,7,2>: Cost 3 vext3 <2,6,u,7>, <u,7,2,6>
+ 2573347601U, // <7,u,7,3>: Cost 3 vext1 <3,7,u,7>, <3,7,u,7>
+ 1638471976U, // <7,u,7,4>: Cost 2 vext3 RHS, <u,7,4,5>
+ 1511551171U, // <7,u,7,5>: Cost 2 vext1 <5,7,u,7>, <5,7,u,7>
+ 2712213815U, // <7,u,7,6>: Cost 3 vext3 RHS, <u,7,6,2>
+ 363253046U, // <7,u,7,7>: Cost 1 vdup3 RHS
+ 363253046U, // <7,u,7,u>: Cost 1 vdup3 RHS
+ 1638324561U, // <7,u,u,0>: Cost 2 vext3 RHS, <u,u,0,1>
+ 1638324571U, // <7,u,u,1>: Cost 2 vext3 RHS, <u,u,1,2>
+ 564582757U, // <7,u,u,2>: Cost 1 vext3 RHS, LHS
+ 1638324587U, // <7,u,u,3>: Cost 2 vext3 RHS, <u,u,3,0>
+ 1638324601U, // <7,u,u,4>: Cost 2 vext3 RHS, <u,u,4,5>
+ 1638324611U, // <7,u,u,5>: Cost 2 vext3 RHS, <u,u,5,6>
+ 564582797U, // <7,u,u,6>: Cost 1 vext3 RHS, RHS
+ 363253046U, // <7,u,u,7>: Cost 1 vdup3 RHS
+ 564582811U, // <7,u,u,u>: Cost 1 vext3 RHS, LHS
+ 135053414U, // <u,0,0,0>: Cost 1 vdup0 LHS
+ 1611489290U, // <u,0,0,1>: Cost 2 vext3 LHS, <0,0,1,1>
+ 1611489300U, // <u,0,0,2>: Cost 2 vext3 LHS, <0,0,2,2>
+ 2568054923U, // <u,0,0,3>: Cost 3 vext1 <3,0,0,0>, <3,0,0,0>
+ 1481706806U, // <u,0,0,4>: Cost 2 vext1 <0,u,0,0>, RHS
+ 2555449040U, // <u,0,0,5>: Cost 3 vext1 <0,u,0,0>, <5,1,7,3>
+ 2591282078U, // <u,0,0,6>: Cost 3 vext1 <6,u,0,0>, <6,u,0,0>
+ 2591945711U, // <u,0,0,7>: Cost 3 vext1 <7,0,0,0>, <7,0,0,0>
+ 135053414U, // <u,0,0,u>: Cost 1 vdup0 LHS
+ 1493655654U, // <u,0,1,0>: Cost 2 vext1 <2,u,0,1>, LHS
+ 1860550758U, // <u,0,1,1>: Cost 2 vzipl LHS, LHS
+ 537747563U, // <u,0,1,2>: Cost 1 vext3 LHS, LHS
+ 2625135576U, // <u,0,1,3>: Cost 3 vext2 <1,2,u,0>, <1,3,1,3>
+ 1493658934U, // <u,0,1,4>: Cost 2 vext1 <2,u,0,1>, RHS
+ 2625135760U, // <u,0,1,5>: Cost 3 vext2 <1,2,u,0>, <1,5,3,7>
+ 1517548447U, // <u,0,1,6>: Cost 2 vext1 <6,u,0,1>, <6,u,0,1>
+ 2591290362U, // <u,0,1,7>: Cost 3 vext1 <6,u,0,1>, <7,0,1,2>
+ 537747612U, // <u,0,1,u>: Cost 1 vext3 LHS, LHS
+ 1611489444U, // <u,0,2,0>: Cost 2 vext3 LHS, <0,2,0,2>
+ 2685231276U, // <u,0,2,1>: Cost 3 vext3 LHS, <0,2,1,1>
+ 1994768486U, // <u,0,2,2>: Cost 2 vtrnl LHS, LHS
+ 2685231294U, // <u,0,2,3>: Cost 3 vext3 LHS, <0,2,3,1>
+ 1611489484U, // <u,0,2,4>: Cost 2 vext3 LHS, <0,2,4,6>
+ 2712068310U, // <u,0,2,5>: Cost 3 vext3 RHS, <0,2,5,7>
+ 2625136570U, // <u,0,2,6>: Cost 3 vext2 <1,2,u,0>, <2,6,3,7>
+ 2591962097U, // <u,0,2,7>: Cost 3 vext1 <7,0,0,2>, <7,0,0,2>
+ 1611489516U, // <u,0,2,u>: Cost 2 vext3 LHS, <0,2,u,2>
+ 2954067968U, // <u,0,3,0>: Cost 3 vzipr LHS, <0,0,0,0>
+ 2685231356U, // <u,0,3,1>: Cost 3 vext3 LHS, <0,3,1,0>
+ 72589981U, // <u,0,3,2>: Cost 1 vrev LHS
+ 2625137052U, // <u,0,3,3>: Cost 3 vext2 <1,2,u,0>, <3,3,3,3>
+ 2625137154U, // <u,0,3,4>: Cost 3 vext2 <1,2,u,0>, <3,4,5,6>
+ 2639071848U, // <u,0,3,5>: Cost 3 vext2 <3,5,u,0>, <3,5,u,0>
+ 2639735481U, // <u,0,3,6>: Cost 3 vext2 <3,6,u,0>, <3,6,u,0>
+ 2597279354U, // <u,0,3,7>: Cost 3 vext1 <7,u,0,3>, <7,u,0,3>
+ 73032403U, // <u,0,3,u>: Cost 1 vrev LHS
+ 2687074636U, // <u,0,4,0>: Cost 3 vext3 <0,4,0,u>, <0,4,0,u>
+ 1611489618U, // <u,0,4,1>: Cost 2 vext3 LHS, <0,4,1,5>
+ 1611489628U, // <u,0,4,2>: Cost 2 vext3 LHS, <0,4,2,6>
+ 3629222038U, // <u,0,4,3>: Cost 4 vext1 <0,u,0,4>, <3,0,1,2>
+ 2555481398U, // <u,0,4,4>: Cost 3 vext1 <0,u,0,4>, RHS
+ 1551396150U, // <u,0,4,5>: Cost 2 vext2 <1,2,u,0>, RHS
+ 2651680116U, // <u,0,4,6>: Cost 3 vext2 <5,6,u,0>, <4,6,4,6>
+ 2646150600U, // <u,0,4,7>: Cost 3 vext2 <4,7,5,0>, <4,7,5,0>
+ 1611932050U, // <u,0,4,u>: Cost 2 vext3 LHS, <0,4,u,6>
+ 2561458278U, // <u,0,5,0>: Cost 3 vext1 <1,u,0,5>, LHS
+ 1863532646U, // <u,0,5,1>: Cost 2 vzipl RHS, LHS
+ 2712068526U, // <u,0,5,2>: Cost 3 vext3 RHS, <0,5,2,7>
+ 2649689976U, // <u,0,5,3>: Cost 3 vext2 <5,3,u,0>, <5,3,u,0>
+ 2220237489U, // <u,0,5,4>: Cost 3 vrev <0,u,4,5>
+ 2651680772U, // <u,0,5,5>: Cost 3 vext2 <5,6,u,0>, <5,5,5,5>
+ 1577939051U, // <u,0,5,6>: Cost 2 vext2 <5,6,u,0>, <5,6,u,0>
+ 2830077238U, // <u,0,5,7>: Cost 3 vuzpr <1,u,3,0>, RHS
+ 1579266317U, // <u,0,5,u>: Cost 2 vext2 <5,u,u,0>, <5,u,u,0>
+ 2555494502U, // <u,0,6,0>: Cost 3 vext1 <0,u,0,6>, LHS
+ 2712068598U, // <u,0,6,1>: Cost 3 vext3 RHS, <0,6,1,7>
+ 1997750374U, // <u,0,6,2>: Cost 2 vtrnl RHS, LHS
+ 2655662673U, // <u,0,6,3>: Cost 3 vext2 <6,3,u,0>, <6,3,u,0>
+ 2555497782U, // <u,0,6,4>: Cost 3 vext1 <0,u,0,6>, RHS
+ 2651681459U, // <u,0,6,5>: Cost 3 vext2 <5,6,u,0>, <6,5,0,u>
+ 2651681592U, // <u,0,6,6>: Cost 3 vext2 <5,6,u,0>, <6,6,6,6>
+ 2651681614U, // <u,0,6,7>: Cost 3 vext2 <5,6,u,0>, <6,7,0,1>
+ 1997750428U, // <u,0,6,u>: Cost 2 vtrnl RHS, LHS
+ 2567446630U, // <u,0,7,0>: Cost 3 vext1 <2,u,0,7>, LHS
+ 2567447446U, // <u,0,7,1>: Cost 3 vext1 <2,u,0,7>, <1,2,3,0>
+ 2567448641U, // <u,0,7,2>: Cost 3 vext1 <2,u,0,7>, <2,u,0,7>
+ 2573421338U, // <u,0,7,3>: Cost 3 vext1 <3,u,0,7>, <3,u,0,7>
+ 2567449910U, // <u,0,7,4>: Cost 3 vext1 <2,u,0,7>, RHS
+ 2651682242U, // <u,0,7,5>: Cost 3 vext2 <5,6,u,0>, <7,5,6,u>
+ 2591339429U, // <u,0,7,6>: Cost 3 vext1 <6,u,0,7>, <6,u,0,7>
+ 2651682412U, // <u,0,7,7>: Cost 3 vext2 <5,6,u,0>, <7,7,7,7>
+ 2567452462U, // <u,0,7,u>: Cost 3 vext1 <2,u,0,7>, LHS
+ 135053414U, // <u,0,u,0>: Cost 1 vdup0 LHS
+ 1611489938U, // <u,0,u,1>: Cost 2 vext3 LHS, <0,u,1,1>
+ 537748125U, // <u,0,u,2>: Cost 1 vext3 LHS, LHS
+ 2685674148U, // <u,0,u,3>: Cost 3 vext3 LHS, <0,u,3,1>
+ 1611932338U, // <u,0,u,4>: Cost 2 vext3 LHS, <0,u,4,6>
+ 1551399066U, // <u,0,u,5>: Cost 2 vext2 <1,2,u,0>, RHS
+ 1517605798U, // <u,0,u,6>: Cost 2 vext1 <6,u,0,u>, <6,u,0,u>
+ 2830077481U, // <u,0,u,7>: Cost 3 vuzpr <1,u,3,0>, RHS
+ 537748179U, // <u,0,u,u>: Cost 1 vext3 LHS, LHS
+ 1544101961U, // <u,1,0,0>: Cost 2 vext2 <0,0,u,1>, <0,0,u,1>
+ 1558036582U, // <u,1,0,1>: Cost 2 vext2 <2,3,u,1>, LHS
+ 2619171051U, // <u,1,0,2>: Cost 3 vext2 <0,2,u,1>, <0,2,u,1>
+ 1611490038U, // <u,1,0,3>: Cost 2 vext3 LHS, <1,0,3,2>
+ 2555522358U, // <u,1,0,4>: Cost 3 vext1 <0,u,1,0>, RHS
+ 2712068871U, // <u,1,0,5>: Cost 3 vext3 RHS, <1,0,5,1>
+ 2591355815U, // <u,1,0,6>: Cost 3 vext1 <6,u,1,0>, <6,u,1,0>
+ 2597328512U, // <u,1,0,7>: Cost 3 vext1 <7,u,1,0>, <7,u,1,0>
+ 1611490083U, // <u,1,0,u>: Cost 2 vext3 LHS, <1,0,u,2>
+ 1481785446U, // <u,1,1,0>: Cost 2 vext1 <0,u,1,1>, LHS
+ 202162278U, // <u,1,1,1>: Cost 1 vdup1 LHS
+ 2555528808U, // <u,1,1,2>: Cost 3 vext1 <0,u,1,1>, <2,2,2,2>
+ 1611490120U, // <u,1,1,3>: Cost 2 vext3 LHS, <1,1,3,3>
+ 1481788726U, // <u,1,1,4>: Cost 2 vext1 <0,u,1,1>, RHS
+ 2689876828U, // <u,1,1,5>: Cost 3 vext3 LHS, <1,1,5,5>
+ 2591364008U, // <u,1,1,6>: Cost 3 vext1 <6,u,1,1>, <6,u,1,1>
+ 2592691274U, // <u,1,1,7>: Cost 3 vext1 <7,1,1,1>, <7,1,1,1>
+ 202162278U, // <u,1,1,u>: Cost 1 vdup1 LHS
+ 1499709542U, // <u,1,2,0>: Cost 2 vext1 <3,u,1,2>, LHS
+ 2689876871U, // <u,1,2,1>: Cost 3 vext3 LHS, <1,2,1,3>
+ 2631116445U, // <u,1,2,2>: Cost 3 vext2 <2,2,u,1>, <2,2,u,1>
+ 835584U, // <u,1,2,3>: Cost 0 copy LHS
+ 1499712822U, // <u,1,2,4>: Cost 2 vext1 <3,u,1,2>, RHS
+ 2689876907U, // <u,1,2,5>: Cost 3 vext3 LHS, <1,2,5,3>
+ 2631780282U, // <u,1,2,6>: Cost 3 vext2 <2,3,u,1>, <2,6,3,7>
+ 1523603074U, // <u,1,2,7>: Cost 2 vext1 <7,u,1,2>, <7,u,1,2>
+ 835584U, // <u,1,2,u>: Cost 0 copy LHS
+ 1487773798U, // <u,1,3,0>: Cost 2 vext1 <1,u,1,3>, LHS
+ 1611490264U, // <u,1,3,1>: Cost 2 vext3 LHS, <1,3,1,3>
+ 2685232094U, // <u,1,3,2>: Cost 3 vext3 LHS, <1,3,2,0>
+ 2018746470U, // <u,1,3,3>: Cost 2 vtrnr LHS, LHS
+ 1487777078U, // <u,1,3,4>: Cost 2 vext1 <1,u,1,3>, RHS
+ 1611490304U, // <u,1,3,5>: Cost 2 vext3 LHS, <1,3,5,7>
+ 2685674505U, // <u,1,3,6>: Cost 3 vext3 LHS, <1,3,6,7>
+ 2640407307U, // <u,1,3,7>: Cost 3 vext2 <3,7,u,1>, <3,7,u,1>
+ 1611490327U, // <u,1,3,u>: Cost 2 vext3 LHS, <1,3,u,3>
+ 1567992749U, // <u,1,4,0>: Cost 2 vext2 <4,0,u,1>, <4,0,u,1>
+ 2693121070U, // <u,1,4,1>: Cost 3 vext3 <1,4,1,u>, <1,4,1,u>
+ 2693194807U, // <u,1,4,2>: Cost 3 vext3 <1,4,2,u>, <1,4,2,u>
+ 1152386432U, // <u,1,4,3>: Cost 2 vrev <1,u,3,4>
+ 2555555126U, // <u,1,4,4>: Cost 3 vext1 <0,u,1,4>, RHS
+ 1558039862U, // <u,1,4,5>: Cost 2 vext2 <2,3,u,1>, RHS
+ 2645716371U, // <u,1,4,6>: Cost 3 vext2 <4,6,u,1>, <4,6,u,1>
+ 2597361284U, // <u,1,4,7>: Cost 3 vext1 <7,u,1,4>, <7,u,1,4>
+ 1152755117U, // <u,1,4,u>: Cost 2 vrev <1,u,u,4>
+ 1481818214U, // <u,1,5,0>: Cost 2 vext1 <0,u,1,5>, LHS
+ 2555560694U, // <u,1,5,1>: Cost 3 vext1 <0,u,1,5>, <1,0,3,2>
+ 2555561576U, // <u,1,5,2>: Cost 3 vext1 <0,u,1,5>, <2,2,2,2>
+ 1611490448U, // <u,1,5,3>: Cost 2 vext3 LHS, <1,5,3,7>
+ 1481821494U, // <u,1,5,4>: Cost 2 vext1 <0,u,1,5>, RHS
+ 2651025435U, // <u,1,5,5>: Cost 3 vext2 <5,5,u,1>, <5,5,u,1>
+ 2651689068U, // <u,1,5,6>: Cost 3 vext2 <5,6,u,1>, <5,6,u,1>
+ 2823966006U, // <u,1,5,7>: Cost 3 vuzpr <0,u,1,1>, RHS
+ 1611932861U, // <u,1,5,u>: Cost 2 vext3 LHS, <1,5,u,7>
+ 2555568230U, // <u,1,6,0>: Cost 3 vext1 <0,u,1,6>, LHS
+ 2689877199U, // <u,1,6,1>: Cost 3 vext3 LHS, <1,6,1,7>
+ 2712069336U, // <u,1,6,2>: Cost 3 vext3 RHS, <1,6,2,7>
+ 2685232353U, // <u,1,6,3>: Cost 3 vext3 LHS, <1,6,3,7>
+ 2555571510U, // <u,1,6,4>: Cost 3 vext1 <0,u,1,6>, RHS
+ 2689877235U, // <u,1,6,5>: Cost 3 vext3 LHS, <1,6,5,7>
+ 2657661765U, // <u,1,6,6>: Cost 3 vext2 <6,6,u,1>, <6,6,u,1>
+ 1584583574U, // <u,1,6,7>: Cost 2 vext2 <6,7,u,1>, <6,7,u,1>
+ 1585247207U, // <u,1,6,u>: Cost 2 vext2 <6,u,u,1>, <6,u,u,1>
+ 2561548390U, // <u,1,7,0>: Cost 3 vext1 <1,u,1,7>, LHS
+ 2561549681U, // <u,1,7,1>: Cost 3 vext1 <1,u,1,7>, <1,u,1,7>
+ 2573493926U, // <u,1,7,2>: Cost 3 vext1 <3,u,1,7>, <2,3,0,1>
+ 2042962022U, // <u,1,7,3>: Cost 2 vtrnr RHS, LHS
+ 2561551670U, // <u,1,7,4>: Cost 3 vext1 <1,u,1,7>, RHS
+ 2226300309U, // <u,1,7,5>: Cost 3 vrev <1,u,5,7>
+ 2658325990U, // <u,1,7,6>: Cost 3 vext2 <6,7,u,1>, <7,6,1,u>
+ 2658326124U, // <u,1,7,7>: Cost 3 vext2 <6,7,u,1>, <7,7,7,7>
+ 2042962027U, // <u,1,7,u>: Cost 2 vtrnr RHS, LHS
+ 1481842790U, // <u,1,u,0>: Cost 2 vext1 <0,u,1,u>, LHS
+ 202162278U, // <u,1,u,1>: Cost 1 vdup1 LHS
+ 2685674867U, // <u,1,u,2>: Cost 3 vext3 LHS, <1,u,2,0>
+ 835584U, // <u,1,u,3>: Cost 0 copy LHS
+ 1481846070U, // <u,1,u,4>: Cost 2 vext1 <0,u,1,u>, RHS
+ 1611933077U, // <u,1,u,5>: Cost 2 vext3 LHS, <1,u,5,7>
+ 2685674910U, // <u,1,u,6>: Cost 3 vext3 LHS, <1,u,6,7>
+ 1523652232U, // <u,1,u,7>: Cost 2 vext1 <7,u,1,u>, <7,u,1,u>
+ 835584U, // <u,1,u,u>: Cost 0 copy LHS
+ 1544110154U, // <u,2,0,0>: Cost 2 vext2 <0,0,u,2>, <0,0,u,2>
+ 1545437286U, // <u,2,0,1>: Cost 2 vext2 <0,2,u,2>, LHS
+ 1545437420U, // <u,2,0,2>: Cost 2 vext2 <0,2,u,2>, <0,2,u,2>
+ 2685232589U, // <u,2,0,3>: Cost 3 vext3 LHS, <2,0,3,0>
+ 2619179346U, // <u,2,0,4>: Cost 3 vext2 <0,2,u,2>, <0,4,1,5>
+ 2712069606U, // <u,2,0,5>: Cost 3 vext3 RHS, <2,0,5,7>
+ 2689877484U, // <u,2,0,6>: Cost 3 vext3 LHS, <2,0,6,4>
+ 2659656273U, // <u,2,0,7>: Cost 3 vext2 <7,0,u,2>, <0,7,2,u>
+ 1545437853U, // <u,2,0,u>: Cost 2 vext2 <0,2,u,2>, LHS
+ 1550082851U, // <u,2,1,0>: Cost 2 vext2 <1,0,u,2>, <1,0,u,2>
+ 2619179828U, // <u,2,1,1>: Cost 3 vext2 <0,2,u,2>, <1,1,1,1>
+ 2619179926U, // <u,2,1,2>: Cost 3 vext2 <0,2,u,2>, <1,2,3,0>
+ 2685232671U, // <u,2,1,3>: Cost 3 vext3 LHS, <2,1,3,1>
+ 2555604278U, // <u,2,1,4>: Cost 3 vext1 <0,u,2,1>, RHS
+ 2619180176U, // <u,2,1,5>: Cost 3 vext2 <0,2,u,2>, <1,5,3,7>
+ 2689877564U, // <u,2,1,6>: Cost 3 vext3 LHS, <2,1,6,3>
+ 2602718850U, // <u,2,1,7>: Cost 3 vext1 <u,7,2,1>, <7,u,1,2>
+ 1158703235U, // <u,2,1,u>: Cost 2 vrev <2,u,u,1>
+ 1481867366U, // <u,2,2,0>: Cost 2 vext1 <0,u,2,2>, LHS
+ 2555609846U, // <u,2,2,1>: Cost 3 vext1 <0,u,2,2>, <1,0,3,2>
+ 269271142U, // <u,2,2,2>: Cost 1 vdup2 LHS
+ 1611490930U, // <u,2,2,3>: Cost 2 vext3 LHS, <2,2,3,3>
+ 1481870646U, // <u,2,2,4>: Cost 2 vext1 <0,u,2,2>, RHS
+ 2689877640U, // <u,2,2,5>: Cost 3 vext3 LHS, <2,2,5,7>
+ 2619180986U, // <u,2,2,6>: Cost 3 vext2 <0,2,u,2>, <2,6,3,7>
+ 2593436837U, // <u,2,2,7>: Cost 3 vext1 <7,2,2,2>, <7,2,2,2>
+ 269271142U, // <u,2,2,u>: Cost 1 vdup2 LHS
+ 408134301U, // <u,2,3,0>: Cost 1 vext1 LHS, LHS
+ 1481876214U, // <u,2,3,1>: Cost 2 vext1 LHS, <1,0,3,2>
+ 1481877096U, // <u,2,3,2>: Cost 2 vext1 LHS, <2,2,2,2>
+ 1880326246U, // <u,2,3,3>: Cost 2 vzipr LHS, LHS
+ 408137014U, // <u,2,3,4>: Cost 1 vext1 LHS, RHS
+ 1529654992U, // <u,2,3,5>: Cost 2 vext1 LHS, <5,1,7,3>
+ 1529655802U, // <u,2,3,6>: Cost 2 vext1 LHS, <6,2,7,3>
+ 1529656314U, // <u,2,3,7>: Cost 2 vext1 LHS, <7,0,1,2>
+ 408139566U, // <u,2,3,u>: Cost 1 vext1 LHS, LHS
+ 1567853468U, // <u,2,4,0>: Cost 2 vext2 <4,0,6,2>, <4,0,6,2>
+ 2561598362U, // <u,2,4,1>: Cost 3 vext1 <1,u,2,4>, <1,2,3,4>
+ 2555627214U, // <u,2,4,2>: Cost 3 vext1 <0,u,2,4>, <2,3,4,5>
+ 2685232918U, // <u,2,4,3>: Cost 3 vext3 LHS, <2,4,3,5>
+ 2555628854U, // <u,2,4,4>: Cost 3 vext1 <0,u,2,4>, RHS
+ 1545440566U, // <u,2,4,5>: Cost 2 vext2 <0,2,u,2>, RHS
+ 1571982740U, // <u,2,4,6>: Cost 2 vext2 <4,6,u,2>, <4,6,u,2>
+ 2592125957U, // <u,2,4,7>: Cost 3 vext1 <7,0,2,4>, <7,0,2,4>
+ 1545440809U, // <u,2,4,u>: Cost 2 vext2 <0,2,u,2>, RHS
+ 2555633766U, // <u,2,5,0>: Cost 3 vext1 <0,u,2,5>, LHS
+ 2561606550U, // <u,2,5,1>: Cost 3 vext1 <1,u,2,5>, <1,2,3,0>
+ 2689877856U, // <u,2,5,2>: Cost 3 vext3 LHS, <2,5,2,7>
+ 2685233000U, // <u,2,5,3>: Cost 3 vext3 LHS, <2,5,3,6>
+ 1158441059U, // <u,2,5,4>: Cost 2 vrev <2,u,4,5>
+ 2645725188U, // <u,2,5,5>: Cost 3 vext2 <4,6,u,2>, <5,5,5,5>
+ 2689877892U, // <u,2,5,6>: Cost 3 vext3 LHS, <2,5,6,7>
+ 2823900470U, // <u,2,5,7>: Cost 3 vuzpr <0,u,0,2>, RHS
+ 1158736007U, // <u,2,5,u>: Cost 2 vrev <2,u,u,5>
+ 1481900134U, // <u,2,6,0>: Cost 2 vext1 <0,u,2,6>, LHS
+ 2555642614U, // <u,2,6,1>: Cost 3 vext1 <0,u,2,6>, <1,0,3,2>
+ 2555643496U, // <u,2,6,2>: Cost 3 vext1 <0,u,2,6>, <2,2,2,2>
+ 1611491258U, // <u,2,6,3>: Cost 2 vext3 LHS, <2,6,3,7>
+ 1481903414U, // <u,2,6,4>: Cost 2 vext1 <0,u,2,6>, RHS
+ 2689877964U, // <u,2,6,5>: Cost 3 vext3 LHS, <2,6,5,7>
+ 2689877973U, // <u,2,6,6>: Cost 3 vext3 LHS, <2,6,6,7>
+ 2645726030U, // <u,2,6,7>: Cost 3 vext2 <4,6,u,2>, <6,7,0,1>
+ 1611933671U, // <u,2,6,u>: Cost 2 vext3 LHS, <2,6,u,7>
+ 1585919033U, // <u,2,7,0>: Cost 2 vext2 <7,0,u,2>, <7,0,u,2>
+ 2573566710U, // <u,2,7,1>: Cost 3 vext1 <3,u,2,7>, <1,0,3,2>
+ 2567596115U, // <u,2,7,2>: Cost 3 vext1 <2,u,2,7>, <2,u,2,7>
+ 1906901094U, // <u,2,7,3>: Cost 2 vzipr RHS, LHS
+ 2555653430U, // <u,2,7,4>: Cost 3 vext1 <0,u,2,7>, RHS
+ 2800080230U, // <u,2,7,5>: Cost 3 vuzpl LHS, <7,4,5,6>
+ 2980643164U, // <u,2,7,6>: Cost 3 vzipr RHS, <0,4,2,6>
+ 2645726828U, // <u,2,7,7>: Cost 3 vext2 <4,6,u,2>, <7,7,7,7>
+ 1906901099U, // <u,2,7,u>: Cost 2 vzipr RHS, LHS
+ 408175266U, // <u,2,u,0>: Cost 1 vext1 LHS, LHS
+ 1545443118U, // <u,2,u,1>: Cost 2 vext2 <0,2,u,2>, LHS
+ 269271142U, // <u,2,u,2>: Cost 1 vdup2 LHS
+ 1611491416U, // <u,2,u,3>: Cost 2 vext3 LHS, <2,u,3,3>
+ 408177974U, // <u,2,u,4>: Cost 1 vext1 LHS, RHS
+ 1545443482U, // <u,2,u,5>: Cost 2 vext2 <0,2,u,2>, RHS
+ 1726339226U, // <u,2,u,6>: Cost 2 vuzpl LHS, RHS
+ 1529697274U, // <u,2,u,7>: Cost 2 vext1 LHS, <7,0,1,2>
+ 408180526U, // <u,2,u,u>: Cost 1 vext1 LHS, LHS
+ 1544781824U, // <u,3,0,0>: Cost 2 vext2 LHS, <0,0,0,0>
+ 471040156U, // <u,3,0,1>: Cost 1 vext2 LHS, LHS
+ 1544781988U, // <u,3,0,2>: Cost 2 vext2 LHS, <0,2,0,2>
+ 2618523900U, // <u,3,0,3>: Cost 3 vext2 LHS, <0,3,1,0>
+ 1544782162U, // <u,3,0,4>: Cost 2 vext2 LHS, <0,4,1,5>
+ 2238188352U, // <u,3,0,5>: Cost 3 vrev <3,u,5,0>
+ 2623169023U, // <u,3,0,6>: Cost 3 vext2 LHS, <0,6,2,7>
+ 2238335826U, // <u,3,0,7>: Cost 3 vrev <3,u,7,0>
+ 471040669U, // <u,3,0,u>: Cost 1 vext2 LHS, LHS
+ 1544782582U, // <u,3,1,0>: Cost 2 vext2 LHS, <1,0,3,2>
+ 1544782644U, // <u,3,1,1>: Cost 2 vext2 LHS, <1,1,1,1>
+ 1544782742U, // <u,3,1,2>: Cost 2 vext2 LHS, <1,2,3,0>
+ 1544782808U, // <u,3,1,3>: Cost 2 vext2 LHS, <1,3,1,3>
+ 2618524733U, // <u,3,1,4>: Cost 3 vext2 LHS, <1,4,3,5>
+ 1544782992U, // <u,3,1,5>: Cost 2 vext2 LHS, <1,5,3,7>
+ 2618524897U, // <u,3,1,6>: Cost 3 vext2 LHS, <1,6,3,7>
+ 2703517987U, // <u,3,1,7>: Cost 3 vext3 <3,1,7,u>, <3,1,7,u>
+ 1544783213U, // <u,3,1,u>: Cost 2 vext2 LHS, <1,u,1,3>
+ 1529716838U, // <u,3,2,0>: Cost 2 vext1 <u,u,3,2>, LHS
+ 1164167966U, // <u,3,2,1>: Cost 2 vrev <3,u,1,2>
+ 1544783464U, // <u,3,2,2>: Cost 2 vext2 LHS, <2,2,2,2>
+ 1544783526U, // <u,3,2,3>: Cost 2 vext2 LHS, <2,3,0,1>
+ 1529720118U, // <u,3,2,4>: Cost 2 vext1 <u,u,3,2>, RHS
+ 2618525544U, // <u,3,2,5>: Cost 3 vext2 LHS, <2,5,3,6>
+ 1544783802U, // <u,3,2,6>: Cost 2 vext2 LHS, <2,6,3,7>
+ 2704181620U, // <u,3,2,7>: Cost 3 vext3 <3,2,7,u>, <3,2,7,u>
+ 1544783931U, // <u,3,2,u>: Cost 2 vext2 LHS, <2,u,0,1>
+ 1544784022U, // <u,3,3,0>: Cost 2 vext2 LHS, <3,0,1,2>
+ 1487922559U, // <u,3,3,1>: Cost 2 vext1 <1,u,3,3>, <1,u,3,3>
+ 1493895256U, // <u,3,3,2>: Cost 2 vext1 <2,u,3,3>, <2,u,3,3>
+ 336380006U, // <u,3,3,3>: Cost 1 vdup3 LHS
+ 1544784386U, // <u,3,3,4>: Cost 2 vext2 LHS, <3,4,5,6>
+ 2824054478U, // <u,3,3,5>: Cost 3 vuzpr LHS, <2,3,4,5>
+ 2238286668U, // <u,3,3,6>: Cost 3 vrev <3,u,6,3>
+ 2954069136U, // <u,3,3,7>: Cost 3 vzipr LHS, <1,5,3,7>
+ 336380006U, // <u,3,3,u>: Cost 1 vdup3 LHS
+ 1487929446U, // <u,3,4,0>: Cost 2 vext1 <1,u,3,4>, LHS
+ 1487930752U, // <u,3,4,1>: Cost 2 vext1 <1,u,3,4>, <1,u,3,4>
+ 2623171644U, // <u,3,4,2>: Cost 3 vext2 LHS, <4,2,6,0>
+ 2561673366U, // <u,3,4,3>: Cost 3 vext1 <1,u,3,4>, <3,0,1,2>
+ 1487932726U, // <u,3,4,4>: Cost 2 vext1 <1,u,3,4>, RHS
+ 471043382U, // <u,3,4,5>: Cost 1 vext2 LHS, RHS
+ 1592561012U, // <u,3,4,6>: Cost 2 vext2 LHS, <4,6,4,6>
+ 2238368598U, // <u,3,4,7>: Cost 3 vrev <3,u,7,4>
+ 471043625U, // <u,3,4,u>: Cost 1 vext2 LHS, RHS
+ 2555707494U, // <u,3,5,0>: Cost 3 vext1 <0,u,3,5>, LHS
+ 1574645465U, // <u,3,5,1>: Cost 2 vext2 <5,1,u,3>, <5,1,u,3>
+ 2567653106U, // <u,3,5,2>: Cost 3 vext1 <2,u,3,5>, <2,3,u,5>
+ 2555709954U, // <u,3,5,3>: Cost 3 vext1 <0,u,3,5>, <3,4,5,6>
+ 1592561606U, // <u,3,5,4>: Cost 2 vext2 LHS, <5,4,7,6>
+ 1592561668U, // <u,3,5,5>: Cost 2 vext2 LHS, <5,5,5,5>
+ 1592561762U, // <u,3,5,6>: Cost 2 vext2 LHS, <5,6,7,0>
+ 1750314294U, // <u,3,5,7>: Cost 2 vuzpr LHS, RHS
+ 1750314295U, // <u,3,5,u>: Cost 2 vuzpr LHS, RHS
+ 2623172897U, // <u,3,6,0>: Cost 3 vext2 LHS, <6,0,1,2>
+ 2561688962U, // <u,3,6,1>: Cost 3 vext1 <1,u,3,6>, <1,u,3,6>
+ 1581281795U, // <u,3,6,2>: Cost 2 vext2 <6,2,u,3>, <6,2,u,3>
+ 2706541204U, // <u,3,6,3>: Cost 3 vext3 <3,6,3,u>, <3,6,3,u>
+ 2623173261U, // <u,3,6,4>: Cost 3 vext2 LHS, <6,4,5,6>
+ 1164495686U, // <u,3,6,5>: Cost 2 vrev <3,u,5,6>
+ 1592562488U, // <u,3,6,6>: Cost 2 vext2 LHS, <6,6,6,6>
+ 1592562510U, // <u,3,6,7>: Cost 2 vext2 LHS, <6,7,0,1>
+ 1164716897U, // <u,3,6,u>: Cost 2 vrev <3,u,u,6>
+ 1487954022U, // <u,3,7,0>: Cost 2 vext1 <1,u,3,7>, LHS
+ 1487955331U, // <u,3,7,1>: Cost 2 vext1 <1,u,3,7>, <1,u,3,7>
+ 1493928028U, // <u,3,7,2>: Cost 2 vext1 <2,u,3,7>, <2,u,3,7>
+ 2561697942U, // <u,3,7,3>: Cost 3 vext1 <1,u,3,7>, <3,0,1,2>
+ 1487957302U, // <u,3,7,4>: Cost 2 vext1 <1,u,3,7>, RHS
+ 2707352311U, // <u,3,7,5>: Cost 3 vext3 <3,7,5,u>, <3,7,5,u>
+ 2655024623U, // <u,3,7,6>: Cost 3 vext2 <6,2,u,3>, <7,6,2,u>
+ 1592563308U, // <u,3,7,7>: Cost 2 vext2 LHS, <7,7,7,7>
+ 1487959854U, // <u,3,7,u>: Cost 2 vext1 <1,u,3,7>, LHS
+ 1544787667U, // <u,3,u,0>: Cost 2 vext2 LHS, <u,0,1,2>
+ 471045934U, // <u,3,u,1>: Cost 1 vext2 LHS, LHS
+ 1549432709U, // <u,3,u,2>: Cost 2 vext2 LHS, <u,2,3,0>
+ 336380006U, // <u,3,u,3>: Cost 1 vdup3 LHS
+ 1544788031U, // <u,3,u,4>: Cost 2 vext2 LHS, <u,4,5,6>
+ 471046298U, // <u,3,u,5>: Cost 1 vext2 LHS, RHS
+ 1549433040U, // <u,3,u,6>: Cost 2 vext2 LHS, <u,6,3,7>
+ 1750314537U, // <u,3,u,7>: Cost 2 vuzpr LHS, RHS
+ 471046501U, // <u,3,u,u>: Cost 1 vext2 LHS, LHS
+ 2625167360U, // <u,4,0,0>: Cost 3 vext2 <1,2,u,4>, <0,0,0,0>
+ 1551425638U, // <u,4,0,1>: Cost 2 vext2 <1,2,u,4>, LHS
+ 2619195630U, // <u,4,0,2>: Cost 3 vext2 <0,2,u,4>, <0,2,u,4>
+ 2619343104U, // <u,4,0,3>: Cost 3 vext2 <0,3,1,4>, <0,3,1,4>
+ 2625167698U, // <u,4,0,4>: Cost 3 vext2 <1,2,u,4>, <0,4,1,5>
+ 1638329234U, // <u,4,0,5>: Cost 2 vext3 RHS, <4,0,5,1>
+ 1638329244U, // <u,4,0,6>: Cost 2 vext3 RHS, <4,0,6,2>
+ 3787803556U, // <u,4,0,7>: Cost 4 vext3 RHS, <4,0,7,1>
+ 1551426205U, // <u,4,0,u>: Cost 2 vext2 <1,2,u,4>, LHS
+ 2555748454U, // <u,4,1,0>: Cost 3 vext1 <0,u,4,1>, LHS
+ 2625168180U, // <u,4,1,1>: Cost 3 vext2 <1,2,u,4>, <1,1,1,1>
+ 1551426503U, // <u,4,1,2>: Cost 2 vext2 <1,2,u,4>, <1,2,u,4>
+ 2625168344U, // <u,4,1,3>: Cost 3 vext2 <1,2,u,4>, <1,3,1,3>
+ 2555751734U, // <u,4,1,4>: Cost 3 vext1 <0,u,4,1>, RHS
+ 1860554038U, // <u,4,1,5>: Cost 2 vzipl LHS, RHS
+ 2689879022U, // <u,4,1,6>: Cost 3 vext3 LHS, <4,1,6,3>
+ 2592248852U, // <u,4,1,7>: Cost 3 vext1 <7,0,4,1>, <7,0,4,1>
+ 1555408301U, // <u,4,1,u>: Cost 2 vext2 <1,u,u,4>, <1,u,u,4>
+ 2555756646U, // <u,4,2,0>: Cost 3 vext1 <0,u,4,2>, LHS
+ 2625168943U, // <u,4,2,1>: Cost 3 vext2 <1,2,u,4>, <2,1,4,u>
+ 2625169000U, // <u,4,2,2>: Cost 3 vext2 <1,2,u,4>, <2,2,2,2>
+ 2619197134U, // <u,4,2,3>: Cost 3 vext2 <0,2,u,4>, <2,3,4,5>
+ 2555759926U, // <u,4,2,4>: Cost 3 vext1 <0,u,4,2>, RHS
+ 2712071222U, // <u,4,2,5>: Cost 3 vext3 RHS, <4,2,5,3>
+ 1994771766U, // <u,4,2,6>: Cost 2 vtrnl LHS, RHS
+ 2592257045U, // <u,4,2,7>: Cost 3 vext1 <7,0,4,2>, <7,0,4,2>
+ 1994771784U, // <u,4,2,u>: Cost 2 vtrnl LHS, RHS
+ 2625169558U, // <u,4,3,0>: Cost 3 vext2 <1,2,u,4>, <3,0,1,2>
+ 2567709594U, // <u,4,3,1>: Cost 3 vext1 <2,u,4,3>, <1,2,3,4>
+ 2567710817U, // <u,4,3,2>: Cost 3 vext1 <2,u,4,3>, <2,u,4,3>
+ 2625169820U, // <u,4,3,3>: Cost 3 vext2 <1,2,u,4>, <3,3,3,3>
+ 2625169922U, // <u,4,3,4>: Cost 3 vext2 <1,2,u,4>, <3,4,5,6>
+ 2954069710U, // <u,4,3,5>: Cost 3 vzipr LHS, <2,3,4,5>
+ 2954068172U, // <u,4,3,6>: Cost 3 vzipr LHS, <0,2,4,6>
+ 3903849472U, // <u,4,3,7>: Cost 4 vuzpr <1,u,3,4>, <1,3,5,7>
+ 2954068174U, // <u,4,3,u>: Cost 3 vzipr LHS, <0,2,4,u>
+ 1505919078U, // <u,4,4,0>: Cost 2 vext1 <4,u,4,4>, LHS
+ 2567717831U, // <u,4,4,1>: Cost 3 vext1 <2,u,4,4>, <1,2,u,4>
+ 2567719010U, // <u,4,4,2>: Cost 3 vext1 <2,u,4,4>, <2,u,4,4>
+ 2570373542U, // <u,4,4,3>: Cost 3 vext1 <3,3,4,4>, <3,3,4,4>
+ 161926454U, // <u,4,4,4>: Cost 1 vdup0 RHS
+ 1551428918U, // <u,4,4,5>: Cost 2 vext2 <1,2,u,4>, RHS
+ 1638329572U, // <u,4,4,6>: Cost 2 vext3 RHS, <4,4,6,6>
+ 2594927963U, // <u,4,4,7>: Cost 3 vext1 <7,4,4,4>, <7,4,4,4>
+ 161926454U, // <u,4,4,u>: Cost 1 vdup0 RHS
+ 1493983334U, // <u,4,5,0>: Cost 2 vext1 <2,u,4,5>, LHS
+ 2689879301U, // <u,4,5,1>: Cost 3 vext3 LHS, <4,5,1,3>
+ 1493985379U, // <u,4,5,2>: Cost 2 vext1 <2,u,4,5>, <2,u,4,5>
+ 2567727254U, // <u,4,5,3>: Cost 3 vext1 <2,u,4,5>, <3,0,1,2>
+ 1493986614U, // <u,4,5,4>: Cost 2 vext1 <2,u,4,5>, RHS
+ 1863535926U, // <u,4,5,5>: Cost 2 vzipl RHS, RHS
+ 537750838U, // <u,4,5,6>: Cost 1 vext3 LHS, RHS
+ 2830110006U, // <u,4,5,7>: Cost 3 vuzpr <1,u,3,4>, RHS
+ 537750856U, // <u,4,5,u>: Cost 1 vext3 LHS, RHS
+ 1482047590U, // <u,4,6,0>: Cost 2 vext1 <0,u,4,6>, LHS
+ 2555790070U, // <u,4,6,1>: Cost 3 vext1 <0,u,4,6>, <1,0,3,2>
+ 2555790952U, // <u,4,6,2>: Cost 3 vext1 <0,u,4,6>, <2,2,2,2>
+ 2555791510U, // <u,4,6,3>: Cost 3 vext1 <0,u,4,6>, <3,0,1,2>
+ 1482050870U, // <u,4,6,4>: Cost 2 vext1 <0,u,4,6>, RHS
+ 2689879422U, // <u,4,6,5>: Cost 3 vext3 LHS, <4,6,5,7>
+ 1997753654U, // <u,4,6,6>: Cost 2 vtrnl RHS, RHS
+ 2712071562U, // <u,4,6,7>: Cost 3 vext3 RHS, <4,6,7,1>
+ 1482053422U, // <u,4,6,u>: Cost 2 vext1 <0,u,4,6>, LHS
+ 2567741542U, // <u,4,7,0>: Cost 3 vext1 <2,u,4,7>, LHS
+ 2567742362U, // <u,4,7,1>: Cost 3 vext1 <2,u,4,7>, <1,2,3,4>
+ 2567743589U, // <u,4,7,2>: Cost 3 vext1 <2,u,4,7>, <2,u,4,7>
+ 2573716286U, // <u,4,7,3>: Cost 3 vext1 <3,u,4,7>, <3,u,4,7>
+ 2567744822U, // <u,4,7,4>: Cost 3 vext1 <2,u,4,7>, RHS
+ 2712071624U, // <u,4,7,5>: Cost 3 vext3 RHS, <4,7,5,0>
+ 96808489U, // <u,4,7,6>: Cost 1 vrev RHS
+ 2651715180U, // <u,4,7,7>: Cost 3 vext2 <5,6,u,4>, <7,7,7,7>
+ 96955963U, // <u,4,7,u>: Cost 1 vrev RHS
+ 1482063974U, // <u,4,u,0>: Cost 2 vext1 <0,u,4,u>, LHS
+ 1551431470U, // <u,4,u,1>: Cost 2 vext2 <1,2,u,4>, LHS
+ 1494009958U, // <u,4,u,2>: Cost 2 vext1 <2,u,4,u>, <2,u,4,u>
+ 2555807894U, // <u,4,u,3>: Cost 3 vext1 <0,u,4,u>, <3,0,1,2>
+ 161926454U, // <u,4,u,4>: Cost 1 vdup0 RHS
+ 1551431834U, // <u,4,u,5>: Cost 2 vext2 <1,2,u,4>, RHS
+ 537751081U, // <u,4,u,6>: Cost 1 vext3 LHS, RHS
+ 2830110249U, // <u,4,u,7>: Cost 3 vuzpr <1,u,3,4>, RHS
+ 537751099U, // <u,4,u,u>: Cost 1 vext3 LHS, RHS
+ 2631811072U, // <u,5,0,0>: Cost 3 vext2 <2,3,u,5>, <0,0,0,0>
+ 1558069350U, // <u,5,0,1>: Cost 2 vext2 <2,3,u,5>, LHS
+ 2619203823U, // <u,5,0,2>: Cost 3 vext2 <0,2,u,5>, <0,2,u,5>
+ 2619867456U, // <u,5,0,3>: Cost 3 vext2 <0,3,u,5>, <0,3,u,5>
+ 1546273106U, // <u,5,0,4>: Cost 2 vext2 <0,4,1,5>, <0,4,1,5>
+ 2733010539U, // <u,5,0,5>: Cost 3 vext3 LHS, <5,0,5,1>
+ 2597622682U, // <u,5,0,6>: Cost 3 vext1 <7,u,5,0>, <6,7,u,5>
+ 1176539396U, // <u,5,0,7>: Cost 2 vrev <5,u,7,0>
+ 1558069917U, // <u,5,0,u>: Cost 2 vext2 <2,3,u,5>, LHS
+ 1505968230U, // <u,5,1,0>: Cost 2 vext1 <4,u,5,1>, LHS
+ 2624512887U, // <u,5,1,1>: Cost 3 vext2 <1,1,u,5>, <1,1,u,5>
+ 2631811990U, // <u,5,1,2>: Cost 3 vext2 <2,3,u,5>, <1,2,3,0>
+ 2618541056U, // <u,5,1,3>: Cost 3 vext2 <0,1,u,5>, <1,3,5,7>
+ 1505971510U, // <u,5,1,4>: Cost 2 vext1 <4,u,5,1>, RHS
+ 2627167419U, // <u,5,1,5>: Cost 3 vext2 <1,5,u,5>, <1,5,u,5>
+ 2579714554U, // <u,5,1,6>: Cost 3 vext1 <4,u,5,1>, <6,2,7,3>
+ 1638330064U, // <u,5,1,7>: Cost 2 vext3 RHS, <5,1,7,3>
+ 1638477529U, // <u,5,1,u>: Cost 2 vext3 RHS, <5,1,u,3>
+ 2561802342U, // <u,5,2,0>: Cost 3 vext1 <1,u,5,2>, LHS
+ 2561803264U, // <u,5,2,1>: Cost 3 vext1 <1,u,5,2>, <1,3,5,7>
+ 2631149217U, // <u,5,2,2>: Cost 3 vext2 <2,2,u,5>, <2,2,u,5>
+ 1558071026U, // <u,5,2,3>: Cost 2 vext2 <2,3,u,5>, <2,3,u,5>
+ 2561805622U, // <u,5,2,4>: Cost 3 vext1 <1,u,5,2>, RHS
+ 2714062607U, // <u,5,2,5>: Cost 3 vext3 RHS, <5,2,5,3>
+ 2631813050U, // <u,5,2,6>: Cost 3 vext2 <2,3,u,5>, <2,6,3,7>
+ 3092335926U, // <u,5,2,7>: Cost 3 vtrnr <0,u,0,2>, RHS
+ 1561389191U, // <u,5,2,u>: Cost 2 vext2 <2,u,u,5>, <2,u,u,5>
+ 2561810534U, // <u,5,3,0>: Cost 3 vext1 <1,u,5,3>, LHS
+ 2561811857U, // <u,5,3,1>: Cost 3 vext1 <1,u,5,3>, <1,u,5,3>
+ 2631813474U, // <u,5,3,2>: Cost 3 vext2 <2,3,u,5>, <3,2,5,u>
+ 2631813532U, // <u,5,3,3>: Cost 3 vext2 <2,3,u,5>, <3,3,3,3>
+ 2619869698U, // <u,5,3,4>: Cost 3 vext2 <0,3,u,5>, <3,4,5,6>
+ 3001847002U, // <u,5,3,5>: Cost 3 vzipr LHS, <4,4,5,5>
+ 2954070530U, // <u,5,3,6>: Cost 3 vzipr LHS, <3,4,5,6>
+ 2018749750U, // <u,5,3,7>: Cost 2 vtrnr LHS, RHS
+ 2018749751U, // <u,5,3,u>: Cost 2 vtrnr LHS, RHS
+ 2573762662U, // <u,5,4,0>: Cost 3 vext1 <3,u,5,4>, LHS
+ 2620017634U, // <u,5,4,1>: Cost 3 vext2 <0,4,1,5>, <4,1,5,0>
+ 2573764338U, // <u,5,4,2>: Cost 3 vext1 <3,u,5,4>, <2,3,u,5>
+ 2573765444U, // <u,5,4,3>: Cost 3 vext1 <3,u,5,4>, <3,u,5,4>
+ 1570680053U, // <u,5,4,4>: Cost 2 vext2 <4,4,u,5>, <4,4,u,5>
+ 1558072630U, // <u,5,4,5>: Cost 2 vext2 <2,3,u,5>, RHS
+ 2645749143U, // <u,5,4,6>: Cost 3 vext2 <4,6,u,5>, <4,6,u,5>
+ 1638330310U, // <u,5,4,7>: Cost 2 vext3 RHS, <5,4,7,6>
+ 1558072873U, // <u,5,4,u>: Cost 2 vext2 <2,3,u,5>, RHS
+ 1506000998U, // <u,5,5,0>: Cost 2 vext1 <4,u,5,5>, LHS
+ 2561827984U, // <u,5,5,1>: Cost 3 vext1 <1,u,5,5>, <1,5,3,7>
+ 2579744360U, // <u,5,5,2>: Cost 3 vext1 <4,u,5,5>, <2,2,2,2>
+ 2579744918U, // <u,5,5,3>: Cost 3 vext1 <4,u,5,5>, <3,0,1,2>
+ 1506004278U, // <u,5,5,4>: Cost 2 vext1 <4,u,5,5>, RHS
+ 229035318U, // <u,5,5,5>: Cost 1 vdup1 RHS
+ 2712072206U, // <u,5,5,6>: Cost 3 vext3 RHS, <5,5,6,6>
+ 1638330392U, // <u,5,5,7>: Cost 2 vext3 RHS, <5,5,7,7>
+ 229035318U, // <u,5,5,u>: Cost 1 vdup1 RHS
+ 1500037222U, // <u,5,6,0>: Cost 2 vext1 <3,u,5,6>, LHS
+ 2561836436U, // <u,5,6,1>: Cost 3 vext1 <1,u,5,6>, <1,u,5,6>
+ 2567809133U, // <u,5,6,2>: Cost 3 vext1 <2,u,5,6>, <2,u,5,6>
+ 1500040006U, // <u,5,6,3>: Cost 2 vext1 <3,u,5,6>, <3,u,5,6>
+ 1500040502U, // <u,5,6,4>: Cost 2 vext1 <3,u,5,6>, RHS
+ 2714062935U, // <u,5,6,5>: Cost 3 vext3 RHS, <5,6,5,7>
+ 2712072288U, // <u,5,6,6>: Cost 3 vext3 RHS, <5,6,6,7>
+ 27705344U, // <u,5,6,7>: Cost 0 copy RHS
+ 27705344U, // <u,5,6,u>: Cost 0 copy RHS
+ 1488101478U, // <u,5,7,0>: Cost 2 vext1 <1,u,5,7>, LHS
+ 1488102805U, // <u,5,7,1>: Cost 2 vext1 <1,u,5,7>, <1,u,5,7>
+ 2561844840U, // <u,5,7,2>: Cost 3 vext1 <1,u,5,7>, <2,2,2,2>
+ 2561845398U, // <u,5,7,3>: Cost 3 vext1 <1,u,5,7>, <3,0,1,2>
+ 1488104758U, // <u,5,7,4>: Cost 2 vext1 <1,u,5,7>, RHS
+ 1638330536U, // <u,5,7,5>: Cost 2 vext3 RHS, <5,7,5,7>
+ 2712072362U, // <u,5,7,6>: Cost 3 vext3 RHS, <5,7,6,0>
+ 2042965302U, // <u,5,7,7>: Cost 2 vtrnr RHS, RHS
+ 1488107310U, // <u,5,7,u>: Cost 2 vext1 <1,u,5,7>, LHS
+ 1488109670U, // <u,5,u,0>: Cost 2 vext1 <1,u,5,u>, LHS
+ 1488110998U, // <u,5,u,1>: Cost 2 vext1 <1,u,5,u>, <1,u,5,u>
+ 2561853032U, // <u,5,u,2>: Cost 3 vext1 <1,u,5,u>, <2,2,2,2>
+ 1500056392U, // <u,5,u,3>: Cost 2 vext1 <3,u,5,u>, <3,u,5,u>
+ 1488112950U, // <u,5,u,4>: Cost 2 vext1 <1,u,5,u>, RHS
+ 229035318U, // <u,5,u,5>: Cost 1 vdup1 RHS
+ 2954111490U, // <u,5,u,6>: Cost 3 vzipr LHS, <3,4,5,6>
+ 27705344U, // <u,5,u,7>: Cost 0 copy RHS
+ 27705344U, // <u,5,u,u>: Cost 0 copy RHS
+ 2619211776U, // <u,6,0,0>: Cost 3 vext2 <0,2,u,6>, <0,0,0,0>
+ 1545470054U, // <u,6,0,1>: Cost 2 vext2 <0,2,u,6>, LHS
+ 1545470192U, // <u,6,0,2>: Cost 2 vext2 <0,2,u,6>, <0,2,u,6>
+ 2255958969U, // <u,6,0,3>: Cost 3 vrev <6,u,3,0>
+ 1546797458U, // <u,6,0,4>: Cost 2 vext2 <0,4,u,6>, <0,4,u,6>
+ 2720624971U, // <u,6,0,5>: Cost 3 vext3 <6,0,5,u>, <6,0,5,u>
+ 2256180180U, // <u,6,0,6>: Cost 3 vrev <6,u,6,0>
+ 2960682294U, // <u,6,0,7>: Cost 3 vzipr <1,2,u,0>, RHS
+ 1545470621U, // <u,6,0,u>: Cost 2 vext2 <0,2,u,6>, LHS
+ 1182004127U, // <u,6,1,0>: Cost 2 vrev <6,u,0,1>
+ 2619212596U, // <u,6,1,1>: Cost 3 vext2 <0,2,u,6>, <1,1,1,1>
+ 2619212694U, // <u,6,1,2>: Cost 3 vext2 <0,2,u,6>, <1,2,3,0>
+ 2619212760U, // <u,6,1,3>: Cost 3 vext2 <0,2,u,6>, <1,3,1,3>
+ 2626511979U, // <u,6,1,4>: Cost 3 vext2 <1,4,u,6>, <1,4,u,6>
+ 2619212944U, // <u,6,1,5>: Cost 3 vext2 <0,2,u,6>, <1,5,3,7>
+ 2714063264U, // <u,6,1,6>: Cost 3 vext3 RHS, <6,1,6,3>
+ 2967326006U, // <u,6,1,7>: Cost 3 vzipr <2,3,u,1>, RHS
+ 1182594023U, // <u,6,1,u>: Cost 2 vrev <6,u,u,1>
+ 1506050150U, // <u,6,2,0>: Cost 2 vext1 <4,u,6,2>, LHS
+ 2579792630U, // <u,6,2,1>: Cost 3 vext1 <4,u,6,2>, <1,0,3,2>
+ 2619213416U, // <u,6,2,2>: Cost 3 vext2 <0,2,u,6>, <2,2,2,2>
+ 2619213478U, // <u,6,2,3>: Cost 3 vext2 <0,2,u,6>, <2,3,0,1>
+ 1506053430U, // <u,6,2,4>: Cost 2 vext1 <4,u,6,2>, RHS
+ 2633148309U, // <u,6,2,5>: Cost 3 vext2 <2,5,u,6>, <2,5,u,6>
+ 2619213754U, // <u,6,2,6>: Cost 3 vext2 <0,2,u,6>, <2,6,3,7>
+ 1638330874U, // <u,6,2,7>: Cost 2 vext3 RHS, <6,2,7,3>
+ 1638478339U, // <u,6,2,u>: Cost 2 vext3 RHS, <6,2,u,3>
+ 2619213974U, // <u,6,3,0>: Cost 3 vext2 <0,2,u,6>, <3,0,1,2>
+ 2255836074U, // <u,6,3,1>: Cost 3 vrev <6,u,1,3>
+ 2255909811U, // <u,6,3,2>: Cost 3 vrev <6,u,2,3>
+ 2619214236U, // <u,6,3,3>: Cost 3 vext2 <0,2,u,6>, <3,3,3,3>
+ 1564715549U, // <u,6,3,4>: Cost 2 vext2 <3,4,u,6>, <3,4,u,6>
+ 2639121006U, // <u,6,3,5>: Cost 3 vext2 <3,5,u,6>, <3,5,u,6>
+ 3001847012U, // <u,6,3,6>: Cost 3 vzipr LHS, <4,4,6,6>
+ 1880329526U, // <u,6,3,7>: Cost 2 vzipr LHS, RHS
+ 1880329527U, // <u,6,3,u>: Cost 2 vzipr LHS, RHS
+ 2567864422U, // <u,6,4,0>: Cost 3 vext1 <2,u,6,4>, LHS
+ 2733011558U, // <u,6,4,1>: Cost 3 vext3 LHS, <6,4,1,3>
+ 2567866484U, // <u,6,4,2>: Cost 3 vext1 <2,u,6,4>, <2,u,6,4>
+ 2638458005U, // <u,6,4,3>: Cost 3 vext2 <3,4,u,6>, <4,3,6,u>
+ 1570540772U, // <u,6,4,4>: Cost 2 vext2 <4,4,6,6>, <4,4,6,6>
+ 1545473334U, // <u,6,4,5>: Cost 2 vext2 <0,2,u,6>, RHS
+ 1572015512U, // <u,6,4,6>: Cost 2 vext2 <4,6,u,6>, <4,6,u,6>
+ 2960715062U, // <u,6,4,7>: Cost 3 vzipr <1,2,u,4>, RHS
+ 1545473577U, // <u,6,4,u>: Cost 2 vext2 <0,2,u,6>, RHS
+ 2567872614U, // <u,6,5,0>: Cost 3 vext1 <2,u,6,5>, LHS
+ 2645757648U, // <u,6,5,1>: Cost 3 vext2 <4,6,u,6>, <5,1,7,3>
+ 2567874490U, // <u,6,5,2>: Cost 3 vext1 <2,u,6,5>, <2,6,3,7>
+ 2576501250U, // <u,6,5,3>: Cost 3 vext1 <4,3,6,5>, <3,4,5,6>
+ 1576660943U, // <u,6,5,4>: Cost 2 vext2 <5,4,u,6>, <5,4,u,6>
+ 2645757956U, // <u,6,5,5>: Cost 3 vext2 <4,6,u,6>, <5,5,5,5>
+ 2645758050U, // <u,6,5,6>: Cost 3 vext2 <4,6,u,6>, <5,6,7,0>
+ 2824080694U, // <u,6,5,7>: Cost 3 vuzpr <0,u,2,6>, RHS
+ 1182626795U, // <u,6,5,u>: Cost 2 vrev <6,u,u,5>
+ 1506082918U, // <u,6,6,0>: Cost 2 vext1 <4,u,6,6>, LHS
+ 2579825398U, // <u,6,6,1>: Cost 3 vext1 <4,u,6,6>, <1,0,3,2>
+ 2645758458U, // <u,6,6,2>: Cost 3 vext2 <4,6,u,6>, <6,2,7,3>
+ 2579826838U, // <u,6,6,3>: Cost 3 vext1 <4,u,6,6>, <3,0,1,2>
+ 1506086198U, // <u,6,6,4>: Cost 2 vext1 <4,u,6,6>, RHS
+ 2579828432U, // <u,6,6,5>: Cost 3 vext1 <4,u,6,6>, <5,1,7,3>
+ 296144182U, // <u,6,6,6>: Cost 1 vdup2 RHS
+ 1638331202U, // <u,6,6,7>: Cost 2 vext3 RHS, <6,6,7,7>
+ 296144182U, // <u,6,6,u>: Cost 1 vdup2 RHS
+ 432349286U, // <u,6,7,0>: Cost 1 vext1 RHS, LHS
+ 1506091766U, // <u,6,7,1>: Cost 2 vext1 RHS, <1,0,3,2>
+ 1506092648U, // <u,6,7,2>: Cost 2 vext1 RHS, <2,2,2,2>
+ 1506093206U, // <u,6,7,3>: Cost 2 vext1 RHS, <3,0,1,2>
+ 432352809U, // <u,6,7,4>: Cost 1 vext1 RHS, RHS
+ 1506094800U, // <u,6,7,5>: Cost 2 vext1 RHS, <5,1,7,3>
+ 1506095610U, // <u,6,7,6>: Cost 2 vext1 RHS, <6,2,7,3>
+ 1906904374U, // <u,6,7,7>: Cost 2 vzipr RHS, RHS
+ 432355118U, // <u,6,7,u>: Cost 1 vext1 RHS, LHS
+ 432357478U, // <u,6,u,0>: Cost 1 vext1 RHS, LHS
+ 1545475886U, // <u,6,u,1>: Cost 2 vext2 <0,2,u,6>, LHS
+ 1506100840U, // <u,6,u,2>: Cost 2 vext1 RHS, <2,2,2,2>
+ 1506101398U, // <u,6,u,3>: Cost 2 vext1 RHS, <3,0,1,2>
+ 432361002U, // <u,6,u,4>: Cost 1 vext1 RHS, RHS
+ 1545476250U, // <u,6,u,5>: Cost 2 vext2 <0,2,u,6>, RHS
+ 296144182U, // <u,6,u,6>: Cost 1 vdup2 RHS
+ 1880370486U, // <u,6,u,7>: Cost 2 vzipr LHS, RHS
+ 432363310U, // <u,6,u,u>: Cost 1 vext1 RHS, LHS
+ 1571356672U, // <u,7,0,0>: Cost 2 vext2 RHS, <0,0,0,0>
+ 497614950U, // <u,7,0,1>: Cost 1 vext2 RHS, LHS
+ 1571356836U, // <u,7,0,2>: Cost 2 vext2 RHS, <0,2,0,2>
+ 2573880146U, // <u,7,0,3>: Cost 3 vext1 <3,u,7,0>, <3,u,7,0>
+ 1571357010U, // <u,7,0,4>: Cost 2 vext2 RHS, <0,4,1,5>
+ 1512083716U, // <u,7,0,5>: Cost 2 vext1 <5,u,7,0>, <5,u,7,0>
+ 2621874741U, // <u,7,0,6>: Cost 3 vext2 <0,6,u,7>, <0,6,u,7>
+ 2585826298U, // <u,7,0,7>: Cost 3 vext1 <5,u,7,0>, <7,0,1,2>
+ 497615517U, // <u,7,0,u>: Cost 1 vext2 RHS, LHS
+ 1571357430U, // <u,7,1,0>: Cost 2 vext2 RHS, <1,0,3,2>
+ 1571357492U, // <u,7,1,1>: Cost 2 vext2 RHS, <1,1,1,1>
+ 1571357590U, // <u,7,1,2>: Cost 2 vext2 RHS, <1,2,3,0>
+ 1552114715U, // <u,7,1,3>: Cost 2 vext2 <1,3,u,7>, <1,3,u,7>
+ 2573888822U, // <u,7,1,4>: Cost 3 vext1 <3,u,7,1>, RHS
+ 1553441981U, // <u,7,1,5>: Cost 2 vext2 <1,5,u,7>, <1,5,u,7>
+ 2627847438U, // <u,7,1,6>: Cost 3 vext2 <1,6,u,7>, <1,6,u,7>
+ 2727408775U, // <u,7,1,7>: Cost 3 vext3 <7,1,7,u>, <7,1,7,u>
+ 1555432880U, // <u,7,1,u>: Cost 2 vext2 <1,u,u,7>, <1,u,u,7>
+ 2629838337U, // <u,7,2,0>: Cost 3 vext2 <2,0,u,7>, <2,0,u,7>
+ 1188058754U, // <u,7,2,1>: Cost 2 vrev <7,u,1,2>
+ 1571358312U, // <u,7,2,2>: Cost 2 vext2 RHS, <2,2,2,2>
+ 1571358374U, // <u,7,2,3>: Cost 2 vext2 RHS, <2,3,0,1>
+ 2632492869U, // <u,7,2,4>: Cost 3 vext2 <2,4,u,7>, <2,4,u,7>
+ 2633156502U, // <u,7,2,5>: Cost 3 vext2 <2,5,u,7>, <2,5,u,7>
+ 1560078311U, // <u,7,2,6>: Cost 2 vext2 <2,6,u,7>, <2,6,u,7>
+ 2728072408U, // <u,7,2,7>: Cost 3 vext3 <7,2,7,u>, <7,2,7,u>
+ 1561405577U, // <u,7,2,u>: Cost 2 vext2 <2,u,u,7>, <2,u,u,7>
+ 1571358870U, // <u,7,3,0>: Cost 2 vext2 RHS, <3,0,1,2>
+ 2627184913U, // <u,7,3,1>: Cost 3 vext2 <1,5,u,7>, <3,1,5,u>
+ 2633820523U, // <u,7,3,2>: Cost 3 vext2 <2,6,u,7>, <3,2,6,u>
+ 1571359132U, // <u,7,3,3>: Cost 2 vext2 RHS, <3,3,3,3>
+ 1571359234U, // <u,7,3,4>: Cost 2 vext2 RHS, <3,4,5,6>
+ 1512108295U, // <u,7,3,5>: Cost 2 vext1 <5,u,7,3>, <5,u,7,3>
+ 1518080992U, // <u,7,3,6>: Cost 2 vext1 <6,u,7,3>, <6,u,7,3>
+ 2640456465U, // <u,7,3,7>: Cost 3 vext2 <3,7,u,7>, <3,7,u,7>
+ 1571359518U, // <u,7,3,u>: Cost 2 vext2 RHS, <3,u,1,2>
+ 1571359634U, // <u,7,4,0>: Cost 2 vext2 RHS, <4,0,5,1>
+ 2573911067U, // <u,7,4,1>: Cost 3 vext1 <3,u,7,4>, <1,3,u,7>
+ 2645101622U, // <u,7,4,2>: Cost 3 vext2 RHS, <4,2,5,3>
+ 2573912918U, // <u,7,4,3>: Cost 3 vext1 <3,u,7,4>, <3,u,7,4>
+ 1571359952U, // <u,7,4,4>: Cost 2 vext2 RHS, <4,4,4,4>
+ 497618248U, // <u,7,4,5>: Cost 1 vext2 RHS, RHS
+ 1571360116U, // <u,7,4,6>: Cost 2 vext2 RHS, <4,6,4,6>
+ 2645102024U, // <u,7,4,7>: Cost 3 vext2 RHS, <4,7,5,0>
+ 497618473U, // <u,7,4,u>: Cost 1 vext2 RHS, RHS
+ 2645102152U, // <u,7,5,0>: Cost 3 vext2 RHS, <5,0,1,2>
+ 1571360464U, // <u,7,5,1>: Cost 2 vext2 RHS, <5,1,7,3>
+ 2645102334U, // <u,7,5,2>: Cost 3 vext2 RHS, <5,2,3,4>
+ 2645102447U, // <u,7,5,3>: Cost 3 vext2 RHS, <5,3,7,0>
+ 1571360710U, // <u,7,5,4>: Cost 2 vext2 RHS, <5,4,7,6>
+ 1571360772U, // <u,7,5,5>: Cost 2 vext2 RHS, <5,5,5,5>
+ 1571360866U, // <u,7,5,6>: Cost 2 vext2 RHS, <5,6,7,0>
+ 1571360936U, // <u,7,5,7>: Cost 2 vext2 RHS, <5,7,5,7>
+ 1571361017U, // <u,7,5,u>: Cost 2 vext2 RHS, <5,u,5,7>
+ 1530044518U, // <u,7,6,0>: Cost 2 vext1 <u,u,7,6>, LHS
+ 2645103016U, // <u,7,6,1>: Cost 3 vext2 RHS, <6,1,7,2>
+ 1571361274U, // <u,7,6,2>: Cost 2 vext2 RHS, <6,2,7,3>
+ 2645103154U, // <u,7,6,3>: Cost 3 vext2 RHS, <6,3,4,5>
+ 1530047798U, // <u,7,6,4>: Cost 2 vext1 <u,u,7,6>, RHS
+ 1188386474U, // <u,7,6,5>: Cost 2 vrev <7,u,5,6>
+ 1571361592U, // <u,7,6,6>: Cost 2 vext2 RHS, <6,6,6,6>
+ 1571361614U, // <u,7,6,7>: Cost 2 vext2 RHS, <6,7,0,1>
+ 1571361695U, // <u,7,6,u>: Cost 2 vext2 RHS, <6,u,0,1>
+ 1571361786U, // <u,7,7,0>: Cost 2 vext2 RHS, <7,0,1,2>
+ 2573935616U, // <u,7,7,1>: Cost 3 vext1 <3,u,7,7>, <1,3,5,7>
+ 2645103781U, // <u,7,7,2>: Cost 3 vext2 RHS, <7,2,2,2>
+ 2573937497U, // <u,7,7,3>: Cost 3 vext1 <3,u,7,7>, <3,u,7,7>
+ 1571362150U, // <u,7,7,4>: Cost 2 vext2 RHS, <7,4,5,6>
+ 1512141067U, // <u,7,7,5>: Cost 2 vext1 <5,u,7,7>, <5,u,7,7>
+ 1518113764U, // <u,7,7,6>: Cost 2 vext1 <6,u,7,7>, <6,u,7,7>
+ 363253046U, // <u,7,7,7>: Cost 1 vdup3 RHS
+ 363253046U, // <u,7,7,u>: Cost 1 vdup3 RHS
+ 1571362515U, // <u,7,u,0>: Cost 2 vext2 RHS, <u,0,1,2>
+ 497620782U, // <u,7,u,1>: Cost 1 vext2 RHS, LHS
+ 1571362693U, // <u,7,u,2>: Cost 2 vext2 RHS, <u,2,3,0>
+ 1571362748U, // <u,7,u,3>: Cost 2 vext2 RHS, <u,3,0,1>
+ 1571362879U, // <u,7,u,4>: Cost 2 vext2 RHS, <u,4,5,6>
+ 497621146U, // <u,7,u,5>: Cost 1 vext2 RHS, RHS
+ 1571363024U, // <u,7,u,6>: Cost 2 vext2 RHS, <u,6,3,7>
+ 363253046U, // <u,7,u,7>: Cost 1 vdup3 RHS
+ 497621349U, // <u,7,u,u>: Cost 1 vext2 RHS, LHS
+ 135053414U, // <u,u,0,0>: Cost 1 vdup0 LHS
+ 471081121U, // <u,u,0,1>: Cost 1 vext2 LHS, LHS
+ 1544822948U, // <u,u,0,2>: Cost 2 vext2 LHS, <0,2,0,2>
+ 1616140005U, // <u,u,0,3>: Cost 2 vext3 LHS, <u,0,3,2>
+ 1544823122U, // <u,u,0,4>: Cost 2 vext2 LHS, <0,4,1,5>
+ 1512157453U, // <u,u,0,5>: Cost 2 vext1 <5,u,u,0>, <5,u,u,0>
+ 1662220032U, // <u,u,0,6>: Cost 2 vext3 RHS, <u,0,6,2>
+ 1194457487U, // <u,u,0,7>: Cost 2 vrev <u,u,7,0>
+ 471081629U, // <u,u,0,u>: Cost 1 vext2 LHS, LHS
+ 1544823542U, // <u,u,1,0>: Cost 2 vext2 LHS, <1,0,3,2>
+ 202162278U, // <u,u,1,1>: Cost 1 vdup1 LHS
+ 537753390U, // <u,u,1,2>: Cost 1 vext3 LHS, LHS
+ 1544823768U, // <u,u,1,3>: Cost 2 vext2 LHS, <1,3,1,3>
+ 1494248758U, // <u,u,1,4>: Cost 2 vext1 <2,u,u,1>, RHS
+ 1544823952U, // <u,u,1,5>: Cost 2 vext2 LHS, <1,5,3,7>
+ 1518138343U, // <u,u,1,6>: Cost 2 vext1 <6,u,u,1>, <6,u,u,1>
+ 1640322907U, // <u,u,1,7>: Cost 2 vext3 RHS, <u,1,7,3>
+ 537753444U, // <u,u,1,u>: Cost 1 vext3 LHS, LHS
+ 1482309734U, // <u,u,2,0>: Cost 2 vext1 <0,u,u,2>, LHS
+ 1194031451U, // <u,u,2,1>: Cost 2 vrev <u,u,1,2>
+ 269271142U, // <u,u,2,2>: Cost 1 vdup2 LHS
+ 835584U, // <u,u,2,3>: Cost 0 copy LHS
+ 1482313014U, // <u,u,2,4>: Cost 2 vext1 <0,u,u,2>, RHS
+ 2618566504U, // <u,u,2,5>: Cost 3 vext2 LHS, <2,5,3,6>
+ 1544824762U, // <u,u,2,6>: Cost 2 vext2 LHS, <2,6,3,7>
+ 1638479788U, // <u,u,2,7>: Cost 2 vext3 RHS, <u,2,7,3>
+ 835584U, // <u,u,2,u>: Cost 0 copy LHS
+ 408576723U, // <u,u,3,0>: Cost 1 vext1 LHS, LHS
+ 1482318582U, // <u,u,3,1>: Cost 2 vext1 LHS, <1,0,3,2>
+ 120371557U, // <u,u,3,2>: Cost 1 vrev LHS
+ 336380006U, // <u,u,3,3>: Cost 1 vdup3 LHS
+ 408579382U, // <u,u,3,4>: Cost 1 vext1 LHS, RHS
+ 1616140271U, // <u,u,3,5>: Cost 2 vext3 LHS, <u,3,5,7>
+ 1530098170U, // <u,u,3,6>: Cost 2 vext1 LHS, <6,2,7,3>
+ 1880329544U, // <u,u,3,7>: Cost 2 vzipr LHS, RHS
+ 408581934U, // <u,u,3,u>: Cost 1 vext1 LHS, LHS
+ 1488298086U, // <u,u,4,0>: Cost 2 vext1 <1,u,u,4>, LHS
+ 1488299437U, // <u,u,4,1>: Cost 2 vext1 <1,u,u,4>, <1,u,u,4>
+ 1659271204U, // <u,u,4,2>: Cost 2 vext3 LHS, <u,4,2,6>
+ 1194195311U, // <u,u,4,3>: Cost 2 vrev <u,u,3,4>
+ 161926454U, // <u,u,4,4>: Cost 1 vdup0 RHS
+ 471084342U, // <u,u,4,5>: Cost 1 vext2 LHS, RHS
+ 1571368308U, // <u,u,4,6>: Cost 2 vext2 RHS, <4,6,4,6>
+ 1640323153U, // <u,u,4,7>: Cost 2 vext3 RHS, <u,4,7,6>
+ 471084585U, // <u,u,4,u>: Cost 1 vext2 LHS, RHS
+ 1494278246U, // <u,u,5,0>: Cost 2 vext1 <2,u,u,5>, LHS
+ 1571368656U, // <u,u,5,1>: Cost 2 vext2 RHS, <5,1,7,3>
+ 1494280327U, // <u,u,5,2>: Cost 2 vext1 <2,u,u,5>, <2,u,u,5>
+ 1616140415U, // <u,u,5,3>: Cost 2 vext3 LHS, <u,5,3,7>
+ 1494281526U, // <u,u,5,4>: Cost 2 vext1 <2,u,u,5>, RHS
+ 229035318U, // <u,u,5,5>: Cost 1 vdup1 RHS
+ 537753754U, // <u,u,5,6>: Cost 1 vext3 LHS, RHS
+ 1750355254U, // <u,u,5,7>: Cost 2 vuzpr LHS, RHS
+ 537753772U, // <u,u,5,u>: Cost 1 vext3 LHS, RHS
+ 1482342502U, // <u,u,6,0>: Cost 2 vext1 <0,u,u,6>, LHS
+ 2556084982U, // <u,u,6,1>: Cost 3 vext1 <0,u,u,6>, <1,0,3,2>
+ 1571369466U, // <u,u,6,2>: Cost 2 vext2 RHS, <6,2,7,3>
+ 1611938000U, // <u,u,6,3>: Cost 2 vext3 LHS, <u,6,3,7>
+ 1482345782U, // <u,u,6,4>: Cost 2 vext1 <0,u,u,6>, RHS
+ 1194359171U, // <u,u,6,5>: Cost 2 vrev <u,u,5,6>
+ 296144182U, // <u,u,6,6>: Cost 1 vdup2 RHS
+ 27705344U, // <u,u,6,7>: Cost 0 copy RHS
+ 27705344U, // <u,u,6,u>: Cost 0 copy RHS
+ 432496742U, // <u,u,7,0>: Cost 1 vext1 RHS, LHS
+ 1488324016U, // <u,u,7,1>: Cost 2 vext1 <1,u,u,7>, <1,u,u,7>
+ 1494296713U, // <u,u,7,2>: Cost 2 vext1 <2,u,u,7>, <2,u,u,7>
+ 1906901148U, // <u,u,7,3>: Cost 2 vzipr RHS, LHS
+ 432500283U, // <u,u,7,4>: Cost 1 vext1 RHS, RHS
+ 1506242256U, // <u,u,7,5>: Cost 2 vext1 RHS, <5,1,7,3>
+ 120699277U, // <u,u,7,6>: Cost 1 vrev RHS
+ 363253046U, // <u,u,7,7>: Cost 1 vdup3 RHS
+ 432502574U, // <u,u,7,u>: Cost 1 vext1 RHS, LHS
+ 408617688U, // <u,u,u,0>: Cost 1 vext1 LHS, LHS
+ 471086894U, // <u,u,u,1>: Cost 1 vext2 LHS, LHS
+ 537753957U, // <u,u,u,2>: Cost 1 vext3 LHS, LHS
+ 835584U, // <u,u,u,3>: Cost 0 copy LHS
+ 408620342U, // <u,u,u,4>: Cost 1 vext1 LHS, RHS
+ 471087258U, // <u,u,u,5>: Cost 1 vext2 LHS, RHS
+ 537753997U, // <u,u,u,6>: Cost 1 vext3 LHS, RHS
+ 27705344U, // <u,u,u,7>: Cost 0 copy RHS
+ 835584U, // <u,u,u,u>: Cost 0 copy LHS
+ 0
+};
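Each entry in the perfect-shuffle table above is a packed 32-bit value: the lowering code indexes the table with a base-9 encoding of the 4-element shuffle mask (digits 0-7 for concrete elements, 8 for undef) and unpacks a cost estimate, an opcode, and two operand IDs from the entry. A minimal decode sketch, assuming the usual LLVM perfect-shuffle field layout; the struct and helper below are illustrative and not part of this patch (utils/PerfectShuffle.cpp and GeneratePerfectShuffle() are the authoritative references):

#include <cstdint>

// Hypothetical decoder for one perfect-shuffle table entry. Field widths are
// an assumption based on LLVM's perfect-shuffle scheme: the top bits carry a
// cost estimate, 4 bits select the shuffle primitive, and two 13-bit fields
// name the (recursively expanded) operands.
struct PerfectShuffleEntry {
  unsigned Cost;   // rough instruction-count estimate
  unsigned OpNum;  // which primitive to emit (copy, vrev, vext, vdup, ...)
  unsigned LHSID;  // mask ID / table index describing the left operand
  unsigned RHSID;  // mask ID / table index describing the right operand
};

static PerfectShuffleEntry decodePerfectShuffleEntry(uint32_t PFEntry) {
  PerfectShuffleEntry E;
  E.Cost  = PFEntry >> 30;
  E.OpNum = (PFEntry >> 26) & 0xF;
  E.LHSID = (PFEntry >> 13) & 0x1FFF;
  E.RHSID = PFEntry & 0x1FFF;
  return E;
}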
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp b/contrib/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp
new file mode 100644
index 0000000..4723cc4
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp
@@ -0,0 +1,578 @@
+//=- AArch64PromoteConstant.cpp --- Promote constant to global for AArch64 -==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the AArch64PromoteConstant pass which promotes constants
+// to global variables when this is likely to be more efficient. Currently only
+// types related to constant vector (i.e., constant vector, array of constant
+// vectors, constant structure with a constant vector field, etc.) are promoted
+// to global variables. Constant vectors are likely to be lowered in target
+// constant pool during instruction selection already; therefore, the access
+// will remain the same (memory load), but the structure types are not split
+// into different constant pool accesses for each field. A bonus side effect is
+// that created globals may be merged by the global merge pass.
+//
+// FIXME: This pass may be useful for other targets too.
+//===----------------------------------------------------------------------===//
+
+#include "AArch64.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "aarch64-promote-const"
+
+// Stress testing mode - disable heuristics.
+static cl::opt<bool> Stress("aarch64-stress-promote-const", cl::Hidden,
+ cl::desc("Promote all vector constants"));
+
+STATISTIC(NumPromoted, "Number of promoted constants");
+STATISTIC(NumPromotedUses, "Number of promoted constants uses");
+
+//===----------------------------------------------------------------------===//
+// AArch64PromoteConstant
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// Promotes interesting constants into global variables.
+/// The motivating example is:
+/// static const uint16_t TableA[32] = {
+/// 41944, 40330, 38837, 37450, 36158, 34953, 33826, 32768,
+/// 31776, 30841, 29960, 29128, 28340, 27595, 26887, 26215,
+/// 25576, 24967, 24386, 23832, 23302, 22796, 22311, 21846,
+/// 21400, 20972, 20561, 20165, 19785, 19419, 19066, 18725,
+/// };
+///
+/// uint8x16x4_t LoadStatic(void) {
+/// uint8x16x4_t ret;
+/// ret.val[0] = vld1q_u16(TableA + 0);
+/// ret.val[1] = vld1q_u16(TableA + 8);
+/// ret.val[2] = vld1q_u16(TableA + 16);
+/// ret.val[3] = vld1q_u16(TableA + 24);
+/// return ret;
+/// }
+///
+/// The constants in this example are folded into the uses. Thus, 4 different
+/// constants are created.
+///
+/// As their type is a vector, the cheapest way to create them is to load them
+/// from memory.
+///
+/// Therefore the final assembly has 4 different loads. With this pass
+/// enabled, only one load is issued for the constants.
+class AArch64PromoteConstant : public ModulePass {
+
+public:
+ static char ID;
+ AArch64PromoteConstant() : ModulePass(ID) {}
+
+ const char *getPassName() const override { return "AArch64 Promote Constant"; }
+
+ /// Iterate over the functions and promote the interesting constants into
+ /// global variables with module scope.
+ bool runOnModule(Module &M) override {
+ DEBUG(dbgs() << getPassName() << '\n');
+ bool Changed = false;
+ for (auto &MF : M) {
+ Changed |= runOnFunction(MF);
+ }
+ return Changed;
+ }
+
+private:
+ /// Look for interesting constants used within the given function.
+ /// Promote them into global variables, load these global variables within
+ /// the related function, so that the number of inserted loads is minimal.
+ bool runOnFunction(Function &F);
+
+ // This transformation requires dominator info
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
+ }
+
+ /// Type to store a list of User.
+ typedef SmallVector<Value::user_iterator, 4> Users;
+ /// Map an insertion point to all the uses it dominates.
+ typedef DenseMap<Instruction *, Users> InsertionPoints;
+ /// Map a function to the required insertion point of load for a
+ /// global variable.
+ typedef DenseMap<Function *, InsertionPoints> InsertionPointsPerFunc;
+
+ /// Find the closest point that dominates the given Use.
+ Instruction *findInsertionPoint(Value::user_iterator &Use);
+
+ /// Check if the given insertion point is dominated by an existing
+ /// insertion point.
+ /// If true, the given use is added to the list of dominated uses for
+ /// the related existing point.
+ /// \param NewPt the insertion point to be checked
+ /// \param UseIt the use to be added into the list of dominated uses
+ /// \param InsertPts existing insertion points
+ /// \pre NewPt and all instructions in InsertPts belong to the same function
+ /// \return true if one of the insertion points in InsertPts dominates NewPt,
+ /// false otherwise
+ bool isDominated(Instruction *NewPt, Value::user_iterator &UseIt,
+ InsertionPoints &InsertPts);
+
+ /// Check if the given insertion point can be merged with an existing
+ /// insertion point in a common dominator.
+ /// If true, the given use is added to the list of the created insertion
+ /// point.
+ /// \param NewPt the insertion point to be checked
+ /// \param UseIt the use to be added into the list of dominated uses
+ /// \param InsertPts existing insertion points
+ /// \pre NewPt and all instructions in InsertPts belong to the same function
+ /// \pre isDominated returns false for the exact same parameters.
+ /// \return true if there exists an insertion point in InsertPts that could
+ /// have been merged with NewPt in a common dominator,
+ /// false otherwise
+ bool tryAndMerge(Instruction *NewPt, Value::user_iterator &UseIt,
+ InsertionPoints &InsertPts);
+
+ /// Compute the minimal insertion points that dominate all the interesting
+ /// uses of Val.
+ /// Insertion points are grouped per function, and each insertion point
+ /// contains a list of all the uses it dominates within the related function.
+ /// \param Val constant to be examined
+ /// \param[out] InsPtsPerFunc output storage of the analysis
+ void computeInsertionPoints(Constant *Val,
+ InsertionPointsPerFunc &InsPtsPerFunc);
+
+ /// Insert a definition of a new global variable at each point contained in
+ /// InsPtsPerFunc and update the related uses (also contained in
+ /// InsPtsPerFunc).
+ bool insertDefinitions(Constant *Cst, InsertionPointsPerFunc &InsPtsPerFunc);
+
+ /// Compute the minimal insertion points to dominate all the interesting
+ /// uses of Val and insert a definition of a new global variable
+ /// at these points.
+ /// Also update the uses of Val accordingly.
+ /// Currently a use of Val is considered interesting if:
+ /// - Val is not UndefValue
+ /// - Val is not zeroinitialized
+ /// - Replacing Val with a load of a global variable is valid.
+ /// \see shouldConvert for more details
+ bool computeAndInsertDefinitions(Constant *Val);
+
+ /// Promote the given constant into a global variable if it is expected to
+ /// be profitable.
+ /// \return true if Cst has been promoted
+ bool promoteConstant(Constant *Cst);
+
+ /// Transfer the list of dominated uses of IPI to NewPt in InsertPts.
+ /// Append UseIt to this list and delete the entry of IPI in InsertPts.
+ static void appendAndTransferDominatedUses(Instruction *NewPt,
+ Value::user_iterator &UseIt,
+ InsertionPoints::iterator &IPI,
+ InsertionPoints &InsertPts) {
+ // Record the dominated use.
+ IPI->second.push_back(UseIt);
+ // Transfer the dominated uses of IPI to NewPt.
+ // Inserting into the DenseMap may invalidate the existing iterators.
+ // Keep a copy of the key to find the iterator to erase.
+ Instruction *OldInstr = IPI->first;
+ InsertPts.insert(InsertionPoints::value_type(NewPt, IPI->second));
+ // Erase IPI.
+ IPI = InsertPts.find(OldInstr);
+ InsertPts.erase(IPI);
+ }
+};
+} // end anonymous namespace
+
+char AArch64PromoteConstant::ID = 0;
+
+namespace llvm {
+void initializeAArch64PromoteConstantPass(PassRegistry &);
+}
+
+INITIALIZE_PASS_BEGIN(AArch64PromoteConstant, "aarch64-promote-const",
+ "AArch64 Promote Constant Pass", false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_END(AArch64PromoteConstant, "aarch64-promote-const",
+ "AArch64 Promote Constant Pass", false, false)
+
+ModulePass *llvm::createAArch64PromoteConstantPass() {
+ return new AArch64PromoteConstant();
+}
+
+/// Check if the given type uses a vector type.
+static bool isConstantUsingVectorTy(const Type *CstTy) {
+ if (CstTy->isVectorTy())
+ return true;
+ if (CstTy->isStructTy()) {
+ for (unsigned EltIdx = 0, EndEltIdx = CstTy->getStructNumElements();
+ EltIdx < EndEltIdx; ++EltIdx)
+ if (isConstantUsingVectorTy(CstTy->getStructElementType(EltIdx)))
+ return true;
+ } else if (CstTy->isArrayTy())
+ return isConstantUsingVectorTy(CstTy->getArrayElementType());
+ return false;
+}
+
+/// Check if the given use (Instruction + OpIdx) of Cst should be converted into
+/// a load of a global variable initialized with Cst.
+/// A use should be converted if it is legal to do so.
+/// For instance, it is not legal to turn the mask operand of a shuffle vector
+/// into a load of a global variable.
+static bool shouldConvertUse(const Constant *Cst, const Instruction *Instr,
+ unsigned OpIdx) {
+ // shufflevector instruction expects a const for the mask argument, i.e., the
+ // third argument. Do not promote this use in that case.
+ if (isa<const ShuffleVectorInst>(Instr) && OpIdx == 2)
+ return false;
+
+ // extractvalue instruction expects a const idx.
+ if (isa<const ExtractValueInst>(Instr) && OpIdx > 0)
+ return false;
+
+ // insertvalue instruction expects a const idx.
+ if (isa<const InsertValueInst>(Instr) && OpIdx > 1)
+ return false;
+
+ if (isa<const AllocaInst>(Instr) && OpIdx > 0)
+ return false;
+
+ // Alignment argument must be constant.
+ if (isa<const LoadInst>(Instr) && OpIdx > 0)
+ return false;
+
+ // Alignment argument must be constant.
+ if (isa<const StoreInst>(Instr) && OpIdx > 1)
+ return false;
+
+ // Index must be constant.
+ if (isa<const GetElementPtrInst>(Instr) && OpIdx > 0)
+ return false;
+
+ // Personality function and filters must be constant.
+ // Give up on that instruction.
+ if (isa<const LandingPadInst>(Instr))
+ return false;
+
+ // Switch instruction expects constants to compare to.
+ if (isa<const SwitchInst>(Instr))
+ return false;
+
+ // Expected address must be a constant.
+ if (isa<const IndirectBrInst>(Instr))
+ return false;
+
+ // Do not mess with intrinsics.
+ if (isa<const IntrinsicInst>(Instr))
+ return false;
+
+ // Do not mess with inline asm.
+ const CallInst *CI = dyn_cast<const CallInst>(Instr);
+ if (CI && isa<const InlineAsm>(CI->getCalledValue()))
+ return false;
+
+ return true;
+}
+
+/// Check if the given Cst should be converted into
+/// a load of a global variable initialized with Cst.
+/// A constant should be converted if it is likely that the materialization of
+/// the constant will be tricky. Thus, we give up on zero or undef values.
+///
+/// \todo Currently, accept only vector-related types.
+/// Also we give up on all simple vector types to keep the existing
+/// behavior. Otherwise, we would have to duplicate here all the checks done
+/// when lowering a BUILD_VECTOR. By giving up, we lose the potential benefit of
+/// merging constants via the global merge pass, and the fact that the same
+/// constant is stored only once with this method (versus once per function
+/// that uses the constant with the regular approach, even for floats).
+/// Again, the simplest solution would be to promote every
+/// constant and rematerialize them when they are actually cheap to create.
+static bool shouldConvert(const Constant *Cst) {
+ if (isa<const UndefValue>(Cst))
+ return false;
+
+ // FIXME: In some cases, it may be interesting to promote a zero-initialized
+ // constant to memory, e.g., when materializing Cst requires more instructions
+ // than the adrp/add/load sequence, or when this sequence can be shared by
+ // several instances of Cst.
+ // Ideally, we could promote this into a global and rematerialize the constant
+ // when that turns out to be a bad idea.
+ if (Cst->isZeroValue())
+ return false;
+
+ if (Stress)
+ return true;
+
+ // FIXME: see function \todo
+ if (Cst->getType()->isVectorTy())
+ return false;
+ return isConstantUsingVectorTy(Cst->getType());
+}
+
+Instruction *
+AArch64PromoteConstant::findInsertionPoint(Value::user_iterator &Use) {
+ // If this user is a phi, the insertion point is in the related
+ // incoming basic block.
+ PHINode *PhiInst = dyn_cast<PHINode>(*Use);
+ Instruction *InsertionPoint;
+ if (PhiInst)
+ InsertionPoint =
+ PhiInst->getIncomingBlock(Use.getOperandNo())->getTerminator();
+ else
+ InsertionPoint = dyn_cast<Instruction>(*Use);
+ assert(InsertionPoint && "User is not an instruction!");
+ return InsertionPoint;
+}
+
+bool AArch64PromoteConstant::isDominated(Instruction *NewPt,
+ Value::user_iterator &UseIt,
+ InsertionPoints &InsertPts) {
+
+ DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>(
+ *NewPt->getParent()->getParent()).getDomTree();
+
+ // Traverse all the existing insertion points and check if one is dominating
+ // NewPt. If it is, remember that.
+ for (auto &IPI : InsertPts) {
+ if (NewPt == IPI.first || DT.dominates(IPI.first, NewPt) ||
+ // When IPI.first is a terminator instruction, DT may think that
+ // the result is defined on the edge.
+ // Here we are testing the insertion point, not the definition.
+ (IPI.first->getParent() != NewPt->getParent() &&
+ DT.dominates(IPI.first->getParent(), NewPt->getParent()))) {
+ // No need to insert this point. Just record the dominated use.
+ DEBUG(dbgs() << "Insertion point dominated by:\n");
+ DEBUG(IPI.first->print(dbgs()));
+ DEBUG(dbgs() << '\n');
+ IPI.second.push_back(UseIt);
+ return true;
+ }
+ }
+ return false;
+}
+
+bool AArch64PromoteConstant::tryAndMerge(Instruction *NewPt,
+ Value::user_iterator &UseIt,
+ InsertionPoints &InsertPts) {
+ DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>(
+ *NewPt->getParent()->getParent()).getDomTree();
+ BasicBlock *NewBB = NewPt->getParent();
+
+ // Traverse all the existing insertion points and check if one is dominated by
+ // NewPt and thus useless or can be combined with NewPt into a common
+ // dominator.
+ for (InsertionPoints::iterator IPI = InsertPts.begin(),
+ EndIPI = InsertPts.end();
+ IPI != EndIPI; ++IPI) {
+ BasicBlock *CurBB = IPI->first->getParent();
+ if (NewBB == CurBB) {
+ // Instructions are in the same block.
+ // By construction, NewPt is dominating the other.
+ // Indeed, isDominated returned false with the exact same arguments.
+ DEBUG(dbgs() << "Merge insertion point with:\n");
+ DEBUG(IPI->first->print(dbgs()));
+ DEBUG(dbgs() << "\nat considered insertion point.\n");
+ appendAndTransferDominatedUses(NewPt, UseIt, IPI, InsertPts);
+ return true;
+ }
+
+ // Look for a common dominator
+ BasicBlock *CommonDominator = DT.findNearestCommonDominator(NewBB, CurBB);
+ // If none exists, we cannot merge these two points.
+ if (!CommonDominator)
+ continue;
+
+ if (CommonDominator != NewBB) {
+ // By construction, the CommonDominator cannot be CurBB.
+ assert(CommonDominator != CurBB &&
+ "Instruction has not been rejected during isDominated check!");
+ // Take the last instruction of the CommonDominator as insertion point
+ NewPt = CommonDominator->getTerminator();
+ }
+ // Otherwise, CommonDominator is NewBB itself, so NewPt is kept as the
+ // insertion point for the merged set of uses.
+ DEBUG(dbgs() << "Merge insertion point with:\n");
+ DEBUG(IPI->first->print(dbgs()));
+ DEBUG(dbgs() << '\n');
+ DEBUG(NewPt->print(dbgs()));
+ DEBUG(dbgs() << '\n');
+ appendAndTransferDominatedUses(NewPt, UseIt, IPI, InsertPts);
+ return true;
+ }
+ return false;
+}
+
+void AArch64PromoteConstant::computeInsertionPoints(
+ Constant *Val, InsertionPointsPerFunc &InsPtsPerFunc) {
+ DEBUG(dbgs() << "** Compute insertion points **\n");
+ for (Value::user_iterator UseIt = Val->user_begin(),
+ EndUseIt = Val->user_end();
+ UseIt != EndUseIt; ++UseIt) {
+ // If the user is not an Instruction, we cannot modify it.
+ if (!isa<Instruction>(*UseIt))
+ continue;
+
+ // Filter out uses that should not be converted.
+ if (!shouldConvertUse(Val, cast<Instruction>(*UseIt), UseIt.getOperandNo()))
+ continue;
+
+ DEBUG(dbgs() << "Considered use, opidx " << UseIt.getOperandNo() << ":\n");
+ DEBUG((*UseIt)->print(dbgs()));
+ DEBUG(dbgs() << '\n');
+
+ Instruction *InsertionPoint = findInsertionPoint(UseIt);
+
+ DEBUG(dbgs() << "Considered insertion point:\n");
+ DEBUG(InsertionPoint->print(dbgs()));
+ DEBUG(dbgs() << '\n');
+
+ // Check if the current insertion point is useless, i.e., it is dominated
+ // by another one.
+ InsertionPoints &InsertPts =
+ InsPtsPerFunc[InsertionPoint->getParent()->getParent()];
+ if (isDominated(InsertionPoint, UseIt, InsertPts))
+ continue;
+ // This insertion point is useful, check if we can merge some insertion
+ // point in a common dominator or if NewPt dominates an existing one.
+ if (tryAndMerge(InsertionPoint, UseIt, InsertPts))
+ continue;
+
+ DEBUG(dbgs() << "Keep considered insertion point\n");
+
+ // It is definitely useful on its own.
+ InsertPts[InsertionPoint].push_back(UseIt);
+ }
+}
+
+bool AArch64PromoteConstant::insertDefinitions(
+ Constant *Cst, InsertionPointsPerFunc &InsPtsPerFunc) {
+ // We will create one global variable per Module.
+ DenseMap<Module *, GlobalVariable *> ModuleToMergedGV;
+ bool HasChanged = false;
+
+ // Traverse all insertion points in all the functions.
+ for (InsertionPointsPerFunc::iterator FctToInstPtsIt = InsPtsPerFunc.begin(),
+ EndIt = InsPtsPerFunc.end();
+ FctToInstPtsIt != EndIt; ++FctToInstPtsIt) {
+ InsertionPoints &InsertPts = FctToInstPtsIt->second;
+// Do more checking for debug purposes.
+#ifndef NDEBUG
+ DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>(
+ *FctToInstPtsIt->first).getDomTree();
+#endif
+ GlobalVariable *PromotedGV;
+ assert(!InsertPts.empty() && "Empty uses does not need a definition");
+
+ Module *M = FctToInstPtsIt->first->getParent();
+ DenseMap<Module *, GlobalVariable *>::iterator MapIt =
+ ModuleToMergedGV.find(M);
+ if (MapIt == ModuleToMergedGV.end()) {
+ PromotedGV = new GlobalVariable(
+ *M, Cst->getType(), true, GlobalValue::InternalLinkage, nullptr,
+ "_PromotedConst", nullptr, GlobalVariable::NotThreadLocal);
+ PromotedGV->setInitializer(Cst);
+ ModuleToMergedGV[M] = PromotedGV;
+ DEBUG(dbgs() << "Global replacement: ");
+ DEBUG(PromotedGV->print(dbgs()));
+ DEBUG(dbgs() << '\n');
+ ++NumPromoted;
+ HasChanged = true;
+ } else {
+ PromotedGV = MapIt->second;
+ }
+
+ for (InsertionPoints::iterator IPI = InsertPts.begin(),
+ EndIPI = InsertPts.end();
+ IPI != EndIPI; ++IPI) {
+ // Create the load of the global variable.
+ IRBuilder<> Builder(IPI->first->getParent(), IPI->first);
+ LoadInst *LoadedCst = Builder.CreateLoad(PromotedGV);
+ DEBUG(dbgs() << "**********\n");
+ DEBUG(dbgs() << "New def: ");
+ DEBUG(LoadedCst->print(dbgs()));
+ DEBUG(dbgs() << '\n');
+
+ // Update the dominated uses.
+ Users &DominatedUsers = IPI->second;
+ for (Value::user_iterator Use : DominatedUsers) {
+#ifndef NDEBUG
+ assert((DT.dominates(LoadedCst, cast<Instruction>(*Use)) ||
+ (isa<PHINode>(*Use) &&
+ DT.dominates(LoadedCst, findInsertionPoint(Use)))) &&
+ "Inserted definition does not dominate all its uses!");
+#endif
+ DEBUG(dbgs() << "Use to update " << Use.getOperandNo() << ":");
+ DEBUG(Use->print(dbgs()));
+ DEBUG(dbgs() << '\n');
+ Use->setOperand(Use.getOperandNo(), LoadedCst);
+ ++NumPromotedUses;
+ }
+ }
+ }
+ return HasChanged;
+}
+
+bool AArch64PromoteConstant::computeAndInsertDefinitions(Constant *Val) {
+ InsertionPointsPerFunc InsertPtsPerFunc;
+ computeInsertionPoints(Val, InsertPtsPerFunc);
+ return insertDefinitions(Val, InsertPtsPerFunc);
+}
+
+bool AArch64PromoteConstant::promoteConstant(Constant *Cst) {
+ assert(Cst && "Given variable is not a valid constant.");
+
+ if (!shouldConvert(Cst))
+ return false;
+
+ DEBUG(dbgs() << "******************************\n");
+ DEBUG(dbgs() << "Candidate constant: ");
+ DEBUG(Cst->print(dbgs()));
+ DEBUG(dbgs() << '\n');
+
+ return computeAndInsertDefinitions(Cst);
+}
+
+bool AArch64PromoteConstant::runOnFunction(Function &F) {
+ // Look for instructions using constant vector. Promote that constant to a
+ // global variable. Create as few loads of this variable as possible and
+ // update the uses accordingly.
+ bool LocalChange = false;
+ SmallSet<Constant *, 8> AlreadyChecked;
+
+ for (auto &MBB : F) {
+ for (auto &MI : MBB) {
+ // Traverse the operands, looking for constant vectors. Replace them with a
+ // load of a global variable of constant vector type.
+ for (unsigned OpIdx = 0, EndOpIdx = MI.getNumOperands();
+ OpIdx != EndOpIdx; ++OpIdx) {
+ Constant *Cst = dyn_cast<Constant>(MI.getOperand(OpIdx));
+ // There is no point in promoting global values as they are already
+ // global. Do not promote constant expressions either, as they may
+ // require some code expansion.
+ if (Cst && !isa<GlobalValue>(Cst) && !isa<ConstantExpr>(Cst) &&
+ AlreadyChecked.insert(Cst))
+ LocalChange |= promoteConstant(Cst);
+ }
+ }
+ }
+ return LocalChange;
+}
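The pass above boils down to: pick an interesting constant, create one internal global per module initialized with it, emit a load at each minimal dominating insertion point, and rewrite the dominated uses to consume the loaded value. A minimal sketch of that rewrite for a single constant and a single non-PHI use, using the IR APIs of this release; the helper name is illustrative and the insertion-point/dominance bookkeeping of the real pass is omitted:

#include "llvm/IR/Constants.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Illustrative helper, not part of the patch: promote Cst and rewrite one use.
static void promoteSingleUse(Module &M, Constant *Cst, Instruction *UserInst,
                             unsigned OpIdx) {
  // One internal, constant global initialized with Cst; the real pass reuses
  // it for every insertion point in the module.
  GlobalVariable *GV =
      new GlobalVariable(M, Cst->getType(), /*isConstant=*/true,
                         GlobalValue::InternalLinkage, Cst, "_PromotedConst");
  // Load it right before the user and rewrite the operand. The real pass picks
  // the insertion point via findInsertionPoint() and merges points that share
  // a dominator; for a PHI the load would go into the incoming block instead.
  IRBuilder<> Builder(UserInst);
  LoadInst *LoadedCst = Builder.CreateLoad(GV);
  UserInst->setOperand(OpIdx, LoadedCst);
}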
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/contrib/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index 75ec44f..01b9587 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -12,161 +12,393 @@
//
//===----------------------------------------------------------------------===//
-
#include "AArch64RegisterInfo.h"
#include "AArch64FrameLowering.h"
-#include "AArch64MachineFunctionInfo.h"
-#include "AArch64TargetMachine.h"
-#include "MCTargetDesc/AArch64MCTargetDesc.h"
+#include "AArch64InstrInfo.h"
+#include "AArch64Subtarget.h"
+#include "MCTargetDesc/AArch64AddressingModes.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
-#include "llvm/ADT/BitVector.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetOptions.h"
+
+using namespace llvm;
#define GET_REGINFO_TARGET_DESC
#include "AArch64GenRegisterInfo.inc"
-using namespace llvm;
+AArch64RegisterInfo::AArch64RegisterInfo(const AArch64InstrInfo *tii,
+ const AArch64Subtarget *sti)
+ : AArch64GenRegisterInfo(AArch64::LR), TII(tii), STI(sti) {}
-AArch64RegisterInfo::AArch64RegisterInfo()
- : AArch64GenRegisterInfo(AArch64::X30) {
-}
-
-const uint16_t *
+const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
- return CSR_PCS_SaveList;
-}
-
-const uint32_t*
-AArch64RegisterInfo::getCallPreservedMask(CallingConv::ID) const {
- return CSR_PCS_RegMask;
+ assert(MF && "Invalid MachineFunction pointer.");
+ if (MF->getFunction()->getCallingConv() == CallingConv::AnyReg)
+ return CSR_AArch64_AllRegs_SaveList;
+ else
+ return CSR_AArch64_AAPCS_SaveList;
}
-const uint32_t *AArch64RegisterInfo::getTLSDescCallPreservedMask() const {
- return TLSDesc_RegMask;
+const uint32_t *
+AArch64RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
+ if (CC == CallingConv::AnyReg)
+ return CSR_AArch64_AllRegs_RegMask;
+ else
+ return CSR_AArch64_AAPCS_RegMask;
}
-const TargetRegisterClass *
-AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
- if (RC == &AArch64::FlagClassRegClass)
- return &AArch64::GPR64RegClass;
+const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
+ if (STI->isTargetDarwin())
+ return CSR_AArch64_TLS_Darwin_RegMask;
- return RC;
+ assert(STI->isTargetELF() && "only expect Darwin or ELF TLS");
+ return CSR_AArch64_TLS_ELF_RegMask;
}
-
+const uint32_t *
+AArch64RegisterInfo::getThisReturnPreservedMask(CallingConv::ID) const {
+ // This should return a register mask that is the same as that returned by
+ // getCallPreservedMask but that additionally preserves the register used for
+ // the first i64 argument (which must also be the register used to return a
+ // single i64 return value)
+ //
+ // In case that the calling convention does not use the same register for
+ // both, the function should return NULL (does not currently apply)
+ return CSR_AArch64_AAPCS_ThisReturn_RegMask;
+}
BitVector
AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
- BitVector Reserved(getNumRegs());
const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
- Reserved.set(AArch64::XSP);
- Reserved.set(AArch64::WSP);
-
+ // FIXME: avoid re-calculating this every time.
+ BitVector Reserved(getNumRegs());
+ Reserved.set(AArch64::SP);
Reserved.set(AArch64::XZR);
+ Reserved.set(AArch64::WSP);
Reserved.set(AArch64::WZR);
- if (TFI->hasFP(MF)) {
- Reserved.set(AArch64::X29);
+ if (TFI->hasFP(MF) || STI->isTargetDarwin()) {
+ Reserved.set(AArch64::FP);
Reserved.set(AArch64::W29);
}
+ if (STI->isTargetDarwin()) {
+ Reserved.set(AArch64::X18); // Platform register
+ Reserved.set(AArch64::W18);
+ }
+
+ if (hasBasePointer(MF)) {
+ Reserved.set(AArch64::X19);
+ Reserved.set(AArch64::W19);
+ }
+
return Reserved;
}
-void
-AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MBBI,
- int SPAdj,
- unsigned FIOperandNum,
- RegScavenger *RS) const {
- assert(SPAdj == 0 && "Cannot deal with nonzero SPAdj yet");
- MachineInstr &MI = *MBBI;
- MachineBasicBlock &MBB = *MI.getParent();
- MachineFunction &MF = *MBB.getParent();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- const AArch64FrameLowering *TFI =
- static_cast<const AArch64FrameLowering *>(MF.getTarget().getFrameLowering());
-
- // In order to work out the base and offset for addressing, the FrameLowering
- // code needs to know (sometimes) whether the instruction is storing/loading a
- // callee-saved register, or whether it's a more generic
- // operation. Fortunately the frame indices are used *only* for that purpose
- // and are contiguous, so we can check here.
- const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
- int MinCSFI = 0;
- int MaxCSFI = -1;
-
- if (CSI.size()) {
- MinCSFI = CSI[0].getFrameIdx();
- MaxCSFI = CSI[CSI.size() - 1].getFrameIdx();
+bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
+ unsigned Reg) const {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ switch (Reg) {
+ default:
+ break;
+ case AArch64::SP:
+ case AArch64::XZR:
+ case AArch64::WSP:
+ case AArch64::WZR:
+ return true;
+ case AArch64::X18:
+ case AArch64::W18:
+ return STI->isTargetDarwin();
+ case AArch64::FP:
+ case AArch64::W29:
+ return TFI->hasFP(MF) || STI->isTargetDarwin();
+ case AArch64::W19:
+ case AArch64::X19:
+ return hasBasePointer(MF);
}
- int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
- bool IsCalleeSaveOp = FrameIndex >= MinCSFI && FrameIndex <= MaxCSFI;
+ return false;
+}
- unsigned FrameReg;
- int64_t Offset;
- Offset = TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj,
- IsCalleeSaveOp);
+const TargetRegisterClass *
+AArch64RegisterInfo::getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind) const {
+ return &AArch64::GPR64RegClass;
+}
- Offset += MI.getOperand(FIOperandNum + 1).getImm();
+const TargetRegisterClass *
+AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
+ if (RC == &AArch64::CCRRegClass)
+ return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
+ return RC;
+}
- // DBG_VALUE instructions have no real restrictions so they can be handled
- // easily.
- if (MI.isDebugValue()) {
- MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*isDef=*/ false);
- MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
- return;
- }
+unsigned AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }
- const AArch64InstrInfo &TII =
- *static_cast<const AArch64InstrInfo*>(MF.getTarget().getInstrInfo());
- int MinOffset, MaxOffset, OffsetScale;
- if (MI.getOpcode() == AArch64::ADDxxi_lsl0_s) {
- MinOffset = 0;
- MaxOffset = 0xfff;
- OffsetScale = 1;
- } else {
- // Load/store of a stack object
- TII.getAddressConstraints(MI, OffsetScale, MinOffset, MaxOffset);
- }
+bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
- // The frame lowering has told us a base and offset it thinks we should use to
- // access this variable, but it's still up to us to make sure the values are
- // legal for the instruction in question.
- if (Offset % OffsetScale != 0 || Offset < MinOffset || Offset > MaxOffset) {
- unsigned BaseReg =
- MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
- emitRegUpdate(MBB, MBBI, MBBI->getDebugLoc(), TII,
- BaseReg, FrameReg, BaseReg, Offset);
- FrameReg = BaseReg;
- Offset = 0;
+ // In the presence of variable sized objects, if the fixed stack size is
+ // large enough that referencing from the FP won't result in things being
+ // in range relatively often, we can use a base pointer to allow access
+ // from the other direction like the SP normally works.
+ if (MFI->hasVarSizedObjects()) {
+ // Conservatively estimate whether the negative offset from the frame
+ // pointer will be sufficient to reach. If a function has a smallish
+ // frame, it's less likely to have lots of spills and callee saved
+ // space, so it's all more likely to be within range of the frame pointer.
+ // If it's wrong, we'll materialize the constant and still get to the
+ // object; it's just suboptimal. Negative offsets use the unscaled
+ // load/store instructions, which have a 9-bit signed immediate.
+ if (MFI->getLocalFrameSize() < 256)
+ return false;
+ return true;
}
- // Negative offsets are expected if we address from FP, but for
- // now this checks nothing has gone horribly wrong.
- assert(Offset >= 0 && "Unexpected negative offset from SP");
-
- MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, true);
- MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset / OffsetScale);
+ return false;
}
unsigned
AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
- if (TFI->hasFP(MF))
- return AArch64::X29;
- else
- return AArch64::XSP;
+ return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
+}
+
+bool AArch64RegisterInfo::requiresRegisterScavenging(
+ const MachineFunction &MF) const {
+ return true;
+}
+
+bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
+ const MachineFunction &MF) const {
+ return true;
}
bool
AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ // AArch64FrameLowering::resolveFrameIndexReference() can always fall back
+ // to the stack pointer, so only put the emergency spill slot next to the
+ // FP when there's no better way to access it (SP or base pointer).
+ return MFI->hasVarSizedObjects() && !hasBasePointer(MF);
+}
+
+bool AArch64RegisterInfo::requiresFrameIndexScavenging(
+ const MachineFunction &MF) const {
+ return true;
+}
+
+bool
+AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ // Only consider eliminating leaf frames.
+ if (MFI->hasCalls() || (MF.getTarget().Options.DisableFramePointerElim(MF) &&
+ MFI->adjustsStack()))
+ return true;
+ return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken();
+}
+
+/// needsFrameBaseReg - Returns true if the instruction's frame index
+/// reference would be better served by a base register other than FP
+/// or SP. Used by LocalStackFrameAllocation to determine which frame index
+/// references it should create new base registers for.
+bool AArch64RegisterInfo::needsFrameBaseReg(MachineInstr *MI,
+ int64_t Offset) const {
+ for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
+ assert(i < MI->getNumOperands() &&
+ "Instr doesn't have FrameIndex operand!");
+
+ // It's the load/store FI references that cause issues, as it can be difficult
+ // to materialize the offset if it won't fit in the literal field. Estimate
+ // based on the size of the local frame and some conservative assumptions
+ // about the rest of the stack frame (note, this is pre-regalloc, so
+ // we don't know everything for certain yet) whether this offset is likely
+ // to be out of range of the immediate. Return true if so.
+
+ // We only generate virtual base registers for loads and stores, so
+ // return false for everything else.
+ if (!MI->mayLoad() && !MI->mayStore())
+ return false;
+
+ // Without a virtual base register, if the function has variable sized
+ // objects, all fixed-size local references will be via the frame pointer.
+ // Approximate the offset and see if it's legal for the instruction.
+ // Note that the incoming offset is based on the SP value at function entry,
+ // so it'll be negative.
+ MachineFunction &MF = *MI->getParent()->getParent();
const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
- const AArch64FrameLowering *AFI
- = static_cast<const AArch64FrameLowering*>(TFI);
- return AFI->useFPForAddressing(MF);
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ // Estimate an offset from the frame pointer.
+ // Conservatively assume all GPR callee-saved registers get pushed.
+ // FP, LR, X19-X28, D8-D15. 64-bits each.
+ int64_t FPOffset = Offset - 16 * 20;
+ // Estimate an offset from the stack pointer.
+ // The incoming offset is relative to the SP at the start of the function,
+ // but when we access the local it'll be relative to the SP after local
+ // allocation, so adjust our SP-relative offset by that allocation size.
+ Offset += MFI->getLocalFrameSize();
+ // Assume that we'll have at least some spill slots allocated.
+ // FIXME: This is a total SWAG number. We should run some statistics
+ // and pick a real one.
+ Offset += 128; // 128 bytes of spill slots
+
+ // If there is a frame pointer, try using it.
+ // The FP is only available if there is no dynamic realignment. We
+ // don't know for sure yet whether we'll need that, so we guess based
+ // on whether there are any local variables that would trigger it.
+ if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, FPOffset))
+ return false;
+
+ // If we can reference via the stack pointer or base pointer, try that.
+ // FIXME: This (and the code that resolves the references) can be improved
+ // to only disallow SP relative references in the live range of
+ // the VLA(s). In practice, it's unclear how much difference that
+ // would make, but it may be worth doing.
+ if (isFrameOffsetLegal(MI, Offset))
+ return false;
+
+ // The offset likely isn't legal; we want to allocate a virtual base register.
+ return true;
+}
+
+bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
+ int64_t Offset) const {
+ assert(Offset <= INT_MAX && "Offset too big to fit in int.");
+ assert(MI && "Unable to get the legal offset for nil instruction.");
+ int SaveOffset = Offset;
+ return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal;
}
+
+/// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
+/// at the beginning of the basic block.
+void AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
+ unsigned BaseReg,
+ int FrameIdx,
+ int64_t Offset) const {
+ MachineBasicBlock::iterator Ins = MBB->begin();
+ DebugLoc DL; // Defaults to "unknown"
+ if (Ins != MBB->end())
+ DL = Ins->getDebugLoc();
+
+ const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
+ MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
+ const MachineFunction &MF = *MBB->getParent();
+ MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this, MF));
+ unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
+
+ BuildMI(*MBB, Ins, DL, MCID, BaseReg)
+ .addFrameIndex(FrameIdx)
+ .addImm(Offset)
+ .addImm(Shifter);
+}
+
+void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
+ int64_t Offset) const {
+ int Off = Offset; // AArch64 doesn't need the general 64-bit offsets
+ unsigned i = 0;
+
+ while (!MI.getOperand(i).isFI()) {
+ ++i;
+ assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
+ }
+ bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
+ assert(Done && "Unable to resolve frame index!");
+ (void)Done;
+}
+
+void AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
+ int SPAdj, unsigned FIOperandNum,
+ RegScavenger *RS) const {
+ assert(SPAdj == 0 && "Unexpected");
+
+ MachineInstr &MI = *II;
+ MachineBasicBlock &MBB = *MI.getParent();
+ MachineFunction &MF = *MBB.getParent();
+ const AArch64FrameLowering *TFI = static_cast<const AArch64FrameLowering *>(
+ MF.getTarget().getFrameLowering());
+
+ int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
+ unsigned FrameReg;
+ int Offset;
+
+ // Special handling of dbg_value, stackmap and patchpoint instructions.
+ if (MI.isDebugValue() || MI.getOpcode() == TargetOpcode::STACKMAP ||
+ MI.getOpcode() == TargetOpcode::PATCHPOINT) {
+ Offset = TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
+ /*PreferFP=*/true);
+ Offset += MI.getOperand(FIOperandNum + 1).getImm();
+ MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
+ MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
+ return;
+ }
+
+ // Modify MI as necessary to handle as much of 'Offset' as possible
+ Offset = TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg);
+ if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
+ return;
+
+ assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
+ "Emergency spill slot is out of reach");
+
+ // If we get here, the immediate doesn't fit into the instruction. We folded
+ // as much as possible above. Handle the rest, providing a register that is
+ // SP+LargeImm.
+ unsigned ScratchReg =
+ MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
+ emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
+ MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);
+}
+
+namespace llvm {
+
+unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
+ MachineFunction &MF) const {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ switch (RC->getID()) {
+ default:
+ return 0;
+ case AArch64::GPR32RegClassID:
+ case AArch64::GPR32spRegClassID:
+ case AArch64::GPR32allRegClassID:
+ case AArch64::GPR64spRegClassID:
+ case AArch64::GPR64allRegClassID:
+ case AArch64::GPR64RegClassID:
+ case AArch64::GPR32commonRegClassID:
+ case AArch64::GPR64commonRegClassID:
+ return 32 - 1 // XZR/SP
+ - (TFI->hasFP(MF) || STI->isTargetDarwin()) // FP
+ - STI->isTargetDarwin() // X18 reserved as platform register
+ - hasBasePointer(MF); // X19
+ case AArch64::FPR8RegClassID:
+ case AArch64::FPR16RegClassID:
+ case AArch64::FPR32RegClassID:
+ case AArch64::FPR64RegClassID:
+ case AArch64::FPR128RegClassID:
+ return 32;
+
+ case AArch64::DDRegClassID:
+ case AArch64::DDDRegClassID:
+ case AArch64::DDDDRegClassID:
+ case AArch64::QQRegClassID:
+ case AArch64::QQQRegClassID:
+ case AArch64::QQQQRegClassID:
+ return 32;
+
+ case AArch64::FPR128_loRegClassID:
+ return 16;
+ }
+}
+
+} // namespace llvm
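As a rough worked example of the estimate in needsFrameBaseReg() above (the numbers are illustrative, not from the patch): for an incoming, entry-SP-relative offset of -512 in a function with a 1024-byte local frame, the FP-relative guess is -512 - 16 * 20 = -832 and the SP-relative guess is -512 + 1024 + 128 = 640. Only if neither value is a legal immediate for the instruction (per isFrameOffsetLegal) does the function report that a virtual base register is needed.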
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64RegisterInfo.h b/contrib/llvm/lib/Target/AArch64/AArch64RegisterInfo.h
index 4d67943..76af1ed 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64RegisterInfo.h
+++ b/contrib/llvm/lib/Target/AArch64/AArch64RegisterInfo.h
@@ -1,4 +1,4 @@
-//==- AArch64RegisterInfo.h - AArch64 Register Information Impl -*- C++ -*-===//
+//==- AArch64RegisterInfo.h - AArch64 Register Information Impl --*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@@ -7,14 +7,12 @@
//
//===----------------------------------------------------------------------===//
//
-// This file contains the AArch64 implementation of the MCRegisterInfo class.
+// This file contains the AArch64 implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_TARGET_AARCH64REGISTERINFO_H
-#define LLVM_TARGET_AARCH64REGISTERINFO_H
-
-#include "llvm/Target/TargetRegisterInfo.h"
+#ifndef LLVM_TARGET_AArch64REGISTERINFO_H
+#define LLVM_TARGET_AArch64REGISTERINFO_H
#define GET_REGINFO_HEADER
#include "AArch64GenRegisterInfo.inc"
@@ -23,49 +21,81 @@ namespace llvm {
class AArch64InstrInfo;
class AArch64Subtarget;
+class MachineFunction;
+class RegScavenger;
+class TargetRegisterClass;
struct AArch64RegisterInfo : public AArch64GenRegisterInfo {
- AArch64RegisterInfo();
+private:
+ const AArch64InstrInfo *TII;
+ const AArch64Subtarget *STI;
- const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
- const uint32_t *getCallPreservedMask(CallingConv::ID) const;
+public:
+ AArch64RegisterInfo(const AArch64InstrInfo *tii, const AArch64Subtarget *sti);
- const uint32_t *getTLSDescCallPreservedMask() const;
+ bool isReservedReg(const MachineFunction &MF, unsigned Reg) const;
- BitVector getReservedRegs(const MachineFunction &MF) const;
- unsigned getFrameRegister(const MachineFunction &MF) const;
+ /// Code Generation virtual methods...
+ const MCPhysReg *
+ getCalleeSavedRegs(const MachineFunction *MF = nullptr) const override;
+ const uint32_t *getCallPreservedMask(CallingConv::ID) const override;
- void eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
- unsigned FIOperandNum,
- RegScavenger *Rs = NULL) const;
+ unsigned getCSRFirstUseCost() const override {
+ // The cost will be compared against BlockFrequency where entry has the
+ // value of 1 << 14. A value of 5 will choose to spill or split a really
+ // cold path instead of using a callee-saved register.
+ return 5;
+ }
- /// getCrossCopyRegClass - Returns a legal register class to copy a register
- /// in the specified class to or from. Returns original class if it is
- /// possible to copy between a two registers of the specified class.
+ // Calls involved in thread-local variable lookup save more registers than
+ // normal calls, so they need a different mask to represent this.
+ const uint32_t *getTLSCallPreservedMask() const;
+
+ /// getThisReturnPreservedMask - Returns a call preserved mask specific to the
+ /// case that 'returned' is on an i64 first argument if the calling convention
+ /// is one that can (partially) model this attribute with a preserved mask
+ /// (i.e. it is a calling convention that uses the same register for the first
+ /// i64 argument and an i64 return value)
+ ///
+ /// Should return NULL in the case that the calling convention does not have
+ /// this property
+ const uint32_t *getThisReturnPreservedMask(CallingConv::ID) const;
+
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
const TargetRegisterClass *
- getCrossCopyRegClass(const TargetRegisterClass *RC) const;
-
- /// getLargestLegalSuperClass - Returns the largest super class of RC that is
- /// legal to use in the current sub-target and has the same spill size.
- const TargetRegisterClass*
- getLargestLegalSuperClass(const TargetRegisterClass *RC) const {
- if (RC == &AArch64::tcGPR64RegClass)
- return &AArch64::GPR64RegClass;
-
- return RC;
- }
+ getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind = 0) const override;
+ const TargetRegisterClass *
+ getCrossCopyRegClass(const TargetRegisterClass *RC) const override;
+
+ bool requiresRegisterScavenging(const MachineFunction &MF) const override;
+ bool useFPForScavengingIndex(const MachineFunction &MF) const override;
+ bool requiresFrameIndexScavenging(const MachineFunction &MF) const override;
+
+ bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override;
+ bool isFrameOffsetLegal(const MachineInstr *MI,
+ int64_t Offset) const override;
+ void materializeFrameBaseRegister(MachineBasicBlock *MBB, unsigned BaseReg,
+ int FrameIdx,
+ int64_t Offset) const override;
+ void resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
+ int64_t Offset) const override;
+ void eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
+ unsigned FIOperandNum,
+ RegScavenger *RS = nullptr) const override;
+ bool cannotEliminateFrame(const MachineFunction &MF) const;
- bool requiresRegisterScavenging(const MachineFunction &MF) const {
- return true;
- }
+ bool requiresVirtualBaseRegisters(const MachineFunction &MF) const override;
+ bool hasBasePointer(const MachineFunction &MF) const;
+ unsigned getBaseRegister() const;
- bool requiresFrameIndexScavenging(const MachineFunction &MF) const {
- return true;
- }
+ // Debug information queries.
+ unsigned getFrameRegister(const MachineFunction &MF) const override;
- bool useFPForScavengingIndex(const MachineFunction &MF) const;
+ unsigned getRegPressureLimit(const TargetRegisterClass *RC,
+ MachineFunction &MF) const override;
};
} // end namespace llvm
-#endif // LLVM_TARGET_AARCH64REGISTERINFO_H
+#endif // LLVM_TARGET_AArch64REGISTERINFO_H
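For scale on getCSRFirstUseCost() above: the returned value of 5 lives on the block-frequency scale where the entry block is 1 << 14 = 16384, so, roughly, the first touch of a callee-saved register is only considered worthwhile for uses whose block frequency is at least about 5/16384 (~0.03%) of the entry frequency; anything colder is spilled or split instead. This is an approximate reading of how the greedy register allocator applies the cost.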
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64RegisterInfo.td b/contrib/llvm/lib/Target/AArch64/AArch64RegisterInfo.td
index 4e2022c..a30e4ad 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64RegisterInfo.td
+++ b/contrib/llvm/lib/Target/AArch64/AArch64RegisterInfo.td
@@ -1,4 +1,4 @@
-//===- AArch64RegisterInfo.td - ARM Register defs ----------*- tablegen -*-===//
+//=- AArch64RegisterInfo.td - Describe the AArch64 Registers --*- tablegen -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -7,285 +7,587 @@
//
//===----------------------------------------------------------------------===//
//
-// This file contains declarations that describe the AArch64 register file
//
//===----------------------------------------------------------------------===//
-let Namespace = "AArch64" in {
-def sub_128 : SubRegIndex<128>;
-def sub_64 : SubRegIndex<64>;
-def sub_32 : SubRegIndex<32>;
-def sub_16 : SubRegIndex<16>;
-def sub_8 : SubRegIndex<8>;
-
-// Note: Code depends on these having consecutive numbers.
-def qqsub : SubRegIndex<256, 256>;
-
-def qsub_0 : SubRegIndex<128>;
-def qsub_1 : SubRegIndex<128, 128>;
-def qsub_2 : ComposedSubRegIndex<qqsub, qsub_0>;
-def qsub_3 : ComposedSubRegIndex<qqsub, qsub_1>;
-
-def dsub_0 : SubRegIndex<64>;
-def dsub_1 : SubRegIndex<64, 64>;
-def dsub_2 : ComposedSubRegIndex<qsub_1, dsub_0>;
-def dsub_3 : ComposedSubRegIndex<qsub_1, dsub_1>;
-def dsub_4 : ComposedSubRegIndex<qsub_2, dsub_0>;
-}
-// Registers are identified with 5-bit ID numbers.
-class AArch64Reg<bits<16> enc, string n> : Register<n> {
+class AArch64Reg<bits<16> enc, string n, list<Register> subregs = [],
+ list<string> altNames = []>
+ : Register<n, altNames> {
let HWEncoding = enc;
let Namespace = "AArch64";
+ let SubRegs = subregs;
}
-class AArch64RegWithSubs<bits<16> enc, string n, list<Register> subregs = [],
- list<SubRegIndex> inds = []>
- : AArch64Reg<enc, n> {
- let SubRegs = subregs;
- let SubRegIndices = inds;
+let Namespace = "AArch64" in {
+ def sub_32 : SubRegIndex<32>;
+
+ def bsub : SubRegIndex<8>;
+ def hsub : SubRegIndex<16>;
+ def ssub : SubRegIndex<32>;
+ def dsub : SubRegIndex<32>;
+ def qhisub : SubRegIndex<64>;
+ def qsub : SubRegIndex<64>;
+ // Note: Code depends on these having consecutive numbers
+ def dsub0 : SubRegIndex<64>;
+ def dsub1 : SubRegIndex<64>;
+ def dsub2 : SubRegIndex<64>;
+ def dsub3 : SubRegIndex<64>;
+ // Note: Code depends on these having consecutive numbers
+ def qsub0 : SubRegIndex<128>;
+ def qsub1 : SubRegIndex<128>;
+ def qsub2 : SubRegIndex<128>;
+ def qsub3 : SubRegIndex<128>;
+}
+
+let Namespace = "AArch64" in {
+ def vreg : RegAltNameIndex;
+ def vlist1 : RegAltNameIndex;
}
//===----------------------------------------------------------------------===//
-// Integer registers: w0-w30, wzr, wsp, x0-x30, xzr, sp
+// Registers
//===----------------------------------------------------------------------===//
-
-foreach Index = 0-30 in {
- def W#Index : AArch64Reg< Index, "w"#Index>, DwarfRegNum<[Index]>;
+def W0 : AArch64Reg<0, "w0" >, DwarfRegNum<[0]>;
+def W1 : AArch64Reg<1, "w1" >, DwarfRegNum<[1]>;
+def W2 : AArch64Reg<2, "w2" >, DwarfRegNum<[2]>;
+def W3 : AArch64Reg<3, "w3" >, DwarfRegNum<[3]>;
+def W4 : AArch64Reg<4, "w4" >, DwarfRegNum<[4]>;
+def W5 : AArch64Reg<5, "w5" >, DwarfRegNum<[5]>;
+def W6 : AArch64Reg<6, "w6" >, DwarfRegNum<[6]>;
+def W7 : AArch64Reg<7, "w7" >, DwarfRegNum<[7]>;
+def W8 : AArch64Reg<8, "w8" >, DwarfRegNum<[8]>;
+def W9 : AArch64Reg<9, "w9" >, DwarfRegNum<[9]>;
+def W10 : AArch64Reg<10, "w10">, DwarfRegNum<[10]>;
+def W11 : AArch64Reg<11, "w11">, DwarfRegNum<[11]>;
+def W12 : AArch64Reg<12, "w12">, DwarfRegNum<[12]>;
+def W13 : AArch64Reg<13, "w13">, DwarfRegNum<[13]>;
+def W14 : AArch64Reg<14, "w14">, DwarfRegNum<[14]>;
+def W15 : AArch64Reg<15, "w15">, DwarfRegNum<[15]>;
+def W16 : AArch64Reg<16, "w16">, DwarfRegNum<[16]>;
+def W17 : AArch64Reg<17, "w17">, DwarfRegNum<[17]>;
+def W18 : AArch64Reg<18, "w18">, DwarfRegNum<[18]>;
+def W19 : AArch64Reg<19, "w19">, DwarfRegNum<[19]>;
+def W20 : AArch64Reg<20, "w20">, DwarfRegNum<[20]>;
+def W21 : AArch64Reg<21, "w21">, DwarfRegNum<[21]>;
+def W22 : AArch64Reg<22, "w22">, DwarfRegNum<[22]>;
+def W23 : AArch64Reg<23, "w23">, DwarfRegNum<[23]>;
+def W24 : AArch64Reg<24, "w24">, DwarfRegNum<[24]>;
+def W25 : AArch64Reg<25, "w25">, DwarfRegNum<[25]>;
+def W26 : AArch64Reg<26, "w26">, DwarfRegNum<[26]>;
+def W27 : AArch64Reg<27, "w27">, DwarfRegNum<[27]>;
+def W28 : AArch64Reg<28, "w28">, DwarfRegNum<[28]>;
+def W29 : AArch64Reg<29, "w29">, DwarfRegNum<[29]>;
+def W30 : AArch64Reg<30, "w30">, DwarfRegNum<[30]>;
+def WSP : AArch64Reg<31, "wsp">, DwarfRegNum<[31]>;
+def WZR : AArch64Reg<31, "wzr">, DwarfRegAlias<WSP>;
+
+let SubRegIndices = [sub_32] in {
+def X0 : AArch64Reg<0, "x0", [W0]>, DwarfRegAlias<W0>;
+def X1 : AArch64Reg<1, "x1", [W1]>, DwarfRegAlias<W1>;
+def X2 : AArch64Reg<2, "x2", [W2]>, DwarfRegAlias<W2>;
+def X3 : AArch64Reg<3, "x3", [W3]>, DwarfRegAlias<W3>;
+def X4 : AArch64Reg<4, "x4", [W4]>, DwarfRegAlias<W4>;
+def X5 : AArch64Reg<5, "x5", [W5]>, DwarfRegAlias<W5>;
+def X6 : AArch64Reg<6, "x6", [W6]>, DwarfRegAlias<W6>;
+def X7 : AArch64Reg<7, "x7", [W7]>, DwarfRegAlias<W7>;
+def X8 : AArch64Reg<8, "x8", [W8]>, DwarfRegAlias<W8>;
+def X9 : AArch64Reg<9, "x9", [W9]>, DwarfRegAlias<W9>;
+def X10 : AArch64Reg<10, "x10", [W10]>, DwarfRegAlias<W10>;
+def X11 : AArch64Reg<11, "x11", [W11]>, DwarfRegAlias<W11>;
+def X12 : AArch64Reg<12, "x12", [W12]>, DwarfRegAlias<W12>;
+def X13 : AArch64Reg<13, "x13", [W13]>, DwarfRegAlias<W13>;
+def X14 : AArch64Reg<14, "x14", [W14]>, DwarfRegAlias<W14>;
+def X15 : AArch64Reg<15, "x15", [W15]>, DwarfRegAlias<W15>;
+def X16 : AArch64Reg<16, "x16", [W16]>, DwarfRegAlias<W16>;
+def X17 : AArch64Reg<17, "x17", [W17]>, DwarfRegAlias<W17>;
+def X18 : AArch64Reg<18, "x18", [W18]>, DwarfRegAlias<W18>;
+def X19 : AArch64Reg<19, "x19", [W19]>, DwarfRegAlias<W19>;
+def X20 : AArch64Reg<20, "x20", [W20]>, DwarfRegAlias<W20>;
+def X21 : AArch64Reg<21, "x21", [W21]>, DwarfRegAlias<W21>;
+def X22 : AArch64Reg<22, "x22", [W22]>, DwarfRegAlias<W22>;
+def X23 : AArch64Reg<23, "x23", [W23]>, DwarfRegAlias<W23>;
+def X24 : AArch64Reg<24, "x24", [W24]>, DwarfRegAlias<W24>;
+def X25 : AArch64Reg<25, "x25", [W25]>, DwarfRegAlias<W25>;
+def X26 : AArch64Reg<26, "x26", [W26]>, DwarfRegAlias<W26>;
+def X27 : AArch64Reg<27, "x27", [W27]>, DwarfRegAlias<W27>;
+def X28 : AArch64Reg<28, "x28", [W28]>, DwarfRegAlias<W28>;
+def FP : AArch64Reg<29, "x29", [W29]>, DwarfRegAlias<W29>;
+def LR : AArch64Reg<30, "x30", [W30]>, DwarfRegAlias<W30>;
+def SP : AArch64Reg<31, "sp", [WSP]>, DwarfRegAlias<WSP>;
+def XZR : AArch64Reg<31, "xzr", [WZR]>, DwarfRegAlias<WSP>;
}
-def WSP : AArch64Reg<31, "wsp">, DwarfRegNum<[31]>;
-def WZR : AArch64Reg<31, "wzr">;
+// Condition code register.
+def NZCV : AArch64Reg<0, "nzcv">;
-// Could be combined with previous loop, but this way leaves w and x registers
-// consecutive as LLVM register numbers, which makes for easier debugging.
-foreach Index = 0-30 in {
- def X#Index : AArch64RegWithSubs<Index, "x"#Index,
- [!cast<Register>("W"#Index)], [sub_32]>,
- DwarfRegNum<[Index]>;
+// GPR register classes with the intersections of GPR32/GPR32sp and
+// GPR64/GPR64sp for use by the coalescer.
+def GPR32common : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 0, 30)> {
+ let AltOrders = [(rotl GPR32common, 8)];
+ let AltOrderSelect = [{ return 1; }];
+}
+def GPR64common : RegisterClass<"AArch64", [i64], 64,
+ (add (sequence "X%u", 0, 28), FP, LR)> {
+ let AltOrders = [(rotl GPR64common, 8)];
+ let AltOrderSelect = [{ return 1; }];
+}
+// GPR register classes which exclude SP/WSP.
+def GPR32 : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR)> {
+ let AltOrders = [(rotl GPR32, 8)];
+ let AltOrderSelect = [{ return 1; }];
+}
+def GPR64 : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR)> {
+ let AltOrders = [(rotl GPR64, 8)];
+ let AltOrderSelect = [{ return 1; }];
}
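+
+// For illustration only (no such pattern is defined in this file): instruction
+// definitions elsewhere reference these classes in selection patterns and
+// operand lists, e.g.
+//   (set GPR64:$Rd, (add GPR64:$Rn, GPR64:$Rm))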
-def XSP : AArch64RegWithSubs<31, "sp", [WSP], [sub_32]>, DwarfRegNum<[31]>;
-def XZR : AArch64RegWithSubs<31, "xzr", [WZR], [sub_32]>;
+// GPR register classes which include SP/WSP.
+def GPR32sp : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WSP)> {
+ let AltOrders = [(rotl GPR32sp, 8)];
+ let AltOrderSelect = [{ return 1; }];
+}
+def GPR64sp : RegisterClass<"AArch64", [i64], 64, (add GPR64common, SP)> {
+ let AltOrders = [(rotl GPR64sp, 8)];
+ let AltOrderSelect = [{ return 1; }];
+}
-// Most instructions treat register 31 as zero for reads and a black-hole for
-// writes.
+def GPR32sponly : RegisterClass<"AArch64", [i32], 32, (add WSP)>;
+def GPR64sponly : RegisterClass<"AArch64", [i64], 64, (add SP)>;
-// Note that the order of registers is important for the Disassembler here:
-// tablegen uses it to form MCRegisterClass::getRegister, which we assume can
-// take an encoding value.
-def GPR32 : RegisterClass<"AArch64", [i32], 32,
- (add (sequence "W%u", 0, 30), WZR)> {
+def GPR64spPlus0Operand : AsmOperandClass {
+ let Name = "GPR64sp0";
+ let RenderMethod = "addRegOperands";
+ let ParserMethod = "tryParseGPR64sp0Operand";
}
-def GPR64 : RegisterClass<"AArch64", [i64], 64,
- (add (sequence "X%u", 0, 30), XZR)> {
+def GPR64sp0 : RegisterOperand<GPR64sp> {
+ let ParserMatchClass = GPR64spPlus0Operand;
}
-def GPR32nowzr : RegisterClass<"AArch64", [i32], 32,
- (sequence "W%u", 0, 30)> {
+// GPR register classes which include WZR/XZR AND SP/WSP. This is not a
+// constraint used by any instructions; it is used as a common super-class.
+def GPR32all : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR, WSP)>;
+def GPR64all : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR, SP)>;
+
+// For tail calls, we can't use callee-saved registers, as they are restored
+// to the saved value before the tail call, which would clobber a call address.
+// This is for indirect tail calls to store the address of the destination.
+def tcGPR64 : RegisterClass<"AArch64", [i64], 64, (sub GPR64common, X19, X20, X21,
+ X22, X23, X24, X25, X26,
+ X27, X28, FP, LR)>;
+
+// GPR register classes for the post-increment amount of vector load/store
+// instructions. These have alternate printing when Rm=31, where a constant
+// immediate value equal to the total number of bytes transferred is printed
+// instead.
+
+// FIXME: TableGen *should* be able to do these itself now. There appears to be
+// a bug in counting how many operands a Post-indexed MCInst should have which
+// means the aliases don't trigger.
+def GPR64pi1 : RegisterOperand<GPR64, "printPostIncOperand<1>">;
+def GPR64pi2 : RegisterOperand<GPR64, "printPostIncOperand<2>">;
+def GPR64pi3 : RegisterOperand<GPR64, "printPostIncOperand<3>">;
+def GPR64pi4 : RegisterOperand<GPR64, "printPostIncOperand<4>">;
+def GPR64pi6 : RegisterOperand<GPR64, "printPostIncOperand<6>">;
+def GPR64pi8 : RegisterOperand<GPR64, "printPostIncOperand<8>">;
+def GPR64pi12 : RegisterOperand<GPR64, "printPostIncOperand<12>">;
+def GPR64pi16 : RegisterOperand<GPR64, "printPostIncOperand<16>">;
+def GPR64pi24 : RegisterOperand<GPR64, "printPostIncOperand<24>">;
+def GPR64pi32 : RegisterOperand<GPR64, "printPostIncOperand<32>">;
+def GPR64pi48 : RegisterOperand<GPR64, "printPostIncOperand<48>">;
+def GPR64pi64 : RegisterOperand<GPR64, "printPostIncOperand<64>">;
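+
+// Illustrative sketch (assembly forms assumed from the comment above, not
+// taken from instruction definitions in this file): with GPR64pi8, a
+// post-indexed load prints the register form when Rm != 31,
+//   ld1 { v0.8b }, [x0], x2
+// and the immediate form, equal to the 8 bytes transferred, when Rm == 31:
+//   ld1 { v0.8b }, [x0], #8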
+
+// Condition code regclass.
+def CCR : RegisterClass<"AArch64", [i32], 32, (add NZCV)> {
+ let CopyCost = -1; // Don't allow copying of status registers.
+
+ // CCR is not allocatable.
+ let isAllocatable = 0;
}
-def GPR64noxzr : RegisterClass<"AArch64", [i64], 64,
- (sequence "X%u", 0, 30)> {
-}
+//===----------------------------------------------------------------------===//
+// Floating Point Scalar Registers
+//===----------------------------------------------------------------------===//
-// For tail calls, we can't use callee-saved registers or the structure-return
-// register, as they are supposed to be live across function calls and may be
-// clobbered by the epilogue.
-def tcGPR64 : RegisterClass<"AArch64", [i64], 64,
- (add (sequence "X%u", 0, 7),
- (sequence "X%u", 9, 18))> {
+def B0 : AArch64Reg<0, "b0">, DwarfRegNum<[64]>;
+def B1 : AArch64Reg<1, "b1">, DwarfRegNum<[65]>;
+def B2 : AArch64Reg<2, "b2">, DwarfRegNum<[66]>;
+def B3 : AArch64Reg<3, "b3">, DwarfRegNum<[67]>;
+def B4 : AArch64Reg<4, "b4">, DwarfRegNum<[68]>;
+def B5 : AArch64Reg<5, "b5">, DwarfRegNum<[69]>;
+def B6 : AArch64Reg<6, "b6">, DwarfRegNum<[70]>;
+def B7 : AArch64Reg<7, "b7">, DwarfRegNum<[71]>;
+def B8 : AArch64Reg<8, "b8">, DwarfRegNum<[72]>;
+def B9 : AArch64Reg<9, "b9">, DwarfRegNum<[73]>;
+def B10 : AArch64Reg<10, "b10">, DwarfRegNum<[74]>;
+def B11 : AArch64Reg<11, "b11">, DwarfRegNum<[75]>;
+def B12 : AArch64Reg<12, "b12">, DwarfRegNum<[76]>;
+def B13 : AArch64Reg<13, "b13">, DwarfRegNum<[77]>;
+def B14 : AArch64Reg<14, "b14">, DwarfRegNum<[78]>;
+def B15 : AArch64Reg<15, "b15">, DwarfRegNum<[79]>;
+def B16 : AArch64Reg<16, "b16">, DwarfRegNum<[80]>;
+def B17 : AArch64Reg<17, "b17">, DwarfRegNum<[81]>;
+def B18 : AArch64Reg<18, "b18">, DwarfRegNum<[82]>;
+def B19 : AArch64Reg<19, "b19">, DwarfRegNum<[83]>;
+def B20 : AArch64Reg<20, "b20">, DwarfRegNum<[84]>;
+def B21 : AArch64Reg<21, "b21">, DwarfRegNum<[85]>;
+def B22 : AArch64Reg<22, "b22">, DwarfRegNum<[86]>;
+def B23 : AArch64Reg<23, "b23">, DwarfRegNum<[87]>;
+def B24 : AArch64Reg<24, "b24">, DwarfRegNum<[88]>;
+def B25 : AArch64Reg<25, "b25">, DwarfRegNum<[89]>;
+def B26 : AArch64Reg<26, "b26">, DwarfRegNum<[90]>;
+def B27 : AArch64Reg<27, "b27">, DwarfRegNum<[91]>;
+def B28 : AArch64Reg<28, "b28">, DwarfRegNum<[92]>;
+def B29 : AArch64Reg<29, "b29">, DwarfRegNum<[93]>;
+def B30 : AArch64Reg<30, "b30">, DwarfRegNum<[94]>;
+def B31 : AArch64Reg<31, "b31">, DwarfRegNum<[95]>;
+
+let SubRegIndices = [bsub] in {
+def H0 : AArch64Reg<0, "h0", [B0]>, DwarfRegAlias<B0>;
+def H1 : AArch64Reg<1, "h1", [B1]>, DwarfRegAlias<B1>;
+def H2 : AArch64Reg<2, "h2", [B2]>, DwarfRegAlias<B2>;
+def H3 : AArch64Reg<3, "h3", [B3]>, DwarfRegAlias<B3>;
+def H4 : AArch64Reg<4, "h4", [B4]>, DwarfRegAlias<B4>;
+def H5 : AArch64Reg<5, "h5", [B5]>, DwarfRegAlias<B5>;
+def H6 : AArch64Reg<6, "h6", [B6]>, DwarfRegAlias<B6>;
+def H7 : AArch64Reg<7, "h7", [B7]>, DwarfRegAlias<B7>;
+def H8 : AArch64Reg<8, "h8", [B8]>, DwarfRegAlias<B8>;
+def H9 : AArch64Reg<9, "h9", [B9]>, DwarfRegAlias<B9>;
+def H10 : AArch64Reg<10, "h10", [B10]>, DwarfRegAlias<B10>;
+def H11 : AArch64Reg<11, "h11", [B11]>, DwarfRegAlias<B11>;
+def H12 : AArch64Reg<12, "h12", [B12]>, DwarfRegAlias<B12>;
+def H13 : AArch64Reg<13, "h13", [B13]>, DwarfRegAlias<B13>;
+def H14 : AArch64Reg<14, "h14", [B14]>, DwarfRegAlias<B14>;
+def H15 : AArch64Reg<15, "h15", [B15]>, DwarfRegAlias<B15>;
+def H16 : AArch64Reg<16, "h16", [B16]>, DwarfRegAlias<B16>;
+def H17 : AArch64Reg<17, "h17", [B17]>, DwarfRegAlias<B17>;
+def H18 : AArch64Reg<18, "h18", [B18]>, DwarfRegAlias<B18>;
+def H19 : AArch64Reg<19, "h19", [B19]>, DwarfRegAlias<B19>;
+def H20 : AArch64Reg<20, "h20", [B20]>, DwarfRegAlias<B20>;
+def H21 : AArch64Reg<21, "h21", [B21]>, DwarfRegAlias<B21>;
+def H22 : AArch64Reg<22, "h22", [B22]>, DwarfRegAlias<B22>;
+def H23 : AArch64Reg<23, "h23", [B23]>, DwarfRegAlias<B23>;
+def H24 : AArch64Reg<24, "h24", [B24]>, DwarfRegAlias<B24>;
+def H25 : AArch64Reg<25, "h25", [B25]>, DwarfRegAlias<B25>;
+def H26 : AArch64Reg<26, "h26", [B26]>, DwarfRegAlias<B26>;
+def H27 : AArch64Reg<27, "h27", [B27]>, DwarfRegAlias<B27>;
+def H28 : AArch64Reg<28, "h28", [B28]>, DwarfRegAlias<B28>;
+def H29 : AArch64Reg<29, "h29", [B29]>, DwarfRegAlias<B29>;
+def H30 : AArch64Reg<30, "h30", [B30]>, DwarfRegAlias<B30>;
+def H31 : AArch64Reg<31, "h31", [B31]>, DwarfRegAlias<B31>;
}
+let SubRegIndices = [hsub] in {
+def S0 : AArch64Reg<0, "s0", [H0]>, DwarfRegAlias<B0>;
+def S1 : AArch64Reg<1, "s1", [H1]>, DwarfRegAlias<B1>;
+def S2 : AArch64Reg<2, "s2", [H2]>, DwarfRegAlias<B2>;
+def S3 : AArch64Reg<3, "s3", [H3]>, DwarfRegAlias<B3>;
+def S4 : AArch64Reg<4, "s4", [H4]>, DwarfRegAlias<B4>;
+def S5 : AArch64Reg<5, "s5", [H5]>, DwarfRegAlias<B5>;
+def S6 : AArch64Reg<6, "s6", [H6]>, DwarfRegAlias<B6>;
+def S7 : AArch64Reg<7, "s7", [H7]>, DwarfRegAlias<B7>;
+def S8 : AArch64Reg<8, "s8", [H8]>, DwarfRegAlias<B8>;
+def S9 : AArch64Reg<9, "s9", [H9]>, DwarfRegAlias<B9>;
+def S10 : AArch64Reg<10, "s10", [H10]>, DwarfRegAlias<B10>;
+def S11 : AArch64Reg<11, "s11", [H11]>, DwarfRegAlias<B11>;
+def S12 : AArch64Reg<12, "s12", [H12]>, DwarfRegAlias<B12>;
+def S13 : AArch64Reg<13, "s13", [H13]>, DwarfRegAlias<B13>;
+def S14 : AArch64Reg<14, "s14", [H14]>, DwarfRegAlias<B14>;
+def S15 : AArch64Reg<15, "s15", [H15]>, DwarfRegAlias<B15>;
+def S16 : AArch64Reg<16, "s16", [H16]>, DwarfRegAlias<B16>;
+def S17 : AArch64Reg<17, "s17", [H17]>, DwarfRegAlias<B17>;
+def S18 : AArch64Reg<18, "s18", [H18]>, DwarfRegAlias<B18>;
+def S19 : AArch64Reg<19, "s19", [H19]>, DwarfRegAlias<B19>;
+def S20 : AArch64Reg<20, "s20", [H20]>, DwarfRegAlias<B20>;
+def S21 : AArch64Reg<21, "s21", [H21]>, DwarfRegAlias<B21>;
+def S22 : AArch64Reg<22, "s22", [H22]>, DwarfRegAlias<B22>;
+def S23 : AArch64Reg<23, "s23", [H23]>, DwarfRegAlias<B23>;
+def S24 : AArch64Reg<24, "s24", [H24]>, DwarfRegAlias<B24>;
+def S25 : AArch64Reg<25, "s25", [H25]>, DwarfRegAlias<B25>;
+def S26 : AArch64Reg<26, "s26", [H26]>, DwarfRegAlias<B26>;
+def S27 : AArch64Reg<27, "s27", [H27]>, DwarfRegAlias<B27>;
+def S28 : AArch64Reg<28, "s28", [H28]>, DwarfRegAlias<B28>;
+def S29 : AArch64Reg<29, "s29", [H29]>, DwarfRegAlias<B29>;
+def S30 : AArch64Reg<30, "s30", [H30]>, DwarfRegAlias<B30>;
+def S31 : AArch64Reg<31, "s31", [H31]>, DwarfRegAlias<B31>;
+}
-// Certain addressing-useful instructions accept sp directly. Again the order of
-// registers is important to the Disassembler.
-def GPR32wsp : RegisterClass<"AArch64", [i32], 32,
- (add (sequence "W%u", 0, 30), WSP)> {
+let SubRegIndices = [ssub], RegAltNameIndices = [vreg, vlist1] in {
+def D0 : AArch64Reg<0, "d0", [S0], ["v0", ""]>, DwarfRegAlias<B0>;
+def D1 : AArch64Reg<1, "d1", [S1], ["v1", ""]>, DwarfRegAlias<B1>;
+def D2 : AArch64Reg<2, "d2", [S2], ["v2", ""]>, DwarfRegAlias<B2>;
+def D3 : AArch64Reg<3, "d3", [S3], ["v3", ""]>, DwarfRegAlias<B3>;
+def D4 : AArch64Reg<4, "d4", [S4], ["v4", ""]>, DwarfRegAlias<B4>;
+def D5 : AArch64Reg<5, "d5", [S5], ["v5", ""]>, DwarfRegAlias<B5>;
+def D6 : AArch64Reg<6, "d6", [S6], ["v6", ""]>, DwarfRegAlias<B6>;
+def D7 : AArch64Reg<7, "d7", [S7], ["v7", ""]>, DwarfRegAlias<B7>;
+def D8 : AArch64Reg<8, "d8", [S8], ["v8", ""]>, DwarfRegAlias<B8>;
+def D9 : AArch64Reg<9, "d9", [S9], ["v9", ""]>, DwarfRegAlias<B9>;
+def D10 : AArch64Reg<10, "d10", [S10], ["v10", ""]>, DwarfRegAlias<B10>;
+def D11 : AArch64Reg<11, "d11", [S11], ["v11", ""]>, DwarfRegAlias<B11>;
+def D12 : AArch64Reg<12, "d12", [S12], ["v12", ""]>, DwarfRegAlias<B12>;
+def D13 : AArch64Reg<13, "d13", [S13], ["v13", ""]>, DwarfRegAlias<B13>;
+def D14 : AArch64Reg<14, "d14", [S14], ["v14", ""]>, DwarfRegAlias<B14>;
+def D15 : AArch64Reg<15, "d15", [S15], ["v15", ""]>, DwarfRegAlias<B15>;
+def D16 : AArch64Reg<16, "d16", [S16], ["v16", ""]>, DwarfRegAlias<B16>;
+def D17 : AArch64Reg<17, "d17", [S17], ["v17", ""]>, DwarfRegAlias<B17>;
+def D18 : AArch64Reg<18, "d18", [S18], ["v18", ""]>, DwarfRegAlias<B18>;
+def D19 : AArch64Reg<19, "d19", [S19], ["v19", ""]>, DwarfRegAlias<B19>;
+def D20 : AArch64Reg<20, "d20", [S20], ["v20", ""]>, DwarfRegAlias<B20>;
+def D21 : AArch64Reg<21, "d21", [S21], ["v21", ""]>, DwarfRegAlias<B21>;
+def D22 : AArch64Reg<22, "d22", [S22], ["v22", ""]>, DwarfRegAlias<B22>;
+def D23 : AArch64Reg<23, "d23", [S23], ["v23", ""]>, DwarfRegAlias<B23>;
+def D24 : AArch64Reg<24, "d24", [S24], ["v24", ""]>, DwarfRegAlias<B24>;
+def D25 : AArch64Reg<25, "d25", [S25], ["v25", ""]>, DwarfRegAlias<B25>;
+def D26 : AArch64Reg<26, "d26", [S26], ["v26", ""]>, DwarfRegAlias<B26>;
+def D27 : AArch64Reg<27, "d27", [S27], ["v27", ""]>, DwarfRegAlias<B27>;
+def D28 : AArch64Reg<28, "d28", [S28], ["v28", ""]>, DwarfRegAlias<B28>;
+def D29 : AArch64Reg<29, "d29", [S29], ["v29", ""]>, DwarfRegAlias<B29>;
+def D30 : AArch64Reg<30, "d30", [S30], ["v30", ""]>, DwarfRegAlias<B30>;
+def D31 : AArch64Reg<31, "d31", [S31], ["v31", ""]>, DwarfRegAlias<B31>;
}
-def GPR64xsp : RegisterClass<"AArch64", [i64], 64,
- (add (sequence "X%u", 0, 30), XSP)> {
+let SubRegIndices = [dsub], RegAltNameIndices = [vreg, vlist1] in {
+def Q0 : AArch64Reg<0, "q0", [D0], ["v0", ""]>, DwarfRegAlias<B0>;
+def Q1 : AArch64Reg<1, "q1", [D1], ["v1", ""]>, DwarfRegAlias<B1>;
+def Q2 : AArch64Reg<2, "q2", [D2], ["v2", ""]>, DwarfRegAlias<B2>;
+def Q3 : AArch64Reg<3, "q3", [D3], ["v3", ""]>, DwarfRegAlias<B3>;
+def Q4 : AArch64Reg<4, "q4", [D4], ["v4", ""]>, DwarfRegAlias<B4>;
+def Q5 : AArch64Reg<5, "q5", [D5], ["v5", ""]>, DwarfRegAlias<B5>;
+def Q6 : AArch64Reg<6, "q6", [D6], ["v6", ""]>, DwarfRegAlias<B6>;
+def Q7 : AArch64Reg<7, "q7", [D7], ["v7", ""]>, DwarfRegAlias<B7>;
+def Q8 : AArch64Reg<8, "q8", [D8], ["v8", ""]>, DwarfRegAlias<B8>;
+def Q9 : AArch64Reg<9, "q9", [D9], ["v9", ""]>, DwarfRegAlias<B9>;
+def Q10 : AArch64Reg<10, "q10", [D10], ["v10", ""]>, DwarfRegAlias<B10>;
+def Q11 : AArch64Reg<11, "q11", [D11], ["v11", ""]>, DwarfRegAlias<B11>;
+def Q12 : AArch64Reg<12, "q12", [D12], ["v12", ""]>, DwarfRegAlias<B12>;
+def Q13 : AArch64Reg<13, "q13", [D13], ["v13", ""]>, DwarfRegAlias<B13>;
+def Q14 : AArch64Reg<14, "q14", [D14], ["v14", ""]>, DwarfRegAlias<B14>;
+def Q15 : AArch64Reg<15, "q15", [D15], ["v15", ""]>, DwarfRegAlias<B15>;
+def Q16 : AArch64Reg<16, "q16", [D16], ["v16", ""]>, DwarfRegAlias<B16>;
+def Q17 : AArch64Reg<17, "q17", [D17], ["v17", ""]>, DwarfRegAlias<B17>;
+def Q18 : AArch64Reg<18, "q18", [D18], ["v18", ""]>, DwarfRegAlias<B18>;
+def Q19 : AArch64Reg<19, "q19", [D19], ["v19", ""]>, DwarfRegAlias<B19>;
+def Q20 : AArch64Reg<20, "q20", [D20], ["v20", ""]>, DwarfRegAlias<B20>;
+def Q21 : AArch64Reg<21, "q21", [D21], ["v21", ""]>, DwarfRegAlias<B21>;
+def Q22 : AArch64Reg<22, "q22", [D22], ["v22", ""]>, DwarfRegAlias<B22>;
+def Q23 : AArch64Reg<23, "q23", [D23], ["v23", ""]>, DwarfRegAlias<B23>;
+def Q24 : AArch64Reg<24, "q24", [D24], ["v24", ""]>, DwarfRegAlias<B24>;
+def Q25 : AArch64Reg<25, "q25", [D25], ["v25", ""]>, DwarfRegAlias<B25>;
+def Q26 : AArch64Reg<26, "q26", [D26], ["v26", ""]>, DwarfRegAlias<B26>;
+def Q27 : AArch64Reg<27, "q27", [D27], ["v27", ""]>, DwarfRegAlias<B27>;
+def Q28 : AArch64Reg<28, "q28", [D28], ["v28", ""]>, DwarfRegAlias<B28>;
+def Q29 : AArch64Reg<29, "q29", [D29], ["v29", ""]>, DwarfRegAlias<B29>;
+def Q30 : AArch64Reg<30, "q30", [D30], ["v30", ""]>, DwarfRegAlias<B30>;
+def Q31 : AArch64Reg<31, "q31", [D31], ["v31", ""]>, DwarfRegAlias<B31>;
}
-// Some aliases *only* apply to SP (e.g. MOV uses different encoding for SP and
-// non-SP variants). We can't use a bare register in those patterns because
-// TableGen doesn't like it, so we need a class containing just stack registers
-def Rxsp : RegisterClass<"AArch64", [i64], 64,
- (add XSP)> {
+def FPR8 : RegisterClass<"AArch64", [untyped], 8, (sequence "B%u", 0, 31)> {
+ let Size = 8;
}
+def FPR16 : RegisterClass<"AArch64", [f16], 16, (sequence "H%u", 0, 31)> {
+ let Size = 16;
+}
+def FPR32 : RegisterClass<"AArch64", [f32, i32], 32, (sequence "S%u", 0, 31)>;
+def FPR64 : RegisterClass<"AArch64", [f64, i64, v2f32, v1f64, v8i8, v4i16, v2i32,
+ v1i64],
+ 64, (sequence "D%u", 0, 31)>;
+// We don't (yet) have an f128 legal type, so don't use that here. We
+// normalize 128-bit vectors to v2f64 for arg passing and such, so use
+// that here.
+def FPR128 : RegisterClass<"AArch64",
+ [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, f128],
+ 128, (sequence "Q%u", 0, 31)>;
-def Rwsp : RegisterClass<"AArch64", [i32], 32,
- (add WSP)> {
+// The lower 16 vector registers. Some instructions can only take registers
+// in this range.
+def FPR128_lo : RegisterClass<"AArch64",
+ [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
+ 128, (trunc FPR128, 16)>;
+
+// Pairs, triples, and quads of 64-bit vector registers.
+def DSeqPairs : RegisterTuples<[dsub0, dsub1], [(rotl FPR64, 0), (rotl FPR64, 1)]>;
+def DSeqTriples : RegisterTuples<[dsub0, dsub1, dsub2],
+ [(rotl FPR64, 0), (rotl FPR64, 1),
+ (rotl FPR64, 2)]>;
+def DSeqQuads : RegisterTuples<[dsub0, dsub1, dsub2, dsub3],
+ [(rotl FPR64, 0), (rotl FPR64, 1),
+ (rotl FPR64, 2), (rotl FPR64, 3)]>;
+def DD : RegisterClass<"AArch64", [untyped], 64, (add DSeqPairs)> {
+ let Size = 128;
+}
+def DDD : RegisterClass<"AArch64", [untyped], 64, (add DSeqTriples)> {
+ let Size = 192;
+}
+def DDDD : RegisterClass<"AArch64", [untyped], 64, (add DSeqQuads)> {
+ let Size = 256;
}
-//===----------------------------------------------------------------------===//
-// Scalar registers in the vector unit:
-// b0-b31, h0-h31, s0-s31, d0-d31, q0-q31
-//===----------------------------------------------------------------------===//
+// Pairs, triples, and quads of 128-bit vector registers.
+def QSeqPairs : RegisterTuples<[qsub0, qsub1], [(rotl FPR128, 0), (rotl FPR128, 1)]>;
+def QSeqTriples : RegisterTuples<[qsub0, qsub1, qsub2],
+ [(rotl FPR128, 0), (rotl FPR128, 1),
+ (rotl FPR128, 2)]>;
+def QSeqQuads : RegisterTuples<[qsub0, qsub1, qsub2, qsub3],
+ [(rotl FPR128, 0), (rotl FPR128, 1),
+ (rotl FPR128, 2), (rotl FPR128, 3)]>;
+def QQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqPairs)> {
+ let Size = 256;
+}
+def QQQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqTriples)> {
+ let Size = 384;
+}
+def QQQQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqQuads)> {
+ let Size = 512;
+}
-foreach Index = 0-31 in {
- def B # Index : AArch64Reg< Index, "b" # Index>,
- DwarfRegNum<[!add(Index, 64)]>;
- def H # Index : AArch64RegWithSubs<Index, "h" # Index,
- [!cast<Register>("B" # Index)], [sub_8]>,
- DwarfRegNum<[!add(Index, 64)]>;
+// Vector operand versions of the FP registers. Alternate name printing and
+// assembler matching.
+def VectorReg64AsmOperand : AsmOperandClass {
+ let Name = "VectorReg64";
+ let PredicateMethod = "isVectorReg";
+}
+def VectorReg128AsmOperand : AsmOperandClass {
+ let Name = "VectorReg128";
+ let PredicateMethod = "isVectorReg";
+}
- def S # Index : AArch64RegWithSubs<Index, "s" # Index,
- [!cast<Register>("H" # Index)], [sub_16]>,
- DwarfRegNum<[!add(Index, 64)]>;
+def V64 : RegisterOperand<FPR64, "printVRegOperand"> {
+ let ParserMatchClass = VectorReg64AsmOperand;
+}
- def D # Index : AArch64RegWithSubs<Index, "d" # Index,
- [!cast<Register>("S" # Index)], [sub_32]>,
- DwarfRegNum<[!add(Index, 64)]>;
+def V128 : RegisterOperand<FPR128, "printVRegOperand"> {
+ let ParserMatchClass = VectorReg128AsmOperand;
+}
- def Q # Index : AArch64RegWithSubs<Index, "q" # Index,
- [!cast<Register>("D" # Index)], [sub_64]>,
- DwarfRegNum<[!add(Index, 64)]>;
+def VectorRegLoAsmOperand : AsmOperandClass { let Name = "VectorRegLo"; }
+def V128_lo : RegisterOperand<FPR128_lo, "printVRegOperand"> {
+ let ParserMatchClass = VectorRegLoAsmOperand;
}
+class TypedVecListAsmOperand<int count, int regsize, int lanes, string kind>
+ : AsmOperandClass {
+ let Name = "TypedVectorList" # count # "_" # lanes # kind;
-def FPR8 : RegisterClass<"AArch64", [i8, v1i8], 8,
- (sequence "B%u", 0, 31)> {
+ let PredicateMethod
+ = "isTypedVectorList<" # count # ", " # lanes # ", '" # kind # "'>";
+ let RenderMethod = "addVectorList" # regsize # "Operands<" # count # ">";
}
-def FPR16 : RegisterClass<"AArch64", [f16, v1i16], 16,
- (sequence "H%u", 0, 31)> {
-}
+class TypedVecListRegOperand<RegisterClass Reg, int lanes, string kind>
+ : RegisterOperand<Reg, "printTypedVectorList<" # lanes # ", '"
+ # kind # "'>">;
-def FPR32 : RegisterClass<"AArch64", [f32, v1i32, v1f32], 32,
- (sequence "S%u", 0, 31)> {
-}
+multiclass VectorList<int count, RegisterClass Reg64, RegisterClass Reg128> {
+ // With implicit types (probably on instruction instead). E.g. { v0, v1 }
+ def _64AsmOperand : AsmOperandClass {
+ let Name = NAME # "64";
+ let PredicateMethod = "isImplicitlyTypedVectorList<" # count # ">";
+ let RenderMethod = "addVectorList64Operands<" # count # ">";
+ }
-def FPR64 : RegisterClass<"AArch64",
- [f64, v2f32, v2i32, v4i16, v8i8, v1i64, v1f64],
- 64, (sequence "D%u", 0, 31)>;
+ def "64" : RegisterOperand<Reg64, "printImplicitlyTypedVectorList"> {
+ let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_64AsmOperand");
+ }
-def FPR128 : RegisterClass<"AArch64",
- [f128, v2f64, v2i64, v4f32, v4i32, v8i16, v16i8],
- 128, (sequence "Q%u", 0, 31)>;
+ def _128AsmOperand : AsmOperandClass {
+ let Name = NAME # "128";
+ let PredicateMethod = "isImplicitlyTypedVectorList<" # count # ">";
+ let RenderMethod = "addVectorList128Operands<" # count # ">";
+ }
+
+ def "128" : RegisterOperand<Reg128, "printImplicitlyTypedVectorList"> {
+ let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_128AsmOperand");
+ }
-def FPR64Lo : RegisterClass<"AArch64",
- [f64, v2f32, v2i32, v4i16, v8i8, v1i64, v1f64],
- 64, (sequence "D%u", 0, 15)>;
+ // 64-bit register lists with explicit type.
-def FPR128Lo : RegisterClass<"AArch64",
- [f128, v2f64, v2i64, v4f32, v4i32, v8i16, v16i8],
- 128, (sequence "Q%u", 0, 15)>;
+ // { v0.8b, v1.8b }
+ def _8bAsmOperand : TypedVecListAsmOperand<count, 64, 8, "b">;
+ def "8b" : TypedVecListRegOperand<Reg64, 8, "b"> {
+ let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8bAsmOperand");
+ }
-//===----------------------------------------------------------------------===//
-// Vector registers:
-//===----------------------------------------------------------------------===//
+ // { v0.4h, v1.4h }
+ def _4hAsmOperand : TypedVecListAsmOperand<count, 64, 4, "h">;
+ def "4h" : TypedVecListRegOperand<Reg64, 4, "h"> {
+ let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4hAsmOperand");
+ }
-def VPR64AsmOperand : AsmOperandClass {
- let Name = "VPR";
- let PredicateMethod = "isReg";
- let RenderMethod = "addRegOperands";
-}
+ // { v0.2s, v1.2s }
+ def _2sAsmOperand : TypedVecListAsmOperand<count, 64, 2, "s">;
+ def "2s" : TypedVecListRegOperand<Reg64, 2, "s"> {
+ let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2sAsmOperand");
+ }
+
+ // { v0.1d, v1.1d }
+ def _1dAsmOperand : TypedVecListAsmOperand<count, 64, 1, "d">;
+ def "1d" : TypedVecListRegOperand<Reg64, 1, "d"> {
+ let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_1dAsmOperand");
+ }
-def VPR64 : RegisterOperand<FPR64, "printVPRRegister">;
+ // 128-bit register lists with explicit type
-def VPR128 : RegisterOperand<FPR128, "printVPRRegister">;
+ // { v0.16b, v1.16b }
+ def _16bAsmOperand : TypedVecListAsmOperand<count, 128, 16, "b">;
+ def "16b" : TypedVecListRegOperand<Reg128, 16, "b"> {
+ let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_16bAsmOperand");
+ }
-def VPR64Lo : RegisterOperand<FPR64Lo, "printVPRRegister">;
+ // { v0.8h, v1.8h }
+ def _8hAsmOperand : TypedVecListAsmOperand<count, 128, 8, "h">;
+ def "8h" : TypedVecListRegOperand<Reg128, 8, "h"> {
+ let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8hAsmOperand");
+ }
-def VPR128Lo : RegisterOperand<FPR128Lo, "printVPRRegister">;
+ // { v0.4s, v1.4s }
+ def _4sAsmOperand : TypedVecListAsmOperand<count, 128, 4, "s">;
+ def "4s" : TypedVecListRegOperand<Reg128, 4, "s"> {
+ let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4sAsmOperand");
+ }
-// Flags register
-def NZCV : Register<"nzcv"> {
- let Namespace = "AArch64";
-}
+ // { v0.2d, v1.2d }
+ def _2dAsmOperand : TypedVecListAsmOperand<count, 128, 2, "d">;
+ def "2d" : TypedVecListRegOperand<Reg128, 2, "d"> {
+ let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2dAsmOperand");
+ }
-def FlagClass : RegisterClass<"AArch64", [i32], 32, (add NZCV)> {
- let CopyCost = -1;
- let isAllocatable = 0;
-}
+ // { v0.b, v1.b }
+ def _bAsmOperand : TypedVecListAsmOperand<count, 128, 0, "b">;
+ def "b" : TypedVecListRegOperand<Reg128, 0, "b"> {
+ let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_bAsmOperand");
+ }
-//===----------------------------------------------------------------------===//
-// Consecutive vector registers
-//===----------------------------------------------------------------------===//
-// 2 Consecutive 64-bit registers: D0_D1, D1_D2, ..., D30_D31
-def Tuples2D : RegisterTuples<[dsub_0, dsub_1],
- [(rotl FPR64, 0), (rotl FPR64, 1)]>;
-
-// 3 Consecutive 64-bit registers: D0_D1_D2, ..., D31_D0_D1
-def Tuples3D : RegisterTuples<[dsub_0, dsub_1, dsub_2],
- [(rotl FPR64, 0), (rotl FPR64, 1),
- (rotl FPR64, 2)]>;
-
-// 4 Consecutive 64-bit registers: D0_D1_D2_D3, ..., D31_D0_D1_D2
-def Tuples4D : RegisterTuples<[dsub_0, dsub_1, dsub_2, dsub_3],
- [(rotl FPR64, 0), (rotl FPR64, 1),
- (rotl FPR64, 2), (rotl FPR64, 3)]>;
-
-// 2 Consecutive 128-bit registers: Q0_Q1, Q1_Q2, ..., Q30_Q31
-def Tuples2Q : RegisterTuples<[qsub_0, qsub_1],
- [(rotl FPR128, 0), (rotl FPR128, 1)]>;
-
-// 3 Consecutive 128-bit registers: Q0_Q1_Q2, ..., Q31_Q0_Q1
-def Tuples3Q : RegisterTuples<[qsub_0, qsub_1, qsub_2],
- [(rotl FPR128, 0), (rotl FPR128, 1),
- (rotl FPR128, 2)]>;
-
-// 4 Consecutive 128-bit registers: Q0_Q1_Q2_Q3, ..., Q31_Q0_Q1_Q2
-def Tuples4Q : RegisterTuples<[qsub_0, qsub_1, qsub_2, qsub_3],
- [(rotl FPR128, 0), (rotl FPR128, 1),
- (rotl FPR128, 2), (rotl FPR128, 3)]>;
-
-// The followings are super register classes to model 2/3/4 consecutive
-// 64-bit/128-bit registers.
-
-def DPair : RegisterClass<"AArch64", [v2i64], 64, (add Tuples2D)>;
-
-def DTriple : RegisterClass<"AArch64", [untyped], 64, (add Tuples3D)> {
- let Size = 192; // 3 x 64 bits, we have no predefined type of that size.
-}
-
-def DQuad : RegisterClass<"AArch64", [v4i64], 64, (add Tuples4D)>;
-
-def QPair : RegisterClass<"AArch64", [v4i64], 128, (add Tuples2Q)>;
-
-def QTriple : RegisterClass<"AArch64", [untyped], 128, (add Tuples3Q)> {
- let Size = 384; // 3 x 128 bits, we have no predefined type of that size.
-}
-
-def QQuad : RegisterClass<"AArch64", [v8i64], 128, (add Tuples4Q)>;
-
-
-// The followings are vector list operands
-multiclass VectorList_operands<string PREFIX, string LAYOUT, int Count,
- RegisterClass RegList> {
- def _asmoperand : AsmOperandClass {
- let Name = PREFIX # LAYOUT # Count;
- let RenderMethod = "addVectorListOperands";
- let PredicateMethod =
- "isVectorList<A64Layout::VL_" # LAYOUT # ", " # Count # ">";
- let ParserMethod = "ParseVectorList";
+ // { v0.h, v1.h }
+ def _hAsmOperand : TypedVecListAsmOperand<count, 128, 0, "h">;
+ def "h" : TypedVecListRegOperand<Reg128, 0, "h"> {
+ let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_hAsmOperand");
}
- def _operand : RegisterOperand<RegList,
- "printVectorList<A64Layout::VL_" # LAYOUT # ", " # Count # ">"> {
- let ParserMatchClass =
- !cast<AsmOperandClass>(PREFIX # LAYOUT # "_asmoperand");
+ // { v0.s, v1.s }
+ def _sAsmOperand : TypedVecListAsmOperand<count, 128, 0, "s">;
+ def "s" : TypedVecListRegOperand<Reg128, 0, "s"> {
+ let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_sAsmOperand");
}
-}
-multiclass VectorList_BHSD<string PREFIX, int Count, RegisterClass DRegList,
- RegisterClass QRegList> {
- defm 8B : VectorList_operands<PREFIX, "8B", Count, DRegList>;
- defm 4H : VectorList_operands<PREFIX, "4H", Count, DRegList>;
- defm 2S : VectorList_operands<PREFIX, "2S", Count, DRegList>;
- defm 1D : VectorList_operands<PREFIX, "1D", Count, DRegList>;
- defm 16B : VectorList_operands<PREFIX, "16B", Count, QRegList>;
- defm 8H : VectorList_operands<PREFIX, "8H", Count, QRegList>;
- defm 4S : VectorList_operands<PREFIX, "4S", Count, QRegList>;
- defm 2D : VectorList_operands<PREFIX, "2D", Count, QRegList>;
+ // { v0.d, v1.d }
+ def _dAsmOperand : TypedVecListAsmOperand<count, 128, 0, "d">;
+ def "d" : TypedVecListRegOperand<Reg128, 0, "d"> {
+ let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_dAsmOperand");
+ }
+
+
}
-// Vector list operand with 1/2/3/4 registers: VOne8B_operand,..., VQuad2D_operand
-defm VOne : VectorList_BHSD<"VOne", 1, FPR64, FPR128>;
-defm VPair : VectorList_BHSD<"VPair", 2, DPair, QPair>;
-defm VTriple : VectorList_BHSD<"VTriple", 3, DTriple, QTriple>;
-defm VQuad : VectorList_BHSD<"VQuad", 4, DQuad, QQuad>; \ No newline at end of file
+defm VecListOne : VectorList<1, FPR64, FPR128>;
+defm VecListTwo : VectorList<2, DD, QQ>;
+defm VecListThree : VectorList<3, DDD, QQQ>;
+defm VecListFour : VectorList<4, DDDD, QQQQ>;
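+
+// For illustration (names derived from the VectorList multiclass above, not
+// new definitions): each defm expands into RegisterOperands such as
+// VecListTwo64, VecListTwo128, VecListTwo8b, ..., VecListTwo2d, which match
+// assembly vector lists like { v0.8b, v1.8b } or the implicitly typed
+// { v0, v1 }.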
+
+
+// Register operand versions of the scalar FP registers.
+def FPR16Op : RegisterOperand<FPR16, "printOperand">;
+def FPR32Op : RegisterOperand<FPR32, "printOperand">;
+def FPR64Op : RegisterOperand<FPR64, "printOperand">;
+def FPR128Op : RegisterOperand<FPR128, "printOperand">;
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64SchedA53.td b/contrib/llvm/lib/Target/AArch64/AArch64SchedA53.td
new file mode 100644
index 0000000..d709bee
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64SchedA53.td
@@ -0,0 +1,291 @@
+//==- AArch64SchedA53.td - Cortex-A53 Scheduling Definitions -*- tablegen -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the itinerary class data for the ARM Cortex A53 processors.
+//
+//===----------------------------------------------------------------------===//
+
+// ===---------------------------------------------------------------------===//
+// The following definitions describe the simpler per-operand machine model.
+// This works with MachineScheduler. See MCSchedModel.h for details.
+
+// Cortex-A53 machine model for scheduling and other instruction cost heuristics.
+def CortexA53Model : SchedMachineModel {
+ let MicroOpBufferSize = 0; // Explicitly set to zero since A53 is in-order.
+ let IssueWidth = 2; // 2 micro-ops are dispatched per cycle.
+ let MinLatency = 1; // OperandCycles are interpreted as MinLatency.
+ let LoadLatency = 3; // Optimistic load latency assuming bypass.
+ // This is overridden by OperandCycles if the
+ // Itineraries are queried instead.
+ let MispredictPenalty = 9; // Based on "Cortex-A53 Software Optimisation
+ // Specification - Instruction Timings"
+ // v 1.0 Spreadsheet
+}
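+
+// A minimal sketch of how a SchedMachineModel is typically attached to a
+// processor (the actual wiring lives in AArch64.td, and the feature list here
+// is an assumption for illustration):
+//   def : ProcessorModel<"cortex-a53", CortexA53Model,
+//                        [FeatureFPARMv8, FeatureNEON, FeatureCRC]>;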
+
+
+//===----------------------------------------------------------------------===//
+// Define each kind of processor resource and number available.
+
+// Model each pipeline as a ProcResource with BufferSize = 0, since
+// Cortex-A53 is in-order.
+
+def A53UnitALU : ProcResource<2> { let BufferSize = 0; } // Int ALU
+def A53UnitMAC : ProcResource<1> { let BufferSize = 0; } // Int MAC
+def A53UnitDiv : ProcResource<1> { let BufferSize = 0; } // Int Division
+def A53UnitLdSt : ProcResource<1> { let BufferSize = 0; } // Load/Store
+def A53UnitB : ProcResource<1> { let BufferSize = 0; } // Branch
+def A53UnitFPALU : ProcResource<1> { let BufferSize = 0; } // FP ALU
+def A53UnitFPMDS : ProcResource<1> { let BufferSize = 0; } // FP Mult/Div/Sqrt
+
+
+//===----------------------------------------------------------------------===//
+// Subtarget-specific SchedWrite types which both map the ProcResources and
+// set the latency.
+
+let SchedModel = CortexA53Model in {
+
+// ALU - Despite having a full latency of 4, most of the ALU instructions can
+// forward a cycle earlier and then two cycles earlier in the case of a
+// shift-only instruction. These latencies will be incorrect when the
+// result cannot be forwarded, but modeling isn't rocket surgery.
+def : WriteRes<WriteImm, [A53UnitALU]> { let Latency = 3; }
+def : WriteRes<WriteI, [A53UnitALU]> { let Latency = 3; }
+def : WriteRes<WriteISReg, [A53UnitALU]> { let Latency = 3; }
+def : WriteRes<WriteIEReg, [A53UnitALU]> { let Latency = 3; }
+def : WriteRes<WriteIS, [A53UnitALU]> { let Latency = 2; }
+def : WriteRes<WriteExtr, [A53UnitALU]> { let Latency = 3; }
+
+// MAC
+def : WriteRes<WriteIM32, [A53UnitMAC]> { let Latency = 4; }
+def : WriteRes<WriteIM64, [A53UnitMAC]> { let Latency = 4; }
+
+// Div
+def : WriteRes<WriteID32, [A53UnitDiv]> { let Latency = 4; }
+def : WriteRes<WriteID64, [A53UnitDiv]> { let Latency = 4; }
+
+// Load
+def : WriteRes<WriteLD, [A53UnitLdSt]> { let Latency = 4; }
+def : WriteRes<WriteLDIdx, [A53UnitLdSt]> { let Latency = 4; }
+def : WriteRes<WriteLDHi, [A53UnitLdSt]> { let Latency = 4; }
+
+// Vector Load - Vector loads take 1-5 cycles to issue. For the WriteVLD below,
+//               we choose the median of 3, which makes the latency 6. This may
+//               be modeled more carefully in the future. The remaining
+//               A53WriteVLD# types represent the 1-5 cycle issues explicitly.
+def : WriteRes<WriteVLD, [A53UnitLdSt]> { let Latency = 6;
+ let ResourceCycles = [3]; }
+def A53WriteVLD1 : SchedWriteRes<[A53UnitLdSt]> { let Latency = 4; }
+def A53WriteVLD2 : SchedWriteRes<[A53UnitLdSt]> { let Latency = 5;
+ let ResourceCycles = [2]; }
+def A53WriteVLD3 : SchedWriteRes<[A53UnitLdSt]> { let Latency = 6;
+ let ResourceCycles = [3]; }
+def A53WriteVLD4 : SchedWriteRes<[A53UnitLdSt]> { let Latency = 7;
+ let ResourceCycles = [4]; }
+def A53WriteVLD5 : SchedWriteRes<[A53UnitLdSt]> { let Latency = 8;
+ let ResourceCycles = [5]; }
+
+// Pre/Post Indexing - Performed as part of address generation, which is
+//                     already accounted for in the WriteST* latencies below.
+def : WriteRes<WriteAdr, []> { let Latency = 0; }
+
+// Store
+def : WriteRes<WriteST, [A53UnitLdSt]> { let Latency = 4; }
+def : WriteRes<WriteSTP, [A53UnitLdSt]> { let Latency = 4; }
+def : WriteRes<WriteSTIdx, [A53UnitLdSt]> { let Latency = 4; }
+def : WriteRes<WriteSTX, [A53UnitLdSt]> { let Latency = 4; }
+
+// Vector Store - Similar to vector loads, can take 1-3 cycles to issue.
+def : WriteRes<WriteVST, [A53UnitLdSt]> { let Latency = 5;
+ let ResourceCycles = [2];}
+def A53WriteVST1 : SchedWriteRes<[A53UnitLdSt]> { let Latency = 4; }
+def A53WriteVST2 : SchedWriteRes<[A53UnitLdSt]> { let Latency = 5;
+ let ResourceCycles = [2]; }
+def A53WriteVST3 : SchedWriteRes<[A53UnitLdSt]> { let Latency = 6;
+ let ResourceCycles = [3]; }
+
+// Branch
+def : WriteRes<WriteBr, [A53UnitB]>;
+def : WriteRes<WriteBrReg, [A53UnitB]>;
+def : WriteRes<WriteSys, [A53UnitB]>;
+def : WriteRes<WriteBarrier, [A53UnitB]>;
+def : WriteRes<WriteHint, [A53UnitB]>;
+
+// FP ALU
+def : WriteRes<WriteF, [A53UnitFPALU]> { let Latency = 6; }
+def : WriteRes<WriteFCmp, [A53UnitFPALU]> { let Latency = 6; }
+def : WriteRes<WriteFCvt, [A53UnitFPALU]> { let Latency = 6; }
+def : WriteRes<WriteFCopy, [A53UnitFPALU]> { let Latency = 6; }
+def : WriteRes<WriteFImm, [A53UnitFPALU]> { let Latency = 6; }
+def : WriteRes<WriteV, [A53UnitFPALU]> { let Latency = 6; }
+
+// FP Mul, Div, Sqrt
+def : WriteRes<WriteFMul, [A53UnitFPMDS]> { let Latency = 6; }
+def : WriteRes<WriteFDiv, [A53UnitFPMDS]> { let Latency = 33;
+ let ResourceCycles = [29]; }
+def A53WriteFMAC : SchedWriteRes<[A53UnitFPMDS]> { let Latency = 10; }
+def A53WriteFDivSP : SchedWriteRes<[A53UnitFPMDS]> { let Latency = 18;
+ let ResourceCycles = [14]; }
+def A53WriteFDivDP : SchedWriteRes<[A53UnitFPMDS]> { let Latency = 33;
+ let ResourceCycles = [29]; }
+def A53WriteFSqrtSP : SchedWriteRes<[A53UnitFPMDS]> { let Latency = 17;
+ let ResourceCycles = [13]; }
+def A53WriteFSqrtDP : SchedWriteRes<[A53UnitFPMDS]> { let Latency = 32;
+ let ResourceCycles = [28]; }
+
+//===----------------------------------------------------------------------===//
+// Subtarget-specific SchedRead types.
+
+// No forwarding for these reads.
+def : ReadAdvance<ReadExtrHi, 0>;
+def : ReadAdvance<ReadAdrBase, 0>;
+def : ReadAdvance<ReadVLD, 0>;
+
+// ALU - Most operands in the ALU pipes are not needed for two cycles. Shiftable
+// operands are needed one cycle later if and only if they are to be
+// shifted. Otherwise, they too are needed two cycles later. This same
+// ReadAdvance applies to Extended registers as well, even though there is
+// a separate SchedPredicate for them.
+def : ReadAdvance<ReadI, 2, [WriteImm,WriteI,
+ WriteISReg, WriteIEReg,WriteIS,
+ WriteID32,WriteID64,
+ WriteIM32,WriteIM64]>;
+def A53ReadShifted : SchedReadAdvance<1, [WriteImm,WriteI,
+ WriteISReg, WriteIEReg,WriteIS,
+ WriteID32,WriteID64,
+ WriteIM32,WriteIM64]>;
+def A53ReadNotShifted : SchedReadAdvance<2, [WriteImm,WriteI,
+ WriteISReg, WriteIEReg,WriteIS,
+ WriteID32,WriteID64,
+ WriteIM32,WriteIM64]>;
+def A53ReadISReg : SchedReadVariant<[
+ SchedVar<RegShiftedPred, [A53ReadShifted]>,
+ SchedVar<NoSchedPred, [A53ReadNotShifted]>]>;
+def : SchedAlias<ReadISReg, A53ReadISReg>;
+
+def A53ReadIEReg : SchedReadVariant<[
+ SchedVar<RegExtendedPred, [A53ReadShifted]>,
+ SchedVar<NoSchedPred, [A53ReadNotShifted]>]>;
+def : SchedAlias<ReadIEReg, A53ReadIEReg>;
+
+// MAC - Operands are generally needed one cycle later in the MAC pipe.
+// Accumulator operands are needed two cycles later.
+def : ReadAdvance<ReadIM, 1, [WriteImm,WriteI,
+ WriteISReg, WriteIEReg,WriteIS,
+ WriteID32,WriteID64,
+ WriteIM32,WriteIM64]>;
+def : ReadAdvance<ReadIMA, 2, [WriteImm,WriteI,
+ WriteISReg, WriteIEReg,WriteIS,
+ WriteID32,WriteID64,
+ WriteIM32,WriteIM64]>;
+
+// Div
+def : ReadAdvance<ReadID, 1, [WriteImm,WriteI,
+ WriteISReg, WriteIEReg,WriteIS,
+ WriteID32,WriteID64,
+ WriteIM32,WriteIM64]>;
+
+//===----------------------------------------------------------------------===//
+// Subtarget-specific InstRWs.
+
+//---
+// Miscellaneous
+//---
+def : InstRW<[WriteI], (instrs COPY)>;
+
+//---
+// Vector Loads
+//---
+def : InstRW<[A53WriteVLD1], (instregex "LD1i(8|16|32|64)$")>;
+def : InstRW<[A53WriteVLD1], (instregex "LD1Rv(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[A53WriteVLD1], (instregex "LD1Onev(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[A53WriteVLD2], (instregex "LD1Twov(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[A53WriteVLD3], (instregex "LD1Threev(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[A53WriteVLD4], (instregex "LD1Fourv(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[A53WriteVLD1, WriteAdr], (instregex "LD1i(8|16|32|64)_POST$")>;
+def : InstRW<[A53WriteVLD1, WriteAdr], (instregex "LD1Rv(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+def : InstRW<[A53WriteVLD1, WriteAdr], (instregex "LD1Onev(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+def : InstRW<[A53WriteVLD2, WriteAdr], (instregex "LD1Twov(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+def : InstRW<[A53WriteVLD3, WriteAdr], (instregex "LD1Threev(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+def : InstRW<[A53WriteVLD4, WriteAdr], (instregex "LD1Fourv(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[A53WriteVLD1], (instregex "LD2i(8|16|32|64)$")>;
+def : InstRW<[A53WriteVLD1], (instregex "LD2Rv(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[A53WriteVLD2], (instregex "LD2Twov(8b|4h|2s)$")>;
+def : InstRW<[A53WriteVLD4], (instregex "LD2Twov(16b|8h|4s|2d)$")>;
+def : InstRW<[A53WriteVLD1, WriteAdr], (instregex "LD2i(8|16|32|64)(_POST)?$")>;
+def : InstRW<[A53WriteVLD1, WriteAdr], (instregex "LD2Rv(8b|4h|2s|1d|16b|8h|4s|2d)(_POST)?$")>;
+def : InstRW<[A53WriteVLD2, WriteAdr], (instregex "LD2Twov(8b|4h|2s)(_POST)?$")>;
+def : InstRW<[A53WriteVLD4, WriteAdr], (instregex "LD2Twov(16b|8h|4s|2d)(_POST)?$")>;
+
+def : InstRW<[A53WriteVLD2], (instregex "LD3i(8|16|32|64)$")>;
+def : InstRW<[A53WriteVLD2], (instregex "LD3Rv(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[A53WriteVLD4], (instregex "LD3Threev(8b|4h|2s|1d|16b|8h|4s)$")>;
+def : InstRW<[A53WriteVLD3], (instregex "LD3Threev(2d)$")>;
+def : InstRW<[A53WriteVLD2, WriteAdr], (instregex "LD3i(8|16|32|64)_POST$")>;
+def : InstRW<[A53WriteVLD2, WriteAdr], (instregex "LD3Rv(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+def : InstRW<[A53WriteVLD4, WriteAdr], (instregex "LD3Threev(8b|4h|2s|1d|16b|8h|4s)_POST$")>;
+def : InstRW<[A53WriteVLD3, WriteAdr], (instregex "LD3Threev(2d)_POST$")>;
+
+def : InstRW<[A53WriteVLD2], (instregex "LD4i(8|16|32|64)$")>;
+def : InstRW<[A53WriteVLD2], (instregex "LD4Rv(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[A53WriteVLD5], (instregex "LD4Fourv(8b|4h|2s|1d|16b|8h|4s)$")>;
+def : InstRW<[A53WriteVLD4], (instregex "LD4Fourv(2d)$")>;
+def : InstRW<[A53WriteVLD2, WriteAdr], (instregex "LD4i(8|16|32|64)_POST$")>;
+def : InstRW<[A53WriteVLD2, WriteAdr], (instregex "LD4Rv(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+def : InstRW<[A53WriteVLD5, WriteAdr], (instregex "LD4Fourv(8b|4h|2s|1d|16b|8h|4s)_POST$")>;
+def : InstRW<[A53WriteVLD4, WriteAdr], (instregex "LD4Fourv(2d)_POST$")>;
+
+//---
+// Vector Stores
+//---
+def : InstRW<[A53WriteVST1], (instregex "ST1i(8|16|32|64)$")>;
+def : InstRW<[A53WriteVST1], (instregex "ST1Onev(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[A53WriteVST1], (instregex "ST1Twov(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[A53WriteVST2], (instregex "ST1Threev(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[A53WriteVST2], (instregex "ST1Fourv(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[A53WriteVST1, WriteAdr], (instregex "ST1i(8|16|32|64)_POST$")>;
+def : InstRW<[A53WriteVST1, WriteAdr], (instregex "ST1Onev(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+def : InstRW<[A53WriteVST1, WriteAdr], (instregex "ST1Twov(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+def : InstRW<[A53WriteVST2, WriteAdr], (instregex "ST1Threev(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+def : InstRW<[A53WriteVST2, WriteAdr], (instregex "ST1Fourv(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[A53WriteVST1], (instregex "ST2i(8|16|32|64)$")>;
+def : InstRW<[A53WriteVST1], (instregex "ST2Twov(8b|4h|2s)$")>;
+def : InstRW<[A53WriteVST2], (instregex "ST2Twov(16b|8h|4s|2d)$")>;
+def : InstRW<[A53WriteVST1, WriteAdr], (instregex "ST2i(8|16|32|64)_POST$")>;
+def : InstRW<[A53WriteVST1, WriteAdr], (instregex "ST2Twov(8b|4h|2s)_POST$")>;
+def : InstRW<[A53WriteVST2, WriteAdr], (instregex "ST2Twov(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[A53WriteVST2], (instregex "ST3i(8|16|32|64)$")>;
+def : InstRW<[A53WriteVST3], (instregex "ST3Threev(8b|4h|2s|1d|16b|8h|4s)$")>;
+def : InstRW<[A53WriteVST2], (instregex "ST3Threev(2d)$")>;
+def : InstRW<[A53WriteVST2, WriteAdr], (instregex "ST3i(8|16|32|64)_POST$")>;
+def : InstRW<[A53WriteVST3, WriteAdr], (instregex "ST3Threev(8b|4h|2s|1d|16b|8h|4s)_POST$")>;
+def : InstRW<[A53WriteVST2, WriteAdr], (instregex "ST3Threev(2d)_POST$")>;
+
+def : InstRW<[A53WriteVST2], (instregex "ST4i(8|16|32|64)$")>;
+def : InstRW<[A53WriteVST3], (instregex "ST4Fourv(8b|4h|2s|1d|16b|8h|4s)$")>;
+def : InstRW<[A53WriteVST2], (instregex "ST4Fourv(2d)$")>;
+def : InstRW<[A53WriteVST2, WriteAdr], (instregex "ST4i(8|16|32|64)_POST$")>;
+def : InstRW<[A53WriteVST3, WriteAdr], (instregex "ST4Fourv(8b|4h|2s|1d|16b|8h|4s)_POST$")>;
+def : InstRW<[A53WriteVST2, WriteAdr], (instregex "ST4Fourv(2d)_POST$")>;
+
+//---
+// Floating Point MAC, DIV, SQRT
+//---
+def : InstRW<[A53WriteFMAC], (instregex "^FN?M(ADD|SUB).*")>;
+def : InstRW<[A53WriteFMAC], (instregex "^FML(A|S).*")>;
+def : InstRW<[A53WriteFDivSP], (instrs FDIVSrr)>;
+def : InstRW<[A53WriteFDivDP], (instrs FDIVDrr)>;
+def : InstRW<[A53WriteFDivSP], (instregex "^FDIVv.*32$")>;
+def : InstRW<[A53WriteFDivDP], (instregex "^FDIVv.*64$")>;
+def : InstRW<[A53WriteFSqrtSP], (instregex "^.*SQRT.*32$")>;
+def : InstRW<[A53WriteFSqrtDP], (instregex "^.*SQRT.*64$")>;
+
+}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64SchedA57.td b/contrib/llvm/lib/Target/AArch64/AArch64SchedA57.td
new file mode 100644
index 0000000..8209f96
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64SchedA57.td
@@ -0,0 +1,304 @@
+//=- AArch64SchedA57.td - ARM Cortex-A57 Scheduling Defs -----*- tablegen -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the machine model for ARM Cortex-A57 to support
+// instruction scheduling and other instruction cost heuristics.
+//
+//===----------------------------------------------------------------------===//
+
+def CortexA57Model : SchedMachineModel {
+ let IssueWidth = 8; // 3-way decode and 8-way issue
+ let MicroOpBufferSize = 128; // 128 micro-op re-order buffer
+ let LoadLatency = 4; // Optimistic load latency
+ let MispredictPenalty = 14; // Fetch + Decode/Rename/Dispatch + Branch
+}
+
+//===----------------------------------------------------------------------===//
+// Define each kind of processor resource and number available on Cortex-A57.
+// Cortex-A57 has 8 pipelines, each with its own 8-entry queue where micro-ops
+// wait for their operands and then issue out-of-order.
+
+def A57UnitB : ProcResource<1> { let BufferSize = 8; } // Type B micro-ops
+def A57UnitI : ProcResource<2> { let BufferSize = 8; } // Type I micro-ops
+def A57UnitM : ProcResource<1> { let BufferSize = 8; } // Type M micro-ops
+def A57UnitL : ProcResource<1> { let BufferSize = 8; } // Type L micro-ops
+def A57UnitS : ProcResource<1> { let BufferSize = 8; } // Type S micro-ops
+def A57UnitX : ProcResource<1> { let BufferSize = 8; } // Type X micro-ops
+def A57UnitW : ProcResource<1> { let BufferSize = 8; } // Type W micro-ops
+let SchedModel = CortexA57Model in {
+ def A57UnitV : ProcResGroup<[A57UnitX, A57UnitW]>; // Type V micro-ops
+}
+
+
+let SchedModel = CortexA57Model in {
+
+//===----------------------------------------------------------------------===//
+// Define customized scheduler read/write types specific to the Cortex-A57.
+
+include "AArch64SchedA57WriteRes.td"
+
+//===----------------------------------------------------------------------===//
+// Map the target-defined scheduler read/write resources and latency for
+// Cortex-A57. The Cortex-A57 types are directly associated with resources, so
+// defining the aliases precludes the need for mapping them using WriteRes. The
+// aliases are sufficient for creating a coarse, working model. As the model
+// evolves, InstRWs will be used to override these SchedAliases.
+
+def : SchedAlias<WriteImm, A57Write_1cyc_1I>;
+def : SchedAlias<WriteI, A57Write_1cyc_1I>;
+def : SchedAlias<WriteISReg, A57Write_2cyc_1M>;
+def : SchedAlias<WriteIEReg, A57Write_2cyc_1M>;
+def : SchedAlias<WriteExtr, A57Write_1cyc_1I>;
+def : SchedAlias<WriteIS, A57Write_1cyc_1I>;
+def : SchedAlias<WriteID32, A57Write_19cyc_1M>;
+def : SchedAlias<WriteID64, A57Write_35cyc_1M>;
+def : SchedAlias<WriteIM32, A57Write_3cyc_1M>;
+def : SchedAlias<WriteIM64, A57Write_5cyc_1M>;
+def : SchedAlias<WriteBr, A57Write_1cyc_1B>;
+def : SchedAlias<WriteBrReg, A57Write_1cyc_1B>;
+def : SchedAlias<WriteLD, A57Write_4cyc_1L>;
+def : SchedAlias<WriteST, A57Write_1cyc_1S>;
+def : SchedAlias<WriteSTP, A57Write_1cyc_1S>;
+def : SchedAlias<WriteAdr, A57Write_1cyc_1I>;
+def : SchedAlias<WriteLDIdx, A57Write_4cyc_1I_1L>;
+def : SchedAlias<WriteSTIdx, A57Write_1cyc_1I_1S>;
+def : SchedAlias<WriteF, A57Write_3cyc_1V>;
+def : SchedAlias<WriteFCmp, A57Write_3cyc_1V>;
+def : SchedAlias<WriteFCvt, A57Write_5cyc_1V>;
+def : SchedAlias<WriteFCopy, A57Write_3cyc_1V>;
+def : SchedAlias<WriteFImm, A57Write_3cyc_1V>;
+def : SchedAlias<WriteFMul, A57Write_5cyc_1V>;
+def : SchedAlias<WriteFDiv, A57Write_18cyc_1X>;
+def : SchedAlias<WriteV, A57Write_3cyc_1V>;
+def : SchedAlias<WriteVLD, A57Write_5cyc_1L>;
+def : SchedAlias<WriteVST, A57Write_1cyc_1S>;
+
+def : WriteRes<WriteSys, []> { let Latency = 1; }
+def : WriteRes<WriteBarrier, []> { let Latency = 1; }
+def : WriteRes<WriteHint, []> { let Latency = 1; }
+
+def : WriteRes<WriteLDHi, []> { let Latency = 4; }
+
+// Forwarding logic is not [yet] explicitly modeled beyond what is captured
+// in the latencies of the A57 Generic SchedWriteRes's.
+def : ReadAdvance<ReadI, 0>;
+def : ReadAdvance<ReadISReg, 0>;
+def : ReadAdvance<ReadIEReg, 0>;
+def : ReadAdvance<ReadIM, 0>;
+def : ReadAdvance<ReadIMA, 0>;
+def : ReadAdvance<ReadID, 0>;
+def : ReadAdvance<ReadExtrHi, 0>;
+def : ReadAdvance<ReadAdrBase, 0>;
+def : ReadAdvance<ReadVLD, 0>;
+
+
+//===----------------------------------------------------------------------===//
+// Specialize the coarse model by associating instruction groups with the
+// subtarget-defined types. As the model is refined, this will override most
+// of the above SchedAlias mappings.
+
+// Miscellaneous
+// -----------------------------------------------------------------------------
+
+def : InstRW<[WriteI], (instrs COPY)>;
+
+
+// Branch Instructions
+// -----------------------------------------------------------------------------
+
+def : InstRW<[A57Write_1cyc_1B_1I], (instrs BL)>;
+def : InstRW<[A57Write_2cyc_1B_1I], (instrs BLR)>;
+
+
+// Divide and Multiply Instructions
+// -----------------------------------------------------------------------------
+
+// Multiply high
+def : InstRW<[A57Write_6cyc_1M], (instrs SMULHrr, UMULHrr)>;
+
+
+// Miscellaneous Data-Processing Instructions
+// -----------------------------------------------------------------------------
+
+def : InstRW<[A57Write_1cyc_1I], (instrs EXTRWrri)>;
+def : InstRW<[A57Write_3cyc_1I_1M], (instrs EXTRXrri)>;
+def : InstRW<[A57Write_2cyc_1M], (instregex "BFM")>;
+
+
+// Cryptography Extensions
+// -----------------------------------------------------------------------------
+
+def : InstRW<[A57Write_3cyc_1W], (instregex "CRC32")>;
+
+
+// Vector Load
+// -----------------------------------------------------------------------------
+
+def : InstRW<[A57Write_8cyc_1L_1V], (instregex "LD1i(8|16|32)$")>;
+def : InstRW<[A57Write_8cyc_1L_1V, WriteAdr], (instregex "LD1i(8|16|32)_POST$")>;
+def : InstRW<[A57Write_5cyc_1L], (instregex "LD1i(64)$")>;
+def : InstRW<[A57Write_5cyc_1L, WriteAdr], (instregex "LD1i(64)_POST$")>;
+
+def : InstRW<[A57Write_8cyc_1L_1V], (instregex "LD1Rv(8b|4h|2s)$")>;
+def : InstRW<[A57Write_8cyc_1L_1V, WriteAdr], (instregex "LD1Rv(8b|4h|2s)_POST$")>;
+def : InstRW<[A57Write_5cyc_1L], (instregex "LD1Rv(1d)$")>;
+def : InstRW<[A57Write_5cyc_1L, WriteAdr], (instregex "LD1Rv(1d)_POST$")>;
+def : InstRW<[A57Write_8cyc_1L_1V], (instregex "LD1Rv(16b|8h|4s|2d)$")>;
+def : InstRW<[A57Write_8cyc_1L_1V, WriteAdr], (instregex "LD1Rv(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[A57Write_5cyc_1L], (instregex "LD1Onev(8b|4h|2s|1d)$")>;
+def : InstRW<[A57Write_5cyc_1L, WriteAdr], (instregex "LD1Onev(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[A57Write_5cyc_1L], (instregex "LD1Onev(16b|8h|4s|2d)$")>;
+def : InstRW<[A57Write_5cyc_1L, WriteAdr], (instregex "LD1Onev(16b|8h|4s|2d)_POST$")>;
+def : InstRW<[A57Write_5cyc_1L], (instregex "LD1Twov(8b|4h|2s|1d)$")>;
+def : InstRW<[A57Write_5cyc_1L, WriteAdr], (instregex "LD1Twov(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[A57Write_6cyc_2L], (instregex "LD1Twov(16b|8h|4s|2d)$")>;
+def : InstRW<[A57Write_6cyc_2L, WriteAdr], (instregex "LD1Twov(16b|8h|4s|2d)_POST$")>;
+def : InstRW<[A57Write_6cyc_2L], (instregex "LD1Threev(8b|4h|2s|1d)$")>;
+def : InstRW<[A57Write_6cyc_2L, WriteAdr], (instregex "LD1Threev(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[A57Write_7cyc_3L], (instregex "LD1Threev(16b|8h|4s|2d)$")>;
+def : InstRW<[A57Write_7cyc_3L, WriteAdr], (instregex "LD1Threev(16b|8h|4s|2d)_POST$")>;
+def : InstRW<[A57Write_6cyc_2L], (instregex "LD1Fourv(8b|4h|2s|1d)$")>;
+def : InstRW<[A57Write_6cyc_2L, WriteAdr], (instregex "LD1Fourv(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[A57Write_8cyc_4L], (instregex "LD1Fourv(16b|8h|4s|2d)$")>;
+def : InstRW<[A57Write_8cyc_4L, WriteAdr], (instregex "LD1Fourv(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[A57Write_8cyc_1L_2V], (instregex "LD2i(8|16)$")>;
+def : InstRW<[A57Write_8cyc_1L_2V, WriteAdr], (instregex "LD2i(8|16)_POST$")>;
+def : InstRW<[A57Write_6cyc_2L], (instregex "LD2i(32)$")>;
+def : InstRW<[A57Write_6cyc_2L, WriteAdr], (instregex "LD2i(32)_POST$")>;
+def : InstRW<[A57Write_8cyc_1L_1V], (instregex "LD2i(64)$")>;
+def : InstRW<[A57Write_8cyc_1L_1V, WriteAdr], (instregex "LD2i(64)_POST$")>;
+
+def : InstRW<[A57Write_8cyc_1L_1V], (instregex "LD2Rv(8b|4h|2s)$")>;
+def : InstRW<[A57Write_8cyc_1L_1V, WriteAdr], (instregex "LD2Rv(8b|4h|2s)_POST$")>;
+def : InstRW<[A57Write_5cyc_1L], (instregex "LD2Rv(1d)$")>;
+def : InstRW<[A57Write_5cyc_1L, WriteAdr], (instregex "LD2Rv(1d)_POST$")>;
+def : InstRW<[A57Write_8cyc_1L_2V], (instregex "LD2Rv(16b|8h|4s|2d)$")>;
+def : InstRW<[A57Write_8cyc_1L_2V, WriteAdr], (instregex "LD2Rv(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[A57Write_8cyc_1L_1V], (instregex "LD2Twov(8b|4h|2s)$")>;
+def : InstRW<[A57Write_8cyc_1L_1V, WriteAdr], (instregex "LD2Twov(8b|4h|2s)_POST$")>;
+def : InstRW<[A57Write_9cyc_2L_2V], (instregex "LD2Twov(16b|8h|4s)$")>;
+def : InstRW<[A57Write_9cyc_2L_2V, WriteAdr], (instregex "LD2Twov(16b|8h|4s)_POST$")>;
+def : InstRW<[A57Write_6cyc_2L], (instregex "LD2Twov(2d)$")>;
+def : InstRW<[A57Write_6cyc_2L, WriteAdr], (instregex "LD2Twov(2d)_POST$")>;
+
+def : InstRW<[A57Write_9cyc_1L_3V], (instregex "LD3i(8|16)$")>;
+def : InstRW<[A57Write_9cyc_1L_3V, WriteAdr], (instregex "LD3i(8|16)_POST$")>;
+def : InstRW<[A57Write_8cyc_1L_2V], (instregex "LD3i(32)$")>;
+def : InstRW<[A57Write_8cyc_1L_2V, WriteAdr], (instregex "LD3i(32)_POST$")>;
+def : InstRW<[A57Write_6cyc_2L], (instregex "LD3i(64)$")>;
+def : InstRW<[A57Write_6cyc_2L, WriteAdr], (instregex "LD3i(64)_POST$")>;
+
+def : InstRW<[A57Write_8cyc_1L_2V], (instregex "LD3Rv(8b|4h|2s)$")>;
+def : InstRW<[A57Write_8cyc_1L_2V, WriteAdr], (instregex "LD3Rv(8b|4h|2s)_POST$")>;
+def : InstRW<[A57Write_6cyc_2L], (instregex "LD3Rv(1d)$")>;
+def : InstRW<[A57Write_6cyc_2L, WriteAdr], (instregex "LD3Rv(1d)_POST$")>;
+def : InstRW<[A57Write_9cyc_1L_3V], (instregex "LD3Rv(16b|8h|4s)$")>;
+def : InstRW<[A57Write_9cyc_1L_3V, WriteAdr], (instregex "LD3Rv(16b|8h|4s)_POST$")>;
+def : InstRW<[A57Write_9cyc_2L_3V], (instregex "LD3Rv(2d)$")>;
+def : InstRW<[A57Write_9cyc_2L_3V, WriteAdr], (instregex "LD3Rv(2d)_POST$")>;
+
+def : InstRW<[A57Write_9cyc_2L_2V], (instregex "LD3Threev(8b|4h|2s)$")>;
+def : InstRW<[A57Write_9cyc_2L_2V, WriteAdr], (instregex "LD3Threev(8b|4h|2s)_POST$")>;
+def : InstRW<[A57Write_10cyc_3L_4V], (instregex "LD3Threev(16b|8h|4s)$")>;
+def : InstRW<[A57Write_10cyc_3L_4V, WriteAdr], (instregex "LD3Threev(16b|8h|4s)_POST$")>;
+def : InstRW<[A57Write_8cyc_4L], (instregex "LD3Threev(2d)$")>;
+def : InstRW<[A57Write_8cyc_4L, WriteAdr], (instregex "LD3Threev(2d)_POST$")>;
+
+def : InstRW<[A57Write_9cyc_2L_3V], (instregex "LD4i(8|16)$")>;
+def : InstRW<[A57Write_9cyc_2L_3V, WriteAdr], (instregex "LD4i(8|16)_POST$")>;
+def : InstRW<[A57Write_8cyc_1L_2V], (instregex "LD4i(32)$")>;
+def : InstRW<[A57Write_8cyc_1L_2V, WriteAdr], (instregex "LD4i(32)_POST$")>;
+def : InstRW<[A57Write_9cyc_2L_3V], (instregex "LD4i(64)$")>;
+def : InstRW<[A57Write_9cyc_2L_3V, WriteAdr], (instregex "LD4i(64)_POST$")>;
+
+def : InstRW<[A57Write_8cyc_1L_2V], (instregex "LD4Rv(8b|4h|2s)$")>;
+def : InstRW<[A57Write_8cyc_1L_2V, WriteAdr], (instregex "LD4Rv(8b|4h|2s)_POST$")>;
+def : InstRW<[A57Write_6cyc_2L], (instregex "LD4Rv(1d)$")>;
+def : InstRW<[A57Write_6cyc_2L, WriteAdr], (instregex "LD4Rv(1d)_POST$")>;
+def : InstRW<[A57Write_9cyc_2L_3V], (instregex "LD4Rv(16b|8h|4s)$")>;
+def : InstRW<[A57Write_9cyc_2L_3V, WriteAdr], (instregex "LD4Rv(16b|8h|4s)_POST$")>;
+def : InstRW<[A57Write_9cyc_2L_4V], (instregex "LD4Rv(2d)$")>;
+def : InstRW<[A57Write_9cyc_2L_4V, WriteAdr], (instregex "LD4Rv(2d)_POST$")>;
+
+def : InstRW<[A57Write_9cyc_2L_2V], (instregex "LD4Fourv(8b|4h|2s)$")>;
+def : InstRW<[A57Write_9cyc_2L_2V, WriteAdr], (instregex "LD4Fourv(8b|4h|2s)_POST$")>;
+def : InstRW<[A57Write_11cyc_4L_4V], (instregex "LD4Fourv(16b|8h|4s)$")>;
+def : InstRW<[A57Write_11cyc_4L_4V, WriteAdr], (instregex "LD4Fourv(16b|8h|4s)_POST$")>;
+def : InstRW<[A57Write_8cyc_4L], (instregex "LD4Fourv(2d)$")>;
+def : InstRW<[A57Write_8cyc_4L, WriteAdr], (instregex "LD4Fourv(2d)_POST$")>;
+
+// Vector Store
+// -----------------------------------------------------------------------------
+
+def : InstRW<[A57Write_1cyc_1S], (instregex "ST1i(8|16|32)$")>;
+def : InstRW<[A57Write_1cyc_1S, WriteAdr], (instregex "ST1i(8|16|32)_POST$")>;
+def : InstRW<[A57Write_3cyc_1S_1V], (instregex "ST1i(64)$")>;
+def : InstRW<[A57Write_3cyc_1S_1V, WriteAdr], (instregex "ST1i(64)_POST$")>;
+
+def : InstRW<[A57Write_1cyc_1S], (instregex "ST1Onev(8b|4h|2s|1d)$")>;
+def : InstRW<[A57Write_1cyc_1S, WriteAdr], (instregex "ST1Onev(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[A57Write_2cyc_2S], (instregex "ST1Onev(16b|8h|4s|2d)$")>;
+def : InstRW<[A57Write_2cyc_2S, WriteAdr], (instregex "ST1Onev(16b|8h|4s|2d)_POST$")>;
+def : InstRW<[A57Write_2cyc_2S], (instregex "ST1Twov(8b|4h|2s|1d)$")>;
+def : InstRW<[A57Write_2cyc_2S, WriteAdr], (instregex "ST1Twov(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[A57Write_4cyc_4S], (instregex "ST1Twov(16b|8h|4s|2d)$")>;
+def : InstRW<[A57Write_4cyc_4S, WriteAdr], (instregex "ST1Twov(16b|8h|4s|2d)_POST$")>;
+def : InstRW<[A57Write_3cyc_3S], (instregex "ST1Threev(8b|4h|2s|1d)$")>;
+def : InstRW<[A57Write_3cyc_3S, WriteAdr], (instregex "ST1Threev(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[A57Write_6cyc_6S], (instregex "ST1Threev(16b|8h|4s|2d)$")>;
+def : InstRW<[A57Write_6cyc_6S, WriteAdr], (instregex "ST1Threev(16b|8h|4s|2d)_POST$")>;
+def : InstRW<[A57Write_4cyc_4S], (instregex "ST1Fourv(8b|4h|2s|1d)$")>;
+def : InstRW<[A57Write_4cyc_4S, WriteAdr], (instregex "ST1Fourv(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[A57Write_8cyc_8S], (instregex "ST1Fourv(16b|8h|4s|2d)$")>;
+def : InstRW<[A57Write_8cyc_8S, WriteAdr], (instregex "ST1Fourv(16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[A57Write_3cyc_1S_1V], (instregex "ST2i(8|16|32)$")>;
+def : InstRW<[A57Write_3cyc_1S_1V, WriteAdr], (instregex "ST2i(8|16|32)_POST$")>;
+def : InstRW<[A57Write_2cyc_2S], (instregex "ST2i(64)$")>;
+def : InstRW<[A57Write_2cyc_2S, WriteAdr], (instregex "ST2i(64)_POST$")>;
+
+def : InstRW<[A57Write_3cyc_2S_1V], (instregex "ST2Twov(8b|4h|2s)$")>;
+def : InstRW<[A57Write_3cyc_2S_1V, WriteAdr], (instregex "ST2Twov(8b|4h|2s)_POST$")>;
+def : InstRW<[A57Write_4cyc_4S_2V], (instregex "ST2Twov(16b|8h|4s)$")>;
+def : InstRW<[A57Write_4cyc_4S_2V, WriteAdr], (instregex "ST2Twov(16b|8h|4s)_POST$")>;
+def : InstRW<[A57Write_4cyc_4S], (instregex "ST2Twov(2d)$")>;
+def : InstRW<[A57Write_4cyc_4S, WriteAdr], (instregex "ST2Twov(2d)_POST$")>;
+
+def : InstRW<[A57Write_3cyc_1S_1V], (instregex "ST3i(8|16)$")>;
+def : InstRW<[A57Write_3cyc_1S_1V, WriteAdr], (instregex "ST3i(8|16)_POST$")>;
+def : InstRW<[A57Write_3cyc_3S], (instregex "ST3i(32)$")>;
+def : InstRW<[A57Write_3cyc_3S, WriteAdr], (instregex "ST3i(32)_POST$")>;
+def : InstRW<[A57Write_3cyc_2S_1V], (instregex "ST3i(64)$")>;
+def : InstRW<[A57Write_3cyc_2S_1V, WriteAdr], (instregex "ST3i(64)_POST$")>;
+
+def : InstRW<[A57Write_3cyc_3S_2V], (instregex "ST3Threev(8b|4h|2s)$")>;
+def : InstRW<[A57Write_3cyc_3S_2V, WriteAdr], (instregex "ST3Threev(8b|4h|2s)_POST$")>;
+def : InstRW<[A57Write_6cyc_6S_4V], (instregex "ST3Threev(16b|8h|4s)$")>;
+def : InstRW<[A57Write_6cyc_6S_4V, WriteAdr], (instregex "ST3Threev(16b|8h|4s)_POST$")>;
+def : InstRW<[A57Write_6cyc_6S], (instregex "ST3Threev(2d)$")>;
+def : InstRW<[A57Write_6cyc_6S, WriteAdr], (instregex "ST3Threev(2d)_POST$")>;
+
+def : InstRW<[A57Write_3cyc_1S_1V], (instregex "ST4i(8|16)$")>;
+def : InstRW<[A57Write_3cyc_1S_1V, WriteAdr], (instregex "ST4i(8|16)_POST$")>;
+def : InstRW<[A57Write_4cyc_4S], (instregex "ST4i(32)$")>;
+def : InstRW<[A57Write_4cyc_4S, WriteAdr], (instregex "ST4i(32)_POST$")>;
+def : InstRW<[A57Write_3cyc_2S_1V], (instregex "ST4i(64)$")>;
+def : InstRW<[A57Write_3cyc_2S_1V, WriteAdr], (instregex "ST4i(64)_POST$")>;
+
+def : InstRW<[A57Write_4cyc_4S_2V], (instregex "ST4Fourv(8b|4h|2s)$")>;
+def : InstRW<[A57Write_4cyc_4S_2V, WriteAdr], (instregex "ST4Fourv(8b|4h|2s)_POST$")>;
+def : InstRW<[A57Write_8cyc_8S_4V], (instregex "ST4Fourv(16b|8h|4s)$")>;
+def : InstRW<[A57Write_8cyc_8S_4V, WriteAdr], (instregex "ST4Fourv(16b|8h|4s)_POST$")>;
+def : InstRW<[A57Write_8cyc_8S], (instregex "ST4Fourv(2d)$")>;
+def : InstRW<[A57Write_8cyc_8S, WriteAdr], (instregex "ST4Fourv(2d)_POST$")>;
+
+} // SchedModel = CortexA57Model
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64SchedA57WriteRes.td b/contrib/llvm/lib/Target/AArch64/AArch64SchedA57WriteRes.td
new file mode 100644
index 0000000..a8f421b
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64SchedA57WriteRes.td
@@ -0,0 +1,512 @@
+//=- AArch64SchedA57WriteRes.td - ARM Cortex-A57 Write Res ---*- tablegen -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains all of the Cortex-A57 specific SchedWriteRes types. The approach
+// below is to define a generic SchedWriteRes for every combination of
+// latency and microOps. The naming convention is to use a prefix, one field
+// for latency, and one or more microOp count/type designators.
+// Prefix: A57Write
+// Latency: #cyc
+// MicroOp Count/Types: #(B|I|M|L|S|X|W|V)
+//
+// e.g. A57Write_6cyc_1I_6S_4V means the total latency is 6 and there are
+// 11 micro-ops to be issued down one I pipe, six S pipes and four V pipes.
+//
+//===----------------------------------------------------------------------===//
+
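+// As an illustrative sketch of this convention (a hypothetical type, not one
+// of the definitions used below), A57Write_4cyc_1I_1V would be declared as:
+//   def A57Write_4cyc_1I_1V : SchedWriteRes<[A57UnitI, A57UnitV]> {
+//     let Latency = 4;
+//     let NumMicroOps = 2;
+//   }
+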
+//===----------------------------------------------------------------------===//
+// Define Generic 1 micro-op types
+
+def A57Write_5cyc_1L : SchedWriteRes<[A57UnitL]> { let Latency = 5; }
+def A57Write_5cyc_1M : SchedWriteRes<[A57UnitM]> { let Latency = 5; }
+def A57Write_5cyc_1V : SchedWriteRes<[A57UnitV]> { let Latency = 5; }
+def A57Write_5cyc_1W : SchedWriteRes<[A57UnitW]> { let Latency = 5; }
+def A57Write_10cyc_1V : SchedWriteRes<[A57UnitV]> { let Latency = 10; }
+def A57Write_18cyc_1X : SchedWriteRes<[A57UnitX]> { let Latency = 18; }
+def A57Write_19cyc_1M : SchedWriteRes<[A57UnitM]> { let Latency = 19; }
+def A57Write_1cyc_1B : SchedWriteRes<[A57UnitB]> { let Latency = 1; }
+def A57Write_1cyc_1I : SchedWriteRes<[A57UnitI]> { let Latency = 1; }
+def A57Write_1cyc_1S : SchedWriteRes<[A57UnitS]> { let Latency = 1; }
+def A57Write_2cyc_1M : SchedWriteRes<[A57UnitM]> { let Latency = 2; }
+def A57Write_32cyc_1X : SchedWriteRes<[A57UnitX]> { let Latency = 32; }
+def A57Write_35cyc_1M : SchedWriteRes<[A57UnitM]> { let Latency = 35; }
+def A57Write_3cyc_1M : SchedWriteRes<[A57UnitM]> { let Latency = 3; }
+def A57Write_3cyc_1V : SchedWriteRes<[A57UnitV]> { let Latency = 3; }
+def A57Write_3cyc_1W : SchedWriteRes<[A57UnitW]> { let Latency = 3; }
+def A57Write_3cyc_1X : SchedWriteRes<[A57UnitX]> { let Latency = 3; }
+def A57Write_4cyc_1L : SchedWriteRes<[A57UnitL]> { let Latency = 4; }
+def A57Write_4cyc_1X : SchedWriteRes<[A57UnitX]> { let Latency = 4; }
+def A57Write_9cyc_1V : SchedWriteRes<[A57UnitV]> { let Latency = 9; }
+def A57Write_6cyc_1M : SchedWriteRes<[A57UnitM]> { let Latency = 6; }
+def A57Write_6cyc_1V : SchedWriteRes<[A57UnitV]> { let Latency = 6; }
+
+
+//===----------------------------------------------------------------------===//
+// Define Generic 2 micro-op types
+
+def A57Write_64cyc_2X : SchedWriteRes<[A57UnitX, A57UnitX]> {
+ let Latency = 64;
+ let NumMicroOps = 2;
+}
+def A57Write_6cyc_1I_1L : SchedWriteRes<[A57UnitI,
+ A57UnitL]> {
+ let Latency = 6;
+ let NumMicroOps = 2;
+}
+def A57Write_7cyc_1V_1X : SchedWriteRes<[A57UnitV,
+ A57UnitX]> {
+ let Latency = 7;
+ let NumMicroOps = 2;
+}
+def A57Write_8cyc_1L_1V : SchedWriteRes<[A57UnitL,
+ A57UnitV]> {
+ let Latency = 8;
+ let NumMicroOps = 2;
+}
+def A57Write_9cyc_2V : SchedWriteRes<[A57UnitV, A57UnitV]> {
+ let Latency = 9;
+ let NumMicroOps = 2;
+}
+def A57Write_8cyc_2X : SchedWriteRes<[A57UnitX, A57UnitX]> {
+ let Latency = 8;
+ let NumMicroOps = 2;
+}
+def A57Write_6cyc_2L : SchedWriteRes<[A57UnitL, A57UnitL]> {
+ let Latency = 6;
+ let NumMicroOps = 2;
+}
+def A57Write_6cyc_2V : SchedWriteRes<[A57UnitV, A57UnitV]> {
+ let Latency = 6;
+ let NumMicroOps = 2;
+}
+def A57Write_6cyc_2W : SchedWriteRes<[A57UnitW, A57UnitW]> {
+ let Latency = 6;
+ let NumMicroOps = 2;
+}
+def A57Write_5cyc_1I_1L : SchedWriteRes<[A57UnitI,
+ A57UnitL]> {
+ let Latency = 5;
+ let NumMicroOps = 2;
+}
+def A57Write_5cyc_2V : SchedWriteRes<[A57UnitV, A57UnitV]> {
+ let Latency = 5;
+ let NumMicroOps = 2;
+}
+def A57Write_5cyc_2X : SchedWriteRes<[A57UnitX, A57UnitX]> {
+ let Latency = 5;
+ let NumMicroOps = 2;
+}
+def A57Write_10cyc_1L_1V : SchedWriteRes<[A57UnitL,
+ A57UnitV]> {
+ let Latency = 10;
+ let NumMicroOps = 2;
+}
+def A57Write_10cyc_2V : SchedWriteRes<[A57UnitV, A57UnitV]> {
+ let Latency = 10;
+ let NumMicroOps = 2;
+}
+def A57Write_1cyc_1B_1I : SchedWriteRes<[A57UnitB,
+ A57UnitI]> {
+ let Latency = 1;
+ let NumMicroOps = 2;
+}
+def A57Write_1cyc_1I_1S : SchedWriteRes<[A57UnitI,
+ A57UnitS]> {
+ let Latency = 1;
+ let NumMicroOps = 2;
+}
+def A57Write_2cyc_1B_1I : SchedWriteRes<[A57UnitB,
+ A57UnitI]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+}
+def A57Write_2cyc_2S : SchedWriteRes<[A57UnitS, A57UnitS]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+}
+def A57Write_2cyc_2V : SchedWriteRes<[A57UnitV, A57UnitV]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+}
+def A57Write_36cyc_2X : SchedWriteRes<[A57UnitX, A57UnitX]> {
+ let Latency = 36;
+ let NumMicroOps = 2;
+}
+def A57Write_3cyc_1I_1M : SchedWriteRes<[A57UnitI,
+ A57UnitM]> {
+ let Latency = 3;
+ let NumMicroOps = 2;
+}
+def A57Write_3cyc_1I_1S : SchedWriteRes<[A57UnitI,
+ A57UnitS]> {
+ let Latency = 3;
+ let NumMicroOps = 2;
+}
+def A57Write_3cyc_1S_1V : SchedWriteRes<[A57UnitS,
+ A57UnitV]> {
+ let Latency = 3;
+ let NumMicroOps = 2;
+}
+def A57Write_4cyc_1I_1L : SchedWriteRes<[A57UnitI,
+ A57UnitL]> {
+ let Latency = 4;
+ let NumMicroOps = 2;
+}
+def A57Write_4cyc_2X : SchedWriteRes<[A57UnitX, A57UnitX]> {
+ let Latency = 4;
+ let NumMicroOps = 2;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Define Generic 3 micro-op types
+
+def A57Write_10cyc_3V : SchedWriteRes<[A57UnitV, A57UnitV, A57UnitV]> {
+ let Latency = 10;
+ let NumMicroOps = 3;
+}
+def A57Write_2cyc_1I_2S : SchedWriteRes<[A57UnitI,
+ A57UnitS, A57UnitS]> {
+ let Latency = 2;
+ let NumMicroOps = 3;
+}
+def A57Write_3cyc_1I_1S_1V : SchedWriteRes<[A57UnitI,
+ A57UnitS,
+ A57UnitV]> {
+ let Latency = 3;
+ let NumMicroOps = 3;
+}
+def A57Write_3cyc_1M_2S : SchedWriteRes<[A57UnitM,
+ A57UnitS, A57UnitS]> {
+ let Latency = 3;
+ let NumMicroOps = 3;
+}
+def A57Write_3cyc_3S : SchedWriteRes<[A57UnitS, A57UnitS, A57UnitS]> {
+ let Latency = 3;
+ let NumMicroOps = 3;
+}
+def A57Write_3cyc_2S_1V : SchedWriteRes<[A57UnitS, A57UnitS,
+ A57UnitV]> {
+ let Latency = 3;
+ let NumMicroOps = 3;
+}
+def A57Write_5cyc_1I_2L : SchedWriteRes<[A57UnitI,
+ A57UnitL, A57UnitL]> {
+ let Latency = 5;
+ let NumMicroOps = 3;
+}
+def A57Write_6cyc_1I_2L : SchedWriteRes<[A57UnitI,
+ A57UnitL, A57UnitL]> {
+ let Latency = 6;
+ let NumMicroOps = 3;
+}
+def A57Write_6cyc_3V : SchedWriteRes<[A57UnitV, A57UnitV, A57UnitV]> {
+ let Latency = 6;
+ let NumMicroOps = 3;
+}
+def A57Write_7cyc_3L : SchedWriteRes<[A57UnitL, A57UnitL, A57UnitL]> {
+ let Latency = 7;
+ let NumMicroOps = 3;
+}
+def A57Write_8cyc_1I_1L_1V : SchedWriteRes<[A57UnitI,
+ A57UnitL,
+ A57UnitV]> {
+ let Latency = 8;
+ let NumMicroOps = 3;
+}
+def A57Write_8cyc_1L_2V : SchedWriteRes<[A57UnitL,
+ A57UnitV, A57UnitV]> {
+ let Latency = 8;
+ let NumMicroOps = 3;
+}
+def A57Write_8cyc_3V : SchedWriteRes<[A57UnitV, A57UnitV, A57UnitV]> {
+ let Latency = 8;
+ let NumMicroOps = 3;
+}
+def A57Write_9cyc_3V : SchedWriteRes<[A57UnitV, A57UnitV, A57UnitV]> {
+ let Latency = 9;
+ let NumMicroOps = 3;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Define Generic 4 micro-op types
+
+def A57Write_2cyc_2I_2S : SchedWriteRes<[A57UnitI, A57UnitI,
+ A57UnitS, A57UnitS]> {
+ let Latency = 2;
+ let NumMicroOps = 4;
+}
+def A57Write_3cyc_2I_2S : SchedWriteRes<[A57UnitI, A57UnitI,
+ A57UnitS, A57UnitS]> {
+ let Latency = 3;
+ let NumMicroOps = 4;
+}
+def A57Write_3cyc_1I_3S : SchedWriteRes<[A57UnitI,
+ A57UnitS, A57UnitS, A57UnitS]> {
+ let Latency = 3;
+ let NumMicroOps = 4;
+}
+def A57Write_3cyc_1I_2S_1V : SchedWriteRes<[A57UnitI,
+ A57UnitS, A57UnitS,
+ A57UnitV]> {
+ let Latency = 3;
+ let NumMicroOps = 4;
+}
+def A57Write_4cyc_4S : SchedWriteRes<[A57UnitS, A57UnitS,
+ A57UnitS, A57UnitS]> {
+ let Latency = 4;
+ let NumMicroOps = 4;
+}
+def A57Write_7cyc_1I_3L : SchedWriteRes<[A57UnitI,
+ A57UnitL, A57UnitL, A57UnitL]> {
+ let Latency = 7;
+ let NumMicroOps = 4;
+}
+def A57Write_5cyc_2I_2L : SchedWriteRes<[A57UnitI, A57UnitI,
+ A57UnitL, A57UnitL]> {
+ let Latency = 5;
+ let NumMicroOps = 4;
+}
+def A57Write_8cyc_1I_1L_2V : SchedWriteRes<[A57UnitI,
+ A57UnitL,
+ A57UnitV, A57UnitV]> {
+ let Latency = 8;
+ let NumMicroOps = 4;
+}
+def A57Write_8cyc_4L : SchedWriteRes<[A57UnitL, A57UnitL,
+ A57UnitL, A57UnitL]> {
+ let Latency = 8;
+ let NumMicroOps = 4;
+}
+def A57Write_9cyc_2L_2V : SchedWriteRes<[A57UnitL, A57UnitL,
+ A57UnitV, A57UnitV]> {
+ let Latency = 9;
+ let NumMicroOps = 4;
+}
+def A57Write_9cyc_1L_3V : SchedWriteRes<[A57UnitL,
+ A57UnitV, A57UnitV, A57UnitV]> {
+ let Latency = 9;
+ let NumMicroOps = 4;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Define Generic 5 micro-op types
+
+def A57Write_3cyc_3S_2V : SchedWriteRes<[A57UnitS, A57UnitS, A57UnitS,
+ A57UnitV, A57UnitV]> {
+ let Latency = 3;
+ let NumMicroOps = 5;
+}
+def A57Write_8cyc_1I_4L : SchedWriteRes<[A57UnitI,
+ A57UnitL, A57UnitL,
+ A57UnitL, A57UnitL]> {
+ let Latency = 8;
+ let NumMicroOps = 5;
+}
+def A57Write_4cyc_1I_4S : SchedWriteRes<[A57UnitI,
+ A57UnitS, A57UnitS,
+ A57UnitS, A57UnitS]> {
+ let Latency = 4;
+ let NumMicroOps = 5;
+}
+def A57Write_9cyc_1I_2L_2V : SchedWriteRes<[A57UnitI,
+ A57UnitL, A57UnitL,
+ A57UnitV, A57UnitV]> {
+ let Latency = 9;
+ let NumMicroOps = 5;
+}
+def A57Write_9cyc_1I_1L_3V : SchedWriteRes<[A57UnitI,
+ A57UnitL,
+ A57UnitV, A57UnitV, A57UnitV]> {
+ let Latency = 9;
+ let NumMicroOps = 5;
+}
+def A57Write_9cyc_2L_3V : SchedWriteRes<[A57UnitL, A57UnitL,
+ A57UnitV, A57UnitV, A57UnitV]> {
+ let Latency = 9;
+ let NumMicroOps = 5;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Define Generic 6 micro-op types
+
+def A57Write_3cyc_1I_3S_2V : SchedWriteRes<[A57UnitI,
+ A57UnitS, A57UnitS, A57UnitS,
+ A57UnitV, A57UnitV]> {
+ let Latency = 3;
+ let NumMicroOps = 6;
+}
+def A57Write_4cyc_2I_4S : SchedWriteRes<[A57UnitI, A57UnitI,
+ A57UnitS, A57UnitS,
+ A57UnitS, A57UnitS]> {
+ let Latency = 4;
+ let NumMicroOps = 6;
+}
+def A57Write_4cyc_4S_2V : SchedWriteRes<[A57UnitS, A57UnitS,
+ A57UnitS, A57UnitS,
+ A57UnitV, A57UnitV]> {
+ let Latency = 4;
+ let NumMicroOps = 6;
+}
+def A57Write_6cyc_6S : SchedWriteRes<[A57UnitS, A57UnitS, A57UnitS,
+ A57UnitS, A57UnitS, A57UnitS]> {
+ let Latency = 6;
+ let NumMicroOps = 6;
+}
+def A57Write_9cyc_1I_2L_3V : SchedWriteRes<[A57UnitI,
+ A57UnitL, A57UnitL,
+ A57UnitV, A57UnitV, A57UnitV]> {
+ let Latency = 9;
+ let NumMicroOps = 6;
+}
+def A57Write_9cyc_1I_1L_4V : SchedWriteRes<[A57UnitI,
+ A57UnitL,
+ A57UnitV, A57UnitV,
+ A57UnitV, A57UnitV]> {
+ let Latency = 9;
+ let NumMicroOps = 6;
+}
+def A57Write_9cyc_2L_4V : SchedWriteRes<[A57UnitL, A57UnitL,
+ A57UnitV, A57UnitV,
+ A57UnitV, A57UnitV]> {
+ let Latency = 9;
+ let NumMicroOps = 6;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Define Generic 7 micro-op types
+
+def A57Write_10cyc_3L_4V : SchedWriteRes<[A57UnitL, A57UnitL, A57UnitL,
+ A57UnitV, A57UnitV,
+ A57UnitV, A57UnitV]> {
+ let Latency = 10;
+ let NumMicroOps = 7;
+}
+def A57Write_4cyc_1I_4S_2V : SchedWriteRes<[A57UnitI,
+ A57UnitS, A57UnitS,
+ A57UnitS, A57UnitS,
+ A57UnitV, A57UnitV]> {
+ let Latency = 4;
+ let NumMicroOps = 7;
+}
+def A57Write_6cyc_1I_6S : SchedWriteRes<[A57UnitI,
+ A57UnitS, A57UnitS, A57UnitS,
+ A57UnitS, A57UnitS, A57UnitS]> {
+ let Latency = 6;
+ let NumMicroOps = 7;
+}
+def A57Write_9cyc_1I_2L_4V : SchedWriteRes<[A57UnitI,
+ A57UnitL, A57UnitL,
+ A57UnitV, A57UnitV,
+ A57UnitV, A57UnitV]> {
+ let Latency = 9;
+ let NumMicroOps = 7;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Define Generic 8 micro-op types
+
+def A57Write_10cyc_1I_3L_4V : SchedWriteRes<[A57UnitI,
+ A57UnitL, A57UnitL, A57UnitL,
+ A57UnitV, A57UnitV,
+ A57UnitV, A57UnitV]> {
+ let Latency = 10;
+ let NumMicroOps = 8;
+}
+def A57Write_11cyc_4L_4V : SchedWriteRes<[A57UnitL, A57UnitL,
+ A57UnitL, A57UnitL,
+ A57UnitV, A57UnitV,
+ A57UnitV, A57UnitV]> {
+ let Latency = 11;
+ let NumMicroOps = 8;
+}
+def A57Write_8cyc_8S : SchedWriteRes<[A57UnitS, A57UnitS,
+ A57UnitS, A57UnitS,
+ A57UnitS, A57UnitS,
+ A57UnitS, A57UnitS]> {
+ let Latency = 8;
+ let NumMicroOps = 8;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Define Generic 9 micro-op types
+
+def A57Write_8cyc_1I_8S : SchedWriteRes<[A57UnitI,
+ A57UnitS, A57UnitS,
+ A57UnitS, A57UnitS,
+ A57UnitS, A57UnitS,
+ A57UnitS, A57UnitS]> {
+ let Latency = 8;
+ let NumMicroOps = 9;
+}
+def A57Write_11cyc_1I_4L_4V : SchedWriteRes<[A57UnitI,
+ A57UnitL, A57UnitL,
+ A57UnitL, A57UnitL,
+ A57UnitV, A57UnitV,
+ A57UnitV, A57UnitV]> {
+ let Latency = 11;
+ let NumMicroOps = 9;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Define Generic 10 micro-op types
+
+def A57Write_6cyc_6S_4V : SchedWriteRes<[A57UnitS, A57UnitS, A57UnitS,
+ A57UnitS, A57UnitS, A57UnitS,
+ A57UnitV, A57UnitV,
+ A57UnitV, A57UnitV]> {
+ let Latency = 6;
+ let NumMicroOps = 10;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Define Generic 11 micro-op types
+
+def A57Write_6cyc_1I_6S_4V : SchedWriteRes<[A57UnitI,
+ A57UnitS, A57UnitS, A57UnitS,
+ A57UnitS, A57UnitS, A57UnitS,
+ A57UnitV, A57UnitV,
+ A57UnitV, A57UnitV]> {
+ let Latency = 6;
+ let NumMicroOps = 11;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Define Generic 12 micro-op types
+
+def A57Write_8cyc_8S_4V : SchedWriteRes<[A57UnitS, A57UnitS, A57UnitS, A57UnitS,
+ A57UnitS, A57UnitS, A57UnitS, A57UnitS,
+ A57UnitV, A57UnitV,
+ A57UnitV, A57UnitV]> {
+ let Latency = 8;
+ let NumMicroOps = 12;
+}
+
+//===----------------------------------------------------------------------===//
+// Define Generic 13 micro-op types
+
+def A57Write_8cyc_1I_8S_4V : SchedWriteRes<[A57UnitI,
+ A57UnitS, A57UnitS, A57UnitS,
+ A57UnitS, A57UnitS, A57UnitS,
+ A57UnitS, A57UnitS,
+ A57UnitV, A57UnitV,
+ A57UnitV, A57UnitV]> {
+ let Latency = 8;
+ let NumMicroOps = 13;
+}
+
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64SchedCyclone.td b/contrib/llvm/lib/Target/AArch64/AArch64SchedCyclone.td
new file mode 100644
index 0000000..a2a1802
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64SchedCyclone.td
@@ -0,0 +1,865 @@
+//=- AArch64SchedCyclone.td - AArch64 Cyclone Scheduling Defs -*- tablegen -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the machine model for AArch64 Cyclone to support
+// instruction scheduling and other instruction cost heuristics.
+//
+//===----------------------------------------------------------------------===//
+
+def CycloneModel : SchedMachineModel {
+ let IssueWidth = 6; // 6 micro-ops are dispatched per cycle.
+ let MicroOpBufferSize = 192; // Based on the reorder buffer.
+ let LoadLatency = 4; // Optimistic load latency.
+ let MispredictPenalty = 16; // 14-19 cycles are typical.
+}
+
+//===----------------------------------------------------------------------===//
+// Define each kind of processor resource and number available on Cyclone.
+
+// 4 integer pipes
+def CyUnitI : ProcResource<4> {
+ let BufferSize = 48;
+}
+
+// 2 branch units: I[0..1]
+def CyUnitB : ProcResource<2> {
+ let Super = CyUnitI;
+ let BufferSize = 24;
+}
+
+// 1 indirect-branch unit: I[0]
+def CyUnitBR : ProcResource<1> {
+ let Super = CyUnitB;
+}
+
+// 2 shifter pipes: I[2..3]
+// When an instruction consumes a CyUnitIS, it also consumes a CyUnitI
+def CyUnitIS : ProcResource<2> {
+ let Super = CyUnitI;
+ let BufferSize = 24;
+}
+
+// 1 mul pipe: I[0]
+def CyUnitIM : ProcResource<1> {
+ let Super = CyUnitBR;
+ let BufferSize = 32;
+}
+
+// 1 div pipe: I[1]
+def CyUnitID : ProcResource<1> {
+ let Super = CyUnitB;
+ let BufferSize = 16;
+}
+
+// 1 integer division unit. This is driven by the ID pipe, but only
+// consumes the pipe for one cycle at issue and another cycle at writeback.
+def CyUnitIntDiv : ProcResource<1>;
+
+// 2 ld/st pipes.
+def CyUnitLS : ProcResource<2> {
+ let BufferSize = 28;
+}
+
+// 3 fp/vector pipes.
+def CyUnitV : ProcResource<3> {
+ let BufferSize = 48;
+}
+// 2 fp/vector arithmetic and multiply pipes: V[0-1]
+def CyUnitVM : ProcResource<2> {
+ let Super = CyUnitV;
+ let BufferSize = 32;
+}
+// 1 fp/vector division/sqrt pipe: V[2]
+def CyUnitVD : ProcResource<1> {
+ let Super = CyUnitV;
+ let BufferSize = 16;
+}
+// 1 fp compare pipe: V[0]
+def CyUnitVC : ProcResource<1> {
+ let Super = CyUnitVM;
+ let BufferSize = 16;
+}
+
+// 2 fp division/square-root units. These are driven by the VD pipe,
+// but only consume the pipe for one cycle at issue and a cycle at writeback.
+def CyUnitFloatDiv : ProcResource<2>;
+
+//===----------------------------------------------------------------------===//
+// Define scheduler read/write resources and latency on Cyclone.
+// This mirrors sections 7.7-7.9 of the Tuning Guide v1.0.1.
+
+let SchedModel = CycloneModel in {
+
+//---
+// 7.8.1. Moves
+//---
+
+// A single nop micro-op (uX).
+def WriteX : SchedWriteRes<[]> { let Latency = 0; }
+
+// Move zero is a register rename (to machine register zero).
+// The move is replaced by a single nop micro-op.
+// MOVZ Rd, #0
+// AND Rd, Rzr, #imm
+def WriteZPred : SchedPredicate<[{TII->isGPRZero(MI)}]>;
+def WriteImmZ : SchedWriteVariant<[
+ SchedVar<WriteZPred, [WriteX]>,
+ SchedVar<NoSchedPred, [WriteImm]>]>;
+def : InstRW<[WriteImmZ], (instrs MOVZWi,MOVZXi,ANDWri,ANDXri)>;
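+// For example (a sketch of how the variant resolves): MOVZWi with a zero
+// immediate satisfies WriteZPred and is modeled as WriteX, the zero-latency
+// nop micro-op, while a non-zero MOVZWi falls through to WriteImm below.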
+
+// Move GPR is a register rename and single nop micro-op.
+// ORR Xd, XZR, Xm
+// ADD Xd, Xn, #0
+def WriteIMovPred : SchedPredicate<[{TII->isGPRCopy(MI)}]>;
+def WriteVMovPred : SchedPredicate<[{TII->isFPRCopy(MI)}]>;
+def WriteMov : SchedWriteVariant<[
+ SchedVar<WriteIMovPred, [WriteX]>,
+ SchedVar<WriteVMovPred, [WriteX]>,
+ SchedVar<NoSchedPred, [WriteI]>]>;
+def : InstRW<[WriteMov], (instrs COPY,ORRXrr,ADDXrr)>;
+
+// Move non-zero immediate is an integer ALU op.
+// MOVN,MOVZ,MOVK
+def : WriteRes<WriteImm, [CyUnitI]>;
+
+//---
+// 7.8.2-7.8.5. Arithmetic and Logical, Comparison, Conditional,
+// Shifts and Bitfield Operations
+//---
+
+// ADR,ADRP
+// ADD(S)ri,SUB(S)ri,AND(S)ri,EORri,ORRri
+// ADD(S)rr,SUB(S)rr,AND(S)rr,BIC(S)rr,EONrr,EORrr,ORNrr,ORRrr
+// ADC(S),SBC(S)
+// Aliases: CMN, CMP, TST
+//
+// Conditional operations.
+// CCMNi,CCMPi,CCMNr,CCMPr,
+// CSEL,CSINC,CSINV,CSNEG
+//
+// Bit counting and reversal operations.
+// CLS,CLZ,RBIT,REV,REV16,REV32
+def : WriteRes<WriteI, [CyUnitI]>;
+
+// ADD with shifted register operand is a single micro-op that
+// consumes a shift pipeline for two cycles.
+// ADD(S)rs,SUB(S)rs,AND(S)rs,BIC(S)rs,EONrs,EORrs,ORNrs,ORRrs
+// EXAMPLE: ADDrs Xn, Xm LSL #imm
+def : WriteRes<WriteISReg, [CyUnitIS]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+}
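+// Rough throughput implication (sketch): an op such as ADDXrs Xd, Xn, Xm,
+// LSL #3 holds one of the two IS pipes for 2 cycles, so the model sustains
+// at most about one shifted-register ALU op per cycle across the IS pair.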
+
+// ADD with extended register operand is the same as shifted reg operand.
+// ADD(S)re,SUB(S)re
+// EXAMPLE: ADDXre Xn, Xm, UXTB #1
+def : WriteRes<WriteIEReg, [CyUnitIS]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+}
+
+// Variable shift and bitfield operations.
+// ASRV,LSLV,LSRV,RORV,BFM,SBFM,UBFM
+def : WriteRes<WriteIS, [CyUnitIS]>;
+
+// EXTR Shifts a pair of registers and requires two micro-ops.
+// The second micro-op is delayed, as modeled by ReadExtrHi.
+// EXTR Xn, Xm, #imm
+def : WriteRes<WriteExtr, [CyUnitIS, CyUnitIS]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+}
+
+// EXTR's first register read is delayed by one cycle, effectively
+// shortening its writer's latency.
+// EXTR Xn, Xm, #imm
+def : ReadAdvance<ReadExtrHi, 1>;
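+// Worked example (sketch): if the register feeding EXTR's high half is
+// produced by a Latency = 2 write, the ReadAdvance of 1 reduces the
+// effective producer-to-EXTR dependence to 2 - 1 = 1 cycle.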
+
+//---
+// 7.8.6. Multiplies
+//---
+
+// MUL/MNEG are aliases for MADD/MSUB.
+// MADDW,MSUBW,SMADDL,SMSUBL,UMADDL,UMSUBL
+def : WriteRes<WriteIM32, [CyUnitIM]> {
+ let Latency = 4;
+}
+// MADDX,MSUBX,SMULH,UMULH
+def : WriteRes<WriteIM64, [CyUnitIM]> {
+ let Latency = 5;
+}
+
+//---
+// 7.8.7. Divide
+//---
+
+// 32-bit divide takes 7-13 cycles. 10 cycles covers a 20-bit quotient.
+// The ID pipe is consumed for 2 cycles: issue and writeback.
+// SDIVW,UDIVW
+def : WriteRes<WriteID32, [CyUnitID, CyUnitIntDiv]> {
+ let Latency = 10;
+ let ResourceCycles = [2, 10];
+}
+// 64-bit divide takes 7-21 cycles. 13 cycles covers a 32-bit quotient.
+// The ID pipe is consumed for 2 cycles: issue and writeback.
+// SDIVX,UDIVX
+def : WriteRes<WriteID64, [CyUnitID, CyUnitIntDiv]> {
+ let Latency = 13;
+ let ResourceCycles = [2, 13];
+}
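+// In both divide types, ResourceCycles = [2, N] holds the shared
+// CyUnitIntDiv unit for the full N cycles, so, as the model sees it,
+// back-to-back divides serialize on the divider even though the ID pipe
+// is only occupied for issue and writeback.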
+
+//---
+// 7.8.8,7.8.10. Load/Store, single element
+//---
+
+// Integer loads take 4 cycles and use one LS unit for one cycle.
+def : WriteRes<WriteLD, [CyUnitLS]> {
+ let Latency = 4;
+}
+
+// Store-load forwarding is 4 cycles.
+//
+// Note: The store-exclusive sequence incorporates this
+// latency. However, general heuristics should not model the
+// dependence between a store and subsequent may-alias load because
+// hardware speculation works.
+def : WriteRes<WriteST, [CyUnitLS]> {
+ let Latency = 4;
+}
+
+// Load from base address plus an optionally scaled register offset.
+// Rt latency is latency WriteIS + WriteLD.
+// EXAMPLE: LDR Xn, Xm [, lsl 3]
+def CyWriteLDIdx : SchedWriteVariant<[
+ SchedVar<ScaledIdxPred, [WriteIS, WriteLD]>, // Load from scaled register.
+ SchedVar<NoSchedPred, [WriteLD]>]>; // Load from register offset.
+def : SchedAlias<WriteLDIdx, CyWriteLDIdx>; // Map AArch64->Cyclone type.
+
+// EXAMPLE: STR Xn, Xm [, lsl 3]
+def CyWriteSTIdx : SchedWriteVariant<[
+ SchedVar<ScaledIdxPred, [WriteIS, WriteST]>, // Store to scaled register.
+ SchedVar<NoSchedPred, [WriteST]>]>; // Store to register offset.
+def : SchedAlias<WriteSTIdx, CyWriteSTIdx>; // Map AArch64->Cyclone type.
+
+// Read the (unshifted) base register Xn in the second micro-op one cycle later.
+// EXAMPLE: LDR Xn, Xm [, lsl 3]
+def ReadBaseRS : SchedReadAdvance<1>;
+def CyReadAdrBase : SchedReadVariant<[
+ SchedVar<ScaledIdxPred, [ReadBaseRS]>, // Read base reg after shifting offset.
+ SchedVar<NoSchedPred, [ReadDefault]>]>; // Read base reg with no shift.
+def : SchedAlias<ReadAdrBase, CyReadAdrBase>; // Map AArch64->Cyclone type.
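+// Putting the pieces together (illustrative): for a scaled load such as
+// LDR Xt, [Xn, Xm, LSL #3], ScaledIdxPred holds, so the load is modeled as
+// WriteIS followed by WriteLD and the base register Xn is read one cycle
+// later via ReadBaseRS; an unscaled register offset uses WriteLD alone with
+// a default base-register read.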
+
+//---
+// 7.8.9,7.8.11. Load/Store, paired
+//---
+
+// Address pre/post increment is a simple ALU op with one cycle latency.
+def : WriteRes<WriteAdr, [CyUnitI]>;
+
+// LDP high register write is fused with the load, but a nop micro-op remains.
+def : WriteRes<WriteLDHi, []> {
+ let Latency = 4;
+}
+
+// STP is a vector op and store, except for QQ, which is just two stores.
+def : SchedAlias<WriteSTP, WriteVSTShuffle>;
+def : InstRW<[WriteST, WriteST], (instrs STPQi)>;
+
+//---
+// 7.8.13. Branches
+//---
+
+// Branches take a single micro-op.
+// The misprediction penalty is defined as a SchedMachineModel property.
+def : WriteRes<WriteBr, [CyUnitB]> {let Latency = 0;}
+def : WriteRes<WriteBrReg, [CyUnitBR]> {let Latency = 0;}
+
+//---
+// 7.8.14. Never-issued Instructions, Barrier and Hint Operations
+//---
+
+// NOP,SEV,SEVL,WFE,WFI,YIELD
+def : WriteRes<WriteHint, []> {let Latency = 0;}
+// ISB
+def : InstRW<[WriteI], (instrs ISB)>;
+// CLREX,DMB,DSB
+def : WriteRes<WriteBarrier, [CyUnitLS]>;
+
+// System instructions get an invalid latency because the latency of
+// other operations across them is meaningless.
+def : WriteRes<WriteSys, []> {let Latency = -1;}
+
+//===----------------------------------------------------------------------===//
+// 7.9 Vector Unit Instructions
+
+// Simple vector operations take 2 cycles.
+def : WriteRes<WriteV, [CyUnitV]> {let Latency = 2;}
+
+// Define some longer latency vector op types for Cyclone.
+def CyWriteV3 : SchedWriteRes<[CyUnitV]> {let Latency = 3;}
+def CyWriteV4 : SchedWriteRes<[CyUnitV]> {let Latency = 4;}
+def CyWriteV5 : SchedWriteRes<[CyUnitV]> {let Latency = 5;}
+def CyWriteV6 : SchedWriteRes<[CyUnitV]> {let Latency = 6;}
+
+// Simple floating-point operations take 2 cycles.
+def : WriteRes<WriteF, [CyUnitV]> {let Latency = 2;}
+
+//---
+// 7.9.1 Vector Moves
+//---
+
+// TODO: Add Cyclone-specific zero-cycle zeros. LLVM currently
+// generates expensive int-float conversion instead:
+// FMOVDi Dd, #0.0
+// FMOVv2f64ns Vd.2d, #0.0
+
+// FMOVSi,FMOVDi
+def : WriteRes<WriteFImm, [CyUnitV]> {let Latency = 2;}
+
+// MOVI,MVNI are WriteV
+// FMOVv2f32ns,FMOVv2f64ns,FMOVv4f32ns are WriteV
+
+// Move FPR is a register rename and single nop micro-op.
+// ORR.16b Vd,Vn,Vn
+// COPY is handled above in the WriteMov Variant.
+def WriteVMov : SchedWriteVariant<[
+ SchedVar<WriteVMovPred, [WriteX]>,
+ SchedVar<NoSchedPred, [WriteV]>]>;
+def : InstRW<[WriteVMov], (instrs ORRv16i8)>;
+
+// FMOVSr,FMOVDr are WriteF.
+
+// MOV V,V is a WriteV.
+
+// CPY D,V[x] is a WriteV
+
+// INS V[x],V[y] is a WriteV.
+
+// FMOVWSr,FMOVXDr,FMOVXDHighr
+def : WriteRes<WriteFCopy, [CyUnitLS]> {
+ let Latency = 5;
+}
+
+// FMOVSWr,FMOVDXr
+def : InstRW<[WriteLD], (instrs FMOVSWr,FMOVDXr,FMOVDXHighr)>;
+
+// INS V[x],R
+def CyWriteCopyToFPR : WriteSequence<[WriteVLD, WriteV]>;
+def : InstRW<[CyWriteCopyToFPR], (instregex "INSv")>;
+
+// SMOV,UMOV R,V[x]
+def CyWriteCopyToGPR : WriteSequence<[WriteLD, WriteI]>;
+def : InstRW<[CyWriteCopyToGPR], (instregex "SMOVv","UMOVv")>;
+
+// DUP V,R
+def : InstRW<[CyWriteCopyToFPR], (instregex "DUPv")>;
+
+// DUP V,V[x] is a WriteV.
+
+//---
+// 7.9.2 Integer Arithmetic, Logical, and Comparisons
+//---
+
+// BIC,ORR V,#imm are WriteV
+
+def : InstRW<[CyWriteV3], (instregex "ABSv")>;
+
+// MVN,NEG,NOT are WriteV
+
+def : InstRW<[CyWriteV3], (instregex "SQABSv","SQNEGv")>;
+
+// ADDP is a WriteV.
+def CyWriteVADDLP : SchedWriteRes<[CyUnitV]> {let Latency = 2;}
+def : InstRW<[CyWriteVADDLP], (instregex "SADDLPv","UADDLPv")>;
+
+def : InstRW<[CyWriteV3],
+ (instregex "ADDVv","SMAXVv","UMAXVv","SMINVv","UMINVv")>;
+
+def : InstRW<[CyWriteV3], (instregex "SADDLV","UADDLV")>;
+
+// ADD,SUB are WriteV
+
+// Forward declare.
+def CyWriteVABD : SchedWriteRes<[CyUnitV]> {let Latency = 3;}
+
+// Add/Diff and accumulate uses the vector multiply unit.
+def CyWriteVAccum : SchedWriteRes<[CyUnitVM]> {let Latency = 3;}
+def CyReadVAccum : SchedReadAdvance<1,
+ [CyWriteVAccum, CyWriteVADDLP, CyWriteVABD]>;
+
+def : InstRW<[CyWriteVAccum, CyReadVAccum],
+ (instregex "SADALP","UADALP")>;
+
+def : InstRW<[CyWriteVAccum, CyReadVAccum],
+ (instregex "SABAv","UABAv","SABALv","UABALv")>;
+
+def : InstRW<[CyWriteV3], (instregex "SQADDv","SQSUBv","UQADDv","UQSUBv")>;
+
+def : InstRW<[CyWriteV3], (instregex "SUQADDv","USQADDv")>;
+
+def : InstRW<[CyWriteV4], (instregex "ADDHNv","RADDHNv", "RSUBHNv", "SUBHNv")>;
+
+// WriteV includes:
+// AND,BIC,CMTST,EOR,ORN,ORR
+// ADDP
+// SHADD,SHSUB,SRHADD,UHADD,UHSUB,URHADD
+// SADDL,SSUBL,UADDL,USUBL
+// SADDW,SSUBW,UADDW,USUBW
+
+def : InstRW<[CyWriteV3], (instregex "CMEQv","CMGEv","CMGTv",
+ "CMLEv","CMLTv",
+ "CMHIv","CMHSv")>;
+
+def : InstRW<[CyWriteV3], (instregex "SMAXv","SMINv","UMAXv","UMINv",
+ "SMAXPv","SMINPv","UMAXPv","UMINPv")>;
+
+def : InstRW<[CyWriteVABD], (instregex "SABDv","UABDv",
+ "SABDLv","UABDLv")>;
+
+//---
+// 7.9.3 Floating Point Arithmetic and Comparisons
+//---
+
+// FABS,FNEG are WriteF
+
+def : InstRW<[CyWriteV4], (instrs FADDPv2i32p)>;
+def : InstRW<[CyWriteV5], (instrs FADDPv2i64p)>;
+
+def : InstRW<[CyWriteV3], (instregex "FMAXPv2i","FMAXNMPv2i",
+ "FMINPv2i","FMINNMPv2i")>;
+
+def : InstRW<[CyWriteV4], (instregex "FMAXVv","FMAXNMVv","FMINVv","FMINNMVv")>;
+
+def : InstRW<[CyWriteV4], (instrs FADDSrr,FADDv2f32,FADDv4f32,
+ FSUBSrr,FSUBv2f32,FSUBv4f32,
+ FADDPv2f32,FADDPv4f32,
+ FABD32,FABDv2f32,FABDv4f32)>;
+def : InstRW<[CyWriteV5], (instrs FADDDrr,FADDv2f64,
+ FSUBDrr,FSUBv2f64,
+ FADDPv2f64,
+ FABD64,FABDv2f64)>;
+
+def : InstRW<[CyWriteV3], (instregex "FCMEQ","FCMGT","FCMLE","FCMLT")>;
+
+def : InstRW<[CyWriteV3], (instregex "FACGE","FACGT",
+ "FMAXS","FMAXD","FMAXv",
+ "FMINS","FMIND","FMINv",
+ "FMAXNMS","FMAXNMD","FMAXNMv",
+ "FMINNMS","FMINNMD","FMINNMv",
+ "FMAXPv2f","FMAXPv4f",
+ "FMINPv2f","FMINPv4f",
+ "FMAXNMPv2f","FMAXNMPv4f",
+ "FMINNMPv2f","FMINNMPv4f")>;
+
+// FCMP,FCMPE,FCCMP,FCCMPE
+def : WriteRes<WriteFCmp, [CyUnitVC]> {let Latency = 4;}
+
+// FCSEL is a WriteF.
+
+//---
+// 7.9.4 Shifts and Bitfield Operations
+//---
+
+// SHL is a WriteV
+
+def CyWriteVSHR : SchedWriteRes<[CyUnitV]> {let Latency = 2;}
+def : InstRW<[CyWriteVSHR], (instregex "SSHRv","USHRv")>;
+
+def CyWriteVSRSHR : SchedWriteRes<[CyUnitV]> {let Latency = 3;}
+def : InstRW<[CyWriteVSRSHR], (instregex "SRSHRv","URSHRv")>;
+
+// Shift and accumulate uses the vector multiply unit.
+def CyWriteVShiftAcc : SchedWriteRes<[CyUnitVM]> {let Latency = 3;}
+def CyReadVShiftAcc : SchedReadAdvance<1,
+ [CyWriteVShiftAcc, CyWriteVSHR, CyWriteVSRSHR]>;
+def : InstRW<[CyWriteVShiftAcc, CyReadVShiftAcc],
+ (instregex "SRSRAv","SSRAv","URSRAv","USRAv")>;
+
+// SSHL,USHL are WriteV.
+
+def : InstRW<[CyWriteV3], (instregex "SRSHLv","URSHLv")>;
+
+// SQSHL,SQSHLU,UQSHL are WriteV.
+
+def : InstRW<[CyWriteV3], (instregex "SQRSHLv","UQRSHLv")>;
+
+// WriteV includes:
+// SHLL,SSHLL,USHLL
+// SLI,SRI
+// BIF,BIT,BSL
+// EXT
+// CLS,CLZ,CNT,RBIT,REV16,REV32,REV64,XTN
+// XTN2
+
+def : InstRW<[CyWriteV4],
+ (instregex "RSHRNv","SHRNv",
+ "SQRSHRNv","SQRSHRUNv","SQSHRNv","SQSHRUNv",
+ "UQRSHRNv","UQSHRNv","SQXTNv","SQXTUNv","UQXTNv")>;
+
+//---
+// 7.9.5 Multiplication
+//---
+
+def CyWriteVMul : SchedWriteRes<[CyUnitVM]> { let Latency = 4;}
+def : InstRW<[CyWriteVMul], (instregex "MULv","SMULLv","UMULLv",
+ "SQDMULLv","SQDMULHv","SQRDMULHv")>;
+
+// FMUL,FMULX,FNMUL default to WriteFMul.
+def : WriteRes<WriteFMul, [CyUnitVM]> { let Latency = 4;}
+
+def CyWriteV64Mul : SchedWriteRes<[CyUnitVM]> { let Latency = 5;}
+def : InstRW<[CyWriteV64Mul], (instrs FMULDrr,FMULv2f64,FMULv2i64_indexed,
+ FNMULDrr,FMULX64,FMULXv2f64,FMULXv2i64_indexed)>;
+
+def CyReadVMulAcc : SchedReadAdvance<1, [CyWriteVMul, CyWriteV64Mul]>;
+def : InstRW<[CyWriteVMul, CyReadVMulAcc],
+ (instregex "MLA","MLS","SMLAL","SMLSL","UMLAL","UMLSL",
+ "SQDMLAL","SQDMLSL")>;
+
+def CyWriteSMul : SchedWriteRes<[CyUnitVM]> { let Latency = 8;}
+def CyWriteDMul : SchedWriteRes<[CyUnitVM]> { let Latency = 10;}
+def CyReadSMul : SchedReadAdvance<4, [CyWriteSMul]>;
+def CyReadDMul : SchedReadAdvance<5, [CyWriteDMul]>;
+
+def : InstRW<[CyWriteSMul, CyReadSMul],
+ (instrs FMADDSrrr,FMSUBSrrr,FNMADDSrrr,FNMSUBSrrr,
+ FMLAv2f32,FMLAv4f32,
+ FMLAv1i32_indexed,FMLAv1i64_indexed,FMLAv2i32_indexed)>;
+def : InstRW<[CyWriteDMul, CyReadDMul],
+ (instrs FMADDDrrr,FMSUBDrrr,FNMADDDrrr,FNMSUBDrrr,
+ FMLAv2f64,FMLAv2i64_indexed,
+ FMLSv2f64,FMLSv2i64_indexed)>;
+
+def CyWritePMUL : SchedWriteRes<[CyUnitVD]> { let Latency = 3; }
+def : InstRW<[CyWritePMUL], (instregex "PMULv", "PMULLv")>;
+
+//---
+// 7.9.6 Divide and Square Root
+//---
+
+// FDIV,FSQRT
+// TODO: Add 64-bit variant with 19 cycle latency.
+// TODO: Specialize FSQRT for longer latency.
+def : WriteRes<WriteFDiv, [CyUnitVD, CyUnitFloatDiv]> {
+ let Latency = 17;
+ let ResourceCycles = [2, 17];
+}
+
+def : InstRW<[CyWriteV4], (instregex "FRECPEv","FRECPXv","URECPEv","URSQRTEv")>;
+
+def WriteFRSQRTE : SchedWriteRes<[CyUnitVM]> { let Latency = 4; }
+def : InstRW<[WriteFRSQRTE], (instregex "FRSQRTEv")>;
+
+def WriteFRECPS : SchedWriteRes<[CyUnitVM]> { let Latency = 8; }
+def WriteFRSQRTS : SchedWriteRes<[CyUnitVM]> { let Latency = 10; }
+def : InstRW<[WriteFRECPS], (instregex "FRECPSv")>;
+def : InstRW<[WriteFRSQRTS], (instregex "FRSQRTSv")>;
+
+//---
+// 7.9.7 Integer-FP Conversions
+//---
+
+// FCVT lengthen f16/f32
+def : InstRW<[WriteV], (instrs FCVTSHr,FCVTDHr,FCVTDSr)>;
+
+// FCVT,FCVTN,FCVTXN
+// SCVTF,UCVTF V,V
+// FRINT(AIMNPXZ) V,V
+def : WriteRes<WriteFCvt, [CyUnitV]> {let Latency = 4;}
+
+// SCVT/UCVT S/D, Rd = VLD5+V4: 9 cycles.
+def CyWriteCvtToFPR : WriteSequence<[WriteVLD, CyWriteV4]>;
+def : InstRW<[CyWriteCopyToFPR], (instregex "FCVT[AMNPZ][SU][SU][WX][SD]r")>;
+
+// FCVT Rd, S/D = V6+LD4: 10 cycles
+def CyWriteCvtToGPR : WriteSequence<[CyWriteV6, WriteLD]>;
+def : InstRW<[CyWriteCvtToGPR], (instregex "[SU]CVTF[SU][WX][SD]r")>;
+
+// FCVTL is a WriteV
+
+//---
+// 7.9.8-7.9.10 Cryptography, Data Transposition, Table Lookup
+//---
+
+def CyWriteCrypto2 : SchedWriteRes<[CyUnitVD]> {let Latency = 2;}
+def : InstRW<[CyWriteCrypto2], (instrs AESIMCrr, AESMCrr, SHA1Hrr,
+ AESDrr, AESErr, SHA1SU1rr, SHA256SU0rr,
+ SHA1SU0rrr)>;
+
+def CyWriteCrypto3 : SchedWriteRes<[CyUnitVD]> {let Latency = 3;}
+def : InstRW<[CyWriteCrypto3], (instrs SHA256SU1rrr)>;
+
+def CyWriteCrypto6 : SchedWriteRes<[CyUnitVD]> {let Latency = 6;}
+def : InstRW<[CyWriteCrypto6], (instrs SHA1Crrr, SHA1Mrrr, SHA1Prrr,
+ SHA256Hrrr,SHA256H2rrr)>;
+
+// TRN,UZP,ZIP are WriteV.
+
+// TBL,TBX are WriteV.
+
+//---
+// 7.9.11-7.9.14 Load/Store, single element and paired
+//---
+
+// Loading into the vector unit takes 5 cycles vs 4 for integer loads.
+def : WriteRes<WriteVLD, [CyUnitLS]> {
+ let Latency = 5;
+}
+
+// Store-load forwarding is 4 cycles.
+def : WriteRes<WriteVST, [CyUnitLS]> {
+ let Latency = 4;
+}
+
+// WriteVLDPair/VSTPair sequences are expanded by the target description.
+
+//---
+// 7.9.15 Load, element operations
+//---
+
+// Only the first WriteVLD and WriteAdr for writeback match def operands.
+// Subsequent WriteVLDs consume resources. Since all loaded values have the
+// same latency, this is acceptable.
+
+// Vd is read 5 cycles after issuing the vector load.
+def : ReadAdvance<ReadVLD, 5>;
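+// Consequence (sketch): lane loads that also read the unwritten lanes of
+// their destination (e.g. successive LD1i32 into the same register) see the
+// incoming value advanced by 5 cycles, so they are not serialized by the
+// full vector-load latency.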
+
+def : InstRW<[WriteVLD],
+ (instregex "LD1Onev(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[WriteVLD, WriteAdr],
+ (instregex "LD1Onev(8b|4h|2s|1d|16b|8h|4s|2d)_POST")>;
+
+// Register writes from the load's high half are fused micro-ops.
+def : InstRW<[WriteVLD],
+ (instregex "LD1Twov(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteVLD, WriteAdr],
+ (instregex "LD1Twov(8b|4h|2s|1d)_POST")>;
+def : InstRW<[WriteVLD, WriteVLD],
+ (instregex "LD1Twov(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteVLD, WriteAdr, WriteVLD],
+ (instregex "LD1Twov(16b|8h|4s|2d)_POST")>;
+
+def : InstRW<[WriteVLD, WriteVLD],
+ (instregex "LD1Threev(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteVLD, WriteAdr, WriteVLD],
+ (instregex "LD1Threev(8b|4h|2s|1d)_POST")>;
+def : InstRW<[WriteVLD, WriteVLD, WriteVLD],
+ (instregex "LD1Threev(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteVLD, WriteAdr, WriteVLD, WriteVLD],
+ (instregex "LD1Threev(16b|8h|4s|2d)_POST")>;
+
+def : InstRW<[WriteVLD, WriteVLD],
+ (instregex "LD1Fourv(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteVLD, WriteAdr, WriteVLD],
+ (instregex "LD1Fourv(8b|4h|2s|1d)_POST")>;
+def : InstRW<[WriteVLD, WriteVLD, WriteVLD, WriteVLD],
+ (instregex "LD1Fourv(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteVLD, WriteAdr, WriteVLD, WriteVLD, WriteVLD],
+ (instregex "LD1Fourv(16b|8h|4s|2d)_POST")>;
+
+def : InstRW<[WriteVLDShuffle, ReadVLD],
+ (instregex "LD1i(8|16|32)$")>;
+def : InstRW<[WriteVLDShuffle, ReadVLD, WriteAdr],
+ (instregex "LD1i(8|16|32)_POST")>;
+
+def : InstRW<[WriteVLDShuffle, ReadVLD], (instrs LD1i64)>;
+def : InstRW<[WriteVLDShuffle, ReadVLD, WriteAdr],(instrs LD1i64_POST)>;
+
+def : InstRW<[WriteVLDShuffle],
+ (instregex "LD1Rv(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[WriteVLDShuffle, WriteAdr],
+ (instregex "LD1Rv(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+
+def : InstRW<[WriteVLDShuffle, WriteV],
+ (instregex "LD2Twov(8b|4h|2s)$")>;
+def : InstRW<[WriteVLDShuffle, WriteAdr, WriteV],
+ (instregex "LD2Twov(8b|4h|2s)_POST$")>;
+def : InstRW<[WriteVLDShuffle, WriteVLDShuffle],
+ (instregex "LD2Twov(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteVLDShuffle, WriteAdr, WriteVLDShuffle],
+ (instregex "LD2Twov(16b|8h|4s|2d)_POST")>;
+
+def : InstRW<[WriteVLDShuffle, ReadVLD, WriteV],
+ (instregex "LD2i(8|16|32)$")>;
+def : InstRW<[WriteVLDShuffle, ReadVLD, WriteAdr, WriteV],
+ (instregex "LD2i(8|16|32)_POST")>;
+def : InstRW<[WriteVLDShuffle, ReadVLD, WriteV],
+ (instregex "LD2i64$")>;
+def : InstRW<[WriteVLDShuffle, ReadVLD, WriteAdr, WriteV],
+ (instregex "LD2i64_POST")>;
+
+def : InstRW<[WriteVLDShuffle, WriteV],
+ (instregex "LD2Rv(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[WriteVLDShuffle, WriteAdr, WriteV],
+ (instregex "LD2Rv(8b|4h|2s|1d|16b|8h|4s|2d)_POST")>;
+
+def : InstRW<[WriteVLDShuffle, WriteVLDShuffle, WriteV],
+ (instregex "LD3Threev(8b|4h|2s)$")>;
+def : InstRW<[WriteVLDShuffle, WriteAdr, WriteVLDShuffle, WriteV],
+ (instregex "LD3Threev(8b|4h|2s)_POST")>;
+def : InstRW<[WriteVLDShuffle, WriteVLDShuffle, WriteVLDShuffle],
+ (instregex "LD3Threev(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteVLDShuffle, WriteAdr, WriteVLDShuffle, WriteVLDShuffle],
+ (instregex "LD3Threev(16b|8h|4s|2d)_POST")>;
+
+def : InstRW<[WriteVLDShuffle, ReadVLD, WriteV, WriteV],
+ (instregex "LD3i(8|16|32)$")>;
+def : InstRW<[WriteVLDShuffle, ReadVLD, WriteAdr, WriteV, WriteV],
+ (instregex "LD3i(8|16|32)_POST")>;
+
+def : InstRW<[WriteVLDShuffle, ReadVLD, WriteVLDShuffle, WriteV],
+ (instregex "LD3i64$")>;
+def : InstRW<[WriteVLDShuffle, ReadVLD, WriteAdr, WriteVLDShuffle, WriteV],
+ (instregex "LD3i64_POST")>;
+
+def : InstRW<[WriteVLDShuffle, WriteV, WriteV],
+ (instregex "LD3Rv(8b|4h|2s|16b|8h|4s)$")>;
+def : InstRW<[WriteVLDShuffle, WriteAdr, WriteV, WriteV],
+ (instregex "LD3Rv(8b|4h|2s|16b|8h|4s)_POST")>;
+
+def : InstRW<[WriteVLDShuffle, WriteVLDShuffle, WriteV],
+ (instrs LD3Rv1d,LD3Rv2d)>;
+def : InstRW<[WriteVLDShuffle, WriteAdr, WriteVLDShuffle, WriteV],
+            (instrs LD3Rv1d_POST,LD3Rv2d_POST)>;
+
+def : InstRW<[WriteVLDShuffle, WriteVLDShuffle, WriteV, WriteV],
+ (instregex "LD4Fourv(8b|4h|2s)$")>;
+def : InstRW<[WriteVLDShuffle, WriteAdr, WriteVLDShuffle, WriteV, WriteV],
+ (instregex "LD4Fourv(8b|4h|2s)_POST")>;
+def : InstRW<[WriteVLDPairShuffle, WriteVLDPairShuffle,
+ WriteVLDPairShuffle, WriteVLDPairShuffle],
+ (instregex "LD4Fourv(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteVLDPairShuffle, WriteAdr, WriteVLDPairShuffle,
+ WriteVLDPairShuffle, WriteVLDPairShuffle],
+ (instregex "LD4Fourv(16b|8h|4s|2d)_POST")>;
+
+def : InstRW<[WriteVLDShuffle, ReadVLD, WriteV, WriteV, WriteV],
+ (instregex "LD4i(8|16|32)$")>;
+def : InstRW<[WriteVLDShuffle, ReadVLD, WriteAdr, WriteV, WriteV, WriteV],
+ (instregex "LD4i(8|16|32)_POST")>;
+
+
+def : InstRW<[WriteVLDShuffle, ReadVLD, WriteVLDShuffle, WriteV, WriteV],
+ (instrs LD4i64)>;
+def : InstRW<[WriteVLDShuffle, ReadVLD, WriteAdr, WriteVLDShuffle, WriteV],
+ (instrs LD4i64_POST)>;
+
+def : InstRW<[WriteVLDShuffle, WriteV, WriteV, WriteV],
+ (instregex "LD4Rv(8b|4h|2s|16b|8h|4s)$")>;
+def : InstRW<[WriteVLDShuffle, WriteAdr, WriteV, WriteV, WriteV],
+ (instregex "LD4Rv(8b|4h|2s|16b|8h|4s)_POST")>;
+
+def : InstRW<[WriteVLDShuffle, WriteVLDShuffle, WriteV, WriteV],
+ (instrs LD4Rv1d,LD4Rv2d)>;
+def : InstRW<[WriteVLDShuffle, WriteAdr, WriteVLDShuffle, WriteV, WriteV],
+ (instrs LD4Rv1d_POST,LD4Rv2d_POST)>;
+
+//---
+// 7.9.16 Store, element operations
+//---
+
+// Only the WriteAdr for writeback matches a def operand.
+// Subsequent WriteVSTs only consume resources.
+
+def : InstRW<[WriteVST],
+ (instregex "ST1Onev(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, WriteVST],
+ (instregex "ST1Onev(8b|4h|2s|1d|16b|8h|4s|2d)_POST")>;
+
+def : InstRW<[WriteVSTShuffle],
+ (instregex "ST1Twov(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteAdr, WriteVSTShuffle],
+ (instregex "ST1Twov(8b|4h|2s|1d)_POST")>;
+def : InstRW<[WriteVST, WriteVST],
+ (instregex "ST1Twov(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, WriteVST, WriteVST],
+ (instregex "ST1Twov(16b|8h|4s|2d)_POST")>;
+
+def : InstRW<[WriteVSTShuffle, WriteVST],
+ (instregex "ST1Threev(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteAdr, WriteVSTShuffle, WriteVST],
+ (instregex "ST1Threev(8b|4h|2s|1d)_POST")>;
+def : InstRW<[WriteVST, WriteVST, WriteVST],
+ (instregex "ST1Threev(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, WriteVST, WriteVST, WriteVST],
+ (instregex "ST1Threev(16b|8h|4s|2d)_POST")>;
+
+def : InstRW<[WriteVSTShuffle, WriteVSTShuffle],
+ (instregex "ST1Fourv(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteAdr, WriteVSTShuffle, WriteVSTShuffle],
+ (instregex "ST1Fourv(8b|4h|2s|1d)_POST")>;
+def : InstRW<[WriteVST, WriteVST, WriteVST, WriteVST],
+ (instregex "ST1Fourv(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, WriteVST, WriteVST, WriteVST, WriteVST],
+ (instregex "ST1Fourv(16b|8h|4s|2d)_POST")>;
+
+def : InstRW<[WriteVSTShuffle], (instregex "ST1i(8|16|32)$")>;
+def : InstRW<[WriteAdr, WriteVSTShuffle], (instregex "ST1i(8|16|32)_POST")>;
+
+def : InstRW<[WriteVSTShuffle], (instrs ST1i64)>;
+def : InstRW<[WriteAdr, WriteVSTShuffle], (instrs ST1i64_POST)>;
+
+def : InstRW<[WriteVSTShuffle],
+ (instregex "ST2Twov(8b|4h|2s)$")>;
+def : InstRW<[WriteAdr, WriteVSTShuffle],
+ (instregex "ST2Twov(8b|4h|2s)_POST")>;
+def : InstRW<[WriteVSTShuffle, WriteVSTShuffle],
+ (instregex "ST2Twov(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, WriteVSTShuffle, WriteVSTShuffle],
+ (instregex "ST2Twov(16b|8h|4s|2d)_POST")>;
+
+def : InstRW<[WriteVSTShuffle], (instregex "ST2i(8|16|32)$")>;
+def : InstRW<[WriteAdr, WriteVSTShuffle], (instregex "ST2i(8|16|32)_POST")>;
+def : InstRW<[WriteVSTShuffle], (instrs ST2i64)>;
+def : InstRW<[WriteAdr, WriteVSTShuffle], (instrs ST2i64_POST)>;
+
+def : InstRW<[WriteVSTShuffle, WriteVSTShuffle],
+ (instregex "ST3Threev(8b|4h|2s)$")>;
+def : InstRW<[WriteAdr, WriteVSTShuffle, WriteVSTShuffle],
+ (instregex "ST3Threev(8b|4h|2s)_POST")>;
+def : InstRW<[WriteVSTShuffle, WriteVSTShuffle, WriteVSTShuffle],
+ (instregex "ST3Threev(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, WriteVSTShuffle, WriteVSTShuffle, WriteVSTShuffle],
+ (instregex "ST3Threev(16b|8h|4s|2d)_POST")>;
+
+def : InstRW<[WriteVSTShuffle], (instregex "ST3i(8|16|32)$")>;
+def : InstRW<[WriteAdr, WriteVSTShuffle], (instregex "ST3i(8|16|32)_POST")>;
+
+def :InstRW<[WriteVSTShuffle, WriteVSTShuffle], (instrs ST3i64)>;
+def :InstRW<[WriteAdr, WriteVSTShuffle, WriteVSTShuffle], (instrs ST3i64_POST)>;
+
+def : InstRW<[WriteVSTPairShuffle, WriteVSTPairShuffle],
+ (instregex "ST4Fourv(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteAdr, WriteVSTPairShuffle, WriteVSTPairShuffle],
+ (instregex "ST4Fourv(8b|4h|2s|1d)_POST")>;
+def : InstRW<[WriteVSTPairShuffle, WriteVSTPairShuffle,
+ WriteVSTPairShuffle, WriteVSTPairShuffle],
+ (instregex "ST4Fourv(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, WriteVSTPairShuffle, WriteVSTPairShuffle,
+ WriteVSTPairShuffle, WriteVSTPairShuffle],
+ (instregex "ST4Fourv(16b|8h|4s|2d)_POST")>;
+
+def : InstRW<[WriteVSTPairShuffle], (instregex "ST4i(8|16|32)$")>;
+def : InstRW<[WriteAdr, WriteVSTPairShuffle], (instregex "ST4i(8|16|32)_POST")>;
+
+def : InstRW<[WriteVSTShuffle, WriteVSTShuffle], (instrs ST4i64)>;
+def : InstRW<[WriteAdr, WriteVSTShuffle, WriteVSTShuffle],(instrs ST4i64_POST)>;
+
+//---
+// Unused SchedRead types
+//---
+
+def : ReadAdvance<ReadI, 0>;
+def : ReadAdvance<ReadISReg, 0>;
+def : ReadAdvance<ReadIEReg, 0>;
+def : ReadAdvance<ReadIM, 0>;
+def : ReadAdvance<ReadIMA, 0>;
+def : ReadAdvance<ReadID, 0>;
+
+} // SchedModel = CycloneModel
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64Schedule.td b/contrib/llvm/lib/Target/AArch64/AArch64Schedule.td
index e17cdaa..eaa9110 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64Schedule.td
+++ b/contrib/llvm/lib/Target/AArch64/AArch64Schedule.td
@@ -1,4 +1,4 @@
-//===- AArch64Schedule.td - AArch64 Scheduling Definitions -*- tablegen -*-===//
+//==-- AArch64Schedule.td - AArch64 Scheduling Definitions -*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,4 +7,98 @@
//
//===----------------------------------------------------------------------===//
-def GenericItineraries : ProcessorItineraries<[], [], []>;
+// Define TII for use in SchedVariant Predicates.
+// const MachineInstr *MI and const TargetSchedModel *SchedModel
+// are defined by default.
+def : PredicateProlog<[{
+ const AArch64InstrInfo *TII =
+ static_cast<const AArch64InstrInfo*>(SchedModel->getInstrInfo());
+ (void)TII;
+}]>;
+
+// AArch64 Scheduler Definitions
+
+def WriteImm : SchedWrite; // MOVN, MOVZ
+// TODO: Provide variants for MOV32/64imm Pseudos that dynamically
+// select the correct sequence of WriteImms.
+
+def WriteI : SchedWrite; // ALU
+def WriteISReg : SchedWrite; // ALU of Shifted-Reg
+def WriteIEReg : SchedWrite; // ALU of Extended-Reg
+def ReadI : SchedRead; // ALU
+def ReadISReg : SchedRead; // ALU of Shifted-Reg
+def ReadIEReg : SchedRead; // ALU of Extended-Reg
+def WriteExtr : SchedWrite; // EXTR shifts a reg pair
+def ReadExtrHi : SchedRead; // Read the high reg of the EXTR pair
+def WriteIS : SchedWrite; // Shift/Scale
+def WriteID32 : SchedWrite; // 32-bit Divide
+def WriteID64 : SchedWrite; // 64-bit Divide
+def ReadID : SchedRead; // 32/64-bit Divide
+def WriteIM32 : SchedWrite; // 32-bit Multiply
+def WriteIM64 : SchedWrite; // 64-bit Multiply
+def ReadIM : SchedRead; // 32/64-bit Multiply
+def ReadIMA : SchedRead; // 32/64-bit Multiply Accumulate
+def WriteBr : SchedWrite; // Branch
+def WriteBrReg : SchedWrite; // Indirect Branch
+
+def WriteLD : SchedWrite; // Load from base addr plus immediate offset
+def WriteST : SchedWrite; // Store to base addr plus immediate offset
+def WriteSTP : SchedWrite; // Store a register pair.
+def WriteAdr : SchedWrite; // Address pre/post increment.
+
+def WriteLDIdx : SchedWrite; // Load from a register index (maybe scaled).
+def WriteSTIdx : SchedWrite; // Store to a register index (maybe scaled).
+def ReadAdrBase : SchedRead;   // Read the base register of a reg-offset LD/ST.
+
+// Predicate for determining when a shiftable register is shifted.
+def RegShiftedPred : SchedPredicate<[{TII->hasShiftedReg(MI)}]>;
+
+// Predicate for determining when an extendable register is extended.
+def RegExtendedPred : SchedPredicate<[{TII->hasExtendedReg(MI)}]>;
+
+// ScaledIdxPred is true if a WriteLDIdx operand will be
+// scaled. Subtargets can use this to dynamically select resources and
+// latency for WriteLDIdx and ReadAdrBase.
+def ScaledIdxPred : SchedPredicate<[{TII->isScaledAddr(MI)}]>;
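+// A subtarget picks these predicates up in its SchedWriteVariants; as a
+// sketch (mirroring the Cyclone model above, with MyWriteLDIdx being a
+// hypothetical name):
+//   def MyWriteLDIdx : SchedWriteVariant<[
+//     SchedVar<ScaledIdxPred, [WriteIS, WriteLD]>,  // scaled register offset
+//     SchedVar<NoSchedPred,   [WriteLD]>]>;         // plain register offset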
+
+// Serialized two-level address load.
+// EXAMPLE: LOADGot
+def WriteLDAdr : WriteSequence<[WriteAdr, WriteLD]>;
+
+// Serialized two-level address lookup.
+// EXAMPLE: MOVaddr...
+def WriteAdrAdr : WriteSequence<[WriteAdr, WriteAdr]>;
+
+// The second register of a load-pair.
+// LDP,LDPSW,LDNP,LDXP,LDAXP
+def WriteLDHi : SchedWrite;
+
+// Store-exclusive is a store followed by a dependent load.
+def WriteSTX : WriteSequence<[WriteST, WriteLD]>;
+
+def WriteSys : SchedWrite; // Long, variable latency system ops.
+def WriteBarrier : SchedWrite; // Memory barrier.
+def WriteHint : SchedWrite; // Hint instruction.
+
+def WriteF : SchedWrite; // General floating-point ops.
+def WriteFCmp : SchedWrite; // Floating-point compare.
+def WriteFCvt : SchedWrite; // Float conversion.
+def WriteFCopy : SchedWrite; // Float-int register copy.
+def WriteFImm : SchedWrite; // Floating-point immediate.
+def WriteFMul : SchedWrite; // Floating-point multiply.
+def WriteFDiv : SchedWrite; // Floating-point division.
+
+def WriteV : SchedWrite; // Vector ops.
+def WriteVLD : SchedWrite; // Vector loads.
+def WriteVST : SchedWrite; // Vector stores.
+
+// Read the unwritten lanes of the VLD's destination registers.
+def ReadVLD : SchedRead;
+
+// Sequential vector load and shuffle.
+def WriteVLDShuffle : WriteSequence<[WriteVLD, WriteV]>;
+def WriteVLDPairShuffle : WriteSequence<[WriteVLD, WriteV, WriteV]>;
+
+// Store a shuffled vector.
+def WriteVSTShuffle : WriteSequence<[WriteV, WriteVST]>;
+def WriteVSTPairShuffle : WriteSequence<[WriteV, WriteV, WriteVST]>;
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp b/contrib/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
index 6bbe075..1bf64fc 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
@@ -11,15 +11,50 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "arm-selectiondag-info"
#include "AArch64TargetMachine.h"
-#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;
-AArch64SelectionDAGInfo::AArch64SelectionDAGInfo(const AArch64TargetMachine &TM)
- : TargetSelectionDAGInfo(TM),
- Subtarget(&TM.getSubtarget<AArch64Subtarget>()) {
-}
+#define DEBUG_TYPE "aarch64-selectiondag-info"
+
+AArch64SelectionDAGInfo::AArch64SelectionDAGInfo(const DataLayout *DL)
+ : TargetSelectionDAGInfo(DL) {}
+
+AArch64SelectionDAGInfo::~AArch64SelectionDAGInfo() {}
+
+SDValue AArch64SelectionDAGInfo::EmitTargetCodeForMemset(
+ SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool isVolatile,
+ MachinePointerInfo DstPtrInfo) const {
+ // Check to see if there is a specialized entry-point for memory zeroing.
+ ConstantSDNode *V = dyn_cast<ConstantSDNode>(Src);
+ ConstantSDNode *SizeValue = dyn_cast<ConstantSDNode>(Size);
+ const char *bzeroEntry =
+ (V && V->isNullValue())
+ ? DAG.getTarget().getSubtarget<AArch64Subtarget>().getBZeroEntry()
+ : nullptr;
+ // For small sizes (<= 256 bytes), it is not beneficial to use bzero
+ // instead of memset.
+ if (bzeroEntry && (!SizeValue || SizeValue->getZExtValue() > 256)) {
+ const AArch64TargetLowering &TLI =
+ *static_cast<const AArch64TargetLowering *>(
+ DAG.getTarget().getTargetLowering());
-AArch64SelectionDAGInfo::~AArch64SelectionDAGInfo() {
+ EVT IntPtr = TLI.getPointerTy();
+ Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
+ TargetLowering::ArgListTy Args;
+ TargetLowering::ArgListEntry Entry;
+ Entry.Node = Dst;
+ Entry.Ty = IntPtrTy;
+ Args.push_back(Entry);
+ Entry.Node = Size;
+ Args.push_back(Entry);
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl).setChain(Chain)
+ .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
+ DAG.getExternalSymbol(bzeroEntry, IntPtr), std::move(Args), 0)
+ .setDiscardResult();
+ std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
+ return CallResult.second;
+ }
+ return SDValue();
}
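
As a rough sketch of the decision EmitTargetCodeForMemset makes above (illustrative only; shouldUseBZero and its parameter names are hypothetical, not part of the patch):

    #include <cstdint>

    // Sketch of the preconditions checked before diverting a memset to bzero.
    // StoresZero:    the memset value is a known constant zero.
    // HasBZeroEntry: the subtarget advertises a bzero entry point (Darwin only).
    // KnownSize:     the size if it is a compile-time constant, else nullptr.
    static bool shouldUseBZero(bool StoresZero, bool HasBZeroEntry,
                               const uint64_t *KnownSize) {
      if (!StoresZero || !HasBZeroEntry)
        return false;
      // Small, known sizes (<= 256 bytes) are better served by the inline
      // memset expansion, so bzero is only used for unknown or larger sizes.
      return KnownSize == nullptr || *KnownSize > 256;
    }
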
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.h b/contrib/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.h
index d412ed2..1180eea 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.h
+++ b/contrib/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.h
@@ -11,22 +11,23 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_AARCH64SELECTIONDAGINFO_H
-#define LLVM_AARCH64SELECTIONDAGINFO_H
+#ifndef AArch64SELECTIONDAGINFO_H
+#define AArch64SELECTIONDAGINFO_H
#include "llvm/Target/TargetSelectionDAGInfo.h"
namespace llvm {
-class AArch64TargetMachine;
-
class AArch64SelectionDAGInfo : public TargetSelectionDAGInfo {
- const AArch64Subtarget *Subtarget;
public:
- explicit AArch64SelectionDAGInfo(const AArch64TargetMachine &TM);
+ explicit AArch64SelectionDAGInfo(const DataLayout *DL);
~AArch64SelectionDAGInfo();
-};
+ SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
+ SDValue Dst, SDValue Src, SDValue Size,
+ unsigned Align, bool isVolatile,
+ MachinePointerInfo DstPtrInfo) const override;
+};
}
#endif
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp b/contrib/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp
new file mode 100644
index 0000000..45f8ddb
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp
@@ -0,0 +1,168 @@
+//===--- AArch64StorePairSuppress.cpp --- Suppress store pair formation ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass identifies floating point stores that should not be combined into
+// store pairs. Later we may do the same for floating point loads.
+// ===---------------------------------------------------------------------===//
+
+#include "AArch64InstrInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineTraceMetrics.h"
+#include "llvm/CodeGen/TargetSchedule.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "aarch64-stp-suppress"
+
+namespace {
+class AArch64StorePairSuppress : public MachineFunctionPass {
+ const AArch64InstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ const MachineRegisterInfo *MRI;
+ MachineFunction *MF;
+ TargetSchedModel SchedModel;
+ MachineTraceMetrics *Traces;
+ MachineTraceMetrics::Ensemble *MinInstr;
+
+public:
+ static char ID;
+ AArch64StorePairSuppress() : MachineFunctionPass(ID) {}
+
+ virtual const char *getPassName() const override {
+ return "AArch64 Store Pair Suppression";
+ }
+
+ bool runOnMachineFunction(MachineFunction &F) override;
+
+private:
+ bool shouldAddSTPToBlock(const MachineBasicBlock *BB);
+
+ bool isNarrowFPStore(const MachineInstr &MI);
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineTraceMetrics>();
+ AU.addPreserved<MachineTraceMetrics>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+};
+char AArch64StorePairSuppress::ID = 0;
+} // anonymous
+
+FunctionPass *llvm::createAArch64StorePairSuppressPass() {
+ return new AArch64StorePairSuppress();
+}
+
+/// Return true if an STP can be added to this block without increasing the
+/// critical resource height. STP is good to form in Ld/St limited blocks and
+/// bad to form in floating-point limited blocks. This is true independent of the
+/// critical path. If the critical path is longer than the resource height, the
+/// extra vector ops can limit physreg renaming. Otherwise, it could simply
+/// oversaturate the vector units.
+bool AArch64StorePairSuppress::shouldAddSTPToBlock(const MachineBasicBlock *BB) {
+ if (!MinInstr)
+ MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);
+
+ MachineTraceMetrics::Trace BBTrace = MinInstr->getTrace(BB);
+ unsigned ResLength = BBTrace.getResourceLength();
+
+ // Get the machine model's scheduling class for STPQi.
+ // Bypass TargetSchedule's SchedClass resolution since we only have an opcode.
+ unsigned SCIdx = TII->get(AArch64::STPDi).getSchedClass();
+ const MCSchedClassDesc *SCDesc =
+ SchedModel.getMCSchedModel()->getSchedClassDesc(SCIdx);
+
+ // If a subtarget does not define resources for STPQi, bail here.
+ if (SCDesc->isValid() && !SCDesc->isVariant()) {
+ unsigned ResLenWithSTP = BBTrace.getResourceLength(
+ ArrayRef<const MachineBasicBlock *>(), SCDesc);
+ if (ResLenWithSTP > ResLength) {
+ DEBUG(dbgs() << " Suppress STP in BB: " << BB->getNumber()
+ << " resources " << ResLength << " -> " << ResLenWithSTP
+ << "\n");
+ return false;
+ }
+ }
+ return true;
+}
+
+/// Return true if this is a floating-point store smaller than the V reg. On
+/// Cyclone, these require a vector shuffle before storing a pair.
+/// Ideally we would call getMatchingPairOpcode() and have the machine model
+/// tell us if it's profitable with no cpu knowledge here.
+///
+/// FIXME: We plan to develop a decent Target abstraction for simple loads and
+/// stores. Until then use a nasty switch similar to AArch64LoadStoreOptimizer.
+bool AArch64StorePairSuppress::isNarrowFPStore(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ default:
+ return false;
+ case AArch64::STRSui:
+ case AArch64::STRDui:
+ case AArch64::STURSi:
+ case AArch64::STURDi:
+ return true;
+ }
+}
+
+bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &mf) {
+ MF = &mf;
+ TII = static_cast<const AArch64InstrInfo *>(MF->getTarget().getInstrInfo());
+ TRI = MF->getTarget().getRegisterInfo();
+ MRI = &MF->getRegInfo();
+ const TargetSubtargetInfo &ST =
+ MF->getTarget().getSubtarget<TargetSubtargetInfo>();
+ SchedModel.init(*ST.getSchedModel(), &ST, TII);
+
+ Traces = &getAnalysis<MachineTraceMetrics>();
+ MinInstr = nullptr;
+
+ DEBUG(dbgs() << "*** " << getPassName() << ": " << MF->getName() << '\n');
+
+ if (!SchedModel.hasInstrSchedModel()) {
+ DEBUG(dbgs() << " Skipping pass: no machine model present.\n");
+ return false;
+ }
+
+ // Check for a sequence of stores to the same base address. We don't need to
+ // precisely determine whether a store pair can be formed. But we do want to
+ // filter out most situations where we can't form store pairs to avoid
+ // computing trace metrics in those cases.
+ for (auto &MBB : *MF) {
+ bool SuppressSTP = false;
+ unsigned PrevBaseReg = 0;
+ for (auto &MI : MBB) {
+ if (!isNarrowFPStore(MI))
+ continue;
+ unsigned BaseReg;
+ unsigned Offset;
+ if (TII->getLdStBaseRegImmOfs(&MI, BaseReg, Offset, TRI)) {
+ if (PrevBaseReg == BaseReg) {
+ // If this block can take STPs, skip ahead to the next block.
+ if (!SuppressSTP && shouldAddSTPToBlock(MI.getParent()))
+ break;
+ // Otherwise, continue unpairing the stores in this block.
+ DEBUG(dbgs() << "Unpairing store " << MI << "\n");
+ SuppressSTP = true;
+ TII->suppressLdStPair(&MI);
+ }
+ PrevBaseReg = BaseReg;
+ } else
+ PrevBaseReg = 0;
+ }
+ }
+ // This pass just sets some internal MachineMemOperand flags. It can't really
+ // invalidate anything.
+ return false;
+}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64Subtarget.cpp b/contrib/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
index 5c693c1..bb0b72c 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -1,4 +1,4 @@
-//===-- AArch64Subtarget.cpp - AArch64 Subtarget Information --------------===//
+//===-- AArch64Subtarget.cpp - AArch64 Subtarget Information ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,55 +7,124 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements the AArch64 specific subclass of TargetSubtargetInfo.
+// This file implements the AArch64 specific subclass of TargetSubtarget.
//
//===----------------------------------------------------------------------===//
+#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
-#include "AArch64RegisterInfo.h"
-#include "MCTargetDesc/AArch64MCTargetDesc.h"
-#include "llvm/IR/GlobalValue.h"
-#include "llvm/Target/TargetSubtargetInfo.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineScheduler.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "aarch64-subtarget"
-#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
+#define GET_SUBTARGETINFO_TARGET_DESC
#include "AArch64GenSubtargetInfo.inc"
-using namespace llvm;
+static cl::opt<bool>
+EnableEarlyIfConvert("aarch64-early-ifcvt", cl::desc("Enable the early if "
+ "converter pass"), cl::init(true), cl::Hidden);
-// Pin the vtable to this file.
-void AArch64Subtarget::anchor() {}
+AArch64Subtarget &
+AArch64Subtarget::initializeSubtargetDependencies(StringRef FS) {
+ // Determine default and user-specified characteristics
-AArch64Subtarget::AArch64Subtarget(StringRef TT, StringRef CPU, StringRef FS)
- : AArch64GenSubtargetInfo(TT, CPU, FS), HasFPARMv8(false), HasNEON(false),
- HasCrypto(false), TargetTriple(TT), CPUString(CPU) {
+ if (CPUString.empty())
+ CPUString = "generic";
- initializeSubtargetFeatures(CPU, FS);
+ ParseSubtargetFeatures(CPUString, FS);
+ return *this;
}
-void AArch64Subtarget::initializeSubtargetFeatures(StringRef CPU,
- StringRef FS) {
- if (CPU.empty())
- CPUString = "generic";
+AArch64Subtarget::AArch64Subtarget(const std::string &TT,
+ const std::string &CPU,
+ const std::string &FS, TargetMachine &TM,
+ bool LittleEndian)
+ : AArch64GenSubtargetInfo(TT, CPU, FS), ARMProcFamily(Others),
+ HasFPARMv8(false), HasNEON(false), HasCrypto(false), HasCRC(false),
+ HasZeroCycleRegMove(false), HasZeroCycleZeroing(false), CPUString(CPU),
+ TargetTriple(TT),
+ // This nested ternary is horrible, but DL needs to be properly
+ // initialized
+ // before TLInfo is constructed.
+ DL(isTargetMachO()
+ ? "e-m:o-i64:64-i128:128-n32:64-S128"
+ : (LittleEndian ? "e-m:e-i64:64-i128:128-n32:64-S128"
+ : "E-m:e-i64:64-i128:128-n32:64-S128")),
+ FrameLowering(), InstrInfo(initializeSubtargetDependencies(FS)),
+ TSInfo(&DL), TLInfo(TM) {}
- std::string FullFS = FS;
- if (CPUString == "generic") {
- // Enable FP by default.
- if (FullFS.empty())
- FullFS = "+fp-armv8";
+/// ClassifyGlobalReference - Find the target operand flags that describe
+/// how a global value should be referenced for the current subtarget.
+unsigned char
+AArch64Subtarget::ClassifyGlobalReference(const GlobalValue *GV,
+ const TargetMachine &TM) const {
+
+ // Determine whether this is a reference to a definition or a declaration.
+ // Materializable GVs (in JIT lazy compilation mode) do not require an extra
+ // load from stub.
+ bool isDecl = GV->hasAvailableExternallyLinkage();
+ if (GV->isDeclaration() && !GV->isMaterializable())
+ isDecl = true;
+
+ // MachO large model always goes via a GOT, simply to get a single 8-byte
+ // absolute relocation on all global addresses.
+ if (TM.getCodeModel() == CodeModel::Large && isTargetMachO())
+ return AArch64II::MO_GOT;
+
+ // The small code model's direct accesses use ADRP, which cannot necessarily
+ // produce the value 0 (if the code is above 4GB). Therefore they must use the
+ // GOT.
+ if (TM.getCodeModel() == CodeModel::Small && GV->isWeakForLinker() && isDecl)
+ return AArch64II::MO_GOT;
+
+ // If symbol visibility is hidden, the extra load is not needed if
+ // the symbol is definitely defined in the current translation unit.
+
+ // The handling of non-hidden symbols in PIC mode is rather target-dependent:
+ // + On MachO, if the symbol is defined in this module the GOT can be
+ // skipped.
+ // + On ELF, the R_AARCH64_COPY relocation means that even symbols actually
+ // defined could end up in unexpected places. Use a GOT.
+ if (TM.getRelocationModel() != Reloc::Static && GV->hasDefaultVisibility()) {
+ if (isTargetMachO())
+ return (isDecl || GV->isWeakForLinker()) ? AArch64II::MO_GOT
+ : AArch64II::MO_NO_FLAG;
else
- FullFS = "+fp-armv8," + FullFS;
+ // No need to go through the GOT for local symbols on ELF.
+ return GV->hasLocalLinkage() ? AArch64II::MO_NO_FLAG : AArch64II::MO_GOT;
}
- ParseSubtargetFeatures(CPU, FullFS);
+ return AArch64II::MO_NO_FLAG;
}
-bool AArch64Subtarget::GVIsIndirectSymbol(const GlobalValue *GV,
- Reloc::Model RelocM) const {
- if (RelocM == Reloc::Static)
- return false;
+/// This function returns the name of a function which has an interface
+/// like the non-standard bzero function, if such a function exists on
+/// the current subtarget and it is considered preferable over
+/// memset with zero passed as the second argument. Otherwise it
+/// returns null.
+const char *AArch64Subtarget::getBZeroEntry() const {
+ // Prefer bzero on Darwin only.
+ if (isTargetDarwin())
+ return "bzero";
+
+ return nullptr;
+}
+
+void AArch64Subtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
+ MachineInstr *begin, MachineInstr *end,
+ unsigned NumRegionInstrs) const {
+ // LNT runs (at least on Cyclone) showed reasonably significant gains for
+ // bi-directional scheduling, e.g. on 253.perlbmk.
+ Policy.OnlyTopDown = false;
+ Policy.OnlyBottomUp = false;
+}
- return !GV->hasLocalLinkage() && !GV->hasHiddenVisibility();
+bool AArch64Subtarget::enableEarlyIfConversion() const {
+ return EnableEarlyIfConvert;
}
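
As a rough sketch of the decision tree in ClassifyGlobalReference above (illustrative only; classifyRef and its boolean parameters are hypothetical, not part of the patch):

    enum class GVRef { Direct, GOT };

    // Mirrors AArch64Subtarget::ClassifyGlobalReference, minus the LLVM types.
    static GVRef classifyRef(bool LargeCodeModelMachO, bool SmallModelWeakDecl,
                             bool PICWithDefaultVisibility, bool IsMachO,
                             bool IsDeclOrWeak, bool HasLocalLinkage) {
      if (LargeCodeModelMachO)
        return GVRef::GOT;            // single 8-byte absolute GOT relocation
      if (SmallModelWeakDecl)
        return GVRef::GOT;            // ADRP cannot produce the value 0
      if (PICWithDefaultVisibility) {
        if (IsMachO)
          return IsDeclOrWeak ? GVRef::GOT : GVRef::Direct;
        // ELF: R_AARCH64_COPY may relocate even defined symbols, so use the
        // GOT unless the symbol has local linkage.
        return HasLocalLinkage ? GVRef::Direct : GVRef::GOT;
      }
      return GVRef::Direct;
    }
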
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64Subtarget.h b/contrib/llvm/lib/Target/AArch64/AArch64Subtarget.h
index bbfd3bc..52124f6 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64Subtarget.h
+++ b/contrib/llvm/lib/Target/AArch64/AArch64Subtarget.h
@@ -1,4 +1,4 @@
-//==-- AArch64Subtarget.h - Define Subtarget for the AArch64 ---*- C++ -*--===//
+//===--- AArch64Subtarget.h - Define Subtarget for the AArch64 -*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,66 +7,126 @@
//
//===----------------------------------------------------------------------===//
//
-// This file declares the AArch64 specific subclass of TargetSubtargetInfo.
+// This file declares the AArch64 specific subclass of TargetSubtarget.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_TARGET_AARCH64_SUBTARGET_H
-#define LLVM_TARGET_AARCH64_SUBTARGET_H
+#ifndef AArch64SUBTARGET_H
+#define AArch64SUBTARGET_H
-#include "llvm/ADT/Triple.h"
+#include "AArch64InstrInfo.h"
+#include "AArch64FrameLowering.h"
+#include "AArch64ISelLowering.h"
+#include "AArch64RegisterInfo.h"
+#include "AArch64SelectionDAGInfo.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/Target/TargetSubtargetInfo.h"
+#include <string>
#define GET_SUBTARGETINFO_HEADER
#include "AArch64GenSubtargetInfo.inc"
-#include <string>
-
namespace llvm {
-class StringRef;
class GlobalValue;
+class StringRef;
class AArch64Subtarget : public AArch64GenSubtargetInfo {
- virtual void anchor();
protected:
+ enum ARMProcFamilyEnum {Others, CortexA53, CortexA57, Cyclone};
+
+ /// ARMProcFamily - ARM processor family: Cortex-A53, Cortex-A57, and others.
+ ARMProcFamilyEnum ARMProcFamily;
+
bool HasFPARMv8;
bool HasNEON;
bool HasCrypto;
+ bool HasCRC;
- /// TargetTriple - What processor and OS we're targeting.
- Triple TargetTriple;
+ // HasZeroCycleRegMove - Has zero-cycle register mov instructions.
+ bool HasZeroCycleRegMove;
+
+ // HasZeroCycleZeroing - Has zero-cycle zeroing instructions.
+ bool HasZeroCycleZeroing;
/// CPUString - String name of used CPU.
std::string CPUString;
+ /// TargetTriple - What processor and OS we're targeting.
+ Triple TargetTriple;
+
+ const DataLayout DL;
+ AArch64FrameLowering FrameLowering;
+ AArch64InstrInfo InstrInfo;
+ AArch64SelectionDAGInfo TSInfo;
+ AArch64TargetLowering TLInfo;
private:
- void initializeSubtargetFeatures(StringRef CPU, StringRef FS);
+ /// initializeSubtargetDependencies - Initializes using CPUString and the
+ /// passed in feature string so that we can use initializer lists for
+ /// subtarget initialization.
+ AArch64Subtarget &initializeSubtargetDependencies(StringRef FS);
public:
/// This constructor initializes the data members to match that
/// of the specified triple.
- ///
- AArch64Subtarget(StringRef TT, StringRef CPU, StringRef FS);
+ AArch64Subtarget(const std::string &TT, const std::string &CPU,
+ const std::string &FS, TargetMachine &TM, bool LittleEndian);
- virtual bool enableMachineScheduler() const {
- return true;
+ const AArch64SelectionDAGInfo *getSelectionDAGInfo() const { return &TSInfo; }
+ const AArch64FrameLowering *getFrameLowering() const {
+ return &FrameLowering;
+ }
+ const AArch64TargetLowering *getTargetLowering() const {
+ return &TLInfo;
}
+ const AArch64InstrInfo *getInstrInfo() const { return &InstrInfo; }
+ const DataLayout *getDataLayout() const { return &DL; }
+ bool enableMachineScheduler() const override { return true; }
+
+ bool hasZeroCycleRegMove() const { return HasZeroCycleRegMove; }
+
+ bool hasZeroCycleZeroing() const { return HasZeroCycleZeroing; }
+
+ bool hasFPARMv8() const { return HasFPARMv8; }
+ bool hasNEON() const { return HasNEON; }
+ bool hasCrypto() const { return HasCrypto; }
+ bool hasCRC() const { return HasCRC; }
+
+ bool isLittleEndian() const { return DL.isLittleEndian(); }
+
+ bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
+
+ bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
+
+ bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
+
+ bool isCyclone() const { return CPUString == "cyclone"; }
+
+ /// getMaxInlineSizeThreshold - Returns the maximum memset / memcpy size
+ /// that still makes it profitable to inline the call.
+ unsigned getMaxInlineSizeThreshold() const { return 64; }
/// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
- bool GVIsIndirectSymbol(const GlobalValue *GV, Reloc::Model RelocM) const;
+ /// ClassifyGlobalReference - Find the target operand flags that describe
+ /// how a global value should be referenced for the current subtarget.
+ unsigned char ClassifyGlobalReference(const GlobalValue *GV,
+ const TargetMachine &TM) const;
- bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
- bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
+ /// This function returns the name of a function which has an interface
+ /// like the non-standard bzero function, if such a function exists on
+ /// the current subtarget and it is considered preferable over
+ /// memset with zero passed as the second argument. Otherwise it
+ /// returns null.
+ const char *getBZeroEntry() const;
- bool hasFPARMv8() const { return HasFPARMv8; }
- bool hasNEON() const { return HasNEON; }
- bool hasCrypto() const { return HasCrypto; }
+ void overrideSchedPolicy(MachineSchedPolicy &Policy, MachineInstr *begin,
+ MachineInstr *end,
+ unsigned NumRegionInstrs) const override;
- const std::string & getCPUString() const { return CPUString; }
+ bool enableEarlyIfConversion() const override;
};
} // End llvm namespace
-#endif // LLVM_TARGET_AARCH64_SUBTARGET_H
+#endif // AArch64SUBTARGET_H
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp b/contrib/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
index f1695e2..f99b90b 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -7,76 +7,210 @@
//
//===----------------------------------------------------------------------===//
//
-// This file contains the implementation of the AArch64TargetMachine
-// methods. Principally just setting up the passes needed to generate correct
-// code on this architecture.
//
//===----------------------------------------------------------------------===//
#include "AArch64.h"
#include "AArch64TargetMachine.h"
-#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "llvm/PassManager.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
-
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/Scalar.h"
using namespace llvm;
+static cl::opt<bool>
+EnableCCMP("aarch64-ccmp", cl::desc("Enable the CCMP formation pass"),
+ cl::init(true), cl::Hidden);
+
+static cl::opt<bool>
+EnableStPairSuppress("aarch64-stp-suppress", cl::desc("Suppress STP for AArch64"),
+ cl::init(true), cl::Hidden);
+
+static cl::opt<bool>
+EnableAdvSIMDScalar("aarch64-simd-scalar", cl::desc("Enable use of AdvSIMD scalar"
+ " integer instructions"), cl::init(false), cl::Hidden);
+
+static cl::opt<bool>
+EnablePromoteConstant("aarch64-promote-const", cl::desc("Enable the promote "
+ "constant pass"), cl::init(true), cl::Hidden);
+
+static cl::opt<bool>
+EnableCollectLOH("aarch64-collect-loh", cl::desc("Enable the pass that emits the"
+ " linker optimization hints (LOH)"), cl::init(true),
+ cl::Hidden);
+
+static cl::opt<bool>
+EnableDeadRegisterElimination("aarch64-dead-def-elimination", cl::Hidden,
+ cl::desc("Enable the pass that removes dead"
+ " definitons and replaces stores to"
+ " them with stores to the zero"
+ " register"),
+ cl::init(true));
+
+static cl::opt<bool>
+EnableLoadStoreOpt("aarch64-load-store-opt", cl::desc("Enable the load/store pair"
+ " optimization pass"), cl::init(true), cl::Hidden);
+
+static cl::opt<bool>
+EnableAtomicTidy("aarch64-atomic-cfg-tidy", cl::Hidden,
+ cl::desc("Run SimplifyCFG after expanding atomic operations"
+ " to make use of cmpxchg flow-based information"),
+ cl::init(true));
+
extern "C" void LLVMInitializeAArch64Target() {
- RegisterTargetMachine<AArch64TargetMachine> X(TheAArch64Target);
+ // Register the target.
+ RegisterTargetMachine<AArch64leTargetMachine> X(TheAArch64leTarget);
+ RegisterTargetMachine<AArch64beTargetMachine> Y(TheAArch64beTarget);
+
+ RegisterTargetMachine<AArch64leTargetMachine> Z(TheARM64leTarget);
+ RegisterTargetMachine<AArch64beTargetMachine> W(TheARM64beTarget);
}
+/// TargetMachine ctor - Create an AArch64 architecture model.
+///
AArch64TargetMachine::AArch64TargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
- : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- Subtarget(TT, CPU, FS),
- InstrInfo(Subtarget),
- DL("e-p:64:64-i64:64:64-i128:128:128-s0:32:32-f128:128:128-n32:64-S128"),
- TLInfo(*this),
- TSInfo(*this),
- FrameLowering(Subtarget) {
+ CodeGenOpt::Level OL,
+ bool LittleEndian)
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ Subtarget(TT, CPU, FS, *this, LittleEndian) {
initAsmInfo();
}
+void AArch64leTargetMachine::anchor() { }
+
+AArch64leTargetMachine::
+AArch64leTargetMachine(const Target &T, StringRef TT,
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
+
+void AArch64beTargetMachine::anchor() { }
+
+AArch64beTargetMachine::
+AArch64beTargetMachine(const Target &T, StringRef TT,
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
+
namespace {
/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
AArch64PassConfig(AArch64TargetMachine *TM, PassManagerBase &PM)
- : TargetPassConfig(TM, PM) {}
+ : TargetPassConfig(TM, PM) {}
AArch64TargetMachine &getAArch64TargetMachine() const {
return getTM<AArch64TargetMachine>();
}
- const AArch64Subtarget &getAArch64Subtarget() const {
- return *getAArch64TargetMachine().getSubtargetImpl();
- }
-
- virtual bool addInstSelector();
- virtual bool addPreEmitPass();
+ void addIRPasses() override;
+ bool addPreISel() override;
+ bool addInstSelector() override;
+ bool addILPOpts() override;
+ bool addPreRegAlloc() override;
+ bool addPostRegAlloc() override;
+ bool addPreSched2() override;
+ bool addPreEmitPass() override;
};
} // namespace
+void AArch64TargetMachine::addAnalysisPasses(PassManagerBase &PM) {
+ // First add the target-independent BasicTTI pass, then our AArch64 pass. This
+ // allows the AArch64 pass to delegate to the target independent layer when
+ // appropriate.
+ PM.add(createBasicTargetTransformInfoPass(this));
+ PM.add(createAArch64TargetTransformInfoPass(this));
+}
+
TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
return new AArch64PassConfig(this, PM);
}
-bool AArch64PassConfig::addPreEmitPass() {
- addPass(&UnpackMachineBundlesID);
- addPass(createAArch64BranchFixupPass());
- return true;
+void AArch64PassConfig::addIRPasses() {
+ // Always expand atomic operations, we don't deal with atomicrmw or cmpxchg
+ // ourselves.
+ addPass(createAtomicExpandLoadLinkedPass(TM));
+
+ // Cmpxchg instructions are often used with a subsequent comparison to
+ // determine whether it succeeded. We can exploit existing control-flow in
+ // ldrex/strex loops to simplify this, but it needs tidying up.
+ if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
+ addPass(createCFGSimplificationPass());
+
+ TargetPassConfig::addIRPasses();
+}
+
+// Pass Pipeline Configuration
+bool AArch64PassConfig::addPreISel() {
+ // Run promote constant before global merge, so that the promoted constants
+ // get a chance to be merged
+ if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant)
+ addPass(createAArch64PromoteConstantPass());
+ if (TM->getOptLevel() != CodeGenOpt::None)
+ addPass(createGlobalMergePass(TM));
+ if (TM->getOptLevel() != CodeGenOpt::None)
+ addPass(createAArch64AddressTypePromotionPass());
+
+ return false;
}
bool AArch64PassConfig::addInstSelector() {
- addPass(createAArch64ISelDAG(getAArch64TargetMachine(), getOptLevel()));
+ addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));
- // For ELF, cleanup any local-dynamic TLS accesses.
- if (getAArch64Subtarget().isTargetELF() && getOptLevel() != CodeGenOpt::None)
+ // For ELF, clean up any local-dynamic TLS accesses (i.e. combine as many
+ // references to _TLS_MODULE_BASE_ as possible).
+ if (TM->getSubtarget<AArch64Subtarget>().isTargetELF() &&
+ getOptLevel() != CodeGenOpt::None)
addPass(createAArch64CleanupLocalDynamicTLSPass());
return false;
}
+
+bool AArch64PassConfig::addILPOpts() {
+ if (EnableCCMP)
+ addPass(createAArch64ConditionalCompares());
+ addPass(&EarlyIfConverterID);
+ if (EnableStPairSuppress)
+ addPass(createAArch64StorePairSuppressPass());
+ return true;
+}
+
+bool AArch64PassConfig::addPreRegAlloc() {
+ // Use AdvSIMD scalar instructions whenever profitable.
+ if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar)
+ addPass(createAArch64AdvSIMDScalar());
+ return true;
+}
+
+bool AArch64PassConfig::addPostRegAlloc() {
+ // Change dead register definitions to refer to the zero register.
+ if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
+ addPass(createAArch64DeadRegisterDefinitions());
+ return true;
+}
+
+bool AArch64PassConfig::addPreSched2() {
+ // Expand some pseudo instructions to allow proper scheduling.
+ addPass(createAArch64ExpandPseudoPass());
+ // Use load/store pair instructions when possible.
+ if (TM->getOptLevel() != CodeGenOpt::None && EnableLoadStoreOpt)
+ addPass(createAArch64LoadStoreOptimizationPass());
+ return true;
+}
+
+bool AArch64PassConfig::addPreEmitPass() {
+ // Relax conditional branch instructions if they're otherwise out of
+ // range of their destination.
+ addPass(createAArch64BranchRelaxation());
+ if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
+ TM->getSubtarget<AArch64Subtarget>().isTargetMachO())
+ addPass(createAArch64CollectLOHPass());
+ return true;
+}
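
Each cl::opt defined at the top of this file gates one of the passes added above, so a pass can be toggled from the llc command line when investigating codegen differences, for example (hypothetical invocation; any aarch64 triple behaves the same way):

    llc -mtriple=aarch64-linux-gnu -aarch64-ccmp=false -aarch64-stp-suppress=false input.ll
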
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64TargetMachine.h b/contrib/llvm/lib/Target/AArch64/AArch64TargetMachine.h
index c1f47c2..852cb3f 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64TargetMachine.h
+++ b/contrib/llvm/lib/Target/AArch64/AArch64TargetMachine.h
@@ -1,4 +1,4 @@
-//=== AArch64TargetMachine.h - Define TargetMachine for AArch64 -*- C++ -*-===//
+//==-- AArch64TargetMachine.h - Define TargetMachine for AArch64 -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@@ -11,13 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_AARCH64TARGETMACHINE_H
-#define LLVM_AARCH64TARGETMACHINE_H
+#ifndef AArch64TARGETMACHINE_H
+#define AArch64TARGETMACHINE_H
-#include "AArch64FrameLowering.h"
-#include "AArch64ISelLowering.h"
#include "AArch64InstrInfo.h"
-#include "AArch64SelectionDAGInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
@@ -25,45 +22,66 @@
namespace llvm {
class AArch64TargetMachine : public LLVMTargetMachine {
- AArch64Subtarget Subtarget;
- AArch64InstrInfo InstrInfo;
- const DataLayout DL;
- AArch64TargetLowering TLInfo;
- AArch64SelectionDAGInfo TSInfo;
- AArch64FrameLowering FrameLowering;
+protected:
+ AArch64Subtarget Subtarget;
public:
AArch64TargetMachine(const Target &T, StringRef TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL);
+ CodeGenOpt::Level OL, bool IsLittleEndian);
- const AArch64InstrInfo *getInstrInfo() const {
- return &InstrInfo;
+ const AArch64Subtarget *getSubtargetImpl() const override {
+ return &Subtarget;
}
-
- const AArch64FrameLowering *getFrameLowering() const {
- return &FrameLowering;
+ const AArch64TargetLowering *getTargetLowering() const override {
+ return getSubtargetImpl()->getTargetLowering();
}
-
- const AArch64TargetLowering *getTargetLowering() const {
- return &TLInfo;
+ const DataLayout *getDataLayout() const override {
+ return getSubtargetImpl()->getDataLayout();
}
-
- const AArch64SelectionDAGInfo *getSelectionDAGInfo() const {
- return &TSInfo;
+ const AArch64FrameLowering *getFrameLowering() const override {
+ return getSubtargetImpl()->getFrameLowering();
+ }
+ const AArch64InstrInfo *getInstrInfo() const override {
+ return getSubtargetImpl()->getInstrInfo();
}
+ const AArch64RegisterInfo *getRegisterInfo() const override {
+ return &getInstrInfo()->getRegisterInfo();
+ }
+ const AArch64SelectionDAGInfo *getSelectionDAGInfo() const override {
+ return getSubtargetImpl()->getSelectionDAGInfo();
+ }
+
+ // Pass Pipeline Configuration
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
- const AArch64Subtarget *getSubtargetImpl() const { return &Subtarget; }
+ /// \brief Register AArch64 analysis passes with a pass manager.
+ void addAnalysisPasses(PassManagerBase &PM) override;
+};
- const DataLayout *getDataLayout() const { return &DL; }
+// AArch64leTargetMachine - AArch64 little endian target machine.
+//
+class AArch64leTargetMachine : public AArch64TargetMachine {
+ virtual void anchor();
+public:
+ AArch64leTargetMachine(const Target &T, StringRef TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
+};
- const TargetRegisterInfo *getRegisterInfo() const {
- return &InstrInfo.getRegisterInfo();
- }
- TargetPassConfig *createPassConfig(PassManagerBase &PM);
+// AArch64beTargetMachine - AArch64 big endian target machine.
+//
+class AArch64beTargetMachine : public AArch64TargetMachine {
+ virtual void anchor();
+public:
+ AArch64beTargetMachine(const Target &T, StringRef TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
};
-}
+} // end namespace llvm
#endif
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64TargetObjectFile.cpp b/contrib/llvm/lib/Target/AArch64/AArch64TargetObjectFile.cpp
index f8f2119..4069038 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64TargetObjectFile.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64TargetObjectFile.cpp
@@ -6,26 +6,47 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file deals with any AArch64 specific requirements on object files.
-//
-//===----------------------------------------------------------------------===//
-
#include "AArch64TargetObjectFile.h"
-
+#include "AArch64TargetMachine.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/Support/Dwarf.h"
using namespace llvm;
+using namespace dwarf;
-void
-AArch64LinuxTargetObjectFile::Initialize(MCContext &Ctx,
- const TargetMachine &TM) {
+void AArch64_ELFTargetObjectFile::Initialize(MCContext &Ctx,
+ const TargetMachine &TM) {
TargetLoweringObjectFileELF::Initialize(Ctx, TM);
InitializeELF(TM.Options.UseInitArray);
}
-void
-AArch64ElfTargetObjectFile::Initialize(MCContext &Ctx,
- const TargetMachine &TM) {
- TargetLoweringObjectFileELF::Initialize(Ctx, TM);
- InitializeELF(TM.Options.UseInitArray);
+const MCExpr *AArch64_MachoTargetObjectFile::getTTypeGlobalReference(
+ const GlobalValue *GV, unsigned Encoding, Mangler &Mang,
+ const TargetMachine &TM, MachineModuleInfo *MMI,
+ MCStreamer &Streamer) const {
+ // On Darwin, we can reference dwarf symbols with foo@GOT-., which
+ // is an indirect pc-relative reference. The default implementation
+ // won't reference using the GOT, so we need this target-specific
+ // version.
+ if (Encoding & (DW_EH_PE_indirect | DW_EH_PE_pcrel)) {
+ const MCSymbol *Sym = TM.getSymbol(GV, Mang);
+ const MCExpr *Res =
+ MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_GOT, getContext());
+ MCSymbol *PCSym = getContext().CreateTempSymbol();
+ Streamer.EmitLabel(PCSym);
+ const MCExpr *PC = MCSymbolRefExpr::Create(PCSym, getContext());
+ return MCBinaryExpr::CreateSub(Res, PC, getContext());
+ }
+
+ return TargetLoweringObjectFileMachO::getTTypeGlobalReference(
+ GV, Encoding, Mang, TM, MMI, Streamer);
+}
+
+MCSymbol *AArch64_MachoTargetObjectFile::getCFIPersonalitySymbol(
+ const GlobalValue *GV, Mangler &Mang, const TargetMachine &TM,
+ MachineModuleInfo *MMI) const {
+ return TM.getSymbol(GV, Mang);
}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64TargetObjectFile.h b/contrib/llvm/lib/Target/AArch64/AArch64TargetObjectFile.h
index f782285..de63cb4 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64TargetObjectFile.h
+++ b/contrib/llvm/lib/Target/AArch64/AArch64TargetObjectFile.h
@@ -1,4 +1,4 @@
-//===-- AArch64TargetObjectFile.h - AArch64 Object Info ---------*- C++ -*-===//
+//===-- AArch64TargetObjectFile.h - AArch64 Object Info -*- C++ ---------*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -6,29 +6,34 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file deals with any AArch64 specific requirements on object files.
-//
-//===----------------------------------------------------------------------===//
-#ifndef LLVM_TARGET_AARCH64_TARGETOBJECTFILE_H
-#define LLVM_TARGET_AARCH64_TARGETOBJECTFILE_H
+#ifndef LLVM_TARGET_AArch64_TARGETOBJECTFILE_H
+#define LLVM_TARGET_AArch64_TARGETOBJECTFILE_H
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
namespace llvm {
+class AArch64TargetMachine;
+
+/// This implementation is used for AArch64 ELF targets (Linux in particular).
+class AArch64_ELFTargetObjectFile : public TargetLoweringObjectFileELF {
+ void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
+};
- /// AArch64ElfTargetObjectFile - This implementation is used for ELF
- /// AArch64 targets.
- class AArch64ElfTargetObjectFile : public TargetLoweringObjectFileELF {
- virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
- };
+/// AArch64_MachoTargetObjectFile - This TLOF implementation is used for Darwin.
+class AArch64_MachoTargetObjectFile : public TargetLoweringObjectFileMachO {
+public:
+ const MCExpr *getTTypeGlobalReference(const GlobalValue *GV,
+ unsigned Encoding, Mangler &Mang,
+ const TargetMachine &TM,
+ MachineModuleInfo *MMI,
+ MCStreamer &Streamer) const override;
- class AArch64LinuxTargetObjectFile : public TargetLoweringObjectFileELF {
- virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
- };
+ MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV, Mangler &Mang,
+ const TargetMachine &TM,
+ MachineModuleInfo *MMI) const override;
+};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/contrib/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
new file mode 100644
index 0000000..1dac14b
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -0,0 +1,500 @@
+//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI pass --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements a TargetTransformInfo analysis pass specific to the
+/// AArch64 target machine. It uses the target's detailed information to provide
+/// more precise answers to certain TTI queries, while letting the target
+/// independent and default TTI implementations handle the rest.
+///
+//===----------------------------------------------------------------------===//
+
+#include "AArch64.h"
+#include "AArch64TargetMachine.h"
+#include "MCTargetDesc/AArch64AddressingModes.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/CostTable.h"
+#include "llvm/Target/TargetLowering.h"
+#include <algorithm>
+using namespace llvm;
+
+#define DEBUG_TYPE "aarch64tti"
+
+// Declare the pass initialization routine locally as target-specific passes
+// don't have a target-wide initialization entry point, and so we rely on the
+// pass constructor initialization.
+namespace llvm {
+void initializeAArch64TTIPass(PassRegistry &);
+}
+
+namespace {
+
+class AArch64TTI final : public ImmutablePass, public TargetTransformInfo {
+ const AArch64TargetMachine *TM;
+ const AArch64Subtarget *ST;
+ const AArch64TargetLowering *TLI;
+
+ /// Estimate the overhead of scalarizing an instruction. Insert and Extract
+ /// are set if the result needs to be inserted and/or extracted from vectors.
+ unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;
+
+public:
+ AArch64TTI() : ImmutablePass(ID), TM(nullptr), ST(nullptr), TLI(nullptr) {
+ llvm_unreachable("This pass cannot be directly constructed");
+ }
+
+ AArch64TTI(const AArch64TargetMachine *TM)
+ : ImmutablePass(ID), TM(TM), ST(TM->getSubtargetImpl()),
+ TLI(TM->getTargetLowering()) {
+ initializeAArch64TTIPass(*PassRegistry::getPassRegistry());
+ }
+
+ void initializePass() override { pushTTIStack(this); }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ TargetTransformInfo::getAnalysisUsage(AU);
+ }
+
+ /// Pass identification.
+ static char ID;
+
+ /// Provide necessary pointer adjustments for the two base classes.
+ void *getAdjustedAnalysisPointer(const void *ID) override {
+ if (ID == &TargetTransformInfo::ID)
+ return (TargetTransformInfo *)this;
+ return this;
+ }
+
+ /// \name Scalar TTI Implementations
+ /// @{
+ unsigned getIntImmCost(int64_t Val) const;
+ unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override;
+ unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
+ Type *Ty) const override;
+ unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
+ Type *Ty) const override;
+ PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override;
+
+ /// @}
+
+ /// \name Vector TTI Implementations
+ /// @{
+
+ unsigned getNumberOfRegisters(bool Vector) const override {
+ if (Vector) {
+ if (ST->hasNEON())
+ return 32;
+ return 0;
+ }
+ return 31;
+ }
+
+ unsigned getRegisterBitWidth(bool Vector) const override {
+ if (Vector) {
+ if (ST->hasNEON())
+ return 128;
+ return 0;
+ }
+ return 64;
+ }
+
+ unsigned getMaximumUnrollFactor() const override { return 2; }
+
+ unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const
+ override;
+
+ unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) const
+ override;
+
+ unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
+ OperandValueKind Opd1Info = OK_AnyValue,
+ OperandValueKind Opd2Info = OK_AnyValue) const
+ override;
+
+ unsigned getAddressComputationCost(Type *Ty, bool IsComplex) const override;
+
+ unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) const
+ override;
+
+ unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace) const override;
+ /// @}
+};
+
+} // end anonymous namespace
+
+INITIALIZE_AG_PASS(AArch64TTI, TargetTransformInfo, "aarch64tti",
+ "AArch64 Target Transform Info", true, true, false)
+char AArch64TTI::ID = 0;
+
+ImmutablePass *
+llvm::createAArch64TargetTransformInfoPass(const AArch64TargetMachine *TM) {
+ return new AArch64TTI(TM);
+}
+
+/// \brief Calculate the cost of materializing a 64-bit value. This helper
+/// method might only calculate a fraction of a larger immediate. Therefore it
+/// is valid to return a cost of ZERO.
+unsigned AArch64TTI::getIntImmCost(int64_t Val) const {
+ // Check if the immediate can be encoded within an instruction.
+ if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
+ return 0;
+
+ if (Val < 0)
+ Val = ~Val;
+
+ // Calculate how many moves we will need to materialize this constant.
+ unsigned LZ = countLeadingZeros((uint64_t)Val);
+ return (64 - LZ + 15) / 16;
+}
+
+/// \brief Calculate the cost of materializing the given constant.
+unsigned AArch64TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
+ assert(Ty->isIntegerTy());
+
+ unsigned BitSize = Ty->getPrimitiveSizeInBits();
+ if (BitSize == 0)
+ return ~0U;
+
+ // Sign-extend all constants to a multiple of 64-bit.
+ APInt ImmVal = Imm;
+ if (BitSize & 0x3f)
+ ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);
+
+ // Split the constant into 64-bit chunks and calculate the cost for each
+ // chunk.
+ unsigned Cost = 0;
+ for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
+ APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
+ int64_t Val = Tmp.getSExtValue();
+ Cost += getIntImmCost(Val);
+ }
+ // We need at least one instruction to materialize the constant.
+ return std::max(1U, Cost);
+}
+
+unsigned AArch64TTI::getIntImmCost(unsigned Opcode, unsigned Idx,
+ const APInt &Imm, Type *Ty) const {
+ assert(Ty->isIntegerTy());
+
+ unsigned BitSize = Ty->getPrimitiveSizeInBits();
+ // There is no cost model for constants with a bit size of 0. Return TCC_Free
+ // here, so that constant hoisting will ignore this constant.
+ if (BitSize == 0)
+ return TCC_Free;
+
+ unsigned ImmIdx = ~0U;
+ switch (Opcode) {
+ default:
+ return TCC_Free;
+ case Instruction::GetElementPtr:
+ // Always hoist the base address of a GetElementPtr.
+ if (Idx == 0)
+ return 2 * TCC_Basic;
+ return TCC_Free;
+ case Instruction::Store:
+ ImmIdx = 0;
+ break;
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ case Instruction::ICmp:
+ ImmIdx = 1;
+ break;
+ // Always return TCC_Free for the shift value of a shift instruction.
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ if (Idx == 1)
+ return TCC_Free;
+ break;
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::IntToPtr:
+ case Instruction::PtrToInt:
+ case Instruction::BitCast:
+ case Instruction::PHI:
+ case Instruction::Call:
+ case Instruction::Select:
+ case Instruction::Ret:
+ case Instruction::Load:
+ break;
+ }
+
+ if (Idx == ImmIdx) {
+ unsigned NumConstants = (BitSize + 63) / 64;
+ unsigned Cost = AArch64TTI::getIntImmCost(Imm, Ty);
+ return (Cost <= NumConstants * TCC_Basic)
+ ? static_cast<unsigned>(TCC_Free) : Cost;
+ }
+ return AArch64TTI::getIntImmCost(Imm, Ty);
+}
+
+unsigned AArch64TTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
+ const APInt &Imm, Type *Ty) const {
+ assert(Ty->isIntegerTy());
+
+ unsigned BitSize = Ty->getPrimitiveSizeInBits();
+ // There is no cost model for constants with a bit size of 0. Return TCC_Free
+ // here, so that constant hoisting will ignore this constant.
+ if (BitSize == 0)
+ return TCC_Free;
+
+ switch (IID) {
+ default:
+ return TCC_Free;
+ case Intrinsic::sadd_with_overflow:
+ case Intrinsic::uadd_with_overflow:
+ case Intrinsic::ssub_with_overflow:
+ case Intrinsic::usub_with_overflow:
+ case Intrinsic::smul_with_overflow:
+ case Intrinsic::umul_with_overflow:
+ if (Idx == 1) {
+ unsigned NumConstants = (BitSize + 63) / 64;
+ unsigned Cost = AArch64TTI::getIntImmCost(Imm, Ty);
+ return (Cost <= NumConstants * TCC_Basic)
+ ? static_cast<unsigned>(TCC_Free) : Cost;
+ }
+ break;
+ case Intrinsic::experimental_stackmap:
+ if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
+ return TCC_Free;
+ break;
+ case Intrinsic::experimental_patchpoint_void:
+ case Intrinsic::experimental_patchpoint_i64:
+ if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
+ return TCC_Free;
+ break;
+ }
+ return AArch64TTI::getIntImmCost(Imm, Ty);
+}
+
+AArch64TTI::PopcntSupportKind
+AArch64TTI::getPopcntSupport(unsigned TyWidth) const {
+ assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
+ if (TyWidth == 32 || TyWidth == 64)
+ return PSK_FastHardware;
+ // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
+ return PSK_Software;
+}
+
+unsigned AArch64TTI::getCastInstrCost(unsigned Opcode, Type *Dst,
+ Type *Src) const {
+ int ISD = TLI->InstructionOpcodeToISD(Opcode);
+ assert(ISD && "Invalid opcode");
+
+ EVT SrcTy = TLI->getValueType(Src);
+ EVT DstTy = TLI->getValueType(Dst);
+
+ if (!SrcTy.isSimple() || !DstTy.isSimple())
+ return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
+
+ static const TypeConversionCostTblEntry<MVT> ConversionTbl[] = {
+ // LowerVectorINT_TO_FP:
+ { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
+ { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
+ { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
+ { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
+
+ // Complex: to v2f32
+ { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
+ { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
+ { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
+ { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
+ { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
+ { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
+
+ // Complex: to v4f32
+ { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 4 },
+ { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
+
+ // Complex: to v2f64
+ { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
+ { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
+ { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
+
+
+ // LowerVectorFP_TO_INT
+ { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
+ { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
+ { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
+ { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
+ { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
+ { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
+
+ // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
+ { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
+ { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
+ { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 1 },
+ { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
+ { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
+ { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 1 },
+
+ // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
+ { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
+ { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 2 },
+ { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
+ { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 2 },
+
+ // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
+ { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
+ { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
+ { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 2 },
+ { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
+ { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
+ { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 2 },
+ };
+
+ int Idx = ConvertCostTableLookup<MVT>(
+ ConversionTbl, array_lengthof(ConversionTbl), ISD, DstTy.getSimpleVT(),
+ SrcTy.getSimpleVT());
+ if (Idx != -1)
+ return ConversionTbl[Idx].Cost;
+
+ return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
+}
+
+unsigned AArch64TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
+ unsigned Index) const {
+ assert(Val->isVectorTy() && "This must be a vector type");
+
+ if (Index != -1U) {
+ // Legalize the type.
+ std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Val);
+
+ // This type is legalized to a scalar type.
+ if (!LT.second.isVector())
+ return 0;
+
+ // The type may be split. Normalize the index to the new type.
+ unsigned Width = LT.second.getVectorNumElements();
+ Index = Index % Width;
+
+ // The element at index zero is already inside the vector.
+ if (Index == 0)
+ return 0;
+ }
+
+ // All other insert/extracts cost this much.
+ return 2;
+}
+
+unsigned AArch64TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
+ OperandValueKind Opd1Info,
+ OperandValueKind Opd2Info) const {
+ // Legalize the type.
+ std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);
+
+ int ISD = TLI->InstructionOpcodeToISD(Opcode);
+
+ switch (ISD) {
+ default:
+ return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Opd1Info,
+ Opd2Info);
+ case ISD::ADD:
+ case ISD::MUL:
+ case ISD::XOR:
+ case ISD::OR:
+ case ISD::AND:
+ // These nodes are marked as 'custom' for combining purposes only.
+ // We know that they are legal. See LowerAdd in ISelLowering.
+ return 1 * LT.first;
+ }
+}
+
+unsigned AArch64TTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
+ // Address computations in vectorized code with non-consecutive addresses will
+ // likely result in more instructions compared to scalar code where the
+ // computation can more often be merged into the index mode. The resulting
+ // extra micro-ops can significantly decrease throughput.
+ unsigned NumVectorInstToHideOverhead = 10;
+
+ if (Ty->isVectorTy() && IsComplex)
+ return NumVectorInstToHideOverhead;
+
+ // In many cases the address computation is not merged into the instruction
+ // addressing mode.
+ return 1;
+}
+
+unsigned AArch64TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
+ Type *CondTy) const {
+
+ int ISD = TLI->InstructionOpcodeToISD(Opcode);
+ // Vector selects that are wider than the register width are not lowered well.
+ if (ValTy->isVectorTy() && ISD == ISD::SELECT) {
+ // We would need this many instructions to hide the scalarization happening.
+ unsigned AmortizationCost = 20;
+ static const TypeConversionCostTblEntry<MVT::SimpleValueType>
+ VectorSelectTbl[] = {
+ { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 * AmortizationCost },
+ { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 * AmortizationCost },
+ { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 * AmortizationCost },
+ { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost },
+ { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost },
+ { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
+ };
+
+ EVT SelCondTy = TLI->getValueType(CondTy);
+ EVT SelValTy = TLI->getValueType(ValTy);
+ if (SelCondTy.isSimple() && SelValTy.isSimple()) {
+ int Idx =
+ ConvertCostTableLookup(VectorSelectTbl, ISD, SelCondTy.getSimpleVT(),
+ SelValTy.getSimpleVT());
+ if (Idx != -1)
+ return VectorSelectTbl[Idx].Cost;
+ }
+ }
+ return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
+}
+
+unsigned AArch64TTI::getMemoryOpCost(unsigned Opcode, Type *Src,
+ unsigned Alignment,
+ unsigned AddressSpace) const {
+ std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
+
+ if (Opcode == Instruction::Store && Src->isVectorTy() && Alignment != 16 &&
+ Src->getVectorElementType()->isIntegerTy(64)) {
+ // Unaligned stores are extremely inefficient. We don't split
+ // unaligned v2i64 stores because of the negative impact that has shown up
+ // in practice on inlined memcpy code.
+ // We make v2i64 stores expensive so that we will only vectorize if there
+ // are 6 other instructions getting vectorized.
+ unsigned AmortizationCost = 6;
+
+ return LT.first * 2 * AmortizationCost;
+ }
+
+ if (Src->isVectorTy() && Src->getVectorElementType()->isIntegerTy(8) &&
+ Src->getVectorNumElements() < 8) {
+ // We scalarize the loads/stores because there is no v.4b register and we
+ // have to promote the elements to v.4h.
+ unsigned NumVecElts = Src->getVectorNumElements();
+ unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
+ // We generate 2 instructions per vector element.
+ return NumVectorizableInstsToAmortize * NumVecElts * 2;
+ }
+
+ return LT.first;
+}
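
As a rough sanity check on the (64 - LZ + 15) / 16 immediate-cost formula above (illustrative only; movImmCost is a hypothetical helper, and the sketch skips the logical-immediate and negative-value handling of the real code):

    #include <cstdint>

    // Estimated number of MOVZ/MOVK instructions for a 64-bit immediate: one
    // per 16-bit chunk up to and including the highest set bit.
    static unsigned movImmCost(uint64_t Val) {
      if (Val == 0)
        return 0;
      unsigned LZ = __builtin_clzll(Val);   // leading zero bits (Val != 0 here)
      return (64 - LZ + 15) / 16;
    }
    // movImmCost(0x1234)     == 1  -> a single MOVZ
    // movImmCost(0xFFFF0001) == 2  -> MOVZ plus one MOVK
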
diff --git a/contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index fbbce11..37e9296 100644
--- a/contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -6,34 +6,32 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file contains the (GNU-style) assembly parser for the AArch64
-// architecture.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/AArch64MCTargetDesc.h"
+#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "Utils/AArch64BaseInfo.h"
-#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/APInt.h"
-#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/MC/MCParser/MCAsmLexer.h"
+#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCParser/MCAsmLexer.h"
-#include "llvm/MC/MCParser/MCAsmParser.h"
-#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCTargetAsmParser.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/TargetRegistry.h"
-
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Twine.h"
+#include <cstdio>
using namespace llvm;
namespace {
@@ -41,210 +39,284 @@ namespace {
class AArch64Operand;
class AArch64AsmParser : public MCTargetAsmParser {
+private:
+ StringRef Mnemonic; ///< Instruction mnemonic.
MCSubtargetInfo &STI;
MCAsmParser &Parser;
+  // Map of register aliases created via the .req directive.
+ StringMap<std::pair<bool, unsigned> > RegisterReqs;
+
+ AArch64TargetStreamer &getTargetStreamer() {
+ MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
+ return static_cast<AArch64TargetStreamer &>(TS);
+ }
+
+ MCAsmParser &getParser() const { return Parser; }
+ MCAsmLexer &getLexer() const { return Parser.getLexer(); }
+
+ SMLoc getLoc() const { return Parser.getTok().getLoc(); }
+
+ bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
+ AArch64CC::CondCode parseCondCodeString(StringRef Cond);
+ bool parseCondCode(OperandVector &Operands, bool invertCondCode);
+ unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
+ int tryParseRegister();
+ int tryMatchVectorRegister(StringRef &Kind, bool expected);
+ bool parseRegister(OperandVector &Operands);
+ bool parseSymbolicImmVal(const MCExpr *&ImmVal);
+ bool parseVectorList(OperandVector &Operands);
+ bool parseOperand(OperandVector &Operands, bool isCondCode,
+ bool invertCondCode);
+
+ void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
+ bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
+ bool showMatchError(SMLoc Loc, unsigned ErrCode);
+
+ bool parseDirectiveWord(unsigned Size, SMLoc L);
+ bool parseDirectiveTLSDescCall(SMLoc L);
+
+ bool parseDirectiveLOH(StringRef LOH, SMLoc L);
+ bool parseDirectiveLtorg(SMLoc L);
+
+ bool parseDirectiveReq(StringRef Name, SMLoc L);
+ bool parseDirectiveUnreq(SMLoc L);
+
+ bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
+ bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ OperandVector &Operands, MCStreamer &Out,
+ unsigned &ErrorInfo,
+ bool MatchingInlineAsm) override;
+/// @name Auto-generated Match Functions
+/// {
+
#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"
+ /// }
+
+ OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
+ OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
+ OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
+ OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
+ OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
+ OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
+ OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
+ OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
+ OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
+ OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
+ OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
+ bool tryParseVectorRegister(OperandVector &Operands);
+
public:
enum AArch64MatchResultTy {
- Match_FirstAArch64 = FIRST_TARGET_MATCH_RESULT_TY,
+ Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
};
-
AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
- const MCInstrInfo &MII)
+ const MCInstrInfo &MII,
+ const MCTargetOptions &Options)
: MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
MCAsmParserExtension::Initialize(_Parser);
+ if (Parser.getStreamer().getTargetStreamer() == nullptr)
+ new AArch64TargetStreamer(Parser.getStreamer());
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
}
- // These are the public interface of the MCTargetAsmParser
- bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
- SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- bool ParseDirective(AsmToken DirectiveID);
- bool ParseDirectiveTLSDescCall(SMLoc L);
- bool ParseDirectiveWord(unsigned Size, SMLoc L);
-
- bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer&Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm);
-
- // The rest of the sub-parsers have more freedom over interface: they return
- // an OperandMatchResultTy because it's less ambiguous than true/false or
- // -1/0/1 even if it is more verbose
- OperandMatchResultTy
- ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- StringRef Mnemonic);
-
- OperandMatchResultTy ParseImmediate(const MCExpr *&ExprVal);
-
- OperandMatchResultTy ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind);
-
- OperandMatchResultTy
- ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- uint32_t NumLanes);
-
- OperandMatchResultTy
- ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- uint32_t &NumLanes);
-
- OperandMatchResultTy
- ParseImmWithLSLOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- OperandMatchResultTy
- ParseCondCodeOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- OperandMatchResultTy
- ParseCRxOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- OperandMatchResultTy
- ParseFPImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- template<typename SomeNamedImmMapper> OperandMatchResultTy
- ParseNamedImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- return ParseNamedImmOperand(SomeNamedImmMapper(), Operands);
- }
-
- OperandMatchResultTy
- ParseNamedImmOperand(const NamedImmMapper &Mapper,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- OperandMatchResultTy
- ParseLSXAddressOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- OperandMatchResultTy
- ParseShiftExtend(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- OperandMatchResultTy
- ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- bool TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc, StringRef &Layout,
- SMLoc &LayoutLoc);
-
- OperandMatchResultTy ParseVectorList(SmallVectorImpl<MCParsedAsmOperand *> &);
-
- bool validateInstruction(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- /// Scan the next token (which had better be an identifier) and determine
- /// whether it represents a general-purpose or vector register. It returns
- /// true if an identifier was found and populates its reference arguments. It
- /// does not consume the token.
- bool
- IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc, StringRef &LayoutSpec,
- SMLoc &LayoutLoc) const;
-
+ SMLoc NameLoc, OperandVector &Operands) override;
+ bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
+ bool ParseDirective(AsmToken DirectiveID) override;
+ unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
+ unsigned Kind) override;
+
+ static bool classifySymbolRef(const MCExpr *Expr,
+ AArch64MCExpr::VariantKind &ELFRefKind,
+ MCSymbolRefExpr::VariantKind &DarwinRefKind,
+ int64_t &Addend);
};
-
-}
+} // end anonymous namespace
namespace {
-/// Instances of this class represent a parsed AArch64 machine instruction.
+/// AArch64Operand - Instances of this class represent a parsed operand of an
+/// AArch64 machine instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
enum KindTy {
- k_ImmWithLSL, // #uimm {, LSL #amt }
- k_CondCode, // eq/ne/...
- k_FPImmediate, // Limited-precision floating-point imm
- k_Immediate, // Including expressions referencing symbols
+ k_Immediate,
+ k_ShiftedImm,
+ k_CondCode,
k_Register,
+ k_VectorList,
+ k_VectorIndex,
+ k_Token,
+ k_SysReg,
+ k_SysCR,
+ k_Prefetch,
k_ShiftExtend,
- k_VectorList, // A sequential list of 1 to 4 registers.
- k_SysReg, // The register operand of MRS and MSR instructions
- k_Token, // The mnemonic; other raw tokens the auto-generated
- k_WrappedRegister // Load/store exclusive permit a wrapped register.
+ k_FPImm,
+ k_Barrier
} Kind;
SMLoc StartLoc, EndLoc;
- struct ImmWithLSLOp {
- const MCExpr *Val;
- unsigned ShiftAmount;
- bool ImplicitAmount;
+ struct TokOp {
+ const char *Data;
+ unsigned Length;
+ bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
};
- struct CondCodeOp {
- A64CC::CondCodes Code;
+ struct RegOp {
+ unsigned RegNum;
+ bool isVector;
};
- struct FPImmOp {
- double Val;
+ struct VectorListOp {
+ unsigned RegNum;
+ unsigned Count;
+ unsigned NumElements;
+ unsigned ElementKind;
+ };
+
+ struct VectorIndexOp {
+ unsigned Val;
};
struct ImmOp {
const MCExpr *Val;
};
- struct RegOp {
- unsigned RegNum;
+ struct ShiftedImmOp {
+ const MCExpr *Val;
+ unsigned ShiftAmount;
};
- struct ShiftExtendOp {
- A64SE::ShiftExtSpecifiers ShiftType;
- unsigned Amount;
- bool ImplicitAmount;
+ struct CondCodeOp {
+ AArch64CC::CondCode Code;
};
- // A vector register list is a sequential list of 1 to 4 registers.
- struct VectorListOp {
- unsigned RegNum;
- unsigned Count;
- A64Layout::VectorLayout Layout;
+ struct FPImmOp {
+ unsigned Val; // Encoded 8-bit representation.
+ };
+
+ struct BarrierOp {
+ unsigned Val; // Not the enum since not all values have names.
};
struct SysRegOp {
const char *Data;
unsigned Length;
+ uint64_t FeatureBits; // We need to pass through information about which
+ // core we are compiling for so that the SysReg
+ // Mappers can appropriately conditionalize.
};
- struct TokOp {
- const char *Data;
- unsigned Length;
+ struct SysCRImmOp {
+ unsigned Val;
+ };
+
+ struct PrefetchOp {
+ unsigned Val;
+ };
+
+ struct ShiftExtendOp {
+ AArch64_AM::ShiftExtendType Type;
+ unsigned Amount;
+ bool HasExplicitAmount;
+ };
+
+ struct ExtendOp {
+ unsigned Val;
};
union {
- struct ImmWithLSLOp ImmWithLSL;
- struct CondCodeOp CondCode;
- struct FPImmOp FPImm;
- struct ImmOp Imm;
+ struct TokOp Tok;
struct RegOp Reg;
- struct ShiftExtendOp ShiftExtend;
struct VectorListOp VectorList;
+ struct VectorIndexOp VectorIndex;
+ struct ImmOp Imm;
+ struct ShiftedImmOp ShiftedImm;
+ struct CondCodeOp CondCode;
+ struct FPImmOp FPImm;
+ struct BarrierOp Barrier;
struct SysRegOp SysReg;
- struct TokOp Tok;
+ struct SysCRImmOp SysCRImm;
+ struct PrefetchOp Prefetch;
+ struct ShiftExtendOp ShiftExtend;
};
- AArch64Operand(KindTy K, SMLoc S, SMLoc E)
- : MCParsedAsmOperand(), Kind(K), StartLoc(S), EndLoc(E) {}
+  // Keep the MCContext around as the MCExprs may need to be manipulated
+  // during the add<>Operands() calls.
+ MCContext &Ctx;
public:
- AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand() {
+ AArch64Operand(KindTy K, MCContext &_Ctx)
+ : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
+
+ AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
+ Kind = o.Kind;
+ StartLoc = o.StartLoc;
+ EndLoc = o.EndLoc;
+ switch (Kind) {
+ case k_Token:
+ Tok = o.Tok;
+ break;
+ case k_Immediate:
+ Imm = o.Imm;
+ break;
+ case k_ShiftedImm:
+ ShiftedImm = o.ShiftedImm;
+ break;
+ case k_CondCode:
+ CondCode = o.CondCode;
+ break;
+ case k_FPImm:
+ FPImm = o.FPImm;
+ break;
+ case k_Barrier:
+ Barrier = o.Barrier;
+ break;
+ case k_Register:
+ Reg = o.Reg;
+ break;
+ case k_VectorList:
+ VectorList = o.VectorList;
+ break;
+ case k_VectorIndex:
+ VectorIndex = o.VectorIndex;
+ break;
+ case k_SysReg:
+ SysReg = o.SysReg;
+ break;
+ case k_SysCR:
+ SysCRImm = o.SysCRImm;
+ break;
+ case k_Prefetch:
+ Prefetch = o.Prefetch;
+ break;
+ case k_ShiftExtend:
+ ShiftExtend = o.ShiftExtend;
+ break;
+ }
}
- SMLoc getStartLoc() const { return StartLoc; }
- SMLoc getEndLoc() const { return EndLoc; }
- void print(raw_ostream&) const;
- void dump() const;
+ /// getStartLoc - Get the location of the first token of this operand.
+ SMLoc getStartLoc() const override { return StartLoc; }
+ /// getEndLoc - Get the location of the last token of this operand.
+ SMLoc getEndLoc() const override { return EndLoc; }
StringRef getToken() const {
assert(Kind == k_Token && "Invalid access!");
return StringRef(Tok.Data, Tok.Length);
}
- unsigned getReg() const {
- assert((Kind == k_Register || Kind == k_WrappedRegister)
- && "Invalid access!");
- return Reg.RegNum;
+ bool isTokenSuffix() const {
+ assert(Kind == k_Token && "Invalid access!");
+ return Tok.IsSuffix;
}
const MCExpr *getImm() const {
@@ -252,1237 +324,1844 @@ public:
return Imm.Val;
}
- A64CC::CondCodes getCondCode() const {
- assert(Kind == k_CondCode && "Invalid access!");
- return CondCode.Code;
+ const MCExpr *getShiftedImmVal() const {
+ assert(Kind == k_ShiftedImm && "Invalid access!");
+ return ShiftedImm.Val;
}
- static bool isNonConstantExpr(const MCExpr *E,
- AArch64MCExpr::VariantKind &Variant) {
- if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
- Variant = A64E->getKind();
- return true;
- } else if (!isa<MCConstantExpr>(E)) {
- Variant = AArch64MCExpr::VK_AARCH64_None;
- return true;
- }
-
- return false;
+ unsigned getShiftedImmShift() const {
+ assert(Kind == k_ShiftedImm && "Invalid access!");
+ return ShiftedImm.ShiftAmount;
}
- bool isCondCode() const { return Kind == k_CondCode; }
- bool isToken() const { return Kind == k_Token; }
- bool isReg() const { return Kind == k_Register; }
- bool isImm() const { return Kind == k_Immediate; }
- bool isMem() const { return false; }
- bool isFPImm() const { return Kind == k_FPImmediate; }
- bool isShiftOrExtend() const { return Kind == k_ShiftExtend; }
- bool isSysReg() const { return Kind == k_SysReg; }
- bool isImmWithLSL() const { return Kind == k_ImmWithLSL; }
- bool isWrappedReg() const { return Kind == k_WrappedRegister; }
-
- bool isAddSubImmLSL0() const {
- if (!isImmWithLSL()) return false;
- if (ImmWithLSL.ShiftAmount != 0) return false;
-
- AArch64MCExpr::VariantKind Variant;
- if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
- return Variant == AArch64MCExpr::VK_AARCH64_LO12
- || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12
- || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC
- || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12
- || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC
- || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC_LO12;
- }
-
- // Otherwise it should be a real immediate in range:
- const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
- return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
+ AArch64CC::CondCode getCondCode() const {
+ assert(Kind == k_CondCode && "Invalid access!");
+ return CondCode.Code;
}
- bool isAddSubImmLSL12() const {
- if (!isImmWithLSL()) return false;
- if (ImmWithLSL.ShiftAmount != 12) return false;
-
- AArch64MCExpr::VariantKind Variant;
- if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
- return Variant == AArch64MCExpr::VK_AARCH64_DTPREL_HI12
- || Variant == AArch64MCExpr::VK_AARCH64_TPREL_HI12;
- }
-
- // Otherwise it should be a real immediate in range:
- const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
- return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
+ unsigned getFPImm() const {
+ assert(Kind == k_FPImm && "Invalid access!");
+ return FPImm.Val;
}
- template<unsigned MemSize, unsigned RmSize> bool isAddrRegExtend() const {
- if (!isShiftOrExtend()) return false;
-
- A64SE::ShiftExtSpecifiers Ext = ShiftExtend.ShiftType;
- if (RmSize == 32 && !(Ext == A64SE::UXTW || Ext == A64SE::SXTW))
- return false;
-
- if (RmSize == 64 && !(Ext == A64SE::LSL || Ext == A64SE::SXTX))
- return false;
-
- return ShiftExtend.Amount == Log2_32(MemSize) || ShiftExtend.Amount == 0;
+ unsigned getBarrier() const {
+ assert(Kind == k_Barrier && "Invalid access!");
+ return Barrier.Val;
}
- bool isAdrpLabel() const {
- if (!isImm()) return false;
-
- AArch64MCExpr::VariantKind Variant;
- if (isNonConstantExpr(getImm(), Variant)) {
- return Variant == AArch64MCExpr::VK_AARCH64_None
- || Variant == AArch64MCExpr::VK_AARCH64_GOT
- || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL
- || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC;
- }
-
- return isLabel<21, 4096>();
+ unsigned getReg() const override {
+ assert(Kind == k_Register && "Invalid access!");
+ return Reg.RegNum;
}
- template<unsigned RegWidth> bool isBitfieldWidth() const {
- if (!isImm()) return false;
-
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE) return false;
-
- return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
+ unsigned getVectorListStart() const {
+ assert(Kind == k_VectorList && "Invalid access!");
+ return VectorList.RegNum;
}
- template<int RegWidth>
- bool isCVTFixedPos() const {
- if (!isImm()) return false;
-
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE) return false;
-
- return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
+ unsigned getVectorListCount() const {
+ assert(Kind == k_VectorList && "Invalid access!");
+ return VectorList.Count;
}
- bool isFMOVImm() const {
- if (!isFPImm()) return false;
-
- APFloat RealVal(FPImm.Val);
- uint32_t ImmVal;
- return A64Imms::isFPImm(RealVal, ImmVal);
+ unsigned getVectorIndex() const {
+ assert(Kind == k_VectorIndex && "Invalid access!");
+ return VectorIndex.Val;
}
- bool isFPZero() const {
- if (!isFPImm()) return false;
+ StringRef getSysReg() const {
+ assert(Kind == k_SysReg && "Invalid access!");
+ return StringRef(SysReg.Data, SysReg.Length);
+ }
- APFloat RealVal(FPImm.Val);
- return RealVal.isPosZero();
+ uint64_t getSysRegFeatureBits() const {
+ assert(Kind == k_SysReg && "Invalid access!");
+ return SysReg.FeatureBits;
}
- template<unsigned field_width, unsigned scale>
- bool isLabel() const {
- if (!isImm()) return false;
+ unsigned getSysCR() const {
+ assert(Kind == k_SysCR && "Invalid access!");
+ return SysCRImm.Val;
+ }
- if (dyn_cast<MCSymbolRefExpr>(Imm.Val)) {
- return true;
- } else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
- int64_t Val = CE->getValue();
- int64_t Min = - (scale * (1LL << (field_width - 1)));
- int64_t Max = scale * ((1LL << (field_width - 1)) - 1);
- return (Val % scale) == 0 && Val >= Min && Val <= Max;
- }
+ unsigned getPrefetch() const {
+ assert(Kind == k_Prefetch && "Invalid access!");
+ return Prefetch.Val;
+ }
- // N.b. this disallows explicit relocation specifications via an
- // AArch64MCExpr. Users needing that behaviour
- return false;
+ AArch64_AM::ShiftExtendType getShiftExtendType() const {
+ assert(Kind == k_ShiftExtend && "Invalid access!");
+ return ShiftExtend.Type;
}
- bool isLane1() const {
- if (!isImm()) return false;
+ unsigned getShiftExtendAmount() const {
+ assert(Kind == k_ShiftExtend && "Invalid access!");
+ return ShiftExtend.Amount;
+ }
- // Because it's come through custom assembly parsing, it must always be a
- // constant expression.
- return cast<MCConstantExpr>(getImm())->getValue() == 1;
+ bool hasShiftExtendAmount() const {
+ assert(Kind == k_ShiftExtend && "Invalid access!");
+ return ShiftExtend.HasExplicitAmount;
}
- bool isLoadLitLabel() const {
- if (!isImm()) return false;
+ bool isImm() const override { return Kind == k_Immediate; }
+ bool isMem() const override { return false; }
+ bool isSImm9() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= -256 && Val < 256);
+ }
+ bool isSImm7s4() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
+ }
+ bool isSImm7s8() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
+ }
+ bool isSImm7s16() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
+ }
+
+ bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
+ AArch64MCExpr::VariantKind ELFRefKind;
+ MCSymbolRefExpr::VariantKind DarwinRefKind;
+ int64_t Addend;
+ if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
+ Addend)) {
+ // If we don't understand the expression, assume the best and
+ // let the fixup and relocation code deal with it.
+ return true;
+ }
- AArch64MCExpr::VariantKind Variant;
- if (isNonConstantExpr(getImm(), Variant)) {
- return Variant == AArch64MCExpr::VK_AARCH64_None
- || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL;
+ if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
+ ELFRefKind == AArch64MCExpr::VK_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
+ ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
+ ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
+ ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
+ // Note that we don't range-check the addend. It's adjusted modulo page
+ // size when converted, so there is no "out of range" condition when using
+ // @pageoff.
+ return Addend >= 0 && (Addend % Scale) == 0;
+ } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
+ DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
+ // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
+ return Addend == 0;
}
- return isLabel<19, 4>();
+ return false;
}
- template<unsigned RegWidth> bool isLogicalImm() const {
- if (!isImm()) return false;
+ template <int Scale> bool isUImm12Offset() const {
+ if (!isImm())
+ return false;
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
- if (!CE) return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return isSymbolicUImm12Offset(getImm(), Scale);
- uint32_t Bits;
- return A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
+ int64_t Val = MCE->getValue();
+ return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
}
- template<unsigned RegWidth> bool isLogicalImmMOV() const {
- if (!isLogicalImm<RegWidth>()) return false;
-
- const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);
-
- // The move alias for ORR is only valid if the immediate cannot be
- // represented with a move (immediate) instruction; they take priority.
- int UImm16, Shift;
- return !A64Imms::isMOVZImm(RegWidth, CE->getValue(), UImm16, Shift)
- && !A64Imms::isMOVNImm(RegWidth, CE->getValue(), UImm16, Shift);
+ bool isImm0_7() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 0 && Val < 8);
+ }
+ bool isImm1_8() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val > 0 && Val < 9);
}
+ bool isImm0_15() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 0 && Val < 16);
+ }
+ bool isImm1_16() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val > 0 && Val < 17);
+ }
+ bool isImm0_31() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 0 && Val < 32);
+ }
+ bool isImm1_31() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 1 && Val < 32);
+ }
+ bool isImm1_32() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 1 && Val < 33);
+ }
+ bool isImm0_63() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 0 && Val < 64);
+ }
+ bool isImm1_63() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 1 && Val < 64);
+ }
+ bool isImm1_64() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 1 && Val < 65);
+ }
+ bool isImm0_127() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 0 && Val < 128);
+ }
+ bool isImm0_255() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 0 && Val < 256);
+ }
+ bool isImm0_65535() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 0 && Val < 65536);
+ }
+ bool isImm32_63() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 32 && Val < 64);
+ }
+ bool isLogicalImm32() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ if (Val >> 32 != 0 && Val >> 32 != ~0LL)
+ return false;
+ Val &= 0xFFFFFFFF;
+ return AArch64_AM::isLogicalImmediate(Val, 32);
+ }
+ bool isLogicalImm64() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
+ }
+ bool isLogicalImm32Not() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
+ return AArch64_AM::isLogicalImmediate(Val, 32);
+ }
+ bool isLogicalImm64Not() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
+ }
+ bool isShiftedImm() const { return Kind == k_ShiftedImm; }
+ bool isAddSubImm() const {
+ if (!isShiftedImm() && !isImm())
+ return false;
- template<int MemSize>
- bool isOffsetUImm12() const {
- if (!isImm()) return false;
+ const MCExpr *Expr;
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
+ if (isShiftedImm()) {
+ unsigned Shift = ShiftedImm.ShiftAmount;
+ Expr = ShiftedImm.Val;
+ if (Shift != 0 && Shift != 12)
+ return false;
+ } else {
+ Expr = getImm();
+ }
- // Assume they know what they're doing for now if they've given us a
- // non-constant expression. In principle we could check for ridiculous
- // things that can't possibly work or relocations that would almost
- // certainly break resulting code.
- if (!CE)
+ AArch64MCExpr::VariantKind ELFRefKind;
+ MCSymbolRefExpr::VariantKind DarwinRefKind;
+ int64_t Addend;
+ if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
+ DarwinRefKind, Addend)) {
+ return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
+ || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
+ || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
+ || ELFRefKind == AArch64MCExpr::VK_LO12
+ || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
+ || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
+ || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
+ || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
+ || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
+ || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
+ || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
+ }
+
+ // Otherwise it should be a real immediate in range:
+ const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
+ return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
+ }
+ bool isCondCode() const { return Kind == k_CondCode; }
+ bool isSIMDImmType10() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
+ }
+ bool isBranchTarget26() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
return true;
+ int64_t Val = MCE->getValue();
+ if (Val & 0x3)
+ return false;
+ return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
+ }
+ bool isPCRelLabel19() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return true;
+ int64_t Val = MCE->getValue();
+ if (Val & 0x3)
+ return false;
+ return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
+ }
+ bool isBranchTarget14() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return true;
+ int64_t Val = MCE->getValue();
+ if (Val & 0x3)
+ return false;
+ return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
+ }
- int64_t Val = CE->getValue();
+ bool
+ isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
+ if (!isImm())
+ return false;
- // Must be a multiple of the access size in bytes.
- if ((Val & (MemSize - 1)) != 0) return false;
+ AArch64MCExpr::VariantKind ELFRefKind;
+ MCSymbolRefExpr::VariantKind DarwinRefKind;
+ int64_t Addend;
+ if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
+ DarwinRefKind, Addend)) {
+ return false;
+ }
+ if (DarwinRefKind != MCSymbolRefExpr::VK_None)
+ return false;
- // Must be 12-bit unsigned
- return Val >= 0 && Val <= 0xfff * MemSize;
- }
+ for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
+ if (ELFRefKind == AllowedModifiers[i])
+ return Addend == 0;
+ }
- template<A64SE::ShiftExtSpecifiers SHKind, bool is64Bit>
- bool isShift() const {
- if (!isShiftOrExtend()) return false;
+ return false;
+ }
- if (ShiftExtend.ShiftType != SHKind)
- return false;
+ bool isMovZSymbolG3() const {
+ static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
+ return isMovWSymbol(Variants);
+ }
- return is64Bit ? ShiftExtend.Amount <= 63 : ShiftExtend.Amount <= 31;
+ bool isMovZSymbolG2() const {
+ static AArch64MCExpr::VariantKind Variants[] = {
+ AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
+ AArch64MCExpr::VK_TPREL_G2, AArch64MCExpr::VK_DTPREL_G2};
+ return isMovWSymbol(Variants);
}
- bool isMOVN32Imm() const {
- static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
- AArch64MCExpr::VK_AARCH64_SABS_G0,
- AArch64MCExpr::VK_AARCH64_SABS_G1,
- AArch64MCExpr::VK_AARCH64_DTPREL_G1,
- AArch64MCExpr::VK_AARCH64_DTPREL_G0,
- AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
- AArch64MCExpr::VK_AARCH64_TPREL_G1,
- AArch64MCExpr::VK_AARCH64_TPREL_G0,
+ bool isMovZSymbolG1() const {
+ static AArch64MCExpr::VariantKind Variants[] = {
+ AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
+ AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
+ AArch64MCExpr::VK_DTPREL_G1,
};
- const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
-
- return isMoveWideImm(32, PermittedModifiers, NumModifiers);
- }
-
- bool isMOVN64Imm() const {
- static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
- AArch64MCExpr::VK_AARCH64_SABS_G0,
- AArch64MCExpr::VK_AARCH64_SABS_G1,
- AArch64MCExpr::VK_AARCH64_SABS_G2,
- AArch64MCExpr::VK_AARCH64_DTPREL_G2,
- AArch64MCExpr::VK_AARCH64_DTPREL_G1,
- AArch64MCExpr::VK_AARCH64_DTPREL_G0,
- AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
- AArch64MCExpr::VK_AARCH64_TPREL_G2,
- AArch64MCExpr::VK_AARCH64_TPREL_G1,
- AArch64MCExpr::VK_AARCH64_TPREL_G0,
- };
- const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
-
- return isMoveWideImm(64, PermittedModifiers, NumModifiers);
+ return isMovWSymbol(Variants);
}
+ bool isMovZSymbolG0() const {
+ static AArch64MCExpr::VariantKind Variants[] = {
+ AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
+ AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_DTPREL_G0};
+ return isMovWSymbol(Variants);
+ }
- bool isMOVZ32Imm() const {
- static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
- AArch64MCExpr::VK_AARCH64_ABS_G0,
- AArch64MCExpr::VK_AARCH64_ABS_G1,
- AArch64MCExpr::VK_AARCH64_SABS_G0,
- AArch64MCExpr::VK_AARCH64_SABS_G1,
- AArch64MCExpr::VK_AARCH64_DTPREL_G1,
- AArch64MCExpr::VK_AARCH64_DTPREL_G0,
- AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
- AArch64MCExpr::VK_AARCH64_TPREL_G1,
- AArch64MCExpr::VK_AARCH64_TPREL_G0,
- };
- const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
-
- return isMoveWideImm(32, PermittedModifiers, NumModifiers);
- }
-
- bool isMOVZ64Imm() const {
- static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
- AArch64MCExpr::VK_AARCH64_ABS_G0,
- AArch64MCExpr::VK_AARCH64_ABS_G1,
- AArch64MCExpr::VK_AARCH64_ABS_G2,
- AArch64MCExpr::VK_AARCH64_ABS_G3,
- AArch64MCExpr::VK_AARCH64_SABS_G0,
- AArch64MCExpr::VK_AARCH64_SABS_G1,
- AArch64MCExpr::VK_AARCH64_SABS_G2,
- AArch64MCExpr::VK_AARCH64_DTPREL_G2,
- AArch64MCExpr::VK_AARCH64_DTPREL_G1,
- AArch64MCExpr::VK_AARCH64_DTPREL_G0,
- AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
- AArch64MCExpr::VK_AARCH64_TPREL_G2,
- AArch64MCExpr::VK_AARCH64_TPREL_G1,
- AArch64MCExpr::VK_AARCH64_TPREL_G0,
- };
- const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
+ bool isMovKSymbolG3() const {
+ static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
+ return isMovWSymbol(Variants);
+ }
- return isMoveWideImm(64, PermittedModifiers, NumModifiers);
+ bool isMovKSymbolG2() const {
+ static AArch64MCExpr::VariantKind Variants[] = {
+ AArch64MCExpr::VK_ABS_G2_NC};
+ return isMovWSymbol(Variants);
}
- bool isMOVK32Imm() const {
- static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
- AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
- AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
- AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
- AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
- AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
- AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
- AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
- };
- const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
-
- return isMoveWideImm(32, PermittedModifiers, NumModifiers);
- }
-
- bool isMOVK64Imm() const {
- static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
- AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
- AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
- AArch64MCExpr::VK_AARCH64_ABS_G2_NC,
- AArch64MCExpr::VK_AARCH64_ABS_G3,
- AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
- AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
- AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
- AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
- AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
+ bool isMovKSymbolG1() const {
+ static AArch64MCExpr::VariantKind Variants[] = {
+ AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_TPREL_G1_NC,
+ AArch64MCExpr::VK_DTPREL_G1_NC
};
- const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
-
- return isMoveWideImm(64, PermittedModifiers, NumModifiers);
+ return isMovWSymbol(Variants);
}
- bool isMoveWideImm(unsigned RegWidth,
- const AArch64MCExpr::VariantKind *PermittedModifiers,
- unsigned NumModifiers) const {
- if (!isImmWithLSL()) return false;
+ bool isMovKSymbolG0() const {
+ static AArch64MCExpr::VariantKind Variants[] = {
+ AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
+ AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC
+ };
+ return isMovWSymbol(Variants);
+ }
- if (ImmWithLSL.ShiftAmount % 16 != 0) return false;
- if (ImmWithLSL.ShiftAmount >= RegWidth) return false;
+ template<int RegWidth, int Shift>
+ bool isMOVZMovAlias() const {
+ if (!isImm()) return false;
- AArch64MCExpr::VariantKind Modifier;
- if (isNonConstantExpr(ImmWithLSL.Val, Modifier)) {
- // E.g. "#:abs_g0:sym, lsl #16" makes no sense.
- if (!ImmWithLSL.ImplicitAmount) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ uint64_t Value = CE->getValue();
- for (unsigned i = 0; i < NumModifiers; ++i)
- if (PermittedModifiers[i] == Modifier) return true;
+ if (RegWidth == 32)
+ Value &= 0xffffffffULL;
+ // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
+ if (Value == 0 && Shift != 0)
return false;
- }
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmWithLSL.Val);
- return CE && CE->getValue() >= 0 && CE->getValue() <= 0xffff;
+ return (Value & ~(0xffffULL << Shift)) == 0;
}
- template<int RegWidth, bool (*isValidImm)(int, uint64_t, int&, int&)>
- bool isMoveWideMovAlias() const {
+ template<int RegWidth, int Shift>
+ bool isMOVNMovAlias() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
-
- int UImm16, Shift;
uint64_t Value = CE->getValue();
- // If this is a 32-bit instruction then all bits above 32 should be the
- // same: either of these is fine because signed/unsigned values should be
- // permitted.
- if (RegWidth == 32) {
- if ((Value >> 32) != 0 && (Value >> 32) != 0xffffffff)
+ // MOVZ takes precedence over MOVN.
+ for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
+ if ((Value & ~(0xffffULL << MOVZShift)) == 0)
return false;
+ Value = ~Value;
+ if (RegWidth == 32)
Value &= 0xffffffffULL;
- }
- return isValidImm(RegWidth, Value, UImm16, Shift);
+ return (Value & ~(0xffffULL << Shift)) == 0;
}
- bool isMSRWithReg() const {
+ bool isFPImm() const { return Kind == k_FPImm; }
+ bool isBarrier() const { return Kind == k_Barrier; }
+ bool isSysReg() const { return Kind == k_SysReg; }
+ bool isMRSSystemRegister() const {
if (!isSysReg()) return false;
bool IsKnownRegister;
- StringRef Name(SysReg.Data, SysReg.Length);
- A64SysReg::MSRMapper().fromString(Name, IsKnownRegister);
+ auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
+ Mapper.fromString(getSysReg(), IsKnownRegister);
return IsKnownRegister;
}
-
- bool isMSRPState() const {
+ bool isMSRSystemRegister() const {
if (!isSysReg()) return false;
bool IsKnownRegister;
- StringRef Name(SysReg.Data, SysReg.Length);
- A64PState::PStateMapper().fromString(Name, IsKnownRegister);
+ auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
+ Mapper.fromString(getSysReg(), IsKnownRegister);
return IsKnownRegister;
}
-
- bool isMRS() const {
+ bool isSystemPStateField() const {
if (!isSysReg()) return false;
- // First check against specific MSR-only (write-only) registers
bool IsKnownRegister;
- StringRef Name(SysReg.Data, SysReg.Length);
- A64SysReg::MRSMapper().fromString(Name, IsKnownRegister);
+ AArch64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);
return IsKnownRegister;
}
+ bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
+ bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
+ bool isVectorRegLo() const {
+ return Kind == k_Register && Reg.isVector &&
+ AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
+ Reg.RegNum);
+ }
+ bool isGPR32as64() const {
+ return Kind == k_Register && !Reg.isVector &&
+ AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
+ }
- bool isPRFM() const {
- if (!isImm()) return false;
+ bool isGPR64sp0() const {
+ return Kind == k_Register && !Reg.isVector &&
+ AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
+ }
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ /// Is this a vector list with the type implicit (presumably attached to the
+ /// instruction itself)?
+ template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
+ return Kind == k_VectorList && VectorList.Count == NumRegs &&
+ !VectorList.ElementKind;
+ }
- if (!CE)
+ template <unsigned NumRegs, unsigned NumElements, char ElementKind>
+ bool isTypedVectorList() const {
+ if (Kind != k_VectorList)
return false;
-
- return CE->getValue() >= 0 && CE->getValue() <= 31;
+ if (VectorList.Count != NumRegs)
+ return false;
+ if (VectorList.ElementKind != ElementKind)
+ return false;
+ return VectorList.NumElements == NumElements;
}
- template<A64SE::ShiftExtSpecifiers SHKind> bool isRegExtend() const {
- if (!isShiftOrExtend()) return false;
-
- if (ShiftExtend.ShiftType != SHKind)
+ bool isVectorIndex1() const {
+ return Kind == k_VectorIndex && VectorIndex.Val == 1;
+ }
+ bool isVectorIndexB() const {
+ return Kind == k_VectorIndex && VectorIndex.Val < 16;
+ }
+ bool isVectorIndexH() const {
+ return Kind == k_VectorIndex && VectorIndex.Val < 8;
+ }
+ bool isVectorIndexS() const {
+ return Kind == k_VectorIndex && VectorIndex.Val < 4;
+ }
+ bool isVectorIndexD() const {
+ return Kind == k_VectorIndex && VectorIndex.Val < 2;
+ }
+ bool isToken() const override { return Kind == k_Token; }
+ bool isTokenEqual(StringRef Str) const {
+ return Kind == k_Token && getToken() == Str;
+ }
+ bool isSysCR() const { return Kind == k_SysCR; }
+ bool isPrefetch() const { return Kind == k_Prefetch; }
+ bool isShiftExtend() const { return Kind == k_ShiftExtend; }
+ bool isShifter() const {
+ if (!isShiftExtend())
return false;
- return ShiftExtend.Amount <= 4;
+ AArch64_AM::ShiftExtendType ST = getShiftExtendType();
+ return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
+ ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
+ ST == AArch64_AM::MSL);
}
-
- bool isRegExtendLSL() const {
- if (!isShiftOrExtend()) return false;
-
- if (ShiftExtend.ShiftType != A64SE::LSL)
+ bool isExtend() const {
+ if (!isShiftExtend())
return false;
- return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4;
+ AArch64_AM::ShiftExtendType ET = getShiftExtendType();
+ return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
+ ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
+ ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
+ ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
+ ET == AArch64_AM::LSL) &&
+ getShiftExtendAmount() <= 4;
}
- // if 0 < value <= w, return true
- bool isShrFixedWidth(int w) const {
- if (!isImm())
+ bool isExtend64() const {
+ if (!isExtend())
return false;
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE)
+ // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
+ AArch64_AM::ShiftExtendType ET = getShiftExtendType();
+ return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
+ }
+ bool isExtendLSL64() const {
+ if (!isExtend())
return false;
- int64_t Value = CE->getValue();
- return Value > 0 && Value <= w;
+ AArch64_AM::ShiftExtendType ET = getShiftExtendType();
+ return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
+ ET == AArch64_AM::LSL) &&
+ getShiftExtendAmount() <= 4;
}
- bool isShrImm8() const { return isShrFixedWidth(8); }
-
- bool isShrImm16() const { return isShrFixedWidth(16); }
-
- bool isShrImm32() const { return isShrFixedWidth(32); }
-
- bool isShrImm64() const { return isShrFixedWidth(64); }
-
- // if 0 <= value < w, return true
- bool isShlFixedWidth(int w) const {
- if (!isImm())
+ template<int Width> bool isMemXExtend() const {
+ if (!isExtend())
return false;
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE)
+ AArch64_AM::ShiftExtendType ET = getShiftExtendType();
+ return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
+ (getShiftExtendAmount() == Log2_32(Width / 8) ||
+ getShiftExtendAmount() == 0);
+ }
+
+ template<int Width> bool isMemWExtend() const {
+ if (!isExtend())
return false;
- int64_t Value = CE->getValue();
- return Value >= 0 && Value < w;
+ AArch64_AM::ShiftExtendType ET = getShiftExtendType();
+ return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
+ (getShiftExtendAmount() == Log2_32(Width / 8) ||
+ getShiftExtendAmount() == 0);
}
- bool isShlImm8() const { return isShlFixedWidth(8); }
+ template <unsigned width>
+ bool isArithmeticShifter() const {
+ if (!isShifter())
+ return false;
- bool isShlImm16() const { return isShlFixedWidth(16); }
+ // An arithmetic shifter is LSL, LSR, or ASR.
+ AArch64_AM::ShiftExtendType ST = getShiftExtendType();
+ return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
+ ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
+ }
- bool isShlImm32() const { return isShlFixedWidth(32); }
+ template <unsigned width>
+ bool isLogicalShifter() const {
+ if (!isShifter())
+ return false;
- bool isShlImm64() const { return isShlFixedWidth(64); }
+ // A logical shifter is LSL, LSR, ASR or ROR.
+ AArch64_AM::ShiftExtendType ST = getShiftExtendType();
+ return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
+ ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
+ getShiftExtendAmount() < width;
+ }
- bool isNeonMovImmShiftLSL() const {
- if (!isShiftOrExtend())
+ bool isMovImm32Shifter() const {
+ if (!isShifter())
return false;
- if (ShiftExtend.ShiftType != A64SE::LSL)
+    // A 32-bit MOVi shifter is LSL of 0 or 16.
+ AArch64_AM::ShiftExtendType ST = getShiftExtendType();
+ if (ST != AArch64_AM::LSL)
return false;
-
- // Valid shift amount is 0, 8, 16 and 24.
- return ShiftExtend.Amount % 8 == 0 && ShiftExtend.Amount <= 24;
+ uint64_t Val = getShiftExtendAmount();
+ return (Val == 0 || Val == 16);
}
- bool isNeonMovImmShiftLSLH() const {
- if (!isShiftOrExtend())
+ bool isMovImm64Shifter() const {
+ if (!isShifter())
return false;
- if (ShiftExtend.ShiftType != A64SE::LSL)
+    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
+ AArch64_AM::ShiftExtendType ST = getShiftExtendType();
+ if (ST != AArch64_AM::LSL)
+ return false;
+ uint64_t Val = getShiftExtendAmount();
+ return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
+ }
+
+ bool isLogicalVecShifter() const {
+ if (!isShifter())
return false;
- // Valid shift amount is 0 and 8.
- return ShiftExtend.Amount == 0 || ShiftExtend.Amount == 8;
+ // A logical vector shifter is a left shift by 0, 8, 16, or 24.
+ unsigned Shift = getShiftExtendAmount();
+ return getShiftExtendType() == AArch64_AM::LSL &&
+ (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
}
- bool isNeonMovImmShiftMSL() const {
- if (!isShiftOrExtend())
+ bool isLogicalVecHalfWordShifter() const {
+ if (!isLogicalVecShifter())
return false;
- if (ShiftExtend.ShiftType != A64SE::MSL)
+    // A logical vector half-word shifter is a left shift by 0 or 8.
+ unsigned Shift = getShiftExtendAmount();
+ return getShiftExtendType() == AArch64_AM::LSL &&
+ (Shift == 0 || Shift == 8);
+ }
+
+ bool isMoveVecShifter() const {
+ if (!isShiftExtend())
return false;
- // Valid shift amount is 8 and 16.
- return ShiftExtend.Amount == 8 || ShiftExtend.Amount == 16;
+    // A move vector shifter is an MSL shift by 8 or 16.
+ unsigned Shift = getShiftExtendAmount();
+ return getShiftExtendType() == AArch64_AM::MSL &&
+ (Shift == 8 || Shift == 16);
}
- template <A64Layout::VectorLayout Layout, unsigned Count>
- bool isVectorList() const {
- return Kind == k_VectorList && VectorList.Layout == Layout &&
- VectorList.Count == Count;
+ // Fallback unscaled operands are for aliases of LDR/STR that fall back
+ // to LDUR/STUR when the offset is not legal for the former but is for
+ // the latter. As such, in addition to checking for being a legal unscaled
+ // address, also check that it is not a legal scaled address. This avoids
+ // ambiguity in the matcher.
+ template<int Width>
+ bool isSImm9OffsetFB() const {
+ return isSImm9() && !isUImm12Offset<Width / 8>();
}
- template <int MemSize> bool isSImm7Scaled() const {
+ bool isAdrpLabel() const {
+ // Validation was handled during parsing, so we just sanity check that
+ // something didn't go haywire.
if (!isImm())
- return false;
+ return false;
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE) return false;
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
+ int64_t Val = CE->getValue();
+ int64_t Min = - (4096 * (1LL << (21 - 1)));
+ int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
+ return (Val % 4096) == 0 && Val >= Min && Val <= Max;
+ }
- int64_t Val = CE->getValue();
- if (Val % MemSize != 0) return false;
+ return true;
+ }
- Val /= MemSize;
+ bool isAdrLabel() const {
+ // Validation was handled during parsing, so we just sanity check that
+ // something didn't go haywire.
+ if (!isImm())
+ return false;
- return Val >= -64 && Val < 64;
- }
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
+ int64_t Val = CE->getValue();
+ int64_t Min = - (1LL << (21 - 1));
+ int64_t Max = ((1LL << (21 - 1)) - 1);
+ return Val >= Min && Val <= Max;
+ }
- template<int BitWidth>
- bool isSImm() const {
- if (!isImm()) return false;
+ return true;
+ }
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE) return false;
+ void addExpr(MCInst &Inst, const MCExpr *Expr) const {
+ // Add as immediates when possible. Null MCExpr = 0.
+ if (!Expr)
+ Inst.addOperand(MCOperand::CreateImm(0));
+ else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
+ Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
+ else
+ Inst.addOperand(MCOperand::CreateExpr(Expr));
+ }
- return CE->getValue() >= -(1LL << (BitWidth - 1))
- && CE->getValue() < (1LL << (BitWidth - 1));
+ void addRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getReg()));
}
- template<int bitWidth>
- bool isUImm() const {
- if (!isImm()) return false;
+ void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ assert(
+ AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE) return false;
+ const MCRegisterInfo *RI = Ctx.getRegisterInfo();
+ uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
+ RI->getEncodingValue(getReg()));
- return CE->getValue() >= 0 && CE->getValue() < (1LL << bitWidth);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
}
- bool isUImm() const {
- if (!isImm()) return false;
-
- return isa<MCConstantExpr>(getImm());
+ void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ assert(
+ AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
+ Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
}
- bool isNeonUImm64Mask() const {
- if (!isImm())
- return false;
+ void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ assert(
+ AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
+ Inst.addOperand(MCOperand::CreateReg(getReg()));
+ }
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE)
- return false;
+ void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getReg()));
+ }
- uint64_t Value = CE->getValue();
+ template <unsigned NumRegs>
+ void addVectorList64Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
+ AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
+ unsigned FirstReg = FirstRegs[NumRegs - 1];
- // i64 value with each byte being either 0x00 or 0xff.
- for (unsigned i = 0; i < 8; ++i, Value >>= 8)
- if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff)
- return false;
- return true;
+ Inst.addOperand(
+ MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
}
- // if value == N, return true
- template<int N>
- bool isExactImm() const {
- if (!isImm()) return false;
-
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE) return false;
+ template <unsigned NumRegs>
+ void addVectorList128Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
+ AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
+ unsigned FirstReg = FirstRegs[NumRegs - 1];
- return CE->getValue() == N;
+ Inst.addOperand(
+ MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
}
- static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
- unsigned ShiftAmount,
- bool ImplicitAmount,
- SMLoc S,SMLoc E) {
- AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E);
- Op->ImmWithLSL.Val = Val;
- Op->ImmWithLSL.ShiftAmount = ShiftAmount;
- Op->ImmWithLSL.ImplicitAmount = ImplicitAmount;
- return Op;
+ void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
}
- static AArch64Operand *CreateCondCode(A64CC::CondCodes Code,
- SMLoc S, SMLoc E) {
- AArch64Operand *Op = new AArch64Operand(k_CondCode, S, E);
- Op->CondCode.Code = Code;
- return Op;
+ void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
}
- static AArch64Operand *CreateFPImm(double Val,
- SMLoc S, SMLoc E) {
- AArch64Operand *Op = new AArch64Operand(k_FPImmediate, S, E);
- Op->FPImm.Val = Val;
- return Op;
+ void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
}
- static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
- AArch64Operand *Op = new AArch64Operand(k_Immediate, S, E);
- Op->Imm.Val = Val;
- return Op;
+ void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
}
- static AArch64Operand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
- AArch64Operand *Op = new AArch64Operand(k_Register, S, E);
- Op->Reg.RegNum = RegNum;
- return Op;
+ void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
}
- static AArch64Operand *CreateWrappedReg(unsigned RegNum, SMLoc S, SMLoc E) {
- AArch64Operand *Op = new AArch64Operand(k_WrappedRegister, S, E);
- Op->Reg.RegNum = RegNum;
- return Op;
+ void addImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ // If this is a pageoff symrefexpr with an addend, adjust the addend
+ // to be only the page-offset portion. Otherwise, just add the expr
+ // as-is.
+ addExpr(Inst, getImm());
}
- static AArch64Operand *CreateShiftExtend(A64SE::ShiftExtSpecifiers ShiftTyp,
- unsigned Amount,
- bool ImplicitAmount,
- SMLoc S, SMLoc E) {
- AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, S, E);
- Op->ShiftExtend.ShiftType = ShiftTyp;
- Op->ShiftExtend.Amount = Amount;
- Op->ShiftExtend.ImplicitAmount = ImplicitAmount;
- return Op;
+ void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+ if (isShiftedImm()) {
+ addExpr(Inst, getShiftedImmVal());
+ Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
+ } else {
+ addExpr(Inst, getImm());
+ Inst.addOperand(MCOperand::CreateImm(0));
+ }
}
- static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S) {
- AArch64Operand *Op = new AArch64Operand(k_SysReg, S, S);
- Op->Tok.Data = Str.data();
- Op->Tok.Length = Str.size();
- return Op;
+ void addCondCodeOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getCondCode()));
}
- static AArch64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
- A64Layout::VectorLayout Layout,
- SMLoc S, SMLoc E) {
- AArch64Operand *Op = new AArch64Operand(k_VectorList, S, E);
- Op->VectorList.RegNum = RegNum;
- Op->VectorList.Count = Count;
- Op->VectorList.Layout = Layout;
- Op->StartLoc = S;
- Op->EndLoc = E;
- return Op;
+ void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ addExpr(Inst, getImm());
+ else
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
}
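For a constant operand the code above emits the value shifted right by 12, which is ADRP's 4 KiB page granularity. A minimal standalone sketch of the page arithmetic involved (plain C++ with a hypothetical helper and made-up addresses, not the parser's own types):

#include <cassert>
#include <cstdint>

// ADRP materialises the 4 KiB page of a target address relative to the
// page containing the instruction; only the page delta is encoded.
static int64_t adrpPageDelta(uint64_t PC, uint64_t Target) {
  return (int64_t)(Target >> 12) - (int64_t)(PC >> 12);
}

int main() {
  assert(adrpPageDelta(0x400000, 0x401234) == 1); // next page
  assert(adrpPageDelta(0x400000, 0x400fff) == 0); // same page
  return 0;
}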
- static AArch64Operand *CreateToken(StringRef Str, SMLoc S) {
- AArch64Operand *Op = new AArch64Operand(k_Token, S, S);
- Op->Tok.Data = Str.data();
- Op->Tok.Length = Str.size();
- return Op;
+ void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
+ addImmOperands(Inst, N);
}
+ template<int Scale>
+ void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
- void addExpr(MCInst &Inst, const MCExpr *Expr) const {
- // Add as immediates when possible.
- if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
- Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
- else
- Inst.addOperand(MCOperand::CreateExpr(Expr));
+ if (!MCE) {
+ Inst.addOperand(MCOperand::CreateExpr(getImm()));
+ return;
+ }
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
}
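The template divides a constant offset by Scale because the unsigned-offset load/store forms encode the byte offset in units of the access size. A small illustrative sketch of that scaling (hypothetical helper, not part of the parser):

#include <cassert>
#include <cstdint>

// Returns true when Offset is encodable as an unsigned, scaled 12-bit
// immediate for an access of Scale bytes, and yields the encoded field.
static bool encodeUImm12(uint64_t Offset, unsigned Scale, unsigned &Field) {
  if (Offset % Scale != 0)
    return false;               // must be a multiple of the access size
  uint64_t Scaled = Offset / Scale;
  if (Scaled > 0xfff)
    return false;               // only 12 bits are available
  Field = (unsigned)Scaled;
  return true;
}

int main() {
  unsigned F;
  assert(encodeUImm12(24, 8, F) && F == 3);   // e.g. ldr x0, [x1, #24]
  assert(!encodeUImm12(3, 8, F));             // unaligned for an 8-byte access
  return 0;
}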
- template<unsigned RegWidth>
- void addBFILSBOperands(MCInst &Inst, unsigned N) const {
+ void addSImm9Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
- unsigned EncodedVal = (RegWidth - CE->getValue()) % RegWidth;
- Inst.addOperand(MCOperand::CreateImm(EncodedVal));
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
- void addBFIWidthOperands(MCInst &Inst, unsigned N) const {
+ void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
- Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
}
- void addBFXWidthOperands(MCInst &Inst, unsigned N) const {
+ void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
+ }
- uint64_t LSB = Inst.getOperand(Inst.getNumOperands()-1).getImm();
- const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
+ void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
+ }
- Inst.addOperand(MCOperand::CreateImm(LSB + CE->getValue() - 1));
+ void addImm0_7Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
- void addCondCodeOperands(MCInst &Inst, unsigned N) const {
+ void addImm1_8Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateImm(getCondCode()));
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
- void addCVTFixedPosOperands(MCInst &Inst, unsigned N) const {
+ void addImm0_15Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
+ }
- const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
- Inst.addOperand(MCOperand::CreateImm(64 - CE->getValue()));
+ void addImm1_16Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
- void addFMOVImmOperands(MCInst &Inst, unsigned N) const {
+ void addImm0_31Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
+ }
- APFloat RealVal(FPImm.Val);
- uint32_t ImmVal;
- A64Imms::isFPImm(RealVal, ImmVal);
+ void addImm1_31Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
+ }
- Inst.addOperand(MCOperand::CreateImm(ImmVal));
+ void addImm1_32Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
- void addFPZeroOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands");
- Inst.addOperand(MCOperand::CreateImm(0));
+ void addImm0_63Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
- void addInvCondCodeOperands(MCInst &Inst, unsigned N) const {
+ void addImm1_63Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- unsigned Encoded = A64InvertCondCode(getCondCode());
- Inst.addOperand(MCOperand::CreateImm(Encoded));
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
- void addRegOperands(MCInst &Inst, unsigned N) const {
+ void addImm1_64Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateReg(getReg()));
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
- void addImmOperands(MCInst &Inst, unsigned N) const {
+ void addImm0_127Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
- template<int MemSize>
- void addSImm7ScaledOperands(MCInst &Inst, unsigned N) const {
+ void addImm0_255Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
+ }
- const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
- uint64_t Val = CE->getValue() / MemSize;
- Inst.addOperand(MCOperand::CreateImm(Val & 0x7f));
+ void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
- template<int BitWidth>
- void addSImmOperands(MCInst &Inst, unsigned N) const {
+ void addImm32_63Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
+ }
- const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
- uint64_t Val = CE->getValue();
- Inst.addOperand(MCOperand::CreateImm(Val & ((1ULL << BitWidth) - 1)));
+ void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ uint64_t encoding =
+ AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
+ Inst.addOperand(MCOperand::CreateImm(encoding));
}
- void addImmWithLSLOperands(MCInst &Inst, unsigned N) const {
- assert (N == 1 && "Invalid number of operands!");
+ void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
+ Inst.addOperand(MCOperand::CreateImm(encoding));
+ }
- addExpr(Inst, ImmWithLSL.Val);
+ void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
+ uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
+ Inst.addOperand(MCOperand::CreateImm(encoding));
}
- template<unsigned field_width, unsigned scale>
- void addLabelOperands(MCInst &Inst, unsigned N) const {
+ void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ uint64_t encoding =
+ AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
+ Inst.addOperand(MCOperand::CreateImm(encoding));
+ }
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
+ void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+ uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
+ Inst.addOperand(MCOperand::CreateImm(encoding));
+ }
- if (!CE) {
- addExpr(Inst, Imm.Val);
+ void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
+ // Branch operands don't encode the low bits, so shift them off
+ // here. If it's a label, however, just put it on directly as there's
+ // not enough information now to do anything.
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE) {
+ addExpr(Inst, getImm());
return;
}
-
- int64_t Val = CE->getValue();
- assert(Val % scale == 0 && "Unaligned immediate in instruction");
- Val /= scale;
-
- Inst.addOperand(MCOperand::CreateImm(Val & ((1LL << field_width) - 1)));
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
}
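As the comment notes, constant branch targets are stored pre-shifted because every A64 instruction is 4-byte aligned, so the low two bits carry no information. A tiny sketch of that relationship (illustrative only):

#include <cassert>
#include <cstdint>

// A64 branch immediates are word offsets: the byte displacement divided by 4.
static int64_t branchImm(int64_t ByteOffset) {
  assert((ByteOffset & 3) == 0 && "branch target must be 4-byte aligned");
  return ByteOffset / 4;
}

int main() {
  assert(branchImm(8) == 2);
  assert(branchImm(-4) == -1);
  return 0;
}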
- template<int MemSize>
- void addOffsetUImm12Operands(MCInst &Inst, unsigned N) const {
+ void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
+ // Branch operands don't encode the low bits, so shift them off
+ // here. If it's a label, however, just put it on directly as there's
+ // not enough information now to do anything.
assert(N == 1 && "Invalid number of operands!");
-
- if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
- Inst.addOperand(MCOperand::CreateImm(CE->getValue() / MemSize));
- } else {
- Inst.addOperand(MCOperand::CreateExpr(getImm()));
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE) {
+ addExpr(Inst, getImm());
+ return;
}
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
}
- template<unsigned RegWidth>
- void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands");
- const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);
+ void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
+ // Branch operands don't encode the low bits, so shift them off
+ // here. If it's a label, however, just put it on directly as there's
+ // not enough information now to do anything.
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE) {
+ addExpr(Inst, getImm());
+ return;
+ }
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
+ }
- uint32_t Bits;
- A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
+ void addFPImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getFPImm()));
+ }
- Inst.addOperand(MCOperand::CreateImm(Bits));
+ void addBarrierOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getBarrier()));
}
- void addMRSOperands(MCInst &Inst, unsigned N) const {
+ void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
bool Valid;
- StringRef Name(SysReg.Data, SysReg.Length);
- uint32_t Bits = A64SysReg::MRSMapper().fromString(Name, Valid);
+ auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
+ uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
Inst.addOperand(MCOperand::CreateImm(Bits));
}
- void addMSRWithRegOperands(MCInst &Inst, unsigned N) const {
+ void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
bool Valid;
- StringRef Name(SysReg.Data, SysReg.Length);
- uint32_t Bits = A64SysReg::MSRMapper().fromString(Name, Valid);
+ auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
+ uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
Inst.addOperand(MCOperand::CreateImm(Bits));
}
- void addMSRPStateOperands(MCInst &Inst, unsigned N) const {
+ void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
bool Valid;
- StringRef Name(SysReg.Data, SysReg.Length);
- uint32_t Bits = A64PState::PStateMapper().fromString(Name, Valid);
+ uint32_t Bits =
+ AArch64PState::PStateMapper().fromString(getSysReg(), Valid);
Inst.addOperand(MCOperand::CreateImm(Bits));
}
- void addMoveWideImmOperands(MCInst &Inst, unsigned N) const {
- assert(N == 2 && "Invalid number of operands!");
-
- addExpr(Inst, ImmWithLSL.Val);
-
- AArch64MCExpr::VariantKind Variant;
- if (!isNonConstantExpr(ImmWithLSL.Val, Variant)) {
- Inst.addOperand(MCOperand::CreateImm(ImmWithLSL.ShiftAmount / 16));
- return;
- }
-
- // We know it's relocated
- switch (Variant) {
- case AArch64MCExpr::VK_AARCH64_ABS_G0:
- case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
- case AArch64MCExpr::VK_AARCH64_SABS_G0:
- case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
- case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
- case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
- case AArch64MCExpr::VK_AARCH64_TPREL_G0:
- case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
- Inst.addOperand(MCOperand::CreateImm(0));
- break;
- case AArch64MCExpr::VK_AARCH64_ABS_G1:
- case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
- case AArch64MCExpr::VK_AARCH64_SABS_G1:
- case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
- case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
- case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
- case AArch64MCExpr::VK_AARCH64_TPREL_G1:
- case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
- Inst.addOperand(MCOperand::CreateImm(1));
- break;
- case AArch64MCExpr::VK_AARCH64_ABS_G2:
- case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
- case AArch64MCExpr::VK_AARCH64_SABS_G2:
- case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
- case AArch64MCExpr::VK_AARCH64_TPREL_G2:
- Inst.addOperand(MCOperand::CreateImm(2));
- break;
- case AArch64MCExpr::VK_AARCH64_ABS_G3:
- Inst.addOperand(MCOperand::CreateImm(3));
- break;
- default: llvm_unreachable("Inappropriate move wide relocation");
- }
+ void addSysCROperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getSysCR()));
}
- template<int RegWidth, bool isValidImm(int, uint64_t, int&, int&)>
- void addMoveWideMovAliasOperands(MCInst &Inst, unsigned N) const {
- assert(N == 2 && "Invalid number of operands!");
- int UImm16, Shift;
-
- const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
- uint64_t Value = CE->getValue();
-
- if (RegWidth == 32) {
- Value &= 0xffffffffULL;
- }
+ void addPrefetchOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
+ }
- bool Valid = isValidImm(RegWidth, Value, UImm16, Shift);
- (void)Valid;
- assert(Valid && "Invalid immediates should have been weeded out by now");
+ void addShifterOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ unsigned Imm =
+ AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
+ Inst.addOperand(MCOperand::CreateImm(Imm));
+ }
- Inst.addOperand(MCOperand::CreateImm(UImm16));
- Inst.addOperand(MCOperand::CreateImm(Shift));
+ void addExtendOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ AArch64_AM::ShiftExtendType ET = getShiftExtendType();
+ if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
+ unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
+ Inst.addOperand(MCOperand::CreateImm(Imm));
}
- void addPRFMOperands(MCInst &Inst, unsigned N) const {
+ void addExtend64Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
+ AArch64_AM::ShiftExtendType ET = getShiftExtendType();
+ if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
+ unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
+ Inst.addOperand(MCOperand::CreateImm(Imm));
+ }
- const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
- assert(CE->getValue() >= 0 && CE->getValue() <= 31
- && "PRFM operand should be 5-bits");
+ void addMemExtendOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+ AArch64_AM::ShiftExtendType ET = getShiftExtendType();
+ bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
+ Inst.addOperand(MCOperand::CreateImm(IsSigned));
+ Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
+ }
- Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
+ // For 8-bit load/store instructions with a register offset, both the
+ // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
+ // they're disambiguated by whether the shift was explicit or implicit rather
+ // than its size.
+ void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+ AArch64_AM::ShiftExtendType ET = getShiftExtendType();
+ bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
+ Inst.addOperand(MCOperand::CreateImm(IsSigned));
+ Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
}
- // For Add-sub (extended register) operands.
- void addRegExtendOperands(MCInst &Inst, unsigned N) const {
+ template<int Shift>
+ void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
+ const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
+ uint64_t Value = CE->getValue();
+ Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
}
- // For Vector Immediates shifted imm operands.
- void addNeonMovImmShiftLSLOperands(MCInst &Inst, unsigned N) const {
+ template<int Shift>
+ void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- if (ShiftExtend.Amount % 8 != 0 || ShiftExtend.Amount > 24)
- llvm_unreachable("Invalid shift amount for vector immediate inst.");
-
- // Encode LSL shift amount 0, 8, 16, 24 as 0, 1, 2, 3.
- int64_t Imm = ShiftExtend.Amount / 8;
- Inst.addOperand(MCOperand::CreateImm(Imm));
+ const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
+ uint64_t Value = CE->getValue();
+ Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
}
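These two templates back the `mov Rd, #imm` aliases: MOVZ places a halfword of the value, MOVN a halfword of its complement, at the position given by Shift. A hedged standalone sketch of the halfword extraction (hypothetical helpers, made-up example values):

#include <cassert>
#include <cstdint>

static uint16_t movzHalf(uint64_t Value, unsigned Shift) {
  return (Value >> Shift) & 0xffff;   // halfword MOVZ would write
}
static uint16_t movnHalf(uint64_t Value, unsigned Shift) {
  return (~Value >> Shift) & 0xffff;  // halfword MOVN would write (inverted)
}

int main() {
  // mov x0, #0x20000  ->  movz x0, #2, lsl #16
  assert(movzHalf(0x20000, 16) == 2);
  // mov x0, #0xfffffffffffffffe  ->  movn x0, #1
  assert(movnHalf(0xfffffffffffffffeULL, 0) == 1);
  return 0;
}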
- void addNeonMovImmShiftLSLHOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
-
- if (ShiftExtend.Amount != 0 && ShiftExtend.Amount != 8)
- llvm_unreachable("Invalid shift amount for vector immediate inst.");
+ void print(raw_ostream &OS) const override;
- // Encode LSLH shift amount 0, 8 as 0, 1.
- int64_t Imm = ShiftExtend.Amount / 8;
- Inst.addOperand(MCOperand::CreateImm(Imm));
+ static std::unique_ptr<AArch64Operand>
+ CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
+ auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
+ Op->Tok.Data = Str.data();
+ Op->Tok.Length = Str.size();
+ Op->Tok.IsSuffix = IsSuffix;
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
}
- void addNeonMovImmShiftMSLOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
-
- if (ShiftExtend.Amount != 8 && ShiftExtend.Amount != 16)
- llvm_unreachable("Invalid shift amount for vector immediate inst.");
+ static std::unique_ptr<AArch64Operand>
+ CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
+ auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
+ Op->Reg.RegNum = RegNum;
+ Op->Reg.isVector = isVector;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
- // Encode MSL shift amount 8, 16 as 0, 1.
- int64_t Imm = ShiftExtend.Amount / 8 - 1;
- Inst.addOperand(MCOperand::CreateImm(Imm));
+ static std::unique_ptr<AArch64Operand>
+ CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
+ char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
+ auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
+ Op->VectorList.RegNum = RegNum;
+ Op->VectorList.Count = Count;
+ Op->VectorList.NumElements = NumElements;
+ Op->VectorList.ElementKind = ElementKind;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
}
- // For the extend in load-store (register offset) instructions.
- template<unsigned MemSize>
- void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const {
- addAddrRegExtendOperands(Inst, N, MemSize);
+ static std::unique_ptr<AArch64Operand>
+ CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
+ auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
+ Op->VectorIndex.Val = Idx;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
}
- void addAddrRegExtendOperands(MCInst &Inst, unsigned N,
- unsigned MemSize) const {
- assert(N == 1 && "Invalid number of operands!");
+ static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
+ SMLoc E, MCContext &Ctx) {
+ auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
+ Op->Imm.Val = Val;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
- // First bit of Option is set in instruction classes, the high two bits are
- // as follows:
- unsigned OptionHi = 0;
- switch (ShiftExtend.ShiftType) {
- case A64SE::UXTW:
- case A64SE::LSL:
- OptionHi = 1;
- break;
- case A64SE::SXTW:
- case A64SE::SXTX:
- OptionHi = 3;
- break;
- default:
- llvm_unreachable("Invalid extend type for register offset");
- }
+ static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
+ unsigned ShiftAmount,
+ SMLoc S, SMLoc E,
+ MCContext &Ctx) {
+ auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
+ Op->ShiftedImm.Val = Val;
+ Op->ShiftedImm.ShiftAmount = ShiftAmount;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
- unsigned S = 0;
- if (MemSize == 1 && !ShiftExtend.ImplicitAmount)
- S = 1;
- else if (MemSize != 1 && ShiftExtend.Amount != 0)
- S = 1;
+ static std::unique_ptr<AArch64Operand>
+ CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
+ auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
+ Op->CondCode.Code = Code;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
- Inst.addOperand(MCOperand::CreateImm((OptionHi << 1) | S));
+ static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
+ MCContext &Ctx) {
+ auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
+ Op->FPImm.Val = Val;
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
}
- void addShiftOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
+ static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, SMLoc S,
+ MCContext &Ctx) {
+ auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
+ Op->Barrier.Val = Val;
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
}
- void addNeonUImm64MaskOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
+ static std::unique_ptr<AArch64Operand>
+ CreateSysReg(StringRef Str, SMLoc S, uint64_t FeatureBits, MCContext &Ctx) {
+ auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
+ Op->SysReg.Data = Str.data();
+ Op->SysReg.Length = Str.size();
+ Op->SysReg.FeatureBits = FeatureBits;
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
+ }
- // A bit from each byte in the constant forms the encoded immediate
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- uint64_t Value = CE->getValue();
+ static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
+ SMLoc E, MCContext &Ctx) {
+ auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
+ Op->SysCRImm.Val = Val;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
- unsigned Imm = 0;
- for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
- Imm |= (Value & 1) << i;
- }
- Inst.addOperand(MCOperand::CreateImm(Imm));
+ static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, SMLoc S,
+ MCContext &Ctx) {
+ auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
+ Op->Prefetch.Val = Val;
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
}
- void addVectorListOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
+ static std::unique_ptr<AArch64Operand>
+ CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
+ bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
+ auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
+ Op->ShiftExtend.Type = ShOp;
+ Op->ShiftExtend.Amount = Val;
+ Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
}
};
} // end anonymous namespace.
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- StringRef Mnemonic) {
+void AArch64Operand::print(raw_ostream &OS) const {
+ switch (Kind) {
+ case k_FPImm:
+ OS << "<fpimm " << getFPImm() << "("
+ << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
+ break;
+ case k_Barrier: {
+ bool Valid;
+ StringRef Name = AArch64DB::DBarrierMapper().toString(getBarrier(), Valid);
+ if (Valid)
+ OS << "<barrier " << Name << ">";
+ else
+ OS << "<barrier invalid #" << getBarrier() << ">";
+ break;
+ }
+ case k_Immediate:
+ getImm()->print(OS);
+ break;
+ case k_ShiftedImm: {
+ unsigned Shift = getShiftedImmShift();
+ OS << "<shiftedimm ";
+ getShiftedImmVal()->print(OS);
+ OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
+ break;
+ }
+ case k_CondCode:
+ OS << "<condcode " << getCondCode() << ">";
+ break;
+ case k_Register:
+ OS << "<register " << getReg() << ">";
+ break;
+ case k_VectorList: {
+ OS << "<vectorlist ";
+ unsigned Reg = getVectorListStart();
+ for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
+ OS << Reg + i << " ";
+ OS << ">";
+ break;
+ }
+ case k_VectorIndex:
+ OS << "<vectorindex " << getVectorIndex() << ">";
+ break;
+ case k_SysReg:
+ OS << "<sysreg: " << getSysReg() << '>';
+ break;
+ case k_Token:
+ OS << "'" << getToken() << "'";
+ break;
+ case k_SysCR:
+ OS << "c" << getSysCR();
+ break;
+ case k_Prefetch: {
+ bool Valid;
+ StringRef Name = AArch64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
+ if (Valid)
+ OS << "<prfop " << Name << ">";
+ else
+ OS << "<prfop invalid #" << getPrefetch() << ">";
+ break;
+ }
+ case k_ShiftExtend: {
+ OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
+ << getShiftExtendAmount();
+ if (!hasShiftExtendAmount())
+ OS << "<imp>";
+ OS << '>';
+ break;
+ }
+ }
+}
- // See if the operand has a custom parser
- OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
+/// @name Auto-generated Match Functions
+/// {
- // It could either succeed, fail or just not care.
- if (ResTy != MatchOperand_NoMatch)
- return ResTy;
+static unsigned MatchRegisterName(StringRef Name);
- switch (getLexer().getKind()) {
- default:
- Error(Parser.getTok().getLoc(), "unexpected token in operand");
- return MatchOperand_ParseFail;
- case AsmToken::Identifier: {
- // It might be in the LSL/UXTB family ...
- OperandMatchResultTy GotShift = ParseShiftExtend(Operands);
+/// }
+
+static unsigned matchVectorRegName(StringRef Name) {
+ return StringSwitch<unsigned>(Name)
+ .Case("v0", AArch64::Q0)
+ .Case("v1", AArch64::Q1)
+ .Case("v2", AArch64::Q2)
+ .Case("v3", AArch64::Q3)
+ .Case("v4", AArch64::Q4)
+ .Case("v5", AArch64::Q5)
+ .Case("v6", AArch64::Q6)
+ .Case("v7", AArch64::Q7)
+ .Case("v8", AArch64::Q8)
+ .Case("v9", AArch64::Q9)
+ .Case("v10", AArch64::Q10)
+ .Case("v11", AArch64::Q11)
+ .Case("v12", AArch64::Q12)
+ .Case("v13", AArch64::Q13)
+ .Case("v14", AArch64::Q14)
+ .Case("v15", AArch64::Q15)
+ .Case("v16", AArch64::Q16)
+ .Case("v17", AArch64::Q17)
+ .Case("v18", AArch64::Q18)
+ .Case("v19", AArch64::Q19)
+ .Case("v20", AArch64::Q20)
+ .Case("v21", AArch64::Q21)
+ .Case("v22", AArch64::Q22)
+ .Case("v23", AArch64::Q23)
+ .Case("v24", AArch64::Q24)
+ .Case("v25", AArch64::Q25)
+ .Case("v26", AArch64::Q26)
+ .Case("v27", AArch64::Q27)
+ .Case("v28", AArch64::Q28)
+ .Case("v29", AArch64::Q29)
+ .Case("v30", AArch64::Q30)
+ .Case("v31", AArch64::Q31)
+ .Default(0);
+}
- // We can only continue if no tokens were eaten.
- if (GotShift != MatchOperand_NoMatch)
- return GotShift;
+static bool isValidVectorKind(StringRef Name) {
+ return StringSwitch<bool>(Name.lower())
+ .Case(".8b", true)
+ .Case(".16b", true)
+ .Case(".4h", true)
+ .Case(".8h", true)
+ .Case(".2s", true)
+ .Case(".4s", true)
+ .Case(".1d", true)
+ .Case(".2d", true)
+ .Case(".1q", true)
+ // Accept the width-neutral ones, too, for verbose syntax. If those

+ // aren't used in the right places, the token operand won't match so
+ // all will work out.
+ .Case(".b", true)
+ .Case(".h", true)
+ .Case(".s", true)
+ .Case(".d", true)
+ .Default(false);
+}
- // ... or it might be a register ...
- uint32_t NumLanes = 0;
- OperandMatchResultTy GotReg = ParseRegister(Operands, NumLanes);
- assert(GotReg != MatchOperand_ParseFail
- && "register parsing shouldn't partially succeed");
-
- if (GotReg == MatchOperand_Success) {
- if (Parser.getTok().is(AsmToken::LBrac))
- return ParseNEONLane(Operands, NumLanes);
- else
- return MatchOperand_Success;
- }
- // ... or it might be a symbolish thing
- }
- // Fall through
- case AsmToken::LParen: // E.g. (strcmp-4)
- case AsmToken::Integer: // 1f, 2b labels
- case AsmToken::String: // quoted labels
- case AsmToken::Dot: // . is Current location
- case AsmToken::Dollar: // $ is PC
- case AsmToken::Colon: {
- SMLoc StartLoc = Parser.getTok().getLoc();
- SMLoc EndLoc;
- const MCExpr *ImmVal = 0;
-
- if (ParseImmediate(ImmVal) != MatchOperand_Success)
- return MatchOperand_ParseFail;
+static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
+ char &ElementKind) {
+ assert(isValidVectorKind(Name));
- EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
- Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
- return MatchOperand_Success;
- }
- case AsmToken::Hash: { // Immediates
- SMLoc StartLoc = Parser.getTok().getLoc();
- SMLoc EndLoc;
- const MCExpr *ImmVal = 0;
- Parser.Lex();
+ ElementKind = Name.lower()[Name.size() - 1];
+ NumElements = 0;
- if (ParseImmediate(ImmVal) != MatchOperand_Success)
- return MatchOperand_ParseFail;
+ if (Name.size() == 2)
+ return;
- EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
- Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
- return MatchOperand_Success;
+ // Parse the lane count
+ Name = Name.drop_front();
+ while (isdigit(Name.front())) {
+ NumElements = 10 * NumElements + (Name.front() - '0');
+ Name = Name.drop_front();
}
- case AsmToken::LBrac: {
- SMLoc Loc = Parser.getTok().getLoc();
- Operands.push_back(AArch64Operand::CreateToken("[", Loc));
- Parser.Lex(); // Eat '['
+}
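parseValidVectorKind splits a suffix such as ".4s" into a lane count and an element-kind character; a width-neutral suffix like ".s" yields a count of 0. A small standalone restatement of that split, for illustration only:

#include <cassert>
#include <cctype>
#include <cstddef>
#include <string>

// Mirrors the split performed above: ".<count><kind>" or just ".<kind>".
static void splitVectorKind(const std::string &Suffix, unsigned &NumElements,
                            char &ElementKind) {
  ElementKind = Suffix.back();
  NumElements = 0;
  for (std::size_t i = 1; i + 1 < Suffix.size() && isdigit(Suffix[i]); ++i)
    NumElements = 10 * NumElements + (Suffix[i] - '0');
}

int main() {
  unsigned N; char K;
  splitVectorKind(".4s", N, K);
  assert(N == 4 && K == 's');
  splitVectorKind(".16b", N, K);
  assert(N == 16 && K == 'b');
  splitVectorKind(".d", N, K);     // width-neutral form
  assert(N == 0 && K == 'd');
  return 0;
}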
- // There's no comma after a '[', so we can parse the next operand
- // immediately.
- return ParseOperand(Operands, Mnemonic);
- }
- // The following will likely be useful later, but not in very early cases
- case AsmToken::LCurly: // SIMD vector list is not parsed here
- llvm_unreachable("Don't know how to deal with '{' in operand");
- return MatchOperand_ParseFail;
+bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
+ SMLoc &EndLoc) {
+ StartLoc = getLoc();
+ RegNo = tryParseRegister();
+ EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
+ return (RegNo == (unsigned)-1);
+}
+
+// Matches a register name or register alias previously defined by '.req'
+unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
+ bool isVector) {
+ unsigned RegNum = isVector ? matchVectorRegName(Name)
+ : MatchRegisterName(Name);
+
+ if (RegNum == 0) {
+ // Check for aliases registered via .req. Canonicalize to lower case.
+ // That's more consistent since register names are case insensitive, and
+ // it's how the original entry was passed in from MC/MCParser/AsmParser.
+ auto Entry = RegisterReqs.find(Name.lower());
+ if (Entry == RegisterReqs.end())
+ return 0;
+ // set RegNum if the match is the right kind of register
+ if (isVector == Entry->getValue().first)
+ RegNum = Entry->getValue().second;
+ }
+ return RegNum;
+}
+
+/// tryParseRegister - Try to parse a register name. The token must be an
+/// Identifier when called, and if it is a register name the token is eaten and
+/// the register is added to the operand list.
+int AArch64AsmParser::tryParseRegister() {
+ const AsmToken &Tok = Parser.getTok();
+ assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
+
+ std::string lowerCase = Tok.getString().lower();
+ unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
+ // Also handle a few aliases of registers.
+ if (RegNum == 0)
+ RegNum = StringSwitch<unsigned>(lowerCase)
+ .Case("fp", AArch64::FP)
+ .Case("lr", AArch64::LR)
+ .Case("x31", AArch64::XZR)
+ .Case("w31", AArch64::WZR)
+ .Default(0);
+
+ if (RegNum == 0)
+ return -1;
+
+ Parser.Lex(); // Eat identifier token.
+ return RegNum;
+}
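Besides .req aliases, tryParseRegister accepts a few fixed spellings: "fp", "lr", and the zero-register forms "x31"/"w31". A small usage-level sketch of that alias idea (stand-in constants, not the real tablegen'd register numbering):

#include <cassert>
#include <string>

// Stand-in register ids for illustration only.
enum Reg { NONE = 0, X29_FP, X30_LR, XZR, WZR };

static Reg matchAlias(const std::string &Name) {
  if (Name == "fp")  return X29_FP;  // frame pointer is x29
  if (Name == "lr")  return X30_LR;  // link register is x30
  if (Name == "x31") return XZR;     // register number 31 reads as zero
  if (Name == "w31") return WZR;
  return NONE;
}

int main() {
  assert(matchAlias("fp") == X29_FP);
  assert(matchAlias("x31") == XZR);
  assert(matchAlias("sp") == NONE);  // "sp" is a real register, not an alias here
  return 0;
}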
+
+/// tryMatchVectorRegister - Try to parse a vector register name with optional
+/// kind specifier. If it is a register specifier, eat the token and return it.
+int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
+ if (Parser.getTok().isNot(AsmToken::Identifier)) {
+ TokError("vector register expected");
+ return -1;
+ }
+
+ StringRef Name = Parser.getTok().getString();
+ // If there is a kind specifier, it's separated from the register name by
+ // a '.'.
+ size_t Start = 0, Next = Name.find('.');
+ StringRef Head = Name.slice(Start, Next);
+ unsigned RegNum = matchRegisterNameAlias(Head, true);
+
+ if (RegNum) {
+ if (Next != StringRef::npos) {
+ Kind = Name.slice(Next, StringRef::npos);
+ if (!isValidVectorKind(Kind)) {
+ TokError("invalid vector kind qualifier");
+ return -1;
+ }
+ }
+ Parser.Lex(); // Eat the register token.
+ return RegNum;
}
+
+ if (expected)
+ TokError("vector register expected");
+ return -1;
}
+/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseImmediate(const MCExpr *&ExprVal) {
- if (getLexer().is(AsmToken::Colon)) {
- AArch64MCExpr::VariantKind RefKind;
+AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
+ SMLoc S = getLoc();
- OperandMatchResultTy ResTy = ParseRelocPrefix(RefKind);
- if (ResTy != MatchOperand_Success)
- return ResTy;
+ if (Parser.getTok().isNot(AsmToken::Identifier)) {
+ Error(S, "Expected cN operand where 0 <= N <= 15");
+ return MatchOperand_ParseFail;
+ }
- const MCExpr *SubExprVal;
- if (getParser().parseExpression(SubExprVal))
- return MatchOperand_ParseFail;
+ StringRef Tok = Parser.getTok().getIdentifier();
+ if (Tok[0] != 'c' && Tok[0] != 'C') {
+ Error(S, "Expected cN operand where 0 <= N <= 15");
+ return MatchOperand_ParseFail;
+ }
- ExprVal = AArch64MCExpr::Create(RefKind, SubExprVal, getContext());
- return MatchOperand_Success;
+ uint32_t CRNum;
+ bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
+ if (BadNum || CRNum > 15) {
+ Error(S, "Expected cN operand where 0 <= N <= 15");
+ return MatchOperand_ParseFail;
}
- // No weird AArch64MCExpr prefix
- return getParser().parseExpression(ExprVal)
- ? MatchOperand_ParseFail : MatchOperand_Success;
+ Parser.Lex(); // Eat identifier token.
+ Operands.push_back(
+ AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
+ return MatchOperand_Success;
}
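The system-instruction CR operand is simply the letter 'c' followed by a number in [0, 15]. A minimal sketch of that validation (standalone hypothetical helper, illustrative only):

#include <cassert>
#include <cstdlib>

// Returns the CR number for "cN" / "CN" with 0 <= N <= 15, or -1 otherwise.
static int parseSysCR(const char *Tok) {
  if ((Tok[0] != 'c' && Tok[0] != 'C') || Tok[1] == '\0')
    return -1;
  char *End = nullptr;
  long N = std::strtol(Tok + 1, &End, 10);
  if (*End != '\0' || N < 0 || N > 15)
    return -1;
  return (int)N;
}

int main() {
  assert(parseSysCR("c7") == 7);
  assert(parseSysCR("C15") == 15);
  assert(parseSysCR("c16") == -1);
  assert(parseSysCR("cx") == -1);
  return 0;
}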
-// A lane attached to a NEON register. "[N]", which should yield three tokens:
-// '[', N, ']'. A hash is not allowed to precede the immediate here.
+/// tryParsePrefetch - Try to parse a prefetch operand.
AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- uint32_t NumLanes) {
- SMLoc Loc = Parser.getTok().getLoc();
+AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
+ SMLoc S = getLoc();
+ const AsmToken &Tok = Parser.getTok();
+ // Either an identifier for named values or a 5-bit immediate.
+ bool Hash = Tok.is(AsmToken::Hash);
+ if (Hash || Tok.is(AsmToken::Integer)) {
+ if (Hash)
+ Parser.Lex(); // Eat hash token.
+ const MCExpr *ImmVal;
+ if (getParser().parseExpression(ImmVal))
+ return MatchOperand_ParseFail;
+
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
+ if (!MCE) {
+ TokError("immediate value expected for prefetch operand");
+ return MatchOperand_ParseFail;
+ }
+ unsigned prfop = MCE->getValue();
+ if (prfop > 31) {
+ TokError("prefetch operand out of range, [0,31] expected");
+ return MatchOperand_ParseFail;
+ }
- assert(Parser.getTok().is(AsmToken::LBrac) && "inappropriate operand");
- Operands.push_back(AArch64Operand::CreateToken("[", Loc));
- Parser.Lex(); // Eat '['
+ Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
+ return MatchOperand_Success;
+ }
- if (Parser.getTok().isNot(AsmToken::Integer)) {
- Error(Parser.getTok().getLoc(), "expected lane number");
+ if (Tok.isNot(AsmToken::Identifier)) {
+ TokError("pre-fetch hint expected");
return MatchOperand_ParseFail;
}
- if (Parser.getTok().getIntVal() >= NumLanes) {
- Error(Parser.getTok().getLoc(), "lane number incompatible with layout");
+ bool Valid;
+ unsigned prfop = AArch64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
+ if (!Valid) {
+ TokError("pre-fetch hint expected");
return MatchOperand_ParseFail;
}
- const MCExpr *Lane = MCConstantExpr::Create(Parser.getTok().getIntVal(),
- getContext());
- SMLoc S = Parser.getTok().getLoc();
- Parser.Lex(); // Eat actual lane
- SMLoc E = Parser.getTok().getLoc();
- Operands.push_back(AArch64Operand::CreateImm(Lane, S, E));
+ Parser.Lex(); // Eat identifier token.
+ Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
+ return MatchOperand_Success;
+}
+
+/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
+/// instruction.
+AArch64AsmParser::OperandMatchResultTy
+AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
+ SMLoc S = getLoc();
+ const MCExpr *Expr;
+ if (Parser.getTok().is(AsmToken::Hash)) {
+ Parser.Lex(); // Eat hash token.
+ }
- if (Parser.getTok().isNot(AsmToken::RBrac)) {
- Error(Parser.getTok().getLoc(), "expected ']' after lane");
+ if (parseSymbolicImmVal(Expr))
return MatchOperand_ParseFail;
+
+ AArch64MCExpr::VariantKind ELFRefKind;
+ MCSymbolRefExpr::VariantKind DarwinRefKind;
+ int64_t Addend;
+ if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
+ if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
+ ELFRefKind == AArch64MCExpr::VK_INVALID) {
+ // No modifier was specified at all; this is the syntax for an ELF basic
+ // ADRP relocation (unfortunately).
+ Expr =
+ AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
+ } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
+ DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
+ Addend != 0) {
+ Error(S, "gotpage label reference not allowed an addend");
+ return MatchOperand_ParseFail;
+ } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
+ DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
+ DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
+ ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
+ ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
+ ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
+ // The operand must be an @page or @gotpage qualified symbolref.
+ Error(S, "page or gotpage label reference expected");
+ return MatchOperand_ParseFail;
+ }
}
- Operands.push_back(AArch64Operand::CreateToken("]", Loc));
- Parser.Lex(); // Eat ']'
+ // We have either a label reference possibly with addend or an immediate. The
+ // addend is a raw value here. The linker will adjust it to only reference the
+ // page.
+ SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
+ Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
return MatchOperand_Success;
}
+/// tryParseAdrLabel - Parse and validate a source label for the ADR
+/// instruction.
AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind) {
- assert(getLexer().is(AsmToken::Colon) && "expected a ':'");
- Parser.Lex();
+AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
+ SMLoc S = getLoc();
+ const MCExpr *Expr;
- if (getLexer().isNot(AsmToken::Identifier)) {
- Error(Parser.getTok().getLoc(),
- "expected relocation specifier in operand after ':'");
- return MatchOperand_ParseFail;
+ if (Parser.getTok().is(AsmToken::Hash)) {
+ Parser.Lex(); // Eat hash token.
}
- std::string LowerCase = Parser.getTok().getIdentifier().lower();
- RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
- .Case("got", AArch64MCExpr::VK_AARCH64_GOT)
- .Case("got_lo12", AArch64MCExpr::VK_AARCH64_GOT_LO12)
- .Case("lo12", AArch64MCExpr::VK_AARCH64_LO12)
- .Case("abs_g0", AArch64MCExpr::VK_AARCH64_ABS_G0)
- .Case("abs_g0_nc", AArch64MCExpr::VK_AARCH64_ABS_G0_NC)
- .Case("abs_g1", AArch64MCExpr::VK_AARCH64_ABS_G1)
- .Case("abs_g1_nc", AArch64MCExpr::VK_AARCH64_ABS_G1_NC)
- .Case("abs_g2", AArch64MCExpr::VK_AARCH64_ABS_G2)
- .Case("abs_g2_nc", AArch64MCExpr::VK_AARCH64_ABS_G2_NC)
- .Case("abs_g3", AArch64MCExpr::VK_AARCH64_ABS_G3)
- .Case("abs_g0_s", AArch64MCExpr::VK_AARCH64_SABS_G0)
- .Case("abs_g1_s", AArch64MCExpr::VK_AARCH64_SABS_G1)
- .Case("abs_g2_s", AArch64MCExpr::VK_AARCH64_SABS_G2)
- .Case("dtprel_g2", AArch64MCExpr::VK_AARCH64_DTPREL_G2)
- .Case("dtprel_g1", AArch64MCExpr::VK_AARCH64_DTPREL_G1)
- .Case("dtprel_g1_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC)
- .Case("dtprel_g0", AArch64MCExpr::VK_AARCH64_DTPREL_G0)
- .Case("dtprel_g0_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC)
- .Case("dtprel_hi12", AArch64MCExpr::VK_AARCH64_DTPREL_HI12)
- .Case("dtprel_lo12", AArch64MCExpr::VK_AARCH64_DTPREL_LO12)
- .Case("dtprel_lo12_nc", AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC)
- .Case("gottprel_g1", AArch64MCExpr::VK_AARCH64_GOTTPREL_G1)
- .Case("gottprel_g0_nc", AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC)
- .Case("gottprel", AArch64MCExpr::VK_AARCH64_GOTTPREL)
- .Case("gottprel_lo12", AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12)
- .Case("tprel_g2", AArch64MCExpr::VK_AARCH64_TPREL_G2)
- .Case("tprel_g1", AArch64MCExpr::VK_AARCH64_TPREL_G1)
- .Case("tprel_g1_nc", AArch64MCExpr::VK_AARCH64_TPREL_G1_NC)
- .Case("tprel_g0", AArch64MCExpr::VK_AARCH64_TPREL_G0)
- .Case("tprel_g0_nc", AArch64MCExpr::VK_AARCH64_TPREL_G0_NC)
- .Case("tprel_hi12", AArch64MCExpr::VK_AARCH64_TPREL_HI12)
- .Case("tprel_lo12", AArch64MCExpr::VK_AARCH64_TPREL_LO12)
- .Case("tprel_lo12_nc", AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC)
- .Case("tlsdesc", AArch64MCExpr::VK_AARCH64_TLSDESC)
- .Case("tlsdesc_lo12", AArch64MCExpr::VK_AARCH64_TLSDESC_LO12)
- .Default(AArch64MCExpr::VK_AARCH64_None);
-
- if (RefKind == AArch64MCExpr::VK_AARCH64_None) {
- Error(Parser.getTok().getLoc(),
- "expected relocation specifier in operand after ':'");
+ if (getParser().parseExpression(Expr))
return MatchOperand_ParseFail;
+
+ SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
+ Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
+
+ return MatchOperand_Success;
+}
+
+/// tryParseFPImm - A floating point immediate expression operand.
+AArch64AsmParser::OperandMatchResultTy
+AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
+ SMLoc S = getLoc();
+
+ bool Hash = false;
+ if (Parser.getTok().is(AsmToken::Hash)) {
+ Parser.Lex(); // Eat '#'
+ Hash = true;
}
- Parser.Lex(); // Eat identifier
- if (getLexer().isNot(AsmToken::Colon)) {
- Error(Parser.getTok().getLoc(),
- "expected ':' after relocation specifier");
- return MatchOperand_ParseFail;
+ // Handle negation, as that still comes through as a separate token.
+ bool isNegative = false;
+ if (Parser.getTok().is(AsmToken::Minus)) {
+ isNegative = true;
+ Parser.Lex();
}
- Parser.Lex();
- return MatchOperand_Success;
+ const AsmToken &Tok = Parser.getTok();
+ if (Tok.is(AsmToken::Real)) {
+ APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
+ uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
+ // If we had a '-' in front, toggle the sign bit.
+ IntVal ^= (uint64_t)isNegative << 63;
+ int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
+ Parser.Lex(); // Eat the token.
+ // Check for out of range values. As an exception, we let Zero through,
+ // as we handle that special case in post-processing before matching in
+ // order to use the zero register for it.
+ if (Val == -1 && !RealVal.isZero()) {
+ TokError("expected compatible register or floating-point constant");
+ return MatchOperand_ParseFail;
+ }
+ Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
+ return MatchOperand_Success;
+ }
+ if (Tok.is(AsmToken::Integer)) {
+ int64_t Val;
+ if (!isNegative && Tok.getString().startswith("0x")) {
+ Val = Tok.getIntVal();
+ if (Val > 255 || Val < 0) {
+ TokError("encoded floating point value out of range");
+ return MatchOperand_ParseFail;
+ }
+ } else {
+ APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
+ uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
+ // If we had a '-' in front, toggle the sign bit.
+ IntVal ^= (uint64_t)isNegative << 63;
+ Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
+ }
+ Parser.Lex(); // Eat the token.
+ Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
+ return MatchOperand_Success;
+ }
+
+ if (!Hash)
+ return MatchOperand_NoMatch;
+
+ TokError("invalid floating point immediate");
+ return MatchOperand_ParseFail;
}
+/// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseImmWithLSLOperand(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // FIXME?: I want to live in a world where immediates must start with
- // #. Please don't dash my hopes (well, do if you have a good reason).
- if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;
+AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
+ SMLoc S = getLoc();
- SMLoc S = Parser.getTok().getLoc();
- Parser.Lex(); // Eat '#'
+ if (Parser.getTok().is(AsmToken::Hash))
+ Parser.Lex(); // Eat '#'
+ else if (Parser.getTok().isNot(AsmToken::Integer))
+ // The operand must start with '#' or be a bare integer; otherwise there is
+ // no match for this parser.
+ return MatchOperand_NoMatch;
const MCExpr *Imm;
- if (ParseImmediate(Imm) != MatchOperand_Success)
+ if (parseSymbolicImmVal(Imm))
return MatchOperand_ParseFail;
else if (Parser.getTok().isNot(AsmToken::Comma)) {
+ uint64_t ShiftAmount = 0;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
+ if (MCE) {
+ int64_t Val = MCE->getValue();
+ if (Val > 0xfff && (Val & 0xfff) == 0) {
+ Imm = MCConstantExpr::Create(Val >> 12, getContext());
+ ShiftAmount = 12;
+ }
+ }
SMLoc E = Parser.getTok().getLoc();
- Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, 0, true, S, E));
+ Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
+ getContext()));
return MatchOperand_Success;
}
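When no explicit shift follows the immediate, the hunk above promotes a plain constant such as #0x3000 into the pair (#3, lsl #12) so it still fits ADD/SUB's 12-bit field. A hedged standalone sketch of that promotion (hypothetical helper; the real parser leaves final range checking to operand matching):

#include <cassert>
#include <cstdint>

// Splits an add/sub immediate into a 12-bit value and an optional LSL #12,
// mirroring the promotion performed when no shift was written.
static bool promoteAddSubImm(int64_t Val, int64_t &Imm, unsigned &Shift) {
  Imm = Val;
  Shift = 0;
  if (Val > 0xfff && (Val & 0xfff) == 0) {
    Imm = Val >> 12;
    Shift = 12;
  }
  return Imm >= 0 && Imm <= 0xfff;   // whether it fits after promotion
}

int main() {
  int64_t Imm; unsigned Shift;
  assert(promoteAddSubImm(0x3000, Imm, Shift) && Imm == 3 && Shift == 12);
  assert(promoteAddSubImm(42, Imm, Shift) && Imm == 42 && Shift == 0);
  assert(!promoteAddSubImm(0x1001, Imm, Shift));  // not page-aligned, too big
  return 0;
}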
@@ -1490,18 +2169,22 @@ AArch64AsmParser::ParseImmWithLSLOperand(
Parser.Lex();
// The optional operand must be "lsl #N" where N is non-negative.
- if (Parser.getTok().is(AsmToken::Identifier)
- && Parser.getTok().getIdentifier().equals_lower("lsl")) {
- Parser.Lex();
+ if (!Parser.getTok().is(AsmToken::Identifier) ||
+ !Parser.getTok().getIdentifier().equals_lower("lsl")) {
+ Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
+ return MatchOperand_ParseFail;
+ }
- if (Parser.getTok().is(AsmToken::Hash)) {
- Parser.Lex();
+ // Eat 'lsl'
+ Parser.Lex();
- if (Parser.getTok().isNot(AsmToken::Integer)) {
- Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
- return MatchOperand_ParseFail;
- }
- }
+ if (Parser.getTok().is(AsmToken::Hash)) {
+ Parser.Lex();
+ }
+
+ if (Parser.getTok().isNot(AsmToken::Integer)) {
+ Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
+ return MatchOperand_ParseFail;
}
int64_t ShiftAmount = Parser.getTok().getIntVal();
@@ -1513,729 +2196,1036 @@ AArch64AsmParser::ParseImmWithLSLOperand(
Parser.Lex(); // Eat the number
SMLoc E = Parser.getTok().getLoc();
- Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, ShiftAmount,
- false, S, E));
+ Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
+ S, E, getContext()));
return MatchOperand_Success;
}
-
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseCondCodeOperand(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- if (Parser.getTok().isNot(AsmToken::Identifier))
- return MatchOperand_NoMatch;
-
- StringRef Tok = Parser.getTok().getIdentifier();
- A64CC::CondCodes CondCode = A64StringToCondCode(Tok);
-
- if (CondCode == A64CC::Invalid)
- return MatchOperand_NoMatch;
-
- SMLoc S = Parser.getTok().getLoc();
- Parser.Lex(); // Eat condition code
- SMLoc E = Parser.getTok().getLoc();
-
- Operands.push_back(AArch64Operand::CreateCondCode(CondCode, S, E));
- return MatchOperand_Success;
+/// parseCondCodeString - Parse a Condition Code string.
+AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
+ AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
+ .Case("eq", AArch64CC::EQ)
+ .Case("ne", AArch64CC::NE)
+ .Case("cs", AArch64CC::HS)
+ .Case("hs", AArch64CC::HS)
+ .Case("cc", AArch64CC::LO)
+ .Case("lo", AArch64CC::LO)
+ .Case("mi", AArch64CC::MI)
+ .Case("pl", AArch64CC::PL)
+ .Case("vs", AArch64CC::VS)
+ .Case("vc", AArch64CC::VC)
+ .Case("hi", AArch64CC::HI)
+ .Case("ls", AArch64CC::LS)
+ .Case("ge", AArch64CC::GE)
+ .Case("lt", AArch64CC::LT)
+ .Case("gt", AArch64CC::GT)
+ .Case("le", AArch64CC::LE)
+ .Case("al", AArch64CC::AL)
+ .Case("nv", AArch64CC::NV)
+ .Default(AArch64CC::Invalid);
+ return CC;
}
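The StringSwitch above also accepts the older carry-flag spellings: "cs" and "hs" map to the same condition code, as do "cc" and "lo". A short usage-level sketch of what a caller sees (stand-in enum, purely illustrative):

#include <cassert>
#include <string>

// Stand-in for the subset of condition codes used in this example.
enum CondCode { EQ, NE, HS, LO, INVALID };

static CondCode parseCond(const std::string &S) {
  if (S == "eq") return EQ;
  if (S == "ne") return NE;
  if (S == "cs" || S == "hs") return HS;  // carry set == unsigned higher-or-same
  if (S == "cc" || S == "lo") return LO;  // carry clear == unsigned lower
  return INVALID;
}

int main() {
  assert(parseCond("cs") == parseCond("hs"));
  assert(parseCond("cc") == parseCond("lo"));
  assert(parseCond("xx") == INVALID);
  return 0;
}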
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseCRxOperand(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- SMLoc S = Parser.getTok().getLoc();
- if (Parser.getTok().isNot(AsmToken::Identifier)) {
- Error(S, "Expected cN operand where 0 <= N <= 15");
- return MatchOperand_ParseFail;
- }
+/// parseCondCode - Parse a Condition Code operand.
+bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
+ bool invertCondCode) {
+ SMLoc S = getLoc();
+ const AsmToken &Tok = Parser.getTok();
+ assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
- StringRef Tok = Parser.getTok().getIdentifier();
- if (Tok[0] != 'c' && Tok[0] != 'C') {
- Error(S, "Expected cN operand where 0 <= N <= 15");
- return MatchOperand_ParseFail;
- }
+ StringRef Cond = Tok.getString();
+ AArch64CC::CondCode CC = parseCondCodeString(Cond);
+ if (CC == AArch64CC::Invalid)
+ return TokError("invalid condition code");
+ Parser.Lex(); // Eat identifier token.
- uint32_t CRNum;
- bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
- if (BadNum || CRNum > 15) {
- Error(S, "Expected cN operand where 0 <= N <= 15");
- return MatchOperand_ParseFail;
+ if (invertCondCode) {
+ if (CC == AArch64CC::AL || CC == AArch64CC::NV)
+ return TokError("condition codes AL and NV are invalid for this instruction");
+ CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
}
- const MCExpr *CRImm = MCConstantExpr::Create(CRNum, getContext());
-
- Parser.Lex();
- SMLoc E = Parser.getTok().getLoc();
-
- Operands.push_back(AArch64Operand::CreateImm(CRImm, S, E));
- return MatchOperand_Success;
+ Operands.push_back(
+ AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
+ return false;
}
+/// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
+/// argument. Parse them if present.
AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseFPImmOperand(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
+ const AsmToken &Tok = Parser.getTok();
+ std::string LowerID = Tok.getString().lower();
+ AArch64_AM::ShiftExtendType ShOp =
+ StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
+ .Case("lsl", AArch64_AM::LSL)
+ .Case("lsr", AArch64_AM::LSR)
+ .Case("asr", AArch64_AM::ASR)
+ .Case("ror", AArch64_AM::ROR)
+ .Case("msl", AArch64_AM::MSL)
+ .Case("uxtb", AArch64_AM::UXTB)
+ .Case("uxth", AArch64_AM::UXTH)
+ .Case("uxtw", AArch64_AM::UXTW)
+ .Case("uxtx", AArch64_AM::UXTX)
+ .Case("sxtb", AArch64_AM::SXTB)
+ .Case("sxth", AArch64_AM::SXTH)
+ .Case("sxtw", AArch64_AM::SXTW)
+ .Case("sxtx", AArch64_AM::SXTX)
+ .Default(AArch64_AM::InvalidShiftExtend);
+
+ if (ShOp == AArch64_AM::InvalidShiftExtend)
+ return MatchOperand_NoMatch;
- // FIXME?: I want to live in a world where immediates must start with
- // #. Please don't dash my hopes (well, do if you have a good reason).
- if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;
+ SMLoc S = Tok.getLoc();
+ Parser.Lex();
- SMLoc S = Parser.getTok().getLoc();
- Parser.Lex(); // Eat '#'
+ bool Hash = getLexer().is(AsmToken::Hash);
+ if (!Hash && getLexer().isNot(AsmToken::Integer)) {
+ if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
+ ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
+ ShOp == AArch64_AM::MSL) {
+ // We expect a number here.
+ TokError("expected #imm after shift specifier");
+ return MatchOperand_ParseFail;
+ }
- bool Negative = false;
- if (Parser.getTok().is(AsmToken::Minus)) {
- Negative = true;
- Parser.Lex(); // Eat '-'
- } else if (Parser.getTok().is(AsmToken::Plus)) {
- Parser.Lex(); // Eat '+'
+ // "extend" type operatoins don't need an immediate, #0 is implicit.
+ SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
+ Operands.push_back(
+ AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
+ return MatchOperand_Success;
}
- if (Parser.getTok().isNot(AsmToken::Real)) {
- Error(S, "Expected floating-point immediate");
+ if (Hash)
+ Parser.Lex(); // Eat the '#'.
+
+ // Make sure we do actually have a number
+ if (!Parser.getTok().is(AsmToken::Integer)) {
+ Error(Parser.getTok().getLoc(),
+ "expected integer shift amount");
return MatchOperand_ParseFail;
}
- APFloat RealVal(APFloat::IEEEdouble, Parser.getTok().getString());
- if (Negative) RealVal.changeSign();
- double DblVal = RealVal.convertToDouble();
+ const MCExpr *ImmVal;
+ if (getParser().parseExpression(ImmVal))
+ return MatchOperand_ParseFail;
- Parser.Lex(); // Eat real number
- SMLoc E = Parser.getTok().getLoc();
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
+ if (!MCE) {
+ TokError("expected #imm after shift specifier");
+ return MatchOperand_ParseFail;
+ }
- Operands.push_back(AArch64Operand::CreateFPImm(DblVal, S, E));
+ SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
+ Operands.push_back(AArch64Operand::CreateShiftExtend(
+ ShOp, MCE->getValue(), true, S, E, getContext()));
return MatchOperand_Success;
}
+/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
+/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
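+/// For example "ic ialluis" is emitted as "sys #0, c7, c1, #0".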
+bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
+ OperandVector &Operands) {
+ if (Name.find('.') != StringRef::npos)
+ return TokError("invalid operand");
-// Automatically generated
-static unsigned MatchRegisterName(StringRef Name);
+ Mnemonic = Name;
+ Operands.push_back(
+ AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
-bool
-AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
- StringRef &Layout,
- SMLoc &LayoutLoc) const {
const AsmToken &Tok = Parser.getTok();
-
- if (Tok.isNot(AsmToken::Identifier))
- return false;
-
- std::string LowerReg = Tok.getString().lower();
- size_t DotPos = LowerReg.find('.');
-
- bool IsVec128 = false;
+ StringRef Op = Tok.getString();
SMLoc S = Tok.getLoc();
- RegEndLoc = SMLoc::getFromPointer(S.getPointer() + DotPos);
- if (DotPos == std::string::npos) {
- Layout = StringRef();
- } else {
- // Everything afterwards needs to be a literal token, expected to be
- // '.2d','.b' etc for vector registers.
-
- // This StringSwitch validates the input and (perhaps more importantly)
- // gives us a permanent string to use in the token (a pointer into LowerReg
- // would go out of scope when we return).
- LayoutLoc = SMLoc::getFromPointer(S.getPointer() + DotPos + 1);
- StringRef LayoutText = StringRef(LowerReg).substr(DotPos);
-
- // See if it's a 128-bit layout first.
- Layout = StringSwitch<const char *>(LayoutText)
- .Case(".q", ".q").Case(".1q", ".1q")
- .Case(".d", ".d").Case(".2d", ".2d")
- .Case(".s", ".s").Case(".4s", ".4s")
- .Case(".h", ".h").Case(".8h", ".8h")
- .Case(".b", ".b").Case(".16b", ".16b")
- .Default("");
-
- if (Layout.size() != 0)
- IsVec128 = true;
- else {
- Layout = StringSwitch<const char *>(LayoutText)
- .Case(".1d", ".1d")
- .Case(".2s", ".2s")
- .Case(".4h", ".4h")
- .Case(".8b", ".8b")
- .Default("");
+ const MCExpr *Expr = nullptr;
+
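+// Push the op1, Cn, Cm and op2 operands of the equivalent SYS instruction.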
+#define SYS_ALIAS(op1, Cn, Cm, op2) \
+ do { \
+ Expr = MCConstantExpr::Create(op1, getContext()); \
+ Operands.push_back( \
+ AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
+ Operands.push_back( \
+ AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
+ Operands.push_back( \
+ AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
+ Expr = MCConstantExpr::Create(op2, getContext()); \
+ Operands.push_back( \
+ AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
+ } while (0)
+
+ if (Mnemonic == "ic") {
+ if (!Op.compare_lower("ialluis")) {
+ // SYS #0, C7, C1, #0
+ SYS_ALIAS(0, 7, 1, 0);
+ } else if (!Op.compare_lower("iallu")) {
+ // SYS #0, C7, C5, #0
+ SYS_ALIAS(0, 7, 5, 0);
+ } else if (!Op.compare_lower("ivau")) {
+ // SYS #3, C7, C5, #1
+ SYS_ALIAS(3, 7, 5, 1);
+ } else {
+ return TokError("invalid operand for IC instruction");
}
-
- if (Layout.size() == 0) {
- // If we've still not pinned it down the register is malformed.
- return false;
+ } else if (Mnemonic == "dc") {
+ if (!Op.compare_lower("zva")) {
+ // SYS #3, C7, C4, #1
+ SYS_ALIAS(3, 7, 4, 1);
+ } else if (!Op.compare_lower("ivac")) {
+      // SYS #0, C7, C6, #1
+ SYS_ALIAS(0, 7, 6, 1);
+ } else if (!Op.compare_lower("isw")) {
+ // SYS #0, C7, C6, #2
+ SYS_ALIAS(0, 7, 6, 2);
+ } else if (!Op.compare_lower("cvac")) {
+ // SYS #3, C7, C10, #1
+ SYS_ALIAS(3, 7, 10, 1);
+ } else if (!Op.compare_lower("csw")) {
+ // SYS #0, C7, C10, #2
+ SYS_ALIAS(0, 7, 10, 2);
+ } else if (!Op.compare_lower("cvau")) {
+ // SYS #3, C7, C11, #1
+ SYS_ALIAS(3, 7, 11, 1);
+ } else if (!Op.compare_lower("civac")) {
+ // SYS #3, C7, C14, #1
+ SYS_ALIAS(3, 7, 14, 1);
+ } else if (!Op.compare_lower("cisw")) {
+ // SYS #0, C7, C14, #2
+ SYS_ALIAS(0, 7, 14, 2);
+ } else {
+ return TokError("invalid operand for DC instruction");
+ }
+ } else if (Mnemonic == "at") {
+ if (!Op.compare_lower("s1e1r")) {
+ // SYS #0, C7, C8, #0
+ SYS_ALIAS(0, 7, 8, 0);
+ } else if (!Op.compare_lower("s1e2r")) {
+ // SYS #4, C7, C8, #0
+ SYS_ALIAS(4, 7, 8, 0);
+ } else if (!Op.compare_lower("s1e3r")) {
+ // SYS #6, C7, C8, #0
+ SYS_ALIAS(6, 7, 8, 0);
+ } else if (!Op.compare_lower("s1e1w")) {
+ // SYS #0, C7, C8, #1
+ SYS_ALIAS(0, 7, 8, 1);
+ } else if (!Op.compare_lower("s1e2w")) {
+ // SYS #4, C7, C8, #1
+ SYS_ALIAS(4, 7, 8, 1);
+ } else if (!Op.compare_lower("s1e3w")) {
+ // SYS #6, C7, C8, #1
+ SYS_ALIAS(6, 7, 8, 1);
+ } else if (!Op.compare_lower("s1e0r")) {
+      // SYS #0, C7, C8, #2
+ SYS_ALIAS(0, 7, 8, 2);
+ } else if (!Op.compare_lower("s1e0w")) {
+ // SYS #0, C7, C8, #3
+ SYS_ALIAS(0, 7, 8, 3);
+ } else if (!Op.compare_lower("s12e1r")) {
+ // SYS #4, C7, C8, #4
+ SYS_ALIAS(4, 7, 8, 4);
+ } else if (!Op.compare_lower("s12e1w")) {
+ // SYS #4, C7, C8, #5
+ SYS_ALIAS(4, 7, 8, 5);
+ } else if (!Op.compare_lower("s12e0r")) {
+ // SYS #4, C7, C8, #6
+ SYS_ALIAS(4, 7, 8, 6);
+ } else if (!Op.compare_lower("s12e0w")) {
+ // SYS #4, C7, C8, #7
+ SYS_ALIAS(4, 7, 8, 7);
+ } else {
+ return TokError("invalid operand for AT instruction");
+ }
+ } else if (Mnemonic == "tlbi") {
+ if (!Op.compare_lower("vmalle1is")) {
+ // SYS #0, C8, C3, #0
+ SYS_ALIAS(0, 8, 3, 0);
+ } else if (!Op.compare_lower("alle2is")) {
+ // SYS #4, C8, C3, #0
+ SYS_ALIAS(4, 8, 3, 0);
+ } else if (!Op.compare_lower("alle3is")) {
+ // SYS #6, C8, C3, #0
+ SYS_ALIAS(6, 8, 3, 0);
+ } else if (!Op.compare_lower("vae1is")) {
+ // SYS #0, C8, C3, #1
+ SYS_ALIAS(0, 8, 3, 1);
+ } else if (!Op.compare_lower("vae2is")) {
+ // SYS #4, C8, C3, #1
+ SYS_ALIAS(4, 8, 3, 1);
+ } else if (!Op.compare_lower("vae3is")) {
+ // SYS #6, C8, C3, #1
+ SYS_ALIAS(6, 8, 3, 1);
+ } else if (!Op.compare_lower("aside1is")) {
+ // SYS #0, C8, C3, #2
+ SYS_ALIAS(0, 8, 3, 2);
+ } else if (!Op.compare_lower("vaae1is")) {
+ // SYS #0, C8, C3, #3
+ SYS_ALIAS(0, 8, 3, 3);
+ } else if (!Op.compare_lower("alle1is")) {
+ // SYS #4, C8, C3, #4
+ SYS_ALIAS(4, 8, 3, 4);
+ } else if (!Op.compare_lower("vale1is")) {
+ // SYS #0, C8, C3, #5
+ SYS_ALIAS(0, 8, 3, 5);
+ } else if (!Op.compare_lower("vaale1is")) {
+ // SYS #0, C8, C3, #7
+ SYS_ALIAS(0, 8, 3, 7);
+ } else if (!Op.compare_lower("vmalle1")) {
+ // SYS #0, C8, C7, #0
+ SYS_ALIAS(0, 8, 7, 0);
+ } else if (!Op.compare_lower("alle2")) {
+ // SYS #4, C8, C7, #0
+ SYS_ALIAS(4, 8, 7, 0);
+ } else if (!Op.compare_lower("vale2is")) {
+ // SYS #4, C8, C3, #5
+ SYS_ALIAS(4, 8, 3, 5);
+ } else if (!Op.compare_lower("vale3is")) {
+ // SYS #6, C8, C3, #5
+ SYS_ALIAS(6, 8, 3, 5);
+ } else if (!Op.compare_lower("alle3")) {
+ // SYS #6, C8, C7, #0
+ SYS_ALIAS(6, 8, 7, 0);
+ } else if (!Op.compare_lower("vae1")) {
+ // SYS #0, C8, C7, #1
+ SYS_ALIAS(0, 8, 7, 1);
+ } else if (!Op.compare_lower("vae2")) {
+ // SYS #4, C8, C7, #1
+ SYS_ALIAS(4, 8, 7, 1);
+ } else if (!Op.compare_lower("vae3")) {
+ // SYS #6, C8, C7, #1
+ SYS_ALIAS(6, 8, 7, 1);
+ } else if (!Op.compare_lower("aside1")) {
+ // SYS #0, C8, C7, #2
+ SYS_ALIAS(0, 8, 7, 2);
+ } else if (!Op.compare_lower("vaae1")) {
+ // SYS #0, C8, C7, #3
+ SYS_ALIAS(0, 8, 7, 3);
+ } else if (!Op.compare_lower("alle1")) {
+ // SYS #4, C8, C7, #4
+ SYS_ALIAS(4, 8, 7, 4);
+ } else if (!Op.compare_lower("vale1")) {
+ // SYS #0, C8, C7, #5
+ SYS_ALIAS(0, 8, 7, 5);
+ } else if (!Op.compare_lower("vale2")) {
+ // SYS #4, C8, C7, #5
+ SYS_ALIAS(4, 8, 7, 5);
+ } else if (!Op.compare_lower("vale3")) {
+ // SYS #6, C8, C7, #5
+ SYS_ALIAS(6, 8, 7, 5);
+ } else if (!Op.compare_lower("vaale1")) {
+ // SYS #0, C8, C7, #7
+ SYS_ALIAS(0, 8, 7, 7);
+ } else if (!Op.compare_lower("ipas2e1")) {
+ // SYS #4, C8, C4, #1
+ SYS_ALIAS(4, 8, 4, 1);
+ } else if (!Op.compare_lower("ipas2le1")) {
+ // SYS #4, C8, C4, #5
+ SYS_ALIAS(4, 8, 4, 5);
+ } else if (!Op.compare_lower("ipas2e1is")) {
+      // SYS #4, C8, C0, #1
+ SYS_ALIAS(4, 8, 0, 1);
+ } else if (!Op.compare_lower("ipas2le1is")) {
+      // SYS #4, C8, C0, #5
+ SYS_ALIAS(4, 8, 0, 5);
+ } else if (!Op.compare_lower("vmalls12e1")) {
+ // SYS #4, C8, C7, #6
+ SYS_ALIAS(4, 8, 7, 6);
+ } else if (!Op.compare_lower("vmalls12e1is")) {
+ // SYS #4, C8, C3, #6
+ SYS_ALIAS(4, 8, 3, 6);
+ } else {
+ return TokError("invalid operand for TLBI instruction");
}
}
- RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
- if (RegNum == AArch64::NoRegister) {
- RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
- .Case("ip0", AArch64::X16)
- .Case("ip1", AArch64::X17)
- .Case("fp", AArch64::X29)
- .Case("lr", AArch64::X30)
- .Case("v0", IsVec128 ? AArch64::Q0 : AArch64::D0)
- .Case("v1", IsVec128 ? AArch64::Q1 : AArch64::D1)
- .Case("v2", IsVec128 ? AArch64::Q2 : AArch64::D2)
- .Case("v3", IsVec128 ? AArch64::Q3 : AArch64::D3)
- .Case("v4", IsVec128 ? AArch64::Q4 : AArch64::D4)
- .Case("v5", IsVec128 ? AArch64::Q5 : AArch64::D5)
- .Case("v6", IsVec128 ? AArch64::Q6 : AArch64::D6)
- .Case("v7", IsVec128 ? AArch64::Q7 : AArch64::D7)
- .Case("v8", IsVec128 ? AArch64::Q8 : AArch64::D8)
- .Case("v9", IsVec128 ? AArch64::Q9 : AArch64::D9)
- .Case("v10", IsVec128 ? AArch64::Q10 : AArch64::D10)
- .Case("v11", IsVec128 ? AArch64::Q11 : AArch64::D11)
- .Case("v12", IsVec128 ? AArch64::Q12 : AArch64::D12)
- .Case("v13", IsVec128 ? AArch64::Q13 : AArch64::D13)
- .Case("v14", IsVec128 ? AArch64::Q14 : AArch64::D14)
- .Case("v15", IsVec128 ? AArch64::Q15 : AArch64::D15)
- .Case("v16", IsVec128 ? AArch64::Q16 : AArch64::D16)
- .Case("v17", IsVec128 ? AArch64::Q17 : AArch64::D17)
- .Case("v18", IsVec128 ? AArch64::Q18 : AArch64::D18)
- .Case("v19", IsVec128 ? AArch64::Q19 : AArch64::D19)
- .Case("v20", IsVec128 ? AArch64::Q20 : AArch64::D20)
- .Case("v21", IsVec128 ? AArch64::Q21 : AArch64::D21)
- .Case("v22", IsVec128 ? AArch64::Q22 : AArch64::D22)
- .Case("v23", IsVec128 ? AArch64::Q23 : AArch64::D23)
- .Case("v24", IsVec128 ? AArch64::Q24 : AArch64::D24)
- .Case("v25", IsVec128 ? AArch64::Q25 : AArch64::D25)
- .Case("v26", IsVec128 ? AArch64::Q26 : AArch64::D26)
- .Case("v27", IsVec128 ? AArch64::Q27 : AArch64::D27)
- .Case("v28", IsVec128 ? AArch64::Q28 : AArch64::D28)
- .Case("v29", IsVec128 ? AArch64::Q29 : AArch64::D29)
- .Case("v30", IsVec128 ? AArch64::Q30 : AArch64::D30)
- .Case("v31", IsVec128 ? AArch64::Q31 : AArch64::D31)
- .Default(AArch64::NoRegister);
- }
- if (RegNum == AArch64::NoRegister)
- return false;
+#undef SYS_ALIAS
- return true;
-}
+ Parser.Lex(); // Eat operand.
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- uint32_t &NumLanes) {
- unsigned RegNum;
- StringRef Layout;
- SMLoc RegEndLoc, LayoutLoc;
- SMLoc S = Parser.getTok().getLoc();
-
- if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
- return MatchOperand_NoMatch;
+ bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
+ bool HasRegister = false;
- Operands.push_back(AArch64Operand::CreateReg(RegNum, S, RegEndLoc));
+ // Check for the optional register operand.
+ if (getLexer().is(AsmToken::Comma)) {
+ Parser.Lex(); // Eat comma.
- if (Layout.size() != 0) {
- unsigned long long TmpLanes = 0;
- llvm::getAsUnsignedInteger(Layout.substr(1), 10, TmpLanes);
- if (TmpLanes != 0) {
- NumLanes = TmpLanes;
- } else {
- // If the number of lanes isn't specified explicitly, a valid instruction
- // will have an element specifier and be capable of acting on the entire
- // vector register.
- switch (Layout.back()) {
- default: llvm_unreachable("Invalid layout specifier");
- case 'b': NumLanes = 16; break;
- case 'h': NumLanes = 8; break;
- case 's': NumLanes = 4; break;
- case 'd': NumLanes = 2; break;
- case 'q': NumLanes = 1; break;
- }
- }
+ if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
+ return TokError("expected register operand");
- Operands.push_back(AArch64Operand::CreateToken(Layout, LayoutLoc));
+ HasRegister = true;
}
- Parser.Lex();
- return MatchOperand_Success;
-}
-
-bool
-AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
- SMLoc &EndLoc) {
- // This callback is used for things like DWARF frame directives in
- // assembly. They don't care about things like NEON layouts or lanes, they
- // just want to be able to produce the DWARF register number.
- StringRef LayoutSpec;
- SMLoc RegEndLoc, LayoutLoc;
- StartLoc = Parser.getTok().getLoc();
-
- if (!IdentifyRegister(RegNo, RegEndLoc, LayoutSpec, LayoutLoc))
- return true;
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ Parser.eatToEndOfStatement();
+ return TokError("unexpected token in argument list");
+ }
- Parser.Lex();
- EndLoc = Parser.getTok().getLoc();
+ if (ExpectRegister && !HasRegister) {
+ return TokError("specified " + Mnemonic + " op requires a register");
+  } else if (!ExpectRegister && HasRegister) {
+ return TokError("specified " + Mnemonic + " op does not use a register");
+ }
+ Parser.Lex(); // Consume the EndOfStatement
return false;
}
AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseNamedImmOperand(const NamedImmMapper &Mapper,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // Since these operands occur in very limited circumstances, without
- // alternatives, we actually signal an error if there is no match. If relaxing
- // this, beware of unintended consequences: an immediate will be accepted
- // during matching, no matter how it gets into the AArch64Operand.
+AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
const AsmToken &Tok = Parser.getTok();
- SMLoc S = Tok.getLoc();
- if (Tok.is(AsmToken::Identifier)) {
- bool ValidName;
- uint32_t Code = Mapper.fromString(Tok.getString().lower(), ValidName);
-
- if (!ValidName) {
- Error(S, "operand specifier not recognised");
+ // Can be either a #imm style literal or an option name
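+  // (e.g. "dmb #15" or "dmb sy").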
+ bool Hash = Tok.is(AsmToken::Hash);
+ if (Hash || Tok.is(AsmToken::Integer)) {
+ // Immediate operand.
+ if (Hash)
+ Parser.Lex(); // Eat the '#'
+ const MCExpr *ImmVal;
+ SMLoc ExprLoc = getLoc();
+ if (getParser().parseExpression(ImmVal))
+ return MatchOperand_ParseFail;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
+ if (!MCE) {
+ Error(ExprLoc, "immediate value expected for barrier operand");
return MatchOperand_ParseFail;
}
-
- Parser.Lex(); // We're done with the identifier. Eat it
-
- SMLoc E = Parser.getTok().getLoc();
- const MCExpr *Imm = MCConstantExpr::Create(Code, getContext());
- Operands.push_back(AArch64Operand::CreateImm(Imm, S, E));
+ if (MCE->getValue() < 0 || MCE->getValue() > 15) {
+ Error(ExprLoc, "barrier operand out of range");
+ return MatchOperand_ParseFail;
+ }
+ Operands.push_back(
+ AArch64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
return MatchOperand_Success;
- } else if (Tok.is(AsmToken::Hash)) {
- Parser.Lex();
+ }
- const MCExpr *ImmVal;
- if (ParseImmediate(ImmVal) != MatchOperand_Success)
- return MatchOperand_ParseFail;
+ if (Tok.isNot(AsmToken::Identifier)) {
+ TokError("invalid operand for instruction");
+ return MatchOperand_ParseFail;
+ }
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
- if (!CE || CE->getValue() < 0 || !Mapper.validImm(CE->getValue())) {
- Error(S, "Invalid immediate for instruction");
- return MatchOperand_ParseFail;
- }
+ bool Valid;
+ unsigned Opt = AArch64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
+ if (!Valid) {
+ TokError("invalid barrier option name");
+ return MatchOperand_ParseFail;
+ }
- SMLoc E = Parser.getTok().getLoc();
- Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E));
- return MatchOperand_Success;
+ // The only valid named option for ISB is 'sy'
+ if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
+ TokError("'sy' or #imm operand expected");
+ return MatchOperand_ParseFail;
}
- Error(S, "unexpected operand for instruction");
- return MatchOperand_ParseFail;
+ Operands.push_back(
+ AArch64Operand::CreateBarrier(Opt, getLoc(), getContext()));
+ Parser.Lex(); // Consume the option
+
+ return MatchOperand_Success;
}
AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseSysRegOperand(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
const AsmToken &Tok = Parser.getTok();
- // Any MSR/MRS operand will be an identifier, and we want to store it as some
- // kind of string: SPSel is valid for two different forms of MSR with two
- // different encodings. There's no collision at the moment, but the potential
- // is there.
- if (!Tok.is(AsmToken::Identifier)) {
+ if (Tok.isNot(AsmToken::Identifier))
return MatchOperand_NoMatch;
- }
- SMLoc S = Tok.getLoc();
- Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), S));
+ Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), getLoc(),
+ STI.getFeatureBits(), getContext()));
Parser.Lex(); // Eat identifier
return MatchOperand_Success;
}
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseLSXAddressOperand(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- SMLoc S = Parser.getTok().getLoc();
-
- unsigned RegNum;
- SMLoc RegEndLoc, LayoutLoc;
- StringRef Layout;
- if(!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc)
- || !AArch64MCRegisterClasses[AArch64::GPR64xspRegClassID].contains(RegNum)
- || Layout.size() != 0) {
- // Check Layout.size because we don't want to let "x3.4s" or similar
- // through.
- return MatchOperand_NoMatch;
- }
- Parser.Lex(); // Eat register
+/// tryParseVectorRegister - Parse a vector register operand.
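+/// For example "v0.8b", or "v3.s[1]" where the lane index is added as a
+/// separate vector-index operand.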
+bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
+ if (Parser.getTok().isNot(AsmToken::Identifier))
+ return true;
- if (Parser.getTok().is(AsmToken::RBrac)) {
- // We're done
- SMLoc E = Parser.getTok().getLoc();
- Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
- return MatchOperand_Success;
- }
+ SMLoc S = getLoc();
+ // Check for a vector register specifier first.
+ StringRef Kind;
+ int64_t Reg = tryMatchVectorRegister(Kind, false);
+ if (Reg == -1)
+ return true;
+ Operands.push_back(
+ AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
+ // If there was an explicit qualifier, that goes on as a literal text
+ // operand.
+ if (!Kind.empty())
+ Operands.push_back(
+ AArch64Operand::CreateToken(Kind, false, S, getContext()));
+
+ // If there is an index specifier following the register, parse that too.
+ if (Parser.getTok().is(AsmToken::LBrac)) {
+ SMLoc SIdx = getLoc();
+ Parser.Lex(); // Eat left bracket token.
- // Otherwise, only ", #0" is valid
+ const MCExpr *ImmVal;
+ if (getParser().parseExpression(ImmVal))
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
+ if (!MCE) {
+ TokError("immediate value expected for vector index");
+ return false;
+ }
- if (Parser.getTok().isNot(AsmToken::Comma)) {
- Error(Parser.getTok().getLoc(), "expected ',' or ']' after register");
- return MatchOperand_ParseFail;
- }
- Parser.Lex(); // Eat ','
+ SMLoc E = getLoc();
+ if (Parser.getTok().isNot(AsmToken::RBrac)) {
+ Error(E, "']' expected");
+ return false;
+ }
- if (Parser.getTok().isNot(AsmToken::Hash)) {
- Error(Parser.getTok().getLoc(), "expected '#0'");
- return MatchOperand_ParseFail;
- }
- Parser.Lex(); // Eat '#'
+ Parser.Lex(); // Eat right bracket token.
- if (Parser.getTok().isNot(AsmToken::Integer)
- || Parser.getTok().getIntVal() != 0 ) {
- Error(Parser.getTok().getLoc(), "expected '#0'");
- return MatchOperand_ParseFail;
+ Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
+ E, getContext()));
}
- Parser.Lex(); // Eat '0'
- SMLoc E = Parser.getTok().getLoc();
- Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
- return MatchOperand_Success;
+ return false;
}
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseShiftExtend(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- StringRef IDVal = Parser.getTok().getIdentifier();
- std::string LowerID = IDVal.lower();
-
- A64SE::ShiftExtSpecifiers Spec =
- StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
- .Case("lsl", A64SE::LSL)
- .Case("msl", A64SE::MSL)
- .Case("lsr", A64SE::LSR)
- .Case("asr", A64SE::ASR)
- .Case("ror", A64SE::ROR)
- .Case("uxtb", A64SE::UXTB)
- .Case("uxth", A64SE::UXTH)
- .Case("uxtw", A64SE::UXTW)
- .Case("uxtx", A64SE::UXTX)
- .Case("sxtb", A64SE::SXTB)
- .Case("sxth", A64SE::SXTH)
- .Case("sxtw", A64SE::SXTW)
- .Case("sxtx", A64SE::SXTX)
- .Default(A64SE::Invalid);
-
- if (Spec == A64SE::Invalid)
- return MatchOperand_NoMatch;
+/// parseRegister - Parse a register operand. A vector register is tried
+/// first, then a scalar register.
+bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
+ SMLoc S = getLoc();
+ // Try for a vector register.
+ if (!tryParseVectorRegister(Operands))
+ return false;
- // Eat the shift
- SMLoc S, E;
- S = Parser.getTok().getLoc();
- Parser.Lex();
+ // Try for a scalar register.
+ int64_t Reg = tryParseRegister();
+ if (Reg == -1)
+ return true;
+ Operands.push_back(
+ AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
- if (Spec != A64SE::LSL && Spec != A64SE::LSR && Spec != A64SE::ASR &&
- Spec != A64SE::ROR && Spec != A64SE::MSL) {
- // The shift amount can be omitted for the extending versions, but not real
- // shifts:
- // add x0, x0, x0, uxtb
- // is valid, and equivalent to
- // add x0, x0, x0, uxtb #0
-
- if (Parser.getTok().is(AsmToken::Comma) ||
- Parser.getTok().is(AsmToken::EndOfStatement) ||
- Parser.getTok().is(AsmToken::RBrac)) {
- Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, 0, true,
- S, E));
- return MatchOperand_Success;
+ // A small number of instructions (FMOVXDhighr, for example) have "[1]"
+ // as a string token in the instruction itself.
+ if (getLexer().getKind() == AsmToken::LBrac) {
+ SMLoc LBracS = getLoc();
+ Parser.Lex();
+ const AsmToken &Tok = Parser.getTok();
+ if (Tok.is(AsmToken::Integer)) {
+ SMLoc IntS = getLoc();
+ int64_t Val = Tok.getIntVal();
+ if (Val == 1) {
+ Parser.Lex();
+ if (getLexer().getKind() == AsmToken::RBrac) {
+ SMLoc RBracS = getLoc();
+ Parser.Lex();
+ Operands.push_back(
+ AArch64Operand::CreateToken("[", false, LBracS, getContext()));
+ Operands.push_back(
+ AArch64Operand::CreateToken("1", false, IntS, getContext()));
+ Operands.push_back(
+ AArch64Operand::CreateToken("]", false, RBracS, getContext()));
+ return false;
+ }
+ }
}
}
- // Eat # at beginning of immediate
- if (!Parser.getTok().is(AsmToken::Hash)) {
- Error(Parser.getTok().getLoc(),
- "expected #imm after shift specifier");
- return MatchOperand_ParseFail;
- }
- Parser.Lex();
+ return false;
+}
- // Make sure we do actually have a number
- if (!Parser.getTok().is(AsmToken::Integer)) {
- Error(Parser.getTok().getLoc(),
- "expected integer shift amount");
- return MatchOperand_ParseFail;
- }
- unsigned Amount = Parser.getTok().getIntVal();
- Parser.Lex();
- E = Parser.getTok().getLoc();
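+/// parseSymbolicImmVal - Parse an immediate expression with an optional
+/// leading relocation specifier, e.g. ":lo12:sym" or ":got:sym".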
+bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
+ bool HasELFModifier = false;
+ AArch64MCExpr::VariantKind RefKind;
- Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, Amount, false,
- S, E));
+ if (Parser.getTok().is(AsmToken::Colon)) {
+    Parser.Lex(); // Eat ':'
+ HasELFModifier = true;
- return MatchOperand_Success;
-}
+ if (Parser.getTok().isNot(AsmToken::Identifier)) {
+ Error(Parser.getTok().getLoc(),
+ "expect relocation specifier in operand after ':'");
+ return true;
+ }
-/// Try to parse a vector register token, If it is a vector register,
-/// the token is eaten and return true. Otherwise return false.
-bool AArch64AsmParser::TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc,
- StringRef &Layout, SMLoc &LayoutLoc) {
- bool IsVector = true;
-
- if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
- IsVector = false;
- else if (!AArch64MCRegisterClasses[AArch64::FPR64RegClassID]
- .contains(RegNum) &&
- !AArch64MCRegisterClasses[AArch64::FPR128RegClassID]
- .contains(RegNum))
- IsVector = false;
- else if (Layout.size() == 0)
- IsVector = false;
-
- if (!IsVector)
- Error(Parser.getTok().getLoc(), "expected vector type register");
-
- Parser.Lex(); // Eat this token.
- return IsVector;
-}
+ std::string LowerCase = Parser.getTok().getIdentifier().lower();
+ RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
+ .Case("lo12", AArch64MCExpr::VK_LO12)
+ .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
+ .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
+ .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
+ .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
+ .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
+ .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
+ .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
+ .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
+ .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
+ .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
+ .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
+ .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
+ .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
+ .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
+ .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
+ .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
+ .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
+ .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
+ .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
+ .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
+ .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
+ .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
+ .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
+ .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
+ .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
+ .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
+ .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
+ .Case("got", AArch64MCExpr::VK_GOT_PAGE)
+ .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
+ .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
+ .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
+ .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
+ .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
+ .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
+ .Default(AArch64MCExpr::VK_INVALID);
+
+ if (RefKind == AArch64MCExpr::VK_INVALID) {
+ Error(Parser.getTok().getLoc(),
+ "expect relocation specifier in operand after ':'");
+ return true;
+ }
+ Parser.Lex(); // Eat identifier
-// A vector list contains 1-4 consecutive registers.
-// Now there are two kinds of vector list when number of vector > 1:
-// (1) {Vn.layout, Vn+1.layout, ... , Vm.layout}
-// (2) {Vn.layout - Vm.layout}
-// If the layout is like .b/.h/.s/.d, also parse the lane.
-AArch64AsmParser::OperandMatchResultTy AArch64AsmParser::ParseVectorList(
- SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
- if (Parser.getTok().isNot(AsmToken::LCurly)) {
- Error(Parser.getTok().getLoc(), "'{' expected");
- return MatchOperand_ParseFail;
+ if (Parser.getTok().isNot(AsmToken::Colon)) {
+ Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
+ return true;
+ }
+ Parser.Lex(); // Eat ':'
}
- SMLoc SLoc = Parser.getTok().getLoc();
- Parser.Lex(); // Eat '{' token.
- unsigned Reg, Count = 1;
- StringRef LayoutStr;
- SMLoc RegEndLoc, LayoutLoc;
- if (!TryParseVector(Reg, RegEndLoc, LayoutStr, LayoutLoc))
- return MatchOperand_ParseFail;
+ if (getParser().parseExpression(ImmVal))
+ return true;
+
+ if (HasELFModifier)
+ ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
+
+ return false;
+}
+
+/// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
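+/// A list holds 1-4 consecutive registers, written either as a range, e.g.
+/// "{ v0.4s - v3.4s }", or comma separated, e.g. "{ v0.4s, v1.4s }", and may
+/// be followed by a lane index such as "[1]".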
+bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
+  assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a '{'");
+ SMLoc S = getLoc();
+ Parser.Lex(); // Eat left bracket token.
+ StringRef Kind;
+ int64_t FirstReg = tryMatchVectorRegister(Kind, true);
+ if (FirstReg == -1)
+ return true;
+ int64_t PrevReg = FirstReg;
+ unsigned Count = 1;
if (Parser.getTok().is(AsmToken::Minus)) {
Parser.Lex(); // Eat the minus.
- unsigned Reg2;
- StringRef LayoutStr2;
- SMLoc RegEndLoc2, LayoutLoc2;
- SMLoc RegLoc2 = Parser.getTok().getLoc();
+ SMLoc Loc = getLoc();
+ StringRef NextKind;
+ int64_t Reg = tryMatchVectorRegister(NextKind, true);
+ if (Reg == -1)
+ return true;
+    // Any kind suffix must match on all registers in the list.
+ if (Kind != NextKind)
+ return Error(Loc, "mismatched register size suffix");
- if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
- return MatchOperand_ParseFail;
- unsigned Space = (Reg < Reg2) ? (Reg2 - Reg) : (Reg2 + 32 - Reg);
+ unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
- if (LayoutStr != LayoutStr2) {
- Error(LayoutLoc2, "expected the same vector layout");
- return MatchOperand_ParseFail;
- }
if (Space == 0 || Space > 3) {
- Error(RegLoc2, "invalid number of vectors");
- return MatchOperand_ParseFail;
+ return Error(Loc, "invalid number of vectors");
}
Count += Space;
- } else {
- unsigned LastReg = Reg;
+  } else {
while (Parser.getTok().is(AsmToken::Comma)) {
- Parser.Lex(); // Eat the comma.
- unsigned Reg2;
- StringRef LayoutStr2;
- SMLoc RegEndLoc2, LayoutLoc2;
- SMLoc RegLoc2 = Parser.getTok().getLoc();
+ Parser.Lex(); // Eat the comma token.
- if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
- return MatchOperand_ParseFail;
- unsigned Space = (LastReg < Reg2) ? (Reg2 - LastReg)
- : (Reg2 + 32 - LastReg);
- Count++;
-
- // The space between two vectors should be 1. And they should have the same layout.
- // Total count shouldn't be great than 4
- if (Space != 1) {
- Error(RegLoc2, "invalid space between two vectors");
- return MatchOperand_ParseFail;
- }
- if (LayoutStr != LayoutStr2) {
- Error(LayoutLoc2, "expected the same vector layout");
- return MatchOperand_ParseFail;
- }
- if (Count > 4) {
- Error(RegLoc2, "invalid number of vectors");
- return MatchOperand_ParseFail;
- }
+ SMLoc Loc = getLoc();
+ StringRef NextKind;
+ int64_t Reg = tryMatchVectorRegister(NextKind, true);
+ if (Reg == -1)
+ return true;
+      // Any kind suffix must match on all registers in the list.
+ if (Kind != NextKind)
+ return Error(Loc, "mismatched register size suffix");
- LastReg = Reg2;
+      // Registers must be sequential (with wraparound at 31).
+ if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
+ (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
+ return Error(Loc, "registers must be sequential");
+
+ PrevReg = Reg;
+ ++Count;
}
}
- if (Parser.getTok().isNot(AsmToken::RCurly)) {
- Error(Parser.getTok().getLoc(), "'}' expected");
- return MatchOperand_ParseFail;
- }
- SMLoc ELoc = Parser.getTok().getLoc();
- Parser.Lex(); // Eat '}' token.
+ if (Parser.getTok().isNot(AsmToken::RCurly))
+ return Error(getLoc(), "'}' expected");
+ Parser.Lex(); // Eat the '}' token.
- A64Layout::VectorLayout Layout = A64StringToVectorLayout(LayoutStr);
- if (Count > 1) { // If count > 1, create vector list using super register.
- bool IsVec64 = (Layout < A64Layout::VL_16B);
- static unsigned SupRegIDs[3][2] = {
- { AArch64::QPairRegClassID, AArch64::DPairRegClassID },
- { AArch64::QTripleRegClassID, AArch64::DTripleRegClassID },
- { AArch64::QQuadRegClassID, AArch64::DQuadRegClassID }
- };
- unsigned SupRegID = SupRegIDs[Count - 2][static_cast<int>(IsVec64)];
- unsigned Sub0 = IsVec64 ? AArch64::dsub_0 : AArch64::qsub_0;
- const MCRegisterInfo *MRI = getContext().getRegisterInfo();
- Reg = MRI->getMatchingSuperReg(Reg, Sub0,
- &AArch64MCRegisterClasses[SupRegID]);
- }
- Operands.push_back(
- AArch64Operand::CreateVectorList(Reg, Count, Layout, SLoc, ELoc));
+ if (Count > 4)
+ return Error(S, "invalid number of vectors");
+
+ unsigned NumElements = 0;
+ char ElementKind = 0;
+ if (!Kind.empty())
+ parseValidVectorKind(Kind, NumElements, ElementKind);
+
+ Operands.push_back(AArch64Operand::CreateVectorList(
+ FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
+ // If there is an index specifier following the list, parse that too.
if (Parser.getTok().is(AsmToken::LBrac)) {
- uint32_t NumLanes = 0;
- switch(Layout) {
- case A64Layout::VL_B : NumLanes = 16; break;
- case A64Layout::VL_H : NumLanes = 8; break;
- case A64Layout::VL_S : NumLanes = 4; break;
- case A64Layout::VL_D : NumLanes = 2; break;
- default:
- SMLoc Loc = getLexer().getLoc();
- Error(Loc, "expected comma before next operand");
- return MatchOperand_ParseFail;
+ SMLoc SIdx = getLoc();
+ Parser.Lex(); // Eat left bracket token.
+
+ const MCExpr *ImmVal;
+ if (getParser().parseExpression(ImmVal))
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
+ if (!MCE) {
+ TokError("immediate value expected for vector index");
+ return false;
}
- return ParseNEONLane(Operands, NumLanes);
- } else {
+
+ SMLoc E = getLoc();
+ if (Parser.getTok().isNot(AsmToken::RBrac)) {
+ Error(E, "']' expected");
+ return false;
+ }
+
+ Parser.Lex(); // Eat right bracket token.
+
+ Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
+ E, getContext()));
+ }
+ return false;
+}
+
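+/// tryParseGPR64sp0Operand - Parse a GPR64sp register that may optionally be
+/// followed by ", #0" (any other immediate is rejected).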
+AArch64AsmParser::OperandMatchResultTy
+AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
+ const AsmToken &Tok = Parser.getTok();
+ if (!Tok.is(AsmToken::Identifier))
+ return MatchOperand_NoMatch;
+
+ unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
+
+ MCContext &Ctx = getContext();
+ const MCRegisterInfo *RI = Ctx.getRegisterInfo();
+ if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
+ return MatchOperand_NoMatch;
+
+ SMLoc S = getLoc();
+ Parser.Lex(); // Eat register
+
+ if (Parser.getTok().isNot(AsmToken::Comma)) {
+ Operands.push_back(
+ AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
return MatchOperand_Success;
}
+ Parser.Lex(); // Eat comma.
+
+ if (Parser.getTok().is(AsmToken::Hash))
+ Parser.Lex(); // Eat hash
+
+ if (Parser.getTok().isNot(AsmToken::Integer)) {
+ Error(getLoc(), "index must be absent or #0");
+ return MatchOperand_ParseFail;
+ }
+
+ const MCExpr *ImmVal;
+ if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
+ cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
+ Error(getLoc(), "index must be absent or #0");
+ return MatchOperand_ParseFail;
+ }
+
+ Operands.push_back(
+ AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
+ return MatchOperand_Success;
}
-// FIXME: We would really like to be able to tablegen'erate this.
-bool AArch64AsmParser::
-validateInstruction(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- switch (Inst.getOpcode()) {
- case AArch64::BFIwwii:
- case AArch64::BFIxxii:
- case AArch64::SBFIZwwii:
- case AArch64::SBFIZxxii:
- case AArch64::UBFIZwwii:
- case AArch64::UBFIZxxii: {
- unsigned ImmOps = Inst.getNumOperands() - 2;
- int64_t ImmR = Inst.getOperand(ImmOps).getImm();
- int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
-
- if (ImmR != 0 && ImmS >= ImmR) {
- return Error(Operands[4]->getStartLoc(),
- "requested insert overflows register");
- }
+/// parseOperand - Parse an AArch64 instruction operand. For now this parses the
+/// operand regardless of the mnemonic.
+bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
+ bool invertCondCode) {
+ // Check if the current operand has a custom associated parser, if so, try to
+ // custom parse the operand, or fallback to the general approach.
+ OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
+ if (ResTy == MatchOperand_Success)
return false;
- }
- case AArch64::BFXILwwii:
- case AArch64::BFXILxxii:
- case AArch64::SBFXwwii:
- case AArch64::SBFXxxii:
- case AArch64::UBFXwwii:
- case AArch64::UBFXxxii: {
- unsigned ImmOps = Inst.getNumOperands() - 2;
- int64_t ImmR = Inst.getOperand(ImmOps).getImm();
- int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
- int64_t RegWidth = 0;
- switch (Inst.getOpcode()) {
- case AArch64::SBFXxxii: case AArch64::UBFXxxii: case AArch64::BFXILxxii:
- RegWidth = 64;
- break;
- case AArch64::SBFXwwii: case AArch64::UBFXwwii: case AArch64::BFXILwwii:
- RegWidth = 32;
- break;
- }
+ // If there wasn't a custom match, try the generic matcher below. Otherwise,
+ // there was a match, but an error occurred, in which case, just return that
+ // the operand parsing failed.
+ if (ResTy == MatchOperand_ParseFail)
+ return true;
- if (ImmS >= RegWidth || ImmS < ImmR) {
- return Error(Operands[4]->getStartLoc(),
- "requested extract overflows register");
- }
+ // Nothing custom, so do general case parsing.
+ SMLoc S, E;
+ switch (getLexer().getKind()) {
+ default: {
+ SMLoc S = getLoc();
+ const MCExpr *Expr;
+ if (parseSymbolicImmVal(Expr))
+ return Error(S, "invalid operand");
+
+ SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
+ Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
return false;
}
- case AArch64::ICix: {
- int64_t ImmVal = Inst.getOperand(0).getImm();
- A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
- if (!A64IC::NeedsRegister(ICOp)) {
- return Error(Operands[1]->getStartLoc(),
- "specified IC op does not use a register");
- }
- return false;
+ case AsmToken::LBrac: {
+ SMLoc Loc = Parser.getTok().getLoc();
+ Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
+ getContext()));
+ Parser.Lex(); // Eat '['
+
+ // There's no comma after a '[', so we can parse the next operand
+ // immediately.
+ return parseOperand(Operands, false, false);
}
- case AArch64::ICi: {
- int64_t ImmVal = Inst.getOperand(0).getImm();
- A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
- if (A64IC::NeedsRegister(ICOp)) {
- return Error(Operands[1]->getStartLoc(),
- "specified IC op requires a register");
- }
+ case AsmToken::LCurly:
+ return parseVectorList(Operands);
+ case AsmToken::Identifier: {
+ // If we're expecting a Condition Code operand, then just parse that.
+ if (isCondCode)
+ return parseCondCode(Operands, invertCondCode);
+
+ // If it's a register name, parse it.
+ if (!parseRegister(Operands))
+ return false;
+
+ // This could be an optional "shift" or "extend" operand.
+ OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
+ // We can only continue if no tokens were eaten.
+ if (GotShift != MatchOperand_NoMatch)
+ return GotShift;
+
+ // This was not a register so parse other operands that start with an
+ // identifier (like labels) as expressions and create them as immediates.
+ const MCExpr *IdVal;
+ S = getLoc();
+ if (getParser().parseExpression(IdVal))
+ return true;
+
+ E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
+ Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
return false;
}
- case AArch64::TLBIix: {
- int64_t ImmVal = Inst.getOperand(0).getImm();
- A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
- if (!A64TLBI::NeedsRegister(TLBIOp)) {
- return Error(Operands[1]->getStartLoc(),
- "specified TLBI op does not use a register");
+ case AsmToken::Integer:
+ case AsmToken::Real:
+ case AsmToken::Hash: {
+ // #42 -> immediate.
+ S = getLoc();
+ if (getLexer().is(AsmToken::Hash))
+ Parser.Lex();
+
+ // Parse a negative sign
+ bool isNegative = false;
+ if (Parser.getTok().is(AsmToken::Minus)) {
+ isNegative = true;
+ // We need to consume this token only when we have a Real, otherwise
+ // we let parseSymbolicImmVal take care of it
+ if (Parser.getLexer().peekTok().is(AsmToken::Real))
+ Parser.Lex();
+ }
+
+ // The only Real that should come through here is a literal #0.0 for
+ // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
+ // so convert the value.
+ const AsmToken &Tok = Parser.getTok();
+ if (Tok.is(AsmToken::Real)) {
+ APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
+ uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
+ if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
+ Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
+ Mnemonic != "fcmlt")
+ return TokError("unexpected floating point literal");
+ else if (IntVal != 0 || isNegative)
+ return TokError("expected floating-point constant #0.0");
+ Parser.Lex(); // Eat the token.
+
+ Operands.push_back(
+ AArch64Operand::CreateToken("#0", false, S, getContext()));
+ Operands.push_back(
+ AArch64Operand::CreateToken(".0", false, S, getContext()));
+ return false;
}
+
+ const MCExpr *ImmVal;
+ if (parseSymbolicImmVal(ImmVal))
+ return true;
+
+ E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
+ Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
return false;
}
- case AArch64::TLBIi: {
- int64_t ImmVal = Inst.getOperand(0).getImm();
- A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
- if (A64TLBI::NeedsRegister(TLBIOp)) {
- return Error(Operands[1]->getStartLoc(),
- "specified TLBI op requires a register");
+ case AsmToken::Equal: {
+ SMLoc Loc = Parser.getTok().getLoc();
+ if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
+ return Error(Loc, "unexpected token in operand");
+ Parser.Lex(); // Eat '='
+ const MCExpr *SubExprVal;
+ if (getParser().parseExpression(SubExprVal))
+ return true;
+
+ if (Operands.size() < 2 ||
+ !static_cast<AArch64Operand &>(*Operands[1]).isReg())
+ return true;
+
+ bool IsXReg =
+ AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
+ Operands[1]->getReg());
+
+ MCContext& Ctx = getContext();
+ E = SMLoc::getFromPointer(Loc.getPointer() - 1);
+ // If the op is an imm and can be fit into a mov, then replace ldr with mov.
+ if (isa<MCConstantExpr>(SubExprVal)) {
+ uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
+ uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
+ while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
+ ShiftAmt += 16;
+ Imm >>= 16;
+ }
+ if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
+ Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
+ Operands.push_back(AArch64Operand::CreateImm(
+ MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
+ if (ShiftAmt)
+ Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
+ ShiftAmt, true, S, E, Ctx));
+ return false;
+ }
+ APInt Simm = APInt(64, Imm << ShiftAmt);
+ // check if the immediate is an unsigned or signed 32-bit int for W regs
+ if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
+ return Error(Loc, "Immediate too large for register");
}
+ // If it is a label or an imm that cannot fit in a movz, put it into CP.
+ const MCExpr *CPLoc =
+ getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
+ Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
return false;
}
}
-
- return false;
}
-
-// Parses the instruction *together with* all operands, appending each parsed
-// operand to the "Operands" list
+/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
+/// operands.
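+/// Legacy conditional branch mnemonics such as "beq" are first rewritten to
+/// the canonical "b.eq" form.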
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
StringRef Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- size_t CondCodePos = Name.find('.');
-
- StringRef Mnemonic = Name.substr(0, CondCodePos);
- Operands.push_back(AArch64Operand::CreateToken(Mnemonic, NameLoc));
-
- if (CondCodePos != StringRef::npos) {
- // We have a condition code
- SMLoc S = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 1);
- StringRef CondStr = Name.substr(CondCodePos + 1, StringRef::npos);
- A64CC::CondCodes Code;
+ OperandVector &Operands) {
+ Name = StringSwitch<StringRef>(Name.lower())
+ .Case("beq", "b.eq")
+ .Case("bne", "b.ne")
+ .Case("bhs", "b.hs")
+ .Case("bcs", "b.cs")
+ .Case("blo", "b.lo")
+ .Case("bcc", "b.cc")
+ .Case("bmi", "b.mi")
+ .Case("bpl", "b.pl")
+ .Case("bvs", "b.vs")
+ .Case("bvc", "b.vc")
+ .Case("bhi", "b.hi")
+ .Case("bls", "b.ls")
+ .Case("bge", "b.ge")
+ .Case("blt", "b.lt")
+ .Case("bgt", "b.gt")
+ .Case("ble", "b.le")
+ .Case("bal", "b.al")
+ .Case("bnv", "b.nv")
+ .Default(Name);
+
+ // First check for the AArch64-specific .req directive.
+ if (Parser.getTok().is(AsmToken::Identifier) &&
+ Parser.getTok().getIdentifier() == ".req") {
+ parseDirectiveReq(Name, NameLoc);
+ // We always return 'error' for this, as we're done with this
+    // statement and don't need to match the 'instruction'.
+ return true;
+ }
- Code = A64StringToCondCode(CondStr);
+ // Create the leading tokens for the mnemonic, split by '.' characters.
+ size_t Start = 0, Next = Name.find('.');
+ StringRef Head = Name.slice(Start, Next);
- if (Code == A64CC::Invalid) {
- Error(S, "invalid condition code");
+ // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
+ if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
+ bool IsError = parseSysAlias(Head, NameLoc, Operands);
+ if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
Parser.eatToEndOfStatement();
- return true;
- }
-
- SMLoc DotL = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos);
-
- Operands.push_back(AArch64Operand::CreateToken(".", DotL));
- SMLoc E = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 3);
- Operands.push_back(AArch64Operand::CreateCondCode(Code, S, E));
+ return IsError;
}
- // Now we parse the operands of this instruction
+ Operands.push_back(
+ AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
+ Mnemonic = Head;
+
+ // Handle condition codes for a branch mnemonic
+ if (Head == "b" && Next != StringRef::npos) {
+ Start = Next;
+ Next = Name.find('.', Start + 1);
+ Head = Name.slice(Start + 1, Next);
+
+ SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
+ (Head.data() - Name.data()));
+ AArch64CC::CondCode CC = parseCondCodeString(Head);
+ if (CC == AArch64CC::Invalid)
+ return Error(SuffixLoc, "invalid condition code");
+ Operands.push_back(
+ AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
+ Operands.push_back(
+ AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
+ }
+
+ // Add the remaining tokens in the mnemonic.
+ while (Next != StringRef::npos) {
+ Start = Next;
+ Next = Name.find('.', Start + 1);
+ Head = Name.slice(Start, Next);
+ SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
+ (Head.data() - Name.data()) + 1);
+ Operands.push_back(
+ AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
+ }
+
+ // Conditional compare instructions have a Condition Code operand, which needs
+ // to be parsed and an immediate operand created.
+ bool condCodeFourthOperand =
+ (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
+ Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
+ Head == "csinc" || Head == "csinv" || Head == "csneg");
+
+ // These instructions are aliases to some of the conditional select
+ // instructions. However, the condition code is inverted in the aliased
+ // instruction.
+ //
+ // FIXME: Is this the correct way to handle these? Or should the parser
+ // generate the aliased instructions directly?
+ bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
+ bool condCodeThirdOperand =
+ (Head == "cinc" || Head == "cinv" || Head == "cneg");
+
+ // Read the remaining operands.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
// Read the first operand.
- if (ParseOperand(Operands, Mnemonic)) {
+ if (parseOperand(Operands, false, false)) {
Parser.eatToEndOfStatement();
return true;
}
+ unsigned N = 2;
while (getLexer().is(AsmToken::Comma)) {
- Parser.Lex(); // Eat the comma.
+ Parser.Lex(); // Eat the comma.
// Parse and remember the operand.
- if (ParseOperand(Operands, Mnemonic)) {
+ if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
+ (N == 3 && condCodeThirdOperand) ||
+ (N == 2 && condCodeSecondOperand),
+ condCodeSecondOperand || condCodeThirdOperand)) {
Parser.eatToEndOfStatement();
return true;
}
-
// After successfully parsing some operands there are two special cases to
// consider (i.e. notional operands not separated by commas). Both are due
// to memory specifiers:
@@ -2246,47 +3236,702 @@ bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
// in the given context!
if (Parser.getTok().is(AsmToken::RBrac)) {
SMLoc Loc = Parser.getTok().getLoc();
- Operands.push_back(AArch64Operand::CreateToken("]", Loc));
+ Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
+ getContext()));
Parser.Lex();
}
if (Parser.getTok().is(AsmToken::Exclaim)) {
SMLoc Loc = Parser.getTok().getLoc();
- Operands.push_back(AArch64Operand::CreateToken("!", Loc));
+ Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
+ getContext()));
Parser.Lex();
}
+
+ ++N;
}
}
if (getLexer().isNot(AsmToken::EndOfStatement)) {
- SMLoc Loc = getLexer().getLoc();
+ SMLoc Loc = Parser.getTok().getLoc();
Parser.eatToEndOfStatement();
- return Error(Loc, "expected comma before next operand");
+ return Error(Loc, "unexpected token in argument list");
}
- // Eat the EndOfStatement
- Parser.Lex();
-
+ Parser.Lex(); // Consume the EndOfStatement
return false;
}
+// FIXME: This entire function is a giant hack to provide us with decent
+// operand range validation/diagnostics until TableGen/MC can be extended
+// to support autogeneration of this kind of validation.
+bool AArch64AsmParser::validateInstruction(MCInst &Inst,
+ SmallVectorImpl<SMLoc> &Loc) {
+ const MCRegisterInfo *RI = getContext().getRegisterInfo();
+ // Check for indexed addressing modes w/ the base register being the
+ // same as a destination/source register or pair load where
+ // the Rt == Rt2. All of those are undefined behaviour.
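+  // For example "ldp x0, x1, [x0], #16" is diagnosed because the writeback
+  // base is also a destination.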
+ switch (Inst.getOpcode()) {
+ case AArch64::LDPSWpre:
+ case AArch64::LDPWpost:
+ case AArch64::LDPWpre:
+ case AArch64::LDPXpost:
+ case AArch64::LDPXpre: {
+ unsigned Rt = Inst.getOperand(1).getReg();
+ unsigned Rt2 = Inst.getOperand(2).getReg();
+ unsigned Rn = Inst.getOperand(3).getReg();
+ if (RI->isSubRegisterEq(Rn, Rt))
+ return Error(Loc[0], "unpredictable LDP instruction, writeback base "
+ "is also a destination");
+ if (RI->isSubRegisterEq(Rn, Rt2))
+ return Error(Loc[1], "unpredictable LDP instruction, writeback base "
+ "is also a destination");
+ // FALLTHROUGH
+ }
+ case AArch64::LDPDi:
+ case AArch64::LDPQi:
+ case AArch64::LDPSi:
+ case AArch64::LDPSWi:
+ case AArch64::LDPWi:
+ case AArch64::LDPXi: {
+ unsigned Rt = Inst.getOperand(0).getReg();
+ unsigned Rt2 = Inst.getOperand(1).getReg();
+ if (Rt == Rt2)
+ return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
+ break;
+ }
+ case AArch64::LDPDpost:
+ case AArch64::LDPDpre:
+ case AArch64::LDPQpost:
+ case AArch64::LDPQpre:
+ case AArch64::LDPSpost:
+ case AArch64::LDPSpre:
+ case AArch64::LDPSWpost: {
+ unsigned Rt = Inst.getOperand(1).getReg();
+ unsigned Rt2 = Inst.getOperand(2).getReg();
+ if (Rt == Rt2)
+ return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
+ break;
+ }
+ case AArch64::STPDpost:
+ case AArch64::STPDpre:
+ case AArch64::STPQpost:
+ case AArch64::STPQpre:
+ case AArch64::STPSpost:
+ case AArch64::STPSpre:
+ case AArch64::STPWpost:
+ case AArch64::STPWpre:
+ case AArch64::STPXpost:
+ case AArch64::STPXpre: {
+ unsigned Rt = Inst.getOperand(1).getReg();
+ unsigned Rt2 = Inst.getOperand(2).getReg();
+ unsigned Rn = Inst.getOperand(3).getReg();
+ if (RI->isSubRegisterEq(Rn, Rt))
+ return Error(Loc[0], "unpredictable STP instruction, writeback base "
+ "is also a source");
+ if (RI->isSubRegisterEq(Rn, Rt2))
+ return Error(Loc[1], "unpredictable STP instruction, writeback base "
+ "is also a source");
+ break;
+ }
+ case AArch64::LDRBBpre:
+ case AArch64::LDRBpre:
+ case AArch64::LDRHHpre:
+ case AArch64::LDRHpre:
+ case AArch64::LDRSBWpre:
+ case AArch64::LDRSBXpre:
+ case AArch64::LDRSHWpre:
+ case AArch64::LDRSHXpre:
+ case AArch64::LDRSWpre:
+ case AArch64::LDRWpre:
+ case AArch64::LDRXpre:
+ case AArch64::LDRBBpost:
+ case AArch64::LDRBpost:
+ case AArch64::LDRHHpost:
+ case AArch64::LDRHpost:
+ case AArch64::LDRSBWpost:
+ case AArch64::LDRSBXpost:
+ case AArch64::LDRSHWpost:
+ case AArch64::LDRSHXpost:
+ case AArch64::LDRSWpost:
+ case AArch64::LDRWpost:
+ case AArch64::LDRXpost: {
+ unsigned Rt = Inst.getOperand(1).getReg();
+ unsigned Rn = Inst.getOperand(2).getReg();
+ if (RI->isSubRegisterEq(Rn, Rt))
+ return Error(Loc[0], "unpredictable LDR instruction, writeback base "
+ "is also a source");
+ break;
+ }
+ case AArch64::STRBBpost:
+ case AArch64::STRBpost:
+ case AArch64::STRHHpost:
+ case AArch64::STRHpost:
+ case AArch64::STRWpost:
+ case AArch64::STRXpost:
+ case AArch64::STRBBpre:
+ case AArch64::STRBpre:
+ case AArch64::STRHHpre:
+ case AArch64::STRHpre:
+ case AArch64::STRWpre:
+ case AArch64::STRXpre: {
+ unsigned Rt = Inst.getOperand(1).getReg();
+ unsigned Rn = Inst.getOperand(2).getReg();
+ if (RI->isSubRegisterEq(Rn, Rt))
+ return Error(Loc[0], "unpredictable STR instruction, writeback base "
+ "is also a source");
+ break;
+ }
+ }
+
+ // Now check immediate ranges. Separate from the above as there is overlap
+ // in the instructions being checked and this keeps the nested conditionals
+ // to a minimum.
+ switch (Inst.getOpcode()) {
+ case AArch64::ADDSWri:
+ case AArch64::ADDSXri:
+ case AArch64::ADDWri:
+ case AArch64::ADDXri:
+ case AArch64::SUBSWri:
+ case AArch64::SUBSXri:
+ case AArch64::SUBWri:
+ case AArch64::SUBXri: {
+ // Annoyingly we can't do this in the isAddSubImm predicate, so there is
+ // some slight duplication here.
+ if (Inst.getOperand(2).isExpr()) {
+ const MCExpr *Expr = Inst.getOperand(2).getExpr();
+ AArch64MCExpr::VariantKind ELFRefKind;
+ MCSymbolRefExpr::VariantKind DarwinRefKind;
+ int64_t Addend;
+ if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
+ return Error(Loc[2], "invalid immediate expression");
+ }
+
+ // Only allow these with ADDXri.
+ if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
+ DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
+ Inst.getOpcode() == AArch64::ADDXri)
+ return false;
+
+ // Only allow these with ADDXri/ADDWri
+ if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
+ ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
+ ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
+ ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
+ ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
+ ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
+ (Inst.getOpcode() == AArch64::ADDXri ||
+ Inst.getOpcode() == AArch64::ADDWri))
+ return false;
+
+ // Don't allow expressions in the immediate field otherwise
+ return Error(Loc[2], "invalid immediate expression");
+ }
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
+ switch (ErrCode) {
+ case Match_MissingFeature:
+ return Error(Loc,
+ "instruction requires a CPU feature not currently enabled");
+ case Match_InvalidOperand:
+ return Error(Loc, "invalid operand for instruction");
+ case Match_InvalidSuffix:
+ return Error(Loc, "invalid type suffix for instruction");
+ case Match_InvalidCondCode:
+ return Error(Loc, "expected AArch64 condition code");
+ case Match_AddSubRegExtendSmall:
+ return Error(Loc,
+ "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
+ case Match_AddSubRegExtendLarge:
+ return Error(Loc,
+ "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
+ case Match_AddSubSecondSource:
+ return Error(Loc,
+ "expected compatible register, symbol or integer in range [0, 4095]");
+ case Match_LogicalSecondSource:
+ return Error(Loc, "expected compatible register or logical immediate");
+ case Match_InvalidMovImm32Shift:
+ return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
+ case Match_InvalidMovImm64Shift:
+ return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
+ case Match_AddSubRegShift32:
+ return Error(Loc,
+ "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
+ case Match_AddSubRegShift64:
+ return Error(Loc,
+ "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
+ case Match_InvalidFPImm:
+ return Error(Loc,
+ "expected compatible register or floating-point constant");
+ case Match_InvalidMemoryIndexedSImm9:
+ return Error(Loc, "index must be an integer in range [-256, 255].");
+ case Match_InvalidMemoryIndexed4SImm7:
+ return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
+ case Match_InvalidMemoryIndexed8SImm7:
+ return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
+ case Match_InvalidMemoryIndexed16SImm7:
+ return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
+ case Match_InvalidMemoryWExtend8:
+ return Error(Loc,
+ "expected 'uxtw' or 'sxtw' with optional shift of #0");
+ case Match_InvalidMemoryWExtend16:
+ return Error(Loc,
+ "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
+ case Match_InvalidMemoryWExtend32:
+ return Error(Loc,
+ "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
+ case Match_InvalidMemoryWExtend64:
+ return Error(Loc,
+ "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
+ case Match_InvalidMemoryWExtend128:
+ return Error(Loc,
+ "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
+ case Match_InvalidMemoryXExtend8:
+ return Error(Loc,
+ "expected 'lsl' or 'sxtx' with optional shift of #0");
+ case Match_InvalidMemoryXExtend16:
+ return Error(Loc,
+ "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
+ case Match_InvalidMemoryXExtend32:
+ return Error(Loc,
+ "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
+ case Match_InvalidMemoryXExtend64:
+ return Error(Loc,
+ "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
+ case Match_InvalidMemoryXExtend128:
+ return Error(Loc,
+ "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
+ case Match_InvalidMemoryIndexed1:
+ return Error(Loc, "index must be an integer in range [0, 4095].");
+ case Match_InvalidMemoryIndexed2:
+ return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
+ case Match_InvalidMemoryIndexed4:
+ return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
+ case Match_InvalidMemoryIndexed8:
+ return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
+ case Match_InvalidMemoryIndexed16:
+ return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
+ case Match_InvalidImm0_7:
+ return Error(Loc, "immediate must be an integer in range [0, 7].");
+ case Match_InvalidImm0_15:
+ return Error(Loc, "immediate must be an integer in range [0, 15].");
+ case Match_InvalidImm0_31:
+ return Error(Loc, "immediate must be an integer in range [0, 31].");
+ case Match_InvalidImm0_63:
+ return Error(Loc, "immediate must be an integer in range [0, 63].");
+ case Match_InvalidImm0_127:
+ return Error(Loc, "immediate must be an integer in range [0, 127].");
+ case Match_InvalidImm0_65535:
+ return Error(Loc, "immediate must be an integer in range [0, 65535].");
+ case Match_InvalidImm1_8:
+ return Error(Loc, "immediate must be an integer in range [1, 8].");
+ case Match_InvalidImm1_16:
+ return Error(Loc, "immediate must be an integer in range [1, 16].");
+ case Match_InvalidImm1_32:
+ return Error(Loc, "immediate must be an integer in range [1, 32].");
+ case Match_InvalidImm1_64:
+ return Error(Loc, "immediate must be an integer in range [1, 64].");
+ case Match_InvalidIndex1:
+ return Error(Loc, "expected lane specifier '[1]'");
+ case Match_InvalidIndexB:
+ return Error(Loc, "vector lane must be an integer in range [0, 15].");
+ case Match_InvalidIndexH:
+ return Error(Loc, "vector lane must be an integer in range [0, 7].");
+ case Match_InvalidIndexS:
+ return Error(Loc, "vector lane must be an integer in range [0, 3].");
+ case Match_InvalidIndexD:
+ return Error(Loc, "vector lane must be an integer in range [0, 1].");
+ case Match_InvalidLabel:
+ return Error(Loc, "expected label or encodable integer pc offset");
+ case Match_MRS:
+ return Error(Loc, "expected readable system register");
+ case Match_MSR:
+ return Error(Loc, "expected writable system register or pstate");
+ case Match_MnemonicFail:
+ return Error(Loc, "unrecognized instruction mnemonic");
+ default:
+ llvm_unreachable("unexpected error code!");
+ }
+}
+
+static const char *getSubtargetFeatureName(unsigned Val);
+
+bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ OperandVector &Operands,
+ MCStreamer &Out,
+ unsigned &ErrorInfo,
+ bool MatchingInlineAsm) {
+  assert(!Operands.empty() && "Unexpected empty operand list!");
+ AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
+ assert(Op.isToken() && "Leading operand should always be a mnemonic!");
+
+ StringRef Tok = Op.getToken();
+ unsigned NumOperands = Operands.size();
+
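+  // "lsl Rd, Rn, #shift" with an immediate operand is an alias of UBFM: it is
+  // equivalent to "ubfm Rd, Rn, #(-shift mod width), #(width - 1 - shift)",
+  // so rewrite the operands into that form before matching.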
+ if (NumOperands == 4 && Tok == "lsl") {
+ AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
+ AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
+ if (Op2.isReg() && Op3.isImm()) {
+ const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
+ if (Op3CE) {
+ uint64_t Op3Val = Op3CE->getValue();
+ uint64_t NewOp3Val = 0;
+ uint64_t NewOp4Val = 0;
+ if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
+ Op2.getReg())) {
+ NewOp3Val = (32 - Op3Val) & 0x1f;
+ NewOp4Val = 31 - Op3Val;
+ } else {
+ NewOp3Val = (64 - Op3Val) & 0x3f;
+ NewOp4Val = 63 - Op3Val;
+ }
+
+ const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
+ const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
+
+ Operands[0] = AArch64Operand::CreateToken(
+ "ubfm", false, Op.getStartLoc(), getContext());
+ Operands.push_back(AArch64Operand::CreateImm(
+ NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
+ Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
+ Op3.getEndLoc(), getContext());
+ }
+ }
+ } else if (NumOperands == 5) {
+ // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
+ // UBFIZ -> UBFM aliases.
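+    // In each case "op Rd, Rn, #lsb, #width" is rewritten to the
+    // corresponding "BFM/SBFM/UBFM Rd, Rn, #(-lsb mod regwidth), #(width - 1)"
+    // form.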
+ if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
+ AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
+ AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
+ AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
+
+ if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
+ const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
+ const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
+
+ if (Op3CE && Op4CE) {
+ uint64_t Op3Val = Op3CE->getValue();
+ uint64_t Op4Val = Op4CE->getValue();
+
+ uint64_t RegWidth = 0;
+ if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
+ Op1.getReg()))
+ RegWidth = 64;
+ else
+ RegWidth = 32;
+
+ if (Op3Val >= RegWidth)
+ return Error(Op3.getStartLoc(),
+ "expected integer in range [0, 31]");
+ if (Op4Val < 1 || Op4Val > RegWidth)
+ return Error(Op4.getStartLoc(),
+ "expected integer in range [1, 32]");
+
+ uint64_t NewOp3Val = 0;
+ if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
+ Op1.getReg()))
+ NewOp3Val = (32 - Op3Val) & 0x1f;
+ else
+ NewOp3Val = (64 - Op3Val) & 0x3f;
+
+ uint64_t NewOp4Val = Op4Val - 1;
+
+ if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
+ return Error(Op4.getStartLoc(),
+ "requested insert overflows register");
+
+ const MCExpr *NewOp3 =
+ MCConstantExpr::Create(NewOp3Val, getContext());
+ const MCExpr *NewOp4 =
+ MCConstantExpr::Create(NewOp4Val, getContext());
+ Operands[3] = AArch64Operand::CreateImm(
+ NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
+ Operands[4] = AArch64Operand::CreateImm(
+ NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
+ if (Tok == "bfi")
+ Operands[0] = AArch64Operand::CreateToken(
+ "bfm", false, Op.getStartLoc(), getContext());
+ else if (Tok == "sbfiz")
+ Operands[0] = AArch64Operand::CreateToken(
+ "sbfm", false, Op.getStartLoc(), getContext());
+ else if (Tok == "ubfiz")
+ Operands[0] = AArch64Operand::CreateToken(
+ "ubfm", false, Op.getStartLoc(), getContext());
+ else
+ llvm_unreachable("No valid mnemonic for alias?");
+ }
+ }
+
+ // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
+ // UBFX -> UBFM aliases.
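+    // In each case "op Rd, Rn, #lsb, #width" is rewritten to the
+    // corresponding "BFM/SBFM/UBFM Rd, Rn, #lsb, #(lsb + width - 1)" form.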
+ } else if (NumOperands == 5 &&
+ (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
+ AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
+ AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
+ AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
+
+ if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
+ const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
+ const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
+
+ if (Op3CE && Op4CE) {
+ uint64_t Op3Val = Op3CE->getValue();
+ uint64_t Op4Val = Op4CE->getValue();
+
+ uint64_t RegWidth = 0;
+ if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
+ Op1.getReg()))
+ RegWidth = 64;
+ else
+ RegWidth = 32;
+
+ if (Op3Val >= RegWidth)
+ return Error(Op3.getStartLoc(),
+ "expected integer in range [0, 31]");
+ if (Op4Val < 1 || Op4Val > RegWidth)
+ return Error(Op4.getStartLoc(),
+ "expected integer in range [1, 32]");
+
+ uint64_t NewOp4Val = Op3Val + Op4Val - 1;
+
+ if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
+ return Error(Op4.getStartLoc(),
+ "requested extract overflows register");
+
+ const MCExpr *NewOp4 =
+ MCConstantExpr::Create(NewOp4Val, getContext());
+ Operands[4] = AArch64Operand::CreateImm(
+ NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
+ if (Tok == "bfxil")
+ Operands[0] = AArch64Operand::CreateToken(
+ "bfm", false, Op.getStartLoc(), getContext());
+ else if (Tok == "sbfx")
+ Operands[0] = AArch64Operand::CreateToken(
+ "sbfm", false, Op.getStartLoc(), getContext());
+ else if (Tok == "ubfx")
+ Operands[0] = AArch64Operand::CreateToken(
+ "ubfm", false, Op.getStartLoc(), getContext());
+ else
+ llvm_unreachable("No valid mnemonic for alias?");
+ }
+ }
+ }
+ }
+ // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
+ // InstAlias can't quite handle this since the reg classes aren't
+ // subclasses.
+ if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
+ // The source register can be Wn here, but the matcher expects a
+ // GPR64. Twiddle it here if necessary.
+ AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
+ if (Op.isReg()) {
+ unsigned Reg = getXRegFromWReg(Op.getReg());
+ Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
+ Op.getEndLoc(), getContext());
+ }
+ }
+ // FIXME: Likewise for sxt[bh] with a Xd dst operand
+ else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
+ AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
+ if (Op.isReg() &&
+ AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
+ Op.getReg())) {
+ // The source register can be Wn here, but the matcher expects a
+ // GPR64. Twiddle it here if necessary.
+ AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
+ if (Op.isReg()) {
+ unsigned Reg = getXRegFromWReg(Op.getReg());
+ Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
+ Op.getEndLoc(), getContext());
+ }
+ }
+ }
+ // FIXME: Likewise for uxt[bh] with a Xd dst operand
+ else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
+ AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
+ if (Op.isReg() &&
+ AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
+ Op.getReg())) {
+      // The destination register can be Xn here, but the matcher expects a
+      // GPR32. Twiddle it here if necessary.
+ AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
+ if (Op.isReg()) {
+ unsigned Reg = getWRegFromXReg(Op.getReg());
+ Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
+ Op.getEndLoc(), getContext());
+ }
+ }
+ }
+
+ // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
+ if (NumOperands == 3 && Tok == "fmov") {
+ AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
+ AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
+ if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
+ unsigned zreg =
+ AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
+ RegOp.getReg())
+ ? AArch64::WZR
+ : AArch64::XZR;
+ Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
+ Op.getEndLoc(), getContext());
+ }
+ }
+
+ MCInst Inst;
+ // First try to match against the secondary set of tables containing the
+ // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
+ unsigned MatchResult =
+ MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
+
+ // If that fails, try against the alternate table containing long-form NEON:
+ // "fadd v0.2s, v1.2s, v2.2s"
+ if (MatchResult != Match_Success)
+ MatchResult =
+ MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
+
+ switch (MatchResult) {
+ case Match_Success: {
+ // Perform range checking and other semantic validations
+ SmallVector<SMLoc, 8> OperandLocs;
+ NumOperands = Operands.size();
+ for (unsigned i = 1; i < NumOperands; ++i)
+ OperandLocs.push_back(Operands[i]->getStartLoc());
+ if (validateInstruction(Inst, OperandLocs))
+ return true;
+
+ Inst.setLoc(IDLoc);
+ Out.EmitInstruction(Inst, STI);
+ return false;
+ }
+ case Match_MissingFeature: {
+ assert(ErrorInfo && "Unknown missing feature!");
+ // Special case the error message for the very common case where only
+ // a single subtarget feature is missing (neon, e.g.).
+ std::string Msg = "instruction requires:";
+ unsigned Mask = 1;
+ for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
+ if (ErrorInfo & Mask) {
+ Msg += " ";
+ Msg += getSubtargetFeatureName(ErrorInfo & Mask);
+ }
+ Mask <<= 1;
+ }
+ return Error(IDLoc, Msg);
+ }
+ case Match_MnemonicFail:
+ return showMatchError(IDLoc, MatchResult);
+ case Match_InvalidOperand: {
+ SMLoc ErrorLoc = IDLoc;
+ if (ErrorInfo != ~0U) {
+ if (ErrorInfo >= Operands.size())
+ return Error(IDLoc, "too few operands for instruction");
+
+ ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
+ if (ErrorLoc == SMLoc())
+ ErrorLoc = IDLoc;
+ }
+ // If the match failed on a suffix token operand, tweak the diagnostic
+ // accordingly.
+ if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
+ ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
+ MatchResult = Match_InvalidSuffix;
+
+ return showMatchError(ErrorLoc, MatchResult);
+ }
+ case Match_InvalidMemoryIndexed1:
+ case Match_InvalidMemoryIndexed2:
+ case Match_InvalidMemoryIndexed4:
+ case Match_InvalidMemoryIndexed8:
+ case Match_InvalidMemoryIndexed16:
+ case Match_InvalidCondCode:
+ case Match_AddSubRegExtendSmall:
+ case Match_AddSubRegExtendLarge:
+ case Match_AddSubSecondSource:
+ case Match_LogicalSecondSource:
+ case Match_AddSubRegShift32:
+ case Match_AddSubRegShift64:
+ case Match_InvalidMovImm32Shift:
+ case Match_InvalidMovImm64Shift:
+ case Match_InvalidFPImm:
+ case Match_InvalidMemoryWExtend8:
+ case Match_InvalidMemoryWExtend16:
+ case Match_InvalidMemoryWExtend32:
+ case Match_InvalidMemoryWExtend64:
+ case Match_InvalidMemoryWExtend128:
+ case Match_InvalidMemoryXExtend8:
+ case Match_InvalidMemoryXExtend16:
+ case Match_InvalidMemoryXExtend32:
+ case Match_InvalidMemoryXExtend64:
+ case Match_InvalidMemoryXExtend128:
+ case Match_InvalidMemoryIndexed4SImm7:
+ case Match_InvalidMemoryIndexed8SImm7:
+ case Match_InvalidMemoryIndexed16SImm7:
+ case Match_InvalidMemoryIndexedSImm9:
+ case Match_InvalidImm0_7:
+ case Match_InvalidImm0_15:
+ case Match_InvalidImm0_31:
+ case Match_InvalidImm0_63:
+ case Match_InvalidImm0_127:
+ case Match_InvalidImm0_65535:
+ case Match_InvalidImm1_8:
+ case Match_InvalidImm1_16:
+ case Match_InvalidImm1_32:
+ case Match_InvalidImm1_64:
+ case Match_InvalidIndex1:
+ case Match_InvalidIndexB:
+ case Match_InvalidIndexH:
+ case Match_InvalidIndexS:
+ case Match_InvalidIndexD:
+ case Match_InvalidLabel:
+ case Match_MSR:
+ case Match_MRS: {
+ if (ErrorInfo >= Operands.size())
+ return Error(IDLoc, "too few operands for instruction");
+ // Any time we get here, there's nothing fancy to do. Just get the
+ // operand SMLoc and display the diagnostic.
+ SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
+ if (ErrorLoc == SMLoc())
+ ErrorLoc = IDLoc;
+ return showMatchError(ErrorLoc, MatchResult);
+ }
+ }
+
+ llvm_unreachable("Implement any new match types added!");
+ return true;
+}
+
+/// ParseDirective parses the AArch64-specific directives
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
StringRef IDVal = DirectiveID.getIdentifier();
+ SMLoc Loc = DirectiveID.getLoc();
if (IDVal == ".hword")
- return ParseDirectiveWord(2, DirectiveID.getLoc());
- else if (IDVal == ".word")
- return ParseDirectiveWord(4, DirectiveID.getLoc());
- else if (IDVal == ".xword")
- return ParseDirectiveWord(8, DirectiveID.getLoc());
- else if (IDVal == ".tlsdesccall")
- return ParseDirectiveTLSDescCall(DirectiveID.getLoc());
-
- return true;
+ return parseDirectiveWord(2, Loc);
+ if (IDVal == ".word")
+ return parseDirectiveWord(4, Loc);
+ if (IDVal == ".xword")
+ return parseDirectiveWord(8, Loc);
+ if (IDVal == ".tlsdesccall")
+ return parseDirectiveTLSDescCall(Loc);
+ if (IDVal == ".ltorg" || IDVal == ".pool")
+ return parseDirectiveLtorg(Loc);
+ if (IDVal == ".unreq")
+ return parseDirectiveUnreq(DirectiveID.getLoc());
+
+ return parseDirectiveLOH(IDVal, Loc);
}
/// parseDirectiveWord
/// ::= .word [ expression (, expression)* ]
-bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
+bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
if (getLexer().isNot(AsmToken::EndOfStatement)) {
for (;;) {
const MCExpr *Value;
@@ -2311,286 +3956,258 @@ bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
// parseDirectiveTLSDescCall:
// ::= .tlsdesccall symbol
-bool AArch64AsmParser::ParseDirectiveTLSDescCall(SMLoc L) {
+bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
StringRef Name;
if (getParser().parseIdentifier(Name))
return Error(L, "expected symbol after directive");
MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
- const MCSymbolRefExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
+ const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
+ Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
MCInst Inst;
Inst.setOpcode(AArch64::TLSDESCCALL);
Inst.addOperand(MCOperand::CreateExpr(Expr));
- getParser().getStreamer().EmitInstruction(Inst);
+ getParser().getStreamer().EmitInstruction(Inst, STI);
return false;
}
+/// parseDirectiveLOH
+/// ::= .loh <lohName | lohId> label1, ..., labelN
+/// The number of arguments depends on the loh identifier.
+bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
+ if (IDVal != MCLOHDirectiveName())
+ return true;
+ MCLOHType Kind;
+ if (getParser().getTok().isNot(AsmToken::Identifier)) {
+ if (getParser().getTok().isNot(AsmToken::Integer))
+ return TokError("expected an identifier or a number in directive");
+    // We successfully parsed a numeric value for the identifier.
+ // Check if it is valid.
+ int64_t Id = getParser().getTok().getIntVal();
+ Kind = (MCLOHType)Id;
+ // Check that Id does not overflow MCLOHType.
+ if (!isValidMCLOHType(Kind) || Id != Kind)
+ return TokError("invalid numeric identifier in directive");
+ } else {
+ StringRef Name = getTok().getIdentifier();
+    // We successfully parsed an identifier.
+ // Check if it is a recognized one.
+ int Id = MCLOHNameToId(Name);
+
+ if (Id == -1)
+ return TokError("invalid identifier in directive");
+ Kind = (MCLOHType)Id;
+ }
+ // Consume the identifier.
+ Lex();
+ // Get the number of arguments of this LOH.
+ int NbArgs = MCLOHIdToNbArgs(Kind);
+
+ assert(NbArgs != -1 && "Invalid number of arguments");
+
+ SmallVector<MCSymbol *, 3> Args;
+ for (int Idx = 0; Idx < NbArgs; ++Idx) {
+ StringRef Name;
+ if (getParser().parseIdentifier(Name))
+ return TokError("expected identifier in directive");
+ Args.push_back(getContext().GetOrCreateSymbol(Name));
+
+ if (Idx + 1 == NbArgs)
+ break;
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
+ Lex();
+ }
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
-bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm) {
- MCInst Inst;
- unsigned MatchResult;
- MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
- MatchingInlineAsm);
-
- if (ErrorInfo != ~0U && ErrorInfo >= Operands.size())
- return Error(IDLoc, "too few operands for instruction");
+ getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
+ return false;
+}
- switch (MatchResult) {
- default: break;
- case Match_Success:
- if (validateInstruction(Inst, Operands))
- return true;
+/// parseDirectiveLtorg
+/// ::= .ltorg | .pool
+bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
+ getTargetStreamer().emitCurrentConstantPool();
+ return false;
+}
- Out.EmitInstruction(Inst);
- return false;
- case Match_MissingFeature:
- Error(IDLoc, "instruction requires a CPU feature not currently enabled");
- return true;
- case Match_InvalidOperand: {
- SMLoc ErrorLoc = IDLoc;
- if (ErrorInfo != ~0U) {
- ErrorLoc = ((AArch64Operand*)Operands[ErrorInfo])->getStartLoc();
- if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
+/// parseDirectiveReq
+/// ::= name .req registername
+bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
+ Parser.Lex(); // Eat the '.req' token.
+ SMLoc SRegLoc = getLoc();
+ unsigned RegNum = tryParseRegister();
+ bool IsVector = false;
+
+ if (RegNum == static_cast<unsigned>(-1)) {
+ StringRef Kind;
+ RegNum = tryMatchVectorRegister(Kind, false);
+ if (!Kind.empty()) {
+ Error(SRegLoc, "vector register without type specifier expected");
+ return false;
}
+ IsVector = true;
+ }
- return Error(ErrorLoc, "invalid operand for instruction");
+ if (RegNum == static_cast<unsigned>(-1)) {
+ Parser.eatToEndOfStatement();
+ Error(SRegLoc, "register name or alias expected");
+ return false;
}
- case Match_MnemonicFail:
- return Error(IDLoc, "invalid instruction");
- case Match_AddSubRegExtendSmall:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
- case Match_AddSubRegExtendLarge:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
- case Match_AddSubRegShift32:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
- case Match_AddSubRegShift64:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
- case Match_AddSubSecondSource:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected compatible register, symbol or integer in range [0, 4095]");
- case Match_CVTFixedPos32:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [1, 32]");
- case Match_CVTFixedPos64:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [1, 64]");
- case Match_CondCode:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected AArch64 condition code");
- case Match_FPImm:
- // Any situation which allows a nontrivial floating-point constant also
- // allows a register.
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected compatible register or floating-point constant");
- case Match_FPZero:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected floating-point constant #0.0 or invalid register type");
- case Match_Label:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected label or encodable integer pc offset");
- case Match_Lane1:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected lane specifier '[1]'");
- case Match_LoadStoreExtend32_1:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'uxtw' or 'sxtw' with optional shift of #0");
- case Match_LoadStoreExtend32_2:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
- case Match_LoadStoreExtend32_4:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
- case Match_LoadStoreExtend32_8:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
- case Match_LoadStoreExtend32_16:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'lsl' or 'sxtw' with optional shift of #0 or #4");
- case Match_LoadStoreExtend64_1:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'lsl' or 'sxtx' with optional shift of #0");
- case Match_LoadStoreExtend64_2:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
- case Match_LoadStoreExtend64_4:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
- case Match_LoadStoreExtend64_8:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
- case Match_LoadStoreExtend64_16:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
- case Match_LoadStoreSImm7_4:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer multiple of 4 in range [-256, 252]");
- case Match_LoadStoreSImm7_8:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer multiple of 8 in range [-512, 508]");
- case Match_LoadStoreSImm7_16:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer multiple of 16 in range [-1024, 1016]");
- case Match_LoadStoreSImm9:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [-256, 255]");
- case Match_LoadStoreUImm12_1:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected symbolic reference or integer in range [0, 4095]");
- case Match_LoadStoreUImm12_2:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected symbolic reference or integer in range [0, 8190]");
- case Match_LoadStoreUImm12_4:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected symbolic reference or integer in range [0, 16380]");
- case Match_LoadStoreUImm12_8:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected symbolic reference or integer in range [0, 32760]");
- case Match_LoadStoreUImm12_16:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected symbolic reference or integer in range [0, 65520]");
- case Match_LogicalSecondSource:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected compatible register or logical immediate");
- case Match_MOVWUImm16:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected relocated symbol or integer in range [0, 65535]");
- case Match_MRS:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected readable system register");
- case Match_MSR:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected writable system register or pstate");
- case Match_NamedImm_at:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected symbolic 'at' operand: s1e[0-3][rw] or s12e[01][rw]");
- case Match_NamedImm_dbarrier:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 15] or symbolic barrier operand");
- case Match_NamedImm_dc:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected symbolic 'dc' operand");
- case Match_NamedImm_ic:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected 'ic' operand: 'ialluis', 'iallu' or 'ivau'");
- case Match_NamedImm_isb:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 15] or 'sy'");
- case Match_NamedImm_prefetch:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected prefetch hint: p(ld|st|i)l[123](strm|keep)");
- case Match_NamedImm_tlbi:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected translation buffer invalidation operand");
- case Match_UImm16:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 65535]");
- case Match_UImm3:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 7]");
- case Match_UImm4:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 15]");
- case Match_UImm5:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 31]");
- case Match_UImm6:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 63]");
- case Match_UImm7:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 127]");
- case Match_Width32:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [<lsb>, 31]");
- case Match_Width64:
- return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [<lsb>, 63]");
- case Match_ShrImm8:
- return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [1, 8]");
- case Match_ShrImm16:
- return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [1, 16]");
- case Match_ShrImm32:
- return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [1, 32]");
- case Match_ShrImm64:
- return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [1, 64]");
- case Match_ShlImm8:
- return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 7]");
- case Match_ShlImm16:
- return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 15]");
- case Match_ShlImm32:
- return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 31]");
- case Match_ShlImm64:
- return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
- "expected integer in range [0, 63]");
+ // Shouldn't be anything else.
+ if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
+ Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
+ Parser.eatToEndOfStatement();
+ return false;
}
- llvm_unreachable("Implement any new match types added!");
+ Parser.Lex(); // Consume the EndOfStatement
+
+ auto pair = std::make_pair(IsVector, RegNum);
+ if (RegisterReqs.GetOrCreateValue(Name, pair).getValue() != pair)
+ Warning(L, "ignoring redefinition of register alias '" + Name + "'");
+
return true;
}
-void AArch64Operand::print(raw_ostream &OS) const {
- switch (Kind) {
- case k_CondCode:
- OS << "<CondCode: " << CondCode.Code << ">";
- break;
- case k_FPImmediate:
- OS << "<fpimm: " << FPImm.Val << ">";
- break;
- case k_ImmWithLSL:
- OS << "<immwithlsl: imm=" << ImmWithLSL.Val
- << ", shift=" << ImmWithLSL.ShiftAmount << ">";
- break;
- case k_Immediate:
- getImm()->print(OS);
- break;
- case k_Register:
- OS << "<register " << getReg() << '>';
- break;
- case k_Token:
- OS << '\'' << getToken() << '\'';
- break;
- case k_ShiftExtend:
- OS << "<shift: type=" << ShiftExtend.ShiftType
- << ", amount=" << ShiftExtend.Amount << ">";
- break;
- case k_SysReg: {
- StringRef Name(SysReg.Data, SysReg.Length);
- OS << "<sysreg: " << Name << '>';
- break;
- }
- default:
- llvm_unreachable("No idea how to print this kind of operand");
- break;
+/// parseDirectiveUnreq
+/// ::= .unreq registername
+bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
+ if (Parser.getTok().isNot(AsmToken::Identifier)) {
+ Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
+ Parser.eatToEndOfStatement();
+ return false;
}
+ RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
+ Parser.Lex(); // Eat the identifier.
+ return false;
}
-void AArch64Operand::dump() const {
- print(errs());
-}
+bool
+AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
+ AArch64MCExpr::VariantKind &ELFRefKind,
+ MCSymbolRefExpr::VariantKind &DarwinRefKind,
+ int64_t &Addend) {
+ ELFRefKind = AArch64MCExpr::VK_INVALID;
+ DarwinRefKind = MCSymbolRefExpr::VK_None;
+ Addend = 0;
+
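+  // Strip an optional AArch64-specific modifier (e.g. ":lo12:sym") and then
+  // accept either a bare symbol reference or "symbol +/- constant".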
+ if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
+ ELFRefKind = AE->getKind();
+ Expr = AE->getSubExpr();
+ }
+
+ const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
+ if (SE) {
+ // It's a simple symbol reference with no addend.
+ DarwinRefKind = SE->getKind();
+ return true;
+ }
+
+ const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
+ if (!BE)
+ return false;
+
+ SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
+ if (!SE)
+ return false;
+ DarwinRefKind = SE->getKind();
+ if (BE->getOpcode() != MCBinaryExpr::Add &&
+ BE->getOpcode() != MCBinaryExpr::Sub)
+ return false;
+
+  // See if the addend is a constant; otherwise there's more going on here
+  // than we can deal with.
+ auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
+ if (!AddendExpr)
+ return false;
+
+ Addend = AddendExpr->getValue();
+ if (BE->getOpcode() == MCBinaryExpr::Sub)
+ Addend = -Addend;
+
+  // It's some symbol reference + a constant addend, but it really shouldn't
+  // use both Darwin and ELF syntax.
+ return ELFRefKind == AArch64MCExpr::VK_INVALID ||
+ DarwinRefKind == MCSymbolRefExpr::VK_None;
+}
/// Force static initialization.
extern "C" void LLVMInitializeAArch64AsmParser() {
- RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64Target);
+ RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
+ RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
+
+ RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64leTarget);
+ RegisterMCAsmParser<AArch64AsmParser> W(TheARM64beTarget);
}
#define GET_REGISTER_MATCHER
+#define GET_SUBTARGET_FEATURE_NAME
#define GET_MATCHER_IMPLEMENTATION
#include "AArch64GenAsmMatcher.inc"
+
+// Define this matcher function after the auto-generated include so we
+// have the match class enum definitions.
+unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
+ unsigned Kind) {
+ AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
+ // If the kind is a token for a literal immediate, check if our asm
+ // operand matches. This is for InstAliases which have a fixed-value
+ // immediate in the syntax.
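+  // (TableGen mangles the literal "#<N>" tokens into these MCK__35_<N>
+  // match classes; 35 is the ASCII code for '#'.)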
+ int64_t ExpectedVal;
+ switch (Kind) {
+ default:
+ return Match_InvalidOperand;
+ case MCK__35_0:
+ ExpectedVal = 0;
+ break;
+ case MCK__35_1:
+ ExpectedVal = 1;
+ break;
+ case MCK__35_12:
+ ExpectedVal = 12;
+ break;
+ case MCK__35_16:
+ ExpectedVal = 16;
+ break;
+ case MCK__35_2:
+ ExpectedVal = 2;
+ break;
+ case MCK__35_24:
+ ExpectedVal = 24;
+ break;
+ case MCK__35_3:
+ ExpectedVal = 3;
+ break;
+ case MCK__35_32:
+ ExpectedVal = 32;
+ break;
+ case MCK__35_4:
+ ExpectedVal = 4;
+ break;
+ case MCK__35_48:
+ ExpectedVal = 48;
+ break;
+ case MCK__35_6:
+ ExpectedVal = 6;
+ break;
+ case MCK__35_64:
+ ExpectedVal = 64;
+ break;
+ case MCK__35_8:
+ ExpectedVal = 8;
+ break;
+ }
+ if (!Op.isImm())
+ return Match_InvalidOperand;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
+ if (!CE)
+ return Match_InvalidOperand;
+ if (CE->getValue() == ExpectedVal)
+ return Match_Success;
+ return Match_InvalidOperand;
+}
diff --git a/contrib/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp b/contrib/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
index be4d7f2..6de27d6 100644
--- a/contrib/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
+++ b/contrib/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
@@ -1,4 +1,4 @@
-//===- AArch64Disassembler.cpp - Disassembler for AArch64 ISA -------------===//
+//===- AArch64Disassembler.cpp - Disassembler for AArch64 -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,245 +7,169 @@
//
//===----------------------------------------------------------------------===//
//
-// This file contains the functions necessary to decode AArch64 instruction
-// bitpatterns into MCInsts (with the help of TableGenerated information from
-// the instruction definitions).
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "arm-disassembler"
-
-#include "AArch64.h"
-#include "AArch64RegisterInfo.h"
+#include "AArch64Disassembler.h"
+#include "AArch64ExternalSymbolizer.h"
#include "AArch64Subtarget.h"
+#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCInstrDesc.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCDisassembler.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MemoryObject.h"
-#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
-typedef MCDisassembler::DecodeStatus DecodeStatus;
-
-namespace {
-/// AArch64 disassembler for all AArch64 platforms.
-class AArch64Disassembler : public MCDisassembler {
- OwningPtr<const MCRegisterInfo> RegInfo;
-public:
- /// Initializes the disassembler.
- ///
- AArch64Disassembler(const MCSubtargetInfo &STI, const MCRegisterInfo *Info)
- : MCDisassembler(STI), RegInfo(Info) {
- }
-
- ~AArch64Disassembler() {}
-
- /// See MCDisassembler.
- DecodeStatus getInstruction(MCInst &instr,
- uint64_t &size,
- const MemoryObject &region,
- uint64_t address,
- raw_ostream &vStream,
- raw_ostream &cStream) const;
-
- const MCRegisterInfo *getRegInfo() const { return RegInfo.get(); }
-};
+#define DEBUG_TYPE "aarch64-disassembler"
-}
-
-// Forward-declarations used in the auto-generated files.
-static DecodeStatus DecodeGPR64RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address, const void *Decoder);
-static DecodeStatus
-DecodeGPR64xspRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address, const void *Decoder);
-
-static DecodeStatus DecodeGPR32RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address, const void *Decoder);
-static DecodeStatus
-DecodeGPR32wspRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address, const void *Decoder);
+// Pull DecodeStatus and its enum values into the global namespace.
+typedef llvm::MCDisassembler::DecodeStatus DecodeStatus;
-static DecodeStatus DecodeFPR8RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeFPR16RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeFPR32RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeFPR64RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeFPR64LoRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address, const void *Decoder);
+// Forward declare these because the autogenerated code will reference them.
+// Definitions are further down.
static DecodeStatus DecodeFPR128RegisterClass(llvm::MCInst &Inst,
unsigned RegNo, uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeFPR128LoRegisterClass(llvm::MCInst &Inst,
- unsigned RegNo, uint64_t Address,
- const void *Decoder);
-
-static DecodeStatus DecodeGPR64noxzrRegisterClass(llvm::MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder);
-
-static DecodeStatus DecodeDPairRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodeFPR128_loRegisterClass(llvm::MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeFPR64RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeQPairRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodeFPR32RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeDTripleRegisterClass(llvm::MCInst &Inst,
- unsigned RegNo, uint64_t Address,
- const void *Decoder);
-static DecodeStatus DecodeQTripleRegisterClass(llvm::MCInst &Inst,
- unsigned RegNo, uint64_t Address,
- const void *Decoder);
-static DecodeStatus DecodeDQuadRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodeFPR16RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeQQuadRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodeFPR8RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeGPR64RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
-
-static DecodeStatus DecodeAddrRegExtendOperand(llvm::MCInst &Inst,
- unsigned OptionHiS,
- uint64_t Address,
- const void *Decoder);
-
-
-static DecodeStatus DecodeBitfield32ImmOperand(llvm::MCInst &Inst,
- unsigned Imm6Bits,
- uint64_t Address,
+static DecodeStatus DecodeGPR64spRegisterClass(llvm::MCInst &Inst,
+ unsigned RegNo, uint64_t Address,
const void *Decoder);
-
-static DecodeStatus DecodeCVT32FixedPosOperand(llvm::MCInst &Inst,
- unsigned Imm6Bits,
- uint64_t Address,
+static DecodeStatus DecodeGPR32RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeGPR32spRegisterClass(llvm::MCInst &Inst,
+ unsigned RegNo, uint64_t Address,
const void *Decoder);
-
-static DecodeStatus DecodeFPZeroOperand(llvm::MCInst &Inst,
- unsigned RmBits,
- uint64_t Address,
- const void *Decoder);
-
-static DecodeStatus DecodeShiftRightImm8(MCInst &Inst, unsigned Val,
- uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeShiftRightImm16(MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeQQRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeShiftRightImm32(MCInst &Inst, unsigned Val,
- uint64_t Address,
- const void *Decoder);
-static DecodeStatus DecodeShiftRightImm64(MCInst &Inst, unsigned Val,
- uint64_t Address,
- const void *Decoder);
-
-static DecodeStatus DecodeShiftLeftImm8(MCInst &Inst, unsigned Val,
- uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeShiftLeftImm16(MCInst &Inst, unsigned Val,
- uint64_t Address,
- const void *Decoder);
-static DecodeStatus DecodeShiftLeftImm32(MCInst &Inst, unsigned Val,
- uint64_t Address,
- const void *Decoder);
-static DecodeStatus DecodeShiftLeftImm64(MCInst &Inst, unsigned Val,
- uint64_t Address,
- const void *Decoder);
-
-template<int RegWidth>
-static DecodeStatus DecodeMoveWideImmOperand(llvm::MCInst &Inst,
- unsigned FullImm,
- uint64_t Address,
- const void *Decoder);
-
-template<int RegWidth>
-static DecodeStatus DecodeLogicalImmOperand(llvm::MCInst &Inst,
- unsigned Bits,
+static DecodeStatus DecodeQQQRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeQQQQRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
-
-static DecodeStatus DecodeRegExtendOperand(llvm::MCInst &Inst,
- unsigned ShiftAmount,
+static DecodeStatus DecodeDDRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeDDDRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
-template <A64SE::ShiftExtSpecifiers Ext, bool IsHalf>
-static DecodeStatus
-DecodeNeonMovImmShiftOperand(llvm::MCInst &Inst, unsigned ShiftAmount,
- uint64_t Address, const void *Decoder);
-
-static DecodeStatus Decode32BitShiftOperand(llvm::MCInst &Inst,
- unsigned ShiftAmount,
+static DecodeStatus DecodeDDDDRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeBitfieldInstruction(llvm::MCInst &Inst, unsigned Insn,
+
+static DecodeStatus DecodeFixedPointScaleImm32(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeFixedPointScaleImm64(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodePCRelLabel19(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeMemExtend(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeMRSSystemRegister(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeMSRSystemRegister(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeThreeAddrSRegInstruction(llvm::MCInst &Inst,
+ uint32_t insn,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeMoveImmInstruction(llvm::MCInst &Inst, uint32_t insn,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeUnsignedLdStInstruction(llvm::MCInst &Inst,
+ uint32_t insn,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeSignedLdStInstruction(llvm::MCInst &Inst,
+ uint32_t insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeExclusiveLdStInstruction(llvm::MCInst &Inst,
+ uint32_t insn,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodePairLdStInstruction(llvm::MCInst &Inst, uint32_t insn,
uint64_t Address,
const void *Decoder);
-
-static DecodeStatus DecodeFMOVLaneInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeAddSubERegInstruction(llvm::MCInst &Inst,
+ uint32_t insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeLogicalImmInstruction(llvm::MCInst &Inst,
+ uint32_t insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeModImmInstruction(llvm::MCInst &Inst, uint32_t insn,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeModImmTiedInstruction(llvm::MCInst &Inst,
+ uint32_t insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeAdrInstruction(llvm::MCInst &Inst, uint32_t insn,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeBaseAddSubImm(llvm::MCInst &Inst, uint32_t insn,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeUnconditionalBranch(llvm::MCInst &Inst, uint32_t insn,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeSystemPStateInstruction(llvm::MCInst &Inst,
+ uint32_t insn,
+ uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeTestAndBranch(llvm::MCInst &Inst, uint32_t insn,
+ uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeLDSTPairInstruction(llvm::MCInst &Inst,
- unsigned Insn,
+static DecodeStatus DecodeFMOVLaneInstruction(llvm::MCInst &Inst, unsigned Insn,
uint64_t Address,
const void *Decoder);
-
-static DecodeStatus DecodeLoadPairExclusiveInstruction(llvm::MCInst &Inst,
- unsigned Val,
- uint64_t Address,
- const void *Decoder);
-
-template<typename SomeNamedImmMapper>
-static DecodeStatus DecodeNamedImmOperand(llvm::MCInst &Inst,
- unsigned Val,
- uint64_t Address,
- const void *Decoder);
-
-static DecodeStatus
-DecodeSysRegOperand(const A64SysReg::SysRegMapper &InstMapper,
- llvm::MCInst &Inst, unsigned Val,
- uint64_t Address, const void *Decoder);
-
-static DecodeStatus DecodeMRSOperand(llvm::MCInst &Inst,
- unsigned Val,
- uint64_t Address,
- const void *Decoder);
-
-static DecodeStatus DecodeMSROperand(llvm::MCInst &Inst,
- unsigned Val,
- uint64_t Address,
- const void *Decoder);
-
-
-static DecodeStatus DecodeSingleIndexedInstruction(llvm::MCInst &Inst,
- unsigned Val,
- uint64_t Address,
- const void *Decoder);
-
-static DecodeStatus DecodeVLDSTPostInstruction(MCInst &Inst, unsigned Val,
- uint64_t Address,
+static DecodeStatus DecodeVecShiftR64Imm(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr, const void *Decoder);
+static DecodeStatus DecodeVecShiftR64ImmNarrow(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr,
const void *Decoder);
-
-static DecodeStatus DecodeVLDSTLanePostInstruction(MCInst &Inst, unsigned Insn,
- uint64_t Address,
- const void *Decoder);
-
-static DecodeStatus DecodeSHLLInstruction(MCInst &Inst, unsigned Insn,
- uint64_t Address,
- const void *Decoder);
-
-static bool Check(DecodeStatus &Out, DecodeStatus In);
-
-#include "AArch64GenDisassemblerTables.inc"
-#include "AArch64GenInstrInfo.inc"
+static DecodeStatus DecodeVecShiftR32Imm(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr, const void *Decoder);
+static DecodeStatus DecodeVecShiftR32ImmNarrow(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr,
+ const void *Decoder);
+static DecodeStatus DecodeVecShiftR16Imm(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr, const void *Decoder);
+static DecodeStatus DecodeVecShiftR16ImmNarrow(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr,
+ const void *Decoder);
+static DecodeStatus DecodeVecShiftR8Imm(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr, const void *Decoder);
+static DecodeStatus DecodeVecShiftL64Imm(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr, const void *Decoder);
+static DecodeStatus DecodeVecShiftL32Imm(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr, const void *Decoder);
+static DecodeStatus DecodeVecShiftL16Imm(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr, const void *Decoder);
+static DecodeStatus DecodeVecShiftL8Imm(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr, const void *Decoder);
static bool Check(DecodeStatus &Out, DecodeStatus In) {
switch (In) {
@@ -262,486 +186,479 @@ static bool Check(DecodeStatus &Out, DecodeStatus In) {
llvm_unreachable("Invalid DecodeStatus!");
}
+#include "AArch64GenDisassemblerTables.inc"
+#include "AArch64GenInstrInfo.inc"
+
+#define Success llvm::MCDisassembler::Success
+#define Fail llvm::MCDisassembler::Fail
+#define SoftFail llvm::MCDisassembler::SoftFail
+
+static MCDisassembler *createAArch64Disassembler(const Target &T,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new AArch64Disassembler(STI, Ctx);
+}
+
DecodeStatus AArch64Disassembler::getInstruction(MCInst &MI, uint64_t &Size,
- const MemoryObject &Region,
- uint64_t Address,
- raw_ostream &os,
- raw_ostream &cs) const {
+ const MemoryObject &Region,
+ uint64_t Address,
+ raw_ostream &os,
+ raw_ostream &cs) const {
CommentStream = &cs;
uint8_t bytes[4];
+ Size = 0;
// We want to read exactly 4 bytes of data.
- if (Region.readBytes(Address, 4, bytes) == -1) {
- Size = 0;
- return MCDisassembler::Fail;
- }
+ if (Region.readBytes(Address, 4, (uint8_t *)bytes) == -1)
+ return Fail;
+ Size = 4;
   // Encoded as a little-endian 32-bit word in the stream.
- uint32_t insn = (bytes[3] << 24) |
- (bytes[2] << 16) |
- (bytes[1] << 8) |
- (bytes[0] << 0);
+ uint32_t insn =
+ (bytes[3] << 24) | (bytes[2] << 16) | (bytes[1] << 8) | (bytes[0] << 0);
// Calling the auto-generated decoder function.
- DecodeStatus result = decodeInstruction(DecoderTableA6432, MI, insn, Address,
- this, STI);
- if (result != MCDisassembler::Fail) {
- Size = 4;
- return result;
- }
-
- MI.clear();
- Size = 0;
- return MCDisassembler::Fail;
+ return decodeInstruction(DecoderTable32, MI, insn, Address, this, STI);
}
-static unsigned getReg(const void *D, unsigned RC, unsigned RegNo) {
- const AArch64Disassembler *Dis = static_cast<const AArch64Disassembler*>(D);
- return Dis->getRegInfo()->getRegClass(RC).getRegister(RegNo);
+static MCSymbolizer *
+createAArch64ExternalSymbolizer(StringRef TT, LLVMOpInfoCallback GetOpInfo,
+ LLVMSymbolLookupCallback SymbolLookUp,
+ void *DisInfo, MCContext *Ctx,
+ MCRelocationInfo *RelInfo) {
+ return new llvm::AArch64ExternalSymbolizer(
+ *Ctx,
+ std::unique_ptr<MCRelocationInfo>(RelInfo),
+ GetOpInfo, SymbolLookUp, DisInfo);
}
-static DecodeStatus DecodeGPR64RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address, const void *Decoder) {
- if (RegNo > 31)
- return MCDisassembler::Fail;
+extern "C" void LLVMInitializeAArch64Disassembler() {
+ TargetRegistry::RegisterMCDisassembler(TheAArch64leTarget,
+ createAArch64Disassembler);
+ TargetRegistry::RegisterMCDisassembler(TheAArch64beTarget,
+ createAArch64Disassembler);
+ TargetRegistry::RegisterMCSymbolizer(TheAArch64leTarget,
+ createAArch64ExternalSymbolizer);
+ TargetRegistry::RegisterMCSymbolizer(TheAArch64beTarget,
+ createAArch64ExternalSymbolizer);
- uint16_t Register = getReg(Decoder, AArch64::GPR64RegClassID, RegNo);
- Inst.addOperand(MCOperand::CreateReg(Register));
- return MCDisassembler::Success;
+ TargetRegistry::RegisterMCDisassembler(TheARM64leTarget,
+ createAArch64Disassembler);
+ TargetRegistry::RegisterMCDisassembler(TheARM64beTarget,
+ createAArch64Disassembler);
+ TargetRegistry::RegisterMCSymbolizer(TheARM64leTarget,
+ createAArch64ExternalSymbolizer);
+ TargetRegistry::RegisterMCSymbolizer(TheARM64beTarget,
+ createAArch64ExternalSymbolizer);
}
-static DecodeStatus
-DecodeGPR64xspRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address, const void *Decoder) {
- if (RegNo > 31)
- return MCDisassembler::Fail;
-
- uint16_t Register = getReg(Decoder, AArch64::GPR64xspRegClassID, RegNo);
- Inst.addOperand(MCOperand::CreateReg(Register));
- return MCDisassembler::Success;
-}
+static const unsigned FPR128DecoderTable[] = {
+ AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4,
+ AArch64::Q5, AArch64::Q6, AArch64::Q7, AArch64::Q8, AArch64::Q9,
+ AArch64::Q10, AArch64::Q11, AArch64::Q12, AArch64::Q13, AArch64::Q14,
+ AArch64::Q15, AArch64::Q16, AArch64::Q17, AArch64::Q18, AArch64::Q19,
+ AArch64::Q20, AArch64::Q21, AArch64::Q22, AArch64::Q23, AArch64::Q24,
+ AArch64::Q25, AArch64::Q26, AArch64::Q27, AArch64::Q28, AArch64::Q29,
+ AArch64::Q30, AArch64::Q31
+};
-static DecodeStatus DecodeGPR32RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address,
- const void *Decoder) {
+static DecodeStatus DecodeFPR128RegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Addr,
+ const void *Decoder) {
if (RegNo > 31)
- return MCDisassembler::Fail;
+ return Fail;
- uint16_t Register = getReg(Decoder, AArch64::GPR32RegClassID, RegNo);
+ unsigned Register = FPR128DecoderTable[RegNo];
Inst.addOperand(MCOperand::CreateReg(Register));
- return MCDisassembler::Success;
+ return Success;
}
-static DecodeStatus
-DecodeGPR32wspRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address, const void *Decoder) {
- if (RegNo > 31)
- return MCDisassembler::Fail;
-
- uint16_t Register = getReg(Decoder, AArch64::GPR32wspRegClassID, RegNo);
- Inst.addOperand(MCOperand::CreateReg(Register));
- return MCDisassembler::Success;
+static DecodeStatus DecodeFPR128_loRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Addr,
+ const void *Decoder) {
+ if (RegNo > 15)
+ return Fail;
+ return DecodeFPR128RegisterClass(Inst, RegNo, Addr, Decoder);
}
-static DecodeStatus
-DecodeFPR8RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address, const void *Decoder) {
- if (RegNo > 31)
- return MCDisassembler::Fail;
-
- uint16_t Register = getReg(Decoder, AArch64::FPR8RegClassID, RegNo);
- Inst.addOperand(MCOperand::CreateReg(Register));
- return MCDisassembler::Success;
-}
+static const unsigned FPR64DecoderTable[] = {
+ AArch64::D0, AArch64::D1, AArch64::D2, AArch64::D3, AArch64::D4,
+ AArch64::D5, AArch64::D6, AArch64::D7, AArch64::D8, AArch64::D9,
+ AArch64::D10, AArch64::D11, AArch64::D12, AArch64::D13, AArch64::D14,
+ AArch64::D15, AArch64::D16, AArch64::D17, AArch64::D18, AArch64::D19,
+ AArch64::D20, AArch64::D21, AArch64::D22, AArch64::D23, AArch64::D24,
+ AArch64::D25, AArch64::D26, AArch64::D27, AArch64::D28, AArch64::D29,
+ AArch64::D30, AArch64::D31
+};
-static DecodeStatus
-DecodeFPR16RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address, const void *Decoder) {
+static DecodeStatus DecodeFPR64RegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Addr,
+ const void *Decoder) {
if (RegNo > 31)
- return MCDisassembler::Fail;
+ return Fail;
- uint16_t Register = getReg(Decoder, AArch64::FPR16RegClassID, RegNo);
+ unsigned Register = FPR64DecoderTable[RegNo];
Inst.addOperand(MCOperand::CreateReg(Register));
- return MCDisassembler::Success;
+ return Success;
}
+static const unsigned FPR32DecoderTable[] = {
+ AArch64::S0, AArch64::S1, AArch64::S2, AArch64::S3, AArch64::S4,
+ AArch64::S5, AArch64::S6, AArch64::S7, AArch64::S8, AArch64::S9,
+ AArch64::S10, AArch64::S11, AArch64::S12, AArch64::S13, AArch64::S14,
+ AArch64::S15, AArch64::S16, AArch64::S17, AArch64::S18, AArch64::S19,
+ AArch64::S20, AArch64::S21, AArch64::S22, AArch64::S23, AArch64::S24,
+ AArch64::S25, AArch64::S26, AArch64::S27, AArch64::S28, AArch64::S29,
+ AArch64::S30, AArch64::S31
+};
-static DecodeStatus
-DecodeFPR32RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address, const void *Decoder) {
+static DecodeStatus DecodeFPR32RegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Addr,
+ const void *Decoder) {
if (RegNo > 31)
- return MCDisassembler::Fail;
+ return Fail;
- uint16_t Register = getReg(Decoder, AArch64::FPR32RegClassID, RegNo);
+ unsigned Register = FPR32DecoderTable[RegNo];
Inst.addOperand(MCOperand::CreateReg(Register));
- return MCDisassembler::Success;
+ return Success;
}
-static DecodeStatus
-DecodeFPR64RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address, const void *Decoder) {
+static const unsigned FPR16DecoderTable[] = {
+ AArch64::H0, AArch64::H1, AArch64::H2, AArch64::H3, AArch64::H4,
+ AArch64::H5, AArch64::H6, AArch64::H7, AArch64::H8, AArch64::H9,
+ AArch64::H10, AArch64::H11, AArch64::H12, AArch64::H13, AArch64::H14,
+ AArch64::H15, AArch64::H16, AArch64::H17, AArch64::H18, AArch64::H19,
+ AArch64::H20, AArch64::H21, AArch64::H22, AArch64::H23, AArch64::H24,
+ AArch64::H25, AArch64::H26, AArch64::H27, AArch64::H28, AArch64::H29,
+ AArch64::H30, AArch64::H31
+};
+
+static DecodeStatus DecodeFPR16RegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Addr,
+ const void *Decoder) {
if (RegNo > 31)
- return MCDisassembler::Fail;
+ return Fail;
- uint16_t Register = getReg(Decoder, AArch64::FPR64RegClassID, RegNo);
+ unsigned Register = FPR16DecoderTable[RegNo];
Inst.addOperand(MCOperand::CreateReg(Register));
- return MCDisassembler::Success;
+ return Success;
}
-static DecodeStatus
-DecodeFPR64LoRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address, const void *Decoder) {
- if (RegNo > 15)
- return MCDisassembler::Fail;
-
- return DecodeFPR64RegisterClass(Inst, RegNo, Address, Decoder);
-}
+static const unsigned FPR8DecoderTable[] = {
+ AArch64::B0, AArch64::B1, AArch64::B2, AArch64::B3, AArch64::B4,
+ AArch64::B5, AArch64::B6, AArch64::B7, AArch64::B8, AArch64::B9,
+ AArch64::B10, AArch64::B11, AArch64::B12, AArch64::B13, AArch64::B14,
+ AArch64::B15, AArch64::B16, AArch64::B17, AArch64::B18, AArch64::B19,
+ AArch64::B20, AArch64::B21, AArch64::B22, AArch64::B23, AArch64::B24,
+ AArch64::B25, AArch64::B26, AArch64::B27, AArch64::B28, AArch64::B29,
+ AArch64::B30, AArch64::B31
+};
-static DecodeStatus
-DecodeFPR128RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address, const void *Decoder) {
+static DecodeStatus DecodeFPR8RegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Addr,
+ const void *Decoder) {
if (RegNo > 31)
- return MCDisassembler::Fail;
+ return Fail;
- uint16_t Register = getReg(Decoder, AArch64::FPR128RegClassID, RegNo);
+ unsigned Register = FPR8DecoderTable[RegNo];
Inst.addOperand(MCOperand::CreateReg(Register));
- return MCDisassembler::Success;
+ return Success;
}
-static DecodeStatus
-DecodeFPR128LoRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address, const void *Decoder) {
- if (RegNo > 15)
- return MCDisassembler::Fail;
-
- return DecodeFPR128RegisterClass(Inst, RegNo, Address, Decoder);
-}
+static const unsigned GPR64DecoderTable[] = {
+ AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3, AArch64::X4,
+ AArch64::X5, AArch64::X6, AArch64::X7, AArch64::X8, AArch64::X9,
+ AArch64::X10, AArch64::X11, AArch64::X12, AArch64::X13, AArch64::X14,
+ AArch64::X15, AArch64::X16, AArch64::X17, AArch64::X18, AArch64::X19,
+ AArch64::X20, AArch64::X21, AArch64::X22, AArch64::X23, AArch64::X24,
+ AArch64::X25, AArch64::X26, AArch64::X27, AArch64::X28, AArch64::FP,
+ AArch64::LR, AArch64::XZR
+};
-static DecodeStatus DecodeGPR64noxzrRegisterClass(llvm::MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder) {
- if (RegNo > 30)
- return MCDisassembler::Fail;
+static DecodeStatus DecodeGPR64RegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Addr,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return Fail;
- uint16_t Register = getReg(Decoder, AArch64::GPR64noxzrRegClassID, RegNo);
+ unsigned Register = GPR64DecoderTable[RegNo];
Inst.addOperand(MCOperand::CreateReg(Register));
- return MCDisassembler::Success;
+ return Success;
}
-static DecodeStatus DecodeRegisterClassByID(llvm::MCInst &Inst, unsigned RegNo,
- unsigned RegID,
- const void *Decoder) {
+static DecodeStatus DecodeGPR64spRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Addr,
+ const void *Decoder) {
if (RegNo > 31)
- return MCDisassembler::Fail;
-
- uint16_t Register = getReg(Decoder, RegID, RegNo);
+ return Fail;
+ unsigned Register = GPR64DecoderTable[RegNo];
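+  // Register number 31 encodes SP rather than XZR in this register class.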
+ if (Register == AArch64::XZR)
+ Register = AArch64::SP;
Inst.addOperand(MCOperand::CreateReg(Register));
- return MCDisassembler::Success;
+ return Success;
}
-static DecodeStatus DecodeDPairRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address,
- const void *Decoder) {
- return DecodeRegisterClassByID(Inst, RegNo, AArch64::DPairRegClassID,
- Decoder);
-}
+static const unsigned GPR32DecoderTable[] = {
+ AArch64::W0, AArch64::W1, AArch64::W2, AArch64::W3, AArch64::W4,
+ AArch64::W5, AArch64::W6, AArch64::W7, AArch64::W8, AArch64::W9,
+ AArch64::W10, AArch64::W11, AArch64::W12, AArch64::W13, AArch64::W14,
+ AArch64::W15, AArch64::W16, AArch64::W17, AArch64::W18, AArch64::W19,
+ AArch64::W20, AArch64::W21, AArch64::W22, AArch64::W23, AArch64::W24,
+ AArch64::W25, AArch64::W26, AArch64::W27, AArch64::W28, AArch64::W29,
+ AArch64::W30, AArch64::WZR
+};
-static DecodeStatus DecodeQPairRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address,
+static DecodeStatus DecodeGPR32RegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Addr,
const void *Decoder) {
- return DecodeRegisterClassByID(Inst, RegNo, AArch64::QPairRegClassID,
- Decoder);
-}
+ if (RegNo > 31)
+ return Fail;
-static DecodeStatus DecodeDTripleRegisterClass(llvm::MCInst &Inst,
- unsigned RegNo, uint64_t Address,
- const void *Decoder) {
- return DecodeRegisterClassByID(Inst, RegNo, AArch64::DTripleRegClassID,
- Decoder);
+ unsigned Register = GPR32DecoderTable[RegNo];
+ Inst.addOperand(MCOperand::CreateReg(Register));
+ return Success;
}
-static DecodeStatus DecodeQTripleRegisterClass(llvm::MCInst &Inst,
- unsigned RegNo, uint64_t Address,
+static DecodeStatus DecodeGPR32spRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Addr,
const void *Decoder) {
- return DecodeRegisterClassByID(Inst, RegNo, AArch64::QTripleRegClassID,
- Decoder);
-}
+ if (RegNo > 31)
+ return Fail;
-static DecodeStatus DecodeDQuadRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address,
- const void *Decoder) {
- return DecodeRegisterClassByID(Inst, RegNo, AArch64::DQuadRegClassID,
- Decoder);
+ unsigned Register = GPR32DecoderTable[RegNo];
+ if (Register == AArch64::WZR)
+ Register = AArch64::WSP;
+ Inst.addOperand(MCOperand::CreateReg(Register));
+ return Success;
}
-static DecodeStatus DecodeQQuadRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
- uint64_t Address,
- const void *Decoder) {
- return DecodeRegisterClassByID(Inst, RegNo, AArch64::QQuadRegClassID,
- Decoder);
-}
+static const unsigned VectorDecoderTable[] = {
+ AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4,
+ AArch64::Q5, AArch64::Q6, AArch64::Q7, AArch64::Q8, AArch64::Q9,
+ AArch64::Q10, AArch64::Q11, AArch64::Q12, AArch64::Q13, AArch64::Q14,
+ AArch64::Q15, AArch64::Q16, AArch64::Q17, AArch64::Q18, AArch64::Q19,
+ AArch64::Q20, AArch64::Q21, AArch64::Q22, AArch64::Q23, AArch64::Q24,
+ AArch64::Q25, AArch64::Q26, AArch64::Q27, AArch64::Q28, AArch64::Q29,
+ AArch64::Q30, AArch64::Q31
+};
-static DecodeStatus DecodeAddrRegExtendOperand(llvm::MCInst &Inst,
- unsigned OptionHiS,
- uint64_t Address,
- const void *Decoder) {
- // Option{1} must be 1. OptionHiS is made up of {Option{2}, Option{1},
- // S}. Hence we want to check bit 1.
- if (!(OptionHiS & 2))
- return MCDisassembler::Fail;
+static DecodeStatus DecodeVectorRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Addr,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return Fail;
- Inst.addOperand(MCOperand::CreateImm(OptionHiS));
- return MCDisassembler::Success;
+ unsigned Register = VectorDecoderTable[RegNo];
+ Inst.addOperand(MCOperand::CreateReg(Register));
+ return Success;
}
-static DecodeStatus DecodeBitfield32ImmOperand(llvm::MCInst &Inst,
- unsigned Imm6Bits,
- uint64_t Address,
- const void *Decoder) {
- // In the 32-bit variant, bit 6 must be zero. I.e. the immediate must be
- // between 0 and 31.
- if (Imm6Bits > 31)
- return MCDisassembler::Fail;
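+// Q-register pairs wrap around modulo 32 (note Q31_Q0 at the end of the table).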
+static const unsigned QQDecoderTable[] = {
+ AArch64::Q0_Q1, AArch64::Q1_Q2, AArch64::Q2_Q3, AArch64::Q3_Q4,
+ AArch64::Q4_Q5, AArch64::Q5_Q6, AArch64::Q6_Q7, AArch64::Q7_Q8,
+ AArch64::Q8_Q9, AArch64::Q9_Q10, AArch64::Q10_Q11, AArch64::Q11_Q12,
+ AArch64::Q12_Q13, AArch64::Q13_Q14, AArch64::Q14_Q15, AArch64::Q15_Q16,
+ AArch64::Q16_Q17, AArch64::Q17_Q18, AArch64::Q18_Q19, AArch64::Q19_Q20,
+ AArch64::Q20_Q21, AArch64::Q21_Q22, AArch64::Q22_Q23, AArch64::Q23_Q24,
+ AArch64::Q24_Q25, AArch64::Q25_Q26, AArch64::Q26_Q27, AArch64::Q27_Q28,
+ AArch64::Q28_Q29, AArch64::Q29_Q30, AArch64::Q30_Q31, AArch64::Q31_Q0
+};
- Inst.addOperand(MCOperand::CreateImm(Imm6Bits));
- return MCDisassembler::Success;
+static DecodeStatus DecodeQQRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Addr, const void *Decoder) {
+ if (RegNo > 31)
+ return Fail;
+ unsigned Register = QQDecoderTable[RegNo];
+ Inst.addOperand(MCOperand::CreateReg(Register));
+ return Success;
}
-static DecodeStatus DecodeCVT32FixedPosOperand(llvm::MCInst &Inst,
- unsigned Imm6Bits,
- uint64_t Address,
- const void *Decoder) {
- // 1 <= Imm <= 32. Encoded as 64 - Imm so: 63 >= Encoded >= 32.
- if (Imm6Bits < 32)
- return MCDisassembler::Fail;
-
- Inst.addOperand(MCOperand::CreateImm(Imm6Bits));
- return MCDisassembler::Success;
-}
+static const unsigned QQQDecoderTable[] = {
+ AArch64::Q0_Q1_Q2, AArch64::Q1_Q2_Q3, AArch64::Q2_Q3_Q4,
+ AArch64::Q3_Q4_Q5, AArch64::Q4_Q5_Q6, AArch64::Q5_Q6_Q7,
+ AArch64::Q6_Q7_Q8, AArch64::Q7_Q8_Q9, AArch64::Q8_Q9_Q10,
+ AArch64::Q9_Q10_Q11, AArch64::Q10_Q11_Q12, AArch64::Q11_Q12_Q13,
+ AArch64::Q12_Q13_Q14, AArch64::Q13_Q14_Q15, AArch64::Q14_Q15_Q16,
+ AArch64::Q15_Q16_Q17, AArch64::Q16_Q17_Q18, AArch64::Q17_Q18_Q19,
+ AArch64::Q18_Q19_Q20, AArch64::Q19_Q20_Q21, AArch64::Q20_Q21_Q22,
+ AArch64::Q21_Q22_Q23, AArch64::Q22_Q23_Q24, AArch64::Q23_Q24_Q25,
+ AArch64::Q24_Q25_Q26, AArch64::Q25_Q26_Q27, AArch64::Q26_Q27_Q28,
+ AArch64::Q27_Q28_Q29, AArch64::Q28_Q29_Q30, AArch64::Q29_Q30_Q31,
+ AArch64::Q30_Q31_Q0, AArch64::Q31_Q0_Q1
+};
-static DecodeStatus DecodeFPZeroOperand(llvm::MCInst &Inst,
- unsigned RmBits,
- uint64_t Address,
- const void *Decoder) {
- // Any bits are valid in the instruction (they're architecturally ignored),
- // but a code generator should insert 0.
- Inst.addOperand(MCOperand::CreateImm(0));
- return MCDisassembler::Success;
+static DecodeStatus DecodeQQQRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Addr, const void *Decoder) {
+ if (RegNo > 31)
+ return Fail;
+ unsigned Register = QQQDecoderTable[RegNo];
+ Inst.addOperand(MCOperand::CreateReg(Register));
+ return Success;
}
-static DecodeStatus DecodeShiftRightImm8(MCInst &Inst, unsigned Val,
- uint64_t Address,
- const void *Decoder) {
- Inst.addOperand(MCOperand::CreateImm(8 - Val));
- return MCDisassembler::Success;
-}
+static const unsigned QQQQDecoderTable[] = {
+ AArch64::Q0_Q1_Q2_Q3, AArch64::Q1_Q2_Q3_Q4, AArch64::Q2_Q3_Q4_Q5,
+ AArch64::Q3_Q4_Q5_Q6, AArch64::Q4_Q5_Q6_Q7, AArch64::Q5_Q6_Q7_Q8,
+ AArch64::Q6_Q7_Q8_Q9, AArch64::Q7_Q8_Q9_Q10, AArch64::Q8_Q9_Q10_Q11,
+ AArch64::Q9_Q10_Q11_Q12, AArch64::Q10_Q11_Q12_Q13, AArch64::Q11_Q12_Q13_Q14,
+ AArch64::Q12_Q13_Q14_Q15, AArch64::Q13_Q14_Q15_Q16, AArch64::Q14_Q15_Q16_Q17,
+ AArch64::Q15_Q16_Q17_Q18, AArch64::Q16_Q17_Q18_Q19, AArch64::Q17_Q18_Q19_Q20,
+ AArch64::Q18_Q19_Q20_Q21, AArch64::Q19_Q20_Q21_Q22, AArch64::Q20_Q21_Q22_Q23,
+ AArch64::Q21_Q22_Q23_Q24, AArch64::Q22_Q23_Q24_Q25, AArch64::Q23_Q24_Q25_Q26,
+ AArch64::Q24_Q25_Q26_Q27, AArch64::Q25_Q26_Q27_Q28, AArch64::Q26_Q27_Q28_Q29,
+ AArch64::Q27_Q28_Q29_Q30, AArch64::Q28_Q29_Q30_Q31, AArch64::Q29_Q30_Q31_Q0,
+ AArch64::Q30_Q31_Q0_Q1, AArch64::Q31_Q0_Q1_Q2
+};
-static DecodeStatus DecodeShiftRightImm16(MCInst &Inst, unsigned Val,
- uint64_t Address,
- const void *Decoder) {
- Inst.addOperand(MCOperand::CreateImm(16 - Val));
- return MCDisassembler::Success;
+static DecodeStatus DecodeQQQQRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Addr,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return Fail;
+ unsigned Register = QQQQDecoderTable[RegNo];
+ Inst.addOperand(MCOperand::CreateReg(Register));
+ return Success;
}
-static DecodeStatus DecodeShiftRightImm32(MCInst &Inst, unsigned Val,
- uint64_t Address,
- const void *Decoder) {
- Inst.addOperand(MCOperand::CreateImm(32 - Val));
- return MCDisassembler::Success;
-}
+static const unsigned DDDecoderTable[] = {
+ AArch64::D0_D1, AArch64::D1_D2, AArch64::D2_D3, AArch64::D3_D4,
+ AArch64::D4_D5, AArch64::D5_D6, AArch64::D6_D7, AArch64::D7_D8,
+ AArch64::D8_D9, AArch64::D9_D10, AArch64::D10_D11, AArch64::D11_D12,
+ AArch64::D12_D13, AArch64::D13_D14, AArch64::D14_D15, AArch64::D15_D16,
+ AArch64::D16_D17, AArch64::D17_D18, AArch64::D18_D19, AArch64::D19_D20,
+ AArch64::D20_D21, AArch64::D21_D22, AArch64::D22_D23, AArch64::D23_D24,
+ AArch64::D24_D25, AArch64::D25_D26, AArch64::D26_D27, AArch64::D27_D28,
+ AArch64::D28_D29, AArch64::D29_D30, AArch64::D30_D31, AArch64::D31_D0
+};
-static DecodeStatus DecodeShiftRightImm64(MCInst &Inst, unsigned Val,
- uint64_t Address,
- const void *Decoder) {
- Inst.addOperand(MCOperand::CreateImm(64 - Val));
- return MCDisassembler::Success;
+static DecodeStatus DecodeDDRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Addr, const void *Decoder) {
+ if (RegNo > 31)
+ return Fail;
+ unsigned Register = DDDecoderTable[RegNo];
+ Inst.addOperand(MCOperand::CreateReg(Register));
+ return Success;
}
-static DecodeStatus DecodeShiftLeftImm8(MCInst &Inst, unsigned Val,
- uint64_t Address,
- const void *Decoder) {
- if (Val > 7)
- return MCDisassembler::Fail;
+static const unsigned DDDDecoderTable[] = {
+ AArch64::D0_D1_D2, AArch64::D1_D2_D3, AArch64::D2_D3_D4,
+ AArch64::D3_D4_D5, AArch64::D4_D5_D6, AArch64::D5_D6_D7,
+ AArch64::D6_D7_D8, AArch64::D7_D8_D9, AArch64::D8_D9_D10,
+ AArch64::D9_D10_D11, AArch64::D10_D11_D12, AArch64::D11_D12_D13,
+ AArch64::D12_D13_D14, AArch64::D13_D14_D15, AArch64::D14_D15_D16,
+ AArch64::D15_D16_D17, AArch64::D16_D17_D18, AArch64::D17_D18_D19,
+ AArch64::D18_D19_D20, AArch64::D19_D20_D21, AArch64::D20_D21_D22,
+ AArch64::D21_D22_D23, AArch64::D22_D23_D24, AArch64::D23_D24_D25,
+ AArch64::D24_D25_D26, AArch64::D25_D26_D27, AArch64::D26_D27_D28,
+ AArch64::D27_D28_D29, AArch64::D28_D29_D30, AArch64::D29_D30_D31,
+ AArch64::D30_D31_D0, AArch64::D31_D0_D1
+};
- Inst.addOperand(MCOperand::CreateImm(Val));
- return MCDisassembler::Success;
+static DecodeStatus DecodeDDDRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Addr, const void *Decoder) {
+ if (RegNo > 31)
+ return Fail;
+ unsigned Register = DDDDecoderTable[RegNo];
+ Inst.addOperand(MCOperand::CreateReg(Register));
+ return Success;
}
-static DecodeStatus DecodeShiftLeftImm16(MCInst &Inst, unsigned Val,
- uint64_t Address,
- const void *Decoder) {
- if (Val > 15)
- return MCDisassembler::Fail;
+static const unsigned DDDDDecoderTable[] = {
+ AArch64::D0_D1_D2_D3, AArch64::D1_D2_D3_D4, AArch64::D2_D3_D4_D5,
+ AArch64::D3_D4_D5_D6, AArch64::D4_D5_D6_D7, AArch64::D5_D6_D7_D8,
+ AArch64::D6_D7_D8_D9, AArch64::D7_D8_D9_D10, AArch64::D8_D9_D10_D11,
+ AArch64::D9_D10_D11_D12, AArch64::D10_D11_D12_D13, AArch64::D11_D12_D13_D14,
+ AArch64::D12_D13_D14_D15, AArch64::D13_D14_D15_D16, AArch64::D14_D15_D16_D17,
+ AArch64::D15_D16_D17_D18, AArch64::D16_D17_D18_D19, AArch64::D17_D18_D19_D20,
+ AArch64::D18_D19_D20_D21, AArch64::D19_D20_D21_D22, AArch64::D20_D21_D22_D23,
+ AArch64::D21_D22_D23_D24, AArch64::D22_D23_D24_D25, AArch64::D23_D24_D25_D26,
+ AArch64::D24_D25_D26_D27, AArch64::D25_D26_D27_D28, AArch64::D26_D27_D28_D29,
+ AArch64::D27_D28_D29_D30, AArch64::D28_D29_D30_D31, AArch64::D29_D30_D31_D0,
+ AArch64::D30_D31_D0_D1, AArch64::D31_D0_D1_D2
+};
- Inst.addOperand(MCOperand::CreateImm(Val));
- return MCDisassembler::Success;
+static DecodeStatus DecodeDDDDRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Addr,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return Fail;
+ unsigned Register = DDDDDecoderTable[RegNo];
+ Inst.addOperand(MCOperand::CreateReg(Register));
+ return Success;
}
-static DecodeStatus DecodeShiftLeftImm32(MCInst &Inst, unsigned Val,
- uint64_t Address,
- const void *Decoder) {
- if (Val > 31)
- return MCDisassembler::Fail;
-
- Inst.addOperand(MCOperand::CreateImm(Val));
- return MCDisassembler::Success;
+static DecodeStatus DecodeFixedPointScaleImm32(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr,
+ const void *Decoder) {
+ // scale{5} is asserted as 1 in tblgen.
+ Imm |= 0x20;
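+  // With scale{5} forced to 1, Imm lies in [32, 63], so the fbits operand 64 - Imm is in [1, 32].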
+ Inst.addOperand(MCOperand::CreateImm(64 - Imm));
+ return Success;
}
-static DecodeStatus DecodeShiftLeftImm64(MCInst &Inst, unsigned Val,
- uint64_t Address,
- const void *Decoder) {
- if (Val > 63)
- return MCDisassembler::Fail;
-
- Inst.addOperand(MCOperand::CreateImm(Val));
- return MCDisassembler::Success;
+static DecodeStatus DecodeFixedPointScaleImm64(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr,
+ const void *Decoder) {
+ Inst.addOperand(MCOperand::CreateImm(64 - Imm));
+ return Success;
}
-template<int RegWidth>
-static DecodeStatus DecodeMoveWideImmOperand(llvm::MCInst &Inst,
- unsigned FullImm,
- uint64_t Address,
- const void *Decoder) {
- unsigned Imm16 = FullImm & 0xffff;
- unsigned Shift = FullImm >> 16;
+static DecodeStatus DecodePCRelLabel19(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr, const void *Decoder) {
+ int64_t ImmVal = Imm;
+ const AArch64Disassembler *Dis =
+ static_cast<const AArch64Disassembler *>(Decoder);
+
+ // Sign-extend 19-bit immediate.
+ if (ImmVal & (1 << (19 - 1)))
+ ImmVal |= ~((1LL << 19) - 1);
- if (RegWidth == 32 && Shift > 1) return MCDisassembler::Fail;
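+  // The 19-bit field is a word offset: the symbolizer is handed the byte offset (ImmVal << 2),
+  // while the plain immediate operand keeps the raw word value.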
+ if (!Dis->tryAddingSymbolicOperand(Inst, ImmVal << 2, Addr,
+ Inst.getOpcode() != AArch64::LDRXl, 0, 4))
+ Inst.addOperand(MCOperand::CreateImm(ImmVal));
+ return Success;
+}
- Inst.addOperand(MCOperand::CreateImm(Imm16));
- Inst.addOperand(MCOperand::CreateImm(Shift));
- return MCDisassembler::Success;
+static DecodeStatus DecodeMemExtend(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Address, const void *Decoder) {
+ Inst.addOperand(MCOperand::CreateImm((Imm >> 1) & 1));
+ Inst.addOperand(MCOperand::CreateImm(Imm & 1));
+ return Success;
}
-template<int RegWidth>
-static DecodeStatus DecodeLogicalImmOperand(llvm::MCInst &Inst,
- unsigned Bits,
+static DecodeStatus DecodeMRSSystemRegister(llvm::MCInst &Inst, unsigned Imm,
uint64_t Address,
const void *Decoder) {
- uint64_t Imm;
- if (!A64Imms::isLogicalImmBits(RegWidth, Bits, Imm))
- return MCDisassembler::Fail;
-
- Inst.addOperand(MCOperand::CreateImm(Bits));
- return MCDisassembler::Success;
-}
+ const AArch64Disassembler *Dis =
+ static_cast<const AArch64Disassembler *>(Decoder);
+ const MCSubtargetInfo &STI = Dis->getSubtargetInfo();
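+  // System register numbers carry an implicit high bit (0x8000); set it before the name lookup.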
+ Imm |= 0x8000;
+ Inst.addOperand(MCOperand::CreateImm(Imm));
-static DecodeStatus DecodeRegExtendOperand(llvm::MCInst &Inst,
- unsigned ShiftAmount,
- uint64_t Address,
- const void *Decoder) {
- // Only values 0-4 are valid for this 3-bit field
- if (ShiftAmount > 4)
- return MCDisassembler::Fail;
+ bool ValidNamed;
+ (void)AArch64SysReg::MRSMapper(STI.getFeatureBits())
+ .toString(Imm, ValidNamed);
- Inst.addOperand(MCOperand::CreateImm(ShiftAmount));
- return MCDisassembler::Success;
+ return ValidNamed ? Success : Fail;
}
-static DecodeStatus Decode32BitShiftOperand(llvm::MCInst &Inst,
- unsigned ShiftAmount,
+static DecodeStatus DecodeMSRSystemRegister(llvm::MCInst &Inst, unsigned Imm,
uint64_t Address,
const void *Decoder) {
- // Only values below 32 are valid for a 32-bit register
- if (ShiftAmount > 31)
- return MCDisassembler::Fail;
+ const AArch64Disassembler *Dis =
+ static_cast<const AArch64Disassembler *>(Decoder);
+ const MCSubtargetInfo &STI = Dis->getSubtargetInfo();
- Inst.addOperand(MCOperand::CreateImm(ShiftAmount));
- return MCDisassembler::Success;
-}
+ Imm |= 0x8000;
+ Inst.addOperand(MCOperand::CreateImm(Imm));
-static DecodeStatus DecodeBitfieldInstruction(llvm::MCInst &Inst, unsigned Insn,
- uint64_t Address,
- const void *Decoder) {
- unsigned Rd = fieldFromInstruction(Insn, 0, 5);
- unsigned Rn = fieldFromInstruction(Insn, 5, 5);
- unsigned ImmS = fieldFromInstruction(Insn, 10, 6);
- unsigned ImmR = fieldFromInstruction(Insn, 16, 6);
- unsigned SF = fieldFromInstruction(Insn, 31, 1);
-
- // Undef for 0b11 just in case it occurs. Don't want the compiler to optimise
- // out assertions that it thinks should never be hit.
- enum OpcTypes { SBFM = 0, BFM, UBFM, Undef } Opc;
- Opc = (OpcTypes)fieldFromInstruction(Insn, 29, 2);
-
- if (!SF) {
- // ImmR and ImmS must be between 0 and 31 for 32-bit instructions.
- if (ImmR > 31 || ImmS > 31)
- return MCDisassembler::Fail;
- }
-
- if (SF) {
- DecodeGPR64RegisterClass(Inst, Rd, Address, Decoder);
- // BFM MCInsts use Rd as a source too.
- if (Opc == BFM) DecodeGPR64RegisterClass(Inst, Rd, Address, Decoder);
- DecodeGPR64RegisterClass(Inst, Rn, Address, Decoder);
- } else {
- DecodeGPR32RegisterClass(Inst, Rd, Address, Decoder);
- // BFM MCInsts use Rd as a source too.
- if (Opc == BFM) DecodeGPR32RegisterClass(Inst, Rd, Address, Decoder);
- DecodeGPR32RegisterClass(Inst, Rn, Address, Decoder);
- }
-
- // ASR and LSR have more specific patterns so they won't get here:
- assert(!(ImmS == 31 && !SF && Opc != BFM)
- && "shift should have used auto decode");
- assert(!(ImmS == 63 && SF && Opc != BFM)
- && "shift should have used auto decode");
-
- // Extension instructions similarly:
- if (Opc == SBFM && ImmR == 0) {
- assert((ImmS != 7 && ImmS != 15) && "extension got here");
- assert((ImmS != 31 || SF == 0) && "extension got here");
- } else if (Opc == UBFM && ImmR == 0) {
- assert((SF != 0 || (ImmS != 7 && ImmS != 15)) && "extension got here");
- }
-
- if (Opc == UBFM) {
- // It might be a LSL instruction, which actually takes the shift amount
- // itself as an MCInst operand.
- if (SF && (ImmS + 1) % 64 == ImmR) {
- Inst.setOpcode(AArch64::LSLxxi);
- Inst.addOperand(MCOperand::CreateImm(63 - ImmS));
- return MCDisassembler::Success;
- } else if (!SF && (ImmS + 1) % 32 == ImmR) {
- Inst.setOpcode(AArch64::LSLwwi);
- Inst.addOperand(MCOperand::CreateImm(31 - ImmS));
- return MCDisassembler::Success;
- }
- }
-
- // Otherwise it's definitely either an extract or an insert depending on which
- // of ImmR or ImmS is larger.
- unsigned ExtractOp, InsertOp;
- switch (Opc) {
- default: llvm_unreachable("unexpected instruction trying to decode bitfield");
- case SBFM:
- ExtractOp = SF ? AArch64::SBFXxxii : AArch64::SBFXwwii;
- InsertOp = SF ? AArch64::SBFIZxxii : AArch64::SBFIZwwii;
- break;
- case BFM:
- ExtractOp = SF ? AArch64::BFXILxxii : AArch64::BFXILwwii;
- InsertOp = SF ? AArch64::BFIxxii : AArch64::BFIwwii;
- break;
- case UBFM:
- ExtractOp = SF ? AArch64::UBFXxxii : AArch64::UBFXwwii;
- InsertOp = SF ? AArch64::UBFIZxxii : AArch64::UBFIZwwii;
- break;
- }
-
- // Otherwise it's a boring insert or extract
- Inst.addOperand(MCOperand::CreateImm(ImmR));
- Inst.addOperand(MCOperand::CreateImm(ImmS));
-
-
- if (ImmS < ImmR)
- Inst.setOpcode(InsertOp);
- else
- Inst.setOpcode(ExtractOp);
+ bool ValidNamed;
+ (void)AArch64SysReg::MSRMapper(STI.getFeatureBits())
+ .toString(Imm, ValidNamed);
- return MCDisassembler::Success;
+ return ValidNamed ? Success : Fail;
}
static DecodeStatus DecodeFMOVLaneInstruction(llvm::MCInst &Inst, unsigned Insn,
@@ -764,809 +681,879 @@ static DecodeStatus DecodeFMOVLaneInstruction(llvm::MCInst &Inst, unsigned Insn,
// Add the lane
Inst.addOperand(MCOperand::CreateImm(1));
- return MCDisassembler::Success;
+ return Success;
}
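+// The right-shift amount is encoded as Add - shift, so it is recovered here as Add - Imm.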
+static DecodeStatus DecodeVecShiftRImm(llvm::MCInst &Inst, unsigned Imm,
+ unsigned Add) {
+ Inst.addOperand(MCOperand::CreateImm(Add - Imm));
+ return Success;
+}
-static DecodeStatus DecodeLDSTPairInstruction(llvm::MCInst &Inst,
- unsigned Insn,
- uint64_t Address,
- const void *Decoder) {
- DecodeStatus Result = MCDisassembler::Success;
- unsigned Rt = fieldFromInstruction(Insn, 0, 5);
- unsigned Rn = fieldFromInstruction(Insn, 5, 5);
- unsigned Rt2 = fieldFromInstruction(Insn, 10, 5);
- unsigned SImm7 = fieldFromInstruction(Insn, 15, 7);
- unsigned L = fieldFromInstruction(Insn, 22, 1);
- unsigned V = fieldFromInstruction(Insn, 26, 1);
- unsigned Opc = fieldFromInstruction(Insn, 30, 2);
-
- // Not an official name, but it turns out that bit 23 distinguishes indexed
- // from non-indexed operations.
- unsigned Indexed = fieldFromInstruction(Insn, 23, 1);
-
- if (Indexed && L == 0) {
- // The MCInst for an indexed store has an out operand and 4 ins:
- // Rn_wb, Rt, Rt2, Rn, Imm
- DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
- }
-
- // You shouldn't load to the same register twice in an instruction...
- if (L && Rt == Rt2)
- Result = MCDisassembler::SoftFail;
-
- // ... or do any operation that writes-back to a transfer register. But note
- // that "stp xzr, xzr, [sp], #4" is fine because xzr and sp are different.
- if (Indexed && V == 0 && Rn != 31 && (Rt == Rn || Rt2 == Rn))
- Result = MCDisassembler::SoftFail;
-
- // Exactly how we decode the MCInst's registers depends on the Opc and V
- // fields of the instruction. These also obviously determine the size of the
- // operation so we can fill in that information while we're at it.
- if (V) {
- // The instruction operates on the FP/SIMD registers
- switch (Opc) {
- default: return MCDisassembler::Fail;
- case 0:
- DecodeFPR32RegisterClass(Inst, Rt, Address, Decoder);
- DecodeFPR32RegisterClass(Inst, Rt2, Address, Decoder);
- break;
- case 1:
- DecodeFPR64RegisterClass(Inst, Rt, Address, Decoder);
- DecodeFPR64RegisterClass(Inst, Rt2, Address, Decoder);
- break;
- case 2:
- DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder);
- DecodeFPR128RegisterClass(Inst, Rt2, Address, Decoder);
- break;
- }
- } else {
- switch (Opc) {
- default: return MCDisassembler::Fail;
- case 0:
- DecodeGPR32RegisterClass(Inst, Rt, Address, Decoder);
- DecodeGPR32RegisterClass(Inst, Rt2, Address, Decoder);
- break;
- case 1:
- assert(L && "unexpected \"store signed\" attempt");
- DecodeGPR64RegisterClass(Inst, Rt, Address, Decoder);
- DecodeGPR64RegisterClass(Inst, Rt2, Address, Decoder);
- break;
- case 2:
- DecodeGPR64RegisterClass(Inst, Rt, Address, Decoder);
- DecodeGPR64RegisterClass(Inst, Rt2, Address, Decoder);
- break;
- }
- }
-
- if (Indexed && L == 1) {
-    // The MCInst for an indexed load has 3 out operands and 3 ins:
- // Rt, Rt2, Rn_wb, Rt2, Rn, Imm
- DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
- }
-
-
- DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
- Inst.addOperand(MCOperand::CreateImm(SImm7));
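+// The left-shift amount is the immediate reduced modulo the element size, i.e. (Imm + Add) & (Add - 1).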
+static DecodeStatus DecodeVecShiftLImm(llvm::MCInst &Inst, unsigned Imm,
+ unsigned Add) {
+ Inst.addOperand(MCOperand::CreateImm((Imm + Add) & (Add - 1)));
+ return Success;
+}
- return Result;
+static DecodeStatus DecodeVecShiftR64Imm(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr, const void *Decoder) {
+ return DecodeVecShiftRImm(Inst, Imm, 64);
}
-static DecodeStatus DecodeLoadPairExclusiveInstruction(llvm::MCInst &Inst,
- uint32_t Val,
- uint64_t Address,
- const void *Decoder) {
- unsigned Rt = fieldFromInstruction(Val, 0, 5);
- unsigned Rn = fieldFromInstruction(Val, 5, 5);
- unsigned Rt2 = fieldFromInstruction(Val, 10, 5);
- unsigned MemSize = fieldFromInstruction(Val, 30, 2);
-
- DecodeStatus S = MCDisassembler::Success;
- if (Rt == Rt2) S = MCDisassembler::SoftFail;
-
- switch (MemSize) {
- case 2:
- if (!Check(S, DecodeGPR32RegisterClass(Inst, Rt, Address, Decoder)))
- return MCDisassembler::Fail;
- if (!Check(S, DecodeGPR32RegisterClass(Inst, Rt2, Address, Decoder)))
- return MCDisassembler::Fail;
- break;
- case 3:
- if (!Check(S, DecodeGPR64RegisterClass(Inst, Rt, Address, Decoder)))
- return MCDisassembler::Fail;
- if (!Check(S, DecodeGPR64RegisterClass(Inst, Rt2, Address, Decoder)))
- return MCDisassembler::Fail;
- break;
- default:
- llvm_unreachable("Invalid MemSize in DecodeLoadPairExclusiveInstruction");
- }
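+// Narrowing variant: OR in the top bit (0x20) before the common right-shift decode against a 64-bit element.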
+static DecodeStatus DecodeVecShiftR64ImmNarrow(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr,
+ const void *Decoder) {
+ return DecodeVecShiftRImm(Inst, Imm | 0x20, 64);
+}
- if (!Check(S, DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder)))
- return MCDisassembler::Fail;
+static DecodeStatus DecodeVecShiftR32Imm(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr, const void *Decoder) {
+ return DecodeVecShiftRImm(Inst, Imm, 32);
+}
- return S;
+static DecodeStatus DecodeVecShiftR32ImmNarrow(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr,
+ const void *Decoder) {
+ return DecodeVecShiftRImm(Inst, Imm | 0x10, 32);
}
-template<typename SomeNamedImmMapper>
-static DecodeStatus DecodeNamedImmOperand(llvm::MCInst &Inst,
- unsigned Val,
- uint64_t Address,
- const void *Decoder) {
- SomeNamedImmMapper Mapper;
- bool ValidNamed;
- Mapper.toString(Val, ValidNamed);
- if (ValidNamed || Mapper.validImm(Val)) {
- Inst.addOperand(MCOperand::CreateImm(Val));
- return MCDisassembler::Success;
- }
+static DecodeStatus DecodeVecShiftR16Imm(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr, const void *Decoder) {
+ return DecodeVecShiftRImm(Inst, Imm, 16);
+}
- return MCDisassembler::Fail;
+static DecodeStatus DecodeVecShiftR16ImmNarrow(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr,
+ const void *Decoder) {
+ return DecodeVecShiftRImm(Inst, Imm | 0x8, 16);
}
-static DecodeStatus DecodeSysRegOperand(const A64SysReg::SysRegMapper &Mapper,
- llvm::MCInst &Inst,
- unsigned Val,
- uint64_t Address,
- const void *Decoder) {
- bool ValidNamed;
- Mapper.toString(Val, ValidNamed);
+static DecodeStatus DecodeVecShiftR8Imm(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr, const void *Decoder) {
+ return DecodeVecShiftRImm(Inst, Imm, 8);
+}
- Inst.addOperand(MCOperand::CreateImm(Val));
+static DecodeStatus DecodeVecShiftL64Imm(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr, const void *Decoder) {
+ return DecodeVecShiftLImm(Inst, Imm, 64);
+}
- return ValidNamed ? MCDisassembler::Success : MCDisassembler::Fail;
+static DecodeStatus DecodeVecShiftL32Imm(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr, const void *Decoder) {
+ return DecodeVecShiftLImm(Inst, Imm, 32);
}
-static DecodeStatus DecodeMRSOperand(llvm::MCInst &Inst,
- unsigned Val,
- uint64_t Address,
- const void *Decoder) {
- return DecodeSysRegOperand(A64SysReg::MRSMapper(), Inst, Val, Address,
- Decoder);
+static DecodeStatus DecodeVecShiftL16Imm(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr, const void *Decoder) {
+ return DecodeVecShiftLImm(Inst, Imm, 16);
}
-static DecodeStatus DecodeMSROperand(llvm::MCInst &Inst,
- unsigned Val,
- uint64_t Address,
- const void *Decoder) {
- return DecodeSysRegOperand(A64SysReg::MSRMapper(), Inst, Val, Address,
- Decoder);
+static DecodeStatus DecodeVecShiftL8Imm(llvm::MCInst &Inst, unsigned Imm,
+ uint64_t Addr, const void *Decoder) {
+ return DecodeVecShiftLImm(Inst, Imm, 8);
}
-static DecodeStatus DecodeSingleIndexedInstruction(llvm::MCInst &Inst,
- unsigned Insn,
- uint64_t Address,
+static DecodeStatus DecodeThreeAddrSRegInstruction(llvm::MCInst &Inst,
+ uint32_t insn, uint64_t Addr,
const void *Decoder) {
- unsigned Rt = fieldFromInstruction(Insn, 0, 5);
- unsigned Rn = fieldFromInstruction(Insn, 5, 5);
- unsigned Imm9 = fieldFromInstruction(Insn, 12, 9);
-
- unsigned Opc = fieldFromInstruction(Insn, 22, 2);
- unsigned V = fieldFromInstruction(Insn, 26, 1);
- unsigned Size = fieldFromInstruction(Insn, 30, 2);
-
- if (Opc == 0 || (V == 1 && Opc == 2)) {
- // It's a store, the MCInst gets: Rn_wb, Rt, Rn, Imm
- DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
+ unsigned Rd = fieldFromInstruction(insn, 0, 5);
+ unsigned Rn = fieldFromInstruction(insn, 5, 5);
+ unsigned Rm = fieldFromInstruction(insn, 16, 5);
+ unsigned shiftHi = fieldFromInstruction(insn, 22, 2);
+ unsigned shiftLo = fieldFromInstruction(insn, 10, 6);
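+  // The shifted-register operand packs the shift type (bits 23:22) and amount (bits 15:10) into one immediate.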
+ unsigned shift = (shiftHi << 6) | shiftLo;
+ switch (Inst.getOpcode()) {
+ default:
+ return Fail;
+ case AArch64::ADDWrs:
+ case AArch64::ADDSWrs:
+ case AArch64::SUBWrs:
+ case AArch64::SUBSWrs:
+ // if shift == '11' then ReservedValue()
+ if (shiftHi == 0x3)
+ return Fail;
+ // Deliberate fallthrough
+ case AArch64::ANDWrs:
+ case AArch64::ANDSWrs:
+ case AArch64::BICWrs:
+ case AArch64::BICSWrs:
+ case AArch64::ORRWrs:
+ case AArch64::ORNWrs:
+ case AArch64::EORWrs:
+ case AArch64::EONWrs: {
+ // if sf == '0' and imm6<5> == '1' then ReservedValue()
+ if (shiftLo >> 5 == 1)
+ return Fail;
+ DecodeGPR32RegisterClass(Inst, Rd, Addr, Decoder);
+ DecodeGPR32RegisterClass(Inst, Rn, Addr, Decoder);
+ DecodeGPR32RegisterClass(Inst, Rm, Addr, Decoder);
+ break;
}
-
- if (V == 0 && (Opc == 2 || Size == 3)) {
- DecodeGPR64RegisterClass(Inst, Rt, Address, Decoder);
- } else if (V == 0) {
- DecodeGPR32RegisterClass(Inst, Rt, Address, Decoder);
- } else if (V == 1 && (Opc & 2)) {
- DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder);
- } else {
- switch (Size) {
- case 0:
- DecodeFPR8RegisterClass(Inst, Rt, Address, Decoder);
- break;
- case 1:
- DecodeFPR16RegisterClass(Inst, Rt, Address, Decoder);
- break;
- case 2:
- DecodeFPR32RegisterClass(Inst, Rt, Address, Decoder);
- break;
- case 3:
- DecodeFPR64RegisterClass(Inst, Rt, Address, Decoder);
- break;
- }
+ case AArch64::ADDXrs:
+ case AArch64::ADDSXrs:
+ case AArch64::SUBXrs:
+ case AArch64::SUBSXrs:
+ // if shift == '11' then ReservedValue()
+ if (shiftHi == 0x3)
+ return Fail;
+ // Deliberate fallthrough
+ case AArch64::ANDXrs:
+ case AArch64::ANDSXrs:
+ case AArch64::BICXrs:
+ case AArch64::BICSXrs:
+ case AArch64::ORRXrs:
+ case AArch64::ORNXrs:
+ case AArch64::EORXrs:
+ case AArch64::EONXrs:
+ DecodeGPR64RegisterClass(Inst, Rd, Addr, Decoder);
+ DecodeGPR64RegisterClass(Inst, Rn, Addr, Decoder);
+ DecodeGPR64RegisterClass(Inst, Rm, Addr, Decoder);
+ break;
}
- if (Opc != 0 && (V != 1 || Opc != 2)) {
- // It's a load, the MCInst gets: Rt, Rn_wb, Rn, Imm
- DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
- }
+ Inst.addOperand(MCOperand::CreateImm(shift));
+ return Success;
+}
- DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
+static DecodeStatus DecodeMoveImmInstruction(llvm::MCInst &Inst, uint32_t insn,
+ uint64_t Addr,
+ const void *Decoder) {
+ unsigned Rd = fieldFromInstruction(insn, 0, 5);
+ unsigned imm = fieldFromInstruction(insn, 5, 16);
+ unsigned shift = fieldFromInstruction(insn, 21, 2);
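+  // The 2-bit hw field selects a 16-bit chunk; convert it to a bit shift of 0, 16, 32 or 48.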
+ shift <<= 4;
+ switch (Inst.getOpcode()) {
+ default:
+ return Fail;
+ case AArch64::MOVZWi:
+ case AArch64::MOVNWi:
+ case AArch64::MOVKWi:
+ if (shift & (1U << 5))
+ return Fail;
+ DecodeGPR32RegisterClass(Inst, Rd, Addr, Decoder);
+ break;
+ case AArch64::MOVZXi:
+ case AArch64::MOVNXi:
+ case AArch64::MOVKXi:
+ DecodeGPR64RegisterClass(Inst, Rd, Addr, Decoder);
+ break;
+ }
- Inst.addOperand(MCOperand::CreateImm(Imm9));
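+  // MOVK both reads and writes Rd, so repeat the destination as a tied source operand.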
+ if (Inst.getOpcode() == AArch64::MOVKWi ||
+ Inst.getOpcode() == AArch64::MOVKXi)
+ Inst.addOperand(Inst.getOperand(0));
-  // N.b. The official documentation says unpredictable if Rt == Rn, but this
- // takes place at the architectural rather than encoding level:
- //
- // "STR xzr, [sp], #4" is perfectly valid.
- if (V == 0 && Rt == Rn && Rn != 31)
- return MCDisassembler::SoftFail;
- else
- return MCDisassembler::Success;
+ Inst.addOperand(MCOperand::CreateImm(imm));
+ Inst.addOperand(MCOperand::CreateImm(shift));
+ return Success;
}
-static MCDisassembler *createAArch64Disassembler(const Target &T,
- const MCSubtargetInfo &STI) {
- return new AArch64Disassembler(STI, T.createMCRegInfo(""));
-}
+static DecodeStatus DecodeUnsignedLdStInstruction(llvm::MCInst &Inst,
+ uint32_t insn, uint64_t Addr,
+ const void *Decoder) {
+ unsigned Rt = fieldFromInstruction(insn, 0, 5);
+ unsigned Rn = fieldFromInstruction(insn, 5, 5);
+ unsigned offset = fieldFromInstruction(insn, 10, 12);
+ const AArch64Disassembler *Dis =
+ static_cast<const AArch64Disassembler *>(Decoder);
-extern "C" void LLVMInitializeAArch64Disassembler() {
- TargetRegistry::RegisterMCDisassembler(TheAArch64Target,
- createAArch64Disassembler);
-}
+ switch (Inst.getOpcode()) {
+ default:
+ return Fail;
+ case AArch64::PRFMui:
+ // Rt is an immediate in prefetch.
+ Inst.addOperand(MCOperand::CreateImm(Rt));
+ break;
+ case AArch64::STRBBui:
+ case AArch64::LDRBBui:
+ case AArch64::LDRSBWui:
+ case AArch64::STRHHui:
+ case AArch64::LDRHHui:
+ case AArch64::LDRSHWui:
+ case AArch64::STRWui:
+ case AArch64::LDRWui:
+ DecodeGPR32RegisterClass(Inst, Rt, Addr, Decoder);
+ break;
+ case AArch64::LDRSBXui:
+ case AArch64::LDRSHXui:
+ case AArch64::LDRSWui:
+ case AArch64::STRXui:
+ case AArch64::LDRXui:
+ DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
+ break;
+ case AArch64::LDRQui:
+ case AArch64::STRQui:
+ DecodeFPR128RegisterClass(Inst, Rt, Addr, Decoder);
+ break;
+ case AArch64::LDRDui:
+ case AArch64::STRDui:
+ DecodeFPR64RegisterClass(Inst, Rt, Addr, Decoder);
+ break;
+ case AArch64::LDRSui:
+ case AArch64::STRSui:
+ DecodeFPR32RegisterClass(Inst, Rt, Addr, Decoder);
+ break;
+ case AArch64::LDRHui:
+ case AArch64::STRHui:
+ DecodeFPR16RegisterClass(Inst, Rt, Addr, Decoder);
+ break;
+ case AArch64::LDRBui:
+ case AArch64::STRBui:
+ DecodeFPR8RegisterClass(Inst, Rt, Addr, Decoder);
+ break;
+ }
-template <A64SE::ShiftExtSpecifiers Ext, bool IsHalf>
-static DecodeStatus
-DecodeNeonMovImmShiftOperand(llvm::MCInst &Inst, unsigned ShiftAmount,
- uint64_t Address, const void *Decoder) {
- bool IsLSL = false;
- if (Ext == A64SE::LSL)
- IsLSL = true;
- else if (Ext != A64SE::MSL)
- return MCDisassembler::Fail;
-
-  // MSL and LSLH accept an encoded shift amount of 0 or 1.
- if ((!IsLSL || (IsLSL && IsHalf)) && ShiftAmount != 0 && ShiftAmount != 1)
- return MCDisassembler::Fail;
-
- // LSL accepts encoded shift amount 0, 1, 2 or 3.
- if (IsLSL && ShiftAmount > 3)
- return MCDisassembler::Fail;
-
- Inst.addOperand(MCOperand::CreateImm(ShiftAmount));
- return MCDisassembler::Success;
+ DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
+ if (!Dis->tryAddingSymbolicOperand(Inst, offset, Addr, Fail, 0, 4))
+ Inst.addOperand(MCOperand::CreateImm(offset));
+ return Success;
}
-// Decode post-index vector load/store instructions.
-// This is necessary as we need to decode Rm: if Rm == 0b11111, the last
-// operand is an immediate equal to the length of the vector list in bytes,
-// or Rm is decoded to a GPR64noxzr register.
-static DecodeStatus DecodeVLDSTPostInstruction(MCInst &Inst, unsigned Insn,
- uint64_t Address,
- const void *Decoder) {
- unsigned Rt = fieldFromInstruction(Insn, 0, 5);
- unsigned Rn = fieldFromInstruction(Insn, 5, 5);
- unsigned Rm = fieldFromInstruction(Insn, 16, 5);
- unsigned Opcode = fieldFromInstruction(Insn, 12, 4);
- unsigned IsLoad = fieldFromInstruction(Insn, 22, 1);
- // 0 for 64bit vector list, 1 for 128bit vector list
- unsigned Is128BitVec = fieldFromInstruction(Insn, 30, 1);
+static DecodeStatus DecodeSignedLdStInstruction(llvm::MCInst &Inst,
+ uint32_t insn, uint64_t Addr,
+ const void *Decoder) {
+ unsigned Rt = fieldFromInstruction(insn, 0, 5);
+ unsigned Rn = fieldFromInstruction(insn, 5, 5);
+ int64_t offset = fieldFromInstruction(insn, 12, 9);
- unsigned NumVecs;
- switch (Opcode) {
- case 0: // ld4/st4
- case 2: // ld1/st1 with 4 vectors
- NumVecs = 4; break;
- case 4: // ld3/st3
- case 6: // ld1/st1 with 3 vectors
- NumVecs = 3; break;
- case 7: // ld1/st1 with 1 vector
- NumVecs = 1; break;
- case 8: // ld2/st2
- case 10: // ld1/st1 with 2 vectors
- NumVecs = 2; break;
+  // offset is a 9-bit signed immediate; sign-extend it to fill the
+  // 64-bit value.
+ if (offset & (1 << (9 - 1)))
+ offset |= ~((1LL << 9) - 1);
+
+ // First operand is always the writeback to the address register, if needed.
+ switch (Inst.getOpcode()) {
default:
- llvm_unreachable("Invalid opcode for post-index load/store instructions");
+ break;
+ case AArch64::LDRSBWpre:
+ case AArch64::LDRSHWpre:
+ case AArch64::STRBBpre:
+ case AArch64::LDRBBpre:
+ case AArch64::STRHHpre:
+ case AArch64::LDRHHpre:
+ case AArch64::STRWpre:
+ case AArch64::LDRWpre:
+ case AArch64::LDRSBWpost:
+ case AArch64::LDRSHWpost:
+ case AArch64::STRBBpost:
+ case AArch64::LDRBBpost:
+ case AArch64::STRHHpost:
+ case AArch64::LDRHHpost:
+ case AArch64::STRWpost:
+ case AArch64::LDRWpost:
+ case AArch64::LDRSBXpre:
+ case AArch64::LDRSHXpre:
+ case AArch64::STRXpre:
+ case AArch64::LDRSWpre:
+ case AArch64::LDRXpre:
+ case AArch64::LDRSBXpost:
+ case AArch64::LDRSHXpost:
+ case AArch64::STRXpost:
+ case AArch64::LDRSWpost:
+ case AArch64::LDRXpost:
+ case AArch64::LDRQpre:
+ case AArch64::STRQpre:
+ case AArch64::LDRQpost:
+ case AArch64::STRQpost:
+ case AArch64::LDRDpre:
+ case AArch64::STRDpre:
+ case AArch64::LDRDpost:
+ case AArch64::STRDpost:
+ case AArch64::LDRSpre:
+ case AArch64::STRSpre:
+ case AArch64::LDRSpost:
+ case AArch64::STRSpost:
+ case AArch64::LDRHpre:
+ case AArch64::STRHpre:
+ case AArch64::LDRHpost:
+ case AArch64::STRHpost:
+ case AArch64::LDRBpre:
+ case AArch64::STRBpre:
+ case AArch64::LDRBpost:
+ case AArch64::STRBpost:
+ DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
+ break;
}
- // Decode vector list of 1/2/3/4 vectors for load instructions.
- if (IsLoad) {
- switch (NumVecs) {
- case 1:
- Is128BitVec ? DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder)
- : DecodeFPR64RegisterClass(Inst, Rt, Address, Decoder);
- break;
- case 2:
- Is128BitVec ? DecodeQPairRegisterClass(Inst, Rt, Address, Decoder)
- : DecodeDPairRegisterClass(Inst, Rt, Address, Decoder);
- break;
- case 3:
- Is128BitVec ? DecodeQTripleRegisterClass(Inst, Rt, Address, Decoder)
- : DecodeDTripleRegisterClass(Inst, Rt, Address, Decoder);
- break;
- case 4:
- Is128BitVec ? DecodeQQuadRegisterClass(Inst, Rt, Address, Decoder)
- : DecodeDQuadRegisterClass(Inst, Rt, Address, Decoder);
- break;
- }
+ switch (Inst.getOpcode()) {
+ default:
+ return Fail;
+ case AArch64::PRFUMi:
+ // Rt is an immediate in prefetch.
+ Inst.addOperand(MCOperand::CreateImm(Rt));
+ break;
+ case AArch64::STURBBi:
+ case AArch64::LDURBBi:
+ case AArch64::LDURSBWi:
+ case AArch64::STURHHi:
+ case AArch64::LDURHHi:
+ case AArch64::LDURSHWi:
+ case AArch64::STURWi:
+ case AArch64::LDURWi:
+ case AArch64::LDTRSBWi:
+ case AArch64::LDTRSHWi:
+ case AArch64::STTRWi:
+ case AArch64::LDTRWi:
+ case AArch64::STTRHi:
+ case AArch64::LDTRHi:
+ case AArch64::LDTRBi:
+ case AArch64::STTRBi:
+ case AArch64::LDRSBWpre:
+ case AArch64::LDRSHWpre:
+ case AArch64::STRBBpre:
+ case AArch64::LDRBBpre:
+ case AArch64::STRHHpre:
+ case AArch64::LDRHHpre:
+ case AArch64::STRWpre:
+ case AArch64::LDRWpre:
+ case AArch64::LDRSBWpost:
+ case AArch64::LDRSHWpost:
+ case AArch64::STRBBpost:
+ case AArch64::LDRBBpost:
+ case AArch64::STRHHpost:
+ case AArch64::LDRHHpost:
+ case AArch64::STRWpost:
+ case AArch64::LDRWpost:
+ DecodeGPR32RegisterClass(Inst, Rt, Addr, Decoder);
+ break;
+ case AArch64::LDURSBXi:
+ case AArch64::LDURSHXi:
+ case AArch64::LDURSWi:
+ case AArch64::STURXi:
+ case AArch64::LDURXi:
+ case AArch64::LDTRSBXi:
+ case AArch64::LDTRSHXi:
+ case AArch64::LDTRSWi:
+ case AArch64::STTRXi:
+ case AArch64::LDTRXi:
+ case AArch64::LDRSBXpre:
+ case AArch64::LDRSHXpre:
+ case AArch64::STRXpre:
+ case AArch64::LDRSWpre:
+ case AArch64::LDRXpre:
+ case AArch64::LDRSBXpost:
+ case AArch64::LDRSHXpost:
+ case AArch64::STRXpost:
+ case AArch64::LDRSWpost:
+ case AArch64::LDRXpost:
+ DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
+ break;
+ case AArch64::LDURQi:
+ case AArch64::STURQi:
+ case AArch64::LDRQpre:
+ case AArch64::STRQpre:
+ case AArch64::LDRQpost:
+ case AArch64::STRQpost:
+ DecodeFPR128RegisterClass(Inst, Rt, Addr, Decoder);
+ break;
+ case AArch64::LDURDi:
+ case AArch64::STURDi:
+ case AArch64::LDRDpre:
+ case AArch64::STRDpre:
+ case AArch64::LDRDpost:
+ case AArch64::STRDpost:
+ DecodeFPR64RegisterClass(Inst, Rt, Addr, Decoder);
+ break;
+ case AArch64::LDURSi:
+ case AArch64::STURSi:
+ case AArch64::LDRSpre:
+ case AArch64::STRSpre:
+ case AArch64::LDRSpost:
+ case AArch64::STRSpost:
+ DecodeFPR32RegisterClass(Inst, Rt, Addr, Decoder);
+ break;
+ case AArch64::LDURHi:
+ case AArch64::STURHi:
+ case AArch64::LDRHpre:
+ case AArch64::STRHpre:
+ case AArch64::LDRHpost:
+ case AArch64::STRHpost:
+ DecodeFPR16RegisterClass(Inst, Rt, Addr, Decoder);
+ break;
+ case AArch64::LDURBi:
+ case AArch64::STURBi:
+ case AArch64::LDRBpre:
+ case AArch64::STRBpre:
+ case AArch64::LDRBpost:
+ case AArch64::STRBpost:
+ DecodeFPR8RegisterClass(Inst, Rt, Addr, Decoder);
+ break;
}
- // Decode write back register, which is equal to Rn.
- DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
- DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
-
-  if (Rm == 31) // If Rm is 0b11111, add the vector list length in bytes
- Inst.addOperand(MCOperand::CreateImm(NumVecs * (Is128BitVec ? 16 : 8)));
- else // Decode Rm
- DecodeGPR64noxzrRegisterClass(Inst, Rm, Address, Decoder);
-
-  // Decode vector list of 1/2/3/4 vectors for store instructions.
- if (!IsLoad) {
- switch (NumVecs) {
- case 1:
- Is128BitVec ? DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder)
- : DecodeFPR64RegisterClass(Inst, Rt, Address, Decoder);
- break;
- case 2:
- Is128BitVec ? DecodeQPairRegisterClass(Inst, Rt, Address, Decoder)
- : DecodeDPairRegisterClass(Inst, Rt, Address, Decoder);
- break;
- case 3:
- Is128BitVec ? DecodeQTripleRegisterClass(Inst, Rt, Address, Decoder)
- : DecodeDTripleRegisterClass(Inst, Rt, Address, Decoder);
- break;
- case 4:
- Is128BitVec ? DecodeQQuadRegisterClass(Inst, Rt, Address, Decoder)
- : DecodeDQuadRegisterClass(Inst, Rt, Address, Decoder);
- break;
- }
- }
+ DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
+ Inst.addOperand(MCOperand::CreateImm(offset));
+
+ bool IsLoad = fieldFromInstruction(insn, 22, 1);
+ bool IsIndexed = fieldFromInstruction(insn, 10, 2) != 0;
+ bool IsFP = fieldFromInstruction(insn, 26, 1);
- return MCDisassembler::Success;
+ // Cannot write back to a transfer register (but xzr != sp).
+ if (IsLoad && IsIndexed && !IsFP && Rn != 31 && Rt == Rn)
+ return SoftFail;
+
+ return Success;
}
-// Decode post-index vector load/store lane instructions.
-// This is necessary as we need to decode Rm: if Rm == 0b11111, the last
-// operand is an immediate equal to the length of the changed bytes,
-// or Rm is decoded to a GPR64noxzr register.
-static DecodeStatus DecodeVLDSTLanePostInstruction(MCInst &Inst, unsigned Insn,
- uint64_t Address,
+static DecodeStatus DecodeExclusiveLdStInstruction(llvm::MCInst &Inst,
+ uint32_t insn, uint64_t Addr,
const void *Decoder) {
- bool Is64bitVec = false;
- bool IsLoadDup = false;
- bool IsLoad = false;
- // The total number of bytes transferred.
- // TransferBytes = NumVecs * OneLaneBytes
- unsigned TransferBytes = 0;
- unsigned NumVecs = 0;
- unsigned Opc = Inst.getOpcode();
- switch (Opc) {
- case AArch64::LD1R_WB_8B_fixed: case AArch64::LD1R_WB_8B_register:
- case AArch64::LD1R_WB_4H_fixed: case AArch64::LD1R_WB_4H_register:
- case AArch64::LD1R_WB_2S_fixed: case AArch64::LD1R_WB_2S_register:
- case AArch64::LD1R_WB_1D_fixed: case AArch64::LD1R_WB_1D_register: {
- switch (Opc) {
- case AArch64::LD1R_WB_8B_fixed: case AArch64::LD1R_WB_8B_register:
- TransferBytes = 1; break;
- case AArch64::LD1R_WB_4H_fixed: case AArch64::LD1R_WB_4H_register:
- TransferBytes = 2; break;
- case AArch64::LD1R_WB_2S_fixed: case AArch64::LD1R_WB_2S_register:
- TransferBytes = 4; break;
- case AArch64::LD1R_WB_1D_fixed: case AArch64::LD1R_WB_1D_register:
- TransferBytes = 8; break;
- }
- Is64bitVec = true;
- IsLoadDup = true;
- NumVecs = 1;
- break;
- }
+ unsigned Rt = fieldFromInstruction(insn, 0, 5);
+ unsigned Rn = fieldFromInstruction(insn, 5, 5);
+ unsigned Rt2 = fieldFromInstruction(insn, 10, 5);
+ unsigned Rs = fieldFromInstruction(insn, 16, 5);
- case AArch64::LD1R_WB_16B_fixed: case AArch64::LD1R_WB_16B_register:
- case AArch64::LD1R_WB_8H_fixed: case AArch64::LD1R_WB_8H_register:
- case AArch64::LD1R_WB_4S_fixed: case AArch64::LD1R_WB_4S_register:
- case AArch64::LD1R_WB_2D_fixed: case AArch64::LD1R_WB_2D_register: {
- switch (Opc) {
- case AArch64::LD1R_WB_16B_fixed: case AArch64::LD1R_WB_16B_register:
- TransferBytes = 1; break;
- case AArch64::LD1R_WB_8H_fixed: case AArch64::LD1R_WB_8H_register:
- TransferBytes = 2; break;
- case AArch64::LD1R_WB_4S_fixed: case AArch64::LD1R_WB_4S_register:
- TransferBytes = 4; break;
- case AArch64::LD1R_WB_2D_fixed: case AArch64::LD1R_WB_2D_register:
- TransferBytes = 8; break;
- }
- IsLoadDup = true;
- NumVecs = 1;
+ unsigned Opcode = Inst.getOpcode();
+ switch (Opcode) {
+ default:
+ return Fail;
+ case AArch64::STLXRW:
+ case AArch64::STLXRB:
+ case AArch64::STLXRH:
+ case AArch64::STXRW:
+ case AArch64::STXRB:
+ case AArch64::STXRH:
+ DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
+ // FALLTHROUGH
+ case AArch64::LDARW:
+ case AArch64::LDARB:
+ case AArch64::LDARH:
+ case AArch64::LDAXRW:
+ case AArch64::LDAXRB:
+ case AArch64::LDAXRH:
+ case AArch64::LDXRW:
+ case AArch64::LDXRB:
+ case AArch64::LDXRH:
+ case AArch64::STLRW:
+ case AArch64::STLRB:
+ case AArch64::STLRH:
+ DecodeGPR32RegisterClass(Inst, Rt, Addr, Decoder);
break;
- }
-
- case AArch64::LD2R_WB_8B_fixed: case AArch64::LD2R_WB_8B_register:
- case AArch64::LD2R_WB_4H_fixed: case AArch64::LD2R_WB_4H_register:
- case AArch64::LD2R_WB_2S_fixed: case AArch64::LD2R_WB_2S_register:
- case AArch64::LD2R_WB_1D_fixed: case AArch64::LD2R_WB_1D_register: {
- switch (Opc) {
- case AArch64::LD2R_WB_8B_fixed: case AArch64::LD2R_WB_8B_register:
- TransferBytes = 2; break;
- case AArch64::LD2R_WB_4H_fixed: case AArch64::LD2R_WB_4H_register:
- TransferBytes = 4; break;
- case AArch64::LD2R_WB_2S_fixed: case AArch64::LD2R_WB_2S_register:
- TransferBytes = 8; break;
- case AArch64::LD2R_WB_1D_fixed: case AArch64::LD2R_WB_1D_register:
- TransferBytes = 16; break;
- }
- Is64bitVec = true;
- IsLoadDup = true;
- NumVecs = 2;
+ case AArch64::STLXRX:
+ case AArch64::STXRX:
+ DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
+ // FALLTHROUGH
+ case AArch64::LDARX:
+ case AArch64::LDAXRX:
+ case AArch64::LDXRX:
+ case AArch64::STLRX:
+ DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
break;
- }
-
- case AArch64::LD2R_WB_16B_fixed: case AArch64::LD2R_WB_16B_register:
- case AArch64::LD2R_WB_8H_fixed: case AArch64::LD2R_WB_8H_register:
- case AArch64::LD2R_WB_4S_fixed: case AArch64::LD2R_WB_4S_register:
- case AArch64::LD2R_WB_2D_fixed: case AArch64::LD2R_WB_2D_register: {
- switch (Opc) {
- case AArch64::LD2R_WB_16B_fixed: case AArch64::LD2R_WB_16B_register:
- TransferBytes = 2; break;
- case AArch64::LD2R_WB_8H_fixed: case AArch64::LD2R_WB_8H_register:
- TransferBytes = 4; break;
- case AArch64::LD2R_WB_4S_fixed: case AArch64::LD2R_WB_4S_register:
- TransferBytes = 8; break;
- case AArch64::LD2R_WB_2D_fixed: case AArch64::LD2R_WB_2D_register:
- TransferBytes = 16; break;
- }
- IsLoadDup = true;
- NumVecs = 2;
+ case AArch64::STLXPW:
+ case AArch64::STXPW:
+ DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
+ // FALLTHROUGH
+ case AArch64::LDAXPW:
+ case AArch64::LDXPW:
+ DecodeGPR32RegisterClass(Inst, Rt, Addr, Decoder);
+ DecodeGPR32RegisterClass(Inst, Rt2, Addr, Decoder);
break;
- }
-
- case AArch64::LD3R_WB_8B_fixed: case AArch64::LD3R_WB_8B_register:
- case AArch64::LD3R_WB_4H_fixed: case AArch64::LD3R_WB_4H_register:
- case AArch64::LD3R_WB_2S_fixed: case AArch64::LD3R_WB_2S_register:
- case AArch64::LD3R_WB_1D_fixed: case AArch64::LD3R_WB_1D_register: {
- switch (Opc) {
- case AArch64::LD3R_WB_8B_fixed: case AArch64::LD3R_WB_8B_register:
- TransferBytes = 3; break;
- case AArch64::LD3R_WB_4H_fixed: case AArch64::LD3R_WB_4H_register:
- TransferBytes = 6; break;
- case AArch64::LD3R_WB_2S_fixed: case AArch64::LD3R_WB_2S_register:
- TransferBytes = 12; break;
- case AArch64::LD3R_WB_1D_fixed: case AArch64::LD3R_WB_1D_register:
- TransferBytes = 24; break;
- }
- Is64bitVec = true;
- IsLoadDup = true;
- NumVecs = 3;
+ case AArch64::STLXPX:
+ case AArch64::STXPX:
+ DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
+ // FALLTHROUGH
+ case AArch64::LDAXPX:
+ case AArch64::LDXPX:
+ DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
+ DecodeGPR64RegisterClass(Inst, Rt2, Addr, Decoder);
break;
}
- case AArch64::LD3R_WB_16B_fixed: case AArch64::LD3R_WB_16B_register:
- case AArch64::LD3R_WB_4S_fixed: case AArch64::LD3R_WB_8H_register:
- case AArch64::LD3R_WB_8H_fixed: case AArch64::LD3R_WB_4S_register:
- case AArch64::LD3R_WB_2D_fixed: case AArch64::LD3R_WB_2D_register: {
- switch (Opc) {
- case AArch64::LD3R_WB_16B_fixed: case AArch64::LD3R_WB_16B_register:
- TransferBytes = 3; break;
- case AArch64::LD3R_WB_8H_fixed: case AArch64::LD3R_WB_8H_register:
- TransferBytes = 6; break;
- case AArch64::LD3R_WB_4S_fixed: case AArch64::LD3R_WB_4S_register:
- TransferBytes = 12; break;
- case AArch64::LD3R_WB_2D_fixed: case AArch64::LD3R_WB_2D_register:
- TransferBytes = 24; break;
- }
- IsLoadDup = true;
- NumVecs = 3;
- break;
- }
+ DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
- case AArch64::LD4R_WB_8B_fixed: case AArch64::LD4R_WB_8B_register:
- case AArch64::LD4R_WB_4H_fixed: case AArch64::LD4R_WB_4H_register:
- case AArch64::LD4R_WB_2S_fixed: case AArch64::LD4R_WB_2S_register:
- case AArch64::LD4R_WB_1D_fixed: case AArch64::LD4R_WB_1D_register: {
- switch (Opc) {
- case AArch64::LD4R_WB_8B_fixed: case AArch64::LD4R_WB_8B_register:
- TransferBytes = 4; break;
- case AArch64::LD4R_WB_4H_fixed: case AArch64::LD4R_WB_4H_register:
- TransferBytes = 8; break;
- case AArch64::LD4R_WB_2S_fixed: case AArch64::LD4R_WB_2S_register:
- TransferBytes = 16; break;
- case AArch64::LD4R_WB_1D_fixed: case AArch64::LD4R_WB_1D_register:
- TransferBytes = 32; break;
- }
- Is64bitVec = true;
- IsLoadDup = true;
- NumVecs = 4;
- break;
- }
+ // You shouldn't load to the same register twice in an instruction...
+ if ((Opcode == AArch64::LDAXPW || Opcode == AArch64::LDXPW ||
+ Opcode == AArch64::LDAXPX || Opcode == AArch64::LDXPX) &&
+ Rt == Rt2)
+ return SoftFail;
- case AArch64::LD4R_WB_16B_fixed: case AArch64::LD4R_WB_16B_register:
- case AArch64::LD4R_WB_4S_fixed: case AArch64::LD4R_WB_8H_register:
- case AArch64::LD4R_WB_8H_fixed: case AArch64::LD4R_WB_4S_register:
- case AArch64::LD4R_WB_2D_fixed: case AArch64::LD4R_WB_2D_register: {
- switch (Opc) {
- case AArch64::LD4R_WB_16B_fixed: case AArch64::LD4R_WB_16B_register:
- TransferBytes = 4; break;
- case AArch64::LD4R_WB_8H_fixed: case AArch64::LD4R_WB_8H_register:
- TransferBytes = 8; break;
- case AArch64::LD4R_WB_4S_fixed: case AArch64::LD4R_WB_4S_register:
- TransferBytes = 16; break;
- case AArch64::LD4R_WB_2D_fixed: case AArch64::LD4R_WB_2D_register:
- TransferBytes = 32; break;
- }
- IsLoadDup = true;
- NumVecs = 4;
- break;
- }
+ return Success;
+}
- case AArch64::LD1LN_WB_B_fixed: case AArch64::LD1LN_WB_B_register:
- case AArch64::LD1LN_WB_H_fixed: case AArch64::LD1LN_WB_H_register:
- case AArch64::LD1LN_WB_S_fixed: case AArch64::LD1LN_WB_S_register:
- case AArch64::LD1LN_WB_D_fixed: case AArch64::LD1LN_WB_D_register: {
- switch (Opc) {
- case AArch64::LD1LN_WB_B_fixed: case AArch64::LD1LN_WB_B_register:
- TransferBytes = 1; break;
- case AArch64::LD1LN_WB_H_fixed: case AArch64::LD1LN_WB_H_register:
- TransferBytes = 2; break;
- case AArch64::LD1LN_WB_S_fixed: case AArch64::LD1LN_WB_S_register:
- TransferBytes = 4; break;
- case AArch64::LD1LN_WB_D_fixed: case AArch64::LD1LN_WB_D_register:
- TransferBytes = 8; break;
- }
- IsLoad = true;
- NumVecs = 1;
- break;
- }
+static DecodeStatus DecodePairLdStInstruction(llvm::MCInst &Inst, uint32_t insn,
+ uint64_t Addr,
+ const void *Decoder) {
+ unsigned Rt = fieldFromInstruction(insn, 0, 5);
+ unsigned Rn = fieldFromInstruction(insn, 5, 5);
+ unsigned Rt2 = fieldFromInstruction(insn, 10, 5);
+ int64_t offset = fieldFromInstruction(insn, 15, 7);
+ bool IsLoad = fieldFromInstruction(insn, 22, 1);
- case AArch64::LD2LN_WB_B_fixed: case AArch64::LD2LN_WB_B_register:
- case AArch64::LD2LN_WB_H_fixed: case AArch64::LD2LN_WB_H_register:
- case AArch64::LD2LN_WB_S_fixed: case AArch64::LD2LN_WB_S_register:
- case AArch64::LD2LN_WB_D_fixed: case AArch64::LD2LN_WB_D_register: {
- switch (Opc) {
- case AArch64::LD2LN_WB_B_fixed: case AArch64::LD2LN_WB_B_register:
- TransferBytes = 2; break;
- case AArch64::LD2LN_WB_H_fixed: case AArch64::LD2LN_WB_H_register:
- TransferBytes = 4; break;
- case AArch64::LD2LN_WB_S_fixed: case AArch64::LD2LN_WB_S_register:
- TransferBytes = 8; break;
- case AArch64::LD2LN_WB_D_fixed: case AArch64::LD2LN_WB_D_register:
- TransferBytes = 16; break;
- }
- IsLoad = true;
- NumVecs = 2;
- break;
- }
+  // offset is a 7-bit signed immediate; sign-extend it to fill the
+  // 64-bit value.
+ if (offset & (1 << (7 - 1)))
+ offset |= ~((1LL << 7) - 1);
- case AArch64::LD3LN_WB_B_fixed: case AArch64::LD3LN_WB_B_register:
- case AArch64::LD3LN_WB_H_fixed: case AArch64::LD3LN_WB_H_register:
- case AArch64::LD3LN_WB_S_fixed: case AArch64::LD3LN_WB_S_register:
- case AArch64::LD3LN_WB_D_fixed: case AArch64::LD3LN_WB_D_register: {
- switch (Opc) {
- case AArch64::LD3LN_WB_B_fixed: case AArch64::LD3LN_WB_B_register:
- TransferBytes = 3; break;
- case AArch64::LD3LN_WB_H_fixed: case AArch64::LD3LN_WB_H_register:
- TransferBytes = 6; break;
- case AArch64::LD3LN_WB_S_fixed: case AArch64::LD3LN_WB_S_register:
- TransferBytes = 12; break;
- case AArch64::LD3LN_WB_D_fixed: case AArch64::LD3LN_WB_D_register:
- TransferBytes = 24; break;
- }
- IsLoad = true;
- NumVecs = 3;
- break;
- }
+ unsigned Opcode = Inst.getOpcode();
+ bool NeedsDisjointWritebackTransfer = false;
- case AArch64::LD4LN_WB_B_fixed: case AArch64::LD4LN_WB_B_register:
- case AArch64::LD4LN_WB_H_fixed: case AArch64::LD4LN_WB_H_register:
- case AArch64::LD4LN_WB_S_fixed: case AArch64::LD4LN_WB_S_register:
- case AArch64::LD4LN_WB_D_fixed: case AArch64::LD4LN_WB_D_register: {
- switch (Opc) {
- case AArch64::LD4LN_WB_B_fixed: case AArch64::LD4LN_WB_B_register:
- TransferBytes = 4; break;
- case AArch64::LD4LN_WB_H_fixed: case AArch64::LD4LN_WB_H_register:
- TransferBytes = 8; break;
- case AArch64::LD4LN_WB_S_fixed: case AArch64::LD4LN_WB_S_register:
- TransferBytes = 16; break;
- case AArch64::LD4LN_WB_D_fixed: case AArch64::LD4LN_WB_D_register:
- TransferBytes = 32; break;
- }
- IsLoad = true;
- NumVecs = 4;
+ // First operand is always writeback of base register.
+ switch (Opcode) {
+ default:
break;
- }
-
- case AArch64::ST1LN_WB_B_fixed: case AArch64::ST1LN_WB_B_register:
- case AArch64::ST1LN_WB_H_fixed: case AArch64::ST1LN_WB_H_register:
- case AArch64::ST1LN_WB_S_fixed: case AArch64::ST1LN_WB_S_register:
- case AArch64::ST1LN_WB_D_fixed: case AArch64::ST1LN_WB_D_register: {
- switch (Opc) {
- case AArch64::ST1LN_WB_B_fixed: case AArch64::ST1LN_WB_B_register:
- TransferBytes = 1; break;
- case AArch64::ST1LN_WB_H_fixed: case AArch64::ST1LN_WB_H_register:
- TransferBytes = 2; break;
- case AArch64::ST1LN_WB_S_fixed: case AArch64::ST1LN_WB_S_register:
- TransferBytes = 4; break;
- case AArch64::ST1LN_WB_D_fixed: case AArch64::ST1LN_WB_D_register:
- TransferBytes = 8; break;
- }
- NumVecs = 1;
+ case AArch64::LDPXpost:
+ case AArch64::STPXpost:
+ case AArch64::LDPSWpost:
+ case AArch64::LDPXpre:
+ case AArch64::STPXpre:
+ case AArch64::LDPSWpre:
+ case AArch64::LDPWpost:
+ case AArch64::STPWpost:
+ case AArch64::LDPWpre:
+ case AArch64::STPWpre:
+ case AArch64::LDPQpost:
+ case AArch64::STPQpost:
+ case AArch64::LDPQpre:
+ case AArch64::STPQpre:
+ case AArch64::LDPDpost:
+ case AArch64::STPDpost:
+ case AArch64::LDPDpre:
+ case AArch64::STPDpre:
+ case AArch64::LDPSpost:
+ case AArch64::STPSpost:
+ case AArch64::LDPSpre:
+ case AArch64::STPSpre:
+ DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
break;
}
- case AArch64::ST2LN_WB_B_fixed: case AArch64::ST2LN_WB_B_register:
- case AArch64::ST2LN_WB_H_fixed: case AArch64::ST2LN_WB_H_register:
- case AArch64::ST2LN_WB_S_fixed: case AArch64::ST2LN_WB_S_register:
- case AArch64::ST2LN_WB_D_fixed: case AArch64::ST2LN_WB_D_register: {
- switch (Opc) {
- case AArch64::ST2LN_WB_B_fixed: case AArch64::ST2LN_WB_B_register:
- TransferBytes = 2; break;
- case AArch64::ST2LN_WB_H_fixed: case AArch64::ST2LN_WB_H_register:
- TransferBytes = 4; break;
- case AArch64::ST2LN_WB_S_fixed: case AArch64::ST2LN_WB_S_register:
- TransferBytes = 8; break;
- case AArch64::ST2LN_WB_D_fixed: case AArch64::ST2LN_WB_D_register:
- TransferBytes = 16; break;
- }
- NumVecs = 2;
+ switch (Opcode) {
+ default:
+ return Fail;
+ case AArch64::LDPXpost:
+ case AArch64::STPXpost:
+ case AArch64::LDPSWpost:
+ case AArch64::LDPXpre:
+ case AArch64::STPXpre:
+ case AArch64::LDPSWpre:
+ NeedsDisjointWritebackTransfer = true;
+ // Fallthrough
+ case AArch64::LDNPXi:
+ case AArch64::STNPXi:
+ case AArch64::LDPXi:
+ case AArch64::STPXi:
+ case AArch64::LDPSWi:
+ DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
+ DecodeGPR64RegisterClass(Inst, Rt2, Addr, Decoder);
break;
- }
-
- case AArch64::ST3LN_WB_B_fixed: case AArch64::ST3LN_WB_B_register:
- case AArch64::ST3LN_WB_H_fixed: case AArch64::ST3LN_WB_H_register:
- case AArch64::ST3LN_WB_S_fixed: case AArch64::ST3LN_WB_S_register:
- case AArch64::ST3LN_WB_D_fixed: case AArch64::ST3LN_WB_D_register: {
- switch (Opc) {
- case AArch64::ST3LN_WB_B_fixed: case AArch64::ST3LN_WB_B_register:
- TransferBytes = 3; break;
- case AArch64::ST3LN_WB_H_fixed: case AArch64::ST3LN_WB_H_register:
- TransferBytes = 6; break;
- case AArch64::ST3LN_WB_S_fixed: case AArch64::ST3LN_WB_S_register:
- TransferBytes = 12; break;
- case AArch64::ST3LN_WB_D_fixed: case AArch64::ST3LN_WB_D_register:
- TransferBytes = 24; break;
- }
- NumVecs = 3;
+ case AArch64::LDPWpost:
+ case AArch64::STPWpost:
+ case AArch64::LDPWpre:
+ case AArch64::STPWpre:
+ NeedsDisjointWritebackTransfer = true;
+ // Fallthrough
+ case AArch64::LDNPWi:
+ case AArch64::STNPWi:
+ case AArch64::LDPWi:
+ case AArch64::STPWi:
+ DecodeGPR32RegisterClass(Inst, Rt, Addr, Decoder);
+ DecodeGPR32RegisterClass(Inst, Rt2, Addr, Decoder);
break;
- }
-
- case AArch64::ST4LN_WB_B_fixed: case AArch64::ST4LN_WB_B_register:
- case AArch64::ST4LN_WB_H_fixed: case AArch64::ST4LN_WB_H_register:
- case AArch64::ST4LN_WB_S_fixed: case AArch64::ST4LN_WB_S_register:
- case AArch64::ST4LN_WB_D_fixed: case AArch64::ST4LN_WB_D_register: {
- switch (Opc) {
- case AArch64::ST4LN_WB_B_fixed: case AArch64::ST4LN_WB_B_register:
- TransferBytes = 4; break;
- case AArch64::ST4LN_WB_H_fixed: case AArch64::ST4LN_WB_H_register:
- TransferBytes = 8; break;
- case AArch64::ST4LN_WB_S_fixed: case AArch64::ST4LN_WB_S_register:
- TransferBytes = 16; break;
- case AArch64::ST4LN_WB_D_fixed: case AArch64::ST4LN_WB_D_register:
- TransferBytes = 32; break;
- }
- NumVecs = 4;
+ case AArch64::LDNPQi:
+ case AArch64::STNPQi:
+ case AArch64::LDPQpost:
+ case AArch64::STPQpost:
+ case AArch64::LDPQi:
+ case AArch64::STPQi:
+ case AArch64::LDPQpre:
+ case AArch64::STPQpre:
+ DecodeFPR128RegisterClass(Inst, Rt, Addr, Decoder);
+ DecodeFPR128RegisterClass(Inst, Rt2, Addr, Decoder);
+ break;
+ case AArch64::LDNPDi:
+ case AArch64::STNPDi:
+ case AArch64::LDPDpost:
+ case AArch64::STPDpost:
+ case AArch64::LDPDi:
+ case AArch64::STPDi:
+ case AArch64::LDPDpre:
+ case AArch64::STPDpre:
+ DecodeFPR64RegisterClass(Inst, Rt, Addr, Decoder);
+ DecodeFPR64RegisterClass(Inst, Rt2, Addr, Decoder);
+ break;
+ case AArch64::LDNPSi:
+ case AArch64::STNPSi:
+ case AArch64::LDPSpost:
+ case AArch64::STPSpost:
+ case AArch64::LDPSi:
+ case AArch64::STPSi:
+ case AArch64::LDPSpre:
+ case AArch64::STPSpre:
+ DecodeFPR32RegisterClass(Inst, Rt, Addr, Decoder);
+ DecodeFPR32RegisterClass(Inst, Rt2, Addr, Decoder);
break;
}
- default:
- return MCDisassembler::Fail;
- } // End of switch (Opc)
+ DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
+ Inst.addOperand(MCOperand::CreateImm(offset));
- unsigned Rt = fieldFromInstruction(Insn, 0, 5);
- unsigned Rn = fieldFromInstruction(Insn, 5, 5);
- unsigned Rm = fieldFromInstruction(Insn, 16, 5);
-
- // Decode post-index of load duplicate lane
- if (IsLoadDup) {
- switch (NumVecs) {
- case 1:
- Is64bitVec ? DecodeFPR64RegisterClass(Inst, Rt, Address, Decoder)
- : DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder);
- break;
- case 2:
- Is64bitVec ? DecodeDPairRegisterClass(Inst, Rt, Address, Decoder)
- : DecodeQPairRegisterClass(Inst, Rt, Address, Decoder);
- break;
- case 3:
- Is64bitVec ? DecodeDTripleRegisterClass(Inst, Rt, Address, Decoder)
- : DecodeQTripleRegisterClass(Inst, Rt, Address, Decoder);
- break;
- case 4:
- Is64bitVec ? DecodeDQuadRegisterClass(Inst, Rt, Address, Decoder)
- : DecodeQQuadRegisterClass(Inst, Rt, Address, Decoder);
- }
-
- // Decode write back register, which is equal to Rn.
- DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
- DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
-
- if (Rm == 31) // If Rm is 0x11111, add the number of transferred bytes
- Inst.addOperand(MCOperand::CreateImm(TransferBytes));
- else // Decode Rm
- DecodeGPR64noxzrRegisterClass(Inst, Rm, Address, Decoder);
-
- return MCDisassembler::Success;
- }
+ // You shouldn't load to the same register twice in an instruction...
+ if (IsLoad && Rt == Rt2)
+ return SoftFail;
- // Decode post-index of load/store lane
- // Loads have a vector list as output.
- if (IsLoad) {
- switch (NumVecs) {
- case 1:
- DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder);
- break;
- case 2:
- DecodeQPairRegisterClass(Inst, Rt, Address, Decoder);
- break;
- case 3:
- DecodeQTripleRegisterClass(Inst, Rt, Address, Decoder);
- break;
- case 4:
- DecodeQQuadRegisterClass(Inst, Rt, Address, Decoder);
- }
- }
+  // ... or do any operation that writes back to a transfer register. But note
+ // that "stp xzr, xzr, [sp], #4" is fine because xzr and sp are different.
+ if (NeedsDisjointWritebackTransfer && Rn != 31 && (Rt == Rn || Rt2 == Rn))
+ return SoftFail;
+
+ return Success;
+}
- // Decode write back register, which is equal to Rn.
- DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
- DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
+static DecodeStatus DecodeAddSubERegInstruction(llvm::MCInst &Inst,
+ uint32_t insn, uint64_t Addr,
+ const void *Decoder) {
+ unsigned Rd = fieldFromInstruction(insn, 0, 5);
+ unsigned Rn = fieldFromInstruction(insn, 5, 5);
+ unsigned Rm = fieldFromInstruction(insn, 16, 5);
+ unsigned extend = fieldFromInstruction(insn, 10, 6);
- if (Rm == 31) // If Rm is 0x11111, add the number of transferred bytes
- Inst.addOperand(MCOperand::CreateImm(TransferBytes));
- else // Decode Rm
- DecodeGPR64noxzrRegisterClass(Inst, Rm, Address, Decoder);
+ unsigned shift = extend & 0x7;
+ if (shift > 4)
+ return Fail;
- // Decode the source vector list.
- switch (NumVecs) {
- case 1:
- DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder);
- break;
- case 2:
- DecodeQPairRegisterClass(Inst, Rt, Address, Decoder);
+ switch (Inst.getOpcode()) {
+ default:
+ return Fail;
+ case AArch64::ADDWrx:
+ case AArch64::SUBWrx:
+ DecodeGPR32spRegisterClass(Inst, Rd, Addr, Decoder);
+ DecodeGPR32spRegisterClass(Inst, Rn, Addr, Decoder);
+ DecodeGPR32RegisterClass(Inst, Rm, Addr, Decoder);
break;
- case 3:
- DecodeQTripleRegisterClass(Inst, Rt, Address, Decoder);
+ case AArch64::ADDSWrx:
+ case AArch64::SUBSWrx:
+ DecodeGPR32RegisterClass(Inst, Rd, Addr, Decoder);
+ DecodeGPR32spRegisterClass(Inst, Rn, Addr, Decoder);
+ DecodeGPR32RegisterClass(Inst, Rm, Addr, Decoder);
break;
- case 4:
- DecodeQQuadRegisterClass(Inst, Rt, Address, Decoder);
- }
-
- // Decode lane
- unsigned Q = fieldFromInstruction(Insn, 30, 1);
- unsigned S = fieldFromInstruction(Insn, 10, 3);
- unsigned lane = 0;
- // Calculate the number of lanes by number of vectors and transfered bytes.
- // NumLanes = 16 bytes / bytes of each lane
- unsigned NumLanes = 16 / (TransferBytes / NumVecs);
- switch (NumLanes) {
- case 16: // A vector has 16 lanes, each lane is 1 bytes.
- lane = (Q << 3) | S;
+ case AArch64::ADDXrx:
+ case AArch64::SUBXrx:
+ DecodeGPR64spRegisterClass(Inst, Rd, Addr, Decoder);
+ DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
+ DecodeGPR32RegisterClass(Inst, Rm, Addr, Decoder);
break;
- case 8:
- lane = (Q << 2) | (S >> 1);
+ case AArch64::ADDSXrx:
+ case AArch64::SUBSXrx:
+ DecodeGPR64RegisterClass(Inst, Rd, Addr, Decoder);
+ DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
+ DecodeGPR32RegisterClass(Inst, Rm, Addr, Decoder);
break;
- case 4:
- lane = (Q << 1) | (S >> 2);
+ case AArch64::ADDXrx64:
+ case AArch64::SUBXrx64:
+ DecodeGPR64spRegisterClass(Inst, Rd, Addr, Decoder);
+ DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
+ DecodeGPR64RegisterClass(Inst, Rm, Addr, Decoder);
break;
- case 2:
- lane = Q;
+ case AArch64::SUBSXrx64:
+ case AArch64::ADDSXrx64:
+ DecodeGPR64RegisterClass(Inst, Rd, Addr, Decoder);
+ DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
+ DecodeGPR64RegisterClass(Inst, Rm, Addr, Decoder);
break;
}
- Inst.addOperand(MCOperand::CreateImm(lane));
- return MCDisassembler::Success;
+ Inst.addOperand(MCOperand::CreateImm(extend));
+ return Success;
}
-static DecodeStatus DecodeSHLLInstruction(MCInst &Inst, unsigned Insn,
- uint64_t Address,
- const void *Decoder) {
- unsigned Rd = fieldFromInstruction(Insn, 0, 5);
- unsigned Rn = fieldFromInstruction(Insn, 5, 5);
- unsigned size = fieldFromInstruction(Insn, 22, 2);
- unsigned Q = fieldFromInstruction(Insn, 30, 1);
+static DecodeStatus DecodeLogicalImmInstruction(llvm::MCInst &Inst,
+ uint32_t insn, uint64_t Addr,
+ const void *Decoder) {
+ unsigned Rd = fieldFromInstruction(insn, 0, 5);
+ unsigned Rn = fieldFromInstruction(insn, 5, 5);
+ unsigned Datasize = fieldFromInstruction(insn, 31, 1);
+ unsigned imm;
+
+ if (Datasize) {
+ if (Inst.getOpcode() == AArch64::ANDSXri)
+ DecodeGPR64RegisterClass(Inst, Rd, Addr, Decoder);
+ else
+ DecodeGPR64spRegisterClass(Inst, Rd, Addr, Decoder);
+ DecodeGPR64RegisterClass(Inst, Rn, Addr, Decoder);
+ imm = fieldFromInstruction(insn, 10, 13);
+ if (!AArch64_AM::isValidDecodeLogicalImmediate(imm, 64))
+ return Fail;
+ } else {
+ if (Inst.getOpcode() == AArch64::ANDSWri)
+ DecodeGPR32RegisterClass(Inst, Rd, Addr, Decoder);
+ else
+ DecodeGPR32spRegisterClass(Inst, Rd, Addr, Decoder);
+ DecodeGPR32RegisterClass(Inst, Rn, Addr, Decoder);
+ imm = fieldFromInstruction(insn, 10, 12);
+ if (!AArch64_AM::isValidDecodeLogicalImmediate(imm, 32))
+ return Fail;
+ }
+ Inst.addOperand(MCOperand::CreateImm(imm));
+ return Success;
+}
- DecodeFPR128RegisterClass(Inst, Rd, Address, Decoder);
+static DecodeStatus DecodeModImmInstruction(llvm::MCInst &Inst, uint32_t insn,
+ uint64_t Addr,
+ const void *Decoder) {
+ unsigned Rd = fieldFromInstruction(insn, 0, 5);
+ unsigned cmode = fieldFromInstruction(insn, 12, 4);
+ unsigned imm = fieldFromInstruction(insn, 16, 3) << 5;
+ imm |= fieldFromInstruction(insn, 5, 5);
- if(Q)
- DecodeFPR128RegisterClass(Inst, Rn, Address, Decoder);
+ if (Inst.getOpcode() == AArch64::MOVID)
+ DecodeFPR64RegisterClass(Inst, Rd, Addr, Decoder);
else
- DecodeFPR64RegisterClass(Inst, Rn, Address, Decoder);
+ DecodeVectorRegisterClass(Inst, Rd, Addr, Decoder);
+
+ Inst.addOperand(MCOperand::CreateImm(imm));
- switch (size) {
- case 0:
- Inst.addOperand(MCOperand::CreateImm(8));
+ switch (Inst.getOpcode()) {
+ default:
break;
- case 1:
- Inst.addOperand(MCOperand::CreateImm(16));
+ case AArch64::MOVIv4i16:
+ case AArch64::MOVIv8i16:
+ case AArch64::MVNIv4i16:
+ case AArch64::MVNIv8i16:
+ case AArch64::MOVIv2i32:
+ case AArch64::MOVIv4i32:
+ case AArch64::MVNIv2i32:
+ case AArch64::MVNIv4i32:
+ Inst.addOperand(MCOperand::CreateImm((cmode & 6) << 2));
break;
- case 2:
- Inst.addOperand(MCOperand::CreateImm(32));
+ case AArch64::MOVIv2s_msl:
+ case AArch64::MOVIv4s_msl:
+ case AArch64::MVNIv2s_msl:
+ case AArch64::MVNIv4s_msl:
+ Inst.addOperand(MCOperand::CreateImm(cmode & 1 ? 0x110 : 0x108));
break;
- default :
- return MCDisassembler::Fail;
}
- return MCDisassembler::Success;
+
+ return Success;
+}
+
+static DecodeStatus DecodeModImmTiedInstruction(llvm::MCInst &Inst,
+ uint32_t insn, uint64_t Addr,
+ const void *Decoder) {
+ unsigned Rd = fieldFromInstruction(insn, 0, 5);
+ unsigned cmode = fieldFromInstruction(insn, 12, 4);
+ unsigned imm = fieldFromInstruction(insn, 16, 3) << 5;
+ imm |= fieldFromInstruction(insn, 5, 5);
+
+ // Tied operands added twice.
+ DecodeVectorRegisterClass(Inst, Rd, Addr, Decoder);
+ DecodeVectorRegisterClass(Inst, Rd, Addr, Decoder);
+
+ Inst.addOperand(MCOperand::CreateImm(imm));
+ Inst.addOperand(MCOperand::CreateImm((cmode & 6) << 2));
+
+ return Success;
+}
+
+static DecodeStatus DecodeAdrInstruction(llvm::MCInst &Inst, uint32_t insn,
+ uint64_t Addr, const void *Decoder) {
+ unsigned Rd = fieldFromInstruction(insn, 0, 5);
+ int64_t imm = fieldFromInstruction(insn, 5, 19) << 2;
+ imm |= fieldFromInstruction(insn, 29, 2);
+ const AArch64Disassembler *Dis =
+ static_cast<const AArch64Disassembler *>(Decoder);
+
+ // Sign-extend the 21-bit immediate.
+ if (imm & (1 << (21 - 1)))
+ imm |= ~((1LL << 21) - 1);
+
+ DecodeGPR64RegisterClass(Inst, Rd, Addr, Decoder);
+ if (!Dis->tryAddingSymbolicOperand(Inst, imm, Addr, Fail, 0, 4))
+ Inst.addOperand(MCOperand::CreateImm(imm));
+
+ return Success;
+}
+
+static DecodeStatus DecodeBaseAddSubImm(llvm::MCInst &Inst, uint32_t insn,
+ uint64_t Addr, const void *Decoder) {
+ unsigned Rd = fieldFromInstruction(insn, 0, 5);
+ unsigned Rn = fieldFromInstruction(insn, 5, 5);
+ unsigned Imm = fieldFromInstruction(insn, 10, 14);
+ unsigned S = fieldFromInstruction(insn, 29, 1);
+ unsigned Datasize = fieldFromInstruction(insn, 31, 1);
+
+ unsigned ShifterVal = (Imm >> 12) & 3;
+ unsigned ImmVal = Imm & 0xFFF;
+ const AArch64Disassembler *Dis =
+ static_cast<const AArch64Disassembler *>(Decoder);
+
+ if (ShifterVal != 0 && ShifterVal != 1)
+ return Fail;
+
+ if (Datasize) {
+ if (Rd == 31 && !S)
+ DecodeGPR64spRegisterClass(Inst, Rd, Addr, Decoder);
+ else
+ DecodeGPR64RegisterClass(Inst, Rd, Addr, Decoder);
+ DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
+ } else {
+ if (Rd == 31 && !S)
+ DecodeGPR32spRegisterClass(Inst, Rd, Addr, Decoder);
+ else
+ DecodeGPR32RegisterClass(Inst, Rd, Addr, Decoder);
+ DecodeGPR32spRegisterClass(Inst, Rn, Addr, Decoder);
+ }
+
+ if (!Dis->tryAddingSymbolicOperand(Inst, Imm, Addr, Fail, 0, 4))
+ Inst.addOperand(MCOperand::CreateImm(ImmVal));
+ Inst.addOperand(MCOperand::CreateImm(12 * ShifterVal));
+ return Success;
+}
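DecodeBaseAddSubImm above splits the 14-bit field into a 12-bit value and a 2-bit shifter (only 0 and 1 are accepted) and emits them as separate operands; the effective immediate applied by the instruction is the value shifted left by 12 * ShifterVal. A minimal sketch of that relationship, illustrative only and not part of the patch:

#include <cassert>
#include <cstdint>

// Effective add/sub immediate for a decoded 14-bit field: the low 12 bits,
// optionally shifted left by 12 when the 2-bit shifter equals 1.
static uint64_t effectiveAddSubImm(unsigned Imm14) {
  unsigned ShifterVal = (Imm14 >> 12) & 3; // must be 0 or 1, as checked above
  unsigned ImmVal = Imm14 & 0xFFF;
  return static_cast<uint64_t>(ImmVal) << (12 * ShifterVal);
}

int main() {
  assert(effectiveAddSubImm(0x0ABC) == 0xABC);             // no shift
  assert(effectiveAddSubImm(0x1ABC) == (0xABCULL << 12));  // lsl #12
  return 0;
}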
+
+static DecodeStatus DecodeUnconditionalBranch(llvm::MCInst &Inst, uint32_t insn,
+ uint64_t Addr,
+ const void *Decoder) {
+ int64_t imm = fieldFromInstruction(insn, 0, 26);
+ const AArch64Disassembler *Dis =
+ static_cast<const AArch64Disassembler *>(Decoder);
+
+ // Sign-extend the 26-bit immediate.
+ if (imm & (1 << (26 - 1)))
+ imm |= ~((1LL << 26) - 1);
+
+ if (!Dis->tryAddingSymbolicOperand(Inst, imm << 2, Addr, true, 0, 4))
+ Inst.addOperand(MCOperand::CreateImm(imm));
+
+ return Success;
+}
+
+static DecodeStatus DecodeSystemPStateInstruction(llvm::MCInst &Inst,
+ uint32_t insn, uint64_t Addr,
+ const void *Decoder) {
+ uint64_t op1 = fieldFromInstruction(insn, 16, 3);
+ uint64_t op2 = fieldFromInstruction(insn, 5, 3);
+ uint64_t crm = fieldFromInstruction(insn, 8, 4);
+
+ uint64_t pstate_field = (op1 << 3) | op2;
+
+ Inst.addOperand(MCOperand::CreateImm(pstate_field));
+ Inst.addOperand(MCOperand::CreateImm(crm));
+
+ bool ValidNamed;
+ (void)AArch64PState::PStateMapper().toString(pstate_field, ValidNamed);
+
+ return ValidNamed ? Success : Fail;
}
+static DecodeStatus DecodeTestAndBranch(llvm::MCInst &Inst, uint32_t insn,
+ uint64_t Addr, const void *Decoder) {
+ uint64_t Rt = fieldFromInstruction(insn, 0, 5);
+ uint64_t bit = fieldFromInstruction(insn, 31, 1) << 5;
+ bit |= fieldFromInstruction(insn, 19, 5);
+ int64_t dst = fieldFromInstruction(insn, 5, 14);
+ const AArch64Disassembler *Dis =
+ static_cast<const AArch64Disassembler *>(Decoder);
+
+ // Sign-extend 14-bit immediate.
+ if (dst & (1 << (14 - 1)))
+ dst |= ~((1LL << 14) - 1);
+
+ if (fieldFromInstruction(insn, 31, 1) == 0)
+ DecodeGPR32RegisterClass(Inst, Rt, Addr, Decoder);
+ else
+ DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
+ Inst.addOperand(MCOperand::CreateImm(bit));
+ if (!Dis->tryAddingSymbolicOperand(Inst, dst << 2, Addr, true, 0, 4))
+ Inst.addOperand(MCOperand::CreateImm(dst));
+
+ return Success;
+}
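Several of the decoders above (the load/store pair offset, the ADR/ADRP immediate, the unconditional branch target, and the test-and-branch target) repeat the same two-line idiom to sign-extend an N-bit field. A minimal standalone sketch of that idiom, using a hypothetical helper name that is not part of this patch:

#include <cassert>
#include <cstdint>

// Sign-extend the low Bits bits of Value, mirroring the
// "if (imm & (1 << (Bits - 1))) imm |= ~((1LL << Bits) - 1);"
// pattern used by the decoders above. Illustrative helper only.
static int64_t signExtendField(uint64_t Value, unsigned Bits) {
  int64_t Imm = static_cast<int64_t>(Value);
  if (Imm & (1LL << (Bits - 1)))
    Imm |= ~((1LL << Bits) - 1);
  return Imm;
}

int main() {
  assert(signExtendField(0x7F, 7) == -1);                  // 7-bit pair-load offset
  assert(signExtendField(0x2000000, 26) == -(1LL << 25));  // 26-bit branch field
  return 0;
}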
diff --git a/contrib/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.h b/contrib/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.h
new file mode 100644
index 0000000..68d4867
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.h
@@ -0,0 +1,40 @@
+//===- AArch64Disassembler.h - Disassembler for AArch64 ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef AArch64DISASSEMBLER_H
+#define AArch64DISASSEMBLER_H
+
+#include "llvm/MC/MCDisassembler.h"
+
+namespace llvm {
+
+class MCInst;
+class MemoryObject;
+class raw_ostream;
+
+class AArch64Disassembler : public MCDisassembler {
+public:
+ AArch64Disassembler(const MCSubtargetInfo &STI, MCContext &Ctx)
+ : MCDisassembler(STI, Ctx) {}
+
+ ~AArch64Disassembler() {}
+
+ /// getInstruction - See MCDisassembler.
+ MCDisassembler::DecodeStatus
+ getInstruction(MCInst &instr, uint64_t &size, const MemoryObject &region,
+ uint64_t address, raw_ostream &vStream,
+ raw_ostream &cStream) const override;
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp b/contrib/llvm/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp
new file mode 100644
index 0000000..2057c51
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp
@@ -0,0 +1,220 @@
+//===- AArch64ExternalSymbolizer.cpp - Symbolizer for AArch64 ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AArch64ExternalSymbolizer.h"
+#include "AArch64Subtarget.h"
+#include "MCTargetDesc/AArch64AddressingModes.h"
+#include "Utils/AArch64BaseInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "aarch64-disassembler"
+
+static MCSymbolRefExpr::VariantKind
+getVariant(uint64_t LLVMDisassembler_VariantKind) {
+ switch (LLVMDisassembler_VariantKind) {
+ case LLVMDisassembler_VariantKind_None:
+ return MCSymbolRefExpr::VK_None;
+ case LLVMDisassembler_VariantKind_ARM64_PAGE:
+ return MCSymbolRefExpr::VK_PAGE;
+ case LLVMDisassembler_VariantKind_ARM64_PAGEOFF:
+ return MCSymbolRefExpr::VK_PAGEOFF;
+ case LLVMDisassembler_VariantKind_ARM64_GOTPAGE:
+ return MCSymbolRefExpr::VK_GOTPAGE;
+ case LLVMDisassembler_VariantKind_ARM64_GOTPAGEOFF:
+ return MCSymbolRefExpr::VK_GOTPAGEOFF;
+ case LLVMDisassembler_VariantKind_ARM64_TLVP:
+ case LLVMDisassembler_VariantKind_ARM64_TLVOFF:
+ default:
+ llvm_unreachable("bad LLVMDisassembler_VariantKind");
+ }
+}
+
+/// tryAddingSymbolicOperand - tryAddingSymbolicOperand tries to add a symbolic
+/// operand in place of the immediate Value in the MCInst. The immediate
+/// Value has not had any PC adjustment made by the caller. If the instruction
+/// is a branch that adds the PC to the immediate Value then isBranch is
+/// true, else false. If GetOpInfo is non-null, then it is called to get any
+/// symbolic information at the Address for this instruction. If that returns
+/// non-zero then the symbolic information it returns is used to create an
+/// MCExpr and that is added as an operand to the MCInst. If GetOpInfo()
+/// returns zero and isBranch is true then a symbol lookup for
+/// Address + Value is done and if a symbol is found an MCExpr is created with
+/// that, else an MCExpr with Address + Value is created. If GetOpInfo()
+/// returns zero and isBranch is false then the Opcode of the MCInst is
+/// tested, and for ADRP and other instructions that help to load pointers
+/// a symbol lookup is done to see if it returns a specific reference type
+/// to add to the comment stream. This function returns true if it adds
+/// an operand to the MCInst and false otherwise.
+bool AArch64ExternalSymbolizer::tryAddingSymbolicOperand(
+ MCInst &MI, raw_ostream &CommentStream, int64_t Value, uint64_t Address,
+ bool IsBranch, uint64_t Offset, uint64_t InstSize) {
+ // FIXME: This method shares a lot of code with
+  // MCExternalSymbolizer::tryAddingSymbolicOperand. It may be possible to
+  // refactor the MCExternalSymbolizer interface to allow more of this
+ // implementation to be shared.
+ //
+ struct LLVMOpInfo1 SymbolicOp;
+ memset(&SymbolicOp, '\0', sizeof(struct LLVMOpInfo1));
+ SymbolicOp.Value = Value;
+ uint64_t ReferenceType;
+ const char *ReferenceName;
+ if (!GetOpInfo ||
+ !GetOpInfo(DisInfo, Address, 0 /* Offset */, InstSize, 1, &SymbolicOp)) {
+ if (IsBranch) {
+ ReferenceType = LLVMDisassembler_ReferenceType_In_Branch;
+ const char *Name = SymbolLookUp(DisInfo, Address + Value, &ReferenceType,
+ Address, &ReferenceName);
+ if (Name) {
+ SymbolicOp.AddSymbol.Name = Name;
+ SymbolicOp.AddSymbol.Present = true;
+ SymbolicOp.Value = 0;
+ } else {
+ SymbolicOp.Value = Address + Value;
+ }
+ if (ReferenceType == LLVMDisassembler_ReferenceType_Out_SymbolStub)
+ CommentStream << "symbol stub for: " << ReferenceName;
+ else if (ReferenceType ==
+ LLVMDisassembler_ReferenceType_Out_Objc_Message)
+ CommentStream << "Objc message: " << ReferenceName;
+ } else if (MI.getOpcode() == AArch64::ADRP) {
+ ReferenceType = LLVMDisassembler_ReferenceType_In_ARM64_ADRP;
+ // otool expects the fully encoded ADRP instruction to be passed in as
+ // the value here, so reconstruct it:
+ const MCRegisterInfo &MCRI = *Ctx.getRegisterInfo();
+ uint32_t EncodedInst = 0x90000000;
+ EncodedInst |= (Value & 0x3) << 29; // immlo
+ EncodedInst |= ((Value >> 2) & 0x7FFFF) << 5; // immhi
+ EncodedInst |= MCRI.getEncodingValue(MI.getOperand(0).getReg()); // reg
+ SymbolLookUp(DisInfo, EncodedInst, &ReferenceType, Address,
+ &ReferenceName);
+ CommentStream << format("0x%llx",
+ 0xfffffffffffff000LL & (Address + Value));
+ } else if (MI.getOpcode() == AArch64::ADDXri ||
+ MI.getOpcode() == AArch64::LDRXui ||
+ MI.getOpcode() == AArch64::LDRXl ||
+ MI.getOpcode() == AArch64::ADR) {
+ if (MI.getOpcode() == AArch64::ADDXri)
+ ReferenceType = LLVMDisassembler_ReferenceType_In_ARM64_ADDXri;
+ else if (MI.getOpcode() == AArch64::LDRXui)
+ ReferenceType = LLVMDisassembler_ReferenceType_In_ARM64_LDRXui;
+ if (MI.getOpcode() == AArch64::LDRXl) {
+ ReferenceType = LLVMDisassembler_ReferenceType_In_ARM64_LDRXl;
+ SymbolLookUp(DisInfo, Address + Value, &ReferenceType, Address,
+ &ReferenceName);
+ } else if (MI.getOpcode() == AArch64::ADR) {
+ ReferenceType = LLVMDisassembler_ReferenceType_In_ARM64_ADR;
+ SymbolLookUp(DisInfo, Address + Value, &ReferenceType, Address,
+ &ReferenceName);
+ } else {
+ const MCRegisterInfo &MCRI = *Ctx.getRegisterInfo();
+ // otool expects the fully encoded ADD/LDR instruction to be passed in
+ // as the value here, so reconstruct it:
+ unsigned EncodedInst =
+ MI.getOpcode() == AArch64::ADDXri ? 0x91000000: 0xF9400000;
+ EncodedInst |= Value << 10; // imm12 [+ shift:2 for ADD]
+ EncodedInst |=
+ MCRI.getEncodingValue(MI.getOperand(1).getReg()) << 5; // Rn
+ EncodedInst |= MCRI.getEncodingValue(MI.getOperand(0).getReg()); // Rd
+
+ SymbolLookUp(DisInfo, EncodedInst, &ReferenceType, Address,
+ &ReferenceName);
+ }
+ if (ReferenceType == LLVMDisassembler_ReferenceType_Out_LitPool_SymAddr)
+ CommentStream << "literal pool symbol address: " << ReferenceName;
+ else if (ReferenceType ==
+ LLVMDisassembler_ReferenceType_Out_LitPool_CstrAddr)
+ CommentStream << "literal pool for: \"" << ReferenceName << "\"";
+ else if (ReferenceType ==
+ LLVMDisassembler_ReferenceType_Out_Objc_CFString_Ref)
+ CommentStream << "Objc cfstring ref: @\"" << ReferenceName << "\"";
+ else if (ReferenceType ==
+ LLVMDisassembler_ReferenceType_Out_Objc_Message)
+ CommentStream << "Objc message: " << ReferenceName;
+ else if (ReferenceType ==
+ LLVMDisassembler_ReferenceType_Out_Objc_Message_Ref)
+ CommentStream << "Objc message ref: " << ReferenceName;
+ else if (ReferenceType ==
+ LLVMDisassembler_ReferenceType_Out_Objc_Selector_Ref)
+ CommentStream << "Objc selector ref: " << ReferenceName;
+ else if (ReferenceType ==
+ LLVMDisassembler_ReferenceType_Out_Objc_Class_Ref)
+ CommentStream << "Objc class ref: " << ReferenceName;
+      // For these instructions, the SymbolLookUp() above is just to get the
+      // ReferenceType and ReferenceName. We return here rather than fall
+      // through, so that no MCExpr is built and the disassembly of these
+      // instructions' immediate values is left to the InstPrinter.
+ return false;
+ } else {
+ return false;
+ }
+ }
+
+ const MCExpr *Add = nullptr;
+ if (SymbolicOp.AddSymbol.Present) {
+ if (SymbolicOp.AddSymbol.Name) {
+ StringRef Name(SymbolicOp.AddSymbol.Name);
+ MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name);
+ MCSymbolRefExpr::VariantKind Variant = getVariant(SymbolicOp.VariantKind);
+ if (Variant != MCSymbolRefExpr::VK_None)
+ Add = MCSymbolRefExpr::Create(Sym, Variant, Ctx);
+ else
+ Add = MCSymbolRefExpr::Create(Sym, Ctx);
+ } else {
+ Add = MCConstantExpr::Create(SymbolicOp.AddSymbol.Value, Ctx);
+ }
+ }
+
+ const MCExpr *Sub = nullptr;
+ if (SymbolicOp.SubtractSymbol.Present) {
+ if (SymbolicOp.SubtractSymbol.Name) {
+ StringRef Name(SymbolicOp.SubtractSymbol.Name);
+ MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name);
+ Sub = MCSymbolRefExpr::Create(Sym, Ctx);
+ } else {
+ Sub = MCConstantExpr::Create(SymbolicOp.SubtractSymbol.Value, Ctx);
+ }
+ }
+
+ const MCExpr *Off = nullptr;
+ if (SymbolicOp.Value != 0)
+ Off = MCConstantExpr::Create(SymbolicOp.Value, Ctx);
+
+ const MCExpr *Expr;
+ if (Sub) {
+ const MCExpr *LHS;
+ if (Add)
+ LHS = MCBinaryExpr::CreateSub(Add, Sub, Ctx);
+ else
+ LHS = MCUnaryExpr::CreateMinus(Sub, Ctx);
+ if (Off)
+ Expr = MCBinaryExpr::CreateAdd(LHS, Off, Ctx);
+ else
+ Expr = LHS;
+ } else if (Add) {
+ if (Off)
+ Expr = MCBinaryExpr::CreateAdd(Add, Off, Ctx);
+ else
+ Expr = Add;
+ } else {
+ if (Off)
+ Expr = Off;
+ else
+ Expr = MCConstantExpr::Create(0, Ctx);
+ }
+
+ MI.addOperand(MCOperand::CreateExpr(Expr));
+
+ return true;
+}
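For the ADRP case above, the fully encoded instruction is rebuilt from the 21-bit page immediate so that otool-style SymbolLookUp callbacks can inspect it, and DecodeAdrInstruction in the disassembler performs the inverse field extraction. A self-contained sketch of that immlo/immhi packing, under the field layout shown above (helper names are illustrative, not from the patch):

#include <cassert>
#include <cstdint>

// Pack a 21-bit ADRP immediate and destination register into the layout used
// above: immlo in bits [30:29], immhi in bits [23:5], Rd in bits [4:0],
// on top of the 0x90000000 ADRP opcode pattern.
static uint32_t encodeAdrp(uint32_t Imm21, unsigned Rd) {
  uint32_t Inst = 0x90000000;
  Inst |= (Imm21 & 0x3) << 29;            // immlo
  Inst |= ((Imm21 >> 2) & 0x7FFFF) << 5;  // immhi
  Inst |= Rd & 0x1F;                      // destination register
  return Inst;
}

// The decoder reads the fields back the same way (immhi:immlo).
static uint32_t decodeAdrpImm(uint32_t Inst) {
  return (((Inst >> 5) & 0x7FFFF) << 2) | ((Inst >> 29) & 0x3);
}

int main() {
  assert(decodeAdrpImm(encodeAdrp(0x12345, 3)) == 0x12345);
  return 0;
}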
diff --git a/contrib/llvm/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.h b/contrib/llvm/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.h
new file mode 100644
index 0000000..171d31c
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.h
@@ -0,0 +1,38 @@
+//===- AArch64ExternalSymbolizer.h - Symbolizer for AArch64 -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Symbolize AArch64 assembly code during disassembly using callbacks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef AArch64EXTERNALSYMBOLIZER_H
+#define AArch64EXTERNALSYMBOLIZER_H
+
+#include "llvm/MC/MCExternalSymbolizer.h"
+
+namespace llvm {
+
+class AArch64ExternalSymbolizer : public MCExternalSymbolizer {
+public:
+ AArch64ExternalSymbolizer(MCContext &Ctx,
+ std::unique_ptr<MCRelocationInfo> RelInfo,
+ LLVMOpInfoCallback GetOpInfo,
+ LLVMSymbolLookupCallback SymbolLookUp,
+ void *DisInfo)
+ : MCExternalSymbolizer(Ctx, std::move(RelInfo), GetOpInfo, SymbolLookUp,
+ DisInfo) {}
+
+ bool tryAddingSymbolicOperand(MCInst &MI, raw_ostream &CommentStream,
+ int64_t Value, uint64_t Address, bool IsBranch,
+ uint64_t Offset, uint64_t InstSize) override;
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp b/contrib/llvm/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
index 0438de3..8a21f06 100644
--- a/contrib/llvm/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
+++ b/contrib/llvm/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
@@ -11,529 +11,1306 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
#include "AArch64InstPrinter.h"
-#include "MCTargetDesc/AArch64MCTargetDesc.h"
+#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
-
using namespace llvm;
+#define DEBUG_TYPE "asm-printer"
+
#define GET_INSTRUCTION_NAME
#define PRINT_ALIAS_INSTR
#include "AArch64GenAsmWriter.inc"
-
-static int64_t unpackSignedImm(int BitWidth, uint64_t Value) {
- assert(!(Value & ~((1ULL << BitWidth)-1)) && "immediate not n-bit");
- if (Value & (1ULL << (BitWidth - 1)))
- return static_cast<int64_t>(Value) - (1LL << BitWidth);
- else
- return Value;
-}
+#define GET_INSTRUCTION_NAME
+#define PRINT_ALIAS_INSTR
+#include "AArch64GenAsmWriter1.inc"
AArch64InstPrinter::AArch64InstPrinter(const MCAsmInfo &MAI,
const MCInstrInfo &MII,
const MCRegisterInfo &MRI,
- const MCSubtargetInfo &STI) :
- MCInstPrinter(MAI, MII, MRI) {
+ const MCSubtargetInfo &STI)
+ : MCInstPrinter(MAI, MII, MRI) {
// Initialize the set of available features.
setAvailableFeatures(STI.getFeatureBits());
}
+AArch64AppleInstPrinter::AArch64AppleInstPrinter(const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &STI)
+ : AArch64InstPrinter(MAI, MII, MRI, STI) {}
+
void AArch64InstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
+ // This is for .cfi directives.
OS << getRegisterName(RegNo);
}
-void
-AArch64InstPrinter::printOffsetSImm9Operand(const MCInst *MI,
- unsigned OpNum, raw_ostream &O) {
- const MCOperand &MOImm = MI->getOperand(OpNum);
- int32_t Imm = unpackSignedImm(9, MOImm.getImm());
-
- O << '#' << Imm;
-}
-
-void
-AArch64InstPrinter::printAddrRegExtendOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O, unsigned MemSize,
- unsigned RmSize) {
- unsigned ExtImm = MI->getOperand(OpNum).getImm();
- unsigned OptionHi = ExtImm >> 1;
- unsigned S = ExtImm & 1;
- bool IsLSL = OptionHi == 1 && RmSize == 64;
-
- const char *Ext;
- switch (OptionHi) {
- case 1:
- Ext = (RmSize == 32) ? "uxtw" : "lsl";
- break;
- case 3:
- Ext = (RmSize == 32) ? "sxtw" : "sxtx";
- break;
- default:
- llvm_unreachable("Incorrect Option on load/store (reg offset)");
- }
- O << Ext;
+void AArch64InstPrinter::printInst(const MCInst *MI, raw_ostream &O,
+ StringRef Annot) {
+ // Check for special encodings and print the canonical alias instead.
- if (S) {
- unsigned ShiftAmt = Log2_32(MemSize);
- O << " #" << ShiftAmt;
- } else if (IsLSL) {
- O << " #0";
- }
-}
+ unsigned Opcode = MI->getOpcode();
-void
-AArch64InstPrinter::printAddSubImmLSL0Operand(const MCInst *MI,
- unsigned OpNum, raw_ostream &O) {
- const MCOperand &Imm12Op = MI->getOperand(OpNum);
+ if (Opcode == AArch64::SYSxt)
+ if (printSysAlias(MI, O)) {
+ printAnnotation(O, Annot);
+ return;
+ }
- if (Imm12Op.isImm()) {
- int64_t Imm12 = Imm12Op.getImm();
- assert(Imm12 >= 0 && "Invalid immediate for add/sub imm");
- O << "#" << Imm12;
- } else {
- assert(Imm12Op.isExpr() && "Unexpected shift operand type");
- O << "#" << *Imm12Op.getExpr();
- }
-}
+ // SBFM/UBFM should print to a nicer aliased form if possible.
+ if (Opcode == AArch64::SBFMXri || Opcode == AArch64::SBFMWri ||
+ Opcode == AArch64::UBFMXri || Opcode == AArch64::UBFMWri) {
+ const MCOperand &Op0 = MI->getOperand(0);
+ const MCOperand &Op1 = MI->getOperand(1);
+ const MCOperand &Op2 = MI->getOperand(2);
+ const MCOperand &Op3 = MI->getOperand(3);
+
+ bool IsSigned = (Opcode == AArch64::SBFMXri || Opcode == AArch64::SBFMWri);
+ bool Is64Bit = (Opcode == AArch64::SBFMXri || Opcode == AArch64::UBFMXri);
+ if (Op2.isImm() && Op2.getImm() == 0 && Op3.isImm()) {
+ const char *AsmMnemonic = nullptr;
+
+ switch (Op3.getImm()) {
+ default:
+ break;
+ case 7:
+ if (IsSigned)
+ AsmMnemonic = "sxtb";
+ else if (!Is64Bit)
+ AsmMnemonic = "uxtb";
+ break;
+ case 15:
+ if (IsSigned)
+ AsmMnemonic = "sxth";
+ else if (!Is64Bit)
+ AsmMnemonic = "uxth";
+ break;
+ case 31:
+ // *xtw is only valid for signed 64-bit operations.
+ if (Is64Bit && IsSigned)
+ AsmMnemonic = "sxtw";
+ break;
+ }
+
+ if (AsmMnemonic) {
+ O << '\t' << AsmMnemonic << '\t' << getRegisterName(Op0.getReg())
+ << ", " << getRegisterName(getWRegFromXReg(Op1.getReg()));
+ printAnnotation(O, Annot);
+ return;
+ }
+ }
-void
-AArch64InstPrinter::printAddSubImmLSL12Operand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
+    // All immediate shifts are aliases, implemented using the Bitfield
+    // instruction. In all cases the immediate shift amount must be in
+    // the range 0 to (reg.size - 1).
+ if (Op2.isImm() && Op3.isImm()) {
+ const char *AsmMnemonic = nullptr;
+ int shift = 0;
+ int64_t immr = Op2.getImm();
+ int64_t imms = Op3.getImm();
+ if (Opcode == AArch64::UBFMWri && imms != 0x1F && ((imms + 1) == immr)) {
+ AsmMnemonic = "lsl";
+ shift = 31 - imms;
+ } else if (Opcode == AArch64::UBFMXri && imms != 0x3f &&
+ ((imms + 1 == immr))) {
+ AsmMnemonic = "lsl";
+ shift = 63 - imms;
+ } else if (Opcode == AArch64::UBFMWri && imms == 0x1f) {
+ AsmMnemonic = "lsr";
+ shift = immr;
+ } else if (Opcode == AArch64::UBFMXri && imms == 0x3f) {
+ AsmMnemonic = "lsr";
+ shift = immr;
+ } else if (Opcode == AArch64::SBFMWri && imms == 0x1f) {
+ AsmMnemonic = "asr";
+ shift = immr;
+ } else if (Opcode == AArch64::SBFMXri && imms == 0x3f) {
+ AsmMnemonic = "asr";
+ shift = immr;
+ }
+ if (AsmMnemonic) {
+ O << '\t' << AsmMnemonic << '\t' << getRegisterName(Op0.getReg())
+ << ", " << getRegisterName(Op1.getReg()) << ", #" << shift;
+ printAnnotation(O, Annot);
+ return;
+ }
+ }
- printAddSubImmLSL0Operand(MI, OpNum, O);
+ // SBFIZ/UBFIZ aliases
+ if (Op2.getImm() > Op3.getImm()) {
+ O << '\t' << (IsSigned ? "sbfiz" : "ubfiz") << '\t'
+ << getRegisterName(Op0.getReg()) << ", " << getRegisterName(Op1.getReg())
+ << ", #" << (Is64Bit ? 64 : 32) - Op2.getImm() << ", #" << Op3.getImm() + 1;
+ printAnnotation(O, Annot);
+ return;
+ }
- O << ", lsl #12";
-}
+ // Otherwise SBFX/UBFX is the preferred form
+ O << '\t' << (IsSigned ? "sbfx" : "ubfx") << '\t'
+ << getRegisterName(Op0.getReg()) << ", " << getRegisterName(Op1.getReg())
+ << ", #" << Op2.getImm() << ", #" << Op3.getImm() - Op2.getImm() + 1;
+ printAnnotation(O, Annot);
+ return;
+ }
-void
-AArch64InstPrinter::printBareImmOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- const MCOperand &MO = MI->getOperand(OpNum);
- O << MO.getImm();
-}
+ if (Opcode == AArch64::BFMXri || Opcode == AArch64::BFMWri) {
+ const MCOperand &Op0 = MI->getOperand(0); // Op1 == Op0
+ const MCOperand &Op2 = MI->getOperand(2);
+ int ImmR = MI->getOperand(3).getImm();
+ int ImmS = MI->getOperand(4).getImm();
+
+ // BFI alias
+ if (ImmS < ImmR) {
+ int BitWidth = Opcode == AArch64::BFMXri ? 64 : 32;
+ int LSB = (BitWidth - ImmR) % BitWidth;
+ int Width = ImmS + 1;
+ O << "\tbfi\t" << getRegisterName(Op0.getReg()) << ", "
+ << getRegisterName(Op2.getReg()) << ", #" << LSB << ", #" << Width;
+ printAnnotation(O, Annot);
+ return;
+ }
-template<unsigned RegWidth> void
-AArch64InstPrinter::printBFILSBOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- const MCOperand &ImmROp = MI->getOperand(OpNum);
- unsigned LSB = ImmROp.getImm() == 0 ? 0 : RegWidth - ImmROp.getImm();
+ int LSB = ImmR;
+ int Width = ImmS - ImmR + 1;
+    // Otherwise BFXIL is the preferred form
+ O << "\tbfxil\t"
+ << getRegisterName(Op0.getReg()) << ", " << getRegisterName(Op2.getReg())
+ << ", #" << LSB << ", #" << Width;
+ printAnnotation(O, Annot);
+ return;
+ }
- O << '#' << LSB;
-}
+ // Symbolic operands for MOVZ, MOVN and MOVK already imply a shift
+ // (e.g. :gottprel_g1: is always going to be "lsl #16") so it should not be
+ // printed.
+ if ((Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi ||
+ Opcode == AArch64::MOVNXi || Opcode == AArch64::MOVNWi) &&
+ MI->getOperand(1).isExpr()) {
+ if (Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi)
+ O << "\tmovz\t";
+ else
+ O << "\tmovn\t";
-void AArch64InstPrinter::printBFIWidthOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- const MCOperand &ImmSOp = MI->getOperand(OpNum);
- unsigned Width = ImmSOp.getImm() + 1;
+ O << getRegisterName(MI->getOperand(0).getReg()) << ", #"
+ << *MI->getOperand(1).getExpr();
+ return;
+ }
- O << '#' << Width;
-}
+ if ((Opcode == AArch64::MOVKXi || Opcode == AArch64::MOVKWi) &&
+ MI->getOperand(2).isExpr()) {
+ O << "\tmovk\t" << getRegisterName(MI->getOperand(0).getReg()) << ", #"
+ << *MI->getOperand(2).getExpr();
+ return;
+ }
-void
-AArch64InstPrinter::printBFXWidthOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- const MCOperand &ImmSOp = MI->getOperand(OpNum);
- const MCOperand &ImmROp = MI->getOperand(OpNum - 1);
+ if (!printAliasInstr(MI, O))
+ printInstruction(MI, O);
- unsigned ImmR = ImmROp.getImm();
- unsigned ImmS = ImmSOp.getImm();
+ printAnnotation(O, Annot);
+}
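The immediate-shift aliases recognized above follow the architectural encoding of LSL as a UBFM bitfield move; for the 32-bit form, the printer's check (imms + 1 == immr, imms != 0x1f) is the inverse of the mapping sketched below (hypothetical helper, shown only to illustrate the arithmetic):

#include <cassert>

// "lsl wd, wn, #Shift" (Shift != 0) is encoded as UBFMWri with
// immr = (32 - Shift) % 32 and imms = 31 - Shift; the printer above
// recovers Shift as 31 - imms. Illustrative helper only.
static void lslToUbfm32(unsigned Shift, unsigned &ImmR, unsigned &ImmS) {
  ImmR = (32 - Shift) % 32;
  ImmS = 31 - Shift;
}

int main() {
  unsigned ImmR, ImmS;
  lslToUbfm32(4, ImmR, ImmS);
  assert(ImmR == 28 && ImmS == 27);  // imms + 1 == immr holds
  assert(31 - ImmS == 4);            // the printer recovers the shift amount
  return 0;
}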
- assert(ImmS >= ImmR && "Invalid ImmR, ImmS combination for bitfield extract");
+static bool isTblTbxInstruction(unsigned Opcode, StringRef &Layout,
+ bool &IsTbx) {
+ switch (Opcode) {
+ case AArch64::TBXv8i8One:
+ case AArch64::TBXv8i8Two:
+ case AArch64::TBXv8i8Three:
+ case AArch64::TBXv8i8Four:
+ IsTbx = true;
+ Layout = ".8b";
+ return true;
+ case AArch64::TBLv8i8One:
+ case AArch64::TBLv8i8Two:
+ case AArch64::TBLv8i8Three:
+ case AArch64::TBLv8i8Four:
+ IsTbx = false;
+ Layout = ".8b";
+ return true;
+ case AArch64::TBXv16i8One:
+ case AArch64::TBXv16i8Two:
+ case AArch64::TBXv16i8Three:
+ case AArch64::TBXv16i8Four:
+ IsTbx = true;
+ Layout = ".16b";
+ return true;
+ case AArch64::TBLv16i8One:
+ case AArch64::TBLv16i8Two:
+ case AArch64::TBLv16i8Three:
+ case AArch64::TBLv16i8Four:
+ IsTbx = false;
+ Layout = ".16b";
+ return true;
+ default:
+ return false;
+ }
+}
- O << '#' << (ImmS - ImmR + 1);
+struct LdStNInstrDesc {
+ unsigned Opcode;
+ const char *Mnemonic;
+ const char *Layout;
+ int ListOperand;
+ bool HasLane;
+ int NaturalOffset;
+};
+
+static LdStNInstrDesc LdStNInstInfo[] = {
+ { AArch64::LD1i8, "ld1", ".b", 1, true, 0 },
+ { AArch64::LD1i16, "ld1", ".h", 1, true, 0 },
+ { AArch64::LD1i32, "ld1", ".s", 1, true, 0 },
+ { AArch64::LD1i64, "ld1", ".d", 1, true, 0 },
+ { AArch64::LD1i8_POST, "ld1", ".b", 2, true, 1 },
+ { AArch64::LD1i16_POST, "ld1", ".h", 2, true, 2 },
+ { AArch64::LD1i32_POST, "ld1", ".s", 2, true, 4 },
+ { AArch64::LD1i64_POST, "ld1", ".d", 2, true, 8 },
+ { AArch64::LD1Rv16b, "ld1r", ".16b", 0, false, 0 },
+ { AArch64::LD1Rv8h, "ld1r", ".8h", 0, false, 0 },
+ { AArch64::LD1Rv4s, "ld1r", ".4s", 0, false, 0 },
+ { AArch64::LD1Rv2d, "ld1r", ".2d", 0, false, 0 },
+ { AArch64::LD1Rv8b, "ld1r", ".8b", 0, false, 0 },
+ { AArch64::LD1Rv4h, "ld1r", ".4h", 0, false, 0 },
+ { AArch64::LD1Rv2s, "ld1r", ".2s", 0, false, 0 },
+ { AArch64::LD1Rv1d, "ld1r", ".1d", 0, false, 0 },
+ { AArch64::LD1Rv16b_POST, "ld1r", ".16b", 1, false, 1 },
+ { AArch64::LD1Rv8h_POST, "ld1r", ".8h", 1, false, 2 },
+ { AArch64::LD1Rv4s_POST, "ld1r", ".4s", 1, false, 4 },
+ { AArch64::LD1Rv2d_POST, "ld1r", ".2d", 1, false, 8 },
+ { AArch64::LD1Rv8b_POST, "ld1r", ".8b", 1, false, 1 },
+ { AArch64::LD1Rv4h_POST, "ld1r", ".4h", 1, false, 2 },
+ { AArch64::LD1Rv2s_POST, "ld1r", ".2s", 1, false, 4 },
+ { AArch64::LD1Rv1d_POST, "ld1r", ".1d", 1, false, 8 },
+ { AArch64::LD1Onev16b, "ld1", ".16b", 0, false, 0 },
+ { AArch64::LD1Onev8h, "ld1", ".8h", 0, false, 0 },
+ { AArch64::LD1Onev4s, "ld1", ".4s", 0, false, 0 },
+ { AArch64::LD1Onev2d, "ld1", ".2d", 0, false, 0 },
+ { AArch64::LD1Onev8b, "ld1", ".8b", 0, false, 0 },
+ { AArch64::LD1Onev4h, "ld1", ".4h", 0, false, 0 },
+ { AArch64::LD1Onev2s, "ld1", ".2s", 0, false, 0 },
+ { AArch64::LD1Onev1d, "ld1", ".1d", 0, false, 0 },
+ { AArch64::LD1Onev16b_POST, "ld1", ".16b", 1, false, 16 },
+ { AArch64::LD1Onev8h_POST, "ld1", ".8h", 1, false, 16 },
+ { AArch64::LD1Onev4s_POST, "ld1", ".4s", 1, false, 16 },
+ { AArch64::LD1Onev2d_POST, "ld1", ".2d", 1, false, 16 },
+ { AArch64::LD1Onev8b_POST, "ld1", ".8b", 1, false, 8 },
+ { AArch64::LD1Onev4h_POST, "ld1", ".4h", 1, false, 8 },
+ { AArch64::LD1Onev2s_POST, "ld1", ".2s", 1, false, 8 },
+ { AArch64::LD1Onev1d_POST, "ld1", ".1d", 1, false, 8 },
+ { AArch64::LD1Twov16b, "ld1", ".16b", 0, false, 0 },
+ { AArch64::LD1Twov8h, "ld1", ".8h", 0, false, 0 },
+ { AArch64::LD1Twov4s, "ld1", ".4s", 0, false, 0 },
+ { AArch64::LD1Twov2d, "ld1", ".2d", 0, false, 0 },
+ { AArch64::LD1Twov8b, "ld1", ".8b", 0, false, 0 },
+ { AArch64::LD1Twov4h, "ld1", ".4h", 0, false, 0 },
+ { AArch64::LD1Twov2s, "ld1", ".2s", 0, false, 0 },
+ { AArch64::LD1Twov1d, "ld1", ".1d", 0, false, 0 },
+ { AArch64::LD1Twov16b_POST, "ld1", ".16b", 1, false, 32 },
+ { AArch64::LD1Twov8h_POST, "ld1", ".8h", 1, false, 32 },
+ { AArch64::LD1Twov4s_POST, "ld1", ".4s", 1, false, 32 },
+ { AArch64::LD1Twov2d_POST, "ld1", ".2d", 1, false, 32 },
+ { AArch64::LD1Twov8b_POST, "ld1", ".8b", 1, false, 16 },
+ { AArch64::LD1Twov4h_POST, "ld1", ".4h", 1, false, 16 },
+ { AArch64::LD1Twov2s_POST, "ld1", ".2s", 1, false, 16 },
+ { AArch64::LD1Twov1d_POST, "ld1", ".1d", 1, false, 16 },
+ { AArch64::LD1Threev16b, "ld1", ".16b", 0, false, 0 },
+ { AArch64::LD1Threev8h, "ld1", ".8h", 0, false, 0 },
+ { AArch64::LD1Threev4s, "ld1", ".4s", 0, false, 0 },
+ { AArch64::LD1Threev2d, "ld1", ".2d", 0, false, 0 },
+ { AArch64::LD1Threev8b, "ld1", ".8b", 0, false, 0 },
+ { AArch64::LD1Threev4h, "ld1", ".4h", 0, false, 0 },
+ { AArch64::LD1Threev2s, "ld1", ".2s", 0, false, 0 },
+ { AArch64::LD1Threev1d, "ld1", ".1d", 0, false, 0 },
+ { AArch64::LD1Threev16b_POST, "ld1", ".16b", 1, false, 48 },
+ { AArch64::LD1Threev8h_POST, "ld1", ".8h", 1, false, 48 },
+ { AArch64::LD1Threev4s_POST, "ld1", ".4s", 1, false, 48 },
+ { AArch64::LD1Threev2d_POST, "ld1", ".2d", 1, false, 48 },
+ { AArch64::LD1Threev8b_POST, "ld1", ".8b", 1, false, 24 },
+ { AArch64::LD1Threev4h_POST, "ld1", ".4h", 1, false, 24 },
+ { AArch64::LD1Threev2s_POST, "ld1", ".2s", 1, false, 24 },
+ { AArch64::LD1Threev1d_POST, "ld1", ".1d", 1, false, 24 },
+ { AArch64::LD1Fourv16b, "ld1", ".16b", 0, false, 0 },
+ { AArch64::LD1Fourv8h, "ld1", ".8h", 0, false, 0 },
+ { AArch64::LD1Fourv4s, "ld1", ".4s", 0, false, 0 },
+ { AArch64::LD1Fourv2d, "ld1", ".2d", 0, false, 0 },
+ { AArch64::LD1Fourv8b, "ld1", ".8b", 0, false, 0 },
+ { AArch64::LD1Fourv4h, "ld1", ".4h", 0, false, 0 },
+ { AArch64::LD1Fourv2s, "ld1", ".2s", 0, false, 0 },
+ { AArch64::LD1Fourv1d, "ld1", ".1d", 0, false, 0 },
+ { AArch64::LD1Fourv16b_POST, "ld1", ".16b", 1, false, 64 },
+ { AArch64::LD1Fourv8h_POST, "ld1", ".8h", 1, false, 64 },
+ { AArch64::LD1Fourv4s_POST, "ld1", ".4s", 1, false, 64 },
+ { AArch64::LD1Fourv2d_POST, "ld1", ".2d", 1, false, 64 },
+ { AArch64::LD1Fourv8b_POST, "ld1", ".8b", 1, false, 32 },
+ { AArch64::LD1Fourv4h_POST, "ld1", ".4h", 1, false, 32 },
+ { AArch64::LD1Fourv2s_POST, "ld1", ".2s", 1, false, 32 },
+ { AArch64::LD1Fourv1d_POST, "ld1", ".1d", 1, false, 32 },
+ { AArch64::LD2i8, "ld2", ".b", 1, true, 0 },
+ { AArch64::LD2i16, "ld2", ".h", 1, true, 0 },
+ { AArch64::LD2i32, "ld2", ".s", 1, true, 0 },
+ { AArch64::LD2i64, "ld2", ".d", 1, true, 0 },
+ { AArch64::LD2i8_POST, "ld2", ".b", 2, true, 2 },
+ { AArch64::LD2i16_POST, "ld2", ".h", 2, true, 4 },
+ { AArch64::LD2i32_POST, "ld2", ".s", 2, true, 8 },
+ { AArch64::LD2i64_POST, "ld2", ".d", 2, true, 16 },
+ { AArch64::LD2Rv16b, "ld2r", ".16b", 0, false, 0 },
+ { AArch64::LD2Rv8h, "ld2r", ".8h", 0, false, 0 },
+ { AArch64::LD2Rv4s, "ld2r", ".4s", 0, false, 0 },
+ { AArch64::LD2Rv2d, "ld2r", ".2d", 0, false, 0 },
+ { AArch64::LD2Rv8b, "ld2r", ".8b", 0, false, 0 },
+ { AArch64::LD2Rv4h, "ld2r", ".4h", 0, false, 0 },
+ { AArch64::LD2Rv2s, "ld2r", ".2s", 0, false, 0 },
+ { AArch64::LD2Rv1d, "ld2r", ".1d", 0, false, 0 },
+ { AArch64::LD2Rv16b_POST, "ld2r", ".16b", 1, false, 2 },
+ { AArch64::LD2Rv8h_POST, "ld2r", ".8h", 1, false, 4 },
+ { AArch64::LD2Rv4s_POST, "ld2r", ".4s", 1, false, 8 },
+ { AArch64::LD2Rv2d_POST, "ld2r", ".2d", 1, false, 16 },
+ { AArch64::LD2Rv8b_POST, "ld2r", ".8b", 1, false, 2 },
+ { AArch64::LD2Rv4h_POST, "ld2r", ".4h", 1, false, 4 },
+ { AArch64::LD2Rv2s_POST, "ld2r", ".2s", 1, false, 8 },
+ { AArch64::LD2Rv1d_POST, "ld2r", ".1d", 1, false, 16 },
+ { AArch64::LD2Twov16b, "ld2", ".16b", 0, false, 0 },
+ { AArch64::LD2Twov8h, "ld2", ".8h", 0, false, 0 },
+ { AArch64::LD2Twov4s, "ld2", ".4s", 0, false, 0 },
+ { AArch64::LD2Twov2d, "ld2", ".2d", 0, false, 0 },
+ { AArch64::LD2Twov8b, "ld2", ".8b", 0, false, 0 },
+ { AArch64::LD2Twov4h, "ld2", ".4h", 0, false, 0 },
+ { AArch64::LD2Twov2s, "ld2", ".2s", 0, false, 0 },
+ { AArch64::LD2Twov16b_POST, "ld2", ".16b", 1, false, 32 },
+ { AArch64::LD2Twov8h_POST, "ld2", ".8h", 1, false, 32 },
+ { AArch64::LD2Twov4s_POST, "ld2", ".4s", 1, false, 32 },
+ { AArch64::LD2Twov2d_POST, "ld2", ".2d", 1, false, 32 },
+ { AArch64::LD2Twov8b_POST, "ld2", ".8b", 1, false, 16 },
+ { AArch64::LD2Twov4h_POST, "ld2", ".4h", 1, false, 16 },
+ { AArch64::LD2Twov2s_POST, "ld2", ".2s", 1, false, 16 },
+ { AArch64::LD3i8, "ld3", ".b", 1, true, 0 },
+ { AArch64::LD3i16, "ld3", ".h", 1, true, 0 },
+ { AArch64::LD3i32, "ld3", ".s", 1, true, 0 },
+ { AArch64::LD3i64, "ld3", ".d", 1, true, 0 },
+ { AArch64::LD3i8_POST, "ld3", ".b", 2, true, 3 },
+ { AArch64::LD3i16_POST, "ld3", ".h", 2, true, 6 },
+ { AArch64::LD3i32_POST, "ld3", ".s", 2, true, 12 },
+ { AArch64::LD3i64_POST, "ld3", ".d", 2, true, 24 },
+ { AArch64::LD3Rv16b, "ld3r", ".16b", 0, false, 0 },
+ { AArch64::LD3Rv8h, "ld3r", ".8h", 0, false, 0 },
+ { AArch64::LD3Rv4s, "ld3r", ".4s", 0, false, 0 },
+ { AArch64::LD3Rv2d, "ld3r", ".2d", 0, false, 0 },
+ { AArch64::LD3Rv8b, "ld3r", ".8b", 0, false, 0 },
+ { AArch64::LD3Rv4h, "ld3r", ".4h", 0, false, 0 },
+ { AArch64::LD3Rv2s, "ld3r", ".2s", 0, false, 0 },
+ { AArch64::LD3Rv1d, "ld3r", ".1d", 0, false, 0 },
+ { AArch64::LD3Rv16b_POST, "ld3r", ".16b", 1, false, 3 },
+ { AArch64::LD3Rv8h_POST, "ld3r", ".8h", 1, false, 6 },
+ { AArch64::LD3Rv4s_POST, "ld3r", ".4s", 1, false, 12 },
+ { AArch64::LD3Rv2d_POST, "ld3r", ".2d", 1, false, 24 },
+ { AArch64::LD3Rv8b_POST, "ld3r", ".8b", 1, false, 3 },
+ { AArch64::LD3Rv4h_POST, "ld3r", ".4h", 1, false, 6 },
+ { AArch64::LD3Rv2s_POST, "ld3r", ".2s", 1, false, 12 },
+ { AArch64::LD3Rv1d_POST, "ld3r", ".1d", 1, false, 24 },
+ { AArch64::LD3Threev16b, "ld3", ".16b", 0, false, 0 },
+ { AArch64::LD3Threev8h, "ld3", ".8h", 0, false, 0 },
+ { AArch64::LD3Threev4s, "ld3", ".4s", 0, false, 0 },
+ { AArch64::LD3Threev2d, "ld3", ".2d", 0, false, 0 },
+ { AArch64::LD3Threev8b, "ld3", ".8b", 0, false, 0 },
+ { AArch64::LD3Threev4h, "ld3", ".4h", 0, false, 0 },
+ { AArch64::LD3Threev2s, "ld3", ".2s", 0, false, 0 },
+ { AArch64::LD3Threev16b_POST, "ld3", ".16b", 1, false, 48 },
+ { AArch64::LD3Threev8h_POST, "ld3", ".8h", 1, false, 48 },
+ { AArch64::LD3Threev4s_POST, "ld3", ".4s", 1, false, 48 },
+ { AArch64::LD3Threev2d_POST, "ld3", ".2d", 1, false, 48 },
+ { AArch64::LD3Threev8b_POST, "ld3", ".8b", 1, false, 24 },
+ { AArch64::LD3Threev4h_POST, "ld3", ".4h", 1, false, 24 },
+ { AArch64::LD3Threev2s_POST, "ld3", ".2s", 1, false, 24 },
+ { AArch64::LD4i8, "ld4", ".b", 1, true, 0 },
+ { AArch64::LD4i16, "ld4", ".h", 1, true, 0 },
+ { AArch64::LD4i32, "ld4", ".s", 1, true, 0 },
+ { AArch64::LD4i64, "ld4", ".d", 1, true, 0 },
+ { AArch64::LD4i8_POST, "ld4", ".b", 2, true, 4 },
+ { AArch64::LD4i16_POST, "ld4", ".h", 2, true, 8 },
+ { AArch64::LD4i32_POST, "ld4", ".s", 2, true, 16 },
+ { AArch64::LD4i64_POST, "ld4", ".d", 2, true, 32 },
+ { AArch64::LD4Rv16b, "ld4r", ".16b", 0, false, 0 },
+ { AArch64::LD4Rv8h, "ld4r", ".8h", 0, false, 0 },
+ { AArch64::LD4Rv4s, "ld4r", ".4s", 0, false, 0 },
+ { AArch64::LD4Rv2d, "ld4r", ".2d", 0, false, 0 },
+ { AArch64::LD4Rv8b, "ld4r", ".8b", 0, false, 0 },
+ { AArch64::LD4Rv4h, "ld4r", ".4h", 0, false, 0 },
+ { AArch64::LD4Rv2s, "ld4r", ".2s", 0, false, 0 },
+ { AArch64::LD4Rv1d, "ld4r", ".1d", 0, false, 0 },
+ { AArch64::LD4Rv16b_POST, "ld4r", ".16b", 1, false, 4 },
+ { AArch64::LD4Rv8h_POST, "ld4r", ".8h", 1, false, 8 },
+ { AArch64::LD4Rv4s_POST, "ld4r", ".4s", 1, false, 16 },
+ { AArch64::LD4Rv2d_POST, "ld4r", ".2d", 1, false, 32 },
+ { AArch64::LD4Rv8b_POST, "ld4r", ".8b", 1, false, 4 },
+ { AArch64::LD4Rv4h_POST, "ld4r", ".4h", 1, false, 8 },
+ { AArch64::LD4Rv2s_POST, "ld4r", ".2s", 1, false, 16 },
+ { AArch64::LD4Rv1d_POST, "ld4r", ".1d", 1, false, 32 },
+ { AArch64::LD4Fourv16b, "ld4", ".16b", 0, false, 0 },
+ { AArch64::LD4Fourv8h, "ld4", ".8h", 0, false, 0 },
+ { AArch64::LD4Fourv4s, "ld4", ".4s", 0, false, 0 },
+ { AArch64::LD4Fourv2d, "ld4", ".2d", 0, false, 0 },
+ { AArch64::LD4Fourv8b, "ld4", ".8b", 0, false, 0 },
+ { AArch64::LD4Fourv4h, "ld4", ".4h", 0, false, 0 },
+ { AArch64::LD4Fourv2s, "ld4", ".2s", 0, false, 0 },
+ { AArch64::LD4Fourv16b_POST, "ld4", ".16b", 1, false, 64 },
+ { AArch64::LD4Fourv8h_POST, "ld4", ".8h", 1, false, 64 },
+ { AArch64::LD4Fourv4s_POST, "ld4", ".4s", 1, false, 64 },
+ { AArch64::LD4Fourv2d_POST, "ld4", ".2d", 1, false, 64 },
+ { AArch64::LD4Fourv8b_POST, "ld4", ".8b", 1, false, 32 },
+ { AArch64::LD4Fourv4h_POST, "ld4", ".4h", 1, false, 32 },
+ { AArch64::LD4Fourv2s_POST, "ld4", ".2s", 1, false, 32 },
+ { AArch64::ST1i8, "st1", ".b", 0, true, 0 },
+ { AArch64::ST1i16, "st1", ".h", 0, true, 0 },
+ { AArch64::ST1i32, "st1", ".s", 0, true, 0 },
+ { AArch64::ST1i64, "st1", ".d", 0, true, 0 },
+ { AArch64::ST1i8_POST, "st1", ".b", 1, true, 1 },
+ { AArch64::ST1i16_POST, "st1", ".h", 1, true, 2 },
+ { AArch64::ST1i32_POST, "st1", ".s", 1, true, 4 },
+ { AArch64::ST1i64_POST, "st1", ".d", 1, true, 8 },
+ { AArch64::ST1Onev16b, "st1", ".16b", 0, false, 0 },
+ { AArch64::ST1Onev8h, "st1", ".8h", 0, false, 0 },
+ { AArch64::ST1Onev4s, "st1", ".4s", 0, false, 0 },
+ { AArch64::ST1Onev2d, "st1", ".2d", 0, false, 0 },
+ { AArch64::ST1Onev8b, "st1", ".8b", 0, false, 0 },
+ { AArch64::ST1Onev4h, "st1", ".4h", 0, false, 0 },
+ { AArch64::ST1Onev2s, "st1", ".2s", 0, false, 0 },
+ { AArch64::ST1Onev1d, "st1", ".1d", 0, false, 0 },
+ { AArch64::ST1Onev16b_POST, "st1", ".16b", 1, false, 16 },
+ { AArch64::ST1Onev8h_POST, "st1", ".8h", 1, false, 16 },
+ { AArch64::ST1Onev4s_POST, "st1", ".4s", 1, false, 16 },
+ { AArch64::ST1Onev2d_POST, "st1", ".2d", 1, false, 16 },
+ { AArch64::ST1Onev8b_POST, "st1", ".8b", 1, false, 8 },
+ { AArch64::ST1Onev4h_POST, "st1", ".4h", 1, false, 8 },
+ { AArch64::ST1Onev2s_POST, "st1", ".2s", 1, false, 8 },
+ { AArch64::ST1Onev1d_POST, "st1", ".1d", 1, false, 8 },
+ { AArch64::ST1Twov16b, "st1", ".16b", 0, false, 0 },
+ { AArch64::ST1Twov8h, "st1", ".8h", 0, false, 0 },
+ { AArch64::ST1Twov4s, "st1", ".4s", 0, false, 0 },
+ { AArch64::ST1Twov2d, "st1", ".2d", 0, false, 0 },
+ { AArch64::ST1Twov8b, "st1", ".8b", 0, false, 0 },
+ { AArch64::ST1Twov4h, "st1", ".4h", 0, false, 0 },
+ { AArch64::ST1Twov2s, "st1", ".2s", 0, false, 0 },
+ { AArch64::ST1Twov1d, "st1", ".1d", 0, false, 0 },
+ { AArch64::ST1Twov16b_POST, "st1", ".16b", 1, false, 32 },
+ { AArch64::ST1Twov8h_POST, "st1", ".8h", 1, false, 32 },
+ { AArch64::ST1Twov4s_POST, "st1", ".4s", 1, false, 32 },
+ { AArch64::ST1Twov2d_POST, "st1", ".2d", 1, false, 32 },
+ { AArch64::ST1Twov8b_POST, "st1", ".8b", 1, false, 16 },
+ { AArch64::ST1Twov4h_POST, "st1", ".4h", 1, false, 16 },
+ { AArch64::ST1Twov2s_POST, "st1", ".2s", 1, false, 16 },
+ { AArch64::ST1Twov1d_POST, "st1", ".1d", 1, false, 16 },
+ { AArch64::ST1Threev16b, "st1", ".16b", 0, false, 0 },
+ { AArch64::ST1Threev8h, "st1", ".8h", 0, false, 0 },
+ { AArch64::ST1Threev4s, "st1", ".4s", 0, false, 0 },
+ { AArch64::ST1Threev2d, "st1", ".2d", 0, false, 0 },
+ { AArch64::ST1Threev8b, "st1", ".8b", 0, false, 0 },
+ { AArch64::ST1Threev4h, "st1", ".4h", 0, false, 0 },
+ { AArch64::ST1Threev2s, "st1", ".2s", 0, false, 0 },
+ { AArch64::ST1Threev1d, "st1", ".1d", 0, false, 0 },
+ { AArch64::ST1Threev16b_POST, "st1", ".16b", 1, false, 48 },
+ { AArch64::ST1Threev8h_POST, "st1", ".8h", 1, false, 48 },
+ { AArch64::ST1Threev4s_POST, "st1", ".4s", 1, false, 48 },
+ { AArch64::ST1Threev2d_POST, "st1", ".2d", 1, false, 48 },
+ { AArch64::ST1Threev8b_POST, "st1", ".8b", 1, false, 24 },
+ { AArch64::ST1Threev4h_POST, "st1", ".4h", 1, false, 24 },
+ { AArch64::ST1Threev2s_POST, "st1", ".2s", 1, false, 24 },
+ { AArch64::ST1Threev1d_POST, "st1", ".1d", 1, false, 24 },
+ { AArch64::ST1Fourv16b, "st1", ".16b", 0, false, 0 },
+ { AArch64::ST1Fourv8h, "st1", ".8h", 0, false, 0 },
+ { AArch64::ST1Fourv4s, "st1", ".4s", 0, false, 0 },
+ { AArch64::ST1Fourv2d, "st1", ".2d", 0, false, 0 },
+ { AArch64::ST1Fourv8b, "st1", ".8b", 0, false, 0 },
+ { AArch64::ST1Fourv4h, "st1", ".4h", 0, false, 0 },
+ { AArch64::ST1Fourv2s, "st1", ".2s", 0, false, 0 },
+ { AArch64::ST1Fourv1d, "st1", ".1d", 0, false, 0 },
+ { AArch64::ST1Fourv16b_POST, "st1", ".16b", 1, false, 64 },
+ { AArch64::ST1Fourv8h_POST, "st1", ".8h", 1, false, 64 },
+ { AArch64::ST1Fourv4s_POST, "st1", ".4s", 1, false, 64 },
+ { AArch64::ST1Fourv2d_POST, "st1", ".2d", 1, false, 64 },
+ { AArch64::ST1Fourv8b_POST, "st1", ".8b", 1, false, 32 },
+ { AArch64::ST1Fourv4h_POST, "st1", ".4h", 1, false, 32 },
+ { AArch64::ST1Fourv2s_POST, "st1", ".2s", 1, false, 32 },
+ { AArch64::ST1Fourv1d_POST, "st1", ".1d", 1, false, 32 },
+ { AArch64::ST2i8, "st2", ".b", 0, true, 0 },
+ { AArch64::ST2i16, "st2", ".h", 0, true, 0 },
+ { AArch64::ST2i32, "st2", ".s", 0, true, 0 },
+ { AArch64::ST2i64, "st2", ".d", 0, true, 0 },
+ { AArch64::ST2i8_POST, "st2", ".b", 1, true, 2 },
+ { AArch64::ST2i16_POST, "st2", ".h", 1, true, 4 },
+ { AArch64::ST2i32_POST, "st2", ".s", 1, true, 8 },
+ { AArch64::ST2i64_POST, "st2", ".d", 1, true, 16 },
+ { AArch64::ST2Twov16b, "st2", ".16b", 0, false, 0 },
+ { AArch64::ST2Twov8h, "st2", ".8h", 0, false, 0 },
+ { AArch64::ST2Twov4s, "st2", ".4s", 0, false, 0 },
+ { AArch64::ST2Twov2d, "st2", ".2d", 0, false, 0 },
+ { AArch64::ST2Twov8b, "st2", ".8b", 0, false, 0 },
+ { AArch64::ST2Twov4h, "st2", ".4h", 0, false, 0 },
+ { AArch64::ST2Twov2s, "st2", ".2s", 0, false, 0 },
+ { AArch64::ST2Twov16b_POST, "st2", ".16b", 1, false, 32 },
+ { AArch64::ST2Twov8h_POST, "st2", ".8h", 1, false, 32 },
+ { AArch64::ST2Twov4s_POST, "st2", ".4s", 1, false, 32 },
+ { AArch64::ST2Twov2d_POST, "st2", ".2d", 1, false, 32 },
+ { AArch64::ST2Twov8b_POST, "st2", ".8b", 1, false, 16 },
+ { AArch64::ST2Twov4h_POST, "st2", ".4h", 1, false, 16 },
+ { AArch64::ST2Twov2s_POST, "st2", ".2s", 1, false, 16 },
+ { AArch64::ST3i8, "st3", ".b", 0, true, 0 },
+ { AArch64::ST3i16, "st3", ".h", 0, true, 0 },
+ { AArch64::ST3i32, "st3", ".s", 0, true, 0 },
+ { AArch64::ST3i64, "st3", ".d", 0, true, 0 },
+ { AArch64::ST3i8_POST, "st3", ".b", 1, true, 3 },
+ { AArch64::ST3i16_POST, "st3", ".h", 1, true, 6 },
+ { AArch64::ST3i32_POST, "st3", ".s", 1, true, 12 },
+ { AArch64::ST3i64_POST, "st3", ".d", 1, true, 24 },
+ { AArch64::ST3Threev16b, "st3", ".16b", 0, false, 0 },
+ { AArch64::ST3Threev8h, "st3", ".8h", 0, false, 0 },
+ { AArch64::ST3Threev4s, "st3", ".4s", 0, false, 0 },
+ { AArch64::ST3Threev2d, "st3", ".2d", 0, false, 0 },
+ { AArch64::ST3Threev8b, "st3", ".8b", 0, false, 0 },
+ { AArch64::ST3Threev4h, "st3", ".4h", 0, false, 0 },
+ { AArch64::ST3Threev2s, "st3", ".2s", 0, false, 0 },
+ { AArch64::ST3Threev16b_POST, "st3", ".16b", 1, false, 48 },
+ { AArch64::ST3Threev8h_POST, "st3", ".8h", 1, false, 48 },
+ { AArch64::ST3Threev4s_POST, "st3", ".4s", 1, false, 48 },
+ { AArch64::ST3Threev2d_POST, "st3", ".2d", 1, false, 48 },
+ { AArch64::ST3Threev8b_POST, "st3", ".8b", 1, false, 24 },
+ { AArch64::ST3Threev4h_POST, "st3", ".4h", 1, false, 24 },
+ { AArch64::ST3Threev2s_POST, "st3", ".2s", 1, false, 24 },
+ { AArch64::ST4i8, "st4", ".b", 0, true, 0 },
+ { AArch64::ST4i16, "st4", ".h", 0, true, 0 },
+ { AArch64::ST4i32, "st4", ".s", 0, true, 0 },
+ { AArch64::ST4i64, "st4", ".d", 0, true, 0 },
+ { AArch64::ST4i8_POST, "st4", ".b", 1, true, 4 },
+ { AArch64::ST4i16_POST, "st4", ".h", 1, true, 8 },
+ { AArch64::ST4i32_POST, "st4", ".s", 1, true, 16 },
+ { AArch64::ST4i64_POST, "st4", ".d", 1, true, 32 },
+ { AArch64::ST4Fourv16b, "st4", ".16b", 0, false, 0 },
+ { AArch64::ST4Fourv8h, "st4", ".8h", 0, false, 0 },
+ { AArch64::ST4Fourv4s, "st4", ".4s", 0, false, 0 },
+ { AArch64::ST4Fourv2d, "st4", ".2d", 0, false, 0 },
+ { AArch64::ST4Fourv8b, "st4", ".8b", 0, false, 0 },
+ { AArch64::ST4Fourv4h, "st4", ".4h", 0, false, 0 },
+ { AArch64::ST4Fourv2s, "st4", ".2s", 0, false, 0 },
+ { AArch64::ST4Fourv16b_POST, "st4", ".16b", 1, false, 64 },
+ { AArch64::ST4Fourv8h_POST, "st4", ".8h", 1, false, 64 },
+ { AArch64::ST4Fourv4s_POST, "st4", ".4s", 1, false, 64 },
+ { AArch64::ST4Fourv2d_POST, "st4", ".2d", 1, false, 64 },
+ { AArch64::ST4Fourv8b_POST, "st4", ".8b", 1, false, 32 },
+ { AArch64::ST4Fourv4h_POST, "st4", ".4h", 1, false, 32 },
+ { AArch64::ST4Fourv2s_POST, "st4", ".2s", 1, false, 32 },
+};
+
+static LdStNInstrDesc *getLdStNInstrDesc(unsigned Opcode) {
+ unsigned Idx;
+ for (Idx = 0; Idx != array_lengthof(LdStNInstInfo); ++Idx)
+ if (LdStNInstInfo[Idx].Opcode == Opcode)
+ return &LdStNInstInfo[Idx];
+
+ return nullptr;
}
-void
-AArch64InstPrinter::printCRxOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- const MCOperand &CRx = MI->getOperand(OpNum);
+void AArch64AppleInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
+ StringRef Annot) {
+ unsigned Opcode = MI->getOpcode();
+ StringRef Layout, Mnemonic;
- O << 'c' << CRx.getImm();
-}
+ bool IsTbx;
+ if (isTblTbxInstruction(MI->getOpcode(), Layout, IsTbx)) {
+ O << "\t" << (IsTbx ? "tbx" : "tbl") << Layout << '\t'
+ << getRegisterName(MI->getOperand(0).getReg(), AArch64::vreg) << ", ";
+ unsigned ListOpNum = IsTbx ? 2 : 1;
+ printVectorList(MI, ListOpNum, O, "");
-void
-AArch64InstPrinter::printCVTFixedPosOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- const MCOperand &ScaleOp = MI->getOperand(OpNum);
+ O << ", "
+ << getRegisterName(MI->getOperand(ListOpNum + 1).getReg(), AArch64::vreg);
+ printAnnotation(O, Annot);
+ return;
+ }
- O << '#' << (64 - ScaleOp.getImm());
-}
+ if (LdStNInstrDesc *LdStDesc = getLdStNInstrDesc(Opcode)) {
+ O << "\t" << LdStDesc->Mnemonic << LdStDesc->Layout << '\t';
+
+ // Now onto the operands: first a vector list with possible lane
+ // specifier. E.g. { v0 }[2]
+ int OpNum = LdStDesc->ListOperand;
+ printVectorList(MI, OpNum++, O, "");
+
+ if (LdStDesc->HasLane)
+ O << '[' << MI->getOperand(OpNum++).getImm() << ']';
+
+ // Next the address: [xN]
+ unsigned AddrReg = MI->getOperand(OpNum++).getReg();
+ O << ", [" << getRegisterName(AddrReg) << ']';
+
+ // Finally, there might be a post-indexed offset.
+ if (LdStDesc->NaturalOffset != 0) {
+ unsigned Reg = MI->getOperand(OpNum++).getReg();
+ if (Reg != AArch64::XZR)
+ O << ", " << getRegisterName(Reg);
+ else {
+ assert(LdStDesc->NaturalOffset && "no offset on post-inc instruction?");
+ O << ", #" << LdStDesc->NaturalOffset;
+ }
+ }
+ printAnnotation(O, Annot);
+ return;
+ }
-void AArch64InstPrinter::printFPImmOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &o) {
- const MCOperand &MOImm8 = MI->getOperand(OpNum);
+ AArch64InstPrinter::printInst(MI, O, Annot);
+}
- assert(MOImm8.isImm()
- && "Immediate operand required for floating-point immediate inst");
+bool AArch64InstPrinter::printSysAlias(const MCInst *MI, raw_ostream &O) {
+#ifndef NDEBUG
+ unsigned Opcode = MI->getOpcode();
+ assert(Opcode == AArch64::SYSxt && "Invalid opcode for SYS alias!");
+#endif
+
+ const char *Asm = nullptr;
+ const MCOperand &Op1 = MI->getOperand(0);
+ const MCOperand &Cn = MI->getOperand(1);
+ const MCOperand &Cm = MI->getOperand(2);
+ const MCOperand &Op2 = MI->getOperand(3);
+
+ unsigned Op1Val = Op1.getImm();
+ unsigned CnVal = Cn.getImm();
+ unsigned CmVal = Cm.getImm();
+ unsigned Op2Val = Op2.getImm();
+
+ if (CnVal == 7) {
+ switch (CmVal) {
+ default:
+ break;
+
+ // IC aliases
+ case 1:
+ if (Op1Val == 0 && Op2Val == 0)
+ Asm = "ic\tialluis";
+ break;
+ case 5:
+ if (Op1Val == 0 && Op2Val == 0)
+ Asm = "ic\tiallu";
+ else if (Op1Val == 3 && Op2Val == 1)
+ Asm = "ic\tivau";
+ break;
+
+ // DC aliases
+ case 4:
+ if (Op1Val == 3 && Op2Val == 1)
+ Asm = "dc\tzva";
+ break;
+ case 6:
+ if (Op1Val == 0 && Op2Val == 1)
+ Asm = "dc\tivac";
+ if (Op1Val == 0 && Op2Val == 2)
+ Asm = "dc\tisw";
+ break;
+ case 10:
+ if (Op1Val == 3 && Op2Val == 1)
+ Asm = "dc\tcvac";
+ else if (Op1Val == 0 && Op2Val == 2)
+ Asm = "dc\tcsw";
+ break;
+ case 11:
+ if (Op1Val == 3 && Op2Val == 1)
+ Asm = "dc\tcvau";
+ break;
+ case 14:
+ if (Op1Val == 3 && Op2Val == 1)
+ Asm = "dc\tcivac";
+ else if (Op1Val == 0 && Op2Val == 2)
+ Asm = "dc\tcisw";
+ break;
+
+ // AT aliases
+ case 8:
+ switch (Op1Val) {
+ default:
+ break;
+ case 0:
+ switch (Op2Val) {
+ default:
+ break;
+ case 0: Asm = "at\ts1e1r"; break;
+ case 1: Asm = "at\ts1e1w"; break;
+ case 2: Asm = "at\ts1e0r"; break;
+ case 3: Asm = "at\ts1e0w"; break;
+ }
+ break;
+ case 4:
+ switch (Op2Val) {
+ default:
+ break;
+ case 0: Asm = "at\ts1e2r"; break;
+ case 1: Asm = "at\ts1e2w"; break;
+ case 4: Asm = "at\ts12e1r"; break;
+ case 5: Asm = "at\ts12e1w"; break;
+ case 6: Asm = "at\ts12e0r"; break;
+ case 7: Asm = "at\ts12e0w"; break;
+ }
+ break;
+ case 6:
+ switch (Op2Val) {
+ default:
+ break;
+ case 0: Asm = "at\ts1e3r"; break;
+ case 1: Asm = "at\ts1e3w"; break;
+ }
+ break;
+ }
+ break;
+ }
+ } else if (CnVal == 8) {
+ // TLBI aliases
+ switch (CmVal) {
+ default:
+ break;
+ case 3:
+ switch (Op1Val) {
+ default:
+ break;
+ case 0:
+ switch (Op2Val) {
+ default:
+ break;
+ case 0: Asm = "tlbi\tvmalle1is"; break;
+ case 1: Asm = "tlbi\tvae1is"; break;
+ case 2: Asm = "tlbi\taside1is"; break;
+ case 3: Asm = "tlbi\tvaae1is"; break;
+ case 5: Asm = "tlbi\tvale1is"; break;
+ case 7: Asm = "tlbi\tvaale1is"; break;
+ }
+ break;
+ case 4:
+ switch (Op2Val) {
+ default:
+ break;
+ case 0: Asm = "tlbi\talle2is"; break;
+ case 1: Asm = "tlbi\tvae2is"; break;
+ case 4: Asm = "tlbi\talle1is"; break;
+ case 5: Asm = "tlbi\tvale2is"; break;
+ case 6: Asm = "tlbi\tvmalls12e1is"; break;
+ }
+ break;
+ case 6:
+ switch (Op2Val) {
+ default:
+ break;
+ case 0: Asm = "tlbi\talle3is"; break;
+ case 1: Asm = "tlbi\tvae3is"; break;
+ case 5: Asm = "tlbi\tvale3is"; break;
+ }
+ break;
+ }
+ break;
+ case 0:
+ switch (Op1Val) {
+ default:
+ break;
+ case 4:
+ switch (Op2Val) {
+ default:
+ break;
+ case 1: Asm = "tlbi\tipas2e1is"; break;
+ case 5: Asm = "tlbi\tipas2le1is"; break;
+ }
+ break;
+ }
+ break;
+ case 4:
+ switch (Op1Val) {
+ default:
+ break;
+ case 4:
+ switch (Op2Val) {
+ default:
+ break;
+ case 1: Asm = "tlbi\tipas2e1"; break;
+ case 5: Asm = "tlbi\tipas2le1"; break;
+ }
+ break;
+ }
+ break;
+ case 7:
+ switch (Op1Val) {
+ default:
+ break;
+ case 0:
+ switch (Op2Val) {
+ default:
+ break;
+ case 0: Asm = "tlbi\tvmalle1"; break;
+ case 1: Asm = "tlbi\tvae1"; break;
+ case 2: Asm = "tlbi\taside1"; break;
+ case 3: Asm = "tlbi\tvaae1"; break;
+ case 5: Asm = "tlbi\tvale1"; break;
+ case 7: Asm = "tlbi\tvaale1"; break;
+ }
+ break;
+ case 4:
+ switch (Op2Val) {
+ default:
+ break;
+ case 0: Asm = "tlbi\talle2"; break;
+ case 1: Asm = "tlbi\tvae2"; break;
+ case 4: Asm = "tlbi\talle1"; break;
+ case 5: Asm = "tlbi\tvale2"; break;
+ case 6: Asm = "tlbi\tvmalls12e1"; break;
+ }
+ break;
+ case 6:
+ switch (Op2Val) {
+ default:
+ break;
+ case 0: Asm = "tlbi\talle3"; break;
+ case 1: Asm = "tlbi\tvae3"; break;
+ case 5: Asm = "tlbi\tvale3"; break;
+ }
+ break;
+ }
+ break;
+ }
+ }
- uint32_t Imm8 = MOImm8.getImm();
- uint32_t Fraction = Imm8 & 0xf;
- uint32_t Exponent = (Imm8 >> 4) & 0x7;
- uint32_t Negative = (Imm8 >> 7) & 0x1;
+ if (Asm) {
+ unsigned Reg = MI->getOperand(4).getReg();
- float Val = 1.0f + Fraction / 16.0f;
+ O << '\t' << Asm;
+ if (StringRef(Asm).lower().find("all") == StringRef::npos)
+ O << ", " << getRegisterName(Reg);
+ }
- // That is:
- // 000 -> 2^1, 001 -> 2^2, 010 -> 2^3, 011 -> 2^4,
- // 100 -> 2^-3, 101 -> 2^-2, 110 -> 2^-1, 111 -> 2^0
- if (Exponent & 0x4) {
- Val /= 1 << (7 - Exponent);
+ return Asm != nullptr;
+}
+
+void AArch64InstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ if (Op.isReg()) {
+ unsigned Reg = Op.getReg();
+ O << getRegisterName(Reg);
+ } else if (Op.isImm()) {
+ O << '#' << Op.getImm();
} else {
- Val *= 1 << (Exponent + 1);
+ assert(Op.isExpr() && "unknown operand kind in printOperand");
+ O << *Op.getExpr();
}
+}
- Val = Negative ? -Val : Val;
-
- o << '#' << format("%.8f", Val);
+void AArch64InstPrinter::printHexImm(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ O << format("#%#llx", Op.getImm());
}
-void AArch64InstPrinter::printFPZeroOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &o) {
- o << "#0.0";
+void AArch64InstPrinter::printPostIncOperand(const MCInst *MI, unsigned OpNo,
+ unsigned Imm, raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ if (Op.isReg()) {
+ unsigned Reg = Op.getReg();
+ if (Reg == AArch64::XZR)
+ O << "#" << Imm;
+ else
+ O << getRegisterName(Reg);
+ } else
+ llvm_unreachable("unknown operand kind in printPostIncOperand64");
}
-void
-AArch64InstPrinter::printCondCodeOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- const MCOperand &MO = MI->getOperand(OpNum);
+void AArch64InstPrinter::printVRegOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ assert(Op.isReg() && "Non-register vreg operand!");
+ unsigned Reg = Op.getReg();
+ O << getRegisterName(Reg, AArch64::vreg);
+}
- O << A64CondCodeToString(static_cast<A64CC::CondCodes>(MO.getImm()));
+void AArch64InstPrinter::printSysCROperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ assert(Op.isImm() && "System instruction C[nm] operands must be immediates!");
+ O << "c" << Op.getImm();
}
-template <unsigned field_width, unsigned scale> void
-AArch64InstPrinter::printLabelOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
+void AArch64InstPrinter::printAddSubImm(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
const MCOperand &MO = MI->getOperand(OpNum);
-
- if (!MO.isImm()) {
- printOperand(MI, OpNum, O);
- return;
+ if (MO.isImm()) {
+ unsigned Val = (MO.getImm() & 0xfff);
+ assert(Val == MO.getImm() && "Add/sub immediate out of range!");
+ unsigned Shift =
+ AArch64_AM::getShiftValue(MI->getOperand(OpNum + 1).getImm());
+ O << '#' << Val;
+ if (Shift != 0)
+ printShifter(MI, OpNum + 1, O);
+
+ if (CommentStream)
+ *CommentStream << '=' << (Val << Shift) << '\n';
+ } else {
+ assert(MO.isExpr() && "Unexpected operand type!");
+ O << *MO.getExpr();
+ printShifter(MI, OpNum + 1, O);
}
+}
- // The immediate of LDR (lit) instructions is a signed 19-bit immediate, which
- // is multiplied by 4 (because all A64 instructions are 32-bits wide).
- uint64_t UImm = MO.getImm();
- uint64_t Sign = UImm & (1LL << (field_width - 1));
- int64_t SImm = scale * ((UImm & ~Sign) - Sign);
-
- O << "#" << SImm;
+void AArch64InstPrinter::printLogicalImm32(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ uint64_t Val = MI->getOperand(OpNum).getImm();
+ O << "#0x";
+ O.write_hex(AArch64_AM::decodeLogicalImmediate(Val, 32));
}
-template<unsigned RegWidth> void
-AArch64InstPrinter::printLogicalImmOperand(const MCInst *MI, unsigned OpNum,
+void AArch64InstPrinter::printLogicalImm64(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
- const MCOperand &MO = MI->getOperand(OpNum);
- uint64_t Val;
- A64Imms::isLogicalImmBits(RegWidth, MO.getImm(), Val);
+ uint64_t Val = MI->getOperand(OpNum).getImm();
O << "#0x";
- O.write_hex(Val);
+ O.write_hex(AArch64_AM::decodeLogicalImmediate(Val, 64));
}
-void
-AArch64InstPrinter::printOffsetUImm12Operand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O, int MemSize) {
- const MCOperand &MOImm = MI->getOperand(OpNum);
+void AArch64InstPrinter::printShifter(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ unsigned Val = MI->getOperand(OpNum).getImm();
+ // LSL #0 should not be printed.
+ if (AArch64_AM::getShiftType(Val) == AArch64_AM::LSL &&
+ AArch64_AM::getShiftValue(Val) == 0)
+ return;
+ O << ", " << AArch64_AM::getShiftExtendName(AArch64_AM::getShiftType(Val))
+ << " #" << AArch64_AM::getShiftValue(Val);
+}
- if (MOImm.isImm()) {
- uint32_t Imm = MOImm.getImm() * MemSize;
+void AArch64InstPrinter::printShiftedRegister(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ O << getRegisterName(MI->getOperand(OpNum).getReg());
+ printShifter(MI, OpNum + 1, O);
+}
- O << "#" << Imm;
- } else {
- O << "#" << *MOImm.getExpr();
+void AArch64InstPrinter::printExtendedRegister(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ O << getRegisterName(MI->getOperand(OpNum).getReg());
+ printArithExtend(MI, OpNum + 1, O);
+}
+
+void AArch64InstPrinter::printArithExtend(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ unsigned Val = MI->getOperand(OpNum).getImm();
+ AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getArithExtendType(Val);
+ unsigned ShiftVal = AArch64_AM::getArithShiftValue(Val);
+
+ // If the destination or first source register operand is [W]SP, print
+ // UXTW/UXTX as LSL, and if the shift amount is also zero, print nothing at
+ // all.
+ if (ExtType == AArch64_AM::UXTW || ExtType == AArch64_AM::UXTX) {
+ unsigned Dest = MI->getOperand(0).getReg();
+ unsigned Src1 = MI->getOperand(1).getReg();
+ if ( ((Dest == AArch64::SP || Src1 == AArch64::SP) &&
+ ExtType == AArch64_AM::UXTX) ||
+ ((Dest == AArch64::WSP || Src1 == AArch64::WSP) &&
+ ExtType == AArch64_AM::UXTW) ) {
+ if (ShiftVal != 0)
+ O << ", lsl #" << ShiftVal;
+ return;
+ }
}
+ O << ", " << AArch64_AM::getShiftExtendName(ExtType);
+ if (ShiftVal != 0)
+ O << " #" << ShiftVal;
}
-void
-AArch64InstPrinter::printShiftOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O,
- A64SE::ShiftExtSpecifiers Shift) {
- const MCOperand &MO = MI->getOperand(OpNum);
+void AArch64InstPrinter::printMemExtend(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O, char SrcRegKind,
+ unsigned Width) {
+ unsigned SignExtend = MI->getOperand(OpNum).getImm();
+ unsigned DoShift = MI->getOperand(OpNum + 1).getImm();
- // LSL #0 is not printed
- if (Shift == A64SE::LSL && MO.isImm() && MO.getImm() == 0)
- return;
+ // sxtw, sxtx, uxtw or lsl (== uxtx)
+ bool IsLSL = !SignExtend && SrcRegKind == 'x';
+ if (IsLSL)
+ O << "lsl";
+ else
+ O << (SignExtend ? 's' : 'u') << "xt" << SrcRegKind;
- switch (Shift) {
- case A64SE::LSL: O << "lsl"; break;
- case A64SE::LSR: O << "lsr"; break;
- case A64SE::ASR: O << "asr"; break;
- case A64SE::ROR: O << "ror"; break;
- default: llvm_unreachable("Invalid shift specifier in logical instruction");
- }
+ if (DoShift || IsLSL)
+ O << " #" << Log2_32(Width / 8);
+}
- O << " #" << MO.getImm();
+void AArch64InstPrinter::printCondCode(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ AArch64CC::CondCode CC = (AArch64CC::CondCode)MI->getOperand(OpNum).getImm();
+ O << AArch64CC::getCondCodeName(CC);
}
-void
-AArch64InstPrinter::printMoveWideImmOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- const MCOperand &UImm16MO = MI->getOperand(OpNum);
- const MCOperand &ShiftMO = MI->getOperand(OpNum + 1);
+void AArch64InstPrinter::printInverseCondCode(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ AArch64CC::CondCode CC = (AArch64CC::CondCode)MI->getOperand(OpNum).getImm();
+ O << AArch64CC::getCondCodeName(AArch64CC::getInvertedCondCode(CC));
+}
- if (UImm16MO.isImm()) {
- O << '#' << UImm16MO.getImm();
+void AArch64InstPrinter::printAMNoIndex(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ O << '[' << getRegisterName(MI->getOperand(OpNum).getReg()) << ']';
+}
- if (ShiftMO.getImm() != 0)
- O << ", lsl #" << (ShiftMO.getImm() * 16);
+template<int Scale>
+void AArch64InstPrinter::printImmScale(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ O << '#' << Scale * MI->getOperand(OpNum).getImm();
+}
- return;
+void AArch64InstPrinter::printUImm12Offset(const MCInst *MI, unsigned OpNum,
+ unsigned Scale, raw_ostream &O) {
+ const MCOperand MO = MI->getOperand(OpNum);
+ if (MO.isImm()) {
+ O << "#" << (MO.getImm() * Scale);
+ } else {
+ assert(MO.isExpr() && "Unexpected operand type!");
+ O << *MO.getExpr();
}
-
- O << "#" << *UImm16MO.getExpr();
}
-void AArch64InstPrinter::printNamedImmOperand(const NamedImmMapper &Mapper,
- const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- bool ValidName;
- const MCOperand &MO = MI->getOperand(OpNum);
- StringRef Name = Mapper.toString(MO.getImm(), ValidName);
+void AArch64InstPrinter::printAMIndexedWB(const MCInst *MI, unsigned OpNum,
+ unsigned Scale, raw_ostream &O) {
+ const MCOperand MO1 = MI->getOperand(OpNum + 1);
+ O << '[' << getRegisterName(MI->getOperand(OpNum).getReg());
+ if (MO1.isImm()) {
+ O << ", #" << (MO1.getImm() * Scale);
+ } else {
+ assert(MO1.isExpr() && "Unexpected operand type!");
+ O << ", " << *MO1.getExpr();
+ }
+ O << ']';
+}
- if (ValidName)
+void AArch64InstPrinter::printPrefetchOp(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ unsigned prfop = MI->getOperand(OpNum).getImm();
+ bool Valid;
+ StringRef Name = AArch64PRFM::PRFMMapper().toString(prfop, Valid);
+ if (Valid)
O << Name;
else
- O << '#' << MO.getImm();
+ O << '#' << prfop;
}
-void
-AArch64InstPrinter::printSysRegOperand(const A64SysReg::SysRegMapper &Mapper,
- const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
+void AArch64InstPrinter::printFPImmOperand(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
const MCOperand &MO = MI->getOperand(OpNum);
+ float FPImm =
+ MO.isFPImm() ? MO.getFPImm() : AArch64_AM::getFPImmFloat(MO.getImm());
- bool ValidName;
- std::string Name = Mapper.toString(MO.getImm(), ValidName);
- if (ValidName) {
- O << Name;
- return;
- }
+ // 8 decimal places are enough to perfectly represent permitted floats.
+ O << format("#%.8f", FPImm);
}
+static unsigned getNextVectorRegister(unsigned Reg, unsigned Stride = 1) {
+ while (Stride--) {
+ switch (Reg) {
+ default:
+ llvm_unreachable("Vector register expected!");
+ case AArch64::Q0: Reg = AArch64::Q1; break;
+ case AArch64::Q1: Reg = AArch64::Q2; break;
+ case AArch64::Q2: Reg = AArch64::Q3; break;
+ case AArch64::Q3: Reg = AArch64::Q4; break;
+ case AArch64::Q4: Reg = AArch64::Q5; break;
+ case AArch64::Q5: Reg = AArch64::Q6; break;
+ case AArch64::Q6: Reg = AArch64::Q7; break;
+ case AArch64::Q7: Reg = AArch64::Q8; break;
+ case AArch64::Q8: Reg = AArch64::Q9; break;
+ case AArch64::Q9: Reg = AArch64::Q10; break;
+ case AArch64::Q10: Reg = AArch64::Q11; break;
+ case AArch64::Q11: Reg = AArch64::Q12; break;
+ case AArch64::Q12: Reg = AArch64::Q13; break;
+ case AArch64::Q13: Reg = AArch64::Q14; break;
+ case AArch64::Q14: Reg = AArch64::Q15; break;
+ case AArch64::Q15: Reg = AArch64::Q16; break;
+ case AArch64::Q16: Reg = AArch64::Q17; break;
+ case AArch64::Q17: Reg = AArch64::Q18; break;
+ case AArch64::Q18: Reg = AArch64::Q19; break;
+ case AArch64::Q19: Reg = AArch64::Q20; break;
+ case AArch64::Q20: Reg = AArch64::Q21; break;
+ case AArch64::Q21: Reg = AArch64::Q22; break;
+ case AArch64::Q22: Reg = AArch64::Q23; break;
+ case AArch64::Q23: Reg = AArch64::Q24; break;
+ case AArch64::Q24: Reg = AArch64::Q25; break;
+ case AArch64::Q25: Reg = AArch64::Q26; break;
+ case AArch64::Q26: Reg = AArch64::Q27; break;
+ case AArch64::Q27: Reg = AArch64::Q28; break;
+ case AArch64::Q28: Reg = AArch64::Q29; break;
+ case AArch64::Q29: Reg = AArch64::Q30; break;
+ case AArch64::Q30: Reg = AArch64::Q31; break;
+ // Vector lists can wrap around.
+ case AArch64::Q31:
+ Reg = AArch64::Q0;
+ break;
+ }
+ }
+ return Reg;
+}
-void AArch64InstPrinter::printRegExtendOperand(const MCInst *MI,
- unsigned OpNum,
- raw_ostream &O,
- A64SE::ShiftExtSpecifiers Ext) {
- // FIXME: In principle TableGen should be able to detect this itself far more
- // easily. We will only accumulate more of these hacks.
- unsigned Reg0 = MI->getOperand(0).getReg();
- unsigned Reg1 = MI->getOperand(1).getReg();
-
- if (isStackReg(Reg0) || isStackReg(Reg1)) {
- A64SE::ShiftExtSpecifiers LSLEquiv;
-
- if (Reg0 == AArch64::XSP || Reg1 == AArch64::XSP)
- LSLEquiv = A64SE::UXTX;
- else
- LSLEquiv = A64SE::UXTW;
+void AArch64InstPrinter::printVectorList(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O,
+ StringRef LayoutSuffix) {
+ unsigned Reg = MI->getOperand(OpNum).getReg();
- if (Ext == LSLEquiv) {
- O << "lsl #" << MI->getOperand(OpNum).getImm();
- return;
- }
+ O << "{ ";
+
+ // Work out how many registers there are in the list (if there is an actual
+ // list).
+ unsigned NumRegs = 1;
+ if (MRI.getRegClass(AArch64::DDRegClassID).contains(Reg) ||
+ MRI.getRegClass(AArch64::QQRegClassID).contains(Reg))
+ NumRegs = 2;
+ else if (MRI.getRegClass(AArch64::DDDRegClassID).contains(Reg) ||
+ MRI.getRegClass(AArch64::QQQRegClassID).contains(Reg))
+ NumRegs = 3;
+ else if (MRI.getRegClass(AArch64::DDDDRegClassID).contains(Reg) ||
+ MRI.getRegClass(AArch64::QQQQRegClassID).contains(Reg))
+ NumRegs = 4;
+
+ // Now forget about the list and find out what the first register is.
+ if (unsigned FirstReg = MRI.getSubReg(Reg, AArch64::dsub0))
+ Reg = FirstReg;
+ else if (unsigned FirstReg = MRI.getSubReg(Reg, AArch64::qsub0))
+ Reg = FirstReg;
+
+ // If it's a D-reg, we need to promote it to the equivalent Q-reg before
+ // printing (otherwise getRegisterName fails).
+ if (MRI.getRegClass(AArch64::FPR64RegClassID).contains(Reg)) {
+ const MCRegisterClass &FPR128RC =
+ MRI.getRegClass(AArch64::FPR128RegClassID);
+ Reg = MRI.getMatchingSuperReg(Reg, AArch64::dsub, &FPR128RC);
}
- switch (Ext) {
- case A64SE::UXTB: O << "uxtb"; break;
- case A64SE::UXTH: O << "uxth"; break;
- case A64SE::UXTW: O << "uxtw"; break;
- case A64SE::UXTX: O << "uxtx"; break;
- case A64SE::SXTB: O << "sxtb"; break;
- case A64SE::SXTH: O << "sxth"; break;
- case A64SE::SXTW: O << "sxtw"; break;
- case A64SE::SXTX: O << "sxtx"; break;
- default: llvm_unreachable("Unexpected shift type for printing");
+ for (unsigned i = 0; i < NumRegs; ++i, Reg = getNextVectorRegister(Reg)) {
+ O << getRegisterName(Reg, AArch64::vreg) << LayoutSuffix;
+ if (i + 1 != NumRegs)
+ O << ", ";
}
- const MCOperand &MO = MI->getOperand(OpNum);
- if (MO.getImm() != 0)
- O << " #" << MO.getImm();
+ O << " }";
}
-template<int MemScale> void
-AArch64InstPrinter::printSImm7ScaledOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- const MCOperand &MOImm = MI->getOperand(OpNum);
- int32_t Imm = unpackSignedImm(7, MOImm.getImm());
+void AArch64InstPrinter::printImplicitlyTypedVectorList(const MCInst *MI,
+ unsigned OpNum,
+ raw_ostream &O) {
+ printVectorList(MI, OpNum, O, "");
+}
+
+template <unsigned NumLanes, char LaneKind>
+void AArch64InstPrinter::printTypedVectorList(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ std::string Suffix(".");
+ if (NumLanes)
+ Suffix += itostr(NumLanes) + LaneKind;
+ else
+ Suffix += LaneKind;
- O << "#" << (Imm * MemScale);
+ printVectorList(MI, OpNum, O, Suffix);
}
-void AArch64InstPrinter::printVPRRegister(const MCInst *MI, unsigned OpNo,
+void AArch64InstPrinter::printVectorIndex(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
- unsigned Reg = MI->getOperand(OpNo).getReg();
- std::string Name = getRegisterName(Reg);
- Name[0] = 'v';
- O << Name;
+ O << "[" << MI->getOperand(OpNum).getImm() << "]";
}
-void AArch64InstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
- raw_ostream &O) {
- const MCOperand &Op = MI->getOperand(OpNo);
- if (Op.isReg()) {
- unsigned Reg = Op.getReg();
- O << getRegisterName(Reg);
- } else if (Op.isImm()) {
- O << '#' << Op.getImm();
+void AArch64InstPrinter::printAlignedLabel(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNum);
+
+ // If the label has already been resolved to an immediate offset (say, when
+ // we're running the disassembler), just print the immediate.
+ if (Op.isImm()) {
+ O << "#" << (Op.getImm() << 2);
+ return;
+ }
+
+ // If the branch target is simply an address then print it in hex.
+ const MCConstantExpr *BranchTarget =
+ dyn_cast<MCConstantExpr>(MI->getOperand(OpNum).getExpr());
+ int64_t Address;
+ if (BranchTarget && BranchTarget->EvaluateAsAbsolute(Address)) {
+ O << "0x";
+ O.write_hex(Address);
} else {
- assert(Op.isExpr() && "unknown operand kind in printOperand");
- // If a symbolic branch target was added as a constant expression then print
- // that address in hex.
- const MCConstantExpr *BranchTarget = dyn_cast<MCConstantExpr>(Op.getExpr());
- int64_t Address;
- if (BranchTarget && BranchTarget->EvaluateAsAbsolute(Address)) {
- O << "0x";
- O.write_hex(Address);
- }
- else {
- // Otherwise, just print the expression.
- O << *Op.getExpr();
- }
+ // Otherwise, just print the expression.
+ O << *MI->getOperand(OpNum).getExpr();
}
}
+void AArch64InstPrinter::printAdrpLabel(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNum);
-void AArch64InstPrinter::printInst(const MCInst *MI, raw_ostream &O,
- StringRef Annot) {
- if (MI->getOpcode() == AArch64::TLSDESCCALL) {
- // This is a special assembler directive which applies an
- // R_AARCH64_TLSDESC_CALL to the following (BLR) instruction. It has a fixed
- // form outside the normal TableGenerated scheme.
- O << "\t.tlsdesccall " << *MI->getOperand(0).getExpr();
- } else if (!printAliasInstr(MI, O))
- printInstruction(MI, O);
+ // If the label has already been resolved to an immediate offset (say, when
+ // we're running the disassembler), just print the immediate.
+ if (Op.isImm()) {
+ O << "#" << (Op.getImm() << 12);
+ return;
+ }
- printAnnotation(O, Annot);
+ // Otherwise, just print the expression.
+ O << *MI->getOperand(OpNum).getExpr();
}
-template <A64SE::ShiftExtSpecifiers Ext, bool isHalf>
-void AArch64InstPrinter::printNeonMovImmShiftOperand(const MCInst *MI,
- unsigned OpNum,
- raw_ostream &O) {
- const MCOperand &MO = MI->getOperand(OpNum);
-
- assert(MO.isImm() &&
- "Immediate operand required for Neon vector immediate inst.");
-
- bool IsLSL = false;
- if (Ext == A64SE::LSL)
- IsLSL = true;
- else if (Ext != A64SE::MSL)
- llvm_unreachable("Invalid shift specifier in movi instruction");
-
- int64_t Imm = MO.getImm();
-
- // MSL and LSLH accepts encoded shift amount 0 or 1.
- if ((!IsLSL || (IsLSL && isHalf)) && Imm != 0 && Imm != 1)
- llvm_unreachable("Invalid shift amount in movi instruction");
-
- // LSH accepts encoded shift amount 0, 1, 2 or 3.
- if (IsLSL && (Imm < 0 || Imm > 3))
- llvm_unreachable("Invalid shift amount in movi instruction");
-
- // Print shift amount as multiple of 8 with MSL encoded shift amount
- // 0 and 1 printed as 8 and 16.
- if (!IsLSL)
- Imm++;
- Imm *= 8;
-
- // LSL #0 is not printed
- if (IsLSL) {
- if (Imm == 0)
- return;
- O << ", lsl";
- } else
- O << ", msl";
-
- O << " #" << Imm;
-}
+void AArch64InstPrinter::printBarrierOption(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned Val = MI->getOperand(OpNo).getImm();
+ unsigned Opcode = MI->getOpcode();
-void AArch64InstPrinter::printNeonUImm0Operand(const MCInst *MI, unsigned OpNum,
- raw_ostream &o) {
- o << "#0x0";
+ bool Valid;
+ StringRef Name;
+ if (Opcode == AArch64::ISB)
+ Name = AArch64ISB::ISBMapper().toString(Val, Valid);
+ else
+ Name = AArch64DB::DBarrierMapper().toString(Val, Valid);
+ if (Valid)
+ O << Name;
+ else
+ O << "#" << Val;
}
-void AArch64InstPrinter::printUImmHexOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- const MCOperand &MOUImm = MI->getOperand(OpNum);
-
- assert(MOUImm.isImm() &&
- "Immediate operand required for Neon vector immediate inst.");
+void AArch64InstPrinter::printMRSSystemRegister(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned Val = MI->getOperand(OpNo).getImm();
- unsigned Imm = MOUImm.getImm();
+ bool Valid;
+ auto Mapper = AArch64SysReg::MRSMapper(getAvailableFeatures());
+ std::string Name = Mapper.toString(Val, Valid);
- O << "#0x";
- O.write_hex(Imm);
+ if (Valid)
+ O << StringRef(Name).upper();
}
-void AArch64InstPrinter::printUImmBareOperand(const MCInst *MI,
- unsigned OpNum,
- raw_ostream &O) {
- const MCOperand &MOUImm = MI->getOperand(OpNum);
+void AArch64InstPrinter::printMSRSystemRegister(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned Val = MI->getOperand(OpNo).getImm();
- assert(MOUImm.isImm()
- && "Immediate operand required for Neon vector immediate inst.");
+ bool Valid;
+ auto Mapper = AArch64SysReg::MSRMapper(getAvailableFeatures());
+ std::string Name = Mapper.toString(Val, Valid);
- unsigned Imm = MOUImm.getImm();
- O << Imm;
+ if (Valid)
+ O << StringRef(Name).upper();
}
-void AArch64InstPrinter::printNeonUImm64MaskOperand(const MCInst *MI,
- unsigned OpNum,
- raw_ostream &O) {
- const MCOperand &MOUImm8 = MI->getOperand(OpNum);
-
- assert(MOUImm8.isImm() &&
- "Immediate operand required for Neon vector immediate bytemask inst.");
+void AArch64InstPrinter::printSystemPStateField(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned Val = MI->getOperand(OpNo).getImm();
- uint32_t UImm8 = MOUImm8.getImm();
- uint64_t Mask = 0;
-
- // Replicates 0x00 or 0xff byte in a 64-bit vector
- for (unsigned ByteNum = 0; ByteNum < 8; ++ByteNum) {
- if ((UImm8 >> ByteNum) & 1)
- Mask |= (uint64_t)0xff << (8 * ByteNum);
- }
-
- O << "#0x";
- O.write_hex(Mask);
+ bool Valid;
+ StringRef Name = AArch64PState::PStateMapper().toString(Val, Valid);
+ if (Valid)
+ O << StringRef(Name.str()).upper();
+ else
+ O << "#" << Val;
}
-// If Count > 1, there are two valid kinds of vector list:
-// (1) {Vn.layout, Vn+1.layout, ... , Vm.layout}
-// (2) {Vn.layout - Vm.layout}
-// We choose the first kind as output.
-template <A64Layout::VectorLayout Layout, unsigned Count>
-void AArch64InstPrinter::printVectorList(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- assert(Count >= 1 && Count <= 4 && "Invalid Number of Vectors");
-
- unsigned Reg = MI->getOperand(OpNum).getReg();
- std::string LayoutStr = A64VectorLayoutToString(Layout);
- O << "{";
- if (Count > 1) { // Print sub registers separately
- bool IsVec64 = (Layout < A64Layout::VL_16B);
- unsigned SubRegIdx = IsVec64 ? AArch64::dsub_0 : AArch64::qsub_0;
- for (unsigned I = 0; I < Count; I++) {
- std::string Name = getRegisterName(MRI.getSubReg(Reg, SubRegIdx++));
- Name[0] = 'v';
- O << Name << LayoutStr;
- if (I != Count - 1)
- O << ", ";
- }
- } else { // Print the register directly when NumVecs is 1.
- std::string Name = getRegisterName(Reg);
- Name[0] = 'v';
- O << Name << LayoutStr;
- }
- O << "}";
+void AArch64InstPrinter::printSIMDType10Operand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned RawVal = MI->getOperand(OpNo).getImm();
+ uint64_t Val = AArch64_AM::decodeAdvSIMDModImmType10(RawVal);
+ O << format("#%#016llx", Val);
}
diff --git a/contrib/llvm/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h b/contrib/llvm/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h
index 37b7273..fe7666e 100644
--- a/contrib/llvm/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h
+++ b/contrib/llvm/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h
@@ -11,11 +11,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_AARCH64INSTPRINTER_H
-#define LLVM_AARCH64INSTPRINTER_H
+#ifndef AArch64INSTPRINTER_H
+#define AArch64INSTPRINTER_H
#include "MCTargetDesc/AArch64MCTargetDesc.h"
-#include "Utils/AArch64BaseInfo.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCInstPrinter.h"
#include "llvm/MC/MCSubtargetInfo.h"
@@ -28,154 +28,112 @@ public:
AArch64InstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
const MCRegisterInfo &MRI, const MCSubtargetInfo &STI);
- // Autogenerated by tblgen
- void printInstruction(const MCInst *MI, raw_ostream &O);
- bool printAliasInstr(const MCInst *MI, raw_ostream &O);
- static const char *getRegisterName(unsigned RegNo);
- static const char *getInstructionName(unsigned Opcode);
+ void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot) override;
+ void printRegName(raw_ostream &OS, unsigned RegNo) const override;
- void printRegName(raw_ostream &O, unsigned RegNum) const;
-
- template<unsigned MemSize, unsigned RmSize>
- void printAddrRegExtendOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- printAddrRegExtendOperand(MI, OpNum, O, MemSize, RmSize);
+ // Autogenerated by tblgen.
+ virtual void printInstruction(const MCInst *MI, raw_ostream &O);
+ virtual bool printAliasInstr(const MCInst *MI, raw_ostream &O);
+ virtual void printCustomAliasOperand(const MCInst *MI, unsigned OpIdx,
+ unsigned PrintMethodIdx, raw_ostream &O);
+ virtual StringRef getRegName(unsigned RegNo) const {
+ return getRegisterName(RegNo);
}
+ static const char *getRegisterName(unsigned RegNo,
+ unsigned AltIdx = AArch64::NoRegAltName);
-
- void printAddrRegExtendOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O, unsigned MemSize,
- unsigned RmSize);
-
- void printAddSubImmLSL0Operand(const MCInst *MI,
- unsigned OpNum, raw_ostream &O);
- void printAddSubImmLSL12Operand(const MCInst *MI,
- unsigned OpNum, raw_ostream &O);
-
- void printBareImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
-
- template<unsigned RegWidth>
- void printBFILSBOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printBFIWidthOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printBFXWidthOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
-
-
- void printCondCodeOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O);
-
- void printCRxOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O);
-
- void printCVTFixedPosOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O);
-
- void printFPImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &o);
-
- void printFPZeroOperand(const MCInst *MI, unsigned OpNum, raw_ostream &o);
-
- template<int MemScale>
- void printOffsetUImm12Operand(const MCInst *MI,
- unsigned OpNum, raw_ostream &o) {
- printOffsetUImm12Operand(MI, OpNum, o, MemScale);
+protected:
+ bool printSysAlias(const MCInst *MI, raw_ostream &O);
+ // Operand printers
+ void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printHexImm(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printPostIncOperand(const MCInst *MI, unsigned OpNo, unsigned Imm,
+ raw_ostream &O);
+ template<int Amount>
+ void printPostIncOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ printPostIncOperand(MI, OpNo, Amount, O);
}
- void printOffsetUImm12Operand(const MCInst *MI, unsigned OpNum,
- raw_ostream &o, int MemScale);
-
- template<unsigned field_width, unsigned scale>
- void printLabelOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O);
-
- template<unsigned RegWidth>
- void printLogicalImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
-
- template<typename SomeNamedImmMapper>
- void printNamedImmOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- printNamedImmOperand(SomeNamedImmMapper(), MI, OpNum, O);
+ void printVRegOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printSysCROperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printAddSubImm(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printLogicalImm32(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printLogicalImm64(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printShifter(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printShiftedRegister(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printExtendedRegister(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printArithExtend(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+
+ void printMemExtend(const MCInst *MI, unsigned OpNum, raw_ostream &O,
+ char SrcRegKind, unsigned Width);
+ template <char SrcRegKind, unsigned Width>
+ void printMemExtend(const MCInst *MI, unsigned OpNum, raw_ostream &O) {
+ printMemExtend(MI, OpNum, O, SrcRegKind, Width);
}
- void printNamedImmOperand(const NamedImmMapper &Mapper,
- const MCInst *MI, unsigned OpNum,
- raw_ostream &O);
-
- void printSysRegOperand(const A64SysReg::SysRegMapper &Mapper,
- const MCInst *MI, unsigned OpNum,
- raw_ostream &O);
+ void printCondCode(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printInverseCondCode(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printAlignedLabel(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printUImm12Offset(const MCInst *MI, unsigned OpNum, unsigned Scale,
+ raw_ostream &O);
+ void printAMIndexedWB(const MCInst *MI, unsigned OpNum, unsigned Scale,
+ raw_ostream &O);
- void printMRSOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- printSysRegOperand(A64SysReg::MRSMapper(), MI, OpNum, O);
+ template<int Scale>
+ void printUImm12Offset(const MCInst *MI, unsigned OpNum, raw_ostream &O) {
+ printUImm12Offset(MI, OpNum, Scale, O);
}
- void printMSROperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- printSysRegOperand(A64SysReg::MSRMapper(), MI, OpNum, O);
+ template<int BitWidth>
+ void printAMIndexedWB(const MCInst *MI, unsigned OpNum, raw_ostream &O) {
+ printAMIndexedWB(MI, OpNum, BitWidth / 8, O);
}
- void printShiftOperand(const char *name, const MCInst *MI,
- unsigned OpIdx, raw_ostream &O);
-
- void printLSLOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printAMNoIndex(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printLSROperand(const MCInst *MI, unsigned OpNum, raw_ostream &O) {
- printShiftOperand("lsr", MI, OpNum, O);
- }
- void printASROperand(const MCInst *MI, unsigned OpNum, raw_ostream &O) {
- printShiftOperand("asr", MI, OpNum, O);
- }
- void printROROperand(const MCInst *MI, unsigned OpNum, raw_ostream &O) {
- printShiftOperand("ror", MI, OpNum, O);
- }
+ template<int Scale>
+ void printImmScale(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- template<A64SE::ShiftExtSpecifiers Shift>
- void printShiftOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O) {
- printShiftOperand(MI, OpNum, O, Shift);
- }
+ void printPrefetchOp(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printShiftOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O, A64SE::ShiftExtSpecifiers Sh);
+ void printFPImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printVectorList(const MCInst *MI, unsigned OpNum, raw_ostream &O,
+ StringRef LayoutSuffix);
- void printMoveWideImmOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O);
+ /// Print a list of vector registers where the type suffix is implicit
+ /// (i.e. attached to the instruction rather than the registers).
+ void printImplicitlyTypedVectorList(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O);
- template<int MemSize> void
- printSImm7ScaledOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ template <unsigned NumLanes, char LaneKind>
+ void printTypedVectorList(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printOffsetSImm9Operand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O);
-
- void printPRFMOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
-
- template<A64SE::ShiftExtSpecifiers EXT>
- void printRegExtendOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- printRegExtendOperand(MI, OpNum, O, EXT);
- }
+ void printVectorIndex(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printAdrpLabel(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printBarrierOption(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printMSRSystemRegister(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printMRSSystemRegister(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printSystemPStateField(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printSIMDType10Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+};
- void printRegExtendOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O, A64SE::ShiftExtSpecifiers Ext);
+class AArch64AppleInstPrinter : public AArch64InstPrinter {
+public:
+ AArch64AppleInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI, const MCSubtargetInfo &STI);
- void printVPRRegister(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
+ void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot) override;
- bool isStackReg(unsigned RegNo) {
- return RegNo == AArch64::XSP || RegNo == AArch64::WSP;
+ void printInstruction(const MCInst *MI, raw_ostream &O) override;
+ bool printAliasInstr(const MCInst *MI, raw_ostream &O) override;
+ virtual void printCustomAliasOperand(const MCInst *MI, unsigned OpIdx,
+ unsigned PrintMethodIdx, raw_ostream &O);
+ StringRef getRegName(unsigned RegNo) const override {
+ return getRegisterName(RegNo);
}
-
- template <A64SE::ShiftExtSpecifiers Ext, bool IsHalf>
- void printNeonMovImmShiftOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O);
- void printNeonUImm0Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printUImmHexOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printUImmBareOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printNeonUImm64MaskOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O);
-
- template <A64Layout::VectorLayout Layout, unsigned Count>
- void printVectorList(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ static const char *getRegisterName(unsigned RegNo,
+ unsigned AltIdx = AArch64::NoRegAltName);
};
}
diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
new file mode 100644
index 0000000..8b1e44e2
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
@@ -0,0 +1,738 @@
+//===- AArch64AddressingModes.h - AArch64 Addressing Modes ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the AArch64 addressing mode implementation stuff.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_AArch64_AArch64ADDRESSINGMODES_H
+#define LLVM_TARGET_AArch64_AArch64ADDRESSINGMODES_H
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include <cassert>
+
+namespace llvm {
+
+/// AArch64_AM - AArch64 Addressing Mode Stuff
+namespace AArch64_AM {
+
+//===----------------------------------------------------------------------===//
+// Shifts
+//
+
+enum ShiftExtendType {
+ InvalidShiftExtend = -1,
+ LSL = 0,
+ LSR,
+ ASR,
+ ROR,
+ MSL,
+
+ UXTB,
+ UXTH,
+ UXTW,
+ UXTX,
+
+ SXTB,
+ SXTH,
+ SXTW,
+ SXTX,
+};
+
+/// getShiftExtendName - Get the string encoding for the shift type.
+static inline const char *getShiftExtendName(AArch64_AM::ShiftExtendType ST) {
+ switch (ST) {
+ default: assert(false && "unhandled shift type!");
+ case AArch64_AM::LSL: return "lsl";
+ case AArch64_AM::LSR: return "lsr";
+ case AArch64_AM::ASR: return "asr";
+ case AArch64_AM::ROR: return "ror";
+ case AArch64_AM::MSL: return "msl";
+ case AArch64_AM::UXTB: return "uxtb";
+ case AArch64_AM::UXTH: return "uxth";
+ case AArch64_AM::UXTW: return "uxtw";
+ case AArch64_AM::UXTX: return "uxtx";
+ case AArch64_AM::SXTB: return "sxtb";
+ case AArch64_AM::SXTH: return "sxth";
+ case AArch64_AM::SXTW: return "sxtw";
+ case AArch64_AM::SXTX: return "sxtx";
+ }
+ return nullptr;
+}
+
+/// getShiftType - Extract the shift type.
+static inline AArch64_AM::ShiftExtendType getShiftType(unsigned Imm) {
+ switch ((Imm >> 6) & 0x7) {
+ default: return AArch64_AM::InvalidShiftExtend;
+ case 0: return AArch64_AM::LSL;
+ case 1: return AArch64_AM::LSR;
+ case 2: return AArch64_AM::ASR;
+ case 3: return AArch64_AM::ROR;
+ case 4: return AArch64_AM::MSL;
+ }
+}
+
+/// getShiftValue - Extract the shift value.
+static inline unsigned getShiftValue(unsigned Imm) {
+ return Imm & 0x3f;
+}
+
+/// getShifterImm - Encode the shift type and amount:
+/// imm: 6-bit shift amount
+/// shifter: 000 ==> lsl
+/// 001 ==> lsr
+/// 010 ==> asr
+/// 011 ==> ror
+/// 100 ==> msl
+/// {8-6} = shifter
+/// {5-0} = imm
+static inline unsigned getShifterImm(AArch64_AM::ShiftExtendType ST,
+ unsigned Imm) {
+ assert((Imm & 0x3f) == Imm && "Illegal shifted immediate value!");
+ unsigned STEnc = 0;
+ switch (ST) {
+ default: llvm_unreachable("Invalid shift requested");
+ case AArch64_AM::LSL: STEnc = 0; break;
+ case AArch64_AM::LSR: STEnc = 1; break;
+ case AArch64_AM::ASR: STEnc = 2; break;
+ case AArch64_AM::ROR: STEnc = 3; break;
+ case AArch64_AM::MSL: STEnc = 4; break;
+ }
+ return (STEnc << 6) | (Imm & 0x3f);
+}
+
+//===----------------------------------------------------------------------===//
+// Extends
+//
+
+/// getArithShiftValue - Get the arithmetic shift value.
+static inline unsigned getArithShiftValue(unsigned Imm) {
+ return Imm & 0x7;
+}
+
+/// getExtendType - Extract the extend type for operands of arithmetic ops.
+static inline AArch64_AM::ShiftExtendType getExtendType(unsigned Imm) {
+ assert((Imm & 0x7) == Imm && "invalid immediate!");
+ switch (Imm) {
+ default: llvm_unreachable("Compiler bug!");
+ case 0: return AArch64_AM::UXTB;
+ case 1: return AArch64_AM::UXTH;
+ case 2: return AArch64_AM::UXTW;
+ case 3: return AArch64_AM::UXTX;
+ case 4: return AArch64_AM::SXTB;
+ case 5: return AArch64_AM::SXTH;
+ case 6: return AArch64_AM::SXTW;
+ case 7: return AArch64_AM::SXTX;
+ }
+}
+
+static inline AArch64_AM::ShiftExtendType getArithExtendType(unsigned Imm) {
+ return getExtendType((Imm >> 3) & 0x7);
+}
+
+/// Mapping from extend bits to required operation:
+/// shifter: 000 ==> uxtb
+/// 001 ==> uxth
+/// 010 ==> uxtw
+/// 011 ==> uxtx
+/// 100 ==> sxtb
+/// 101 ==> sxth
+/// 110 ==> sxtw
+/// 111 ==> sxtx
+inline unsigned getExtendEncoding(AArch64_AM::ShiftExtendType ET) {
+ switch (ET) {
+ default: llvm_unreachable("Invalid extend type requested");
+ case AArch64_AM::UXTB: return 0; break;
+ case AArch64_AM::UXTH: return 1; break;
+ case AArch64_AM::UXTW: return 2; break;
+ case AArch64_AM::UXTX: return 3; break;
+ case AArch64_AM::SXTB: return 4; break;
+ case AArch64_AM::SXTH: return 5; break;
+ case AArch64_AM::SXTW: return 6; break;
+ case AArch64_AM::SXTX: return 7; break;
+ }
+}
+
+/// getArithExtendImm - Encode the extend type and shift amount for an
+/// arithmetic instruction:
+/// imm: 3-bit extend amount
+/// {5-3} = shifter
+/// {2-0} = imm3
+static inline unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET,
+ unsigned Imm) {
+ assert((Imm & 0x7) == Imm && "Illegal shifted immediate value!");
+ return (getExtendEncoding(ET) << 3) | (Imm & 0x7);
+}
+
+/// getMemDoShift - Extract the "do shift" flag value for load/store
+/// instructions.
+static inline bool getMemDoShift(unsigned Imm) {
+ return (Imm & 0x1) != 0;
+}
+
+/// getMemExtendType - Extract the extend type for the offset operand of
+/// loads/stores.
+static inline AArch64_AM::ShiftExtendType getMemExtendType(unsigned Imm) {
+ return getExtendType((Imm >> 1) & 0x7);
+}
+
+/// getMemExtendImm - Encode the extend type and amount for a load/store inst:
+/// doshift: should the offset be scaled by the access size
+/// shifter: 000 ==> uxtb
+/// 001 ==> uxth
+/// 010 ==> uxtw
+/// 011 ==> uxtx
+/// 100 ==> sxtb
+/// 101 ==> sxth
+/// 110 ==> sxtw
+/// 111 ==> sxtx
+/// {3-1} = shifter
+/// {0} = doshift
+static inline unsigned getMemExtendImm(AArch64_AM::ShiftExtendType ET,
+ bool DoShift) {
+ return (getExtendEncoding(ET) << 1) | unsigned(DoShift);
+}
+
+static inline uint64_t ror(uint64_t elt, unsigned size) {
+ return ((elt & 1) << (size-1)) | (elt >> 1);
+}
+
+/// processLogicalImmediate - Determine if an immediate value can be encoded
+/// as the immediate operand of a logical instruction for the given register
+/// size. If so, return true with "encoding" set to the encoded value in
+/// the form N:immr:imms.
+static inline bool processLogicalImmediate(uint64_t imm, unsigned regSize,
+ uint64_t &encoding) {
+ if (imm == 0ULL || imm == ~0ULL ||
+ (regSize != 64 && (imm >> regSize != 0 || imm == ~0U)))
+ return false;
+
+ unsigned size = 2;
+ uint64_t eltVal = imm;
+
+ // First, determine the element size.
+ while (size < regSize) {
+ unsigned numElts = regSize / size;
+ unsigned mask = (1ULL << size) - 1;
+ uint64_t lowestEltVal = imm & mask;
+
+ bool allMatched = true;
+ for (unsigned i = 1; i < numElts; ++i) {
+ uint64_t currEltVal = (imm >> (i*size)) & mask;
+ if (currEltVal != lowestEltVal) {
+ allMatched = false;
+ break;
+ }
+ }
+
+ if (allMatched) {
+ eltVal = lowestEltVal;
+ break;
+ }
+
+ size *= 2;
+ }
+
+ // Second, determine the rotation to make the element be: 0^m 1^n.
+ for (unsigned i = 0; i < size; ++i) {
+ eltVal = ror(eltVal, size);
+ uint32_t clz = countLeadingZeros(eltVal) - (64 - size);
+ uint32_t cto = CountTrailingOnes_64(eltVal);
+
+ if (clz + cto == size) {
+ // Encode in immr the number of RORs it would take to get *from* this
+ // element value to our target value, where i+1 is the number of RORs
+ // to go the opposite direction.
+ unsigned immr = size - (i + 1);
+
+ // If size has a 1 in the n'th bit, create a value that has zeroes in
+ // bits [0, n] and ones above that.
+ uint64_t nimms = ~(size-1) << 1;
+
+ // Or the CTO value into the low bits, which must be below the Nth bit
+ // mentioned above.
+ nimms |= (cto-1);
+
+ // Extract the seventh bit and toggle it to create the N field.
+ unsigned N = ((nimms >> 6) & 1) ^ 1;
+
+ encoding = (N << 12) | (immr << 6) | (nimms & 0x3f);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/// isLogicalImmediate - Return true if the immediate is valid for a logical
+/// immediate instruction of the given register size. Return false otherwise.
+static inline bool isLogicalImmediate(uint64_t imm, unsigned regSize) {
+ uint64_t encoding;
+ return processLogicalImmediate(imm, regSize, encoding);
+}
+
+/// encodeLogicalImmediate - Return the encoded immediate value for a logical
+/// immediate instruction of the given register size.
+static inline uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize) {
+ uint64_t encoding = 0;
+ bool res = processLogicalImmediate(imm, regSize, encoding);
+ assert(res && "invalid logical immediate");
+ (void)res;
+ return encoding;
+}
+
+/// decodeLogicalImmediate - Decode a logical immediate value in the form
+/// "N:immr:imms" (where the immr and imms fields are each 6 bits) into the
+/// integer value it represents with regSize bits.
+static inline uint64_t decodeLogicalImmediate(uint64_t val, unsigned regSize) {
+ // Extract the N, imms, and immr fields.
+ unsigned N = (val >> 12) & 1;
+ unsigned immr = (val >> 6) & 0x3f;
+ unsigned imms = val & 0x3f;
+
+ assert((regSize == 64 || N == 0) && "undefined logical immediate encoding");
+ int len = 31 - countLeadingZeros((N << 6) | (~imms & 0x3f));
+ assert(len >= 0 && "undefined logical immediate encoding");
+ unsigned size = (1 << len);
+ unsigned R = immr & (size - 1);
+ unsigned S = imms & (size - 1);
+ assert(S != size - 1 && "undefined logical immediate encoding");
+ uint64_t pattern = (1ULL << (S + 1)) - 1;
+ for (unsigned i = 0; i < R; ++i)
+ pattern = ror(pattern, size);
+
+ // Replicate the pattern to fill the regSize.
+ while (size != regSize) {
+ pattern |= (pattern << size);
+ size *= 2;
+ }
+ return pattern;
+}
+
+/// isValidDecodeLogicalImmediate - Check to see if the logical immediate value
+/// in the form "N:immr:imms" (where the immr and imms fields are each 6 bits)
+/// is a valid encoding for an integer value with regSize bits.
+static inline bool isValidDecodeLogicalImmediate(uint64_t val,
+ unsigned regSize) {
+ // Extract the N and imms fields needed for checking.
+ unsigned N = (val >> 12) & 1;
+ unsigned imms = val & 0x3f;
+
+ if (regSize == 32 && N != 0) // undefined logical immediate encoding
+ return false;
+ int len = 31 - countLeadingZeros((N << 6) | (~imms & 0x3f));
+ if (len < 0) // undefined logical immediate encoding
+ return false;
+ unsigned size = (1 << len);
+ unsigned S = imms & (size - 1);
+ if (S == size - 1) // undefined logical immediate encoding
+ return false;
+
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// Floating-point Immediates
+//
+static inline float getFPImmFloat(unsigned Imm) {
+ // We expect an 8-bit binary encoding of a floating-point number here.
+ union {
+ uint32_t I;
+ float F;
+ } FPUnion;
+
+ uint8_t Sign = (Imm >> 7) & 0x1;
+ uint8_t Exp = (Imm >> 4) & 0x7;
+ uint8_t Mantissa = Imm & 0xf;
+
+ // 8-bit FP IEEE Float Encoding
+ // abcd efgh aBbbbbbc defgh000 00000000 00000000
+ //
+ // where B = NOT(b);
+
+ FPUnion.I = 0;
+ FPUnion.I |= Sign << 31;
+ FPUnion.I |= ((Exp & 0x4) != 0 ? 0 : 1) << 30;
+ FPUnion.I |= ((Exp & 0x4) != 0 ? 0x1f : 0) << 25;
+ FPUnion.I |= (Exp & 0x3) << 23;
+ FPUnion.I |= Mantissa << 19;
+ return FPUnion.F;
+}
+
+/// getFP32Imm - Return an 8-bit floating-point version of the 32-bit
+/// floating-point value. If the value cannot be represented as an 8-bit
+/// floating-point value, then return -1.
+static inline int getFP32Imm(const APInt &Imm) {
+ uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
+ int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127; // -126 to 127
+ int64_t Mantissa = Imm.getZExtValue() & 0x7fffff; // 23 bits
+
+ // We can handle 4 bits of mantissa.
+ // mantissa = (16+UInt(e:f:g:h))/16.
+ if (Mantissa & 0x7ffff)
+ return -1;
+ Mantissa >>= 19;
+ if ((Mantissa & 0xf) != Mantissa)
+ return -1;
+
+ // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
+ if (Exp < -3 || Exp > 4)
+ return -1;
+ Exp = ((Exp+3) & 0x7) ^ 4;
+
+ return ((int)Sign << 7) | (Exp << 4) | Mantissa;
+}
+
+static inline int getFP32Imm(const APFloat &FPImm) {
+ return getFP32Imm(FPImm.bitcastToAPInt());
+}
+
+/// getFP64Imm - Return an 8-bit floating-point version of the 64-bit
+/// floating-point value. If the value cannot be represented as an 8-bit
+/// floating-point value, then return -1.
+static inline int getFP64Imm(const APInt &Imm) {
+ uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
+ int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023; // -1022 to 1023
+ uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffULL;
+
+ // We can handle 4 bits of mantissa.
+ // mantissa = (16+UInt(e:f:g:h))/16.
+ if (Mantissa & 0xffffffffffffULL)
+ return -1;
+ Mantissa >>= 48;
+ if ((Mantissa & 0xf) != Mantissa)
+ return -1;
+
+ // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
+ if (Exp < -3 || Exp > 4)
+ return -1;
+ Exp = ((Exp+3) & 0x7) ^ 4;
+
+ return ((int)Sign << 7) | (Exp << 4) | Mantissa;
+}
+
+static inline int getFP64Imm(const APFloat &FPImm) {
+ return getFP64Imm(FPImm.bitcastToAPInt());
+}
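
The encoders are the inverse of getFPImmFloat. A round-trip sketch, assuming llvm/ADT/APFloat.h and <cassert> are available:

  int Enc = llvm::AArch64_AM::getFP32Imm(llvm::APFloat(1.0f));
  assert(Enc == 0x70);
  assert(llvm::AArch64_AM::getFPImmFloat(Enc) == 1.0f);
  // 1.0f/3.0f needs more than 4 mantissa bits, so it is rejected.
  assert(llvm::AArch64_AM::getFP32Imm(llvm::APFloat(1.0f / 3.0f)) == -1);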
+
+//===--------------------------------------------------------------------===//
+// AdvSIMD Modified Immediates
+//===--------------------------------------------------------------------===//
+
+// 0x00 0x00 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh
+static inline bool isAdvSIMDModImmType1(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm & 0xffffff00ffffff00ULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType1(uint64_t Imm) {
+ return (Imm & 0xffULL);
+}
+
+static inline uint64_t decodeAdvSIMDModImmType1(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 32) | EncVal;
+}
+
+// 0x00 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh 0x00
+static inline bool isAdvSIMDModImmType2(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm & 0xffff00ffffff00ffULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType2(uint64_t Imm) {
+ return (Imm & 0xff00ULL) >> 8;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType2(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 40) | (EncVal << 8);
+}
+
+// 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 0x00
+static inline bool isAdvSIMDModImmType3(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm & 0xff00ffffff00ffffULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType3(uint64_t Imm) {
+ return (Imm & 0xff0000ULL) >> 16;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType3(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 48) | (EncVal << 16);
+}
+
+// abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 0x00 0x00
+static inline bool isAdvSIMDModImmType4(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm & 0x00ffffff00ffffffULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType4(uint64_t Imm) {
+ return (Imm & 0xff000000ULL) >> 24;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType4(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 56) | (EncVal << 24);
+}
+
+// 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh
+static inline bool isAdvSIMDModImmType5(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ (((Imm & 0x00ff0000ULL) >> 16) == (Imm & 0x000000ffULL)) &&
+ ((Imm & 0xff00ff00ff00ff00ULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType5(uint64_t Imm) {
+ return (Imm & 0xffULL);
+}
+
+static inline uint64_t decodeAdvSIMDModImmType5(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 48) | (EncVal << 32) | (EncVal << 16) | EncVal;
+}
+
+// abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00
+static inline bool isAdvSIMDModImmType6(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ (((Imm & 0xff000000ULL) >> 16) == (Imm & 0x0000ff00ULL)) &&
+ ((Imm & 0x00ff00ff00ff00ffULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType6(uint64_t Imm) {
+ return (Imm & 0xff00ULL) >> 8;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType6(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 56) | (EncVal << 40) | (EncVal << 24) | (EncVal << 8);
+}
+
+// 0x00 0x00 abcdefgh 0xFF 0x00 0x00 abcdefgh 0xFF
+static inline bool isAdvSIMDModImmType7(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm & 0xffff00ffffff00ffULL) == 0x000000ff000000ffULL);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType7(uint64_t Imm) {
+ return (Imm & 0xff00ULL) >> 8;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType7(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 40) | (EncVal << 8) | 0x000000ff000000ffULL;
+}
+
+// 0x00 abcdefgh 0xFF 0xFF 0x00 abcdefgh 0xFF 0xFF
+static inline bool isAdvSIMDModImmType8(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm & 0xff00ffffff00ffffULL) == 0x0000ffff0000ffffULL);
+}
+
+static inline uint64_t decodeAdvSIMDModImmType8(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ return (EncVal << 48) | (EncVal << 16) | 0x0000ffff0000ffffULL;
+}
+
+static inline uint8_t encodeAdvSIMDModImmType8(uint64_t Imm) {
+ return (Imm & 0x00ff0000ULL) >> 16;
+}
+
+// abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh
+static inline bool isAdvSIMDModImmType9(uint64_t Imm) {
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ ((Imm >> 48) == (Imm & 0x0000ffffULL)) &&
+ ((Imm >> 56) == (Imm & 0x000000ffULL));
+}
+
+static inline uint8_t encodeAdvSIMDModImmType9(uint64_t Imm) {
+ return (Imm & 0xffULL);
+}
+
+static inline uint64_t decodeAdvSIMDModImmType9(uint8_t Imm) {
+ uint64_t EncVal = Imm;
+ EncVal |= (EncVal << 8);
+ EncVal |= (EncVal << 16);
+ EncVal |= (EncVal << 32);
+ return EncVal;
+}
+
+// aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh
+// cmode: 1110, op: 1
+static inline bool isAdvSIMDModImmType10(uint64_t Imm) {
+ uint64_t ByteA = Imm & 0xff00000000000000ULL;
+ uint64_t ByteB = Imm & 0x00ff000000000000ULL;
+ uint64_t ByteC = Imm & 0x0000ff0000000000ULL;
+ uint64_t ByteD = Imm & 0x000000ff00000000ULL;
+ uint64_t ByteE = Imm & 0x00000000ff000000ULL;
+ uint64_t ByteF = Imm & 0x0000000000ff0000ULL;
+ uint64_t ByteG = Imm & 0x000000000000ff00ULL;
+ uint64_t ByteH = Imm & 0x00000000000000ffULL;
+
+ return (ByteA == 0ULL || ByteA == 0xff00000000000000ULL) &&
+ (ByteB == 0ULL || ByteB == 0x00ff000000000000ULL) &&
+ (ByteC == 0ULL || ByteC == 0x0000ff0000000000ULL) &&
+ (ByteD == 0ULL || ByteD == 0x000000ff00000000ULL) &&
+ (ByteE == 0ULL || ByteE == 0x00000000ff000000ULL) &&
+ (ByteF == 0ULL || ByteF == 0x0000000000ff0000ULL) &&
+ (ByteG == 0ULL || ByteG == 0x000000000000ff00ULL) &&
+ (ByteH == 0ULL || ByteH == 0x00000000000000ffULL);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType10(uint64_t Imm) {
+ uint8_t BitA = (Imm & 0xff00000000000000ULL) != 0;
+ uint8_t BitB = (Imm & 0x00ff000000000000ULL) != 0;
+ uint8_t BitC = (Imm & 0x0000ff0000000000ULL) != 0;
+ uint8_t BitD = (Imm & 0x000000ff00000000ULL) != 0;
+ uint8_t BitE = (Imm & 0x00000000ff000000ULL) != 0;
+ uint8_t BitF = (Imm & 0x0000000000ff0000ULL) != 0;
+ uint8_t BitG = (Imm & 0x000000000000ff00ULL) != 0;
+ uint8_t BitH = (Imm & 0x00000000000000ffULL) != 0;
+
+ uint8_t EncVal = BitA;
+ EncVal <<= 1;
+ EncVal |= BitB;
+ EncVal <<= 1;
+ EncVal |= BitC;
+ EncVal <<= 1;
+ EncVal |= BitD;
+ EncVal <<= 1;
+ EncVal |= BitE;
+ EncVal <<= 1;
+ EncVal |= BitF;
+ EncVal <<= 1;
+ EncVal |= BitG;
+ EncVal <<= 1;
+ EncVal |= BitH;
+ return EncVal;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType10(uint8_t Imm) {
+ uint64_t EncVal = 0;
+ if (Imm & 0x80) EncVal |= 0xff00000000000000ULL;
+ if (Imm & 0x40) EncVal |= 0x00ff000000000000ULL;
+ if (Imm & 0x20) EncVal |= 0x0000ff0000000000ULL;
+ if (Imm & 0x10) EncVal |= 0x000000ff00000000ULL;
+ if (Imm & 0x08) EncVal |= 0x00000000ff000000ULL;
+ if (Imm & 0x04) EncVal |= 0x0000000000ff0000ULL;
+ if (Imm & 0x02) EncVal |= 0x000000000000ff00ULL;
+ if (Imm & 0x01) EncVal |= 0x00000000000000ffULL;
+ return EncVal;
+}
+
+// aBbbbbbc defgh000 0x00 0x00 aBbbbbbc defgh000 0x00 0x00
+static inline bool isAdvSIMDModImmType11(uint64_t Imm) {
+ uint64_t BString = (Imm & 0x7E000000ULL) >> 25;
+ return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
+ (BString == 0x1f || BString == 0x20) &&
+ ((Imm & 0x0007ffff0007ffffULL) == 0);
+}
+
+static inline uint8_t encodeAdvSIMDModImmType11(uint64_t Imm) {
+ uint8_t BitA = (Imm & 0x80000000ULL) != 0;
+ uint8_t BitB = (Imm & 0x20000000ULL) != 0;
+ uint8_t BitC = (Imm & 0x01000000ULL) != 0;
+ uint8_t BitD = (Imm & 0x00800000ULL) != 0;
+ uint8_t BitE = (Imm & 0x00400000ULL) != 0;
+ uint8_t BitF = (Imm & 0x00200000ULL) != 0;
+ uint8_t BitG = (Imm & 0x00100000ULL) != 0;
+ uint8_t BitH = (Imm & 0x00080000ULL) != 0;
+
+ uint8_t EncVal = BitA;
+ EncVal <<= 1;
+ EncVal |= BitB;
+ EncVal <<= 1;
+ EncVal |= BitC;
+ EncVal <<= 1;
+ EncVal |= BitD;
+ EncVal <<= 1;
+ EncVal |= BitE;
+ EncVal <<= 1;
+ EncVal |= BitF;
+ EncVal <<= 1;
+ EncVal |= BitG;
+ EncVal <<= 1;
+ EncVal |= BitH;
+ return EncVal;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType11(uint8_t Imm) {
+ uint64_t EncVal = 0;
+ if (Imm & 0x80) EncVal |= 0x80000000ULL;
+ if (Imm & 0x40) EncVal |= 0x3e000000ULL;
+ else EncVal |= 0x40000000ULL;
+ if (Imm & 0x20) EncVal |= 0x01000000ULL;
+ if (Imm & 0x10) EncVal |= 0x00800000ULL;
+ if (Imm & 0x08) EncVal |= 0x00400000ULL;
+ if (Imm & 0x04) EncVal |= 0x00200000ULL;
+ if (Imm & 0x02) EncVal |= 0x00100000ULL;
+ if (Imm & 0x01) EncVal |= 0x00080000ULL;
+ return (EncVal << 32) | EncVal;
+}
+
+// aBbbbbbb bbcdefgh 0x00 0x00 0x00 0x00 0x00 0x00
+static inline bool isAdvSIMDModImmType12(uint64_t Imm) {
+ uint64_t BString = (Imm & 0x7fc0000000000000ULL) >> 54;
+ return ((BString == 0xff || BString == 0x100) &&
+ ((Imm & 0x0000ffffffffffffULL) == 0));
+}
+
+static inline uint8_t encodeAdvSIMDModImmType12(uint64_t Imm) {
+ uint8_t BitA = (Imm & 0x8000000000000000ULL) != 0;
+ uint8_t BitB = (Imm & 0x0040000000000000ULL) != 0;
+ uint8_t BitC = (Imm & 0x0020000000000000ULL) != 0;
+ uint8_t BitD = (Imm & 0x0010000000000000ULL) != 0;
+ uint8_t BitE = (Imm & 0x0008000000000000ULL) != 0;
+ uint8_t BitF = (Imm & 0x0004000000000000ULL) != 0;
+ uint8_t BitG = (Imm & 0x0002000000000000ULL) != 0;
+ uint8_t BitH = (Imm & 0x0001000000000000ULL) != 0;
+
+ uint8_t EncVal = BitA;
+ EncVal <<= 1;
+ EncVal |= BitB;
+ EncVal <<= 1;
+ EncVal |= BitC;
+ EncVal <<= 1;
+ EncVal |= BitD;
+ EncVal <<= 1;
+ EncVal |= BitE;
+ EncVal <<= 1;
+ EncVal |= BitF;
+ EncVal <<= 1;
+ EncVal |= BitG;
+ EncVal <<= 1;
+ EncVal |= BitH;
+ return EncVal;
+}
+
+static inline uint64_t decodeAdvSIMDModImmType12(uint8_t Imm) {
+ uint64_t EncVal = 0;
+ if (Imm & 0x80) EncVal |= 0x8000000000000000ULL;
+ if (Imm & 0x40) EncVal |= 0x3fc0000000000000ULL;
+ else EncVal |= 0x4000000000000000ULL;
+ if (Imm & 0x20) EncVal |= 0x0020000000000000ULL;
+ if (Imm & 0x10) EncVal |= 0x0010000000000000ULL;
+ if (Imm & 0x08) EncVal |= 0x0008000000000000ULL;
+ if (Imm & 0x04) EncVal |= 0x0004000000000000ULL;
+ if (Imm & 0x02) EncVal |= 0x0002000000000000ULL;
+ if (Imm & 0x01) EncVal |= 0x0001000000000000ULL;
+ return (EncVal << 32) | EncVal;
+}
+
+} // end namespace AArch64_AM
+
+} // end namespace llvm
+
+#endif
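
A small sanity sketch for the AdvSIMD modified-immediate helpers, assuming the header above is included: type 1 repeats the 8-bit value in byte 0 of each 32-bit lane, so encode and decode should round-trip.

  uint64_t Imm = 0x0000004500000045ULL;
  assert(llvm::AArch64_AM::isAdvSIMDModImmType1(Imm));
  uint8_t Enc = llvm::AArch64_AM::encodeAdvSIMDModImmType1(Imm);   // 0x45
  assert(llvm::AArch64_AM::decodeAdvSIMDModImmType1(Enc) == Imm);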
diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
index 8a9077c..a917616 100644
--- a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -6,167 +6,57 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file contains the AArch64 implementation of the MCAsmBackend class,
-// which is principally concerned with relaxation of the various fixup kinds.
-//
-//===----------------------------------------------------------------------===//
+#include "AArch64.h"
+#include "AArch64RegisterInfo.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
-#include "MCTargetDesc/AArch64MCTargetDesc.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCAsmBackend.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
-#include "llvm/Support/ELF.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/MachO.h"
using namespace llvm;
namespace {
-class AArch64AsmBackend : public MCAsmBackend {
- const MCSubtargetInfo* STI;
-public:
- AArch64AsmBackend(const Target &T, const StringRef TT)
- : MCAsmBackend(),
- STI(AArch64_MC::createAArch64MCSubtargetInfo(TT, "", ""))
- {}
-
-
- ~AArch64AsmBackend() {
- delete STI;
- }
-
- bool writeNopData(uint64_t Count, MCObjectWriter *OW) const;
- virtual void processFixupValue(const MCAssembler &Asm,
- const MCAsmLayout &Layout,
- const MCFixup &Fixup, const MCFragment *DF,
- MCValue &Target, uint64_t &Value,
- bool &IsResolved);
-};
-} // end anonymous namespace
-
-void AArch64AsmBackend::processFixupValue(const MCAssembler &Asm,
- const MCAsmLayout &Layout,
- const MCFixup &Fixup,
- const MCFragment *DF,
- MCValue &Target, uint64_t &Value,
- bool &IsResolved) {
- // The ADRP instruction adds some multiple of 0x1000 to the current PC &
- // ~0xfff. This means that the required offset to reach a symbol can vary by
- // up to one step depending on where the ADRP is in memory. For example:
- //
- // ADRP x0, there
- // there:
- //
- // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
- // we'll need that as an offset. At any other address "there" will be in the
- // same page as the ADRP and the instruction should encode 0x0. Assuming the
- // section isn't 0x1000-aligned, we therefore need to delegate this decision
- // to the linker -- a relocation!
- if ((uint32_t)Fixup.getKind() == AArch64::fixup_a64_adr_prel_page ||
- (uint32_t)Fixup.getKind() == AArch64::fixup_a64_adr_prel_got_page ||
- (uint32_t)Fixup.getKind() == AArch64::fixup_a64_adr_gottprel_page ||
- (uint32_t)Fixup.getKind() == AArch64::fixup_a64_tlsdesc_adr_page)
- IsResolved = false;
-}
-
-
-static uint64_t adjustFixupValue(unsigned Kind, uint64_t Value);
-
-namespace {
+class AArch64AsmBackend : public MCAsmBackend {
+ static const unsigned PCRelFlagVal =
+ MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;
-class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
- uint8_t OSABI;
- ELFAArch64AsmBackend(const Target &T, const StringRef TT,
- uint8_t _OSABI)
- : AArch64AsmBackend(T, TT), OSABI(_OSABI) { }
-
- bool fixupNeedsRelaxation(const MCFixup &Fixup,
- uint64_t Value,
- const MCRelaxableFragment *DF,
- const MCAsmLayout &Layout) const;
+ AArch64AsmBackend(const Target &T) : MCAsmBackend() {}
- unsigned int getNumFixupKinds() const {
+ unsigned getNumFixupKinds() const override {
return AArch64::NumTargetFixupKinds;
}
- const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
-// This table *must* be in the order that the fixup_* kinds are defined in
-// AArch64FixupKinds.h.
-//
-// Name Offset (bits) Size (bits) Flags
-{ "fixup_a64_ld_prel", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_adr_prel", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_adr_prel_page", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_add_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst8_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst16_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst32_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst64_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst128_lo12", 0, 32, 0 },
-{ "fixup_a64_tstbr", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_condbr", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_uncondbr", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_call", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_movw_uabs_g0", 0, 32, 0 },
-{ "fixup_a64_movw_uabs_g0_nc", 0, 32, 0 },
-{ "fixup_a64_movw_uabs_g1", 0, 32, 0 },
-{ "fixup_a64_movw_uabs_g1_nc", 0, 32, 0 },
-{ "fixup_a64_movw_uabs_g2", 0, 32, 0 },
-{ "fixup_a64_movw_uabs_g2_nc", 0, 32, 0 },
-{ "fixup_a64_movw_uabs_g3", 0, 32, 0 },
-{ "fixup_a64_movw_sabs_g0", 0, 32, 0 },
-{ "fixup_a64_movw_sabs_g1", 0, 32, 0 },
-{ "fixup_a64_movw_sabs_g2", 0, 32, 0 },
-{ "fixup_a64_adr_prel_got_page", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_ld64_got_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_movw_dtprel_g2", 0, 32, 0 },
-{ "fixup_a64_movw_dtprel_g1", 0, 32, 0 },
-{ "fixup_a64_movw_dtprel_g1_nc", 0, 32, 0 },
-{ "fixup_a64_movw_dtprel_g0", 0, 32, 0 },
-{ "fixup_a64_movw_dtprel_g0_nc", 0, 32, 0 },
-{ "fixup_a64_add_dtprel_hi12", 0, 32, 0 },
-{ "fixup_a64_add_dtprel_lo12", 0, 32, 0 },
-{ "fixup_a64_add_dtprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_ldst8_dtprel_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst8_dtprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_ldst16_dtprel_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst16_dtprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_ldst32_dtprel_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst32_dtprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_ldst64_dtprel_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst64_dtprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_movw_gottprel_g1", 0, 32, 0 },
-{ "fixup_a64_movw_gottprel_g0_nc", 0, 32, 0 },
-{ "fixup_a64_adr_gottprel_page", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_ld64_gottprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_ld_gottprel_prel19", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_movw_tprel_g2", 0, 32, 0 },
-{ "fixup_a64_movw_tprel_g1", 0, 32, 0 },
-{ "fixup_a64_movw_tprel_g1_nc", 0, 32, 0 },
-{ "fixup_a64_movw_tprel_g0", 0, 32, 0 },
-{ "fixup_a64_movw_tprel_g0_nc", 0, 32, 0 },
-{ "fixup_a64_add_tprel_hi12", 0, 32, 0 },
-{ "fixup_a64_add_tprel_lo12", 0, 32, 0 },
-{ "fixup_a64_add_tprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_ldst8_tprel_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst8_tprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_ldst16_tprel_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst16_tprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_ldst32_tprel_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst32_tprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_ldst64_tprel_lo12", 0, 32, 0 },
-{ "fixup_a64_ldst64_tprel_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_tlsdesc_adr_page", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_tlsdesc_ld64_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_tlsdesc_add_lo12_nc", 0, 32, 0 },
-{ "fixup_a64_tlsdesc_call", 0, 0, 0 }
+ // This table *must* be in the order that the fixup_* kinds are defined in
+ // AArch64FixupKinds.h.
+ //
+ // Name Offset (bits) Size (bits) Flags
+ { "fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal },
+ { "fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal },
+ { "fixup_aarch64_add_imm12", 10, 12, 0 },
+ { "fixup_aarch64_ldst_imm12_scale1", 10, 12, 0 },
+ { "fixup_aarch64_ldst_imm12_scale2", 10, 12, 0 },
+ { "fixup_aarch64_ldst_imm12_scale4", 10, 12, 0 },
+ { "fixup_aarch64_ldst_imm12_scale8", 10, 12, 0 },
+ { "fixup_aarch64_ldst_imm12_scale16", 10, 12, 0 },
+ { "fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal },
+ { "fixup_aarch64_movw", 5, 16, 0 },
+ { "fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal },
+ { "fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal },
+ { "fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal },
+ { "fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal },
+ { "fixup_aarch64_tlsdesc_call", 0, 0, 0 }
};
+
if (Kind < FirstTargetFixupKind)
return MCAsmBackend::getFixupKindInfo(Kind);
@@ -176,410 +66,501 @@ public:
}
void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
- uint64_t Value) const {
- unsigned NumBytes = getFixupKindInfo(Fixup.getKind()).TargetSize / 8;
- Value = adjustFixupValue(Fixup.getKind(), Value);
- if (!Value) return; // Doesn't change encoding.
-
- unsigned Offset = Fixup.getOffset();
- assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");
-
- // For each byte of the fragment that the fixup touches, mask in the bits
- // from the fixup value.
- for (unsigned i = 0; i != NumBytes; ++i) {
- Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
- }
- }
+ uint64_t Value, bool IsPCRel) const override;
- bool mayNeedRelaxation(const MCInst&) const {
- return false;
- }
+ bool mayNeedRelaxation(const MCInst &Inst) const override;
+ bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout) const override;
+ void relaxInstruction(const MCInst &Inst, MCInst &Res) const override;
+ bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
- void relaxInstruction(const MCInst&, llvm::MCInst&) const {
- llvm_unreachable("Cannot relax instructions");
- }
+ void HandleAssemblerFlag(MCAssemblerFlag Flag) {}
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
- return createAArch64ELFObjectWriter(OS, OSABI);
- }
+ unsigned getPointerSize() const { return 8; }
};
} // end anonymous namespace
-bool
-ELFAArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
- uint64_t Value,
- const MCRelaxableFragment *DF,
- const MCAsmLayout &Layout) const {
- // Correct for now. With all instructions 32-bit only very low-level
- // considerations could make you select something which may fail.
- return false;
-}
+/// \brief The number of bytes the fixup may change.
+static unsigned getFixupKindNumBytes(unsigned Kind) {
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown fixup kind!");
+ case AArch64::fixup_aarch64_tlsdesc_call:
+ return 0;
-bool AArch64AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
- // Can't emit NOP with size not multiple of 32-bits
- if (Count % 4 != 0)
- return false;
+ case FK_Data_1:
+ return 1;
- uint64_t NumNops = Count / 4;
- for (uint64_t i = 0; i != NumNops; ++i)
- OW->Write32(0xd503201f);
+ case FK_Data_2:
+ case AArch64::fixup_aarch64_movw:
+ return 2;
+
+ case AArch64::fixup_aarch64_pcrel_branch14:
+ case AArch64::fixup_aarch64_add_imm12:
+ case AArch64::fixup_aarch64_ldst_imm12_scale1:
+ case AArch64::fixup_aarch64_ldst_imm12_scale2:
+ case AArch64::fixup_aarch64_ldst_imm12_scale4:
+ case AArch64::fixup_aarch64_ldst_imm12_scale8:
+ case AArch64::fixup_aarch64_ldst_imm12_scale16:
+ case AArch64::fixup_aarch64_ldr_pcrel_imm19:
+ case AArch64::fixup_aarch64_pcrel_branch19:
+ return 3;
+
+ case AArch64::fixup_aarch64_pcrel_adr_imm21:
+ case AArch64::fixup_aarch64_pcrel_adrp_imm21:
+ case AArch64::fixup_aarch64_pcrel_branch26:
+ case AArch64::fixup_aarch64_pcrel_call26:
+ case FK_Data_4:
+ return 4;
- return true;
+ case FK_Data_8:
+ return 8;
+ }
}
-static unsigned ADRImmBits(unsigned Value) {
+static unsigned AdrImmBits(unsigned Value) {
unsigned lo2 = Value & 0x3;
- unsigned hi19 = (Value & 0x1fffff) >> 2;
-
+ unsigned hi19 = (Value & 0x1ffffc) >> 2;
return (hi19 << 5) | (lo2 << 29);
}
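
ADR and ADRP split their 21-bit immediate into immlo (bits 30:29) and immhi (bits 23:5) of the instruction word; a worked example of the computation above:

  // AdrImmBits(0x1235): lo2 = 0b01, hi19 = 0x48D
  //   => (0x48D << 5) | (0b01 << 29) == 0x200091A0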
static uint64_t adjustFixupValue(unsigned Kind, uint64_t Value) {
+ int64_t SignedValue = static_cast<int64_t>(Value);
switch (Kind) {
default:
- llvm_unreachable("Unknown fixup kind!");
- case FK_Data_2:
- assert((int64_t)Value >= -32768 &&
- (int64_t)Value <= 65536 &&
- "Out of range ABS16 fixup");
+ assert(false && "Unknown fixup kind!");
+ case AArch64::fixup_aarch64_pcrel_adr_imm21:
+ if (SignedValue > 2097151 || SignedValue < -2097152)
+ report_fatal_error("fixup value out of range");
+ return AdrImmBits(Value & 0x1fffffULL);
+ case AArch64::fixup_aarch64_pcrel_adrp_imm21:
+ return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
+ case AArch64::fixup_aarch64_ldr_pcrel_imm19:
+ case AArch64::fixup_aarch64_pcrel_branch19:
+ // Signed 21-bit immediate
+ if (SignedValue > 2097151 || SignedValue < -2097152)
+ report_fatal_error("fixup value out of range");
+ // Low two bits are not encoded.
+ return (Value >> 2) & 0x7ffff;
+ case AArch64::fixup_aarch64_add_imm12:
+ case AArch64::fixup_aarch64_ldst_imm12_scale1:
+ // Unsigned 12-bit immediate
+ if (Value >= 0x1000)
+ report_fatal_error("invalid imm12 fixup value");
return Value;
- case FK_Data_4:
- assert((int64_t)Value >= -(1LL << 31) &&
- (int64_t)Value <= (1LL << 32) - 1 &&
- "Out of range ABS32 fixup");
+ case AArch64::fixup_aarch64_ldst_imm12_scale2:
+ // Unsigned 12-bit immediate which gets multiplied by 2
+ if (Value & 1 || Value >= 0x2000)
+ report_fatal_error("invalid imm12 fixup value");
+ return Value >> 1;
+ case AArch64::fixup_aarch64_ldst_imm12_scale4:
+ // Unsigned 12-bit immediate which gets multiplied by 4
+ if (Value & 3 || Value >= 0x4000)
+ report_fatal_error("invalid imm12 fixup value");
+ return Value >> 2;
+ case AArch64::fixup_aarch64_ldst_imm12_scale8:
+ // Unsigned 12-bit immediate which gets multiplied by 8
+ if (Value & 7 || Value >= 0x8000)
+ report_fatal_error("invalid imm12 fixup value");
+ return Value >> 3;
+ case AArch64::fixup_aarch64_ldst_imm12_scale16:
+ // Unsigned 12-bit immediate which gets multiplied by 16
+ if (Value & 15 || Value >= 0x10000)
+ report_fatal_error("invalid imm12 fixup value");
+ return Value >> 4;
+ case AArch64::fixup_aarch64_movw:
+ report_fatal_error("no resolvable MOVZ/MOVK fixups supported yet");
return Value;
+ case AArch64::fixup_aarch64_pcrel_branch14:
+ // Signed 16-bit immediate
+ if (SignedValue > 32767 || SignedValue < -32768)
+ report_fatal_error("fixup value out of range");
+ // Low two bits are not encoded (4-byte alignment assumed).
+ if (Value & 0x3)
+ report_fatal_error("fixup not sufficiently aligned");
+ return (Value >> 2) & 0x3fff;
+ case AArch64::fixup_aarch64_pcrel_branch26:
+ case AArch64::fixup_aarch64_pcrel_call26:
+ // Signed 28-bit immediate
+ if (SignedValue > 134217727 || SignedValue < -134217728)
+ report_fatal_error("fixup value out of range");
+ // Low two bits are not encoded (4-byte alignment assumed).
+ if (Value & 0x3)
+ report_fatal_error("fixup not sufficiently aligned");
+ return (Value >> 2) & 0x3ffffff;
+ case FK_Data_1:
+ case FK_Data_2:
+ case FK_Data_4:
case FK_Data_8:
return Value;
+ }
+}
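
The scaled imm12 fixups divide the byte offset by the access size before it is inserted into the instruction; for example:

  // For LDR Xt, [Xn, #0x150] the byte offset 0x150 resolves through
  //   adjustFixupValue(AArch64::fixup_aarch64_ldst_imm12_scale8, 0x150) == 0x2A
  // while an offset that is not a multiple of 8 (or >= 0x8000) hits
  //   report_fatal_error("invalid imm12 fixup value").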
- case AArch64::fixup_a64_ld_gottprel_prel19:
- // R_AARCH64_LD_GOTTPREL_PREL19: Set a load-literal immediate to bits 1F
- // FFFC of G(TPREL(S+A)) - P; check -2^20 <= X < 2^20.
- case AArch64::fixup_a64_ld_prel:
- // R_AARCH64_LD_PREL_LO19: Sets a load-literal (immediate) value to bits
- // 1F FFFC of S+A-P, checking that -2^20 <= S+A-P < 2^20.
- assert((int64_t)Value >= -(1LL << 20) &&
- (int64_t)Value < (1LL << 20) && "Out of range LDR (lit) fixup");
- return (Value & 0x1ffffc) << 3;
-
- case AArch64::fixup_a64_adr_prel:
- // R_AARCH64_ADR_PREL_LO21: Sets an ADR immediate value to bits 1F FFFF of
- // the result of S+A-P, checking that -2^20 <= S+A-P < 2^20.
- assert((int64_t)Value >= -(1LL << 20) &&
- (int64_t)Value < (1LL << 20) && "Out of range ADR fixup");
- return ADRImmBits(Value & 0x1fffff);
-
- case AArch64::fixup_a64_adr_prel_page:
- // R_AARCH64_ADR_PREL_PG_HI21: Sets an ADRP immediate value to bits 1 FFFF
- // F000 of the result of the operation, checking that -2^32 <= result <
- // 2^32.
- assert((int64_t)Value >= -(1LL << 32) &&
- (int64_t)Value < (1LL << 32) && "Out of range ADRP fixup");
- return ADRImmBits((Value & 0x1fffff000ULL) >> 12);
-
- case AArch64::fixup_a64_add_dtprel_hi12:
- // R_AARCH64_TLSLD_ADD_DTPREL_LO12: Set an ADD immediate field to bits
- // FF F000 of DTPREL(S+A), check 0 <= X < 2^24.
- case AArch64::fixup_a64_add_tprel_hi12:
- // R_AARCH64_TLSLD_ADD_TPREL_LO12: Set an ADD immediate field to bits
- // FF F000 of TPREL(S+A), check 0 <= X < 2^24.
- assert((int64_t)Value >= 0 &&
- (int64_t)Value < (1LL << 24) && "Out of range ADD fixup");
- return (Value & 0xfff000) >> 2;
-
- case AArch64::fixup_a64_add_dtprel_lo12:
- // R_AARCH64_TLSLD_ADD_DTPREL_LO12: Set an ADD immediate field to bits
- // FFF of DTPREL(S+A), check 0 <= X < 2^12.
- case AArch64::fixup_a64_add_tprel_lo12:
- // R_AARCH64_TLSLD_ADD_TPREL_LO12: Set an ADD immediate field to bits
- // FFF of TPREL(S+A), check 0 <= X < 2^12.
- assert((int64_t)Value >= 0 &&
- (int64_t)Value < (1LL << 12) && "Out of range ADD fixup");
- // ... fallthrough to no-checking versions ...
- case AArch64::fixup_a64_add_dtprel_lo12_nc:
- // R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: Set an ADD immediate field to bits
- // FFF of DTPREL(S+A) with no overflow check.
- case AArch64::fixup_a64_add_tprel_lo12_nc:
- // R_AARCH64_TLSLD_ADD_TPREL_LO12_NC: Set an ADD immediate field to bits
- // FFF of TPREL(S+A) with no overflow check.
- case AArch64::fixup_a64_tlsdesc_add_lo12_nc:
- // R_AARCH64_TLSDESC_ADD_LO12_NC: Set an ADD immediate field to bits
- // FFF of G(TLSDESC(S+A)), with no overflow check.
- case AArch64::fixup_a64_add_lo12:
- // R_AARCH64_ADD_ABS_LO12_NC: Sets an ADD immediate value to bits FFF of
- // S+A, with no overflow check.
- return (Value & 0xfff) << 10;
-
- case AArch64::fixup_a64_ldst8_dtprel_lo12:
- // R_AARCH64_TLSLD_LDST8_DTPREL_LO12: Set an LD/ST offset field to bits FFF
- // of DTPREL(S+A), check 0 <= X < 2^12.
- case AArch64::fixup_a64_ldst8_tprel_lo12:
- // R_AARCH64_TLSLE_LDST8_TPREL_LO12: Set an LD/ST offset field to bits FFF
- // of DTPREL(S+A), check 0 <= X < 2^12.
- assert((int64_t) Value >= 0 &&
- (int64_t) Value < (1LL << 12) && "Out of range LD/ST fixup");
- // ... fallthrough to no-checking versions ...
- case AArch64::fixup_a64_ldst8_dtprel_lo12_nc:
- // R_AARCH64_TLSLD_LDST8_DTPREL_LO12: Set an LD/ST offset field to bits FFF
- // of DTPREL(S+A), with no overflow check.
- case AArch64::fixup_a64_ldst8_tprel_lo12_nc:
- // R_AARCH64_TLSLD_LDST8_TPREL_LO12: Set an LD/ST offset field to bits FFF
- // of TPREL(S+A), with no overflow check.
- case AArch64::fixup_a64_ldst8_lo12:
- // R_AARCH64_LDST8_ABS_LO12_NC: Sets an LD/ST immediate value to bits FFF
- // of S+A, with no overflow check.
- return (Value & 0xfff) << 10;
-
- case AArch64::fixup_a64_ldst16_dtprel_lo12:
- // R_AARCH64_TLSLD_LDST16_DTPREL_LO12: Set an LD/ST offset field to bits FFE
- // of DTPREL(S+A), check 0 <= X < 2^12.
- case AArch64::fixup_a64_ldst16_tprel_lo12:
- // R_AARCH64_TLSLE_LDST16_TPREL_LO12: Set an LD/ST offset field to bits FFE
- // of DTPREL(S+A), check 0 <= X < 2^12.
- assert((int64_t) Value >= 0 &&
- (int64_t) Value < (1LL << 12) && "Out of range LD/ST fixup");
- // ... fallthrough to no-checking versions ...
- case AArch64::fixup_a64_ldst16_dtprel_lo12_nc:
- // R_AARCH64_TLSLD_LDST16_DTPREL_LO12: Set an LD/ST offset field to bits FFE
- // of DTPREL(S+A), with no overflow check.
- case AArch64::fixup_a64_ldst16_tprel_lo12_nc:
- // R_AARCH64_TLSLD_LDST16_TPREL_LO12: Set an LD/ST offset field to bits FFE
- // of TPREL(S+A), with no overflow check.
- case AArch64::fixup_a64_ldst16_lo12:
- // R_AARCH64_LDST16_ABS_LO12_NC: Sets an LD/ST immediate value to bits FFE
- // of S+A, with no overflow check.
- return (Value & 0xffe) << 9;
-
- case AArch64::fixup_a64_ldst32_dtprel_lo12:
- // R_AARCH64_TLSLD_LDST32_DTPREL_LO12: Set an LD/ST offset field to bits FFC
- // of DTPREL(S+A), check 0 <= X < 2^12.
- case AArch64::fixup_a64_ldst32_tprel_lo12:
- // R_AARCH64_TLSLE_LDST32_TPREL_LO12: Set an LD/ST offset field to bits FFC
- // of DTPREL(S+A), check 0 <= X < 2^12.
- assert((int64_t) Value >= 0 &&
- (int64_t) Value < (1LL << 12) && "Out of range LD/ST fixup");
- // ... fallthrough to no-checking versions ...
- case AArch64::fixup_a64_ldst32_dtprel_lo12_nc:
- // R_AARCH64_TLSLD_LDST32_DTPREL_LO12: Set an LD/ST offset field to bits FFC
- // of DTPREL(S+A), with no overflow check.
- case AArch64::fixup_a64_ldst32_tprel_lo12_nc:
- // R_AARCH64_TLSLD_LDST32_TPREL_LO12: Set an LD/ST offset field to bits FFC
- // of TPREL(S+A), with no overflow check.
- case AArch64::fixup_a64_ldst32_lo12:
- // R_AARCH64_LDST32_ABS_LO12_NC: Sets an LD/ST immediate value to bits FFC
- // of S+A, with no overflow check.
- return (Value & 0xffc) << 8;
-
- case AArch64::fixup_a64_ldst64_dtprel_lo12:
- // R_AARCH64_TLSLD_LDST64_DTPREL_LO12: Set an LD/ST offset field to bits FF8
- // of DTPREL(S+A), check 0 <= X < 2^12.
- case AArch64::fixup_a64_ldst64_tprel_lo12:
- // R_AARCH64_TLSLE_LDST64_TPREL_LO12: Set an LD/ST offset field to bits FF8
- // of DTPREL(S+A), check 0 <= X < 2^12.
- assert((int64_t) Value >= 0 &&
- (int64_t) Value < (1LL << 12) && "Out of range LD/ST fixup");
- // ... fallthrough to no-checking versions ...
- case AArch64::fixup_a64_ldst64_dtprel_lo12_nc:
- // R_AARCH64_TLSLD_LDST64_DTPREL_LO12: Set an LD/ST offset field to bits FF8
- // of DTPREL(S+A), with no overflow check.
- case AArch64::fixup_a64_ldst64_tprel_lo12_nc:
- // R_AARCH64_TLSLD_LDST64_TPREL_LO12: Set an LD/ST offset field to bits FF8
- // of TPREL(S+A), with no overflow check.
- case AArch64::fixup_a64_ldst64_lo12:
- // R_AARCH64_LDST64_ABS_LO12_NC: Sets an LD/ST immediate value to bits FF8
- // of S+A, with no overflow check.
- return (Value & 0xff8) << 7;
-
- case AArch64::fixup_a64_ldst128_lo12:
- // R_AARCH64_LDST128_ABS_LO12_NC: Sets an LD/ST immediate value to bits FF0
- // of S+A, with no overflow check.
- return (Value & 0xff0) << 6;
-
- case AArch64::fixup_a64_movw_uabs_g0:
- // R_AARCH64_MOVW_UABS_G0: Sets a MOVZ immediate field to bits FFFF of S+A
- // with a check that S+A < 2^16
- assert(Value <= 0xffff && "Out of range move wide fixup");
- return (Value & 0xffff) << 5;
-
- case AArch64::fixup_a64_movw_dtprel_g0_nc:
- // R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: Sets a MOVK immediate field to bits
- // FFFF of DTPREL(S+A) with no overflow check.
- case AArch64::fixup_a64_movw_gottprel_g0_nc:
- // R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC: Sets a MOVK immediate field to bits
- // FFFF of G(TPREL(S+A)) - GOT with no overflow check.
- case AArch64::fixup_a64_movw_tprel_g0_nc:
- // R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: Sets a MOVK immediate field to bits
- // FFFF of TPREL(S+A) with no overflow check.
- case AArch64::fixup_a64_movw_uabs_g0_nc:
- // R_AARCH64_MOVW_UABS_G0_NC: Sets a MOVK immediate field to bits FFFF of
- // S+A with no overflow check.
- return (Value & 0xffff) << 5;
-
- case AArch64::fixup_a64_movw_uabs_g1:
- // R_AARCH64_MOVW_UABS_G1: Sets a MOVZ immediate field to bits FFFF0000 of
- // S+A with a check that S+A < 2^32
- assert(Value <= 0xffffffffull && "Out of range move wide fixup");
- return ((Value >> 16) & 0xffff) << 5;
-
- case AArch64::fixup_a64_movw_dtprel_g1_nc:
- // R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC: Set a MOVK immediate field
- // to bits FFFF0000 of DTPREL(S+A), with no overflow check.
- case AArch64::fixup_a64_movw_tprel_g1_nc:
- // R_AARCH64_TLSLD_MOVW_TPREL_G1_NC: Set a MOVK immediate field
- // to bits FFFF0000 of TPREL(S+A), with no overflow check.
- case AArch64::fixup_a64_movw_uabs_g1_nc:
- // R_AARCH64_MOVW_UABS_G1_NC: Sets a MOVK immediate field to bits
- // FFFF0000 of S+A with no overflow check.
- return ((Value >> 16) & 0xffff) << 5;
-
- case AArch64::fixup_a64_movw_uabs_g2:
- // R_AARCH64_MOVW_UABS_G2: Sets a MOVZ immediate field to bits FFFF 0000
- // 0000 of S+A with a check that S+A < 2^48
- assert(Value <= 0xffffffffffffull && "Out of range move wide fixup");
- return ((Value >> 32) & 0xffff) << 5;
-
- case AArch64::fixup_a64_movw_uabs_g2_nc:
- // R_AARCH64_MOVW_UABS_G2: Sets a MOVK immediate field to bits FFFF 0000
- // 0000 of S+A with no overflow check.
- return ((Value >> 32) & 0xffff) << 5;
-
- case AArch64::fixup_a64_movw_uabs_g3:
- // R_AARCH64_MOVW_UABS_G3: Sets a MOVZ immediate field to bits FFFF 0000
- // 0000 0000 of S+A (no overflow check needed)
- return ((Value >> 48) & 0xffff) << 5;
-
- case AArch64::fixup_a64_movw_dtprel_g0:
- // R_AARCH64_TLSLD_MOVW_DTPREL_G0: Set a MOV[NZ] immediate field
- // to bits FFFF of DTPREL(S+A).
- case AArch64::fixup_a64_movw_tprel_g0:
- // R_AARCH64_TLSLE_MOVW_TPREL_G0: Set a MOV[NZ] immediate field to
- // bits FFFF of TPREL(S+A).
- case AArch64::fixup_a64_movw_sabs_g0: {
- // R_AARCH64_MOVW_SABS_G0: Sets MOV[NZ] immediate field using bits FFFF of
- // S+A (see notes below); check -2^16 <= S+A < 2^16. (notes say that we
- // should convert between MOVN and MOVZ to achieve our goals).
- int64_t Signed = Value;
- assert(Signed >= -(1LL << 16) && Signed < (1LL << 16)
- && "Out of range move wide fixup");
- if (Signed >= 0) {
- Value = (Value & 0xffff) << 5;
- // Bit 30 converts the MOVN encoding into a MOVZ
- Value |= 1 << 30;
- } else {
- // MCCodeEmitter should have encoded a MOVN, which is fine.
- Value = (~Value & 0xffff) << 5;
- }
- return Value;
+void AArch64AsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
+ unsigned DataSize, uint64_t Value,
+ bool IsPCRel) const {
+ unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
+ if (!Value)
+ return; // Doesn't change encoding.
+ MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
+ // Apply any target-specific value adjustments.
+ Value = adjustFixupValue(Fixup.getKind(), Value);
+
+ // Shift the value into position.
+ Value <<= Info.TargetOffset;
+
+ unsigned Offset = Fixup.getOffset();
+ assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");
+
+ // For each byte of the fragment that the fixup touches, mask in the
+ // bits from the fixup value.
+ for (unsigned i = 0; i != NumBytes; ++i)
+ Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
+}
+
+bool AArch64AsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
+ return false;
+}
+
+bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
+ uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout) const {
+ // FIXME: This isn't correct for AArch64. Just moving the "generic" logic
+ // into the targets for now.
+ //
+ // Relax if the value is too big for a (signed) i8.
+ return int64_t(Value) != int64_t(int8_t(Value));
+}
+
+void AArch64AsmBackend::relaxInstruction(const MCInst &Inst,
+ MCInst &Res) const {
+ assert(false && "AArch64AsmBackend::relaxInstruction() unimplemented");
+}
+
+bool AArch64AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
+ // If the count is not 4-byte aligned, we must be writing data into the text
+ // section (otherwise we have unaligned instructions, and thus have far
+ // bigger problems), so just write zeros instead.
+ if ((Count & 3) != 0) {
+ for (uint64_t i = 0, e = (Count & 3); i != e; ++i)
+ OW->Write8(0);
}
- case AArch64::fixup_a64_movw_dtprel_g1:
- // R_AARCH64_TLSLD_MOVW_DTPREL_G1: Set a MOV[NZ] immediate field
- // to bits FFFF0000 of DTPREL(S+A).
- case AArch64::fixup_a64_movw_gottprel_g1:
- // R_AARCH64_TLSIE_MOVW_GOTTPREL_G1: Set a MOV[NZ] immediate field
- // to bits FFFF0000 of G(TPREL(S+A)) - GOT.
- case AArch64::fixup_a64_movw_tprel_g1:
- // R_AARCH64_TLSLE_MOVW_TPREL_G1: Set a MOV[NZ] immediate field to
- // bits FFFF0000 of TPREL(S+A).
- case AArch64::fixup_a64_movw_sabs_g1: {
- // R_AARCH64_MOVW_SABS_G1: Sets MOV[NZ] immediate field using bits FFFF 0000
- // of S+A (see notes below); check -2^32 <= S+A < 2^32. (notes say that we
- // should convert between MOVN and MOVZ to achieve our goals).
- int64_t Signed = Value;
- assert(Signed >= -(1LL << 32) && Signed < (1LL << 32)
- && "Out of range move wide fixup");
- if (Signed >= 0) {
- Value = ((Value >> 16) & 0xffff) << 5;
- // Bit 30 converts the MOVN encoding into a MOVZ
- Value |= 1 << 30;
- } else {
- Value = ((~Value >> 16) & 0xffff) << 5;
- }
- return Value;
+ // We are properly aligned, so write NOPs as requested.
+ Count /= 4;
+ for (uint64_t i = 0; i != Count; ++i)
+ OW->Write32(0xd503201f);
+ return true;
+}
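
For example, a request for 6 bytes of padding is satisfied with two zero bytes followed by a single NOP word:

  // writeNopData(6, OW):
  //   OW->Write8(0); OW->Write8(0);      // 6 & 3 == 2 padding bytes
  //   OW->Write32(0xd503201f);           // 6 / 4 == 1 NOP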
+
+namespace {
+
+namespace CU {
+
+/// \brief Compact unwind encoding values.
+enum CompactUnwindEncodings {
+ /// \brief A "frameless" leaf function, where no non-volatile registers are
+ /// saved. The return address remains in LR throughout the function.
+ UNWIND_AArch64_MODE_FRAMELESS = 0x02000000,
+
+ /// \brief No compact unwind encoding available. Instead, the low 23 bits of
+ /// the compact unwind encoding are the offset of the DWARF FDE in the
+ /// __eh_frame section. This mode is never used in object files. It is only
+ /// generated by the linker in final linked images, which have only DWARF info
+ /// for a function.
+ UNWIND_AArch64_MODE_DWARF = 0x03000000,
+
+ /// \brief This is a standard arm64 prologue where FP/LR are immediately
+ /// pushed on the stack, then SP is copied to FP. If there are any
+ /// non-volatile register saved, they are copied into the stack fame in pairs
+ /// in a contiguous ranger right below the saved FP/LR pair. Any subset of the
+ /// five X pairs and four D pairs can be saved, but the memory layout must be
+ /// in register number order.
+ UNWIND_AArch64_MODE_FRAME = 0x04000000,
+
+ /// \brief Frame register pair encodings.
+ UNWIND_AArch64_FRAME_X19_X20_PAIR = 0x00000001,
+ UNWIND_AArch64_FRAME_X21_X22_PAIR = 0x00000002,
+ UNWIND_AArch64_FRAME_X23_X24_PAIR = 0x00000004,
+ UNWIND_AArch64_FRAME_X25_X26_PAIR = 0x00000008,
+ UNWIND_AArch64_FRAME_X27_X28_PAIR = 0x00000010,
+ UNWIND_AArch64_FRAME_D8_D9_PAIR = 0x00000100,
+ UNWIND_AArch64_FRAME_D10_D11_PAIR = 0x00000200,
+ UNWIND_AArch64_FRAME_D12_D13_PAIR = 0x00000400,
+ UNWIND_AArch64_FRAME_D14_D15_PAIR = 0x00000800
+};
+
+} // end CU namespace
+
+// FIXME: This should be in a separate file.
+class DarwinAArch64AsmBackend : public AArch64AsmBackend {
+ const MCRegisterInfo &MRI;
+
+ /// \brief Encode compact unwind stack adjustment for frameless functions.
+ /// See UNWIND_AArch64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
+ /// The stack size always needs to be 16-byte aligned.
+ uint32_t encodeStackAdjustment(uint32_t StackSize) const {
+ return (StackSize / 16) << 12;
+ }
+
+public:
+ DarwinAArch64AsmBackend(const Target &T, const MCRegisterInfo &MRI)
+ : AArch64AsmBackend(T), MRI(MRI) {}
+
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
+ return createAArch64MachObjectWriter(OS, MachO::CPU_TYPE_ARM64,
+ MachO::CPU_SUBTYPE_ARM64_ALL);
}
- case AArch64::fixup_a64_movw_dtprel_g2:
- // R_AARCH64_TLSLD_MOVW_DTPREL_G2: Set a MOV[NZ] immediate field
- // to bits FFFF 0000 0000 of DTPREL(S+A).
- case AArch64::fixup_a64_movw_tprel_g2:
- // R_AARCH64_TLSLE_MOVW_TPREL_G2: Set a MOV[NZ] immediate field to
- // bits FFFF 0000 0000 of TPREL(S+A).
- case AArch64::fixup_a64_movw_sabs_g2: {
- // R_AARCH64_MOVW_SABS_G2: Sets MOV[NZ] immediate field using bits FFFF 0000
- // 0000 of S+A (see notes below); check -2^48 <= S+A < 2^48. (notes say that
- // we should convert between MOVN and MOVZ to achieve our goals).
- int64_t Signed = Value;
- assert(Signed >= -(1LL << 48) && Signed < (1LL << 48)
- && "Out of range move wide fixup");
- if (Signed >= 0) {
- Value = ((Value >> 32) & 0xffff) << 5;
- // Bit 30 converts the MOVN encoding into a MOVZ
- Value |= 1 << 30;
- } else {
- Value = ((~Value >> 32) & 0xffff) << 5;
+ bool doesSectionRequireSymbols(const MCSection &Section) const override {
+ // Any section for which the linker breaks things into atoms needs to
+ // preserve symbols, including assembler local symbols, to identify
+ // those atoms. These sections are:
+ // Sections of type:
+ //
+ // S_CSTRING_LITERALS (e.g. __cstring)
+ // S_LITERAL_POINTERS (e.g. objc selector pointers)
+ // S_16BYTE_LITERALS, S_8BYTE_LITERALS, S_4BYTE_LITERALS
+ //
+ // Sections named:
+ //
+ // __TEXT,__eh_frame
+ // __TEXT,__ustring
+ // __DATA,__cfstring
+ // __DATA,__objc_classrefs
+ // __DATA,__objc_catlist
+ //
+ // FIXME: It would be better if the compiler used actual linker local
+ // symbols for each of these sections rather than preserving what
+ // are ostensibly assembler local symbols.
+ const MCSectionMachO &SMO = static_cast<const MCSectionMachO &>(Section);
+ return (SMO.getType() == MachO::S_CSTRING_LITERALS ||
+ SMO.getType() == MachO::S_4BYTE_LITERALS ||
+ SMO.getType() == MachO::S_8BYTE_LITERALS ||
+ SMO.getType() == MachO::S_16BYTE_LITERALS ||
+ SMO.getType() == MachO::S_LITERAL_POINTERS ||
+ (SMO.getSegmentName() == "__TEXT" &&
+ (SMO.getSectionName() == "__eh_frame" ||
+ SMO.getSectionName() == "__ustring")) ||
+ (SMO.getSegmentName() == "__DATA" &&
+ (SMO.getSectionName() == "__cfstring" ||
+ SMO.getSectionName() == "__objc_classrefs" ||
+ SMO.getSectionName() == "__objc_catlist")));
+ }
+
+ /// \brief Generate the compact unwind encoding from the CFI directives.
+ uint32_t generateCompactUnwindEncoding(
+ ArrayRef<MCCFIInstruction> Instrs) const override {
+ if (Instrs.empty())
+ return CU::UNWIND_AArch64_MODE_FRAMELESS;
+
+ bool HasFP = false;
+ unsigned StackSize = 0;
+
+ uint32_t CompactUnwindEncoding = 0;
+ for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
+ const MCCFIInstruction &Inst = Instrs[i];
+
+ switch (Inst.getOperation()) {
+ default:
+ // Cannot handle this directive: bail out.
+ return CU::UNWIND_AArch64_MODE_DWARF;
+ case MCCFIInstruction::OpDefCfa: {
+ // Defines a frame pointer.
+ assert(getXRegFromWReg(MRI.getLLVMRegNum(Inst.getRegister(), true)) ==
+ AArch64::FP &&
+ "Invalid frame pointer!");
+ assert(i + 2 < e && "Insufficient CFI instructions to define a frame!");
+
+ const MCCFIInstruction &LRPush = Instrs[++i];
+ assert(LRPush.getOperation() == MCCFIInstruction::OpOffset &&
+ "Link register not pushed!");
+ const MCCFIInstruction &FPPush = Instrs[++i];
+ assert(FPPush.getOperation() == MCCFIInstruction::OpOffset &&
+ "Frame pointer not pushed!");
+
+ unsigned LRReg = MRI.getLLVMRegNum(LRPush.getRegister(), true);
+ unsigned FPReg = MRI.getLLVMRegNum(FPPush.getRegister(), true);
+
+ LRReg = getXRegFromWReg(LRReg);
+ FPReg = getXRegFromWReg(FPReg);
+
+ assert(LRReg == AArch64::LR && FPReg == AArch64::FP &&
+ "Pushing invalid registers for frame!");
+
+ // Indicate that the function has a frame.
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_MODE_FRAME;
+ HasFP = true;
+ break;
+ }
+ case MCCFIInstruction::OpDefCfaOffset: {
+ assert(StackSize == 0 && "We already have the CFA offset!");
+ StackSize = std::abs(Inst.getOffset());
+ break;
+ }
+ case MCCFIInstruction::OpOffset: {
+ // Registers are saved in pairs. We expect there to be two consecutive
+ // `.cfi_offset' instructions with the appropriate registers specified.
+ unsigned Reg1 = MRI.getLLVMRegNum(Inst.getRegister(), true);
+ if (i + 1 == e)
+ return CU::UNWIND_AArch64_MODE_DWARF;
+
+ const MCCFIInstruction &Inst2 = Instrs[++i];
+ if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
+ return CU::UNWIND_AArch64_MODE_DWARF;
+ unsigned Reg2 = MRI.getLLVMRegNum(Inst2.getRegister(), true);
+
+ // N.B. The encodings must be in register number order, and the X
+ // registers before the D registers.
+
+ // X19/X20 pair = 0x00000001,
+ // X21/X22 pair = 0x00000002,
+ // X23/X24 pair = 0x00000004,
+ // X25/X26 pair = 0x00000008,
+ // X27/X28 pair = 0x00000010
+ Reg1 = getXRegFromWReg(Reg1);
+ Reg2 = getXRegFromWReg(Reg2);
+
+ if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
+ (CompactUnwindEncoding & 0xF1E) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X19_X20_PAIR;
+ else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
+ (CompactUnwindEncoding & 0xF1C) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X21_X22_PAIR;
+ else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
+ (CompactUnwindEncoding & 0xF18) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X23_X24_PAIR;
+ else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
+ (CompactUnwindEncoding & 0xF10) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X25_X26_PAIR;
+ else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
+ (CompactUnwindEncoding & 0xF00) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X27_X28_PAIR;
+ else {
+ Reg1 = getDRegFromBReg(Reg1);
+ Reg2 = getDRegFromBReg(Reg2);
+
+ // D8/D9 pair = 0x00000100,
+ // D10/D11 pair = 0x00000200,
+ // D12/D13 pair = 0x00000400,
+ // D14/D15 pair = 0x00000800
+ if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
+ (CompactUnwindEncoding & 0xE00) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_D8_D9_PAIR;
+ else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
+ (CompactUnwindEncoding & 0xC00) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_D10_D11_PAIR;
+ else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
+ (CompactUnwindEncoding & 0x800) == 0)
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_D12_D13_PAIR;
+ else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_D14_D15_PAIR;
+ else
+ // A pair was pushed which we cannot handle.
+ return CU::UNWIND_AArch64_MODE_DWARF;
+ }
+
+ break;
+ }
+ }
}
- return Value;
+
+ if (!HasFP) {
+ // With compact unwind info we can only represent stack adjustments of up
+ // to 65520 bytes.
+ if (StackSize > 65520)
+ return CU::UNWIND_AArch64_MODE_DWARF;
+
+ CompactUnwindEncoding |= CU::UNWIND_AArch64_MODE_FRAMELESS;
+ CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
+ }
+
+ return CompactUnwindEncoding;
}
+};
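
Two encodings the routine above would produce, following directly from the CU:: constants (a sketch, not exhaustive):

  // Frameless leaf with a 64-byte stack adjustment and no saved pairs:
  //   UNWIND_AArch64_MODE_FRAMELESS | encodeStackAdjustment(64) == 0x02004000
  // Standard frame saving x19/x20 and d8/d9:
  //   UNWIND_AArch64_MODE_FRAME | UNWIND_AArch64_FRAME_X19_X20_PAIR
  //     | UNWIND_AArch64_FRAME_D8_D9_PAIR == 0x04000101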
- case AArch64::fixup_a64_tstbr:
- // R_AARCH64_TSTBR14: Sets the immediate field of a TBZ/TBNZ instruction to
- // bits FFFC of S+A-P, checking -2^15 <= S+A-P < 2^15.
- assert((int64_t)Value >= -(1LL << 15) &&
- (int64_t)Value < (1LL << 15) && "Out of range TBZ/TBNZ fixup");
- return (Value & 0xfffc) << (5 - 2);
-
- case AArch64::fixup_a64_condbr:
- // R_AARCH64_CONDBR19: Sets the immediate field of a conditional branch
- // instruction to bits 1FFFFC of S+A-P, checking -2^20 <= S+A-P < 2^20.
- assert((int64_t)Value >= -(1LL << 20) &&
- (int64_t)Value < (1LL << 20) && "Out of range B.cond fixup");
- return (Value & 0x1ffffc) << (5 - 2);
-
- case AArch64::fixup_a64_uncondbr:
- // R_AARCH64_JUMP26 same as below (except to a linker, possibly).
- case AArch64::fixup_a64_call:
- // R_AARCH64_CALL26: Sets a CALL immediate field to bits FFFFFFC of S+A-P,
- // checking that -2^27 <= S+A-P < 2^27.
- assert((int64_t)Value >= -(1LL << 27) &&
- (int64_t)Value < (1LL << 27) && "Out of range branch fixup");
- return (Value & 0xffffffc) >> 2;
-
- case AArch64::fixup_a64_adr_gottprel_page:
- // R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: Set an ADRP immediate field to bits
- // 1FFFFF000 of Page(G(TPREL(S+A))) - Page(P); check -2^32 <= X < 2^32.
- case AArch64::fixup_a64_tlsdesc_adr_page:
- // R_AARCH64_TLSDESC_ADR_PAGE: Set an ADRP immediate field to bits 1FFFFF000
- // of Page(G(TLSDESC(S+A))) - Page(P); check -2^32 <= X < 2^32.
- case AArch64::fixup_a64_adr_prel_got_page:
- // R_AARCH64_ADR_GOT_PAGE: Sets the immediate value of an ADRP to bits
- // 1FFFFF000 of the operation, checking that -2^32 < Page(G(S))-Page(GOT) <
- // 2^32.
- assert((int64_t)Value >= -(1LL << 32) &&
- (int64_t)Value < (1LL << 32) && "Out of range ADRP fixup");
- return ADRImmBits((Value & 0x1fffff000ULL) >> 12);
-
- case AArch64::fixup_a64_ld64_gottprel_lo12_nc:
- // R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: Set an LD offset field to bits FF8
- // of X, with no overflow check. Check that X & 7 == 0.
- case AArch64::fixup_a64_tlsdesc_ld64_lo12_nc:
- // R_AARCH64_TLSDESC_LD64_LO12_NC: Set an LD offset field to bits FF8 of
- // G(TLSDESC(S+A)), with no overflow check. Check that X & 7 == 0.
- case AArch64::fixup_a64_ld64_got_lo12_nc:
- // R_AARCH64_LD64_GOT_LO12_NC: Sets the LD/ST immediate field to bits FF8 of
- // G(S) with no overflow check. Check X & 7 == 0
- assert(((int64_t)Value & 7) == 0 && "Misaligned fixup");
- return (Value & 0xff8) << 7;
-
- case AArch64::fixup_a64_tlsdesc_call:
- // R_AARCH64_TLSDESC_CALL: For relaxation only.
- return 0;
+} // end anonymous namespace
+
+namespace {
+
+class ELFAArch64AsmBackend : public AArch64AsmBackend {
+public:
+ uint8_t OSABI;
+ bool IsLittleEndian;
+
+ ELFAArch64AsmBackend(const Target &T, uint8_t OSABI, bool IsLittleEndian)
+ : AArch64AsmBackend(T), OSABI(OSABI), IsLittleEndian(IsLittleEndian) {}
+
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
+ return createAArch64ELFObjectWriter(OS, OSABI, IsLittleEndian);
+ }
+
+ void processFixupValue(const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFixup &Fixup, const MCFragment *DF,
+ const MCValue &Target, uint64_t &Value,
+ bool &IsResolved) override;
+
+ void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ uint64_t Value, bool IsPCRel) const override;
+};
+
+void ELFAArch64AsmBackend::processFixupValue(
+ const MCAssembler &Asm, const MCAsmLayout &Layout, const MCFixup &Fixup,
+ const MCFragment *DF, const MCValue &Target, uint64_t &Value,
+ bool &IsResolved) {
+ // The ADRP instruction adds some multiple of 0x1000 to the current PC &
+ // ~0xfff. This means that the required offset to reach a symbol can vary by
+ // up to one step depending on where the ADRP is in memory. For example:
+ //
+ // ADRP x0, there
+ // there:
+ //
+ // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
+ // we'll need that as an offset. At any other address "there" will be in the
+ // same page as the ADRP and the instruction should encode 0x0. Assuming the
+ // section isn't 0x1000-aligned, we therefore need to delegate this decision
+ // to the linker -- a relocation!
+ if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_pcrel_adrp_imm21)
+ IsResolved = false;
+}
+
+void ELFAArch64AsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
+ unsigned DataSize, uint64_t Value,
+ bool IsPCRel) const {
+ // Store fixups in the .eh_frame section in big-endian order.
+ if (!IsLittleEndian && Fixup.getKind() == FK_Data_4) {
+ const MCSection *Sec = Fixup.getValue()->FindAssociatedSection();
+ const MCSectionELF *SecELF = static_cast<const MCSectionELF *>(Sec);
+ if (SecELF->getSectionName() == ".eh_frame")
+ Value = ByteSwap_32(unsigned(Value));
}
+ AArch64AsmBackend::applyFixup (Fixup, Data, DataSize, Value, IsPCRel);
+}
+}
+
+MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU) {
+ Triple TheTriple(TT);
+
+ if (TheTriple.isOSDarwin())
+ return new DarwinAArch64AsmBackend(T, MRI);
+
+ assert(TheTriple.isOSBinFormatELF() && "Expect either MachO or ELF target");
+ return new ELFAArch64AsmBackend(T, TheTriple.getOS(), /*IsLittleEndian=*/true);
}
-MCAsmBackend *
-llvm::createAArch64AsmBackend(const Target &T, const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU) {
+MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU) {
Triple TheTriple(TT);
- return new ELFAArch64AsmBackend(T, TT, TheTriple.getOS());
+
+ assert(TheTriple.isOSBinFormatELF() &&
+ "Big endian is only supported for ELF targets!");
+ return new ELFAArch64AsmBackend(T, TheTriple.getOS(),
+ /*IsLittleEndian=*/false);
}
diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
index 4bcc65d..e05191e 100644
--- a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/AArch64FixupKinds.h"
+#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCValue.h"
@@ -23,270 +24,234 @@ using namespace llvm;
namespace {
class AArch64ELFObjectWriter : public MCELFObjectTargetWriter {
public:
- AArch64ELFObjectWriter(uint8_t OSABI);
+ AArch64ELFObjectWriter(uint8_t OSABI, bool IsLittleEndian);
virtual ~AArch64ELFObjectWriter();
protected:
- virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel, bool IsRelocWithSymbol,
- int64_t Addend) const;
+ unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel) const override;
+
private:
};
}
-AArch64ELFObjectWriter::AArch64ELFObjectWriter(uint8_t OSABI)
- : MCELFObjectTargetWriter(/*Is64Bit*/ true, OSABI, ELF::EM_AARCH64,
- /*HasRelocationAddend*/ true)
-{}
+AArch64ELFObjectWriter::AArch64ELFObjectWriter(uint8_t OSABI,
+ bool IsLittleEndian)
+ : MCELFObjectTargetWriter(/*Is64Bit*/ true, OSABI, ELF::EM_AARCH64,
+ /*HasRelocationAddend*/ true) {}
-AArch64ELFObjectWriter::~AArch64ELFObjectWriter()
-{}
+AArch64ELFObjectWriter::~AArch64ELFObjectWriter() {}
unsigned AArch64ELFObjectWriter::GetRelocType(const MCValue &Target,
- const MCFixup &Fixup,
- bool IsPCRel,
- bool IsRelocWithSymbol,
- int64_t Addend) const {
- unsigned Type;
+ const MCFixup &Fixup,
+ bool IsPCRel) const {
+ AArch64MCExpr::VariantKind RefKind =
+ static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
+ AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
+ bool IsNC = AArch64MCExpr::isNotChecked(RefKind);
+
+ assert((!Target.getSymA() ||
+ Target.getSymA()->getKind() == MCSymbolRefExpr::VK_None) &&
+ "Should only be expression-level modifiers here");
+
+ assert((!Target.getSymB() ||
+ Target.getSymB()->getKind() == MCSymbolRefExpr::VK_None) &&
+ "Should only be expression-level modifiers here");
+
if (IsPCRel) {
switch ((unsigned)Fixup.getKind()) {
- default:
- llvm_unreachable("Unimplemented fixup -> relocation");
- case FK_Data_8:
- return ELF::R_AARCH64_PREL64;
- case FK_Data_4:
- return ELF::R_AARCH64_PREL32;
case FK_Data_2:
return ELF::R_AARCH64_PREL16;
- case AArch64::fixup_a64_ld_prel:
- Type = ELF::R_AARCH64_LD_PREL_LO19;
- break;
- case AArch64::fixup_a64_adr_prel:
- Type = ELF::R_AARCH64_ADR_PREL_LO21;
- break;
- case AArch64::fixup_a64_adr_prel_page:
- Type = ELF::R_AARCH64_ADR_PREL_PG_HI21;
- break;
- case AArch64::fixup_a64_adr_prel_got_page:
- Type = ELF::R_AARCH64_ADR_GOT_PAGE;
- break;
- case AArch64::fixup_a64_tstbr:
- Type = ELF::R_AARCH64_TSTBR14;
- break;
- case AArch64::fixup_a64_condbr:
- Type = ELF::R_AARCH64_CONDBR19;
- break;
- case AArch64::fixup_a64_uncondbr:
- Type = ELF::R_AARCH64_JUMP26;
- break;
- case AArch64::fixup_a64_call:
- Type = ELF::R_AARCH64_CALL26;
- break;
- case AArch64::fixup_a64_adr_gottprel_page:
- Type = ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21;
- break;
- case AArch64::fixup_a64_ld_gottprel_prel19:
- Type = ELF::R_AARCH64_TLSIE_LD_GOTTPREL_PREL19;
- break;
- case AArch64::fixup_a64_tlsdesc_adr_page:
- Type = ELF::R_AARCH64_TLSDESC_ADR_PAGE;
- break;
+ case FK_Data_4:
+ return ELF::R_AARCH64_PREL32;
+ case FK_Data_8:
+ return ELF::R_AARCH64_PREL64;
+ case AArch64::fixup_aarch64_pcrel_adr_imm21:
+ assert(SymLoc == AArch64MCExpr::VK_NONE && "unexpected ADR relocation");
+ return ELF::R_AARCH64_ADR_PREL_LO21;
+ case AArch64::fixup_aarch64_pcrel_adrp_imm21:
+ if (SymLoc == AArch64MCExpr::VK_ABS && !IsNC)
+ return ELF::R_AARCH64_ADR_PREL_PG_HI21;
+ if (SymLoc == AArch64MCExpr::VK_GOT && !IsNC)
+ return ELF::R_AARCH64_ADR_GOT_PAGE;
+ if (SymLoc == AArch64MCExpr::VK_GOTTPREL && !IsNC)
+ return ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21;
+ if (SymLoc == AArch64MCExpr::VK_TLSDESC && !IsNC)
+ return ELF::R_AARCH64_TLSDESC_ADR_PAGE;
+ llvm_unreachable("invalid symbol kind for ADRP relocation");
+ case AArch64::fixup_aarch64_pcrel_branch26:
+ return ELF::R_AARCH64_JUMP26;
+ case AArch64::fixup_aarch64_pcrel_call26:
+ return ELF::R_AARCH64_CALL26;
+ case AArch64::fixup_aarch64_ldr_pcrel_imm19:
+ if (SymLoc == AArch64MCExpr::VK_GOTTPREL)
+ return ELF::R_AARCH64_TLSIE_LD_GOTTPREL_PREL19;
+ return ELF::R_AARCH64_LD_PREL_LO19;
+ case AArch64::fixup_aarch64_pcrel_branch14:
+ return ELF::R_AARCH64_TSTBR14;
+ case AArch64::fixup_aarch64_pcrel_branch19:
+ return ELF::R_AARCH64_CONDBR19;
+ default:
+ llvm_unreachable("Unsupported pc-relative fixup kind");
}
} else {
switch ((unsigned)Fixup.getKind()) {
- default:
- llvm_unreachable("Unimplemented fixup -> relocation");
- case FK_Data_8:
- return ELF::R_AARCH64_ABS64;
- case FK_Data_4:
- return ELF::R_AARCH64_ABS32;
case FK_Data_2:
return ELF::R_AARCH64_ABS16;
- case AArch64::fixup_a64_add_lo12:
- Type = ELF::R_AARCH64_ADD_ABS_LO12_NC;
- break;
- case AArch64::fixup_a64_ld64_got_lo12_nc:
- Type = ELF::R_AARCH64_LD64_GOT_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst8_lo12:
- Type = ELF::R_AARCH64_LDST8_ABS_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst16_lo12:
- Type = ELF::R_AARCH64_LDST16_ABS_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst32_lo12:
- Type = ELF::R_AARCH64_LDST32_ABS_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst64_lo12:
- Type = ELF::R_AARCH64_LDST64_ABS_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst128_lo12:
- Type = ELF::R_AARCH64_LDST128_ABS_LO12_NC;
- break;
- case AArch64::fixup_a64_movw_uabs_g0:
- Type = ELF::R_AARCH64_MOVW_UABS_G0;
- break;
- case AArch64::fixup_a64_movw_uabs_g0_nc:
- Type = ELF::R_AARCH64_MOVW_UABS_G0_NC;
- break;
- case AArch64::fixup_a64_movw_uabs_g1:
- Type = ELF::R_AARCH64_MOVW_UABS_G1;
- break;
- case AArch64::fixup_a64_movw_uabs_g1_nc:
- Type = ELF::R_AARCH64_MOVW_UABS_G1_NC;
- break;
- case AArch64::fixup_a64_movw_uabs_g2:
- Type = ELF::R_AARCH64_MOVW_UABS_G2;
- break;
- case AArch64::fixup_a64_movw_uabs_g2_nc:
- Type = ELF::R_AARCH64_MOVW_UABS_G2_NC;
- break;
- case AArch64::fixup_a64_movw_uabs_g3:
- Type = ELF::R_AARCH64_MOVW_UABS_G3;
- break;
- case AArch64::fixup_a64_movw_sabs_g0:
- Type = ELF::R_AARCH64_MOVW_SABS_G0;
- break;
- case AArch64::fixup_a64_movw_sabs_g1:
- Type = ELF::R_AARCH64_MOVW_SABS_G1;
- break;
- case AArch64::fixup_a64_movw_sabs_g2:
- Type = ELF::R_AARCH64_MOVW_SABS_G2;
- break;
+ case FK_Data_4:
+ return ELF::R_AARCH64_ABS32;
+ case FK_Data_8:
+ return ELF::R_AARCH64_ABS64;
+ case AArch64::fixup_aarch64_add_imm12:
+ if (RefKind == AArch64MCExpr::VK_DTPREL_HI12)
+ return ELF::R_AARCH64_TLSLD_ADD_DTPREL_HI12;
+ if (RefKind == AArch64MCExpr::VK_TPREL_HI12)
+ return ELF::R_AARCH64_TLSLE_ADD_TPREL_HI12;
+ if (RefKind == AArch64MCExpr::VK_DTPREL_LO12_NC)
+ return ELF::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC;
+ if (RefKind == AArch64MCExpr::VK_DTPREL_LO12)
+ return ELF::R_AARCH64_TLSLD_ADD_DTPREL_LO12;
+ if (RefKind == AArch64MCExpr::VK_TPREL_LO12_NC)
+ return ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC;
+ if (RefKind == AArch64MCExpr::VK_TPREL_LO12)
+ return ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12;
+ if (RefKind == AArch64MCExpr::VK_TLSDESC_LO12)
+ return ELF::R_AARCH64_TLSDESC_ADD_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
+ return ELF::R_AARCH64_ADD_ABS_LO12_NC;
- // TLS Local-dynamic block
- case AArch64::fixup_a64_movw_dtprel_g2:
- Type = ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G2;
- break;
- case AArch64::fixup_a64_movw_dtprel_g1:
- Type = ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G1;
- break;
- case AArch64::fixup_a64_movw_dtprel_g1_nc:
- Type = ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC;
- break;
- case AArch64::fixup_a64_movw_dtprel_g0:
- Type = ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G0;
- break;
- case AArch64::fixup_a64_movw_dtprel_g0_nc:
- Type = ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC;
- break;
- case AArch64::fixup_a64_add_dtprel_hi12:
- Type = ELF::R_AARCH64_TLSLD_ADD_DTPREL_HI12;
- break;
- case AArch64::fixup_a64_add_dtprel_lo12:
- Type = ELF::R_AARCH64_TLSLD_ADD_DTPREL_LO12;
- break;
- case AArch64::fixup_a64_add_dtprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst8_dtprel_lo12:
- Type = ELF::R_AARCH64_TLSLD_LDST8_DTPREL_LO12;
- break;
- case AArch64::fixup_a64_ldst8_dtprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst16_dtprel_lo12:
- Type = ELF::R_AARCH64_TLSLD_LDST16_DTPREL_LO12;
- break;
- case AArch64::fixup_a64_ldst16_dtprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst32_dtprel_lo12:
- Type = ELF::R_AARCH64_TLSLD_LDST32_DTPREL_LO12;
- break;
- case AArch64::fixup_a64_ldst32_dtprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst64_dtprel_lo12:
- Type = ELF::R_AARCH64_TLSLD_LDST64_DTPREL_LO12;
- break;
- case AArch64::fixup_a64_ldst64_dtprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC;
- break;
+ report_fatal_error("invalid fixup for add (uimm12) instruction");
+ return 0;
+ case AArch64::fixup_aarch64_ldst_imm12_scale1:
+ if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
+ return ELF::R_AARCH64_LDST8_ABS_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && !IsNC)
+ return ELF::R_AARCH64_TLSLD_LDST8_DTPREL_LO12;
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && IsNC)
+ return ELF::R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_TPREL && !IsNC)
+ return ELF::R_AARCH64_TLSLE_LDST8_TPREL_LO12;
+ if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
+ return ELF::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC;
- // TLS initial-exec block
- case AArch64::fixup_a64_movw_gottprel_g1:
- Type = ELF::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1;
- break;
- case AArch64::fixup_a64_movw_gottprel_g0_nc:
- Type = ELF::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC;
- break;
- case AArch64::fixup_a64_ld64_gottprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
- break;
+ report_fatal_error("invalid fixup for 8-bit load/store instruction");
+ return 0;
+ case AArch64::fixup_aarch64_ldst_imm12_scale2:
+ if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
+ return ELF::R_AARCH64_LDST16_ABS_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && !IsNC)
+ return ELF::R_AARCH64_TLSLD_LDST16_DTPREL_LO12;
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && IsNC)
+ return ELF::R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_TPREL && !IsNC)
+ return ELF::R_AARCH64_TLSLE_LDST16_TPREL_LO12;
+ if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
+ return ELF::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC;
- // TLS local-exec block
- case AArch64::fixup_a64_movw_tprel_g2:
- Type = ELF::R_AARCH64_TLSLE_MOVW_TPREL_G2;
- break;
- case AArch64::fixup_a64_movw_tprel_g1:
- Type = ELF::R_AARCH64_TLSLE_MOVW_TPREL_G1;
- break;
- case AArch64::fixup_a64_movw_tprel_g1_nc:
- Type = ELF::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC;
- break;
- case AArch64::fixup_a64_movw_tprel_g0:
- Type = ELF::R_AARCH64_TLSLE_MOVW_TPREL_G0;
- break;
- case AArch64::fixup_a64_movw_tprel_g0_nc:
- Type = ELF::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC;
- break;
- case AArch64::fixup_a64_add_tprel_hi12:
- Type = ELF::R_AARCH64_TLSLE_ADD_TPREL_HI12;
- break;
- case AArch64::fixup_a64_add_tprel_lo12:
- Type = ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12;
- break;
- case AArch64::fixup_a64_add_tprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst8_tprel_lo12:
- Type = ELF::R_AARCH64_TLSLE_LDST8_TPREL_LO12;
- break;
- case AArch64::fixup_a64_ldst8_tprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst16_tprel_lo12:
- Type = ELF::R_AARCH64_TLSLE_LDST16_TPREL_LO12;
- break;
- case AArch64::fixup_a64_ldst16_tprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst32_tprel_lo12:
- Type = ELF::R_AARCH64_TLSLE_LDST32_TPREL_LO12;
- break;
- case AArch64::fixup_a64_ldst32_tprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC;
- break;
- case AArch64::fixup_a64_ldst64_tprel_lo12:
- Type = ELF::R_AARCH64_TLSLE_LDST64_TPREL_LO12;
- break;
- case AArch64::fixup_a64_ldst64_tprel_lo12_nc:
- Type = ELF::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC;
- break;
+ report_fatal_error("invalid fixup for 16-bit load/store instruction");
+ return 0;
+ case AArch64::fixup_aarch64_ldst_imm12_scale4:
+ if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
+ return ELF::R_AARCH64_LDST32_ABS_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && !IsNC)
+ return ELF::R_AARCH64_TLSLD_LDST32_DTPREL_LO12;
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && IsNC)
+ return ELF::R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_TPREL && !IsNC)
+ return ELF::R_AARCH64_TLSLE_LDST32_TPREL_LO12;
+ if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
+ return ELF::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC;
- // TLS general-dynamic block
- case AArch64::fixup_a64_tlsdesc_adr_page:
- Type = ELF::R_AARCH64_TLSDESC_ADR_PAGE;
- break;
- case AArch64::fixup_a64_tlsdesc_ld64_lo12_nc:
- Type = ELF::R_AARCH64_TLSDESC_LD64_LO12_NC;
- break;
- case AArch64::fixup_a64_tlsdesc_add_lo12_nc:
- Type = ELF::R_AARCH64_TLSDESC_ADD_LO12_NC;
- break;
- case AArch64::fixup_a64_tlsdesc_call:
- Type = ELF::R_AARCH64_TLSDESC_CALL;
- break;
+ report_fatal_error("invalid fixup for 32-bit load/store instruction");
+ return 0;
+ case AArch64::fixup_aarch64_ldst_imm12_scale8:
+ if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
+ return ELF::R_AARCH64_LDST64_ABS_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_GOT && IsNC)
+ return ELF::R_AARCH64_LD64_GOT_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && !IsNC)
+ return ELF::R_AARCH64_TLSLD_LDST64_DTPREL_LO12;
+ if (SymLoc == AArch64MCExpr::VK_DTPREL && IsNC)
+ return ELF::R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_TPREL && !IsNC)
+ return ELF::R_AARCH64_TLSLE_LDST64_TPREL_LO12;
+ if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
+ return ELF::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_GOTTPREL && IsNC)
+ return ELF::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
+ if (SymLoc == AArch64MCExpr::VK_TLSDESC && IsNC)
+ return ELF::R_AARCH64_TLSDESC_LD64_LO12_NC;
+
+ report_fatal_error("invalid fixup for 64-bit load/store instruction");
+ return 0;
+ case AArch64::fixup_aarch64_ldst_imm12_scale16:
+ if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
+ return ELF::R_AARCH64_LDST128_ABS_LO12_NC;
+
+ report_fatal_error("invalid fixup for 128-bit load/store instruction");
+ return 0;
+ case AArch64::fixup_aarch64_movw:
+ if (RefKind == AArch64MCExpr::VK_ABS_G3)
+ return ELF::R_AARCH64_MOVW_UABS_G3;
+ if (RefKind == AArch64MCExpr::VK_ABS_G2)
+ return ELF::R_AARCH64_MOVW_UABS_G2;
+ if (RefKind == AArch64MCExpr::VK_ABS_G2_S)
+ return ELF::R_AARCH64_MOVW_SABS_G2;
+ if (RefKind == AArch64MCExpr::VK_ABS_G2_NC)
+ return ELF::R_AARCH64_MOVW_UABS_G2_NC;
+ if (RefKind == AArch64MCExpr::VK_ABS_G1)
+ return ELF::R_AARCH64_MOVW_UABS_G1;
+ if (RefKind == AArch64MCExpr::VK_ABS_G1_S)
+ return ELF::R_AARCH64_MOVW_SABS_G1;
+ if (RefKind == AArch64MCExpr::VK_ABS_G1_NC)
+ return ELF::R_AARCH64_MOVW_UABS_G1_NC;
+ if (RefKind == AArch64MCExpr::VK_ABS_G0)
+ return ELF::R_AARCH64_MOVW_UABS_G0;
+ if (RefKind == AArch64MCExpr::VK_ABS_G0_S)
+ return ELF::R_AARCH64_MOVW_SABS_G0;
+ if (RefKind == AArch64MCExpr::VK_ABS_G0_NC)
+ return ELF::R_AARCH64_MOVW_UABS_G0_NC;
+ if (RefKind == AArch64MCExpr::VK_DTPREL_G2)
+ return ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G2;
+ if (RefKind == AArch64MCExpr::VK_DTPREL_G1)
+ return ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G1;
+ if (RefKind == AArch64MCExpr::VK_DTPREL_G1_NC)
+ return ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC;
+ if (RefKind == AArch64MCExpr::VK_DTPREL_G0)
+ return ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G0;
+ if (RefKind == AArch64MCExpr::VK_DTPREL_G0_NC)
+ return ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC;
+ if (RefKind == AArch64MCExpr::VK_TPREL_G2)
+ return ELF::R_AARCH64_TLSLE_MOVW_TPREL_G2;
+ if (RefKind == AArch64MCExpr::VK_TPREL_G1)
+ return ELF::R_AARCH64_TLSLE_MOVW_TPREL_G1;
+ if (RefKind == AArch64MCExpr::VK_TPREL_G1_NC)
+ return ELF::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC;
+ if (RefKind == AArch64MCExpr::VK_TPREL_G0)
+ return ELF::R_AARCH64_TLSLE_MOVW_TPREL_G0;
+ if (RefKind == AArch64MCExpr::VK_TPREL_G0_NC)
+ return ELF::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC;
+ if (RefKind == AArch64MCExpr::VK_GOTTPREL_G1)
+ return ELF::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1;
+ if (RefKind == AArch64MCExpr::VK_GOTTPREL_G0_NC)
+ return ELF::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC;
+ report_fatal_error("invalid fixup for movz/movk instruction");
+ return 0;
+ case AArch64::fixup_aarch64_tlsdesc_call:
+ return ELF::R_AARCH64_TLSDESC_CALL;
+ default:
+ llvm_unreachable("Unknown ELF relocation type");
}
}
- return Type;
+ llvm_unreachable("Unimplemented fixup -> relocation");
}
MCObjectWriter *llvm::createAArch64ELFObjectWriter(raw_ostream &OS,
- uint8_t OSABI) {
- MCELFObjectTargetWriter *MOTW = new AArch64ELFObjectWriter(OSABI);
- return createELFObjectWriter(MOTW, OS, /*IsLittleEndian=*/true);
+ uint8_t OSABI,
+ bool IsLittleEndian) {
+ MCELFObjectTargetWriter *MOTW =
+ new AArch64ELFObjectWriter(OSABI, IsLittleEndian);
+ return createELFObjectWriter(MOTW, OS, IsLittleEndian);
}
diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
index a64c463..a79406d 100644
--- a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
+++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
@@ -56,14 +56,14 @@ namespace {
class AArch64ELFStreamer : public MCELFStreamer {
public:
AArch64ELFStreamer(MCContext &Context, MCAsmBackend &TAB, raw_ostream &OS,
- MCCodeEmitter *Emitter)
- : MCELFStreamer(Context, 0, TAB, OS, Emitter), MappingSymbolCounter(0),
+ MCCodeEmitter *Emitter)
+ : MCELFStreamer(Context, TAB, OS, Emitter), MappingSymbolCounter(0),
LastEMS(EMS_None) {}
~AArch64ELFStreamer() {}
- virtual void ChangeSection(const MCSection *Section,
- const MCExpr *Subsection) {
+ void ChangeSection(const MCSection *Section,
+ const MCExpr *Subsection) override {
// We have to keep track of the mapping symbol state of any sections we
// use. Each one should start off as EMS_None, which is provided as the
// default constructor by DenseMap::lookup.
@@ -76,15 +76,16 @@ public:
/// This function is the one used to emit instruction data into the ELF
/// streamer. We override it to add the appropriate mapping symbol if
/// necessary.
- virtual void EmitInstruction(const MCInst& Inst) {
+ void EmitInstruction(const MCInst &Inst,
+ const MCSubtargetInfo &STI) override {
EmitA64MappingSymbol();
- MCELFStreamer::EmitInstruction(Inst);
+ MCELFStreamer::EmitInstruction(Inst, STI);
}
/// This is one of the functions used to emit data into an ELF section, so the
/// AArch64 streamer overrides it to add the appropriate mapping symbol ($d)
/// if necessary.
- virtual void EmitBytes(StringRef Data) {
+ void EmitBytes(StringRef Data) override {
EmitDataMappingSymbol();
MCELFStreamer::EmitBytes(Data);
}
@@ -92,7 +93,8 @@ public:
/// This is one of the functions used to emit data into an ELF section, so the
/// AArch64 streamer overrides it to add the appropriate mapping symbol ($d)
/// if necessary.
- virtual void EmitValueImpl(const MCExpr *Value, unsigned Size) {
+ void EmitValueImpl(const MCExpr *Value, unsigned Size,
+ const SMLoc &Loc) override {
EmitDataMappingSymbol();
MCELFStreamer::EmitValueImpl(Value, Size);
}
@@ -105,13 +107,15 @@ private:
};
void EmitDataMappingSymbol() {
- if (LastEMS == EMS_Data) return;
+ if (LastEMS == EMS_Data)
+ return;
EmitMappingSymbol("$d");
LastEMS = EMS_Data;
}
void EmitA64MappingSymbol() {
- if (LastEMS == EMS_A64) return;
+ if (LastEMS == EMS_A64)
+ return;
EmitMappingSymbol("$x");
LastEMS = EMS_A64;
}
@@ -120,15 +124,14 @@ private:
MCSymbol *Start = getContext().CreateTempSymbol();
EmitLabel(Start);
- MCSymbol *Symbol =
- getContext().GetOrCreateSymbol(Name + "." +
- Twine(MappingSymbolCounter++));
+ MCSymbol *Symbol = getContext().GetOrCreateSymbol(
+ Name + "." + Twine(MappingSymbolCounter++));
MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
MCELF::SetType(SD, ELF::STT_NOTYPE);
MCELF::SetBinding(SD, ELF::STB_LOCAL);
SD.setExternal(false);
- AssignSection(Symbol, getCurrentSection().first);
+ Symbol->setSection(*getCurrentSection().first);
const MCExpr *Value = MCSymbolRefExpr::Create(Start, getContext());
Symbol->setVariableValue(Value);
@@ -144,16 +147,14 @@ private:
}
namespace llvm {
- MCELFStreamer* createAArch64ELFStreamer(MCContext &Context, MCAsmBackend &TAB,
- raw_ostream &OS, MCCodeEmitter *Emitter,
- bool RelaxAll, bool NoExecStack) {
- AArch64ELFStreamer *S = new AArch64ELFStreamer(Context, TAB, OS, Emitter);
- if (RelaxAll)
- S->getAssembler().setRelaxAll(true);
- if (NoExecStack)
- S->getAssembler().setNoExecStack(true);
- return S;
- }
+MCELFStreamer *createAArch64ELFStreamer(MCContext &Context, MCAsmBackend &TAB,
+ raw_ostream &OS, MCCodeEmitter *Emitter,
+ bool RelaxAll, bool NoExecStack) {
+ AArch64ELFStreamer *S = new AArch64ELFStreamer(Context, TAB, OS, Emitter);
+ if (RelaxAll)
+ S->getAssembler().setRelaxAll(true);
+ if (NoExecStack)
+ S->getAssembler().setNoExecStack(true);
+ return S;
+}
}
-
-
diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h
index 5a89ca5..bc6973b 100644
--- a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h
+++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h
@@ -18,10 +18,9 @@
namespace llvm {
- MCELFStreamer* createAArch64ELFStreamer(MCContext &Context, MCAsmBackend &TAB,
- raw_ostream &OS,
- MCCodeEmitter *Emitter,
- bool RelaxAll, bool NoExecStack);
+MCELFStreamer *createAArch64ELFStreamer(MCContext &Context, MCAsmBackend &TAB,
+ raw_ostream &OS, MCCodeEmitter *Emitter,
+ bool RelaxAll, bool NoExecStack);
}
#endif // AArch64_ELF_STREAMER_H
diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h
index eeb122d..bf405fb 100644
--- a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h
+++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h
@@ -1,4 +1,4 @@
-//=- AArch64/AArch64FixupKinds.h - AArch64 Specific Fixup Entries -*- C++ -*-=//
+//===-- AArch64FixupKinds.h - AArch64 Specific Fixup Entries ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -6,108 +6,71 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file describes the LLVM fixups applied to MCInsts in the AArch64
-// backend.
-//
-//===----------------------------------------------------------------------===//
-#ifndef LLVM_AARCH64_AARCH64FIXUPKINDS_H
-#define LLVM_AARCH64_AARCH64FIXUPKINDS_H
+#ifndef LLVM_AArch64FIXUPKINDS_H
+#define LLVM_AArch64FIXUPKINDS_H
#include "llvm/MC/MCFixup.h"
namespace llvm {
- namespace AArch64 {
- enum Fixups {
- fixup_a64_ld_prel = FirstTargetFixupKind,
- fixup_a64_adr_prel,
- fixup_a64_adr_prel_page,
-
- fixup_a64_add_lo12,
-
- fixup_a64_ldst8_lo12,
- fixup_a64_ldst16_lo12,
- fixup_a64_ldst32_lo12,
- fixup_a64_ldst64_lo12,
- fixup_a64_ldst128_lo12,
-
- fixup_a64_tstbr,
- fixup_a64_condbr,
- fixup_a64_uncondbr,
- fixup_a64_call,
-
- fixup_a64_movw_uabs_g0,
- fixup_a64_movw_uabs_g0_nc,
- fixup_a64_movw_uabs_g1,
- fixup_a64_movw_uabs_g1_nc,
- fixup_a64_movw_uabs_g2,
- fixup_a64_movw_uabs_g2_nc,
- fixup_a64_movw_uabs_g3,
-
- fixup_a64_movw_sabs_g0,
- fixup_a64_movw_sabs_g1,
- fixup_a64_movw_sabs_g2,
-
- fixup_a64_adr_prel_got_page,
- fixup_a64_ld64_got_lo12_nc,
-
- // Produce offsets relative to the module's dynamic TLS area.
- fixup_a64_movw_dtprel_g2,
- fixup_a64_movw_dtprel_g1,
- fixup_a64_movw_dtprel_g1_nc,
- fixup_a64_movw_dtprel_g0,
- fixup_a64_movw_dtprel_g0_nc,
- fixup_a64_add_dtprel_hi12,
- fixup_a64_add_dtprel_lo12,
- fixup_a64_add_dtprel_lo12_nc,
- fixup_a64_ldst8_dtprel_lo12,
- fixup_a64_ldst8_dtprel_lo12_nc,
- fixup_a64_ldst16_dtprel_lo12,
- fixup_a64_ldst16_dtprel_lo12_nc,
- fixup_a64_ldst32_dtprel_lo12,
- fixup_a64_ldst32_dtprel_lo12_nc,
- fixup_a64_ldst64_dtprel_lo12,
- fixup_a64_ldst64_dtprel_lo12_nc,
-
- // Produce the GOT entry containing a variable's address in TLS's
- // initial-exec mode.
- fixup_a64_movw_gottprel_g1,
- fixup_a64_movw_gottprel_g0_nc,
- fixup_a64_adr_gottprel_page,
- fixup_a64_ld64_gottprel_lo12_nc,
- fixup_a64_ld_gottprel_prel19,
-
- // Produce offsets relative to the thread pointer: TPIDR_EL0.
- fixup_a64_movw_tprel_g2,
- fixup_a64_movw_tprel_g1,
- fixup_a64_movw_tprel_g1_nc,
- fixup_a64_movw_tprel_g0,
- fixup_a64_movw_tprel_g0_nc,
- fixup_a64_add_tprel_hi12,
- fixup_a64_add_tprel_lo12,
- fixup_a64_add_tprel_lo12_nc,
- fixup_a64_ldst8_tprel_lo12,
- fixup_a64_ldst8_tprel_lo12_nc,
- fixup_a64_ldst16_tprel_lo12,
- fixup_a64_ldst16_tprel_lo12_nc,
- fixup_a64_ldst32_tprel_lo12,
- fixup_a64_ldst32_tprel_lo12_nc,
- fixup_a64_ldst64_tprel_lo12,
- fixup_a64_ldst64_tprel_lo12_nc,
-
- // Produce the special fixups used by the general-dynamic TLS model.
- fixup_a64_tlsdesc_adr_page,
- fixup_a64_tlsdesc_ld64_lo12_nc,
- fixup_a64_tlsdesc_add_lo12_nc,
- fixup_a64_tlsdesc_call,
-
-
- // Marker
- LastTargetFixupKind,
- NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
- };
- }
-}
+namespace AArch64 {
+
+enum Fixups {
+ // fixup_aarch64_pcrel_adr_imm21 - A 21-bit pc-relative immediate inserted into
+ // an ADR instruction.
+ fixup_aarch64_pcrel_adr_imm21 = FirstTargetFixupKind,
+
+ // fixup_aarch64_pcrel_adrp_imm21 - A 21-bit pc-relative immediate inserted into
+ // an ADRP instruction.
+ fixup_aarch64_pcrel_adrp_imm21,
+
+  // fixup_aarch64_add_imm12 - 12-bit fixup for add/sub instructions.
+ // No alignment adjustment. All value bits are encoded.
+ fixup_aarch64_add_imm12,
+
+ // fixup_aarch64_ldst_imm12_* - unsigned 12-bit fixups for load and
+ // store instructions.
+ fixup_aarch64_ldst_imm12_scale1,
+ fixup_aarch64_ldst_imm12_scale2,
+ fixup_aarch64_ldst_imm12_scale4,
+ fixup_aarch64_ldst_imm12_scale8,
+ fixup_aarch64_ldst_imm12_scale16,
+
+ // fixup_aarch64_ldr_pcrel_imm19 - The high 19 bits of a 21-bit pc-relative
+ // immediate. Same encoding as fixup_aarch64_pcrel_adrhi, except this is used by
+ // pc-relative loads and generates relocations directly when necessary.
+ fixup_aarch64_ldr_pcrel_imm19,
+
+  // fixup_aarch64_movw - 16-bit immediate fixup for MOVZ/MOVK instructions.
+ fixup_aarch64_movw,
+
+  // fixup_aarch64_pcrel_branch14 - The high 14 bits of a 16-bit pc-relative
+ // immediate.
+ fixup_aarch64_pcrel_branch14,
+
+ // fixup_aarch64_pcrel_branch19 - The high 19 bits of a 21-bit pc-relative
+  // immediate. Same encoding as fixup_aarch64_pcrel_adrhi, except this is used by
+ // b.cc and generates relocations directly when necessary.
+ fixup_aarch64_pcrel_branch19,
+
+ // fixup_aarch64_pcrel_branch26 - The high 26 bits of a 28-bit pc-relative
+ // immediate.
+ fixup_aarch64_pcrel_branch26,
+
+ // fixup_aarch64_pcrel_call26 - The high 26 bits of a 28-bit pc-relative
+ // immediate. Distinguished from branch26 only on ELF.
+ fixup_aarch64_pcrel_call26,
+
+ // fixup_aarch64_tlsdesc_call - zero-space placeholder for the ELF
+ // R_AARCH64_TLSDESC_CALL relocation.
+ fixup_aarch64_tlsdesc_call,
+
+ // Marker
+ LastTargetFixupKind,
+ NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
+};
+
+} // end namespace AArch64
+} // end namespace llvm
#endif
diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
index add874c..1763b40 100644
--- a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
@@ -12,10 +12,66 @@
//===----------------------------------------------------------------------===//
#include "AArch64MCAsmInfo.h"
-
+#include "llvm/ADT/Triple.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/Support/CommandLine.h"
using namespace llvm;
-AArch64ELFMCAsmInfo::AArch64ELFMCAsmInfo() {
+enum AsmWriterVariantTy {
+ Default = -1,
+ Generic = 0,
+ Apple = 1
+};
+
+static cl::opt<AsmWriterVariantTy> AsmWriterVariant(
+ "aarch64-neon-syntax", cl::init(Default),
+ cl::desc("Choose style of NEON code to emit from AArch64 backend:"),
+ cl::values(clEnumValN(Generic, "generic", "Emit generic NEON assembly"),
+ clEnumValN(Apple, "apple", "Emit Apple-style NEON assembly"),
+ clEnumValEnd));
+
+AArch64MCAsmInfoDarwin::AArch64MCAsmInfoDarwin() {
+ // We prefer NEON instructions to be printed in the short form.
+ AssemblerDialect = AsmWriterVariant == Default ? 1 : AsmWriterVariant;
+
+ PrivateGlobalPrefix = "L";
+ SeparatorString = "%%";
+ CommentString = ";";
+ PointerSize = CalleeSaveStackSlotSize = 8;
+
+ AlignmentIsInBytes = false;
+ UsesELFSectionDirectiveForBSS = true;
+ SupportsDebugInformation = true;
+ UseDataRegionDirectives = true;
+
+ ExceptionsType = ExceptionHandling::DwarfCFI;
+}
+
+const MCExpr *AArch64MCAsmInfoDarwin::getExprForPersonalitySymbol(
+ const MCSymbol *Sym, unsigned Encoding, MCStreamer &Streamer) const {
+ // On Darwin, we can reference dwarf symbols with foo@GOT-., which
+ // is an indirect pc-relative reference. The default implementation
+ // won't reference using the GOT, so we need this target-specific
+ // version.
+ MCContext &Context = Streamer.getContext();
+ const MCExpr *Res =
+ MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_GOT, Context);
+ MCSymbol *PCSym = Context.CreateTempSymbol();
+ Streamer.EmitLabel(PCSym);
+ const MCExpr *PC = MCSymbolRefExpr::Create(PCSym, Context);
+ return MCBinaryExpr::CreateSub(Res, PC, Context);
+}
+
+AArch64MCAsmInfoELF::AArch64MCAsmInfoELF(StringRef TT) {
+ Triple T(TT);
+ if (T.getArch() == Triple::arm64_be || T.getArch() == Triple::aarch64_be)
+ IsLittleEndian = false;
+
+ // We prefer NEON instructions to be printed in the short form.
+ AssemblerDialect = AsmWriterVariant == Default ? 0 : AsmWriterVariant;
+
PointerSize = 8;
// ".comm align is in bytes but .align is pow-2."
@@ -29,14 +85,17 @@ AArch64ELFMCAsmInfo::AArch64ELFMCAsmInfo() {
Data32bitsDirective = "\t.word\t";
Data64bitsDirective = "\t.xword\t";
- UseDataRegionDirectives = true;
+ UseDataRegionDirectives = false;
+
+ WeakRefDirective = "\t.weak\t";
HasLEB128 = true;
SupportsDebugInformation = true;
// Exceptions handling
ExceptionsType = ExceptionHandling::DwarfCFI;
-}
-// Pin the vtable to this file.
-void AArch64ELFMCAsmInfo::anchor() {}
+ UseIntegratedAssembler = true;
+
+ HasIdentDirective = true;
+}
diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h
index d1dd285..42a031d 100644
--- a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h
+++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h
@@ -1,4 +1,4 @@
-//==-- AArch64MCAsmInfo.h - AArch64 asm properties -------------*- C++ -*--===//
+//=====-- AArch64MCAsmInfo.h - AArch64 asm properties ---------*- C++ -*--====//
//
// The LLVM Compiler Infrastructure
//
@@ -11,17 +11,24 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_AARCH64TARGETASMINFO_H
-#define LLVM_AARCH64TARGETASMINFO_H
+#ifndef AArch64TARGETASMINFO_H
+#define AArch64TARGETASMINFO_H
-#include "llvm/MC/MCAsmInfoELF.h"
+#include "llvm/MC/MCAsmInfoDarwin.h"
namespace llvm {
+class Target;
+class StringRef;
+class MCStreamer;
+struct AArch64MCAsmInfoDarwin : public MCAsmInfoDarwin {
+ explicit AArch64MCAsmInfoDarwin();
+ const MCExpr *
+ getExprForPersonalitySymbol(const MCSymbol *Sym, unsigned Encoding,
+ MCStreamer &Streamer) const override;
+};
-struct AArch64ELFMCAsmInfo : public MCAsmInfoELF {
- explicit AArch64ELFMCAsmInfo();
-private:
- virtual void anchor();
+struct AArch64MCAsmInfoELF : public MCAsmInfo {
+ explicit AArch64MCAsmInfoELF(StringRef TT);
};
} // namespace llvm
diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
index b41c566..f051357 100644
--- a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
@@ -1,4 +1,4 @@
-//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code =//
+//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code-=//
//
// The LLVM Compiler Infrastructure
//
@@ -11,10 +11,9 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mccodeemitter"
+#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
-#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
@@ -22,480 +21,558 @@
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Support/ErrorHandling.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"
-
using namespace llvm;
+#define DEBUG_TYPE "mccodeemitter"
+
+STATISTIC(MCNumEmitted, "Number of MC instructions emitted.");
+STATISTIC(MCNumFixups, "Number of MC fixups created.");
+
namespace {
+
class AArch64MCCodeEmitter : public MCCodeEmitter {
- AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
- void operator=(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
MCContext &Ctx;
+ AArch64MCCodeEmitter(const AArch64MCCodeEmitter &); // DO NOT IMPLEMENT
+ void operator=(const AArch64MCCodeEmitter &); // DO NOT IMPLEMENT
public:
- AArch64MCCodeEmitter(MCContext &ctx) : Ctx(ctx) {}
+ AArch64MCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti,
+ MCContext &ctx)
+ : Ctx(ctx) {}
~AArch64MCCodeEmitter() {}
- unsigned getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
-
- unsigned getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
-
- template<int MemSize>
- unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
- return getOffsetUImm12OpValue(MI, OpIdx, Fixups, MemSize);
- }
-
- unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- int MemSize) const;
-
- unsigned getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
- unsigned getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
-
- unsigned getShiftRightImm8(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
- unsigned getShiftRightImm16(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
- unsigned getShiftRightImm32(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
- unsigned getShiftRightImm64(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
-
- unsigned getShiftLeftImm8(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
- unsigned getShiftLeftImm16(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
- unsigned getShiftLeftImm32(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
- unsigned getShiftLeftImm64(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
-
- // Labels are handled mostly the same way: a symbol is needed, and
- // just gets some fixup attached.
- template<AArch64::Fixups fixupDesired>
- unsigned getLabelOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
-
- unsigned getLoadLitLabelOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
-
-
- unsigned getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
-
-
- unsigned getAddressWithFixup(const MCOperand &MO,
- unsigned FixupKind,
- SmallVectorImpl<MCFixup> &Fixups) const;
-
-
// getBinaryCodeForInstr - TableGen'erated function for getting the
// binary encoding for an instruction.
uint64_t getBinaryCodeForInstr(const MCInst &MI,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
- unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getLdStUImm12OpValue - Return encoding info for 12-bit unsigned immediate
+ /// attached to a load, store or prfm instruction. If operand requires a
+ /// relocation, record it and return zero in that part of the encoding.
+ template <uint32_t FixupKind>
+ uint32_t getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
+ /// target.
+ uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
+ /// the 2-bit shift field.
+ uint32_t getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getCondBranchTargetOpValue - Return the encoded value for a conditional
+ /// branch target.
+ uint32_t getCondBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getLoadLiteralOpValue - Return the encoded value for a load-literal
+ /// pc-relative address.
+ uint32_t getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getMemExtendOpValue - Return the encoded value for a reg-extend load/store
+ /// instruction: bit 0 is whether a shift is present, bit 1 is whether the
+ /// operation is a sign extend (as opposed to a zero extend).
+ uint32_t getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
+ /// branch target.
+ uint32_t getTestBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getBranchTargetOpValue - Return the encoded value for an unconditional
+ /// branch target.
+ uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getMoveWideImmOpValue - Return the encoded value for the immediate operand
+ /// of a MOVZ or MOVK instruction.
+ uint32_t getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getVecShifterOpValue - Return the encoded value for the vector shifter.
+ uint32_t getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getMoveVecShifterOpValue - Return the encoded value for the vector move
+ /// shifter (MSL).
+ uint32_t getMoveVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getFixedPointScaleOpValue - Return the encoded value for the
+  /// FP-to-fixed-point scale factor.
+ uint32_t getFixedPointScaleOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ uint32_t getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getSIMDShift64OpValue - Return the encoded value for the
+  /// shift-by-immediate AdvSIMD instructions.
+ uint32_t getSIMDShift64OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ uint32_t getSIMDShift64_32OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ uint32_t getSIMDShift32OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
- void EmitByte(unsigned char C, raw_ostream &OS) const {
- OS << (char)C;
- }
+ uint32_t getSIMDShift16OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
- void EmitInstruction(uint32_t Val, raw_ostream &OS) const {
+ unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const;
+
+ void EmitByte(unsigned char C, raw_ostream &OS) const { OS << (char)C; }
+
+ void EmitConstant(uint64_t Val, unsigned Size, raw_ostream &OS) const {
// Output the constant in little endian byte order.
- for (unsigned i = 0; i != 4; ++i) {
- EmitByte(Val & 0xff, OS);
+ for (unsigned i = 0; i != Size; ++i) {
+ EmitByte(Val & 255, OS);
Val >>= 8;
}
}
-
void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const;
-
- template<int hasRs, int hasRt2> unsigned
- fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue) const;
-
- unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const override;
- unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue) const;
+ unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const;
+ template<int hasRs, int hasRt2> unsigned
+ fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const;
+ unsigned fixOneOperandFPComparison(const MCInst &MI, unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const;
};
} // end anonymous namespace
-unsigned AArch64MCCodeEmitter::getAddressWithFixup(const MCOperand &MO,
- unsigned FixupKind,
- SmallVectorImpl<MCFixup> &Fixups) const {
- if (!MO.isExpr()) {
- // This can occur for manually decoded or constructed MCInsts, but neither
- // the assembly-parser nor instruction selection will currently produce an
- // MCInst that's not a symbol reference.
- assert(MO.isImm() && "Unexpected address requested");
- return MO.getImm();
- }
+MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new AArch64MCCodeEmitter(MCII, STI, Ctx);
+}
- const MCExpr *Expr = MO.getExpr();
- MCFixupKind Kind = MCFixupKind(FixupKind);
- Fixups.push_back(MCFixup::Create(0, Expr, Kind));
+/// getMachineOpValue - Return binary encoding of operand. If the machine
+/// operand requires relocation, record the relocation and return zero.
+unsigned
+AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ if (MO.isReg())
+ return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
- return 0;
+ assert(MO.isImm() && "did not expect relocated expression");
+ return static_cast<unsigned>(MO.getImm());
}
-unsigned AArch64MCCodeEmitter::
-getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- int MemSize) const {
- const MCOperand &ImmOp = MI.getOperand(OpIdx);
- if (ImmOp.isImm())
- return ImmOp.getImm();
-
- assert(ImmOp.isExpr() && "Unexpected operand type");
- const AArch64MCExpr *Expr = cast<AArch64MCExpr>(ImmOp.getExpr());
- unsigned FixupKind;
-
-
- switch (Expr->getKind()) {
- default: llvm_unreachable("Unexpected operand modifier");
- case AArch64MCExpr::VK_AARCH64_LO12: {
- static const unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12,
- AArch64::fixup_a64_ldst16_lo12,
- AArch64::fixup_a64_ldst32_lo12,
- AArch64::fixup_a64_ldst64_lo12,
- AArch64::fixup_a64_ldst128_lo12 };
- assert(MemSize <= 16 && "Invalid fixup for operation");
- FixupKind = FixupsBySize[Log2_32(MemSize)];
- break;
- }
- case AArch64MCExpr::VK_AARCH64_GOT_LO12:
- assert(MemSize == 8 && "Invalid fixup for operation");
- FixupKind = AArch64::fixup_a64_ld64_got_lo12_nc;
- break;
- case AArch64MCExpr::VK_AARCH64_DTPREL_LO12: {
- static const unsigned FixupsBySize[] = {
- AArch64::fixup_a64_ldst8_dtprel_lo12,
- AArch64::fixup_a64_ldst16_dtprel_lo12,
- AArch64::fixup_a64_ldst32_dtprel_lo12,
- AArch64::fixup_a64_ldst64_dtprel_lo12
- };
- assert(MemSize <= 8 && "Invalid fixup for operation");
- FixupKind = FixupsBySize[Log2_32(MemSize)];
- break;
- }
- case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC: {
- static const unsigned FixupsBySize[] = {
- AArch64::fixup_a64_ldst8_dtprel_lo12_nc,
- AArch64::fixup_a64_ldst16_dtprel_lo12_nc,
- AArch64::fixup_a64_ldst32_dtprel_lo12_nc,
- AArch64::fixup_a64_ldst64_dtprel_lo12_nc
- };
- assert(MemSize <= 8 && "Invalid fixup for operation");
- FixupKind = FixupsBySize[Log2_32(MemSize)];
- break;
- }
- case AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12:
- assert(MemSize == 8 && "Invalid fixup for operation");
- FixupKind = AArch64::fixup_a64_ld64_gottprel_lo12_nc;
- break;
- case AArch64MCExpr::VK_AARCH64_TPREL_LO12:{
- static const unsigned FixupsBySize[] = {
- AArch64::fixup_a64_ldst8_tprel_lo12,
- AArch64::fixup_a64_ldst16_tprel_lo12,
- AArch64::fixup_a64_ldst32_tprel_lo12,
- AArch64::fixup_a64_ldst64_tprel_lo12
- };
- assert(MemSize <= 8 && "Invalid fixup for operation");
- FixupKind = FixupsBySize[Log2_32(MemSize)];
- break;
- }
- case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC: {
- static const unsigned FixupsBySize[] = {
- AArch64::fixup_a64_ldst8_tprel_lo12_nc,
- AArch64::fixup_a64_ldst16_tprel_lo12_nc,
- AArch64::fixup_a64_ldst32_tprel_lo12_nc,
- AArch64::fixup_a64_ldst64_tprel_lo12_nc
- };
- assert(MemSize <= 8 && "Invalid fixup for operation");
- FixupKind = FixupsBySize[Log2_32(MemSize)];
- break;
- }
- case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
- assert(MemSize == 8 && "Invalid fixup for operation");
- FixupKind = AArch64::fixup_a64_tlsdesc_ld64_lo12_nc;
- break;
+template<unsigned FixupKind> uint32_t
+AArch64MCCodeEmitter::getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ uint32_t ImmVal = 0;
+
+ if (MO.isImm())
+ ImmVal = static_cast<uint32_t>(MO.getImm());
+ else {
+ assert(MO.isExpr() && "unable to encode load/store imm operand");
+ MCFixupKind Kind = MCFixupKind(FixupKind);
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(), Kind, MI.getLoc()));
+ ++MCNumFixups;
}
- return getAddressWithFixup(ImmOp, FixupKind, Fixups);
+ return ImmVal;
}
-unsigned
-AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+/// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
+/// target.
+uint32_t
+AArch64MCCodeEmitter::getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpIdx);
+
+ // If the destination is an immediate, we have nothing to do.
if (MO.isImm())
- return static_cast<unsigned>(MO.getImm());
-
- assert(MO.isExpr());
-
- unsigned FixupKind = 0;
- switch(cast<AArch64MCExpr>(MO.getExpr())->getKind()) {
- default: llvm_unreachable("Invalid expression modifier");
- case AArch64MCExpr::VK_AARCH64_LO12:
- FixupKind = AArch64::fixup_a64_add_lo12; break;
- case AArch64MCExpr::VK_AARCH64_DTPREL_HI12:
- FixupKind = AArch64::fixup_a64_add_dtprel_hi12; break;
- case AArch64MCExpr::VK_AARCH64_DTPREL_LO12:
- FixupKind = AArch64::fixup_a64_add_dtprel_lo12; break;
- case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC:
- FixupKind = AArch64::fixup_a64_add_dtprel_lo12_nc; break;
- case AArch64MCExpr::VK_AARCH64_TPREL_HI12:
- FixupKind = AArch64::fixup_a64_add_tprel_hi12; break;
- case AArch64MCExpr::VK_AARCH64_TPREL_LO12:
- FixupKind = AArch64::fixup_a64_add_tprel_lo12; break;
- case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC:
- FixupKind = AArch64::fixup_a64_add_tprel_lo12_nc; break;
- case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
- FixupKind = AArch64::fixup_a64_tlsdesc_add_lo12_nc; break;
- }
+ return MO.getImm();
+ assert(MO.isExpr() && "Unexpected target type!");
+ const MCExpr *Expr = MO.getExpr();
- return getAddressWithFixup(MO, FixupKind, Fixups);
-}
+ MCFixupKind Kind = MI.getOpcode() == AArch64::ADR
+ ? MCFixupKind(AArch64::fixup_aarch64_pcrel_adr_imm21)
+ : MCFixupKind(AArch64::fixup_aarch64_pcrel_adrp_imm21);
+ Fixups.push_back(MCFixup::Create(0, Expr, Kind, MI.getLoc()));
-unsigned
-AArch64MCCodeEmitter::getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ MCNumFixups += 1;
+ // All of the information is in the fixup.
+ return 0;
+}
+
+/// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
+/// the 2-bit shift field. The shift field is stored in bits 13-14 of the
+/// return value.
+uint32_t
+AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ // Suboperands are [imm, shifter].
const MCOperand &MO = MI.getOperand(OpIdx);
+ const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
+ assert(AArch64_AM::getShiftType(MO1.getImm()) == AArch64_AM::LSL &&
+ "unexpected shift type for add/sub immediate");
+ unsigned ShiftVal = AArch64_AM::getShiftValue(MO1.getImm());
+ assert((ShiftVal == 0 || ShiftVal == 12) &&
+ "unexpected shift value for add/sub immediate");
if (MO.isImm())
- return static_cast<unsigned>(MO.getImm());
-
- assert(MO.isExpr());
+ return MO.getImm() | (ShiftVal == 0 ? 0 : (1 << 12));
+ assert(MO.isExpr() && "Unable to encode MCOperand!");
+ const MCExpr *Expr = MO.getExpr();
- unsigned Modifier = AArch64MCExpr::VK_AARCH64_None;
- if (const AArch64MCExpr *Expr = dyn_cast<AArch64MCExpr>(MO.getExpr()))
- Modifier = Expr->getKind();
+ // Encode the 12 bits of the fixup.
+ MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_add_imm12);
+ Fixups.push_back(MCFixup::Create(0, Expr, Kind, MI.getLoc()));
- unsigned FixupKind = 0;
- switch(Modifier) {
- case AArch64MCExpr::VK_AARCH64_None:
- FixupKind = AArch64::fixup_a64_adr_prel_page;
- break;
- case AArch64MCExpr::VK_AARCH64_GOT:
- FixupKind = AArch64::fixup_a64_adr_prel_got_page;
- break;
- case AArch64MCExpr::VK_AARCH64_GOTTPREL:
- FixupKind = AArch64::fixup_a64_adr_gottprel_page;
- break;
- case AArch64MCExpr::VK_AARCH64_TLSDESC:
- FixupKind = AArch64::fixup_a64_tlsdesc_adr_page;
- break;
- default:
- llvm_unreachable("Unknown symbol reference kind for ADRP instruction");
- }
+ ++MCNumFixups;
- return getAddressWithFixup(MO, FixupKind, Fixups);
+ return 0;
}
-unsigned
-AArch64MCCodeEmitter::getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
-
+/// getCondBranchTargetOpValue - Return the encoded value for a conditional
+/// branch target.
+uint32_t AArch64MCCodeEmitter::getCondBranchTargetOpValue(
+ const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpIdx);
- assert(MO.isImm() && "Only immediate expected for shift");
- return ((32 - MO.getImm()) & 0x1f) | (31 - MO.getImm()) << 6;
-}
+ // If the destination is an immediate, we have nothing to do.
+ if (MO.isImm())
+ return MO.getImm();
+ assert(MO.isExpr() && "Unexpected target type!");
-unsigned
-AArch64MCCodeEmitter::getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch19);
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(), Kind, MI.getLoc()));
- const MCOperand &MO = MI.getOperand(OpIdx);
- assert(MO.isImm() && "Only immediate expected for shift");
+ ++MCNumFixups;
- return ((64 - MO.getImm()) & 0x3f) | (63 - MO.getImm()) << 6;
+ // All of the information is in the fixup.
+ return 0;
}
-unsigned AArch64MCCodeEmitter::getShiftRightImm8(
- const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
- return 8 - MI.getOperand(Op).getImm();
-}
+/// getLoadLiteralOpValue - Return the encoded value for a load-literal
+/// pc-relative address.
+uint32_t
+AArch64MCCodeEmitter::getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
-unsigned AArch64MCCodeEmitter::getShiftRightImm16(
- const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
- return 16 - MI.getOperand(Op).getImm();
-}
+ // If the destination is an immediate, we have nothing to do.
+ if (MO.isImm())
+ return MO.getImm();
+ assert(MO.isExpr() && "Unexpected target type!");
-unsigned AArch64MCCodeEmitter::getShiftRightImm32(
- const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
- return 32 - MI.getOperand(Op).getImm();
-}
+ MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_ldr_pcrel_imm19);
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(), Kind, MI.getLoc()));
-unsigned AArch64MCCodeEmitter::getShiftRightImm64(
- const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
- return 64 - MI.getOperand(Op).getImm();
-}
+ ++MCNumFixups;
-unsigned AArch64MCCodeEmitter::getShiftLeftImm8(
- const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
- return MI.getOperand(Op).getImm() - 8;
+ // All of the information is in the fixup.
+ return 0;
}
-unsigned AArch64MCCodeEmitter::getShiftLeftImm16(
- const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
- return MI.getOperand(Op).getImm() - 16;
+uint32_t
+AArch64MCCodeEmitter::getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ unsigned SignExtend = MI.getOperand(OpIdx).getImm();
+ unsigned DoShift = MI.getOperand(OpIdx + 1).getImm();
+ return (SignExtend << 1) | DoShift;
}
-unsigned AArch64MCCodeEmitter::getShiftLeftImm32(
- const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
- return MI.getOperand(Op).getImm() - 32;
-}
+uint32_t
+AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+
+ if (MO.isImm())
+ return MO.getImm();
+ assert(MO.isExpr() && "Unexpected movz/movk immediate");
+
+ Fixups.push_back(MCFixup::Create(
+ 0, MO.getExpr(), MCFixupKind(AArch64::fixup_aarch64_movw), MI.getLoc()));
-unsigned AArch64MCCodeEmitter::getShiftLeftImm64(
- const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
- return MI.getOperand(Op).getImm() - 64;
+ ++MCNumFixups;
+
+ return 0;
}
-template<AArch64::Fixups fixupDesired> unsigned
-AArch64MCCodeEmitter::getLabelOpValue(const MCInst &MI,
- unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+/// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
+/// branch target.
+uint32_t AArch64MCCodeEmitter::getTestBranchTargetOpValue(
+ const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpIdx);
- if (MO.isExpr())
- return getAddressWithFixup(MO, fixupDesired, Fixups);
+ // If the destination is an immediate, we have nothing to do.
+ if (MO.isImm())
+ return MO.getImm();
+ assert(MO.isExpr() && "Unexpected ADR target type!");
+
+ MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch14);
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(), Kind, MI.getLoc()));
+
+ ++MCNumFixups;
- assert(MO.isImm());
- return MO.getImm();
+ // All of the information is in the fixup.
+ return 0;
}
-unsigned
-AArch64MCCodeEmitter::getLoadLitLabelOpValue(const MCInst &MI,
- unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+/// getBranchTargetOpValue - Return the encoded value for an unconditional
+/// branch target.
+uint32_t
+AArch64MCCodeEmitter::getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpIdx);
+ // If the destination is an immediate, we have nothing to do.
if (MO.isImm())
return MO.getImm();
+ assert(MO.isExpr() && "Unexpected ADR target type!");
- assert(MO.isExpr());
+ MCFixupKind Kind = MI.getOpcode() == AArch64::BL
+ ? MCFixupKind(AArch64::fixup_aarch64_pcrel_call26)
+ : MCFixupKind(AArch64::fixup_aarch64_pcrel_branch26);
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(), Kind, MI.getLoc()));
- unsigned FixupKind;
- if (isa<AArch64MCExpr>(MO.getExpr())) {
- assert(dyn_cast<AArch64MCExpr>(MO.getExpr())->getKind()
- == AArch64MCExpr::VK_AARCH64_GOTTPREL
- && "Invalid symbol modifier for literal load");
- FixupKind = AArch64::fixup_a64_ld_gottprel_prel19;
- } else {
- FixupKind = AArch64::fixup_a64_ld_prel;
- }
+ ++MCNumFixups;
- return getAddressWithFixup(MO, FixupKind, Fixups);
+ // All of the information is in the fixup.
+ return 0;
}
+/// getVecShifterOpValue - Return the encoded value for the vector shifter:
+///
+/// 00 -> 0
+/// 01 -> 8
+/// 10 -> 16
+/// 11 -> 24
+uint32_t
+AArch64MCCodeEmitter::getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the shift amount!");
-unsigned
-AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI,
- const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixups) const {
- if (MO.isReg()) {
- return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
- } else if (MO.isImm()) {
- return static_cast<unsigned>(MO.getImm());
+ switch (MO.getImm()) {
+ default:
+ break;
+ case 0:
+ return 0;
+ case 8:
+ return 1;
+ case 16:
+ return 2;
+ case 24:
+ return 3;
}
- llvm_unreachable("Unable to encode MCOperand!");
+ assert(false && "Invalid value for vector shift amount!");
return 0;
}
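A compact way to read the switch above: the encoded field is just the shift amount divided by 8. A hypothetical helper making that explicit (assumes the operand is one of the four legal LSL amounts, as the assert above already guarantees):

    #include <cassert>
    #include <cstdint>
    static uint32_t encodeVecShifter(uint64_t Imm) {
      assert(Imm % 8 == 0 && Imm <= 24 && "only 0, 8, 16 or 24 are encodable");
      return static_cast<uint32_t>(Imm / 8); // 0->0, 8->1, 16->2, 24->3
    }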
-unsigned
-AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
- const MCOperand &UImm16MO = MI.getOperand(OpIdx);
- const MCOperand &ShiftMO = MI.getOperand(OpIdx + 1);
+uint32_t
+AArch64MCCodeEmitter::getSIMDShift64OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the shift amount!");
+ return 64 - (MO.getImm());
+}
- unsigned Result = static_cast<unsigned>(ShiftMO.getImm()) << 16;
+uint32_t AArch64MCCodeEmitter::getSIMDShift64_32OpValue(
+ const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the shift amount!");
+ return 64 - (MO.getImm() | 32);
+}
- if (UImm16MO.isImm()) {
- Result |= UImm16MO.getImm();
- return Result;
- }
+uint32_t
+AArch64MCCodeEmitter::getSIMDShift32OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the shift amount!");
+ return 32 - (MO.getImm() | 16);
+}
- const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
- AArch64::Fixups requestedFixup;
- switch (A64E->getKind()) {
- default: llvm_unreachable("unexpected expression modifier");
- case AArch64MCExpr::VK_AARCH64_ABS_G0:
- requestedFixup = AArch64::fixup_a64_movw_uabs_g0; break;
- case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
- requestedFixup = AArch64::fixup_a64_movw_uabs_g0_nc; break;
- case AArch64MCExpr::VK_AARCH64_ABS_G1:
- requestedFixup = AArch64::fixup_a64_movw_uabs_g1; break;
- case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
- requestedFixup = AArch64::fixup_a64_movw_uabs_g1_nc; break;
- case AArch64MCExpr::VK_AARCH64_ABS_G2:
- requestedFixup = AArch64::fixup_a64_movw_uabs_g2; break;
- case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
- requestedFixup = AArch64::fixup_a64_movw_uabs_g2_nc; break;
- case AArch64MCExpr::VK_AARCH64_ABS_G3:
- requestedFixup = AArch64::fixup_a64_movw_uabs_g3; break;
- case AArch64MCExpr::VK_AARCH64_SABS_G0:
- requestedFixup = AArch64::fixup_a64_movw_sabs_g0; break;
- case AArch64MCExpr::VK_AARCH64_SABS_G1:
- requestedFixup = AArch64::fixup_a64_movw_sabs_g1; break;
- case AArch64MCExpr::VK_AARCH64_SABS_G2:
- requestedFixup = AArch64::fixup_a64_movw_sabs_g2; break;
- case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
- requestedFixup = AArch64::fixup_a64_movw_dtprel_g2; break;
- case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
- requestedFixup = AArch64::fixup_a64_movw_dtprel_g1; break;
- case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
- requestedFixup = AArch64::fixup_a64_movw_dtprel_g1_nc; break;
- case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
- requestedFixup = AArch64::fixup_a64_movw_dtprel_g0; break;
- case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
- requestedFixup = AArch64::fixup_a64_movw_dtprel_g0_nc; break;
- case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
- requestedFixup = AArch64::fixup_a64_movw_gottprel_g1; break;
- case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
- requestedFixup = AArch64::fixup_a64_movw_gottprel_g0_nc; break;
- case AArch64MCExpr::VK_AARCH64_TPREL_G2:
- requestedFixup = AArch64::fixup_a64_movw_tprel_g2; break;
- case AArch64MCExpr::VK_AARCH64_TPREL_G1:
- requestedFixup = AArch64::fixup_a64_movw_tprel_g1; break;
- case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
- requestedFixup = AArch64::fixup_a64_movw_tprel_g1_nc; break;
- case AArch64MCExpr::VK_AARCH64_TPREL_G0:
- requestedFixup = AArch64::fixup_a64_movw_tprel_g0; break;
- case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
- requestedFixup = AArch64::fixup_a64_movw_tprel_g0_nc; break;
- }
+uint32_t
+AArch64MCCodeEmitter::getSIMDShift16OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the shift amount!");
+ return 16 - (MO.getImm() | 8);
+}
+
+/// getFixedPointScaleOpValue - Return the encoded value for the
+/// FP-to-fixed-point scale factor.
+uint32_t AArch64MCCodeEmitter::getFixedPointScaleOpValue(
+ const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Expected an immediate value for the scale amount!");
+ return 64 - MO.getImm();
+}
- return Result | getAddressWithFixup(UImm16MO, requestedFixup, Fixups);
+uint32_t
+AArch64MCCodeEmitter::getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
+ return 64 - MO.getImm();
}
-template<int hasRs, int hasRt2> unsigned
-AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
- unsigned EncodedValue) const {
- if (!hasRs) EncodedValue |= 0x001F0000;
- if (!hasRt2) EncodedValue |= 0x00007C00;
+uint32_t
+AArch64MCCodeEmitter::getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
+ return 32 - MO.getImm();
+}
- return EncodedValue;
+uint32_t
+AArch64MCCodeEmitter::getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
+ return 16 - MO.getImm();
}
-unsigned
-AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue) const {
+uint32_t
+AArch64MCCodeEmitter::getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
+ return 8 - MO.getImm();
+}
+
+uint32_t
+AArch64MCCodeEmitter::getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
+ return MO.getImm() - 64;
+}
+
+uint32_t
+AArch64MCCodeEmitter::getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
+ return MO.getImm() - 32;
+}
+
+uint32_t
+AArch64MCCodeEmitter::getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
+ return MO.getImm() - 16;
+}
+
+uint32_t
+AArch64MCCodeEmitter::getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
+ return MO.getImm() - 8;
+}
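Worked numbers for the shift getters above (plain arithmetic, no LLVM APIs involved): the right-shift variants return the lane width minus the shift, so a right shift by #3 on 64-bit lanes is emitted as 61, and on 8-bit lanes as 5; the generated encoder then places that value in the instruction's immediate bits.

    #include <cassert>
    int main() {
      assert(64 - 3 == 61); // mirrors getVecShiftR64OpValue for #3
      assert(8 - 3 == 5);   // mirrors getVecShiftR8OpValue for #3
      return 0;
    }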
+
+/// getMoveVecShifterOpValue - Return the encoded value for the vector move
+/// shifter (MSL).
+uint32_t AArch64MCCodeEmitter::getMoveVecShifterOpValue(
+ const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() &&
+ "Expected an immediate value for the move shift amount!");
+ unsigned ShiftVal = AArch64_AM::getShiftValue(MO.getImm());
+ assert((ShiftVal == 8 || ShiftVal == 16) && "Invalid shift amount!");
+ return ShiftVal == 8 ? 0 : 1;
+}
+
+unsigned AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const {
// If one of the signed fixup kinds is applied to a MOVZ instruction, the
// eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
// job to ensure that any bits possibly affected by this are 0. This means we
@@ -508,59 +585,66 @@ AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue) const {
const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
switch (A64E->getKind()) {
- case AArch64MCExpr::VK_AARCH64_SABS_G0:
- case AArch64MCExpr::VK_AARCH64_SABS_G1:
- case AArch64MCExpr::VK_AARCH64_SABS_G2:
- case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
- case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
- case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
- case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
- case AArch64MCExpr::VK_AARCH64_TPREL_G2:
- case AArch64MCExpr::VK_AARCH64_TPREL_G1:
- case AArch64MCExpr::VK_AARCH64_TPREL_G0:
+ case AArch64MCExpr::VK_DTPREL_G2:
+ case AArch64MCExpr::VK_DTPREL_G1:
+ case AArch64MCExpr::VK_DTPREL_G0:
+ case AArch64MCExpr::VK_GOTTPREL_G1:
+ case AArch64MCExpr::VK_TPREL_G2:
+ case AArch64MCExpr::VK_TPREL_G1:
+ case AArch64MCExpr::VK_TPREL_G0:
return EncodedValue & ~(1u << 30);
default:
// Nothing to do for an unsigned fixup.
return EncodedValue;
}
- llvm_unreachable("Should have returned by now");
-}
-
-unsigned
-AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
- unsigned EncodedValue) const {
- // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
- // (i.e. all bits 1) but is ignored by the processor.
- EncodedValue |= 0x1f << 10;
- return EncodedValue;
-}
-MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
- const MCRegisterInfo &MRI,
- const MCSubtargetInfo &STI,
- MCContext &Ctx) {
- return new AArch64MCCodeEmitter(Ctx);
+ return EncodedValue & ~(1u << 30);
}
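Some context for the bit-30 manipulation: in the A64 move-wide encoding the opc field sits in bits 30:29, with MOVN = 00 and MOVZ = 10, so clearing bit 30 leaves the bits in MOVN form and the fixup is free to re-set it if the final immediate is better expressed as a MOVZ. That reading of the encoding is mine, not spelled out in the patch; a tiny helper expressing the bit being toggled:

    #include <cstdint>
    // True if an already-encoded move-wide instruction is in its MOVZ form
    // (opc == 0b10) rather than its MOVN form (opc == 0b00).
    static bool isEncodedAsMovz(uint32_t EncodedValue) {
      return (EncodedValue >> 30) & 1;
    }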
-void AArch64MCCodeEmitter::
-EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const {
+void AArch64MCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
if (MI.getOpcode() == AArch64::TLSDESCCALL) {
// This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
// following (BLR) instruction. It doesn't emit any code itself so it
// doesn't go through the normal TableGenerated channels.
- MCFixupKind Fixup = MCFixupKind(AArch64::fixup_a64_tlsdesc_call);
- const MCExpr *Expr;
- Expr = AArch64MCExpr::CreateTLSDesc(MI.getOperand(0).getExpr(), Ctx);
- Fixups.push_back(MCFixup::Create(0, Expr, Fixup));
+ MCFixupKind Fixup = MCFixupKind(AArch64::fixup_aarch64_tlsdesc_call);
+ Fixups.push_back(MCFixup::Create(0, MI.getOperand(0).getExpr(), Fixup));
return;
}
- uint32_t Binary = getBinaryCodeForInstr(MI, Fixups);
+ uint64_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);
+ EmitConstant(Binary, 4, OS);
+ ++MCNumEmitted; // Keep track of the # of mi's emitted.
+}
+
+unsigned
+AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
+ unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const {
+ // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
+ // (i.e. all bits 1) but is ignored by the processor.
+ EncodedValue |= 0x1f << 10;
+ return EncodedValue;
+}
+
+template<int hasRs, int hasRt2> unsigned
+AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
+ unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const {
+ if (!hasRs) EncodedValue |= 0x001F0000;
+ if (!hasRt2) EncodedValue |= 0x00007C00;
- EmitInstruction(Binary, OS);
+ return EncodedValue;
}
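The two magic constants are the Rs and Rt2 register fields forced to all ones; a quick check of the masks, assuming the usual A64 field positions (Rs in bits 20:16, Rt2 in bits 14:10):

    #include <cstdint>
    static_assert(0x001F0000u == (0x1Fu << 16), "Rs field, bits [20:16]");
    static_assert(0x00007C00u == (0x1Fu << 10), "Rt2 field, bits [14:10]");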
+unsigned AArch64MCCodeEmitter::fixOneOperandFPComparison(
+ const MCInst &MI, unsigned EncodedValue, const MCSubtargetInfo &STI) const {
+ // The Rm field of FCMP and friends is unused - it should be assembled
+ // as 0, but is ignored by the processor.
+ EncodedValue &= ~(0x1f << 16);
+ return EncodedValue;
+}
#include "AArch64GenMCCodeEmitter.inc"
diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
index c1abfe7..42a6787 100644
--- a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
+++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
@@ -12,74 +12,92 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "aarch64mcexpr"
#include "AArch64MCExpr.h"
-#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELF.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCValue.h"
#include "llvm/Object/ELF.h"
+#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
-const AArch64MCExpr*
-AArch64MCExpr::Create(VariantKind Kind, const MCExpr *Expr,
- MCContext &Ctx) {
- return new (Ctx) AArch64MCExpr(Kind, Expr);
+#define DEBUG_TYPE "aarch64symbolrefexpr"
+
+const AArch64MCExpr *AArch64MCExpr::Create(const MCExpr *Expr, VariantKind Kind,
+ MCContext &Ctx) {
+ return new (Ctx) AArch64MCExpr(Expr, Kind);
+}
+
+StringRef AArch64MCExpr::getVariantKindName() const {
+ switch (static_cast<uint32_t>(getKind())) {
+ case VK_CALL: return "";
+ case VK_LO12: return ":lo12:";
+ case VK_ABS_G3: return ":abs_g3:";
+ case VK_ABS_G2: return ":abs_g2:";
+ case VK_ABS_G2_S: return ":abs_g2_s:";
+ case VK_ABS_G2_NC: return ":abs_g2_nc:";
+ case VK_ABS_G1: return ":abs_g1:";
+ case VK_ABS_G1_S: return ":abs_g1_s:";
+ case VK_ABS_G1_NC: return ":abs_g1_nc:";
+ case VK_ABS_G0: return ":abs_g0:";
+ case VK_ABS_G0_S: return ":abs_g0_s:";
+ case VK_ABS_G0_NC: return ":abs_g0_nc:";
+ case VK_DTPREL_G2: return ":dtprel_g2:";
+ case VK_DTPREL_G1: return ":dtprel_g1:";
+ case VK_DTPREL_G1_NC: return ":dtprel_g1_nc:";
+ case VK_DTPREL_G0: return ":dtprel_g0:";
+ case VK_DTPREL_G0_NC: return ":dtprel_g0_nc:";
+ case VK_DTPREL_HI12: return ":dtprel_hi12:";
+ case VK_DTPREL_LO12: return ":dtprel_lo12:";
+ case VK_DTPREL_LO12_NC: return ":dtprel_lo12_nc:";
+ case VK_TPREL_G2: return ":tprel_g2:";
+ case VK_TPREL_G1: return ":tprel_g1:";
+ case VK_TPREL_G1_NC: return ":tprel_g1_nc:";
+ case VK_TPREL_G0: return ":tprel_g0:";
+ case VK_TPREL_G0_NC: return ":tprel_g0_nc:";
+ case VK_TPREL_HI12: return ":tprel_hi12:";
+ case VK_TPREL_LO12: return ":tprel_lo12:";
+ case VK_TPREL_LO12_NC: return ":tprel_lo12_nc:";
+ case VK_TLSDESC_LO12: return ":tlsdesc_lo12:";
+ case VK_ABS_PAGE: return "";
+ case VK_GOT_PAGE: return ":got:";
+ case VK_GOT_LO12: return ":got_lo12:";
+ case VK_GOTTPREL_PAGE: return ":gottprel:";
+ case VK_GOTTPREL_LO12_NC: return ":gottprel_lo12:";
+ case VK_GOTTPREL_G1: return ":gottprel_g1:";
+ case VK_GOTTPREL_G0_NC: return ":gottprel_g0_nc:";
+ case VK_TLSDESC: return "";
+ case VK_TLSDESC_PAGE: return ":tlsdesc:";
+ default:
+ llvm_unreachable("Invalid ELF symbol kind");
+ }
}
void AArch64MCExpr::PrintImpl(raw_ostream &OS) const {
- switch (Kind) {
- default: llvm_unreachable("Invalid kind!");
- case VK_AARCH64_GOT: OS << ":got:"; break;
- case VK_AARCH64_GOT_LO12: OS << ":got_lo12:"; break;
- case VK_AARCH64_LO12: OS << ":lo12:"; break;
- case VK_AARCH64_ABS_G0: OS << ":abs_g0:"; break;
- case VK_AARCH64_ABS_G0_NC: OS << ":abs_g0_nc:"; break;
- case VK_AARCH64_ABS_G1: OS << ":abs_g1:"; break;
- case VK_AARCH64_ABS_G1_NC: OS << ":abs_g1_nc:"; break;
- case VK_AARCH64_ABS_G2: OS << ":abs_g2:"; break;
- case VK_AARCH64_ABS_G2_NC: OS << ":abs_g2_nc:"; break;
- case VK_AARCH64_ABS_G3: OS << ":abs_g3:"; break;
- case VK_AARCH64_SABS_G0: OS << ":abs_g0_s:"; break;
- case VK_AARCH64_SABS_G1: OS << ":abs_g1_s:"; break;
- case VK_AARCH64_SABS_G2: OS << ":abs_g2_s:"; break;
- case VK_AARCH64_DTPREL_G2: OS << ":dtprel_g2:"; break;
- case VK_AARCH64_DTPREL_G1: OS << ":dtprel_g1:"; break;
- case VK_AARCH64_DTPREL_G1_NC: OS << ":dtprel_g1_nc:"; break;
- case VK_AARCH64_DTPREL_G0: OS << ":dtprel_g0:"; break;
- case VK_AARCH64_DTPREL_G0_NC: OS << ":dtprel_g0_nc:"; break;
- case VK_AARCH64_DTPREL_HI12: OS << ":dtprel_hi12:"; break;
- case VK_AARCH64_DTPREL_LO12: OS << ":dtprel_lo12:"; break;
- case VK_AARCH64_DTPREL_LO12_NC: OS << ":dtprel_lo12_nc:"; break;
- case VK_AARCH64_GOTTPREL_G1: OS << ":gottprel_g1:"; break;
- case VK_AARCH64_GOTTPREL_G0_NC: OS << ":gottprel_g0_nc:"; break;
- case VK_AARCH64_GOTTPREL: OS << ":gottprel:"; break;
- case VK_AARCH64_GOTTPREL_LO12: OS << ":gottprel_lo12:"; break;
- case VK_AARCH64_TPREL_G2: OS << ":tprel_g2:"; break;
- case VK_AARCH64_TPREL_G1: OS << ":tprel_g1:"; break;
- case VK_AARCH64_TPREL_G1_NC: OS << ":tprel_g1_nc:"; break;
- case VK_AARCH64_TPREL_G0: OS << ":tprel_g0:"; break;
- case VK_AARCH64_TPREL_G0_NC: OS << ":tprel_g0_nc:"; break;
- case VK_AARCH64_TPREL_HI12: OS << ":tprel_hi12:"; break;
- case VK_AARCH64_TPREL_LO12: OS << ":tprel_lo12:"; break;
- case VK_AARCH64_TPREL_LO12_NC: OS << ":tprel_lo12_nc:"; break;
- case VK_AARCH64_TLSDESC: OS << ":tlsdesc:"; break;
- case VK_AARCH64_TLSDESC_LO12: OS << ":tlsdesc_lo12:"; break;
+ if (getKind() != VK_NONE)
+ OS << getVariantKindName();
+ OS << *Expr;
+}
- }
+void AArch64MCExpr::visitUsedExpr(MCStreamer &Streamer) const {
+ Streamer.visitUsedExpr(*getSubExpr());
+}
- const MCExpr *Expr = getSubExpr();
- if (Expr->getKind() != MCExpr::SymbolRef)
- OS << '(';
- Expr->print(OS);
- if (Expr->getKind() != MCExpr::SymbolRef)
- OS << ')';
+const MCSection *AArch64MCExpr::FindAssociatedSection() const {
+ llvm_unreachable("FIXME: what goes here?");
}
-bool
-AArch64MCExpr::EvaluateAsRelocatableImpl(MCValue &Res,
- const MCAsmLayout *Layout) const {
- return getSubExpr()->EvaluateAsRelocatable(Res, *Layout);
+bool AArch64MCExpr::EvaluateAsRelocatableImpl(MCValue &Res,
+ const MCAsmLayout *Layout) const {
+ if (!getSubExpr()->EvaluateAsRelocatable(Res, Layout))
+ return false;
+
+ Res =
+ MCValue::get(Res.getSymA(), Res.getSymB(), Res.getConstant(), getKind());
+
+ return true;
}
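The point of the rewrite above is that the AArch64-specific modifier survives evaluation by riding along as the MCValue's reference kind. A sketch of the consumer side, along the lines of what an object writer would do (not code from this patch; Target is assumed to be the evaluated MCValue):

    auto RefKind = static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
    if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_GOT) {
      // ... pick a GOT-relative relocation type for this fixup ...
    }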
static void fixELFSymbolsInTLSFixupsImpl(const MCExpr *Expr, MCAssembler &Asm) {
@@ -113,66 +131,15 @@ static void fixELFSymbolsInTLSFixupsImpl(const MCExpr *Expr, MCAssembler &Asm) {
}
void AArch64MCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {
- switch (getKind()) {
+ switch (getSymbolLoc(Kind)) {
default:
return;
- case VK_AARCH64_DTPREL_G2:
- case VK_AARCH64_DTPREL_G1:
- case VK_AARCH64_DTPREL_G1_NC:
- case VK_AARCH64_DTPREL_G0:
- case VK_AARCH64_DTPREL_G0_NC:
- case VK_AARCH64_DTPREL_HI12:
- case VK_AARCH64_DTPREL_LO12:
- case VK_AARCH64_DTPREL_LO12_NC:
- case VK_AARCH64_GOTTPREL_G1:
- case VK_AARCH64_GOTTPREL_G0_NC:
- case VK_AARCH64_GOTTPREL:
- case VK_AARCH64_GOTTPREL_LO12:
- case VK_AARCH64_TPREL_G2:
- case VK_AARCH64_TPREL_G1:
- case VK_AARCH64_TPREL_G1_NC:
- case VK_AARCH64_TPREL_G0:
- case VK_AARCH64_TPREL_G0_NC:
- case VK_AARCH64_TPREL_HI12:
- case VK_AARCH64_TPREL_LO12:
- case VK_AARCH64_TPREL_LO12_NC:
- case VK_AARCH64_TLSDESC:
- case VK_AARCH64_TLSDESC_LO12:
+ case VK_DTPREL:
+ case VK_GOTTPREL:
+ case VK_TPREL:
+ case VK_TLSDESC:
break;
}
fixELFSymbolsInTLSFixupsImpl(getSubExpr(), Asm);
}
-
-// FIXME: This basically copies MCObjectStreamer::AddValueSymbols. Perhaps
-// that method should be made public?
-// FIXME: really do above: now that two backends are using it.
-static void AddValueSymbolsImpl(const MCExpr *Value, MCAssembler *Asm) {
- switch (Value->getKind()) {
- case MCExpr::Target:
- llvm_unreachable("Can't handle nested target expr!");
- break;
-
- case MCExpr::Constant:
- break;
-
- case MCExpr::Binary: {
- const MCBinaryExpr *BE = cast<MCBinaryExpr>(Value);
- AddValueSymbolsImpl(BE->getLHS(), Asm);
- AddValueSymbolsImpl(BE->getRHS(), Asm);
- break;
- }
-
- case MCExpr::SymbolRef:
- Asm->getOrCreateSymbolData(cast<MCSymbolRefExpr>(Value)->getSymbol());
- break;
-
- case MCExpr::Unary:
- AddValueSymbolsImpl(cast<MCUnaryExpr>(Value)->getSubExpr(), Asm);
- break;
- }
-}
-
-void AArch64MCExpr::AddValueSymbols(MCAssembler *Asm) const {
- AddValueSymbolsImpl(getSubExpr(), Asm);
-}
diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h
index d9798ae..5422f9d 100644
--- a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h
+++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h
@@ -1,4 +1,4 @@
-//==- AArch64MCExpr.h - AArch64 specific MC expression classes --*- C++ -*-===//
+//=--- AArch64MCExpr.h - AArch64 specific MC expression classes ---*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -12,168 +12,149 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_AARCH64MCEXPR_H
-#define LLVM_AARCH64MCEXPR_H
+#ifndef LLVM_AArch64MCEXPR_H
+#define LLVM_AArch64MCEXPR_H
#include "llvm/MC/MCExpr.h"
+#include "llvm/Support/ErrorHandling.h"
namespace llvm {
class AArch64MCExpr : public MCTargetExpr {
public:
enum VariantKind {
- VK_AARCH64_None,
- VK_AARCH64_GOT, // :got: modifier in assembly
- VK_AARCH64_GOT_LO12, // :got_lo12:
- VK_AARCH64_LO12, // :lo12:
-
- VK_AARCH64_ABS_G0, // :abs_g0:
- VK_AARCH64_ABS_G0_NC, // :abs_g0_nc:
- VK_AARCH64_ABS_G1,
- VK_AARCH64_ABS_G1_NC,
- VK_AARCH64_ABS_G2,
- VK_AARCH64_ABS_G2_NC,
- VK_AARCH64_ABS_G3,
-
- VK_AARCH64_SABS_G0, // :abs_g0_s:
- VK_AARCH64_SABS_G1,
- VK_AARCH64_SABS_G2,
-
- VK_AARCH64_DTPREL_G2, // :dtprel_g2:
- VK_AARCH64_DTPREL_G1,
- VK_AARCH64_DTPREL_G1_NC,
- VK_AARCH64_DTPREL_G0,
- VK_AARCH64_DTPREL_G0_NC,
- VK_AARCH64_DTPREL_HI12,
- VK_AARCH64_DTPREL_LO12,
- VK_AARCH64_DTPREL_LO12_NC,
-
- VK_AARCH64_GOTTPREL_G1, // :gottprel:
- VK_AARCH64_GOTTPREL_G0_NC,
- VK_AARCH64_GOTTPREL,
- VK_AARCH64_GOTTPREL_LO12,
-
- VK_AARCH64_TPREL_G2, // :tprel:
- VK_AARCH64_TPREL_G1,
- VK_AARCH64_TPREL_G1_NC,
- VK_AARCH64_TPREL_G0,
- VK_AARCH64_TPREL_G0_NC,
- VK_AARCH64_TPREL_HI12,
- VK_AARCH64_TPREL_LO12,
- VK_AARCH64_TPREL_LO12_NC,
-
- VK_AARCH64_TLSDESC, // :tlsdesc:
- VK_AARCH64_TLSDESC_LO12
+ VK_NONE = 0x000,
+
+ // Symbol locations specifying (roughly speaking) what calculation should be
+ // performed to construct the final address for the relocated
+ // symbol. E.g. direct, via the GOT, ...
+ VK_ABS = 0x001,
+ VK_SABS = 0x002,
+ VK_GOT = 0x003,
+ VK_DTPREL = 0x004,
+ VK_GOTTPREL = 0x005,
+ VK_TPREL = 0x006,
+ VK_TLSDESC = 0x007,
+ VK_SymLocBits = 0x00f,
+
+ // Variants specifying which part of the final address calculation is
+ // used. E.g. the low 12 bits for an ADD/LDR, the middle 16 bits for a
+ // MOVZ/MOVK.
+ VK_PAGE = 0x010,
+ VK_PAGEOFF = 0x020,
+ VK_HI12 = 0x030,
+ VK_G0 = 0x040,
+ VK_G1 = 0x050,
+ VK_G2 = 0x060,
+ VK_G3 = 0x070,
+ VK_AddressFragBits = 0x0f0,
+
+ // Whether the final relocation is a checked one (where a linker should
+ // perform a range-check on the final address) or not. Note that this field
+ // is unfortunately sometimes omitted from the assembly syntax. E.g. :lo12:
+ // on its own is a non-checked relocation. We side with ELF on being
+ // explicit about this!
+ VK_NC = 0x100,
+
+ // Convenience definitions for referring to specific textual representations
+ // of relocation specifiers. Note that this means the "_NC" is sometimes
+ // omitted in line with assembly syntax here (VK_LO12 rather than VK_LO12_NC
+ // since a user would write ":lo12:").
+ VK_CALL = VK_ABS,
+ VK_ABS_PAGE = VK_ABS | VK_PAGE,
+ VK_ABS_G3 = VK_ABS | VK_G3,
+ VK_ABS_G2 = VK_ABS | VK_G2,
+ VK_ABS_G2_S = VK_SABS | VK_G2,
+ VK_ABS_G2_NC = VK_ABS | VK_G2 | VK_NC,
+ VK_ABS_G1 = VK_ABS | VK_G1,
+ VK_ABS_G1_S = VK_SABS | VK_G1,
+ VK_ABS_G1_NC = VK_ABS | VK_G1 | VK_NC,
+ VK_ABS_G0 = VK_ABS | VK_G0,
+ VK_ABS_G0_S = VK_SABS | VK_G0,
+ VK_ABS_G0_NC = VK_ABS | VK_G0 | VK_NC,
+ VK_LO12 = VK_ABS | VK_PAGEOFF | VK_NC,
+ VK_GOT_LO12 = VK_GOT | VK_PAGEOFF | VK_NC,
+ VK_GOT_PAGE = VK_GOT | VK_PAGE,
+ VK_DTPREL_G2 = VK_DTPREL | VK_G2,
+ VK_DTPREL_G1 = VK_DTPREL | VK_G1,
+ VK_DTPREL_G1_NC = VK_DTPREL | VK_G1 | VK_NC,
+ VK_DTPREL_G0 = VK_DTPREL | VK_G0,
+ VK_DTPREL_G0_NC = VK_DTPREL | VK_G0 | VK_NC,
+ VK_DTPREL_HI12 = VK_DTPREL | VK_HI12,
+ VK_DTPREL_LO12 = VK_DTPREL | VK_PAGEOFF,
+ VK_DTPREL_LO12_NC = VK_DTPREL | VK_PAGEOFF | VK_NC,
+ VK_GOTTPREL_PAGE = VK_GOTTPREL | VK_PAGE,
+ VK_GOTTPREL_LO12_NC = VK_GOTTPREL | VK_PAGEOFF | VK_NC,
+ VK_GOTTPREL_G1 = VK_GOTTPREL | VK_G1,
+ VK_GOTTPREL_G0_NC = VK_GOTTPREL | VK_G0 | VK_NC,
+ VK_TPREL_G2 = VK_TPREL | VK_G2,
+ VK_TPREL_G1 = VK_TPREL | VK_G1,
+ VK_TPREL_G1_NC = VK_TPREL | VK_G1 | VK_NC,
+ VK_TPREL_G0 = VK_TPREL | VK_G0,
+ VK_TPREL_G0_NC = VK_TPREL | VK_G0 | VK_NC,
+ VK_TPREL_HI12 = VK_TPREL | VK_HI12,
+ VK_TPREL_LO12 = VK_TPREL | VK_PAGEOFF,
+ VK_TPREL_LO12_NC = VK_TPREL | VK_PAGEOFF | VK_NC,
+ VK_TLSDESC_LO12 = VK_TLSDESC | VK_PAGEOFF | VK_NC,
+ VK_TLSDESC_PAGE = VK_TLSDESC | VK_PAGE,
+
+ VK_INVALID = 0xfff
};
private:
- const VariantKind Kind;
const MCExpr *Expr;
+ const VariantKind Kind;
- explicit AArch64MCExpr(VariantKind _Kind, const MCExpr *_Expr)
- : Kind(_Kind), Expr(_Expr) {}
+ explicit AArch64MCExpr(const MCExpr *Expr, VariantKind Kind)
+ : Expr(Expr), Kind(Kind) {}
public:
/// @name Construction
/// @{
- static const AArch64MCExpr *Create(VariantKind Kind, const MCExpr *Expr,
- MCContext &Ctx);
-
- static const AArch64MCExpr *CreateLo12(const MCExpr *Expr, MCContext &Ctx) {
- return Create(VK_AARCH64_LO12, Expr, Ctx);
- }
-
- static const AArch64MCExpr *CreateGOT(const MCExpr *Expr, MCContext &Ctx) {
- return Create(VK_AARCH64_GOT, Expr, Ctx);
- }
-
- static const AArch64MCExpr *CreateGOTLo12(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_GOT_LO12, Expr, Ctx);
- }
-
- static const AArch64MCExpr *CreateDTPREL_G1(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_DTPREL_G1, Expr, Ctx);
- }
-
- static const AArch64MCExpr *CreateDTPREL_G0_NC(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_DTPREL_G0_NC, Expr, Ctx);
- }
-
- static const AArch64MCExpr *CreateGOTTPREL(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_GOTTPREL, Expr, Ctx);
- }
-
- static const AArch64MCExpr *CreateGOTTPRELLo12(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_GOTTPREL_LO12, Expr, Ctx);
- }
-
- static const AArch64MCExpr *CreateTLSDesc(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_TLSDESC, Expr, Ctx);
- }
+ static const AArch64MCExpr *Create(const MCExpr *Expr, VariantKind Kind,
+ MCContext &Ctx);
- static const AArch64MCExpr *CreateTLSDescLo12(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_TLSDESC_LO12, Expr, Ctx);
- }
+ /// @}
+ /// @name Accessors
+ /// @{
- static const AArch64MCExpr *CreateTPREL_G1(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_TPREL_G1, Expr, Ctx);
- }
+ /// Get the kind of this expression.
+ VariantKind getKind() const { return static_cast<VariantKind>(Kind); }
- static const AArch64MCExpr *CreateTPREL_G0_NC(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_TPREL_G0_NC, Expr, Ctx);
- }
+ /// Get the expression this modifier applies to.
+ const MCExpr *getSubExpr() const { return Expr; }
- static const AArch64MCExpr *CreateABS_G3(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_ABS_G3, Expr, Ctx);
- }
+ /// @}
+ /// @name VariantKind information extractors.
+ /// @{
- static const AArch64MCExpr *CreateABS_G2_NC(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_ABS_G2_NC, Expr, Ctx);
+ static VariantKind getSymbolLoc(VariantKind Kind) {
+ return static_cast<VariantKind>(Kind & VK_SymLocBits);
}
- static const AArch64MCExpr *CreateABS_G1_NC(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_ABS_G1_NC, Expr, Ctx);
+ static VariantKind getAddressFrag(VariantKind Kind) {
+ return static_cast<VariantKind>(Kind & VK_AddressFragBits);
}
- static const AArch64MCExpr *CreateABS_G0_NC(const MCExpr *Expr,
- MCContext &Ctx) {
- return Create(VK_AARCH64_ABS_G0_NC, Expr, Ctx);
- }
+ static bool isNotChecked(VariantKind Kind) { return Kind & VK_NC; }
/// @}
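To make the bit layout concrete: a composite kind decomposes cleanly through these accessors. VK_TPREL_LO12_NC is defined above as VK_TPREL | VK_PAGEOFF | VK_NC, so (illustrative use of the header, not part of it):

    #include <cassert>
    // #include "AArch64MCExpr.h"
    static void exampleDecomposition() {
      AArch64MCExpr::VariantKind K = AArch64MCExpr::VK_TPREL_LO12_NC;
      assert(AArch64MCExpr::getSymbolLoc(K) == AArch64MCExpr::VK_TPREL);     // which calculation
      assert(AArch64MCExpr::getAddressFrag(K) == AArch64MCExpr::VK_PAGEOFF); // which address slice
      assert(AArch64MCExpr::isNotChecked(K));                                // no range check
    }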
- /// @name Accessors
- /// @{
- /// getOpcode - Get the kind of this expression.
- VariantKind getKind() const { return Kind; }
+ /// Convert the variant kind into an ELF-appropriate modifier
+ /// (e.g. ":got:", ":lo12:").
+ StringRef getVariantKindName() const;
- /// getSubExpr - Get the child of this expression.
- const MCExpr *getSubExpr() const { return Expr; }
+ void PrintImpl(raw_ostream &OS) const override;
- /// @}
+ void visitUsedExpr(MCStreamer &Streamer) const override;
+
+ const MCSection *FindAssociatedSection() const override;
- void PrintImpl(raw_ostream &OS) const;
bool EvaluateAsRelocatableImpl(MCValue &Res,
- const MCAsmLayout *Layout) const;
- void AddValueSymbols(MCAssembler *) const;
- const MCSection *FindAssociatedSection() const {
- return getSubExpr()->FindAssociatedSection();
- }
+ const MCAsmLayout *Layout) const override;
- void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const;
+ void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const override;
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::Target;
diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
index 58fc95c..ae698c5 100644
--- a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
@@ -1,4 +1,4 @@
-//===-- AArch64MCTargetDesc.cpp - AArch64 Target Descriptions -------------===//
+//===-- AArch64MCTargetDesc.cpp - AArch64 Target Descriptions ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,18 +15,15 @@
#include "AArch64ELFStreamer.h"
#include "AArch64MCAsmInfo.h"
#include "InstPrinter/AArch64InstPrinter.h"
-#include "llvm/ADT/APInt.h"
#include "llvm/MC/MCCodeGenInfo.h"
-#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
-#define GET_REGINFO_MC_DESC
-#include "AArch64GenRegisterInfo.inc"
+using namespace llvm;
#define GET_INSTRINFO_MC_DESC
#include "AArch64GenInstrInfo.inc"
@@ -34,26 +31,29 @@
#define GET_SUBTARGETINFO_MC_DESC
#include "AArch64GenSubtargetInfo.inc"
-using namespace llvm;
+#define GET_REGINFO_MC_DESC
+#include "AArch64GenRegisterInfo.inc"
-MCSubtargetInfo *AArch64_MC::createAArch64MCSubtargetInfo(StringRef TT,
- StringRef CPU,
- StringRef FS) {
- MCSubtargetInfo *X = new MCSubtargetInfo();
- InitAArch64MCSubtargetInfo(X, TT, CPU, FS);
+static MCInstrInfo *createAArch64MCInstrInfo() {
+ MCInstrInfo *X = new MCInstrInfo();
+ InitAArch64MCInstrInfo(X);
return X;
}
+static MCSubtargetInfo *
+createAArch64MCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS) {
+ MCSubtargetInfo *X = new MCSubtargetInfo();
-static MCInstrInfo *createAArch64MCInstrInfo() {
- MCInstrInfo *X = new MCInstrInfo();
- InitAArch64MCInstrInfo(X);
+ if (CPU.empty())
+ CPU = "generic";
+
+ InitAArch64MCSubtargetInfo(X, TT, CPU, FS);
return X;
}
static MCRegisterInfo *createAArch64MCRegisterInfo(StringRef Triple) {
MCRegisterInfo *X = new MCRegisterInfo();
- InitAArch64MCRegisterInfo(X, AArch64::X30);
+ InitAArch64MCRegisterInfo(X, AArch64::LR);
return X;
}
@@ -61,9 +61,17 @@ static MCAsmInfo *createAArch64MCAsmInfo(const MCRegisterInfo &MRI,
StringRef TT) {
Triple TheTriple(TT);
- MCAsmInfo *MAI = new AArch64ELFMCAsmInfo();
- unsigned Reg = MRI.getDwarfRegNum(AArch64::XSP, true);
- MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(0, Reg, 0);
+ MCAsmInfo *MAI;
+ if (TheTriple.isOSDarwin())
+ MAI = new AArch64MCAsmInfoDarwin();
+ else {
+ assert(TheTriple.isOSBinFormatELF() && "Only expect Darwin or ELF");
+ MAI = new AArch64MCAsmInfoELF(TT);
+ }
+
+ // Initial state of the frame pointer is SP.
+ unsigned Reg = MRI.getDwarfRegNum(AArch64::SP, true);
+ MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, Reg, 0);
MAI->addInitialFrameState(Inst);
return MAI;
@@ -72,39 +80,35 @@ static MCAsmInfo *createAArch64MCAsmInfo(const MCRegisterInfo &MRI,
static MCCodeGenInfo *createAArch64MCCodeGenInfo(StringRef TT, Reloc::Model RM,
CodeModel::Model CM,
CodeGenOpt::Level OL) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
- if (RM == Reloc::Default || RM == Reloc::DynamicNoPIC) {
- // On ELF platforms the default static relocation model has a smart enough
- // linker to cope with referencing external symbols defined in a shared
- // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
- RM = Reloc::Static;
- }
+ Triple TheTriple(TT);
+ assert((TheTriple.isOSBinFormatELF() || TheTriple.isOSBinFormatMachO()) &&
+ "Only expect Darwin and ELF targets");
if (CM == CodeModel::Default)
CM = CodeModel::Small;
- else if (CM == CodeModel::JITDefault) {
- // The default MCJIT memory managers make no guarantees about where they can
- // find an executable page; JITed code needs to be able to refer to globals
- // no matter how far away they are.
+ // The default MCJIT memory managers make no guarantees about where they can
+ // find an executable page; JITed code needs to be able to refer to globals
+ // no matter how far away they are.
+ else if (CM == CodeModel::JITDefault)
CM = CodeModel::Large;
- }
+ else if (CM != CodeModel::Small && CM != CodeModel::Large)
+ report_fatal_error(
+ "Only small and large code models are allowed on AArch64");
+
+ // AArch64 Darwin is always PIC.
+ if (TheTriple.isOSDarwin())
+ RM = Reloc::PIC_;
+ // On ELF platforms the default static relocation model has a smart enough
+ // linker to cope with referencing external symbols defined in a shared
+ // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
+ else if (RM == Reloc::Default || RM == Reloc::DynamicNoPIC)
+ RM = Reloc::Static;
+ MCCodeGenInfo *X = new MCCodeGenInfo();
X->InitMCCodeGenInfo(RM, CM, OL);
return X;
}
-static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
- MCContext &Ctx, MCAsmBackend &MAB,
- raw_ostream &OS,
- MCCodeEmitter *Emitter,
- bool RelaxAll,
- bool NoExecStack) {
- Triple TheTriple(TT);
-
- return createAArch64ELFStreamer(Ctx, MAB, OS, Emitter, RelaxAll, NoExecStack);
-}
-
-
static MCInstPrinter *createAArch64MCInstPrinter(const Target &T,
unsigned SyntaxVariant,
const MCAsmInfo &MAI,
@@ -113,89 +117,109 @@ static MCInstPrinter *createAArch64MCInstPrinter(const Target &T,
const MCSubtargetInfo &STI) {
if (SyntaxVariant == 0)
return new AArch64InstPrinter(MAI, MII, MRI, STI);
- return 0;
-}
-
-namespace {
-
-class AArch64MCInstrAnalysis : public MCInstrAnalysis {
-public:
- AArch64MCInstrAnalysis(const MCInstrInfo *Info) : MCInstrAnalysis(Info) {}
-
- virtual bool isUnconditionalBranch(const MCInst &Inst) const {
- if (Inst.getOpcode() == AArch64::Bcc
- && Inst.getOperand(0).getImm() == A64CC::AL)
- return true;
- return MCInstrAnalysis::isUnconditionalBranch(Inst);
- }
-
- virtual bool isConditionalBranch(const MCInst &Inst) const {
- if (Inst.getOpcode() == AArch64::Bcc
- && Inst.getOperand(0).getImm() == A64CC::AL)
- return false;
- return MCInstrAnalysis::isConditionalBranch(Inst);
- }
-
- bool evaluateBranch(const MCInst &Inst, uint64_t Addr,
- uint64_t Size, uint64_t &Target) const {
- unsigned LblOperand = Inst.getOpcode() == AArch64::Bcc ? 1 : 0;
- // FIXME: We only handle PCRel branches for now.
- if (Info->get(Inst.getOpcode()).OpInfo[LblOperand].OperandType
- != MCOI::OPERAND_PCREL)
- return false;
-
- int64_t Imm = Inst.getOperand(LblOperand).getImm();
- Target = Addr + Imm;
- return true;
- }
-};
+ if (SyntaxVariant == 1)
+ return new AArch64AppleInstPrinter(MAI, MII, MRI, STI);
+ return nullptr;
}
-static MCInstrAnalysis *createAArch64MCInstrAnalysis(const MCInstrInfo *Info) {
- return new AArch64MCInstrAnalysis(Info);
-}
+static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
+ MCContext &Ctx, MCAsmBackend &TAB,
+ raw_ostream &OS, MCCodeEmitter *Emitter,
+ const MCSubtargetInfo &STI, bool RelaxAll,
+ bool NoExecStack) {
+ Triple TheTriple(TT);
+ if (TheTriple.isOSDarwin())
+ return createMachOStreamer(Ctx, TAB, OS, Emitter, RelaxAll,
+ /*LabelSections*/ true);
+ return createAArch64ELFStreamer(Ctx, TAB, OS, Emitter, RelaxAll, NoExecStack);
+}
+// Force static initialization.
extern "C" void LLVMInitializeAArch64TargetMC() {
// Register the MC asm info.
- RegisterMCAsmInfoFn A(TheAArch64Target, createAArch64MCAsmInfo);
+ RegisterMCAsmInfoFn X(TheAArch64leTarget, createAArch64MCAsmInfo);
+ RegisterMCAsmInfoFn Y(TheAArch64beTarget, createAArch64MCAsmInfo);
+ RegisterMCAsmInfoFn Z(TheARM64leTarget, createAArch64MCAsmInfo);
+ RegisterMCAsmInfoFn W(TheARM64beTarget, createAArch64MCAsmInfo);
// Register the MC codegen info.
- TargetRegistry::RegisterMCCodeGenInfo(TheAArch64Target,
+ TargetRegistry::RegisterMCCodeGenInfo(TheAArch64leTarget,
+ createAArch64MCCodeGenInfo);
+ TargetRegistry::RegisterMCCodeGenInfo(TheAArch64beTarget,
+ createAArch64MCCodeGenInfo);
+ TargetRegistry::RegisterMCCodeGenInfo(TheARM64leTarget,
+ createAArch64MCCodeGenInfo);
+ TargetRegistry::RegisterMCCodeGenInfo(TheARM64beTarget,
createAArch64MCCodeGenInfo);
// Register the MC instruction info.
- TargetRegistry::RegisterMCInstrInfo(TheAArch64Target,
+ TargetRegistry::RegisterMCInstrInfo(TheAArch64leTarget,
+ createAArch64MCInstrInfo);
+ TargetRegistry::RegisterMCInstrInfo(TheAArch64beTarget,
+ createAArch64MCInstrInfo);
+ TargetRegistry::RegisterMCInstrInfo(TheARM64leTarget,
+ createAArch64MCInstrInfo);
+ TargetRegistry::RegisterMCInstrInfo(TheARM64beTarget,
createAArch64MCInstrInfo);
// Register the MC register info.
- TargetRegistry::RegisterMCRegInfo(TheAArch64Target,
+ TargetRegistry::RegisterMCRegInfo(TheAArch64leTarget,
+ createAArch64MCRegisterInfo);
+ TargetRegistry::RegisterMCRegInfo(TheAArch64beTarget,
+ createAArch64MCRegisterInfo);
+ TargetRegistry::RegisterMCRegInfo(TheARM64leTarget,
+ createAArch64MCRegisterInfo);
+ TargetRegistry::RegisterMCRegInfo(TheARM64beTarget,
createAArch64MCRegisterInfo);
// Register the MC subtarget info.
- using AArch64_MC::createAArch64MCSubtargetInfo;
- TargetRegistry::RegisterMCSubtargetInfo(TheAArch64Target,
+ TargetRegistry::RegisterMCSubtargetInfo(TheAArch64leTarget,
+ createAArch64MCSubtargetInfo);
+ TargetRegistry::RegisterMCSubtargetInfo(TheAArch64beTarget,
+ createAArch64MCSubtargetInfo);
+ TargetRegistry::RegisterMCSubtargetInfo(TheARM64leTarget,
+ createAArch64MCSubtargetInfo);
+ TargetRegistry::RegisterMCSubtargetInfo(TheARM64beTarget,
createAArch64MCSubtargetInfo);
- // Register the MC instruction analyzer.
- TargetRegistry::RegisterMCInstrAnalysis(TheAArch64Target,
- createAArch64MCInstrAnalysis);
+ // Register the asm backend.
+ TargetRegistry::RegisterMCAsmBackend(TheAArch64leTarget,
+ createAArch64leAsmBackend);
+ TargetRegistry::RegisterMCAsmBackend(TheAArch64beTarget,
+ createAArch64beAsmBackend);
+ TargetRegistry::RegisterMCAsmBackend(TheARM64leTarget,
+ createAArch64leAsmBackend);
+ TargetRegistry::RegisterMCAsmBackend(TheARM64beTarget,
+ createAArch64beAsmBackend);
// Register the MC Code Emitter
- TargetRegistry::RegisterMCCodeEmitter(TheAArch64Target,
+ TargetRegistry::RegisterMCCodeEmitter(TheAArch64leTarget,
+ createAArch64MCCodeEmitter);
+ TargetRegistry::RegisterMCCodeEmitter(TheAArch64beTarget,
+ createAArch64MCCodeEmitter);
+ TargetRegistry::RegisterMCCodeEmitter(TheARM64leTarget,
+ createAArch64MCCodeEmitter);
+ TargetRegistry::RegisterMCCodeEmitter(TheARM64beTarget,
createAArch64MCCodeEmitter);
-
- // Register the asm backend.
- TargetRegistry::RegisterMCAsmBackend(TheAArch64Target,
- createAArch64AsmBackend);
// Register the object streamer.
- TargetRegistry::RegisterMCObjectStreamer(TheAArch64Target,
+ TargetRegistry::RegisterMCObjectStreamer(TheAArch64leTarget,
createMCStreamer);
+ TargetRegistry::RegisterMCObjectStreamer(TheAArch64beTarget,
+ createMCStreamer);
+ TargetRegistry::RegisterMCObjectStreamer(TheARM64leTarget, createMCStreamer);
+ TargetRegistry::RegisterMCObjectStreamer(TheARM64beTarget, createMCStreamer);
// Register the MCInstPrinter.
- TargetRegistry::RegisterMCInstPrinter(TheAArch64Target,
+ TargetRegistry::RegisterMCInstPrinter(TheAArch64leTarget,
+ createAArch64MCInstPrinter);
+ TargetRegistry::RegisterMCInstPrinter(TheAArch64beTarget,
+ createAArch64MCInstPrinter);
+ TargetRegistry::RegisterMCInstPrinter(TheARM64leTarget,
+ createAArch64MCInstPrinter);
+ TargetRegistry::RegisterMCInstPrinter(TheARM64beTarget,
createAArch64MCInstPrinter);
}
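The registration block repeats the same calls for each of the four target objects; a terser equivalent, sketched on the assumption that nothing here needs per-target special-casing except the asm backend (a sketch, not how the patch writes it):

    for (Target *T : {&TheAArch64leTarget, &TheAArch64beTarget,
                      &TheARM64leTarget, &TheARM64beTarget}) {
      TargetRegistry::RegisterMCAsmInfo(*T, createAArch64MCAsmInfo);
      TargetRegistry::RegisterMCCodeGenInfo(*T, createAArch64MCCodeGenInfo);
      TargetRegistry::RegisterMCInstrInfo(*T, createAArch64MCInstrInfo);
      TargetRegistry::RegisterMCRegInfo(*T, createAArch64MCRegisterInfo);
      TargetRegistry::RegisterMCSubtargetInfo(*T, createAArch64MCSubtargetInfo);
      TargetRegistry::RegisterMCCodeEmitter(*T, createAArch64MCCodeEmitter);
      TargetRegistry::RegisterMCObjectStreamer(*T, createMCStreamer);
      TargetRegistry::RegisterMCInstPrinter(*T, createAArch64MCInstPrinter);
    }
    // The asm backend still differs by endianness: createAArch64leAsmBackend
    // for the two little-endian entries, createAArch64beAsmBackend otherwise.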
diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
index 670e657..d886ea2 100644
--- a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
+++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
@@ -11,41 +11,45 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_AARCH64MCTARGETDESC_H
-#define LLVM_AARCH64MCTARGETDESC_H
+#ifndef AArch64MCTARGETDESC_H
+#define AArch64MCTARGETDESC_H
#include "llvm/Support/DataTypes.h"
+#include <string>
namespace llvm {
class MCAsmBackend;
class MCCodeEmitter;
class MCContext;
class MCInstrInfo;
-class MCObjectWriter;
class MCRegisterInfo;
+class MCObjectWriter;
class MCSubtargetInfo;
class StringRef;
class Target;
class raw_ostream;
-extern Target TheAArch64Target;
-
-namespace AArch64_MC {
- MCSubtargetInfo *createAArch64MCSubtargetInfo(StringRef TT, StringRef CPU,
- StringRef FS);
-}
+extern Target TheAArch64leTarget;
+extern Target TheAArch64beTarget;
+extern Target TheARM64leTarget;
+extern Target TheARM64beTarget;
MCCodeEmitter *createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
- const MCRegisterInfo &MRI,
- const MCSubtargetInfo &STI,
- MCContext &Ctx);
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx);
+MCAsmBackend *createAArch64leAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI, StringRef TT,
+ StringRef CPU);
+MCAsmBackend *createAArch64beAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI, StringRef TT,
+ StringRef CPU);
-MCObjectWriter *createAArch64ELFObjectWriter(raw_ostream &OS,
- uint8_t OSABI);
+MCObjectWriter *createAArch64ELFObjectWriter(raw_ostream &OS, uint8_t OSABI,
+ bool IsLittleEndian);
-MCAsmBackend *createAArch64AsmBackend(const Target &T,
- const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU);
+MCObjectWriter *createAArch64MachObjectWriter(raw_ostream &OS, uint32_t CPUType,
+ uint32_t CPUSubtype);
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp
new file mode 100644
index 0000000..ba95366
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp
@@ -0,0 +1,396 @@
+//===-- AArch64MachObjectWriter.cpp - AArch64 Mach Object Writer ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/AArch64FixupKinds.h"
+#include "MCTargetDesc/AArch64MCTargetDesc.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCAsmLayout.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCMachObjectWriter.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachO.h"
+using namespace llvm;
+
+namespace {
+class AArch64MachObjectWriter : public MCMachObjectTargetWriter {
+ bool getAArch64FixupKindMachOInfo(const MCFixup &Fixup, unsigned &RelocType,
+ const MCSymbolRefExpr *Sym,
+ unsigned &Log2Size, const MCAssembler &Asm);
+
+public:
+ AArch64MachObjectWriter(uint32_t CPUType, uint32_t CPUSubtype)
+ : MCMachObjectTargetWriter(true /* is64Bit */, CPUType, CPUSubtype,
+ /*UseAggressiveSymbolFolding=*/true) {}
+
+ void RecordRelocation(MachObjectWriter *Writer, const MCAssembler &Asm,
+ const MCAsmLayout &Layout, const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) override;
+};
+}
+
+bool AArch64MachObjectWriter::getAArch64FixupKindMachOInfo(
+ const MCFixup &Fixup, unsigned &RelocType, const MCSymbolRefExpr *Sym,
+ unsigned &Log2Size, const MCAssembler &Asm) {
+ RelocType = unsigned(MachO::ARM64_RELOC_UNSIGNED);
+ Log2Size = ~0U;
+
+ switch ((unsigned)Fixup.getKind()) {
+ default:
+ return false;
+
+ case FK_Data_1:
+ Log2Size = llvm::Log2_32(1);
+ return true;
+ case FK_Data_2:
+ Log2Size = llvm::Log2_32(2);
+ return true;
+ case FK_Data_4:
+ Log2Size = llvm::Log2_32(4);
+ if (Sym->getKind() == MCSymbolRefExpr::VK_GOT)
+ RelocType = unsigned(MachO::ARM64_RELOC_POINTER_TO_GOT);
+ return true;
+ case FK_Data_8:
+ Log2Size = llvm::Log2_32(8);
+ if (Sym->getKind() == MCSymbolRefExpr::VK_GOT)
+ RelocType = unsigned(MachO::ARM64_RELOC_POINTER_TO_GOT);
+ return true;
+ case AArch64::fixup_aarch64_add_imm12:
+ case AArch64::fixup_aarch64_ldst_imm12_scale1:
+ case AArch64::fixup_aarch64_ldst_imm12_scale2:
+ case AArch64::fixup_aarch64_ldst_imm12_scale4:
+ case AArch64::fixup_aarch64_ldst_imm12_scale8:
+ case AArch64::fixup_aarch64_ldst_imm12_scale16:
+ Log2Size = llvm::Log2_32(4);
+ switch (Sym->getKind()) {
+ default:
+ llvm_unreachable("Unexpected symbol reference variant kind!");
+ case MCSymbolRefExpr::VK_PAGEOFF:
+ RelocType = unsigned(MachO::ARM64_RELOC_PAGEOFF12);
+ return true;
+ case MCSymbolRefExpr::VK_GOTPAGEOFF:
+ RelocType = unsigned(MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12);
+ return true;
+ case MCSymbolRefExpr::VK_TLVPPAGEOFF:
+ RelocType = unsigned(MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12);
+ return true;
+ }
+ case AArch64::fixup_aarch64_pcrel_adrp_imm21:
+ Log2Size = llvm::Log2_32(4);
+ // This encompasses the relocation for the whole 21-bit value.
+ switch (Sym->getKind()) {
+ default:
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "ADR/ADRP relocations must be GOT relative");
+ case MCSymbolRefExpr::VK_PAGE:
+ RelocType = unsigned(MachO::ARM64_RELOC_PAGE21);
+ return true;
+ case MCSymbolRefExpr::VK_GOTPAGE:
+ RelocType = unsigned(MachO::ARM64_RELOC_GOT_LOAD_PAGE21);
+ return true;
+ case MCSymbolRefExpr::VK_TLVPPAGE:
+ RelocType = unsigned(MachO::ARM64_RELOC_TLVP_LOAD_PAGE21);
+ return true;
+ }
+ return true;
+ case AArch64::fixup_aarch64_pcrel_branch26:
+ case AArch64::fixup_aarch64_pcrel_call26:
+ Log2Size = llvm::Log2_32(4);
+ RelocType = unsigned(MachO::ARM64_RELOC_BRANCH26);
+ return true;
+ }
+}
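A worked case through the switch above: `adrp x0, _var@GOTPAGE` (the example instruction is mine) produces a fixup_aarch64_pcrel_adrp_imm21 fixup whose symbol variant is VK_GOTPAGE, so the routine reports:

    //   RelocType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21
    //   Log2Size  == 2      // Log2_32(4): the fixup covers a 4-byte word
    //   returns true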
+
+void AArch64MachObjectWriter::RecordRelocation(
+ MachObjectWriter *Writer, const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFragment *Fragment, const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) {
+ unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind());
+
+ // See <reloc.h>.
+ uint32_t FixupOffset = Layout.getFragmentOffset(Fragment);
+ unsigned Log2Size = 0;
+ int64_t Value = 0;
+ unsigned Index = 0;
+ unsigned IsExtern = 0;
+ unsigned Type = 0;
+ unsigned Kind = Fixup.getKind();
+
+ FixupOffset += Fixup.getOffset();
+
+ // AArch64 pcrel relocation addends do not include the section offset.
+ if (IsPCRel)
+ FixedValue += FixupOffset;
+
+ // ADRP fixups use relocations for the whole symbol value and only
+ // put the addend in the instruction itself. Clear out any value the
+  // generic code figured out from the symbol definition.
+ if (Kind == AArch64::fixup_aarch64_pcrel_adrp_imm21)
+ FixedValue = 0;
+
+ // imm19 relocations are for conditional branches, which require
+ // assembler local symbols. If we got here, that's not what we have,
+ // so complain loudly.
+ if (Kind == AArch64::fixup_aarch64_pcrel_branch19) {
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "conditional branch requires assembler-local"
+ " label. '" +
+ Target.getSymA()->getSymbol().getName() +
+ "' is external.");
+ return;
+ }
+
+ // 14-bit branch relocations should only target internal labels, and so
+ // should never get here.
+ if (Kind == AArch64::fixup_aarch64_pcrel_branch14) {
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "Invalid relocation on conditional branch!");
+ return;
+ }
+
+ if (!getAArch64FixupKindMachOInfo(Fixup, Type, Target.getSymA(), Log2Size,
+ Asm)) {
+ Asm.getContext().FatalError(Fixup.getLoc(), "unknown AArch64 fixup kind!");
+ return;
+ }
+
+ Value = Target.getConstant();
+
+ if (Target.isAbsolute()) { // constant
+ // FIXME: Should this always be extern?
+ // SymbolNum of 0 indicates the absolute section.
+ Type = MachO::ARM64_RELOC_UNSIGNED;
+ Index = 0;
+
+ if (IsPCRel) {
+ IsExtern = 1;
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "PC relative absolute relocation!");
+
+ // FIXME: x86_64 sets the type to a branch reloc here. Should we do
+ // something similar?
+ }
+ } else if (Target.getSymB()) { // A - B + constant
+ const MCSymbol *A = &Target.getSymA()->getSymbol();
+ const MCSymbolData &A_SD = Asm.getSymbolData(*A);
+ const MCSymbolData *A_Base = Asm.getAtom(&A_SD);
+
+ const MCSymbol *B = &Target.getSymB()->getSymbol();
+ const MCSymbolData &B_SD = Asm.getSymbolData(*B);
+ const MCSymbolData *B_Base = Asm.getAtom(&B_SD);
+
+ // Check for "_foo@got - .", which comes through here as:
+ // Ltmp0:
+ // ... _foo@got - Ltmp0
+ if (Target.getSymA()->getKind() == MCSymbolRefExpr::VK_GOT &&
+ Target.getSymB()->getKind() == MCSymbolRefExpr::VK_None &&
+ Layout.getSymbolOffset(&B_SD) ==
+ Layout.getFragmentOffset(Fragment) + Fixup.getOffset()) {
+ // SymB is the PC, so use a PC-rel pointer-to-GOT relocation.
+ Index = A_Base->getIndex();
+ IsExtern = 1;
+ Type = MachO::ARM64_RELOC_POINTER_TO_GOT;
+ IsPCRel = 1;
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = FixupOffset;
+ MRE.r_word1 = ((Index << 0) | (IsPCRel << 24) | (Log2Size << 25) |
+ (IsExtern << 27) | (Type << 28));
+ Writer->addRelocation(Fragment->getParent(), MRE);
+ return;
+ } else if (Target.getSymA()->getKind() != MCSymbolRefExpr::VK_None ||
+ Target.getSymB()->getKind() != MCSymbolRefExpr::VK_None)
+ // Otherwise, neither symbol can be modified.
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "unsupported relocation of modified symbol");
+
+ // We don't support PCrel relocations of differences.
+ if (IsPCRel)
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "unsupported pc-relative relocation of "
+ "difference");
+
+ // AArch64 always uses external relocations. If there is no symbol to use as
+ // a base address (a local symbol with no preceding non-local symbol),
+ // error out.
+ //
+ // FIXME: We should probably just synthesize an external symbol and use
+ // that.
+ if (!A_Base)
+ Asm.getContext().FatalError(
+ Fixup.getLoc(),
+ "unsupported relocation of local symbol '" + A->getName() +
+ "'. Must have non-local symbol earlier in section.");
+ if (!B_Base)
+ Asm.getContext().FatalError(
+ Fixup.getLoc(),
+ "unsupported relocation of local symbol '" + B->getName() +
+ "'. Must have non-local symbol earlier in section.");
+
+ if (A_Base == B_Base && A_Base)
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "unsupported relocation with identical base");
+
+ Value += (!A_SD.getFragment() ? 0
+ : Writer->getSymbolAddress(&A_SD, Layout)) -
+ (!A_Base || !A_Base->getFragment()
+ ? 0
+ : Writer->getSymbolAddress(A_Base, Layout));
+ Value -= (!B_SD.getFragment() ? 0
+ : Writer->getSymbolAddress(&B_SD, Layout)) -
+ (!B_Base || !B_Base->getFragment()
+ ? 0
+ : Writer->getSymbolAddress(B_Base, Layout));
+
+ Index = A_Base->getIndex();
+ IsExtern = 1;
+ Type = MachO::ARM64_RELOC_UNSIGNED;
+
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = FixupOffset;
+ MRE.r_word1 = ((Index << 0) | (IsPCRel << 24) | (Log2Size << 25) |
+ (IsExtern << 27) | (Type << 28));
+ Writer->addRelocation(Fragment->getParent(), MRE);
+
+ Index = B_Base->getIndex();
+ IsExtern = 1;
+ Type = MachO::ARM64_RELOC_SUBTRACTOR;
+ } else { // A + constant
+ const MCSymbol *Symbol = &Target.getSymA()->getSymbol();
+ const MCSymbolData &SD = Asm.getSymbolData(*Symbol);
+ const MCSymbolData *Base = Asm.getAtom(&SD);
+ const MCSectionMachO &Section = static_cast<const MCSectionMachO &>(
+ Fragment->getParent()->getSection());
+
+ // If the symbol is a variable and we weren't able to get a Base for it
+    // (i.e., it's not in the symbol table associated with a section), resolve
+    // the relocation based on its expansion instead.
+ if (Symbol->isVariable() && !Base) {
+ // If the evaluation is an absolute value, just use that directly
+ // to keep things easy.
+ int64_t Res;
+ if (SD.getSymbol().getVariableValue()->EvaluateAsAbsolute(
+ Res, Layout, Writer->getSectionAddressMap())) {
+ FixedValue = Res;
+ return;
+ }
+
+ // FIXME: Will the Target we already have ever have any data in it
+ // we need to preserve and merge with the new Target? How about
+ // the FixedValue?
+ if (!Symbol->getVariableValue()->EvaluateAsRelocatable(Target, &Layout))
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "unable to resolve variable '" +
+ Symbol->getName() + "'");
+ return RecordRelocation(Writer, Asm, Layout, Fragment, Fixup, Target,
+ FixedValue);
+ }
+
+ // Relocations inside debug sections always use local relocations when
+ // possible. This seems to be done because the debugger doesn't fully
+ // understand relocation entries and expects to find values that
+ // have already been fixed up.
+ if (Symbol->isInSection()) {
+ if (Section.hasAttribute(MachO::S_ATTR_DEBUG))
+ Base = nullptr;
+ }
+
+ // AArch64 uses external relocations as much as possible. For debug
+ // sections, and for pointer-sized relocations (.quad), we allow section
+ // relocations. It's code sections that run into trouble.
+ if (Base) {
+ Index = Base->getIndex();
+ IsExtern = 1;
+
+ // Add the local offset, if needed.
+ if (Base != &SD)
+ Value += Layout.getSymbolOffset(&SD) - Layout.getSymbolOffset(Base);
+ } else if (Symbol->isInSection()) {
+ // Pointer-sized relocations can use a local relocation. Otherwise,
+ // we have to be in a debug info section.
+ if (!Section.hasAttribute(MachO::S_ATTR_DEBUG) && Log2Size != 3)
+ Asm.getContext().FatalError(
+ Fixup.getLoc(),
+ "unsupported relocation of local symbol '" + Symbol->getName() +
+ "'. Must have non-local symbol earlier in section.");
+ // Adjust the relocation to be section-relative.
+ // The index is the section ordinal (1-based).
+ const MCSectionData &SymSD =
+ Asm.getSectionData(SD.getSymbol().getSection());
+ Index = SymSD.getOrdinal() + 1;
+ IsExtern = 0;
+ Value += Writer->getSymbolAddress(&SD, Layout);
+
+ if (IsPCRel)
+ Value -= Writer->getFragmentAddress(Fragment, Layout) +
+ Fixup.getOffset() + (1ULL << Log2Size);
+ } else {
+ // Resolve constant variables.
+ if (SD.getSymbol().isVariable()) {
+ int64_t Res;
+ if (SD.getSymbol().getVariableValue()->EvaluateAsAbsolute(
+ Res, Layout, Writer->getSectionAddressMap())) {
+ FixedValue = Res;
+ return;
+ }
+ }
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "unsupported relocation of variable '" +
+ Symbol->getName() + "'");
+ }
+ }
+
+ // If the relocation kind is Branch26, Page21, or Pageoff12, any addend
+ // is represented via an Addend relocation, not encoded directly into
+ // the instruction.
+ if ((Type == MachO::ARM64_RELOC_BRANCH26 ||
+ Type == MachO::ARM64_RELOC_PAGE21 ||
+ Type == MachO::ARM64_RELOC_PAGEOFF12) &&
+ Value) {
+ assert((Value & 0xff000000) == 0 && "Added relocation out of range!");
+
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = FixupOffset;
+ MRE.r_word1 = ((Index << 0) | (IsPCRel << 24) | (Log2Size << 25) |
+ (IsExtern << 27) | (Type << 28));
+ Writer->addRelocation(Fragment->getParent(), MRE);
+
+ // Now set up the Addend relocation.
+ Type = MachO::ARM64_RELOC_ADDEND;
+ Index = Value;
+ IsPCRel = 0;
+ Log2Size = 2;
+ IsExtern = 0;
+
+ // Put zero into the instruction itself. The addend is in the relocation.
+ Value = 0;
+ }
+
+ // If there's any addend left to handle, encode it in the instruction.
+ FixedValue = Value;
+
+ // struct relocation_info (8 bytes)
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = FixupOffset;
+ MRE.r_word1 = ((Index << 0) | (IsPCRel << 24) | (Log2Size << 25) |
+ (IsExtern << 27) | (Type << 28));
+ Writer->addRelocation(Fragment->getParent(), MRE);
+}
+
+MCObjectWriter *llvm::createAArch64MachObjectWriter(raw_ostream &OS,
+ uint32_t CPUType,
+ uint32_t CPUSubtype) {
+ return createMachObjectWriter(
+ new AArch64MachObjectWriter(CPUType, CPUSubtype), OS,
+ /*IsLittleEndian=*/true);
+}
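
The writer above packs each relocation into the two 32-bit words of a Mach-O relocation_info entry: r_word0 carries the fixup offset, and r_word1 packs the symbol/section index (24 bits), the pc-relative bit, the log2 size, the extern bit, and the relocation type. A minimal standalone sketch of that packing follows; the struct and helper names are invented for illustration, and only the numeric value 2 for ARM64_RELOC_BRANCH26 is assumed from the Mach-O headers.

#include <cstdint>
#include <cstdio>

// Simplified model of MachO::any_relocation_info as filled in above:
// r_word1 = symbolnum[23:0] | pcrel<<24 | length<<25 | extern<<27 | type<<28.
struct RelocInfo {
  uint32_t Word0; // offset of the fixup within its section
  uint32_t Word1; // packed fields
};

static RelocInfo packReloc(uint32_t Offset, uint32_t Index, bool IsPCRel,
                           unsigned Log2Size, bool IsExtern, unsigned Type) {
  RelocInfo R;
  R.Word0 = Offset;
  R.Word1 = (Index << 0) | (uint32_t(IsPCRel) << 24) | (Log2Size << 25) |
            (uint32_t(IsExtern) << 27) | (Type << 28);
  return R;
}

int main() {
  // An external, pc-relative, 4-byte (Log2Size == 2) branch; the type value 2
  // is assumed to be MachO::ARM64_RELOC_BRANCH26.
  RelocInfo R = packReloc(/*Offset=*/0x10, /*Index=*/5, /*IsPCRel=*/true,
                          /*Log2Size=*/2, /*IsExtern=*/true, /*Type=*/2);
  std::printf("index=%u type=%u r_word1=0x%08x\n",
              R.Word1 & 0xffffff, R.Word1 >> 28, R.Word1);
  return 0;
}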
diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp
new file mode 100644
index 0000000..dcc1a3c
--- /dev/null
+++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.cpp
@@ -0,0 +1,41 @@
+//===-- AArch64TargetStreamer.cpp - AArch64TargetStreamer class -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the AArch64TargetStreamer class.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/ADT/MapVector.h"
+#include "llvm/MC/ConstantPools.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCStreamer.h"
+
+using namespace llvm;
+
+//
+// AArch64TargetStreamer Implementation
+//
+AArch64TargetStreamer::AArch64TargetStreamer(MCStreamer &S)
+ : MCTargetStreamer(S), ConstantPools(new AssemblerConstantPools()) {}
+
+AArch64TargetStreamer::~AArch64TargetStreamer() {}
+
+// The constant pool handling is shared by all AArch64TargetStreamer
+// implementations.
+const MCExpr *AArch64TargetStreamer::addConstantPoolEntry(const MCExpr *Expr,
+ unsigned Size) {
+ return ConstantPools->addEntry(Streamer, Expr, Size);
+}
+
+void AArch64TargetStreamer::emitCurrentConstantPool() {
+ ConstantPools->emitForCurrentSection(Streamer);
+}
+
+// finish() - write out any non-empty assembler constant pools.
+void AArch64TargetStreamer::finish() { ConstantPools->emitAll(Streamer); }
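
AArch64TargetStreamer owns a single AssemblerConstantPools object: addConstantPoolEntry defers a constant (for example from an ldr-literal pseudo) into the pool for the current section and hands back an expression referring to it, and finish() flushes every non-empty pool. A rough standalone model of that bookkeeping, with invented names and plain integers standing in for MCExpr values; the real API in llvm/MC/ConstantPools.h differs in detail.

#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

// Toy stand-in for AssemblerConstantPools: one pool of (value, size) entries
// per section, flushed when the streamer finishes.
class ToyConstantPools {
  std::map<std::string, std::vector<std::pair<uint64_t, unsigned>>> Pools;

public:
  // Record an entry for the given section and return its slot index, playing
  // the role of the MCExpr label the real addEntry() hands back.
  size_t addEntry(const std::string &Section, uint64_t Value, unsigned Size) {
    Pools[Section].emplace_back(Value, Size);
    return Pools[Section].size() - 1;
  }

  // What AArch64TargetStreamer::finish() ultimately triggers via emitAll().
  void emitAll() {
    for (const auto &P : Pools) {
      std::cout << "; constant pool for " << P.first << "\n";
      for (const auto &E : P.second)
        std::cout << "  .const size=" << E.second << " value=0x" << std::hex
                  << E.first << std::dec << "\n";
    }
  }
};

int main() {
  ToyConstantPools CP;
  size_t Slot = CP.addEntry(".text", 0x1234567890abcdefULL, 8); // ldr x0, =imm
  std::cout << "load refers to pool slot " << Slot << "\n";
  CP.emitAll();
  return 0;
}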
diff --git a/contrib/llvm/lib/Target/AArch64/README.txt b/contrib/llvm/lib/Target/AArch64/README.txt
deleted file mode 100644
index 601990f..0000000
--- a/contrib/llvm/lib/Target/AArch64/README.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-This file will contain changes that need to be made before AArch64 can become an
-officially supported target. Currently a placeholder.
diff --git a/contrib/llvm/lib/Target/AArch64/TargetInfo/AArch64TargetInfo.cpp b/contrib/llvm/lib/Target/AArch64/TargetInfo/AArch64TargetInfo.cpp
index 377b533..3a382c1 100644
--- a/contrib/llvm/lib/Target/AArch64/TargetInfo/AArch64TargetInfo.cpp
+++ b/contrib/llvm/lib/Target/AArch64/TargetInfo/AArch64TargetInfo.cpp
@@ -1,4 +1,4 @@
-//===-- AArch64TargetInfo.cpp - AArch64 Target Implementation -------------===//
+//===-- AArch64TargetInfo.cpp - AArch64 Target Implementation -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -6,19 +6,26 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file contains the key registration step for the architecture.
-//
-//===----------------------------------------------------------------------===//
-#include "AArch64.h"
-#include "llvm/IR/Module.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
-Target llvm::TheAArch64Target;
+namespace llvm {
+Target TheAArch64leTarget;
+Target TheAArch64beTarget;
+Target TheARM64leTarget;
+Target TheARM64beTarget;
+} // end namespace llvm
extern "C" void LLVMInitializeAArch64TargetInfo() {
- RegisterTarget<Triple::aarch64, /*HasJIT=*/true>
- X(TheAArch64Target, "aarch64", "AArch64 (ARM 64-bit target)");
+ RegisterTarget<Triple::arm64, /*HasJIT=*/true> X(TheARM64leTarget, "arm64",
+ "AArch64 (little endian)");
+ RegisterTarget<Triple::arm64_be, /*HasJIT=*/true> Y(TheARM64beTarget, "arm64_be",
+ "AArch64 (big endian)");
+
+ RegisterTarget<Triple::aarch64, /*HasJIT=*/true> Z(
+ TheAArch64leTarget, "aarch64", "AArch64 (little endian)");
+ RegisterTarget<Triple::aarch64_be, /*HasJIT=*/true> W(
+ TheAArch64beTarget, "aarch64_be", "AArch64 (big endian)");
}
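
The registration above makes the little- and big-endian AArch64 targets (and their arm64 aliases) discoverable by name through the TargetRegistry, so tools can pick a backend from a triple string. A small self-contained sketch of the registration/lookup pattern; the registry and target types here are simplified stand-ins invented for illustration, not the real llvm::TargetRegistry API.

#include <iostream>
#include <map>
#include <string>

// Simplified stand-ins for llvm::Target and the TargetRegistry; the real
// registry also records factories for MC components, codegen, and so on.
struct ToyTarget {
  std::string Name, Desc;
};

class ToyRegistry {
  std::map<std::string, ToyTarget *> Targets;

public:
  void registerTarget(ToyTarget &T, const std::string &Name,
                      const std::string &Desc) {
    T.Name = Name;
    T.Desc = Desc;
    Targets[Name] = &T;
  }
  ToyTarget *lookup(const std::string &Name) const {
    auto It = Targets.find(Name);
    return It == Targets.end() ? nullptr : It->second;
  }
};

// Static Target objects, mirroring TheAArch64leTarget and friends above.
static ToyTarget TheAArch64leTarget, TheARM64leTarget;

int main() {
  ToyRegistry Registry;
  Registry.registerTarget(TheAArch64leTarget, "aarch64", "AArch64 (little endian)");
  Registry.registerTarget(TheARM64leTarget, "arm64", "AArch64 (little endian)");
  if (ToyTarget *T = Registry.lookup("arm64"))
    std::cout << T->Name << ": " << T->Desc << "\n";
  return 0;
}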
diff --git a/contrib/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp b/contrib/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp
index 2a97cd6..3c24bb3 100644
--- a/contrib/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp
+++ b/contrib/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp
@@ -18,7 +18,7 @@
using namespace llvm;
-StringRef NamedImmMapper::toString(uint32_t Value, bool &Valid) const {
+StringRef AArch64NamedImmMapper::toString(uint32_t Value, bool &Valid) const {
for (unsigned i = 0; i < NumPairs; ++i) {
if (Pairs[i].Value == Value) {
Valid = true;
@@ -30,7 +30,7 @@ StringRef NamedImmMapper::toString(uint32_t Value, bool &Valid) const {
return StringRef();
}
-uint32_t NamedImmMapper::fromString(StringRef Name, bool &Valid) const {
+uint32_t AArch64NamedImmMapper::fromString(StringRef Name, bool &Valid) const {
std::string LowerCaseName = Name.lower();
for (unsigned i = 0; i < NumPairs; ++i) {
if (Pairs[i].Name == LowerCaseName) {
@@ -43,11 +43,11 @@ uint32_t NamedImmMapper::fromString(StringRef Name, bool &Valid) const {
return -1;
}
-bool NamedImmMapper::validImm(uint32_t Value) const {
+bool AArch64NamedImmMapper::validImm(uint32_t Value) const {
return Value < TooBigImm;
}
-const NamedImmMapper::Mapping A64AT::ATMapper::ATPairs[] = {
+const AArch64NamedImmMapper::Mapping AArch64AT::ATMapper::ATPairs[] = {
{"s1e1r", S1E1R},
{"s1e2r", S1E2R},
{"s1e3r", S1E3R},
@@ -62,10 +62,10 @@ const NamedImmMapper::Mapping A64AT::ATMapper::ATPairs[] = {
{"s12e0w", S12E0W},
};
-A64AT::ATMapper::ATMapper()
- : NamedImmMapper(ATPairs, 0) {}
+AArch64AT::ATMapper::ATMapper()
+ : AArch64NamedImmMapper(ATPairs, 0) {}
-const NamedImmMapper::Mapping A64DB::DBarrierMapper::DBarrierPairs[] = {
+const AArch64NamedImmMapper::Mapping AArch64DB::DBarrierMapper::DBarrierPairs[] = {
{"oshld", OSHLD},
{"oshst", OSHST},
{"osh", OSH},
@@ -80,10 +80,10 @@ const NamedImmMapper::Mapping A64DB::DBarrierMapper::DBarrierPairs[] = {
{"sy", SY}
};
-A64DB::DBarrierMapper::DBarrierMapper()
- : NamedImmMapper(DBarrierPairs, 16u) {}
+AArch64DB::DBarrierMapper::DBarrierMapper()
+ : AArch64NamedImmMapper(DBarrierPairs, 16u) {}
-const NamedImmMapper::Mapping A64DC::DCMapper::DCPairs[] = {
+const AArch64NamedImmMapper::Mapping AArch64DC::DCMapper::DCPairs[] = {
{"zva", ZVA},
{"ivac", IVAC},
{"isw", ISW},
@@ -94,26 +94,26 @@ const NamedImmMapper::Mapping A64DC::DCMapper::DCPairs[] = {
{"cisw", CISW}
};
-A64DC::DCMapper::DCMapper()
- : NamedImmMapper(DCPairs, 0) {}
+AArch64DC::DCMapper::DCMapper()
+ : AArch64NamedImmMapper(DCPairs, 0) {}
-const NamedImmMapper::Mapping A64IC::ICMapper::ICPairs[] = {
+const AArch64NamedImmMapper::Mapping AArch64IC::ICMapper::ICPairs[] = {
{"ialluis", IALLUIS},
{"iallu", IALLU},
{"ivau", IVAU}
};
-A64IC::ICMapper::ICMapper()
- : NamedImmMapper(ICPairs, 0) {}
+AArch64IC::ICMapper::ICMapper()
+ : AArch64NamedImmMapper(ICPairs, 0) {}
-const NamedImmMapper::Mapping A64ISB::ISBMapper::ISBPairs[] = {
+const AArch64NamedImmMapper::Mapping AArch64ISB::ISBMapper::ISBPairs[] = {
{"sy", SY},
};
-A64ISB::ISBMapper::ISBMapper()
- : NamedImmMapper(ISBPairs, 16) {}
+AArch64ISB::ISBMapper::ISBMapper()
+ : AArch64NamedImmMapper(ISBPairs, 16) {}
-const NamedImmMapper::Mapping A64PRFM::PRFMMapper::PRFMPairs[] = {
+const AArch64NamedImmMapper::Mapping AArch64PRFM::PRFMMapper::PRFMPairs[] = {
{"pldl1keep", PLDL1KEEP},
{"pldl1strm", PLDL1STRM},
{"pldl2keep", PLDL2KEEP},
@@ -134,19 +134,19 @@ const NamedImmMapper::Mapping A64PRFM::PRFMMapper::PRFMPairs[] = {
{"pstl3strm", PSTL3STRM}
};
-A64PRFM::PRFMMapper::PRFMMapper()
- : NamedImmMapper(PRFMPairs, 32) {}
+AArch64PRFM::PRFMMapper::PRFMMapper()
+ : AArch64NamedImmMapper(PRFMPairs, 32) {}
-const NamedImmMapper::Mapping A64PState::PStateMapper::PStatePairs[] = {
+const AArch64NamedImmMapper::Mapping AArch64PState::PStateMapper::PStatePairs[] = {
{"spsel", SPSel},
{"daifset", DAIFSet},
{"daifclr", DAIFClr}
};
-A64PState::PStateMapper::PStateMapper()
- : NamedImmMapper(PStatePairs, 0) {}
+AArch64PState::PStateMapper::PStateMapper()
+ : AArch64NamedImmMapper(PStatePairs, 0) {}
-const NamedImmMapper::Mapping A64SysReg::MRSMapper::MRSPairs[] = {
+const AArch64NamedImmMapper::Mapping AArch64SysReg::MRSMapper::MRSPairs[] = {
{"mdccsr_el0", MDCCSR_EL0},
{"dbgdtrrx_el0", DBGDTRRX_EL0},
{"mdrar_el1", MDRAR_EL1},
@@ -176,16 +176,16 @@ const NamedImmMapper::Mapping A64SysReg::MRSMapper::MRSPairs[] = {
{"id_isar3_el1", ID_ISAR3_EL1},
{"id_isar4_el1", ID_ISAR4_EL1},
{"id_isar5_el1", ID_ISAR5_EL1},
- {"id_aa64pfr0_el1", ID_AA64PFR0_EL1},
- {"id_aa64pfr1_el1", ID_AA64PFR1_EL1},
- {"id_aa64dfr0_el1", ID_AA64DFR0_EL1},
- {"id_aa64dfr1_el1", ID_AA64DFR1_EL1},
- {"id_aa64afr0_el1", ID_AA64AFR0_EL1},
- {"id_aa64afr1_el1", ID_AA64AFR1_EL1},
- {"id_aa64isar0_el1", ID_AA64ISAR0_EL1},
- {"id_aa64isar1_el1", ID_AA64ISAR1_EL1},
- {"id_aa64mmfr0_el1", ID_AA64MMFR0_EL1},
- {"id_aa64mmfr1_el1", ID_AA64MMFR1_EL1},
+ {"id_aa64pfr0_el1", ID_A64PFR0_EL1},
+ {"id_aa64pfr1_el1", ID_A64PFR1_EL1},
+ {"id_aa64dfr0_el1", ID_A64DFR0_EL1},
+ {"id_aa64dfr1_el1", ID_A64DFR1_EL1},
+ {"id_aa64afr0_el1", ID_A64AFR0_EL1},
+ {"id_aa64afr1_el1", ID_A64AFR1_EL1},
+ {"id_aa64isar0_el1", ID_A64ISAR0_EL1},
+ {"id_aa64isar1_el1", ID_A64ISAR1_EL1},
+ {"id_aa64mmfr0_el1", ID_A64MMFR0_EL1},
+ {"id_aa64mmfr1_el1", ID_A64MMFR1_EL1},
{"mvfr0_el1", MVFR0_EL1},
{"mvfr1_el1", MVFR1_EL1},
{"mvfr2_el1", MVFR2_EL1},
@@ -245,12 +245,13 @@ const NamedImmMapper::Mapping A64SysReg::MRSMapper::MRSPairs[] = {
{"ich_elsr_el2", ICH_ELSR_EL2}
};
-A64SysReg::MRSMapper::MRSMapper() {
+AArch64SysReg::MRSMapper::MRSMapper(uint64_t FeatureBits)
+ : SysRegMapper(FeatureBits) {
InstPairs = &MRSPairs[0];
NumInstPairs = llvm::array_lengthof(MRSPairs);
}
-const NamedImmMapper::Mapping A64SysReg::MSRMapper::MSRPairs[] = {
+const AArch64NamedImmMapper::Mapping AArch64SysReg::MSRMapper::MSRPairs[] = {
{"dbgdtrtx_el0", DBGDTRTX_EL0},
{"oslar_el1", OSLAR_EL1},
{"pmswinc_el0", PMSWINC_EL0},
@@ -268,13 +269,14 @@ const NamedImmMapper::Mapping A64SysReg::MSRMapper::MSRPairs[] = {
{"icc_sgi0r_el1", ICC_SGI0R_EL1}
};
-A64SysReg::MSRMapper::MSRMapper() {
+AArch64SysReg::MSRMapper::MSRMapper(uint64_t FeatureBits)
+ : SysRegMapper(FeatureBits) {
InstPairs = &MSRPairs[0];
NumInstPairs = llvm::array_lengthof(MSRPairs);
}
-const NamedImmMapper::Mapping A64SysReg::SysRegMapper::SysRegPairs[] = {
+const AArch64NamedImmMapper::Mapping AArch64SysReg::SysRegMapper::SysRegPairs[] = {
{"osdtrrx_el1", OSDTRRX_EL1},
{"osdtrtx_el1", OSDTRTX_EL1},
{"teecr32_el1", TEECR32_EL1},
@@ -753,10 +755,16 @@ const NamedImmMapper::Mapping A64SysReg::SysRegMapper::SysRegPairs[] = {
{"ich_lr15_el2", ICH_LR15_EL2}
};
+const AArch64NamedImmMapper::Mapping
+AArch64SysReg::SysRegMapper::CycloneSysRegPairs[] = {
+ {"cpm_ioacc_ctl_el3", CPM_IOACC_CTL_EL3}
+};
+
uint32_t
-A64SysReg::SysRegMapper::fromString(StringRef Name, bool &Valid) const {
- // First search the registers shared by all
+AArch64SysReg::SysRegMapper::fromString(StringRef Name, bool &Valid) const {
std::string NameLower = Name.lower();
+
+ // First search the registers shared by all
for (unsigned i = 0; i < array_lengthof(SysRegPairs); ++i) {
if (SysRegPairs[i].Name == NameLower) {
Valid = true;
@@ -764,6 +772,16 @@ A64SysReg::SysRegMapper::fromString(StringRef Name, bool &Valid) const {
}
}
+ // Next search for target specific registers
+ if (FeatureBits & AArch64::ProcCyclone) {
+ for (unsigned i = 0; i < array_lengthof(CycloneSysRegPairs); ++i) {
+ if (CycloneSysRegPairs[i].Name == NameLower) {
+ Valid = true;
+ return CycloneSysRegPairs[i].Value;
+ }
+ }
+ }
+
// Now try the instruction-specific registers (either read-only or
// write-only).
for (unsigned i = 0; i < NumInstPairs; ++i) {
@@ -796,7 +814,8 @@ A64SysReg::SysRegMapper::fromString(StringRef Name, bool &Valid) const {
}
std::string
-A64SysReg::SysRegMapper::toString(uint32_t Bits, bool &Valid) const {
+AArch64SysReg::SysRegMapper::toString(uint32_t Bits, bool &Valid) const {
+ // First search the registers shared by all
for (unsigned i = 0; i < array_lengthof(SysRegPairs); ++i) {
if (SysRegPairs[i].Value == Bits) {
Valid = true;
@@ -804,6 +823,18 @@ A64SysReg::SysRegMapper::toString(uint32_t Bits, bool &Valid) const {
}
}
+ // Next search for target specific registers
+ if (FeatureBits & AArch64::ProcCyclone) {
+ for (unsigned i = 0; i < array_lengthof(CycloneSysRegPairs); ++i) {
+ if (CycloneSysRegPairs[i].Value == Bits) {
+ Valid = true;
+ return CycloneSysRegPairs[i].Name;
+ }
+ }
+ }
+
+ // Now try the instruction-specific registers (either read-only or
+ // write-only).
for (unsigned i = 0; i < NumInstPairs; ++i) {
if (InstPairs[i].Value == Bits) {
Valid = true;
@@ -831,7 +862,7 @@ A64SysReg::SysRegMapper::toString(uint32_t Bits, bool &Valid) const {
+ "_c" + utostr(CRm) + "_" + utostr(Op2);
}
-const NamedImmMapper::Mapping A64TLBI::TLBIMapper::TLBIPairs[] = {
+const AArch64NamedImmMapper::Mapping AArch64TLBI::TLBIMapper::TLBIPairs[] = {
{"ipas2e1is", IPAS2E1IS},
{"ipas2le1is", IPAS2LE1IS},
{"vmalle1is", VMALLE1IS},
@@ -866,308 +897,5 @@ const NamedImmMapper::Mapping A64TLBI::TLBIMapper::TLBIPairs[] = {
{"vaale1", VAALE1}
};
-A64TLBI::TLBIMapper::TLBIMapper()
- : NamedImmMapper(TLBIPairs, 0) {}
-
-bool A64Imms::isFPImm(const APFloat &Val, uint32_t &Imm8Bits) {
- const fltSemantics &Sem = Val.getSemantics();
- unsigned FracBits = APFloat::semanticsPrecision(Sem) - 1;
-
- uint32_t ExpMask;
- switch (FracBits) {
- case 10: // IEEE half-precision
- ExpMask = 0x1f;
- break;
- case 23: // IEEE single-precision
- ExpMask = 0xff;
- break;
- case 52: // IEEE double-precision
- ExpMask = 0x7ff;
- break;
- case 112: // IEEE quad-precision
- // No immediates are valid for double precision.
- return false;
- default:
- llvm_unreachable("Only half, single and double precision supported");
- }
-
- uint32_t ExpStart = FracBits;
- uint64_t FracMask = (1ULL << FracBits) - 1;
-
- uint32_t Sign = Val.isNegative();
-
- uint64_t Bits= Val.bitcastToAPInt().getLimitedValue();
- uint64_t Fraction = Bits & FracMask;
- int32_t Exponent = ((Bits >> ExpStart) & ExpMask);
- Exponent -= ExpMask >> 1;
-
- // S[d] = imm8<7>:NOT(imm8<6>):Replicate(imm8<6>, 5):imm8<5:0>:Zeros(19)
- // D[d] = imm8<7>:NOT(imm8<6>):Replicate(imm8<6>, 8):imm8<5:0>:Zeros(48)
- // This translates to: only 4 bits of fraction; -3 <= exp <= 4.
- uint64_t A64FracStart = FracBits - 4;
- uint64_t A64FracMask = 0xf;
-
- // Are there too many fraction bits?
- if (Fraction & ~(A64FracMask << A64FracStart))
- return false;
-
- if (Exponent < -3 || Exponent > 4)
- return false;
-
- uint32_t PackedFraction = (Fraction >> A64FracStart) & A64FracMask;
- uint32_t PackedExp = (Exponent + 7) & 0x7;
-
- Imm8Bits = (Sign << 7) | (PackedExp << 4) | PackedFraction;
- return true;
-}
-
-// Encoding of the immediate for logical (immediate) instructions:
-//
-// | N | imms | immr | size | R | S |
-// |---+--------+--------+------+--------------+--------------|
-// | 1 | ssssss | rrrrrr | 64 | UInt(rrrrrr) | UInt(ssssss) |
-// | 0 | 0sssss | xrrrrr | 32 | UInt(rrrrr) | UInt(sssss) |
-// | 0 | 10ssss | xxrrrr | 16 | UInt(rrrr) | UInt(ssss) |
-// | 0 | 110sss | xxxrrr | 8 | UInt(rrr) | UInt(sss) |
-// | 0 | 1110ss | xxxxrr | 4 | UInt(rr) | UInt(ss) |
-// | 0 | 11110s | xxxxxr | 2 | UInt(r) | UInt(s) |
-// | 0 | 11111x | - | | UNALLOCATED | |
-//
-// Columns 'R', 'S' and 'size' specify a "bitmask immediate" of size bits in
-// which the lower S+1 bits are ones and the remaining bits are zero, then
-// rotated right by R bits, which is then replicated across the datapath.
-//
-// + Values of 'N', 'imms' and 'immr' which do not match the above table are
-// RESERVED.
-// + If all 's' bits in the imms field are set then the instruction is
-// RESERVED.
-// + The 'x' bits in the 'immr' field are IGNORED.
-
-bool A64Imms::isLogicalImm(unsigned RegWidth, uint64_t Imm, uint32_t &Bits) {
- int RepeatWidth;
- int Rotation = 0;
- int Num1s = 0;
-
- // Because there are S+1 ones in the replicated mask, an immediate of all
- // zeros is not allowed. Filtering it here is probably more efficient.
- if (Imm == 0) return false;
-
- for (RepeatWidth = RegWidth; RepeatWidth > 1; RepeatWidth /= 2) {
- uint64_t RepeatMask = RepeatWidth == 64 ? -1 : (1ULL << RepeatWidth) - 1;
- uint64_t ReplicatedMask = Imm & RepeatMask;
-
- if (ReplicatedMask == 0) continue;
-
- // First we have to make sure the mask is actually repeated in each slot for
- // this width-specifier.
- bool IsReplicatedMask = true;
- for (unsigned i = RepeatWidth; i < RegWidth; i += RepeatWidth) {
- if (((Imm >> i) & RepeatMask) != ReplicatedMask) {
- IsReplicatedMask = false;
- break;
- }
- }
- if (!IsReplicatedMask) continue;
-
- // Now we have to work out the amount of rotation needed. The first part of
- // this calculation is actually independent of RepeatWidth, but the complex
- // case will depend on it.
- Rotation = countTrailingZeros(Imm);
- if (Rotation == 0) {
- // There were no leading zeros, which means it's either in place or there
- // are 1s at each end (e.g. 0x8003 needs rotating).
- Rotation = RegWidth == 64 ? CountLeadingOnes_64(Imm)
- : CountLeadingOnes_32(Imm);
- Rotation = RepeatWidth - Rotation;
- }
-
- uint64_t ReplicatedOnes = ReplicatedMask;
- if (Rotation != 0 && Rotation != 64)
- ReplicatedOnes = (ReplicatedMask >> Rotation)
- | ((ReplicatedMask << (RepeatWidth - Rotation)) & RepeatMask);
-
- // Of course, they may not actually be ones, so we have to check that:
- if (!isMask_64(ReplicatedOnes))
- continue;
-
- Num1s = CountTrailingOnes_64(ReplicatedOnes);
-
- // We know we've got an almost valid encoding (certainly, if this is invalid
- // no other parameters would work).
- break;
- }
-
- // The encodings which would produce all 1s are RESERVED.
- if (RepeatWidth == 1 || Num1s == RepeatWidth) return false;
-
- uint32_t N = RepeatWidth == 64;
- uint32_t ImmR = RepeatWidth - Rotation;
- uint32_t ImmS = Num1s - 1;
-
- switch (RepeatWidth) {
- default: break; // No action required for other valid rotations.
- case 16: ImmS |= 0x20; break; // 10ssss
- case 8: ImmS |= 0x30; break; // 110sss
- case 4: ImmS |= 0x38; break; // 1110ss
- case 2: ImmS |= 0x3c; break; // 11110s
- }
-
- Bits = ImmS | (ImmR << 6) | (N << 12);
-
- return true;
-}
-
-
-bool A64Imms::isLogicalImmBits(unsigned RegWidth, uint32_t Bits,
- uint64_t &Imm) {
- uint32_t N = Bits >> 12;
- uint32_t ImmR = (Bits >> 6) & 0x3f;
- uint32_t ImmS = Bits & 0x3f;
-
- // N=1 encodes a 64-bit replication and is invalid for the 32-bit
- // instructions.
- if (RegWidth == 32 && N != 0) return false;
-
- int Width = 0;
- if (N == 1)
- Width = 64;
- else if ((ImmS & 0x20) == 0)
- Width = 32;
- else if ((ImmS & 0x10) == 0)
- Width = 16;
- else if ((ImmS & 0x08) == 0)
- Width = 8;
- else if ((ImmS & 0x04) == 0)
- Width = 4;
- else if ((ImmS & 0x02) == 0)
- Width = 2;
- else {
- // ImmS is 0b11111x: UNALLOCATED
- return false;
- }
-
- int Num1s = (ImmS & (Width - 1)) + 1;
-
- // All encodings which would map to -1 (signed) are RESERVED.
- if (Num1s == Width) return false;
-
- int Rotation = (ImmR & (Width - 1));
- uint64_t Mask = (1ULL << Num1s) - 1;
- uint64_t WidthMask = Width == 64 ? -1 : (1ULL << Width) - 1;
- if (Rotation != 0 && Rotation != 64)
- Mask = (Mask >> Rotation)
- | ((Mask << (Width - Rotation)) & WidthMask);
-
- Imm = Mask;
- for (unsigned i = 1; i < RegWidth / Width; ++i) {
- Mask <<= Width;
- Imm |= Mask;
- }
-
- return true;
-}
-
-bool A64Imms::isMOVZImm(int RegWidth, uint64_t Value, int &UImm16, int &Shift) {
- // If high bits are set then a 32-bit MOVZ can't possibly work.
- if (RegWidth == 32 && (Value & ~0xffffffffULL))
- return false;
-
- for (int i = 0; i < RegWidth; i += 16) {
- // If the value is 0 when we mask out all the bits that could be set with
- // the current LSL value then it's representable.
- if ((Value & ~(0xffffULL << i)) == 0) {
- Shift = i / 16;
- UImm16 = (Value >> i) & 0xffff;
- return true;
- }
- }
- return false;
-}
-
-bool A64Imms::isMOVNImm(int RegWidth, uint64_t Value, int &UImm16, int &Shift) {
- // MOVN is defined to set its register to NOT(LSL(imm16, shift)).
-
- // We have to be a little careful about a 32-bit register: 0xffff_1234 *is*
- // representable, but ~0xffff_1234 == 0xffff_ffff_0000_edcb which is not
- // a valid input for isMOVZImm.
- if (RegWidth == 32 && (Value & ~0xffffffffULL))
- return false;
-
- uint64_t MOVZEquivalent = RegWidth == 32 ? ~Value & 0xffffffff : ~Value;
-
- return isMOVZImm(RegWidth, MOVZEquivalent, UImm16, Shift);
-}
-
-bool A64Imms::isOnlyMOVNImm(int RegWidth, uint64_t Value,
- int &UImm16, int &Shift) {
- if (isMOVZImm(RegWidth, Value, UImm16, Shift))
- return false;
-
- return isMOVNImm(RegWidth, Value, UImm16, Shift);
-}
-
-// decodeNeonModShiftImm - Decode a Neon OpCmode value into the
-// the shift amount and the shift type (shift zeros or ones in) and
-// returns whether the OpCmode value implies a shift operation.
-bool A64Imms::decodeNeonModShiftImm(unsigned OpCmode, unsigned &ShiftImm,
- unsigned &ShiftOnesIn) {
- ShiftImm = 0;
- ShiftOnesIn = false;
- bool HasShift = true;
-
- if (OpCmode == 0xe) {
- // movi byte
- HasShift = false;
- } else if (OpCmode == 0x1e) {
- // movi 64-bit bytemask
- HasShift = false;
- } else if ((OpCmode & 0xc) == 0x8) {
- // shift zeros, per halfword
- ShiftImm = ((OpCmode & 0x2) >> 1);
- } else if ((OpCmode & 0x8) == 0) {
- // shift zeros, per word
- ShiftImm = ((OpCmode & 0x6) >> 1);
- } else if ((OpCmode & 0xe) == 0xc) {
- // shift ones, per word
- ShiftOnesIn = true;
- ShiftImm = (OpCmode & 0x1);
- } else {
- // per byte, per bytemask
- llvm_unreachable("Unsupported Neon modified immediate");
- }
-
- return HasShift;
-}
-
-// decodeNeonModImm - Decode a NEON modified immediate and OpCmode values
-// into the element value and the element size in bits.
-uint64_t A64Imms::decodeNeonModImm(unsigned Val, unsigned OpCmode,
- unsigned &EltBits) {
- uint64_t DecodedVal = Val;
- EltBits = 0;
-
- if (OpCmode == 0xe) {
- // movi byte
- EltBits = 8;
- } else if (OpCmode == 0x1e) {
- // movi 64-bit bytemask
- DecodedVal = 0;
- for (unsigned ByteNum = 0; ByteNum < 8; ++ByteNum) {
- if ((Val >> ByteNum) & 1)
- DecodedVal |= (uint64_t)0xff << (8 * ByteNum);
- }
- EltBits = 64;
- } else if ((OpCmode & 0xc) == 0x8) {
- // shift zeros, per halfword
- EltBits = 16;
- } else if ((OpCmode & 0x8) == 0) {
- // shift zeros, per word
- EltBits = 32;
- } else if ((OpCmode & 0xe) == 0xc) {
- // shift ones, per word
- EltBits = 32;
- } else {
- llvm_unreachable("Unsupported Neon modified immediate");
- }
- return DecodedVal;
-}
+AArch64TLBI::TLBIMapper::TLBIMapper()
+ : AArch64NamedImmMapper(TLBIPairs, 0) {}
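
The removed A64Imms helpers dealt with AArch64 "bitmask" logical immediates, encoded as N:immr:imms per the table in the deleted comment: pick an element width, build a run of imms+1 ones, rotate it right by immr, and replicate it across the register. A standalone sketch of the decode direction for a 64-bit register, loosely mirroring the removed isLogicalImmBits (simplified, names invented); the example decodes imms=0x27 to 0x00ff00ff00ff00ff.

#include <cstdint>
#include <cstdio>

// Decode an AArch64 logical-immediate encoding (N:immr:imms) into the 64-bit
// value it represents; returns false for RESERVED/UNALLOCATED encodings.
static bool decodeLogicalImm(unsigned N, unsigned ImmR, unsigned ImmS,
                             uint64_t &Imm) {
  // Element width: 64 if N == 1, otherwise given by the first zero in imms
  // (0sssss -> 32, 10ssss -> 16, 110sss -> 8, 1110ss -> 4, 11110s -> 2).
  int Width;
  if (N == 1)
    Width = 64;
  else if ((ImmS & 0x20) == 0)
    Width = 32;
  else if ((ImmS & 0x10) == 0)
    Width = 16;
  else if ((ImmS & 0x08) == 0)
    Width = 8;
  else if ((ImmS & 0x04) == 0)
    Width = 4;
  else if ((ImmS & 0x02) == 0)
    Width = 2;
  else
    return false; // 11111x is UNALLOCATED

  int Num1s = (ImmS & (Width - 1)) + 1;
  if (Num1s == Width)
    return false; // all-ones elements are RESERVED

  // Build Num1s trailing ones, rotate right by immr, replicate across 64 bits.
  int Rotation = ImmR & (Width - 1);
  uint64_t Mask = (1ULL << Num1s) - 1;
  uint64_t WidthMask = Width == 64 ? ~0ULL : (1ULL << Width) - 1;
  if (Rotation)
    Mask = ((Mask >> Rotation) | (Mask << (Width - Rotation))) & WidthMask;

  Imm = 0;
  for (int i = 0; i < 64; i += Width)
    Imm |= Mask << i;
  return true;
}

int main() {
  uint64_t Imm;
  // N=0, immr=0, imms=0b100111: 16-bit elements, eight trailing ones.
  if (decodeLogicalImm(0, 0, 0x27, Imm))
    std::printf("0x%016llx\n", (unsigned long long)Imm); // 0x00ff00ff00ff00ff
  return 0;
}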
diff --git a/contrib/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h b/contrib/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h
index ce970b0..9d2ce21 100644
--- a/contrib/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h
+++ b/contrib/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h
@@ -1,4 +1,4 @@
-//===-- AArch64BaseInfo.h - Top level definitions for AArch64- --*- C++ -*-===//
+//===-- AArch64BaseInfo.h - Top level definitions for AArch64 ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,95 +14,256 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_AARCH64_BASEINFO_H
-#define LLVM_AARCH64_BASEINFO_H
+#ifndef AArch64BASEINFO_H
+#define AArch64BASEINFO_H
-#include "llvm/ADT/StringSwitch.h"
+// FIXME: Is it easiest to fix this layering violation by moving the .inc
+// #includes from AArch64MCTargetDesc.h to here?
+#include "MCTargetDesc/AArch64MCTargetDesc.h" // For AArch64::X0 and friends.
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ErrorHandling.h"
namespace llvm {
-// // Enums corresponding to AArch64 condition codes
-namespace A64CC {
- // The CondCodes constants map directly to the 4-bit encoding of the
- // condition field for predicated instructions.
- enum CondCodes { // Meaning (integer) Meaning (floating-point)
- EQ = 0, // Equal Equal
- NE, // Not equal Not equal, or unordered
- HS, // Unsigned higher or same >, ==, or unordered
- LO, // Unsigned lower or same Less than
- MI, // Minus, negative Less than
- PL, // Plus, positive or zero >, ==, or unordered
- VS, // Overflow Unordered
- VC, // No overflow Ordered
- HI, // Unsigned higher Greater than, or unordered
- LS, // Unsigned lower or same Less than or equal
- GE, // Greater than or equal Greater than or equal
- LT, // Less than Less than, or unordered
- GT, // Signed greater than Greater than
- LE, // Signed less than or equal <, ==, or unordered
- AL, // Always (unconditional) Always (unconditional)
- NV, // Always (unconditional) Always (unconditional)
- // Note the NV exists purely to disassemble 0b1111. Execution
- // is "always".
- Invalid
- };
+inline static unsigned getWRegFromXReg(unsigned Reg) {
+ switch (Reg) {
+ case AArch64::X0: return AArch64::W0;
+ case AArch64::X1: return AArch64::W1;
+ case AArch64::X2: return AArch64::W2;
+ case AArch64::X3: return AArch64::W3;
+ case AArch64::X4: return AArch64::W4;
+ case AArch64::X5: return AArch64::W5;
+ case AArch64::X6: return AArch64::W6;
+ case AArch64::X7: return AArch64::W7;
+ case AArch64::X8: return AArch64::W8;
+ case AArch64::X9: return AArch64::W9;
+ case AArch64::X10: return AArch64::W10;
+ case AArch64::X11: return AArch64::W11;
+ case AArch64::X12: return AArch64::W12;
+ case AArch64::X13: return AArch64::W13;
+ case AArch64::X14: return AArch64::W14;
+ case AArch64::X15: return AArch64::W15;
+ case AArch64::X16: return AArch64::W16;
+ case AArch64::X17: return AArch64::W17;
+ case AArch64::X18: return AArch64::W18;
+ case AArch64::X19: return AArch64::W19;
+ case AArch64::X20: return AArch64::W20;
+ case AArch64::X21: return AArch64::W21;
+ case AArch64::X22: return AArch64::W22;
+ case AArch64::X23: return AArch64::W23;
+ case AArch64::X24: return AArch64::W24;
+ case AArch64::X25: return AArch64::W25;
+ case AArch64::X26: return AArch64::W26;
+ case AArch64::X27: return AArch64::W27;
+ case AArch64::X28: return AArch64::W28;
+ case AArch64::FP: return AArch64::W29;
+ case AArch64::LR: return AArch64::W30;
+ case AArch64::SP: return AArch64::WSP;
+ case AArch64::XZR: return AArch64::WZR;
+ }
+ // For anything else, return it unchanged.
+ return Reg;
+}
-} // namespace A64CC
+inline static unsigned getXRegFromWReg(unsigned Reg) {
+ switch (Reg) {
+ case AArch64::W0: return AArch64::X0;
+ case AArch64::W1: return AArch64::X1;
+ case AArch64::W2: return AArch64::X2;
+ case AArch64::W3: return AArch64::X3;
+ case AArch64::W4: return AArch64::X4;
+ case AArch64::W5: return AArch64::X5;
+ case AArch64::W6: return AArch64::X6;
+ case AArch64::W7: return AArch64::X7;
+ case AArch64::W8: return AArch64::X8;
+ case AArch64::W9: return AArch64::X9;
+ case AArch64::W10: return AArch64::X10;
+ case AArch64::W11: return AArch64::X11;
+ case AArch64::W12: return AArch64::X12;
+ case AArch64::W13: return AArch64::X13;
+ case AArch64::W14: return AArch64::X14;
+ case AArch64::W15: return AArch64::X15;
+ case AArch64::W16: return AArch64::X16;
+ case AArch64::W17: return AArch64::X17;
+ case AArch64::W18: return AArch64::X18;
+ case AArch64::W19: return AArch64::X19;
+ case AArch64::W20: return AArch64::X20;
+ case AArch64::W21: return AArch64::X21;
+ case AArch64::W22: return AArch64::X22;
+ case AArch64::W23: return AArch64::X23;
+ case AArch64::W24: return AArch64::X24;
+ case AArch64::W25: return AArch64::X25;
+ case AArch64::W26: return AArch64::X26;
+ case AArch64::W27: return AArch64::X27;
+ case AArch64::W28: return AArch64::X28;
+ case AArch64::W29: return AArch64::FP;
+ case AArch64::W30: return AArch64::LR;
+ case AArch64::WSP: return AArch64::SP;
+ case AArch64::WZR: return AArch64::XZR;
+ }
+ // For anything else, return it unchanged.
+ return Reg;
+}
-inline static const char *A64CondCodeToString(A64CC::CondCodes CC) {
- switch (CC) {
- default: llvm_unreachable("Unknown condition code");
- case A64CC::EQ: return "eq";
- case A64CC::NE: return "ne";
- case A64CC::HS: return "hs";
- case A64CC::LO: return "lo";
- case A64CC::MI: return "mi";
- case A64CC::PL: return "pl";
- case A64CC::VS: return "vs";
- case A64CC::VC: return "vc";
- case A64CC::HI: return "hi";
- case A64CC::LS: return "ls";
- case A64CC::GE: return "ge";
- case A64CC::LT: return "lt";
- case A64CC::GT: return "gt";
- case A64CC::LE: return "le";
- case A64CC::AL: return "al";
- case A64CC::NV: return "nv";
+static inline unsigned getBRegFromDReg(unsigned Reg) {
+ switch (Reg) {
+ case AArch64::D0: return AArch64::B0;
+ case AArch64::D1: return AArch64::B1;
+ case AArch64::D2: return AArch64::B2;
+ case AArch64::D3: return AArch64::B3;
+ case AArch64::D4: return AArch64::B4;
+ case AArch64::D5: return AArch64::B5;
+ case AArch64::D6: return AArch64::B6;
+ case AArch64::D7: return AArch64::B7;
+ case AArch64::D8: return AArch64::B8;
+ case AArch64::D9: return AArch64::B9;
+ case AArch64::D10: return AArch64::B10;
+ case AArch64::D11: return AArch64::B11;
+ case AArch64::D12: return AArch64::B12;
+ case AArch64::D13: return AArch64::B13;
+ case AArch64::D14: return AArch64::B14;
+ case AArch64::D15: return AArch64::B15;
+ case AArch64::D16: return AArch64::B16;
+ case AArch64::D17: return AArch64::B17;
+ case AArch64::D18: return AArch64::B18;
+ case AArch64::D19: return AArch64::B19;
+ case AArch64::D20: return AArch64::B20;
+ case AArch64::D21: return AArch64::B21;
+ case AArch64::D22: return AArch64::B22;
+ case AArch64::D23: return AArch64::B23;
+ case AArch64::D24: return AArch64::B24;
+ case AArch64::D25: return AArch64::B25;
+ case AArch64::D26: return AArch64::B26;
+ case AArch64::D27: return AArch64::B27;
+ case AArch64::D28: return AArch64::B28;
+ case AArch64::D29: return AArch64::B29;
+ case AArch64::D30: return AArch64::B30;
+ case AArch64::D31: return AArch64::B31;
}
+ // For anything else, return it unchanged.
+ return Reg;
}
-inline static A64CC::CondCodes A64StringToCondCode(StringRef CondStr) {
- return StringSwitch<A64CC::CondCodes>(CondStr.lower())
- .Case("eq", A64CC::EQ)
- .Case("ne", A64CC::NE)
- .Case("ne", A64CC::NE)
- .Case("hs", A64CC::HS)
- .Case("cs", A64CC::HS)
- .Case("lo", A64CC::LO)
- .Case("cc", A64CC::LO)
- .Case("mi", A64CC::MI)
- .Case("pl", A64CC::PL)
- .Case("vs", A64CC::VS)
- .Case("vc", A64CC::VC)
- .Case("hi", A64CC::HI)
- .Case("ls", A64CC::LS)
- .Case("ge", A64CC::GE)
- .Case("lt", A64CC::LT)
- .Case("gt", A64CC::GT)
- .Case("le", A64CC::LE)
- .Case("al", A64CC::AL)
- .Case("nv", A64CC::NV)
- .Default(A64CC::Invalid);
+
+static inline unsigned getDRegFromBReg(unsigned Reg) {
+ switch (Reg) {
+ case AArch64::B0: return AArch64::D0;
+ case AArch64::B1: return AArch64::D1;
+ case AArch64::B2: return AArch64::D2;
+ case AArch64::B3: return AArch64::D3;
+ case AArch64::B4: return AArch64::D4;
+ case AArch64::B5: return AArch64::D5;
+ case AArch64::B6: return AArch64::D6;
+ case AArch64::B7: return AArch64::D7;
+ case AArch64::B8: return AArch64::D8;
+ case AArch64::B9: return AArch64::D9;
+ case AArch64::B10: return AArch64::D10;
+ case AArch64::B11: return AArch64::D11;
+ case AArch64::B12: return AArch64::D12;
+ case AArch64::B13: return AArch64::D13;
+ case AArch64::B14: return AArch64::D14;
+ case AArch64::B15: return AArch64::D15;
+ case AArch64::B16: return AArch64::D16;
+ case AArch64::B17: return AArch64::D17;
+ case AArch64::B18: return AArch64::D18;
+ case AArch64::B19: return AArch64::D19;
+ case AArch64::B20: return AArch64::D20;
+ case AArch64::B21: return AArch64::D21;
+ case AArch64::B22: return AArch64::D22;
+ case AArch64::B23: return AArch64::D23;
+ case AArch64::B24: return AArch64::D24;
+ case AArch64::B25: return AArch64::D25;
+ case AArch64::B26: return AArch64::D26;
+ case AArch64::B27: return AArch64::D27;
+ case AArch64::B28: return AArch64::D28;
+ case AArch64::B29: return AArch64::D29;
+ case AArch64::B30: return AArch64::D30;
+ case AArch64::B31: return AArch64::D31;
+ }
+ // For anything else, return it unchanged.
+ return Reg;
+}
+
+namespace AArch64CC {
+
+// The CondCodes constants map directly to the 4-bit encoding of the condition
+// field for predicated instructions.
+enum CondCode { // Meaning (integer) Meaning (floating-point)
+ EQ = 0x0, // Equal Equal
+ NE = 0x1, // Not equal Not equal, or unordered
+ HS = 0x2, // Unsigned higher or same >, ==, or unordered
+ LO = 0x3, // Unsigned lower Less than
+ MI = 0x4, // Minus, negative Less than
+ PL = 0x5, // Plus, positive or zero >, ==, or unordered
+ VS = 0x6, // Overflow Unordered
+ VC = 0x7, // No overflow Not unordered
+ HI = 0x8, // Unsigned higher Greater than, or unordered
+ LS = 0x9, // Unsigned lower or same Less than or equal
+ GE = 0xa, // Greater than or equal Greater than or equal
+ LT = 0xb, // Less than Less than, or unordered
+ GT = 0xc, // Greater than Greater than
+ LE = 0xd, // Less than or equal <, ==, or unordered
+ AL = 0xe, // Always (unconditional) Always (unconditional)
+ NV = 0xf, // Always (unconditional) Always (unconditional)
+ // Note the NV exists purely to disassemble 0b1111. Execution is "always".
+ Invalid
+};
+
+inline static const char *getCondCodeName(CondCode Code) {
+ switch (Code) {
+ default: llvm_unreachable("Unknown condition code");
+ case EQ: return "eq";
+ case NE: return "ne";
+ case HS: return "hs";
+ case LO: return "lo";
+ case MI: return "mi";
+ case PL: return "pl";
+ case VS: return "vs";
+ case VC: return "vc";
+ case HI: return "hi";
+ case LS: return "ls";
+ case GE: return "ge";
+ case LT: return "lt";
+ case GT: return "gt";
+ case LE: return "le";
+ case AL: return "al";
+ case NV: return "nv";
+ }
}
-inline static A64CC::CondCodes A64InvertCondCode(A64CC::CondCodes CC) {
- // It turns out that the condition codes have been designed so that in order
- // to reverse the intent of the condition you only have to invert the low bit:
+inline static CondCode getInvertedCondCode(CondCode Code) {
+ // To reverse a condition it's necessary to only invert the low bit:
+
+ return static_cast<CondCode>(static_cast<unsigned>(Code) ^ 0x1);
+}
- return static_cast<A64CC::CondCodes>(static_cast<unsigned>(CC) ^ 0x1);
+/// Given a condition code, return NZCV flags that would satisfy that condition.
+/// The flag bits are in the format expected by the ccmp instructions.
+/// Note that many different flag settings can satisfy a given condition code;
+/// this function just returns one of them.
+inline static unsigned getNZCVToSatisfyCondCode(CondCode Code) {
+ // NZCV flags encoded as expected by ccmp instructions, ARMv8 ISA 5.5.7.
+ enum { N = 8, Z = 4, C = 2, V = 1 };
+ switch (Code) {
+ default: llvm_unreachable("Unknown condition code");
+ case EQ: return Z; // Z == 1
+ case NE: return 0; // Z == 0
+ case HS: return C; // C == 1
+ case LO: return 0; // C == 0
+ case MI: return N; // N == 1
+ case PL: return 0; // N == 0
+ case VS: return V; // V == 1
+ case VC: return 0; // V == 0
+ case HI: return C; // C == 1 && Z == 0
+ case LS: return 0; // C == 0 || Z == 1
+ case GE: return 0; // N == V
+ case LT: return N; // N != V
+ case GT: return 0; // Z == 0 && N == V
+ case LE: return Z; // Z == 1 || N != V
+ }
}
+} // end namespace AArch64CC
/// Instances of this class can perform bidirectional mapping from random
/// identifier strings to operand encodings. For example "MSR" takes a named
@@ -115,14 +276,14 @@ inline static A64CC::CondCodes A64InvertCondCode(A64CC::CondCodes CC) {
/// out just how often these instructions are emitted before working on it. It
/// might even be optimal to just reorder the tables for the common instructions
/// rather than changing the algorithm.
-struct NamedImmMapper {
+struct AArch64NamedImmMapper {
struct Mapping {
const char *Name;
uint32_t Value;
};
template<int N>
- NamedImmMapper(const Mapping (&Pairs)[N], uint32_t TooBigImm)
+ AArch64NamedImmMapper(const Mapping (&Pairs)[N], uint32_t TooBigImm)
: Pairs(&Pairs[0]), NumPairs(N), TooBigImm(TooBigImm) {}
StringRef toString(uint32_t Value, bool &Valid) const;
@@ -138,7 +299,7 @@ protected:
uint32_t TooBigImm;
};
-namespace A64AT {
+namespace AArch64AT {
enum ATValues {
Invalid = -1, // Op0 Op1 CRn CRm Op2
S1E1R = 0x43c0, // 01 000 0111 1000 000
@@ -155,14 +316,14 @@ namespace A64AT {
S12E0W = 0x63c7 // 01 100 0111 1000 111
};
- struct ATMapper : NamedImmMapper {
+ struct ATMapper : AArch64NamedImmMapper {
const static Mapping ATPairs[];
ATMapper();
};
}
-namespace A64DB {
+namespace AArch64DB {
enum DBValues {
Invalid = -1,
OSHLD = 0x1,
@@ -179,14 +340,14 @@ namespace A64DB {
SY = 0xf
};
- struct DBarrierMapper : NamedImmMapper {
+ struct DBarrierMapper : AArch64NamedImmMapper {
const static Mapping DBarrierPairs[];
DBarrierMapper();
};
}
-namespace A64DC {
+namespace AArch64DC {
enum DCValues {
Invalid = -1, // Op1 CRn CRm Op2
ZVA = 0x5ba1, // 01 011 0111 0100 001
@@ -199,7 +360,7 @@ namespace A64DC {
CISW = 0x43f2 // 01 000 0111 1110 010
};
- struct DCMapper : NamedImmMapper {
+ struct DCMapper : AArch64NamedImmMapper {
const static Mapping DCPairs[];
DCMapper();
@@ -207,7 +368,7 @@ namespace A64DC {
}
-namespace A64IC {
+namespace AArch64IC {
enum ICValues {
Invalid = -1, // Op1 CRn CRm Op2
IALLUIS = 0x0388, // 000 0111 0001 000
@@ -216,7 +377,7 @@ namespace A64IC {
};
- struct ICMapper : NamedImmMapper {
+ struct ICMapper : AArch64NamedImmMapper {
const static Mapping ICPairs[];
ICMapper();
@@ -227,19 +388,19 @@ namespace A64IC {
}
}
-namespace A64ISB {
+namespace AArch64ISB {
enum ISBValues {
Invalid = -1,
SY = 0xf
};
- struct ISBMapper : NamedImmMapper {
+ struct ISBMapper : AArch64NamedImmMapper {
const static Mapping ISBPairs[];
ISBMapper();
};
}
-namespace A64PRFM {
+namespace AArch64PRFM {
enum PRFMValues {
Invalid = -1,
PLDL1KEEP = 0x00,
@@ -262,14 +423,14 @@ namespace A64PRFM {
PSTL3STRM = 0x15
};
- struct PRFMMapper : NamedImmMapper {
+ struct PRFMMapper : AArch64NamedImmMapper {
const static Mapping PRFMPairs[];
PRFMMapper();
};
}
-namespace A64PState {
+namespace AArch64PState {
enum PStateValues {
Invalid = -1,
SPSel = 0x05,
@@ -277,7 +438,7 @@ namespace A64PState {
DAIFClr = 0x1f
};
- struct PStateMapper : NamedImmMapper {
+ struct PStateMapper : AArch64NamedImmMapper {
const static Mapping PStatePairs[];
PStateMapper();
@@ -285,7 +446,7 @@ namespace A64PState {
}
-namespace A64SE {
+namespace AArch64SE {
enum ShiftExtSpecifiers {
Invalid = -1,
LSL,
@@ -306,7 +467,7 @@ namespace A64SE {
};
}
-namespace A64Layout {
+namespace AArch64Layout {
enum VectorLayout {
Invalid = -1,
VL_8B,
@@ -329,43 +490,43 @@ namespace A64Layout {
}
inline static const char *
-A64VectorLayoutToString(A64Layout::VectorLayout Layout) {
+AArch64VectorLayoutToString(AArch64Layout::VectorLayout Layout) {
switch (Layout) {
- case A64Layout::VL_8B: return ".8b";
- case A64Layout::VL_4H: return ".4h";
- case A64Layout::VL_2S: return ".2s";
- case A64Layout::VL_1D: return ".1d";
- case A64Layout::VL_16B: return ".16b";
- case A64Layout::VL_8H: return ".8h";
- case A64Layout::VL_4S: return ".4s";
- case A64Layout::VL_2D: return ".2d";
- case A64Layout::VL_B: return ".b";
- case A64Layout::VL_H: return ".h";
- case A64Layout::VL_S: return ".s";
- case A64Layout::VL_D: return ".d";
+ case AArch64Layout::VL_8B: return ".8b";
+ case AArch64Layout::VL_4H: return ".4h";
+ case AArch64Layout::VL_2S: return ".2s";
+ case AArch64Layout::VL_1D: return ".1d";
+ case AArch64Layout::VL_16B: return ".16b";
+ case AArch64Layout::VL_8H: return ".8h";
+ case AArch64Layout::VL_4S: return ".4s";
+ case AArch64Layout::VL_2D: return ".2d";
+ case AArch64Layout::VL_B: return ".b";
+ case AArch64Layout::VL_H: return ".h";
+ case AArch64Layout::VL_S: return ".s";
+ case AArch64Layout::VL_D: return ".d";
default: llvm_unreachable("Unknown Vector Layout");
}
}
-inline static A64Layout::VectorLayout
-A64StringToVectorLayout(StringRef LayoutStr) {
- return StringSwitch<A64Layout::VectorLayout>(LayoutStr)
- .Case(".8b", A64Layout::VL_8B)
- .Case(".4h", A64Layout::VL_4H)
- .Case(".2s", A64Layout::VL_2S)
- .Case(".1d", A64Layout::VL_1D)
- .Case(".16b", A64Layout::VL_16B)
- .Case(".8h", A64Layout::VL_8H)
- .Case(".4s", A64Layout::VL_4S)
- .Case(".2d", A64Layout::VL_2D)
- .Case(".b", A64Layout::VL_B)
- .Case(".h", A64Layout::VL_H)
- .Case(".s", A64Layout::VL_S)
- .Case(".d", A64Layout::VL_D)
- .Default(A64Layout::Invalid);
+inline static AArch64Layout::VectorLayout
+AArch64StringToVectorLayout(StringRef LayoutStr) {
+ return StringSwitch<AArch64Layout::VectorLayout>(LayoutStr)
+ .Case(".8b", AArch64Layout::VL_8B)
+ .Case(".4h", AArch64Layout::VL_4H)
+ .Case(".2s", AArch64Layout::VL_2S)
+ .Case(".1d", AArch64Layout::VL_1D)
+ .Case(".16b", AArch64Layout::VL_16B)
+ .Case(".8h", AArch64Layout::VL_8H)
+ .Case(".4s", AArch64Layout::VL_4S)
+ .Case(".2d", AArch64Layout::VL_2D)
+ .Case(".b", AArch64Layout::VL_B)
+ .Case(".h", AArch64Layout::VL_H)
+ .Case(".s", AArch64Layout::VL_S)
+ .Case(".d", AArch64Layout::VL_D)
+ .Default(AArch64Layout::Invalid);
}
-namespace A64SysReg {
+namespace AArch64SysReg {
enum SysRegROValues {
MDCCSR_EL0 = 0x9808, // 10 011 0000 0001 000
DBGDTRRX_EL0 = 0x9828, // 10 011 0000 0101 000
@@ -396,16 +557,16 @@ namespace A64SysReg {
ID_ISAR3_EL1 = 0xc013, // 11 000 0000 0010 011
ID_ISAR4_EL1 = 0xc014, // 11 000 0000 0010 100
ID_ISAR5_EL1 = 0xc015, // 11 000 0000 0010 101
- ID_AA64PFR0_EL1 = 0xc020, // 11 000 0000 0100 000
- ID_AA64PFR1_EL1 = 0xc021, // 11 000 0000 0100 001
- ID_AA64DFR0_EL1 = 0xc028, // 11 000 0000 0101 000
- ID_AA64DFR1_EL1 = 0xc029, // 11 000 0000 0101 001
- ID_AA64AFR0_EL1 = 0xc02c, // 11 000 0000 0101 100
- ID_AA64AFR1_EL1 = 0xc02d, // 11 000 0000 0101 101
- ID_AA64ISAR0_EL1 = 0xc030, // 11 000 0000 0110 000
- ID_AA64ISAR1_EL1 = 0xc031, // 11 000 0000 0110 001
- ID_AA64MMFR0_EL1 = 0xc038, // 11 000 0000 0111 000
- ID_AA64MMFR1_EL1 = 0xc039, // 11 000 0000 0111 001
+ ID_A64PFR0_EL1 = 0xc020, // 11 000 0000 0100 000
+ ID_A64PFR1_EL1 = 0xc021, // 11 000 0000 0100 001
+ ID_A64DFR0_EL1 = 0xc028, // 11 000 0000 0101 000
+ ID_A64DFR1_EL1 = 0xc029, // 11 000 0000 0101 001
+ ID_A64AFR0_EL1 = 0xc02c, // 11 000 0000 0101 100
+ ID_A64AFR1_EL1 = 0xc02d, // 11 000 0000 0101 101
+ ID_A64ISAR0_EL1 = 0xc030, // 11 000 0000 0110 000
+ ID_A64ISAR1_EL1 = 0xc031, // 11 000 0000 0110 001
+ ID_A64MMFR0_EL1 = 0xc038, // 11 000 0000 0111 000
+ ID_A64MMFR1_EL1 = 0xc039, // 11 000 0000 0111 001
MVFR0_EL1 = 0xc018, // 11 000 0000 0011 000
MVFR1_EL1 = 0xc019, // 11 000 0000 0011 001
MVFR2_EL1 = 0xc01a, // 11 000 0000 0011 010
@@ -960,38 +1121,45 @@ namespace A64SysReg {
ICH_LR12_EL2 = 0xe66c, // 11 100 1100 1101 100
ICH_LR13_EL2 = 0xe66d, // 11 100 1100 1101 101
ICH_LR14_EL2 = 0xe66e, // 11 100 1100 1101 110
- ICH_LR15_EL2 = 0xe66f // 11 100 1100 1101 111
+ ICH_LR15_EL2 = 0xe66f, // 11 100 1100 1101 111
+ };
+
+ // Cyclone specific system registers
+ enum CycloneSysRegValues {
+ CPM_IOACC_CTL_EL3 = 0xff90
};
- // Note that these do not inherit from NamedImmMapper. This class is
+ // Note that these do not inherit from AArch64NamedImmMapper. This class is
// sufficiently different in its behaviour that I don't believe it's worth
- // burdening the common NamedImmMapper with abstractions only needed in
+ // burdening the common AArch64NamedImmMapper with abstractions only needed in
// this one case.
struct SysRegMapper {
- static const NamedImmMapper::Mapping SysRegPairs[];
+ static const AArch64NamedImmMapper::Mapping SysRegPairs[];
+ static const AArch64NamedImmMapper::Mapping CycloneSysRegPairs[];
- const NamedImmMapper::Mapping *InstPairs;
+ const AArch64NamedImmMapper::Mapping *InstPairs;
size_t NumInstPairs;
+ uint64_t FeatureBits;
- SysRegMapper() {}
+ SysRegMapper(uint64_t FeatureBits) : FeatureBits(FeatureBits) { }
uint32_t fromString(StringRef Name, bool &Valid) const;
std::string toString(uint32_t Bits, bool &Valid) const;
};
struct MSRMapper : SysRegMapper {
- static const NamedImmMapper::Mapping MSRPairs[];
- MSRMapper();
+ static const AArch64NamedImmMapper::Mapping MSRPairs[];
+ MSRMapper(uint64_t FeatureBits);
};
struct MRSMapper : SysRegMapper {
- static const NamedImmMapper::Mapping MRSPairs[];
- MRSMapper();
+ static const AArch64NamedImmMapper::Mapping MRSPairs[];
+ MRSMapper(uint64_t FeatureBits);
};
uint32_t ParseGenericRegister(StringRef Name, bool &Valid);
}
-namespace A64TLBI {
+namespace AArch64TLBI {
enum TLBIValues {
Invalid = -1, // Op0 Op1 CRn CRm Op2
IPAS2E1IS = 0x6401, // 01 100 1000 0000 001
@@ -1028,7 +1196,7 @@ namespace A64TLBI {
VAALE1 = 0x443f // 01 000 1000 0111 111
};
- struct TLBIMapper : NamedImmMapper {
+ struct TLBIMapper : AArch64NamedImmMapper {
const static Mapping TLBIPairs[];
TLBIMapper();
@@ -1051,88 +1219,62 @@ namespace A64TLBI {
return true;
}
}
-}
+}
namespace AArch64II {
-
+ /// Target Operand Flag enum.
enum TOF {
- //===--------------------------------------------------------------===//
+ //===------------------------------------------------------------------===//
// AArch64 Specific MachineOperand flags.
MO_NO_FLAG,
- // MO_GOT - Represents a relocation referring to the GOT entry of a given
- // symbol. Used in adrp.
- MO_GOT,
-
- // MO_GOT_LO12 - Represents a relocation referring to the low 12 bits of the
- // GOT entry of a given symbol. Used in ldr only.
- MO_GOT_LO12,
-
- // MO_DTPREL_* - Represents a relocation referring to the offset from a
- // module's dynamic thread pointer. Used in the local-dynamic TLS access
- // model.
- MO_DTPREL_G1,
- MO_DTPREL_G0_NC,
-
- // MO_GOTTPREL_* - Represents a relocation referring to a GOT entry
- // providing the offset of a variable from the thread-pointer. Used in
- // initial-exec TLS model where this offset is assigned in the static thread
- // block and thus known by the dynamic linker.
- MO_GOTTPREL,
- MO_GOTTPREL_LO12,
-
- // MO_TLSDESC_* - Represents a relocation referring to a GOT entry providing
- // a TLS descriptor chosen by the dynamic linker. Used for the
- // general-dynamic and local-dynamic TLS access models where very littls is
- // known at link-time.
- MO_TLSDESC,
- MO_TLSDESC_LO12,
-
- // MO_TPREL_* - Represents a relocation referring to the offset of a
- // variable from the thread pointer itself. Used in the local-exec TLS
- // access model.
- MO_TPREL_G1,
- MO_TPREL_G0_NC,
-
- // MO_LO12 - On a symbol operand, this represents a relocation containing
- // lower 12 bits of the address. Used in add/sub/ldr/str.
- MO_LO12,
-
- // MO_ABS_G* - Represent the 16-bit granules of an absolute reference using
- // movz/movk instructions.
- MO_ABS_G3,
- MO_ABS_G2_NC,
- MO_ABS_G1_NC,
- MO_ABS_G0_NC
+ MO_FRAGMENT = 0x7,
+
+ /// MO_PAGE - A symbol operand with this flag represents the pc-relative
+ /// offset of the 4K page containing the symbol. This is used with the
+ /// ADRP instruction.
+ MO_PAGE = 1,
+
+ /// MO_PAGEOFF - A symbol operand with this flag represents the offset of
+ /// that symbol within a 4K page. This offset is added to the page address
+ /// to produce the complete address.
+ MO_PAGEOFF = 2,
+
+ /// MO_G3 - A symbol operand with this flag (granule 3) represents the high
+ /// 16-bits of a 64-bit address, used in a MOVZ or MOVK instruction
+ MO_G3 = 3,
+
+ /// MO_G2 - A symbol operand with this flag (granule 2) represents the bits
+ /// 32-47 of a 64-bit address, used in a MOVZ or MOVK instruction
+ MO_G2 = 4,
+
+ /// MO_G1 - A symbol operand with this flag (granule 1) represents the bits
+ /// 16-31 of a 64-bit address, used in a MOVZ or MOVK instruction
+ MO_G1 = 5,
+
+ /// MO_G0 - A symbol operand with this flag (granule 0) represents the bits
+ /// 0-15 of a 64-bit address, used in a MOVZ or MOVK instruction
+ MO_G0 = 6,
+
+ /// MO_GOT - This flag indicates that a symbol operand represents the
+ /// address of the GOT entry for the symbol, rather than the address of
+ /// the symbol itself.
+ MO_GOT = 8,
+
+ /// MO_NC - Indicates whether the linker is expected to check the symbol
+ /// reference for overflow. For example in an ADRP/ADD pair of relocations
+ /// the ADRP usually does check, but not the ADD.
+ MO_NC = 0x10,
+
+ /// MO_TLS - Indicates that the operand being accessed is some kind of
+ /// thread-local symbol. On Darwin, only one type of thread-local access
+ /// exists (pre linker-relaxation), but on ELF the TLSModel used for the
+ /// referee will affect interpretation.
+ MO_TLS = 0x20
};
-}
-
-class APFloat;
-
-namespace A64Imms {
- bool isFPImm(const APFloat &Val, uint32_t &Imm8Bits);
-
- inline bool isFPImm(const APFloat &Val) {
- uint32_t Imm8;
- return isFPImm(Val, Imm8);
- }
-
- bool isLogicalImm(unsigned RegWidth, uint64_t Imm, uint32_t &Bits);
- bool isLogicalImmBits(unsigned RegWidth, uint32_t Bits, uint64_t &Imm);
-
- bool isMOVZImm(int RegWidth, uint64_t Value, int &UImm16, int &Shift);
- bool isMOVNImm(int RegWidth, uint64_t Value, int &UImm16, int &Shift);
-
- // We sometimes want to know whether the immediate is representable with a
- // MOVN but *not* with a MOVZ (because that would take priority).
- bool isOnlyMOVNImm(int RegWidth, uint64_t Value, int &UImm16, int &Shift);
-
- uint64_t decodeNeonModImm(unsigned Val, unsigned OpCmode, unsigned &EltBits);
- bool decodeNeonModShiftImm(unsigned OpCmode, unsigned &ShiftImm,
- unsigned &ShiftOnesIn);
- }
+} // end namespace AArch64II
-} // end namespace llvm;
+} // end namespace llvm
#endif
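
Because the new AArch64CC::CondCode values are the architectural 4-bit encodings, the inverse condition is always obtained by flipping the low bit, and getNZCVToSatisfyCondCode can return a single NZCV setting (N=8, Z=4, C=2, V=1) that makes a condition true, which is what conditional-compare (ccmp) lowering needs. A tiny standalone sketch of both properties using the same numeric values, for illustration only.

#include <cstdio>

// AArch64 condition codes use their architectural 4-bit encodings, so the
// "opposite" condition always differs only in the low bit.
enum CondCode { EQ = 0x0, NE = 0x1, HS = 0x2, LO = 0x3, MI = 0x4, PL = 0x5,
                VS = 0x6, VC = 0x7, HI = 0x8, LS = 0x9, GE = 0xa, LT = 0xb,
                GT = 0xc, LE = 0xd, AL = 0xe };

static CondCode invert(CondCode C) { return static_cast<CondCode>(C ^ 0x1); }

// One NZCV value (N=8, Z=4, C=2, V=1) that satisfies the condition, mirroring
// getNZCVToSatisfyCondCode in the header above (not the only valid choice).
static unsigned nzcvFor(CondCode C) {
  enum { N = 8, Z = 4, C_ = 2, V = 1 };
  switch (C) {
  case EQ: return Z;  case NE: return 0;
  case HS: return C_; case LO: return 0;
  case MI: return N;  case PL: return 0;
  case VS: return V;  case VC: return 0;
  case HI: return C_; case LS: return 0;
  case GE: return 0;  case LT: return N;
  case GT: return 0;  case LE: return Z;
  default: return 0;  // AL/NV: always true, any flags do
  }
}

int main() {
  std::printf("invert(EQ)=%d invert(LT)=%d\n", invert(EQ), invert(LT)); // NE, GE
  std::printf("nzcv(EQ)=0x%x nzcv(LE)=0x%x\n", nzcvFor(EQ), nzcvFor(LE)); // Z, Z
  return 0;
}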
diff --git a/contrib/llvm/lib/Target/ARM/A15SDOptimizer.cpp b/contrib/llvm/lib/Target/ARM/A15SDOptimizer.cpp
index 3e805a2..92eaf9e 100644
--- a/contrib/llvm/lib/Target/ARM/A15SDOptimizer.cpp
+++ b/contrib/llvm/lib/Target/ARM/A15SDOptimizer.cpp
@@ -24,36 +24,30 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "a15-sd-optimizer"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
-#include "ARMSubtarget.h"
-#include "ARMISelLowering.h"
-#include "ARMTargetMachine.h"
-
-#include "llvm/ADT/SmallPtrSet.h"
+#include "ARMBaseRegisterInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
-
#include <set>
using namespace llvm;
+#define DEBUG_TYPE "a15-sd-optimizer"
+
namespace {
struct A15SDOptimizer : public MachineFunctionPass {
static char ID;
A15SDOptimizer() : MachineFunctionPass(ID) {}
- virtual bool runOnMachineFunction(MachineFunction &Fn);
+ bool runOnMachineFunction(MachineFunction &Fn) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "ARM A15 S->D optimizer";
}
@@ -97,7 +91,7 @@ namespace {
unsigned createImplicitDef(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
DebugLoc DL);
-
+
//
// Various property checkers
//
@@ -165,7 +159,7 @@ unsigned A15SDOptimizer::getPrefSPRLane(unsigned SReg) {
if (!MI) return ARM::ssub_0;
MachineOperand *MO = MI->findRegisterDefOperand(SReg);
- assert(MO->isReg() && "Non register operand found!");
+ assert(MO->isReg() && "Non-register operand found!");
if (!MO) return ARM::ssub_0;
if (MI->isCopy() && usesRegClass(MI->getOperand(1),
@@ -227,9 +221,9 @@ void A15SDOptimizer::eraseInstrWithNoUses(MachineInstr *MI) {
IsDead = false;
break;
}
- for (MachineRegisterInfo::use_iterator II = MRI->use_begin(Reg),
- EE = MRI->use_end();
- II != EE; ++II) {
+ for (MachineRegisterInfo::use_instr_iterator
+ II = MRI->use_instr_begin(Reg), EE = MRI->use_instr_end();
+ II != EE; ++II) {
// We don't care about self references.
if (&*II == Def)
continue;
@@ -266,7 +260,7 @@ unsigned A15SDOptimizer::optimizeSDPattern(MachineInstr *MI) {
if (DPRMI && SPRMI) {
// See if the first operand of this insert_subreg is IMPLICIT_DEF
MachineInstr *ECDef = elideCopies(DPRMI);
- if (ECDef != 0 && ECDef->isImplicitDef()) {
+ if (ECDef && ECDef->isImplicitDef()) {
// Another corner case - if we're inserting something that is purely
// a subreg copy of a DPR, just use that DPR.
@@ -327,8 +321,7 @@ unsigned A15SDOptimizer::optimizeSDPattern(MachineInstr *MI) {
return optimizeAllLanesPattern(MI, MI->getOperand(0).getReg());
}
- assert(0 && "Unhandled update pattern!");
- return 0;
+ llvm_unreachable("Unhandled update pattern!");
}
// Return true if this MachineInstr inserts a scalar (SPR) value into
@@ -355,10 +348,10 @@ MachineInstr *A15SDOptimizer::elideCopies(MachineInstr *MI) {
if (!MI->isFullCopy())
return MI;
if (!TRI->isVirtualRegister(MI->getOperand(1).getReg()))
- return NULL;
+ return nullptr;
MachineInstr *Def = MRI->getVRegDef(MI->getOperand(1).getReg());
if (!Def)
- return NULL;
+ return nullptr;
return elideCopies(Def);
}
@@ -442,7 +435,7 @@ A15SDOptimizer::createDupLane(MachineBasicBlock &MBB,
Out)
.addReg(Reg)
.addImm(Lane));
-
+
return Out;
}
@@ -608,7 +601,7 @@ bool A15SDOptimizer::runOnInstruction(MachineInstr *MI) {
// * INSERT_SUBREG: * If the SPR value was originally in another DPR/QPR
// lane, and the other lane(s) of the DPR/QPR register
// that we are inserting in are undefined, use the
- // original DPR/QPR value.
+ // original DPR/QPR value.
 // * Otherwise, fall back on the same strategy as COPY.
//
// * REG_SEQUENCE: * If all except one of the input operands are
@@ -654,7 +647,7 @@ bool A15SDOptimizer::runOnInstruction(MachineInstr *MI) {
unsigned DPRDefReg = MI->getOperand(0).getReg();
for (MachineRegisterInfo::use_iterator I = MRI->use_begin(DPRDefReg),
E = MRI->use_end(); I != E; ++I)
- Uses.push_back(&I.getOperand());
+ Uses.push_back(&*I);
// We can optimize this.
unsigned NewReg = optimizeSDPattern(MI);
@@ -700,7 +693,7 @@ bool A15SDOptimizer::runOnMachineFunction(MachineFunction &Fn) {
MI != ME;) {
Modified |= runOnInstruction(MI++);
}
-
+
}
for (std::set<MachineInstr *>::iterator I = DeadInstr.begin(),
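
elideCopies in this pass walks backwards through chains of full COPY instructions until it reaches the instruction that actually defines the value (or gives up on a non-virtual register), so the optimizer reasons about the original DPR/SPR producer rather than an intermediate copy. A toy standalone model of that walk, with an invented struct standing in for MachineInstr and its register-use chain.

// Toy model of A15SDOptimizer::elideCopies: follow full-copy instructions
// back to the instruction that originally defined the value.
struct ToyInstr {
  bool IsFullCopy;
  ToyInstr *CopiedFrom; // defining instruction of the copied source operand
};

static ToyInstr *elideCopies(ToyInstr *MI) {
  if (!MI || !MI->IsFullCopy)
    return MI; // already the real definition (or nothing known)
  return elideCopies(MI->CopiedFrom);
}

int main() {
  ToyInstr Def = {false, nullptr};
  ToyInstr Copy1 = {true, &Def};
  ToyInstr Copy2 = {true, &Copy1};
  return elideCopies(&Copy2) == &Def ? 0 : 1; // chain resolves to Def
}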
diff --git a/contrib/llvm/lib/Target/ARM/ARM.h b/contrib/llvm/lib/Target/ARM/ARM.h
index 80e5f37..55df29c 100644
--- a/contrib/llvm/lib/Target/ARM/ARM.h
+++ b/contrib/llvm/lib/Target/ARM/ARM.h
@@ -15,19 +15,19 @@
#ifndef TARGET_ARM_H
#define TARGET_ARM_H
-#include "MCTargetDesc/ARMBaseInfo.h"
-#include "MCTargetDesc/ARMMCTargetDesc.h"
-#include "llvm/Support/DataTypes.h"
-#include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/CodeGen.h"
namespace llvm {
class ARMAsmPrinter;
class ARMBaseTargetMachine;
class FunctionPass;
+class ImmutablePass;
class JITCodeEmitter;
class MachineInstr;
class MCInst;
+class TargetLowering;
+class TargetMachine;
FunctionPass *createARMISelDag(ARMBaseTargetMachine &TM,
CodeGenOpt::Level OptLevel);
@@ -43,6 +43,7 @@ FunctionPass *createARMGlobalMergePass(const TargetLowering* tli);
FunctionPass *createARMConstantIslandPass();
FunctionPass *createMLxExpansionPass();
FunctionPass *createThumb2ITBlockPass();
+FunctionPass *createARMOptimizeBarriersPass();
FunctionPass *createThumb2SizeReductionPass();
/// \brief Creates an ARM-specific Target Transformation Info pass.
diff --git a/contrib/llvm/lib/Target/ARM/ARM.td b/contrib/llvm/lib/Target/ARM/ARM.td
index 36e5680..7916ccc 100644
--- a/contrib/llvm/lib/Target/ARM/ARM.td
+++ b/contrib/llvm/lib/Target/ARM/ARM.td
@@ -73,6 +73,11 @@ def FeatureCrypto : SubtargetFeature<"crypto", "HasCrypto", "true",
def FeatureCRC : SubtargetFeature<"crc", "HasCRC", "true",
"Enable support for CRC instructions">;
+// Cyclone has preferred instructions for zeroing VFP registers, which can
+// execute in 0 cycles.
+def FeatureZCZeroing : SubtargetFeature<"zcz", "HasZeroCycleZeroing", "true",
+ "Has zero-cycle zeroing instructions">;
+
// Some processors have FP multiply-accumulate instructions that don't
// play nicely with other VFP / NEON instructions, and it's generally better
// to just not use them.
@@ -179,7 +184,14 @@ def ProcA5 : SubtargetFeature<"a5", "ARMProcFamily", "CortexA5",
"Cortex-A5 ARM processors",
[FeatureSlowFPBrcc, FeatureHasSlowFPVMLx,
FeatureVMLxForwarding, FeatureT2XtPk,
- FeatureTrustZone]>;
+ FeatureTrustZone, FeatureMP]>;
+def ProcA7 : SubtargetFeature<"a7", "ARMProcFamily", "CortexA7",
+ "Cortex-A7 ARM processors",
+ [FeatureSlowFPBrcc, FeatureHasSlowFPVMLx,
+ FeatureVMLxForwarding, FeatureT2XtPk,
+ FeatureVFP4, FeatureMP,
+ FeatureHWDiv, FeatureHWDivARM,
+ FeatureTrustZone, FeatureVirtualization]>;
def ProcA8 : SubtargetFeature<"a8", "ARMProcFamily", "CortexA8",
"Cortex-A8 ARM processors",
[FeatureSlowFPBrcc, FeatureHasSlowFPVMLx,
@@ -198,6 +210,15 @@ def ProcSwift : SubtargetFeature<"swift", "ARMProcFamily", "Swift",
FeatureHWDivARM, FeatureAvoidPartialCPSR,
FeatureAvoidMOVsShOp,
FeatureHasSlowFPVMLx, FeatureTrustZone]>;
+def ProcA12 : SubtargetFeature<"a12", "ARMProcFamily", "CortexA12",
+ "Cortex-A12 ARM processors",
+ [FeatureVMLxForwarding,
+ FeatureT2XtPk, FeatureVFP4,
+ FeatureHWDiv, FeatureHWDivARM,
+ FeatureAvoidPartialCPSR,
+ FeatureVirtualization,
+ FeatureTrustZone]>;
+
// FIXME: It has not been determined if A15 has these features.
def ProcA15 : SubtargetFeature<"a15", "ARMProcFamily", "CortexA15",
@@ -227,6 +248,26 @@ def ProcR5 : SubtargetFeature<"r5", "ARMProcFamily", "CortexR5",
FeatureAvoidPartialCPSR,
FeatureT2XtPk]>;
+// FIXME: krait currently has the same features as A9
+// plus VFP4 and hardware division features.
+def ProcKrait : SubtargetFeature<"krait", "ARMProcFamily", "Krait",
+ "Qualcomm ARM processors",
+ [FeatureVMLxForwarding,
+ FeatureT2XtPk, FeatureFP16,
+ FeatureAvoidPartialCPSR,
+ FeatureTrustZone,
+ FeatureVFP4,
+ FeatureHWDiv,
+ FeatureHWDivARM]>;
+
+
+def FeatureAPCS : SubtargetFeature<"apcs", "TargetABI", "ARM_ABI_APCS",
+ "Use the APCS ABI">;
+
+def FeatureAAPCS : SubtargetFeature<"aapcs", "TargetABI", "ARM_ABI_AAPCS",
+ "Use the AAPCS ABI">;
+
+
class ProcNoItin<string Name, list<SubtargetFeature> Features>
: Processor<Name, NoItineraries, Features>;
@@ -296,6 +337,10 @@ def : ProcessorModel<"cortex-a5", CortexA8Model,
[ProcA5, HasV7Ops, FeatureNEON, FeatureDB,
FeatureVFP4, FeatureDSPThumb2,
FeatureHasRAS, FeatureAClass]>;
+def : ProcessorModel<"cortex-a7", CortexA8Model,
+ [ProcA7, HasV7Ops, FeatureNEON, FeatureDB,
+ FeatureDSPThumb2, FeatureHasRAS,
+ FeatureAClass]>;
def : ProcessorModel<"cortex-a8", CortexA8Model,
[ProcA8, HasV7Ops, FeatureNEON, FeatureDB,
FeatureDSPThumb2, FeatureHasRAS,
@@ -308,11 +353,26 @@ def : ProcessorModel<"cortex-a9-mp", CortexA9Model,
[ProcA9, HasV7Ops, FeatureNEON, FeatureDB,
FeatureDSPThumb2, FeatureMP,
FeatureHasRAS, FeatureAClass]>;
+
+// FIXME: A12 currently has the same scheduling model as A9
+def : ProcessorModel<"cortex-a12", CortexA9Model,
+ [ProcA12, HasV7Ops, FeatureNEON, FeatureDB,
+ FeatureDSPThumb2, FeatureMP,
+ FeatureHasRAS, FeatureAClass]>;
+
// FIXME: A15 currently has the same ProcessorModel as A9.
def : ProcessorModel<"cortex-a15", CortexA9Model,
[ProcA15, HasV7Ops, FeatureNEON, FeatureDB,
FeatureDSPThumb2, FeatureHasRAS,
FeatureAClass]>;
+
+// FIXME: krait currently has the same scheduling model as A9
+def : ProcessorModel<"krait", CortexA9Model,
+ [ProcKrait, HasV7Ops,
+ FeatureNEON, FeatureDB,
+ FeatureDSPThumb2, FeatureHasRAS,
+ FeatureAClass]>;
+
// FIXME: R5 currently has the same ProcessorModel as A8.
def : ProcessorModel<"cortex-r5", CortexA8Model,
[ProcR5, HasV7Ops, FeatureDB,
@@ -347,6 +407,13 @@ def : ProcNoItin<"cortex-a57", [ProcA57, HasV8Ops, FeatureAClass,
FeatureDB, FeatureFPARMv8,
FeatureNEON, FeatureDSPThumb2]>;
+// Cyclone is very similar to Swift.
+def : ProcessorModel<"cyclone", SwiftModel,
+ [ProcSwift, HasV8Ops, HasV7Ops,
+ FeatureCrypto, FeatureFPARMv8,
+ FeatureDB, FeatureDSPThumb2,
+ FeatureHasRAS, FeatureZCZeroing]>;
+
//===----------------------------------------------------------------------===//
// Register File Description
//===----------------------------------------------------------------------===//
@@ -363,17 +430,6 @@ include "ARMInstrInfo.td"
def ARMInstrInfo : InstrInfo;
-
-//===----------------------------------------------------------------------===//
-// Assembly printer
-//===----------------------------------------------------------------------===//
-// ARM Uses the MC printer for asm output, so make sure the TableGen
-// AsmWriter bits get associated with the correct class.
-def ARMAsmWriter : AsmWriter {
- string AsmWriterClassName = "InstPrinter";
- bit isMCAsmWriter = 1;
-}
-
//===----------------------------------------------------------------------===//
// Declare the target which we are implementing
//===----------------------------------------------------------------------===//
@@ -381,6 +437,4 @@ def ARMAsmWriter : AsmWriter {
def ARM : Target {
// Pull in Instruction Info:
let InstructionSet = ARMInstrInfo;
-
- let AssemblyWriters = [ARMAsmWriter];
}
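FeatureZCZeroing follows the standard SubtargetFeature pattern: the "zcz" string flips a HasZeroCycleZeroing boolean on ARMSubtarget, which code generation can then query when choosing how to zero a VFP register. A hedged C++ sketch of the consuming side (the field and accessor names are assumptions; they are not part of this .td change):

    // ARMSubtarget.h (sketch): field populated from the TableGen feature bit.
    bool HasZeroCycleZeroing;
    bool hasZeroCycleZeroing() const { return HasZeroCycleZeroing; }

    // Callers can then prefer the zero-cycle zeroing idiom, e.g.:
    if (Subtarget->hasZeroCycleZeroing()) {
      // emit Cyclone's preferred VFP zeroing sequence
    }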
diff --git a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
index e79f88d..28d2610 100644
--- a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -12,10 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
#include "ARMAsmPrinter.h"
#include "ARM.h"
-#include "ARMBuildAttrs.h"
#include "ARMConstantPoolValue.h"
#include "ARMFPUName.h"
#include "ARMMachineFunctionInfo.h"
@@ -26,13 +24,13 @@
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -45,80 +43,19 @@
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/ARMBuildAttributes.h"
+#include "llvm/Support/COFF.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>
using namespace llvm;
-/// EmitDwarfRegOp - Emit dwarf register operation.
-void ARMAsmPrinter::EmitDwarfRegOp(const MachineLocation &MLoc,
- bool Indirect) const {
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
- if (RI->getDwarfRegNum(MLoc.getReg(), false) != -1) {
- AsmPrinter::EmitDwarfRegOp(MLoc, Indirect);
- return;
- }
- assert(MLoc.isReg() && !Indirect &&
- "This doesn't support offset/indirection - implement it if needed");
- unsigned Reg = MLoc.getReg();
- if (Reg >= ARM::S0 && Reg <= ARM::S31) {
- assert(ARM::S0 + 31 == ARM::S31 && "Unexpected ARM S register numbering");
- // S registers are described as bit-pieces of a register
- // S[2x] = DW_OP_regx(256 + (x>>1)) DW_OP_bit_piece(32, 0)
- // S[2x+1] = DW_OP_regx(256 + (x>>1)) DW_OP_bit_piece(32, 32)
-
- unsigned SReg = Reg - ARM::S0;
- bool odd = SReg & 0x1;
- unsigned Rx = 256 + (SReg >> 1);
-
- OutStreamer.AddComment("DW_OP_regx for S register");
- EmitInt8(dwarf::DW_OP_regx);
-
- OutStreamer.AddComment(Twine(SReg));
- EmitULEB128(Rx);
-
- if (odd) {
- OutStreamer.AddComment("DW_OP_bit_piece 32 32");
- EmitInt8(dwarf::DW_OP_bit_piece);
- EmitULEB128(32);
- EmitULEB128(32);
- } else {
- OutStreamer.AddComment("DW_OP_bit_piece 32 0");
- EmitInt8(dwarf::DW_OP_bit_piece);
- EmitULEB128(32);
- EmitULEB128(0);
- }
- } else if (Reg >= ARM::Q0 && Reg <= ARM::Q15) {
- assert(ARM::Q0 + 15 == ARM::Q15 && "Unexpected ARM Q register numbering");
- // Q registers Q0-Q15 are described by composing two D registers together.
- // Qx = DW_OP_regx(256+2x) DW_OP_piece(8) DW_OP_regx(256+2x+1)
- // DW_OP_piece(8)
-
- unsigned QReg = Reg - ARM::Q0;
- unsigned D1 = 256 + 2 * QReg;
- unsigned D2 = D1 + 1;
-
- OutStreamer.AddComment("DW_OP_regx for Q register: D1");
- EmitInt8(dwarf::DW_OP_regx);
- EmitULEB128(D1);
- OutStreamer.AddComment("DW_OP_piece 8");
- EmitInt8(dwarf::DW_OP_piece);
- EmitULEB128(8);
-
- OutStreamer.AddComment("DW_OP_regx for Q register: D2");
- EmitInt8(dwarf::DW_OP_regx);
- EmitULEB128(D2);
- OutStreamer.AddComment("DW_OP_piece 8");
- EmitInt8(dwarf::DW_OP_piece);
- EmitULEB128(8);
- }
-}
+#define DEBUG_TYPE "asm-printer"
void ARMAsmPrinter::EmitFunctionBodyEnd() {
// Make sure to terminate any constant pools that were at the end
@@ -145,12 +82,13 @@ void ARMAsmPrinter::EmitXXStructor(const Constant *CV) {
const GlobalValue *GV = dyn_cast<GlobalValue>(CV->stripPointerCasts());
assert(GV && "C++ constructor pointer was not a GlobalValue!");
- const MCExpr *E = MCSymbolRefExpr::Create(getSymbol(GV),
- (Subtarget->isTargetDarwin()
- ? MCSymbolRefExpr::VK_None
- : MCSymbolRefExpr::VK_ARM_TARGET1),
+ const MCExpr *E = MCSymbolRefExpr::Create(GetARMGVSymbol(GV,
+ ARMII::MO_NO_FLAG),
+ (Subtarget->isTargetELF()
+ ? MCSymbolRefExpr::VK_ARM_TARGET1
+ : MCSymbolRefExpr::VK_None),
OutContext);
-
+
OutStreamer.EmitValue(E, Size);
}
@@ -161,7 +99,28 @@ bool ARMAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
AFI = MF.getInfo<ARMFunctionInfo>();
MCP = MF.getConstantPool();
- return AsmPrinter::runOnMachineFunction(MF);
+ SetupMachineFunction(MF);
+
+ if (Subtarget->isTargetCOFF()) {
+ bool Internal = MF.getFunction()->hasInternalLinkage();
+ COFF::SymbolStorageClass Scl = Internal ? COFF::IMAGE_SYM_CLASS_STATIC
+ : COFF::IMAGE_SYM_CLASS_EXTERNAL;
+ int Type = COFF::IMAGE_SYM_DTYPE_FUNCTION << COFF::SCT_COMPLEX_TYPE_SHIFT;
+
+ OutStreamer.BeginCOFFSymbolDef(CurrentFnSym);
+ OutStreamer.EmitCOFFSymbolStorageClass(Scl);
+ OutStreamer.EmitCOFFSymbolType(Type);
+ OutStreamer.EndCOFFSymbolDef();
+ }
+
+ // Have common code print out the function header with linkage info etc.
+ EmitFunctionHeader();
+
+ // Emit the rest of the function body.
+ EmitFunctionBody();
+
+ // We didn't modify anything.
+ return false;
}
void ARMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
@@ -206,25 +165,16 @@ void ARMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
else if ((Modifier && strcmp(Modifier, "hi16") == 0) ||
(TF & ARMII::MO_HI16))
O << ":upper16:";
- O << *getSymbol(GV);
+ O << *GetARMGVSymbol(GV, TF);
printOffset(MO.getOffset(), O);
if (TF == ARMII::MO_PLT)
O << "(PLT)";
break;
}
- case MachineOperand::MO_ExternalSymbol: {
- O << *GetExternalSymbolSymbol(MO.getSymbolName());
- if (TF == ARMII::MO_PLT)
- O << "(PLT)";
- break;
- }
case MachineOperand::MO_ConstantPoolIndex:
O << *GetCPISymbol(MO.getIndex());
break;
- case MachineOperand::MO_JumpTableIndex:
- O << *GetJTISymbol(MO.getIndex());
- break;
}
}
@@ -232,16 +182,18 @@ void ARMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
MCSymbol *ARMAsmPrinter::
GetARMJTIPICJumpTableLabel2(unsigned uid, unsigned uid2) const {
+ const DataLayout *DL = TM.getDataLayout();
SmallString<60> Name;
- raw_svector_ostream(Name) << MAI->getPrivateGlobalPrefix() << "JTI"
+ raw_svector_ostream(Name) << DL->getPrivateGlobalPrefix() << "JTI"
<< getFunctionNumber() << '_' << uid << '_' << uid2;
return OutContext.GetOrCreateSymbol(Name.str());
}
MCSymbol *ARMAsmPrinter::GetARMSJLJEHLabel() const {
+ const DataLayout *DL = TM.getDataLayout();
SmallString<60> Name;
- raw_svector_ostream(Name) << MAI->getPrivateGlobalPrefix() << "SJLJEH"
+ raw_svector_ostream(Name) << DL->getPrivateGlobalPrefix() << "SJLJEH"
<< getFunctionNumber();
return OutContext.GetOrCreateSymbol(Name.str());
}
@@ -311,7 +263,7 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
if (ARM::GPRPairRegClass.contains(RegBegin)) {
const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
unsigned Reg0 = TRI->getSubReg(RegBegin, ARM::gsub_0);
- O << ARMInstPrinter::getRegisterName(Reg0) << ", ";;
+ O << ARMInstPrinter::getRegisterName(Reg0) << ", ";
RegBegin = TRI->getSubReg(RegBegin, ARM::gsub_1);
}
O << ARMInstPrinter::getRegisterName(RegBegin);
@@ -446,8 +398,22 @@ bool ARMAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
return false;
}
+static bool isThumb(const MCSubtargetInfo& STI) {
+ return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
+}
+
+void ARMAsmPrinter::emitInlineAsmEnd(const MCSubtargetInfo &StartInfo,
+ const MCSubtargetInfo *EndInfo) const {
+ // If either end mode is unknown (EndInfo == NULL) or different than
+ // the start mode, then restore the start mode.
+ const bool WasThumb = isThumb(StartInfo);
+ if (!EndInfo || WasThumb != isThumb(*EndInfo)) {
+ OutStreamer.EmitAssemblerFlag(WasThumb ? MCAF_Code16 : MCAF_Code32);
+ }
+}
+
void ARMAsmPrinter::EmitStartOfAsmFile(Module &M) {
- if (Subtarget->isTargetDarwin()) {
+ if (Subtarget->isTargetMachO()) {
Reloc::Model RelocM = TM.getRelocationModel();
if (RelocM == Reloc::PIC_ || RelocM == Reloc::DynamicNoPIC) {
// Declare all the text sections up front (before the DWARF sections
@@ -468,7 +434,7 @@ void ARMAsmPrinter::EmitStartOfAsmFile(Module &M) {
// Now any user defined text sections from function attributes.
for (Module::iterator F = M.begin(), e = M.end(); F != e; ++F)
if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage())
- TextSections.insert(TLOFMacho.SectionForGlobal(F, Mang, TM));
+ TextSections.insert(TLOFMacho.SectionForGlobal(F, *Mang, TM));
// Now the coalescable sections.
TextSections.insert(TLOFMacho.getTextCoalSection());
TextSections.insert(TLOFMacho.getConstTextCoalSection());
@@ -480,23 +446,30 @@ void ARMAsmPrinter::EmitStartOfAsmFile(Module &M) {
if (RelocM == Reloc::DynamicNoPIC) {
const MCSection *sect =
OutContext.getMachOSection("__TEXT", "__symbol_stub4",
- MCSectionMachO::S_SYMBOL_STUBS,
+ MachO::S_SYMBOL_STUBS,
12, SectionKind::getText());
OutStreamer.SwitchSection(sect);
} else {
const MCSection *sect =
OutContext.getMachOSection("__TEXT", "__picsymbolstub4",
- MCSectionMachO::S_SYMBOL_STUBS,
+ MachO::S_SYMBOL_STUBS,
16, SectionKind::getText());
OutStreamer.SwitchSection(sect);
}
const MCSection *StaticInitSect =
OutContext.getMachOSection("__TEXT", "__StaticInit",
- MCSectionMachO::S_REGULAR |
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
+ MachO::S_REGULAR |
+ MachO::S_ATTR_PURE_INSTRUCTIONS,
SectionKind::getText());
OutStreamer.SwitchSection(StaticInitSect);
}
+
+ // Compiling with debug info should not affect the code
+ // generation. Ensure the cstring section comes before the
+ // optional __DWARF section. Otherwise, PC-relative loads would
+ // have to use different instruction sequences at "-g" in order to
+ // reach global data in the same object file.
+ OutStreamer.SwitchSection(getObjFileLowering().getCStringSection());
}
// Use unified assembler syntax.
@@ -507,9 +480,32 @@ void ARMAsmPrinter::EmitStartOfAsmFile(Module &M) {
emitAttributes();
}
+static void
+emitNonLazySymbolPointer(MCStreamer &OutStreamer, MCSymbol *StubLabel,
+ MachineModuleInfoImpl::StubValueTy &MCSym) {
+ // L_foo$stub:
+ OutStreamer.EmitLabel(StubLabel);
+ // .indirect_symbol _foo
+ OutStreamer.EmitSymbolAttribute(MCSym.getPointer(), MCSA_IndirectSymbol);
+
+ if (MCSym.getInt())
+ // External to current translation unit.
+ OutStreamer.EmitIntValue(0, 4/*size*/);
+ else
+ // Internal to current translation unit.
+ //
+ // When we place the LSDA into the TEXT section, the type info
+ // pointers need to be indirect and pc-rel. We accomplish this by
+ // using NLPs; however, sometimes the types are local to the file.
+ // We need to fill in the value for the NLP in those cases.
+ OutStreamer.EmitValue(
+ MCSymbolRefExpr::Create(MCSym.getPointer(), OutStreamer.getContext()),
+ 4 /*size*/);
+}
+
void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
- if (Subtarget->isTargetDarwin()) {
+ if (Subtarget->isTargetMachO()) {
// All darwin targets use mach-o.
const TargetLoweringObjectFileMachO &TLOFMacho =
static_cast<const TargetLoweringObjectFileMachO &>(getObjFileLowering());
@@ -523,27 +519,9 @@ void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
// Switch with ".non_lazy_symbol_pointer" directive.
OutStreamer.SwitchSection(TLOFMacho.getNonLazySymbolPointerSection());
EmitAlignment(2);
- for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
- // L_foo$stub:
- OutStreamer.EmitLabel(Stubs[i].first);
- // .indirect_symbol _foo
- MachineModuleInfoImpl::StubValueTy &MCSym = Stubs[i].second;
- OutStreamer.EmitSymbolAttribute(MCSym.getPointer(),MCSA_IndirectSymbol);
-
- if (MCSym.getInt())
- // External to current translation unit.
- OutStreamer.EmitIntValue(0, 4/*size*/);
- else
- // Internal to current translation unit.
- //
- // When we place the LSDA into the TEXT section, the type info
- // pointers need to be indirect and pc-rel. We accomplish this by
- // using NLPs; however, sometimes the types are local to the file.
- // We need to fill in the value for the NLP in those cases.
- OutStreamer.EmitValue(MCSymbolRefExpr::Create(MCSym.getPointer(),
- OutContext),
- 4/*size*/);
- }
+
+ for (auto &Stub : Stubs)
+ emitNonLazySymbolPointer(OutStreamer, Stub.first, Stub.second);
Stubs.clear();
OutStreamer.AddBlankLine();
@@ -551,17 +529,11 @@ void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
Stubs = MMIMacho.GetHiddenGVStubList();
if (!Stubs.empty()) {
- OutStreamer.SwitchSection(getObjFileLowering().getDataSection());
+ OutStreamer.SwitchSection(TLOFMacho.getNonLazySymbolPointerSection());
EmitAlignment(2);
- for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
- // L_foo$stub:
- OutStreamer.EmitLabel(Stubs[i].first);
- // .long _foo
- OutStreamer.EmitValue(MCSymbolRefExpr::
- Create(Stubs[i].second.getPointer(),
- OutContext),
- 4/*size*/);
- }
+
+ for (auto &Stub : Stubs)
+ emitNonLazySymbolPointer(OutStreamer, Stub.first, Stub.second);
Stubs.clear();
OutStreamer.AddBlankLine();
@@ -574,6 +546,28 @@ void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
// generates code that does this, it is always safe to set.
OutStreamer.EmitAssemblerFlag(MCAF_SubsectionsViaSymbols);
}
+
+ // Emit a .data.rel section containing any stubs that were created.
+ if (Subtarget->isTargetELF()) {
+ const TargetLoweringObjectFileELF &TLOFELF =
+ static_cast<const TargetLoweringObjectFileELF &>(getObjFileLowering());
+
+ MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
+
+ // Output stubs for external and common global variables.
+ MachineModuleInfoELF::SymbolListTy Stubs = MMIELF.GetGVStubList();
+ if (!Stubs.empty()) {
+ OutStreamer.SwitchSection(TLOFELF.getDataRelSection());
+ const DataLayout *TD = TM.getDataLayout();
+
+ for (auto &stub: Stubs) {
+ OutStreamer.EmitLabel(stub.first);
+ OutStreamer.EmitSymbolValue(stub.second.getPointer(),
+ TD->getPointerSize(0));
+ }
+ Stubs.clear();
+ }
+ }
}
//===----------------------------------------------------------------------===//
@@ -611,28 +605,33 @@ static ARMBuildAttrs::CPUArch getArchForCPU(StringRef CPU,
}
void ARMAsmPrinter::emitAttributes() {
- MCTargetStreamer &TS = OutStreamer.getTargetStreamer();
+ MCTargetStreamer &TS = *OutStreamer.getTargetStreamer();
ARMTargetStreamer &ATS = static_cast<ARMTargetStreamer &>(TS);
ATS.switchVendor("aeabi");
std::string CPUString = Subtarget->getCPUString();
- if (CPUString != "generic")
+ // FIXME: remove krait check when GNU tools support krait cpu
+ if (CPUString != "generic" && CPUString != "krait")
ATS.emitTextAttribute(ARMBuildAttrs::CPU_name, CPUString);
ATS.emitAttribute(ARMBuildAttrs::CPU_arch,
getArchForCPU(CPUString, Subtarget));
- if (Subtarget->isAClass()) {
- ATS.emitAttribute(ARMBuildAttrs::CPU_arch_profile,
- ARMBuildAttrs::ApplicationProfile);
- } else if (Subtarget->isRClass()) {
- ATS.emitAttribute(ARMBuildAttrs::CPU_arch_profile,
- ARMBuildAttrs::RealTimeProfile);
- } else if (Subtarget->isMClass()){
- ATS.emitAttribute(ARMBuildAttrs::CPU_arch_profile,
- ARMBuildAttrs::MicroControllerProfile);
+ // Tag_CPU_arch_profile must have the default value of 0 when "Architecture
+ // profile is not applicable (e.g. pre v7, or cross-profile code)".
+ if (Subtarget->hasV7Ops()) {
+ if (Subtarget->isAClass()) {
+ ATS.emitAttribute(ARMBuildAttrs::CPU_arch_profile,
+ ARMBuildAttrs::ApplicationProfile);
+ } else if (Subtarget->isRClass()) {
+ ATS.emitAttribute(ARMBuildAttrs::CPU_arch_profile,
+ ARMBuildAttrs::RealTimeProfile);
+ } else if (Subtarget->isMClass()) {
+ ATS.emitAttribute(ARMBuildAttrs::CPU_arch_profile,
+ ARMBuildAttrs::MicroControllerProfile);
+ }
}
ATS.emitAttribute(ARMBuildAttrs::ARM_ISA_use, Subtarget->hasARMOps() ?
@@ -673,6 +672,20 @@ void ARMAsmPrinter::emitAttributes() {
ATS.emitFPU(ARM::VFPV2);
}
+ if (TM.getRelocationModel() == Reloc::PIC_) {
+ // PIC specific attributes.
+ ATS.emitAttribute(ARMBuildAttrs::ABI_PCS_RW_data,
+ ARMBuildAttrs::AddressRWPCRel);
+ ATS.emitAttribute(ARMBuildAttrs::ABI_PCS_RO_data,
+ ARMBuildAttrs::AddressROPCRel);
+ ATS.emitAttribute(ARMBuildAttrs::ABI_PCS_GOT_use,
+ ARMBuildAttrs::AddressGOT);
+ } else {
+ // Allow direct addressing of imported data for all other relocation models.
+ ATS.emitAttribute(ARMBuildAttrs::ABI_PCS_GOT_use,
+ ARMBuildAttrs::AddressDirect);
+ }
+
// Signal various FP modes.
if (!TM.Options.UnsafeFPMath) {
ATS.emitAttribute(ARMBuildAttrs::ABI_FP_denormal, ARMBuildAttrs::Allowed);
@@ -687,10 +700,10 @@ void ARMAsmPrinter::emitAttributes() {
ATS.emitAttribute(ARMBuildAttrs::ABI_FP_number_model,
ARMBuildAttrs::AllowIEE754);
- // FIXME: add more flags to ARMBuildAttrs.h
+ // FIXME: add more flags to ARMBuildAttributes.h
// 8-byte alignment attributes.
- ATS.emitAttribute(ARMBuildAttrs::ABI_align8_needed, 1);
- ATS.emitAttribute(ARMBuildAttrs::ABI_align8_preserved, 1);
+ ATS.emitAttribute(ARMBuildAttrs::ABI_align_needed, 1);
+ ATS.emitAttribute(ARMBuildAttrs::ABI_align_preserved, 1);
// ABI_HardFP_use attribute to indicate single precision FP.
if (Subtarget->isFPOnlySP())
@@ -709,11 +722,39 @@ void ARMAsmPrinter::emitAttributes() {
if (Subtarget->hasMPExtension())
ATS.emitAttribute(ARMBuildAttrs::MPextension_use, ARMBuildAttrs::AllowMP);
- if (Subtarget->hasDivide()) {
- // Check if hardware divide is only available in thumb2 or ARM as well.
- ATS.emitAttribute(ARMBuildAttrs::DIV_use,
- Subtarget->hasDivideInARMMode() ? ARMBuildAttrs::AllowDIVExt :
- ARMBuildAttrs::AllowDIVIfExists);
+ // Hardware divide in ARM mode is part of base arch, starting from ARMv8.
+ // If only Thumb hwdiv is present, it must also be in base arch (ARMv7-R/M).
+ // It is not possible to produce DisallowDIV: if hwdiv is present in the base
+ // arch, supplying -hwdiv downgrades the effective arch, via ClearImpliedBits.
+ // AllowDIVExt is only emitted if hwdiv isn't available in the base arch;
+ // otherwise, the default value (AllowDIVIfExists) applies.
+ if (Subtarget->hasDivideInARMMode() && !Subtarget->hasV8Ops())
+ ATS.emitAttribute(ARMBuildAttrs::DIV_use, ARMBuildAttrs::AllowDIVExt);
+
+ if (MMI) {
+ if (const Module *SourceModule = MMI->getModule()) {
+ // ABI_PCS_wchar_t to indicate wchar_t width
+ // FIXME: There is no way to emit value 0 (wchar_t prohibited).
+ if (auto WCharWidthValue = cast_or_null<ConstantInt>(
+ SourceModule->getModuleFlag("wchar_size"))) {
+ int WCharWidth = WCharWidthValue->getZExtValue();
+ assert((WCharWidth == 2 || WCharWidth == 4) &&
+ "wchar_t width must be 2 or 4 bytes");
+ ATS.emitAttribute(ARMBuildAttrs::ABI_PCS_wchar_t, WCharWidth);
+ }
+
+ // ABI_enum_size to indicate enum width
+ // FIXME: There is no way to emit value 0 (enums prohibited) or value 3
+ // (all enums contain a value needing 32 bits to encode).
+ if (auto EnumWidthValue = cast_or_null<ConstantInt>(
+ SourceModule->getModuleFlag("min_enum_size"))) {
+ int EnumWidth = EnumWidthValue->getZExtValue();
+ assert((EnumWidth == 1 || EnumWidth == 4) &&
+ "Minimum enum width must be 1 or 4 bytes");
+ int EnumBuildAttr = EnumWidth == 1 ? 1 : 2;
+ ATS.emitAttribute(ARMBuildAttrs::ABI_enum_size, EnumBuildAttr);
+ }
+ }
}
if (Subtarget->hasTrustZone() && Subtarget->hasVirtualization())
@@ -729,28 +770,6 @@ void ARMAsmPrinter::emitAttributes() {
ATS.finishAttributeSection();
}
-void ARMAsmPrinter::emitARMAttributeSection() {
- // <format-version>
- // [ <section-length> "vendor-name"
- // [ <file-tag> <size> <attribute>*
- // | <section-tag> <size> <section-number>* 0 <attribute>*
- // | <symbol-tag> <size> <symbol-number>* 0 <attribute>*
- // ]+
- // ]*
-
- if (OutStreamer.hasRawTextSupport())
- return;
-
- const ARMElfTargetObjectFile &TLOFELF =
- static_cast<const ARMElfTargetObjectFile &>
- (getObjFileLowering());
-
- OutStreamer.SwitchSection(TLOFELF.getAttributesSection());
-
- // Format version
- OutStreamer.EmitIntValue(0x41, 1);
-}
-
//===----------------------------------------------------------------------===//
static MCSymbol *getPICLabel(const char *Prefix, unsigned FunctionNumber,
@@ -765,36 +784,57 @@ static MCSymbolRefExpr::VariantKind
getModifierVariantKind(ARMCP::ARMCPModifier Modifier) {
switch (Modifier) {
case ARMCP::no_modifier: return MCSymbolRefExpr::VK_None;
- case ARMCP::TLSGD: return MCSymbolRefExpr::VK_ARM_TLSGD;
- case ARMCP::TPOFF: return MCSymbolRefExpr::VK_ARM_TPOFF;
- case ARMCP::GOTTPOFF: return MCSymbolRefExpr::VK_ARM_GOTTPOFF;
- case ARMCP::GOT: return MCSymbolRefExpr::VK_ARM_GOT;
- case ARMCP::GOTOFF: return MCSymbolRefExpr::VK_ARM_GOTOFF;
+ case ARMCP::TLSGD: return MCSymbolRefExpr::VK_TLSGD;
+ case ARMCP::TPOFF: return MCSymbolRefExpr::VK_TPOFF;
+ case ARMCP::GOTTPOFF: return MCSymbolRefExpr::VK_GOTTPOFF;
+ case ARMCP::GOT: return MCSymbolRefExpr::VK_GOT;
+ case ARMCP::GOTOFF: return MCSymbolRefExpr::VK_GOTOFF;
}
llvm_unreachable("Invalid ARMCPModifier!");
}
-MCSymbol *ARMAsmPrinter::GetARMGVSymbol(const GlobalValue *GV) {
- bool isIndirect = Subtarget->isTargetDarwin() &&
- Subtarget->GVIsIndirectSymbol(GV, TM.getRelocationModel());
- if (!isIndirect)
- return getSymbol(GV);
+MCSymbol *ARMAsmPrinter::GetARMGVSymbol(const GlobalValue *GV,
+ unsigned char TargetFlags) {
+ if (Subtarget->isTargetMachO()) {
+ bool IsIndirect = (TargetFlags & ARMII::MO_NONLAZY) &&
+ Subtarget->GVIsIndirectSymbol(GV, TM.getRelocationModel());
+
+ if (!IsIndirect)
+ return getSymbol(GV);
- // FIXME: Remove this when Darwin transition to @GOT like syntax.
- MCSymbol *MCSym = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
- MachineModuleInfoMachO &MMIMachO =
- MMI->getObjFileInfo<MachineModuleInfoMachO>();
- MachineModuleInfoImpl::StubValueTy &StubSym =
- GV->hasHiddenVisibility() ? MMIMachO.getHiddenGVStubEntry(MCSym) :
- MMIMachO.getGVStubEntry(MCSym);
- if (StubSym.getPointer() == 0)
- StubSym = MachineModuleInfoImpl::
- StubValueTy(getSymbol(GV), !GV->hasInternalLinkage());
- return MCSym;
+ // FIXME: Remove this when Darwin transition to @GOT like syntax.
+ MCSymbol *MCSym = getSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
+ MachineModuleInfoMachO &MMIMachO =
+ MMI->getObjFileInfo<MachineModuleInfoMachO>();
+ MachineModuleInfoImpl::StubValueTy &StubSym =
+ GV->hasHiddenVisibility() ? MMIMachO.getHiddenGVStubEntry(MCSym)
+ : MMIMachO.getGVStubEntry(MCSym);
+ if (!StubSym.getPointer())
+ StubSym = MachineModuleInfoImpl::StubValueTy(getSymbol(GV),
+ !GV->hasInternalLinkage());
+ return MCSym;
+ } else if (Subtarget->isTargetCOFF()) {
+ assert(Subtarget->isTargetWindows() &&
+ "Windows is the only supported COFF target");
+
+ bool IsIndirect = (TargetFlags & ARMII::MO_DLLIMPORT);
+ if (!IsIndirect)
+ return getSymbol(GV);
+
+ SmallString<128> Name;
+ Name = "__imp_";
+ getNameWithPrefix(Name, GV);
+
+ return OutContext.GetOrCreateSymbol(Name);
+ } else if (Subtarget->isTargetELF()) {
+ return getSymbol(GV);
+ }
+ llvm_unreachable("unexpected target");
}
void ARMAsmPrinter::
EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) {
+ const DataLayout *DL = TM.getDataLayout();
int Size = TM.getDataLayout()->getTypeAllocSize(MCPV->getType());
ARMConstantPoolValue *ACPV = static_cast<ARMConstantPoolValue*>(MCPV);
@@ -803,7 +843,7 @@ EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) {
if (ACPV->isLSDA()) {
SmallString<128> Str;
raw_svector_ostream OS(Str);
- OS << MAI->getPrivateGlobalPrefix() << "_LSDA_" << getFunctionNumber();
+ OS << DL->getPrivateGlobalPrefix() << "_LSDA_" << getFunctionNumber();
MCSym = OutContext.GetOrCreateSymbol(OS.str());
} else if (ACPV->isBlockAddress()) {
const BlockAddress *BA =
@@ -811,7 +851,11 @@ EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) {
MCSym = GetBlockAddressSymbol(BA);
} else if (ACPV->isGlobalValue()) {
const GlobalValue *GV = cast<ARMConstantPoolConstant>(ACPV)->getGV();
- MCSym = GetARMGVSymbol(GV);
+
+ // On Darwin, const-pool entries may get the "FOO$non_lazy_ptr" mangling, so
+ // flag the global as MO_NONLAZY.
+ unsigned char TF = Subtarget->isTargetMachO() ? ARMII::MO_NONLAZY : 0;
+ MCSym = GetARMGVSymbol(GV, TF);
} else if (ACPV->isMachineBasicBlock()) {
const MachineBasicBlock *MBB = cast<ARMConstantPoolMBB>(ACPV)->getMBB();
MCSym = MBB->getSymbol();
@@ -827,7 +871,7 @@ EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) {
OutContext);
if (ACPV->getPCAdjustment()) {
- MCSymbol *PCLabel = getPICLabel(MAI->getPrivateGlobalPrefix(),
+ MCSymbol *PCLabel = getPICLabel(DL->getPrivateGlobalPrefix(),
getFunctionNumber(),
ACPV->getLabelId(),
OutContext);
@@ -929,10 +973,10 @@ void ARMAsmPrinter::EmitJump2Table(const MachineInstr *MI) {
for (unsigned i = 0, e = JTBBs.size(); i != e; ++i) {
MachineBasicBlock *MBB = JTBBs[i];
const MCExpr *MBBSymbolExpr = MCSymbolRefExpr::Create(MBB->getSymbol(),
- OutContext);
+ OutContext);
// If this isn't a TBB or TBH, the entries are direct branch instructions.
if (OffsetWidth == 4) {
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::t2B)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::t2B)
.addExpr(MBBSymbolExpr)
.addImm(ARMCC::AL)
.addReg(0));
@@ -966,7 +1010,7 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
assert(MI->getFlag(MachineInstr::FrameSetup) &&
"Only instruction which are involved into frame setup code are allowed");
- MCTargetStreamer &TS = OutStreamer.getTargetStreamer();
+ MCTargetStreamer &TS = *OutStreamer.getTargetStreamer();
ARMTargetStreamer &ATS = static_cast<ARMTargetStreamer &>(TS);
const MachineFunction &MF = *MI->getParent()->getParent();
const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo();
@@ -1030,7 +1074,8 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
RegList.push_back(SrcReg);
break;
}
- ATS.emitRegSave(RegList, Opc == ARM::VSTMDDB_UPD);
+ if (MAI->getExceptionHandlingType() == ExceptionHandling::ARM)
+ ATS.emitRegSave(RegList, Opc == ARM::VSTMDDB_UPD);
} else {
// Changes of stack / frame pointer.
if (SrcReg == ARM::SP) {
@@ -1075,20 +1120,22 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
}
}
- if (DstReg == FramePtr && FramePtr != ARM::SP)
- // Set-up of the frame pointer. Positive values correspond to "add"
- // instruction.
- ATS.emitSetFP(FramePtr, ARM::SP, -Offset);
- else if (DstReg == ARM::SP) {
- // Change of SP by an offset. Positive values correspond to "sub"
- // instruction.
- ATS.emitPad(Offset);
- } else {
- MI->dump();
- llvm_unreachable("Unsupported opcode for unwinding information");
+ if (MAI->getExceptionHandlingType() == ExceptionHandling::ARM) {
+ if (DstReg == FramePtr && FramePtr != ARM::SP)
+ // Set-up of the frame pointer. Positive values correspond to "add"
+ // instruction.
+ ATS.emitSetFP(FramePtr, ARM::SP, -Offset);
+ else if (DstReg == ARM::SP) {
+ // Change of SP by an offset. Positive values correspond to "sub"
+ // instruction.
+ ATS.emitPad(Offset);
+ } else {
+ // Move of SP to a register. Positive values correspond to an "add"
+ // instruction.
+ ATS.emitMovSP(DstReg, -Offset);
+ }
}
} else if (DstReg == ARM::SP) {
- // FIXME: .movsp goes here
MI->dump();
llvm_unreachable("Unsupported opcode for unwinding information");
}
@@ -1099,13 +1146,13 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
}
}
-extern cl::opt<bool> EnableARMEHABI;
-
// Simple pseudo-instructions have their lowering (with expansion to real
// instructions) auto-generated.
#include "ARMGenMCPseudoLowering.inc"
void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
+ const DataLayout *DL = TM.getDataLayout();
+
// If we just ended a constant pool, mark it as such.
if (InConstantPool && MI->getOpcode() != ARM::CONSTPOOL_ENTRY) {
OutStreamer.EmitDataRegion(MCDR_DataRegionEnd);
@@ -1113,7 +1160,8 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
// Emit unwinding stuff for frame-related instructions
- if (EnableARMEHABI && MI->getFlag(MachineInstr::FrameSetup))
+ if (Subtarget->isTargetEHABICompatible() &&
+ MI->getFlag(MachineInstr::FrameSetup))
EmitUnwindingInstruction(MI);
// Do any auto-generated pseudo lowerings.
@@ -1133,7 +1181,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
case ARM::t2LEApcrel: {
// FIXME: Need to also handle globals and externals
MCSymbol *CPISymbol = GetCPISymbol(MI->getOperand(1).getIndex());
- OutStreamer.EmitInstruction(MCInstBuilder(MI->getOpcode() ==
+ EmitToStreamer(OutStreamer, MCInstBuilder(MI->getOpcode() ==
ARM::t2LEApcrel ? ARM::t2ADR
: (MI->getOpcode() == ARM::tLEApcrel ? ARM::tADR
: ARM::ADR))
@@ -1150,7 +1198,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCSymbol *JTIPICSymbol =
GetARMJTIPICJumpTableLabel2(MI->getOperand(1).getIndex(),
MI->getOperand(2).getImm());
- OutStreamer.EmitInstruction(MCInstBuilder(MI->getOpcode() ==
+ EmitToStreamer(OutStreamer, MCInstBuilder(MI->getOpcode() ==
ARM::t2LEApcrelJT ? ARM::t2ADR
: (MI->getOpcode() == ARM::tLEApcrelJT ? ARM::tADR
: ARM::ADR))
@@ -1164,7 +1212,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// Darwin call instructions are just normal call instructions with different
// clobber semantics (they clobber R9).
case ARM::BX_CALL: {
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::MOVr)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::MOVr)
.addReg(ARM::LR)
.addReg(ARM::PC)
// Add predicate operands.
@@ -1173,19 +1221,19 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// Add 's' bit operand (always reg0 for this)
.addReg(0));
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::BX)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::BX)
.addReg(MI->getOperand(0).getReg()));
return;
}
case ARM::tBX_CALL: {
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::tMOVr)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::tMOVr)
.addReg(ARM::LR)
.addReg(ARM::PC)
// Add predicate operands.
.addImm(ARMCC::AL)
.addReg(0));
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::tBX)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::tBX)
.addReg(MI->getOperand(0).getReg())
// Add predicate operands.
.addImm(ARMCC::AL)
@@ -1193,7 +1241,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
return;
}
case ARM::BMOVPCRX_CALL: {
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::MOVr)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::MOVr)
.addReg(ARM::LR)
.addReg(ARM::PC)
// Add predicate operands.
@@ -1202,7 +1250,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// Add 's' bit operand (always reg0 for this)
.addReg(0));
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::MOVr)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::MOVr)
.addReg(ARM::PC)
.addReg(MI->getOperand(0).getReg())
// Add predicate operands.
@@ -1213,7 +1261,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
return;
}
case ARM::BMOVPCB_CALL: {
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::MOVr)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::MOVr)
.addReg(ARM::LR)
.addReg(ARM::PC)
// Add predicate operands.
@@ -1222,10 +1270,12 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// Add 's' bit operand (always reg0 for this)
.addReg(0));
- const GlobalValue *GV = MI->getOperand(0).getGlobal();
- MCSymbol *GVSym = getSymbol(GV);
+ const MachineOperand &Op = MI->getOperand(0);
+ const GlobalValue *GV = Op.getGlobal();
+ const unsigned TF = Op.getTargetFlags();
+ MCSymbol *GVSym = GetARMGVSymbol(GV, TF);
const MCExpr *GVSymExpr = MCSymbolRefExpr::Create(GVSym, OutContext);
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::Bcc)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::Bcc)
.addExpr(GVSymExpr)
// Add predicate operands.
.addImm(ARMCC::AL)
@@ -1239,33 +1289,28 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
unsigned TF = MI->getOperand(1).getTargetFlags();
- bool isPIC = TF == ARMII::MO_LO16_NONLAZY_PIC;
const GlobalValue *GV = MI->getOperand(1).getGlobal();
- MCSymbol *GVSym = GetARMGVSymbol(GV);
+ MCSymbol *GVSym = GetARMGVSymbol(GV, TF);
const MCExpr *GVSymExpr = MCSymbolRefExpr::Create(GVSym, OutContext);
- if (isPIC) {
- MCSymbol *LabelSym = getPICLabel(MAI->getPrivateGlobalPrefix(),
- getFunctionNumber(),
- MI->getOperand(2).getImm(), OutContext);
- const MCExpr *LabelSymExpr= MCSymbolRefExpr::Create(LabelSym, OutContext);
- unsigned PCAdj = (Opc == ARM::MOVi16_ga_pcrel) ? 8 : 4;
- const MCExpr *PCRelExpr =
- ARMMCExpr::CreateLower16(MCBinaryExpr::CreateSub(GVSymExpr,
- MCBinaryExpr::CreateAdd(LabelSymExpr,
+
+ MCSymbol *LabelSym = getPICLabel(DL->getPrivateGlobalPrefix(),
+ getFunctionNumber(),
+ MI->getOperand(2).getImm(), OutContext);
+ const MCExpr *LabelSymExpr= MCSymbolRefExpr::Create(LabelSym, OutContext);
+ unsigned PCAdj = (Opc == ARM::MOVi16_ga_pcrel) ? 8 : 4;
+ const MCExpr *PCRelExpr =
+ ARMMCExpr::CreateLower16(MCBinaryExpr::CreateSub(GVSymExpr,
+ MCBinaryExpr::CreateAdd(LabelSymExpr,
MCConstantExpr::Create(PCAdj, OutContext),
- OutContext), OutContext), OutContext);
+ OutContext), OutContext), OutContext);
TmpInst.addOperand(MCOperand::CreateExpr(PCRelExpr));
- } else {
- const MCExpr *RefExpr= ARMMCExpr::CreateLower16(GVSymExpr, OutContext);
- TmpInst.addOperand(MCOperand::CreateExpr(RefExpr));
- }
// Add predicate operands.
TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
TmpInst.addOperand(MCOperand::CreateReg(0));
// Add 's' bit operand (always reg0 for this)
TmpInst.addOperand(MCOperand::CreateReg(0));
- OutStreamer.EmitInstruction(TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
return;
}
case ARM::MOVTi16_ga_pcrel:
@@ -1277,32 +1322,27 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(1).getReg()));
unsigned TF = MI->getOperand(2).getTargetFlags();
- bool isPIC = TF == ARMII::MO_HI16_NONLAZY_PIC;
const GlobalValue *GV = MI->getOperand(2).getGlobal();
- MCSymbol *GVSym = GetARMGVSymbol(GV);
+ MCSymbol *GVSym = GetARMGVSymbol(GV, TF);
const MCExpr *GVSymExpr = MCSymbolRefExpr::Create(GVSym, OutContext);
- if (isPIC) {
- MCSymbol *LabelSym = getPICLabel(MAI->getPrivateGlobalPrefix(),
- getFunctionNumber(),
- MI->getOperand(3).getImm(), OutContext);
- const MCExpr *LabelSymExpr= MCSymbolRefExpr::Create(LabelSym, OutContext);
- unsigned PCAdj = (Opc == ARM::MOVTi16_ga_pcrel) ? 8 : 4;
- const MCExpr *PCRelExpr =
+
+ MCSymbol *LabelSym = getPICLabel(DL->getPrivateGlobalPrefix(),
+ getFunctionNumber(),
+ MI->getOperand(3).getImm(), OutContext);
+ const MCExpr *LabelSymExpr= MCSymbolRefExpr::Create(LabelSym, OutContext);
+ unsigned PCAdj = (Opc == ARM::MOVTi16_ga_pcrel) ? 8 : 4;
+ const MCExpr *PCRelExpr =
ARMMCExpr::CreateUpper16(MCBinaryExpr::CreateSub(GVSymExpr,
MCBinaryExpr::CreateAdd(LabelSymExpr,
MCConstantExpr::Create(PCAdj, OutContext),
OutContext), OutContext), OutContext);
TmpInst.addOperand(MCOperand::CreateExpr(PCRelExpr));
- } else {
- const MCExpr *RefExpr= ARMMCExpr::CreateUpper16(GVSymExpr, OutContext);
- TmpInst.addOperand(MCOperand::CreateExpr(RefExpr));
- }
// Add predicate operands.
TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
TmpInst.addOperand(MCOperand::CreateReg(0));
// Add 's' bit operand (always reg0 for this)
TmpInst.addOperand(MCOperand::CreateReg(0));
- OutStreamer.EmitInstruction(TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
return;
}
case ARM::tPICADD: {
@@ -1312,12 +1352,12 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// This adds the address of LPC0 to r0.
// Emit the label.
- OutStreamer.EmitLabel(getPICLabel(MAI->getPrivateGlobalPrefix(),
+ OutStreamer.EmitLabel(getPICLabel(DL->getPrivateGlobalPrefix(),
getFunctionNumber(), MI->getOperand(2).getImm(),
OutContext));
// Form and emit the add.
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::tADDhirr)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::tADDhirr)
.addReg(MI->getOperand(0).getReg())
.addReg(MI->getOperand(0).getReg())
.addReg(ARM::PC)
@@ -1333,12 +1373,12 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// This adds the address of LPC0 to r0.
// Emit the label.
- OutStreamer.EmitLabel(getPICLabel(MAI->getPrivateGlobalPrefix(),
+ OutStreamer.EmitLabel(getPICLabel(DL->getPrivateGlobalPrefix(),
getFunctionNumber(), MI->getOperand(2).getImm(),
OutContext));
// Form and emit the add.
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::ADDrr)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::ADDrr)
.addReg(MI->getOperand(0).getReg())
.addReg(ARM::PC)
.addReg(MI->getOperand(1).getReg())
@@ -1364,7 +1404,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// a PC-relative address at the ldr instruction.
// Emit the label.
- OutStreamer.EmitLabel(getPICLabel(MAI->getPrivateGlobalPrefix(),
+ OutStreamer.EmitLabel(getPICLabel(DL->getPrivateGlobalPrefix(),
getFunctionNumber(), MI->getOperand(2).getImm(),
OutContext));
@@ -1382,7 +1422,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
case ARM::PICLDRSB: Opcode = ARM::LDRSB; break;
case ARM::PICLDRSH: Opcode = ARM::LDRSH; break;
}
- OutStreamer.EmitInstruction(MCInstBuilder(Opcode)
+ EmitToStreamer(OutStreamer, MCInstBuilder(Opcode)
.addReg(MI->getOperand(0).getReg())
.addReg(ARM::PC)
.addReg(MI->getOperand(1).getReg())
@@ -1419,7 +1459,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
case ARM::t2BR_JT: {
// Lower and emit the instruction itself, then the jump table following it.
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::tMOVr)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::tMOVr)
.addReg(ARM::PC)
.addReg(MI->getOperand(0).getReg())
// Add predicate operands.
@@ -1432,7 +1472,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
case ARM::t2TBB_JT: {
// Lower and emit the instruction itself, then the jump table following it.
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::t2TBB)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::t2TBB)
.addReg(ARM::PC)
.addReg(MI->getOperand(0).getReg())
// Add predicate operands.
@@ -1447,7 +1487,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
case ARM::t2TBH_JT: {
// Lower and emit the instruction itself, then the jump table following it.
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::t2TBH)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::t2TBH)
.addReg(ARM::PC)
.addReg(MI->getOperand(0).getReg())
// Add predicate operands.
@@ -1474,7 +1514,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// Add 's' bit operand (always reg0 for this)
if (Opc == ARM::MOVr)
TmpInst.addOperand(MCOperand::CreateReg(0));
- OutStreamer.EmitInstruction(TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
// Make sure the Thumb jump table is 4-byte aligned.
if (Opc == ARM::tMOVr)
@@ -1504,7 +1544,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// Add predicate operands.
TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
TmpInst.addOperand(MCOperand::CreateReg(0));
- OutStreamer.EmitInstruction(TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
// Output the data for the jump table itself
EmitJumpTable(MI);
@@ -1513,7 +1553,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
case ARM::BR_JTadd: {
// Lower and emit the instruction itself, then the jump table following it.
// add pc, target, idx
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::ADDrr)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::ADDrr)
.addReg(ARM::PC)
.addReg(MI->getOperand(0).getReg())
.addReg(MI->getOperand(1).getReg())
@@ -1530,7 +1570,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
case ARM::TRAP: {
// Non-Darwin binutils don't yet support the "trap" mnemonic.
// FIXME: Remove this special case when they do.
- if (!Subtarget->isTargetDarwin()) {
+ if (!Subtarget->isTargetMachO()) {
//.long 0xe7ffdefe @ trap
uint32_t Val = 0xe7ffdefeUL;
OutStreamer.AddComment("trap");
@@ -1549,7 +1589,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
case ARM::tTRAP: {
// Non-Darwin binutils don't yet support the "trap" mnemonic.
// FIXME: Remove this special case when they do.
- if (!Subtarget->isTargetDarwin()) {
+ if (!Subtarget->isTargetMachO()) {
//.short 57086 @ trap
uint16_t Val = 0xdefe;
OutStreamer.AddComment("trap");
@@ -1573,14 +1613,14 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
unsigned ValReg = MI->getOperand(1).getReg();
MCSymbol *Label = GetARMSJLJEHLabel();
OutStreamer.AddComment("eh_setjmp begin");
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::tMOVr)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::tMOVr)
.addReg(ValReg)
.addReg(ARM::PC)
// Predicate.
.addImm(ARMCC::AL)
.addReg(0));
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::tADDi3)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::tADDi3)
.addReg(ValReg)
// 's' bit operand
.addReg(ARM::CPSR)
@@ -1590,7 +1630,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
.addImm(ARMCC::AL)
.addReg(0));
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::tSTRi)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::tSTRi)
.addReg(ValReg)
.addReg(SrcReg)
// The offset immediate is #4. The operand value is scaled by 4 for the
@@ -1600,7 +1640,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
.addImm(ARMCC::AL)
.addReg(0));
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::tMOVi8)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::tMOVi8)
.addReg(ARM::R0)
.addReg(ARM::CPSR)
.addImm(0)
@@ -1609,13 +1649,13 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
.addReg(0));
const MCExpr *SymbolExpr = MCSymbolRefExpr::Create(Label, OutContext);
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::tB)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::tB)
.addExpr(SymbolExpr)
.addImm(ARMCC::AL)
.addReg(0));
OutStreamer.AddComment("eh_setjmp end");
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::tMOVi8)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::tMOVi8)
.addReg(ARM::R0)
.addReg(ARM::CPSR)
.addImm(1)
@@ -1639,7 +1679,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
unsigned ValReg = MI->getOperand(1).getReg();
OutStreamer.AddComment("eh_setjmp begin");
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::ADDri)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::ADDri)
.addReg(ValReg)
.addReg(ARM::PC)
.addImm(8)
@@ -1649,7 +1689,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// 's' bit operand (always reg0 for this).
.addReg(0));
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::STRi12)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::STRi12)
.addReg(ValReg)
.addReg(SrcReg)
.addImm(4)
@@ -1657,7 +1697,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
.addImm(ARMCC::AL)
.addReg(0));
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::MOVi)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::MOVi)
.addReg(ARM::R0)
.addImm(0)
// Predicate.
@@ -1666,7 +1706,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// 's' bit operand (always reg0 for this).
.addReg(0));
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::ADDri)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::ADDri)
.addReg(ARM::PC)
.addReg(ARM::PC)
.addImm(0)
@@ -1677,7 +1717,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
.addReg(0));
OutStreamer.AddComment("eh_setjmp end");
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::MOVi)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::MOVi)
.addReg(ARM::R0)
.addImm(1)
// Predicate.
@@ -1694,7 +1734,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// bx $scratch
unsigned SrcReg = MI->getOperand(0).getReg();
unsigned ScratchReg = MI->getOperand(1).getReg();
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::LDRi12)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::LDRi12)
.addReg(ARM::SP)
.addReg(SrcReg)
.addImm(8)
@@ -1702,7 +1742,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
.addImm(ARMCC::AL)
.addReg(0));
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::LDRi12)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::LDRi12)
.addReg(ScratchReg)
.addReg(SrcReg)
.addImm(4)
@@ -1710,7 +1750,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
.addImm(ARMCC::AL)
.addReg(0));
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::LDRi12)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::LDRi12)
.addReg(ARM::R7)
.addReg(SrcReg)
.addImm(0)
@@ -1718,7 +1758,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
.addImm(ARMCC::AL)
.addReg(0));
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::BX)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::BX)
.addReg(ScratchReg)
// Predicate.
.addImm(ARMCC::AL)
@@ -1733,7 +1773,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// bx $scratch
unsigned SrcReg = MI->getOperand(0).getReg();
unsigned ScratchReg = MI->getOperand(1).getReg();
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::tLDRi)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::tLDRi)
.addReg(ScratchReg)
.addReg(SrcReg)
// The offset immediate is #8. The operand value is scaled by 4 for the
@@ -1743,14 +1783,14 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
.addImm(ARMCC::AL)
.addReg(0));
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::tMOVr)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::tMOVr)
.addReg(ARM::SP)
.addReg(ScratchReg)
// Predicate.
.addImm(ARMCC::AL)
.addReg(0));
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::tLDRi)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::tLDRi)
.addReg(ScratchReg)
.addReg(SrcReg)
.addImm(1)
@@ -1758,7 +1798,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
.addImm(ARMCC::AL)
.addReg(0));
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::tLDRi)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::tLDRi)
.addReg(ARM::R7)
.addReg(SrcReg)
.addImm(0)
@@ -1766,7 +1806,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
.addImm(ARMCC::AL)
.addReg(0));
- OutStreamer.EmitInstruction(MCInstBuilder(ARM::tBX)
+ EmitToStreamer(OutStreamer, MCInstBuilder(ARM::tBX)
.addReg(ScratchReg)
// Predicate.
.addImm(ARMCC::AL)
@@ -1778,7 +1818,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCInst TmpInst;
LowerARMMachineInstrToMCInst(MI, TmpInst, *this);
- OutStreamer.EmitInstruction(TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
}
//===----------------------------------------------------------------------===//
@@ -1787,6 +1827,8 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// Force static initialization.
extern "C" void LLVMInitializeARMAsmPrinter() {
- RegisterAsmPrinter<ARMAsmPrinter> X(TheARMTarget);
- RegisterAsmPrinter<ARMAsmPrinter> Y(TheThumbTarget);
+ RegisterAsmPrinter<ARMAsmPrinter> X(TheARMLETarget);
+ RegisterAsmPrinter<ARMAsmPrinter> Y(TheARMBETarget);
+ RegisterAsmPrinter<ARMAsmPrinter> A(TheThumbLETarget);
+ RegisterAsmPrinter<ARMAsmPrinter> B(TheThumbBETarget);
}
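The new ABI_PCS_wchar_t and ABI_enum_size attributes are driven entirely by module flags, so the front end has to record the widths it compiled with. A small sketch of the producing side using the public Module::addModuleFlag API (the Error merge behavior is one reasonable choice, not something this patch mandates):

    #include "llvm/IR/Module.h"

    // Front-end side (sketch): record wchar_t and minimum enum widths so the
    // ARM asm printer above can translate them into AEABI build attributes.
    static void recordARMABIFlags(llvm::Module &M) {
      M.addModuleFlag(llvm::Module::Error, "wchar_size", 4);    // 2 or 4 bytes
      M.addModuleFlag(llvm::Module::Error, "min_enum_size", 4); // 1 or 4 bytes
    }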
diff --git a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h
index de72e06..7c103c6 100644
--- a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h
+++ b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h
@@ -10,14 +10,16 @@
#ifndef ARMASMPRINTER_H
#define ARMASMPRINTER_H
-#include "ARM.h"
-#include "ARMTargetMachine.h"
+#include "ARMSubtarget.h"
#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/Support/Compiler.h"
+#include "llvm/Target/TargetMachine.h"
namespace llvm {
+class ARMFunctionInfo;
class MCOperand;
+class MachineConstantPool;
+class MachineOperand;
namespace ARM {
enum DW_ISA {
@@ -45,37 +47,41 @@ class LLVM_LIBRARY_VISIBILITY ARMAsmPrinter : public AsmPrinter {
bool InConstantPool;
public:
explicit ARMAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer), AFI(NULL), MCP(NULL), InConstantPool(false) {
- Subtarget = &TM.getSubtarget<ARMSubtarget>();
- }
+ : AsmPrinter(TM, Streamer), AFI(nullptr), MCP(nullptr),
+ InConstantPool(false) {
+ Subtarget = &TM.getSubtarget<ARMSubtarget>();
+ }
- virtual const char *getPassName() const LLVM_OVERRIDE {
+ const char *getPassName() const override {
return "ARM Assembly / Object Emitter";
}
void printOperand(const MachineInstr *MI, int OpNum, raw_ostream &O,
- const char *Modifier = 0);
+ const char *Modifier = nullptr);
+
+ bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &O) override;
+ bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &O) override;
- virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
- unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O) LLVM_OVERRIDE;
- virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
- unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O) LLVM_OVERRIDE;
+ void emitInlineAsmEnd(const MCSubtargetInfo &StartInfo,
+ const MCSubtargetInfo *EndInfo) const override;
void EmitJumpTable(const MachineInstr *MI);
void EmitJump2Table(const MachineInstr *MI);
- virtual void EmitInstruction(const MachineInstr *MI) LLVM_OVERRIDE;
- virtual bool runOnMachineFunction(MachineFunction &F) LLVM_OVERRIDE;
+ void EmitInstruction(const MachineInstr *MI) override;
+ bool runOnMachineFunction(MachineFunction &F) override;
- virtual void EmitConstantPool() LLVM_OVERRIDE {
+ void EmitConstantPool() override {
// We emit constant pools ourselves.
}
- virtual void EmitFunctionBodyEnd() LLVM_OVERRIDE;
- virtual void EmitFunctionEntryLabel() LLVM_OVERRIDE;
- virtual void EmitStartOfAsmFile(Module &M) LLVM_OVERRIDE;
- virtual void EmitEndOfAsmFile(Module &M) LLVM_OVERRIDE;
- virtual void EmitXXStructor(const Constant *CV) LLVM_OVERRIDE;
+ void EmitFunctionBodyEnd() override;
+ void EmitFunctionEntryLabel() override;
+ void EmitStartOfAsmFile(Module &M) override;
+ void EmitEndOfAsmFile(Module &M) override;
+ void EmitXXStructor(const Constant *CV) override;
// lowerOperand - Convert a MachineOperand into the equivalent MCOperand.
bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp);
@@ -84,9 +90,6 @@ private:
// Helpers for EmitStartOfAsmFile() and EmitEndOfAsmFile()
void emitAttributes();
- // Helper for ELF .o only
- void emitARMAttributeSection();
-
// Generic helper used to emit e.g. ARMv5 mul pseudos
void EmitPatchedInstruction(const MachineInstr *MI, unsigned TargetOpc);
@@ -97,13 +100,9 @@ private:
const MachineInstr *MI);
public:
- /// EmitDwarfRegOp - Emit dwarf register operation.
- virtual void EmitDwarfRegOp(const MachineLocation &MLoc, bool Indirect) const
- LLVM_OVERRIDE;
-
- virtual unsigned getISAEncoding() LLVM_OVERRIDE {
+ unsigned getISAEncoding() override {
// ARM/Darwin adds ISA to the DWARF info for each function.
- if (!Subtarget->isTargetDarwin())
+ if (!Subtarget->isTargetMachO())
return 0;
return Subtarget->isThumb() ?
ARM::DW_ISA_ARM_thumb : ARM::DW_ISA_ARM_arm;
@@ -115,13 +114,12 @@ private:
MCSymbol *GetARMSJLJEHLabel() const;
- MCSymbol *GetARMGVSymbol(const GlobalValue *GV);
+ MCSymbol *GetARMGVSymbol(const GlobalValue *GV, unsigned char TargetFlags);
public:
/// EmitMachineConstantPoolValue - Print a machine constantpool value to
/// the .s file.
- virtual void
- EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) LLVM_OVERRIDE;
+ void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) override;
};
} // end namespace llvm
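Most of the header churn above is the C++11 clean-up that landed with LLVM 3.5: LLVM_OVERRIDE becomes the override keyword and NULL/0 becomes nullptr. A self-contained sketch of the spelling change, with purely illustrative class names:

    struct PrinterBase {
      virtual ~PrinterBase() {}
      virtual const char *getPassName() const { return "base"; }
    };

    struct ARMPrinterExample : PrinterBase {
      // 3.4 era:  virtual const char *getPassName() const LLVM_OVERRIDE;
      const char *getPassName() const override {
        return "ARM Assembly / Object Emitter";
      }
    };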
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 658af83..0288db9 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -32,16 +32,19 @@
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCExpr.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "arm-instrinfo"
+
#define GET_INSTRINFO_CTOR_DTOR
#include "ARMGenInstrInfo.inc"
-using namespace llvm;
-
static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
cl::desc("Enable ARM 2-addr to 3-addr conv"));
@@ -100,14 +103,15 @@ ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
// Use a ScoreboardHazardRecognizer for prepass ARM scheduling. TargetInstrImpl
// currently defaults to no prepass hazard recognizer.
-ScheduleHazardRecognizer *ARMBaseInstrInfo::
-CreateTargetHazardRecognizer(const TargetMachine *TM,
- const ScheduleDAG *DAG) const {
+ScheduleHazardRecognizer *
+ARMBaseInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
+ const ScheduleDAG *DAG) const {
if (usePreRAHazardRecognizer()) {
- const InstrItineraryData *II = TM->getInstrItineraryData();
+ const InstrItineraryData *II =
+ &static_cast<const ARMSubtarget *>(STI)->getInstrItineraryData();
return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched");
}
- return TargetInstrInfo::CreateTargetHazardRecognizer(TM, DAG);
+ return TargetInstrInfo::CreateTargetHazardRecognizer(STI, DAG);
}
ScheduleHazardRecognizer *ARMBaseInstrInfo::
@@ -125,14 +129,14 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
// FIXME: Thumb2 support.
if (!EnableARM3Addr)
- return NULL;
+ return nullptr;
MachineInstr *MI = MBBI;
MachineFunction &MF = *MI->getParent()->getParent();
uint64_t TSFlags = MI->getDesc().TSFlags;
bool isPre = false;
switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
- default: return NULL;
+ default: return nullptr;
case ARMII::IndexModePre:
isPre = true;
break;
@@ -144,10 +148,10 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
// operation.
unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
if (MemOpc == 0)
- return NULL;
+ return nullptr;
- MachineInstr *UpdateMI = NULL;
- MachineInstr *MemMI = NULL;
+ MachineInstr *UpdateMI = nullptr;
+ MachineInstr *MemMI = nullptr;
unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
const MCInstrDesc &MCID = MI->getDesc();
unsigned NumOps = MCID.getNumOperands();
@@ -169,7 +173,7 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (ARM_AM::getSOImmVal(Amt) == -1)
// Can't encode it in a so_imm operand. This transformation will
// add more than 1 instruction. Abandon!
- return NULL;
+ return nullptr;
UpdateMI = BuildMI(MF, MI->getDebugLoc(),
get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
.addReg(BaseReg).addImm(Amt)
@@ -273,8 +277,8 @@ ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const {
- TBB = 0;
- FBB = 0;
+ TBB = nullptr;
+ FBB = nullptr;
MachineBasicBlock::iterator I = MBB.end();
if (I == MBB.begin())
@@ -283,7 +287,7 @@ ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
// Walk backwards from the end of the basic block until the branch is
// analyzed or we give up.
- while (isPredicated(I) || I->isTerminator()) {
+ while (isPredicated(I) || I->isTerminator() || I->isDebugValue()) {
    // Flag to be raised on unanalyzable instructions. This is useful in cases
    // where we want to clean up at the end of the basic block before we bail
@@ -331,12 +335,12 @@ ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
I->isReturn())) {
// Forget any previous condition branch information - it no longer applies.
Cond.clear();
- FBB = 0;
+ FBB = nullptr;
// If we can modify the function, delete everything below this
// unconditional branch.
if (AllowModify) {
- MachineBasicBlock::iterator DI = llvm::next(I);
+ MachineBasicBlock::iterator DI = std::next(I);
while (DI != MBB.end()) {
MachineInstr *InstToDelete = DI;
++DI;
@@ -405,7 +409,7 @@ ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
assert((Cond.size() == 2 || Cond.size() == 0) &&
"ARM branch conditions have two components!");
- if (FBB == 0) {
+ if (!FBB) {
if (Cond.empty()) { // Unconditional branch?
if (isThumb)
BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB).addImm(ARMCC::AL).addReg(0);
@@ -535,6 +539,22 @@ bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
return true;
}
+namespace llvm {
+template <> bool IsCPSRDead<MachineInstr>(MachineInstr *MI) {
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || MO.isUndef() || MO.isUse())
+ continue;
+ if (MO.getReg() != ARM::CPSR)
+ continue;
+ if (!MO.isDead())
+ return false;
+ }
+ // all definitions of CPSR are dead
+ return true;
+}
+}
+
/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing.
LLVM_ATTRIBUTE_NOINLINE
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
@@ -559,15 +579,10 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
// If this machine instr is an inline asm, measure it.
if (MI->getOpcode() == ARM::INLINEASM)
return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
- if (MI->isLabel())
- return 0;
unsigned Opc = MI->getOpcode();
switch (Opc) {
- case TargetOpcode::IMPLICIT_DEF:
- case TargetOpcode::KILL:
- case TargetOpcode::PROLOG_LABEL:
- case TargetOpcode::EH_LABEL:
- case TargetOpcode::DBG_VALUE:
+ default:
+ // pseudo-instruction sizes are zero.
return 0;
case TargetOpcode::BUNDLE:
return getInstBundleLength(MI);
@@ -611,7 +626,7 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
MI->getOperand(NumOps - (MI->isPredicable() ? 3 : 2));
unsigned JTI = JTOP.getIndex();
const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
- assert(MJTI != 0);
+ assert(MJTI != nullptr);
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
assert(JTI < JT.size());
// Thumb instructions are 2 byte aligned, but JT entries are 4 byte
@@ -630,9 +645,6 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
++NumEntries;
return NumEntries * EntrySize + InstSize;
}
- default:
- // Otherwise, pseudo-instruction sizes are zero.
- return 0;
}
}
@@ -1242,7 +1254,8 @@ static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);
unsigned PCLabelId = AFI->createPICLabelUId();
- ARMConstantPoolValue *NewCPV = 0;
+ ARMConstantPoolValue *NewCPV = nullptr;
+
// FIXME: The below assumes PIC relocation model and that the function
// is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and
// zero for non-PIC in ARM or Thumb. The callers are all of thumb LDR
@@ -1325,10 +1338,11 @@ bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
Opcode == ARM::t2LDRpci_pic ||
Opcode == ARM::tLDRpci ||
Opcode == ARM::tLDRpci_pic ||
- Opcode == ARM::MOV_ga_dyn ||
+ Opcode == ARM::LDRLIT_ga_pcrel ||
+ Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
+ Opcode == ARM::tLDRLIT_ga_pcrel ||
Opcode == ARM::MOV_ga_pcrel ||
Opcode == ARM::MOV_ga_pcrel_ldr ||
- Opcode == ARM::t2MOV_ga_dyn ||
Opcode == ARM::t2MOV_ga_pcrel) {
if (MI1->getOpcode() != Opcode)
return false;
@@ -1340,10 +1354,11 @@ bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
if (MO0.getOffset() != MO1.getOffset())
return false;
- if (Opcode == ARM::MOV_ga_dyn ||
+ if (Opcode == ARM::LDRLIT_ga_pcrel ||
+ Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
+ Opcode == ARM::tLDRLIT_ga_pcrel ||
Opcode == ARM::MOV_ga_pcrel ||
Opcode == ARM::MOV_ga_pcrel_ldr ||
- Opcode == ARM::t2MOV_ga_dyn ||
Opcode == ARM::t2MOV_ga_pcrel)
// Ignore the PC labels.
return MO0.getGlobal() == MO1.getGlobal();
@@ -1534,7 +1549,7 @@ bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
return false;
// Terminators and labels can't be scheduled around.
- if (MI->isTerminator() || MI->isLabel())
+ if (MI->isTerminator() || MI->isPosition())
return true;
// Treat the start of the IT block as a scheduling boundary, but schedule
@@ -1650,10 +1665,10 @@ ARMBaseInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
ARMCC::CondCodes CC = getInstrPredicate(MI, PredReg);
// MOVCC AL can't be inverted. Shouldn't happen.
if (CC == ARMCC::AL || PredReg != ARM::CPSR)
- return NULL;
+ return nullptr;
MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
if (!MI)
- return NULL;
+ return nullptr;
// After swapping the MOVCC operands, also invert the condition.
MI->getOperand(MI->findFirstPredOperandIdx())
.setImm(ARMCC::getOppositeCondition(CC));
@@ -1669,35 +1684,36 @@ static MachineInstr *canFoldIntoMOVCC(unsigned Reg,
const MachineRegisterInfo &MRI,
const TargetInstrInfo *TII) {
if (!TargetRegisterInfo::isVirtualRegister(Reg))
- return 0;
+ return nullptr;
if (!MRI.hasOneNonDBGUse(Reg))
- return 0;
+ return nullptr;
MachineInstr *MI = MRI.getVRegDef(Reg);
if (!MI)
- return 0;
+ return nullptr;
// MI is folded into the MOVCC by predicating it.
if (!MI->isPredicable())
- return 0;
+ return nullptr;
// Check if MI has any non-dead defs or physreg uses. This also detects
// predicated instructions which will be reading CPSR.
for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
    // Reject frame index operands; PEI can't handle the predicated pseudos.
if (MO.isFI() || MO.isCPI() || MO.isJTI())
- return 0;
+ return nullptr;
if (!MO.isReg())
continue;
// MI can't have any tied operands, that would conflict with predication.
if (MO.isTied())
- return 0;
+ return nullptr;
if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
- return 0;
+ return nullptr;
if (MO.isDef() && !MO.isDead())
- return 0;
+ return nullptr;
}
bool DontMoveAcrossStores = true;
- if (!MI->isSafeToMove(TII, /* AliasAnalysis = */ 0, DontMoveAcrossStores))
- return 0;
+ if (!MI->isSafeToMove(TII, /* AliasAnalysis = */ nullptr,
+ DontMoveAcrossStores))
+ return nullptr;
return MI;
}
@@ -1732,14 +1748,14 @@ MachineInstr *ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI,
if (!DefMI)
DefMI = canFoldIntoMOVCC(MI->getOperand(1).getReg(), MRI, this);
if (!DefMI)
- return 0;
+ return nullptr;
// Find new register class to use.
MachineOperand FalseReg = MI->getOperand(Invert ? 2 : 1);
unsigned DestReg = MI->getOperand(0).getReg();
const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg());
if (!MRI.constrainRegClass(DestReg, PreviousClass))
- return 0;
+ return nullptr;
// Create a new predicated version of DefMI.
// Rfalse is the first use.
@@ -1857,12 +1873,22 @@ void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
}
}
-bool llvm::tryFoldSPUpdateIntoPushPop(MachineFunction &MF,
- MachineInstr *MI,
+static bool isAnySubRegLive(unsigned Reg, const TargetRegisterInfo *TRI,
+ MachineInstr *MI) {
+ for (MCSubRegIterator Subreg(Reg, TRI, /* IncludeSelf */ true);
+ Subreg.isValid(); ++Subreg)
+ if (MI->getParent()->computeRegisterLiveness(TRI, *Subreg, MI) !=
+ MachineBasicBlock::LQR_Dead)
+ return true;
+ return false;
+}
+bool llvm::tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget,
+ MachineFunction &MF, MachineInstr *MI,
unsigned NumBytes) {
  // This optimisation potentially adds lots of load and store
  // micro-operations, so its only real benefit is to code size.
- if (!MF.getFunction()->hasFnAttribute(Attribute::MinSize))
+ if (!MF.getFunction()->getAttributes().hasAttribute(
+ AttributeSet::FunctionIndex, Attribute::MinSize))
return false;
// If only one register is pushed/popped, LLVM can use an LDR/STR
@@ -1911,7 +1937,6 @@ bool llvm::tryFoldSPUpdateIntoPushPop(MachineFunction &MF,
for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i)
RegList.push_back(MI->getOperand(i));
- MachineBasicBlock *MBB = MI->getParent();
const TargetRegisterInfo *TRI = MF.getRegInfo().getTargetRegisterInfo();
const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);
@@ -1932,9 +1957,11 @@ bool llvm::tryFoldSPUpdateIntoPushPop(MachineFunction &MF,
// registers live within the function we might clobber a return value
// register; the other way a register can be live here is if it's
// callee-saved.
+  // TODO: Currently, computeRegisterLiveness() does not report "live" if a
+  // sub-register is live. Once computeRegisterLiveness() handles sub-registers,
+  // it can replace isAnySubRegLive().
if (isCalleeSavedRegister(CurReg, CSRegs) ||
- MBB->computeRegisterLiveness(TRI, CurReg, MI) !=
- MachineBasicBlock::LQR_Dead) {
+ isAnySubRegLive(CurReg, TRI, MI)) {
// VFP pops don't allow holes in the register list, so any skip is fatal
// for our transformation. GPR pops do, so we should just keep looking.
if (IsVFPPushPop)
@@ -2159,7 +2186,7 @@ static bool isSuitableForMask(MachineInstr *&MI, unsigned SrcReg,
// Walk down one instruction which is potentially an 'and'.
const MachineInstr &Copy = *MI;
MachineBasicBlock::iterator AND(
- llvm::next(MachineBasicBlock::iterator(MI)));
+ std::next(MachineBasicBlock::iterator(MI)));
if (AND == MI->getParent()->end()) return false;
MI = AND;
return isSuitableForMask(MI, Copy.getOperand(0).getReg(),
@@ -2235,9 +2262,10 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// Masked compares sometimes use the same register as the corresponding 'and'.
if (CmpMask != ~0) {
if (!isSuitableForMask(MI, SrcReg, CmpMask, false) || isPredicated(MI)) {
- MI = 0;
- for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(SrcReg),
- UE = MRI->use_end(); UI != UE; ++UI) {
+ MI = nullptr;
+ for (MachineRegisterInfo::use_instr_iterator
+ UI = MRI->use_instr_begin(SrcReg), UE = MRI->use_instr_end();
+ UI != UE; ++UI) {
if (UI->getParent() != CmpInstr->getParent()) continue;
MachineInstr *PotentialAND = &*UI;
if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true) ||
@@ -2261,17 +2289,17 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// One is MI, the other is a SUB instruction.
// For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1).
// For CMPri(r1, CmpValue), we are looking for SUBri(r1, CmpValue).
- MachineInstr *Sub = NULL;
+ MachineInstr *Sub = nullptr;
if (SrcReg2 != 0)
// MI is not a candidate for CMPrr.
- MI = NULL;
+ MI = nullptr;
else if (MI->getParent() != CmpInstr->getParent() || CmpValue != 0) {
// Conservatively refuse to convert an instruction which isn't in the same
// BB as the comparison.
// For CMPri, we need to check Sub, thus we can't return here.
if (CmpInstr->getOpcode() == ARM::CMPri ||
CmpInstr->getOpcode() == ARM::t2CMPri)
- MI = NULL;
+ MI = nullptr;
else
return false;
}
@@ -2947,7 +2975,7 @@ ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
break;
}
return UOps;
- } else if (Subtarget.isCortexA8()) {
+ } else if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
if (NumRegs < 4)
return 2;
// 4 registers would be issued: 2, 2.
@@ -2984,7 +3012,7 @@ ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
return ItinData->getOperandCycle(DefClass, DefIdx);
int DefCycle;
- if (Subtarget.isCortexA8()) {
+ if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
// (regno / 2) + (regno % 2) + 1
DefCycle = RegNo / 2 + 1;
if (RegNo % 2)
@@ -3025,7 +3053,7 @@ ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
return ItinData->getOperandCycle(DefClass, DefIdx);
int DefCycle;
- if (Subtarget.isCortexA8()) {
+ if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
// 4 registers would be issued: 1, 2, 1.
// 5 registers would be issued: 1, 2, 2.
DefCycle = RegNo / 2;
@@ -3059,7 +3087,7 @@ ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
return ItinData->getOperandCycle(UseClass, UseIdx);
int UseCycle;
- if (Subtarget.isCortexA8()) {
+ if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
// (regno / 2) + (regno % 2) + 1
UseCycle = RegNo / 2 + 1;
if (RegNo % 2)
@@ -3099,7 +3127,7 @@ ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
return ItinData->getOperandCycle(UseClass, UseIdx);
int UseCycle;
- if (Subtarget.isCortexA8()) {
+ if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
UseCycle = RegNo / 2;
if (UseCycle < 2)
UseCycle = 2;
@@ -3236,8 +3264,7 @@ static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI,
Dist = 0;
MachineBasicBlock::const_iterator I = MI; ++I;
- MachineBasicBlock::const_instr_iterator II =
- llvm::prior(I.getInstrIterator());
+ MachineBasicBlock::const_instr_iterator II = std::prev(I.getInstrIterator());
assert(II->isInsideBundle() && "Empty bundle?");
int Idx = -1;
@@ -3276,7 +3303,7 @@ static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI,
if (Idx == -1) {
Dist = 0;
- return 0;
+ return nullptr;
}
UseIdx = Idx;
@@ -3290,7 +3317,7 @@ static int adjustDefLatency(const ARMSubtarget &Subtarget,
const MachineInstr *DefMI,
const MCInstrDesc *DefMCID, unsigned DefAlign) {
int Adjust = 0;
- if (Subtarget.isCortexA8() || Subtarget.isLikeA9()) {
+ if (Subtarget.isCortexA8() || Subtarget.isLikeA9() || Subtarget.isCortexA7()) {
// FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
// variants are one cycle cheaper.
switch (DefMCID->getOpcode()) {
@@ -3591,7 +3618,8 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
UseMCID, UseIdx, UseAlign);
if (Latency > 1 &&
- (Subtarget.isCortexA8() || Subtarget.isLikeA9())) {
+ (Subtarget.isCortexA8() || Subtarget.isLikeA9() ||
+ Subtarget.isCortexA7())) {
// FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
// variants are one cycle cheaper.
switch (DefMCID.getOpcode()) {
@@ -4333,6 +4361,29 @@ breakPartialRegDependency(MachineBasicBlock::iterator MI,
MI->addRegisterKilled(DReg, TRI, true);
}
+void ARMBaseInstrInfo::getUnconditionalBranch(
+ MCInst &Branch, const MCSymbolRefExpr *BranchTarget) const {
+ if (Subtarget.isThumb())
+ Branch.setOpcode(ARM::tB);
+ else if (Subtarget.isThumb2())
+ Branch.setOpcode(ARM::t2B);
+ else
+ Branch.setOpcode(ARM::Bcc);
+
+ Branch.addOperand(MCOperand::CreateExpr(BranchTarget));
+ Branch.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ Branch.addOperand(MCOperand::CreateReg(0));
+}
+
+void ARMBaseInstrInfo::getTrap(MCInst &MI) const {
+ if (Subtarget.isThumb())
+ MI.setOpcode(ARM::tTRAP);
+ else if (Subtarget.useNaClTrap())
+ MI.setOpcode(ARM::TRAPNaCl);
+ else
+ MI.setOpcode(ARM::TRAP);
+}
+
bool ARMBaseInstrInfo::hasNOP() const {
return (Subtarget.getFeatureBits() & ARM::HasV6T2Ops) != 0;
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
index 93e5964..b8d6758 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
@@ -14,7 +14,7 @@
#ifndef ARMBASEINSTRUCTIONINFO_H
#define ARMBASEINSTRUCTIONINFO_H
-#include "ARM.h"
+#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -42,37 +42,37 @@ public:
// if there is not such an opcode.
virtual unsigned getUnindexedOpcode(unsigned Opc) const =0;
- virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
- LiveVariables *LV) const;
+ MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
+ MachineBasicBlock::iterator &MBBI,
+ LiveVariables *LV) const override;
virtual const ARMBaseRegisterInfo &getRegisterInfo() const = 0;
const ARMSubtarget &getSubtarget() const { return Subtarget; }
ScheduleHazardRecognizer *
- CreateTargetHazardRecognizer(const TargetMachine *TM,
- const ScheduleDAG *DAG) const;
+ CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
+ const ScheduleDAG *DAG) const override;
ScheduleHazardRecognizer *
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
- const ScheduleDAG *DAG) const;
+ const ScheduleDAG *DAG) const override;
// Branch analysis.
- virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify = false) const;
- virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
- virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const;
-
- virtual
- bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
+ bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify = false) const override;
+ unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
+ unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const override;
+
+ bool
+ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
// Predication support.
- bool isPredicated(const MachineInstr *MI) const;
+ bool isPredicated(const MachineInstr *MI) const override;
ARMCC::CondCodes getPredicate(const MachineInstr *MI) const {
int PIdx = MI->findFirstPredOperandIdx();
@@ -80,76 +80,73 @@ public:
: ARMCC::AL;
}
- virtual
bool PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Pred) const;
+ const SmallVectorImpl<MachineOperand> &Pred) const override;
- virtual
bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2) const;
+ const SmallVectorImpl<MachineOperand> &Pred2) const override;
- virtual bool DefinesPredicate(MachineInstr *MI,
- std::vector<MachineOperand> &Pred) const;
+ bool DefinesPredicate(MachineInstr *MI,
+ std::vector<MachineOperand> &Pred) const override;
- virtual bool isPredicable(MachineInstr *MI) const;
+ bool isPredicable(MachineInstr *MI) const override;
/// GetInstSize - Returns the size of the specified MachineInstr.
///
virtual unsigned GetInstSizeInBytes(const MachineInstr* MI) const;
- virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
- virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
- virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
- int &FrameIndex) const;
- virtual unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
- int &FrameIndex) const;
+ unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
+ unsigned isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
+ unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
+ int &FrameIndex) const override;
+ unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
+ int &FrameIndex) const override;
- virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const;
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ DebugLoc DL, unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const override;
- virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
+ void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
- virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
+ void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
- virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;
+ bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
- virtual void reMaterialize(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SubIdx,
- const MachineInstr *Orig,
- const TargetRegisterInfo &TRI) const;
+ void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ unsigned DestReg, unsigned SubIdx,
+ const MachineInstr *Orig,
+ const TargetRegisterInfo &TRI) const override;
- MachineInstr *duplicate(MachineInstr *Orig, MachineFunction &MF) const;
+ MachineInstr *duplicate(MachineInstr *Orig,
+ MachineFunction &MF) const override;
- MachineInstr *commuteInstruction(MachineInstr*, bool=false) const;
+ MachineInstr *commuteInstruction(MachineInstr*,
+ bool=false) const override;
const MachineInstrBuilder &AddDReg(MachineInstrBuilder &MIB, unsigned Reg,
unsigned SubIdx, unsigned State,
const TargetRegisterInfo *TRI) const;
- virtual bool produceSameValue(const MachineInstr *MI0,
- const MachineInstr *MI1,
- const MachineRegisterInfo *MRI) const;
+ bool produceSameValue(const MachineInstr *MI0, const MachineInstr *MI1,
+ const MachineRegisterInfo *MRI) const override;
/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
/// determine if two loads are loading from the same base address. It should
/// only return true if the base pointers are the same and the only
  /// difference between the two addresses is the offset. It also returns the
/// offsets by reference.
- virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
- int64_t &Offset1, int64_t &Offset2)const;
+ bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1,
+ int64_t &Offset2) const override;
  /// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads
@@ -159,83 +156,86 @@ public:
/// from the common base address. It returns true if it decides it's desirable
/// to schedule the two loads together. "NumLoads" is the number of loads that
/// have already been scheduled after Load1.
- virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
- int64_t Offset1, int64_t Offset2,
- unsigned NumLoads) const;
-
- virtual bool isSchedulingBoundary(const MachineInstr *MI,
- const MachineBasicBlock *MBB,
- const MachineFunction &MF) const;
-
- virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB,
- unsigned NumCycles, unsigned ExtraPredCycles,
- const BranchProbability &Probability) const;
-
- virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
- unsigned NumT, unsigned ExtraT,
- MachineBasicBlock &FMBB,
- unsigned NumF, unsigned ExtraF,
- const BranchProbability &Probability) const;
-
- virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
- unsigned NumCycles,
- const BranchProbability
- &Probability) const {
+ bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
+ int64_t Offset1, int64_t Offset2,
+ unsigned NumLoads) const override;
+
+ bool isSchedulingBoundary(const MachineInstr *MI,
+ const MachineBasicBlock *MBB,
+ const MachineFunction &MF) const override;
+
+ bool isProfitableToIfCvt(MachineBasicBlock &MBB,
+ unsigned NumCycles, unsigned ExtraPredCycles,
+ const BranchProbability &Probability) const override;
+
+ bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumT,
+ unsigned ExtraT, MachineBasicBlock &FMBB,
+ unsigned NumF, unsigned ExtraF,
+ const BranchProbability &Probability) const override;
+
+ bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
+ const BranchProbability &Probability) const override {
return NumCycles == 1;
}
- virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
- MachineBasicBlock &FMBB) const;
+ bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
+ MachineBasicBlock &FMBB) const override;
/// analyzeCompare - For a comparison instruction, return the source registers
  /// in SrcReg and SrcReg2 if it has two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
- virtual bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
- unsigned &SrcReg2, int &CmpMask,
- int &CmpValue) const;
+ bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
+ unsigned &SrcReg2, int &CmpMask,
+ int &CmpValue) const override;
/// optimizeCompareInstr - Convert the instruction to set the zero flag so
/// that we can remove a "comparison with zero"; Remove a redundant CMP
/// instruction if the flags can be updated in the same way by an earlier
/// instruction such as SUB.
- virtual bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
- unsigned SrcReg2, int CmpMask, int CmpValue,
- const MachineRegisterInfo *MRI) const;
+ bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
+ unsigned SrcReg2, int CmpMask, int CmpValue,
+ const MachineRegisterInfo *MRI) const override;
- virtual bool analyzeSelect(const MachineInstr *MI,
- SmallVectorImpl<MachineOperand> &Cond,
- unsigned &TrueOp, unsigned &FalseOp,
- bool &Optimizable) const;
+ bool analyzeSelect(const MachineInstr *MI,
+ SmallVectorImpl<MachineOperand> &Cond,
+ unsigned &TrueOp, unsigned &FalseOp,
+ bool &Optimizable) const override;
- virtual MachineInstr *optimizeSelect(MachineInstr *MI, bool) const;
+ MachineInstr *optimizeSelect(MachineInstr *MI, bool) const override;
/// FoldImmediate - 'Reg' is known to be defined by a move immediate
/// instruction, try to fold the immediate into the use instruction.
- virtual bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
- unsigned Reg, MachineRegisterInfo *MRI) const;
+ bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
+ unsigned Reg, MachineRegisterInfo *MRI) const override;
- virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
- const MachineInstr *MI) const;
+ unsigned getNumMicroOps(const InstrItineraryData *ItinData,
+ const MachineInstr *MI) const override;
- virtual
int getOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *UseMI, unsigned UseIdx) const;
- virtual
+ const MachineInstr *UseMI,
+ unsigned UseIdx) const override;
int getOperandLatency(const InstrItineraryData *ItinData,
SDNode *DefNode, unsigned DefIdx,
- SDNode *UseNode, unsigned UseIdx) const;
+ SDNode *UseNode, unsigned UseIdx) const override;
/// VFP/NEON execution domains.
std::pair<uint16_t, uint16_t>
- getExecutionDomain(const MachineInstr *MI) const;
- void setExecutionDomain(MachineInstr *MI, unsigned Domain) const;
+ getExecutionDomain(const MachineInstr *MI) const override;
+ void setExecutionDomain(MachineInstr *MI, unsigned Domain) const override;
unsigned getPartialRegUpdateClearance(const MachineInstr*, unsigned,
- const TargetRegisterInfo*) const;
+ const TargetRegisterInfo*) const override;
void breakPartialRegDependency(MachineBasicBlock::iterator, unsigned,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo *TRI) const override;
+
+ void
+ getUnconditionalBranch(MCInst &Branch,
+ const MCSymbolRefExpr *BranchTarget) const override;
+
+ void getTrap(MCInst &MI) const override;
+
/// Get the number of addresses by LDM or VLDM or zero for unknown.
unsigned getNumLDMAddresses(const MachineInstr *MI) const;
@@ -264,24 +264,27 @@ private:
const MCInstrDesc &UseMCID,
unsigned UseIdx, unsigned UseAlign) const;
- unsigned getPredicationCost(const MachineInstr *MI) const;
+ unsigned getPredicationCost(const MachineInstr *MI) const override;
unsigned getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
- unsigned *PredCost = 0) const;
+ unsigned *PredCost = nullptr) const override;
int getInstrLatency(const InstrItineraryData *ItinData,
- SDNode *Node) const;
+ SDNode *Node) const override;
bool hasHighOperandLatency(const InstrItineraryData *ItinData,
const MachineRegisterInfo *MRI,
const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *UseMI, unsigned UseIdx) const;
+ const MachineInstr *UseMI,
+ unsigned UseIdx) const override;
bool hasLowDefLatency(const InstrItineraryData *ItinData,
- const MachineInstr *DefMI, unsigned DefIdx) const;
+ const MachineInstr *DefMI,
+ unsigned DefIdx) const override;
/// verifyInstruction - Perform target specific instruction verification.
- bool verifyInstruction(const MachineInstr *MI, StringRef &ErrInfo) const;
+ bool verifyInstruction(const MachineInstr *MI,
+ StringRef &ErrInfo) const override;
private:
/// Modeling special VFP / NEON fp MLA / MLS hazards.
@@ -417,7 +420,8 @@ void emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
/// NumBytes. This can save a few bytes per function in code-size, but
/// obviously generates more memory traffic. As such, it only takes
/// effect in functions being optimised for size.
-bool tryFoldSPUpdateIntoPushPop(MachineFunction &MF, MachineInstr *MI,
+bool tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget,
+ MachineFunction &MF, MachineInstr *MI,
unsigned NumBytes);
/// rewriteARMFrameIndex / rewriteT2FrameIndex -
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index 8717dc0..32b5f4a 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -38,20 +38,29 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
+#define DEBUG_TYPE "arm-register-info"
+
#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"
using namespace llvm;
ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMSubtarget &sti)
- : ARMGenRegisterInfo(ARM::LR, 0, 0, ARM::PC), STI(sti),
- FramePtr((STI.isTargetDarwin() || STI.isThumb()) ? ARM::R7 : ARM::R11),
- BasePtr(ARM::R6) {
+ : ARMGenRegisterInfo(ARM::LR, 0, 0, ARM::PC), STI(sti), BasePtr(ARM::R6) {
+ if (STI.isTargetMachO()) {
+ if (STI.isTargetDarwin() || STI.isThumb1Only())
+ FramePtr = ARM::R7;
+ else
+ FramePtr = ARM::R11;
+ } else if (STI.isTargetWindows())
+ FramePtr = ARM::R11;
+ else // ARM EABI
+ FramePtr = STI.isThumb() ? ARM::R7 : ARM::R11;
}
-const uint16_t*
+const MCPhysReg*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
- const uint16_t *RegList = (STI.isTargetIOS() && !STI.isAAPCS_ABI())
+ const MCPhysReg *RegList = (STI.isTargetIOS() && !STI.isAAPCS_ABI())
? CSR_iOS_SaveList
: CSR_AAPCS_SaveList;
@@ -107,7 +116,7 @@ ARMBaseRegisterInfo::getThisReturnPreservedMask(CallingConv::ID CC) const {
// should return NULL
if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls
- return NULL;
+ return nullptr;
return (STI.isTargetIOS() && !STI.isAAPCS_ABI())
? CSR_iOS_ThisReturn_RegMask : CSR_AAPCS_ThisReturn_RegMask;
}
@@ -173,7 +182,7 @@ ARMBaseRegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind
const TargetRegisterClass *
ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
if (RC == &ARM::CCRRegClass)
- return 0; // Can't copy CCR registers.
+ return nullptr; // Can't copy CCR registers.
return RC;
}
@@ -408,6 +417,11 @@ emitLoadConstPool(MachineBasicBlock &MBB,
.setMIFlags(MIFlags);
}
+bool ARMBaseRegisterInfo::mayOverrideLocalAssignment() const {
+ // The native linux build hits a downstream codegen bug when this is enabled.
+ return STI.isTargetDarwin();
+}
+
bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
return true;
@@ -590,10 +604,8 @@ materializeFrameBaseRegister(MachineBasicBlock *MBB,
AddDefaultCC(MIB);
}
-void
-ARMBaseRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
- unsigned BaseReg, int64_t Offset) const {
- MachineInstr &MI = *I;
+void ARMBaseRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
+ int64_t Offset) const {
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
const ARMBaseInstrInfo &TII =
@@ -765,3 +777,60 @@ ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false,true);
}
}
+
+bool ARMBaseRegisterInfo::shouldCoalesce(MachineInstr *MI,
+ const TargetRegisterClass *SrcRC,
+ unsigned SubReg,
+ const TargetRegisterClass *DstRC,
+ unsigned DstSubReg,
+ const TargetRegisterClass *NewRC) const {
+ auto MBB = MI->getParent();
+ auto MF = MBB->getParent();
+ const MachineRegisterInfo &MRI = MF->getRegInfo();
+  // If not copying into a sub-register, this should be OK because we shouldn't
+  // need to split the reg.
+ if (!DstSubReg)
+ return true;
+ // Small registers don't frequently cause a problem, so we can coalesce them.
+ if (NewRC->getSize() < 32 && DstRC->getSize() < 32 && SrcRC->getSize() < 32)
+ return true;
+
+ auto NewRCWeight =
+ MRI.getTargetRegisterInfo()->getRegClassWeight(NewRC);
+ auto SrcRCWeight =
+ MRI.getTargetRegisterInfo()->getRegClassWeight(SrcRC);
+ auto DstRCWeight =
+ MRI.getTargetRegisterInfo()->getRegClassWeight(DstRC);
+ // If the source register class is more expensive than the destination, the
+ // coalescing is probably profitable.
+ if (SrcRCWeight.RegWeight > NewRCWeight.RegWeight)
+ return true;
+ if (DstRCWeight.RegWeight > NewRCWeight.RegWeight)
+ return true;
+
+  // If the register allocator isn't constrained, we can always allow coalescing;
+  // unfortunately, we don't know yet whether we will be constrained.
+ // The goal of this heuristic is to restrict how many expensive registers
+ // we allow to coalesce in a given basic block.
+ auto AFI = MF->getInfo<ARMFunctionInfo>();
+ auto It = AFI->getCoalescedWeight(MBB);
+
+ DEBUG(dbgs() << "\tARM::shouldCoalesce - Coalesced Weight: "
+ << It->second << "\n");
+ DEBUG(dbgs() << "\tARM::shouldCoalesce - Reg Weight: "
+ << NewRCWeight.RegWeight << "\n");
+
+  // This number is the largest round number that meets the criteria:
+ // (1) addresses PR18825
+ // (2) generates better code in some test cases (like vldm-shed-a9.ll)
+ // (3) Doesn't regress any test cases (in-tree, test-suite, and SPEC)
+ // In practice the SizeMultiplier will only factor in for straight line code
+ // that uses a lot of NEON vectors, which isn't terribly common.
+ unsigned SizeMultiplier = MBB->size()/100;
+ SizeMultiplier = SizeMultiplier ? SizeMultiplier : 1;
+ if (It->second < NewRCWeight.WeightLimit * SizeMultiplier) {
+ It->second += NewRCWeight.RegWeight;
+ return true;
+ }
+ return false;
+}
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
index e28fff6..833d3f2 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
@@ -14,7 +14,7 @@
#ifndef ARMBASEREGISTERINFO_H
#define ARMBASEREGISTERINFO_H
-#include "ARM.h"
+#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#define GET_REGINFO_HEADER
@@ -42,7 +42,7 @@ static inline bool isARMArea1Register(unsigned Reg, bool isIOS) {
case R4: case R5: case R6: case R7:
case LR: case SP: case PC:
return true;
- case R8: case R9: case R10: case R11:
+ case R8: case R9: case R10: case R11: case R12:
// For iOS we want r7 and lr to be next to each other.
return !isIOS;
default:
@@ -53,7 +53,7 @@ static inline bool isARMArea1Register(unsigned Reg, bool isIOS) {
static inline bool isARMArea2Register(unsigned Reg, bool isIOS) {
using namespace ARM;
switch (Reg) {
- case R8: case R9: case R10: case R11:
+ case R8: case R9: case R10: case R11: case R12:
// iOS has this second area.
return isIOS;
default:
@@ -100,8 +100,9 @@ protected:
public:
/// Code Generation virtual methods...
- const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
- const uint32_t *getCallPreservedMask(CallingConv::ID) const;
+ const MCPhysReg *
+ getCalleeSavedRegs(const MachineFunction *MF = nullptr) const override;
+ const uint32_t *getCallPreservedMask(CallingConv::ID) const override;
const uint32_t *getNoPreservedMask() const;
/// getThisReturnPreservedMask - Returns a call preserved mask specific to the
@@ -113,48 +114,51 @@ public:
/// Should return NULL in the case that the calling convention does not have
/// this property
const uint32_t *getThisReturnPreservedMask(CallingConv::ID) const;
-
- BitVector getReservedRegs(const MachineFunction &MF) const;
- const TargetRegisterClass*
- getPointerRegClass(const MachineFunction &MF, unsigned Kind = 0) const;
- const TargetRegisterClass*
- getCrossCopyRegClass(const TargetRegisterClass *RC) const;
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
- const TargetRegisterClass*
- getLargestLegalSuperClass(const TargetRegisterClass *RC) const;
+ const TargetRegisterClass *
+ getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind = 0) const override;
+ const TargetRegisterClass *
+ getCrossCopyRegClass(const TargetRegisterClass *RC) const override;
+
+ const TargetRegisterClass *
+ getLargestLegalSuperClass(const TargetRegisterClass *RC) const override;
unsigned getRegPressureLimit(const TargetRegisterClass *RC,
- MachineFunction &MF) const;
+ MachineFunction &MF) const override;
void getRegAllocationHints(unsigned VirtReg,
ArrayRef<MCPhysReg> Order,
SmallVectorImpl<MCPhysReg> &Hints,
const MachineFunction &MF,
- const VirtRegMap *VRM) const;
+ const VirtRegMap *VRM) const override;
void UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
- MachineFunction &MF) const;
+ MachineFunction &MF) const override;
- virtual bool avoidWriteAfterWrite(const TargetRegisterClass *RC) const;
+ bool avoidWriteAfterWrite(const TargetRegisterClass *RC) const override;
bool hasBasePointer(const MachineFunction &MF) const;
bool canRealignStack(const MachineFunction &MF) const;
- bool needsStackRealignment(const MachineFunction &MF) const;
- int64_t getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const;
- bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const;
+ bool needsStackRealignment(const MachineFunction &MF) const override;
+ int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
+ int Idx) const override;
+ bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override;
void materializeFrameBaseRegister(MachineBasicBlock *MBB,
unsigned BaseReg, int FrameIdx,
- int64_t Offset) const;
- void resolveFrameIndex(MachineBasicBlock::iterator I,
- unsigned BaseReg, int64_t Offset) const;
- bool isFrameOffsetLegal(const MachineInstr *MI, int64_t Offset) const;
+ int64_t Offset) const override;
+ void resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
+ int64_t Offset) const override;
+ bool isFrameOffsetLegal(const MachineInstr *MI,
+ int64_t Offset) const override;
bool cannotEliminateFrame(const MachineFunction &MF) const;
// Debug information queries.
- unsigned getFrameRegister(const MachineFunction &MF) const;
+ unsigned getFrameRegister(const MachineFunction &MF) const override;
unsigned getBaseRegister() const { return BasePtr; }
bool isLowRegister(unsigned Reg) const;
@@ -164,25 +168,33 @@ public:
/// specified immediate.
virtual void emitLoadConstPool(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
- DebugLoc dl,
- unsigned DestReg, unsigned SubIdx,
- int Val,
- ARMCC::CondCodes Pred = ARMCC::AL,
+ DebugLoc dl, unsigned DestReg, unsigned SubIdx,
+ int Val, ARMCC::CondCodes Pred = ARMCC::AL,
unsigned PredReg = 0,
unsigned MIFlags = MachineInstr::NoFlags)const;
/// Code Generation virtual methods...
- virtual bool requiresRegisterScavenging(const MachineFunction &MF) const;
+ bool mayOverrideLocalAssignment() const override;
+
+ bool requiresRegisterScavenging(const MachineFunction &MF) const override;
+
+ bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const override;
- virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const;
+ bool requiresFrameIndexScavenging(const MachineFunction &MF) const override;
- virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const;
+ bool requiresVirtualBaseRegisters(const MachineFunction &MF) const override;
- virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const;
+ void eliminateFrameIndex(MachineBasicBlock::iterator II,
+ int SPAdj, unsigned FIOperandNum,
+ RegScavenger *RS = nullptr) const override;
- virtual void eliminateFrameIndex(MachineBasicBlock::iterator II,
- int SPAdj, unsigned FIOperandNum,
- RegScavenger *RS = NULL) const;
+ /// \brief SrcRC and DstRC will be morphed into NewRC if this returns true
+ bool shouldCoalesce(MachineInstr *MI,
+ const TargetRegisterClass *SrcRC,
+ unsigned SubReg,
+ const TargetRegisterClass *DstRC,
+ unsigned DstSubReg,
+ const TargetRegisterClass *NewRC) const override;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/ARM/ARMBuildAttrs.h b/contrib/llvm/lib/Target/ARM/ARMBuildAttrs.h
deleted file mode 100644
index b16d4ef..0000000
--- a/contrib/llvm/lib/Target/ARM/ARMBuildAttrs.h
+++ /dev/null
@@ -1,170 +0,0 @@
-//===-- ARMBuildAttrs.h - ARM Build Attributes ------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains enumerations and support routines for ARM build attributes
-// as defined in ARM ABI addenda document (ABI release 2.08).
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef __TARGET_ARMBUILDATTRS_H__
-#define __TARGET_ARMBUILDATTRS_H__
-
-namespace llvm {
-namespace ARMBuildAttrs {
-
- enum SpecialAttr {
- // This is for the .cpu asm attr. It translates into one or more
- // AttrType (below) entries in the .ARM.attributes section in the ELF.
- SEL_CPU
- };
-
- enum AttrType {
- // Rest correspond to ELF/.ARM.attributes
- File = 1,
- Section = 2,
- Symbol = 3,
- CPU_raw_name = 4,
- CPU_name = 5,
- CPU_arch = 6,
- CPU_arch_profile = 7,
- ARM_ISA_use = 8,
- THUMB_ISA_use = 9,
- VFP_arch = 10,
- WMMX_arch = 11,
- Advanced_SIMD_arch = 12,
- PCS_config = 13,
- ABI_PCS_R9_use = 14,
- ABI_PCS_RW_data = 15,
- ABI_PCS_RO_data = 16,
- ABI_PCS_GOT_use = 17,
- ABI_PCS_wchar_t = 18,
- ABI_FP_rounding = 19,
- ABI_FP_denormal = 20,
- ABI_FP_exceptions = 21,
- ABI_FP_user_exceptions = 22,
- ABI_FP_number_model = 23,
- ABI_align8_needed = 24,
- ABI_align8_preserved = 25,
- ABI_enum_size = 26,
- ABI_HardFP_use = 27,
- ABI_VFP_args = 28,
- ABI_WMMX_args = 29,
- ABI_optimization_goals = 30,
- ABI_FP_optimization_goals = 31,
- compatibility = 32,
- CPU_unaligned_access = 34,
- FP_HP_extension = 36,
- ABI_FP_16bit_format = 38,
- MPextension_use = 42, // was 70, 2.08 ABI
- DIV_use = 44,
- nodefaults = 64,
- also_compatible_with = 65,
- T2EE_use = 66,
- conformance = 67,
- Virtualization_use = 68,
- MPextension_use_old = 70
- };
-
- // Magic numbers for .ARM.attributes
- enum AttrMagic {
- Format_Version = 0x41
- };
-
- // Legal Values for CPU_arch, (=6), uleb128
- enum CPUArch {
- Pre_v4 = 0,
- v4 = 1, // e.g. SA110
- v4T = 2, // e.g. ARM7TDMI
- v5T = 3, // e.g. ARM9TDMI
- v5TE = 4, // e.g. ARM946E_S
- v5TEJ = 5, // e.g. ARM926EJ_S
- v6 = 6, // e.g. ARM1136J_S
- v6KZ = 7, // e.g. ARM1176JZ_S
- v6T2 = 8, // e.g. ARM1156T2F_S
- v6K = 9, // e.g. ARM1136J_S
- v7 = 10, // e.g. Cortex A8, Cortex M3
- v6_M = 11, // e.g. Cortex M1
- v6S_M = 12, // v6_M with the System extensions
- v7E_M = 13, // v7_M with DSP extensions
- v8 = 14 // v8, AArch32
- };
-
- enum CPUArchProfile { // (=7), uleb128
- Not_Applicable = 0, // pre v7, or cross-profile code
- ApplicationProfile = (0x41), // 'A' (e.g. for Cortex A8)
- RealTimeProfile = (0x52), // 'R' (e.g. for Cortex R4)
- MicroControllerProfile = (0x4D), // 'M' (e.g. for Cortex M3)
- SystemProfile = (0x53) // 'S' Application or real-time profile
- };
-
- // The following have a lot of common use cases
- enum {
- Not_Allowed = 0,
- Allowed = 1,
-
- // Tag_ARM_ISA_use (=8), uleb128
-
- // Tag_THUMB_ISA_use, (=9), uleb128
- AllowThumb32 = 2, // 32-bit Thumb (implies 16-bit instructions)
-
- // Tag_FP_arch (=10), uleb128 (formerly Tag_VFP_arch = 10)
- AllowFPv2 = 2, // v2 FP ISA permitted (implies use of the v1 FP ISA)
- AllowFPv3A = 3, // v3 FP ISA permitted (implies use of the v2 FP ISA)
- AllowFPv3B = 4, // v3 FP ISA permitted, but only D0-D15, S0-S31
- AllowFPv4A = 5, // v4 FP ISA permitted (implies use of v3 FP ISA)
- AllowFPv4B = 6, // v4 FP ISA was permitted, but only D0-D15, S0-S31
- AllowFPARMv8A = 7, // Use of the ARM v8-A FP ISA was permitted
- AllowFPARMv8B = 8, // Use of the ARM v8-A FP ISA was permitted, but only D0-D15, S0-S31
-
- // Tag_WMMX_arch, (=11), uleb128
- AllowWMMXv1 = 1, // The user permitted this entity to use WMMX v1
- AllowWMMXv2 = 2, // The user permitted this entity to use WMMX v2
-
- // Tag_Advanced_SIMD_arch, (=12), uleb128
- AllowNeon = 1, // SIMDv1 was permitted
- AllowNeon2 = 2, // SIMDv2 was permitted (Half-precision FP, MAC operations)
- AllowNeonARMv8 = 3, // ARM v8-A SIMD was permitted
-
- // Tag_ABI_FP_denormal, (=20), uleb128
- PreserveFPSign = 2, // sign when flushed-to-zero is preserved
-
- // Tag_ABI_FP_number_model, (=23), uleb128
- AllowRTABI = 2, // numbers, infinities, and one quiet NaN (see [RTABI])
- AllowIEE754 = 3, // this code to use all the IEEE 754-defined FP encodings
-
- // Tag_ABI_HardFP_use, (=27), uleb128
- HardFPImplied = 0, // FP use should be implied by Tag_FP_arch
- HardFPSinglePrecision = 1, // Single-precision only
-
- // Tag_ABI_VFP_args, (=28), uleb128
- BaseAAPCS = 0,
- HardFPAAPCS = 1,
-
- // Tag_FP_HP_extension, (=36), uleb128
- AllowHPFP = 1, // Allow use of Half Precision FP
-
- // Tag_MPextension_use, (=42), uleb128
- AllowMP = 1, // Allow use of MP extensions
-
- // Tag_DIV_use, (=44), uleb128
- AllowDIVIfExists = 0, // Allow hardware divide if available in arch, or no info exists.
- DisallowDIV = 1, // Hardware divide explicitly disallowed
- AllowDIVExt = 2, // Allow hardware divide as optional architecture extension above
- // the base arch specified by Tag_CPU_arch and Tag_CPU_arch_profile.
-
- // Tag_Virtualization_use, (=68), uleb128
- AllowTZ = 1,
- AllowVirtualization = 2,
- AllowTZVirtualization = 3
- };
-
-} // namespace ARMBuildAttrs
-} // namespace llvm
-
-#endif // __TARGET_ARMBUILDATTRS_H__
diff --git a/contrib/llvm/lib/Target/ARM/ARMCallingConv.h b/contrib/llvm/lib/Target/ARM/ARMCallingConv.h
index 4f94ad2..dc41c1c 100644
--- a/contrib/llvm/lib/Target/ARM/ARMCallingConv.h
+++ b/contrib/llvm/lib/Target/ARM/ARMCallingConv.h
@@ -28,7 +28,7 @@ namespace llvm {
static bool f64AssignAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
CCValAssign::LocInfo &LocInfo,
CCState &State, bool CanFail) {
- static const uint16_t RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
+ static const MCPhysReg RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
// Try to get the first register.
if (unsigned Reg = State.AllocateReg(RegList, 4))
@@ -71,10 +71,10 @@ static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
static bool f64AssignAAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
CCValAssign::LocInfo &LocInfo,
CCState &State, bool CanFail) {
- static const uint16_t HiRegList[] = { ARM::R0, ARM::R2 };
- static const uint16_t LoRegList[] = { ARM::R1, ARM::R3 };
- static const uint16_t ShadowRegList[] = { ARM::R0, ARM::R1 };
- static const uint16_t GPRArgRegs[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
+ static const MCPhysReg HiRegList[] = { ARM::R0, ARM::R2 };
+ static const MCPhysReg LoRegList[] = { ARM::R1, ARM::R3 };
+ static const MCPhysReg ShadowRegList[] = { ARM::R0, ARM::R1 };
+ static const MCPhysReg GPRArgRegs[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
unsigned Reg = State.AllocateReg(HiRegList, ShadowRegList, 2);
if (Reg == 0) {
@@ -123,8 +123,8 @@ static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
static bool f64RetAssign(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
CCValAssign::LocInfo &LocInfo, CCState &State) {
- static const uint16_t HiRegList[] = { ARM::R0, ARM::R2 };
- static const uint16_t LoRegList[] = { ARM::R1, ARM::R3 };
+ static const MCPhysReg HiRegList[] = { ARM::R0, ARM::R2 };
+ static const MCPhysReg LoRegList[] = { ARM::R1, ARM::R3 };
unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
if (Reg == 0)
@@ -160,6 +160,105 @@ static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
State);
}
+static const uint16_t SRegList[] = { ARM::S0, ARM::S1, ARM::S2, ARM::S3,
+ ARM::S4, ARM::S5, ARM::S6, ARM::S7,
+ ARM::S8, ARM::S9, ARM::S10, ARM::S11,
+ ARM::S12, ARM::S13, ARM::S14, ARM::S15 };
+static const uint16_t DRegList[] = { ARM::D0, ARM::D1, ARM::D2, ARM::D3,
+ ARM::D4, ARM::D5, ARM::D6, ARM::D7 };
+static const uint16_t QRegList[] = { ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3 };
+
+// Allocate part of an AAPCS HFA or HVA. We assume that each member of the HA
+// has InConsecutiveRegs set, and that the last member also has
+// InConsecutiveRegsLast set. We must process all members of the HA before
+// we can allocate it, as we need to know the total number of registers that
+// will be needed in order to (attempt to) allocate a contiguous block.
+static bool CC_ARM_AAPCS_Custom_HA(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+ SmallVectorImpl<CCValAssign> &PendingHAMembers = State.getPendingLocs();
+ // AAPCS HFAs must have 1-4 elements, all of the same type
+ assert(PendingHAMembers.size() < 8);
+ if (PendingHAMembers.size() > 0)
+ assert(PendingHAMembers[0].getLocVT() == LocVT);
+
+ // Add the argument to the list to be allocated once we know the size of the
+ // HA
+ PendingHAMembers.push_back(
+ CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
+
+ if (ArgFlags.isInConsecutiveRegsLast()) {
+ assert(PendingHAMembers.size() > 0 && PendingHAMembers.size() <= 8 &&
+ "Homogeneous aggregates must have between 1 and 4 members");
+
+ // Try to allocate a contiguous block of registers, each of the correct
+ // size to hold one member.
+ const uint16_t *RegList;
+ unsigned NumRegs;
+ switch (LocVT.SimpleTy) {
+ case MVT::i32:
+ case MVT::f32:
+ RegList = SRegList;
+ NumRegs = 16;
+ break;
+ case MVT::f64:
+ RegList = DRegList;
+ NumRegs = 8;
+ break;
+ case MVT::v2f64:
+ RegList = QRegList;
+ NumRegs = 4;
+ break;
+ default:
+ llvm_unreachable("Unexpected member type for HA");
+ break;
+ }
+
+ unsigned RegResult =
+ State.AllocateRegBlock(RegList, NumRegs, PendingHAMembers.size());
+
+ if (RegResult) {
+ for (SmallVectorImpl<CCValAssign>::iterator It = PendingHAMembers.begin();
+ It != PendingHAMembers.end(); ++It) {
+ It->convertToReg(RegResult);
+ State.addLoc(*It);
+ ++RegResult;
+ }
+ PendingHAMembers.clear();
+ return true;
+ }
+
+    // Register allocation failed; fall back to the stack
+
+ // Mark all VFP regs as unavailable (AAPCS rule C.2.vfp)
+ for (unsigned regNo = 0; regNo < 16; ++regNo)
+ State.AllocateReg(SRegList[regNo]);
+
+ unsigned Size = LocVT.getSizeInBits() / 8;
+ unsigned Align = Size;
+
+ if (LocVT.SimpleTy == MVT::v2f64 || LocVT.SimpleTy == MVT::i32) {
+ // Vectors are always aligned to 8 bytes. If we've seen an i32 here
+ // it's because it's been split from a larger type, also with align 8.
+ Align = 8;
+ }
+
+ for (auto It : PendingHAMembers) {
+ It.convertToMem(State.AllocateStack(Size, Align));
+ State.addLoc(It);
+
+ // Only the first member needs to be aligned.
+ Align = 1;
+ }
+
+ // All pending members have now been allocated
+ PendingHAMembers.clear();
+ }
+
+ // This will be allocated by the last member of the HA
+ return true;
+}
+
} // End llvm namespace
#endif
diff --git a/contrib/llvm/lib/Target/ARM/ARMCallingConv.td b/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
index 9bea4b2..526089b 100644
--- a/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
+++ b/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
@@ -64,6 +64,13 @@ def FastCC_ARM_APCS : CallingConv<[
CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7, S8,
S9, S10, S11, S12, S13, S14, S15]>>,
+
+ // CPRCs may be allocated to co-processor registers or the stack - they
+ // may never be allocated to core registers.
+ CCIfType<[f32], CCAssignToStackWithShadow<4, 4, [Q0, Q1, Q2, Q3]>>,
+ CCIfType<[f64], CCAssignToStackWithShadow<8, 4, [Q0, Q1, Q2, Q3]>>,
+ CCIfType<[v2f64], CCAssignToStackWithShadow<16, 4, [Q0, Q1, Q2, Q3]>>,
+
CCDelegateTo<CC_ARM_APCS>
]>;
@@ -114,10 +121,11 @@ def CC_ARM_AAPCS_Common : CallingConv<[
CCIfType<[i32], CCIf<"ArgFlags.getOrigAlign() != 8",
CCAssignToReg<[R0, R1, R2, R3]>>>,
- CCIfType<[i32], CCIfAlign<"8", CCAssignToStackWithShadow<4, 8, R3>>>,
- CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
- CCIfType<[f64], CCAssignToStack<8, 8>>,
- CCIfType<[v2f64], CCAssignToStack<16, 8>>
+ CCIfType<[i32], CCIfAlign<"8", CCAssignToStackWithShadow<4, 8, [R0, R1, R2, R3]>>>,
+ CCIfType<[i32], CCAssignToStackWithShadow<4, 4, [R0, R1, R2, R3]>>,
+ CCIfType<[f32], CCAssignToStackWithShadow<4, 4, [Q0, Q1, Q2, Q3]>>,
+ CCIfType<[f64], CCAssignToStackWithShadow<8, 8, [Q0, Q1, Q2, Q3]>>,
+ CCIfType<[v2f64], CCAssignToStackWithShadow<16, 8, [Q0, Q1, Q2, Q3]>>
]>;
def RetCC_ARM_AAPCS_Common : CallingConv<[
@@ -166,6 +174,9 @@ def CC_ARM_AAPCS_VFP : CallingConv<[
CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
+ // HFAs are passed in a contiguous block of registers, or on the stack
+ CCIfConsecutiveRegs<CCCustom<"CC_ARM_AAPCS_Custom_HA">>,
+
CCIfType<[v2f64], CCAssignToReg<[Q0, Q1, Q2, Q3]>>,
CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7, S8,
diff --git a/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp b/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp
index 568ca85..5fb6ebfe 100644
--- a/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp
@@ -12,10 +12,10 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "jit"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMConstantPoolValue.h"
+#include "ARMMachineFunctionInfo.h"
#include "ARMRelocations.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
@@ -40,6 +40,8 @@
#endif
using namespace llvm;
+#define DEBUG_TYPE "jit"
+
STATISTIC(NumEmitted, "Number of machine instructions emitted");
namespace {
@@ -57,7 +59,7 @@ namespace {
bool IsPIC;
bool IsThumb;
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MachineModuleInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -65,10 +67,10 @@ namespace {
static char ID;
public:
ARMCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce)
- : MachineFunctionPass(ID), JTI(0),
+ : MachineFunctionPass(ID), JTI(nullptr),
II((const ARMBaseInstrInfo *)tm.getInstrInfo()),
TD(tm.getDataLayout()), TM(tm),
- MCE(mce), MCPEs(0), MJTEs(0),
+ MCE(mce), MCPEs(nullptr), MJTEs(nullptr),
IsPIC(TM.getRelocationModel() == Reloc::PIC_), IsThumb(false) {}
/// getBinaryCodeForInstr - This function, generated by the
@@ -76,9 +78,9 @@ namespace {
/// machine instructions.
uint64_t getBinaryCodeForInstr(const MachineInstr &MI) const;
- bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "ARM Machine Code Emitter";
}
@@ -207,8 +209,6 @@ namespace {
const { return 0; }
unsigned getThumbAddrModeRegRegOpValue(const MachineInstr &MI, unsigned Op)
const { return 0; }
- unsigned getT2AddrModeImm12OpValue(const MachineInstr &MI, unsigned Op)
- const { return 0; }
unsigned getT2AddrModeImm8OpValue(const MachineInstr &MI, unsigned Op)
const { return 0; }
unsigned getT2Imm8s4OpValue(const MachineInstr &MI, unsigned Op)
@@ -219,8 +219,6 @@ namespace {
const { return 0; }
unsigned getT2AddrModeImm8OffsetOpValue(const MachineInstr &MI, unsigned Op)
const { return 0; }
- unsigned getT2AddrModeImm12OffsetOpValue(const MachineInstr &MI,unsigned Op)
- const { return 0; }
unsigned getT2AddrModeSORegOpValue(const MachineInstr &MI, unsigned Op)
const { return 0; }
unsigned getT2SORegOpValue(const MachineInstr &MI, unsigned Op)
@@ -238,10 +236,6 @@ namespace {
const { return 0; }
unsigned getBitfieldInvertedMaskOpValue(const MachineInstr &MI,
unsigned Op) const { return 0; }
- unsigned getSsatBitPosValue(const MachineInstr &MI,
- unsigned Op) const { return 0; }
- uint32_t getLdStmModeOpValue(const MachineInstr &MI, unsigned OpIdx)
- const {return 0; }
uint32_t getLdStSORegOpValue(const MachineInstr &MI, unsigned OpIdx)
const { return 0; }
@@ -270,8 +264,6 @@ namespace {
return 0;
}
- uint32_t getAddrMode2OpValue(const MachineInstr &MI, unsigned OpIdx)
- const { return 0;}
uint32_t getAddrMode2OffsetOpValue(const MachineInstr &MI, unsigned OpIdx)
const { return 0;}
uint32_t getPostIdxRegOpValue(const MachineInstr &MI, unsigned OpIdx)
@@ -282,8 +274,6 @@ namespace {
const { return 0; }
uint32_t getAddrModeThumbSPOpValue(const MachineInstr &MI, unsigned Op)
const { return 0; }
- uint32_t getAddrModeSOpValue(const MachineInstr &MI, unsigned Op)
- const { return 0; }
uint32_t getAddrModeISOpValue(const MachineInstr &MI, unsigned Op)
const { return 0; }
uint32_t getAddrModePCOpValue(const MachineInstr &MI, unsigned Op)
@@ -385,7 +375,7 @@ bool ARMCodeEmitter::runOnMachineFunction(MachineFunction &MF) {
Subtarget = &TM.getSubtarget<ARMSubtarget>();
MCPEs = &MF.getConstantPool()->getConstants();
- MJTEs = 0;
+ MJTEs = nullptr;
if (MF.getJumpTableInfo()) MJTEs = &MF.getJumpTableInfo()->getJumpTables();
IsPIC = TM.getRelocationModel() == Reloc::PIC_;
IsThumb = MF.getInfo<ARMFunctionInfo>()->isThumbFunction();
@@ -866,7 +856,8 @@ void ARMCodeEmitter::emitPseudoInstruction(const MachineInstr &MI) {
}
break;
}
- case TargetOpcode::PROLOG_LABEL:
+ case TargetOpcode::CFI_INSTRUCTION:
+ break;
case TargetOpcode::EH_LABEL:
MCE.emitLabel(MI.getOperand(0).getMCSymbol());
break;
diff --git a/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index cff5ce2..ce264ee 100644
--- a/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -13,7 +13,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "arm-cp-islands"
#include "ARM.h"
#include "ARMMachineFunctionInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
@@ -36,6 +35,8 @@
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "arm-cp-islands"
+
STATISTIC(NumCPEs, "Number of constpool entries");
STATISTIC(NumSplit, "Number of uncond branches inserted");
STATISTIC(NumCBrFixed, "Number of cond branches fixed");
@@ -266,9 +267,9 @@ namespace {
static char ID;
ARMConstantIslands() : MachineFunctionPass(ID) {}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "ARM constant island placement and branch shortening pass";
}
@@ -569,10 +570,10 @@ static bool BBHasFallthrough(MachineBasicBlock *MBB) {
// Get the next machine basic block in the function.
MachineFunction::iterator MBBI = MBB;
// Can't fall off end of function.
- if (llvm::next(MBBI) == MBB->getParent()->end())
+ if (std::next(MBBI) == MBB->getParent()->end())
return false;
- MachineBasicBlock *NextBB = llvm::next(MBBI);
+ MachineBasicBlock *NextBB = std::next(MBBI);
for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
E = MBB->succ_end(); I != E; ++I)
if (*I == NextBB)
@@ -593,7 +594,7 @@ ARMConstantIslands::CPEntry
if (CPEs[i].CPEMI == CPEMI)
return &CPEs[i];
}
- return NULL;
+ return nullptr;
}
/// getCPELogAlign - Returns the required alignment of the constant pool entry
@@ -917,7 +918,7 @@ MachineBasicBlock *ARMConstantIslands::splitBlockBeforeInstr(MachineInstr *MI) {
CompareMBBNumbers);
MachineBasicBlock* WaterBB = *IP;
if (WaterBB == OrigBB)
- WaterList.insert(llvm::next(IP), NewBB);
+ WaterList.insert(std::next(IP), NewBB);
else
WaterList.insert(IP, OrigBB);
NewWaterList.insert(OrigBB);
@@ -1102,7 +1103,7 @@ bool ARMConstantIslands::decrementCPEReferenceCount(unsigned CPI,
assert(CPE && "Unexpected!");
if (--CPE->RefCount == 0) {
removeDeadCPEMI(CPEMI);
- CPE->CPEMI = NULL;
+ CPE->CPEMI = nullptr;
--NumCPEs;
return true;
}
@@ -1135,7 +1136,7 @@ int ARMConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset)
if (CPEs[i].CPEMI == CPEMI)
continue;
// Removing CPEs can leave empty entries, skip
- if (CPEs[i].CPEMI == NULL)
+ if (CPEs[i].CPEMI == nullptr)
continue;
if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getMaxDisp(),
U.NegOk)) {
@@ -1188,7 +1189,7 @@ bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
return false;
unsigned BestGrowth = ~0u;
- for (water_iterator IP = prior(WaterList.end()), B = WaterList.begin();;
+ for (water_iterator IP = std::prev(WaterList.end()), B = WaterList.begin();;
--IP) {
MachineBasicBlock* WaterBB = *IP;
// Check if water is in range and is either at a lower address than the
@@ -1249,7 +1250,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
if (isOffsetInRange(UserOffset, CPEOffset, U)) {
DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber()
<< format(", expected CPE offset %#x\n", CPEOffset));
- NewMBB = llvm::next(MachineFunction::iterator(UserMBB));
+ NewMBB = std::next(MachineFunction::iterator(UserMBB));
// Add an unconditional branch from UserMBB to fallthrough block. Record
// it for branch lengthening; this new branch will not get out of range,
// but if the preceding conditional branch is out of range, the targets
@@ -1317,11 +1318,10 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
++MI;
unsigned CPUIndex = CPUserIndex+1;
unsigned NumCPUsers = CPUsers.size();
- MachineInstr *LastIT = 0;
+ MachineInstr *LastIT = nullptr;
for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI);
Offset < BaseInsertOffset;
- Offset += TII->GetInstSizeInBytes(MI),
- MI = llvm::next(MI)) {
+ Offset += TII->GetInstSizeInBytes(MI), MI = std::next(MI)) {
assert(MI != UserMBB->end() && "Fell off end of block");
if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) {
CPUser &U = CPUsers[CPUIndex];
@@ -1393,7 +1393,7 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
NewWaterList.insert(NewIsland);
// The new CPE goes before the following block (NewMBB).
- NewMBB = llvm::next(MachineFunction::iterator(WaterBB));
+ NewMBB = std::next(MachineFunction::iterator(WaterBB));
} else {
// No water found.
@@ -1405,7 +1405,7 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
// next iteration for constant pools, but in this context, we don't want
// it. Check for this so it will be removed from the WaterList.
// Also remove any entry from NewWaterList.
- MachineBasicBlock *WaterBB = prior(MachineFunction::iterator(NewMBB));
+ MachineBasicBlock *WaterBB = std::prev(MachineFunction::iterator(NewMBB));
IP = std::find(WaterList.begin(), WaterList.end(), WaterBB);
if (IP != WaterList.end())
NewWaterList.erase(WaterBB);
@@ -1443,7 +1443,7 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
// Increase the size of the island block to account for the new entry.
BBInfo[NewIsland->getNumber()].Size += Size;
- adjustBBOffsetsAfter(llvm::prior(MachineFunction::iterator(NewIsland)));
+ adjustBBOffsetsAfter(std::prev(MachineFunction::iterator(NewIsland)));
// Finally, change the CPI in the instruction operand to be ID.
for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i)
@@ -1492,7 +1492,7 @@ bool ARMConstantIslands::removeUnusedCPEntries() {
for (unsigned j = 0, ee = CPEs.size(); j != ee; ++j) {
if (CPEs[j].RefCount == 0 && CPEs[j].CPEMI) {
removeDeadCPEMI(CPEs[j].CPEMI);
- CPEs[j].CPEMI = NULL;
+ CPEs[j].CPEMI = nullptr;
MadeChange = true;
}
}
@@ -1592,7 +1592,7 @@ ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
++NumCBrFixed;
if (BMI != MI) {
- if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) &&
+ if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) &&
BMI->getOpcode() == Br.UncondBr) {
// Last MI in the BB is an unconditional branch. Can we simply invert the
// condition and swap destinations:
@@ -1622,7 +1622,7 @@ ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
MBB->back().eraseFromParent();
// BBInfo[SplitBB].Offset is wrong temporarily, fixed below
}
- MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
+ MachineBasicBlock *NextBB = std::next(MachineFunction::iterator(MBB));
DEBUG(dbgs() << " Insert B to BB#" << DestBB->getNumber()
<< " also invert condition and change dest. to BB#"
@@ -1845,7 +1845,7 @@ bool ARMConstantIslands::optimizeThumb2JumpTables() {
// FIXME: After the tables are shrunk, can we get rid some of the
// constantpool tables?
MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
- if (MJTI == 0) return false;
+ if (!MJTI) return false;
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
@@ -1971,7 +1971,7 @@ bool ARMConstantIslands::reorderThumb2JumpTables() {
bool MadeChange = false;
MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
- if (MJTI == 0) return false;
+ if (!MJTI) return false;
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
@@ -2013,11 +2013,11 @@ adjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB) {
// try to move it; otherwise, create a new block following the jump
// table that branches back to the actual target. This is a very simple
// heuristic. FIXME: We can definitely improve it.
- MachineBasicBlock *TBB = 0, *FBB = 0;
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
SmallVector<MachineOperand, 4> CondPrior;
MachineFunction::iterator BBi = BB;
- MachineFunction::iterator OldPrior = prior(BBi);
+ MachineFunction::iterator OldPrior = std::prev(BBi);
// If the block terminator isn't analyzable, don't try to move the block
bool B = TII->AnalyzeBranch(*BB, TBB, FBB, Cond);
@@ -2033,7 +2033,7 @@ adjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB) {
// Update numbering to account for the block being moved.
MF->RenumberBlocks();
++NumJTMoved;
- return NULL;
+ return nullptr;
}
// Create a new MBB for the code after the jump BB.
diff --git a/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h b/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h
index 7ae7bf4..c7a8415 100644
--- a/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h
+++ b/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h
@@ -103,12 +103,12 @@ public:
bool isLSDA() const { return Kind == ARMCP::CPLSDA; }
bool isMachineBasicBlock() const{ return Kind == ARMCP::CPMachineBasicBlock; }
- virtual unsigned getRelocationInfo() const { return 2; }
+ unsigned getRelocationInfo() const override { return 2; }
- virtual int getExistingMachineCPValue(MachineConstantPool *CP,
- unsigned Alignment);
+ int getExistingMachineCPValue(MachineConstantPool *CP,
+ unsigned Alignment) override;
- virtual void addSelectionDAGCSEId(FoldingSetNodeID &ID);
+ void addSelectionDAGCSEId(FoldingSetNodeID &ID) override;
/// hasSameValue - Return true if this ARM constpool value can share the same
/// constantpool entry as another ARM constpool value.
@@ -120,7 +120,7 @@ public:
this->Modifier == A->Modifier;
}
- virtual void print(raw_ostream &O) const;
+ void print(raw_ostream &O) const override;
void print(raw_ostream *O) const { if (O) print(*O); }
void dump() const;
};
@@ -164,16 +164,16 @@ public:
const GlobalValue *getGV() const;
const BlockAddress *getBlockAddress() const;
- virtual int getExistingMachineCPValue(MachineConstantPool *CP,
- unsigned Alignment);
+ int getExistingMachineCPValue(MachineConstantPool *CP,
+ unsigned Alignment) override;
/// hasSameValue - Return true if this ARM constpool value can share the same
/// constantpool entry as another ARM constpool value.
- virtual bool hasSameValue(ARMConstantPoolValue *ACPV);
+ bool hasSameValue(ARMConstantPoolValue *ACPV) override;
- virtual void addSelectionDAGCSEId(FoldingSetNodeID &ID);
+ void addSelectionDAGCSEId(FoldingSetNodeID &ID) override;
- virtual void print(raw_ostream &O) const;
+ void print(raw_ostream &O) const override;
static bool classof(const ARMConstantPoolValue *APV) {
return APV->isGlobalValue() || APV->isBlockAddress() || APV->isLSDA();
}
@@ -198,16 +198,16 @@ public:
const char *getSymbol() const { return S.c_str(); }
- virtual int getExistingMachineCPValue(MachineConstantPool *CP,
- unsigned Alignment);
+ int getExistingMachineCPValue(MachineConstantPool *CP,
+ unsigned Alignment) override;
- virtual void addSelectionDAGCSEId(FoldingSetNodeID &ID);
+ void addSelectionDAGCSEId(FoldingSetNodeID &ID) override;
/// hasSameValue - Return true if this ARM constpool value can share the same
/// constantpool entry as another ARM constpool value.
- virtual bool hasSameValue(ARMConstantPoolValue *ACPV);
+ bool hasSameValue(ARMConstantPoolValue *ACPV) override;
- virtual void print(raw_ostream &O) const;
+ void print(raw_ostream &O) const override;
static bool classof(const ARMConstantPoolValue *ACPV) {
return ACPV->isExtSymbol();
@@ -234,16 +234,16 @@ public:
const MachineBasicBlock *getMBB() const { return MBB; }
- virtual int getExistingMachineCPValue(MachineConstantPool *CP,
- unsigned Alignment);
+ int getExistingMachineCPValue(MachineConstantPool *CP,
+ unsigned Alignment) override;
- virtual void addSelectionDAGCSEId(FoldingSetNodeID &ID);
+ void addSelectionDAGCSEId(FoldingSetNodeID &ID) override;
/// hasSameValue - Return true if this ARM constpool value can share the same
/// constantpool entry as another ARM constpool value.
- virtual bool hasSameValue(ARMConstantPoolValue *ACPV);
+ bool hasSameValue(ARMConstantPoolValue *ACPV) override;
- virtual void print(raw_ostream &O) const;
+ void print(raw_ostream &O) const override;
static bool classof(const ARMConstantPoolValue *ACPV) {
return ACPV->isMachineBasicBlock();
diff --git a/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index 3e62b64..51d3dbb 100644
--- a/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -14,21 +14,25 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "arm-pseudo"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
+#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h" // FIXME: for debug only. remove!
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "arm-pseudo"
+
static cl::opt<bool>
VerifyARMPseudo("verify-arm-pseudo-expand", cl::Hidden,
cl::desc("Verify machine code after expanding ARM pseudos"));
@@ -44,9 +48,9 @@ namespace {
const ARMSubtarget *STI;
ARMFunctionInfo *AFI;
- virtual bool runOnMachineFunction(MachineFunction &Fn);
+ bool runOnMachineFunction(MachineFunction &Fn) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "ARM pseudo instruction expansion pass";
}
@@ -343,7 +347,7 @@ static const NEONLdStTableEntry *LookupNEONLdSt(unsigned Opcode) {
std::lower_bound(NEONLdStTable, NEONLdStTable + NumEntries, Opcode);
if (I != NEONLdStTable + NumEntries && I->PseudoOpc == Opcode)
return I;
- return NULL;
+ return nullptr;
}
/// GetDSubRegs - Get 4 D subregisters of a Q, QQ, or QQQQ register,
@@ -479,6 +483,8 @@ void ARMExpandPseudo::ExpandVST(MachineBasicBlock::iterator &MBBI) {
if (SrcIsKill && !SrcIsUndef) // Add an implicit kill for the super-reg.
MIB->addRegisterKilled(SrcReg, TRI, true);
+ else if (!SrcIsUndef)
+ MIB.addReg(SrcReg, RegState::Implicit); // Add implicit uses for src reg.
TransferImpOps(MI, MIB, MIB);
// Transfer memoperands.
@@ -604,12 +610,45 @@ void ARMExpandPseudo::ExpandVTBL(MachineBasicBlock::iterator &MBBI,
MIB.addOperand(MI.getOperand(OpIdx++));
MIB.addOperand(MI.getOperand(OpIdx++));
- if (SrcIsKill) // Add an implicit kill for the super-reg.
- MIB->addRegisterKilled(SrcReg, TRI, true);
+ // Add an implicit kill and use for the super-reg.
+ MIB.addReg(SrcReg, RegState::Implicit | getKillRegState(SrcIsKill));
TransferImpOps(MI, MIB, MIB);
MI.eraseFromParent();
}
+static bool IsAnAddressOperand(const MachineOperand &MO) {
+ // This check is overly conservative. Unless we are certain that the machine
+ // operand is not a symbol reference, we return that it is a symbol reference.
+  // This is important as the load pair may not be split up on Windows.
+ switch (MO.getType()) {
+ case MachineOperand::MO_Register:
+ case MachineOperand::MO_Immediate:
+ case MachineOperand::MO_CImmediate:
+ case MachineOperand::MO_FPImmediate:
+ return false;
+ case MachineOperand::MO_MachineBasicBlock:
+ return true;
+ case MachineOperand::MO_FrameIndex:
+ return false;
+ case MachineOperand::MO_ConstantPoolIndex:
+ case MachineOperand::MO_TargetIndex:
+ case MachineOperand::MO_JumpTableIndex:
+ case MachineOperand::MO_ExternalSymbol:
+ case MachineOperand::MO_GlobalAddress:
+ case MachineOperand::MO_BlockAddress:
+ return true;
+ case MachineOperand::MO_RegisterMask:
+ case MachineOperand::MO_RegisterLiveOut:
+ return false;
+ case MachineOperand::MO_Metadata:
+ case MachineOperand::MO_MCSymbol:
+ return true;
+ case MachineOperand::MO_CFIIndex:
+ return false;
+ }
+ llvm_unreachable("unhandled machine operand type");
+}
+
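+// The classification above deliberately errs on the side of answering "yes":
+// on Windows on ARM the movw/movt pair that materializes an address carries a
+// single relocation, so the two instructions must stay bundled and must never
+// be separated by later passes. The sketch below only illustrates that
+// conservative-default pattern with a hypothetical enum; it is not the
+// MachineOperand API.
+//
+//   enum class OpKind { Register, Immediate, FrameIndex, Global, Symbol, Other };
+//
+//   static bool mayBeAddress(OpKind K) {
+//     switch (K) {
+//     case OpKind::Register:
+//     case OpKind::Immediate:
+//     case OpKind::FrameIndex:
+//       return false;                 // provably not a symbol reference
+//     case OpKind::Global:
+//     case OpKind::Symbol:
+//     case OpKind::Other:
+//       return true;                  // err on the side of bundling
+//     }
+//     return true;
+//   }
+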
void ARMExpandPseudo::ExpandMOV32BitImm(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI) {
MachineInstr &MI = *MBBI;
@@ -620,10 +659,14 @@ void ARMExpandPseudo::ExpandMOV32BitImm(MachineBasicBlock &MBB,
bool DstIsDead = MI.getOperand(0).isDead();
bool isCC = Opcode == ARM::MOVCCi32imm || Opcode == ARM::t2MOVCCi32imm;
const MachineOperand &MO = MI.getOperand(isCC ? 2 : 1);
+ bool RequiresBundling = STI->isTargetWindows() && IsAnAddressOperand(MO);
MachineInstrBuilder LO16, HI16;
if (!STI->hasV6T2Ops() &&
(Opcode == ARM::MOVi32imm || Opcode == ARM::MOVCCi32imm)) {
+ // FIXME Windows CE supports older ARM CPUs
+ assert(!STI->isTargetWindows() && "Windows on ARM requires ARMv7+");
+
// Expand into a movi + orr.
LO16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVi), DstReg);
HI16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::ORRri))
@@ -660,17 +703,29 @@ void ARMExpandPseudo::ExpandMOV32BitImm(MachineBasicBlock &MBB,
.addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
.addReg(DstReg);
- if (MO.isImm()) {
+ switch (MO.getType()) {
+ case MachineOperand::MO_Immediate: {
unsigned Imm = MO.getImm();
unsigned Lo16 = Imm & 0xffff;
unsigned Hi16 = (Imm >> 16) & 0xffff;
LO16 = LO16.addImm(Lo16);
HI16 = HI16.addImm(Hi16);
- } else {
+ break;
+ }
+ case MachineOperand::MO_ExternalSymbol: {
+ const char *ES = MO.getSymbolName();
+ unsigned TF = MO.getTargetFlags();
+ LO16 = LO16.addExternalSymbol(ES, TF | ARMII::MO_LO16);
+ HI16 = HI16.addExternalSymbol(ES, TF | ARMII::MO_HI16);
+ break;
+ }
+ default: {
const GlobalValue *GV = MO.getGlobal();
unsigned TF = MO.getTargetFlags();
LO16 = LO16.addGlobalAddress(GV, MO.getOffset(), TF | ARMII::MO_LO16);
HI16 = HI16.addGlobalAddress(GV, MO.getOffset(), TF | ARMII::MO_HI16);
+ break;
+ }
}
LO16->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
@@ -678,6 +733,9 @@ void ARMExpandPseudo::ExpandMOV32BitImm(MachineBasicBlock &MBB,
LO16.addImm(Pred).addReg(PredReg);
HI16.addImm(Pred).addReg(PredReg);
+ if (RequiresBundling)
+ finalizeBundle(MBB, &*LO16, &*MBBI);
+
TransferImpOps(MI, LO16, HI16);
MI.eraseFromParent();
}
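
ExpandMOV32BitImm above splits a 32-bit value into a movw of the low half and a movt of the high half, and with this change the same MO_LO16/MO_HI16 split is applied to external symbols as well as global addresses. A worked example of the immediate case, showing only the arithmetic the expansion performs:

    // Lo16/Hi16 split performed for a plain 32-bit immediate:
    // movw writes the low half, movt the high half.
    #include <cstdio>

    int main() {
      unsigned Imm = 0x12345678;
      unsigned Lo16 = Imm & 0xffff;          // -> movw rD, #0x5678
      unsigned Hi16 = (Imm >> 16) & 0xffff;  // -> movt rD, #0x1234
      printf("movw #0x%04x, movt #0x%04x\n", Lo16, Hi16);
      // Reassembling the halves gives back the original value.
      printf("ok: %d\n", ((Hi16 << 16) | Lo16) == Imm);
    }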
@@ -869,10 +927,16 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
}
case ARM::tTPsoft:
case ARM::TPsoft: {
- MachineInstrBuilder MIB =
- BuildMI(MBB, MBBI, MI.getDebugLoc(),
- TII->get(Opcode == ARM::tTPsoft ? ARM::tBL : ARM::BL))
- .addExternalSymbol("__aeabi_read_tp", 0);
+ MachineInstrBuilder MIB;
+ if (Opcode == ARM::tTPsoft)
+ MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
+            TII->get(ARM::tBL))
+ .addImm((unsigned)ARMCC::AL).addReg(0)
+ .addExternalSymbol("__aeabi_read_tp", 0);
+ else
+ MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
+            TII->get(ARM::BL))
+ .addExternalSymbol("__aeabi_read_tp", 0);
MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
TransferImpOps(MI, MIB, MIB);
@@ -900,10 +964,61 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
return true;
}
- case ARM::MOV_ga_dyn:
+ case ARM::LDRLIT_ga_abs:
+ case ARM::LDRLIT_ga_pcrel:
+ case ARM::LDRLIT_ga_pcrel_ldr:
+ case ARM::tLDRLIT_ga_abs:
+ case ARM::tLDRLIT_ga_pcrel: {
+ unsigned DstReg = MI.getOperand(0).getReg();
+ bool DstIsDead = MI.getOperand(0).isDead();
+ const MachineOperand &MO1 = MI.getOperand(1);
+ const GlobalValue *GV = MO1.getGlobal();
+ bool IsARM =
+ Opcode != ARM::tLDRLIT_ga_pcrel && Opcode != ARM::tLDRLIT_ga_abs;
+ bool IsPIC =
+ Opcode != ARM::LDRLIT_ga_abs && Opcode != ARM::tLDRLIT_ga_abs;
+ unsigned LDRLITOpc = IsARM ? ARM::LDRi12 : ARM::tLDRpci;
+ unsigned PICAddOpc =
+ IsARM
+ ? (Opcode == ARM::LDRLIT_ga_pcrel_ldr ? ARM::PICADD : ARM::PICLDR)
+ : ARM::tPICADD;
+
+ // We need a new const-pool entry to load from.
+ MachineConstantPool *MCP = MBB.getParent()->getConstantPool();
+ unsigned ARMPCLabelIndex = 0;
+ MachineConstantPoolValue *CPV;
+
+ if (IsPIC) {
+ unsigned PCAdj = IsARM ? 8 : 4;
+ ARMPCLabelIndex = AFI->createPICLabelUId();
+ CPV = ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex,
+ ARMCP::CPValue, PCAdj);
+ } else
+ CPV = ARMConstantPoolConstant::Create(GV, ARMCP::no_modifier);
+
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(LDRLITOpc), DstReg)
+ .addConstantPoolIndex(MCP->getConstantPoolIndex(CPV, 4));
+ if (IsARM)
+ MIB.addImm(0);
+ AddDefaultPred(MIB);
+
+ if (IsPIC) {
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(PICAddOpc))
+ .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
+ .addReg(DstReg)
+ .addImm(ARMPCLabelIndex);
+
+ if (IsARM)
+ AddDefaultPred(MIB);
+ }
+
+ MI.eraseFromParent();
+ return true;
+ }
case ARM::MOV_ga_pcrel:
case ARM::MOV_ga_pcrel_ldr:
- case ARM::t2MOV_ga_dyn:
case ARM::t2MOV_ga_pcrel: {
// Expand into movw + movw. Also "add pc" / ldr [pc] in PIC mode.
unsigned LabelId = AFI->createPICLabelUId();
@@ -912,14 +1027,11 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
const MachineOperand &MO1 = MI.getOperand(1);
const GlobalValue *GV = MO1.getGlobal();
unsigned TF = MO1.getTargetFlags();
- bool isARM = (Opcode != ARM::t2MOV_ga_pcrel && Opcode!=ARM::t2MOV_ga_dyn);
- bool isPIC = (Opcode != ARM::MOV_ga_dyn && Opcode != ARM::t2MOV_ga_dyn);
+ bool isARM = Opcode != ARM::t2MOV_ga_pcrel;
unsigned LO16Opc = isARM ? ARM::MOVi16_ga_pcrel : ARM::t2MOVi16_ga_pcrel;
unsigned HI16Opc = isARM ? ARM::MOVTi16_ga_pcrel :ARM::t2MOVTi16_ga_pcrel;
- unsigned LO16TF = isPIC
- ? ARMII::MO_LO16_NONLAZY_PIC : ARMII::MO_LO16_NONLAZY;
- unsigned HI16TF = isPIC
- ? ARMII::MO_HI16_NONLAZY_PIC : ARMII::MO_HI16_NONLAZY;
+ unsigned LO16TF = TF | ARMII::MO_LO16;
+ unsigned HI16TF = TF | ARMII::MO_HI16;
unsigned PICAddOpc = isARM
? (Opcode == ARM::MOV_ga_pcrel_ldr ? ARM::PICLDR : ARM::PICADD)
: ARM::tPICADD;
@@ -927,16 +1039,11 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
TII->get(LO16Opc), DstReg)
.addGlobalAddress(GV, MO1.getOffset(), TF | LO16TF)
.addImm(LabelId);
- MachineInstrBuilder MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(),
- TII->get(HI16Opc), DstReg)
+
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(HI16Opc), DstReg)
.addReg(DstReg)
.addGlobalAddress(GV, MO1.getOffset(), TF | HI16TF)
.addImm(LabelId);
- if (!isPIC) {
- TransferImpOps(MI, MIB1, MIB2);
- MI.eraseFromParent();
- return true;
- }
MachineInstrBuilder MIB3 = BuildMI(MBB, MBBI, MI.getDebugLoc(),
TII->get(PICAddOpc))
@@ -1032,33 +1139,6 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
MI.eraseFromParent();
return true;
}
- case ARM::VDUPfqf:
- case ARM::VDUPfdf:{
- unsigned NewOpc = Opcode == ARM::VDUPfqf ? ARM::VDUPLN32q :
- ARM::VDUPLN32d;
- MachineInstrBuilder MIB =
- BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc));
- unsigned OpIdx = 0;
- unsigned SrcReg = MI.getOperand(1).getReg();
- unsigned Lane = TRI->getEncodingValue(SrcReg) & 1;
- unsigned DReg = TRI->getMatchingSuperReg(SrcReg,
- Lane & 1 ? ARM::ssub_1 : ARM::ssub_0,
- &ARM::DPR_VFP2RegClass);
- // The lane is [0,1] for the containing DReg superregister.
- // Copy the dst/src register operands.
- MIB.addOperand(MI.getOperand(OpIdx++));
- MIB.addReg(DReg);
- ++OpIdx;
- // Add the lane select operand.
- MIB.addImm(Lane);
- // Add the predicate operands.
- MIB.addOperand(MI.getOperand(OpIdx++));
- MIB.addOperand(MI.getOperand(OpIdx++));
-
- TransferImpOps(MI, MIB, MIB);
- MI.eraseFromParent();
- return true;
- }
case ARM::VLD2q8Pseudo:
case ARM::VLD2q16Pseudo:
@@ -1253,7 +1333,7 @@ bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
while (MBBI != E) {
- MachineBasicBlock::iterator NMBBI = llvm::next(MBBI);
+ MachineBasicBlock::iterator NMBBI = std::next(MBBI);
Modified |= ExpandMI(MBB, MBBI);
MBBI = NMBBI;
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMFPUName.def b/contrib/llvm/lib/Target/ARM/ARMFPUName.def
index 9a1bbe7..1fef3b3 100644
--- a/contrib/llvm/lib/Target/ARM/ARMFPUName.def
+++ b/contrib/llvm/lib/Target/ARM/ARMFPUName.def
@@ -28,5 +28,6 @@ ARM_FPU_NAME("neon", NEON)
ARM_FPU_NAME("neon-vfpv4", NEON_VFPV4)
ARM_FPU_NAME("neon-fp-armv8", NEON_FP_ARMV8)
ARM_FPU_NAME("crypto-neon-fp-armv8", CRYPTO_NEON_FP_ARMV8)
+ARM_FPU_NAME("softvfp", SOFTVFP)
#undef ARM_FPU_NAME
diff --git a/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp b/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp
index a4004f3..e2d90cd 100644
--- a/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -14,11 +14,12 @@
//===----------------------------------------------------------------------===//
#include "ARM.h"
-#include "ARMBaseInstrInfo.h"
+#include "ARMBaseRegisterInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
+#include "ARMISelLowering.h"
+#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
-#include "ARMTargetMachine.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/Analysis.h"
@@ -30,18 +31,18 @@
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
@@ -73,11 +74,12 @@ namespace {
}
} Address;
-class ARMFastISel : public FastISel {
+class ARMFastISel final : public FastISel {
/// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
/// make the right decision when generating code for different targets.
const ARMSubtarget *Subtarget;
+ Module &M;
const TargetMachine &TM;
const TargetInstrInfo &TII;
const TargetLowering &TLI;
@@ -91,6 +93,7 @@ class ARMFastISel : public FastISel {
explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo)
: FastISel(funcInfo, libInfo),
+ M(const_cast<Module&>(*funcInfo.Fn->getParent())),
TM(funcInfo.MF->getTarget()),
TII(*TM.getInstrInfo()),
TLI(*TM.getTargetLowering()) {
@@ -102,8 +105,6 @@ class ARMFastISel : public FastISel {
// Code from FastISel.cpp.
private:
- unsigned FastEmitInst_(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC);
unsigned FastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill);
@@ -120,10 +121,6 @@ class ARMFastISel : public FastISel {
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
uint64_t Imm);
- unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC,
- unsigned Op0, bool Op0IsKill,
- const ConstantFP *FPImm);
unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
@@ -132,22 +129,15 @@ class ARMFastISel : public FastISel {
unsigned FastEmitInst_i(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
uint64_t Imm);
- unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC,
- uint64_t Imm1, uint64_t Imm2);
-
- unsigned FastEmitInst_extractsubreg(MVT RetVT,
- unsigned Op0, bool Op0IsKill,
- uint32_t Idx);
// Backend specific FastISel code.
private:
- virtual bool TargetSelectInstruction(const Instruction *I);
- virtual unsigned TargetMaterializeConstant(const Constant *C);
- virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
- virtual bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
- const LoadInst *LI);
- virtual bool FastLowerArguments();
+ bool TargetSelectInstruction(const Instruction *I) override;
+ unsigned TargetMaterializeConstant(const Constant *C) override;
+ unsigned TargetMaterializeAlloca(const AllocaInst *AI) override;
+ bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
+ const LoadInst *LI) override;
+ bool FastLowerArguments() override;
private:
#include "ARMGenFastISel.inc"
@@ -176,8 +166,6 @@ class ARMFastISel : public FastISel {
// Utility routines.
private:
- unsigned constrainOperandRegClass(const MCInstrDesc &II, unsigned OpNum,
- unsigned Op);
bool isTypeLegal(Type *Ty, MVT &VT);
bool isLoadTypeLegal(Type *Ty, MVT &VT);
bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
@@ -201,6 +189,8 @@ class ARMFastISel : public FastISel {
unsigned ARMSelectCallOp(bool UseReg);
unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT);
+ const TargetLowering *getTargetLowering() { return TM.getTargetLowering(); }
+
// Call handling routines.
private:
CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
@@ -293,32 +283,6 @@ ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
return MIB;
}
-unsigned ARMFastISel::constrainOperandRegClass(const MCInstrDesc &II,
- unsigned Op, unsigned OpNum) {
- if (TargetRegisterInfo::isVirtualRegister(Op)) {
- const TargetRegisterClass *RegClass =
- TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
- if (!MRI.constrainRegClass(Op, RegClass)) {
- // If it's not legal to COPY between the register classes, something
- // has gone very wrong before we got here.
- unsigned NewOp = createResultReg(RegClass);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(TargetOpcode::COPY), NewOp).addReg(Op));
- return NewOp;
- }
- }
- return Op;
-}
-
-unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
- const TargetRegisterClass* RC) {
- unsigned ResultReg = createResultReg(RC);
- const MCInstrDesc &II = TII.get(MachineInstOpcode);
-
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
- return ResultReg;
-}
-
unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill) {
@@ -329,12 +293,12 @@ unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
// for this instruction.
Op0 = constrainOperandRegClass(II, Op0, 1);
if (II.getNumDefs() >= 1) {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
- .addReg(Op0, Op0IsKill * RegState::Kill));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
+ ResultReg).addReg(Op0, Op0IsKill * RegState::Kill));
} else {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(II.ImplicitDefs[0]));
}
@@ -354,14 +318,15 @@ unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
Op1 = constrainOperandRegClass(II, Op1, 2);
if (II.getNumDefs() >= 1) {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
- .addReg(Op0, Op0IsKill * RegState::Kill)
- .addReg(Op1, Op1IsKill * RegState::Kill));
+ AddOptionalDefs(
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+ .addReg(Op0, Op0IsKill * RegState::Kill)
+ .addReg(Op1, Op1IsKill * RegState::Kill));
} else {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(II.ImplicitDefs[0]));
}
@@ -383,16 +348,17 @@ unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
Op2 = constrainOperandRegClass(II, Op1, 3);
if (II.getNumDefs() >= 1) {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
- .addReg(Op0, Op0IsKill * RegState::Kill)
- .addReg(Op1, Op1IsKill * RegState::Kill)
- .addReg(Op2, Op2IsKill * RegState::Kill));
+ AddOptionalDefs(
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+ .addReg(Op0, Op0IsKill * RegState::Kill)
+ .addReg(Op1, Op1IsKill * RegState::Kill)
+ .addReg(Op2, Op2IsKill * RegState::Kill));
} else {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addReg(Op2, Op2IsKill * RegState::Kill));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(II.ImplicitDefs[0]));
}
@@ -410,39 +376,15 @@ unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
// for this instruction.
Op0 = constrainOperandRegClass(II, Op0, 1);
if (II.getNumDefs() >= 1) {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
- .addReg(Op0, Op0IsKill * RegState::Kill)
- .addImm(Imm));
+ AddOptionalDefs(
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+ .addReg(Op0, Op0IsKill * RegState::Kill)
+ .addImm(Imm));
} else {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Imm));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(TargetOpcode::COPY), ResultReg)
- .addReg(II.ImplicitDefs[0]));
- }
- return ResultReg;
-}
-
-unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC,
- unsigned Op0, bool Op0IsKill,
- const ConstantFP *FPImm) {
- unsigned ResultReg = createResultReg(RC);
- const MCInstrDesc &II = TII.get(MachineInstOpcode);
-
- // Make sure the input operand is sufficiently constrained to be legal
- // for this instruction.
- Op0 = constrainOperandRegClass(II, Op0, 1);
- if (II.getNumDefs() >= 1) {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
- .addReg(Op0, Op0IsKill * RegState::Kill)
- .addFPImm(FPImm));
- } else {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
- .addReg(Op0, Op0IsKill * RegState::Kill)
- .addFPImm(FPImm));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(II.ImplicitDefs[0]));
}
@@ -462,16 +404,17 @@ unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
Op0 = constrainOperandRegClass(II, Op0, 1);
Op1 = constrainOperandRegClass(II, Op1, 2);
if (II.getNumDefs() >= 1) {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
- .addReg(Op0, Op0IsKill * RegState::Kill)
- .addReg(Op1, Op1IsKill * RegState::Kill)
- .addImm(Imm));
+ AddOptionalDefs(
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+ .addReg(Op0, Op0IsKill * RegState::Kill)
+ .addReg(Op1, Op1IsKill * RegState::Kill)
+ .addImm(Imm));
} else {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addImm(Imm));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(II.ImplicitDefs[0]));
}
@@ -485,58 +428,25 @@ unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1) {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
- .addImm(Imm));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
+ ResultReg).addImm(Imm));
} else {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addImm(Imm));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(II.ImplicitDefs[0]));
}
return ResultReg;
}
-unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC,
- uint64_t Imm1, uint64_t Imm2) {
- unsigned ResultReg = createResultReg(RC);
- const MCInstrDesc &II = TII.get(MachineInstOpcode);
-
- if (II.getNumDefs() >= 1) {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
- .addImm(Imm1).addImm(Imm2));
- } else {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
- .addImm(Imm1).addImm(Imm2));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(TargetOpcode::COPY),
- ResultReg)
- .addReg(II.ImplicitDefs[0]));
- }
- return ResultReg;
-}
-
-unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
- unsigned Op0, bool Op0IsKill,
- uint32_t Idx) {
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
- assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
- "Cannot yet extract from physregs");
-
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(TargetOpcode::COPY), ResultReg)
- .addReg(Op0, getKillRegState(Op0IsKill), Idx));
- return ResultReg;
-}
-
// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
if (VT == MVT::f64) return 0;
unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VMOVSR), MoveReg)
.addReg(SrcReg));
return MoveReg;
@@ -546,7 +456,7 @@ unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
if (VT == MVT::i64) return 0;
unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VMOVRS), MoveReg)
.addReg(SrcReg));
return MoveReg;
@@ -572,9 +482,8 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
Opc = ARM::FCONSTS;
}
unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
- DestReg)
- .addImm(Imm));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc), DestReg).addImm(Imm));
return DestReg;
}
@@ -582,20 +491,20 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
if (!Subtarget->hasVFP2()) return false;
// MachineConstantPool wants an explicit alignment.
- unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
+ unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
if (Align == 0) {
// TODO: Figure out if this is correct.
- Align = TD.getTypeAllocSize(CFP->getType());
+ Align = DL.getTypeAllocSize(CFP->getType());
}
unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;
// The extra reg is for addrmode5.
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
- DestReg)
- .addConstantPoolIndex(Idx)
- .addReg(0));
+ AddOptionalDefs(
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
+ .addConstantPoolIndex(Idx)
+ .addReg(0));
return DestReg;
}
@@ -612,7 +521,7 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
&ARM::GPRRegClass;
unsigned ImmReg = createResultReg(RC);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ImmReg)
.addImm(CI->getZExtValue()));
return ImmReg;
@@ -626,7 +535,7 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
if (UseImm) {
unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ImmReg)
.addImm(Imm));
return ImmReg;
@@ -640,24 +549,25 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
// MachineConstantPool wants an explicit alignment.
- unsigned Align = TD.getPrefTypeAlignment(C->getType());
+ unsigned Align = DL.getPrefTypeAlignment(C->getType());
if (Align == 0) {
// TODO: Figure out if this is correct.
- Align = TD.getTypeAllocSize(C->getType());
+ Align = DL.getTypeAllocSize(C->getType());
}
unsigned Idx = MCP.getConstantPoolIndex(C, Align);
if (isThumb2)
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::t2LDRpci), DestReg)
.addConstantPoolIndex(Idx));
- else
+ else {
// The extra immediate is for addrmode2.
DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::LDRcp), DestReg)
.addConstantPoolIndex(Idx)
.addImm(0));
+ }
return DestReg;
}
@@ -673,37 +583,36 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
(const TargetRegisterClass*)&ARM::GPRRegClass;
unsigned DestReg = createResultReg(RC);
- // FastISel TLS support on non-Darwin is broken, punt to SelectionDAG.
+ // FastISel TLS support on non-MachO is broken, punt to SelectionDAG.
const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
bool IsThreadLocal = GVar && GVar->isThreadLocal();
- if (!Subtarget->isTargetDarwin() && IsThreadLocal) return 0;
+ if (!Subtarget->isTargetMachO() && IsThreadLocal) return 0;
// Use movw+movt when possible, it avoids constant pool entries.
- // Darwin targets don't support movt with Reloc::Static, see
- // ARMTargetLowering::LowerGlobalAddressDarwin. Other targets only support
- // static movt relocations.
- if (Subtarget->useMovt() &&
- Subtarget->isTargetDarwin() == (RelocM != Reloc::Static)) {
+ // Non-darwin targets only support static movt relocations in FastISel.
+ if (Subtarget->useMovt(*FuncInfo.MF) &&
+ (Subtarget->isTargetMachO() || RelocM == Reloc::Static)) {
unsigned Opc;
+ unsigned char TF = 0;
+ if (Subtarget->isTargetMachO())
+ TF = ARMII::MO_NONLAZY;
+
switch (RelocM) {
case Reloc::PIC_:
Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
break;
- case Reloc::DynamicNoPIC:
- Opc = isThumb2 ? ARM::t2MOV_ga_dyn : ARM::MOV_ga_dyn;
- break;
default:
Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
break;
}
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
- DestReg).addGlobalAddress(GV));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc), DestReg).addGlobalAddress(GV, 0, TF));
} else {
// MachineConstantPool wants an explicit alignment.
- unsigned Align = TD.getPrefTypeAlignment(GV->getType());
+ unsigned Align = DL.getPrefTypeAlignment(GV->getType());
if (Align == 0) {
// TODO: Figure out if this is correct.
- Align = TD.getTypeAllocSize(GV->getType());
+ Align = DL.getTypeAllocSize(GV->getType());
}
if (Subtarget->isTargetELF() && RelocM == Reloc::PIC_)
@@ -722,18 +631,18 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
MachineInstrBuilder MIB;
if (isThumb2) {
unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
- .addConstantPoolIndex(Idx);
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
+ DestReg).addConstantPoolIndex(Idx);
if (RelocM == Reloc::PIC_)
MIB.addImm(Id);
AddOptionalDefs(MIB);
} else {
// The extra immediate is for addrmode2.
DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
- DestReg)
- .addConstantPoolIndex(Idx)
- .addImm(0);
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(ARM::LDRcp), DestReg)
+ .addConstantPoolIndex(Idx)
+ .addImm(0);
AddOptionalDefs(MIB);
if (RelocM == Reloc::PIC_) {
@@ -741,7 +650,7 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(Opc), NewDestReg)
+ DbgLoc, TII.get(Opc), NewDestReg)
.addReg(DestReg)
.addImm(Id);
AddOptionalDefs(MIB);
@@ -754,15 +663,15 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
MachineInstrBuilder MIB;
unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
if (isThumb2)
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::t2LDRi12), NewDestReg)
.addReg(DestReg)
.addImm(0);
else
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
- NewDestReg)
- .addReg(DestReg)
- .addImm(0);
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(ARM::LDRi12), NewDestReg)
+ .addReg(DestReg)
+ .addImm(0);
DestReg = NewDestReg;
AddOptionalDefs(MIB);
}
@@ -802,10 +711,12 @@ unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
// This will get lowered later into the correct offsets and registers
// via rewriteXFrameIndex.
if (SI != FuncInfo.StaticAllocaMap.end()) {
+ unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
unsigned ResultReg = createResultReg(RC);
- unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ ResultReg = constrainOperandRegClass(TII.get(Opc), ResultReg, 0);
+
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg)
.addFrameIndex(SI->second)
.addImm(0));
@@ -841,7 +752,7 @@ bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
// Some boilerplate from the X86 FastISel.
- const User *U = NULL;
+ const User *U = nullptr;
unsigned Opcode = Instruction::UserOp1;
if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
// Don't walk into other basic blocks unless the object is an alloca from
@@ -889,11 +800,11 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
i != e; ++i, ++GTI) {
const Value *Op = *i;
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
- const StructLayout *SL = TD.getStructLayout(STy);
+ const StructLayout *SL = DL.getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
TmpOffset += SL->getElementOffset(Idx);
} else {
- uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
+ uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
for (;;) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
// Constant-offset addressing.
@@ -979,7 +890,7 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
(const TargetRegisterClass*)&ARM::GPRRegClass;
unsigned ResultReg = createResultReg(RC);
unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg)
.addFrameIndex(Addr.Base.FI)
.addImm(0));
@@ -1130,7 +1041,7 @@ bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
if (allocReg)
ResultReg = createResultReg(RC);
assert (ResultReg > 255 && "Expected an allocated virtual register.");
- MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg);
AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);
@@ -1138,7 +1049,7 @@ bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
// load. Now we must move from the GRP to the FP register.
if (needVMOV) {
unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VMOVSR), MoveReg)
.addReg(ResultReg));
ResultReg = MoveReg;
@@ -1180,7 +1091,7 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
(const TargetRegisterClass*)&ARM::GPRRegClass);
unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), Res)
.addReg(SrcReg).addImm(1));
SrcReg = Res;
@@ -1227,7 +1138,7 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
// Unaligned stores need special handling. Floats require word-alignment.
if (Alignment && Alignment < 4) {
unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VMOVRS), MoveReg)
.addReg(SrcReg));
SrcReg = MoveReg;
@@ -1252,7 +1163,7 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
// Create the base instruction, then add the operands.
SrcReg = constrainOperandRegClass(TII.get(StrOpc), SrcReg, 0);
- MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(StrOpc))
.addReg(SrcReg);
AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
@@ -1363,9 +1274,9 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
return false;
unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
.addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
- FastEmitBranch(FBB, DL);
+ FastEmitBranch(FBB, DbgLoc);
FuncInfo.MBB->addSuccessor(TBB);
return true;
}
@@ -1376,7 +1287,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
unsigned OpReg = getRegForValue(TI->getOperand(0));
OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TstOpc))
.addReg(OpReg).addImm(1));
@@ -1387,10 +1298,10 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
}
unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
.addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
- FastEmitBranch(FBB, DL);
+ FastEmitBranch(FBB, DbgLoc);
FuncInfo.MBB->addSuccessor(TBB);
return true;
}
@@ -1398,7 +1309,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
dyn_cast<ConstantInt>(BI->getCondition())) {
uint64_t Imm = CI->getZExtValue();
MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
- FastEmitBranch(Target, DL);
+ FastEmitBranch(Target, DbgLoc);
return true;
}
@@ -1414,8 +1325,10 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
// the one-bit value left in the virtual register.
unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
CmpReg = constrainOperandRegClass(TII.get(TstOpc), CmpReg, 0);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
- .addReg(CmpReg).addImm(1));
+ AddOptionalDefs(
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
+ .addReg(CmpReg)
+ .addImm(1));
unsigned CCMode = ARMCC::NE;
if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
@@ -1424,9 +1337,9 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
}
unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
.addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
- FastEmitBranch(FBB, DL);
+ FastEmitBranch(FBB, DbgLoc);
FuncInfo.MBB->addSuccessor(TBB);
return true;
}
@@ -1436,8 +1349,8 @@ bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
if (AddrReg == 0) return false;
unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
- .addReg(AddrReg));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc)).addReg(AddrReg));
const IndirectBrInst *IB = cast<IndirectBrInst>(I);
for (unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i)
@@ -1470,7 +1383,7 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
const APInt &CIVal = ConstInt->getValue();
Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
// For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
- // then a cmn, because there is no way to represent 2147483648 as a
+ // then a cmn, because there is no way to represent 2147483648 as a
// signed 32-bit int.
if (Imm < 0 && Imm != (int)0x80000000) {
isNegativeImm = true;
@@ -1542,11 +1455,11 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0);
if (!UseImm) {
SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(SrcReg1).addReg(SrcReg2));
} else {
MachineInstrBuilder MIB;
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(SrcReg1);
// Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
@@ -1558,7 +1471,7 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
// For floating point we need to move the result to a comparison register
// that we can then use for branches.
if (Ty->isFloatTy() || Ty->isDoubleTy())
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::FMSTAT)));
return true;
}
@@ -1586,7 +1499,7 @@ bool ARMFastISel::SelectCmp(const Instruction *I) {
Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
unsigned ZeroReg = TargetMaterializeConstant(Zero);
// ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), DestReg)
.addReg(ZeroReg).addImm(1)
.addImm(ARMPred).addReg(ARM::CPSR);
@@ -1606,7 +1519,7 @@ bool ARMFastISel::SelectFPExt(const Instruction *I) {
if (Op == 0) return false;
unsigned Result = createResultReg(&ARM::DPRRegClass);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VCVTDS), Result)
.addReg(Op));
UpdateValueMap(I, Result);
@@ -1625,7 +1538,7 @@ bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
if (Op == 0) return false;
unsigned Result = createResultReg(&ARM::SPRRegClass);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VCVTSD), Result)
.addReg(Op));
UpdateValueMap(I, Result);
@@ -1670,9 +1583,8 @@ bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
else return false;
unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
- ResultReg)
- .addReg(FP));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc), ResultReg).addReg(FP));
UpdateValueMap(I, ResultReg);
return true;
}
@@ -1697,9 +1609,8 @@ bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
// f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
- ResultReg)
- .addReg(Op));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc), ResultReg).addReg(Op));
// This result needs to be in an integer register, but the conversion only
// takes place in fp-regs.
@@ -1746,8 +1657,10 @@ bool ARMFastISel::SelectSelect(const Instruction *I) {
unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
CondReg = constrainOperandRegClass(TII.get(CmpOpc), CondReg, 0);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
- .addReg(CondReg).addImm(0));
+ AddOptionalDefs(
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc))
+ .addReg(CondReg)
+ .addImm(0));
unsigned MovCCOpc;
const TargetRegisterClass *RC;
@@ -1765,12 +1678,20 @@ bool ARMFastISel::SelectSelect(const Instruction *I) {
if (!UseImm) {
Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1);
Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
- .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
+ ResultReg)
+ .addReg(Op2Reg)
+ .addReg(Op1Reg)
+ .addImm(ARMCC::NE)
+ .addReg(ARM::CPSR);
} else {
Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 1);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
- .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
+ ResultReg)
+ .addReg(Op1Reg)
+ .addImm(Imm)
+ .addImm(ARMCC::EQ)
+ .addReg(ARM::CPSR);
}
UpdateValueMap(I, ResultReg);
return true;
@@ -1859,7 +1780,7 @@ bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1);
SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg)
.addReg(SrcReg1).addReg(SrcReg2));
UpdateValueMap(I, ResultReg);
@@ -1901,7 +1822,7 @@ bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
if (Op2 == 0) return false;
unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg)
.addReg(Op1).addReg(Op2));
UpdateValueMap(I, ResultReg);
@@ -2013,7 +1934,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
// Issue CALLSEQ_START
unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(AdjStackDown))
.addImm(NumBytes));
@@ -2058,9 +1979,8 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
// Now copy/store arg to correct locations.
if (VA.isRegLoc() && !VA.needsCustom()) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- VA.getLocReg())
- .addReg(Arg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg);
RegArgs.push_back(VA.getLocReg());
} else if (VA.needsCustom()) {
// TODO: We need custom lowering for vector (v2f64) args.
@@ -2072,7 +1992,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
assert(VA.isRegLoc() && NextVA.isRegLoc() &&
"We only handle register args!");
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VMOVRRD), VA.getLocReg())
.addReg(NextVA.getLocReg(), RegState::Define)
.addReg(Arg));
@@ -2099,7 +2019,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
unsigned &NumBytes, bool isVarArg) {
// Issue CALLSEQ_END
unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(AdjStackUp))
.addImm(NumBytes).addImm(0));
@@ -2116,7 +2036,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
MVT DestVT = RVLocs[0].getValVT();
const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
unsigned ResultReg = createResultReg(DstRC);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VMOVDRR), ResultReg)
.addReg(RVLocs[0].getLocReg())
.addReg(RVLocs[1].getLocReg()));
@@ -2137,7 +2057,8 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
unsigned ResultReg = createResultReg(DstRC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY),
ResultReg).addReg(RVLocs[0].getLocReg());
UsedRegs.push_back(RVLocs[0].getLocReg());
@@ -2214,15 +2135,15 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
// Avoid a cross-class copy. This is very unlikely.
if (!SrcRC->contains(DstReg))
return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- DstReg).addReg(SrcReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);
// Add register to return instruction.
RetRegs.push_back(VA.getLocReg());
}
unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
- MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(RetOpc));
AddOptionalDefs(MIB);
for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
@@ -2243,8 +2164,9 @@ unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
EVT LCREVT = TLI.getValueType(GVTy);
if (!LCREVT.isSimple()) return 0;
- GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false,
- GlobalValue::ExternalLinkage, 0, Name);
+ GlobalValue *GV = new GlobalVariable(M, Type::getInt32Ty(*Context), false,
+ GlobalValue::ExternalLinkage, nullptr,
+ Name);
assert(GV->getType() == GVTy && "We miscomputed the type for the global!");
return ARMMaterializeGV(GV, LCREVT.getSimpleVT());
}
@@ -2295,7 +2217,7 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
if (!isTypeLegal(ArgTy, ArgVT)) return false;
ISD::ArgFlagsTy Flags;
- unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
+ unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
Flags.setOrigAlign(OriginalAlignment);
Args.push_back(Op);
@@ -2320,7 +2242,7 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
// Issue the call.
unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls);
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(CallOpc));
+ DbgLoc, TII.get(CallOpc));
// BL / BLX don't take a predicate, but tBL / tBLX do.
if (isThumb2)
AddDefaultPred(MIB);
@@ -2348,7 +2270,7 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
}
bool ARMFastISel::SelectCall(const Instruction *I,
- const char *IntrMemName = 0) {
+ const char *IntrMemName = nullptr) {
const CallInst *CI = cast<CallInst>(I);
const Value *Callee = CI->getCalledValue();
@@ -2428,7 +2350,7 @@ bool ARMFastISel::SelectCall(const Instruction *I,
if (Arg == 0)
return false;
- unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
+ unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
Flags.setOrigAlign(OriginalAlignment);
Args.push_back(*i);
@@ -2461,7 +2383,7 @@ bool ARMFastISel::SelectCall(const Instruction *I,
// Issue the call.
unsigned CallOpc = ARMSelectCallOp(UseReg);
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(CallOpc));
+ DbgLoc, TII.get(CallOpc));
unsigned char OpFlags = 0;
@@ -2578,7 +2500,7 @@ bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
while (Depth--) {
DestReg = createResultReg(RC);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(LdrOpc), DestReg)
.addReg(SrcReg).addImm(0));
SrcReg = DestReg;
@@ -2635,7 +2557,7 @@ bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
return SelectCall(&I, "memset");
}
case Intrinsic::trap: {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(
Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP));
return true;
}
@@ -2788,7 +2710,7 @@ unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
unsigned ImmEnc = ImmIsSO ? ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm;
bool isKill = 1 == Instr;
MachineInstrBuilder MIB = BuildMI(
- *FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opcode), ResultReg);
+ *FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode), ResultReg);
if (setsCPSR)
MIB.addReg(ARM::CPSR, RegState::Define);
SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR);
@@ -2866,7 +2788,7 @@ bool ARMFastISel::SelectShift(const Instruction *I,
unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
if(ResultReg == 0) return false;
- MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg)
.addReg(Reg1);
@@ -3027,7 +2949,7 @@ unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
// Load value.
if (isThumb2) {
DestReg1 = constrainOperandRegClass(TII.get(ARM::t2LDRpci), DestReg1, 0);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::t2LDRpci), DestReg1)
.addConstantPoolIndex(Idx));
Opc = UseGOTOFF ? ARM::t2ADDrr : ARM::t2LDRs;
@@ -3035,7 +2957,7 @@ unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
// The extra immediate is for addrmode2.
DestReg1 = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg1, 0);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(ARM::LDRcp), DestReg1)
+ DbgLoc, TII.get(ARM::LDRcp), DestReg1)
.addConstantPoolIndex(Idx).addImm(0));
Opc = UseGOTOFF ? ARM::ADDrr : ARM::LDRrs;
}
@@ -3051,7 +2973,7 @@ unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
DestReg1 = constrainOperandRegClass(TII.get(Opc), DestReg1, 1);
GlobalBaseReg = constrainOperandRegClass(TII.get(Opc), GlobalBaseReg, 2);
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(Opc), DestReg2)
+ DbgLoc, TII.get(Opc), DestReg2)
.addReg(DestReg1)
.addReg(GlobalBaseReg);
if (!UseGOTOFF)
@@ -3125,7 +3047,8 @@ bool ARMFastISel::FastLowerArguments() {
// Without this, EmitLiveInCopies may eliminate the livein if its only
// use is a bitcast (which isn't turned into an instruction).
unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY),
ResultReg).addReg(DstReg, getKillRegState(true));
UpdateValueMap(I, ResultReg);
}
@@ -3141,7 +3064,7 @@ namespace llvm {
const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
// Thumb2 support on iOS; ARM support on iOS, Linux and NaCl.
bool UseFastISel = false;
- UseFastISel |= Subtarget->isTargetIOS() && !Subtarget->isThumb1Only();
+ UseFastISel |= Subtarget->isTargetMachO() && !Subtarget->isThumb1Only();
UseFastISel |= Subtarget->isTargetLinux() && !Subtarget->isThumb();
UseFastISel |= Subtarget->isTargetNaCl() && !Subtarget->isThumb();
@@ -3153,6 +3076,6 @@ namespace llvm {
TM.Options.NoFramePointerElim = true;
return new ARMFastISel(funcInfo, libInfo);
}
- return 0;
+ return nullptr;
}
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMFeatures.h b/contrib/llvm/lib/Target/ARM/ARMFeatures.h
index dafc4b3..e191a3c 100644
--- a/contrib/llvm/lib/Target/ARM/ARMFeatures.h
+++ b/contrib/llvm/lib/Target/ARM/ARMFeatures.h
@@ -1,4 +1,4 @@
-//===-- ARMFeatures.h - Checks for ARM instruction features ------*- C++ -*-===//
+//===-- ARMFeatures.h - Checks for ARM instruction features -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,37 +14,27 @@
#ifndef TARGET_ARM_FEATURES_H
#define TARGET_ARM_FEATURES_H
-#include "ARM.h"
+#include "MCTargetDesc/ARMMCTargetDesc.h"
namespace llvm {
template<typename InstrType> // could be MachineInstr or MCInst
-inline bool isV8EligibleForIT(InstrType *Instr, int BLXOperandIndex = 0) {
+bool IsCPSRDead(InstrType *Instr);
+
+template<typename InstrType> // could be MachineInstr or MCInst
+inline bool isV8EligibleForIT(InstrType *Instr) {
switch (Instr->getOpcode()) {
default:
return false;
case ARM::tADC:
case ARM::tADDi3:
case ARM::tADDi8:
- case ARM::tADDrSPi:
case ARM::tADDrr:
case ARM::tAND:
case ARM::tASRri:
case ARM::tASRrr:
case ARM::tBIC:
- case ARM::tCMNz:
- case ARM::tCMPi8:
- case ARM::tCMPr:
case ARM::tEOR:
- case ARM::tLDRBi:
- case ARM::tLDRBr:
- case ARM::tLDRHi:
- case ARM::tLDRHr:
- case ARM::tLDRSB:
- case ARM::tLDRSH:
- case ARM::tLDRi:
- case ARM::tLDRr:
- case ARM::tLDRspi:
case ARM::tLSLri:
case ARM::tLSLrr:
case ARM::tLSRri:
@@ -56,6 +46,24 @@ inline bool isV8EligibleForIT(InstrType *Instr, int BLXOperandIndex = 0) {
case ARM::tROR:
case ARM::tRSB:
case ARM::tSBC:
+ case ARM::tSUBi3:
+ case ARM::tSUBi8:
+ case ARM::tSUBrr:
+ // Outside of an IT block, these set CPSR.
+ return IsCPSRDead(Instr);
+ case ARM::tADDrSPi:
+ case ARM::tCMNz:
+ case ARM::tCMPi8:
+ case ARM::tCMPr:
+ case ARM::tLDRBi:
+ case ARM::tLDRBr:
+ case ARM::tLDRHi:
+ case ARM::tLDRHr:
+ case ARM::tLDRSB:
+ case ARM::tLDRSH:
+ case ARM::tLDRi:
+ case ARM::tLDRr:
+ case ARM::tLDRspi:
case ARM::tSTRBi:
case ARM::tSTRBr:
case ARM::tSTRHi:
@@ -63,21 +71,17 @@ inline bool isV8EligibleForIT(InstrType *Instr, int BLXOperandIndex = 0) {
case ARM::tSTRi:
case ARM::tSTRr:
case ARM::tSTRspi:
- case ARM::tSUBi3:
- case ARM::tSUBi8:
- case ARM::tSUBrr:
case ARM::tTST:
return true;
// there are some "conditionally deprecated" opcodes
case ARM::tADDspr:
+ case ARM::tBLXr:
return Instr->getOperand(2).getReg() != ARM::PC;
// ADD PC, SP and BLX PC were always unpredictable,
// now on top of it they're deprecated
case ARM::tADDrSP:
case ARM::tBX:
return Instr->getOperand(0).getReg() != ARM::PC;
- case ARM::tBLXr:
- return Instr->getOperand(BLXOperandIndex).getReg() != ARM::PC;
case ARM::tADDhirr:
return Instr->getOperand(0).getReg() != ARM::PC &&
Instr->getOperand(2).getReg() != ARM::PC;
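A minimal standalone sketch of the dispatch pattern introduced above, assuming hypothetical toy types in place of MachineInstr/MCInst and a made-up opcode enum; it is not part of the patch. The rewritten isV8EligibleForIT keeps one function template and defers the CPSR-liveness question to a per-representation predicate, IsCPSRDead<InstrType>, which each layer defines for its own instruction type.

#include <iostream>

// Toy stand-ins for the two instruction representations (MachineInstr / MCInst).
struct ToyMachineInstr {
  unsigned Opcode;
  bool CPSRLive;
  unsigned getOpcode() const { return Opcode; }
};
struct ToyMCInst {
  unsigned Opcode;
  unsigned getOpcode() const { return Opcode; }
};

// Declared once, defined per representation -- the same shape as the new
// IsCPSRDead<InstrType> declaration in ARMFeatures.h.
template <typename InstrType> bool IsCPSRDead(InstrType *Instr);
template <> bool IsCPSRDead(ToyMachineInstr *Instr) { return !Instr->CPSRLive; }
template <> bool IsCPSRDead(ToyMCInst *) { return true; }

enum ToyOpcode { tADDrr = 1, tCMPr = 2 };

// Simplified analog of isV8EligibleForIT: flag-setting opcodes are eligible
// only when CPSR is dead; pure compares are always eligible.
template <typename InstrType> bool isEligibleForIT(InstrType *Instr) {
  switch (Instr->getOpcode()) {
  default:     return false;
  case tADDrr: return IsCPSRDead(Instr);
  case tCMPr:  return true;
  }
}

int main() {
  ToyMachineInstr MI{tADDrr, /*CPSRLive=*/true};
  ToyMCInst MCI{tADDrr};
  std::cout << isEligibleForIT(&MI) << ' ' << isEligibleForIT(&MCI) << '\n'; // 0 1
}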
diff --git a/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index d32bdbc..a67b360 100644
--- a/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -14,15 +14,18 @@
#include "ARMFrameLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
+#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"
@@ -36,6 +39,10 @@ static MachineBasicBlock::iterator
skipAlignedDPRCS2Spills(MachineBasicBlock::iterator MI,
unsigned NumAlignedDPRCS2Regs);
+ARMFrameLowering::ARMFrameLowering(const ARMSubtarget &sti)
+ : TargetFrameLowering(StackGrowsDown, sti.getStackAlignment(), 0, 4),
+ STI(sti) {}
+
/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
@@ -84,7 +91,7 @@ ARMFrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
static bool isCSRestore(MachineInstr *MI,
const ARMBaseInstrInfo &TII,
- const uint16_t *CSRegs) {
+ const MCPhysReg *CSRegs) {
// Integer spill area is handled with "pop".
if (isPopOpcode(MI->getOpcode())) {
// The first two operands are predicates. The last two are
@@ -129,24 +136,47 @@ static void emitSPUpdate(bool isARM, MachineBasicBlock &MBB,
MIFlags, Pred, PredReg);
}
+static int sizeOfSPAdjustment(const MachineInstr *MI) {
+ assert(MI->getOpcode() == ARM::VSTMDDB_UPD);
+ int count = 0;
+ // ARM and Thumb2 push/pop insts have explicit "sp, sp" operands (+
+ // pred) so the list starts at 4.
+ for (int i = MI->getNumOperands() - 1; i >= 4; --i)
+ count += 8;
+ return count;
+}
+
+static bool WindowsRequiresStackProbe(const MachineFunction &MF,
+ size_t StackSizeInBytes) {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ if (MFI->getStackProtectorIndex() > 0)
+ return StackSizeInBytes >= 4080;
+ return StackSizeInBytes >= 4096;
+}
+
void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock &MBB = MF.front();
MachineBasicBlock::iterator MBBI = MBB.begin();
MachineFrameInfo *MFI = MF.getFrameInfo();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+ MachineModuleInfo &MMI = MF.getMMI();
+ MCContext &Context = MMI.getContext();
+ const TargetMachine &TM = MF.getTarget();
+ const MCRegisterInfo *MRI = Context.getRegisterInfo();
const ARMBaseRegisterInfo *RegInfo =
- static_cast<const ARMBaseRegisterInfo*>(MF.getTarget().getRegisterInfo());
+ static_cast<const ARMBaseRegisterInfo*>(TM.getRegisterInfo());
const ARMBaseInstrInfo &TII =
- *static_cast<const ARMBaseInstrInfo*>(MF.getTarget().getInstrInfo());
+ *static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo());
assert(!AFI->isThumb1OnlyFunction() &&
"This emitPrologue does not support Thumb1!");
bool isARM = !AFI->isThumbFunction();
- unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment();
+ unsigned Align = TM.getFrameLowering()->getStackAlignment();
unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize(Align);
unsigned NumBytes = MFI->getStackSize();
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
unsigned FramePtr = RegInfo->getFrameRegister(MF);
+ int CFAOffset = 0;
// Determine the sizes of each callee-save spill areas and record which frame
// belongs to which callee-save spill areas.
@@ -159,22 +189,46 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
return;
- // Allocate the vararg register save area. This is not counted in NumBytes.
- if (ArgRegsSaveSize)
+ // Allocate the vararg register save area.
+ if (ArgRegsSaveSize) {
emitSPUpdate(isARM, MBB, MBBI, dl, TII, -ArgRegsSaveSize,
MachineInstr::FrameSetup);
+ CFAOffset -= ArgRegsSaveSize;
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, CFAOffset));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
- if (!AFI->hasStackFrame()) {
- if (NumBytes != 0)
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes,
+ if (!AFI->hasStackFrame() &&
+ (!STI.isTargetWindows() || !WindowsRequiresStackProbe(MF, NumBytes))) {
+ if (NumBytes - ArgRegsSaveSize != 0) {
+ emitSPUpdate(isARM, MBB, MBBI, dl, TII, -(NumBytes - ArgRegsSaveSize),
MachineInstr::FrameSetup);
+ CFAOffset -= NumBytes - ArgRegsSaveSize;
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, CFAOffset));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
return;
}
+ // Determine spill area sizes.
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
int FI = CSI[i].getFrameIdx();
switch (Reg) {
+ case ARM::R8:
+ case ARM::R9:
+ case ARM::R10:
+ case ARM::R11:
+ case ARM::R12:
+ if (STI.isTargetDarwin()) {
+ GPRCS2Size += 4;
+ break;
+ }
+ // fallthrough
case ARM::R0:
case ARM::R1:
case ARM::R2:
@@ -188,18 +242,6 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
FramePtrSpillFI = FI;
GPRCS1Size += 4;
break;
- case ARM::R8:
- case ARM::R9:
- case ARM::R10:
- case ARM::R11:
- case ARM::R12:
- if (Reg == FramePtr)
- FramePtrSpillFI = FI;
- if (STI.isTargetIOS())
- GPRCS2Size += 4;
- else
- GPRCS1Size += 4;
- break;
default:
// This is a DPR. Exclude the aligned DPRCS2 spills.
if (Reg == ARM::D8)
@@ -210,18 +252,21 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
}
// Move past area 1.
- MachineBasicBlock::iterator LastPush = MBB.end(), FramePtrPush;
+ MachineBasicBlock::iterator LastPush = MBB.end(), GPRCS1Push, GPRCS2Push,
+ DPRCSPush;
if (GPRCS1Size > 0)
- FramePtrPush = LastPush = MBBI++;
+ GPRCS1Push = LastPush = MBBI++;
// Determine starting offsets of spill areas.
bool HasFP = hasFP(MF);
- unsigned DPRCSOffset = NumBytes - (GPRCS1Size + GPRCS2Size + DPRCSSize);
+ unsigned DPRCSOffset = NumBytes - (ArgRegsSaveSize + GPRCS1Size
+ + GPRCS2Size + DPRCSSize);
unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
int FramePtrOffsetInPush = 0;
if (HasFP) {
- FramePtrOffsetInPush = MFI->getObjectOffset(FramePtrSpillFI) + GPRCS1Size;
+ FramePtrOffsetInPush = MFI->getObjectOffset(FramePtrSpillFI)
+ + GPRCS1Size + ArgRegsSaveSize;
AFI->setFramePtrSpillOffset(MFI->getObjectOffset(FramePtrSpillFI) +
NumBytes);
}
@@ -230,13 +275,12 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);
// Move past area 2.
- if (GPRCS2Size > 0) {
- LastPush = MBBI++;
- }
+ if (GPRCS2Size > 0)
+ GPRCS2Push = LastPush = MBBI++;
// Move past area 3.
if (DPRCSSize > 0) {
- LastPush = MBBI++;
+ DPRCSPush = MBBI;
// Since vpush register list cannot have gaps, there may be multiple vpush
// instructions in the prologue.
while (MBBI->getOpcode() == ARM::VSTMDDB_UPD)
@@ -254,11 +298,60 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
} else
NumBytes = DPRCSOffset;
+ if (STI.isTargetWindows() && WindowsRequiresStackProbe(MF, NumBytes)) {
+ uint32_t NumWords = NumBytes >> 2;
+
+ if (NumWords < 65536)
+ AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi16), ARM::R4)
+ .addImm(NumWords)
+ .setMIFlags(MachineInstr::FrameSetup));
+ else
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi32imm), ARM::R4)
+ .addImm(NumWords)
+ .setMIFlags(MachineInstr::FrameSetup);
+
+ switch (TM.getCodeModel()) {
+ case CodeModel::Small:
+ case CodeModel::Medium:
+ case CodeModel::Default:
+ case CodeModel::Kernel:
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::tBL))
+ .addImm((unsigned)ARMCC::AL).addReg(0)
+ .addExternalSymbol("__chkstk")
+ .addReg(ARM::R4, RegState::Implicit)
+ .setMIFlags(MachineInstr::FrameSetup);
+ break;
+ case CodeModel::Large:
+ case CodeModel::JITDefault:
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi32imm), ARM::R12)
+ .addExternalSymbol("__chkstk")
+ .setMIFlags(MachineInstr::FrameSetup);
+
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::tBLXr))
+ .addImm((unsigned)ARMCC::AL).addReg(0)
+ .addReg(ARM::R12, RegState::Kill)
+ .addReg(ARM::R4, RegState::Implicit)
+ .setMIFlags(MachineInstr::FrameSetup);
+ break;
+ }
+
+ AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::t2SUBrr),
+ ARM::SP)
+ .addReg(ARM::SP, RegState::Define)
+ .addReg(ARM::R4, RegState::Kill)
+ .setMIFlags(MachineInstr::FrameSetup)));
+ NumBytes = 0;
+ }
+
+ unsigned adjustedGPRCS1Size = GPRCS1Size;
if (NumBytes) {
// Adjust SP after all the callee-save spills.
- if (tryFoldSPUpdateIntoPushPop(MF, LastPush, NumBytes)) {
- if (LastPush == FramePtrPush)
+ if (tryFoldSPUpdateIntoPushPop(STI, MF, LastPush, NumBytes)) {
+ if (LastPush == GPRCS1Push) {
FramePtrOffsetInPush += NumBytes;
+ adjustedGPRCS1Size += NumBytes;
+ NumBytes = 0;
+ }
} else
emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes,
MachineInstr::FrameSetup);
@@ -275,17 +368,138 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
AFI->setShouldRestoreSPFromFP(true);
}
+ if (adjustedGPRCS1Size > 0) {
+ CFAOffset -= adjustedGPRCS1Size;
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, CFAOffset));
+ MachineBasicBlock::iterator Pos = ++GPRCS1Push;
+ BuildMI(MBB, Pos, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ for (const auto &Entry : CSI) {
+ unsigned Reg = Entry.getReg();
+ int FI = Entry.getFrameIdx();
+ switch (Reg) {
+ case ARM::R8:
+ case ARM::R9:
+ case ARM::R10:
+ case ARM::R11:
+ case ARM::R12:
+ if (STI.isTargetDarwin())
+ break;
+ // fallthrough
+ case ARM::R0:
+ case ARM::R1:
+ case ARM::R2:
+ case ARM::R3:
+ case ARM::R4:
+ case ARM::R5:
+ case ARM::R6:
+ case ARM::R7:
+ case ARM::LR:
+ CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
+ nullptr, MRI->getDwarfRegNum(Reg, true), MFI->getObjectOffset(FI)));
+ BuildMI(MBB, Pos, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ break;
+ }
+ }
+ }
+
// Set FP to point to the stack slot that contains the previous FP.
// For iOS, FP is R7, which has now been stored in spill area 1.
// Otherwise, if this is not iOS, all the callee-saved registers go
// into spill area 1, including the FP in R11. In either case, it
// is in area one and the adjustment needs to take place just after
// that push.
- if (HasFP)
- emitRegPlusImmediate(!AFI->isThumbFunction(), MBB, ++FramePtrPush, dl, TII,
+ if (HasFP) {
+ emitRegPlusImmediate(!AFI->isThumbFunction(), MBB, GPRCS1Push, dl, TII,
FramePtr, ARM::SP, FramePtrOffsetInPush,
MachineInstr::FrameSetup);
+ if (FramePtrOffsetInPush) {
+ CFAOffset += FramePtrOffsetInPush;
+ unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createDefCfa(
+ nullptr, MRI->getDwarfRegNum(FramePtr, true), CFAOffset));
+ BuildMI(MBB, GPRCS1Push, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ } else {
+ unsigned CFIIndex =
+ MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(
+ nullptr, MRI->getDwarfRegNum(FramePtr, true)));
+ BuildMI(MBB, GPRCS1Push, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+ }
+
+ if (GPRCS2Size > 0) {
+ MachineBasicBlock::iterator Pos = ++GPRCS2Push;
+ if (!HasFP) {
+ CFAOffset -= GPRCS2Size;
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, CFAOffset));
+ BuildMI(MBB, Pos, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+ for (const auto &Entry : CSI) {
+ unsigned Reg = Entry.getReg();
+ int FI = Entry.getFrameIdx();
+ switch (Reg) {
+ case ARM::R8:
+ case ARM::R9:
+ case ARM::R10:
+ case ARM::R11:
+ case ARM::R12:
+ if (STI.isTargetDarwin()) {
+ unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
+ unsigned Offset = MFI->getObjectOffset(FI);
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
+ BuildMI(MBB, Pos, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+ break;
+ }
+ }
+ }
+
+ if (DPRCSSize > 0) {
+ // Since vpush register list cannot have gaps, there may be multiple vpush
+ // instructions in the prologue.
+ do {
+ MachineBasicBlock::iterator Push = DPRCSPush++;
+ if (!HasFP) {
+ CFAOffset -= sizeOfSPAdjustment(Push);
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, CFAOffset));
+ BuildMI(MBB, DPRCSPush, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+ } while (DPRCSPush->getOpcode() == ARM::VSTMDDB_UPD);
+
+ for (const auto &Entry : CSI) {
+ unsigned Reg = Entry.getReg();
+ int FI = Entry.getFrameIdx();
+ if ((Reg >= ARM::D0 && Reg <= ARM::D31) &&
+ (Reg < ARM::D8 || Reg >= ARM::D8 + AFI->getNumAlignedDPRCS2Regs())) {
+ unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
+ unsigned Offset = MFI->getObjectOffset(FI);
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
+ BuildMI(MBB, DPRCSPush, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+ }
+ }
+
+ if (NumBytes) {
+ if (!HasFP) {
+ CFAOffset -= NumBytes;
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, CFAOffset));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+ }
if (STI.isTargetELF() && hasFP(MF))
MFI->setOffsetAdjustment(MFI->getOffsetAdjustment() -
@@ -378,11 +592,11 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
return;
if (!AFI->hasStackFrame()) {
- if (NumBytes != 0)
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
+ if (NumBytes - ArgRegsSaveSize != 0)
+ emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes - ArgRegsSaveSize);
} else {
// Unwind MBBI to point to first LDR / VLDRD.
- const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
+ const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
if (MBBI != MBB.begin()) {
do {
--MBBI;
@@ -392,7 +606,8 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
}
// Move SP to start of FP callee save spill area.
- NumBytes -= (AFI->getGPRCalleeSavedArea1Size() +
+ NumBytes -= (ArgRegsSaveSize +
+ AFI->getGPRCalleeSavedArea1Size() +
AFI->getGPRCalleeSavedArea2Size() +
AFI->getDPRCalleeSavedAreaSize());
@@ -430,7 +645,8 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
ARM::SP)
.addReg(FramePtr));
}
- } else if (NumBytes && !tryFoldSPUpdateIntoPushPop(MF, MBBI, NumBytes))
+ } else if (NumBytes &&
+ !tryFoldSPUpdateIntoPushPop(STI, MF, MBBI, NumBytes))
emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
// Increment past our save areas.
@@ -453,7 +669,7 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
// Jump to label or value in register.
if (RetOpcode == ARM::TCRETURNdi) {
unsigned TCOpcode = STI.isThumb() ?
- (STI.isTargetIOS() ? ARM::tTAILJMPd : ARM::tTAILJMPdND) :
+ (STI.isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND) :
ARM::TAILJMPd;
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(TCOpcode));
if (JumpTarget.isGlobal())
@@ -473,7 +689,7 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
addReg(JumpTarget.getReg(), RegState::Kill);
}
- MachineInstr *NewMI = prior(MBBI);
+ MachineInstr *NewMI = std::prev(MBBI);
for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
NewMI->addOperand(MBBI->getOperand(i));
@@ -598,7 +814,7 @@ void ARMFrameLowering::emitPushInst(MachineBasicBlock &MBB,
unsigned LastReg = 0;
for (; i != 0; --i) {
unsigned Reg = CSI[i-1].getReg();
- if (!(Func)(Reg, STI.isTargetIOS())) continue;
+ if (!(Func)(Reg, STI.isTargetDarwin())) continue;
// D-registers in the aligned area DPRCS2 are NOT spilled here.
if (Reg >= ARM::D8 && Reg < ARM::D8 + NumAlignedDPRCS2Regs)
@@ -644,6 +860,11 @@ void ARMFrameLowering::emitPushInst(MachineBasicBlock &MBB,
AddDefaultPred(MIB);
}
Regs.clear();
+
+ // Put any subsequent vpush instructions before this one: they will refer to
+ // higher register numbers so need to be pushed first in order to preserve
+ // monotonicity.
+ --MI;
}
}
@@ -671,7 +892,7 @@ void ARMFrameLowering::emitPopInst(MachineBasicBlock &MBB,
bool DeleteRet = false;
for (; i != 0; --i) {
unsigned Reg = CSI[i-1].getReg();
- if (!(Func)(Reg, STI.isTargetIOS())) continue;
+ if (!(Func)(Reg, STI.isTargetDarwin())) continue;
// The aligned reloads from area DPRCS2 are not inserted here.
if (Reg >= ARM::D8 && Reg < ARM::D8 + NumAlignedDPRCS2Regs)
@@ -727,6 +948,10 @@ void ARMFrameLowering::emitPopInst(MachineBasicBlock &MBB,
AddDefaultPred(MIB);
}
Regs.clear();
+
+ // Put any subsequent vpop instructions after this one: they will refer to
+ // higher register numbers so need to be popped afterwards.
+ ++MI;
}
}
@@ -858,7 +1083,7 @@ static void emitAlignedDPRCS2Spills(MachineBasicBlock &MBB,
}
// The last spill instruction inserted should kill the scratch register r4.
- llvm::prior(MI)->addRegisterKilled(ARM::R4, TRI);
+ std::prev(MI)->addRegisterKilled(ARM::R4, TRI);
}
/// Skip past the code inserted by emitAlignedDPRCS2Spills, and return an
@@ -968,7 +1193,7 @@ static void emitAlignedDPRCS2Restores(MachineBasicBlock &MBB,
.addReg(ARM::R4).addImm(2*(NextReg-R4BaseReg)));
// Last store kills r4.
- llvm::prior(MI)->addRegisterKilled(ARM::R4, TRI);
+ std::prev(MI)->addRegisterKilled(ARM::R4, TRI);
}
bool ARMFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
@@ -1036,12 +1261,9 @@ bool ARMFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
static unsigned GetFunctionSizeInBytes(const MachineFunction &MF,
const ARMBaseInstrInfo &TII) {
unsigned FnSize = 0;
- for (MachineFunction::const_iterator MBBI = MF.begin(), E = MF.end();
- MBBI != E; ++MBBI) {
- const MachineBasicBlock &MBB = *MBBI;
- for (MachineBasicBlock::const_iterator I = MBB.begin(),E = MBB.end();
- I != E; ++I)
- FnSize += TII.GetInstSizeInBytes(I);
+ for (auto &MBB : MF) {
+ for (auto &MI : MBB)
+ FnSize += TII.GetInstSizeInBytes(&MI);
}
return FnSize;
}
@@ -1054,21 +1276,21 @@ static unsigned estimateRSStackSizeLimit(MachineFunction &MF,
const TargetFrameLowering *TFI) {
const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
unsigned Limit = (1 << 12) - 1;
- for (MachineFunction::iterator BB = MF.begin(),E = MF.end(); BB != E; ++BB) {
- for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
- I != E; ++I) {
- for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
- if (!I->getOperand(i).isFI()) continue;
+ for (auto &MBB : MF) {
+ for (auto &MI : MBB) {
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ if (!MI.getOperand(i).isFI())
+ continue;
// When using ADDri to get the address of a stack object, 255 is the
// largest offset guaranteed to fit in the immediate offset.
- if (I->getOpcode() == ARM::ADDri) {
+ if (MI.getOpcode() == ARM::ADDri) {
Limit = std::min(Limit, (1U << 8) - 1);
break;
}
// Otherwise check the addressing mode.
- switch (I->getDesc().TSFlags & ARMII::AddrModeMask) {
+ switch (MI.getDesc().TSFlags & ARMII::AddrModeMask) {
case ARMII::AddrMode3:
case ARMII::AddrModeT2_i8:
Limit = std::min(Limit, (1U << 8) - 1);
@@ -1205,7 +1427,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// Don't spill FP if the frame can be eliminated. This is determined
// by scanning the callee-save registers to see if any is used.
- const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
+ const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
for (unsigned i = 0; CSRegs[i]; ++i) {
unsigned Reg = CSRegs[i];
bool Spilled = false;
@@ -1220,7 +1442,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
if (Spilled) {
NumGPRSpills++;
- if (!STI.isTargetIOS()) {
+ if (!STI.isTargetDarwin()) {
if (Reg == ARM::LR)
LRSpilled = true;
CS1Spilled = true;
@@ -1242,7 +1464,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
break;
}
} else {
- if (!STI.isTargetIOS()) {
+ if (!STI.isTargetDarwin()) {
UnspilledCS1GPRs.push_back(Reg);
continue;
}
@@ -1317,6 +1539,10 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
if (hasFP(MF)) {
MRI.setPhysRegUsed(FramePtr);
+ auto FPPos = std::find(UnspilledCS1GPRs.begin(), UnspilledCS1GPRs.end(),
+ FramePtr);
+ if (FPPos != UnspilledCS1GPRs.end())
+ UnspilledCS1GPRs.erase(FPPos);
NumGPRSpills++;
}
@@ -1444,3 +1670,370 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MBB.erase(I);
}
+/// Get the minimum constant for ARM that is greater than or equal to the
+/// argument. In ARM, constants can have any value that can be produced by
+/// rotating an 8-bit value to the right by an even number of bits within a
+/// 32-bit word.
+static uint32_t alignToARMConstant(uint32_t Value) {
+ unsigned Shifted = 0;
+
+ if (Value == 0)
+ return 0;
+
+ while (!(Value & 0xC0000000)) {
+ Value = Value << 2;
+ Shifted += 2;
+ }
+
+ bool Carry = (Value & 0x00FFFFFF);
+ Value = ((Value & 0xFF000000) >> 24) + Carry;
+
+ if (Value & 0x0000100)
+ Value = Value & 0x000001FC;
+
+ if (Shifted > 24)
+ Value = Value >> (Shifted - 24);
+ else
+ Value = Value << (24 - Shifted);
+
+ return Value;
+}
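For reference, the helper above can be exercised on its own. This minimal harness copies it verbatim (it is not part of the patch) and shows the rounding behaviour: 1000 is already encodable as an ARM modified immediate (an 8-bit value rotated right by an even number of bits), while 1001 rounds up to 1004.

#include <cstdint>
#include <cstdio>

static uint32_t alignToARMConstant(uint32_t Value) {
  unsigned Shifted = 0;
  if (Value == 0)
    return 0;
  while (!(Value & 0xC0000000)) {
    Value = Value << 2;
    Shifted += 2;
  }
  bool Carry = (Value & 0x00FFFFFF);
  Value = ((Value & 0xFF000000) >> 24) + Carry;
  if (Value & 0x0000100)
    Value = Value & 0x000001FC;
  if (Shifted > 24)
    Value = Value >> (Shifted - 24);
  else
    Value = Value << (24 - Shifted);
  return Value;
}

int main() {
  // 1000 (0xFA << 2) is already encodable; 1001 rounds up to 1004 (0xFB << 2).
  std::printf("%u %u\n", alignToARMConstant(1000), alignToARMConstant(1001));
}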
+
+// The stack limit in the TCB is set to this many bytes above the actual
+// stack limit.
+static const uint64_t kSplitStackAvailable = 256;
+
+// Adjust the function prologue to enable split stacks. This currently only
+// supports android and linux.
+//
+// The ABI of the segmented stack prologue is a little arbitrarily chosen, but
+// must be well defined in order to allow for consistent implementations of the
+// __morestack helper function. The ABI is also not a normal ABI in that it
+// doesn't follow the normal calling conventions because this allows the
+// prologue of each function to be optimized further.
+//
+// Currently, the ABI looks like (when calling __morestack)
+//
+// * r4 holds the minimum stack size requested for this function call
+// * r5 holds the stack size of the arguments to the function
+// * the beginning of the function is 3 instructions after the call to
+// __morestack
+//
+// Implementations of __morestack should use r4 to allocate a new stack, r5 to
+// place the arguments on to the new stack, and the 3-instruction knowledge to
+// jump directly to the body of the function when working on the new stack.
+//
+// An old (and possibly no longer compatible) implementation of __morestack for
+// ARM can be found at [1].
+//
+// [1] - https://github.com/mozilla/rust/blob/86efd9/src/rt/arch/arm/morestack.S
+void ARMFrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
+ unsigned Opcode;
+ unsigned CFIIndex;
+ const ARMSubtarget *ST = &MF.getTarget().getSubtarget<ARMSubtarget>();
+ bool Thumb = ST->isThumb();
+
+  // Sadly, this currently doesn't support varargs or platforms other than
+  // android/linux. Note that thumb1/thumb2 are supported for android/linux.
+ if (MF.getFunction()->isVarArg())
+ report_fatal_error("Segmented stacks do not support vararg functions.");
+ if (!ST->isTargetAndroid() && !ST->isTargetLinux())
+ report_fatal_error("Segmented stacks not supported on this platform.");
+
+ MachineBasicBlock &prologueMBB = MF.front();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineModuleInfo &MMI = MF.getMMI();
+ MCContext &Context = MMI.getContext();
+ const MCRegisterInfo *MRI = Context.getRegisterInfo();
+ const ARMBaseInstrInfo &TII =
+ *static_cast<const ARMBaseInstrInfo*>(MF.getTarget().getInstrInfo());
+ ARMFunctionInfo *ARMFI = MF.getInfo<ARMFunctionInfo>();
+ DebugLoc DL;
+
+ uint64_t StackSize = MFI->getStackSize();
+
+ // Do not generate a prologue for functions with a stack of size zero
+ if (StackSize == 0)
+ return;
+
+ // Use R4 and R5 as scratch registers.
+ // We save R4 and R5 before use and restore them before leaving the function.
+ unsigned ScratchReg0 = ARM::R4;
+ unsigned ScratchReg1 = ARM::R5;
+ uint64_t AlignedStackSize;
+
+ MachineBasicBlock *PrevStackMBB = MF.CreateMachineBasicBlock();
+ MachineBasicBlock *PostStackMBB = MF.CreateMachineBasicBlock();
+ MachineBasicBlock *AllocMBB = MF.CreateMachineBasicBlock();
+ MachineBasicBlock *GetMBB = MF.CreateMachineBasicBlock();
+ MachineBasicBlock *McrMBB = MF.CreateMachineBasicBlock();
+
+ for (MachineBasicBlock::livein_iterator i = prologueMBB.livein_begin(),
+ e = prologueMBB.livein_end();
+ i != e; ++i) {
+ AllocMBB->addLiveIn(*i);
+ GetMBB->addLiveIn(*i);
+ McrMBB->addLiveIn(*i);
+ PrevStackMBB->addLiveIn(*i);
+ PostStackMBB->addLiveIn(*i);
+ }
+
+ MF.push_front(PostStackMBB);
+ MF.push_front(AllocMBB);
+ MF.push_front(GetMBB);
+ MF.push_front(McrMBB);
+ MF.push_front(PrevStackMBB);
+
+  // The required stack size, rounded up to a value encodable as an ARM constant.
+ AlignedStackSize = alignToARMConstant(StackSize);
+
+ // When the frame size is less than 256 we just compare the stack
+ // boundary directly to the value of the stack pointer, per gcc.
+ bool CompareStackPointer = AlignedStackSize < kSplitStackAvailable;
+
+ // We will use two of the callee save registers as scratch registers so we
+ // need to save those registers onto the stack.
+ // We will use SR0 to hold stack limit and SR1 to hold the stack size
+ // requested and arguments for __morestack().
+ // SR0: Scratch Register #0
+ // SR1: Scratch Register #1
+ // push {SR0, SR1}
+ if (Thumb) {
+ AddDefaultPred(BuildMI(PrevStackMBB, DL, TII.get(ARM::tPUSH)))
+ .addReg(ScratchReg0).addReg(ScratchReg1);
+ } else {
+ AddDefaultPred(BuildMI(PrevStackMBB, DL, TII.get(ARM::STMDB_UPD))
+ .addReg(ARM::SP, RegState::Define).addReg(ARM::SP))
+ .addReg(ScratchReg0).addReg(ScratchReg1);
+ }
+
+ // Emit the relevant DWARF information about the change in stack pointer as
+ // well as where to find both r4 and r5 (the callee-save registers)
+ CFIIndex =
+ MMI.addFrameInst(MCCFIInstruction::createDefCfaOffset(nullptr, -8));
+ BuildMI(PrevStackMBB, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
+ nullptr, MRI->getDwarfRegNum(ScratchReg1, true), -4));
+ BuildMI(PrevStackMBB, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
+ nullptr, MRI->getDwarfRegNum(ScratchReg0, true), -8));
+ BuildMI(PrevStackMBB, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
+ // mov SR1, sp
+ if (Thumb) {
+ AddDefaultPred(BuildMI(McrMBB, DL, TII.get(ARM::tMOVr), ScratchReg1)
+ .addReg(ARM::SP));
+ } else if (CompareStackPointer) {
+ AddDefaultPred(BuildMI(McrMBB, DL, TII.get(ARM::MOVr), ScratchReg1)
+ .addReg(ARM::SP)).addReg(0);
+ }
+
+ // sub SR1, sp, #StackSize
+ if (!CompareStackPointer && Thumb) {
+ AddDefaultPred(
+ AddDefaultCC(BuildMI(McrMBB, DL, TII.get(ARM::tSUBi8), ScratchReg1))
+ .addReg(ScratchReg1).addImm(AlignedStackSize));
+ } else if (!CompareStackPointer) {
+ AddDefaultPred(BuildMI(McrMBB, DL, TII.get(ARM::SUBri), ScratchReg1)
+ .addReg(ARM::SP).addImm(AlignedStackSize)).addReg(0);
+ }
+
+ if (Thumb && ST->isThumb1Only()) {
+ unsigned PCLabelId = ARMFI->createPICLabelUId();
+ ARMConstantPoolValue *NewCPV = ARMConstantPoolSymbol::Create(
+ MF.getFunction()->getContext(), "__STACK_LIMIT", PCLabelId, 0);
+ MachineConstantPool *MCP = MF.getConstantPool();
+ unsigned CPI = MCP->getConstantPoolIndex(NewCPV, MF.getAlignment());
+
+ // ldr SR0, [pc, offset(STACK_LIMIT)]
+ AddDefaultPred(BuildMI(GetMBB, DL, TII.get(ARM::tLDRpci), ScratchReg0)
+ .addConstantPoolIndex(CPI));
+
+ // ldr SR0, [SR0]
+ AddDefaultPred(BuildMI(GetMBB, DL, TII.get(ARM::tLDRi), ScratchReg0)
+ .addReg(ScratchReg0).addImm(0));
+ } else {
+ // Get TLS base address from the coprocessor
+ // mrc p15, #0, SR0, c13, c0, #3
+ AddDefaultPred(BuildMI(McrMBB, DL, TII.get(ARM::MRC), ScratchReg0)
+ .addImm(15)
+ .addImm(0)
+ .addImm(13)
+ .addImm(0)
+ .addImm(3));
+
+    // Use the last TLS slot on android and a private field of the TCB on linux.
+ assert(ST->isTargetAndroid() || ST->isTargetLinux());
+ unsigned TlsOffset = ST->isTargetAndroid() ? 63 : 1;
+
+ // Get the stack limit from the right offset
+ // ldr SR0, [sr0, #4 * TlsOffset]
+ AddDefaultPred(BuildMI(GetMBB, DL, TII.get(ARM::LDRi12), ScratchReg0)
+ .addReg(ScratchReg0).addImm(4 * TlsOffset));
+ }
+
+ // Compare stack limit with stack size requested.
+ // cmp SR0, SR1
+ Opcode = Thumb ? ARM::tCMPr : ARM::CMPrr;
+ AddDefaultPred(BuildMI(GetMBB, DL, TII.get(Opcode))
+ .addReg(ScratchReg0)
+ .addReg(ScratchReg1));
+
+ // This jump is taken if StackLimit < SP - stack required.
+ Opcode = Thumb ? ARM::tBcc : ARM::Bcc;
+ BuildMI(GetMBB, DL, TII.get(Opcode)).addMBB(PostStackMBB)
+ .addImm(ARMCC::LO)
+ .addReg(ARM::CPSR);
+
+
+ // Calling __morestack(StackSize, Size of stack arguments).
+  // __morestack knows that the stack size requested is in SR0(r4)
+  // and the size of the stack arguments is in SR1(r5).
+
+  // Pass the first argument to __morestack in Scratch Register #0:
+  // the amount of stack required.
+ if (Thumb) {
+ AddDefaultPred(AddDefaultCC(BuildMI(AllocMBB, DL, TII.get(ARM::tMOVi8),
+ ScratchReg0)).addImm(AlignedStackSize));
+ } else {
+ AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::MOVi), ScratchReg0)
+ .addImm(AlignedStackSize)).addReg(0);
+ }
+  // Pass the second argument to __morestack in Scratch Register #1:
+  // the amount of stack consumed to save the function arguments.
+ if (Thumb) {
+ AddDefaultPred(
+ AddDefaultCC(BuildMI(AllocMBB, DL, TII.get(ARM::tMOVi8), ScratchReg1))
+ .addImm(alignToARMConstant(ARMFI->getArgumentStackSize())));
+ } else {
+ AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::MOVi), ScratchReg1)
+ .addImm(alignToARMConstant(ARMFI->getArgumentStackSize())))
+ .addReg(0);
+ }
+
+ // push {lr} - Save return address of this function.
+ if (Thumb) {
+ AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::tPUSH)))
+ .addReg(ARM::LR);
+ } else {
+ AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::STMDB_UPD))
+ .addReg(ARM::SP, RegState::Define)
+ .addReg(ARM::SP))
+ .addReg(ARM::LR);
+ }
+
+ // Emit the DWARF info about the change in stack as well as where to find the
+ // previous link register
+ CFIIndex =
+ MMI.addFrameInst(MCCFIInstruction::createDefCfaOffset(nullptr, -12));
+ BuildMI(AllocMBB, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
+ nullptr, MRI->getDwarfRegNum(ARM::LR, true), -12));
+ BuildMI(AllocMBB, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
+ // Call __morestack().
+ if (Thumb) {
+ AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::tBL)))
+ .addExternalSymbol("__morestack");
+ } else {
+ BuildMI(AllocMBB, DL, TII.get(ARM::BL))
+ .addExternalSymbol("__morestack");
+ }
+
+ // pop {lr} - Restore return address of this original function.
+ if (Thumb) {
+ if (ST->isThumb1Only()) {
+ AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::tPOP)))
+ .addReg(ScratchReg0);
+ AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::tMOVr), ARM::LR)
+ .addReg(ScratchReg0));
+ } else {
+ AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::t2LDR_POST))
+ .addReg(ARM::LR, RegState::Define)
+ .addReg(ARM::SP, RegState::Define)
+ .addReg(ARM::SP)
+ .addImm(4));
+ }
+ } else {
+ AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::LDMIA_UPD))
+ .addReg(ARM::SP, RegState::Define)
+ .addReg(ARM::SP))
+ .addReg(ARM::LR);
+ }
+
+  // Restore SR0 and SR1 in case __morestack() was called.
+  // __morestack() will skip the PostStackMBB block, so we need to restore
+  // the scratch registers here.
+ // pop {SR0, SR1}
+ if (Thumb) {
+ AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::tPOP)))
+ .addReg(ScratchReg0)
+ .addReg(ScratchReg1);
+ } else {
+ AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(ARM::LDMIA_UPD))
+ .addReg(ARM::SP, RegState::Define)
+ .addReg(ARM::SP))
+ .addReg(ScratchReg0)
+ .addReg(ScratchReg1);
+ }
+
+ // Update the CFA offset now that we've popped
+ CFIIndex = MMI.addFrameInst(MCCFIInstruction::createDefCfaOffset(nullptr, 0));
+ BuildMI(AllocMBB, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
+ // bx lr - Return from this function.
+ Opcode = Thumb ? ARM::tBX_RET : ARM::BX_RET;
+ AddDefaultPred(BuildMI(AllocMBB, DL, TII.get(Opcode)));
+
+  // Restore SR0 and SR1 in case __morestack() was not called.
+ // pop {SR0, SR1}
+ if (Thumb) {
+ AddDefaultPred(BuildMI(PostStackMBB, DL, TII.get(ARM::tPOP)))
+ .addReg(ScratchReg0)
+ .addReg(ScratchReg1);
+ } else {
+ AddDefaultPred(BuildMI(PostStackMBB, DL, TII.get(ARM::LDMIA_UPD))
+ .addReg(ARM::SP, RegState::Define)
+ .addReg(ARM::SP))
+ .addReg(ScratchReg0)
+ .addReg(ScratchReg1);
+ }
+
+ // Update the CFA offset now that we've popped
+ CFIIndex = MMI.addFrameInst(MCCFIInstruction::createDefCfaOffset(nullptr, 0));
+ BuildMI(PostStackMBB, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
+ // Tell debuggers that r4 and r5 are now the same as they were in the
+ // previous function, that they're the "Same Value".
+ CFIIndex = MMI.addFrameInst(MCCFIInstruction::createSameValue(
+ nullptr, MRI->getDwarfRegNum(ScratchReg0, true)));
+ BuildMI(PostStackMBB, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ CFIIndex = MMI.addFrameInst(MCCFIInstruction::createSameValue(
+ nullptr, MRI->getDwarfRegNum(ScratchReg1, true)));
+ BuildMI(PostStackMBB, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
+ // Organizing MBB lists
+ PostStackMBB->addSuccessor(&prologueMBB);
+
+ AllocMBB->addSuccessor(PostStackMBB);
+
+ GetMBB->addSuccessor(PostStackMBB);
+ GetMBB->addSuccessor(AllocMBB);
+
+ McrMBB->addSuccessor(GetMBB);
+
+ PrevStackMBB->addSuccessor(McrMBB);
+
+#ifdef XDEBUG
+ MF.verify();
+#endif
+}
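A minimal sketch of the stack check that adjustForSegmentedStacks emits, assuming plain integers in place of registers and ignoring the ARM-constant rounding of the frame size: SR0 holds the limit read from TLS, SR1 holds SP (or SP minus the frame size when the frame is at least kSplitStackAvailable bytes), and the branch-on-LO after "cmp SR0, SR1" skips __morestack when the limit is strictly below SR1. The function and values below only model that comparison; they are not part of the patch.

#include <cstdint>
#include <cstdio>

static const uint64_t kSplitStackAvailable = 256;  // mirrors the constant in the prologue

// Returns true when the prologue would have to call __morestack.
bool needsMoreStack(uint32_t SP, uint32_t StackLimit, uint32_t FrameSize) {
  // Small frames compare the limit against SP directly, per the diff.
  uint32_t SR1 = FrameSize < kSplitStackAvailable ? SP : SP - FrameSize;
  // The emitted branch (LO) skips __morestack when StackLimit < SR1.
  return !(StackLimit < SR1);
}

int main() {
  std::printf("%d\n", needsMoreStack(0x8000, 0x1000, 0x200)); // plenty of headroom: 0
  std::printf("%d\n", needsMoreStack(0x1100, 0x1000, 0x200)); // SP - frame falls below the limit: 1
}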
diff --git a/contrib/llvm/lib/Target/ARM/ARMFrameLowering.h b/contrib/llvm/lib/Target/ARM/ARMFrameLowering.h
index d95a2cb..709afbc 100644
--- a/contrib/llvm/lib/Target/ARM/ARMFrameLowering.h
+++ b/contrib/llvm/lib/Target/ARM/ARMFrameLowering.h
@@ -14,8 +14,6 @@
#ifndef ARM_FRAMEINFO_H
#define ARM_FRAMEINFO_H
-#include "ARM.h"
-#include "ARMSubtarget.h"
#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
@@ -26,38 +24,36 @@ protected:
const ARMSubtarget &STI;
public:
- explicit ARMFrameLowering(const ARMSubtarget &sti)
- : TargetFrameLowering(StackGrowsDown, sti.getStackAlignment(), 0, 8),
- STI(sti) {
- }
+ explicit ARMFrameLowering(const ARMSubtarget &sti);
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+ void emitPrologue(MachineFunction &MF) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo *TRI) const override;
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const override;
- bool hasFP(const MachineFunction &MF) const;
- bool hasReservedCallFrame(const MachineFunction &MF) const;
- bool canSimplifyCallFramePseudos(const MachineFunction &MF) const;
+ bool hasFP(const MachineFunction &MF) const override;
+ bool hasReservedCallFrame(const MachineFunction &MF) const override;
+ bool canSimplifyCallFramePseudos(const MachineFunction &MF) const override;
int getFrameIndexReference(const MachineFunction &MF, int FI,
- unsigned &FrameReg) const;
- int ResolveFrameIndexReference(const MachineFunction &MF,
- int FI,
+ unsigned &FrameReg) const override;
+ int ResolveFrameIndexReference(const MachineFunction &MF, int FI,
unsigned &FrameReg, int SPAdj) const;
- int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
+ int getFrameIndexOffset(const MachineFunction &MF, int FI) const override;
void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const;
+ RegScavenger *RS) const override;
+
+ void adjustForSegmentedStacks(MachineFunction &MF) const override;
private:
void emitPushInst(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
@@ -71,10 +67,10 @@ public:
bool(*Func)(unsigned, bool),
unsigned NumAlignedDPRCS2Regs) const;
- virtual void eliminateCallFramePseudoInstr(
- MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const;
+ void
+ eliminateCallFramePseudoInstr(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const override;
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp b/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp
index c69d313..0885c4e 100644
--- a/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp
@@ -57,7 +57,7 @@ ARMHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
(LastMCID.TSFlags & ARMII::DomainMask) == ARMII::DomainGeneral) {
MachineBasicBlock::iterator I = LastMI;
if (I != LastMI->getParent()->begin()) {
- I = llvm::prior(I);
+ I = std::prev(I);
DefMI = &*I;
}
}
@@ -77,7 +77,7 @@ ARMHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
}
void ARMHazardRecognizer::Reset() {
- LastMI = 0;
+ LastMI = nullptr;
FpMLxStalls = 0;
ScoreboardHazardRecognizer::Reset();
}
@@ -95,7 +95,7 @@ void ARMHazardRecognizer::EmitInstruction(SUnit *SU) {
void ARMHazardRecognizer::AdvanceCycle() {
if (FpMLxStalls && --FpMLxStalls == 0)
// Stalled for 4 cycles but still can't schedule any other instructions.
- LastMI = 0;
+ LastMI = nullptr;
ScoreboardHazardRecognizer::AdvanceCycle();
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.h b/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.h
index e1dcec3..a8198e2 100644
--- a/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.h
+++ b/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.h
@@ -35,13 +35,13 @@ public:
ARMHazardRecognizer(const InstrItineraryData *ItinData,
const ScheduleDAG *DAG)
: ScoreboardHazardRecognizer(ItinData, DAG, "post-RA-sched"),
- LastMI(0) {}
+ LastMI(nullptr) {}
- virtual HazardType getHazardType(SUnit *SU, int Stalls);
- virtual void Reset();
- virtual void EmitInstruction(SUnit *SU);
- virtual void AdvanceCycle();
- virtual void RecedeCycle();
+ HazardType getHazardType(SUnit *SU, int Stalls) override;
+ void Reset() override;
+ void EmitInstruction(SUnit *SU) override;
+ void AdvanceCycle() override;
+ void RecedeCycle() override;
};
} // end namespace llvm
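
The ARMHazardRecognizer hunks above follow the same mechanical C++11 cleanup applied throughout this merge: `virtual` on derived-class methods is replaced by `override`, and literal 0/NULL pointer values become nullptr (with llvm::prior giving way to std::prev). A minimal before/after sketch of that shape, using hypothetical names rather than anything from the tree:

    // Hypothetical example only; Base/Derived are not classes from this diff.
    struct Base {
      virtual ~Base() = default;
      virtual int *lastItem() const { return nullptr; }
    };

    // Pre-3.5 style: repeat 'virtual', initialize pointers with 0.
    //   struct Derived : Base {
    //     Derived() : Last(0) {}
    //     virtual int *lastItem() const { return Last; }
    //     int *Last;
    //   };

    // Post-3.5 style: 'override' makes the compiler reject signature drift,
    // and nullptr keeps the initializer type-safe.
    struct Derived : Base {
      Derived() : Last(nullptr) {}
      int *lastItem() const override { return Last; }
      int *Last;
    };
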
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 6d9b188..38547cf 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMTargetMachine.h"
@@ -32,12 +31,13 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
+#define DEBUG_TYPE "arm-isel"
+
static cl::opt<bool>
DisableShifterOp("disable-shifter-op", cl::Hidden,
cl::desc("Disable isel of shifter-op"),
@@ -60,24 +60,26 @@ enum AddrMode2Type {
};
class ARMDAGToDAGISel : public SelectionDAGISel {
- ARMBaseTargetMachine &TM;
-
/// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
/// make the right decision when generating code for different targets.
const ARMSubtarget *Subtarget;
public:
- explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm,
- CodeGenOpt::Level OptLevel)
- : SelectionDAGISel(tm, OptLevel), TM(tm),
- Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
+ explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm, CodeGenOpt::Level OptLevel)
+ : SelectionDAGISel(tm, OptLevel) {}
+
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ // Reset the subtarget each time through.
+ Subtarget = &MF.getTarget().getSubtarget<ARMSubtarget>();
+ SelectionDAGISel::runOnMachineFunction(MF);
+ return true;
}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "ARM Instruction Selection";
}
- virtual void PreprocessISelDAG();
+ void PreprocessISelDAG() override;
/// getI32Imm - Return a target constant of type i32 with the specified
/// value.
@@ -85,7 +87,7 @@ public:
return CurDAG->getTargetConstant(Imm, MVT::i32);
}
- SDNode *Select(SDNode *N);
+ SDNode *Select(SDNode *N) override;
bool hasNoVMLxHazardUse(SDNode *N) const;
@@ -253,13 +255,10 @@ private:
SDNode *SelectConcatVector(SDNode *N);
- SDNode *SelectAtomic(SDNode *N, unsigned Op8, unsigned Op16, unsigned Op32, unsigned Op64);
-
/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
/// inline asm expressions.
- virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
- char ConstraintCode,
- std::vector<SDValue> &OutOps);
+ bool SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
+ std::vector<SDValue> &OutOps) override;
// Form pairs of consecutive R, S, D, or Q registers.
SDNode *createGPRPairNode(EVT VT, SDValue V0, SDValue V1);
@@ -401,7 +400,7 @@ void ARMDAGToDAGISel::PreprocessISelDAG() {
N1 = CurDAG->getNode(ISD::SHL, SDLoc(N1), MVT::i32,
N1, CurDAG->getConstant(TZ, MVT::i32));
CurDAG->UpdateNodeOperands(N, N0, N1);
- }
+ }
}
/// hasNoVMLxHazardUse - Return true if it's desirable to select a FP MLA / MLS
@@ -414,8 +413,8 @@ bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
if (!CheckVMLxHazard)
return true;
- if (!Subtarget->isCortexA8() && !Subtarget->isCortexA9() &&
- !Subtarget->isSwift())
+ if (!Subtarget->isCortexA7() && !Subtarget->isCortexA8() &&
+ !Subtarget->isCortexA9() && !Subtarget->isSwift())
return true;
if (!N->hasOneUse())
@@ -425,8 +424,8 @@ bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
if (Use->getOpcode() == ISD::CopyToReg)
return true;
if (Use->isMachineOpcode()) {
- const ARMBaseInstrInfo *TII =
- static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo());
+ const ARMBaseInstrInfo *TII = static_cast<const ARMBaseInstrInfo *>(
+ CurDAG->getTarget().getInstrInfo());
const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
if (MCID.mayStore())
@@ -534,8 +533,7 @@ bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
}
if (N.getOpcode() == ARMISD::Wrapper &&
- !(Subtarget->useMovt() &&
- N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
+ N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
Base = N.getOperand(0);
} else
Base = N;
@@ -702,8 +700,7 @@ AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
Base = CurDAG->getTargetFrameIndex(FI,
getTargetLowering()->getPointerTy());
} else if (N.getOpcode() == ARMISD::Wrapper &&
- !(Subtarget->useMovt() &&
- N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
+ N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
Base = N.getOperand(0);
}
Offset = CurDAG->getRegister(0, MVT::i32);
@@ -963,8 +960,7 @@ bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
Base = CurDAG->getTargetFrameIndex(FI,
getTargetLowering()->getPointerTy());
} else if (N.getOpcode() == ARMISD::Wrapper &&
- !(Subtarget->useMovt() &&
- N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
+ N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
Base = N.getOperand(0);
}
Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
@@ -1141,8 +1137,7 @@ ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
if (!CurDAG->isBaseWithConstantOffset(N)) {
if (N.getOpcode() == ARMISD::Wrapper &&
- !(Subtarget->useMovt() &&
- N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
+ N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
Base = N.getOperand(0);
} else {
Base = N;
@@ -1278,8 +1273,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
}
if (N.getOpcode() == ARMISD::Wrapper &&
- !(Subtarget->useMovt() &&
- N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
+ N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
Base = N.getOperand(0);
if (Base.getOpcode() == ISD::TargetConstantPool)
return false; // We want to select t2LDRpci instead.
@@ -1412,7 +1406,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base,
SDValue &OffImm) {
- // This *must* succeed since it's used for the irreplacable ldrex and strex
+ // This *must* succeed since it's used for the irreplaceable ldrex and strex
// instructions.
Base = N;
OffImm = CurDAG->getTargetConstant(0, MVT::i32);
@@ -1449,7 +1443,7 @@ SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
LoadSDNode *LD = cast<LoadSDNode>(N);
ISD::MemIndexedMode AM = LD->getAddressingMode();
if (AM == ISD::UNINDEXED)
- return NULL;
+ return nullptr;
EVT LoadedVT = LD->getMemoryVT();
SDValue Offset, AMOpc;
@@ -1515,14 +1509,14 @@ SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
}
}
- return NULL;
+ return nullptr;
}
SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
LoadSDNode *LD = cast<LoadSDNode>(N);
ISD::MemIndexedMode AM = LD->getAddressingMode();
if (AM == ISD::UNINDEXED)
- return NULL;
+ return nullptr;
EVT LoadedVT = LD->getMemoryVT();
bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
@@ -1549,7 +1543,7 @@ SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
break;
default:
- return NULL;
+ return nullptr;
}
Match = true;
}
@@ -1563,7 +1557,7 @@ SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
MVT::Other, Ops);
}
- return NULL;
+ return nullptr;
}
/// \brief Form a GPRPair pseudo register from a pair of GPR regs.
@@ -1708,10 +1702,10 @@ static bool isVSTfixed(unsigned Opc)
case ARM::VST1d16wb_fixed : return true;
case ARM::VST1d32wb_fixed : return true;
case ARM::VST1d64wb_fixed : return true;
- case ARM::VST1q8wb_fixed : return true;
- case ARM::VST1q16wb_fixed : return true;
- case ARM::VST1q32wb_fixed : return true;
- case ARM::VST1q64wb_fixed : return true;
+ case ARM::VST1q8wb_fixed : return true;
+ case ARM::VST1q16wb_fixed : return true;
+ case ARM::VST1q32wb_fixed : return true;
+ case ARM::VST1q64wb_fixed : return true;
case ARM::VST1d64TPseudoWB_fixed : return true;
case ARM::VST1d64QPseudoWB_fixed : return true;
case ARM::VST2d8wb_fixed : return true;
@@ -1785,7 +1779,7 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
SDValue MemAddr, Align;
unsigned AddrOpIdx = isUpdating ? 1 : 2;
if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
- return NULL;
+ return nullptr;
SDValue Chain = N->getOperand(0);
EVT VT = N->getValueType(0);
@@ -1904,7 +1898,7 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
if (isUpdating)
ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
- return NULL;
+ return nullptr;
}
SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
@@ -1918,7 +1912,7 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
unsigned AddrOpIdx = isUpdating ? 1 : 2;
unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
- return NULL;
+ return nullptr;
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
@@ -2064,7 +2058,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
unsigned AddrOpIdx = isUpdating ? 1 : 2;
unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
- return NULL;
+ return nullptr;
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
@@ -2169,7 +2163,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
if (isUpdating)
ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
- return NULL;
+ return nullptr;
}
SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
@@ -2180,7 +2174,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
SDValue MemAddr, Align;
if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align))
- return NULL;
+ return nullptr;
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
@@ -2252,7 +2246,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
if (isUpdating)
ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
- return NULL;
+ return nullptr;
}
SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
@@ -2291,7 +2285,7 @@ SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
bool isSigned) {
if (!Subtarget->hasV6T2Ops())
- return NULL;
+ return nullptr;
unsigned Opc = isSigned
? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
@@ -2304,7 +2298,7 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
// The immediate is a mask of the low bits iff imm & (imm+1) == 0
if (And_imm & (And_imm + 1))
- return NULL;
+ return nullptr;
unsigned Srl_imm = 0;
if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
@@ -2324,7 +2318,7 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
SDValue Ops[] = { N->getOperand(0).getOperand(0),
CurDAG->getTargetConstant(LSB, MVT::i32),
getAL(CurDAG), Reg0, Reg0 };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
+ return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
}
// ARM models shift instructions as MOVsi with shifter operand.
@@ -2334,17 +2328,17 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
MVT::i32);
SDValue Ops[] = { N->getOperand(0).getOperand(0), ShOpc,
getAL(CurDAG), Reg0, Reg0 };
- return CurDAG->SelectNodeTo(N, ARM::MOVsi, MVT::i32, Ops, 5);
+ return CurDAG->SelectNodeTo(N, ARM::MOVsi, MVT::i32, Ops);
}
SDValue Ops[] = { N->getOperand(0).getOperand(0),
CurDAG->getTargetConstant(LSB, MVT::i32),
CurDAG->getTargetConstant(Width, MVT::i32),
- getAL(CurDAG), Reg0 };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
+ getAL(CurDAG), Reg0 };
+ return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
}
}
- return NULL;
+ return nullptr;
}
// Otherwise, we're looking for a shift of a shift
@@ -2358,16 +2352,16 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
unsigned Width = 32 - Srl_imm - 1;
int LSB = Srl_imm - Shl_imm;
if (LSB < 0)
- return NULL;
+ return nullptr;
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
SDValue Ops[] = { N->getOperand(0).getOperand(0),
CurDAG->getTargetConstant(LSB, MVT::i32),
CurDAG->getTargetConstant(Width, MVT::i32),
getAL(CurDAG), Reg0 };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
+ return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
}
}
- return NULL;
+ return nullptr;
}
/// Target-specific DAG combining for ISD::XOR.
@@ -2386,10 +2380,10 @@ SDNode *ARMDAGToDAGISel::SelectABSOp(SDNode *N){
EVT VT = N->getValueType(0);
if (Subtarget->isThumb1Only())
- return NULL;
+ return nullptr;
if (XORSrc0.getOpcode() != ISD::ADD || XORSrc1.getOpcode() != ISD::SRA)
- return NULL;
+ return nullptr;
SDValue ADDSrc0 = XORSrc0.getOperand(0);
SDValue ADDSrc1 = XORSrc0.getOperand(1);
@@ -2400,13 +2394,13 @@ SDNode *ARMDAGToDAGISel::SelectABSOp(SDNode *N){
unsigned Size = XType.getSizeInBits() - 1;
if (ADDSrc1 == XORSrc1 && ADDSrc0 == SRASrc0 &&
- XType.isInteger() && SRAConstant != NULL &&
+ XType.isInteger() && SRAConstant != nullptr &&
Size == SRAConstant->getZExtValue()) {
unsigned Opcode = Subtarget->isThumb2() ? ARM::t2ABS : ARM::ABS;
return CurDAG->SelectNodeTo(N, Opcode, VT, ADDSrc0);
}
- return NULL;
+ return nullptr;
}
SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
@@ -2418,44 +2412,12 @@ SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
return createDRegPairNode(VT, N->getOperand(0), N->getOperand(1));
}
-SDNode *ARMDAGToDAGISel::SelectAtomic(SDNode *Node, unsigned Op8,
- unsigned Op16,unsigned Op32,
- unsigned Op64) {
- // Mostly direct translation to the given operations, except that we preserve
- // the AtomicOrdering for use later on.
- AtomicSDNode *AN = cast<AtomicSDNode>(Node);
- EVT VT = AN->getMemoryVT();
-
- unsigned Op;
- SDVTList VTs = CurDAG->getVTList(AN->getValueType(0), MVT::Other);
- if (VT == MVT::i8)
- Op = Op8;
- else if (VT == MVT::i16)
- Op = Op16;
- else if (VT == MVT::i32)
- Op = Op32;
- else if (VT == MVT::i64) {
- Op = Op64;
- VTs = CurDAG->getVTList(MVT::i32, MVT::i32, MVT::Other);
- } else
- llvm_unreachable("Unexpected atomic operation");
-
- SmallVector<SDValue, 6> Ops;
- for (unsigned i = 1; i < AN->getNumOperands(); ++i)
- Ops.push_back(AN->getOperand(i));
-
- Ops.push_back(CurDAG->getTargetConstant(AN->getOrdering(), MVT::i32));
- Ops.push_back(AN->getOperand(0)); // Chain moves to the end
-
- return CurDAG->SelectNodeTo(Node, Op, VTs, &Ops[0], Ops.size());
-}
-
SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
SDLoc dl(N);
if (N->isMachineOpcode()) {
N->setNodeId(-1);
- return NULL; // Already selected.
+ return nullptr; // Already selected.
}
switch (N->getOpcode()) {
@@ -2477,19 +2439,21 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
case ISD::Constant: {
unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
bool UseCP = true;
- if (Subtarget->hasThumb2())
+ if (Subtarget->useMovt(*MF))
// Thumb2-aware targets have the MOVT instruction, so all immediates can
// be done with MOV + MOVT, at worst.
- UseCP = 0;
+ UseCP = false;
else {
if (Subtarget->isThumb()) {
- UseCP = (Val > 255 && // MOV
- ~Val > 255 && // MOV + MVN
- !ARM_AM::isThumbImmShiftedVal(Val)); // MOV + LSL
+ UseCP = (Val > 255 && // MOV
+ ~Val > 255 && // MOV + MVN
+ !ARM_AM::isThumbImmShiftedVal(Val) && // MOV + LSL
+ !(Subtarget->hasV6T2Ops() && Val <= 0xffff)); // MOVW
} else
- UseCP = (ARM_AM::getSOImmVal(Val) == -1 && // MOV
- ARM_AM::getSOImmVal(~Val) == -1 && // MVN
- !ARM_AM::isSOImmTwoPartVal(Val)); // two instrs.
+ UseCP = (ARM_AM::getSOImmVal(Val) == -1 && // MOV
+ ARM_AM::getSOImmVal(~Val) == -1 && // MVN
+ !ARM_AM::isSOImmTwoPartVal(Val) && // two instrs.
+ !(Subtarget->hasV6T2Ops() && Val <= 0xffff)); // MOVW
}
if (UseCP) {
@@ -2499,7 +2463,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
getTargetLowering()->getPointerTy());
SDNode *ResNode;
- if (Subtarget->isThumb1Only()) {
+ if (Subtarget->isThumb()) {
SDValue Pred = getAL(CurDAG);
SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() };
@@ -2517,7 +2481,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
Ops);
}
ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0));
- return NULL;
+ return nullptr;
}
// Other cases are autogenerated.
@@ -2531,14 +2495,14 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
if (Subtarget->isThumb1Only()) {
SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
- return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, Ops, 4);
+ return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, Ops);
} else {
unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
ARM::t2ADDri : ARM::ADDri);
SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
CurDAG->getRegister(0, MVT::i32) };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
+ return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
}
}
case ISD::SRL:
@@ -2565,10 +2529,10 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
if (Subtarget->isThumb()) {
SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
- return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops, 6);
+ return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops);
} else {
SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
- return CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops, 7);
+ return CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops);
}
}
if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
@@ -2581,10 +2545,10 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
if (Subtarget->isThumb()) {
SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
- return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops, 6);
+ return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops);
} else {
SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
- return CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops, 7);
+ return CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops);
}
}
}
@@ -2699,7 +2663,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
}
}
case ISD::LOAD: {
- SDNode *ResNode = 0;
+ SDNode *ResNode = nullptr;
if (Subtarget->isThumb() && Subtarget->hasThumb2())
ResNode = SelectT2IndexedLoad(N);
else
@@ -2746,13 +2710,13 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
}
ReplaceUses(SDValue(N, 0),
SDValue(Chain.getNode(), Chain.getResNo()));
- return NULL;
+ return nullptr;
}
case ARMISD::VZIP: {
unsigned Opc = 0;
EVT VT = N->getValueType(0);
switch (VT.getSimpleVT().SimpleTy) {
- default: return NULL;
+ default: return nullptr;
case MVT::v8i8: Opc = ARM::VZIPd8; break;
case MVT::v4i16: Opc = ARM::VZIPd16; break;
case MVT::v2f32:
@@ -2772,7 +2736,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
unsigned Opc = 0;
EVT VT = N->getValueType(0);
switch (VT.getSimpleVT().SimpleTy) {
- default: return NULL;
+ default: return nullptr;
case MVT::v8i8: Opc = ARM::VUZPd8; break;
case MVT::v4i16: Opc = ARM::VUZPd16; break;
case MVT::v2f32:
@@ -2792,7 +2756,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
unsigned Opc = 0;
EVT VT = N->getValueType(0);
switch (VT.getSimpleVT().SimpleTy) {
- default: return NULL;
+ default: return nullptr;
case MVT::v8i8: Opc = ARM::VTRNd8; break;
case MVT::v4i16: Opc = ARM::VTRNd16; break;
case MVT::v2f32:
@@ -2873,7 +2837,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VLD1q16wb_fixed,
ARM::VLD1q32wb_fixed,
ARM::VLD1q64wb_fixed };
- return SelectVLD(N, true, 1, DOpcodes, QOpcodes, 0);
+ return SelectVLD(N, true, 1, DOpcodes, QOpcodes, nullptr);
}
case ARMISD::VLD2_UPD: {
@@ -2884,7 +2848,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
static const uint16_t QOpcodes[] = { ARM::VLD2q8PseudoWB_fixed,
ARM::VLD2q16PseudoWB_fixed,
ARM::VLD2q32PseudoWB_fixed };
- return SelectVLD(N, true, 2, DOpcodes, QOpcodes, 0);
+ return SelectVLD(N, true, 2, DOpcodes, QOpcodes, nullptr);
}
case ARMISD::VLD3_UPD: {
@@ -2951,7 +2915,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VST1q16wb_fixed,
ARM::VST1q32wb_fixed,
ARM::VST1q64wb_fixed };
- return SelectVST(N, true, 1, DOpcodes, QOpcodes, 0);
+ return SelectVST(N, true, 1, DOpcodes, QOpcodes, nullptr);
}
case ARMISD::VST2_UPD: {
@@ -2962,7 +2926,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
static const uint16_t QOpcodes[] = { ARM::VST2q8PseudoWB_fixed,
ARM::VST2q16PseudoWB_fixed,
ARM::VST2q32PseudoWB_fixed };
- return SelectVST(N, true, 2, DOpcodes, QOpcodes, 0);
+ return SelectVST(N, true, 2, DOpcodes, QOpcodes, nullptr);
}
case ARMISD::VST3_UPD: {
@@ -3027,13 +2991,16 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
default:
break;
+ case Intrinsic::arm_ldaexd:
case Intrinsic::arm_ldrexd: {
- SDValue MemAddr = N->getOperand(2);
SDLoc dl(N);
SDValue Chain = N->getOperand(0);
-
+ SDValue MemAddr = N->getOperand(2);
bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
- unsigned NewOpc = isThumb ? ARM::t2LDREXD :ARM::LDREXD;
+
+ bool IsAcquire = IntNo == Intrinsic::arm_ldaexd;
+ unsigned NewOpc = isThumb ? (IsAcquire ? ARM::t2LDAEXD : ARM::t2LDREXD)
+ : (IsAcquire ? ARM::LDAEXD : ARM::LDREXD);
// arm_ldrexd returns a i64 value in {i32, i32}
std::vector<EVT> ResTys;
@@ -3083,9 +3050,9 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ReplaceUses(SDValue(N, 1), Result);
}
ReplaceUses(SDValue(N, 2), OutChain);
- return NULL;
+ return nullptr;
}
-
+ case Intrinsic::arm_stlexd:
case Intrinsic::arm_strexd: {
SDLoc dl(N);
SDValue Chain = N->getOperand(0);
@@ -3111,7 +3078,9 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
Ops.push_back(CurDAG->getRegister(0, MVT::i32));
Ops.push_back(Chain);
- unsigned NewOpc = isThumb ? ARM::t2STREXD : ARM::STREXD;
+ bool IsRelease = IntNo == Intrinsic::arm_stlexd;
+ unsigned NewOpc = isThumb ? (IsRelease ? ARM::t2STLEXD : ARM::t2STREXD)
+ : (IsRelease ? ARM::STLEXD : ARM::STREXD);
SDNode *St = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
// Transfer memoperands.
@@ -3127,7 +3096,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VLD1d32, ARM::VLD1d64 };
static const uint16_t QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
ARM::VLD1q32, ARM::VLD1q64};
- return SelectVLD(N, false, 1, DOpcodes, QOpcodes, 0);
+ return SelectVLD(N, false, 1, DOpcodes, QOpcodes, nullptr);
}
case Intrinsic::arm_neon_vld2: {
@@ -3135,7 +3104,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VLD2d32, ARM::VLD1q64 };
static const uint16_t QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
ARM::VLD2q32Pseudo };
- return SelectVLD(N, false, 2, DOpcodes, QOpcodes, 0);
+ return SelectVLD(N, false, 2, DOpcodes, QOpcodes, nullptr);
}
case Intrinsic::arm_neon_vld3: {
@@ -3198,7 +3167,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VST1d32, ARM::VST1d64 };
static const uint16_t QOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
ARM::VST1q32, ARM::VST1q64 };
- return SelectVST(N, false, 1, DOpcodes, QOpcodes, 0);
+ return SelectVST(N, false, 1, DOpcodes, QOpcodes, nullptr);
}
case Intrinsic::arm_neon_vst2: {
@@ -3206,7 +3175,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VST2d32, ARM::VST1q64 };
static uint16_t QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
ARM::VST2q32Pseudo };
- return SelectVST(N, false, 2, DOpcodes, QOpcodes, 0);
+ return SelectVST(N, false, 2, DOpcodes, QOpcodes, nullptr);
}
case Intrinsic::arm_neon_vst3: {
@@ -3320,91 +3289,6 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
case ISD::CONCAT_VECTORS:
return SelectConcatVector(N);
-
- case ISD::ATOMIC_LOAD:
- if (cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64)
- return SelectAtomic(N, 0, 0, 0, ARM::ATOMIC_LOAD_I64);
- else
- break;
-
- case ISD::ATOMIC_STORE:
- if (cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64)
- return SelectAtomic(N, 0, 0, 0, ARM::ATOMIC_STORE_I64);
- else
- break;
-
- case ISD::ATOMIC_LOAD_ADD:
- return SelectAtomic(N,
- ARM::ATOMIC_LOAD_ADD_I8,
- ARM::ATOMIC_LOAD_ADD_I16,
- ARM::ATOMIC_LOAD_ADD_I32,
- ARM::ATOMIC_LOAD_ADD_I64);
- case ISD::ATOMIC_LOAD_SUB:
- return SelectAtomic(N,
- ARM::ATOMIC_LOAD_SUB_I8,
- ARM::ATOMIC_LOAD_SUB_I16,
- ARM::ATOMIC_LOAD_SUB_I32,
- ARM::ATOMIC_LOAD_SUB_I64);
- case ISD::ATOMIC_LOAD_AND:
- return SelectAtomic(N,
- ARM::ATOMIC_LOAD_AND_I8,
- ARM::ATOMIC_LOAD_AND_I16,
- ARM::ATOMIC_LOAD_AND_I32,
- ARM::ATOMIC_LOAD_AND_I64);
- case ISD::ATOMIC_LOAD_OR:
- return SelectAtomic(N,
- ARM::ATOMIC_LOAD_OR_I8,
- ARM::ATOMIC_LOAD_OR_I16,
- ARM::ATOMIC_LOAD_OR_I32,
- ARM::ATOMIC_LOAD_OR_I64);
- case ISD::ATOMIC_LOAD_XOR:
- return SelectAtomic(N,
- ARM::ATOMIC_LOAD_XOR_I8,
- ARM::ATOMIC_LOAD_XOR_I16,
- ARM::ATOMIC_LOAD_XOR_I32,
- ARM::ATOMIC_LOAD_XOR_I64);
- case ISD::ATOMIC_LOAD_NAND:
- return SelectAtomic(N,
- ARM::ATOMIC_LOAD_NAND_I8,
- ARM::ATOMIC_LOAD_NAND_I16,
- ARM::ATOMIC_LOAD_NAND_I32,
- ARM::ATOMIC_LOAD_NAND_I64);
- case ISD::ATOMIC_LOAD_MIN:
- return SelectAtomic(N,
- ARM::ATOMIC_LOAD_MIN_I8,
- ARM::ATOMIC_LOAD_MIN_I16,
- ARM::ATOMIC_LOAD_MIN_I32,
- ARM::ATOMIC_LOAD_MIN_I64);
- case ISD::ATOMIC_LOAD_MAX:
- return SelectAtomic(N,
- ARM::ATOMIC_LOAD_MAX_I8,
- ARM::ATOMIC_LOAD_MAX_I16,
- ARM::ATOMIC_LOAD_MAX_I32,
- ARM::ATOMIC_LOAD_MAX_I64);
- case ISD::ATOMIC_LOAD_UMIN:
- return SelectAtomic(N,
- ARM::ATOMIC_LOAD_UMIN_I8,
- ARM::ATOMIC_LOAD_UMIN_I16,
- ARM::ATOMIC_LOAD_UMIN_I32,
- ARM::ATOMIC_LOAD_UMIN_I64);
- case ISD::ATOMIC_LOAD_UMAX:
- return SelectAtomic(N,
- ARM::ATOMIC_LOAD_UMAX_I8,
- ARM::ATOMIC_LOAD_UMAX_I16,
- ARM::ATOMIC_LOAD_UMAX_I32,
- ARM::ATOMIC_LOAD_UMAX_I64);
- case ISD::ATOMIC_SWAP:
- return SelectAtomic(N,
- ARM::ATOMIC_SWAP_I8,
- ARM::ATOMIC_SWAP_I16,
- ARM::ATOMIC_SWAP_I32,
- ARM::ATOMIC_SWAP_I64);
- case ISD::ATOMIC_CMP_SWAP:
- return SelectAtomic(N,
- ARM::ATOMIC_CMP_SWAP_I8,
- ARM::ATOMIC_CMP_SWAP_I16,
- ARM::ATOMIC_CMP_SWAP_I32,
- ARM::ATOMIC_CMP_SWAP_I64);
}
return SelectCode(N);
@@ -3425,7 +3309,8 @@ SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){
// them into a GPRPair.
SDLoc dl(N);
- SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps-1) : SDValue(0,0);
+ SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps-1)
+ : SDValue(nullptr,0);
SmallVector<bool, 8> OpChanged;
// Glue node will be appended late.
@@ -3507,7 +3392,7 @@ SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){
// Update the original glue user.
std::vector<SDValue> Ops(GU->op_begin(), GU->op_end()-1);
Ops.push_back(T1.getValue(1));
- CurDAG->UpdateNodeOperands(GU, &Ops[0], Ops.size());
+ CurDAG->UpdateNodeOperands(GU, Ops);
GU = T1.getNode();
}
else {
@@ -3554,11 +3439,10 @@ SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){
if (Glue.getNode())
AsmNodeOperands.push_back(Glue);
if (!Changed)
- return NULL;
+ return nullptr;
SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N),
- CurDAG->getVTList(MVT::Other, MVT::Glue), &AsmNodeOperands[0],
- AsmNodeOperands.size());
+ CurDAG->getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
New->setNodeId(-1);
return New.getNode();
}
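
One behavioural change worth noting in the ARMISelDAGToDAG.cpp hunks above: the selector no longer caches the subtarget (or the target machine) at construction time; the added runOnMachineFunction override re-reads it from the MachineFunction each time, as the in-diff comment "Reset the subtarget each time through" says. A rough sketch of that pattern, with placeholder names (MyTargetDAGToDAGISel, MySubtarget are not real LLVM classes); only the shape is taken from the diff:

    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/CodeGen/SelectionDAGISel.h"
    using namespace llvm;

    class MyTargetDAGToDAGISel : public SelectionDAGISel {
      const MySubtarget *Subtarget; // no longer fixed at construction

    public:
      explicit MyTargetDAGToDAGISel(TargetMachine &TM, CodeGenOpt::Level OL)
          : SelectionDAGISel(TM, OL) {}

      bool runOnMachineFunction(MachineFunction &MF) override {
        // Re-query the subtarget on every function, so per-function target
        // attributes are honoured instead of whatever the TargetMachine
        // held when the pass was created.
        Subtarget = &MF.getTarget().getSubtarget<MySubtarget>();
        SelectionDAGISel::runOnMachineFunction(MF);
        return true;
      }
    };
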
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp b/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
index f3be818..a76531a 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -12,9 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "arm-isel"
#include "ARMISelLowering.h"
-#include "ARM.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
@@ -38,29 +36,26 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include <utility>
using namespace llvm;
+#define DEBUG_TYPE "arm-isel"
+
STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
-// This option should go away when tail calls fully work.
-static cl::opt<bool>
-EnableARMTailCalls("arm-tail-calls", cl::Hidden,
- cl::desc("Generate tail calls (TEMPORARY OPTION)."),
- cl::init(false));
-
cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
cl::desc("Generate calls via indirect call instructions"),
@@ -87,7 +82,7 @@ namespace {
}
// The APCS parameter registers.
-static const uint16_t GPRArgRegs[] = {
+static const MCPhysReg GPRArgRegs[] = {
ARM::R0, ARM::R1, ARM::R2, ARM::R3
};
@@ -160,25 +155,26 @@ void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}
-static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
- if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
+static TargetLoweringObjectFile *createTLOF(const Triple &TT) {
+ if (TT.isOSBinFormatMachO())
return new TargetLoweringObjectFileMachO();
-
+ if (TT.isOSWindows())
+ return new TargetLoweringObjectFileCOFF();
return new ARMElfTargetObjectFile();
}
ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
- : TargetLowering(TM, createTLOF(TM)) {
+ : TargetLowering(TM, createTLOF(Triple(TM.getTargetTriple()))) {
Subtarget = &TM.getSubtarget<ARMSubtarget>();
RegInfo = TM.getRegisterInfo();
Itins = TM.getInstrItineraryData();
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
- if (Subtarget->isTargetIOS()) {
+ if (Subtarget->isTargetMachO()) {
// Uses VFP for Thumb libfuncs if available.
if (Subtarget->isThumb() && Subtarget->hasVFP2() &&
- Subtarget->hasARMOps()) {
+ Subtarget->hasARMOps() && !TM.Options.UseSoftFloat) {
// Single-precision floating-point arithmetic.
setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
@@ -254,172 +250,134 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
}
// These libcalls are not available in 32-bit.
- setLibcallName(RTLIB::SHL_I128, 0);
- setLibcallName(RTLIB::SRL_I128, 0);
- setLibcallName(RTLIB::SRA_I128, 0);
-
- if (Subtarget->isAAPCS_ABI() && !Subtarget->isTargetDarwin()) {
- // Double-precision floating-point arithmetic helper functions
- // RTABI chapter 4.1.2, Table 2
- setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
- setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
- setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul");
- setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub");
- setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS);
-
- // Double-precision floating-point comparison helper functions
- // RTABI chapter 4.1.2, Table 3
- setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq");
- setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
- setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq");
- setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ);
- setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt");
- setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
- setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple");
- setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
- setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge");
- setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
- setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt");
- setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
- setLibcallName(RTLIB::UO_F64, "__aeabi_dcmpun");
- setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
- setLibcallName(RTLIB::O_F64, "__aeabi_dcmpun");
- setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);
- setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS);
-
- // Single-precision floating-point arithmetic helper functions
- // RTABI chapter 4.1.2, Table 4
- setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd");
- setLibcallName(RTLIB::DIV_F32, "__aeabi_fdiv");
- setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul");
- setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub");
- setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS);
-
- // Single-precision floating-point comparison helper functions
- // RTABI chapter 4.1.2, Table 5
- setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq");
- setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
- setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq");
- setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ);
- setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt");
- setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
- setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple");
- setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
- setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge");
- setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
- setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt");
- setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
- setLibcallName(RTLIB::UO_F32, "__aeabi_fcmpun");
- setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
- setLibcallName(RTLIB::O_F32, "__aeabi_fcmpun");
- setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);
- setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS);
-
- // Floating-point to integer conversions.
- // RTABI chapter 4.1.2, Table 6
- setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz");
- setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz");
- setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz");
- setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz");
- setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz");
- setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz");
- setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz");
- setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz");
- setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS);
-
- // Conversions between floating types.
- // RTABI chapter 4.1.2, Table 7
- setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f");
- setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d");
- setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS);
-
- // Integer to floating-point conversions.
- // RTABI chapter 4.1.2, Table 8
- setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d");
- setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d");
- setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d");
- setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d");
- setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f");
- setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f");
- setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f");
- setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f");
- setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
-
- // Long long helper functions
- // RTABI chapter 4.2, Table 9
- setLibcallName(RTLIB::MUL_I64, "__aeabi_lmul");
- setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl");
- setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr");
- setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr");
- setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS);
-
- // Integer division functions
- // RTABI chapter 4.3.1
- setLibcallName(RTLIB::SDIV_I8, "__aeabi_idiv");
- setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
- setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
- setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
- setLibcallName(RTLIB::UDIV_I8, "__aeabi_uidiv");
- setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
- setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
- setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
- setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
-
- // Memory operations
- // RTABI chapter 4.3.4
- setLibcallName(RTLIB::MEMCPY, "__aeabi_memcpy");
- setLibcallName(RTLIB::MEMMOVE, "__aeabi_memmove");
- setLibcallName(RTLIB::MEMSET, "__aeabi_memset");
- setLibcallCallingConv(RTLIB::MEMCPY, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::MEMMOVE, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::MEMSET, CallingConv::ARM_AAPCS);
+ setLibcallName(RTLIB::SHL_I128, nullptr);
+ setLibcallName(RTLIB::SRL_I128, nullptr);
+ setLibcallName(RTLIB::SRA_I128, nullptr);
+
+ if (Subtarget->isAAPCS_ABI() && !Subtarget->isTargetMachO() &&
+ !Subtarget->isTargetWindows()) {
+ static const struct {
+ const RTLIB::Libcall Op;
+ const char * const Name;
+ const CallingConv::ID CC;
+ const ISD::CondCode Cond;
+ } LibraryCalls[] = {
+ // Double-precision floating-point arithmetic helper functions
+ // RTABI chapter 4.1.2, Table 2
+ { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+
+ // Double-precision floating-point comparison helper functions
+ // RTABI chapter 4.1.2, Table 3
+ { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
+ { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
+ { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
+ { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
+ { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
+ { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
+ { RTLIB::UO_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
+ { RTLIB::O_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
+
+ // Single-precision floating-point arithmetic helper functions
+ // RTABI chapter 4.1.2, Table 4
+ { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+
+ // Single-precision floating-point comparison helper functions
+ // RTABI chapter 4.1.2, Table 5
+ { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
+ { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
+ { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
+ { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
+ { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
+ { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
+ { RTLIB::UO_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
+ { RTLIB::O_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
+
+ // Floating-point to integer conversions.
+ // RTABI chapter 4.1.2, Table 6
+ { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+
+ // Conversions between floating types.
+ // RTABI chapter 4.1.2, Table 7
+ { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::FPEXT_F32_F64, "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+
+ // Integer to floating-point conversions.
+ // RTABI chapter 4.1.2, Table 8
+ { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+
+ // Long long helper functions
+ // RTABI chapter 4.2, Table 9
+ { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+
+ // Integer division functions
+ // RTABI chapter 4.3.1
+ { RTLIB::SDIV_I8, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::SDIV_I16, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::SDIV_I32, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::SDIV_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::UDIV_I8, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::UDIV_I16, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::UDIV_I32, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+
+ // Memory operations
+ // RTABI chapter 4.3.4
+ { RTLIB::MEMCPY, "__aeabi_memcpy", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ { RTLIB::MEMSET, "__aeabi_memset", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
+ };
+
+ for (const auto &LC : LibraryCalls) {
+ setLibcallName(LC.Op, LC.Name);
+ setLibcallCallingConv(LC.Op, LC.CC);
+ if (LC.Cond != ISD::SETCC_INVALID)
+ setCmpLibcallCC(LC.Op, LC.Cond);
+ }
+ }
+
+ if (Subtarget->isTargetWindows()) {
+ static const struct {
+ const RTLIB::Libcall Op;
+ const char * const Name;
+ const CallingConv::ID CC;
+ } LibraryCalls[] = {
+ { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
+ { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
+ { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
+ { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
+ { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
+ { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
+ { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
+ { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
+ };
+
+ for (const auto &LC : LibraryCalls) {
+ setLibcallName(LC.Op, LC.Name);
+ setLibcallCallingConv(LC.Op, LC.CC);
+ }
}
// Use divmod compiler-rt calls for iOS 5.0 and later.
@@ -438,8 +396,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
addRegisterClass(MVT::f32, &ARM::SPRRegClass);
if (!Subtarget->isFPOnlySP())
addRegisterClass(MVT::f64, &ARM::DPRRegClass);
-
- setTruncStoreAction(MVT::f64, MVT::f32, Expand);
}
for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
@@ -451,6 +407,13 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand);
setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand);
+
+ setOperationAction(ISD::MULHS, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::MULHU, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
+
+ setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand);
}
setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
@@ -617,8 +580,14 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
computeRegisterProperties();
- // ARM does not have f32 extending load.
+ // ARM does not have floating-point extending loads.
setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);
+
+ // ... or truncating stores
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+ setTruncStoreAction(MVT::f32, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f16, Expand);
// ARM does not have i1 sign extending load.
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
@@ -638,6 +607,11 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
}
}
+ setOperationAction(ISD::SADDO, MVT::i32, Custom);
+ setOperationAction(ISD::UADDO, MVT::i32, Custom);
+ setOperationAction(ISD::SSUBO, MVT::i32, Custom);
+ setOperationAction(ISD::USUBO, MVT::i32, Custom);
+
// i64 operation support.
setOperationAction(ISD::MUL, MVT::i64, Expand);
setOperationAction(ISD::MULHU, MVT::i32, Expand);
@@ -733,39 +707,31 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
- if (!Subtarget->isTargetDarwin()) {
- // Non-Darwin platforms may return values in these registers via the
+ if (!Subtarget->isTargetMachO()) {
+ // Non-MachO platforms may return values in these registers via the
// personality function.
setExceptionPointerRegister(ARM::R0);
setExceptionSelectorRegister(ARM::R1);
}
- setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
+ if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment())
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
+ else
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
+
// ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
// the default expansion.
if (Subtarget->hasAnyDataBarrier() && !Subtarget->isThumb1Only()) {
- // ATOMIC_FENCE needs custom lowering; the other 32-bit ones are legal and
- // handled normally.
+ // ATOMIC_FENCE needs custom lowering; the others should have been expanded
+ // to ldrex/strex loops already.
setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
- // Custom lowering for 64-bit ops
- setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
+
// On v8, we have particularly efficient implementations of atomic fences
// if they can be combined with nearby atomic loads and stores.
if (!Subtarget->hasV8Ops()) {
// Automatically insert fences (dmb ist) around ATOMIC_SWAP etc.
setInsertFencesForAtomic(true);
}
- setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
} else {
// If there's anything we can use as a barrier, go through custom lowering
// for ATOMIC_FENCE.
@@ -863,13 +829,20 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
}
- // Special handling for half-precision FP.
+
+ // v8 adds f64 <-> f16 conversion. Before that it should be expanded.
+ if (!Subtarget->hasV8Ops()) {
+ setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
+ setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
+ }
+
+ // fp16 is a special v7 extension that adds f16 <-> f32 conversions.
if (!Subtarget->hasFP16()) {
- setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
- setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
+ setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
+ setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
}
}
-
+
// Combine sin / cos into one node or libcall if possible.
if (Subtarget->hasSinCos()) {
setLibcallName(RTLIB::SINCOS_F32, "sincosf");
@@ -920,44 +893,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
}
-static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord,
- bool isThumb2, unsigned &LdrOpc,
- unsigned &StrOpc) {
- static const unsigned LoadBares[4][2] = {{ARM::LDREXB, ARM::t2LDREXB},
- {ARM::LDREXH, ARM::t2LDREXH},
- {ARM::LDREX, ARM::t2LDREX},
- {ARM::LDREXD, ARM::t2LDREXD}};
- static const unsigned LoadAcqs[4][2] = {{ARM::LDAEXB, ARM::t2LDAEXB},
- {ARM::LDAEXH, ARM::t2LDAEXH},
- {ARM::LDAEX, ARM::t2LDAEX},
- {ARM::LDAEXD, ARM::t2LDAEXD}};
- static const unsigned StoreBares[4][2] = {{ARM::STREXB, ARM::t2STREXB},
- {ARM::STREXH, ARM::t2STREXH},
- {ARM::STREX, ARM::t2STREX},
- {ARM::STREXD, ARM::t2STREXD}};
- static const unsigned StoreRels[4][2] = {{ARM::STLEXB, ARM::t2STLEXB},
- {ARM::STLEXH, ARM::t2STLEXH},
- {ARM::STLEX, ARM::t2STLEX},
- {ARM::STLEXD, ARM::t2STLEXD}};
-
- const unsigned (*LoadOps)[2], (*StoreOps)[2];
- if (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent)
- LoadOps = LoadAcqs;
- else
- LoadOps = LoadBares;
-
- if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
- StoreOps = StoreRels;
- else
- StoreOps = StoreBares;
-
- assert(isPowerOf2_32(Size) && Size <= 8 &&
- "unsupported size for atomic binary op!");
-
- LdrOpc = LoadOps[Log2_32(Size)][isThumb2];
- StrOpc = StoreOps[Log2_32(Size)][isThumb2];
-}
-
// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset if DPR_VFP2. Consequently,
@@ -970,7 +905,7 @@ static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord,
// and extractions.
std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(MVT VT) const{
- const TargetRegisterClass *RRC = 0;
+ const TargetRegisterClass *RRC = nullptr;
uint8_t Cost = 1;
switch (VT.SimpleTy) {
default:
@@ -1007,9 +942,8 @@ ARMTargetLowering::findRepresentativeClass(MVT VT) const{
const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
- default: return 0;
+ default: return nullptr;
case ARMISD::Wrapper: return "ARMISD::Wrapper";
- case ARMISD::WrapperDYN: return "ARMISD::WrapperDYN";
case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC";
case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
case ARMISD::CALL: return "ARMISD::CALL";
@@ -1064,6 +998,8 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::PRELOAD: return "ARMISD::PRELOAD";
+ case ARMISD::WIN__CHKSTK: return "ARMISD:::WIN__CHKSTK";
+
case ARMISD::VCEQ: return "ARMISD::VCEQ";
case ARMISD::VCEQZ: return "ARMISD::VCEQZ";
case ARMISD::VCGE: return "ARMISD::VCGE";
@@ -1079,10 +1015,6 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::VSHL: return "ARMISD::VSHL";
case ARMISD::VSHRs: return "ARMISD::VSHRs";
case ARMISD::VSHRu: return "ARMISD::VSHRu";
- case ARMISD::VSHLLs: return "ARMISD::VSHLLs";
- case ARMISD::VSHLLu: return "ARMISD::VSHLLu";
- case ARMISD::VSHLLi: return "ARMISD::VSHLLi";
- case ARMISD::VSHRN: return "ARMISD::VSHRN";
case ARMISD::VRSHRs: return "ARMISD::VRSHRs";
case ARMISD::VRSHRu: return "ARMISD::VRSHRu";
case ARMISD::VRSHRN: return "ARMISD::VRSHRN";
@@ -1266,40 +1198,58 @@ static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
#include "ARMGenCallingConv.inc"
-/// CCAssignFnForNode - Selects the correct CCAssignFn for a the
-/// given CallingConvention value.
-CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
- bool Return,
- bool isVarArg) const {
+/// getEffectiveCallingConv - Get the effective calling convention, taking into
+/// account presence of floating point hardware and calling convention
+/// limitations, such as support for variadic functions.
+CallingConv::ID
+ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
+ bool isVarArg) const {
switch (CC) {
default:
llvm_unreachable("Unsupported calling convention");
- case CallingConv::Fast:
- if (Subtarget->hasVFP2() && !isVarArg) {
- if (!Subtarget->isAAPCS_ABI())
- return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
- // For AAPCS ABI targets, just use VFP variant of the calling convention.
- return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
- }
- // Fallthrough
- case CallingConv::C: {
- // Use target triple & subtarget features to do actual dispatch.
+ case CallingConv::ARM_AAPCS:
+ case CallingConv::ARM_APCS:
+ case CallingConv::GHC:
+ return CC;
+ case CallingConv::ARM_AAPCS_VFP:
+ return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP;
+ case CallingConv::C:
if (!Subtarget->isAAPCS_ABI())
- return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
- else if (Subtarget->hasVFP2() &&
+ return CallingConv::ARM_APCS;
+ else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() &&
getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
!isVarArg)
- return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
- return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
+ return CallingConv::ARM_AAPCS_VFP;
+ else
+ return CallingConv::ARM_AAPCS;
+ case CallingConv::Fast:
+ if (!Subtarget->isAAPCS_ABI()) {
+ if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
+ return CallingConv::Fast;
+ return CallingConv::ARM_APCS;
+ } else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
+ return CallingConv::ARM_AAPCS_VFP;
+ else
+ return CallingConv::ARM_AAPCS;
}
- case CallingConv::ARM_AAPCS_VFP:
- if (!isVarArg)
- return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
- // Fallthrough
- case CallingConv::ARM_AAPCS:
- return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
+}
+
+/// CCAssignFnForNode - Selects the correct CCAssignFn for the given
+/// CallingConvention.
+CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
+ bool Return,
+ bool isVarArg) const {
+ switch (getEffectiveCallingConv(CC, isVarArg)) {
+ default:
+ llvm_unreachable("Unsupported calling convention");
case CallingConv::ARM_APCS:
return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
+ case CallingConv::ARM_AAPCS:
+ return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
+ case CallingConv::ARM_AAPCS_VFP:
+ return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
+ case CallingConv::Fast:
+ return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
case CallingConv::GHC:
return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
}
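
Editor's note: the two hunks above replace the single CCAssignFnForNode switch with a two-step dispatch: getEffectiveCallingConv first folds the ABI (APCS vs. AAPCS), VFP2/Thumb1 availability, the float ABI, and variadicness into one concrete ARM convention, and CCAssignFnForNode then maps that concrete convention to an argument or return assignment function. The standalone sketch below only models that dispatch order; the enum, the canUseVFP flag (standing in for the hasVFP2 / Thumb1 / hard-float checks), and the printed names are illustrative stand-ins, not LLVM APIs.

// two_step_cc.cpp - hypothetical model of the dispatch introduced above.
#include <cstdio>

enum class CC { C, Fast, ARM_APCS, ARM_AAPCS, ARM_AAPCS_VFP, GHC };

// Step 1: fold target facts into a concrete convention.
CC effectiveCC(CC cc, bool isAAPCS, bool canUseVFP, bool isVarArg) {
  switch (cc) {
  case CC::ARM_APCS: case CC::ARM_AAPCS: case CC::GHC: return cc;
  case CC::ARM_AAPCS_VFP: return isVarArg ? CC::ARM_AAPCS : CC::ARM_AAPCS_VFP;
  case CC::C:
    if (!isAAPCS) return CC::ARM_APCS;
    return (canUseVFP && !isVarArg) ? CC::ARM_AAPCS_VFP : CC::ARM_AAPCS;
  case CC::Fast:
    if (!isAAPCS)
      return (canUseVFP && !isVarArg) ? CC::Fast : CC::ARM_APCS;
    return (canUseVFP && !isVarArg) ? CC::ARM_AAPCS_VFP : CC::ARM_AAPCS;
  }
  return cc;
}

// Step 2: pick the assignment routine from the concrete convention.
const char *assignFn(CC cc, bool isReturn) {
  switch (cc) {
  case CC::ARM_APCS:      return isReturn ? "RetCC_ARM_APCS"      : "CC_ARM_APCS";
  case CC::ARM_AAPCS:     return isReturn ? "RetCC_ARM_AAPCS"     : "CC_ARM_AAPCS";
  case CC::ARM_AAPCS_VFP: return isReturn ? "RetCC_ARM_AAPCS_VFP" : "CC_ARM_AAPCS_VFP";
  case CC::Fast:          return isReturn ? "RetFastCC_ARM_APCS"  : "FastCC_ARM_APCS";
  case CC::GHC:           return isReturn ? "RetCC_ARM_APCS"      : "CC_ARM_APCS_GHC";
  default:                return "?";
  }
}

int main() {
  // A non-variadic C call on a hard-float AAPCS target ends up on the VFP variant.
  CC cc = effectiveCC(CC::C, /*isAAPCS=*/true, /*canUseVFP=*/true, /*isVarArg=*/false);
  std::printf("%s\n", assignFn(cc, /*isReturn=*/false)); // CC_ARM_AAPCS_VFP
}
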
@@ -1348,6 +1298,8 @@ ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
InFlag);
Chain = Hi.getValue(1);
InFlag = Hi.getValue(2);
+ if (!Subtarget->isLittle())
+ std::swap (Lo, Hi);
Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
if (VA.getLocVT() == MVT::v2f64) {
@@ -1363,6 +1315,8 @@ ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
Chain = Hi.getValue(1);
InFlag = Hi.getValue(2);
+ if (!Subtarget->isLittle())
+ std::swap (Lo, Hi);
Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
DAG.getConstant(1, MVT::i32));
@@ -1413,16 +1367,17 @@ void ARMTargetLowering::PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
DAG.getVTList(MVT::i32, MVT::i32), Arg);
- RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));
+ unsigned id = Subtarget->isLittle() ? 0 : 1;
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));
if (NextVA.isRegLoc())
- RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
+ RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
else {
assert(NextVA.isMemLoc());
- if (StackPtr.getNode() == 0)
+ if (!StackPtr.getNode())
StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
- MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
+ MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
dl, DAG, NextVA,
Flags));
}
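
Editor's note: in the LowerCallResult and PassF64ArgInRegs hunks above, an f64 travelling in a pair of GPRs is now assembled and split endian-aware: on big-endian targets the high 32-bit word occupies the first (lower-numbered) register of the pair, so the code indexes the VMOVRRD halves with id and 1-id instead of 0 and 1. A minimal sketch of that ordering, assuming lo and hi are the numerically low and high words of the double (illustrative only, not LLVM code):

#include <cstdint>
#include <cstdio>
#include <utility>

// Returns the words in the order they are placed into the register pair.
std::pair<uint32_t, uint32_t> gprPairOrder(uint32_t lo, uint32_t hi,
                                           bool isLittleEndian) {
  unsigned id = isLittleEndian ? 0 : 1;   // same trick as fmrrd.getValue(id)
  uint32_t first  = id == 0 ? lo : hi;    // goes in VA.getLocReg()
  uint32_t second = id == 0 ? hi : lo;    // goes in NextVA.getLocReg()
  return {first, second};
}

int main() {
  auto be = gprPairOrder(0x11111111u, 0x22222222u, /*isLittleEndian=*/false);
  // Big-endian ARM: the high word lands in the first register of the pair.
  std::printf("first=%#x second=%#x\n", be.first, be.second);
}
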
@@ -1450,14 +1405,19 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
bool isThisReturn = false;
bool isSibCall = false;
+
// Disable tail calls if they're not supported.
- if (!EnableARMTailCalls && !Subtarget->supportsTailCall())
+ if (!Subtarget->supportsTailCall() || MF.getTarget().Options.DisableTailCalls)
isTailCall = false;
+
if (isTailCall) {
// Check if it's really possible to do a tail call.
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(),
Outs, OutVals, Ins, DAG);
+ if (!isTailCall && CLI.CS && CLI.CS->isMustTailCall())
+ report_fatal_error("failed to perform tail call elimination on a call "
+ "site marked musttail");
// We don't support GuaranteedTailCallOpt for ARM, only automatically
// detected sibcalls.
if (isTailCall) {
@@ -1602,7 +1562,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
- Ops, array_lengthof(Ops)));
+ Ops));
}
} else if (!isSibCall) {
assert(VA.isMemLoc());
@@ -1613,8 +1573,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
// Build a sequence of copy-to-reg nodes chained together with token chain
// and flag operands which copy the outgoing args into the appropriate regs.
@@ -1656,8 +1615,9 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
if (EnableARMLongCalls) {
- assert (getTargetMachine().getRelocationModel() == Reloc::Static
- && "long-calls with non-static relocation model!");
+ assert((Subtarget->isTargetWindows() ||
+ getTargetMachine().getRelocationModel() == Reloc::Static) &&
+ "long-calls with non-static relocation model!");
// Handle a global address or an external symbol. If it's not one of
// those, the target's already in a register, so we don't need to do
// anything extra.
@@ -1695,25 +1655,29 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
const GlobalValue *GV = G->getGlobal();
isDirect = true;
bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
- bool isStub = (isExt && Subtarget->isTargetDarwin()) &&
+ bool isStub = (isExt && Subtarget->isTargetMachO()) &&
getTargetMachine().getRelocationModel() != Reloc::Static;
isARMFunc = !Subtarget->isThumb() || isStub;
// ARM call to a local ARM function is predicable.
isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking);
// tBX takes a register source operand.
- if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
- unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
- ARMConstantPoolValue *CPV =
- ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 4);
- SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
- CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
- Callee = DAG.getLoad(getPointerTy(), dl,
- DAG.getEntryNode(), CPAddr,
- MachinePointerInfo::getConstantPool(),
- false, false, false, 0);
- SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
- Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
- getPointerTy(), Callee, PICLabel);
+ if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
+ assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
+ Callee = DAG.getNode(ARMISD::WrapperPIC, dl, getPointerTy(),
+ DAG.getTargetGlobalAddress(GV, dl, getPointerTy()));
+ } else if (Subtarget->isTargetCOFF()) {
+ assert(Subtarget->isTargetWindows() &&
+ "Windows is the only supported COFF target");
+ unsigned TargetFlags = GV->hasDLLImportStorageClass()
+ ? ARMII::MO_DLLIMPORT
+ : ARMII::MO_NO_FLAG;
+ Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), /*Offset=*/0,
+ TargetFlags);
+ if (GV->hasDLLImportStorageClass())
+ Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
+ DAG.getNode(ARMISD::Wrapper, dl, getPointerTy(),
+ Callee), MachinePointerInfo::getGOT(),
+ false, false, false, 0);
} else {
// On ELF targets for PIC code, direct calls should go through the PLT
unsigned OpFlags = 0;
@@ -1724,7 +1688,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
isDirect = true;
- bool isStub = Subtarget->isTargetDarwin() &&
+ bool isStub = Subtarget->isTargetMachO() &&
getTargetMachine().getRelocationModel() != Reloc::Static;
isARMFunc = !Subtarget->isThumb() || isStub;
// tBX takes a register source operand.
@@ -1755,8 +1719,8 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// FIXME: handle tail calls differently.
unsigned CallOpc;
- bool HasMinSizeAttr = MF.getFunction()->getAttributes().
- hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
+ bool HasMinSizeAttr = MF.getFunction()->getAttributes().hasAttribute(
+ AttributeSet::FunctionIndex, Attribute::MinSize);
if (Subtarget->isThumb()) {
if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
CallOpc = ARMISD::CALL_NOLINK;
@@ -1811,10 +1775,10 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
if (isTailCall)
- return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());
+ return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
// Returns a chain and a flag for retval copy to use.
- Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
+ Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
InFlag = Chain.getValue(1);
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
@@ -1841,22 +1805,6 @@ ARMTargetLowering::HandleByVal(
State->getCallOrPrologue() == Call) &&
"unhandled ParmContext");
- // For in-prologue parameters handling, we also introduce stack offset
- // for byval registers: see CallingConvLower.cpp, CCState::HandleByVal.
- // This behaviour outsides AAPCS rules (5.5 Parameters Passing) of how
- // NSAA should be evaluted (NSAA means "next stacked argument address").
- // So: NextStackOffset = NSAAOffset + SizeOfByValParamsStoredInRegs.
- // Then: NSAAOffset = NextStackOffset - SizeOfByValParamsStoredInRegs.
- unsigned NSAAOffset = State->getNextStackOffset();
- if (State->getCallOrPrologue() != Call) {
- for (unsigned i = 0, e = State->getInRegsParamsCount(); i != e; ++i) {
- unsigned RB, RE;
- State->getInRegsParamInfo(i, RB, RE);
- assert(NSAAOffset >= (RE-RB)*4 &&
- "Stack offset for byval regs doesn't introduced anymore?");
- NSAAOffset -= (RE-RB)*4;
- }
- }
if ((ARM::R0 <= reg) && (reg <= ARM::R3)) {
if (Subtarget->isAAPCS_ABI() && Align > 4) {
unsigned AlignInRegs = Align / 4;
@@ -1871,6 +1819,7 @@ ARMTargetLowering::HandleByVal(
// all remained GPR regs. In that case we can't split parameter, we must
// send it to stack. We also must set NCRN to R4, so waste all
// remained registers.
+ const unsigned NSAAOffset = State->getNextStackOffset();
if (Subtarget->isAAPCS_ABI() && NSAAOffset != 0 && size > excess) {
while (State->AllocateReg(GPRArgRegs, 4))
;
@@ -1890,18 +1839,14 @@ ARMTargetLowering::HandleByVal(
// allocate remained amount of registers we need.
for (unsigned i = reg+1; i != ByValRegEnd; ++i)
State->AllocateReg(GPRArgRegs, 4);
- // At a call site, a byval parameter that is split between
- // registers and memory needs its size truncated here. In a
- // function prologue, such byval parameters are reassembled in
- // memory, and are not truncated.
- if (State->getCallOrPrologue() == Call) {
- // Make remained size equal to 0 in case, when
- // the whole structure may be stored into registers.
- if (size < excess)
- size = 0;
- else
- size -= excess;
- }
+ // A byval parameter that is split between registers and memory needs its
+ // size truncated here.
+ // In the case where the entire structure fits in registers, we set the
+ // size in memory to zero.
+ if (size < excess)
+ size = 0;
+ else
+ size -= excess;
}
}
}
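
Editor's note: the simplified HandleByVal logic above always truncates a split byval argument: excess is how many bytes can still be passed in r0-r3, and only the remainder goes to memory (previously this truncation happened only at call sites). A tiny worked example of that arithmetic, as a standalone sketch rather than LLVM code:

#include <cstdio>

// Bytes of a byval argument that must be passed on the stack, given how many
// bytes still fit in the remaining argument registers ('excess').
unsigned stackPartOfByVal(unsigned size, unsigned excess) {
  return size < excess ? 0 : size - excess;
}

int main() {
  std::printf("%u\n", stackPartOfByVal(12, 8));  // 4 bytes spill to the stack
  std::printf("%u\n", stackPartOfByVal(8, 12));  // 0: fits entirely in registers
}
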
@@ -2138,8 +2083,7 @@ static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
RetOps.insert(RetOps.begin() + 1, DAG.getConstant(LROffset, MVT::i32, false));
- return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other,
- RetOps.data(), RetOps.size());
+ return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps);
}
SDValue
@@ -2163,6 +2107,7 @@ ARMTargetLowering::LowerReturn(SDValue Chain,
SDValue Flag;
SmallVector<SDValue, 4> RetOps;
RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
+ bool isLittleEndian = Subtarget->isLittle();
// Copy the result values into the output registers.
for (unsigned i = 0, realRVLocIdx = 0;
@@ -2189,12 +2134,15 @@ ARMTargetLowering::LowerReturn(SDValue Chain,
SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
DAG.getVTList(MVT::i32, MVT::i32), Half);
- Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag);
+ Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
+ HalfGPRs.getValue(isLittleEndian ? 0 : 1),
+ Flag);
Flag = Chain.getValue(1);
RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
VA = RVLocs[++i]; // skip ahead to next loc
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
- HalfGPRs.getValue(1), Flag);
+ HalfGPRs.getValue(isLittleEndian ? 1 : 0),
+ Flag);
Flag = Chain.getValue(1);
RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
VA = RVLocs[++i]; // skip ahead to next loc
@@ -2206,12 +2154,15 @@ ARMTargetLowering::LowerReturn(SDValue Chain,
// Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is
// available.
SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
- DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1);
- Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag);
+ DAG.getVTList(MVT::i32, MVT::i32), Arg);
+ Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
+ fmrrd.getValue(isLittleEndian ? 0 : 1),
+ Flag);
Flag = Chain.getValue(1);
RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
VA = RVLocs[++i]; // skip ahead to next loc
- Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1),
+ Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
+ fmrrd.getValue(isLittleEndian ? 1 : 0),
Flag);
} else
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
@@ -2240,8 +2191,7 @@ ARMTargetLowering::LowerReturn(SDValue Chain,
return LowerInterruptReturn(RetOps, dl, DAG);
}
- return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other,
- RetOps.data(), RetOps.size());
+ return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, RetOps);
}
bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
@@ -2310,10 +2260,10 @@ bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
}
bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
- if (!EnableARMTailCalls && !Subtarget->supportsTailCall())
+ if (!Subtarget->supportsTailCall())
return false;
- if (!CI->isTailCall())
+ if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
return false;
return !Subtarget->isThumb1Only();
@@ -2403,13 +2353,14 @@ ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
Entry.Node = Argument;
Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
Args.push_back(Entry);
+
// FIXME: is there useful debug info available here?
- TargetLowering::CallLoweringInfo CLI(Chain,
- (Type *) Type::getInt32Ty(*DAG.getContext()),
- false, false, false, false,
- 0, CallingConv::C, /*isTailCall=*/false,
- /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
- DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl);
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl).setChain(Chain)
+ .setCallee(CallingConv::C, Type::getInt32Ty(*DAG.getContext()),
+ DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args),
+ 0);
+
std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
return CallResult.first;
}
@@ -2516,7 +2467,7 @@ SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
// If we have T2 ops, we can materialize the address directly via movt/movw
// pair. This is always cheaper.
- if (Subtarget->useMovt()) {
+ if (Subtarget->useMovt(DAG.getMachineFunction())) {
++NumMovwMovt;
// FIXME: Once remat is capable of dealing with instructions with register
// operands, expand this into two nodes.
@@ -2538,56 +2489,46 @@ SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
Reloc::Model RelocM = getTargetMachine().getRelocationModel();
- // FIXME: Enable this for static codegen when tool issues are fixed. Also
- // update ARMFastISel::ARMMaterializeGV.
- if (Subtarget->useMovt() && RelocM != Reloc::Static) {
+ if (Subtarget->useMovt(DAG.getMachineFunction()))
++NumMovwMovt;
- // FIXME: Once remat is capable of dealing with instructions with register
- // operands, expand this into two nodes.
- if (RelocM == Reloc::Static)
- return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
- DAG.getTargetGlobalAddress(GV, dl, PtrVT));
-
- unsigned Wrapper = (RelocM == Reloc::PIC_)
- ? ARMISD::WrapperPIC : ARMISD::WrapperDYN;
- SDValue Result = DAG.getNode(Wrapper, dl, PtrVT,
- DAG.getTargetGlobalAddress(GV, dl, PtrVT));
- if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
- Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
- MachinePointerInfo::getGOT(),
- false, false, false, 0);
- return Result;
- }
- unsigned ARMPCLabelIndex = 0;
- SDValue CPAddr;
- if (RelocM == Reloc::Static) {
- CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
- } else {
- ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
- ARMPCLabelIndex = AFI->createPICLabelUId();
- unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8);
- ARMConstantPoolValue *CPV =
- ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue,
- PCAdj);
- CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
- }
- CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
+ // FIXME: Once remat is capable of dealing with instructions with register
+ // operands, expand this into multiple nodes
+ unsigned Wrapper =
+ RelocM == Reloc::PIC_ ? ARMISD::WrapperPIC : ARMISD::Wrapper;
- SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
- MachinePointerInfo::getConstantPool(),
- false, false, false, 0);
- SDValue Chain = Result.getValue(1);
-
- if (RelocM == Reloc::PIC_) {
- SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
- Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
- }
+ SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
+ SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);
if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
- Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(),
- false, false, false, 0);
+ Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
+ MachinePointerInfo::getGOT(), false, false, false, 0);
+ return Result;
+}
+
+SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
+ SelectionDAG &DAG) const {
+ assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
+ assert(Subtarget->useMovt(DAG.getMachineFunction()) &&
+ "Windows on ARM expects to use movw/movt");
+
+ const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
+ const ARMII::TOF TargetFlags =
+ (GV->hasDLLImportStorageClass() ? ARMII::MO_DLLIMPORT : ARMII::MO_NO_FLAG);
+ EVT PtrVT = getPointerTy();
+ SDValue Result;
+ SDLoc DL(Op);
+ ++NumMovwMovt;
+
+ // FIXME: Once remat is capable of dealing with instructions with register
+ // operands, expand this into two nodes.
+ Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
+ DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*Offset=*/0,
+ TargetFlags));
+ if (GV->hasDLLImportStorageClass())
+ Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
+ MachinePointerInfo::getGOT(), false, false, false, 0);
return Result;
}
@@ -2636,6 +2577,11 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
SDLoc dl(Op);
switch (IntNo) {
default: return SDValue(); // Don't custom lower most intrinsics.
+ case Intrinsic::arm_rbit: {
+ assert(Op.getOperand(1).getValueType() == MVT::i32 &&
+ "RBIT intrinsic must have i32 type!");
+ return DAG.getNode(ARMISD::RBIT, dl, MVT::i32, Op.getOperand(1));
+ }
case Intrinsic::arm_thread_pointer: {
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
@@ -2779,7 +2725,8 @@ ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
}
-
+ if (!Subtarget->isLittle())
+ std::swap (ArgValue, ArgValue2);
return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
}
@@ -2807,11 +2754,11 @@ ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF,
ArgRegsSize = NumGPRs * 4;
// If parameter is split between stack and GPRs...
- if (NumGPRs && Align == 8 &&
+ if (NumGPRs && Align > 4 &&
(ArgRegsSize < ArgSize ||
InRegsParamRecordIdx >= CCInfo.getInRegsParamsCount())) {
- // Add padding for part of param recovered from GPRs, so
- // its last byte must be at address K*8 - 1.
+ // Add padding for part of param recovered from GPRs. For example,
+ // if Align == 8, its last byte must be at address K*8 - 1.
// We need to do it, since remained (stack) part of parameter has
// stack alignment, and we need to "attach" "GPRs head" without gaps
// to it:
@@ -2821,8 +2768,7 @@ ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF,
//
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
unsigned Padding =
- ((ArgRegsSize + AFI->getArgRegsSaveSize() + Align - 1) & ~(Align-1)) -
- (ArgRegsSize + AFI->getArgRegsSaveSize());
+ OffsetToAlignment(ArgRegsSize + AFI->getArgRegsSaveSize(), Align);
ArgRegsSaveSize = ArgRegsSize + Padding;
} else
// We don't need to extend regs save size for byval parameters if they
@@ -2846,10 +2792,12 @@ ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
unsigned OffsetFromOrigArg,
unsigned ArgOffset,
unsigned ArgSize,
- bool ForceMutable) const {
+ bool ForceMutable,
+ unsigned ByValStoreOffset,
+ unsigned TotalArgRegsSaveSize) const {
// Currently, two use-cases possible:
- // Case #1. Non var-args function, and we meet first byval parameter.
+ // Case #1. Non-var-args function, and we meet first byval parameter.
// Setup first unallocated register as first byval register;
// eat all remained registers
// (these two actions are performed by HandleByVal method).
@@ -2883,7 +2831,6 @@ ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
// Note: once stack area for byval/varargs registers
// was initialized, it can't be initialized again.
if (ArgRegsSaveSize) {
-
unsigned Padding = ArgRegsSaveSize - ArgRegsSize;
if (Padding) {
@@ -2892,11 +2839,18 @@ ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
AFI->setStoredByValParamsPadding(Padding);
}
- int FrameIndex = MFI->CreateFixedObject(
- ArgRegsSaveSize,
- Padding + ArgOffset,
- false);
+ int FrameIndex = MFI->CreateFixedObject(ArgRegsSaveSize,
+ Padding +
+ ByValStoreOffset -
+ (int64_t)TotalArgRegsSaveSize,
+ false);
SDValue FIN = DAG.getFrameIndex(FrameIndex, getPointerTy());
+ if (Padding) {
+ MFI->CreateFixedObject(Padding,
+ ArgOffset + ByValStoreOffset -
+ (int64_t)ArgRegsSaveSize,
+ false);
+ }
SmallVector<SDValue, 4> MemOps;
for (unsigned i = 0; firstRegToSaveIndex < lastRegToSaveIndex;
@@ -2921,13 +2875,18 @@ ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
AFI->setArgRegsSaveSize(ArgRegsSaveSize + AFI->getArgRegsSaveSize());
if (!MemOps.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOps[0], MemOps.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
return FrameIndex;
- } else
+ } else {
+ if (ArgSize == 0) {
+ // We cannot allocate a zero-byte object for the first variadic argument,
+ // so just make up a size.
+ ArgSize = 4;
+ }
// This will point to the next argument passed via stack.
return MFI->CreateFixedObject(
- 4, AFI->getStoredByValParamsPadding() + ArgOffset, !ForceMutable);
+ ArgSize, ArgOffset, !ForceMutable);
+ }
}
// Setup stack frame, the va_list pointer will start from.
@@ -2935,6 +2894,7 @@ void
ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
SDLoc dl, SDValue &Chain,
unsigned ArgOffset,
+ unsigned TotalArgRegsSaveSize,
bool ForceMutable) const {
MachineFunction &MF = DAG.getMachineFunction();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
@@ -2945,8 +2905,9 @@ ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
// If there is no regs to be stored, just point address after last
// argument passed via stack.
int FrameIndex =
- StoreByValRegs(CCInfo, DAG, dl, Chain, 0, CCInfo.getInRegsParamsCount(),
- 0, ArgOffset, 0, ForceMutable);
+ StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
+ CCInfo.getInRegsParamsCount(), 0, ArgOffset, 0, ForceMutable,
+ 0, TotalArgRegsSaveSize);
AFI->setVarArgsFrameIndex(FrameIndex);
}
@@ -2983,6 +2944,51 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
// We also increase this value in case of varargs function.
AFI->setArgRegsSaveSize(0);
+ unsigned ByValStoreOffset = 0;
+ unsigned TotalArgRegsSaveSize = 0;
+ unsigned ArgRegsSaveSizeMaxAlign = 4;
+
+ // Calculate the amount of stack space that we need to allocate to store
+ // byval and variadic arguments that are passed in registers.
+ // We need to know this before we allocate the first byval or variadic
+ // argument, as they will be allocated a stack slot below the CFA (Canonical
+ // Frame Address, the stack pointer at entry to the function).
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ if (VA.isMemLoc()) {
+ int index = VA.getValNo();
+ if (index != lastInsIndex) {
+ ISD::ArgFlagsTy Flags = Ins[index].Flags;
+ if (Flags.isByVal()) {
+ unsigned ExtraArgRegsSize;
+ unsigned ExtraArgRegsSaveSize;
+ computeRegArea(CCInfo, MF, CCInfo.getInRegsParamsProceed(),
+ Flags.getByValSize(),
+ ExtraArgRegsSize, ExtraArgRegsSaveSize);
+
+ TotalArgRegsSaveSize += ExtraArgRegsSaveSize;
+ if (Flags.getByValAlign() > ArgRegsSaveSizeMaxAlign)
+ ArgRegsSaveSizeMaxAlign = Flags.getByValAlign();
+ CCInfo.nextInRegsParam();
+ }
+ lastInsIndex = index;
+ }
+ }
+ }
+ CCInfo.rewindByValRegsInfo();
+ lastInsIndex = -1;
+ if (isVarArg) {
+ unsigned ExtraArgRegsSize;
+ unsigned ExtraArgRegsSaveSize;
+ computeRegArea(CCInfo, MF, CCInfo.getInRegsParamsCount(), 0,
+ ExtraArgRegsSize, ExtraArgRegsSaveSize);
+ TotalArgRegsSaveSize += ExtraArgRegsSaveSize;
+ }
+ // If the arg regs save area contains N-byte aligned values, the
+ // bottom of it must be at least N-byte aligned.
+ TotalArgRegsSaveSize = RoundUpToAlignment(TotalArgRegsSaveSize, ArgRegsSaveSizeMaxAlign);
+ TotalArgRegsSaveSize = std::min(TotalArgRegsSaveSize, 16U);
+
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
std::advance(CurOrigArg, Ins[VA.getValNo()].OrigArgIndex - CurArgIdx);
@@ -3081,18 +3087,23 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
// a tail call.
if (Flags.isByVal()) {
unsigned CurByValIndex = CCInfo.getInRegsParamsProceed();
+
+ ByValStoreOffset = RoundUpToAlignment(ByValStoreOffset, Flags.getByValAlign());
int FrameIndex = StoreByValRegs(
CCInfo, DAG, dl, Chain, CurOrigArg,
CurByValIndex,
Ins[VA.getValNo()].PartOffset,
VA.getLocMemOffset(),
Flags.getByValSize(),
- true /*force mutable frames*/);
+ true /*force mutable frames*/,
+ ByValStoreOffset,
+ TotalArgRegsSaveSize);
+ ByValStoreOffset += Flags.getByValSize();
+ ByValStoreOffset = std::min(ByValStoreOffset, 16U);
InVals.push_back(DAG.getFrameIndex(FrameIndex, getPointerTy()));
CCInfo.nextInRegsParam();
} else {
- unsigned FIOffset = VA.getLocMemOffset() +
- AFI->getStoredByValParamsPadding();
+ unsigned FIOffset = VA.getLocMemOffset();
int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
FIOffset, true);
@@ -3110,7 +3121,10 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
// varargs
if (isVarArg)
VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
- CCInfo.getNextStackOffset());
+ CCInfo.getNextStackOffset(),
+ TotalArgRegsSaveSize);
+
+ AFI->setArgumentStackSize(CCInfo.getNextStackOffset());
return Chain;
}
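
Editor's note: the LowerFormalArguments changes above pre-compute TotalArgRegsSaveSize before any argument is laid out: the r0-r3 spill space needed by byval and variadic arguments is summed, rounded up to the largest byval alignment seen, and clamped to 16 bytes, since r0-r3 can never contribute more. A small sketch of that final align-and-clamp step, under the assumption that the raw size and maximum alignment are already known (illustrative only):

#include <algorithm>
#include <cstdio>

// Round 'value' up to a multiple of 'align' (align is a power of two here).
unsigned roundUpToAlignment(unsigned value, unsigned align) {
  return (value + align - 1) & ~(align - 1);
}

unsigned totalArgRegsSaveSize(unsigned rawSize, unsigned maxByValAlign) {
  unsigned size = roundUpToAlignment(rawSize, maxByValAlign);
  return std::min(size, 16u);   // r0-r3 hold at most 16 bytes
}

int main() {
  std::printf("%u\n", totalArgRegsSaveSize(12, 8)); // 12 rounds up to 16
  std::printf("%u\n", totalArgRegsSaveSize(4, 4));  // stays 4
}
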
@@ -3224,11 +3238,96 @@ ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
}
+std::pair<SDValue, SDValue>
+ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
+ SDValue &ARMcc) const {
+ assert(Op.getValueType() == MVT::i32 && "Unsupported value type");
+
+ SDValue Value, OverflowCmp;
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+
+
+ // FIXME: We are currently always generating CMPs because we don't support
+ // generating CMN through the backend. This is not as good as the natural
+ // CMP case because it causes a register dependency and cannot be folded
+ // later.
+
+ switch (Op.getOpcode()) {
+ default:
+ llvm_unreachable("Unknown overflow instruction!");
+ case ISD::SADDO:
+ ARMcc = DAG.getConstant(ARMCC::VC, MVT::i32);
+ Value = DAG.getNode(ISD::ADD, SDLoc(Op), Op.getValueType(), LHS, RHS);
+ OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, Value, LHS);
+ break;
+ case ISD::UADDO:
+ ARMcc = DAG.getConstant(ARMCC::HS, MVT::i32);
+ Value = DAG.getNode(ISD::ADD, SDLoc(Op), Op.getValueType(), LHS, RHS);
+ OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, Value, LHS);
+ break;
+ case ISD::SSUBO:
+ ARMcc = DAG.getConstant(ARMCC::VC, MVT::i32);
+ Value = DAG.getNode(ISD::SUB, SDLoc(Op), Op.getValueType(), LHS, RHS);
+ OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, LHS, RHS);
+ break;
+ case ISD::USUBO:
+ ARMcc = DAG.getConstant(ARMCC::HS, MVT::i32);
+ Value = DAG.getNode(ISD::SUB, SDLoc(Op), Op.getValueType(), LHS, RHS);
+ OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, LHS, RHS);
+ break;
+ } // switch (...)
+
+ return std::make_pair(Value, OverflowCmp);
+}
+
+
+SDValue
+ARMTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
+ // Let legalize expand this if it isn't a legal type yet.
+ if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
+ return SDValue();
+
+ SDValue Value, OverflowCmp;
+ SDValue ARMcc;
+ std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
+ SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
+ // We use 0 and 1 as false and true values.
+ SDValue TVal = DAG.getConstant(1, MVT::i32);
+ SDValue FVal = DAG.getConstant(0, MVT::i32);
+ EVT VT = Op.getValueType();
+
+ SDValue Overflow = DAG.getNode(ARMISD::CMOV, SDLoc(Op), VT, TVal, FVal,
+ ARMcc, CCR, OverflowCmp);
+
+ SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
+ return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), VTs, Value, Overflow);
+}
+
+
SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
SDValue Cond = Op.getOperand(0);
SDValue SelectTrue = Op.getOperand(1);
SDValue SelectFalse = Op.getOperand(2);
SDLoc dl(Op);
+ unsigned Opc = Cond.getOpcode();
+
+ if (Cond.getResNo() == 1 &&
+ (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
+ Opc == ISD::USUBO)) {
+ if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
+ return SDValue();
+
+ SDValue Value, OverflowCmp;
+ SDValue ARMcc;
+ std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
+ SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
+ EVT VT = Op.getValueType();
+
+ return DAG.getNode(ARMISD::CMOV, SDLoc(Op), VT, SelectTrue, SelectFalse,
+ ARMcc, CCR, OverflowCmp);
+
+ }
// Convert:
//
@@ -3279,7 +3378,7 @@ SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
static ISD::CondCode getInverseCCForVSEL(ISD::CondCode CC) {
if (CC == ISD::SETNE)
return ISD::SETEQ;
- return ISD::getSetCCSwappedOperands(CC);
+ return ISD::getSetCCInverse(CC, true);
}
static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
@@ -3530,7 +3629,7 @@ ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
ARMcc = DAG.getConstant(CondCode, MVT::i32);
SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
- return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7);
+ return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops);
}
return SDValue();
@@ -3570,11 +3669,11 @@ SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
- SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
+ SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
if (CondCode2 != ARMCC::AL) {
ARMcc = DAG.getConstant(CondCode2, MVT::i32);
SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
- Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
+ Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
}
return Res;
}
@@ -3771,7 +3870,7 @@ SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
// Bitcast operand 1 to i32.
if (SrcVT == MVT::f64)
Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
- &Tmp1, 1).getValue(1);
+ Tmp1).getValue(1);
Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);
// Or in the signbit with integer operations.
@@ -3787,7 +3886,7 @@ SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
// f64: Or the high part with signbit and then combine two parts.
Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
- &Tmp0, 1);
+ Tmp0);
SDValue Lo = Tmp0.getValue(0);
SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
@@ -3799,6 +3898,9 @@ SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
MachineFrameInfo *MFI = MF.getFrameInfo();
MFI->setReturnAddressIsTaken(true);
+ if (verifyReturnAddressArgumentIsConstant(Op, DAG))
+ return SDValue();
+
EVT VT = Op.getValueType();
SDLoc dl(Op);
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
@@ -3816,14 +3918,16 @@ SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
}
SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
- MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ const ARMBaseRegisterInfo &ARI =
+ *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
MFI->setFrameAddressIsTaken(true);
EVT VT = Op.getValueType();
SDLoc dl(Op); // FIXME probably not meaningful
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
- unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin())
- ? ARM::R7 : ARM::R11;
+ unsigned FrameReg = ARI.getFrameRegister(MF);
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
while (Depth--)
FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
@@ -3832,6 +3936,18 @@ SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
return FrameAddr;
}
+// FIXME? Maybe this could be a TableGen attribute on some registers and
+// this table could be generated automatically from RegInfo.
+unsigned ARMTargetLowering::getRegisterByName(const char* RegName,
+ EVT VT) const {
+ unsigned Reg = StringSwitch<unsigned>(RegName)
+ .Case("sp", ARM::SP)
+ .Default(0);
+ if (Reg)
+ return Reg;
+ report_fatal_error("Invalid register name global variable");
+}
+
/// ExpandBITCAST - If the target supports VFP, this function is called to
/// expand a bit convert where either the source or destination type is i64 to
/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64
@@ -3861,8 +3977,15 @@ static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
// Turn f64->i64 into VMOVRRD.
if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
- SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
- DAG.getVTList(MVT::i32, MVT::i32), &Op, 1);
+ SDValue Cvt;
+ if (TLI.isBigEndian() && SrcVT.isVector() &&
+ SrcVT.getVectorNumElements() > 1)
+ Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
+ DAG.getVTList(MVT::i32, MVT::i32),
+ DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op));
+ else
+ Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
+ DAG.getVTList(MVT::i32, MVT::i32), Op);
// Merge the pieces into a single i64 value.
return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
}
@@ -3918,7 +4041,7 @@ SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
CCR, Cmp);
SDValue Ops[2] = { Lo, Hi };
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, dl);
}
/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
@@ -3952,7 +4075,7 @@ SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
CCR, Cmp);
SDValue Ops[2] = { Lo, Hi };
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, dl);
}
SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
@@ -4157,7 +4280,7 @@ static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
// First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
// captures the result into a carry flag.
unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
- Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), &Hi, 1);
+ Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi);
// The low part is an ARMISD::RRX operand, which shifts the carry in.
Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
@@ -4380,7 +4503,6 @@ static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
// Value = 0x0000nnff: Op=x, Cmode=1100.
OpCmode = 0xc;
Imm = SplatBits >> 8;
- SplatBits |= 0xff;
break;
}
@@ -4389,7 +4511,6 @@ static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
// Value = 0x00nnffff: Op=x, Cmode=1101.
OpCmode = 0xd;
Imm = SplatBits >> 16;
- SplatBits |= 0xffff;
break;
}
@@ -4418,9 +4539,13 @@ static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
BitMask <<= 8;
ImmMask <<= 1;
}
+
+ if (DAG.getTargetLoweringInfo().isBigEndian())
+ // swap higher and lower 32 bit word
+ Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4);
+
// Op=1, Cmode=1110.
OpCmode = 0x1e;
- SplatBits = Val;
VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
break;
}
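
Editor's note: for the 64-bit element case above (Op=1, Cmode=1110), Imm is an 8-bit mask with one bit per byte of the value; on big-endian targets the two 32-bit words change places, so the low and high nibbles of the mask are swapped. A one-line illustration of that swap (not LLVM code):

#include <cstdio>

// Swap the per-byte masks of the low and high 32-bit words.
unsigned swapWordMasks(unsigned imm) {
  return ((imm & 0xf) << 4) | ((imm & 0xf0) >> 4);
}

int main() {
  std::printf("%#x\n", swapWordMasks(0x0f)); // bytes 0-3 selected -> bytes 4-7: 0xf0
}
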
@@ -4917,7 +5042,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
Ops.push_back(N);
Ops.push_back(Op.getOperand(I));
Ops.push_back(DAG.getConstant(I, MVT::i32));
- N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, &Ops[0], 3);
+ N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops);
}
}
return N;
@@ -4928,7 +5053,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32,
Op.getOperand(i)));
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
- SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts);
+ SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, Ops);
Val = LowerBUILD_VECTOR(Val, DAG, ST);
if (Val.getNode())
return DAG.getNode(ISD::BITCAST, dl, VT, Val);
@@ -4964,7 +5089,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < NumElts; ++i)
Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
- SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts);
+ SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
return DAG.getNode(ISD::BITCAST, dl, VT, Val);
}
@@ -5271,12 +5396,10 @@ static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
if (V2.getNode()->getOpcode() == ISD::UNDEF)
return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
- DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8,
- &VTBLMask[0], 8));
+ DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, VTBLMask));
return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
- DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8,
- &VTBLMask[0], 8));
+ DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, VTBLMask));
}
static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
@@ -5429,7 +5552,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
DAG.getConstant(ShuffleMask[i] & (NumElts-1),
MVT::i32)));
}
- SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts);
+ SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
return DAG.getNode(ISD::BITCAST, dl, VT, Val);
}
@@ -5666,7 +5789,7 @@ static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), MVT::i32));
}
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N),
- MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts);
+ MVT::getVectorVT(TruncVT, NumElts), Ops);
}
static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
@@ -6004,12 +6127,12 @@ SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const {
? "__sincos_stret" : "__sincosf_stret";
SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy());
- TargetLowering::
- CallLoweringInfo CLI(DAG.getEntryNode(), Type::getVoidTy(*DAG.getContext()),
- false, false, false, false, 0,
- CallingConv::C, /*isTaillCall=*/false,
- /*doesNotRet=*/false, /*isReturnValueUsed*/false,
- Callee, Args, DAG, dl);
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
+ .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()), Callee,
+ std::move(Args), 0)
+ .setDiscardResult();
+
std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
SDValue LoadSin = DAG.getLoad(ArgVT, dl, CallResult.second, SRet,
@@ -6031,40 +6154,11 @@ static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
return Op;
- // Aquire/Release load/store is not legal for targets without a
+ // Acquire/Release load/store is not legal for targets without a
// dmb or equivalent available.
return SDValue();
}
-static void
-ReplaceATOMIC_OP_64(SDNode *Node, SmallVectorImpl<SDValue>& Results,
- SelectionDAG &DAG) {
- SDLoc dl(Node);
- assert (Node->getValueType(0) == MVT::i64 &&
- "Only know how to expand i64 atomics");
- AtomicSDNode *AN = cast<AtomicSDNode>(Node);
-
- SmallVector<SDValue, 6> Ops;
- Ops.push_back(Node->getOperand(0)); // Chain
- Ops.push_back(Node->getOperand(1)); // Ptr
- for(unsigned i=2; i<Node->getNumOperands(); i++) {
- // Low part
- Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- Node->getOperand(i), DAG.getIntPtrConstant(0)));
- // High part
- Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- Node->getOperand(i), DAG.getIntPtrConstant(1)));
- }
- SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
- SDValue Result =
- DAG.getAtomic(Node->getOpcode(), dl, MVT::i64, Tys, Ops.data(), Ops.size(),
- cast<MemSDNode>(Node)->getMemOperand(), AN->getOrdering(),
- AN->getSynchScope());
- SDValue OpsF[] = { Result.getValue(0), Result.getValue(1) };
- Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2));
- Results.push_back(Result.getValue(2));
-}
-
static void ReplaceREADCYCLECOUNTER(SDNode *N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG,
@@ -6085,8 +6179,7 @@ static void ReplaceREADCYCLECOUNTER(SDNode *N,
};
Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
- DAG.getVTList(MVT::i32, MVT::Other), &Ops[0],
- array_lengthof(Ops));
+ DAG.getVTList(MVT::i32, MVT::Other), Ops);
OutChain = Cycles32.getValue(1);
} else {
// Intrinsic is defined to return 0 on unsupported platforms. Technically
@@ -6109,8 +6202,15 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
case ISD::GlobalAddress:
- return Subtarget->isTargetDarwin() ? LowerGlobalAddressDarwin(Op, DAG) :
- LowerGlobalAddressELF(Op, DAG);
+ switch (Subtarget->getTargetTriple().getObjectFormat()) {
+ default: llvm_unreachable("unknown object format");
+ case Triple::COFF:
+ return LowerGlobalAddressWindows(Op, DAG);
+ case Triple::ELF:
+ return LowerGlobalAddressELF(Op, DAG);
+ case Triple::MachO:
+ return LowerGlobalAddressDarwin(Op, DAG);
+ }
case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
case ISD::SELECT: return LowerSELECT(Op, DAG);
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
@@ -6155,11 +6255,20 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::ADDE:
case ISD::SUBC:
case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
+ case ISD::SADDO:
+ case ISD::UADDO:
+ case ISD::SSUBO:
+ case ISD::USUBO:
+ return LowerXALUO(Op, DAG);
case ISD::ATOMIC_LOAD:
case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG);
case ISD::FSINCOS: return LowerFSINCOS(Op, DAG);
case ISD::SDIVREM:
case ISD::UDIVREM: return LowerDivRem(Op, DAG);
+ case ISD::DYNAMIC_STACKALLOC:
+ if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment())
+ return LowerDYNAMIC_STACKALLOC(Op, DAG);
+ llvm_unreachable("Don't know how to custom lower this!");
}
}
@@ -6182,22 +6291,6 @@ void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
case ISD::READCYCLECOUNTER:
ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget);
return;
- case ISD::ATOMIC_STORE:
- case ISD::ATOMIC_LOAD:
- case ISD::ATOMIC_LOAD_ADD:
- case ISD::ATOMIC_LOAD_AND:
- case ISD::ATOMIC_LOAD_NAND:
- case ISD::ATOMIC_LOAD_OR:
- case ISD::ATOMIC_LOAD_SUB:
- case ISD::ATOMIC_LOAD_XOR:
- case ISD::ATOMIC_SWAP:
- case ISD::ATOMIC_CMP_SWAP:
- case ISD::ATOMIC_LOAD_MIN:
- case ISD::ATOMIC_LOAD_UMIN:
- case ISD::ATOMIC_LOAD_MAX:
- case ISD::ATOMIC_LOAD_UMAX:
- ReplaceATOMIC_OP_64(N, Results, DAG);
- return;
}
if (Res.getNode())
Results.push_back(Res);
@@ -6207,538 +6300,6 @@ void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
// ARM Scheduler Hooks
//===----------------------------------------------------------------------===//
-MachineBasicBlock *
-ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
- MachineBasicBlock *BB,
- unsigned Size) const {
- unsigned dest = MI->getOperand(0).getReg();
- unsigned ptr = MI->getOperand(1).getReg();
- unsigned oldval = MI->getOperand(2).getReg();
- unsigned newval = MI->getOperand(3).getReg();
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(4).getImm());
- DebugLoc dl = MI->getDebugLoc();
- bool isThumb2 = Subtarget->isThumb2();
-
- MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
- unsigned scratch = MRI.createVirtualRegister(isThumb2 ?
- (const TargetRegisterClass*)&ARM::rGPRRegClass :
- (const TargetRegisterClass*)&ARM::GPRRegClass);
-
- if (isThumb2) {
- MRI.constrainRegClass(dest, &ARM::rGPRRegClass);
- MRI.constrainRegClass(oldval, &ARM::rGPRRegClass);
- MRI.constrainRegClass(newval, &ARM::rGPRRegClass);
- }
-
- unsigned ldrOpc, strOpc;
- getExclusiveOperation(Size, Ord, isThumb2, ldrOpc, strOpc);
-
- MachineFunction *MF = BB->getParent();
- const BasicBlock *LLVM_BB = BB->getBasicBlock();
- MachineFunction::iterator It = BB;
- ++It; // insert the new blocks after the current block
-
- MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MF->insert(It, loop1MBB);
- MF->insert(It, loop2MBB);
- MF->insert(It, exitMBB);
-
- // Transfer the remainder of BB and its successor edges to exitMBB.
- exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
- exitMBB->transferSuccessorsAndUpdatePHIs(BB);
-
- // thisMBB:
- // ...
- // fallthrough --> loop1MBB
- BB->addSuccessor(loop1MBB);
-
- // loop1MBB:
- // ldrex dest, [ptr]
- // cmp dest, oldval
- // bne exitMBB
- BB = loop1MBB;
- MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
- if (ldrOpc == ARM::t2LDREX)
- MIB.addImm(0);
- AddDefaultPred(MIB);
- AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
- .addReg(dest).addReg(oldval));
- BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
- .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
- BB->addSuccessor(loop2MBB);
- BB->addSuccessor(exitMBB);
-
- // loop2MBB:
- // strex scratch, newval, [ptr]
- // cmp scratch, #0
- // bne loop1MBB
- BB = loop2MBB;
- MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval).addReg(ptr);
- if (strOpc == ARM::t2STREX)
- MIB.addImm(0);
- AddDefaultPred(MIB);
- AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
- .addReg(scratch).addImm(0));
- BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
- .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
- BB->addSuccessor(loop1MBB);
- BB->addSuccessor(exitMBB);
-
- // exitMBB:
- // ...
- BB = exitMBB;
-
- MI->eraseFromParent(); // The instruction is gone now.
-
- return BB;
-}
-
-MachineBasicBlock *
-ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
- unsigned Size, unsigned BinOpcode) const {
- // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
-
- const BasicBlock *LLVM_BB = BB->getBasicBlock();
- MachineFunction *MF = BB->getParent();
- MachineFunction::iterator It = BB;
- ++It;
-
- unsigned dest = MI->getOperand(0).getReg();
- unsigned ptr = MI->getOperand(1).getReg();
- unsigned incr = MI->getOperand(2).getReg();
- AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());
- DebugLoc dl = MI->getDebugLoc();
- bool isThumb2 = Subtarget->isThumb2();
-
- MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
- if (isThumb2) {
- MRI.constrainRegClass(dest, &ARM::rGPRRegClass);
- MRI.constrainRegClass(ptr, &ARM::rGPRRegClass);
- MRI.constrainRegClass(incr, &ARM::rGPRRegClass);
- }
-
- unsigned ldrOpc, strOpc;
- getExclusiveOperation(Size, Ord, isThumb2, ldrOpc, strOpc);
-
- MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MF->insert(It, loopMBB);
- MF->insert(It, exitMBB);
-
- // Transfer the remainder of BB and its successor edges to exitMBB.
- exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
- exitMBB->transferSuccessorsAndUpdatePHIs(BB);
-
- const TargetRegisterClass *TRC = isThumb2 ?
- (const TargetRegisterClass*)&ARM::rGPRRegClass :
- (const TargetRegisterClass*)&ARM::GPRRegClass;
- unsigned scratch = MRI.createVirtualRegister(TRC);
- unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);
-
- // thisMBB:
- // ...
- // fallthrough --> loopMBB
- BB->addSuccessor(loopMBB);
-
- // loopMBB:
- // ldrex dest, ptr
- // <binop> scratch2, dest, incr
- // strex scratch, scratch2, ptr
- // cmp scratch, #0
- // bne- loopMBB
- // fallthrough --> exitMBB
- BB = loopMBB;
- MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
- if (ldrOpc == ARM::t2LDREX)
- MIB.addImm(0);
- AddDefaultPred(MIB);
- if (BinOpcode) {
- // operand order needs to go the other way for NAND
- if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr)
- AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
- addReg(incr).addReg(dest)).addReg(0);
- else
- AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
- addReg(dest).addReg(incr)).addReg(0);
- }
-
- MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr);
- if (strOpc == ARM::t2STREX)
- MIB.addImm(0);
- AddDefaultPred(MIB);
- AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
- .addReg(scratch).addImm(0));
- BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
- .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
-
- BB->addSuccessor(loopMBB);
- BB->addSuccessor(exitMBB);
-
- // exitMBB:
- // ...
- BB = exitMBB;
-
- MI->eraseFromParent(); // The instruction is gone now.
-
- return BB;
-}
-
-MachineBasicBlock *
-ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI,
- MachineBasicBlock *BB,
- unsigned Size,
- bool signExtend,
- ARMCC::CondCodes Cond) const {
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
-
- const BasicBlock *LLVM_BB = BB->getBasicBlock();
- MachineFunction *MF = BB->getParent();
- MachineFunction::iterator It = BB;
- ++It;
-
- unsigned dest = MI->getOperand(0).getReg();
- unsigned ptr = MI->getOperand(1).getReg();
- unsigned incr = MI->getOperand(2).getReg();
- unsigned oldval = dest;
- AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());
- DebugLoc dl = MI->getDebugLoc();
- bool isThumb2 = Subtarget->isThumb2();
-
- MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
- if (isThumb2) {
- MRI.constrainRegClass(dest, &ARM::rGPRRegClass);
- MRI.constrainRegClass(ptr, &ARM::rGPRRegClass);
- MRI.constrainRegClass(incr, &ARM::rGPRRegClass);
- }
-
- unsigned ldrOpc, strOpc, extendOpc;
- getExclusiveOperation(Size, Ord, isThumb2, ldrOpc, strOpc);
- switch (Size) {
- default: llvm_unreachable("unsupported size for AtomicBinaryMinMax!");
- case 1:
- extendOpc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
- break;
- case 2:
- extendOpc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
- break;
- case 4:
- extendOpc = 0;
- break;
- }
-
- MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MF->insert(It, loopMBB);
- MF->insert(It, exitMBB);
-
- // Transfer the remainder of BB and its successor edges to exitMBB.
- exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
- exitMBB->transferSuccessorsAndUpdatePHIs(BB);
-
- const TargetRegisterClass *TRC = isThumb2 ?
- (const TargetRegisterClass*)&ARM::rGPRRegClass :
- (const TargetRegisterClass*)&ARM::GPRRegClass;
- unsigned scratch = MRI.createVirtualRegister(TRC);
- unsigned scratch2 = MRI.createVirtualRegister(TRC);
-
- // thisMBB:
- // ...
- // fallthrough --> loopMBB
- BB->addSuccessor(loopMBB);
-
- // loopMBB:
- // ldrex dest, ptr
- // (sign extend dest, if required)
- // cmp dest, incr
- // cmov.cond scratch2, incr, dest
- // strex scratch, scratch2, ptr
- // cmp scratch, #0
- // bne- loopMBB
- // fallthrough --> exitMBB
- BB = loopMBB;
- MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
- if (ldrOpc == ARM::t2LDREX)
- MIB.addImm(0);
- AddDefaultPred(MIB);
-
- // Sign extend the value, if necessary.
- if (signExtend && extendOpc) {
- oldval = MRI.createVirtualRegister(isThumb2 ? &ARM::rGPRRegClass
- : &ARM::GPRnopcRegClass);
- if (!isThumb2)
- MRI.constrainRegClass(dest, &ARM::GPRnopcRegClass);
- AddDefaultPred(BuildMI(BB, dl, TII->get(extendOpc), oldval)
- .addReg(dest)
- .addImm(0));
- }
-
- // Build compare and cmov instructions.
- AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
- .addReg(oldval).addReg(incr));
- BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr), scratch2)
- .addReg(incr).addReg(oldval).addImm(Cond).addReg(ARM::CPSR);
-
- MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr);
- if (strOpc == ARM::t2STREX)
- MIB.addImm(0);
- AddDefaultPred(MIB);
- AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
- .addReg(scratch).addImm(0));
- BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
- .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
-
- BB->addSuccessor(loopMBB);
- BB->addSuccessor(exitMBB);
-
- // exitMBB:
- // ...
- BB = exitMBB;
-
- MI->eraseFromParent(); // The instruction is gone now.
-
- return BB;
-}
-
-MachineBasicBlock *
-ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB,
- unsigned Op1, unsigned Op2,
- bool NeedsCarry, bool IsCmpxchg,
- bool IsMinMax, ARMCC::CondCodes CC) const {
- // This also handles ATOMIC_SWAP and ATOMIC_STORE, indicated by Op1==0.
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
-
- const BasicBlock *LLVM_BB = BB->getBasicBlock();
- MachineFunction *MF = BB->getParent();
- MachineFunction::iterator It = BB;
- ++It;
-
- bool isStore = (MI->getOpcode() == ARM::ATOMIC_STORE_I64);
- unsigned offset = (isStore ? -2 : 0);
- unsigned destlo = MI->getOperand(0).getReg();
- unsigned desthi = MI->getOperand(1).getReg();
- unsigned ptr = MI->getOperand(offset+2).getReg();
- unsigned vallo = MI->getOperand(offset+3).getReg();
- unsigned valhi = MI->getOperand(offset+4).getReg();
- unsigned OrdIdx = offset + (IsCmpxchg ? 7 : 5);
- AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(OrdIdx).getImm());
- DebugLoc dl = MI->getDebugLoc();
- bool isThumb2 = Subtarget->isThumb2();
-
- MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
- if (isThumb2) {
- MRI.constrainRegClass(destlo, &ARM::rGPRRegClass);
- MRI.constrainRegClass(desthi, &ARM::rGPRRegClass);
- MRI.constrainRegClass(ptr, &ARM::rGPRRegClass);
- MRI.constrainRegClass(vallo, &ARM::rGPRRegClass);
- MRI.constrainRegClass(valhi, &ARM::rGPRRegClass);
- }
-
- unsigned ldrOpc, strOpc;
- getExclusiveOperation(8, Ord, isThumb2, ldrOpc, strOpc);
-
- MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *contBB = 0, *cont2BB = 0;
- if (IsCmpxchg || IsMinMax)
- contBB = MF->CreateMachineBasicBlock(LLVM_BB);
- if (IsCmpxchg)
- cont2BB = MF->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
-
- MF->insert(It, loopMBB);
- if (IsCmpxchg || IsMinMax) MF->insert(It, contBB);
- if (IsCmpxchg) MF->insert(It, cont2BB);
- MF->insert(It, exitMBB);
-
- // Transfer the remainder of BB and its successor edges to exitMBB.
- exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
- exitMBB->transferSuccessorsAndUpdatePHIs(BB);
-
- const TargetRegisterClass *TRC = isThumb2 ?
- (const TargetRegisterClass*)&ARM::tGPRRegClass :
- (const TargetRegisterClass*)&ARM::GPRRegClass;
- unsigned storesuccess = MRI.createVirtualRegister(TRC);
-
- // thisMBB:
- // ...
- // fallthrough --> loopMBB
- BB->addSuccessor(loopMBB);
-
- // loopMBB:
- // ldrexd r2, r3, ptr
- // <binopa> r0, r2, incr
- // <binopb> r1, r3, incr
- // strexd storesuccess, r0, r1, ptr
- // cmp storesuccess, #0
- // bne- loopMBB
- // fallthrough --> exitMBB
- BB = loopMBB;
-
- if (!isStore) {
- // Load
- if (isThumb2) {
- AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc))
- .addReg(destlo, RegState::Define)
- .addReg(desthi, RegState::Define)
- .addReg(ptr));
- } else {
- unsigned GPRPair0 = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
- AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc))
- .addReg(GPRPair0, RegState::Define).addReg(ptr));
- // Copy r2/r3 into dest. (This copy will normally be coalesced.)
- BuildMI(BB, dl, TII->get(TargetOpcode::COPY), destlo)
- .addReg(GPRPair0, 0, ARM::gsub_0);
- BuildMI(BB, dl, TII->get(TargetOpcode::COPY), desthi)
- .addReg(GPRPair0, 0, ARM::gsub_1);
- }
- }
-
- unsigned StoreLo, StoreHi;
- if (IsCmpxchg) {
- // Add early exit
- for (unsigned i = 0; i < 2; i++) {
- AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr :
- ARM::CMPrr))
- .addReg(i == 0 ? destlo : desthi)
- .addReg(i == 0 ? vallo : valhi));
- BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
- .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
- BB->addSuccessor(exitMBB);
- BB->addSuccessor(i == 0 ? contBB : cont2BB);
- BB = (i == 0 ? contBB : cont2BB);
- }
-
- // Copy to physregs for strexd
- StoreLo = MI->getOperand(5).getReg();
- StoreHi = MI->getOperand(6).getReg();
- } else if (Op1) {
- // Perform binary operation
- unsigned tmpRegLo = MRI.createVirtualRegister(TRC);
- AddDefaultPred(BuildMI(BB, dl, TII->get(Op1), tmpRegLo)
- .addReg(destlo).addReg(vallo))
- .addReg(NeedsCarry ? ARM::CPSR : 0, getDefRegState(NeedsCarry));
- unsigned tmpRegHi = MRI.createVirtualRegister(TRC);
- AddDefaultPred(BuildMI(BB, dl, TII->get(Op2), tmpRegHi)
- .addReg(desthi).addReg(valhi))
- .addReg(IsMinMax ? ARM::CPSR : 0, getDefRegState(IsMinMax));
-
- StoreLo = tmpRegLo;
- StoreHi = tmpRegHi;
- } else {
- // Copy to physregs for strexd
- StoreLo = vallo;
- StoreHi = valhi;
- }
- if (IsMinMax) {
- // Compare and branch to exit block.
- BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
- .addMBB(exitMBB).addImm(CC).addReg(ARM::CPSR);
- BB->addSuccessor(exitMBB);
- BB->addSuccessor(contBB);
- BB = contBB;
- StoreLo = vallo;
- StoreHi = valhi;
- }
-
- // Store
- if (isThumb2) {
- MRI.constrainRegClass(StoreLo, &ARM::rGPRRegClass);
- MRI.constrainRegClass(StoreHi, &ARM::rGPRRegClass);
- AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), storesuccess)
- .addReg(StoreLo).addReg(StoreHi).addReg(ptr));
- } else {
- // Marshal a pair...
- unsigned StorePair = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
- unsigned UndefPair = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
- unsigned r1 = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
- BuildMI(BB, dl, TII->get(TargetOpcode::IMPLICIT_DEF), UndefPair);
- BuildMI(BB, dl, TII->get(TargetOpcode::INSERT_SUBREG), r1)
- .addReg(UndefPair)
- .addReg(StoreLo)
- .addImm(ARM::gsub_0);
- BuildMI(BB, dl, TII->get(TargetOpcode::INSERT_SUBREG), StorePair)
- .addReg(r1)
- .addReg(StoreHi)
- .addImm(ARM::gsub_1);
-
- // ...and store it
- AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), storesuccess)
- .addReg(StorePair).addReg(ptr));
- }
- // Cmp+jump
- AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
- .addReg(storesuccess).addImm(0));
- BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
- .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
-
- BB->addSuccessor(loopMBB);
- BB->addSuccessor(exitMBB);
-
- // exitMBB:
- // ...
- BB = exitMBB;
-
- MI->eraseFromParent(); // The instruction is gone now.
-
- return BB;
-}
-
-MachineBasicBlock *
-ARMTargetLowering::EmitAtomicLoad64(MachineInstr *MI, MachineBasicBlock *BB) const {
-
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
-
- unsigned destlo = MI->getOperand(0).getReg();
- unsigned desthi = MI->getOperand(1).getReg();
- unsigned ptr = MI->getOperand(2).getReg();
- AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());
- DebugLoc dl = MI->getDebugLoc();
- bool isThumb2 = Subtarget->isThumb2();
-
- MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
- if (isThumb2) {
- MRI.constrainRegClass(destlo, &ARM::rGPRRegClass);
- MRI.constrainRegClass(desthi, &ARM::rGPRRegClass);
- MRI.constrainRegClass(ptr, &ARM::rGPRRegClass);
- }
- unsigned ldrOpc, strOpc;
- getExclusiveOperation(8, Ord, isThumb2, ldrOpc, strOpc);
-
- MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(ldrOpc));
-
- if (isThumb2) {
- MIB.addReg(destlo, RegState::Define)
- .addReg(desthi, RegState::Define)
- .addReg(ptr);
-
- } else {
- unsigned GPRPair0 = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
- MIB.addReg(GPRPair0, RegState::Define).addReg(ptr);
-
- // Copy GPRPair0 into dest. (This copy will normally be coalesced.)
- BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), destlo)
- .addReg(GPRPair0, 0, ARM::gsub_0);
- BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), desthi)
- .addReg(GPRPair0, 0, ARM::gsub_1);
- }
- AddDefaultPred(MIB);
-
- MI->eraseFromParent(); // The instruction is gone now.
-
- return BB;
-}
-
/// SetupEntryBlockForSjLj - Insert code into the entry block that creates and
/// registers the function context.
void ARMTargetLowering::
@@ -7193,7 +6754,7 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const {
}
// N.B. the order the invoke BBs are processed in doesn't matter here.
- const uint16_t *SavedRegs = RI.getCalleeSavedRegs(MF);
+ const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF);
SmallVector<MachineBasicBlock*, 64> MBBLPads;
for (SmallPtrSet<MachineBasicBlock*, 64>::iterator
I = InvokeBBs.begin(), E = InvokeBBs.end(); I != E; ++I) {
@@ -7390,8 +6951,8 @@ ARMTargetLowering::EmitStructByval(MachineInstr *MI,
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
unsigned UnitSize = 0;
- const TargetRegisterClass *TRC = 0;
- const TargetRegisterClass *VecTRC = 0;
+ const TargetRegisterClass *TRC = nullptr;
+ const TargetRegisterClass *VecTRC = nullptr;
bool IsThumb1 = Subtarget->isThumb1Only();
bool IsThumb2 = Subtarget->isThumb2();
@@ -7425,7 +6986,7 @@ ARMTargetLowering::EmitStructByval(MachineInstr *MI,
? (const TargetRegisterClass *)&ARM::DPairRegClass
: UnitSize == 8
? (const TargetRegisterClass *)&ARM::DPRRegClass
- : 0;
+ : nullptr;
unsigned BytesLeft = SizeVal % UnitSize;
unsigned LoopSize = SizeVal - BytesLeft;
@@ -7493,8 +7054,7 @@ ARMTargetLowering::EmitStructByval(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
// Load an immediate to varEnd.
@@ -7608,6 +7168,72 @@ ARMTargetLowering::EmitStructByval(MachineInstr *MI,
}
MachineBasicBlock *
+ARMTargetLowering::EmitLowered__chkstk(MachineInstr *MI,
+ MachineBasicBlock *MBB) const {
+ const TargetMachine &TM = getTargetMachine();
+ const TargetInstrInfo &TII = *TM.getInstrInfo();
+ DebugLoc DL = MI->getDebugLoc();
+
+ assert(Subtarget->isTargetWindows() &&
+ "__chkstk is only supported on Windows");
+ assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");
+
+ // __chkstk takes the number of words to allocate on the stack in R4, and
+ // returns the stack adjustment in number of bytes in R4. This will not
+ // clobber any other registers (other than the obvious lr).
+ //
+ // Although, technically, IP should be considered a register which may be
+ // clobbered, the call itself will not touch it. Windows on ARM is a pure
+ // thumb-2 environment, so there is no interworking required. As a result, we
+ // do not expect a veneer to be emitted by the linker, clobbering IP.
+ //
+ // Each module receives its own copy of __chkstk, so no import thunk is
+ // required, again, ensuring that IP is not clobbered.
+ //
+ // Finally, although some linkers may theoretically provide a trampoline for
+ // out-of-range calls (which are quite common due to a 32M range limitation of
+ // branches for Thumb), we can generate the long-call version via
+ // -mcmodel=large, alleviating the need for the trampoline which may clobber
+ // IP.
+
+ switch (TM.getCodeModel()) {
+ case CodeModel::Small:
+ case CodeModel::Medium:
+ case CodeModel::Default:
+ case CodeModel::Kernel:
+ BuildMI(*MBB, MI, DL, TII.get(ARM::tBL))
+ .addImm((unsigned)ARMCC::AL).addReg(0)
+ .addExternalSymbol("__chkstk")
+ .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
+ .addReg(ARM::R4, RegState::Implicit | RegState::Define)
+ .addReg(ARM::R12, RegState::Implicit | RegState::Define | RegState::Dead);
+ break;
+ case CodeModel::Large:
+ case CodeModel::JITDefault: {
+ MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
+ unsigned Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass);
+
+ BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg)
+ .addExternalSymbol("__chkstk");
+ BuildMI(*MBB, MI, DL, TII.get(ARM::tBLXr))
+ .addImm((unsigned)ARMCC::AL).addReg(0)
+ .addReg(Reg, RegState::Kill)
+ .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
+ .addReg(ARM::R4, RegState::Implicit | RegState::Define)
+ .addReg(ARM::R12, RegState::Implicit | RegState::Define | RegState::Dead);
+ break;
+ }
+ }
+
+ AddDefaultCC(AddDefaultPred(BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr),
+ ARM::SP)
+ .addReg(ARM::SP).addReg(ARM::R4)));
+
+ MI->eraseFromParent();
+ return MBB;
+}
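
The R4 contract spelled out in the comment above, and relied on again by LowerDYNAMIC_STACKALLOC further down, reduces to a small piece of arithmetic. A hedged sketch (editorial, not part of the patch; assumes the allocation size is already a multiple of 4):

    #include <cstdint>

    // Caller places the allocation size in 4-byte words in R4 (Size >> 2);
    // __chkstk probes the stack and reports the adjustment back in bytes in
    // R4; the emitter above then finishes with "sub sp, sp, r4".
    uint32_t chkstkStackAdjustment(uint32_t AllocBytes) {
      uint32_t WordsInR4 = AllocBytes >> 2;  // passed to __chkstk
      uint32_t BytesOutR4 = WordsInR4 * 4;   // expected result in R4
      return BytesOutR4;                     // amount subtracted from SP
    }
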
+
+MachineBasicBlock *
ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB) const {
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
@@ -7670,131 +7296,6 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MI->eraseFromParent();
return BB;
}
- case ARM::ATOMIC_LOAD_ADD_I8:
- return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
- case ARM::ATOMIC_LOAD_ADD_I16:
- return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
- case ARM::ATOMIC_LOAD_ADD_I32:
- return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
-
- case ARM::ATOMIC_LOAD_AND_I8:
- return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
- case ARM::ATOMIC_LOAD_AND_I16:
- return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
- case ARM::ATOMIC_LOAD_AND_I32:
- return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
-
- case ARM::ATOMIC_LOAD_OR_I8:
- return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
- case ARM::ATOMIC_LOAD_OR_I16:
- return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
- case ARM::ATOMIC_LOAD_OR_I32:
- return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
-
- case ARM::ATOMIC_LOAD_XOR_I8:
- return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
- case ARM::ATOMIC_LOAD_XOR_I16:
- return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
- case ARM::ATOMIC_LOAD_XOR_I32:
- return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
-
- case ARM::ATOMIC_LOAD_NAND_I8:
- return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
- case ARM::ATOMIC_LOAD_NAND_I16:
- return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
- case ARM::ATOMIC_LOAD_NAND_I32:
- return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
-
- case ARM::ATOMIC_LOAD_SUB_I8:
- return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
- case ARM::ATOMIC_LOAD_SUB_I16:
- return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
- case ARM::ATOMIC_LOAD_SUB_I32:
- return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
-
- case ARM::ATOMIC_LOAD_MIN_I8:
- return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::LT);
- case ARM::ATOMIC_LOAD_MIN_I16:
- return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::LT);
- case ARM::ATOMIC_LOAD_MIN_I32:
- return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::LT);
-
- case ARM::ATOMIC_LOAD_MAX_I8:
- return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::GT);
- case ARM::ATOMIC_LOAD_MAX_I16:
- return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::GT);
- case ARM::ATOMIC_LOAD_MAX_I32:
- return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::GT);
-
- case ARM::ATOMIC_LOAD_UMIN_I8:
- return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::LO);
- case ARM::ATOMIC_LOAD_UMIN_I16:
- return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::LO);
- case ARM::ATOMIC_LOAD_UMIN_I32:
- return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::LO);
-
- case ARM::ATOMIC_LOAD_UMAX_I8:
- return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::HI);
- case ARM::ATOMIC_LOAD_UMAX_I16:
- return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::HI);
- case ARM::ATOMIC_LOAD_UMAX_I32:
- return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::HI);
-
- case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0);
- case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0);
- case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0);
-
- case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1);
- case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2);
- case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4);
-
- case ARM::ATOMIC_LOAD_I64:
- return EmitAtomicLoad64(MI, BB);
-
- case ARM::ATOMIC_LOAD_ADD_I64:
- return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr,
- isThumb2 ? ARM::t2ADCrr : ARM::ADCrr,
- /*NeedsCarry*/ true);
- case ARM::ATOMIC_LOAD_SUB_I64:
- return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
- isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
- /*NeedsCarry*/ true);
- case ARM::ATOMIC_LOAD_OR_I64:
- return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr,
- isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
- case ARM::ATOMIC_LOAD_XOR_I64:
- return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2EORrr : ARM::EORrr,
- isThumb2 ? ARM::t2EORrr : ARM::EORrr);
- case ARM::ATOMIC_LOAD_AND_I64:
- return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr,
- isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
- case ARM::ATOMIC_STORE_I64:
- case ARM::ATOMIC_SWAP_I64:
- return EmitAtomicBinary64(MI, BB, 0, 0, false);
- case ARM::ATOMIC_CMP_SWAP_I64:
- return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
- isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
- /*NeedsCarry*/ false, /*IsCmpxchg*/true);
- case ARM::ATOMIC_LOAD_MIN_I64:
- return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
- isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
- /*NeedsCarry*/ true, /*IsCmpxchg*/false,
- /*IsMinMax*/ true, ARMCC::LT);
- case ARM::ATOMIC_LOAD_MAX_I64:
- return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
- isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
- /*NeedsCarry*/ true, /*IsCmpxchg*/false,
- /*IsMinMax*/ true, ARMCC::GE);
- case ARM::ATOMIC_LOAD_UMIN_I64:
- return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
- isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
- /*NeedsCarry*/ true, /*IsCmpxchg*/false,
- /*IsMinMax*/ true, ARMCC::LO);
- case ARM::ATOMIC_LOAD_UMAX_I64:
- return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
- isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
- /*NeedsCarry*/ true, /*IsCmpxchg*/false,
- /*IsMinMax*/ true, ARMCC::HS);
case ARM::tMOVCCr_pseudo: {
// To "insert" a SELECT_CC instruction, we actually have to insert the
@@ -7820,8 +7321,7 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
BB->addSuccessor(copy0MBB);
@@ -7854,7 +7354,7 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case ARM::BCCi64:
case ARM::BCCZi64: {
// If there is an unconditional branch to the other successor, remove it.
- BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
+ BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end());
// Compare both parts that make up the double comparison separately for
// equality.
@@ -7939,8 +7439,7 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to sinkMBB.
SinkBB->splice(SinkBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
SinkBB->transferSuccessorsAndUpdatePHIs(BB);
BB->addSuccessor(RSBBB);
@@ -7983,6 +7482,8 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case ARM::COPY_STRUCT_BYVAL_I32:
++NumLoopByVals;
return EmitStructByval(MI, BB);
+ case ARM::WIN__CHKSTK:
+ return EmitLowered__chkstk(MI, BB);
}
}
@@ -8273,7 +7774,9 @@ static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1,
// Get widened type and narrowed type.
MVT widenType;
unsigned numElem = VT.getVectorNumElements();
- switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
+
+ EVT inputLaneType = Vec.getValueType().getVectorElementType();
+ switch (inputLaneType.getSimpleVT().SimpleTy) {
case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
@@ -8281,9 +7784,9 @@ static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1,
llvm_unreachable("Invalid vector element type for padd optimization.");
}
- SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N),
- widenType, &Ops[0], Ops.size());
- return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, tmp);
+ SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), widenType, Ops);
+ unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE;
+ return DAG.getNode(ExtOp, SDLoc(N), VT, tmp);
}
static SDValue findMUL_LOHI(SDValue V) {
@@ -8341,7 +7844,7 @@ static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode,
// Look for the glued ADDE.
SDNode* AddeNode = AddcNode->getGluedUser();
- if (AddeNode == NULL)
+ if (!AddeNode)
return SDValue();
// Make sure it is really an ADDE.
@@ -8376,9 +7879,9 @@ static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode,
// Figure out the high and low input values to the MLAL node.
SDValue* HiMul = &MULOp;
- SDValue* HiAdd = NULL;
- SDValue* LoMul = NULL;
- SDValue* LowAdd = NULL;
+ SDValue* HiAdd = nullptr;
+ SDValue* LoMul = nullptr;
+ SDValue* LowAdd = nullptr;
if (IsLeftOperandMUL)
HiAdd = &AddeOp1;
@@ -8395,7 +7898,7 @@ static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode,
LowAdd = &AddcOp0;
}
- if (LoMul == NULL)
+ if (!LoMul)
return SDValue();
if (LoMul->getNode() != HiMul->getNode())
@@ -8412,8 +7915,7 @@ static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode,
Ops.push_back(*HiAdd);
SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcNode),
- DAG.getVTList(MVT::i32, MVT::i32),
- &Ops[0], Ops.size());
+ DAG.getVTList(MVT::i32, MVT::i32), Ops);
// Replace the ADDs' nodes uses by the MLA node's values.
SDValue HiMLALResult(MLALNode.getNode(), 1);
@@ -8937,6 +8439,8 @@ static SDValue PerformVMOVRRDCombine(SDNode *N,
std::min(4U, LD->getAlignment() / 2));
DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1));
+ if (DCI.DAG.getTargetLoweringInfo().isBigEndian())
+ std::swap (NewLD1, NewLD2);
SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2);
DCI.RemoveFromWorklist(LD);
DAG.DeleteNode(LD);
@@ -9004,7 +8508,8 @@ static SDValue PerformSTORECombine(SDNode *N,
SDLoc DL(St);
SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal);
SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
- for (unsigned i = 0; i < NumElems; ++i) ShuffleVec[i] = i * SizeRatio;
+ for (unsigned i = 0; i < NumElems; ++i)
+ ShuffleVec[i] = TLI.isBigEndian() ? (i+1) * SizeRatio - 1 : i * SizeRatio;
// Can't shuffle using an illegal type.
if (!TLI.isTypeLegal(WideVecVT)) return SDValue();
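
A quick worked example of the store-combine mask above (editorial sketch, not from the patch): with NumElems = 4 and SizeRatio = 2, the little-endian mask picks lanes 0, 2, 4, 6 of the widened vector, while the big-endian mask picks 1, 3, 5, 7, i.e. the sub-element of each original wide element that holds its low-order (truncated) bits.

    #include <cstdio>

    int main() {
      const unsigned NumElems = 4, SizeRatio = 2;
      for (unsigned i = 0; i < NumElems; ++i)
        std::printf("lane %u: LE %u  BE %u\n",
                    i, i * SizeRatio, (i + 1) * SizeRatio - 1);
      return 0;   // prints LE 0,2,4,6 and BE 1,3,5,7
    }
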
@@ -9050,8 +8555,7 @@ static SDValue PerformSTORECombine(SDNode *N,
Increment);
Chains.push_back(Ch);
}
- return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &Chains[0],
- Chains.size());
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
}
if (!ISD::isNormalStore(St))
@@ -9062,16 +8566,18 @@ static SDValue PerformSTORECombine(SDNode *N,
if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&
StVal.getNode()->hasOneUse()) {
SelectionDAG &DAG = DCI.DAG;
+ bool isBigEndian = DAG.getTargetLoweringInfo().isBigEndian();
SDLoc DL(St);
SDValue BasePtr = St->getBasePtr();
SDValue NewST1 = DAG.getStore(St->getChain(), DL,
- StVal.getNode()->getOperand(0), BasePtr,
- St->getPointerInfo(), St->isVolatile(),
+ StVal.getNode()->getOperand(isBigEndian ? 1 : 0 ),
+ BasePtr, St->getPointerInfo(), St->isVolatile(),
St->isNonTemporal(), St->getAlignment());
SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
DAG.getConstant(4, MVT::i32));
- return DAG.getStore(NewST1.getValue(0), DL, StVal.getNode()->getOperand(1),
+ return DAG.getStore(NewST1.getValue(0), DL,
+ StVal.getNode()->getOperand(isBigEndian ? 0 : 1),
OffsetPtr, St->getPointerInfo(), St->isVolatile(),
St->isNonTemporal(),
std::min(4U, St->getAlignment() / 2));
@@ -9147,7 +8653,7 @@ static SDValue PerformBUILD_VECTORCombine(SDNode *N,
DCI.AddToWorklist(V.getNode());
}
EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts);
- SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops.data(), NumElts);
+ SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops);
return DAG.getNode(ISD::BITCAST, dl, VT, BV);
}
@@ -9230,7 +8736,7 @@ PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
// Fold obvious case.
V = V.getOperand(0);
else {
- V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V);
+ V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V);
// Make the DAGCombiner fold the bitcasts.
DCI.AddToWorklist(V.getNode());
}
@@ -9426,7 +8932,7 @@ static SDValue CombineBaseUpdate(SDNode *N,
Tys[n] = VecTy;
Tys[n++] = MVT::i32;
Tys[n] = MVT::Other;
- SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs+2);
+ SDVTList SDTys = DAG.getVTList(ArrayRef<EVT>(Tys, NumResultVecs+2));
SmallVector<SDValue, 8> Ops;
Ops.push_back(N->getOperand(0)); // incoming chain
Ops.push_back(N->getOperand(AddrOpIdx));
@@ -9436,8 +8942,7 @@ static SDValue CombineBaseUpdate(SDNode *N,
}
MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys,
- Ops.data(), Ops.size(),
- MemInt->getMemoryVT(),
+ Ops, MemInt->getMemoryVT(),
MemInt->getMemOperand());
// Update the uses.
@@ -9506,11 +9011,11 @@ static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
for (n = 0; n < NumVecs; ++n)
Tys[n] = VT;
Tys[n] = MVT::Other;
- SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1);
+ SDVTList SDTys = DAG.getVTList(ArrayRef<EVT>(Tys, NumVecs+1));
SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys,
- Ops, 2, VLDMemInt->getMemoryVT(),
+ Ops, VLDMemInt->getMemoryVT(),
VLDMemInt->getMemOperand());
// Update the uses.
@@ -9759,9 +9264,6 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
// loads from a constant pool.
case Intrinsic::arm_neon_vshifts:
case Intrinsic::arm_neon_vshiftu:
- case Intrinsic::arm_neon_vshiftls:
- case Intrinsic::arm_neon_vshiftlu:
- case Intrinsic::arm_neon_vshiftn:
case Intrinsic::arm_neon_vrshifts:
case Intrinsic::arm_neon_vrshiftu:
case Intrinsic::arm_neon_vrshiftn:
@@ -9792,12 +9294,6 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
}
return SDValue();
- case Intrinsic::arm_neon_vshiftls:
- case Intrinsic::arm_neon_vshiftlu:
- if (isVShiftLImm(N->getOperand(2), VT, true, Cnt))
- break;
- llvm_unreachable("invalid shift count for vshll intrinsic");
-
case Intrinsic::arm_neon_vrshifts:
case Intrinsic::arm_neon_vrshiftu:
if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
@@ -9815,7 +9311,6 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
break;
llvm_unreachable("invalid shift count for vqshlu intrinsic");
- case Intrinsic::arm_neon_vshiftn:
case Intrinsic::arm_neon_vrshiftn:
case Intrinsic::arm_neon_vqshiftns:
case Intrinsic::arm_neon_vqshiftnu:
@@ -9838,16 +9333,6 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
case Intrinsic::arm_neon_vshiftu:
// Opcode already set above.
break;
- case Intrinsic::arm_neon_vshiftls:
- case Intrinsic::arm_neon_vshiftlu:
- if (Cnt == VT.getVectorElementType().getSizeInBits())
- VShiftOpc = ARMISD::VSHLLi;
- else
- VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ?
- ARMISD::VSHLLs : ARMISD::VSHLLu);
- break;
- case Intrinsic::arm_neon_vshiftn:
- VShiftOpc = ARMISD::VSHRN; break;
case Intrinsic::arm_neon_vrshifts:
VShiftOpc = ARMISD::VRSHRs; break;
case Intrinsic::arm_neon_vrshiftu:
@@ -10128,7 +9613,7 @@ ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
if (Res.getNode()) {
APInt KnownZero, KnownOne;
- DAG.ComputeMaskedBits(SDValue(N,0), KnownZero, KnownOne);
+ DAG.computeKnownBits(SDValue(N,0), KnownZero, KnownOne);
// Capture demanded bits information that would be otherwise lost.
if (KnownZero == 0xfffffffe)
Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
@@ -10211,7 +9696,8 @@ bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
}
-bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
+bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT, unsigned,
+ bool *Fast) const {
// The AllowsUnaligned flag models the SCTLR.A setting in ARM CPUs
bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
@@ -10233,7 +9719,7 @@ bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const
case MVT::v2f64: {
// For any little-endian targets with neon, we can support unaligned ld/st
// of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
- // A big-endian target may also explictly support unaligned accesses
+ // A big-endian target may also explicitly support unaligned accesses
if (Subtarget->hasNEON() && (AllowsUnaligned || isLittleEndian())) {
if (Fast)
*Fast = true;
@@ -10265,11 +9751,11 @@ EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
bool Fast;
if (Size >= 16 &&
(memOpAlign(SrcAlign, DstAlign, 16) ||
- (allowsUnalignedMemoryAccesses(MVT::v2f64, &Fast) && Fast))) {
+ (allowsUnalignedMemoryAccesses(MVT::v2f64, 0, &Fast) && Fast))) {
return MVT::v2f64;
} else if (Size >= 8 &&
(memOpAlign(SrcAlign, DstAlign, 8) ||
- (allowsUnalignedMemoryAccesses(MVT::f64, &Fast) && Fast))) {
+ (allowsUnalignedMemoryAccesses(MVT::f64, 0, &Fast) && Fast))) {
return MVT::f64;
}
}
@@ -10714,11 +10200,11 @@ bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
return true;
}
-void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) const {
+void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth) const {
unsigned BitWidth = KnownOne.getBitWidth();
KnownZero = KnownOne = APInt(BitWidth, 0);
switch (Op.getOpcode()) {
@@ -10734,15 +10220,29 @@ void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
break;
case ARMISD::CMOV: {
// Bits are known zero/one if known on the LHS and RHS.
- DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
+ DAG.computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
if (KnownZero == 0 && KnownOne == 0) return;
APInt KnownZeroRHS, KnownOneRHS;
- DAG.ComputeMaskedBits(Op.getOperand(1), KnownZeroRHS, KnownOneRHS, Depth+1);
+ DAG.computeKnownBits(Op.getOperand(1), KnownZeroRHS, KnownOneRHS, Depth+1);
KnownZero &= KnownZeroRHS;
KnownOne &= KnownOneRHS;
return;
}
+ case ISD::INTRINSIC_W_CHAIN: {
+ ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
+ Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
+ switch (IntID) {
+ default: return;
+ case Intrinsic::arm_ldaex:
+ case Intrinsic::arm_ldrex: {
+ EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
+ unsigned MemBits = VT.getScalarType().getSizeInBits();
+ KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
+ return;
+ }
+ }
+ }
}
}
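
The new INTRINSIC_W_CHAIN case simply records that a narrow exclusive load zero-fills the rest of its i32 result. A hedged one-liner restating it (the helper name is illustrative):

    #include "llvm/ADT/APInt.h"

    // For an i8 ldrex/ldaex (MemBits = 8) with a 32-bit result, the top
    // 24 bits are reported as known zero, exactly as in the case above.
    llvm::APInt knownZeroForExclusiveLoad(unsigned BitWidth, unsigned MemBits) {
      return llvm::APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
    }
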
@@ -10818,7 +10318,7 @@ ARMTargetLowering::getSingleConstraintMatchWeight(
Value *CallOperandVal = info.CallOperandVal;
// If we don't have a value, we can't do a match,
// but allow it at the lowest weight.
- if (CallOperandVal == NULL)
+ if (!CallOperandVal)
return CW_Default;
Type *type = CallOperandVal->getType();
// Look at the constraint type.
@@ -10897,7 +10397,7 @@ void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
std::string &Constraint,
std::vector<SDValue>&Ops,
SelectionDAG &DAG) const {
- SDValue Result(0, 0);
+ SDValue Result;
// Currently only support length 1 constraints.
if (Constraint.length() != 1) return;
@@ -11096,16 +10596,41 @@ SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
Type *RetTy = (Type*)StructType::get(Ty, Ty, NULL);
SDLoc dl(Op);
- TargetLowering::
- CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned, false, true,
- 0, getLibcallCallingConv(LC), /*isTailCall=*/false,
- /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
- Callee, Args, DAG, dl);
- std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl).setChain(InChain)
+ .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
+ .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
+ std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
return CallInfo.first;
}
+SDValue
+ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
+ assert(Subtarget->isTargetWindows() && "unsupported target platform");
+ SDLoc DL(Op);
+
+ // Get the inputs.
+ SDValue Chain = Op.getOperand(0);
+ SDValue Size = Op.getOperand(1);
+
+ SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size,
+ DAG.getConstant(2, MVT::i32));
+
+ SDValue Flag;
+ Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag);
+ Flag = Chain.getValue(1);
+
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag);
+
+ SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
+ Chain = NewSP.getValue(1);
+
+ SDValue Ops[2] = { NewSP, Chain };
+ return DAG.getMergeValues(Ops, DL);
+}
+
bool
ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
// The ARM target isn't yet aware of offsets.
@@ -11191,6 +10716,7 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.writeMem = true;
return true;
}
+ case Intrinsic::arm_ldaex:
case Intrinsic::arm_ldrex: {
PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
Info.opc = ISD::INTRINSIC_W_CHAIN;
@@ -11203,6 +10729,7 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.writeMem = false;
return true;
}
+ case Intrinsic::arm_stlex:
case Intrinsic::arm_strex: {
PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
Info.opc = ISD::INTRINSIC_W_CHAIN;
@@ -11215,6 +10742,7 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.writeMem = true;
return true;
}
+ case Intrinsic::arm_stlexd:
case Intrinsic::arm_strexd: {
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::i64;
@@ -11226,6 +10754,7 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.writeMem = true;
return true;
}
+ case Intrinsic::arm_ldaexd:
case Intrinsic::arm_ldrexd: {
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::i64;
@@ -11243,3 +10772,178 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
return false;
}
+
+/// \brief Returns true if it is beneficial to convert a load of a constant
+/// to just the constant itself.
+bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
+ Type *Ty) const {
+ assert(Ty->isIntegerTy());
+
+ unsigned Bits = Ty->getPrimitiveSizeInBits();
+ if (Bits == 0 || Bits > 32)
+ return false;
+ return true;
+}
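
Stated as a free-standing predicate (an editorial sketch; ARM can typically build any constant of up to 32 bits from immediates, so only wider constants are worth keeping in a constant pool):

    // Mirrors the bit-width check above.
    bool convertsConstantLoadToImm(unsigned TypeBits) {
      return TypeBits != 0 && TypeBits <= 32;   // i1..i32 -> true, wider -> false
    }
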
+
+bool ARMTargetLowering::shouldExpandAtomicInIR(Instruction *Inst) const {
+ // Loads and stores less than 64 bits are already atomic; ones above that
+ // are doomed anyway, so defer to the default libcall and blame the OS when
+ // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
+ // anything for those.
+ bool IsMClass = Subtarget->isMClass();
+ if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+ unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
+ return Size == 64 && !IsMClass;
+ } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
+ return LI->getType()->getPrimitiveSizeInBits() == 64 && !IsMClass;
+ }
+
+ // For the real atomic operations, we have ldrex/strex up to 32 bits,
+ // and up to 64 bits on the non-M profiles
+ unsigned AtomicLimit = IsMClass ? 32 : 64;
+ return Inst->getType()->getPrimitiveSizeInBits() <= AtomicLimit;
+}
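
The policy above amounts to a small decision table. A hedged sketch with stand-in parameters instead of the real Instruction queries:

    // Plain atomic loads/stores up to 32 bits are already single instructions;
    // only 64-bit ones need the ldrexd/strexd expansion, which M-class lacks.
    // Read-modify-write and cmpxchg use ldrex/strex up to the profile's
    // exclusive-access width (32 bits on M-class, 64 otherwise).
    bool expandAtomicInIR(bool IsPlainLoadOrStore, unsigned SizeInBits,
                          bool IsMClass) {
      if (IsPlainLoadOrStore)
        return SizeInBits == 64 && !IsMClass;
      unsigned Limit = IsMClass ? 32 : 64;
      return SizeInBits <= Limit;
    }
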
+
+Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
+ AtomicOrdering Ord) const {
+ Module *M = Builder.GetInsertBlock()->getParent()->getParent();
+ Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
+ bool IsAcquire =
+ Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent;
+
+ // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
+ // intrinsic must return {i32, i32} and we have to recombine them into a
+ // single i64 here.
+ if (ValTy->getPrimitiveSizeInBits() == 64) {
+ Intrinsic::ID Int =
+ IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
+ Function *Ldrex = llvm::Intrinsic::getDeclaration(M, Int);
+
+ Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
+ Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");
+
+ Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
+ Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
+ if (!Subtarget->isLittle())
+ std::swap (Lo, Hi);
+ Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
+ Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
+ return Builder.CreateOr(
+ Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64");
+ }
+
+ Type *Tys[] = { Addr->getType() };
+ Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
+ Function *Ldrex = llvm::Intrinsic::getDeclaration(M, Int, Tys);
+
+ return Builder.CreateTruncOrBitCast(
+ Builder.CreateCall(Ldrex, Addr),
+ cast<PointerType>(Addr->getType())->getElementType());
+}
+
+Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
+ Value *Addr,
+ AtomicOrdering Ord) const {
+ Module *M = Builder.GetInsertBlock()->getParent()->getParent();
+ bool IsRelease =
+ Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent;
+
+ // Since the intrinsics must have legal type, the i64 intrinsics take two
+ // parameters: "i32, i32". We must marshal Val into the appropriate form
+ // before the call.
+ if (Val->getType()->getPrimitiveSizeInBits() == 64) {
+ Intrinsic::ID Int =
+ IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
+ Function *Strex = Intrinsic::getDeclaration(M, Int);
+ Type *Int32Ty = Type::getInt32Ty(M->getContext());
+
+ Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
+ Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
+ if (!Subtarget->isLittle())
+ std::swap (Lo, Hi);
+ Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
+ return Builder.CreateCall3(Strex, Lo, Hi, Addr);
+ }
+
+ Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
+ Type *Tys[] = { Addr->getType() };
+ Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);
+
+ return Builder.CreateCall2(
+ Strex, Builder.CreateZExtOrBitCast(
+ Val, Strex->getFunctionType()->getParamType(0)),
+ Addr);
+}
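
Both hooks marshal a 64-bit value to and from the {i32, i32} shape the exclusive-access intrinsics require. A hedged sketch of that arithmetic on plain integers, with the same endianness swap as the std::swap calls above:

    #include <cstdint>
    #include <utility>

    // ldrexd/ldaexd side: Lo/Hi are struct elements 0 and 1 of the intrinsic.
    uint64_t combineHalves(uint32_t Lo, uint32_t Hi, bool IsLittleEndian) {
      if (!IsLittleEndian)
        std::swap(Lo, Hi);
      return uint64_t(Lo) | (uint64_t(Hi) << 32);
    }

    // strexd/stlexd side: split the value into the two i32 operands.
    std::pair<uint32_t, uint32_t> splitHalves(uint64_t Val, bool IsLittleEndian) {
      uint32_t Lo = uint32_t(Val);
      uint32_t Hi = uint32_t(Val >> 32);
      if (!IsLittleEndian)
        std::swap(Lo, Hi);
      return {Lo, Hi};
    }
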
+
+enum HABaseType {
+ HA_UNKNOWN = 0,
+ HA_FLOAT,
+ HA_DOUBLE,
+ HA_VECT64,
+ HA_VECT128
+};
+
+static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
+ uint64_t &Members) {
+ if (const StructType *ST = dyn_cast<StructType>(Ty)) {
+ for (unsigned i = 0; i < ST->getNumElements(); ++i) {
+ uint64_t SubMembers = 0;
+ if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers))
+ return false;
+ Members += SubMembers;
+ }
+ } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
+ uint64_t SubMembers = 0;
+ if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers))
+ return false;
+ Members += SubMembers * AT->getNumElements();
+ } else if (Ty->isFloatTy()) {
+ if (Base != HA_UNKNOWN && Base != HA_FLOAT)
+ return false;
+ Members = 1;
+ Base = HA_FLOAT;
+ } else if (Ty->isDoubleTy()) {
+ if (Base != HA_UNKNOWN && Base != HA_DOUBLE)
+ return false;
+ Members = 1;
+ Base = HA_DOUBLE;
+ } else if (const VectorType *VT = dyn_cast<VectorType>(Ty)) {
+ Members = 1;
+ switch (Base) {
+ case HA_FLOAT:
+ case HA_DOUBLE:
+ return false;
+ case HA_VECT64:
+ return VT->getBitWidth() == 64;
+ case HA_VECT128:
+ return VT->getBitWidth() == 128;
+ case HA_UNKNOWN:
+ switch (VT->getBitWidth()) {
+ case 64:
+ Base = HA_VECT64;
+ return true;
+ case 128:
+ Base = HA_VECT128;
+ return true;
+ default:
+ return false;
+ }
+ }
+ }
+
+ return (Members > 0 && Members <= 4);
+}
+
+/// \brief Return true if a type is an AAPCS-VFP homogeneous aggregate.
+bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
+ Type *Ty, CallingConv::ID CallConv, bool isVarArg) const {
+ if (getEffectiveCallingConv(CallConv, isVarArg) !=
+ CallingConv::ARM_AAPCS_VFP)
+ return false;
+
+ HABaseType Base = HA_UNKNOWN;
+ uint64_t Members = 0;
+ bool result = isHomogeneousAggregate(Ty, Base, Members);
+ DEBUG(dbgs() << "isHA: " << result << " "; Ty->dump(); dbgs() << "\n");
+ return result;
+}
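
For reference, a few source-level types and how the classification above treats them once lowered to IR (hand-written examples, not from the patch; the AAPCS-VFP rule is a single base type and at most four members):

    struct TwoFloats  { float a, b; };          // HA: base float, 2 members
    struct QuadDouble { double d[4]; };         // HA: base double, 4 members
    struct FiveFloats { float v[5]; };          // not an HA: 5 members
    struct Mixed      { float a; double b; };   // not an HA: mixed base types
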
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelLowering.h b/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
index 90facdd..1ace0f3 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -15,17 +15,15 @@
#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H
-#include "ARM.h"
-#include "ARMSubtarget.h"
+#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
-#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetRegisterInfo.h"
#include <vector>
namespace llvm {
class ARMConstantPoolValue;
+ class ARMSubtarget;
namespace ARMISD {
// ARM Specific DAG Nodes
@@ -35,8 +33,6 @@ namespace llvm {
Wrapper, // Wrapper - A wrapper node for TargetConstantPool,
// TargetExternalSymbol, and TargetGlobalAddress.
- WrapperDYN, // WrapperDYN - A wrapper node for TargetGlobalAddress in
- // DYN mode.
WrapperPIC, // WrapperPIC - A wrapper node for TargetGlobalAddress in
// PIC mode.
WrapperJT, // WrapperJT - A wrapper node for TargetJumpTable
@@ -99,6 +95,8 @@ namespace llvm {
PRELOAD, // Preload
+ WIN__CHKSTK, // Windows' __chkstk call to do stack probing.
+
VCEQ, // Vector compare equal.
VCEQZ, // Vector compare equal to zero.
VCGE, // Vector compare greater than or equal.
@@ -115,10 +113,6 @@ namespace llvm {
VSHL, // ...left
VSHRs, // ...right (signed)
VSHRu, // ...right (unsigned)
- VSHLLs, // ...left long (signed)
- VSHLLu, // ...left long (unsigned)
- VSHLLi, // ...left long (with maximum shift count)
- VSHRN, // ...right narrow
// Vector rounding shift by immediate:
VRSHRs, // ...right (signed)
@@ -240,116 +234,114 @@ namespace llvm {
public:
explicit ARMTargetLowering(TargetMachine &TM);
- virtual unsigned getJumpTableEncoding() const;
+ unsigned getJumpTableEncoding() const override;
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
///
- virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
- SelectionDAG &DAG) const;
+ void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
+ SelectionDAG &DAG) const override;
- virtual const char *getTargetNodeName(unsigned Opcode) const;
+ const char *getTargetNodeName(unsigned Opcode) const override;
- virtual bool isSelectSupported(SelectSupportKind Kind) const {
+ bool isSelectSupported(SelectSupportKind Kind) const override {
// ARM does not support scalar condition selects on vectors.
return (Kind != ScalarCondVectorVal);
}
/// getSetCCResultType - Return the value type to use for ISD::SETCC.
- virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
+ EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;
- virtual MachineBasicBlock *
+ MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB) const;
+ MachineBasicBlock *MBB) const override;
- virtual void
- AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;
+ void AdjustInstrPostInstrSelection(MachineInstr *MI,
+ SDNode *Node) const override;
SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
- virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
- bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const;
+ bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override;
/// allowsUnalignedMemoryAccesses - Returns true if the target allows
/// unaligned memory accesses of the specified type. Returns whether it
/// is "fast" by reference in the last argument.
- virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const;
+ bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
+ bool *Fast) const override;
- virtual EVT getOptimalMemOpType(uint64_t Size,
- unsigned DstAlign, unsigned SrcAlign,
- bool IsMemset, bool ZeroMemset,
- bool MemcpyStrSrc,
- MachineFunction &MF) const;
+ EVT getOptimalMemOpType(uint64_t Size,
+ unsigned DstAlign, unsigned SrcAlign,
+ bool IsMemset, bool ZeroMemset,
+ bool MemcpyStrSrc,
+ MachineFunction &MF) const override;
using TargetLowering::isZExtFree;
- virtual bool isZExtFree(SDValue Val, EVT VT2) const;
+ bool isZExtFree(SDValue Val, EVT VT2) const override;
- virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const;
+ bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
- virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const;
+ bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;
bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
/// isLegalICmpImmediate - Return true if the specified immediate is legal
/// icmp immediate, that is the target has icmp instructions which can
/// compare a register against the immediate without having to materialize
/// the immediate into a register.
- virtual bool isLegalICmpImmediate(int64_t Imm) const;
+ bool isLegalICmpImmediate(int64_t Imm) const override;
/// isLegalAddImmediate - Return true if the specified immediate is legal
/// add immediate, that is the target has add instructions which can
/// add a register and the immediate without having to materialize
/// the immediate into a register.
- virtual bool isLegalAddImmediate(int64_t Imm) const;
+ bool isLegalAddImmediate(int64_t Imm) const override;
/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
- virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
- SDValue &Offset,
- ISD::MemIndexedMode &AM,
- SelectionDAG &DAG) const;
+ bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
+ ISD::MemIndexedMode &AM,
+ SelectionDAG &DAG) const override;
/// getPostIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
- virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
- SDValue &Base, SDValue &Offset,
- ISD::MemIndexedMode &AM,
- SelectionDAG &DAG) const;
+ bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
+ SDValue &Offset, ISD::MemIndexedMode &AM,
+ SelectionDAG &DAG) const override;
- virtual void computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) const;
+ void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth) const override;
- virtual bool ExpandInlineAsm(CallInst *CI) const;
+ bool ExpandInlineAsm(CallInst *CI) const override;
- ConstraintType getConstraintType(const std::string &Constraint) const;
+ ConstraintType
+ getConstraintType(const std::string &Constraint) const override;
/// Examine constraint string and operand type and determine a weight value.
/// The operand object must already have been set up with the operand type.
ConstraintWeight getSingleConstraintMatchWeight(
- AsmOperandInfo &info, const char *constraint) const;
+ AsmOperandInfo &info, const char *constraint) const override;
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT VT) const;
+ MVT VT) const override;
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops. If hasMemory is
/// true it means one of the asm constraints of the inline asm instruction
/// being processed is 'm'.
- virtual void LowerAsmOperandForConstraint(SDValue Op,
- std::string &Constraint,
- std::vector<SDValue> &Ops,
- SelectionDAG &DAG) const;
+ void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const override;
const ARMSubtarget* getSubtarget() const {
return Subtarget;
@@ -357,39 +349,58 @@ namespace llvm {
/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
- virtual const TargetRegisterClass *getRegClassFor(MVT VT) const;
+ const TargetRegisterClass *getRegClassFor(MVT VT) const override;
/// getMaximalGlobalOffset - Returns the maximal possible offset which can
/// be used for loads / stores from the global.
- virtual unsigned getMaximalGlobalOffset() const;
+ unsigned getMaximalGlobalOffset() const override;
/// Returns true if a cast between SrcAS and DestAS is a noop.
- virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
+ bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
// Addrspacecasts are always noops.
return true;
}
/// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
- virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
- const TargetLibraryInfo *libInfo) const;
+ FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo) const override;
- Sched::Preference getSchedulingPreference(SDNode *N) const;
+ Sched::Preference getSchedulingPreference(SDNode *N) const override;
- bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
- bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
+ bool
+ isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;
+ bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
- virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
+ bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
+
+ bool getTgtMemIntrinsic(IntrinsicInfo &Info,
+ const CallInst &I,
+ unsigned Intrinsic) const override;
+
+ /// \brief Returns true if it is beneficial to convert a load of a constant
+ /// to just the constant itself.
+ bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
+ Type *Ty) const override;
+
+ /// \brief Returns true if an argument of type Ty needs to be passed in a
+ /// contiguous block of registers in calling convention CallConv.
+ bool functionArgumentNeedsConsecutiveRegisters(
+ Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override;
+
+ Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
+ AtomicOrdering Ord) const override;
+ Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
+ Value *Addr, AtomicOrdering Ord) const override;
+
+ bool shouldExpandAtomicInIR(Instruction *Inst) const override;
- virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info,
- const CallInst &I,
- unsigned Intrinsic) const;
protected:
std::pair<const TargetRegisterClass*, uint8_t>
- findRepresentativeClass(MVT VT) const;
+ findRepresentativeClass(MVT VT) const override;
private:
/// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
@@ -407,6 +418,7 @@ namespace llvm {
void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
void addDRTypeForNEON(MVT VT);
void addQRTypeForNEON(MVT VT);
+ std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG, SDValue &ARMcc) const;
typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
void PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
@@ -420,6 +432,8 @@ namespace llvm {
SDValue &Root, SelectionDAG &DAG,
SDLoc dl) const;
+ CallingConv::ID getEffectiveCallingConv(CallingConv::ID CC,
+ bool isVarArg) const;
CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
bool isVarArg) const;
SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
@@ -433,6 +447,7 @@ namespace llvm {
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGlobalAddressWindows(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
SelectionDAG &DAG) const;
@@ -441,6 +456,7 @@ namespace llvm {
TLSModel::Model model) const;
SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
@@ -456,6 +472,9 @@ namespace llvm {
const ARMSubtarget *ST) const;
SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
+
+ unsigned getRegisterByName(const char* RegName, EVT VT) const override;
/// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
/// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
@@ -466,7 +485,7 @@ namespace llvm {
/// lower a pair of fmul and fadd to the latter so it's not clear that there
/// would be a gain or that the gain would be worthwhile enough to risk
/// correctness bugs.
- virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const { return false; }
+ bool isFMAFasterThanFMulAndFAdd(EVT VT) const override { return false; }
SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;
@@ -477,12 +496,12 @@ namespace llvm {
SmallVectorImpl<SDValue> &InVals,
bool isThisReturn, SDValue ThisVal) const;
- virtual SDValue
+ SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
+ SmallVectorImpl<SDValue> &InVals) const override;
int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
SDLoc dl, SDValue &Chain,
@@ -491,11 +510,14 @@ namespace llvm {
unsigned OffsetFromOrigArg,
unsigned ArgOffset,
unsigned ArgSize,
- bool ForceMutable) const;
+ bool ForceMutable,
+ unsigned ByValStoreOffset,
+ unsigned TotalArgRegsSaveSize) const;
void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
SDLoc dl, SDValue &Chain,
unsigned ArgOffset,
+ unsigned TotalArgRegsSaveSize,
bool ForceMutable = false) const;
void computeRegArea(CCState &CCInfo, MachineFunction &MF,
@@ -504,12 +526,12 @@ namespace llvm {
unsigned &ArgRegsSize,
unsigned &ArgRegsSaveSize) const;
- virtual SDValue
+ SDValue
LowerCall(TargetLowering::CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const;
+ SmallVectorImpl<SDValue> &InVals) const override;
/// HandleByVal - Target-specific cleanup for ByVal support.
- virtual void HandleByVal(CCState *, unsigned &, unsigned) const;
+ void HandleByVal(CCState *, unsigned &, unsigned) const override;
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
@@ -524,21 +546,21 @@ namespace llvm {
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const;
- virtual bool CanLowerReturn(CallingConv::ID CallConv,
- MachineFunction &MF, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- LLVMContext &Context) const;
+ bool CanLowerReturn(CallingConv::ID CallConv,
+ MachineFunction &MF, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext &Context) const override;
- virtual SDValue
+ SDValue
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const;
+ SDLoc dl, SelectionDAG &DAG) const override;
- virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;
+ bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
- virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;
+ bool mayBeEmittedAsTailCall(CallInst *CI) const override;
SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
SDValue &ARMcc, SelectionDAG &DAG, SDLoc dl) const;
@@ -548,29 +570,6 @@ namespace llvm {
SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;
- MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
- MachineBasicBlock *BB,
- unsigned Size) const;
- MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
- MachineBasicBlock *BB,
- unsigned Size,
- unsigned BinOpcode) const;
- MachineBasicBlock *EmitAtomicBinary64(MachineInstr *MI,
- MachineBasicBlock *BB,
- unsigned Op1,
- unsigned Op2,
- bool NeedsCarry = false,
- bool IsCmpxchg = false,
- bool IsMinMax = false,
- ARMCC::CondCodes CC = ARMCC::AL) const;
- MachineBasicBlock * EmitAtomicBinaryMinMax(MachineInstr *MI,
- MachineBasicBlock *BB,
- unsigned Size,
- bool signExtend,
- ARMCC::CondCodes Cond) const;
- MachineBasicBlock *EmitAtomicLoad64(MachineInstr *MI,
- MachineBasicBlock *BB) const;
-
void SetupEntryBlockForSjLj(MachineInstr *MI,
MachineBasicBlock *MBB,
MachineBasicBlock *DispatchBB, int FI) const;
@@ -582,6 +581,9 @@ namespace llvm {
MachineBasicBlock *EmitStructByval(MachineInstr *MI,
MachineBasicBlock *MBB) const;
+
+ MachineBasicBlock *EmitLowered__chkstk(MachineInstr *MI,
+ MachineBasicBlock *MBB) const;
};
enum NEONModImmType {
@@ -590,7 +592,6 @@ namespace llvm {
OtherModImm
};
-
namespace ARM {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo);
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td b/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td
index f93504f..59e9260 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td
@@ -212,25 +212,25 @@ def msr_mask : Operand<i32> {
// 32 imm6<5> = '1', 32 - <imm> is encoded in imm6<4:0>
// 64 64 - <imm> is encoded in imm6<5:0>
def shr_imm8_asm_operand : ImmAsmOperand { let Name = "ShrImm8"; }
-def shr_imm8 : Operand<i32> {
+def shr_imm8 : Operand<i32>, ImmLeaf<i32, [{ return Imm > 0 && Imm <= 8; }]> {
let EncoderMethod = "getShiftRight8Imm";
let DecoderMethod = "DecodeShiftRight8Imm";
let ParserMatchClass = shr_imm8_asm_operand;
}
def shr_imm16_asm_operand : ImmAsmOperand { let Name = "ShrImm16"; }
-def shr_imm16 : Operand<i32> {
+def shr_imm16 : Operand<i32>, ImmLeaf<i32, [{ return Imm > 0 && Imm <= 16; }]> {
let EncoderMethod = "getShiftRight16Imm";
let DecoderMethod = "DecodeShiftRight16Imm";
let ParserMatchClass = shr_imm16_asm_operand;
}
def shr_imm32_asm_operand : ImmAsmOperand { let Name = "ShrImm32"; }
-def shr_imm32 : Operand<i32> {
+def shr_imm32 : Operand<i32>, ImmLeaf<i32, [{ return Imm > 0 && Imm <= 32; }]> {
let EncoderMethod = "getShiftRight32Imm";
let DecoderMethod = "DecodeShiftRight32Imm";
let ParserMatchClass = shr_imm32_asm_operand;
}
def shr_imm64_asm_operand : ImmAsmOperand { let Name = "ShrImm64"; }
-def shr_imm64 : Operand<i32> {
+def shr_imm64 : Operand<i32>, ImmLeaf<i32, [{ return Imm > 0 && Imm <= 64; }]> {
let EncoderMethod = "getShiftRight64Imm";
let DecoderMethod = "DecodeShiftRight64Imm";
let ParserMatchClass = shr_imm64_asm_operand;
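As a rough cross-check (assuming a NEON-capable target and the arm_neon.h intrinsics), the 1..32 range that shr_imm32 enforces above mirrors the immediate range the corresponding shift-right intrinsic accepts:

#include <arm_neon.h>

// Sketch only: vshrq_n_s32 requires a constant shift in [1, 32], matching the
// "Imm > 0 && Imm <= 32" ImmLeaf predicate attached to shr_imm32.
int32x4_t shiftRightBy32(int32x4_t v) { return vshrq_n_s32(v, 32); }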
@@ -329,10 +329,10 @@ class InstThumb<AddrMode am, int sz, IndexMode im,
// Pseudo-instructions for alternate assembly syntax (never used by codegen).
// These are aliases that require C++ handling to convert to the target
// instruction, while InstAliases can be handled directly by tblgen.
-class AsmPseudoInst<string asm, dag iops>
+class AsmPseudoInst<string asm, dag iops, dag oops = (outs)>
: InstTemplate<AddrModeNone, 0, IndexModeNone, Pseudo, GenericDomain,
"", NoItinerary> {
- let OutOperandList = (outs);
+ let OutOperandList = oops;
let InOperandList = iops;
let Pattern = [];
let isCodeGenOnly = 0; // So we get asm matcher for it.
@@ -340,16 +340,16 @@ class AsmPseudoInst<string asm, dag iops>
let isPseudo = 1;
}
-class ARMAsmPseudo<string asm, dag iops> : AsmPseudoInst<asm, iops>,
- Requires<[IsARM]>;
-class tAsmPseudo<string asm, dag iops> : AsmPseudoInst<asm, iops>,
- Requires<[IsThumb]>;
-class t2AsmPseudo<string asm, dag iops> : AsmPseudoInst<asm, iops>,
- Requires<[IsThumb2]>;
-class VFP2AsmPseudo<string asm, dag iops> : AsmPseudoInst<asm, iops>,
- Requires<[HasVFP2]>;
-class NEONAsmPseudo<string asm, dag iops> : AsmPseudoInst<asm, iops>,
- Requires<[HasNEON]>;
+class ARMAsmPseudo<string asm, dag iops, dag oops = (outs)>
+ : AsmPseudoInst<asm, iops, oops>, Requires<[IsARM]>;
+class tAsmPseudo<string asm, dag iops, dag oops = (outs)>
+ : AsmPseudoInst<asm, iops, oops>, Requires<[IsThumb]>;
+class t2AsmPseudo<string asm, dag iops, dag oops = (outs)>
+ : AsmPseudoInst<asm, iops, oops>, Requires<[IsThumb2]>;
+class VFP2AsmPseudo<string asm, dag iops, dag oops = (outs)>
+ : AsmPseudoInst<asm, iops, oops>, Requires<[HasVFP2]>;
+class NEONAsmPseudo<string asm, dag iops, dag oops = (outs)>
+ : AsmPseudoInst<asm, iops, oops>, Requires<[HasNEON]>;
// Pseudo instructions for the code generator.
class PseudoInst<dag oops, dag iops, InstrItinClass itin, list<dag> pattern>
@@ -477,6 +477,10 @@ class AXI<dag oops, dag iops, Format f, InstrItinClass itin,
string asm, list<dag> pattern>
: XI<oops, iops, AddrModeNone, 4, IndexModeNone, f, itin,
asm, "", pattern>;
+class AXIM<dag oops, dag iops, AddrMode am, Format f, InstrItinClass itin,
+ string asm, list<dag> pattern>
+ : XI<oops, iops, am, 4, IndexModeNone, f, itin,
+ asm, "", pattern>;
class AInoP<dag oops, dag iops, Format f, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: InoP<oops, iops, AddrModeNone, 4, IndexModeNone, f, itin,
@@ -2025,7 +2029,7 @@ class N2V<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18, bits<2> op17_16,
// Same as N2V but not predicated.
class N2Vnp<bits<2> op19_18, bits<2> op17_16, bits<3> op10_8, bit op7, bit op6,
dag oops, dag iops, InstrItinClass itin, string OpcodeStr,
- string Dt, ValueType ResTy, ValueType OpTy, list<dag> pattern>
+ string Dt, list<dag> pattern>
: NeonInp<oops, iops, AddrModeNone, IndexModeNone, N2RegFrm, itin,
OpcodeStr, Dt, "$Vd, $Vm", "", pattern> {
bits<5> Vd;
@@ -2134,8 +2138,7 @@ class N3V<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op6, bit op4,
class N3Vnp<bits<5> op27_23, bits<2> op21_20, bits<4> op11_8, bit op6,
bit op4, dag oops, dag iops,Format f, InstrItinClass itin,
- string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
- SDPatternOperator IntOp, bit Commutable, list<dag> pattern>
+ string OpcodeStr, string Dt, list<dag> pattern>
: NeonInp<oops, iops, AddrModeNone, IndexModeNone, f, itin, OpcodeStr,
Dt, "$Vd, $Vn, $Vm", "", pattern> {
bits<5> Vd;
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp
index df867b4..f235ac2 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp
@@ -97,7 +97,7 @@ namespace {
static char ID;
ARMCGBR() : MachineFunctionPass(ID) {}
- virtual bool runOnMachineFunction(MachineFunction &MF) {
+ bool runOnMachineFunction(MachineFunction &MF) override {
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
if (AFI->getGlobalBaseReg() == 0)
return false;
@@ -146,11 +146,11 @@ namespace {
return true;
}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "ARM PIC Global Base Reg Initialization";
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.h b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.h
index 5d3e059..b09958a 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.h
@@ -14,10 +14,8 @@
#ifndef ARMINSTRUCTIONINFO_H
#define ARMINSTRUCTIONINFO_H
-#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMRegisterInfo.h"
-#include "ARMSubtarget.h"
namespace llvm {
class ARMSubtarget;
@@ -28,17 +26,17 @@ public:
explicit ARMInstrInfo(const ARMSubtarget &STI);
/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
- void getNoopForMachoTarget(MCInst &NopInst) const;
+ void getNoopForMachoTarget(MCInst &NopInst) const override;
// Return the non-pre/post incrementing version of 'Opc'. Return 0
// if there is no such opcode.
- unsigned getUnindexedOpcode(unsigned Opc) const;
+ unsigned getUnindexedOpcode(unsigned Opc) const override;
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
- const ARMRegisterInfo &getRegisterInfo() const { return RI; }
+ const ARMRegisterInfo &getRegisterInfo() const override { return RI; }
};
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
index 7a14b8e..a02d997 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -95,7 +95,6 @@ def ARMSmlal : SDNode<"ARMISD::SMLAL", SDT_ARM64bitmlal>;
// Node definitions.
def ARMWrapper : SDNode<"ARMISD::Wrapper", SDTIntUnaryOp>;
-def ARMWrapperDYN : SDNode<"ARMISD::WrapperDYN", SDTIntUnaryOp>;
def ARMWrapperPIC : SDNode<"ARMISD::WrapperPIC", SDTIntUnaryOp>;
def ARMWrapperJT : SDNode<"ARMISD::WrapperJT", SDTIntBinOp>;
@@ -187,7 +186,8 @@ def ARMvminnm : SDNode<"ARMISD::VMINNM", SDT_ARMVMINNM, []>;
def HasV4T : Predicate<"Subtarget->hasV4TOps()">,
AssemblerPredicate<"HasV4TOps", "armv4t">;
def NoV4T : Predicate<"!Subtarget->hasV4TOps()">;
-def HasV5T : Predicate<"Subtarget->hasV5TOps()">;
+def HasV5T : Predicate<"Subtarget->hasV5TOps()">,
+ AssemblerPredicate<"HasV5TOps", "armv5t">;
def HasV5TE : Predicate<"Subtarget->hasV5TEOps()">,
AssemblerPredicate<"HasV5TEOps", "armv5te">;
def HasV6 : Predicate<"Subtarget->hasV6Ops()">,
@@ -244,6 +244,7 @@ def HasMP : Predicate<"Subtarget->hasMPExtension()">,
def HasTrustZone : Predicate<"Subtarget->hasTrustZone()">,
AssemblerPredicate<"FeatureTrustZone",
"TrustZone">;
+def HasZCZ : Predicate<"Subtarget->hasZeroCycleZeroing()">;
def UseNEONForFP : Predicate<"Subtarget->useNEONForSinglePrecisionFP()">;
def DontUseNEONForFP : Predicate<"!Subtarget->useNEONForSinglePrecisionFP()">;
def IsThumb : Predicate<"Subtarget->isThumb()">,
@@ -261,14 +262,16 @@ def IsARM : Predicate<"!Subtarget->isThumb()">,
AssemblerPredicate<"!ModeThumb", "arm-mode">;
def IsIOS : Predicate<"Subtarget->isTargetIOS()">;
def IsNotIOS : Predicate<"!Subtarget->isTargetIOS()">;
+def IsMachO : Predicate<"Subtarget->isTargetMachO()">;
+def IsNotMachO : Predicate<"!Subtarget->isTargetMachO()">;
def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
def UseNaClTrap : Predicate<"Subtarget->useNaClTrap()">,
AssemblerPredicate<"FeatureNaClTrap", "NaCl">;
def DontUseNaClTrap : Predicate<"!Subtarget->useNaClTrap()">;
// FIXME: Eventually this will be just "hasV6T2Ops".
-def UseMovt : Predicate<"Subtarget->useMovt()">;
-def DontUseMovt : Predicate<"!Subtarget->useMovt()">;
+def UseMovt : Predicate<"Subtarget->useMovt(*MF)">;
+def DontUseMovt : Predicate<"!Subtarget->useMovt(*MF)">;
def UseFPVMLx : Predicate<"Subtarget->useFPVMLx()">;
def UseMulOps : Predicate<"Subtarget->useMulOps()">;
@@ -276,7 +279,8 @@ def UseMulOps : Predicate<"Subtarget->useMulOps()">;
// But only select them if more precision in FP computation is allowed.
// Do not use them for Darwin platforms.
def UseFusedMAC : Predicate<"(TM.Options.AllowFPOpFusion =="
- " FPOpFusion::Fast) && "
+ " FPOpFusion::Fast && "
+ " Subtarget->hasVFP4()) && "
"!Subtarget->isTargetDarwin()">;
def DontUseFusedMAC : Predicate<"!(TM.Options.AllowFPOpFusion =="
" FPOpFusion::Fast &&"
@@ -489,7 +493,7 @@ def neon_vcvt_imm32 : Operand<i32> {
// rot_imm: An integer that encodes a rotate amount. Must be 8, 16, or 24.
def rot_imm_XFORM: SDNodeXForm<imm, [{
switch (N->getZExtValue()){
- default: assert(0);
+ default: llvm_unreachable(nullptr);
case 0: return CurDAG->getTargetConstant(0, MVT::i32);
case 8: return CurDAG->getTargetConstant(1, MVT::i32);
case 16: return CurDAG->getTargetConstant(2, MVT::i32);
@@ -590,7 +594,7 @@ def so_imm2part : PatLeaf<(imm), [{
/// arm_i32imm - True for +V6T2, or true only if so_imm2part is true.
///
def arm_i32imm : PatLeaf<(imm), [{
- if (Subtarget->hasV6T2Ops())
+ if (Subtarget->useMovt(*MF))
return true;
return ARM_AM::isSOImmTwoPartVal((unsigned)N->getZExtValue());
}]>;
@@ -987,6 +991,81 @@ def addrmode6oneL32 : Operand<i32>,
let EncoderMethod = "getAddrMode6OneLane32AddressOpValue";
}
+// Base class for addrmode6 with specific alignment restrictions.
+class AddrMode6Align : Operand<i32>,
+ ComplexPattern<i32, 2, "SelectAddrMode6", [], [SDNPWantParent]>{
+ let PrintMethod = "printAddrMode6Operand";
+ let MIOperandInfo = (ops GPR:$addr, i32imm:$align);
+ let EncoderMethod = "getAddrMode6AddressOpValue";
+ let DecoderMethod = "DecodeAddrMode6Operand";
+}
+
+// Special version of addrmode6 for VLD/VST instructions that allow no
+// alignment encoding, checking that the alignment is not specified.
+def AddrMode6AlignNoneAsmOperand : AsmOperandClass {
+ let Name = "AlignedMemoryNone";
+ let DiagnosticType = "AlignedMemoryRequiresNone";
+}
+def addrmode6alignNone : AddrMode6Align {
+ // The alignment specifier can only be omitted.
+ let ParserMatchClass = AddrMode6AlignNoneAsmOperand;
+}
+
+// Special version of addrmode6 to handle 16-bit alignment encoding for
+// VLD/VST instructions and checking the alignment value.
+def AddrMode6Align16AsmOperand : AsmOperandClass {
+ let Name = "AlignedMemory16";
+ let DiagnosticType = "AlignedMemoryRequires16";
+}
+def addrmode6align16 : AddrMode6Align {
+ // The alignment specifier can only be 16 or omitted.
+ let ParserMatchClass = AddrMode6Align16AsmOperand;
+}
+
+// Special version of addrmode6 to handle 32-bit alignment encoding for
+// VLD/VST instructions and checking the alignment value.
+def AddrMode6Align32AsmOperand : AsmOperandClass {
+ let Name = "AlignedMemory32";
+ let DiagnosticType = "AlignedMemoryRequires32";
+}
+def addrmode6align32 : AddrMode6Align {
+ // The alignment specifier can only be 32 or omitted.
+ let ParserMatchClass = AddrMode6Align32AsmOperand;
+}
+
+// Special version of addrmode6 to handle 64-bit alignment encoding for
+// VLD/VST instructions and checking the alignment value.
+def AddrMode6Align64AsmOperand : AsmOperandClass {
+ let Name = "AlignedMemory64";
+ let DiagnosticType = "AlignedMemoryRequires64";
+}
+def addrmode6align64 : AddrMode6Align {
+ // The alignment specifier can only be 64 or omitted.
+ let ParserMatchClass = AddrMode6Align64AsmOperand;
+}
+
+// Special version of addrmode6 to handle 64-bit or 128-bit alignment encoding
+// for VLD/VST instructions and checking the alignment value.
+def AddrMode6Align64or128AsmOperand : AsmOperandClass {
+ let Name = "AlignedMemory64or128";
+ let DiagnosticType = "AlignedMemoryRequires64or128";
+}
+def addrmode6align64or128 : AddrMode6Align {
+ // The alignment specifier can only be 64, 128 or omitted.
+ let ParserMatchClass = AddrMode6Align64or128AsmOperand;
+}
+
+// Special version of addrmode6 to handle 64-bit, 128-bit or 256-bit alignment
+// encoding for VLD/VST instructions and checking the alignment value.
+def AddrMode6Align64or128or256AsmOperand : AsmOperandClass {
+ let Name = "AlignedMemory64or128or256";
+ let DiagnosticType = "AlignedMemoryRequires64or128or256";
+}
+def addrmode6align64or128or256 : AddrMode6Align {
+ // The alignment specifier can only be 64, 128, 256 or omitted.
+ let ParserMatchClass = AddrMode6Align64or128or256AsmOperand;
+}
+
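A hedged sketch of the assembler syntax these operand classes accept (GCC-style inline asm on a NEON-capable ARM target is assumed; the function and operands are illustrative only). The ":128" suffix is the alignment specifier that AddrMode6Align64or128AsmOperand matches, and an unsupported value would be reported via the DiagnosticType above:

#include <cstdint>

// Sketch: load two D registers from a pointer asserted to be 128-bit aligned.
void loadPair128(const uint8_t *p) {
  asm volatile("vld1.64 {d0, d1}, [%0:128]" : : "r"(p) : "d0", "d1", "memory");
}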
// Special version of addrmode6 to handle alignment encoding for VLD-dup
// instructions, specifically VLD4-dup.
def addrmode6dup : Operand<i32>,
@@ -999,6 +1078,69 @@ def addrmode6dup : Operand<i32>,
let ParserMatchClass = AddrMode6AsmOperand;
}
+// Base class for addrmode6dup with specific alignment restrictions.
+class AddrMode6DupAlign : Operand<i32>,
+ ComplexPattern<i32, 2, "SelectAddrMode6", [], [SDNPWantParent]>{
+ let PrintMethod = "printAddrMode6Operand";
+ let MIOperandInfo = (ops GPR:$addr, i32imm);
+ let EncoderMethod = "getAddrMode6DupAddressOpValue";
+}
+
+// Special version of addrmode6 for VLD-dup instructions that allow no
+// alignment encoding, checking that the alignment is not specified.
+def AddrMode6dupAlignNoneAsmOperand : AsmOperandClass {
+ let Name = "DupAlignedMemoryNone";
+ let DiagnosticType = "DupAlignedMemoryRequiresNone";
+}
+def addrmode6dupalignNone : AddrMode6DupAlign {
+ // The alignment specifier can only be omitted.
+ let ParserMatchClass = AddrMode6dupAlignNoneAsmOperand;
+}
+
+// Special version of addrmode6 to handle 16-bit alignment encoding for VLD-dup
+// instruction and checking the alignment value.
+def AddrMode6dupAlign16AsmOperand : AsmOperandClass {
+ let Name = "DupAlignedMemory16";
+ let DiagnosticType = "DupAlignedMemoryRequires16";
+}
+def addrmode6dupalign16 : AddrMode6DupAlign {
+ // The alignment specifier can only be 16 or omitted.
+ let ParserMatchClass = AddrMode6dupAlign16AsmOperand;
+}
+
+// Special version of addrmode6 to handle 32-bit alignment encoding for VLD-dup
+// instruction and checking the alignment value.
+def AddrMode6dupAlign32AsmOperand : AsmOperandClass {
+ let Name = "DupAlignedMemory32";
+ let DiagnosticType = "DupAlignedMemoryRequires32";
+}
+def addrmode6dupalign32 : AddrMode6DupAlign {
+ // The alignment specifier can only be 32 or omitted.
+ let ParserMatchClass = AddrMode6dupAlign32AsmOperand;
+}
+
+// Special version of addrmode6 to handle 64-bit alignment encoding for VLD
+// instructions and checking the alignment value.
+def AddrMode6dupAlign64AsmOperand : AsmOperandClass {
+ let Name = "DupAlignedMemory64";
+ let DiagnosticType = "DupAlignedMemoryRequires64";
+}
+def addrmode6dupalign64 : AddrMode6DupAlign {
+ // The alignment specifier can only be 64 or omitted.
+ let ParserMatchClass = AddrMode6dupAlign64AsmOperand;
+}
+
+// Special version of addrmode6 to handle 64-bit or 128-bit alignment encoding
+// for VLD instructions and checking the alignment value.
+def AddrMode6dupAlign64or128AsmOperand : AsmOperandClass {
+ let Name = "DupAlignedMemory64or128";
+ let DiagnosticType = "DupAlignedMemoryRequires64or128";
+}
+def addrmode6dupalign64or128 : AddrMode6DupAlign {
+ // The alignment specifier can only be 64, 128 or omitted.
+ let ParserMatchClass = AddrMode6dupAlign64or128AsmOperand;
+}
+
// addrmodepc := pc + reg
//
def addrmodepc : Operand<i32>,
@@ -1685,7 +1827,8 @@ PseudoInst<(outs), (ins i32imm:$amt, pred:$p), NoItinerary,
}
def HINT : AI<(outs), (ins imm0_239:$imm), MiscFrm, NoItinerary,
- "hint", "\t$imm", []>, Requires<[IsARM, HasV6]> {
+ "hint", "\t$imm", [(int_arm_hint imm0_239:$imm)]>,
+ Requires<[IsARM, HasV6]> {
bits<8> imm;
let Inst{27-8} = 0b00110010000011110000;
let Inst{7-0} = imm;
@@ -1698,8 +1841,6 @@ def : InstAlias<"wfi$p", (HINT 3, pred:$p)>, Requires<[IsARM, HasV6T2]>;
def : InstAlias<"sev$p", (HINT 4, pred:$p)>, Requires<[IsARM, HasV6T2]>;
def : InstAlias<"sevl$p", (HINT 5, pred:$p)>, Requires<[IsARM, HasV8]>;
-def : Pat<(int_arm_sevl), (HINT 5)>;
-
def SEL : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm, NoItinerary, "sel",
"\t$Rd, $Rn, $Rm", []>, Requires<[IsARM, HasV6]> {
bits<4> Rd;
@@ -1725,6 +1866,8 @@ def BKPT : AInoP<(outs), (ins imm0_65535:$val), MiscFrm, NoItinerary,
let Inst{31-28} = 0xe; // AL
let Inst{7-4} = 0b0111;
}
+// default immediate for breakpoint mnemonic
+def : InstAlias<"bkpt", (BKPT 0)>, Requires<[IsARM]>;
def HLT : AInoP<(outs), (ins imm0_65535:$val), MiscFrm, NoItinerary,
"hlt", "\t$val", []>, Requires<[IsARM, HasV8]> {
@@ -1770,8 +1913,8 @@ let imod = 0, iflags = 0, M = 1 in
// Preload signals the memory system of possible future data/instruction access.
multiclass APreLoad<bits<1> read, bits<1> data, string opc> {
- def i12 : AXI<(outs), (ins addrmode_imm12:$addr), MiscFrm, IIC_Preload,
- !strconcat(opc, "\t$addr"),
+ def i12 : AXIM<(outs), (ins addrmode_imm12:$addr), AddrMode_i12, MiscFrm,
+ IIC_Preload, !strconcat(opc, "\t$addr"),
[(ARMPreload addrmode_imm12:$addr, (i32 read), (i32 data))]>,
Sched<[WritePreLd]> {
bits<4> Rt;
@@ -1824,6 +1967,18 @@ def DBG : AI<(outs), (ins imm0_15:$opt), MiscFrm, NoItinerary, "dbg", "\t$opt",
let Inst{3-0} = opt;
}
+// A8.8.247 UDF - Undefined (Encoding A1)
+def UDF : AInoP<(outs), (ins imm0_65535:$imm16), MiscFrm, NoItinerary,
+ "udf", "\t$imm16", [(int_arm_undefined imm0_65535:$imm16)]> {
+ bits<16> imm16;
+ let Inst{31-28} = 0b1110; // AL
+ let Inst{27-25} = 0b011;
+ let Inst{24-20} = 0b11111;
+ let Inst{19-8} = imm16{15-4};
+ let Inst{7-4} = 0b1111;
+ let Inst{3-0} = imm16{3-0};
+}
+
/*
* A5.4 Permanently UNDEFINED instructions.
*
@@ -2272,11 +2427,10 @@ def LDRSB : AI3ld<0b1101, 1, (outs GPR:$Rt), (ins addrmode3:$addr), LdMiscFrm,
[(set GPR:$Rt, (sextloadi8 addrmode3:$addr))]>;
let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
-// Load doubleword
-def LDRD : AI3ld<0b1101, 0, (outs GPR:$Rd, GPR:$dst2),
- (ins addrmode3:$addr), LdMiscFrm,
- IIC_iLoad_d_r, "ldrd", "\t$Rd, $dst2, $addr",
- []>, Requires<[IsARM, HasV5TE]>;
+ // Load doubleword
+ def LDRD : AI3ld<0b1101, 0, (outs GPR:$Rt, GPR:$Rt2), (ins addrmode3:$addr),
+ LdMiscFrm, IIC_iLoad_d_r, "ldrd", "\t$Rt, $Rt2, $addr", []>,
+ Requires<[IsARM, HasV5TE]>;
}
def LDA : AIldracq<0b00, (outs GPR:$Rt), (ins addr_offset_none:$addr),
@@ -2441,11 +2595,11 @@ def LDRT_POST_REG : AI2ldstidx<1, 0, 0, (outs GPR:$Rt, GPR:$Rn_wb),
let DecoderMethod = "DecodeAddrMode2IdxInstruction";
}
-def LDRT_POST_IMM : AI2ldstidx<1, 0, 0, (outs GPR:$Rt, GPR:$Rn_wb),
- (ins addr_offset_none:$addr, am2offset_imm:$offset),
- IndexModePost, LdFrm, IIC_iLoad_ru,
- "ldrt", "\t$Rt, $addr, $offset",
- "$addr.base = $Rn_wb", []> {
+def LDRT_POST_IMM
+ : AI2ldstidx<1, 0, 0, (outs GPR:$Rt, GPR:$Rn_wb),
+ (ins addr_offset_none:$addr, am2offset_imm:$offset),
+ IndexModePost, LdFrm, IIC_iLoad_ru,
+ "ldrt", "\t$Rt, $addr, $offset", "$addr.base = $Rn_wb", []> {
// {12} isAdd
// {11-0} imm12/Rm
bits<14> offset;
@@ -2477,11 +2631,11 @@ def LDRBT_POST_REG : AI2ldstidx<1, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb),
let DecoderMethod = "DecodeAddrMode2IdxInstruction";
}
-def LDRBT_POST_IMM : AI2ldstidx<1, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb),
- (ins addr_offset_none:$addr, am2offset_imm:$offset),
- IndexModePost, LdFrm, IIC_iLoad_bh_ru,
- "ldrbt", "\t$Rt, $addr, $offset",
- "$addr.base = $Rn_wb", []> {
+def LDRBT_POST_IMM
+ : AI2ldstidx<1, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb),
+ (ins addr_offset_none:$addr, am2offset_imm:$offset),
+ IndexModePost, LdFrm, IIC_iLoad_bh_ru,
+ "ldrbt", "\t$Rt, $addr, $offset", "$addr.base = $Rn_wb", []> {
// {12} isAdd
// {11-0} imm12/Rm
bits<14> offset;
@@ -2524,6 +2678,14 @@ defm LDRHT : AI3ldrT<0b1011, "ldrht">;
defm LDRSHT : AI3ldrT<0b1111, "ldrsht">;
}
+def LDRT_POST
+ : ARMAsmPseudo<"ldrt${q} $Rt, $addr", (ins addr_offset_none:$addr, pred:$q),
+ (outs GPR:$Rt)>;
+
+def LDRBT_POST
+ : ARMAsmPseudo<"ldrbt${q} $Rt, $addr", (ins addr_offset_none:$addr, pred:$q),
+ (outs GPR:$Rt)>;
+
// Store
// Stores with truncate
@@ -2532,12 +2694,12 @@ def STRH : AI3str<0b1011, (outs), (ins GPR:$Rt, addrmode3:$addr), StMiscFrm,
[(truncstorei16 GPR:$Rt, addrmode3:$addr)]>;
// Store doubleword
-let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in
-def STRD : AI3str<0b1111, (outs), (ins GPR:$Rt, GPR:$src2, addrmode3:$addr),
- StMiscFrm, IIC_iStore_d_r,
- "strd", "\t$Rt, $src2, $addr", []>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{21} = 0;
+let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
+ def STRD : AI3str<0b1111, (outs), (ins GPR:$Rt, GPR:$Rt2, addrmode3:$addr),
+ StMiscFrm, IIC_iStore_d_r, "strd", "\t$Rt, $Rt2, $addr", []>,
+ Requires<[IsARM, HasV5TE]> {
+ let Inst{21} = 0;
+ }
}
// Indexed stores
@@ -2546,7 +2708,8 @@ multiclass AI2_stridx<bit isByte, string opc,
def _PRE_IMM : AI2ldstidx<0, isByte, 1, (outs GPR:$Rn_wb),
(ins GPR:$Rt, addrmode_imm12_pre:$addr), IndexModePre,
StFrm, iii,
- opc, "\t$Rt, $addr!", "$addr.base = $Rn_wb", []> {
+ opc, "\t$Rt, $addr!",
+ "$addr.base = $Rn_wb,@earlyclobber $Rn_wb", []> {
bits<17> addr;
let Inst{25} = 0;
let Inst{23} = addr{12}; // U (add = ('U' == 1))
@@ -2558,7 +2721,8 @@ multiclass AI2_stridx<bit isByte, string opc,
def _PRE_REG : AI2ldstidx<0, isByte, 1, (outs GPR:$Rn_wb),
(ins GPR:$Rt, ldst_so_reg:$addr),
IndexModePre, StFrm, iir,
- opc, "\t$Rt, $addr!", "$addr.base = $Rn_wb", []> {
+ opc, "\t$Rt, $addr!",
+ "$addr.base = $Rn_wb,@earlyclobber $Rn_wb", []> {
bits<17> addr;
let Inst{25} = 1;
let Inst{23} = addr{12}; // U (add = ('U' == 1))
@@ -2571,7 +2735,7 @@ multiclass AI2_stridx<bit isByte, string opc,
(ins GPR:$Rt, addr_offset_none:$addr, am2offset_reg:$offset),
IndexModePost, StFrm, iir,
opc, "\t$Rt, $addr, $offset",
- "$addr.base = $Rn_wb", []> {
+ "$addr.base = $Rn_wb,@earlyclobber $Rn_wb", []> {
// {12} isAdd
// {11-0} imm12/Rm
bits<14> offset;
@@ -2589,7 +2753,7 @@ multiclass AI2_stridx<bit isByte, string opc,
(ins GPR:$Rt, addr_offset_none:$addr, am2offset_imm:$offset),
IndexModePost, StFrm, iii,
opc, "\t$Rt, $addr, $offset",
- "$addr.base = $Rn_wb", []> {
+ "$addr.base = $Rn_wb,@earlyclobber $Rn_wb", []> {
// {12} isAdd
// {11-0} imm12/Rm
bits<14> offset;
@@ -2746,11 +2910,11 @@ def STRBT_POST_REG : AI2ldstidx<0, 1, 0, (outs GPR:$Rn_wb),
let DecoderMethod = "DecodeAddrMode2IdxInstruction";
}
-def STRBT_POST_IMM : AI2ldstidx<0, 1, 0, (outs GPR:$Rn_wb),
- (ins GPR:$Rt, addr_offset_none:$addr, am2offset_imm:$offset),
- IndexModePost, StFrm, IIC_iStore_bh_ru,
- "strbt", "\t$Rt, $addr, $offset",
- "$addr.base = $Rn_wb", []> {
+def STRBT_POST_IMM
+ : AI2ldstidx<0, 1, 0, (outs GPR:$Rn_wb),
+ (ins GPR:$Rt, addr_offset_none:$addr, am2offset_imm:$offset),
+ IndexModePost, StFrm, IIC_iStore_bh_ru,
+ "strbt", "\t$Rt, $addr, $offset", "$addr.base = $Rn_wb", []> {
// {12} isAdd
// {11-0} imm12/Rm
bits<14> offset;
@@ -2763,6 +2927,10 @@ def STRBT_POST_IMM : AI2ldstidx<0, 1, 0, (outs GPR:$Rn_wb),
let DecoderMethod = "DecodeAddrMode2IdxInstruction";
}
+def STRBT_POST
+ : ARMAsmPseudo<"strbt${q} $Rt, $addr",
+ (ins GPR:$Rt, addr_offset_none:$addr, pred:$q)>;
+
let mayStore = 1, neverHasSideEffects = 1 in {
def STRT_POST_REG : AI2ldstidx<0, 0, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, addr_offset_none:$addr, am2offset_reg:$offset),
@@ -2783,11 +2951,11 @@ def STRT_POST_REG : AI2ldstidx<0, 0, 0, (outs GPR:$Rn_wb),
let DecoderMethod = "DecodeAddrMode2IdxInstruction";
}
-def STRT_POST_IMM : AI2ldstidx<0, 0, 0, (outs GPR:$Rn_wb),
- (ins GPR:$Rt, addr_offset_none:$addr, am2offset_imm:$offset),
- IndexModePost, StFrm, IIC_iStore_ru,
- "strt", "\t$Rt, $addr, $offset",
- "$addr.base = $Rn_wb", []> {
+def STRT_POST_IMM
+ : AI2ldstidx<0, 0, 0, (outs GPR:$Rn_wb),
+ (ins GPR:$Rt, addr_offset_none:$addr, am2offset_imm:$offset),
+ IndexModePost, StFrm, IIC_iStore_ru,
+ "strt", "\t$Rt, $addr, $offset", "$addr.base = $Rn_wb", []> {
// {12} isAdd
// {11-0} imm12/Rm
bits<14> offset;
@@ -2801,6 +2969,9 @@ def STRT_POST_IMM : AI2ldstidx<0, 0, 0, (outs GPR:$Rn_wb),
}
}
+def STRT_POST
+ : ARMAsmPseudo<"strt${q} $Rt, $addr",
+ (ins GPR:$Rt, addr_offset_none:$addr, pred:$q)>;
multiclass AI3strT<bits<4> op, string opc> {
def i : AI3ldstidxT<op, 0, (outs GPR:$base_wb),
@@ -3165,8 +3336,8 @@ def SBFX : I<(outs GPRnopc:$Rd),
let Inst{3-0} = Rn;
}
-def UBFX : I<(outs GPR:$Rd),
- (ins GPR:$Rn, imm0_31:$lsb, imm1_32:$width),
+def UBFX : I<(outs GPRnopc:$Rd),
+ (ins GPRnopc:$Rn, imm0_31:$lsb, imm1_32:$width),
AddrMode1, 4, IndexModeNone, DPFrm, IIC_iUNAsi,
"ubfx", "\t$Rd, $Rn, $lsb, $width", "", []>,
Requires<[IsARM, HasV6T2]> {
@@ -3601,21 +3772,22 @@ def MULv5: ARMPseudoExpand<(outs GPRnopc:$Rd), (ins GPRnopc:$Rn, GPRnopc:$Rm,
Requires<[IsARM, NoV6, UseMulOps]>;
}
-def MLA : AsMul1I32<0b0000001, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
+def MLA : AsMul1I32<0b0000001, (outs GPRnopc:$Rd),
+ (ins GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra),
IIC_iMAC32, "mla", "\t$Rd, $Rn, $Rm, $Ra",
- [(set GPR:$Rd, (add (mul GPR:$Rn, GPR:$Rm), GPR:$Ra))]>,
- Requires<[IsARM, HasV6, UseMulOps]> {
+ [(set GPRnopc:$Rd, (add (mul GPRnopc:$Rn, GPRnopc:$Rm), GPRnopc:$Ra))]>,
+ Requires<[IsARM, HasV6, UseMulOps]> {
bits<4> Ra;
let Inst{15-12} = Ra;
}
let Constraints = "@earlyclobber $Rd" in
-def MLAv5: ARMPseudoExpand<(outs GPR:$Rd),
- (ins GPR:$Rn, GPR:$Rm, GPR:$Ra, pred:$p, cc_out:$s),
- 4, IIC_iMAC32,
- [(set GPR:$Rd, (add (mul GPR:$Rn, GPR:$Rm), GPR:$Ra))],
- (MLA GPR:$Rd, GPR:$Rn, GPR:$Rm, GPR:$Ra, pred:$p, cc_out:$s)>,
- Requires<[IsARM, NoV6]>;
+def MLAv5: ARMPseudoExpand<(outs GPRnopc:$Rd),
+ (ins GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra,
+ pred:$p, cc_out:$s), 4, IIC_iMAC32,
+ [(set GPRnopc:$Rd, (add (mul GPRnopc:$Rn, GPRnopc:$Rm), GPRnopc:$Ra))],
+ (MLA GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra, pred:$p, cc_out:$s)>,
+ Requires<[IsARM, NoV6]>;
def MLS : AMul1I<0b0000011, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC32, "mls", "\t$Rd, $Rn, $Rm, $Ra",
@@ -3683,7 +3855,8 @@ def UMAAL : AMul1I <0b0000010, (outs GPR:$RdLo, GPR:$RdHi),
let Inst{3-0} = Rn;
}
-let Constraints = "$RLo = $RdLo,$RHi = $RdHi" in {
+let Constraints =
+ "@earlyclobber $RdLo,@earlyclobber $RdHi,$RLo = $RdLo,$RHi = $RdHi" in {
def SMLALv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi, pred:$p, cc_out:$s),
4, IIC_iMAC64, [],
@@ -3698,14 +3871,6 @@ def UMLALv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi),
Requires<[IsARM, NoV6]>;
}
-let Constraints = "@earlyclobber $RdLo,@earlyclobber $RdHi" in {
-def UMAALv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi),
- (ins GPR:$Rn, GPR:$Rm, pred:$p),
- 4, IIC_iMAC64, [],
- (UMAAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p)>,
- Requires<[IsARM, NoV6]>;
-}
-
} // neverHasSideEffects
// Most significant word multiply
@@ -3972,6 +4137,11 @@ def REV16 : AMiscA1I<0b01101011, 0b1011, (outs GPR:$Rd), (ins GPR:$Rm),
Requires<[IsARM, HasV6]>,
Sched<[WriteALU]>;
+def : ARMV6Pat<(srl (bswap (extloadi16 addrmode3:$addr)), (i32 16)),
+ (REV16 (LDRH addrmode3:$addr))>;
+def : ARMV6Pat<(truncstorei16 (srl (bswap GPR:$Rn), (i32 16)), addrmode3:$addr),
+ (STRH (REV16 GPR:$Rn), addrmode3:$addr)>;
+
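A hedged C++ sketch of the source-level pattern the two ARMV6Pat entries above are intended to catch (a byte-swapped halfword load/store; the exact selection depends on how the bswap reaches the DAG):

#include <cstdint>

// Sketch: a 16-bit byte swap adjacent to the load/store is what folds into
// the LDRH+REV16 and REV16+STRH pairs selected by the patterns above.
uint16_t loadSwapped(const uint16_t *p) { return __builtin_bswap16(*p); }
void storeSwapped(uint16_t *p, uint16_t v) { *p = __builtin_bswap16(v); }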
let AddedComplexity = 5 in
def REVSH : AMiscA1I<0b01101111, 0b1011, (outs GPR:$Rd), (ins GPR:$Rm),
IIC_iUNAr, "revsh", "\t$Rd, $Rm",
@@ -4276,7 +4446,7 @@ def instsyncb_opt : Operand<i32> {
let DecoderMethod = "DecodeInstSyncBarrierOption";
}
-// memory barriers protect the atomic sequences
+// Memory barriers protect the atomic sequences
let hasSideEffects = 1 in {
def DMB : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary,
"dmb", "\t$opt", [(int_arm_dmb (i32 imm0_15:$opt))]>,
@@ -4285,7 +4455,6 @@ def DMB : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary,
let Inst{31-4} = 0xf57ff05;
let Inst{3-0} = opt;
}
-}
def DSB : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary,
"dsb", "\t$opt", [(int_arm_dsb (i32 imm0_15:$opt))]>,
@@ -4297,226 +4466,19 @@ def DSB : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary,
// ISB has only full system option
def ISB : AInoP<(outs), (ins instsyncb_opt:$opt), MiscFrm, NoItinerary,
- "isb", "\t$opt", []>,
+ "isb", "\t$opt", [(int_arm_isb (i32 imm0_15:$opt))]>,
Requires<[IsARM, HasDB]> {
bits<4> opt;
let Inst{31-4} = 0xf57ff06;
let Inst{3-0} = opt;
}
+}
let usesCustomInserter = 1, Defs = [CPSR] in {
// Pseudo instruction that combines movs + predicated rsbmi
// to implement integer ABS
def ABS : ARMPseudoInst<(outs GPR:$dst), (ins GPR:$src), 8, NoItinerary, []>;
-
-// Atomic pseudo-insts which will be lowered to ldrex/strex loops.
-// (64-bit pseudos use a hand-written selection code).
- let mayLoad = 1, mayStore = 1 in {
- def ATOMIC_LOAD_ADD_I8 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_SUB_I8 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_AND_I8 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_OR_I8 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_XOR_I8 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_NAND_I8 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_MIN_I8 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_MAX_I8 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_UMIN_I8 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_UMAX_I8 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_SWAP_I8 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$new, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_CMP_SWAP_I8 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$old, GPR:$new, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_ADD_I16 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_SUB_I16 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_AND_I16 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_OR_I16 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_XOR_I16 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_NAND_I16 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_MIN_I16 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_MAX_I16 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_UMIN_I16 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_UMAX_I16 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_SWAP_I16 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$new, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_CMP_SWAP_I16 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$old, GPR:$new, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_ADD_I32 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_SUB_I32 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_AND_I32 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_OR_I32 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_XOR_I32 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_NAND_I32 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$incr, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_MIN_I32 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_MAX_I32 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_UMIN_I32 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_UMAX_I32 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$val, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_SWAP_I32 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$new, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_CMP_SWAP_I32 : PseudoInst<
- (outs GPR:$dst),
- (ins GPR:$ptr, GPR:$old, GPR:$new, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_ADD_I64 : PseudoInst<
- (outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_SUB_I64 : PseudoInst<
- (outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_AND_I64 : PseudoInst<
- (outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_OR_I64 : PseudoInst<
- (outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_XOR_I64 : PseudoInst<
- (outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_NAND_I64 : PseudoInst<
- (outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_MIN_I64 : PseudoInst<
- (outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_MAX_I64 : PseudoInst<
- (outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_UMIN_I64 : PseudoInst<
- (outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_LOAD_UMAX_I64 : PseudoInst<
- (outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_SWAP_I64 : PseudoInst<
- (outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
- NoItinerary, []>;
- def ATOMIC_CMP_SWAP_I64 : PseudoInst<
- (outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$cmp1, GPR:$cmp2,
- GPR:$set1, GPR:$set2, i32imm:$ordering),
- NoItinerary, []>;
- }
- let mayLoad = 1 in
- def ATOMIC_LOAD_I64 : PseudoInst<
- (outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, i32imm:$ordering),
- NoItinerary, []>;
- let mayStore = 1 in
- def ATOMIC_STORE_I64 : PseudoInst<
- (outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
- NoItinerary, []>;
}
let usesCustomInserter = 1 in {
@@ -4553,6 +4515,33 @@ def strex_4 : PatFrag<(ops node:$val, node:$ptr),
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
+def ldaex_1 : PatFrag<(ops node:$ptr), (int_arm_ldaex node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+
+def ldaex_2 : PatFrag<(ops node:$ptr), (int_arm_ldaex node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+
+def ldaex_4 : PatFrag<(ops node:$ptr), (int_arm_ldaex node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+
+def stlex_1 : PatFrag<(ops node:$val, node:$ptr),
+ (int_arm_stlex node:$val, node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+
+def stlex_2 : PatFrag<(ops node:$val, node:$ptr),
+ (int_arm_stlex node:$val, node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+
+def stlex_4 : PatFrag<(ops node:$val, node:$ptr),
+ (int_arm_stlex node:$val, node:$ptr), [{
+ return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+
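A hedged sketch of where these fragments typically matter: on an ARMv8 AArch32 subtarget, an acquire/release read-modify-write such as the one below is generally expanded to a load/store-exclusive retry loop, and the ldaex_*/stlex_* fragments are what select the acquiring/releasing variants by memory VT (the exact code depends on the subtarget and ordering):

#include <atomic>

// Sketch: fetch_add with acq_rel ordering; with ARMv8 AArch32 this usually
// becomes a ldaex/stlex loop instead of barriers around plain ldrex/strex.
int bumpCounter(std::atomic<int> &ctr) {
  return ctr.fetch_add(1, std::memory_order_acq_rel);
}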
let mayLoad = 1 in {
def LDREXB : AIldrex<0b10, (outs GPR:$Rt), (ins addr_offset_none:$addr),
NoItinerary, "ldrexb", "\t$Rt, $addr",
@@ -4570,11 +4559,14 @@ def LDREXD : AIldrex<0b01, (outs GPRPairOp:$Rt),(ins addr_offset_none:$addr),
}
def LDAEXB : AIldaex<0b10, (outs GPR:$Rt), (ins addr_offset_none:$addr),
- NoItinerary, "ldaexb", "\t$Rt, $addr", []>;
+ NoItinerary, "ldaexb", "\t$Rt, $addr",
+ [(set GPR:$Rt, (ldaex_1 addr_offset_none:$addr))]>;
def LDAEXH : AIldaex<0b11, (outs GPR:$Rt), (ins addr_offset_none:$addr),
- NoItinerary, "ldaexh", "\t$Rt, $addr", []>;
+ NoItinerary, "ldaexh", "\t$Rt, $addr",
+ [(set GPR:$Rt, (ldaex_2 addr_offset_none:$addr))]>;
def LDAEX : AIldaex<0b00, (outs GPR:$Rt), (ins addr_offset_none:$addr),
- NoItinerary, "ldaex", "\t$Rt, $addr", []>;
+ NoItinerary, "ldaex", "\t$Rt, $addr",
+ [(set GPR:$Rt, (ldaex_4 addr_offset_none:$addr))]>;
let hasExtraDefRegAllocReq = 1 in
def LDAEXD : AIldaex<0b01, (outs GPRPairOp:$Rt),(ins addr_offset_none:$addr),
NoItinerary, "ldaexd", "\t$Rt, $addr", []> {
@@ -4585,13 +4577,16 @@ def LDAEXD : AIldaex<0b01, (outs GPRPairOp:$Rt),(ins addr_offset_none:$addr),
let mayStore = 1, Constraints = "@earlyclobber $Rd" in {
def STREXB: AIstrex<0b10, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr),
NoItinerary, "strexb", "\t$Rd, $Rt, $addr",
- [(set GPR:$Rd, (strex_1 GPR:$Rt, addr_offset_none:$addr))]>;
+ [(set GPR:$Rd, (strex_1 GPR:$Rt,
+ addr_offset_none:$addr))]>;
def STREXH: AIstrex<0b11, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr),
NoItinerary, "strexh", "\t$Rd, $Rt, $addr",
- [(set GPR:$Rd, (strex_2 GPR:$Rt, addr_offset_none:$addr))]>;
+ [(set GPR:$Rd, (strex_2 GPR:$Rt,
+ addr_offset_none:$addr))]>;
def STREX : AIstrex<0b00, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr),
NoItinerary, "strex", "\t$Rd, $Rt, $addr",
- [(set GPR:$Rd, (strex_4 GPR:$Rt, addr_offset_none:$addr))]>;
+ [(set GPR:$Rd, (strex_4 GPR:$Rt,
+ addr_offset_none:$addr))]>;
let hasExtraSrcRegAllocReq = 1 in
def STREXD : AIstrex<0b01, (outs GPR:$Rd),
(ins GPRPairOp:$Rt, addr_offset_none:$addr),
@@ -4600,13 +4595,16 @@ def STREXD : AIstrex<0b01, (outs GPR:$Rd),
}
def STLEXB: AIstlex<0b10, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr),
NoItinerary, "stlexb", "\t$Rd, $Rt, $addr",
- []>;
+ [(set GPR:$Rd,
+ (stlex_1 GPR:$Rt, addr_offset_none:$addr))]>;
def STLEXH: AIstlex<0b11, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr),
NoItinerary, "stlexh", "\t$Rd, $Rt, $addr",
- []>;
+ [(set GPR:$Rd,
+ (stlex_2 GPR:$Rt, addr_offset_none:$addr))]>;
def STLEX : AIstlex<0b00, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr),
NoItinerary, "stlex", "\t$Rd, $Rt, $addr",
- []>;
+ [(set GPR:$Rd,
+ (stlex_4 GPR:$Rt, addr_offset_none:$addr))]>;
let hasExtraSrcRegAllocReq = 1 in
def STLEXD : AIstlex<0b01, (outs GPR:$Rd),
(ins GPRPairOp:$Rt, addr_offset_none:$addr),
@@ -4621,15 +4619,16 @@ def CLREX : AXI<(outs), (ins), MiscFrm, NoItinerary, "clrex",
let Inst{31-0} = 0b11110101011111111111000000011111;
}
-def : ARMPat<(and (ldrex_1 addr_offset_none:$addr), 0xff),
- (LDREXB addr_offset_none:$addr)>;
-def : ARMPat<(and (ldrex_2 addr_offset_none:$addr), 0xffff),
- (LDREXH addr_offset_none:$addr)>;
def : ARMPat<(strex_1 (and GPR:$Rt, 0xff), addr_offset_none:$addr),
(STREXB GPR:$Rt, addr_offset_none:$addr)>;
def : ARMPat<(strex_2 (and GPR:$Rt, 0xffff), addr_offset_none:$addr),
(STREXH GPR:$Rt, addr_offset_none:$addr)>;
+def : ARMPat<(stlex_1 (and GPR:$Rt, 0xff), addr_offset_none:$addr),
+ (STLEXB GPR:$Rt, addr_offset_none:$addr)>;
+def : ARMPat<(stlex_2 (and GPR:$Rt, 0xffff), addr_offset_none:$addr),
+ (STLEXH GPR:$Rt, addr_offset_none:$addr)>;
+
class acquiring_load<PatFrag base>
: PatFrag<(ops node:$ptr), (base node:$ptr), [{
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
@@ -4960,7 +4959,7 @@ def MCR2 : MovRCopro2<"mcr2", 0 /* from ARM core register to coprocessor */,
[(int_arm_mcr2 imm:$cop, imm:$opc1, GPR:$Rt, imm:$CRn,
imm:$CRm, imm:$opc2)]>,
Requires<[PreV8]>;
-def : ARMInstAlias<"mcr2$ $cop, $opc1, $Rt, $CRn, $CRm",
+def : ARMInstAlias<"mcr2 $cop, $opc1, $Rt, $CRn, $CRm",
(MCR2 p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
c_imm:$CRm, 0)>;
def MRC2 : MovRCopro2<"mrc2", 1 /* from coprocessor to ARM core register */,
@@ -4968,7 +4967,7 @@ def MRC2 : MovRCopro2<"mrc2", 1 /* from coprocessor to ARM core register */,
(ins p_imm:$cop, imm0_7:$opc1, c_imm:$CRn, c_imm:$CRm,
imm0_7:$opc2), []>,
Requires<[PreV8]>;
-def : ARMInstAlias<"mrc2$ $cop, $opc1, $Rt, $CRn, $CRm",
+def : ARMInstAlias<"mrc2 $cop, $opc1, $Rt, $CRn, $CRm",
(MRC2 GPRwithAPSR:$Rt, p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
c_imm:$CRm, 0)>;
@@ -5097,6 +5096,19 @@ def MSRi : ABI<0b0011, (outs), (ins msr_mask:$mask, so_imm:$a), NoItinerary,
let Inst{11-0} = a;
}
+// Dynamic stack allocation yields a _chkstk call for Windows targets. These
+// calls are needed to probe the stack when allocating more than 4 KiB in one
+// go. Touching the stack at 4 KiB increments is necessary to ensure that the
+// guard pages used by the OS virtual memory manager are allocated in the
+// correct sequence.
+// The main point of having a separate instruction is the extra unmodelled
+// effects (compared to an ordinary call), such as the stack pointer change.
+
+def win__chkstk : SDNode<"ARMISD::WIN__CHKSTK", SDTNone,
+ [SDNPHasChain, SDNPSideEffect]>;
+let usesCustomInserter = 1, Uses = [R4], Defs = [R4, SP] in
+ def WIN__CHKSTK : PseudoInst<(outs), (ins), NoItinerary, [(win__chkstk)]>;
+
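As an illustration of when the WIN__CHKSTK pseudo is needed (hypothetical function, Windows-on-ARM target assumed): any frame larger than one 4 KiB page forces the prologue to probe the stack before dropping the stack pointer past unprobed guard pages:

#include <cstring>

extern "C" int consume(const char *buf, unsigned len);

// Sketch: ~16 KiB of locals; on a Windows target the prologue must reach the
// new stack bottom via __chkstk so each guard page is touched in order.
int bigFrame() {
  char buf[16384];
  std::memset(buf, 0, sizeof(buf));
  return consume(buf, sizeof(buf));
}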
//===----------------------------------------------------------------------===//
// TLS Instructions
//
@@ -5104,9 +5116,11 @@ def MSRi : ABI<0b0011, (outs), (ins msr_mask:$mask, so_imm:$a), NoItinerary,
// __aeabi_read_tp preserves the registers r1-r3.
// This is a pseudo inst so that we can get the encoding right,
// complete with fixup for the aeabi_read_tp function.
+// TPsoft is valid for ARM mode only; for Thumb mode, a matching tTPsoft
+// pattern is defined in "ARMInstrThumb.td".
let isCall = 1,
Defs = [R0, R12, LR, CPSR], Uses = [SP] in {
- def TPsoft : PseudoInst<(outs), (ins), IIC_Br,
+ def TPsoft : ARMPseudoInst<(outs), (ins), 4, IIC_Br,
[(set R0, ARMthread_pointer)]>, Sched<[WriteBr]>;
}
@@ -5184,6 +5198,10 @@ def MOVi32imm : PseudoInst<(outs GPR:$dst), (ins i32imm:$src), IIC_iMOVix2,
[(set GPR:$dst, (arm_i32imm:$src))]>,
Requires<[IsARM]>;
+def LDRLIT_ga_abs : PseudoInst<(outs GPR:$dst), (ins i32imm:$src), IIC_iLoad_i,
+ [(set GPR:$dst, (ARMWrapper tglobaladdr:$src))]>,
+ Requires<[IsARM, DontUseMovt]>;
+
// Pseudo instruction that combines movw + movt + add pc (if PIC).
// It also makes it possible to rematerialize the instructions.
// FIXME: Remove this when we can do generalized remat and when machine licm
@@ -5194,10 +5212,17 @@ def MOV_ga_pcrel : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr),
[(set GPR:$dst, (ARMWrapperPIC tglobaladdr:$addr))]>,
Requires<[IsARM, UseMovt]>;
-def MOV_ga_dyn : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr),
- IIC_iMOVix2,
- [(set GPR:$dst, (ARMWrapperDYN tglobaladdr:$addr))]>,
- Requires<[IsARM, UseMovt]>;
+def LDRLIT_ga_pcrel : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr),
+ IIC_iLoadiALU,
+ [(set GPR:$dst,
+ (ARMWrapperPIC tglobaladdr:$addr))]>,
+ Requires<[IsARM, DontUseMovt]>;
+
+def LDRLIT_ga_pcrel_ldr : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr),
+ NoItinerary,
+ [(set GPR:$dst,
+ (load (ARMWrapperPIC tglobaladdr:$addr)))]>,
+ Requires<[IsARM, DontUseMovt]>;
let AddedComplexity = 10 in
def MOV_ga_pcrel_ldr : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr),
@@ -5207,8 +5232,6 @@ def MOV_ga_pcrel_ldr : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr),
} // isReMaterializable
// ConstantPool, GlobalAddress, and JumpTable
-def : ARMPat<(ARMWrapper tglobaladdr :$dst), (LEApcrel tglobaladdr :$dst)>,
- Requires<[IsARM, DontUseMovt]>;
def : ARMPat<(ARMWrapper tconstpool :$dst), (LEApcrel tconstpool :$dst)>;
def : ARMPat<(ARMWrapper tglobaladdr :$dst), (MOVi32imm tglobaladdr :$dst)>,
Requires<[IsARM, UseMovt]>;
@@ -5544,9 +5567,22 @@ def : ARMInstAlias<"neg${s}${p} $Rd, $Rm",
def : InstAlias<"nop${p}", (MOVr R0, R0, pred:$p, zero_reg)>,
Requires<[IsARM, NoV6]>;
-// UMULL/SMULL are available on all arches, but the instruction definitions
-// need difference constraints pre-v6. Use these aliases for the assembly
-// parsing on pre-v6.
+// MUL/UMLAL/SMLAL/UMULL/SMULL are available on all arches, but
+// the instruction definitions need different constraints pre-v6.
+// Use these aliases for the assembly parsing on pre-v6.
+def : InstAlias<"mul${s}${p} $Rd, $Rn, $Rm",
+ (MUL GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p, cc_out:$s)>,
+ Requires<[IsARM, NoV6]>;
+def : InstAlias<"mla${s}${p} $Rd, $Rn, $Rm, $Ra",
+ (MLA GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra,
+ pred:$p, cc_out:$s)>,
+ Requires<[IsARM, NoV6]>;
+def : InstAlias<"smlal${s}${p} $RdLo, $RdHi, $Rn, $Rm",
+ (SMLAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
+ Requires<[IsARM, NoV6]>;
+def : InstAlias<"umlal${s}${p} $RdLo, $RdHi, $Rn, $Rm",
+ (UMLAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
+ Requires<[IsARM, NoV6]>;
def : InstAlias<"smull${s}${p} $RdLo, $RdHi, $Rn, $Rm",
(SMULL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td b/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
index 0b05c08..c02bb3b 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
@@ -39,6 +39,49 @@ def nImmVMOVI32 : Operand<i32> {
let PrintMethod = "printNEONModImmOperand";
let ParserMatchClass = nImmVMOVI32AsmOperand;
}
+
+def nImmVMOVI16AsmOperandByteReplicate :
+ AsmOperandClass {
+ let Name = "NEONi16vmovByteReplicate";
+ let PredicateMethod = "isNEONi16ByteReplicate";
+ let RenderMethod = "addNEONvmovByteReplicateOperands";
+}
+def nImmVMOVI32AsmOperandByteReplicate :
+ AsmOperandClass {
+ let Name = "NEONi32vmovByteReplicate";
+ let PredicateMethod = "isNEONi32ByteReplicate";
+ let RenderMethod = "addNEONvmovByteReplicateOperands";
+}
+def nImmVMVNI16AsmOperandByteReplicate :
+ AsmOperandClass {
+ let Name = "NEONi16invByteReplicate";
+ let PredicateMethod = "isNEONi16ByteReplicate";
+ let RenderMethod = "addNEONinvByteReplicateOperands";
+}
+def nImmVMVNI32AsmOperandByteReplicate :
+ AsmOperandClass {
+ let Name = "NEONi32invByteReplicate";
+ let PredicateMethod = "isNEONi32ByteReplicate";
+ let RenderMethod = "addNEONinvByteReplicateOperands";
+}
+
+def nImmVMOVI16ByteReplicate : Operand<i32> {
+ let PrintMethod = "printNEONModImmOperand";
+ let ParserMatchClass = nImmVMOVI16AsmOperandByteReplicate;
+}
+def nImmVMOVI32ByteReplicate : Operand<i32> {
+ let PrintMethod = "printNEONModImmOperand";
+ let ParserMatchClass = nImmVMOVI32AsmOperandByteReplicate;
+}
+def nImmVMVNI16ByteReplicate : Operand<i32> {
+ let PrintMethod = "printNEONModImmOperand";
+ let ParserMatchClass = nImmVMVNI16AsmOperandByteReplicate;
+}
+def nImmVMVNI32ByteReplicate : Operand<i32> {
+ let PrintMethod = "printNEONModImmOperand";
+ let ParserMatchClass = nImmVMVNI32AsmOperandByteReplicate;
+}
+
def nImmVMOVI32NegAsmOperand : AsmOperandClass { let Name = "NEONi32vmovNeg"; }
def nImmVMOVI32Neg : Operand<i32> {
let PrintMethod = "printNEONModImmOperand";
@@ -466,9 +509,6 @@ def SDTARMVSHINS : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
def NEONvshl : SDNode<"ARMISD::VSHL", SDTARMVSH>;
def NEONvshrs : SDNode<"ARMISD::VSHRs", SDTARMVSH>;
def NEONvshru : SDNode<"ARMISD::VSHRu", SDTARMVSH>;
-def NEONvshlls : SDNode<"ARMISD::VSHLLs", SDTARMVSHX>;
-def NEONvshllu : SDNode<"ARMISD::VSHLLu", SDTARMVSHX>;
-def NEONvshlli : SDNode<"ARMISD::VSHLLi", SDTARMVSHX>;
def NEONvshrn : SDNode<"ARMISD::VSHRN", SDTARMVSHX>;
def NEONvrshrs : SDNode<"ARMISD::VRSHRs", SDTARMVSH>;
@@ -620,37 +660,37 @@ class VLDQQQQWBPseudo<InstrItinClass itin>
let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
// VLD1 : Vector Load (multiple single elements)
-class VLD1D<bits<4> op7_4, string Dt>
+class VLD1D<bits<4> op7_4, string Dt, Operand AddrMode>
: NLdSt<0,0b10,0b0111,op7_4, (outs VecListOneD:$Vd),
- (ins addrmode6:$Rn), IIC_VLD1,
+ (ins AddrMode:$Rn), IIC_VLD1,
"vld1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLDST1Instruction";
}
-class VLD1Q<bits<4> op7_4, string Dt>
+class VLD1Q<bits<4> op7_4, string Dt, Operand AddrMode>
: NLdSt<0,0b10,0b1010,op7_4, (outs VecListDPair:$Vd),
- (ins addrmode6:$Rn), IIC_VLD1x2,
+ (ins AddrMode:$Rn), IIC_VLD1x2,
"vld1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVLDST1Instruction";
}
-def VLD1d8 : VLD1D<{0,0,0,?}, "8">;
-def VLD1d16 : VLD1D<{0,1,0,?}, "16">;
-def VLD1d32 : VLD1D<{1,0,0,?}, "32">;
-def VLD1d64 : VLD1D<{1,1,0,?}, "64">;
+def VLD1d8 : VLD1D<{0,0,0,?}, "8", addrmode6align64>;
+def VLD1d16 : VLD1D<{0,1,0,?}, "16", addrmode6align64>;
+def VLD1d32 : VLD1D<{1,0,0,?}, "32", addrmode6align64>;
+def VLD1d64 : VLD1D<{1,1,0,?}, "64", addrmode6align64>;
-def VLD1q8 : VLD1Q<{0,0,?,?}, "8">;
-def VLD1q16 : VLD1Q<{0,1,?,?}, "16">;
-def VLD1q32 : VLD1Q<{1,0,?,?}, "32">;
-def VLD1q64 : VLD1Q<{1,1,?,?}, "64">;
+def VLD1q8 : VLD1Q<{0,0,?,?}, "8", addrmode6align64or128>;
+def VLD1q16 : VLD1Q<{0,1,?,?}, "16", addrmode6align64or128>;
+def VLD1q32 : VLD1Q<{1,0,?,?}, "32", addrmode6align64or128>;
+def VLD1q64 : VLD1Q<{1,1,?,?}, "64", addrmode6align64or128>;
// ...with address register writeback:
-multiclass VLD1DWB<bits<4> op7_4, string Dt> {
+multiclass VLD1DWB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<0,0b10, 0b0111,op7_4, (outs VecListOneD:$Vd, GPR:$wb),
- (ins addrmode6:$Rn), IIC_VLD1u,
+ (ins AddrMode:$Rn), IIC_VLD1u,
"vld1", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -658,16 +698,16 @@ multiclass VLD1DWB<bits<4> op7_4, string Dt> {
let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b10,0b0111,op7_4, (outs VecListOneD:$Vd, GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1u,
+ (ins AddrMode:$Rn, rGPR:$Rm), IIC_VLD1u,
"vld1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLDST1Instruction";
}
}
-multiclass VLD1QWB<bits<4> op7_4, string Dt> {
+multiclass VLD1QWB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<0,0b10,0b1010,op7_4, (outs VecListDPair:$Vd, GPR:$wb),
- (ins addrmode6:$Rn), IIC_VLD1x2u,
+ (ins AddrMode:$Rn), IIC_VLD1x2u,
"vld1", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -675,7 +715,7 @@ multiclass VLD1QWB<bits<4> op7_4, string Dt> {
let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b10,0b1010,op7_4, (outs VecListDPair:$Vd, GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u,
+ (ins AddrMode:$Rn, rGPR:$Rm), IIC_VLD1x2u,
"vld1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{5-4} = Rn{5-4};
@@ -683,27 +723,27 @@ multiclass VLD1QWB<bits<4> op7_4, string Dt> {
}
}
-defm VLD1d8wb : VLD1DWB<{0,0,0,?}, "8">;
-defm VLD1d16wb : VLD1DWB<{0,1,0,?}, "16">;
-defm VLD1d32wb : VLD1DWB<{1,0,0,?}, "32">;
-defm VLD1d64wb : VLD1DWB<{1,1,0,?}, "64">;
-defm VLD1q8wb : VLD1QWB<{0,0,?,?}, "8">;
-defm VLD1q16wb : VLD1QWB<{0,1,?,?}, "16">;
-defm VLD1q32wb : VLD1QWB<{1,0,?,?}, "32">;
-defm VLD1q64wb : VLD1QWB<{1,1,?,?}, "64">;
+defm VLD1d8wb : VLD1DWB<{0,0,0,?}, "8", addrmode6align64>;
+defm VLD1d16wb : VLD1DWB<{0,1,0,?}, "16", addrmode6align64>;
+defm VLD1d32wb : VLD1DWB<{1,0,0,?}, "32", addrmode6align64>;
+defm VLD1d64wb : VLD1DWB<{1,1,0,?}, "64", addrmode6align64>;
+defm VLD1q8wb : VLD1QWB<{0,0,?,?}, "8", addrmode6align64or128>;
+defm VLD1q16wb : VLD1QWB<{0,1,?,?}, "16", addrmode6align64or128>;
+defm VLD1q32wb : VLD1QWB<{1,0,?,?}, "32", addrmode6align64or128>;
+defm VLD1q64wb : VLD1QWB<{1,1,?,?}, "64", addrmode6align64or128>;
// ...with 3 registers
-class VLD1D3<bits<4> op7_4, string Dt>
+class VLD1D3<bits<4> op7_4, string Dt, Operand AddrMode>
: NLdSt<0,0b10,0b0110,op7_4, (outs VecListThreeD:$Vd),
- (ins addrmode6:$Rn), IIC_VLD1x3, "vld1", Dt,
+ (ins AddrMode:$Rn), IIC_VLD1x3, "vld1", Dt,
"$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLDST1Instruction";
}
-multiclass VLD1D3WB<bits<4> op7_4, string Dt> {
+multiclass VLD1D3WB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<0,0b10,0b0110, op7_4, (outs VecListThreeD:$Vd, GPR:$wb),
- (ins addrmode6:$Rn), IIC_VLD1x2u,
+ (ins AddrMode:$Rn), IIC_VLD1x2u,
"vld1", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -711,7 +751,7 @@ multiclass VLD1D3WB<bits<4> op7_4, string Dt> {
let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b10,0b0110,op7_4, (outs VecListThreeD:$Vd, GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u,
+ (ins AddrMode:$Rn, rGPR:$Rm), IIC_VLD1x2u,
"vld1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{4} = Rn{4};
@@ -719,32 +759,32 @@ multiclass VLD1D3WB<bits<4> op7_4, string Dt> {
}
}
-def VLD1d8T : VLD1D3<{0,0,0,?}, "8">;
-def VLD1d16T : VLD1D3<{0,1,0,?}, "16">;
-def VLD1d32T : VLD1D3<{1,0,0,?}, "32">;
-def VLD1d64T : VLD1D3<{1,1,0,?}, "64">;
+def VLD1d8T : VLD1D3<{0,0,0,?}, "8", addrmode6align64>;
+def VLD1d16T : VLD1D3<{0,1,0,?}, "16", addrmode6align64>;
+def VLD1d32T : VLD1D3<{1,0,0,?}, "32", addrmode6align64>;
+def VLD1d64T : VLD1D3<{1,1,0,?}, "64", addrmode6align64>;
-defm VLD1d8Twb : VLD1D3WB<{0,0,0,?}, "8">;
-defm VLD1d16Twb : VLD1D3WB<{0,1,0,?}, "16">;
-defm VLD1d32Twb : VLD1D3WB<{1,0,0,?}, "32">;
-defm VLD1d64Twb : VLD1D3WB<{1,1,0,?}, "64">;
+defm VLD1d8Twb : VLD1D3WB<{0,0,0,?}, "8", addrmode6align64>;
+defm VLD1d16Twb : VLD1D3WB<{0,1,0,?}, "16", addrmode6align64>;
+defm VLD1d32Twb : VLD1D3WB<{1,0,0,?}, "32", addrmode6align64>;
+defm VLD1d64Twb : VLD1D3WB<{1,1,0,?}, "64", addrmode6align64>;
def VLD1d64TPseudo : VLDQQPseudo<IIC_VLD1x3>;
def VLD1d64TPseudoWB_fixed : VLDQQWBfixedPseudo<IIC_VLD1x3>;
def VLD1d64TPseudoWB_register : VLDQQWBregisterPseudo<IIC_VLD1x3>;
// ...with 4 registers
-class VLD1D4<bits<4> op7_4, string Dt>
+class VLD1D4<bits<4> op7_4, string Dt, Operand AddrMode>
: NLdSt<0, 0b10, 0b0010, op7_4, (outs VecListFourD:$Vd),
- (ins addrmode6:$Rn), IIC_VLD1x4, "vld1", Dt,
+ (ins AddrMode:$Rn), IIC_VLD1x4, "vld1", Dt,
"$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVLDST1Instruction";
}
-multiclass VLD1D4WB<bits<4> op7_4, string Dt> {
+multiclass VLD1D4WB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<0,0b10,0b0010, op7_4, (outs VecListFourD:$Vd, GPR:$wb),
- (ins addrmode6:$Rn), IIC_VLD1x2u,
+ (ins AddrMode:$Rn), IIC_VLD1x2u,
"vld1", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -752,7 +792,7 @@ multiclass VLD1D4WB<bits<4> op7_4, string Dt> {
let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b10,0b0010,op7_4, (outs VecListFourD:$Vd, GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u,
+ (ins AddrMode:$Rn, rGPR:$Rm), IIC_VLD1x2u,
"vld1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{5-4} = Rn{5-4};
@@ -760,15 +800,15 @@ multiclass VLD1D4WB<bits<4> op7_4, string Dt> {
}
}
-def VLD1d8Q : VLD1D4<{0,0,?,?}, "8">;
-def VLD1d16Q : VLD1D4<{0,1,?,?}, "16">;
-def VLD1d32Q : VLD1D4<{1,0,?,?}, "32">;
-def VLD1d64Q : VLD1D4<{1,1,?,?}, "64">;
+def VLD1d8Q : VLD1D4<{0,0,?,?}, "8", addrmode6align64or128or256>;
+def VLD1d16Q : VLD1D4<{0,1,?,?}, "16", addrmode6align64or128or256>;
+def VLD1d32Q : VLD1D4<{1,0,?,?}, "32", addrmode6align64or128or256>;
+def VLD1d64Q : VLD1D4<{1,1,?,?}, "64", addrmode6align64or128or256>;
-defm VLD1d8Qwb : VLD1D4WB<{0,0,?,?}, "8">;
-defm VLD1d16Qwb : VLD1D4WB<{0,1,?,?}, "16">;
-defm VLD1d32Qwb : VLD1D4WB<{1,0,?,?}, "32">;
-defm VLD1d64Qwb : VLD1D4WB<{1,1,?,?}, "64">;
+defm VLD1d8Qwb : VLD1D4WB<{0,0,?,?}, "8", addrmode6align64or128or256>;
+defm VLD1d16Qwb : VLD1D4WB<{0,1,?,?}, "16", addrmode6align64or128or256>;
+defm VLD1d32Qwb : VLD1D4WB<{1,0,?,?}, "32", addrmode6align64or128or256>;
+defm VLD1d64Qwb : VLD1D4WB<{1,1,?,?}, "64", addrmode6align64or128or256>;
def VLD1d64QPseudo : VLDQQPseudo<IIC_VLD1x4>;
def VLD1d64QPseudoWB_fixed : VLDQQWBfixedPseudo<IIC_VLD1x4>;
@@ -776,22 +816,28 @@ def VLD1d64QPseudoWB_register : VLDQQWBregisterPseudo<IIC_VLD1x4>;
// VLD2 : Vector Load (multiple 2-element structures)
class VLD2<bits<4> op11_8, bits<4> op7_4, string Dt, RegisterOperand VdTy,
- InstrItinClass itin>
+ InstrItinClass itin, Operand AddrMode>
: NLdSt<0, 0b10, op11_8, op7_4, (outs VdTy:$Vd),
- (ins addrmode6:$Rn), itin,
+ (ins AddrMode:$Rn), itin,
"vld2", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVLDST2Instruction";
}
-def VLD2d8 : VLD2<0b1000, {0,0,?,?}, "8", VecListDPair, IIC_VLD2>;
-def VLD2d16 : VLD2<0b1000, {0,1,?,?}, "16", VecListDPair, IIC_VLD2>;
-def VLD2d32 : VLD2<0b1000, {1,0,?,?}, "32", VecListDPair, IIC_VLD2>;
+def VLD2d8 : VLD2<0b1000, {0,0,?,?}, "8", VecListDPair, IIC_VLD2,
+ addrmode6align64or128>;
+def VLD2d16 : VLD2<0b1000, {0,1,?,?}, "16", VecListDPair, IIC_VLD2,
+ addrmode6align64or128>;
+def VLD2d32 : VLD2<0b1000, {1,0,?,?}, "32", VecListDPair, IIC_VLD2,
+ addrmode6align64or128>;
-def VLD2q8 : VLD2<0b0011, {0,0,?,?}, "8", VecListFourD, IIC_VLD2x2>;
-def VLD2q16 : VLD2<0b0011, {0,1,?,?}, "16", VecListFourD, IIC_VLD2x2>;
-def VLD2q32 : VLD2<0b0011, {1,0,?,?}, "32", VecListFourD, IIC_VLD2x2>;
+def VLD2q8 : VLD2<0b0011, {0,0,?,?}, "8", VecListFourD, IIC_VLD2x2,
+ addrmode6align64or128or256>;
+def VLD2q16 : VLD2<0b0011, {0,1,?,?}, "16", VecListFourD, IIC_VLD2x2,
+ addrmode6align64or128or256>;
+def VLD2q32 : VLD2<0b0011, {1,0,?,?}, "32", VecListFourD, IIC_VLD2x2,
+ addrmode6align64or128or256>;
def VLD2q8Pseudo : VLDQQPseudo<IIC_VLD2x2>;
def VLD2q16Pseudo : VLDQQPseudo<IIC_VLD2x2>;
@@ -799,9 +845,9 @@ def VLD2q32Pseudo : VLDQQPseudo<IIC_VLD2x2>;
// ...with address register writeback:
multiclass VLD2WB<bits<4> op11_8, bits<4> op7_4, string Dt,
- RegisterOperand VdTy, InstrItinClass itin> {
+ RegisterOperand VdTy, InstrItinClass itin, Operand AddrMode> {
def _fixed : NLdSt<0, 0b10, op11_8, op7_4, (outs VdTy:$Vd, GPR:$wb),
- (ins addrmode6:$Rn), itin,
+ (ins AddrMode:$Rn), itin,
"vld2", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -809,7 +855,7 @@ multiclass VLD2WB<bits<4> op11_8, bits<4> op7_4, string Dt,
let DecoderMethod = "DecodeVLDST2Instruction";
}
def _register : NLdSt<0, 0b10, op11_8, op7_4, (outs VdTy:$Vd, GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm), itin,
+ (ins AddrMode:$Rn, rGPR:$Rm), itin,
"vld2", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{5-4} = Rn{5-4};
@@ -817,13 +863,19 @@ multiclass VLD2WB<bits<4> op11_8, bits<4> op7_4, string Dt,
}
}
-defm VLD2d8wb : VLD2WB<0b1000, {0,0,?,?}, "8", VecListDPair, IIC_VLD2u>;
-defm VLD2d16wb : VLD2WB<0b1000, {0,1,?,?}, "16", VecListDPair, IIC_VLD2u>;
-defm VLD2d32wb : VLD2WB<0b1000, {1,0,?,?}, "32", VecListDPair, IIC_VLD2u>;
+defm VLD2d8wb : VLD2WB<0b1000, {0,0,?,?}, "8", VecListDPair, IIC_VLD2u,
+ addrmode6align64or128>;
+defm VLD2d16wb : VLD2WB<0b1000, {0,1,?,?}, "16", VecListDPair, IIC_VLD2u,
+ addrmode6align64or128>;
+defm VLD2d32wb : VLD2WB<0b1000, {1,0,?,?}, "32", VecListDPair, IIC_VLD2u,
+ addrmode6align64or128>;
-defm VLD2q8wb : VLD2WB<0b0011, {0,0,?,?}, "8", VecListFourD, IIC_VLD2x2u>;
-defm VLD2q16wb : VLD2WB<0b0011, {0,1,?,?}, "16", VecListFourD, IIC_VLD2x2u>;
-defm VLD2q32wb : VLD2WB<0b0011, {1,0,?,?}, "32", VecListFourD, IIC_VLD2x2u>;
+defm VLD2q8wb : VLD2WB<0b0011, {0,0,?,?}, "8", VecListFourD, IIC_VLD2x2u,
+ addrmode6align64or128or256>;
+defm VLD2q16wb : VLD2WB<0b0011, {0,1,?,?}, "16", VecListFourD, IIC_VLD2x2u,
+ addrmode6align64or128or256>;
+defm VLD2q32wb : VLD2WB<0b0011, {1,0,?,?}, "32", VecListFourD, IIC_VLD2x2u,
+ addrmode6align64or128or256>;
def VLD2q8PseudoWB_fixed : VLDQQWBfixedPseudo<IIC_VLD2x2u>;
def VLD2q16PseudoWB_fixed : VLDQQWBfixedPseudo<IIC_VLD2x2u>;
@@ -833,12 +885,18 @@ def VLD2q16PseudoWB_register : VLDQQWBregisterPseudo<IIC_VLD2x2u>;
def VLD2q32PseudoWB_register : VLDQQWBregisterPseudo<IIC_VLD2x2u>;
// ...with double-spaced registers
-def VLD2b8 : VLD2<0b1001, {0,0,?,?}, "8", VecListDPairSpaced, IIC_VLD2>;
-def VLD2b16 : VLD2<0b1001, {0,1,?,?}, "16", VecListDPairSpaced, IIC_VLD2>;
-def VLD2b32 : VLD2<0b1001, {1,0,?,?}, "32", VecListDPairSpaced, IIC_VLD2>;
-defm VLD2b8wb : VLD2WB<0b1001, {0,0,?,?}, "8", VecListDPairSpaced, IIC_VLD2u>;
-defm VLD2b16wb : VLD2WB<0b1001, {0,1,?,?}, "16", VecListDPairSpaced, IIC_VLD2u>;
-defm VLD2b32wb : VLD2WB<0b1001, {1,0,?,?}, "32", VecListDPairSpaced, IIC_VLD2u>;
+def VLD2b8 : VLD2<0b1001, {0,0,?,?}, "8", VecListDPairSpaced, IIC_VLD2,
+ addrmode6align64or128>;
+def VLD2b16 : VLD2<0b1001, {0,1,?,?}, "16", VecListDPairSpaced, IIC_VLD2,
+ addrmode6align64or128>;
+def VLD2b32 : VLD2<0b1001, {1,0,?,?}, "32", VecListDPairSpaced, IIC_VLD2,
+ addrmode6align64or128>;
+defm VLD2b8wb : VLD2WB<0b1001, {0,0,?,?}, "8", VecListDPairSpaced, IIC_VLD2u,
+ addrmode6align64or128>;
+defm VLD2b16wb : VLD2WB<0b1001, {0,1,?,?}, "16", VecListDPairSpaced, IIC_VLD2u,
+ addrmode6align64or128>;
+defm VLD2b32wb : VLD2WB<0b1001, {1,0,?,?}, "32", VecListDPairSpaced, IIC_VLD2u,
+ addrmode6align64or128>;
// VLD3 : Vector Load (multiple 3-element structures)
class VLD3D<bits<4> op11_8, bits<4> op7_4, string Dt>
@@ -1296,47 +1354,55 @@ def VLD4LNq32Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD4lnu>;
} // mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1
// VLD1DUP : Vector Load (single element to all lanes)
-class VLD1DUP<bits<4> op7_4, string Dt, ValueType Ty, PatFrag LoadOp>
+class VLD1DUP<bits<4> op7_4, string Dt, ValueType Ty, PatFrag LoadOp,
+ Operand AddrMode>
: NLdSt<1, 0b10, 0b1100, op7_4, (outs VecListOneDAllLanes:$Vd),
- (ins addrmode6dup:$Rn),
+ (ins AddrMode:$Rn),
IIC_VLD1dup, "vld1", Dt, "$Vd, $Rn", "",
[(set VecListOneDAllLanes:$Vd,
- (Ty (NEONvdup (i32 (LoadOp addrmode6dup:$Rn)))))]> {
+ (Ty (NEONvdup (i32 (LoadOp AddrMode:$Rn)))))]> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLD1DupInstruction";
}
-def VLD1DUPd8 : VLD1DUP<{0,0,0,?}, "8", v8i8, extloadi8>;
-def VLD1DUPd16 : VLD1DUP<{0,1,0,?}, "16", v4i16, extloadi16>;
-def VLD1DUPd32 : VLD1DUP<{1,0,0,?}, "32", v2i32, load>;
+def VLD1DUPd8 : VLD1DUP<{0,0,0,?}, "8", v8i8, extloadi8,
+ addrmode6dupalignNone>;
+def VLD1DUPd16 : VLD1DUP<{0,1,0,?}, "16", v4i16, extloadi16,
+ addrmode6dupalign16>;
+def VLD1DUPd32 : VLD1DUP<{1,0,0,?}, "32", v2i32, load,
+ addrmode6dupalign32>;
def : Pat<(v2f32 (NEONvdup (f32 (load addrmode6dup:$addr)))),
(VLD1DUPd32 addrmode6:$addr)>;
-class VLD1QDUP<bits<4> op7_4, string Dt, ValueType Ty, PatFrag LoadOp>
+class VLD1QDUP<bits<4> op7_4, string Dt, ValueType Ty, PatFrag LoadOp,
+ Operand AddrMode>
: NLdSt<1, 0b10, 0b1100, op7_4, (outs VecListDPairAllLanes:$Vd),
- (ins addrmode6dup:$Rn), IIC_VLD1dup,
+ (ins AddrMode:$Rn), IIC_VLD1dup,
"vld1", Dt, "$Vd, $Rn", "",
[(set VecListDPairAllLanes:$Vd,
- (Ty (NEONvdup (i32 (LoadOp addrmode6dup:$Rn)))))]> {
+ (Ty (NEONvdup (i32 (LoadOp AddrMode:$Rn)))))]> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLD1DupInstruction";
}
-def VLD1DUPq8 : VLD1QDUP<{0,0,1,0}, "8", v16i8, extloadi8>;
-def VLD1DUPq16 : VLD1QDUP<{0,1,1,?}, "16", v8i16, extloadi16>;
-def VLD1DUPq32 : VLD1QDUP<{1,0,1,?}, "32", v4i32, load>;
+def VLD1DUPq8 : VLD1QDUP<{0,0,1,0}, "8", v16i8, extloadi8,
+ addrmode6dupalignNone>;
+def VLD1DUPq16 : VLD1QDUP<{0,1,1,?}, "16", v8i16, extloadi16,
+ addrmode6dupalign16>;
+def VLD1DUPq32 : VLD1QDUP<{1,0,1,?}, "32", v4i32, load,
+ addrmode6dupalign32>;
def : Pat<(v4f32 (NEONvdup (f32 (load addrmode6dup:$addr)))),
(VLD1DUPq32 addrmode6:$addr)>;
let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
// ...with address register writeback:
-multiclass VLD1DUPWB<bits<4> op7_4, string Dt> {
+multiclass VLD1DUPWB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<1, 0b10, 0b1100, op7_4,
(outs VecListOneDAllLanes:$Vd, GPR:$wb),
- (ins addrmode6dup:$Rn), IIC_VLD1dupu,
+ (ins AddrMode:$Rn), IIC_VLD1dupu,
"vld1", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -1345,17 +1411,17 @@ multiclass VLD1DUPWB<bits<4> op7_4, string Dt> {
}
def _register : NLdSt<1, 0b10, 0b1100, op7_4,
(outs VecListOneDAllLanes:$Vd, GPR:$wb),
- (ins addrmode6dup:$Rn, rGPR:$Rm), IIC_VLD1dupu,
+ (ins AddrMode:$Rn, rGPR:$Rm), IIC_VLD1dupu,
"vld1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLD1DupInstruction";
}
}
-multiclass VLD1QDUPWB<bits<4> op7_4, string Dt> {
+multiclass VLD1QDUPWB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<1, 0b10, 0b1100, op7_4,
(outs VecListDPairAllLanes:$Vd, GPR:$wb),
- (ins addrmode6dup:$Rn), IIC_VLD1dupu,
+ (ins AddrMode:$Rn), IIC_VLD1dupu,
"vld1", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -1364,7 +1430,7 @@ multiclass VLD1QDUPWB<bits<4> op7_4, string Dt> {
}
def _register : NLdSt<1, 0b10, 0b1100, op7_4,
(outs VecListDPairAllLanes:$Vd, GPR:$wb),
- (ins addrmode6dup:$Rn, rGPR:$Rm), IIC_VLD1dupu,
+ (ins AddrMode:$Rn, rGPR:$Rm), IIC_VLD1dupu,
"vld1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{4} = Rn{4};
@@ -1372,38 +1438,47 @@ multiclass VLD1QDUPWB<bits<4> op7_4, string Dt> {
}
}
-defm VLD1DUPd8wb : VLD1DUPWB<{0,0,0,0}, "8">;
-defm VLD1DUPd16wb : VLD1DUPWB<{0,1,0,?}, "16">;
-defm VLD1DUPd32wb : VLD1DUPWB<{1,0,0,?}, "32">;
+defm VLD1DUPd8wb : VLD1DUPWB<{0,0,0,0}, "8", addrmode6dupalignNone>;
+defm VLD1DUPd16wb : VLD1DUPWB<{0,1,0,?}, "16", addrmode6dupalign16>;
+defm VLD1DUPd32wb : VLD1DUPWB<{1,0,0,?}, "32", addrmode6dupalign32>;
-defm VLD1DUPq8wb : VLD1QDUPWB<{0,0,1,0}, "8">;
-defm VLD1DUPq16wb : VLD1QDUPWB<{0,1,1,?}, "16">;
-defm VLD1DUPq32wb : VLD1QDUPWB<{1,0,1,?}, "32">;
+defm VLD1DUPq8wb : VLD1QDUPWB<{0,0,1,0}, "8", addrmode6dupalignNone>;
+defm VLD1DUPq16wb : VLD1QDUPWB<{0,1,1,?}, "16", addrmode6dupalign16>;
+defm VLD1DUPq32wb : VLD1QDUPWB<{1,0,1,?}, "32", addrmode6dupalign32>;
// VLD2DUP : Vector Load (single 2-element structure to all lanes)
-class VLD2DUP<bits<4> op7_4, string Dt, RegisterOperand VdTy>
+class VLD2DUP<bits<4> op7_4, string Dt, RegisterOperand VdTy, Operand AddrMode>
: NLdSt<1, 0b10, 0b1101, op7_4, (outs VdTy:$Vd),
- (ins addrmode6dup:$Rn), IIC_VLD2dup,
+ (ins AddrMode:$Rn), IIC_VLD2dup,
"vld2", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLD2DupInstruction";
}
-def VLD2DUPd8 : VLD2DUP<{0,0,0,?}, "8", VecListDPairAllLanes>;
-def VLD2DUPd16 : VLD2DUP<{0,1,0,?}, "16", VecListDPairAllLanes>;
-def VLD2DUPd32 : VLD2DUP<{1,0,0,?}, "32", VecListDPairAllLanes>;
+def VLD2DUPd8 : VLD2DUP<{0,0,0,?}, "8", VecListDPairAllLanes,
+ addrmode6dupalign16>;
+def VLD2DUPd16 : VLD2DUP<{0,1,0,?}, "16", VecListDPairAllLanes,
+ addrmode6dupalign32>;
+def VLD2DUPd32 : VLD2DUP<{1,0,0,?}, "32", VecListDPairAllLanes,
+ addrmode6dupalign64>;
+// HACK: VLD2DUPd8x2 must be changed at the same time as VLD2b8, or
+// "vld2.8 {d0[], d2[]}, [r4:32]" will become "vld2.8 {d0, d2}, [r4:32]".
// ...with double-spaced registers
-def VLD2DUPd8x2 : VLD2DUP<{0,0,1,?}, "8", VecListDPairSpacedAllLanes>;
-def VLD2DUPd16x2 : VLD2DUP<{0,1,1,?}, "16", VecListDPairSpacedAllLanes>;
-def VLD2DUPd32x2 : VLD2DUP<{1,0,1,?}, "32", VecListDPairSpacedAllLanes>;
+def VLD2DUPd8x2 : VLD2DUP<{0,0,1,?}, "8", VecListDPairSpacedAllLanes,
+ addrmode6dupalign16>;
+def VLD2DUPd16x2 : VLD2DUP<{0,1,1,?}, "16", VecListDPairSpacedAllLanes,
+ addrmode6dupalign32>;
+def VLD2DUPd32x2 : VLD2DUP<{1,0,1,?}, "32", VecListDPairSpacedAllLanes,
+ addrmode6dupalign64>;
// ...with address register writeback:
-multiclass VLD2DUPWB<bits<4> op7_4, string Dt, RegisterOperand VdTy> {
+multiclass VLD2DUPWB<bits<4> op7_4, string Dt, RegisterOperand VdTy,
+ Operand AddrMode> {
def _fixed : NLdSt<1, 0b10, 0b1101, op7_4,
(outs VdTy:$Vd, GPR:$wb),
- (ins addrmode6dup:$Rn), IIC_VLD2dupu,
+ (ins AddrMode:$Rn), IIC_VLD2dupu,
"vld2", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -1412,7 +1487,7 @@ multiclass VLD2DUPWB<bits<4> op7_4, string Dt, RegisterOperand VdTy> {
}
def _register : NLdSt<1, 0b10, 0b1101, op7_4,
(outs VdTy:$Vd, GPR:$wb),
- (ins addrmode6dup:$Rn, rGPR:$Rm), IIC_VLD2dupu,
+ (ins AddrMode:$Rn, rGPR:$Rm), IIC_VLD2dupu,
"vld2", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{4} = Rn{4};
@@ -1420,13 +1495,19 @@ multiclass VLD2DUPWB<bits<4> op7_4, string Dt, RegisterOperand VdTy> {
}
}
-defm VLD2DUPd8wb : VLD2DUPWB<{0,0,0,0}, "8", VecListDPairAllLanes>;
-defm VLD2DUPd16wb : VLD2DUPWB<{0,1,0,?}, "16", VecListDPairAllLanes>;
-defm VLD2DUPd32wb : VLD2DUPWB<{1,0,0,?}, "32", VecListDPairAllLanes>;
+defm VLD2DUPd8wb : VLD2DUPWB<{0,0,0,0}, "8", VecListDPairAllLanes,
+ addrmode6dupalign16>;
+defm VLD2DUPd16wb : VLD2DUPWB<{0,1,0,?}, "16", VecListDPairAllLanes,
+ addrmode6dupalign32>;
+defm VLD2DUPd32wb : VLD2DUPWB<{1,0,0,?}, "32", VecListDPairAllLanes,
+ addrmode6dupalign64>;
-defm VLD2DUPd8x2wb : VLD2DUPWB<{0,0,1,0}, "8", VecListDPairSpacedAllLanes>;
-defm VLD2DUPd16x2wb : VLD2DUPWB<{0,1,1,?}, "16", VecListDPairSpacedAllLanes>;
-defm VLD2DUPd32x2wb : VLD2DUPWB<{1,0,1,?}, "32", VecListDPairSpacedAllLanes>;
+defm VLD2DUPd8x2wb : VLD2DUPWB<{0,0,1,0}, "8", VecListDPairSpacedAllLanes,
+ addrmode6dupalign16>;
+defm VLD2DUPd16x2wb : VLD2DUPWB<{0,1,1,?}, "16", VecListDPairSpacedAllLanes,
+ addrmode6dupalign32>;
+defm VLD2DUPd32x2wb : VLD2DUPWB<{1,0,1,?}, "32", VecListDPairSpacedAllLanes,
+ addrmode6dupalign64>;
// VLD3DUP : Vector Load (single 3-element structure to all lanes)
class VLD3DUP<bits<4> op7_4, string Dt>
@@ -1452,22 +1533,22 @@ def VLD3DUPq16 : VLD3DUP<{0,1,1,?}, "16">;
def VLD3DUPq32 : VLD3DUP<{1,0,1,?}, "32">;
// ...with address register writeback:
-class VLD3DUPWB<bits<4> op7_4, string Dt>
+class VLD3DUPWB<bits<4> op7_4, string Dt, Operand AddrMode>
: NLdSt<1, 0b10, 0b1110, op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, GPR:$wb),
- (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD3dupu,
+ (ins AddrMode:$Rn, am6offset:$Rm), IIC_VLD3dupu,
"vld3", Dt, "\\{$Vd[], $dst2[], $dst3[]\\}, $Rn$Rm",
"$Rn.addr = $wb", []> {
let Inst{4} = 0;
let DecoderMethod = "DecodeVLD3DupInstruction";
}
-def VLD3DUPd8_UPD : VLD3DUPWB<{0,0,0,0}, "8">;
-def VLD3DUPd16_UPD : VLD3DUPWB<{0,1,0,?}, "16">;
-def VLD3DUPd32_UPD : VLD3DUPWB<{1,0,0,?}, "32">;
+def VLD3DUPd8_UPD : VLD3DUPWB<{0,0,0,0}, "8", addrmode6dupalign64>;
+def VLD3DUPd16_UPD : VLD3DUPWB<{0,1,0,?}, "16", addrmode6dupalign64>;
+def VLD3DUPd32_UPD : VLD3DUPWB<{1,0,0,?}, "32", addrmode6dupalign64>;
-def VLD3DUPq8_UPD : VLD3DUPWB<{0,0,1,0}, "8">;
-def VLD3DUPq16_UPD : VLD3DUPWB<{0,1,1,?}, "16">;
-def VLD3DUPq32_UPD : VLD3DUPWB<{1,0,1,?}, "32">;
+def VLD3DUPq8_UPD : VLD3DUPWB<{0,0,1,0}, "8", addrmode6dupalign64>;
+def VLD3DUPq16_UPD : VLD3DUPWB<{0,1,1,?}, "16", addrmode6dupalign64>;
+def VLD3DUPq32_UPD : VLD3DUPWB<{1,0,1,?}, "32", addrmode6dupalign64>;
def VLD3DUPd8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3dupu>;
def VLD3DUPd16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3dupu>;
@@ -1563,35 +1644,35 @@ class VSTQQQQWBPseudo<InstrItinClass itin>
"$addr.addr = $wb">;
// VST1 : Vector Store (multiple single elements)
-class VST1D<bits<4> op7_4, string Dt>
- : NLdSt<0,0b00,0b0111,op7_4, (outs), (ins addrmode6:$Rn, VecListOneD:$Vd),
+class VST1D<bits<4> op7_4, string Dt, Operand AddrMode>
+ : NLdSt<0,0b00,0b0111,op7_4, (outs), (ins AddrMode:$Rn, VecListOneD:$Vd),
IIC_VST1, "vst1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLDST1Instruction";
}
-class VST1Q<bits<4> op7_4, string Dt>
- : NLdSt<0,0b00,0b1010,op7_4, (outs), (ins addrmode6:$Rn, VecListDPair:$Vd),
+class VST1Q<bits<4> op7_4, string Dt, Operand AddrMode>
+ : NLdSt<0,0b00,0b1010,op7_4, (outs), (ins AddrMode:$Rn, VecListDPair:$Vd),
IIC_VST1x2, "vst1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVLDST1Instruction";
}
-def VST1d8 : VST1D<{0,0,0,?}, "8">;
-def VST1d16 : VST1D<{0,1,0,?}, "16">;
-def VST1d32 : VST1D<{1,0,0,?}, "32">;
-def VST1d64 : VST1D<{1,1,0,?}, "64">;
+def VST1d8 : VST1D<{0,0,0,?}, "8", addrmode6align64>;
+def VST1d16 : VST1D<{0,1,0,?}, "16", addrmode6align64>;
+def VST1d32 : VST1D<{1,0,0,?}, "32", addrmode6align64>;
+def VST1d64 : VST1D<{1,1,0,?}, "64", addrmode6align64>;
-def VST1q8 : VST1Q<{0,0,?,?}, "8">;
-def VST1q16 : VST1Q<{0,1,?,?}, "16">;
-def VST1q32 : VST1Q<{1,0,?,?}, "32">;
-def VST1q64 : VST1Q<{1,1,?,?}, "64">;
+def VST1q8 : VST1Q<{0,0,?,?}, "8", addrmode6align64or128>;
+def VST1q16 : VST1Q<{0,1,?,?}, "16", addrmode6align64or128>;
+def VST1q32 : VST1Q<{1,0,?,?}, "32", addrmode6align64or128>;
+def VST1q64 : VST1Q<{1,1,?,?}, "64", addrmode6align64or128>;
// ...with address register writeback:
-multiclass VST1DWB<bits<4> op7_4, string Dt> {
+multiclass VST1DWB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<0,0b00, 0b0111,op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, VecListOneD:$Vd), IIC_VLD1u,
+ (ins AddrMode:$Rn, VecListOneD:$Vd), IIC_VLD1u,
"vst1", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -1599,7 +1680,7 @@ multiclass VST1DWB<bits<4> op7_4, string Dt> {
let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b00,0b0111,op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm, VecListOneD:$Vd),
+ (ins AddrMode:$Rn, rGPR:$Rm, VecListOneD:$Vd),
IIC_VLD1u,
"vst1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
@@ -1607,9 +1688,9 @@ multiclass VST1DWB<bits<4> op7_4, string Dt> {
let DecoderMethod = "DecodeVLDST1Instruction";
}
}
-multiclass VST1QWB<bits<4> op7_4, string Dt> {
+multiclass VST1QWB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<0,0b00,0b1010,op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, VecListDPair:$Vd), IIC_VLD1x2u,
+ (ins AddrMode:$Rn, VecListDPair:$Vd), IIC_VLD1x2u,
"vst1", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -1617,7 +1698,7 @@ multiclass VST1QWB<bits<4> op7_4, string Dt> {
let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b00,0b1010,op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm, VecListDPair:$Vd),
+ (ins AddrMode:$Rn, rGPR:$Rm, VecListDPair:$Vd),
IIC_VLD1x2u,
"vst1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
@@ -1626,28 +1707,28 @@ multiclass VST1QWB<bits<4> op7_4, string Dt> {
}
}
-defm VST1d8wb : VST1DWB<{0,0,0,?}, "8">;
-defm VST1d16wb : VST1DWB<{0,1,0,?}, "16">;
-defm VST1d32wb : VST1DWB<{1,0,0,?}, "32">;
-defm VST1d64wb : VST1DWB<{1,1,0,?}, "64">;
+defm VST1d8wb : VST1DWB<{0,0,0,?}, "8", addrmode6align64>;
+defm VST1d16wb : VST1DWB<{0,1,0,?}, "16", addrmode6align64>;
+defm VST1d32wb : VST1DWB<{1,0,0,?}, "32", addrmode6align64>;
+defm VST1d64wb : VST1DWB<{1,1,0,?}, "64", addrmode6align64>;
-defm VST1q8wb : VST1QWB<{0,0,?,?}, "8">;
-defm VST1q16wb : VST1QWB<{0,1,?,?}, "16">;
-defm VST1q32wb : VST1QWB<{1,0,?,?}, "32">;
-defm VST1q64wb : VST1QWB<{1,1,?,?}, "64">;
+defm VST1q8wb : VST1QWB<{0,0,?,?}, "8", addrmode6align64or128>;
+defm VST1q16wb : VST1QWB<{0,1,?,?}, "16", addrmode6align64or128>;
+defm VST1q32wb : VST1QWB<{1,0,?,?}, "32", addrmode6align64or128>;
+defm VST1q64wb : VST1QWB<{1,1,?,?}, "64", addrmode6align64or128>;
// ...with 3 registers
-class VST1D3<bits<4> op7_4, string Dt>
+class VST1D3<bits<4> op7_4, string Dt, Operand AddrMode>
: NLdSt<0, 0b00, 0b0110, op7_4, (outs),
- (ins addrmode6:$Rn, VecListThreeD:$Vd),
+ (ins AddrMode:$Rn, VecListThreeD:$Vd),
IIC_VST1x3, "vst1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLDST1Instruction";
}
-multiclass VST1D3WB<bits<4> op7_4, string Dt> {
+multiclass VST1D3WB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<0,0b00,0b0110,op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, VecListThreeD:$Vd), IIC_VLD1x3u,
+ (ins AddrMode:$Rn, VecListThreeD:$Vd), IIC_VLD1x3u,
"vst1", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -1655,7 +1736,7 @@ multiclass VST1D3WB<bits<4> op7_4, string Dt> {
let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b00,0b0110,op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm, VecListThreeD:$Vd),
+ (ins AddrMode:$Rn, rGPR:$Rm, VecListThreeD:$Vd),
IIC_VLD1x3u,
"vst1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
@@ -1664,33 +1745,33 @@ multiclass VST1D3WB<bits<4> op7_4, string Dt> {
}
}
-def VST1d8T : VST1D3<{0,0,0,?}, "8">;
-def VST1d16T : VST1D3<{0,1,0,?}, "16">;
-def VST1d32T : VST1D3<{1,0,0,?}, "32">;
-def VST1d64T : VST1D3<{1,1,0,?}, "64">;
+def VST1d8T : VST1D3<{0,0,0,?}, "8", addrmode6align64>;
+def VST1d16T : VST1D3<{0,1,0,?}, "16", addrmode6align64>;
+def VST1d32T : VST1D3<{1,0,0,?}, "32", addrmode6align64>;
+def VST1d64T : VST1D3<{1,1,0,?}, "64", addrmode6align64>;
-defm VST1d8Twb : VST1D3WB<{0,0,0,?}, "8">;
-defm VST1d16Twb : VST1D3WB<{0,1,0,?}, "16">;
-defm VST1d32Twb : VST1D3WB<{1,0,0,?}, "32">;
-defm VST1d64Twb : VST1D3WB<{1,1,0,?}, "64">;
+defm VST1d8Twb : VST1D3WB<{0,0,0,?}, "8", addrmode6align64>;
+defm VST1d16Twb : VST1D3WB<{0,1,0,?}, "16", addrmode6align64>;
+defm VST1d32Twb : VST1D3WB<{1,0,0,?}, "32", addrmode6align64>;
+defm VST1d64Twb : VST1D3WB<{1,1,0,?}, "64", addrmode6align64>;
def VST1d64TPseudo : VSTQQPseudo<IIC_VST1x3>;
def VST1d64TPseudoWB_fixed : VSTQQWBfixedPseudo<IIC_VST1x3u>;
def VST1d64TPseudoWB_register : VSTQQWBPseudo<IIC_VST1x3u>;
// ...with 4 registers
-class VST1D4<bits<4> op7_4, string Dt>
+class VST1D4<bits<4> op7_4, string Dt, Operand AddrMode>
: NLdSt<0, 0b00, 0b0010, op7_4, (outs),
- (ins addrmode6:$Rn, VecListFourD:$Vd),
+ (ins AddrMode:$Rn, VecListFourD:$Vd),
IIC_VST1x4, "vst1", Dt, "$Vd, $Rn", "",
[]> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVLDST1Instruction";
}
-multiclass VST1D4WB<bits<4> op7_4, string Dt> {
+multiclass VST1D4WB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<0,0b00,0b0010,op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, VecListFourD:$Vd), IIC_VLD1x4u,
+ (ins AddrMode:$Rn, VecListFourD:$Vd), IIC_VLD1x4u,
"vst1", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -1698,7 +1779,7 @@ multiclass VST1D4WB<bits<4> op7_4, string Dt> {
let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b00,0b0010,op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm, VecListFourD:$Vd),
+ (ins AddrMode:$Rn, rGPR:$Rm, VecListFourD:$Vd),
IIC_VLD1x4u,
"vst1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
@@ -1707,15 +1788,15 @@ multiclass VST1D4WB<bits<4> op7_4, string Dt> {
}
}
-def VST1d8Q : VST1D4<{0,0,?,?}, "8">;
-def VST1d16Q : VST1D4<{0,1,?,?}, "16">;
-def VST1d32Q : VST1D4<{1,0,?,?}, "32">;
-def VST1d64Q : VST1D4<{1,1,?,?}, "64">;
+def VST1d8Q : VST1D4<{0,0,?,?}, "8", addrmode6align64or128or256>;
+def VST1d16Q : VST1D4<{0,1,?,?}, "16", addrmode6align64or128or256>;
+def VST1d32Q : VST1D4<{1,0,?,?}, "32", addrmode6align64or128or256>;
+def VST1d64Q : VST1D4<{1,1,?,?}, "64", addrmode6align64or128or256>;
-defm VST1d8Qwb : VST1D4WB<{0,0,?,?}, "8">;
-defm VST1d16Qwb : VST1D4WB<{0,1,?,?}, "16">;
-defm VST1d32Qwb : VST1D4WB<{1,0,?,?}, "32">;
-defm VST1d64Qwb : VST1D4WB<{1,1,?,?}, "64">;
+defm VST1d8Qwb : VST1D4WB<{0,0,?,?}, "8", addrmode6align64or128or256>;
+defm VST1d16Qwb : VST1D4WB<{0,1,?,?}, "16", addrmode6align64or128or256>;
+defm VST1d32Qwb : VST1D4WB<{1,0,?,?}, "32", addrmode6align64or128or256>;
+defm VST1d64Qwb : VST1D4WB<{1,1,?,?}, "64", addrmode6align64or128or256>;
def VST1d64QPseudo : VSTQQPseudo<IIC_VST1x4>;
def VST1d64QPseudoWB_fixed : VSTQQWBfixedPseudo<IIC_VST1x4u>;
@@ -1723,21 +1804,27 @@ def VST1d64QPseudoWB_register : VSTQQWBPseudo<IIC_VST1x4u>;
// VST2 : Vector Store (multiple 2-element structures)
class VST2<bits<4> op11_8, bits<4> op7_4, string Dt, RegisterOperand VdTy,
- InstrItinClass itin>
- : NLdSt<0, 0b00, op11_8, op7_4, (outs), (ins addrmode6:$Rn, VdTy:$Vd),
+ InstrItinClass itin, Operand AddrMode>
+ : NLdSt<0, 0b00, op11_8, op7_4, (outs), (ins AddrMode:$Rn, VdTy:$Vd),
itin, "vst2", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVLDST2Instruction";
}
-def VST2d8 : VST2<0b1000, {0,0,?,?}, "8", VecListDPair, IIC_VST2>;
-def VST2d16 : VST2<0b1000, {0,1,?,?}, "16", VecListDPair, IIC_VST2>;
-def VST2d32 : VST2<0b1000, {1,0,?,?}, "32", VecListDPair, IIC_VST2>;
+def VST2d8 : VST2<0b1000, {0,0,?,?}, "8", VecListDPair, IIC_VST2,
+ addrmode6align64or128>;
+def VST2d16 : VST2<0b1000, {0,1,?,?}, "16", VecListDPair, IIC_VST2,
+ addrmode6align64or128>;
+def VST2d32 : VST2<0b1000, {1,0,?,?}, "32", VecListDPair, IIC_VST2,
+ addrmode6align64or128>;
-def VST2q8 : VST2<0b0011, {0,0,?,?}, "8", VecListFourD, IIC_VST2x2>;
-def VST2q16 : VST2<0b0011, {0,1,?,?}, "16", VecListFourD, IIC_VST2x2>;
-def VST2q32 : VST2<0b0011, {1,0,?,?}, "32", VecListFourD, IIC_VST2x2>;
+def VST2q8 : VST2<0b0011, {0,0,?,?}, "8", VecListFourD, IIC_VST2x2,
+ addrmode6align64or128or256>;
+def VST2q16 : VST2<0b0011, {0,1,?,?}, "16", VecListFourD, IIC_VST2x2,
+ addrmode6align64or128or256>;
+def VST2q32 : VST2<0b0011, {1,0,?,?}, "32", VecListFourD, IIC_VST2x2,
+ addrmode6align64or128or256>;
def VST2q8Pseudo : VSTQQPseudo<IIC_VST2x2>;
def VST2q16Pseudo : VSTQQPseudo<IIC_VST2x2>;
@@ -1745,9 +1832,9 @@ def VST2q32Pseudo : VSTQQPseudo<IIC_VST2x2>;
// ...with address register writeback:
multiclass VST2DWB<bits<4> op11_8, bits<4> op7_4, string Dt,
- RegisterOperand VdTy> {
+ RegisterOperand VdTy, Operand AddrMode> {
def _fixed : NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, VdTy:$Vd), IIC_VLD1u,
+ (ins AddrMode:$Rn, VdTy:$Vd), IIC_VLD1u,
"vst2", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -1755,16 +1842,16 @@ multiclass VST2DWB<bits<4> op11_8, bits<4> op7_4, string Dt,
let DecoderMethod = "DecodeVLDST2Instruction";
}
def _register : NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm, VdTy:$Vd), IIC_VLD1u,
+ (ins AddrMode:$Rn, rGPR:$Rm, VdTy:$Vd), IIC_VLD1u,
"vst2", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVLDST2Instruction";
}
}
-multiclass VST2QWB<bits<4> op7_4, string Dt> {
+multiclass VST2QWB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<0, 0b00, 0b0011, op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, VecListFourD:$Vd), IIC_VLD1u,
+ (ins AddrMode:$Rn, VecListFourD:$Vd), IIC_VLD1u,
"vst2", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -1772,7 +1859,7 @@ multiclass VST2QWB<bits<4> op7_4, string Dt> {
let DecoderMethod = "DecodeVLDST2Instruction";
}
def _register : NLdSt<0, 0b00, 0b0011, op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm, VecListFourD:$Vd),
+ (ins AddrMode:$Rn, rGPR:$Rm, VecListFourD:$Vd),
IIC_VLD1u,
"vst2", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
@@ -1781,13 +1868,16 @@ multiclass VST2QWB<bits<4> op7_4, string Dt> {
}
}
-defm VST2d8wb : VST2DWB<0b1000, {0,0,?,?}, "8", VecListDPair>;
-defm VST2d16wb : VST2DWB<0b1000, {0,1,?,?}, "16", VecListDPair>;
-defm VST2d32wb : VST2DWB<0b1000, {1,0,?,?}, "32", VecListDPair>;
+defm VST2d8wb : VST2DWB<0b1000, {0,0,?,?}, "8", VecListDPair,
+ addrmode6align64or128>;
+defm VST2d16wb : VST2DWB<0b1000, {0,1,?,?}, "16", VecListDPair,
+ addrmode6align64or128>;
+defm VST2d32wb : VST2DWB<0b1000, {1,0,?,?}, "32", VecListDPair,
+ addrmode6align64or128>;
-defm VST2q8wb : VST2QWB<{0,0,?,?}, "8">;
-defm VST2q16wb : VST2QWB<{0,1,?,?}, "16">;
-defm VST2q32wb : VST2QWB<{1,0,?,?}, "32">;
+defm VST2q8wb : VST2QWB<{0,0,?,?}, "8", addrmode6align64or128or256>;
+defm VST2q16wb : VST2QWB<{0,1,?,?}, "16", addrmode6align64or128or256>;
+defm VST2q32wb : VST2QWB<{1,0,?,?}, "32", addrmode6align64or128or256>;
def VST2q8PseudoWB_fixed : VSTQQWBfixedPseudo<IIC_VST2x2u>;
def VST2q16PseudoWB_fixed : VSTQQWBfixedPseudo<IIC_VST2x2u>;
@@ -1797,12 +1887,18 @@ def VST2q16PseudoWB_register : VSTQQWBregisterPseudo<IIC_VST2x2u>;
def VST2q32PseudoWB_register : VSTQQWBregisterPseudo<IIC_VST2x2u>;
// ...with double-spaced registers
-def VST2b8 : VST2<0b1001, {0,0,?,?}, "8", VecListDPairSpaced, IIC_VST2>;
-def VST2b16 : VST2<0b1001, {0,1,?,?}, "16", VecListDPairSpaced, IIC_VST2>;
-def VST2b32 : VST2<0b1001, {1,0,?,?}, "32", VecListDPairSpaced, IIC_VST2>;
-defm VST2b8wb : VST2DWB<0b1001, {0,0,?,?}, "8", VecListDPairSpaced>;
-defm VST2b16wb : VST2DWB<0b1001, {0,1,?,?}, "16", VecListDPairSpaced>;
-defm VST2b32wb : VST2DWB<0b1001, {1,0,?,?}, "32", VecListDPairSpaced>;
+def VST2b8 : VST2<0b1001, {0,0,?,?}, "8", VecListDPairSpaced, IIC_VST2,
+ addrmode6align64or128>;
+def VST2b16 : VST2<0b1001, {0,1,?,?}, "16", VecListDPairSpaced, IIC_VST2,
+ addrmode6align64or128>;
+def VST2b32 : VST2<0b1001, {1,0,?,?}, "32", VecListDPairSpaced, IIC_VST2,
+ addrmode6align64or128>;
+defm VST2b8wb : VST2DWB<0b1001, {0,0,?,?}, "8", VecListDPairSpaced,
+ addrmode6align64or128>;
+defm VST2b16wb : VST2DWB<0b1001, {0,1,?,?}, "16", VecListDPairSpaced,
+ addrmode6align64or128>;
+defm VST2b32wb : VST2DWB<0b1001, {1,0,?,?}, "32", VecListDPairSpaced,
+ addrmode6align64or128>;
// VST3 : Vector Store (multiple 3-element structures)
class VST3D<bits<4> op11_8, bits<4> op7_4, string Dt>
@@ -2270,9 +2366,9 @@ def : Pat<(v2f64 (dword_alignedload addrmode6:$addr)),
def : Pat<(dword_alignedstore (v2f64 QPR:$value), addrmode6:$addr),
(VST1q64 addrmode6:$addr, QPR:$value)>;
def : Pat<(v2f64 (word_alignedload addrmode6:$addr)),
- (VLD1q32 addrmode6:$addr)>;
+ (VLD1q32 addrmode6:$addr)>, Requires<[IsLE]>;
def : Pat<(word_alignedstore (v2f64 QPR:$value), addrmode6:$addr),
- (VST1q32 addrmode6:$addr, QPR:$value)>;
+ (VST1q32 addrmode6:$addr, QPR:$value)>, Requires<[IsLE]>;
def : Pat<(v2f64 (hword_alignedload addrmode6:$addr)),
(VLD1q16 addrmode6:$addr)>, Requires<[IsLE]>;
def : Pat<(hword_alignedstore (v2f64 QPR:$value), addrmode6:$addr),
@@ -2360,14 +2456,14 @@ class N2VDIntnp<bits<2> op17_16, bits<3> op10_8, bit op7,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
: N2Vnp<0b10, op17_16, op10_8, op7, 0, (outs DPR:$Vd), (ins DPR:$Vm),
- itin, OpcodeStr, Dt, ResTy, OpTy,
+ itin, OpcodeStr, Dt,
[(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm))))]>;
class N2VQIntnp<bits<2> op17_16, bits<3> op10_8, bit op7,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
: N2Vnp<0b10, op17_16, op10_8, op7, 1, (outs QPR:$Vd), (ins QPR:$Vm),
- itin, OpcodeStr, Dt, ResTy, OpTy,
+ itin, OpcodeStr, Dt,
[(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm))))]>;
// Similar to NV2VQIntnp with some more encoding bits exposed (crypto).
@@ -2375,7 +2471,7 @@ class N2VQIntXnp<bits<2> op19_18, bits<2> op17_16, bits<3> op10_8, bit op6,
bit op7, InstrItinClass itin, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
: N2Vnp<op19_18, op17_16, op10_8, op7, op6, (outs QPR:$Vd), (ins QPR:$Vm),
- itin, OpcodeStr, Dt, ResTy, OpTy,
+ itin, OpcodeStr, Dt,
[(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm))))]>;
// Same as N2VQIntXnp but with Vd as a src register.
@@ -2384,7 +2480,7 @@ class N2VQIntX2np<bits<2> op19_18, bits<2> op17_16, bits<3> op10_8, bit op6,
ValueType ResTy, ValueType OpTy, SDPatternOperator IntOp>
: N2Vnp<op19_18, op17_16, op10_8, op7, op6,
(outs QPR:$Vd), (ins QPR:$src, QPR:$Vm),
- itin, OpcodeStr, Dt, ResTy, OpTy,
+ itin, OpcodeStr, Dt,
[(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$src), (OpTy QPR:$Vm))))]> {
let Constraints = "$src = $Vd";
}
@@ -2558,7 +2654,6 @@ class N3VDIntnp<bits<5> op27_23, bits<2> op21_20, bits<4> op11_8, bit op6,
SDPatternOperator IntOp, bit Commutable>
: N3Vnp<op27_23, op21_20, op11_8, op6, op4,
(outs DPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin, OpcodeStr, Dt,
- ResTy, OpTy, IntOp, Commutable,
[(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]>;
class N3VDIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
@@ -2612,7 +2707,6 @@ class N3VQIntnp<bits<5> op27_23, bits<2> op21_20, bits<4> op11_8, bit op6,
SDPatternOperator IntOp, bit Commutable>
: N3Vnp<op27_23, op21_20, op11_8, op6, op4,
(outs QPR:$Vd), (ins QPR:$Vn, QPR:$Vm), f, itin, OpcodeStr, Dt,
- ResTy, OpTy, IntOp, Commutable,
[(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]>;
// Same as N3VQIntnp but with Vd as a src register.
@@ -2621,8 +2715,8 @@ class N3VQInt3np<bits<5> op27_23, bits<2> op21_20, bits<4> op11_8, bit op6,
string Dt, ValueType ResTy, ValueType OpTy,
SDPatternOperator IntOp, bit Commutable>
: N3Vnp<op27_23, op21_20, op11_8, op6, op4,
- (outs QPR:$Vd), (ins QPR:$src, QPR:$Vn, QPR:$Vm), f, itin, OpcodeStr,
- Dt, ResTy, OpTy, IntOp, Commutable,
+ (outs QPR:$Vd), (ins QPR:$src, QPR:$Vn, QPR:$Vm),
+ f, itin, OpcodeStr, Dt,
[(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$src), (OpTy QPR:$Vn),
(OpTy QPR:$Vm))))]> {
let Constraints = "$src = $Vd";
@@ -2942,7 +3036,6 @@ class N3VLIntnp<bits<5> op27_23, bits<2> op21_20, bits<4> op11_8, bit op6,
SDPatternOperator IntOp, bit Commutable>
: N3Vnp<op27_23, op21_20, op11_8, op6, op4,
(outs QPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin, OpcodeStr, Dt,
- ResTy, OpTy, IntOp, Commutable,
[(set QPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]>;
class N3VLIntSL<bit op24, bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
@@ -3038,22 +3131,23 @@ class N2VQSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
// Long shift by immediate.
class N2VLSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Operand ImmTy, SDNode OpNode>
+ ValueType ResTy, ValueType OpTy, Operand ImmTy,
+ SDPatternOperator OpNode>
: N2VImm<op24, op23, op11_8, op7, op6, op4,
(outs QPR:$Vd), (ins DPR:$Vm, ImmTy:$SIMM), N2RegVShLFrm,
IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
- [(set QPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vm),
- (i32 imm:$SIMM))))]>;
+ [(set QPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vm), ImmTy:$SIMM)))]>;
// Narrow shift by immediate.
class N2VNSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Operand ImmTy, SDNode OpNode>
+ ValueType ResTy, ValueType OpTy, Operand ImmTy,
+ SDPatternOperator OpNode>
: N2VImm<op24, op23, op11_8, op7, op6, op4,
(outs DPR:$Vd), (ins QPR:$Vm, ImmTy:$SIMM), N2RegVShRFrm, itin,
OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
[(set DPR:$Vd, (ResTy (OpNode (OpTy QPR:$Vm),
- (i32 imm:$SIMM))))]>;
+ (i32 ImmTy:$SIMM))))]>;
// Shift right by immediate and accumulate,
// both double- and quad-register.
@@ -3941,7 +4035,8 @@ multiclass N2VShInsR_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
// Neon Shift Long operations,
// element sizes of 8, 16, 32 bits:
multiclass N2VLSh_QHS<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6,
- bit op4, string OpcodeStr, string Dt, SDNode OpNode> {
+ bit op4, string OpcodeStr, string Dt,
+ SDPatternOperator OpNode> {
def v8i16 : N2VLSh<op24, op23, op11_8, op7, op6, op4,
OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8, imm1_7, OpNode> {
let Inst{21-19} = 0b001; // imm6 = 001xxx
@@ -3960,7 +4055,7 @@ multiclass N2VLSh_QHS<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6,
// element sizes of 16, 32, 64 bits:
multiclass N2VNSh_HSD<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6,
bit op4, InstrItinClass itin, string OpcodeStr, string Dt,
- SDNode OpNode> {
+ SDPatternOperator OpNode> {
def v8i8 : N2VNSh<op24, op23, op11_8, op7, op6, op4, itin,
OpcodeStr, !strconcat(Dt, "16"),
v8i8, v8i16, shr_imm8, OpNode> {
@@ -4427,14 +4522,14 @@ defm VCLTz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00100, 0, "vclt", "s",
// VACGE : Vector Absolute Compare Greater Than or Equal (aka VCAGE)
def VACGEd : N3VDInt<1, 0, 0b00, 0b1110, 1, N3RegFrm, IIC_VBIND, "vacge",
- "f32", v2i32, v2f32, int_arm_neon_vacged, 0>;
+ "f32", v2i32, v2f32, int_arm_neon_vacge, 0>;
def VACGEq : N3VQInt<1, 0, 0b00, 0b1110, 1, N3RegFrm, IIC_VBINQ, "vacge",
- "f32", v4i32, v4f32, int_arm_neon_vacgeq, 0>;
+ "f32", v4i32, v4f32, int_arm_neon_vacge, 0>;
// VACGT : Vector Absolute Compare Greater Than (aka VCAGT)
def VACGTd : N3VDInt<1, 0, 0b10, 0b1110, 1, N3RegFrm, IIC_VBIND, "vacgt",
- "f32", v2i32, v2f32, int_arm_neon_vacgtd, 0>;
+ "f32", v2i32, v2f32, int_arm_neon_vacgt, 0>;
def VACGTq : N3VQInt<1, 0, 0b10, 0b1110, 1, N3RegFrm, IIC_VBINQ, "vacgt",
- "f32", v4i32, v4f32, int_arm_neon_vacgtq, 0>;
+ "f32", v4i32, v4f32, int_arm_neon_vacgt, 0>;
// VTST : Vector Test Bits
defm VTST : N3V_QHS<0, 0, 0b1000, 1, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
IIC_VBINi4Q, "vtst", "", NEONvtst, 1>;
@@ -4946,28 +5041,51 @@ defm VSHRu : N2VShR_QHSD<1, 1, 0b0000, 1, IIC_VSHLiD, "vshr", "u", "VSHRu",
NEONvshru>;
// VSHLL : Vector Shift Left Long
-defm VSHLLs : N2VLSh_QHS<0, 1, 0b1010, 0, 0, 1, "vshll", "s", NEONvshlls>;
-defm VSHLLu : N2VLSh_QHS<1, 1, 0b1010, 0, 0, 1, "vshll", "u", NEONvshllu>;
+defm VSHLLs : N2VLSh_QHS<0, 1, 0b1010, 0, 0, 1, "vshll", "s",
+ PatFrag<(ops node:$LHS, node:$RHS), (NEONvshl (sext node:$LHS), node:$RHS)>>;
+defm VSHLLu : N2VLSh_QHS<1, 1, 0b1010, 0, 0, 1, "vshll", "u",
+ PatFrag<(ops node:$LHS, node:$RHS), (NEONvshl (zext node:$LHS), node:$RHS)>>;
// VSHLL : Vector Shift Left Long (with maximum shift count)
class N2VLShMax<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
bit op6, bit op4, string OpcodeStr, string Dt, ValueType ResTy,
- ValueType OpTy, Operand ImmTy, SDNode OpNode>
+ ValueType OpTy, Operand ImmTy>
: N2VLSh<op24, op23, op11_8, op7, op6, op4, OpcodeStr, Dt,
- ResTy, OpTy, ImmTy, OpNode> {
+ ResTy, OpTy, ImmTy, null_frag> {
let Inst{21-16} = op21_16;
let DecoderMethod = "DecodeVSHLMaxInstruction";
}
def VSHLLi8 : N2VLShMax<1, 1, 0b110010, 0b0011, 0, 0, 0, "vshll", "i8",
- v8i16, v8i8, imm8, NEONvshlli>;
+ v8i16, v8i8, imm8>;
def VSHLLi16 : N2VLShMax<1, 1, 0b110110, 0b0011, 0, 0, 0, "vshll", "i16",
- v4i32, v4i16, imm16, NEONvshlli>;
+ v4i32, v4i16, imm16>;
def VSHLLi32 : N2VLShMax<1, 1, 0b111010, 0b0011, 0, 0, 0, "vshll", "i32",
- v2i64, v2i32, imm32, NEONvshlli>;
+ v2i64, v2i32, imm32>;
+
+def : Pat<(v8i16 (NEONvshl (zext (v8i8 DPR:$Rn)), (i32 8))),
+ (VSHLLi8 DPR:$Rn, 8)>;
+def : Pat<(v4i32 (NEONvshl (zext (v4i16 DPR:$Rn)), (i32 16))),
+ (VSHLLi16 DPR:$Rn, 16)>;
+def : Pat<(v2i64 (NEONvshl (zext (v2i32 DPR:$Rn)), (i32 32))),
+ (VSHLLi32 DPR:$Rn, 32)>;
+def : Pat<(v8i16 (NEONvshl (sext (v8i8 DPR:$Rn)), (i32 8))),
+ (VSHLLi8 DPR:$Rn, 8)>;
+def : Pat<(v4i32 (NEONvshl (sext (v4i16 DPR:$Rn)), (i32 16))),
+ (VSHLLi16 DPR:$Rn, 16)>;
+def : Pat<(v2i64 (NEONvshl (sext (v2i32 DPR:$Rn)), (i32 32))),
+ (VSHLLi32 DPR:$Rn, 32)>;
// VSHRN : Vector Shift Right and Narrow
defm VSHRN : N2VNSh_HSD<0,1,0b1000,0,0,1, IIC_VSHLiD, "vshrn", "i",
- NEONvshrn>;
+ PatFrag<(ops node:$Rn, node:$amt),
+ (trunc (NEONvshrs node:$Rn, node:$amt))>>;
+
+def : Pat<(v8i8 (trunc (NEONvshru (v8i16 QPR:$Vn), shr_imm8:$amt))),
+ (VSHRNv8i8 QPR:$Vn, shr_imm8:$amt)>;
+def : Pat<(v4i16 (trunc (NEONvshru (v4i32 QPR:$Vn), shr_imm16:$amt))),
+ (VSHRNv4i16 QPR:$Vn, shr_imm16:$amt)>;
+def : Pat<(v2i32 (trunc (NEONvshru (v2i64 QPR:$Vn), shr_imm32:$amt))),
+ (VSHRNv2i32 QPR:$Vn, shr_imm32:$amt)>;
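The VSHLL and VSHRN changes above replace dedicated target nodes with widen-then-shift and shift-then-truncate patterns. A rough per-lane C++ model of the two operations, assuming unsigned lanes and in-range shift amounts; vshll_u8_lane and vshrn_i16_lane are illustrative helpers:

#include <cstdint>
#include <cstdio>

// Per-lane model of the rewritten patterns:
//   vshll.u8  #n : zero-extend an 8-bit lane to 16 bits, then shift left by n.
//   vshrn.i16 #n : shift a 16-bit lane right by n, then truncate to 8 bits.
static uint16_t vshll_u8_lane(uint8_t x, unsigned n) {
  return static_cast<uint16_t>(static_cast<uint16_t>(x) << n);
}
static uint8_t vshrn_i16_lane(uint16_t x, unsigned n) {
  return static_cast<uint8_t>(x >> n);
}

int main() {
  std::printf("0x%04x\n", vshll_u8_lane(0xab, 8));    // 0xab00 (max-shift case)
  std::printf("0x%02x\n", vshrn_i16_lane(0xab00, 8)); // 0xab
  return 0;
}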
// VRSHL : Vector Rounding Shift
defm VRSHLs : N3VInt_QHSDSh<0, 0, 0b0101, 0, N3RegVShFrm,
@@ -5077,9 +5195,6 @@ def : Pat<(xor (v4i32 (NEONvshrs QPR:$src, (i32 31))),
(v4i32 (add QPR:$src, (NEONvshrs QPR:$src, (i32 31))))),
(VABSv4i32 QPR:$src)>;
-def : Pat<(v2f32 (int_arm_neon_vabs (v2f32 DPR:$src))), (VABSfd DPR:$src)>;
-def : Pat<(v4f32 (int_arm_neon_vabs (v4f32 QPR:$src))), (VABSfq QPR:$src)>;
-
// VQABS : Vector Saturating Absolute Value
defm VQABS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01110, 0,
IIC_VQUNAiD, IIC_VQUNAiQ, "vqabs", "s",
@@ -5226,6 +5341,55 @@ def VMOVv4f32 : N1ModImm<1, 0b000, 0b1111, 0, 1, 0, 1, (outs QPR:$Vd),
[(set QPR:$Vd, (v4f32 (NEONvmovFPImm timm:$SIMM)))]>;
} // isReMaterializable
+// Add support for the byte-replication feature, for GAS compatibility.
+// E.g. the instructions below:
+// "vmov.i32 d0, 0xffffffff"
+// "vmov.i32 d0, 0xabababab"
+// "vmov.i16 d0, 0xabab"
+// are strictly invalid, but we can accept them.
+// For the last two instructions, for example, the assembler should emit:
+// "vmov.i8 d0, 0xab"
+def : NEONInstAlias<"vmov${p}.i16 $Vd, $Vm",
+ (VMOVv8i8 DPR:$Vd, nImmVMOVI16ByteReplicate:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmov${p}.i32 $Vd, $Vm",
+ (VMOVv8i8 DPR:$Vd, nImmVMOVI32ByteReplicate:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmov${p}.i16 $Vd, $Vm",
+ (VMOVv16i8 QPR:$Vd, nImmVMOVI16ByteReplicate:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmov${p}.i32 $Vd, $Vm",
+ (VMOVv16i8 QPR:$Vd, nImmVMOVI32ByteReplicate:$Vm, pred:$p)>;
+
+// Also add the same support for VMVN instructions, so that the instruction:
+// "vmvn.i32 d0, 0xabababab"
+// is emitted as:
+// "vmov.i8 d0, 0x54"
+def : NEONInstAlias<"vmvn${p}.i16 $Vd, $Vm",
+ (VMOVv8i8 DPR:$Vd, nImmVMVNI16ByteReplicate:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmvn${p}.i32 $Vd, $Vm",
+ (VMOVv8i8 DPR:$Vd, nImmVMVNI32ByteReplicate:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmvn${p}.i16 $Vd, $Vm",
+ (VMOVv16i8 QPR:$Vd, nImmVMVNI16ByteReplicate:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmvn${p}.i32 $Vd, $Vm",
+ (VMOVv16i8 QPR:$Vd, nImmVMVNI32ByteReplicate:$Vm, pred:$p)>;
+
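These aliases accept .i16/.i32 immediates whose bytes are all equal and re-encode them as a byte VMOV (or, for VMVN, a VMOV of the complemented byte). A minimal sketch of the underlying arithmetic; replicatedByte is an illustrative helper, not the nImm*ByteReplicate predicate itself:

#include <cstdint>
#include <cstdio>
#include <optional>

// If every byte of a 32-bit immediate is identical, the value can be
// materialized as "vmov.i8 #<byte>"; for vmvn the complemented byte is
// used instead (0xabababab under vmvn is handled as vmov.i8 #0x54).
static std::optional<uint8_t> replicatedByte(uint32_t imm) {
  uint8_t b = imm & 0xffu;
  if (imm == b * 0x01010101u)
    return b;
  return std::nullopt;
}

int main() {
  if (auto b = replicatedByte(0xabababab)) {
    std::printf("vmov alias byte: 0x%02x\n", unsigned(*b));            // 0xab
    std::printf("vmvn alias byte: 0x%02x\n", unsigned(uint8_t(~*b)));  // 0x54
  }
  return 0;
}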
+// On some CPUs the two instructions "vmov.i32 dD, #0" and "vmov.i32 qD, #0"
+// require zero cycles to execute so they should be used wherever possible for
+// setting a register to zero.
+
+// Even without these pseudo-insts we would probably end up with the correct
+// instruction, but we could not mark the general ones with "isAsCheapAsAMove"
+// since they are sometimes rather expensive (in general).
+
+let AddedComplexity = 50, isAsCheapAsAMove = 1, isReMaterializable = 1 in {
+ def VMOVD0 : ARMPseudoExpand<(outs DPR:$Vd), (ins), 4, IIC_VMOVImm,
+ [(set DPR:$Vd, (v2i32 NEONimmAllZerosV))],
+ (VMOVv2i32 DPR:$Vd, 0, (ops 14, zero_reg))>,
+ Requires<[HasZCZ]>;
+ def VMOVQ0 : ARMPseudoExpand<(outs QPR:$Vd), (ins), 4, IIC_VMOVImm,
+ [(set QPR:$Vd, (v4i32 NEONimmAllZerosV))],
+ (VMOVv4i32 QPR:$Vd, 0, (ops 14, zero_reg))>,
+ Requires<[HasZCZ]>;
+}
+
// VMOV : Vector Get Lane (move scalar to ARM core register)
def VGETLNs8 : NVGetLane<{1,1,1,0,0,1,?,1}, 0b1011, {?,?},
@@ -5490,10 +5654,12 @@ def : Pat<(v4f32 (NEONvduplane (v4f32 QPR:$src), imm:$lane)),
(DSubReg_i32_reg imm:$lane))),
(SubReg_i32_lane imm:$lane)))>;
-def VDUPfdf : PseudoNeonI<(outs DPR:$dst), (ins SPR:$src), IIC_VMOVD, "",
- [(set DPR:$dst, (v2f32 (NEONvdup (f32 SPR:$src))))]>;
-def VDUPfqf : PseudoNeonI<(outs QPR:$dst), (ins SPR:$src), IIC_VMOVD, "",
- [(set QPR:$dst, (v4f32 (NEONvdup (f32 SPR:$src))))]>;
+def : Pat<(v2f32 (NEONvdup (f32 SPR:$src))),
+ (v2f32 (VDUPLN32d (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)),
+ SPR:$src, ssub_0), (i32 0)))>;
+def : Pat<(v4f32 (NEONvdup (f32 SPR:$src))),
+ (v4f32 (VDUPLN32q (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)),
+ SPR:$src, ssub_0), (i32 0)))>;
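The VDUPfdf/VDUPfqf pseudos are replaced by patterns that insert the scalar into lane 0 and then duplicate that lane with VDUPLN32. A rough model of the resulting broadcast; dup_f32x4 is an illustrative helper:

#include <array>
#include <cstdio>

// Rough model of the replacement patterns: the scalar lands in lane 0,
// then VDUPLN32 broadcasts that lane to every lane of the result.
static std::array<float, 4> dup_f32x4(float s) {
  return {s, s, s, s};
}

int main() {
  for (float x : dup_f32x4(1.5f))
    std::printf("%g ", x); // 1.5 1.5 1.5 1.5
  std::printf("\n");
  return 0;
}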
// VMOVN : Vector Narrowing Move
defm VMOVN : N2VN_HSD<0b11,0b11,0b10,0b00100,0,0, IIC_VMOVN,
@@ -5576,22 +5742,22 @@ def VCVTxu2fq : N2VCvtQ<1, 1, 0b1110, 0, 1, "vcvt", "f32.u32",
v4f32, v4i32, int_arm_neon_vcvtfxu2fp>;
}
-def : NEONInstAlias<"vcvt${p}.s32.f32 $Dd, $Dm, #0",
+def : NEONInstAlias<"vcvt${p}.s32.f32 $Dd, $Dm, #0",
(VCVTf2sd DPR:$Dd, DPR:$Dm, pred:$p)>;
-def : NEONInstAlias<"vcvt${p}.u32.f32 $Dd, $Dm, #0",
+def : NEONInstAlias<"vcvt${p}.u32.f32 $Dd, $Dm, #0",
(VCVTf2ud DPR:$Dd, DPR:$Dm, pred:$p)>;
-def : NEONInstAlias<"vcvt${p}.f32.s32 $Dd, $Dm, #0",
+def : NEONInstAlias<"vcvt${p}.f32.s32 $Dd, $Dm, #0",
(VCVTs2fd DPR:$Dd, DPR:$Dm, pred:$p)>;
-def : NEONInstAlias<"vcvt${p}.f32.u32 $Dd, $Dm, #0",
+def : NEONInstAlias<"vcvt${p}.f32.u32 $Dd, $Dm, #0",
(VCVTu2fd DPR:$Dd, DPR:$Dm, pred:$p)>;
-def : NEONInstAlias<"vcvt${p}.s32.f32 $Qd, $Qm, #0",
+def : NEONInstAlias<"vcvt${p}.s32.f32 $Qd, $Qm, #0",
(VCVTf2sq QPR:$Qd, QPR:$Qm, pred:$p)>;
-def : NEONInstAlias<"vcvt${p}.u32.f32 $Qd, $Qm, #0",
+def : NEONInstAlias<"vcvt${p}.u32.f32 $Qd, $Qm, #0",
(VCVTf2uq QPR:$Qd, QPR:$Qm, pred:$p)>;
-def : NEONInstAlias<"vcvt${p}.f32.s32 $Qd, $Qm, #0",
+def : NEONInstAlias<"vcvt${p}.f32.s32 $Qd, $Qm, #0",
(VCVTs2fq QPR:$Qd, QPR:$Qm, pred:$p)>;
-def : NEONInstAlias<"vcvt${p}.f32.u32 $Qd, $Qm, #0",
+def : NEONInstAlias<"vcvt${p}.f32.u32 $Qd, $Qm, #0",
(VCVTu2fq QPR:$Qd, QPR:$Qm, pred:$p)>;
@@ -5874,7 +6040,7 @@ defm VRINTPN : VRINT_FPI<"p", 0b111, int_arm_neon_vrintp>;
// Cryptography instructions
let PostEncoderMethod = "NEONThumb2DataIPostEncoder",
- DecoderNamespace = "v8Crypto" in {
+ DecoderNamespace = "v8Crypto", hasSideEffects = 0 in {
class AES<string op, bit op7, bit op6, SDPatternOperator Int>
: N2VQIntXnp<0b00, 0b00, 0b011, op6, op7, NoItinerary,
!strconcat("aes", op), "8", v16i8, v16i8, Int>,
@@ -5904,17 +6070,45 @@ def AESE : AES2Op<"e", 0, 0, int_arm_neon_aese>;
def AESIMC : AES<"imc", 1, 1, int_arm_neon_aesimc>;
def AESMC : AES<"mc", 1, 0, int_arm_neon_aesmc>;
-def SHA1H : N2SHA<"1h", 0b01, 0b010, 1, 1, int_arm_neon_sha1h>;
+def SHA1H : N2SHA<"1h", 0b01, 0b010, 1, 1, null_frag>;
def SHA1SU1 : N2SHA2Op<"1su1", 0b10, 0b011, 1, 0, int_arm_neon_sha1su1>;
def SHA256SU0 : N2SHA2Op<"256su0", 0b10, 0b011, 1, 1, int_arm_neon_sha256su0>;
-def SHA1C : N3SHA3Op<"1c", 0b00100, 0b00, int_arm_neon_sha1c>;
-def SHA1M : N3SHA3Op<"1m", 0b00100, 0b10, int_arm_neon_sha1m>;
-def SHA1P : N3SHA3Op<"1p", 0b00100, 0b01, int_arm_neon_sha1p>;
+def SHA1C : N3SHA3Op<"1c", 0b00100, 0b00, null_frag>;
+def SHA1M : N3SHA3Op<"1m", 0b00100, 0b10, null_frag>;
+def SHA1P : N3SHA3Op<"1p", 0b00100, 0b01, null_frag>;
def SHA1SU0 : N3SHA3Op<"1su0", 0b00100, 0b11, int_arm_neon_sha1su0>;
def SHA256H : N3SHA3Op<"256h", 0b00110, 0b00, int_arm_neon_sha256h>;
def SHA256H2 : N3SHA3Op<"256h2", 0b00110, 0b01, int_arm_neon_sha256h2>;
def SHA256SU1 : N3SHA3Op<"256su1", 0b00110, 0b10, int_arm_neon_sha256su1>;
+def : Pat<(i32 (int_arm_neon_sha1h i32:$Rn)),
+ (COPY_TO_REGCLASS (f32 (EXTRACT_SUBREG
+ (SHA1H (SUBREG_TO_REG (i64 0),
+ (f32 (COPY_TO_REGCLASS i32:$Rn, SPR)),
+ ssub_0)),
+ ssub_0)), GPR)>;
+
+def : Pat<(v4i32 (int_arm_neon_sha1c v4i32:$hash_abcd, i32:$hash_e, v4i32:$wk)),
+ (SHA1C v4i32:$hash_abcd,
+ (SUBREG_TO_REG (i64 0),
+ (f32 (COPY_TO_REGCLASS i32:$hash_e, SPR)),
+ ssub_0),
+ v4i32:$wk)>;
+
+def : Pat<(v4i32 (int_arm_neon_sha1m v4i32:$hash_abcd, i32:$hash_e, v4i32:$wk)),
+ (SHA1M v4i32:$hash_abcd,
+ (SUBREG_TO_REG (i64 0),
+ (f32 (COPY_TO_REGCLASS i32:$hash_e, SPR)),
+ ssub_0),
+ v4i32:$wk)>;
+
+def : Pat<(v4i32 (int_arm_neon_sha1p v4i32:$hash_abcd, i32:$hash_e, v4i32:$wk)),
+ (SHA1P v4i32:$hash_abcd,
+ (SUBREG_TO_REG (i64 0),
+ (f32 (COPY_TO_REGCLASS i32:$hash_e, SPR)),
+ ssub_0),
+ v4i32:$wk)>;
+
//===----------------------------------------------------------------------===//
// NEON instructions for single-precision FP math
//===----------------------------------------------------------------------===//
@@ -5982,67 +6176,145 @@ def : Pat<(f32 (bitconvert GPR:$a)),
//===----------------------------------------------------------------------===//
// bit_convert
-def : Pat<(v1i64 (bitconvert (v2i32 DPR:$src))), (v1i64 DPR:$src)>;
-def : Pat<(v1i64 (bitconvert (v4i16 DPR:$src))), (v1i64 DPR:$src)>;
-def : Pat<(v1i64 (bitconvert (v8i8 DPR:$src))), (v1i64 DPR:$src)>;
+let Predicates = [IsLE] in {
+ def : Pat<(v1i64 (bitconvert (v2i32 DPR:$src))), (v1i64 DPR:$src)>;
+ def : Pat<(v1i64 (bitconvert (v4i16 DPR:$src))), (v1i64 DPR:$src)>;
+ def : Pat<(v1i64 (bitconvert (v8i8 DPR:$src))), (v1i64 DPR:$src)>;
+}
def : Pat<(v1i64 (bitconvert (f64 DPR:$src))), (v1i64 DPR:$src)>;
-def : Pat<(v1i64 (bitconvert (v2f32 DPR:$src))), (v1i64 DPR:$src)>;
-def : Pat<(v2i32 (bitconvert (v1i64 DPR:$src))), (v2i32 DPR:$src)>;
-def : Pat<(v2i32 (bitconvert (v4i16 DPR:$src))), (v2i32 DPR:$src)>;
-def : Pat<(v2i32 (bitconvert (v8i8 DPR:$src))), (v2i32 DPR:$src)>;
-def : Pat<(v2i32 (bitconvert (f64 DPR:$src))), (v2i32 DPR:$src)>;
+let Predicates = [IsLE] in {
+ def : Pat<(v1i64 (bitconvert (v2f32 DPR:$src))), (v1i64 DPR:$src)>;
+ def : Pat<(v2i32 (bitconvert (v1i64 DPR:$src))), (v2i32 DPR:$src)>;
+ def : Pat<(v2i32 (bitconvert (v4i16 DPR:$src))), (v2i32 DPR:$src)>;
+ def : Pat<(v2i32 (bitconvert (v8i8 DPR:$src))), (v2i32 DPR:$src)>;
+ def : Pat<(v2i32 (bitconvert (f64 DPR:$src))), (v2i32 DPR:$src)>;
+}
def : Pat<(v2i32 (bitconvert (v2f32 DPR:$src))), (v2i32 DPR:$src)>;
-def : Pat<(v4i16 (bitconvert (v1i64 DPR:$src))), (v4i16 DPR:$src)>;
-def : Pat<(v4i16 (bitconvert (v2i32 DPR:$src))), (v4i16 DPR:$src)>;
-def : Pat<(v4i16 (bitconvert (v8i8 DPR:$src))), (v4i16 DPR:$src)>;
-def : Pat<(v4i16 (bitconvert (f64 DPR:$src))), (v4i16 DPR:$src)>;
-def : Pat<(v4i16 (bitconvert (v2f32 DPR:$src))), (v4i16 DPR:$src)>;
-def : Pat<(v8i8 (bitconvert (v1i64 DPR:$src))), (v8i8 DPR:$src)>;
-def : Pat<(v8i8 (bitconvert (v2i32 DPR:$src))), (v8i8 DPR:$src)>;
-def : Pat<(v8i8 (bitconvert (v4i16 DPR:$src))), (v8i8 DPR:$src)>;
-def : Pat<(v8i8 (bitconvert (f64 DPR:$src))), (v8i8 DPR:$src)>;
-def : Pat<(v8i8 (bitconvert (v2f32 DPR:$src))), (v8i8 DPR:$src)>;
+let Predicates = [IsLE] in {
+ def : Pat<(v4i16 (bitconvert (v1i64 DPR:$src))), (v4i16 DPR:$src)>;
+ def : Pat<(v4i16 (bitconvert (v2i32 DPR:$src))), (v4i16 DPR:$src)>;
+ def : Pat<(v4i16 (bitconvert (v8i8 DPR:$src))), (v4i16 DPR:$src)>;
+ def : Pat<(v4i16 (bitconvert (f64 DPR:$src))), (v4i16 DPR:$src)>;
+ def : Pat<(v4i16 (bitconvert (v2f32 DPR:$src))), (v4i16 DPR:$src)>;
+ def : Pat<(v8i8 (bitconvert (v1i64 DPR:$src))), (v8i8 DPR:$src)>;
+ def : Pat<(v8i8 (bitconvert (v2i32 DPR:$src))), (v8i8 DPR:$src)>;
+ def : Pat<(v8i8 (bitconvert (v4i16 DPR:$src))), (v8i8 DPR:$src)>;
+ def : Pat<(v8i8 (bitconvert (f64 DPR:$src))), (v8i8 DPR:$src)>;
+ def : Pat<(v8i8 (bitconvert (v2f32 DPR:$src))), (v8i8 DPR:$src)>;
+}
def : Pat<(f64 (bitconvert (v1i64 DPR:$src))), (f64 DPR:$src)>;
-def : Pat<(f64 (bitconvert (v2i32 DPR:$src))), (f64 DPR:$src)>;
-def : Pat<(f64 (bitconvert (v4i16 DPR:$src))), (f64 DPR:$src)>;
-def : Pat<(f64 (bitconvert (v8i8 DPR:$src))), (f64 DPR:$src)>;
-def : Pat<(f64 (bitconvert (v2f32 DPR:$src))), (f64 DPR:$src)>;
-def : Pat<(v2f32 (bitconvert (f64 DPR:$src))), (v2f32 DPR:$src)>;
-def : Pat<(v2f32 (bitconvert (v1i64 DPR:$src))), (v2f32 DPR:$src)>;
+let Predicates = [IsLE] in {
+ def : Pat<(f64 (bitconvert (v2i32 DPR:$src))), (f64 DPR:$src)>;
+ def : Pat<(f64 (bitconvert (v4i16 DPR:$src))), (f64 DPR:$src)>;
+ def : Pat<(f64 (bitconvert (v8i8 DPR:$src))), (f64 DPR:$src)>;
+ def : Pat<(f64 (bitconvert (v2f32 DPR:$src))), (f64 DPR:$src)>;
+ def : Pat<(v2f32 (bitconvert (f64 DPR:$src))), (v2f32 DPR:$src)>;
+ def : Pat<(v2f32 (bitconvert (v1i64 DPR:$src))), (v2f32 DPR:$src)>;
+}
def : Pat<(v2f32 (bitconvert (v2i32 DPR:$src))), (v2f32 DPR:$src)>;
-def : Pat<(v2f32 (bitconvert (v4i16 DPR:$src))), (v2f32 DPR:$src)>;
-def : Pat<(v2f32 (bitconvert (v8i8 DPR:$src))), (v2f32 DPR:$src)>;
+let Predicates = [IsLE] in {
+ def : Pat<(v2f32 (bitconvert (v4i16 DPR:$src))), (v2f32 DPR:$src)>;
+ def : Pat<(v2f32 (bitconvert (v8i8 DPR:$src))), (v2f32 DPR:$src)>;
+}
-def : Pat<(v2i64 (bitconvert (v4i32 QPR:$src))), (v2i64 QPR:$src)>;
-def : Pat<(v2i64 (bitconvert (v8i16 QPR:$src))), (v2i64 QPR:$src)>;
-def : Pat<(v2i64 (bitconvert (v16i8 QPR:$src))), (v2i64 QPR:$src)>;
+let Predicates = [IsLE] in {
+ def : Pat<(v2i64 (bitconvert (v4i32 QPR:$src))), (v2i64 QPR:$src)>;
+ def : Pat<(v2i64 (bitconvert (v8i16 QPR:$src))), (v2i64 QPR:$src)>;
+ def : Pat<(v2i64 (bitconvert (v16i8 QPR:$src))), (v2i64 QPR:$src)>;
+}
def : Pat<(v2i64 (bitconvert (v2f64 QPR:$src))), (v2i64 QPR:$src)>;
-def : Pat<(v2i64 (bitconvert (v4f32 QPR:$src))), (v2i64 QPR:$src)>;
-def : Pat<(v4i32 (bitconvert (v2i64 QPR:$src))), (v4i32 QPR:$src)>;
-def : Pat<(v4i32 (bitconvert (v8i16 QPR:$src))), (v4i32 QPR:$src)>;
-def : Pat<(v4i32 (bitconvert (v16i8 QPR:$src))), (v4i32 QPR:$src)>;
-def : Pat<(v4i32 (bitconvert (v2f64 QPR:$src))), (v4i32 QPR:$src)>;
+let Predicates = [IsLE] in {
+ def : Pat<(v2i64 (bitconvert (v4f32 QPR:$src))), (v2i64 QPR:$src)>;
+ def : Pat<(v4i32 (bitconvert (v2i64 QPR:$src))), (v4i32 QPR:$src)>;
+ def : Pat<(v4i32 (bitconvert (v8i16 QPR:$src))), (v4i32 QPR:$src)>;
+ def : Pat<(v4i32 (bitconvert (v16i8 QPR:$src))), (v4i32 QPR:$src)>;
+ def : Pat<(v4i32 (bitconvert (v2f64 QPR:$src))), (v4i32 QPR:$src)>;
+}
def : Pat<(v4i32 (bitconvert (v4f32 QPR:$src))), (v4i32 QPR:$src)>;
-def : Pat<(v8i16 (bitconvert (v2i64 QPR:$src))), (v8i16 QPR:$src)>;
-def : Pat<(v8i16 (bitconvert (v4i32 QPR:$src))), (v8i16 QPR:$src)>;
-def : Pat<(v8i16 (bitconvert (v16i8 QPR:$src))), (v8i16 QPR:$src)>;
-def : Pat<(v8i16 (bitconvert (v2f64 QPR:$src))), (v8i16 QPR:$src)>;
-def : Pat<(v8i16 (bitconvert (v4f32 QPR:$src))), (v8i16 QPR:$src)>;
-def : Pat<(v16i8 (bitconvert (v2i64 QPR:$src))), (v16i8 QPR:$src)>;
-def : Pat<(v16i8 (bitconvert (v4i32 QPR:$src))), (v16i8 QPR:$src)>;
-def : Pat<(v16i8 (bitconvert (v8i16 QPR:$src))), (v16i8 QPR:$src)>;
-def : Pat<(v16i8 (bitconvert (v2f64 QPR:$src))), (v16i8 QPR:$src)>;
-def : Pat<(v16i8 (bitconvert (v4f32 QPR:$src))), (v16i8 QPR:$src)>;
-def : Pat<(v4f32 (bitconvert (v2i64 QPR:$src))), (v4f32 QPR:$src)>;
+let Predicates = [IsLE] in {
+ def : Pat<(v8i16 (bitconvert (v2i64 QPR:$src))), (v8i16 QPR:$src)>;
+ def : Pat<(v8i16 (bitconvert (v4i32 QPR:$src))), (v8i16 QPR:$src)>;
+ def : Pat<(v8i16 (bitconvert (v16i8 QPR:$src))), (v8i16 QPR:$src)>;
+ def : Pat<(v8i16 (bitconvert (v2f64 QPR:$src))), (v8i16 QPR:$src)>;
+ def : Pat<(v8i16 (bitconvert (v4f32 QPR:$src))), (v8i16 QPR:$src)>;
+ def : Pat<(v16i8 (bitconvert (v2i64 QPR:$src))), (v16i8 QPR:$src)>;
+ def : Pat<(v16i8 (bitconvert (v4i32 QPR:$src))), (v16i8 QPR:$src)>;
+ def : Pat<(v16i8 (bitconvert (v8i16 QPR:$src))), (v16i8 QPR:$src)>;
+ def : Pat<(v16i8 (bitconvert (v2f64 QPR:$src))), (v16i8 QPR:$src)>;
+ def : Pat<(v16i8 (bitconvert (v4f32 QPR:$src))), (v16i8 QPR:$src)>;
+ def : Pat<(v4f32 (bitconvert (v2i64 QPR:$src))), (v4f32 QPR:$src)>;
+}
def : Pat<(v4f32 (bitconvert (v4i32 QPR:$src))), (v4f32 QPR:$src)>;
-def : Pat<(v4f32 (bitconvert (v8i16 QPR:$src))), (v4f32 QPR:$src)>;
-def : Pat<(v4f32 (bitconvert (v16i8 QPR:$src))), (v4f32 QPR:$src)>;
-def : Pat<(v4f32 (bitconvert (v2f64 QPR:$src))), (v4f32 QPR:$src)>;
+let Predicates = [IsLE] in {
+ def : Pat<(v4f32 (bitconvert (v8i16 QPR:$src))), (v4f32 QPR:$src)>;
+ def : Pat<(v4f32 (bitconvert (v16i8 QPR:$src))), (v4f32 QPR:$src)>;
+ def : Pat<(v4f32 (bitconvert (v2f64 QPR:$src))), (v4f32 QPR:$src)>;
+}
def : Pat<(v2f64 (bitconvert (v2i64 QPR:$src))), (v2f64 QPR:$src)>;
-def : Pat<(v2f64 (bitconvert (v4i32 QPR:$src))), (v2f64 QPR:$src)>;
-def : Pat<(v2f64 (bitconvert (v8i16 QPR:$src))), (v2f64 QPR:$src)>;
-def : Pat<(v2f64 (bitconvert (v16i8 QPR:$src))), (v2f64 QPR:$src)>;
-def : Pat<(v2f64 (bitconvert (v4f32 QPR:$src))), (v2f64 QPR:$src)>;
+let Predicates = [IsLE] in {
+ def : Pat<(v2f64 (bitconvert (v4i32 QPR:$src))), (v2f64 QPR:$src)>;
+ def : Pat<(v2f64 (bitconvert (v8i16 QPR:$src))), (v2f64 QPR:$src)>;
+ def : Pat<(v2f64 (bitconvert (v16i8 QPR:$src))), (v2f64 QPR:$src)>;
+ def : Pat<(v2f64 (bitconvert (v4f32 QPR:$src))), (v2f64 QPR:$src)>;
+}
+
+let Predicates = [IsBE] in {
+ // 64 bit conversions
+ def : Pat<(v1i64 (bitconvert (v2i32 DPR:$src))), (VREV64d32 DPR:$src)>;
+ def : Pat<(v1i64 (bitconvert (v4i16 DPR:$src))), (VREV64d16 DPR:$src)>;
+ def : Pat<(v1i64 (bitconvert (v8i8 DPR:$src))), (VREV64d8 DPR:$src)>;
+ def : Pat<(v1i64 (bitconvert (v2f32 DPR:$src))), (VREV64d32 DPR:$src)>;
+ def : Pat<(v2i32 (bitconvert (v1i64 DPR:$src))), (VREV64d32 DPR:$src)>;
+ def : Pat<(v2i32 (bitconvert (v4i16 DPR:$src))), (VREV32d16 DPR:$src)>;
+ def : Pat<(v2i32 (bitconvert (v8i8 DPR:$src))), (VREV32d8 DPR:$src)>;
+ def : Pat<(v2i32 (bitconvert (f64 DPR:$src))), (VREV64d32 DPR:$src)>;
+ def : Pat<(v4i16 (bitconvert (v1i64 DPR:$src))), (VREV64d16 DPR:$src)>;
+ def : Pat<(v4i16 (bitconvert (v2i32 DPR:$src))), (VREV32d16 DPR:$src)>;
+ def : Pat<(v4i16 (bitconvert (v8i8 DPR:$src))), (VREV16d8 DPR:$src)>;
+ def : Pat<(v4i16 (bitconvert (f64 DPR:$src))), (VREV64d16 DPR:$src)>;
+ def : Pat<(v4i16 (bitconvert (v2f32 DPR:$src))), (VREV32d16 DPR:$src)>;
+ def : Pat<(v8i8 (bitconvert (v1i64 DPR:$src))), (VREV64d8 DPR:$src)>;
+ def : Pat<(v8i8 (bitconvert (v2i32 DPR:$src))), (VREV32d8 DPR:$src)>;
+ def : Pat<(v8i8 (bitconvert (v4i16 DPR:$src))), (VREV16d8 DPR:$src)>;
+ def : Pat<(v8i8 (bitconvert (f64 DPR:$src))), (VREV64d8 DPR:$src)>;
+ def : Pat<(v8i8 (bitconvert (v2f32 DPR:$src))), (VREV32d8 DPR:$src)>;
+ def : Pat<(f64 (bitconvert (v2i32 DPR:$src))), (VREV64d32 DPR:$src)>;
+ def : Pat<(f64 (bitconvert (v4i16 DPR:$src))), (VREV64d16 DPR:$src)>;
+ def : Pat<(f64 (bitconvert (v8i8 DPR:$src))), (VREV64d8 DPR:$src)>;
+ def : Pat<(f64 (bitconvert (v2f32 DPR:$src))), (VREV64d32 DPR:$src)>;
+ def : Pat<(v2f32 (bitconvert (f64 DPR:$src))), (VREV64d32 DPR:$src)>;
+ def : Pat<(v2f32 (bitconvert (v1i64 DPR:$src))), (VREV64d32 DPR:$src)>;
+ def : Pat<(v2f32 (bitconvert (v4i16 DPR:$src))), (VREV32d16 DPR:$src)>;
+ def : Pat<(v2f32 (bitconvert (v8i8 DPR:$src))), (VREV32d8 DPR:$src)>;
+
+ // 128 bit conversions
+ def : Pat<(v2i64 (bitconvert (v4i32 QPR:$src))), (VREV64q32 QPR:$src)>;
+ def : Pat<(v2i64 (bitconvert (v8i16 QPR:$src))), (VREV64q16 QPR:$src)>;
+ def : Pat<(v2i64 (bitconvert (v16i8 QPR:$src))), (VREV64q8 QPR:$src)>;
+ def : Pat<(v2i64 (bitconvert (v4f32 QPR:$src))), (VREV64q32 QPR:$src)>;
+ def : Pat<(v4i32 (bitconvert (v2i64 QPR:$src))), (VREV64q32 QPR:$src)>;
+ def : Pat<(v4i32 (bitconvert (v8i16 QPR:$src))), (VREV32q16 QPR:$src)>;
+ def : Pat<(v4i32 (bitconvert (v16i8 QPR:$src))), (VREV32q8 QPR:$src)>;
+ def : Pat<(v4i32 (bitconvert (v2f64 QPR:$src))), (VREV64q32 QPR:$src)>;
+ def : Pat<(v8i16 (bitconvert (v2i64 QPR:$src))), (VREV64q16 QPR:$src)>;
+ def : Pat<(v8i16 (bitconvert (v4i32 QPR:$src))), (VREV32q16 QPR:$src)>;
+ def : Pat<(v8i16 (bitconvert (v16i8 QPR:$src))), (VREV16q8 QPR:$src)>;
+ def : Pat<(v8i16 (bitconvert (v2f64 QPR:$src))), (VREV64q16 QPR:$src)>;
+ def : Pat<(v8i16 (bitconvert (v4f32 QPR:$src))), (VREV32q16 QPR:$src)>;
+ def : Pat<(v16i8 (bitconvert (v2i64 QPR:$src))), (VREV64q8 QPR:$src)>;
+ def : Pat<(v16i8 (bitconvert (v4i32 QPR:$src))), (VREV32q8 QPR:$src)>;
+ def : Pat<(v16i8 (bitconvert (v8i16 QPR:$src))), (VREV16q8 QPR:$src)>;
+ def : Pat<(v16i8 (bitconvert (v2f64 QPR:$src))), (VREV64q8 QPR:$src)>;
+ def : Pat<(v16i8 (bitconvert (v4f32 QPR:$src))), (VREV32q8 QPR:$src)>;
+ def : Pat<(v4f32 (bitconvert (v2i64 QPR:$src))), (VREV64q32 QPR:$src)>;
+ def : Pat<(v4f32 (bitconvert (v8i16 QPR:$src))), (VREV32q16 QPR:$src)>;
+ def : Pat<(v4f32 (bitconvert (v16i8 QPR:$src))), (VREV32q8 QPR:$src)>;
+ def : Pat<(v4f32 (bitconvert (v2f64 QPR:$src))), (VREV64q32 QPR:$src)>;
+ def : Pat<(v2f64 (bitconvert (v4i32 QPR:$src))), (VREV64q32 QPR:$src)>;
+ def : Pat<(v2f64 (bitconvert (v8i16 QPR:$src))), (VREV64q16 QPR:$src)>;
+ def : Pat<(v2f64 (bitconvert (v16i8 QPR:$src))), (VREV64q8 QPR:$src)>;
+ def : Pat<(v2f64 (bitconvert (v4f32 QPR:$src))), (VREV64q32 QPR:$src)>;
+}
// Fold extracting an element out of a v2i32 into a vfp register.
def : Pat<(f32 (bitconvert (i32 (extractelt (v2i32 DPR:$src), imm:$lane)))),
@@ -6051,7 +6323,7 @@ def : Pat<(f32 (bitconvert (i32 (extractelt (v2i32 DPR:$src), imm:$lane)))),
// Vector lengthening move with load, matching extending loads.
// extload, zextload and sextload for a standard lengthening load. Example:
-// Lengthen_Single<"8", "i16", "8"> =
+// Lengthen_Single<"8", "i16", "8"> =
// Pat<(v8i16 (extloadvi8 addrmode6:$addr))
// (VMOVLuv8i16 (VLD1d8 addrmode6:$addr,
// (f64 (IMPLICIT_DEF)), (i32 0)))>;
@@ -6078,7 +6350,7 @@ multiclass Lengthen_Single<string DestLanes, string DestTy, string SrcTy> {
// half the lanes available. Example:
// Lengthen_HalfSingle<"4", "i16", "8", "i16", "i8"> =
// Pat<(v4i16 (extloadvi8 addrmode6oneL32:$addr)),
-// (EXTRACT_SUBREG (VMOVLuv8i16 (VLD1LNd32 addrmode6oneL32:$addr,
+// (EXTRACT_SUBREG (VMOVLuv8i16 (VLD1LNd32 addrmode6oneL32:$addr,
// (f64 (IMPLICIT_DEF)), (i32 0))),
// dsub_0)>;
multiclass Lengthen_HalfSingle<string DestLanes, string DestTy, string SrcTy,
@@ -6100,6 +6372,32 @@ multiclass Lengthen_HalfSingle<string DestLanes, string DestTy, string SrcTy,
dsub_0)>;
}
+// The following multiclass is a copy of the Lengthen_HalfSingle definition
+// above, but with an additional "RevLanes" parameter to select the correct
+// VREV32dXX instruction, which converts the data loaded by VLD1LN into the
+// proper vector format in big-endian mode.
+multiclass Lengthen_HalfSingle_Big_Endian<string DestLanes, string DestTy, string SrcTy,
+ string InsnLanes, string InsnTy, string RevLanes> {
+ def _Any : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
+ (!cast<PatFrag>("extloadv" # SrcTy) addrmode6oneL32:$addr)),
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # InsnLanes # InsnTy)
+ (!cast<Instruction>("VREV32d" # RevLanes)
+ (VLD1LNd32 addrmode6oneL32:$addr, (f64 (IMPLICIT_DEF)), (i32 0)))),
+ dsub_0)>;
+ def _Z : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
+ (!cast<PatFrag>("zextloadv" # SrcTy) addrmode6oneL32:$addr)),
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # InsnLanes # InsnTy)
+ (!cast<Instruction>("VREV32d" # RevLanes)
+ (VLD1LNd32 addrmode6oneL32:$addr, (f64 (IMPLICIT_DEF)), (i32 0)))),
+ dsub_0)>;
+ def _S : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
+ (!cast<PatFrag>("sextloadv" # SrcTy) addrmode6oneL32:$addr)),
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLsv" # InsnLanes # InsnTy)
+ (!cast<Instruction>("VREV32d" # RevLanes)
+ (VLD1LNd32 addrmode6oneL32:$addr, (f64 (IMPLICIT_DEF)), (i32 0)))),
+ dsub_0)>;
+}
+
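For illustration, using the v4i8 -> v4i16 instantiation further below
(Lengthen_HalfSingle_Big_Endian<"4", "i16", "i8", "8", "i16", "8">), the
zextload pattern generated by this multiclass should expand to roughly:

  def : Pat<(v4i16 (zextloadvi8 addrmode6oneL32:$addr)),
            (EXTRACT_SUBREG (VMOVLuv8i16 (VREV32d8
              (VLD1LNd32 addrmode6oneL32:$addr, (f64 (IMPLICIT_DEF)), (i32 0)))),
              dsub_0)>;

i.e. the same expansion as the little-endian Lengthen_HalfSingle example above,
with a VREV32d8 inserted between the load and the lengthening move.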
// extload, zextload and sextload for a lengthening load followed by another
// lengthening load, to quadruple the initial length.
//
@@ -6134,6 +6432,36 @@ multiclass Lengthen_Double<string DestLanes, string DestTy, string SrcTy,
dsub_0))>;
}
+// The following multiclass is a copy of the Lengthen_Double definition above,
+// but with an additional "RevLanes" parameter to select the correct VREV32dXX
+// instruction, which converts the data loaded by VLD1LN into the proper
+// vector format in big-endian mode.
+multiclass Lengthen_Double_Big_Endian<string DestLanes, string DestTy, string SrcTy,
+ string Insn1Lanes, string Insn1Ty, string Insn2Lanes,
+ string Insn2Ty, string RevLanes> {
+ def _Any : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
+ (!cast<PatFrag>("extloadv" # SrcTy) addrmode6oneL32:$addr)),
+ (!cast<Instruction>("VMOVLuv" # Insn2Lanes # Insn2Ty)
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn1Lanes # Insn1Ty)
+ (!cast<Instruction>("VREV32d" # RevLanes)
+ (VLD1LNd32 addrmode6oneL32:$addr, (f64 (IMPLICIT_DEF)), (i32 0)))),
+ dsub_0))>;
+ def _Z : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
+ (!cast<PatFrag>("zextloadv" # SrcTy) addrmode6oneL32:$addr)),
+ (!cast<Instruction>("VMOVLuv" # Insn2Lanes # Insn2Ty)
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn1Lanes # Insn1Ty)
+ (!cast<Instruction>("VREV32d" # RevLanes)
+ (VLD1LNd32 addrmode6oneL32:$addr, (f64 (IMPLICIT_DEF)), (i32 0)))),
+ dsub_0))>;
+ def _S : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
+ (!cast<PatFrag>("sextloadv" # SrcTy) addrmode6oneL32:$addr)),
+ (!cast<Instruction>("VMOVLsv" # Insn2Lanes # Insn2Ty)
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLsv" # Insn1Lanes # Insn1Ty)
+ (!cast<Instruction>("VREV32d" # RevLanes)
+ (VLD1LNd32 addrmode6oneL32:$addr, (f64 (IMPLICIT_DEF)), (i32 0)))),
+ dsub_0))>;
+}
+
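For illustration, using the v4i8 -> v4i16 -> v4i32 instantiation further below
(Lengthen_Double_Big_Endian<"4", "i32", "i8", "8", "i16", "4", "i32", "8">),
the zextload pattern generated here should expand to roughly:

  def : Pat<(v4i32 (zextloadvi8 addrmode6oneL32:$addr)),
            (VMOVLuv4i32 (EXTRACT_SUBREG (VMOVLuv8i16 (VREV32d8
              (VLD1LNd32 addrmode6oneL32:$addr, (f64 (IMPLICIT_DEF)), (i32 0)))),
              dsub_0))>;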
// extload, zextload and sextload for a lengthening load followed by another
// lengthening load, to quadruple the initial length, but which ends up only
// requiring half the available lanes (a 64-bit outcome instead of a 128-bit).
@@ -6171,33 +6499,102 @@ multiclass Lengthen_HalfDouble<string DestLanes, string DestTy, string SrcTy,
dsub_0)>;
}
+// The following multiclass is a copy of the Lengthen_HalfDouble definition
+// above, but with an additional VREV16d8 instruction to convert the data
+// loaded by VLD1LN into the proper vector format in big-endian mode.
+multiclass Lengthen_HalfDouble_Big_Endian<string DestLanes, string DestTy, string SrcTy,
+ string Insn1Lanes, string Insn1Ty, string Insn2Lanes,
+ string Insn2Ty> {
+ def _Any : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
+ (!cast<PatFrag>("extloadv" # SrcTy) addrmode6:$addr)),
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn2Lanes # Insn2Ty)
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn1Lanes # Insn1Ty)
+ (!cast<Instruction>("VREV16d8")
+ (VLD1LNd16 addrmode6:$addr, (f64 (IMPLICIT_DEF)), (i32 0)))),
+ dsub_0)),
+ dsub_0)>;
+ def _Z : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
+ (!cast<PatFrag>("zextloadv" # SrcTy) addrmode6:$addr)),
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn2Lanes # Insn2Ty)
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn1Lanes # Insn1Ty)
+ (!cast<Instruction>("VREV16d8")
+ (VLD1LNd16 addrmode6:$addr, (f64 (IMPLICIT_DEF)), (i32 0)))),
+ dsub_0)),
+ dsub_0)>;
+ def _S : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
+ (!cast<PatFrag>("sextloadv" # SrcTy) addrmode6:$addr)),
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLsv" # Insn2Lanes # Insn2Ty)
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLsv" # Insn1Lanes # Insn1Ty)
+ (!cast<Instruction>("VREV16d8")
+ (VLD1LNd16 addrmode6:$addr, (f64 (IMPLICIT_DEF)), (i32 0)))),
+ dsub_0)),
+ dsub_0)>;
+}
+
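For illustration, using the v2i8 -> v2i16 -> v2i32 instantiation further below
(Lengthen_HalfDouble_Big_Endian<"2", "i32", "i8", "8", "i16", "4", "i32">),
the zextload pattern generated here should expand to roughly:

  def : Pat<(v2i32 (zextloadvi8 addrmode6:$addr)),
            (EXTRACT_SUBREG (VMOVLuv4i32
              (EXTRACT_SUBREG (VMOVLuv8i16 (VREV16d8
                (VLD1LNd16 addrmode6:$addr, (f64 (IMPLICIT_DEF)), (i32 0)))),
                dsub_0)),
              dsub_0)>;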
defm : Lengthen_Single<"8", "i16", "8">; // v8i8 -> v8i16
defm : Lengthen_Single<"4", "i32", "16">; // v4i16 -> v4i32
defm : Lengthen_Single<"2", "i64", "32">; // v2i32 -> v2i64
-defm : Lengthen_HalfSingle<"4", "i16", "i8", "8", "i16">; // v4i8 -> v4i16
-defm : Lengthen_HalfSingle<"2", "i32", "i16", "4", "i32">; // v2i16 -> v2i32
+let Predicates = [IsLE] in {
+ defm : Lengthen_HalfSingle<"4", "i16", "i8", "8", "i16">; // v4i8 -> v4i16
+ defm : Lengthen_HalfSingle<"2", "i32", "i16", "4", "i32">; // v2i16 -> v2i32
+
+ // Double lengthening - v4i8 -> v4i16 -> v4i32
+ defm : Lengthen_Double<"4", "i32", "i8", "8", "i16", "4", "i32">;
+ // v2i8 -> v2i16 -> v2i32
+ defm : Lengthen_HalfDouble<"2", "i32", "i8", "8", "i16", "4", "i32">;
+ // v2i16 -> v2i32 -> v2i64
+ defm : Lengthen_Double<"2", "i64", "i16", "4", "i32", "2", "i64">;
+}
+
+let Predicates = [IsBE] in {
+ defm : Lengthen_HalfSingle_Big_Endian<"4", "i16", "i8", "8", "i16", "8">; // v4i8 -> v4i16
+ defm : Lengthen_HalfSingle_Big_Endian<"2", "i32", "i16", "4", "i32", "16">; // v2i16 -> v2i32
-// Double lengthening - v4i8 -> v4i16 -> v4i32
-defm : Lengthen_Double<"4", "i32", "i8", "8", "i16", "4", "i32">;
-// v2i8 -> v2i16 -> v2i32
-defm : Lengthen_HalfDouble<"2", "i32", "i8", "8", "i16", "4", "i32">;
-// v2i16 -> v2i32 -> v2i64
-defm : Lengthen_Double<"2", "i64", "i16", "4", "i32", "2", "i64">;
+ // Double lengthening - v4i8 -> v4i16 -> v4i32
+ defm : Lengthen_Double_Big_Endian<"4", "i32", "i8", "8", "i16", "4", "i32", "8">;
+ // v2i8 -> v2i16 -> v2i32
+ defm : Lengthen_HalfDouble_Big_Endian<"2", "i32", "i8", "8", "i16", "4", "i32">;
+ // v2i16 -> v2i32 -> v2i64
+ defm : Lengthen_Double_Big_Endian<"2", "i64", "i16", "4", "i32", "2", "i64", "16">;
+}
// Triple lengthening - v2i8 -> v2i16 -> v2i32 -> v2i64
-def : Pat<(v2i64 (extloadvi8 addrmode6:$addr)),
- (VMOVLuv2i64 (EXTRACT_SUBREG (VMOVLuv4i32 (EXTRACT_SUBREG (VMOVLuv8i16
- (VLD1LNd16 addrmode6:$addr,
- (f64 (IMPLICIT_DEF)), (i32 0))), dsub_0)), dsub_0))>;
-def : Pat<(v2i64 (zextloadvi8 addrmode6:$addr)),
- (VMOVLuv2i64 (EXTRACT_SUBREG (VMOVLuv4i32 (EXTRACT_SUBREG (VMOVLuv8i16
- (VLD1LNd16 addrmode6:$addr,
- (f64 (IMPLICIT_DEF)), (i32 0))), dsub_0)), dsub_0))>;
-def : Pat<(v2i64 (sextloadvi8 addrmode6:$addr)),
- (VMOVLsv2i64 (EXTRACT_SUBREG (VMOVLsv4i32 (EXTRACT_SUBREG (VMOVLsv8i16
- (VLD1LNd16 addrmode6:$addr,
- (f64 (IMPLICIT_DEF)), (i32 0))), dsub_0)), dsub_0))>;
+let Predicates = [IsLE] in {
+ def : Pat<(v2i64 (extloadvi8 addrmode6:$addr)),
+ (VMOVLuv2i64 (EXTRACT_SUBREG (VMOVLuv4i32 (EXTRACT_SUBREG (VMOVLuv8i16
+ (VLD1LNd16 addrmode6:$addr,
+ (f64 (IMPLICIT_DEF)), (i32 0))), dsub_0)), dsub_0))>;
+ def : Pat<(v2i64 (zextloadvi8 addrmode6:$addr)),
+ (VMOVLuv2i64 (EXTRACT_SUBREG (VMOVLuv4i32 (EXTRACT_SUBREG (VMOVLuv8i16
+ (VLD1LNd16 addrmode6:$addr,
+ (f64 (IMPLICIT_DEF)), (i32 0))), dsub_0)), dsub_0))>;
+ def : Pat<(v2i64 (sextloadvi8 addrmode6:$addr)),
+ (VMOVLsv2i64 (EXTRACT_SUBREG (VMOVLsv4i32 (EXTRACT_SUBREG (VMOVLsv8i16
+ (VLD1LNd16 addrmode6:$addr,
+ (f64 (IMPLICIT_DEF)), (i32 0))), dsub_0)), dsub_0))>;
+}
+// The following patterns are a copy of the patterns above, but with an
+// additional VREV16d8 instruction to convert the data loaded by VLD1LN
+// into the proper vector format in big-endian mode.
+let Predicates = [IsBE] in {
+ def : Pat<(v2i64 (extloadvi8 addrmode6:$addr)),
+ (VMOVLuv2i64 (EXTRACT_SUBREG (VMOVLuv4i32 (EXTRACT_SUBREG (VMOVLuv8i16
+ (!cast<Instruction>("VREV16d8")
+ (VLD1LNd16 addrmode6:$addr,
+ (f64 (IMPLICIT_DEF)), (i32 0)))), dsub_0)), dsub_0))>;
+ def : Pat<(v2i64 (zextloadvi8 addrmode6:$addr)),
+ (VMOVLuv2i64 (EXTRACT_SUBREG (VMOVLuv4i32 (EXTRACT_SUBREG (VMOVLuv8i16
+ (!cast<Instruction>("VREV16d8")
+ (VLD1LNd16 addrmode6:$addr,
+ (f64 (IMPLICIT_DEF)), (i32 0)))), dsub_0)), dsub_0))>;
+ def : Pat<(v2i64 (sextloadvi8 addrmode6:$addr)),
+ (VMOVLsv2i64 (EXTRACT_SUBREG (VMOVLsv4i32 (EXTRACT_SUBREG (VMOVLsv8i16
+ (!cast<Instruction>("VREV16d8")
+ (VLD1LNd16 addrmode6:$addr,
+ (f64 (IMPLICIT_DEF)), (i32 0)))), dsub_0)), dsub_0))>;
+}
//===----------------------------------------------------------------------===//
// Assembler aliases
@@ -6242,379 +6639,442 @@ defm : NEONDTAnyInstAlias<"vorr${p}", "$Vdn, $Vm",
// VLD1 single-lane pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VLD1LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vld1${p}", ".8", "$list, $addr",
- (ins VecListOneDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDByteIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD1LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vld1${p}", ".16", "$list, $addr",
- (ins VecListOneDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDHWordIndexed:$list, addrmode6align16:$addr,
+ pred:$p)>;
def VLD1LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vld1${p}", ".32", "$list, $addr",
- (ins VecListOneDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VLD1LNdWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld1${p}", ".8", "$list, $addr!",
- (ins VecListOneDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDByteIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD1LNdWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld1${p}", ".16", "$list, $addr!",
- (ins VecListOneDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDHWordIndexed:$list, addrmode6align16:$addr,
+ pred:$p)>;
def VLD1LNdWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld1${p}", ".32", "$list, $addr!",
- (ins VecListOneDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VLD1LNdWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld1${p}", ".8", "$list, $addr, $Rm",
- (ins VecListOneDByteIndexed:$list, addrmode6:$addr,
+ (ins VecListOneDByteIndexed:$list, addrmode6alignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VLD1LNdWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld1${p}", ".16", "$list, $addr, $Rm",
- (ins VecListOneDHWordIndexed:$list, addrmode6:$addr,
+ (ins VecListOneDHWordIndexed:$list, addrmode6align16:$addr,
rGPR:$Rm, pred:$p)>;
def VLD1LNdWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld1${p}", ".32", "$list, $addr, $Rm",
- (ins VecListOneDWordIndexed:$list, addrmode6:$addr,
+ (ins VecListOneDWordIndexed:$list, addrmode6align32:$addr,
rGPR:$Rm, pred:$p)>;
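The switch from the generic addrmode6 operand to the alignment-specific
variants (addrmode6alignNone, addrmode6align16, addrmode6align32, and so on)
presumably tightens which alignment qualifiers the assembler accepts for each
element size, so that, for example, "vld1.16 {d0[1]}, [r0:16]" is still
accepted while a ":16" qualifier on a ".8" lane load is rejected; the lane
index in "{d0[1]}" is the part a plain InstAlias cannot express, hence these
pseudo-instructions.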
// VST1 single-lane pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VST1LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vst1${p}", ".8", "$list, $addr",
- (ins VecListOneDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDByteIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST1LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vst1${p}", ".16", "$list, $addr",
- (ins VecListOneDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDHWordIndexed:$list, addrmode6align16:$addr,
+ pred:$p)>;
def VST1LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vst1${p}", ".32", "$list, $addr",
- (ins VecListOneDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VST1LNdWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst1${p}", ".8", "$list, $addr!",
- (ins VecListOneDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDByteIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST1LNdWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst1${p}", ".16", "$list, $addr!",
- (ins VecListOneDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDHWordIndexed:$list, addrmode6align16:$addr,
+ pred:$p)>;
def VST1LNdWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst1${p}", ".32", "$list, $addr!",
- (ins VecListOneDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VST1LNdWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst1${p}", ".8", "$list, $addr, $Rm",
- (ins VecListOneDByteIndexed:$list, addrmode6:$addr,
+ (ins VecListOneDByteIndexed:$list, addrmode6alignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VST1LNdWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst1${p}", ".16", "$list, $addr, $Rm",
- (ins VecListOneDHWordIndexed:$list, addrmode6:$addr,
+ (ins VecListOneDHWordIndexed:$list, addrmode6align16:$addr,
rGPR:$Rm, pred:$p)>;
def VST1LNdWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst1${p}", ".32", "$list, $addr, $Rm",
- (ins VecListOneDWordIndexed:$list, addrmode6:$addr,
+ (ins VecListOneDWordIndexed:$list, addrmode6align32:$addr,
rGPR:$Rm, pred:$p)>;
// VLD2 single-lane pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VLD2LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vld2${p}", ".8", "$list, $addr",
- (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDByteIndexed:$list, addrmode6align16:$addr,
+ pred:$p)>;
def VLD2LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vld2${p}", ".16", "$list, $addr",
- (ins VecListTwoDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDHWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VLD2LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vld2${p}", ".32", "$list, $addr",
- (ins VecListTwoDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDWordIndexed:$list, addrmode6align64:$addr, pred:$p)>;
def VLD2LNqAsm_16 : NEONDataTypeAsmPseudoInst<"vld2${p}", ".16", "$list, $addr",
- (ins VecListTwoQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoQHWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VLD2LNqAsm_32 : NEONDataTypeAsmPseudoInst<"vld2${p}", ".32", "$list, $addr",
- (ins VecListTwoQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoQWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VLD2LNdWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld2${p}", ".8", "$list, $addr!",
- (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDByteIndexed:$list, addrmode6align16:$addr,
+ pred:$p)>;
def VLD2LNdWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld2${p}", ".16", "$list, $addr!",
- (ins VecListTwoDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDHWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VLD2LNdWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld2${p}", ".32", "$list, $addr!",
- (ins VecListTwoDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VLD2LNqWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld2${p}", ".16", "$list, $addr!",
- (ins VecListTwoQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoQHWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VLD2LNqWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld2${p}", ".32", "$list, $addr!",
- (ins VecListTwoQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoQWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VLD2LNdWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld2${p}", ".8", "$list, $addr, $Rm",
- (ins VecListTwoDByteIndexed:$list, addrmode6:$addr,
+ (ins VecListTwoDByteIndexed:$list, addrmode6align16:$addr,
rGPR:$Rm, pred:$p)>;
def VLD2LNdWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld2${p}", ".16", "$list, $addr, $Rm",
- (ins VecListTwoDHWordIndexed:$list, addrmode6:$addr,
+ (ins VecListTwoDHWordIndexed:$list, addrmode6align32:$addr,
rGPR:$Rm, pred:$p)>;
def VLD2LNdWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld2${p}", ".32", "$list, $addr, $Rm",
- (ins VecListTwoDWordIndexed:$list, addrmode6:$addr,
+ (ins VecListTwoDWordIndexed:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VLD2LNqWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld2${p}", ".16", "$list, $addr, $Rm",
- (ins VecListTwoQHWordIndexed:$list, addrmode6:$addr,
+ (ins VecListTwoQHWordIndexed:$list, addrmode6align32:$addr,
rGPR:$Rm, pred:$p)>;
def VLD2LNqWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld2${p}", ".32", "$list, $addr, $Rm",
- (ins VecListTwoQWordIndexed:$list, addrmode6:$addr,
+ (ins VecListTwoQWordIndexed:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
// VST2 single-lane pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VST2LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vst2${p}", ".8", "$list, $addr",
- (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDByteIndexed:$list, addrmode6align16:$addr,
+ pred:$p)>;
def VST2LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vst2${p}", ".16", "$list, $addr",
- (ins VecListTwoDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDHWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VST2LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vst2${p}", ".32", "$list, $addr",
- (ins VecListTwoDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VST2LNqAsm_16 : NEONDataTypeAsmPseudoInst<"vst2${p}", ".16", "$list, $addr",
- (ins VecListTwoQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoQHWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VST2LNqAsm_32 : NEONDataTypeAsmPseudoInst<"vst2${p}", ".32", "$list, $addr",
- (ins VecListTwoQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoQWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VST2LNdWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst2${p}", ".8", "$list, $addr!",
- (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDByteIndexed:$list, addrmode6align16:$addr,
+ pred:$p)>;
def VST2LNdWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst2${p}", ".16", "$list, $addr!",
- (ins VecListTwoDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDHWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VST2LNdWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst2${p}", ".32", "$list, $addr!",
- (ins VecListTwoDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VST2LNqWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst2${p}", ".16", "$list, $addr!",
- (ins VecListTwoQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoQHWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VST2LNqWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst2${p}", ".32", "$list, $addr!",
- (ins VecListTwoQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoQWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VST2LNdWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst2${p}", ".8", "$list, $addr, $Rm",
- (ins VecListTwoDByteIndexed:$list, addrmode6:$addr,
+ (ins VecListTwoDByteIndexed:$list, addrmode6align16:$addr,
rGPR:$Rm, pred:$p)>;
def VST2LNdWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst2${p}", ".16","$list, $addr, $Rm",
- (ins VecListTwoDHWordIndexed:$list, addrmode6:$addr,
+ (ins VecListTwoDHWordIndexed:$list, addrmode6align32:$addr,
rGPR:$Rm, pred:$p)>;
def VST2LNdWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst2${p}", ".32", "$list, $addr, $Rm",
- (ins VecListTwoDWordIndexed:$list, addrmode6:$addr,
+ (ins VecListTwoDWordIndexed:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VST2LNqWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst2${p}", ".16","$list, $addr, $Rm",
- (ins VecListTwoQHWordIndexed:$list, addrmode6:$addr,
+ (ins VecListTwoQHWordIndexed:$list, addrmode6align32:$addr,
rGPR:$Rm, pred:$p)>;
def VST2LNqWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst2${p}", ".32", "$list, $addr, $Rm",
- (ins VecListTwoQWordIndexed:$list, addrmode6:$addr,
+ (ins VecListTwoQWordIndexed:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
// VLD3 all-lanes pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VLD3DUPdAsm_8 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr",
- (ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPdAsm_16: NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
- (ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPdAsm_32: NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
- (ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPqAsm_8 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr",
- (ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPqAsm_16: NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
- (ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPqAsm_32: NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
- (ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPdWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr!",
- (ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPdWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr!",
- (ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPdWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr!",
- (ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPqWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr!",
- (ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPqWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr!",
- (ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPqWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr!",
- (ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPdWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr, $Rm",
- (ins VecListThreeDAllLanes:$list, addrmode6:$addr,
+ (ins VecListThreeDAllLanes:$list, addrmode6dupalignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3DUPdWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr, $Rm",
- (ins VecListThreeDAllLanes:$list, addrmode6:$addr,
+ (ins VecListThreeDAllLanes:$list, addrmode6dupalignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3DUPdWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr, $Rm",
- (ins VecListThreeDAllLanes:$list, addrmode6:$addr,
+ (ins VecListThreeDAllLanes:$list, addrmode6dupalignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3DUPqWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr, $Rm",
- (ins VecListThreeQAllLanes:$list, addrmode6:$addr,
+ (ins VecListThreeQAllLanes:$list, addrmode6dupalignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3DUPqWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr, $Rm",
- (ins VecListThreeQAllLanes:$list, addrmode6:$addr,
+ (ins VecListThreeQAllLanes:$list, addrmode6dupalignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3DUPqWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr, $Rm",
- (ins VecListThreeQAllLanes:$list, addrmode6:$addr,
+ (ins VecListThreeQAllLanes:$list, addrmode6dupalignNone:$addr,
rGPR:$Rm, pred:$p)>;
// VLD3 single-lane pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VLD3LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr",
- (ins VecListThreeDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDByteIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD3LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
- (ins VecListThreeDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDHWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD3LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
- (ins VecListThreeDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD3LNqAsm_16 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
- (ins VecListThreeQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQHWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD3LNqAsm_32 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
- (ins VecListThreeQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD3LNdWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr!",
- (ins VecListThreeDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDByteIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD3LNdWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr!",
- (ins VecListThreeDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDHWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD3LNdWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr!",
- (ins VecListThreeDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD3LNqWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr!",
- (ins VecListThreeQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQHWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD3LNqWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr!",
- (ins VecListThreeQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD3LNdWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr, $Rm",
- (ins VecListThreeDByteIndexed:$list, addrmode6:$addr,
+ (ins VecListThreeDByteIndexed:$list, addrmode6alignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3LNdWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr, $Rm",
- (ins VecListThreeDHWordIndexed:$list, addrmode6:$addr,
- rGPR:$Rm, pred:$p)>;
+ (ins VecListThreeDHWordIndexed:$list,
+ addrmode6alignNone:$addr, rGPR:$Rm, pred:$p)>;
def VLD3LNdWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr, $Rm",
- (ins VecListThreeDWordIndexed:$list, addrmode6:$addr,
+ (ins VecListThreeDWordIndexed:$list, addrmode6alignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3LNqWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr, $Rm",
- (ins VecListThreeQHWordIndexed:$list, addrmode6:$addr,
- rGPR:$Rm, pred:$p)>;
+ (ins VecListThreeQHWordIndexed:$list,
+ addrmode6alignNone:$addr, rGPR:$Rm, pred:$p)>;
def VLD3LNqWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr, $Rm",
- (ins VecListThreeQWordIndexed:$list, addrmode6:$addr,
+ (ins VecListThreeQWordIndexed:$list, addrmode6alignNone:$addr,
rGPR:$Rm, pred:$p)>;
// VLD3 multiple structure pseudo-instructions. These need special handling for
// the vector operands that the normal instructions don't yet model.
// FIXME: Remove these when the register classes and instructions are updated.
def VLD3dAsm_8 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3dAsm_16 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3dAsm_32 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3qAsm_8 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3qAsm_16 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3qAsm_32 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3dWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr!",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3dWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr!",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3dWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr!",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3qWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr!",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3qWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr!",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3qWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr!",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3dWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr, $Rm",
- (ins VecListThreeD:$list, addrmode6:$addr,
+ (ins VecListThreeD:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3dWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr, $Rm",
- (ins VecListThreeD:$list, addrmode6:$addr,
+ (ins VecListThreeD:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3dWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr, $Rm",
- (ins VecListThreeD:$list, addrmode6:$addr,
+ (ins VecListThreeD:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3qWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr, $Rm",
- (ins VecListThreeQ:$list, addrmode6:$addr,
+ (ins VecListThreeQ:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3qWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr, $Rm",
- (ins VecListThreeQ:$list, addrmode6:$addr,
+ (ins VecListThreeQ:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3qWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr, $Rm",
- (ins VecListThreeQ:$list, addrmode6:$addr,
+ (ins VecListThreeQ:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
// VST3 single-lane pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VST3LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr",
- (ins VecListThreeDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDByteIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST3LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr",
- (ins VecListThreeDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDHWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST3LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr",
- (ins VecListThreeDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST3LNqAsm_16 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr",
- (ins VecListThreeQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQHWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST3LNqAsm_32 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr",
- (ins VecListThreeQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST3LNdWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr!",
- (ins VecListThreeDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDByteIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST3LNdWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr!",
- (ins VecListThreeDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDHWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST3LNdWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr!",
- (ins VecListThreeDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST3LNqWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr!",
- (ins VecListThreeQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQHWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST3LNqWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr!",
- (ins VecListThreeQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST3LNdWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr, $Rm",
- (ins VecListThreeDByteIndexed:$list, addrmode6:$addr,
+ (ins VecListThreeDByteIndexed:$list, addrmode6alignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VST3LNdWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr, $Rm",
- (ins VecListThreeDHWordIndexed:$list, addrmode6:$addr,
- rGPR:$Rm, pred:$p)>;
+ (ins VecListThreeDHWordIndexed:$list,
+ addrmode6alignNone:$addr, rGPR:$Rm, pred:$p)>;
def VST3LNdWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr, $Rm",
- (ins VecListThreeDWordIndexed:$list, addrmode6:$addr,
+ (ins VecListThreeDWordIndexed:$list, addrmode6alignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VST3LNqWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr, $Rm",
- (ins VecListThreeQHWordIndexed:$list, addrmode6:$addr,
- rGPR:$Rm, pred:$p)>;
+ (ins VecListThreeQHWordIndexed:$list,
+ addrmode6alignNone:$addr, rGPR:$Rm, pred:$p)>;
def VST3LNqWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr, $Rm",
- (ins VecListThreeQWordIndexed:$list, addrmode6:$addr,
+ (ins VecListThreeQWordIndexed:$list, addrmode6alignNone:$addr,
rGPR:$Rm, pred:$p)>;
@@ -6622,168 +7082,190 @@ def VST3LNqWB_register_Asm_32 :
// the vector operands that the normal instructions don't yet model.
// FIXME: Remove these when the register classes and instructions are updated.
def VST3dAsm_8 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VST3dAsm_16 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VST3dAsm_32 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VST3qAsm_8 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VST3qAsm_16 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VST3qAsm_32 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VST3dWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr!",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VST3dWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr!",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VST3dWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr!",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VST3qWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr!",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VST3qWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr!",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VST3qWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr!",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VST3dWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr, $Rm",
- (ins VecListThreeD:$list, addrmode6:$addr,
+ (ins VecListThreeD:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VST3dWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr, $Rm",
- (ins VecListThreeD:$list, addrmode6:$addr,
+ (ins VecListThreeD:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VST3dWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr, $Rm",
- (ins VecListThreeD:$list, addrmode6:$addr,
+ (ins VecListThreeD:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VST3qWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr, $Rm",
- (ins VecListThreeQ:$list, addrmode6:$addr,
+ (ins VecListThreeQ:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VST3qWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr, $Rm",
- (ins VecListThreeQ:$list, addrmode6:$addr,
+ (ins VecListThreeQ:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VST3qWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr, $Rm",
- (ins VecListThreeQ:$list, addrmode6:$addr,
+ (ins VecListThreeQ:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
// VLD4 all-lanes pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VLD4DUPdAsm_8 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr",
- (ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDAllLanes:$list, addrmode6dupalign32:$addr,
+ pred:$p)>;
def VLD4DUPdAsm_16: NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
- (ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDAllLanes:$list, addrmode6dupalign64:$addr,
+ pred:$p)>;
def VLD4DUPdAsm_32: NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
- (ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDAllLanes:$list, addrmode6dupalign64or128:$addr,
+ pred:$p)>;
def VLD4DUPqAsm_8 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr",
- (ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQAllLanes:$list, addrmode6dupalign32:$addr,
+ pred:$p)>;
def VLD4DUPqAsm_16: NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
- (ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQAllLanes:$list, addrmode6dupalign64:$addr,
+ pred:$p)>;
def VLD4DUPqAsm_32: NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
- (ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQAllLanes:$list, addrmode6dupalign64or128:$addr,
+ pred:$p)>;
def VLD4DUPdWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr!",
- (ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDAllLanes:$list, addrmode6dupalign32:$addr,
+ pred:$p)>;
def VLD4DUPdWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr!",
- (ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDAllLanes:$list, addrmode6dupalign64:$addr,
+ pred:$p)>;
def VLD4DUPdWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr!",
- (ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDAllLanes:$list, addrmode6dupalign64or128:$addr,
+ pred:$p)>;
def VLD4DUPqWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr!",
- (ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQAllLanes:$list, addrmode6dupalign32:$addr,
+ pred:$p)>;
def VLD4DUPqWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr!",
- (ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQAllLanes:$list, addrmode6dupalign64:$addr,
+ pred:$p)>;
def VLD4DUPqWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr!",
- (ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQAllLanes:$list, addrmode6dupalign64or128:$addr,
+ pred:$p)>;
def VLD4DUPdWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr, $Rm",
- (ins VecListFourDAllLanes:$list, addrmode6:$addr,
+ (ins VecListFourDAllLanes:$list, addrmode6dupalign32:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4DUPdWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr, $Rm",
- (ins VecListFourDAllLanes:$list, addrmode6:$addr,
+ (ins VecListFourDAllLanes:$list, addrmode6dupalign64:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4DUPdWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr, $Rm",
- (ins VecListFourDAllLanes:$list, addrmode6:$addr,
- rGPR:$Rm, pred:$p)>;
+ (ins VecListFourDAllLanes:$list,
+ addrmode6dupalign64or128:$addr, rGPR:$Rm, pred:$p)>;
def VLD4DUPqWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr, $Rm",
- (ins VecListFourQAllLanes:$list, addrmode6:$addr,
+ (ins VecListFourQAllLanes:$list, addrmode6dupalign32:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4DUPqWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr, $Rm",
- (ins VecListFourQAllLanes:$list, addrmode6:$addr,
+ (ins VecListFourQAllLanes:$list, addrmode6dupalign64:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4DUPqWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr, $Rm",
- (ins VecListFourQAllLanes:$list, addrmode6:$addr,
- rGPR:$Rm, pred:$p)>;
+ (ins VecListFourQAllLanes:$list,
+ addrmode6dupalign64or128:$addr, rGPR:$Rm, pred:$p)>;
// VLD4 single-lane pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VLD4LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr",
- (ins VecListFourDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDByteIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VLD4LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
- (ins VecListFourDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDHWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VLD4LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
- (ins VecListFourDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDWordIndexed:$list, addrmode6align64or128:$addr,
+ pred:$p)>;
def VLD4LNqAsm_16 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
- (ins VecListFourQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQHWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VLD4LNqAsm_32 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
- (ins VecListFourQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQWordIndexed:$list, addrmode6align64or128:$addr,
+ pred:$p)>;
def VLD4LNdWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr!",
- (ins VecListFourDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDByteIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VLD4LNdWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr!",
- (ins VecListFourDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDHWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VLD4LNdWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr!",
- (ins VecListFourDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDWordIndexed:$list, addrmode6align64or128:$addr,
+ pred:$p)>;
def VLD4LNqWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr!",
- (ins VecListFourQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQHWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VLD4LNqWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr!",
- (ins VecListFourQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQWordIndexed:$list, addrmode6align64or128:$addr,
+ pred:$p)>;
def VLD4LNdWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr, $Rm",
- (ins VecListFourDByteIndexed:$list, addrmode6:$addr,
+ (ins VecListFourDByteIndexed:$list, addrmode6align32:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4LNdWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr, $Rm",
- (ins VecListFourDHWordIndexed:$list, addrmode6:$addr,
+ (ins VecListFourDHWordIndexed:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4LNdWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr, $Rm",
- (ins VecListFourDWordIndexed:$list, addrmode6:$addr,
- rGPR:$Rm, pred:$p)>;
+ (ins VecListFourDWordIndexed:$list,
+ addrmode6align64or128:$addr, rGPR:$Rm, pred:$p)>;
def VLD4LNqWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr, $Rm",
- (ins VecListFourQHWordIndexed:$list, addrmode6:$addr,
+ (ins VecListFourQHWordIndexed:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4LNqWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr, $Rm",
- (ins VecListFourQWordIndexed:$list, addrmode6:$addr,
- rGPR:$Rm, pred:$p)>;
+ (ins VecListFourQWordIndexed:$list,
+ addrmode6align64or128:$addr, rGPR:$Rm, pred:$p)>;
@@ -6791,168 +7273,202 @@ def VLD4LNqWB_register_Asm_32 :
// the vector operands that the normal instructions don't yet model.
// FIXME: Remove these when the register classes and instructions are updated.
def VLD4dAsm_8 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4dAsm_16 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4dAsm_32 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4qAsm_8 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4qAsm_16 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4qAsm_32 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4dWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr!",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4dWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr!",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4dWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr!",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4qWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr!",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4qWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr!",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4qWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr!",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4dWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr, $Rm",
- (ins VecListFourD:$list, addrmode6:$addr,
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4dWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr, $Rm",
- (ins VecListFourD:$list, addrmode6:$addr,
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4dWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr, $Rm",
- (ins VecListFourD:$list, addrmode6:$addr,
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4qWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr, $Rm",
- (ins VecListFourQ:$list, addrmode6:$addr,
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4qWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr, $Rm",
- (ins VecListFourQ:$list, addrmode6:$addr,
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4qWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr, $Rm",
- (ins VecListFourQ:$list, addrmode6:$addr,
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
// VST4 single-lane pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VST4LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr",
- (ins VecListFourDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDByteIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VST4LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr",
- (ins VecListFourDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDHWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VST4LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr",
- (ins VecListFourDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDWordIndexed:$list, addrmode6align64or128:$addr,
+ pred:$p)>;
def VST4LNqAsm_16 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr",
- (ins VecListFourQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQHWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VST4LNqAsm_32 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr",
- (ins VecListFourQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQWordIndexed:$list, addrmode6align64or128:$addr,
+ pred:$p)>;
def VST4LNdWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr!",
- (ins VecListFourDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDByteIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VST4LNdWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr!",
- (ins VecListFourDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDHWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VST4LNdWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr!",
- (ins VecListFourDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDWordIndexed:$list, addrmode6align64or128:$addr,
+ pred:$p)>;
def VST4LNqWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr!",
- (ins VecListFourQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQHWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VST4LNqWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr!",
- (ins VecListFourQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQWordIndexed:$list, addrmode6align64or128:$addr,
+ pred:$p)>;
def VST4LNdWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr, $Rm",
- (ins VecListFourDByteIndexed:$list, addrmode6:$addr,
+ (ins VecListFourDByteIndexed:$list, addrmode6align32:$addr,
rGPR:$Rm, pred:$p)>;
def VST4LNdWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr, $Rm",
- (ins VecListFourDHWordIndexed:$list, addrmode6:$addr,
+ (ins VecListFourDHWordIndexed:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VST4LNdWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr, $Rm",
- (ins VecListFourDWordIndexed:$list, addrmode6:$addr,
- rGPR:$Rm, pred:$p)>;
+ (ins VecListFourDWordIndexed:$list,
+ addrmode6align64or128:$addr, rGPR:$Rm, pred:$p)>;
def VST4LNqWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr, $Rm",
- (ins VecListFourQHWordIndexed:$list, addrmode6:$addr,
+ (ins VecListFourQHWordIndexed:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VST4LNqWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr, $Rm",
- (ins VecListFourQWordIndexed:$list, addrmode6:$addr,
- rGPR:$Rm, pred:$p)>;
+ (ins VecListFourQWordIndexed:$list,
+ addrmode6align64or128:$addr, rGPR:$Rm, pred:$p)>;
// VST4 multiple structure pseudo-instructions. These need special handling for
// the vector operands that the normal instructions don't yet model.
// FIXME: Remove these when the register classes and instructions are updated.
def VST4dAsm_8 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4dAsm_16 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4dAsm_32 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4qAsm_8 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4qAsm_16 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4qAsm_32 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4dWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr!",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4dWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr!",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4dWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr!",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4qWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr!",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4qWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr!",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4qWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr!",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4dWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr, $Rm",
- (ins VecListFourD:$list, addrmode6:$addr,
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
def VST4dWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr, $Rm",
- (ins VecListFourD:$list, addrmode6:$addr,
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
def VST4dWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr, $Rm",
- (ins VecListFourD:$list, addrmode6:$addr,
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
def VST4qWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr, $Rm",
- (ins VecListFourQ:$list, addrmode6:$addr,
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
def VST4qWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr, $Rm",
- (ins VecListFourQ:$list, addrmode6:$addr,
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
def VST4qWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr, $Rm",
- (ins VecListFourQ:$list, addrmode6:$addr,
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
// VMOV/VMVN takes an optional datatype suffix
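The hunks above replace the generic addrmode6 operand of the vld4/vst4 pseudo-instructions with alignment-specific classes (addrmode6align32, addrmode6align64, addrmode6align64or128, addrmode6align64or128or256), so the assembler can reject alignment hints that are not allowed for a given form. A minimal standalone C++ sketch of the constraint those operand names encode; the helper functions and the element-size/alignment table are illustrative assumptions read off the operand names, not LLVM code:

#include <cassert>

// Legal ":align" hints (in bits) implied by the operand classes used above.
static bool isLegalVld4LaneAlign(unsigned EltBits, unsigned AlignBits) {
  if (AlignBits == 0) return true;            // no alignment hint written
  switch (EltBits) {
  case 8:  return AlignBits == 32;                       // addrmode6align32
  case 16: return AlignBits == 64;                       // addrmode6align64
  case 32: return AlignBits == 64 || AlignBits == 128;   // addrmode6align64or128
  default: return false;
  }
}

static bool isLegalVld4MultiAlign(unsigned AlignBits) {
  // addrmode6align64or128or256, used by the multiple-structure forms.
  return AlignBits == 0 || AlignBits == 64 || AlignBits == 128 ||
         AlignBits == 256;
}

int main() {
  assert(isLegalVld4LaneAlign(32, 128));   // vld4.32 {d0[1],...}, [r0:128]
  assert(!isLegalVld4LaneAlign(16, 128));  // the .16 lane form only takes :64
  assert(isLegalVld4MultiAlign(256));      // vld4.8 {d0-d3}, [r0:256]
  assert(!isLegalVld4MultiAlign(32));
}

With this split, an illegal hint such as vld4.16 {d0[0],...}, [r0:128] fails operand matching instead of reaching encoding.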
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td b/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td
index af5ef53..e17f73af 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td
@@ -269,7 +269,8 @@ class T1SystemEncoding<bits<8> opc>
let Inst{7-0} = opc;
}
-def tHINT : T1pI<(outs), (ins imm0_15:$imm), NoItinerary, "hint", "\t$imm", []>,
+def tHINT : T1pI<(outs), (ins imm0_15:$imm), NoItinerary, "hint", "\t$imm",
+ [(int_arm_hint imm0_15:$imm)]>,
T1SystemEncoding<0x00>,
Requires<[IsThumb, HasV6M]> {
bits<4> imm;
@@ -288,7 +289,6 @@ def : tHintAlias<"sev$p", (tHINT 4, pred:$p)>; // A8.6.157
def : tInstAlias<"sevl$p", (tHINT 5, pred:$p)> {
let Predicates = [IsThumb2, HasV8];
}
-def : T2Pat<(int_arm_sevl), (tHINT 5)>;
// The imm operand $val can be used by a debugger to store more information
// about the breakpoint.
@@ -300,6 +300,8 @@ def tBKPT : T1I<(outs), (ins imm0_255:$val), NoItinerary, "bkpt\t$val",
bits<8> val;
let Inst{7-0} = val;
}
+// default immediate for breakpoint mnemonic
+def : InstAlias<"bkpt", (tBKPT 0)>, Requires<[IsThumb]>;
def tHLT : T1I<(outs), (ins imm0_63:$val), NoItinerary, "hlt\t$val",
[]>, T1Encoding<0b101110>, Requires<[IsThumb, HasV8]> {
@@ -543,15 +545,15 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
(tBX GPR:$dst, (ops 14, zero_reg))>,
Requires<[IsThumb]>, Sched<[WriteBr]>;
}
- // tTAILJMPd: IOS version uses a Thumb2 branch (no Thumb1 tail calls
- // on IOS), so it's in ARMInstrThumb2.td.
- // Non-IOS version:
+ // tTAILJMPd: MachO version uses a Thumb2 branch (no Thumb1 tail calls
+ // on MachO), so it's in ARMInstrThumb2.td.
+ // Non-MachO version:
let Uses = [SP] in {
def tTAILJMPdND : tPseudoExpand<(outs),
(ins t_brtarget:$dst, pred:$p),
4, IIC_Br, [],
(tB t_brtarget:$dst, pred:$p)>,
- Requires<[IsThumb, IsNotIOS]>, Sched<[WriteBr]>;
+ Requires<[IsThumb, IsNotMachO]>, Sched<[WriteBr]>;
}
}
@@ -1191,6 +1193,15 @@ def tTST : // A8.6.230
[(ARMcmpZ (and_su tGPR:$Rn, tGPR:$Rm), 0)]>,
Sched<[WriteALU]>;
+// A8.8.247 UDF - Undefined (Encoding T1)
+def tUDF : TI<(outs), (ins imm0_255:$imm8), IIC_Br, "udf\t$imm8",
+ [(int_arm_undefined imm0_255:$imm8)]>, Encoding16 {
+ bits<8> imm8;
+ let Inst{15-12} = 0b1101;
+ let Inst{11-8} = 0b1110;
+ let Inst{7-0} = imm8;
+}
+
// Zero-extend byte
def tUXTB : // A8.6.262
T1pIMiscEncode<{0,0,1,0,1,1,?}, (outs tGPR:$Rd), (ins tGPR:$Rm),
@@ -1306,10 +1317,35 @@ def : T1Pat<(addc tGPR:$lhs, imm8_255_neg:$rhs),
def : T1Pat<(subc tGPR:$lhs, tGPR:$rhs),
(tSUBrr tGPR:$lhs, tGPR:$rhs)>;
-// ConstantPool, GlobalAddress
-def : T1Pat<(ARMWrapper tglobaladdr :$dst), (tLEApcrel tglobaladdr :$dst)>;
+// Bswap 16 with load/store
+def : T1Pat<(srl (bswap (extloadi16 t_addrmode_rrs2:$addr)), (i32 16)),
+ (tREV16 (tLDRHr t_addrmode_rrs2:$addr))>;
+def : T1Pat<(srl (bswap (extloadi16 t_addrmode_is2:$addr)), (i32 16)),
+ (tREV16 (tLDRHi t_addrmode_is2:$addr))>;
+def : T1Pat<(truncstorei16 (srl (bswap tGPR:$Rn), (i32 16)),
+ t_addrmode_rrs2:$addr),
+ (tSTRHr (tREV16 tGPR:$Rn), t_addrmode_rrs2:$addr)>;
+def : T1Pat<(truncstorei16 (srl (bswap tGPR:$Rn), (i32 16)),
+ t_addrmode_is2:$addr),
+ (tSTRHi(tREV16 tGPR:$Rn), t_addrmode_is2:$addr)>;
+
+// ConstantPool
def : T1Pat<(ARMWrapper tconstpool :$dst), (tLEApcrel tconstpool :$dst)>;
+// GlobalAddress
+def tLDRLIT_ga_pcrel : PseudoInst<(outs tGPR:$dst), (ins i32imm:$addr),
+ IIC_iLoadiALU,
+ [(set tGPR:$dst,
+ (ARMWrapperPIC tglobaladdr:$addr))]>,
+ Requires<[IsThumb, DontUseMovt]>;
+
+def tLDRLIT_ga_abs : PseudoInst<(outs tGPR:$dst), (ins i32imm:$src),
+ IIC_iLoad_i,
+ [(set tGPR:$dst,
+ (ARMWrapper tglobaladdr:$src))]>,
+ Requires<[IsThumb, DontUseMovt]>;
+
+
// JumpTable
def : T1Pat<(ARMWrapperJT tjumptable:$dst, imm:$id),
(tLEApcrelJT tjumptable:$dst, imm:$id)>;
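The new tUDF definition pins down the 16-bit UDF (encoding T1) purely through the Inst{...} field assignments: Inst{15-12}=0b1101, Inst{11-8}=0b1110, Inst{7-0}=imm8. A throwaway C++ sketch (helper name invented) that composes those same fields, just to make the resulting opcode pattern visible:

#include <cassert>
#include <cstdint>

// Compose the 16-bit Thumb UDF (T1) word from the tUDF field assignments.
static uint16_t encodeT1UDF(uint8_t imm8) {
  return static_cast<uint16_t>((0b1101u << 12) |   // Inst{15-12}
                               (0b1110u << 8)  |   // Inst{11-8}
                               imm8);              // Inst{7-0}
}

int main() {
  assert(encodeT1UDF(0)   == 0xde00);   // "udf #0"
  assert(encodeT1UDF(255) == 0xdeff);   // "udf #255"
}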
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td b/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
index 48acffd..85e9351 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
@@ -1445,7 +1445,7 @@ defm t2STRH:T2I_st<0b01,"strh", IIC_iStore_bh_i, IIC_iStore_bh_si,
// Store doubleword
let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in
def t2STRDi8 : T2Ii8s4<1, 0, 0, (outs),
- (ins GPR:$Rt, GPR:$Rt2, t2addrmode_imm8s4:$addr),
+ (ins rGPR:$Rt, rGPR:$Rt2, t2addrmode_imm8s4:$addr),
IIC_iStore_d_r, "strd", "\t$Rt, $Rt2, $addr", "", []>;
// Indexed stores
@@ -1676,7 +1676,7 @@ defm t2PLI : T2Ipl<0, 1, "pli">, Requires<[IsThumb2,HasV7]>;
// pci variant is very similar to i12, but supports negative offsets
// from the PC. Only PLD and PLI have pci variants (not PLDW)
class T2Iplpci<bits<1> inst, string opc> : T2Iso<(outs), (ins t2ldrlabel:$addr),
- IIC_Preload, opc, "\t$addr",
+ IIC_Preload, opc, "\t$addr",
[(ARMPreload (ARMWrapper tconstpool:$addr),
(i32 0), (i32 inst))]>, Sched<[WritePreLd]> {
let Inst{31-25} = 0b1111100;
@@ -1918,7 +1918,7 @@ def t2MOVi16 : T2I<(outs rGPR:$Rd), (ins imm0_65535_expr:$imm), IIC_iMOVi,
let DecoderMethod = "DecodeT2MOVTWInstruction";
}
-def : t2InstAlias<"mov${p} $Rd, $imm",
+def : t2InstAlias<"mov${p} $Rd, $imm",
(t2MOVi16 rGPR:$Rd, imm256_65535_expr:$imm, pred:$p)>;
def t2MOVi16_ga_pcrel : PseudoInst<(outs rGPR:$Rd),
@@ -2407,6 +2407,19 @@ def t2UBFX: T2TwoRegBitFI<
let Inst{15} = 0;
}
+// A8.8.247 UDF - Undefined (Encoding T2)
+def t2UDF : T2XI<(outs), (ins imm0_65535:$imm16), IIC_Br, "udf.w\t$imm16",
+ [(int_arm_undefined imm0_65535:$imm16)]> {
+ bits<16> imm16;
+ let Inst{31-29} = 0b111;
+ let Inst{28-27} = 0b10;
+ let Inst{26-20} = 0b1111111;
+ let Inst{19-16} = imm16{15-12};
+ let Inst{15} = 0b1;
+ let Inst{14-12} = 0b010;
+ let Inst{11-0} = imm16{11-0};
+}
+
// A8.6.18 BFI - Bitfield insert (Encoding T1)
let Constraints = "$src = $Rd" in {
def t2BFI : T2TwoRegBitFI<(outs rGPR:$Rd),
@@ -3196,27 +3209,28 @@ def t2MOVCCi32imm
let hasSideEffects = 1 in {
def t2DMB : T2I<(outs), (ins memb_opt:$opt), NoItinerary,
"dmb", "\t$opt", [(int_arm_dmb (i32 imm0_15:$opt))]>,
- Requires<[HasDB]> {
+ Requires<[IsThumb, HasDB]> {
bits<4> opt;
let Inst{31-4} = 0xf3bf8f5;
let Inst{3-0} = opt;
}
-}
def t2DSB : T2I<(outs), (ins memb_opt:$opt), NoItinerary,
"dsb", "\t$opt", [(int_arm_dsb (i32 imm0_15:$opt))]>,
- Requires<[HasDB]> {
+ Requires<[IsThumb, HasDB]> {
bits<4> opt;
let Inst{31-4} = 0xf3bf8f4;
let Inst{3-0} = opt;
}
def t2ISB : T2I<(outs), (ins instsyncb_opt:$opt), NoItinerary,
- "isb", "\t$opt", []>, Requires<[HasDB]> {
+ "isb", "\t$opt", [(int_arm_isb (i32 imm0_15:$opt))]>,
+ Requires<[IsThumb, HasDB]> {
bits<4> opt;
let Inst{31-4} = 0xf3bf8f6;
let Inst{3-0} = opt;
}
+}
class T2I_ldrex<bits<4> opcod, dag oops, dag iops, AddrMode am, int sz,
InstrItinClass itin, string opc, string asm, string cstr,
@@ -3284,15 +3298,18 @@ def t2LDREXD : T2I_ldrex<0b0111, (outs rGPR:$Rt, rGPR:$Rt2),
def t2LDAEXB : T2I_ldrex<0b1100, (outs rGPR:$Rt), (ins addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
"ldaexb", "\t$Rt, $addr", "",
- []>, Requires<[IsThumb, HasV8]>;
+ [(set rGPR:$Rt, (ldaex_1 addr_offset_none:$addr))]>,
+ Requires<[IsThumb, HasV8]>;
def t2LDAEXH : T2I_ldrex<0b1101, (outs rGPR:$Rt), (ins addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
"ldaexh", "\t$Rt, $addr", "",
- []>, Requires<[IsThumb, HasV8]>;
+ [(set rGPR:$Rt, (ldaex_2 addr_offset_none:$addr))]>,
+ Requires<[IsThumb, HasV8]>;
def t2LDAEX : Thumb2I<(outs rGPR:$Rt), (ins addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
"ldaex", "\t$Rt, $addr", "",
- []>, Requires<[IsThumb, HasV8]> {
+ [(set rGPR:$Rt, (ldaex_4 addr_offset_none:$addr))]>,
+ Requires<[IsThumb, HasV8]> {
bits<4> Rt;
bits<4> addr;
let Inst{31-27} = 0b11101;
@@ -3320,21 +3337,21 @@ def t2STREXB : T2I_strex<0b0100, (outs rGPR:$Rd),
(ins rGPR:$Rt, addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
"strexb", "\t$Rd, $Rt, $addr", "",
- [(set rGPR:$Rd, (strex_1 rGPR:$Rt,
- addr_offset_none:$addr))]>;
+ [(set rGPR:$Rd,
+ (strex_1 rGPR:$Rt, addr_offset_none:$addr))]>;
def t2STREXH : T2I_strex<0b0101, (outs rGPR:$Rd),
(ins rGPR:$Rt, addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
"strexh", "\t$Rd, $Rt, $addr", "",
- [(set rGPR:$Rd, (strex_2 rGPR:$Rt,
- addr_offset_none:$addr))]>;
+ [(set rGPR:$Rd,
+ (strex_2 rGPR:$Rt, addr_offset_none:$addr))]>;
def t2STREX : Thumb2I<(outs rGPR:$Rd), (ins rGPR:$Rt,
t2addrmode_imm0_1020s4:$addr),
AddrModeNone, 4, NoItinerary,
"strex", "\t$Rd, $Rt, $addr", "",
- [(set rGPR:$Rd, (strex_4 rGPR:$Rt,
- t2addrmode_imm0_1020s4:$addr))]> {
+ [(set rGPR:$Rd,
+ (strex_4 rGPR:$Rt, t2addrmode_imm0_1020s4:$addr))]> {
bits<4> Rd;
bits<4> Rt;
bits<12> addr;
@@ -3358,19 +3375,25 @@ def t2STLEXB : T2I_strex<0b1100, (outs rGPR:$Rd),
(ins rGPR:$Rt, addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
"stlexb", "\t$Rd, $Rt, $addr", "",
- []>, Requires<[IsThumb, HasV8]>;
+ [(set rGPR:$Rd,
+ (stlex_1 rGPR:$Rt, addr_offset_none:$addr))]>,
+ Requires<[IsThumb, HasV8]>;
def t2STLEXH : T2I_strex<0b1101, (outs rGPR:$Rd),
(ins rGPR:$Rt, addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
"stlexh", "\t$Rd, $Rt, $addr", "",
- []>, Requires<[IsThumb, HasV8]>;
+ [(set rGPR:$Rd,
+ (stlex_2 rGPR:$Rt, addr_offset_none:$addr))]>,
+ Requires<[IsThumb, HasV8]>;
def t2STLEX : Thumb2I<(outs rGPR:$Rd), (ins rGPR:$Rt,
addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
"stlex", "\t$Rd, $Rt, $addr", "",
- []>, Requires<[IsThumb, HasV8]> {
+ [(set rGPR:$Rd,
+ (stlex_4 rGPR:$Rt, addr_offset_none:$addr))]>,
+ Requires<[IsThumb, HasV8]> {
bits<4> Rd;
bits<4> Rt;
bits<4> addr;
@@ -3412,6 +3435,15 @@ def : T2Pat<(strex_1 (and GPR:$Rt, 0xff), addr_offset_none:$addr),
def : T2Pat<(strex_2 (and GPR:$Rt, 0xffff), addr_offset_none:$addr),
(t2STREXH GPR:$Rt, addr_offset_none:$addr)>;
+def : T2Pat<(and (ldaex_1 addr_offset_none:$addr), 0xff),
+ (t2LDAEXB addr_offset_none:$addr)>;
+def : T2Pat<(and (ldaex_2 addr_offset_none:$addr), 0xffff),
+ (t2LDAEXH addr_offset_none:$addr)>;
+def : T2Pat<(stlex_1 (and GPR:$Rt, 0xff), addr_offset_none:$addr),
+ (t2STLEXB GPR:$Rt, addr_offset_none:$addr)>;
+def : T2Pat<(stlex_2 (and GPR:$Rt, 0xffff), addr_offset_none:$addr),
+ (t2STLEXH GPR:$Rt, addr_offset_none:$addr)>;
+
//===----------------------------------------------------------------------===//
// SJLJ Exception handling intrinsics
// eh_sjlj_setjmp() is an instruction sequence to store the return
@@ -3477,8 +3509,8 @@ def t2B : T2I<(outs), (ins uncondbrtarget:$target), IIC_Br,
let Inst{25-16} = target{20-11};
let Inst{10-0} = target{10-0};
let DecoderMethod = "DecodeT2BInstruction";
- let AsmMatchConverter = "cvtThumbBranches";
-}
+ let AsmMatchConverter = "cvtThumbBranches";
+}
let isNotDuplicable = 1, isIndirectBranch = 1 in {
def t2BR_JT : t2PseudoInst<(outs),
@@ -3549,7 +3581,7 @@ def t2Bcc : T2I<(outs), (ins brtarget:$target), IIC_Br,
let AsmMatchConverter = "cvtThumbBranches";
}
-// Tail calls. The IOS version of thumb tail calls uses a t2 branch, so
+// Tail calls. The MachO version of thumb tail calls uses a t2 branch, so
// it goes here.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
// IOS version.
@@ -3558,7 +3590,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
(ins uncondbrtarget:$dst, pred:$p),
4, IIC_Br, [],
(t2B uncondbrtarget:$dst, pred:$p)>,
- Requires<[IsThumb2, IsIOS]>, Sched<[WriteBr]>;
+ Requires<[IsThumb2, IsMachO]>, Sched<[WriteBr]>;
}
// IT block
@@ -3653,7 +3685,8 @@ def : t2InstAlias<"cps.w $mode", (t2CPS1p imm0_31:$mode), 0>;
// A6.3.4 Branches and miscellaneous control
// Table A6-14 Change Processor State, and hint instructions
-def t2HINT : T2I<(outs), (ins imm0_239:$imm), NoItinerary, "hint", ".w\t$imm",[]> {
+def t2HINT : T2I<(outs), (ins imm0_239:$imm), NoItinerary, "hint", ".w\t$imm",
+ [(int_arm_hint imm0_239:$imm)]> {
bits<8> imm;
let Inst{31-3} = 0b11110011101011111000000000000;
let Inst{7-0} = imm;
@@ -3680,7 +3713,7 @@ def t2DBG : T2I<(outs), (ins imm0_15:$opt), NoItinerary, "dbg", "\t$opt", []> {
// Secure Monitor Call is a system instruction.
// Option = Inst{19-16}
-def t2SMC : T2I<(outs), (ins imm0_15:$opt), NoItinerary, "smc", "\t$opt",
+def t2SMC : T2I<(outs), (ins imm0_15:$opt), NoItinerary, "smc", "\t$opt",
[]>, Requires<[IsThumb2, HasTrustZone]> {
let Inst{31-27} = 0b11110;
let Inst{26-20} = 0b1111111;
@@ -3781,7 +3814,7 @@ def t2SUBS_PC_LR : T2I <(outs), (ins imm0_255:$imm), NoItinerary,
let isReMaterializable = 1, isMoveImm = 1 in
def t2MOVi32imm : PseudoInst<(outs rGPR:$dst), (ins i32imm:$src), IIC_iMOVix2,
[(set rGPR:$dst, (i32 imm:$src))]>,
- Requires<[IsThumb, HasV6T2]>;
+ Requires<[IsThumb, UseMovt]>;
// Pseudo instruction that combines movw + movt + add pc (if pic).
// It also makes it possible to rematerialize the instructions.
@@ -3793,15 +3826,9 @@ def t2MOV_ga_pcrel : PseudoInst<(outs rGPR:$dst), (ins i32imm:$addr),
[(set rGPR:$dst, (ARMWrapperPIC tglobaladdr:$addr))]>,
Requires<[IsThumb2, UseMovt]>;
-def t2MOV_ga_dyn : PseudoInst<(outs rGPR:$dst), (ins i32imm:$addr),
- IIC_iMOVix2,
- [(set rGPR:$dst, (ARMWrapperDYN tglobaladdr:$addr))]>,
- Requires<[IsThumb2, UseMovt]>;
}
// ConstantPool, GlobalAddress, and JumpTable
-def : T2Pat<(ARMWrapper tglobaladdr :$dst), (t2LEApcrel tglobaladdr :$dst)>,
- Requires<[IsThumb2, DontUseMovt]>;
def : T2Pat<(ARMWrapper tconstpool :$dst), (t2LEApcrel tconstpool :$dst)>;
def : T2Pat<(ARMWrapper tglobaladdr :$dst), (t2MOVi32imm tglobaladdr :$dst)>,
Requires<[IsThumb2, UseMovt]>;
@@ -4266,7 +4293,7 @@ def : t2InstAlias<"sbc${s}${p} $Rd, $Rn, $ShiftedRm",
// Aliases for ADD without the ".w" optional width specifier.
def : t2InstAlias<"add${s}${p} $Rd, $Rn, $imm",
- (t2ADDri GPRnopc:$Rd, GPRnopc:$Rn, t2_so_imm:$imm, pred:$p,
+ (t2ADDri GPRnopc:$Rd, GPRnopc:$Rn, t2_so_imm:$imm, pred:$p,
cc_out:$s)>;
def : t2InstAlias<"add${p} $Rd, $Rn, $imm",
(t2ADDri12 GPRnopc:$Rd, GPR:$Rn, imm0_4095:$imm, pred:$p)>;
@@ -4371,7 +4398,7 @@ def : t2InstAlias<"ldrsh${p} $Rt, $addr",
(t2LDRSHs rGPR:$Rt, t2addrmode_so_reg:$addr, pred:$p)>;
def : t2InstAlias<"ldr${p} $Rt, $addr",
- (t2LDRpci rGPR:$Rt, t2ldrlabel:$addr, pred:$p)>;
+ (t2LDRpci GPRnopc:$Rt, t2ldrlabel:$addr, pred:$p)>;
def : t2InstAlias<"ldrb${p} $Rt, $addr",
(t2LDRBpci rGPR:$Rt, t2ldrlabel:$addr, pred:$p)>;
def : t2InstAlias<"ldrh${p} $Rt, $addr",
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td b/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td
index a8cdc5c..55a6efc 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td
@@ -200,13 +200,34 @@ let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
defm VLDM : vfp_ldst_mult<"vldm", 1, IIC_fpLoad_m, IIC_fpLoad_mu>;
let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
-defm VSTM : vfp_ldst_mult<"vstm", 0, IIC_fpLoad_m, IIC_fpLoad_mu>;
+defm VSTM : vfp_ldst_mult<"vstm", 0, IIC_fpStore_m, IIC_fpStore_mu>;
} // neverHasSideEffects
def : MnemonicAlias<"vldm", "vldmia">;
def : MnemonicAlias<"vstm", "vstmia">;
+// FLDM/FSTM - Load / Store multiple single / double precision registers for
+// pre-ARMv6 cores.
+// These instructions are deprecated!
+def : VFP2MnemonicAlias<"fldmias", "vldmia">;
+def : VFP2MnemonicAlias<"fldmdbs", "vldmdb">;
+def : VFP2MnemonicAlias<"fldmeas", "vldmdb">;
+def : VFP2MnemonicAlias<"fldmfds", "vldmia">;
+def : VFP2MnemonicAlias<"fldmiad", "vldmia">;
+def : VFP2MnemonicAlias<"fldmdbd", "vldmdb">;
+def : VFP2MnemonicAlias<"fldmead", "vldmdb">;
+def : VFP2MnemonicAlias<"fldmfdd", "vldmia">;
+
+def : VFP2MnemonicAlias<"fstmias", "vstmia">;
+def : VFP2MnemonicAlias<"fstmdbs", "vstmdb">;
+def : VFP2MnemonicAlias<"fstmeas", "vstmia">;
+def : VFP2MnemonicAlias<"fstmfds", "vstmdb">;
+def : VFP2MnemonicAlias<"fstmiad", "vstmia">;
+def : VFP2MnemonicAlias<"fstmdbd", "vstmdb">;
+def : VFP2MnemonicAlias<"fstmead", "vstmia">;
+def : VFP2MnemonicAlias<"fstmfdd", "vstmdb">;
+
def : InstAlias<"vpush${p} $r", (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>,
Requires<[HasVFP2]>;
def : InstAlias<"vpush${p} $r", (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>,
@@ -247,7 +268,7 @@ multiclass vfp_ldstx_mult<string asm, bit L_bit> {
AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
IndexModeUpd, !strconcat(asm, "dbx${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
let Inst{24-23} = 0b10; // Decrement Before
- let Inst{21} = 1;
+ let Inst{21} = 1; // Writeback
let Inst{20} = L_bit;
}
}
@@ -255,6 +276,12 @@ multiclass vfp_ldstx_mult<string asm, bit L_bit> {
defm FLDM : vfp_ldstx_mult<"fldm", 1>;
defm FSTM : vfp_ldstx_mult<"fstm", 0>;
+def : VFP2MnemonicAlias<"fldmeax", "fldmdbx">;
+def : VFP2MnemonicAlias<"fldmfdx", "fldmiax">;
+
+def : VFP2MnemonicAlias<"fstmeax", "fstmiax">;
+def : VFP2MnemonicAlias<"fstmfdx", "fstmdbx">;
+
//===----------------------------------------------------------------------===//
// FP Binary Operations.
//
@@ -524,12 +551,6 @@ def VCVTBSH: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
/* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$Sd, $Sm",
[/* For disassembly only; pattern left blank */]>;
-def : Pat<(f32_to_f16 SPR:$a),
- (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;
-
-def : Pat<(f16_to_f32 GPR:$a),
- (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;
-
def VCVTTHS: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
/* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$Sd, $Sm",
[/* For disassembly only; pattern left blank */]>;
@@ -592,6 +613,19 @@ def VCVTTDH : ADuI<0b11101, 0b11, 0b0011, 0b11, 0,
let Inst{5} = Dm{4};
}
+def : Pat<(fp_to_f16 SPR:$a),
+ (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;
+
+def : Pat<(fp_to_f16 (f64 DPR:$a)),
+ (i32 (COPY_TO_REGCLASS (VCVTBDH DPR:$a), GPR))>;
+
+def : Pat<(f16_to_fp GPR:$a),
+ (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;
+
+def : Pat<(f64 (f16_to_fp GPR:$a)),
+ (VCVTBHD (COPY_TO_REGCLASS GPR:$a, SPR))>;
+
+
multiclass vcvt_inst<string opc, bits<2> rm> {
let PostEncoderMethod = "", DecoderNamespace = "VFPV8" in {
def SS : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
@@ -1639,7 +1673,7 @@ def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm),
//===----------------------------------------------------------------------===//
// Assembler aliases.
//
-// A few mnemnoic aliases for pre-unifixed syntax. We don't guarantee to
+// A few mnemonic aliases for pre-unified syntax. We don't guarantee to
// support them all, but supporting at least some of the basics is
// good to be friendly.
def : VFP2MnemonicAlias<"flds", "vldr">;
@@ -1735,3 +1769,14 @@ def : VFP2InstAlias<"vmov${p}.f64 $Dn, $Rt, $Rt2",
// VMOVD does.
def : VFP2InstAlias<"vmov${p} $Sd, $Sm",
(VMOVS SPR:$Sd, SPR:$Sm, pred:$p)>;
+
+// FCONSTD/FCONSTS alias for vmov.f64/vmov.f32
+// These aliases provide added functionality over vmov.f instructions by
+// allowing users to write assembly containing encoded floating point constants
+// (e.g. #0x70 vs #1.0). Without these aliases there is no way for the
+// assembler to accept encoded fp constants (but the equivalent fp-literal is
+// accepted directly by vmovf).
+def : VFP3InstAlias<"fconstd${p} $Dd, $val",
+ (FCONSTD DPR:$Dd, vfp_f64imm:$val, pred:$p)>;
+def : VFP3InstAlias<"fconsts${p} $Sd, $val",
+ (FCONSTS SPR:$Sd, vfp_f32imm:$val, pred:$p)>;
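The fconstd/fconsts aliases take the raw 8-bit encoded immediate rather than the floating-point literal; the "#0x70 vs #1.0" pair in the comment follows from the architectural VFPExpandImm rule. A small C++ sketch of that expansion for single precision, assuming the usual abcdefgh field split of the immediate (illustrative code, not the assembler's implementation):

#include <cassert>
#include <cstdint>
#include <cstring>

// Expand imm8 = abcdefgh into an IEEE-754 single: sign = a,
// exponent = NOT(b):b:b:b:b:c:d, fraction = efgh followed by zeros.
static float vfpExpandImmToF32(uint8_t imm8) {
  uint32_t a    = (imm8 >> 7) & 1;
  uint32_t b    = (imm8 >> 6) & 1;
  uint32_t cd   = (imm8 >> 4) & 3;
  uint32_t efgh =  imm8       & 0xF;
  uint32_t exp  = ((b ^ 1u) << 7) | (b ? 0x7Cu : 0u) | cd;
  uint32_t bits = (a << 31) | (exp << 23) | (efgh << 19);
  float f;
  std::memcpy(&f, &bits, sizeof f);
  return f;
}

int main() {
  assert(vfpExpandImmToF32(0x70) ==  1.0f);  // "fconsts s0, #0x70" == vmov.f32 s0, #1.0
  assert(vfpExpandImmToF32(0x00) ==  2.0f);
  assert(vfpExpandImmToF32(0xF0) == -1.0f);
}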
diff --git a/contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp
index 351a290..6d1114d 100644
--- a/contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp
@@ -11,12 +11,11 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "jit"
#include "ARMJITInfo.h"
-#include "ARM.h"
#include "ARMConstantPoolValue.h"
+#include "ARMMachineFunctionInfo.h"
#include "ARMRelocations.h"
-#include "ARMSubtarget.h"
+#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/CodeGen/JITCodeEmitter.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/Debug.h"
@@ -26,6 +25,8 @@
#include <cstdlib>
using namespace llvm;
+#define DEBUG_TYPE "jit"
+
void ARMJITInfo::replaceMachineCodeForFunction(void *Old, void *New) {
report_fatal_error("ARMJITInfo::replaceMachineCodeForFunction");
}
@@ -320,13 +321,13 @@ void ARMJITInfo::relocate(void *Function, MachineRelocation *MR,
break;
}
case ARM::reloc_arm_movw: {
- ResultPtr = ResultPtr & 0xFFFF;
+ ResultPtr = ResultPtr & 0xFFFF;
*((intptr_t*)RelocPos) |= ResultPtr & 0xFFF;
*((intptr_t*)RelocPos) |= ((ResultPtr >> 12) & 0xF) << 16;
break;
}
case ARM::reloc_arm_movt: {
- ResultPtr = (ResultPtr >> 16) & 0xFFFF;
+ ResultPtr = (ResultPtr >> 16) & 0xFFFF;
*((intptr_t*)RelocPos) |= ResultPtr & 0xFFF;
*((intptr_t*)RelocPos) |= ((ResultPtr >> 12) & 0xF) << 16;
break;
@@ -334,3 +335,10 @@ void ARMJITInfo::relocate(void *Function, MachineRelocation *MR,
}
}
}
+
+void ARMJITInfo::Initialize(const MachineFunction &MF, bool isPIC) {
+ const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+ ConstPoolId2AddrMap.resize(AFI->getNumPICLabels());
+ JumpTableId2AddrMap.resize(AFI->getNumJumpTables());
+ IsPIC = isPIC;
+}
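The reloc_arm_movw/reloc_arm_movt cases above differ only in which 16-bit half of the resolved address they keep; both then scatter that half into the movw/movt imm12 (bits 11:0) and imm4 (bits 19:16) fields. A standalone C++ sketch of the same patching arithmetic (the function name and the zero template instruction are assumptions; the real code ORs into *RelocPos):

#include <cassert>
#include <cstdint>

static uint32_t patchMovImmediate(uint32_t Inst, uint32_t Addr, bool IsMovt) {
  uint32_t Half = IsMovt ? (Addr >> 16) & 0xFFFF : Addr & 0xFFFF;
  Inst |=   Half        & 0xFFF;         // imm12 field
  Inst |= ((Half >> 12) & 0xF) << 16;    // imm4 field
  return Inst;
}

int main() {
  uint32_t I = patchMovImmediate(0, 0x12345678, /*IsMovt=*/false); // movw half
  assert((I & 0xFFF) == 0x678 && ((I >> 16) & 0xF) == 0x5);
  I = patchMovImmediate(0, 0x12345678, /*IsMovt=*/true);           // movt half
  assert((I & 0xFFF) == 0x234 && ((I >> 16) & 0xF) == 0x1);
}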
diff --git a/contrib/llvm/lib/Target/ARM/ARMJITInfo.h b/contrib/llvm/lib/Target/ARM/ARMJITInfo.h
index 23a6a9b..27e2a20 100644
--- a/contrib/llvm/lib/Target/ARM/ARMJITInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMJITInfo.h
@@ -14,7 +14,6 @@
#ifndef ARMJITINFO_H
#define ARMJITINFO_H
-#include "ARMMachineFunctionInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineConstantPool.h"
@@ -53,45 +52,45 @@ namespace llvm {
/// overwriting OLD with a branch to NEW. This is used for self-modifying
/// code.
///
- virtual void replaceMachineCodeForFunction(void *Old, void *New);
+ void replaceMachineCodeForFunction(void *Old, void *New) override;
/// emitGlobalValueIndirectSym - Use the specified JITCodeEmitter object
/// to emit an indirect symbol which contains the address of the specified
/// ptr.
- virtual void *emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr,
- JITCodeEmitter &JCE);
+ void *emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr,
+ JITCodeEmitter &JCE) override;
// getStubLayout - Returns the size and alignment of the largest call stub
// on ARM.
- virtual StubLayout getStubLayout();
+ StubLayout getStubLayout() override;
/// emitFunctionStub - Use the specified JITCodeEmitter object to emit a
/// small native function that simply calls the function at the specified
/// address.
- virtual void *emitFunctionStub(const Function* F, void *Fn,
- JITCodeEmitter &JCE);
+ void *emitFunctionStub(const Function* F, void *Fn,
+ JITCodeEmitter &JCE) override;
/// getLazyResolverFunction - Expose the lazy resolver to the JIT.
- virtual LazyResolverFn getLazyResolverFunction(JITCompilerFn);
+ LazyResolverFn getLazyResolverFunction(JITCompilerFn) override;
/// relocate - Before the JIT can run a block of code that has been emitted,
/// it must rewrite the code to contain the actual addresses of any
/// referenced global symbols.
- virtual void relocate(void *Function, MachineRelocation *MR,
- unsigned NumRelocs, unsigned char* GOTBase);
+ void relocate(void *Function, MachineRelocation *MR,
+ unsigned NumRelocs, unsigned char* GOTBase) override;
/// hasCustomConstantPool - Allows a target to specify that constant
/// pool address resolution is handled by the target.
- virtual bool hasCustomConstantPool() const { return true; }
+ bool hasCustomConstantPool() const override { return true; }
/// hasCustomJumpTables - Allows a target to specify that jumptables
/// are emitted by the target.
- virtual bool hasCustomJumpTables() const { return true; }
+ bool hasCustomJumpTables() const override { return true; }
/// allocateSeparateGVMemory - If true, globals should be placed in
/// separately allocated heap memory rather than in the same
/// code memory allocated by JITCodeEmitter.
- virtual bool allocateSeparateGVMemory() const {
+ bool allocateSeparateGVMemory() const override {
#ifdef __APPLE__
return true;
#else
@@ -103,12 +102,7 @@ namespace llvm {
/// Resize constant pool ids to CONSTPOOL_ENTRY addresses map; resize
/// jump table ids to jump table bases map; remember if codegen relocation
/// model is PIC.
- void Initialize(const MachineFunction &MF, bool isPIC) {
- const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- ConstPoolId2AddrMap.resize(AFI->getNumPICLabels());
- JumpTableId2AddrMap.resize(AFI->getNumJumpTables());
- IsPIC = isPIC;
- }
+ void Initialize(const MachineFunction &MF, bool isPIC);
/// getConstantPoolEntryAddr - The ARM target puts all constant
/// pool entries into constant islands. This returns the address of the
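The header changes here are a mechanical virtual-to-override conversion. A tiny sketch of what that buys, on stand-in types rather than the real TargetJITInfo hierarchy:

// With "override", a signature drift in the base class becomes a compile
// error instead of a silently added, never-called overload.
struct TargetJITInfoLike {
  virtual ~TargetJITInfoLike() = default;
  virtual bool hasCustomConstantPool() const { return false; }
};

struct ARMJITInfoLike : TargetJITInfoLike {
  bool hasCustomConstantPool() const override { return true; }
  // bool hasCustomConstantPool() override { ... }  // would not compile:
  // without "const" it overrides nothing.
};

int main() {
  ARMJITInfoLike J;
  return J.hasCustomConstantPool() ? 0 : 1;
}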
diff --git a/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 61596d5..a03bcdb 100644
--- a/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -12,12 +12,14 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "arm-ldst-opt"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
+#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
+#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
+#include "Thumb1RegisterInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -36,12 +38,13 @@
#include "llvm/IR/Function.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "arm-ldst-opt"
+
STATISTIC(NumLDMGened , "Number of ldm instructions generated");
STATISTIC(NumSTMGened , "Number of stm instructions generated");
STATISTIC(NumVLDMGened, "Number of vldm instructions generated");
@@ -65,13 +68,14 @@ namespace {
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
const ARMSubtarget *STI;
+ const TargetLowering *TL;
ARMFunctionInfo *AFI;
RegScavenger *RS;
- bool isThumb2;
+ bool isThumb1, isThumb2;
- virtual bool runOnMachineFunction(MachineFunction &Fn);
+ bool runOnMachineFunction(MachineFunction &Fn) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "ARM load / store optimization pass";
}
@@ -93,7 +97,10 @@ namespace {
void findUsesOfImpDef(SmallVectorImpl<MachineOperand *> &UsesOfImpDefs,
const MemOpQueue &MemOps, unsigned DefReg,
unsigned RangeBegin, unsigned RangeEnd);
-
+ void UpdateBaseRegUses(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc dl, unsigned Base, unsigned WordOffset,
+ ARMCC::CondCodes Pred, unsigned PredReg);
bool MergeOps(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
int Offset, unsigned Base, bool BaseKill, int Opcode,
ARMCC::CondCodes Pred, unsigned PredReg, unsigned Scratch,
@@ -119,7 +126,6 @@ namespace {
ARMCC::CondCodes Pred, unsigned PredReg,
unsigned Scratch, MemOpQueue &MemOps,
SmallVectorImpl<MachineBasicBlock::iterator> &Merges);
-
void AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps);
bool FixInvalidRegPairOp(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI);
@@ -159,6 +165,21 @@ static int getLoadStoreMultipleOpcode(int Opcode, ARM_AM::AMSubMode Mode) {
case ARM_AM::db: return ARM::STMDB;
case ARM_AM::ib: return ARM::STMIB;
}
+ case ARM::tLDRi:
+ // tLDMIA is writeback-only - unless the base register is in the input
+ // reglist.
+ ++NumLDMGened;
+ switch (Mode) {
+ default: llvm_unreachable("Unhandled submode!");
+ case ARM_AM::ia: return ARM::tLDMIA;
+ }
+ case ARM::tSTRi:
+ // There is no non-writeback tSTMIA either.
+ ++NumSTMGened;
+ switch (Mode) {
+ default: llvm_unreachable("Unhandled submode!");
+ case ARM_AM::ia: return ARM::tSTMIA_UPD;
+ }
case ARM::t2LDRi8:
case ARM::t2LDRi12:
++NumLDMGened;
@@ -217,6 +238,9 @@ AMSubMode getLoadStoreMultipleSubMode(int Opcode) {
case ARM::LDMIA_UPD:
case ARM::STMIA:
case ARM::STMIA_UPD:
+ case ARM::tLDMIA:
+ case ARM::tLDMIA_UPD:
+ case ARM::tSTMIA_UPD:
case ARM::t2LDMIA_RET:
case ARM::t2LDMIA:
case ARM::t2LDMIA_UPD:
@@ -263,12 +287,20 @@ AMSubMode getLoadStoreMultipleSubMode(int Opcode) {
} // end namespace ARM_AM
} // end namespace llvm
+static bool isT1i32Load(unsigned Opc) {
+ return Opc == ARM::tLDRi;
+}
+
static bool isT2i32Load(unsigned Opc) {
return Opc == ARM::t2LDRi12 || Opc == ARM::t2LDRi8;
}
static bool isi32Load(unsigned Opc) {
- return Opc == ARM::LDRi12 || isT2i32Load(Opc);
+ return Opc == ARM::LDRi12 || isT1i32Load(Opc) || isT2i32Load(Opc) ;
+}
+
+static bool isT1i32Store(unsigned Opc) {
+ return Opc == ARM::tSTRi;
}
static bool isT2i32Store(unsigned Opc) {
@@ -276,7 +308,102 @@ static bool isT2i32Store(unsigned Opc) {
}
static bool isi32Store(unsigned Opc) {
- return Opc == ARM::STRi12 || isT2i32Store(Opc);
+ return Opc == ARM::STRi12 || isT1i32Store(Opc) || isT2i32Store(Opc);
+}
+
+static unsigned getImmScale(unsigned Opc) {
+ switch (Opc) {
+ default: llvm_unreachable("Unhandled opcode!");
+ case ARM::tLDRi:
+ case ARM::tSTRi:
+ return 1;
+ case ARM::tLDRHi:
+ case ARM::tSTRHi:
+ return 2;
+ case ARM::tLDRBi:
+ case ARM::tSTRBi:
+ return 4;
+ }
+}
+
+/// Update future uses of the base register with the offset introduced
+/// due to writeback. This function only works on Thumb1.
+void
+ARMLoadStoreOpt::UpdateBaseRegUses(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc dl, unsigned Base,
+ unsigned WordOffset,
+ ARMCC::CondCodes Pred, unsigned PredReg) {
+ assert(isThumb1 && "Can only update base register uses for Thumb1!");
+
+ // Start updating any instructions with immediate offsets. Insert a sub before
+ // the first non-updateable instruction (if any).
+ for (; MBBI != MBB.end(); ++MBBI) {
+ if (MBBI->readsRegister(Base)) {
+ unsigned Opc = MBBI->getOpcode();
+ int Offset;
+ bool InsertSub = false;
+
+ if (Opc == ARM::tLDRi || Opc == ARM::tSTRi ||
+ Opc == ARM::tLDRHi || Opc == ARM::tSTRHi ||
+ Opc == ARM::tLDRBi || Opc == ARM::tSTRBi) {
+ // Loads and stores with immediate offsets can be updated, but only if
+ // the new offset isn't negative.
+ // The MachineOperand containing the offset immediate is the last one
+ // before predicates.
+ MachineOperand &MO =
+ MBBI->getOperand(MBBI->getDesc().getNumOperands() - 3);
+ // The offsets are scaled by 1, 2 or 4 depending on the Opcode
+ Offset = MO.getImm() - WordOffset * getImmScale(Opc);
+ if (Offset >= 0)
+ MO.setImm(Offset);
+ else
+ InsertSub = true;
+
+ } else if (Opc == ARM::tSUBi8 || Opc == ARM::tADDi8) {
+ // SUB/ADD using this register. Merge it with the update.
+ // If the merged offset is too large, insert a new sub instead.
+ MachineOperand &MO =
+ MBBI->getOperand(MBBI->getDesc().getNumOperands() - 3);
+ Offset = (Opc == ARM::tSUBi8) ?
+ MO.getImm() + WordOffset * 4 :
+ MO.getImm() - WordOffset * 4 ;
+ if (TL->isLegalAddImmediate(Offset)) {
+ MO.setImm(Offset);
+ // The base register has now been reset, so exit early.
+ return;
+ } else {
+ InsertSub = true;
+ }
+
+ } else {
+ // Can't update the instruction.
+ InsertSub = true;
+ }
+
+ if (InsertSub) {
+ // An instruction above couldn't be updated, so insert a sub.
+ AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII->get(ARM::tSUBi8), Base))
+ .addReg(Base, getKillRegState(true)).addImm(WordOffset * 4)
+ .addImm(Pred).addReg(PredReg);
+ return;
+ }
+ }
+
+ if (MBBI->killsRegister(Base))
+ // Register got killed. Stop updating.
+ return;
+ }
+
+ // The end of the block was reached. This means register liveness escapes the
+ // block, and it's necessary to insert a sub before the last instruction.
+ if (MBB.succ_size() > 0)
+ // But only insert the SUB if there is actually a successor block.
+ // FIXME: Check more carefully if register is live at this point, e.g. by
+ // also examining the successor block's register liveness information.
+ AddDefaultT1CC(BuildMI(MBB, --MBBI, dl, TII->get(ARM::tSUBi8), Base))
+ .addReg(Base, getKillRegState(true)).addImm(WordOffset * 4)
+ .addImm(Pred).addReg(PredReg);
}
/// MergeOps - Create and insert a LDM or STM with Base as base register and
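UpdateBaseRegUses compensates every later user of the base register for the writeback introduced by a merged Thumb1 LDM/STM: immediate offsets are reduced by the write-back amount, expressed in the opcode's own immediate units via getImmScale, and a subtracting instruction is inserted once that is no longer possible. The same arithmetic on plain integers, as a hedged sketch with invented names:

#include <cassert>

// Returns the adjusted immediate, or -1 when a compensating SUB is needed.
static int adjustImmForWriteback(int OldImm, unsigned WordOffset,
                                 unsigned ImmScale /* 1, 2 or 4 */) {
  int NewImm = OldImm - int(WordOffset * ImmScale);
  return NewImm >= 0 ? NewImm : -1;
}

int main() {
  // After "ldmia r0!, {r1, r2}" the base has advanced by 2 words (8 bytes).
  // tLDRi immediates count words: "ldr r3, [r0, #12]" (imm 3) becomes
  // "ldr r3, [r0, #4]" (imm 1).
  assert(adjustImmForWriteback(3, 2, 1) == 1);
  // tLDRBi immediates count bytes (scale 4 per word): "ldrb r3, [r0, #4]"
  // would go negative, so a "subs r0, #8" must be inserted instead.
  assert(adjustImmForWriteback(4, 2, 4) == -1);
}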
@@ -296,18 +423,19 @@ ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
return false;
ARM_AM::AMSubMode Mode = ARM_AM::ia;
- // VFP and Thumb2 do not support IB or DA modes.
+ // VFP and Thumb2 do not support IB or DA modes. Thumb1 only supports IA.
bool isNotVFP = isi32Load(Opcode) || isi32Store(Opcode);
- bool haveIBAndDA = isNotVFP && !isThumb2;
- if (Offset == 4 && haveIBAndDA)
+ bool haveIBAndDA = isNotVFP && !isThumb2 && !isThumb1;
+
+ if (Offset == 4 && haveIBAndDA) {
Mode = ARM_AM::ib;
- else if (Offset == -4 * (int)NumRegs + 4 && haveIBAndDA)
+ } else if (Offset == -4 * (int)NumRegs + 4 && haveIBAndDA) {
Mode = ARM_AM::da;
- else if (Offset == -4 * (int)NumRegs && isNotVFP)
+ } else if (Offset == -4 * (int)NumRegs && isNotVFP && !isThumb1) {
// VLDM/VSTM do not support DB mode without also updating the base reg.
Mode = ARM_AM::db;
- else if (Offset != 0) {
- // Check if this is a supported opcode before we insert instructions to
+ } else if (Offset != 0) {
+ // Check if this is a supported opcode before inserting instructions to
// calculate a new base register.
if (!getLoadStoreMultipleOpcode(Opcode, Mode)) return false;
@@ -318,41 +446,98 @@ ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
return false;
unsigned NewBase;
- if (isi32Load(Opcode))
+ if (isi32Load(Opcode)) {
// If it is a load, then just use one of the destination register to
// use as the new base.
NewBase = Regs[NumRegs-1].first;
- else {
+ } else {
// Use the scratch register to use as a new base.
NewBase = Scratch;
if (NewBase == 0)
return false;
}
- int BaseOpc = !isThumb2 ? ARM::ADDri : ARM::t2ADDri;
+
+ int BaseOpc =
+ isThumb2 ? ARM::t2ADDri :
+ isThumb1 ? ARM::tADDi8 : ARM::ADDri;
+
if (Offset < 0) {
- BaseOpc = !isThumb2 ? ARM::SUBri : ARM::t2SUBri;
+ BaseOpc =
+ isThumb2 ? ARM::t2SUBri :
+ isThumb1 ? ARM::tSUBi8 : ARM::SUBri;
Offset = - Offset;
}
- int ImmedOffset = isThumb2
- ? ARM_AM::getT2SOImmVal(Offset) : ARM_AM::getSOImmVal(Offset);
- if (ImmedOffset == -1)
- // FIXME: Try t2ADDri12 or t2SUBri12?
- return false; // Probably not worth it then.
-
- BuildMI(MBB, MBBI, dl, TII->get(BaseOpc), NewBase)
- .addReg(Base, getKillRegState(BaseKill)).addImm(Offset)
- .addImm(Pred).addReg(PredReg).addReg(0);
+
+ if (!TL->isLegalAddImmediate(Offset))
+ // FIXME: Try add with register operand?
+ return false; // Probably not worth it then.
+
+ if (isThumb1) {
+ if (Base != NewBase) {
+ // Need to insert a MOV to the new base first.
+ // FIXME: If the immediate fits in 3 bits, use ADD instead.
+ BuildMI(MBB, MBBI, dl, TII->get(ARM::tMOVr), NewBase)
+ .addReg(Base, getKillRegState(BaseKill))
+ .addImm(Pred).addReg(PredReg);
+ }
+ AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII->get(BaseOpc), NewBase))
+ .addReg(NewBase, getKillRegState(true)).addImm(Offset)
+ .addImm(Pred).addReg(PredReg);
+ } else {
+ BuildMI(MBB, MBBI, dl, TII->get(BaseOpc), NewBase)
+ .addReg(Base, getKillRegState(BaseKill)).addImm(Offset)
+ .addImm(Pred).addReg(PredReg).addReg(0);
+ }
+
Base = NewBase;
- BaseKill = true; // New base is always killed right its use.
+ BaseKill = true; // New base is always killed straight away.
}
bool isDef = (isi32Load(Opcode) || Opcode == ARM::VLDRS ||
Opcode == ARM::VLDRD);
+
+ // Get LS multiple opcode. Note that for Thumb1 this might be an opcode with
+ // base register writeback.
Opcode = getLoadStoreMultipleOpcode(Opcode, Mode);
if (!Opcode) return false;
- MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII->get(Opcode))
- .addReg(Base, getKillRegState(BaseKill))
- .addImm(Pred).addReg(PredReg);
+
+ bool Writeback = isThumb1; // Thumb1 LDM/STM have base reg writeback.
+
+ // Exception: If the base register is in the input reglist, Thumb1 LDM is
+ // non-writeback. Check for this.
+ if (Opcode == ARM::tLDMIA && isThumb1)
+ for (unsigned I = 0; I < NumRegs; ++I)
+ if (Base == Regs[I].first) {
+ Writeback = false;
+ break;
+ }
+
+ MachineInstrBuilder MIB;
+
+ if (Writeback) {
+ if (Opcode == ARM::tLDMIA)
+ // Update tLDMIA with writeback if necessary.
+ Opcode = ARM::tLDMIA_UPD;
+
+ MIB = BuildMI(MBB, MBBI, dl, TII->get(Opcode));
+
+ // Thumb1: we might need to set base writeback when building the MI.
+ MIB.addReg(Base, getDefRegState(true))
+ .addReg(Base, getKillRegState(BaseKill));
+
+ // The base isn't dead after a merged instruction with writeback. Update
+ // future uses of the base with the added offset (if possible), or reset
+ // the base register as necessary.
+ if (!BaseKill)
+ UpdateBaseRegUses(MBB, MBBI, dl, Base, NumRegs, Pred, PredReg);
+ } else {
+ // No writeback, simply build the MachineInstr.
+ MIB = BuildMI(MBB, MBBI, dl, TII->get(Opcode));
+ MIB.addReg(Base, getKillRegState(BaseKill));
+ }
+
+ MIB.addImm(Pred).addReg(PredReg);
+
for (unsigned i = 0; i != NumRegs; ++i)
MIB = MIB.addReg(Regs[i].first, getDefRegState(isDef)
| getKillRegState(Regs[i].second));
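The writeback decision made while building the merged instruction follows the Thumb1 rule spelled out in the comments above: tSTMIA only exists with writeback, and tLDMIA writes the base back unless the base register is itself in the transfer list. A toy C++ version of that check, with plain register numbers and a std::vector standing in for the Regs array:

#include <vector>

static bool thumb1LdmNeedsWriteback(unsigned BaseReg,
                                    const std::vector<unsigned> &Regs) {
  for (unsigned R : Regs)
    if (R == BaseReg)
      return false;          // e.g. "ldm r0, {r0, r3}": base is reloaded
  return true;               // e.g. "ldm r0!, {r1, r2}": base advances
}

int main() {
  bool A = thumb1LdmNeedsWriteback(0, {0, 3});  // false: no '!' form used
  bool B = thumb1LdmNeedsWriteback(0, {1, 2});  // true:  tLDMIA_UPD is picked
  return (!A && B) ? 0 : 1;
}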
@@ -484,7 +669,7 @@ void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
return;
// Merge succeeded, update records.
- Merges.push_back(prior(Loc));
+ Merges.push_back(std::prev(Loc));
// In gathering loads together, we may have moved the imp-def of a register
// past one of its uses. This is OK, since we know better than the rest of
@@ -492,7 +677,7 @@ void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
// affected uses.
for (SmallVectorImpl<MachineOperand *>::iterator I = UsesOfImpDefs.begin(),
E = UsesOfImpDefs.end();
- I != E; ++I)
+ I != E; ++I)
(*I)->setIsUndef();
for (unsigned i = memOpsBegin; i < memOpsEnd; ++i) {
@@ -589,7 +774,6 @@ ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex,
bool BaseKill = Loc->findRegisterUseOperandIdx(Base, true) != -1;
MergeOpsUpdate(MBB, MemOps, SIndex, MemOps.size(), insertAfter, SOffset,
Base, BaseKill, Opcode, Pred, PredReg, Scratch, dl, Merges);
- return;
}
static bool definesCPSR(MachineInstr *MI) {
@@ -616,6 +800,7 @@ static bool isMatchingDecrement(MachineInstr *MI, unsigned Base,
bool CheckCPSRDef = false;
switch (MI->getOpcode()) {
default: return false;
+ case ARM::tSUBi8:
case ARM::t2SUBri:
case ARM::SUBri:
CheckCPSRDef = true;
@@ -628,10 +813,11 @@ static bool isMatchingDecrement(MachineInstr *MI, unsigned Base,
if (Bytes == 0 || (Limit && Bytes >= Limit))
return false;
- unsigned Scale = (MI->getOpcode() == ARM::tSUBspi) ? 4 : 1; // FIXME
+ unsigned Scale = (MI->getOpcode() == ARM::tSUBspi ||
+ MI->getOpcode() == ARM::tSUBi8) ? 4 : 1; // FIXME
if (!(MI->getOperand(0).getReg() == Base &&
MI->getOperand(1).getReg() == Base &&
- (MI->getOperand(2).getImm()*Scale) == Bytes &&
+ (MI->getOperand(2).getImm() * Scale) == Bytes &&
getInstrPredicate(MI, MyPredReg) == Pred &&
MyPredReg == PredReg))
return false;
@@ -649,6 +835,7 @@ static bool isMatchingIncrement(MachineInstr *MI, unsigned Base,
bool CheckCPSRDef = false;
switch (MI->getOpcode()) {
default: return false;
+ case ARM::tADDi8:
case ARM::t2ADDri:
case ARM::ADDri:
CheckCPSRDef = true;
@@ -661,10 +848,11 @@ static bool isMatchingIncrement(MachineInstr *MI, unsigned Base,
// Make sure the offset fits in 8 bits.
return false;
- unsigned Scale = (MI->getOpcode() == ARM::tADDspi) ? 4 : 1; // FIXME
+ unsigned Scale = (MI->getOpcode() == ARM::tADDspi ||
+ MI->getOpcode() == ARM::tADDi8) ? 4 : 1; // FIXME
if (!(MI->getOperand(0).getReg() == Base &&
MI->getOperand(1).getReg() == Base &&
- (MI->getOperand(2).getImm()*Scale) == Bytes &&
+ (MI->getOperand(2).getImm() * Scale) == Bytes &&
getInstrPredicate(MI, MyPredReg) == Pred &&
MyPredReg == PredReg))
return false;
@@ -677,6 +865,8 @@ static inline unsigned getLSMultipleTransferSize(MachineInstr *MI) {
default: return 0;
case ARM::LDRi12:
case ARM::STRi12:
+ case ARM::tLDRi:
+ case ARM::tSTRi:
case ARM::t2LDRi8:
case ARM::t2LDRi12:
case ARM::t2STRi8:
@@ -695,6 +885,9 @@ static inline unsigned getLSMultipleTransferSize(MachineInstr *MI) {
case ARM::STMDA:
case ARM::STMDB:
case ARM::STMIB:
+ case ARM::tLDMIA:
+ case ARM::tLDMIA_UPD:
+ case ARM::tSTMIA_UPD:
case ARM::t2LDMIA:
case ARM::t2LDMDB:
case ARM::t2STMIA:
@@ -791,6 +984,9 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
bool &Advance,
MachineBasicBlock::iterator &I) {
+ // Thumb1 is already using updating loads/stores.
+ if (isThumb1) return false;
+
MachineInstr *MI = MBBI;
unsigned Base = MI->getOperand(0).getReg();
bool BaseKill = MI->getOperand(0).isKill();
@@ -812,7 +1008,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
// Try merging with the previous instruction.
MachineBasicBlock::iterator BeginMBBI = MBB.begin();
if (MBBI != BeginMBBI) {
- MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
+ MachineBasicBlock::iterator PrevMBBI = std::prev(MBBI);
while (PrevMBBI != BeginMBBI && PrevMBBI->isDebugValue())
--PrevMBBI;
if (Mode == ARM_AM::ia &&
@@ -831,7 +1027,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
// Try merging with the next instruction.
MachineBasicBlock::iterator EndMBBI = MBB.end();
if (!DoMerge && MBBI != EndMBBI) {
- MachineBasicBlock::iterator NextMBBI = llvm::next(MBBI);
+ MachineBasicBlock::iterator NextMBBI = std::next(MBBI);
while (NextMBBI != EndMBBI && NextMBBI->isDebugValue())
++NextMBBI;
if ((Mode == ARM_AM::ia || Mode == ARM_AM::ib) &&
@@ -927,6 +1123,10 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
const TargetInstrInfo *TII,
bool &Advance,
MachineBasicBlock::iterator &I) {
+ // Thumb1 doesn't have updating LDR/STR.
+ // FIXME: Use LDM/STM with single register instead.
+ if (isThumb1) return false;
+
MachineInstr *MI = MBBI;
unsigned Base = MI->getOperand(1).getReg();
bool BaseKill = MI->getOperand(1).isKill();
@@ -959,7 +1159,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
// Try merging with the previous instruction.
MachineBasicBlock::iterator BeginMBBI = MBB.begin();
if (MBBI != BeginMBBI) {
- MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
+ MachineBasicBlock::iterator PrevMBBI = std::prev(MBBI);
while (PrevMBBI != BeginMBBI && PrevMBBI->isDebugValue())
--PrevMBBI;
if (isMatchingDecrement(PrevMBBI, Base, Bytes, Limit, Pred, PredReg)) {
@@ -978,7 +1178,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
// Try merging with the next instruction.
MachineBasicBlock::iterator EndMBBI = MBB.end();
if (!DoMerge && MBBI != EndMBBI) {
- MachineBasicBlock::iterator NextMBBI = llvm::next(MBBI);
+ MachineBasicBlock::iterator NextMBBI = std::next(MBBI);
while (NextMBBI != EndMBBI && NextMBBI->isDebugValue())
++NextMBBI;
if (!isAM5 &&
@@ -1002,7 +1202,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
return false;
if (isAM5) {
- // VLDM[SD}_UPD, VSTM[SD]_UPD
+ // VLDM[SD]_UPD, VSTM[SD]_UPD
// (There are no base-updating versions of VLDR/VSTR instructions, but the
// updating load/store-multiple instructions can be used with only one
// register.)
@@ -1100,6 +1300,8 @@ static bool isMemoryOp(const MachineInstr *MI) {
return MI->getOperand(1).isReg();
case ARM::LDRi12:
case ARM::STRi12:
+ case ARM::tLDRi:
+ case ARM::tSTRi:
case ARM::t2LDRi8:
case ARM::t2LDRi12:
case ARM::t2STRi8:
@@ -1122,7 +1324,7 @@ void ARMLoadStoreOpt::AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps) {
}
if (Loc != MBB.begin())
- RS->forward(prior(Loc));
+ RS->forward(std::prev(Loc));
}
static int getMemoryOpOffset(const MachineInstr *MI) {
@@ -1137,6 +1339,10 @@ static int getMemoryOpOffset(const MachineInstr *MI) {
Opcode == ARM::LDRi12 || Opcode == ARM::STRi12)
return OffField;
+ // Thumb1 immediate offsets are scaled by 4
+ if (Opcode == ARM::tLDRi || Opcode == ARM::tSTRi)
+ return OffField * 4;
+
int Offset = isAM3 ? ARM_AM::getAM3Offset(OffField)
: ARM_AM::getAM5Offset(OffField) * 4;
if (isAM3) {
@@ -1232,7 +1438,7 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
getKillRegState(OddDeadKill) | getUndefRegState(OddUndef));
++NumSTRD2STM;
}
- NewBBI = llvm::prior(MBBI);
+ NewBBI = std::prev(MBBI);
} else {
// Split into two instructions.
unsigned NewOpc = (isLd)
@@ -1254,7 +1460,7 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
OddReg, OddDeadKill, false,
BaseReg, false, BaseUndef, false, OffUndef,
Pred, PredReg, TII, isT2);
- NewBBI = llvm::prior(MBBI);
+ NewBBI = std::prev(MBBI);
InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc,
EvenReg, EvenDeadKill, false,
BaseReg, BaseKill, BaseUndef, OffKill, OffUndef,
@@ -1274,7 +1480,7 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
EvenReg, EvenDeadKill, EvenUndef,
BaseReg, false, BaseUndef, false, OffUndef,
Pred, PredReg, TII, isT2);
- NewBBI = llvm::prior(MBBI);
+ NewBBI = std::prev(MBBI);
InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc2,
OddReg, OddDeadKill, OddUndef,
BaseReg, BaseKill, BaseUndef, OffKill, OffUndef,
@@ -1408,18 +1614,22 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
if (MBBI == E)
// Reach the end of the block, try merging the memory instructions.
TryMerge = true;
- } else
+ } else {
TryMerge = true;
+ }
if (TryMerge) {
if (NumMemOps > 1) {
// Try to find a free register to use as a new base in case it's needed.
// First advance to the instruction just before the start of the chain.
AdvanceRS(MBB, MemOps);
+
// Find a scratch register.
- unsigned Scratch = RS->FindUnusedReg(&ARM::GPRRegClass);
+ unsigned Scratch =
+ RS->FindUnusedReg(isThumb1 ? &ARM::tGPRRegClass : &ARM::GPRRegClass);
+
// Process the load / store instructions.
- RS->forward(prior(MBBI));
+ RS->forward(std::prev(MBBI));
// Merge ops.
Merges.clear();
@@ -1441,13 +1651,13 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
++NumMerges;
// RS may be pointing to an instruction that's deleted.
- RS->skipTo(prior(MBBI));
+ RS->skipTo(std::prev(MBBI));
} else if (NumMemOps == 1) {
// Try folding preceding/trailing base inc/dec into the single
// load/store.
if (MergeBaseUpdateLoadStore(MBB, MemOps[0].MBBI, TII, Advance, MBBI)) {
++NumMerges;
- RS->forward(prior(MBBI));
+ RS->forward(std::prev(MBBI));
}
}
@@ -1483,6 +1693,8 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
/// =>
/// ldmfd sp!, {..., pc}
bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
+ // Thumb1 LDM doesn't allow high registers.
+ if (isThumb1) return false;
if (MBB.empty()) return false;
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
@@ -1490,7 +1702,7 @@ bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
(MBBI->getOpcode() == ARM::BX_RET ||
MBBI->getOpcode() == ARM::tBX_RET ||
MBBI->getOpcode() == ARM::MOVPCLR)) {
- MachineInstr *PrevMI = prior(MBBI);
+ MachineInstr *PrevMI = std::prev(MBBI);
unsigned Opcode = PrevMI->getOpcode();
if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::LDMDA_UPD ||
Opcode == ARM::LDMDB_UPD || Opcode == ARM::LDMIB_UPD ||
@@ -1513,12 +1725,20 @@ bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
const TargetMachine &TM = Fn.getTarget();
+ TL = TM.getTargetLowering();
AFI = Fn.getInfo<ARMFunctionInfo>();
TII = TM.getInstrInfo();
TRI = TM.getRegisterInfo();
STI = &TM.getSubtarget<ARMSubtarget>();
RS = new RegScavenger();
isThumb2 = AFI->isThumb2Function();
+ isThumb1 = AFI->isThumbFunction() && !isThumb2;
+
+ // FIXME: Temporarily disabling for Thumb-1 due to miscompiles
+ if (isThumb1) {
+ delete RS;
+ return false;
+ }
bool Modified = false;
for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
@@ -1550,9 +1770,9 @@ namespace {
MachineRegisterInfo *MRI;
MachineFunction *MF;
- virtual bool runOnMachineFunction(MachineFunction &Fn);
+ bool runOnMachineFunction(MachineFunction &Fn) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "ARM pre- register allocation load / store optimization pass";
}
@@ -1666,11 +1886,11 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
// FIXME: VLDRS / VSTRS -> VLDRD / VSTRD
unsigned Scale = 1;
unsigned Opcode = Op0->getOpcode();
- if (Opcode == ARM::LDRi12)
+ if (Opcode == ARM::LDRi12) {
NewOpc = ARM::LDRD;
- else if (Opcode == ARM::STRi12)
+ } else if (Opcode == ARM::STRi12) {
NewOpc = ARM::STRD;
- else if (Opcode == ARM::t2LDRi8 || Opcode == ARM::t2LDRi12) {
+ } else if (Opcode == ARM::t2LDRi8 || Opcode == ARM::t2LDRi12) {
NewOpc = ARM::t2LDRDi8;
Scale = 4;
isT2 = true;
@@ -1678,8 +1898,9 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
NewOpc = ARM::t2STRDi8;
Scale = 4;
isT2 = true;
- } else
+ } else {
return false;
+ }
// Make sure the base address satisfies i64 ld / st alignment requirement.
// At the moment, we ignore the memoryoperand's value.
@@ -1724,17 +1945,6 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
return true;
}
-namespace {
- struct OffsetCompare {
- bool operator()(const MachineInstr *LHS, const MachineInstr *RHS) const {
- int LOffset = getMemoryOpOffset(LHS);
- int ROffset = getMemoryOpOffset(RHS);
- assert(LHS == RHS || LOffset != ROffset);
- return LOffset > ROffset;
- }
- };
-}
-
bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
SmallVectorImpl<MachineInstr *> &Ops,
unsigned Base, bool isLd,
@@ -1742,7 +1952,13 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
bool RetVal = false;
// Sort by offset (in reverse order).
- std::sort(Ops.begin(), Ops.end(), OffsetCompare());
+ std::sort(Ops.begin(), Ops.end(),
+ [](const MachineInstr *LHS, const MachineInstr *RHS) {
+ int LOffset = getMemoryOpOffset(LHS);
+ int ROffset = getMemoryOpOffset(RHS);
+ assert(LHS == RHS || LOffset != ROffset);
+ return LOffset > ROffset;
+ });
// The loads / stores of the same base are in order. Scan them from first to
// last and check for the following:
@@ -1751,8 +1967,8 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
while (Ops.size() > 1) {
unsigned FirstLoc = ~0U;
unsigned LastLoc = 0;
- MachineInstr *FirstOp = 0;
- MachineInstr *LastOp = 0;
+ MachineInstr *FirstOp = nullptr;
+ MachineInstr *LastOp = nullptr;
int LastOffset = 0;
unsigned LastOpcode = 0;
unsigned LastBytes = 0;
diff --git a/contrib/llvm/lib/Target/ARM/ARMMCInstLower.cpp b/contrib/llvm/lib/Target/ARM/ARMMCInstLower.cpp
index e12c9c6..023f5f8 100644
--- a/contrib/llvm/lib/Target/ARM/ARMMCInstLower.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMMCInstLower.cpp
@@ -14,25 +14,27 @@
#include "ARM.h"
#include "ARMAsmPrinter.h"
+#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
-#include "llvm/Target/Mangler.h"
using namespace llvm;
MCOperand ARMAsmPrinter::GetSymbolRef(const MachineOperand &MO,
const MCSymbol *Symbol) {
const MCExpr *Expr;
- switch (MO.getTargetFlags()) {
+ unsigned Option = MO.getTargetFlags() & ARMII::MO_OPTION_MASK;
+ switch (Option) {
default: {
Expr = MCSymbolRefExpr::Create(Symbol, MCSymbolRefExpr::VK_None,
OutContext);
- switch (MO.getTargetFlags()) {
+ switch (Option) {
default: llvm_unreachable("Unknown target flag on symbol operand");
- case 0:
+ case ARMII::MO_NO_FLAG:
break;
case ARMII::MO_LO16:
Expr = MCSymbolRefExpr::Create(Symbol, MCSymbolRefExpr::VK_None,
@@ -49,7 +51,7 @@ MCOperand ARMAsmPrinter::GetSymbolRef(const MachineOperand &MO,
}
case ARMII::MO_PLT:
- Expr = MCSymbolRefExpr::Create(Symbol, MCSymbolRefExpr::VK_ARM_PLT,
+ Expr = MCSymbolRefExpr::Create(Symbol, MCSymbolRefExpr::VK_PLT,
OutContext);
break;
}
@@ -81,9 +83,11 @@ bool ARMAsmPrinter::lowerOperand(const MachineOperand &MO,
MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
MO.getMBB()->getSymbol(), OutContext));
break;
- case MachineOperand::MO_GlobalAddress:
- MCOp = GetSymbolRef(MO, getSymbol(MO.getGlobal()));
+ case MachineOperand::MO_GlobalAddress: {
+ MCOp = GetSymbolRef(MO,
+ GetARMGVSymbol(MO.getGlobal(), MO.getTargetFlags()));
break;
+ }
case MachineOperand::MO_ExternalSymbol:
MCOp = GetSymbolRef(MO,
GetExternalSymbolSymbol(MO.getSymbolName()));
diff --git a/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.cpp
index af445e2..892b269 100644
--- a/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.cpp
@@ -12,3 +12,13 @@
using namespace llvm;
void ARMFunctionInfo::anchor() { }
+
+ARMFunctionInfo::ARMFunctionInfo(MachineFunction &MF)
+ : isThumb(MF.getTarget().getSubtarget<ARMSubtarget>().isThumb()),
+ hasThumb2(MF.getTarget().getSubtarget<ARMSubtarget>().hasThumb2()),
+ StByValParamsPadding(0), ArgRegsSaveSize(0), HasStackFrame(false),
+ RestoreSPFromFP(false), LRSpilledForFarJump(false),
+ FramePtrSpillOffset(0), GPRCS1Offset(0), GPRCS2Offset(0), DPRCSOffset(0),
+ GPRCS1Size(0), GPRCS2Size(0), DPRCSSize(0), JumpTableUId(0),
+ PICLabelUId(0), VarArgsFrameIndex(0), HasITBlocks(false),
+ GlobalBaseReg(0) {}
diff --git a/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h b/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
index 010edf3..d3fabc3 100644
--- a/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
@@ -19,6 +19,7 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/ADT/DenseMap.h"
namespace llvm {
@@ -38,7 +39,7 @@ class ARMFunctionInfo : public MachineFunctionInfo {
/// StByValParamsPadding - For parameter that is split between
/// GPRs and memory; while recovering GPRs part, when
- /// StackAlignment == 8, and GPRs-part-size mod 8 != 0,
+ /// StackAlignment > 4, and GPRs-part-size mod StackAlignment != 0,
/// we need to insert gap before parameter start address. It allows to
/// "attach" GPR-part to the part that was passed via stack.
unsigned StByValParamsPadding;
@@ -114,6 +115,14 @@ class ARMFunctionInfo : public MachineFunctionInfo {
/// relocation models.
unsigned GlobalBaseReg;
+ /// ArgumentStackSize - amount of bytes on stack consumed by the arguments
+ /// being passed on the stack
+ unsigned ArgumentStackSize;
+
+ /// CoalescedWeights - mapping of basic blocks to the rolling counter of
+ /// coalesced weights.
+ DenseMap<const MachineBasicBlock*, unsigned> CoalescedWeights;
+
public:
ARMFunctionInfo() :
isThumb(false),
@@ -126,16 +135,7 @@ public:
JumpTableUId(0), PICLabelUId(0),
VarArgsFrameIndex(0), HasITBlocks(false), GlobalBaseReg(0) {}
- explicit ARMFunctionInfo(MachineFunction &MF) :
- isThumb(MF.getTarget().getSubtarget<ARMSubtarget>().isThumb()),
- hasThumb2(MF.getTarget().getSubtarget<ARMSubtarget>().hasThumb2()),
- StByValParamsPadding(0),
- ArgRegsSaveSize(0), HasStackFrame(false), RestoreSPFromFP(false),
- LRSpilledForFarJump(false),
- FramePtrSpillOffset(0), GPRCS1Offset(0), GPRCS2Offset(0), DPRCSOffset(0),
- GPRCS1Size(0), GPRCS2Size(0), DPRCSSize(0),
- JumpTableUId(0), PICLabelUId(0),
- VarArgsFrameIndex(0), HasITBlocks(false), GlobalBaseReg(0) {}
+ explicit ARMFunctionInfo(MachineFunction &MF);
bool isThumbFunction() const { return isThumb; }
bool isThumb1OnlyFunction() const { return isThumb && !hasThumb2; }
@@ -182,6 +182,9 @@ public:
void setGPRCalleeSavedArea2Size(unsigned s) { GPRCS2Size = s; }
void setDPRCalleeSavedAreaSize(unsigned s) { DPRCSSize = s; }
+ unsigned getArgumentStackSize() const { return ArgumentStackSize; }
+ void setArgumentStackSize(unsigned size) { ArgumentStackSize = size; }
+
unsigned createJumpTableUId() {
return JumpTableUId++;
}
@@ -213,7 +216,7 @@ public:
void recordCPEClone(unsigned CPIdx, unsigned CPCloneIdx) {
if (!CPEClones.insert(std::make_pair(CPCloneIdx, CPIdx)).second)
- assert(0 && "Duplicate entries!");
+ llvm_unreachable("Duplicate entries!");
}
unsigned getOriginalCPIdx(unsigned CloneIdx) const {
@@ -223,6 +226,15 @@ public:
else
return -1U;
}
+
+ DenseMap<const MachineBasicBlock*, unsigned>::iterator getCoalescedWeight(
+ MachineBasicBlock* MBB) {
+ auto It = CoalescedWeights.find(MBB);
+ if (It == CoalescedWeights.end()) {
+ It = CoalescedWeights.insert(std::make_pair(MBB, 0)).first;
+ }
+ return It;
+ }
};
} // End llvm namespace
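For reference, the getCoalescedWeight accessor added above is a find-or-insert idiom over llvm::DenseMap: look the block up, and if it is absent, insert a zero-initialized counter and keep the resulting iterator. A minimal standalone sketch of the same idiom, with std::unordered_map standing in for DenseMap and a hypothetical Block type in place of MachineBasicBlock:

#include <unordered_map>
#include <utility>

struct Block;  // stand-in for MachineBasicBlock

using WeightMap = std::unordered_map<const Block *, unsigned>;

// Return an iterator to BB's rolling counter, creating it at zero on first use.
WeightMap::iterator getCoalescedWeight(WeightMap &Weights, const Block *BB) {
  auto It = Weights.find(BB);
  if (It == Weights.end())
    It = Weights.insert(std::make_pair(BB, 0u)).first;
  return It;
}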
diff --git a/contrib/llvm/lib/Target/ARM/ARMOptimizeBarriersPass.cpp b/contrib/llvm/lib/Target/ARM/ARMOptimizeBarriersPass.cpp
new file mode 100644
index 0000000..2a49255
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/ARMOptimizeBarriersPass.cpp
@@ -0,0 +1,101 @@
+//===-- ARMOptimizeBarriersPass - remove the second of two DMBs when there is
+// no memory access in between ------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===------------------------------------------------------------------------------------------===//
+
+#include "ARM.h"
+#include "ARMMachineFunctionInfo.h"
+#include "ARMInstrInfo.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "double barriers"
+
+STATISTIC(NumDMBsRemoved, "Number of DMBs removed");
+
+namespace {
+class ARMOptimizeBarriersPass : public MachineFunctionPass {
+public:
+ static char ID;
+ ARMOptimizeBarriersPass() : MachineFunctionPass(ID) {}
+
+ bool runOnMachineFunction(MachineFunction &Fn) override;
+
+ const char *getPassName() const override {
+ return "optimise barriers pass";
+ }
+
+private:
+};
+char ARMOptimizeBarriersPass::ID = 0;
+}
+
+// Returns whether the instruction can safely move past a DMB instruction
+// The current implementation allows this iff MI does not have any possible
+// memory access
+static bool CanMovePastDMB(const MachineInstr *MI) {
+ return !(MI->mayLoad() ||
+ MI->mayStore() ||
+ MI->hasUnmodeledSideEffects() ||
+ MI->isCall() ||
+ MI->isReturn());
+}
+
+bool ARMOptimizeBarriersPass::runOnMachineFunction(MachineFunction &MF) {
+ // Vector to store the DMBs we will remove after the first iteration
+ std::vector<MachineInstr *> ToRemove;
+ // DMBType is the Imm value of the first operand. It determines which kind of
+ // barrier it is: DMB ISH, DMB SY, DMB OSH, etc.
+ int64_t DMBType = -1;
+
+ // Find a dmb. If we can move it until the next dmb, tag the second one for
+ // removal
+ for (auto &MBB : MF) {
+ // Will be true when we have seen a DMB, and not seen any instruction since
+ // that cannot move past a DMB
+ bool IsRemovableNextDMB = false;
+ for (auto &MI : MBB) {
+ if (MI.getOpcode() == ARM::DMB) {
+ if (IsRemovableNextDMB) {
+ // If the Imm of this DMB is the same as that of the last DMB, we can
+ // tag this second DMB for removal
+ if (MI.getOperand(0).getImm() == DMBType) {
+ ToRemove.push_back(&MI);
+ } else {
+ // If it has a different DMBType, we cannot remove it, but will scan
+ // for the next DMB, recording this DMB's type as last seen DMB type
+ DMBType = MI.getOperand(0).getImm();
+ }
+ } else {
+ // After we see a DMB, a next one is removable
+ IsRemovableNextDMB = true;
+ DMBType = MI.getOperand(0).getImm();
+ }
+ } else if (!CanMovePastDMB(&MI)) {
+ // If we find an instruction that cannot move past a DMB, the next DMB is
+ // not removable
+ IsRemovableNextDMB = false;
+ }
+ }
+ }
+ // Remove the tagged DMBs
+ for (auto MI : ToRemove) {
+ MI->eraseFromParent();
+ ++NumDMBsRemoved;
+ }
+
+ return NumDMBsRemoved > 0;
+}
+
+/// createARMOptimizeBarriersPass - Returns an instance of the remove double
+/// barriers pass.
+FunctionPass *llvm::createARMOptimizeBarriersPass() {
+ return new ARMOptimizeBarriersPass();
+}
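The new pass above is a single forward scan per basic block: remember the barrier-option immediate of the last DMB seen, and if another DMB with the same immediate is reached without crossing anything that may touch memory, tag it for removal. A simplified standalone model of that scan, using a hypothetical Inst record in place of MachineInstr:

#include <cstddef>
#include <cstdint>
#include <vector>

struct Inst {
  bool IsDMB;
  std::int64_t DMBType;     // barrier option immediate (e.g. ish vs. sy)
  bool MayAccessMemory;     // models mayLoad/mayStore/call/return/side effects
};

// Return the indices of the DMBs that the scan would tag for removal.
std::vector<std::size_t> findRedundantDMBs(const std::vector<Inst> &Block) {
  std::vector<std::size_t> ToRemove;
  bool IsRemovableNextDMB = false;
  std::int64_t LastType = -1;
  for (std::size_t I = 0; I < Block.size(); ++I) {
    const Inst &MI = Block[I];
    if (MI.IsDMB) {
      if (IsRemovableNextDMB && MI.DMBType == LastType) {
        ToRemove.push_back(I);      // same barrier kind, no memory access since
      } else {
        IsRemovableNextDMB = true;  // start a new candidate run from this DMB
        LastType = MI.DMBType;
      }
    } else if (MI.MayAccessMemory) {
      IsRemovableNextDMB = false;   // a memory access keeps the next DMB alive
    }
  }
  return ToRemove;
}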
diff --git a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.cpp
index a788036..80b4b48 100644
--- a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.cpp
@@ -12,8 +12,6 @@
//===----------------------------------------------------------------------===//
#include "ARMRegisterInfo.h"
-#include "ARM.h"
-#include "ARMBaseInstrInfo.h"
using namespace llvm;
void ARMRegisterInfo::anchor() { }
diff --git a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.h b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.h
index fb1537c..3e6af3f 100644
--- a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.h
@@ -14,9 +14,7 @@
#ifndef ARMREGISTERINFO_H
#define ARMREGISTERINFO_H
-#include "ARM.h"
#include "ARMBaseRegisterInfo.h"
-#include "llvm/Target/TargetRegisterInfo.h"
namespace llvm {
diff --git a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td
index d045761..b290e7f 100644
--- a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td
+++ b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td
@@ -116,13 +116,13 @@ def D15 : ARMReg<15, "d15", [S30, S31]>, DwarfRegNum<[271]>;
}
// VFP3 defines 16 additional double registers
-def D16 : ARMFReg<16, "d16">, DwarfRegNum<[272]>;
+def D16 : ARMFReg<16, "d16">, DwarfRegNum<[272]>;
def D17 : ARMFReg<17, "d17">, DwarfRegNum<[273]>;
def D18 : ARMFReg<18, "d18">, DwarfRegNum<[274]>;
def D19 : ARMFReg<19, "d19">, DwarfRegNum<[275]>;
def D20 : ARMFReg<20, "d20">, DwarfRegNum<[276]>;
def D21 : ARMFReg<21, "d21">, DwarfRegNum<[277]>;
-def D22 : ARMFReg<22, "d22">, DwarfRegNum<[278]>;
+def D22 : ARMFReg<22, "d22">, DwarfRegNum<[278]>;
def D23 : ARMFReg<23, "d23">, DwarfRegNum<[279]>;
def D24 : ARMFReg<24, "d24">, DwarfRegNum<[280]>;
def D25 : ARMFReg<25, "d25">, DwarfRegNum<[281]>;
@@ -158,11 +158,11 @@ def Q15 : ARMReg<15, "q15", [D30, D31]>;
// Current Program Status Register.
// We model fpscr with two registers: FPSCR models the control bits and will be
// reserved. FPSCR_NZCV models the flag bits and will be unreserved. APSR_NZCV
-// models the APSR when it's accessed by some special instructions. In such cases
+// models the APSR when it's accessed by some special instructions. In such cases
// it has the same encoding as PC.
def CPSR : ARMReg<0, "cpsr">;
def APSR : ARMReg<1, "apsr">;
-def APSR_NZCV : ARMReg<15, "apsr_nzcv">;
+def APSR_NZCV : ARMReg<15, "apsr_nzcv">;
def SPSR : ARMReg<2, "spsr">;
def FPSCR : ARMReg<3, "fpscr">;
def FPSCR_NZCV : ARMReg<3, "fpscr_nzcv"> {
@@ -214,7 +214,7 @@ def GPRnopc : RegisterClass<"ARM", [i32], 32, (sub GPR, PC)> {
}
// GPRs without the PC but with APSR. Some instructions allow accessing the
-// APSR, while actually encoding PC in the register field. This is usefull
+// APSR, while actually encoding PC in the register field. This is useful
// for assembly and disassembly only.
def GPRwithAPSR : RegisterClass<"ARM", [i32], 32, (add (sub GPR, PC), APSR_NZCV)> {
let AltOrders = [(add LR, GPRnopc), (trunc GPRnopc, 8)];
diff --git a/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td b/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td
index 603e775..9a1d222 100644
--- a/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td
+++ b/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td
@@ -1894,16 +1894,26 @@ def CortexA9Model : SchedMachineModel {
let MispredictPenalty = 8; // Based on estimate of pipeline depth.
let Itineraries = CortexA9Itineraries;
+
+ // FIXME: Many vector operations were never given an itinerary. We
+ // haven't mapped these to the new model either.
+ let CompleteModel = 0;
}
//===----------------------------------------------------------------------===//
// Define each kind of processor resource and number available.
+//
+// The AGU unit has BufferSize=1 so that the latency between operations
+// that use it is considered to stall other operations.
+//
+// The FP unit has BufferSize=0 so that it is a hard dispatch
+// hazard. No instruction may be dispatched while the unit is reserved.
let SchedModel = CortexA9Model in {
def A9UnitALU : ProcResource<2>;
def A9UnitMul : ProcResource<1> { let Super = A9UnitALU; }
-def A9UnitAGU : ProcResource<1>;
+def A9UnitAGU : ProcResource<1> { let BufferSize = 1; }
def A9UnitLS : ProcResource<1>;
def A9UnitFP : ProcResource<1> { let BufferSize = 0; }
def A9UnitB : ProcResource<1>;
@@ -2217,7 +2227,7 @@ def A9WriteLMfp : SchedWriteVariant<[
SchedVar<A9PostRA, [A9WriteLMfpPostRA]>]>;
//===----------------------------------------------------------------------===//
-// Resources for other (non LDM/VLDM) Variants.
+// Resources for other (non-LDM/VLDM) Variants.
// These mov immediate writers are unconditionally expanded with
// additive latency.
@@ -2397,6 +2407,7 @@ def :ItinRW<[A9WriteV3, A9Read2], [IIC_VSUBiD, IIC_VSUBiQ, IIC_VCNTiD]>;
// ...
// VHADD/VRHADD/VQADD/VTST/VADH/VRADH
def :ItinRW<[A9WriteV4, A9Read2, A9Read2], [IIC_VBINi4D, IIC_VBINi4Q]>;
+
// VSBH/VRSBH/VHSUB/VQSUB/VABD/VCEQ/VCGE/VCGT/VMAX/VMIN/VPMAX/VPMIN/VABDL
def :ItinRW<[A9WriteV4, A9Read2], [IIC_VSUBi4D, IIC_VSUBi4Q]>;
// VQNEG/VQABS
@@ -2431,7 +2442,7 @@ def :ItinRW<[A9WriteV3], [IIC_VSHLiD, IIC_VSHLiQ]>;
def :ItinRW<[A9WriteV4], [IIC_VSHLi4D, IIC_VSHLi4Q]>;
// NEON permute
-def :ItinRW<[A9WriteV2], [IIC_VPERMD, IIC_VPERMQ, IIC_VEXTD]>;
+def :ItinRW<[A9WriteV2, A9WriteV2], [IIC_VPERMD, IIC_VPERMQ, IIC_VEXTD]>;
def :ItinRW<[A9WriteV3, A9WriteV4, ReadDefault, A9Read2],
[IIC_VPERMQ3, IIC_VEXTQ]>;
def :ItinRW<[A9WriteV3, A9Read2], [IIC_VTB1]>;
diff --git a/contrib/llvm/lib/Target/ARM/ARMScheduleSwift.td b/contrib/llvm/lib/Target/ARM/ARMScheduleSwift.td
index 8d7dbc2..b03d5ff 100644
--- a/contrib/llvm/lib/Target/ARM/ARMScheduleSwift.td
+++ b/contrib/llvm/lib/Target/ARM/ARMScheduleSwift.td
@@ -1721,7 +1721,7 @@ let SchedModel = SwiftModel in {
SchedVar<SwiftLMAddr3Pred, [SwiftWriteLM9Cy, SwiftWriteLM10Cy,
SwiftWriteLM13CyNo, SwiftWriteP01OneCycle,
SwiftVLDMPerm3]>,
- // Load of a Q register (not neccessarily true). We should not be mapping to
+ // Load of a Q register (not necessarily true). We should not be mapping to
// 4 S registers, either.
SchedVar<SwiftLMAddr4Pred, [SwiftWriteLM4Cy, SwiftWriteLM4CyNo,
SwiftWriteLM4CyNo, SwiftWriteLM4CyNo]>,
@@ -1858,7 +1858,7 @@ let SchedModel = SwiftModel in {
// Assume 5 D registers.
SchedVar<SwiftLMAddr10Pred, [SwiftWriteSTM6]>,
SchedVar<SwiftLMAddr11Pred, [SwiftWriteSTM12]>,
- // Asume three Q registers.
+ // Assume three Q registers.
SchedVar<SwiftLMAddr12Pred, [SwiftWriteSTM4]>,
SchedVar<SwiftLMAddr13Pred, [SwiftWriteSTM14]>,
// Assume 7 D registers.
diff --git a/contrib/llvm/lib/Target/ARM/ARMScheduleV6.td b/contrib/llvm/lib/Target/ARM/ARMScheduleV6.td
index 0ace9bc..57d0bfb 100644
--- a/contrib/llvm/lib/Target/ARM/ARMScheduleV6.td
+++ b/contrib/llvm/lib/Target/ARM/ARMScheduleV6.td
@@ -93,7 +93,7 @@ def ARMV6Itineraries : ProcessorItineraries<
InstrItinData<IIC_iMAC32 , [InstrStage<2, [V6_Pipe]>], [5, 1, 1, 2]>,
InstrItinData<IIC_iMUL64 , [InstrStage<3, [V6_Pipe]>], [6, 1, 1]>,
InstrItinData<IIC_iMAC64 , [InstrStage<3, [V6_Pipe]>], [6, 1, 1, 2]>,
-
+
// Integer load pipeline
//
// Immediate offset
@@ -181,7 +181,7 @@ def ARMV6Itineraries : ProcessorItineraries<
//
// Store multiple + update
InstrItinData<IIC_iStore_mu , [InstrStage<3, [V6_Pipe]>], [2]>,
-
+
// Branch
//
// no delay slots, so the latency of a branch is unimportant
diff --git a/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
index 93add6e..3dcc0df 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
@@ -11,16 +11,15 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "arm-selectiondag-info"
#include "ARMTargetMachine.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DerivedTypes.h"
using namespace llvm;
-ARMSelectionDAGInfo::ARMSelectionDAGInfo(const TargetMachine &TM)
- : TargetSelectionDAGInfo(TM),
- Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
-}
+#define DEBUG_TYPE "arm-selectiondag-info"
+
+ARMSelectionDAGInfo::ARMSelectionDAGInfo(const DataLayout &DL)
+ : TargetSelectionDAGInfo(&DL) {}
ARMSelectionDAGInfo::~ARMSelectionDAGInfo() {
}
@@ -33,6 +32,7 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
bool isVolatile, bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) const {
+ const ARMSubtarget &Subtarget = DAG.getTarget().getSubtarget<ARMSubtarget>();
// Do repeated 4-byte loads and stores. To be improved.
// This requires 4-byte alignment.
if ((Align & 3) != 0)
@@ -43,7 +43,7 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
if (!ConstantSize)
return SDValue();
uint64_t SizeVal = ConstantSize->getZExtValue();
- if (!AlwaysInline && SizeVal > Subtarget->getMaxInlineSizeThreshold())
+ if (!AlwaysInline && SizeVal > Subtarget.getMaxInlineSizeThreshold())
return SDValue();
unsigned BytesLeft = SizeVal & 3;
@@ -52,9 +52,10 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
EVT VT = MVT::i32;
unsigned VTSize = 4;
unsigned i = 0;
- const unsigned MAX_LOADS_IN_LDM = 6;
- SDValue TFOps[MAX_LOADS_IN_LDM];
- SDValue Loads[MAX_LOADS_IN_LDM];
+ // Emit a maximum of 4 loads in Thumb1 since we have fewer registers
+ const unsigned MAX_LOADS_IN_LDM = Subtarget.isThumb1Only() ? 4 : 6;
+ SDValue TFOps[6];
+ SDValue Loads[6];
uint64_t SrcOff = 0, DstOff = 0;
// Emit up to MAX_LOADS_IN_LDM loads, then a TokenFactor barrier, then the
@@ -71,7 +72,8 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
TFOps[i] = Loads[i].getValue(1);
SrcOff += VTSize;
}
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+ makeArrayRef(TFOps, i));
for (i = 0;
i < MAX_LOADS_IN_LDM && EmittedNumMemOps + i < NumMemOps; ++i) {
@@ -82,7 +84,8 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
isVolatile, false, 0);
DstOff += VTSize;
}
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+ makeArrayRef(TFOps, i));
EmittedNumMemOps += i;
}
@@ -112,7 +115,8 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
SrcOff += VTSize;
BytesLeft -= VTSize;
}
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+ makeArrayRef(TFOps, i));
i = 0;
BytesLeft = BytesLeftSave;
@@ -133,7 +137,8 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
DstOff += VTSize;
BytesLeft -= VTSize;
}
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
+ return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+ makeArrayRef(TFOps, i));
}
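In the memcpy lowering above, the old getNode(..., &TFOps[0], i) form is replaced by an ArrayRef built with makeArrayRef(TFOps, i), which views only the first i valid entries of the fixed-size buffer. A small sketch of that helper in isolation (sumFirstN is a hypothetical example function, not part of the lowering code):

#include "llvm/ADT/ArrayRef.h"

// View only the first N populated slots of a partially filled fixed array.
int sumFirstN(const int (&Buf)[6], unsigned N) {
  llvm::ArrayRef<int> Used = llvm::makeArrayRef(Buf, N);
  int Sum = 0;
  for (int V : Used)
    Sum += V;
  return Sum;
}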
// Adjust parameters for memset, EABI uses format (ptr, size, value),
@@ -145,8 +150,10 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
SDValue Src, SDValue Size,
unsigned Align, bool isVolatile,
MachinePointerInfo DstPtrInfo) const {
- // Use default for non AAPCS (or Darwin) subtargets
- if (!Subtarget->isAAPCS_ABI() || Subtarget->isTargetDarwin())
+ const ARMSubtarget &Subtarget = DAG.getTarget().getSubtarget<ARMSubtarget>();
+ // Use default for non-AAPCS (or MachO) subtargets
+ if (!Subtarget.isAAPCS_ABI() || Subtarget.isTargetMachO() ||
+ Subtarget.isTargetWindows())
return SDValue();
const ARMTargetLowering &TLI =
@@ -179,22 +186,14 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
Args.push_back(Entry);
// Emit __eabi_memset call
- TargetLowering::CallLoweringInfo CLI(Chain,
- Type::getVoidTy(*DAG.getContext()), // return type
- false, // return sign ext
- false, // return zero ext
- false, // is var arg
- false, // is in regs
- 0, // number of fixed arguments
- TLI.getLibcallCallingConv(RTLIB::MEMSET), // call conv
- false, // is tail call
- false, // does not return
- false, // is return val used
- DAG.getExternalSymbol(TLI.getLibcallName(RTLIB::MEMSET),
- TLI.getPointerTy()), // callee
- Args, DAG, dl);
- std::pair<SDValue,SDValue> CallResult =
- TLI.LowerCallTo(CLI);
-
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl).setChain(Chain)
+ .setCallee(TLI.getLibcallCallingConv(RTLIB::MEMSET),
+ Type::getVoidTy(*DAG.getContext()),
+ DAG.getExternalSymbol(TLI.getLibcallName(RTLIB::MEMSET),
+ TLI.getPointerTy()), std::move(Args), 0)
+ .setDiscardResult();
+
+ std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);
return CallResult.second;
}
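The memset lowering now builds its libcall description with CallLoweringInfo's chained setters (setDebugLoc/setChain/setCallee/setDiscardResult) instead of the long positional constructor. The enabling idiom is simply setters that return a reference to the object; a minimal standalone sketch of that idiom with a hypothetical CallDesc type (not the LLVM class), using an illustrative callee name:

#include <string>
#include <utility>

struct CallDesc {
  std::string Callee;
  unsigned NumFixedArgs = 0;
  bool DiscardResult = false;

  // Each setter returns *this so the calls can be chained.
  CallDesc &setCallee(std::string Name, unsigned NumArgs) {
    Callee = std::move(Name);
    NumFixedArgs = NumArgs;
    return *this;
  }
  CallDesc &setDiscardResult() {
    DiscardResult = true;
    return *this;
  }
};

// Usage mirrors the shape of the lowering code above.
CallDesc describeMemsetCall() {
  CallDesc CLI;
  CLI.setCallee("__aeabi_memset", 3).setDiscardResult();
  return CLI;
}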
diff --git a/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.h b/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.h
index 56c9375..13769dc 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.h
@@ -36,31 +36,25 @@ namespace ARM_AM {
} // end namespace ARM_AM
class ARMSelectionDAGInfo : public TargetSelectionDAGInfo {
- /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
- /// make the right decision when generating code for different targets.
- const ARMSubtarget *Subtarget;
-
public:
- explicit ARMSelectionDAGInfo(const TargetMachine &TM);
+ explicit ARMSelectionDAGInfo(const DataLayout &DL);
~ARMSelectionDAGInfo();
- virtual
SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
bool isVolatile, bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
- MachinePointerInfo SrcPtrInfo) const;
+ MachinePointerInfo SrcPtrInfo) const override;
// Adjust parameters for memset, see RTABI section 4.3.4
- virtual
SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Op1, SDValue Op2,
SDValue Op3, unsigned Align,
bool isVolatile,
- MachinePointerInfo DstPtrInfo) const;
+ MachinePointerInfo DstPtrInfo) const override;
};
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp b/contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp
index a116298..c1b4562 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp
@@ -12,21 +12,33 @@
//===----------------------------------------------------------------------===//
#include "ARMSubtarget.h"
-#include "ARMBaseInstrInfo.h"
-#include "ARMBaseRegisterInfo.h"
+#include "ARMFrameLowering.h"
+#include "ARMISelLowering.h"
+#include "ARMInstrInfo.h"
+#include "ARMJITInfo.h"
+#include "ARMSelectionDAGInfo.h"
+#include "ARMSubtarget.h"
+#include "ARMMachineFunctionInfo.h"
+#include "Thumb1FrameLowering.h"
+#include "Thumb1InstrInfo.h"
+#include "Thumb2InstrInfo.h"
#include "llvm/IR/Attributes.h"
-#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "arm-subtarget"
#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "ARMGenSubtargetInfo.inc"
-using namespace llvm;
-
static cl::opt<bool>
ReserveR9("arm-reserve-r9", cl::Hidden,
cl::desc("Reserve R9, making it unavailable as GPR"));
@@ -74,20 +86,89 @@ IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT),
"Allow IT blocks based on ARMv7"),
clEnumValEnd));
-ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, const TargetOptions &Options)
- : ARMGenSubtargetInfo(TT, CPU, FS)
- , ARMProcFamily(Others)
- , ARMProcClass(None)
- , stackAlignment(4)
- , CPUString(CPU)
- , TargetTriple(TT)
- , Options(Options)
- , TargetABI(ARM_ABI_APCS) {
+static std::string computeDataLayout(ARMSubtarget &ST) {
+ std::string Ret = "";
+
+ if (ST.isLittle())
+ // Little endian.
+ Ret += "e";
+ else
+ // Big endian.
+ Ret += "E";
+
+ Ret += DataLayout::getManglingComponent(ST.getTargetTriple());
+
+ // Pointers are 32 bits and aligned to 32 bits.
+ Ret += "-p:32:32";
+
+ // On thumb, i16, i8 and i1 have natural alignment requirements, but we try to
+ // align to 32.
+ if (ST.isThumb())
+ Ret += "-i1:8:32-i8:8:32-i16:16:32";
+
+ // ABIs other than APCS have 64 bit integers with natural alignment.
+ if (!ST.isAPCS_ABI())
+ Ret += "-i64:64";
+
+ // We have 64 bits floats. The APCS ABI requires them to be aligned to 32
+ // bits, others to 64 bits. We always try to align to 64 bits.
+ if (ST.isAPCS_ABI())
+ Ret += "-f64:32:64";
+
+ // We have 128 and 64 bit vectors. The APCS ABI aligns them to 32 bits, others
+ // to 64. We always try to give them natural alignment.
+ if (ST.isAPCS_ABI())
+ Ret += "-v64:32:64-v128:32:128";
+ else
+ Ret += "-v128:64:128";
+
+ // On thumb and APCS, only try to align aggregates to 32 bits (the default is
+ // 64 bits).
+ if (ST.isThumb() || ST.isAPCS_ABI())
+ Ret += "-a:0:32";
+
+ // Integer registers are 32 bits.
+ Ret += "-n32";
+
+ // The stack is 128 bit aligned on NaCl, 64 bit aligned on AAPCS and 32 bit
+ // aligned everywhere else.
+ if (ST.isTargetNaCl())
+ Ret += "-S128";
+ else if (ST.isAAPCS_ABI())
+ Ret += "-S64";
+ else
+ Ret += "-S32";
+
+ return Ret;
+}
+
+/// initializeSubtargetDependencies - Initializes using a CPU and feature string
+/// so that we can use initializer lists for subtarget initialization.
+ARMSubtarget &ARMSubtarget::initializeSubtargetDependencies(StringRef CPU,
+ StringRef FS) {
initializeEnvironment();
resetSubtargetFeatures(CPU, FS);
+ return *this;
}
+ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &CPU,
+ const std::string &FS, TargetMachine &TM,
+ bool IsLittle, const TargetOptions &Options)
+ : ARMGenSubtargetInfo(TT, CPU, FS), ARMProcFamily(Others),
+ ARMProcClass(None), stackAlignment(4), CPUString(CPU), IsLittle(IsLittle),
+ TargetTriple(TT), Options(Options), TargetABI(ARM_ABI_UNKNOWN),
+ DL(computeDataLayout(initializeSubtargetDependencies(CPU, FS))),
+ TSInfo(DL), JITInfo(),
+ InstrInfo(isThumb1Only()
+ ? (ARMBaseInstrInfo *)new Thumb1InstrInfo(*this)
+ : !isThumb()
+ ? (ARMBaseInstrInfo *)new ARMInstrInfo(*this)
+ : (ARMBaseInstrInfo *)new Thumb2InstrInfo(*this)),
+ TLInfo(TM),
+ FrameLowering(!isThumb1Only()
+ ? new ARMFrameLowering(*this)
+ : (ARMFrameLowering *)new Thumb1FrameLowering(*this)) {}
+
void ARMSubtarget::initializeEnvironment() {
HasV4TOps = false;
HasV5TOps = false;
@@ -110,7 +191,6 @@ void ARMSubtarget::initializeEnvironment() {
InThumbMode = false;
HasThumb2 = false;
NoARM = false;
- PostRAScheduler = false;
IsR9Reserved = ReserveR9;
UseMovt = false;
SupportsTailCall = false;
@@ -131,6 +211,7 @@ void ARMSubtarget::initializeEnvironment() {
HasTrustZone = false;
HasCrypto = false;
HasCRC = false;
+ HasZeroCycleZeroing = false;
AllowsUnalignedMem = false;
Thumb2DSP = false;
UseNaClTrap = false;
@@ -175,10 +256,9 @@ void ARMSubtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) {
}
ParseSubtargetFeatures(CPUString, ArchFS);
- // Thumb2 implies at least V6T2. FIXME: Fix tests to explicitly specify a
- // ARM version or CPU and then remove this.
- if (!HasV6T2Ops && hasThumb2())
- HasV4TOps = HasV5TOps = HasV5TEOps = HasV6Ops = HasV6MOps = HasV6T2Ops = true;
+ // FIXME: This used enable V6T2 support implicitly for Thumb2 mode.
+ // Assert this for now to make the change obvious.
+ assert(hasV6T2Ops() || !hasThumb2());
// Keep a pointer to static instruction cost data for the specified CPU.
SchedModel = getSchedModelForCPU(CPUString);
@@ -186,34 +266,54 @@ void ARMSubtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) {
// Initialize scheduling itinerary for the specified CPU.
InstrItins = getInstrItineraryForCPU(CPUString);
- if ((TargetTriple.getTriple().find("eabi") != std::string::npos) ||
- (isTargetIOS() && isMClass()))
- // FIXME: We might want to separate AAPCS and EABI. Some systems, e.g.
- // Darwin-EABI conforms to AACPS but not the rest of EABI.
+ if (TargetABI == ARM_ABI_UNKNOWN) {
+ switch (TargetTriple.getEnvironment()) {
+ case Triple::Android:
+ case Triple::EABI:
+ case Triple::EABIHF:
+ case Triple::GNUEABI:
+ case Triple::GNUEABIHF:
+ TargetABI = ARM_ABI_AAPCS;
+ break;
+ default:
+ if ((isTargetIOS() && isMClass()) ||
+ (TargetTriple.isOSBinFormatMachO() &&
+ TargetTriple.getOS() == Triple::UnknownOS))
+ TargetABI = ARM_ABI_AAPCS;
+ else
+ TargetABI = ARM_ABI_APCS;
+ break;
+ }
+ }
+
+ // FIXME: this is invalid for WindowsCE
+ if (isTargetWindows()) {
TargetABI = ARM_ABI_AAPCS;
+ NoARM = true;
+ }
if (isAAPCS_ABI())
stackAlignment = 8;
+ if (isTargetNaCl())
+ stackAlignment = 16;
UseMovt = hasV6T2Ops() && ArmUseMOVT;
- if (!isTargetIOS()) {
- IsR9Reserved = ReserveR9;
- } else {
+ if (isTargetMachO()) {
IsR9Reserved = ReserveR9 | !HasV6Ops;
- SupportsTailCall = !getTargetTriple().isOSVersionLT(5, 0);
+ SupportsTailCall = !isTargetIOS() || !getTargetTriple().isOSVersionLT(5, 0);
+ } else {
+ IsR9Reserved = ReserveR9;
+ SupportsTailCall = !isThumb1Only();
}
- if (!isThumb() || hasThumb2())
- PostRAScheduler = true;
-
switch (Align) {
case DefaultAlign:
// Assume pre-ARMv6 doesn't support unaligned accesses.
//
// ARMv6 may or may not support unaligned accesses depending on the
// SCTLR.U bit, which is architecture-specific. We assume ARMv6
- // Darwin targets support unaligned accesses, and others don't.
+ // Darwin and NetBSD targets support unaligned accesses, and others don't.
//
// ARMv7 always has SCTLR.U set to 1, but it has a new SCTLR.A bit
// which raises an alignment fault on unaligned accesses. Linux
@@ -222,9 +322,15 @@ void ARMSubtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) {
// Linux targets support unaligned accesses. The same goes for NaCl.
//
// The above behavior is consistent with GCC.
- AllowsUnalignedMem = (
- (hasV7Ops() && (isTargetLinux() || isTargetNaCl())) ||
- (hasV6Ops() && isTargetDarwin()));
+ AllowsUnalignedMem =
+ (hasV7Ops() && (isTargetLinux() || isTargetNaCl() ||
+ isTargetNetBSD())) ||
+ (hasV6Ops() && (isTargetMachO() || isTargetNetBSD()));
+ // The one exception is cortex-m0, which despite being v6, does not
+ // support unaligned accesses. Rather than make the above boolean
+ // expression even more obtuse, just override the value here.
+ if (isThumb1Only() && isMClass())
+ AllowsUnalignedMem = false;
break;
case StrictAlign:
AllowsUnalignedMem = false;
@@ -266,7 +372,7 @@ ARMSubtarget::GVIsIndirectSymbol(const GlobalValue *GV,
if (GV->isDeclaration() && !GV->isMaterializable())
isDecl = true;
- if (!isTargetDarwin()) {
+ if (!isTargetMachO()) {
// Extra load is needed for all externally visible.
if (GV->hasLocalLinkage() || GV->hasHiddenVisibility())
return false;
@@ -315,10 +421,20 @@ bool ARMSubtarget::hasSinCos() const {
!getTargetTriple().isOSVersionLT(7, 0);
}
-bool ARMSubtarget::enablePostRAScheduler(
- CodeGenOpt::Level OptLevel,
- TargetSubtargetInfo::AntiDepBreakMode& Mode,
- RegClassVector& CriticalPathRCs) const {
- Mode = TargetSubtargetInfo::ANTIDEP_NONE;
- return PostRAScheduler && OptLevel >= CodeGenOpt::Default;
+// This overrides the PostRAScheduler bit in the SchedModel for any CPU.
+bool ARMSubtarget::enablePostMachineScheduler() const {
+ return (!isThumb() || hasThumb2());
+}
+
+bool ARMSubtarget::enableAtomicExpandLoadLinked() const {
+ return hasAnyDataBarrier() && !isThumb1Only();
+}
+
+bool ARMSubtarget::useMovt(const MachineFunction &MF) const {
+ // NOTE: Windows on ARM needs to use movw/movt pairs to materialise 32-bit
+ // immediates as it is inherently position independent, and may be out of
+ // range otherwise.
+ return UseMovt && (isTargetWindows() ||
+ !MF.getFunction()->getAttributes().hasAttribute(
+ AttributeSet::FunctionIndex, Attribute::MinSize));
}
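computeDataLayout above assembles the module's DataLayout string piece by piece from endianness, Thumb mode and the ABI. As a worked example of the same composition, a sketch of the string for one hypothetical configuration (little-endian, Thumb, AAPCS, not NaCl); the mangling component that DataLayout::getManglingComponent contributes in the real code is omitted here:

#include <string>

// Follows the same branches as computeDataLayout for a little-endian,
// Thumb, AAPCS, non-NaCl target.
std::string exampleThumbAAPCSLayout() {
  std::string Ret = "e";                   // little endian
  Ret += "-p:32:32";                       // 32-bit pointers, 32-bit aligned
  Ret += "-i1:8:32-i8:8:32-i16:16:32";     // Thumb: small ints padded to 32 bits
  Ret += "-i64:64";                        // non-APCS: naturally aligned i64
  Ret += "-v128:64:128";                   // non-APCS vector alignment
  Ret += "-a:0:32";                        // Thumb: aggregates aligned to 32 bits
  Ret += "-n32";                           // native integer width is 32 bits
  Ret += "-S64";                           // AAPCS: stack aligned to 64 bits
  // Result: "e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:64-v128:64:128-a:0:32-n32-S64"
  return Ret;
}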
diff --git a/contrib/llvm/lib/Target/ARM/ARMSubtarget.h b/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
index 5276901..f8283b0 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -14,8 +14,20 @@
#ifndef ARMSUBTARGET_H
#define ARMSUBTARGET_H
+
+#include "ARMFrameLowering.h"
+#include "ARMISelLowering.h"
+#include "ARMInstrInfo.h"
+#include "ARMJITInfo.h"
+#include "ARMSelectionDAGInfo.h"
+#include "ARMSubtarget.h"
+#include "Thumb1FrameLowering.h"
+#include "Thumb1InstrInfo.h"
+#include "Thumb2InstrInfo.h"
+#include "ARMJITInfo.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <string>
@@ -31,7 +43,8 @@ class TargetOptions;
class ARMSubtarget : public ARMGenSubtargetInfo {
protected:
enum ARMProcFamilyEnum {
- Others, CortexA5, CortexA8, CortexA9, CortexA15, CortexR5, Swift, CortexA53, CortexA57
+ Others, CortexA5, CortexA7, CortexA8, CortexA9, CortexA12, CortexA15,
+ CortexR5, Swift, CortexA53, CortexA57, Krait
};
enum ARMProcClassEnum {
None, AClass, RClass, MClass
@@ -92,9 +105,6 @@ protected:
/// NoARM - True if subtarget does not support ARM mode execution.
bool NoARM;
- /// PostRAScheduler - True if using post-register-allocation scheduler.
- bool PostRAScheduler;
-
/// IsR9Reserved - True if R9 is a not available as general purpose register.
bool IsR9Reserved;
@@ -172,6 +182,10 @@ protected:
/// HasCRC - if true, processor supports CRC instructions
bool HasCRC;
+ /// If true, the instructions "vmov.i32 d0, #0" and "vmov.i32 q0, #0" are
+ /// particularly effective at zeroing a VFP register.
+ bool HasZeroCycleZeroing;
+
/// AllowsUnalignedMem - If true, the subtarget allows unaligned memory
/// accesses for some types. For details, see
/// ARMTargetLowering::allowsUnalignedMemoryAccesses().
@@ -198,6 +212,9 @@ protected:
/// CPUString - String name of used CPU.
std::string CPUString;
+ /// IsLittle - The target is Little Endian
+ bool IsLittle;
+
/// TargetTriple - What processor and OS we're targeting.
Triple TargetTriple;
@@ -212,6 +229,7 @@ protected:
public:
enum {
+ ARM_ABI_UNKNOWN,
ARM_ABI_APCS,
ARM_ABI_AAPCS // ARM EABI
} TargetABI;
@@ -220,22 +238,45 @@ protected:
/// of the specified triple.
///
ARMSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, const TargetOptions &Options);
+ const std::string &FS, TargetMachine &TM, bool IsLittle,
+ const TargetOptions &Options);
/// getMaxInlineSizeThreshold - Returns the maximum memset / memcpy size
/// that still makes it profitable to inline the call.
unsigned getMaxInlineSizeThreshold() const {
- // FIXME: For now, we don't lower memcpy's to loads / stores for Thumb1.
- // Change this once Thumb1 ldmia / stmia support is added.
- return isThumb1Only() ? 0 : 64;
+ return 64;
}
/// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
/// \brief Reset the features for the ARM target.
- virtual void resetSubtargetFeatures(const MachineFunction *MF);
+ void resetSubtargetFeatures(const MachineFunction *MF) override;
+
+ /// initializeSubtargetDependencies - Initializes using a CPU and feature string
+ /// so that we can use initializer lists for subtarget initialization.
+ ARMSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
+
+ const DataLayout *getDataLayout() const { return &DL; }
+ const ARMSelectionDAGInfo *getSelectionDAGInfo() const { return &TSInfo; }
+ ARMJITInfo *getJITInfo() { return &JITInfo; }
+ const ARMBaseInstrInfo *getInstrInfo() const { return InstrInfo.get(); }
+ const ARMTargetLowering *getTargetLowering() const { return &TLInfo; }
+ const ARMFrameLowering *getFrameLowering() const { return FrameLowering.get(); }
+ const ARMBaseRegisterInfo *getRegisterInfo() const {
+ return &InstrInfo->getRegisterInfo();
+ }
+
private:
+ const DataLayout DL;
+ ARMSelectionDAGInfo TSInfo;
+ ARMJITInfo JITInfo;
+ // Either Thumb1InstrInfo or Thumb2InstrInfo.
+ std::unique_ptr<ARMBaseInstrInfo> InstrInfo;
+ ARMTargetLowering TLInfo;
+ // Either Thumb1FrameLowering or ARMFrameLowering.
+ std::unique_ptr<ARMFrameLowering> FrameLowering;
+
void initializeEnvironment();
void resetSubtargetFeatures(StringRef CPU, StringRef FS);
public:
@@ -251,13 +292,15 @@ public:
bool hasV8Ops() const { return HasV8Ops; }
bool isCortexA5() const { return ARMProcFamily == CortexA5; }
+ bool isCortexA7() const { return ARMProcFamily == CortexA7; }
bool isCortexA8() const { return ARMProcFamily == CortexA8; }
bool isCortexA9() const { return ARMProcFamily == CortexA9; }
bool isCortexA15() const { return ARMProcFamily == CortexA15; }
bool isSwift() const { return ARMProcFamily == Swift; }
bool isCortexM3() const { return CPUString == "cortex-m3"; }
- bool isLikeA9() const { return isCortexA9() || isCortexA15(); }
+ bool isLikeA9() const { return isCortexA9() || isCortexA15() || isKrait(); }
bool isCortexR5() const { return ARMProcFamily == CortexR5; }
+ bool isKrait() const { return ARMProcFamily == Krait; }
bool hasARMOps() const { return !NoARM; }
@@ -286,6 +329,7 @@ public:
bool isFPOnlySP() const { return FPOnlySP; }
bool hasPerfMon() const { return HasPerfMon; }
bool hasTrustZone() const { return HasTrustZone; }
+ bool hasZeroCycleZeroing() const { return HasZeroCycleZeroing; }
bool prefers32BitThumb() const { return Pref32BitThumb; }
bool avoidCPSRPartialUpdate() const { return AvoidCPSRPartialUpdate; }
bool avoidMOVsShifterOperand() const { return AvoidMOVsShifterOperand; }
@@ -299,22 +343,59 @@ public:
const Triple &getTargetTriple() const { return TargetTriple; }
- bool isTargetIOS() const { return TargetTriple.isiOS(); }
bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
- bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
+ bool isTargetIOS() const { return TargetTriple.isiOS(); }
bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
- bool isTargetELF() const { return !isTargetDarwin(); }
+ bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
+ bool isTargetNetBSD() const { return TargetTriple.getOS() == Triple::NetBSD; }
+ bool isTargetWindows() const { return TargetTriple.isOSWindows(); }
+
+ bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
+ bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
+ bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
+
// ARM EABI is the bare-metal EABI described in ARM ABI documents and
// can be accessed via -target arm-none-eabi. This is NOT GNUEABI.
// FIXME: Add a flag for bare-metal for that target and set Triple::EABI
// even for GNUEABI, so we can make a distinction here and still conform to
// the EABI on GNU (and Android) mode. This requires change in Clang, too.
+ // FIXME: The Darwin exception is temporary, while we move users to
+ // "*-*-*-macho" triples as quickly as possible.
bool isTargetAEABI() const {
- return TargetTriple.getEnvironment() == Triple::EABI;
+ return (TargetTriple.getEnvironment() == Triple::EABI ||
+ TargetTriple.getEnvironment() == Triple::EABIHF) &&
+ !isTargetDarwin() && !isTargetWindows();
}
- bool isAPCS_ABI() const { return TargetABI == ARM_ABI_APCS; }
- bool isAAPCS_ABI() const { return TargetABI == ARM_ABI_AAPCS; }
+ // ARM Targets that support EHABI exception handling standard
+ // Darwin uses SjLj. Other targets might need more checks.
+ bool isTargetEHABICompatible() const {
+ return (TargetTriple.getEnvironment() == Triple::EABI ||
+ TargetTriple.getEnvironment() == Triple::GNUEABI ||
+ TargetTriple.getEnvironment() == Triple::EABIHF ||
+ TargetTriple.getEnvironment() == Triple::GNUEABIHF ||
+ TargetTriple.getEnvironment() == Triple::Android) &&
+ !isTargetDarwin() && !isTargetWindows();
+ }
+
+ bool isTargetHardFloat() const {
+ // FIXME: this is invalid for WindowsCE
+ return TargetTriple.getEnvironment() == Triple::GNUEABIHF ||
+ TargetTriple.getEnvironment() == Triple::EABIHF ||
+ isTargetWindows();
+ }
+ bool isTargetAndroid() const {
+ return TargetTriple.getEnvironment() == Triple::Android;
+ }
+
+ bool isAPCS_ABI() const {
+ assert(TargetABI != ARM_ABI_UNKNOWN);
+ return TargetABI == ARM_ABI_APCS;
+ }
+ bool isAAPCS_ABI() const {
+ assert(TargetABI != ARM_ABI_UNKNOWN);
+ return TargetABI == ARM_ABI_AAPCS;
+ }
bool isThumb() const { return InThumbMode; }
bool isThumb1Only() const { return InThumbMode && !HasThumb2; }
@@ -326,7 +407,8 @@ public:
bool isR9Reserved() const { return IsR9Reserved; }
- bool useMovt() const { return UseMovt && hasV6T2Ops(); }
+ bool useMovt(const MachineFunction &MF) const;
+
bool supportsTailCall() const { return SupportsTailCall; }
bool allowsUnalignedMem() const { return AllowsUnalignedMem; }
@@ -335,16 +417,19 @@ public:
const std::string & getCPUString() const { return CPUString; }
+ bool isLittle() const { return IsLittle; }
+
unsigned getMispredictionPenalty() const;
-
+
/// This function returns true if the target has sincos() routine in its
/// compiler runtime or math libraries.
bool hasSinCos() const;
- /// enablePostRAScheduler - True at 'More' optimization.
- bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
- TargetSubtargetInfo::AntiDepBreakMode& Mode,
- RegClassVector& CriticalPathRCs) const;
+ /// True for some subtargets at > -O0.
+ bool enablePostMachineScheduler() const override;
+
+ // enableAtomicExpandLoadLinked - True if we need to expand our atomics.
+ bool enableAtomicExpandLoadLinked() const override;
/// getInstrItins - Return the instruction itineraies based on subtarget
/// selection.
@@ -358,6 +443,7 @@ public:
/// GVIsIndirectSymbol - true if the GV will be accessed via an indirect
/// symbol.
bool GVIsIndirectSymbol(const GlobalValue *GV, Reloc::Model RelocM) const;
+
};
} // End llvm namespace
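The header above moves ownership of the mode-specific InstrInfo and FrameLowering objects into the subtarget as std::unique_ptr members, selected once in the constructor's initializer list. A minimal standalone sketch of that selection pattern, with hypothetical Base/ARMImpl/Thumb1Impl/Thumb2Impl types standing in for the real classes:

#include <memory>

struct Base { virtual ~Base() = default; };
struct ARMImpl    : Base {};
struct Thumb1Impl : Base {};
struct Thumb2Impl : Base {};

class Subtarget {
  // Exactly one implementation is picked when the subtarget is constructed.
  std::unique_ptr<Base> Impl;

public:
  Subtarget(bool IsThumb, bool HasThumb2)
      : Impl(!IsThumb    ? std::unique_ptr<Base>(new ARMImpl())
             : HasThumb2 ? std::unique_ptr<Base>(new Thumb2Impl())
                         : std::unique_ptr<Base>(new Thumb1Impl())) {}

  const Base *getImpl() const { return Impl.get(); }
};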
diff --git a/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp
index c2bf788..d85194b 100644
--- a/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#include "ARMTargetMachine.h"
#include "ARM.h"
+#include "ARMTargetMachine.h"
#include "ARMFrameLowering.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -24,19 +24,22 @@
using namespace llvm;
static cl::opt<bool>
-EnableGlobalMerge("global-merge", cl::Hidden,
- cl::desc("Enable global merge pass"),
- cl::init(true));
-
-static cl::opt<bool>
DisableA15SDOptimization("disable-a15-sd-optimization", cl::Hidden,
cl::desc("Inhibit optimization of S->D register accesses on A15"),
cl::init(false));
+static cl::opt<bool>
+EnableAtomicTidy("arm-atomic-cfg-tidy", cl::Hidden,
+ cl::desc("Run SimplifyCFG after expanding atomic operations"
+ " to make use of cmpxchg flow-based information"),
+ cl::init(true));
+
extern "C" void LLVMInitializeARMTarget() {
// Register the target.
- RegisterTargetMachine<ARMTargetMachine> X(TheARMTarget);
- RegisterTargetMachine<ThumbTargetMachine> Y(TheThumbTarget);
+ RegisterTargetMachine<ARMLETargetMachine> X(TheARMLETarget);
+ RegisterTargetMachine<ARMBETargetMachine> Y(TheARMBETarget);
+ RegisterTargetMachine<ThumbLETargetMachine> A(TheThumbLETarget);
+ RegisterTargetMachine<ThumbBETargetMachine> B(TheThumbBETarget);
}
@@ -46,14 +49,14 @@ ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
- : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- Subtarget(TT, CPU, FS, Options),
- JITInfo(),
- InstrItins(Subtarget.getInstrItineraryData()) {
- // Default to soft float ABI
+ CodeGenOpt::Level OL, bool isLittle)
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ Subtarget(TT, CPU, FS, *this, isLittle, Options) {
+
+ // Default to triple-appropriate float ABI
if (Options.FloatABIType == FloatABI::Default)
- this->Options.FloatABIType = FloatABI::Soft;
+ this->Options.FloatABIType =
+ Subtarget.isTargetHardFloat() ? FloatABI::Hard : FloatABI::Soft;
}
void ARMBaseTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
@@ -67,60 +70,65 @@ void ARMBaseTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
void ARMTargetMachine::anchor() { }
-ARMTargetMachine::ARMTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- const TargetOptions &Options,
+ARMTargetMachine::ARMTargetMachine(const Target &T, StringRef TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
- : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- InstrInfo(Subtarget),
- DL(Subtarget.isAPCS_ABI() ?
- std::string("e-p:32:32-f64:32:64-i64:32:64-"
- "v128:32:128-v64:32:64-n32-S32") :
- Subtarget.isAAPCS_ABI() ?
- std::string("e-p:32:32-f64:64:64-i64:64:64-"
- "v128:64:128-v64:64:64-n32-S64") :
- std::string("e-p:32:32-f64:64:64-i64:64:64-"
- "v128:64:128-v64:64:64-n32-S32")),
- TLInfo(*this),
- TSInfo(*this),
- FrameLowering(Subtarget) {
+ CodeGenOpt::Level OL, bool isLittle)
+ : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
initAsmInfo();
if (!Subtarget.hasARMOps())
report_fatal_error("CPU: '" + Subtarget.getCPUString() + "' does not "
"support ARM mode execution!");
}
+void ARMLETargetMachine::anchor() { }
+
+ARMLETargetMachine::ARMLETargetMachine(const Target &T, StringRef TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
+
+void ARMBETargetMachine::anchor() { }
+
+ARMBETargetMachine::ARMBETargetMachine(const Target &T, StringRef TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
+
void ThumbTargetMachine::anchor() { }
ThumbTargetMachine::ThumbTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
- : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- InstrInfo(Subtarget.hasThumb2()
- ? ((ARMBaseInstrInfo*)new Thumb2InstrInfo(Subtarget))
- : ((ARMBaseInstrInfo*)new Thumb1InstrInfo(Subtarget))),
- DL(Subtarget.isAPCS_ABI() ?
- std::string("e-p:32:32-f64:32:64-i64:32:64-"
- "i16:16:32-i8:8:32-i1:8:32-"
- "v128:32:128-v64:32:64-a:0:32-n32-S32") :
- Subtarget.isAAPCS_ABI() ?
- std::string("e-p:32:32-f64:64:64-i64:64:64-"
- "i16:16:32-i8:8:32-i1:8:32-"
- "v128:64:128-v64:64:64-a:0:32-n32-S64") :
- std::string("e-p:32:32-f64:64:64-i64:64:64-"
- "i16:16:32-i8:8:32-i1:8:32-"
- "v128:64:128-v64:64:64-a:0:32-n32-S32")),
- TLInfo(*this),
- TSInfo(*this),
- FrameLowering(Subtarget.hasThumb2()
- ? new ARMFrameLowering(Subtarget)
- : (ARMFrameLowering*)new Thumb1FrameLowering(Subtarget)) {
+ CodeGenOpt::Level OL, bool isLittle)
+ : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL,
+ isLittle) {
initAsmInfo();
}
+void ThumbLETargetMachine::anchor() { }
+
+ThumbLETargetMachine::ThumbLETargetMachine(const Target &T, StringRef TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
+
+void ThumbBETargetMachine::anchor() { }
+
+ThumbBETargetMachine::ThumbBETargetMachine(const Target &T, StringRef TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
+
namespace {
/// ARM Code Generator Pass Configuration Options.
class ARMPassConfig : public TargetPassConfig {
@@ -136,11 +144,12 @@ public:
return *getARMTargetMachine().getSubtargetImpl();
}
- virtual bool addPreISel();
- virtual bool addInstSelector();
- virtual bool addPreRegAlloc();
- virtual bool addPreSched2();
- virtual bool addPreEmitPass();
+ void addIRPasses() override;
+ bool addPreISel() override;
+ bool addInstSelector() override;
+ bool addPreRegAlloc() override;
+ bool addPreSched2() override;
+ bool addPreEmitPass() override;
};
} // namespace
@@ -148,8 +157,22 @@ TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) {
return new ARMPassConfig(this, PM);
}
+void ARMPassConfig::addIRPasses() {
+ addPass(createAtomicExpandLoadLinkedPass(TM));
+
+ // Cmpxchg instructions are often used with a subsequent comparison to
+ // determine whether they succeeded. We can exploit existing control-flow in
+ // ldrex/strex loops to simplify this, but it needs tidying up.
+ const ARMSubtarget *Subtarget = &getARMSubtarget();
+ if (Subtarget->hasAnyDataBarrier() && !Subtarget->isThumb1Only())
+ if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
+ addPass(createCFGSimplificationPass());
+
+ TargetPassConfig::addIRPasses();
+}
+
bool ARMPassConfig::addPreISel() {
- if (TM->getOptLevel() != CodeGenOpt::None && EnableGlobalMerge)
+ if (TM->getOptLevel() != CodeGenOpt::None)
addPass(createGlobalMergePass(TM));
return false;
@@ -166,8 +189,7 @@ bool ARMPassConfig::addInstSelector() {
}
bool ARMPassConfig::addPreRegAlloc() {
- // FIXME: temporarily disabling load / store optimization pass for Thumb1.
- if (getOptLevel() != CodeGenOpt::None && !getARMSubtarget().isThumb1Only())
+ if (getOptLevel() != CodeGenOpt::None)
addPass(createARMLoadStoreOptimizationPass(true));
if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA9())
addPass(createMLxExpansionPass());
@@ -181,12 +203,10 @@ bool ARMPassConfig::addPreRegAlloc() {
}
bool ARMPassConfig::addPreSched2() {
- // FIXME: temporarily disabling load / store optimization pass for Thumb1.
if (getOptLevel() != CodeGenOpt::None) {
- if (!getARMSubtarget().isThumb1Only()) {
- addPass(createARMLoadStoreOptimizationPass());
- printAndVerify("After ARM load / store optimizer");
- }
+ addPass(createARMLoadStoreOptimizationPass());
+ printAndVerify("After ARM load / store optimizer");
+
if (getARMSubtarget().hasNEON())
addPass(createExecutionDependencyFixPass(&ARM::DPRRegClass));
}
@@ -219,6 +239,7 @@ bool ARMPassConfig::addPreEmitPass() {
addPass(&UnpackMachineBundlesID);
}
+ addPass(createARMOptimizeBarriersPass());
addPass(createARMConstantIslandPass());
return true;
diff --git a/contrib/llvm/lib/Target/ARM/ARMTargetMachine.h b/contrib/llvm/lib/Target/ARM/ARMTargetMachine.h
index d4caf5c..b72b1df 100644
--- a/contrib/llvm/lib/Target/ARM/ARMTargetMachine.h
+++ b/contrib/llvm/lib/Target/ARM/ARMTargetMachine.h
@@ -14,18 +14,9 @@
#ifndef ARMTARGETMACHINE_H
#define ARMTARGETMACHINE_H
-#include "ARMFrameLowering.h"
-#include "ARMISelLowering.h"
#include "ARMInstrInfo.h"
-#include "ARMJITInfo.h"
-#include "ARMSelectionDAGInfo.h"
#include "ARMSubtarget.h"
-#include "Thumb1FrameLowering.h"
-#include "Thumb1InstrInfo.h"
-#include "Thumb2InstrInfo.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/IR/DataLayout.h"
-#include "llvm/MC/MCStreamer.h"
#include "llvm/Target/TargetMachine.h"
namespace llvm {
@@ -33,68 +24,76 @@ namespace llvm {
class ARMBaseTargetMachine : public LLVMTargetMachine {
protected:
ARMSubtarget Subtarget;
-private:
- ARMJITInfo JITInfo;
- InstrItineraryData InstrItins;
-
public:
ARMBaseTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL);
+ CodeGenOpt::Level OL,
+ bool isLittle);
- virtual ARMJITInfo *getJITInfo() { return &JITInfo; }
- virtual const ARMSubtarget *getSubtargetImpl() const { return &Subtarget; }
- virtual const ARMTargetLowering *getTargetLowering() const {
- // Implemented by derived classes
- llvm_unreachable("getTargetLowering not implemented");
+ const ARMSubtarget *getSubtargetImpl() const override { return &Subtarget; }
+ const ARMBaseRegisterInfo *getRegisterInfo() const override {
+ return getSubtargetImpl()->getRegisterInfo();
+ }
+ const ARMTargetLowering *getTargetLowering() const override {
+ return getSubtargetImpl()->getTargetLowering();
+ }
+ const ARMSelectionDAGInfo *getSelectionDAGInfo() const override {
+ return getSubtargetImpl()->getSelectionDAGInfo();
+ }
+ const ARMBaseInstrInfo *getInstrInfo() const override {
+ return getSubtargetImpl()->getInstrInfo();
}
- virtual const InstrItineraryData *getInstrItineraryData() const {
- return &InstrItins;
+ const ARMFrameLowering *getFrameLowering() const override {
+ return getSubtargetImpl()->getFrameLowering();
}
+ const InstrItineraryData *getInstrItineraryData() const override {
+ return &getSubtargetImpl()->getInstrItineraryData();
+ }
+ const DataLayout *getDataLayout() const override {
+ return getSubtargetImpl()->getDataLayout();
+ }
+ ARMJITInfo *getJITInfo() override { return Subtarget.getJITInfo(); }
/// \brief Register ARM analysis passes with a pass manager.
- virtual void addAnalysisPasses(PassManagerBase &PM);
+ void addAnalysisPasses(PassManagerBase &PM) override;
// Pass Pipeline Configuration
- virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
- virtual bool addCodeEmitter(PassManagerBase &PM, JITCodeEmitter &MCE);
+ bool addCodeEmitter(PassManagerBase &PM, JITCodeEmitter &MCE) override;
};
/// ARMTargetMachine - ARM target machine.
///
class ARMTargetMachine : public ARMBaseTargetMachine {
virtual void anchor();
- ARMInstrInfo InstrInfo;
- const DataLayout DL; // Calculates type size & alignment
- ARMTargetLowering TLInfo;
- ARMSelectionDAGInfo TSInfo;
- ARMFrameLowering FrameLowering;
public:
- ARMTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL);
-
- virtual const ARMRegisterInfo *getRegisterInfo() const {
- return &InstrInfo.getRegisterInfo();
- }
+ ARMTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS,
+ const TargetOptions &Options, Reloc::Model RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL, bool isLittle);
+};
- virtual const ARMTargetLowering *getTargetLowering() const {
- return &TLInfo;
- }
+/// ARMLETargetMachine - ARM little endian target machine.
+///
+class ARMLETargetMachine : public ARMTargetMachine {
+ void anchor() override;
+public:
+ ARMLETargetMachine(const Target &T, StringRef TT,
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
+};
- virtual const ARMSelectionDAGInfo* getSelectionDAGInfo() const {
- return &TSInfo;
- }
- virtual const ARMFrameLowering *getFrameLowering() const {
- return &FrameLowering;
- }
- virtual const ARMInstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const DataLayout *getDataLayout() const { return &DL; }
+/// ARMBETargetMachine - ARM big endian target machine.
+///
+class ARMBETargetMachine : public ARMTargetMachine {
+ void anchor() override;
+public:
+ ARMBETargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS,
+ const TargetOptions &Options, Reloc::Model RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL);
};
/// ThumbTargetMachine - Thumb target machine.
@@ -103,42 +102,32 @@ class ARMTargetMachine : public ARMBaseTargetMachine {
///
class ThumbTargetMachine : public ARMBaseTargetMachine {
virtual void anchor();
- // Either Thumb1InstrInfo or Thumb2InstrInfo.
- OwningPtr<ARMBaseInstrInfo> InstrInfo;
- const DataLayout DL; // Calculates type size & alignment
- ARMTargetLowering TLInfo;
- ARMSelectionDAGInfo TSInfo;
- // Either Thumb1FrameLowering or ARMFrameLowering.
- OwningPtr<ARMFrameLowering> FrameLowering;
public:
- ThumbTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL);
-
- /// returns either Thumb1RegisterInfo or Thumb2RegisterInfo
- virtual const ARMBaseRegisterInfo *getRegisterInfo() const {
- return &InstrInfo->getRegisterInfo();
- }
-
- virtual const ARMTargetLowering *getTargetLowering() const {
- return &TLInfo;
- }
+ ThumbTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS,
+ const TargetOptions &Options, Reloc::Model RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL, bool isLittle);
+};
- virtual const ARMSelectionDAGInfo *getSelectionDAGInfo() const {
- return &TSInfo;
- }
+/// ThumbLETargetMachine - Thumb little endian target machine.
+///
+class ThumbLETargetMachine : public ThumbTargetMachine {
+ void anchor() override;
+public:
+ ThumbLETargetMachine(const Target &T, StringRef TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
+};
- /// returns either Thumb1InstrInfo or Thumb2InstrInfo
- virtual const ARMBaseInstrInfo *getInstrInfo() const {
- return InstrInfo.get();
- }
- /// returns either Thumb1FrameLowering or ARMFrameLowering
- virtual const ARMFrameLowering *getFrameLowering() const {
- return FrameLowering.get();
- }
- virtual const DataLayout *getDataLayout() const { return &DL; }
+/// ThumbBETargetMachine - Thumb big endian target machine.
+///
+class ThumbBETargetMachine : public ThumbTargetMachine {
+ void anchor() override;
+public:
+ ThumbBETargetMachine(const Target &T, StringRef TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
};
} // end namespace llvm
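A minimal sketch of the forwarding pattern the rewritten header uses (placeholder types, not the real ARM classes): the target machine no longer owns per-target objects such as frame lowering or the DataLayout, every accessor simply forwards to the subtarget, and the LE/BE variants differ only in the isLittle flag passed to the base constructor.
    // Placeholder types; the real accessors forward to ARMSubtarget instead.
    struct SubtargetSketch {
      int FrameLowering = 0;
      const int *getFrameLowering() const { return &FrameLowering; }
    };
    class TargetMachineSketch {
      SubtargetSketch Subtarget;
    public:
      const SubtargetSketch *getSubtargetImpl() const { return &Subtarget; }
      // The target machine only forwards; it owns nothing itself.
      const int *getFrameLowering() const {
        return getSubtargetImpl()->getFrameLowering();
      }
    };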
diff --git a/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp b/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp
index 7ec71b2..48238bf 100644
--- a/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp
@@ -10,13 +10,14 @@
#include "ARMTargetObjectFile.h"
#include "ARMSubtarget.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ELF.h"
-#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetLowering.h"
using namespace llvm;
using namespace dwarf;
@@ -31,7 +32,7 @@ void ARMElfTargetObjectFile::Initialize(MCContext &Ctx,
InitializeELF(isAAPCS_ABI);
if (isAAPCS_ABI) {
- LSDASection = NULL;
+ LSDASection = nullptr;
}
AttributesSection =
@@ -41,13 +42,22 @@ void ARMElfTargetObjectFile::Initialize(MCContext &Ctx,
SectionKind::getMetadata());
}
-const MCExpr *ARMElfTargetObjectFile::
-getTTypeGlobalReference(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI, unsigned Encoding,
- MCStreamer &Streamer) const {
+const MCExpr *ARMElfTargetObjectFile::getTTypeGlobalReference(
+ const GlobalValue *GV, unsigned Encoding, Mangler &Mang,
+ const TargetMachine &TM, MachineModuleInfo *MMI,
+ MCStreamer &Streamer) const {
+ if (TM.getMCAsmInfo()->getExceptionHandlingType() != ExceptionHandling::ARM)
+ return TargetLoweringObjectFileELF::getTTypeGlobalReference(
+ GV, Encoding, Mang, TM, MMI, Streamer);
+
assert(Encoding == DW_EH_PE_absptr && "Can handle absptr encoding only");
- return MCSymbolRefExpr::Create(getSymbol(*Mang, GV),
- MCSymbolRefExpr::VK_ARM_TARGET2,
+ return MCSymbolRefExpr::Create(TM.getSymbol(GV, Mang),
+ MCSymbolRefExpr::VK_ARM_TARGET2, getContext());
+}
+
+const MCExpr *ARMElfTargetObjectFile::
+getDebugThreadLocalSymbol(const MCSymbol *Sym) const {
+ return MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_ARM_TLSLDO,
getContext());
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.h b/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.h
index 7f60727..c926421 100644
--- a/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.h
+++ b/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.h
@@ -23,19 +23,19 @@ protected:
public:
ARMElfTargetObjectFile() :
TargetLoweringObjectFileELF(),
- AttributesSection(NULL)
+ AttributesSection(nullptr)
{}
- virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
+ void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
const MCExpr *
- getTTypeGlobalReference(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI, unsigned Encoding,
- MCStreamer &Streamer) const;
-
- virtual const MCSection *getAttributesSection() const {
- return AttributesSection;
- }
+ getTTypeGlobalReference(const GlobalValue *GV, unsigned Encoding,
+ Mangler &Mang, const TargetMachine &TM,
+ MachineModuleInfo *MMI,
+ MCStreamer &Streamer) const override;
+
+ /// \brief Describe a TLS variable address within debug info.
+ const MCExpr *getDebugThreadLocalSymbol(const MCSymbol *Sym) const override;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 6bbb38f..a2ace62 100644
--- a/contrib/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -14,17 +14,18 @@
///
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "armtti"
#include "ARM.h"
#include "ARMTargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/CostTable.h"
+#include "llvm/Target/TargetLowering.h"
using namespace llvm;
+#define DEBUG_TYPE "armtti"
+
// Declare the pass initialization routine locally as target-specific passes
-// don't havve a target-wide initialization entry point, and so we rely on the
+// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializeARMTTIPass(PassRegistry &);
@@ -32,7 +33,7 @@ void initializeARMTTIPass(PassRegistry &);
namespace {
-class ARMTTI : public ImmutablePass, public TargetTransformInfo {
+class ARMTTI final : public ImmutablePass, public TargetTransformInfo {
const ARMBaseTargetMachine *TM;
const ARMSubtarget *ST;
const ARMTargetLowering *TLI;
@@ -42,7 +43,7 @@ class ARMTTI : public ImmutablePass, public TargetTransformInfo {
unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;
public:
- ARMTTI() : ImmutablePass(ID), TM(0), ST(0), TLI(0) {
+ ARMTTI() : ImmutablePass(ID), TM(nullptr), ST(nullptr), TLI(nullptr) {
llvm_unreachable("This pass cannot be directly constructed");
}
@@ -52,15 +53,11 @@ public:
initializeARMTTIPass(*PassRegistry::getPassRegistry());
}
- virtual void initializePass() {
+ void initializePass() override {
pushTTIStack(this);
}
- virtual void finalizePass() {
- popTTIStack();
- }
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
TargetTransformInfo::getAnalysisUsage(AU);
}
@@ -68,7 +65,7 @@ public:
static char ID;
/// Provide necessary pointer adjustments for the two base classes.
- virtual void *getAdjustedAnalysisPointer(const void *ID) {
+ void *getAdjustedAnalysisPointer(const void *ID) override {
if (ID == &TargetTransformInfo::ID)
return (TargetTransformInfo*)this;
return this;
@@ -76,8 +73,8 @@ public:
/// \name Scalar TTI Implementations
/// @{
-
- virtual unsigned getIntImmCost(const APInt &Imm, Type *Ty) const;
+ using TargetTransformInfo::getIntImmCost;
+ unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override;
/// @}
@@ -85,7 +82,7 @@ public:
/// \name Vector TTI Implementations
/// @{
- unsigned getNumberOfRegisters(bool Vector) const {
+ unsigned getNumberOfRegisters(bool Vector) const override {
if (Vector) {
if (ST->hasNEON())
return 16;
@@ -94,10 +91,10 @@ public:
if (ST->isThumb1Only())
return 8;
- return 16;
+ return 13;
}
- unsigned getRegisterBitWidth(bool Vector) const {
+ unsigned getRegisterBitWidth(bool Vector) const override {
if (Vector) {
if (ST->hasNEON())
return 128;
@@ -107,7 +104,7 @@ public:
return 32;
}
- unsigned getMaximumUnrollFactor() const {
+ unsigned getMaximumUnrollFactor() const override {
// These are out of order CPUs:
if (ST->isCortexA15() || ST->isSwift())
return 2;
@@ -115,23 +112,27 @@ public:
}
unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
- int Index, Type *SubTp) const;
+ int Index, Type *SubTp) const override;
unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
- Type *Src) const;
+ Type *Src) const override;
- unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) const;
+ unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
+ Type *CondTy) const override;
- unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) const;
+ unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
+ unsigned Index) const override;
- unsigned getAddressComputationCost(Type *Val, bool IsComplex) const;
+ unsigned getAddressComputationCost(Type *Val,
+ bool IsComplex) const override;
- unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
- OperandValueKind Op1Info = OK_AnyValue,
- OperandValueKind Op2Info = OK_AnyValue) const;
+ unsigned
+ getArithmeticInstrCost(unsigned Opcode, Type *Ty,
+ OperandValueKind Op1Info = OK_AnyValue,
+ OperandValueKind Op2Info = OK_AnyValue) const override;
unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
- unsigned AddressSpace) const;
+ unsigned AddressSpace) const override;
/// @}
};
@@ -162,25 +163,25 @@ unsigned ARMTTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
(ARM_AM::getSOImmVal(~ZImmVal) != -1))
return 1;
return ST->hasV6T2Ops() ? 2 : 3;
- } else if (ST->isThumb2()) {
+ }
+ if (ST->isThumb2()) {
if ((SImmVal >= 0 && SImmVal < 65536) ||
(ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
(ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
return 1;
return ST->hasV6T2Ops() ? 2 : 3;
- } else /*Thumb1*/ {
- if (SImmVal >= 0 && SImmVal < 256)
- return 1;
- if ((~ZImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
- return 2;
- // Load from constantpool.
- return 3;
}
- return 2;
+ // Thumb1.
+ if (SImmVal >= 0 && SImmVal < 256)
+ return 1;
+ if ((~ZImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
+ return 2;
+ // Load from constantpool.
+ return 3;
}
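The restructured getIntImmCost falls through three tiers: ARM-mode encodings, Thumb2 encodings, then Thumb1. A compilable sketch of just the Thumb1 tier (hypothetical free function; the isThumbImmShiftedVal check is elided):
    #include <cstdint>
    // Sketch of the Thumb1 branch above; the tiers mirror the hunk, the
    // function itself is hypothetical.
    static unsigned thumb1ImmCostSketch(int64_t SImmVal, uint64_t ZImmVal) {
      if (SImmVal >= 0 && SImmVal < 256)
        return 1;               // fits a movs immediate
      if (~ZImmVal < 256 /* || isThumbImmShiftedVal(ZImmVal) */)
        return 2;               // mvn, or an 8-bit value shifted into place
      return 3;                 // otherwise materialize from the constant pool
    }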
unsigned ARMTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
- Type *Src) const {
+ Type *Src) const {
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
@@ -442,34 +443,62 @@ unsigned ARMTTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
unsigned ARMTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) const {
- // We only handle costs of reverse shuffles for now.
- if (Kind != SK_Reverse)
+ // We only handle costs of reverse and alternate shuffles for now.
+ if (Kind != SK_Reverse && Kind != SK_Alternate)
return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
- static const CostTblEntry<MVT::SimpleValueType> NEONShuffleTbl[] = {
- // Reverse shuffle cost one instruction if we are shuffling within a double
- // word (vrev) or two if we shuffle a quad word (vrev, vext).
- { ISD::VECTOR_SHUFFLE, MVT::v2i32, 1 },
- { ISD::VECTOR_SHUFFLE, MVT::v2f32, 1 },
- { ISD::VECTOR_SHUFFLE, MVT::v2i64, 1 },
- { ISD::VECTOR_SHUFFLE, MVT::v2f64, 1 },
-
- { ISD::VECTOR_SHUFFLE, MVT::v4i32, 2 },
- { ISD::VECTOR_SHUFFLE, MVT::v4f32, 2 },
- { ISD::VECTOR_SHUFFLE, MVT::v8i16, 2 },
- { ISD::VECTOR_SHUFFLE, MVT::v16i8, 2 }
- };
+ if (Kind == SK_Reverse) {
+ static const CostTblEntry<MVT::SimpleValueType> NEONShuffleTbl[] = {
+ // Reverse shuffle cost one instruction if we are shuffling within a
+ // double word (vrev) or two if we shuffle a quad word (vrev, vext).
+ {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
+ {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
+ {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
+ {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
- std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
+ {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
+ {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
+ {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
+ {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};
- int Idx = CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
- if (Idx == -1)
- return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
+ std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
+
+ int Idx = CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
+ if (Idx == -1)
+ return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
- return LT.first * NEONShuffleTbl[Idx].Cost;
+ return LT.first * NEONShuffleTbl[Idx].Cost;
+ }
+ if (Kind == SK_Alternate) {
+ static const CostTblEntry<MVT::SimpleValueType> NEONAltShuffleTbl[] = {
+ // Alt shuffle cost table for ARM. Cost is the number of instructions
+ // required to create the shuffled vector.
+
+ {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
+ {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
+ {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
+ {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
+
+ {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
+ {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
+ {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},
+
+ {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},
+
+ {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};
+
+ std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
+ int Idx =
+ CostTableLookup(NEONAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
+ if (Idx == -1)
+ return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
+ return LT.first * NEONAltShuffleTbl[Idx].Cost;
+ }
+ return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
}
-unsigned ARMTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Op1Info,
+unsigned ARMTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
+ OperandValueKind Op1Info,
OperandValueKind Op2Info) const {
int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
@@ -533,7 +562,7 @@ unsigned ARMTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueK
// creates a sequence of shift, and, or instructions to construct values.
// These sequences are recognized by the ISel and have zero-cost. Not so for
// the vectorized code. Because we have support for v2i64 but not i64 those
- // sequences look particularily beneficial to vectorize.
+ // sequences look particularly beneficial to vectorize.
// To work around this we increase the cost of v2i64 operations to make them
// seem less beneficial.
if (LT.second == MVT::v2i64 &&
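Both shuffle paths above follow the same table-lookup idiom: legalize the type, look up the (opcode, MVT) pair in a static cost table, and fall back to the generic implementation when no entry is found. A reduced, self-contained sketch of that idiom (hypothetical Entry type; the real code uses CostTblEntry and CostTableLookup):
    struct Entry { int ISD; int VT; unsigned Cost; };
    // Returns the per-legalized-op cost, or -1 to request the generic fallback.
    template <unsigned N>
    static int lookupCostSketch(const Entry (&Tbl)[N], int ISD, int VT) {
      for (unsigned i = 0; i != N; ++i)
        if (Tbl[i].ISD == ISD && Tbl[i].VT == VT)
          return static_cast<int>(Tbl[i].Cost);
      return -1;
    }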
diff --git a/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index e3f9e0d..b62706c 100644
--- a/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -7,33 +7,40 @@
//
//===----------------------------------------------------------------------===//
-#include "ARMBuildAttrs.h"
#include "ARMFPUName.h"
#include "ARMFeatures.h"
-#include "llvm/MC/MCTargetAsmParser.h"
#include "MCTargetDesc/ARMAddressingModes.h"
+#include "MCTargetDesc/ARMArchName.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMMCExpr.h"
-#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCDisassembler.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCTargetAsmParser.h"
+#include "llvm/Support/ARMBuildAttributes.h"
+#include "llvm/Support/ARMEHABI.h"
+#include "llvm/Support/COFF.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
@@ -48,31 +55,90 @@ class ARMOperand;
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
+class UnwindContext {
+ MCAsmParser &Parser;
+
+ typedef SmallVector<SMLoc, 4> Locs;
+
+ Locs FnStartLocs;
+ Locs CantUnwindLocs;
+ Locs PersonalityLocs;
+ Locs PersonalityIndexLocs;
+ Locs HandlerDataLocs;
+ int FPReg;
+
+public:
+ UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
+
+ bool hasFnStart() const { return !FnStartLocs.empty(); }
+ bool cantUnwind() const { return !CantUnwindLocs.empty(); }
+ bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
+ bool hasPersonality() const {
+ return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
+ }
+
+ void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
+ void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
+ void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
+ void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
+ void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
+
+ void saveFPReg(int Reg) { FPReg = Reg; }
+ int getFPReg() const { return FPReg; }
+
+ void emitFnStartLocNotes() const {
+ for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end();
+ FI != FE; ++FI)
+ Parser.Note(*FI, ".fnstart was specified here");
+ }
+ void emitCantUnwindLocNotes() const {
+ for (Locs::const_iterator UI = CantUnwindLocs.begin(),
+ UE = CantUnwindLocs.end(); UI != UE; ++UI)
+ Parser.Note(*UI, ".cantunwind was specified here");
+ }
+ void emitHandlerDataLocNotes() const {
+ for (Locs::const_iterator HI = HandlerDataLocs.begin(),
+ HE = HandlerDataLocs.end(); HI != HE; ++HI)
+ Parser.Note(*HI, ".handlerdata was specified here");
+ }
+ void emitPersonalityLocNotes() const {
+ for (Locs::const_iterator PI = PersonalityLocs.begin(),
+ PE = PersonalityLocs.end(),
+ PII = PersonalityIndexLocs.begin(),
+ PIE = PersonalityIndexLocs.end();
+ PI != PE || PII != PIE;) {
+ if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
+ Parser.Note(*PI++, ".personality was specified here");
+ else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
+ Parser.Note(*PII++, ".personalityindex was specified here");
+ else
+ llvm_unreachable(".personality and .personalityindex cannot be "
+ "at the same location");
+ }
+ }
+
+ void reset() {
+ FnStartLocs = Locs();
+ CantUnwindLocs = Locs();
+ PersonalityLocs = Locs();
+ HandlerDataLocs = Locs();
+ PersonalityIndexLocs = Locs();
+ FPReg = ARM::SP;
+ }
+};
+
class ARMAsmParser : public MCTargetAsmParser {
MCSubtargetInfo &STI;
MCAsmParser &Parser;
const MCInstrInfo &MII;
const MCRegisterInfo *MRI;
+ UnwindContext UC;
ARMTargetStreamer &getTargetStreamer() {
- MCTargetStreamer &TS = getParser().getStreamer().getTargetStreamer();
+ MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
return static_cast<ARMTargetStreamer &>(TS);
}
- // Unwind directives state
- SMLoc FnStartLoc;
- SMLoc CantUnwindLoc;
- SMLoc PersonalityLoc;
- SMLoc HandlerDataLoc;
- int FPReg;
- void resetUnwindDirectiveParserState() {
- FnStartLoc = SMLoc();
- CantUnwindLoc = SMLoc();
- PersonalityLoc = SMLoc();
- HandlerDataLoc = SMLoc();
- FPReg = -1;
- }
-
// Map of register aliases registers via the .req directive.
StringMap<unsigned> RegisterReqs;
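UnwindContext only records source locations; diagnostics are emitted later, when a directive turns out to conflict. A small usage sketch, assuming a driver of roughly this shape (the function and its error text are illustrative, not taken from the patch):
    // Hypothetical driver showing the intended use of UnwindContext.
    static bool parseFnStartSketch(UnwindContext &UC, MCAsmParser &Parser,
                                   SMLoc L) {
      if (UC.hasFnStart()) {
        Parser.Error(L, "duplicate .fnstart directive");
        UC.emitFnStartLocNotes();   // notes point back at the earlier .fnstart
        return true;
      }
      UC.recordFnStart(L);
      return false;
    }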
@@ -111,6 +177,9 @@ class ARMAsmParser : public MCTargetAsmParser {
MCAsmParser &getParser() const { return Parser; }
MCAsmLexer &getLexer() const { return Parser.getLexer(); }
+ void Note(SMLoc L, const Twine &Msg, ArrayRef<SMRange> Ranges = None) {
+ return Parser.Note(L, Msg, Ranges);
+ }
bool Warning(SMLoc L, const Twine &Msg,
ArrayRef<SMRange> Ranges = None) {
return Parser.Warning(L, Msg, Ranges);
@@ -121,15 +190,15 @@ class ARMAsmParser : public MCTargetAsmParser {
}
int tryParseRegister();
- bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
- int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
- bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
- bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
- bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
+ bool tryParseRegisterWithWriteBack(OperandVector &);
+ int tryParseShiftRegister(OperandVector &);
+ bool parseRegisterList(OperandVector &);
+ bool parseMemory(OperandVector &);
+ bool parseOperand(OperandVector &, StringRef Mnemonic);
bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
unsigned &ShiftAmount);
- bool parseDirectiveWord(unsigned Size, SMLoc L);
+ bool parseLiteralValues(unsigned Size, SMLoc L);
bool parseDirectiveThumb(SMLoc L);
bool parseDirectiveARM(SMLoc L);
bool parseDirectiveThumbFunc(SMLoc L);
@@ -149,6 +218,17 @@ class ARMAsmParser : public MCTargetAsmParser {
bool parseDirectiveSetFP(SMLoc L);
bool parseDirectivePad(SMLoc L);
bool parseDirectiveRegSave(SMLoc L, bool IsVector);
+ bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
+ bool parseDirectiveLtorg(SMLoc L);
+ bool parseDirectiveEven(SMLoc L);
+ bool parseDirectivePersonalityIndex(SMLoc L);
+ bool parseDirectiveUnwindRaw(SMLoc L);
+ bool parseDirectiveTLSDescSeq(SMLoc L);
+ bool parseDirectiveMovSP(SMLoc L);
+ bool parseDirectiveObjectArch(SMLoc L);
+ bool parseDirectiveArchExtension(SMLoc L);
+ bool parseDirectiveAlign(SMLoc L);
+ bool parseDirectiveThumbSet(SMLoc L);
StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
bool &CarrySetting, unsigned &ProcessorIMod,
@@ -202,54 +282,42 @@ class ARMAsmParser : public MCTargetAsmParser {
/// }
- OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
- OperandMatchResultTy parseCoprocNumOperand(
- SmallVectorImpl<MCParsedAsmOperand*>&);
- OperandMatchResultTy parseCoprocRegOperand(
- SmallVectorImpl<MCParsedAsmOperand*>&);
- OperandMatchResultTy parseCoprocOptionOperand(
- SmallVectorImpl<MCParsedAsmOperand*>&);
- OperandMatchResultTy parseMemBarrierOptOperand(
- SmallVectorImpl<MCParsedAsmOperand*>&);
- OperandMatchResultTy parseInstSyncBarrierOptOperand(
- SmallVectorImpl<MCParsedAsmOperand*>&);
- OperandMatchResultTy parseProcIFlagsOperand(
- SmallVectorImpl<MCParsedAsmOperand*>&);
- OperandMatchResultTy parseMSRMaskOperand(
- SmallVectorImpl<MCParsedAsmOperand*>&);
- OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
- StringRef Op, int Low, int High);
- OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
+ OperandMatchResultTy parseITCondCode(OperandVector &);
+ OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
+ OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
+ OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
+ OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
+ OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
+ OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
+ OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
+ OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
+ int High);
+ OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
return parsePKHImm(O, "lsl", 0, 31);
}
- OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
+ OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
return parsePKHImm(O, "asr", 1, 32);
}
- OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
- OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
- OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
- OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
- OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
- OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
- OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
- OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
+ OperandMatchResultTy parseSetEndImm(OperandVector &);
+ OperandMatchResultTy parseShifterImm(OperandVector &);
+ OperandMatchResultTy parseRotImm(OperandVector &);
+ OperandMatchResultTy parseBitfield(OperandVector &);
+ OperandMatchResultTy parsePostIdxReg(OperandVector &);
+ OperandMatchResultTy parseAM3Offset(OperandVector &);
+ OperandMatchResultTy parseFPImm(OperandVector &);
+ OperandMatchResultTy parseVectorList(OperandVector &);
OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
SMLoc &EndLoc);
// Asm Match Converter Methods
- void cvtThumbMultiply(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- void cvtThumbBranches(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
-
- bool validateInstruction(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
- bool processInstruction(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
- bool shouldOmitCCOutOperand(StringRef Mnemonic,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands);
- bool shouldOmitPredicateOperand(StringRef Mnemonic,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
+ void cvtThumbBranches(MCInst &Inst, const OperandVector &);
+
+ bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
+ bool processInstruction(MCInst &Inst, const OperandVector &Ops);
+ bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
+ bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
+
public:
enum ARMMatchResultTy {
Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
@@ -262,8 +330,9 @@ public:
};
ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
- const MCInstrInfo &MII)
- : MCTargetAsmParser(), STI(_STI), Parser(_Parser), MII(MII), FPReg(-1) {
+ const MCInstrInfo &MII,
+ const MCTargetOptions &Options)
+ : MCTargetAsmParser(), STI(_STI), Parser(_Parser), MII(MII), UC(_Parser) {
MCAsmParserExtension::Initialize(_Parser);
// Cache the MCRegisterInfo.
@@ -279,21 +348,20 @@ public:
}
// Implementation of the MCTargetAsmParser interface:
- bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
+ bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
- SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands);
- bool ParseDirective(AsmToken DirectiveID);
+ SMLoc NameLoc, OperandVector &Operands) override;
+ bool ParseDirective(AsmToken DirectiveID) override;
- unsigned validateTargetOperandClass(MCParsedAsmOperand *Op, unsigned Kind);
- unsigned checkTargetMatchPredicate(MCInst &Inst);
+ unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
+ unsigned Kind) override;
+ unsigned checkTargetMatchPredicate(MCInst &Inst) override;
bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm);
- void onLabelParsed(MCSymbol *Symbol);
-
+ OperandVector &Operands, MCStreamer &Out,
+ unsigned &ErrorInfo,
+ bool MatchingInlineAsm) override;
+ void onLabelParsed(MCSymbol *Symbol) override;
};
} // end anonymous namespace
@@ -332,7 +400,7 @@ class ARMOperand : public MCParsedAsmOperand {
k_Token
} Kind;
- SMLoc StartLoc, EndLoc;
+ SMLoc StartLoc, EndLoc, AlignmentLoc;
SmallVector<unsigned, 8> Registers;
struct CCOp {
@@ -463,8 +531,8 @@ class ARMOperand : public MCParsedAsmOperand {
struct BitfieldOp Bitfield;
};
- ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
public:
+ ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
Kind = o.Kind;
StartLoc = o.StartLoc;
@@ -542,13 +610,19 @@ public:
}
/// getStartLoc - Get the location of the first token of this operand.
- SMLoc getStartLoc() const { return StartLoc; }
+ SMLoc getStartLoc() const override { return StartLoc; }
/// getEndLoc - Get the location of the last token of this operand.
- SMLoc getEndLoc() const { return EndLoc; }
+ SMLoc getEndLoc() const override { return EndLoc; }
/// getLocRange - Get the range between the first and last token of this
/// operand.
SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
+ /// getAlignmentLoc - Get the location of the Alignment token of this operand.
+ SMLoc getAlignmentLoc() const {
+ assert(Kind == k_Memory && "Invalid access!");
+ return AlignmentLoc;
+ }
+
ARMCC::CondCodes getCondCode() const {
assert(Kind == k_CondCode && "Invalid access!");
return CC.Val;
@@ -564,7 +638,7 @@ public:
return StringRef(Tok.Data, Tok.Length);
}
- unsigned getReg() const {
+ unsigned getReg() const override {
assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
return Reg.RegNum;
}
@@ -612,7 +686,7 @@ public:
bool isCCOut() const { return Kind == k_CCOut; }
bool isITMask() const { return Kind == k_ITCondMask; }
bool isITCondCode() const { return Kind == k_CondCode; }
- bool isImm() const { return Kind == k_Immediate; }
+ bool isImm() const override { return Kind == k_Immediate; }
// checks whether this operand is an unsigned offset which fits in a field
// of specified width and scaled by a specific number of bits
template<unsigned width, unsigned scale>
@@ -988,14 +1062,14 @@ public:
int64_t Value = CE->getValue();
return Value == 1 || Value == 0;
}
- bool isReg() const { return Kind == k_Register; }
+ bool isReg() const override { return Kind == k_Register; }
bool isRegList() const { return Kind == k_RegisterList; }
bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
- bool isToken() const { return Kind == k_Token; }
+ bool isToken() const override { return Kind == k_Token; }
bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
- bool isMem() const { return Kind == k_Memory; }
+ bool isMem() const override { return Kind == k_Memory; }
bool isShifterImm() const { return Kind == k_ShifterImmediate; }
bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
@@ -1005,12 +1079,12 @@ public:
bool isPostIdxReg() const {
return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
}
- bool isMemNoOffset(bool alignOK = false) const {
+ bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
if (!isMem())
return false;
// No offset of any kind.
- return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
- (alignOK || Memory.Alignment == 0);
+ return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
+ (alignOK || Memory.Alignment == Alignment);
}
bool isMemPCRelImm12() const {
if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
@@ -1026,6 +1100,65 @@ public:
bool isAlignedMemory() const {
return isMemNoOffset(true);
}
+ bool isAlignedMemoryNone() const {
+ return isMemNoOffset(false, 0);
+ }
+ bool isDupAlignedMemoryNone() const {
+ return isMemNoOffset(false, 0);
+ }
+ bool isAlignedMemory16() const {
+ if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
+ return true;
+ return isMemNoOffset(false, 0);
+ }
+ bool isDupAlignedMemory16() const {
+ if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
+ return true;
+ return isMemNoOffset(false, 0);
+ }
+ bool isAlignedMemory32() const {
+ if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
+ return true;
+ return isMemNoOffset(false, 0);
+ }
+ bool isDupAlignedMemory32() const {
+ if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
+ return true;
+ return isMemNoOffset(false, 0);
+ }
+ bool isAlignedMemory64() const {
+ if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
+ return true;
+ return isMemNoOffset(false, 0);
+ }
+ bool isDupAlignedMemory64() const {
+ if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
+ return true;
+ return isMemNoOffset(false, 0);
+ }
+ bool isAlignedMemory64or128() const {
+ if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
+ return true;
+ if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
+ return true;
+ return isMemNoOffset(false, 0);
+ }
+ bool isDupAlignedMemory64or128() const {
+ if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
+ return true;
+ if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
+ return true;
+ return isMemNoOffset(false, 0);
+ }
+ bool isAlignedMemory64or128or256() const {
+ if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
+ return true;
+ if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
+ return true;
+ if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
+ return true;
+ return isMemNoOffset(false, 0);
+ }
bool isAddrMode2() const {
if (!isMem() || Memory.Alignment != 0) return false;
// Check for register offset.
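The new alignment predicates accept either exactly the alignment the access requires or no alignment qualifier at all. A tiny worked example for the 64-bit case (hypothetical helper restating the same acceptance rule):
    // Restates the rule of isAlignedMemory64(): 8 bytes or unqualified.
    static bool acceptsAlign64(unsigned AlignmentInBytes) {
      return AlignmentInBytes == 8 || AlignmentInBytes == 0;
    }
    // acceptsAlign64(8) -> true   e.g. "[r0:64]"
    // acceptsAlign64(0) -> true   e.g. "[r0]" (no qualifier)
    // acceptsAlign64(2) -> false  e.g. "[r0:16]" is rejected for 64-bit forms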
@@ -1282,6 +1415,7 @@ public:
}
bool isVecListDPairSpaced() const {
+ if (Kind != k_VectorList) return false;
if (isSingleSpacedVectorList()) return false;
return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
.contains(VectorList.RegNum));
@@ -1460,7 +1594,10 @@ public:
}
bool isNEONi16splat() const {
- if (!isImm()) return false;
+ if (isNEONByteReplicate(2))
+ return false; // Leave that to byte replication and forbid it by default.
+ if (!isImm())
+ return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE) return false;
@@ -1470,7 +1607,10 @@ public:
}
bool isNEONi32splat() const {
- if (!isImm()) return false;
+ if (isNEONByteReplicate(4))
+ return false; // Leave that to byte replication and forbid it by default.
+ if (!isImm())
+ return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE) return false;
@@ -1482,11 +1622,36 @@ public:
(Value >= 0x01000000 && Value <= 0xff000000);
}
+ bool isNEONByteReplicate(unsigned NumBytes) const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ // Must be a constant.
+ if (!CE)
+ return false;
+ int64_t Value = CE->getValue();
+ if (!Value)
+ return false; // Don't bother with zero.
+
+ unsigned char B = Value & 0xff;
+ for (unsigned i = 1; i < NumBytes; ++i) {
+ Value >>= 8;
+ if ((Value & 0xff) != B)
+ return false;
+ }
+ return true;
+ }
+ bool isNEONi16ByteReplicate() const { return isNEONByteReplicate(2); }
+ bool isNEONi32ByteReplicate() const { return isNEONByteReplicate(4); }
bool isNEONi32vmov() const {
- if (!isImm()) return false;
+ if (isNEONByteReplicate(4))
+ return false; // Let it be classified as a byte-replicate case.
+ if (!isImm())
+ return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
- if (!CE) return false;
+ if (!CE)
+ return false;
int64_t Value = CE->getValue();
// i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
// for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
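A standalone restatement of the byte-replication test introduced above (plain integers and a hypothetical function name; the logic mirrors isNEONByteReplicate):
    #include <cstdint>
    static bool isByteReplicatedSketch(int64_t Value, unsigned NumBytes) {
      if (Value == 0)
        return false;                 // zero is handled by other patterns
      uint8_t B = Value & 0xff;
      for (unsigned i = 1; i < NumBytes; ++i) {
        Value >>= 8;
        if ((Value & 0xff) != B)
          return false;               // some byte differs from the lowest one
      }
      return true;                    // e.g. 0x4242 passes for NumBytes == 2
    }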
@@ -1527,7 +1692,7 @@ public:
void addExpr(MCInst &Inst, const MCExpr *Expr) const {
// Add as immediates when possible. Null MCExpr = 0.
- if (Expr == 0)
+ if (!Expr)
Inst.addOperand(MCOperand::CreateImm(0));
else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
@@ -1580,7 +1745,7 @@ public:
void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 3 && "Invalid number of operands!");
assert(isRegShiftedReg() &&
- "addRegShiftedRegOperands() on non RegShiftedReg!");
+ "addRegShiftedRegOperands() on non-RegShiftedReg!");
Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
Inst.addOperand(MCOperand::CreateImm(
@@ -1590,7 +1755,7 @@ public:
void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
assert(isRegShiftedImm() &&
- "addRegShiftedImmOperands() on non RegShiftedImm!");
+ "addRegShiftedImmOperands() on non-RegShiftedImm!");
Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
// Shift of #32 is encoded as 0 where permitted
unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
@@ -1841,6 +2006,50 @@ public:
Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
}
+ void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
+ void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
+ void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
+ void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
+ void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
+ void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
+ void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
+ void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
+ void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
+ void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
+ void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
assert(N == 3 && "Invalid number of operands!");
int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
@@ -2190,6 +2399,19 @@ public:
Inst.addOperand(MCOperand::CreateImm(Value));
}
+ void addNEONinvByteReplicateOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ // The immediate encodes the type of constant as well as the value.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ unsigned Value = CE->getValue();
+ assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
+ Inst.getOpcode() == ARM::VMOVv16i8) &&
+ "All vmvn instructions that wants to replicate non-zero byte "
+ "always must be replaced with VMOVv8i8 or VMOVv16i8.");
+ unsigned B = ((~Value) & 0xff);
+ B |= 0xe00; // cmode = 0b1110
+ Inst.addOperand(MCOperand::CreateImm(B));
+ }
void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate encodes the type of constant as well as the value.
@@ -2204,6 +2426,19 @@ public:
Inst.addOperand(MCOperand::CreateImm(Value));
}
+ void addNEONvmovByteReplicateOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ // The immediate encodes the type of constant as well as the value.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ unsigned Value = CE->getValue();
+ assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
+ Inst.getOpcode() == ARM::VMOVv16i8) &&
+ "All instructions that wants to replicate non-zero byte "
+ "always must be replaced with VMOVv8i8 or VMOVv16i8.");
+ unsigned B = Value & 0xff;
+ B |= 0xe00; // cmode = 0b1110
+ Inst.addOperand(MCOperand::CreateImm(B));
+ }
void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate encodes the type of constant as well as the value.
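Numerically, both byte-replicate adders pack the byte into imm8 and force cmode = 0b1110; the inverted (vmvn) form complements the byte first. A sketch, assuming imm8 sits in bits 7:0 and cmode in bits 11:8 of the combined immediate:
    #include <cstdint>
    // Assumed layout: imm8 in bits 7:0, cmode = 0b1110 in bits 11:8.
    static unsigned encodeByteReplicateSketch(uint8_t Byte, bool Inverted) {
      unsigned Imm8 = Inverted ? static_cast<uint8_t>(~Byte) : Byte;
      return Imm8 | 0xe00;            // 0x42 -> 0xe42; inverted 0x42 -> 0xebd
    }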
@@ -2230,58 +2465,60 @@ public:
Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
}
- virtual void print(raw_ostream &OS) const;
+ void print(raw_ostream &OS) const override;
- static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
- ARMOperand *Op = new ARMOperand(k_ITCondMask);
+ static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
+ auto Op = make_unique<ARMOperand>(k_ITCondMask);
Op->ITMask.Mask = Mask;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
- static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
- ARMOperand *Op = new ARMOperand(k_CondCode);
+ static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
+ SMLoc S) {
+ auto Op = make_unique<ARMOperand>(k_CondCode);
Op->CC.Val = CC;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
- static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
- ARMOperand *Op = new ARMOperand(k_CoprocNum);
+ static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
+ auto Op = make_unique<ARMOperand>(k_CoprocNum);
Op->Cop.Val = CopVal;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
- static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
- ARMOperand *Op = new ARMOperand(k_CoprocReg);
+ static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
+ auto Op = make_unique<ARMOperand>(k_CoprocReg);
Op->Cop.Val = CopVal;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
- static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
- ARMOperand *Op = new ARMOperand(k_CoprocOption);
+ static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
+ SMLoc E) {
+ auto Op = make_unique<ARMOperand>(k_CoprocOption);
Op->Cop.Val = Val;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
- static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
- ARMOperand *Op = new ARMOperand(k_CCOut);
+ static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
+ auto Op = make_unique<ARMOperand>(k_CCOut);
Op->Reg.RegNum = RegNum;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
- static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
- ARMOperand *Op = new ARMOperand(k_Token);
+ static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
+ auto Op = make_unique<ARMOperand>(k_Token);
Op->Tok.Data = Str.data();
Op->Tok.Length = Str.size();
Op->StartLoc = S;
@@ -2289,20 +2526,20 @@ public:
return Op;
}
- static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
- ARMOperand *Op = new ARMOperand(k_Register);
+ static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
+ SMLoc E) {
+ auto Op = make_unique<ARMOperand>(k_Register);
Op->Reg.RegNum = RegNum;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
- static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
- unsigned SrcReg,
- unsigned ShiftReg,
- unsigned ShiftImm,
- SMLoc S, SMLoc E) {
- ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
+ static std::unique_ptr<ARMOperand>
+ CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
+ unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
+ SMLoc E) {
+ auto Op = make_unique<ARMOperand>(k_ShiftedRegister);
Op->RegShiftedReg.ShiftTy = ShTy;
Op->RegShiftedReg.SrcReg = SrcReg;
Op->RegShiftedReg.ShiftReg = ShiftReg;
@@ -2312,11 +2549,10 @@ public:
return Op;
}
- static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
- unsigned SrcReg,
- unsigned ShiftImm,
- SMLoc S, SMLoc E) {
- ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
+ static std::unique_ptr<ARMOperand>
+ CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
+ unsigned ShiftImm, SMLoc S, SMLoc E) {
+ auto Op = make_unique<ARMOperand>(k_ShiftedImmediate);
Op->RegShiftedImm.ShiftTy = ShTy;
Op->RegShiftedImm.SrcReg = SrcReg;
Op->RegShiftedImm.ShiftImm = ShiftImm;
@@ -2325,9 +2561,9 @@ public:
return Op;
}
- static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
- SMLoc S, SMLoc E) {
- ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
+ static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
+ SMLoc S, SMLoc E) {
+ auto Op = make_unique<ARMOperand>(k_ShifterImmediate);
Op->ShifterImm.isASR = isASR;
Op->ShifterImm.Imm = Imm;
Op->StartLoc = S;
@@ -2335,17 +2571,18 @@ public:
return Op;
}
- static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
- ARMOperand *Op = new ARMOperand(k_RotateImmediate);
+ static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
+ SMLoc E) {
+ auto Op = make_unique<ARMOperand>(k_RotateImmediate);
Op->RotImm.Imm = Imm;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
- static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
- SMLoc S, SMLoc E) {
- ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
+ static std::unique_ptr<ARMOperand>
+ CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
+ auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor);
Op->Bitfield.LSB = LSB;
Op->Bitfield.Width = Width;
Op->StartLoc = S;
@@ -2353,8 +2590,8 @@ public:
return Op;
}
- static ARMOperand *
- CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned> > &Regs,
+ static std::unique_ptr<ARMOperand>
+ CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
SMLoc StartLoc, SMLoc EndLoc) {
assert (Regs.size() > 0 && "RegList contains no registers?");
KindTy Kind = k_RegisterList;
@@ -2368,7 +2605,7 @@ public:
// Sort based on the register encoding values.
array_pod_sort(Regs.begin(), Regs.end());
- ARMOperand *Op = new ARMOperand(Kind);
+ auto Op = make_unique<ARMOperand>(Kind);
for (SmallVectorImpl<std::pair<unsigned, unsigned> >::const_iterator
I = Regs.begin(), E = Regs.end(); I != E; ++I)
Op->Registers.push_back(I->second);
@@ -2377,9 +2614,11 @@ public:
return Op;
}
- static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
- bool isDoubleSpaced, SMLoc S, SMLoc E) {
- ARMOperand *Op = new ARMOperand(k_VectorList);
+ static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
+ unsigned Count,
+ bool isDoubleSpaced,
+ SMLoc S, SMLoc E) {
+ auto Op = make_unique<ARMOperand>(k_VectorList);
Op->VectorList.RegNum = RegNum;
Op->VectorList.Count = Count;
Op->VectorList.isDoubleSpaced = isDoubleSpaced;
@@ -2388,10 +2627,10 @@ public:
return Op;
}
- static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
- bool isDoubleSpaced,
- SMLoc S, SMLoc E) {
- ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
+ static std::unique_ptr<ARMOperand>
+ CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
+ SMLoc S, SMLoc E) {
+ auto Op = make_unique<ARMOperand>(k_VectorListAllLanes);
Op->VectorList.RegNum = RegNum;
Op->VectorList.Count = Count;
Op->VectorList.isDoubleSpaced = isDoubleSpaced;
@@ -2400,11 +2639,10 @@ public:
return Op;
}
- static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
- unsigned Index,
- bool isDoubleSpaced,
- SMLoc S, SMLoc E) {
- ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
+ static std::unique_ptr<ARMOperand>
+ CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
+ bool isDoubleSpaced, SMLoc S, SMLoc E) {
+ auto Op = make_unique<ARMOperand>(k_VectorListIndexed);
Op->VectorList.RegNum = RegNum;
Op->VectorList.Count = Count;
Op->VectorList.LaneIndex = Index;
@@ -2414,32 +2652,30 @@ public:
return Op;
}
- static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
- MCContext &Ctx) {
- ARMOperand *Op = new ARMOperand(k_VectorIndex);
+ static std::unique_ptr<ARMOperand>
+ CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
+ auto Op = make_unique<ARMOperand>(k_VectorIndex);
Op->VectorIndex.Val = Idx;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
- static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
- ARMOperand *Op = new ARMOperand(k_Immediate);
+ static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
+ SMLoc E) {
+ auto Op = make_unique<ARMOperand>(k_Immediate);
Op->Imm.Val = Val;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
- static ARMOperand *CreateMem(unsigned BaseRegNum,
- const MCConstantExpr *OffsetImm,
- unsigned OffsetRegNum,
- ARM_AM::ShiftOpc ShiftType,
- unsigned ShiftImm,
- unsigned Alignment,
- bool isNegative,
- SMLoc S, SMLoc E) {
- ARMOperand *Op = new ARMOperand(k_Memory);
+ static std::unique_ptr<ARMOperand>
+ CreateMem(unsigned BaseRegNum, const MCConstantExpr *OffsetImm,
+ unsigned OffsetRegNum, ARM_AM::ShiftOpc ShiftType,
+ unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S,
+ SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
+ auto Op = make_unique<ARMOperand>(k_Memory);
Op->Memory.BaseRegNum = BaseRegNum;
Op->Memory.OffsetImm = OffsetImm;
Op->Memory.OffsetRegNum = OffsetRegNum;
@@ -2449,14 +2685,14 @@ public:
Op->Memory.isNegative = isNegative;
Op->StartLoc = S;
Op->EndLoc = E;
+ Op->AlignmentLoc = AlignmentLoc;
return Op;
}
- static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
- ARM_AM::ShiftOpc ShiftTy,
- unsigned ShiftImm,
- SMLoc S, SMLoc E) {
- ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
+ static std::unique_ptr<ARMOperand>
+ CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
+ unsigned ShiftImm, SMLoc S, SMLoc E) {
+ auto Op = make_unique<ARMOperand>(k_PostIndexRegister);
Op->PostIdxReg.RegNum = RegNum;
Op->PostIdxReg.isAdd = isAdd;
Op->PostIdxReg.ShiftTy = ShiftTy;
@@ -2466,33 +2702,35 @@ public:
return Op;
}
- static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
- ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
+ static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
+ SMLoc S) {
+ auto Op = make_unique<ARMOperand>(k_MemBarrierOpt);
Op->MBOpt.Val = Opt;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
- static ARMOperand *CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt,
- SMLoc S) {
- ARMOperand *Op = new ARMOperand(k_InstSyncBarrierOpt);
+ static std::unique_ptr<ARMOperand>
+ CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
+ auto Op = make_unique<ARMOperand>(k_InstSyncBarrierOpt);
Op->ISBOpt.Val = Opt;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
- static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
- ARMOperand *Op = new ARMOperand(k_ProcIFlags);
+ static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
+ SMLoc S) {
+ auto Op = make_unique<ARMOperand>(k_ProcIFlags);
Op->IFlags.Val = IFlags;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
- static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
- ARMOperand *Op = new ARMOperand(k_MSRMask);
+ static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
+ auto Op = make_unique<ARMOperand>(k_MSRMask);
Op->MMask.Val = MMask;
Op->StartLoc = S;
Op->EndLoc = S;
@@ -2696,11 +2934,11 @@ int ARMAsmParser::tryParseRegister() {
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
-int ARMAsmParser::tryParseShiftRegister(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
- assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
+ if (Tok.isNot(AsmToken::Identifier))
+ return -1;
std::string lowerCase = Tok.getString().lower();
ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
@@ -2720,7 +2958,8 @@ int ARMAsmParser::tryParseShiftRegister(
// The source register for the shift has already been added to the
// operand list, so we need to pop it off and combine it into the shifted
// register operand instead.
- OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
+ std::unique_ptr<ARMOperand> PrevOp(
+ (ARMOperand *)Operands.pop_back_val().release());
if (!PrevOp->isReg())
return Error(PrevOp->getStartLoc(), "shift must be of a register");
int SrcReg = PrevOp->getReg();
@@ -2739,7 +2978,7 @@ int ARMAsmParser::tryParseShiftRegister(
Parser.getTok().is(AsmToken::Dollar)) {
Parser.Lex(); // Eat hash.
SMLoc ImmLoc = Parser.getTok().getLoc();
- const MCExpr *ShiftExpr = 0;
+ const MCExpr *ShiftExpr = nullptr;
if (getParser().parseExpression(ShiftExpr, EndLoc)) {
Error(ImmLoc, "invalid immediate shift value");
return -1;
@@ -2769,12 +3008,12 @@ int ARMAsmParser::tryParseShiftRegister(
EndLoc = Parser.getTok().getEndLoc();
ShiftReg = tryParseRegister();
if (ShiftReg == -1) {
- Error (L, "expected immediate or register in shift operand");
+ Error(L, "expected immediate or register in shift operand");
return -1;
}
} else {
- Error (Parser.getTok().getLoc(),
- "expected immediate or register in shift operand");
+ Error(Parser.getTok().getLoc(),
+ "expected immediate or register in shift operand");
return -1;
}
}
@@ -2797,8 +3036,7 @@ int ARMAsmParser::tryParseShiftRegister(
///
/// TODO this is likely to change to allow different register types and or to
/// parse for a specific register type.
-bool ARMAsmParser::
-tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
const AsmToken &RegTok = Parser.getTok();
int RegNo = tryParseRegister();
if (RegNo == -1)
@@ -2844,17 +3082,25 @@ tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
}
/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
-/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
-/// "c5", ...
+/// instruction with a symbolic operand name.
+/// We accept "crN" syntax for GAS compatibility.
+/// <operand-name> ::= <prefix><number>
+/// If CoprocOp is 'c', then:
+/// <prefix> ::= c | cr
+/// If CoprocOp is 'p', then :
+/// <prefix> ::= p
+/// <number> ::= integer in range [0, 15]
static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
// Use the same layout as the tablegen'erated register name matcher. Ugly,
// but efficient.
+ if (Name.size() < 2 || Name[0] != CoprocOp)
+ return -1;
+ Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
+
switch (Name.size()) {
default: return -1;
- case 2:
- if (Name[0] != CoprocOp)
- return -1;
- switch (Name[1]) {
+ case 1:
+ switch (Name[0]) {
default: return -1;
case '0': return 0;
case '1': return 1;
@@ -2867,10 +3113,10 @@ static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
case '8': return 8;
case '9': return 9;
}
- case 3:
- if (Name[0] != CoprocOp || Name[1] != '1')
+ case 2:
+ if (Name[0] != '1')
return -1;
- switch (Name[2]) {
+ switch (Name[1]) {
default: return -1;
// p10 and p11 are invalid for coproc instructions (reserved for FP/NEON)
case '0': return CoprocOp == 'p'? -1: 10;
@@ -2884,8 +3130,8 @@ static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
}
/// parseITCondCode - Try to parse a condition code for an IT instruction.
-ARMAsmParser::OperandMatchResultTy ARMAsmParser::
-parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ARMAsmParser::OperandMatchResultTy
+ARMAsmParser::parseITCondCode(OperandVector &Operands) {
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
if (!Tok.is(AsmToken::Identifier))
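
[Editor's note] The rewritten MatchCoprocessorOperandName in the hunks above now strips an optional "r" after the prefix, so GAS-style names such as "cr5" are accepted alongside "c5", while "p10"/"p11" stay rejected for coprocessor instructions. A minimal standalone sketch of that matching logic follows; matchCoprocOperandName is a hypothetical helper written for illustration (std::string in place of StringRef), not the LLVM function itself.

#include <cassert>
#include <string>

// Sketch of the coprocessor operand-name matching from the hunk above:
// "cN"/"crN" when Prefix is 'c', "pN" when Prefix is 'p', N in [0,15],
// with p10/p11 rejected because they are reserved for FP/NEON.
static int matchCoprocOperandName(std::string Name, char Prefix) {
  if (Name.size() < 2 || Name[0] != Prefix)
    return -1;
  // "cr5" is accepted as a GAS-compatible spelling of "c5".
  Name = (Name[1] == 'r') ? Name.substr(2) : Name.substr(1);
  if (Name.empty() || Name.size() > 2)
    return -1;
  if (Name.size() == 2 && Name[0] != '1')
    return -1; // two-digit names can only be 10..15
  for (char C : Name)
    if (C < '0' || C > '9')
      return -1;
  int Val = std::stoi(Name);
  if (Val > 15)
    return -1;
  if (Prefix == 'p' && (Val == 10 || Val == 11))
    return -1; // reserved for FP/NEON coprocessor space
  return Val;
}

int main() {
  assert(matchCoprocOperandName("c5", 'c') == 5);
  assert(matchCoprocOperandName("cr15", 'c') == 15);
  assert(matchCoprocOperandName("p7", 'p') == 7);
  assert(matchCoprocOperandName("p10", 'p') == -1); // reserved
  assert(matchCoprocOperandName("c16", 'c') == -1); // out of range
  return 0;
}
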
@@ -2921,8 +3167,8 @@ parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
/// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
/// token must be an Identifier when called, and if it is a coprocessor
/// number, the token is eaten and the operand is added to the operand list.
-ARMAsmParser::OperandMatchResultTy ARMAsmParser::
-parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ARMAsmParser::OperandMatchResultTy
+ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier))
@@ -2940,8 +3186,8 @@ parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
/// token must be an Identifier when called, and if it is a coprocessor
/// number, the token is eaten and the operand is added to the operand list.
-ARMAsmParser::OperandMatchResultTy ARMAsmParser::
-parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ARMAsmParser::OperandMatchResultTy
+ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier))
@@ -2958,8 +3204,8 @@ parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
/// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
/// coproc_option : '{' imm0_255 '}'
-ARMAsmParser::OperandMatchResultTy ARMAsmParser::
-parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ARMAsmParser::OperandMatchResultTy
+ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
SMLoc S = Parser.getTok().getLoc();
// If this isn't a '{', this isn't a coprocessor immediate operand.
@@ -3036,8 +3282,7 @@ static unsigned getDRegFromQReg(unsigned QReg) {
}
/// Parse a register list.
-bool ARMAsmParser::
-parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+bool ARMAsmParser::parseRegisterList(OperandVector &Operands) {
assert(Parser.getTok().is(AsmToken::LCurly) &&
"Token is not a Left Curly Brace");
SMLoc S = Parser.getTok().getLoc();
@@ -3218,8 +3463,8 @@ parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
}
// parse a vector register list
-ARMAsmParser::OperandMatchResultTy ARMAsmParser::
-parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ARMAsmParser::OperandMatchResultTy
+ARMAsmParser::parseVectorList(OperandVector &Operands) {
VectorLaneTy LaneKind;
unsigned LaneIndex;
SMLoc S = Parser.getTok().getLoc();
@@ -3469,8 +3714,8 @@ parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
}
/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
-ARMAsmParser::OperandMatchResultTy ARMAsmParser::
-parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ARMAsmParser::OperandMatchResultTy
+ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
unsigned Opt;
@@ -3518,7 +3763,7 @@ parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
Error(Loc, "illegal expression");
return MatchOperand_ParseFail;
}
-
+
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
if (!CE) {
Error(Loc, "constant expression expected");
@@ -3540,8 +3785,8 @@ parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
}
/// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
-ARMAsmParser::OperandMatchResultTy ARMAsmParser::
-parseInstSyncBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ARMAsmParser::OperandMatchResultTy
+ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
unsigned Opt;
@@ -3591,8 +3836,8 @@ parseInstSyncBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
-ARMAsmParser::OperandMatchResultTy ARMAsmParser::
-parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ARMAsmParser::OperandMatchResultTy
+ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
if (!Tok.is(AsmToken::Identifier))
@@ -3625,8 +3870,8 @@ parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
}
/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
-ARMAsmParser::OperandMatchResultTy ARMAsmParser::
-parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ARMAsmParser::OperandMatchResultTy
+ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
if (!Tok.is(AsmToken::Identifier))
@@ -3753,9 +3998,9 @@ parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
return MatchOperand_Success;
}
-ARMAsmParser::OperandMatchResultTy ARMAsmParser::
-parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
- int Low, int High) {
+ARMAsmParser::OperandMatchResultTy
+ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low,
+ int High) {
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier)) {
Error(Parser.getTok().getLoc(), Op + " operand expected.");
@@ -3801,8 +4046,8 @@ parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
return MatchOperand_Success;
}
-ARMAsmParser::OperandMatchResultTy ARMAsmParser::
-parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ARMAsmParser::OperandMatchResultTy
+ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
const AsmToken &Tok = Parser.getTok();
SMLoc S = Tok.getLoc();
if (Tok.isNot(AsmToken::Identifier)) {
@@ -3830,8 +4075,8 @@ parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
/// lsl #n 'n' in [0,31]
/// asr #n 'n' in [1,32]
/// n == 32 encoded as n == 0.
-ARMAsmParser::OperandMatchResultTy ARMAsmParser::
-parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ARMAsmParser::OperandMatchResultTy
+ARMAsmParser::parseShifterImm(OperandVector &Operands) {
const AsmToken &Tok = Parser.getTok();
SMLoc S = Tok.getLoc();
if (Tok.isNot(AsmToken::Identifier)) {
@@ -3900,8 +4145,8 @@ parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
/// of instructions. Legal values are:
/// ror #n 'n' in {0, 8, 16, 24}
-ARMAsmParser::OperandMatchResultTy ARMAsmParser::
-parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ARMAsmParser::OperandMatchResultTy
+ARMAsmParser::parseRotImm(OperandVector &Operands) {
const AsmToken &Tok = Parser.getTok();
SMLoc S = Tok.getLoc();
if (Tok.isNot(AsmToken::Identifier))
@@ -3946,8 +4191,8 @@ parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
return MatchOperand_Success;
}
-ARMAsmParser::OperandMatchResultTy ARMAsmParser::
-parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ARMAsmParser::OperandMatchResultTy
+ARMAsmParser::parseBitfield(OperandVector &Operands) {
SMLoc S = Parser.getTok().getLoc();
// The bitfield descriptor is really two operands, the LSB and the width.
if (Parser.getTok().isNot(AsmToken::Hash) &&
@@ -4014,8 +4259,8 @@ parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
return MatchOperand_Success;
}
-ARMAsmParser::OperandMatchResultTy ARMAsmParser::
-parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ARMAsmParser::OperandMatchResultTy
+ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
// Check for a post-index addressing register operand. Specifically:
// postidx_reg := '+' register {, shift}
// | '-' register {, shift}
@@ -4063,8 +4308,8 @@ parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
return MatchOperand_Success;
}
-ARMAsmParser::OperandMatchResultTy ARMAsmParser::
-parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ARMAsmParser::OperandMatchResultTy
+ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
// Check for a post-index addressing register operand. Specifically:
// am3offset := '+' register
// | '-' register
@@ -4117,7 +4362,7 @@ parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
isAdd = false;
haveEaten = true;
}
-
+
Tok = Parser.getTok();
int Reg = tryParseRegister();
if (Reg == -1) {
@@ -4136,26 +4381,24 @@ parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
/// Convert parsed operands to MCInst. Needed here because this instruction
/// only has two register operands, but multiplication is commutative so
/// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
-void ARMAsmParser::
-cvtThumbMultiply(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
- ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
+void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
+ const OperandVector &Operands) {
+ ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
+ ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
// If we have a three-operand form, make sure to set Rn to be the operand
// that isn't the same as Rd.
unsigned RegOp = 4;
if (Operands.size() == 6 &&
- ((ARMOperand*)Operands[4])->getReg() ==
- ((ARMOperand*)Operands[3])->getReg())
+ ((ARMOperand &)*Operands[4]).getReg() ==
+ ((ARMOperand &)*Operands[3]).getReg())
RegOp = 5;
- ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
+ ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
Inst.addOperand(Inst.getOperand(0));
- ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
+ ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
}
-void ARMAsmParser::
-cvtThumbBranches(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
+ const OperandVector &Operands) {
int CondOp = -1, ImmOp = -1;
switch(Inst.getOpcode()) {
case ARM::tB:
@@ -4178,7 +4421,7 @@ cvtThumbBranches(MCInst &Inst,
} else {
// outside IT blocks we can only have unconditional branches with AL
// condition code or conditional branches with non-AL condition code
- unsigned Cond = static_cast<ARMOperand*>(Operands[CondOp])->getCondCode();
+ unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
switch(Inst.getOpcode()) {
case ARM::tB:
case ARM::tBcc:
@@ -4190,32 +4433,31 @@ cvtThumbBranches(MCInst &Inst,
break;
}
}
-
+
// now decide on encoding size based on branch target range
switch(Inst.getOpcode()) {
// classify tB as either t2B or t1B based on range of immediate operand
case ARM::tB: {
- ARMOperand* op = static_cast<ARMOperand*>(Operands[ImmOp]);
- if(!op->isSignedOffset<11, 1>() && isThumbTwo())
+ ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
+ if (!op.isSignedOffset<11, 1>() && isThumbTwo())
Inst.setOpcode(ARM::t2B);
break;
}
// classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
case ARM::tBcc: {
- ARMOperand* op = static_cast<ARMOperand*>(Operands[ImmOp]);
- if(!op->isSignedOffset<8, 1>() && isThumbTwo())
+ ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
+ if (!op.isSignedOffset<8, 1>() && isThumbTwo())
Inst.setOpcode(ARM::t2Bcc);
break;
}
}
- ((ARMOperand*)Operands[ImmOp])->addImmOperands(Inst, 1);
- ((ARMOperand*)Operands[CondOp])->addCondCodeOperands(Inst, 2);
+ ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
+ ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
}
/// Parse an ARM memory expression, return false if successful else return true
/// or an error. The first token must be a '[' when called.
-bool ARMAsmParser::
-parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+bool ARMAsmParser::parseMemory(OperandVector &Operands) {
SMLoc S, E;
assert(Parser.getTok().is(AsmToken::LBrac) &&
"Token is not a Left Bracket");
@@ -4237,8 +4479,9 @@ parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
E = Tok.getEndLoc();
Parser.Lex(); // Eat right bracket token.
- Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
- 0, 0, false, S, E));
+ Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
+ ARM_AM::no_shift, 0, 0, false,
+ S, E));
// If there's a pre-indexing writeback marker, '!', just add it as a token
// operand. It's rather odd, but syntactically valid.
@@ -4260,6 +4503,7 @@ parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
if (Parser.getTok().is(AsmToken::Colon)) {
Parser.Lex(); // Eat the ':'.
E = Parser.getTok().getLoc();
+ SMLoc AlignmentLoc = Tok.getLoc();
const MCExpr *Expr;
if (getParser().parseExpression(Expr))
@@ -4292,9 +4536,9 @@ parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Don't worry about range checking the value here. That's handled by
// the is*() predicates.
- Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
+ Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
ARM_AM::no_shift, 0, Align,
- false, S, E));
+ false, S, E, AlignmentLoc));
// If there's a pre-indexing writeback marker, '!', just add it as a token
// operand.
@@ -4385,7 +4629,7 @@ parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
E = Parser.getTok().getEndLoc();
Parser.Lex(); // Eat right bracket token.
- Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
+ Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
ShiftType, ShiftImm, 0, isNegative,
S, E));
@@ -4463,8 +4707,8 @@ bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
}
/// parseFPImm - A floating point immediate expression operand.
-ARMAsmParser::OperandMatchResultTy ARMAsmParser::
-parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ARMAsmParser::OperandMatchResultTy
+ARMAsmParser::parseFPImm(OperandVector &Operands) {
// Anything that can accept a floating point constant as an operand
// needs to go through here, as the regular parseExpression is
// integer only.
@@ -4490,9 +4734,13 @@ parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// integer constant. Make sure we don't try to parse an FPImm
// for these:
// vmov.i{8|16|32|64} <dreg|qreg>, #imm
- ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
- if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
- TyOp->getToken() != ".f64"))
+ ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
+ bool isVmovf = TyOp.isToken() &&
+ (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64");
+ ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
+ bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
+ Mnemonic.getToken() == "fconsts");
+ if (!(isVmovf || isFconst))
return MatchOperand_NoMatch;
Parser.Lex(); // Eat '#' or '$'.
@@ -4505,7 +4753,7 @@ parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
}
const AsmToken &Tok = Parser.getTok();
SMLoc Loc = Tok.getLoc();
- if (Tok.is(AsmToken::Real)) {
+ if (Tok.is(AsmToken::Real) && isVmovf) {
APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
// If we had a '-' in front, toggle the sign bit.
@@ -4518,15 +4766,16 @@ parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
}
// Also handle plain integers. Instructions which allow floating point
// immediates also allow a raw encoded 8-bit value.
- if (Tok.is(AsmToken::Integer)) {
+ if (Tok.is(AsmToken::Integer) && isFconst) {
int64_t Val = Tok.getIntVal();
Parser.Lex(); // Eat the token.
if (Val > 255 || Val < 0) {
Error(Loc, "encoded floating point value out of range");
return MatchOperand_ParseFail;
}
- double RealVal = ARM_AM::getFPImmFloat(Val);
- Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
+ float RealVal = ARM_AM::getFPImmFloat(Val);
+ Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
+
Operands.push_back(ARMOperand::CreateImm(
MCConstantExpr::Create(Val, getContext()), S,
Parser.getTok().getLoc()));
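
[Editor's note] The change above routes the raw-integer form (fconsts/fconstd with an encoded 8-bit value) through ARM_AM::getFPImmFloat as a float rather than a double before re-encoding. The 8-bit VFP immediate packs a sign bit, a 3-bit exponent and a 4-bit mantissa. Below is a rough standalone decoder for that encoding, re-derived from the ARM ARM VFPExpandImm pseudocode; decodeVFPImm8 is an illustrative name, and the exponent split is an assumption that it matches the LLVM helper.

#include <cassert>
#include <cmath>
#include <cstdint>

// Decode the VFP/NEON 8-bit floating-point immediate: bit 7 = sign,
// bits 6:4 = exponent, bits 3:0 = mantissa. Representable values are
// +/- (16..31)/16 * 2^n with n in [-3, 4].
static float decodeVFPImm8(uint8_t Imm) {
  int Sign = (Imm >> 7) & 0x1;
  int Exp = (Imm >> 4) & 0x7;
  int Mant = Imm & 0xF;
  // Exponent bit 2 selects the [-3, 0] or [1, 4] range (assumed to mirror
  // ARM_AM::getFPImmFloat).
  int E = (Exp & 0x4) ? (Exp & 0x3) - 3 : (Exp & 0x3) + 1;
  float Val = (1.0f + Mant / 16.0f) * std::ldexp(1.0f, E);
  return Sign ? -Val : Val;
}

int main() {
  assert(decodeVFPImm8(0x00) == 2.0f);  // imm8 == 0 decodes to 2.0
  assert(decodeVFPImm8(0x70) == 1.0f);  // 0x70 encodes +1.0
  assert(decodeVFPImm8(0xF0) == -1.0f); // sign bit set
  return 0;
}
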
@@ -4539,8 +4788,7 @@ parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
/// Parse a arm instruction operand. For now this parses the operand regardless
/// of the mnemonic.
-bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- StringRef Mnemonic) {
+bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
SMLoc S, E;
// Check if the current operand has a custom associated parser, if so, try to
@@ -4623,7 +4871,7 @@ bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
// There can be a trailing '!' on operands that we want as a separate
- // '!' Token operand. Handle that here. For example, the compatibilty
+ // '!' Token operand. Handle that here. For example, the compatibility
// alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
if (Parser.getTok().is(AsmToken::Exclaim)) {
Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
@@ -4653,6 +4901,20 @@ bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
return false;
}
+ case AsmToken::Equal: {
+ if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
+ return Error(Parser.getTok().getLoc(), "unexpected token in operand");
+
+ Parser.Lex(); // Eat '='
+ const MCExpr *SubExprVal;
+ if (getParser().parseExpression(SubExprVal))
+ return true;
+ E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+
+ const MCExpr *CPLoc = getTargetStreamer().addConstantPoolEntry(SubExprVal);
+ Operands.push_back(ARMOperand::CreateImm(CPLoc, S, E));
+ return false;
+ }
}
}
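
[Editor's note] The new AsmToken::Equal case above implements the "ldr rX, =expr" pseudo-instruction: the expression is handed to the target streamer, which records it in a literal pool and returns a reference to the pool slot, and the pool is later flushed by ".ltorg"/".pool" (handled further down in ParseDirective). The following is a toy model of that bookkeeping only; the ConstantPool class, addConstantPoolEntry signature and index-as-label convention are illustrative assumptions, not the LLVM streamer API.

#include <cassert>
#include <cstdint>
#include <vector>

// Toy literal pool: addConstantPoolEntry records a value and returns the
// index (standing in for the label) the ldr pseudo will load from;
// emitPool flushes everything, as ".ltorg"/".pool" would.
class ConstantPool {
  std::vector<uint32_t> Entries;
public:
  size_t addConstantPoolEntry(uint32_t Value) {
    Entries.push_back(Value);
    return Entries.size() - 1;
  }
  std::vector<uint32_t> emitPool() {
    std::vector<uint32_t> Out;
    Out.swap(Entries); // pool is empty again after the flush
    return Out;
  }
  bool empty() const { return Entries.empty(); }
};

int main() {
  ConstantPool Pool;
  // "ldr r0, =0x12345678" -> slot 0; "ldr r1, =42" -> slot 1.
  size_t A = Pool.addConstantPoolEntry(0x12345678);
  size_t B = Pool.addConstantPoolEntry(42);
  assert(A == 0 && B == 1);
  std::vector<uint32_t> Flushed = Pool.emitPool(); // ".ltorg"
  assert(Flushed.size() == 2 && Pool.empty());
  return 0;
}
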
@@ -4661,6 +4923,10 @@ bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
RefKind = ARMMCExpr::VK_ARM_None;
+ // consume an optional '#' (GNU compatibility)
+ if (getLexer().is(AsmToken::Hash))
+ Parser.Lex();
+
// :lower16: and :upper16: modifiers
assert(getLexer().is(AsmToken::Colon) && "expected a :");
Parser.Lex(); // Eat ':'
@@ -4763,7 +5029,7 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
- Mnemonic == "vfms" || Mnemonic == "vfnms" ||
+ Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" ||
(Mnemonic == "movs" && isThumb()))) {
Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
CarrySetting = true;
@@ -4817,8 +5083,9 @@ getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
- Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic.startswith("crc32") ||
- Mnemonic.startswith("cps") || Mnemonic.startswith("vsel") ||
+ Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
+ Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") ||
+ Mnemonic.startswith("vsel") ||
Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || Mnemonic == "vcvta" ||
Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || Mnemonic == "vcvtm" ||
Mnemonic == "vrinta" || Mnemonic == "vrintn" || Mnemonic == "vrintp" ||
@@ -4847,7 +5114,7 @@ getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
}
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandVector &Operands) {
// FIXME: This is all horribly hacky. We really need a better way to deal
// with optional operands like this in the matcher table.
@@ -4860,17 +5127,17 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
// conditionally adding the cc_out in the first place because we need
// to check the type of the parsed immediate operand.
if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
- !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
- static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
- static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
+ !static_cast<ARMOperand &>(*Operands[4]).isARMSOImm() &&
+ static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
+ static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
return true;
// Register-register 'add' for thumb does not have a cc_out operand
// when there are only two register operands.
if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
- static_cast<ARMOperand*>(Operands[3])->isReg() &&
- static_cast<ARMOperand*>(Operands[4])->isReg() &&
- static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
+ static_cast<ARMOperand &>(*Operands[3]).isReg() &&
+ static_cast<ARMOperand &>(*Operands[4]).isReg() &&
+ static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
return true;
// Register-register 'add' for thumb does not have a cc_out operand
// when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
@@ -4878,13 +5145,12 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
// that can handle a different range and has a cc_out operand.
if (((isThumb() && Mnemonic == "add") ||
(isThumbTwo() && Mnemonic == "sub")) &&
- Operands.size() == 6 &&
- static_cast<ARMOperand*>(Operands[3])->isReg() &&
- static_cast<ARMOperand*>(Operands[4])->isReg() &&
- static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
- static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
- ((Mnemonic == "add" &&static_cast<ARMOperand*>(Operands[5])->isReg()) ||
- static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
+ Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
+ static_cast<ARMOperand &>(*Operands[4]).isReg() &&
+ static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
+ static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
+ ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
+ static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
return true;
// For Thumb2, add/sub immediate does not have a cc_out operand for the
// imm0_4095 variant. That's the least-preferred variant when
@@ -4892,23 +5158,22 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
// should remove the cc_out operand, we have to explicitly check that
// it's not one of the other variants. Ugh.
if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
- Operands.size() == 6 &&
- static_cast<ARMOperand*>(Operands[3])->isReg() &&
- static_cast<ARMOperand*>(Operands[4])->isReg() &&
- static_cast<ARMOperand*>(Operands[5])->isImm()) {
+ Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
+ static_cast<ARMOperand &>(*Operands[4]).isReg() &&
+ static_cast<ARMOperand &>(*Operands[5]).isImm()) {
// Nest conditions rather than one big 'if' statement for readability.
//
// If both registers are low, we're in an IT block, and the immediate is
// in range, we should use encoding T1 instead, which has a cc_out.
if (inITBlock() &&
- isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
- isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
- static_cast<ARMOperand*>(Operands[5])->isImm0_7())
+ isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
+ isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
+ static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
return false;
// Check against T3. If the second register is the PC, this is an
// alternate form of ADR, which uses encoding T4, so check for that too.
- if (static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
- static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
+ if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
+ static_cast<ARMOperand &>(*Operands[5]).isT2SOImm())
return false;
// Otherwise, we use encoding T4, which does not have a cc_out
@@ -4920,35 +5185,34 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
// if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
// use the 16-bit encoding or not.
if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
- static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
- static_cast<ARMOperand*>(Operands[3])->isReg() &&
- static_cast<ARMOperand*>(Operands[4])->isReg() &&
- static_cast<ARMOperand*>(Operands[5])->isReg() &&
+ static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
+ static_cast<ARMOperand &>(*Operands[3]).isReg() &&
+ static_cast<ARMOperand &>(*Operands[4]).isReg() &&
+ static_cast<ARMOperand &>(*Operands[5]).isReg() &&
// If the registers aren't low regs, the destination reg isn't the
// same as one of the source regs, or the cc_out operand is zero
// outside of an IT block, we have to use the 32-bit encoding, so
// remove the cc_out operand.
- (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
- !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
- !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
- !inITBlock() ||
- (static_cast<ARMOperand*>(Operands[3])->getReg() !=
- static_cast<ARMOperand*>(Operands[5])->getReg() &&
- static_cast<ARMOperand*>(Operands[3])->getReg() !=
- static_cast<ARMOperand*>(Operands[4])->getReg())))
+ (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
+ !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
+ !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
+ !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
+ static_cast<ARMOperand &>(*Operands[5]).getReg() &&
+ static_cast<ARMOperand &>(*Operands[3]).getReg() !=
+ static_cast<ARMOperand &>(*Operands[4]).getReg())))
return true;
// Also check the 'mul' syntax variant that doesn't specify an explicit
// destination register.
if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
- static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
- static_cast<ARMOperand*>(Operands[3])->isReg() &&
- static_cast<ARMOperand*>(Operands[4])->isReg() &&
+ static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
+ static_cast<ARMOperand &>(*Operands[3]).isReg() &&
+ static_cast<ARMOperand &>(*Operands[4]).isReg() &&
// If the registers aren't low regs or the cc_out operand is zero
// outside of an IT block, we have to use the 32-bit encoding, so
// remove the cc_out operand.
- (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
- !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
+ (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
+ !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
!inITBlock()))
return true;
@@ -4961,32 +5225,32 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
// anyway.
if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
(Operands.size() == 5 || Operands.size() == 6) &&
- static_cast<ARMOperand*>(Operands[3])->isReg() &&
- static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
- static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
- (static_cast<ARMOperand*>(Operands[4])->isImm() ||
+ static_cast<ARMOperand &>(*Operands[3]).isReg() &&
+ static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
+ static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
+ (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
(Operands.size() == 6 &&
- static_cast<ARMOperand*>(Operands[5])->isImm())))
+ static_cast<ARMOperand &>(*Operands[5]).isImm())))
return true;
return false;
}
-bool ARMAsmParser::shouldOmitPredicateOperand(
- StringRef Mnemonic, SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
+ OperandVector &Operands) {
// VRINT{Z, R, X} have a predicate operand in VFP, but not in NEON
unsigned RegIdx = 3;
if ((Mnemonic == "vrintz" || Mnemonic == "vrintx" || Mnemonic == "vrintr") &&
- static_cast<ARMOperand *>(Operands[2])->getToken() == ".f32") {
- if (static_cast<ARMOperand *>(Operands[3])->isToken() &&
- static_cast<ARMOperand *>(Operands[3])->getToken() == ".f32")
+ static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32") {
+ if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
+ static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32")
RegIdx = 4;
- if (static_cast<ARMOperand *>(Operands[RegIdx])->isReg() &&
- (ARMMCRegisterClasses[ARM::DPRRegClassID]
- .contains(static_cast<ARMOperand *>(Operands[RegIdx])->getReg()) ||
- ARMMCRegisterClasses[ARM::QPRRegClassID]
- .contains(static_cast<ARMOperand *>(Operands[RegIdx])->getReg())))
+ if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
+ (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
+ static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
+ ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
+ static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
return true;
}
return false;
@@ -5009,12 +5273,39 @@ static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
}
static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features,
unsigned VariantID);
+
+static bool RequiresVFPRegListValidation(StringRef Inst,
+ bool &AcceptSinglePrecisionOnly,
+ bool &AcceptDoublePrecisionOnly) {
+ if (Inst.size() < 7)
+ return false;
+
+ if (Inst.startswith("fldm") || Inst.startswith("fstm")) {
+ StringRef AddressingMode = Inst.substr(4, 2);
+ if (AddressingMode == "ia" || AddressingMode == "db" ||
+ AddressingMode == "ea" || AddressingMode == "fd") {
+ AcceptSinglePrecisionOnly = Inst[6] == 's';
+ AcceptDoublePrecisionOnly = Inst[6] == 'd' || Inst[6] == 'x';
+ return true;
+ }
+ }
+
+ return false;
+}
+
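
[Editor's note] RequiresVFPRegListValidation above keys off the mnemonic spelling alone: "fldm"/"fstm" followed by an addressing mode (ia, db, ea, fd) and a precision suffix, where "s" demands an SPR list and "d"/"x" a DPR list. A standalone restatement of that check with a few spot tests; needsVFPRegListCheck is an illustrative name and std::string stands in for StringRef.

#include <cassert>
#include <string>

// Mirror of the mnemonic test in RequiresVFPRegListValidation: returns true
// for fldm/fstm variants and reports which register-list precision they need.
static bool needsVFPRegListCheck(const std::string &Inst, bool &SingleOnly,
                                 bool &DoubleOnly) {
  if (Inst.size() < 7)
    return false;
  if (Inst.compare(0, 4, "fldm") != 0 && Inst.compare(0, 4, "fstm") != 0)
    return false;
  std::string Mode = Inst.substr(4, 2);
  if (Mode != "ia" && Mode != "db" && Mode != "ea" && Mode != "fd")
    return false;
  SingleOnly = Inst[6] == 's';
  DoubleOnly = Inst[6] == 'd' || Inst[6] == 'x';
  return true;
}

int main() {
  bool S = false, D = false;
  assert(needsVFPRegListCheck("fldmias", S, D) && S && !D);
  assert(needsVFPRegListCheck("fstmdbd", S, D) && !S && D);
  assert(!needsVFPRegListCheck("fldm", S, D));   // too short
  assert(!needsVFPRegListCheck("vldmia", S, D)); // not fldm/fstm
  return 0;
}
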
/// Parse an arm instruction mnemonic followed by its operands.
bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
- SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ SMLoc NameLoc, OperandVector &Operands) {
+ // FIXME: Can this be done via tablegen in some fashion?
+ bool RequireVFPRegisterListCheck;
+ bool AcceptSinglePrecisionOnly;
+ bool AcceptDoublePrecisionOnly;
+ RequireVFPRegisterListCheck =
+ RequiresVFPRegListValidation(Name, AcceptSinglePrecisionOnly,
+ AcceptDoublePrecisionOnly);
+
// Apply mnemonic aliases before doing anything else, as the destination
- // mnemnonic may include suffices and we want to handle them normally.
+ // mnemonic may include suffices and we want to handle them normally.
// The generic tblgen'erated code does this later, at the start of
// MatchInstructionImpl(), but that's too late for aliases that include
// any sort of suffix.
@@ -5141,6 +5432,7 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
// For for ARM mode generate an error if the .n qualifier is used.
if (ExtraToken == ".n" && !isThumb()) {
SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
+ Parser.eatToEndOfStatement();
return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
"arm mode");
}
@@ -5181,6 +5473,16 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
Parser.Lex(); // Consume the EndOfStatement
+ if (RequireVFPRegisterListCheck) {
+ ARMOperand &Op = static_cast<ARMOperand &>(*Operands.back());
+ if (AcceptSinglePrecisionOnly && !Op.isSPRRegList())
+ return Error(Op.getStartLoc(),
+ "VFP/Neon single precision register expected");
+ if (AcceptDoublePrecisionOnly && !Op.isDPRRegList())
+ return Error(Op.getStartLoc(),
+ "VFP/Neon double precision register expected");
+ }
+
// Some instructions, mostly Thumb, have forms for the same mnemonic that
// do and don't have a cc_out optional-def operand. With some spot-checks
// of the operand list, we can figure out which variant we're trying to
@@ -5188,20 +5490,14 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
// try to remove a cc_out operand that was explicitly set on the the
// mnemonic, of course (CarrySetting == true). Reason number #317 the
// table driven matcher doesn't fit well with the ARM instruction set.
- if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
- ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
+ if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
Operands.erase(Operands.begin() + 1);
- delete Op;
- }
// Some instructions have the same mnemonic, but don't always
// have a predicate. Distinguish them here and delete the
// predicate if needed.
- if (shouldOmitPredicateOperand(Mnemonic, Operands)) {
- ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
+ if (shouldOmitPredicateOperand(Mnemonic, Operands))
Operands.erase(Operands.begin() + 1);
- delete Op;
- }
// ARM mode 'blx' need special handling, as the register operand version
// is predicable, but the label operand version is not. So, we can't rely
@@ -5209,11 +5505,8 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
// a k_CondCode operand in the list. If we're trying to match the label
// version, remove the k_CondCode operand here.
if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
- static_cast<ARMOperand*>(Operands[2])->isImm()) {
- ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
+ static_cast<ARMOperand &>(*Operands[2]).isImm())
Operands.erase(Operands.begin() + 1);
- delete Op;
- }
// Adjust operands of ldrexd/strexd to MCK_GPRPair.
// ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
@@ -5226,32 +5519,50 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
Mnemonic == "stlexd")) {
bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
unsigned Idx = isLoad ? 2 : 3;
- ARMOperand* Op1 = static_cast<ARMOperand*>(Operands[Idx]);
- ARMOperand* Op2 = static_cast<ARMOperand*>(Operands[Idx+1]);
+ ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
+ ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID);
// Adjust only if Op1 and Op2 are GPRs.
- if (Op1->isReg() && Op2->isReg() && MRC.contains(Op1->getReg()) &&
- MRC.contains(Op2->getReg())) {
- unsigned Reg1 = Op1->getReg();
- unsigned Reg2 = Op2->getReg();
+ if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
+ MRC.contains(Op2.getReg())) {
+ unsigned Reg1 = Op1.getReg();
+ unsigned Reg2 = Op2.getReg();
unsigned Rt = MRI->getEncodingValue(Reg1);
unsigned Rt2 = MRI->getEncodingValue(Reg2);
// Rt2 must be Rt + 1 and Rt must be even.
if (Rt + 1 != Rt2 || (Rt & 1)) {
- Error(Op2->getStartLoc(), isLoad ?
- "destination operands must be sequential" :
- "source operands must be sequential");
+ Error(Op2.getStartLoc(), isLoad
+ ? "destination operands must be sequential"
+ : "source operands must be sequential");
return true;
}
unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0,
&(MRI->getRegClass(ARM::GPRPairRegClassID)));
- Operands.erase(Operands.begin() + Idx, Operands.begin() + Idx + 2);
- Operands.insert(Operands.begin() + Idx, ARMOperand::CreateReg(
- NewReg, Op1->getStartLoc(), Op2->getEndLoc()));
- delete Op1;
- delete Op2;
+ Operands[Idx] =
+ ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
+ Operands.erase(Operands.begin() + Idx + 1);
+ }
+ }
+
+ // GNU Assembler extension (compatibility)
+ if ((Mnemonic == "ldrd" || Mnemonic == "strd")) {
+ ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
+ ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
+ if (Op3.isMem()) {
+ assert(Op2.isReg() && "expected register argument");
+
+ unsigned SuperReg = MRI->getMatchingSuperReg(
+ Op2.getReg(), ARM::gsub_0, &MRI->getRegClass(ARM::GPRPairRegClassID));
+
+ assert(SuperReg && "expected register pair");
+
+ unsigned PairedReg = MRI->getSubReg(SuperReg, ARM::gsub_1);
+
+ Operands.insert(
+ Operands.begin() + 3,
+ ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
}
}
@@ -5261,19 +5572,13 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
// so the Mnemonic is the original name "subs" and delete the predicate
// operand so it will match the table entry.
if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
- static_cast<ARMOperand*>(Operands[3])->isReg() &&
- static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::PC &&
- static_cast<ARMOperand*>(Operands[4])->isReg() &&
- static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::LR &&
- static_cast<ARMOperand*>(Operands[5])->isImm()) {
- ARMOperand *Op0 = static_cast<ARMOperand*>(Operands[0]);
- Operands.erase(Operands.begin());
- delete Op0;
- Operands.insert(Operands.begin(), ARMOperand::CreateToken(Name, NameLoc));
-
- ARMOperand *Op1 = static_cast<ARMOperand*>(Operands[1]);
+ static_cast<ARMOperand &>(*Operands[3]).isReg() &&
+ static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
+ static_cast<ARMOperand &>(*Operands[4]).isReg() &&
+ static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
+ static_cast<ARMOperand &>(*Operands[5]).isImm()) {
+ Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
Operands.erase(Operands.begin() + 1);
- delete Op1;
}
return false;
}
@@ -5319,9 +5624,8 @@ static bool instIsBreakpoint(const MCInst &Inst) {
}
// FIXME: We would really like to be able to tablegen'erate this.
-bool ARMAsmParser::
-validateInstruction(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+bool ARMAsmParser::validateInstruction(MCInst &Inst,
+ const OperandVector &Operands) {
const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
SMLoc Loc = Operands[0]->getStartLoc();
@@ -5344,7 +5648,7 @@ validateInstruction(MCInst &Inst,
// Find the condition code Operand to get its SMLoc information.
SMLoc CondLoc;
for (unsigned I = 1; I < Operands.size(); ++I)
- if (static_cast<ARMOperand*>(Operands[I])->isCondCode())
+ if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
CondLoc = Operands[I]->getStartLoc();
return Error(CondLoc, "incorrect condition in IT block; got '" +
StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
@@ -5444,8 +5748,8 @@ validateInstruction(MCInst &Inst,
// in the register list.
unsigned Rn = Inst.getOperand(0).getReg();
bool HasWritebackToken =
- (static_cast<ARMOperand*>(Operands[3])->isToken() &&
- static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
+ (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
+ static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
bool ListContainsBase;
if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
@@ -5496,7 +5800,6 @@ validateInstruction(MCInst &Inst,
case ARM::sysSTMIB_UPD:
return Error(Operands[2]->getStartLoc(),
"system STM cannot have writeback register");
- break;
case ARM::tMUL: {
// The second source operand must be the same register as the destination
// operand.
@@ -5506,11 +5809,10 @@ validateInstruction(MCInst &Inst,
// this first statement is always true for the new Inst. Essentially, the
// destination is unconditionally copied into the second source operand
// without checking to see if it matches what we actually parsed.
- if (Operands.size() == 6 &&
- (((ARMOperand*)Operands[3])->getReg() !=
- ((ARMOperand*)Operands[5])->getReg()) &&
- (((ARMOperand*)Operands[3])->getReg() !=
- ((ARMOperand*)Operands[4])->getReg())) {
+ if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
+ ((ARMOperand &)*Operands[5]).getReg()) &&
+ (((ARMOperand &)*Operands[3]).getReg() !=
+ ((ARMOperand &)*Operands[4]).getReg())) {
return Error(Operands[3]->getStartLoc(),
"destination register must match source register");
}
@@ -5563,26 +5865,50 @@ validateInstruction(MCInst &Inst,
}
// Final range checking for Thumb unconditional branch instructions.
case ARM::tB:
- if (!(static_cast<ARMOperand*>(Operands[2]))->isSignedOffset<11, 1>())
+ if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
return Error(Operands[2]->getStartLoc(), "branch target out of range");
break;
case ARM::t2B: {
int op = (Operands[2]->isImm()) ? 2 : 3;
- if (!(static_cast<ARMOperand*>(Operands[op]))->isSignedOffset<24, 1>())
+ if (!static_cast<ARMOperand &>(*Operands[op]).isSignedOffset<24, 1>())
return Error(Operands[op]->getStartLoc(), "branch target out of range");
break;
}
// Final range checking for Thumb conditional branch instructions.
case ARM::tBcc:
- if (!(static_cast<ARMOperand*>(Operands[2]))->isSignedOffset<8, 1>())
+ if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
return Error(Operands[2]->getStartLoc(), "branch target out of range");
break;
case ARM::t2Bcc: {
int Op = (Operands[2]->isImm()) ? 2 : 3;
- if (!(static_cast<ARMOperand*>(Operands[Op]))->isSignedOffset<20, 1>())
+ if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
return Error(Operands[Op]->getStartLoc(), "branch target out of range");
break;
}
+ case ARM::MOVi16:
+ case ARM::t2MOVi16:
+ case ARM::t2MOVTi16:
+ {
+ // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
+ // especially when we turn it into a movw and the expression <symbol> does
+ // not have a :lower16: or :upper16 as part of the expression. We don't
+ // want the behavior of silently truncating, which can be unexpected and
+ // lead to bugs that are difficult to find since this is an easy mistake
+ // to make.
+ int i = (Operands[3]->isImm()) ? 3 : 4;
+ ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
+ if (CE) break;
+ const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
+ if (!E) break;
+ const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
+ if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
+ ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
+ return Error(
+ Op.getStartLoc(),
+ "immediate expression for mov requires :lower16: or :upper16");
+ break;
+ }
}
return false;
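
[Editor's note] The branch range checks above (isSignedOffset<11, 1> for tB, <8, 1> for tBcc, <24, 1> and <20, 1> for the Thumb2 forms) all ask the same question: is the immediate a multiple of 2^Scale, and does it fit in a signed Bits-bit field once that scale is applied? The small freestanding predicate below is written from the usage in this hunk rather than from the ARMOperand template itself, so treat the exact semantics as an assumption; the asserted ranges are the architectural Thumb branch ranges.

#include <cassert>
#include <cstdint>

// Rough equivalent of ARMOperand::isSignedOffset<Bits, Scale>(): the offset
// must be a multiple of 2^Scale and, after scaling, fit in Bits signed bits.
static bool isSignedOffset(int64_t Val, unsigned Bits, unsigned Scale) {
  const int64_t Align = int64_t(1) << Scale;
  if (Val % Align != 0)
    return false;
  const int64_t Max = ((int64_t(1) << (Bits - 1)) - 1) * Align;
  const int64_t Min = -(int64_t(1) << (Bits - 1)) * Align;
  return Val >= Min && Val <= Max;
}

int main() {
  // tB: 11 bits, scale 1 -> [-2048, +2046], even offsets only.
  assert(isSignedOffset(2046, 11, 1));
  assert(!isSignedOffset(2048, 11, 1)); // out of range
  assert(!isSignedOffset(5, 11, 1));    // not a multiple of 2
  // tBcc: 8 bits, scale 1 -> [-256, +254].
  assert(isSignedOffset(-256, 8, 1));
  assert(!isSignedOffset(-258, 8, 1));
  return 0;
}
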
@@ -5733,7 +6059,7 @@ static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
- case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPq16_UPD;
+ case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
@@ -5789,7 +6115,7 @@ static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
- case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNq16_UPD;
+ case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
@@ -5844,10 +6170,45 @@ static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
}
}
-bool ARMAsmParser::
-processInstruction(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+bool ARMAsmParser::processInstruction(MCInst &Inst,
+ const OperandVector &Operands) {
switch (Inst.getOpcode()) {
+ // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
+ case ARM::LDRT_POST:
+ case ARM::LDRBT_POST: {
+ const unsigned Opcode =
+ (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
+ : ARM::LDRBT_POST_IMM;
+ MCInst TmpInst;
+ TmpInst.setOpcode(Opcode);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ TmpInst.addOperand(MCOperand::CreateImm(0));
+ TmpInst.addOperand(Inst.getOperand(2));
+ TmpInst.addOperand(Inst.getOperand(3));
+ Inst = TmpInst;
+ return true;
+ }
+ // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
+ case ARM::STRT_POST:
+ case ARM::STRBT_POST: {
+ const unsigned Opcode =
+ (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
+ : ARM::STRBT_POST_IMM;
+ MCInst TmpInst;
+ TmpInst.setOpcode(Opcode);
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ TmpInst.addOperand(MCOperand::CreateImm(0));
+ TmpInst.addOperand(Inst.getOperand(2));
+ TmpInst.addOperand(Inst.getOperand(3));
+ Inst = TmpInst;
+ return true;
+ }
// Alias for alternate form of 'ADR Rd, #imm' instruction.
case ARM::ADDri: {
if (Inst.getOperand(1).getReg() != ARM::PC ||
@@ -5867,8 +6228,8 @@ processInstruction(MCInst &Inst,
// Select the narrow version if the immediate will fit.
if (Inst.getOperand(1).getImm() > 0 &&
Inst.getOperand(1).getImm() <= 0xff &&
- !(static_cast<ARMOperand*>(Operands[2])->isToken() &&
- static_cast<ARMOperand*>(Operands[2])->getToken() == ".w"))
+ !(static_cast<ARMOperand &>(*Operands[2]).isToken() &&
+ static_cast<ARMOperand &>(*Operands[2]).getToken() == ".w"))
Inst.setOpcode(ARM::tLDRpci);
else
Inst.setOpcode(ARM::t2LDRpci);
@@ -6958,8 +7319,8 @@ processInstruction(MCInst &Inst,
if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
- !(static_cast<ARMOperand*>(Operands[3])->isToken() &&
- static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) {
+ !(static_cast<ARMOperand &>(*Operands[3]).isToken() &&
+ static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w")) {
unsigned NewOpc;
switch (Inst.getOpcode()) {
default: llvm_unreachable("unexpected opcode");
@@ -7162,7 +7523,7 @@ processInstruction(MCInst &Inst,
case ARM::LDMIA_UPD:
// If this is a load of a single register via a 'pop', then we should use
// a post-indexed LDR instruction instead, per the ARM ARM.
- if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
+ if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
Inst.getNumOperands() == 5) {
MCInst TmpInst;
TmpInst.setOpcode(ARM::LDR_POST_IMM);
@@ -7180,7 +7541,7 @@ processInstruction(MCInst &Inst,
case ARM::STMDB_UPD:
// If this is a store of a single register via a 'push', then we should use
// a pre-indexed STR instruction instead, per the ARM ARM.
- if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
+ if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
Inst.getNumOperands() == 5) {
MCInst TmpInst;
TmpInst.setOpcode(ARM::STR_PRE_IMM);
@@ -7196,7 +7557,7 @@ processInstruction(MCInst &Inst,
case ARM::t2ADDri12:
// If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
// mnemonic was used (not "addw"), encoding T3 is preferred.
- if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
+ if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "add" ||
ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
break;
Inst.setOpcode(ARM::t2ADDri);
@@ -7205,7 +7566,7 @@ processInstruction(MCInst &Inst,
case ARM::t2SUBri12:
// If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
// mnemonic was used (not "subw"), encoding T3 is preferred.
- if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
+ if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "sub" ||
ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
break;
Inst.setOpcode(ARM::t2SUBri);
@@ -7241,9 +7602,9 @@ processInstruction(MCInst &Inst,
!isARMLowRegister(Inst.getOperand(0).getReg()) ||
(unsigned)Inst.getOperand(2).getImm() > 255 ||
((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
- (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
- (static_cast<ARMOperand*>(Operands[3])->isToken() &&
- static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
+ (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
+ (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
+ static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w"))
break;
MCInst TmpInst;
TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
@@ -7264,8 +7625,8 @@ processInstruction(MCInst &Inst,
// 'as' behaviour. Make sure the wide encoding wasn't explicit.
if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
Inst.getOperand(5).getReg() != 0 ||
- (static_cast<ARMOperand*>(Operands[3])->isToken() &&
- static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
+ (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
+ static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w"))
break;
MCInst TmpInst;
TmpInst.setOpcode(ARM::tADDhirr);
@@ -7322,8 +7683,8 @@ processInstruction(MCInst &Inst,
// an error in validateInstruction().
unsigned Rn = Inst.getOperand(0).getReg();
bool hasWritebackToken =
- (static_cast<ARMOperand*>(Operands[3])->isToken() &&
- static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
+ (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
+ static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
bool listContainsBase;
if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
(!listContainsBase && !hasWritebackToken) ||
@@ -7385,10 +7746,10 @@ processInstruction(MCInst &Inst,
if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
(unsigned)Inst.getOperand(1).getImm() <= 255 &&
((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
- Inst.getOperand(4).getReg() == ARM::CPSR) ||
- (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
- (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
- static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
+ Inst.getOperand(4).getReg() == ARM::CPSR) ||
+ (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
+ (!static_cast<ARMOperand &>(*Operands[2]).isToken() ||
+ static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) {
// The operands aren't in the same order for tMOVi8...
MCInst TmpInst;
TmpInst.setOpcode(ARM::tMOVi8);
@@ -7409,8 +7770,8 @@ processInstruction(MCInst &Inst,
isARMLowRegister(Inst.getOperand(1).getReg()) &&
Inst.getOperand(2).getImm() == ARMCC::AL &&
Inst.getOperand(4).getReg() == ARM::CPSR &&
- (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
- static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
+ (!static_cast<ARMOperand &>(*Operands[2]).isToken() ||
+ static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) {
// The operands aren't the same for tMOV[S]r... (no cc_out)
MCInst TmpInst;
TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
@@ -7432,8 +7793,8 @@ processInstruction(MCInst &Inst,
if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
isARMLowRegister(Inst.getOperand(1).getReg()) &&
Inst.getOperand(2).getImm() == 0 &&
- (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
- static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
+ (!static_cast<ARMOperand &>(*Operands[2]).isToken() ||
+ static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) {
unsigned NewOpc;
switch (Inst.getOpcode()) {
default: llvm_unreachable("Illegal opcode!");
@@ -7545,9 +7906,10 @@ processInstruction(MCInst &Inst,
isARMLowRegister(Inst.getOperand(2).getReg())) &&
Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
- (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
- (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
- !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
+ (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
+ (!static_cast<ARMOperand &>(*Operands[3]).isToken() ||
+ !static_cast<ARMOperand &>(*Operands[3]).getToken().equals_lower(
+ ".w"))) {
unsigned NewOpc;
switch (Inst.getOpcode()) {
default: llvm_unreachable("unexpected opcode");
@@ -7584,9 +7946,10 @@ processInstruction(MCInst &Inst,
(Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
- (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
- (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
- !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
+ (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
+ (!static_cast<ARMOperand &>(*Operands[3]).isToken() ||
+ !static_cast<ARMOperand &>(*Operands[3]).getToken().equals_lower(
+ ".w"))) {
unsigned NewOpc;
switch (Inst.getOpcode()) {
default: llvm_unreachable("unexpected opcode");
@@ -7659,12 +8022,17 @@ unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
return Match_Success;
}
+namespace llvm {
+template <> inline bool IsCPSRDead<MCInst>(MCInst *Instr) {
+ return true; // In an assembly source, no need to second-guess
+}
+}
+
static const char *getSubtargetFeatureName(unsigned Val);
-bool ARMAsmParser::
-MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm) {
+bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ OperandVector &Operands,
+ MCStreamer &Out, unsigned &ErrorInfo,
+ bool MatchingInlineAsm) {
MCInst Inst;
unsigned MatchResult;
@@ -7694,7 +8062,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
// Only after the instruction is fully processed, we can validate it
if (wasInITBlock && hasV8Ops() && isThumb() &&
- !isV8EligibleForIT(&Inst, 2)) {
+ !isV8EligibleForIT(&Inst)) {
Warning(IDLoc, "deprecated instruction in IT block");
}
}
@@ -7710,7 +8078,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
return false;
Inst.setLoc(IDLoc);
- Out.EmitInstruction(Inst);
+ Out.EmitInstruction(Inst, STI);
return false;
case Match_MissingFeature: {
assert(ErrorInfo && "Unknown missing feature!");
@@ -7733,7 +8101,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
if (ErrorInfo >= Operands.size())
return Error(IDLoc, "too few operands for instruction");
- ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
+ ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc();
if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
}
@@ -7741,7 +8109,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
}
case Match_MnemonicFail:
return Error(IDLoc, "invalid instruction",
- ((ARMOperand*)Operands[0])->getLocRange());
+ ((ARMOperand &)*Operands[0]).getLocRange());
case Match_RequiresNotITBlock:
return Error(IDLoc, "flag setting instruction only valid outside IT block");
case Match_RequiresITBlock:
@@ -7751,15 +8119,51 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
case Match_RequiresThumb2:
return Error(IDLoc, "instruction variant requires Thumb2");
case Match_ImmRange0_15: {
- SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
+ SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc();
if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
return Error(ErrorLoc, "immediate operand must be in the range [0,15]");
}
case Match_ImmRange0_239: {
- SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
+ SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc();
if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
return Error(ErrorLoc, "immediate operand must be in the range [0,239]");
}
+ case Match_AlignedMemoryRequiresNone:
+ case Match_DupAlignedMemoryRequiresNone:
+ case Match_AlignedMemoryRequires16:
+ case Match_DupAlignedMemoryRequires16:
+ case Match_AlignedMemoryRequires32:
+ case Match_DupAlignedMemoryRequires32:
+ case Match_AlignedMemoryRequires64:
+ case Match_DupAlignedMemoryRequires64:
+ case Match_AlignedMemoryRequires64or128:
+ case Match_DupAlignedMemoryRequires64or128:
+ case Match_AlignedMemoryRequires64or128or256:
+ {
+ SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getAlignmentLoc();
+ if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
+ switch (MatchResult) {
+ default:
+ llvm_unreachable("Missing Match_Aligned type");
+ case Match_AlignedMemoryRequiresNone:
+ case Match_DupAlignedMemoryRequiresNone:
+ return Error(ErrorLoc, "alignment must be omitted");
+ case Match_AlignedMemoryRequires16:
+ case Match_DupAlignedMemoryRequires16:
+ return Error(ErrorLoc, "alignment must be 16 or omitted");
+ case Match_AlignedMemoryRequires32:
+ case Match_DupAlignedMemoryRequires32:
+ return Error(ErrorLoc, "alignment must be 32 or omitted");
+ case Match_AlignedMemoryRequires64:
+ case Match_DupAlignedMemoryRequires64:
+ return Error(ErrorLoc, "alignment must be 64 or omitted");
+ case Match_AlignedMemoryRequires64or128:
+ case Match_DupAlignedMemoryRequires64or128:
+ return Error(ErrorLoc, "alignment must be 64, 128 or omitted");
+ case Match_AlignedMemoryRequires64or128or256:
+ return Error(ErrorLoc, "alignment must be 64, 128, 256 or omitted");
+ }
+ }
}
llvm_unreachable("Implement any new match types added!");
@@ -7767,9 +8171,15 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
/// parseDirective parses the arm specific directives
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
+ const MCObjectFileInfo::Environment Format =
+ getContext().getObjectFileInfo()->getObjectFileType();
+ bool IsMachO = Format == MCObjectFileInfo::IsMachO;
+
StringRef IDVal = DirectiveID.getIdentifier();
if (IDVal == ".word")
- return parseDirectiveWord(4, DirectiveID.getLoc());
+ return parseLiteralValues(4, DirectiveID.getLoc());
+ else if (IDVal == ".short" || IDVal == ".hword")
+ return parseLiteralValues(2, DirectiveID.getLoc());
else if (IDVal == ".thumb")
return parseDirectiveThumb(DirectiveID.getLoc());
else if (IDVal == ".arm")
@@ -7782,16 +8192,6 @@ bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
return parseDirectiveSyntax(DirectiveID.getLoc());
else if (IDVal == ".unreq")
return parseDirectiveUnreq(DirectiveID.getLoc());
- else if (IDVal == ".arch")
- return parseDirectiveArch(DirectiveID.getLoc());
- else if (IDVal == ".eabi_attribute")
- return parseDirectiveEabiAttr(DirectiveID.getLoc());
- else if (IDVal == ".cpu")
- return parseDirectiveCPU(DirectiveID.getLoc());
- else if (IDVal == ".fpu")
- return parseDirectiveFPU(DirectiveID.getLoc());
- else if (IDVal == ".fnstart")
- return parseDirectiveFnStart(DirectiveID.getLoc());
else if (IDVal == ".fnend")
return parseDirectiveFnEnd(DirectiveID.getLoc());
else if (IDVal == ".cantunwind")
@@ -7808,17 +8208,61 @@ bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
return parseDirectiveRegSave(DirectiveID.getLoc(), false);
else if (IDVal == ".vsave")
return parseDirectiveRegSave(DirectiveID.getLoc(), true);
+ else if (IDVal == ".ltorg" || IDVal == ".pool")
+ return parseDirectiveLtorg(DirectiveID.getLoc());
+ else if (IDVal == ".even")
+ return parseDirectiveEven(DirectiveID.getLoc());
+ else if (IDVal == ".personalityindex")
+ return parseDirectivePersonalityIndex(DirectiveID.getLoc());
+ else if (IDVal == ".unwind_raw")
+ return parseDirectiveUnwindRaw(DirectiveID.getLoc());
+ else if (IDVal == ".movsp")
+ return parseDirectiveMovSP(DirectiveID.getLoc());
+ else if (IDVal == ".arch_extension")
+ return parseDirectiveArchExtension(DirectiveID.getLoc());
+ else if (IDVal == ".align")
+ return parseDirectiveAlign(DirectiveID.getLoc());
+ else if (IDVal == ".thumb_set")
+ return parseDirectiveThumbSet(DirectiveID.getLoc());
+
+ if (!IsMachO) {
+ if (IDVal == ".arch")
+ return parseDirectiveArch(DirectiveID.getLoc());
+ else if (IDVal == ".cpu")
+ return parseDirectiveCPU(DirectiveID.getLoc());
+ else if (IDVal == ".eabi_attribute")
+ return parseDirectiveEabiAttr(DirectiveID.getLoc());
+ else if (IDVal == ".fpu")
+ return parseDirectiveFPU(DirectiveID.getLoc());
+ else if (IDVal == ".fnstart")
+ return parseDirectiveFnStart(DirectiveID.getLoc());
+ else if (IDVal == ".inst")
+ return parseDirectiveInst(DirectiveID.getLoc());
+ else if (IDVal == ".inst.n")
+ return parseDirectiveInst(DirectiveID.getLoc(), 'n');
+ else if (IDVal == ".inst.w")
+ return parseDirectiveInst(DirectiveID.getLoc(), 'w');
+ else if (IDVal == ".object_arch")
+ return parseDirectiveObjectArch(DirectiveID.getLoc());
+ else if (IDVal == ".tlsdescseq")
+ return parseDirectiveTLSDescSeq(DirectiveID.getLoc());
+ }
+
return true;
}
-/// parseDirectiveWord
-/// ::= .word [ expression (, expression)* ]
-bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
+/// parseLiteralValues
+/// ::= .hword expression [, expression]*
+/// ::= .short expression [, expression]*
+/// ::= .word expression [, expression]*
+bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
if (getLexer().isNot(AsmToken::EndOfStatement)) {
for (;;) {
const MCExpr *Value;
- if (getParser().parseExpression(Value))
- return true;
+ if (getParser().parseExpression(Value)) {
+ Parser.eatToEndOfStatement();
+ return false;
+ }
getParser().getStreamer().EmitValue(Value, Size);
@@ -7826,8 +8270,10 @@ bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
break;
// FIXME: Improve diagnostic.
- if (getLexer().isNot(AsmToken::Comma))
- return Error(L, "unexpected token in directive");
+ if (getLexer().isNot(AsmToken::Comma)) {
+ Error(L, "unexpected token in directive");
+ return false;
+ }
Parser.Lex();
}
}
@@ -7839,15 +8285,20 @@ bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
/// parseDirectiveThumb
/// ::= .thumb
bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
- if (getLexer().isNot(AsmToken::EndOfStatement))
- return Error(L, "unexpected token in directive");
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ Error(L, "unexpected token in directive");
+ return false;
+ }
Parser.Lex();
- if (!hasThumb())
- return Error(L, "target does not support Thumb mode");
+ if (!hasThumb()) {
+ Error(L, "target does not support Thumb mode");
+ return false;
+ }
if (!isThumb())
SwitchMode();
+
getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
return false;
}
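
The hunks above switch the directive handlers to a new error convention: rather than propagating the true returned by Error() (which used to abort the directive), a handler now emits its diagnostic, skips the rest of the statement where needed, and returns false so parsing continues with the next statement. A minimal standalone sketch of the two shapes, using simplified stand-ins for Error() and eatToEndOfStatement() rather than the real MCAsmParser API:

// Simplified stand-ins; the real parser attaches diagnostics to an SMLoc.
static bool Error(const char *Msg) { (void)Msg; return true; }
static void eatToEndOfStatement() { /* skip tokens up to the newline */ }

// Old shape: the diagnostic's 'true' return aborted the directive and left
// recovery to the caller.
static bool parseDirectiveOld(bool BadToken) {
  if (BadToken)
    return Error("unexpected token in directive");
  return false;
}

// New shape: report the problem, consume the statement where needed, and
// return false so the next statement is parsed normally.
static bool parseDirectiveNew(bool BadToken) {
  if (BadToken) {
    Error("unexpected token in directive");
    eatToEndOfStatement();
    return false;
  }
  return false;
}
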
@@ -7855,15 +8306,20 @@ bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
/// parseDirectiveARM
/// ::= .arm
bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
- if (getLexer().isNot(AsmToken::EndOfStatement))
- return Error(L, "unexpected token in directive");
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ Error(L, "unexpected token in directive");
+ return false;
+ }
Parser.Lex();
- if (!hasARM())
- return Error(L, "target does not support ARM mode");
+ if (!hasARM()) {
+ Error(L, "target does not support ARM mode");
+ return false;
+ }
if (isThumb())
SwitchMode();
+
getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
return false;
}
@@ -7886,8 +8342,11 @@ bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
if (isMachO) {
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::EndOfStatement)) {
- if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
- return Error(L, "unexpected token in .thumb_func directive");
+ if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) {
+ Error(L, "unexpected token in .thumb_func directive");
+ return false;
+ }
+
MCSymbol *Func =
getParser().getContext().GetOrCreateSymbol(Tok.getIdentifier());
getParser().getStreamer().EmitThumbFunc(Func);
@@ -7896,11 +8355,12 @@ bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
}
}
- if (getLexer().isNot(AsmToken::EndOfStatement))
- return Error(L, "unexpected token in directive");
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ Error(L, "unexpected token in directive");
+ return false;
+ }
NextSymbolIsThumb = true;
-
return false;
}
@@ -7908,18 +8368,26 @@ bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
/// ::= .syntax unified | divided
bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
const AsmToken &Tok = Parser.getTok();
- if (Tok.isNot(AsmToken::Identifier))
- return Error(L, "unexpected token in .syntax directive");
+ if (Tok.isNot(AsmToken::Identifier)) {
+ Error(L, "unexpected token in .syntax directive");
+ return false;
+ }
+
StringRef Mode = Tok.getString();
- if (Mode == "unified" || Mode == "UNIFIED")
+ if (Mode == "unified" || Mode == "UNIFIED") {
Parser.Lex();
- else if (Mode == "divided" || Mode == "DIVIDED")
- return Error(L, "'.syntax divided' arm asssembly not supported");
- else
- return Error(L, "unrecognized syntax mode in .syntax directive");
+ } else if (Mode == "divided" || Mode == "DIVIDED") {
+    Error(L, "'.syntax divided' arm assembly not supported");
+ return false;
+ } else {
+ Error(L, "unrecognized syntax mode in .syntax directive");
+ return false;
+ }
- if (getLexer().isNot(AsmToken::EndOfStatement))
- return Error(Parser.getTok().getLoc(), "unexpected token in directive");
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ Error(Parser.getTok().getLoc(), "unexpected token in directive");
+ return false;
+ }
Parser.Lex();
// TODO tell the MC streamer the mode
@@ -7931,30 +8399,37 @@ bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
/// ::= .code 16 | 32
bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
const AsmToken &Tok = Parser.getTok();
- if (Tok.isNot(AsmToken::Integer))
- return Error(L, "unexpected token in .code directive");
+ if (Tok.isNot(AsmToken::Integer)) {
+ Error(L, "unexpected token in .code directive");
+ return false;
+ }
int64_t Val = Parser.getTok().getIntVal();
- if (Val == 16)
- Parser.Lex();
- else if (Val == 32)
- Parser.Lex();
- else
- return Error(L, "invalid operand to .code directive");
+ if (Val != 16 && Val != 32) {
+ Error(L, "invalid operand to .code directive");
+ return false;
+ }
+ Parser.Lex();
- if (getLexer().isNot(AsmToken::EndOfStatement))
- return Error(Parser.getTok().getLoc(), "unexpected token in directive");
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ Error(Parser.getTok().getLoc(), "unexpected token in directive");
+ return false;
+ }
Parser.Lex();
if (Val == 16) {
- if (!hasThumb())
- return Error(L, "target does not support Thumb mode");
+ if (!hasThumb()) {
+ Error(L, "target does not support Thumb mode");
+ return false;
+ }
if (!isThumb())
SwitchMode();
getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
} else {
- if (!hasARM())
- return Error(L, "target does not support ARM mode");
+ if (!hasARM()) {
+ Error(L, "target does not support ARM mode");
+ return false;
+ }
if (isThumb())
SwitchMode();
@@ -7972,21 +8447,23 @@ bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
SMLoc SRegLoc, ERegLoc;
if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
Parser.eatToEndOfStatement();
- return Error(SRegLoc, "register name expected");
+ Error(SRegLoc, "register name expected");
+ return false;
}
// Shouldn't be anything else.
if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
Parser.eatToEndOfStatement();
- return Error(Parser.getTok().getLoc(),
- "unexpected input in .req directive.");
+ Error(Parser.getTok().getLoc(), "unexpected input in .req directive.");
+ return false;
}
Parser.Lex(); // Consume the EndOfStatement
- if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
- return Error(SRegLoc, "redefinition of '" + Name +
- "' does not match original.");
+ if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg) {
+ Error(SRegLoc, "redefinition of '" + Name + "' does not match original.");
+ return false;
+ }
return false;
}
@@ -7996,9 +8473,10 @@ bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
if (Parser.getTok().isNot(AsmToken::Identifier)) {
Parser.eatToEndOfStatement();
- return Error(L, "unexpected input in .unreq directive.");
+ Error(L, "unexpected input in .unreq directive.");
+ return false;
}
- RegisterReqs.erase(Parser.getTok().getIdentifier());
+ RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
Parser.Lex(); // Eat the identifier.
return false;
}
@@ -8006,28 +8484,128 @@ bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
/// parseDirectiveArch
/// ::= .arch token
bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
- return true;
+ StringRef Arch = getParser().parseStringToEndOfStatement().trim();
+
+ unsigned ID = StringSwitch<unsigned>(Arch)
+#define ARM_ARCH_NAME(NAME, ID, DEFAULT_CPU_NAME, DEFAULT_CPU_ARCH) \
+ .Case(NAME, ARM::ID)
+#define ARM_ARCH_ALIAS(NAME, ID) \
+ .Case(NAME, ARM::ID)
+#include "MCTargetDesc/ARMArchName.def"
+ .Default(ARM::INVALID_ARCH);
+
+ if (ID == ARM::INVALID_ARCH) {
+ Error(L, "Unknown arch name");
+ return false;
+ }
+
+ getTargetStreamer().emitArch(ID);
+ return false;
}
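
The rewritten parseDirectiveArch resolves the architecture name with a StringSwitch whose .Case lines are generated by re-including ARMArchName.def. A small self-contained illustration of that X-macro pattern follows; the two-entry table, the DemoArchKind enum, and lookupDemoArch are illustrative stand-ins, not the real definitions:

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

// Stand-in for ARMArchName.def: one X(...) entry per architecture name.
#define DEMO_ARCH_TABLE \
  X("armv7-a", DEMO_ARMV7A) \
  X("armv8-a", DEMO_ARMV8A)

enum DemoArchKind {
  DEMO_INVALID = 0,
#define X(NAME, ID) ID,
  DEMO_ARCH_TABLE
#undef X
};

static unsigned lookupDemoArch(llvm::StringRef Arch) {
  return llvm::StringSwitch<unsigned>(Arch)
#define X(NAME, ID) .Case(NAME, ID)
      DEMO_ARCH_TABLE
#undef X
      .Default(DEMO_INVALID);
}
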
/// parseDirectiveEabiAttr
-/// ::= .eabi_attribute int, int
+/// ::= .eabi_attribute int, int [, "str"]
+/// ::= .eabi_attribute Tag_name, int [, "str"]
bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
- if (Parser.getTok().isNot(AsmToken::Integer))
- return Error(L, "integer expected");
- int64_t Tag = Parser.getTok().getIntVal();
- Parser.Lex(); // eat tag integer
+ int64_t Tag;
+ SMLoc TagLoc;
+ TagLoc = Parser.getTok().getLoc();
+ if (Parser.getTok().is(AsmToken::Identifier)) {
+ StringRef Name = Parser.getTok().getIdentifier();
+ Tag = ARMBuildAttrs::AttrTypeFromString(Name);
+ if (Tag == -1) {
+ Error(TagLoc, "attribute name not recognised: " + Name);
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+ Parser.Lex();
+ } else {
+ const MCExpr *AttrExpr;
+
+ TagLoc = Parser.getTok().getLoc();
+ if (Parser.parseExpression(AttrExpr)) {
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
+ if (!CE) {
+ Error(TagLoc, "expected numeric constant");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
- if (Parser.getTok().isNot(AsmToken::Comma))
- return Error(L, "comma expected");
+ Tag = CE->getValue();
+ }
+
+ if (Parser.getTok().isNot(AsmToken::Comma)) {
+ Error(Parser.getTok().getLoc(), "comma expected");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
Parser.Lex(); // skip comma
- L = Parser.getTok().getLoc();
- if (Parser.getTok().isNot(AsmToken::Integer))
- return Error(L, "integer expected");
- int64_t Value = Parser.getTok().getIntVal();
- Parser.Lex(); // eat value integer
+ StringRef StringValue = "";
+ bool IsStringValue = false;
+
+ int64_t IntegerValue = 0;
+ bool IsIntegerValue = false;
+
+ if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
+ IsStringValue = true;
+ else if (Tag == ARMBuildAttrs::compatibility) {
+ IsStringValue = true;
+ IsIntegerValue = true;
+ } else if (Tag < 32 || Tag % 2 == 0)
+ IsIntegerValue = true;
+ else if (Tag % 2 == 1)
+ IsStringValue = true;
+ else
+ llvm_unreachable("invalid tag type");
+
+ if (IsIntegerValue) {
+ const MCExpr *ValueExpr;
+ SMLoc ValueExprLoc = Parser.getTok().getLoc();
+ if (Parser.parseExpression(ValueExpr)) {
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
+ if (!CE) {
+ Error(ValueExprLoc, "expected numeric constant");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
- getTargetStreamer().emitAttribute(Tag, Value);
+ IntegerValue = CE->getValue();
+ }
+
+ if (Tag == ARMBuildAttrs::compatibility) {
+ if (Parser.getTok().isNot(AsmToken::Comma))
+ IsStringValue = false;
+ else
+ Parser.Lex();
+ }
+
+ if (IsStringValue) {
+ if (Parser.getTok().isNot(AsmToken::String)) {
+ Error(Parser.getTok().getLoc(), "bad string constant");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ StringValue = Parser.getTok().getStringContents();
+ Parser.Lex();
+ }
+
+ if (IsIntegerValue && IsStringValue) {
+ assert(Tag == ARMBuildAttrs::compatibility);
+ getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
+ } else if (IsIntegerValue)
+ getTargetStreamer().emitAttribute(Tag, IntegerValue);
+ else if (IsStringValue)
+ getTargetStreamer().emitTextAttribute(Tag, StringValue);
return false;
}
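
The tag handling above follows the build-attributes encoding convention: Tag_CPU_raw_name and Tag_CPU_name take a string, Tag_compatibility takes an integer flag plus a vendor string, and otherwise tags below 32 and even-numbered tags take integers while the remaining odd tags take strings. A compact sketch of that rule; classifyEabiTag and DemoValueKind are illustrative names, and the CPU-name special case is stated here rather than repeated in the code:

#include <cstdint>

enum DemoValueKind { DemoInt, DemoString, DemoIntAndString };

static DemoValueKind classifyEabiTag(int64_t Tag) {
  const int64_t Compatibility = 32;   // Tag_compatibility in the ARM build attributes
  if (Tag == Compatibility)
    return DemoIntAndString;          // integer flag plus vendor string
  if (Tag < 32 || Tag % 2 == 0)
    return DemoInt;                   // low-numbered or even tags carry integers
  return DemoString;                  // remaining odd tags carry strings
}
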
@@ -8049,8 +8627,10 @@ bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
#include "ARMFPUName.def"
.Default(ARM::INVALID_FPU);
- if (ID == ARM::INVALID_FPU)
- return Error(L, "Unknown FPU name");
+ if (ID == ARM::INVALID_FPU) {
+ Error(L, "Unknown FPU name");
+ return false;
+ }
getTargetStreamer().emitFPU(ID);
return false;
@@ -8059,14 +8639,18 @@ bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
/// parseDirectiveFnStart
/// ::= .fnstart
bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
- if (FnStartLoc.isValid()) {
+ if (UC.hasFnStart()) {
Error(L, ".fnstart starts before the end of previous one");
- Error(FnStartLoc, "previous .fnstart starts here");
- return true;
+ UC.emitFnStartLocNotes();
+ return false;
}
- FnStartLoc = L;
+ // Reset the unwind directives parser state
+ UC.reset();
+
getTargetStreamer().emitFnStart();
+
+ UC.recordFnStart(L);
return false;
}
@@ -8074,31 +8658,37 @@ bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
/// ::= .fnend
bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
// Check the ordering of unwind directives
- if (!FnStartLoc.isValid())
- return Error(L, ".fnstart must precede .fnend directive");
+ if (!UC.hasFnStart()) {
+ Error(L, ".fnstart must precede .fnend directive");
+ return false;
+ }
// Reset the unwind directives parser state
- resetUnwindDirectiveParserState();
getTargetStreamer().emitFnEnd();
+
+ UC.reset();
return false;
}
/// parseDirectiveCantUnwind
/// ::= .cantunwind
bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
+ UC.recordCantUnwind(L);
+
// Check the ordering of unwind directives
- CantUnwindLoc = L;
- if (!FnStartLoc.isValid())
- return Error(L, ".fnstart must precede .cantunwind directive");
- if (HandlerDataLoc.isValid()) {
+ if (!UC.hasFnStart()) {
+ Error(L, ".fnstart must precede .cantunwind directive");
+ return false;
+ }
+ if (UC.hasHandlerData()) {
Error(L, ".cantunwind can't be used with .handlerdata directive");
- Error(HandlerDataLoc, ".handlerdata was specified here");
- return true;
+ UC.emitHandlerDataLocNotes();
+ return false;
}
- if (PersonalityLoc.isValid()) {
+ if (UC.hasPersonality()) {
Error(L, ".cantunwind can't be used with .personality directive");
- Error(PersonalityLoc, ".personality was specified here");
- return true;
+ UC.emitPersonalityLocNotes();
+ return false;
}
getTargetStreamer().emitCantUnwind();
@@ -8108,25 +8698,37 @@ bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
/// parseDirectivePersonality
/// ::= .personality name
bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
+ bool HasExistingPersonality = UC.hasPersonality();
+
+ UC.recordPersonality(L);
+
// Check the ordering of unwind directives
- PersonalityLoc = L;
- if (!FnStartLoc.isValid())
- return Error(L, ".fnstart must precede .personality directive");
- if (CantUnwindLoc.isValid()) {
+ if (!UC.hasFnStart()) {
+ Error(L, ".fnstart must precede .personality directive");
+ return false;
+ }
+ if (UC.cantUnwind()) {
Error(L, ".personality can't be used with .cantunwind directive");
- Error(CantUnwindLoc, ".cantunwind was specified here");
- return true;
+ UC.emitCantUnwindLocNotes();
+ return false;
}
- if (HandlerDataLoc.isValid()) {
+ if (UC.hasHandlerData()) {
Error(L, ".personality must precede .handlerdata directive");
- Error(HandlerDataLoc, ".handlerdata was specified here");
- return true;
+ UC.emitHandlerDataLocNotes();
+ return false;
+ }
+ if (HasExistingPersonality) {
+ Parser.eatToEndOfStatement();
+ Error(L, "multiple personality directives");
+ UC.emitPersonalityLocNotes();
+ return false;
}
// Parse the name of the personality routine
if (Parser.getTok().isNot(AsmToken::Identifier)) {
Parser.eatToEndOfStatement();
- return Error(L, "unexpected input in .personality directive.");
+ Error(L, "unexpected input in .personality directive.");
+ return false;
}
StringRef Name(Parser.getTok().getIdentifier());
Parser.Lex();
@@ -8139,14 +8741,17 @@ bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
/// parseDirectiveHandlerData
/// ::= .handlerdata
bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
+ UC.recordHandlerData(L);
+
// Check the ordering of unwind directives
- HandlerDataLoc = L;
- if (!FnStartLoc.isValid())
- return Error(L, ".fnstart must precede .personality directive");
- if (CantUnwindLoc.isValid()) {
+ if (!UC.hasFnStart()) {
+ Error(L, ".fnstart must precede .personality directive");
+ return false;
+ }
+ if (UC.cantUnwind()) {
Error(L, ".handlerdata can't be used with .cantunwind directive");
- Error(CantUnwindLoc, ".cantunwind was specified here");
- return true;
+ UC.emitCantUnwindLocNotes();
+ return false;
}
getTargetStreamer().emitHandlerData();
@@ -8157,34 +8762,45 @@ bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
/// ::= .setfp fpreg, spreg [, offset]
bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
// Check the ordering of unwind directives
- if (!FnStartLoc.isValid())
- return Error(L, ".fnstart must precede .setfp directive");
- if (HandlerDataLoc.isValid())
- return Error(L, ".setfp must precede .handlerdata directive");
+ if (!UC.hasFnStart()) {
+ Error(L, ".fnstart must precede .setfp directive");
+ return false;
+ }
+ if (UC.hasHandlerData()) {
+ Error(L, ".setfp must precede .handlerdata directive");
+ return false;
+ }
// Parse fpreg
- SMLoc NewFPRegLoc = Parser.getTok().getLoc();
- int NewFPReg = tryParseRegister();
- if (NewFPReg == -1)
- return Error(NewFPRegLoc, "frame pointer register expected");
+ SMLoc FPRegLoc = Parser.getTok().getLoc();
+ int FPReg = tryParseRegister();
+ if (FPReg == -1) {
+ Error(FPRegLoc, "frame pointer register expected");
+ return false;
+ }
// Consume comma
- if (!Parser.getTok().is(AsmToken::Comma))
- return Error(Parser.getTok().getLoc(), "comma expected");
+ if (Parser.getTok().isNot(AsmToken::Comma)) {
+ Error(Parser.getTok().getLoc(), "comma expected");
+ return false;
+ }
Parser.Lex(); // skip comma
// Parse spreg
- SMLoc NewSPRegLoc = Parser.getTok().getLoc();
- int NewSPReg = tryParseRegister();
- if (NewSPReg == -1)
- return Error(NewSPRegLoc, "stack pointer register expected");
+ SMLoc SPRegLoc = Parser.getTok().getLoc();
+ int SPReg = tryParseRegister();
+ if (SPReg == -1) {
+ Error(SPRegLoc, "stack pointer register expected");
+ return false;
+ }
- if (NewSPReg != ARM::SP && NewSPReg != FPReg)
- return Error(NewSPRegLoc,
- "register should be either $sp or the latest fp register");
+ if (SPReg != ARM::SP && SPReg != UC.getFPReg()) {
+ Error(SPRegLoc, "register should be either $sp or the latest fp register");
+ return false;
+ }
// Update the frame pointer register
- FPReg = NewFPReg;
+ UC.saveFPReg(FPReg);
// Parse offset
int64_t Offset = 0;
@@ -8193,24 +8809,29 @@ bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
if (Parser.getTok().isNot(AsmToken::Hash) &&
Parser.getTok().isNot(AsmToken::Dollar)) {
- return Error(Parser.getTok().getLoc(), "'#' expected");
+ Error(Parser.getTok().getLoc(), "'#' expected");
+ return false;
}
Parser.Lex(); // skip hash token.
const MCExpr *OffsetExpr;
SMLoc ExLoc = Parser.getTok().getLoc();
SMLoc EndLoc;
- if (getParser().parseExpression(OffsetExpr, EndLoc))
- return Error(ExLoc, "malformed setfp offset");
+ if (getParser().parseExpression(OffsetExpr, EndLoc)) {
+ Error(ExLoc, "malformed setfp offset");
+ return false;
+ }
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
- if (!CE)
- return Error(ExLoc, "setfp offset must be an immediate");
+ if (!CE) {
+ Error(ExLoc, "setfp offset must be an immediate");
+ return false;
+ }
Offset = CE->getValue();
}
- getTargetStreamer().emitSetFP(static_cast<unsigned>(NewFPReg),
- static_cast<unsigned>(NewSPReg), Offset);
+ getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
+ static_cast<unsigned>(SPReg), Offset);
return false;
}
@@ -8218,26 +8839,35 @@ bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
/// ::= .pad offset
bool ARMAsmParser::parseDirectivePad(SMLoc L) {
// Check the ordering of unwind directives
- if (!FnStartLoc.isValid())
- return Error(L, ".fnstart must precede .pad directive");
- if (HandlerDataLoc.isValid())
- return Error(L, ".pad must precede .handlerdata directive");
+ if (!UC.hasFnStart()) {
+ Error(L, ".fnstart must precede .pad directive");
+ return false;
+ }
+ if (UC.hasHandlerData()) {
+ Error(L, ".pad must precede .handlerdata directive");
+ return false;
+ }
// Parse the offset
if (Parser.getTok().isNot(AsmToken::Hash) &&
Parser.getTok().isNot(AsmToken::Dollar)) {
- return Error(Parser.getTok().getLoc(), "'#' expected");
+ Error(Parser.getTok().getLoc(), "'#' expected");
+ return false;
}
Parser.Lex(); // skip hash token.
const MCExpr *OffsetExpr;
SMLoc ExLoc = Parser.getTok().getLoc();
SMLoc EndLoc;
- if (getParser().parseExpression(OffsetExpr, EndLoc))
- return Error(ExLoc, "malformed pad offset");
+ if (getParser().parseExpression(OffsetExpr, EndLoc)) {
+ Error(ExLoc, "malformed pad offset");
+ return false;
+ }
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
- if (!CE)
- return Error(ExLoc, "pad offset must be an immediate");
+ if (!CE) {
+ Error(ExLoc, "pad offset must be an immediate");
+ return false;
+ }
getTargetStreamer().emitPad(CE->getValue());
return false;
@@ -8248,37 +8878,470 @@ bool ARMAsmParser::parseDirectivePad(SMLoc L) {
/// ::= .vsave { registers }
bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
// Check the ordering of unwind directives
- if (!FnStartLoc.isValid())
- return Error(L, ".fnstart must precede .save or .vsave directives");
- if (HandlerDataLoc.isValid())
- return Error(L, ".save or .vsave must precede .handlerdata directive");
+ if (!UC.hasFnStart()) {
+ Error(L, ".fnstart must precede .save or .vsave directives");
+ return false;
+ }
+ if (UC.hasHandlerData()) {
+ Error(L, ".save or .vsave must precede .handlerdata directive");
+ return false;
+ }
// RAII object to make sure parsed operands are deleted.
- struct CleanupObject {
- SmallVector<MCParsedAsmOperand *, 1> Operands;
- ~CleanupObject() {
- for (unsigned I = 0, E = Operands.size(); I != E; ++I)
- delete Operands[I];
- }
- } CO;
+ SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
// Parse the register list
- if (parseRegisterList(CO.Operands))
+ if (parseRegisterList(Operands))
+ return false;
+ ARMOperand &Op = (ARMOperand &)*Operands[0];
+ if (!IsVector && !Op.isRegList()) {
+ Error(L, ".save expects GPR registers");
+ return false;
+ }
+ if (IsVector && !Op.isDPRRegList()) {
+ Error(L, ".vsave expects DPR registers");
+ return false;
+ }
+
+ getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
+ return false;
+}
+
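
In parseDirectiveRegSave above, the hand-written CleanupObject destructor is replaced by a vector of std::unique_ptr, so parsed operands are released automatically on every early return. A minimal sketch of that ownership shape, with DemoOperand standing in for MCParsedAsmOperand:

#include "llvm/ADT/SmallVector.h"
#include <memory>

struct DemoOperand { int Value; };   // stand-in for MCParsedAsmOperand

static void demoOperandOwnership() {
  llvm::SmallVector<std::unique_ptr<DemoOperand>, 1> Operands;
  Operands.push_back(std::unique_ptr<DemoOperand>(new DemoOperand{42}));
  // Every element is destroyed when Operands goes out of scope; no manual
  // delete loop is needed on any of the early-return error paths.
}
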
+/// parseDirectiveInst
+/// ::= .inst opcode [, ...]
+/// ::= .inst.n opcode [, ...]
+/// ::= .inst.w opcode [, ...]
+bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
+ int Width;
+
+ if (isThumb()) {
+ switch (Suffix) {
+ case 'n':
+ Width = 2;
+ break;
+ case 'w':
+ Width = 4;
+ break;
+ default:
+ Parser.eatToEndOfStatement();
+ Error(Loc, "cannot determine Thumb instruction size, "
+ "use inst.n/inst.w instead");
+ return false;
+ }
+ } else {
+ if (Suffix) {
+ Parser.eatToEndOfStatement();
+ Error(Loc, "width suffixes are invalid in ARM mode");
+ return false;
+ }
+ Width = 4;
+ }
+
+ if (getLexer().is(AsmToken::EndOfStatement)) {
+ Parser.eatToEndOfStatement();
+ Error(Loc, "expected expression following directive");
+ return false;
+ }
+
+ for (;;) {
+ const MCExpr *Expr;
+
+ if (getParser().parseExpression(Expr)) {
+ Error(Loc, "expected expression");
+ return false;
+ }
+
+ const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
+ if (!Value) {
+ Error(Loc, "expected constant expression");
+ return false;
+ }
+
+ switch (Width) {
+ case 2:
+ if (Value->getValue() > 0xffff) {
+ Error(Loc, "inst.n operand is too big, use inst.w instead");
+ return false;
+ }
+ break;
+ case 4:
+ if (Value->getValue() > 0xffffffff) {
+ Error(Loc,
+ StringRef(Suffix ? "inst.w" : "inst") + " operand is too big");
+ return false;
+ }
+ break;
+ default:
+ llvm_unreachable("only supported widths are 2 and 4");
+ }
+
+ getTargetStreamer().emitInst(Value->getValue(), Suffix);
+
+ if (getLexer().is(AsmToken::EndOfStatement))
+ break;
+
+ if (getLexer().isNot(AsmToken::Comma)) {
+ Error(Loc, "unexpected token in directive");
+ return false;
+ }
+
+ Parser.Lex();
+ }
+
+ Parser.Lex();
+ return false;
+}
+
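
parseDirectiveInst above encodes two rules: in Thumb mode the directive must carry a .n or .w suffix (2 or 4 bytes) while in ARM mode any suffix is rejected and the width is always 4 bytes, and each constant must fit the selected width. A small sketch of just those checks; instWidth and instValueFits are illustrative helpers, not part of the parser:

#include <cstdint>

// Returns the encoding width in bytes, or 0 when the suffix/mode combination
// is invalid (the real handler emits a diagnostic in that case).
static unsigned instWidth(bool IsThumb, char Suffix) {
  if (!IsThumb)
    return Suffix ? 0 : 4;       // ARM mode: no suffix allowed, always 32-bit
  if (Suffix == 'n') return 2;   // .inst.n selects a 16-bit Thumb encoding
  if (Suffix == 'w') return 4;   // .inst.w selects a 32-bit Thumb encoding
  return 0;                      // plain .inst is ambiguous in Thumb mode
}

static bool instValueFits(uint64_t Value, unsigned Width) {
  return Width == 2 ? Value <= 0xffff : Value <= 0xffffffff;
}
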
+/// parseDirectiveLtorg
+/// ::= .ltorg | .pool
+bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
+ getTargetStreamer().emitCurrentConstantPool();
+ return false;
+}
+
+bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
+ const MCSection *Section = getStreamer().getCurrentSection().first;
+
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ TokError("unexpected token in directive");
+ return false;
+ }
+
+ if (!Section) {
+ getStreamer().InitSections();
+ Section = getStreamer().getCurrentSection().first;
+ }
+
+ assert(Section && "must have section to emit alignment");
+ if (Section->UseCodeAlign())
+ getStreamer().EmitCodeAlignment(2);
+ else
+ getStreamer().EmitValueToAlignment(2);
+
+ return false;
+}
+
+/// parseDirectivePersonalityIndex
+/// ::= .personalityindex index
+bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
+ bool HasExistingPersonality = UC.hasPersonality();
+
+ UC.recordPersonalityIndex(L);
+
+ if (!UC.hasFnStart()) {
+ Parser.eatToEndOfStatement();
+ Error(L, ".fnstart must precede .personalityindex directive");
+ return false;
+ }
+ if (UC.cantUnwind()) {
+ Parser.eatToEndOfStatement();
+ Error(L, ".personalityindex cannot be used with .cantunwind");
+ UC.emitCantUnwindLocNotes();
+ return false;
+ }
+ if (UC.hasHandlerData()) {
+ Parser.eatToEndOfStatement();
+ Error(L, ".personalityindex must precede .handlerdata directive");
+ UC.emitHandlerDataLocNotes();
+ return false;
+ }
+ if (HasExistingPersonality) {
+ Parser.eatToEndOfStatement();
+ Error(L, "multiple personality directives");
+ UC.emitPersonalityLocNotes();
+ return false;
+ }
+
+ const MCExpr *IndexExpression;
+ SMLoc IndexLoc = Parser.getTok().getLoc();
+ if (Parser.parseExpression(IndexExpression)) {
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
+ if (!CE) {
+ Parser.eatToEndOfStatement();
+ Error(IndexLoc, "index must be a constant number");
+ return false;
+ }
+ if (CE->getValue() < 0 ||
+ CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX) {
+ Parser.eatToEndOfStatement();
+ Error(IndexLoc, "personality routine index should be in range [0-3]");
+ return false;
+ }
+
+ getTargetStreamer().emitPersonalityIndex(CE->getValue());
+ return false;
+}
+
+/// parseDirectiveUnwindRaw
+/// ::= .unwind_raw offset, opcode [, opcode...]
+bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
+ if (!UC.hasFnStart()) {
+ Parser.eatToEndOfStatement();
+ Error(L, ".fnstart must precede .unwind_raw directives");
+ return false;
+ }
+
+ int64_t StackOffset;
+
+ const MCExpr *OffsetExpr;
+ SMLoc OffsetLoc = getLexer().getLoc();
+ if (getLexer().is(AsmToken::EndOfStatement) ||
+ getParser().parseExpression(OffsetExpr)) {
+ Error(OffsetLoc, "expected expression");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
+ if (!CE) {
+ Error(OffsetLoc, "offset must be a constant");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ StackOffset = CE->getValue();
+
+ if (getLexer().isNot(AsmToken::Comma)) {
+ Error(getLexer().getLoc(), "expected comma");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+ Parser.Lex();
+
+ SmallVector<uint8_t, 16> Opcodes;
+ for (;;) {
+ const MCExpr *OE;
+
+ SMLoc OpcodeLoc = getLexer().getLoc();
+ if (getLexer().is(AsmToken::EndOfStatement) || Parser.parseExpression(OE)) {
+ Error(OpcodeLoc, "expected opcode expression");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
+ if (!OC) {
+ Error(OpcodeLoc, "opcode value must be a constant");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ const int64_t Opcode = OC->getValue();
+ if (Opcode & ~0xff) {
+ Error(OpcodeLoc, "invalid opcode");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ Opcodes.push_back(uint8_t(Opcode));
+
+ if (getLexer().is(AsmToken::EndOfStatement))
+ break;
+
+ if (getLexer().isNot(AsmToken::Comma)) {
+ Error(getLexer().getLoc(), "unexpected token in directive");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ Parser.Lex();
+ }
+
+ getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
+
+ Parser.Lex();
+ return false;
+}
+
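
The .unwind_raw handler above takes a constant stack offset followed by a comma-separated list of unwind opcodes, each of which must fit in one byte. A sketch of the per-opcode check; collectUnwindOpcode is an illustrative helper:

#include "llvm/ADT/SmallVector.h"
#include <cstdint>

static bool collectUnwindOpcode(int64_t Opcode,
                                llvm::SmallVectorImpl<uint8_t> &Opcodes) {
  if (Opcode & ~0xff)
    return false;   // rejected as "invalid opcode" by the real handler
  Opcodes.push_back(static_cast<uint8_t>(Opcode));
  return true;
}
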
+/// parseDirectiveTLSDescSeq
+/// ::= .tlsdescseq tls-variable
+bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
+ if (getLexer().isNot(AsmToken::Identifier)) {
+ TokError("expected variable after '.tlsdescseq' directive");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ const MCSymbolRefExpr *SRE =
+ MCSymbolRefExpr::Create(Parser.getTok().getIdentifier(),
+ MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
+ Lex();
+
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ Error(Parser.getTok().getLoc(), "unexpected token");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ getTargetStreamer().AnnotateTLSDescriptorSequence(SRE);
+ return false;
+}
+
+/// parseDirectiveMovSP
+/// ::= .movsp reg [, #offset]
+bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
+ if (!UC.hasFnStart()) {
+ Parser.eatToEndOfStatement();
+ Error(L, ".fnstart must precede .movsp directives");
+ return false;
+ }
+ if (UC.getFPReg() != ARM::SP) {
+ Parser.eatToEndOfStatement();
+ Error(L, "unexpected .movsp directive");
+ return false;
+ }
+
+ SMLoc SPRegLoc = Parser.getTok().getLoc();
+ int SPReg = tryParseRegister();
+ if (SPReg == -1) {
+ Parser.eatToEndOfStatement();
+ Error(SPRegLoc, "register expected");
+ return false;
+ }
+
+ if (SPReg == ARM::SP || SPReg == ARM::PC) {
+ Parser.eatToEndOfStatement();
+ Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
+ return false;
+ }
+
+ int64_t Offset = 0;
+ if (Parser.getTok().is(AsmToken::Comma)) {
+ Parser.Lex();
+
+ if (Parser.getTok().isNot(AsmToken::Hash)) {
+ Error(Parser.getTok().getLoc(), "expected #constant");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+ Parser.Lex();
+
+ const MCExpr *OffsetExpr;
+ SMLoc OffsetLoc = Parser.getTok().getLoc();
+ if (Parser.parseExpression(OffsetExpr)) {
+ Parser.eatToEndOfStatement();
+ Error(OffsetLoc, "malformed offset expression");
+ return false;
+ }
+
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
+ if (!CE) {
+ Parser.eatToEndOfStatement();
+ Error(OffsetLoc, "offset must be an immediate constant");
+ return false;
+ }
+
+ Offset = CE->getValue();
+ }
+
+ getTargetStreamer().emitMovSP(SPReg, Offset);
+ UC.saveFPReg(SPReg);
+
+ return false;
+}
+
+/// parseDirectiveObjectArch
+/// ::= .object_arch name
+bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
+ if (getLexer().isNot(AsmToken::Identifier)) {
+ Error(getLexer().getLoc(), "unexpected token");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ StringRef Arch = Parser.getTok().getString();
+ SMLoc ArchLoc = Parser.getTok().getLoc();
+ getLexer().Lex();
+
+ unsigned ID = StringSwitch<unsigned>(Arch)
+#define ARM_ARCH_NAME(NAME, ID, DEFAULT_CPU_NAME, DEFAULT_CPU_ARCH) \
+ .Case(NAME, ARM::ID)
+#define ARM_ARCH_ALIAS(NAME, ID) \
+ .Case(NAME, ARM::ID)
+#include "MCTargetDesc/ARMArchName.def"
+#undef ARM_ARCH_NAME
+#undef ARM_ARCH_ALIAS
+ .Default(ARM::INVALID_ARCH);
+
+ if (ID == ARM::INVALID_ARCH) {
+ Error(ArchLoc, "unknown architecture '" + Arch + "'");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ getTargetStreamer().emitObjectArch(ID);
+
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ Error(getLexer().getLoc(), "unexpected token");
+ Parser.eatToEndOfStatement();
+ }
+
+ return false;
+}
+
+/// parseDirectiveAlign
+/// ::= .align
+bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
+  // NOTE: if this is not the end of the statement, fall back to the
+  // target-agnostic handling for this directive, which will handle it correctly.
+ if (getLexer().isNot(AsmToken::EndOfStatement))
return true;
- ARMOperand *Op = (ARMOperand*)CO.Operands[0];
- if (!IsVector && !Op->isRegList())
- return Error(L, ".save expects GPR registers");
- if (IsVector && !Op->isDPRRegList())
- return Error(L, ".vsave expects DPR registers");
- getTargetStreamer().emitRegSave(Op->getRegList(), IsVector);
+  // '.align' with no operand is handled target-specifically to mean 2**2-byte alignment.
+ if (getStreamer().getCurrentSection().first->UseCodeAlign())
+ getStreamer().EmitCodeAlignment(4, 0);
+ else
+ getStreamer().EmitValueToAlignment(4, 0, 1, 0);
+
+ return false;
+}
+
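
parseDirectiveAlign above is the one new handler that can return true: a bare .align gets the ARM-specific meaning of 2**2-byte alignment, while .align with an operand is handed back to the target-independent parser. A tiny sketch of that split; handleAlignDemo is an illustrative stand-in:

static bool handleAlignDemo(bool HasOperand, unsigned &AlignmentOut) {
  if (HasOperand)
    return true;      // defer to the generic .align handling
  AlignmentOut = 4;   // bare .align: 2**2-byte alignment on ARM
  return false;       // handled here
}
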
+/// parseDirectiveThumbSet
+/// ::= .thumb_set name, value
+bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
+ StringRef Name;
+ if (Parser.parseIdentifier(Name)) {
+ TokError("expected identifier after '.thumb_set'");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ if (getLexer().isNot(AsmToken::Comma)) {
+ TokError("expected comma after name '" + Name + "'");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+ Lex();
+
+ const MCExpr *Value;
+ if (Parser.parseExpression(Value)) {
+ TokError("missing expression");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ TokError("unexpected token");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+ Lex();
+
+ MCSymbol *Alias = getContext().GetOrCreateSymbol(Name);
+ getTargetStreamer().emitThumbSet(Alias, Value);
return false;
}
/// Force static initialization.
extern "C" void LLVMInitializeARMAsmParser() {
- RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
- RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
+ RegisterMCAsmParser<ARMAsmParser> X(TheARMLETarget);
+ RegisterMCAsmParser<ARMAsmParser> Y(TheARMBETarget);
+ RegisterMCAsmParser<ARMAsmParser> A(TheThumbLETarget);
+ RegisterMCAsmParser<ARMAsmParser> B(TheThumbBETarget);
}
#define GET_REGISTER_MATCHER
@@ -8286,20 +9349,113 @@ extern "C" void LLVMInitializeARMAsmParser() {
#define GET_MATCHER_IMPLEMENTATION
#include "ARMGenAsmMatcher.inc"
+static const struct ExtMapEntry {
+ const char *Extension;
+ const unsigned ArchCheck;
+ const uint64_t Features;
+} Extensions[] = {
+ { "crc", Feature_HasV8, ARM::FeatureCRC },
+ { "crypto", Feature_HasV8,
+ ARM::FeatureCrypto | ARM::FeatureNEON | ARM::FeatureFPARMv8 },
+ { "fp", Feature_HasV8, ARM::FeatureFPARMv8 },
+ { "idiv", Feature_HasV7 | Feature_IsNotMClass,
+ ARM::FeatureHWDiv | ARM::FeatureHWDivARM },
+ // FIXME: iWMMXT not supported
+ { "iwmmxt", Feature_None, 0 },
+ // FIXME: iWMMXT2 not supported
+ { "iwmmxt2", Feature_None, 0 },
+ // FIXME: Maverick not supported
+ { "maverick", Feature_None, 0 },
+ { "mp", Feature_HasV7 | Feature_IsNotMClass, ARM::FeatureMP },
+ // FIXME: ARMv6-m OS Extensions feature not checked
+ { "os", Feature_None, 0 },
+ // FIXME: Also available in ARMv6-K
+ { "sec", Feature_HasV7, ARM::FeatureTrustZone },
+ { "simd", Feature_HasV8, ARM::FeatureNEON | ARM::FeatureFPARMv8 },
+ // FIXME: Only available in A-class, isel not predicated
+ { "virt", Feature_HasV7, ARM::FeatureVirtualization },
+ // FIXME: xscale not supported
+ { "xscale", Feature_None, 0 },
+};
+
+/// parseDirectiveArchExtension
+/// ::= .arch_extension [no]feature
+bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
+ if (getLexer().isNot(AsmToken::Identifier)) {
+ Error(getLexer().getLoc(), "unexpected token");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ StringRef Extension = Parser.getTok().getString();
+ SMLoc ExtLoc = Parser.getTok().getLoc();
+ getLexer().Lex();
+
+ bool EnableFeature = true;
+ if (Extension.startswith_lower("no")) {
+ EnableFeature = false;
+ Extension = Extension.substr(2);
+ }
+
+ for (unsigned EI = 0, EE = array_lengthof(Extensions); EI != EE; ++EI) {
+ if (Extensions[EI].Extension != Extension)
+ continue;
+
+ unsigned FB = getAvailableFeatures();
+ if ((FB & Extensions[EI].ArchCheck) != Extensions[EI].ArchCheck) {
+ Error(ExtLoc, "architectural extension '" + Extension + "' is not "
+ "allowed for the current base architecture");
+ return false;
+ }
+
+ if (!Extensions[EI].Features)
+ report_fatal_error("unsupported architectural extension: " + Extension);
+
+ if (EnableFeature)
+ FB |= ComputeAvailableFeatures(Extensions[EI].Features);
+ else
+ FB &= ~ComputeAvailableFeatures(Extensions[EI].Features);
+
+ setAvailableFeatures(FB);
+ return false;
+ }
+
+ Error(ExtLoc, "unknown architectural extension: " + Extension);
+ Parser.eatToEndOfStatement();
+ return false;
+}
+
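
parseDirectiveArchExtension above looks the extension up in the table, verifies the base-architecture prerequisite against the current feature bits, then ORs the extension's features in or masks them out. A sketch of that feature-bit update; DemoExtEntry and the bit values are illustrative, and the real code routes Features through ComputeAvailableFeatures first:

#include <cstdint>

struct DemoExtEntry {
  uint64_t ArchCheck;   // feature bits the base architecture must already provide
  uint64_t Features;    // feature bits the extension enables or disables
};

static uint64_t applyExtension(uint64_t FB, const DemoExtEntry &E, bool Enable) {
  if ((FB & E.ArchCheck) != E.ArchCheck)
    return FB;          // extension not allowed for this base architecture
  return Enable ? (FB | E.Features) : (FB & ~E.Features);
}
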
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
-unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
+unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
unsigned Kind) {
- ARMOperand *Op = static_cast<ARMOperand*>(AsmOp);
+ ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
// If the kind is a token for a literal immediate, check if our asm
// operand matches. This is for InstAliases which have a fixed-value
// immediate in the syntax.
- if (Kind == MCK__35_0 && Op->isImm()) {
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
- if (!CE)
- return Match_InvalidOperand;
- if (CE->getValue() == 0)
+ switch (Kind) {
+ default: break;
+ case MCK__35_0:
+ if (Op.isImm())
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
+ if (CE->getValue() == 0)
+ return Match_Success;
+ break;
+ case MCK_ARMSOImm:
+ if (Op.isImm()) {
+ const MCExpr *SOExpr = Op.getImm();
+ int64_t Value;
+ if (!SOExpr->EvaluateAsAbsolute(Value))
+ return Match_Success;
+ assert((Value >= INT32_MIN && Value <= UINT32_MAX) &&
+ "expression value must be representable in 32 bits");
+ }
+ break;
+ case MCK_GPRPair:
+ if (Op.isReg() &&
+ MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg()))
return Match_Success;
+ break;
}
return Match_InvalidOperand;
}
diff --git a/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
index 9c7988f..4d4038d 100644
--- a/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -7,8 +7,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "arm-disassembler"
-
#include "llvm/MC/MCDisassembler.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
@@ -29,6 +27,8 @@
using namespace llvm;
+#define DEBUG_TYPE "arm-disassembler"
+
typedef MCDisassembler::DecodeStatus DecodeStatus;
namespace {
@@ -90,20 +90,18 @@ class ARMDisassembler : public MCDisassembler {
public:
/// Constructor - Initializes the disassembler.
///
- ARMDisassembler(const MCSubtargetInfo &STI) :
- MCDisassembler(STI) {
+ ARMDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx) :
+ MCDisassembler(STI, Ctx) {
}
~ARMDisassembler() {
}
/// getInstruction - See MCDisassembler.
- DecodeStatus getInstruction(MCInst &instr,
- uint64_t &size,
- const MemoryObject &region,
- uint64_t address,
+ DecodeStatus getInstruction(MCInst &instr, uint64_t &size,
+ const MemoryObject &region, uint64_t address,
raw_ostream &vStream,
- raw_ostream &cStream) const;
+ raw_ostream &cStream) const override;
};
/// ThumbDisassembler - Thumb disassembler for all Thumb platforms.
@@ -111,20 +109,18 @@ class ThumbDisassembler : public MCDisassembler {
public:
/// Constructor - Initializes the disassembler.
///
- ThumbDisassembler(const MCSubtargetInfo &STI) :
- MCDisassembler(STI) {
+ ThumbDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx) :
+ MCDisassembler(STI, Ctx) {
}
~ThumbDisassembler() {
}
/// getInstruction - See MCDisassembler.
- DecodeStatus getInstruction(MCInst &instr,
- uint64_t &size,
- const MemoryObject &region,
- uint64_t address,
+ DecodeStatus getInstruction(MCInst &instr, uint64_t &size,
+ const MemoryObject &region, uint64_t address,
raw_ostream &vStream,
- raw_ostream &cStream) const;
+ raw_ostream &cStream) const override;
private:
mutable ITStatus ITBlock;
@@ -404,12 +400,16 @@ static DecodeStatus DecodeMRRC2(llvm::MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
#include "ARMGenDisassemblerTables.inc"
-static MCDisassembler *createARMDisassembler(const Target &T, const MCSubtargetInfo &STI) {
- return new ARMDisassembler(STI);
+static MCDisassembler *createARMDisassembler(const Target &T,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new ARMDisassembler(STI, Ctx);
}
-static MCDisassembler *createThumbDisassembler(const Target &T, const MCSubtargetInfo &STI) {
- return new ThumbDisassembler(STI);
+static MCDisassembler *createThumbDisassembler(const Target &T,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new ThumbDisassembler(STI, Ctx);
}
DecodeStatus ARMDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
@@ -860,9 +860,13 @@ DecodeStatus ThumbDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
extern "C" void LLVMInitializeARMDisassembler() {
- TargetRegistry::RegisterMCDisassembler(TheARMTarget,
+ TargetRegistry::RegisterMCDisassembler(TheARMLETarget,
+ createARMDisassembler);
+ TargetRegistry::RegisterMCDisassembler(TheARMBETarget,
createARMDisassembler);
- TargetRegistry::RegisterMCDisassembler(TheThumbTarget,
+ TargetRegistry::RegisterMCDisassembler(TheThumbLETarget,
+ createThumbDisassembler);
+ TargetRegistry::RegisterMCDisassembler(TheThumbBETarget,
createThumbDisassembler);
}
diff --git a/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp b/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
index f897028..228fb57 100644
--- a/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
#include "ARMInstPrinter.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
@@ -23,6 +22,8 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "asm-printer"
+
#include "ARMGenAsmWriter.inc"
/// translateShiftImm - Convert shift immediate from 0-31 to 1-32 for printing.
@@ -307,17 +308,30 @@ void ARMInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
<< markup(">");
} else {
assert(Op.isExpr() && "unknown operand kind in printOperand");
- // If a symbolic branch target was added as a constant expression then print
- // that address in hex. And only print 32 unsigned bits for the address.
- const MCConstantExpr *BranchTarget = dyn_cast<MCConstantExpr>(Op.getExpr());
- int64_t Address;
- if (BranchTarget && BranchTarget->EvaluateAsAbsolute(Address)) {
- O << "0x";
- O.write_hex((uint32_t)Address);
+ const MCExpr *Expr = Op.getExpr();
+ switch (Expr->getKind()) {
+ case MCExpr::Binary:
+ O << '#' << *Expr;
+ break;
+ case MCExpr::Constant: {
+ // If a symbolic branch target was added as a constant expression then
+ // print that address in hex. And only print 32 unsigned bits for the
+ // address.
+ const MCConstantExpr *Constant = cast<MCConstantExpr>(Expr);
+ int64_t TargetAddress;
+ if (!Constant->EvaluateAsAbsolute(TargetAddress)) {
+ O << '#' << *Expr;
+ } else {
+ O << "0x";
+ O.write_hex(static_cast<uint32_t>(TargetAddress));
+ }
+ break;
}
- else {
- // Otherwise, just print the expression.
- O << *Op.getExpr();
+ default:
+ // FIXME: Should we always treat this as if it is a constant literal and
+ // prefix it with '#'?
+ O << *Expr;
+ break;
}
}
}
@@ -1078,13 +1092,13 @@ void ARMInstPrinter::printAddrModeImm12Operand(const MCInst *MI, unsigned OpNum,
if (isSub) {
O << ", "
<< markup("<imm:")
- << "#-" << -OffImm
+ << "#-" << formatImm(-OffImm)
<< markup(">");
}
else if (AlwaysPrintImm0 || OffImm > 0) {
O << ", "
<< markup("<imm:")
- << "#" << OffImm
+ << "#" << formatImm(OffImm)
<< markup(">");
}
O << "]" << markup(">");
diff --git a/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h b/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
index 15ae8d1..f671fe4 100644
--- a/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
+++ b/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
@@ -26,8 +26,8 @@ public:
ARMInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
const MCRegisterInfo &MRI, const MCSubtargetInfo &STI);
- virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
- virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
+ void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot) override;
+ void printRegName(raw_ostream &OS, unsigned RegNo) const override;
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &O);
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMArchName.def b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMArchName.def
new file mode 100644
index 0000000..9f007a0
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMArchName.def
@@ -0,0 +1,50 @@
+//===-- ARMArchName.def - List of the ARM arch names ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the list of the supported ARM architecture names,
+// i.e. the values supported by the -march= option.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+#ifndef ARM_ARCH_NAME
+#error "You must define ARM_ARCH_NAME before including ARMArchName.def"
+#endif
+
+// ARM_ARCH_NAME(NAME, ID, DEFAULT_CPU_NAME, DEFAULT_CPU_ARCH)
+ARM_ARCH_NAME("armv2", ARMV2, "2", v4)
+ARM_ARCH_NAME("armv2a", ARMV2A, "2A", v4)
+ARM_ARCH_NAME("armv3", ARMV3, "3", v4)
+ARM_ARCH_NAME("armv3m", ARMV3M, "3M", v4)
+ARM_ARCH_NAME("armv4", ARMV4, "4", v4)
+ARM_ARCH_NAME("armv4t", ARMV4T, "4T", v4T)
+ARM_ARCH_NAME("armv5", ARMV5, "5", v5T)
+ARM_ARCH_NAME("armv5t", ARMV5T, "5T", v5T)
+ARM_ARCH_NAME("armv5te", ARMV5TE, "5TE", v5TE)
+ARM_ARCH_NAME("armv6", ARMV6, "6", v6)
+ARM_ARCH_NAME("armv6j", ARMV6J, "6J", v6)
+ARM_ARCH_NAME("armv6t2", ARMV6T2, "6T2", v6T2)
+ARM_ARCH_NAME("armv6z", ARMV6Z, "6Z", v6KZ)
+ARM_ARCH_NAME("armv6zk", ARMV6ZK, "6ZK", v6KZ)
+ARM_ARCH_NAME("armv6-m", ARMV6M, "6-M", v6_M)
+ARM_ARCH_NAME("armv7", ARMV7, "7", v7)
+ARM_ARCH_NAME("armv7-a", ARMV7A, "7-A", v7)
+ARM_ARCH_ALIAS("armv7a", ARMV7A)
+ARM_ARCH_NAME("armv7-r", ARMV7R, "7-R", v7)
+ARM_ARCH_ALIAS("armv7r", ARMV7R)
+ARM_ARCH_NAME("armv7-m", ARMV7M, "7-M", v7)
+ARM_ARCH_ALIAS("armv7m", ARMV7M)
+ARM_ARCH_NAME("armv8-a", ARMV8A, "8-A", v8)
+ARM_ARCH_ALIAS("armv8a", ARMV8A)
+ARM_ARCH_NAME("iwmmxt", IWMMXT, "iwmmxt", v5TE)
+ARM_ARCH_NAME("iwmmxt2", IWMMXT2, "iwmmxt2", v5TE)
+
+#undef ARM_ARCH_NAME
+#undef ARM_ARCH_ALIAS
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMArchName.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMArchName.h
new file mode 100644
index 0000000..34b9fc1
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMArchName.h
@@ -0,0 +1,27 @@
+//===-- ARMArchName.h - List of the ARM arch names --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ARMARCHNAME_H
+#define ARMARCHNAME_H
+
+namespace llvm {
+namespace ARM {
+
+enum ArchKind {
+ INVALID_ARCH = 0
+
+#define ARM_ARCH_NAME(NAME, ID, DEFAULT_CPU_NAME, DEFAULT_CPU_ARCH) , ID
+#define ARM_ARCH_ALIAS(NAME, ID) /* empty */
+#include "ARMArchName.def"
+};
+
+} // namespace ARM
+} // namespace llvm
+
+#endif // ARMARCHNAME_H
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
index 5615b80..7acd9cc 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
@@ -41,24 +41,27 @@ public:
class ARMAsmBackend : public MCAsmBackend {
const MCSubtargetInfo* STI;
- bool isThumbMode; // Currently emitting Thumb code.
+ bool isThumbMode; // Currently emitting Thumb code.
+ bool IsLittleEndian; // Big or little endian.
public:
- ARMAsmBackend(const Target &T, const StringRef TT)
+ ARMAsmBackend(const Target &T, const StringRef TT, bool IsLittle)
: MCAsmBackend(), STI(ARM_MC::createARMMCSubtargetInfo(TT, "", "")),
- isThumbMode(TT.startswith("thumb")) {}
+ isThumbMode(TT.startswith("thumb")), IsLittleEndian(IsLittle) {}
~ARMAsmBackend() {
delete STI;
}
- unsigned getNumFixupKinds() const { return ARM::NumTargetFixupKinds; }
+ unsigned getNumFixupKinds() const override {
+ return ARM::NumTargetFixupKinds;
+ }
bool hasNOP() const {
return (STI->getFeatureBits() & ARM::HasV6T2Ops) != 0;
}
- const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
- const static MCFixupKindInfo Infos[ARM::NumTargetFixupKinds] = {
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
+ const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
// This table *must* be in the order that the fixup_* kinds are defined in
// ARMFixupKinds.h.
//
@@ -94,10 +97,43 @@ public:
{ "fixup_arm_movw_lo16", 0, 20, 0 },
{ "fixup_t2_movt_hi16", 0, 20, 0 },
{ "fixup_t2_movw_lo16", 0, 20, 0 },
-{ "fixup_arm_movt_hi16_pcrel", 0, 20, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_arm_movw_lo16_pcrel", 0, 20, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_t2_movt_hi16_pcrel", 0, 20, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_t2_movw_lo16_pcrel", 0, 20, MCFixupKindInfo::FKF_IsPCRel },
+ };
+ const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
+// This table *must* be in the order that the fixup_* kinds are defined in
+// ARMFixupKinds.h.
+//
+// Name Offset (bits) Size (bits) Flags
+{ "fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_t2_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel |
+ MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
+{ "fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_t2_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel |
+ MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
+{ "fixup_thumb_adr_pcrel_10",8, 8, MCFixupKindInfo::FKF_IsPCRel |
+ MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
+{ "fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_t2_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel |
+ MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
+{ "fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_thumb_blx", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_thumb_cp", 8, 8, MCFixupKindInfo::FKF_IsPCRel |
+ MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
+{ "fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel },
+// movw / movt: 16-bit immediate but scattered into two chunks 0 - 12, 16 - 19.
+{ "fixup_arm_movt_hi16", 12, 20, 0 },
+{ "fixup_arm_movw_lo16", 12, 20, 0 },
+{ "fixup_t2_movt_hi16", 12, 20, 0 },
+{ "fixup_t2_movw_lo16", 12, 20, 0 },
};
if (Kind < FirstTargetFixupKind)
@@ -105,32 +141,31 @@ public:
assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
"Invalid kind!");
- return Infos[Kind - FirstTargetFixupKind];
+ return (IsLittleEndian ? InfosLE : InfosBE)[Kind - FirstTargetFixupKind];
}
/// processFixupValue - Target hook to process the literal value of a fixup
/// if necessary.
void processFixupValue(const MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFixup &Fixup, const MCFragment *DF,
- MCValue &Target, uint64_t &Value,
- bool &IsResolved);
+ const MCValue &Target, uint64_t &Value,
+ bool &IsResolved) override;
void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
- uint64_t Value) const;
+ uint64_t Value, bool IsPCRel) const override;
- bool mayNeedRelaxation(const MCInst &Inst) const;
+ bool mayNeedRelaxation(const MCInst &Inst) const override;
- bool fixupNeedsRelaxation(const MCFixup &Fixup,
- uint64_t Value,
+ bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
const MCRelaxableFragment *DF,
- const MCAsmLayout &Layout) const;
+ const MCAsmLayout &Layout) const override;
- void relaxInstruction(const MCInst &Inst, MCInst &Res) const;
+ void relaxInstruction(const MCInst &Inst, MCInst &Res) const override;
- bool writeNopData(uint64_t Count, MCObjectWriter *OW) const;
+ bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
- void handleAssemblerFlag(MCAssemblerFlag Flag) {
+ void handleAssemblerFlag(MCAssemblerFlag Flag) override {
switch (Flag) {
default: break;
case MCAF_Code16:
@@ -145,6 +180,7 @@ public:
unsigned getPointerSize() const { return 4; }
bool isThumb() const { return isThumbMode; }
void setIsThumb(bool it) { isThumbMode = it; }
+ bool isLittle() const { return IsLittleEndian; }
};
} // end anonymous namespace
@@ -155,6 +191,8 @@ static unsigned getRelaxedOpcode(unsigned Op) {
case ARM::tLDRpci: return ARM::t2LDRpci;
case ARM::tADR: return ARM::t2ADR;
case ARM::tB: return ARM::t2B;
+ case ARM::tCBZ: return ARM::tHINT;
+ case ARM::tCBNZ: return ARM::tHINT;
}
}
@@ -196,6 +234,12 @@ bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
int64_t Offset = int64_t(Value) - 4;
return Offset > 1020 || Offset < 0 || Offset & 3;
}
+ case ARM::fixup_arm_thumb_cb:
+ // If we have a Thumb CBZ or CBNZ instruction and its target is the next
+ // instruction, it is actually out of range for the instruction.
+ // It will be changed to a NOP.
+ int64_t Offset = (Value & ~1);
+ return Offset == 2;
}
llvm_unreachable("Unexpected fixup kind in fixupNeedsRelaxation()!");
}
@@ -212,7 +256,18 @@ void ARMAsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const {
report_fatal_error("unexpected instruction to relax: " + OS.str());
}
- // The instructions we're relaxing have (so far) the same operands.
+ // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
+ // have to change the operands too.
+ if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
+ RelaxedOp == ARM::tHINT) {
+ Res.setOpcode(RelaxedOp);
+ Res.addOperand(MCOperand::CreateImm(0));
+ Res.addOperand(MCOperand::CreateImm(14));
+ Res.addOperand(MCOperand::CreateReg(0));
+ return;
+ }
+
+ // The rest of the instructions we're relaxing have the same operands.
// We just need to update to the proper opcode.
Res = Inst;
Res.setOpcode(RelaxedOp);
@@ -251,8 +306,36 @@ bool ARMAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
return true;
}
+static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
+ if (IsLittleEndian) {
+ // Note that the halfwords are stored high first and low second in Thumb,
+ // so we need to swap the fixup value here to map properly.
+ uint32_t Swapped = (Value & 0xFFFF0000) >> 16;
+ Swapped |= (Value & 0x0000FFFF) << 16;
+ return Swapped;
+ }
+ else
+ return Value;
+}
+
+static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
+ bool IsLittleEndian) {
+ uint32_t Value;
+
+ if (IsLittleEndian) {
+ Value = (SecondHalf & 0xFFFF) << 16;
+ Value |= (FirstHalf & 0xFFFF);
+ } else {
+ Value = (SecondHalf & 0xFFFF);
+ Value |= (FirstHalf & 0xFFFF) << 16;
+ }
+
+ return Value;
+}
+
static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
- MCContext *Ctx = NULL) {
+ bool IsPCRel, MCContext *Ctx,
+ bool IsLittleEndian) {
unsigned Kind = Fixup.getKind();
switch (Kind) {
default:
@@ -261,12 +344,15 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
case FK_Data_2:
case FK_Data_4:
return Value;
+ case FK_SecRel_2:
+ return Value;
+ case FK_SecRel_4:
+ return Value;
case ARM::fixup_arm_movt_hi16:
- Value >>= 16;
+ if (!IsPCRel)
+ Value >>= 16;
// Fallthrough
- case ARM::fixup_arm_movw_lo16:
- case ARM::fixup_arm_movt_hi16_pcrel:
- case ARM::fixup_arm_movw_lo16_pcrel: {
+ case ARM::fixup_arm_movw_lo16: {
unsigned Hi4 = (Value & 0xF000) >> 12;
unsigned Lo12 = Value & 0x0FFF;
// inst{19-16} = Hi4;
@@ -275,12 +361,10 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
return Value;
}
case ARM::fixup_t2_movt_hi16:
- Value >>= 16;
+ if (!IsPCRel)
+ Value >>= 16;
// Fallthrough
- case ARM::fixup_t2_movw_lo16:
- case ARM::fixup_t2_movt_hi16_pcrel: //FIXME: Shouldn't this be shifted like
- // the other hi16 fixup?
- case ARM::fixup_t2_movw_lo16_pcrel: {
+ case ARM::fixup_t2_movw_lo16: {
unsigned Hi4 = (Value & 0xF000) >> 12;
unsigned i = (Value & 0x800) >> 11;
unsigned Mid3 = (Value & 0x700) >> 8;
@@ -290,9 +374,7 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
// inst{14-12} = Mid3;
// inst{7-0} = Lo8;
Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
- uint64_t swapped = (Value & 0xFFFF0000) >> 16;
- swapped |= (Value & 0x0000FFFF) << 16;
- return swapped;
+ return swapHalfWords(Value, IsLittleEndian);
}
case ARM::fixup_arm_ldst_pcrel_12:
// ARM PC-relative values are offset by 8.
@@ -312,11 +394,8 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
// Same addressing mode as fixup_arm_pcrel_10,
// but with 16-bit halfwords swapped.
- if (Kind == ARM::fixup_t2_ldst_pcrel_12) {
- uint64_t swapped = (Value & 0xFFFF0000) >> 16;
- swapped |= (Value & 0x0000FFFF) << 16;
- return swapped;
- }
+ if (Kind == ARM::fixup_t2_ldst_pcrel_12)
+ return swapHalfWords(Value, IsLittleEndian);
return Value;
}
@@ -349,9 +428,7 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
out |= (Value & 0x700) << 4;
out |= (Value & 0x0FF);
- uint64_t swapped = (out & 0xFFFF0000) >> 16;
- swapped |= (out & 0x0000FFFF) << 16;
- return swapped;
+ return swapHalfWords(out, IsLittleEndian);
}
case ARM::fixup_arm_condbranch:
@@ -361,6 +438,9 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
case ARM::fixup_arm_blx:
// These values don't encode the low two bits since they're always zero.
// Offset by 8 just as above.
+ if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
+ if (SRE->getKind() == MCSymbolRefExpr::VK_ARM_TLSCALL)
+ return 0;
return 0xffffff & ((Value - 8) >> 2);
case ARM::fixup_t2_uncondbranch: {
Value = Value - 4;
@@ -379,9 +459,7 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
out |= (Value & 0x1FF800) << 5; // imm6 field
out |= (Value & 0x0007FF); // imm11 field
- uint64_t swapped = (out & 0xFFFF0000) >> 16;
- swapped |= (out & 0x0000FFFF) << 16;
- return swapped;
+ return swapHalfWords(out, IsLittleEndian);
}
case ARM::fixup_t2_condbranch: {
Value = Value - 4;
@@ -394,70 +472,64 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
out |= (Value & 0x1F800) << 5; // imm6 field
out |= (Value & 0x007FF); // imm11 field
- uint32_t swapped = (out & 0xFFFF0000) >> 16;
- swapped |= (out & 0x0000FFFF) << 16;
- return swapped;
+ return swapHalfWords(out, IsLittleEndian);
}
case ARM::fixup_arm_thumb_bl: {
- // The value doesn't encode the low bit (always zero) and is offset by
- // four. The 32-bit immediate value is encoded as
- // imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
- // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
- // The value is encoded into disjoint bit positions in the destination
- // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
- // J = either J1 or J2 bit
- //
- // BL: xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
- //
- // Note that the halfwords are stored high first, low second; so we need
- // to transpose the fixup value here to map properly.
- uint32_t offset = (Value - 4) >> 1;
- uint32_t signBit = (offset & 0x800000) >> 23;
- uint32_t I1Bit = (offset & 0x400000) >> 22;
- uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
- uint32_t I2Bit = (offset & 0x200000) >> 21;
- uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
- uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
- uint32_t imm11Bits = (offset & 0x000007FF);
-
- uint32_t Binary = 0;
- uint32_t firstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
- uint32_t secondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
- (uint16_t)imm11Bits);
- Binary |= secondHalf << 16;
- Binary |= firstHalf;
- return Binary;
-
+ // The value doesn't encode the low bit (always zero) and is offset by
+ // four. The 32-bit immediate value is encoded as
+ // imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
+ // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
+ // The value is encoded into disjoint bit positions in the destination
+ // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
+ // J = either J1 or J2 bit
+ //
+ // BL: xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
+ //
+ // Note that the halfwords are stored high first, low second; so we need
+ // to transpose the fixup value here to map properly.
+ uint32_t offset = (Value - 4) >> 1;
+ uint32_t signBit = (offset & 0x800000) >> 23;
+ uint32_t I1Bit = (offset & 0x400000) >> 22;
+ uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
+ uint32_t I2Bit = (offset & 0x200000) >> 21;
+ uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
+ uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
+ uint32_t imm11Bits = (offset & 0x000007FF);
+
+ uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
+ uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
+ (uint16_t)imm11Bits);
+ return joinHalfWords(FirstHalf, SecondHalf, IsLittleEndian);
}
case ARM::fixup_arm_thumb_blx: {
- // The value doesn't encode the low two bits (always zero) and is offset by
- // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
- // imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
- // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
- // The value is encoded into disjoint bit positions in the destination
- // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
- // J = either J1 or J2 bit, 0 = zero.
- //
- // BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
- //
- // Note that the halfwords are stored high first, low second; so we need
- // to transpose the fixup value here to map properly.
- uint32_t offset = (Value - 2) >> 2;
- uint32_t signBit = (offset & 0x400000) >> 22;
- uint32_t I1Bit = (offset & 0x200000) >> 21;
- uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
- uint32_t I2Bit = (offset & 0x100000) >> 20;
- uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
- uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
- uint32_t imm10LBits = (offset & 0x3FF);
-
- uint32_t Binary = 0;
- uint32_t firstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
- uint32_t secondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
- ((uint16_t)imm10LBits) << 1);
- Binary |= secondHalf << 16;
- Binary |= firstHalf;
- return Binary;
+ // The value doesn't encode the low two bits (always zero) and is offset by
+ // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
+ // imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
+ // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
+ // The value is encoded into disjoint bit positions in the destination
+ // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
+ // J = either J1 or J2 bit, 0 = zero.
+ //
+ // BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
+ //
+ // Note that the halfwords are stored high first, low second; so we need
+ // to transpose the fixup value here to map properly.
+ uint32_t offset = (Value - 2) >> 2;
+ if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
+ if (SRE->getKind() == MCSymbolRefExpr::VK_ARM_TLSCALL)
+ offset = 0;
+ uint32_t signBit = (offset & 0x400000) >> 22;
+ uint32_t I1Bit = (offset & 0x200000) >> 21;
+ uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
+ uint32_t I2Bit = (offset & 0x100000) >> 20;
+ uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
+ uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
+ uint32_t imm10LBits = (offset & 0x3FF);
+
+ uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
+ uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
+ ((uint16_t)imm10LBits) << 1);
+ return joinHalfWords(FirstHalf, SecondHalf, IsLittleEndian);
}
case ARM::fixup_arm_thumb_cp:
// Offset by 4, and don't encode the low two bits. Two bytes of that
@@ -509,11 +581,8 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
// Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
// swapped.
- if (Kind == ARM::fixup_t2_pcrel_10) {
- uint32_t swapped = (Value & 0xFFFF0000) >> 16;
- swapped |= (Value & 0x0000FFFF) << 16;
- return swapped;
- }
+ if (Kind == ARM::fixup_t2_pcrel_10)
+ return swapHalfWords(Value, IsLittleEndian);
return Value;
}
@@ -524,7 +593,7 @@ void ARMAsmBackend::processFixupValue(const MCAssembler &Asm,
const MCAsmLayout &Layout,
const MCFixup &Fixup,
const MCFragment *DF,
- MCValue &Target, uint64_t &Value,
+ const MCValue &Target, uint64_t &Value,
bool &IsResolved) {
const MCSymbolRefExpr *A = Target.getSymA();
// Some fixups to thumb function symbols need the low bit (thumb bit)
@@ -541,11 +610,18 @@ void ARMAsmBackend::processFixupValue(const MCAssembler &Asm,
Value |= 1;
}
}
+ // A Thumb1 BL instruction may be a long jump between basic blocks of the
+ // same function. Thus, we only resolve the offset when the destination
+ // lies within the same MCFragment.
+ if (A && (unsigned)Fixup.getKind() == ARM::fixup_arm_thumb_bl) {
+ const MCSymbol &Sym = A->getSymbol().AliasedSymbol();
+ const MCSymbolData &SymData = Asm.getSymbolData(Sym);
+ IsResolved = (SymData.getFragment() == DF);
+ }
// We must always generate a relocation for BL/BLX instructions if we have
// a symbol to reference, as the linker relies on knowing the destination
// symbol's thumb-ness to get interworking right.
if (A && ((unsigned)Fixup.getKind() == ARM::fixup_arm_thumb_blx ||
- (unsigned)Fixup.getKind() == ARM::fixup_arm_thumb_bl ||
(unsigned)Fixup.getKind() == ARM::fixup_arm_blx ||
(unsigned)Fixup.getKind() == ARM::fixup_arm_uncondbl ||
(unsigned)Fixup.getKind() == ARM::fixup_arm_condbl))
@@ -554,7 +630,8 @@ void ARMAsmBackend::processFixupValue(const MCAssembler &Asm,
// Try to get the encoded value for the fixup as if we're mapping it into
// the instruction. This allows adjustFixupValue() to issue a diagnostic
// if the value is invalid.
- (void)adjustFixupValue(Fixup, Value, &Asm.getContext());
+ (void)adjustFixupValue(Fixup, Value, false, &Asm.getContext(),
+ IsLittleEndian);
}
/// getFixupKindNumBytes - The number of bytes the fixup may change.
@@ -595,33 +672,101 @@ static unsigned getFixupKindNumBytes(unsigned Kind) {
case ARM::fixup_arm_thumb_blx:
case ARM::fixup_arm_movt_hi16:
case ARM::fixup_arm_movw_lo16:
- case ARM::fixup_arm_movt_hi16_pcrel:
- case ARM::fixup_arm_movw_lo16_pcrel:
case ARM::fixup_t2_movt_hi16:
case ARM::fixup_t2_movw_lo16:
- case ARM::fixup_t2_movt_hi16_pcrel:
- case ARM::fixup_t2_movw_lo16_pcrel:
+ return 4;
+
+ case FK_SecRel_2:
+ return 2;
+ case FK_SecRel_4:
+ return 4;
+ }
+}
+
+/// getFixupKindContainerSizeBytes - The number of bytes of the instruction
+/// container a fixup is applied to, needed for big-endian byte placement.
+static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown fixup kind!");
+
+ case FK_Data_1:
+ return 1;
+ case FK_Data_2:
+ return 2;
+ case FK_Data_4:
+ return 4;
+
+ case ARM::fixup_arm_thumb_bcc:
+ case ARM::fixup_arm_thumb_cp:
+ case ARM::fixup_thumb_adr_pcrel_10:
+ case ARM::fixup_arm_thumb_br:
+ case ARM::fixup_arm_thumb_cb:
+ // Instruction size is 2 bytes.
+ return 2;
+
+ case ARM::fixup_arm_pcrel_10_unscaled:
+ case ARM::fixup_arm_ldst_pcrel_12:
+ case ARM::fixup_arm_pcrel_10:
+ case ARM::fixup_arm_adr_pcrel_12:
+ case ARM::fixup_arm_uncondbl:
+ case ARM::fixup_arm_condbl:
+ case ARM::fixup_arm_blx:
+ case ARM::fixup_arm_condbranch:
+ case ARM::fixup_arm_uncondbranch:
+ case ARM::fixup_t2_ldst_pcrel_12:
+ case ARM::fixup_t2_condbranch:
+ case ARM::fixup_t2_uncondbranch:
+ case ARM::fixup_t2_pcrel_10:
+ case ARM::fixup_t2_adr_pcrel_12:
+ case ARM::fixup_arm_thumb_bl:
+ case ARM::fixup_arm_thumb_blx:
+ case ARM::fixup_arm_movt_hi16:
+ case ARM::fixup_arm_movw_lo16:
+ case ARM::fixup_t2_movt_hi16:
+ case ARM::fixup_t2_movw_lo16:
+ // Instruction size is 4 bytes.
return 4;
}
}
void ARMAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
- unsigned DataSize, uint64_t Value) const {
+ unsigned DataSize, uint64_t Value,
+ bool IsPCRel) const {
unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
- Value = adjustFixupValue(Fixup, Value);
+ Value = adjustFixupValue(Fixup, Value, IsPCRel, nullptr, IsLittleEndian);
if (!Value) return; // Doesn't change encoding.
unsigned Offset = Fixup.getOffset();
assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");
+ // Container size in bytes; used to index bytes from the big end on BE targets.
+ unsigned FullSizeBytes;
+ if (!IsLittleEndian) {
+ FullSizeBytes = getFixupKindContainerSizeBytes(Fixup.getKind());
+ assert((Offset + FullSizeBytes) <= DataSize && "Invalid fixup size!");
+ assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
+ }
+
// For each byte of the fragment that the fixup touches, mask in the bits from
// the fixup value. The Value has been "split up" into the appropriate
// bitfields above.
- for (unsigned i = 0; i != NumBytes; ++i)
- Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
+ for (unsigned i = 0; i != NumBytes; ++i) {
+ unsigned Idx = IsLittleEndian ? i : (FullSizeBytes - 1 - i);
+ Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
+ }
}
namespace {
+// FIXME: This should be in a separate file.
+class ARMWinCOFFAsmBackend : public ARMAsmBackend {
+public:
+ ARMWinCOFFAsmBackend(const Target &T, const StringRef &Triple)
+ : ARMAsmBackend(T, Triple, true) { }
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
+ return createARMWinCOFFObjectWriter(OS, /*Is64Bit=*/false);
+ }
+};
// FIXME: This should be in a separate file.
// ELF is an ELF of course...
@@ -629,11 +774,11 @@ class ELFARMAsmBackend : public ARMAsmBackend {
public:
uint8_t OSABI;
ELFARMAsmBackend(const Target &T, const StringRef TT,
- uint8_t _OSABI)
- : ARMAsmBackend(T, TT), OSABI(_OSABI) { }
+ uint8_t OSABI, bool IsLittle)
+ : ARMAsmBackend(T, TT, IsLittle), OSABI(OSABI) { }
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
- return createARMELFObjectWriter(OS, OSABI);
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
+ return createARMELFObjectWriter(OS, OSABI, isLittle());
}
};
@@ -643,29 +788,28 @@ public:
const MachO::CPUSubTypeARM Subtype;
DarwinARMAsmBackend(const Target &T, const StringRef TT,
MachO::CPUSubTypeARM st)
- : ARMAsmBackend(T, TT), Subtype(st) {
+ : ARMAsmBackend(T, TT, /* IsLittleEndian */ true), Subtype(st) {
HasDataInCodeSupport = true;
}
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
return createARMMachObjectWriter(OS, /*Is64Bit=*/false,
MachO::CPU_TYPE_ARM,
Subtype);
}
-
- virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
- return false;
- }
};
} // end anonymous namespace
MCAsmBackend *llvm::createARMAsmBackend(const Target &T,
const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU) {
+ StringRef TT, StringRef CPU,
+ bool isLittle) {
Triple TheTriple(TT);
- if (TheTriple.isOSDarwin()) {
+ switch (TheTriple.getObjectFormat()) {
+ default: llvm_unreachable("unsupported object format");
+ case Triple::MachO: {
MachO::CPUSubTypeARM CS =
StringSwitch<MachO::CPUSubTypeARM>(TheTriple.getArchName())
.Cases("armv4t", "thumbv4t", MachO::CPU_SUBTYPE_ARM_V4T)
@@ -673,7 +817,6 @@ MCAsmBackend *llvm::createARMAsmBackend(const Target &T,
.Cases("armv6", "thumbv6", MachO::CPU_SUBTYPE_ARM_V6)
.Cases("armv6m", "thumbv6m", MachO::CPU_SUBTYPE_ARM_V6M)
.Cases("armv7em", "thumbv7em", MachO::CPU_SUBTYPE_ARM_V7EM)
- .Cases("armv7f", "thumbv7f", MachO::CPU_SUBTYPE_ARM_V7F)
.Cases("armv7k", "thumbv7k", MachO::CPU_SUBTYPE_ARM_V7K)
.Cases("armv7m", "thumbv7m", MachO::CPU_SUBTYPE_ARM_V7M)
.Cases("armv7s", "thumbv7s", MachO::CPU_SUBTYPE_ARM_V7S)
@@ -681,13 +824,37 @@ MCAsmBackend *llvm::createARMAsmBackend(const Target &T,
return new DarwinARMAsmBackend(T, TT, CS);
}
+ case Triple::COFF:
+ assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
+ return new ARMWinCOFFAsmBackend(T, TT);
+ case Triple::ELF:
+ assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
+ uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(Triple(TT).getOS());
+ return new ELFARMAsmBackend(T, TT, OSABI, isLittle);
+ }
+}
-#if 0
- // FIXME: Introduce yet another checker but assert(0).
- if (TheTriple.isOSBinFormatCOFF())
- assert(0 && "Windows not supported on ARM");
-#endif
+MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU) {
+ return createARMAsmBackend(T, MRI, TT, CPU, true);
+}
+
+MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU) {
+ return createARMAsmBackend(T, MRI, TT, CPU, false);
+}
- uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(Triple(TT).getOS());
- return new ELFARMAsmBackend(T, TT, OSABI);
+MCAsmBackend *llvm::createThumbLEAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU) {
+ return createARMAsmBackend(T, MRI, TT, CPU, true);
}
+
+MCAsmBackend *llvm::createThumbBEAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU) {
+ return createARMAsmBackend(T, MRI, TT, CPU, false);
+}
+
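
For readers following the big-endian support added to ARMAsmBackend::applyFixup above, here is a minimal, self-contained C++ sketch of the byte-placement scheme; the helper name placeFixup and the sample value are hypothetical, only the indexing mirrors the patched loop.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Mask a fixup value into the fragment bytes. Little-endian targets lay the
// value out low byte first; big-endian targets mirror the bytes within the
// instruction container (FullSizeBytes in the patch above).
static void placeFixup(uint8_t *Data, unsigned NumBytes, unsigned ContainerBytes,
                       uint32_t Value, bool IsLittleEndian) {
  assert(NumBytes <= ContainerBytes && "fixup larger than its container");
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned Idx = IsLittleEndian ? i : (ContainerBytes - 1 - i);
    Data[Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
  }
}

int main() {
  uint8_t LE[4] = {}, BE[4] = {};
  placeFixup(LE, 4, 4, 0x11223344, /*IsLittleEndian=*/true);
  placeFixup(BE, 4, 4, 0x11223344, /*IsLittleEndian=*/false);
  std::printf("LE: %02x %02x %02x %02x\n", LE[0], LE[1], LE[2], LE[3]); // 44 33 22 11
  std::printf("BE: %02x %02x %02x %02x\n", BE[0], BE[1], BE[2], BE[3]); // 11 22 33 44
  return 0;
}
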
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
index af939fc..1686d76 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
@@ -183,7 +183,8 @@ namespace ARM_ISB {
inline static const char *InstSyncBOptToString(unsigned val) {
switch (val) {
- default: llvm_unreachable("Unkown memory operation");
+ default:
+ llvm_unreachable("Unknown memory operation");
case RESERVED_0: return "#0x0";
case RESERVED_1: return "#0x1";
case RESERVED_2: return "#0x2";
@@ -278,42 +279,41 @@ namespace ARMII {
//===------------------------------------------------------------------===//
// ARM Specific MachineOperand flags.
- MO_NO_FLAG,
+ MO_NO_FLAG = 0,
/// MO_LO16 - On a symbol operand, this represents a relocation containing
/// lower 16 bit of the address. Used only via movw instruction.
- MO_LO16,
+ MO_LO16 = 0x1,
/// MO_HI16 - On a symbol operand, this represents a relocation containing
/// higher 16 bit of the address. Used only via movt instruction.
- MO_HI16,
-
- /// MO_LO16_NONLAZY - On a symbol operand "FOO", this represents a
- /// relocation containing lower 16 bit of the non-lazy-ptr indirect symbol,
- /// i.e. "FOO$non_lazy_ptr".
- /// Used only via movw instruction.
- MO_LO16_NONLAZY,
-
- /// MO_HI16_NONLAZY - On a symbol operand "FOO", this represents a
- /// relocation containing lower 16 bit of the non-lazy-ptr indirect symbol,
- /// i.e. "FOO$non_lazy_ptr". Used only via movt instruction.
- MO_HI16_NONLAZY,
-
- /// MO_LO16_NONLAZY_PIC - On a symbol operand "FOO", this represents a
- /// relocation containing lower 16 bit of the PC relative address of the
- /// non-lazy-ptr indirect symbol, i.e. "FOO$non_lazy_ptr - LABEL".
- /// Used only via movw instruction.
- MO_LO16_NONLAZY_PIC,
-
- /// MO_HI16_NONLAZY_PIC - On a symbol operand "FOO", this represents a
- /// relocation containing lower 16 bit of the PC relative address of the
- /// non-lazy-ptr indirect symbol, i.e. "FOO$non_lazy_ptr - LABEL".
- /// Used only via movt instruction.
- MO_HI16_NONLAZY_PIC,
+ MO_HI16 = 0x2,
/// MO_PLT - On a symbol operand, this represents an ELF PLT reference on a
/// call operand.
- MO_PLT
+ MO_PLT = 0x3,
+
+ /// MO_OPTION_MASK - Most flags are mutually exclusive; this mask selects
+ /// just that part of the flag set.
+ MO_OPTION_MASK = 0x3f,
+
+ /// MO_DLLIMPORT - On a symbol operand, this represents that the reference
+ /// to the symbol is for an import stub. This is used for DLL import
+ /// storage class indication on Windows.
+ MO_DLLIMPORT = 0x40,
+
+ /// MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it
+ /// represents a symbol which, if indirect, will get special Darwin mangling
+ /// as a non-lazy-ptr indirect symbol (i.e. "L_FOO$non_lazy_ptr"). Can be
+ /// combined with MO_LO16, MO_HI16 or MO_NO_FLAG (in a constant-pool, for
+ /// example).
+ MO_NONLAZY = 0x80,
+
+ // It's undefined behaviour if an enum overflows the range between its
+ // smallest and largest values, but since these are |ed together, it can
+ // happen. Put a sentinel in (values of this enum are stored as "unsigned
+ // char").
+ MO_UNUSED_MAXIMUM = 0xff
};
enum {
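
A minimal sketch of how the renumbered MachineOperand flags above are meant to compose; the enum is repeated here only to keep the example self-contained, and the checks in main() are illustrative assumptions rather than code from the patch.

#include <cassert>

namespace ARMII {
enum TOF {
  MO_NO_FLAG = 0,
  MO_LO16 = 0x1,          // mutually exclusive "option" values...
  MO_HI16 = 0x2,
  MO_PLT = 0x3,
  MO_OPTION_MASK = 0x3f,  // ...selected by this mask
  MO_DLLIMPORT = 0x40,    // independent bits that may be |ed in
  MO_NONLAZY = 0x80
};
}

int main() {
  // MO_NONLAZY can be combined with one of the mutually exclusive options;
  // masking recovers the option part, the independent bits are tested directly.
  unsigned Flags = ARMII::MO_HI16 | ARMII::MO_NONLAZY;
  assert((Flags & ARMII::MO_OPTION_MASK) == ARMII::MO_HI16);
  assert((Flags & ARMII::MO_NONLAZY) != 0);
  assert((Flags & ARMII::MO_DLLIMPORT) == 0);
  return 0;
}
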
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
index f98bbd2..a86601b 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
@@ -34,14 +34,11 @@ namespace {
virtual ~ARMELFObjectWriter();
- virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel, bool IsRelocWithSymbol,
- int64_t Addend) const;
- virtual const MCSymbol *ExplicitRelSym(const MCAssembler &Asm,
- const MCValue &Target,
- const MCFragment &F,
- const MCFixup &Fixup,
- bool IsPCRel) const;
+ unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel) const override;
+
+ bool needsRelocateWithSymbol(const MCSymbolData &SD,
+ unsigned Type) const override;
};
}
@@ -52,91 +49,19 @@ ARMELFObjectWriter::ARMELFObjectWriter(uint8_t OSABI)
ARMELFObjectWriter::~ARMELFObjectWriter() {}
-// In ARM, _MergedGlobals and other most symbols get emitted directly.
-// I.e. not as an offset to a section symbol.
-// This code is an approximation of what ARM/gcc does.
-
-STATISTIC(PCRelCount, "Total number of PIC Relocations");
-STATISTIC(NonPCRelCount, "Total number of non-PIC relocations");
-
-const MCSymbol *ARMELFObjectWriter::ExplicitRelSym(const MCAssembler &Asm,
- const MCValue &Target,
- const MCFragment &F,
- const MCFixup &Fixup,
- bool IsPCRel) const {
- const MCSymbol &Symbol = Target.getSymA()->getSymbol().AliasedSymbol();
- bool EmitThisSym = false;
-
- const MCSectionELF &Section =
- static_cast<const MCSectionELF&>(Symbol.getSection());
- bool InNormalSection = true;
- unsigned RelocType = 0;
- RelocType = GetRelocTypeInner(Target, Fixup, IsPCRel);
-
- DEBUG(
- const MCSymbolRefExpr::VariantKind Kind = Target.getSymA()->getKind();
- MCSymbolRefExpr::VariantKind Kind2;
- Kind2 = Target.getSymB() ? Target.getSymB()->getKind() :
- MCSymbolRefExpr::VK_None;
- dbgs() << "considering symbol "
- << Section.getSectionName() << "/"
- << Symbol.getName() << "/"
- << " Rel:" << (unsigned)RelocType
- << " Kind: " << (int)Kind << "/" << (int)Kind2
- << " Tmp:"
- << Symbol.isAbsolute() << "/" << Symbol.isDefined() << "/"
- << Symbol.isVariable() << "/" << Symbol.isTemporary()
- << " Counts:" << PCRelCount << "/" << NonPCRelCount << "\n");
-
- if (IsPCRel) { ++PCRelCount;
- switch (RelocType) {
- default:
- // Most relocation types are emitted as explicit symbols
- InNormalSection =
- StringSwitch<bool>(Section.getSectionName())
- .Case(".data.rel.ro.local", false)
- .Case(".data.rel", false)
- .Case(".bss", false)
- .Default(true);
- EmitThisSym = true;
- break;
- case ELF::R_ARM_ABS32:
- // But things get strange with R_ARM_ABS32
- // In this case, most things that go in .rodata show up
- // as section relative relocations
- InNormalSection =
- StringSwitch<bool>(Section.getSectionName())
- .Case(".data.rel.ro.local", false)
- .Case(".data.rel", false)
- .Case(".rodata", false)
- .Case(".bss", false)
- .Default(true);
- EmitThisSym = false;
- break;
- }
- } else {
- NonPCRelCount++;
- InNormalSection =
- StringSwitch<bool>(Section.getSectionName())
- .Case(".data.rel.ro.local", false)
- .Case(".rodata", false)
- .Case(".data.rel", false)
- .Case(".bss", false)
- .Default(true);
-
- switch (RelocType) {
- default: EmitThisSym = true; break;
- case ELF::R_ARM_ABS32: EmitThisSym = false; break;
- case ELF::R_ARM_PREL31: EmitThisSym = false; break;
- }
- }
-
- if (EmitThisSym)
- return &Symbol;
- if (! Symbol.isTemporary() && InNormalSection) {
- return &Symbol;
+bool ARMELFObjectWriter::needsRelocateWithSymbol(const MCSymbolData &SD,
+ unsigned Type) const {
+ // FIXME: This is extremely conservative. This really needs to use a
+ // whitelist with a clear explanation of why each relocation needs to
+ // point to the symbol, not to the section.
+ switch (Type) {
+ default:
+ return true;
+
+ case ELF::R_ARM_PREL31:
+ case ELF::R_ARM_ABS32:
+ return false;
}
- return NULL;
}
// Need to examine the Fixup when determining whether to
@@ -144,17 +69,14 @@ const MCSymbol *ARMELFObjectWriter::ExplicitRelSym(const MCAssembler &Asm,
// offset
unsigned ARMELFObjectWriter::GetRelocType(const MCValue &Target,
const MCFixup &Fixup,
- bool IsPCRel,
- bool IsRelocWithSymbol,
- int64_t Addend) const {
+ bool IsPCRel) const {
return GetRelocTypeInner(Target, Fixup, IsPCRel);
}
unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
const MCFixup &Fixup,
bool IsPCRel) const {
- MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ?
- MCSymbolRefExpr::VK_None : Target.getSymA()->getKind();
+ MCSymbolRefExpr::VariantKind Modifier = Target.getAccessVariant();
unsigned Type = 0;
if (IsPCRel) {
@@ -166,19 +88,25 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
case MCSymbolRefExpr::VK_None:
Type = ELF::R_ARM_REL32;
break;
- case MCSymbolRefExpr::VK_ARM_TLSGD:
+ case MCSymbolRefExpr::VK_TLSGD:
llvm_unreachable("unimplemented");
- case MCSymbolRefExpr::VK_ARM_GOTTPOFF:
+ case MCSymbolRefExpr::VK_GOTTPOFF:
Type = ELF::R_ARM_TLS_IE32;
break;
+ case MCSymbolRefExpr::VK_GOTPCREL:
+ Type = ELF::R_ARM_GOT_PREL;
+ break;
}
break;
case ARM::fixup_arm_blx:
case ARM::fixup_arm_uncondbl:
switch (Modifier) {
- case MCSymbolRefExpr::VK_ARM_PLT:
+ case MCSymbolRefExpr::VK_PLT:
Type = ELF::R_ARM_PLT32;
break;
+ case MCSymbolRefExpr::VK_ARM_TLSCALL:
+ Type = ELF::R_ARM_TLS_CALL;
+ break;
default:
Type = ELF::R_ARM_CALL;
break;
@@ -194,24 +122,27 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
Type = ELF::R_ARM_THM_JUMP24;
break;
case ARM::fixup_arm_movt_hi16:
- case ARM::fixup_arm_movt_hi16_pcrel:
Type = ELF::R_ARM_MOVT_PREL;
break;
case ARM::fixup_arm_movw_lo16:
- case ARM::fixup_arm_movw_lo16_pcrel:
Type = ELF::R_ARM_MOVW_PREL_NC;
break;
case ARM::fixup_t2_movt_hi16:
- case ARM::fixup_t2_movt_hi16_pcrel:
Type = ELF::R_ARM_THM_MOVT_PREL;
break;
case ARM::fixup_t2_movw_lo16:
- case ARM::fixup_t2_movw_lo16_pcrel:
Type = ELF::R_ARM_THM_MOVW_PREL_NC;
break;
case ARM::fixup_arm_thumb_bl:
case ARM::fixup_arm_thumb_blx:
- Type = ELF::R_ARM_THM_CALL;
+ switch (Modifier) {
+ case MCSymbolRefExpr::VK_ARM_TLSCALL:
+ Type = ELF::R_ARM_THM_TLS_CALL;
+ break;
+ default:
+ Type = ELF::R_ARM_THM_CALL;
+ break;
+ }
break;
}
} else {
@@ -223,24 +154,27 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
case MCSymbolRefExpr::VK_ARM_NONE:
Type = ELF::R_ARM_NONE;
break;
- case MCSymbolRefExpr::VK_ARM_GOT:
+ case MCSymbolRefExpr::VK_GOT:
Type = ELF::R_ARM_GOT_BREL;
break;
- case MCSymbolRefExpr::VK_ARM_TLSGD:
+ case MCSymbolRefExpr::VK_TLSGD:
Type = ELF::R_ARM_TLS_GD32;
break;
- case MCSymbolRefExpr::VK_ARM_TPOFF:
+ case MCSymbolRefExpr::VK_TPOFF:
Type = ELF::R_ARM_TLS_LE32;
break;
- case MCSymbolRefExpr::VK_ARM_GOTTPOFF:
+ case MCSymbolRefExpr::VK_GOTTPOFF:
Type = ELF::R_ARM_TLS_IE32;
break;
case MCSymbolRefExpr::VK_None:
Type = ELF::R_ARM_ABS32;
break;
- case MCSymbolRefExpr::VK_ARM_GOTOFF:
+ case MCSymbolRefExpr::VK_GOTOFF:
Type = ELF::R_ARM_GOTOFF32;
break;
+ case MCSymbolRefExpr::VK_GOTPCREL:
+ Type = ELF::R_ARM_GOT_PREL;
+ break;
case MCSymbolRefExpr::VK_ARM_TARGET1:
Type = ELF::R_ARM_TARGET1;
break;
@@ -250,6 +184,18 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
case MCSymbolRefExpr::VK_ARM_PREL31:
Type = ELF::R_ARM_PREL31;
break;
+ case MCSymbolRefExpr::VK_ARM_TLSLDO:
+ Type = ELF::R_ARM_TLS_LDO32;
+ break;
+ case MCSymbolRefExpr::VK_ARM_TLSCALL:
+ Type = ELF::R_ARM_TLS_CALL;
+ break;
+ case MCSymbolRefExpr::VK_ARM_TLSDESC:
+ Type = ELF::R_ARM_TLS_GOTDESC;
+ break;
+ case MCSymbolRefExpr::VK_ARM_TLSDESCSEQ:
+ Type = ELF::R_ARM_TLS_DESCSEQ;
+ break;
}
break;
case ARM::fixup_arm_ldst_pcrel_12:
@@ -283,7 +229,8 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
}
MCObjectWriter *llvm::createARMELFObjectWriter(raw_ostream &OS,
- uint8_t OSABI) {
+ uint8_t OSABI,
+ bool IsLittleEndian) {
MCELFObjectTargetWriter *MOTW = new ARMELFObjectWriter(OSABI);
- return createELFObjectWriter(MOTW, OS, /*IsLittleEndian=*/true);
+ return createELFObjectWriter(MOTW, OS, IsLittleEndian);
}
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
index 471897d..7b5d8b0 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
@@ -13,14 +13,14 @@
//
//===----------------------------------------------------------------------===//
-#include "ARMBuildAttrs.h"
+#include "ARMArchName.h"
#include "ARMFPUName.h"
#include "ARMRegisterInfo.h"
-#include "ARMUnwindOp.h"
#include "ARMUnwindOpAsm.h"
-#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
@@ -30,6 +30,7 @@
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSection.h"
@@ -37,16 +38,20 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
+#include "llvm/Support/ARMBuildAttributes.h"
+#include "llvm/Support/ARMEHABI.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/LEB128.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;
static std::string GetAEABIUnwindPersonalityName(unsigned Index) {
- assert(Index < NUM_PERSONALITY_INDEX && "Invalid personality index");
+ assert(Index < ARM::EHABI::NUM_PERSONALITY_INDEX &&
+ "Invalid personality index");
return (Twine("__aeabi_unwind_cpp_pr") + Twine(Index)).str();
}
@@ -58,7 +63,46 @@ static const char *GetFPUName(unsigned ID) {
#define ARM_FPU_NAME(NAME, ID) case ARM::ID: return NAME;
#include "ARMFPUName.def"
}
- return NULL;
+ return nullptr;
+}
+
+static const char *GetArchName(unsigned ID) {
+ switch (ID) {
+ default:
+ llvm_unreachable("Unknown ARCH kind");
+ break;
+#define ARM_ARCH_NAME(NAME, ID, DEFAULT_CPU_NAME, DEFAULT_CPU_ARCH) \
+ case ARM::ID: return NAME;
+#define ARM_ARCH_ALIAS(NAME, ID) /* empty */
+#include "ARMArchName.def"
+ }
+ return nullptr;
+}
+
+static const char *GetArchDefaultCPUName(unsigned ID) {
+ switch (ID) {
+ default:
+ llvm_unreachable("Unknown ARCH kind");
+ break;
+#define ARM_ARCH_NAME(NAME, ID, DEFAULT_CPU_NAME, DEFAULT_CPU_ARCH) \
+ case ARM::ID: return DEFAULT_CPU_NAME;
+#define ARM_ARCH_ALIAS(NAME, ID) /* empty */
+#include "ARMArchName.def"
+ }
+ return nullptr;
+}
+
+static unsigned GetArchDefaultCPUArch(unsigned ID) {
+ switch (ID) {
+ default:
+ llvm_unreachable("Unknown ARCH kind");
+ break;
+#define ARM_ARCH_NAME(NAME, ID, DEFAULT_CPU_NAME, DEFAULT_CPU_ARCH) \
+ case ARM::ID: return ARMBuildAttrs::DEFAULT_CPU_ARCH;
+#define ARM_ARCH_ALIAS(NAME, ID) /* empty */
+#include "ARMArchName.def"
+ }
+ return 0;
}
namespace {
@@ -68,36 +112,56 @@ class ARMELFStreamer;
class ARMTargetAsmStreamer : public ARMTargetStreamer {
formatted_raw_ostream &OS;
MCInstPrinter &InstPrinter;
-
- virtual void emitFnStart();
- virtual void emitFnEnd();
- virtual void emitCantUnwind();
- virtual void emitPersonality(const MCSymbol *Personality);
- virtual void emitHandlerData();
- virtual void emitSetFP(unsigned FpReg, unsigned SpReg, int64_t Offset = 0);
- virtual void emitPad(int64_t Offset);
- virtual void emitRegSave(const SmallVectorImpl<unsigned> &RegList,
- bool isVector);
-
- virtual void switchVendor(StringRef Vendor);
- virtual void emitAttribute(unsigned Attribute, unsigned Value);
- virtual void emitTextAttribute(unsigned Attribute, StringRef String);
- virtual void emitFPU(unsigned FPU);
- virtual void finishAttributeSection();
+ bool IsVerboseAsm;
+
+ void emitFnStart() override;
+ void emitFnEnd() override;
+ void emitCantUnwind() override;
+ void emitPersonality(const MCSymbol *Personality) override;
+ void emitPersonalityIndex(unsigned Index) override;
+ void emitHandlerData() override;
+ void emitSetFP(unsigned FpReg, unsigned SpReg, int64_t Offset = 0) override;
+ void emitMovSP(unsigned Reg, int64_t Offset = 0) override;
+ void emitPad(int64_t Offset) override;
+ void emitRegSave(const SmallVectorImpl<unsigned> &RegList,
+ bool isVector) override;
+ void emitUnwindRaw(int64_t Offset,
+ const SmallVectorImpl<uint8_t> &Opcodes) override;
+
+ void switchVendor(StringRef Vendor) override;
+ void emitAttribute(unsigned Attribute, unsigned Value) override;
+ void emitTextAttribute(unsigned Attribute, StringRef String) override;
+ void emitIntTextAttribute(unsigned Attribute, unsigned IntValue,
+ StringRef StringValue) override;
+ void emitArch(unsigned Arch) override;
+ void emitObjectArch(unsigned Arch) override;
+ void emitFPU(unsigned FPU) override;
+ void emitInst(uint32_t Inst, char Suffix = '\0') override;
+ void finishAttributeSection() override;
+
+ void AnnotateTLSDescriptorSequence(const MCSymbolRefExpr *SRE) override;
+ void emitThumbSet(MCSymbol *Symbol, const MCExpr *Value) override;
public:
- ARMTargetAsmStreamer(formatted_raw_ostream &OS, MCInstPrinter &InstPrinter);
+ ARMTargetAsmStreamer(MCStreamer &S, formatted_raw_ostream &OS,
+ MCInstPrinter &InstPrinter, bool VerboseAsm);
};
-ARMTargetAsmStreamer::ARMTargetAsmStreamer(formatted_raw_ostream &OS,
- MCInstPrinter &InstPrinter)
- : OS(OS), InstPrinter(InstPrinter) {}
+ARMTargetAsmStreamer::ARMTargetAsmStreamer(MCStreamer &S,
+ formatted_raw_ostream &OS,
+ MCInstPrinter &InstPrinter,
+ bool VerboseAsm)
+ : ARMTargetStreamer(S), OS(OS), InstPrinter(InstPrinter),
+ IsVerboseAsm(VerboseAsm) {}
void ARMTargetAsmStreamer::emitFnStart() { OS << "\t.fnstart\n"; }
void ARMTargetAsmStreamer::emitFnEnd() { OS << "\t.fnend\n"; }
void ARMTargetAsmStreamer::emitCantUnwind() { OS << "\t.cantunwind\n"; }
void ARMTargetAsmStreamer::emitPersonality(const MCSymbol *Personality) {
OS << "\t.personality " << Personality->getName() << '\n';
}
+void ARMTargetAsmStreamer::emitPersonalityIndex(unsigned Index) {
+ OS << "\t.personalityindex " << Index << '\n';
+}
void ARMTargetAsmStreamer::emitHandlerData() { OS << "\t.handlerdata\n"; }
void ARMTargetAsmStreamer::emitSetFP(unsigned FpReg, unsigned SpReg,
int64_t Offset) {
@@ -109,6 +173,16 @@ void ARMTargetAsmStreamer::emitSetFP(unsigned FpReg, unsigned SpReg,
OS << ", #" << Offset;
OS << '\n';
}
+void ARMTargetAsmStreamer::emitMovSP(unsigned Reg, int64_t Offset) {
+ assert((Reg != ARM::SP && Reg != ARM::PC) &&
+ "the operand of .movsp cannot be either sp or pc");
+
+ OS << "\t.movsp\t";
+ InstPrinter.printRegName(OS, Reg);
+ if (Offset)
+ OS << ", #" << Offset;
+ OS << '\n';
+}
void ARMTargetAsmStreamer::emitPad(int64_t Offset) {
OS << "\t.pad\t#" << Offset << '\n';
}
@@ -132,22 +206,82 @@ void ARMTargetAsmStreamer::emitRegSave(const SmallVectorImpl<unsigned> &RegList,
void ARMTargetAsmStreamer::switchVendor(StringRef Vendor) {
}
void ARMTargetAsmStreamer::emitAttribute(unsigned Attribute, unsigned Value) {
- OS << "\t.eabi_attribute\t" << Attribute << ", " << Twine(Value) << "\n";
+ OS << "\t.eabi_attribute\t" << Attribute << ", " << Twine(Value);
+ if (IsVerboseAsm) {
+ StringRef Name = ARMBuildAttrs::AttrTypeAsString(Attribute);
+ if (!Name.empty())
+ OS << "\t@ " << Name;
+ }
+ OS << "\n";
}
void ARMTargetAsmStreamer::emitTextAttribute(unsigned Attribute,
StringRef String) {
switch (Attribute) {
- default: llvm_unreachable("Unsupported Text attribute in ASM Mode");
case ARMBuildAttrs::CPU_name:
- OS << "\t.cpu\t" << String.lower() << "\n";
+ OS << "\t.cpu\t" << String.lower();
+ break;
+ default:
+ OS << "\t.eabi_attribute\t" << Attribute << ", \"" << String << "\"";
+ if (IsVerboseAsm) {
+ StringRef Name = ARMBuildAttrs::AttrTypeAsString(Attribute);
+ if (!Name.empty())
+ OS << "\t@ " << Name;
+ }
break;
}
+ OS << "\n";
+}
+void ARMTargetAsmStreamer::emitIntTextAttribute(unsigned Attribute,
+ unsigned IntValue,
+ StringRef StringValue) {
+ switch (Attribute) {
+ default: llvm_unreachable("unsupported multi-value attribute in asm mode");
+ case ARMBuildAttrs::compatibility:
+ OS << "\t.eabi_attribute\t" << Attribute << ", " << IntValue;
+ if (!StringValue.empty())
+ OS << ", \"" << StringValue << "\"";
+ if (IsVerboseAsm)
+ OS << "\t@ " << ARMBuildAttrs::AttrTypeAsString(Attribute);
+ break;
+ }
+ OS << "\n";
+}
+void ARMTargetAsmStreamer::emitArch(unsigned Arch) {
+ OS << "\t.arch\t" << GetArchName(Arch) << "\n";
+}
+void ARMTargetAsmStreamer::emitObjectArch(unsigned Arch) {
+ OS << "\t.object_arch\t" << GetArchName(Arch) << '\n';
}
void ARMTargetAsmStreamer::emitFPU(unsigned FPU) {
OS << "\t.fpu\t" << GetFPUName(FPU) << "\n";
}
void ARMTargetAsmStreamer::finishAttributeSection() {
}
+void
+ARMTargetAsmStreamer::AnnotateTLSDescriptorSequence(const MCSymbolRefExpr *S) {
+ OS << "\t.tlsdescseq\t" << S->getSymbol().getName();
+}
+
+void ARMTargetAsmStreamer::emitThumbSet(MCSymbol *Symbol, const MCExpr *Value) {
+ OS << "\t.thumb_set\t" << *Symbol << ", " << *Value << '\n';
+}
+
+void ARMTargetAsmStreamer::emitInst(uint32_t Inst, char Suffix) {
+ OS << "\t.inst";
+ if (Suffix)
+ OS << "." << Suffix;
+ OS << "\t0x" << utohexstr(Inst) << "\n";
+}
+
+void ARMTargetAsmStreamer::emitUnwindRaw(int64_t Offset,
+ const SmallVectorImpl<uint8_t> &Opcodes) {
+ OS << "\t.unwind_raw " << Offset;
+ for (SmallVectorImpl<uint8_t>::const_iterator OCI = Opcodes.begin(),
+ OCE = Opcodes.end();
+ OCI != OCE; ++OCI)
+ OS << ", 0x" << utohexstr(*OCI);
+ OS << '\n';
+}
class ARMTargetELFStreamer : public ARMTargetStreamer {
private:
@@ -158,7 +292,8 @@ private:
enum {
HiddenAttribute = 0,
NumericAttribute,
- TextAttribute
+ TextAttribute,
+ NumericAndTextAttributes
} Type;
unsigned Tag;
unsigned IntValue;
@@ -171,26 +306,17 @@ private:
StringRef CurrentVendor;
unsigned FPU;
+ unsigned Arch;
+ unsigned EmittedArch;
SmallVector<AttributeItem, 64> Contents;
const MCSection *AttributeSection;
- // FIXME: this should be in a more generic place, but
- // getULEBSize() is in MCAsmInfo and will be moved to MCDwarf
- static size_t getULEBSize(int Value) {
- size_t Size = 0;
- do {
- Value >>= 7;
- Size += sizeof(int8_t); // Is this really necessary?
- } while (Value);
- return Size;
- }
-
AttributeItem *getAttributeItem(unsigned Attribute) {
for (size_t i = 0; i < Contents.size(); ++i)
if (Contents[i].Tag == Attribute)
return &Contents[i];
- return 0;
+ return nullptr;
}
void setAttributeItem(unsigned Attribute, unsigned Value,
@@ -199,6 +325,7 @@ private:
if (AttributeItem *Item = getAttributeItem(Attribute)) {
if (!OverwriteExisting)
return;
+ Item->Type = AttributeItem::NumericAttribute;
Item->IntValue = Value;
return;
}
@@ -219,6 +346,7 @@ private:
if (AttributeItem *Item = getAttributeItem(Attribute)) {
if (!OverwriteExisting)
return;
+ Item->Type = AttributeItem::TextAttribute;
Item->StringValue = Value;
return;
}
@@ -233,33 +361,69 @@ private:
Contents.push_back(Item);
}
+ void setAttributeItems(unsigned Attribute, unsigned IntValue,
+ StringRef StringValue, bool OverwriteExisting) {
+ // Look for existing attribute item
+ if (AttributeItem *Item = getAttributeItem(Attribute)) {
+ if (!OverwriteExisting)
+ return;
+ Item->Type = AttributeItem::NumericAndTextAttributes;
+ Item->IntValue = IntValue;
+ Item->StringValue = StringValue;
+ return;
+ }
+
+ // Create new attribute item
+ AttributeItem Item = {
+ AttributeItem::NumericAndTextAttributes,
+ Attribute,
+ IntValue,
+ StringValue
+ };
+ Contents.push_back(Item);
+ }
+
+ void emitArchDefaultAttributes();
void emitFPUDefaultAttributes();
ARMELFStreamer &getStreamer();
- virtual void emitFnStart();
- virtual void emitFnEnd();
- virtual void emitCantUnwind();
- virtual void emitPersonality(const MCSymbol *Personality);
- virtual void emitHandlerData();
- virtual void emitSetFP(unsigned FpReg, unsigned SpReg, int64_t Offset = 0);
- virtual void emitPad(int64_t Offset);
- virtual void emitRegSave(const SmallVectorImpl<unsigned> &RegList,
- bool isVector);
-
- virtual void switchVendor(StringRef Vendor);
- virtual void emitAttribute(unsigned Attribute, unsigned Value);
- virtual void emitTextAttribute(unsigned Attribute, StringRef String);
- virtual void emitFPU(unsigned FPU);
- virtual void finishAttributeSection();
+ void emitFnStart() override;
+ void emitFnEnd() override;
+ void emitCantUnwind() override;
+ void emitPersonality(const MCSymbol *Personality) override;
+ void emitPersonalityIndex(unsigned Index) override;
+ void emitHandlerData() override;
+ void emitSetFP(unsigned FpReg, unsigned SpReg, int64_t Offset = 0) override;
+ void emitMovSP(unsigned Reg, int64_t Offset = 0) override;
+ void emitPad(int64_t Offset) override;
+ void emitRegSave(const SmallVectorImpl<unsigned> &RegList,
+ bool isVector) override;
+ void emitUnwindRaw(int64_t Offset,
+ const SmallVectorImpl<uint8_t> &Opcodes) override;
+
+ void switchVendor(StringRef Vendor) override;
+ void emitAttribute(unsigned Attribute, unsigned Value) override;
+ void emitTextAttribute(unsigned Attribute, StringRef String) override;
+ void emitIntTextAttribute(unsigned Attribute, unsigned IntValue,
+ StringRef StringValue) override;
+ void emitArch(unsigned Arch) override;
+ void emitObjectArch(unsigned Arch) override;
+ void emitFPU(unsigned FPU) override;
+ void emitInst(uint32_t Inst, char Suffix = '\0') override;
+ void finishAttributeSection() override;
+ void emitLabel(MCSymbol *Symbol) override;
+
+ void AnnotateTLSDescriptorSequence(const MCSymbolRefExpr *SRE) override;
+ void emitThumbSet(MCSymbol *Symbol, const MCExpr *Value) override;
size_t calculateContentSize() const;
public:
- ARMTargetELFStreamer()
- : ARMTargetStreamer(), CurrentVendor("aeabi"), FPU(ARM::INVALID_FPU),
- AttributeSection(0) {
- }
+ ARMTargetELFStreamer(MCStreamer &S)
+ : ARMTargetStreamer(S), CurrentVendor("aeabi"), FPU(ARM::INVALID_FPU),
+ Arch(ARM::INVALID_ARCH), EmittedArch(ARM::INVALID_ARCH),
+ AttributeSection(nullptr) {}
};
/// Extend the generic ELFStreamer class so that it can emit mapping symbols at
@@ -278,30 +442,32 @@ class ARMELFStreamer : public MCELFStreamer {
public:
friend class ARMTargetELFStreamer;
- ARMELFStreamer(MCContext &Context, MCTargetStreamer *TargetStreamer,
- MCAsmBackend &TAB, raw_ostream &OS, MCCodeEmitter *Emitter,
- bool IsThumb)
- : MCELFStreamer(Context, TargetStreamer, TAB, OS, Emitter),
- IsThumb(IsThumb), MappingSymbolCounter(0), LastEMS(EMS_None) {
+ ARMELFStreamer(MCContext &Context, MCAsmBackend &TAB, raw_ostream &OS,
+ MCCodeEmitter *Emitter, bool IsThumb)
+ : MCELFStreamer(Context, TAB, OS, Emitter), IsThumb(IsThumb),
+ MappingSymbolCounter(0), LastEMS(EMS_None) {
Reset();
}
~ARMELFStreamer() {}
- virtual void FinishImpl();
+ void FinishImpl() override;
// ARM exception handling directives
void emitFnStart();
void emitFnEnd();
void emitCantUnwind();
void emitPersonality(const MCSymbol *Per);
+ void emitPersonalityIndex(unsigned index);
void emitHandlerData();
void emitSetFP(unsigned NewFpReg, unsigned NewSpReg, int64_t Offset = 0);
+ void emitMovSP(unsigned Reg, int64_t Offset = 0);
void emitPad(int64_t Offset);
void emitRegSave(const SmallVectorImpl<unsigned> &RegList, bool isVector);
+ void emitUnwindRaw(int64_t Offset, const SmallVectorImpl<uint8_t> &Opcodes);
- virtual void ChangeSection(const MCSection *Section,
- const MCExpr *Subsection) {
+ void ChangeSection(const MCSection *Section,
+ const MCExpr *Subsection) override {
// We have to keep track of the mapping symbol state of any sections we
// use. Each one should start off as EMS_None, which is provided as the
// default constructor by DenseMap::lookup.
@@ -314,19 +480,58 @@ public:
/// This function is the one used to emit instruction data into the ELF
/// streamer. We override it to add the appropriate mapping symbol if
/// necessary.
- virtual void EmitInstruction(const MCInst& Inst) {
+ void EmitInstruction(const MCInst& Inst,
+ const MCSubtargetInfo &STI) override {
if (IsThumb)
EmitThumbMappingSymbol();
else
EmitARMMappingSymbol();
- MCELFStreamer::EmitInstruction(Inst);
+ MCELFStreamer::EmitInstruction(Inst, STI);
+ }
+
+ void emitInst(uint32_t Inst, char Suffix) {
+ unsigned Size;
+ char Buffer[4];
+ const bool LittleEndian = getContext().getAsmInfo()->isLittleEndian();
+
+ switch (Suffix) {
+ case '\0':
+ Size = 4;
+
+ assert(!IsThumb);
+ EmitARMMappingSymbol();
+ for (unsigned II = 0, IE = Size; II != IE; II++) {
+ const unsigned I = LittleEndian ? (Size - II - 1) : II;
+ Buffer[Size - II - 1] = uint8_t(Inst >> I * CHAR_BIT);
+ }
+
+ break;
+ case 'n':
+ case 'w':
+ Size = (Suffix == 'n' ? 2 : 4);
+
+ assert(IsThumb);
+ EmitThumbMappingSymbol();
+ for (unsigned II = 0, IE = Size; II != IE; II = II + 2) {
+ const unsigned I0 = LittleEndian ? II + 0 : (Size - II - 1);
+ const unsigned I1 = LittleEndian ? II + 1 : (Size - II - 2);
+ Buffer[Size - II - 2] = uint8_t(Inst >> I0 * CHAR_BIT);
+ Buffer[Size - II - 1] = uint8_t(Inst >> I1 * CHAR_BIT);
+ }
+
+ break;
+ default:
+ llvm_unreachable("Invalid Suffix");
+ }
+
+ MCELFStreamer::EmitBytes(StringRef(Buffer, Size));
}
/// This is one of the functions used to emit data into an ELF section, so the
/// ARM streamer overrides it to add the appropriate mapping symbol ($d) if
/// necessary.
- virtual void EmitBytes(StringRef Data) {
+ void EmitBytes(StringRef Data) override {
EmitDataMappingSymbol();
MCELFStreamer::EmitBytes(Data);
}
@@ -334,12 +539,13 @@ public:
/// This is one of the functions used to emit data into an ELF section, so the
/// ARM streamer overrides it to add the appropriate mapping symbol ($d) if
/// necessary.
- virtual void EmitValueImpl(const MCExpr *Value, unsigned Size) {
+ void EmitValueImpl(const MCExpr *Value, unsigned Size,
+ const SMLoc &Loc) override {
EmitDataMappingSymbol();
MCELFStreamer::EmitValueImpl(Value, Size);
}
- virtual void EmitAssemblerFlag(MCAssemblerFlag Flag) {
+ void EmitAssemblerFlag(MCAssemblerFlag Flag) override {
MCELFStreamer::EmitAssemblerFlag(Flag);
switch (Flag) {
@@ -402,13 +608,9 @@ private:
Symbol->setVariableValue(Value);
}
- void EmitThumbFunc(MCSymbol *Func) {
- // FIXME: Anything needed here to flag the function as thumb?
-
+ void EmitThumbFunc(MCSymbol *Func) override {
getAssembler().setIsThumbFunc(Func);
-
- MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Func);
- SD.setFlags(SD.getFlags() | ELF_Other_ThumbFunc);
+ EmitSymbolAttribute(Func, MCSA_ELF_TypeFunction);
}
// Helper functions for ARM exception handling directives
@@ -423,6 +625,8 @@ private:
void SwitchToExTabSection(const MCSymbol &FnStart);
void SwitchToExIdxSection(const MCSymbol &FnStart);
+ void EmitFixup(const MCExpr *Expr, MCFixupKind Kind);
+
bool IsThumb;
int64_t MappingSymbolCounter;
@@ -446,8 +650,7 @@ private:
} // end anonymous namespace
ARMELFStreamer &ARMTargetELFStreamer::getStreamer() {
- ARMELFStreamer *S = static_cast<ARMELFStreamer *>(Streamer);
- return *S;
+ return static_cast<ARMELFStreamer &>(Streamer);
}
void ARMTargetELFStreamer::emitFnStart() { getStreamer().emitFnStart(); }
@@ -456,6 +659,9 @@ void ARMTargetELFStreamer::emitCantUnwind() { getStreamer().emitCantUnwind(); }
void ARMTargetELFStreamer::emitPersonality(const MCSymbol *Personality) {
getStreamer().emitPersonality(Personality);
}
+void ARMTargetELFStreamer::emitPersonalityIndex(unsigned Index) {
+ getStreamer().emitPersonalityIndex(Index);
+}
void ARMTargetELFStreamer::emitHandlerData() {
getStreamer().emitHandlerData();
}
@@ -463,6 +669,9 @@ void ARMTargetELFStreamer::emitSetFP(unsigned FpReg, unsigned SpReg,
int64_t Offset) {
getStreamer().emitSetFP(FpReg, SpReg, Offset);
}
+void ARMTargetELFStreamer::emitMovSP(unsigned Reg, int64_t Offset) {
+ getStreamer().emitMovSP(Reg, Offset);
+}
void ARMTargetELFStreamer::emitPad(int64_t Offset) {
getStreamer().emitPad(Offset);
}
@@ -470,6 +679,10 @@ void ARMTargetELFStreamer::emitRegSave(const SmallVectorImpl<unsigned> &RegList,
bool isVector) {
getStreamer().emitRegSave(RegList, isVector);
}
+void ARMTargetELFStreamer::emitUnwindRaw(int64_t Offset,
+ const SmallVectorImpl<uint8_t> &Opcodes) {
+ getStreamer().emitUnwindRaw(Offset, Opcodes);
+}
void ARMTargetELFStreamer::switchVendor(StringRef Vendor) {
assert(!Vendor.empty() && "Vendor cannot be empty.");
@@ -491,6 +704,108 @@ void ARMTargetELFStreamer::emitTextAttribute(unsigned Attribute,
StringRef Value) {
setAttributeItem(Attribute, Value, /* OverwriteExisting= */ true);
}
+void ARMTargetELFStreamer::emitIntTextAttribute(unsigned Attribute,
+ unsigned IntValue,
+ StringRef StringValue) {
+ setAttributeItems(Attribute, IntValue, StringValue,
+ /* OverwriteExisting= */ true);
+}
+void ARMTargetELFStreamer::emitArch(unsigned Value) {
+ Arch = Value;
+}
+void ARMTargetELFStreamer::emitObjectArch(unsigned Value) {
+ EmittedArch = Value;
+}
+void ARMTargetELFStreamer::emitArchDefaultAttributes() {
+ using namespace ARMBuildAttrs;
+
+ setAttributeItem(CPU_name, GetArchDefaultCPUName(Arch), false);
+ if (EmittedArch == ARM::INVALID_ARCH)
+ setAttributeItem(CPU_arch, GetArchDefaultCPUArch(Arch), false);
+ else
+ setAttributeItem(CPU_arch, GetArchDefaultCPUArch(EmittedArch), false);
+
+ switch (Arch) {
+ case ARM::ARMV2:
+ case ARM::ARMV2A:
+ case ARM::ARMV3:
+ case ARM::ARMV3M:
+ case ARM::ARMV4:
+ case ARM::ARMV5:
+ setAttributeItem(ARM_ISA_use, Allowed, false);
+ break;
+
+ case ARM::ARMV4T:
+ case ARM::ARMV5T:
+ case ARM::ARMV5TE:
+ case ARM::ARMV6:
+ case ARM::ARMV6J:
+ setAttributeItem(ARM_ISA_use, Allowed, false);
+ setAttributeItem(THUMB_ISA_use, Allowed, false);
+ break;
+
+ case ARM::ARMV6T2:
+ setAttributeItem(ARM_ISA_use, Allowed, false);
+ setAttributeItem(THUMB_ISA_use, AllowThumb32, false);
+ break;
+
+ case ARM::ARMV6Z:
+ case ARM::ARMV6ZK:
+ setAttributeItem(ARM_ISA_use, Allowed, false);
+ setAttributeItem(THUMB_ISA_use, Allowed, false);
+ setAttributeItem(Virtualization_use, AllowTZ, false);
+ break;
+
+ case ARM::ARMV6M:
+ setAttributeItem(THUMB_ISA_use, Allowed, false);
+ break;
+
+ case ARM::ARMV7:
+ setAttributeItem(THUMB_ISA_use, AllowThumb32, false);
+ break;
+
+ case ARM::ARMV7A:
+ setAttributeItem(CPU_arch_profile, ApplicationProfile, false);
+ setAttributeItem(ARM_ISA_use, Allowed, false);
+ setAttributeItem(THUMB_ISA_use, AllowThumb32, false);
+ break;
+
+ case ARM::ARMV7R:
+ setAttributeItem(CPU_arch_profile, RealTimeProfile, false);
+ setAttributeItem(ARM_ISA_use, Allowed, false);
+ setAttributeItem(THUMB_ISA_use, AllowThumb32, false);
+ break;
+
+ case ARM::ARMV7M:
+ setAttributeItem(CPU_arch_profile, MicroControllerProfile, false);
+ setAttributeItem(THUMB_ISA_use, AllowThumb32, false);
+ break;
+
+ case ARM::ARMV8A:
+ setAttributeItem(CPU_arch_profile, ApplicationProfile, false);
+ setAttributeItem(ARM_ISA_use, Allowed, false);
+ setAttributeItem(THUMB_ISA_use, AllowThumb32, false);
+ setAttributeItem(MPextension_use, Allowed, false);
+ setAttributeItem(Virtualization_use, AllowTZVirtualization, false);
+ break;
+
+ case ARM::IWMMXT:
+ setAttributeItem(ARM_ISA_use, Allowed, false);
+ setAttributeItem(THUMB_ISA_use, Allowed, false);
+ setAttributeItem(WMMX_arch, AllowWMMXv1, false);
+ break;
+
+ case ARM::IWMMXT2:
+ setAttributeItem(ARM_ISA_use, Allowed, false);
+ setAttributeItem(THUMB_ISA_use, Allowed, false);
+ setAttributeItem(WMMX_arch, AllowWMMXv2, false);
+ break;
+
+ default:
+ report_fatal_error("Unknown Arch: " + Twine(Arch));
+ break;
+ }
+}
void ARMTargetELFStreamer::emitFPU(unsigned Value) {
FPU = Value;
}
@@ -498,43 +813,43 @@ void ARMTargetELFStreamer::emitFPUDefaultAttributes() {
switch (FPU) {
case ARM::VFP:
case ARM::VFPV2:
- setAttributeItem(ARMBuildAttrs::VFP_arch,
+ setAttributeItem(ARMBuildAttrs::FP_arch,
ARMBuildAttrs::AllowFPv2,
/* OverwriteExisting= */ false);
break;
case ARM::VFPV3:
- setAttributeItem(ARMBuildAttrs::VFP_arch,
+ setAttributeItem(ARMBuildAttrs::FP_arch,
ARMBuildAttrs::AllowFPv3A,
/* OverwriteExisting= */ false);
break;
case ARM::VFPV3_D16:
- setAttributeItem(ARMBuildAttrs::VFP_arch,
+ setAttributeItem(ARMBuildAttrs::FP_arch,
ARMBuildAttrs::AllowFPv3B,
/* OverwriteExisting= */ false);
break;
case ARM::VFPV4:
- setAttributeItem(ARMBuildAttrs::VFP_arch,
+ setAttributeItem(ARMBuildAttrs::FP_arch,
ARMBuildAttrs::AllowFPv4A,
/* OverwriteExisting= */ false);
break;
case ARM::VFPV4_D16:
- setAttributeItem(ARMBuildAttrs::VFP_arch,
+ setAttributeItem(ARMBuildAttrs::FP_arch,
ARMBuildAttrs::AllowFPv4B,
/* OverwriteExisting= */ false);
break;
case ARM::FP_ARMV8:
- setAttributeItem(ARMBuildAttrs::VFP_arch,
+ setAttributeItem(ARMBuildAttrs::FP_arch,
ARMBuildAttrs::AllowFPARMv8A,
/* OverwriteExisting= */ false);
break;
case ARM::NEON:
- setAttributeItem(ARMBuildAttrs::VFP_arch,
+ setAttributeItem(ARMBuildAttrs::FP_arch,
ARMBuildAttrs::AllowFPv3A,
/* OverwriteExisting= */ false);
setAttributeItem(ARMBuildAttrs::Advanced_SIMD_arch,
@@ -543,7 +858,7 @@ void ARMTargetELFStreamer::emitFPUDefaultAttributes() {
break;
case ARM::NEON_VFPV4:
- setAttributeItem(ARMBuildAttrs::VFP_arch,
+ setAttributeItem(ARMBuildAttrs::FP_arch,
ARMBuildAttrs::AllowFPv4A,
/* OverwriteExisting= */ false);
setAttributeItem(ARMBuildAttrs::Advanced_SIMD_arch,
@@ -553,7 +868,7 @@ void ARMTargetELFStreamer::emitFPUDefaultAttributes() {
case ARM::NEON_FP_ARMV8:
case ARM::CRYPTO_NEON_FP_ARMV8:
- setAttributeItem(ARMBuildAttrs::VFP_arch,
+ setAttributeItem(ARMBuildAttrs::FP_arch,
ARMBuildAttrs::AllowFPARMv8A,
/* OverwriteExisting= */ false);
setAttributeItem(ARMBuildAttrs::Advanced_SIMD_arch,
@@ -561,6 +876,9 @@ void ARMTargetELFStreamer::emitFPUDefaultAttributes() {
/* OverwriteExisting= */ false);
break;
+ case ARM::SOFTVFP:
+ break;
+
default:
report_fatal_error("Unknown FPU: " + Twine(FPU));
break;
@@ -574,13 +892,18 @@ size_t ARMTargetELFStreamer::calculateContentSize() const {
case AttributeItem::HiddenAttribute:
break;
case AttributeItem::NumericAttribute:
- Result += getULEBSize(item.Tag);
- Result += getULEBSize(item.IntValue);
+ Result += getULEB128Size(item.Tag);
+ Result += getULEB128Size(item.IntValue);
break;
case AttributeItem::TextAttribute:
- Result += getULEBSize(item.Tag);
+ Result += getULEB128Size(item.Tag);
Result += item.StringValue.size() + 1; // string + '\0'
break;
+ case AttributeItem::NumericAndTextAttributes:
+ Result += getULEB128Size(item.Tag);
+ Result += getULEB128Size(item.IntValue);
+ Result += item.StringValue.size() + 1; // string + '\0'
+ break;
}
}
return Result;
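
The size computation above adds getULEB128Size(tag) plus getULEB128Size(value) for numeric data, the NUL-terminated string for text data, and both for the new combined kind. For reference, a ULEB128 length helper boils down to counting 7-bit groups; a minimal standalone sketch (hypothetical name, not the LLVM helper):

#include <cstddef>
#include <cstdint>

// Number of bytes needed to encode Value as ULEB128: one byte per 7 payload bits.
static size_t uleb128Size(uint64_t Value) {
  size_t Bytes = 0;
  do {
    Value >>= 7;
    ++Bytes;
  } while (Value != 0);
  return Bytes;
}
// e.g. uleb128Size(0) == 1, uleb128Size(127) == 1, uleb128Size(128) == 2.
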
@@ -597,6 +920,9 @@ void ARMTargetELFStreamer::finishAttributeSection() {
if (FPU != ARM::INVALID_FPU)
emitFPUDefaultAttributes();
+ if (Arch != ARM::INVALID_ARCH)
+ emitArchDefaultAttributes();
+
if (Contents.empty())
return;
@@ -648,6 +974,11 @@ void ARMTargetELFStreamer::finishAttributeSection() {
Streamer.EmitBytes(item.StringValue.upper());
Streamer.EmitIntValue(0, 1); // '\0'
break;
+ case AttributeItem::NumericAndTextAttributes:
+ Streamer.EmitULEB128IntValue(item.IntValue);
+ Streamer.EmitBytes(item.StringValue.upper());
+ Streamer.EmitIntValue(0, 1); // '\0'
+ break;
}
}
@@ -655,8 +986,41 @@ void ARMTargetELFStreamer::finishAttributeSection() {
FPU = ARM::INVALID_FPU;
}
+void ARMTargetELFStreamer::emitLabel(MCSymbol *Symbol) {
+ ARMELFStreamer &Streamer = getStreamer();
+ if (!Streamer.IsThumb)
+ return;
+
+ const MCSymbolData &SD = Streamer.getOrCreateSymbolData(Symbol);
+ unsigned Type = MCELF::GetType(SD);
+ if (Type == ELF_STT_Func || Type == ELF_STT_GnuIFunc)
+ Streamer.EmitThumbFunc(Symbol);
+}
+
+void
+ARMTargetELFStreamer::AnnotateTLSDescriptorSequence(const MCSymbolRefExpr *S) {
+ getStreamer().EmitFixup(S, FK_Data_4);
+}
+
+void ARMTargetELFStreamer::emitThumbSet(MCSymbol *Symbol, const MCExpr *Value) {
+ if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(Value)) {
+ const MCSymbol &Sym = SRE->getSymbol();
+ if (!Sym.isDefined()) {
+ getStreamer().EmitAssignment(Symbol, Value);
+ return;
+ }
+ }
+
+ getStreamer().EmitThumbFunc(Symbol);
+ getStreamer().EmitAssignment(Symbol, Value);
+}
+
+void ARMTargetELFStreamer::emitInst(uint32_t Inst, char Suffix) {
+ getStreamer().emitInst(Inst, Suffix);
+}
+
void ARMELFStreamer::FinishImpl() {
- MCTargetStreamer &TS = getTargetStreamer();
+ MCTargetStreamer &TS = *getTargetStreamer();
ARMTargetStreamer &ATS = static_cast<ARMTargetStreamer &>(TS);
ATS.finishAttributeSection();
@@ -679,7 +1043,7 @@ inline void ARMELFStreamer::SwitchToEHSection(const char *Prefix,
}
// Get .ARM.extab or .ARM.exidx section
- const MCSectionELF *EHSection = NULL;
+ const MCSectionELF *EHSection = nullptr;
if (const MCSymbol *Group = FnSection.getGroup()) {
EHSection = getContext().getELFSection(
EHSecName, Type, Flags | ELF::SHF_GROUP, Kind,
@@ -691,7 +1055,7 @@ inline void ARMELFStreamer::SwitchToEHSection(const char *Prefix,
// Switch to .ARM.extab or .ARM.exidx section
SwitchSection(EHSection);
- EmitCodeAlignment(4, 0);
+ EmitCodeAlignment(4);
}
inline void ARMELFStreamer::SwitchToExTabSection(const MCSymbol &FnStart) {
@@ -709,12 +1073,17 @@ inline void ARMELFStreamer::SwitchToExIdxSection(const MCSymbol &FnStart) {
SectionKind::getDataRel(),
FnStart);
}
+void ARMELFStreamer::EmitFixup(const MCExpr *Expr, MCFixupKind Kind) {
+ MCDataFragment *Frag = getOrCreateDataFragment();
+ Frag->getFixups().push_back(MCFixup::Create(Frag->getContents().size(), Expr,
+ Kind));
+}
void ARMELFStreamer::Reset() {
- ExTab = NULL;
- FnStart = NULL;
- Personality = NULL;
- PersonalityIndex = NUM_PERSONALITY_INDEX;
+ ExTab = nullptr;
+ FnStart = nullptr;
+ Personality = nullptr;
+ PersonalityIndex = ARM::EHABI::NUM_PERSONALITY_INDEX;
FPReg = ARM::SP;
FPOffset = 0;
SPOffset = 0;
@@ -727,13 +1096,13 @@ void ARMELFStreamer::Reset() {
}
void ARMELFStreamer::emitFnStart() {
- assert(FnStart == 0);
+ assert(FnStart == nullptr);
FnStart = getContext().CreateTempSymbol();
EmitLabel(FnStart);
}
void ARMELFStreamer::emitFnEnd() {
- assert(FnStart && ".fnstart must preceeds .fnend");
+ assert(FnStart && ".fnstart must precedes .fnend");
// Emit unwind opcodes if there is no .handlerdata directive
if (!ExTab && !CantUnwind)
@@ -742,7 +1111,7 @@ void ARMELFStreamer::emitFnEnd() {
// Emit the exception index table entry
SwitchToExIdxSection(*FnStart);
- if (PersonalityIndex < NUM_PERSONALITY_INDEX)
+ if (PersonalityIndex < ARM::EHABI::NUM_PERSONALITY_INDEX)
EmitPersonalityFixup(GetAEABIUnwindPersonalityName(PersonalityIndex));
const MCSymbolRefExpr *FnStartRef =
@@ -753,7 +1122,7 @@ void ARMELFStreamer::emitFnEnd() {
EmitValue(FnStartRef, 4);
if (CantUnwind) {
- EmitIntValue(EXIDX_CANTUNWIND, 4);
+ EmitIntValue(ARM::EHABI::EXIDX_CANTUNWIND, 4);
} else if (ExTab) {
// Emit a reference to the unwind opcodes in the ".ARM.extab" section.
const MCSymbolRefExpr *ExTabEntryRef =
@@ -765,12 +1134,15 @@ void ARMELFStreamer::emitFnEnd() {
// For the __aeabi_unwind_cpp_pr0, we have to emit the unwind opcodes in
// the second word of exception index table entry. The size of the unwind
// opcodes should always be 4 bytes.
- assert(PersonalityIndex == AEABI_UNWIND_CPP_PR0 &&
- "Compact model must use __aeabi_cpp_unwind_pr0 as personality");
+ assert(PersonalityIndex == ARM::EHABI::AEABI_UNWIND_CPP_PR0 &&
+ "Compact model must use __aeabi_unwind_cpp_pr0 as personality");
assert(Opcodes.size() == 4u &&
- "Unwind opcode size for __aeabi_cpp_unwind_pr0 must be equal to 4");
- EmitBytes(StringRef(reinterpret_cast<const char*>(Opcodes.data()),
- Opcodes.size()));
+ "Unwind opcode size for __aeabi_unwind_cpp_pr0 must be equal to 4");
+ uint64_t Intval = Opcodes[0] |
+ Opcodes[1] << 8 |
+ Opcodes[2] << 16 |
+ Opcodes[3] << 24;
+ EmitIntValue(Intval, Opcodes.size());
}
// Switch to the section containing FnStart
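
The hunk above replaces the raw EmitBytes of the four unwind opcode bytes with a single 32-bit EmitIntValue, so the streamer writes the word in the target's byte order (which matters for the new big-endian support). A standalone sketch of the packing step (hypothetical helper, illustrative only):

#include <cstdint>
#include <vector>

// Pack four opcode bytes (least-significant first) into one 32-bit word.
// The caller guarantees at least four bytes remain starting at index I.
static uint32_t packUnwindWord(const std::vector<uint8_t> &Opcodes, size_t I) {
  return uint32_t(Opcodes[I]) |
         uint32_t(Opcodes[I + 1]) << 8 |
         uint32_t(Opcodes[I + 2]) << 16 |
         uint32_t(Opcodes[I + 3]) << 24;
}
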
@@ -789,7 +1161,7 @@ void ARMELFStreamer::EmitPersonalityFixup(StringRef Name) {
const MCSymbolRefExpr *PersonalityRef = MCSymbolRefExpr::Create(
PersonalitySym, MCSymbolRefExpr::VK_ARM_NONE, getContext());
- AddValueSymbols(PersonalityRef);
+ visitUsedExpr(*PersonalityRef);
MCDataFragment *DF = getOrCreateDataFragment();
DF->getFixups().push_back(MCFixup::Create(DF->getContents().size(),
PersonalityRef,
@@ -820,7 +1192,7 @@ void ARMELFStreamer::FlushUnwindOpcodes(bool NoHandlerData) {
// For compact model 0, we have to emit the unwind opcodes in the .ARM.exidx
// section. Thus, we don't have to create an entry in the .ARM.extab
// section.
- if (NoHandlerData && PersonalityIndex == AEABI_UNWIND_CPP_PR0)
+ if (NoHandlerData && PersonalityIndex == ARM::EHABI::AEABI_UNWIND_CPP_PR0)
return;
// Switch to .ARM.extab section.
@@ -842,8 +1214,15 @@ void ARMELFStreamer::FlushUnwindOpcodes(bool NoHandlerData) {
}
// Emit unwind opcodes
- EmitBytes(StringRef(reinterpret_cast<const char *>(Opcodes.data()),
- Opcodes.size()));
+ assert((Opcodes.size() % 4) == 0 &&
+ "Unwind opcode size for __aeabi_cpp_unwind_pr0 must be multiple of 4");
+ for (unsigned I = 0; I != Opcodes.size(); I += 4) {
+ uint64_t Intval = Opcodes[I] |
+ Opcodes[I + 1] << 8 |
+ Opcodes[I + 2] << 16 |
+ Opcodes[I + 3] << 24;
+ EmitIntValue(Intval, 4);
+ }
// According to ARM EHABI section 9.2, if the __aeabi_unwind_cpp_pr1() or
// __aeabi_unwind_cpp_pr2() is used, then the handler data must be emitted
@@ -863,6 +1242,11 @@ void ARMELFStreamer::emitPersonality(const MCSymbol *Per) {
UnwindOpAsm.setPersonality(Per);
}
+void ARMELFStreamer::emitPersonalityIndex(unsigned Index) {
+ assert(Index < ARM::EHABI::NUM_PERSONALITY_INDEX && "invalid index");
+ PersonalityIndex = Index;
+}
+
void ARMELFStreamer::emitSetFP(unsigned NewFPReg, unsigned NewSPReg,
int64_t Offset) {
assert((NewSPReg == ARM::SP || NewSPReg == FPReg) &&
@@ -877,6 +1261,20 @@ void ARMELFStreamer::emitSetFP(unsigned NewFPReg, unsigned NewSPReg,
FPOffset += Offset;
}
+void ARMELFStreamer::emitMovSP(unsigned Reg, int64_t Offset) {
+ assert((Reg != ARM::SP && Reg != ARM::PC) &&
+ "the operand of .movsp cannot be either sp or pc");
+ assert(FPReg == ARM::SP && "current FP must be SP");
+
+ FlushPendingOffset();
+
+ FPReg = Reg;
+ FPOffset = SPOffset + Offset;
+
+ const MCRegisterInfo *MRI = getContext().getRegisterInfo();
+ UnwindOpAsm.EmitSetSP(MRI->getEncodingValue(FPReg));
+}
+
void ARMELFStreamer::emitPad(int64_t Offset) {
// Track the change of the $sp offset
SPOffset -= Offset;
@@ -916,27 +1314,37 @@ void ARMELFStreamer::emitRegSave(const SmallVectorImpl<unsigned> &RegList,
UnwindOpAsm.EmitRegSave(Mask);
}
+void ARMELFStreamer::emitUnwindRaw(int64_t Offset,
+ const SmallVectorImpl<uint8_t> &Opcodes) {
+ FlushPendingOffset();
+ SPOffset = SPOffset - Offset;
+ UnwindOpAsm.EmitRaw(Opcodes);
+}
+
namespace llvm {
MCStreamer *createMCAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
- bool isVerboseAsm, bool useLoc, bool useCFI,
- bool useDwarfDirectory,
+ bool isVerboseAsm, bool useDwarfDirectory,
MCInstPrinter *InstPrint, MCCodeEmitter *CE,
MCAsmBackend *TAB, bool ShowInst) {
- ARMTargetAsmStreamer *S = new ARMTargetAsmStreamer(OS, *InstPrint);
+ MCStreamer *S = llvm::createAsmStreamer(
+ Ctx, OS, isVerboseAsm, useDwarfDirectory, InstPrint, CE, TAB, ShowInst);
+ new ARMTargetAsmStreamer(*S, OS, *InstPrint, isVerboseAsm);
+ return S;
+}
- return llvm::createAsmStreamer(Ctx, S, OS, isVerboseAsm, useLoc, useCFI,
- useDwarfDirectory, InstPrint, CE, TAB,
- ShowInst);
+MCStreamer *createARMNullStreamer(MCContext &Ctx) {
+ MCStreamer *S = llvm::createNullStreamer(Ctx);
+ new ARMTargetStreamer(*S);
+ return S;
}
MCELFStreamer* createARMELFStreamer(MCContext &Context, MCAsmBackend &TAB,
raw_ostream &OS, MCCodeEmitter *Emitter,
bool RelaxAll, bool NoExecStack,
bool IsThumb) {
- ARMTargetELFStreamer *TS = new ARMTargetELFStreamer();
- ARMELFStreamer *S =
- new ARMELFStreamer(Context, TS, TAB, OS, Emitter, IsThumb);
+ ARMELFStreamer *S = new ARMELFStreamer(Context, TAB, OS, Emitter, IsThumb);
+ new ARMTargetELFStreamer(*S);
// FIXME: This should eventually end up somewhere else where more
// intelligent flag decisions can be made. For now we are just maintaining
// the status quo for ARM and setting EF_ARM_EABI_VER5 as the default.
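
Note the inverted construction order in the factories above: the generic streamer is created first and the target streamer registers itself with it. A minimal sketch of that pattern with placeholder types (not the MC API; in the real code the streamer is expected to take ownership of the target streamer):

struct GenericStreamer;

struct TargetStreamerSketch {
  explicit TargetStreamerSketch(GenericStreamer &S); // attaches *this to S
};

struct GenericStreamer {
  TargetStreamerSketch *Target = nullptr;
};

inline TargetStreamerSketch::TargetStreamerSketch(GenericStreamer &S) {
  S.Target = this;
}

GenericStreamer *createStreamerSketch() {
  GenericStreamer *S = new GenericStreamer();
  new TargetStreamerSketch(*S); // registers itself with S, as in the diff
  return S;
}
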
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h
index 0085feb..bfd9e33 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h
@@ -100,15 +100,6 @@ enum Fixups {
fixup_t2_movt_hi16, // :upper16:
fixup_t2_movw_lo16, // :lower16:
- // It is possible to create an "immediate" that happens to be pcrel.
- // movw r0, :lower16:Foo-(Bar+8) and movt r0, :upper16:Foo-(Bar+8)
- // result in different reloc tags than the above two.
- // Needed to support ELF::R_ARM_MOVT_PREL and ELF::R_ARM_MOVW_PREL_NC
- fixup_arm_movt_hi16_pcrel, // :upper16:
- fixup_arm_movw_lo16_pcrel, // :lower16:
- fixup_t2_movt_hi16_pcrel, // :upper16:
- fixup_t2_movw_lo16_pcrel, // :lower16:
-
// Marker
LastTargetFixupKind,
NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
index ad796e6..7a19208 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
@@ -13,19 +13,19 @@
#include "ARMMCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/ADT/Triple.h"
using namespace llvm;
-cl::opt<bool>
-EnableARMEHABI("arm-enable-ehabi", cl::Hidden,
- cl::desc("Generate ARM EHABI tables"),
- cl::init(false));
-
-
void ARMMCAsmInfoDarwin::anchor() { }
-ARMMCAsmInfoDarwin::ARMMCAsmInfoDarwin() {
- Data64bitsDirective = 0;
+ARMMCAsmInfoDarwin::ARMMCAsmInfoDarwin(StringRef TT) {
+ Triple TheTriple(TT);
+ if ((TheTriple.getArch() == Triple::armeb) ||
+ (TheTriple.getArch() == Triple::thumbeb))
+ IsLittleEndian = false;
+
+ Data64bitsDirective = nullptr;
CommentString = "@";
Code16Directive = ".code\t16";
Code32Directive = ".code\t32";
@@ -35,17 +35,23 @@ ARMMCAsmInfoDarwin::ARMMCAsmInfoDarwin() {
// Exceptions handling
ExceptionsType = ExceptionHandling::SjLj;
+
+ UseIntegratedAssembler = true;
}
void ARMELFMCAsmInfo::anchor() { }
-ARMELFMCAsmInfo::ARMELFMCAsmInfo() {
+ARMELFMCAsmInfo::ARMELFMCAsmInfo(StringRef TT) {
+ Triple TheTriple(TT);
+ if ((TheTriple.getArch() == Triple::armeb) ||
+ (TheTriple.getArch() == Triple::thumbeb))
+ IsLittleEndian = false;
+
// ".comm align is in bytes but .align is pow-2."
AlignmentIsInBytes = false;
- Data64bitsDirective = 0;
+ Data64bitsDirective = nullptr;
CommentString = "@";
- PrivateGlobalPrefix = ".L";
Code16Directive = ".code\t16";
Code32Directive = ".code\t32";
@@ -53,6 +59,56 @@ ARMELFMCAsmInfo::ARMELFMCAsmInfo() {
SupportsDebugInformation = true;
// Exceptions handling
- if (EnableARMEHABI)
+ switch (TheTriple.getOS()) {
+ case Triple::NetBSD:
+ ExceptionsType = ExceptionHandling::DwarfCFI;
+ break;
+ default:
ExceptionsType = ExceptionHandling::ARM;
+ break;
+ }
+
+ // foo(plt) instead of foo@plt
+ UseParensForSymbolVariant = true;
+
+ UseIntegratedAssembler = true;
+}
+
+void ARMELFMCAsmInfo::setUseIntegratedAssembler(bool Value) {
+ UseIntegratedAssembler = Value;
+ if (!UseIntegratedAssembler) {
+ // gas doesn't handle VFP register names in cfi directives,
+ // so don't use register names with external assembler.
+ // See https://sourceware.org/bugzilla/show_bug.cgi?id=16694
+ DwarfRegNumForCFI = true;
+ }
+}
+
+void ARMCOFFMCAsmInfoMicrosoft::anchor() { }
+
+ARMCOFFMCAsmInfoMicrosoft::ARMCOFFMCAsmInfoMicrosoft() {
+ AlignmentIsInBytes = false;
+
+ PrivateGlobalPrefix = "$M";
}
+
+void ARMCOFFMCAsmInfoGNU::anchor() { }
+
+ARMCOFFMCAsmInfoGNU::ARMCOFFMCAsmInfoGNU() {
+ AlignmentIsInBytes = false;
+ HasSingleParameterDotFile = true;
+
+ CommentString = "@";
+ Code16Directive = ".code\t16";
+ Code32Directive = ".code\t32";
+ PrivateGlobalPrefix = ".L";
+
+ HasLEB128 = true;
+ SupportsDebugInformation = true;
+ ExceptionsType = ExceptionHandling::None;
+ UseParensForSymbolVariant = true;
+
+ UseIntegratedAssembler = false;
+ DwarfRegNumForCFI = true;
+}
+
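
Both ARMMCAsmInfo constructors now receive the triple and clear IsLittleEndian for the big-endian armeb/thumbeb variants. A standalone sketch of that check on a plain triple string (hypothetical helper, not the llvm::Triple API):

#include <string>

// True when the architecture component of the triple (the text before the
// first '-') names a big-endian ARM variant.
static bool isBigEndianARMTriple(const std::string &TT) {
  const std::string Arch = TT.substr(0, TT.find('-'));
  return Arch == "armeb" || Arch == "thumbeb";
}
// e.g. isBigEndianARMTriple("armeb-none-eabi") == true.
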
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h
index e1f716d..51cfa0ad 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h
@@ -14,21 +14,36 @@
#ifndef LLVM_ARMTARGETASMINFO_H
#define LLVM_ARMTARGETASMINFO_H
+#include "llvm/MC/MCAsmInfoCOFF.h"
#include "llvm/MC/MCAsmInfoDarwin.h"
#include "llvm/MC/MCAsmInfoELF.h"
namespace llvm {
class ARMMCAsmInfoDarwin : public MCAsmInfoDarwin {
- virtual void anchor();
+ void anchor() override;
public:
- explicit ARMMCAsmInfoDarwin();
+ explicit ARMMCAsmInfoDarwin(StringRef TT);
};
class ARMELFMCAsmInfo : public MCAsmInfoELF {
- virtual void anchor();
+ void anchor() override;
public:
- explicit ARMELFMCAsmInfo();
+ explicit ARMELFMCAsmInfo(StringRef TT);
+
+ void setUseIntegratedAssembler(bool Value) override;
+ };
+
+ class ARMCOFFMCAsmInfoMicrosoft : public MCAsmInfoMicrosoft {
+ void anchor() override;
+ public:
+ explicit ARMCOFFMCAsmInfoMicrosoft();
+ };
+
+ class ARMCOFFMCAsmInfoGNU : public MCAsmInfoGNUCOFF {
+ void anchor() override;
+ public:
+ explicit ARMCOFFMCAsmInfoGNU();
};
} // namespace llvm
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
index 4382d0d..b8ee555 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mccodeemitter"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
@@ -26,10 +25,13 @@
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "mccodeemitter"
+
STATISTIC(MCNumEmitted, "Number of MC instructions emitted.");
STATISTIC(MCNumCPRelocations, "Number of constant pool relocations created.");
@@ -38,27 +40,25 @@ class ARMMCCodeEmitter : public MCCodeEmitter {
ARMMCCodeEmitter(const ARMMCCodeEmitter &) LLVM_DELETED_FUNCTION;
void operator=(const ARMMCCodeEmitter &) LLVM_DELETED_FUNCTION;
const MCInstrInfo &MCII;
- const MCSubtargetInfo &STI;
const MCContext &CTX;
+ bool IsLittleEndian;
public:
- ARMMCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti,
- MCContext &ctx)
- : MCII(mcii), STI(sti), CTX(ctx) {
+ ARMMCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx, bool IsLittle)
+ : MCII(mcii), CTX(ctx), IsLittleEndian(IsLittle) {
}
~ARMMCCodeEmitter() {}
- bool isThumb() const {
- // FIXME: Can tablegen auto-generate this?
+ bool isThumb(const MCSubtargetInfo &STI) const {
return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
}
- bool isThumb2() const {
- return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) != 0;
+ bool isThumb2(const MCSubtargetInfo &STI) const {
+ return isThumb(STI) && (STI.getFeatureBits() & ARM::FeatureThumb2) != 0;
}
- bool isTargetDarwin() const {
+ bool isTargetMachO(const MCSubtargetInfo &STI) const {
Triple TT(STI.getTargetTriple());
- return TT.isOSDarwin();
+ return TT.isOSBinFormatMachO();
}
unsigned getMachineSoImmOpValue(unsigned SoImm) const;
@@ -66,107 +66,131 @@ public:
// getBinaryCodeForInstr - TableGen'erated function for getting the
// binary encoding for an instruction.
uint64_t getBinaryCodeForInstr(const MCInst &MI,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getHiLo16ImmOpValue - Return the encoding for the hi / low 16-bit of
/// the specified operand. This is used for operands with :lower16: and
/// :upper16: prefixes.
uint32_t getHiLo16ImmOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
bool EncodeAddrModeOpValues(const MCInst &MI, unsigned OpIdx,
unsigned &Reg, unsigned &Imm,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getThumbBLTargetOpValue - Return encoding info for Thumb immediate
/// BL branch target.
uint32_t getThumbBLTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getThumbBLXTargetOpValue - Return encoding info for Thumb immediate
/// BLX branch target.
uint32_t getThumbBLXTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getThumbBRTargetOpValue - Return encoding info for Thumb branch target.
uint32_t getThumbBRTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getThumbBCCTargetOpValue - Return encoding info for Thumb branch target.
uint32_t getThumbBCCTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getThumbCBTargetOpValue - Return encoding info for Thumb branch target.
uint32_t getThumbCBTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getBranchTargetOpValue - Return encoding info for 24-bit immediate
/// branch target.
uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getUnconditionalBranchTargetOpValue - Return encoding info for 24-bit
/// immediate Thumb2 direct branch target.
uint32_t getUnconditionalBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getARMBranchTargetOpValue - Return encoding info for 24-bit immediate
/// branch target.
uint32_t getARMBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
uint32_t getARMBLTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
uint32_t getARMBLXTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getAdrLabelOpValue - Return encoding info for 12-bit immediate
/// ADR label target.
uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
uint32_t getThumbAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
uint32_t getT2AdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getAddrModeImm12OpValue - Return encoding info for 'reg +/- imm12'
/// operand.
uint32_t getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getThumbAddrModeRegRegOpValue - Return encoding for 'reg + reg' operand.
uint32_t getThumbAddrModeRegRegOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups)const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getT2AddrModeImm8s4OpValue - Return encoding info for 'reg +/- imm8<<2'
/// operand.
uint32_t getT2AddrModeImm8s4OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getT2AddrModeImm0_1020s4OpValue - Return encoding info for 'reg + imm8<<2'
/// operand.
uint32_t getT2AddrModeImm0_1020s4OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getT2Imm8s4OpValue - Return encoding info for '+/- imm8<<2'
/// operand.
uint32_t getT2Imm8s4OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getLdStSORegOpValue - Return encoding info for 'reg +/- reg shop imm'
/// operand as needed by load/store instructions.
uint32_t getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getLdStmModeOpValue - Return encoding for load/store multiple mode.
uint32_t getLdStmModeOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
ARM_AM::AMSubMode Mode = (ARM_AM::AMSubMode)MI.getOperand(OpIdx).getImm();
switch (Mode) {
default: llvm_unreachable("Unknown addressing sub-mode!");
@@ -192,44 +216,54 @@ public:
/// getAddrMode2OpValue - Return encoding for addrmode2 operands.
uint32_t getAddrMode2OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getAddrMode2OffsetOpValue - Return encoding for am2offset operands.
uint32_t getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getPostIdxRegOpValue - Return encoding for postidx_reg operands.
uint32_t getPostIdxRegOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getAddrMode3OffsetOpValue - Return encoding for am3offset operands.
uint32_t getAddrMode3OffsetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getAddrMode3OpValue - Return encoding for addrmode3 operands.
uint32_t getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getAddrModeThumbSPOpValue - Return encoding info for 'reg +/- imm12'
/// operand.
uint32_t getAddrModeThumbSPOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getAddrModeISOpValue - Encode the t_addrmode_is# operands.
uint32_t getAddrModeISOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getAddrModePCOpValue - Return encoding for t_addrmode_pc operands.
uint32_t getAddrModePCOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getAddrMode5OpValue - Return encoding info for 'reg +/- imm8' operand.
uint32_t getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getCCOutOpValue - Return encoding of the 's' bit.
unsigned getCCOutOpValue(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// The operand is either reg0 or CPSR. The 's' bit is encoded as '0' or
// '1' respectively.
return MI.getOperand(Op).getReg() == ARM::CPSR;
@@ -237,8 +271,27 @@ public:
/// getSOImmOpValue - Return an encoded 12-bit shifted-immediate value.
unsigned getSOImmOpValue(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const {
- unsigned SoImm = MI.getOperand(Op).getImm();
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+
+ const MCOperand &MO = MI.getOperand(Op);
+
+ // We expect MO to be either an immediate or an expression.
+ // If it is an immediate, just encode the value;
+ // otherwise, create a fixup.
+ if (MO.isExpr()) {
+ const MCExpr *Expr = MO.getExpr();
+ // In the instruction encoding this value always occupies the lowest 12 bits,
+ // so we don't have to perform any specific adjustments.
+ // Due to requirements of relocatable records we have to use FK_Data_4.
+ // See ARMELFObjectWriter::ExplicitRelSym and
+ // ARMELFObjectWriter::GetRelocTypeInner for more details.
+ MCFixupKind Kind = MCFixupKind(FK_Data_4);
+ Fixups.push_back(MCFixup::Create(0, Expr, Kind, MI.getLoc()));
+ return 0;
+ }
+
+ unsigned SoImm = MO.getImm();
int SoImmVal = ARM_AM::getSOImmVal(SoImm);
assert(SoImmVal != -1 && "Not a valid so_imm value!");
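
The assert above relies on the value being a representable ARM modified immediate: an 8-bit value rotated right by an even amount, packed as rot<<8 | imm8. A hypothetical standalone checker along those lines (illustrative, not ARM_AM::getSOImmVal):

#include <cstdint>

// Rotate a 32-bit value left by Amt bits.
static uint32_t rotl32(uint32_t V, unsigned Amt) {
  Amt &= 31;
  return Amt == 0 ? V : (V << Amt) | (V >> (32 - Amt));
}

// Return rot<<8 | imm8 if V equals imm8 rotated right by 2*rot, else -1.
static int encodeSoImm(uint32_t V) {
  for (unsigned Rot = 0; Rot != 16; ++Rot) {
    uint32_t Imm8 = rotl32(V, 2 * Rot); // undo the right rotation
    if (Imm8 <= 0xff)
      return int((Rot << 8) | Imm8);
  }
  return -1;
}
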
@@ -253,7 +306,8 @@ public:
/// getT2SOImmOpValue - Return an encoded 12-bit shifted-immediate value.
unsigned getT2SOImmOpValue(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
unsigned SoImm = MI.getOperand(Op).getImm();
unsigned Encoded = ARM_AM::getT2SOImmVal(SoImm);
assert(Encoded != ~0U && "Not a Thumb2 so_imm value?");
@@ -261,64 +315,88 @@ public:
}
unsigned getT2AddrModeSORegOpValue(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getT2AddrModeImm8OpValue(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getT2AddrModeImm8OffsetOpValue(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getT2AddrModeImm12OffsetOpValue(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getSORegOpValue - Return an encoded so_reg shifted register value.
unsigned getSORegRegOpValue(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getSORegImmOpValue(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getT2SORegOpValue(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getNEONVcvtImm32OpValue(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
return 64 - MI.getOperand(Op).getImm();
}
unsigned getBitfieldInvertedMaskOpValue(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getRegisterListOpValue(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getAddrMode6AddressOpValue(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getAddrMode6OneLane32AddressOpValue(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getAddrMode6DupAddressOpValue(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getAddrMode6OffsetOpValue(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getShiftRight8Imm(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getShiftRight16Imm(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getShiftRight32Imm(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getShiftRight64Imm(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getThumbSRImmOpValue(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned NEONThumb2DataIPostEncoder(const MCInst &MI,
- unsigned EncodedValue) const;
+ unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const;
unsigned NEONThumb2LoadStorePostEncoder(const MCInst &MI,
- unsigned EncodedValue) const;
+ unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const;
unsigned NEONThumb2DupPostEncoder(const MCInst &MI,
- unsigned EncodedValue) const;
+ unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const;
unsigned NEONThumb2V8PostEncoder(const MCInst &MI,
- unsigned EncodedValue) const;
+ unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const;
unsigned VFPThumb2PostEncoder(const MCInst &MI,
- unsigned EncodedValue) const;
+ unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const;
void EmitByte(unsigned char C, raw_ostream &OS) const {
OS << (char)C;
@@ -327,30 +405,39 @@ public:
void EmitConstant(uint64_t Val, unsigned Size, raw_ostream &OS) const {
// Output the constant in the target's byte order.
for (unsigned i = 0; i != Size; ++i) {
- EmitByte(Val & 255, OS);
- Val >>= 8;
+ unsigned Shift = IsLittleEndian ? i * 8 : (Size - 1 - i) * 8;
+ EmitByte((Val >> Shift) & 0xff, OS);
}
}
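
EmitConstant now derives the per-byte shift from IsLittleEndian, so big-endian targets emit the most significant byte first. The same loop in isolation, writing into a buffer instead of a raw_ostream (sketch with a hypothetical signature):

#include <cstdint>
#include <vector>

// Append Size bytes of Val to Out in the requested byte order.
static void emitConstantBytes(uint64_t Val, unsigned Size, bool IsLittleEndian,
                              std::vector<uint8_t> &Out) {
  for (unsigned i = 0; i != Size; ++i) {
    unsigned Shift = IsLittleEndian ? i * 8 : (Size - 1 - i) * 8;
    Out.push_back(uint8_t((Val >> Shift) & 0xff));
  }
}
// e.g. 0x11223344 with Size == 4 yields 44 33 22 11 (LE) or 11 22 33 44 (BE).
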
void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const override;
};
} // end anonymous namespace
-MCCodeEmitter *llvm::createARMMCCodeEmitter(const MCInstrInfo &MCII,
- const MCRegisterInfo &MRI,
- const MCSubtargetInfo &STI,
- MCContext &Ctx) {
- return new ARMMCCodeEmitter(MCII, STI, Ctx);
+MCCodeEmitter *llvm::createARMLEMCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new ARMMCCodeEmitter(MCII, Ctx, true);
+}
+
+MCCodeEmitter *llvm::createARMBEMCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new ARMMCCodeEmitter(MCII, Ctx, false);
}
/// NEONThumb2DataIPostEncoder - Post-process encoded NEON data-processing
/// instructions, and rewrite them to their Thumb2 form if we are currently in
/// Thumb2 mode.
unsigned ARMMCCodeEmitter::NEONThumb2DataIPostEncoder(const MCInst &MI,
- unsigned EncodedValue) const {
- if (isThumb2()) {
+ unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const {
+ if (isThumb2(STI)) {
// NEON Thumb2 data-processing encodings are very simple: bit 24 is moved
// to bit 12 of the high half-word (i.e. bit 28), and bits 27-24 are
// set to 1111.
@@ -368,8 +455,9 @@ unsigned ARMMCCodeEmitter::NEONThumb2DataIPostEncoder(const MCInst &MI,
/// instructions, and rewrite them to their Thumb2 form if we are currently in
/// Thumb2 mode.
unsigned ARMMCCodeEmitter::NEONThumb2LoadStorePostEncoder(const MCInst &MI,
- unsigned EncodedValue) const {
- if (isThumb2()) {
+ unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const {
+ if (isThumb2(STI)) {
EncodedValue &= 0xF0FFFFFF;
EncodedValue |= 0x09000000;
}
@@ -381,8 +469,9 @@ unsigned ARMMCCodeEmitter::NEONThumb2LoadStorePostEncoder(const MCInst &MI,
/// instructions, and rewrite them to their Thumb2 form if we are currently in
/// Thumb2 mode.
unsigned ARMMCCodeEmitter::NEONThumb2DupPostEncoder(const MCInst &MI,
- unsigned EncodedValue) const {
- if (isThumb2()) {
+ unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const {
+ if (isThumb2(STI)) {
EncodedValue &= 0x00FFFFFF;
EncodedValue |= 0xEE000000;
}
@@ -393,8 +482,9 @@ unsigned ARMMCCodeEmitter::NEONThumb2DupPostEncoder(const MCInst &MI,
/// Post-process encoded NEON v8 instructions, and rewrite them to Thumb2 form
/// if we are in Thumb2.
unsigned ARMMCCodeEmitter::NEONThumb2V8PostEncoder(const MCInst &MI,
- unsigned EncodedValue) const {
- if (isThumb2()) {
+ unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const {
+ if (isThumb2(STI)) {
EncodedValue |= 0xC000000; // Set bits 27-26
}
@@ -404,8 +494,9 @@ unsigned ARMMCCodeEmitter::NEONThumb2V8PostEncoder(const MCInst &MI,
/// VFPThumb2PostEncoder - Post-process encoded VFP instructions and rewrite
/// them to their Thumb2 form if we are currently in Thumb2 mode.
unsigned ARMMCCodeEmitter::
-VFPThumb2PostEncoder(const MCInst &MI, unsigned EncodedValue) const {
- if (isThumb2()) {
+VFPThumb2PostEncoder(const MCInst &MI, unsigned EncodedValue,
+ const MCSubtargetInfo &STI) const {
+ if (isThumb2(STI)) {
EncodedValue &= 0x0FFFFFFF;
EncodedValue |= 0xE0000000;
}
@@ -416,7 +507,8 @@ VFPThumb2PostEncoder(const MCInst &MI, unsigned EncodedValue) const {
/// operand requires relocation, record the relocation and return zero.
unsigned ARMMCCodeEmitter::
getMachineOpValue(const MCInst &MI, const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
if (MO.isReg()) {
unsigned Reg = MO.getReg();
unsigned RegNo = CTX.getRegisterInfo()->getEncodingValue(Reg);
@@ -444,7 +536,8 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO,
/// getAddrModeImmOpValue - Return encoding info for 'reg +/- imm' operand.
bool ARMMCCodeEmitter::
EncodeAddrModeOpValues(const MCInst &MI, unsigned OpIdx, unsigned &Reg,
- unsigned &Imm, SmallVectorImpl<MCFixup> &Fixups) const {
+ unsigned &Imm, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpIdx);
const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
@@ -473,7 +566,8 @@ EncodeAddrModeOpValues(const MCInst &MI, unsigned OpIdx, unsigned &Reg,
/// which is either an immediate or requires a fixup.
static uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
unsigned FixupKind,
- SmallVectorImpl<MCFixup> &Fixups) {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) {
const MCOperand &MO = MI.getOperand(OpIdx);
// If the destination is an immediate, we have nothing to do.
@@ -509,11 +603,12 @@ static int32_t encodeThumbBLOffset(int32_t offset) {
/// getThumbBLTargetOpValue - Return encoding info for immediate branch target.
uint32_t ARMMCCodeEmitter::
getThumbBLTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand MO = MI.getOperand(OpIdx);
if (MO.isExpr())
return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_bl,
- Fixups);
+ Fixups, STI);
return encodeThumbBLOffset(MO.getImm());
}
@@ -521,43 +616,47 @@ getThumbBLTargetOpValue(const MCInst &MI, unsigned OpIdx,
/// BLX branch target.
uint32_t ARMMCCodeEmitter::
getThumbBLXTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand MO = MI.getOperand(OpIdx);
if (MO.isExpr())
return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_blx,
- Fixups);
+ Fixups, STI);
return encodeThumbBLOffset(MO.getImm());
}
/// getThumbBRTargetOpValue - Return encoding info for Thumb branch target.
uint32_t ARMMCCodeEmitter::
getThumbBRTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand MO = MI.getOperand(OpIdx);
if (MO.isExpr())
return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_br,
- Fixups);
+ Fixups, STI);
return (MO.getImm() >> 1);
}
/// getThumbBCCTargetOpValue - Return encoding info for Thumb branch target.
uint32_t ARMMCCodeEmitter::
getThumbBCCTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand MO = MI.getOperand(OpIdx);
if (MO.isExpr())
return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_bcc,
- Fixups);
+ Fixups, STI);
return (MO.getImm() >> 1);
}
/// getThumbCBTargetOpValue - Return encoding info for Thumb branch target.
uint32_t ARMMCCodeEmitter::
getThumbCBTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand MO = MI.getOperand(OpIdx);
if (MO.isExpr())
- return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_cb, Fixups);
+ return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_cb, Fixups, STI);
return (MO.getImm() >> 1);
}
@@ -582,27 +681,29 @@ static bool HasConditionalBranch(const MCInst &MI) {
/// target.
uint32_t ARMMCCodeEmitter::
getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// FIXME: This really, really shouldn't use TargetMachine. We don't want
// coupling between MC and TM anywhere we can help it.
- if (isThumb2())
+ if (isThumb2(STI))
return
- ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_condbranch, Fixups);
- return getARMBranchTargetOpValue(MI, OpIdx, Fixups);
+ ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_condbranch, Fixups, STI);
+ return getARMBranchTargetOpValue(MI, OpIdx, Fixups, STI);
}
/// getBranchTargetOpValue - Return encoding info for 24-bit immediate branch
/// target.
uint32_t ARMMCCodeEmitter::
getARMBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand MO = MI.getOperand(OpIdx);
if (MO.isExpr()) {
if (HasConditionalBranch(MI))
return ::getBranchTargetOpValue(MI, OpIdx,
- ARM::fixup_arm_condbranch, Fixups);
+ ARM::fixup_arm_condbranch, Fixups, STI);
return ::getBranchTargetOpValue(MI, OpIdx,
- ARM::fixup_arm_uncondbranch, Fixups);
+ ARM::fixup_arm_uncondbranch, Fixups, STI);
}
return MO.getImm() >> 2;
@@ -610,13 +711,14 @@ getARMBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
uint32_t ARMMCCodeEmitter::
getARMBLTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand MO = MI.getOperand(OpIdx);
if (MO.isExpr()) {
if (HasConditionalBranch(MI))
return ::getBranchTargetOpValue(MI, OpIdx,
- ARM::fixup_arm_condbl, Fixups);
- return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_uncondbl, Fixups);
+ ARM::fixup_arm_condbl, Fixups, STI);
+ return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_uncondbl, Fixups, STI);
}
return MO.getImm() >> 2;
@@ -624,10 +726,11 @@ getARMBLTargetOpValue(const MCInst &MI, unsigned OpIdx,
uint32_t ARMMCCodeEmitter::
getARMBLXTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand MO = MI.getOperand(OpIdx);
if (MO.isExpr())
- return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_blx, Fixups);
+ return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_blx, Fixups, STI);
return MO.getImm() >> 1;
}
@@ -636,12 +739,13 @@ getARMBLXTargetOpValue(const MCInst &MI, unsigned OpIdx,
/// immediate branch target.
uint32_t ARMMCCodeEmitter::
getUnconditionalBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
unsigned Val = 0;
const MCOperand MO = MI.getOperand(OpIdx);
if(MO.isExpr())
- return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_uncondbranch, Fixups);
+ return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_uncondbranch, Fixups, STI);
else
Val = MO.getImm() >> 1;
@@ -665,11 +769,12 @@ getUnconditionalBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
/// ADR label target.
uint32_t ARMMCCodeEmitter::
getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand MO = MI.getOperand(OpIdx);
if (MO.isExpr())
return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_adr_pcrel_12,
- Fixups);
+ Fixups, STI);
int64_t offset = MO.getImm();
uint32_t Val = 0x2000;
@@ -705,11 +810,12 @@ getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
/// target.
uint32_t ARMMCCodeEmitter::
getT2AdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand MO = MI.getOperand(OpIdx);
if (MO.isExpr())
return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_adr_pcrel_12,
- Fixups);
+ Fixups, STI);
int32_t Val = MO.getImm();
if (Val == INT32_MIN)
Val = 0x1000;
@@ -724,11 +830,12 @@ getT2AdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
/// target.
uint32_t ARMMCCodeEmitter::
getThumbAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand MO = MI.getOperand(OpIdx);
if (MO.isExpr())
return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_thumb_adr_pcrel_10,
- Fixups);
+ Fixups, STI);
return MO.getImm();
}
@@ -736,7 +843,8 @@ getThumbAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
/// operand.
uint32_t ARMMCCodeEmitter::
getThumbAddrModeRegRegOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &) const {
+ SmallVectorImpl<MCFixup> &,
+ const MCSubtargetInfo &STI) const {
// [Rn, Rm]
// {5-3} = Rm
// {2-0} = Rn
@@ -750,7 +858,8 @@ getThumbAddrModeRegRegOpValue(const MCInst &MI, unsigned OpIdx,
/// getAddrModeImm12OpValue - Return encoding info for 'reg +/- imm12' operand.
uint32_t ARMMCCodeEmitter::
getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// {17-13} = reg
// {12} = (U)nsigned (add == '1', sub == '0')
// {11-0} = imm12
@@ -767,7 +876,7 @@ getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx,
isAdd = false ; // 'U' bit is set as part of the fixup.
MCFixupKind Kind;
- if (isThumb2())
+ if (isThumb2(STI))
Kind = MCFixupKind(ARM::fixup_t2_ldst_pcrel_12);
else
Kind = MCFixupKind(ARM::fixup_arm_ldst_pcrel_12);
@@ -787,7 +896,7 @@ getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx,
Imm12 = Offset;
}
} else
- isAdd = EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm12, Fixups);
+ isAdd = EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm12, Fixups, STI);
uint32_t Binary = Imm12 & 0xfff;
// Immediate is always encoded as positive. The 'U' bit controls add vs sub.
@@ -801,7 +910,8 @@ getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx,
/// '+/- imm8<<2' operand.
uint32_t ARMMCCodeEmitter::
getT2Imm8s4OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// FIXME: The immediate operand should have already been encoded like this
// before ever getting here. The encoder method should just need to combine
// the MI operands for the register and the offset into a single
@@ -832,7 +942,8 @@ getT2Imm8s4OpValue(const MCInst &MI, unsigned OpIdx,
/// 'reg +/- imm8<<2' operand.
uint32_t ARMMCCodeEmitter::
getT2AddrModeImm8s4OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// {12-9} = reg
// {8} = (U)nsigned (add == '1', sub == '0')
// {7-0} = imm8
@@ -852,7 +963,7 @@ getT2AddrModeImm8s4OpValue(const MCInst &MI, unsigned OpIdx,
++MCNumCPRelocations;
} else
- isAdd = EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm8, Fixups);
+ isAdd = EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm8, Fixups, STI);
// FIXME: The immediate operand should have already been encoded like this
// before ever getting here. The encoder method should just need to combine
@@ -872,7 +983,8 @@ getT2AddrModeImm8s4OpValue(const MCInst &MI, unsigned OpIdx,
/// 'reg + imm8<<2' operand.
uint32_t ARMMCCodeEmitter::
getT2AddrModeImm0_1020s4OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// {11-8} = reg
// {7-0} = imm8
const MCOperand &MO = MI.getOperand(OpIdx);
@@ -882,22 +994,10 @@ getT2AddrModeImm0_1020s4OpValue(const MCInst &MI, unsigned OpIdx,
return (Reg << 8) | Imm8;
}
-// FIXME: This routine assumes that a binary
-// expression will always result in a PCRel expression
-// In reality, its only true if one or more subexpressions
-// is itself a PCRel (i.e. "." in asm or some other pcrel construct)
-// but this is good enough for now.
-static bool EvaluateAsPCRel(const MCExpr *Expr) {
- switch (Expr->getKind()) {
- default: llvm_unreachable("Unexpected expression type");
- case MCExpr::SymbolRef: return false;
- case MCExpr::Binary: return true;
- }
-}
-
uint32_t
ARMMCCodeEmitter::getHiLo16ImmOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// {20-16} = imm{15-12}
// {11-0} = imm{11-0}
const MCOperand &MO = MI.getOperand(OpIdx);
@@ -912,51 +1012,48 @@ ARMMCCodeEmitter::getHiLo16ImmOpValue(const MCInst &MI, unsigned OpIdx,
const ARMMCExpr *ARM16Expr = cast<ARMMCExpr>(E);
E = ARM16Expr->getSubExpr();
+ if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(E)) {
+ const int64_t Value = MCE->getValue();
+ if (Value > UINT32_MAX)
+ report_fatal_error("constant value truncated (limited to 32-bit)");
+
+ switch (ARM16Expr->getKind()) {
+ case ARMMCExpr::VK_ARM_HI16:
+ return (int32_t(Value) & 0xffff0000) >> 16;
+ case ARMMCExpr::VK_ARM_LO16:
+ return (int32_t(Value) & 0x0000ffff);
+ default: llvm_unreachable("Unsupported ARMFixup");
+ }
+ }
+
switch (ARM16Expr->getKind()) {
default: llvm_unreachable("Unsupported ARMFixup");
case ARMMCExpr::VK_ARM_HI16:
- if (!isTargetDarwin() && EvaluateAsPCRel(E))
- Kind = MCFixupKind(isThumb2()
- ? ARM::fixup_t2_movt_hi16_pcrel
- : ARM::fixup_arm_movt_hi16_pcrel);
- else
- Kind = MCFixupKind(isThumb2()
- ? ARM::fixup_t2_movt_hi16
- : ARM::fixup_arm_movt_hi16);
+ Kind = MCFixupKind(isThumb2(STI) ? ARM::fixup_t2_movt_hi16
+ : ARM::fixup_arm_movt_hi16);
break;
case ARMMCExpr::VK_ARM_LO16:
- if (!isTargetDarwin() && EvaluateAsPCRel(E))
- Kind = MCFixupKind(isThumb2()
- ? ARM::fixup_t2_movw_lo16_pcrel
- : ARM::fixup_arm_movw_lo16_pcrel);
- else
- Kind = MCFixupKind(isThumb2()
- ? ARM::fixup_t2_movw_lo16
- : ARM::fixup_arm_movw_lo16);
+ Kind = MCFixupKind(isThumb2(STI) ? ARM::fixup_t2_movw_lo16
+ : ARM::fixup_arm_movw_lo16);
break;
}
+
Fixups.push_back(MCFixup::Create(0, E, Kind, MI.getLoc()));
return 0;
}
// If the expression doesn't have :upper16: or :lower16: on it,
- // it's just a plain immediate expression, and those evaluate to
+ // it's just a plain immediate expression, previously those evaluated to
// the lower 16 bits of the expression regardless of whether
- // we have a movt or a movw.
- if (!isTargetDarwin() && EvaluateAsPCRel(E))
- Kind = MCFixupKind(isThumb2()
- ? ARM::fixup_t2_movw_lo16_pcrel
- : ARM::fixup_arm_movw_lo16_pcrel);
- else
- Kind = MCFixupKind(isThumb2()
- ? ARM::fixup_t2_movw_lo16
- : ARM::fixup_arm_movw_lo16);
- Fixups.push_back(MCFixup::Create(0, E, Kind, MI.getLoc()));
- return 0;
+ // we have a movt or a movw, but that led to misleading results.
+ // This is now disallowed in the AsmParser in validateInstruction()
+ // so this should never happen.
+ llvm_unreachable("expression without :upper16: or :lower16:");
}
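
For :upper16:/:lower16: applied to a constant, the new code above folds the value directly instead of emitting a fixup; the fold is just a selection of one 16-bit half of the 32-bit value. A standalone sketch (hypothetical helper):

#include <cstdint>

// Return the half of a 32-bit value selected by an :upper16:/:lower16: modifier.
static uint32_t foldHiLo16(uint32_t Value, bool IsHi16) {
  return IsHi16 ? (Value & 0xffff0000u) >> 16
                : (Value & 0x0000ffffu);
}
// e.g. foldHiLo16(0x12345678, true) == 0x1234, foldHiLo16(0x12345678, false) == 0x5678.
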
uint32_t ARMMCCodeEmitter::
getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpIdx);
const MCOperand &MO1 = MI.getOperand(OpIdx+1);
const MCOperand &MO2 = MI.getOperand(OpIdx+2);
@@ -989,21 +1086,23 @@ getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx,
uint32_t ARMMCCodeEmitter::
getAddrMode2OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// {17-14} Rn
// {13} 1 == imm12, 0 == Rm
// {12} isAdd
// {11-0} imm12/Rm
const MCOperand &MO = MI.getOperand(OpIdx);
unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
- uint32_t Binary = getAddrMode2OffsetOpValue(MI, OpIdx + 1, Fixups);
+ uint32_t Binary = getAddrMode2OffsetOpValue(MI, OpIdx + 1, Fixups, STI);
Binary |= Rn << 14;
return Binary;
}
uint32_t ARMMCCodeEmitter::
getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// {13} 1 == imm12, 0 == Rm
// {12} isAdd
// {11-0} imm12/Rm
@@ -1025,7 +1124,8 @@ getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx,
uint32_t ARMMCCodeEmitter::
getPostIdxRegOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// {4} isAdd
// {3-0} Rm
const MCOperand &MO = MI.getOperand(OpIdx);
@@ -1036,7 +1136,8 @@ getPostIdxRegOpValue(const MCInst &MI, unsigned OpIdx,
uint32_t ARMMCCodeEmitter::
getAddrMode3OffsetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// {9} 1 == imm8, 0 == Rm
// {8} isAdd
// {7-4} imm7_4/zero
@@ -1055,7 +1156,8 @@ getAddrMode3OffsetOpValue(const MCInst &MI, unsigned OpIdx,
uint32_t ARMMCCodeEmitter::
getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// {13} 1 == imm8, 0 == Rm
// {12-9} Rn
// {8} isAdd
@@ -1091,7 +1193,8 @@ getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx,
/// getAddrModeThumbSPOpValue - Encode the t_addrmode_sp operands.
uint32_t ARMMCCodeEmitter::
getAddrModeThumbSPOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// [SP, #imm]
// {7-0} = imm8
const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
@@ -1106,7 +1209,8 @@ getAddrModeThumbSPOpValue(const MCInst &MI, unsigned OpIdx,
/// getAddrModeISOpValue - Encode the t_addrmode_is# operands.
uint32_t ARMMCCodeEmitter::
getAddrModeISOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// [Rn, #imm]
// {7-3} = imm5
// {2-0} = Rn
@@ -1120,17 +1224,19 @@ getAddrModeISOpValue(const MCInst &MI, unsigned OpIdx,
/// getAddrModePCOpValue - Return encoding for t_addrmode_pc operands.
uint32_t ARMMCCodeEmitter::
getAddrModePCOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand MO = MI.getOperand(OpIdx);
if (MO.isExpr())
- return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_cp, Fixups);
+ return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_cp, Fixups, STI);
return (MO.getImm() >> 2);
}
/// getAddrMode5OpValue - Return encoding info for 'reg +/- imm10' operand.
uint32_t ARMMCCodeEmitter::
getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// {12-9} = reg
// {8} = (U)nsigned (add == '1', sub == '0')
// {7-0} = imm8
@@ -1146,7 +1252,7 @@ getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx,
assert(MO.isExpr() && "Unexpected machine operand type!");
const MCExpr *Expr = MO.getExpr();
MCFixupKind Kind;
- if (isThumb2())
+ if (isThumb2(STI))
Kind = MCFixupKind(ARM::fixup_t2_pcrel_10);
else
Kind = MCFixupKind(ARM::fixup_arm_pcrel_10);
@@ -1154,7 +1260,7 @@ getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx,
++MCNumCPRelocations;
} else {
- EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm8, Fixups);
+ EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm8, Fixups, STI);
isAdd = ARM_AM::getAM5Op(Imm8) == ARM_AM::add;
}
@@ -1168,7 +1274,8 @@ getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx,
unsigned ARMMCCodeEmitter::
getSORegRegOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// Sub-operands are [reg, reg, imm]. The first register is Rm, the reg to be
// shifted. The second is Rs, the amount to shift by, and the third specifies
// the type of the shift.
@@ -1215,7 +1322,8 @@ getSORegRegOpValue(const MCInst &MI, unsigned OpIdx,
unsigned ARMMCCodeEmitter::
getSORegImmOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// Sub-operands are [reg, imm]. The first register is Rm, the reg to be
// shifted. The second is the amount to shift by.
//
@@ -1261,7 +1369,8 @@ getSORegImmOpValue(const MCInst &MI, unsigned OpIdx,
unsigned ARMMCCodeEmitter::
getT2AddrModeSORegOpValue(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO1 = MI.getOperand(OpNum);
const MCOperand &MO2 = MI.getOperand(OpNum+1);
const MCOperand &MO3 = MI.getOperand(OpNum+2);
@@ -1279,7 +1388,8 @@ getT2AddrModeSORegOpValue(const MCInst &MI, unsigned OpNum,
unsigned ARMMCCodeEmitter::
getT2AddrModeImm8OpValue(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO1 = MI.getOperand(OpNum);
const MCOperand &MO2 = MI.getOperand(OpNum+1);
@@ -1300,7 +1410,8 @@ getT2AddrModeImm8OpValue(const MCInst &MI, unsigned OpNum,
unsigned ARMMCCodeEmitter::
getT2AddrModeImm8OffsetOpValue(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO1 = MI.getOperand(OpNum);
// FIXME: Needs fixup support.
@@ -1316,7 +1427,8 @@ getT2AddrModeImm8OffsetOpValue(const MCInst &MI, unsigned OpNum,
unsigned ARMMCCodeEmitter::
getT2AddrModeImm12OffsetOpValue(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO1 = MI.getOperand(OpNum);
// FIXME: Needs fixup support.
@@ -1332,7 +1444,8 @@ getT2AddrModeImm12OffsetOpValue(const MCInst &MI, unsigned OpNum,
unsigned ARMMCCodeEmitter::
getT2SORegOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// Sub-operands are [reg, imm]. The first register is Rm, the reg to be
// shifted. The second is the amount to shift by.
//
@@ -1374,7 +1487,8 @@ getT2SORegOpValue(const MCInst &MI, unsigned OpIdx,
unsigned ARMMCCodeEmitter::
getBitfieldInvertedMaskOpValue(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// 10 bits. lower 5 bits are the lsb of the mask, high five bits are the
// msb of the mask.
const MCOperand &MO = MI.getOperand(Op);
@@ -1387,7 +1501,8 @@ getBitfieldInvertedMaskOpValue(const MCInst &MI, unsigned Op,
unsigned ARMMCCodeEmitter::
getRegisterListOpValue(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// VLDM/VSTM:
// {12-8} = Vd
// {7-0} = Number of registers
@@ -1423,7 +1538,8 @@ getRegisterListOpValue(const MCInst &MI, unsigned Op,
/// with the alignment operand.
unsigned ARMMCCodeEmitter::
getAddrMode6AddressOpValue(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &Reg = MI.getOperand(Op);
const MCOperand &Imm = MI.getOperand(Op + 1);
@@ -1446,7 +1562,8 @@ getAddrMode6AddressOpValue(const MCInst &MI, unsigned Op,
/// along with the alignment operand for use in VST1 and VLD1 with size 32.
unsigned ARMMCCodeEmitter::
getAddrMode6OneLane32AddressOpValue(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &Reg = MI.getOperand(Op);
const MCOperand &Imm = MI.getOperand(Op + 1);
@@ -1472,7 +1589,8 @@ getAddrMode6OneLane32AddressOpValue(const MCInst &MI, unsigned Op,
/// different for VLD4-dup.
unsigned ARMMCCodeEmitter::
getAddrMode6DupAddressOpValue(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &Reg = MI.getOperand(Op);
const MCOperand &Imm = MI.getOperand(Op + 1);
@@ -1492,7 +1610,8 @@ getAddrMode6DupAddressOpValue(const MCInst &MI, unsigned Op,
unsigned ARMMCCodeEmitter::
getAddrMode6OffsetOpValue(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(Op);
if (MO.getReg() == 0) return 0x0D;
return CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
@@ -1500,31 +1619,36 @@ getAddrMode6OffsetOpValue(const MCInst &MI, unsigned Op,
unsigned ARMMCCodeEmitter::
getShiftRight8Imm(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
return 8 - MI.getOperand(Op).getImm();
}
unsigned ARMMCCodeEmitter::
getShiftRight16Imm(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
return 16 - MI.getOperand(Op).getImm();
}
unsigned ARMMCCodeEmitter::
getShiftRight32Imm(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
return 32 - MI.getOperand(Op).getImm();
}
unsigned ARMMCCodeEmitter::
getShiftRight64Imm(const MCInst &MI, unsigned Op,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
return 64 - MI.getOperand(Op).getImm();
}
void ARMMCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// Pseudo instructions don't get encoded.
const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
uint64_t TSFlags = Desc.TSFlags;
@@ -1537,10 +1661,10 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
else
llvm_unreachable("Unexpected instruction size!");
- uint32_t Binary = getBinaryCodeForInstr(MI, Fixups);
+ uint32_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);
// Thumb 32-bit wide instructions need to emit the high order halfword
// first.
- if (isThumb() && Size == 4) {
+ if (isThumb(STI) && Size == 4) {
EmitConstant(Binary >> 16, 2, OS);
EmitConstant(Binary & 0xffff, 2, OS);
} else
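
The hunks above apply one mechanical change across every ARM operand encoder: each callback gains a trailing const MCSubtargetInfo &STI parameter, so per-instruction decisions such as isThumb(STI) query the subtarget passed in per call rather than state captured when the emitter was constructed. A minimal, self-contained sketch of that pattern follows; SubtargetInfo and Emitter are hypothetical stand-ins, not the LLVM MC classes.

#include <cstdint>
#include <iostream>

// Hypothetical stand-in for MCSubtargetInfo: per-call subtarget state.
struct SubtargetInfo {
  bool ThumbMode;
};

struct Emitter {
  // Every encoder helper now takes the subtarget explicitly, even when it
  // does not need it, so all helpers share one signature.
  uint32_t getShiftRight8Imm(uint32_t Imm, const SubtargetInfo &STI) const {
    (void)STI;
    return 8 - Imm;
  }

  // Instruction-level decisions query the STI passed in per call instead of
  // state frozen when the emitter was constructed.
  void encodeInstruction(uint32_t Binary, unsigned Size,
                         const SubtargetInfo &STI, std::ostream &OS) const {
    if (STI.ThumbMode && Size == 4)
      OS << std::hex << (Binary >> 16) << ' ' << (Binary & 0xffff) << '\n';
    else
      OS << std::hex << Binary << '\n';
  }
};

int main() {
  Emitter E;
  SubtargetInfo Thumb{true}, Arm{false};
  E.encodeInstruction(0xf000d000u, 4, Thumb, std::cout); // halfwords swapped
  E.encodeInstruction(0xe1a00000u, 4, Arm, std::cout);   // single 32-bit word
  return 0;
}

Keeping the uniform signature even where STI goes unused is what lets the generated getBinaryCodeForInstr call every helper the same way, as the EncodeInstruction hunk above shows.
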
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.cpp
index fc8505b..e545e3c 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.cpp
@@ -7,12 +7,13 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "armmcexpr"
#include "ARMMCExpr.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
using namespace llvm;
+#define DEBUG_TYPE "armmcexpr"
+
const ARMMCExpr*
ARMMCExpr::Create(VariantKind Kind, const MCExpr *Expr,
MCContext &Ctx) {
@@ -40,33 +41,6 @@ ARMMCExpr::EvaluateAsRelocatableImpl(MCValue &Res,
return false;
}
-// FIXME: This basically copies MCObjectStreamer::AddValueSymbols. Perhaps
-// that method should be made public?
-static void AddValueSymbols_(const MCExpr *Value, MCAssembler *Asm) {
- switch (Value->getKind()) {
- case MCExpr::Target:
- llvm_unreachable("Can't handle nested target expr!");
-
- case MCExpr::Constant:
- break;
-
- case MCExpr::Binary: {
- const MCBinaryExpr *BE = cast<MCBinaryExpr>(Value);
- AddValueSymbols_(BE->getLHS(), Asm);
- AddValueSymbols_(BE->getRHS(), Asm);
- break;
- }
-
- case MCExpr::SymbolRef:
- Asm->getOrCreateSymbolData(cast<MCSymbolRefExpr>(Value)->getSymbol());
- break;
-
- case MCExpr::Unary:
- AddValueSymbols_(cast<MCUnaryExpr>(Value)->getSubExpr(), Asm);
- break;
- }
-}
-
-void ARMMCExpr::AddValueSymbols(MCAssembler *Asm) const {
- AddValueSymbols_(getSubExpr(), Asm);
+void ARMMCExpr::visitUsedExpr(MCStreamer &Streamer) const {
+ Streamer.visitUsedExpr(*getSubExpr());
}
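
The removed AddValueSymbols_ helper re-implemented a generic walk over the expression tree; its replacement simply hands the sub-expression to MCStreamer::visitUsedExpr, which owns that traversal in common code. A toy version of the same refactor, with hypothetical Expr/Streamer types rather than the MC classes:

#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Hypothetical expression tree standing in for MCExpr.
struct Expr {
  std::string Symbol;                      // non-empty for a symbol reference
  std::vector<std::unique_ptr<Expr>> Kids; // sub-expressions
};

// Hypothetical streamer: owns the single, shared traversal.
struct Streamer {
  void visitUsedExpr(const Expr &E) {
    if (!E.Symbol.empty())
      std::cout << "register symbol: " << E.Symbol << '\n';
    for (const auto &K : E.Kids)
      visitUsedExpr(*K);
  }
};

// Target-specific wrapper node: rather than re-implementing the walk, it
// hands its sub-expression back to the streamer.
struct TargetExpr {
  std::unique_ptr<Expr> Sub;
  void visitUsedExpr(Streamer &S) const { S.visitUsedExpr(*Sub); }
};

int main() {
  TargetExpr TE;
  TE.Sub = std::make_unique<Expr>();
  TE.Sub->Symbol = "foo";
  Streamer S;
  TE.visitUsedExpr(S);
  return 0;
}
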
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h
index cd4067a..c5c0b10 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h
@@ -56,16 +56,16 @@ public:
/// @}
- void PrintImpl(raw_ostream &OS) const;
+ void PrintImpl(raw_ostream &OS) const override;
bool EvaluateAsRelocatableImpl(MCValue &Res,
- const MCAsmLayout *Layout) const;
- void AddValueSymbols(MCAssembler *) const;
- const MCSection *FindAssociatedSection() const {
+ const MCAsmLayout *Layout) const override;
+ void visitUsedExpr(MCStreamer &Streamer) const override;
+ const MCSection *FindAssociatedSection() const override {
return getSubExpr()->FindAssociatedSection();
}
// There are no TLS ARMMCExprs at the moment.
- void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {}
+ void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const override {}
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::Target;
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
index a99de0e..6a3ec8f 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
@@ -21,6 +21,7 @@
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
@@ -83,89 +84,87 @@ static bool getITDeprecationInfo(MCInst &MI, MCSubtargetInfo &STI,
std::string ARM_MC::ParseARMTriple(StringRef TT, StringRef CPU) {
Triple triple(TT);
- // Set the boolean corresponding to the current target triple, or the default
- // if one cannot be determined, to true.
- unsigned Len = TT.size();
- unsigned Idx = 0;
-
- // FIXME: Enhance Triple helper class to extract ARM version.
- bool isThumb = false;
- if (Len >= 5 && TT.substr(0, 4) == "armv")
- Idx = 4;
- else if (Len >= 6 && TT.substr(0, 5) == "thumb") {
- isThumb = true;
- if (Len >= 7 && TT[5] == 'v')
- Idx = 6;
- }
+ bool isThumb = triple.getArch() == Triple::thumb ||
+ triple.getArch() == Triple::thumbeb;
bool NoCPU = CPU == "generic" || CPU.empty();
std::string ARMArchFeature;
- if (Idx) {
- unsigned SubVer = TT[Idx];
- if (SubVer == '8') {
- if (NoCPU)
- // v8a: FeatureDB, FeatureFPARMv8, FeatureNEON, FeatureDSPThumb2, FeatureMP,
- // FeatureHWDiv, FeatureHWDivARM, FeatureTrustZone, FeatureT2XtPk, FeatureCrypto, FeatureCRC
- ARMArchFeature = "+v8,+db,+fp-armv8,+neon,+t2dsp,+mp,+hwdiv,+hwdiv-arm,+trustzone,+t2xtpk,+crypto,+crc";
- else
- // Use CPU to figure out the exact features
- ARMArchFeature = "+v8";
- } else if (SubVer == '7') {
- if (Len >= Idx+2 && TT[Idx+1] == 'm') {
- isThumb = true;
- if (NoCPU)
- // v7m: FeatureNoARM, FeatureDB, FeatureHWDiv, FeatureMClass
- ARMArchFeature = "+v7,+noarm,+db,+hwdiv,+mclass";
- else
- // Use CPU to figure out the exact features.
- ARMArchFeature = "+v7";
- } else if (Len >= Idx+3 && TT[Idx+1] == 'e'&& TT[Idx+2] == 'm') {
- if (NoCPU)
- // v7em: FeatureNoARM, FeatureDB, FeatureHWDiv, FeatureDSPThumb2,
- // FeatureT2XtPk, FeatureMClass
- ARMArchFeature = "+v7,+noarm,+db,+hwdiv,+t2dsp,t2xtpk,+mclass";
- else
- // Use CPU to figure out the exact features.
- ARMArchFeature = "+v7";
- } else if (Len >= Idx+2 && TT[Idx+1] == 's') {
- if (NoCPU)
- // v7s: FeatureNEON, FeatureDB, FeatureDSPThumb2, FeatureT2XtPk
- // Swift
- ARMArchFeature = "+v7,+swift,+neon,+db,+t2dsp,+t2xtpk";
- else
- // Use CPU to figure out the exact features.
- ARMArchFeature = "+v7";
- } else {
- // v7 CPUs have lots of different feature sets. If no CPU is specified,
- // then assume v7a (e.g. cortex-a8) feature set. Otherwise, return
- // the "minimum" feature set and use CPU string to figure out the exact
- // features.
- if (NoCPU)
- // v7a: FeatureNEON, FeatureDB, FeatureDSPThumb2, FeatureT2XtPk
- ARMArchFeature = "+v7,+neon,+db,+t2dsp,+t2xtpk";
- else
- // Use CPU to figure out the exact features.
- ARMArchFeature = "+v7";
- }
- } else if (SubVer == '6') {
- if (Len >= Idx+3 && TT[Idx+1] == 't' && TT[Idx+2] == '2')
- ARMArchFeature = "+v6t2";
- else if (Len >= Idx+2 && TT[Idx+1] == 'm') {
- isThumb = true;
- if (NoCPU)
- // v6m: FeatureNoARM, FeatureMClass
- ARMArchFeature = "+v6m,+noarm,+mclass";
- else
- ARMArchFeature = "+v6";
- } else
- ARMArchFeature = "+v6";
- } else if (SubVer == '5') {
- if (Len >= Idx+3 && TT[Idx+1] == 't' && TT[Idx+2] == 'e')
- ARMArchFeature = "+v5te";
- else
- ARMArchFeature = "+v5t";
- } else if (SubVer == '4' && Len >= Idx+2 && TT[Idx+1] == 't')
- ARMArchFeature = "+v4t";
+ switch (triple.getSubArch()) {
+ case Triple::ARMSubArch_v8:
+ if (NoCPU)
+ // v8a: FeatureDB, FeatureFPARMv8, FeatureNEON, FeatureDSPThumb2,
+ // FeatureMP, FeatureHWDiv, FeatureHWDivARM, FeatureTrustZone,
+ // FeatureT2XtPk, FeatureCrypto, FeatureCRC
+ ARMArchFeature = "+v8,+db,+fp-armv8,+neon,+t2dsp,+mp,+hwdiv,+hwdiv-arm,"
+ "+trustzone,+t2xtpk,+crypto,+crc";
+ else
+ // Use CPU to figure out the exact features
+ ARMArchFeature = "+v8";
+ break;
+ case Triple::ARMSubArch_v7m:
+ isThumb = true;
+ if (NoCPU)
+ // v7m: FeatureNoARM, FeatureDB, FeatureHWDiv, FeatureMClass
+ ARMArchFeature = "+v7,+noarm,+db,+hwdiv,+mclass";
+ else
+ // Use CPU to figure out the exact features.
+ ARMArchFeature = "+v7";
+ break;
+ case Triple::ARMSubArch_v7em:
+ if (NoCPU)
+ // v7em: FeatureNoARM, FeatureDB, FeatureHWDiv, FeatureDSPThumb2,
+ // FeatureT2XtPk, FeatureMClass
+ ARMArchFeature = "+v7,+noarm,+db,+hwdiv,+t2dsp,t2xtpk,+mclass";
+ else
+ // Use CPU to figure out the exact features.
+ ARMArchFeature = "+v7";
+ break;
+ case Triple::ARMSubArch_v7s:
+ if (NoCPU)
+ // v7s: FeatureNEON, FeatureDB, FeatureDSPThumb2, FeatureHasRAS
+ // Swift
+ ARMArchFeature = "+v7,+swift,+neon,+db,+t2dsp,+ras";
+ else
+ // Use CPU to figure out the exact features.
+ ARMArchFeature = "+v7";
+ break;
+ case Triple::ARMSubArch_v7:
+ // v7 CPUs have lots of different feature sets. If no CPU is specified,
+ // then assume v7a (e.g. cortex-a8) feature set. Otherwise, return
+ // the "minimum" feature set and use CPU string to figure out the exact
+ // features.
+ if (NoCPU)
+ // v7a: FeatureNEON, FeatureDB, FeatureDSPThumb2, FeatureT2XtPk
+ ARMArchFeature = "+v7,+neon,+db,+t2dsp,+t2xtpk";
+ else
+ // Use CPU to figure out the exact features.
+ ARMArchFeature = "+v7";
+ break;
+ case Triple::ARMSubArch_v6t2:
+ ARMArchFeature = "+v6t2";
+ break;
+ case Triple::ARMSubArch_v6m:
+ isThumb = true;
+ if (NoCPU)
+ // v6m: FeatureNoARM, FeatureMClass
+ ARMArchFeature = "+v6m,+noarm,+mclass";
+ else
+ ARMArchFeature = "+v6";
+ break;
+ case Triple::ARMSubArch_v6:
+ ARMArchFeature = "+v6";
+ break;
+ case Triple::ARMSubArch_v5te:
+ ARMArchFeature = "+v5te";
+ break;
+ case Triple::ARMSubArch_v5:
+ ARMArchFeature = "+v5t";
+ break;
+ case Triple::ARMSubArch_v4t:
+ ARMArchFeature = "+v4t";
+ break;
+ case Triple::NoSubArch:
+ break;
}
if (isThumb) {
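
The rewritten ParseARMTriple stops scanning the "armv…"/"thumbv…" prefix by hand and instead asks llvm::Triple for the architecture and sub-architecture. A small usage sketch of that classification (assumes an LLVM build environment providing llvm/ADT/Triple.h; the triple and feature values here are illustrative):

// triple_subarch.cpp -- classify an ARM triple the way the new code does.
#include "llvm/ADT/Triple.h"
#include <cstdio>

int main() {
  llvm::Triple T("thumbv7em-none-eabi");

  // Thumb vs. ARM now comes from the architecture enum, not a prefix scan.
  bool IsThumb = T.getArch() == llvm::Triple::thumb ||
                 T.getArch() == llvm::Triple::thumbeb;

  // The sub-architecture enum replaces the manual "v7"/"v8" digit parsing.
  const char *Feature = "";
  switch (T.getSubArch()) {
  case llvm::Triple::ARMSubArch_v7em: Feature = "+v7"; break;
  case llvm::Triple::ARMSubArch_v8:   Feature = "+v8"; break;
  default:                            break;
  }

  std::printf("thumb=%d feature=%s\n", IsThumb ? 1 : 0, Feature);
  return 0;
}
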
@@ -215,10 +214,37 @@ static MCRegisterInfo *createARMMCRegisterInfo(StringRef Triple) {
static MCAsmInfo *createARMMCAsmInfo(const MCRegisterInfo &MRI, StringRef TT) {
Triple TheTriple(TT);
- if (TheTriple.isOSDarwin())
- return new ARMMCAsmInfoDarwin();
+ MCAsmInfo *MAI;
+ switch (TheTriple.getOS()) {
+ case llvm::Triple::Darwin:
+ case llvm::Triple::IOS:
+ case llvm::Triple::MacOSX:
+ MAI = new ARMMCAsmInfoDarwin(TT);
+ break;
+ case llvm::Triple::Win32:
+ switch (TheTriple.getEnvironment()) {
+ case llvm::Triple::Itanium:
+ MAI = new ARMCOFFMCAsmInfoGNU();
+ break;
+ case llvm::Triple::MSVC:
+ MAI = new ARMCOFFMCAsmInfoMicrosoft();
+ break;
+ default:
+ llvm_unreachable("invalid environment");
+ }
+ break;
+ default:
+ if (TheTriple.isOSBinFormatMachO())
+ MAI = new ARMMCAsmInfoDarwin(TT);
+ else
+ MAI = new ARMELFMCAsmInfo(TT);
+ break;
+ }
+
+ unsigned Reg = MRI.getDwarfRegNum(ARM::SP, true);
+ MAI->addInitialFrameState(MCCFIInstruction::createDefCfa(nullptr, Reg, 0));
- return new ARMELFMCAsmInfo();
+ return MAI;
}
static MCCodeGenInfo *createARMMCCodeGenInfo(StringRef TT, Reloc::Model RM,
@@ -239,19 +265,25 @@ static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
MCContext &Ctx, MCAsmBackend &MAB,
raw_ostream &OS,
MCCodeEmitter *Emitter,
+ const MCSubtargetInfo &STI,
bool RelaxAll,
bool NoExecStack) {
Triple TheTriple(TT);
- if (TheTriple.isOSDarwin())
- return createMachOStreamer(Ctx, MAB, OS, Emitter, false);
-
- if (TheTriple.isOSWindows()) {
- llvm_unreachable("ARM does not support Windows COFF format");
+ switch (TheTriple.getObjectFormat()) {
+ default: llvm_unreachable("unsupported object format");
+ case Triple::MachO: {
+ MCStreamer *S = createMachOStreamer(Ctx, MAB, OS, Emitter, false);
+ new ARMTargetStreamer(*S);
+ return S;
+ }
+ case Triple::COFF:
+ assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
+ return createARMWinCOFFStreamer(Ctx, MAB, *Emitter, OS);
+ case Triple::ELF:
+ return createARMELFStreamer(Ctx, MAB, OS, Emitter, false, NoExecStack,
+ TheTriple.getArch() == Triple::thumb);
}
-
- return createARMELFStreamer(Ctx, MAB, OS, Emitter, false, NoExecStack,
- TheTriple.getArch() == Triple::thumb);
}
static MCInstPrinter *createARMMCInstPrinter(const Target &T,
@@ -262,13 +294,13 @@ static MCInstPrinter *createARMMCInstPrinter(const Target &T,
const MCSubtargetInfo &STI) {
if (SyntaxVariant == 0)
return new ARMInstPrinter(MAI, MII, MRI, STI);
- return 0;
+ return nullptr;
}
static MCRelocationInfo *createARMMCRelocationInfo(StringRef TT,
MCContext &Ctx) {
Triple TheTriple(TT);
- if (TheTriple.isEnvironmentMachO())
+ if (TheTriple.isOSBinFormatMachO())
return createARMMachORelocationInfo(Ctx);
// Default to the stock relocation info.
return llvm::createMCRelocationInfo(TT, Ctx);
@@ -280,14 +312,14 @@ class ARMMCInstrAnalysis : public MCInstrAnalysis {
public:
ARMMCInstrAnalysis(const MCInstrInfo *Info) : MCInstrAnalysis(Info) {}
- virtual bool isUnconditionalBranch(const MCInst &Inst) const {
+ bool isUnconditionalBranch(const MCInst &Inst) const override {
// BCCs with the "always" predicate are unconditional branches.
if (Inst.getOpcode() == ARM::Bcc && Inst.getOperand(1).getImm()==ARMCC::AL)
return true;
return MCInstrAnalysis::isUnconditionalBranch(Inst);
}
- virtual bool isConditionalBranch(const MCInst &Inst) const {
+ bool isConditionalBranch(const MCInst &Inst) const override {
// BCCs with the "always" predicate are unconditional branches.
if (Inst.getOpcode() == ARM::Bcc && Inst.getOperand(1).getImm()==ARMCC::AL)
return false;
@@ -295,7 +327,7 @@ public:
}
bool evaluateBranch(const MCInst &Inst, uint64_t Addr,
- uint64_t Size, uint64_t &Target) const {
+ uint64_t Size, uint64_t &Target) const override {
// We only handle PCRel branches for now.
if (Info->get(Inst.getOpcode()).OpInfo[0].OperandType!=MCOI::OPERAND_PCREL)
return false;
@@ -316,56 +348,100 @@ static MCInstrAnalysis *createARMMCInstrAnalysis(const MCInstrInfo *Info) {
// Force static initialization.
extern "C" void LLVMInitializeARMTargetMC() {
// Register the MC asm info.
- RegisterMCAsmInfoFn A(TheARMTarget, createARMMCAsmInfo);
- RegisterMCAsmInfoFn B(TheThumbTarget, createARMMCAsmInfo);
+ RegisterMCAsmInfoFn X(TheARMLETarget, createARMMCAsmInfo);
+ RegisterMCAsmInfoFn Y(TheARMBETarget, createARMMCAsmInfo);
+ RegisterMCAsmInfoFn A(TheThumbLETarget, createARMMCAsmInfo);
+ RegisterMCAsmInfoFn B(TheThumbBETarget, createARMMCAsmInfo);
// Register the MC codegen info.
- TargetRegistry::RegisterMCCodeGenInfo(TheARMTarget, createARMMCCodeGenInfo);
- TargetRegistry::RegisterMCCodeGenInfo(TheThumbTarget, createARMMCCodeGenInfo);
+ TargetRegistry::RegisterMCCodeGenInfo(TheARMLETarget, createARMMCCodeGenInfo);
+ TargetRegistry::RegisterMCCodeGenInfo(TheARMBETarget, createARMMCCodeGenInfo);
+ TargetRegistry::RegisterMCCodeGenInfo(TheThumbLETarget, createARMMCCodeGenInfo);
+ TargetRegistry::RegisterMCCodeGenInfo(TheThumbBETarget, createARMMCCodeGenInfo);
// Register the MC instruction info.
- TargetRegistry::RegisterMCInstrInfo(TheARMTarget, createARMMCInstrInfo);
- TargetRegistry::RegisterMCInstrInfo(TheThumbTarget, createARMMCInstrInfo);
+ TargetRegistry::RegisterMCInstrInfo(TheARMLETarget, createARMMCInstrInfo);
+ TargetRegistry::RegisterMCInstrInfo(TheARMBETarget, createARMMCInstrInfo);
+ TargetRegistry::RegisterMCInstrInfo(TheThumbLETarget, createARMMCInstrInfo);
+ TargetRegistry::RegisterMCInstrInfo(TheThumbBETarget, createARMMCInstrInfo);
// Register the MC register info.
- TargetRegistry::RegisterMCRegInfo(TheARMTarget, createARMMCRegisterInfo);
- TargetRegistry::RegisterMCRegInfo(TheThumbTarget, createARMMCRegisterInfo);
+ TargetRegistry::RegisterMCRegInfo(TheARMLETarget, createARMMCRegisterInfo);
+ TargetRegistry::RegisterMCRegInfo(TheARMBETarget, createARMMCRegisterInfo);
+ TargetRegistry::RegisterMCRegInfo(TheThumbLETarget, createARMMCRegisterInfo);
+ TargetRegistry::RegisterMCRegInfo(TheThumbBETarget, createARMMCRegisterInfo);
// Register the MC subtarget info.
- TargetRegistry::RegisterMCSubtargetInfo(TheARMTarget,
+ TargetRegistry::RegisterMCSubtargetInfo(TheARMLETarget,
+ ARM_MC::createARMMCSubtargetInfo);
+ TargetRegistry::RegisterMCSubtargetInfo(TheARMBETarget,
ARM_MC::createARMMCSubtargetInfo);
- TargetRegistry::RegisterMCSubtargetInfo(TheThumbTarget,
+ TargetRegistry::RegisterMCSubtargetInfo(TheThumbLETarget,
+ ARM_MC::createARMMCSubtargetInfo);
+ TargetRegistry::RegisterMCSubtargetInfo(TheThumbBETarget,
ARM_MC::createARMMCSubtargetInfo);
// Register the MC instruction analyzer.
- TargetRegistry::RegisterMCInstrAnalysis(TheARMTarget,
+ TargetRegistry::RegisterMCInstrAnalysis(TheARMLETarget,
+ createARMMCInstrAnalysis);
+ TargetRegistry::RegisterMCInstrAnalysis(TheARMBETarget,
+ createARMMCInstrAnalysis);
+ TargetRegistry::RegisterMCInstrAnalysis(TheThumbLETarget,
createARMMCInstrAnalysis);
- TargetRegistry::RegisterMCInstrAnalysis(TheThumbTarget,
+ TargetRegistry::RegisterMCInstrAnalysis(TheThumbBETarget,
createARMMCInstrAnalysis);
// Register the MC Code Emitter
- TargetRegistry::RegisterMCCodeEmitter(TheARMTarget, createARMMCCodeEmitter);
- TargetRegistry::RegisterMCCodeEmitter(TheThumbTarget, createARMMCCodeEmitter);
+ TargetRegistry::RegisterMCCodeEmitter(TheARMLETarget,
+ createARMLEMCCodeEmitter);
+ TargetRegistry::RegisterMCCodeEmitter(TheARMBETarget,
+ createARMBEMCCodeEmitter);
+ TargetRegistry::RegisterMCCodeEmitter(TheThumbLETarget,
+ createARMLEMCCodeEmitter);
+ TargetRegistry::RegisterMCCodeEmitter(TheThumbBETarget,
+ createARMBEMCCodeEmitter);
// Register the asm backend.
- TargetRegistry::RegisterMCAsmBackend(TheARMTarget, createARMAsmBackend);
- TargetRegistry::RegisterMCAsmBackend(TheThumbTarget, createARMAsmBackend);
+ TargetRegistry::RegisterMCAsmBackend(TheARMLETarget, createARMLEAsmBackend);
+ TargetRegistry::RegisterMCAsmBackend(TheARMBETarget, createARMBEAsmBackend);
+ TargetRegistry::RegisterMCAsmBackend(TheThumbLETarget,
+ createThumbLEAsmBackend);
+ TargetRegistry::RegisterMCAsmBackend(TheThumbBETarget,
+ createThumbBEAsmBackend);
// Register the object streamer.
- TargetRegistry::RegisterMCObjectStreamer(TheARMTarget, createMCStreamer);
- TargetRegistry::RegisterMCObjectStreamer(TheThumbTarget, createMCStreamer);
+ TargetRegistry::RegisterMCObjectStreamer(TheARMLETarget, createMCStreamer);
+ TargetRegistry::RegisterMCObjectStreamer(TheARMBETarget, createMCStreamer);
+ TargetRegistry::RegisterMCObjectStreamer(TheThumbLETarget, createMCStreamer);
+ TargetRegistry::RegisterMCObjectStreamer(TheThumbBETarget, createMCStreamer);
// Register the asm streamer.
- TargetRegistry::RegisterAsmStreamer(TheARMTarget, createMCAsmStreamer);
- TargetRegistry::RegisterAsmStreamer(TheThumbTarget, createMCAsmStreamer);
+ TargetRegistry::RegisterAsmStreamer(TheARMLETarget, createMCAsmStreamer);
+ TargetRegistry::RegisterAsmStreamer(TheARMBETarget, createMCAsmStreamer);
+ TargetRegistry::RegisterAsmStreamer(TheThumbLETarget, createMCAsmStreamer);
+ TargetRegistry::RegisterAsmStreamer(TheThumbBETarget, createMCAsmStreamer);
+
+ // Register the null streamer.
+ TargetRegistry::RegisterNullStreamer(TheARMLETarget, createARMNullStreamer);
+ TargetRegistry::RegisterNullStreamer(TheARMBETarget, createARMNullStreamer);
+ TargetRegistry::RegisterNullStreamer(TheThumbLETarget, createARMNullStreamer);
+ TargetRegistry::RegisterNullStreamer(TheThumbBETarget, createARMNullStreamer);
// Register the MCInstPrinter.
- TargetRegistry::RegisterMCInstPrinter(TheARMTarget, createARMMCInstPrinter);
- TargetRegistry::RegisterMCInstPrinter(TheThumbTarget, createARMMCInstPrinter);
+ TargetRegistry::RegisterMCInstPrinter(TheARMLETarget, createARMMCInstPrinter);
+ TargetRegistry::RegisterMCInstPrinter(TheARMBETarget, createARMMCInstPrinter);
+ TargetRegistry::RegisterMCInstPrinter(TheThumbLETarget,
+ createARMMCInstPrinter);
+ TargetRegistry::RegisterMCInstPrinter(TheThumbBETarget,
+ createARMMCInstPrinter);
// Register the MC relocation info.
- TargetRegistry::RegisterMCRelocationInfo(TheARMTarget,
+ TargetRegistry::RegisterMCRelocationInfo(TheARMLETarget,
+ createARMMCRelocationInfo);
+ TargetRegistry::RegisterMCRelocationInfo(TheARMBETarget,
+ createARMMCRelocationInfo);
+ TargetRegistry::RegisterMCRelocationInfo(TheThumbLETarget,
createARMMCRelocationInfo);
- TargetRegistry::RegisterMCRelocationInfo(TheThumbTarget,
+ TargetRegistry::RegisterMCRelocationInfo(TheThumbBETarget,
createARMMCRelocationInfo);
}
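
With the big-endian variants added, every MC component above is registered four times (ARM/Thumb × little/big endian). A hypothetical way to keep each registration body in one place would be to loop over the targets; this is only a sketch of a possible refactor with stand-in types, not what the file actually does:

#include <functional>
#include <initializer_list>
#include <iostream>
#include <string>

// Hypothetical stand-in for llvm::Target.
struct Target { std::string Name; };

int main() {
  Target ARMLE{"arm"}, ARMBE{"armeb"}, ThumbLE{"thumb"}, ThumbBE{"thumbeb"};

  // Apply one registration body to all four targets instead of repeating
  // every RegisterXxx call per target.
  auto forEachTarget = [&](const std::function<void(Target &)> &Fn) {
    for (Target *T : {&ARMLE, &ARMBE, &ThumbLE, &ThumbBE})
      Fn(*T);
  };

  forEachTarget([](Target &T) {
    std::cout << "register MC components for " << T.Name << '\n';
  });
  return 0;
}
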
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
index 959be8b..5326e56 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
@@ -33,7 +33,8 @@ class StringRef;
class Target;
class raw_ostream;
-extern Target TheARMTarget, TheThumbTarget;
+extern Target TheARMLETarget, TheThumbLETarget;
+extern Target TheARMBETarget, TheThumbBETarget;
namespace ARM_MC {
std::string ParseARMTriple(StringRef TT, StringRef CPU);
@@ -46,22 +47,47 @@ namespace ARM_MC {
}
MCStreamer *createMCAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
- bool isVerboseAsm, bool useLoc, bool useCFI,
- bool useDwarfDirectory,
+ bool isVerboseAsm, bool useDwarfDirectory,
MCInstPrinter *InstPrint, MCCodeEmitter *CE,
MCAsmBackend *TAB, bool ShowInst);
-MCCodeEmitter *createARMMCCodeEmitter(const MCInstrInfo &MCII,
- const MCRegisterInfo &MRI,
- const MCSubtargetInfo &STI,
- MCContext &Ctx);
+MCStreamer *createARMNullStreamer(MCContext &Ctx);
+
+MCCodeEmitter *createARMLEMCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx);
+
+MCCodeEmitter *createARMBEMCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx);
MCAsmBackend *createARMAsmBackend(const Target &T, const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU,
+ bool IsLittleEndian);
+
+MCAsmBackend *createARMLEAsmBackend(const Target &T, const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
+
+MCAsmBackend *createARMBEAsmBackend(const Target &T, const MCRegisterInfo &MRI,
StringRef TT, StringRef CPU);
+MCAsmBackend *createThumbLEAsmBackend(const Target &T, const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
+
+MCAsmBackend *createThumbBEAsmBackend(const Target &T, const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
+
+/// createARMWinCOFFStreamer - Construct a PE/COFF machine code streamer which
+/// will generate a PE/COFF object file.
+MCStreamer *createARMWinCOFFStreamer(MCContext &Context, MCAsmBackend &MAB,
+ MCCodeEmitter &Emitter, raw_ostream &OS);
+
/// createARMELFObjectWriter - Construct an ARM ELF object writer.
MCObjectWriter *createARMELFObjectWriter(raw_ostream &OS,
- uint8_t OSABI);
+ uint8_t OSABI,
+ bool IsLittleEndian);
/// createARMMachObjectWriter - Construct an ARM Mach-O object writer.
MCObjectWriter *createARMMachObjectWriter(raw_ostream &OS,
@@ -69,6 +95,8 @@ MCObjectWriter *createARMMachObjectWriter(raw_ostream &OS,
uint32_t CPUType,
uint32_t CPUSubtype);
+/// createARMWinCOFFObjectWriter - Construct an ARM PE/COFF object writer.
+MCObjectWriter *createARMWinCOFFObjectWriter(raw_ostream &OS, bool Is64Bit);
/// createARMMachORelocationInfo - Construct ARM Mach-O relocation info.
MCRelocationInfo *createARMMachORelocationInfo(MCContext &Ctx);
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachORelocationInfo.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachORelocationInfo.cpp
index 807c948..d4b00e6 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachORelocationInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachORelocationInfo.cpp
@@ -9,10 +9,10 @@
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "ARMMCExpr.h"
+#include "llvm-c/Disassembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRelocationInfo.h"
-#include "llvm-c/Disassembler.h"
using namespace llvm;
using namespace object;
@@ -23,7 +23,7 @@ public:
ARMMachORelocationInfo(MCContext &Ctx) : MCRelocationInfo(Ctx) {}
const MCExpr *createExprForCAPIVariantKind(const MCExpr *SubExpr,
- unsigned VariantKind) {
+ unsigned VariantKind) override {
switch(VariantKind) {
case LLVMDisassembler_VariantKind_ARM_HI16:
return ARMMCExpr::CreateUpper16(SubExpr, Ctx);
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
index 1f681ba..186776a 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
@@ -32,6 +32,7 @@ class ARMMachObjectWriter : public MCMachObjectTargetWriter {
const MCFragment *Fragment,
const MCFixup &Fixup,
MCValue Target,
+ unsigned Type,
unsigned Log2Size,
uint64_t &FixedValue);
void RecordARMScatteredHalfRelocation(MachObjectWriter *Writer,
@@ -56,7 +57,7 @@ public:
void RecordRelocation(MachObjectWriter *Writer,
const MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment *Fragment, const MCFixup &Fixup,
- MCValue Target, uint64_t &FixedValue);
+ MCValue Target, uint64_t &FixedValue) override;
};
}
@@ -82,10 +83,14 @@ static bool getARMFixupKindMachOInfo(unsigned Kind, unsigned &RelocType,
Log2Size = llvm::Log2_32(8);
return true;
- // Handle 24-bit branch kinds.
+ // These fixups are expected to always be resolvable at assembly time and
+ // have no relocations supported.
case ARM::fixup_arm_ldst_pcrel_12:
case ARM::fixup_arm_pcrel_10:
case ARM::fixup_arm_adr_pcrel_12:
+ return false;
+
+ // Handle 24-bit branch kinds.
case ARM::fixup_arm_condbranch:
case ARM::fixup_arm_uncondbranch:
case ARM::fixup_arm_uncondbl:
@@ -119,23 +124,19 @@ static bool getARMFixupKindMachOInfo(unsigned Kind, unsigned &RelocType,
// 0 - arm instructions
// 1 - thumb instructions
case ARM::fixup_arm_movt_hi16:
- case ARM::fixup_arm_movt_hi16_pcrel:
RelocType = unsigned(MachO::ARM_RELOC_HALF);
Log2Size = 1;
return true;
case ARM::fixup_t2_movt_hi16:
- case ARM::fixup_t2_movt_hi16_pcrel:
RelocType = unsigned(MachO::ARM_RELOC_HALF);
Log2Size = 3;
return true;
case ARM::fixup_arm_movw_lo16:
- case ARM::fixup_arm_movw_lo16_pcrel:
RelocType = unsigned(MachO::ARM_RELOC_HALF);
Log2Size = 0;
return true;
case ARM::fixup_t2_movw_lo16:
- case ARM::fixup_t2_movw_lo16_pcrel:
RelocType = unsigned(MachO::ARM_RELOC_HALF);
Log2Size = 2;
return true;
@@ -156,7 +157,7 @@ RecordARMScatteredHalfRelocation(MachObjectWriter *Writer,
// See <reloc.h>.
const MCSymbol *A = &Target.getSymA()->getSymbol();
- MCSymbolData *A_SD = &Asm.getSymbolData(*A);
+ const MCSymbolData *A_SD = &Asm.getSymbolData(*A);
if (!A_SD->getFragment())
Asm.getContext().FatalError(Fixup.getLoc(),
@@ -170,7 +171,7 @@ RecordARMScatteredHalfRelocation(MachObjectWriter *Writer,
FixedValue += SecAddr;
if (const MCSymbolRefExpr *B = Target.getSymB()) {
- MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol());
+ const MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol());
if (!B_SD->getFragment())
Asm.getContext().FatalError(Fixup.getLoc(),
@@ -202,22 +203,19 @@ RecordARMScatteredHalfRelocation(MachObjectWriter *Writer,
switch ((unsigned)Fixup.getKind()) {
default: break;
case ARM::fixup_arm_movt_hi16:
- case ARM::fixup_arm_movt_hi16_pcrel:
MovtBit = 1;
// The thumb bit shouldn't be set in the 'other-half' bit of the
// relocation, but it will be set in FixedValue if the base symbol
// is a thumb function. Clear it out here.
- if (A_SD->getFlags() & SF_ThumbFunc)
+ if (Asm.isThumbFunc(A))
FixedValue &= 0xfffffffe;
break;
case ARM::fixup_t2_movt_hi16:
- case ARM::fixup_t2_movt_hi16_pcrel:
- if (A_SD->getFlags() & SF_ThumbFunc)
+ if (Asm.isThumbFunc(A))
FixedValue &= 0xfffffffe;
MovtBit = 1;
// Fallthrough
case ARM::fixup_t2_movw_lo16:
- case ARM::fixup_t2_movw_lo16_pcrel:
ThumbBit = 1;
break;
}
@@ -254,15 +252,15 @@ void ARMMachObjectWriter::RecordARMScatteredRelocation(MachObjectWriter *Writer,
const MCFragment *Fragment,
const MCFixup &Fixup,
MCValue Target,
+ unsigned Type,
unsigned Log2Size,
uint64_t &FixedValue) {
uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind());
- unsigned Type = MachO::ARM_RELOC_VANILLA;
// See <reloc.h>.
const MCSymbol *A = &Target.getSymA()->getSymbol();
- MCSymbolData *A_SD = &Asm.getSymbolData(*A);
+ const MCSymbolData *A_SD = &Asm.getSymbolData(*A);
if (!A_SD->getFragment())
Asm.getContext().FatalError(Fixup.getLoc(),
@@ -275,7 +273,8 @@ void ARMMachObjectWriter::RecordARMScatteredRelocation(MachObjectWriter *Writer,
uint32_t Value2 = 0;
if (const MCSymbolRefExpr *B = Target.getSymB()) {
- MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol());
+ assert(Type == MachO::ARM_RELOC_VANILLA && "invalid reloc for 2 symbols");
+ const MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol());
if (!B_SD->getFragment())
Asm.getContext().FatalError(Fixup.getLoc(),
@@ -377,11 +376,12 @@ void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer,
return RecordARMScatteredHalfRelocation(Writer, Asm, Layout, Fragment,
Fixup, Target, FixedValue);
return RecordARMScatteredRelocation(Writer, Asm, Layout, Fragment, Fixup,
- Target, Log2Size, FixedValue);
+ Target, RelocType, Log2Size,
+ FixedValue);
}
// Get the symbol data, if any.
- MCSymbolData *SD = 0;
+ const MCSymbolData *SD = nullptr;
if (Target.getSymA())
SD = &Asm.getSymbolData(Target.getSymA()->getSymbol());
@@ -395,7 +395,8 @@ void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer,
Offset += 1 << Log2Size;
if (Offset && SD && !Writer->doesSymbolRequireExternRelocation(SD))
return RecordARMScatteredRelocation(Writer, Asm, Layout, Fragment, Fixup,
- Target, Log2Size, FixedValue);
+ Target, RelocType, Log2Size,
+ FixedValue);
// See <reloc.h>.
uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
@@ -461,15 +462,11 @@ void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer,
switch ((unsigned)Fixup.getKind()) {
default: break;
case ARM::fixup_arm_movw_lo16:
- case ARM::fixup_arm_movw_lo16_pcrel:
case ARM::fixup_t2_movw_lo16:
- case ARM::fixup_t2_movw_lo16_pcrel:
Value = (FixedValue >> 16) & 0xffff;
break;
case ARM::fixup_arm_movt_hi16:
- case ARM::fixup_arm_movt_hi16_pcrel:
case ARM::fixup_t2_movt_hi16:
- case ARM::fixup_t2_movt_hi16_pcrel:
Value = FixedValue & 0xffff;
break;
}
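
For the ARM_RELOC_HALF cases above, the relocation records the half of the resolved value that the fixed-up instruction itself does not carry: a movw/lo16 fixup stores the high 16 bits and a movt/hi16 fixup stores the low 16 bits. A worked example of that split, using plain arithmetic rather than the MC types:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t FixedValue = 0x1234ABCDu; // fully resolved 32-bit value

  // The movw (lo16) instruction encodes the low half, so its relocation's
  // other-half field carries the high half; movt (hi16) is the opposite.
  uint16_t OtherHalfForLo16 = (FixedValue >> 16) & 0xffff; // 0x1234
  uint16_t OtherHalfForHi16 = FixedValue & 0xffff;         // 0xabcd

  std::printf("lo16 other-half=0x%04x  hi16 other-half=0x%04x\n",
              OtherHalfForLo16, OtherHalfForHi16);
  return 0;
}
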
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp
new file mode 100644
index 0000000..8acd7af
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp
@@ -0,0 +1,73 @@
+//===- ARMTargetStreamer.cpp - ARMTargetStreamer class --*- C++ -*---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ARMTargetStreamer class.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/ADT/MapVector.h"
+#include "llvm/MC/ConstantPools.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCStreamer.h"
+
+using namespace llvm;
+//
+// ARMTargetStreamer Implementation
+//
+ARMTargetStreamer::ARMTargetStreamer(MCStreamer &S)
+ : MCTargetStreamer(S), ConstantPools(new AssemblerConstantPools()) {}
+
+ARMTargetStreamer::~ARMTargetStreamer() {}
+
+// The constant pool handling is shared by all ARMTargetStreamer
+// implementations.
+const MCExpr *ARMTargetStreamer::addConstantPoolEntry(const MCExpr *Expr) {
+ return ConstantPools->addEntry(Streamer, Expr, 4);
+}
+
+void ARMTargetStreamer::emitCurrentConstantPool() {
+ ConstantPools->emitForCurrentSection(Streamer);
+}
+
+// finish() - write out any non-empty assembler constant pools.
+void ARMTargetStreamer::finish() { ConstantPools->emitAll(Streamer); }
+
+// The remaining callbacks should be handled separately by each
+// streamer.
+void ARMTargetStreamer::emitFnStart() {}
+void ARMTargetStreamer::emitFnEnd() {}
+void ARMTargetStreamer::emitCantUnwind() {}
+void ARMTargetStreamer::emitPersonality(const MCSymbol *Personality) {}
+void ARMTargetStreamer::emitPersonalityIndex(unsigned Index) {}
+void ARMTargetStreamer::emitHandlerData() {}
+void ARMTargetStreamer::emitSetFP(unsigned FpReg, unsigned SpReg,
+ int64_t Offset) {}
+void ARMTargetStreamer::emitMovSP(unsigned Reg, int64_t Offset) {}
+void ARMTargetStreamer::emitPad(int64_t Offset) {}
+void ARMTargetStreamer::emitRegSave(const SmallVectorImpl<unsigned> &RegList,
+ bool isVector) {}
+void ARMTargetStreamer::emitUnwindRaw(int64_t StackOffset,
+ const SmallVectorImpl<uint8_t> &Opcodes) {
+}
+void ARMTargetStreamer::switchVendor(StringRef Vendor) {}
+void ARMTargetStreamer::emitAttribute(unsigned Attribute, unsigned Value) {}
+void ARMTargetStreamer::emitTextAttribute(unsigned Attribute,
+ StringRef String) {}
+void ARMTargetStreamer::emitIntTextAttribute(unsigned Attribute,
+ unsigned IntValue,
+ StringRef StringValue) {}
+void ARMTargetStreamer::emitArch(unsigned Arch) {}
+void ARMTargetStreamer::emitObjectArch(unsigned Arch) {}
+void ARMTargetStreamer::emitFPU(unsigned FPU) {}
+void ARMTargetStreamer::finishAttributeSection() {}
+void ARMTargetStreamer::emitInst(uint32_t Inst, char Suffix) {}
+void
+ARMTargetStreamer::AnnotateTLSDescriptorSequence(const MCSymbolRefExpr *SRE) {}
+
+void ARMTargetStreamer::emitThumbSet(MCSymbol *Symbol, const MCExpr *Value) {}
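
The new ARMTargetStreamer gives every target-specific hook an empty default body, so the asm, ELF, and null streamers only override what they need while constant-pool handling stays in the base class. A compact sketch of that "optional hooks" shape, with hypothetical names rather than the MC hierarchy:

#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical base: shared constant-pool state plus no-op virtual hooks.
class TargetStreamerBase {
  std::vector<uint32_t> ConstantPool;
public:
  virtual ~TargetStreamerBase() = default;
  void addConstantPoolEntry(uint32_t V) { ConstantPool.push_back(V); }
  void finish() { std::cout << ConstantPool.size() << " pool entries\n"; }

  // Defaults do nothing; only interested subclasses override.
  virtual void emitFnStart() {}
  virtual void emitPad(int64_t /*Offset*/) {}
};

// An ELF-like subclass overrides just the hooks it needs.
class ElfLikeStreamer : public TargetStreamerBase {
public:
  void emitFnStart() override { std::cout << ".fnstart\n"; }
};

int main() {
  ElfLikeStreamer S;
  S.emitFnStart();            // overridden
  S.emitPad(8);               // inherited no-op
  S.addConstantPoolEntry(42); // shared base-class behaviour
  S.finish();
  return 0;
}
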
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOp.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOp.h
deleted file mode 100644
index fa4add6..0000000
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOp.h
+++ /dev/null
@@ -1,125 +0,0 @@
-//===-- ARMUnwindOp.h - ARM Unwind Opcodes ----------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the constants for the ARM unwind opcodes and exception
-// handling table entry kinds.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ARM_UNWIND_OP_H
-#define ARM_UNWIND_OP_H
-
-namespace llvm {
-
- /// ARM exception handling table entry kinds
- enum ARMEHTEntryKind {
- EHT_GENERIC = 0x00,
- EHT_COMPACT = 0x80
- };
-
- enum {
- /// Special entry for the function never unwind
- EXIDX_CANTUNWIND = 0x1
- };
-
- /// ARM-defined frame unwinding opcodes
- enum ARMUnwindOpcodes {
- // Format: 00xxxxxx
- // Purpose: vsp = vsp + ((x << 2) + 4)
- UNWIND_OPCODE_INC_VSP = 0x00,
-
- // Format: 01xxxxxx
- // Purpose: vsp = vsp - ((x << 2) + 4)
- UNWIND_OPCODE_DEC_VSP = 0x40,
-
- // Format: 10000000 00000000
- // Purpose: refuse to unwind
- UNWIND_OPCODE_REFUSE = 0x8000,
-
- // Format: 1000xxxx xxxxxxxx
- // Purpose: pop r[15:12], r[11:4]
- // Constraint: x != 0
- UNWIND_OPCODE_POP_REG_MASK_R4 = 0x8000,
-
- // Format: 1001xxxx
- // Purpose: vsp = r[x]
- // Constraint: x != 13 && x != 15
- UNWIND_OPCODE_SET_VSP = 0x90,
-
- // Format: 10100xxx
- // Purpose: pop r[(4+x):4]
- UNWIND_OPCODE_POP_REG_RANGE_R4 = 0xa0,
-
- // Format: 10101xxx
- // Purpose: pop r14, r[(4+x):4]
- UNWIND_OPCODE_POP_REG_RANGE_R4_R14 = 0xa8,
-
- // Format: 10110000
- // Purpose: finish
- UNWIND_OPCODE_FINISH = 0xb0,
-
- // Format: 10110001 0000xxxx
- // Purpose: pop r[3:0]
- // Constraint: x != 0
- UNWIND_OPCODE_POP_REG_MASK = 0xb100,
-
- // Format: 10110010 x(uleb128)
- // Purpose: vsp = vsp + ((x << 2) + 0x204)
- UNWIND_OPCODE_INC_VSP_ULEB128 = 0xb2,
-
- // Format: 10110011 xxxxyyyy
- // Purpose: pop d[(x+y):x]
- UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDX = 0xb300,
-
- // Format: 10111xxx
- // Purpose: pop d[(8+x):8]
- UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDX_D8 = 0xb8,
-
- // Format: 11000xxx
- // Purpose: pop wR[(10+x):10]
- UNWIND_OPCODE_POP_WIRELESS_MMX_REG_RANGE_WR10 = 0xc0,
-
- // Format: 11000110 xxxxyyyy
- // Purpose: pop wR[(x+y):x]
- UNWIND_OPCODE_POP_WIRELESS_MMX_REG_RANGE = 0xc600,
-
- // Format: 11000111 0000xxxx
- // Purpose: pop wCGR[3:0]
- // Constraint: x != 0
- UNWIND_OPCODE_POP_WIRELESS_MMX_REG_MASK = 0xc700,
-
- // Format: 11001000 xxxxyyyy
- // Purpose: pop d[(16+x+y):(16+x)]
- UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD_D16 = 0xc800,
-
- // Format: 11001001 xxxxyyyy
- // Purpose: pop d[(x+y):x]
- UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD = 0xc900,
-
- // Format: 11010xxx
- // Purpose: pop d[(8+x):8]
- UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD_D8 = 0xd0
- };
-
- /// ARM-defined Personality Routine Index
- enum ARMPersonalityRoutineIndex {
- // To make the exception handling table become more compact, ARM defined
- // several personality routines in EHABI. There are 3 different
- // personality routines in ARM EHABI currently. It is possible to have 16
- // pre-defined personality routines at most.
- AEABI_UNWIND_CPP_PR0 = 0,
- AEABI_UNWIND_CPP_PR1 = 1,
- AEABI_UNWIND_CPP_PR2 = 2,
-
- NUM_PERSONALITY_INDEX
- };
-
-}
-
-#endif // ARM_UNWIND_OP_H
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp
index c943370..593fe34 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp
@@ -13,8 +13,7 @@
//===----------------------------------------------------------------------===//
#include "ARMUnwindOpAsm.h"
-
-#include "ARMUnwindOp.h"
+#include "llvm/Support/ARMEHABI.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
@@ -50,14 +49,15 @@ namespace {
/// Emit the personality index prefix.
inline void EmitPersonalityIndex(unsigned PI) {
- assert(PI < NUM_PERSONALITY_INDEX && "Invalid personality prefix");
- EmitByte(EHT_COMPACT | PI);
+ assert(PI < ARM::EHABI::NUM_PERSONALITY_INDEX &&
+ "Invalid personality prefix");
+ EmitByte(ARM::EHABI::EHT_COMPACT | PI);
}
/// Fill the rest of bytes with FINISH opcode.
inline void FillFinishOpcode() {
while (Pos < Vec.size())
- EmitByte(UNWIND_OPCODE_FINISH);
+ EmitByte(ARM::EHABI::UNWIND_OPCODE_FINISH);
}
};
}
@@ -85,22 +85,22 @@ void UnwindOpcodeAssembler::EmitRegSave(uint32_t RegSave) {
uint32_t UnmaskedReg = RegSave & 0xfff0u & (~Mask);
if (UnmaskedReg == 0u) {
// Pop r[4 : (4 + n)]
- EmitInt8(UNWIND_OPCODE_POP_REG_RANGE_R4 | Range);
+ EmitInt8(ARM::EHABI::UNWIND_OPCODE_POP_REG_RANGE_R4 | Range);
RegSave &= 0x000fu;
} else if (UnmaskedReg == (1u << 14)) {
// Pop r[14] + r[4 : (4 + n)]
- EmitInt8(UNWIND_OPCODE_POP_REG_RANGE_R4_R14 | Range);
+ EmitInt8(ARM::EHABI::UNWIND_OPCODE_POP_REG_RANGE_R4_R14 | Range);
RegSave &= 0x000fu;
}
}
// Two-byte opcode to save registers r15-r4
if ((RegSave & 0xfff0u) != 0)
- EmitInt16(UNWIND_OPCODE_POP_REG_MASK_R4 | (RegSave >> 4));
+ EmitInt16(ARM::EHABI::UNWIND_OPCODE_POP_REG_MASK_R4 | (RegSave >> 4));
// Opcode to save registers r3-r0
if ((RegSave & 0x000fu) != 0)
- EmitInt16(UNWIND_OPCODE_POP_REG_MASK | (RegSave & 0x000fu));
+ EmitInt16(ARM::EHABI::UNWIND_OPCODE_POP_REG_MASK | (RegSave & 0x000fu));
}
/// Emit unwind opcodes for .vsave directives
@@ -125,7 +125,7 @@ void UnwindOpcodeAssembler::EmitVFPRegSave(uint32_t VFPRegSave) {
Bit >>= 1;
}
- EmitInt16(UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD_D16 |
+ EmitInt16(ARM::EHABI::UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD_D16 |
((i - 16) << 4) | Range);
}
@@ -147,34 +147,36 @@ void UnwindOpcodeAssembler::EmitVFPRegSave(uint32_t VFPRegSave) {
Bit >>= 1;
}
- EmitInt16(UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD | (i << 4) | Range);
+ EmitInt16(ARM::EHABI::UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD | (i << 4) |
+ Range);
}
}
/// Emit unwind opcodes to copy address from source register to $sp.
void UnwindOpcodeAssembler::EmitSetSP(uint16_t Reg) {
- EmitInt8(UNWIND_OPCODE_SET_VSP | Reg);
+ EmitInt8(ARM::EHABI::UNWIND_OPCODE_SET_VSP | Reg);
}
/// Emit unwind opcodes to add $sp with an offset.
void UnwindOpcodeAssembler::EmitSPOffset(int64_t Offset) {
if (Offset > 0x200) {
uint8_t Buff[16];
- Buff[0] = UNWIND_OPCODE_INC_VSP_ULEB128;
+ Buff[0] = ARM::EHABI::UNWIND_OPCODE_INC_VSP_ULEB128;
size_t ULEBSize = encodeULEB128((Offset - 0x204) >> 2, Buff + 1);
EmitBytes(Buff, ULEBSize + 1);
} else if (Offset > 0) {
if (Offset > 0x100) {
- EmitInt8(UNWIND_OPCODE_INC_VSP | 0x3fu);
+ EmitInt8(ARM::EHABI::UNWIND_OPCODE_INC_VSP | 0x3fu);
Offset -= 0x100;
}
- EmitInt8(UNWIND_OPCODE_INC_VSP | static_cast<uint8_t>((Offset - 4) >> 2));
+ EmitInt8(ARM::EHABI::UNWIND_OPCODE_INC_VSP |
+ static_cast<uint8_t>((Offset - 4) >> 2));
} else if (Offset < 0) {
while (Offset < -0x100) {
- EmitInt8(UNWIND_OPCODE_DEC_VSP | 0x3fu);
+ EmitInt8(ARM::EHABI::UNWIND_OPCODE_DEC_VSP | 0x3fu);
Offset += 0x100;
}
- EmitInt8(UNWIND_OPCODE_DEC_VSP |
+ EmitInt8(ARM::EHABI::UNWIND_OPCODE_DEC_VSP |
static_cast<uint8_t>(((-Offset) - 4) >> 2));
}
}
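
EmitSPOffset follows the ARM EHABI encoding: one 00xxxxxx byte adds (x << 2) + 4 to vsp (at most 0x100 per byte), and offsets above 0x200 switch to opcode 0xb2 followed by a ULEB128 payload of (Offset - 0x204) >> 2. A worked check of that arithmetic, independent of the assembler classes:

#include <cstdint>
#include <cstdio>

int main() {
  // Small increment: one INC_VSP byte 00xxxxxx adds (x << 2) + 4.
  int64_t Small = 0x40;
  unsigned X = static_cast<unsigned>((Small - 4) >> 2); // x = 0x0f
  std::printf("INC_VSP byte 0x%02x adds 0x%llx\n",
              X, (unsigned long long)((X << 2) + 4));   // adds 0x40

  // Large increment: opcode 0xb2 + ULEB128((Offset - 0x204) >> 2),
  // which unwinds as vsp += (payload << 2) + 0x204.
  int64_t Large = 0x400;
  unsigned long long Payload = (unsigned long long)((Large - 0x204) >> 2);
  std::printf("INC_VSP_ULEB128 payload %llu restores 0x%llx\n",
              Payload, (Payload << 2) + 0x204);         // restores 0x400
  return 0;
}
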
@@ -186,20 +188,23 @@ void UnwindOpcodeAssembler::Finalize(unsigned &PersonalityIndex,
if (HasPersonality) {
// User-specified personality routine: [ SIZE , OP1 , OP2 , ... ]
- PersonalityIndex = NUM_PERSONALITY_INDEX;
+ PersonalityIndex = ARM::EHABI::NUM_PERSONALITY_INDEX;
size_t TotalSize = Ops.size() + 1;
size_t RoundUpSize = (TotalSize + 3) / 4 * 4;
Result.resize(RoundUpSize);
OpStreamer.EmitSize(RoundUpSize);
} else {
- if (Ops.size() <= 3) {
+      // If no personality index is specified, select one.
+ if (PersonalityIndex == ARM::EHABI::NUM_PERSONALITY_INDEX)
+ PersonalityIndex = (Ops.size() <= 3) ? ARM::EHABI::AEABI_UNWIND_CPP_PR0
+ : ARM::EHABI::AEABI_UNWIND_CPP_PR1;
+ if (PersonalityIndex == ARM::EHABI::AEABI_UNWIND_CPP_PR0) {
// __aeabi_unwind_cpp_pr0: [ 0x80 , OP1 , OP2 , OP3 ]
- PersonalityIndex = AEABI_UNWIND_CPP_PR0;
+ assert(Ops.size() <= 3 && "too many opcodes for __aeabi_unwind_cpp_pr0");
Result.resize(4);
OpStreamer.EmitPersonalityIndex(PersonalityIndex);
} else {
- // __aeabi_unwind_cpp_pr1: [ 0x81 , SIZE , OP1 , OP2 , ... ]
- PersonalityIndex = AEABI_UNWIND_CPP_PR1;
+ // __aeabi_unwind_cpp_pr{1,2}: [ {0x81,0x82} , SIZE , OP1 , OP2 , ... ]
size_t TotalSize = Ops.size() + 2;
size_t RoundUpSize = (TotalSize + 3) / 4 * 4;
Result.resize(RoundUpSize);
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h
index ac67c6e..cd58759 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h
@@ -15,10 +15,8 @@
#ifndef ARM_UNWIND_OP_ASM_H
#define ARM_UNWIND_OP_ASM_H
-#include "ARMUnwindOp.h"
-
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/ARMEHABI.h"
#include "llvm/Support/DataTypes.h"
namespace llvm {
@@ -45,7 +43,7 @@ public:
HasPersonality = 0;
}
- /// Set the personality index
+ /// Set the personality
void setPersonality(const MCSymbol *Per) {
HasPersonality = 1;
}
@@ -62,6 +60,12 @@ public:
/// Emit unwind opcodes to add $sp with an offset.
void EmitSPOffset(int64_t Offset);
+ /// Emit unwind raw opcodes
+ void EmitRaw(const SmallVectorImpl<uint8_t> &Opcodes) {
+ Ops.insert(Ops.end(), Opcodes.begin(), Opcodes.end());
+ OpBegins.push_back(OpBegins.back() + Opcodes.size());
+ }
+
/// Finalize the unwind opcode sequence for EmitBytes()
void Finalize(unsigned &PersonalityIndex,
SmallVectorImpl<uint8_t> &Result);
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMWinCOFFObjectWriter.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMWinCOFFObjectWriter.cpp
new file mode 100644
index 0000000..d31f1f4
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMWinCOFFObjectWriter.cpp
@@ -0,0 +1,82 @@
+//===-- ARMWinCOFFObjectWriter.cpp - ARM Windows COFF Object Writer -- C++ -==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/ARMFixupKinds.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/MC/MCWinCOFFObjectWriter.h"
+#include "llvm/Support/COFF.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+namespace {
+class ARMWinCOFFObjectWriter : public MCWinCOFFObjectTargetWriter {
+public:
+ ARMWinCOFFObjectWriter(bool Is64Bit)
+ : MCWinCOFFObjectTargetWriter(COFF::IMAGE_FILE_MACHINE_ARMNT) {
+ assert(!Is64Bit && "AArch64 support not yet implemented");
+ }
+ virtual ~ARMWinCOFFObjectWriter() { }
+
+ unsigned getRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsCrossSection) const override;
+
+ bool recordRelocation(const MCFixup &) const override;
+};
+
+unsigned ARMWinCOFFObjectWriter::getRelocType(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsCrossSection) const {
+ assert(getMachine() == COFF::IMAGE_FILE_MACHINE_ARMNT &&
+ "AArch64 support not yet implemented");
+
+ MCSymbolRefExpr::VariantKind Modifier =
+ Target.isAbsolute() ? MCSymbolRefExpr::VK_None : Target.getSymA()->getKind();
+
+ switch (static_cast<unsigned>(Fixup.getKind())) {
+ default: llvm_unreachable("unsupported relocation type");
+ case FK_Data_4:
+ switch (Modifier) {
+ case MCSymbolRefExpr::VK_COFF_IMGREL32:
+ return COFF::IMAGE_REL_ARM_ADDR32NB;
+ case MCSymbolRefExpr::VK_SECREL:
+ return COFF::IMAGE_REL_ARM_SECREL;
+ default:
+ return COFF::IMAGE_REL_ARM_ADDR32;
+ }
+ case FK_SecRel_2:
+ return COFF::IMAGE_REL_ARM_SECTION;
+ case FK_SecRel_4:
+ return COFF::IMAGE_REL_ARM_SECREL;
+ case ARM::fixup_t2_condbranch:
+ return COFF::IMAGE_REL_ARM_BRANCH20T;
+ case ARM::fixup_t2_uncondbranch:
+ return COFF::IMAGE_REL_ARM_BRANCH24T;
+ case ARM::fixup_arm_thumb_bl:
+ case ARM::fixup_arm_thumb_blx:
+ return COFF::IMAGE_REL_ARM_BLX23T;
+ case ARM::fixup_t2_movw_lo16:
+ case ARM::fixup_t2_movt_hi16:
+ return COFF::IMAGE_REL_ARM_MOV32T;
+ }
+}
+
+bool ARMWinCOFFObjectWriter::recordRelocation(const MCFixup &Fixup) const {
+ return static_cast<unsigned>(Fixup.getKind()) != ARM::fixup_t2_movt_hi16;
+}
+}
+
+namespace llvm {
+MCObjectWriter *createARMWinCOFFObjectWriter(raw_ostream &OS, bool Is64Bit) {
+ MCWinCOFFObjectTargetWriter *MOTW = new ARMWinCOFFObjectWriter(Is64Bit);
+ return createWinCOFFObjectWriter(MOTW, OS);
+}
+}
+
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp
new file mode 100644
index 0000000..b344ced
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp
@@ -0,0 +1,46 @@
+//===-- ARMWinCOFFStreamer.cpp - ARM Target WinCOFF Streamer ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ARMMCTargetDesc.h"
+#include "llvm/MC/MCWinCOFFStreamer.h"
+
+using namespace llvm;
+
+namespace {
+class ARMWinCOFFStreamer : public MCWinCOFFStreamer {
+public:
+ ARMWinCOFFStreamer(MCContext &C, MCAsmBackend &AB, MCCodeEmitter &CE,
+ raw_ostream &OS)
+ : MCWinCOFFStreamer(C, AB, CE, OS) { }
+
+ void EmitAssemblerFlag(MCAssemblerFlag Flag) override;
+ void EmitThumbFunc(MCSymbol *Symbol) override;
+};
+
+void ARMWinCOFFStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
+ switch (Flag) {
+ default: llvm_unreachable("not implemented");
+ case MCAF_SyntaxUnified:
+ case MCAF_Code16:
+ break;
+ }
+}
+
+void ARMWinCOFFStreamer::EmitThumbFunc(MCSymbol *Symbol) {
+ getAssembler().setIsThumbFunc(Symbol);
+}
+}
+
+namespace llvm {
+MCStreamer *createARMWinCOFFStreamer(MCContext &Context, MCAsmBackend &MAB,
+ MCCodeEmitter &Emitter, raw_ostream &OS) {
+ return new ARMWinCOFFStreamer(Context, MAB, Emitter, OS);
+}
+}
+
diff --git a/contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp b/contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp
index 2e266c2..f6d24e9e 100644
--- a/contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp
+++ b/contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mlx-expansion"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMSubtarget.h"
@@ -28,6 +27,8 @@
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "mlx-expansion"
+
static cl::opt<bool>
ForceExapnd("expand-all-fp-mlx", cl::init(false), cl::Hidden);
static cl::opt<unsigned>
@@ -40,9 +41,9 @@ namespace {
static char ID;
MLxExpansion() : MachineFunctionPass(ID) {}
- virtual bool runOnMachineFunction(MachineFunction &Fn);
+ bool runOnMachineFunction(MachineFunction &Fn) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "ARM MLA / MLS expansion pass";
}
@@ -73,7 +74,7 @@ namespace {
}
void MLxExpansion::clearStack() {
- std::fill(LastMIs, LastMIs + 4, (MachineInstr*)0);
+ std::fill(LastMIs, LastMIs + 4, nullptr);
MIIdx = 0;
}
@@ -88,7 +89,7 @@ MachineInstr *MLxExpansion::getAccDefMI(MachineInstr *MI) const {
// real definition MI. This is important for _sfp instructions.
unsigned Reg = MI->getOperand(1).getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg))
- return 0;
+ return nullptr;
MachineBasicBlock *MBB = MI->getParent();
MachineInstr *DefMI = MRI->getVRegDef(Reg);
@@ -120,7 +121,7 @@ unsigned MLxExpansion::getDefReg(MachineInstr *MI) const {
return Reg;
MachineBasicBlock *MBB = MI->getParent();
- MachineInstr *UseMI = &*MRI->use_nodbg_begin(Reg);
+ MachineInstr *UseMI = &*MRI->use_instr_nodbg_begin(Reg);
if (UseMI->getParent() != MBB)
return Reg;
@@ -129,7 +130,7 @@ unsigned MLxExpansion::getDefReg(MachineInstr *MI) const {
if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
!MRI->hasOneNonDBGUse(Reg))
return Reg;
- UseMI = &*MRI->use_nodbg_begin(Reg);
+ UseMI = &*MRI->use_instr_nodbg_begin(Reg);
if (UseMI->getParent() != MBB)
return Reg;
}
@@ -312,9 +313,9 @@ MLxExpansion::ExpandFPMLxInstruction(MachineBasicBlock &MBB, MachineInstr *MI,
dbgs() << "Expanding: " << *MI;
dbgs() << " to:\n";
MachineBasicBlock::iterator MII = MI;
- MII = llvm::prior(MII);
+ MII = std::prev(MII);
MachineInstr &MI2 = *MII;
- MII = llvm::prior(MII);
+ MII = std::prev(MII);
MachineInstr &MI1 = *MII;
dbgs() << " " << MI1;
dbgs() << " " << MI2;
@@ -335,7 +336,7 @@ bool MLxExpansion::ExpandFPMLxInstructions(MachineBasicBlock &MBB) {
while (MII != E) {
MachineInstr *MI = &*MII;
- if (MI->isLabel() || MI->isImplicitDef() || MI->isCopy()) {
+ if (MI->isPosition() || MI->isImplicitDef() || MI->isCopy()) {
++MII;
continue;
}
@@ -352,7 +353,7 @@ bool MLxExpansion::ExpandFPMLxInstructions(MachineBasicBlock &MBB) {
if (Domain == ARMII::DomainGeneral) {
if (++Skip == 2)
// Assume dual issues of non-VFP / NEON instructions.
- pushStack(0);
+ pushStack(nullptr);
} else {
Skip = 0;
@@ -385,11 +386,8 @@ bool MLxExpansion::runOnMachineFunction(MachineFunction &Fn) {
isSwift = STI->isSwift();
bool Modified = false;
- for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
- ++MFI) {
- MachineBasicBlock &MBB = *MFI;
+ for (MachineBasicBlock &MBB : Fn)
Modified |= ExpandFPMLxInstructions(MBB);
- }
return Modified;
}
diff --git a/contrib/llvm/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp b/contrib/llvm/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp
index fa5681f..e464671 100644
--- a/contrib/llvm/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp
@@ -7,17 +7,22 @@
//
//===----------------------------------------------------------------------===//
-#include "ARM.h"
+#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
-Target llvm::TheARMTarget, llvm::TheThumbTarget;
+Target llvm::TheARMLETarget, llvm::TheARMBETarget;
+Target llvm::TheThumbLETarget, llvm::TheThumbBETarget;
extern "C" void LLVMInitializeARMTargetInfo() {
RegisterTarget<Triple::arm, /*HasJIT=*/true>
- X(TheARMTarget, "arm", "ARM");
+ X(TheARMLETarget, "arm", "ARM");
+ RegisterTarget<Triple::armeb, /*HasJIT=*/true>
+ Y(TheARMBETarget, "armeb", "ARM (big endian)");
RegisterTarget<Triple::thumb, /*HasJIT=*/true>
- Y(TheThumbTarget, "thumb", "Thumb");
+ A(TheThumbLETarget, "thumb", "Thumb");
+ RegisterTarget<Triple::thumbeb, /*HasJIT=*/true>
+ B(TheThumbBETarget, "thumbeb", "Thumb (big endian)");
}
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp b/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
index cfb33f5..baa97a7 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
@@ -16,10 +16,14 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;
+Thumb1FrameLowering::Thumb1FrameLowering(const ARMSubtarget &sti)
+ : ARMFrameLowering(sti) {}
+
bool Thumb1FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const{
const MachineFrameInfo *FFI = MF.getFrameInfo();
unsigned CFSize = FFI->getMaxCallFrameSize();
@@ -83,6 +87,8 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock::iterator MBBI = MBB.begin();
MachineFrameInfo *MFI = MF.getFrameInfo();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+ MachineModuleInfo &MMI = MF.getMMI();
+ const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
const Thumb1RegisterInfo *RegInfo =
static_cast<const Thumb1RegisterInfo*>(MF.getTarget().getRegisterInfo());
const Thumb1InstrInfo &TII =
@@ -91,10 +97,13 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment();
unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize(Align);
unsigned NumBytes = MFI->getStackSize();
+ assert(NumBytes >= ArgRegsSaveSize &&
+ "ArgRegsSaveSize is included in NumBytes");
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
unsigned FramePtr = RegInfo->getFrameRegister(MF);
unsigned BasePtr = RegInfo->getBaseRegister();
+ int CFAOffset = 0;
// Thumb add/sub sp, imm8 instructions implicitly multiply the offset by 4.
NumBytes = (NumBytes + 3) & ~3;
@@ -105,14 +114,26 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
int FramePtrSpillFI = 0;
- if (ArgRegsSaveSize)
+ if (ArgRegsSaveSize) {
emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, -ArgRegsSaveSize,
MachineInstr::FrameSetup);
+ CFAOffset -= ArgRegsSaveSize;
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, CFAOffset));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
if (!AFI->hasStackFrame()) {
- if (NumBytes != 0)
- emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, -NumBytes,
+ if (NumBytes - ArgRegsSaveSize != 0) {
+ emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, -(NumBytes - ArgRegsSaveSize),
MachineInstr::FrameSetup);
+ CFAOffset -= NumBytes - ArgRegsSaveSize;
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, CFAOffset));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
return;
}
@@ -120,6 +141,15 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
unsigned Reg = CSI[i].getReg();
int FI = CSI[i].getFrameIdx();
switch (Reg) {
+ case ARM::R8:
+ case ARM::R9:
+ case ARM::R10:
+ case ARM::R11:
+ if (STI.isTargetMachO()) {
+ GPRCS2Size += 4;
+ break;
+ }
+ // fallthrough
case ARM::R4:
case ARM::R5:
case ARM::R6:
@@ -129,17 +159,6 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
FramePtrSpillFI = FI;
GPRCS1Size += 4;
break;
- case ARM::R8:
- case ARM::R9:
- case ARM::R10:
- case ARM::R11:
- if (Reg == FramePtr)
- FramePtrSpillFI = FI;
- if (STI.isTargetIOS())
- GPRCS2Size += 4;
- else
- GPRCS1Size += 4;
- break;
default:
DPRCSSize += 8;
}
@@ -152,7 +171,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
}
// Determine starting offsets of spill areas.
- unsigned DPRCSOffset = NumBytes - (GPRCS1Size + GPRCS2Size + DPRCSSize);
+ unsigned DPRCSOffset = NumBytes - ArgRegsSaveSize - (GPRCS1Size + GPRCS2Size + DPRCSSize);
unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
bool HasFP = hasFP(MF);
@@ -165,27 +184,89 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
NumBytes = DPRCSOffset;
int FramePtrOffsetInBlock = 0;
- if (tryFoldSPUpdateIntoPushPop(MF, prior(MBBI), NumBytes)) {
+ unsigned adjustedGPRCS1Size = GPRCS1Size;
+ if (tryFoldSPUpdateIntoPushPop(STI, MF, std::prev(MBBI), NumBytes)) {
FramePtrOffsetInBlock = NumBytes;
+ adjustedGPRCS1Size += NumBytes;
NumBytes = 0;
}
+ if (adjustedGPRCS1Size) {
+ CFAOffset -= adjustedGPRCS1Size;
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, CFAOffset));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+ for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
+ E = CSI.end(); I != E; ++I) {
+ unsigned Reg = I->getReg();
+ int FI = I->getFrameIdx();
+ switch (Reg) {
+ case ARM::R8:
+ case ARM::R9:
+ case ARM::R10:
+ case ARM::R11:
+ case ARM::R12:
+ if (STI.isTargetMachO())
+ break;
+      // fallthrough
+ case ARM::R0:
+ case ARM::R1:
+ case ARM::R2:
+ case ARM::R3:
+ case ARM::R4:
+ case ARM::R5:
+ case ARM::R6:
+ case ARM::R7:
+ case ARM::LR:
+ unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
+ nullptr, MRI->getDwarfRegNum(Reg, true), MFI->getObjectOffset(FI)));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ break;
+ }
+ }
+
+
  // Adjust FP so it points to the stack slot that contains the previous FP.
if (HasFP) {
- FramePtrOffsetInBlock += MFI->getObjectOffset(FramePtrSpillFI) + GPRCS1Size;
+ FramePtrOffsetInBlock += MFI->getObjectOffset(FramePtrSpillFI)
+ + GPRCS1Size + ArgRegsSaveSize;
AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tADDrSPi), FramePtr)
.addReg(ARM::SP).addImm(FramePtrOffsetInBlock / 4)
.setMIFlags(MachineInstr::FrameSetup));
+ if(FramePtrOffsetInBlock) {
+ CFAOffset += FramePtrOffsetInBlock;
+ unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createDefCfa(
+ nullptr, MRI->getDwarfRegNum(FramePtr, true), CFAOffset));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ } else {
+ unsigned CFIIndex =
+ MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(
+ nullptr, MRI->getDwarfRegNum(FramePtr, true)));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
if (NumBytes > 508)
// If offset is > 508 then sp cannot be adjusted in a single instruction,
// try restoring from fp instead.
AFI->setShouldRestoreSPFromFP(true);
}
- if (NumBytes)
+ if (NumBytes) {
// Insert it after all the callee-save spills.
emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, -NumBytes,
MachineInstr::FrameSetup);
+ if (!HasFP) {
+ CFAOffset -= NumBytes;
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, CFAOffset));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+ }
if (STI.isTargetELF() && HasFP)
MFI->setOffsetAdjustment(MFI->getOffsetAdjustment() -
@@ -215,7 +296,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
AFI->setShouldRestoreSPFromFP(true);
}
-static bool isCSRestore(MachineInstr *MI, const uint16_t *CSRegs) {
+static bool isCSRestore(MachineInstr *MI, const MCPhysReg *CSRegs) {
if (MI->getOpcode() == ARM::tLDRspi &&
MI->getOperand(1).isFI() &&
isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs))
@@ -248,12 +329,14 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF,
unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment();
unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize(Align);
int NumBytes = (int)MFI->getStackSize();
- const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs();
+ assert((unsigned)NumBytes >= ArgRegsSaveSize &&
+ "ArgRegsSaveSize is included in NumBytes");
+ const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs();
unsigned FramePtr = RegInfo->getFrameRegister(MF);
if (!AFI->hasStackFrame()) {
- if (NumBytes != 0)
- emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, NumBytes);
+ if (NumBytes - ArgRegsSaveSize != 0)
+ emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, NumBytes - ArgRegsSaveSize);
} else {
// Unwind MBBI to point to first LDR / VLDRD.
if (MBBI != MBB.begin()) {
@@ -267,7 +350,8 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF,
// Move SP to start of FP callee save spill area.
NumBytes -= (AFI->getGPRCalleeSavedArea1Size() +
AFI->getGPRCalleeSavedArea2Size() +
- AFI->getDPRCalleeSavedAreaSize());
+ AFI->getDPRCalleeSavedAreaSize() +
+ ArgRegsSaveSize);
if (AFI->shouldRestoreSPFromFP()) {
NumBytes = AFI->getFramePtrSpillOffset() - NumBytes;
@@ -289,11 +373,11 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF,
} else {
if (MBBI->getOpcode() == ARM::tBX_RET &&
&MBB.front() != MBBI &&
- prior(MBBI)->getOpcode() == ARM::tPOP) {
- MachineBasicBlock::iterator PMBBI = prior(MBBI);
- if (!tryFoldSPUpdateIntoPushPop(MF, PMBBI, NumBytes))
+ std::prev(MBBI)->getOpcode() == ARM::tPOP) {
+ MachineBasicBlock::iterator PMBBI = std::prev(MBBI);
+ if (!tryFoldSPUpdateIntoPushPop(STI, MF, PMBBI, NumBytes))
emitSPUpdate(MBB, PMBBI, TII, dl, *RegInfo, NumBytes);
- } else if (!tryFoldSPUpdateIntoPushPop(MF, MBBI, NumBytes))
+ } else if (!tryFoldSPUpdateIntoPushPop(STI, MF, MBBI, NumBytes))
emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, NumBytes);
}
}
@@ -304,9 +388,9 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF,
// we need to update the SP after popping the value. Therefore, we
// pop the old LR into R3 as a temporary.
- // Move back past the callee-saved register restoration
- while (MBBI != MBB.end() && isCSRestore(MBBI, CSRegs))
- ++MBBI;
+ // Get the last instruction, tBX_RET
+ MBBI = MBB.getLastNonDebugInstr();
+ assert (MBBI->getOpcode() == ARM::tBX_RET);
// Epilogue for vararg functions: pop LR to R3 and branch off it.
AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tPOP)))
.addReg(ARM::R3, RegState::Define);
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.h b/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.h
index 5a300af..a227f8e 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.h
+++ b/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.h
@@ -11,44 +11,40 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __THUMB_FRAMEINFO_H_
-#define __THUMB_FRAMEINFO_H_
+#ifndef LLVM_ARM_THUMB1FRAMELOWERING_H
+#define LLVM_ARM_THUMB1FRAMELOWERING_H
-#include "ARM.h"
#include "ARMFrameLowering.h"
-#include "ARMSubtarget.h"
#include "Thumb1InstrInfo.h"
#include "Thumb1RegisterInfo.h"
#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
- class ARMSubtarget;
class Thumb1FrameLowering : public ARMFrameLowering {
public:
- explicit Thumb1FrameLowering(const ARMSubtarget &sti)
- : ARMFrameLowering(sti) {
- }
+ explicit Thumb1FrameLowering(const ARMSubtarget &sti);
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+ void emitPrologue(MachineFunction &MF) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo *TRI) const override;
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const override;
- bool hasReservedCallFrame(const MachineFunction &MF) const;
+ bool hasReservedCallFrame(const MachineFunction &MF) const override;
- void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const;
+ void
+ eliminateCallFramePseudoInstr(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const override;
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp b/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
index 22a925e..68cbb5c 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
@@ -12,7 +12,6 @@
//===----------------------------------------------------------------------===//
#include "Thumb1InstrInfo.h"
-#include "ARM.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.h b/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.h
index 36af204..c5845b7 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.h
+++ b/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.h
@@ -14,7 +14,6 @@
#ifndef THUMB1INSTRUCTIONINFO_H
#define THUMB1INSTRUCTIONINFO_H
-#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "Thumb1RegisterInfo.h"
@@ -27,33 +26,33 @@ public:
explicit Thumb1InstrInfo(const ARMSubtarget &STI);
/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
- void getNoopForMachoTarget(MCInst &NopInst) const;
+ void getNoopForMachoTarget(MCInst &NopInst) const override;
// Return the non-pre/post incrementing version of 'Opc'. Return 0
  // if there is no such opcode.
- unsigned getUnindexedOpcode(unsigned Opc) const;
+ unsigned getUnindexedOpcode(unsigned Opc) const override;
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
- const Thumb1RegisterInfo &getRegisterInfo() const { return RI; }
+ const Thumb1RegisterInfo &getRegisterInfo() const override { return RI; }
void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const;
+ bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned SrcReg, bool isKill, int FrameIndex,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo *TRI) const override;
void loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo *TRI) const override;
};
}
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp b/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp
index 65a7221..f907b14 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp
@@ -13,7 +13,6 @@
//===----------------------------------------------------------------------===//
#include "Thumb1RegisterInfo.h"
-#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
@@ -30,7 +29,6 @@
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
@@ -421,7 +419,7 @@ rewriteFrameIndex(MachineBasicBlock::iterator II, unsigned FrameRegIdx,
MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Mask);
}
Offset = (Offset - Mask * Scale);
- MachineBasicBlock::iterator NII = llvm::next(II);
+ MachineBasicBlock::iterator NII = std::next(II);
emitThumbRegPlusImmediate(MBB, NII, dl, DestReg, DestReg, Offset, TII,
*this);
} else {
@@ -484,10 +482,8 @@ rewriteFrameIndex(MachineBasicBlock::iterator II, unsigned FrameRegIdx,
return Offset == 0;
}
-void
-Thumb1RegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
- unsigned BaseReg, int64_t Offset) const {
- MachineInstr &MI = *I;
+void Thumb1RegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
+ int64_t Offset) const {
const ARMBaseInstrInfo &TII =
*static_cast<const ARMBaseInstrInfo*>(
MI.getParent()->getParent()->getTarget().getInstrInfo());
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h b/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h
index 9689b23..0c0abbe 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h
+++ b/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h
@@ -15,7 +15,6 @@
#ifndef THUMB1REGISTERINFO_H
#define THUMB1REGISTERINFO_H
-#include "ARM.h"
#include "ARMBaseRegisterInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -27,21 +26,20 @@ struct Thumb1RegisterInfo : public ARMBaseRegisterInfo {
public:
Thumb1RegisterInfo(const ARMSubtarget &STI);
- const TargetRegisterClass*
- getLargestLegalSuperClass(const TargetRegisterClass *RC) const;
+ const TargetRegisterClass *
+ getLargestLegalSuperClass(const TargetRegisterClass *RC) const override;
- const TargetRegisterClass*
- getPointerRegClass(const MachineFunction &MF, unsigned Kind = 0) const;
+ const TargetRegisterClass *
+ getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind = 0) const override;
/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
- void emitLoadConstPool(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- DebugLoc dl,
- unsigned DestReg, unsigned SubIdx, int Val,
- ARMCC::CondCodes Pred = ARMCC::AL,
- unsigned PredReg = 0,
- unsigned MIFlags = MachineInstr::NoFlags) const;
+ void
+ emitLoadConstPool(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+ DebugLoc dl, unsigned DestReg, unsigned SubIdx, int Val,
+ ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0,
+ unsigned MIFlags = MachineInstr::NoFlags) const override;
// rewrite MI to access 'Offset' bytes from the FP. Update Offset to be
// however much remains to be handled. Return 'true' if no further
@@ -49,16 +47,16 @@ public:
bool rewriteFrameIndex(MachineBasicBlock::iterator II, unsigned FrameRegIdx,
unsigned FrameReg, int &Offset,
const ARMBaseInstrInfo &TII) const;
- void resolveFrameIndex(MachineBasicBlock::iterator I,
- unsigned BaseReg, int64_t Offset) const;
+ void resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
+ int64_t Offset) const override;
bool saveScavengerRegister(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
MachineBasicBlock::iterator &UseMI,
const TargetRegisterClass *RC,
- unsigned Reg) const;
+ unsigned Reg) const override;
void eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, unsigned FIOperandNum,
- RegScavenger *RS = NULL) const;
+ RegScavenger *RS = nullptr) const override;
};
}
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp b/contrib/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
index 0b7d3bb..edb9ff3 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
@@ -7,7 +7,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "thumb2-it"
#include "ARM.h"
#include "ARMMachineFunctionInfo.h"
#include "Thumb2InstrInfo.h"
@@ -19,6 +18,8 @@
#include "llvm/CodeGen/MachineInstrBundle.h"
using namespace llvm;
+#define DEBUG_TYPE "thumb2-it"
+
STATISTIC(NumITs, "Number of IT blocks inserted");
STATISTIC(NumMovedInsts, "Number of predicated instructions moved");
@@ -33,9 +34,9 @@ namespace {
const TargetRegisterInfo *TRI;
ARMFunctionInfo *AFI;
- virtual bool runOnMachineFunction(MachineFunction &Fn);
+ bool runOnMachineFunction(MachineFunction &Fn) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Thumb IT blocks insertion pass";
}
@@ -242,7 +243,7 @@ bool Thumb2ITBlockPass::InsertITInstructions(MachineBasicBlock &MBB) {
// Finalize the bundle.
MachineBasicBlock::instr_iterator LI = LastITMI;
- finalizeBundle(MBB, InsertPos.getInstrIterator(), llvm::next(LI));
+ finalizeBundle(MBB, InsertPos.getInstrIterator(), std::next(LI));
Modified = true;
++NumITs;
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp b/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
index 91788ac..a9df006 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
@@ -12,7 +12,6 @@
//===----------------------------------------------------------------------===//
#include "Thumb2InstrInfo.h"
-#include "ARM.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h b/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h
index 2cdcd06..34d45d3 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h
+++ b/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h
@@ -14,7 +14,6 @@
#ifndef THUMB2INSTRUCTIONINFO_H
#define THUMB2INSTRUCTIONINFO_H
-#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "Thumb2RegisterInfo.h"
@@ -28,40 +27,40 @@ public:
explicit Thumb2InstrInfo(const ARMSubtarget &STI);
/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
- void getNoopForMachoTarget(MCInst &NopInst) const;
+ void getNoopForMachoTarget(MCInst &NopInst) const override;
// Return the non-pre/post incrementing version of 'Opc'. Return 0
  // if there is no such opcode.
- unsigned getUnindexedOpcode(unsigned Opc) const;
+ unsigned getUnindexedOpcode(unsigned Opc) const override;
void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
- MachineBasicBlock *NewDest) const;
+ MachineBasicBlock *NewDest) const override;
bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI) const;
+ MachineBasicBlock::iterator MBBI) const override;
void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const;
+ bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned SrcReg, bool isKill, int FrameIndex,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo *TRI) const override;
void loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo *TRI) const override;
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
- const Thumb2RegisterInfo &getRegisterInfo() const { return RI; }
+ const Thumb2RegisterInfo &getRegisterInfo() const override { return RI; }
};
/// getITInstrPredicate - Valid only in Thumb2 mode. This function is identical
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp b/contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp
index 4cb827f..782d81f 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp
@@ -14,7 +14,6 @@
#include "Thumb2RegisterInfo.h"
#include "ARM.h"
-#include "ARMBaseInstrInfo.h"
#include "ARMSubtarget.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -22,6 +21,8 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
using namespace llvm;
Thumb2RegisterInfo::Thumb2RegisterInfo(const ARMSubtarget &sti)
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.h b/contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.h
index b1d63fa..8a33e6c 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.h
+++ b/contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.h
@@ -15,9 +15,7 @@
#ifndef THUMB2REGISTERINFO_H
#define THUMB2REGISTERINFO_H
-#include "ARM.h"
#include "ARMBaseRegisterInfo.h"
-#include "llvm/Target/TargetRegisterInfo.h"
namespace llvm {
@@ -29,13 +27,11 @@ public:
/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
- void emitLoadConstPool(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- DebugLoc dl,
- unsigned DestReg, unsigned SubIdx, int Val,
- ARMCC::CondCodes Pred = ARMCC::AL,
- unsigned PredReg = 0,
- unsigned MIFlags = MachineInstr::NoFlags) const;
+ void
+ emitLoadConstPool(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+ DebugLoc dl, unsigned DestReg, unsigned SubIdx, int Val,
+ ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0,
+ unsigned MIFlags = MachineInstr::NoFlags) const override;
};
}
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp b/contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
index 4795aae..09debe7 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -7,10 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "t2-reduce-size"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
-#include "ARMBaseRegisterInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "Thumb2InstrInfo.h"
@@ -23,9 +21,11 @@
#include "llvm/IR/Function.h" // To access Function attributes
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
using namespace llvm;
+#define DEBUG_TYPE "t2-reduce-size"
+
STATISTIC(NumNarrows, "Number of 32-bit instrs reduced to 16-bit ones");
STATISTIC(Num2Addrs, "Number of 32-bit instrs reduced to 2addr 16-bit ones");
STATISTIC(NumLdSts, "Number of 32-bit load / store reduced to 16-bit ones");
@@ -137,9 +137,9 @@ namespace {
const Thumb2InstrInfo *TII;
const ARMSubtarget *STI;
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Thumb2 instruction size reduction pass";
}
@@ -256,8 +256,7 @@ Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Use, bool FirstInSelfLoop) {
return HighLatencyCPSR || FirstInSelfLoop;
SmallSet<unsigned, 2> Defs;
- for (unsigned i = 0, e = CPSRDef->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = CPSRDef->getOperand(i);
+ for (const MachineOperand &MO : CPSRDef->operands()) {
if (!MO.isReg() || MO.isUndef() || MO.isUse())
continue;
unsigned Reg = MO.getReg();
@@ -266,8 +265,7 @@ Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Use, bool FirstInSelfLoop) {
Defs.insert(Reg);
}
- for (unsigned i = 0, e = Use->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = Use->getOperand(i);
+ for (const MachineOperand &MO : Use->operands()) {
if (!MO.isReg() || MO.isUndef() || MO.isDef())
continue;
unsigned Reg = MO.getReg();
@@ -858,8 +856,7 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
static bool UpdateCPSRDef(MachineInstr &MI, bool LiveCPSR, bool &DefCPSR) {
bool HasDef = false;
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI.getOperand(i);
+ for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg() || MO.isUndef() || MO.isUse())
continue;
if (MO.getReg() != ARM::CPSR)
@@ -874,8 +871,7 @@ static bool UpdateCPSRDef(MachineInstr &MI, bool LiveCPSR, bool &DefCPSR) {
}
static bool UpdateCPSRUse(MachineInstr &MI, bool LiveCPSR) {
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI.getOperand(i);
+ for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg() || MO.isUndef() || MO.isDef())
continue;
if (MO.getReg() != ARM::CPSR)
@@ -920,15 +916,14 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
// Yes, CPSR could be livein.
bool LiveCPSR = MBB.isLiveIn(ARM::CPSR);
- MachineInstr *BundleMI = 0;
+ MachineInstr *BundleMI = nullptr;
- CPSRDef = 0;
+ CPSRDef = nullptr;
HighLatencyCPSR = false;
// Check predecessors for the latest CPSRDef.
- for (MachineBasicBlock::pred_iterator
- I = MBB.pred_begin(), E = MBB.pred_end(); I != E; ++I) {
- const MBBInfo &PInfo = BlockInfo[(*I)->getNumber()];
+ for (auto *Pred : MBB.predecessors()) {
+ const MBBInfo &PInfo = BlockInfo[Pred->getNumber()];
if (!PInfo.Visited) {
// Since blocks are visited in RPO, this must be a back-edge.
continue;
@@ -945,7 +940,7 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
MachineBasicBlock::instr_iterator MII = MBB.instr_begin(),E = MBB.instr_end();
MachineBasicBlock::instr_iterator NextMII;
for (; MII != E; MII = NextMII) {
- NextMII = llvm::next(MII);
+ NextMII = std::next(MII);
MachineInstr *MI = &*MII;
if (MI->isBundle()) {
@@ -962,7 +957,7 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
if (ReduceMI(MBB, MI, LiveCPSR, IsSelfLoop)) {
Modified = true;
- MachineBasicBlock::instr_iterator I = prior(NextMII);
+ MachineBasicBlock::instr_iterator I = std::prev(NextMII);
MI = &*I;
// Removing and reinserting the first instruction in a bundle will break
// up the bundle. Fix the bundling if it was broken.
@@ -980,13 +975,16 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
MachineOperand *MO = BundleMI->findRegisterDefOperand(ARM::CPSR);
if (MO && !MO->isDead())
LiveCPSR = true;
+ MO = BundleMI->findRegisterUseOperand(ARM::CPSR);
+ if (MO && !MO->isKill())
+ LiveCPSR = true;
}
bool DefCPSR = false;
LiveCPSR = UpdateCPSRDef(*MI, LiveCPSR, DefCPSR);
if (MI->isCall()) {
// Calls don't really set CPSR.
- CPSRDef = 0;
+ CPSRDef = nullptr;
HighLatencyCPSR = false;
IsSelfLoop = false;
} else if (DefCPSR) {
@@ -1012,8 +1010,8 @@ bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
AttributeSet FnAttrs = MF.getFunction()->getAttributes();
OptimizeSize = FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
Attribute::OptimizeForSize);
- MinimizeSize = FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
- Attribute::MinSize);
+ MinimizeSize =
+ FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
BlockInfo.clear();
BlockInfo.resize(MF.getNumBlockIDs());
diff --git a/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp b/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp
index ddc7a66..f610fbb 100644
--- a/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp
+++ b/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp
@@ -108,9 +108,9 @@ namespace {
explicit CppWriter(formatted_raw_ostream &o) :
ModulePass(ID), Out(o), uniqueNum(0), is_inline(false), indent_level(0){}
- virtual const char *getPassName() const { return "C++ backend"; }
+ const char *getPassName() const override { return "C++ backend"; }
- bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
void printProgram(const std::string& fname, const std::string& modName );
void printModule(const std::string& fname, const std::string& modName );
@@ -131,6 +131,7 @@ namespace {
private:
void printLinkageType(GlobalValue::LinkageTypes LT);
void printVisibilityType(GlobalValue::VisibilityTypes VisTypes);
+ void printDLLStorageClassType(GlobalValue::DLLStorageClassTypes DSCType);
void printThreadLocalMode(GlobalVariable::ThreadLocalMode TLM);
void printCallingConv(CallingConv::ID cc);
void printEscapedString(const std::string& str);
@@ -282,10 +283,6 @@ void CppWriter::printLinkageType(GlobalValue::LinkageTypes LT) {
Out << "GlobalValue::InternalLinkage"; break;
case GlobalValue::PrivateLinkage:
Out << "GlobalValue::PrivateLinkage"; break;
- case GlobalValue::LinkerPrivateLinkage:
- Out << "GlobalValue::LinkerPrivateLinkage"; break;
- case GlobalValue::LinkerPrivateWeakLinkage:
- Out << "GlobalValue::LinkerPrivateWeakLinkage"; break;
case GlobalValue::AvailableExternallyLinkage:
Out << "GlobalValue::AvailableExternallyLinkage "; break;
case GlobalValue::LinkOnceAnyLinkage:
@@ -300,10 +297,6 @@ void CppWriter::printLinkageType(GlobalValue::LinkageTypes LT) {
Out << "GlobalValue::AppendingLinkage"; break;
case GlobalValue::ExternalLinkage:
Out << "GlobalValue::ExternalLinkage"; break;
- case GlobalValue::DLLImportLinkage:
- Out << "GlobalValue::DLLImportLinkage"; break;
- case GlobalValue::DLLExportLinkage:
- Out << "GlobalValue::DLLExportLinkage"; break;
case GlobalValue::ExternalWeakLinkage:
Out << "GlobalValue::ExternalWeakLinkage"; break;
case GlobalValue::CommonLinkage:
@@ -325,6 +318,21 @@ void CppWriter::printVisibilityType(GlobalValue::VisibilityTypes VisType) {
}
}
+void CppWriter::printDLLStorageClassType(
+ GlobalValue::DLLStorageClassTypes DSCType) {
+ switch (DSCType) {
+ case GlobalValue::DefaultStorageClass:
+ Out << "GlobalValue::DefaultStorageClass";
+ break;
+ case GlobalValue::DLLImportStorageClass:
+ Out << "GlobalValue::DLLImportStorageClass";
+ break;
+ case GlobalValue::DLLExportStorageClass:
+ Out << "GlobalValue::DLLExportStorageClass";
+ break;
+ }
+}
+
void CppWriter::printThreadLocalMode(GlobalVariable::ThreadLocalMode TLM) {
switch (TLM) {
case GlobalVariable::NotThreadLocal:
@@ -361,25 +369,25 @@ void CppWriter::printEscapedString(const std::string &Str) {
}
std::string CppWriter::getCppName(Type* Ty) {
- // First, handle the primitive types .. easy
- if (Ty->isPrimitiveType() || Ty->isIntegerTy()) {
- switch (Ty->getTypeID()) {
- case Type::VoidTyID: return "Type::getVoidTy(mod->getContext())";
- case Type::IntegerTyID: {
- unsigned BitWidth = cast<IntegerType>(Ty)->getBitWidth();
- return "IntegerType::get(mod->getContext(), " + utostr(BitWidth) + ")";
- }
- case Type::X86_FP80TyID: return "Type::getX86_FP80Ty(mod->getContext())";
- case Type::FloatTyID: return "Type::getFloatTy(mod->getContext())";
- case Type::DoubleTyID: return "Type::getDoubleTy(mod->getContext())";
- case Type::LabelTyID: return "Type::getLabelTy(mod->getContext())";
- case Type::X86_MMXTyID: return "Type::getX86_MMXTy(mod->getContext())";
- default:
- error("Invalid primitive type");
- break;
- }
- // shouldn't be returned, but make it sensible
+ switch (Ty->getTypeID()) {
+ default:
+ break;
+ case Type::VoidTyID:
return "Type::getVoidTy(mod->getContext())";
+ case Type::IntegerTyID: {
+ unsigned BitWidth = cast<IntegerType>(Ty)->getBitWidth();
+ return "IntegerType::get(mod->getContext(), " + utostr(BitWidth) + ")";
+ }
+ case Type::X86_FP80TyID:
+ return "Type::getX86_FP80Ty(mod->getContext())";
+ case Type::FloatTyID:
+ return "Type::getFloatTy(mod->getContext())";
+ case Type::DoubleTyID:
+ return "Type::getDoubleTy(mod->getContext())";
+ case Type::LabelTyID:
+ return "Type::getLabelTy(mod->getContext())";
+ case Type::X86_MMXTyID:
+ return "Type::getX86_MMXTy(mod->getContext())";
}
// Now, see if we've seen the type before and return that
@@ -388,7 +396,7 @@ std::string CppWriter::getCppName(Type* Ty) {
return I->second;
// Okay, let's build a new name for this type. Start with a prefix
- const char* prefix = 0;
+ const char* prefix = nullptr;
switch (Ty->getTypeID()) {
case Type::FunctionTyID: prefix = "FuncTy_"; break;
case Type::StructTyID: prefix = "StructTy_"; break;
@@ -491,6 +499,7 @@ void CppWriter::printAttributes(const AttributeSet &PAL,
HANDLE_ATTR(NoUnwind);
HANDLE_ATTR(NoAlias);
HANDLE_ATTR(ByVal);
+ HANDLE_ATTR(InAlloca);
HANDLE_ATTR(Nest);
HANDLE_ATTR(ReadNone);
HANDLE_ATTR(ReadOnly);
@@ -537,7 +546,8 @@ void CppWriter::printAttributes(const AttributeSet &PAL,
void CppWriter::printType(Type* Ty) {
// We don't print definitions for primitive types
- if (Ty->isPrimitiveType() || Ty->isIntegerTy())
+ if (Ty->isFloatingPointTy() || Ty->isX86_MMXTy() || Ty->isIntegerTy() ||
+ Ty->isLabelTy() || Ty->isMetadataTy() || Ty->isVoidTy())
return;
// If we already defined this type, we don't need to define it again.
@@ -1026,6 +1036,13 @@ void CppWriter::printVariableHead(const GlobalVariable *GV) {
Out << ");";
nl(Out);
}
+ if (GV->getDLLStorageClass() != GlobalValue::DefaultStorageClass) {
+ printCppName(GV);
+ Out << "->setDLLStorageClass(";
+ printDLLStorageClassType(GV->getDLLStorageClass());
+ Out << ");";
+ nl(Out);
+ }
if (GV->isThreadLocal()) {
printCppName(GV);
Out << "->setThreadLocalMode(";
@@ -1546,16 +1563,24 @@ void CppWriter::printInstruction(const Instruction *I,
}
case Instruction::AtomicCmpXchg: {
const AtomicCmpXchgInst *cxi = cast<AtomicCmpXchgInst>(I);
- StringRef Ordering = ConvertAtomicOrdering(cxi->getOrdering());
+ StringRef SuccessOrdering =
+ ConvertAtomicOrdering(cxi->getSuccessOrdering());
+ StringRef FailureOrdering =
+ ConvertAtomicOrdering(cxi->getFailureOrdering());
StringRef CrossThread = ConvertAtomicSynchScope(cxi->getSynchScope());
Out << "AtomicCmpXchgInst* " << iName
<< " = new AtomicCmpXchgInst("
<< opNames[0] << ", " << opNames[1] << ", " << opNames[2] << ", "
- << Ordering << ", " << CrossThread << ", " << bbname
+ << SuccessOrdering << ", " << FailureOrdering << ", "
+ << CrossThread << ", " << bbname
<< ");";
nl(Out) << iName << "->setName(\"";
printEscapedString(cxi->getName());
Out << "\");";
+ nl(Out) << iName << "->setVolatile("
+ << (cxi->isVolatile() ? "true" : "false") << ");";
+ nl(Out) << iName << "->setWeak("
+ << (cxi->isWeak() ? "true" : "false") << ");";
break;
}
case Instruction::AtomicRMW: {
@@ -1586,6 +1611,8 @@ void CppWriter::printInstruction(const Instruction *I,
nl(Out) << iName << "->setName(\"";
printEscapedString(rmwi->getName());
Out << "\");";
+ nl(Out) << iName << "->setVolatile("
+ << (rmwi->isVolatile() ? "true" : "false") << ");";
break;
}
case Instruction::LandingPad: {
@@ -1669,9 +1696,8 @@ void CppWriter::printFunctionUses(const Function* F) {
// Print the function declarations for any functions encountered
nl(Out) << "// Function Declarations"; nl(Out);
- for (SmallPtrSet<GlobalValue*,64>::iterator I = gvs.begin(), E = gvs.end();
- I != E; ++I) {
- if (Function* Fun = dyn_cast<Function>(*I)) {
+ for (auto *GV : gvs) {
+ if (Function *Fun = dyn_cast<Function>(GV)) {
if (!is_inline || Fun != F)
printFunctionHead(Fun);
}
@@ -1679,17 +1705,15 @@ void CppWriter::printFunctionUses(const Function* F) {
// Print the global variable declarations for any variables encountered
nl(Out) << "// Global Variable Declarations"; nl(Out);
- for (SmallPtrSet<GlobalValue*,64>::iterator I = gvs.begin(), E = gvs.end();
- I != E; ++I) {
- if (GlobalVariable* F = dyn_cast<GlobalVariable>(*I))
+ for (auto *GV : gvs) {
+ if (GlobalVariable *F = dyn_cast<GlobalVariable>(GV))
printVariableHead(F);
}
// Print the constants found
nl(Out) << "// Constant Definitions"; nl(Out);
- for (SmallPtrSet<Constant*,64>::iterator I = consts.begin(),
- E = consts.end(); I != E; ++I) {
- printConstant(*I);
+ for (const auto *C : consts) {
+ printConstant(C);
}
// Process the global variables definitions now that all the constants have
@@ -1697,10 +1721,9 @@ void CppWriter::printFunctionUses(const Function* F) {
// initializers.
if (GenerationType != GenFunction) {
nl(Out) << "// Global Variable Definitions"; nl(Out);
- for (SmallPtrSet<GlobalValue*,64>::iterator I = gvs.begin(), E = gvs.end();
- I != E; ++I) {
- if (GlobalVariable* GV = dyn_cast<GlobalVariable>(*I))
- printVariableBody(GV);
+ for (const auto &GV : gvs) {
+ if (GlobalVariable *Var = dyn_cast<GlobalVariable>(GV))
+ printVariableBody(Var);
}
}
}
@@ -1744,6 +1767,13 @@ void CppWriter::printFunctionHead(const Function* F) {
Out << ");";
nl(Out);
}
+ if (F->getDLLStorageClass() != GlobalValue::DefaultStorageClass) {
+ printCppName(F);
+ Out << "->setDLLStorageClass(";
+ printDLLStorageClassType(F->getDLLStorageClass());
+ Out << ");";
+ nl(Out);
+ }
if (F->hasGC()) {
printCppName(F);
Out << "->setGC(\"" << F->getGC() << "\");";
@@ -1916,13 +1946,13 @@ void CppWriter::printProgram(const std::string& fname,
Out << "#include <llvm/ADT/SmallVector.h>\n";
Out << "#include <llvm/Analysis/Verifier.h>\n";
- Out << "#include <llvm/Assembly/PrintModulePass.h>\n";
Out << "#include <llvm/IR/BasicBlock.h>\n";
Out << "#include <llvm/IR/CallingConv.h>\n";
Out << "#include <llvm/IR/Constants.h>\n";
Out << "#include <llvm/IR/DerivedTypes.h>\n";
Out << "#include <llvm/IR/Function.h>\n";
Out << "#include <llvm/IR/GlobalVariable.h>\n";
+ Out << "#include <llvm/IR/IRPrintingPasses.h>\n";
Out << "#include <llvm/IR/InlineAsm.h>\n";
Out << "#include <llvm/IR/Instructions.h>\n";
Out << "#include <llvm/IR/LLVMContext.h>\n";
diff --git a/contrib/llvm/lib/Target/CppBackend/CPPTargetMachine.h b/contrib/llvm/lib/Target/CppBackend/CPPTargetMachine.h
index 477e788..673ade7 100644
--- a/contrib/llvm/lib/Target/CppBackend/CPPTargetMachine.h
+++ b/contrib/llvm/lib/Target/CppBackend/CPPTargetMachine.h
@@ -28,14 +28,12 @@ struct CPPTargetMachine : public TargetMachine {
CodeGenOpt::Level OL)
: TargetMachine(T, TT, CPU, FS, Options) {}
- virtual bool addPassesToEmitFile(PassManagerBase &PM,
- formatted_raw_ostream &Out,
- CodeGenFileType FileType,
- bool DisableVerify,
- AnalysisID StartAfter,
- AnalysisID StopAfter);
-
- virtual const DataLayout *getDataLayout() const { return 0; }
+ bool addPassesToEmitFile(PassManagerBase &PM, formatted_raw_ostream &Out,
+ CodeGenFileType FileType, bool DisableVerify,
+ AnalysisID StartAfter,
+ AnalysisID StopAfter) override;
+
+ const DataLayout *getDataLayout() const override { return nullptr; }
};
extern Target TheCppBackendTarget;
diff --git a/contrib/llvm/lib/Target/CppBackend/TargetInfo/CppBackendTargetInfo.cpp b/contrib/llvm/lib/Target/CppBackend/TargetInfo/CppBackendTargetInfo.cpp
index 1ca74a4..096dc73 100644
--- a/contrib/llvm/lib/Target/CppBackend/TargetInfo/CppBackendTargetInfo.cpp
+++ b/contrib/llvm/lib/Target/CppBackend/TargetInfo/CppBackendTargetInfo.cpp
@@ -14,9 +14,10 @@ using namespace llvm;
Target llvm::TheCppBackendTarget;
-static unsigned CppBackend_TripleMatchQuality(const std::string &TT) {
- // This class always works, but shouldn't be the default in most cases.
- return 1;
+static bool CppBackend_TripleMatchQuality(Triple::ArchType Arch) {
+ // This backend doesn't correspond to any architecture. It must be explicitly
+ // selected with -march.
+ return false;
}
extern "C" void LLVMInitializeCppBackendTargetInfo() {
diff --git a/contrib/llvm/lib/Target/Hexagon/Hexagon.td b/contrib/llvm/lib/Target/Hexagon/Hexagon.td
index 568798c..5f4a6c6 100644
--- a/contrib/llvm/lib/Target/Hexagon/Hexagon.td
+++ b/contrib/llvm/lib/Target/Hexagon/Hexagon.td
@@ -200,19 +200,9 @@ class Proc<string Name, SchedMachineModel Model,
list<SubtargetFeature> Features>
: ProcessorModel<Name, Model, Features>;
-def : Proc<"hexagonv2", HexagonModel, [ArchV2]>;
-def : Proc<"hexagonv3", HexagonModel, [ArchV2, ArchV3]>;
def : Proc<"hexagonv4", HexagonModelV4, [ArchV2, ArchV3, ArchV4]>;
def : Proc<"hexagonv5", HexagonModelV4, [ArchV2, ArchV3, ArchV4, ArchV5]>;
-
-// Hexagon Uses the MC printer for assembler output, so make sure the TableGen
-// AsmWriter bits get associated with the correct class.
-def HexagonAsmWriter : AsmWriter {
- string AsmWriterClassName = "InstPrinter";
- bit isMCAsmWriter = 1;
-}
-
//===----------------------------------------------------------------------===//
// Declare the target which we are implementing
//===----------------------------------------------------------------------===//
@@ -220,6 +210,4 @@ def HexagonAsmWriter : AsmWriter {
def Hexagon : Target {
// Pull in Instruction Info:
let InstructionSet = HexagonInstrInfo;
-
- let AssemblyWriters = [HexagonAsmWriter];
}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp
index a2e04ba..2e011bd 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp
@@ -13,19 +13,17 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
#include "Hexagon.h"
#include "HexagonAsmPrinter.h"
#include "HexagonMachineFunctionInfo.h"
-#include "HexagonTargetMachine.h"
#include "HexagonSubtarget.h"
-#include "MCTargetDesc/HexagonMCInst.h"
+#include "HexagonTargetMachine.h"
#include "InstPrinter/HexagonInstPrinter.h"
+#include "MCTargetDesc/HexagonMCInst.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
@@ -34,6 +32,7 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
@@ -49,7 +48,6 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
@@ -57,21 +55,12 @@
using namespace llvm;
+#define DEBUG_TYPE "asm-printer"
+
static cl::opt<bool> AlignCalls(
"hexagon-align-calls", cl::Hidden, cl::init(true),
cl::desc("Insert falign after call instruction for Hexagon target"));
-void HexagonAsmPrinter::EmitAlignment(unsigned NumBits,
- const GlobalValue *GV) const {
- // For basic block level alignment, use ".falign".
- if (!GV) {
- OutStreamer.EmitRawText(StringRef("\t.falign"));
- return;
- }
-
- AsmPrinter::EmitAlignment(NumBits, GV);
-}
-
void HexagonAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
raw_ostream &O) {
const MachineOperand &MO = MI->getOperand(OpNo);
@@ -87,16 +76,9 @@ void HexagonAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
case MachineOperand::MO_MachineBasicBlock:
O << *MO.getMBB()->getSymbol();
return;
- case MachineOperand::MO_JumpTableIndex:
- O << *GetJTISymbol(MO.getIndex());
- // FIXME: PIC relocation model.
- return;
case MachineOperand::MO_ConstantPoolIndex:
O << *GetCPISymbol(MO.getIndex());
return;
- case MachineOperand::MO_ExternalSymbol:
- O << *GetExternalSymbolSymbol(MO.getSymbolName());
- return;
case MachineOperand::MO_GlobalAddress:
// Computing the address of a global symbol, not calling it.
O << *getSymbol(MO.getGlobal());
@@ -186,12 +168,6 @@ bool HexagonAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
return false;
}
-void HexagonAsmPrinter::printPredicateOperand(const MachineInstr *MI,
- unsigned OpNo,
- raw_ostream &O) {
- llvm_unreachable("Unimplemented");
-}
-
/// printMachineInstruction -- Print out a single Hexagon MI in Darwin syntax to
/// the current output stream.
@@ -224,7 +200,7 @@ void HexagonAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCI.setPacketEnd(Index == (Size-1));
HexagonLowerToMC(BundleMIs[Index], MCI, *this);
- OutStreamer.EmitInstruction(MCI);
+ EmitToStreamer(OutStreamer, MCI);
}
}
else {
@@ -234,66 +210,12 @@ void HexagonAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCI.setPacketEnd(true);
}
HexagonLowerToMC(MI, MCI, *this);
- OutStreamer.EmitInstruction(MCI);
+ EmitToStreamer(OutStreamer, MCI);
}
return;
}
-/// PrintUnmangledNameSafely - Print out the printable characters in the name.
-/// Don't print things like \n or \0.
-// static void PrintUnmangledNameSafely(const Value *V, raw_ostream &OS) {
-// for (const char *Name = V->getNameStart(), *E = Name+V->getNameLen();
-// Name != E; ++Name)
-// if (isprint(*Name))
-// OS << *Name;
-// }
-
-
-void HexagonAsmPrinter::printAddrModeBasePlusOffset(const MachineInstr *MI,
- int OpNo, raw_ostream &O) {
- const MachineOperand &MO1 = MI->getOperand(OpNo);
- const MachineOperand &MO2 = MI->getOperand(OpNo+1);
-
- O << HexagonInstPrinter::getRegisterName(MO1.getReg())
- << " + #"
- << MO2.getImm();
-}
-
-
-void HexagonAsmPrinter::printGlobalOperand(const MachineInstr *MI, int OpNo,
- raw_ostream &O) {
- const MachineOperand &MO = MI->getOperand(OpNo);
- assert( (MO.getType() == MachineOperand::MO_GlobalAddress) &&
- "Expecting global address");
-
- O << *getSymbol(MO.getGlobal());
- if (MO.getOffset() != 0) {
- O << " + ";
- O << MO.getOffset();
- }
-}
-
-void HexagonAsmPrinter::printJumpTable(const MachineInstr *MI, int OpNo,
- raw_ostream &O) {
- const MachineOperand &MO = MI->getOperand(OpNo);
- assert( (MO.getType() == MachineOperand::MO_JumpTableIndex) &&
- "Expecting jump table index");
-
- // Hexagon_TODO: Do we need name mangling?
- O << *GetJTISymbol(MO.getIndex());
-}
-
-void HexagonAsmPrinter::printConstantPool(const MachineInstr *MI, int OpNo,
- raw_ostream &O) {
- const MachineOperand &MO = MI->getOperand(OpNo);
- assert( (MO.getType() == MachineOperand::MO_ConstantPoolIndex) &&
- "Expecting constant pool index");
-
- // Hexagon_TODO: Do we need name mangling?
- O << *GetCPISymbol(MO.getIndex());
-}
-
static MCInstPrinter *createHexagonMCInstPrinter(const Target &T,
unsigned SyntaxVariant,
const MCAsmInfo &MAI,
@@ -303,7 +225,7 @@ static MCInstPrinter *createHexagonMCInstPrinter(const Target &T,
if (SyntaxVariant == 0)
return(new HexagonInstPrinter(MAI, MII, MRI));
else
- return NULL;
+ return nullptr;
}
extern "C" void LLVMInitializeHexagonAsmPrinter() {
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.h b/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.h
index bc2af63..7fe8c57 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.h
@@ -30,134 +30,24 @@ namespace llvm {
Subtarget = &TM.getSubtarget<HexagonSubtarget>();
}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Hexagon Assembly Printer";
}
- bool isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const;
+ bool isBlockOnlyReachableByFallthrough(
+ const MachineBasicBlock *MBB) const override;
- virtual void EmitInstruction(const MachineInstr *MI);
- virtual void EmitAlignment(unsigned NumBits,
- const GlobalValue *GV = 0) const;
+ void EmitInstruction(const MachineInstr *MI) override;
void printOperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &O);
bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &OS);
+ raw_ostream &OS) override;
bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &OS);
-
- /// printInstruction - This method is automatically generated by tablegen
- /// from the instruction set description. This method returns true if the
- /// machine instruction was sufficiently described to print it, otherwise it
- /// returns false.
- void printInstruction(const MachineInstr *MI, raw_ostream &O);
-
- // void printMachineInstruction(const MachineInstr *MI);
- void printOp(const MachineOperand &MO, raw_ostream &O);
-
- /// printRegister - Print register according to target requirements.
- ///
- void printRegister(const MachineOperand &MO, bool R0AsZero,
- raw_ostream &O) {
- unsigned RegNo = MO.getReg();
- assert(TargetRegisterInfo::isPhysicalRegister(RegNo) && "Not physreg??");
- O << getRegisterName(RegNo);
- }
-
- void printImmOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O) {
- int value = MI->getOperand(OpNo).getImm();
- O << value;
- }
-
- void printNegImmOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O) {
- int value = MI->getOperand(OpNo).getImm();
- O << -value;
- }
-
- void printMEMriOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O) {
- const MachineOperand &MO1 = MI->getOperand(OpNo);
- const MachineOperand &MO2 = MI->getOperand(OpNo+1);
-
- O << getRegisterName(MO1.getReg())
- << " + #"
- << (int) MO2.getImm();
- }
-
- void printFrameIndexOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O) {
- const MachineOperand &MO1 = MI->getOperand(OpNo);
- const MachineOperand &MO2 = MI->getOperand(OpNo+1);
-
- O << getRegisterName(MO1.getReg())
- << ", #"
- << MO2.getImm();
- }
-
- void printBranchOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O) {
- // Branches can take an immediate operand. This is used by the branch
- // selection pass to print $+8, an eight byte displacement from the PC.
- if (MI->getOperand(OpNo).isImm()) {
- O << "$+" << MI->getOperand(OpNo).getImm()*4;
- } else {
- printOp(MI->getOperand(OpNo), O);
- }
- }
-
- void printCallOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O) {
- }
-
- void printAbsAddrOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O) {
- }
-
- void printSymbolHi(const MachineInstr *MI, unsigned OpNo, raw_ostream &O) {
- O << "#HI(";
- if (MI->getOperand(OpNo).isImm()) {
- printImmOperand(MI, OpNo, O);
- }
- else {
- printOp(MI->getOperand(OpNo), O);
- }
- O << ")";
- }
-
- void printSymbolLo(const MachineInstr *MI, unsigned OpNo, raw_ostream &O) {
- O << "#HI(";
- if (MI->getOperand(OpNo).isImm()) {
- printImmOperand(MI, OpNo, O);
- }
- else {
- printOp(MI->getOperand(OpNo), O);
- }
- O << ")";
- }
-
- void printPredicateOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O);
-
-#if 0
- void printModuleLevelGV(const GlobalVariable* GVar, raw_ostream &O);
-#endif
-
- void printAddrModeBasePlusOffset(const MachineInstr *MI, int OpNo,
- raw_ostream &O);
-
- void printGlobalOperand(const MachineInstr *MI, int OpNo, raw_ostream &O);
- void printJumpTable(const MachineInstr *MI, int OpNo, raw_ostream &O);
- void printConstantPool(const MachineInstr *MI, int OpNo, raw_ostream &O);
+ raw_ostream &OS) override;
static const char *getRegisterName(unsigned RegNo);
-
-#if 0
- void EmitStartOfAsmFile(Module &M);
-#endif
};
} // end of llvm namespace
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonCFGOptimizer.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
index 8597f11..de340e0 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
@@ -6,7 +6,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "hexagon_cfg"
#include "Hexagon.h"
#include "HexagonMachineFunctionInfo.h"
#include "HexagonSubtarget.h"
@@ -26,6 +25,8 @@
using namespace llvm;
+#define DEBUG_TYPE "hexagon_cfg"
+
namespace llvm {
void initializeHexagonCFGOptimizerPass(PassRegistry&);
}
@@ -48,10 +49,10 @@ private:
initializeHexagonCFGOptimizerPass(*PassRegistry::getPassRegistry());
}
- const char *getPassName() const {
+ const char *getPassName() const override {
return "Hexagon CFG Optimizer";
}
- bool runOnMachineFunction(MachineFunction &Fn);
+ bool runOnMachineFunction(MachineFunction &Fn) override;
};
@@ -146,8 +147,8 @@ bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) {
MachineBasicBlock::succ_iterator SI = MBB->succ_begin();
MachineBasicBlock* FirstSucc = *SI;
MachineBasicBlock* SecondSucc = *(++SI);
- MachineBasicBlock* LayoutSucc = NULL;
- MachineBasicBlock* JumpAroundTarget = NULL;
+ MachineBasicBlock* LayoutSucc = nullptr;
+ MachineBasicBlock* JumpAroundTarget = nullptr;
if (MBB->isLayoutSuccessor(FirstSucc)) {
LayoutSucc = FirstSucc;
@@ -161,7 +162,7 @@ bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) {
// The target of the unconditional branch must be JumpAroundTarget.
// TODO: If not, we should not invert the unconditional branch.
- MachineBasicBlock* CondBranchTarget = NULL;
+ MachineBasicBlock* CondBranchTarget = nullptr;
if ((MI->getOpcode() == Hexagon::JMP_t) ||
(MI->getOpcode() == Hexagon::JMP_f)) {
CondBranchTarget = MI->getOperand(1).getMBB();
@@ -239,7 +240,7 @@ bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) {
static void initializePassOnce(PassRegistry &Registry) {
PassInfo *PI = new PassInfo("Hexagon CFG Optimizer", "hexagon-cfg",
- &HexagonCFGOptimizer::ID, 0, false, false);
+ &HexagonCFGOptimizer::ID, nullptr, false, false);
Registry.registerPass(*PI, true);
}
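
The HexagonCFGOptimizer hunks above show two C++11 housekeeping changes that recur through the whole merge: member functions that override a virtual are marked 'override', and null pointer constants become 'nullptr'. A minimal sketch, using hypothetical Pass/MyPass classes rather than the LLVM ones, of why both changes are more than cosmetic:

    #include <cstdio>

    struct Pass {
      virtual const char *getPassName() const { return "base"; }
      virtual ~Pass() {}
    };

    struct MyPass : Pass {
      // 'override' turns a silent mismatch (e.g. a changed base signature)
      // into a compile error instead of an accidental new function.
      const char *getPassName() const override { return "MyPass"; }
    };

    void take(int)    { std::puts("int overload"); }
    void take(void *) { std::puts("pointer overload"); }

    int main() {
      MyPass P;
      std::printf("%s\n", P.getPassName());
      take(nullptr);  // unambiguously a pointer; NULL/0 could bind to take(int)
      return 0;
    }
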
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.h b/contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.h
index 33c8306..70b8b64 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.h
@@ -19,7 +19,6 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
-#include "llvm/CodeGen/ValueTypes.h"
//
// Need to handle varargs.
@@ -29,7 +28,7 @@ namespace llvm {
class TargetMachine;
class Hexagon_CCState;
class SDNode;
-
+ struct EVT;
/// Hexagon_CCAssignFn - This function assigns a location for Val, updating
/// State to reflect the change.
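
The HexagonCallingConvLower.h hunk drops the ValueTypes.h include and forward-declares 'struct EVT' instead, which works because the header only declares functions that mention the type and never needs its layout. A minimal sketch of the same trade-off with hypothetical types, not the LLVM headers:

    #include <cstdio>

    // What would normally sit in a separate header: the declaration only
    // needs to name Gadget, so a forward declaration replaces the #include.
    struct Gadget;                     // forward declaration
    void describe(const Gadget &G);    // reference: complete type not required

    // The full definition is needed only in the translation unit that
    // actually touches Gadget's members.
    struct Gadget { int Id; };
    void describe(const Gadget &G) { std::printf("gadget %d\n", G.Id); }

    int main() {
      Gadget G{42};
      describe(G);
      return 0;
    }
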
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
index dc440cb..aeff680 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
@@ -11,32 +11,31 @@
// to move them together. If we can move them next to each other we do so and
// replace them with a combine instruction.
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "hexagon-copy-combine"
-
#include "llvm/PassSupport.h"
-#include "llvm/ADT/DenseSet.h"
+#include "Hexagon.h"
+#include "HexagonInstrInfo.h"
+#include "HexagonMachineFunctionInfo.h"
+#include "HexagonRegisterInfo.h"
+#include "HexagonSubtarget.h"
+#include "HexagonTargetMachine.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/CodeGen/Passes.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-
-#include "Hexagon.h"
-#include "HexagonInstrInfo.h"
-#include "HexagonRegisterInfo.h"
-#include "HexagonSubtarget.h"
-#include "HexagonTargetMachine.h"
-#include "HexagonMachineFunctionInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "hexagon-copy-combine"
+
static
cl::opt<bool> IsCombinesDisabled("disable-merge-into-combines",
cl::Hidden, cl::ZeroOrMore,
@@ -69,15 +68,15 @@ public:
initializeHexagonCopyToCombinePass(*PassRegistry::getPassRegistry());
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
MachineFunctionPass::getAnalysisUsage(AU);
}
- const char *getPassName() const {
+ const char *getPassName() const override {
return "Hexagon Copy-To-Combine Pass";
}
- virtual bool runOnMachineFunction(MachineFunction &Fn);
+ bool runOnMachineFunction(MachineFunction &Fn) override;
private:
MachineInstr *findPairable(MachineInstr *I1, bool &DoInsertAtI1);
@@ -263,7 +262,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1,
unsigned KilledOperand = 0;
if (I2->killsRegister(I2UseReg))
KilledOperand = I2UseReg;
- MachineInstr *KillingInstr = 0;
+ MachineInstr *KillingInstr = nullptr;
for (; I != End; ++I) {
// If the intervening instruction I:
@@ -286,7 +285,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1,
// Update the intermediate instruction to with the kill flag.
if (KillingInstr) {
bool Added = KillingInstr->addRegisterKilled(KilledOperand, TRI, true);
- (void)Added; // supress compiler warning
+ (void)Added; // suppress compiler warning
assert(Added && "Must successfully update kill flag");
removeKillInfo(I2, KilledOperand);
}
@@ -301,13 +300,13 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1,
MachineBasicBlock::iterator I(I1), End(I2);
// At O3 we got better results (dhrystone) by being more conservative here.
if (!ShouldCombineAggressively)
- End = llvm::next(MachineBasicBlock::iterator(I2));
+ End = std::next(MachineBasicBlock::iterator(I2));
IsImmUseReg = I1->getOperand(1).isImm() || I1->getOperand(1).isGlobal();
unsigned I1UseReg = IsImmUseReg ? 0 : I1->getOperand(1).getReg();
// Track killed operands. If we move across an instruction that kills our
// operand, we need to update the kill information on the moved I1. It kills
// the operand now.
- MachineInstr *KillingInstr = 0;
+ MachineInstr *KillingInstr = nullptr;
unsigned KilledOperand = 0;
while(++I != End) {
@@ -334,7 +333,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1,
// Check for an exact kill (registers match).
if (I1UseReg && I->killsRegister(I1UseReg)) {
- assert(KillingInstr == 0 && "Should only see one killing instruction");
+ assert(!KillingInstr && "Should only see one killing instruction");
KilledOperand = I1UseReg;
KillingInstr = &*I;
}
@@ -344,7 +343,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1,
// Update I1 to set the kill flag. This flag will later be picked up by
// the new COMBINE instruction.
bool Added = I1->addRegisterKilled(KilledOperand, TRI);
- (void)Added; // supress compiler warning
+ (void)Added; // suppress compiler warning
assert(Added && "Must successfully update kill flag");
}
DoInsertAtI1 = false;
@@ -465,7 +464,7 @@ bool HexagonCopyToCombine::runOnMachineFunction(MachineFunction &MF) {
/// false if the combine must be inserted at the returned instruction.
MachineInstr *HexagonCopyToCombine::findPairable(MachineInstr *I1,
bool &DoInsertAtI1) {
- MachineBasicBlock::iterator I2 = llvm::next(MachineBasicBlock::iterator(I1));
+ MachineBasicBlock::iterator I2 = std::next(MachineBasicBlock::iterator(I1));
unsigned I1DestReg = I1->getOperand(0).getReg();
for (MachineBasicBlock::iterator End = I1->getParent()->end(); I2 != End;
@@ -507,7 +506,7 @@ MachineInstr *HexagonCopyToCombine::findPairable(MachineInstr *I1,
// Not safe. Stop searching.
break;
}
- return 0;
+ return nullptr;
}
void HexagonCopyToCombine::combine(MachineInstr *I1, MachineInstr *I2,
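
Another recurring change in this file: the pre-C++11 helper llvm::next gives way to std::next (and, elsewhere in the merge, prior to std::prev), the standard <iterator> utilities that return an advanced copy without touching the original iterator. A minimal standalone sketch:

    #include <cstdio>
    #include <iterator>
    #include <list>

    int main() {
      std::list<int> L{10, 20, 30};
      auto I = L.begin();
      auto J = std::next(I);        // iterator to 20; I still points at 10
      auto K = std::prev(L.end());  // iterator to the last element, 30
      std::printf("%d %d %d\n", *I, *J, *K);
      return 0;
    }
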
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp
index 8a5991f..3dafe80 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp
@@ -60,10 +60,10 @@ class HexagonExpandPredSpillCode : public MachineFunctionPass {
initializeHexagonExpandPredSpillCodePass(Registry);
}
- const char *getPassName() const {
+ const char *getPassName() const override {
return "Hexagon Expand Predicate Spill Code";
}
- bool runOnMachineFunction(MachineFunction &Fn);
+ bool runOnMachineFunction(MachineFunction &Fn) override;
};
@@ -187,7 +187,7 @@ static void initializePassOnce(PassRegistry &Registry) {
const char *Name = "Hexagon Expand Predicate Spill Code";
PassInfo *PI = new PassInfo(Name, "hexagon-spill-pred",
&HexagonExpandPredSpillCode::ID,
- 0, false, false);
+ nullptr, false, false);
Registry.registerPass(*PI, true);
}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
index 240cc95..d41939a 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
@@ -15,6 +15,8 @@
#include "llvm/ADT/DenseMap.h"
+#include "Hexagon.h"
+#include "HexagonTargetMachine.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -22,8 +24,6 @@
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/PassSupport.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "Hexagon.h"
-#include "HexagonTargetMachine.h"
using namespace llvm;
@@ -40,11 +40,13 @@ namespace {
initializeHexagonFixupHwLoopsPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- const char *getPassName() const { return "Hexagon Hardware Loop Fixup"; }
+ const char *getPassName() const override {
+ return "Hexagon Hardware Loop Fixup";
+ }
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
index 2b04f25..21df12f 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -144,14 +144,14 @@ bool HexagonFrameLowering::hasTailCall(MachineBasicBlock &MBB) const {
void HexagonFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
- MachineBasicBlock::iterator MBBI = prior(MBB.end());
+ MachineBasicBlock::iterator MBBI = std::prev(MBB.end());
DebugLoc dl = MBBI->getDebugLoc();
//
// Only insert deallocframe if we need to. Also at -O0. See comment
// in emitPrologue above.
//
if (hasFP(MF) || MF.getTarget().getOptLevel() == CodeGenOpt::None) {
- MachineBasicBlock::iterator MBBI = prior(MBB.end());
+ MachineBasicBlock::iterator MBBI = std::prev(MBB.end());
MachineBasicBlock::iterator MBBI_end = MBB.end();
const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
@@ -165,12 +165,12 @@ void HexagonFrameLowering::emitEpilogue(MachineFunction &MF,
}
// Replace 'jumpr r31' instruction with dealloc_return for V4 and higher
// versions.
- if (STI.hasV4TOps() && MBBI->getOpcode() == Hexagon::JMPret
- && !DisableDeallocRet) {
+ if (MF.getTarget().getSubtarget<HexagonSubtarget>().hasV4TOps() &&
+ MBBI->getOpcode() == Hexagon::JMPret && !DisableDeallocRet) {
// Check for RESTORE_DEALLOC_RET_JMP_V4 call. Don't emit an extra DEALLOC
// instruction if we encounter it.
MachineBasicBlock::iterator BeforeJMPR =
- MBB.begin() == MBBI ? MBBI : prior(MBBI);
+ MBB.begin() == MBBI ? MBBI : std::prev(MBBI);
if (BeforeJMPR != MBBI &&
BeforeJMPR->getOpcode() == Hexagon::RESTORE_DEALLOC_RET_JMP_V4) {
// Remove the JMPR node.
@@ -190,7 +190,7 @@ void HexagonFrameLowering::emitEpilogue(MachineFunction &MF,
// DEALLOCFRAME instruction after it.
MachineBasicBlock::iterator Term = MBB.getFirstTerminator();
MachineBasicBlock::iterator I =
- Term == MBB.begin() ? MBB.end() : prior(Term);
+ Term == MBB.begin() ? MBB.end() : std::prev(Term);
if (I != MBB.end() &&
I->getOpcode() == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4)
return;
@@ -246,7 +246,7 @@ HexagonFrameLowering::spillCalleeSavedRegisters(
//
unsigned SuperReg = uniqueSuperReg(Reg, TRI);
bool CanUseDblStore = false;
- const TargetRegisterClass* SuperRegClass = 0;
+ const TargetRegisterClass* SuperRegClass = nullptr;
if (ContiguousRegs && (i < CSI.size()-1)) {
unsigned SuperRegNext = uniqueSuperReg(CSI[i+1].getReg(), TRI);
@@ -300,7 +300,7 @@ bool HexagonFrameLowering::restoreCalleeSavedRegisters(
// Check if we can use a double-word load.
//
unsigned SuperReg = uniqueSuperReg(Reg, TRI);
- const TargetRegisterClass* SuperRegClass = 0;
+ const TargetRegisterClass* SuperRegClass = nullptr;
bool CanUseDblLoad = false;
if (ContiguousRegs && (i < CSI.size()-1)) {
unsigned SuperRegNext = uniqueSuperReg(CSI[i+1].getReg(), TRI);
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.h b/contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.h
index a62c76a..2d4b0b9 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.h
@@ -11,42 +11,38 @@
#define HEXAGON_FRAMEINFO_H
#include "Hexagon.h"
-#include "HexagonSubtarget.h"
#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
class HexagonFrameLowering : public TargetFrameLowering {
private:
- const HexagonSubtarget &STI;
void determineFrameLayout(MachineFunction &MF) const;
public:
- explicit HexagonFrameLowering(const HexagonSubtarget &sti)
- : TargetFrameLowering(StackGrowsDown, 8, 0), STI(sti) {
- }
+ explicit HexagonFrameLowering() : TargetFrameLowering(StackGrowsDown, 8, 0) {}
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
- virtual bool
- spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
-
- void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
-
- virtual bool
+ void emitPrologue(MachineFunction &MF) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+ bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const override;
+
+ void
+ eliminateCallFramePseudoInstr(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const override;
+
+ bool
restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
- int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
- bool hasFP(const MachineFunction &MF) const;
+ const TargetRegisterInfo *TRI) const override;
+ int getFrameIndexOffset(const MachineFunction &MF, int FI) const override;
+ bool hasFP(const MachineFunction &MF) const override;
bool hasTailCall(MachineBasicBlock &MBB) const;
};
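
The frame-lowering changes above remove the HexagonSubtarget reference that used to be cached in the constructor; HexagonFrameLowering.cpp now asks the MachineFunction's target for the subtarget at each use site (MF.getTarget().getSubtarget<HexagonSubtarget>()). A minimal sketch of that design choice with hypothetical stand-in classes, not the LLVM API:

    #include <cstdio>

    struct Subtarget { bool HasV4TOps = true; };

    struct Target {
      Subtarget ST;
      const Subtarget &getSubtarget() const { return ST; }
    };

    struct Function {
      Target T;
      const Target &getTarget() const { return T; }
    };

    // No cached Subtarget member: the property is looked up where it is
    // needed, so the lowering object stays correct no matter which target
    // configuration the function it is handed was built for.
    void emitEpilogue(const Function &F) {
      if (F.getTarget().getSubtarget().HasV4TOps)
        std::puts("emit dealloc_return (V4 and newer)");
      else
        std::puts("emit plain jumpr r31");
    }

    int main() {
      Function F;
      emitEpilogue(F);
      return 0;
    }
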
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index 52d5ab2..7f76421 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -26,8 +26,9 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "hwloops"
#include "llvm/ADT/SmallSet.h"
+#include "Hexagon.h"
+#include "HexagonTargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -40,14 +41,13 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "Hexagon.h"
-#include "HexagonTargetMachine.h"
-
#include <algorithm>
#include <vector>
using namespace llvm;
+#define DEBUG_TYPE "hwloops"
+
#ifndef NDEBUG
static cl::opt<int> HWLoopLimit("max-hwloop", cl::Hidden, cl::init(-1));
#endif
@@ -78,11 +78,11 @@ namespace {
initializeHexagonHardwareLoopsPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- const char *getPassName() const { return "Hexagon Hardware Loops"; }
+ const char *getPassName() const override { return "Hexagon Hardware Loops"; }
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MachineDominatorTree>();
AU.addRequired<MachineLoopInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
@@ -265,8 +265,8 @@ namespace {
return Contents.ImmVal;
}
- void print(raw_ostream &OS, const TargetMachine *TM = 0) const {
- const TargetRegisterInfo *TRI = TM ? TM->getRegisterInfo() : 0;
+ void print(raw_ostream &OS, const TargetMachine *TM = nullptr) const {
+ const TargetRegisterInfo *TRI = TM ? TM->getRegisterInfo() : nullptr;
if (isReg()) { OS << PrintReg(Contents.R.Reg, TRI, Contents.R.Sub); }
if (isImm()) { OS << Contents.ImmVal; }
}
@@ -370,7 +370,7 @@ bool HexagonHardwareLoops::findInductionRegister(MachineLoop *L,
} // for (instr)
SmallVector<MachineOperand,2> Cond;
- MachineBasicBlock *TB = 0, *FB = 0;
+ MachineBasicBlock *TB = nullptr, *FB = nullptr;
bool NotAnalyzed = TII->AnalyzeBranch(*Latch, TB, FB, Cond, false);
if (NotAnalyzed)
return false;
@@ -435,37 +435,37 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L,
"Loop must have more than one incoming edge!");
MachineBasicBlock *Backedge = *PI++;
if (PI == TopMBB->pred_end()) // dead loop?
- return 0;
+ return nullptr;
MachineBasicBlock *Incoming = *PI++;
if (PI != TopMBB->pred_end()) // multiple backedges?
- return 0;
+ return nullptr;
// Make sure there is one incoming and one backedge and determine which
// is which.
if (L->contains(Incoming)) {
if (L->contains(Backedge))
- return 0;
+ return nullptr;
std::swap(Incoming, Backedge);
} else if (!L->contains(Backedge))
- return 0;
+ return nullptr;
// Look for the cmp instruction to determine if we can get a useful trip
// count. The trip count can be either a register or an immediate. The
// location of the value depends upon the type (reg or imm).
MachineBasicBlock *Latch = L->getLoopLatch();
if (!Latch)
- return 0;
+ return nullptr;
unsigned IVReg = 0;
int64_t IVBump = 0;
MachineInstr *IVOp;
bool FoundIV = findInductionRegister(L, IVReg, IVBump, IVOp);
if (!FoundIV)
- return 0;
+ return nullptr;
MachineBasicBlock *Preheader = L->getLoopPreheader();
- MachineOperand *InitialValue = 0;
+ MachineOperand *InitialValue = nullptr;
MachineInstr *IV_Phi = MRI->getVRegDef(IVReg);
for (unsigned i = 1, n = IV_Phi->getNumOperands(); i < n; i += 2) {
MachineBasicBlock *MBB = IV_Phi->getOperand(i+1).getMBB();
@@ -475,13 +475,13 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L,
IVReg = IV_Phi->getOperand(i).getReg(); // Want IV reg after bump.
}
if (!InitialValue)
- return 0;
+ return nullptr;
SmallVector<MachineOperand,2> Cond;
- MachineBasicBlock *TB = 0, *FB = 0;
+ MachineBasicBlock *TB = nullptr, *FB = nullptr;
bool NotAnalyzed = TII->AnalyzeBranch(*Latch, TB, FB, Cond, false);
if (NotAnalyzed)
- return 0;
+ return nullptr;
MachineBasicBlock *Header = L->getHeader();
// TB must be non-null. If FB is also non-null, one of them must be
@@ -490,7 +490,7 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L,
assert (TB && "Latch block without a branch?");
assert ((!FB || TB == Header || FB == Header) && "Branches not to header?");
if (!TB || (FB && TB != Header && FB != Header))
- return 0;
+ return nullptr;
// Branches of form "if (!P) ..." cause HexagonInstrInfo::AnalyzeBranch
// to put imm(0), followed by P in the vector Cond.
@@ -506,7 +506,7 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L,
bool AnalyzedCmp = TII->analyzeCompare(CondI, CmpReg1, CmpReg2,
Mask, ImmValue);
if (!AnalyzedCmp)
- return 0;
+ return nullptr;
// The comparison operator type determines how we compute the loop
// trip count.
@@ -522,7 +522,7 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L,
bool isSwapped = false;
const MachineOperand &Op1 = CondI->getOperand(1);
const MachineOperand &Op2 = CondI->getOperand(2);
- const MachineOperand *EndValue = 0;
+ const MachineOperand *EndValue = nullptr;
if (Op1.isReg()) {
if (Op2.isImm() || Op1.getReg() == IVReg)
@@ -534,7 +534,7 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L,
}
if (!EndValue)
- return 0;
+ return nullptr;
switch (CondOpc) {
case Hexagon::CMPEQri:
@@ -553,7 +553,7 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L,
case Hexagon::CMPbEQri_V4:
case Hexagon::CMPhEQri_V4: {
if (IVBump != 1)
- return 0;
+ return nullptr;
int64_t InitV, EndV;
// Since the comparisons are "ri", the EndValue should be an
@@ -563,26 +563,26 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L,
// Allow InitialValue to be a register defined with an immediate.
if (InitialValue->isReg()) {
if (!defWithImmediate(InitialValue->getReg()))
- return 0;
+ return nullptr;
InitV = getImmediate(*InitialValue);
} else {
assert(InitialValue->isImm());
InitV = InitialValue->getImm();
}
if (InitV >= EndV)
- return 0;
+ return nullptr;
if (CondOpc == Hexagon::CMPbEQri_V4) {
if (!isInt<8>(InitV) || !isInt<8>(EndV))
- return 0;
+ return nullptr;
} else { // Hexagon::CMPhEQri_V4
if (!isInt<16>(InitV) || !isInt<16>(EndV))
- return 0;
+ return nullptr;
}
Cmp = !Negated ? Comparison::EQ : Comparison::NE;
break;
}
default:
- return 0;
+ return nullptr;
}
if (isSwapped)
@@ -592,14 +592,14 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L,
unsigned R = InitialValue->getReg();
MachineBasicBlock *DefBB = MRI->getVRegDef(R)->getParent();
if (!MDT->properlyDominates(DefBB, Header))
- return 0;
+ return nullptr;
OldInsts.push_back(MRI->getVRegDef(R));
}
if (EndValue->isReg()) {
unsigned R = EndValue->getReg();
MachineBasicBlock *DefBB = MRI->getVRegDef(R)->getParent();
if (!MDT->properlyDominates(DefBB, Header))
- return 0;
+ return nullptr;
}
return computeCount(L, InitialValue, EndValue, IVReg, IVBump, Cmp);
@@ -617,7 +617,7 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop,
Comparison::Kind Cmp) const {
// Cannot handle comparison EQ, i.e. while (A == B).
if (Cmp == Comparison::EQ)
- return 0;
+ return nullptr;
// Check if either the start or end values are an assignment of an immediate.
// If so, use the immediate value rather than the register.
@@ -643,11 +643,11 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop,
// If loop executes while iv is "less" with the iv value going down, then
// the iv must wrap.
if (CmpLess && IVBump < 0)
- return 0;
+ return nullptr;
// If loop executes while iv is "greater" with the iv value going up, then
// the iv must wrap.
if (CmpGreater && IVBump > 0)
- return 0;
+ return nullptr;
if (Start->isImm() && End->isImm()) {
// Both, start and end are immediates.
@@ -655,15 +655,15 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop,
int64_t EndV = End->getImm();
int64_t Dist = EndV - StartV;
if (Dist == 0)
- return 0;
+ return nullptr;
bool Exact = (Dist % IVBump) == 0;
if (Cmp == Comparison::NE) {
if (!Exact)
- return 0;
+ return nullptr;
if ((Dist < 0) ^ (IVBump < 0))
- return 0;
+ return nullptr;
}
// For comparisons that include the final value (i.e. include equality
@@ -684,7 +684,7 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop,
uint64_t Count = Dist1;
if (Count > 0xFFFFFFFFULL)
- return 0;
+ return nullptr;
return new CountValue(CountValue::CV_Immediate, Count);
}
@@ -696,7 +696,7 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop,
// If the induction variable bump is not a power of 2, quit.
  // Otherwise we'd need a general integer division.
if (!isPowerOf2_64(abs64(IVBump)))
- return 0;
+ return nullptr;
MachineBasicBlock *PH = Loop->getLoopPreheader();
assert (PH && "Should have a preheader by now");
@@ -767,7 +767,7 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop,
// Hardware loops cannot handle 64-bit registers. If it's a double
// register, it has to have a subregister.
if (!SR && RC == &Hexagon::DoubleRegsRegClass)
- return 0;
+ return nullptr;
const TargetRegisterClass *IntRC = &Hexagon::IntRegsRegClass;
// Compute DistR (register with the distance between Start and End).
@@ -908,10 +908,10 @@ bool HexagonHardwareLoops::isDead(const MachineInstr *MI,
// this instruction is dead: both it (and the phi node) can be removed.
use_nodbg_iterator I = MRI->use_nodbg_begin(Reg);
use_nodbg_iterator End = MRI->use_nodbg_end();
- if (llvm::next(I) != End || !I.getOperand().getParent()->isPHI())
+ if (std::next(I) != End || !I->getParent()->isPHI())
return false;
- MachineInstr *OnePhi = I.getOperand().getParent();
+ MachineInstr *OnePhi = I->getParent();
for (unsigned j = 0, f = OnePhi->getNumOperands(); j != f; ++j) {
const MachineOperand &OPO = OnePhi->getOperand(j);
if (!OPO.isReg() || !OPO.isDef())
@@ -921,8 +921,8 @@ bool HexagonHardwareLoops::isDead(const MachineInstr *MI,
use_nodbg_iterator nextJ;
for (use_nodbg_iterator J = MRI->use_nodbg_begin(OPReg);
J != End; J = nextJ) {
- nextJ = llvm::next(J);
- MachineOperand &Use = J.getOperand();
+ nextJ = std::next(J);
+ MachineOperand &Use = *J;
MachineInstr *UseMI = Use.getParent();
// If the phi node has a user that is not MI, bail...
@@ -955,9 +955,9 @@ void HexagonHardwareLoops::removeIfDead(MachineInstr *MI) {
MachineRegisterInfo::use_iterator nextI;
for (MachineRegisterInfo::use_iterator I = MRI->use_begin(Reg),
E = MRI->use_end(); I != E; I = nextI) {
- nextI = llvm::next(I); // I is invalidated by the setReg
- MachineOperand &Use = I.getOperand();
- MachineInstr *UseMI = Use.getParent();
+ nextI = std::next(I); // I is invalidated by the setReg
+ MachineOperand &Use = *I;
+ MachineInstr *UseMI = I->getParent();
if (UseMI == MI)
continue;
if (Use.isDebug())
@@ -1014,7 +1014,7 @@ bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L) {
MachineBasicBlock *LastMBB = L->getExitingBlock();
// Don't generate hw loop if the loop has more than one exit.
- if (LastMBB == 0)
+ if (!LastMBB)
return false;
MachineBasicBlock::iterator LastI = LastMBB->getFirstTerminator();
@@ -1036,7 +1036,7 @@ bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L) {
SmallVector<MachineInstr*, 2> OldInsts;
// Are we able to determine the trip count for the loop?
CountValue *TripCount = getLoopTripCount(L, OldInsts);
- if (TripCount == 0)
+ if (!TripCount)
return false;
// Is the trip count available in the preheader?
@@ -1128,7 +1128,7 @@ bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L) {
if (LastI != LastMBB->end())
LastI = LastMBB->erase(LastI);
SmallVector<MachineOperand, 0> Cond;
- TII->InsertBranch(*LastMBB, BranchTarget, 0, Cond, LastIDL);
+ TII->InsertBranch(*LastMBB, BranchTarget, nullptr, Cond, LastIDL);
}
} else {
// Conditional branch to loop start; just delete it.
@@ -1163,7 +1163,7 @@ bool HexagonHardwareLoops::orderBumpCompare(MachineInstr *BumpI,
// Out of order.
unsigned PredR = CmpI->getOperand(0).getReg();
bool FoundBump = false;
- instr_iterator CmpIt = CmpI, NextIt = llvm::next(CmpIt);
+ instr_iterator CmpIt = CmpI, NextIt = std::next(CmpIt);
for (instr_iterator I = NextIt, E = BB->instr_end(); I != E; ++I) {
MachineInstr *In = &*I;
for (unsigned i = 0, n = In->getNumOperands(); i < n; ++i) {
@@ -1177,7 +1177,7 @@ bool HexagonHardwareLoops::orderBumpCompare(MachineInstr *BumpI,
if (In == BumpI) {
instr_iterator After = BumpI;
instr_iterator From = CmpI;
- BB->splice(llvm::next(After), BB, From);
+ BB->splice(std::next(After), BB, From);
FoundBump = true;
break;
}
@@ -1197,7 +1197,7 @@ MachineInstr *HexagonHardwareLoops::defWithImmediate(unsigned R) {
case Hexagon::CONST64_Int_Real:
return DI;
}
- return 0;
+ return nullptr;
}
@@ -1292,7 +1292,7 @@ bool HexagonHardwareLoops::fixupInductionVariable(MachineLoop *L) {
if (IndRegs.empty())
return false;
- MachineBasicBlock *TB = 0, *FB = 0;
+ MachineBasicBlock *TB = nullptr, *FB = nullptr;
SmallVector<MachineOperand,2> Cond;
// AnalyzeBranch returns true if it fails to analyze branch.
bool NotAnalyzed = TII->AnalyzeBranch(*Latch, TB, FB, Cond, false);
@@ -1323,7 +1323,7 @@ bool HexagonHardwareLoops::fixupInductionVariable(MachineLoop *L) {
return false;
SmallSet<unsigned,2> CmpRegs;
- MachineOperand *CmpImmOp = 0;
+ MachineOperand *CmpImmOp = nullptr;
// Go over all operands to the compare and look for immediate and register
// operands. Assume that if the compare has a single register use and a
@@ -1421,7 +1421,7 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop(
DebugLoc DL;
if (!Latch || Header->hasAddressTaken())
- return 0;
+ return nullptr;
typedef MachineBasicBlock::instr_iterator instr_iterator;
@@ -1430,17 +1430,17 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop(
typedef std::vector<MachineBasicBlock*> MBBVector;
MBBVector Preds(Header->pred_begin(), Header->pred_end());
SmallVector<MachineOperand,2> Tmp1;
- MachineBasicBlock *TB = 0, *FB = 0;
+ MachineBasicBlock *TB = nullptr, *FB = nullptr;
if (TII->AnalyzeBranch(*Latch, TB, FB, Tmp1, false))
- return 0;
+ return nullptr;
for (MBBVector::iterator I = Preds.begin(), E = Preds.end(); I != E; ++I) {
MachineBasicBlock *PB = *I;
if (PB != Latch) {
bool NotAnalyzed = TII->AnalyzeBranch(*PB, TB, FB, Tmp1, false);
if (NotAnalyzed)
- return 0;
+ return nullptr;
}
}
@@ -1516,32 +1516,32 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop(
SmallVector<MachineOperand,1> Tmp2;
SmallVector<MachineOperand,1> EmptyCond;
- TB = FB = 0;
+ TB = FB = nullptr;
for (MBBVector::iterator I = Preds.begin(), E = Preds.end(); I != E; ++I) {
MachineBasicBlock *PB = *I;
if (PB != Latch) {
Tmp2.clear();
bool NotAnalyzed = TII->AnalyzeBranch(*PB, TB, FB, Tmp2, false);
- (void)NotAnalyzed; // supress compiler warning
+ (void)NotAnalyzed; // suppress compiler warning
assert (!NotAnalyzed && "Should be analyzable!");
if (TB != Header && (Tmp2.empty() || FB != Header))
- TII->InsertBranch(*PB, NewPH, 0, EmptyCond, DL);
+ TII->InsertBranch(*PB, NewPH, nullptr, EmptyCond, DL);
PB->ReplaceUsesOfBlockWith(Header, NewPH);
}
}
// It can happen that the latch block will fall through into the header.
// Insert an unconditional branch to the header.
- TB = FB = 0;
+ TB = FB = nullptr;
bool LatchNotAnalyzed = TII->AnalyzeBranch(*Latch, TB, FB, Tmp2, false);
- (void)LatchNotAnalyzed; // supress compiler warning
+ (void)LatchNotAnalyzed; // suppress compiler warning
assert (!LatchNotAnalyzed && "Should be analyzable!");
if (!TB && !FB)
- TII->InsertBranch(*Latch, Header, 0, EmptyCond, DL);
+ TII->InsertBranch(*Latch, Header, nullptr, EmptyCond, DL);
// Finally, the branch from the preheader to the header.
- TII->InsertBranch(*NewPH, Header, 0, EmptyCond, DL);
+ TII->InsertBranch(*NewPH, Header, nullptr, EmptyCond, DL);
NewPH->addSuccessor(Header);
return NewPH;
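
The comments in computeCount above spell out the arithmetic behind the hardware-loop trip count: take the start-to-end distance, require it to be reachable by the induction bump, widen it by one bump when the comparison includes the end value, and divide. A rough standalone sketch of that immediate-operand case, deliberately simplified and not the code from the patch:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // Returns -1 when no usable trip count exists.
    int64_t tripCount(int64_t Start, int64_t End, int64_t Bump,
                      bool InclusiveEnd, bool IsNE) {
      if (Bump == 0)
        return -1;
      int64_t Dist = End - Start;
      if (Dist != 0 && (Dist < 0) != (Bump < 0))
        return -1;                              // bumping away from End: the IV would wrap
      int64_t AbsDist = std::llabs(Dist);
      int64_t AbsBump = std::llabs(Bump);
      if (IsNE) {
        if (AbsDist % AbsBump != 0)
          return -1;                            // 'i != End' never lands exactly on End
        return AbsDist / AbsBump;
      }
      if (InclusiveEnd)
        AbsDist += AbsBump;                     // '<=' / '>=' runs one extra step
      return (AbsDist + AbsBump - 1) / AbsBump; // a partial last step still iterates
    }

    int main() {
      std::printf("%lld\n", (long long)tripCount(0, 10, 2, false, false)); // 5
      std::printf("%lld\n", (long long)tripCount(0, 10, 2, true,  false)); // 6
      std::printf("%lld\n", (long long)tripCount(0, 10, 3, false, true));  // -1
      return 0;
    }
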
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index 5ae93284..dabe650 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -11,18 +11,19 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "hexagon-isel"
#include "Hexagon.h"
#include "HexagonISelLowering.h"
#include "HexagonTargetMachine.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/IR/Intrinsics.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
+#define DEBUG_TYPE "hexagon-isel"
+
static
cl::opt<unsigned>
MaxNumOfUsesForConstExtenders("ga-max-num-uses-for-constant-extenders",
@@ -61,7 +62,7 @@ public:
}
bool hasNumUsesBelowThresGA(SDNode *N) const;
- SDNode *Select(SDNode *N);
+ SDNode *Select(SDNode *N) override;
// Complex Pattern Selectors.
inline bool foldGlobalAddress(SDValue &N, SDValue &R);
@@ -78,15 +79,15 @@ public:
bool SelectADDRriU6_1(SDValue& N, SDValue &R1, SDValue &R2);
bool SelectADDRriU6_2(SDValue& N, SDValue &R1, SDValue &R2);
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Hexagon DAG->DAG Pattern Instruction Selection";
}
/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
/// inline asm expressions.
- virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
- char ConstraintCode,
- std::vector<SDValue> &OutOps);
+ bool SelectInlineAsmMemoryOperand(const SDValue &Op,
+ char ConstraintCode,
+ std::vector<SDValue> &OutOps) override;
bool SelectAddr(SDNode *Op, SDValue Addr, SDValue &Base, SDValue &Offset);
SDNode *SelectLoad(SDNode *N);
@@ -186,7 +187,7 @@ FunctionPass *llvm::createHexagonISelDag(HexagonTargetMachine &TM,
static void initializePassOnce(PassRegistry &Registry) {
const char *Name = "Hexagon DAG->DAG Pattern Instruction Selection";
PassInfo *PI = new PassInfo(Name, "hexagon-isel",
- &SelectionDAGISel::ID, 0, false, false);
+ &SelectionDAGISel::ID, nullptr, false, false);
Registry.registerPass(*PI, true);
}
@@ -1238,7 +1239,7 @@ SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) {
SDNode *PdRs = CurDAG->getMachineNode(Hexagon::TFR_PdRs, dl, MVT::i1,
SDValue(Arg, 0));
Ops.push_back(SDValue(PdRs,0));
- } else if (RC == NULL && (dyn_cast<ConstantSDNode>(Arg) != NULL)) {
+ } else if (!RC && (dyn_cast<ConstantSDNode>(Arg) != nullptr)) {
// This is immediate operand. Lower it here making sure that we DO have
// const SDNode for immediate value.
int32_t Val = cast<ConstantSDNode>(Arg)->getSExtValue();
@@ -1346,7 +1347,7 @@ SDNode *HexagonDAGToDAGISel::SelectAdd(SDNode *N) {
SDNode *HexagonDAGToDAGISel::Select(SDNode *N) {
if (N->isMachineOpcode()) {
N->setNodeId(-1);
- return NULL; // Already selected.
+ return nullptr; // Already selected.
}
@@ -1639,7 +1640,7 @@ bool HexagonDAGToDAGISel::hasNumUsesBelowThresGA(SDNode *N) const {
}
//===--------------------------------------------------------------------===//
-// Return true if the non GP-relative global address can be folded.
+// Return true if the non-GP-relative global address can be folded.
//===--------------------------------------------------------------------===//
inline bool HexagonDAGToDAGISel::foldGlobalAddress(SDValue &N, SDValue &R) {
return foldGlobalAddressImpl(N, R, false);
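
One more pattern repeated across these files, including the HexagonISelDAGToDAG hunks above: '#define DEBUG_TYPE "..."' moves from the top of the file to just after the include block and 'using namespace llvm;', presumably so the file-local macro can no longer leak into, or collide with, anything defined by the shared headers that are included first. A tiny self-contained illustration of the hazard, using a stand-in macro rather than the LLVM debug machinery:

    #include <cstdio>

    // Pretend the next few lines came from a shared header: its behaviour
    // would silently change if DEBUG_TYPE were already defined above the
    // includes.
    #ifndef DEBUG_TYPE
    #define HEADER_SAW_NO_DEBUG_TYPE 1
    #endif

    // File-local macro, defined only after everything shared is in place.
    #define DEBUG_TYPE "hexagon-isel"

    int main() {
    #ifdef HEADER_SAW_NO_DEBUG_TYPE
      std::printf("headers were preprocessed before %s was defined\n", DEBUG_TYPE);
    #endif
      return 0;
    }
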
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 1374179..a460ea4 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -39,6 +39,8 @@
using namespace llvm;
+#define DEBUG_TYPE "hexagon-lowering"
+
static cl::opt<bool>
EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden,
cl::desc("Control jump table emission on Hexagon target"));
@@ -135,7 +137,7 @@ CC_Hexagon_VarArg (unsigned ValNo, MVT ValVT,
State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
return false;
}
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
@@ -182,7 +184,7 @@ static bool CC_Hexagon32(unsigned ValNo, MVT ValVT,
MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State) {
- static const uint16_t RegList[] = {
+ static const MCPhysReg RegList[] = {
Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
Hexagon::R5
};
@@ -205,10 +207,10 @@ static bool CC_Hexagon64(unsigned ValNo, MVT ValVT,
return false;
}
- static const uint16_t RegList1[] = {
+ static const MCPhysReg RegList1[] = {
Hexagon::D1, Hexagon::D2
};
- static const uint16_t RegList2[] = {
+ static const MCPhysReg RegList2[] = {
Hexagon::R1, Hexagon::R3
};
if (unsigned Reg = State.AllocateReg(RegList1, RegList2, 2)) {
@@ -346,8 +348,7 @@ HexagonTargetLowering::LowerReturn(SDValue Chain,
if (Flag.getNode())
RetOps.push_back(Flag);
- return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other,
- &RetOps[0], RetOps.size());
+ return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, RetOps);
}
@@ -410,7 +411,7 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
int NumNamedVarArgParams = -1;
if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee))
{
- const Function* CalleeFn = NULL;
+ const Function* CalleeFn = nullptr;
Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, MVT::i32);
if ((CalleeFn = dyn_cast<Function>(GA->getGlobal())))
{
@@ -462,9 +463,10 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
SmallVector<SDValue, 8> MemOpChains;
+ const HexagonRegisterInfo *QRI = static_cast<const HexagonRegisterInfo *>(
+ DAG.getTarget().getRegisterInfo());
SDValue StackPtr =
- DAG.getCopyFromReg(Chain, dl, TM.getRegisterInfo()->getStackRegister(),
- getPointerTy());
+ DAG.getCopyFromReg(Chain, dl, QRI->getStackRegister(), getPointerTy());
// Walk the register/memloc assignments, inserting copies/loads.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
@@ -520,8 +522,7 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Transform all store nodes into one single node because all store
// nodes are independent of each other.
if (!MemOpChains.empty()) {
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOpChains[0],
- MemOpChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
}
if (!isTailCall)
@@ -595,9 +596,9 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
if (isTailCall)
- return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());
+ return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops);
- Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
+ Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, Ops);
InFlag = Chain.getValue(1);
// Create the CALLSEQ_END node.
@@ -720,7 +721,10 @@ SDValue HexagonTargetLowering::LowerINLINEASM(SDValue Op,
cast<RegisterSDNode>(Node->getOperand(i))->getReg();
// Check it to be lr
- if (Reg == TM.getRegisterInfo()->getRARegister()) {
+ const HexagonRegisterInfo *QRI =
+ static_cast<const HexagonRegisterInfo *>(
+ DAG.getTarget().getRegisterInfo());
+ if (Reg == QRI->getRARegister()) {
FuncInfo->setHasClobberLR(true);
break;
}
@@ -812,12 +816,12 @@ HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
// The Sub result contains the new stack start address, so it
// must be placed in the stack pointer register.
- SDValue CopyChain = DAG.getCopyToReg(Chain, dl,
- TM.getRegisterInfo()->getStackRegister(),
- Sub);
+ const HexagonRegisterInfo *QRI = static_cast<const HexagonRegisterInfo *>(
+ DAG.getTarget().getRegisterInfo());
+ SDValue CopyChain = DAG.getCopyToReg(Chain, dl, QRI->getStackRegister(), Sub);
SDValue Ops[2] = { ArgAdjust, CopyChain };
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, dl);
}
SDValue
@@ -916,8 +920,7 @@ const {
}
if (!MemOps.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOps[0],
- MemOps.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
if (isVarArg) {
// This will point to the next argument passed via stack.
@@ -945,21 +948,6 @@ HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
}
SDValue
-HexagonTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
- SDValue LHS = Op.getOperand(0);
- SDValue RHS = Op.getOperand(1);
- SDValue CC = Op.getOperand(4);
- SDValue TrueVal = Op.getOperand(2);
- SDValue FalseVal = Op.getOperand(3);
- SDLoc dl(Op);
- SDNode* OpNode = Op.getNode();
- EVT SVT = OpNode->getValueType(0);
-
- SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i1, LHS, RHS, CC);
- return DAG.getNode(ISD::SELECT, dl, SVT, Cond, TrueVal, FalseVal);
-}
-
-SDValue
HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
EVT ValTy = Op.getValueType();
SDLoc dl(Op);
@@ -976,11 +964,14 @@ HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
SDValue
HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
- const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MFI->setReturnAddressIsTaken(true);
+ if (verifyReturnAddressArgumentIsConstant(Op, DAG))
+ return SDValue();
+
EVT VT = Op.getValueType();
SDLoc dl(Op);
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
@@ -999,7 +990,8 @@ HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
SDValue
HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
- const HexagonRegisterInfo *TRI = TM.getRegisterInfo();
+ const HexagonRegisterInfo *TRI =
+ static_cast<const HexagonRegisterInfo *>(DAG.getTarget().getRegisterInfo());
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
MFI->setFrameAddressIsTaken(true);
@@ -1051,433 +1043,426 @@ HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
// TargetLowering Implementation
//===----------------------------------------------------------------------===//
-HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
- &targetmachine)
- : TargetLowering(targetmachine, new HexagonTargetObjectFile()),
- TM(targetmachine) {
+HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &targetmachine)
+ : TargetLowering(targetmachine, new HexagonTargetObjectFile()),
+ TM(targetmachine) {
- const HexagonRegisterInfo* QRI = TM.getRegisterInfo();
+ const HexagonSubtarget &Subtarget = TM.getSubtarget<HexagonSubtarget>();
- // Set up the register classes.
- addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
- addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass);
-
- if (QRI->Subtarget.hasV5TOps()) {
- addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
- addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);
- }
+ // Set up the register classes.
+ addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
+ addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass);
- addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass);
-
- computeRegisterProperties();
-
- // Align loop entry
- setPrefLoopAlignment(4);
+ if (Subtarget.hasV5TOps()) {
+ addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
+ addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);
+ }
- // Limits for inline expansion of memcpy/memmove
- MaxStoresPerMemcpy = 6;
- MaxStoresPerMemmove = 6;
+ addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass);
- //
- // Library calls for unsupported operations
- //
+ computeRegisterProperties();
- setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
- setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
-
- setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
- setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
-
- setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
- setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");
-
- setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
- setOperationAction(ISD::SDIV, MVT::i32, Expand);
- setLibcallName(RTLIB::SREM_I32, "__hexagon_umodsi3");
- setOperationAction(ISD::SREM, MVT::i32, Expand);
-
- setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
- setOperationAction(ISD::SDIV, MVT::i64, Expand);
- setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
- setOperationAction(ISD::SREM, MVT::i64, Expand);
-
- setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
- setOperationAction(ISD::UDIV, MVT::i32, Expand);
-
- setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
- setOperationAction(ISD::UDIV, MVT::i64, Expand);
-
- setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
- setOperationAction(ISD::UREM, MVT::i32, Expand);
-
- setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
- setOperationAction(ISD::UREM, MVT::i64, Expand);
-
- setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3");
- setOperationAction(ISD::FDIV, MVT::f32, Expand);
-
- setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
- setOperationAction(ISD::FDIV, MVT::f64, Expand);
-
- setOperationAction(ISD::FSQRT, MVT::f32, Expand);
- setOperationAction(ISD::FSQRT, MVT::f64, Expand);
- setOperationAction(ISD::FSIN, MVT::f32, Expand);
- setOperationAction(ISD::FSIN, MVT::f64, Expand);
-
- if (QRI->Subtarget.hasV5TOps()) {
- // Hexagon V5 Support.
- setOperationAction(ISD::FADD, MVT::f32, Legal);
- setOperationAction(ISD::FADD, MVT::f64, Legal);
- setOperationAction(ISD::FP_EXTEND, MVT::f32, Legal);
- setCondCodeAction(ISD::SETOEQ, MVT::f32, Legal);
- setCondCodeAction(ISD::SETOEQ, MVT::f64, Legal);
- setCondCodeAction(ISD::SETUEQ, MVT::f32, Legal);
- setCondCodeAction(ISD::SETUEQ, MVT::f64, Legal);
-
- setCondCodeAction(ISD::SETOGE, MVT::f32, Legal);
- setCondCodeAction(ISD::SETOGE, MVT::f64, Legal);
- setCondCodeAction(ISD::SETUGE, MVT::f32, Legal);
- setCondCodeAction(ISD::SETUGE, MVT::f64, Legal);
-
- setCondCodeAction(ISD::SETOGT, MVT::f32, Legal);
- setCondCodeAction(ISD::SETOGT, MVT::f64, Legal);
- setCondCodeAction(ISD::SETUGT, MVT::f32, Legal);
- setCondCodeAction(ISD::SETUGT, MVT::f64, Legal);
-
- setCondCodeAction(ISD::SETOLE, MVT::f32, Legal);
- setCondCodeAction(ISD::SETOLE, MVT::f64, Legal);
- setCondCodeAction(ISD::SETOLT, MVT::f32, Legal);
- setCondCodeAction(ISD::SETOLT, MVT::f64, Legal);
-
- setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
- setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
-
- setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
- setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
- setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
- setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
-
- setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
- setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
- setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
- setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
-
- setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
- setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
- setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
- setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
-
- setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
- setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
- setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
- setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
-
- setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
- setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
- setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
- setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
-
- setOperationAction(ISD::FABS, MVT::f32, Legal);
- setOperationAction(ISD::FABS, MVT::f64, Expand);
-
- setOperationAction(ISD::FNEG, MVT::f32, Legal);
- setOperationAction(ISD::FNEG, MVT::f64, Expand);
- } else {
+ // Align loop entry
+ setPrefLoopAlignment(4);
- // Expand fp<->uint.
- setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand);
- setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
+ // Limits for inline expansion of memcpy/memmove
+ MaxStoresPerMemcpy = 6;
+ MaxStoresPerMemmove = 6;
- setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
- setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
+ //
+ // Library calls for unsupported operations
+ //
- setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf");
- setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf");
+ setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
+ setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
+
+ setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
+ setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
+
+ setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
+ setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");
+
+ setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
+ setOperationAction(ISD::SDIV, MVT::i32, Expand);
+ setLibcallName(RTLIB::SREM_I32, "__hexagon_umodsi3");
+ setOperationAction(ISD::SREM, MVT::i32, Expand);
+
+ setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
+ setOperationAction(ISD::SDIV, MVT::i64, Expand);
+ setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
+ setOperationAction(ISD::SREM, MVT::i64, Expand);
+
+ setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
+ setOperationAction(ISD::UDIV, MVT::i32, Expand);
+
+ setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
+ setOperationAction(ISD::UDIV, MVT::i64, Expand);
+
+ setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
+ setOperationAction(ISD::UREM, MVT::i32, Expand);
+
+ setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
+ setOperationAction(ISD::UREM, MVT::i64, Expand);
+
+ setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3");
+ setOperationAction(ISD::FDIV, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
+ setOperationAction(ISD::FDIV, MVT::f64, Expand);
+
+ setOperationAction(ISD::FSQRT, MVT::f32, Expand);
+ setOperationAction(ISD::FSQRT, MVT::f64, Expand);
+ setOperationAction(ISD::FSIN, MVT::f32, Expand);
+ setOperationAction(ISD::FSIN, MVT::f64, Expand);
+
+ if (Subtarget.hasV5TOps()) {
+ // Hexagon V5 Support.
+ setOperationAction(ISD::FADD, MVT::f32, Legal);
+ setOperationAction(ISD::FADD, MVT::f64, Legal);
+ setOperationAction(ISD::FP_EXTEND, MVT::f32, Legal);
+ setCondCodeAction(ISD::SETOEQ, MVT::f32, Legal);
+ setCondCodeAction(ISD::SETOEQ, MVT::f64, Legal);
+ setCondCodeAction(ISD::SETUEQ, MVT::f32, Legal);
+ setCondCodeAction(ISD::SETUEQ, MVT::f64, Legal);
+
+ setCondCodeAction(ISD::SETOGE, MVT::f32, Legal);
+ setCondCodeAction(ISD::SETOGE, MVT::f64, Legal);
+ setCondCodeAction(ISD::SETUGE, MVT::f32, Legal);
+ setCondCodeAction(ISD::SETUGE, MVT::f64, Legal);
+
+ setCondCodeAction(ISD::SETOGT, MVT::f32, Legal);
+ setCondCodeAction(ISD::SETOGT, MVT::f64, Legal);
+ setCondCodeAction(ISD::SETUGT, MVT::f32, Legal);
+ setCondCodeAction(ISD::SETUGT, MVT::f64, Legal);
+
+ setCondCodeAction(ISD::SETOLE, MVT::f32, Legal);
+ setCondCodeAction(ISD::SETOLE, MVT::f64, Legal);
+ setCondCodeAction(ISD::SETOLT, MVT::f32, Legal);
+ setCondCodeAction(ISD::SETOLT, MVT::f64, Legal);
+
+ setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
+ setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
+
+ setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
+ setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
+
+ setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
+ setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
+
+ setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
+ setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
+
+ setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
+ setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
+
+ setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
+ setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
+
+ setOperationAction(ISD::FABS, MVT::f32, Legal);
+ setOperationAction(ISD::FABS, MVT::f64, Expand);
+
+ setOperationAction(ISD::FNEG, MVT::f32, Legal);
+ setOperationAction(ISD::FNEG, MVT::f64, Expand);
+ } else {
- setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf");
- setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf");
+ // Expand fp<->uint.
+ setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand);
+ setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
- setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf");
- setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf");
+ setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
- setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf");
- setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf");
+ setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf");
+ setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf");
- setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi");
- setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi");
+ setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf");
+ setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf");
- setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi");
- setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi");
+ setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf");
+ setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf");
- setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi");
- setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi");
+ setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf");
+ setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf");
- setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
- setOperationAction(ISD::FADD, MVT::f64, Expand);
+ setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi");
+ setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi");
- setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3");
- setOperationAction(ISD::FADD, MVT::f32, Expand);
+ setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi");
+ setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi");
- setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2");
- setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand);
+ setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi");
+ setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi");
- setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2");
- setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
+ setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
+ setOperationAction(ISD::FADD, MVT::f64, Expand);
- setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2");
- setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
+ setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3");
+ setOperationAction(ISD::FADD, MVT::f32, Expand);
- setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2");
- setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
+ setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2");
+ setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand);
- setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2");
- setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
+ setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2");
+ setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
- setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2");
- setCondCodeAction(ISD::SETOGT, MVT::f32, Expand);
+ setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2");
+ setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
- setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2");
- setCondCodeAction(ISD::SETOGT, MVT::f64, Expand);
+ setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2");
+ setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
- setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi");
- setOperationAction(ISD::FP_TO_SINT, MVT::f64, Expand);
+ setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2");
+ setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
- setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi");
- setOperationAction(ISD::FP_TO_SINT, MVT::f32, Expand);
+ setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2");
+ setCondCodeAction(ISD::SETOGT, MVT::f32, Expand);
- setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2");
- setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
+ setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2");
+ setCondCodeAction(ISD::SETOGT, MVT::f64, Expand);
- setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2");
- setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
+ setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi");
+ setOperationAction(ISD::FP_TO_SINT, MVT::f64, Expand);
- setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2");
- setCondCodeAction(ISD::SETOLT, MVT::f64, Expand);
+ setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi");
+ setOperationAction(ISD::FP_TO_SINT, MVT::f32, Expand);
- setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2");
- setCondCodeAction(ISD::SETOLT, MVT::f32, Expand);
+ setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2");
+ setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
- setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
- setOperationAction(ISD::FMUL, MVT::f64, Expand);
+ setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2");
+ setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
- setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3");
- setOperationAction(ISD::MUL, MVT::f32, Expand);
+ setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2");
+ setCondCodeAction(ISD::SETOLT, MVT::f64, Expand);
- setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2");
- setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
+ setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2");
+ setCondCodeAction(ISD::SETOLT, MVT::f32, Expand);
- setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2");
+ setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
+ setOperationAction(ISD::FMUL, MVT::f64, Expand);
- setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
- setOperationAction(ISD::SUB, MVT::f64, Expand);
+ setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3");
+ setOperationAction(ISD::MUL, MVT::f32, Expand);
- setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3");
- setOperationAction(ISD::SUB, MVT::f32, Expand);
+ setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2");
+ setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
- setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2");
- setOperationAction(ISD::FP_ROUND, MVT::f64, Expand);
+ setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2");
- setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2");
- setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
+ setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
+ setOperationAction(ISD::SUB, MVT::f64, Expand);
- setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2");
- setCondCodeAction(ISD::SETO, MVT::f64, Expand);
+ setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3");
+ setOperationAction(ISD::SUB, MVT::f32, Expand);
- setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2");
- setCondCodeAction(ISD::SETO, MVT::f32, Expand);
+ setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2");
+ setOperationAction(ISD::FP_ROUND, MVT::f64, Expand);
- setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2");
- setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
+ setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2");
+ setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
- setOperationAction(ISD::FABS, MVT::f32, Expand);
- setOperationAction(ISD::FABS, MVT::f64, Expand);
- setOperationAction(ISD::FNEG, MVT::f32, Expand);
- setOperationAction(ISD::FNEG, MVT::f64, Expand);
- }
+ setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2");
+ setCondCodeAction(ISD::SETO, MVT::f64, Expand);
- setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
- setOperationAction(ISD::SREM, MVT::i32, Expand);
-
- setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
- setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
- setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
- setIndexedLoadAction(ISD::POST_INC, MVT::i64, Legal);
-
- setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
- setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
- setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
- setIndexedStoreAction(ISD::POST_INC, MVT::i64, Legal);
-
- setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
-
- // Turn FP extload into load/fextend.
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
- // Hexagon has a i1 sign extending load.
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Expand);
- // Turn FP truncstore into trunc + store.
- setTruncStoreAction(MVT::f64, MVT::f32, Expand);
-
- // Custom legalize GlobalAddress nodes into CONST32.
- setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
- setOperationAction(ISD::GlobalAddress, MVT::i8, Custom);
- setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
- // Truncate action?
- setOperationAction(ISD::TRUNCATE, MVT::i64, Expand);
-
- // Hexagon doesn't have sext_inreg, replace them with shl/sra.
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
-
- // Hexagon has no REM or DIVREM operations.
- setOperationAction(ISD::UREM, MVT::i32, Expand);
- setOperationAction(ISD::SREM, MVT::i32, Expand);
- setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
- setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
- setOperationAction(ISD::SREM, MVT::i64, Expand);
- setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
- setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
-
- setOperationAction(ISD::BSWAP, MVT::i64, Expand);
-
- // Lower SELECT_CC to SETCC and SELECT.
- setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
- setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
-
- if (QRI->Subtarget.hasV5TOps()) {
-
- // We need to make the operation type of SELECT node to be Custom,
- // such that we don't go into the infinite loop of
- // select -> setcc -> select_cc -> select loop.
- setOperationAction(ISD::SELECT, MVT::f32, Custom);
- setOperationAction(ISD::SELECT, MVT::f64, Custom);
-
- setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
+ setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2");
+ setCondCodeAction(ISD::SETO, MVT::f32, Expand);
- } else {
+ setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2");
+ setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
- // Hexagon has no select or setcc: expand to SELECT_CC.
- setOperationAction(ISD::SELECT, MVT::f32, Expand);
- setOperationAction(ISD::SELECT, MVT::f64, Expand);
+ setOperationAction(ISD::FABS, MVT::f32, Expand);
+ setOperationAction(ISD::FABS, MVT::f64, Expand);
+ setOperationAction(ISD::FNEG, MVT::f32, Expand);
+ setOperationAction(ISD::FNEG, MVT::f64, Expand);
+ }
- // This is a workaround documented in DAGCombiner.cpp:2892 We don't
- // support SELECT_CC on every type.
- setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
+ setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
+ setOperationAction(ISD::SREM, MVT::i32, Expand);
+
+ setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
+ setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
+ setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
+ setIndexedLoadAction(ISD::POST_INC, MVT::i64, Legal);
+
+ setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
+ setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
+ setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
+ setIndexedStoreAction(ISD::POST_INC, MVT::i64, Legal);
+
+ setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
+
+ // Turn FP extload into load/fextend.
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
+ // Hexagon has an i1 sign-extending load.
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Expand);
+ // Turn FP truncstore into trunc + store.
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+
+ // Custom legalize GlobalAddress nodes into CONST32.
+ setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
+ setOperationAction(ISD::GlobalAddress, MVT::i8, Custom);
+ setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
+ // Truncate action?
+ setOperationAction(ISD::TRUNCATE, MVT::i64, Expand);
+
+ // Hexagon doesn't have sext_inreg, replace them with shl/sra.
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+
+ // Hexagon has no REM or DIVREM operations.
+ setOperationAction(ISD::UREM, MVT::i32, Expand);
+ setOperationAction(ISD::SREM, MVT::i32, Expand);
+ setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
+ setOperationAction(ISD::SREM, MVT::i64, Expand);
+ setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
+
+ setOperationAction(ISD::BSWAP, MVT::i64, Expand);
+
+ // Lower SELECT_CC to SETCC and SELECT.
+ setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
+
+ if (Subtarget.hasV5TOps()) {
+
+ // We need to make the operation type of the SELECT node Custom,
+ // so that we don't go into the infinite
+ // select -> setcc -> select_cc -> select loop.
+ setOperationAction(ISD::SELECT, MVT::f32, Custom);
+ setOperationAction(ISD::SELECT, MVT::f64, Custom);
+
+ setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
- }
+ } else {
- if (EmitJumpTables) {
- setOperationAction(ISD::BR_JT, MVT::Other, Custom);
- } else {
- setOperationAction(ISD::BR_JT, MVT::Other, Expand);
- }
- // Increase jump tables cutover to 5, was 4.
- setMinimumJumpTableEntries(5);
-
- setOperationAction(ISD::BR_CC, MVT::f32, Expand);
- setOperationAction(ISD::BR_CC, MVT::f64, Expand);
- setOperationAction(ISD::BR_CC, MVT::i1, Expand);
- setOperationAction(ISD::BR_CC, MVT::i32, Expand);
- setOperationAction(ISD::BR_CC, MVT::i64, Expand);
-
- setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
-
- setOperationAction(ISD::FSIN , MVT::f64, Expand);
- setOperationAction(ISD::FCOS , MVT::f64, Expand);
- setOperationAction(ISD::FREM , MVT::f64, Expand);
- setOperationAction(ISD::FSIN , MVT::f32, Expand);
- setOperationAction(ISD::FCOS , MVT::f32, Expand);
- setOperationAction(ISD::FREM , MVT::f32, Expand);
- setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
- setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
-
- // In V4, we have double word add/sub with carry. The problem with
- // modelling this instruction is that it produces 2 results - Rdd and Px.
- // To model update of Px, we will have to use Defs[p0..p3] which will
- // cause any predicate live range to spill. So, we pretend we dont't
- // have these instructions.
- setOperationAction(ISD::ADDE, MVT::i8, Expand);
- setOperationAction(ISD::ADDE, MVT::i16, Expand);
- setOperationAction(ISD::ADDE, MVT::i32, Expand);
- setOperationAction(ISD::ADDE, MVT::i64, Expand);
- setOperationAction(ISD::SUBE, MVT::i8, Expand);
- setOperationAction(ISD::SUBE, MVT::i16, Expand);
- setOperationAction(ISD::SUBE, MVT::i32, Expand);
- setOperationAction(ISD::SUBE, MVT::i64, Expand);
- setOperationAction(ISD::ADDC, MVT::i8, Expand);
- setOperationAction(ISD::ADDC, MVT::i16, Expand);
- setOperationAction(ISD::ADDC, MVT::i32, Expand);
- setOperationAction(ISD::ADDC, MVT::i64, Expand);
- setOperationAction(ISD::SUBC, MVT::i8, Expand);
- setOperationAction(ISD::SUBC, MVT::i16, Expand);
- setOperationAction(ISD::SUBC, MVT::i32, Expand);
- setOperationAction(ISD::SUBC, MVT::i64, Expand);
-
- setOperationAction(ISD::CTPOP, MVT::i32, Expand);
- setOperationAction(ISD::CTPOP, MVT::i64, Expand);
- setOperationAction(ISD::CTTZ , MVT::i32, Expand);
- setOperationAction(ISD::CTTZ , MVT::i64, Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
- setOperationAction(ISD::CTLZ , MVT::i32, Expand);
- setOperationAction(ISD::CTLZ , MVT::i64, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
- setOperationAction(ISD::ROTL , MVT::i32, Expand);
- setOperationAction(ISD::ROTR , MVT::i32, Expand);
- setOperationAction(ISD::BSWAP, MVT::i32, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
- setOperationAction(ISD::FPOW , MVT::f64, Expand);
- setOperationAction(ISD::FPOW , MVT::f32, Expand);
-
- setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
- setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
- setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
-
- setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
- setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
-
- setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
- setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
-
- setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
-
- if (TM.getSubtargetImpl()->isSubtargetV2()) {
- setExceptionPointerRegister(Hexagon::R20);
- setExceptionSelectorRegister(Hexagon::R21);
- } else {
- setExceptionPointerRegister(Hexagon::R0);
- setExceptionSelectorRegister(Hexagon::R1);
- }
+ // Hexagon has no select or setcc: expand to SELECT_CC.
+ setOperationAction(ISD::SELECT, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT, MVT::f64, Expand);
+ }
- // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
- setOperationAction(ISD::VASTART , MVT::Other, Custom);
+ if (EmitJumpTables) {
+ setOperationAction(ISD::BR_JT, MVT::Other, Custom);
+ } else {
+ setOperationAction(ISD::BR_JT, MVT::Other, Expand);
+ }
+ // Increase jump tables cutover to 5, was 4.
+ setMinimumJumpTableEntries(5);
+
+ setOperationAction(ISD::BR_CC, MVT::f32, Expand);
+ setOperationAction(ISD::BR_CC, MVT::f64, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i1, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i32, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i64, Expand);
+
+ setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
+
+ setOperationAction(ISD::FSIN, MVT::f64, Expand);
+ setOperationAction(ISD::FCOS, MVT::f64, Expand);
+ setOperationAction(ISD::FREM, MVT::f64, Expand);
+ setOperationAction(ISD::FSIN, MVT::f32, Expand);
+ setOperationAction(ISD::FCOS, MVT::f32, Expand);
+ setOperationAction(ISD::FREM, MVT::f32, Expand);
+ setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
+ setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
+
+ // In V4, we have double word add/sub with carry. The problem with
+ // modelling this instruction is that it produces 2 results - Rdd and Px.
+ // To model update of Px, we will have to use Defs[p0..p3] which will
+ // cause any predicate live range to spill. So, we pretend we don't
+ // have these instructions.
+ setOperationAction(ISD::ADDE, MVT::i8, Expand);
+ setOperationAction(ISD::ADDE, MVT::i16, Expand);
+ setOperationAction(ISD::ADDE, MVT::i32, Expand);
+ setOperationAction(ISD::ADDE, MVT::i64, Expand);
+ setOperationAction(ISD::SUBE, MVT::i8, Expand);
+ setOperationAction(ISD::SUBE, MVT::i16, Expand);
+ setOperationAction(ISD::SUBE, MVT::i32, Expand);
+ setOperationAction(ISD::SUBE, MVT::i64, Expand);
+ setOperationAction(ISD::ADDC, MVT::i8, Expand);
+ setOperationAction(ISD::ADDC, MVT::i16, Expand);
+ setOperationAction(ISD::ADDC, MVT::i32, Expand);
+ setOperationAction(ISD::ADDC, MVT::i64, Expand);
+ setOperationAction(ISD::SUBC, MVT::i8, Expand);
+ setOperationAction(ISD::SUBC, MVT::i16, Expand);
+ setOperationAction(ISD::SUBC, MVT::i32, Expand);
+ setOperationAction(ISD::SUBC, MVT::i64, Expand);
+
+ setOperationAction(ISD::CTPOP, MVT::i32, Expand);
+ setOperationAction(ISD::CTPOP, MVT::i64, Expand);
+ setOperationAction(ISD::CTTZ, MVT::i32, Expand);
+ setOperationAction(ISD::CTTZ, MVT::i64, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
+ setOperationAction(ISD::CTLZ, MVT::i32, Expand);
+ setOperationAction(ISD::CTLZ, MVT::i64, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
+ setOperationAction(ISD::ROTL, MVT::i32, Expand);
+ setOperationAction(ISD::ROTR, MVT::i32, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+ setOperationAction(ISD::FPOW, MVT::f64, Expand);
+ setOperationAction(ISD::FPOW, MVT::f32, Expand);
+
+ setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
+ setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
+ setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
+
+ setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
+ setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
+
+ setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
+
+ setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
+
+ if (Subtarget.isSubtargetV2()) {
+ setExceptionPointerRegister(Hexagon::R20);
+ setExceptionSelectorRegister(Hexagon::R21);
+ } else {
+ setExceptionPointerRegister(Hexagon::R0);
+ setExceptionSelectorRegister(Hexagon::R1);
+ }
- // Use the default implementation.
- setOperationAction(ISD::VAARG , MVT::Other, Expand);
- setOperationAction(ISD::VACOPY , MVT::Other, Expand);
- setOperationAction(ISD::VAEND , MVT::Other, Expand);
- setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
- setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
+ // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
+ setOperationAction(ISD::VASTART, MVT::Other, Custom);
+ // Use the default implementation.
+ setOperationAction(ISD::VAARG, MVT::Other, Expand);
+ setOperationAction(ISD::VACOPY, MVT::Other, Expand);
+ setOperationAction(ISD::VAEND, MVT::Other, Expand);
+ setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
+ setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
- setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
- setOperationAction(ISD::INLINEASM , MVT::Other, Custom);
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
+ setOperationAction(ISD::INLINEASM, MVT::Other, Custom);
- setMinFunctionAlignment(2);
+ setMinFunctionAlignment(2);
- // Needed for DYNAMIC_STACKALLOC expansion.
- unsigned StackRegister = TM.getRegisterInfo()->getStackRegister();
- setStackPointerRegisterToSaveRestore(StackRegister);
- setSchedulingPreference(Sched::VLIW);
+ // Needed for DYNAMIC_STACKALLOC expansion.
+ const HexagonRegisterInfo *QRI =
+ static_cast<const HexagonRegisterInfo *>(TM.getRegisterInfo());
+ setStackPointerRegisterToSaveRestore(QRI->getStackRegister());
+ setSchedulingPreference(Sched::VLIW);
}
-
const char*
HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
- default: return 0;
+ default: return nullptr;
case HexagonISD::CONST32: return "HexagonISD::CONST32";
case HexagonISD::CONST32_GP: return "HexagonISD::CONST32_GP";
case HexagonISD::CONST32_Int_Real: return "HexagonISD::CONST32_Int_Real";
@@ -1575,7 +1560,6 @@ HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::BR_JT: return LowerBR_JT(Op, DAG);
case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
- case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
case ISD::SELECT: return Op;
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::INLINEASM: return LowerINLINEASM(Op, DAG);
@@ -1639,8 +1623,7 @@ HexagonTargetLowering::getRegForInlineAsmConstraint(const
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
- const HexagonRegisterInfo* QRI = TM.getRegisterInfo();
- return QRI->Subtarget.hasV5TOps();
+ return TM.getSubtarget<HexagonSubtarget>().hasV5TOps();
}
/// isLegalAddressingMode - Return true if the addressing mode represented by
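The constructor hunks above keep setLibcallName paired with setOperationAction(..., Expand): when the subtarget has no V5 hardware FP, each floating-point node is marked Expand and legalization falls back to the named __hexagon_* runtime routine. The following is a minimal standalone sketch of that table-driven pattern; the types and the lower() helper are invented for illustration and are not LLVM's TargetLowering API, while the libcall strings are taken from the hunk above.

#include <cstdio>
#include <map>
#include <string>

enum class Op { FAddF64, FMulF64 };
enum class Action { Legal, Expand };

// A registered libcall is only consulted when the operation's action says
// Expand; Legal operations keep their native instruction.
struct LoweringSketch {
  std::map<Op, Action> Actions;
  std::map<Op, std::string> Libcalls;

  std::string lower(Op O) const {
    if (Actions.at(O) == Action::Expand)
      return "call " + Libcalls.at(O);
    return "native instruction";
  }
};

int main() {
  LoweringSketch LS;
  LS.Actions[Op::FAddF64] = Action::Expand;           // no V5 FP hardware
  LS.Libcalls[Op::FAddF64] = "__hexagon_adddf3";      // name as in the hunk above
  LS.Actions[Op::FMulF64] = Action::Legal;            // pretend the FP unit exists
  LS.Libcalls[Op::FMulF64] = "__hexagon_muldf3";
  std::printf("%s\n", LS.lower(Op::FAddF64).c_str()); // call __hexagon_adddf3
  std::printf("%s\n", LS.lower(Op::FMulF64).c_str()); // native instruction
}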
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.h b/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.h
index 73da226..ec16cc8 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.h
@@ -74,8 +74,8 @@ namespace llvm {
unsigned& RetSize) const;
public:
- HexagonTargetMachine &TM;
- explicit HexagonTargetLowering(HexagonTargetMachine &targetmachine);
+ const TargetMachine &TM;
+ explicit HexagonTargetLowering(const TargetMachine &targetmachine);
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
@@ -92,14 +92,14 @@ namespace llvm {
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const;
- virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
- virtual bool isTruncateFree(EVT VT1, EVT VT2) const;
+ bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
+ bool isTruncateFree(EVT VT1, EVT VT2) const override;
- virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const;
+ bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
- virtual const char *getTargetNodeName(unsigned Opcode) const;
+ const char *getTargetNodeName(unsigned Opcode) const override;
SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const;
@@ -109,12 +109,12 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
+ SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const;
+ SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
@@ -124,7 +124,6 @@ namespace llvm {
const SmallVectorImpl<SDValue> &OutVals,
SDValue Callee) const;
- SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const;
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
@@ -133,46 +132,45 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const;
+ SDLoc dl, SelectionDAG &DAG) const override;
- virtual MachineBasicBlock
- *EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *BB) const;
+ MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *BB) const override;
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
- virtual EVT getSetCCResultType(LLVMContext &C, EVT VT) const {
+ EVT getSetCCResultType(LLVMContext &C, EVT VT) const override {
if (!VT.isVector())
return MVT::i1;
else
return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());
}
- virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
- SDValue &Base, SDValue &Offset,
- ISD::MemIndexedMode &AM,
- SelectionDAG &DAG) const;
+ bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
+ SDValue &Base, SDValue &Offset,
+ ISD::MemIndexedMode &AM,
+ SelectionDAG &DAG) const override;
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT VT) const;
+ MVT VT) const override;
// Intrinsics
- virtual SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op,
- SelectionDAG &DAG) const;
+ SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
/// The type may be VoidTy, in which case only return true if the addressing
/// mode is legal for a load/store of any legal type.
/// TODO: Handle pre/postinc as well.
- virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
- virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
+ bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;
+ bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
/// isLegalICmpImmediate - Return true if the specified immediate is legal
/// icmp immediate, that is the target has icmp instructions which can
/// compare a register against the immediate without having to materialize
/// the immediate into a register.
- virtual bool isLegalICmpImmediate(int64_t Imm) const;
+ bool isLegalICmpImmediate(int64_t Imm) const override;
};
} // end namespace llvm
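Most of the header churn above is mechanical: `virtual ... const;` declarations become `... const override;`, which makes the compiler reject any method whose signature no longer matches a base-class virtual. A small self-contained example of the guarantee this buys, with invented class names that are not LLVM's:

// Standalone illustration of the virtual -> override switch in the header above.
struct Base {
  virtual bool isTruncateFree(int a, int b) const { return a > b; }
  virtual ~Base() = default;
};

struct Derived : Base {
  // With `override`, a stale signature (e.g. dropping `const`) is a hard
  // compile error instead of silently introducing a new, unrelated virtual.
  bool isTruncateFree(int a, int b) const override { return a >= b; }
};

int main() {
  Derived D;
  const Base &B = D;
  return B.isTruncateFree(2, 2) ? 0 : 1; // dispatches to Derived::isTruncateFree
}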
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td b/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td
index d25bfa8..1057343 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
-// Hexagon Intruction Flags +
+// Hexagon Instruction Flags +
//
// *** Must match HexagonBaseInfo.h ***
//===----------------------------------------------------------------------===//
@@ -68,7 +68,7 @@ def DoubleWordAccess : MemAccessSize<4>;// Double word access instruction (memd)
//===----------------------------------------------------------------------===//
-// Intruction Class Declaration +
+// Instruction Class Declaration +
//===----------------------------------------------------------------------===//
class OpcodeHexagon {
@@ -104,54 +104,72 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
// Solo instructions, i.e., those that cannot be in a packet with others.
bits<1> isSolo = 0;
let TSFlags{5} = isSolo;
+ // Packed only with A or X-type instructions.
+ bits<1> isSoloAX = 0;
+ let TSFlags{6} = isSoloAX;
+ // Only A-type instruction in first slot or nothing.
+ bits<1> isSoloAin1 = 0;
+ let TSFlags{7} = isSoloAin1;
// Predicated instructions.
bits<1> isPredicated = 0;
- let TSFlags{6} = isPredicated;
+ let TSFlags{8} = isPredicated;
bits<1> isPredicatedFalse = 0;
- let TSFlags{7} = isPredicatedFalse;
+ let TSFlags{9} = isPredicatedFalse;
bits<1> isPredicatedNew = 0;
- let TSFlags{8} = isPredicatedNew;
+ let TSFlags{10} = isPredicatedNew;
+ bits<1> isPredicateLate = 0;
+ let TSFlags{11} = isPredicateLate; // Late predicate producer insn.
// New-value insn helper fields.
bits<1> isNewValue = 0;
- let TSFlags{9} = isNewValue; // New-value consumer insn.
+ let TSFlags{12} = isNewValue; // New-value consumer insn.
bits<1> hasNewValue = 0;
- let TSFlags{10} = hasNewValue; // New-value producer insn.
+ let TSFlags{13} = hasNewValue; // New-value producer insn.
bits<3> opNewValue = 0;
- let TSFlags{13-11} = opNewValue; // New-value produced operand.
- bits<2> opNewBits = 0;
- let TSFlags{15-14} = opNewBits; // New-value opcode bits location: 0, 8, 16.
+ let TSFlags{16-14} = opNewValue; // New-value produced operand.
bits<1> isNVStorable = 0;
- let TSFlags{16} = isNVStorable; // Store that can become new-value store.
+ let TSFlags{17} = isNVStorable; // Store that can become new-value store.
bits<1> isNVStore = 0;
- let TSFlags{17} = isNVStore; // New-value store insn.
+ let TSFlags{18} = isNVStore; // New-value store insn.
+ bits<1> isCVLoadable = 0;
+ let TSFlags{19} = isCVLoadable; // Load that can become cur-value load.
+ bits<1> isCVLoad = 0;
+ let TSFlags{20} = isCVLoad; // Cur-value load insn.
// Immediate extender helper fields.
bits<1> isExtendable = 0;
- let TSFlags{18} = isExtendable; // Insn may be extended.
+ let TSFlags{21} = isExtendable; // Insn may be extended.
bits<1> isExtended = 0;
- let TSFlags{19} = isExtended; // Insn must be extended.
+ let TSFlags{22} = isExtended; // Insn must be extended.
bits<3> opExtendable = 0;
- let TSFlags{22-20} = opExtendable; // Which operand may be extended.
+ let TSFlags{25-23} = opExtendable; // Which operand may be extended.
bits<1> isExtentSigned = 0;
- let TSFlags{23} = isExtentSigned; // Signed or unsigned range.
+ let TSFlags{26} = isExtentSigned; // Signed or unsigned range.
bits<5> opExtentBits = 0;
- let TSFlags{28-24} = opExtentBits; //Number of bits of range before extending.
+ let TSFlags{31-27} = opExtentBits; //Number of bits of range before extending.
+ bits<2> opExtentAlign = 0;
+ let TSFlags{33-32} = opExtentAlign; // Alignment exponent before extending.
// If an instruction is valid on a subtarget (v2-v5), set the corresponding
// bit from validSubTargets. v2 is the least significant bit.
// By default, instruction is valid on all subtargets.
SubTarget validSubTargets = HasV2SubT;
- let TSFlags{32-29} = validSubTargets.Value;
+ let TSFlags{37-34} = validSubTargets.Value;
// Addressing mode for load/store instructions.
AddrModeType addrMode = NoAddrMode;
- let TSFlags{35-33} = addrMode.Value;
+ let TSFlags{42-40} = addrMode.Value;
// Memory access size for mem access instructions (load/store)
MemAccessSize accessSize = NoMemAccess;
- let TSFlags{38-36} = accessSize.Value;
+ let TSFlags{45-43} = accessSize.Value;
+
+ bits<1> isTaken = 0;
+ let TSFlags {47} = isTaken; // Branch prediction.
+
+ bits<1> isFP = 0;
+ let TSFlags {48} = isFP; // Floating-point.
// Fields used for relation models.
string BaseOpcode = "";
@@ -173,14 +191,14 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
}
//===----------------------------------------------------------------------===//
-// Intruction Classes Definitions +
+// Instruction Classes Definitions +
//===----------------------------------------------------------------------===//
// LD Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class LDInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, cstr, LD, TypeLD>;
+ string cstr = "", InstrItinClass itin = LD_tc_ld_SLOT01>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeLD>;
let mayLoad = 1 in
class LDInst2<dag outs, dag ins, string asmstr, list<dag> pattern = [],
@@ -199,16 +217,16 @@ class LDInstPost<dag outs, dag ins, string asmstr, list<dag> pattern = [],
let mayLoad = 1 in
class LD0Inst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : LDInst<outs, ins, asmstr, pattern, cstr>;
+ string cstr = "", InstrItinClass itin=LD_tc_ld_SLOT0>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeLD>;
// ST Instruction Class in V2/V3 can take SLOT0 only.
// ST Instruction Class in V4 can take SLOT0 & SLOT1.
// Definition of the instruction class CHANGED from V2/V3 to V4.
let mayStore = 1 in
class STInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, cstr, ST, TypeST>;
+ string cstr = "", InstrItinClass itin = ST_tc_st_SLOT01>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeST>;
class STInst2<dag outs, dag ins, string asmstr, list<dag> pattern = [],
string cstr = "">
@@ -216,39 +234,39 @@ class STInst2<dag outs, dag ins, string asmstr, list<dag> pattern = [],
let mayStore = 1 in
class ST0Inst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, cstr, ST0, TypeST>;
+ string cstr = "", InstrItinClass itin = ST_tc_ld_SLOT0>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeST>;
// ST Instruction Class in V2/V3 can take SLOT0 only.
// ST Instruction Class in V4 can take SLOT0 & SLOT1.
// Definition of the instruction class CHANGED from V2/V3 to V4.
class STInstPost<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : STInst<outs, ins, asmstr, pattern, cstr>;
+ string cstr = "", InstrItinClass itin = ST_tc_st_SLOT01>
+ : STInst<outs, ins, asmstr, pattern, cstr, itin>;
// SYSTEM Instruction Class in V4 can take SLOT0 only
// In V2/V3 we used ST for this but in v4 ST can take SLOT0 or SLOT1.
class SYSInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, cstr, SYS, TypeSYSTEM>;
+ string cstr = "", InstrItinClass itin = ST_tc_3stall_SLOT0>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeSYSTEM>;
// ALU32 Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class ALU32Inst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, cstr, ALU32, TypeALU32>;
+ string cstr = "", InstrItinClass itin = ALU32_2op_tc_1_SLOT0123>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeALU32>;
// ALU64 Instruction Class in V2/V3.
// XTYPE Instruction Class in V4.
// Definition of the instruction class NOT CHANGED.
// Name of the Instruction Class changed from ALU64 to XTYPE from V2/V3 to V4.
class ALU64Inst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, cstr, ALU64, TypeXTYPE>;
+ string cstr = "", InstrItinClass itin = ALU64_tc_2_SLOT23>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeXTYPE>;
class ALU64_acc<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : ALU64Inst<outs, ins, asmstr, pattern, cstr>;
+ string cstr = "", InstrItinClass itin = ALU64_tc_2_SLOT23>
+ : ALU64Inst<outs, ins, asmstr, pattern, cstr, itin>;
// M Instruction Class in V2/V3.
@@ -256,55 +274,55 @@ class ALU64_acc<dag outs, dag ins, string asmstr, list<dag> pattern = [],
// Definition of the instruction class NOT CHANGED.
// Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4.
class MInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, cstr, M, TypeXTYPE>;
+ string cstr = "", InstrItinClass itin = M_tc_3x_SLOT23>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeXTYPE>;
// M Instruction Class in V2/V3.
// XTYPE Instruction Class in V4.
// Definition of the instruction class NOT CHANGED.
// Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4.
class MInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : MInst<outs, ins, asmstr, pattern, cstr>;
+ string cstr = "", InstrItinClass itin = M_tc_2_SLOT23>
+ : MInst<outs, ins, asmstr, pattern, cstr, itin>;
// S Instruction Class in V2/V3.
// XTYPE Instruction Class in V4.
// Definition of the instruction class NOT CHANGED.
// Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4.
class SInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, cstr, S, TypeXTYPE>;
+ string cstr = "", InstrItinClass itin = S_2op_tc_1_SLOT23>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeXTYPE>;
// S Instruction Class in V2/V3.
// XTYPE Instruction Class in V4.
// Definition of the instruction class NOT CHANGED.
// Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4.
class SInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : SInst<outs, ins, asmstr, pattern, cstr>;
+ string cstr = "", InstrItinClass itin = S_3op_tc_1_SLOT23>
+ : SInst<outs, ins, asmstr, pattern, cstr, itin>;
// J Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class JInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, cstr, J, TypeJ>;
+ string cstr = "", InstrItinClass itin = J_tc_2early_SLOT23>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeJ>;
// JR Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class JRInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, cstr, JR, TypeJR>;
+ string cstr = "", InstrItinClass itin = J_tc_2early_SLOT2>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeJR>;
// CR Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class CRInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, cstr, CR, TypeCR>;
+ string cstr = "", InstrItinClass itin = CR_tc_2early_SLOT3>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCR>;
let isCodeGenOnly = 1, isPseudo = 1 in
class Endloop<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, cstr, ENDLOOP, TypeENDLOOP>;
+ string cstr = "", InstrItinClass itin = J_tc_2early_SLOT0123>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeENDLOOP>;
let isCodeGenOnly = 1, isPseudo = 1 in
class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern = [],
@@ -317,39 +335,40 @@ class PseudoM<dag outs, dag ins, string asmstr, list<dag> pattern = [],
: InstHexagon<outs, ins, asmstr, pattern, cstr, PSEUDOM, TypePSEUDO>;
//===----------------------------------------------------------------------===//
-// Intruction Classes Definitions -
+// Instruction Classes Definitions -
//===----------------------------------------------------------------------===//
//
// ALU32 patterns
//.
-class ALU32_rr<dag outs, dag ins, string asmstr, list<dag> pattern,
- string cstr = "">
- : ALU32Inst<outs, ins, asmstr, pattern, cstr>;
+class ALU32_rr<dag outs, dag ins, string asmstr, list<dag> pattern = [],
+ string cstr = "", InstrItinClass itin = ALU32_2op_tc_1_SLOT0123>
+ : ALU32Inst<outs, ins, asmstr, pattern, cstr, itin>;
-class ALU32_ir<dag outs, dag ins, string asmstr, list<dag> pattern,
- string cstr = "">
- : ALU32Inst<outs, ins, asmstr, pattern, cstr>;
+class ALU32_ir<dag outs, dag ins, string asmstr, list<dag> pattern = [],
+ string cstr = "", InstrItinClass itin = ALU32_2op_tc_1_SLOT0123>
+ : ALU32Inst<outs, ins, asmstr, pattern, cstr, itin>;
-class ALU32_ri<dag outs, dag ins, string asmstr, list<dag> pattern,
- string cstr = "">
- : ALU32Inst<outs, ins, asmstr, pattern, cstr>;
+class ALU32_ri<dag outs, dag ins, string asmstr, list<dag> pattern = [],
+ string cstr = "", InstrItinClass itin = ALU32_2op_tc_1_SLOT0123>
+ : ALU32Inst<outs, ins, asmstr, pattern, cstr, itin>;
+
+class ALU32_ii<dag outs, dag ins, string asmstr, list<dag> pattern = [],
+ string cstr = "", InstrItinClass itin = ALU32_2op_tc_1_SLOT0123>
+ : ALU32Inst<outs, ins, asmstr, pattern, cstr, itin>;
-class ALU32_ii<dag outs, dag ins, string asmstr, list<dag> pattern,
- string cstr = "">
- : ALU32Inst<outs, ins, asmstr, pattern, cstr>;
//
// ALU64 patterns.
//
-class ALU64_rr<dag outs, dag ins, string asmstr, list<dag> pattern,
- string cstr = "">
- : ALU64Inst<outs, ins, asmstr, pattern, cstr>;
+class ALU64_rr<dag outs, dag ins, string asmstr, list<dag> pattern = [],
+ string cstr = "", InstrItinClass itin = ALU64_tc_1_SLOT23>
+ : ALU64Inst<outs, ins, asmstr, pattern, cstr, itin>;
-class ALU64_ri<dag outs, dag ins, string asmstr, list<dag> pattern,
- string cstr = "">
- : ALU64Inst<outs, ins, asmstr, pattern, cstr>;
+class ALU64_ri<dag outs, dag ins, string asmstr, list<dag> pattern = [],
+ string cstr = "", InstrItinClass itin = ALU64_tc_1_SLOT23>
+ : ALU64Inst<outs, ins, asmstr, pattern, cstr, itin>;
// Post increment ST Instruction.
class STInstPI<dag outs, dag ins, string asmstr, list<dag> pattern = [],
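The TSFlags relayout above shifts most per-instruction flags (isPredicated moves from bit 6 to bit 8, opNewValue to bits 16-14, opExtentBits to bits 31-27, and so on), and the "Must match HexagonBaseInfo.h" comment is the reminder that the C++ side decodes the same word with shift-and-mask constants. Below is a hedged sketch of that decoding; the constant names and helper functions are hypothetical stand-ins, only the bit positions are taken from the new layout in the diff.

#include <cstdint>
#include <cstdio>

// Hypothetical constants mirroring the new TSFlags layout above
// (bit 8 = isPredicated, bits 16-14 = opNewValue); the real values live in
// HexagonBaseInfo.h and must match the .td exactly.
enum : unsigned {
  PredicatedPos = 8,  PredicatedMask = 0x1,
  NewValueOpPos = 14, NewValueOpMask = 0x7,
};

static bool isPredicated(uint64_t TSFlags) {
  return (TSFlags >> PredicatedPos) & PredicatedMask;
}

static unsigned newValueOperand(uint64_t TSFlags) {
  return (TSFlags >> NewValueOpPos) & NewValueOpMask;
}

int main() {
  uint64_t Flags = (1ull << PredicatedPos) | (3ull << NewValueOpPos);
  std::printf("predicated=%d newValueOp=%u\n",
              isPredicated(Flags), newValueOperand(Flags));
}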
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV4.td b/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV4.td
index 9fda0da..d92f97b 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV4.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV4.td
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
//----------------------------------------------------------------------------//
-// Hexagon Intruction Flags +
+// Hexagon Instruction Flags
//
// *** Must match BaseInfo.h ***
//----------------------------------------------------------------------------//
@@ -22,30 +22,30 @@ def TypeNV : IType<10>;
def TypePREFIX : IType<30>;
//----------------------------------------------------------------------------//
-// Intruction Classes Definitions +
+// Instruction Classes Definitions
//----------------------------------------------------------------------------//
//
// NV type instructions.
//
class NVInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, cstr, NV_V4, TypeNV>;
+ string cstr = "", InstrItinClass itin = NCJ_tc_3or4stall_SLOT0>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeNV>;
class NVInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : NVInst<outs, ins, asmstr, pattern, cstr>;
+ string cstr = "", InstrItinClass itin = NCJ_tc_3or4stall_SLOT0>
+ : NVInst<outs, ins, asmstr, pattern, cstr, itin>;
// Definition of Post increment new value store.
class NVInstPost_V4<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : NVInst<outs, ins, asmstr, pattern, cstr>;
+ string cstr = "", InstrItinClass itin = ST_tc_st_SLOT0>
+ : NVInst<outs, ins, asmstr, pattern, cstr, itin>;
// Post increment ST Instruction.
let mayStore = 1 in
class NVInstPI_V4<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : NVInst<outs, ins, asmstr, pattern, cstr>;
+ string cstr = "", InstrItinClass itin = ST_tc_st_SLOT0>
+ : NVInst<outs, ins, asmstr, pattern, cstr, itin>;
// New-value conditional branch.
class NCJInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
@@ -54,13 +54,14 @@ class NCJInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
let mayLoad = 1, mayStore = 1 in
class MEMInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, cstr, MEM_V4, TypeMEMOP>;
+ string cstr = "", InstrItinClass itin = V4LDST_tc_st_SLOT0>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeMEMOP>;
class MEMInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : MEMInst<outs, ins, asmstr, pattern, cstr>;
+ string cstr = "", InstrItinClass itin = V4LDST_tc_st_SLOT0>
+ : MEMInst<outs, ins, asmstr, pattern, cstr, itin>;
let isCodeGenOnly = 1 in
class EXTENDERInst<dag outs, dag ins, string asmstr, list<dag> pattern = []>
- : InstHexagon<outs, ins, asmstr, pattern, "", PREFIX, TypePREFIX>;
+ : InstHexagon<outs, ins, asmstr, pattern, "", EXTENDER_tc_1_SLOT0123,
+ TypePREFIX>;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 6b97609..1c95e06 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -26,13 +26,16 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "hexagon-instrinfo"
+
#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRMAP_INFO
#include "HexagonGenInstrInfo.inc"
#include "HexagonGenDFAPacketizer.inc"
-using namespace llvm;
-
///
/// Constants for Hexagon instructions.
///
@@ -135,7 +138,7 @@ HexagonInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
regPos = 1;
}
- if (FBB == 0) {
+ if (!FBB) {
if (Cond.empty()) {
// Due to a bug in TailMerging/CFG Optimization, we need to add a
// special case handling of a predicated jump followed by an
@@ -147,11 +150,11 @@ HexagonInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
if (isPredicated(Term) && !AnalyzeBranch(MBB, NewTBB, NewFBB, Cond,
false)) {
MachineBasicBlock *NextBB =
- llvm::next(MachineFunction::iterator(&MBB));
+ std::next(MachineFunction::iterator(&MBB));
if (NewTBB == NextBB) {
ReverseBranchCondition(Cond);
RemoveBranch(MBB);
- return InsertBranch(MBB, TBB, 0, Cond, DL);
+ return InsertBranch(MBB, TBB, nullptr, Cond, DL);
}
}
BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
@@ -174,8 +177,8 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const {
- TBB = NULL;
- FBB = NULL;
+ TBB = nullptr;
+ FBB = nullptr;
// If the block has no terminators, it just falls into the block after it.
MachineBasicBlock::instr_iterator I = MBB.instr_end();
@@ -224,7 +227,7 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
// Get the last instruction in the block.
MachineInstr *LastInst = I;
- MachineInstr *SecondLastInst = NULL;
+ MachineInstr *SecondLastInst = nullptr;
// Find one more terminator if present.
do {
if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(I)) {
@@ -557,7 +560,7 @@ MachineInstr *HexagonInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
const SmallVectorImpl<unsigned> &Ops,
int FI) const {
// Hexagon_TODO: Implement.
- return(0);
+ return nullptr;
}
unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const {
@@ -1535,14 +1538,13 @@ int HexagonInstrInfo::GetDotOldOp(const int opc) const {
int NewOp = opc;
if (isPredicated(NewOp) && isPredicatedNew(NewOp)) { // Get predicate old form
NewOp = Hexagon::getPredOldOpcode(NewOp);
- if (NewOp < 0)
- assert(0 && "Couldn't change predicate new instruction to its old form.");
+ assert(NewOp >= 0 &&
+ "Couldn't change predicate new instruction to its old form.");
}
- if (isNewValueStore(NewOp)) { // Convert into non new-value format
+ if (isNewValueStore(NewOp)) { // Convert into non-new-value format
NewOp = Hexagon::getNonNVStore(NewOp);
- if (NewOp < 0)
- assert(0 && "Couldn't change new-value store to its old form.");
+ assert(NewOp >= 0 && "Couldn't change new-value store to its old form.");
}
return NewOp;
}
@@ -1654,7 +1656,7 @@ bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
return false;
// Terminators and labels can't be scheduled around.
- if (MI->getDesc().isTerminator() || MI->isLabel() || MI->isInlineAsm())
+ if (MI->getDesc().isTerminator() || MI->isPosition() || MI->isInlineAsm())
return true;
return false;
@@ -1793,7 +1795,7 @@ bool HexagonInstrInfo::NonExtEquivalentExists (const MachineInstr *MI) const {
return true;
if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
- // Check addressing mode and retreive non-ext equivalent instruction.
+ // Check addressing mode and retrieve non-ext equivalent instruction.
switch (getAddrMode(MI)) {
case HexagonII::Absolute :
@@ -1827,7 +1829,7 @@ short HexagonInstrInfo::getNonExtOpcode (const MachineInstr *MI) const {
return NonExtOpcode;
if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
- // Check addressing mode and retreive non-ext equivalent instruction.
+ // Check addressing mode and retrieve non-ext equivalent instruction.
switch (getAddrMode(MI)) {
case HexagonII::Absolute :
return Hexagon::getBasedWithImmOffset(MI->getOpcode());
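Besides the functional tweaks above (nullptr instead of 0/NULL, isPosition() covering labels and other position markers), the file replaces llvm::next with std::next for advancing the basic-block iterator. A short self-contained reminder of what std::next does, with made-up data:

#include <cstdio>
#include <iterator>
#include <list>

// std::next returns an advanced copy of the iterator without modifying the
// original, which is how the successor basic block is located above.
int main() {
  std::list<int> Blocks = {10, 20, 30};
  auto It = Blocks.begin();
  auto NextIt = std::next(It);           // points at 20; It still points at 10
  std::printf("%d %d\n", *It, *NextIt);  // prints: 10 20
}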
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.h b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
index 3f45b8b..6b032c9 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -16,15 +16,17 @@
#include "HexagonRegisterInfo.h"
#include "MCTargetDesc/HexagonBaseInfo.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetInstrInfo.h"
#define GET_INSTRINFO_HEADER
#include "HexagonGenInstrInfo.inc"
namespace llvm {
+struct EVT;
+
class HexagonInstrInfo : public HexagonGenInstrInfo {
virtual void anchor();
const HexagonRegisterInfo RI;
@@ -38,124 +40,121 @@ public:
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
- virtual const HexagonRegisterInfo &getRegisterInfo() const { return RI; }
+ const HexagonRegisterInfo &getRegisterInfo() const { return RI; }
/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
- virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
+ unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
- virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
-
-
- virtual bool AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const;
-
- virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
-
- virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const;
-
- virtual bool analyzeCompare(const MachineInstr *MI,
- unsigned &SrcReg, unsigned &SrcReg2,
- int &Mask, int &Value) const;
-
- virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const;
-
- virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
-
- virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- SmallVectorImpl<MachineInstr*> &NewMIs) const;
-
- virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
-
- virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- SmallVectorImpl<MachineInstr*> &NewMIs) const;
-
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const;
-
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const {
- return 0;
+ unsigned isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
+
+
+ bool AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const override;
+
+ unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
+
+ unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const override;
+
+ bool analyzeCompare(const MachineInstr *MI,
+ unsigned &SrcReg, unsigned &SrcReg2,
+ int &Mask, int &Value) const override;
+
+ void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const override;
+
+ void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+
+ void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
+ SmallVectorImpl<MachineOperand> &Addr,
+ const TargetRegisterClass *RC,
+ SmallVectorImpl<MachineInstr*> &NewMIs) const;
+
+ void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+
+ void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
+ SmallVectorImpl<MachineOperand> &Addr,
+ const TargetRegisterClass *RC,
+ SmallVectorImpl<MachineInstr*> &NewMIs) const;
+
+ MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr* MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const override;
+
+ MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr* MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ MachineInstr* LoadMI) const override {
+ return nullptr;
}
unsigned createVR(MachineFunction* MF, MVT VT) const;
- virtual bool isBranch(const MachineInstr *MI) const;
- virtual bool isPredicable(MachineInstr *MI) const;
- virtual bool
- PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Cond) const;
-
- virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
- unsigned ExtraPredCycles,
- const BranchProbability &Probability) const;
-
- virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
- unsigned NumTCycles, unsigned ExtraTCycles,
- MachineBasicBlock &FMBB,
- unsigned NumFCycles, unsigned ExtraFCycles,
- const BranchProbability &Probability) const;
-
- virtual bool isPredicated(const MachineInstr *MI) const;
- virtual bool isPredicated(unsigned Opcode) const;
- virtual bool isPredicatedTrue(const MachineInstr *MI) const;
- virtual bool isPredicatedTrue(unsigned Opcode) const;
- virtual bool isPredicatedNew(const MachineInstr *MI) const;
- virtual bool isPredicatedNew(unsigned Opcode) const;
- virtual bool DefinesPredicate(MachineInstr *MI,
- std::vector<MachineOperand> &Pred) const;
- virtual bool
- SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2) const;
-
- virtual bool
- ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
-
- virtual bool
- isProfitableToDupForIfCvt(MachineBasicBlock &MBB,unsigned NumCycles,
- const BranchProbability &Probability) const;
-
- virtual DFAPacketizer*
+ bool isBranch(const MachineInstr *MI) const;
+ bool isPredicable(MachineInstr *MI) const override;
+ bool PredicateInstruction(MachineInstr *MI,
+ const SmallVectorImpl<MachineOperand> &Cond) const override;
+
+ bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
+ unsigned ExtraPredCycles,
+ const BranchProbability &Probability) const override;
+
+ bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
+ unsigned NumTCycles, unsigned ExtraTCycles,
+ MachineBasicBlock &FMBB,
+ unsigned NumFCycles, unsigned ExtraFCycles,
+ const BranchProbability &Probability) const override;
+
+ bool isPredicated(const MachineInstr *MI) const override;
+ bool isPredicated(unsigned Opcode) const;
+ bool isPredicatedTrue(const MachineInstr *MI) const;
+ bool isPredicatedTrue(unsigned Opcode) const;
+ bool isPredicatedNew(const MachineInstr *MI) const;
+ bool isPredicatedNew(unsigned Opcode) const;
+ bool DefinesPredicate(MachineInstr *MI,
+ std::vector<MachineOperand> &Pred) const override;
+ bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
+ const SmallVectorImpl<MachineOperand> &Pred2) const override;
+
+ bool
+ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
+
+ bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
+ const BranchProbability &Probability) const override;
+
+ DFAPacketizer*
CreateTargetScheduleState(const TargetMachine *TM,
- const ScheduleDAG *DAG) const;
+ const ScheduleDAG *DAG) const override;
- virtual bool isSchedulingBoundary(const MachineInstr *MI,
- const MachineBasicBlock *MBB,
- const MachineFunction &MF) const;
+ bool isSchedulingBoundary(const MachineInstr *MI,
+ const MachineBasicBlock *MBB,
+ const MachineFunction &MF) const override;
bool isValidOffset(const int Opcode, const int Offset) const;
bool isValidAutoIncImm(const EVT VT, const int Offset) const;
bool isMemOp(const MachineInstr *MI) const;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.td b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.td
index c96aaca..4dcf101 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.td
@@ -768,12 +768,13 @@ class T_JMP <dag InsDag, list<dag> JumpList = []>
let InputType = "imm", isExtendable = 1, opExtendable = 1, isExtentSigned = 1,
Defs = [PC], isPredicated = 1, opExtentBits = 17 in
-class T_JMP_c <bit PredNot, bit isPredNew, bit isTaken>:
+class T_JMP_c <bit PredNot, bit isPredNew, bit isTak>:
JInst<(outs ), (ins PredRegs:$src, brtarget:$dst),
!if(PredNot, "if (!$src", "if ($src")#
!if(isPredNew, ".new) ", ") ")#"jump"#
- !if(isPredNew, !if(isTaken, ":t ", ":nt "), " ")#"$dst"> {
+ !if(isPredNew, !if(isTak, ":t ", ":nt "), " ")#"$dst"> {
+ let isTaken = isTak;
let isBrTaken = !if(isPredNew, !if(isTaken, "true", "false"), "");
let isPredicatedFalse = PredNot;
let isPredicatedNew = isPredNew;
@@ -784,7 +785,7 @@ class T_JMP_c <bit PredNot, bit isPredNew, bit isTaken>:
let Inst{27-24} = 0b1100;
let Inst{21} = PredNot;
- let Inst{12} = !if(isPredNew, isTaken, zero);
+ let Inst{12} = !if(isPredNew, isTak, zero);
let Inst{11} = isPredNew;
let Inst{9-8} = src;
let Inst{23-22} = dst{16-15};
@@ -806,12 +807,13 @@ class T_JMPr<dag InsDag = (ins IntRegs:$dst)>
}
let Defs = [PC], isPredicated = 1, InputType = "reg" in
-class T_JMPr_c <bit PredNot, bit isPredNew, bit isTaken>:
+class T_JMPr_c <bit PredNot, bit isPredNew, bit isTak>:
JRInst <(outs ), (ins PredRegs:$src, IntRegs:$dst),
!if(PredNot, "if (!$src", "if ($src")#
!if(isPredNew, ".new) ", ") ")#"jumpr"#
- !if(isPredNew, !if(isTaken, ":t ", ":nt "), " ")#"$dst"> {
+ !if(isPredNew, !if(isTak, ":t ", ":nt "), " ")#"$dst"> {
+ let isTaken = isTak;
let isBrTaken = !if(isPredNew, !if(isTaken, "true", "false"), "");
let isPredicatedFalse = PredNot;
let isPredicatedNew = isPredNew;
@@ -823,7 +825,7 @@ class T_JMPr_c <bit PredNot, bit isPredNew, bit isTaken>:
let Inst{27-22} = 0b001101;
let Inst{21} = PredNot;
let Inst{20-16} = dst;
- let Inst{12} = !if(isPredNew, isTaken, zero);
+ let Inst{12} = !if(isPredNew, isTak, zero);
let Inst{11} = isPredNew;
let Inst{9-8} = src;
let Predicates = !if(isPredNew, [HasV3T], [HasV2T]);
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV4.td b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV4.td
index 475c23d..db5b7ea 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV4.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV4.td
@@ -1004,21 +1004,22 @@ defm POST_STwri: ST_PostInc_nv <"memw", "STriw", IntRegs, s4_2Imm>, AddrModeRel;
let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 11 in
class NVJrr_template<string mnemonic, bits<3> majOp, bit NvOpNum,
- bit isNegCond, bit isTaken>
+ bit isNegCond, bit isTak>
: NVInst_V4<(outs),
(ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset),
"if ("#!if(isNegCond, "!","")#mnemonic#
"($src1"#!if(!eq(NvOpNum, 0),".new, ",", ")#
"$src2"#!if(!eq(NvOpNum, 1),".new))","))")#" jump:"
- #!if(isTaken, "t","nt")#" $offset",
+ #!if(isTak, "t","nt")#" $offset",
[]>, Requires<[HasV4T]> {
bits<5> src1;
bits<5> src2;
bits<3> Ns; // New-Value Operand
- bits<5> RegOp; // Non New-Value Operand
+ bits<5> RegOp; // Non-New-Value Operand
bits<11> offset;
+ let isTaken = isTak;
let isBrTaken = !if(isTaken, "true", "false");
let isPredicatedFalse = isNegCond;
@@ -1030,7 +1031,7 @@ class NVJrr_template<string mnemonic, bits<3> majOp, bit NvOpNum,
let Inst{25-23} = majOp;
let Inst{22} = isNegCond;
let Inst{18-16} = Ns;
- let Inst{13} = isTaken;
+ let Inst{13} = isTak;
let Inst{12-8} = RegOp;
let Inst{21-20} = offset{10-9};
let Inst{7-1} = offset{8-2};
@@ -1078,13 +1079,14 @@ let isPredicated = 1, isBranch = 1, isNewValue = 1, isTerminator = 1,
let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 11 in
class NVJri_template<string mnemonic, bits<3> majOp, bit isNegCond,
- bit isTaken>
+ bit isTak>
: NVInst_V4<(outs),
(ins IntRegs:$src1, u5Imm:$src2, brtarget:$offset),
"if ("#!if(isNegCond, "!","")#mnemonic#"($src1.new, #$src2)) jump:"
- #!if(isTaken, "t","nt")#" $offset",
+ #!if(isTak, "t","nt")#" $offset",
[]>, Requires<[HasV4T]> {
+ let isTaken = isTak;
let isPredicatedFalse = isNegCond;
let isBrTaken = !if(isTaken, "true", "false");
@@ -1097,7 +1099,7 @@ class NVJri_template<string mnemonic, bits<3> majOp, bit isNegCond,
let Inst{25-23} = majOp;
let Inst{22} = isNegCond;
let Inst{18-16} = src1;
- let Inst{13} = isTaken;
+ let Inst{13} = isTak;
let Inst{12-8} = src2;
let Inst{21-20} = offset{10-9};
let Inst{7-1} = offset{8-2};
@@ -1135,14 +1137,15 @@ let isPredicated = 1, isBranch = 1, isNewValue = 1, isTerminator = 1,
let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 11 in
class NVJ_ConstImm_template<string mnemonic, bits<3> majOp, string ImmVal,
- bit isNegCond, bit isTaken>
+ bit isNegCond, bit isTak>
: NVInst_V4<(outs),
(ins IntRegs:$src1, brtarget:$offset),
"if ("#!if(isNegCond, "!","")#mnemonic
#"($src1.new, #"#ImmVal#")) jump:"
- #!if(isTaken, "t","nt")#" $offset",
+ #!if(isTak, "t","nt")#" $offset",
[]>, Requires<[HasV4T]> {
+ let isTaken = isTak;
let isPredicatedFalse = isNegCond;
let isBrTaken = !if(isTaken, "true", "false");
@@ -1153,7 +1156,7 @@ class NVJ_ConstImm_template<string mnemonic, bits<3> majOp, string ImmVal,
let Inst{25-23} = majOp;
let Inst{22} = isNegCond;
let Inst{18-16} = src1;
- let Inst{13} = isTaken;
+ let Inst{13} = isTak;
let Inst{21-20} = offset{10-9};
let Inst{7-1} = offset{8-2};
}
@@ -2019,9 +2022,10 @@ multiclass MemOpi_bitPats <PatFrag ldOp, PatFrag stOp, PatLeaf immPred,
// mem[bhw](Rs+#0) = [clrbit|setbit](#U5)
let AddedComplexity = 225 in
- def : Pat <(stOp (OpNode (ldOp addrPred:$addr), immPred:$bitend),
- addrPred:$addr),
- (MI IntRegs:$addr, #0, (xformFunc immPred:$bitend))>;
+ def : Pat <(stOp (OpNode (ldOp (addrPred IntRegs:$addr, extPred:$offset)),
+ immPred:$bitend),
+ (addrPred (i32 IntRegs:$addr), extPred:$offset)),
+ (MI IntRegs:$addr, extPred:$offset, (xformFunc immPred:$bitend))>;
}
multiclass MemOpi_bitExtType<PatFrag ldOpByte, PatFrag ldOpHalf > {
@@ -2065,9 +2069,10 @@ multiclass MemOpr_Pats <PatFrag ldOp, PatFrag stOp, ComplexPattern addrPred,
PatLeaf extPred, InstHexagon MI, SDNode OpNode> {
let AddedComplexity = 141 in
// mem[bhw](Rs+#0) [+-&|]= Rt
- def : Pat <(stOp (OpNode (ldOp addrPred:$addr), (i32 IntRegs:$addend)),
- addrPred:$addr),
- (MI IntRegs:$addr, #0, (i32 IntRegs:$addend) )>;
+ def : Pat <(stOp (OpNode (ldOp (addrPred IntRegs:$addr, extPred:$offset)),
+ (i32 IntRegs:$addend)),
+ (addrPred (i32 IntRegs:$addr), extPred:$offset)),
+ (MI IntRegs:$addr, extPred:$offset, (i32 IntRegs:$addend) )>;
// mem[bhw](Rs+#U6:[012]) [+-&|]= Rt
let AddedComplexity = 150 in
@@ -3198,7 +3203,7 @@ def : Pat<(i64 (cttz (i64 DoubleRegs:$src1))),
// i8 -> i64 loads
-// We need a complexity of 120 here to overide preceeding handling of
+// We need a complexity of 120 here to override preceding handling of
// zextloadi8.
let Predicates = [HasV4T], AddedComplexity = 120 in {
def: Pat <(i64 (extloadi8 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
@@ -3220,7 +3225,7 @@ def: Pat <(i64 (sextloadi8 FoldGlobalAddr:$addr)),
(i64 (SXTW (LDrib_abs_V4 FoldGlobalAddr:$addr)))>;
}
// i16 -> i64 loads
-// We need a complexity of 120 here to overide preceeding handling of
+// We need a complexity of 120 here to override preceding handling of
// zextloadi16.
let AddedComplexity = 120 in {
def: Pat <(i64 (extloadi16 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
@@ -3248,7 +3253,7 @@ def: Pat <(i64 (sextloadi16 FoldGlobalAddr:$addr)),
Requires<[HasV4T]>;
}
// i32->i64 loads
-// We need a complexity of 120 here to overide preceeding handling of
+// We need a complexity of 120 here to override preceding handling of
// zextloadi32.
let AddedComplexity = 120 in {
def: Pat <(i64 (extloadi32 (NumUsesBelowThresCONST32 tglobaladdr:$addr))),
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp
index bbb2fa4..5e4346d 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp
@@ -18,9 +18,9 @@
#include "MCTargetDesc/HexagonMCInst.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
-#include "llvm/Target/Mangler.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonMachineFunctionInfo.h b/contrib/llvm/lib/Target/Hexagon/HexagonMachineFunctionInfo.h
index a59c8c9..d799bdb 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonMachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonMachineFunctionInfo.h
@@ -10,8 +10,8 @@
#ifndef HexagonMACHINEFUNCTIONINFO_H
#define HexagonMACHINEFUNCTIONINFO_H
-#include <map>
#include "llvm/CodeGen/MachineFunction.h"
+#include <map>
namespace llvm {
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp
index c94f081..6fcaa20 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp
@@ -12,17 +12,17 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "misched"
-
#include "HexagonMachineScheduler.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/IR/Function.h"
using namespace llvm;
-/// Platform specific modifications to DAG.
+#define DEBUG_TYPE "misched"
+
+/// Platform-specific modifications to DAG.
void VLIWMachineScheduler::postprocessDAG() {
- SUnit* LastSequentialCall = NULL;
+ SUnit* LastSequentialCall = nullptr;
// Currently we only catch the situation when compare gets scheduled
// before preceding call.
for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
@@ -108,7 +108,7 @@ bool VLIWResourceModel::reserveResources(SUnit *SU) {
case TargetOpcode::REG_SEQUENCE:
case TargetOpcode::IMPLICIT_DEF:
case TargetOpcode::KILL:
- case TargetOpcode::PROLOG_LABEL:
+ case TargetOpcode::CFI_INSTRUCTION:
case TargetOpcode::EH_LABEL:
case TargetOpcode::COPY:
case TargetOpcode::INLINEASM:
@@ -150,7 +150,7 @@ void VLIWMachineScheduler::schedule() {
buildDAGWithRegPressure();
- // Postprocess the DAG to add platform specific artificial dependencies.
+ // Postprocess the DAG to add platform-specific artificial dependencies.
postprocessDAG();
SmallVector<SUnit*, 8> TopRoots, BotRoots;
@@ -186,6 +186,9 @@ void VLIWMachineScheduler::schedule() {
scheduleMI(SU, IsTopNode);
updateQueues(SU, IsTopNode);
+
+ // Notify the scheduling strategy after updating the DAG.
+ SchedImpl->schedNode(SU, IsTopNode);
}
assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
@@ -266,7 +269,7 @@ void ConvergingVLIWScheduler::releaseBottomNode(SUnit *SU) {
/// can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
-bool ConvergingVLIWScheduler::SchedBoundary::checkHazard(SUnit *SU) {
+bool ConvergingVLIWScheduler::VLIWSchedBoundary::checkHazard(SUnit *SU) {
if (HazardRec->isEnabled())
return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;
@@ -277,7 +280,7 @@ bool ConvergingVLIWScheduler::SchedBoundary::checkHazard(SUnit *SU) {
return false;
}
-void ConvergingVLIWScheduler::SchedBoundary::releaseNode(SUnit *SU,
+void ConvergingVLIWScheduler::VLIWSchedBoundary::releaseNode(SUnit *SU,
unsigned ReadyCycle) {
if (ReadyCycle < MinReadyCycle)
MinReadyCycle = ReadyCycle;
@@ -292,7 +295,7 @@ void ConvergingVLIWScheduler::SchedBoundary::releaseNode(SUnit *SU,
}
/// Move the boundary of scheduled code by one cycle.
-void ConvergingVLIWScheduler::SchedBoundary::bumpCycle() {
+void ConvergingVLIWScheduler::VLIWSchedBoundary::bumpCycle() {
unsigned Width = SchedModel->getIssueWidth();
IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width;
@@ -318,7 +321,7 @@ void ConvergingVLIWScheduler::SchedBoundary::bumpCycle() {
}
/// Move the boundary of scheduled code by one SUnit.
-void ConvergingVLIWScheduler::SchedBoundary::bumpNode(SUnit *SU) {
+void ConvergingVLIWScheduler::VLIWSchedBoundary::bumpNode(SUnit *SU) {
bool startNewCycle = false;
// Update the reservation table.
@@ -348,7 +351,7 @@ void ConvergingVLIWScheduler::SchedBoundary::bumpNode(SUnit *SU) {
/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
-void ConvergingVLIWScheduler::SchedBoundary::releasePending() {
+void ConvergingVLIWScheduler::VLIWSchedBoundary::releasePending() {
// If the available queue is empty, it is safe to reset MinReadyCycle.
if (Available.empty())
MinReadyCycle = UINT_MAX;
@@ -376,7 +379,7 @@ void ConvergingVLIWScheduler::SchedBoundary::releasePending() {
}
/// Remove SU from the ready set for this boundary.
-void ConvergingVLIWScheduler::SchedBoundary::removeReady(SUnit *SU) {
+void ConvergingVLIWScheduler::VLIWSchedBoundary::removeReady(SUnit *SU) {
if (Available.isInQueue(SU))
Available.remove(Available.find(SU));
else {
@@ -388,20 +391,20 @@ void ConvergingVLIWScheduler::SchedBoundary::removeReady(SUnit *SU) {
/// If this queue only has one ready candidate, return it. As a side effect,
/// advance the cycle until at least one node is ready. If multiple instructions
/// are ready, return NULL.
-SUnit *ConvergingVLIWScheduler::SchedBoundary::pickOnlyChoice() {
+SUnit *ConvergingVLIWScheduler::VLIWSchedBoundary::pickOnlyChoice() {
if (CheckPending)
releasePending();
for (unsigned i = 0; Available.empty(); ++i) {
assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) &&
"permanent hazard"); (void)i;
- ResourceModel->reserveResources(0);
+ ResourceModel->reserveResources(nullptr);
bumpCycle();
releasePending();
}
if (Available.size() == 1)
return *Available.begin();
- return NULL;
+ return nullptr;
}
#ifndef NDEBUG
@@ -421,7 +424,7 @@ void ConvergingVLIWScheduler::traceCandidate(const char *Label,
/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
/// of SU, return it, otherwise return null.
static SUnit *getSingleUnscheduledPred(SUnit *SU) {
- SUnit *OnlyAvailablePred = 0;
+ SUnit *OnlyAvailablePred = nullptr;
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
SUnit &Pred = *I->getSUnit();
@@ -429,7 +432,7 @@ static SUnit *getSingleUnscheduledPred(SUnit *SU) {
// We found an available, but not scheduled, predecessor. If it's the
// only one we have found, keep track of it... otherwise give up.
if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
- return 0;
+ return nullptr;
OnlyAvailablePred = &Pred;
}
}
@@ -439,7 +442,7 @@ static SUnit *getSingleUnscheduledPred(SUnit *SU) {
/// getSingleUnscheduledSucc - If there is exactly one unscheduled successor
/// of SU, return it, otherwise return null.
static SUnit *getSingleUnscheduledSucc(SUnit *SU) {
- SUnit *OnlyAvailableSucc = 0;
+ SUnit *OnlyAvailableSucc = nullptr;
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
SUnit &Succ = *I->getSUnit();
@@ -447,7 +450,7 @@ static SUnit *getSingleUnscheduledSucc(SUnit *SU) {
// We found an available, but not scheduled, successor. If it's the
// only one we have found, keep track of it... otherwise give up.
if (OnlyAvailableSucc && OnlyAvailableSucc != &Succ)
- return 0;
+ return nullptr;
OnlyAvailableSucc = &Succ;
}
}
@@ -636,7 +639,7 @@ SUnit *ConvergingVLIWScheduler::pickNode(bool &IsTopNode) {
if (DAG->top() == DAG->bottom()) {
assert(Top.Available.empty() && Top.Pending.empty() &&
Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
- return NULL;
+ return nullptr;
}
SUnit *SU;
if (llvm::ForceTopDown) {
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.h b/contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.h
index 8ac333f..8c41086 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.h
@@ -14,7 +14,6 @@
#ifndef HEXAGONASMPRINTER_H
#define HEXAGONASMPRINTER_H
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
@@ -57,7 +56,7 @@ class VLIWResourceModel {
public:
VLIWResourceModel(const TargetMachine &TM, const TargetSchedModel *SM) :
SchedModel(SM), TotalPackets(0) {
- ResourcesModel = TM.getInstrInfo()->CreateTargetScheduleState(&TM,NULL);
+ ResourcesModel = TM.getInstrInfo()->CreateTargetScheduleState(&TM, nullptr);
// This hard requirement could be relaxed,
// but for now do not let it proceed.
@@ -92,15 +91,16 @@ VLIWResourceModel(const TargetMachine &TM, const TargetSchedModel *SM) :
/// Extend the standard ScheduleDAGMI to provide more context and override the
/// top-level schedule() driver.
-class VLIWMachineScheduler : public ScheduleDAGMI {
+class VLIWMachineScheduler : public ScheduleDAGMILive {
public:
- VLIWMachineScheduler(MachineSchedContext *C, MachineSchedStrategy *S):
- ScheduleDAGMI(C, S) {}
+ VLIWMachineScheduler(MachineSchedContext *C,
+ std::unique_ptr<MachineSchedStrategy> S)
+ : ScheduleDAGMILive(C, std::move(S)) {}
/// Schedule - This is called back from ScheduleDAGInstrs::Run() when it's
/// time to do some work.
- virtual void schedule();
- /// Perform platform specific DAG postprocessing.
+ virtual void schedule() override;
+ /// Perform platform-specific DAG postprocessing.
void postprocessDAG();
};
@@ -120,7 +120,7 @@ class ConvergingVLIWScheduler : public MachineSchedStrategy {
// Best scheduling cost.
int SCost;
- SchedCandidate(): SU(NULL), SCost(0) {}
+ SchedCandidate(): SU(nullptr), SCost(0) {}
};
/// Represent the type of SchedCandidate found within a single queue.
enum CandResult {
@@ -130,7 +130,7 @@ class ConvergingVLIWScheduler : public MachineSchedStrategy {
/// Each Scheduling boundary is associated with ready queues. It tracks the
/// current cycle in whichever direction it has moved, and maintains the state
/// of "hazards" and other interlocks at the current cycle.
- struct SchedBoundary {
+ struct VLIWSchedBoundary {
VLIWMachineScheduler *DAG;
const TargetSchedModel *SchedModel;
@@ -152,14 +152,14 @@ class ConvergingVLIWScheduler : public MachineSchedStrategy {
/// Pending queues extend the ready queues with the same ID and the
/// PendingFlag set.
- SchedBoundary(unsigned ID, const Twine &Name):
- DAG(0), SchedModel(0), Available(ID, Name+".A"),
+ VLIWSchedBoundary(unsigned ID, const Twine &Name):
+ DAG(nullptr), SchedModel(nullptr), Available(ID, Name+".A"),
Pending(ID << ConvergingVLIWScheduler::LogMaxQID, Name+".P"),
- CheckPending(false), HazardRec(0), ResourceModel(0),
+ CheckPending(false), HazardRec(nullptr), ResourceModel(nullptr),
CurrCycle(0), IssueCount(0),
MinReadyCycle(UINT_MAX), MaxMinLatency(0) {}
- ~SchedBoundary() {
+ ~VLIWSchedBoundary() {
delete ResourceModel;
delete HazardRec;
}
@@ -192,8 +192,8 @@ class ConvergingVLIWScheduler : public MachineSchedStrategy {
const TargetSchedModel *SchedModel;
// State of the top and bottom scheduled instruction boundaries.
- SchedBoundary Top;
- SchedBoundary Bot;
+ VLIWSchedBoundary Top;
+ VLIWSchedBoundary Bot;
public:
/// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
@@ -203,18 +203,19 @@ public:
LogMaxQID = 2
};
- ConvergingVLIWScheduler():
- DAG(0), SchedModel(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}
+ ConvergingVLIWScheduler()
+ : DAG(nullptr), SchedModel(nullptr), Top(TopQID, "TopQ"),
+ Bot(BotQID, "BotQ") {}
- virtual void initialize(ScheduleDAGMI *dag);
+ virtual void initialize(ScheduleDAGMI *dag) override;
- virtual SUnit *pickNode(bool &IsTopNode);
+ virtual SUnit *pickNode(bool &IsTopNode) override;
- virtual void schedNode(SUnit *SU, bool IsTopNode);
+ virtual void schedNode(SUnit *SU, bool IsTopNode) override;
- virtual void releaseTopNode(SUnit *SU);
+ virtual void releaseTopNode(SUnit *SU) override;
- virtual void releaseBottomNode(SUnit *SU);
+ virtual void releaseBottomNode(SUnit *SU) override;
unsigned ReportPackets() {
return Top.ResourceModel->getTotalPackets() +
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp
index f7c4513..b7c03a7 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp
@@ -21,34 +21,33 @@
//
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "hexagon-nvj"
#include "llvm/PassSupport.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/Debug.h"
+#include "Hexagon.h"
+#include "HexagonInstrInfo.h"
+#include "HexagonMachineFunctionInfo.h"
+#include "HexagonRegisterInfo.h"
+#include "HexagonSubtarget.h"
+#include "HexagonTargetMachine.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/ScheduleDAGInstrs.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/LiveVariables.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
-#include "llvm/Target/TargetMachine.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "Hexagon.h"
-#include "HexagonTargetMachine.h"
-#include "HexagonRegisterInfo.h"
-#include "HexagonSubtarget.h"
-#include "HexagonInstrInfo.h"
-#include "HexagonMachineFunctionInfo.h"
-
#include <map>
-
-#include "llvm/Support/CommandLine.h"
using namespace llvm;
+#define DEBUG_TYPE "hexagon-nvj"
+
STATISTIC(NumNVJGenerated, "Number of New Value Jump Instructions created");
static cl::opt<int>
@@ -76,16 +75,16 @@ namespace {
initializeHexagonNewValueJumpPass(*PassRegistry::getPassRegistry());
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MachineBranchProbabilityInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
}
- const char *getPassName() const {
+ const char *getPassName() const override {
return "Hexagon NewValueJump";
}
- virtual bool runOnMachineFunction(MachineFunction &Fn);
+ bool runOnMachineFunction(MachineFunction &Fn) override;
private:
/// \brief A handle to the branch probability pass.
@@ -395,8 +394,8 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
bool MO2IsKill = false;
MachineBasicBlock::iterator jmpPos;
MachineBasicBlock::iterator cmpPos;
- MachineInstr *cmpInstr = NULL, *jmpInstr = NULL;
- MachineBasicBlock *jmpTarget = NULL;
+ MachineInstr *cmpInstr = nullptr, *jmpInstr = nullptr;
+ MachineBasicBlock *jmpTarget = nullptr;
bool afterRA = false;
bool isSecondOpReg = false;
bool isSecondOpNewified = false;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonPeephole.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
index 5490ecd..48b6159 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
@@ -35,7 +35,6 @@
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "hexagon-peephole"
#include "Hexagon.h"
#include "HexagonTargetMachine.h"
#include "llvm/ADT/DenseMap.h"
@@ -57,6 +56,8 @@
using namespace llvm;
+#define DEBUG_TYPE "hexagon-peephole"
+
static cl::opt<bool> DisableHexagonPeephole("disable-hexagon-peephole",
cl::Hidden, cl::ZeroOrMore, cl::init(false),
cl::desc("Disable Peephole Optimization"));
@@ -89,13 +90,13 @@ namespace {
initializeHexagonPeepholePass(*PassRegistry::getPassRegistry());
}
- bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- const char *getPassName() const {
+ const char *getPassName() const override {
return "Hexagon optimize redundant zero and size extends";
}
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
MachineFunctionPass::getAnalysisUsage(AU);
}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
index 1786e9d..fb466d3 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
@@ -14,26 +14,26 @@
#include "HexagonRegisterInfo.h"
#include "Hexagon.h"
+#include "HexagonMachineFunctionInfo.h"
#include "HexagonSubtarget.h"
#include "HexagonTargetMachine.h"
-#include "HexagonMachineFunctionInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MachineLocation.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
@@ -43,13 +43,12 @@ HexagonRegisterInfo::HexagonRegisterInfo(HexagonSubtarget &st)
Subtarget(st) {
}
-const uint16_t* HexagonRegisterInfo::getCalleeSavedRegs(const MachineFunction
- *MF)
- const {
- static const uint16_t CalleeSavedRegsV2[] = {
+const MCPhysReg *
+HexagonRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
+ static const MCPhysReg CalleeSavedRegsV2[] = {
Hexagon::R24, Hexagon::R25, Hexagon::R26, Hexagon::R27, 0
};
- static const uint16_t CalleeSavedRegsV3[] = {
+ static const MCPhysReg CalleeSavedRegsV3[] = {
Hexagon::R16, Hexagon::R17, Hexagon::R18, Hexagon::R19,
Hexagon::R20, Hexagon::R21, Hexagon::R22, Hexagon::R23,
Hexagon::R24, Hexagon::R25, Hexagon::R26, Hexagon::R27, 0
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.h b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.h
index 89af7c3..648b4af 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.h
@@ -48,16 +48,17 @@ struct HexagonRegisterInfo : public HexagonGenRegisterInfo {
HexagonRegisterInfo(HexagonSubtarget &st);
/// Code Generation virtual methods...
- const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
+ const MCPhysReg *
+ getCalleeSavedRegs(const MachineFunction *MF = nullptr) const override;
- const TargetRegisterClass* const* getCalleeSavedRegClasses(
- const MachineFunction *MF = 0) const;
+ const TargetRegisterClass* const*
+ getCalleeSavedRegClasses(const MachineFunction *MF = nullptr) const;
- BitVector getReservedRegs(const MachineFunction &MF) const;
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
void eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, unsigned FIOperandNum,
- RegScavenger *RS = NULL) const;
+ RegScavenger *RS = nullptr) const override;
/// determineFrameLayout - Determine the size of the frame and maximum call
/// frame size.
@@ -65,17 +66,17 @@ struct HexagonRegisterInfo : public HexagonGenRegisterInfo {
/// requiresRegisterScavenging - returns true since we may need scavenging for
/// a temporary register when generating hardware loop instructions.
- bool requiresRegisterScavenging(const MachineFunction &MF) const {
+ bool requiresRegisterScavenging(const MachineFunction &MF) const override {
return true;
}
- bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
+ bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const override {
return true;
}
// Debug information queries.
unsigned getRARegister() const;
- unsigned getFrameRegister(const MachineFunction &MF) const;
+ unsigned getFrameRegister(const MachineFunction &MF) const override;
unsigned getFrameRegister() const;
unsigned getStackRegister() const;
};
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp
index 44234e8..2b459a4 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp
@@ -33,15 +33,16 @@ namespace {
HexagonRemoveExtendArgs() : FunctionPass(ID) {
initializeHexagonRemoveExtendArgsPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
- const char *getPassName() const {
+ const char *getPassName() const override {
return "Remove sign extends";
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MachineFunctionAnalysis>();
AU.addPreserved<MachineFunctionAnalysis>();
+ AU.addPreserved("stack-protector");
FunctionPass::getAnalysisUsage(AU);
}
};
@@ -59,18 +60,17 @@ bool HexagonRemoveExtendArgs::runOnFunction(Function &F) {
if (F.getAttributes().hasAttribute(Idx, Attribute::SExt)) {
Argument* Arg = AI;
if (!isa<PointerType>(Arg->getType())) {
- for (Instruction::use_iterator UI = Arg->use_begin();
- UI != Arg->use_end();) {
+ for (auto UI = Arg->user_begin(); UI != Arg->user_end();) {
if (isa<SExtInst>(*UI)) {
- Instruction* Use = cast<Instruction>(*UI);
- SExtInst* SI = new SExtInst(Arg, Use->getType());
+ Instruction* I = cast<Instruction>(*UI);
+ SExtInst* SI = new SExtInst(Arg, I->getType());
assert (EVT::getEVT(SI->getType()) ==
- (EVT::getEVT(Use->getType())));
+ (EVT::getEVT(I->getType())));
++UI;
- Use->replaceAllUsesWith(SI);
+ I->replaceAllUsesWith(SI);
Instruction* First = F.getEntryBlock().begin();
SI->insertBefore(First);
- Use->eraseFromParent();
+ I->eraseFromParent();
} else {
++UI;
}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td b/contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td
index c2cfbb9..528cafc 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td
@@ -7,57 +7,6 @@
//
//===----------------------------------------------------------------------===//
-// Functional Units
-def LSUNIT : FuncUnit; // SLOT0
-def LUNIT : FuncUnit; // SLOT1
-def MUNIT : FuncUnit; // SLOT2
-def SUNIT : FuncUnit; // SLOT3
-def LOOPUNIT : FuncUnit;
-
-// Itinerary classes
-def ALU32 : InstrItinClass;
-def ALU64 : InstrItinClass;
-def CR : InstrItinClass;
-def J : InstrItinClass;
-def JR : InstrItinClass;
-def LD : InstrItinClass;
-def LD0 : InstrItinClass;
-def M : InstrItinClass;
-def ST : InstrItinClass;
-def ST0 : InstrItinClass;
-def S : InstrItinClass;
-def SYS : InstrItinClass;
-def ENDLOOP : InstrItinClass;
-def PSEUDO : InstrItinClass;
-def PSEUDOM : InstrItinClass;
-
-def HexagonItineraries :
- ProcessorItineraries<[LSUNIT, LUNIT, MUNIT, SUNIT, LOOPUNIT], [], [
- InstrItinData<ALU32 , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>,
- InstrItinData<ALU64 , [InstrStage<1, [MUNIT, SUNIT]>]>,
- InstrItinData<CR , [InstrStage<1, [SUNIT]>]>,
- InstrItinData<J , [InstrStage<1, [SUNIT, MUNIT]>]>,
- InstrItinData<JR , [InstrStage<1, [MUNIT]>]>,
- InstrItinData<LD , [InstrStage<1, [LUNIT, LSUNIT]>]>,
- InstrItinData<LD0 , [InstrStage<1, [LSUNIT]>]>,
- InstrItinData<M , [InstrStage<1, [MUNIT, SUNIT]>]>,
- InstrItinData<ST , [InstrStage<1, [LSUNIT]>]>,
- InstrItinData<ST0 , [InstrStage<1, [LSUNIT]>]>,
- InstrItinData<S , [InstrStage<1, [SUNIT, MUNIT]>]>,
- InstrItinData<SYS , [InstrStage<1, [LSUNIT]>]>,
- InstrItinData<ENDLOOP, [InstrStage<1, [LOOPUNIT]>]>,
- InstrItinData<PSEUDO , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>,
- InstrItinData<PSEUDOM, [InstrStage<1, [MUNIT, SUNIT], 0>,
- InstrStage<1, [MUNIT, SUNIT]>]>
- ]>;
-
-def HexagonModel : SchedMachineModel {
- // Max issue per cycle == bundle width.
- let IssueWidth = 4;
- let Itineraries = HexagonItineraries;
- let LoadLatency = 1;
-}
-
//===----------------------------------------------------------------------===//
// V4 Machine Info +
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td b/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td
index ef72cf40..a7d2d47 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td
@@ -34,29 +34,158 @@ def SLOT3 : FuncUnit;
def SLOT_ENDLOOP: FuncUnit;
// Itinerary classes.
-def NV_V4 : InstrItinClass;
-def MEM_V4 : InstrItinClass;
+def PSEUDO : InstrItinClass;
+def PSEUDOM : InstrItinClass;
// ALU64/M/S Instruction classes of V2 are collectively known as XTYPE in V4.
+def DUPLEX : InstrItinClass;
def PREFIX : InstrItinClass;
+def COMPOUND : InstrItinClass;
+
+def ALU32_2op_tc_1_SLOT0123 : InstrItinClass;
+def ALU32_2op_tc_2early_SLOT0123 : InstrItinClass;
+def ALU32_3op_tc_2early_SLOT0123 : InstrItinClass;
+def ALU32_3op_tc_1_SLOT0123 : InstrItinClass;
+def ALU32_3op_tc_2_SLOT0123 : InstrItinClass;
+def ALU32_ADDI_tc_1_SLOT0123 : InstrItinClass;
+def ALU64_tc_1_SLOT23 : InstrItinClass;
+def ALU64_tc_1or2_SLOT23 : InstrItinClass;
+def ALU64_tc_2_SLOT23 : InstrItinClass;
+def ALU64_tc_2early_SLOT23 : InstrItinClass;
+def ALU64_tc_3x_SLOT23 : InstrItinClass;
+def CR_tc_2_SLOT3 : InstrItinClass;
+def CR_tc_2early_SLOT23 : InstrItinClass;
+def CR_tc_2early_SLOT3 : InstrItinClass;
+def CR_tc_3x_SLOT23 : InstrItinClass;
+def CR_tc_3x_SLOT3 : InstrItinClass;
+def J_tc_2early_SLOT23 : InstrItinClass;
+def J_tc_2early_SLOT2 : InstrItinClass;
+def LD_tc_ld_SLOT01 : InstrItinClass;
+def LD_tc_ld_SLOT0 : InstrItinClass;
+def LD_tc_3or4stall_SLOT0 : InstrItinClass;
+def M_tc_1_SLOT23 : InstrItinClass;
+def M_tc_1or2_SLOT23 : InstrItinClass;
+def M_tc_2_SLOT23 : InstrItinClass;
+def M_tc_3_SLOT23 : InstrItinClass;
+def M_tc_3x_SLOT23 : InstrItinClass;
+def M_tc_3or4x_SLOT23 : InstrItinClass;
+def ST_tc_st_SLOT01 : InstrItinClass;
+def ST_tc_st_SLOT0 : InstrItinClass;
+def ST_tc_ld_SLOT0 : InstrItinClass;
+def ST_tc_3stall_SLOT0 : InstrItinClass;
+def S_2op_tc_1_SLOT23 : InstrItinClass;
+def S_2op_tc_2_SLOT23 : InstrItinClass;
+def S_2op_tc_2early_SLOT23 : InstrItinClass;
+def S_2op_tc_3or4x_SLOT23 : InstrItinClass;
+def S_3op_tc_1_SLOT23 : InstrItinClass;
+def S_3op_tc_1or2_SLOT23 : InstrItinClass;
+def S_3op_tc_2_SLOT23 : InstrItinClass;
+def S_3op_tc_2early_SLOT23 : InstrItinClass;
+def S_3op_tc_3_SLOT23 : InstrItinClass;
+def S_3op_tc_3x_SLOT23 : InstrItinClass;
+def NCJ_tc_3or4stall_SLOT0 : InstrItinClass;
+def V2LDST_tc_ld_SLOT01 : InstrItinClass;
+def V2LDST_tc_st_SLOT0 : InstrItinClass;
+def V2LDST_tc_st_SLOT01 : InstrItinClass;
+def V4LDST_tc_ld_SLOT01 : InstrItinClass;
+def V4LDST_tc_st_SLOT0 : InstrItinClass;
+def V4LDST_tc_st_SLOT01 : InstrItinClass;
+def J_tc_2early_SLOT0123 : InstrItinClass;
+def EXTENDER_tc_1_SLOT0123 : InstrItinClass;
+
def HexagonItinerariesV4 :
ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP], [], [
- InstrItinData<ALU32 , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU64 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<CR , [InstrStage<1, [SLOT3]>]>,
- InstrItinData<J , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<JR , [InstrStage<1, [SLOT2]>]>,
- InstrItinData<LD , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<LD0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<M , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<ST , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<ST0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<S , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<SYS , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<NV_V4 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<MEM_V4 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<ENDLOOP, [InstrStage<1, [SLOT_ENDLOOP]>]>,
- InstrItinData<PREFIX , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ // ALU32
+ InstrItinData<ALU32_2op_tc_1_SLOT0123 ,
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData<ALU32_2op_tc_2early_SLOT0123,
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData<ALU32_3op_tc_1_SLOT0123 ,
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData<ALU32_3op_tc_2early_SLOT0123,
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData<ALU32_3op_tc_2_SLOT0123 ,
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData<ALU32_ADDI_tc_1_SLOT0123 ,
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+
+ // ALU64
+ InstrItinData<ALU64_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<ALU64_tc_1or2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<ALU64_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<ALU64_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<ALU64_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+
+ // CR -> System
+ InstrItinData<CR_tc_2_SLOT3 , [InstrStage<1, [SLOT3]>]>,
+ InstrItinData<CR_tc_2early_SLOT3 , [InstrStage<1, [SLOT3]>]>,
+ InstrItinData<CR_tc_3x_SLOT3 , [InstrStage<1, [SLOT3]>]>,
+
+ // Jump (conditional/unconditional/return etc)
+ // CR
+ InstrItinData<CR_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<CR_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ // J
+ InstrItinData<J_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ // JR
+ InstrItinData<J_tc_2early_SLOT2 , [InstrStage<1, [SLOT2]>]>,
+
+ //Load
+ InstrItinData<LD_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData<LD_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>]>,
+ InstrItinData<LD_tc_3or4stall_SLOT0 , [InstrStage<1, [SLOT0]>]>,
+
+ // M
+ InstrItinData<M_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<M_tc_1or2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<M_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<M_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<M_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<M_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+
+ // Store
+ // ST
+ InstrItinData<ST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
+ // ST0
+ InstrItinData<ST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
+ InstrItinData<ST_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>]>,
+
+ // S
+ InstrItinData<S_2op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<S_2op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<S_2op_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<S_2op_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<S_3op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<S_3op_tc_1or2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<S_3op_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<S_3op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<S_3op_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<S_3op_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+
+ // SYS
+ InstrItinData<ST_tc_3stall_SLOT0 , [InstrStage<1, [SLOT0]>]>,
+
+ // New Value Compare Jump
+ InstrItinData<NCJ_tc_3or4stall_SLOT0 , [InstrStage<1, [SLOT0]>]>,
+
+ // Mem ops - MEM_V4
+ InstrItinData<V2LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
+ InstrItinData<V2LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData<V2LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData<V4LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
+ InstrItinData<V4LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData<V4LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
+
+ InstrItinData<DUPLEX , [InstrStage<1, [SLOT0]>]>,
+
+ // ENDLOOP
+ InstrItinData<J_tc_2early_SLOT0123 , [InstrStage<1, [SLOT_ENDLOOP]>]>,
+
+ // Extender/PREFIX
+ InstrItinData<EXTENDER_tc_1_SLOT0123,
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+
+ InstrItinData<COMPOUND , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
InstrItinData<PSEUDOM, [InstrStage<1, [SLOT2, SLOT3], 0>,
InstrStage<1, [SLOT2, SLOT3]>]>
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp
index c37bf9f..b5db997 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp
@@ -11,16 +11,15 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "hexagon-selectiondag-info"
#include "HexagonTargetMachine.h"
using namespace llvm;
+#define DEBUG_TYPE "hexagon-selectiondag-info"
+
bool llvm::flag_aligned_memcpy;
-HexagonSelectionDAGInfo::HexagonSelectionDAGInfo(const HexagonTargetMachine
- &TM)
- : TargetSelectionDAGInfo(TM) {
-}
+HexagonSelectionDAGInfo::HexagonSelectionDAGInfo(const DataLayout &DL)
+ : TargetSelectionDAGInfo(&DL) {}
HexagonSelectionDAGInfo::~HexagonSelectionDAGInfo() {
}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSelectionDAGInfo.h b/contrib/llvm/lib/Target/Hexagon/HexagonSelectionDAGInfo.h
index 31f278a..b40b303 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonSelectionDAGInfo.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSelectionDAGInfo.h
@@ -18,21 +18,18 @@
namespace llvm {
-class HexagonTargetMachine;
-
class HexagonSelectionDAGInfo : public TargetSelectionDAGInfo {
public:
- explicit HexagonSelectionDAGInfo(const HexagonTargetMachine &TM);
+ explicit HexagonSelectionDAGInfo(const DataLayout &DL);
~HexagonSelectionDAGInfo();
- virtual
SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
bool isVolatile, bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
- MachinePointerInfo SrcPtrInfo) const;
+ MachinePointerInfo SrcPtrInfo) const override;
};
}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp
index 5166f8e..247207f 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp
@@ -17,11 +17,10 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "xfer"
-
-#include "HexagonTargetMachine.h"
-#include "HexagonSubtarget.h"
#include "HexagonMachineFunctionInfo.h"
+#include "HexagonSubtarget.h"
+#include "HexagonTargetMachine.h"
+#include "HexagonTargetObjectFile.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/MachineDominators.h"
@@ -33,32 +32,33 @@
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
#include <map>
using namespace llvm;
+#define DEBUG_TYPE "xfer"
+
namespace {
class HexagonSplitConst32AndConst64 : public MachineFunctionPass {
- const HexagonTargetMachine& QTM;
- const HexagonSubtarget &QST;
+ const HexagonTargetMachine &QTM;
public:
static char ID;
- HexagonSplitConst32AndConst64(const HexagonTargetMachine& TM)
- : MachineFunctionPass(ID), QTM(TM), QST(*TM.getSubtargetImpl()) {}
+ HexagonSplitConst32AndConst64(const HexagonTargetMachine &TM)
+ : MachineFunctionPass(ID), QTM(TM) {}
- const char *getPassName() const {
+ const char *getPassName() const override {
return "Hexagon Split Const32s and Const64s";
}
- bool runOnMachineFunction(MachineFunction &Fn);
+ bool runOnMachineFunction(MachineFunction &Fn) override;
};
@@ -67,6 +67,12 @@ char HexagonSplitConst32AndConst64::ID = 0;
bool HexagonSplitConst32AndConst64::runOnMachineFunction(MachineFunction &Fn) {
+ const HexagonTargetObjectFile &TLOF =
+ (const HexagonTargetObjectFile &)
+ QTM.getTargetLowering()->getObjFileLowering();
+ if (TLOF.IsSmallDataEnabled())
+ return true;
+
const TargetInstrInfo *TII = QTM.getInstrInfo();
// Loop over all of the basic blocks
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp
index 8608e08..9601090 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp
@@ -26,7 +26,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "xfer"
#include "Hexagon.h"
#include "HexagonMachineFunctionInfo.h"
#include "HexagonSubtarget.h"
@@ -49,6 +48,8 @@
using namespace llvm;
+#define DEBUG_TYPE "xfer"
+
namespace llvm {
void initializeHexagonSplitTFRCondSetsPass(PassRegistry&);
}
@@ -67,10 +68,10 @@ class HexagonSplitTFRCondSets : public MachineFunctionPass {
initializeHexagonSplitTFRCondSetsPass(*PassRegistry::getPassRegistry());
}
- const char *getPassName() const {
+ const char *getPassName() const override {
return "Hexagon Split TFRCondSets";
}
- bool runOnMachineFunction(MachineFunction &Fn);
+ bool runOnMachineFunction(MachineFunction &Fn) override;
};
@@ -221,7 +222,8 @@ bool HexagonSplitTFRCondSets::runOnMachineFunction(MachineFunction &Fn) {
static void initializePassOnce(PassRegistry &Registry) {
const char *Name = "Hexagon Split TFRCondSets";
PassInfo *PI = new PassInfo(Name, "hexagon-split-tfr",
- &HexagonSplitTFRCondSets::ID, 0, false, false);
+ &HexagonSplitTFRCondSets::ID, nullptr, false,
+ false);
Registry.registerPass(*PI, true);
}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
index fca6707..657893f 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
@@ -18,6 +18,8 @@
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
+#define DEBUG_TYPE "hexagon-subtarget"
+
#define GET_SUBTARGETINFO_CTOR
#define GET_SUBTARGETINFO_TARGET_DESC
#include "HexagonGenSubtargetInfo.inc"
@@ -46,10 +48,8 @@ EnableIEEERndNear(
cl::Hidden, cl::ZeroOrMore, cl::init(false),
cl::desc("Generate non-chopped conversion from fp to int."));
-HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS):
- HexagonGenSubtargetInfo(TT, CPU, FS),
- CPUString(CPU.str()) {
-
+HexagonSubtarget &
+HexagonSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) {
// If the programmer has not specified a Hexagon version, default to -mv4.
if (CPUString.empty())
CPUString = "hexagonv4";
@@ -68,6 +68,15 @@ HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS):
}
ParseSubtargetFeatures(CPUString, FS);
+ return *this;
+}
+
+HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS,
+ const TargetMachine &TM)
+ : HexagonGenSubtargetInfo(TT, CPU, FS), CPUString(CPU.str()),
+ DL("e-m:e-p:32:32-i1:32-i64:64-a:0-n32"),
+ InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM),
+ TSInfo(DL), FrameLowering() {
// Initialize scheduling itinerary for the specified CPU.
InstrItins = getInstrItineraryForCPU(CPUString);
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.h b/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.h
index 690bef0..b184e62 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.h
@@ -14,6 +14,11 @@
#ifndef Hexagon_SUBTARGET_H
#define Hexagon_SUBTARGET_H
+#include "HexagonFrameLowering.h"
+#include "HexagonInstrInfo.h"
+#include "HexagonISelLowering.h"
+#include "HexagonSelectionDAGInfo.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <string>
@@ -28,6 +33,7 @@ namespace llvm {
class HexagonSubtarget : public HexagonGenSubtargetInfo {
virtual void anchor();
+
bool UseMemOps;
bool ModeIEEERndNear;
@@ -37,16 +43,35 @@ public:
};
HexagonArchEnum HexagonArchVersion;
+private:
std::string CPUString;
+ const DataLayout DL; // Calculates type size & alignment.
+ HexagonInstrInfo InstrInfo;
+ HexagonTargetLowering TLInfo;
+ HexagonSelectionDAGInfo TSInfo;
+ HexagonFrameLowering FrameLowering;
InstrItineraryData InstrItins;
public:
- HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS);
+ HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS,
+ const TargetMachine &TM);
/// getInstrItins - Return the instruction itineraries based on subtarget
/// selection.
const InstrItineraryData &getInstrItineraryData() const { return InstrItins; }
+ const HexagonInstrInfo *getInstrInfo() const { return &InstrInfo; }
+ const HexagonRegisterInfo *getRegisterInfo() const {
+ return &InstrInfo.getRegisterInfo();
+ }
+ const HexagonTargetLowering *getTargetLowering() const { return &TLInfo; }
+ const HexagonFrameLowering *getFrameLowering() const {
+ return &FrameLowering;
+ }
+ const HexagonSelectionDAGInfo *getSelectionDAGInfo() const { return &TSInfo; }
+ const DataLayout *getDataLayout() const { return &DL; }
+ HexagonSubtarget &initializeSubtargetDependencies(StringRef CPU,
+ StringRef FS);
/// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
index bb950a0..7831410 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -52,7 +52,7 @@ extern "C" void LLVMInitializeHexagonTarget() {
}
static ScheduleDAGInstrs *createVLIWMachineSched(MachineSchedContext *C) {
- return new VLIWMachineScheduler(C, new ConvergingVLIWScheduler());
+ return new VLIWMachineScheduler(C, make_unique<ConvergingVLIWScheduler>());
}
static MachineSchedRegistry
@@ -67,35 +67,13 @@ SchedCustomRegistry("hexagon", "Run Hexagon's custom scheduler",
HexagonTargetMachine::HexagonTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM,
- CodeModel::Model CM,
+ Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
- : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- DL("e-p:32:32:32-"
- "i64:64:64-i32:32:32-i16:16:16-i1:32:32-"
- "f64:64:64-f32:32:32-a0:0-n32") ,
- Subtarget(TT, CPU, FS), InstrInfo(Subtarget), TLInfo(*this),
- TSInfo(*this),
- FrameLowering(Subtarget),
- InstrItins(&Subtarget.getInstrItineraryData()) {
- setMCUseCFI(false);
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ Subtarget(TT, CPU, FS, *this) {
initAsmInfo();
}
-// addPassesForOptimizations - Allow the backend (target) to add Target
-// Independent Optimization passes to the Pass Manager.
-bool HexagonTargetMachine::addPassesForOptimizations(PassManagerBase &PM) {
- if (getOptLevel() != CodeGenOpt::None) {
- PM.add(createConstantPropagationPass());
- PM.add(createLoopSimplifyPass());
- PM.add(createDeadCodeEliminationPass());
- PM.add(createConstantPropagationPass());
- PM.add(createLoopUnrollPass());
- PM.add(createLoopStrengthReducePass());
- }
- return true;
-}
-
namespace {
/// Hexagon Code Generator Pass Configuration Options.
class HexagonPassConfig : public TargetPassConfig {
@@ -116,16 +94,16 @@ public:
return getTM<HexagonTargetMachine>();
}
- virtual ScheduleDAGInstrs *
- createMachineScheduler(MachineSchedContext *C) const {
+ ScheduleDAGInstrs *
+ createMachineScheduler(MachineSchedContext *C) const override {
return createVLIWMachineSched(C);
}
- virtual bool addInstSelector();
- virtual bool addPreRegAlloc();
- virtual bool addPostRegAlloc();
- virtual bool addPreSched2();
- virtual bool addPreEmitPass();
+ bool addInstSelector() override;
+ bool addPreRegAlloc() override;
+ bool addPostRegAlloc() override;
+ bool addPreSched2() override;
+ bool addPreEmitPass() override;
};
} // namespace
@@ -167,16 +145,12 @@ bool HexagonPassConfig::addPostRegAlloc() {
bool HexagonPassConfig::addPreSched2() {
const HexagonTargetMachine &TM = getHexagonTargetMachine();
- const HexagonTargetObjectFile &TLOF =
- (const HexagonTargetObjectFile &)getTargetLowering()->getObjFileLowering();
addPass(createHexagonCopyToCombine());
if (getOptLevel() != CodeGenOpt::None)
addPass(&IfConverterID);
- if (!TLOF.IsSmallDataEnabled()) {
- addPass(createHexagonSplitConst32AndConst64(TM));
- printAndVerify("After hexagon split const32/64 pass");
- }
+ addPass(createHexagonSplitConst32AndConst64(TM));
+ printAndVerify("After hexagon split const32/64 pass");
return true;
}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.h b/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.h
index cf8f9aa..d88178e 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.h
@@ -14,12 +14,8 @@
#ifndef HexagonTARGETMACHINE_H
#define HexagonTARGETMACHINE_H
-#include "HexagonFrameLowering.h"
-#include "HexagonISelLowering.h"
#include "HexagonInstrInfo.h"
-#include "HexagonSelectionDAGInfo.h"
#include "HexagonSubtarget.h"
-#include "llvm/IR/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
namespace llvm {
@@ -27,13 +23,7 @@ namespace llvm {
class Module;
class HexagonTargetMachine : public LLVMTargetMachine {
- const DataLayout DL; // Calculates type size & alignment.
HexagonSubtarget Subtarget;
- HexagonInstrInfo InstrInfo;
- HexagonTargetLowering TLInfo;
- HexagonSelectionDAGInfo TSInfo;
- HexagonFrameLowering FrameLowering;
- const InstrItineraryData* InstrItins;
public:
HexagonTargetMachine(const Target &T, StringRef TT,StringRef CPU,
@@ -41,39 +31,33 @@ public:
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
- virtual const HexagonInstrInfo *getInstrInfo() const {
- return &InstrInfo;
+ const HexagonInstrInfo *getInstrInfo() const override {
+ return getSubtargetImpl()->getInstrInfo();
}
- virtual const HexagonSubtarget *getSubtargetImpl() const {
+ const HexagonSubtarget *getSubtargetImpl() const override {
return &Subtarget;
}
- virtual const HexagonRegisterInfo *getRegisterInfo() const {
- return &InstrInfo.getRegisterInfo();
+ const HexagonRegisterInfo *getRegisterInfo() const override {
+ return getSubtargetImpl()->getRegisterInfo();
}
-
- virtual const InstrItineraryData* getInstrItineraryData() const {
- return InstrItins;
+ const InstrItineraryData* getInstrItineraryData() const override {
+ return &getSubtargetImpl()->getInstrItineraryData();
}
-
-
- virtual const HexagonTargetLowering* getTargetLowering() const {
- return &TLInfo;
+ const HexagonTargetLowering* getTargetLowering() const override {
+ return getSubtargetImpl()->getTargetLowering();
}
-
- virtual const HexagonFrameLowering* getFrameLowering() const {
- return &FrameLowering;
+ const HexagonFrameLowering* getFrameLowering() const override {
+ return getSubtargetImpl()->getFrameLowering();
}
-
- virtual const HexagonSelectionDAGInfo* getSelectionDAGInfo() const {
- return &TSInfo;
+ const HexagonSelectionDAGInfo* getSelectionDAGInfo() const override {
+ return getSubtargetImpl()->getSelectionDAGInfo();
+ }
+ const DataLayout *getDataLayout() const override {
+ return getSubtargetImpl()->getDataLayout();
}
-
- virtual const DataLayout *getDataLayout() const { return &DL; }
static unsigned getModuleMatchQuality(const Module &M);
- // Pass Pipeline Configuration.
- virtual bool addPassesForOptimizations(PassManagerBase &PM);
- virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
};
extern bool flag_aligned_memcpy;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.cpp
index 7773cff..c97526e 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.cpp
@@ -85,9 +85,10 @@ IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM,
return false;
}
-const MCSection *HexagonTargetObjectFile::
-SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler *Mang, const TargetMachine &TM) const {
+const MCSection *
+HexagonTargetObjectFile::SelectSectionForGlobal(const GlobalValue *GV,
+ SectionKind Kind, Mangler &Mang,
+ const TargetMachine &TM) const {
// Handle Small Section classification here.
if (Kind.isBSS() && IsGlobalInSmallSection(GV, TM, Kind))
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.h b/contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.h
index 41f6792..1bd1272 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.h
@@ -19,7 +19,7 @@ namespace llvm {
const MCSectionELF *SmallDataSection;
const MCSectionELF *SmallBSSSection;
public:
- virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
+ void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
/// IsGlobalInSmallSection - Return true if this global address should be
/// placed into small data/bss section.
@@ -30,10 +30,9 @@ namespace llvm {
const TargetMachine &TM) const;
bool IsSmallDataEnabled () const;
- const MCSection* SelectSectionForGlobal(const GlobalValue *GV,
- SectionKind Kind,
- Mangler *Mang,
- const TargetMachine &TM) const;
+ const MCSection *SelectSectionForGlobal(const GlobalValue *GV,
+ SectionKind Kind, Mangler &Mang,
+ const TargetMachine &TM) const override;
};
} // namespace llvm
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index 41e382d..87ce960 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -16,42 +16,42 @@
// prune the dependence.
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "packets"
#include "llvm/CodeGen/DFAPacketizer.h"
-#include "llvm/CodeGen/Passes.h"
+#include "Hexagon.h"
+#include "HexagonMachineFunctionInfo.h"
+#include "HexagonRegisterInfo.h"
+#include "HexagonSubtarget.h"
+#include "HexagonTargetMachine.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
-#include "llvm/CodeGen/LatencyPriorityQueue.h"
-#include "llvm/CodeGen/SchedulerRegistry.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Support/MathExtras.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/MC/MCInstrItineraries.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
-#include "Hexagon.h"
-#include "HexagonTargetMachine.h"
-#include "HexagonRegisterInfo.h"
-#include "HexagonSubtarget.h"
-#include "HexagonMachineFunctionInfo.h"
-
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
#include <map>
#include <vector>
using namespace llvm;
+#define DEBUG_TYPE "packets"
+
static cl::opt<bool> PacketizeVolatiles("hexagon-packetize-volatiles",
cl::ZeroOrMore, cl::Hidden, cl::init(true),
cl::desc("Allow non-solo packetization of volatile memory references"));
@@ -70,7 +70,7 @@ namespace {
initializeHexagonPacketizerPass(*PassRegistry::getPassRegistry());
}
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<MachineDominatorTree>();
AU.addRequired<MachineBranchProbabilityInfo>();
@@ -80,11 +80,11 @@ namespace {
MachineFunctionPass::getAnalysisUsage(AU);
}
- const char *getPassName() const {
+ const char *getPassName() const override {
return "Hexagon Packetizer";
}
- bool runOnMachineFunction(MachineFunction &Fn);
+ bool runOnMachineFunction(MachineFunction &Fn) override;
};
char HexagonPacketizer::ID = 0;
@@ -122,24 +122,25 @@ namespace {
const MachineBranchProbabilityInfo *MBPI);
// initPacketizerState - initialize some internal flags.
- void initPacketizerState();
+ void initPacketizerState() override;
// ignorePseudoInstruction - Ignore bundling of pseudo instructions.
- bool ignorePseudoInstruction(MachineInstr *MI, MachineBasicBlock *MBB);
+ bool ignorePseudoInstruction(MachineInstr *MI,
+ MachineBasicBlock *MBB) override;
// isSoloInstruction - return true if instruction MI can not be packetized
// with any other instruction, which means that MI itself is a packet.
- bool isSoloInstruction(MachineInstr *MI);
+ bool isSoloInstruction(MachineInstr *MI) override;
// isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ
// together.
- bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ);
+ bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) override;
// isLegalToPruneDependencies - Is it legal to prune dependece between SUI
// and SUJ.
- bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ);
+ bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) override;
- MachineBasicBlock::iterator addToPacket(MachineInstr *MI);
+ MachineBasicBlock::iterator addToPacket(MachineInstr *MI) override;
private:
bool IsCallDependent(MachineInstr* MI, SDep::Kind DepType, unsigned DepReg);
bool PromoteToDotNew(MachineInstr* MI, SDep::Kind DepType,
@@ -238,20 +239,20 @@ bool HexagonPacketizer::runOnMachineFunction(MachineFunction &Fn) {
// instruction stream until we find the nearest boundary.
MachineBasicBlock::iterator I = RegionEnd;
for(;I != MBB->begin(); --I, --RemainingCount) {
- if (TII->isSchedulingBoundary(llvm::prior(I), MBB, Fn))
+ if (TII->isSchedulingBoundary(std::prev(I), MBB, Fn))
break;
}
I = MBB->begin();
// Skip empty scheduling regions.
if (I == RegionEnd) {
- RegionEnd = llvm::prior(RegionEnd);
+ RegionEnd = std::prev(RegionEnd);
--RemainingCount;
continue;
}
// Skip regions with one instruction.
- if (I == llvm::prior(RegionEnd)) {
- RegionEnd = llvm::prior(RegionEnd);
+ if (I == std::prev(RegionEnd)) {
+ RegionEnd = std::prev(RegionEnd);
continue;
}
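
Throughout this patch llvm::prior/llvm::next are replaced by the standard std::prev/std::next. A self-contained sketch of the equivalence, using a std::list purely for illustration:

    #include <iterator>
    #include <list>

    int main() {
      std::list<int> L = {1, 2, 3};
      auto Last = std::prev(L.end());      // iterator to 3, as llvm::prior(end) was
      auto Second = std::next(L.begin());  // iterator to 2, as llvm::next(begin) was
      return (*Last == 3 && *Second == 2) ? 0 : 1;
    }
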
@@ -391,7 +392,7 @@ static bool IsLoopN(MachineInstr *MI) {
/// callee-saved register.
static bool DoesModifyCalleeSavedReg(MachineInstr *MI,
const TargetRegisterInfo *TRI) {
- for (const uint16_t *CSR = TRI->getCalleeSavedRegs(); *CSR; ++CSR) {
+ for (const MCPhysReg *CSR = TRI->getCalleeSavedRegs(); *CSR; ++CSR) {
unsigned CalleeSavedReg = *CSR;
if (MI->modifiesRegister(CalleeSavedReg, TRI))
return true;
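
The loop above relies on getCalleeSavedRegs() returning a zero-terminated array, which is why it tests *CSR instead of carrying a length; the patch only changes the spelled element type from raw uint16_t to MCPhysReg. A standalone sketch of the idiom, with a local stand-in typedef and made-up register numbers:

    #include <cstdint>
    typedef uint16_t MCPhysReg;  // local stand-in for the LLVM typedef

    static const MCPhysReg CalleeSaved[] = { 4, 5, 6, 0 };  // 0 terminates the list

    static bool isCalleeSaved(MCPhysReg Reg) {
      for (const MCPhysReg *CSR = CalleeSaved; *CSR; ++CSR)
        if (*CSR == Reg)
          return true;
      return false;
    }
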
@@ -604,7 +605,7 @@ bool HexagonPacketizerList::CanPromoteToNewValueStore( MachineInstr *MI,
// evaluate identically
unsigned predRegNumSrc = 0;
unsigned predRegNumDst = 0;
- const TargetRegisterClass* predRegClass = NULL;
+ const TargetRegisterClass* predRegClass = nullptr;
// Get predicate register used in the source instruction
for(unsigned opNum = 0; opNum < PacketMI->getNumOperands(); opNum++) {
@@ -681,7 +682,7 @@ bool HexagonPacketizerList::CanPromoteToNewValueStore( MachineInstr *MI,
}
}
- // Make sure that for non POST_INC stores:
+ // Make sure that for non-POST_INC stores:
// 1. The only use of reg is DepReg and no other registers.
// This handles V4 base+index registers.
// The following store can not be dot new.
@@ -1173,7 +1174,7 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
// of that (IsCallDependent) function. Bug 6216 is opened for this.
//
unsigned DepReg = 0;
- const TargetRegisterClass* RC = NULL;
+ const TargetRegisterClass* RC = nullptr;
if (DepType == SDep::Data) {
DepReg = SUJ->Succs[i].getReg();
RC = QRI->getMinimalPhysRegClass(DepReg);
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonVarargsCallingConvention.h b/contrib/llvm/lib/Target/Hexagon/HexagonVarargsCallingConvention.h
index c607b5d..668ca98 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonVarargsCallingConvention.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonVarargsCallingConvention.h
@@ -41,7 +41,7 @@ static bool CC_Hexagon32_VarArgs(unsigned ValNo, EVT ValVT,
}
- // Only assign registers for named (non varargs) arguments
+ // Only assign registers for named (non-varargs) arguments
if ( !ForceMem && ((NonVarArgsParams == -1) || (CurrentParam <=
NonVarArgsParams))) {
diff --git a/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp b/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp
index 7c41507..9942a60 100644
--- a/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp
@@ -11,19 +11,20 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
#include "HexagonAsmPrinter.h"
#include "Hexagon.h"
#include "HexagonInstPrinter.h"
#include "MCTargetDesc/HexagonMCInst.h"
-#include "llvm/MC/MCInst.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "asm-printer"
+
#define GET_INSTRUCTION_NAME
#include "HexagonGenAsmWriter.inc"
diff --git a/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h b/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h
index d0cef68..09e3f88 100644
--- a/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h
+++ b/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h
@@ -27,7 +27,7 @@ namespace llvm {
const MCRegisterInfo &MRI)
: MCInstPrinter(MAI, MII, MRI), MII(MII) {}
- virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
+ void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot) override;
void printInst(const HexagonMCInst *MI, raw_ostream &O, StringRef Annot);
virtual StringRef getOpcodeName(unsigned Opcode) const;
void printInstruction(const MCInst *MI, raw_ostream &O);
diff --git a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
index 8519cf3..f8be77c 100644
--- a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
+++ b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
@@ -87,70 +87,82 @@ namespace HexagonII {
// Solo instructions.
SoloPos = 5,
SoloMask = 0x1,
+ // Packed only with A or X-type instructions.
+ SoloAXPos = 6,
+ SoloAXMask = 0x1,
+ // Only A-type instruction in first slot or nothing.
+ SoloAin1Pos = 7,
+ SoloAin1Mask = 0x1,
// Predicated instructions.
- PredicatedPos = 6,
+ PredicatedPos = 8,
PredicatedMask = 0x1,
- PredicatedFalsePos = 7,
+ PredicatedFalsePos = 9,
PredicatedFalseMask = 0x1,
- PredicatedNewPos = 8,
+ PredicatedNewPos = 10,
PredicatedNewMask = 0x1,
+ PredicateLatePos = 11,
+ PredicateLateMask = 0x1,
// New-Value consumer instructions.
- NewValuePos = 9,
+ NewValuePos = 12,
NewValueMask = 0x1,
-
// New-Value producer instructions.
- hasNewValuePos = 10,
+ hasNewValuePos = 13,
hasNewValueMask = 0x1,
-
// Which operand consumes or produces a new value.
- NewValueOpPos = 11,
+ NewValueOpPos = 14,
NewValueOpMask = 0x7,
-
- // Which bits encode the new value.
- NewValueBitsPos = 14,
- NewValueBitsMask = 0x3,
-
// Stores that can become new-value stores.
- mayNVStorePos = 16,
+ mayNVStorePos = 17,
mayNVStoreMask = 0x1,
-
// New-value store instructions.
- NVStorePos = 17,
+ NVStorePos = 18,
NVStoreMask = 0x1,
+ // Loads that can become current-value loads.
+ mayCVLoadPos = 19,
+ mayCVLoadMask = 0x1,
+ // Current-value load instructions.
+ CVLoadPos = 20,
+ CVLoadMask = 0x1,
// Extendable insns.
- ExtendablePos = 18,
+ ExtendablePos = 21,
ExtendableMask = 0x1,
-
// Insns must be extended.
- ExtendedPos = 19,
+ ExtendedPos = 22,
ExtendedMask = 0x1,
-
// Which operand may be extended.
- ExtendableOpPos = 20,
+ ExtendableOpPos = 23,
ExtendableOpMask = 0x7,
-
// Signed or unsigned range.
- ExtentSignedPos = 23,
+ ExtentSignedPos = 26,
ExtentSignedMask = 0x1,
-
// Number of bits of range before extending operand.
- ExtentBitsPos = 24,
+ ExtentBitsPos = 27,
ExtentBitsMask = 0x1f,
+ // Alignment power-of-two before extending operand.
+ ExtentAlignPos = 32,
+ ExtentAlignMask = 0x3,
// Valid subtargets
- validSubTargetPos = 29,
+ validSubTargetPos = 34,
validSubTargetMask = 0xf,
// Addressing mode for load/store instructions.
- AddrModePos = 33,
+ AddrModePos = 40,
AddrModeMask = 0x7,
+ // Access size for load/store instructions.
+ MemAccessSizePos = 43,
+ MemAccesSizeMask = 0x7,
+
+ // Branch predicted taken.
+ TakenPos = 47,
+ TakenMask = 0x1,
- // Access size of memory access instructions (load/store).
- MemAccessSizePos = 36,
- MemAccesSizeMask = 0x7
+ // Floating-point instructions.
+ FPPos = 48,
+ FPMask = 0x1
};
// *** The code above must match HexagonInstrFormat*.td *** //
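
The renumbered Pos/Mask pairs above describe fields packed into each instruction's TSFlags word, so every consumer that shifts and masks must agree with the .td encodings. A minimal sketch of how one such field is typically read; getAddrMode here is an illustrative helper, not something added by this patch.

    #include <cstdint>

    namespace HexagonII {
      enum { AddrModePos = 40, AddrModeMask = 0x7 };  // values from the hunk above
    }

    static unsigned getAddrMode(uint64_t TSFlags) {
      // Shift the field down to bit 0, then mask off everything above it.
      return (TSFlags >> HexagonII::AddrModePos) & HexagonII::AddrModeMask;
    }
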
diff --git a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp
index 3f9415b..141e514 100644
--- a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp
@@ -21,12 +21,11 @@ void HexagonMCAsmInfo::anchor() {}
HexagonMCAsmInfo::HexagonMCAsmInfo(StringRef TT) {
Data16bitsDirective = "\t.half\t";
Data32bitsDirective = "\t.word\t";
- Data64bitsDirective = 0; // .xword is only supported by V9.
+ Data64bitsDirective = nullptr; // .xword is only supported by V9.
ZeroDirective = "\t.skip\t";
CommentString = "//";
HasLEB128 = true;
- PrivateGlobalPrefix = ".L";
LCOMMDirectiveAlignmentType = LCOMM::ByteAlignment;
InlineAsmStart = "# InlineAsm Start";
InlineAsmEnd = "# InlineAsm End";
diff --git a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h
index bd8cb76..953d804 100644
--- a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h
@@ -19,7 +19,7 @@
namespace llvm {
class HexagonMCAsmInfo : public MCAsmInfoELF {
- virtual void anchor();
+ void anchor() override;
public:
explicit HexagonMCAsmInfo(StringRef TT);
};
diff --git a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInst.h b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInst.h
index 3ca71f0..3c52d456 100644
--- a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInst.h
+++ b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInst.h
@@ -31,7 +31,7 @@ namespace llvm {
public:
explicit HexagonMCInst():
- MCInst(), MCID(0), packetStart(0), packetEnd(0) {};
+ MCInst(), MCID(nullptr), packetStart(0), packetEnd(0) {};
HexagonMCInst(const MCInstrDesc& mcid):
MCInst(), MCID(&mcid), packetStart(0), packetEnd(0) {};
diff --git a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
index 2f93a52..581674d 100644
--- a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
@@ -14,15 +14,17 @@
#include "HexagonMCTargetDesc.h"
#include "HexagonMCAsmInfo.h"
#include "InstPrinter/HexagonInstPrinter.h"
-#include "llvm/MC/MachineLocation.h"
#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MachineLocation.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
#define GET_INSTRINFO_MC_DESC
#include "HexagonGenInstrInfo.inc"
@@ -32,8 +34,6 @@
#define GET_REGINFO_MC_DESC
#include "HexagonGenRegisterInfo.inc"
-using namespace llvm;
-
static MCInstrInfo *createHexagonMCInstrInfo() {
MCInstrInfo *X = new MCInstrInfo();
InitHexagonMCInstrInfo(X);
@@ -60,7 +60,7 @@ static MCAsmInfo *createHexagonMCAsmInfo(const MCRegisterInfo &MRI,
// VirtualFP = (R30 + #0).
MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(
- 0, Hexagon::R30, 0);
+ nullptr, Hexagon::R30, 0);
MAI->addInitialFrameState(Inst);
return MAI;
diff --git a/contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.cpp b/contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.cpp
index 4b12aea..acf1214 100644
--- a/contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.cpp
+++ b/contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
#include "MSP430InstPrinter.h"
#include "MSP430.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -21,6 +20,8 @@
#include "llvm/Support/FormattedStream.h"
using namespace llvm;
+#define DEBUG_TYPE "asm-printer"
+
// Include the auto-generated portion of the assembly writer.
#include "MSP430GenAsmWriter.inc"
@@ -44,7 +45,7 @@ void MSP430InstPrinter::printPCRelImmOperand(const MCInst *MI, unsigned OpNo,
void MSP430InstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O, const char *Modifier) {
- assert((Modifier == 0 || Modifier[0] == 0) && "No modifiers supported");
+ assert((Modifier == nullptr || Modifier[0] == 0) && "No modifiers supported");
const MCOperand &Op = MI->getOperand(OpNo);
if (Op.isReg()) {
O << getRegisterName(Op.getReg());
diff --git a/contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.h b/contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.h
index d32eb3a..5afbd20 100644
--- a/contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.h
+++ b/contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.h
@@ -25,17 +25,17 @@ namespace llvm {
const MCRegisterInfo &MRI)
: MCInstPrinter(MAI, MII, MRI) {}
- virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
+ void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot) override;
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &O);
static const char *getRegisterName(unsigned RegNo);
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O,
- const char *Modifier = 0);
+ const char *Modifier = nullptr);
void printPCRelImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printSrcMemOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O,
- const char *Modifier = 0);
+ const char *Modifier = nullptr);
void printCCOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
};
diff --git a/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp b/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp
index acf2ab8..df1aa1a 100644
--- a/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp
@@ -20,7 +20,6 @@ void MSP430MCAsmInfo::anchor() { }
MSP430MCAsmInfo::MSP430MCAsmInfo(StringRef TT) {
PointerSize = CalleeSaveStackSlotSize = 2;
- PrivateGlobalPrefix = ".L";
CommentString = ";";
AlignmentIsInBytes = false;
diff --git a/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h b/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h
index a7e0e58..ef805bb 100644
--- a/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h
+++ b/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h
@@ -20,7 +20,7 @@ namespace llvm {
class StringRef;
class MSP430MCAsmInfo : public MCAsmInfoELF {
- virtual void anchor();
+ void anchor() override;
public:
explicit MSP430MCAsmInfo(StringRef TT);
};
diff --git a/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp b/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp
index 530e6aa..72adb45 100644
--- a/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp
@@ -20,6 +20,8 @@
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
#define GET_INSTRINFO_MC_DESC
#include "MSP430GenInstrInfo.inc"
@@ -29,8 +31,6 @@
#define GET_REGINFO_MC_DESC
#include "MSP430GenRegisterInfo.inc"
-using namespace llvm;
-
static MCInstrInfo *createMSP430MCInstrInfo() {
MCInstrInfo *X = new MCInstrInfo();
InitMSP430MCInstrInfo(X);
@@ -66,7 +66,7 @@ static MCInstPrinter *createMSP430MCInstPrinter(const Target &T,
const MCSubtargetInfo &STI) {
if (SyntaxVariant == 0)
return new MSP430InstPrinter(MAI, MII, MRI);
- return 0;
+ return nullptr;
}
extern "C" void LLVMInitializeMSP430TargetMC() {
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430.td b/contrib/llvm/lib/Target/MSP430/MSP430.td
index c6796b3..dfea669 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430.td
+++ b/contrib/llvm/lib/Target/MSP430/MSP430.td
@@ -50,17 +50,11 @@ include "MSP430InstrInfo.td"
def MSP430InstrInfo : InstrInfo;
-def MSP430InstPrinter : AsmWriter {
- string AsmWriterClassName = "InstPrinter";
- bit isMCAsmWriter = 1;
-}
-
//===----------------------------------------------------------------------===//
// Target Declaration
//===----------------------------------------------------------------------===//
def MSP430 : Target {
let InstructionSet = MSP430InstrInfo;
- let AssemblyWriters = [MSP430InstPrinter];
}
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430AsmPrinter.cpp b/contrib/llvm/lib/Target/MSP430/MSP430AsmPrinter.cpp
index 18311c3..22a973e 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430AsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430AsmPrinter.cpp
@@ -12,13 +12,11 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
#include "MSP430.h"
#include "InstPrinter/MSP430InstPrinter.h"
#include "MSP430InstrInfo.h"
#include "MSP430MCInstLower.h"
#include "MSP430TargetMachine.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -26,6 +24,7 @@
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInst.h"
@@ -33,30 +32,31 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/Mangler.h"
using namespace llvm;
+#define DEBUG_TYPE "asm-printer"
+
namespace {
class MSP430AsmPrinter : public AsmPrinter {
public:
MSP430AsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
: AsmPrinter(TM, Streamer) {}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "MSP430 Assembly Printer";
}
void printOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O, const char* Modifier = 0);
+ raw_ostream &O, const char* Modifier = nullptr);
void printSrcMemOperand(const MachineInstr *MI, int OpNum,
raw_ostream &O);
bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O);
+ raw_ostream &O) override;
bool PrintAsmMemoryOperand(const MachineInstr *MI,
unsigned OpNo, unsigned AsmVariant,
- const char *ExtraCode, raw_ostream &O);
- void EmitInstruction(const MachineInstr *MI);
+ const char *ExtraCode, raw_ostream &O) override;
+ void EmitInstruction(const MachineInstr *MI) override;
};
} // end of anonymous namespace
@@ -99,12 +99,6 @@ void MSP430AsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
return;
}
- case MachineOperand::MO_ExternalSymbol: {
- bool isMemOp = Modifier && !strcmp(Modifier, "mem");
- O << (isMemOp ? '&' : '#');
- O << MAI->getGlobalPrefix() << MO.getSymbolName();
- return;
- }
}
}
@@ -158,7 +152,7 @@ void MSP430AsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCInst TmpInst;
MCInstLowering.Lower(MI, TmpInst);
- OutStreamer.EmitInstruction(TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
}
// Force static initialization.
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430BranchSelector.cpp b/contrib/llvm/lib/Target/MSP430/MSP430BranchSelector.cpp
index f128427..a96930a 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430BranchSelector.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430BranchSelector.cpp
@@ -15,7 +15,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "msp430-branch-select"
#include "MSP430.h"
#include "MSP430InstrInfo.h"
#include "llvm/ADT/Statistic.h"
@@ -25,6 +24,8 @@
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
+#define DEBUG_TYPE "msp430-branch-select"
+
STATISTIC(NumExpanded, "Number of branches expanded to long format");
namespace {
@@ -35,9 +36,9 @@ namespace {
/// BlockSizes - The sizes of the basic blocks in the function.
std::vector<unsigned> BlockSizes;
- virtual bool runOnMachineFunction(MachineFunction &Fn);
+ bool runOnMachineFunction(MachineFunction &Fn) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "MSP430 Branch Selector";
}
};
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp b/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
index e504011..82c8b29 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
@@ -71,7 +71,7 @@ void MSP430FrameLowering::emitPrologue(MachineFunction &MF) const {
.addReg(MSP430::SPW);
// Mark the FramePtr as live-in in every block except the entry.
- for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
+ for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end();
I != E; ++I)
I->addLiveIn(MSP430::FPW);
@@ -138,7 +138,7 @@ void MSP430FrameLowering::emitEpilogue(MachineFunction &MF,
// Skip the callee-saved pop instructions.
while (MBBI != MBB.begin()) {
- MachineBasicBlock::iterator PI = prior(MBBI);
+ MachineBasicBlock::iterator PI = std::prev(MBBI);
unsigned Opc = PI->getOpcode();
if (Opc != MSP430::POP16r && !PI->isTerminator())
break;
@@ -242,7 +242,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// alignment boundary.
Amount = (Amount+StackAlign-1)/StackAlign*StackAlign;
- MachineInstr *New = 0;
+ MachineInstr *New = nullptr;
if (Old->getOpcode() == TII.getCallFrameSetupOpcode()) {
New = BuildMI(MF, Old->getDebugLoc(),
TII.get(MSP430::SUB16ri), MSP430::SPW)
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.h b/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.h
index 8370714..fadfeed 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.h
@@ -15,43 +15,38 @@
#define MSP430_FRAMEINFO_H
#include "MSP430.h"
-#include "MSP430Subtarget.h"
#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
- class MSP430Subtarget;
-
class MSP430FrameLowering : public TargetFrameLowering {
protected:
- const MSP430Subtarget &STI;
public:
- explicit MSP430FrameLowering(const MSP430Subtarget &sti)
- : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 2, -2, 2),
- STI(sti) {}
+ explicit MSP430FrameLowering()
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 2, -2, 2) {}
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+ void emitPrologue(MachineFunction &MF) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const override;
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo *TRI) const override;
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const override;
- bool hasFP(const MachineFunction &MF) const;
- bool hasReservedCallFrame(const MachineFunction &MF) const;
+ bool hasFP(const MachineFunction &MF) const override;
+ bool hasReservedCallFrame(const MachineFunction &MF) const override;
void processFunctionBeforeFrameFinalized(MachineFunction &MF,
- RegScavenger *RS = NULL) const;
+ RegScavenger *RS = nullptr) const override;
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/contrib/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
index 4152829..a9b9035 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
@@ -31,6 +31,8 @@
#include "llvm/Target/TargetLowering.h"
using namespace llvm;
+#define DEBUG_TYPE "msp430-isel"
+
namespace {
struct MSP430ISelAddressMode {
enum {
@@ -52,17 +54,17 @@ namespace {
unsigned Align; // CP alignment.
MSP430ISelAddressMode()
- : BaseType(RegBase), Disp(0), GV(0), CP(0), BlockAddr(0),
- ES(0), JT(-1), Align(0) {
+ : BaseType(RegBase), Disp(0), GV(nullptr), CP(nullptr),
+ BlockAddr(nullptr), ES(nullptr), JT(-1), Align(0) {
}
bool hasSymbolicDisplacement() const {
- return GV != 0 || CP != 0 || ES != 0 || JT != -1;
+ return GV != nullptr || CP != nullptr || ES != nullptr || JT != -1;
}
void dump() {
errs() << "MSP430ISelAddressMode " << this << '\n';
- if (BaseType == RegBase && Base.Reg.getNode() != 0) {
+ if (BaseType == RegBase && Base.Reg.getNode() != nullptr) {
errs() << "Base.Reg ";
Base.Reg.getNode()->dump();
} else if (BaseType == FrameIndexBase) {
@@ -99,7 +101,7 @@ namespace {
Lowering(*TM.getTargetLowering()),
Subtarget(*TM.getSubtargetImpl()) { }
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "MSP430 DAG->DAG Pattern Instruction Selection";
}
@@ -107,15 +109,14 @@ namespace {
bool MatchWrapper(SDValue N, MSP430ISelAddressMode &AM);
bool MatchAddressBase(SDValue N, MSP430ISelAddressMode &AM);
- virtual bool
- SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
- std::vector<SDValue> &OutOps);
+ bool SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
+ std::vector<SDValue> &OutOps) override;
// Include the pieces autogenerated from the target description.
#include "MSP430GenDAGISel.inc"
private:
- SDNode *Select(SDNode *N);
+ SDNode *Select(SDNode *N) override;
SDNode *SelectIndexedLoad(SDNode *Op);
SDNode *SelectIndexedBinOp(SDNode *Op, SDValue N1, SDValue N2,
unsigned Opc8, unsigned Opc16);
@@ -199,7 +200,7 @@ bool MSP430DAGToDAGISel::MatchAddress(SDValue N, MSP430ISelAddressMode &AM) {
case ISD::FrameIndex:
if (AM.BaseType == MSP430ISelAddressMode::RegBase
- && AM.Base.Reg.getNode() == 0) {
+ && AM.Base.Reg.getNode() == nullptr) {
AM.BaseType = MSP430ISelAddressMode::FrameIndexBase;
AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
return false;
@@ -228,7 +229,7 @@ bool MSP430DAGToDAGISel::MatchAddress(SDValue N, MSP430ISelAddressMode &AM) {
// Start with the LHS as an addr mode.
if (!MatchAddress(N.getOperand(0), AM) &&
// Address could not have picked a GV address for the displacement.
- AM.GV == NULL &&
+ AM.GV == nullptr &&
// Check to see if the LHS & C is zero.
CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
AM.Disp += Offset;
@@ -330,7 +331,7 @@ static bool isValidIndexedLoad(const LoadSDNode *LD) {
SDNode *MSP430DAGToDAGISel::SelectIndexedLoad(SDNode *N) {
LoadSDNode *LD = cast<LoadSDNode>(N);
if (!isValidIndexedLoad(LD))
- return NULL;
+ return nullptr;
MVT VT = LD->getMemoryVT().getSimpleVT();
@@ -343,7 +344,7 @@ SDNode *MSP430DAGToDAGISel::SelectIndexedLoad(SDNode *N) {
Opcode = MSP430::MOV16rm_POST;
break;
default:
- return NULL;
+ return nullptr;
}
return CurDAG->getMachineNode(Opcode, SDLoc(N),
@@ -359,7 +360,7 @@ SDNode *MSP430DAGToDAGISel::SelectIndexedBinOp(SDNode *Op,
IsLegalToFold(N1, Op, Op, OptLevel)) {
LoadSDNode *LD = cast<LoadSDNode>(N1);
if (!isValidIndexedLoad(LD))
- return NULL;
+ return nullptr;
MVT VT = LD->getMemoryVT().getSimpleVT();
unsigned Opc = (VT == MVT::i16 ? Opc16 : Opc8);
@@ -367,9 +368,7 @@ SDNode *MSP430DAGToDAGISel::SelectIndexedBinOp(SDNode *Op,
MemRefs0[0] = cast<MemSDNode>(N1)->getMemOperand();
SDValue Ops0[] = { N2, LD->getBasePtr(), LD->getChain() };
SDNode *ResNode =
- CurDAG->SelectNodeTo(Op, Opc,
- VT, MVT::i16, MVT::Other,
- Ops0, 3);
+ CurDAG->SelectNodeTo(Op, Opc, VT, MVT::i16, MVT::Other, Ops0);
cast<MachineSDNode>(ResNode)->setMemRefs(MemRefs0, MemRefs0 + 1);
// Transfer chain.
ReplaceUses(SDValue(N1.getNode(), 2), SDValue(ResNode, 2));
@@ -378,7 +377,7 @@ SDNode *MSP430DAGToDAGISel::SelectIndexedBinOp(SDNode *Op,
return ResNode;
}
- return NULL;
+ return nullptr;
}
@@ -396,7 +395,7 @@ SDNode *MSP430DAGToDAGISel::Select(SDNode *Node) {
Node->dump(CurDAG);
errs() << "\n");
Node->setNodeId(-1);
- return NULL;
+ return nullptr;
}
// Few custom selection stuff.
@@ -484,7 +483,7 @@ SDNode *MSP430DAGToDAGISel::Select(SDNode *Node) {
SDNode *ResNode = SelectCode(Node);
DEBUG(errs() << "=> ");
- if (ResNode == NULL || ResNode == Node)
+ if (ResNode == nullptr || ResNode == Node)
DEBUG(Node->dump(CurDAG));
else
DEBUG(ResNode->dump(CurDAG));
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
index 745cdf5..3d3ee92 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -11,8 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "msp430-lower"
-
#include "MSP430ISelLowering.h"
#include "MSP430.h"
#include "MSP430MachineFunctionInfo.h"
@@ -38,6 +36,8 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "msp430-lower"
+
typedef enum {
NoHWMult,
HWMultIntr,
@@ -57,11 +57,8 @@ HWMultMode("msp430-hwmult-mode", cl::Hidden,
"Assume hardware multiplier cannot be used inside interrupts"),
clEnumValEnd));
-MSP430TargetLowering::MSP430TargetLowering(MSP430TargetMachine &tm) :
- TargetLowering(tm, new TargetLoweringObjectFileELF()),
- Subtarget(*tm.getSubtargetImpl()) {
-
- TD = getDataLayout();
+MSP430TargetLowering::MSP430TargetLowering(const TargetMachine &TM)
+ : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
// Set up the register classes.
addRegisterClass(MVT::i8, &MSP430::GR8RegClass);
@@ -284,7 +281,7 @@ template<typename ArgT>
static void AnalyzeArguments(CCState &State,
SmallVectorImpl<CCValAssign> &ArgLocs,
const SmallVectorImpl<ArgT> &Args) {
- static const uint16_t RegList[] = {
+ static const MCPhysReg RegList[] = {
MSP430::R15W, MSP430::R14W, MSP430::R13W, MSP430::R12W
};
static const unsigned NbRegs = array_lengthof(RegList);
@@ -462,7 +459,7 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
errs() << "LowerFormalArguments Unhandled argument type: "
<< RegVT.getSimpleVT().SimpleTy << "\n";
#endif
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
case MVT::i16:
unsigned VReg = RegInfo.createVirtualRegister(&MSP430::GR16RegClass);
@@ -568,7 +565,7 @@ MSP430TargetLowering::LowerReturn(SDValue Chain,
if (Flag.getNode())
RetOps.push_back(Flag);
- return DAG.getNode(Opc, dl, MVT::Other, &RetOps[0], RetOps.size());
+ return DAG.getNode(Opc, dl, MVT::Other, RetOps);
}
/// LowerCCCCallTo - functions arguments are copied from virtual regs to
@@ -629,7 +626,7 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
} else {
assert(VA.isMemLoc());
- if (StackPtr.getNode() == 0)
+ if (!StackPtr.getNode())
StackPtr = DAG.getCopyFromReg(Chain, dl, MSP430::SPW, getPointerTy());
SDValue PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(),
@@ -659,8 +656,7 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// Transform all store nodes into one single node because all store nodes are
// independent of each other.
if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
// Build a sequence of copy-to-reg nodes chained together with token chain and
// flag operands which copy the outgoing args into registers. The InFlag in
@@ -695,7 +691,7 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
if (InFlag.getNode())
Ops.push_back(InFlag);
- Chain = DAG.getNode(MSP430ISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
+ Chain = DAG.getNode(MSP430ISD::CALL, dl, NodeTys, Ops);
InFlag = Chain.getValue(1);
// Create the CALLSEQ_END node.
@@ -986,7 +982,7 @@ SDValue MSP430TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
Ops.push_back(Zero);
Ops.push_back(TargetCC);
Ops.push_back(Flag);
- return DAG.getNode(MSP430ISD::SELECT_CC, dl, VTs, &Ops[0], Ops.size());
+ return DAG.getNode(MSP430ISD::SELECT_CC, dl, VTs, Ops);
}
}
@@ -1009,7 +1005,7 @@ SDValue MSP430TargetLowering::LowerSELECT_CC(SDValue Op,
Ops.push_back(TargetCC);
Ops.push_back(Flag);
- return DAG.getNode(MSP430ISD::SELECT_CC, dl, VTs, &Ops[0], Ops.size());
+ return DAG.getNode(MSP430ISD::SELECT_CC, dl, VTs, Ops);
}
SDValue MSP430TargetLowering::LowerSIGN_EXTEND(SDValue Op,
@@ -1033,7 +1029,7 @@ MSP430TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
if (ReturnAddrIndex == 0) {
// Set up a frame object for the return address.
- uint64_t SlotSize = TD->getPointerSize();
+ uint64_t SlotSize = getDataLayout()->getPointerSize();
ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize,
true);
FuncInfo->setRAIndex(ReturnAddrIndex);
@@ -1047,13 +1043,16 @@ SDValue MSP430TargetLowering::LowerRETURNADDR(SDValue Op,
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
MFI->setReturnAddressIsTaken(true);
+ if (verifyReturnAddressArgumentIsConstant(Op, DAG))
+ return SDValue();
+
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
SDLoc dl(Op);
if (Depth > 0) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
SDValue Offset =
- DAG.getConstant(TD->getPointerSize(), MVT::i16);
+ DAG.getConstant(getDataLayout()->getPointerSize(), MVT::i16);
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, getPointerTy(),
FrameAddr, Offset),
@@ -1145,7 +1144,7 @@ bool MSP430TargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
const char *MSP430TargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
- default: return NULL;
+ default: return nullptr;
case MSP430ISD::RET_FLAG: return "MSP430ISD::RET_FLAG";
case MSP430ISD::RETI_FLAG: return "MSP430ISD::RETI_FLAG";
case MSP430ISD::RRA: return "MSP430ISD::RRA";
@@ -1245,8 +1244,7 @@ MSP430TargetLowering::EmitShiftInstr(MachineInstr *MI,
// Update machine-CFG edges by transferring all successors of the current
// block to the block containing instructions after shift.
- RemBB->splice(RemBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
+ RemBB->splice(RemBB->begin(), BB, std::next(MachineBasicBlock::iterator(MI)),
BB->end());
RemBB->transferSuccessorsAndUpdatePHIs(BB);
@@ -1341,8 +1339,7 @@ MSP430TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// Update machine-CFG edges by transferring all successors of the current
// block to the new block which will contain the Phi node for the select.
copy1MBB->splice(copy1MBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
copy1MBB->transferSuccessorsAndUpdatePHIs(BB);
// Next, add the true and fallthrough blocks as its successors.
BB->addSuccessor(copy0MBB);
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h
index 85a861e..3e2f344 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h
@@ -66,21 +66,18 @@ namespace llvm {
};
}
- class MSP430Subtarget;
- class MSP430TargetMachine;
-
class MSP430TargetLowering : public TargetLowering {
public:
- explicit MSP430TargetLowering(MSP430TargetMachine &TM);
+ explicit MSP430TargetLowering(const TargetMachine &TM);
- virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i8; }
+ MVT getScalarShiftAmountTy(EVT LHSTy) const override { return MVT::i8; }
/// LowerOperation - Provide custom lowering hooks for some operations.
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
/// getTargetNodeName - This method returns the name of a target specific
/// DAG node.
- virtual const char *getTargetNodeName(unsigned Opcode) const;
+ const char *getTargetNodeName(unsigned Opcode) const override;
SDValue LowerShifts(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
@@ -97,15 +94,16 @@ namespace llvm {
SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;
TargetLowering::ConstraintType
- getConstraintType(const std::string &Constraint) const;
+ getConstraintType(const std::string &Constraint) const override;
std::pair<unsigned, const TargetRegisterClass*>
- getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const;
+ getRegForInlineAsmConstraint(const std::string &Constraint,
+ MVT VT) const override;
/// isTruncateFree - Return true if it's free to truncate a value of type
/// Ty1 to type Ty2. e.g. On msp430 it's free to truncate a i16 value in
/// register R15W to i8 by referencing its sub-register R15B.
- virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
- virtual bool isTruncateFree(EVT VT1, EVT VT2) const;
+ bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
+ bool isTruncateFree(EVT VT1, EVT VT2) const override;
/// isZExtFree - Return true if any actual instruction that defines a value
/// of type Ty1 implicit zero-extends the value to Ty2 in the result
@@ -115,12 +113,12 @@ namespace llvm {
/// necessarily apply to truncate instructions. e.g. on msp430, all
/// instructions that define 8-bit values implicit zero-extend the result
/// out to 16 bits.
- virtual bool isZExtFree(Type *Ty1, Type *Ty2) const;
- virtual bool isZExtFree(EVT VT1, EVT VT2) const;
- virtual bool isZExtFree(SDValue Val, EVT VT2) const;
+ bool isZExtFree(Type *Ty1, Type *Ty2) const override;
+ bool isZExtFree(EVT VT1, EVT VT2) const override;
+ bool isZExtFree(SDValue Val, EVT VT2) const override;
MachineBasicBlock* EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *BB) const;
+ MachineBasicBlock *BB) const override;
MachineBasicBlock* EmitShiftInstr(MachineInstr *MI,
MachineBasicBlock *BB) const;
@@ -148,31 +146,27 @@ namespace llvm {
SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
- virtual SDValue
+ SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
- virtual SDValue
+ SmallVectorImpl<SDValue> &InVals) const override;
+ SDValue
LowerCall(TargetLowering::CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const;
-
- virtual SDValue
- LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const;
-
- virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
- SDValue &Base,
- SDValue &Offset,
- ISD::MemIndexedMode &AM,
- SelectionDAG &DAG) const;
-
- const MSP430Subtarget &Subtarget;
- const DataLayout *TD;
+ SmallVectorImpl<SDValue> &InVals) const override;
+
+ SDValue LowerReturn(SDValue Chain,
+ CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ SDLoc dl, SelectionDAG &DAG) const override;
+
+ bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
+ SDValue &Base,
+ SDValue &Offset,
+ ISD::MemIndexedMode &AM,
+ SelectionDAG &DAG) const override;
};
} // namespace llvm
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
index 7a0b00a..ccb6c09 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
@@ -22,17 +22,17 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
#define GET_INSTRINFO_CTOR_DTOR
#include "MSP430GenInstrInfo.inc"
-using namespace llvm;
-
// Pin the vtable to this file.
void MSP430InstrInfo::anchor() {}
-MSP430InstrInfo::MSP430InstrInfo(MSP430TargetMachine &tm)
+MSP430InstrInfo::MSP430InstrInfo(MSP430Subtarget &STI)
: MSP430GenInstrInfo(MSP430::ADJCALLSTACKDOWN, MSP430::ADJCALLSTACKUP),
- RI(tm) {}
+ RI() {}
void MSP430InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
@@ -205,14 +205,14 @@ bool MSP430InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
}
// If the block has any instructions after a JMP, delete them.
- while (llvm::next(I) != MBB.end())
- llvm::next(I)->eraseFromParent();
+ while (std::next(I) != MBB.end())
+ std::next(I)->eraseFromParent();
Cond.clear();
- FBB = 0;
+ FBB = nullptr;
// Delete the JMP if it's equivalent to a fall-through.
if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
- TBB = 0;
+ TBB = nullptr;
I->eraseFromParent();
I = MBB.end();
continue;
@@ -299,7 +299,7 @@ unsigned MSP430InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
default:
switch (Desc.getOpcode()) {
default: llvm_unreachable("Unknown instruction size!");
- case TargetOpcode::PROLOG_LABEL:
+ case TargetOpcode::CFI_INSTRUCTION:
case TargetOpcode::EH_LABEL:
case TargetOpcode::IMPLICIT_DEF:
case TargetOpcode::KILL:
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h
index ad2b8cc..e6baaef 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h
@@ -22,7 +22,7 @@
namespace llvm {
-class MSP430TargetMachine;
+class MSP430Subtarget;
/// MSP430II - This namespace holds all of the target specific flags that
/// instruction info tracks.
@@ -44,46 +44,47 @@ class MSP430InstrInfo : public MSP430GenInstrInfo {
const MSP430RegisterInfo RI;
virtual void anchor();
public:
- explicit MSP430InstrInfo(MSP430TargetMachine &TM);
+ explicit MSP430InstrInfo(MSP430Subtarget &STI);
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
- virtual const TargetRegisterInfo &getRegisterInfo() const { return RI; }
+ const TargetRegisterInfo &getRegisterInfo() const { return RI; }
void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const;
-
- virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned SrcReg, bool isKill,
- int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
- virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, int FrameIdx,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
+ bool KillSrc) const override;
+
+ void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned SrcReg, bool isKill,
+ int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+ void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg, int FrameIdx,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
// Branch folding goodness
- bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
- bool isUnpredicatedTerminator(const MachineInstr *MI) const;
+ bool
+ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
+ bool isUnpredicatedTerminator(const MachineInstr *MI) const override;
bool AnalyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const;
+ bool AllowModify) const override;
- unsigned RemoveBranch(MachineBasicBlock &MBB) const;
+ unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const;
+ DebugLoc DL) const override;
};
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.cpp b/contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.cpp
index 52f9ee5..05352a2 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.cpp
@@ -17,13 +17,15 @@
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/Mangler.h"
+#include "llvm/Target/TargetMachine.h"
using namespace llvm;
MCSymbol *MSP430MCInstLower::
@@ -48,8 +50,9 @@ GetExternalSymbolSymbol(const MachineOperand &MO) const {
MCSymbol *MSP430MCInstLower::
GetJumpTableSymbol(const MachineOperand &MO) const {
+ const DataLayout *DL = Printer.TM.getDataLayout();
SmallString<256> Name;
- raw_svector_ostream(Name) << Printer.MAI->getPrivateGlobalPrefix() << "JTI"
+ raw_svector_ostream(Name) << DL->getPrivateGlobalPrefix() << "JTI"
<< Printer.getFunctionNumber() << '_'
<< MO.getIndex();
@@ -64,8 +67,9 @@ GetJumpTableSymbol(const MachineOperand &MO) const {
MCSymbol *MSP430MCInstLower::
GetConstantPoolIndexSymbol(const MachineOperand &MO) const {
+ const DataLayout *DL = Printer.TM.getDataLayout();
SmallString<256> Name;
- raw_svector_ostream(Name) << Printer.MAI->getPrivateGlobalPrefix() << "CPI"
+ raw_svector_ostream(Name) << DL->getPrivateGlobalPrefix() << "CPI"
<< Printer.getFunctionNumber() << '_'
<< MO.getIndex();
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp
index 1a5e312..691bcee 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp
@@ -11,8 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "msp430-reg-info"
-
#include "MSP430RegisterInfo.h"
#include "MSP430.h"
#include "MSP430MachineFunctionInfo.h"
@@ -26,38 +24,38 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "msp430-reg-info"
+
#define GET_REGINFO_TARGET_DESC
#include "MSP430GenRegisterInfo.inc"
-using namespace llvm;
-
// FIXME: Provide proper call frame setup / destroy opcodes.
-MSP430RegisterInfo::MSP430RegisterInfo(MSP430TargetMachine &tm)
- : MSP430GenRegisterInfo(MSP430::PCW), TM(tm) {
- StackAlign = TM.getFrameLowering()->getStackAlignment();
-}
+MSP430RegisterInfo::MSP430RegisterInfo()
+ : MSP430GenRegisterInfo(MSP430::PCW) {}
-const uint16_t*
+const MCPhysReg*
MSP430RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
const TargetFrameLowering *TFI = MF->getTarget().getFrameLowering();
const Function* F = MF->getFunction();
- static const uint16_t CalleeSavedRegs[] = {
+ static const MCPhysReg CalleeSavedRegs[] = {
MSP430::FPW, MSP430::R5W, MSP430::R6W, MSP430::R7W,
MSP430::R8W, MSP430::R9W, MSP430::R10W, MSP430::R11W,
0
};
- static const uint16_t CalleeSavedRegsFP[] = {
+ static const MCPhysReg CalleeSavedRegsFP[] = {
MSP430::R5W, MSP430::R6W, MSP430::R7W,
MSP430::R8W, MSP430::R9W, MSP430::R10W, MSP430::R11W,
0
};
- static const uint16_t CalleeSavedRegsIntr[] = {
+ static const MCPhysReg CalleeSavedRegsIntr[] = {
MSP430::FPW, MSP430::R5W, MSP430::R6W, MSP430::R7W,
MSP430::R8W, MSP430::R9W, MSP430::R10W, MSP430::R11W,
MSP430::R12W, MSP430::R13W, MSP430::R14W, MSP430::R15W,
0
};
- static const uint16_t CalleeSavedRegsIntrFP[] = {
+ static const MCPhysReg CalleeSavedRegsIntrFP[] = {
MSP430::R5W, MSP430::R6W, MSP430::R7W,
MSP430::R8W, MSP430::R9W, MSP430::R10W, MSP430::R11W,
MSP430::R12W, MSP430::R13W, MSP430::R14W, MSP430::R15W,
@@ -88,8 +86,10 @@ BitVector MSP430RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
Reserved.set(MSP430::CGW);
// Mark frame pointer as reserved if needed.
- if (TFI->hasFP(MF))
+ if (TFI->hasFP(MF)) {
+ Reserved.set(MSP430::FPB);
Reserved.set(MSP430::FPW);
+ }
return Reserved;
}
@@ -142,10 +142,10 @@ MSP430RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// We need to materialize the offset via add instruction.
unsigned DstReg = MI.getOperand(0).getReg();
if (Offset < 0)
- BuildMI(MBB, llvm::next(II), dl, TII.get(MSP430::SUB16ri), DstReg)
+ BuildMI(MBB, std::next(II), dl, TII.get(MSP430::SUB16ri), DstReg)
.addReg(DstReg).addImm(-Offset);
else
- BuildMI(MBB, llvm::next(II), dl, TII.get(MSP430::ADD16ri), DstReg)
+ BuildMI(MBB, std::next(II), dl, TII.get(MSP430::ADD16ri), DstReg)
.addReg(DstReg).addImm(Offset);
return;
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h
index 78047cc..cb01961 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h
@@ -21,32 +21,25 @@
namespace llvm {
-class TargetInstrInfo;
-class MSP430TargetMachine;
-
struct MSP430RegisterInfo : public MSP430GenRegisterInfo {
-private:
- MSP430TargetMachine &TM;
-
- /// StackAlign - Default stack alignment.
- ///
- unsigned StackAlign;
public:
- MSP430RegisterInfo(MSP430TargetMachine &tm);
+ MSP430RegisterInfo();
/// Code Generation virtual methods...
- const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
+ const MCPhysReg *
+ getCalleeSavedRegs(const MachineFunction *MF = nullptr) const override;
- BitVector getReservedRegs(const MachineFunction &MF) const;
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
const TargetRegisterClass*
- getPointerRegClass(const MachineFunction &MF, unsigned Kind = 0) const;
+ getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind = 0) const override;
void eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, unsigned FIOperandNum,
- RegScavenger *RS = NULL) const;
+ RegScavenger *RS = nullptr) const override;
// Debug information queries.
- unsigned getFrameRegister(const MachineFunction &MF) const;
+ unsigned getFrameRegister(const MachineFunction &MF) const override;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430SelectionDAGInfo.cpp b/contrib/llvm/lib/Target/MSP430/MSP430SelectionDAGInfo.cpp
index 24f45fa..3897ef6 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430SelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430SelectionDAGInfo.cpp
@@ -11,13 +11,13 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "msp430-selectiondag-info"
#include "MSP430TargetMachine.h"
using namespace llvm;
-MSP430SelectionDAGInfo::MSP430SelectionDAGInfo(const MSP430TargetMachine &TM)
- : TargetSelectionDAGInfo(TM) {
-}
+#define DEBUG_TYPE "msp430-selectiondag-info"
+
+MSP430SelectionDAGInfo::MSP430SelectionDAGInfo(const DataLayout &DL)
+ : TargetSelectionDAGInfo(&DL) {}
MSP430SelectionDAGInfo::~MSP430SelectionDAGInfo() {
}
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430SelectionDAGInfo.h b/contrib/llvm/lib/Target/MSP430/MSP430SelectionDAGInfo.h
index fa81948..cb04adc 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430SelectionDAGInfo.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430SelectionDAGInfo.h
@@ -22,7 +22,7 @@ class MSP430TargetMachine;
class MSP430SelectionDAGInfo : public TargetSelectionDAGInfo {
public:
- explicit MSP430SelectionDAGInfo(const MSP430TargetMachine &TM);
+ explicit MSP430SelectionDAGInfo(const DataLayout &DL);
~MSP430SelectionDAGInfo();
};
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430Subtarget.cpp b/contrib/llvm/lib/Target/MSP430/MSP430Subtarget.cpp
index edeaf34..dbddc52 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430Subtarget.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430Subtarget.cpp
@@ -15,20 +15,25 @@
#include "MSP430.h"
#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "msp430-subtarget"
+
#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "MSP430GenSubtargetInfo.inc"
-using namespace llvm;
-
void MSP430Subtarget::anchor() { }
-MSP430Subtarget::MSP430Subtarget(const std::string &TT,
- const std::string &CPU,
- const std::string &FS) :
- MSP430GenSubtargetInfo(TT, CPU, FS) {
- std::string CPUName = "generic";
-
- // Parse features string.
- ParseSubtargetFeatures(CPUName, FS);
+MSP430Subtarget &MSP430Subtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) {
+ ParseSubtargetFeatures("generic", FS);
+ return *this;
}
+
+MSP430Subtarget::MSP430Subtarget(const std::string &TT, const std::string &CPU,
+ const std::string &FS, const TargetMachine &TM)
+ : MSP430GenSubtargetInfo(TT, CPU, FS),
+ // FIXME: Check DataLayout string.
+ DL("e-m:e-p:16:16-i32:16:32-n8:16"), FrameLowering(),
+ InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM),
+ TSInfo(DL) {}
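
The subtarget now owns the data layout, frame lowering, instruction info, lowering and DAG info objects (the DataLayout string moves here as well), and the feature string must be parsed before InstrInfo is built. The constructor gets that ordering by threading a call to initializeSubtargetDependencies() through its member-initializer list. A compilable sketch of the idiom (hypothetical names, not the real LLVM classes):

    // Feature parsing has to happen before members that depend on it are
    // constructed, so the initializer for InstrInfo first calls
    // initializeSubtargetDependencies(), which parses features and returns
    // *this.
    #include <iostream>

    struct SubtargetLike {
      bool HasExtendedInsts = false;

      SubtargetLike &initializeSubtargetDependencies(const char *FS) {
        // Stand-in for ParseSubtargetFeatures("generic", FS).
        HasExtendedInsts = (FS[0] != '\0');
        return *this;
      }

      struct InstrInfoLike {
        explicit InstrInfoLike(SubtargetLike &ST)
            : Extended(ST.HasExtendedInsts) {}
        bool Extended;
      };

      // Declared after HasExtendedInsts, so it is constructed after the
      // feature bit has been set by the call in the initializer list.
      InstrInfoLike InstrInfo;

      explicit SubtargetLike(const char *FS)
          : InstrInfo(initializeSubtargetDependencies(FS)) {}
    };

    int main() {
      SubtargetLike ST("+ext");
      std::cout << std::boolalpha << ST.InstrInfo.Extended << "\n"; // true
      return 0;
    }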
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430Subtarget.h b/contrib/llvm/lib/Target/MSP430/MSP430Subtarget.h
index 4d8792e..0152ad1 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430Subtarget.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430Subtarget.h
@@ -14,6 +14,12 @@
#ifndef LLVM_TARGET_MSP430_SUBTARGET_H
#define LLVM_TARGET_MSP430_SUBTARGET_H
+#include "MSP430FrameLowering.h"
+#include "MSP430InstrInfo.h"
+#include "MSP430ISelLowering.h"
+#include "MSP430RegisterInfo.h"
+#include "MSP430SelectionDAGInfo.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <string>
@@ -26,16 +32,33 @@ class StringRef;
class MSP430Subtarget : public MSP430GenSubtargetInfo {
virtual void anchor();
bool ExtendedInsts;
+ const DataLayout DL; // Calculates type size & alignment
+ MSP430FrameLowering FrameLowering;
+ MSP430InstrInfo InstrInfo;
+ MSP430TargetLowering TLInfo;
+ MSP430SelectionDAGInfo TSInfo;
+
public:
/// This constructor initializes the data members to match that
/// of the specified triple.
///
MSP430Subtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS);
+ const std::string &FS, const TargetMachine &TM);
+
+ MSP430Subtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
/// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
+
+ const TargetFrameLowering *getFrameLowering() const { return &FrameLowering; }
+ const MSP430InstrInfo *getInstrInfo() const { return &InstrInfo; }
+ const DataLayout *getDataLayout() const { return &DL; }
+ const TargetRegisterInfo *getRegisterInfo() const {
+ return &InstrInfo.getRegisterInfo();
+ }
+ const MSP430TargetLowering *getTargetLowering() const { return &TLInfo; }
+ const MSP430SelectionDAGInfo *getSelectionDAGInfo() const { return &TSInfo; }
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp b/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
index 6710a09..5ca36f2 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
@@ -24,19 +24,13 @@ extern "C" void LLVMInitializeMSP430Target() {
RegisterTargetMachine<MSP430TargetMachine> X(TheMSP430Target);
}
-MSP430TargetMachine::MSP430TargetMachine(const Target &T,
- StringRef TT,
- StringRef CPU,
- StringRef FS,
+MSP430TargetMachine::MSP430TargetMachine(const Target &T, StringRef TT,
+ StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
- : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- Subtarget(TT, CPU, FS),
- // FIXME: Check DataLayout string.
- DL("e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"),
- InstrInfo(*this), TLInfo(*this), TSInfo(*this),
- FrameLowering(Subtarget) {
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ Subtarget(TT, CPU, FS, *this) {
initAsmInfo();
}
@@ -51,8 +45,8 @@ public:
return getTM<MSP430TargetMachine>();
}
- virtual bool addInstSelector();
- virtual bool addPreEmitPass();
+ bool addInstSelector() override;
+ bool addPreEmitPass() override;
};
} // namespace
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.h b/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.h
index be695a2..efa8403 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.h
@@ -15,13 +15,7 @@
#ifndef LLVM_TARGET_MSP430_TARGETMACHINE_H
#define LLVM_TARGET_MSP430_TARGETMACHINE_H
-#include "MSP430FrameLowering.h"
-#include "MSP430ISelLowering.h"
-#include "MSP430InstrInfo.h"
-#include "MSP430RegisterInfo.h"
-#include "MSP430SelectionDAGInfo.h"
#include "MSP430Subtarget.h"
-#include "llvm/IR/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
@@ -31,11 +25,6 @@ namespace llvm {
///
class MSP430TargetMachine : public LLVMTargetMachine {
MSP430Subtarget Subtarget;
- const DataLayout DL; // Calculates type size & alignment
- MSP430InstrInfo InstrInfo;
- MSP430TargetLowering TLInfo;
- MSP430SelectionDAGInfo TSInfo;
- MSP430FrameLowering FrameLowering;
public:
MSP430TargetMachine(const Target &T, StringRef TT,
@@ -43,25 +32,28 @@ public:
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
- virtual const TargetFrameLowering *getFrameLowering() const {
- return &FrameLowering;
+ const TargetFrameLowering *getFrameLowering() const override {
+ return getSubtargetImpl()->getFrameLowering();
}
- virtual const MSP430InstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const DataLayout *getDataLayout() const { return &DL;}
- virtual const MSP430Subtarget *getSubtargetImpl() const { return &Subtarget; }
-
- virtual const TargetRegisterInfo *getRegisterInfo() const {
- return &InstrInfo.getRegisterInfo();
+ const MSP430InstrInfo *getInstrInfo() const override {
+ return getSubtargetImpl()->getInstrInfo();
}
-
- virtual const MSP430TargetLowering *getTargetLowering() const {
- return &TLInfo;
+ const DataLayout *getDataLayout() const override {
+ return getSubtargetImpl()->getDataLayout();
}
-
- virtual const MSP430SelectionDAGInfo* getSelectionDAGInfo() const {
- return &TSInfo;
+ const MSP430Subtarget *getSubtargetImpl() const override {
+ return &Subtarget;
+ }
+ const TargetRegisterInfo *getRegisterInfo() const override {
+ return getSubtargetImpl()->getRegisterInfo();
+ }
+ const MSP430TargetLowering *getTargetLowering() const override {
+ return getSubtargetImpl()->getTargetLowering();
+ }
+ const MSP430SelectionDAGInfo *getSelectionDAGInfo() const override {
+ return getSubtargetImpl()->getSelectionDAGInfo();
}
- virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
}; // MSP430TargetMachine.
} // end namespace llvm
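
With the components relocated, MSP430TargetMachine keeps only the Subtarget member and its accessors become thin forwarders. A generic sketch (hypothetical names) of that delegation pattern:

    // The target machine holds one Subtarget and every component query
    // delegates to it, so per-subtarget state has a single owner.
    #include <iostream>

    struct FrameLoweringLike { int StackAlign = 2; };

    struct SubtargetLike {
      FrameLoweringLike FrameLowering;
      const FrameLoweringLike *getFrameLowering() const { return &FrameLowering; }
    };

    struct TargetMachineLike {
      SubtargetLike Subtarget;
      const SubtargetLike *getSubtargetImpl() const { return &Subtarget; }
      // Thin forwarder; no duplicated state in the target machine.
      const FrameLoweringLike *getFrameLowering() const {
        return getSubtargetImpl()->getFrameLowering();
      }
    };

    int main() {
      TargetMachineLike TM;
      std::cout << TM.getFrameLowering()->StackAlign << "\n"; // 2
      return 0;
    }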
diff --git a/contrib/llvm/lib/Target/Mangler.cpp b/contrib/llvm/lib/Target/Mangler.cpp
deleted file mode 100644
index 38be25c..0000000
--- a/contrib/llvm/lib/Target/Mangler.cpp
+++ /dev/null
@@ -1,143 +0,0 @@
-//===-- Mangler.cpp - Self-contained c/asm llvm name mangler --------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Unified name mangler for assembly backends.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Target/Mangler.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/Twine.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/Function.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-/// getNameWithPrefix - Fill OutName with the name of the appropriate prefix
-/// and the specified name as the global variable name. GVName must not be
-/// empty.
-void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
- const Twine &GVName, ManglerPrefixTy PrefixTy,
- bool UseGlobalPrefix) {
- SmallString<256> TmpData;
- StringRef Name = GVName.toStringRef(TmpData);
- assert(!Name.empty() && "getNameWithPrefix requires non-empty name");
-
- const MCAsmInfo *MAI = TM->getMCAsmInfo();
-
- // If the global name is not led with \1, add the appropriate prefixes.
- if (Name[0] == '\1') {
- Name = Name.substr(1);
- } else {
- if (PrefixTy == Mangler::Private) {
- const char *Prefix = MAI->getPrivateGlobalPrefix();
- OutName.append(Prefix, Prefix+strlen(Prefix));
- } else if (PrefixTy == Mangler::LinkerPrivate) {
- const char *Prefix = MAI->getLinkerPrivateGlobalPrefix();
- OutName.append(Prefix, Prefix+strlen(Prefix));
- }
-
- if (UseGlobalPrefix) {
- const char *Prefix = MAI->getGlobalPrefix();
- if (Prefix[0] == 0)
- ; // Common noop, no prefix.
- else if (Prefix[1] == 0)
- OutName.push_back(Prefix[0]); // Common, one character prefix.
- else
- // Arbitrary length prefix.
- OutName.append(Prefix, Prefix+strlen(Prefix));
- }
- }
-
- // If this is a simple string that doesn't need escaping, just append it.
- OutName.append(Name.begin(), Name.end());
-}
-
-/// AddFastCallStdCallSuffix - Microsoft fastcall and stdcall functions require
-/// a suffix on their name indicating the number of words of arguments they
-/// take.
-static void AddFastCallStdCallSuffix(SmallVectorImpl<char> &OutName,
- const Function *F, const DataLayout &TD) {
- // Calculate arguments size total.
- unsigned ArgWords = 0;
- for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
- AI != AE; ++AI) {
- Type *Ty = AI->getType();
- // 'Dereference' type in case of byval parameter attribute
- if (AI->hasByValAttr())
- Ty = cast<PointerType>(Ty)->getElementType();
- // Size should be aligned to DWORD boundary
- ArgWords += ((TD.getTypeAllocSize(Ty) + 3)/4)*4;
- }
-
- raw_svector_ostream(OutName) << '@' << ArgWords;
-}
-
-
-/// getNameWithPrefix - Fill OutName with the name of the appropriate prefix
-/// and the specified global variable's name. If the global variable doesn't
-/// have a name, this fills in a unique name for the global.
-void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
- const GlobalValue *GV, bool isImplicitlyPrivate,
- bool UseGlobalPrefix) {
- ManglerPrefixTy PrefixTy = Mangler::Default;
- if (GV->hasPrivateLinkage() || isImplicitlyPrivate)
- PrefixTy = Mangler::Private;
- else if (GV->hasLinkerPrivateLinkage() || GV->hasLinkerPrivateWeakLinkage())
- PrefixTy = Mangler::LinkerPrivate;
-
- // If this global has a name, handle it simply.
- if (GV->hasName()) {
- StringRef Name = GV->getName();
- getNameWithPrefix(OutName, Name, PrefixTy, UseGlobalPrefix);
- // No need to do anything else if the global has the special "do not mangle"
- // flag in the name.
- if (Name[0] == 1)
- return;
- } else {
- // Get the ID for the global, assigning a new one if we haven't got one
- // already.
- unsigned &ID = AnonGlobalIDs[GV];
- if (ID == 0) ID = NextAnonGlobalID++;
-
- // Must mangle the global into a unique ID.
- getNameWithPrefix(OutName, "__unnamed_" + Twine(ID), PrefixTy,
- UseGlobalPrefix);
- }
-
- // If we are supposed to add a microsoft-style suffix for stdcall/fastcall,
- // add it.
- if (TM->getMCAsmInfo()->hasMicrosoftFastStdCallMangling()) {
- if (const Function *F = dyn_cast<Function>(GV)) {
- CallingConv::ID CC = F->getCallingConv();
-
- // fastcall functions need to start with @.
- // FIXME: This logic seems unlikely to be right.
- if (CC == CallingConv::X86_FastCall) {
- if (OutName[0] == '_')
- OutName[0] = '@';
- else
- OutName.insert(OutName.begin(), '@');
- }
-
- // fastcall and stdcall functions usually need @42 at the end to specify
- // the argument info.
- FunctionType *FT = F->getFunctionType();
- if ((CC == CallingConv::X86_FastCall || CC == CallingConv::X86_StdCall) &&
- // "Pure" variadic functions do not receive @0 suffix.
- (!FT->isVarArg() || FT->getNumParams() == 0 ||
- (FT->getNumParams() == 1 && F->hasStructRetAttr())))
- AddFastCallStdCallSuffix(OutName, F, *TM->getDataLayout());
- }
- }
-}
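
The removed file was the old lib/Target name mangler; in 3.5 the equivalent functionality lives under llvm/IR instead. The most target-specific piece it carried was the Microsoft fastcall/stdcall decoration, where the suffix is '@' plus the total argument size rounded up to DWORDs. A standalone sketch of that computation (not the deleted code; the leading underscore is the usual stdcall prefix, added here only for illustration):

    #include <iostream>
    #include <string>
    #include <vector>

    // Mirrors what AddFastCallStdCallSuffix() computed: '@' followed by the
    // argument bytes, with each argument padded to a 4-byte word.
    static std::string decorateStdCall(const std::string &Name,
                                       const std::vector<unsigned> &ArgSizes) {
      unsigned ArgBytes = 0;
      for (unsigned Sz : ArgSizes)
        ArgBytes += ((Sz + 3) / 4) * 4; // round each argument up to a DWORD
      return "_" + Name + "@" + std::to_string(ArgBytes);
    }

    int main() {
      // e.g. int __stdcall f(int, char) -> "_f@8" (each argument padded to 4).
      std::cout << decorateStdCall("f", {4, 1}) << "\n";
      return 0;
    }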
diff --git a/contrib/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/contrib/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index cdae6c2..53b30f9 100644
--- a/contrib/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/contrib/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -7,24 +7,30 @@
//
//===----------------------------------------------------------------------===//
+#include "MCTargetDesc/MipsMCExpr.h"
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "MipsRegisterInfo.h"
#include "MipsTargetStreamer.h"
+#include "llvm/ADT/APInt.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetAsmParser.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
-#include "llvm/ADT/APInt.h"
using namespace llvm;
+#define DEBUG_TYPE "mips-asm-parser"
+
namespace llvm {
class MCInstrInfo;
}
@@ -54,137 +60,81 @@ private:
namespace {
class MipsAsmParser : public MCTargetAsmParser {
-
MipsTargetStreamer &getTargetStreamer() {
- MCTargetStreamer &TS = Parser.getStreamer().getTargetStreamer();
+ MCTargetStreamer &TS = *Parser.getStreamer().getTargetStreamer();
return static_cast<MipsTargetStreamer &>(TS);
}
MCSubtargetInfo &STI;
MCAsmParser &Parser;
MipsAssemblerOptions Options;
- bool hasConsumedDollar;
#define GET_ASSEMBLER_HEADER
#include "MipsGenAsmMatcher.inc"
- bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand *> &Operands,
- MCStreamer &Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm);
-
- bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
-
- bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
- SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand *> &Operands);
-
- bool ParseDirective(AsmToken DirectiveID);
-
- MipsAsmParser::OperandMatchResultTy
- parseRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands, int RegKind);
-
- MipsAsmParser::OperandMatchResultTy
- parseMSARegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands, int RegKind);
-
- MipsAsmParser::OperandMatchResultTy
- parseMSACtrlRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
- int RegKind);
-
- MipsAsmParser::OperandMatchResultTy
- parseMemOperand(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
-
- bool parsePtrReg(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
- int RegKind);
-
- MipsAsmParser::OperandMatchResultTy
- parsePtrReg(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
-
- MipsAsmParser::OperandMatchResultTy
- parseGPR32(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
-
- MipsAsmParser::OperandMatchResultTy
- parseGPR64(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
-
- MipsAsmParser::OperandMatchResultTy
- parseHWRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
-
- MipsAsmParser::OperandMatchResultTy
- parseCCRRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
-
- MipsAsmParser::OperandMatchResultTy
- parseAFGR64Regs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
-
- MipsAsmParser::OperandMatchResultTy
- parseFGR64Regs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
-
- MipsAsmParser::OperandMatchResultTy
- parseFGR32Regs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
-
- MipsAsmParser::OperandMatchResultTy
- parseFGRH32Regs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+ unsigned checkTargetMatchPredicate(MCInst &Inst) override;
- MipsAsmParser::OperandMatchResultTy
- parseFCCRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+ bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ OperandVector &Operands, MCStreamer &Out,
+ unsigned &ErrorInfo,
+ bool MatchingInlineAsm) override;
- MipsAsmParser::OperandMatchResultTy
- parseACC64DSP(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+ /// Parse a register as used in CFI directives
+ bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
- MipsAsmParser::OperandMatchResultTy
- parseLO32DSP(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+ bool ParseParenSuffix(StringRef Name, OperandVector &Operands);
- MipsAsmParser::OperandMatchResultTy
- parseHI32DSP(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+ bool ParseBracketSuffix(StringRef Name, OperandVector &Operands);
- MipsAsmParser::OperandMatchResultTy
- parseCOP2(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+ bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+ SMLoc NameLoc, OperandVector &Operands) override;
- MipsAsmParser::OperandMatchResultTy
- parseMSA128BRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+ bool ParseDirective(AsmToken DirectiveID) override;
- MipsAsmParser::OperandMatchResultTy
- parseMSA128HRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+ MipsAsmParser::OperandMatchResultTy parseMemOperand(OperandVector &Operands);
MipsAsmParser::OperandMatchResultTy
- parseMSA128WRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+ MatchAnyRegisterNameWithoutDollar(OperandVector &Operands,
+ StringRef Identifier, SMLoc S);
MipsAsmParser::OperandMatchResultTy
- parseMSA128DRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+ MatchAnyRegisterWithoutDollar(OperandVector &Operands, SMLoc S);
- MipsAsmParser::OperandMatchResultTy
- parseMSA128CtrlRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+ MipsAsmParser::OperandMatchResultTy ParseAnyRegister(OperandVector &Operands);
- MipsAsmParser::OperandMatchResultTy
- parseInvNum(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+ MipsAsmParser::OperandMatchResultTy ParseImm(OperandVector &Operands);
- MipsAsmParser::OperandMatchResultTy
- parseLSAImm(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+ MipsAsmParser::OperandMatchResultTy ParseJumpTarget(OperandVector &Operands);
- bool searchSymbolAlias(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- unsigned RegKind);
+ MipsAsmParser::OperandMatchResultTy parseInvNum(OperandVector &Operands);
- bool ParseOperand(SmallVectorImpl<MCParsedAsmOperand *> &,
- StringRef Mnemonic);
+ MipsAsmParser::OperandMatchResultTy ParseLSAImm(OperandVector &Operands);
- int tryParseRegister(bool is64BitReg);
+ bool searchSymbolAlias(OperandVector &Operands);
- bool tryParseRegisterOperand(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
- bool is64BitReg);
+ bool ParseOperand(OperandVector &, StringRef Mnemonic);
bool needsExpansion(MCInst &Inst);
- void expandInstruction(MCInst &Inst, SMLoc IDLoc,
+ // Expands assembly pseudo instructions.
+ // Returns false on success, true otherwise.
+ bool expandInstruction(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions);
- void expandLoadImm(MCInst &Inst, SMLoc IDLoc,
+
+ bool expandLoadImm(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions);
- void expandLoadAddressImm(MCInst &Inst, SMLoc IDLoc,
+
+ bool expandLoadAddressImm(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions);
- void expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc,
+
+ bool expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions);
+
void expandMemInst(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions, bool isLoad,
bool isImmOpnd);
- bool reportParseError(StringRef ErrorMsg);
+ bool reportParseError(Twine ErrorMsg);
+ bool reportParseError(SMLoc Loc, Twine ErrorMsg);
bool parseMemOffset(const MCExpr *&Res, bool isParenExpr);
bool parseRelocOperand(const MCExpr *&Res);
@@ -192,9 +142,12 @@ class MipsAsmParser : public MCTargetAsmParser {
const MCExpr *evaluateRelocExpr(const MCExpr *Expr, StringRef RelocStr);
bool isEvaluated(const MCExpr *Expr);
+ bool parseSetFeature(uint64_t Feature);
+ bool parseDirectiveCPLoad(SMLoc Loc);
+ bool parseDirectiveCPSetup();
+ bool parseDirectiveNaN();
bool parseDirectiveSet();
- bool parseDirectiveMipsHackStocg();
- bool parseDirectiveMipsHackELFFlags();
+ bool parseDirectiveOption();
bool parseSetAtDirective();
bool parseSetNoAtDirective();
@@ -202,25 +155,22 @@ class MipsAsmParser : public MCTargetAsmParser {
bool parseSetNoMacroDirective();
bool parseSetReorderDirective();
bool parseSetNoReorderDirective();
+ bool parseSetNoMips16Directive();
+ bool parseSetFpDirective();
bool parseSetAssignment();
- bool parseDirectiveWord(unsigned Size, SMLoc L);
+ bool parseDataDirective(unsigned Size, SMLoc L);
bool parseDirectiveGpWord();
+ bool parseDirectiveGpDWord();
+ bool parseDirectiveModule();
+ bool parseDirectiveModuleFP();
+ bool parseFpABIValue(MipsABIFlagsSection::FpABIKind &FpABI,
+ StringRef Directive);
MCSymbolRefExpr::VariantKind getVariantKind(StringRef Symbol);
- bool isMips64() const {
- return (STI.getFeatureBits() & Mips::FeatureMips64) != 0;
- }
-
- bool isFP64() const {
- return (STI.getFeatureBits() & Mips::FeatureFP64Bit) != 0;
- }
-
- bool isN64() const { return STI.getFeatureBits() & Mips::FeatureN64; }
-
- int matchRegisterName(StringRef Symbol, bool is64BitReg);
+ bool eatComma(StringRef ErrorStr);
int matchCPURegisterName(StringRef Symbol);
@@ -236,11 +186,11 @@ class MipsAsmParser : public MCTargetAsmParser {
int matchMSA128CtrlRegisterName(StringRef Name);
- int regKindToRegClass(int RegKind);
-
unsigned getReg(int RC, int RegNo);
- int getATReg();
+ unsigned getGPR(int RegNo);
+
+ int getATReg(SMLoc Loc);
bool processInstruction(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions);
@@ -250,17 +200,102 @@ class MipsAsmParser : public MCTargetAsmParser {
// Example: INSERT.B $w0[n], $1 => 16 > n >= 0
bool validateMSAIndex(int Val, int RegKind);
+ void setFeatureBits(unsigned Feature, StringRef FeatureString) {
+ if (!(STI.getFeatureBits() & Feature)) {
+ setAvailableFeatures(
+ ComputeAvailableFeatures(STI.ToggleFeature(FeatureString)));
+ }
+ }
+
+ void clearFeatureBits(unsigned Feature, StringRef FeatureString) {
+ if (STI.getFeatureBits() & Feature) {
+ setAvailableFeatures(
+ ComputeAvailableFeatures(STI.ToggleFeature(FeatureString)));
+ }
+ }
+
public:
+ enum MipsMatchResultTy {
+ Match_RequiresDifferentSrcAndDst = FIRST_TARGET_MATCH_RESULT_TY
+#define GET_OPERAND_DIAGNOSTIC_TYPES
+#include "MipsGenAsmMatcher.inc"
+#undef GET_OPERAND_DIAGNOSTIC_TYPES
+
+ };
+
MipsAsmParser(MCSubtargetInfo &sti, MCAsmParser &parser,
- const MCInstrInfo &MII)
- : MCTargetAsmParser(), STI(sti), Parser(parser),
- hasConsumedDollar(false) {
+ const MCInstrInfo &MII, const MCTargetOptions &Options)
+ : MCTargetAsmParser(), STI(sti), Parser(parser) {
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
+
+ getTargetStreamer().updateABIInfo(*this);
+
+ // Assert exactly one ABI was chosen.
+ assert((((STI.getFeatureBits() & Mips::FeatureO32) != 0) +
+ ((STI.getFeatureBits() & Mips::FeatureEABI) != 0) +
+ ((STI.getFeatureBits() & Mips::FeatureN32) != 0) +
+ ((STI.getFeatureBits() & Mips::FeatureN64) != 0)) == 1);
+
+ if (!isABI_O32() && !useOddSPReg() != 0)
+ report_fatal_error("-mno-odd-spreg requires the O32 ABI");
}
MCAsmParser &getParser() const { return Parser; }
MCAsmLexer &getLexer() const { return Parser.getLexer(); }
+
+ /// True if all of $fcc0 - $fcc7 exist for the current ISA.
+ bool hasEightFccRegisters() const { return hasMips4() || hasMips32(); }
+
+ bool isGP64bit() const { return STI.getFeatureBits() & Mips::FeatureGP64Bit; }
+ bool isFP64bit() const { return STI.getFeatureBits() & Mips::FeatureFP64Bit; }
+ bool isABI_N32() const { return STI.getFeatureBits() & Mips::FeatureN32; }
+ bool isABI_N64() const { return STI.getFeatureBits() & Mips::FeatureN64; }
+ bool isABI_O32() const { return STI.getFeatureBits() & Mips::FeatureO32; }
+ bool isABI_FPXX() const { return STI.getFeatureBits() & Mips::FeatureFPXX; }
+
+ bool useOddSPReg() const {
+ return !(STI.getFeatureBits() & Mips::FeatureNoOddSPReg);
+ }
+
+ bool inMicroMipsMode() const {
+ return STI.getFeatureBits() & Mips::FeatureMicroMips;
+ }
+ bool hasMips1() const { return STI.getFeatureBits() & Mips::FeatureMips1; }
+ bool hasMips2() const { return STI.getFeatureBits() & Mips::FeatureMips2; }
+ bool hasMips3() const { return STI.getFeatureBits() & Mips::FeatureMips3; }
+ bool hasMips4() const { return STI.getFeatureBits() & Mips::FeatureMips4; }
+ bool hasMips5() const { return STI.getFeatureBits() & Mips::FeatureMips5; }
+ bool hasMips32() const {
+ return (STI.getFeatureBits() & Mips::FeatureMips32);
+ }
+ bool hasMips64() const {
+ return (STI.getFeatureBits() & Mips::FeatureMips64);
+ }
+ bool hasMips32r2() const {
+ return (STI.getFeatureBits() & Mips::FeatureMips32r2);
+ }
+ bool hasMips64r2() const {
+ return (STI.getFeatureBits() & Mips::FeatureMips64r2);
+ }
+ bool hasMips32r6() const {
+ return (STI.getFeatureBits() & Mips::FeatureMips32r6);
+ }
+ bool hasMips64r6() const {
+ return (STI.getFeatureBits() & Mips::FeatureMips64r6);
+ }
+ bool hasDSP() const { return (STI.getFeatureBits() & Mips::FeatureDSP); }
+ bool hasDSPR2() const { return (STI.getFeatureBits() & Mips::FeatureDSPR2); }
+ bool hasMSA() const { return (STI.getFeatureBits() & Mips::FeatureMSA); }
+
+ bool inMips16Mode() const {
+ return STI.getFeatureBits() & Mips::FeatureMips16;
+ }
+ // TODO: see how can we get this info.
+ bool abiUsesSoftFloat() const { return false; }
+
+ /// Warn if RegNo is the current assembler temporary.
+ void WarnIfAssemblerTemporary(int RegNo, SMLoc Loc);
};
}
@@ -269,53 +304,59 @@ namespace {
/// MipsOperand - Instances of this class represent a parsed Mips machine
/// instruction.
class MipsOperand : public MCParsedAsmOperand {
-
public:
- enum RegisterKind {
- Kind_None,
- Kind_GPR32,
- Kind_GPR64,
- Kind_HWRegs,
- Kind_FGR32Regs,
- Kind_FGRH32Regs,
- Kind_FGR64Regs,
- Kind_AFGR64Regs,
- Kind_CCRRegs,
- Kind_FCCRegs,
- Kind_ACC64DSP,
- Kind_LO32DSP,
- Kind_HI32DSP,
- Kind_COP2,
- Kind_MSA128BRegs,
- Kind_MSA128HRegs,
- Kind_MSA128WRegs,
- Kind_MSA128DRegs,
- Kind_MSA128CtrlRegs
+ /// Broad categories of register classes
+ /// The exact class is finalized by the render method.
+ enum RegKind {
+ RegKind_GPR = 1, /// GPR32 and GPR64 (depending on isGP64bit())
+ RegKind_FGR = 2, /// FGR32, FGR64, AFGR64 (depending on context and
+ /// isFP64bit())
+ RegKind_FCC = 4, /// FCC
+ RegKind_MSA128 = 8, /// MSA128[BHWD] (makes no difference which)
+ RegKind_MSACtrl = 16, /// MSA control registers
+ RegKind_COP2 = 32, /// COP2
+ RegKind_ACC = 64, /// HI32DSP, LO32DSP, and ACC64DSP (depending on
+ /// context).
+ RegKind_CCR = 128, /// CCR
+ RegKind_HWRegs = 256, /// HWRegs
+ RegKind_COP3 = 512, /// COP3
+
+ /// Potentially any (e.g. $1)
+ RegKind_Numeric = RegKind_GPR | RegKind_FGR | RegKind_FCC | RegKind_MSA128 |
+ RegKind_MSACtrl | RegKind_COP2 | RegKind_ACC |
+ RegKind_CCR | RegKind_HWRegs | RegKind_COP3
};
private:
enum KindTy {
- k_CondCode,
- k_CoprocNum,
- k_Immediate,
- k_Memory,
- k_PostIndexRegister,
- k_Register,
- k_PtrReg,
- k_Token,
- k_LSAImm
+ k_Immediate, /// An immediate (possibly involving symbol references)
+ k_Memory, /// Base + Offset Memory Address
+ k_PhysRegister, /// A physical register from the Mips namespace
+ k_RegisterIndex, /// A register index in one or more RegKind.
+ k_Token /// A simple token
} Kind;
- MipsOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
+public:
+ MipsOperand(KindTy K, MipsAsmParser &Parser)
+ : MCParsedAsmOperand(), Kind(K), AsmParser(Parser) {}
+
+private:
+ /// For diagnostics, and checking the assembler temporary
+ MipsAsmParser &AsmParser;
struct Token {
const char *Data;
unsigned Length;
};
- struct RegOp {
- unsigned RegNum;
- RegisterKind Kind;
+ struct PhysRegOp {
+ unsigned Num; /// Register Number
+ };
+
+ struct RegIdxOp {
+ unsigned Index; /// Index into the register class
+ RegKind Kind; /// Bitfield of the kinds it could possibly be
+ const MCRegisterInfo *RegInfo;
};
struct ImmOp {
@@ -323,33 +364,173 @@ private:
};
struct MemOp {
- unsigned Base;
+ MipsOperand *Base;
const MCExpr *Off;
};
union {
struct Token Tok;
- struct RegOp Reg;
+ struct PhysRegOp PhysReg;
+ struct RegIdxOp RegIdx;
struct ImmOp Imm;
struct MemOp Mem;
};
SMLoc StartLoc, EndLoc;
+ /// Internal constructor for register kinds
+ static std::unique_ptr<MipsOperand> CreateReg(unsigned Index, RegKind RegKind,
+ const MCRegisterInfo *RegInfo,
+ SMLoc S, SMLoc E,
+ MipsAsmParser &Parser) {
+ auto Op = make_unique<MipsOperand>(k_RegisterIndex, Parser);
+ Op->RegIdx.Index = Index;
+ Op->RegIdx.RegInfo = RegInfo;
+ Op->RegIdx.Kind = RegKind;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
public:
- void addRegOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateReg(getReg()));
+ /// Coerce the register to GPR32 and return the real register for the current
+ /// target.
+ unsigned getGPR32Reg() const {
+ assert(isRegIdx() && (RegIdx.Kind & RegKind_GPR) && "Invalid access!");
+ AsmParser.WarnIfAssemblerTemporary(RegIdx.Index, StartLoc);
+ unsigned ClassID = Mips::GPR32RegClassID;
+ return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
}
- void addPtrRegOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateReg(getPtrReg()));
+ /// Coerce the register to GPR64 and return the real register for the current
+ /// target.
+ unsigned getGPR64Reg() const {
+ assert(isRegIdx() && (RegIdx.Kind & RegKind_GPR) && "Invalid access!");
+ unsigned ClassID = Mips::GPR64RegClassID;
+ return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
+ }
+
+private:
+ /// Coerce the register to AFGR64 and return the real register for the current
+ /// target.
+ unsigned getAFGR64Reg() const {
+ assert(isRegIdx() && (RegIdx.Kind & RegKind_FGR) && "Invalid access!");
+ if (RegIdx.Index % 2 != 0)
+ AsmParser.Warning(StartLoc, "Float register should be even.");
+ return RegIdx.RegInfo->getRegClass(Mips::AFGR64RegClassID)
+ .getRegister(RegIdx.Index / 2);
+ }
+
+ /// Coerce the register to FGR64 and return the real register for the current
+ /// target.
+ unsigned getFGR64Reg() const {
+ assert(isRegIdx() && (RegIdx.Kind & RegKind_FGR) && "Invalid access!");
+ return RegIdx.RegInfo->getRegClass(Mips::FGR64RegClassID)
+ .getRegister(RegIdx.Index);
+ }
+
+ /// Coerce the register to FGR32 and return the real register for the current
+ /// target.
+ unsigned getFGR32Reg() const {
+ assert(isRegIdx() && (RegIdx.Kind & RegKind_FGR) && "Invalid access!");
+ return RegIdx.RegInfo->getRegClass(Mips::FGR32RegClassID)
+ .getRegister(RegIdx.Index);
+ }
+
+ /// Coerce the register to FGRH32 and return the real register for the current
+ /// target.
+ unsigned getFGRH32Reg() const {
+ assert(isRegIdx() && (RegIdx.Kind & RegKind_FGR) && "Invalid access!");
+ return RegIdx.RegInfo->getRegClass(Mips::FGRH32RegClassID)
+ .getRegister(RegIdx.Index);
+ }
+
+ /// Coerce the register to FCC and return the real register for the current
+ /// target.
+ unsigned getFCCReg() const {
+ assert(isRegIdx() && (RegIdx.Kind & RegKind_FCC) && "Invalid access!");
+ return RegIdx.RegInfo->getRegClass(Mips::FCCRegClassID)
+ .getRegister(RegIdx.Index);
+ }
+
+ /// Coerce the register to MSA128 and return the real register for the current
+ /// target.
+ unsigned getMSA128Reg() const {
+ assert(isRegIdx() && (RegIdx.Kind & RegKind_MSA128) && "Invalid access!");
+ // It doesn't matter which of the MSA128[BHWD] classes we use. They are all
+ // identical
+ unsigned ClassID = Mips::MSA128BRegClassID;
+ return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
+ }
+
+ /// Coerce the register to MSACtrl and return the real register for the
+ /// current target.
+ unsigned getMSACtrlReg() const {
+ assert(isRegIdx() && (RegIdx.Kind & RegKind_MSACtrl) && "Invalid access!");
+ unsigned ClassID = Mips::MSACtrlRegClassID;
+ return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
+ }
+
+ /// Coerce the register to COP2 and return the real register for the
+ /// current target.
+ unsigned getCOP2Reg() const {
+ assert(isRegIdx() && (RegIdx.Kind & RegKind_COP2) && "Invalid access!");
+ unsigned ClassID = Mips::COP2RegClassID;
+ return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
+ }
+
+ /// Coerce the register to COP3 and return the real register for the
+ /// current target.
+ unsigned getCOP3Reg() const {
+ assert(isRegIdx() && (RegIdx.Kind & RegKind_COP3) && "Invalid access!");
+ unsigned ClassID = Mips::COP3RegClassID;
+ return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
+ }
+
+ /// Coerce the register to ACC64DSP and return the real register for the
+ /// current target.
+ unsigned getACC64DSPReg() const {
+ assert(isRegIdx() && (RegIdx.Kind & RegKind_ACC) && "Invalid access!");
+ unsigned ClassID = Mips::ACC64DSPRegClassID;
+ return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
+ }
+
+ /// Coerce the register to HI32DSP and return the real register for the
+ /// current target.
+ unsigned getHI32DSPReg() const {
+ assert(isRegIdx() && (RegIdx.Kind & RegKind_ACC) && "Invalid access!");
+ unsigned ClassID = Mips::HI32DSPRegClassID;
+ return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
+ }
+
+ /// Coerce the register to LO32DSP and return the real register for the
+ /// current target.
+ unsigned getLO32DSPReg() const {
+ assert(isRegIdx() && (RegIdx.Kind & RegKind_ACC) && "Invalid access!");
+ unsigned ClassID = Mips::LO32DSPRegClassID;
+ return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
+ }
+
+ /// Coerce the register to CCR and return the real register for the
+ /// current target.
+ unsigned getCCRReg() const {
+ assert(isRegIdx() && (RegIdx.Kind & RegKind_CCR) && "Invalid access!");
+ unsigned ClassID = Mips::CCRRegClassID;
+ return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
+ }
+
+ /// Coerce the register to HWRegs and return the real register for the
+ /// current target.
+ unsigned getHWRegsReg() const {
+ assert(isRegIdx() && (RegIdx.Kind & RegKind_HWRegs) && "Invalid access!");
+ unsigned ClassID = Mips::HWRegsRegClassID;
+ return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index);
}
+public:
void addExpr(MCInst &Inst, const MCExpr *Expr) const {
// Add as immediate when possible. Null MCExpr = 0.
- if (Expr == 0)
+ if (!Expr)
Inst.addOperand(MCOperand::CreateImm(0));
else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
@@ -357,6 +538,100 @@ public:
Inst.addOperand(MCOperand::CreateExpr(Expr));
}
+ void addRegOperands(MCInst &Inst, unsigned N) const {
+ llvm_unreachable("Use a custom parser instead");
+ }
+
+ /// Render the operand to an MCInst as a GPR32
+ /// Asserts if the wrong number of operands are requested, or the operand
+ /// is not a k_RegisterIndex compatible with RegKind_GPR
+ void addGPR32AsmRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getGPR32Reg()));
+ }
+
+ /// Render the operand to an MCInst as a GPR64
+ /// Asserts if the wrong number of operands are requested, or the operand
+ /// is not a k_RegisterIndex compatible with RegKind_GPR
+ void addGPR64AsmRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getGPR64Reg()));
+ }
+
+ void addAFGR64AsmRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getAFGR64Reg()));
+ }
+
+ void addFGR64AsmRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getFGR64Reg()));
+ }
+
+ void addFGR32AsmRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getFGR32Reg()));
+ // FIXME: We ought to do this for -integrated-as without -via-file-asm too.
+ if (!AsmParser.useOddSPReg() && RegIdx.Index & 1)
+ AsmParser.Error(StartLoc, "-mno-odd-spreg prohibits the use of odd FPU "
+ "registers");
+ }
+
+ void addFGRH32AsmRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getFGRH32Reg()));
+ }
+
+ void addFCCAsmRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getFCCReg()));
+ }
+
+ void addMSA128AsmRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getMSA128Reg()));
+ }
+
+ void addMSACtrlAsmRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getMSACtrlReg()));
+ }
+
+ void addCOP2AsmRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getCOP2Reg()));
+ }
+
+ void addCOP3AsmRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getCOP3Reg()));
+ }
+
+ void addACC64DSPAsmRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getACC64DSPReg()));
+ }
+
+ void addHI32DSPAsmRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getHI32DSPReg()));
+ }
+
+ void addLO32DSPAsmRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getLO32DSPReg()));
+ }
+
+ void addCCRAsmRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getCCRReg()));
+ }
+
+ void addHWRegsAsmRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getHWRegsReg()));
+ }
+
void addImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCExpr *Expr = getImm();
@@ -366,46 +641,72 @@ public:
void addMemOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateReg(getMemBase()));
+ Inst.addOperand(MCOperand::CreateReg(getMemBase()->getGPR32Reg()));
const MCExpr *Expr = getMemOff();
addExpr(Inst, Expr);
}
- bool isReg() const { return Kind == k_Register; }
- bool isImm() const { return Kind == k_Immediate; }
- bool isToken() const { return Kind == k_Token; }
- bool isMem() const { return Kind == k_Memory; }
- bool isPtrReg() const { return Kind == k_PtrReg; }
+ bool isReg() const override {
+ // As a special case until we sort out the definition of div/divu, pretend
+ // that $0/$zero are k_PhysRegister so that MCK_ZERO works correctly.
+ if (isGPRAsmReg() && RegIdx.Index == 0)
+ return true;
+
+ return Kind == k_PhysRegister;
+ }
+ bool isRegIdx() const { return Kind == k_RegisterIndex; }
+ bool isImm() const override { return Kind == k_Immediate; }
+ bool isConstantImm() const {
+ return isImm() && dyn_cast<MCConstantExpr>(getImm());
+ }
+ bool isToken() const override {
+ // Note: It's not possible to pretend that other operand kinds are tokens.
+ // The matcher emitter checks tokens first.
+ return Kind == k_Token;
+ }
+ bool isMem() const override { return Kind == k_Memory; }
+ bool isConstantMemOff() const {
+ return isMem() && dyn_cast<MCConstantExpr>(getMemOff());
+ }
+ template <unsigned Bits> bool isMemWithSimmOffset() const {
+ return isMem() && isConstantMemOff() && isInt<Bits>(getConstantMemOff());
+ }
bool isInvNum() const { return Kind == k_Immediate; }
- bool isLSAImm() const { return Kind == k_LSAImm; }
+ bool isLSAImm() const {
+ if (!isConstantImm())
+ return false;
+ int64_t Val = getConstantImm();
+ return 1 <= Val && Val <= 4;
+ }
StringRef getToken() const {
assert(Kind == k_Token && "Invalid access!");
return StringRef(Tok.Data, Tok.Length);
}
- unsigned getReg() const {
- assert((Kind == k_Register) && "Invalid access!");
- return Reg.RegNum;
- }
-
- unsigned getPtrReg() const {
- assert((Kind == k_PtrReg) && "Invalid access!");
- return Reg.RegNum;
- }
+ unsigned getReg() const override {
+ // As a special case until we sort out the definition of div/divu, pretend
+ // that $0/$zero are k_PhysRegister so that MCK_ZERO works correctly.
+ if (Kind == k_RegisterIndex && RegIdx.Index == 0 &&
+ RegIdx.Kind & RegKind_GPR)
+ return getGPR32Reg(); // FIXME: GPR64 too
- void setRegKind(RegisterKind RegKind) {
- assert((Kind == k_Register || Kind == k_PtrReg) && "Invalid access!");
- Reg.Kind = RegKind;
+ assert(Kind == k_PhysRegister && "Invalid access!");
+ return PhysReg.Num;
}
const MCExpr *getImm() const {
- assert((Kind == k_Immediate || Kind == k_LSAImm) && "Invalid access!");
+ assert((Kind == k_Immediate) && "Invalid access!");
return Imm.Val;
}
- unsigned getMemBase() const {
+ int64_t getConstantImm() const {
+ const MCExpr *Val = getImm();
+ return static_cast<const MCConstantExpr *>(Val)->getValue();
+ }
+
+ MipsOperand *getMemBase() const {
assert((Kind == k_Memory) && "Invalid access!");
return Mem.Base;
}
@@ -415,8 +716,13 @@ public:
return Mem.Off;
}
- static MipsOperand *CreateToken(StringRef Str, SMLoc S) {
- MipsOperand *Op = new MipsOperand(k_Token);
+ int64_t getConstantMemOff() const {
+ return static_cast<const MCConstantExpr *>(getMemOff())->getValue();
+ }
+
+ static std::unique_ptr<MipsOperand> CreateToken(StringRef Str, SMLoc S,
+ MipsAsmParser &Parser) {
+ auto Op = make_unique<MipsOperand>(k_Token, Parser);
Op->Tok.Data = Str.data();
Op->Tok.Length = Str.size();
Op->StartLoc = S;
@@ -424,130 +730,162 @@ public:
return Op;
}
- static MipsOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
- MipsOperand *Op = new MipsOperand(k_Register);
- Op->Reg.RegNum = RegNum;
- Op->StartLoc = S;
- Op->EndLoc = E;
- return Op;
- }
-
- static MipsOperand *CreatePtrReg(unsigned RegNum, SMLoc S, SMLoc E) {
- MipsOperand *Op = new MipsOperand(k_PtrReg);
- Op->Reg.RegNum = RegNum;
- Op->StartLoc = S;
- Op->EndLoc = E;
- return Op;
- }
-
- static MipsOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
- MipsOperand *Op = new MipsOperand(k_Immediate);
- Op->Imm.Val = Val;
- Op->StartLoc = S;
- Op->EndLoc = E;
- return Op;
- }
-
- static MipsOperand *CreateLSAImm(const MCExpr *Val, SMLoc S, SMLoc E) {
- MipsOperand *Op = new MipsOperand(k_LSAImm);
- Op->Imm.Val = Val;
- Op->StartLoc = S;
- Op->EndLoc = E;
- return Op;
+ /// Create a numeric register (e.g. $1). The exact register remains
+ /// unresolved until an instruction successfully matches
+ static std::unique_ptr<MipsOperand>
+ CreateNumericReg(unsigned Index, const MCRegisterInfo *RegInfo, SMLoc S,
+ SMLoc E, MipsAsmParser &Parser) {
+ DEBUG(dbgs() << "CreateNumericReg(" << Index << ", ...)\n");
+ return CreateReg(Index, RegKind_Numeric, RegInfo, S, E, Parser);
}
- static MipsOperand *CreateMem(unsigned Base, const MCExpr *Off,
- SMLoc S, SMLoc E) {
- MipsOperand *Op = new MipsOperand(k_Memory);
- Op->Mem.Base = Base;
- Op->Mem.Off = Off;
- Op->StartLoc = S;
- Op->EndLoc = E;
- return Op;
+ /// Create a register that is definitely a GPR.
+ /// This is typically only used for named registers such as $gp.
+ static std::unique_ptr<MipsOperand>
+ CreateGPRReg(unsigned Index, const MCRegisterInfo *RegInfo, SMLoc S, SMLoc E,
+ MipsAsmParser &Parser) {
+ return CreateReg(Index, RegKind_GPR, RegInfo, S, E, Parser);
}
- bool isGPR32Asm() const {
- return Kind == k_Register && Reg.Kind == Kind_GPR32;
- }
- void addRegAsmOperands(MCInst &Inst, unsigned N) const {
- Inst.addOperand(MCOperand::CreateReg(Reg.RegNum));
+ /// Create a register that is definitely a FGR.
+ /// This is typically only used for named registers such as $f0.
+ static std::unique_ptr<MipsOperand>
+ CreateFGRReg(unsigned Index, const MCRegisterInfo *RegInfo, SMLoc S, SMLoc E,
+ MipsAsmParser &Parser) {
+ return CreateReg(Index, RegKind_FGR, RegInfo, S, E, Parser);
}
- bool isGPR64Asm() const {
- return Kind == k_Register && Reg.Kind == Kind_GPR64;
+ /// Create a register that is definitely an FCC.
+ /// This is typically only used for named registers such as $fcc0.
+ static std::unique_ptr<MipsOperand>
+ CreateFCCReg(unsigned Index, const MCRegisterInfo *RegInfo, SMLoc S, SMLoc E,
+ MipsAsmParser &Parser) {
+ return CreateReg(Index, RegKind_FCC, RegInfo, S, E, Parser);
}
- bool isHWRegsAsm() const {
- assert((Kind == k_Register) && "Invalid access!");
- return Reg.Kind == Kind_HWRegs;
+ /// Create a register that is definitely an ACC.
+ /// This is typically only used for named registers such as $ac0.
+ static std::unique_ptr<MipsOperand>
+ CreateACCReg(unsigned Index, const MCRegisterInfo *RegInfo, SMLoc S, SMLoc E,
+ MipsAsmParser &Parser) {
+ return CreateReg(Index, RegKind_ACC, RegInfo, S, E, Parser);
}
- bool isCCRAsm() const {
- assert((Kind == k_Register) && "Invalid access!");
- return Reg.Kind == Kind_CCRRegs;
+ /// Create a register that is definitely an MSA128.
+ /// This is typically only used for named registers such as $w0.
+ static std::unique_ptr<MipsOperand>
+ CreateMSA128Reg(unsigned Index, const MCRegisterInfo *RegInfo, SMLoc S,
+ SMLoc E, MipsAsmParser &Parser) {
+ return CreateReg(Index, RegKind_MSA128, RegInfo, S, E, Parser);
}
- bool isAFGR64Asm() const {
- return Kind == k_Register && Reg.Kind == Kind_AFGR64Regs;
+ /// Create a register that is definitely an MSACtrl.
+ /// This is typically only used for named registers such as $msaaccess.
+ static std::unique_ptr<MipsOperand>
+ CreateMSACtrlReg(unsigned Index, const MCRegisterInfo *RegInfo, SMLoc S,
+ SMLoc E, MipsAsmParser &Parser) {
+ return CreateReg(Index, RegKind_MSACtrl, RegInfo, S, E, Parser);
}
- bool isFGR64Asm() const {
- return Kind == k_Register && Reg.Kind == Kind_FGR64Regs;
+ static std::unique_ptr<MipsOperand>
+ CreateImm(const MCExpr *Val, SMLoc S, SMLoc E, MipsAsmParser &Parser) {
+ auto Op = make_unique<MipsOperand>(k_Immediate, Parser);
+ Op->Imm.Val = Val;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
}
- bool isFGR32Asm() const {
- return (Kind == k_Register) && Reg.Kind == Kind_FGR32Regs;
+ static std::unique_ptr<MipsOperand>
+ CreateMem(std::unique_ptr<MipsOperand> Base, const MCExpr *Off, SMLoc S,
+ SMLoc E, MipsAsmParser &Parser) {
+ auto Op = make_unique<MipsOperand>(k_Memory, Parser);
+ Op->Mem.Base = Base.release();
+ Op->Mem.Off = Off;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
}
- bool isFGRH32Asm() const {
- return (Kind == k_Register) && Reg.Kind == Kind_FGRH32Regs;
+ bool isGPRAsmReg() const {
+ return isRegIdx() && RegIdx.Kind & RegKind_GPR && RegIdx.Index <= 31;
}
-
- bool isFCCRegsAsm() const {
- return (Kind == k_Register) && Reg.Kind == Kind_FCCRegs;
+ bool isFGRAsmReg() const {
+ // AFGR64 is $0-$15 but we handle this in getAFGR64()
+ return isRegIdx() && RegIdx.Kind & RegKind_FGR && RegIdx.Index <= 31;
}
-
- bool isACC64DSPAsm() const {
- return Kind == k_Register && Reg.Kind == Kind_ACC64DSP;
+ bool isHWRegsAsmReg() const {
+ return isRegIdx() && RegIdx.Kind & RegKind_HWRegs && RegIdx.Index <= 31;
}
-
- bool isLO32DSPAsm() const {
- return Kind == k_Register && Reg.Kind == Kind_LO32DSP;
+ bool isCCRAsmReg() const {
+ return isRegIdx() && RegIdx.Kind & RegKind_CCR && RegIdx.Index <= 31;
}
-
- bool isHI32DSPAsm() const {
- return Kind == k_Register && Reg.Kind == Kind_HI32DSP;
+ bool isFCCAsmReg() const {
+ if (!(isRegIdx() && RegIdx.Kind & RegKind_FCC))
+ return false;
+ if (!AsmParser.hasEightFccRegisters())
+ return RegIdx.Index == 0;
+ return RegIdx.Index <= 7;
}
-
- bool isCOP2Asm() const { return Kind == k_Register && Reg.Kind == Kind_COP2; }
-
- bool isMSA128BAsm() const {
- return Kind == k_Register && Reg.Kind == Kind_MSA128BRegs;
+ bool isACCAsmReg() const {
+ return isRegIdx() && RegIdx.Kind & RegKind_ACC && RegIdx.Index <= 3;
}
-
- bool isMSA128HAsm() const {
- return Kind == k_Register && Reg.Kind == Kind_MSA128HRegs;
+ bool isCOP2AsmReg() const {
+ return isRegIdx() && RegIdx.Kind & RegKind_COP2 && RegIdx.Index <= 31;
}
-
- bool isMSA128WAsm() const {
- return Kind == k_Register && Reg.Kind == Kind_MSA128WRegs;
+ bool isCOP3AsmReg() const {
+ return isRegIdx() && RegIdx.Kind & RegKind_COP3 && RegIdx.Index <= 31;
}
-
- bool isMSA128DAsm() const {
- return Kind == k_Register && Reg.Kind == Kind_MSA128DRegs;
+ bool isMSA128AsmReg() const {
+ return isRegIdx() && RegIdx.Kind & RegKind_MSA128 && RegIdx.Index <= 31;
}
-
- bool isMSA128CRAsm() const {
- return Kind == k_Register && Reg.Kind == Kind_MSA128CtrlRegs;
+ bool isMSACtrlAsmReg() const {
+ return isRegIdx() && RegIdx.Kind & RegKind_MSACtrl && RegIdx.Index <= 7;
}
/// getStartLoc - Get the location of the first token of this operand.
- SMLoc getStartLoc() const { return StartLoc; }
+ SMLoc getStartLoc() const override { return StartLoc; }
/// getEndLoc - Get the location of the last token of this operand.
- SMLoc getEndLoc() const { return EndLoc; }
+ SMLoc getEndLoc() const override { return EndLoc; }
- virtual void print(raw_ostream &OS) const {
- llvm_unreachable("unimplemented!");
+ virtual ~MipsOperand() {
+ switch (Kind) {
+ case k_Immediate:
+ break;
+ case k_Memory:
+ delete Mem.Base;
+ break;
+ case k_PhysRegister:
+ case k_RegisterIndex:
+ case k_Token:
+ break;
+ }
+ }
+
+ void print(raw_ostream &OS) const override {
+ switch (Kind) {
+ case k_Immediate:
+ OS << "Imm<";
+ Imm.Val->print(OS);
+ OS << ">";
+ break;
+ case k_Memory:
+ OS << "Mem<";
+ Mem.Base->print(OS);
+ OS << ", ";
+ Mem.Off->print(OS);
+ OS << ">";
+ break;
+ case k_PhysRegister:
+ OS << "PhysReg<" << PhysReg.Num << ">";
+ break;
+ case k_RegisterIndex:
+ OS << "RegIdx<" << RegIdx.Index << ":" << RegIdx.Kind << ">";
+ break;
+ case k_Token:
+ OS << Tok.Data;
+ break;
+ }
}
}; // class MipsOperand
} // namespace
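
Rather than one operand kind per register class, the rewritten MipsOperand stores a register index plus a RegKind bitmask of every class the token could still belong to; a bare "$1" starts as RegKind_Numeric and the addXXXAsmRegOperands render methods pick the concrete class once an instruction matches. A generic standalone sketch (hypothetical names) of that bitmask idea:

    #include <cstdio>

    enum RegKind : unsigned {
      RegKind_GPR = 1,
      RegKind_FGR = 2,
      RegKind_FCC = 4,
      RegKind_Numeric = RegKind_GPR | RegKind_FGR | RegKind_FCC
    };

    struct RegOperand {
      unsigned Index;
      unsigned Kinds; // bitmask of RegKind values the operand could still be
      bool couldBeGPR() const { return (Kinds & RegKind_GPR) && Index <= 31; }
      bool couldBeFCC() const { return (Kinds & RegKind_FCC) && Index <= 7; }
    };

    int main() {
      RegOperand Dollar1{1, RegKind_Numeric}; // "$1": still ambiguous
      RegOperand Fcc2{2, RegKind_FCC};        // "$fcc2": definitely FCC
      std::printf("%d %d\n", Dollar1.couldBeGPR(), Fcc2.couldBeGPR()); // 1 0
      return 0;
    }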
@@ -562,7 +900,67 @@ static const MCInstrDesc &getInstDesc(unsigned Opcode) {
bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions) {
const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
+
Inst.setLoc(IDLoc);
+
+ if (MCID.isBranch() || MCID.isCall()) {
+ const unsigned Opcode = Inst.getOpcode();
+ MCOperand Offset;
+
+ switch (Opcode) {
+ default:
+ break;
+ case Mips::BEQ:
+ case Mips::BNE:
+ case Mips::BEQ_MM:
+ case Mips::BNE_MM:
+ assert(MCID.getNumOperands() == 3 && "unexpected number of operands");
+ Offset = Inst.getOperand(2);
+ if (!Offset.isImm())
+ break; // We'll deal with this situation later on when applying fixups.
+ if (!isIntN(inMicroMipsMode() ? 17 : 18, Offset.getImm()))
+ return Error(IDLoc, "branch target out of range");
+ if (OffsetToAlignment(Offset.getImm(),
+ 1LL << (inMicroMipsMode() ? 1 : 2)))
+ return Error(IDLoc, "branch to misaligned address");
+ break;
+ case Mips::BGEZ:
+ case Mips::BGTZ:
+ case Mips::BLEZ:
+ case Mips::BLTZ:
+ case Mips::BGEZAL:
+ case Mips::BLTZAL:
+ case Mips::BC1F:
+ case Mips::BC1T:
+ case Mips::BGEZ_MM:
+ case Mips::BGTZ_MM:
+ case Mips::BLEZ_MM:
+ case Mips::BLTZ_MM:
+ case Mips::BGEZAL_MM:
+ case Mips::BLTZAL_MM:
+ case Mips::BC1F_MM:
+ case Mips::BC1T_MM:
+ assert(MCID.getNumOperands() == 2 && "unexpected number of operands");
+ Offset = Inst.getOperand(1);
+ if (!Offset.isImm())
+ break; // We'll deal with this situation later on when applying fixups.
+ if (!isIntN(inMicroMipsMode() ? 17 : 18, Offset.getImm()))
+ return Error(IDLoc, "branch target out of range");
+ if (OffsetToAlignment(Offset.getImm(),
+ 1LL << (inMicroMipsMode() ? 1 : 2)))
+ return Error(IDLoc, "branch to misaligned address");
+ break;
+ }
+ }
+
+ // SSNOP is deprecated on MIPS32r6/MIPS64r6
+ // We still accept it but it is a normal nop.
+ if (hasMips32r6() && Inst.getOpcode() == Mips::SSNOP) {
+ std::string ISA = hasMips64r6() ? "MIPS64r6" : "MIPS32r6";
+ Warning(IDLoc, "ssnop is deprecated for " + ISA + " and is equivalent to a "
+ "nop instruction");
+ }
+
if (MCID.hasDelaySlot() && Options.isReorder()) {
// If this instruction has a delay slot and .set reorder is active,
// emit a NOP after it.
@@ -611,7 +1009,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
} // if load/store
if (needsExpansion(Inst))
- expandInstruction(Inst, IDLoc, Instructions);
+ return expandInstruction(Inst, IDLoc, Instructions);
else
Instructions.push_back(Inst);
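
The new branch handling in processInstruction rejects immediate branch targets that do not fit the encodable window or are misaligned: 18-bit signed offsets aligned to 4 for standard encodings, 17-bit aligned to 2 for microMIPS. A standalone sketch of the same check (not the LLVM helpers):

    #include <cstdint>
    #include <cstdio>

    // True if V fits in an N-bit signed immediate.
    static bool fitsSignedBits(unsigned N, int64_t V) {
      return V >= -(int64_t(1) << (N - 1)) && V < (int64_t(1) << (N - 1));
    }

    static bool validBranchOffset(int64_t Off, bool MicroMips) {
      unsigned Bits = MicroMips ? 17 : 18;
      int64_t Align = MicroMips ? 2 : 4;
      return fitsSignedBits(Bits, Off) && (Off % Align == 0);
    }

    int main() {
      std::printf("%d\n", validBranchOffset(131068, false)); // 1: in range, aligned
      std::printf("%d\n", validBranchOffset(131072, false)); // 0: out of 18-bit range
      std::printf("%d\n", validBranchOffset(6, false));      // 0: misaligned
      return 0;
    }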
@@ -624,17 +1022,27 @@ bool MipsAsmParser::needsExpansion(MCInst &Inst) {
case Mips::LoadImm32Reg:
case Mips::LoadAddr32Imm:
case Mips::LoadAddr32Reg:
+ case Mips::LoadImm64Reg:
return true;
default:
return false;
}
}
-void MipsAsmParser::expandInstruction(MCInst &Inst, SMLoc IDLoc,
+bool MipsAsmParser::expandInstruction(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions) {
switch (Inst.getOpcode()) {
+ default:
+ assert(0 && "unimplemented expansion");
+ return true;
case Mips::LoadImm32Reg:
return expandLoadImm(Inst, IDLoc, Instructions);
+ case Mips::LoadImm64Reg:
+ if (!isGP64bit()) {
+ Error(IDLoc, "instruction requires a CPU feature not currently enabled");
+ return true;
+ }
+ return expandLoadImm(Inst, IDLoc, Instructions);
case Mips::LoadAddr32Imm:
return expandLoadAddressImm(Inst, IDLoc, Instructions);
case Mips::LoadAddr32Reg:
@@ -642,7 +1050,31 @@ void MipsAsmParser::expandInstruction(MCInst &Inst, SMLoc IDLoc,
}
}
-void MipsAsmParser::expandLoadImm(MCInst &Inst, SMLoc IDLoc,
+namespace {
+template <int Shift, bool PerformShift>
+void createShiftOr(int64_t Value, unsigned RegNo, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions) {
+ MCInst tmpInst;
+ if (PerformShift) {
+ tmpInst.setOpcode(Mips::DSLL);
+ tmpInst.addOperand(MCOperand::CreateReg(RegNo));
+ tmpInst.addOperand(MCOperand::CreateReg(RegNo));
+ tmpInst.addOperand(MCOperand::CreateImm(16));
+ tmpInst.setLoc(IDLoc);
+ Instructions.push_back(tmpInst);
+ tmpInst.clear();
+ }
+ tmpInst.setOpcode(Mips::ORi);
+ tmpInst.addOperand(MCOperand::CreateReg(RegNo));
+ tmpInst.addOperand(MCOperand::CreateReg(RegNo));
+ tmpInst.addOperand(
+ MCOperand::CreateImm(((Value & (0xffffLL << Shift)) >> Shift)));
+ tmpInst.setLoc(IDLoc);
+ Instructions.push_back(tmpInst);
+}
+}
+
+bool MipsAsmParser::expandLoadImm(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions) {
MCInst tmpInst;
const MCOperand &ImmOp = Inst.getOperand(1);
@@ -650,8 +1082,10 @@ void MipsAsmParser::expandLoadImm(MCInst &Inst, SMLoc IDLoc,
const MCOperand &RegOp = Inst.getOperand(0);
assert(RegOp.isReg() && "expected register operand kind");
- int ImmValue = ImmOp.getImm();
+ int64_t ImmValue = ImmOp.getImm();
tmpInst.setLoc(IDLoc);
+ // FIXME: gas has a special case for values that are 000...1111, which
+ // becomes a li -1 and then a dsrl
if (0 <= ImmValue && ImmValue <= 65535) {
// For 0 <= j <= 65535.
// li d,j => ori d,$zero,j
@@ -668,25 +1102,76 @@ void MipsAsmParser::expandLoadImm(MCInst &Inst, SMLoc IDLoc,
tmpInst.addOperand(MCOperand::CreateReg(Mips::ZERO));
tmpInst.addOperand(MCOperand::CreateImm(ImmValue));
Instructions.push_back(tmpInst);
- } else {
- // For any other value of j that is representable as a 32-bit integer.
+ } else if ((ImmValue & 0xffffffff) == ImmValue) {
+ // For any value of j that is representable as a 32-bit integer, create
+ // a sequence of:
// li d,j => lui d,hi16(j)
// ori d,d,lo16(j)
tmpInst.setOpcode(Mips::LUi);
tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg()));
tmpInst.addOperand(MCOperand::CreateImm((ImmValue & 0xffff0000) >> 16));
Instructions.push_back(tmpInst);
- tmpInst.clear();
- tmpInst.setOpcode(Mips::ORi);
+ createShiftOr<0, false>(ImmValue, RegOp.getReg(), IDLoc, Instructions);
+ } else if ((ImmValue & (0xffffLL << 48)) == 0) {
+ if (!isGP64bit()) {
+ Error(IDLoc, "instruction requires a CPU feature not currently enabled");
+ return true;
+ }
+
+ // <------- lo32 ------>
+ // <------- hi32 ------>
+ // <- hi16 -> <- lo16 ->
+ // _________________________________
+ // | | | |
+ // | 16-bytes | 16-bytes | 16-bytes |
+ // |__________|__________|__________|
+ //
+ // For any value of j that is representable as a 48-bit integer, create
+ // a sequence of:
+ // li d,j => lui d,hi16(j)
+ // ori d,d,hi16(lo32(j))
+ // dsll d,d,16
+ // ori d,d,lo16(lo32(j))
+ tmpInst.setOpcode(Mips::LUi);
tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg()));
+ tmpInst.addOperand(
+ MCOperand::CreateImm((ImmValue & (0xffffLL << 32)) >> 32));
+ Instructions.push_back(tmpInst);
+ createShiftOr<16, false>(ImmValue, RegOp.getReg(), IDLoc, Instructions);
+ createShiftOr<0, true>(ImmValue, RegOp.getReg(), IDLoc, Instructions);
+ } else {
+ if (!isGP64bit()) {
+ Error(IDLoc, "instruction requires a CPU feature not currently enabled");
+ return true;
+ }
+
+ // <------- hi32 ------> <------- lo32 ------>
+ // <- hi16 -> <- lo16 ->
+ // ___________________________________________
+ // | | | | |
+ // | 16-bytes | 16-bytes | 16-bytes | 16-bytes |
+ // |__________|__________|__________|__________|
+ //
+ // For any value of j that isn't representable as a 48-bit integer.
+ // li d,j => lui d,hi16(j)
+ // ori d,d,lo16(hi32(j))
+ // dsll d,d,16
+ // ori d,d,hi16(lo32(j))
+ // dsll d,d,16
+ // ori d,d,lo16(lo32(j))
+ tmpInst.setOpcode(Mips::LUi);
tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg()));
- tmpInst.addOperand(MCOperand::CreateImm(ImmValue & 0xffff));
- tmpInst.setLoc(IDLoc);
+ tmpInst.addOperand(
+ MCOperand::CreateImm((ImmValue & (0xffffLL << 48)) >> 48));
Instructions.push_back(tmpInst);
+ createShiftOr<32, false>(ImmValue, RegOp.getReg(), IDLoc, Instructions);
+ createShiftOr<16, true>(ImmValue, RegOp.getReg(), IDLoc, Instructions);
+ createShiftOr<0, true>(ImmValue, RegOp.getReg(), IDLoc, Instructions);
}
+ return false;
}
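
As a sketch of the expansions above (register and constants chosen arbitrarily), a 32-bit immediate needs a single lui/ori pair, while a full 64-bit immediate walks down the value 16 bits at a time with dsll/ori:

    li $4, 0x12345678          =>  lui  $4, 0x1234
                                   ori  $4, $4, 0x5678

    li $4, 0x1122334455667788  =>  lui  $4, 0x1122
                                   ori  $4, $4, 0x3344
                                   dsll $4, $4, 16
                                   ori  $4, $4, 0x5566
                                   dsll $4, $4, 16
                                   ori  $4, $4, 0x7788
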
-void
+bool
MipsAsmParser::expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions) {
MCInst tmpInst;
@@ -727,9 +1212,10 @@ MipsAsmParser::expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc,
tmpInst.addOperand(MCOperand::CreateReg(SrcRegOp.getReg()));
Instructions.push_back(tmpInst);
}
+ return false;
}
-void
+bool
MipsAsmParser::expandLoadAddressImm(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions) {
MCInst tmpInst;
@@ -761,6 +1247,7 @@ MipsAsmParser::expandLoadAddressImm(MCInst &Inst, SMLoc IDLoc,
tmpInst.addOperand(MCOperand::CreateImm(ImmValue & 0xffff));
Instructions.push_back(tmpInst);
}
+ return false;
}
void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc,
@@ -771,8 +1258,6 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc,
unsigned ImmOffset, HiOffset, LoOffset;
const MCExpr *ExprOffset;
unsigned TmpRegNum;
- unsigned AtRegNum = getReg(
- (isMips64()) ? Mips::GPR64RegClassID : Mips::GPR32RegClassID, getATReg());
// 1st operand is either the source or destination register.
assert(Inst.getOperand(0).isReg() && "expected register operand kind");
unsigned RegOpNum = Inst.getOperand(0).getReg();
@@ -792,10 +1277,46 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc,
ExprOffset = Inst.getOperand(2).getExpr();
// All instructions will have the same location.
TempInst.setLoc(IDLoc);
- // 1st instruction in expansion is LUi. For load instruction we can use
- // the dst register as a temporary if base and dst are different,
- // but for stores we must use $at.
- TmpRegNum = (isLoad && (BaseRegNum != RegOpNum)) ? RegOpNum : AtRegNum;
+ // These are some of the types of expansions we perform here:
+ // 1) lw $8, sym => lui $8, %hi(sym)
+ // lw $8, %lo(sym)($8)
+ // 2) lw $8, offset($9) => lui $8, %hi(offset)
+ // add $8, $8, $9
+ // lw $8, %lo(offset)($9)
+ // 3) lw $8, offset($8) => lui $at, %hi(offset)
+ // add $at, $at, $8
+ // lw $8, %lo(offset)($at)
+ // 4) sw $8, sym => lui $at, %hi(sym)
+ // sw $8, %lo(sym)($at)
+ // 5) sw $8, offset($8) => lui $at, %hi(offset)
+ // add $at, $at, $8
+ // sw $8, %lo(offset)($at)
+ // 6) ldc1 $f0, sym => lui $at, %hi(sym)
+ // ldc1 $f0, %lo(sym)($at)
+ //
+ // For load instructions we can use the destination register as a temporary
+ // if base and dst are different (examples 1 and 2) and if the destination
+ // register is a general purpose register; otherwise we must use $at
+ // (example 6) and error if it is not available. For stores we must use $at
+ // (examples 4 and 5) because we must not clobber the source register while
+ // setting up the offset.
+ const MCInstrDesc &Desc = getInstDesc(Inst.getOpcode());
+ int16_t RegClassOp0 = Desc.OpInfo[0].RegClass;
+ unsigned RegClassIDOp0 =
+ getContext().getRegisterInfo()->getRegClass(RegClassOp0).getID();
+ bool IsGPR = (RegClassIDOp0 == Mips::GPR32RegClassID) ||
+ (RegClassIDOp0 == Mips::GPR64RegClassID);
+ if (isLoad && IsGPR && (BaseRegNum != RegOpNum))
+ TmpRegNum = RegOpNum;
+ else {
+ int AT = getATReg(IDLoc);
+ // At this point we need AT to perform the expansions and we exit if it is
+ // not available.
+ if (!AT)
+ return;
+ TmpRegNum = getReg(
+ (isGP64bit()) ? Mips::GPR64RegClassID : Mips::GPR32RegClassID, AT);
+ }
+
TempInst.setOpcode(Mips::LUi);
TempInst.addOperand(MCOperand::CreateReg(TmpRegNum));
if (isImmOpnd)
@@ -823,7 +1344,7 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc,
TempInst.addOperand(MCOperand::CreateReg(BaseRegNum));
Instructions.push_back(TempInst);
TempInst.clear();
- // And finaly, create original instruction with low part
+ // And finally, create original instruction with low part
// of offset and new base.
TempInst.setOpcode(Inst.getOpcode());
TempInst.addOperand(MCOperand::CreateReg(RegOpNum));
@@ -845,10 +1366,24 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc,
TempInst.clear();
}
-bool MipsAsmParser::MatchAndEmitInstruction(
- SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand *> &Operands, MCStreamer &Out,
- unsigned &ErrorInfo, bool MatchingInlineAsm) {
+unsigned MipsAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
+ // As described by the Mips32r2 spec, the registers Rd and Rs for
+ // jalr.hb must be different.
+ unsigned Opcode = Inst.getOpcode();
+
+ if (Opcode == Mips::JALR_HB &&
+ (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()))
+ return Match_RequiresDifferentSrcAndDst;
+
+ return Match_Success;
+}
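
For example (registers chosen arbitrarily), the predicate above makes the matcher reject a jalr.hb whose destination and source coincide:

    jalr.hb $4, $4    # error: source and destination must be different
    jalr.hb $4, $5    # accepted
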
+
+bool MipsAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ OperandVector &Operands,
+ MCStreamer &Out,
+ unsigned &ErrorInfo,
+ bool MatchingInlineAsm) {
+
MCInst Inst;
SmallVector<MCInst, 8> Instructions;
unsigned MatchResult =
@@ -861,7 +1396,7 @@ bool MipsAsmParser::MatchAndEmitInstruction(
if (processInstruction(Inst, IDLoc, Instructions))
return true;
for (unsigned i = 0; i < Instructions.size(); i++)
- Out.EmitInstruction(Instructions[i]);
+ Out.EmitInstruction(Instructions[i], STI);
return false;
}
case Match_MissingFeature:
@@ -873,7 +1408,7 @@ bool MipsAsmParser::MatchAndEmitInstruction(
if (ErrorInfo >= Operands.size())
return Error(IDLoc, "too few operands for instruction");
- ErrorLoc = ((MipsOperand *)Operands[ErrorInfo])->getStartLoc();
+ ErrorLoc = ((MipsOperand &)*Operands[ErrorInfo]).getStartLoc();
if (ErrorLoc == SMLoc())
ErrorLoc = IDLoc;
}
@@ -882,18 +1417,28 @@ bool MipsAsmParser::MatchAndEmitInstruction(
}
case Match_MnemonicFail:
return Error(IDLoc, "invalid instruction");
+ case Match_RequiresDifferentSrcAndDst:
+ return Error(IDLoc, "source and destination must be different");
}
return true;
}
+void MipsAsmParser::WarnIfAssemblerTemporary(int RegIndex, SMLoc Loc) {
+ if ((RegIndex != 0) && ((int)Options.getATRegNum() == RegIndex)) {
+ if (RegIndex == 1)
+ Warning(Loc, "Used $at without \".set noat\"");
+ else
+ Warning(Loc, Twine("Used $") + Twine(RegIndex) + " with \".set at=$" +
+ Twine(RegIndex) + "\"");
+ }
+}
+
int MipsAsmParser::matchCPURegisterName(StringRef Name) {
int CC;
- if (Name == "at")
- return getATReg();
-
CC = StringSwitch<unsigned>(Name)
.Case("zero", 0)
+ .Case("at", 1)
.Case("a0", 4)
.Case("a1", 5)
.Case("a2", 6)
@@ -910,9 +1455,10 @@ int MipsAsmParser::matchCPURegisterName(StringRef Name) {
.Case("s7", 23)
.Case("k0", 26)
.Case("k1", 27)
+ .Case("gp", 28)
.Case("sp", 29)
.Case("fp", 30)
- .Case("gp", 28)
+ .Case("s8", 30)
.Case("ra", 31)
.Case("t0", 8)
.Case("t1", 9)
@@ -926,22 +1472,23 @@ int MipsAsmParser::matchCPURegisterName(StringRef Name) {
.Case("t9", 25)
.Default(-1);
- // Although SGI documentation just cuts out t0-t3 for n32/n64,
- // GNU pushes the values of t0-t3 to override the o32/o64 values for t4-t7
- // We are supporting both cases, so for t0-t3 we'll just push them to t4-t7.
- if (isMips64() && 8 <= CC && CC <= 11)
- CC += 4;
-
- if (CC == -1 && isMips64())
- CC = StringSwitch<unsigned>(Name)
- .Case("a4", 8)
- .Case("a5", 9)
- .Case("a6", 10)
- .Case("a7", 11)
- .Case("kt0", 26)
- .Case("kt1", 27)
- .Case("s8", 30)
- .Default(-1);
+ if (isABI_N32() || isABI_N64()) {
+ // Although SGI documentation just cuts out t0-t3 for n32/n64,
+ // GNU pushes the values of t0-t3 to override the o32/o64 values for t4-t7
+ // We are supporting both cases, so for t0-t3 we'll just push them to t4-t7.
+ if (8 <= CC && CC <= 11)
+ CC += 4;
+
+ if (CC == -1)
+ CC = StringSwitch<unsigned>(Name)
+ .Case("a4", 8)
+ .Case("a5", 9)
+ .Case("a6", 10)
+ .Case("a7", 11)
+ .Case("kt0", 26)
+ .Case("kt1", 27)
+ .Default(-1);
+ }
return CC;
}
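
As an illustration of the remapping above, the same name can resolve to different GPR numbers depending on the ABI:

    $t0  ->  GPR 8  under O32, GPR 12 under N32/N64
    $a4  ->  GPR 8  under N32/N64 only
    $s8  ->  GPR 30 (same register as $fp) under any ABI
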
@@ -1017,59 +1564,6 @@ int MipsAsmParser::matchMSA128CtrlRegisterName(StringRef Name) {
return CC;
}
-int MipsAsmParser::matchRegisterName(StringRef Name, bool is64BitReg) {
-
- int CC;
- CC = matchCPURegisterName(Name);
- if (CC != -1)
- return matchRegisterByNumber(CC, is64BitReg ? Mips::GPR64RegClassID
- : Mips::GPR32RegClassID);
- CC = matchFPURegisterName(Name);
- // TODO: decide about fpu register class
- if (CC != -1)
- return matchRegisterByNumber(CC, isFP64() ? Mips::FGR64RegClassID
- : Mips::FGR32RegClassID);
- return matchMSA128RegisterName(Name);
-}
-
-int MipsAsmParser::regKindToRegClass(int RegKind) {
-
- switch (RegKind) {
- case MipsOperand::Kind_GPR32:
- return Mips::GPR32RegClassID;
- case MipsOperand::Kind_GPR64:
- return Mips::GPR64RegClassID;
- case MipsOperand::Kind_HWRegs:
- return Mips::HWRegsRegClassID;
- case MipsOperand::Kind_FGR32Regs:
- return Mips::FGR32RegClassID;
- case MipsOperand::Kind_FGRH32Regs:
- return Mips::FGRH32RegClassID;
- case MipsOperand::Kind_FGR64Regs:
- return Mips::FGR64RegClassID;
- case MipsOperand::Kind_AFGR64Regs:
- return Mips::AFGR64RegClassID;
- case MipsOperand::Kind_CCRRegs:
- return Mips::CCRRegClassID;
- case MipsOperand::Kind_ACC64DSP:
- return Mips::ACC64DSPRegClassID;
- case MipsOperand::Kind_FCCRegs:
- return Mips::FCCRegClassID;
- case MipsOperand::Kind_MSA128BRegs:
- return Mips::MSA128BRegClassID;
- case MipsOperand::Kind_MSA128HRegs:
- return Mips::MSA128HRegClassID;
- case MipsOperand::Kind_MSA128WRegs:
- return Mips::MSA128WRegClassID;
- case MipsOperand::Kind_MSA128DRegs:
- return Mips::MSA128DRegClassID;
- case MipsOperand::Kind_MSA128CtrlRegs:
- return Mips::MSACtrlRegClassID;
- default:
- return -1;
- }
-}
-
bool MipsAssemblerOptions::setATReg(unsigned Reg) {
if (Reg > 31)
return false;
@@ -1078,53 +1572,34 @@ bool MipsAssemblerOptions::setATReg(unsigned Reg) {
return true;
}
-int MipsAsmParser::getATReg() { return Options.getATRegNum(); }
+int MipsAsmParser::getATReg(SMLoc Loc) {
+ int AT = Options.getATRegNum();
+ if (AT == 0)
+ reportParseError(Loc,
+ "Pseudo instruction requires $at, which is not available");
+ return AT;
+}
unsigned MipsAsmParser::getReg(int RC, int RegNo) {
return *(getContext().getRegisterInfo()->getRegClass(RC).begin() + RegNo);
}
+unsigned MipsAsmParser::getGPR(int RegNo) {
+ return getReg(isGP64bit() ? Mips::GPR64RegClassID : Mips::GPR32RegClassID,
+ RegNo);
+}
+
int MipsAsmParser::matchRegisterByNumber(unsigned RegNum, unsigned RegClass) {
if (RegNum >
- getContext().getRegisterInfo()->getRegClass(RegClass).getNumRegs())
+ getContext().getRegisterInfo()->getRegClass(RegClass).getNumRegs() - 1)
return -1;
return getReg(RegClass, RegNum);
}
-int MipsAsmParser::tryParseRegister(bool is64BitReg) {
- const AsmToken &Tok = Parser.getTok();
- int RegNum = -1;
-
- if (Tok.is(AsmToken::Identifier)) {
- std::string lowerCase = Tok.getString().lower();
- RegNum = matchRegisterName(lowerCase, is64BitReg);
- } else if (Tok.is(AsmToken::Integer))
- RegNum = matchRegisterByNumber(static_cast<unsigned>(Tok.getIntVal()),
- is64BitReg ? Mips::GPR64RegClassID
- : Mips::GPR32RegClassID);
- return RegNum;
-}
-
-bool MipsAsmParser::tryParseRegisterOperand(
- SmallVectorImpl<MCParsedAsmOperand *> &Operands, bool is64BitReg) {
-
- SMLoc S = Parser.getTok().getLoc();
- int RegNo = -1;
-
- RegNo = tryParseRegister(is64BitReg);
- if (RegNo == -1)
- return true;
+bool MipsAsmParser::ParseOperand(OperandVector &Operands, StringRef Mnemonic) {
+ DEBUG(dbgs() << "ParseOperand\n");
- Operands.push_back(
- MipsOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
- Parser.Lex(); // Eat register token.
- return false;
-}
-
-bool
-MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
- StringRef Mnemonic) {
// Check if the current operand has a custom associated parser, if so, try to
// custom parse the operand, or fallback to the general approach.
OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
@@ -1136,6 +1611,8 @@ MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
if (ResTy == MatchOperand_ParseFail)
return true;
+ DEBUG(dbgs() << ".. Generic Parser\n");
+
switch (getLexer().getKind()) {
default:
Error(Parser.getTok().getLoc(), "unexpected token in operand");
@@ -1143,29 +1620,15 @@ MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
case AsmToken::Dollar: {
// Parse the register.
SMLoc S = Parser.getTok().getLoc();
- Parser.Lex(); // Eat dollar token.
- // Parse the register operand.
- if (!tryParseRegisterOperand(Operands, isMips64())) {
- if (getLexer().is(AsmToken::LParen)) {
- // Check if it is indexed addressing operand.
- Operands.push_back(MipsOperand::CreateToken("(", S));
- Parser.Lex(); // Eat the parenthesis.
- if (getLexer().isNot(AsmToken::Dollar))
- return true;
-
- Parser.Lex(); // Eat the dollar
- if (tryParseRegisterOperand(Operands, isMips64()))
- return true;
- if (!getLexer().is(AsmToken::RParen))
- return true;
-
- S = Parser.getTok().getLoc();
- Operands.push_back(MipsOperand::CreateToken(")", S));
- Parser.Lex();
- }
+ // Almost all registers have been parsed by custom parsers. There is only
+ // one exception to this. $zero (and its alias $0) will reach this point
+ // for div, divu, and similar instructions because it is not an operand
+ // to the instruction definition but an explicit register. Special case
+ // this situation for now.
+ if (ParseAnyRegister(Operands) != MatchOperand_NoMatch)
return false;
- }
+
// Maybe it is a symbol reference.
StringRef Identifier;
if (Parser.parseIdentifier(Identifier))
@@ -1177,47 +1640,19 @@ MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
const MCExpr *Res =
MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None, getContext());
- Operands.push_back(MipsOperand::CreateImm(Res, S, E));
+ Operands.push_back(MipsOperand::CreateImm(Res, S, E, *this));
return false;
}
- case AsmToken::Identifier:
- // For instruction aliases like "bc1f $Label" dedicated parser will
- // eat the '$' sign before failing. So in order to look for appropriate
- // label we must check first if we have already consumed '$'.
- if (hasConsumedDollar) {
- hasConsumedDollar = false;
- SMLoc S = Parser.getTok().getLoc();
- StringRef Identifier;
- if (Parser.parseIdentifier(Identifier))
- return true;
- SMLoc E =
- SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
- MCSymbol *Sym = getContext().GetOrCreateSymbol("$" + Identifier);
- // Create a symbol reference.
- const MCExpr *Res =
- MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None, getContext());
-
- Operands.push_back(MipsOperand::CreateImm(Res, S, E));
- return false;
- }
- // Look for the existing symbol, we should check if
- // we need to assigne the propper RegisterKind.
- if (searchSymbolAlias(Operands, MipsOperand::Kind_None))
- return false;
// Else drop to expression parsing.
case AsmToken::LParen:
case AsmToken::Minus:
case AsmToken::Plus:
case AsmToken::Integer:
+ case AsmToken::Tilde:
case AsmToken::String: {
- // Quoted label names.
- const MCExpr *IdVal;
- SMLoc S = Parser.getTok().getLoc();
- if (getParser().parseExpression(IdVal))
- return true;
- SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
- Operands.push_back(MipsOperand::CreateImm(IdVal, S, E));
- return false;
+ DEBUG(dbgs() << ".. generic integer\n");
+ OperandMatchResultTy ResTy = ParseImm(Operands);
+ return ResTy != MatchOperand_Success;
}
case AsmToken::Percent: {
// It is a symbol reference or constant expression.
@@ -1228,7 +1663,7 @@ MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
- Operands.push_back(MipsOperand::CreateImm(IdVal, S, E));
+ Operands.push_back(MipsOperand::CreateImm(IdVal, S, E, *this));
return false;
} // case AsmToken::Percent
} // switch(getLexer().getKind())
@@ -1240,23 +1675,30 @@ const MCExpr *MipsAsmParser::evaluateRelocExpr(const MCExpr *Expr,
const MCExpr *Res;
// Check the type of the expression.
if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Expr)) {
- // It's a constant, evaluate lo or hi value.
- if (RelocStr == "lo") {
- short Val = MCE->getValue();
- Res = MCConstantExpr::Create(Val, getContext());
- } else if (RelocStr == "hi") {
- int Val = MCE->getValue();
- int LoSign = Val & 0x8000;
- Val = (Val & 0xffff0000) >> 16;
- // Lower part is treated as a signed int, so if it is negative
- // we must add 1 to the hi part to compensate.
- if (LoSign)
- Val++;
- Res = MCConstantExpr::Create(Val, getContext());
- } else {
- llvm_unreachable("Invalid RelocStr value");
+ // It's a constant, evaluate reloc value.
+ int16_t Val;
+ switch (getVariantKind(RelocStr)) {
+ case MCSymbolRefExpr::VK_Mips_ABS_LO:
+ // Get the 1st 16-bits.
+ Val = MCE->getValue() & 0xffff;
+ break;
+ case MCSymbolRefExpr::VK_Mips_ABS_HI:
+ // Get the 2nd 16-bits. Also add 1 if bit 15 is 1, to compensate for low
+ // 16 bits being negative.
+ Val = ((MCE->getValue() + 0x8000) >> 16) & 0xffff;
+ break;
+ case MCSymbolRefExpr::VK_Mips_HIGHER:
+ // Get the 3rd 16-bits.
+ Val = ((MCE->getValue() + 0x80008000LL) >> 32) & 0xffff;
+ break;
+ case MCSymbolRefExpr::VK_Mips_HIGHEST:
+ // Get the 4th 16-bits.
+ Val = ((MCE->getValue() + 0x800080008000LL) >> 48) & 0xffff;
+ break;
+ default:
+ report_fatal_error("Unsupported reloc value!");
}
- return Res;
+ return MCConstantExpr::Create(Val, getContext());
}
if (const MCSymbolRefExpr *MSRE = dyn_cast<MCSymbolRefExpr>(Expr)) {
@@ -1268,6 +1710,12 @@ const MCExpr *MipsAsmParser::evaluateRelocExpr(const MCExpr *Expr,
}
if (const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr)) {
+ MCSymbolRefExpr::VariantKind VK = getVariantKind(RelocStr);
+
+ // Try to create target expression.
+ if (MipsMCExpr::isSupportedBinaryExpr(VK, BE))
+ return MipsMCExpr::Create(VK, Expr, getContext());
+
const MCExpr *LExp = evaluateRelocExpr(BE->getLHS(), RelocStr);
const MCExpr *RExp = evaluateRelocExpr(BE->getRHS(), RelocStr);
Res = MCBinaryExpr::Create(BE->getOpcode(), LExp, RExp, getContext());
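
A worked example of the constant folding above (address chosen arbitrarily): %lo takes the low 16 bits, and %hi adds 0x8000 before shifting so that the sign-extended %lo is compensated:

    value      = 0x1234ffff
    %lo(value) = 0xffff                         # -1 when sign-extended
    %hi(value) = (0x1234ffff + 0x8000) >> 16    # = 0x1235
    # check: (0x1235 << 16) + (-1) == 0x1234ffff
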
@@ -1298,8 +1746,8 @@ bool MipsAsmParser::isEvaluated(const MCExpr *Expr) {
}
case MCExpr::Unary:
return isEvaluated(cast<MCUnaryExpr>(Expr)->getSubExpr());
- default:
- return false;
+ case MCExpr::Target:
+ return true;
}
return false;
}
@@ -1348,9 +1796,27 @@ bool MipsAsmParser::parseRelocOperand(const MCExpr *&Res) {
bool MipsAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
SMLoc &EndLoc) {
- StartLoc = Parser.getTok().getLoc();
- RegNo = tryParseRegister(isMips64());
- EndLoc = Parser.getTok().getLoc();
+ SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
+ OperandMatchResultTy ResTy = ParseAnyRegister(Operands);
+ if (ResTy == MatchOperand_Success) {
+ assert(Operands.size() == 1);
+ MipsOperand &Operand = static_cast<MipsOperand &>(*Operands.front());
+ StartLoc = Operand.getStartLoc();
+ EndLoc = Operand.getEndLoc();
+
+ // AFAIK, we only support numeric registers and named GPRs in CFI
+ // directives.
+ // Don't worry about eating tokens before failing. Using an unrecognised
+ // register is a parse error.
+ if (Operand.isGPRAsmReg()) {
+ // Resolve to GPR32 or GPR64 appropriately.
+ RegNo = isGP64bit() ? Operand.getGPR64Reg() : Operand.getGPR32Reg();
+ }
+
+ return (RegNo == (unsigned)-1);
+ }
+
+ assert(Operands.size() == 0);
return (RegNo == (unsigned)-1);
}
@@ -1382,10 +1848,10 @@ bool MipsAsmParser::parseMemOffset(const MCExpr *&Res, bool isParenExpr) {
return Result;
}
-MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand(
- SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
-
- const MCExpr *IdVal = 0;
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseMemOperand(OperandVector &Operands) {
+ DEBUG(dbgs() << "parseMemOperand\n");
+ const MCExpr *IdVal = nullptr;
SMLoc S;
bool isParenExpr = false;
MipsAsmParser::OperandMatchResultTy Res = MatchOperand_NoMatch;
@@ -1403,11 +1869,11 @@ MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand(
const AsmToken &Tok = Parser.getTok(); // Get the next token.
if (Tok.isNot(AsmToken::LParen)) {
- MipsOperand *Mnemonic = static_cast<MipsOperand *>(Operands[0]);
- if (Mnemonic->getToken() == "la") {
+ MipsOperand &Mnemonic = static_cast<MipsOperand &>(*Operands[0]);
+ if (Mnemonic.getToken() == "la") {
SMLoc E =
SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
- Operands.push_back(MipsOperand::CreateImm(IdVal, S, E));
+ Operands.push_back(MipsOperand::CreateImm(IdVal, S, E, *this));
return MatchOperand_Success;
}
if (Tok.is(AsmToken::EndOfStatement)) {
@@ -1415,8 +1881,11 @@ MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand(
SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
// Zero register assumed, add a memory operand with ZERO as its base.
- Operands.push_back(MipsOperand::CreateMem(
- isMips64() ? Mips::ZERO_64 : Mips::ZERO, IdVal, S, E));
+ // "Base" will be managed by k_Memory.
+ auto Base = MipsOperand::CreateGPRReg(0, getContext().getRegisterInfo(),
+ S, E, *this);
+ Operands.push_back(
+ MipsOperand::CreateMem(std::move(Base), IdVal, S, E, *this));
return MatchOperand_Success;
}
Error(Parser.getTok().getLoc(), "'(' expected");
@@ -1426,8 +1895,7 @@ MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand(
Parser.Lex(); // Eat the '(' token.
}
- Res = parseRegs(Operands, isMips64() ? (int)MipsOperand::Kind_GPR64
- : (int)MipsOperand::Kind_GPR32);
+ Res = ParseAnyRegister(Operands);
if (Res != MatchOperand_Success)
return Res;
@@ -1440,13 +1908,14 @@ MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand(
Parser.Lex(); // Eat the ')' token.
- if (IdVal == 0)
+ if (!IdVal)
IdVal = MCConstantExpr::Create(0, getContext());
// Replace the register operand with the memory operand.
- MipsOperand *op = static_cast<MipsOperand *>(Operands.back());
- int RegNo = op->getReg();
+ std::unique_ptr<MipsOperand> op(
+ static_cast<MipsOperand *>(Operands.back().release()));
// Remove the register from the operands.
+ // "op" will be managed by k_Memory.
Operands.pop_back();
// Add the memory operand.
if (const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(IdVal)) {
@@ -1458,603 +1927,195 @@ MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand(
getContext());
}
- Operands.push_back(MipsOperand::CreateMem(RegNo, IdVal, S, E));
- delete op;
+ Operands.push_back(MipsOperand::CreateMem(std::move(op), IdVal, S, E, *this));
return MatchOperand_Success;
}
-bool MipsAsmParser::parsePtrReg(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
- int RegKind) {
- // If the first token is not '$' we have an error.
- if (Parser.getTok().isNot(AsmToken::Dollar))
- return false;
+bool MipsAsmParser::searchSymbolAlias(OperandVector &Operands) {
- SMLoc S = Parser.getTok().getLoc();
- Parser.Lex();
- AsmToken::TokenKind TkKind = getLexer().getKind();
- int Reg;
-
- if (TkKind == AsmToken::Integer) {
- Reg = matchRegisterByNumber(Parser.getTok().getIntVal(),
- regKindToRegClass(RegKind));
- if (Reg == -1)
- return false;
- } else if (TkKind == AsmToken::Identifier) {
- if ((Reg = matchCPURegisterName(Parser.getTok().getString().lower())) == -1)
+ MCSymbol *Sym = getContext().LookupSymbol(Parser.getTok().getIdentifier());
+ if (Sym) {
+ SMLoc S = Parser.getTok().getLoc();
+ const MCExpr *Expr;
+ if (Sym->isVariable())
+ Expr = Sym->getVariableValue();
+ else
return false;
- Reg = getReg(regKindToRegClass(RegKind), Reg);
- } else {
- return false;
+ if (Expr->getKind() == MCExpr::SymbolRef) {
+ const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
+ const StringRef DefSymbol = Ref->getSymbol().getName();
+ if (DefSymbol.startswith("$")) {
+ OperandMatchResultTy ResTy =
+ MatchAnyRegisterNameWithoutDollar(Operands, DefSymbol.substr(1), S);
+ if (ResTy == MatchOperand_Success) {
+ Parser.Lex();
+ return true;
+ } else if (ResTy == MatchOperand_ParseFail)
+ llvm_unreachable("Should never ParseFail");
+ return false;
+ }
+ } else if (Expr->getKind() == MCExpr::Constant) {
+ Parser.Lex();
+ const MCConstantExpr *Const = static_cast<const MCConstantExpr *>(Expr);
+ Operands.push_back(
+ MipsOperand::CreateImm(Const, S, Parser.getTok().getLoc(), *this));
+ return true;
+ }
}
-
- MipsOperand *Op = MipsOperand::CreatePtrReg(Reg, S, Parser.getTok().getLoc());
- Op->setRegKind((MipsOperand::RegisterKind)RegKind);
- Operands.push_back(Op);
- Parser.Lex();
- return true;
-}
-
-MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parsePtrReg(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
- MipsOperand::RegisterKind RegKind =
- isN64() ? MipsOperand::Kind_GPR64 : MipsOperand::Kind_GPR32;
-
- // Parse index register.
- if (!parsePtrReg(Operands, RegKind))
- return MatchOperand_NoMatch;
-
- // Parse '('.
- if (Parser.getTok().isNot(AsmToken::LParen))
- return MatchOperand_NoMatch;
-
- Operands.push_back(MipsOperand::CreateToken("(", getLexer().getLoc()));
- Parser.Lex();
-
- // Parse base register.
- if (!parsePtrReg(Operands, RegKind))
- return MatchOperand_NoMatch;
-
- // Parse ')'.
- if (Parser.getTok().isNot(AsmToken::RParen))
- return MatchOperand_NoMatch;
-
- Operands.push_back(MipsOperand::CreateToken(")", getLexer().getLoc()));
- Parser.Lex();
-
- return MatchOperand_Success;
+ return false;
}
MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
- int RegKind) {
- MipsOperand::RegisterKind Kind = (MipsOperand::RegisterKind)RegKind;
- if (getLexer().getKind() == AsmToken::Identifier && !hasConsumedDollar) {
- if (searchSymbolAlias(Operands, Kind))
- return MatchOperand_Success;
- return MatchOperand_NoMatch;
- }
- SMLoc S = Parser.getTok().getLoc();
- // If the first token is not '$', we have an error.
- if (Parser.getTok().isNot(AsmToken::Dollar) && !hasConsumedDollar)
- return MatchOperand_NoMatch;
- if (!hasConsumedDollar) {
- Parser.Lex(); // Eat the '$'
- hasConsumedDollar = true;
- }
- if (getLexer().getKind() == AsmToken::Identifier) {
- int RegNum = -1;
- std::string RegName = Parser.getTok().getString().lower();
- // Match register by name
- switch (RegKind) {
- case MipsOperand::Kind_GPR32:
- case MipsOperand::Kind_GPR64:
- RegNum = matchCPURegisterName(RegName);
- break;
- case MipsOperand::Kind_AFGR64Regs:
- case MipsOperand::Kind_FGR64Regs:
- case MipsOperand::Kind_FGR32Regs:
- case MipsOperand::Kind_FGRH32Regs:
- RegNum = matchFPURegisterName(RegName);
- if (RegKind == MipsOperand::Kind_AFGR64Regs)
- RegNum /= 2;
- else if (RegKind == MipsOperand::Kind_FGRH32Regs && !isFP64())
- if (RegNum != -1 && RegNum % 2 != 0)
- Warning(S, "Float register should be even.");
- break;
- case MipsOperand::Kind_FCCRegs:
- RegNum = matchFCCRegisterName(RegName);
- break;
- case MipsOperand::Kind_ACC64DSP:
- RegNum = matchACRegisterName(RegName);
- break;
- default:
- break; // No match, value is set to -1.
- }
- // No match found, return _NoMatch to give a chance to other round.
- if (RegNum < 0)
- return MatchOperand_NoMatch;
-
- int RegVal = getReg(regKindToRegClass(Kind), RegNum);
- if (RegVal == -1)
- return MatchOperand_NoMatch;
-
- MipsOperand *Op =
- MipsOperand::CreateReg(RegVal, S, Parser.getTok().getLoc());
- Op->setRegKind(Kind);
- Operands.push_back(Op);
- hasConsumedDollar = false;
- Parser.Lex(); // Eat the register name.
- return MatchOperand_Success;
- } else if (getLexer().getKind() == AsmToken::Integer) {
- unsigned RegNum = Parser.getTok().getIntVal();
- if (Kind == MipsOperand::Kind_HWRegs) {
- if (RegNum != 29)
- return MatchOperand_NoMatch;
- // Only hwreg 29 is supported, found at index 0.
- RegNum = 0;
- }
- int Reg = matchRegisterByNumber(RegNum, regKindToRegClass(Kind));
- if (Reg == -1)
- return MatchOperand_NoMatch;
- MipsOperand *Op = MipsOperand::CreateReg(Reg, S, Parser.getTok().getLoc());
- Op->setRegKind(Kind);
- Operands.push_back(Op);
- hasConsumedDollar = false;
- Parser.Lex(); // Eat the register number.
- if ((RegKind == MipsOperand::Kind_GPR32) &&
- (getLexer().is(AsmToken::LParen))) {
- // Check if it is indexed addressing operand.
- Operands.push_back(MipsOperand::CreateToken("(", getLexer().getLoc()));
- Parser.Lex(); // Eat the parenthesis.
- if (parseRegs(Operands, RegKind) != MatchOperand_Success)
- return MatchOperand_NoMatch;
- if (getLexer().isNot(AsmToken::RParen))
- return MatchOperand_NoMatch;
- Operands.push_back(MipsOperand::CreateToken(")", getLexer().getLoc()));
- Parser.Lex();
- }
+MipsAsmParser::MatchAnyRegisterNameWithoutDollar(OperandVector &Operands,
+ StringRef Identifier,
+ SMLoc S) {
+ int Index = matchCPURegisterName(Identifier);
+ if (Index != -1) {
+ Operands.push_back(MipsOperand::CreateGPRReg(
+ Index, getContext().getRegisterInfo(), S, getLexer().getLoc(), *this));
return MatchOperand_Success;
}
- return MatchOperand_NoMatch;
-}
-
-bool MipsAsmParser::validateMSAIndex(int Val, int RegKind) {
- MipsOperand::RegisterKind Kind = (MipsOperand::RegisterKind)RegKind;
- if (Val < 0)
- return false;
-
- switch (Kind) {
- default:
- return false;
- case MipsOperand::Kind_MSA128BRegs:
- return Val < 16;
- case MipsOperand::Kind_MSA128HRegs:
- return Val < 8;
- case MipsOperand::Kind_MSA128WRegs:
- return Val < 4;
- case MipsOperand::Kind_MSA128DRegs:
- return Val < 2;
+ Index = matchFPURegisterName(Identifier);
+ if (Index != -1) {
+ Operands.push_back(MipsOperand::CreateFGRReg(
+ Index, getContext().getRegisterInfo(), S, getLexer().getLoc(), *this));
+ return MatchOperand_Success;
}
-}
-MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseMSARegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
- int RegKind) {
- MipsOperand::RegisterKind Kind = (MipsOperand::RegisterKind)RegKind;
- SMLoc S = Parser.getTok().getLoc();
- std::string RegName;
-
- if (Parser.getTok().isNot(AsmToken::Dollar))
- return MatchOperand_NoMatch;
-
- switch (RegKind) {
- default:
- return MatchOperand_ParseFail;
- case MipsOperand::Kind_MSA128BRegs:
- case MipsOperand::Kind_MSA128HRegs:
- case MipsOperand::Kind_MSA128WRegs:
- case MipsOperand::Kind_MSA128DRegs:
- break;
+ Index = matchFCCRegisterName(Identifier);
+ if (Index != -1) {
+ Operands.push_back(MipsOperand::CreateFCCReg(
+ Index, getContext().getRegisterInfo(), S, getLexer().getLoc(), *this));
+ return MatchOperand_Success;
}
- Parser.Lex(); // Eat the '$'.
- if (getLexer().getKind() == AsmToken::Identifier)
- RegName = Parser.getTok().getString().lower();
- else
- return MatchOperand_ParseFail;
-
- int RegNum = matchMSA128RegisterName(RegName);
-
- if (RegNum < 0 || RegNum > 31)
- return MatchOperand_ParseFail;
-
- int RegVal = getReg(regKindToRegClass(Kind), RegNum);
- if (RegVal == -1)
- return MatchOperand_ParseFail;
-
- MipsOperand *Op = MipsOperand::CreateReg(RegVal, S, Parser.getTok().getLoc());
- Op->setRegKind(Kind);
- Operands.push_back(Op);
-
- Parser.Lex(); // Eat the register identifier.
-
- // MSA registers may be suffixed with an index in the form of:
- // 1) Immediate expression.
- // 2) General Purpose Register.
- // Examples:
- // 1) copy_s.b $29,$w0[0]
- // 2) sld.b $w0,$w1[$1]
-
- if (Parser.getTok().isNot(AsmToken::LBrac))
+ Index = matchACRegisterName(Identifier);
+ if (Index != -1) {
+ Operands.push_back(MipsOperand::CreateACCReg(
+ Index, getContext().getRegisterInfo(), S, getLexer().getLoc(), *this));
return MatchOperand_Success;
+ }
- MipsOperand *Mnemonic = static_cast<MipsOperand *>(Operands[0]);
-
- Operands.push_back(MipsOperand::CreateToken("[", Parser.getTok().getLoc()));
- Parser.Lex(); // Parse the '[' token.
-
- if (Parser.getTok().is(AsmToken::Dollar)) {
- // This must be a GPR.
- MipsOperand *RegOp;
- SMLoc VIdx = Parser.getTok().getLoc();
- Parser.Lex(); // Parse the '$' token.
-
- // GPR have aliases and we must account for that. Example: $30 == $fp
- if (getLexer().getKind() == AsmToken::Integer) {
- unsigned RegNum = Parser.getTok().getIntVal();
- int Reg = matchRegisterByNumber(
- RegNum, regKindToRegClass(MipsOperand::Kind_GPR32));
- if (Reg == -1) {
- Error(VIdx, "invalid general purpose register");
- return MatchOperand_ParseFail;
- }
-
- RegOp = MipsOperand::CreateReg(Reg, VIdx, Parser.getTok().getLoc());
- } else if (getLexer().getKind() == AsmToken::Identifier) {
- int RegNum = -1;
- std::string RegName = Parser.getTok().getString().lower();
-
- RegNum = matchCPURegisterName(RegName);
- if (RegNum == -1) {
- Error(VIdx, "general purpose register expected");
- return MatchOperand_ParseFail;
- }
- RegNum = getReg(regKindToRegClass(MipsOperand::Kind_GPR32), RegNum);
- RegOp = MipsOperand::CreateReg(RegNum, VIdx, Parser.getTok().getLoc());
- } else
- return MatchOperand_ParseFail;
-
- RegOp->setRegKind(MipsOperand::Kind_GPR32);
- Operands.push_back(RegOp);
- Parser.Lex(); // Eat the register identifier.
-
- if (Parser.getTok().isNot(AsmToken::RBrac))
- return MatchOperand_ParseFail;
-
- Operands.push_back(MipsOperand::CreateToken("]", Parser.getTok().getLoc()));
- Parser.Lex(); // Parse the ']' token.
-
+ Index = matchMSA128RegisterName(Identifier);
+ if (Index != -1) {
+ Operands.push_back(MipsOperand::CreateMSA128Reg(
+ Index, getContext().getRegisterInfo(), S, getLexer().getLoc(), *this));
return MatchOperand_Success;
}
- // The index must be a constant expression then.
- SMLoc VIdx = Parser.getTok().getLoc();
- const MCExpr *ImmVal;
-
- if (getParser().parseExpression(ImmVal))
- return MatchOperand_ParseFail;
-
- const MCConstantExpr *expr = dyn_cast<MCConstantExpr>(ImmVal);
- if (!expr || !validateMSAIndex((int)expr->getValue(), Kind)) {
- Error(VIdx, "invalid immediate value");
- return MatchOperand_ParseFail;
+ Index = matchMSA128CtrlRegisterName(Identifier);
+ if (Index != -1) {
+ Operands.push_back(MipsOperand::CreateMSACtrlReg(
+ Index, getContext().getRegisterInfo(), S, getLexer().getLoc(), *this));
+ return MatchOperand_Success;
}
- SMLoc E = Parser.getTok().getEndLoc();
-
- if (Parser.getTok().isNot(AsmToken::RBrac))
- return MatchOperand_ParseFail;
-
- bool insve =
- Mnemonic->getToken() == "insve.b" || Mnemonic->getToken() == "insve.h" ||
- Mnemonic->getToken() == "insve.w" || Mnemonic->getToken() == "insve.d";
-
- // The second vector index of insve instructions is always 0.
- if (insve && Operands.size() > 6) {
- if (expr->getValue() != 0) {
- Error(VIdx, "immediate value must be 0");
- return MatchOperand_ParseFail;
- }
- Operands.push_back(MipsOperand::CreateToken("0", VIdx));
- } else
- Operands.push_back(MipsOperand::CreateImm(expr, VIdx, E));
-
- Operands.push_back(MipsOperand::CreateToken("]", Parser.getTok().getLoc()));
-
- Parser.Lex(); // Parse the ']' token.
-
- return MatchOperand_Success;
-}
-
-MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseMSACtrlRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
- int RegKind) {
- MipsOperand::RegisterKind Kind = (MipsOperand::RegisterKind)RegKind;
-
- if (Kind != MipsOperand::Kind_MSA128CtrlRegs)
- return MatchOperand_NoMatch;
-
- if (Parser.getTok().isNot(AsmToken::Dollar))
- return MatchOperand_ParseFail;
-
- SMLoc S = Parser.getTok().getLoc();
-
- Parser.Lex(); // Eat the '$' symbol.
-
- int RegNum = -1;
- if (getLexer().getKind() == AsmToken::Identifier)
- RegNum = matchMSA128CtrlRegisterName(Parser.getTok().getString().lower());
- else if (getLexer().getKind() == AsmToken::Integer)
- RegNum = Parser.getTok().getIntVal();
- else
- return MatchOperand_ParseFail;
-
- if (RegNum < 0 || RegNum > 7)
- return MatchOperand_ParseFail;
-
- int RegVal = getReg(regKindToRegClass(Kind), RegNum);
- if (RegVal == -1)
- return MatchOperand_ParseFail;
-
- MipsOperand *RegOp =
- MipsOperand::CreateReg(RegVal, S, Parser.getTok().getLoc());
- RegOp->setRegKind(MipsOperand::Kind_MSA128CtrlRegs);
- Operands.push_back(RegOp);
- Parser.Lex(); // Eat the register identifier.
-
- return MatchOperand_Success;
-}
-
-MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseGPR64(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
-
- if (!isMips64())
- return MatchOperand_NoMatch;
- return parseRegs(Operands, (int)MipsOperand::Kind_GPR64);
-}
-
-MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseGPR32(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
- return parseRegs(Operands, (int)MipsOperand::Kind_GPR32);
-}
-
-MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseAFGR64Regs(
- SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
-
- if (isFP64())
- return MatchOperand_NoMatch;
- return parseRegs(Operands, (int)MipsOperand::Kind_AFGR64Regs);
-}
-
-MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseFGR64Regs(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
- if (!isFP64())
- return MatchOperand_NoMatch;
- return parseRegs(Operands, (int)MipsOperand::Kind_FGR64Regs);
+ return MatchOperand_NoMatch;
}
MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseFGR32Regs(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
- return parseRegs(Operands, (int)MipsOperand::Kind_FGR32Regs);
-}
-
-MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseFGRH32Regs(
- SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
- return parseRegs(Operands, (int)MipsOperand::Kind_FGRH32Regs);
-}
+MipsAsmParser::MatchAnyRegisterWithoutDollar(OperandVector &Operands, SMLoc S) {
+ auto Token = Parser.getLexer().peekTok(false);
+
+ if (Token.is(AsmToken::Identifier)) {
+ DEBUG(dbgs() << ".. identifier\n");
+ StringRef Identifier = Token.getIdentifier();
+ OperandMatchResultTy ResTy =
+ MatchAnyRegisterNameWithoutDollar(Operands, Identifier, S);
+ return ResTy;
+ } else if (Token.is(AsmToken::Integer)) {
+ DEBUG(dbgs() << ".. integer\n");
+ Operands.push_back(MipsOperand::CreateNumericReg(
+ Token.getIntVal(), getContext().getRegisterInfo(), S, Token.getLoc(),
+ *this));
+ return MatchOperand_Success;
+ }
-MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseFCCRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
- return parseRegs(Operands, (int)MipsOperand::Kind_FCCRegs);
-}
+ DEBUG(dbgs() << Parser.getTok().getKind() << "\n");
-MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseACC64DSP(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
- return parseRegs(Operands, (int)MipsOperand::Kind_ACC64DSP);
+ return MatchOperand_NoMatch;
}
MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseLO32DSP(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
- // If the first token is not '$' we have an error.
- if (Parser.getTok().isNot(AsmToken::Dollar))
- return MatchOperand_NoMatch;
-
- SMLoc S = Parser.getTok().getLoc();
- Parser.Lex(); // Eat the '$'
-
- const AsmToken &Tok = Parser.getTok(); // Get next token.
-
- if (Tok.isNot(AsmToken::Identifier))
- return MatchOperand_NoMatch;
+MipsAsmParser::ParseAnyRegister(OperandVector &Operands) {
+ DEBUG(dbgs() << "ParseAnyRegister\n");
- if (!Tok.getIdentifier().startswith("ac"))
- return MatchOperand_NoMatch;
+ auto Token = Parser.getTok();
- StringRef NumString = Tok.getIdentifier().substr(2);
+ SMLoc S = Token.getLoc();
- unsigned IntVal;
- if (NumString.getAsInteger(10, IntVal))
+ if (Token.isNot(AsmToken::Dollar)) {
+ DEBUG(dbgs() << ".. !$ -> try sym aliasing\n");
+ if (Token.is(AsmToken::Identifier)) {
+ if (searchSymbolAlias(Operands))
+ return MatchOperand_Success;
+ }
+ DEBUG(dbgs() << ".. !symalias -> NoMatch\n");
return MatchOperand_NoMatch;
+ }
+ DEBUG(dbgs() << ".. $\n");
- unsigned Reg = matchRegisterByNumber(IntVal, Mips::LO32DSPRegClassID);
-
- MipsOperand *Op = MipsOperand::CreateReg(Reg, S, Parser.getTok().getLoc());
- Op->setRegKind(MipsOperand::Kind_LO32DSP);
- Operands.push_back(Op);
-
- Parser.Lex(); // Eat the register number.
- return MatchOperand_Success;
+ OperandMatchResultTy ResTy = MatchAnyRegisterWithoutDollar(Operands, S);
+ if (ResTy == MatchOperand_Success) {
+ Parser.Lex(); // $
+ Parser.Lex(); // identifier
+ }
+ return ResTy;
}
MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseHI32DSP(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
- // If the first token is not '$' we have an error.
- if (Parser.getTok().isNot(AsmToken::Dollar))
+MipsAsmParser::ParseImm(OperandVector &Operands) {
+ switch (getLexer().getKind()) {
+ default:
return MatchOperand_NoMatch;
+ case AsmToken::LParen:
+ case AsmToken::Minus:
+ case AsmToken::Plus:
+ case AsmToken::Integer:
+ case AsmToken::Tilde:
+ case AsmToken::String:
+ break;
+ }
+ const MCExpr *IdVal;
SMLoc S = Parser.getTok().getLoc();
- Parser.Lex(); // Eat the '$'
-
- const AsmToken &Tok = Parser.getTok(); // Get next token.
-
- if (Tok.isNot(AsmToken::Identifier))
- return MatchOperand_NoMatch;
-
- if (!Tok.getIdentifier().startswith("ac"))
- return MatchOperand_NoMatch;
-
- StringRef NumString = Tok.getIdentifier().substr(2);
-
- unsigned IntVal;
- if (NumString.getAsInteger(10, IntVal))
- return MatchOperand_NoMatch;
-
- unsigned Reg = matchRegisterByNumber(IntVal, Mips::HI32DSPRegClassID);
-
- MipsOperand *Op = MipsOperand::CreateReg(Reg, S, Parser.getTok().getLoc());
- Op->setRegKind(MipsOperand::Kind_HI32DSP);
- Operands.push_back(Op);
+ if (getParser().parseExpression(IdVal))
+ return MatchOperand_ParseFail;
- Parser.Lex(); // Eat the register number.
+ SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+ Operands.push_back(MipsOperand::CreateImm(IdVal, S, E, *this));
return MatchOperand_Success;
}
MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseCOP2(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
- // If the first token is not '$' we have an error.
- if (Parser.getTok().isNot(AsmToken::Dollar))
- return MatchOperand_NoMatch;
-
- SMLoc S = Parser.getTok().getLoc();
- Parser.Lex(); // Eat the '$'
-
- const AsmToken &Tok = Parser.getTok(); // Get next token.
-
- if (Tok.isNot(AsmToken::Integer))
- return MatchOperand_NoMatch;
-
- unsigned IntVal = Tok.getIntVal();
-
- unsigned Reg = matchRegisterByNumber(IntVal, Mips::COP2RegClassID);
-
- MipsOperand *Op = MipsOperand::CreateReg(Reg, S, Parser.getTok().getLoc());
- Op->setRegKind(MipsOperand::Kind_COP2);
- Operands.push_back(Op);
+MipsAsmParser::ParseJumpTarget(OperandVector &Operands) {
+ DEBUG(dbgs() << "ParseJumpTarget\n");
- Parser.Lex(); // Eat the register number.
- return MatchOperand_Success;
-}
-
-MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMSA128BRegs(
- SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
- return parseMSARegs(Operands, (int)MipsOperand::Kind_MSA128BRegs);
-}
-
-MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMSA128HRegs(
- SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
- return parseMSARegs(Operands, (int)MipsOperand::Kind_MSA128HRegs);
-}
-
-MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMSA128WRegs(
- SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
- return parseMSARegs(Operands, (int)MipsOperand::Kind_MSA128WRegs);
-}
-
-MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMSA128DRegs(
- SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
- return parseMSARegs(Operands, (int)MipsOperand::Kind_MSA128DRegs);
-}
+ SMLoc S = getLexer().getLoc();
-MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMSA128CtrlRegs(
- SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
- return parseMSACtrlRegs(Operands, (int)MipsOperand::Kind_MSA128CtrlRegs);
-}
+ // Integers and expressions are acceptable
+ OperandMatchResultTy ResTy = ParseImm(Operands);
+ if (ResTy != MatchOperand_NoMatch)
+ return ResTy;
-bool MipsAsmParser::searchSymbolAlias(
- SmallVectorImpl<MCParsedAsmOperand *> &Operands, unsigned RegKind) {
+ // Registers are a valid target and have priority over symbols.
+ ResTy = ParseAnyRegister(Operands);
+ if (ResTy != MatchOperand_NoMatch)
+ return ResTy;
- MCSymbol *Sym = getContext().LookupSymbol(Parser.getTok().getIdentifier());
- if (Sym) {
- SMLoc S = Parser.getTok().getLoc();
- const MCExpr *Expr;
- if (Sym->isVariable())
- Expr = Sym->getVariableValue();
- else
- return false;
- if (Expr->getKind() == MCExpr::SymbolRef) {
- MipsOperand::RegisterKind Kind = (MipsOperand::RegisterKind)RegKind;
- const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
- const StringRef DefSymbol = Ref->getSymbol().getName();
- if (DefSymbol.startswith("$")) {
- int RegNum = -1;
- APInt IntVal(32, -1);
- if (!DefSymbol.substr(1).getAsInteger(10, IntVal))
- RegNum = matchRegisterByNumber(IntVal.getZExtValue(),
- isMips64() ? Mips::GPR64RegClassID
- : Mips::GPR32RegClassID);
- else {
- // Lookup for the register with the corresponding name.
- switch (Kind) {
- case MipsOperand::Kind_AFGR64Regs:
- case MipsOperand::Kind_FGR64Regs:
- RegNum = matchFPURegisterName(DefSymbol.substr(1));
- break;
- case MipsOperand::Kind_FGR32Regs:
- RegNum = matchFPURegisterName(DefSymbol.substr(1));
- break;
- case MipsOperand::Kind_GPR64:
- case MipsOperand::Kind_GPR32:
- default:
- RegNum = matchCPURegisterName(DefSymbol.substr(1));
- break;
- }
- if (RegNum > -1)
- RegNum = getReg(regKindToRegClass(Kind), RegNum);
- }
- if (RegNum > -1) {
- Parser.Lex();
- MipsOperand *op =
- MipsOperand::CreateReg(RegNum, S, Parser.getTok().getLoc());
- op->setRegKind(Kind);
- Operands.push_back(op);
- return true;
- }
- }
- } else if (Expr->getKind() == MCExpr::Constant) {
- Parser.Lex();
- const MCConstantExpr *Const = static_cast<const MCConstantExpr *>(Expr);
- MipsOperand *op =
- MipsOperand::CreateImm(Const, S, Parser.getTok().getLoc());
- Operands.push_back(op);
- return true;
- }
+ const MCExpr *Expr = nullptr;
+ if (Parser.parseExpression(Expr)) {
+ // We have no way of knowing if a symbol was consumed so we must ParseFail
+ return MatchOperand_ParseFail;
}
- return false;
-}
-
-MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseHWRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
- return parseRegs(Operands, (int)MipsOperand::Kind_HWRegs);
-}
-
-MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseCCRRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
- return parseRegs(Operands, (int)MipsOperand::Kind_CCRRegs);
+ Operands.push_back(
+ MipsOperand::CreateImm(Expr, S, getLexer().getLoc(), *this));
+ return MatchOperand_Success;
}
MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseInvNum(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+MipsAsmParser::parseInvNum(OperandVector &Operands) {
const MCExpr *IdVal;
// If the first token is '$' we may have register operand.
if (Parser.getTok().is(AsmToken::Dollar))
@@ -2067,12 +2128,12 @@ MipsAsmParser::parseInvNum(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
int64_t Val = MCE->getValue();
SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
Operands.push_back(MipsOperand::CreateImm(
- MCConstantExpr::Create(0 - Val, getContext()), S, E));
+ MCConstantExpr::Create(0 - Val, getContext()), S, E, *this));
return MatchOperand_Success;
}
MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseLSAImm(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+MipsAsmParser::ParseLSAImm(OperandVector &Operands) {
switch (getLexer().getKind()) {
default:
return MatchOperand_NoMatch;
@@ -2105,8 +2166,8 @@ MipsAsmParser::parseLSAImm(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
return MatchOperand_ParseFail;
}
- Operands.push_back(MipsOperand::CreateLSAImm(Expr, S,
- Parser.getTok().getLoc()));
+ Operands.push_back(
+ MipsOperand::CreateImm(Expr, S, Parser.getTok().getLoc(), *this));
return MatchOperand_Success;
}
@@ -2131,21 +2192,90 @@ MCSymbolRefExpr::VariantKind MipsAsmParser::getVariantKind(StringRef Symbol) {
.Case("got_ofst", MCSymbolRefExpr::VK_Mips_GOT_OFST)
.Case("hi(%neg(%gp_rel", MCSymbolRefExpr::VK_Mips_GPOFF_HI)
.Case("lo(%neg(%gp_rel", MCSymbolRefExpr::VK_Mips_GPOFF_LO)
+ .Case("got_hi", MCSymbolRefExpr::VK_Mips_GOT_HI16)
+ .Case("got_lo", MCSymbolRefExpr::VK_Mips_GOT_LO16)
+ .Case("call_hi", MCSymbolRefExpr::VK_Mips_CALL_HI16)
+ .Case("call_lo", MCSymbolRefExpr::VK_Mips_CALL_LO16)
+ .Case("higher", MCSymbolRefExpr::VK_Mips_HIGHER)
+ .Case("highest", MCSymbolRefExpr::VK_Mips_HIGHEST)
+ .Case("pcrel_hi", MCSymbolRefExpr::VK_Mips_PCREL_HI16)
+ .Case("pcrel_lo", MCSymbolRefExpr::VK_Mips_PCREL_LO16)
.Default(MCSymbolRefExpr::VK_None);
+ assert(VK != MCSymbolRefExpr::VK_None);
+
return VK;
}
-bool MipsAsmParser::ParseInstruction(
- ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+/// Sometimes (e.g. for load/stores) the operand may be followed immediately
+/// by this form:
+/// ::= '(', register, ')'
+/// Handle it before we iterate so we don't get tripped up by the lack of
+/// a comma.
+bool MipsAsmParser::ParseParenSuffix(StringRef Name, OperandVector &Operands) {
+ if (getLexer().is(AsmToken::LParen)) {
+ Operands.push_back(
+ MipsOperand::CreateToken("(", getLexer().getLoc(), *this));
+ Parser.Lex();
+ if (ParseOperand(Operands, Name)) {
+ SMLoc Loc = getLexer().getLoc();
+ Parser.eatToEndOfStatement();
+ return Error(Loc, "unexpected token in argument list");
+ }
+ if (Parser.getTok().isNot(AsmToken::RParen)) {
+ SMLoc Loc = getLexer().getLoc();
+ Parser.eatToEndOfStatement();
+ return Error(Loc, "unexpected token, expected ')'");
+ }
+ Operands.push_back(
+ MipsOperand::CreateToken(")", getLexer().getLoc(), *this));
+ Parser.Lex();
+ }
+ return false;
+}
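
Syntactically, the '(' suffix handled above is the base-register part of a memory operand, as in (registers arbitrary):

    lw $4, 8($29)      # "8" is one operand, "($29)" is the parenthesis suffix
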
+
+/// Sometimes (e.g. in MSA) the operand may be followed immediately by
+/// either one of these forms:
+/// ::= '[', register, ']'
+/// ::= '[', integer, ']'
+/// Handle it before we iterate so we don't get tripped up by the lack of
+/// a comma.
+bool MipsAsmParser::ParseBracketSuffix(StringRef Name,
+ OperandVector &Operands) {
+ if (getLexer().is(AsmToken::LBrac)) {
+ Operands.push_back(
+ MipsOperand::CreateToken("[", getLexer().getLoc(), *this));
+ Parser.Lex();
+ if (ParseOperand(Operands, Name)) {
+ SMLoc Loc = getLexer().getLoc();
+ Parser.eatToEndOfStatement();
+ return Error(Loc, "unexpected token in argument list");
+ }
+ if (Parser.getTok().isNot(AsmToken::RBrac)) {
+ SMLoc Loc = getLexer().getLoc();
+ Parser.eatToEndOfStatement();
+ return Error(Loc, "unexpected token, expected ']'");
+ }
+ Operands.push_back(
+ MipsOperand::CreateToken("]", getLexer().getLoc(), *this));
+ Parser.Lex();
+ }
+ return false;
+}
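
The '[' suffix covers MSA element indices; the two forms correspond to the examples in the comment removed earlier in this patch:

    copy_s.b $29, $w0[0]    # '[', integer, ']'
    sld.b    $w0, $w1[$1]   # '[', register, ']'
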
+
+bool MipsAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+ SMLoc NameLoc, OperandVector &Operands) {
+ DEBUG(dbgs() << "ParseInstruction\n");
+ // We have reached the first instruction; the module directive is forbidden
+ // after this point.
+ getTargetStreamer().setCanHaveModuleDir(false);
// Check if we have valid mnemonic
if (!mnemonicIsValid(Name, 0)) {
Parser.eatToEndOfStatement();
return Error(NameLoc, "Unknown instruction");
}
// First operand in MCInst is instruction mnemonic.
- Operands.push_back(MipsOperand::CreateToken(Name, NameLoc));
+ Operands.push_back(MipsOperand::CreateToken(Name, NameLoc, *this));
// Read the remaining operands.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
@@ -2155,6 +2285,9 @@ bool MipsAsmParser::ParseInstruction(
Parser.eatToEndOfStatement();
return Error(Loc, "unexpected token in argument list");
}
+ if (getLexer().is(AsmToken::LBrac) && ParseBracketSuffix(Name, Operands))
+ return true;
+ // AFAIK, parenthesis suffixes are never on the first operand
while (getLexer().is(AsmToken::Comma)) {
Parser.Lex(); // Eat the comma.
@@ -2164,6 +2297,13 @@ bool MipsAsmParser::ParseInstruction(
Parser.eatToEndOfStatement();
return Error(Loc, "unexpected token in argument list");
}
+ // Parse bracket and parenthesis suffixes before we iterate
+ if (getLexer().is(AsmToken::LBrac)) {
+ if (ParseBracketSuffix(Name, Operands))
+ return true;
+ } else if (getLexer().is(AsmToken::LParen) &&
+ ParseParenSuffix(Name, Operands))
+ return true;
}
}
if (getLexer().isNot(AsmToken::EndOfStatement)) {
@@ -2175,12 +2315,16 @@ bool MipsAsmParser::ParseInstruction(
return false;
}
-bool MipsAsmParser::reportParseError(StringRef ErrorMsg) {
+bool MipsAsmParser::reportParseError(Twine ErrorMsg) {
SMLoc Loc = getLexer().getLoc();
Parser.eatToEndOfStatement();
return Error(Loc, ErrorMsg);
}
+bool MipsAsmParser::reportParseError(SMLoc Loc, Twine ErrorMsg) {
+ return Error(Loc, ErrorMsg);
+}
+
bool MipsAsmParser::parseSetNoAtDirective() {
// Line should look like: ".set noat".
// set at reg to 0.
@@ -2222,7 +2366,7 @@ bool MipsAsmParser::parseSetAtDirective() {
return false;
}
- if (AtRegNo < 1 || AtRegNo > 31) {
+ if (AtRegNo < 0 || AtRegNo > 31) {
reportParseError("unexpected token in statement");
return false;
}
@@ -2253,6 +2397,7 @@ bool MipsAsmParser::parseSetReorderDirective() {
return false;
}
Options.setReorder();
+ getTargetStreamer().emitDirectiveSetReorder();
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
@@ -2265,6 +2410,7 @@ bool MipsAsmParser::parseSetNoReorderDirective() {
return false;
}
Options.setNoreorder();
+ getTargetStreamer().emitDirectiveSetNoReorder();
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
@@ -2297,6 +2443,44 @@ bool MipsAsmParser::parseSetNoMacroDirective() {
return false;
}
+bool MipsAsmParser::parseSetNoMips16Directive() {
+ Parser.Lex();
+ // If this is not the end of the statement, report an error.
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ reportParseError("unexpected token in statement");
+ return false;
+ }
+ // For now do nothing.
+ Parser.Lex(); // Consume the EndOfStatement.
+ return false;
+}
+
+bool MipsAsmParser::parseSetFpDirective() {
+ MipsABIFlagsSection::FpABIKind FpAbiVal;
+ // Line can be: .set fp=32
+ // .set fp=xx
+ // .set fp=64
+ Parser.Lex(); // Eat fp token
+ AsmToken Tok = Parser.getTok();
+ if (Tok.isNot(AsmToken::Equal)) {
+ reportParseError("unexpected token in statement");
+ return false;
+ }
+ Parser.Lex(); // Eat '=' token.
+ Tok = Parser.getTok();
+
+ if (!parseFpABIValue(FpAbiVal, ".set"))
+ return false;
+
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ reportParseError("unexpected token in statement");
+ return false;
+ }
+ getTargetStreamer().emitDirectiveSetFp(FpAbiVal);
+ Parser.Lex(); // Consume the EndOfStatement.
+ return false;
+}
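
The forms accepted here match the comment above, e.g.:

    .set fp=32
    .set fp=xx
    .set fp=64
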
+
bool MipsAsmParser::parseSetAssignment() {
StringRef Name;
const MCExpr *Value;
@@ -2308,22 +2492,7 @@ bool MipsAsmParser::parseSetAssignment() {
return reportParseError("unexpected token in .set directive");
Lex(); // Eat comma
- if (getLexer().is(AsmToken::Dollar)) {
- MCSymbol *Symbol;
- SMLoc DollarLoc = getLexer().getLoc();
- // Consume the dollar sign, and check for a following identifier.
- Parser.Lex();
- // We have a '$' followed by something, make sure they are adjacent.
- if (DollarLoc.getPointer() + 1 != getTok().getLoc().getPointer())
- return true;
- StringRef Res =
- StringRef(DollarLoc.getPointer(),
- getTok().getEndLoc().getPointer() - DollarLoc.getPointer());
- Symbol = getContext().GetOrCreateSymbol(Res);
- Parser.Lex();
- Value =
- MCSymbolRefExpr::Create(Symbol, MCSymbolRefExpr::VK_None, getContext());
- } else if (Parser.parseExpression(Value))
+ if (Parser.parseExpression(Value))
return reportParseError("expected valid expression after comma");
// Check if the Name already exists as a symbol.
@@ -2336,6 +2505,154 @@ bool MipsAsmParser::parseSetAssignment() {
return false;
}
+bool MipsAsmParser::parseSetFeature(uint64_t Feature) {
+ Parser.Lex();
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return reportParseError("unexpected token in .set directive");
+
+ switch (Feature) {
+ default:
+ llvm_unreachable("Unimplemented feature");
+ case Mips::FeatureDSP:
+ setFeatureBits(Mips::FeatureDSP, "dsp");
+ getTargetStreamer().emitDirectiveSetDsp();
+ break;
+ case Mips::FeatureMicroMips:
+ getTargetStreamer().emitDirectiveSetMicroMips();
+ break;
+ case Mips::FeatureMips16:
+ getTargetStreamer().emitDirectiveSetMips16();
+ break;
+ case Mips::FeatureMips32r2:
+ setFeatureBits(Mips::FeatureMips32r2, "mips32r2");
+ getTargetStreamer().emitDirectiveSetMips32R2();
+ break;
+ case Mips::FeatureMips64:
+ setFeatureBits(Mips::FeatureMips64, "mips64");
+ getTargetStreamer().emitDirectiveSetMips64();
+ break;
+ case Mips::FeatureMips64r2:
+ setFeatureBits(Mips::FeatureMips64r2, "mips64r2");
+ getTargetStreamer().emitDirectiveSetMips64R2();
+ break;
+ }
+ return false;
+}
+
+bool MipsAsmParser::eatComma(StringRef ErrorStr) {
+ if (getLexer().isNot(AsmToken::Comma)) {
+ SMLoc Loc = getLexer().getLoc();
+ Parser.eatToEndOfStatement();
+ return Error(Loc, ErrorStr);
+ }
+
+ Parser.Lex(); // Eat the comma.
+ return true;
+}
+
+bool MipsAsmParser::parseDirectiveCPLoad(SMLoc Loc) {
+ if (Options.isReorder())
+ Warning(Loc, ".cpload in reorder section");
+
+ // FIXME: Warn if cpload is used in Mips16 mode.
+
+ SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Reg;
+ OperandMatchResultTy ResTy = ParseAnyRegister(Reg);
+ if (ResTy == MatchOperand_NoMatch || ResTy == MatchOperand_ParseFail) {
+ reportParseError("expected register containing function address");
+ return false;
+ }
+
+ MipsOperand &RegOpnd = static_cast<MipsOperand &>(*Reg[0]);
+ if (!RegOpnd.isGPRAsmReg()) {
+ reportParseError(RegOpnd.getStartLoc(), "invalid register");
+ return false;
+ }
+
+ getTargetStreamer().emitDirectiveCpload(RegOpnd.getGPR32Reg());
+ return false;
+}
+
+bool MipsAsmParser::parseDirectiveCPSetup() {
+ unsigned FuncReg;
+ unsigned Save;
+ bool SaveIsReg = true;
+
+ SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> TmpReg;
+ OperandMatchResultTy ResTy = ParseAnyRegister(TmpReg);
+ if (ResTy == MatchOperand_NoMatch) {
+ reportParseError("expected register containing function address");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ MipsOperand &FuncRegOpnd = static_cast<MipsOperand &>(*TmpReg[0]);
+ if (!FuncRegOpnd.isGPRAsmReg()) {
+ reportParseError(FuncRegOpnd.getStartLoc(), "invalid register");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ FuncReg = FuncRegOpnd.getGPR32Reg();
+ TmpReg.clear();
+
+ if (!eatComma("expected comma parsing directive"))
+ return true;
+
+ ResTy = ParseAnyRegister(TmpReg);
+ if (ResTy == MatchOperand_NoMatch) {
+ const AsmToken &Tok = Parser.getTok();
+ if (Tok.is(AsmToken::Integer)) {
+ Save = Tok.getIntVal();
+ SaveIsReg = false;
+ Parser.Lex();
+ } else {
+ reportParseError("expected save register or stack offset");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+ } else {
+ MipsOperand &SaveOpnd = static_cast<MipsOperand &>(*TmpReg[0]);
+ if (!SaveOpnd.isGPRAsmReg()) {
+ reportParseError(SaveOpnd.getStartLoc(), "invalid register");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+ Save = SaveOpnd.getGPR32Reg();
+ }
+
+ if (!eatComma("expected comma parsing directive"))
+ return true;
+
+ StringRef Name;
+ if (Parser.parseIdentifier(Name))
+ reportParseError("expected identifier");
+ MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
+
+ getTargetStreamer().emitDirectiveCpsetup(FuncReg, Save, *Sym, SaveIsReg);
+ return false;
+}
+
+bool MipsAsmParser::parseDirectiveNaN() {
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ const AsmToken &Tok = Parser.getTok();
+
+ if (Tok.getString() == "2008") {
+ Parser.Lex();
+ getTargetStreamer().emitDirectiveNaN2008();
+ return false;
+ } else if (Tok.getString() == "legacy") {
+ Parser.Lex();
+ getTargetStreamer().emitDirectiveNaNLegacy();
+ return false;
+ }
+ }
+ // If we don't recognize the option passed to the .nan
+ // directive (e.g. no option or unknown option), emit an error.
+ reportParseError("invalid option in .nan directive");
+ return false;
+}
+
bool MipsAsmParser::parseDirectiveSet() {
// Get the next token.
@@ -2345,6 +2662,8 @@ bool MipsAsmParser::parseDirectiveSet() {
return parseSetNoAtDirective();
} else if (Tok.getString() == "at") {
return parseSetAtDirective();
+ } else if (Tok.getString() == "fp") {
+ return parseSetFpDirective();
} else if (Tok.getString() == "reorder") {
return parseSetReorderDirective();
} else if (Tok.getString() == "noreorder") {
@@ -2353,14 +2672,24 @@ bool MipsAsmParser::parseDirectiveSet() {
return parseSetMacroDirective();
} else if (Tok.getString() == "nomacro") {
return parseSetNoMacroDirective();
+ } else if (Tok.getString() == "mips16") {
+ return parseSetFeature(Mips::FeatureMips16);
} else if (Tok.getString() == "nomips16") {
- // Ignore this directive for now.
- Parser.eatToEndOfStatement();
- return false;
+ return parseSetNoMips16Directive();
} else if (Tok.getString() == "nomicromips") {
- // Ignore this directive for now.
+ getTargetStreamer().emitDirectiveSetNoMicroMips();
Parser.eatToEndOfStatement();
return false;
+ } else if (Tok.getString() == "micromips") {
+ return parseSetFeature(Mips::FeatureMicroMips);
+ } else if (Tok.getString() == "mips32r2") {
+ return parseSetFeature(Mips::FeatureMips32r2);
+ } else if (Tok.getString() == "mips64") {
+ return parseSetFeature(Mips::FeatureMips64);
+ } else if (Tok.getString() == "mips64r2") {
+ return parseSetFeature(Mips::FeatureMips64r2);
+ } else if (Tok.getString() == "dsp") {
+ return parseSetFeature(Mips::FeatureDSP);
} else {
// It is just an identifier, look for an assignment.
parseSetAssignment();
@@ -2370,37 +2699,9 @@ bool MipsAsmParser::parseDirectiveSet() {
return true;
}
-bool MipsAsmParser::parseDirectiveMipsHackStocg() {
- MCAsmParser &Parser = getParser();
- StringRef Name;
- if (Parser.parseIdentifier(Name))
- reportParseError("expected identifier");
-
- MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
- if (getLexer().isNot(AsmToken::Comma))
- return TokError("unexpected token");
- Lex();
-
- int64_t Flags = 0;
- if (Parser.parseAbsoluteExpression(Flags))
- return TokError("unexpected token");
-
- getTargetStreamer().emitMipsHackSTOCG(Sym, Flags);
- return false;
-}
-
-bool MipsAsmParser::parseDirectiveMipsHackELFFlags() {
- int64_t Flags = 0;
- if (Parser.parseAbsoluteExpression(Flags))
- return TokError("unexpected token");
-
- getTargetStreamer().emitMipsHackELFFlags(Flags);
- return false;
-}
-
-/// parseDirectiveWord
+/// parseDataDirective
/// ::= .word [ expression (, expression)* ]
-bool MipsAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
+bool MipsAsmParser::parseDataDirective(unsigned Size, SMLoc L) {
if (getLexer().isNot(AsmToken::EndOfStatement)) {
for (;;) {
const MCExpr *Value;
@@ -2439,10 +2740,200 @@ bool MipsAsmParser::parseDirectiveGpWord() {
return false;
}
-bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) {
+/// parseDirectiveGpDWord
+/// ::= .gpdword local_sym
+bool MipsAsmParser::parseDirectiveGpDWord() {
+ const MCExpr *Value;
+ // EmitGPRel64Value requires an expression, so we use the base class
+ // method to evaluate the expression.
+ if (getParser().parseExpression(Value))
+ return true;
+ getParser().getStreamer().EmitGPRel64Value(Value);
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return Error(getLexer().getLoc(), "unexpected token in directive");
+ Parser.Lex(); // Eat EndOfStatement token.
+ return false;
+}
+
+bool MipsAsmParser::parseDirectiveOption() {
+ // Get the option token.
+ AsmToken Tok = Parser.getTok();
+ // At the moment only identifiers are supported.
+ if (Tok.isNot(AsmToken::Identifier)) {
+ Error(Parser.getTok().getLoc(), "unexpected token in .option directive");
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ StringRef Option = Tok.getIdentifier();
+
+ if (Option == "pic0") {
+ getTargetStreamer().emitDirectiveOptionPic0();
+ Parser.Lex();
+ if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
+ Error(Parser.getTok().getLoc(),
+ "unexpected token in .option pic0 directive");
+ Parser.eatToEndOfStatement();
+ }
+ return false;
+ }
+
+ if (Option == "pic2") {
+ getTargetStreamer().emitDirectiveOptionPic2();
+ Parser.Lex();
+ if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
+ Error(Parser.getTok().getLoc(),
+ "unexpected token in .option pic2 directive");
+ Parser.eatToEndOfStatement();
+ }
+ return false;
+ }
+
+ // Unknown option.
+ Warning(Parser.getTok().getLoc(), "unknown option in .option directive");
+ Parser.eatToEndOfStatement();
+ return false;
+}
+
+/// parseDirectiveModule
+/// ::= .module oddspreg
+/// ::= .module nooddspreg
+/// ::= .module fp=value
+bool MipsAsmParser::parseDirectiveModule() {
+ MCAsmLexer &Lexer = getLexer();
+ SMLoc L = Lexer.getLoc();
+
+ if (!getTargetStreamer().getCanHaveModuleDir()) {
+ // TODO : get a better message.
+ reportParseError(".module directive must appear before any code");
+ return false;
+ }
+
+ if (Lexer.is(AsmToken::Identifier)) {
+ StringRef Option = Parser.getTok().getString();
+ Parser.Lex();
+
+ if (Option == "oddspreg") {
+ getTargetStreamer().emitDirectiveModuleOddSPReg(true, isABI_O32());
+ clearFeatureBits(Mips::FeatureNoOddSPReg, "nooddspreg");
+
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ reportParseError("Expected end of statement");
+ return false;
+ }
+
+ return false;
+ } else if (Option == "nooddspreg") {
+ if (!isABI_O32()) {
+ Error(L, "'.module nooddspreg' requires the O32 ABI");
+ return false;
+ }
+
+ getTargetStreamer().emitDirectiveModuleOddSPReg(false, isABI_O32());
+ setFeatureBits(Mips::FeatureNoOddSPReg, "nooddspreg");
+
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ reportParseError("Expected end of statement");
+ return false;
+ }
+
+ return false;
+ } else if (Option == "fp") {
+ return parseDirectiveModuleFP();
+ }
+
+ return Error(L, "'" + Twine(Option) + "' is not a valid .module option.");
+ }
+
+ return false;
+}
+
+/// parseDirectiveModuleFP
+/// ::= =32
+/// ::= =xx
+/// ::= =64
+bool MipsAsmParser::parseDirectiveModuleFP() {
+ MCAsmLexer &Lexer = getLexer();
+
+ if (Lexer.isNot(AsmToken::Equal)) {
+ reportParseError("unexpected token in statement");
+ return false;
+ }
+ Parser.Lex(); // Eat '=' token.
+
+ MipsABIFlagsSection::FpABIKind FpABI;
+ if (!parseFpABIValue(FpABI, ".module"))
+ return false;
+
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ reportParseError("unexpected token in statement");
+ return false;
+ }
+
+ // Emit appropriate flags.
+ getTargetStreamer().emitDirectiveModuleFP(FpABI, isABI_O32());
+ Parser.Lex(); // Consume the EndOfStatement.
+ return false;
+}
+
+bool MipsAsmParser::parseFpABIValue(MipsABIFlagsSection::FpABIKind &FpABI,
+ StringRef Directive) {
+ MCAsmLexer &Lexer = getLexer();
+
+ if (Lexer.is(AsmToken::Identifier)) {
+ StringRef Value = Parser.getTok().getString();
+ Parser.Lex();
+
+ if (Value != "xx") {
+ reportParseError("unsupported value, expected 'xx', '32' or '64'");
+ return false;
+ }
+
+ if (!isABI_O32()) {
+ reportParseError("'" + Directive + " fp=xx' requires the O32 ABI");
+ return false;
+ }
+
+ FpABI = MipsABIFlagsSection::FpABIKind::XX;
+ return true;
+ }
+
+ if (Lexer.is(AsmToken::Integer)) {
+ unsigned Value = Parser.getTok().getIntVal();
+ Parser.Lex();
+
+ if (Value != 32 && Value != 64) {
+ reportParseError("unsupported value, expected 'xx', '32' or '64'");
+ return false;
+ }
+
+ if (Value == 32) {
+ if (!isABI_O32()) {
+ reportParseError("'" + Directive + " fp=32' requires the O32 ABI");
+ return false;
+ }
+
+ FpABI = MipsABIFlagsSection::FpABIKind::S32;
+ } else
+ FpABI = MipsABIFlagsSection::FpABIKind::S64;
+
+ return true;
+ }
+
+ return false;
+}
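The parser above accepts three spellings for the FP ABI value and lets 'xx' and '32' through only under the O32 ABI. As a rough standalone sketch of that validation rule (not taken from the patch; the enum and function names below are made up for illustration):

#include <cassert>
#include <string>

enum class FpAbi { Any, XX, S32, S64 };

// Maps Value to an FpAbi kind; returns false when the value is unsupported
// or not allowed for the ABI in use.
static bool checkFpValue(const std::string &Value, bool IsO32, FpAbi &Out) {
  if (Value == "xx") { Out = FpAbi::XX;  return IsO32; } // O32 only
  if (Value == "32") { Out = FpAbi::S32; return IsO32; } // O32 only
  if (Value == "64") { Out = FpAbi::S64; return true;  } // any ABI
  return false;                                          // unsupported value
}

int main() {
  FpAbi A;
  assert(checkFpValue("xx", true, A) && A == FpAbi::XX);
  assert(!checkFpValue("32", false, A)); // fp=32 outside O32 is rejected
  assert(checkFpValue("64", false, A) && A == FpAbi::S64);
  return 0;
}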
+
+bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) {
StringRef IDVal = DirectiveID.getString();
+ if (IDVal == ".cpload")
+ return parseDirectiveCPLoad(DirectiveID.getLoc());
+ if (IDVal == ".dword") {
+ parseDataDirective(8, DirectiveID.getLoc());
+ return false;
+ }
+
if (IDVal == ".ent") {
// Ignore this directive for now.
Parser.Lex();
@@ -2477,22 +2968,42 @@ bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) {
return false;
}
+ if (IDVal == ".nan")
+ return parseDirectiveNaN();
+
if (IDVal == ".gpword") {
- // Ignore this directive for now.
parseDirectiveGpWord();
return false;
}
+ if (IDVal == ".gpdword") {
+ parseDirectiveGpDWord();
+ return false;
+ }
+
if (IDVal == ".word") {
- parseDirectiveWord(4, DirectiveID.getLoc());
+ parseDataDirective(4, DirectiveID.getLoc());
+ return false;
+ }
+
+ if (IDVal == ".option")
+ return parseDirectiveOption();
+
+ if (IDVal == ".abicalls") {
+ getTargetStreamer().emitDirectiveAbiCalls();
+ if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
+ Error(Parser.getTok().getLoc(), "unexpected token in directive");
+ // Clear line
+ Parser.eatToEndOfStatement();
+ }
return false;
}
- if (IDVal == ".mips_hack_stocg")
- return parseDirectiveMipsHackStocg();
+ if (IDVal == ".cpsetup")
+ return parseDirectiveCPSetup();
- if (IDVal == ".mips_hack_elf_flags")
- return parseDirectiveMipsHackELFFlags();
+ if (IDVal == ".module")
+ return parseDirectiveModule();
return true;
}
diff --git a/contrib/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp b/contrib/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
index 60508a8..f35a8de 100644
--- a/contrib/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
+++ b/contrib/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
@@ -14,6 +14,7 @@
#include "Mips.h"
#include "MipsRegisterInfo.h"
#include "MipsSubtarget.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
@@ -24,6 +25,8 @@
using namespace llvm;
+#define DEBUG_TYPE "mips-disassembler"
+
typedef MCDisassembler::DecodeStatus DecodeStatus;
namespace {
@@ -33,19 +36,16 @@ class MipsDisassemblerBase : public MCDisassembler {
public:
/// Constructor - Initializes the disassembler.
///
- MipsDisassemblerBase(const MCSubtargetInfo &STI, const MCRegisterInfo *Info,
+ MipsDisassemblerBase(const MCSubtargetInfo &STI, MCContext &Ctx,
bool bigEndian) :
- MCDisassembler(STI), RegInfo(Info),
+ MCDisassembler(STI, Ctx),
IsN64(STI.getFeatureBits() & Mips::FeatureN64), isBigEndian(bigEndian) {}
virtual ~MipsDisassemblerBase() {}
- const MCRegisterInfo *getRegInfo() const { return RegInfo.get(); }
-
bool isN64() const { return IsN64; }
private:
- OwningPtr<const MCRegisterInfo> RegInfo;
bool IsN64;
protected:
bool isBigEndian;
@@ -57,19 +57,31 @@ class MipsDisassembler : public MipsDisassemblerBase {
public:
/// Constructor - Initializes the disassembler.
///
- MipsDisassembler(const MCSubtargetInfo &STI, const MCRegisterInfo *Info,
- bool bigEndian) :
- MipsDisassemblerBase(STI, Info, bigEndian) {
- IsMicroMips = STI.getFeatureBits() & Mips::FeatureMicroMips;
- }
+ MipsDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx, bool bigEndian)
+ : MipsDisassemblerBase(STI, Ctx, bigEndian) {
+ IsMicroMips = STI.getFeatureBits() & Mips::FeatureMicroMips;
+ }
+
+ bool hasMips3() const { return STI.getFeatureBits() & Mips::FeatureMips3; }
+ bool hasMips32() const { return STI.getFeatureBits() & Mips::FeatureMips32; }
+ bool hasMips32r6() const {
+ return STI.getFeatureBits() & Mips::FeatureMips32r6;
+ }
+
+ bool isGP64() const { return STI.getFeatureBits() & Mips::FeatureGP64Bit; }
+
+ bool hasCOP3() const {
+ // Only present in MIPS-I and MIPS-II
+ return !hasMips32() && !hasMips3();
+ }
/// getInstruction - See MCDisassembler.
- virtual DecodeStatus getInstruction(MCInst &instr,
- uint64_t &size,
- const MemoryObject &region,
- uint64_t address,
- raw_ostream &vStream,
- raw_ostream &cStream) const;
+ DecodeStatus getInstruction(MCInst &instr,
+ uint64_t &size,
+ const MemoryObject &region,
+ uint64_t address,
+ raw_ostream &vStream,
+ raw_ostream &cStream) const override;
};
@@ -78,17 +90,17 @@ class Mips64Disassembler : public MipsDisassemblerBase {
public:
/// Constructor - Initializes the disassembler.
///
- Mips64Disassembler(const MCSubtargetInfo &STI, const MCRegisterInfo *Info,
+ Mips64Disassembler(const MCSubtargetInfo &STI, MCContext &Ctx,
bool bigEndian) :
- MipsDisassemblerBase(STI, Info, bigEndian) {}
+ MipsDisassemblerBase(STI, Ctx, bigEndian) {}
/// getInstruction - See MCDisassembler.
- virtual DecodeStatus getInstruction(MCInst &instr,
- uint64_t &size,
- const MemoryObject &region,
- uint64_t address,
- raw_ostream &vStream,
- raw_ostream &cStream) const;
+ DecodeStatus getInstruction(MCInst &instr,
+ uint64_t &size,
+ const MemoryObject &region,
+ uint64_t address,
+ raw_ostream &vStream,
+ raw_ostream &cStream) const override;
};
} // end anonymous namespace
@@ -130,11 +142,6 @@ static DecodeStatus DecodeFGR32RegisterClass(MCInst &Inst,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeFGRH32RegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder);
-
static DecodeStatus DecodeCCRRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
@@ -145,6 +152,10 @@ static DecodeStatus DecodeFCCRegisterClass(MCInst &Inst,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeFGRCCRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeHWRegsRegisterClass(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -195,6 +206,11 @@ static DecodeStatus DecodeMSACtrlRegisterClass(MCInst &Inst,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeCOP2RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeBranchTarget(MCInst &Inst,
unsigned Offset,
uint64_t Address,
@@ -205,6 +221,16 @@ static DecodeStatus DecodeJumpTarget(MCInst &Inst,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeBranchTarget21(MCInst &Inst,
+ unsigned Offset,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeBranchTarget26(MCInst &Inst,
+ unsigned Offset,
+ uint64_t Address,
+ const void *Decoder);
+
// DecodeBranchTargetMM - Decode microMIPS branch offset, which is
// shifted left by 1 bit.
static DecodeStatus DecodeBranchTargetMM(MCInst &Inst,
@@ -241,6 +267,11 @@ static DecodeStatus DecodeFMem(MCInst &Inst, unsigned Insn,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeSpecial3LlSc(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeSimm16(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -263,6 +294,48 @@ static DecodeStatus DecodeExtSize(MCInst &Inst,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeSimm19Lsl2(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder);
+
+static DecodeStatus DecodeSimm18Lsl3(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder);
+
+/// INSVE_[BHWD] have an implicit operand that the generated decoder doesn't
+/// handle.
+template <typename InsnType>
+static DecodeStatus DecodeINSVE_DF(MCInst &MI, InsnType insn, uint64_t Address,
+ const void *Decoder);
+
+template <typename InsnType>
+static DecodeStatus
+DecodeAddiGroupBranch(MCInst &MI, InsnType insn, uint64_t Address,
+ const void *Decoder);
+
+template <typename InsnType>
+static DecodeStatus
+DecodeDaddiGroupBranch(MCInst &MI, InsnType insn, uint64_t Address,
+ const void *Decoder);
+
+template <typename InsnType>
+static DecodeStatus
+DecodeBlezlGroupBranch(MCInst &MI, InsnType insn, uint64_t Address,
+ const void *Decoder);
+
+template <typename InsnType>
+static DecodeStatus
+DecodeBgtzlGroupBranch(MCInst &MI, InsnType insn, uint64_t Address,
+ const void *Decoder);
+
+template <typename InsnType>
+static DecodeStatus
+DecodeBgtzGroupBranch(MCInst &MI, InsnType insn, uint64_t Address,
+ const void *Decoder);
+
+template <typename InsnType>
+static DecodeStatus
+DecodeBlezGroupBranch(MCInst &MI, InsnType insn, uint64_t Address,
+ const void *Decoder);
+
namespace llvm {
extern Target TheMipselTarget, TheMipsTarget, TheMips64Target,
TheMips64elTarget;
@@ -270,26 +343,30 @@ extern Target TheMipselTarget, TheMipsTarget, TheMips64Target,
static MCDisassembler *createMipsDisassembler(
const Target &T,
- const MCSubtargetInfo &STI) {
- return new MipsDisassembler(STI, T.createMCRegInfo(""), true);
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new MipsDisassembler(STI, Ctx, true);
}
static MCDisassembler *createMipselDisassembler(
const Target &T,
- const MCSubtargetInfo &STI) {
- return new MipsDisassembler(STI, T.createMCRegInfo(""), false);
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new MipsDisassembler(STI, Ctx, false);
}
static MCDisassembler *createMips64Disassembler(
const Target &T,
- const MCSubtargetInfo &STI) {
- return new Mips64Disassembler(STI, T.createMCRegInfo(""), true);
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new Mips64Disassembler(STI, Ctx, true);
}
static MCDisassembler *createMips64elDisassembler(
const Target &T,
- const MCSubtargetInfo &STI) {
- return new Mips64Disassembler(STI, T.createMCRegInfo(""), false);
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new Mips64Disassembler(STI, Ctx, false);
}
extern "C" void LLVMInitializeMipsDisassembler() {
@@ -304,9 +381,316 @@ extern "C" void LLVMInitializeMipsDisassembler() {
createMips64elDisassembler);
}
-
#include "MipsGenDisassemblerTables.inc"
+static unsigned getReg(const void *D, unsigned RC, unsigned RegNo) {
+ const MipsDisassemblerBase *Dis = static_cast<const MipsDisassemblerBase*>(D);
+ const MCRegisterInfo *RegInfo = Dis->getContext().getRegisterInfo();
+ return *(RegInfo->getRegClass(RC).begin() + RegNo);
+}
+
+template <typename InsnType>
+static DecodeStatus DecodeINSVE_DF(MCInst &MI, InsnType insn, uint64_t Address,
+ const void *Decoder) {
+ typedef DecodeStatus (*DecodeFN)(MCInst &, unsigned, uint64_t, const void *);
+ // The size of the n field depends on the element size
+ // The register class also depends on this.
+ InsnType tmp = fieldFromInstruction(insn, 17, 5);
+ unsigned NSize = 0;
+ DecodeFN RegDecoder = nullptr;
+ if ((tmp & 0x18) == 0x00) { // INSVE_B
+ NSize = 4;
+ RegDecoder = DecodeMSA128BRegisterClass;
+ } else if ((tmp & 0x1c) == 0x10) { // INSVE_H
+ NSize = 3;
+ RegDecoder = DecodeMSA128HRegisterClass;
+ } else if ((tmp & 0x1e) == 0x18) { // INSVE_W
+ NSize = 2;
+ RegDecoder = DecodeMSA128WRegisterClass;
+ } else if ((tmp & 0x1f) == 0x1c) { // INSVE_D
+ NSize = 1;
+ RegDecoder = DecodeMSA128DRegisterClass;
+ } else
+ llvm_unreachable("Invalid encoding");
+
+ assert(NSize != 0 && RegDecoder != nullptr);
+
+ // $wd
+ tmp = fieldFromInstruction(insn, 6, 5);
+ if (RegDecoder(MI, tmp, Address, Decoder) == MCDisassembler::Fail)
+ return MCDisassembler::Fail;
+ // $wd_in
+ if (RegDecoder(MI, tmp, Address, Decoder) == MCDisassembler::Fail)
+ return MCDisassembler::Fail;
+ // $n
+ tmp = fieldFromInstruction(insn, 16, NSize);
+ MI.addOperand(MCOperand::CreateImm(tmp));
+ // $ws
+ tmp = fieldFromInstruction(insn, 11, 5);
+ if (RegDecoder(MI, tmp, Address, Decoder) == MCDisassembler::Fail)
+ return MCDisassembler::Fail;
+ // $n2
+ MI.addOperand(MCOperand::CreateImm(0));
+
+ return MCDisassembler::Success;
+}
+
+template <typename InsnType>
+static DecodeStatus DecodeAddiGroupBranch(MCInst &MI, InsnType insn,
+ uint64_t Address,
+ const void *Decoder) {
+ // If we are called then we can assume that MIPS32r6/MIPS64r6 is enabled
+ // (otherwise we would have matched the ADDI instruction from the earlier
+ // ISAs instead).
+ //
+ // We have:
+ // 0b001000 sssss ttttt iiiiiiiiiiiiiiii
+ // BOVC if rs >= rt
+ // BEQZALC if rs == 0 && rt != 0
+ // BEQC if rs < rt && rs != 0
+
+ InsnType Rs = fieldFromInstruction(insn, 21, 5);
+ InsnType Rt = fieldFromInstruction(insn, 16, 5);
+ InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) << 2;
+ bool HasRs = false;
+
+ if (Rs >= Rt) {
+ MI.setOpcode(Mips::BOVC);
+ HasRs = true;
+ } else if (Rs != 0 && Rs < Rt) {
+ MI.setOpcode(Mips::BEQC);
+ HasRs = true;
+ } else
+ MI.setOpcode(Mips::BEQZALC);
+
+ if (HasRs)
+ MI.addOperand(MCOperand::CreateReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rs)));
+
+ MI.addOperand(MCOperand::CreateReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rt)));
+ MI.addOperand(MCOperand::CreateImm(Imm));
+
+ return MCDisassembler::Success;
+}
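The comment block above spells out how the 0b001000 (ADDI-group) word is disambiguated between BOVC, BEQZALC and BEQC. A small self-contained sketch of the same field extraction, using a made-up example word (illustrative only, not part of the patch):

#include <cassert>
#include <cstdint>

// Extract the N-bit field starting at bit Lo, mirroring fieldFromInstruction.
static uint32_t field(uint32_t Insn, unsigned Lo, unsigned N) {
  return (Insn >> Lo) & ((1u << N) - 1);
}

int main() {
  // opcode 0b001000, rs = 0, rt = 5, imm = 0x0010
  uint32_t Insn = (0x08u << 26) | (0u << 21) | (5u << 16) | 0x0010;
  uint32_t Rs = field(Insn, 21, 5), Rt = field(Insn, 16, 5);
  assert(Rs == 0 && Rt == 5);
  // rs == 0 and rt != 0, so the decoder above would pick BEQZALC;
  // rs >= rt would give BOVC, and 0 < rs < rt would give BEQC.
  return 0;
}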
+
+template <typename InsnType>
+static DecodeStatus DecodeDaddiGroupBranch(MCInst &MI, InsnType insn,
+ uint64_t Address,
+ const void *Decoder) {
+ // If we are called then we can assume that MIPS32r6/MIPS64r6 is enabled
+ // (otherwise we would have matched the DADDI instruction from the earlier
+ // ISAs instead).
+ //
+ // We have:
+ // 0b011000 sssss ttttt iiiiiiiiiiiiiiii
+ // BNVC if rs >= rt
+ // BNEZALC if rs == 0 && rt != 0
+ // BNEC if rs < rt && rs != 0
+
+ InsnType Rs = fieldFromInstruction(insn, 21, 5);
+ InsnType Rt = fieldFromInstruction(insn, 16, 5);
+ InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) << 2;
+ bool HasRs = false;
+
+ if (Rs >= Rt) {
+ MI.setOpcode(Mips::BNVC);
+ HasRs = true;
+ } else if (Rs != 0 && Rs < Rt) {
+ MI.setOpcode(Mips::BNEC);
+ HasRs = true;
+ } else
+ MI.setOpcode(Mips::BNEZALC);
+
+ if (HasRs)
+ MI.addOperand(MCOperand::CreateReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rs)));
+
+ MI.addOperand(MCOperand::CreateReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rt)));
+ MI.addOperand(MCOperand::CreateImm(Imm));
+
+ return MCDisassembler::Success;
+}
+
+template <typename InsnType>
+static DecodeStatus DecodeBlezlGroupBranch(MCInst &MI, InsnType insn,
+ uint64_t Address,
+ const void *Decoder) {
+ // If we are called then we can assume that MIPS32r6/MIPS64r6 is enabled
+ // (otherwise we would have matched the BLEZL instruction from the earlier
+ // ISAs instead).
+ //
+ // We have:
+ // 0b010110 sssss ttttt iiiiiiiiiiiiiiii
+ // Invalid if rt == 0
+ // BLEZC if rs == 0 && rt != 0
+ // BGEZC if rs == rt && rt != 0
+ // BGEC if rs != rt && rs != 0 && rt != 0
+
+ InsnType Rs = fieldFromInstruction(insn, 21, 5);
+ InsnType Rt = fieldFromInstruction(insn, 16, 5);
+ InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) << 2;
+ bool HasRs = false;
+
+ if (Rt == 0)
+ return MCDisassembler::Fail;
+ else if (Rs == 0)
+ MI.setOpcode(Mips::BLEZC);
+ else if (Rs == Rt)
+ MI.setOpcode(Mips::BGEZC);
+ else {
+ HasRs = true;
+ MI.setOpcode(Mips::BGEC);
+ }
+
+ if (HasRs)
+ MI.addOperand(MCOperand::CreateReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rs)));
+
+ MI.addOperand(MCOperand::CreateReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rt)));
+
+ MI.addOperand(MCOperand::CreateImm(Imm));
+
+ return MCDisassembler::Success;
+}
+
+template <typename InsnType>
+static DecodeStatus DecodeBgtzlGroupBranch(MCInst &MI, InsnType insn,
+ uint64_t Address,
+ const void *Decoder) {
+ // If we are called then we can assume that MIPS32r6/MIPS64r6 is enabled
+ // (otherwise we would have matched the BGTZL instruction from the earlier
+ // ISAs instead).
+ //
+ // We have:
+ // 0b010111 sssss ttttt iiiiiiiiiiiiiiii
+ // Invalid if rt == 0
+ // BGTZC if rs == 0 && rt != 0
+ // BLTZC if rs == rt && rt != 0
+ // BLTC if rs != rt && rs != 0 && rt != 0
+
+ bool HasRs = false;
+
+ InsnType Rs = fieldFromInstruction(insn, 21, 5);
+ InsnType Rt = fieldFromInstruction(insn, 16, 5);
+ InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) << 2;
+
+ if (Rt == 0)
+ return MCDisassembler::Fail;
+ else if (Rs == 0)
+ MI.setOpcode(Mips::BGTZC);
+ else if (Rs == Rt)
+ MI.setOpcode(Mips::BLTZC);
+ else {
+ MI.setOpcode(Mips::BLTC);
+ HasRs = true;
+ }
+
+ if (HasRs)
+ MI.addOperand(MCOperand::CreateReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rs)));
+
+ MI.addOperand(MCOperand::CreateReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rt)));
+
+ MI.addOperand(MCOperand::CreateImm(Imm));
+
+ return MCDisassembler::Success;
+}
+
+template <typename InsnType>
+static DecodeStatus DecodeBgtzGroupBranch(MCInst &MI, InsnType insn,
+ uint64_t Address,
+ const void *Decoder) {
+ // If we are called then we can assume that MIPS32r6/MIPS64r6 is enabled
+ // (otherwise we would have matched the BGTZ instruction from the earlier
+ // ISAs instead).
+ //
+ // We have:
+ // 0b000111 sssss ttttt iiiiiiiiiiiiiiii
+ // BGTZ if rt == 0
+ // BGTZALC if rs == 0 && rt != 0
+ // BLTZALC if rs != 0 && rs == rt
+ // BLTUC if rs != 0 && rs != rt
+
+ InsnType Rs = fieldFromInstruction(insn, 21, 5);
+ InsnType Rt = fieldFromInstruction(insn, 16, 5);
+ InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) << 2;
+ bool HasRs = false;
+ bool HasRt = false;
+
+ if (Rt == 0) {
+ MI.setOpcode(Mips::BGTZ);
+ HasRs = true;
+ } else if (Rs == 0) {
+ MI.setOpcode(Mips::BGTZALC);
+ HasRt = true;
+ } else if (Rs == Rt) {
+ MI.setOpcode(Mips::BLTZALC);
+ HasRs = true;
+ } else {
+ MI.setOpcode(Mips::BLTUC);
+ HasRs = true;
+ HasRt = true;
+ }
+
+ if (HasRs)
+ MI.addOperand(MCOperand::CreateReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rs)));
+
+ if (HasRt)
+ MI.addOperand(MCOperand::CreateReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rt)));
+
+ MI.addOperand(MCOperand::CreateImm(Imm));
+
+ return MCDisassembler::Success;
+}
+
+template <typename InsnType>
+static DecodeStatus DecodeBlezGroupBranch(MCInst &MI, InsnType insn,
+ uint64_t Address,
+ const void *Decoder) {
+ // If we are called then we can assume that MIPS32r6/MIPS64r6 is enabled
+ // (otherwise we would have matched the BLEZ instruction from the earlier
+ // ISAs instead).
+ //
+ // We have:
+ // 0b000110 sssss ttttt iiiiiiiiiiiiiiii
+ // Invalid if rt == 0
+ // BLEZALC if rs == 0 && rt != 0
+ // BGEZALC if rs == rt && rt != 0
+ // BGEUC if rs != rt && rs != 0 && rt != 0
+
+ InsnType Rs = fieldFromInstruction(insn, 21, 5);
+ InsnType Rt = fieldFromInstruction(insn, 16, 5);
+ InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) << 2;
+ bool HasRs = false;
+
+ if (Rt == 0)
+ return MCDisassembler::Fail;
+ else if (Rs == 0)
+ MI.setOpcode(Mips::BLEZALC);
+ else if (Rs == Rt)
+ MI.setOpcode(Mips::BGEZALC);
+ else {
+ HasRs = true;
+ MI.setOpcode(Mips::BGEUC);
+ }
+
+ if (HasRs)
+ MI.addOperand(MCOperand::CreateReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rs)));
+ MI.addOperand(MCOperand::CreateReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rt)));
+
+ MI.addOperand(MCOperand::CreateImm(Imm));
+
+ return MCDisassembler::Success;
+}
+
/// readInstruction - read four bytes from the MemoryObject
/// and return the 32-bit word sorted according to the given endianness
static DecodeStatus readInstruction32(const MemoryObject &region,
@@ -366,6 +750,7 @@ MipsDisassembler::getInstruction(MCInst &instr,
return MCDisassembler::Fail;
if (IsMicroMips) {
+ DEBUG(dbgs() << "Trying MicroMips32 table (32-bit opcodes):\n");
// Calling the auto-generated decoder function.
Result = decodeInstruction(DecoderTableMicroMips32, instr, Insn, Address,
this, STI);
@@ -376,6 +761,37 @@ MipsDisassembler::getInstruction(MCInst &instr,
return MCDisassembler::Fail;
}
+ if (hasCOP3()) {
+ DEBUG(dbgs() << "Trying COP3_ table (32-bit opcodes):\n");
+ Result =
+ decodeInstruction(DecoderTableCOP3_32, instr, Insn, Address, this, STI);
+ if (Result != MCDisassembler::Fail) {
+ Size = 4;
+ return Result;
+ }
+ }
+
+ if (hasMips32r6() && isGP64()) {
+ DEBUG(dbgs() << "Trying Mips32r6_64r6 (GPR64) table (32-bit opcodes):\n");
+ Result = decodeInstruction(DecoderTableMips32r6_64r6_GP6432, instr, Insn,
+ Address, this, STI);
+ if (Result != MCDisassembler::Fail) {
+ Size = 4;
+ return Result;
+ }
+ }
+
+ if (hasMips32r6()) {
+ DEBUG(dbgs() << "Trying Mips32r6_64r6 table (32-bit opcodes):\n");
+ Result = decodeInstruction(DecoderTableMips32r6_64r632, instr, Insn,
+ Address, this, STI);
+ if (Result != MCDisassembler::Fail) {
+ Size = 4;
+ return Result;
+ }
+ }
+
+ DEBUG(dbgs() << "Trying Mips table (32-bit opcodes):\n");
// Calling the auto-generated decoder function.
Result = decodeInstruction(DecoderTableMips32, instr, Insn, Address,
this, STI);
@@ -419,11 +835,6 @@ Mips64Disassembler::getInstruction(MCInst &instr,
return MCDisassembler::Fail;
}
-static unsigned getReg(const void *D, unsigned RC, unsigned RegNo) {
- const MipsDisassemblerBase *Dis = static_cast<const MipsDisassemblerBase*>(D);
- return *(Dis->getRegInfo()->getRegClass(RC).begin() + RegNo);
-}
-
static DecodeStatus DecodeCPU16RegsRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
@@ -498,18 +909,6 @@ static DecodeStatus DecodeFGR32RegisterClass(MCInst &Inst,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeFGRH32RegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder) {
- if (RegNo > 31)
- return MCDisassembler::Fail;
-
- unsigned Reg = getReg(Decoder, Mips::FGRH32RegClassID, RegNo);
- Inst.addOperand(MCOperand::CreateReg(Reg));
- return MCDisassembler::Success;
-}
-
static DecodeStatus DecodeCCRRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
@@ -532,6 +931,17 @@ static DecodeStatus DecodeFCCRegisterClass(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeFGRCCRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ unsigned Reg = getReg(Decoder, Mips::FGRCCRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeMem(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -565,7 +975,37 @@ static DecodeStatus DecodeMSA128Mem(MCInst &Inst, unsigned Insn,
Inst.addOperand(MCOperand::CreateReg(Reg));
Inst.addOperand(MCOperand::CreateReg(Base));
- Inst.addOperand(MCOperand::CreateImm(Offset));
+
+ // The immediate field of an LD/ST instruction is scaled, which means it must
+ // be multiplied (when decoding) by the size (in bytes) of the instruction's
+ // data format.
+ // .b - 1 byte
+ // .h - 2 bytes
+ // .w - 4 bytes
+ // .d - 8 bytes
+ switch (Inst.getOpcode()) {
+ default:
+ assert(0 && "Unexpected instruction");
+ return MCDisassembler::Fail;
+ case Mips::LD_B:
+ case Mips::ST_B:
+ Inst.addOperand(MCOperand::CreateImm(Offset));
+ break;
+ case Mips::LD_H:
+ case Mips::ST_H:
+ Inst.addOperand(MCOperand::CreateImm(Offset << 1));
+ break;
+ case Mips::LD_W:
+ case Mips::ST_W:
+ Inst.addOperand(MCOperand::CreateImm(Offset << 2));
+ break;
+ case Mips::LD_D:
+ case Mips::ST_D:
+ Inst.addOperand(MCOperand::CreateImm(Offset << 3));
+ break;
+ }
return MCDisassembler::Success;
}
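The switch above scales the signed offset field by the element size of the MSA load/store. A tiny worked example of the scaling, as a standalone sketch (illustrative only):

#include <cstdio>

int main() {
  int Field = 5; // value read from the immediate field
  // Byte offsets after scaling, one per data format:
  printf("ld.b/st.b: %d\n", Field);      // 5  (x1)
  printf("ld.h/st.h: %d\n", Field << 1); // 10 (x2)
  printf("ld.w/st.w: %d\n", Field << 2); // 20 (x4)
  printf("ld.d/st.d: %d\n", Field << 3); // 40 (x8)
  return 0;
}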
@@ -581,6 +1021,9 @@ static DecodeStatus DecodeMemMMImm12(MCInst &Inst,
Reg = getReg(Decoder, Mips::GPR32RegClassID, Reg);
Base = getReg(Decoder, Mips::GPR32RegClassID, Base);
+ if (Inst.getOpcode() == Mips::SC_MM)
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+
Inst.addOperand(MCOperand::CreateReg(Reg));
Inst.addOperand(MCOperand::CreateReg(Base));
Inst.addOperand(MCOperand::CreateImm(Offset));
@@ -624,6 +1067,27 @@ static DecodeStatus DecodeFMem(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeSpecial3LlSc(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ int64_t Offset = SignExtend64<9>((Insn >> 7) & 0x1ff);
+ unsigned Rt = fieldFromInstruction(Insn, 16, 5);
+ unsigned Base = fieldFromInstruction(Insn, 21, 5);
+
+ Rt = getReg(Decoder, Mips::GPR32RegClassID, Rt);
+ Base = getReg(Decoder, Mips::GPR32RegClassID, Base);
+
+ if (Inst.getOpcode() == Mips::SC_R6 || Inst.getOpcode() == Mips::SCD_R6) {
+ Inst.addOperand(MCOperand::CreateReg(Rt));
+ }
+
+ Inst.addOperand(MCOperand::CreateReg(Rt));
+ Inst.addOperand(MCOperand::CreateReg(Base));
+ Inst.addOperand(MCOperand::CreateImm(Offset));
+
+ return MCDisassembler::Success;
+}
static DecodeStatus DecodeHWRegsRegisterClass(MCInst &Inst,
unsigned RegNo,
@@ -745,12 +1209,23 @@ static DecodeStatus DecodeMSACtrlRegisterClass(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeCOP2RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ unsigned Reg = getReg(Decoder, Mips::COP2RegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeBranchTarget(MCInst &Inst,
unsigned Offset,
uint64_t Address,
const void *Decoder) {
- unsigned BranchOffset = Offset & 0xffff;
- BranchOffset = SignExtend32<18>(BranchOffset << 2) + 4;
+ int32_t BranchOffset = (SignExtend32<16>(Offset) << 2) + 4;
Inst.addOperand(MCOperand::CreateImm(BranchOffset));
return MCDisassembler::Success;
}
@@ -765,12 +1240,31 @@ static DecodeStatus DecodeJumpTarget(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeBranchTarget21(MCInst &Inst,
+ unsigned Offset,
+ uint64_t Address,
+ const void *Decoder) {
+ int32_t BranchOffset = SignExtend32<21>(Offset) << 2;
+
+ Inst.addOperand(MCOperand::CreateImm(BranchOffset));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeBranchTarget26(MCInst &Inst,
+ unsigned Offset,
+ uint64_t Address,
+ const void *Decoder) {
+ int32_t BranchOffset = SignExtend32<26>(Offset) << 2;
+
+ Inst.addOperand(MCOperand::CreateImm(BranchOffset));
+ return MCDisassembler::Success;
+}
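These decoders sign-extend the raw offset field and convert it from instruction units to bytes; the 16-bit DecodeBranchTarget above additionally adds 4 because the offset is relative to the instruction after the branch. A standalone sketch of the arithmetic (illustrative only, not part of the patch):

#include <cassert>
#include <cstdint>

// Sign-extend the low N bits of V, similar in spirit to SignExtend32<N>.
template <unsigned N> static int32_t signExt(uint32_t V) {
  return int32_t(V << (32 - N)) >> (32 - N);
}

int main() {
  // 16-bit field 0xFFFF is -1 instruction: -1 * 4 + 4 = 0 bytes.
  assert(signExt<16>(0xFFFF) * 4 + 4 == 0);
  // 21-bit field 0x1FFFFF is -1 instruction: -4 bytes, with no +4 adjustment.
  assert(signExt<21>(0x1FFFFF) * 4 == -4);
  // 26-bit field 4 is +4 instructions: 16 bytes.
  assert(signExt<26>(4) * 4 == 16);
  return 0;
}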
+
static DecodeStatus DecodeBranchTargetMM(MCInst &Inst,
unsigned Offset,
uint64_t Address,
const void *Decoder) {
- unsigned BranchOffset = Offset & 0xffff;
- BranchOffset = SignExtend32<18>(BranchOffset << 1);
+ int32_t BranchOffset = SignExtend32<16>(Offset) << 1;
Inst.addOperand(MCOperand::CreateImm(BranchOffset));
return MCDisassembler::Success;
}
@@ -820,3 +1314,15 @@ static DecodeStatus DecodeExtSize(MCInst &Inst,
Inst.addOperand(MCOperand::CreateImm(SignExtend32<16>(Size)));
return MCDisassembler::Success;
}
+
+static DecodeStatus DecodeSimm19Lsl2(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ Inst.addOperand(MCOperand::CreateImm(SignExtend32<19>(Insn) << 2));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeSimm18Lsl3(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ Inst.addOperand(MCOperand::CreateImm(SignExtend32<18>(Insn) << 3));
+ return MCDisassembler::Success;
+}
diff --git a/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp b/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
index 7884589..8c79751 100644
--- a/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
#include "MipsInstPrinter.h"
+#include "MCTargetDesc/MipsMCExpr.h"
#include "MipsInstrInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/MC/MCExpr.h"
@@ -23,6 +23,8 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "asm-printer"
+
#define PRINT_ALIAS_INSTR
#include "MipsGenAsmWriter.inc"
@@ -83,6 +85,27 @@ void MipsInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
case Mips::RDHWR64:
O << "\t.set\tpush\n";
O << "\t.set\tmips32r2\n";
+ break;
+ case Mips::Save16:
+ O << "\tsave\t";
+ printSaveRestore(MI, O);
+ O << " # 16 bit inst\n";
+ return;
+ case Mips::SaveX16:
+ O << "\tsave\t";
+ printSaveRestore(MI, O);
+ O << "\n";
+ return;
+ case Mips::Restore16:
+ O << "\trestore\t";
+ printSaveRestore(MI, O);
+ O << " # 16 bit inst\n";
+ return;
+ case Mips::RestoreX16:
+ O << "\trestore\t";
+ printSaveRestore(MI, O);
+ O << "\n";
+ return;
}
// Try to print any aliases first.
@@ -108,8 +131,10 @@ static void printExpr(const MCExpr *Expr, raw_ostream &OS) {
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(BE->getRHS());
assert(SRE && CE && "Binary expression must be sym+const.");
Offset = CE->getValue();
- }
- else if (!(SRE = dyn_cast<MCSymbolRefExpr>(Expr)))
+ } else if (const MipsMCExpr *ME = dyn_cast<MipsMCExpr>(Expr)) {
+ ME->print(OS);
+ return;
+ } else if (!(SRE = dyn_cast<MCSymbolRefExpr>(Expr)))
assert(false && "Unexpected MCExpr type.");
MCSymbolRefExpr::VariantKind Kind = SRE->getKind();
@@ -141,6 +166,8 @@ static void printExpr(const MCExpr *Expr, raw_ostream &OS) {
case MCSymbolRefExpr::VK_Mips_GOT_LO16: OS << "%got_lo("; break;
case MCSymbolRefExpr::VK_Mips_CALL_HI16: OS << "%call_hi("; break;
case MCSymbolRefExpr::VK_Mips_CALL_LO16: OS << "%call_lo("; break;
+ case MCSymbolRefExpr::VK_Mips_PCREL_HI16: OS << "%pcrel_hi("; break;
+ case MCSymbolRefExpr::VK_Mips_PCREL_LO16: OS << "%pcrel_lo("; break;
}
OS << SRE->getSymbol();
@@ -286,3 +313,14 @@ bool MipsInstPrinter::printAlias(const MCInst &MI, raw_ostream &OS) {
default: return false;
}
}
+
+void MipsInstPrinter::printSaveRestore(const MCInst *MI, raw_ostream &O) {
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ if (i != 0) O << ", ";
+ if (MI->getOperand(i).isReg())
+ printRegName(O, MI->getOperand(i).getReg());
+ else
+ printUnsignedImm(MI, i, O);
+ }
+}
+
diff --git a/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.h b/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
index f75ae24..550a0f10 100644
--- a/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
+++ b/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
@@ -85,10 +85,12 @@ public:
void printInstruction(const MCInst *MI, raw_ostream &O);
static const char *getRegisterName(unsigned RegNo);
- virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
- virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
+ void printRegName(raw_ostream &OS, unsigned RegNo) const override;
+ void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot) override;
bool printAliasInstr(const MCInst *MI, raw_ostream &OS);
+ void printCustomAliasOperand(const MCInst *MI, unsigned OpIdx,
+ unsigned PrintMethodIdx, raw_ostream &O);
private:
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
@@ -104,6 +106,7 @@ private:
bool printAlias(const char *Str, const MCInst &MI, unsigned OpNo0,
unsigned OpNo1, raw_ostream &OS);
bool printAlias(const MCInst &MI, raw_ostream &OS);
+ void printSaveRestore(const MCInst *MI, raw_ostream &O);
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.cpp
new file mode 100644
index 0000000..5b0f950
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.cpp
@@ -0,0 +1,66 @@
+//===-- MipsABIFlagsSection.cpp - Mips ELF ABI Flags Section ---*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MipsABIFlagsSection.h"
+
+using namespace llvm;
+
+uint8_t MipsABIFlagsSection::getFpABIValue() {
+ switch (FpABI) {
+ case FpABIKind::ANY:
+ return Val_GNU_MIPS_ABI_FP_ANY;
+ case FpABIKind::XX:
+ return Val_GNU_MIPS_ABI_FP_XX;
+ case FpABIKind::S32:
+ return Val_GNU_MIPS_ABI_FP_DOUBLE;
+ case FpABIKind::S64:
+ if (Is32BitABI)
+ return OddSPReg ? Val_GNU_MIPS_ABI_FP_64 : Val_GNU_MIPS_ABI_FP_64A;
+ return Val_GNU_MIPS_ABI_FP_DOUBLE;
+ }
+
+ llvm_unreachable("unexpected fp abi value");
+}
+
+StringRef MipsABIFlagsSection::getFpABIString(FpABIKind Value) {
+ switch (Value) {
+ case FpABIKind::XX:
+ return "xx";
+ case FpABIKind::S32:
+ return "32";
+ case FpABIKind::S64:
+ return "64";
+ default:
+ llvm_unreachable("unsupported fp abi value");
+ }
+}
+
+uint8_t MipsABIFlagsSection::getCPR1SizeValue() {
+ if (FpABI == FpABIKind::XX)
+ return (uint8_t)AFL_REG_32;
+ return (uint8_t)CPR1Size;
+}
+
+namespace llvm {
+MCStreamer &operator<<(MCStreamer &OS, MipsABIFlagsSection &ABIFlagsSection) {
+ // Write out an Elf_Internal_ABIFlags_v0 struct
+ OS.EmitIntValue(ABIFlagsSection.getVersionValue(), 2); // version
+ OS.EmitIntValue(ABIFlagsSection.getISALevelValue(), 1); // isa_level
+ OS.EmitIntValue(ABIFlagsSection.getISARevisionValue(), 1); // isa_rev
+ OS.EmitIntValue(ABIFlagsSection.getGPRSizeValue(), 1); // gpr_size
+ OS.EmitIntValue(ABIFlagsSection.getCPR1SizeValue(), 1); // cpr1_size
+ OS.EmitIntValue(ABIFlagsSection.getCPR2SizeValue(), 1); // cpr2_size
+ OS.EmitIntValue(ABIFlagsSection.getFpABIValue(), 1); // fp_abi
+ OS.EmitIntValue(ABIFlagsSection.getISAExtensionSetValue(), 4); // isa_ext
+ OS.EmitIntValue(ABIFlagsSection.getASESetValue(), 4); // ases
+ OS.EmitIntValue(ABIFlagsSection.getFlags1Value(), 4); // flags1
+ OS.EmitIntValue(ABIFlagsSection.getFlags2Value(), 4); // flags2
+ return OS;
+}
+}
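With the field widths emitted above (2 + 1*6 + 4*4 bytes) the ABI-flags record comes to 24 bytes. A trivial standalone check of that layout arithmetic (illustrative only):

#include <cassert>

int main() {
  // Field widths in the order they are streamed out above.
  const unsigned Widths[] = {2, 1, 1, 1, 1, 1, 1, 4, 4, 4, 4};
  unsigned Total = 0;
  for (unsigned W : Widths)
    Total += W;
  assert(Total == 24); // expected size of the emitted record
  return 0;
}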
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h
new file mode 100644
index 0000000..ea5bc12
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h
@@ -0,0 +1,238 @@
+//===-- MipsABIFlagsSection.h - Mips ELF ABI Flags Section -----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MIPSABIFLAGSSECTION_H
+#define MIPSABIFLAGSSECTION_H
+
+#include "llvm/MC/MCStreamer.h"
+
+namespace llvm {
+
+class MCStreamer;
+
+struct MipsABIFlagsSection {
+ // Values for the xxx_size bytes of an ABI flags structure.
+ enum AFL_REG {
+ AFL_REG_NONE = 0x00, // No registers.
+ AFL_REG_32 = 0x01, // 32-bit registers.
+ AFL_REG_64 = 0x02, // 64-bit registers.
+ AFL_REG_128 = 0x03 // 128-bit registers.
+ };
+
+ // Masks for the ases word of an ABI flags structure.
+ enum AFL_ASE {
+ AFL_ASE_DSP = 0x00000001, // DSP ASE.
+ AFL_ASE_DSPR2 = 0x00000002, // DSP R2 ASE.
+ AFL_ASE_EVA = 0x00000004, // Enhanced VA Scheme.
+ AFL_ASE_MCU = 0x00000008, // MCU (MicroController) ASE.
+ AFL_ASE_MDMX = 0x00000010, // MDMX ASE.
+ AFL_ASE_MIPS3D = 0x00000020, // MIPS-3D ASE.
+ AFL_ASE_MT = 0x00000040, // MT ASE.
+ AFL_ASE_SMARTMIPS = 0x00000080, // SmartMIPS ASE.
+ AFL_ASE_VIRT = 0x00000100, // VZ ASE.
+ AFL_ASE_MSA = 0x00000200, // MSA ASE.
+ AFL_ASE_MIPS16 = 0x00000400, // MIPS16 ASE.
+ AFL_ASE_MICROMIPS = 0x00000800, // MICROMIPS ASE.
+ AFL_ASE_XPA = 0x00001000 // XPA ASE.
+ };
+
+ // Values for the isa_ext word of an ABI flags structure.
+ enum AFL_EXT {
+ AFL_EXT_XLR = 1, // RMI Xlr instruction.
+ AFL_EXT_OCTEON2 = 2, // Cavium Networks Octeon2.
+ AFL_EXT_OCTEONP = 3, // Cavium Networks OcteonP.
+ AFL_EXT_LOONGSON_3A = 4, // Loongson 3A.
+ AFL_EXT_OCTEON = 5, // Cavium Networks Octeon.
+ AFL_EXT_5900 = 6, // MIPS R5900 instruction.
+ AFL_EXT_4650 = 7, // MIPS R4650 instruction.
+ AFL_EXT_4010 = 8, // LSI R4010 instruction.
+ AFL_EXT_4100 = 9, // NEC VR4100 instruction.
+ AFL_EXT_3900 = 10, // Toshiba R3900 instruction.
+ AFL_EXT_10000 = 11, // MIPS R10000 instruction.
+ AFL_EXT_SB1 = 12, // Broadcom SB-1 instruction.
+ AFL_EXT_4111 = 13, // NEC VR4111/VR4181 instruction.
+ AFL_EXT_4120 = 14, // NEC VR4120 instruction.
+ AFL_EXT_5400 = 15, // NEC VR5400 instruction.
+ AFL_EXT_5500 = 16, // NEC VR5500 instruction.
+ AFL_EXT_LOONGSON_2E = 17, // ST Microelectronics Loongson 2E.
+ AFL_EXT_LOONGSON_2F = 18 // ST Microelectronics Loongson 2F.
+ };
+
+ // Values for the fp_abi word of an ABI flags structure.
+ enum Val_GNU_MIPS_ABI {
+ Val_GNU_MIPS_ABI_FP_ANY = 0,
+ Val_GNU_MIPS_ABI_FP_DOUBLE = 1,
+ Val_GNU_MIPS_ABI_FP_XX = 5,
+ Val_GNU_MIPS_ABI_FP_64 = 6,
+ Val_GNU_MIPS_ABI_FP_64A = 7
+ };
+
+ enum AFL_FLAGS1 {
+ AFL_FLAGS1_ODDSPREG = 1
+ };
+
+ // Internal representation of the values used in .module fp=value
+ enum class FpABIKind { ANY, XX, S32, S64 };
+
+ // Version of flags structure.
+ uint16_t Version;
+ // The level of the ISA: 1-5, 32, 64.
+ uint8_t ISALevel;
+ // The revision of ISA: 0 for MIPS V and below, 1-n otherwise.
+ uint8_t ISARevision;
+ // The size of general purpose registers.
+ AFL_REG GPRSize;
+ // The size of co-processor 1 registers.
+ AFL_REG CPR1Size;
+ // The size of co-processor 2 registers.
+ AFL_REG CPR2Size;
+ // Processor-specific extension.
+ uint32_t ISAExtensionSet;
+ // Mask of ASEs used.
+ uint32_t ASESet;
+
+ bool OddSPReg;
+
+ bool Is32BitABI;
+
+protected:
+ // The floating-point ABI.
+ FpABIKind FpABI;
+
+public:
+ MipsABIFlagsSection()
+ : Version(0), ISALevel(0), ISARevision(0), GPRSize(AFL_REG_NONE),
+ CPR1Size(AFL_REG_NONE), CPR2Size(AFL_REG_NONE), ISAExtensionSet(0),
+ ASESet(0), OddSPReg(false), Is32BitABI(false), FpABI(FpABIKind::ANY) {}
+
+ uint16_t getVersionValue() { return (uint16_t)Version; }
+ uint8_t getISALevelValue() { return (uint8_t)ISALevel; }
+ uint8_t getISARevisionValue() { return (uint8_t)ISARevision; }
+ uint8_t getGPRSizeValue() { return (uint8_t)GPRSize; }
+ uint8_t getCPR1SizeValue();
+ uint8_t getCPR2SizeValue() { return (uint8_t)CPR2Size; }
+ uint8_t getFpABIValue();
+ uint32_t getISAExtensionSetValue() { return (uint32_t)ISAExtensionSet; }
+ uint32_t getASESetValue() { return (uint32_t)ASESet; }
+
+ uint32_t getFlags1Value() {
+ uint32_t Value = 0;
+
+ if (OddSPReg)
+ Value |= (uint32_t)AFL_FLAGS1_ODDSPREG;
+
+ return Value;
+ }
+
+ uint32_t getFlags2Value() { return 0; }
+
+ FpABIKind getFpABI() { return FpABI; }
+ void setFpABI(FpABIKind Value, bool IsABI32Bit) {
+ FpABI = Value;
+ Is32BitABI = IsABI32Bit;
+ }
+ StringRef getFpABIString(FpABIKind Value);
+
+ template <class PredicateLibrary>
+ void setISALevelAndRevisionFromPredicates(const PredicateLibrary &P) {
+ if (P.hasMips64()) {
+ ISALevel = 64;
+ if (P.hasMips64r6())
+ ISARevision = 6;
+ else if (P.hasMips64r2())
+ ISARevision = 2;
+ else
+ ISARevision = 1;
+ } else if (P.hasMips32()) {
+ ISALevel = 32;
+ if (P.hasMips32r6())
+ ISARevision = 6;
+ else if (P.hasMips32r2())
+ ISARevision = 2;
+ else
+ ISARevision = 1;
+ } else {
+ ISARevision = 0;
+ if (P.hasMips5())
+ ISALevel = 5;
+ else if (P.hasMips4())
+ ISALevel = 4;
+ else if (P.hasMips3())
+ ISALevel = 3;
+ else if (P.hasMips2())
+ ISALevel = 2;
+ else if (P.hasMips1())
+ ISALevel = 1;
+ else
+ llvm_unreachable("Unknown ISA level!");
+ }
+ }
+
+ template <class PredicateLibrary>
+ void setGPRSizeFromPredicates(const PredicateLibrary &P) {
+ GPRSize = P.isGP64bit() ? AFL_REG_64 : AFL_REG_32;
+ }
+
+ template <class PredicateLibrary>
+ void setCPR1SizeFromPredicates(const PredicateLibrary &P) {
+ if (P.abiUsesSoftFloat())
+ CPR1Size = AFL_REG_NONE;
+ else if (P.hasMSA())
+ CPR1Size = AFL_REG_128;
+ else
+ CPR1Size = P.isFP64bit() ? AFL_REG_64 : AFL_REG_32;
+ }
+
+ template <class PredicateLibrary>
+ void setASESetFromPredicates(const PredicateLibrary &P) {
+ ASESet = 0;
+ if (P.hasDSP())
+ ASESet |= AFL_ASE_DSP;
+ if (P.hasDSPR2())
+ ASESet |= AFL_ASE_DSPR2;
+ if (P.hasMSA())
+ ASESet |= AFL_ASE_MSA;
+ if (P.inMicroMipsMode())
+ ASESet |= AFL_ASE_MICROMIPS;
+ if (P.inMips16Mode())
+ ASESet |= AFL_ASE_MIPS16;
+ }
+
+ template <class PredicateLibrary>
+ void setFpAbiFromPredicates(const PredicateLibrary &P) {
+ Is32BitABI = P.isABI_O32();
+
+ FpABI = FpABIKind::ANY;
+ if (P.isABI_N32() || P.isABI_N64())
+ FpABI = FpABIKind::S64;
+ else if (P.isABI_O32()) {
+ if (P.isABI_FPXX())
+ FpABI = FpABIKind::XX;
+ else if (P.isFP64bit())
+ FpABI = FpABIKind::S64;
+ else
+ FpABI = FpABIKind::S32;
+ }
+ }
+
+ template <class PredicateLibrary>
+ void setAllFromPredicates(const PredicateLibrary &P) {
+ setISALevelAndRevisionFromPredicates(P);
+ setGPRSizeFromPredicates(P);
+ setCPR1SizeFromPredicates(P);
+ setASESetFromPredicates(P);
+ setFpAbiFromPredicates(P);
+ OddSPReg = P.useOddSPReg();
+ }
+};
+
+MCStreamer &operator<<(MCStreamer &OS, MipsABIFlagsSection &ABIFlagsSection);
+}
+
+#endif
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
index 3e70b23..d8e6128 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
@@ -1,4 +1,4 @@
-//===-- MipsASMBackend.cpp - Mips Asm Backend ----------------------------===//
+//===-- MipsAsmBackend.cpp - Mips Asm Backend ----------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,32 +7,39 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements the MipsAsmBackend and MipsELFObjectWriter classes.
+// This file implements the MipsAsmBackend class.
//
//===----------------------------------------------------------------------===//
//
-#include "MipsFixupKinds.h"
+#include "MCTargetDesc/MipsFixupKinds.h"
+#include "MCTargetDesc/MipsAsmBackend.h"
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
// Prepare the value for the target space.
-static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
+static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
+ MCContext *Ctx = nullptr) {
+
+ unsigned Kind = Fixup.getKind();
// Add/subtract and shift
switch (Kind) {
default:
return 0;
+ case FK_Data_2:
case FK_GPRel_4:
case FK_Data_4:
case FK_Data_8:
@@ -49,6 +56,7 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
case Mips::fixup_MICROMIPS_GOT_PAGE:
case Mips::fixup_MICROMIPS_GOT_OFST:
case Mips::fixup_MICROMIPS_GOT_DISP:
+ case Mips::fixup_MIPS_PCLO16:
break;
case Mips::fixup_Mips_PC16:
// So far we are only using this type for branches.
@@ -56,8 +64,18 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
// so the displacement will be one instruction size less.
Value -= 4;
// The displacement is then divided by 4 to give us an 18 bit
- // address range.
- Value >>= 2;
+ // address range. Forcing a signed division because Value can be negative.
+ Value = (int64_t)Value / 4;
+ // We now check if Value can be encoded as a 16-bit signed immediate.
+ if (!isIntN(16, Value) && Ctx)
+ Ctx->FatalError(Fixup.getLoc(), "out of range PC16 fixup");
+ break;
+ case Mips::fixup_MIPS_PC19_S2:
+ // Forcing a signed division because Value can be negative.
+ Value = (int64_t)Value / 4;
+ // We now check if Value can be encoded as a 19-bit signed immediate.
+ if (!isIntN(19, Value) && Ctx)
+ Ctx->FatalError(Fixup.getLoc(), "out of range PC19 fixup");
break;
case Mips::fixup_Mips_26:
// So far we are only using this type for jumps.
@@ -70,6 +88,7 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
case Mips::fixup_Mips_GOT_HI16:
case Mips::fixup_Mips_CALL_HI16:
case Mips::fixup_MICROMIPS_HI16:
+ case Mips::fixup_MIPS_PCHI16:
// Get the 2nd 16-bits. Also add 1 if bit 15 is 1.
Value = ((Value + 0x8000) >> 16) & 0xffff;
break;
@@ -86,196 +105,291 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
break;
case Mips::fixup_MICROMIPS_PC16_S1:
Value -= 4;
- Value >>= 1;
+ // Forcing a signed division because Value can be negative.
+ Value = (int64_t)Value / 2;
+ // We now check if Value can be encoded as a 16-bit signed immediate.
+ if (!isIntN(16, Value) && Ctx)
+ Ctx->FatalError(Fixup.getLoc(), "out of range PC16 fixup");
+ break;
+ case Mips::fixup_MIPS_PC18_S3:
+ // Forcing a signed division because Value can be negative.
+ Value = (int64_t)Value / 8;
+ // We now check if Value can be encoded as an 18-bit signed immediate.
+ if (!isIntN(18, Value) && Ctx)
+ Ctx->FatalError(Fixup.getLoc(), "out of range PC18 fixup");
+ break;
+ case Mips::fixup_MIPS_PC21_S2:
+ Value -= 4;
+ // Forcing a signed division because Value can be negative.
+ Value = (int64_t) Value / 4;
+ // We now check if Value can be encoded as a 21-bit signed immediate.
+ if (!isIntN(21, Value) && Ctx)
+ Ctx->FatalError(Fixup.getLoc(), "out of range PC21 fixup");
+ break;
+ case Mips::fixup_MIPS_PC26_S2:
+ Value -= 4;
+ // Forcing a signed division because Value can be negative.
+ Value = (int64_t) Value / 4;
+ // We now check if Value can be encoded as a 26-bit signed immediate.
+ if (!isIntN(26, Value) && Ctx)
+ Ctx->FatalError(Fixup.getLoc(), "out of range PC26 fixup");
break;
}
return Value;
}
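For the PC-relative fixups the routine subtracts the delay-slot adjustment where needed, divides by the instruction granularity using signed arithmetic, and then range-checks the result. A standalone sketch of the PC16 case (illustrative only; the helper below is made up and stands in for isIntN):

#include <cassert>
#include <cstdint>

// True when V fits in a signed N-bit immediate.
static bool fitsSigned(int64_t V, unsigned N) {
  return V >= -(int64_t(1) << (N - 1)) && V < (int64_t(1) << (N - 1));
}

int main() {
  int64_t Value = 0x20;    // raw byte displacement
  Value = (Value - 4) / 4; // delay-slot adjustment, then word units
  assert(Value == 7 && fitsSigned(Value, 16));

  int64_t Far = int64_t(1) << 20; // far beyond the reach of a PC16 branch
  assert(!fitsSigned((Far - 4) / 4, 16)); // would hit the FatalError above
  return 0;
}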
-namespace {
-class MipsAsmBackend : public MCAsmBackend {
- Triple::OSType OSType;
- bool IsLittle; // Big or little endian
- bool Is64Bit; // 32 or 64 bit words
+MCObjectWriter *MipsAsmBackend::createObjectWriter(raw_ostream &OS) const {
+ return createMipsELFObjectWriter(OS,
+ MCELFObjectTargetWriter::getOSABI(OSType), IsLittle, Is64Bit);
+}
-public:
- MipsAsmBackend(const Target &T, Triple::OSType _OSType,
- bool _isLittle, bool _is64Bit)
- :MCAsmBackend(), OSType(_OSType), IsLittle(_isLittle), Is64Bit(_is64Bit) {}
+// Little-endian fixup data byte ordering:
+// mips32r2: a | b | x | x
+// microMIPS: x | x | a | b
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
- return createMipsELFObjectWriter(OS,
- MCELFObjectTargetWriter::getOSABI(OSType), IsLittle, Is64Bit);
- }
+static bool needsMMLEByteOrder(unsigned Kind) {
+ return Kind >= Mips::fixup_MICROMIPS_26_S1 &&
+ Kind < Mips::LastTargetFixupKind;
+}
- /// ApplyFixup - Apply the \p Value for given \p Fixup into the provided
- /// data fragment, at the offset specified by the fixup and following the
- /// fixup kind as appropriate.
- void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
- uint64_t Value) const {
- MCFixupKind Kind = Fixup.getKind();
- Value = adjustFixupValue((unsigned)Kind, Value);
-
- if (!Value)
- return; // Doesn't change encoding.
-
- // Where do we start in the object
- unsigned Offset = Fixup.getOffset();
- // Number of bytes we need to fixup
- unsigned NumBytes = (getFixupKindInfo(Kind).TargetSize + 7) / 8;
- // Used to point to big endian bytes
- unsigned FullSize;
-
- switch ((unsigned)Kind) {
- case Mips::fixup_Mips_16:
- FullSize = 2;
- break;
- case Mips::fixup_Mips_64:
- FullSize = 8;
- break;
- default:
- FullSize = 4;
- break;
- }
-
- // Grab current value, if any, from bits.
- uint64_t CurVal = 0;
-
- for (unsigned i = 0; i != NumBytes; ++i) {
- unsigned Idx = IsLittle ? i : (FullSize - 1 - i);
- CurVal |= (uint64_t)((uint8_t)Data[Offset + Idx]) << (i*8);
- }
-
- uint64_t Mask = ((uint64_t)(-1) >>
- (64 - getFixupKindInfo(Kind).TargetSize));
- CurVal |= Value & Mask;
-
- // Write out the fixed up bytes back to the code/data bits.
- for (unsigned i = 0; i != NumBytes; ++i) {
- unsigned Idx = IsLittle ? i : (FullSize - 1 - i);
- Data[Offset + Idx] = (uint8_t)((CurVal >> (i*8)) & 0xff);
- }
- }
+// Calculate the index for the microMIPS-specific little-endian byte order
+static unsigned calculateMMLEIndex(unsigned i) {
+ assert(i <= 3 && "Index out of range!");
+
+ return (1 - i / 2) * 2 + i % 2;
+}
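A quick self-contained check of the mapping computed by calculateMMLEIndex (names invented for this illustration), confirming that it swaps the two 16-bit halves exactly as the byte-ordering comment above describes:

#include <cassert>

// Same arithmetic as calculateMMLEIndex, reproduced for illustration.
static unsigned mmleIndex(unsigned i) { return (1 - i / 2) * 2 + i % 2; }

int main() {
  // Bytes 0 and 1 of the value land in stream bytes 2 and 3 and vice versa,
  // i.e. "x | x | a | b" instead of the plain little-endian "a | b | x | x".
  assert(mmleIndex(0) == 2 && mmleIndex(1) == 3);
  assert(mmleIndex(2) == 0 && mmleIndex(3) == 1);
  return 0;
}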
- unsigned getNumFixupKinds() const { return Mips::NumTargetFixupKinds; }
-
- const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
- const static MCFixupKindInfo Infos[Mips::NumTargetFixupKinds] = {
- // This table *must* be in same the order of fixup_* kinds in
- // MipsFixupKinds.h.
- //
- // name offset bits flags
- { "fixup_Mips_16", 0, 16, 0 },
- { "fixup_Mips_32", 0, 32, 0 },
- { "fixup_Mips_REL32", 0, 32, 0 },
- { "fixup_Mips_26", 0, 26, 0 },
- { "fixup_Mips_HI16", 0, 16, 0 },
- { "fixup_Mips_LO16", 0, 16, 0 },
- { "fixup_Mips_GPREL16", 0, 16, 0 },
- { "fixup_Mips_LITERAL", 0, 16, 0 },
- { "fixup_Mips_GOT_Global", 0, 16, 0 },
- { "fixup_Mips_GOT_Local", 0, 16, 0 },
- { "fixup_Mips_PC16", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
- { "fixup_Mips_CALL16", 0, 16, 0 },
- { "fixup_Mips_GPREL32", 0, 32, 0 },
- { "fixup_Mips_SHIFT5", 6, 5, 0 },
- { "fixup_Mips_SHIFT6", 6, 5, 0 },
- { "fixup_Mips_64", 0, 64, 0 },
- { "fixup_Mips_TLSGD", 0, 16, 0 },
- { "fixup_Mips_GOTTPREL", 0, 16, 0 },
- { "fixup_Mips_TPREL_HI", 0, 16, 0 },
- { "fixup_Mips_TPREL_LO", 0, 16, 0 },
- { "fixup_Mips_TLSLDM", 0, 16, 0 },
- { "fixup_Mips_DTPREL_HI", 0, 16, 0 },
- { "fixup_Mips_DTPREL_LO", 0, 16, 0 },
- { "fixup_Mips_Branch_PCRel", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
- { "fixup_Mips_GPOFF_HI", 0, 16, 0 },
- { "fixup_Mips_GPOFF_LO", 0, 16, 0 },
- { "fixup_Mips_GOT_PAGE", 0, 16, 0 },
- { "fixup_Mips_GOT_OFST", 0, 16, 0 },
- { "fixup_Mips_GOT_DISP", 0, 16, 0 },
- { "fixup_Mips_HIGHER", 0, 16, 0 },
- { "fixup_Mips_HIGHEST", 0, 16, 0 },
- { "fixup_Mips_GOT_HI16", 0, 16, 0 },
- { "fixup_Mips_GOT_LO16", 0, 16, 0 },
- { "fixup_Mips_CALL_HI16", 0, 16, 0 },
- { "fixup_Mips_CALL_LO16", 0, 16, 0 },
- { "fixup_MICROMIPS_26_S1", 0, 26, 0 },
- { "fixup_MICROMIPS_HI16", 0, 16, 0 },
- { "fixup_MICROMIPS_LO16", 0, 16, 0 },
- { "fixup_MICROMIPS_GOT16", 0, 16, 0 },
- { "fixup_MICROMIPS_PC16_S1", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
- { "fixup_MICROMIPS_CALL16", 0, 16, 0 },
- { "fixup_MICROMIPS_GOT_DISP", 0, 16, 0 },
- { "fixup_MICROMIPS_GOT_PAGE", 0, 16, 0 },
- { "fixup_MICROMIPS_GOT_OFST", 0, 16, 0 },
- { "fixup_MICROMIPS_TLS_DTPREL_HI16", 0, 16, 0 },
- { "fixup_MICROMIPS_TLS_DTPREL_LO16", 0, 16, 0 },
- { "fixup_MICROMIPS_TLS_TPREL_HI16", 0, 16, 0 },
- { "fixup_MICROMIPS_TLS_TPREL_LO16", 0, 16, 0 }
- };
-
- if (Kind < FirstTargetFixupKind)
- return MCAsmBackend::getFixupKindInfo(Kind);
-
- assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
- "Invalid kind!");
- return Infos[Kind - FirstTargetFixupKind];
+/// ApplyFixup - Apply the \p Value for the given \p Fixup into the provided
+/// data fragment, at the offset specified by the fixup and following the
+/// fixup kind as appropriate.
+void MipsAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
+ unsigned DataSize, uint64_t Value,
+ bool IsPCRel) const {
+ MCFixupKind Kind = Fixup.getKind();
+ Value = adjustFixupValue(Fixup, Value);
+
+ if (!Value)
+ return; // Doesn't change encoding.
+
+ // Where do we start in the object
+ unsigned Offset = Fixup.getOffset();
+ // Number of bytes we need to fixup
+ unsigned NumBytes = (getFixupKindInfo(Kind).TargetSize + 7) / 8;
+ // Used to point to big endian bytes
+ unsigned FullSize;
+
+ switch ((unsigned)Kind) {
+ case FK_Data_2:
+ case Mips::fixup_Mips_16:
+ FullSize = 2;
+ break;
+ case FK_Data_8:
+ case Mips::fixup_Mips_64:
+ FullSize = 8;
+ break;
+ case FK_Data_4:
+ default:
+ FullSize = 4;
+ break;
}
- /// @name Target Relaxation Interfaces
- /// @{
+ // Grab current value, if any, from bits.
+ uint64_t CurVal = 0;
- /// MayNeedRelaxation - Check whether the given instruction may need
- /// relaxation.
- ///
- /// \param Inst - The instruction to test.
- bool mayNeedRelaxation(const MCInst &Inst) const {
- return false;
- }
+ bool microMipsLEByteOrder = needsMMLEByteOrder((unsigned) Kind);
- /// fixupNeedsRelaxation - Target specific predicate for whether a given
- /// fixup requires the associated instruction to be relaxed.
- bool fixupNeedsRelaxation(const MCFixup &Fixup,
- uint64_t Value,
- const MCRelaxableFragment *DF,
- const MCAsmLayout &Layout) const {
- // FIXME.
- assert(0 && "RelaxInstruction() unimplemented");
- return false;
+ for (unsigned i = 0; i != NumBytes; ++i) {
+ unsigned Idx = IsLittle ? (microMipsLEByteOrder ? calculateMMLEIndex(i)
+ : i)
+ : (FullSize - 1 - i);
+ CurVal |= (uint64_t)((uint8_t)Data[Offset + Idx]) << (i*8);
}
- /// RelaxInstruction - Relax the instruction in the given fragment
- /// to the next wider instruction.
- ///
- /// \param Inst - The instruction to relax, which may be the same
- /// as the output.
- /// \param [out] Res On return, the relaxed instruction.
- void relaxInstruction(const MCInst &Inst, MCInst &Res) const {
- }
+ uint64_t Mask = ((uint64_t)(-1) >>
+ (64 - getFixupKindInfo(Kind).TargetSize));
+ CurVal |= Value & Mask;
- /// @}
-
- /// WriteNopData - Write an (optimal) nop sequence of Count bytes
- /// to the given output. If the target cannot generate such a sequence,
- /// it should return an error.
- ///
- /// \return - True on success.
- bool writeNopData(uint64_t Count, MCObjectWriter *OW) const {
- // Check for a less than instruction size number of bytes
- // FIXME: 16 bit instructions are not handled yet here.
- // We shouldn't be using a hard coded number for instruction size.
- if (Count % 4) return false;
-
- uint64_t NumNops = Count / 4;
- for (uint64_t i = 0; i != NumNops; ++i)
- OW->Write32(0);
- return true;
+ // Write out the fixed up bytes back to the code/data bits.
+ for (unsigned i = 0; i != NumBytes; ++i) {
+ unsigned Idx = IsLittle ? (microMipsLEByteOrder ? calculateMMLEIndex(i)
+ : i)
+ : (FullSize - 1 - i);
+ Data[Offset + Idx] = (uint8_t)((CurVal >> (i*8)) & 0xff);
}
-}; // class MipsAsmBackend
+}
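The read-modify-write pattern in applyFixup reduces to the following sketch for a plain little-endian 16-bit fixup (patchLO16 is an invented name; this is an illustration, not the backend code):

#include <cstdint>

// Merge a 16-bit fixup value into the low half of a little-endian
// instruction word, following the same read/mask/write steps as above.
static void patchLO16(uint8_t *Data, uint64_t Value) {
  uint64_t CurVal = 0;
  for (unsigned i = 0; i != 2; ++i)            // read the existing bytes
    CurVal |= (uint64_t)Data[i] << (i * 8);
  uint64_t Mask = ~uint64_t(0) >> (64 - 16);   // keep only the 16-bit field
  CurVal |= Value & Mask;                      // OR in the fixup value
  for (unsigned i = 0; i != 2; ++i)            // write the bytes back
    Data[i] = (uint8_t)((CurVal >> (i * 8)) & 0xff);
}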
+
+const MCFixupKindInfo &MipsAsmBackend::
+getFixupKindInfo(MCFixupKind Kind) const {
+ const static MCFixupKindInfo LittleEndianInfos[Mips::NumTargetFixupKinds] = {
+ // This table *must* be in the same order as the fixup_* kinds in
+ // MipsFixupKinds.h.
+ //
+ // name offset bits flags
+ { "fixup_Mips_16", 0, 16, 0 },
+ { "fixup_Mips_32", 0, 32, 0 },
+ { "fixup_Mips_REL32", 0, 32, 0 },
+ { "fixup_Mips_26", 0, 26, 0 },
+ { "fixup_Mips_HI16", 0, 16, 0 },
+ { "fixup_Mips_LO16", 0, 16, 0 },
+ { "fixup_Mips_GPREL16", 0, 16, 0 },
+ { "fixup_Mips_LITERAL", 0, 16, 0 },
+ { "fixup_Mips_GOT_Global", 0, 16, 0 },
+ { "fixup_Mips_GOT_Local", 0, 16, 0 },
+ { "fixup_Mips_PC16", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Mips_CALL16", 0, 16, 0 },
+ { "fixup_Mips_GPREL32", 0, 32, 0 },
+ { "fixup_Mips_SHIFT5", 6, 5, 0 },
+ { "fixup_Mips_SHIFT6", 6, 5, 0 },
+ { "fixup_Mips_64", 0, 64, 0 },
+ { "fixup_Mips_TLSGD", 0, 16, 0 },
+ { "fixup_Mips_GOTTPREL", 0, 16, 0 },
+ { "fixup_Mips_TPREL_HI", 0, 16, 0 },
+ { "fixup_Mips_TPREL_LO", 0, 16, 0 },
+ { "fixup_Mips_TLSLDM", 0, 16, 0 },
+ { "fixup_Mips_DTPREL_HI", 0, 16, 0 },
+ { "fixup_Mips_DTPREL_LO", 0, 16, 0 },
+ { "fixup_Mips_Branch_PCRel", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Mips_GPOFF_HI", 0, 16, 0 },
+ { "fixup_Mips_GPOFF_LO", 0, 16, 0 },
+ { "fixup_Mips_GOT_PAGE", 0, 16, 0 },
+ { "fixup_Mips_GOT_OFST", 0, 16, 0 },
+ { "fixup_Mips_GOT_DISP", 0, 16, 0 },
+ { "fixup_Mips_HIGHER", 0, 16, 0 },
+ { "fixup_Mips_HIGHEST", 0, 16, 0 },
+ { "fixup_Mips_GOT_HI16", 0, 16, 0 },
+ { "fixup_Mips_GOT_LO16", 0, 16, 0 },
+ { "fixup_Mips_CALL_HI16", 0, 16, 0 },
+ { "fixup_Mips_CALL_LO16", 0, 16, 0 },
+ { "fixup_Mips_PC18_S3", 0, 18, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MIPS_PC19_S2", 0, 19, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MIPS_PC21_S2", 0, 21, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MIPS_PC26_S2", 0, 26, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MIPS_PCHI16", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MIPS_PCLO16", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MICROMIPS_26_S1", 0, 26, 0 },
+ { "fixup_MICROMIPS_HI16", 0, 16, 0 },
+ { "fixup_MICROMIPS_LO16", 0, 16, 0 },
+ { "fixup_MICROMIPS_GOT16", 0, 16, 0 },
+ { "fixup_MICROMIPS_PC16_S1", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MICROMIPS_CALL16", 0, 16, 0 },
+ { "fixup_MICROMIPS_GOT_DISP", 0, 16, 0 },
+ { "fixup_MICROMIPS_GOT_PAGE", 0, 16, 0 },
+ { "fixup_MICROMIPS_GOT_OFST", 0, 16, 0 },
+ { "fixup_MICROMIPS_TLS_GD", 0, 16, 0 },
+ { "fixup_MICROMIPS_TLS_LDM", 0, 16, 0 },
+ { "fixup_MICROMIPS_TLS_DTPREL_HI16", 0, 16, 0 },
+ { "fixup_MICROMIPS_TLS_DTPREL_LO16", 0, 16, 0 },
+ { "fixup_MICROMIPS_TLS_TPREL_HI16", 0, 16, 0 },
+ { "fixup_MICROMIPS_TLS_TPREL_LO16", 0, 16, 0 }
+ };
+
+ const static MCFixupKindInfo BigEndianInfos[Mips::NumTargetFixupKinds] = {
+ // This table *must* be in the same order as the fixup_* kinds in
+ // MipsFixupKinds.h.
+ //
+ // name offset bits flags
+ { "fixup_Mips_16", 16, 16, 0 },
+ { "fixup_Mips_32", 0, 32, 0 },
+ { "fixup_Mips_REL32", 0, 32, 0 },
+ { "fixup_Mips_26", 6, 26, 0 },
+ { "fixup_Mips_HI16", 16, 16, 0 },
+ { "fixup_Mips_LO16", 16, 16, 0 },
+ { "fixup_Mips_GPREL16", 16, 16, 0 },
+ { "fixup_Mips_LITERAL", 16, 16, 0 },
+ { "fixup_Mips_GOT_Global", 16, 16, 0 },
+ { "fixup_Mips_GOT_Local", 16, 16, 0 },
+ { "fixup_Mips_PC16", 16, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Mips_CALL16", 16, 16, 0 },
+ { "fixup_Mips_GPREL32", 0, 32, 0 },
+ { "fixup_Mips_SHIFT5", 21, 5, 0 },
+ { "fixup_Mips_SHIFT6", 21, 5, 0 },
+ { "fixup_Mips_64", 0, 64, 0 },
+ { "fixup_Mips_TLSGD", 16, 16, 0 },
+ { "fixup_Mips_GOTTPREL", 16, 16, 0 },
+ { "fixup_Mips_TPREL_HI", 16, 16, 0 },
+ { "fixup_Mips_TPREL_LO", 16, 16, 0 },
+ { "fixup_Mips_TLSLDM", 16, 16, 0 },
+ { "fixup_Mips_DTPREL_HI", 16, 16, 0 },
+ { "fixup_Mips_DTPREL_LO", 16, 16, 0 },
+ { "fixup_Mips_Branch_PCRel",16, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Mips_GPOFF_HI", 16, 16, 0 },
+ { "fixup_Mips_GPOFF_LO", 16, 16, 0 },
+ { "fixup_Mips_GOT_PAGE", 16, 16, 0 },
+ { "fixup_Mips_GOT_OFST", 16, 16, 0 },
+ { "fixup_Mips_GOT_DISP", 16, 16, 0 },
+ { "fixup_Mips_HIGHER", 16, 16, 0 },
+ { "fixup_Mips_HIGHEST", 16, 16, 0 },
+ { "fixup_Mips_GOT_HI16", 16, 16, 0 },
+ { "fixup_Mips_GOT_LO16", 16, 16, 0 },
+ { "fixup_Mips_CALL_HI16", 16, 16, 0 },
+ { "fixup_Mips_CALL_LO16", 16, 16, 0 },
+ { "fixup_Mips_PC18_S3", 14, 18, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MIPS_PC19_S2", 13, 19, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MIPS_PC21_S2", 11, 21, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MIPS_PC26_S2", 6, 26, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MIPS_PCHI16", 16, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MIPS_PCLO16", 16, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MICROMIPS_26_S1", 6, 26, 0 },
+ { "fixup_MICROMIPS_HI16", 16, 16, 0 },
+ { "fixup_MICROMIPS_LO16", 16, 16, 0 },
+ { "fixup_MICROMIPS_GOT16", 16, 16, 0 },
+ { "fixup_MICROMIPS_PC16_S1",16, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MICROMIPS_CALL16", 16, 16, 0 },
+ { "fixup_MICROMIPS_GOT_DISP", 16, 16, 0 },
+ { "fixup_MICROMIPS_GOT_PAGE", 16, 16, 0 },
+ { "fixup_MICROMIPS_GOT_OFST", 16, 16, 0 },
+ { "fixup_MICROMIPS_TLS_GD", 16, 16, 0 },
+ { "fixup_MICROMIPS_TLS_LDM", 16, 16, 0 },
+ { "fixup_MICROMIPS_TLS_DTPREL_HI16", 16, 16, 0 },
+ { "fixup_MICROMIPS_TLS_DTPREL_LO16", 16, 16, 0 },
+ { "fixup_MICROMIPS_TLS_TPREL_HI16", 16, 16, 0 },
+ { "fixup_MICROMIPS_TLS_TPREL_LO16", 16, 16, 0 }
+ };
+
+ if (Kind < FirstTargetFixupKind)
+ return MCAsmBackend::getFixupKindInfo(Kind);
+
+ assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
+ "Invalid kind!");
+
+ if (IsLittle)
+ return LittleEndianInfos[Kind - FirstTargetFixupKind];
+ return BigEndianInfos[Kind - FirstTargetFixupKind];
+}
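The two tables differ only in each entry's TargetOffset: the little-endian table counts bits from the least-significant end of the container, the big-endian table from the most-significant end. For the 32-bit fixups above the relation is (illustrative helper, not part of the backend):

// Big-endian offset as the mirror image of the little-endian one, e.g.
// fixup_Mips_26: 32 - 0 - 26 == 6, fixup_Mips_SHIFT5: 32 - 6 - 5 == 21,
// matching the corresponding rows of the two tables above.
static unsigned beOffsetFor32(unsigned LEOffset, unsigned TargetSize) {
  return 32 - LEOffset - TargetSize;
}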
-} // namespace
+/// WriteNopData - Write an (optimal) nop sequence of Count bytes
+/// to the given output. If the target cannot generate such a sequence,
+/// it should return an error.
+///
+/// \return - True on success.
+bool MipsAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
+ // Check for a byte count smaller than the instruction size.
+ // FIXME: 16 bit instructions are not handled yet here.
+ // We shouldn't be using a hard coded number for instruction size.
+ if (Count % 4) return false;
+
+ uint64_t NumNops = Count / 4;
+ for (uint64_t i = 0; i != NumNops; ++i)
+ OW->Write32(0);
+ return true;
+}
+
+/// processFixupValue - Target hook to process the literal value of a fixup
+/// if necessary.
+void MipsAsmBackend::processFixupValue(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFixup &Fixup,
+ const MCFragment *DF,
+ const MCValue &Target,
+ uint64_t &Value,
+ bool &IsResolved) {
+ // At this point we'll ignore the value returned by adjustFixupValue as
+ // we are only checking if the fixup can be applied correctly. We have
+ // access to MCContext from here which allows us to report a fatal error
+ // with *possibly* a source code location.
+ (void)adjustFixupValue(Fixup, Value, &Asm.getContext());
+}
// MCAsmBackend
MCAsmBackend *llvm::createMipsAsmBackendEL32(const Target &T,
@@ -309,4 +423,3 @@ MCAsmBackend *llvm::createMipsAsmBackendEB64(const Target &T,
return new MipsAsmBackend(T, Triple(TT).getOS(),
/*IsLittle*/false, /*Is64Bit*/true);
}
-
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h
new file mode 100644
index 0000000..d5c3dbc
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h
@@ -0,0 +1,93 @@
+//===-- MipsAsmBackend.h - Mips Asm Backend ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MipsAsmBackend class.
+//
+//===----------------------------------------------------------------------===//
+//
+
+#ifndef MIPSASMBACKEND_H
+#define MIPSASMBACKEND_H
+
+#include "MCTargetDesc/MipsFixupKinds.h"
+#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/ADT/Triple.h"
+
+namespace llvm {
+
+class MCAssembler;
+struct MCFixupKindInfo;
+class Target;
+class MCObjectWriter;
+
+class MipsAsmBackend : public MCAsmBackend {
+ Triple::OSType OSType;
+ bool IsLittle; // Big or little endian
+ bool Is64Bit; // 32 or 64 bit words
+
+public:
+ MipsAsmBackend(const Target &T, Triple::OSType _OSType, bool _isLittle,
+ bool _is64Bit)
+ : MCAsmBackend(), OSType(_OSType), IsLittle(_isLittle),
+ Is64Bit(_is64Bit) {}
+
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override;
+
+ void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ uint64_t Value, bool IsPCRel) const override;
+
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
+
+ unsigned getNumFixupKinds() const override {
+ return Mips::NumTargetFixupKinds;
+ }
+
+ /// @name Target Relaxation Interfaces
+ /// @{
+
+ /// MayNeedRelaxation - Check whether the given instruction may need
+ /// relaxation.
+ ///
+ /// \param Inst - The instruction to test.
+ bool mayNeedRelaxation(const MCInst &Inst) const override {
+ return false;
+ }
+
+ /// fixupNeedsRelaxation - Target specific predicate for whether a given
+ /// fixup requires the associated instruction to be relaxed.
+ bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout) const override {
+ // FIXME.
+ llvm_unreachable("RelaxInstruction() unimplemented");
+ return false;
+ }
+
+ /// RelaxInstruction - Relax the instruction in the given fragment
+ /// to the next wider instruction.
+ ///
+ /// \param Inst - The instruction to relax, which may be the same
+ /// as the output.
+ /// \param [out] Res On return, the relaxed instruction.
+ void relaxInstruction(const MCInst &Inst, MCInst &Res) const override {}
+
+ /// @}
+
+ bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
+
+ void processFixupValue(const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFixup &Fixup, const MCFragment *DF,
+ const MCValue &Target, uint64_t &Value,
+ bool &IsResolved) override;
+
+}; // class MipsAsmBackend
+
+} // namespace
+
+#endif
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
index 7a55efd..d2323dc 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
@@ -120,34 +120,6 @@ namespace MipsII {
FormMask = 15
};
}
-
-inline static std::pair<const MCSymbolRefExpr*, int64_t>
-MipsGetSymAndOffset(const MCFixup &Fixup) {
- MCFixupKind FixupKind = Fixup.getKind();
-
- if ((FixupKind < FirstTargetFixupKind) ||
- (FixupKind >= MCFixupKind(Mips::LastTargetFixupKind)))
- return std::make_pair((const MCSymbolRefExpr*)0, (int64_t)0);
-
- const MCExpr *Expr = Fixup.getValue();
- MCExpr::ExprKind Kind = Expr->getKind();
-
- if (Kind == MCExpr::Binary) {
- const MCBinaryExpr *BE = static_cast<const MCBinaryExpr*>(Expr);
- const MCExpr *LHS = BE->getLHS();
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(BE->getRHS());
-
- if ((LHS->getKind() != MCExpr::SymbolRef) || !CE)
- return std::make_pair((const MCSymbolRefExpr*)0, (int64_t)0);
-
- return std::make_pair(cast<MCSymbolRefExpr>(LHS), CE->getValue());
- }
-
- if (Kind != MCExpr::SymbolRef)
- return std::make_pair((const MCSymbolRefExpr*)0, (int64_t)0);
-
- return std::make_pair(cast<MCSymbolRefExpr>(Expr), 0);
-}
}
#endif
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
index 83c7d4b..4ea7846 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
@@ -21,17 +21,6 @@
using namespace llvm;
namespace {
- struct RelEntry {
- RelEntry(const ELFRelocationEntry &R, const MCSymbol *S, int64_t O) :
- Reloc(R), Sym(S), Offset(O) {}
- ELFRelocationEntry Reloc;
- const MCSymbol *Sym;
- int64_t Offset;
- };
-
- typedef std::list<RelEntry> RelLs;
- typedef RelLs::iterator RelLsIter;
-
class MipsELFObjectWriter : public MCELFObjectTargetWriter {
public:
MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI,
@@ -39,16 +28,10 @@ namespace {
virtual ~MipsELFObjectWriter();
- virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel, bool IsRelocWithSymbol,
- int64_t Addend) const;
- virtual const MCSymbol *ExplicitRelSym(const MCAssembler &Asm,
- const MCValue &Target,
- const MCFragment &F,
- const MCFixup &Fixup,
- bool IsPCRel) const;
- virtual void sortRelocs(const MCAssembler &Asm,
- std::vector<ELFRelocationEntry> &Relocs);
+ unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel) const override;
+ bool needsRelocateWithSymbol(const MCSymbolData &SD,
+ unsigned Type) const override;
};
}
@@ -60,26 +43,9 @@ MipsELFObjectWriter::MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI,
MipsELFObjectWriter::~MipsELFObjectWriter() {}
-const MCSymbol *MipsELFObjectWriter::ExplicitRelSym(const MCAssembler &Asm,
- const MCValue &Target,
- const MCFragment &F,
- const MCFixup &Fixup,
- bool IsPCRel) const {
- assert(Target.getSymA() && "SymA cannot be 0.");
- const MCSymbol &Sym = Target.getSymA()->getSymbol().AliasedSymbol();
-
- if (Sym.getSection().getKind().isMergeableCString() ||
- Sym.getSection().getKind().isMergeableConst())
- return &Sym;
-
- return NULL;
-}
-
unsigned MipsELFObjectWriter::GetRelocType(const MCValue &Target,
const MCFixup &Fixup,
- bool IsPCRel,
- bool IsRelocWithSymbol,
- int64_t Addend) const {
+ bool IsPCRel) const {
// determine the type of the relocation
unsigned Type = (unsigned)ELF::R_MIPS_NONE;
unsigned Kind = (unsigned)Fixup.getKind();
@@ -210,6 +176,12 @@ unsigned MipsELFObjectWriter::GetRelocType(const MCValue &Target,
case Mips::fixup_MICROMIPS_GOT_OFST:
Type = ELF::R_MICROMIPS_GOT_OFST;
break;
+ case Mips::fixup_MICROMIPS_TLS_GD:
+ Type = ELF::R_MICROMIPS_TLS_GD;
+ break;
+ case Mips::fixup_MICROMIPS_TLS_LDM:
+ Type = ELF::R_MICROMIPS_TLS_LDM;
+ break;
case Mips::fixup_MICROMIPS_TLS_DTPREL_HI16:
Type = ELF::R_MICROMIPS_TLS_DTPREL_HI16;
break;
@@ -222,95 +194,62 @@ unsigned MipsELFObjectWriter::GetRelocType(const MCValue &Target,
case Mips::fixup_MICROMIPS_TLS_TPREL_LO16:
Type = ELF::R_MICROMIPS_TLS_TPREL_LO16;
break;
+ case Mips::fixup_MIPS_PC19_S2:
+ Type = ELF::R_MIPS_PC19_S2;
+ break;
+ case Mips::fixup_MIPS_PC18_S3:
+ Type = ELF::R_MIPS_PC18_S3;
+ break;
+ case Mips::fixup_MIPS_PC21_S2:
+ Type = ELF::R_MIPS_PC21_S2;
+ break;
+ case Mips::fixup_MIPS_PC26_S2:
+ Type = ELF::R_MIPS_PC26_S2;
+ break;
+ case Mips::fixup_MIPS_PCHI16:
+ Type = ELF::R_MIPS_PCHI16;
+ break;
+ case Mips::fixup_MIPS_PCLO16:
+ Type = ELF::R_MIPS_PCLO16;
+ break;
}
return Type;
}
-// Return true if R is either a GOT16 against a local symbol or HI16.
-static bool NeedsMatchingLo(const MCAssembler &Asm, const RelEntry &R) {
- if (!R.Sym)
- return false;
-
- MCSymbolData &SD = Asm.getSymbolData(R.Sym->AliasedSymbol());
-
- return ((R.Reloc.Type == ELF::R_MIPS_GOT16) && !SD.isExternal()) ||
- (R.Reloc.Type == ELF::R_MIPS_HI16);
-}
-
-static bool HasMatchingLo(const MCAssembler &Asm, RelLsIter I, RelLsIter Last) {
- if (I == Last)
+bool
+MipsELFObjectWriter::needsRelocateWithSymbol(const MCSymbolData &SD,
+ unsigned Type) const {
+ // FIXME: This is extremely conservative. This really needs to use a
+ // whitelist with a clear explanation for why each relocation needs to
+ // point to the symbol, not to the section.
+ switch (Type) {
+ default:
+ return true;
+
+ case ELF::R_MIPS_GOT16:
+ case ELF::R_MIPS16_GOT16:
+ case ELF::R_MICROMIPS_GOT16:
+ llvm_unreachable("Should have been handled already");
+
+ // These relocations might be paired with another relocation. The pairing is
+ // done by the static linker by matching the symbol. Since we only see one
+ // relocation at a time, we have to force them to relocate with a symbol to
+ // avoid ending up with a pair where one points to a section and another
+ // points to a symbol.
+ case ELF::R_MIPS_HI16:
+ case ELF::R_MIPS16_HI16:
+ case ELF::R_MICROMIPS_HI16:
+ case ELF::R_MIPS_LO16:
+ case ELF::R_MIPS16_LO16:
+ case ELF::R_MICROMIPS_LO16:
+ return true;
+
+ case ELF::R_MIPS_26:
+ case ELF::R_MIPS_32:
+ case ELF::R_MIPS_64:
+ case ELF::R_MIPS_GPREL16:
return false;
-
- RelLsIter Hi = I++;
-
- return (I->Reloc.Type == ELF::R_MIPS_LO16) && (Hi->Sym == I->Sym) &&
- (Hi->Offset == I->Offset);
-}
-
-static bool HasSameSymbol(const RelEntry &R0, const RelEntry &R1) {
- return R0.Sym == R1.Sym;
-}
-
-static int CompareOffset(const RelEntry &R0, const RelEntry &R1) {
- return (R0.Offset > R1.Offset) ? 1 : ((R0.Offset == R1.Offset) ? 0 : -1);
-}
-
-void MipsELFObjectWriter::sortRelocs(const MCAssembler &Asm,
- std::vector<ELFRelocationEntry> &Relocs) {
- // Call the default function first. Relocations are sorted in descending
- // order of r_offset.
- MCELFObjectTargetWriter::sortRelocs(Asm, Relocs);
-
- RelLs RelocLs;
- std::vector<RelLsIter> Unmatched;
-
- // Fill RelocLs. Traverse Relocs backwards so that relocations in RelocLs
- // are in ascending order of r_offset.
- for (std::vector<ELFRelocationEntry>::reverse_iterator R = Relocs.rbegin();
- R != Relocs.rend(); ++R) {
- std::pair<const MCSymbolRefExpr*, int64_t> P =
- MipsGetSymAndOffset(*R->Fixup);
- RelocLs.push_back(RelEntry(*R, P.first ? &P.first->getSymbol() : 0,
- P.second));
}
-
- // Get list of unmatched HI16 and GOT16.
- for (RelLsIter R = RelocLs.begin(); R != RelocLs.end(); ++R)
- if (NeedsMatchingLo(Asm, *R) && !HasMatchingLo(Asm, R, --RelocLs.end()))
- Unmatched.push_back(R);
-
- // Insert unmatched HI16 and GOT16 immediately before their matching LO16.
- for (std::vector<RelLsIter>::iterator U = Unmatched.begin();
- U != Unmatched.end(); ++U) {
- RelLsIter LoPos = RelocLs.end(), HiPos = *U;
- bool MatchedLo = false;
-
- for (RelLsIter R = RelocLs.begin(); R != RelocLs.end(); ++R) {
- if ((R->Reloc.Type == ELF::R_MIPS_LO16) && HasSameSymbol(*HiPos, *R) &&
- (CompareOffset(*R, *HiPos) >= 0) &&
- ((LoPos == RelocLs.end()) || ((CompareOffset(*R, *LoPos) < 0)) ||
- (!MatchedLo && !CompareOffset(*R, *LoPos))))
- LoPos = R;
-
- MatchedLo = NeedsMatchingLo(Asm, *R) &&
- HasMatchingLo(Asm, R, --RelocLs.end());
- }
-
- // If a matching LoPos was found, move HiPos and insert it before LoPos.
- // Make the offsets of HiPos and LoPos match.
- if (LoPos != RelocLs.end()) {
- HiPos->Offset = LoPos->Offset;
- RelocLs.insert(LoPos, *HiPos);
- RelocLs.erase(HiPos);
- }
- }
-
- // Put the sorted list back in reverse order.
- assert(Relocs.size() == RelocLs.size());
- unsigned I = RelocLs.size();
-
- for (RelLsIter R = RelocLs.begin(); R != RelocLs.end(); ++R)
- Relocs[--I] = R->Reloc;
}
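To make the HI16/LO16 pairing concrete, a typical pair the static linker has to match up looks like the following (sketched as a comment; instruction sequence shown for illustration only):

// lui   $2, %hi(sym)       -> R_MIPS_HI16 against 'sym'
// addiu $2, $2, %lo(sym)   -> R_MIPS_LO16 against 'sym'
// If one relocation pointed at the section and the other at the symbol, the
// linker could not pair them, so both are forced to relocate with the symbol.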
MCObjectWriter *llvm::createMipsELFObjectWriter(raw_ostream &OS,
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp
new file mode 100644
index 0000000..803ab85
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp
@@ -0,0 +1,43 @@
+//===-------- MipsELFStreamer.cpp - ELF Object Output ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MipsELFStreamer.h"
+#include "llvm/MC/MCInst.h"
+
+void MipsELFStreamer::EmitInstruction(const MCInst &Inst,
+ const MCSubtargetInfo &STI) {
+ MCELFStreamer::EmitInstruction(Inst, STI);
+
+ MCContext &Context = getContext();
+ const MCRegisterInfo *MCRegInfo = Context.getRegisterInfo();
+
+ for (unsigned OpIndex = 0; OpIndex < Inst.getNumOperands(); ++OpIndex) {
+ const MCOperand &Op = Inst.getOperand(OpIndex);
+
+ if (!Op.isReg())
+ continue;
+
+ unsigned Reg = Op.getReg();
+ RegInfoRecord->SetPhysRegUsed(Reg, MCRegInfo);
+ }
+}
+
+void MipsELFStreamer::EmitMipsOptionRecords() {
+ for (const auto &I : MipsOptionRecords)
+ I->EmitMipsOptionRecord();
+}
+
+namespace llvm {
+MCELFStreamer *createMipsELFStreamer(MCContext &Context, MCAsmBackend &MAB,
+ raw_ostream &OS, MCCodeEmitter *Emitter,
+ const MCSubtargetInfo &STI, bool RelaxAll,
+ bool NoExecStack) {
+ return new MipsELFStreamer(Context, MAB, OS, Emitter, STI);
+}
+}
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h
new file mode 100644
index 0000000..58863be
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h
@@ -0,0 +1,58 @@
+//===-------- MipsELFStreamer.h - ELF Object Output -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a custom MCELFStreamer which allows us to insert some hooks before
+// emitting data into an actual object file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MIPSELFSTREAMER_H
+#define MIPSELFSTREAMER_H
+
+#include "MipsOptionRecord.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MC/MCELFStreamer.h"
+#include <memory>
+
+namespace llvm {
+class MCAsmBackend;
+class MCCodeEmitter;
+class MCContext;
+class MCSubtargetInfo;
+
+class MipsELFStreamer : public MCELFStreamer {
+ SmallVector<std::unique_ptr<MipsOptionRecord>, 8> MipsOptionRecords;
+ MipsRegInfoRecord *RegInfoRecord;
+
+public:
+ MipsELFStreamer(MCContext &Context, MCAsmBackend &MAB, raw_ostream &OS,
+ MCCodeEmitter *Emitter, const MCSubtargetInfo &STI)
+ : MCELFStreamer(Context, MAB, OS, Emitter) {
+
+ RegInfoRecord = new MipsRegInfoRecord(this, Context, STI);
+ MipsOptionRecords.push_back(
+ std::unique_ptr<MipsRegInfoRecord>(RegInfoRecord));
+ }
+
+ /// Overriding this function allows us to add arbitrary behaviour before the
+ /// \p Inst is actually emitted. For example, we can inspect the operands and
+ /// gather sufficient information that allows us to reason about the register
+ /// usage for the translation unit.
+ void EmitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI) override;
+
+ /// Emits all the option records stored up until the point it's called.
+ void EmitMipsOptionRecords();
+};
+
+MCELFStreamer *createMipsELFStreamer(MCContext &Context, MCAsmBackend &MAB,
+ raw_ostream &OS, MCCodeEmitter *Emitter,
+ const MCSubtargetInfo &STI, bool RelaxAll,
+ bool NoExecStack);
+} // namespace llvm.
+#endif
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
index 6ed44b7..05080f0 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
@@ -128,6 +128,24 @@ namespace Mips {
// resulting in - R_MIPS_CALL_LO16
fixup_Mips_CALL_LO16,
+ // resulting in - R_MIPS_PC18_S3
+ fixup_MIPS_PC18_S3,
+
+ // resulting in - R_MIPS_PC19_S2
+ fixup_MIPS_PC19_S2,
+
+ // resulting in - R_MIPS_PC21_S2
+ fixup_MIPS_PC21_S2,
+
+ // resulting in - R_MIPS_PC26_S2
+ fixup_MIPS_PC26_S2,
+
+ // resulting in - R_MIPS_PCHI16
+ fixup_MIPS_PCHI16,
+
+ // resulting in - R_MIPS_PCLO16
+ fixup_MIPS_PCLO16,
+
// resulting in - R_MICROMIPS_26_S1
fixup_MICROMIPS_26_S1,
@@ -155,6 +173,12 @@ namespace Mips {
// resulting in - R_MICROMIPS_GOT_OFST
fixup_MICROMIPS_GOT_OFST,
+ // resulting in - R_MICROMIPS_TLS_GD
+ fixup_MICROMIPS_TLS_GD,
+
+ // resulting in - R_MICROMIPS_TLS_LDM
+ fixup_MICROMIPS_TLS_LDM,
+
// resulting in - R_MICROMIPS_TLS_DTPREL_HI16
fixup_MICROMIPS_TLS_DTPREL_HI16,
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
index 6aa3c76..e415412 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
@@ -38,7 +38,7 @@ MipsMCAsmInfo::MipsMCAsmInfo(StringRef TT) {
ZeroDirective = "\t.space\t";
GPRel32Directive = "\t.gpword\t";
GPRel64Directive = "\t.gpdword\t";
- DebugLabelSuffix = "=.";
+ UseAssignmentForEHBegin = true;
SupportsDebugInformation = true;
ExceptionsType = ExceptionHandling::DwarfCFI;
HasLEB128 = true;
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h
index 1000113..37ba0c4 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h
@@ -20,7 +20,7 @@ namespace llvm {
class StringRef;
class MipsMCAsmInfo : public MCAsmInfoELF {
- virtual void anchor();
+ void anchor() override;
public:
explicit MipsMCAsmInfo(StringRef TT);
};
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
index 66428bd..43fc521 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
@@ -11,137 +11,42 @@
//
//===----------------------------------------------------------------------===//
//
-#define DEBUG_TYPE "mccodeemitter"
-#include "MCTargetDesc/MipsBaseInfo.h"
+
+#include "MipsMCCodeEmitter.h"
#include "MCTargetDesc/MipsFixupKinds.h"
+#include "MCTargetDesc/MipsMCExpr.h"
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/raw_ostream.h"
+#define DEBUG_TYPE "mccodeemitter"
+
#define GET_INSTRMAP_INFO
#include "MipsGenInstrInfo.inc"
-
-using namespace llvm;
-
-namespace {
-class MipsMCCodeEmitter : public MCCodeEmitter {
- MipsMCCodeEmitter(const MipsMCCodeEmitter &) LLVM_DELETED_FUNCTION;
- void operator=(const MipsMCCodeEmitter &) LLVM_DELETED_FUNCTION;
- const MCInstrInfo &MCII;
- MCContext &Ctx;
- const MCSubtargetInfo &STI;
- bool IsLittleEndian;
- bool IsMicroMips;
-
-public:
- MipsMCCodeEmitter(const MCInstrInfo &mcii, MCContext &Ctx_,
- const MCSubtargetInfo &sti, bool IsLittle) :
- MCII(mcii), Ctx(Ctx_), STI (sti), IsLittleEndian(IsLittle) {
- IsMicroMips = STI.getFeatureBits() & Mips::FeatureMicroMips;
- }
-
- ~MipsMCCodeEmitter() {}
-
- void EmitByte(unsigned char C, raw_ostream &OS) const {
- OS << (char)C;
- }
-
- void EmitInstruction(uint64_t Val, unsigned Size, raw_ostream &OS) const {
- // Output the instruction encoding in little endian byte order.
- // Little-endian byte ordering:
- // mips32r2: 4 | 3 | 2 | 1
- // microMIPS: 2 | 1 | 4 | 3
- if (IsLittleEndian && Size == 4 && IsMicroMips) {
- EmitInstruction(Val>>16, 2, OS);
- EmitInstruction(Val, 2, OS);
- } else {
- for (unsigned i = 0; i < Size; ++i) {
- unsigned Shift = IsLittleEndian ? i * 8 : (Size - 1 - i) * 8;
- EmitByte((Val >> Shift) & 0xff, OS);
- }
- }
- }
-
- void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const;
-
- // getBinaryCodeForInstr - TableGen'erated function for getting the
- // binary encoding for an instruction.
- uint64_t getBinaryCodeForInstr(const MCInst &MI,
- SmallVectorImpl<MCFixup> &Fixups) const;
-
- // getBranchJumpOpValue - Return binary encoding of the jump
- // target operand. If the machine operand requires relocation,
- // record the relocation and return zero.
- unsigned getJumpTargetOpValue(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
-
- // getBranchJumpOpValueMM - Return binary encoding of the microMIPS jump
- // target operand. If the machine operand requires relocation,
- // record the relocation and return zero.
- unsigned getJumpTargetOpValueMM(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
-
- // getBranchTargetOpValue - Return binary encoding of the branch
- // target operand. If the machine operand requires relocation,
- // record the relocation and return zero.
- unsigned getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
-
- // getBranchTargetOpValue - Return binary encoding of the microMIPS branch
- // target operand. If the machine operand requires relocation,
- // record the relocation and return zero.
- unsigned getBranchTargetOpValueMM(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
-
- // getMachineOpValue - Return binary encoding of operand. If the machin
- // operand requires relocation, record the relocation and return zero.
- unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixups) const;
-
- unsigned getMemEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
- unsigned getMemEncodingMMImm12(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
- unsigned getSizeExtEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
- unsigned getSizeInsEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
-
- // getLSAImmEncoding - Return binary encoding of LSA immediate.
- unsigned getLSAImmEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
-
- unsigned
- getExprOpValue(const MCExpr *Expr,SmallVectorImpl<MCFixup> &Fixups) const;
-
-}; // class MipsMCCodeEmitter
-} // namespace
-
-MCCodeEmitter *llvm::createMipsMCCodeEmitterEB(const MCInstrInfo &MCII,
- const MCRegisterInfo &MRI,
- const MCSubtargetInfo &STI,
- MCContext &Ctx)
-{
- return new MipsMCCodeEmitter(MCII, Ctx, STI, false);
+#undef GET_INSTRMAP_INFO
+
+namespace llvm {
+MCCodeEmitter *createMipsMCCodeEmitterEB(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new MipsMCCodeEmitter(MCII, Ctx, false);
}
-MCCodeEmitter *llvm::createMipsMCCodeEmitterEL(const MCInstrInfo &MCII,
- const MCRegisterInfo &MRI,
- const MCSubtargetInfo &STI,
- MCContext &Ctx)
-{
- return new MipsMCCodeEmitter(MCII, Ctx, STI, true);
+MCCodeEmitter *createMipsMCCodeEmitterEL(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new MipsMCCodeEmitter(MCII, Ctx, true);
}
-
+} // End of namespace llvm.
// If the D<shift> instruction has a shift amount that is greater
// than 31 (checked in calling routine), lower it to a D<shift>32 instruction
@@ -208,11 +113,38 @@ static void LowerDextDins(MCInst& InstIn) {
return;
}
+bool MipsMCCodeEmitter::isMicroMips(const MCSubtargetInfo &STI) const {
+ return STI.getFeatureBits() & Mips::FeatureMicroMips;
+}
+
+void MipsMCCodeEmitter::EmitByte(unsigned char C, raw_ostream &OS) const {
+ OS << (char)C;
+}
+
+void MipsMCCodeEmitter::EmitInstruction(uint64_t Val, unsigned Size,
+ const MCSubtargetInfo &STI,
+ raw_ostream &OS) const {
+ // Output the instruction encoding in little endian byte order.
+ // Little-endian byte ordering:
+ // mips32r2: 4 | 3 | 2 | 1
+ // microMIPS: 2 | 1 | 4 | 3
+ if (IsLittleEndian && Size == 4 && isMicroMips(STI)) {
+ EmitInstruction(Val >> 16, 2, STI, OS);
+ EmitInstruction(Val, 2, STI, OS);
+ } else {
+ for (unsigned i = 0; i < Size; ++i) {
+ unsigned Shift = IsLittleEndian ? i * 8 : (Size - 1 - i) * 8;
+ EmitByte((Val >> Shift) & 0xff, OS);
+ }
+ }
+}
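For a concrete picture of the ordering described in the comment above, the byte stream emitted for Val = 0x11223344 with Size == 4 comes out as (illustration only):

// little endian, mips32r2:  44 33 22 11   (4 | 3 | 2 | 1)
// little endian, microMIPS: 22 11 44 33   (2 | 1 | 4 | 3)
// big endian:               11 22 33 44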
+
/// EncodeInstruction - Emit the instruction.
/// Size the instruction with Desc.getSize().
void MipsMCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const
{
// Non-pseudo instructions that get changed for direct object
@@ -235,7 +167,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
}
unsigned long N = Fixups.size();
- uint32_t Binary = getBinaryCodeForInstr(TmpInst, Fixups);
+ uint32_t Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
// Check for unimplemented opcodes.
// Unfortunately in MIPS both NOP and SLL will come in with Binary == 0
@@ -251,7 +183,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
Fixups.pop_back();
Opcode = NewOpcode;
TmpInst.setOpcode (NewOpcode);
- Binary = getBinaryCodeForInstr(TmpInst, Fixups);
+ Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
}
}
@@ -262,7 +194,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
if (!Size)
llvm_unreachable("Desc.getSize() returns 0");
- EmitInstruction(Binary, Size, OS);
+ EmitInstruction(Binary, Size, STI, OS);
}
/// getBranchTargetOpValue - Return binary encoding of the branch
@@ -270,7 +202,8 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
/// record the relocation and return zero.
unsigned MipsMCCodeEmitter::
getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpNo);
@@ -291,7 +224,8 @@ getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
/// record the relocation and return zero.
unsigned MipsMCCodeEmitter::
getBranchTargetOpValueMM(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpNo);
@@ -308,12 +242,76 @@ getBranchTargetOpValueMM(const MCInst &MI, unsigned OpNo,
return 0;
}
+/// getBranchTarget21OpValue - Return binary encoding of the branch
+/// target operand. If the machine operand requires relocation,
+/// record the relocation and return zero.
+unsigned MipsMCCodeEmitter::
+getBranchTarget21OpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+
+ // If the destination is an immediate, divide by 4.
+ if (MO.isImm()) return MO.getImm() >> 2;
+
+ assert(MO.isExpr() &&
+ "getBranchTarget21OpValue expects only expressions or immediates");
+
+ const MCExpr *Expr = MO.getExpr();
+ Fixups.push_back(MCFixup::Create(0, Expr,
+ MCFixupKind(Mips::fixup_MIPS_PC21_S2)));
+ return 0;
+}
+
+/// getBranchTarget26OpValue - Return binary encoding of the branch
+/// target operand. If the machine operand requires relocation,
+/// record the relocation and return zero.
+unsigned MipsMCCodeEmitter::
+getBranchTarget26OpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+
+ // If the destination is an immediate, divide by 4.
+ if (MO.isImm()) return MO.getImm() >> 2;
+
+ assert(MO.isExpr() &&
+ "getBranchTarget26OpValue expects only expressions or immediates");
+
+ const MCExpr *Expr = MO.getExpr();
+ Fixups.push_back(MCFixup::Create(0, Expr,
+ MCFixupKind(Mips::fixup_MIPS_PC26_S2)));
+ return 0;
+}
+
+/// getJumpOffset16OpValue - Return binary encoding of the jump
+/// target operand. If the machine operand requires relocation,
+/// record the relocation and return zero.
+unsigned MipsMCCodeEmitter::
+getJumpOffset16OpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+
+ if (MO.isImm()) return MO.getImm();
+
+ assert(MO.isExpr() &&
+ "getJumpOffset16OpValue expects only expressions or an immediate");
+
+ // TODO: Push fixup.
+ return 0;
+}
+
/// getJumpTargetOpValue - Return binary encoding of the jump
/// target operand. If the machine operand requires relocation,
/// record the relocation and return zero.
unsigned MipsMCCodeEmitter::
getJumpTargetOpValue(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpNo);
// If the destination is an immediate, divide by 4.
@@ -330,7 +328,8 @@ getJumpTargetOpValue(const MCInst &MI, unsigned OpNo,
unsigned MipsMCCodeEmitter::
getJumpTargetOpValueMM(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpNo);
// If the destination is an immediate, divide by 2.
@@ -346,7 +345,8 @@ getJumpTargetOpValueMM(const MCInst &MI, unsigned OpNo,
}
unsigned MipsMCCodeEmitter::
-getExprOpValue(const MCExpr *Expr,SmallVectorImpl<MCFixup> &Fixups) const {
+getExprOpValue(const MCExpr *Expr,SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
int64_t Res;
if (Expr->EvaluateAsAbsolute(Res))
@@ -358,101 +358,135 @@ getExprOpValue(const MCExpr *Expr,SmallVectorImpl<MCFixup> &Fixups) const {
}
if (Kind == MCExpr::Binary) {
- unsigned Res = getExprOpValue(cast<MCBinaryExpr>(Expr)->getLHS(), Fixups);
- Res += getExprOpValue(cast<MCBinaryExpr>(Expr)->getRHS(), Fixups);
+ unsigned Res = getExprOpValue(cast<MCBinaryExpr>(Expr)->getLHS(), Fixups, STI);
+ Res += getExprOpValue(cast<MCBinaryExpr>(Expr)->getRHS(), Fixups, STI);
return Res;
}
- if (Kind == MCExpr::SymbolRef) {
- Mips::Fixups FixupKind = Mips::Fixups(0);
- switch(cast<MCSymbolRefExpr>(Expr)->getKind()) {
- default: llvm_unreachable("Unknown fixup kind!");
- break;
- case MCSymbolRefExpr::VK_Mips_GPOFF_HI :
- FixupKind = Mips::fixup_Mips_GPOFF_HI;
- break;
- case MCSymbolRefExpr::VK_Mips_GPOFF_LO :
- FixupKind = Mips::fixup_Mips_GPOFF_LO;
- break;
- case MCSymbolRefExpr::VK_Mips_GOT_PAGE :
- FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_GOT_PAGE
- : Mips::fixup_Mips_GOT_PAGE;
- break;
- case MCSymbolRefExpr::VK_Mips_GOT_OFST :
- FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_GOT_OFST
- : Mips::fixup_Mips_GOT_OFST;
- break;
- case MCSymbolRefExpr::VK_Mips_GOT_DISP :
- FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_GOT_DISP
- : Mips::fixup_Mips_GOT_DISP;
- break;
- case MCSymbolRefExpr::VK_Mips_GPREL:
- FixupKind = Mips::fixup_Mips_GPREL16;
- break;
- case MCSymbolRefExpr::VK_Mips_GOT_CALL:
- FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_CALL16
- : Mips::fixup_Mips_CALL16;
- break;
- case MCSymbolRefExpr::VK_Mips_GOT16:
- FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_GOT16
- : Mips::fixup_Mips_GOT_Global;
- break;
- case MCSymbolRefExpr::VK_Mips_GOT:
- FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_GOT16
- : Mips::fixup_Mips_GOT_Local;
- break;
- case MCSymbolRefExpr::VK_Mips_ABS_HI:
- FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_HI16
- : Mips::fixup_Mips_HI16;
- break;
- case MCSymbolRefExpr::VK_Mips_ABS_LO:
- FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_LO16
- : Mips::fixup_Mips_LO16;
- break;
- case MCSymbolRefExpr::VK_Mips_TLSGD:
- FixupKind = Mips::fixup_Mips_TLSGD;
- break;
- case MCSymbolRefExpr::VK_Mips_TLSLDM:
- FixupKind = Mips::fixup_Mips_TLSLDM;
- break;
- case MCSymbolRefExpr::VK_Mips_DTPREL_HI:
- FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_TLS_DTPREL_HI16
- : Mips::fixup_Mips_DTPREL_HI;
- break;
- case MCSymbolRefExpr::VK_Mips_DTPREL_LO:
- FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_TLS_DTPREL_LO16
- : Mips::fixup_Mips_DTPREL_LO;
- break;
- case MCSymbolRefExpr::VK_Mips_GOTTPREL:
- FixupKind = Mips::fixup_Mips_GOTTPREL;
- break;
- case MCSymbolRefExpr::VK_Mips_TPREL_HI:
- FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_TLS_TPREL_HI16
- : Mips::fixup_Mips_TPREL_HI;
- break;
- case MCSymbolRefExpr::VK_Mips_TPREL_LO:
- FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_TLS_TPREL_LO16
- : Mips::fixup_Mips_TPREL_LO;
- break;
- case MCSymbolRefExpr::VK_Mips_HIGHER:
- FixupKind = Mips::fixup_Mips_HIGHER;
- break;
- case MCSymbolRefExpr::VK_Mips_HIGHEST:
- FixupKind = Mips::fixup_Mips_HIGHEST;
- break;
- case MCSymbolRefExpr::VK_Mips_GOT_HI16:
- FixupKind = Mips::fixup_Mips_GOT_HI16;
- break;
- case MCSymbolRefExpr::VK_Mips_GOT_LO16:
- FixupKind = Mips::fixup_Mips_GOT_LO16;
- break;
- case MCSymbolRefExpr::VK_Mips_CALL_HI16:
- FixupKind = Mips::fixup_Mips_CALL_HI16;
- break;
- case MCSymbolRefExpr::VK_Mips_CALL_LO16:
- FixupKind = Mips::fixup_Mips_CALL_LO16;
- break;
- } // switch
+ if (Kind == MCExpr::Target) {
+ const MipsMCExpr *MipsExpr = cast<MipsMCExpr>(Expr);
+
+ Mips::Fixups FixupKind = Mips::Fixups(0);
+ switch (MipsExpr->getKind()) {
+ default: llvm_unreachable("Unsupported fixup kind for target expression!");
+ case MipsMCExpr::VK_Mips_HIGHEST:
+ FixupKind = Mips::fixup_Mips_HIGHEST;
+ break;
+ case MipsMCExpr::VK_Mips_HIGHER:
+ FixupKind = Mips::fixup_Mips_HIGHER;
+ break;
+ case MipsMCExpr::VK_Mips_HI:
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_HI16
+ : Mips::fixup_Mips_HI16;
+ break;
+ case MipsMCExpr::VK_Mips_LO:
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_LO16
+ : Mips::fixup_Mips_LO16;
+ break;
+ }
+ Fixups.push_back(MCFixup::Create(0, MipsExpr, MCFixupKind(FixupKind)));
+ return 0;
+ }
+
+ if (Kind == MCExpr::SymbolRef) {
+ Mips::Fixups FixupKind = Mips::Fixups(0);
+
+ switch(cast<MCSymbolRefExpr>(Expr)->getKind()) {
+ default: llvm_unreachable("Unknown fixup kind!");
+ break;
+ case MCSymbolRefExpr::VK_Mips_GPOFF_HI :
+ FixupKind = Mips::fixup_Mips_GPOFF_HI;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GPOFF_LO :
+ FixupKind = Mips::fixup_Mips_GPOFF_LO;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOT_PAGE :
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_GOT_PAGE
+ : Mips::fixup_Mips_GOT_PAGE;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOT_OFST :
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_GOT_OFST
+ : Mips::fixup_Mips_GOT_OFST;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOT_DISP :
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_GOT_DISP
+ : Mips::fixup_Mips_GOT_DISP;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GPREL:
+ FixupKind = Mips::fixup_Mips_GPREL16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOT_CALL:
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_CALL16
+ : Mips::fixup_Mips_CALL16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOT16:
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_GOT16
+ : Mips::fixup_Mips_GOT_Global;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOT:
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_GOT16
+ : Mips::fixup_Mips_GOT_Local;
+ break;
+ case MCSymbolRefExpr::VK_Mips_ABS_HI:
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_HI16
+ : Mips::fixup_Mips_HI16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_ABS_LO:
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_LO16
+ : Mips::fixup_Mips_LO16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_TLSGD:
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_TLS_GD
+ : Mips::fixup_Mips_TLSGD;
+ break;
+ case MCSymbolRefExpr::VK_Mips_TLSLDM:
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_TLS_LDM
+ : Mips::fixup_Mips_TLSLDM;
+ break;
+ case MCSymbolRefExpr::VK_Mips_DTPREL_HI:
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_TLS_DTPREL_HI16
+ : Mips::fixup_Mips_DTPREL_HI;
+ break;
+ case MCSymbolRefExpr::VK_Mips_DTPREL_LO:
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_TLS_DTPREL_LO16
+ : Mips::fixup_Mips_DTPREL_LO;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOTTPREL:
+ FixupKind = Mips::fixup_Mips_GOTTPREL;
+ break;
+ case MCSymbolRefExpr::VK_Mips_TPREL_HI:
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_TLS_TPREL_HI16
+ : Mips::fixup_Mips_TPREL_HI;
+ break;
+ case MCSymbolRefExpr::VK_Mips_TPREL_LO:
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_TLS_TPREL_LO16
+ : Mips::fixup_Mips_TPREL_LO;
+ break;
+ case MCSymbolRefExpr::VK_Mips_HIGHER:
+ FixupKind = Mips::fixup_Mips_HIGHER;
+ break;
+ case MCSymbolRefExpr::VK_Mips_HIGHEST:
+ FixupKind = Mips::fixup_Mips_HIGHEST;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOT_HI16:
+ FixupKind = Mips::fixup_Mips_GOT_HI16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOT_LO16:
+ FixupKind = Mips::fixup_Mips_GOT_LO16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_CALL_HI16:
+ FixupKind = Mips::fixup_Mips_CALL_HI16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_CALL_LO16:
+ FixupKind = Mips::fixup_Mips_CALL_LO16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_PCREL_HI16:
+ FixupKind = Mips::fixup_MIPS_PCHI16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_PCREL_LO16:
+ FixupKind = Mips::fixup_MIPS_PCLO16;
+ break;
+ } // switch
Fixups.push_back(MCFixup::Create(0, Expr, MCFixupKind(FixupKind)));
return 0;
@@ -464,7 +498,8 @@ getExprOpValue(const MCExpr *Expr,SmallVectorImpl<MCFixup> &Fixups) const {
/// operand requires relocation, record the relocation and return zero.
unsigned MipsMCCodeEmitter::
getMachineOpValue(const MCInst &MI, const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
if (MO.isReg()) {
unsigned Reg = MO.getReg();
unsigned RegNo = Ctx.getRegisterInfo()->getEncodingValue(Reg);
@@ -477,38 +512,85 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO,
}
// MO must be an Expr.
assert(MO.isExpr());
- return getExprOpValue(MO.getExpr(),Fixups);
+ return getExprOpValue(MO.getExpr(),Fixups, STI);
+}
+
+/// getMSAMemEncoding - Return binary encoding of memory operand for LD/ST
+/// instructions.
+unsigned
+MipsMCCodeEmitter::getMSAMemEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ // Base register is encoded in bits 20-16, offset is encoded in bits 15-0.
+ assert(MI.getOperand(OpNo).isReg());
+ unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo),Fixups, STI) << 16;
+ unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups, STI);
+
+ // The immediate field of an LD/ST instruction is scaled which means it must
+ // be divided (when encoding) by the size (in bytes) of the instructions'
+ // data format.
+ // .b - 1 byte
+ // .h - 2 bytes
+ // .w - 4 bytes
+ // .d - 8 bytes
+ switch(MI.getOpcode())
+ {
+ default:
+ assert (0 && "Unexpected instruction");
+ break;
+ case Mips::LD_B:
+ case Mips::ST_B:
+ // We don't need to scale the offset in this case
+ break;
+ case Mips::LD_H:
+ case Mips::ST_H:
+ OffBits >>= 1;
+ break;
+ case Mips::LD_W:
+ case Mips::ST_W:
+ OffBits >>= 2;
+ break;
+ case Mips::LD_D:
+ case Mips::ST_D:
+ OffBits >>= 3;
+ break;
+ }
+
+ return (OffBits & 0xFFFF) | RegBits;
}
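A small sketch of the element-size scaling applied above (scaleMSAOffset is an invented helper, not part of the emitter):

// The MSA ld/st offset field holds element counts rather than bytes, so for
// example "ld.w $w0, 32($4)" stores 32 / 4 == 8 in the offset field while
// the base register number goes into bits 20-16.
static unsigned scaleMSAOffset(unsigned OffBytes, unsigned ElemSizeBytes) {
  return OffBytes / ElemSizeBytes;    // .b = 1, .h = 2, .w = 4, .d = 8 bytes
}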
/// getMemEncoding - Return binary encoding of memory related operand.
/// If the offset operand requires relocation, record the relocation.
unsigned
MipsMCCodeEmitter::getMemEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// Base register is encoded in bits 20-16, offset is encoded in bits 15-0.
assert(MI.getOperand(OpNo).isReg());
- unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo),Fixups) << 16;
- unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups);
+ unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo),Fixups, STI) << 16;
+ unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups, STI);
return (OffBits & 0xFFFF) | RegBits;
}
unsigned MipsMCCodeEmitter::
getMemEncodingMMImm12(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// Base register is encoded in bits 20-16, offset is encoded in bits 11-0.
assert(MI.getOperand(OpNo).isReg());
- unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups) << 16;
- unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups);
+ unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups, STI) << 16;
+ unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups, STI);
return (OffBits & 0x0FFF) | RegBits;
}
unsigned
MipsMCCodeEmitter::getSizeExtEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
assert(MI.getOperand(OpNo).isImm());
- unsigned SizeEncoding = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups);
+ unsigned SizeEncoding = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups, STI);
return SizeEncoding - 1;
}
@@ -516,22 +598,65 @@ MipsMCCodeEmitter::getSizeExtEncoding(const MCInst &MI, unsigned OpNo,
//
unsigned
MipsMCCodeEmitter::getSizeInsEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
assert(MI.getOperand(OpNo-1).isImm());
assert(MI.getOperand(OpNo).isImm());
- unsigned Position = getMachineOpValue(MI, MI.getOperand(OpNo-1), Fixups);
- unsigned Size = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups);
+ unsigned Position = getMachineOpValue(MI, MI.getOperand(OpNo-1), Fixups, STI);
+ unsigned Size = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups, STI);
return Position + Size - 1;
}
unsigned
MipsMCCodeEmitter::getLSAImmEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
assert(MI.getOperand(OpNo).isImm());
// The immediate is encoded as 'immediate - 1'.
- return getMachineOpValue(MI, MI.getOperand(OpNo), Fixups) - 1;
+ return getMachineOpValue(MI, MI.getOperand(OpNo), Fixups, STI) - 1;
}
-#include "MipsGenMCCodeEmitter.inc"
+unsigned
+MipsMCCodeEmitter::getSimm19Lsl2Encoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpNo);
+ if (MO.isImm()) {
+ // The immediate is encoded as 'immediate << 2'.
+ unsigned Res = getMachineOpValue(MI, MO, Fixups, STI);
+ assert((Res & 3) == 0);
+ return Res >> 2;
+ }
+
+ assert(MO.isExpr() &&
+ "getSimm19Lsl2Encoding expects only expressions or an immediate");
+ const MCExpr *Expr = MO.getExpr();
+ Fixups.push_back(MCFixup::Create(0, Expr,
+ MCFixupKind(Mips::fixup_MIPS_PC19_S2)));
+ return 0;
+}
+
+unsigned
+MipsMCCodeEmitter::getSimm18Lsl3Encoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpNo);
+ if (MO.isImm()) {
+ // The immediate is encoded as 'immediate << 3'.
+ unsigned Res = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups, STI);
+ assert((Res & 7) == 0);
+ return Res >> 3;
+ }
+
+ assert(MO.isExpr() &&
+ "getSimm18Lsl2Encoding expects only expressions or an immediate");
+
+ const MCExpr *Expr = MO.getExpr();
+ Fixups.push_back(MCFixup::Create(0, Expr,
+ MCFixupKind(Mips::fixup_MIPS_PC18_S3)));
+ return 0;
+}
+
+#include "MipsGenMCCodeEmitter.inc"
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.h
new file mode 100644
index 0000000..304167f
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.h
@@ -0,0 +1,154 @@
+//===-- MipsMCCodeEmitter.h - Convert Mips Code to Machine Code -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MipsMCCodeEmitter class.
+//
+//===----------------------------------------------------------------------===//
+//
+
+#ifndef MIPS_MC_CODE_EMITTER_H
+#define MIPS_MC_CODE_EMITTER_H
+
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/Support/DataTypes.h"
+
+using namespace llvm;
+
+namespace llvm {
+class MCContext;
+class MCExpr;
+class MCInst;
+class MCInstrInfo;
+class MCFixup;
+class MCOperand;
+class MCSubtargetInfo;
+class raw_ostream;
+
+class MipsMCCodeEmitter : public MCCodeEmitter {
+ MipsMCCodeEmitter(const MipsMCCodeEmitter &) LLVM_DELETED_FUNCTION;
+ void operator=(const MipsMCCodeEmitter &) LLVM_DELETED_FUNCTION;
+ const MCInstrInfo &MCII;
+ MCContext &Ctx;
+ bool IsLittleEndian;
+
+ bool isMicroMips(const MCSubtargetInfo &STI) const;
+
+public:
+ MipsMCCodeEmitter(const MCInstrInfo &mcii, MCContext &Ctx_, bool IsLittle)
+ : MCII(mcii), Ctx(Ctx_), IsLittleEndian(IsLittle) {}
+
+ ~MipsMCCodeEmitter() {}
+
+ void EmitByte(unsigned char C, raw_ostream &OS) const;
+
+ void EmitInstruction(uint64_t Val, unsigned Size, const MCSubtargetInfo &STI,
+ raw_ostream &OS) const;
+
+ void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const override;
+
+ // getBinaryCodeForInstr - TableGen'erated function for getting the
+ // binary encoding for an instruction.
+ uint64_t getBinaryCodeForInstr(const MCInst &MI,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+  // getJumpTargetOpValue - Return binary encoding of the jump
+ // target operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getJumpTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+  // getJumpTargetOpValueMM - Return binary encoding of the microMIPS jump
+ // target operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getJumpTargetOpValueMM(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ // getBranchTargetOpValue - Return binary encoding of the branch
+ // target operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+  // getBranchTargetOpValueMM - Return binary encoding of the microMIPS branch
+ // target operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getBranchTargetOpValueMM(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ // getBranchTarget21OpValue - Return binary encoding of the branch
+ // offset operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getBranchTarget21OpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ // getBranchTarget26OpValue - Return binary encoding of the branch
+ // offset operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getBranchTarget26OpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ // getJumpOffset16OpValue - Return binary encoding of the jump
+ // offset operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getJumpOffset16OpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+  // getMachineOpValue - Return binary encoding of operand. If the machine
+ // operand requires relocation, record the relocation and return zero.
+ unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ unsigned getMSAMemEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ unsigned getMemEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ unsigned getMemEncodingMMImm12(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ unsigned getSizeExtEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ unsigned getSizeInsEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ // getLSAImmEncoding - Return binary encoding of LSA immediate.
+ unsigned getLSAImmEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ unsigned getSimm19Lsl2Encoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ unsigned getSimm18Lsl3Encoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ unsigned getExprOpValue(const MCExpr *Expr, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+}; // class MipsMCCodeEmitter
+} // namespace llvm.
+
+#endif
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp
new file mode 100644
index 0000000..5bba3e5
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp
@@ -0,0 +1,89 @@
+//===-- MipsMCExpr.cpp - Mips specific MC expression classes --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MipsMCExpr.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCObjectStreamer.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "mipsmcexpr"
+
+bool MipsMCExpr::isSupportedBinaryExpr(MCSymbolRefExpr::VariantKind VK,
+ const MCBinaryExpr *BE) {
+ switch (VK) {
+ case MCSymbolRefExpr::VK_Mips_ABS_LO:
+ case MCSymbolRefExpr::VK_Mips_ABS_HI:
+ case MCSymbolRefExpr::VK_Mips_HIGHER:
+ case MCSymbolRefExpr::VK_Mips_HIGHEST:
+ break;
+ default:
+ return false;
+ }
+
+ // We support expressions of the form "(sym1 binop1 sym2) binop2 const",
+ // where "binop2 const" is optional.
+ if (isa<MCBinaryExpr>(BE->getLHS())) {
+ if (!isa<MCConstantExpr>(BE->getRHS()))
+ return false;
+ BE = cast<MCBinaryExpr>(BE->getLHS());
+ }
+ return (isa<MCSymbolRefExpr>(BE->getLHS())
+ && isa<MCSymbolRefExpr>(BE->getRHS()));
+}
+
+const MipsMCExpr*
+MipsMCExpr::Create(MCSymbolRefExpr::VariantKind VK, const MCExpr *Expr,
+ MCContext &Ctx) {
+ VariantKind Kind;
+ switch (VK) {
+ case MCSymbolRefExpr::VK_Mips_ABS_LO:
+ Kind = VK_Mips_LO;
+ break;
+ case MCSymbolRefExpr::VK_Mips_ABS_HI:
+ Kind = VK_Mips_HI;
+ break;
+ case MCSymbolRefExpr::VK_Mips_HIGHER:
+ Kind = VK_Mips_HIGHER;
+ break;
+ case MCSymbolRefExpr::VK_Mips_HIGHEST:
+ Kind = VK_Mips_HIGHEST;
+ break;
+ default:
+ llvm_unreachable("Invalid kind!");
+ }
+
+ return new (Ctx) MipsMCExpr(Kind, Expr);
+}
+
+void MipsMCExpr::PrintImpl(raw_ostream &OS) const {
+ switch (Kind) {
+ default: llvm_unreachable("Invalid kind!");
+ case VK_Mips_LO: OS << "%lo"; break;
+ case VK_Mips_HI: OS << "%hi"; break;
+ case VK_Mips_HIGHER: OS << "%higher"; break;
+ case VK_Mips_HIGHEST: OS << "%highest"; break;
+ }
+
+ OS << '(';
+ Expr->print(OS);
+ OS << ')';
+}
+
+bool
+MipsMCExpr::EvaluateAsRelocatableImpl(MCValue &Res,
+ const MCAsmLayout *Layout) const {
+ return getSubExpr()->EvaluateAsRelocatable(Res, Layout);
+}
+
+void MipsMCExpr::visitUsedExpr(MCStreamer &Streamer) const {
+ Streamer.visitUsedExpr(*getSubExpr());
+}
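
The check in isSupportedBinaryExpr above accepts "(sym1 op sym2)", optionally wrapped as "(sym1 op sym2) op const", which is the shape an operand such as %hi(foo - bar + 4) parses to. A toy restatement of just that shape test, using a simplified expression type instead of the MC classes (a sketch only, not part of the patch):

    #include <cstdio>
    #include <memory>

    struct Expr {
      enum Kind { Sym, Const, Bin } K;
      std::shared_ptr<Expr> LHS, RHS;  // set only for Bin nodes
    };

    static std::shared_ptr<Expr> sym() { return std::make_shared<Expr>(Expr{Expr::Sym, nullptr, nullptr}); }
    static std::shared_ptr<Expr> cst() { return std::make_shared<Expr>(Expr{Expr::Const, nullptr, nullptr}); }
    static std::shared_ptr<Expr> bin(std::shared_ptr<Expr> L, std::shared_ptr<Expr> R) {
      return std::make_shared<Expr>(Expr{Expr::Bin, L, R});
    }

    static bool isSupportedShape(const Expr &E) {
      if (E.K != Expr::Bin)
        return false;
      const Expr *BE = &E;
      // Peel an optional outer "... op const".
      if (BE->LHS->K == Expr::Bin) {
        if (BE->RHS->K != Expr::Const)
          return false;
        BE = BE->LHS.get();
      }
      return BE->LHS->K == Expr::Sym && BE->RHS->K == Expr::Sym;
    }

    int main() {
      // (sym op sym) op const, e.g. %hi(foo - bar + 4): accepted.
      std::printf("%d\n", isSupportedShape(*bin(bin(sym(), sym()), cst())));  // 1
      // sym op const alone: rejected.
      std::printf("%d\n", isSupportedShape(*bin(sym(), cst())));              // 0
    }
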
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCExpr.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCExpr.h
new file mode 100644
index 0000000..f193dc9
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCExpr.h
@@ -0,0 +1,66 @@
+//===-- MipsMCExpr.h - Mips specific MC expression classes ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MIPSMCEXPR_H
+#define MIPSMCEXPR_H
+
+#include "llvm/MC/MCAsmLayout.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCValue.h"
+
+namespace llvm {
+
+class MipsMCExpr : public MCTargetExpr {
+public:
+ enum VariantKind {
+ VK_Mips_None,
+ VK_Mips_LO,
+ VK_Mips_HI,
+ VK_Mips_HIGHER,
+ VK_Mips_HIGHEST
+ };
+
+private:
+ const VariantKind Kind;
+ const MCExpr *Expr;
+
+ explicit MipsMCExpr(VariantKind Kind, const MCExpr *Expr)
+ : Kind(Kind), Expr(Expr) {}
+
+public:
+ static bool isSupportedBinaryExpr(MCSymbolRefExpr::VariantKind VK,
+ const MCBinaryExpr *BE);
+
+ static const MipsMCExpr *Create(MCSymbolRefExpr::VariantKind VK,
+ const MCExpr *Expr, MCContext &Ctx);
+
+ /// getOpcode - Get the kind of this expression.
+ VariantKind getKind() const { return Kind; }
+
+ /// getSubExpr - Get the child of this expression.
+ const MCExpr *getSubExpr() const { return Expr; }
+
+ void PrintImpl(raw_ostream &OS) const override;
+ bool EvaluateAsRelocatableImpl(MCValue &Res,
+ const MCAsmLayout *Layout) const override;
+ void visitUsedExpr(MCStreamer &Streamer) const override;
+ const MCSection *FindAssociatedSection() const override {
+ return getSubExpr()->FindAssociatedSection();
+ }
+
+ // There are no TLS MipsMCExprs at the moment.
+ void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const override {}
+
+ static bool classof(const MCExpr *E) {
+ return E->getKind() == MCExpr::Target;
+ }
+};
+} // end namespace llvm
+
+#endif
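
The four variant kinds above name 16-bit slices of an address, as used when materializing 64-bit constants. Roughly, and ignoring the carry propagation that the real %hi/%higher/%highest relocations apply between adjacent pieces, the split looks like this (illustration only, not part of the patch):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t Addr = 0x0123456789ABCDEFULL;
      std::printf("lo      = 0x%04llx\n", (unsigned long long)(Addr & 0xFFFF));          // cdef
      std::printf("hi      = 0x%04llx\n", (unsigned long long)((Addr >> 16) & 0xFFFF));  // 89ab
      std::printf("higher  = 0x%04llx\n", (unsigned long long)((Addr >> 32) & 0xFFFF));  // 4567
      std::printf("highest = 0x%04llx\n", (unsigned long long)((Addr >> 48) & 0xFFFF));  // 0123
    }
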
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
new file mode 100644
index 0000000..01d5363
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
@@ -0,0 +1,33 @@
+//===-- MipsMCNaCl.h - NaCl-related declarations --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MIPSMCNACL_H
+#define MIPSMCNACL_H
+
+#include "llvm/MC/MCELFStreamer.h"
+
+namespace llvm {
+
+// Log2 of the NaCl MIPS sandbox's instruction bundle size.
+static const unsigned MIPS_NACL_BUNDLE_ALIGN = 4u;
+
+bool isBasePlusOffsetMemoryAccess(unsigned Opcode, unsigned *AddrIdx,
+ bool *IsStore = nullptr);
+bool baseRegNeedsLoadStoreMask(unsigned Reg);
+
+// This function creates an MCELFStreamer for Mips NaCl.
+MCELFStreamer *createMipsNaClELFStreamer(MCContext &Context, MCAsmBackend &TAB,
+ raw_ostream &OS,
+ MCCodeEmitter *Emitter,
+ const MCSubtargetInfo &STI,
+ bool RelaxAll, bool NoExecStack);
+
+}
+
+#endif
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
index 5548aaa..d2b929b 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
@@ -11,12 +11,14 @@
//
//===----------------------------------------------------------------------===//
-#include "MipsMCTargetDesc.h"
#include "InstPrinter/MipsInstPrinter.h"
+#include "MipsELFStreamer.h"
#include "MipsMCAsmInfo.h"
+#include "MipsMCNaCl.h"
+#include "MipsMCTargetDesc.h"
#include "MipsTargetStreamer.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCCodeGenInfo.h"
-#include "llvm/MC/MCELF.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
@@ -28,6 +30,8 @@
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
#define GET_INSTRINFO_MC_DESC
#include "MipsGenInstrInfo.inc"
@@ -37,38 +41,18 @@
#define GET_REGINFO_MC_DESC
#include "MipsGenRegisterInfo.inc"
-using namespace llvm;
-
-static std::string ParseMipsTriple(StringRef TT, StringRef CPU) {
- std::string MipsArchFeature;
- size_t DashPosition = 0;
- StringRef TheTriple;
-
- // Let's see if there is a dash, like mips-unknown-linux.
- DashPosition = TT.find('-');
-
- if (DashPosition == StringRef::npos) {
- // No dash, we check the string size.
- TheTriple = TT.substr(0);
- } else {
- // We are only interested in substring before dash.
- TheTriple = TT.substr(0,DashPosition);
- }
-
- if (TheTriple == "mips" || TheTriple == "mipsel") {
- if (CPU.empty() || CPU == "mips32") {
- MipsArchFeature = "+mips32";
- } else if (CPU == "mips32r2") {
- MipsArchFeature = "+mips32r2";
- }
- } else {
- if (CPU.empty() || CPU == "mips64") {
- MipsArchFeature = "+mips64";
- } else if (CPU == "mips64r2") {
- MipsArchFeature = "+mips64r2";
- }
+/// Select the Mips CPU for the given triple and cpu name.
+/// FIXME: Merge with the copy in MipsSubtarget.cpp
+static inline StringRef selectMipsCPU(StringRef TT, StringRef CPU) {
+ if (CPU.empty() || CPU == "generic") {
+ Triple TheTriple(TT);
+ if (TheTriple.getArch() == Triple::mips ||
+ TheTriple.getArch() == Triple::mipsel)
+ CPU = "mips32";
+ else
+ CPU = "mips64";
}
- return MipsArchFeature;
+ return CPU;
}
static MCInstrInfo *createMipsMCInstrInfo() {
@@ -85,15 +69,9 @@ static MCRegisterInfo *createMipsMCRegisterInfo(StringRef TT) {
static MCSubtargetInfo *createMipsMCSubtargetInfo(StringRef TT, StringRef CPU,
StringRef FS) {
- std::string ArchFS = ParseMipsTriple(TT,CPU);
- if (!FS.empty()) {
- if (!ArchFS.empty())
- ArchFS = ArchFS + "," + FS.str();
- else
- ArchFS = FS;
- }
+ CPU = selectMipsCPU(TT, CPU);
MCSubtargetInfo *X = new MCSubtargetInfo();
- InitMipsMCSubtargetInfo(X, TT, CPU, ArchFS);
+ InitMipsMCSubtargetInfo(X, TT, CPU, FS);
return X;
}
@@ -101,7 +79,7 @@ static MCAsmInfo *createMipsMCAsmInfo(const MCRegisterInfo &MRI, StringRef TT) {
MCAsmInfo *MAI = new MipsMCAsmInfo(TT);
unsigned SP = MRI.getDwarfRegNum(Mips::SP, true);
- MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(0, SP, 0);
+ MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, SP, 0);
MAI->addInitialFrameState(Inst);
return MAI;
@@ -131,21 +109,34 @@ static MCInstPrinter *createMipsMCInstPrinter(const Target &T,
static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
MCContext &Context, MCAsmBackend &MAB,
raw_ostream &OS, MCCodeEmitter *Emitter,
+ const MCSubtargetInfo &STI,
bool RelaxAll, bool NoExecStack) {
- MipsTargetELFStreamer *S = new MipsTargetELFStreamer();
- return createELFStreamer(Context, S, MAB, OS, Emitter, RelaxAll, NoExecStack);
+ MCStreamer *S;
+ if (!Triple(TT).isOSNaCl())
+ S = createMipsELFStreamer(Context, MAB, OS, Emitter, STI, RelaxAll,
+ NoExecStack);
+ else
+ S = createMipsNaClELFStreamer(Context, MAB, OS, Emitter, STI, RelaxAll,
+ NoExecStack);
+ new MipsTargetELFStreamer(*S, STI);
+ return S;
}
static MCStreamer *
createMCAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
- bool isVerboseAsm, bool useLoc, bool useCFI,
- bool useDwarfDirectory, MCInstPrinter *InstPrint,
- MCCodeEmitter *CE, MCAsmBackend *TAB, bool ShowInst) {
- MipsTargetAsmStreamer *S = new MipsTargetAsmStreamer(OS);
-
- return llvm::createAsmStreamer(Ctx, S, OS, isVerboseAsm, useLoc, useCFI,
- useDwarfDirectory, InstPrint, CE, TAB,
- ShowInst);
+ bool isVerboseAsm, bool useDwarfDirectory,
+ MCInstPrinter *InstPrint, MCCodeEmitter *CE,
+ MCAsmBackend *TAB, bool ShowInst) {
+ MCStreamer *S = llvm::createAsmStreamer(
+ Ctx, OS, isVerboseAsm, useDwarfDirectory, InstPrint, CE, TAB, ShowInst);
+ new MipsTargetAsmStreamer(*S, OS);
+ return S;
+}
+
+static MCStreamer *createMipsNullStreamer(MCContext &Ctx) {
+ MCStreamer *S = llvm::createNullStreamer(Ctx);
+ new MipsTargetStreamer(*S);
+ return S;
}
extern "C" void LLVMInitializeMipsTargetMC() {
@@ -202,6 +193,12 @@ extern "C" void LLVMInitializeMipsTargetMC() {
TargetRegistry::RegisterAsmStreamer(TheMips64Target, createMCAsmStreamer);
TargetRegistry::RegisterAsmStreamer(TheMips64elTarget, createMCAsmStreamer);
+ TargetRegistry::RegisterNullStreamer(TheMipsTarget, createMipsNullStreamer);
+ TargetRegistry::RegisterNullStreamer(TheMipselTarget, createMipsNullStreamer);
+ TargetRegistry::RegisterNullStreamer(TheMips64Target, createMipsNullStreamer);
+ TargetRegistry::RegisterNullStreamer(TheMips64elTarget,
+ createMipsNullStreamer);
+
// Register the asm backend.
TargetRegistry::RegisterMCAsmBackend(TheMipsTarget,
createMipsAsmBackendEB32);
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
index eabebfe..161d1ea 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
@@ -42,14 +42,18 @@ MCCodeEmitter *createMipsMCCodeEmitterEL(const MCInstrInfo &MCII,
const MCSubtargetInfo &STI,
MCContext &Ctx);
-MCAsmBackend *createMipsAsmBackendEB32(const Target &T, const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU);
-MCAsmBackend *createMipsAsmBackendEL32(const Target &T, const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU);
-MCAsmBackend *createMipsAsmBackendEB64(const Target &T, const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU);
-MCAsmBackend *createMipsAsmBackendEL64(const Target &T, const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU);
+MCAsmBackend *createMipsAsmBackendEB32(const Target &T,
+ const MCRegisterInfo &MRI, StringRef TT,
+ StringRef CPU);
+MCAsmBackend *createMipsAsmBackendEL32(const Target &T,
+ const MCRegisterInfo &MRI, StringRef TT,
+ StringRef CPU);
+MCAsmBackend *createMipsAsmBackendEB64(const Target &T,
+ const MCRegisterInfo &MRI, StringRef TT,
+ StringRef CPU);
+MCAsmBackend *createMipsAsmBackendEL64(const Target &T,
+ const MCRegisterInfo &MRI, StringRef TT,
+ StringRef CPU);
MCObjectWriter *createMipsELFObjectWriter(raw_ostream &OS,
uint8_t OSABI,
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp
new file mode 100644
index 0000000..6cde8f9
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp
@@ -0,0 +1,272 @@
+//===-- MipsNaClELFStreamer.cpp - ELF Object Output for Mips NaCl ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements MCELFStreamer for Mips NaCl. It emits .o object files
+// as required by NaCl's SFI sandbox. It inserts address-masking instructions
+// before dangerous control-flow and memory access instructions. It inserts
+// address-masking instructions after instructions that change the stack
+// pointer. It ensures that the mask and the dangerous instruction are always
+// emitted in the same bundle. It aligns call + branch delay to the bundle end,
+// so that return address is always aligned to the start of next bundle.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Mips.h"
+#include "MipsELFStreamer.h"
+#include "MipsMCNaCl.h"
+#include "llvm/MC/MCELFStreamer.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "mips-mc-nacl"
+
+namespace {
+
+const unsigned IndirectBranchMaskReg = Mips::T6;
+const unsigned LoadStoreStackMaskReg = Mips::T7;
+
+/// Extend the generic MCELFStreamer class so that it can mask dangerous
+/// instructions.
+
+class MipsNaClELFStreamer : public MipsELFStreamer {
+public:
+ MipsNaClELFStreamer(MCContext &Context, MCAsmBackend &TAB, raw_ostream &OS,
+ MCCodeEmitter *Emitter, const MCSubtargetInfo &STI)
+ : MipsELFStreamer(Context, TAB, OS, Emitter, STI), PendingCall(false) {}
+
+ ~MipsNaClELFStreamer() {}
+
+private:
+ // Whether we started the sandboxing sequence for calls. Calls are bundled
+ // with branch delays and aligned to the bundle end.
+ bool PendingCall;
+
+ bool isIndirectJump(const MCInst &MI) {
+ if (MI.getOpcode() == Mips::JALR) {
+ // MIPS32r6/MIPS64r6 doesn't have a JR instruction and uses JALR instead.
+ // JALR is an indirect branch if the link register is $0.
+ assert(MI.getOperand(0).isReg());
+ return MI.getOperand(0).getReg() == Mips::ZERO;
+ }
+ return MI.getOpcode() == Mips::JR;
+ }
+
+ bool isStackPointerFirstOperand(const MCInst &MI) {
+ return (MI.getNumOperands() > 0 && MI.getOperand(0).isReg()
+ && MI.getOperand(0).getReg() == Mips::SP);
+ }
+
+ bool isCall(const MCInst &MI, bool *IsIndirectCall) {
+ unsigned Opcode = MI.getOpcode();
+
+ *IsIndirectCall = false;
+
+ switch (Opcode) {
+ default:
+ return false;
+
+ case Mips::JAL:
+ case Mips::BAL:
+ case Mips::BAL_BR:
+ case Mips::BLTZAL:
+ case Mips::BGEZAL:
+ return true;
+
+ case Mips::JALR:
+ // JALR is only a call if the link register is not $0. Otherwise it's an
+ // indirect branch.
+ assert(MI.getOperand(0).isReg());
+ if (MI.getOperand(0).getReg() == Mips::ZERO)
+ return false;
+
+ *IsIndirectCall = true;
+ return true;
+ }
+ }
+
+ void emitMask(unsigned AddrReg, unsigned MaskReg,
+ const MCSubtargetInfo &STI) {
+ MCInst MaskInst;
+ MaskInst.setOpcode(Mips::AND);
+ MaskInst.addOperand(MCOperand::CreateReg(AddrReg));
+ MaskInst.addOperand(MCOperand::CreateReg(AddrReg));
+ MaskInst.addOperand(MCOperand::CreateReg(MaskReg));
+ MipsELFStreamer::EmitInstruction(MaskInst, STI);
+ }
+
+  // Sandbox an indirect branch or return instruction by inserting a mask
+  // operation before it.
+ void sandboxIndirectJump(const MCInst &MI, const MCSubtargetInfo &STI) {
+ unsigned AddrReg = MI.getOperand(0).getReg();
+
+ EmitBundleLock(false);
+ emitMask(AddrReg, IndirectBranchMaskReg, STI);
+ MipsELFStreamer::EmitInstruction(MI, STI);
+ EmitBundleUnlock();
+ }
+
+  // Sandbox a memory access or SP change. Insert a mask operation before
+  // and/or after the instruction.
+ void sandboxLoadStoreStackChange(const MCInst &MI, unsigned AddrIdx,
+ const MCSubtargetInfo &STI, bool MaskBefore,
+ bool MaskAfter) {
+ EmitBundleLock(false);
+ if (MaskBefore) {
+ // Sandbox memory access.
+ unsigned BaseReg = MI.getOperand(AddrIdx).getReg();
+ emitMask(BaseReg, LoadStoreStackMaskReg, STI);
+ }
+ MipsELFStreamer::EmitInstruction(MI, STI);
+ if (MaskAfter) {
+ // Sandbox SP change.
+ unsigned SPReg = MI.getOperand(0).getReg();
+ assert((Mips::SP == SPReg) && "Unexpected stack-pointer register.");
+ emitMask(SPReg, LoadStoreStackMaskReg, STI);
+ }
+ EmitBundleUnlock();
+ }
+
+public:
+ /// This function is the one used to emit instruction data into the ELF
+ /// streamer. We override it to mask dangerous instructions.
+ void EmitInstruction(const MCInst &Inst,
+ const MCSubtargetInfo &STI) override {
+ // Sandbox indirect jumps.
+ if (isIndirectJump(Inst)) {
+ if (PendingCall)
+ report_fatal_error("Dangerous instruction in branch delay slot!");
+ sandboxIndirectJump(Inst, STI);
+ return;
+ }
+
+ // Sandbox loads, stores and SP changes.
+ unsigned AddrIdx;
+ bool IsStore;
+ bool IsMemAccess = isBasePlusOffsetMemoryAccess(Inst.getOpcode(), &AddrIdx,
+ &IsStore);
+ bool IsSPFirstOperand = isStackPointerFirstOperand(Inst);
+ if (IsMemAccess || IsSPFirstOperand) {
+ bool MaskBefore = (IsMemAccess
+ && baseRegNeedsLoadStoreMask(Inst.getOperand(AddrIdx)
+ .getReg()));
+ bool MaskAfter = IsSPFirstOperand && !IsStore;
+ if (MaskBefore || MaskAfter) {
+ if (PendingCall)
+ report_fatal_error("Dangerous instruction in branch delay slot!");
+ sandboxLoadStoreStackChange(Inst, AddrIdx, STI, MaskBefore, MaskAfter);
+ return;
+ }
+ // fallthrough
+ }
+
+ // Sandbox calls by aligning call and branch delay to the bundle end.
+ // For indirect calls, emit the mask before the call.
+ bool IsIndirectCall;
+ if (isCall(Inst, &IsIndirectCall)) {
+ if (PendingCall)
+ report_fatal_error("Dangerous instruction in branch delay slot!");
+
+ // Start the sandboxing sequence by emitting call.
+ EmitBundleLock(true);
+ if (IsIndirectCall) {
+ unsigned TargetReg = Inst.getOperand(1).getReg();
+ emitMask(TargetReg, IndirectBranchMaskReg, STI);
+ }
+ MipsELFStreamer::EmitInstruction(Inst, STI);
+ PendingCall = true;
+ return;
+ }
+ if (PendingCall) {
+ // Finish the sandboxing sequence by emitting branch delay.
+ MipsELFStreamer::EmitInstruction(Inst, STI);
+ EmitBundleUnlock();
+ PendingCall = false;
+ return;
+ }
+
+ // None of the sandboxing applies, just emit the instruction.
+ MipsELFStreamer::EmitInstruction(Inst, STI);
+ }
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+
+bool isBasePlusOffsetMemoryAccess(unsigned Opcode, unsigned *AddrIdx,
+ bool *IsStore) {
+ if (IsStore)
+ *IsStore = false;
+
+ switch (Opcode) {
+ default:
+ return false;
+
+ // Load instructions with base address register in position 1.
+ case Mips::LB:
+ case Mips::LBu:
+ case Mips::LH:
+ case Mips::LHu:
+ case Mips::LW:
+ case Mips::LWC1:
+ case Mips::LDC1:
+ case Mips::LL:
+ case Mips::LL_R6:
+ case Mips::LWL:
+ case Mips::LWR:
+ *AddrIdx = 1;
+ return true;
+
+ // Store instructions with base address register in position 1.
+ case Mips::SB:
+ case Mips::SH:
+ case Mips::SW:
+ case Mips::SWC1:
+ case Mips::SDC1:
+ case Mips::SWL:
+ case Mips::SWR:
+ *AddrIdx = 1;
+ if (IsStore)
+ *IsStore = true;
+ return true;
+
+ // Store instructions with base address register in position 2.
+ case Mips::SC:
+ case Mips::SC_R6:
+ *AddrIdx = 2;
+ if (IsStore)
+ *IsStore = true;
+ return true;
+ }
+}
+
+bool baseRegNeedsLoadStoreMask(unsigned Reg) {
+  // The contents of SP and the thread pointer register do not require masking.
+ return Reg != Mips::SP && Reg != Mips::T8;
+}
+
+MCELFStreamer *createMipsNaClELFStreamer(MCContext &Context, MCAsmBackend &TAB,
+ raw_ostream &OS,
+ MCCodeEmitter *Emitter,
+ const MCSubtargetInfo &STI,
+ bool RelaxAll, bool NoExecStack) {
+ MipsNaClELFStreamer *S = new MipsNaClELFStreamer(Context, TAB, OS, Emitter,
+ STI);
+ if (RelaxAll)
+ S->getAssembler().setRelaxAll(true);
+ if (NoExecStack)
+ S->getAssembler().setNoExecStack(true);
+
+ // Set bundle-alignment as required by the NaCl ABI for the target.
+ S->EmitBundleAlignMode(MIPS_NACL_BUNDLE_ALIGN);
+
+ return S;
+}
+
+}
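
The streamer above never changes what an instruction computes; it only inserts an "and $addr, $addr, $mask" before dangerous uses so that the address stays inside the sandbox. With a hypothetical mask value (the real mask registers are set up by the NaCl runtime and are not part of this patch), the effect of that AND is simply:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t SandboxMask = 0x3FFFFFFF;      // hypothetical value, for illustration only
      uint32_t Computed  = 0xDEADBEEF;              // attacker-influenced address
      uint32_t Sandboxed = Computed & SandboxMask;  // what the masking AND does
      std::printf("0x%08x -> 0x%08x\n", Computed, Sandboxed);  // 0xdeadbeef -> 0x1eadbeef
    }
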
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp
new file mode 100644
index 0000000..0ef2208
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp
@@ -0,0 +1,92 @@
+//===-- MipsOptionRecord.cpp - Abstraction for storing information --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MipsOptionRecord.h"
+#include "MipsELFStreamer.h"
+#include "llvm/MC/MCSectionELF.h"
+
+using namespace llvm;
+
+void MipsRegInfoRecord::EmitMipsOptionRecord() {
+ MCAssembler &MCA = Streamer->getAssembler();
+ Triple T(STI.getTargetTriple());
+ uint64_t Features = STI.getFeatureBits();
+
+ Streamer->PushSection();
+
+ // We need to distinguish between N64 and the rest because at the moment
+  // we don't emit .MIPS.options for ELFs other than N64.
+ // Since .reginfo has the same information as .Mips.options (ODK_REGINFO),
+ // we can use the same abstraction (MipsRegInfoRecord class) to handle both.
+ if (Features & Mips::FeatureN64) {
+ // The EntrySize value of 1 seems strange since the records are neither
+    // 1-byte long nor fixed length, but it matches the value GAS emits.
+ const MCSectionELF *Sec =
+ Context.getELFSection(".MIPS.options", ELF::SHT_MIPS_OPTIONS,
+ ELF::SHF_ALLOC | ELF::SHF_MIPS_NOSTRIP,
+ SectionKind::getMetadata(), 1, "");
+ MCA.getOrCreateSectionData(*Sec).setAlignment(8);
+ Streamer->SwitchSection(Sec);
+
+ Streamer->EmitIntValue(1, 1); // kind
+ Streamer->EmitIntValue(40, 1); // size
+ Streamer->EmitIntValue(0, 2); // section
+ Streamer->EmitIntValue(0, 4); // info
+ Streamer->EmitIntValue(ri_gprmask, 4);
+ Streamer->EmitIntValue(0, 4); // pad
+ Streamer->EmitIntValue(ri_cprmask[0], 4);
+ Streamer->EmitIntValue(ri_cprmask[1], 4);
+ Streamer->EmitIntValue(ri_cprmask[2], 4);
+ Streamer->EmitIntValue(ri_cprmask[3], 4);
+ Streamer->EmitIntValue(ri_gp_value, 8);
+ } else {
+ const MCSectionELF *Sec =
+ Context.getELFSection(".reginfo", ELF::SHT_MIPS_REGINFO, ELF::SHF_ALLOC,
+ SectionKind::getMetadata(), 24, "");
+ MCA.getOrCreateSectionData(*Sec)
+ .setAlignment(Features & Mips::FeatureN32 ? 8 : 4);
+ Streamer->SwitchSection(Sec);
+
+ Streamer->EmitIntValue(ri_gprmask, 4);
+ Streamer->EmitIntValue(ri_cprmask[0], 4);
+ Streamer->EmitIntValue(ri_cprmask[1], 4);
+ Streamer->EmitIntValue(ri_cprmask[2], 4);
+ Streamer->EmitIntValue(ri_cprmask[3], 4);
+ assert((ri_gp_value & 0xffffffff) == ri_gp_value);
+ Streamer->EmitIntValue(ri_gp_value, 4);
+ }
+
+ Streamer->PopSection();
+}
+
+void MipsRegInfoRecord::SetPhysRegUsed(unsigned Reg,
+ const MCRegisterInfo *MCRegInfo) {
+ unsigned Value = 0;
+
+ for (MCSubRegIterator SubRegIt(Reg, MCRegInfo, true); SubRegIt.isValid();
+ ++SubRegIt) {
+ unsigned CurrentSubReg = *SubRegIt;
+
+ unsigned EncVal = MCRegInfo->getEncodingValue(CurrentSubReg);
+ Value |= 1 << EncVal;
+
+ if (GPR32RegClass->contains(CurrentSubReg) ||
+ GPR64RegClass->contains(CurrentSubReg))
+ ri_gprmask |= Value;
+ else if (FGR32RegClass->contains(CurrentSubReg) ||
+ FGR64RegClass->contains(CurrentSubReg) ||
+ AFGR64RegClass->contains(CurrentSubReg) ||
+ MSA128BRegClass->contains(CurrentSubReg))
+ ri_cprmask[1] |= Value;
+ else if (COP2RegClass->contains(CurrentSubReg))
+ ri_cprmask[2] |= Value;
+ else if (COP3RegClass->contains(CurrentSubReg))
+ ri_cprmask[3] |= Value;
+ }
+}
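
SetPhysRegUsed above builds the .reginfo/.MIPS.options masks one bit per register encoding value. A small worked example of the general-purpose mask (illustrative only, not part of the patch): marking $4 and $5 (a0/a1, encoding values 4 and 5) as used yields ri_gprmask = 0x30.

    #include <cstdint>
    #include <cstdio>
    #include <initializer_list>

    int main() {
      uint32_t ri_gprmask = 0;
      for (unsigned EncVal : {4u, 5u})   // encoding values of the GPRs in use
        ri_gprmask |= 1u << EncVal;
      std::printf("ri_gprmask = 0x%x\n", ri_gprmask);  // 0x30
    }
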
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsReginfo.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsReginfo.cpp
deleted file mode 100644
index 1dc9bcb..0000000
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsReginfo.cpp
+++ /dev/null
@@ -1,80 +0,0 @@
-//===-- MipsReginfo.cpp - Registerinfo handling --------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-// .reginfo
-// Elf32_Word ri_gprmask
-// Elf32_Word ri_cprmask[4]
-// Elf32_Word ri_gp_value
-//
-// .MIPS.options - N64
-// Elf64_Byte kind (ODK_REGINFO)
-// Elf64_Byte size (40 bytes)
-// Elf64_Section section (0)
-// Elf64_Word info (unused)
-// Elf64_Word ri_gprmask ()
-// Elf64_Word ri_pad ()
-// Elf64_Word[4] ri_cprmask ()
-// Elf64_Addr ri_gp_value ()
-//
-// .MIPS.options - N32
-// Elf32_Byte kind (ODK_REGINFO)
-// Elf32_Byte size (36 bytes)
-// Elf32_Section section (0)
-// Elf32_Word info (unused)
-// Elf32_Word ri_gprmask ()
-// Elf32_Word ri_pad ()
-// Elf32_Word[4] ri_cprmask ()
-// Elf32_Addr ri_gp_value ()
-//
-//===----------------------------------------------------------------------===//
-#include "MCTargetDesc/MipsReginfo.h"
-#include "MipsSubtarget.h"
-#include "MipsTargetObjectFile.h"
-#include "llvm/MC/MCStreamer.h"
-
-using namespace llvm;
-
-// Integrated assembler version
-void
-MipsReginfo::emitMipsReginfoSectionCG(MCStreamer &OS,
- const TargetLoweringObjectFile &TLOF,
- const MipsSubtarget &MST) const
-{
-
- if (OS.hasRawTextSupport())
- return;
-
- const MipsTargetObjectFile &TLOFELF =
- static_cast<const MipsTargetObjectFile &>(TLOF);
- OS.SwitchSection(TLOFELF.getReginfoSection());
-
- // .reginfo
- if (MST.isABI_O32()) {
- OS.EmitIntValue(0, 4); // ri_gprmask
- OS.EmitIntValue(0, 4); // ri_cpr[0]mask
- OS.EmitIntValue(0, 4); // ri_cpr[1]mask
- OS.EmitIntValue(0, 4); // ri_cpr[2]mask
- OS.EmitIntValue(0, 4); // ri_cpr[3]mask
- OS.EmitIntValue(0, 4); // ri_gp_value
- }
- // .MIPS.options
- else if (MST.isABI_N64()) {
- OS.EmitIntValue(1, 1); // kind
- OS.EmitIntValue(40, 1); // size
- OS.EmitIntValue(0, 2); // section
- OS.EmitIntValue(0, 4); // info
- OS.EmitIntValue(0, 4); // ri_gprmask
- OS.EmitIntValue(0, 4); // pad
- OS.EmitIntValue(0, 4); // ri_cpr[0]mask
- OS.EmitIntValue(0, 4); // ri_cpr[1]mask
- OS.EmitIntValue(0, 4); // ri_cpr[2]mask
- OS.EmitIntValue(0, 4); // ri_cpr[3]mask
- OS.EmitIntValue(0, 8); // ri_gp_value
- }
- else llvm_unreachable("Unsupported abi for reginfo");
-}
-
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsReginfo.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsReginfo.h
deleted file mode 100644
index 039b8ea..0000000
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsReginfo.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//=== MipsReginfo.h - MipsReginfo -----------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENCE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MIPSREGINFO_H
-#define MIPSREGINFO_H
-
-namespace llvm {
- class MCStreamer;
- class TargetLoweringObjectFile;
- class MipsSubtarget;
-
- class MipsReginfo {
- void anchor();
- public:
- MipsReginfo() {}
-
- void emitMipsReginfoSectionCG(MCStreamer &OS,
- const TargetLoweringObjectFile &TLOF,
- const MipsSubtarget &MST) const;
- };
-
-} // namespace llvm
-
-#endif
-
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
index 5e90bbc..4a178e2 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
@@ -11,57 +11,632 @@
//
//===----------------------------------------------------------------------===//
+#include "InstPrinter/MipsInstPrinter.h"
+#include "MipsELFStreamer.h"
+#include "MipsMCTargetDesc.h"
+#include "MipsTargetObjectFile.h"
#include "MipsTargetStreamer.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELF.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
using namespace llvm;
-static cl::opt<bool> PrintHackDirectives("print-hack-directives",
- cl::init(false), cl::Hidden);
+MipsTargetStreamer::MipsTargetStreamer(MCStreamer &S)
+ : MCTargetStreamer(S), canHaveModuleDirective(true) {}
+void MipsTargetStreamer::emitDirectiveSetMicroMips() {}
+void MipsTargetStreamer::emitDirectiveSetNoMicroMips() {}
+void MipsTargetStreamer::emitDirectiveSetMips16() {}
+void MipsTargetStreamer::emitDirectiveSetNoMips16() {}
+void MipsTargetStreamer::emitDirectiveSetReorder() {}
+void MipsTargetStreamer::emitDirectiveSetNoReorder() {}
+void MipsTargetStreamer::emitDirectiveSetMacro() {}
+void MipsTargetStreamer::emitDirectiveSetNoMacro() {}
+void MipsTargetStreamer::emitDirectiveSetAt() {}
+void MipsTargetStreamer::emitDirectiveSetNoAt() {}
+void MipsTargetStreamer::emitDirectiveEnd(StringRef Name) {}
+void MipsTargetStreamer::emitDirectiveEnt(const MCSymbol &Symbol) {}
+void MipsTargetStreamer::emitDirectiveAbiCalls() {}
+void MipsTargetStreamer::emitDirectiveNaN2008() {}
+void MipsTargetStreamer::emitDirectiveNaNLegacy() {}
+void MipsTargetStreamer::emitDirectiveOptionPic0() {}
+void MipsTargetStreamer::emitDirectiveOptionPic2() {}
+void MipsTargetStreamer::emitFrame(unsigned StackReg, unsigned StackSize,
+ unsigned ReturnReg) {}
+void MipsTargetStreamer::emitMask(unsigned CPUBitmask, int CPUTopSavedRegOff) {}
+void MipsTargetStreamer::emitFMask(unsigned FPUBitmask, int FPUTopSavedRegOff) {
+}
+void MipsTargetStreamer::emitDirectiveSetMips32R2() {}
+void MipsTargetStreamer::emitDirectiveSetMips64() {}
+void MipsTargetStreamer::emitDirectiveSetMips64R2() {}
+void MipsTargetStreamer::emitDirectiveSetDsp() {}
+void MipsTargetStreamer::emitDirectiveCpload(unsigned RegNo) {}
+void MipsTargetStreamer::emitDirectiveCpsetup(unsigned RegNo, int RegOrOffset,
+ const MCSymbol &Sym, bool IsReg) {
+}
+void MipsTargetStreamer::emitDirectiveModuleOddSPReg(bool Enabled,
+ bool IsO32ABI) {
+ if (!Enabled && !IsO32ABI)
+ report_fatal_error("+nooddspreg is only valid for O32");
+}
-// pin vtable to this file
-void MipsTargetStreamer::anchor() {}
+MipsTargetAsmStreamer::MipsTargetAsmStreamer(MCStreamer &S,
+ formatted_raw_ostream &OS)
+ : MipsTargetStreamer(S), OS(OS) {}
-MipsTargetAsmStreamer::MipsTargetAsmStreamer(formatted_raw_ostream &OS)
- : OS(OS) {}
+void MipsTargetAsmStreamer::emitDirectiveSetMicroMips() {
+ OS << "\t.set\tmicromips\n";
+ setCanHaveModuleDir(false);
+}
-void MipsTargetAsmStreamer::emitMipsHackELFFlags(unsigned Flags) {
- if (!PrintHackDirectives)
- return;
+void MipsTargetAsmStreamer::emitDirectiveSetNoMicroMips() {
+ OS << "\t.set\tnomicromips\n";
+ setCanHaveModuleDir(false);
+}
- OS << "\t.mips_hack_elf_flags 0x";
- OS.write_hex(Flags);
- OS << '\n';
+void MipsTargetAsmStreamer::emitDirectiveSetMips16() {
+ OS << "\t.set\tmips16\n";
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetAsmStreamer::emitDirectiveSetNoMips16() {
+ OS << "\t.set\tnomips16\n";
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetAsmStreamer::emitDirectiveSetReorder() {
+ OS << "\t.set\treorder\n";
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetAsmStreamer::emitDirectiveSetNoReorder() {
+ OS << "\t.set\tnoreorder\n";
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetAsmStreamer::emitDirectiveSetMacro() {
+ OS << "\t.set\tmacro\n";
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetAsmStreamer::emitDirectiveSetNoMacro() {
+ OS << "\t.set\tnomacro\n";
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetAsmStreamer::emitDirectiveSetAt() {
+ OS << "\t.set\tat\n";
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetAsmStreamer::emitDirectiveSetNoAt() {
+ OS << "\t.set\tnoat\n";
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetAsmStreamer::emitDirectiveEnd(StringRef Name) {
+ OS << "\t.end\t" << Name << '\n';
+}
+
+void MipsTargetAsmStreamer::emitDirectiveEnt(const MCSymbol &Symbol) {
+ OS << "\t.ent\t" << Symbol.getName() << '\n';
+}
+
+void MipsTargetAsmStreamer::emitDirectiveAbiCalls() { OS << "\t.abicalls\n"; }
+
+void MipsTargetAsmStreamer::emitDirectiveNaN2008() { OS << "\t.nan\t2008\n"; }
+
+void MipsTargetAsmStreamer::emitDirectiveNaNLegacy() {
+ OS << "\t.nan\tlegacy\n";
+}
+
+void MipsTargetAsmStreamer::emitDirectiveOptionPic0() {
+ OS << "\t.option\tpic0\n";
+}
+
+void MipsTargetAsmStreamer::emitDirectiveOptionPic2() {
+ OS << "\t.option\tpic2\n";
+}
+
+void MipsTargetAsmStreamer::emitFrame(unsigned StackReg, unsigned StackSize,
+ unsigned ReturnReg) {
+ OS << "\t.frame\t$"
+ << StringRef(MipsInstPrinter::getRegisterName(StackReg)).lower() << ","
+ << StackSize << ",$"
+ << StringRef(MipsInstPrinter::getRegisterName(ReturnReg)).lower() << '\n';
+}
+
+void MipsTargetAsmStreamer::emitDirectiveSetMips32R2() {
+ OS << "\t.set\tmips32r2\n";
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetAsmStreamer::emitDirectiveSetMips64() {
+ OS << "\t.set\tmips64\n";
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetAsmStreamer::emitDirectiveSetMips64R2() {
+ OS << "\t.set\tmips64r2\n";
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetAsmStreamer::emitDirectiveSetDsp() {
+ OS << "\t.set\tdsp\n";
+ setCanHaveModuleDir(false);
+}
+// Print a 32 bit hex number with all eight digits, including leading zeros.
+static void printHex32(unsigned Value, raw_ostream &OS) {
+ OS << "0x";
+ for (int i = 7; i >= 0; i--)
+ OS.write_hex((Value & (0xF << (i * 4))) >> (i * 4));
+}
+
+void MipsTargetAsmStreamer::emitMask(unsigned CPUBitmask,
+ int CPUTopSavedRegOff) {
+ OS << "\t.mask \t";
+ printHex32(CPUBitmask, OS);
+ OS << ',' << CPUTopSavedRegOff << '\n';
}
-void MipsTargetAsmStreamer::emitMipsHackSTOCG(MCSymbol *Sym, unsigned Val) {
- if (!PrintHackDirectives)
- return;
- OS << "\t.mips_hack_stocg ";
- OS << Sym->getName();
+void MipsTargetAsmStreamer::emitFMask(unsigned FPUBitmask,
+ int FPUTopSavedRegOff) {
+ OS << "\t.fmask\t";
+ printHex32(FPUBitmask, OS);
+ OS << "," << FPUTopSavedRegOff << '\n';
+}
+
+void MipsTargetAsmStreamer::emitDirectiveCpload(unsigned RegNo) {
+ OS << "\t.cpload\t$"
+ << StringRef(MipsInstPrinter::getRegisterName(RegNo)).lower() << "\n";
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetAsmStreamer::emitDirectiveCpsetup(unsigned RegNo,
+ int RegOrOffset,
+ const MCSymbol &Sym,
+ bool IsReg) {
+ OS << "\t.cpsetup\t$"
+ << StringRef(MipsInstPrinter::getRegisterName(RegNo)).lower() << ", ";
+
+ if (IsReg)
+ OS << "$"
+ << StringRef(MipsInstPrinter::getRegisterName(RegOrOffset)).lower();
+ else
+ OS << RegOrOffset;
+
OS << ", ";
- OS << Val;
- OS << '\n';
+
+ OS << Sym.getName() << "\n";
+ setCanHaveModuleDir(false);
}
-MCELFStreamer &MipsTargetELFStreamer::getStreamer() {
- return static_cast<MCELFStreamer &>(*Streamer);
+void MipsTargetAsmStreamer::emitDirectiveModuleFP(
+ MipsABIFlagsSection::FpABIKind Value, bool Is32BitABI) {
+ MipsTargetStreamer::emitDirectiveModuleFP(Value, Is32BitABI);
+
+ OS << "\t.module\tfp=";
+ OS << ABIFlagsSection.getFpABIString(Value) << "\n";
}
-void MipsTargetELFStreamer::emitMipsHackELFFlags(unsigned Flags) {
+void MipsTargetAsmStreamer::emitDirectiveSetFp(
+ MipsABIFlagsSection::FpABIKind Value) {
+ OS << "\t.set\tfp=";
+ OS << ABIFlagsSection.getFpABIString(Value) << "\n";
+}
+
+void MipsTargetAsmStreamer::emitMipsAbiFlags() {
+ // No action required for text output.
+}
+
+void MipsTargetAsmStreamer::emitDirectiveModuleOddSPReg(bool Enabled,
+ bool IsO32ABI) {
+ MipsTargetStreamer::emitDirectiveModuleOddSPReg(Enabled, IsO32ABI);
+
+ OS << "\t.module\t" << (Enabled ? "" : "no") << "oddspreg\n";
+}
+
+// This part is for ELF object output.
+MipsTargetELFStreamer::MipsTargetELFStreamer(MCStreamer &S,
+ const MCSubtargetInfo &STI)
+ : MipsTargetStreamer(S), MicroMipsEnabled(false), STI(STI) {
MCAssembler &MCA = getStreamer().getAssembler();
- MCA.setELFHeaderEFlags(Flags);
+ uint64_t Features = STI.getFeatureBits();
+ Triple T(STI.getTargetTriple());
+  Pic = MCA.getContext().getObjectFileInfo()->getRelocM() == Reloc::PIC_;
+
+ // Update e_header flags
+ unsigned EFlags = 0;
+
+ // Architecture
+ if (Features & Mips::FeatureMips64r6)
+ EFlags |= ELF::EF_MIPS_ARCH_64R6;
+ else if (Features & Mips::FeatureMips64r2)
+ EFlags |= ELF::EF_MIPS_ARCH_64R2;
+ else if (Features & Mips::FeatureMips64)
+ EFlags |= ELF::EF_MIPS_ARCH_64;
+ else if (Features & Mips::FeatureMips5)
+ EFlags |= ELF::EF_MIPS_ARCH_5;
+ else if (Features & Mips::FeatureMips4)
+ EFlags |= ELF::EF_MIPS_ARCH_4;
+ else if (Features & Mips::FeatureMips3)
+ EFlags |= ELF::EF_MIPS_ARCH_3;
+ else if (Features & Mips::FeatureMips32r6)
+ EFlags |= ELF::EF_MIPS_ARCH_32R6;
+ else if (Features & Mips::FeatureMips32r2)
+ EFlags |= ELF::EF_MIPS_ARCH_32R2;
+ else if (Features & Mips::FeatureMips32)
+ EFlags |= ELF::EF_MIPS_ARCH_32;
+ else if (Features & Mips::FeatureMips2)
+ EFlags |= ELF::EF_MIPS_ARCH_2;
+ else
+ EFlags |= ELF::EF_MIPS_ARCH_1;
+
+ // ABI
+ // N64 does not require any ABI bits.
+ if (Features & Mips::FeatureO32)
+ EFlags |= ELF::EF_MIPS_ABI_O32;
+ else if (Features & Mips::FeatureN32)
+ EFlags |= ELF::EF_MIPS_ABI2;
+
+ if (Features & Mips::FeatureGP64Bit) {
+ if (Features & Mips::FeatureO32)
+ EFlags |= ELF::EF_MIPS_32BITMODE; /* Compatibility Mode */
+ } else if (Features & Mips::FeatureMips64r2 || Features & Mips::FeatureMips64)
+ EFlags |= ELF::EF_MIPS_32BITMODE;
+
+ // Other options.
+ if (Features & Mips::FeatureNaN2008)
+ EFlags |= ELF::EF_MIPS_NAN2008;
+
+ // -mabicalls and -mplt are not implemented but we should act as if they were
+ // given.
+ EFlags |= ELF::EF_MIPS_CPIC;
+ if (Features & Mips::FeatureN64)
+ EFlags |= ELF::EF_MIPS_PIC;
+
+ MCA.setELFHeaderEFlags(EFlags);
}
-// Set a symbol's STO flags
-void MipsTargetELFStreamer::emitMipsHackSTOCG(MCSymbol *Sym, unsigned Val) {
- MCSymbolData &Data = getStreamer().getOrCreateSymbolData(Sym);
+void MipsTargetELFStreamer::emitLabel(MCSymbol *Symbol) {
+ if (!isMicroMipsEnabled())
+ return;
+ MCSymbolData &Data = getStreamer().getOrCreateSymbolData(Symbol);
+ uint8_t Type = MCELF::GetType(Data);
+ if (Type != ELF::STT_FUNC)
+ return;
+
// The "other" values are stored in the last 6 bits of the second byte
// The traditional defines for STO values assume the full byte and thus
// the shift to pack it.
- MCELF::setOther(Data, Val >> 2);
+ MCELF::setOther(Data, ELF::STO_MIPS_MICROMIPS >> 2);
+}
+
+void MipsTargetELFStreamer::finish() {
+ MCAssembler &MCA = getStreamer().getAssembler();
+ const MCObjectFileInfo &OFI = *MCA.getContext().getObjectFileInfo();
+
+ // .bss, .text and .data are always at least 16-byte aligned.
+ MCSectionData &TextSectionData =
+ MCA.getOrCreateSectionData(*OFI.getTextSection());
+ MCSectionData &DataSectionData =
+ MCA.getOrCreateSectionData(*OFI.getDataSection());
+ MCSectionData &BSSSectionData =
+ MCA.getOrCreateSectionData(*OFI.getBSSSection());
+
+ TextSectionData.setAlignment(std::max(16u, TextSectionData.getAlignment()));
+ DataSectionData.setAlignment(std::max(16u, DataSectionData.getAlignment()));
+ BSSSectionData.setAlignment(std::max(16u, BSSSectionData.getAlignment()));
+
+ // Emit all the option records.
+  // At the moment we are only emitting .MIPS.options (ODK_REGINFO) and
+ // .reginfo.
+ MipsELFStreamer &MEF = static_cast<MipsELFStreamer &>(Streamer);
+ MEF.EmitMipsOptionRecords();
+
+ emitMipsAbiFlags();
+}
+
+void MipsTargetELFStreamer::emitAssignment(MCSymbol *Symbol,
+ const MCExpr *Value) {
+  // If the symbol on the rhs is a microMIPS symbol, mark Symbol as microMIPS too.
+ if (Value->getKind() != MCExpr::SymbolRef)
+ return;
+ const MCSymbol &RhsSym =
+ static_cast<const MCSymbolRefExpr *>(Value)->getSymbol();
+ MCSymbolData &Data = getStreamer().getOrCreateSymbolData(&RhsSym);
+ uint8_t Type = MCELF::GetType(Data);
+ if ((Type != ELF::STT_FUNC) ||
+ !(MCELF::getOther(Data) & (ELF::STO_MIPS_MICROMIPS >> 2)))
+ return;
+
+ MCSymbolData &SymbolData = getStreamer().getOrCreateSymbolData(Symbol);
+ // The "other" values are stored in the last 6 bits of the second byte.
+ // The traditional defines for STO values assume the full byte and thus
+ // the shift to pack it.
+ MCELF::setOther(SymbolData, ELF::STO_MIPS_MICROMIPS >> 2);
+}
+
+MCELFStreamer &MipsTargetELFStreamer::getStreamer() {
+ return static_cast<MCELFStreamer &>(Streamer);
+}
+
+void MipsTargetELFStreamer::emitDirectiveSetMicroMips() {
+ MicroMipsEnabled = true;
+
+ MCAssembler &MCA = getStreamer().getAssembler();
+ unsigned Flags = MCA.getELFHeaderEFlags();
+ Flags |= ELF::EF_MIPS_MICROMIPS;
+ MCA.setELFHeaderEFlags(Flags);
+}
+
+void MipsTargetELFStreamer::emitDirectiveSetNoMicroMips() {
+ MicroMipsEnabled = false;
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetELFStreamer::emitDirectiveSetMips16() {
+ MCAssembler &MCA = getStreamer().getAssembler();
+ unsigned Flags = MCA.getELFHeaderEFlags();
+ Flags |= ELF::EF_MIPS_ARCH_ASE_M16;
+ MCA.setELFHeaderEFlags(Flags);
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetELFStreamer::emitDirectiveSetNoMips16() {
+ // FIXME: implement.
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetELFStreamer::emitDirectiveSetReorder() {
+ // FIXME: implement.
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetELFStreamer::emitDirectiveSetNoReorder() {
+ MCAssembler &MCA = getStreamer().getAssembler();
+ unsigned Flags = MCA.getELFHeaderEFlags();
+ Flags |= ELF::EF_MIPS_NOREORDER;
+ MCA.setELFHeaderEFlags(Flags);
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetELFStreamer::emitDirectiveSetMacro() {
+ // FIXME: implement.
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetELFStreamer::emitDirectiveSetNoMacro() {
+ // FIXME: implement.
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetELFStreamer::emitDirectiveSetAt() {
+ // FIXME: implement.
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetELFStreamer::emitDirectiveSetNoAt() {
+ // FIXME: implement.
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetELFStreamer::emitDirectiveEnd(StringRef Name) {
+ // FIXME: implement.
+}
+
+void MipsTargetELFStreamer::emitDirectiveEnt(const MCSymbol &Symbol) {
+ // FIXME: implement.
+}
+
+void MipsTargetELFStreamer::emitDirectiveAbiCalls() {
+ MCAssembler &MCA = getStreamer().getAssembler();
+ unsigned Flags = MCA.getELFHeaderEFlags();
+ Flags |= ELF::EF_MIPS_CPIC | ELF::EF_MIPS_PIC;
+ MCA.setELFHeaderEFlags(Flags);
+}
+
+void MipsTargetELFStreamer::emitDirectiveNaN2008() {
+ MCAssembler &MCA = getStreamer().getAssembler();
+ unsigned Flags = MCA.getELFHeaderEFlags();
+ Flags |= ELF::EF_MIPS_NAN2008;
+ MCA.setELFHeaderEFlags(Flags);
+}
+
+void MipsTargetELFStreamer::emitDirectiveNaNLegacy() {
+ MCAssembler &MCA = getStreamer().getAssembler();
+ unsigned Flags = MCA.getELFHeaderEFlags();
+ Flags &= ~ELF::EF_MIPS_NAN2008;
+ MCA.setELFHeaderEFlags(Flags);
+}
+
+void MipsTargetELFStreamer::emitDirectiveOptionPic0() {
+ MCAssembler &MCA = getStreamer().getAssembler();
+ unsigned Flags = MCA.getELFHeaderEFlags();
+ // This option overrides other PIC options like -KPIC.
+ Pic = false;
+ Flags &= ~ELF::EF_MIPS_PIC;
+ MCA.setELFHeaderEFlags(Flags);
+}
+
+void MipsTargetELFStreamer::emitDirectiveOptionPic2() {
+ MCAssembler &MCA = getStreamer().getAssembler();
+ unsigned Flags = MCA.getELFHeaderEFlags();
+ Pic = true;
+  // NOTE: We are following the GAS behaviour here, which means the directive
+  // 'pic2' also sets the CPIC bit in the ELF header. This is different from
+  // what is stated in the SYSV ABI, which considers the bits EF_MIPS_PIC and
+  // EF_MIPS_CPIC to be mutually exclusive.
+ Flags |= ELF::EF_MIPS_PIC | ELF::EF_MIPS_CPIC;
+ MCA.setELFHeaderEFlags(Flags);
+}
+
+void MipsTargetELFStreamer::emitFrame(unsigned StackReg, unsigned StackSize,
+ unsigned ReturnReg) {
+ // FIXME: implement.
+}
+
+void MipsTargetELFStreamer::emitMask(unsigned CPUBitmask,
+ int CPUTopSavedRegOff) {
+ // FIXME: implement.
+}
+
+void MipsTargetELFStreamer::emitFMask(unsigned FPUBitmask,
+ int FPUTopSavedRegOff) {
+ // FIXME: implement.
+}
+
+void MipsTargetELFStreamer::emitDirectiveSetMips32R2() {
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetELFStreamer::emitDirectiveSetMips64() {
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetELFStreamer::emitDirectiveSetMips64R2() {
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetELFStreamer::emitDirectiveSetDsp() {
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetELFStreamer::emitDirectiveCpload(unsigned RegNo) {
+ // .cpload $reg
+ // This directive expands to:
+ // lui $gp, %hi(_gp_disp)
+  // addiu $gp, $gp, %lo(_gp_disp)
+ // addu $gp, $gp, $reg
+ // when support for position independent code is enabled.
+ if (!Pic || (isN32() || isN64()))
+ return;
+
+ // There's a GNU extension controlled by -mno-shared that allows
+ // locally-binding symbols to be accessed using absolute addresses.
+ // This is currently not supported. When supported -mno-shared makes
+ // .cpload expand to:
+ // lui $gp, %hi(__gnu_local_gp)
+ // addiu $gp, $gp, %lo(__gnu_local_gp)
+
+ StringRef SymName("_gp_disp");
+ MCAssembler &MCA = getStreamer().getAssembler();
+ MCSymbol *GP_Disp = MCA.getContext().GetOrCreateSymbol(SymName);
+ MCA.getOrCreateSymbolData(*GP_Disp);
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(Mips::LUi);
+ TmpInst.addOperand(MCOperand::CreateReg(Mips::GP));
+ const MCSymbolRefExpr *HiSym = MCSymbolRefExpr::Create(
+ "_gp_disp", MCSymbolRefExpr::VK_Mips_ABS_HI, MCA.getContext());
+ TmpInst.addOperand(MCOperand::CreateExpr(HiSym));
+ getStreamer().EmitInstruction(TmpInst, STI);
+
+ TmpInst.clear();
+
+ TmpInst.setOpcode(Mips::ADDiu);
+ TmpInst.addOperand(MCOperand::CreateReg(Mips::GP));
+ TmpInst.addOperand(MCOperand::CreateReg(Mips::GP));
+ const MCSymbolRefExpr *LoSym = MCSymbolRefExpr::Create(
+ "_gp_disp", MCSymbolRefExpr::VK_Mips_ABS_LO, MCA.getContext());
+ TmpInst.addOperand(MCOperand::CreateExpr(LoSym));
+ getStreamer().EmitInstruction(TmpInst, STI);
+
+ TmpInst.clear();
+
+ TmpInst.setOpcode(Mips::ADDu);
+ TmpInst.addOperand(MCOperand::CreateReg(Mips::GP));
+ TmpInst.addOperand(MCOperand::CreateReg(Mips::GP));
+ TmpInst.addOperand(MCOperand::CreateReg(RegNo));
+ getStreamer().EmitInstruction(TmpInst, STI);
+
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetELFStreamer::emitDirectiveCpsetup(unsigned RegNo,
+ int RegOrOffset,
+ const MCSymbol &Sym,
+ bool IsReg) {
+  // Only N32 and N64 emit anything for .cpsetup, and only when PIC is set.
+ if (!Pic || !(isN32() || isN64()))
+ return;
+
+ MCAssembler &MCA = getStreamer().getAssembler();
+ MCInst Inst;
+
+ // Either store the old $gp in a register or on the stack
+ if (IsReg) {
+ // move $save, $gpreg
+ Inst.setOpcode(Mips::DADDu);
+ Inst.addOperand(MCOperand::CreateReg(RegOrOffset));
+ Inst.addOperand(MCOperand::CreateReg(Mips::GP));
+ Inst.addOperand(MCOperand::CreateReg(Mips::ZERO));
+ } else {
+ // sd $gpreg, offset($sp)
+ Inst.setOpcode(Mips::SD);
+ Inst.addOperand(MCOperand::CreateReg(Mips::GP));
+ Inst.addOperand(MCOperand::CreateReg(Mips::SP));
+ Inst.addOperand(MCOperand::CreateImm(RegOrOffset));
+ }
+ getStreamer().EmitInstruction(Inst, STI);
+ Inst.clear();
+
+ const MCSymbolRefExpr *HiExpr = MCSymbolRefExpr::Create(
+ Sym.getName(), MCSymbolRefExpr::VK_Mips_GPOFF_HI, MCA.getContext());
+ const MCSymbolRefExpr *LoExpr = MCSymbolRefExpr::Create(
+ Sym.getName(), MCSymbolRefExpr::VK_Mips_GPOFF_LO, MCA.getContext());
+ // lui $gp, %hi(%neg(%gp_rel(funcSym)))
+ Inst.setOpcode(Mips::LUi);
+ Inst.addOperand(MCOperand::CreateReg(Mips::GP));
+ Inst.addOperand(MCOperand::CreateExpr(HiExpr));
+ getStreamer().EmitInstruction(Inst, STI);
+ Inst.clear();
+
+ // addiu $gp, $gp, %lo(%neg(%gp_rel(funcSym)))
+ Inst.setOpcode(Mips::ADDiu);
+ Inst.addOperand(MCOperand::CreateReg(Mips::GP));
+ Inst.addOperand(MCOperand::CreateReg(Mips::GP));
+ Inst.addOperand(MCOperand::CreateExpr(LoExpr));
+ getStreamer().EmitInstruction(Inst, STI);
+ Inst.clear();
+
+ // daddu $gp, $gp, $funcreg
+ Inst.setOpcode(Mips::DADDu);
+ Inst.addOperand(MCOperand::CreateReg(Mips::GP));
+ Inst.addOperand(MCOperand::CreateReg(Mips::GP));
+ Inst.addOperand(MCOperand::CreateReg(RegNo));
+ getStreamer().EmitInstruction(Inst, STI);
+
+ setCanHaveModuleDir(false);
+}
+
+void MipsTargetELFStreamer::emitMipsAbiFlags() {
+ MCAssembler &MCA = getStreamer().getAssembler();
+ MCContext &Context = MCA.getContext();
+ MCStreamer &OS = getStreamer();
+ const MCSectionELF *Sec =
+ Context.getELFSection(".MIPS.abiflags", ELF::SHT_MIPS_ABIFLAGS,
+ ELF::SHF_ALLOC, SectionKind::getMetadata(), 24, "");
+ MCSectionData &ABIShndxSD = MCA.getOrCreateSectionData(*Sec);
+ ABIShndxSD.setAlignment(8);
+ OS.SwitchSection(Sec);
+
+ OS << ABIFlagsSection;
+}
+
+void MipsTargetELFStreamer::emitDirectiveModuleOddSPReg(bool Enabled,
+ bool IsO32ABI) {
+ MipsTargetStreamer::emitDirectiveModuleOddSPReg(Enabled, IsO32ABI);
+
+ ABIFlagsSection.OddSPReg = Enabled;
}
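
The MipsTargetELFStreamer constructor added above derives the ELF e_flags word entirely from the subtarget feature bits. As a worked example, a mips64r2 / N64 / nan2008 target would end up with the value below (flag constants as defined in llvm/Support/ELF.h; N64 itself contributes no ABI bits):

    #include <cstdio>

    int main() {
      const unsigned EF_MIPS_ARCH_64R2 = 0x80000000;
      const unsigned EF_MIPS_NAN2008   = 0x00000400;
      const unsigned EF_MIPS_CPIC      = 0x00000004;
      const unsigned EF_MIPS_PIC       = 0x00000002;

      unsigned EFlags = 0;
      EFlags |= EF_MIPS_ARCH_64R2;  // FeatureMips64r2
      EFlags |= EF_MIPS_NAN2008;    // FeatureNaN2008
      EFlags |= EF_MIPS_CPIC;       // always set (-mabicalls assumed)
      EFlags |= EF_MIPS_PIC;        // FeatureN64
      std::printf("e_flags = 0x%08x\n", EFlags);  // 0x80000406
    }
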
diff --git a/contrib/llvm/lib/Target/Mips/MSA.txt b/contrib/llvm/lib/Target/Mips/MSA.txt
index d1c4193..113375f 100644
--- a/contrib/llvm/lib/Target/Mips/MSA.txt
+++ b/contrib/llvm/lib/Target/Mips/MSA.txt
@@ -62,11 +62,16 @@ binsri.[bhwd], binsli.[bhwd]:
bmnz.v, bmz.v, bsel.v:
These three operations differ only in the operand that is tied to the
- result.
+ result and the order of the operands.
It is (currently) not possible to emit bmz.v, or bsel.v since bmnz.v is
the same operation and will be emitted instead.
In future, the compiler may choose between these three instructions
according to register allocation.
+  These three operations can be very confusing, so here is a mapping
+ between the instructions and the vselect node in one place:
+ bmz.v wd, ws, wt/i8 -> (vselect wt/i8, wd, ws)
+ bmnz.v wd, ws, wt/i8 -> (vselect wt/i8, ws, wd)
+ bsel.v wd, ws, wt/i8 -> (vselect wd, wt/i8, ws)
bmnzi.b, bmzi.b:
Like their non-immediate counterparts, bmnzi.v and bmzi.v are the same
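
The bmnz.v/bmz.v/bsel.v to vselect table above can be sanity-checked against the bitwise definitions of the three instructions: vselect takes, bit for bit, its second operand where the mask bit is 1 and its third operand where it is 0. A small C++ sketch under that assumption (operand names follow the table; the per-bit instruction definitions are restated here from the MSA manual rather than taken from this patch):

    #include <cassert>
    #include <cstdint>

    // Bitwise vselect: bits of T where the mask bit is 1, bits of F where it is 0.
    static uint32_t vselect(uint32_t Mask, uint32_t T, uint32_t F) {
      return (T & Mask) | (F & ~Mask);
    }

    int main() {
      uint32_t wd = 0x12345678, ws = 0x9abcdef0, wt = 0x0ff00ff0;
      // bmz.v  wd, ws, wt: bits of ws move into wd where wt is 0.
      assert(((wd & wt) | (ws & ~wt)) == vselect(wt, wd, ws));
      // bmnz.v wd, ws, wt: bits of ws move into wd where wt is 1.
      assert(((ws & wt) | (wd & ~wt)) == vselect(wt, ws, wd));
      // bsel.v wd, ws, wt: wd selects ws where its own bit is 0, wt where it is 1.
      assert(((wt & wd) | (ws & ~wd)) == vselect(wd, wt, ws));
      return 0;
    }
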
diff --git a/contrib/llvm/lib/Target/Mips/MicroMipsInstrFPU.td b/contrib/llvm/lib/Target/Mips/MicroMipsInstrFPU.td
new file mode 100644
index 0000000..b93017a
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MicroMipsInstrFPU.td
@@ -0,0 +1,148 @@
+let isCodeGenOnly = 1, Predicates = [InMicroMips] in {
+def FADD_S_MM : MMRel, ADDS_FT<"add.s", FGR32Opnd, II_ADD_S, 1, fadd>,
+ ADDS_FM_MM<0, 0x30>;
+def FDIV_S_MM : MMRel, ADDS_FT<"div.s", FGR32Opnd, II_DIV_S, 0, fdiv>,
+ ADDS_FM_MM<0, 0xf0>;
+def FMUL_S_MM : MMRel, ADDS_FT<"mul.s", FGR32Opnd, II_MUL_S, 1, fmul>,
+ ADDS_FM_MM<0, 0xb0>;
+def FSUB_S_MM : MMRel, ADDS_FT<"sub.s", FGR32Opnd, II_SUB_S, 0, fsub>,
+ ADDS_FM_MM<0, 0x70>;
+
+def FADD_MM : MMRel, ADDS_FT<"add.d", AFGR64Opnd, II_ADD_D, 1, fadd>,
+ ADDS_FM_MM<1, 0x30>;
+def FDIV_MM : MMRel, ADDS_FT<"div.d", AFGR64Opnd, II_DIV_D, 0, fdiv>,
+ ADDS_FM_MM<1, 0xf0>;
+def FMUL_MM : MMRel, ADDS_FT<"mul.d", AFGR64Opnd, II_MUL_D, 1, fmul>,
+ ADDS_FM_MM<1, 0xb0>;
+def FSUB_MM : MMRel, ADDS_FT<"sub.d", AFGR64Opnd, II_SUB_D, 0, fsub>,
+ ADDS_FM_MM<1, 0x70>;
+
+def LWC1_MM : MMRel, LW_FT<"lwc1", FGR32Opnd, II_LWC1, load>, LW_FM_MM<0x27>;
+def SWC1_MM : MMRel, SW_FT<"swc1", FGR32Opnd, II_SWC1, store>,
+ LW_FM_MM<0x26>;
+def LDC1_MM : MMRel, LW_FT<"ldc1", AFGR64Opnd, II_LDC1, load>, LW_FM_MM<0x2f>;
+def SDC1_MM : MMRel, SW_FT<"sdc1", AFGR64Opnd, II_SDC1, store>,
+ LW_FM_MM<0x2e>;
+def LWXC1_MM : MMRel, LWXC1_FT<"lwxc1", FGR32Opnd, II_LWXC1, load>,
+ LWXC1_FM_MM<0x48>, INSN_MIPS4_32R2_NOT_32R6_64R6;
+def SWXC1_MM : MMRel, SWXC1_FT<"swxc1", FGR32Opnd, II_SWXC1, store>,
+ SWXC1_FM_MM<0x88>, INSN_MIPS4_32R2_NOT_32R6_64R6;
+def LUXC1_MM : MMRel, LWXC1_FT<"luxc1", AFGR64Opnd, II_LUXC1>,
+ LWXC1_FM_MM<0x148>, INSN_MIPS5_32R2_NOT_32R6_64R6;
+def SUXC1_MM : MMRel, SWXC1_FT<"suxc1", AFGR64Opnd, II_SUXC1>,
+ SWXC1_FM_MM<0x188>, INSN_MIPS5_32R2_NOT_32R6_64R6;
+
+def FCMP_S32_MM : MMRel, CEQS_FT<"s", FGR32, II_C_CC_S, MipsFPCmp>,
+ CEQS_FM_MM<0>;
+def FCMP_D32_MM : MMRel, CEQS_FT<"d", AFGR64, II_C_CC_D, MipsFPCmp>,
+ CEQS_FM_MM<1>;
+
+def BC1F_MM : MMRel, BC1F_FT<"bc1f", brtarget_mm, IIBranch, MIPS_BRANCH_F>,
+ BC1F_FM_MM<0x1c>, ISA_MIPS1_NOT_32R6_64R6;
+def BC1T_MM : MMRel, BC1F_FT<"bc1t", brtarget_mm, IIBranch, MIPS_BRANCH_T>,
+ BC1F_FM_MM<0x1d>, ISA_MIPS1_NOT_32R6_64R6;
+
+def CEIL_W_S_MM : MMRel, ABSS_FT<"ceil.w.s", FGR32Opnd, FGR32Opnd, II_CEIL>,
+ ROUND_W_FM_MM<0, 0x6c>;
+def CVT_W_S_MM : MMRel, ABSS_FT<"cvt.w.s", FGR32Opnd, FGR32Opnd, II_CVT>,
+ ROUND_W_FM_MM<0, 0x24>;
+def FLOOR_W_S_MM : MMRel, ABSS_FT<"floor.w.s", FGR32Opnd, FGR32Opnd, II_FLOOR>,
+ ROUND_W_FM_MM<0, 0x2c>;
+def ROUND_W_S_MM : MMRel, ABSS_FT<"round.w.s", FGR32Opnd, FGR32Opnd, II_ROUND>,
+ ROUND_W_FM_MM<0, 0xec>;
+def TRUNC_W_S_MM : MMRel, ABSS_FT<"trunc.w.s", FGR32Opnd, FGR32Opnd, II_TRUNC>,
+ ROUND_W_FM_MM<0, 0xac>;
+def FSQRT_S_MM : MMRel, ABSS_FT<"sqrt.s", FGR32Opnd, FGR32Opnd, II_SQRT_S,
+ fsqrt>, ROUND_W_FM_MM<0, 0x28>;
+
+def CEIL_W_MM : MMRel, ABSS_FT<"ceil.w.d", FGR32Opnd, AFGR64Opnd, II_CEIL>,
+ ROUND_W_FM_MM<1, 0x6c>;
+def CVT_W_MM : MMRel, ABSS_FT<"cvt.w.d", FGR32Opnd, AFGR64Opnd, II_CVT>,
+ ROUND_W_FM_MM<1, 0x24>;
+def FLOOR_W_MM : MMRel, ABSS_FT<"floor.w.d", FGR32Opnd, AFGR64Opnd, II_FLOOR>,
+ ROUND_W_FM_MM<1, 0x2c>;
+def ROUND_W_MM : MMRel, ABSS_FT<"round.w.d", FGR32Opnd, AFGR64Opnd, II_ROUND>,
+ ROUND_W_FM_MM<1, 0xec>;
+def TRUNC_W_MM : MMRel, ABSS_FT<"trunc.w.d", FGR32Opnd, AFGR64Opnd, II_TRUNC>,
+ ROUND_W_FM_MM<1, 0xac>;
+
+def FSQRT_MM : MMRel, ABSS_FT<"sqrt.d", AFGR64Opnd, AFGR64Opnd, II_SQRT_D,
+ fsqrt>, ROUND_W_FM_MM<1, 0x28>;
+
+def CVT_L_S_MM : MMRel, ABSS_FT<"cvt.l.s", FGR64Opnd, FGR32Opnd, II_CVT>,
+ ROUND_W_FM_MM<0, 0x4>, INSN_MIPS3_32R2;
+def CVT_L_D64_MM : MMRel, ABSS_FT<"cvt.l.d", FGR64Opnd, FGR64Opnd, II_CVT>,
+ ROUND_W_FM_MM<1, 0x4>, INSN_MIPS3_32R2;
+
+def FABS_S_MM : MMRel, ABSS_FT<"abs.s", FGR32Opnd, FGR32Opnd, II_ABS, fabs>,
+ ABS_FM_MM<0, 0xd>;
+def FMOV_S_MM : MMRel, ABSS_FT<"mov.s", FGR32Opnd, FGR32Opnd, II_MOV_S>,
+ ABS_FM_MM<0, 0x1>;
+def FNEG_S_MM : MMRel, ABSS_FT<"neg.s", FGR32Opnd, FGR32Opnd, II_NEG, fneg>,
+ ABS_FM_MM<0, 0x2d>;
+def CVT_D_S_MM : MMRel, ABSS_FT<"cvt.d.s", AFGR64Opnd, FGR32Opnd, II_CVT>,
+ ABS_FM_MM<0, 0x4d>;
+def CVT_D32_W_MM : MMRel, ABSS_FT<"cvt.d.w", AFGR64Opnd, FGR32Opnd, II_CVT>,
+ ABS_FM_MM<1, 0x4d>;
+def CVT_S_D32_MM : MMRel, ABSS_FT<"cvt.s.d", FGR32Opnd, AFGR64Opnd, II_CVT>,
+ ABS_FM_MM<0, 0x6d>;
+def CVT_S_W_MM : MMRel, ABSS_FT<"cvt.s.w", FGR32Opnd, FGR32Opnd, II_CVT>,
+ ABS_FM_MM<1, 0x6d>;
+
+def FABS_MM : MMRel, ABSS_FT<"abs.d", AFGR64Opnd, AFGR64Opnd, II_ABS, fabs>,
+ ABS_FM_MM<1, 0xd>;
+def FNEG_MM : MMRel, ABSS_FT<"neg.d", AFGR64Opnd, AFGR64Opnd, II_NEG, fneg>,
+ ABS_FM_MM<1, 0x2d>;
+
+def FMOV_D32_MM : MMRel, ABSS_FT<"mov.d", AFGR64Opnd, AFGR64Opnd, II_MOV_D>,
+ ABS_FM_MM<1, 0x1>, AdditionalRequires<[NotFP64bit]>;
+
+def MOVZ_I_S_MM : MMRel, CMov_I_F_FT<"movz.s", GPR32Opnd, FGR32Opnd,
+ II_MOVZ_S>, CMov_I_F_FM_MM<0x78, 0>;
+def MOVN_I_S_MM : MMRel, CMov_I_F_FT<"movn.s", GPR32Opnd, FGR32Opnd,
+ II_MOVN_S>, CMov_I_F_FM_MM<0x38, 0>;
+def MOVZ_I_D32_MM : MMRel, CMov_I_F_FT<"movz.d", GPR32Opnd, AFGR64Opnd,
+ II_MOVZ_D>, CMov_I_F_FM_MM<0x78, 1>;
+def MOVN_I_D32_MM : MMRel, CMov_I_F_FT<"movn.d", GPR32Opnd, AFGR64Opnd,
+ II_MOVN_D>, CMov_I_F_FM_MM<0x38, 1>;
+
+def MOVT_S_MM : MMRel, CMov_F_F_FT<"movt.s", FGR32Opnd, II_MOVT_S,
+ MipsCMovFP_T>, CMov_F_F_FM_MM<0x60, 0>;
+def MOVF_S_MM : MMRel, CMov_F_F_FT<"movf.s", FGR32Opnd, II_MOVF_S,
+ MipsCMovFP_F>, CMov_F_F_FM_MM<0x20, 0>;
+def MOVT_D32_MM : MMRel, CMov_F_F_FT<"movt.d", AFGR64Opnd, II_MOVT_D,
+ MipsCMovFP_T>, CMov_F_F_FM_MM<0x60, 1>;
+def MOVF_D32_MM : MMRel, CMov_F_F_FT<"movf.d", AFGR64Opnd, II_MOVF_D,
+ MipsCMovFP_F>, CMov_F_F_FM_MM<0x20, 1>;
+
+def CFC1_MM : MMRel, MFC1_FT<"cfc1", GPR32Opnd, CCROpnd, II_CFC1>,
+ MFC1_FM_MM<0x40>;
+def CTC1_MM : MMRel, MTC1_FT<"ctc1", CCROpnd, GPR32Opnd, II_CTC1>,
+ MFC1_FM_MM<0x60>;
+def MFC1_MM : MMRel, MFC1_FT<"mfc1", GPR32Opnd, FGR32Opnd,
+ II_MFC1, bitconvert>, MFC1_FM_MM<0x80>;
+def MTC1_MM : MMRel, MTC1_FT<"mtc1", FGR32Opnd, GPR32Opnd,
+ II_MTC1, bitconvert>, MFC1_FM_MM<0xa0>;
+def MFHC1_MM : MMRel, MFC1_FT<"mfhc1", GPR32Opnd, FGRH32Opnd, II_MFHC1>,
+ MFC1_FM_MM<3>, ISA_MIPS32R2;
+def MTHC1_MM : MMRel, MTC1_FT<"mthc1", FGRH32Opnd, GPR32Opnd, II_MTHC1>,
+ MFC1_FM_MM<7>, ISA_MIPS32R2;
+
+def MADD_S_MM : MMRel, MADDS_FT<"madd.s", FGR32Opnd, II_MADD_S, fadd>,
+ MADDS_FM_MM<0x1>;
+def MSUB_S_MM : MMRel, MADDS_FT<"msub.s", FGR32Opnd, II_MSUB_S, fsub>,
+ MADDS_FM_MM<0x21>;
+def NMADD_S_MM : MMRel, NMADDS_FT<"nmadd.s", FGR32Opnd, II_NMADD_S, fadd>,
+ MADDS_FM_MM<0x2>;
+def NMSUB_S_MM : MMRel, NMADDS_FT<"nmsub.s", FGR32Opnd, II_NMSUB_S, fsub>,
+ MADDS_FM_MM<0x22>;
+
+def MADD_D32_MM : MMRel, MADDS_FT<"madd.d", AFGR64Opnd, II_MADD_D, fadd>,
+ MADDS_FM_MM<0x9>;
+def MSUB_D32_MM : MMRel, MADDS_FT<"msub.d", AFGR64Opnd, II_MSUB_D, fsub>,
+ MADDS_FM_MM<0x29>;
+def NMADD_D32_MM : MMRel, NMADDS_FT<"nmadd.d", AFGR64Opnd, II_NMADD_D, fadd>,
+ MADDS_FM_MM<0xa>;
+def NMSUB_D32_MM : MMRel, NMADDS_FT<"nmsub.d", AFGR64Opnd, II_NMSUB_D, fsub>,
+ MADDS_FM_MM<0x2a>;
+}
diff --git a/contrib/llvm/lib/Target/Mips/MicroMipsInstrFormats.td b/contrib/llvm/lib/Target/Mips/MicroMipsInstrFormats.td
index c12a32e..15b951d 100644
--- a/contrib/llvm/lib/Target/Mips/MicroMipsInstrFormats.td
+++ b/contrib/llvm/lib/Target/Mips/MicroMipsInstrFormats.td
@@ -1,3 +1,81 @@
+//===----------------------------------------------------------------------===//
+// MicroMIPS Base Classes
+//===----------------------------------------------------------------------===//
+
+//
+// Base class for MicroMIPS instructions.
+// This class does not depend on the instruction size.
+//
+class MicroMipsInstBase<dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin, Format f> : Instruction
+{
+ let Namespace = "Mips";
+ let DecoderNamespace = "MicroMips";
+
+ let OutOperandList = outs;
+ let InOperandList = ins;
+
+ let AsmString = asmstr;
+ let Pattern = pattern;
+ let Itinerary = itin;
+
+ let Predicates = [InMicroMips];
+
+ Format Form = f;
+}
+
+//
+// Base class for MicroMIPS 16-bit instructions.
+//
+class MicroMipsInst16<dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin, Format f> :
+ MicroMipsInstBase<outs, ins, asmstr, pattern, itin, f>
+{
+ let Size = 2;
+ field bits<16> Inst;
+ field bits<16> SoftFail = 0;
+ bits<6> Opcode = 0x0;
+}
+
+//===----------------------------------------------------------------------===//
+// MicroMIPS 16-bit Instruction Formats
+//===----------------------------------------------------------------------===//
+
+class MOVE_FM_MM16<bits<6> funct> {
+ bits<5> rs;
+ bits<5> rd;
+
+ bits<16> Inst;
+
+ let Inst{15-10} = funct;
+ let Inst{9-5} = rd;
+ let Inst{4-0} = rs;
+}
+
+class JALR_FM_MM16<bits<5> op> {
+ bits<5> rs;
+
+ bits<16> Inst;
+
+ let Inst{15-10} = 0x11;
+ let Inst{9-5} = op;
+ let Inst{4-0} = rs;
+}
+
+class MFHILO_FM_MM16<bits<5> funct> {
+ bits<5> rd;
+
+ bits<16> Inst;
+
+ let Inst{15-10} = 0x11;
+ let Inst{9-5} = funct;
+ let Inst{4-0} = rd;
+}
+
+//===----------------------------------------------------------------------===//
+// MicroMIPS 32-bit Instruction Formats
+//===----------------------------------------------------------------------===//
+
class MMArch {
string Arch = "micromips";
list<dag> Pattern = [];
@@ -226,7 +304,7 @@ class JR_FM_MM<bits<8> funct> : MMArch {
let Inst{5-0} = 0x3c;
}
-class JALR_FM_MM<bits<10> funct> : MMArch {
+class JALR_FM_MM<bits<10> funct> {
bits<5> rs;
bits<5> rd;
@@ -276,6 +354,67 @@ class BGEZAL_FM_MM<bits<5> funct> : MMArch {
let Inst{15-0} = offset;
}
+class SYNC_FM_MM : MMArch {
+ bits<5> stype;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x00;
+ let Inst{25-21} = 0x0;
+ let Inst{20-16} = stype;
+ let Inst{15-6} = 0x1ad;
+ let Inst{5-0} = 0x3c;
+}
+
+class BRK_FM_MM : MMArch {
+ bits<10> code_1;
+ bits<10> code_2;
+ bits<32> Inst;
+ let Inst{31-26} = 0x0;
+ let Inst{25-16} = code_1;
+ let Inst{15-6} = code_2;
+ let Inst{5-0} = 0x07;
+}
+
+class SYS_FM_MM : MMArch {
+ bits<10> code_;
+ bits<32> Inst;
+ let Inst{31-26} = 0x0;
+ let Inst{25-16} = code_;
+ let Inst{15-6} = 0x22d;
+ let Inst{5-0} = 0x3c;
+}
+
+class WAIT_FM_MM {
+ bits<10> code_;
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x00;
+ let Inst{25-16} = code_;
+ let Inst{15-6} = 0x24d;
+ let Inst{5-0} = 0x3c;
+}
+
+class ER_FM_MM<bits<10> funct> : MMArch {
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x00;
+ let Inst{25-16} = 0x00;
+ let Inst{15-6} = funct;
+ let Inst{5-0} = 0x3c;
+}
+
+class EI_FM_MM<bits<10> funct> : MMArch {
+ bits<32> Inst;
+ bits<5> rt;
+
+ let Inst{31-26} = 0x00;
+ let Inst{25-21} = 0x00;
+ let Inst{20-16} = rt;
+ let Inst{15-6} = funct;
+ let Inst{5-0} = 0x3c;
+}
+
class TEQ_FM_MM<bits<6> funct> : MMArch {
bits<5> rs;
bits<5> rt;
@@ -302,3 +441,183 @@ class TEQI_FM_MM<bits<5> funct> : MMArch {
let Inst{20-16} = rs;
let Inst{15-0} = imm16;
}
+
+class LL_FM_MM<bits<4> funct> {
+ bits<5> rt;
+ bits<21> addr;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x18;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = addr{20-16};
+ let Inst{15-12} = funct;
+ let Inst{11-0} = addr{11-0};
+}
+
+class ADDS_FM_MM<bits<2> fmt, bits<8> funct> : MMArch {
+ bits<5> ft;
+ bits<5> fs;
+ bits<5> fd;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x15;
+ let Inst{25-21} = ft;
+ let Inst{20-16} = fs;
+ let Inst{15-11} = fd;
+ let Inst{10} = 0;
+ let Inst{9-8} = fmt;
+ let Inst{7-0} = funct;
+
+ list<dag> Pattern = [];
+}
+
+class LWXC1_FM_MM<bits<9> funct> : MMArch {
+ bits<5> fd;
+ bits<5> base;
+ bits<5> index;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x15;
+ let Inst{25-21} = index;
+ let Inst{20-16} = base;
+ let Inst{15-11} = fd;
+ let Inst{10-9} = 0x0;
+ let Inst{8-0} = funct;
+}
+
+class SWXC1_FM_MM<bits<9> funct> : MMArch {
+ bits<5> fs;
+ bits<5> base;
+ bits<5> index;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x15;
+ let Inst{25-21} = index;
+ let Inst{20-16} = base;
+ let Inst{15-11} = fs;
+ let Inst{10-9} = 0x0;
+ let Inst{8-0} = funct;
+}
+
+class CEQS_FM_MM<bits<2> fmt> : MMArch {
+ bits<5> fs;
+ bits<5> ft;
+ bits<4> cond;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x15;
+ let Inst{25-21} = ft;
+ let Inst{20-16} = fs;
+ let Inst{15-13} = 0x0; // cc
+ let Inst{12} = 0;
+ let Inst{11-10} = fmt;
+ let Inst{9-6} = cond;
+ let Inst{5-0} = 0x3c;
+}
+
+class BC1F_FM_MM<bits<5> tf> : MMArch {
+ bits<16> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x10;
+ let Inst{25-21} = tf;
+ let Inst{20-18} = 0x0; // cc
+ let Inst{17-16} = 0x0;
+ let Inst{15-0} = offset;
+}
+
+class ROUND_W_FM_MM<bits<1> fmt, bits<8> funct> : MMArch {
+ bits<5> fd;
+ bits<5> fs;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x15;
+ let Inst{25-21} = fd;
+ let Inst{20-16} = fs;
+ let Inst{15} = 0;
+ let Inst{14} = fmt;
+ let Inst{13-6} = funct;
+ let Inst{5-0} = 0x3b;
+}
+
+class ABS_FM_MM<bits<2> fmt, bits<7> funct> : MMArch {
+ bits<5> fd;
+ bits<5> fs;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x15;
+ let Inst{25-21} = fd;
+ let Inst{20-16} = fs;
+ let Inst{15} = 0;
+ let Inst{14-13} = fmt;
+ let Inst{12-6} = funct;
+ let Inst{5-0} = 0x3b;
+}
+
+class CMov_F_F_FM_MM<bits<9> func, bits<2> fmt> : MMArch {
+ bits<5> fd;
+ bits<5> fs;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x15;
+ let Inst{25-21} = fd;
+ let Inst{20-16} = fs;
+ let Inst{15-13} = 0x0; //cc
+ let Inst{12-11} = 0x0;
+ let Inst{10-9} = fmt;
+ let Inst{8-0} = func;
+}
+
+class CMov_I_F_FM_MM<bits<8> funct, bits<2> fmt> : MMArch {
+ bits<5> fd;
+ bits<5> fs;
+ bits<5> rt;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x15;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = fs;
+ let Inst{15-11} = fd;
+ let Inst{9-8} = fmt;
+ let Inst{7-0} = funct;
+}
+
+class MFC1_FM_MM<bits<8> funct> : MMArch {
+ bits<5> rt;
+ bits<5> fs;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x15;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = fs;
+ let Inst{15-14} = 0x0;
+ let Inst{13-6} = funct;
+ let Inst{5-0} = 0x3b;
+}
+
+class MADDS_FM_MM<bits<6> funct>: MMArch {
+ bits<5> ft;
+ bits<5> fs;
+ bits<5> fd;
+ bits<5> fr;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x15;
+ let Inst{25-21} = ft;
+ let Inst{20-16} = fs;
+ let Inst{15-11} = fd;
+ let Inst{10-6} = fr;
+ let Inst{5-0} = funct;
+}
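
The new 32-bit format classes above simply slice operand values into fixed bit fields; LL_FM_MM, for instance, packs the major opcode 0x18, the rt register, the base register from addr{20-16}, a 4-bit minor opcode, and the 12-bit offset from addr{11-0}. A hand-written C++ sketch of that packing (purely illustrative; the real encoder is generated by TableGen from these classes):

    #include <cstdint>

    // LL_FM_MM field layout: op(6) | rt(5) | base(5) | funct(4) | offset(12).
    static uint32_t encodeLLFormat(uint32_t Funct, uint32_t Rt, uint32_t Base,
                                   uint32_t Offset) {
      return (0x18u << 26) | ((Rt & 0x1f) << 21) | ((Base & 0x1f) << 16) |
             ((Funct & 0xf) << 12) | (Offset & 0xfff);
    }

LL uses funct 0x3 and SC uses funct 0xb with this same layout; see the LL_MM and SC_MM definitions in MicroMipsInstrInfo.td later in this patch.
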
diff --git a/contrib/llvm/lib/Target/Mips/MicroMipsInstrInfo.td b/contrib/llvm/lib/Target/Mips/MicroMipsInstrInfo.td
index d9507fa..87a3a3e 100644
--- a/contrib/llvm/lib/Target/Mips/MicroMipsInstrInfo.td
+++ b/contrib/llvm/lib/Target/Mips/MicroMipsInstrInfo.td
@@ -45,6 +45,64 @@ class StoreLeftRightMM<string opstr, SDNode OpNode, RegisterOperand RO,
let DecoderMethod = "DecodeMemMMImm12";
}
+class LLBaseMM<string opstr, RegisterOperand RO> :
+ InstSE<(outs RO:$rt), (ins mem_mm_12:$addr),
+ !strconcat(opstr, "\t$rt, $addr"), [], NoItinerary, FrmI> {
+ let DecoderMethod = "DecodeMemMMImm12";
+ let mayLoad = 1;
+}
+
+class SCBaseMM<string opstr, RegisterOperand RO> :
+ InstSE<(outs RO:$dst), (ins RO:$rt, mem_mm_12:$addr),
+ !strconcat(opstr, "\t$rt, $addr"), [], NoItinerary, FrmI> {
+ let DecoderMethod = "DecodeMemMMImm12";
+ let mayStore = 1;
+ let Constraints = "$rt = $dst";
+}
+
+class LoadMM<string opstr, DAGOperand RO, SDPatternOperator OpNode = null_frag,
+ InstrItinClass Itin = NoItinerary> :
+ InstSE<(outs RO:$rt), (ins mem_mm_12:$addr),
+ !strconcat(opstr, "\t$rt, $addr"),
+ [(set RO:$rt, (OpNode addrimm12:$addr))], Itin, FrmI> {
+ let DecoderMethod = "DecodeMemMMImm12";
+ let canFoldAsLoad = 1;
+ let mayLoad = 1;
+}
+
+class MoveFromHILOMM<string opstr, RegisterOperand RO, Register UseReg> :
+ MicroMipsInst16<(outs RO:$rd), (ins), !strconcat(opstr, "\t$rd"),
+ [], II_MFHI_MFLO, FrmR> {
+ let Uses = [UseReg];
+ let hasSideEffects = 0;
+}
+
+class MoveMM16<string opstr, RegisterOperand RO, bit isComm = 0,
+ InstrItinClass Itin = NoItinerary> :
+ MicroMipsInst16<(outs RO:$rd), (ins RO:$rs),
+ !strconcat(opstr, "\t$rd, $rs"), [], Itin, FrmR> {
+ let isCommutable = isComm;
+ let isReMaterializable = 1;
+}
+
+// 16-bit Jump and Link (Call)
+class JumpLinkRegMM16<string opstr, RegisterOperand RO> :
+ MicroMipsInst16<(outs), (ins RO:$rs), !strconcat(opstr, "\t$rs"),
+ [(MipsJmpLink RO:$rs)], IIBranch, FrmR> {
+ let isCall = 1;
+ let hasDelaySlot = 1;
+ let Defs = [RA];
+}
+
+def MFHI16_MM : MoveFromHILOMM<"mfhi", GPR32Opnd, AC0>, MFHILO_FM_MM16<0x10>;
+def MFLO16_MM : MoveFromHILOMM<"mflo", GPR32Opnd, AC0>, MFHILO_FM_MM16<0x12>;
+def MOVE16_MM : MoveMM16<"move", GPR32Opnd>, MOVE_FM_MM16<0x03>;
+def JALR16_MM : JumpLinkRegMM16<"jalr", GPR32Opnd>, JALR_FM_MM16<0x0e>;
+
+class WaitMM<string opstr> :
+ InstSE<(outs), (ins uimm10:$code_), !strconcat(opstr, "\t$code_"), [],
+ NoItinerary, FrmOther, opstr>;
+
let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
/// Arithmetic Instructions (ALU Immediate)
def ADDiu_MM : MMRel, ArithLogicI<"addiu", simm16, GPR32Opnd>,
@@ -63,6 +121,9 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
ADDI_FM_MM<0x1c>;
def LUi_MM : MMRel, LoadUpper<"lui", GPR32Opnd, uimm16>, LUI_FM_MM;
+ def LEA_ADDiu_MM : MMRel, EffectiveAddress<"addiu", GPR32Opnd>,
+ LW_FM_MM<0xc>;
+
/// Arithmetic Instructions (3-Operand, R-Type)
def ADDu_MM : MMRel, ArithLogicR<"addu", GPR32Opnd>, ADD_FM_MM<0, 0x150>;
def SUBu_MM : MMRel, ArithLogicR<"subu", GPR32Opnd>, ADD_FM_MM<0, 0x1d0>;
@@ -72,38 +133,38 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
def SLT_MM : MMRel, SetCC_R<"slt", setlt, GPR32Opnd>, ADD_FM_MM<0, 0x350>;
def SLTu_MM : MMRel, SetCC_R<"sltu", setult, GPR32Opnd>,
ADD_FM_MM<0, 0x390>;
- def AND_MM : MMRel, ArithLogicR<"and", GPR32Opnd, 1, IIAlu, and>,
+ def AND_MM : MMRel, ArithLogicR<"and", GPR32Opnd, 1, II_AND, and>,
ADD_FM_MM<0, 0x250>;
- def OR_MM : MMRel, ArithLogicR<"or", GPR32Opnd, 1, IIAlu, or>,
+ def OR_MM : MMRel, ArithLogicR<"or", GPR32Opnd, 1, II_OR, or>,
ADD_FM_MM<0, 0x290>;
- def XOR_MM : MMRel, ArithLogicR<"xor", GPR32Opnd, 1, IIAlu, xor>,
+ def XOR_MM : MMRel, ArithLogicR<"xor", GPR32Opnd, 1, II_XOR, xor>,
ADD_FM_MM<0, 0x310>;
def NOR_MM : MMRel, LogicNOR<"nor", GPR32Opnd>, ADD_FM_MM<0, 0x2d0>;
- def MULT_MM : MMRel, Mult<"mult", IIImul, GPR32Opnd, [HI0, LO0]>,
+ def MULT_MM : MMRel, Mult<"mult", II_MULT, GPR32Opnd, [HI0, LO0]>,
MULT_FM_MM<0x22c>;
- def MULTu_MM : MMRel, Mult<"multu", IIImul, GPR32Opnd, [HI0, LO0]>,
+ def MULTu_MM : MMRel, Mult<"multu", II_MULTU, GPR32Opnd, [HI0, LO0]>,
MULT_FM_MM<0x26c>;
- def SDIV_MM : MMRel, Div<"div", IIIdiv, GPR32Opnd, [HI0, LO0]>,
+ def SDIV_MM : MMRel, Div<"div", II_DIV, GPR32Opnd, [HI0, LO0]>,
MULT_FM_MM<0x2ac>;
- def UDIV_MM : MMRel, Div<"divu", IIIdiv, GPR32Opnd, [HI0, LO0]>,
+ def UDIV_MM : MMRel, Div<"divu", II_DIVU, GPR32Opnd, [HI0, LO0]>,
MULT_FM_MM<0x2ec>;
/// Shift Instructions
- def SLL_MM : MMRel, shift_rotate_imm<"sll", uimm5, GPR32Opnd>,
+ def SLL_MM : MMRel, shift_rotate_imm<"sll", uimm5, GPR32Opnd, II_SLL>,
SRA_FM_MM<0, 0>;
- def SRL_MM : MMRel, shift_rotate_imm<"srl", uimm5, GPR32Opnd>,
+ def SRL_MM : MMRel, shift_rotate_imm<"srl", uimm5, GPR32Opnd, II_SRL>,
SRA_FM_MM<0x40, 0>;
- def SRA_MM : MMRel, shift_rotate_imm<"sra", uimm5, GPR32Opnd>,
+ def SRA_MM : MMRel, shift_rotate_imm<"sra", uimm5, GPR32Opnd, II_SRA>,
SRA_FM_MM<0x80, 0>;
- def SLLV_MM : MMRel, shift_rotate_reg<"sllv", GPR32Opnd>,
+ def SLLV_MM : MMRel, shift_rotate_reg<"sllv", GPR32Opnd, II_SLLV>,
SRLV_FM_MM<0x10, 0>;
- def SRLV_MM : MMRel, shift_rotate_reg<"srlv", GPR32Opnd>,
+ def SRLV_MM : MMRel, shift_rotate_reg<"srlv", GPR32Opnd, II_SRLV>,
SRLV_FM_MM<0x50, 0>;
- def SRAV_MM : MMRel, shift_rotate_reg<"srav", GPR32Opnd>,
+ def SRAV_MM : MMRel, shift_rotate_reg<"srav", GPR32Opnd, II_SRAV>,
SRLV_FM_MM<0x90, 0>;
- def ROTR_MM : MMRel, shift_rotate_imm<"rotr", uimm5, GPR32Opnd>,
+ def ROTR_MM : MMRel, shift_rotate_imm<"rotr", uimm5, GPR32Opnd, II_ROTR>,
SRA_FM_MM<0xc0, 0>;
- def ROTRV_MM : MMRel, shift_rotate_reg<"rotrv", GPR32Opnd>,
+ def ROTRV_MM : MMRel, shift_rotate_reg<"rotrv", GPR32Opnd, II_ROTRV>,
SRLV_FM_MM<0xd0, 0>;
/// Load and Store Instructions - aligned
@@ -118,6 +179,8 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
def SW_MM : Store<"sw", GPR32Opnd>, MMRel, LW_FM_MM<0x3e>;
}
+ def LWU_MM : LoadMM<"lwu", GPR32Opnd, zextloadi32, II_LWU>, LL_FM_MM<0xe>;
+
/// Load and Store Instructions - unaligned
def LWL_MM : LoadLeftRightMM<"lwl", MipsLWL, GPR32Opnd, mem_mm_12>,
LWL_FM_MM<0x0>;
@@ -133,9 +196,9 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
NoItinerary>, ADD_FM_MM<0, 0x58>;
def MOVN_I_MM : MMRel, CMov_I_I_FT<"movn", GPR32Opnd, GPR32Opnd,
NoItinerary>, ADD_FM_MM<0, 0x18>;
- def MOVT_I_MM : MMRel, CMov_F_I_FT<"movt", GPR32Opnd, IIAlu>,
+ def MOVT_I_MM : MMRel, CMov_F_I_FT<"movt", GPR32Opnd, II_MOVT>,
CMov_F_I_FM_MM<0x25>;
- def MOVF_I_MM : MMRel, CMov_F_I_FT<"movf", GPR32Opnd, IIAlu>,
+ def MOVF_I_MM : MMRel, CMov_F_I_FT<"movf", GPR32Opnd, II_MOVF>,
CMov_F_I_FM_MM<0x5>;
/// Move to/from HI/LO
@@ -149,21 +212,26 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
MFLO_FM_MM<0x075>;
/// Multiply Add/Sub Instructions
- def MADD_MM : MMRel, MArithR<"madd", 1>, MULT_FM_MM<0x32c>;
- def MADDU_MM : MMRel, MArithR<"maddu", 1>, MULT_FM_MM<0x36c>;
- def MSUB_MM : MMRel, MArithR<"msub">, MULT_FM_MM<0x3ac>;
- def MSUBU_MM : MMRel, MArithR<"msubu">, MULT_FM_MM<0x3ec>;
+ def MADD_MM : MMRel, MArithR<"madd", II_MADD, 1>, MULT_FM_MM<0x32c>;
+ def MADDU_MM : MMRel, MArithR<"maddu", II_MADDU, 1>, MULT_FM_MM<0x36c>;
+ def MSUB_MM : MMRel, MArithR<"msub", II_MSUB>, MULT_FM_MM<0x3ac>;
+ def MSUBU_MM : MMRel, MArithR<"msubu", II_MSUBU>, MULT_FM_MM<0x3ec>;
/// Count Leading
- def CLZ_MM : MMRel, CountLeading0<"clz", GPR32Opnd>, CLO_FM_MM<0x16c>;
- def CLO_MM : MMRel, CountLeading1<"clo", GPR32Opnd>, CLO_FM_MM<0x12c>;
+ def CLZ_MM : MMRel, CountLeading0<"clz", GPR32Opnd>, CLO_FM_MM<0x16c>,
+ ISA_MIPS32;
+ def CLO_MM : MMRel, CountLeading1<"clo", GPR32Opnd>, CLO_FM_MM<0x12c>,
+ ISA_MIPS32;
/// Sign Ext In Register Instructions.
- def SEB_MM : MMRel, SignExtInReg<"seb", i8, GPR32Opnd>, SEB_FM_MM<0x0ac>;
- def SEH_MM : MMRel, SignExtInReg<"seh", i16, GPR32Opnd>, SEB_FM_MM<0x0ec>;
+ def SEB_MM : MMRel, SignExtInReg<"seb", i8, GPR32Opnd, II_SEB>,
+ SEB_FM_MM<0x0ac>, ISA_MIPS32R2;
+ def SEH_MM : MMRel, SignExtInReg<"seh", i16, GPR32Opnd, II_SEH>,
+ SEB_FM_MM<0x0ec>, ISA_MIPS32R2;
/// Word Swap Bytes Within Halfwords
- def WSBH_MM : MMRel, SubwordSwap<"wsbh", GPR32Opnd>, SEB_FM_MM<0x1ec>;
+ def WSBH_MM : MMRel, SubwordSwap<"wsbh", GPR32Opnd>, SEB_FM_MM<0x1ec>,
+ ISA_MIPS32R2;
def EXT_MM : MMRel, ExtBase<"ext", GPR32Opnd, uimm5, MipsExt>,
EXT_FM_MM<0x2c>;
@@ -175,14 +243,9 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
def J_MM : MMRel, JumpFJ<jmptarget_mm, "j", br, bb, "j">,
J_FM_MM<0x35>;
def JAL_MM : MMRel, JumpLink<"jal", calltarget_mm>, J_FM_MM<0x3d>;
- def TAILCALL_MM : MMRel, JumpFJ<calltarget_mm, "j", MipsTailCall, imm,
- "tcall">, J_FM_MM<0x3d>, IsTailCall;
}
def JR_MM : MMRel, IndirectBranch<"jr", GPR32Opnd>, JR_FM_MM<0x3c>;
- def JALR_MM : MMRel, JumpLinkReg<"jalr", GPR32Opnd>, JALR_FM_MM<0x03c>;
- def TAILCALL_R_MM : MMRel, JumpFR<"tcallr", GPR32Opnd, MipsTailCall>,
- JR_FM_MM<0x3c>, IsTailCall;
- def RET_MM : MMRel, RetBase<"ret", GPR32Opnd>, JR_FM_MM<0x3c>;
+ def JALR_MM : JumpLinkReg<"jalr", GPR32Opnd>, JALR_FM_MM<0x03c>;
/// Branch Instructions
def BEQ_MM : MMRel, CBranch<"beq", brtarget_mm, seteq, GPR32Opnd>,
@@ -202,6 +265,18 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
def BLTZAL_MM : MMRel, BGEZAL_FT<"bltzal", brtarget_mm, GPR32Opnd>,
BGEZAL_FM_MM<0x01>;
+ /// Control Instructions
+ def SYNC_MM : MMRel, SYNC_FT<"sync">, SYNC_FM_MM;
+ def BREAK_MM : MMRel, BRK_FT<"break">, BRK_FM_MM;
+ def SYSCALL_MM : MMRel, SYS_FT<"syscall">, SYS_FM_MM;
+ def WAIT_MM : WaitMM<"wait">, WAIT_FM_MM;
+ def ERET_MM : MMRel, ER_FT<"eret">, ER_FM_MM<0x3cd>;
+ def DERET_MM : MMRel, ER_FT<"deret">, ER_FM_MM<0x38d>;
+ def EI_MM : MMRel, DEI_FT<"ei", GPR32Opnd>, EI_FM_MM<0x15d>,
+ ISA_MIPS32R2;
+ def DI_MM : MMRel, DEI_FT<"di", GPR32Opnd>, EI_FM_MM<0x11d>,
+ ISA_MIPS32R2;
+
/// Trap Instructions
def TEQ_MM : MMRel, TEQ_FT<"teq", GPR32Opnd>, TEQ_FM_MM<0x0>;
def TGE_MM : MMRel, TEQ_FT<"tge", GPR32Opnd>, TEQ_FM_MM<0x08>;
@@ -216,4 +291,16 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
def TLTI_MM : MMRel, TEQI_FT<"tlti", GPR32Opnd>, TEQI_FM_MM<0x08>;
def TLTIU_MM : MMRel, TEQI_FT<"tltiu", GPR32Opnd>, TEQI_FM_MM<0x0a>;
def TNEI_MM : MMRel, TEQI_FT<"tnei", GPR32Opnd>, TEQI_FM_MM<0x0c>;
+
+ /// Load-linked, Store-conditional
+ def LL_MM : LLBaseMM<"ll", GPR32Opnd>, LL_FM_MM<0x3>;
+ def SC_MM : SCBaseMM<"sc", GPR32Opnd>, LL_FM_MM<0xb>;
+}
+
+//===----------------------------------------------------------------------===//
+// MicroMips instruction aliases
+//===----------------------------------------------------------------------===//
+
+let Predicates = [InMicroMips] in {
+ def : MipsInstAlias<"wait", (WAIT_MM 0x0), 1>;
}
diff --git a/contrib/llvm/lib/Target/Mips/Mips.h b/contrib/llvm/lib/Target/Mips/Mips.h
index e796deb..d512d65 100644
--- a/contrib/llvm/lib/Target/Mips/Mips.h
+++ b/contrib/llvm/lib/Target/Mips/Mips.h
@@ -23,6 +23,7 @@ namespace llvm {
class FunctionPass;
FunctionPass *createMipsISelDag(MipsTargetMachine &TM);
+ FunctionPass *createMipsOptimizePICCallPass(MipsTargetMachine &TM);
FunctionPass *createMipsDelaySlotFillerPass(MipsTargetMachine &TM);
FunctionPass *createMipsLongBranchPass(MipsTargetMachine &TM);
FunctionPass *createMipsJITCodeEmitterPass(MipsTargetMachine &TM,
diff --git a/contrib/llvm/lib/Target/Mips/Mips.td b/contrib/llvm/lib/Target/Mips/Mips.td
index b8e3f39..dd3bc9b 100644
--- a/contrib/llvm/lib/Target/Mips/Mips.td
+++ b/contrib/llvm/lib/Target/Mips/Mips.td
@@ -15,6 +15,33 @@
include "llvm/Target/Target.td"
+// The overall idea of the PredicateControl class is to chop the Predicates list
+// into subsets that are usually overridden independently. This allows
+// subclasses to partially override the predicates of their superclasses without
+// having to re-add all the existing predicates.
+class PredicateControl {
+ // Predicates for the encoding scheme in use such as HasStdEnc
+ list<Predicate> EncodingPredicates = [];
+ // Predicates for the GPR size such as IsGP64bit
+ list<Predicate> GPRPredicates = [];
+ // Predicates for the FGR size and layout such as IsFP64bit
+ list<Predicate> FGRPredicates = [];
+  // Predicates for the instruction group membership such as ISAs and ASEs
+ list<Predicate> InsnPredicates = [];
+ // Predicates for anything else
+ list<Predicate> AdditionalPredicates = [];
+ list<Predicate> Predicates = !listconcat(EncodingPredicates,
+ GPRPredicates,
+ FGRPredicates,
+ InsnPredicates,
+ AdditionalPredicates);
+}
+
+// Like Requires<> but for the AdditionalPredicates list
+class AdditionalRequires<list<Predicate> preds> {
+ list<Predicate> AdditionalPredicates = preds;
+}
+
//===----------------------------------------------------------------------===//
// Register File, Calling Conv, Instruction Descriptions
//===----------------------------------------------------------------------===//
@@ -34,6 +61,10 @@ def FeatureGP64Bit : SubtargetFeature<"gp64", "IsGP64bit", "true",
"General Purpose Registers are 64-bit wide.">;
def FeatureFP64Bit : SubtargetFeature<"fp64", "IsFP64bit", "true",
"Support 64-bit FP registers.">;
+def FeatureFPXX : SubtargetFeature<"fpxx", "IsFPXX", "true",
+ "Support for FPXX.">;
+def FeatureNaN2008 : SubtargetFeature<"nan2008", "IsNaN2008bit", "true",
+ "IEEE 754-2008 NaN encoding.">;
def FeatureSingleFloat : SubtargetFeature<"single-float", "IsSingleFloat",
"true", "Only supports single precision float">;
def FeatureO32 : SubtargetFeature<"o32", "MipsABI", "O32",
@@ -44,32 +75,67 @@ def FeatureN64 : SubtargetFeature<"n64", "MipsABI", "N64",
"Enable n64 ABI">;
def FeatureEABI : SubtargetFeature<"eabi", "MipsABI", "EABI",
"Enable eabi ABI">;
+def FeatureNoOddSPReg : SubtargetFeature<"nooddspreg", "UseOddSPReg", "false",
+ "Disable odd numbered single-precision "
+ "registers">;
def FeatureVFPU : SubtargetFeature<"vfpu", "HasVFPU",
"true", "Enable vector FPU instructions.">;
-def FeatureSEInReg : SubtargetFeature<"seinreg", "HasSEInReg", "true",
- "Enable 'signext in register' instructions.">;
-def FeatureCondMov : SubtargetFeature<"condmov", "HasCondMov", "true",
- "Enable 'conditional move' instructions.">;
-def FeatureSwap : SubtargetFeature<"swap", "HasSwap", "true",
- "Enable 'byte/half swap' instructions.">;
-def FeatureBitCount : SubtargetFeature<"bitcount", "HasBitCount", "true",
- "Enable 'count leading bits' instructions.">;
-def FeatureFPIdx : SubtargetFeature<"FPIdx", "HasFPIdx", "true",
- "Enable 'FP indexed load/store' instructions.">;
+def FeatureMips1 : SubtargetFeature<"mips1", "MipsArchVersion", "Mips1",
+ "Mips I ISA Support [highly experimental]">;
+def FeatureMips2 : SubtargetFeature<"mips2", "MipsArchVersion", "Mips2",
+ "Mips II ISA Support [highly experimental]",
+ [FeatureMips1]>;
+def FeatureMips3_32 : SubtargetFeature<"mips3_32", "HasMips3_32", "true",
+ "Subset of MIPS-III that is also in MIPS32 "
+ "[highly experimental]">;
+def FeatureMips3_32r2 : SubtargetFeature<"mips3_32r2", "HasMips3_32r2", "true",
+ "Subset of MIPS-III that is also in MIPS32r2 "
+ "[highly experimental]">;
+def FeatureMips3 : SubtargetFeature<"mips3", "MipsArchVersion", "Mips3",
+ "MIPS III ISA Support [highly experimental]",
+ [FeatureMips2, FeatureMips3_32,
+ FeatureMips3_32r2, FeatureGP64Bit,
+ FeatureFP64Bit]>;
+def FeatureMips4_32 : SubtargetFeature<"mips4_32", "HasMips4_32", "true",
+ "Subset of MIPS-IV that is also in MIPS32 "
+ "[highly experimental]">;
+def FeatureMips4_32r2 : SubtargetFeature<"mips4_32r2", "HasMips4_32r2", "true",
+ "Subset of MIPS-IV that is also in MIPS32r2 "
+ "[highly experimental]">;
+def FeatureMips4 : SubtargetFeature<"mips4", "MipsArchVersion",
+ "Mips4", "MIPS IV ISA Support",
+ [FeatureMips3, FeatureMips4_32,
+ FeatureMips4_32r2]>;
+def FeatureMips5_32r2 : SubtargetFeature<"mips5_32r2", "HasMips5_32r2", "true",
+ "Subset of MIPS-V that is also in MIPS32r2 "
+ "[highly experimental]">;
+def FeatureMips5 : SubtargetFeature<"mips5", "MipsArchVersion", "Mips5",
+ "MIPS V ISA Support [highly experimental]",
+ [FeatureMips4, FeatureMips5_32r2]>;
def FeatureMips32 : SubtargetFeature<"mips32", "MipsArchVersion", "Mips32",
"Mips32 ISA Support",
- [FeatureCondMov, FeatureBitCount]>;
+ [FeatureMips2, FeatureMips3_32,
+ FeatureMips4_32]>;
def FeatureMips32r2 : SubtargetFeature<"mips32r2", "MipsArchVersion",
"Mips32r2", "Mips32r2 ISA Support",
- [FeatureMips32, FeatureSEInReg, FeatureSwap,
- FeatureFPIdx]>;
+ [FeatureMips3_32r2, FeatureMips4_32r2,
+ FeatureMips5_32r2, FeatureMips32]>;
+def FeatureMips32r6 : SubtargetFeature<"mips32r6", "MipsArchVersion",
+ "Mips32r6",
+ "Mips32r6 ISA Support [experimental]",
+ [FeatureMips32r2, FeatureFP64Bit,
+ FeatureNaN2008]>;
def FeatureMips64 : SubtargetFeature<"mips64", "MipsArchVersion",
"Mips64", "Mips64 ISA Support",
- [FeatureGP64Bit, FeatureFP64Bit,
- FeatureMips32, FeatureFPIdx]>;
+ [FeatureMips5, FeatureMips32]>;
def FeatureMips64r2 : SubtargetFeature<"mips64r2", "MipsArchVersion",
"Mips64r2", "Mips64r2 ISA Support",
[FeatureMips64, FeatureMips32r2]>;
+def FeatureMips64r6 : SubtargetFeature<"mips64r6", "MipsArchVersion",
+ "Mips64r6",
+ "Mips64r6 ISA Support [experimental]",
+ [FeatureMips32r6, FeatureMips64r2,
+ FeatureNaN2008]>;
def FeatureMips16 : SubtargetFeature<"mips16", "InMips16Mode", "true",
"Mips16 mode">;
@@ -83,6 +149,10 @@ def FeatureMSA : SubtargetFeature<"msa", "HasMSA", "true", "Mips MSA ASE">;
def FeatureMicroMips : SubtargetFeature<"micromips", "InMicroMipsMode", "true",
"microMips mode">;
+def FeatureCnMips : SubtargetFeature<"cnmips", "HasCnMips",
+ "true", "Octeon cnMIPS Support",
+ [FeatureMips64r2]>;
+
//===----------------------------------------------------------------------===//
// Mips processors supported.
//===----------------------------------------------------------------------===//
@@ -90,16 +160,20 @@ def FeatureMicroMips : SubtargetFeature<"micromips", "InMicroMipsMode", "true",
class Proc<string Name, list<SubtargetFeature> Features>
: Processor<Name, MipsGenericItineraries, Features>;
-def : Proc<"mips32", [FeatureMips32]>;
-def : Proc<"mips32r2", [FeatureMips32r2]>;
-def : Proc<"mips64", [FeatureMips64]>;
-def : Proc<"mips64r2", [FeatureMips64r2]>;
-def : Proc<"mips16", [FeatureMips16]>;
-
-def MipsAsmWriter : AsmWriter {
- string AsmWriterClassName = "InstPrinter";
- bit isMCAsmWriter = 1;
-}
+def : Proc<"mips1", [FeatureMips1, FeatureO32]>;
+def : Proc<"mips2", [FeatureMips2, FeatureO32]>;
+def : Proc<"mips32", [FeatureMips32, FeatureO32]>;
+def : Proc<"mips32r2", [FeatureMips32r2, FeatureO32]>;
+def : Proc<"mips32r6", [FeatureMips32r6, FeatureO32]>;
+
+def : Proc<"mips3", [FeatureMips3, FeatureN64]>;
+def : Proc<"mips4", [FeatureMips4, FeatureN64]>;
+def : Proc<"mips5", [FeatureMips5, FeatureN64]>;
+def : Proc<"mips64", [FeatureMips64, FeatureN64]>;
+def : Proc<"mips64r2", [FeatureMips64r2, FeatureN64]>;
+def : Proc<"mips64r6", [FeatureMips64r6, FeatureN64]>;
+def : Proc<"mips16", [FeatureMips16, FeatureO32]>;
+def : Proc<"octeon", [FeatureMips64r2, FeatureN64, FeatureCnMips]>;
def MipsAsmParser : AsmParser {
let ShouldEmitMatchRegisterName = 0;
@@ -116,6 +190,5 @@ def MipsAsmParserVariant : AsmParserVariant {
def Mips : Target {
let InstructionSet = MipsInstrInfo;
let AssemblyParsers = [MipsAsmParser];
- let AssemblyWriters = [MipsAsmWriter];
let AssemblyParserVariants = [MipsAsmParserVariant];
}
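
Each ISA feature above lists the features it implies, so selecting a single processor definition pulls in a whole chain (mips4 implies mips3, which in turn implies mips2, the MIPS32 subsets, and the 64-bit register features). LLVM's SubtargetFeature machinery resolves these implications transitively; the following is only a rough standalone C++ sketch of that closure over a hand-copied subset of the table, not the actual implementation:

    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    // A few of the implication lists from the Mips.td hunk above.
    static const std::map<std::string, std::vector<std::string>> Implies = {
        {"mips2", {"mips1"}},
        {"mips3", {"mips2", "mips3_32", "mips3_32r2", "gp64", "fp64"}},
        {"mips4", {"mips3", "mips4_32", "mips4_32r2"}},
    };

    // Enable a feature together with everything it transitively implies.
    static void enableFeature(const std::string &F, std::set<std::string> &On) {
      if (!On.insert(F).second)
        return; // already enabled, avoid revisiting
      auto It = Implies.find(F);
      if (It == Implies.end())
        return; // leaf feature with no implications
      for (const std::string &Dep : It->second)
        enableFeature(Dep, On);
    }

    int main() {
      std::set<std::string> On;
      enableFeature("mips4", On);
      // On now holds mips1..mips4 plus the MIPS32 subsets, gp64 and fp64.
      return 0;
    }
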
diff --git a/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.cpp b/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.cpp
index 6655ff9..93706c2 100644
--- a/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.cpp
+++ b/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.cpp
@@ -15,6 +15,8 @@
#include "MCTargetDesc/MipsBaseInfo.h"
#include "Mips16InstrInfo.h"
#include "MipsInstrInfo.h"
+#include "MipsRegisterInfo.h"
+#include "MipsSubtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -27,6 +29,9 @@
using namespace llvm;
+Mips16FrameLowering::Mips16FrameLowering(const MipsSubtarget &STI)
+ : MipsFrameLowering(STI, STI.stackAlignment()) {}
+
void Mips16FrameLowering::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock &MBB = MF.front();
MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -47,30 +52,30 @@ void Mips16FrameLowering::emitPrologue(MachineFunction &MF) const {
TII.makeFrame(Mips::SP, StackSize, MBB, MBBI);
// emit ".cfi_def_cfa_offset StackSize"
- MCSymbol *AdjustSPLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl,
- TII.get(TargetOpcode::PROLOG_LABEL)).addSym(AdjustSPLabel);
- MMI.addFrameInst(
- MCCFIInstruction::createDefCfaOffset(AdjustSPLabel, -StackSize));
-
- MCSymbol *CSLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl,
- TII.get(TargetOpcode::PROLOG_LABEL)).addSym(CSLabel);
- unsigned S2 = MRI->getDwarfRegNum(Mips::S2, true);
- MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, S2, -8));
-
- unsigned S1 = MRI->getDwarfRegNum(Mips::S1, true);
- MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, S1, -12));
-
- unsigned S0 = MRI->getDwarfRegNum(Mips::S0, true);
- MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, S0, -16));
-
- unsigned RA = MRI->getDwarfRegNum(Mips::RA, true);
- MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, RA, -4));
-
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, -StackSize));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
+
+ if (CSI.size()) {
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
+
+ for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
+ E = CSI.end(); I != E; ++I) {
+ int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
+ unsigned Reg = I->getReg();
+ unsigned DReg = MRI->getDwarfRegNum(Reg, true);
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createOffset(nullptr, DReg, Offset));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+ }
if (hasFP(MF))
BuildMI(MBB, MBBI, dl, TII.get(Mips::MoveR3216), Mips::S0)
- .addReg(Mips::SP);
+ .addReg(Mips::SP).setMIFlag(MachineInstr::FrameSetup);
}
@@ -168,10 +173,15 @@ Mips16FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
void Mips16FrameLowering::
processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
RegScavenger *RS) const {
- MF.getRegInfo().setPhysRegUsed(Mips::RA);
- MF.getRegInfo().setPhysRegUsed(Mips::S0);
- MF.getRegInfo().setPhysRegUsed(Mips::S1);
- MF.getRegInfo().setPhysRegUsed(Mips::S2);
+ const Mips16InstrInfo &TII =
+ *static_cast<const Mips16InstrInfo*>(MF.getTarget().getInstrInfo());
+ const MipsRegisterInfo &RI = TII.getRegisterInfo();
+ const BitVector Reserved = RI.getReservedRegs(MF);
+ bool SaveS2 = Reserved[Mips::S2];
+ if (SaveS2)
+ MF.getRegInfo().setPhysRegUsed(Mips::S2);
+ if (hasFP(MF))
+ MF.getRegInfo().setPhysRegUsed(Mips::S0);
}
const MipsFrameLowering *
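
The prologue change above replaces the old hard-coded .cfi_offset entries for S0/S1/S2/RA with one CFI instruction per entry in getCalleeSavedInfo(), using each register's actual frame-object offset. A rough C++ sketch of what that loop amounts to at the assembly level (register numbers and offsets are sample values, not taken from this patch):

    #include <cstdio>
    #include <utility>
    #include <vector>

    // Emit one .cfi_offset per saved register, driven by (dwarf reg, offset) pairs.
    static void emitCalleeSaveCFI(const std::vector<std::pair<unsigned, int>> &CSI) {
      for (const auto &RegOff : CSI)
        std::printf("\t.cfi_offset %u, %d\n", RegOff.first, RegOff.second);
    }

    int main() {
      emitCalleeSaveCFI({{31, -4}, {17, -8}, {16, -12}}); // e.g. RA, S1, S0
      return 0;
    }
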
diff --git a/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.h b/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.h
index 8ce2ced..1fb7eda 100644
--- a/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.h
+++ b/contrib/llvm/lib/Target/Mips/Mips16FrameLowering.h
@@ -19,32 +19,31 @@
namespace llvm {
class Mips16FrameLowering : public MipsFrameLowering {
public:
- explicit Mips16FrameLowering(const MipsSubtarget &STI)
- : MipsFrameLowering(STI, STI.stackAlignment()) {}
+ explicit Mips16FrameLowering(const MipsSubtarget &STI);
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+ void emitPrologue(MachineFunction &MF) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const override;
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo *TRI) const override;
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const override;
- bool hasReservedCallFrame(const MachineFunction &MF) const;
+ bool hasReservedCallFrame(const MachineFunction &MF) const override;
void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const;
+ RegScavenger *RS) const override;
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/Mips/Mips16HardFloat.cpp b/contrib/llvm/lib/Target/Mips/Mips16HardFloat.cpp
index 81bf18c..14055d6 100644
--- a/contrib/llvm/lib/Target/Mips/Mips16HardFloat.cpp
+++ b/contrib/llvm/lib/Target/Mips/Mips16HardFloat.cpp
@@ -11,14 +11,16 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mips16-hard-float"
#include "Mips16HardFloat.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/Value.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <string>
+#define DEBUG_TYPE "mips16-hard-float"
+
static void inlineAsmOut
(LLVMContext &C, StringRef AsmString, BasicBlock *BB ) {
std::vector<llvm::Type *> AsmArgTypes;
@@ -167,6 +169,11 @@ static bool needsFPReturnHelper(Function &F) {
return whichFPReturnVariant(RetType) != NoFPRet;
}
+static bool needsFPReturnHelper(const FunctionType &FT) {
+ Type* RetType = FT.getReturnType();
+ return whichFPReturnVariant(RetType) != NoFPRet;
+}
+
static bool needsFPHelperFromSig(Function &F) {
return needsFPStubFromParams(F) || needsFPReturnHelper(F);
}
@@ -239,8 +246,8 @@ static void swapFPIntParams
// Make sure that we know we already need a stub for this function.
// Having called needsFPHelperFromSig
//
-static void assureFPCallStub(Function &F, Module *M,
- const MipsSubtarget &Subtarget){
+static void assureFPCallStub(Function &F, Module *M,
+ const MipsSubtarget &Subtarget) {
// for now we only need them for static relocation
if (Subtarget.getRelocationModel() == Reloc::PIC_)
return;
@@ -348,9 +355,8 @@ static const char *IntrinsicInline[] =
};
static bool isIntrinsicInline(Function *F) {
- return std::binary_search(
- IntrinsicInline, array_endof(IntrinsicInline),
- F->getName());
+ return std::binary_search(std::begin(IntrinsicInline),
+ std::end(IntrinsicInline), F->getName());
}
//
// Returns of float, double and complex need to be handled with a helper
@@ -400,13 +406,31 @@ static bool fixupFPReturnAndCall
Value *F = (M->getOrInsertFunction(Name, A, MyVoid, T, NULL));
CallInst::Create(F, Params, "", &Inst );
} else if (const CallInst *CI = dyn_cast<CallInst>(I)) {
+ const Value* V = CI->getCalledValue();
+ const Type* T = nullptr;
+ if (V) T = V->getType();
+ const PointerType *PFT=nullptr;
+ if (T) PFT = dyn_cast<PointerType>(T);
+ const FunctionType *FT=nullptr;
+ if (PFT) FT = dyn_cast<FunctionType>(PFT->getElementType());
+ Function *F_ = CI->getCalledFunction();
+ if (FT && needsFPReturnHelper(*FT) &&
+ !(F_ && isIntrinsicInline(F_))) {
+ Modified=true;
+ F.addFnAttr("saveS2");
+ }
+ if (F_ && !isIntrinsicInline(F_)) {
// pic mode calls are handled by already defined
// helper functions
- if (Subtarget.getRelocationModel() != Reloc::PIC_ ) {
- Function *F_ = CI->getCalledFunction();
- if (F_ && !isIntrinsicInline(F_) && needsFPHelperFromSig(*F_)) {
- assureFPCallStub(*F_, M, Subtarget);
+ if (needsFPReturnHelper(*F_)) {
Modified=true;
+ F.addFnAttr("saveS2");
+ }
+ if (Subtarget.getRelocationModel() != Reloc::PIC_ ) {
+ if (needsFPHelperFromSig(*F_)) {
+ assureFPCallStub(*F_, M, Subtarget);
+ Modified=true;
+ }
}
}
}
@@ -476,8 +500,9 @@ namespace llvm {
// declared via attributes as nomips16, we must:
// 1) fixup all returns of float, double, single and double complex
// by calling a helper function before the actual return.
-// 2) generate helper functions (stubs) that can be called by mips32 functions
-// that will move parameters passed normally passed in floating point
+// 2) generate helper functions (stubs) that can be called by mips32
+//    functions that will move parameters normally passed in
+//    floating point
 //    registers to the soft float equivalents.
// 3) in the case of static relocation, generate helper functions so that
// mips16 functions can call extern functions of unknown type (mips16 or
diff --git a/contrib/llvm/lib/Target/Mips/Mips16HardFloat.h b/contrib/llvm/lib/Target/Mips/Mips16HardFloat.h
index b7f712a..826887e 100644
--- a/contrib/llvm/lib/Target/Mips/Mips16HardFloat.h
+++ b/contrib/llvm/lib/Target/Mips/Mips16HardFloat.h
@@ -34,11 +34,11 @@ public:
TM(TM_), Subtarget(TM.getSubtarget<MipsSubtarget>()) {
}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "MIPS16 Hard Float Pass";
}
- virtual bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
protected:
/// Keep a pointer to the MipsSubtarget around so that we can make the right
diff --git a/contrib/llvm/lib/Target/Mips/Mips16HardFloatInfo.cpp b/contrib/llvm/lib/Target/Mips/Mips16HardFloatInfo.cpp
new file mode 100644
index 0000000..2eb6e5d
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/Mips16HardFloatInfo.cpp
@@ -0,0 +1,50 @@
+//===---- Mips16HardFloatInfo.cpp for Mips16 Hard Float -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Mips16 implementation of Mips16HardFloatInfo
+// namespace.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Mips16HardFloatInfo.h"
+#include <string.h>
+
+namespace llvm {
+
+namespace Mips16HardFloatInfo {
+
+const FuncNameSignature PredefinedFuncs[] = {
+ { "__floatdidf", { NoSig, DRet } },
+ { "__floatdisf", { NoSig, FRet } },
+ { "__floatundidf", { NoSig, DRet } },
+ { "__fixsfdi", { FSig, NoFPRet } },
+ { "__fixunsdfsi", { DSig, NoFPRet } },
+ { "__fixunsdfdi", { DSig, NoFPRet } },
+ { "__fixdfdi", { DSig, NoFPRet } },
+ { "__fixunssfsi", { FSig, NoFPRet } },
+ { "__fixunssfdi", { FSig, NoFPRet } },
+ { "__floatundisf", { NoSig, FRet } },
+ { nullptr, { NoSig, NoFPRet } }
+};
+
+// Just do a linear search for now; there are very few of these special cases.
+//
+extern FuncSignature const *findFuncSignature(const char *name) {
+ const char *name_;
+ int i = 0;
+ while (PredefinedFuncs[i].Name) {
+ name_ = PredefinedFuncs[i].Name;
+ if (strcmp(name, name_) == 0)
+ return &PredefinedFuncs[i].Signature;
+ i++;
+ }
+ return nullptr;
+}
+}
+}
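
The null-terminated table and linear lookup above are consumed later in this patch (the StubsNeeded handling added to Mips16ISelLowering.cpp) to decide whether a call to a predefined runtime helper needs a hard-float stub. A small usage sketch, assuming it is compiled together with the Mips16HardFloatInfo.h declarations from the next file in this diff:

    #include "Mips16HardFloatInfo.h"
    #include <cassert>

    using namespace llvm::Mips16HardFloatInfo;

    int main() {
      // __floatdisf takes no FP arguments that matter here and returns a float.
      const FuncSignature *S = findFuncSignature("__floatdisf");
      assert(S && S->ParamSig == NoSig && S->RetSig == FRet);
      // Names not present in PredefinedFuncs fall off the end of the table.
      assert(findFuncSignature("__not_a_predefined_helper") == nullptr);
      return 0;
    }
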
diff --git a/contrib/llvm/lib/Target/Mips/Mips16HardFloatInfo.h b/contrib/llvm/lib/Target/Mips/Mips16HardFloatInfo.h
new file mode 100644
index 0000000..02444d9
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/Mips16HardFloatInfo.h
@@ -0,0 +1,50 @@
+//===---- Mips16HardFloatInfo.h for Mips16 Hard Float --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some data structures relevant to the implementation of
+// Mips16 hard float.
+//
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MIPS16HARDFLOATINFO_H
+#define MIPS16HARDFLOATINFO_H
+
+namespace llvm {
+
+namespace Mips16HardFloatInfo {
+
+// Return types that matter for hard float are:
+// float, double, complex float, and complex double
+//
+enum FPReturnVariant { FRet, DRet, CFRet, CDRet, NoFPRet };
+
+//
+// Parameter types that matter are float, (float, float), (float, double),
+// double, (double, double), (double, float)
+//
+enum FPParamVariant { FSig, FFSig, FDSig, DSig, DDSig, DFSig, NoSig };
+
+struct FuncSignature {
+ FPParamVariant ParamSig;
+ FPReturnVariant RetSig;
+};
+
+struct FuncNameSignature {
+ const char *Name;
+ FuncSignature Signature;
+};
+
+extern const FuncNameSignature PredefinedFuncs[];
+
+extern FuncSignature const *findFuncSignature(const char *name);
+}
+}
+
+#endif
diff --git a/contrib/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
index 4948f40..7b05842 100644
--- a/contrib/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
@@ -11,10 +11,9 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mips-isel"
#include "Mips16ISelDAGToDAG.h"
-#include "Mips.h"
#include "MCTargetDesc/MipsBaseInfo.h"
+#include "Mips.h"
#include "MipsAnalyzeImmediate.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterInfo.h"
@@ -24,19 +23,22 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
+#define DEBUG_TYPE "mips-isel"
+
bool Mips16DAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
- if (!Subtarget.inMips16Mode())
+ Subtarget = &TM.getSubtarget<MipsSubtarget>();
+ if (!Subtarget->inMips16Mode())
return false;
return MipsDAGToDAGISel::runOnMachineFunction(MF);
}
@@ -44,7 +46,7 @@ bool Mips16DAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
std::pair<SDNode*, SDNode*>
Mips16DAGToDAGISel::selectMULT(SDNode *N, unsigned Opc, SDLoc DL, EVT Ty,
bool HasLo, bool HasHi) {
- SDNode *Lo = 0, *Hi = 0;
+ SDNode *Lo = nullptr, *Hi = nullptr;
SDNode *Mul = CurDAG->getMachineNode(Opc, DL, MVT::Glue, N->getOperand(0),
N->getOperand(1));
SDValue InFlag = SDValue(Mul, 0);
@@ -224,10 +226,12 @@ bool Mips16DAGToDAGISel::selectAddr16(
// If an indexed floating point load/store can be emitted, return false.
const LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(Parent);
- if (LS &&
- (LS->getMemoryVT() == MVT::f32 || LS->getMemoryVT() == MVT::f64) &&
- Subtarget.hasFPIdx())
- return false;
+ if (LS) {
+ if (LS->getMemoryVT() == MVT::f32 && Subtarget->hasMips4_32r2())
+ return false;
+ if (LS->getMemoryVT() == MVT::f64 && Subtarget->hasMips4_32r2())
+ return false;
+ }
}
Base = Addr;
Offset = CurDAG->getTargetConstant(0, ValTy);
@@ -297,7 +301,7 @@ std::pair<bool, SDNode*> Mips16DAGToDAGISel::selectNode(SDNode *Node) {
if (!SDValue(Node, 1).use_empty())
ReplaceUses(SDValue(Node, 1), SDValue(LoHi.second, 0));
- return std::make_pair(true, (SDNode*)NULL);
+ return std::make_pair(true, nullptr);
}
case ISD::MULHS:
@@ -308,7 +312,7 @@ std::pair<bool, SDNode*> Mips16DAGToDAGISel::selectNode(SDNode *Node) {
}
}
- return std::make_pair(false, (SDNode*)NULL);
+ return std::make_pair(false, nullptr);
}
FunctionPass *llvm::createMips16ISelDag(MipsTargetMachine &TM) {
diff --git a/contrib/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.h b/contrib/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.h
index 49dc6e5..e653b39 100644
--- a/contrib/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.h
+++ b/contrib/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.h
@@ -28,16 +28,16 @@ private:
SDValue getMips16SPAliasReg();
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
void getMips16SPRefReg(SDNode *Parent, SDValue &AliasReg);
- virtual bool selectAddr16(SDNode *Parent, SDValue N, SDValue &Base,
- SDValue &Offset, SDValue &Alias);
+ bool selectAddr16(SDNode *Parent, SDValue N, SDValue &Base,
+ SDValue &Offset, SDValue &Alias) override;
- virtual std::pair<bool, SDNode*> selectNode(SDNode *Node);
+ std::pair<bool, SDNode*> selectNode(SDNode *Node) override;
- virtual void processFunctionAfterISel(MachineFunction &MF);
+ void processFunctionAfterISel(MachineFunction &MF) override;
// Insert instructions to initialize the global base register in the
// first MBB of the function.
diff --git a/contrib/llvm/lib/Target/Mips/Mips16ISelLowering.cpp b/contrib/llvm/lib/Target/Mips/Mips16ISelLowering.cpp
index 61d8bb8..587925d 100644
--- a/contrib/llvm/lib/Target/Mips/Mips16ISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Mips/Mips16ISelLowering.cpp
@@ -10,17 +10,20 @@
// Subclass of MipsTargetLowering specialized for mips16.
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mips-lower"
#include "Mips16ISelLowering.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
#include "MipsRegisterInfo.h"
#include "MipsTargetMachine.h"
-#include "MCTargetDesc/MipsBaseInfo.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include <string>
using namespace llvm;
+#define DEBUG_TYPE "mips-lower"
+
static cl::opt<bool> DontExpandCondPseudos16(
"mips16-dont-expand-cond-pseudo",
cl::init(false),
@@ -115,20 +118,14 @@ static const Mips16IntrinsicHelperType Mips16IntrinsicHelper[] = {
{"truncf", "__mips16_call_stub_sf_1"},
};
-Mips16TargetLowering::Mips16TargetLowering(MipsTargetMachine &TM)
- : MipsTargetLowering(TM) {
- //
- // set up as if mips32 and then revert so we can test the mechanism
- // for switching
- addRegisterClass(MVT::i32, &Mips::GPR32RegClass);
- addRegisterClass(MVT::f32, &Mips::FGR32RegClass);
- computeRegisterProperties();
- clearRegisterClasses();
+Mips16TargetLowering::Mips16TargetLowering(MipsTargetMachine &TM,
+ const MipsSubtarget &STI)
+ : MipsTargetLowering(TM, STI) {
// Set up the register classes
addRegisterClass(MVT::i32, &Mips::CPU16RegsRegClass);
- if (Subtarget->inMips16HardFloat())
+ if (!TM.Options.UseSoftFloat)
setMips16HardFloatLibCalls();
setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
@@ -154,12 +151,15 @@ Mips16TargetLowering::Mips16TargetLowering(MipsTargetMachine &TM)
}
const MipsTargetLowering *
-llvm::createMips16TargetLowering(MipsTargetMachine &TM) {
- return new Mips16TargetLowering(TM);
+llvm::createMips16TargetLowering(MipsTargetMachine &TM,
+ const MipsSubtarget &STI) {
+ return new Mips16TargetLowering(TM, STI);
}
bool
-Mips16TargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
+Mips16TargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
+ unsigned,
+ bool *Fast) const {
return false;
}
@@ -349,7 +349,7 @@ unsigned int Mips16TargetLowering::getMips16HelperFunctionStubNumber
#define T P "0" , T1
#define P P_
static char const * vMips16Helper[MAX_STUB_NUMBER+1] =
- {0, T1 };
+ {nullptr, T1 };
#undef P
#define P P_ "sf_"
static char const * sfMips16Helper[MAX_STUB_NUMBER+1] =
@@ -426,11 +426,10 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
SelectionDAG &DAG = CLI.DAG;
MachineFunction &MF = DAG.getMachineFunction();
MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
- const char* Mips16HelperFunction = 0;
+ const char* Mips16HelperFunction = nullptr;
bool NeedMips16Helper = false;
- if (getTargetMachine().Options.UseSoftFloat &&
- Subtarget->inMips16HardFloat()) {
+ if (Subtarget.inMips16HardFloat()) {
//
// currently we don't have symbols tagged with the mips16 or mips32
// qualifier so we will assume that we don't know what kind it is.
@@ -440,19 +439,40 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(CLI.Callee)) {
Mips16Libcall Find = { RTLIB::UNKNOWN_LIBCALL, S->getSymbol() };
- if (std::binary_search(HardFloatLibCalls, array_endof(HardFloatLibCalls),
- Find))
+ if (std::binary_search(std::begin(HardFloatLibCalls),
+ std::end(HardFloatLibCalls), Find))
LookupHelper = false;
else {
- Mips16IntrinsicHelperType IntrinsicFind = {S->getSymbol(), ""};
+ const char *Symbol = S->getSymbol();
+ Mips16IntrinsicHelperType IntrinsicFind = { Symbol, "" };
+ const Mips16HardFloatInfo::FuncSignature *Signature =
+ Mips16HardFloatInfo::findFuncSignature(Symbol);
+ if (!IsPICCall && (Signature && (FuncInfo->StubsNeeded.find(Symbol) ==
+ FuncInfo->StubsNeeded.end()))) {
+ FuncInfo->StubsNeeded[Symbol] = Signature;
+ //
+              // S2 is normally saved only if the stub is for a function which
+              // returns a float or double value. This is because more work is
+              // required after the function the stub is calling completes; the
+              // stub cannot return directly, and it has no stack space to store
+              // the return address, so S2 is used for that purpose.
+ // In order to take advantage of not saving S2, we need to also
+ // optimize the call in the stub and this requires some further
+ // functionality in MipsAsmPrinter which we don't have yet.
+ // So for now we always save S2. The optimization will be done
+ // in a follow-on patch.
+ //
+ if (1 || (Signature->RetSig != Mips16HardFloatInfo::NoFPRet))
+ FuncInfo->setSaveS2();
+ }
// one more look at list of intrinsics
- if (std::binary_search(Mips16IntrinsicHelper,
- array_endof(Mips16IntrinsicHelper),
- IntrinsicFind)) {
- const Mips16IntrinsicHelperType *h =(std::find(Mips16IntrinsicHelper,
- array_endof(Mips16IntrinsicHelper),
- IntrinsicFind));
- Mips16HelperFunction = h->Helper;
+ const Mips16IntrinsicHelperType *Helper =
+ std::lower_bound(std::begin(Mips16IntrinsicHelper),
+ std::end(Mips16IntrinsicHelper), IntrinsicFind);
+ if (Helper != std::end(Mips16IntrinsicHelper) &&
+ *Helper == IntrinsicFind) {
+ Mips16HelperFunction = Helper->Helper;
NeedMips16Helper = true;
LookupHelper = false;
}
@@ -463,13 +483,13 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
Mips16Libcall Find = { RTLIB::UNKNOWN_LIBCALL,
G->getGlobal()->getName().data() };
- if (std::binary_search(HardFloatLibCalls, array_endof(HardFloatLibCalls),
- Find))
+ if (std::binary_search(std::begin(HardFloatLibCalls),
+ std::end(HardFloatLibCalls), Find))
LookupHelper = false;
}
- if (LookupHelper) Mips16HelperFunction =
- getMips16HelperFunction(CLI.RetTy, CLI.Args, NeedMips16Helper);
-
+ if (LookupHelper)
+ Mips16HelperFunction =
+ getMips16HelperFunction(CLI.RetTy, CLI.getArgs(), NeedMips16Helper);
}
SDValue JumpTarget = Callee;
@@ -524,8 +544,7 @@ emitSel16(unsigned Opc, MachineInstr *MI, MachineBasicBlock *BB) const {
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
// Next, add the true and fallthrough blocks as its successors.
@@ -587,8 +606,7 @@ MachineBasicBlock *Mips16TargetLowering::emitSelT16
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
// Next, add the true and fallthrough blocks as its successors.
@@ -652,8 +670,7 @@ MachineBasicBlock *Mips16TargetLowering::emitSeliT16
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
// Next, add the true and fallthrough blocks as its successors.
@@ -747,8 +764,8 @@ MachineBasicBlock *Mips16TargetLowering::emitFEXT_CCRX16_ins(
unsigned CC = MI->getOperand(0).getReg();
unsigned regX = MI->getOperand(1).getReg();
unsigned regY = MI->getOperand(2).getReg();
- BuildMI(*BB, MI, MI->getDebugLoc(),
- TII->get(SltOpc)).addReg(regX).addReg(regY);
+ BuildMI(*BB, MI, MI->getDebugLoc(), TII->get(SltOpc)).addReg(regX).addReg(
+ regY);
BuildMI(*BB, MI, MI->getDebugLoc(),
TII->get(Mips::MoveR3216), CC).addReg(Mips::T8);
MI->eraseFromParent(); // The pseudo instruction is gone now.
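
The hunks above replace the old std::binary_search plus std::find pair over the sorted helper tables with a single std::lower_bound followed by a bounds and equality check, so each table is searched once. The following is a minimal, self-contained sketch of that pattern only; the struct, table entries, and helper names are made-up placeholders, not the real HardFloatLibCalls or Mips16IntrinsicHelper data.

  // Sketch of the lower_bound lookup pattern adopted by the patch.
  #include <algorithm>
  #include <cstring>
  #include <iterator>

  struct HelperEntry {
    const char *Name;    // symbol being called
    const char *Helper;  // helper routine to use instead
    bool operator<(const HelperEntry &RHS) const {
      return std::strcmp(Name, RHS.Name) < 0;
    }
    bool operator==(const HelperEntry &RHS) const {
      return std::strcmp(Name, RHS.Name) == 0;
    }
  };

  // Must stay sorted by Name for lower_bound to be valid.
  static const HelperEntry Table[] = {
      {"__fixdfsi", "__mips16_fixdfsi_stub"},     // placeholder entries
      {"__floatsidf", "__mips16_floatsidf_stub"},
  };

  const char *findHelper(const char *Symbol) {
    HelperEntry Key = {Symbol, ""};
    const HelperEntry *I =
        std::lower_bound(std::begin(Table), std::end(Table), Key);
    // lower_bound only returns the first element not less than Key, so a
    // bounds check plus an equality test is still required before use.
    if (I != std::end(Table) && *I == Key)
      return I->Helper;
    return nullptr; // no helper needed
  }
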
diff --git a/contrib/llvm/lib/Target/Mips/Mips16ISelLowering.h b/contrib/llvm/lib/Target/Mips/Mips16ISelLowering.h
index 33b953f..e7e4d7f 100644
--- a/contrib/llvm/lib/Target/Mips/Mips16ISelLowering.h
+++ b/contrib/llvm/lib/Target/Mips/Mips16ISelLowering.h
@@ -11,26 +11,28 @@
//
//===----------------------------------------------------------------------===//
-#ifndef Mips16ISELLOWERING_H
-#define Mips16ISELLOWERING_H
+#ifndef MIPS16ISELLOWERING_H
+#define MIPS16ISELLOWERING_H
#include "MipsISelLowering.h"
namespace llvm {
class Mips16TargetLowering : public MipsTargetLowering {
public:
- explicit Mips16TargetLowering(MipsTargetMachine &TM);
+ explicit Mips16TargetLowering(MipsTargetMachine &TM,
+ const MipsSubtarget &STI);
- virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const;
+ bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
+ bool *Fast) const override;
- virtual MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
+ MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *MBB) const override;
private:
- virtual bool
- isEligibleForTailCallOptimization(const MipsCC &MipsCCInfo,
- unsigned NextStackOffset,
- const MipsFunctionInfo& FI) const;
+ bool isEligibleForTailCallOptimization(const MipsCC &MipsCCInfo,
+ unsigned NextStackOffset,
+ const MipsFunctionInfo& FI) const override;
void setMips16HardFloatLibCalls();
@@ -40,11 +42,12 @@ namespace llvm {
const char *getMips16HelperFunction
(Type* RetTy, ArgListTy &Args, bool &needHelper) const;
- virtual void
+ void
getOpndList(SmallVectorImpl<SDValue> &Ops,
std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
- CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const;
+ CallLoweringInfo &CLI, SDValue Callee,
+ SDValue Chain) const override;
MachineBasicBlock *emitSel16(unsigned Opc, MachineInstr *MI,
MachineBasicBlock *BB) const;
diff --git a/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.cpp b/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
index 000ea28..4dd9af2 100644
--- a/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
@@ -1,3 +1,4 @@
+
//===-- Mips16InstrInfo.cpp - Mips16 Instruction Information --------------===//
//
// The LLVM Compiler Infrastructure
@@ -28,17 +29,10 @@
using namespace llvm;
-static cl::opt<bool> NeverUseSaveRestore(
- "mips16-never-use-save-restore",
- cl::init(false),
- cl::desc("For testing ability to adjust stack pointer "
- "without save/restore instruction"),
- cl::Hidden);
-
+#define DEBUG_TYPE "mips16-instrinfo"
-Mips16InstrInfo::Mips16InstrInfo(MipsTargetMachine &tm)
- : MipsInstrInfo(tm, Mips::Bimm16),
- RI(*tm.getSubtargetImpl()) {}
+Mips16InstrInfo::Mips16InstrInfo(const MipsSubtarget &STI)
+ : MipsInstrInfo(STI, Mips::Bimm16), RI(STI) {}
const MipsRegisterInfo &Mips16InstrInfo::getRegisterInfo() const {
return RI;
@@ -49,9 +43,8 @@ const MipsRegisterInfo &Mips16InstrInfo::getRegisterInfo() const {
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
-unsigned Mips16InstrInfo::
-isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const
-{
+unsigned Mips16InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
return 0;
}
@@ -60,9 +53,8 @@ isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const
/// the source reg along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
-unsigned Mips16InstrInfo::
-isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const
-{
+unsigned Mips16InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
return 0;
}
@@ -98,11 +90,12 @@ void Mips16InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MIB.addReg(SrcReg, getKillRegState(KillSrc));
}
-void Mips16InstrInfo::
-storeRegToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned SrcReg, bool isKill, int FI,
- const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- int64_t Offset) const {
+void Mips16InstrInfo::storeRegToStack(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned SrcReg, bool isKill, int FI,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI,
+ int64_t Offset) const {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOStore);
@@ -115,10 +108,12 @@ storeRegToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
.addMemOperand(MMO);
}
-void Mips16InstrInfo::
-loadRegFromStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned DestReg, int FI, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, int64_t Offset) const {
+void Mips16InstrInfo::loadRegFromStack(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned DestReg, int FI,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI,
+ int64_t Offset) const {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad);
@@ -175,45 +170,57 @@ unsigned Mips16InstrInfo::getOppositeBranchOpc(unsigned Opc) const {
return 0;
}
+static void addSaveRestoreRegs(MachineInstrBuilder &MIB,
+ const std::vector<CalleeSavedInfo> &CSI,
+ unsigned Flags = 0) {
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ // Add the callee-saved register as live-in. Do not add if the register is
+ // RA and return address is taken, because it has already been added in
+ // method MipsTargetLowering::LowerRETURNADDR.
+ // It's killed at the spill, unless the register is RA and return address
+ // is taken.
+ unsigned Reg = CSI[e-i-1].getReg();
+ switch (Reg) {
+ case Mips::RA:
+ case Mips::S0:
+ case Mips::S1:
+ MIB.addReg(Reg, Flags);
+ break;
+ case Mips::S2:
+ break;
+ default:
+ llvm_unreachable("unexpected mips16 callee saved register");
+
+ }
+ }
+}
// Adjust SP by FrameSize bytes. Save RA, S0, S1
void Mips16InstrInfo::makeFrame(unsigned SP, int64_t FrameSize,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const {
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
DebugLoc DL = I != MBB.end() ? I->getDebugLoc() : DebugLoc();
- if (!NeverUseSaveRestore) {
- if (isUInt<11>(FrameSize))
- BuildMI(MBB, I, DL, get(Mips::SaveRaF16)).addImm(FrameSize);
- else {
- int Base = 2040; // should create template function like isUInt that
- // returns largest possible n bit unsigned integer
- int64_t Remainder = FrameSize - Base;
- BuildMI(MBB, I, DL, get(Mips::SaveRaF16)). addImm(Base);
- if (isInt<16>(-Remainder))
- BuildAddiuSpImm(MBB, I, -Remainder);
- else
- adjustStackPtrBig(SP, -Remainder, MBB, I, Mips::V0, Mips::V1);
- }
-
- }
+ MachineFunction &MF = *MBB.getParent();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const BitVector Reserved = RI.getReservedRegs(MF);
+ bool SaveS2 = Reserved[Mips::S2];
+ MachineInstrBuilder MIB;
+ unsigned Opc = ((FrameSize <= 128) && !SaveS2)? Mips::Save16:Mips::SaveX16;
+ MIB = BuildMI(MBB, I, DL, get(Opc));
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
+ addSaveRestoreRegs(MIB, CSI);
+ if (SaveS2)
+ MIB.addReg(Mips::S2);
+ if (isUInt<11>(FrameSize))
+ MIB.addImm(FrameSize);
else {
- //
- // sw ra, -4[sp]
- // sw s1, -8[sp]
- // sw s0, -12[sp]
-
- MachineInstrBuilder MIB1 = BuildMI(MBB, I, DL, get(Mips::SwRxSpImmX16),
- Mips::RA);
- MIB1.addReg(Mips::SP);
- MIB1.addImm(-4);
- MachineInstrBuilder MIB2 = BuildMI(MBB, I, DL, get(Mips::SwRxSpImmX16),
- Mips::S1);
- MIB2.addReg(Mips::SP);
- MIB2.addImm(-8);
- MachineInstrBuilder MIB3 = BuildMI(MBB, I, DL, get(Mips::SwRxSpImmX16),
- Mips::S0);
- MIB3.addReg(Mips::SP);
- MIB3.addImm(-12);
- adjustStackPtrBig(SP, -FrameSize, MBB, I, Mips::V0, Mips::V1);
+ int Base = 2040; // should create template function like isUInt that
+ // returns largest possible n bit unsigned integer
+ int64_t Remainder = FrameSize - Base;
+ MIB.addImm(Base);
+ if (isInt<16>(-Remainder))
+ BuildAddiuSpImm(MBB, I, -Remainder);
+ else
+ adjustStackPtrBig(SP, -Remainder, MBB, I, Mips::V0, Mips::V1);
}
}
@@ -222,42 +229,31 @@ void Mips16InstrInfo::restoreFrame(unsigned SP, int64_t FrameSize,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
DebugLoc DL = I != MBB.end() ? I->getDebugLoc() : DebugLoc();
- if (!NeverUseSaveRestore) {
- if (isUInt<11>(FrameSize))
- BuildMI(MBB, I, DL, get(Mips::RestoreRaF16)).addImm(FrameSize);
- else {
- int Base = 2040; // should create template function like isUInt that
- // returns largest possible n bit unsigned integer
- int64_t Remainder = FrameSize - Base;
- if (isInt<16>(Remainder))
- BuildAddiuSpImm(MBB, I, Remainder);
- else
- adjustStackPtrBig(SP, Remainder, MBB, I, Mips::A0, Mips::A1);
- BuildMI(MBB, I, DL, get(Mips::RestoreRaF16)). addImm(Base);
- }
+ MachineFunction *MF = MBB.getParent();
+ MachineFrameInfo *MFI = MF->getFrameInfo();
+ const BitVector Reserved = RI.getReservedRegs(*MF);
+ bool SaveS2 = Reserved[Mips::S2];
+ MachineInstrBuilder MIB;
+ unsigned Opc = ((FrameSize <= 128) && !SaveS2)?
+ Mips::Restore16:Mips::RestoreX16;
+
+ if (!isUInt<11>(FrameSize)) {
+ unsigned Base = 2040;
+ int64_t Remainder = FrameSize - Base;
+ FrameSize = Base; // should create template function like isUInt that
+ // returns largest possible n bit unsigned integer
+
+ if (isInt<16>(Remainder))
+ BuildAddiuSpImm(MBB, I, Remainder);
+ else
+ adjustStackPtrBig(SP, Remainder, MBB, I, Mips::A0, Mips::A1);
}
- else {
- adjustStackPtrBig(SP, FrameSize, MBB, I, Mips::A0, Mips::A1);
- // lw ra, -4[sp]
- // lw s1, -8[sp]
- // lw s0, -12[sp]
- MachineInstrBuilder MIB1 = BuildMI(MBB, I, DL, get(Mips::LwRxSpImmX16),
- Mips::A0);
- MIB1.addReg(Mips::SP);
- MIB1.addImm(-4);
- MachineInstrBuilder MIB0 = BuildMI(MBB, I, DL, get(Mips::Move32R16),
- Mips::RA);
- MIB0.addReg(Mips::A0);
- MachineInstrBuilder MIB2 = BuildMI(MBB, I, DL, get(Mips::LwRxSpImmX16),
- Mips::S1);
- MIB2.addReg(Mips::SP);
- MIB2.addImm(-8);
- MachineInstrBuilder MIB3 = BuildMI(MBB, I, DL, get(Mips::LwRxSpImmX16),
- Mips::S0);
- MIB3.addReg(Mips::SP);
- MIB3.addImm(-12);
- }
-
+ MIB = BuildMI(MBB, I, DL, get(Opc));
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
+ addSaveRestoreRegs(MIB, CSI, RegState::Define);
+ if (SaveS2)
+ MIB.addReg(Mips::S2, RegState::Define);
+ MIB.addImm(FrameSize);
}
// Adjust SP by Amount bytes where bytes can be up to 32bit number.
@@ -270,9 +266,6 @@ void Mips16InstrInfo::adjustStackPtrBig(unsigned SP, int64_t Amount,
MachineBasicBlock::iterator I,
unsigned Reg1, unsigned Reg2) const {
DebugLoc DL = I != MBB.end() ? I->getDebugLoc() : DebugLoc();
-// MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
-// unsigned Reg1 = RegInfo.createVirtualRegister(&Mips::CPU16RegsRegClass);
-// unsigned Reg2 = RegInfo.createVirtualRegister(&Mips::CPU16RegsRegClass);
//
// li reg1, constant
// move reg2, sp
@@ -281,7 +274,7 @@ void Mips16InstrInfo::adjustStackPtrBig(unsigned SP, int64_t Amount,
//
//
MachineInstrBuilder MIB1 = BuildMI(MBB, I, DL, get(Mips::LwConstant32), Reg1);
- MIB1.addImm(Amount);
+ MIB1.addImm(Amount).addImm(-1);
MachineInstrBuilder MIB2 = BuildMI(MBB, I, DL, get(Mips::MoveR3216), Reg2);
MIB2.addReg(Mips::SP, RegState::Kill);
MachineInstrBuilder MIB3 = BuildMI(MBB, I, DL, get(Mips::AdduRxRyRz16), Reg1);
@@ -292,9 +285,9 @@ void Mips16InstrInfo::adjustStackPtrBig(unsigned SP, int64_t Amount,
MIB4.addReg(Reg1, RegState::Kill);
}
-void Mips16InstrInfo::adjustStackPtrBigUnrestricted(unsigned SP, int64_t Amount,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const {
+void Mips16InstrInfo::adjustStackPtrBigUnrestricted(
+ unsigned SP, int64_t Amount, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
assert(false && "adjust stack pointer amount exceeded");
}
@@ -310,11 +303,10 @@ void Mips16InstrInfo::adjustStackPtr(unsigned SP, int64_t Amount,
/// This function generates the sequence of instructions needed to get the
/// result of adding register REG and immediate IMM.
-unsigned
-Mips16InstrInfo::loadImmediate(unsigned FrameReg,
- int64_t Imm, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator II, DebugLoc DL,
- unsigned &NewImm) const {
+unsigned Mips16InstrInfo::loadImmediate(unsigned FrameReg, int64_t Imm,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator II,
+ DebugLoc DL, unsigned &NewImm) const {
//
// given original instruction is:
// Instr rx, T[offset] where offset is too big.
@@ -350,7 +342,7 @@ Mips16InstrInfo::loadImmediate(unsigned FrameReg,
!TargetRegisterInfo::isVirtualRegister(MO.getReg()))
Candidates.reset(MO.getReg());
}
- //
+
// If the same register was used and defined in an instruction, then
// it will not be in the list of candidates.
//
@@ -359,7 +351,6 @@ Mips16InstrInfo::loadImmediate(unsigned FrameReg,
// present as an operand of the instruction. this tells
// whether the register is live before the instruction. if it's not
// then we don't need to save it in case there are no free registers.
- //
int DefReg = 0;
for (unsigned i = 0, e = II->getNumOperands(); i != e; ++i) {
MachineOperand &MO = II->getOperand(i);
@@ -368,9 +359,8 @@ Mips16InstrInfo::loadImmediate(unsigned FrameReg,
break;
}
}
- //
- BitVector Available = rs.getRegsAvailable(&Mips::CPU16RegsRegClass);
+ BitVector Available = rs.getRegsAvailable(&Mips::CPU16RegsRegClass);
Available &= Candidates;
//
// we use T0 for the first register, if we need to save something away.
@@ -379,7 +369,6 @@ Mips16InstrInfo::loadImmediate(unsigned FrameReg,
unsigned FirstRegSaved =0, SecondRegSaved=0;
unsigned FirstRegSavedTo = 0, SecondRegSavedTo = 0;
-
Reg = Available.find_first();
if (Reg == -1) {
@@ -393,7 +382,7 @@ Mips16InstrInfo::loadImmediate(unsigned FrameReg,
}
else
Available.reset(Reg);
- BuildMI(MBB, II, DL, get(Mips::LwConstant32), Reg).addImm(Imm);
+ BuildMI(MBB, II, DL, get(Mips::LwConstant32), Reg).addImm(Imm).addImm(-1);
NewImm = 0;
if (FrameReg == Mips::SP) {
SpReg = Available.find_first();
@@ -417,7 +406,7 @@ Mips16InstrInfo::loadImmediate(unsigned FrameReg,
BuildMI(MBB, II, DL, get(Mips:: AdduRxRyRz16), Reg).addReg(FrameReg)
.addReg(Reg, RegState::Kill);
if (FirstRegSaved || SecondRegSaved) {
- II = llvm::next(II);
+ II = std::next(II);
if (FirstRegSaved)
copyPhysReg(MBB, II, DL, FirstRegSaved, FirstRegSavedTo, true);
if (SecondRegSaved)
@@ -426,22 +415,6 @@ Mips16InstrInfo::loadImmediate(unsigned FrameReg,
return Reg;
}
-/// This function generates the sequence of instructions needed to get the
-/// result of adding register REG and immediate IMM.
-unsigned
-Mips16InstrInfo::basicLoadImmediate(
- unsigned FrameReg,
- int64_t Imm, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator II, DebugLoc DL,
- unsigned &NewImm) const {
- const TargetRegisterClass *RC = &Mips::CPU16RegsRegClass;
- MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
- unsigned Reg = RegInfo.createVirtualRegister(RC);
- BuildMI(MBB, II, DL, get(Mips::LwConstant32), Reg).addImm(Imm);
- NewImm = 0;
- return Reg;
-}
-
unsigned Mips16InstrInfo::getAnalyzableBrOpc(unsigned Opc) const {
return (Opc == Mips::BeqzRxImmX16 || Opc == Mips::BimmX16 ||
Opc == Mips::Bimm16 ||
@@ -463,7 +436,6 @@ void Mips16InstrInfo::ExpandRetRA16(MachineBasicBlock &MBB,
BuildMI(MBB, I, I->getDebugLoc(), get(Opc));
}
-
const MCInstrDesc &Mips16InstrInfo::AddiuSpImm(int64_t Imm) const {
if (validSpImm8(Imm))
return get(Mips::AddiuSpImm16);
@@ -477,8 +449,8 @@ void Mips16InstrInfo::BuildAddiuSpImm
BuildMI(MBB, I, DL, AddiuSpImm(Imm)).addImm(Imm);
}
-const MipsInstrInfo *llvm::createMips16InstrInfo(MipsTargetMachine &TM) {
- return new Mips16InstrInfo(TM);
+const MipsInstrInfo *llvm::createMips16InstrInfo(const MipsSubtarget &STI) {
+ return new Mips16InstrInfo(STI);
}
bool Mips16InstrInfo::validImmediate(unsigned Opcode, unsigned Reg,
@@ -518,7 +490,6 @@ bool Mips16InstrInfo::validImmediate(unsigned Opcode, unsigned Reg,
unsigned Mips16InstrInfo::getInlineAsmLength(const char *Str,
const MCAsmInfo &MAI) const {
-
// Count the number of instructions in the asm.
bool atInsnStart = true;
unsigned Length = 0;
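
The rewritten makeFrame()/restoreFrame() above pick the short save/restore encoding only for small frames with no S2 spill, and split any frame too large for the 11-bit immediate into a fixed base of 2040 plus a separate stack-pointer adjustment. Below is a minimal standalone sketch of that decision logic under those assumptions; the names FrameDecision, decideSave, and fitsUInt11 are illustrative only and not part of the tree.

  // Sketch of the frame-size handling followed by makeFrame/restoreFrame.
  #include <cstdint>

  struct FrameDecision {
    bool UseShortSave;      // Save16/Restore16 vs. SaveX16/RestoreX16
    int64_t SaveImmediate;  // immediate carried by the save itself
    int64_t Remainder;      // extra SP adjustment, 0 if none
  };

  static bool fitsUInt11(int64_t V) { return V >= 0 && V < (1 << 11); }

  FrameDecision decideSave(int64_t FrameSize, bool SaveS2) {
    FrameDecision D;
    D.UseShortSave = (FrameSize <= 128) && !SaveS2;
    if (fitsUInt11(FrameSize)) {
      D.SaveImmediate = FrameSize;
      D.Remainder = 0;
    } else {
      // Same constant the patch uses: cap the save's immediate at 2040 and
      // adjust the stack pointer by the rest with a separate addiu/addu.
      const int64_t Base = 2040;
      D.SaveImmediate = Base;
      D.Remainder = FrameSize - Base;
    }
    return D;
  }
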
diff --git a/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.h b/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.h
index d9a594b..a004c56 100644
--- a/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.h
+++ b/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.h
@@ -23,48 +23,48 @@ class Mips16InstrInfo : public MipsInstrInfo {
const Mips16RegisterInfo RI;
public:
- explicit Mips16InstrInfo(MipsTargetMachine &TM);
+ explicit Mips16InstrInfo(const MipsSubtarget &STI);
- virtual const MipsRegisterInfo &getRegisterInfo() const;
+ const MipsRegisterInfo &getRegisterInfo() const override;
/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
- virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
+ unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
- virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
+ unsigned isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
- virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const;
+ void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const override;
- virtual void storeRegToStack(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
- int64_t Offset) const;
+ void storeRegToStack(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI,
+ int64_t Offset) const override;
- virtual void loadRegFromStack(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
- int64_t Offset) const;
+ void loadRegFromStack(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI,
+ int64_t Offset) const override;
- virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;
+ bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
- virtual unsigned getOppositeBranchOpc(unsigned Opc) const;
+ unsigned getOppositeBranchOpc(unsigned Opc) const override;
// Adjust SP by FrameSize bytes. Save RA, S0, S1
void makeFrame(unsigned SP, int64_t FrameSize, MachineBasicBlock &MBB,
@@ -88,11 +88,6 @@ public:
MachineBasicBlock::iterator II, DebugLoc DL,
unsigned &NewImm) const;
- unsigned basicLoadImmediate(unsigned FrameReg,
- int64_t Imm, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator II, DebugLoc DL,
- unsigned &NewImm) const;
-
static bool validImmediate(unsigned Opcode, unsigned Reg, int64_t Amount);
static bool validSpImm8(int offset) {
@@ -109,9 +104,9 @@ public:
(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, int64_t Imm) const;
unsigned getInlineAsmLength(const char *Str,
- const MCAsmInfo &MAI) const;
+ const MCAsmInfo &MAI) const override;
private:
- virtual unsigned getAnalyzableBrOpc(unsigned Opc) const;
+ unsigned getAnalyzableBrOpc(unsigned Opc) const override;
void ExpandRetRA16(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned Opc) const;
diff --git a/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.td b/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.td
index 7441c78..5e4eebb 100644
--- a/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.td
+++ b/contrib/llvm/lib/Target/Mips/Mips16InstrInfo.td
@@ -119,7 +119,18 @@ class FJAL16_ins<bits<1> _X, string asmstr,
!strconcat(asmstr, "\t$imm\n\tnop"),[],
itin> {
let isCodeGenOnly=1;
+ let Size=6;
}
+
+class FJALB16_ins<bits<1> _X, string asmstr,
+ InstrItinClass itin>:
+ FJAL16<_X, (outs), (ins simm20:$imm),
+ !strconcat(asmstr, "\t$imm\t# branch\n\tnop"),[],
+ itin> {
+ let isCodeGenOnly=1;
+ let Size=6;
+}
+
//
// EXT-I instruction format
//
@@ -289,7 +300,7 @@ class FI8_MOV32R16_ins<string asmstr, InstrItinClass itin>:
//
// These are pseudo formats for multiply
-// This first one can be changed to non pseudo now.
+// This first one can be changed to non-pseudo now.
//
// MULT
//
@@ -734,6 +745,13 @@ def DivuRxRy16: FRR16_div_ins<0b11011, "divu", IIAlu> {
def Jal16 : FJAL16_ins<0b0, "jal", IIAlu> {
let hasDelaySlot = 0; // not true, but we add the nop for now
let isCall=1;
+ let Defs = [RA];
+}
+
+def JalB16 : FJALB16_ins<0b0, "jal", IIAlu>, branch16 {
+ let hasDelaySlot = 0; // not true, but we add the nop for now
+ let isBranch=1;
+ let Defs = [RA];
}
//
@@ -769,7 +787,7 @@ def JrcRx16: FRR16_JALRC_ins<1, 1, 0, "jrc", IIAlu> {
// Purpose: Load Byte (Extended)
// To load a byte from memory as a signed value.
//
-def LbRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10011, "lb", mem16, IILoad>, MayLoad{
+def LbRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10011, "lb", mem16, II_LB>, MayLoad{
let isCodeGenOnly = 1;
}
@@ -779,7 +797,7 @@ def LbRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10011, "lb", mem16, IILoad>, MayLoad{
// To load a byte from memory as an unsigned value.
//
def LbuRxRyOffMemX16:
- FEXT_RRI16_mem_ins<0b10100, "lbu", mem16, IILoad>, MayLoad {
+ FEXT_RRI16_mem_ins<0b10100, "lbu", mem16, II_LBU>, MayLoad {
let isCodeGenOnly = 1;
}
@@ -788,7 +806,7 @@ def LbuRxRyOffMemX16:
// Purpose: Load Halfword signed (Extended)
// To load a halfword from memory as a signed value.
//
-def LhRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10100, "lh", mem16, IILoad>, MayLoad{
+def LhRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10100, "lh", mem16, II_LH>, MayLoad{
let isCodeGenOnly = 1;
}
@@ -798,7 +816,7 @@ def LhRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10100, "lh", mem16, IILoad>, MayLoad{
// To load a halfword from memory as an unsigned value.
//
def LhuRxRyOffMemX16:
- FEXT_RRI16_mem_ins<0b10100, "lhu", mem16, IILoad>, MayLoad {
+ FEXT_RRI16_mem_ins<0b10100, "lhu", mem16, II_LHU>, MayLoad {
let isCodeGenOnly = 1;
}
@@ -825,7 +843,7 @@ def LiRxImmAlignX16: FEXT_RI16_ins<0b01101, ".align 2\n\tli", IIAlu> {
// Purpose: Load Word (Extended)
// To load a word from memory as a signed value.
//
-def LwRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10011, "lw", mem16, IILoad>, MayLoad{
+def LwRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10011, "lw", mem16, II_LW>, MayLoad{
let isCodeGenOnly = 1;
}
@@ -833,13 +851,13 @@ def LwRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10011, "lw", mem16, IILoad>, MayLoad{
// Purpose: Load Word (SP-Relative, Extended)
// To load an SP-relative word from memory as a signed value.
//
-def LwRxSpImmX16: FEXT_RI16_SP_explicit_ins<0b10010, "lw", IILoad>, MayLoad{
+def LwRxSpImmX16: FEXT_RI16_SP_explicit_ins<0b10010, "lw", II_LW>, MayLoad{
let Uses = [SP];
}
-def LwRxPcTcp16: FRI16_TCP_ins<0b10110, "lw", IILoad>, MayLoad;
+def LwRxPcTcp16: FRI16_TCP_ins<0b10110, "lw", II_LW>, MayLoad;
-def LwRxPcTcpX16: FEXT_RI16_TCP_ins<0b10110, "lw", IILoad>, MayLoad;
+def LwRxPcTcpX16: FEXT_RI16_TCP_ins<0b10110, "lw", II_LW>, MayLoad;
//
// Format: MOVE r32, rz MIPS16e
// Purpose: Move
@@ -941,26 +959,18 @@ def OrRxRxRy16: FRxRxRy16_ins<0b01101, "or", IIAlu>, ArithLogic16Defs<1>;
// stack
//
-// fixed form for restoring RA and the frame
-// for direct object emitter, encoding needs to be adjusted for the
-// frame size
-//
-let ra=1, s=0,s0=1,s1=1 in
-def RestoreRaF16:
- FI8_SVRS16<0b1, (outs), (ins uimm16:$frame_size),
- "restore\t$$ra, $$s0, $$s1, $$s2, $frame_size", [], IILoad >, MayLoad {
+def Restore16:
+ FI8_SVRS16<0b1, (outs), (ins variable_ops),
+ "", [], II_RESTORE >, MayLoad {
let isCodeGenOnly = 1;
- let Defs = [S0, S1, S2, RA, SP];
+ let Defs = [SP];
let Uses = [SP];
}
-// Use Restore to increment SP since SP is not a Mip 16 register, this
-// is an easy way to do that which does not require a register.
-//
-let ra=0, s=0,s0=0,s1=0 in
-def RestoreIncSpF16:
- FI8_SVRS16<0b1, (outs), (ins uimm16:$frame_size),
- "restore\t$frame_size", [], IILoad >, MayLoad {
+
+def RestoreX16:
+ FI8_SVRS16<0b1, (outs), (ins variable_ops),
+ "", [], II_RESTORE >, MayLoad {
let isCodeGenOnly = 1;
let Defs = [SP];
let Uses = [SP];
@@ -973,23 +983,17 @@ def RestoreIncSpF16:
// To set up a stack frame on entry to a subroutine,
// saving return address and static registers, and adjusting stack
//
-let ra=1, s=1,s0=1,s1=1 in
-def SaveRaF16:
- FI8_SVRS16<0b1, (outs), (ins uimm16:$frame_size),
- "save\t$$ra, $$s0, $$s1, $$s2, $frame_size", [], IIStore >, MayStore {
+def Save16:
+ FI8_SVRS16<0b1, (outs), (ins variable_ops),
+ "", [], II_SAVE >, MayStore {
let isCodeGenOnly = 1;
- let Uses = [RA, SP, S0, S1, S2];
+ let Uses = [SP];
let Defs = [SP];
}
-//
-// Use Save to decrement the SP by a constant since SP is not
-// a Mips16 register.
-//
-let ra=0, s=0,s0=0,s1=0 in
-def SaveDecSpF16:
- FI8_SVRS16<0b1, (outs), (ins uimm16:$frame_size),
- "save\t$frame_size", [], IIStore >, MayStore {
+def SaveX16:
+ FI8_SVRS16<0b1, (outs), (ins variable_ops),
+ "", [], II_SAVE >, MayStore {
let isCodeGenOnly = 1;
let Uses = [SP];
let Defs = [SP];
@@ -1000,7 +1004,7 @@ def SaveDecSpF16:
// To store a byte to memory.
//
def SbRxRyOffMemX16:
- FEXT_RRI16_mem2_ins<0b11000, "sb", mem16, IIStore>, MayStore;
+ FEXT_RRI16_mem2_ins<0b11000, "sb", mem16, II_SB>, MayStore;
//
// Format: SEB rx MIPS16e
@@ -1138,7 +1142,7 @@ def SelTBtneZSltiu: SeliT<"btnez", "sltiu">;
// To store a halfword to memory.
//
def ShRxRyOffMemX16:
- FEXT_RRI16_mem2_ins<0b11001, "sh", mem16, IIStore>, MayStore;
+ FEXT_RRI16_mem2_ins<0b11001, "sh", mem16, II_SH>, MayStore;
//
// Format: SLL rx, ry, sa MIPS16e
@@ -1274,7 +1278,7 @@ def SubuRxRyRz16: FRRR16_ins<0b11, "subu", IIAlu>, ArithLogic16Defs<0>;
// To store a word to memory.
//
def SwRxRyOffMemX16:
- FEXT_RRI16_mem2_ins<0b11011, "sw", mem16, IIStore>, MayStore;
+ FEXT_RRI16_mem2_ins<0b11011, "sw", mem16, II_SW>, MayStore;
//
// Format: SW rx, offset(sp) MIPS16e
@@ -1282,7 +1286,7 @@ def SwRxRyOffMemX16:
// To store an SP-relative word to memory.
//
def SwRxSpImmX16: FEXT_RI16_SP_Store_explicit_ins
- <0b11010, "sw", IIStore>, MayStore;
+ <0b11010, "sw", II_SW>, MayStore;
//
//
@@ -1366,15 +1370,19 @@ def : Mips16Pat<(MipsJmpLink (i32 texternalsym:$dst)),
(Jal16 texternalsym:$dst)>;
// Indirect branch
-def: Mips16Pat<
- (brind CPU16Regs:$rs),
- (JrcRx16 CPU16Regs:$rs)>;
+def: Mips16Pat<(brind CPU16Regs:$rs), (JrcRx16 CPU16Regs:$rs)> {
+ // Ensure that the addition of MIPS32r6/MIPS64r6 support does not change
+ // MIPS16's behaviour.
+ let AddedComplexity = 1;
+}
// Jump and Link (Call)
let isCall=1, hasDelaySlot=0 in
def JumpLinkReg16:
FRR16_JALRC<0, 0, 0, (outs), (ins CPU16Regs:$rs),
- "jalrc \t$rs", [(MipsJmpLink CPU16Regs:$rs)], IIBranch>;
+ "jalrc \t$rs", [(MipsJmpLink CPU16Regs:$rs)], IIBranch> {
+ let Defs = [RA];
+}
// Mips16 pseudos
let isReturn=1, isTerminator=1, hasDelaySlot=1, isBarrier=1, hasCtrlDep=1,
@@ -1890,7 +1898,7 @@ def GotPrologue16:
MipsPseudo16<
(outs CPU16Regs:$rh, CPU16Regs:$rl),
(ins simm16:$immHi, simm16:$immLo),
- ".align 2\n\tli\t$rh, $immHi\n\taddiu\t$rl, $$pc, $immLo\n ",[]> ;
+ "li\t$rh, $immHi\n\taddiu\t$rl, $$pc, $immLo\n ",[]> ;
// An operand for the CONSTPOOL_ENTRY pseudo-instruction.
def cpinst_operand : Operand<i32> {
diff --git a/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.cpp b/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.cpp
index 9d0f2c9..dbee774 100644
--- a/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.cpp
@@ -12,7 +12,6 @@
//===----------------------------------------------------------------------===//
#include "Mips16RegisterInfo.h"
-#include "Mips16InstrInfo.h"
#include "Mips.h"
#include "Mips16InstrInfo.h"
#include "MipsAnalyzeImmediate.h"
@@ -25,9 +24,8 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
@@ -41,6 +39,8 @@
using namespace llvm;
+#define DEBUG_TYPE "mips16-registerinfo"
+
Mips16RegisterInfo::Mips16RegisterInfo(const MipsSubtarget &ST)
: MipsRegisterInfo(ST) {}
diff --git a/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.h b/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.h
index 13e82a3..f59f1a7 100644
--- a/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.h
+++ b/contrib/llvm/lib/Target/Mips/Mips16RegisterInfo.h
@@ -23,24 +23,24 @@ class Mips16RegisterInfo : public MipsRegisterInfo {
public:
Mips16RegisterInfo(const MipsSubtarget &Subtarget);
- bool requiresRegisterScavenging(const MachineFunction &MF) const;
+ bool requiresRegisterScavenging(const MachineFunction &MF) const override;
- bool requiresFrameIndexScavenging(const MachineFunction &MF) const;
+ bool requiresFrameIndexScavenging(const MachineFunction &MF) const override;
- bool useFPForScavengingIndex(const MachineFunction &MF) const;
+ bool useFPForScavengingIndex(const MachineFunction &MF) const override;
bool saveScavengerRegister(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
MachineBasicBlock::iterator &UseMI,
const TargetRegisterClass *RC,
- unsigned Reg) const;
+ unsigned Reg) const override;
- virtual const TargetRegisterClass *intRegClass(unsigned Size) const;
+ const TargetRegisterClass *intRegClass(unsigned Size) const override;
private:
- virtual void eliminateFI(MachineBasicBlock::iterator II, unsigned OpNo,
- int FrameIndex, uint64_t StackSize,
- int64_t SPOffset) const;
+ void eliminateFI(MachineBasicBlock::iterator II, unsigned OpNo,
+ int FrameIndex, uint64_t StackSize,
+ int64_t SPOffset) const override;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/Mips/Mips32r6InstrFormats.td b/contrib/llvm/lib/Target/Mips/Mips32r6InstrFormats.td
new file mode 100644
index 0000000..e4ec96a
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/Mips32r6InstrFormats.td
@@ -0,0 +1,543 @@
+//=- Mips32r6InstrFormats.td - Mips32r6 Instruction Formats -*- tablegen -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes Mips32r6 instruction formats.
+//
+//===----------------------------------------------------------------------===//
+
+class MipsR6Inst : MipsInst<(outs), (ins), "", [], NoItinerary, FrmOther>,
+ PredicateControl {
+ let DecoderNamespace = "Mips32r6_64r6";
+ let EncodingPredicates = [HasStdEnc];
+}
+
+//===----------------------------------------------------------------------===//
+//
+// Field Values
+//
+//===----------------------------------------------------------------------===//
+
+class OPGROUP<bits<6> Val> {
+ bits<6> Value = Val;
+}
+def OPGROUP_COP1 : OPGROUP<0b010001>;
+def OPGROUP_COP2 : OPGROUP<0b010010>;
+def OPGROUP_ADDI : OPGROUP<0b001000>;
+def OPGROUP_AUI : OPGROUP<0b001111>;
+def OPGROUP_BLEZ : OPGROUP<0b000110>;
+def OPGROUP_BGTZ : OPGROUP<0b000111>;
+def OPGROUP_BLEZL : OPGROUP<0b010110>;
+def OPGROUP_BGTZL : OPGROUP<0b010111>;
+def OPGROUP_DADDI : OPGROUP<0b011000>;
+def OPGROUP_DAUI : OPGROUP<0b011101>;
+def OPGROUP_PCREL : OPGROUP<0b111011>;
+def OPGROUP_REGIMM : OPGROUP<0b000001>;
+def OPGROUP_SPECIAL : OPGROUP<0b000000>;
+// The spec occasionally names this value LL, LLD, SC, or SCD.
+def OPGROUP_SPECIAL3 : OPGROUP<0b011111>;
+// The spec names this constant LWC2, LDC2, SWC2, and SDC2 in different places.
+def OPGROUP_COP2LDST : OPGROUP<0b010010>;
+
+class OPCODE2<bits<2> Val> {
+ bits<2> Value = Val;
+}
+def OPCODE2_ADDIUPC : OPCODE2<0b00>;
+def OPCODE2_LWPC : OPCODE2<0b01>;
+def OPCODE2_LWUPC : OPCODE2<0b10>;
+
+class OPCODE3<bits<3> Val> {
+ bits<3> Value = Val;
+}
+def OPCODE3_LDPC : OPCODE3<0b110>;
+
+class OPCODE5<bits<5> Val> {
+ bits<5> Value = Val;
+}
+def OPCODE5_ALUIPC : OPCODE5<0b11111>;
+def OPCODE5_AUIPC : OPCODE5<0b11110>;
+def OPCODE5_DAHI : OPCODE5<0b00110>;
+def OPCODE5_DATI : OPCODE5<0b11110>;
+def OPCODE5_BC1EQZ : OPCODE5<0b01001>;
+def OPCODE5_BC1NEZ : OPCODE5<0b01101>;
+def OPCODE5_BC2EQZ : OPCODE5<0b01001>;
+def OPCODE5_BC2NEZ : OPCODE5<0b01101>;
+def OPCODE5_BGEZAL : OPCODE5<0b10001>;
+// The next four constants are unnamed in the spec. These names are taken from
+// the OPGROUP names they are used with.
+def OPCODE5_LDC2 : OPCODE5<0b01110>;
+def OPCODE5_LWC2 : OPCODE5<0b01010>;
+def OPCODE5_SDC2 : OPCODE5<0b01111>;
+def OPCODE5_SWC2 : OPCODE5<0b01011>;
+
+class OPCODE6<bits<6> Val> {
+ bits<6> Value = Val;
+}
+def OPCODE6_ALIGN : OPCODE6<0b100000>;
+def OPCODE6_DALIGN : OPCODE6<0b100100>;
+def OPCODE6_BITSWAP : OPCODE6<0b100000>;
+def OPCODE6_DBITSWAP : OPCODE6<0b100100>;
+def OPCODE6_JALR : OPCODE6<0b001001>;
+def OPCODE6_CACHE : OPCODE6<0b100101>;
+def OPCODE6_PREF : OPCODE6<0b110101>;
+// The next four constants are unnamed in the spec. These names are taken from
+// the OPGROUP names they are used with.
+def OPCODE6_LL : OPCODE6<0b110110>;
+def OPCODE6_LLD : OPCODE6<0b110111>;
+def OPCODE6_SC : OPCODE6<0b100110>;
+def OPCODE6_SCD : OPCODE6<0b100111>;
+def OPCODE6_CLO : OPCODE6<0b010001>;
+def OPCODE6_CLZ : OPCODE6<0b010000>;
+def OPCODE6_DCLO : OPCODE6<0b010011>;
+def OPCODE6_DCLZ : OPCODE6<0b010010>;
+def OPCODE6_LSA : OPCODE6<0b000101>;
+def OPCODE6_DLSA : OPCODE6<0b010101>;
+def OPCODE6_SDBBP : OPCODE6<0b001110>;
+
+class FIELD_FMT<bits<5> Val> {
+ bits<5> Value = Val;
+}
+def FIELD_FMT_S : FIELD_FMT<0b10000>;
+def FIELD_FMT_D : FIELD_FMT<0b10001>;
+
+class FIELD_CMP_COND<bits<5> Val> {
+ bits<5> Value = Val;
+}
+// Note: The CMP_COND_FMT names differ from the C_COND_FMT names.
+def FIELD_CMP_COND_AF : FIELD_CMP_COND<0b00000>;
+def FIELD_CMP_COND_UN : FIELD_CMP_COND<0b00001>;
+def FIELD_CMP_COND_EQ : FIELD_CMP_COND<0b00010>;
+def FIELD_CMP_COND_UEQ : FIELD_CMP_COND<0b00011>;
+def FIELD_CMP_COND_LT : FIELD_CMP_COND<0b00100>;
+def FIELD_CMP_COND_ULT : FIELD_CMP_COND<0b00101>;
+def FIELD_CMP_COND_LE : FIELD_CMP_COND<0b00110>;
+def FIELD_CMP_COND_ULE : FIELD_CMP_COND<0b00111>;
+def FIELD_CMP_COND_SAF : FIELD_CMP_COND<0b01000>;
+def FIELD_CMP_COND_SUN : FIELD_CMP_COND<0b01001>;
+def FIELD_CMP_COND_SEQ : FIELD_CMP_COND<0b01010>;
+def FIELD_CMP_COND_SUEQ : FIELD_CMP_COND<0b01011>;
+def FIELD_CMP_COND_SLT : FIELD_CMP_COND<0b01100>;
+def FIELD_CMP_COND_SULT : FIELD_CMP_COND<0b01101>;
+def FIELD_CMP_COND_SLE : FIELD_CMP_COND<0b01110>;
+def FIELD_CMP_COND_SULE : FIELD_CMP_COND<0b01111>;
+
+class FIELD_CMP_FORMAT<bits<5> Val> {
+ bits<5> Value = Val;
+}
+def FIELD_CMP_FORMAT_S : FIELD_CMP_FORMAT<0b10100>;
+def FIELD_CMP_FORMAT_D : FIELD_CMP_FORMAT<0b10101>;
+
+//===----------------------------------------------------------------------===//
+//
+// Disambiguators
+//
+//===----------------------------------------------------------------------===//
+//
+// Some encodings are ambiguous except by comparing field values.
+
+class DecodeDisambiguates<string Name> {
+ string DecoderMethod = !strconcat("Decode", Name);
+}
+
+class DecodeDisambiguatedBy<string Name> : DecodeDisambiguates<Name> {
+ string DecoderNamespace = "Mips32r6_64r6_Ambiguous";
+}
+
+//===----------------------------------------------------------------------===//
+//
+// Encoding Formats
+//
+//===----------------------------------------------------------------------===//
+
+class AUI_FM : MipsR6Inst {
+ bits<5> rs;
+ bits<5> rt;
+ bits<16> imm;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_AUI.Value;
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-0} = imm;
+}
+
+class DAUI_FM : AUI_FM {
+ let Inst{31-26} = OPGROUP_DAUI.Value;
+}
+
+class BAL_FM : MipsR6Inst {
+ bits<16> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_REGIMM.Value;
+ let Inst{25-21} = 0b00000;
+ let Inst{20-16} = OPCODE5_BGEZAL.Value;
+ let Inst{15-0} = offset;
+}
+
+class COP1_2R_FM<bits<6> funct, FIELD_FMT Format> : MipsR6Inst {
+ bits<5> fs;
+ bits<5> fd;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_COP1.Value;
+ let Inst{25-21} = Format.Value;
+ let Inst{20-16} = 0b00000;
+ let Inst{15-11} = fs;
+ let Inst{10-6} = fd;
+ let Inst{5-0} = funct;
+}
+
+class COP1_3R_FM<bits<6> funct, FIELD_FMT Format> : MipsR6Inst {
+ bits<5> ft;
+ bits<5> fs;
+ bits<5> fd;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_COP1.Value;
+ let Inst{25-21} = Format.Value;
+ let Inst{20-16} = ft;
+ let Inst{15-11} = fs;
+ let Inst{10-6} = fd;
+ let Inst{5-0} = funct;
+}
+
+class COP1_BCCZ_FM<OPCODE5 Operation> : MipsR6Inst {
+ bits<5> ft;
+ bits<16> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_COP1.Value;
+ let Inst{25-21} = Operation.Value;
+ let Inst{20-16} = ft;
+ let Inst{15-0} = offset;
+}
+
+class COP2_BCCZ_FM<OPCODE5 Operation> : MipsR6Inst {
+ bits<5> ct;
+ bits<16> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_COP2.Value;
+ let Inst{25-21} = Operation.Value;
+ let Inst{20-16} = ct;
+ let Inst{15-0} = offset;
+}
+
+class PCREL16_FM<OPCODE5 Operation> : MipsR6Inst {
+ bits<5> rs;
+ bits<16> imm;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_PCREL.Value;
+ let Inst{25-21} = rs;
+ let Inst{20-16} = Operation.Value;
+ let Inst{15-0} = imm;
+}
+
+class PCREL19_FM<OPCODE2 Operation> : MipsR6Inst {
+ bits<5> rs;
+ bits<19> imm;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_PCREL.Value;
+ let Inst{25-21} = rs;
+ let Inst{20-19} = Operation.Value;
+ let Inst{18-0} = imm;
+}
+
+class PCREL18_FM<OPCODE3 Operation> : MipsR6Inst {
+ bits<5> rs;
+ bits<18> imm;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_PCREL.Value;
+ let Inst{25-21} = rs;
+ let Inst{20-18} = Operation.Value;
+ let Inst{17-0} = imm;
+}
+
+class SPECIAL3_2R_FM<OPCODE6 Operation> : MipsR6Inst {
+ bits<5> rd;
+ bits<5> rt;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_SPECIAL3.Value;
+ let Inst{25-21} = 0b00000;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = rd;
+ let Inst{10-6} = 0b00000;
+ let Inst{5-0} = Operation.Value;
+}
+
+class SPECIAL3_MEM_FM<OPCODE6 Operation> : MipsR6Inst {
+ bits<21> addr;
+ bits<5> hint;
+ bits<5> base = addr{20-16};
+ bits<9> offset = addr{8-0};
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_SPECIAL3.Value;
+ let Inst{25-21} = base;
+ let Inst{20-16} = hint;
+ let Inst{15-7} = offset;
+ let Inst{6} = 0;
+ let Inst{5-0} = Operation.Value;
+}
+
+class SPECIAL_2R_FM<OPCODE6 Operation> : MipsR6Inst {
+ bits<5> rd;
+ bits<5> rs;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_SPECIAL.Value;
+ let Inst{25-21} = rs;
+ let Inst{20-16} = 0b00000;
+ let Inst{15-11} = rd;
+ let Inst{10-6} = 0b00001;
+ let Inst{5-0} = Operation.Value;
+}
+
+class SPECIAL_3R_FM<bits<5> mulop, bits<6> funct> : MipsR6Inst {
+ bits<5> rd;
+ bits<5> rs;
+ bits<5> rt;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_SPECIAL.Value;
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = rd;
+ let Inst{10-6} = mulop;
+ let Inst{5-0} = funct;
+}
+
+class SPECIAL_SDBBP_FM : MipsR6Inst {
+ bits<20> code_;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_SPECIAL.Value;
+ let Inst{25-6} = code_;
+ let Inst{5-0} = OPCODE6_SDBBP.Value;
+}
+
+// This class is ambiguous with other branches:
+// BEQC/BNEC require that rs > rt
+class CMP_BRANCH_2R_OFF16_FM<OPGROUP funct> : MipsR6Inst {
+ bits<5> rs;
+ bits<5> rt;
+ bits<16> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = funct.Value;
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-0} = offset;
+}
+
+// This class is ambiguous with other branches:
+// BLEZC/BGEZC/BEQZALC/BNEZALC/BGTZALC require that rs == 0 && rt != 0
+// The '1R_RT' in the name means 1 register in the rt field.
+class CMP_BRANCH_1R_RT_OFF16_FM<OPGROUP funct> : MipsR6Inst {
+ bits<5> rt;
+ bits<16> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = funct.Value;
+ let Inst{25-21} = 0b00000;
+ let Inst{20-16} = rt;
+ let Inst{15-0} = offset;
+}
+
+// This class is ambiguous with other branches:
+// BLTZC/BGTZC/BLTZALC/BGEZALC require that rs == rt && rt != 0
+// The '1R_BOTH' in the name means 1 register in both the rs and rt fields.
+class CMP_BRANCH_1R_BOTH_OFF16_FM<OPGROUP funct> : MipsR6Inst {
+ bits<5> rt;
+ bits<16> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = funct.Value;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rt;
+ let Inst{15-0} = offset;
+}
+
+class CMP_BRANCH_OFF21_FM<bits<6> funct> : MipsR6Inst {
+ bits<5> rs; // rs != 0
+ bits<21> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = funct;
+ let Inst{25-21} = rs;
+ let Inst{20-0} = offset;
+}
+
+class JMP_IDX_COMPACT_FM<bits<6> funct> : MipsR6Inst {
+ bits<5> rt;
+ bits<16> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = funct;
+ let Inst{25-21} = 0b000000;
+ let Inst{20-16} = rt;
+ let Inst{15-0} = offset;
+}
+
+class BRANCH_OFF26_FM<bits<6> funct> : MipsR6Inst {
+ bits<32> Inst;
+ bits<26> offset;
+
+ let Inst{31-26} = funct;
+ let Inst{25-0} = offset;
+}
+
+class SPECIAL3_ALIGN_FM<OPCODE6 Operation> : MipsR6Inst {
+ bits<5> rd;
+ bits<5> rs;
+ bits<5> rt;
+ bits<2> bp;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_SPECIAL3.Value;
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = rd;
+ let Inst{10-8} = 0b010;
+ let Inst{7-6} = bp;
+ let Inst{5-0} = Operation.Value;
+}
+
+class SPECIAL3_DALIGN_FM<OPCODE6 Operation> : MipsR6Inst {
+ bits<5> rd;
+ bits<5> rs;
+ bits<5> rt;
+ bits<3> bp;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_SPECIAL3.Value;
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = rd;
+ let Inst{10-9} = 0b01;
+ let Inst{8-6} = bp;
+ let Inst{5-0} = Operation.Value;
+}
+
+class SPECIAL3_LL_SC_FM<OPCODE6 Operation> : MipsR6Inst {
+ bits<5> rt;
+ bits<21> addr;
+ bits<5> base = addr{20-16};
+ bits<9> offset = addr{8-0};
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_SPECIAL3.Value;
+ let Inst{25-21} = base;
+ let Inst{20-16} = rt;
+ let Inst{15-7} = offset;
+ let Inst{5-0} = Operation.Value;
+
+ string DecoderMethod = "DecodeSpecial3LlSc";
+}
+
+class SPECIAL_LSA_FM<OPCODE6 Operation> : MipsR6Inst {
+ bits<5> rd;
+ bits<5> rs;
+ bits<5> rt;
+ bits<2> imm2;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_SPECIAL.Value;
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = rd;
+ let Inst{10-8} = 0b000;
+ let Inst{7-6} = imm2;
+ let Inst{5-0} = Operation.Value;
+}
+
+class REGIMM_FM<OPCODE5 Operation> : MipsR6Inst {
+ bits<5> rs;
+ bits<16> imm;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_REGIMM.Value;
+ let Inst{25-21} = rs;
+ let Inst{20-16} = Operation.Value;
+ let Inst{15-0} = imm;
+}
+
+class COP1_CMP_CONDN_FM<FIELD_CMP_FORMAT Format,
+ FIELD_CMP_COND Cond> : MipsR6Inst {
+ bits<5> fd;
+ bits<5> fs;
+ bits<5> ft;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_COP1.Value;
+ let Inst{25-21} = Format.Value;
+ let Inst{20-16} = ft;
+ let Inst{15-11} = fs;
+ let Inst{10-6} = fd;
+ let Inst{5} = 0;
+ let Inst{4-0} = Cond.Value;
+}
+
+class JR_HB_R6_FM<OPCODE6 Operation> : MipsR6Inst {
+ bits<5> rs;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_SPECIAL.Value;
+ let Inst{25-21} = rs;
+ let Inst{20-16} = 0;
+ let Inst{15-11} = 0;
+ let Inst{10} = 1;
+ let Inst{9-6} = 0;
+ let Inst{5-0} = Operation.Value;
+}
+
+class COP2LDST_FM<OPCODE5 Operation> : MipsR6Inst {
+ bits<5> rt;
+ bits<21> addr;
+ bits<5> base = addr{20-16};
+ bits<11> offset = addr{10-0};
+
+ bits<32> Inst;
+
+ let Inst{31-26} = OPGROUP_COP2LDST.Value;
+ let Inst{25-21} = Operation.Value;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = base;
+ let Inst{10-0} = offset;
+}
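
Each encoding class in this new file assigns named fields into bit ranges of a 32-bit Inst value. As a rough illustration of how those "let Inst{hi-lo}" assignments compose into an instruction word, here is a small C++ sketch mirroring the AUI_FM layout (opgroup in bits 31-26, rs in 25-21, rt in 20-16, imm in 15-0); the function name encodeAUI is purely illustrative and not part of the backend.

  // Sketch: packing the AUI_FM fields into a 32-bit instruction word.
  #include <cstdint>

  uint32_t encodeAUI(uint32_t rs, uint32_t rt, uint32_t imm16) {
    const uint32_t OPGROUP_AUI = 0b001111;  // matches the def above
    uint32_t Inst = 0;
    Inst |= (OPGROUP_AUI & 0x3F) << 26;     // Inst{31-26}
    Inst |= (rs & 0x1F) << 21;              // Inst{25-21}
    Inst |= (rt & 0x1F) << 16;              // Inst{20-16}
    Inst |= (imm16 & 0xFFFF);               // Inst{15-0}
    return Inst;
  }
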
diff --git a/contrib/llvm/lib/Target/Mips/Mips32r6InstrInfo.td b/contrib/llvm/lib/Target/Mips/Mips32r6InstrInfo.td
new file mode 100644
index 0000000..6d6735b
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/Mips32r6InstrInfo.td
@@ -0,0 +1,824 @@
+//=- Mips32r6InstrInfo.td - Mips32r6 Instruction Information -*- tablegen -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes Mips32r6 instructions.
+//
+//===----------------------------------------------------------------------===//
+
+include "Mips32r6InstrFormats.td"
+
+// Notes about removals/changes from MIPS32r6:
+// Reencoded: jr -> jalr
+// Reencoded: jr.hb -> jalr.hb
+
+def brtarget21 : Operand<OtherVT> {
+ let EncoderMethod = "getBranchTarget21OpValue";
+ let OperandType = "OPERAND_PCREL";
+ let DecoderMethod = "DecodeBranchTarget21";
+ let ParserMatchClass = MipsJumpTargetAsmOperand;
+}
+
+def brtarget26 : Operand<OtherVT> {
+ let EncoderMethod = "getBranchTarget26OpValue";
+ let OperandType = "OPERAND_PCREL";
+ let DecoderMethod = "DecodeBranchTarget26";
+ let ParserMatchClass = MipsJumpTargetAsmOperand;
+}
+
+def jmpoffset16 : Operand<OtherVT> {
+ let EncoderMethod = "getJumpOffset16OpValue";
+ let ParserMatchClass = MipsJumpTargetAsmOperand;
+}
+
+def calloffset16 : Operand<iPTR> {
+ let EncoderMethod = "getJumpOffset16OpValue";
+ let ParserMatchClass = MipsJumpTargetAsmOperand;
+}
+
+//===----------------------------------------------------------------------===//
+//
+// Instruction Encodings
+//
+//===----------------------------------------------------------------------===//
+
+class ADDIUPC_ENC : PCREL19_FM<OPCODE2_ADDIUPC>;
+class ALIGN_ENC : SPECIAL3_ALIGN_FM<OPCODE6_ALIGN>;
+class ALUIPC_ENC : PCREL16_FM<OPCODE5_ALUIPC>;
+class AUI_ENC : AUI_FM;
+class AUIPC_ENC : PCREL16_FM<OPCODE5_AUIPC>;
+
+class BAL_ENC : BAL_FM;
+class BALC_ENC : BRANCH_OFF26_FM<0b111010>;
+class BC_ENC : BRANCH_OFF26_FM<0b110010>;
+class BEQC_ENC : CMP_BRANCH_2R_OFF16_FM<OPGROUP_ADDI>,
+ DecodeDisambiguates<"AddiGroupBranch">;
+class BEQZALC_ENC : CMP_BRANCH_1R_RT_OFF16_FM<OPGROUP_ADDI>,
+ DecodeDisambiguatedBy<"DaddiGroupBranch">;
+class BNEC_ENC : CMP_BRANCH_2R_OFF16_FM<OPGROUP_DADDI>,
+ DecodeDisambiguates<"DaddiGroupBranch">;
+class BNEZALC_ENC : CMP_BRANCH_1R_RT_OFF16_FM<OPGROUP_DADDI>,
+ DecodeDisambiguatedBy<"DaddiGroupBranch">;
+
+class BLTZC_ENC : CMP_BRANCH_1R_BOTH_OFF16_FM<OPGROUP_BGTZL>,
+ DecodeDisambiguates<"BgtzlGroupBranch">;
+class BGEC_ENC : CMP_BRANCH_2R_OFF16_FM<OPGROUP_BLEZL>,
+ DecodeDisambiguatedBy<"BlezlGroupBranch">;
+class BGEUC_ENC : CMP_BRANCH_2R_OFF16_FM<OPGROUP_BLEZ>,
+ DecodeDisambiguatedBy<"BlezGroupBranch">;
+class BGEZC_ENC : CMP_BRANCH_1R_BOTH_OFF16_FM<OPGROUP_BLEZL>,
+ DecodeDisambiguates<"BlezlGroupBranch">;
+class BGTZALC_ENC : CMP_BRANCH_1R_RT_OFF16_FM<OPGROUP_BGTZ>,
+ DecodeDisambiguatedBy<"BgtzGroupBranch">;
+
+class BLTC_ENC : CMP_BRANCH_2R_OFF16_FM<OPGROUP_BGTZL>,
+ DecodeDisambiguatedBy<"BgtzlGroupBranch">;
+class BLTUC_ENC : CMP_BRANCH_2R_OFF16_FM<OPGROUP_BGTZ>,
+ DecodeDisambiguatedBy<"BgtzGroupBranch">;
+
+class BLEZC_ENC : CMP_BRANCH_1R_RT_OFF16_FM<OPGROUP_BLEZL>,
+ DecodeDisambiguatedBy<"BlezlGroupBranch">;
+class BLTZALC_ENC : CMP_BRANCH_1R_BOTH_OFF16_FM<OPGROUP_BGTZ>,
+ DecodeDisambiguates<"BgtzGroupBranch">;
+class BGTZC_ENC : CMP_BRANCH_1R_RT_OFF16_FM<OPGROUP_BGTZL>,
+ DecodeDisambiguatedBy<"BgtzlGroupBranch">;
+
+class BEQZC_ENC : CMP_BRANCH_OFF21_FM<0b110110>;
+class BGEZALC_ENC : CMP_BRANCH_1R_BOTH_OFF16_FM<OPGROUP_BLEZ>,
+ DecodeDisambiguates<"BlezGroupBranch">;
+class BNEZC_ENC : CMP_BRANCH_OFF21_FM<0b111110>;
+
+class BC1EQZ_ENC : COP1_BCCZ_FM<OPCODE5_BC1EQZ>;
+class BC1NEZ_ENC : COP1_BCCZ_FM<OPCODE5_BC1NEZ>;
+class BC2EQZ_ENC : COP2_BCCZ_FM<OPCODE5_BC2EQZ>;
+class BC2NEZ_ENC : COP2_BCCZ_FM<OPCODE5_BC2NEZ>;
+
+class JIALC_ENC : JMP_IDX_COMPACT_FM<0b111110>;
+class JIC_ENC : JMP_IDX_COMPACT_FM<0b110110>;
+class JR_HB_R6_ENC : JR_HB_R6_FM<OPCODE6_JALR>;
+class BITSWAP_ENC : SPECIAL3_2R_FM<OPCODE6_BITSWAP>;
+class BLEZALC_ENC : CMP_BRANCH_1R_RT_OFF16_FM<OPGROUP_BLEZ>,
+ DecodeDisambiguatedBy<"BlezGroupBranch">;
+class BNVC_ENC : CMP_BRANCH_2R_OFF16_FM<OPGROUP_DADDI>,
+ DecodeDisambiguatedBy<"DaddiGroupBranch">;
+class BOVC_ENC : CMP_BRANCH_2R_OFF16_FM<OPGROUP_ADDI>,
+ DecodeDisambiguatedBy<"AddiGroupBranch">;
+class DIV_ENC : SPECIAL_3R_FM<0b00010, 0b011010>;
+class DIVU_ENC : SPECIAL_3R_FM<0b00010, 0b011011>;
+class MOD_ENC : SPECIAL_3R_FM<0b00011, 0b011010>;
+class MODU_ENC : SPECIAL_3R_FM<0b00011, 0b011011>;
+class MUH_ENC : SPECIAL_3R_FM<0b00011, 0b011000>;
+class MUHU_ENC : SPECIAL_3R_FM<0b00011, 0b011001>;
+class MUL_R6_ENC : SPECIAL_3R_FM<0b00010, 0b011000>;
+class MULU_ENC : SPECIAL_3R_FM<0b00010, 0b011001>;
+
+class MADDF_S_ENC : COP1_3R_FM<0b011000, FIELD_FMT_S>;
+class MADDF_D_ENC : COP1_3R_FM<0b011000, FIELD_FMT_D>;
+class MSUBF_S_ENC : COP1_3R_FM<0b011001, FIELD_FMT_S>;
+class MSUBF_D_ENC : COP1_3R_FM<0b011001, FIELD_FMT_D>;
+
+class SEL_D_ENC : COP1_3R_FM<0b010000, FIELD_FMT_D>;
+class SEL_S_ENC : COP1_3R_FM<0b010000, FIELD_FMT_S>;
+
+class SELEQZ_ENC : SPECIAL_3R_FM<0b00000, 0b110101>;
+class SELNEZ_ENC : SPECIAL_3R_FM<0b00000, 0b110111>;
+
+class LWPC_ENC : PCREL19_FM<OPCODE2_LWPC>;
+class LWUPC_ENC : PCREL19_FM<OPCODE2_LWUPC>;
+
+class MAX_S_ENC : COP1_3R_FM<0b011101, FIELD_FMT_S>;
+class MAX_D_ENC : COP1_3R_FM<0b011101, FIELD_FMT_D>;
+class MIN_S_ENC : COP1_3R_FM<0b011100, FIELD_FMT_S>;
+class MIN_D_ENC : COP1_3R_FM<0b011100, FIELD_FMT_D>;
+
+class MAXA_S_ENC : COP1_3R_FM<0b011111, FIELD_FMT_S>;
+class MAXA_D_ENC : COP1_3R_FM<0b011111, FIELD_FMT_D>;
+class MINA_S_ENC : COP1_3R_FM<0b011110, FIELD_FMT_S>;
+class MINA_D_ENC : COP1_3R_FM<0b011110, FIELD_FMT_D>;
+
+class SELEQZ_S_ENC : COP1_3R_FM<0b010100, FIELD_FMT_S>;
+class SELEQZ_D_ENC : COP1_3R_FM<0b010100, FIELD_FMT_D>;
+class SELNEZ_S_ENC : COP1_3R_FM<0b010111, FIELD_FMT_S>;
+class SELNEZ_D_ENC : COP1_3R_FM<0b010111, FIELD_FMT_D>;
+
+class RINT_S_ENC : COP1_2R_FM<0b011010, FIELD_FMT_S>;
+class RINT_D_ENC : COP1_2R_FM<0b011010, FIELD_FMT_D>;
+class CLASS_S_ENC : COP1_2R_FM<0b011011, FIELD_FMT_S>;
+class CLASS_D_ENC : COP1_2R_FM<0b011011, FIELD_FMT_D>;
+
+class CACHE_ENC : SPECIAL3_MEM_FM<OPCODE6_CACHE>;
+class PREF_ENC : SPECIAL3_MEM_FM<OPCODE6_PREF>;
+
+class LDC2_R6_ENC : COP2LDST_FM<OPCODE5_LDC2>;
+class LWC2_R6_ENC : COP2LDST_FM<OPCODE5_LWC2>;
+class SDC2_R6_ENC : COP2LDST_FM<OPCODE5_SDC2>;
+class SWC2_R6_ENC : COP2LDST_FM<OPCODE5_SWC2>;
+
+class LSA_R6_ENC : SPECIAL_LSA_FM<OPCODE6_LSA>;
+
+class LL_R6_ENC : SPECIAL3_LL_SC_FM<OPCODE6_LL>;
+class SC_R6_ENC : SPECIAL3_LL_SC_FM<OPCODE6_SC>;
+
+class CLO_R6_ENC : SPECIAL_2R_FM<OPCODE6_CLO>;
+class CLZ_R6_ENC : SPECIAL_2R_FM<OPCODE6_CLZ>;
+
+class SDBBP_R6_ENC : SPECIAL_SDBBP_FM;
+
+//===----------------------------------------------------------------------===//
+//
+// Instruction Multiclasses
+//
+//===----------------------------------------------------------------------===//
+
+class CMP_CONDN_DESC_BASE<string CondStr, string Typestr,
+ RegisterOperand FGROpnd,
+ SDPatternOperator Op = null_frag> {
+ dag OutOperandList = (outs FGRCCOpnd:$fd);
+ dag InOperandList = (ins FGROpnd:$fs, FGROpnd:$ft);
+ string AsmString = !strconcat("cmp.", CondStr, ".", Typestr, "\t$fd, $fs, $ft");
+ list<dag> Pattern = [(set FGRCCOpnd:$fd, (Op FGROpnd:$fs, FGROpnd:$ft))];
+}
+
+multiclass CMP_CC_M <FIELD_CMP_FORMAT Format, string Typestr,
+ RegisterOperand FGROpnd>{
+ def CMP_F_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_AF>,
+ CMP_CONDN_DESC_BASE<"af", Typestr, FGROpnd>,
+ ISA_MIPS32R6;
+ def CMP_UN_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_UN>,
+ CMP_CONDN_DESC_BASE<"un", Typestr, FGROpnd, setuo>,
+ ISA_MIPS32R6;
+ def CMP_EQ_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_EQ>,
+ CMP_CONDN_DESC_BASE<"eq", Typestr, FGROpnd, setoeq>,
+ ISA_MIPS32R6;
+ def CMP_UEQ_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_UEQ>,
+ CMP_CONDN_DESC_BASE<"ueq", Typestr, FGROpnd, setueq>,
+ ISA_MIPS32R6;
+ def CMP_LT_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_LT>,
+ CMP_CONDN_DESC_BASE<"lt", Typestr, FGROpnd, setolt>,
+ ISA_MIPS32R6;
+ def CMP_ULT_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_ULT>,
+ CMP_CONDN_DESC_BASE<"ult", Typestr, FGROpnd, setult>,
+ ISA_MIPS32R6;
+ def CMP_LE_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_LE>,
+ CMP_CONDN_DESC_BASE<"le", Typestr, FGROpnd, setole>,
+ ISA_MIPS32R6;
+ def CMP_ULE_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_ULE>,
+ CMP_CONDN_DESC_BASE<"ule", Typestr, FGROpnd, setule>,
+ ISA_MIPS32R6;
+ def CMP_SAF_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_SAF>,
+ CMP_CONDN_DESC_BASE<"saf", Typestr, FGROpnd>,
+ ISA_MIPS32R6;
+ def CMP_SUN_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_SUN>,
+ CMP_CONDN_DESC_BASE<"sun", Typestr, FGROpnd>,
+ ISA_MIPS32R6;
+ def CMP_SEQ_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_SEQ>,
+ CMP_CONDN_DESC_BASE<"seq", Typestr, FGROpnd>,
+ ISA_MIPS32R6;
+ def CMP_SUEQ_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_SUEQ>,
+ CMP_CONDN_DESC_BASE<"sueq", Typestr, FGROpnd>,
+ ISA_MIPS32R6;
+ def CMP_SLT_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_SLT>,
+ CMP_CONDN_DESC_BASE<"slt", Typestr, FGROpnd>,
+ ISA_MIPS32R6;
+ def CMP_SULT_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_SULT>,
+ CMP_CONDN_DESC_BASE<"sult", Typestr, FGROpnd>,
+ ISA_MIPS32R6;
+ def CMP_SLE_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_SLE>,
+ CMP_CONDN_DESC_BASE<"sle", Typestr, FGROpnd>,
+ ISA_MIPS32R6;
+ def CMP_SULE_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_SULE>,
+ CMP_CONDN_DESC_BASE<"sule", Typestr, FGROpnd>,
+ ISA_MIPS32R6;
+}
+
+//===----------------------------------------------------------------------===//
+//
+// Instruction Descriptions
+//
+//===----------------------------------------------------------------------===//
+
+class PCREL_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ Operand ImmOpnd> {
+ dag OutOperandList = (outs GPROpnd:$rs);
+ dag InOperandList = (ins ImmOpnd:$imm);
+ string AsmString = !strconcat(instr_asm, "\t$rs, $imm");
+ list<dag> Pattern = [];
+}
+
+class ADDIUPC_DESC : PCREL_DESC_BASE<"addiupc", GPR32Opnd, simm19_lsl2>;
+class LWPC_DESC: PCREL_DESC_BASE<"lwpc", GPR32Opnd, simm19_lsl2>;
+class LWUPC_DESC: PCREL_DESC_BASE<"lwupc", GPR32Opnd, simm19_lsl2>;
+
+class ALIGN_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ Operand ImmOpnd> {
+ dag OutOperandList = (outs GPROpnd:$rd);
+ dag InOperandList = (ins GPROpnd:$rs, GPROpnd:$rt, ImmOpnd:$bp);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt, $bp");
+ list<dag> Pattern = [];
+}
+
+class ALIGN_DESC : ALIGN_DESC_BASE<"align", GPR32Opnd, uimm2>;
+
+class ALUIPC_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
+ dag OutOperandList = (outs GPROpnd:$rs);
+ dag InOperandList = (ins simm16:$imm);
+ string AsmString = !strconcat(instr_asm, "\t$rs, $imm");
+ list<dag> Pattern = [];
+}
+
+class ALUIPC_DESC : ALUIPC_DESC_BASE<"aluipc", GPR32Opnd>;
+class AUIPC_DESC : ALUIPC_DESC_BASE<"auipc", GPR32Opnd>;
+
+class AUI_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
+ dag OutOperandList = (outs GPROpnd:$rs);
+ dag InOperandList = (ins GPROpnd:$rt, simm16:$imm);
+ string AsmString = !strconcat(instr_asm, "\t$rs, $rt, $imm");
+ list<dag> Pattern = [];
+}
+
+class AUI_DESC : AUI_DESC_BASE<"aui", GPR32Opnd>;
+
+class BRANCH_DESC_BASE {
+ bit isBranch = 1;
+ bit isTerminator = 1;
+ bit hasDelaySlot = 0;
+}
+
+class BC_DESC_BASE<string instr_asm, DAGOperand opnd> : BRANCH_DESC_BASE {
+ dag InOperandList = (ins opnd:$offset);
+ dag OutOperandList = (outs);
+ string AsmString = !strconcat(instr_asm, "\t$offset");
+ bit isBarrier = 1;
+}
+
+class CMP_BC_DESC_BASE<string instr_asm, DAGOperand opnd,
+ RegisterOperand GPROpnd> : BRANCH_DESC_BASE {
+ dag InOperandList = (ins GPROpnd:$rs, GPROpnd:$rt, opnd:$offset);
+ dag OutOperandList = (outs);
+ string AsmString = !strconcat(instr_asm, "\t$rs, $rt, $offset");
+ list<Register> Defs = [AT];
+}
+
+class CMP_CBR_EQNE_Z_DESC_BASE<string instr_asm, DAGOperand opnd,
+ RegisterOperand GPROpnd> : BRANCH_DESC_BASE {
+ dag InOperandList = (ins GPROpnd:$rs, opnd:$offset);
+ dag OutOperandList = (outs);
+ string AsmString = !strconcat(instr_asm, "\t$rs, $offset");
+ list<Register> Defs = [AT];
+}
+
+class CMP_CBR_RT_Z_DESC_BASE<string instr_asm, DAGOperand opnd,
+ RegisterOperand GPROpnd> : BRANCH_DESC_BASE {
+ dag InOperandList = (ins GPROpnd:$rt, opnd:$offset);
+ dag OutOperandList = (outs);
+ string AsmString = !strconcat(instr_asm, "\t$rt, $offset");
+ list<Register> Defs = [AT];
+}
+
+class BAL_DESC : BC_DESC_BASE<"bal", brtarget> {
+ bit isCall = 1;
+ bit hasDelaySlot = 1;
+ list<Register> Defs = [RA];
+}
+
+class BALC_DESC : BC_DESC_BASE<"balc", brtarget26> {
+ bit isCall = 1;
+ list<Register> Defs = [RA];
+}
+
+class BC_DESC : BC_DESC_BASE<"bc", brtarget26>;
+class BGEC_DESC : CMP_BC_DESC_BASE<"bgec", brtarget, GPR32Opnd>;
+class BGEUC_DESC : CMP_BC_DESC_BASE<"bgeuc", brtarget, GPR32Opnd>;
+class BEQC_DESC : CMP_BC_DESC_BASE<"beqc", brtarget, GPR32Opnd>;
+class BNEC_DESC : CMP_BC_DESC_BASE<"bnec", brtarget, GPR32Opnd>;
+
+class BLTC_DESC : CMP_BC_DESC_BASE<"bltc", brtarget, GPR32Opnd>;
+class BLTUC_DESC : CMP_BC_DESC_BASE<"bltuc", brtarget, GPR32Opnd>;
+
+class BLTZC_DESC : CMP_CBR_RT_Z_DESC_BASE<"bltzc", brtarget, GPR32Opnd>;
+class BGEZC_DESC : CMP_CBR_RT_Z_DESC_BASE<"bgezc", brtarget, GPR32Opnd>;
+
+class BLEZC_DESC : CMP_CBR_RT_Z_DESC_BASE<"blezc", brtarget, GPR32Opnd>;
+class BGTZC_DESC : CMP_CBR_RT_Z_DESC_BASE<"bgtzc", brtarget, GPR32Opnd>;
+
+class BEQZC_DESC : CMP_CBR_EQNE_Z_DESC_BASE<"beqzc", brtarget21, GPR32Opnd>;
+class BNEZC_DESC : CMP_CBR_EQNE_Z_DESC_BASE<"bnezc", brtarget21, GPR32Opnd>;
+
+class COP1_BCCZ_DESC_BASE<string instr_asm> : BRANCH_DESC_BASE {
+ dag InOperandList = (ins FGR64Opnd:$ft, brtarget:$offset);
+ dag OutOperandList = (outs);
+ string AsmString = instr_asm;
+ bit hasDelaySlot = 1;
+}
+
+class BC1EQZ_DESC : COP1_BCCZ_DESC_BASE<"bc1eqz $ft, $offset">;
+class BC1NEZ_DESC : COP1_BCCZ_DESC_BASE<"bc1nez $ft, $offset">;
+
+class COP2_BCCZ_DESC_BASE<string instr_asm> : BRANCH_DESC_BASE {
+ dag InOperandList = (ins COP2Opnd:$ct, brtarget:$offset);
+ dag OutOperandList = (outs);
+ string AsmString = instr_asm;
+ bit hasDelaySlot = 1;
+}
+
+class BC2EQZ_DESC : COP2_BCCZ_DESC_BASE<"bc2eqz $ct, $offset">;
+class BC2NEZ_DESC : COP2_BCCZ_DESC_BASE<"bc2nez $ct, $offset">;
+
+class BOVC_DESC : CMP_BC_DESC_BASE<"bovc", brtarget, GPR32Opnd>;
+class BNVC_DESC : CMP_BC_DESC_BASE<"bnvc", brtarget, GPR32Opnd>;
+
+class JMP_IDX_COMPACT_DESC_BASE<string opstr, DAGOperand opnd,
+ RegisterOperand GPROpnd> {
+ dag InOperandList = (ins GPROpnd:$rt, opnd:$offset);
+ string AsmString = !strconcat(opstr, "\t$rt, $offset");
+ list<dag> Pattern = [];
+ bit isTerminator = 1;
+ bit hasDelaySlot = 0;
+ string DecoderMethod = "DecodeSimm16";
+}
+
+class JIALC_DESC : JMP_IDX_COMPACT_DESC_BASE<"jialc", calloffset16,
+ GPR32Opnd> {
+ bit isCall = 1;
+ list<Register> Defs = [RA];
+}
+
+class JIC_DESC : JMP_IDX_COMPACT_DESC_BASE<"jic", jmpoffset16, GPR32Opnd> {
+ bit isBarrier = 1;
+ list<Register> Defs = [AT];
+}
+
+class JR_HB_R6_DESC : JR_HB_DESC_BASE<"jr.hb", GPR32Opnd> {
+ bit isBranch = 1;
+ bit isIndirectBranch = 1;
+ bit hasDelaySlot = 1;
+ bit isTerminator=1;
+ bit isBarrier=1;
+}
+
+class BITSWAP_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
+ dag OutOperandList = (outs GPROpnd:$rd);
+ dag InOperandList = (ins GPROpnd:$rt);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $rt");
+ list<dag> Pattern = [];
+}
+
+class BITSWAP_DESC : BITSWAP_DESC_BASE<"bitswap", GPR32Opnd>;
+
+class DIVMOD_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ SDPatternOperator Op=null_frag> {
+ dag OutOperandList = (outs GPROpnd:$rd);
+ dag InOperandList = (ins GPROpnd:$rs, GPROpnd:$rt);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt");
+ list<dag> Pattern = [(set GPROpnd:$rd, (Op GPROpnd:$rs, GPROpnd:$rt))];
+
+ // This instruction doesn't trap division by zero itself. We must insert
+ // teq instructions as well.
+ bit usesCustomInserter = 1;
+}
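+
+// Rough illustration of the custom insertion mentioned above (register names
+// are examples): a selected "div $2, $4, $5" is expected to be followed by a
+// divide-by-zero trap check along the lines of "teq $5, $zero, 7" (trap if
+// the divisor is zero), unless zero-division checking has been disabled.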
+
+class DIV_DESC : DIVMOD_DESC_BASE<"div", GPR32Opnd, sdiv>;
+class DIVU_DESC : DIVMOD_DESC_BASE<"divu", GPR32Opnd, udiv>;
+class MOD_DESC : DIVMOD_DESC_BASE<"mod", GPR32Opnd, srem>;
+class MODU_DESC : DIVMOD_DESC_BASE<"modu", GPR32Opnd, urem>;
+
+class BEQZALC_DESC : CMP_CBR_RT_Z_DESC_BASE<"beqzalc", brtarget, GPR32Opnd> {
+ list<Register> Defs = [RA];
+}
+
+class BGEZALC_DESC : CMP_CBR_RT_Z_DESC_BASE<"bgezalc", brtarget, GPR32Opnd> {
+ list<Register> Defs = [RA];
+}
+
+class BGTZALC_DESC : CMP_CBR_RT_Z_DESC_BASE<"bgtzalc", brtarget, GPR32Opnd> {
+ list<Register> Defs = [RA];
+}
+
+class BLEZALC_DESC : CMP_CBR_RT_Z_DESC_BASE<"blezalc", brtarget, GPR32Opnd> {
+ list<Register> Defs = [RA];
+}
+
+class BLTZALC_DESC : CMP_CBR_RT_Z_DESC_BASE<"bltzalc", brtarget, GPR32Opnd> {
+ list<Register> Defs = [RA];
+}
+
+class BNEZALC_DESC : CMP_CBR_RT_Z_DESC_BASE<"bnezalc", brtarget, GPR32Opnd> {
+ list<Register> Defs = [RA];
+}
+
+class MUL_R6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ SDPatternOperator Op=null_frag> {
+ dag OutOperandList = (outs GPROpnd:$rd);
+ dag InOperandList = (ins GPROpnd:$rs, GPROpnd:$rt);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt");
+ list<dag> Pattern = [(set GPROpnd:$rd, (Op GPROpnd:$rs, GPROpnd:$rt))];
+}
+
+class MUH_DESC : MUL_R6_DESC_BASE<"muh", GPR32Opnd, mulhs>;
+class MUHU_DESC : MUL_R6_DESC_BASE<"muhu", GPR32Opnd, mulhu>;
+class MUL_R6_DESC : MUL_R6_DESC_BASE<"mul", GPR32Opnd, mul>;
+class MULU_DESC : MUL_R6_DESC_BASE<"mulu", GPR32Opnd>;
+
+class COP1_SEL_DESC_BASE<string instr_asm, RegisterOperand FGROpnd> {
+ dag OutOperandList = (outs FGROpnd:$fd);
+ dag InOperandList = (ins FGRCCOpnd:$fd_in, FGROpnd:$fs, FGROpnd:$ft);
+ string AsmString = !strconcat(instr_asm, "\t$fd, $fs, $ft");
+ list<dag> Pattern = [(set FGROpnd:$fd, (select FGRCCOpnd:$fd_in,
+ FGROpnd:$ft,
+ FGROpnd:$fs))];
+ string Constraints = "$fd_in = $fd";
+}
+
+class SEL_D_DESC : COP1_SEL_DESC_BASE<"sel.d", FGR64Opnd> {
+ // We must insert a SUBREG_TO_REG around $fd_in
+ bit usesCustomInserter = 1;
+}
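+// A sketch of what the custom inserter above has to do: the condition operand
+// $fd_in comes from the 32-bit FGRCC class while sel.d reads a 64-bit FPR, so
+// the condition is wrapped roughly as (SUBREG_TO_REG (i64 0), $fd_in, sub_lo)
+// before the real SEL_D is emitted.
+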
+class SEL_S_DESC : COP1_SEL_DESC_BASE<"sel.s", FGR32Opnd>;
+
+class SELEQNE_Z_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
+ dag OutOperandList = (outs GPROpnd:$rd);
+ dag InOperandList = (ins GPROpnd:$rs, GPROpnd:$rt);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt");
+ list<dag> Pattern = [];
+}
+
+class SELEQZ_DESC : SELEQNE_Z_DESC_BASE<"seleqz", GPR32Opnd>;
+class SELNEZ_DESC : SELEQNE_Z_DESC_BASE<"selnez", GPR32Opnd>;
+
+class COP1_4R_DESC_BASE<string instr_asm, RegisterOperand FGROpnd> {
+ dag OutOperandList = (outs FGROpnd:$fd);
+ dag InOperandList = (ins FGROpnd:$fd_in, FGROpnd:$fs, FGROpnd:$ft);
+ string AsmString = !strconcat(instr_asm, "\t$fd, $fs, $ft");
+ list<dag> Pattern = [];
+ string Constraints = "$fd_in = $fd";
+}
+
+class MADDF_S_DESC : COP1_4R_DESC_BASE<"maddf.s", FGR32Opnd>;
+class MADDF_D_DESC : COP1_4R_DESC_BASE<"maddf.d", FGR64Opnd>;
+class MSUBF_S_DESC : COP1_4R_DESC_BASE<"msubf.s", FGR32Opnd>;
+class MSUBF_D_DESC : COP1_4R_DESC_BASE<"msubf.d", FGR64Opnd>;
+
+class MAX_MIN_DESC_BASE<string instr_asm, RegisterOperand FGROpnd> {
+ dag OutOperandList = (outs FGROpnd:$fd);
+ dag InOperandList = (ins FGROpnd:$fs, FGROpnd:$ft);
+ string AsmString = !strconcat(instr_asm, "\t$fd, $fs, $ft");
+ list<dag> Pattern = [];
+}
+
+class MAX_S_DESC : MAX_MIN_DESC_BASE<"max.s", FGR32Opnd>;
+class MAX_D_DESC : MAX_MIN_DESC_BASE<"max.d", FGR64Opnd>;
+class MIN_S_DESC : MAX_MIN_DESC_BASE<"min.s", FGR32Opnd>;
+class MIN_D_DESC : MAX_MIN_DESC_BASE<"min.d", FGR64Opnd>;
+
+class MAXA_S_DESC : MAX_MIN_DESC_BASE<"maxa.s", FGR32Opnd>;
+class MAXA_D_DESC : MAX_MIN_DESC_BASE<"maxa.d", FGR64Opnd>;
+class MINA_S_DESC : MAX_MIN_DESC_BASE<"mina.s", FGR32Opnd>;
+class MINA_D_DESC : MAX_MIN_DESC_BASE<"mina.d", FGR64Opnd>;
+
+class SELEQNEZ_DESC_BASE<string instr_asm, RegisterOperand FGROpnd> {
+ dag OutOperandList = (outs FGROpnd:$fd);
+ dag InOperandList = (ins FGROpnd:$fs, FGROpnd:$ft);
+ string AsmString = !strconcat(instr_asm, "\t$fd, $fs, $ft");
+ list<dag> Pattern = [];
+}
+
+class SELEQZ_S_DESC : SELEQNEZ_DESC_BASE<"seleqz.s", FGR32Opnd>;
+class SELEQZ_D_DESC : SELEQNEZ_DESC_BASE<"seleqz.d", FGR64Opnd>;
+class SELNEZ_S_DESC : SELEQNEZ_DESC_BASE<"selnez.s", FGR32Opnd>;
+class SELNEZ_D_DESC : SELEQNEZ_DESC_BASE<"selnez.d", FGR64Opnd>;
+
+class CLASS_RINT_DESC_BASE<string instr_asm, RegisterOperand FGROpnd> {
+ dag OutOperandList = (outs FGROpnd:$fd);
+ dag InOperandList = (ins FGROpnd:$fs);
+ string AsmString = !strconcat(instr_asm, "\t$fd, $fs");
+ list<dag> Pattern = [];
+}
+
+class RINT_S_DESC : CLASS_RINT_DESC_BASE<"rint.s", FGR32Opnd>;
+class RINT_D_DESC : CLASS_RINT_DESC_BASE<"rint.d", FGR64Opnd>;
+class CLASS_S_DESC : CLASS_RINT_DESC_BASE<"class.s", FGR32Opnd>;
+class CLASS_D_DESC : CLASS_RINT_DESC_BASE<"class.d", FGR64Opnd>;
+
+class CACHE_HINT_DESC<string instr_asm, Operand MemOpnd,
+ RegisterOperand GPROpnd> {
+ dag OutOperandList = (outs);
+ dag InOperandList = (ins MemOpnd:$addr, uimm5:$hint);
+ string AsmString = !strconcat(instr_asm, "\t$hint, $addr");
+ list<dag> Pattern = [];
+}
+
+class CACHE_DESC : CACHE_HINT_DESC<"cache", mem_simm9, GPR32Opnd>;
+class PREF_DESC : CACHE_HINT_DESC<"pref", mem_simm9, GPR32Opnd>;
+
+class COP2LD_DESC_BASE<string instr_asm, RegisterOperand COPOpnd> {
+ dag OutOperandList = (outs COPOpnd:$rt);
+ dag InOperandList = (ins mem_simm11:$addr);
+ string AsmString = !strconcat(instr_asm, "\t$rt, $addr");
+ list<dag> Pattern = [];
+ bit mayLoad = 1;
+}
+
+class LDC2_R6_DESC : COP2LD_DESC_BASE<"ldc2", COP2Opnd>;
+class LWC2_R6_DESC : COP2LD_DESC_BASE<"lwc2", COP2Opnd>;
+
+class COP2ST_DESC_BASE<string instr_asm, RegisterOperand COPOpnd> {
+ dag OutOperandList = (outs);
+ dag InOperandList = (ins COPOpnd:$rt, mem_simm11:$addr);
+ string AsmString = !strconcat(instr_asm, "\t$rt, $addr");
+ list<dag> Pattern = [];
+ bit mayStore = 1;
+}
+
+class SDC2_R6_DESC : COP2ST_DESC_BASE<"sdc2", COP2Opnd>;
+class SWC2_R6_DESC : COP2ST_DESC_BASE<"swc2", COP2Opnd>;
+
+class LSA_R6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ Operand ImmOpnd> {
+ dag OutOperandList = (outs GPROpnd:$rd);
+ dag InOperandList = (ins GPROpnd:$rs, GPROpnd:$rt, ImmOpnd:$imm2);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt, $imm2");
+ list<dag> Pattern = [];
+}
+
+class LSA_R6_DESC : LSA_R6_DESC_BASE<"lsa", GPR32Opnd, uimm2>;
+
+class LL_R6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
+ dag OutOperandList = (outs GPROpnd:$rt);
+ dag InOperandList = (ins mem_simm9:$addr);
+ string AsmString = !strconcat(instr_asm, "\t$rt, $addr");
+ list<dag> Pattern = [];
+ bit mayLoad = 1;
+}
+
+class LL_R6_DESC : LL_R6_DESC_BASE<"ll", GPR32Opnd>;
+
+class SC_R6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
+ dag OutOperandList = (outs GPROpnd:$dst);
+ dag InOperandList = (ins GPROpnd:$rt, mem_simm9:$addr);
+ string AsmString = !strconcat(instr_asm, "\t$rt, $addr");
+ list<dag> Pattern = [];
+ bit mayStore = 1;
+ string Constraints = "$rt = $dst";
+}
+
+class SC_R6_DESC : SC_R6_DESC_BASE<"sc", GPR32Opnd>;
+
+class CLO_CLZ_R6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
+ dag OutOperandList = (outs GPROpnd:$rd);
+ dag InOperandList = (ins GPROpnd:$rs);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $rs");
+}
+
+class CLO_R6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> :
+ CLO_CLZ_R6_DESC_BASE<instr_asm, GPROpnd> {
+ list<dag> Pattern = [(set GPROpnd:$rd, (ctlz (not GPROpnd:$rs)))];
+}
+
+class CLZ_R6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> :
+ CLO_CLZ_R6_DESC_BASE<instr_asm, GPROpnd> {
+ list<dag> Pattern = [(set GPROpnd:$rd, (ctlz GPROpnd:$rs))];
+}
+
+class CLO_R6_DESC : CLO_R6_DESC_BASE<"clo", GPR32Opnd>;
+class CLZ_R6_DESC : CLZ_R6_DESC_BASE<"clz", GPR32Opnd>;
+
+class SDBBP_R6_DESC {
+ dag OutOperandList = (outs);
+ dag InOperandList = (ins uimm20:$code_);
+ string AsmString = "sdbbp\t$code_";
+ list<dag> Pattern = [];
+}
+
+//===----------------------------------------------------------------------===//
+//
+// Instruction Definitions
+//
+//===----------------------------------------------------------------------===//
+
+def ADDIUPC : ADDIUPC_ENC, ADDIUPC_DESC, ISA_MIPS32R6;
+def ALIGN : ALIGN_ENC, ALIGN_DESC, ISA_MIPS32R6;
+def ALUIPC : ALUIPC_ENC, ALUIPC_DESC, ISA_MIPS32R6;
+def AUI : AUI_ENC, AUI_DESC, ISA_MIPS32R6;
+def AUIPC : AUIPC_ENC, AUIPC_DESC, ISA_MIPS32R6;
+def BAL : BAL_ENC, BAL_DESC, ISA_MIPS32R6;
+def BALC : BALC_ENC, BALC_DESC, ISA_MIPS32R6;
+def BC1EQZ : BC1EQZ_ENC, BC1EQZ_DESC, ISA_MIPS32R6;
+def BC1NEZ : BC1NEZ_ENC, BC1NEZ_DESC, ISA_MIPS32R6;
+def BC2EQZ : BC2EQZ_ENC, BC2EQZ_DESC, ISA_MIPS32R6;
+def BC2NEZ : BC2NEZ_ENC, BC2NEZ_DESC, ISA_MIPS32R6;
+def BC : BC_ENC, BC_DESC, ISA_MIPS32R6;
+def BEQC : BEQC_ENC, BEQC_DESC, ISA_MIPS32R6;
+def BEQZALC : BEQZALC_ENC, BEQZALC_DESC, ISA_MIPS32R6;
+def BEQZC : BEQZC_ENC, BEQZC_DESC, ISA_MIPS32R6;
+def BGEC : BGEC_ENC, BGEC_DESC, ISA_MIPS32R6;
+def BGEUC : BGEUC_ENC, BGEUC_DESC, ISA_MIPS32R6;
+def BGEZALC : BGEZALC_ENC, BGEZALC_DESC, ISA_MIPS32R6;
+def BGEZC : BGEZC_ENC, BGEZC_DESC, ISA_MIPS32R6;
+def BGTZALC : BGTZALC_ENC, BGTZALC_DESC, ISA_MIPS32R6;
+def BGTZC : BGTZC_ENC, BGTZC_DESC, ISA_MIPS32R6;
+def BITSWAP : BITSWAP_ENC, BITSWAP_DESC, ISA_MIPS32R6;
+def BLEZALC : BLEZALC_ENC, BLEZALC_DESC, ISA_MIPS32R6;
+def BLEZC : BLEZC_ENC, BLEZC_DESC, ISA_MIPS32R6;
+def BLTC : BLTC_ENC, BLTC_DESC, ISA_MIPS32R6;
+def BLTUC : BLTUC_ENC, BLTUC_DESC, ISA_MIPS32R6;
+def BLTZALC : BLTZALC_ENC, BLTZALC_DESC, ISA_MIPS32R6;
+def BLTZC : BLTZC_ENC, BLTZC_DESC, ISA_MIPS32R6;
+def BNEC : BNEC_ENC, BNEC_DESC, ISA_MIPS32R6;
+def BNEZALC : BNEZALC_ENC, BNEZALC_DESC, ISA_MIPS32R6;
+def BNEZC : BNEZC_ENC, BNEZC_DESC, ISA_MIPS32R6;
+def BNVC : BNVC_ENC, BNVC_DESC, ISA_MIPS32R6;
+def BOVC : BOVC_ENC, BOVC_DESC, ISA_MIPS32R6;
+def CACHE_R6 : CACHE_ENC, CACHE_DESC, ISA_MIPS32R6;
+def CLASS_D : CLASS_D_ENC, CLASS_D_DESC, ISA_MIPS32R6;
+def CLASS_S : CLASS_S_ENC, CLASS_S_DESC, ISA_MIPS32R6;
+def CLO_R6 : CLO_R6_ENC, CLO_R6_DESC, ISA_MIPS32R6;
+def CLZ_R6 : CLZ_R6_ENC, CLZ_R6_DESC, ISA_MIPS32R6;
+defm S : CMP_CC_M<FIELD_CMP_FORMAT_S, "s", FGR32Opnd>;
+defm D : CMP_CC_M<FIELD_CMP_FORMAT_D, "d", FGR64Opnd>;
+def DIV : DIV_ENC, DIV_DESC, ISA_MIPS32R6;
+def DIVU : DIVU_ENC, DIVU_DESC, ISA_MIPS32R6;
+def JIALC : JIALC_ENC, JIALC_DESC, ISA_MIPS32R6;
+def JIC : JIC_ENC, JIC_DESC, ISA_MIPS32R6;
+def JR_HB_R6 : JR_HB_R6_ENC, JR_HB_R6_DESC, ISA_MIPS32R6;
+def LDC2_R6 : LDC2_R6_ENC, LDC2_R6_DESC, ISA_MIPS32R6;
+def LL_R6 : LL_R6_ENC, LL_R6_DESC, ISA_MIPS32R6;
+def LSA_R6 : LSA_R6_ENC, LSA_R6_DESC, ISA_MIPS32R6;
+def LWC2_R6 : LWC2_R6_ENC, LWC2_R6_DESC, ISA_MIPS32R6;
+def LWPC : LWPC_ENC, LWPC_DESC, ISA_MIPS32R6;
+def LWUPC : LWUPC_ENC, LWUPC_DESC, ISA_MIPS32R6;
+def MADDF_S : MADDF_S_ENC, MADDF_S_DESC, ISA_MIPS32R6;
+def MADDF_D : MADDF_D_ENC, MADDF_D_DESC, ISA_MIPS32R6;
+def MAXA_D : MAXA_D_ENC, MAXA_D_DESC, ISA_MIPS32R6;
+def MAXA_S : MAXA_S_ENC, MAXA_S_DESC, ISA_MIPS32R6;
+def MAX_D : MAX_D_ENC, MAX_D_DESC, ISA_MIPS32R6;
+def MAX_S : MAX_S_ENC, MAX_S_DESC, ISA_MIPS32R6;
+def MINA_D : MINA_D_ENC, MINA_D_DESC, ISA_MIPS32R6;
+def MINA_S : MINA_S_ENC, MINA_S_DESC, ISA_MIPS32R6;
+def MIN_D : MIN_D_ENC, MIN_D_DESC, ISA_MIPS32R6;
+def MIN_S : MIN_S_ENC, MIN_S_DESC, ISA_MIPS32R6;
+def MOD : MOD_ENC, MOD_DESC, ISA_MIPS32R6;
+def MODU : MODU_ENC, MODU_DESC, ISA_MIPS32R6;
+def MSUBF_S : MSUBF_S_ENC, MSUBF_S_DESC, ISA_MIPS32R6;
+def MSUBF_D : MSUBF_D_ENC, MSUBF_D_DESC, ISA_MIPS32R6;
+def MUH : MUH_ENC, MUH_DESC, ISA_MIPS32R6;
+def MUHU : MUHU_ENC, MUHU_DESC, ISA_MIPS32R6;
+def MUL_R6 : MUL_R6_ENC, MUL_R6_DESC, ISA_MIPS32R6;
+def MULU : MULU_ENC, MULU_DESC, ISA_MIPS32R6;
+def NAL; // BAL with rd=0
+def PREF_R6 : PREF_ENC, PREF_DESC, ISA_MIPS32R6;
+def RINT_D : RINT_D_ENC, RINT_D_DESC, ISA_MIPS32R6;
+def RINT_S : RINT_S_ENC, RINT_S_DESC, ISA_MIPS32R6;
+def SC_R6 : SC_R6_ENC, SC_R6_DESC, ISA_MIPS32R6;
+def SDBBP_R6 : SDBBP_R6_ENC, SDBBP_R6_DESC, ISA_MIPS32R6;
+def SDC2_R6 : SDC2_R6_ENC, SDC2_R6_DESC, ISA_MIPS32R6;
+def SELEQZ : SELEQZ_ENC, SELEQZ_DESC, ISA_MIPS32R6, GPR_32;
+def SELEQZ_D : SELEQZ_D_ENC, SELEQZ_D_DESC, ISA_MIPS32R6;
+def SELEQZ_S : SELEQZ_S_ENC, SELEQZ_S_DESC, ISA_MIPS32R6;
+def SELNEZ : SELNEZ_ENC, SELNEZ_DESC, ISA_MIPS32R6, GPR_32;
+def SELNEZ_D : SELNEZ_D_ENC, SELNEZ_D_DESC, ISA_MIPS32R6;
+def SELNEZ_S : SELNEZ_S_ENC, SELNEZ_S_DESC, ISA_MIPS32R6;
+def SEL_D : SEL_D_ENC, SEL_D_DESC, ISA_MIPS32R6;
+def SEL_S : SEL_S_ENC, SEL_S_DESC, ISA_MIPS32R6;
+def SWC2_R6 : SWC2_R6_ENC, SWC2_R6_DESC, ISA_MIPS32R6;
+
+//===----------------------------------------------------------------------===//
+//
+// Instruction Aliases
+//
+//===----------------------------------------------------------------------===//
+
+def : MipsInstAlias<"sdbbp", (SDBBP_R6 0)>, ISA_MIPS32R6;
+def : MipsInstAlias<"jr $rs", (JALR ZERO, GPR32Opnd:$rs), 1>, ISA_MIPS32R6;
+
+//===----------------------------------------------------------------------===//
+//
+// Patterns and Pseudo Instructions
+//
+//===----------------------------------------------------------------------===//
+
+// f32 comparisons supported via another comparison
+def : MipsPat<(setone f32:$lhs, f32:$rhs),
+ (NOR (CMP_UEQ_S f32:$lhs, f32:$rhs), ZERO)>, ISA_MIPS32R6;
+def : MipsPat<(seto f32:$lhs, f32:$rhs),
+ (NOR (CMP_UN_S f32:$lhs, f32:$rhs), ZERO)>, ISA_MIPS32R6;
+def : MipsPat<(setune f32:$lhs, f32:$rhs),
+ (NOR (CMP_EQ_S f32:$lhs, f32:$rhs), ZERO)>, ISA_MIPS32R6;
+def : MipsPat<(seteq f32:$lhs, f32:$rhs), (CMP_EQ_S f32:$lhs, f32:$rhs)>,
+ ISA_MIPS32R6;
+def : MipsPat<(setgt f32:$lhs, f32:$rhs), (CMP_LE_S f32:$rhs, f32:$lhs)>,
+ ISA_MIPS32R6;
+def : MipsPat<(setge f32:$lhs, f32:$rhs), (CMP_LT_S f32:$rhs, f32:$lhs)>,
+ ISA_MIPS32R6;
+def : MipsPat<(setlt f32:$lhs, f32:$rhs), (CMP_LT_S f32:$lhs, f32:$rhs)>,
+ ISA_MIPS32R6;
+def : MipsPat<(setle f32:$lhs, f32:$rhs), (CMP_LE_S f32:$lhs, f32:$rhs)>,
+ ISA_MIPS32R6;
+def : MipsPat<(setne f32:$lhs, f32:$rhs),
+ (NOR (CMP_EQ_S f32:$lhs, f32:$rhs), ZERO)>, ISA_MIPS32R6;
+
+// f64 comparisons supported via another comparison
+def : MipsPat<(setone f64:$lhs, f64:$rhs),
+ (NOR (CMP_UEQ_D f64:$lhs, f64:$rhs), ZERO)>, ISA_MIPS32R6;
+def : MipsPat<(seto f64:$lhs, f64:$rhs),
+ (NOR (CMP_UN_D f64:$lhs, f64:$rhs), ZERO)>, ISA_MIPS32R6;
+def : MipsPat<(setune f64:$lhs, f64:$rhs),
+ (NOR (CMP_EQ_D f64:$lhs, f64:$rhs), ZERO)>, ISA_MIPS32R6;
+def : MipsPat<(seteq f64:$lhs, f64:$rhs), (CMP_EQ_D f64:$lhs, f64:$rhs)>,
+ ISA_MIPS32R6;
+def : MipsPat<(setgt f64:$lhs, f64:$rhs), (CMP_LE_D f64:$rhs, f64:$lhs)>,
+ ISA_MIPS32R6;
+def : MipsPat<(setge f64:$lhs, f64:$rhs), (CMP_LT_D f64:$rhs, f64:$lhs)>,
+ ISA_MIPS32R6;
+def : MipsPat<(setlt f64:$lhs, f64:$rhs), (CMP_LT_D f64:$lhs, f64:$rhs)>,
+ ISA_MIPS32R6;
+def : MipsPat<(setle f64:$lhs, f64:$rhs), (CMP_LE_D f64:$lhs, f64:$rhs)>,
+ ISA_MIPS32R6;
+def : MipsPat<(setne f64:$lhs, f64:$rhs),
+ (NOR (CMP_EQ_D f64:$lhs, f64:$rhs), ZERO)>, ISA_MIPS32R6;
+
+// i32 selects
+def : MipsPat<(select i32:$cond, i32:$t, i32:$f),
+ (OR (SELNEZ i32:$t, i32:$cond), (SELEQZ i32:$f, i32:$cond))>,
+ ISA_MIPS32R6;
+def : MipsPat<(select (i32 (seteq i32:$cond, immz)), i32:$t, i32:$f),
+ (OR (SELEQZ i32:$t, i32:$cond), (SELNEZ i32:$f, i32:$cond))>,
+ ISA_MIPS32R6;
+def : MipsPat<(select (i32 (setne i32:$cond, immz)), i32:$t, i32:$f),
+ (OR (SELNEZ i32:$t, i32:$cond), (SELEQZ i32:$f, i32:$cond))>,
+ ISA_MIPS32R6;
+def : MipsPat<(select (i32 (seteq i32:$cond, immZExt16:$imm)), i32:$t, i32:$f),
+ (OR (SELEQZ i32:$t, (XORi i32:$cond, immZExt16:$imm)),
+ (SELNEZ i32:$f, (XORi i32:$cond, immZExt16:$imm)))>,
+ ISA_MIPS32R6;
+def : MipsPat<(select (i32 (setne i32:$cond, immZExt16:$imm)), i32:$t, i32:$f),
+ (OR (SELNEZ i32:$t, (XORi i32:$cond, immZExt16:$imm)),
+ (SELEQZ i32:$f, (XORi i32:$cond, immZExt16:$imm)))>,
+ ISA_MIPS32R6;
+def : MipsPat<(select (i32 (setgt i32:$cond, immSExt16Plus1:$imm)), i32:$t,
+ i32:$f),
+ (OR (SELEQZ i32:$t, (SLTi i32:$cond, (Plus1 imm:$imm))),
+ (SELNEZ i32:$f, (SLTi i32:$cond, (Plus1 imm:$imm))))>,
+ ISA_MIPS32R6;
+def : MipsPat<(select (i32 (setugt i32:$cond, immSExt16Plus1:$imm)),
+ i32:$t, i32:$f),
+ (OR (SELEQZ i32:$t, (SLTiu i32:$cond, (Plus1 imm:$imm))),
+ (SELNEZ i32:$f, (SLTiu i32:$cond, (Plus1 imm:$imm))))>,
+ ISA_MIPS32R6;
+
+def : MipsPat<(select i32:$cond, i32:$t, immz),
+ (SELNEZ i32:$t, i32:$cond)>, ISA_MIPS32R6;
+def : MipsPat<(select (i32 (setne i32:$cond, immz)), i32:$t, immz),
+ (SELNEZ i32:$t, i32:$cond)>, ISA_MIPS32R6;
+def : MipsPat<(select (i32 (seteq i32:$cond, immz)), i32:$t, immz),
+ (SELEQZ i32:$t, i32:$cond)>, ISA_MIPS32R6;
+def : MipsPat<(select i32:$cond, immz, i32:$f),
+ (SELEQZ i32:$f, i32:$cond)>, ISA_MIPS32R6;
+def : MipsPat<(select (i32 (setne i32:$cond, immz)), immz, i32:$f),
+ (SELEQZ i32:$f, i32:$cond)>, ISA_MIPS32R6;
+def : MipsPat<(select (i32 (seteq i32:$cond, immz)), immz, i32:$f),
+ (SELNEZ i32:$f, i32:$cond)>, ISA_MIPS32R6;
diff --git a/contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td b/contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td
index 15ef654..f0b6814 100644
--- a/contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td
+++ b/contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td
@@ -20,6 +20,11 @@ def uimm16_64 : Operand<i64> {
let PrintMethod = "printUnsignedImm";
}
+// Signed Operand
+def simm10_64 : Operand<i64>;
+
+def imm64: Operand<i64>;
+
// Transformation Function - get Imm - 32.
def Subtract32 : SDNodeXForm<imm, [{
return getImm(N, (unsigned)N->getZExtValue() - 32);
@@ -28,6 +33,14 @@ def Subtract32 : SDNodeXForm<imm, [{
// shamt must fit in 6 bits.
def immZExt6 : ImmLeaf<i32, [{return Imm == (Imm & 0x3f);}]>;
+// Node immediate fits as 10-bit sign extended on target immediate.
+// e.g. seqi, snei
+def immSExt10_64 : PatLeaf<(i64 imm),
+ [{ return isInt<10>(N->getSExtValue()); }]>;
+
+def immZExt16_64 : PatLeaf<(i64 imm),
+ [{ return isInt<16>(N->getZExtValue()); }]>;
+
//===----------------------------------------------------------------------===//
// Instructions specific format
//===----------------------------------------------------------------------===//
@@ -53,153 +66,176 @@ let isPseudo = 1, isCodeGenOnly = 1 in {
//===----------------------------------------------------------------------===//
let DecoderNamespace = "Mips64" in {
/// Arithmetic Instructions (ALU Immediate)
-def DADDi : ArithLogicI<"daddi", simm16_64, GPR64Opnd>, ADDI_FM<0x18>;
-def DADDiu : ArithLogicI<"daddiu", simm16_64, GPR64Opnd, IIArith,
+def DADDi : ArithLogicI<"daddi", simm16_64, GPR64Opnd>, ADDI_FM<0x18>,
+ ISA_MIPS3_NOT_32R6_64R6;
+def DADDiu : ArithLogicI<"daddiu", simm16_64, GPR64Opnd, II_DADDIU,
immSExt16, add>,
- ADDI_FM<0x19>, IsAsCheapAsAMove;
+ ADDI_FM<0x19>, IsAsCheapAsAMove, ISA_MIPS3;
let isCodeGenOnly = 1 in {
def SLTi64 : SetCC_I<"slti", setlt, simm16_64, immSExt16, GPR64Opnd>,
SLTI_FM<0xa>;
def SLTiu64 : SetCC_I<"sltiu", setult, simm16_64, immSExt16, GPR64Opnd>,
SLTI_FM<0xb>;
-def ANDi64 : ArithLogicI<"andi", uimm16_64, GPR64Opnd, IILogic, immZExt16,
- and>,
+def ANDi64 : ArithLogicI<"andi", uimm16_64, GPR64Opnd, II_AND, immZExt16, and>,
ADDI_FM<0xc>;
-def ORi64 : ArithLogicI<"ori", uimm16_64, GPR64Opnd, IILogic, immZExt16,
- or>,
+def ORi64 : ArithLogicI<"ori", uimm16_64, GPR64Opnd, II_OR, immZExt16, or>,
ADDI_FM<0xd>;
-def XORi64 : ArithLogicI<"xori", uimm16_64, GPR64Opnd, IILogic, immZExt16,
- xor>,
+def XORi64 : ArithLogicI<"xori", uimm16_64, GPR64Opnd, II_XOR, immZExt16, xor>,
ADDI_FM<0xe>;
def LUi64 : LoadUpper<"lui", GPR64Opnd, uimm16_64>, LUI_FM;
}
/// Arithmetic Instructions (3-Operand, R-Type)
-def DADD : ArithLogicR<"dadd", GPR64Opnd>, ADD_FM<0, 0x2c>;
-def DADDu : ArithLogicR<"daddu", GPR64Opnd, 1, IIArith, add>,
- ADD_FM<0, 0x2d>;
-def DSUBu : ArithLogicR<"dsubu", GPR64Opnd, 0, IIArith, sub>,
- ADD_FM<0, 0x2f>;
+def DADD : ArithLogicR<"dadd", GPR64Opnd, 1, II_DADD>, ADD_FM<0, 0x2c>,
+ ISA_MIPS3;
+def DADDu : ArithLogicR<"daddu", GPR64Opnd, 1, II_DADDU, add>, ADD_FM<0, 0x2d>,
+ ISA_MIPS3;
+def DSUBu : ArithLogicR<"dsubu", GPR64Opnd, 0, II_DSUBU, sub>, ADD_FM<0, 0x2f>,
+ ISA_MIPS3;
+def DSUB : ArithLogicR<"dsub", GPR64Opnd, 0, II_DSUB>, ADD_FM<0, 0x2e>,
+ ISA_MIPS3;
let isCodeGenOnly = 1 in {
def SLT64 : SetCC_R<"slt", setlt, GPR64Opnd>, ADD_FM<0, 0x2a>;
def SLTu64 : SetCC_R<"sltu", setult, GPR64Opnd>, ADD_FM<0, 0x2b>;
-def AND64 : ArithLogicR<"and", GPR64Opnd, 1, IIArith, and>, ADD_FM<0, 0x24>;
-def OR64 : ArithLogicR<"or", GPR64Opnd, 1, IIArith, or>, ADD_FM<0, 0x25>;
-def XOR64 : ArithLogicR<"xor", GPR64Opnd, 1, IIArith, xor>, ADD_FM<0, 0x26>;
+def AND64 : ArithLogicR<"and", GPR64Opnd, 1, II_AND, and>, ADD_FM<0, 0x24>;
+def OR64 : ArithLogicR<"or", GPR64Opnd, 1, II_OR, or>, ADD_FM<0, 0x25>;
+def XOR64 : ArithLogicR<"xor", GPR64Opnd, 1, II_XOR, xor>, ADD_FM<0, 0x26>;
def NOR64 : LogicNOR<"nor", GPR64Opnd>, ADD_FM<0, 0x27>;
}
/// Shift Instructions
-def DSLL : shift_rotate_imm<"dsll", uimm6, GPR64Opnd, shl, immZExt6>,
- SRA_FM<0x38, 0>;
-def DSRL : shift_rotate_imm<"dsrl", uimm6, GPR64Opnd, srl, immZExt6>,
- SRA_FM<0x3a, 0>;
-def DSRA : shift_rotate_imm<"dsra", uimm6, GPR64Opnd, sra, immZExt6>,
- SRA_FM<0x3b, 0>;
-def DSLLV : shift_rotate_reg<"dsllv", GPR64Opnd, shl>, SRLV_FM<0x14, 0>;
-def DSRLV : shift_rotate_reg<"dsrlv", GPR64Opnd, srl>, SRLV_FM<0x16, 0>;
-def DSRAV : shift_rotate_reg<"dsrav", GPR64Opnd, sra>, SRLV_FM<0x17, 0>;
-def DSLL32 : shift_rotate_imm<"dsll32", uimm5, GPR64Opnd>, SRA_FM<0x3c, 0>;
-def DSRL32 : shift_rotate_imm<"dsrl32", uimm5, GPR64Opnd>, SRA_FM<0x3e, 0>;
-def DSRA32 : shift_rotate_imm<"dsra32", uimm5, GPR64Opnd>, SRA_FM<0x3f, 0>;
+def DSLL : shift_rotate_imm<"dsll", uimm6, GPR64Opnd, II_DSLL, shl, immZExt6>,
+ SRA_FM<0x38, 0>, ISA_MIPS3;
+def DSRL : shift_rotate_imm<"dsrl", uimm6, GPR64Opnd, II_DSRL, srl, immZExt6>,
+ SRA_FM<0x3a, 0>, ISA_MIPS3;
+def DSRA : shift_rotate_imm<"dsra", uimm6, GPR64Opnd, II_DSRA, sra, immZExt6>,
+ SRA_FM<0x3b, 0>, ISA_MIPS3;
+def DSLLV : shift_rotate_reg<"dsllv", GPR64Opnd, II_DSLLV, shl>,
+ SRLV_FM<0x14, 0>, ISA_MIPS3;
+def DSRLV : shift_rotate_reg<"dsrlv", GPR64Opnd, II_DSRLV, srl>,
+ SRLV_FM<0x16, 0>, ISA_MIPS3;
+def DSRAV : shift_rotate_reg<"dsrav", GPR64Opnd, II_DSRAV, sra>,
+ SRLV_FM<0x17, 0>, ISA_MIPS3;
+def DSLL32 : shift_rotate_imm<"dsll32", uimm5, GPR64Opnd, II_DSLL32>,
+ SRA_FM<0x3c, 0>, ISA_MIPS3;
+def DSRL32 : shift_rotate_imm<"dsrl32", uimm5, GPR64Opnd, II_DSRL32>,
+ SRA_FM<0x3e, 0>, ISA_MIPS3;
+def DSRA32 : shift_rotate_imm<"dsra32", uimm5, GPR64Opnd, II_DSRA32>,
+ SRA_FM<0x3f, 0>, ISA_MIPS3;
// Rotate Instructions
-let Predicates = [HasMips64r2, HasStdEnc] in {
- def DROTR : shift_rotate_imm<"drotr", uimm6, GPR64Opnd, rotr, immZExt6>,
- SRA_FM<0x3a, 1>;
- def DROTRV : shift_rotate_reg<"drotrv", GPR64Opnd, rotr>,
- SRLV_FM<0x16, 1>;
- def DROTR32 : shift_rotate_imm<"drotr32", uimm5, GPR64Opnd>, SRA_FM<0x3e, 1>;
-}
+def DROTR : shift_rotate_imm<"drotr", uimm6, GPR64Opnd, II_DROTR, rotr,
+ immZExt6>,
+ SRA_FM<0x3a, 1>, ISA_MIPS64R2;
+def DROTRV : shift_rotate_reg<"drotrv", GPR64Opnd, II_DROTRV, rotr>,
+ SRLV_FM<0x16, 1>, ISA_MIPS64R2;
+def DROTR32 : shift_rotate_imm<"drotr32", uimm5, GPR64Opnd, II_DROTR32>,
+ SRA_FM<0x3e, 1>, ISA_MIPS64R2;
/// Load and Store Instructions
/// aligned
let isCodeGenOnly = 1 in {
-def LB64 : Load<"lb", GPR64Opnd, sextloadi8, IILoad>, LW_FM<0x20>;
-def LBu64 : Load<"lbu", GPR64Opnd, zextloadi8, IILoad>, LW_FM<0x24>;
-def LH64 : Load<"lh", GPR64Opnd, sextloadi16, IILoad>, LW_FM<0x21>;
-def LHu64 : Load<"lhu", GPR64Opnd, zextloadi16, IILoad>, LW_FM<0x25>;
-def LW64 : Load<"lw", GPR64Opnd, sextloadi32, IILoad>, LW_FM<0x23>;
-def SB64 : Store<"sb", GPR64Opnd, truncstorei8, IIStore>, LW_FM<0x28>;
-def SH64 : Store<"sh", GPR64Opnd, truncstorei16, IIStore>, LW_FM<0x29>;
-def SW64 : Store<"sw", GPR64Opnd, truncstorei32, IIStore>, LW_FM<0x2b>;
+def LB64 : Load<"lb", GPR64Opnd, sextloadi8, II_LB>, LW_FM<0x20>;
+def LBu64 : Load<"lbu", GPR64Opnd, zextloadi8, II_LBU>, LW_FM<0x24>;
+def LH64 : Load<"lh", GPR64Opnd, sextloadi16, II_LH>, LW_FM<0x21>;
+def LHu64 : Load<"lhu", GPR64Opnd, zextloadi16, II_LHU>, LW_FM<0x25>;
+def LW64 : Load<"lw", GPR64Opnd, sextloadi32, II_LW>, LW_FM<0x23>;
+def SB64 : Store<"sb", GPR64Opnd, truncstorei8, II_SB>, LW_FM<0x28>;
+def SH64 : Store<"sh", GPR64Opnd, truncstorei16, II_SH>, LW_FM<0x29>;
+def SW64 : Store<"sw", GPR64Opnd, truncstorei32, II_SW>, LW_FM<0x2b>;
}
-def LWu : Load<"lwu", GPR64Opnd, zextloadi32, IILoad>, LW_FM<0x27>;
-def LD : Load<"ld", GPR64Opnd, load, IILoad>, LW_FM<0x37>;
-def SD : Store<"sd", GPR64Opnd, store, IIStore>, LW_FM<0x3f>;
+def LWu : Load<"lwu", GPR64Opnd, zextloadi32, II_LWU>, LW_FM<0x27>, ISA_MIPS3;
+def LD : Load<"ld", GPR64Opnd, load, II_LD>, LW_FM<0x37>, ISA_MIPS3;
+def SD : Store<"sd", GPR64Opnd, store, II_SD>, LW_FM<0x3f>, ISA_MIPS3;
/// load/store left/right
let isCodeGenOnly = 1 in {
-def LWL64 : LoadLeftRight<"lwl", MipsLWL, GPR64Opnd, IILoad>, LW_FM<0x22>;
-def LWR64 : LoadLeftRight<"lwr", MipsLWR, GPR64Opnd, IILoad>, LW_FM<0x26>;
-def SWL64 : StoreLeftRight<"swl", MipsSWL, GPR64Opnd, IIStore>, LW_FM<0x2a>;
-def SWR64 : StoreLeftRight<"swr", MipsSWR, GPR64Opnd, IIStore>, LW_FM<0x2e>;
+def LWL64 : LoadLeftRight<"lwl", MipsLWL, GPR64Opnd, II_LWL>, LW_FM<0x22>;
+def LWR64 : LoadLeftRight<"lwr", MipsLWR, GPR64Opnd, II_LWR>, LW_FM<0x26>;
+def SWL64 : StoreLeftRight<"swl", MipsSWL, GPR64Opnd, II_SWL>, LW_FM<0x2a>;
+def SWR64 : StoreLeftRight<"swr", MipsSWR, GPR64Opnd, II_SWR>, LW_FM<0x2e>;
}
-def LDL : LoadLeftRight<"ldl", MipsLDL, GPR64Opnd, IILoad>, LW_FM<0x1a>;
-def LDR : LoadLeftRight<"ldr", MipsLDR, GPR64Opnd, IILoad>, LW_FM<0x1b>;
-def SDL : StoreLeftRight<"sdl", MipsSDL, GPR64Opnd, IIStore>, LW_FM<0x2c>;
-def SDR : StoreLeftRight<"sdr", MipsSDR, GPR64Opnd, IIStore>, LW_FM<0x2d>;
+def LDL : LoadLeftRight<"ldl", MipsLDL, GPR64Opnd, II_LDL>, LW_FM<0x1a>,
+ ISA_MIPS3_NOT_32R6_64R6;
+def LDR : LoadLeftRight<"ldr", MipsLDR, GPR64Opnd, II_LDR>, LW_FM<0x1b>,
+ ISA_MIPS3_NOT_32R6_64R6;
+def SDL : StoreLeftRight<"sdl", MipsSDL, GPR64Opnd, II_SDL>, LW_FM<0x2c>,
+ ISA_MIPS3_NOT_32R6_64R6;
+def SDR : StoreLeftRight<"sdr", MipsSDR, GPR64Opnd, II_SDR>, LW_FM<0x2d>,
+ ISA_MIPS3_NOT_32R6_64R6;
/// Load-linked, Store-conditional
-def LLD : LLBase<"lld", GPR64Opnd>, LW_FM<0x34>;
-def SCD : SCBase<"scd", GPR64Opnd>, LW_FM<0x3c>;
+def LLD : LLBase<"lld", GPR64Opnd>, LW_FM<0x34>, ISA_MIPS3_NOT_32R6_64R6;
+def SCD : SCBase<"scd", GPR64Opnd>, LW_FM<0x3c>, ISA_MIPS3_NOT_32R6_64R6;
/// Jump and Branch Instructions
let isCodeGenOnly = 1 in {
-def JR64 : IndirectBranch<"jr", GPR64Opnd>, MTLO_FM<8>;
-def BEQ64 : CBranch<"beq", brtarget, seteq, GPR64Opnd>, BEQ_FM<4>;
-def BNE64 : CBranch<"bne", brtarget, setne, GPR64Opnd>, BEQ_FM<5>;
-def BGEZ64 : CBranchZero<"bgez", brtarget, setge, GPR64Opnd>, BGEZ_FM<1, 1>;
-def BGTZ64 : CBranchZero<"bgtz", brtarget, setgt, GPR64Opnd>, BGEZ_FM<7, 0>;
-def BLEZ64 : CBranchZero<"blez", brtarget, setle, GPR64Opnd>, BGEZ_FM<6, 0>;
-def BLTZ64 : CBranchZero<"bltz", brtarget, setlt, GPR64Opnd>, BGEZ_FM<1, 0>;
-def JALR64 : JumpLinkReg<"jalr", GPR64Opnd>, JALR_FM;
-def JALR64Pseudo : JumpLinkRegPseudo<GPR64Opnd, JALR, RA, GPR32Opnd>;
-def TAILCALL64_R : JumpFR<"tcallr", GPR64Opnd, MipsTailCall>,
- MTLO_FM<8>, IsTailCall;
+ def JR64 : IndirectBranch<"jr", GPR64Opnd>, MTLO_FM<8>;
+ def BEQ64 : CBranch<"beq", brtarget, seteq, GPR64Opnd>, BEQ_FM<4>;
+ def BNE64 : CBranch<"bne", brtarget, setne, GPR64Opnd>, BEQ_FM<5>;
+ def BGEZ64 : CBranchZero<"bgez", brtarget, setge, GPR64Opnd>, BGEZ_FM<1, 1>;
+ def BGTZ64 : CBranchZero<"bgtz", brtarget, setgt, GPR64Opnd>, BGEZ_FM<7, 0>;
+ def BLEZ64 : CBranchZero<"blez", brtarget, setle, GPR64Opnd>, BGEZ_FM<6, 0>;
+ def BLTZ64 : CBranchZero<"bltz", brtarget, setlt, GPR64Opnd>, BGEZ_FM<1, 0>;
+ def JALR64 : JumpLinkReg<"jalr", GPR64Opnd>, JALR_FM;
+ def JALR64Pseudo : JumpLinkRegPseudo<GPR64Opnd, JALR, RA, GPR32Opnd>;
+ def TAILCALL64_R : TailCallReg<GPR64Opnd, JR, GPR32Opnd>;
}
+def PseudoReturn64 : PseudoReturnBase<GPR64Opnd>;
+def PseudoIndirectBranch64 : PseudoIndirectBranchBase<GPR64Opnd>;
+
/// Multiply and Divide Instructions.
-def DMULT : Mult<"dmult", IIImult, GPR64Opnd, [HI0_64, LO0_64]>,
- MULT_FM<0, 0x1c>;
-def DMULTu : Mult<"dmultu", IIImult, GPR64Opnd, [HI0_64, LO0_64]>,
- MULT_FM<0, 0x1d>;
+def DMULT : Mult<"dmult", II_DMULT, GPR64Opnd, [HI0_64, LO0_64]>,
+ MULT_FM<0, 0x1c>, ISA_MIPS3_NOT_32R6_64R6;
+def DMULTu : Mult<"dmultu", II_DMULTU, GPR64Opnd, [HI0_64, LO0_64]>,
+ MULT_FM<0, 0x1d>, ISA_MIPS3_NOT_32R6_64R6;
def PseudoDMULT : MultDivPseudo<DMULT, ACC128, GPR64Opnd, MipsMult,
- IIImult>;
+ II_DMULT>, ISA_MIPS3_NOT_32R6_64R6;
def PseudoDMULTu : MultDivPseudo<DMULTu, ACC128, GPR64Opnd, MipsMultu,
- IIImult>;
-def DSDIV : Div<"ddiv", IIIdiv, GPR64Opnd, [HI0_64, LO0_64]>, MULT_FM<0, 0x1e>;
-def DUDIV : Div<"ddivu", IIIdiv, GPR64Opnd, [HI0_64, LO0_64]>, MULT_FM<0, 0x1f>;
+ II_DMULTU>, ISA_MIPS3_NOT_32R6_64R6;
+def DSDIV : Div<"ddiv", II_DDIV, GPR64Opnd, [HI0_64, LO0_64]>,
+ MULT_FM<0, 0x1e>, ISA_MIPS3_NOT_32R6_64R6;
+def DUDIV : Div<"ddivu", II_DDIVU, GPR64Opnd, [HI0_64, LO0_64]>,
+ MULT_FM<0, 0x1f>, ISA_MIPS3_NOT_32R6_64R6;
def PseudoDSDIV : MultDivPseudo<DSDIV, ACC128, GPR64Opnd, MipsDivRem,
- IIIdiv, 0, 1, 1>;
+ II_DDIV, 0, 1, 1>, ISA_MIPS3_NOT_32R6_64R6;
def PseudoDUDIV : MultDivPseudo<DUDIV, ACC128, GPR64Opnd, MipsDivRemU,
- IIIdiv, 0, 1, 1>;
+ II_DDIVU, 0, 1, 1>, ISA_MIPS3_NOT_32R6_64R6;
let isCodeGenOnly = 1 in {
-def MTHI64 : MoveToLOHI<"mthi", GPR64Opnd, [HI0_64]>, MTLO_FM<0x11>;
-def MTLO64 : MoveToLOHI<"mtlo", GPR64Opnd, [LO0_64]>, MTLO_FM<0x13>;
-def MFHI64 : MoveFromLOHI<"mfhi", GPR64Opnd, AC0_64>, MFLO_FM<0x10>;
-def MFLO64 : MoveFromLOHI<"mflo", GPR64Opnd, AC0_64>, MFLO_FM<0x12>;
-def PseudoMFHI64 : PseudoMFLOHI<GPR64, ACC128, MipsMFHI>;
-def PseudoMFLO64 : PseudoMFLOHI<GPR64, ACC128, MipsMFLO>;
-def PseudoMTLOHI64 : PseudoMTLOHI<ACC128, GPR64>;
+def MTHI64 : MoveToLOHI<"mthi", GPR64Opnd, [HI0_64]>, MTLO_FM<0x11>,
+ ISA_MIPS3_NOT_32R6_64R6;
+def MTLO64 : MoveToLOHI<"mtlo", GPR64Opnd, [LO0_64]>, MTLO_FM<0x13>,
+ ISA_MIPS3_NOT_32R6_64R6;
+def MFHI64 : MoveFromLOHI<"mfhi", GPR64Opnd, AC0_64>, MFLO_FM<0x10>,
+ ISA_MIPS3_NOT_32R6_64R6;
+def MFLO64 : MoveFromLOHI<"mflo", GPR64Opnd, AC0_64>, MFLO_FM<0x12>,
+ ISA_MIPS3_NOT_32R6_64R6;
+def PseudoMFHI64 : PseudoMFLOHI<GPR64, ACC128, MipsMFHI>,
+ ISA_MIPS3_NOT_32R6_64R6;
+def PseudoMFLO64 : PseudoMFLOHI<GPR64, ACC128, MipsMFLO>,
+ ISA_MIPS3_NOT_32R6_64R6;
+def PseudoMTLOHI64 : PseudoMTLOHI<ACC128, GPR64>, ISA_MIPS3_NOT_32R6_64R6;
/// Sign Ext In Register Instructions.
-def SEB64 : SignExtInReg<"seb", i8, GPR64Opnd>, SEB_FM<0x10, 0x20>;
-def SEH64 : SignExtInReg<"seh", i16, GPR64Opnd>, SEB_FM<0x18, 0x20>;
+def SEB64 : SignExtInReg<"seb", i8, GPR64Opnd, II_SEB>, SEB_FM<0x10, 0x20>,
+ ISA_MIPS32R2;
+def SEH64 : SignExtInReg<"seh", i16, GPR64Opnd, II_SEH>, SEB_FM<0x18, 0x20>,
+ ISA_MIPS32R2;
}
/// Count Leading
-def DCLZ : CountLeading0<"dclz", GPR64Opnd>, CLO_FM<0x24>;
-def DCLO : CountLeading1<"dclo", GPR64Opnd>, CLO_FM<0x25>;
+def DCLZ : CountLeading0<"dclz", GPR64Opnd>, CLO_FM<0x24>, ISA_MIPS64_NOT_64R6;
+def DCLO : CountLeading1<"dclo", GPR64Opnd>, CLO_FM<0x25>, ISA_MIPS64_NOT_64R6;
/// Double Word Swap Bytes/HalfWords
-def DSBH : SubwordSwap<"dsbh", GPR64Opnd>, SEB_FM<2, 0x24>;
-def DSHD : SubwordSwap<"dshd", GPR64Opnd>, SEB_FM<5, 0x24>;
+def DSBH : SubwordSwap<"dsbh", GPR64Opnd>, SEB_FM<2, 0x24>, ISA_MIPS64R2;
+def DSHD : SubwordSwap<"dshd", GPR64Opnd>, SEB_FM<5, 0x24>, ISA_MIPS64R2;
def LEA_ADDiu64 : EffectiveAddress<"daddiu", GPR64Opnd>, LW_FM<0x19>;
@@ -216,24 +252,122 @@ def DINSM : InsBase<"dinsm", GPR64Opnd, uimm5>, EXT_FM<5>;
let isCodeGenOnly = 1, rs = 0, shamt = 0 in {
def DSLL64_32 : FR<0x00, 0x3c, (outs GPR64:$rd), (ins GPR32:$rt),
- "dsll\t$rd, $rt, 32", [], IIArith>;
+ "dsll\t$rd, $rt, 32", [], II_DSLL>;
def SLL64_32 : FR<0x0, 0x00, (outs GPR64:$rd), (ins GPR32:$rt),
- "sll\t$rd, $rt, 0", [], IIArith>;
+ "sll\t$rd, $rt, 0", [], II_SLL>;
def SLL64_64 : FR<0x0, 0x00, (outs GPR64:$rd), (ins GPR64:$rt),
- "sll\t$rd, $rt, 0", [], IIArith>;
+ "sll\t$rd, $rt, 0", [], II_SLL>;
}
+
+// We need the following pseudo instruction to avoid offset calculation for
+// long branches. See the comment in file MipsLongBranch.cpp for detailed
+// explanation.
+
+// Expands to: daddiu $dst, $src, %PART($tgt - $baltgt)
+// where %PART may be %hi or %lo, depending on the relocation kind
+// that $tgt is annotated with.
+def LONG_BRANCH_DADDiu : PseudoSE<(outs GPR64Opnd:$dst),
+ (ins GPR64Opnd:$src, brtarget:$tgt, brtarget:$baltgt), []>;
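+
+// As a rough example (labels and registers are illustrative), a pair of these
+// pseudos might eventually print as
+//   daddiu $1, $zero, %hi($BB0_2 - $BB0_1)
+//   ...
+//   daddiu $1, $1, %lo($BB0_2 - $BB0_1)
+// with the exact sequence chosen by the long-branch expansion pass.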
+
+// Cavium Octeon cnMIPS instructions
+let EncodingPredicates = []<Predicate>, // FIXME: The lack of HasStdEnc is probably a bug
+ AdditionalPredicates = [HasCnMips] in {
+
+class Count1s<string opstr, RegisterOperand RO>:
+ InstSE<(outs RO:$rd), (ins RO:$rs), !strconcat(opstr, "\t$rd, $rs"),
+ [(set RO:$rd, (ctpop RO:$rs))], II_POP, FrmR, opstr> {
+ let TwoOperandAliasConstraint = "$rd = $rs";
+}
+
+class ExtsCins<string opstr, SDPatternOperator Op = null_frag>:
+ InstSE<(outs GPR64Opnd:$rt), (ins GPR64Opnd:$rs, uimm5:$pos, uimm5:$lenm1),
+ !strconcat(opstr, " $rt, $rs, $pos, $lenm1"),
+ [(set GPR64Opnd:$rt, (Op GPR64Opnd:$rs, imm:$pos, imm:$lenm1))],
+ NoItinerary, FrmR, opstr> {
+ let TwoOperandAliasConstraint = "$rt = $rs";
}
+
+class SetCC64_R<string opstr, PatFrag cond_op> :
+ InstSE<(outs GPR64Opnd:$rd), (ins GPR64Opnd:$rs, GPR64Opnd:$rt),
+ !strconcat(opstr, "\t$rd, $rs, $rt"),
+ [(set GPR64Opnd:$rd, (cond_op GPR64Opnd:$rs, GPR64Opnd:$rt))],
+ II_SEQ_SNE, FrmR, opstr> {
+ let TwoOperandAliasConstraint = "$rd = $rs";
+}
+
+class SetCC64_I<string opstr, PatFrag cond_op>:
+ InstSE<(outs GPR64Opnd:$rt), (ins GPR64Opnd:$rs, simm10_64:$imm10),
+ !strconcat(opstr, "\t$rt, $rs, $imm10"),
+ [(set GPR64Opnd:$rt, (cond_op GPR64Opnd:$rs, immSExt10_64:$imm10))],
+ II_SEQI_SNEI, FrmI, opstr> {
+ let TwoOperandAliasConstraint = "$rt = $rs";
+}
+
+// Unsigned Byte Add
+let Pattern = [(set GPR64Opnd:$rd,
+ (and (add GPR64Opnd:$rs, GPR64Opnd:$rt), 255))] in
+def BADDu : ArithLogicR<"baddu", GPR64Opnd, 1, II_BADDU>,
+ ADD_FM<0x1c, 0x28>;
+
+// Multiply Doubleword to GPR
+let Defs = [HI0, LO0, P0, P1, P2] in
+def DMUL : ArithLogicR<"dmul", GPR64Opnd, 1, II_DMUL, mul>,
+ ADD_FM<0x1c, 0x03>;
+
+// Extract a signed bit field /+32
+def EXTS : ExtsCins<"exts">, EXTS_FM<0x3a>;
+def EXTS32: ExtsCins<"exts32">, EXTS_FM<0x3b>;
+
+// Clear and insert a bit field /+32
+def CINS : ExtsCins<"cins">, EXTS_FM<0x32>;
+def CINS32: ExtsCins<"cins32">, EXTS_FM<0x33>;
+
+// Move to multiplier/product register
+def MTM0 : MoveToLOHI<"mtm0", GPR64Opnd, [MPL0, P0, P1, P2]>, MTMR_FM<0x08>;
+def MTM1 : MoveToLOHI<"mtm1", GPR64Opnd, [MPL1, P0, P1, P2]>, MTMR_FM<0x0c>;
+def MTM2 : MoveToLOHI<"mtm2", GPR64Opnd, [MPL2, P0, P1, P2]>, MTMR_FM<0x0d>;
+def MTP0 : MoveToLOHI<"mtp0", GPR64Opnd, [P0]>, MTMR_FM<0x09>;
+def MTP1 : MoveToLOHI<"mtp1", GPR64Opnd, [P1]>, MTMR_FM<0x0a>;
+def MTP2 : MoveToLOHI<"mtp2", GPR64Opnd, [P2]>, MTMR_FM<0x0b>;
+
+// Count Ones in a Word/Doubleword
+def POP : Count1s<"pop", GPR32Opnd>, POP_FM<0x2c>;
+def DPOP : Count1s<"dpop", GPR64Opnd>, POP_FM<0x2d>;
+
+// Set on equal/not equal
+def SEQ : SetCC64_R<"seq", seteq>, SEQ_FM<0x2a>;
+def SEQi : SetCC64_I<"seqi", seteq>, SEQI_FM<0x2e>;
+def SNE : SetCC64_R<"sne", setne>, SEQ_FM<0x2b>;
+def SNEi : SetCC64_I<"snei", setne>, SEQI_FM<0x2f>;
+
+// 192-bit x 64-bit Unsigned Multiply and Add
+let Defs = [P0, P1, P2] in
+def V3MULU: ArithLogicR<"v3mulu", GPR64Opnd, 0, II_DMUL>,
+ ADD_FM<0x1c, 0x11>;
+
+// 64-bit Unsigned Multiply and Add Move
+let Defs = [MPL0, P0, P1, P2] in
+def VMM0 : ArithLogicR<"vmm0", GPR64Opnd, 0, II_DMUL>,
+ ADD_FM<0x1c, 0x10>;
+
+// 64-bit Unsigned Multiply and Add
+let Defs = [MPL1, MPL2, P0, P1, P2] in
+def VMULU : ArithLogicR<"vmulu", GPR64Opnd, 0, II_DMUL>,
+ ADD_FM<0x1c, 0x0f>;
+
+}
+
+}
+
//===----------------------------------------------------------------------===//
// Arbitrary patterns that map to one or more instructions
//===----------------------------------------------------------------------===//
// extended loads
-let Predicates = [HasStdEnc] in {
- def : MipsPat<(i64 (extloadi1 addr:$src)), (LB64 addr:$src)>;
- def : MipsPat<(i64 (extloadi8 addr:$src)), (LB64 addr:$src)>;
- def : MipsPat<(i64 (extloadi16 addr:$src)), (LH64 addr:$src)>;
- def : MipsPat<(i64 (extloadi32 addr:$src)), (LW64 addr:$src)>;
-}
+def : MipsPat<(i64 (extloadi1 addr:$src)), (LB64 addr:$src)>;
+def : MipsPat<(i64 (extloadi8 addr:$src)), (LB64 addr:$src)>;
+def : MipsPat<(i64 (extloadi16 addr:$src)), (LH64 addr:$src)>;
+def : MipsPat<(i64 (extloadi32 addr:$src)), (LW64 addr:$src)>;
// hi/lo relocs
def : MipsPat<(MipsHi tglobaladdr:$in), (LUi64 tglobaladdr:$in)>;
@@ -286,8 +420,7 @@ defm : SetgeImmPats<GPR64, SLTi64, SLTiu64>;
// truncate
def : MipsPat<(i32 (trunc GPR64:$src)),
- (SLL (EXTRACT_SUBREG GPR64:$src, sub_32), 0)>,
- Requires<[HasStdEnc]>;
+ (SLL (EXTRACT_SUBREG GPR64:$src, sub_32), 0)>;
// 32-to-64-bit extension
def : MipsPat<(i64 (anyext GPR32:$src)), (SLL64_32 GPR32:$src)>;
@@ -304,27 +437,76 @@ def : MipsPat<(bswap GPR64:$rt), (DSHD (DSBH GPR64:$rt))>;
//===----------------------------------------------------------------------===//
// Instruction aliases
//===----------------------------------------------------------------------===//
-def : InstAlias<"move $dst, $src",
- (DADDu GPR64Opnd:$dst, GPR64Opnd:$src, ZERO_64), 1>,
- Requires<[HasMips64]>;
-def : InstAlias<"daddu $rs, $rt, $imm",
- (DADDiu GPR64Opnd:$rs, GPR64Opnd:$rt, simm16_64:$imm),
- 0>;
-def : InstAlias<"dadd $rs, $rt, $imm",
- (DADDi GPR64Opnd:$rs, GPR64Opnd:$rt, simm16_64:$imm),
- 0>;
+def : MipsInstAlias<"move $dst, $src",
+ (DADDu GPR64Opnd:$dst, GPR64Opnd:$src, ZERO_64), 1>,
+ GPR_64;
+def : MipsInstAlias<"daddu $rs, $rt, $imm",
+ (DADDiu GPR64Opnd:$rs, GPR64Opnd:$rt, simm16_64:$imm),
+ 0>;
+def : MipsInstAlias<"dadd $rs, $rt, $imm",
+ (DADDi GPR64Opnd:$rs, GPR64Opnd:$rt, simm16_64:$imm),
+ 0>, ISA_MIPS3_NOT_32R6_64R6;
+def : MipsInstAlias<"daddu $rs, $imm",
+ (DADDiu GPR64Opnd:$rs, GPR64Opnd:$rs, simm16_64:$imm),
+ 0>;
+def : MipsInstAlias<"dadd $rs, $imm",
+ (DADDi GPR64Opnd:$rs, GPR64Opnd:$rs, simm16_64:$imm),
+ 0>, ISA_MIPS3_NOT_32R6_64R6;
+def : MipsInstAlias<"add $rs, $imm",
+ (ADDi GPR32Opnd:$rs, GPR32Opnd:$rs, simm16:$imm),
+ 0>;
+def : MipsInstAlias<"addu $rs, $imm",
+ (ADDiu GPR32Opnd:$rs, GPR32Opnd:$rs, simm16:$imm),
+ 0>;
+def : MipsInstAlias<"dsll $rd, $rt, $rs",
+ (DSLLV GPR64Opnd:$rd, GPR64Opnd:$rt, GPR32Opnd:$rs), 0>,
+ ISA_MIPS3;
+def : MipsInstAlias<"dsubu $rt, $rs, $imm",
+ (DADDiu GPR64Opnd:$rt, GPR64Opnd:$rs,
+ InvertedImOperand64:$imm), 0>;
+def : MipsInstAlias<"dsubi $rs, $rt, $imm",
+ (DADDi GPR64Opnd:$rs, GPR64Opnd:$rt,
+ InvertedImOperand64:$imm),
+ 0>, ISA_MIPS3_NOT_32R6_64R6;
+def : MipsInstAlias<"dsubi $rs, $imm",
+ (DADDi GPR64Opnd:$rs, GPR64Opnd:$rs,
+ InvertedImOperand64:$imm),
+ 0>, ISA_MIPS3_NOT_32R6_64R6;
+def : MipsInstAlias<"dsub $rs, $rt, $imm",
+ (DADDi GPR64Opnd:$rs, GPR64Opnd:$rt,
+ InvertedImOperand64:$imm),
+ 0>, ISA_MIPS3_NOT_32R6_64R6;
+def : MipsInstAlias<"dsub $rs, $imm",
+ (DADDi GPR64Opnd:$rs, GPR64Opnd:$rs,
+ InvertedImOperand64:$imm),
+ 0>, ISA_MIPS3_NOT_32R6_64R6;
+def : MipsInstAlias<"dsubu $rs, $imm",
+ (DADDiu GPR64Opnd:$rs, GPR64Opnd:$rs,
+ InvertedImOperand64:$imm),
+ 0>;
+def : MipsInstAlias<"dsra $rd, $rt, $rs",
+ (DSRAV GPR64Opnd:$rd, GPR64Opnd:$rt, GPR32Opnd:$rs), 0>,
+ ISA_MIPS3;
+def : MipsInstAlias<"dsrl $rd, $rt, $rs",
+ (DSRLV GPR64Opnd:$rd, GPR64Opnd:$rt, GPR32Opnd:$rs), 0>,
+ ISA_MIPS3;
+
+class LoadImm64< string instr_asm, Operand Od, RegisterOperand RO> :
+ MipsAsmPseudoInst<(outs RO:$rt), (ins Od:$imm64),
+ !strconcat(instr_asm, "\t$rt, $imm64")> ;
+def LoadImm64Reg : LoadImm64<"dli", imm64, GPR64Opnd>;
/// Move between CPU and coprocessor registers
let DecoderNamespace = "Mips64", Predicates = [HasMips64] in {
def DMFC0 : MFC3OP<"dmfc0", GPR64Opnd>, MFC3OP_FM<0x10, 1>;
-def DMTC0 : MFC3OP<"dmtc0", GPR64Opnd>, MFC3OP_FM<0x10, 5>;
-def DMFC2 : MFC3OP<"dmfc2", GPR64Opnd>, MFC3OP_FM<0x12, 1>;
-def DMTC2 : MFC3OP<"dmtc2", GPR64Opnd>, MFC3OP_FM<0x12, 5>;
+def DMTC0 : MFC3OP<"dmtc0", GPR64Opnd>, MFC3OP_FM<0x10, 5>, ISA_MIPS3;
+def DMFC2 : MFC3OP<"dmfc2", GPR64Opnd>, MFC3OP_FM<0x12, 1>, ISA_MIPS3;
+def DMTC2 : MFC3OP<"dmtc2", GPR64Opnd>, MFC3OP_FM<0x12, 5>, ISA_MIPS3;
}
// Two operand (implicit 0 selector) versions:
-def : InstAlias<"dmfc0 $rt, $rd", (DMFC0 GPR64Opnd:$rt, GPR64Opnd:$rd, 0), 0>;
-def : InstAlias<"dmtc0 $rt, $rd", (DMTC0 GPR64Opnd:$rt, GPR64Opnd:$rd, 0), 0>;
-def : InstAlias<"dmfc2 $rt, $rd", (DMFC2 GPR64Opnd:$rt, GPR64Opnd:$rd, 0), 0>;
-def : InstAlias<"dmtc2 $rt, $rd", (DMTC2 GPR64Opnd:$rt, GPR64Opnd:$rd, 0), 0>;
+def : MipsInstAlias<"dmfc0 $rt, $rd", (DMFC0 GPR64Opnd:$rt, GPR64Opnd:$rd, 0), 0>;
+def : MipsInstAlias<"dmtc0 $rt, $rd", (DMTC0 GPR64Opnd:$rt, GPR64Opnd:$rd, 0), 0>;
+def : MipsInstAlias<"dmfc2 $rt, $rd", (DMFC2 GPR64Opnd:$rt, GPR64Opnd:$rd, 0), 0>;
+def : MipsInstAlias<"dmtc2 $rt, $rd", (DMTC2 GPR64Opnd:$rt, GPR64Opnd:$rd, 0), 0>;
diff --git a/contrib/llvm/lib/Target/Mips/Mips64r6InstrInfo.td b/contrib/llvm/lib/Target/Mips/Mips64r6InstrInfo.td
new file mode 100644
index 0000000..6b546e8
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/Mips64r6InstrInfo.td
@@ -0,0 +1,217 @@
+//=- Mips64r6InstrInfo.td - Mips64r6 Instruction Information -*- tablegen -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes Mips64r6 instructions.
+//
+//===----------------------------------------------------------------------===//
+
+// Notes about removals/changes from MIPS32r6:
+// Reencoded: dclo, dclz
+
+//===----------------------------------------------------------------------===//
+//
+// Instruction Encodings
+//
+//===----------------------------------------------------------------------===//
+
+class DALIGN_ENC : SPECIAL3_DALIGN_FM<OPCODE6_DALIGN>;
+class DAUI_ENC : DAUI_FM;
+class DAHI_ENC : REGIMM_FM<OPCODE5_DAHI>;
+class DATI_ENC : REGIMM_FM<OPCODE5_DATI>;
+class DBITSWAP_ENC : SPECIAL3_2R_FM<OPCODE6_DBITSWAP>;
+class DCLO_R6_ENC : SPECIAL_2R_FM<OPCODE6_DCLO>;
+class DCLZ_R6_ENC : SPECIAL_2R_FM<OPCODE6_DCLZ>;
+class DDIV_ENC : SPECIAL_3R_FM<0b00010, 0b011110>;
+class DDIVU_ENC : SPECIAL_3R_FM<0b00010, 0b011111>;
+class DLSA_R6_ENC : SPECIAL_LSA_FM<OPCODE6_DLSA>;
+class DMOD_ENC : SPECIAL_3R_FM<0b00011, 0b011110>;
+class DMODU_ENC : SPECIAL_3R_FM<0b00011, 0b011111>;
+class DMUH_ENC : SPECIAL_3R_FM<0b00011, 0b011100>;
+class DMUHU_ENC : SPECIAL_3R_FM<0b00011, 0b011101>;
+class DMUL_R6_ENC : SPECIAL_3R_FM<0b00010, 0b011100>;
+class DMULU_ENC : SPECIAL_3R_FM<0b00010, 0b011101>;
+class LDPC_ENC : PCREL18_FM<OPCODE3_LDPC>;
+class LLD_R6_ENC : SPECIAL3_LL_SC_FM<OPCODE6_LLD>;
+class SCD_R6_ENC : SPECIAL3_LL_SC_FM<OPCODE6_SCD>;
+
+//===----------------------------------------------------------------------===//
+//
+// Instruction Descriptions
+//
+//===----------------------------------------------------------------------===//
+
+class AHI_ATI_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
+ dag OutOperandList = (outs GPROpnd:$rs);
+ dag InOperandList = (ins GPROpnd:$rt, simm16:$imm);
+ string AsmString = !strconcat(instr_asm, "\t$rt, $imm");
+ string Constraints = "$rs = $rt";
+}
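+
+// Roughly: dahi adds its sign-extended 16-bit immediate, shifted left by 32,
+// into the register, and dati adds it shifted left by 48, which is why the
+// class above ties the destination to the source ("$rs = $rt").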
+
+class DALIGN_DESC : ALIGN_DESC_BASE<"dalign", GPR64Opnd, uimm3>;
+class DAHI_DESC : AHI_ATI_DESC_BASE<"dahi", GPR64Opnd>;
+class DATI_DESC : AHI_ATI_DESC_BASE<"dati", GPR64Opnd>;
+class DAUI_DESC : AUI_DESC_BASE<"daui", GPR64Opnd>;
+class DBITSWAP_DESC : BITSWAP_DESC_BASE<"dbitswap", GPR64Opnd>;
+class DCLO_R6_DESC : CLO_R6_DESC_BASE<"dclo", GPR64Opnd>;
+class DCLZ_R6_DESC : CLZ_R6_DESC_BASE<"dclz", GPR64Opnd>;
+class DDIV_DESC : DIVMOD_DESC_BASE<"ddiv", GPR64Opnd, sdiv>;
+class DDIVU_DESC : DIVMOD_DESC_BASE<"ddivu", GPR64Opnd, udiv>;
+class DLSA_R6_DESC : LSA_R6_DESC_BASE<"dlsa", GPR64Opnd, uimm2>;
+class DMOD_DESC : DIVMOD_DESC_BASE<"dmod", GPR64Opnd, srem>;
+class DMODU_DESC : DIVMOD_DESC_BASE<"dmodu", GPR64Opnd, urem>;
+class DMUH_DESC : MUL_R6_DESC_BASE<"dmuh", GPR64Opnd, mulhs>;
+class DMUHU_DESC : MUL_R6_DESC_BASE<"dmuhu", GPR64Opnd, mulhu>;
+class DMUL_R6_DESC : MUL_R6_DESC_BASE<"dmul", GPR64Opnd, mul>;
+class DMULU_DESC : MUL_R6_DESC_BASE<"dmulu", GPR64Opnd>;
+class LDPC_DESC : PCREL_DESC_BASE<"ldpc", GPR64Opnd, simm18_lsl3>;
+class LLD_R6_DESC : LL_R6_DESC_BASE<"lld", GPR64Opnd>;
+class SCD_R6_DESC : SC_R6_DESC_BASE<"scd", GPR64Opnd>;
+class SELEQZ64_DESC : SELEQNE_Z_DESC_BASE<"seleqz", GPR64Opnd>;
+class SELNEZ64_DESC : SELEQNE_Z_DESC_BASE<"selnez", GPR64Opnd>;
+
+//===----------------------------------------------------------------------===//
+//
+// Instruction Definitions
+//
+//===----------------------------------------------------------------------===//
+
+def DAHI : DAHI_ENC, DAHI_DESC, ISA_MIPS64R6;
+def DALIGN : DALIGN_ENC, DALIGN_DESC, ISA_MIPS64R6;
+def DATI : DATI_ENC, DATI_DESC, ISA_MIPS64R6;
+def DAUI : DAUI_ENC, DAUI_DESC, ISA_MIPS64R6;
+def DBITSWAP : DBITSWAP_ENC, DBITSWAP_DESC, ISA_MIPS64R6;
+def DCLO_R6 : DCLO_R6_ENC, DCLO_R6_DESC, ISA_MIPS64R6;
+def DCLZ_R6 : DCLZ_R6_ENC, DCLZ_R6_DESC, ISA_MIPS64R6;
+def DDIV : DDIV_ENC, DDIV_DESC, ISA_MIPS64R6;
+def DDIVU : DDIVU_ENC, DDIVU_DESC, ISA_MIPS64R6;
+def DLSA_R6 : DLSA_R6_ENC, DLSA_R6_DESC, ISA_MIPS64R6;
+def DMOD : DMOD_ENC, DMOD_DESC, ISA_MIPS64R6;
+def DMODU : DMODU_ENC, DMODU_DESC, ISA_MIPS64R6;
+def DMUH: DMUH_ENC, DMUH_DESC, ISA_MIPS64R6;
+def DMUHU: DMUHU_ENC, DMUHU_DESC, ISA_MIPS64R6;
+def DMUL_R6: DMUL_R6_ENC, DMUL_R6_DESC, ISA_MIPS64R6;
+def DMULU: DMULU_ENC, DMULU_DESC, ISA_MIPS64R6;
+def LDPC: LDPC_ENC, LDPC_DESC, ISA_MIPS64R6;
+def LLD_R6 : LLD_R6_ENC, LLD_R6_DESC, ISA_MIPS32R6;
+def SCD_R6 : SCD_R6_ENC, SCD_R6_DESC, ISA_MIPS32R6;
+let DecoderNamespace = "Mips32r6_64r6_GP64" in {
+ def SELEQZ64 : SELEQZ_ENC, SELEQZ64_DESC, ISA_MIPS32R6, GPR_64;
+ def SELNEZ64 : SELNEZ_ENC, SELNEZ64_DESC, ISA_MIPS32R6, GPR_64;
+}
+
+//===----------------------------------------------------------------------===//
+//
+// Instruction Aliases
+//
+//===----------------------------------------------------------------------===//
+
+def : MipsInstAlias<"jr $rs", (JALR64 ZERO_64, GPR64Opnd:$rs), 1>, ISA_MIPS64R6;
+
+//===----------------------------------------------------------------------===//
+//
+// Patterns and Pseudo Instructions
+//
+//===----------------------------------------------------------------------===//
+
+// i64 selects
+def : MipsPat<(select i64:$cond, i64:$t, i64:$f),
+ (OR64 (SELNEZ64 i64:$t, i64:$cond),
+ (SELEQZ64 i64:$f, i64:$cond))>,
+ ISA_MIPS64R6;
+def : MipsPat<(select (i32 (seteq i64:$cond, immz)), i64:$t, i64:$f),
+ (OR64 (SELEQZ64 i64:$t, i64:$cond),
+ (SELNEZ64 i64:$f, i64:$cond))>,
+ ISA_MIPS64R6;
+def : MipsPat<(select (i32 (setne i64:$cond, immz)), i64:$t, i64:$f),
+ (OR64 (SELNEZ64 i64:$t, i64:$cond),
+ (SELEQZ64 i64:$f, i64:$cond))>,
+ ISA_MIPS64R6;
+def : MipsPat<(select (i32 (seteq i64:$cond, immZExt16_64:$imm)), i64:$t, i64:$f),
+ (OR64 (SELEQZ64 i64:$t, (XORi64 i64:$cond, immZExt16_64:$imm)),
+ (SELNEZ64 i64:$f, (XORi64 i64:$cond, immZExt16_64:$imm)))>,
+ ISA_MIPS64R6;
+def : MipsPat<(select (i32 (setne i64:$cond, immZExt16_64:$imm)), i64:$t, i64:$f),
+ (OR64 (SELNEZ64 i64:$t, (XORi64 i64:$cond, immZExt16_64:$imm)),
+ (SELEQZ64 i64:$f, (XORi64 i64:$cond, immZExt16_64:$imm)))>,
+ ISA_MIPS64R6;
+def : MipsPat<
+ (select (i32 (setgt i64:$cond, immSExt16Plus1:$imm)), i64:$t, i64:$f),
+ (OR64 (SELEQZ64 i64:$t,
+ (SUBREG_TO_REG (i64 0), (SLTi64 i64:$cond, (Plus1 imm:$imm)),
+ sub_32)),
+ (SELNEZ64 i64:$f,
+ (SUBREG_TO_REG (i64 0), (SLTi64 i64:$cond, (Plus1 imm:$imm)),
+ sub_32)))>,
+ ISA_MIPS64R6;
+def : MipsPat<
+ (select (i32 (setugt i64:$cond, immSExt16Plus1:$imm)), i64:$t, i64:$f),
+ (OR64 (SELEQZ64 i64:$t,
+ (SUBREG_TO_REG (i64 0), (SLTiu64 i64:$cond, (Plus1 imm:$imm)),
+ sub_32)),
+ (SELNEZ64 i64:$f,
+ (SUBREG_TO_REG (i64 0), (SLTiu64 i64:$cond, (Plus1 imm:$imm)),
+ sub_32)))>,
+ ISA_MIPS64R6;
+
+def : MipsPat<(select (i32 (setne i64:$cond, immz)), i64:$t, immz),
+ (SELNEZ64 i64:$t, i64:$cond)>, ISA_MIPS64R6;
+def : MipsPat<(select (i32 (seteq i64:$cond, immz)), i64:$t, immz),
+ (SELEQZ64 i64:$t, i64:$cond)>, ISA_MIPS64R6;
+def : MipsPat<(select (i32 (setne i64:$cond, immz)), immz, i64:$f),
+ (SELEQZ64 i64:$f, i64:$cond)>, ISA_MIPS64R6;
+def : MipsPat<(select (i32 (seteq i64:$cond, immz)), immz, i64:$f),
+ (SELNEZ64 i64:$f, i64:$cond)>, ISA_MIPS64R6;
+
+// i64 selects from an i32 comparison
+// One complicating factor here is that bits 32-63 of an i32 are undefined.
+// FIXME: Ideally, setcc would always produce an i64 on MIPS64 targets.
+// This would allow us to remove the sign-extensions here.
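+// (SLL64_32 is the "sll $rd, $rt, 0" form defined in Mips64InstrInfo.td; on
+// MIPS64 this sign-extends the 32-bit condition into all 64 bits, so the
+// 64-bit seleqz/selnez below see a fully defined value.)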
+def : MipsPat<(select i32:$cond, i64:$t, i64:$f),
+ (OR64 (SELNEZ64 i64:$t, (SLL64_32 i32:$cond)),
+ (SELEQZ64 i64:$f, (SLL64_32 i32:$cond)))>,
+ ISA_MIPS64R6;
+def : MipsPat<(select (i32 (seteq i32:$cond, immz)), i64:$t, i64:$f),
+ (OR64 (SELEQZ64 i64:$t, (SLL64_32 i32:$cond)),
+ (SELNEZ64 i64:$f, (SLL64_32 i32:$cond)))>,
+ ISA_MIPS64R6;
+def : MipsPat<(select (i32 (setne i32:$cond, immz)), i64:$t, i64:$f),
+ (OR64 (SELNEZ64 i64:$t, (SLL64_32 i32:$cond)),
+ (SELEQZ64 i64:$f, (SLL64_32 i32:$cond)))>,
+ ISA_MIPS64R6;
+def : MipsPat<(select (i32 (seteq i32:$cond, immZExt16:$imm)), i64:$t, i64:$f),
+ (OR64 (SELEQZ64 i64:$t, (SLL64_32 (XORi i32:$cond,
+ immZExt16:$imm))),
+ (SELNEZ64 i64:$f, (SLL64_32 (XORi i32:$cond,
+ immZExt16:$imm))))>,
+ ISA_MIPS64R6;
+def : MipsPat<(select (i32 (setne i32:$cond, immZExt16:$imm)), i64:$t, i64:$f),
+ (OR64 (SELNEZ64 i64:$t, (SLL64_32 (XORi i32:$cond,
+ immZExt16:$imm))),
+ (SELEQZ64 i64:$f, (SLL64_32 (XORi i32:$cond,
+ immZExt16:$imm))))>,
+ ISA_MIPS64R6;
+
+def : MipsPat<(select i32:$cond, i64:$t, immz),
+ (SELNEZ64 i64:$t, (SLL64_32 i32:$cond))>,
+ ISA_MIPS64R6;
+def : MipsPat<(select (i32 (setne i32:$cond, immz)), i64:$t, immz),
+ (SELNEZ64 i64:$t, (SLL64_32 i32:$cond))>,
+ ISA_MIPS64R6;
+def : MipsPat<(select (i32 (seteq i32:$cond, immz)), i64:$t, immz),
+ (SELEQZ64 i64:$t, (SLL64_32 i32:$cond))>,
+ ISA_MIPS64R6;
+def : MipsPat<(select i32:$cond, immz, i64:$f),
+ (SELEQZ64 i64:$f, (SLL64_32 i32:$cond))>,
+ ISA_MIPS64R6;
+def : MipsPat<(select (i32 (setne i32:$cond, immz)), immz, i64:$f),
+ (SELEQZ64 i64:$f, (SLL64_32 i32:$cond))>,
+ ISA_MIPS64R6;
+def : MipsPat<(select (i32 (seteq i32:$cond, immz)), immz, i64:$f),
+ (SELNEZ64 i64:$f, (SLL64_32 i32:$cond))>,
+ ISA_MIPS64R6;
diff --git a/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
index 45c4398..7f21d68 100644
--- a/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -12,9 +12,9 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mips-asm-printer"
#include "InstPrinter/MipsInstPrinter.h"
#include "MCTargetDesc/MipsBaseInfo.h"
+#include "MCTargetDesc/MipsMCNaCl.h"
#include "Mips.h"
#include "MipsAsmPrinter.h"
#include "MipsInstrInfo.h"
@@ -27,35 +27,61 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFStreamer.h"
+#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
+#include <string>
using namespace llvm;
+#define DEBUG_TYPE "mips-asm-printer"
+
MipsTargetStreamer &MipsAsmPrinter::getTargetStreamer() {
- return static_cast<MipsTargetStreamer &>(OutStreamer.getTargetStreamer());
+ return static_cast<MipsTargetStreamer &>(*OutStreamer.getTargetStreamer());
}
bool MipsAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
+ Subtarget = &TM.getSubtarget<MipsSubtarget>();
+
// Initialize TargetLoweringObjectFile.
- if (Subtarget->allowMixed16_32())
- const_cast<TargetLoweringObjectFile&>(getObjFileLowering())
+ const_cast<TargetLoweringObjectFile &>(getObjFileLowering())
.Initialize(OutContext, TM);
+
MipsFI = MF.getInfo<MipsFunctionInfo>();
+ if (Subtarget->inMips16Mode())
+ for (std::map<
+ const char *,
+ const llvm::Mips16HardFloatInfo::FuncSignature *>::const_iterator
+ it = MipsFI->StubsNeeded.begin();
+ it != MipsFI->StubsNeeded.end(); ++it) {
+ const char *Symbol = it->first;
+ const llvm::Mips16HardFloatInfo::FuncSignature *Signature = it->second;
+ if (StubsNeeded.find(Symbol) == StubsNeeded.end())
+ StubsNeeded[Symbol] = Signature;
+ }
MCP = MF.getConstantPool();
+
+ // In NaCl, all indirect jump targets must be aligned to bundle size.
+ if (Subtarget->isTargetNaCl())
+ NaClAlignIndirectJumpTargets(MF);
+
AsmPrinter::runOnMachineFunction(MF);
return true;
}
@@ -67,7 +93,46 @@ bool MipsAsmPrinter::lowerOperand(const MachineOperand &MO, MCOperand &MCOp) {
#include "MipsGenMCPseudoLowering.inc"
+// Lower PseudoReturn/PseudoIndirectBranch/PseudoIndirectBranch64 to JR, JR_MM,
+// JALR, or JALR64 as appropriate for the target
+void MipsAsmPrinter::emitPseudoIndirectBranch(MCStreamer &OutStreamer,
+ const MachineInstr *MI) {
+ bool HasLinkReg = false;
+ MCInst TmpInst0;
+
+ if (Subtarget->hasMips64r6()) {
+ // MIPS64r6 should use (JALR64 ZERO_64, $rs)
+ TmpInst0.setOpcode(Mips::JALR64);
+ HasLinkReg = true;
+ } else if (Subtarget->hasMips32r6()) {
+ // MIPS32r6 should use (JALR ZERO, $rs)
+ TmpInst0.setOpcode(Mips::JALR);
+ HasLinkReg = true;
+ } else if (Subtarget->inMicroMipsMode())
+ // microMIPS should use (JR_MM $rs)
+ TmpInst0.setOpcode(Mips::JR_MM);
+ else {
+ // Everything else should use (JR $rs)
+ TmpInst0.setOpcode(Mips::JR);
+ }
+
+ MCOperand MCOp;
+
+ if (HasLinkReg) {
+ unsigned ZeroReg = Subtarget->isGP64bit() ? Mips::ZERO_64 : Mips::ZERO;
+ TmpInst0.addOperand(MCOperand::CreateReg(ZeroReg));
+ }
+
+ lowerOperand(MI->getOperand(0), MCOp);
+ TmpInst0.addOperand(MCOp);
+
+ EmitToStreamer(OutStreamer, TmpInst0);
+}
+
void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) {
+ MipsTargetStreamer &TS = getTargetStreamer();
+ TS.setCanHaveModuleDir(false);
+
if (MI->isDebugValue()) {
SmallString<128> Str;
raw_svector_ostream OS(Str);
@@ -117,6 +182,14 @@ void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) {
if (emitPseudoExpansionLowering(OutStreamer, &*I))
continue;
+ if (I->getOpcode() == Mips::PseudoReturn ||
+ I->getOpcode() == Mips::PseudoReturn64 ||
+ I->getOpcode() == Mips::PseudoIndirectBranch ||
+ I->getOpcode() == Mips::PseudoIndirectBranch64) {
+ emitPseudoIndirectBranch(OutStreamer, &*I);
+ continue;
+ }
+
// The inMips16Mode() test is not permanent.
// Some instructions are marked as pseudo right now which
// would make the test fail for the wrong reason but
@@ -124,12 +197,13 @@ void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// removing another test for this situation downstream in the
// callchain.
//
- if (I->isPseudo() && !Subtarget->inMips16Mode())
+ if (I->isPseudo() && !Subtarget->inMips16Mode()
+ && !isLongBranchPseudo(I->getOpcode()))
llvm_unreachable("Pseudo opcode found in EmitInstruction()");
MCInst TmpInst0;
MCInstLowering.Lower(I, TmpInst0);
- OutStreamer.EmitInstruction(TmpInst0);
+ EmitToStreamer(OutStreamer, TmpInst0);
} while ((++I != E) && I->isInsideBundle()); // Delay slot check
}
@@ -170,7 +244,7 @@ void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// Create a bitmask with all callee saved registers for CPU or Floating Point
// registers. For CPU registers consider RA, GP and FP for saving if necessary.
-void MipsAsmPrinter::printSavedRegsBitmask(raw_ostream &O) {
+void MipsAsmPrinter::printSavedRegsBitmask() {
// CPU and FPU Saved Registers Bitmasks
unsigned CPUBitmask = 0, FPUBitmask = 0;
int CPUTopSavedRegOff, FPUTopSavedRegOff;
@@ -218,20 +292,12 @@ void MipsAsmPrinter::printSavedRegsBitmask(raw_ostream &O) {
// CPU Regs are saved below FP Regs.
CPUTopSavedRegOff = CPUBitmask ? -CSFPRegsSize - CPURegSize : 0;
+ MipsTargetStreamer &TS = getTargetStreamer();
// Print CPUBitmask
- O << "\t.mask \t"; printHex32(CPUBitmask, O);
- O << ',' << CPUTopSavedRegOff << '\n';
+ TS.emitMask(CPUBitmask, CPUTopSavedRegOff);
// Print FPUBitmask
- O << "\t.fmask\t"; printHex32(FPUBitmask, O);
- O << "," << FPUTopSavedRegOff << '\n';
-}
-
-// Print a 32 bit hex number with all numbers.
-void MipsAsmPrinter::printHex32(unsigned Value, raw_ostream &O) {
- O << "0x";
- for (int i = 7; i >= 0; i--)
- O.write_hex((Value & (0xF << (i*4))) >> (i*4));
+ TS.emitFMask(FPUBitmask, FPUTopSavedRegOff);
}
//===----------------------------------------------------------------------===//
@@ -246,11 +312,7 @@ void MipsAsmPrinter::emitFrameDirective() {
unsigned returnReg = RI.getRARegister();
unsigned stackSize = MF->getFrameInfo()->getStackSize();
- if (OutStreamer.hasRawTextSupport())
- OutStreamer.EmitRawText("\t.frame\t$" +
- StringRef(MipsInstPrinter::getRegisterName(stackReg)).lower() +
- "," + Twine(stackSize) + ",$" +
- StringRef(MipsInstPrinter::getRegisterName(returnReg)).lower());
+ getTargetStreamer().emitFrame(stackReg, stackSize, returnReg);
}
/// Emit Set directives.
@@ -265,25 +327,32 @@ const char *MipsAsmPrinter::getCurrentABIString() const {
}
void MipsAsmPrinter::EmitFunctionEntryLabel() {
- if (OutStreamer.hasRawTextSupport()) {
- if (Subtarget->inMips16Mode())
- OutStreamer.EmitRawText(StringRef("\t.set\tmips16"));
- else
- OutStreamer.EmitRawText(StringRef("\t.set\tnomips16"));
- // leave out until FSF available gas has micromips changes
- // OutStreamer.EmitRawText(StringRef("\t.set\tnomicromips"));
- OutStreamer.EmitRawText("\t.ent\t" + Twine(CurrentFnSym->getName()));
- }
+ MipsTargetStreamer &TS = getTargetStreamer();
+
+ // NaCl sandboxing requires that indirect call instructions are masked.
+ // This means that function entry points should be bundle-aligned.
+ if (Subtarget->isTargetNaCl())
+ EmitAlignment(std::max(MF->getAlignment(), MIPS_NACL_BUNDLE_ALIGN));
if (Subtarget->inMicroMipsMode())
- getTargetStreamer().emitMipsHackSTOCG(CurrentFnSym,
- (unsigned)ELF::STO_MIPS_MICROMIPS);
+ TS.emitDirectiveSetMicroMips();
+ else
+ TS.emitDirectiveSetNoMicroMips();
+
+ if (Subtarget->inMips16Mode())
+ TS.emitDirectiveSetMips16();
+ else
+ TS.emitDirectiveSetNoMips16();
+
+ TS.emitDirectiveEnt(*CurrentFnSym);
OutStreamer.EmitLabel(CurrentFnSym);
}
/// EmitFunctionBodyStart - Targets can override this to emit stuff before
/// the first basic block in the function.
void MipsAsmPrinter::EmitFunctionBodyStart() {
+ MipsTargetStreamer &TS = getTargetStreamer();
+
MCInstLowering.Initialize(&MF->getContext());
bool IsNakedFunction =
@@ -293,34 +362,30 @@ void MipsAsmPrinter::EmitFunctionBodyStart() {
if (!IsNakedFunction)
emitFrameDirective();
- if (OutStreamer.hasRawTextSupport()) {
- SmallString<128> Str;
- raw_svector_ostream OS(Str);
- if (!IsNakedFunction)
- printSavedRegsBitmask(OS);
- OutStreamer.EmitRawText(OS.str());
- if (!Subtarget->inMips16Mode()) {
- OutStreamer.EmitRawText(StringRef("\t.set\tnoreorder"));
- OutStreamer.EmitRawText(StringRef("\t.set\tnomacro"));
- OutStreamer.EmitRawText(StringRef("\t.set\tnoat"));
- }
+ if (!IsNakedFunction)
+ printSavedRegsBitmask();
+
+ if (!Subtarget->inMips16Mode()) {
+ TS.emitDirectiveSetNoReorder();
+ TS.emitDirectiveSetNoMacro();
+ TS.emitDirectiveSetNoAt();
}
}
/// EmitFunctionBodyEnd - Targets can override this to emit stuff after
/// the last basic block in the function.
void MipsAsmPrinter::EmitFunctionBodyEnd() {
+ MipsTargetStreamer &TS = getTargetStreamer();
+
// There are instructions for these macros, but they must always be at the
// function end, and we cannot emit them there without breaking the BB
// logic.
- if (OutStreamer.hasRawTextSupport()) {
- if (!Subtarget->inMips16Mode()) {
- OutStreamer.EmitRawText(StringRef("\t.set\tat"));
- OutStreamer.EmitRawText(StringRef("\t.set\tmacro"));
- OutStreamer.EmitRawText(StringRef("\t.set\treorder"));
- }
- OutStreamer.EmitRawText("\t.end\t" + Twine(CurrentFnSym->getName()));
+ if (!Subtarget->inMips16Mode()) {
+ TS.emitDirectiveSetAt();
+ TS.emitDirectiveSetMacro();
+ TS.emitDirectiveSetReorder();
}
+ TS.emitDirectiveEnd(CurrentFnSym->getName());
// Make sure to terminate any constant pools that were at the end
// of the function.
if (!InConstantPool)
@@ -495,6 +560,7 @@ bool MipsAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
void MipsAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
raw_ostream &O) {
+ const DataLayout *DL = TM.getDataLayout();
const MachineOperand &MO = MI->getOperand(opNum);
bool closeP = false;
@@ -542,17 +608,8 @@ void MipsAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
break;
}
- case MachineOperand::MO_ExternalSymbol:
- O << *GetExternalSymbolSymbol(MO.getSymbolName());
- break;
-
- case MachineOperand::MO_JumpTableIndex:
- O << MAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber()
- << '_' << MO.getIndex();
- break;
-
case MachineOperand::MO_ConstantPoolIndex:
- O << MAI->getPrivateGlobalPrefix() << "CPI"
+ O << DL->getPrivateGlobalPrefix() << "CPI"
<< getFunctionNumber() << "_" << MO.getIndex();
if (MO.getOffset())
O << "+" << MO.getOffset();
@@ -612,86 +669,329 @@ printFCCOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
}
void MipsAsmPrinter::EmitStartOfAsmFile(Module &M) {
- // FIXME: Use SwitchSection.
-
// TODO: Need to add -mabicalls and -mno-abicalls flags.
// Currently we assume that -mabicalls is the default.
- if (OutStreamer.hasRawTextSupport()) {
- OutStreamer.EmitRawText(StringRef("\t.abicalls"));
- Reloc::Model RM = Subtarget->getRelocationModel();
- if (RM == Reloc::Static && !Subtarget->hasMips64())
- OutStreamer.EmitRawText(StringRef("\t.option\tpic0"));
+ bool IsABICalls = true;
+ if (IsABICalls) {
+ getTargetStreamer().emitDirectiveAbiCalls();
+ Reloc::Model RM = TM.getRelocationModel();
+ // FIXME: This condition should be a lot more complicated than it is here.
+ // Ideally it should test for properties of the ABI and not the ABI
+ // itself.
+ // For the moment, I'm only correcting enough to make MIPS-IV work.
+ if (RM == Reloc::Static && !Subtarget->isABI_N64())
+ getTargetStreamer().emitDirectiveOptionPic0();
}
// Tell the assembler which ABI we are using
- if (OutStreamer.hasRawTextSupport())
- OutStreamer.EmitRawText("\t.section .mdebug." +
- Twine(getCurrentABIString()));
+ std::string SectionName = std::string(".mdebug.") + getCurrentABIString();
+ OutStreamer.SwitchSection(OutContext.getELFSection(
+ SectionName, ELF::SHT_PROGBITS, 0, SectionKind::getDataRel()));
+
+ // NaN: At the moment we only support:
+ // 1. .nan legacy (default)
+ // 2. .nan 2008
+ Subtarget->isNaN2008() ? getTargetStreamer().emitDirectiveNaN2008()
+ : getTargetStreamer().emitDirectiveNaNLegacy();
// TODO: handle O64 ABI
- if (OutStreamer.hasRawTextSupport()) {
- if (Subtarget->isABI_EABI()) {
- if (Subtarget->isGP32bit())
- OutStreamer.EmitRawText(StringRef("\t.section .gcc_compiled_long32"));
- else
- OutStreamer.EmitRawText(StringRef("\t.section .gcc_compiled_long64"));
- }
+
+ if (Subtarget->isABI_EABI()) {
+ if (Subtarget->isGP32bit())
+ OutStreamer.SwitchSection(
+ OutContext.getELFSection(".gcc_compiled_long32", ELF::SHT_PROGBITS, 0,
+ SectionKind::getDataRel()));
+ else
+ OutStreamer.SwitchSection(
+ OutContext.getELFSection(".gcc_compiled_long64", ELF::SHT_PROGBITS, 0,
+ SectionKind::getDataRel()));
}
- // return to previous section
- if (OutStreamer.hasRawTextSupport())
- OutStreamer.EmitRawText(StringRef("\t.previous"));
+ getTargetStreamer().updateABIInfo(*Subtarget);
+
+ // We should always emit a '.module fp=...' but binutils 2.24 does not accept
+ // it. We therefore emit it when it contradicts the ABI defaults (-mfpxx or
+ // -mfp64) and omit it otherwise.
+ if (Subtarget->isABI_O32() && (Subtarget->isABI_FPXX() ||
+ Subtarget->isFP64bit()))
+ getTargetStreamer().emitDirectiveModuleFP();
+
+ // We should always emit a '.module [no]oddspreg' but binutils 2.24 does not
+ // accept it. We therefore emit it when it contradicts the default or an
+ // option has changed the default (i.e. FPXX) and omit it otherwise.
+ if (Subtarget->isABI_O32() && (!Subtarget->useOddSPReg() ||
+ Subtarget->isABI_FPXX()))
+ getTargetStreamer().emitDirectiveModuleOddSPReg(Subtarget->useOddSPReg(),
+ Subtarget->isABI_O32());
+}
+void MipsAsmPrinter::EmitJal(MCSymbol *Symbol) {
+ MCInst I;
+ I.setOpcode(Mips::JAL);
+ I.addOperand(
+ MCOperand::CreateExpr(MCSymbolRefExpr::Create(Symbol, OutContext)));
+ OutStreamer.EmitInstruction(I, getSubtargetInfo());
}
-static void emitELFHeaderFlagsCG(MipsTargetStreamer &TargetStreamer,
- const MipsSubtarget &Subtarget) {
- // Update e_header flags
- unsigned EFlags = 0;
+void MipsAsmPrinter::EmitInstrReg(unsigned Opcode, unsigned Reg) {
+ MCInst I;
+ I.setOpcode(Opcode);
+ I.addOperand(MCOperand::CreateReg(Reg));
+ OutStreamer.EmitInstruction(I, getSubtargetInfo());
+}
- // TODO: Need to add -mabicalls and -mno-abicalls flags.
- // Currently we assume that -mabicalls is the default.
- EFlags |= ELF::EF_MIPS_CPIC;
+void MipsAsmPrinter::EmitInstrRegReg(unsigned Opcode, unsigned Reg1,
+ unsigned Reg2) {
+ MCInst I;
+ //
+ // Because of the current td files for Mips32, the operands for MTC1
+ // appear backwards from their normal assembly order. It's not a trivial
+ // change to fix this in the td file so we adjust for it here.
+ //
+ if (Opcode == Mips::MTC1) {
+ unsigned Temp = Reg1;
+ Reg1 = Reg2;
+ Reg2 = Temp;
+ }
+ I.setOpcode(Opcode);
+ I.addOperand(MCOperand::CreateReg(Reg1));
+ I.addOperand(MCOperand::CreateReg(Reg2));
+ OutStreamer.EmitInstruction(I, getSubtargetInfo());
+}
- if (Subtarget.inMips16Mode())
- EFlags |= ELF::EF_MIPS_ARCH_ASE_M16;
- else
- EFlags |= ELF::EF_MIPS_NOREORDER;
-
- // Architecture
- if (Subtarget.hasMips64r2())
- EFlags |= ELF::EF_MIPS_ARCH_64R2;
- else if (Subtarget.hasMips64())
- EFlags |= ELF::EF_MIPS_ARCH_64;
- else if (Subtarget.hasMips32r2())
- EFlags |= ELF::EF_MIPS_ARCH_32R2;
- else
- EFlags |= ELF::EF_MIPS_ARCH_32;
+void MipsAsmPrinter::EmitInstrRegRegReg(unsigned Opcode, unsigned Reg1,
+ unsigned Reg2, unsigned Reg3) {
+ MCInst I;
+ I.setOpcode(Opcode);
+ I.addOperand(MCOperand::CreateReg(Reg1));
+ I.addOperand(MCOperand::CreateReg(Reg2));
+ I.addOperand(MCOperand::CreateReg(Reg3));
+ OutStreamer.EmitInstruction(I, getSubtargetInfo());
+}
- if (Subtarget.inMicroMipsMode())
- EFlags |= ELF::EF_MIPS_MICROMIPS;
+void MipsAsmPrinter::EmitMovFPIntPair(unsigned MovOpc, unsigned Reg1,
+ unsigned Reg2, unsigned FPReg1,
+ unsigned FPReg2, bool LE) {
+ if (!LE) {
+ unsigned temp = Reg1;
+ Reg1 = Reg2;
+ Reg2 = temp;
+ }
+ EmitInstrRegReg(MovOpc, Reg1, FPReg1);
+ EmitInstrRegReg(MovOpc, Reg2, FPReg2);
+}
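+
+// For example, EmitMovFPIntPair(Mips::MTC1, Mips::A0, Mips::A1, Mips::F12,
+// Mips::F13, LE) moves $a0 into $f12 and $a1 into $f13 on a little-endian
+// target; on a big-endian target the A0/A1 pair is swapped first so the word
+// order matches the in-memory layout of the double.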
- // ABI
- if (Subtarget.isABI_O32())
- EFlags |= ELF::EF_MIPS_ABI_O32;
+void MipsAsmPrinter::EmitSwapFPIntParams(Mips16HardFloatInfo::FPParamVariant PV,
+ bool LE, bool ToFP) {
+ using namespace Mips16HardFloatInfo;
+ unsigned MovOpc = ToFP ? Mips::MTC1 : Mips::MFC1;
+ switch (PV) {
+ case FSig:
+ EmitInstrRegReg(MovOpc, Mips::A0, Mips::F12);
+ break;
+ case FFSig:
+ EmitMovFPIntPair(MovOpc, Mips::A0, Mips::A1, Mips::F12, Mips::F14, LE);
+ break;
+ case FDSig:
+ EmitInstrRegReg(MovOpc, Mips::A0, Mips::F12);
+ EmitMovFPIntPair(MovOpc, Mips::A2, Mips::A3, Mips::F14, Mips::F15, LE);
+ break;
+ case DSig:
+ EmitMovFPIntPair(MovOpc, Mips::A0, Mips::A1, Mips::F12, Mips::F13, LE);
+ break;
+ case DDSig:
+ EmitMovFPIntPair(MovOpc, Mips::A0, Mips::A1, Mips::F12, Mips::F13, LE);
+ EmitMovFPIntPair(MovOpc, Mips::A2, Mips::A3, Mips::F14, Mips::F15, LE);
+ break;
+ case DFSig:
+ EmitMovFPIntPair(MovOpc, Mips::A0, Mips::A1, Mips::F12, Mips::F13, LE);
+ EmitInstrRegReg(MovOpc, Mips::A2, Mips::F14);
+ break;
+ case NoSig:
+ return;
+ }
+}
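+
+// For example, the FFSig variant (float, float) moves $a0 into $f12 and $a1
+// into $f14, while DSig (double) moves the $a0/$a1 pair into $f12/$f13.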
- // Relocation Model
- Reloc::Model RM = Subtarget.getRelocationModel();
- if (RM == Reloc::PIC_ || RM == Reloc::Default)
- EFlags |= ELF::EF_MIPS_PIC;
- else if (RM == Reloc::Static)
- ; // Do nothing for Reloc::Static
- else
- llvm_unreachable("Unsupported relocation model for e_flags");
+void
+MipsAsmPrinter::EmitSwapFPIntRetval(Mips16HardFloatInfo::FPReturnVariant RV,
+ bool LE) {
+ using namespace Mips16HardFloatInfo;
+ unsigned MovOpc = Mips::MFC1;
+ switch (RV) {
+ case FRet:
+ EmitInstrRegReg(MovOpc, Mips::V0, Mips::F0);
+ break;
+ case DRet:
+ EmitMovFPIntPair(MovOpc, Mips::V0, Mips::V1, Mips::F0, Mips::F1, LE);
+ break;
+ case CFRet:
+ EmitMovFPIntPair(MovOpc, Mips::V0, Mips::V1, Mips::F0, Mips::F1, LE);
+ break;
+ case CDRet:
+ EmitMovFPIntPair(MovOpc, Mips::V0, Mips::V1, Mips::F0, Mips::F1, LE);
+ EmitMovFPIntPair(MovOpc, Mips::A0, Mips::A1, Mips::F2, Mips::F3, LE);
+ break;
+ case NoFPRet:
+ break;
+ }
+}
- TargetStreamer.emitMipsHackELFFlags(EFlags);
+void MipsAsmPrinter::EmitFPCallStub(
+ const char *Symbol, const Mips16HardFloatInfo::FuncSignature *Signature) {
+ MCSymbol *MSymbol = OutContext.GetOrCreateSymbol(StringRef(Symbol));
+ using namespace Mips16HardFloatInfo;
+ bool LE = Subtarget->isLittle();
+ //
+ // .global xxxx
+ //
+ OutStreamer.EmitSymbolAttribute(MSymbol, MCSA_Global);
+ const char *RetType;
+ //
+ // make the comment field identifying the return and parameter
+ // types of the floating point stub
+ // # Stub function to call rettype xxxx (params)
+ //
+ switch (Signature->RetSig) {
+ case FRet:
+ RetType = "float";
+ break;
+ case DRet:
+ RetType = "double";
+ break;
+ case CFRet:
+ RetType = "complex";
+ break;
+ case CDRet:
+ RetType = "double complex";
+ break;
+ case NoFPRet:
+ RetType = "";
+ break;
+ }
+ const char *Parms;
+ switch (Signature->ParamSig) {
+ case FSig:
+ Parms = "float";
+ break;
+ case FFSig:
+ Parms = "float, float";
+ break;
+ case FDSig:
+ Parms = "float, double";
+ break;
+ case DSig:
+ Parms = "double";
+ break;
+ case DDSig:
+ Parms = "double, double";
+ break;
+ case DFSig:
+ Parms = "double, float";
+ break;
+ case NoSig:
+ Parms = "";
+ break;
+ }
+ OutStreamer.AddComment("\t# Stub function to call " + Twine(RetType) + " " +
+ Twine(Symbol) + " (" + Twine(Parms) + ")");
+ //
+ // probably not necessary but we save and restore the current section state
+ //
+ OutStreamer.PushSection();
+ //
+ // .section mips16.call.fpxxxx,"ax",@progbits
+ //
+ const MCSectionELF *M = OutContext.getELFSection(
+ ".mips16.call.fp." + std::string(Symbol), ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_EXECINSTR, SectionKind::getText());
+ OutStreamer.SwitchSection(M, nullptr);
+ //
+ // .align 2
+ //
+ OutStreamer.EmitValueToAlignment(4);
+ MipsTargetStreamer &TS = getTargetStreamer();
+ //
+ // .set nomips16
+ // .set nomicromips
+ //
+ TS.emitDirectiveSetNoMips16();
+ TS.emitDirectiveSetNoMicroMips();
+ //
+ // .ent __call_stub_fp_xxxx
+ // .type __call_stub_fp_xxxx,@function
+ // __call_stub_fp_xxxx:
+ //
+ std::string x = "__call_stub_fp_" + std::string(Symbol);
+ MCSymbol *Stub = OutContext.GetOrCreateSymbol(StringRef(x));
+ TS.emitDirectiveEnt(*Stub);
+ MCSymbol *MType =
+ OutContext.GetOrCreateSymbol("__call_stub_fp_" + Twine(Symbol));
+ OutStreamer.EmitSymbolAttribute(MType, MCSA_ELF_TypeFunction);
+ OutStreamer.EmitLabel(Stub);
+ //
+ // We only handle non-PIC for now; these functions will not be called
+ // otherwise. When the full stub generation is moved here, we will need
+ // to deal with PIC.
+ //
+ if (Subtarget->getRelocationModel() == Reloc::PIC_)
+ llvm_unreachable("should not be here if we are compiling pic");
+ TS.emitDirectiveSetReorder();
+ //
+ // We need to add a MipsMCExpr class to MCTargetDesc to fully implement
+ // stubs without raw text, but this current patch is for compiler-generated
+ // functions and they all return some value.
+ // The calling sequence for non-PIC is different in that case, and we need
+ // to implement %lo and %hi in order to handle the case of no return value.
+ // See the corresponding method in Mips16HardFloat for details.
+ //
+ // Move the return address to S2.
+ // We have no stack space to store it, and we are about to make another call.
+ // We need to make sure that the enclosing function knows to save S2; this
+ // should have already been handled.
+ //
+ // Mov $18, $31
+
+ EmitInstrRegRegReg(Mips::ADDu, Mips::S2, Mips::RA, Mips::ZERO);
+
+ EmitSwapFPIntParams(Signature->ParamSig, LE, true);
+
+ // Jal xxxx
+ //
+ EmitJal(MSymbol);
+
+ // fix return values
+ EmitSwapFPIntRetval(Signature->RetSig, LE);
+ //
+ // do the return
+ // if (Signature->RetSig == NoFPRet)
+ // llvm_unreachable("should not be any stubs here with no return value");
+ // else
+ EmitInstrReg(Mips::JR, Mips::S2);
+
+ MCSymbol *Tmp = OutContext.CreateTempSymbol();
+ OutStreamer.EmitLabel(Tmp);
+ const MCSymbolRefExpr *E = MCSymbolRefExpr::Create(Stub, OutContext);
+ const MCSymbolRefExpr *T = MCSymbolRefExpr::Create(Tmp, OutContext);
+ const MCExpr *T_min_E = MCBinaryExpr::CreateSub(T, E, OutContext);
+ OutStreamer.EmitELFSize(Stub, T_min_E);
+ TS.emitDirectiveEnd(x);
+ OutStreamer.PopSection();
}
void MipsAsmPrinter::EmitEndOfAsmFile(Module &M) {
- // Emit Mips ELF register info
- Subtarget->getMReginfo().emitMipsReginfoSectionCG(
- OutStreamer, getObjFileLowering(), *Subtarget);
- emitELFHeaderFlagsCG(getTargetStreamer(), *Subtarget);
+ // Emit needed stubs
+ //
+ for (std::map<
+ const char *,
+ const llvm::Mips16HardFloatInfo::FuncSignature *>::const_iterator
+ it = StubsNeeded.begin();
+ it != StubsNeeded.end(); ++it) {
+ const char *Symbol = it->first;
+ const llvm::Mips16HardFloatInfo::FuncSignature *Signature = it->second;
+ EmitFPCallStub(Symbol, Signature);
+ }
+ // return to the text section
+ OutStreamer.SwitchSection(OutContext.getObjectFileInfo()->getTextSection());
}
void MipsAsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
@@ -699,6 +999,34 @@ void MipsAsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
// TODO: implement
}
+// Align all targets of indirect branches on bundle size. Used only if target
+// is NaCl.
+void MipsAsmPrinter::NaClAlignIndirectJumpTargets(MachineFunction &MF) {
+ // Align all blocks that are jumped to through jump table.
+ if (MachineJumpTableInfo *JtInfo = MF.getJumpTableInfo()) {
+ const std::vector<MachineJumpTableEntry> &JT = JtInfo->getJumpTables();
+ for (unsigned I = 0; I < JT.size(); ++I) {
+ const std::vector<MachineBasicBlock*> &MBBs = JT[I].MBBs;
+
+ for (unsigned J = 0; J < MBBs.size(); ++J)
+ MBBs[J]->setAlignment(MIPS_NACL_BUNDLE_ALIGN);
+ }
+ }
+
+ // If a basic block's address is taken, it can be the target of an indirect
+ // branch.
+ for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
+ MBB != E; ++MBB) {
+ if (MBB->hasAddressTaken())
+ MBB->setAlignment(MIPS_NACL_BUNDLE_ALIGN);
+ }
+}
+
+bool MipsAsmPrinter::isLongBranchPseudo(int Opcode) const {
+ return (Opcode == Mips::LONG_BRANCH_LUi
+ || Opcode == Mips::LONG_BRANCH_ADDiu
+ || Opcode == Mips::LONG_BRANCH_DADDiu);
+}
+
// Force static initialization.
extern "C" void LLVMInitializeMipsAsmPrinter() {
RegisterAsmPrinter<MipsAsmPrinter> X(TheMipsTarget);
diff --git a/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.h b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.h
index 11c6acd..abbd39b 100644
--- a/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.h
+++ b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.h
@@ -14,6 +14,7 @@
#ifndef MIPSASMPRINTER_H
#define MIPSASMPRINTER_H
+#include "Mips16HardFloatInfo.h"
#include "MipsMCInstLower.h"
#include "MipsMachineFunction.h"
#include "MipsSubtarget.h"
@@ -39,6 +40,12 @@ private:
bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,
const MachineInstr *MI);
+ // Emit PseudoReturn, PseudoReturn64, PseudoIndirectBranch,
+ // and PseudoIndirectBranch64 as a JR, JR_MM, JALR, or JALR64 as appropriate
+ // for the target.
+ void emitPseudoIndirectBranch(MCStreamer &OutStreamer,
+ const MachineInstr *MI);
+
// lowerOperand - Convert a MachineOperand into the equivalent MCOperand.
bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp);
@@ -50,7 +57,31 @@ private:
/// pool entries so we can properly mark them as data regions.
bool InConstantPool;
- bool UsingConstantPools;
+ std::map<const char *, const llvm::Mips16HardFloatInfo::FuncSignature *>
+ StubsNeeded;
+
+ void EmitJal(MCSymbol *Symbol);
+
+ void EmitInstrReg(unsigned Opcode, unsigned Reg);
+
+ void EmitInstrRegReg(unsigned Opcode, unsigned Reg1, unsigned Reg2);
+
+ void EmitInstrRegRegReg(unsigned Opcode, unsigned Reg1, unsigned Reg2,
+ unsigned Reg3);
+
+ void EmitMovFPIntPair(unsigned MovOpc, unsigned Reg1, unsigned Reg2,
+ unsigned FPReg1, unsigned FPReg2, bool LE);
+
+ void EmitSwapFPIntParams(Mips16HardFloatInfo::FPParamVariant, bool LE,
+ bool ToFP);
+
+ void EmitSwapFPIntRetval(Mips16HardFloatInfo::FPReturnVariant, bool LE);
+
+ void EmitFPCallStub(const char *, const Mips16HardFloatInfo::FuncSignature *);
+
+ void NaClAlignIndirectJumpTargets(MachineFunction &MF);
+
+ bool isLongBranchPseudo(int Opcode) const;
public:
@@ -58,51 +89,53 @@ public:
const MipsFunctionInfo *MipsFI;
MipsMCInstLower MCInstLowering;
- explicit MipsAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer), MCP(0), InConstantPool(false),
- MCInstLowering(*this) {
- Subtarget = &TM.getSubtarget<MipsSubtarget>();
- UsingConstantPools =
- (Subtarget->inMips16Mode() && Subtarget->useConstantIslands());
- }
+ // We initialize the subtarget here and in runOnMachineFunction
+ // since there are certain target specific flags (ABI) that could
+ // reside on the TargetMachine, but are on the subtarget currently
+ // and we need them for the beginning of file output before we've
+ // seen a single function.
+ explicit MipsAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
+ : AsmPrinter(TM, Streamer), MCP(nullptr), InConstantPool(false),
+ Subtarget(&TM.getSubtarget<MipsSubtarget>()), MCInstLowering(*this) {}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Mips Assembly Printer";
}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- virtual void EmitConstantPool() LLVM_OVERRIDE {
+ void EmitConstantPool() override {
+ bool UsingConstantPools =
+ (Subtarget->inMips16Mode() && Subtarget->useConstantIslands());
if (!UsingConstantPools)
AsmPrinter::EmitConstantPool();
// We emit constant pools ourselves!
}
- void EmitInstruction(const MachineInstr *MI);
- void printSavedRegsBitmask(raw_ostream &O);
- void printHex32(unsigned int Value, raw_ostream &O);
+ void EmitInstruction(const MachineInstr *MI) override;
+ void printSavedRegsBitmask();
void emitFrameDirective();
const char *getCurrentABIString() const;
- virtual void EmitFunctionEntryLabel();
- virtual void EmitFunctionBodyStart();
- virtual void EmitFunctionBodyEnd();
- virtual bool isBlockOnlyReachableByFallthrough(const MachineBasicBlock*
- MBB) const;
+ void EmitFunctionEntryLabel() override;
+ void EmitFunctionBodyStart() override;
+ void EmitFunctionBodyEnd() override;
+ bool isBlockOnlyReachableByFallthrough(
+ const MachineBasicBlock* MBB) const override;
bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O);
+ raw_ostream &O) override;
bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O);
+ raw_ostream &O) override;
void printOperand(const MachineInstr *MI, int opNum, raw_ostream &O);
void printUnsignedImm(const MachineInstr *MI, int opNum, raw_ostream &O);
void printUnsignedImm8(const MachineInstr *MI, int opNum, raw_ostream &O);
void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &O);
void printMemOperandEA(const MachineInstr *MI, int opNum, raw_ostream &O);
void printFCCOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
- const char *Modifier = 0);
- void EmitStartOfAsmFile(Module &M);
- void EmitEndOfAsmFile(Module &M);
+ const char *Modifier = nullptr);
+ void EmitStartOfAsmFile(Module &M) override;
+ void EmitEndOfAsmFile(Module &M) override;
void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
};
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsCallingConv.td b/contrib/llvm/lib/Target/Mips/MipsCallingConv.td
index 66391cb..b1cd3c3 100644
--- a/contrib/llvm/lib/Target/Mips/MipsCallingConv.td
+++ b/contrib/llvm/lib/Target/Mips/MipsCallingConv.td
@@ -26,9 +26,9 @@ def RetCC_MipsO32 : CallingConv<[
// f32 are returned in registers F0, F2
CCIfType<[f32], CCAssignToReg<[F0, F2]>>,
- // f64 arguments are returned in D0_64 and D1_64 in FP64bit mode or
+ // f64 arguments are returned in D0_64 and D2_64 in FP64bit mode or
// in D0 and D1 in FP32bit mode.
- CCIfType<[f64], CCIfSubtarget<"isFP64bit()", CCAssignToReg<[D0_64, D1_64]>>>,
+ CCIfType<[f64], CCIfSubtarget<"isFP64bit()", CCAssignToReg<[D0_64, D2_64]>>>,
CCIfType<[f64], CCIfSubtarget<"isNotFP64bit()", CCAssignToReg<[D0, D1]>>>
]>;
@@ -192,12 +192,24 @@ def CC_Mips_FastCC : CallingConv<[
// Integer arguments are passed in integer registers. All scratch registers,
// except for AT, V0 and T9, are available to be used as argument registers.
- CCIfType<[i32], CCAssignToReg<[A0, A1, A2, A3, T0, T1, T2, T3, T4, T5, T6,
- T7, T8, V1]>>,
+ CCIfType<[i32], CCIfSubtarget<"isNotTargetNaCl()",
+ CCAssignToReg<[A0, A1, A2, A3, T0, T1, T2, T3, T4, T5, T6, T7, T8, V1]>>>,
+
+ // In NaCl, T6, T7 and T8 are reserved and not available as argument
+ // registers for fastcc. T6 contains the mask for sandboxing control flow
+ // (indirect jumps and calls). T7 contains the mask for sandboxing memory
+ // accesses (loads and stores). T8 contains the thread pointer.
+ CCIfType<[i32], CCIfSubtarget<"isTargetNaCl()",
+ CCAssignToReg<[A0, A1, A2, A3, T0, T1, T2, T3, T4, T5, V1]>>>,
// f32 arguments are passed in single-precision floating pointer registers.
- CCIfType<[f32], CCAssignToReg<[F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10,
- F11, F12, F13, F14, F15, F16, F17, F18, F19]>>,
+ CCIfType<[f32], CCIfSubtarget<"useOddSPReg()",
+ CCAssignToReg<[F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13,
+ F14, F15, F16, F17, F18, F19]>>>,
+
+ // Don't use odd numbered single-precision registers for -mno-odd-spreg.
+ CCIfType<[f32], CCIfSubtarget<"noOddSPReg()",
+ CCAssignToReg<[F0, F2, F4, F6, F8, F10, F12, F14, F16, F18]>>>,
// Stack parameter slots for i32 and f32 are 32-bit words and 4-byte aligned.
CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
@@ -232,18 +244,26 @@ def RetCC_Mips : CallingConv<[
def CSR_SingleFloatOnly : CalleeSavedRegs<(add (sequence "F%u", 31, 20), RA, FP,
(sequence "S%u", 7, 0))>;
+def CSR_O32_FPXX : CalleeSavedRegs<(add (sequence "D%u", 15, 10), RA, FP,
+ (sequence "S%u", 7, 0))> {
+ let OtherPreserved = (add (decimate (sequence "F%u", 30, 20), 2));
+}
+
def CSR_O32 : CalleeSavedRegs<(add (sequence "D%u", 15, 10), RA, FP,
(sequence "S%u", 7, 0))>;
-def CSR_O32_FP64 : CalleeSavedRegs<(add (sequence "D%u_64", 31, 20), RA, FP,
- (sequence "S%u", 7, 0))>;
+def CSR_O32_FP64 :
+ CalleeSavedRegs<(add (decimate (sequence "D%u_64", 30, 20), 2), RA, FP,
+ (sequence "S%u", 7, 0))>;
-def CSR_N32 : CalleeSavedRegs<(add D31_64, D29_64, D27_64, D25_64, D24_64,
- D23_64, D22_64, D21_64, RA_64, FP_64, GP_64,
+def CSR_N32 : CalleeSavedRegs<(add D20_64, D22_64, D24_64, D26_64, D28_64,
+ D30_64, RA_64, FP_64, GP_64,
(sequence "S%u_64", 7, 0))>;
def CSR_N64 : CalleeSavedRegs<(add (sequence "D%u_64", 31, 24), RA_64, FP_64,
GP_64, (sequence "S%u_64", 7, 0))>;
def CSR_Mips16RetHelper :
- CalleeSavedRegs<(add V0, V1, (sequence "A%u", 3, 0), S0, S1)>;
+ CalleeSavedRegs<(add V0, V1, FP,
+ (sequence "A%u", 3, 0), (sequence "S%u", 7, 0),
+ (sequence "D%u", 15, 10))>;
diff --git a/contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp b/contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp
index ca4163d..794c718 100644
--- a/contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp
@@ -12,7 +12,6 @@
//
//===---------------------------------------------------------------------===//
-#define DEBUG_TYPE "jit"
#include "Mips.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "MipsInstrInfo.h"
@@ -24,8 +23,8 @@
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/Passes.h"
@@ -41,6 +40,8 @@
using namespace llvm;
+#define DEBUG_TYPE "jit"
+
STATISTIC(NumEmitted, "Number of machine instructions emitted");
namespace {
@@ -56,7 +57,7 @@ class MipsCodeEmitter : public MachineFunctionPass {
const std::vector<MachineJumpTableEntry> *MJTEs;
bool IsPIC;
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MachineModuleInfo> ();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -65,13 +66,13 @@ class MipsCodeEmitter : public MachineFunctionPass {
public:
MipsCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce)
- : MachineFunctionPass(ID), JTI(0), II(0), TD(0),
- TM(tm), MCE(mce), MCPEs(0), MJTEs(0),
+ : MachineFunctionPass(ID), JTI(nullptr), II(nullptr), TD(nullptr),
+ TM(tm), MCE(mce), MCPEs(nullptr), MJTEs(nullptr),
IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
- bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Mips Machine Code Emitter";
}
@@ -109,20 +110,29 @@ private:
unsigned getBranchTargetOpValueMM(const MachineInstr &MI,
unsigned OpNo) const;
+ unsigned getBranchTarget21OpValue(const MachineInstr &MI,
+ unsigned OpNo) const;
+ unsigned getBranchTarget26OpValue(const MachineInstr &MI,
+ unsigned OpNo) const;
+ unsigned getJumpOffset16OpValue(const MachineInstr &MI, unsigned OpNo) const;
+
unsigned getBranchTargetOpValue(const MachineInstr &MI, unsigned OpNo) const;
unsigned getMemEncoding(const MachineInstr &MI, unsigned OpNo) const;
unsigned getMemEncodingMMImm12(const MachineInstr &MI, unsigned OpNo) const;
+ unsigned getMSAMemEncoding(const MachineInstr &MI, unsigned OpNo) const;
unsigned getSizeExtEncoding(const MachineInstr &MI, unsigned OpNo) const;
unsigned getSizeInsEncoding(const MachineInstr &MI, unsigned OpNo) const;
unsigned getLSAImmEncoding(const MachineInstr &MI, unsigned OpNo) const;
-
- void emitGlobalAddressUnaligned(const GlobalValue *GV, unsigned Reloc,
- int Offset) const;
+ unsigned getSimm19Lsl2Encoding(const MachineInstr &MI, unsigned OpNo) const;
+ unsigned getSimm18Lsl3Encoding(const MachineInstr &MI, unsigned OpNo) const;
/// Expand pseudo instructions with accumulator register operands.
void expandACCInstr(MachineBasicBlock::instr_iterator MI,
MachineBasicBlock &MBB, unsigned Opc) const;
+ void expandPseudoIndirectBranch(MachineBasicBlock::instr_iterator MI,
+ MachineBasicBlock &MBB) const;
+
/// \brief Expand pseudo instruction. Return true if MI was expanded.
bool expandPseudos(MachineBasicBlock::instr_iterator &MI,
MachineBasicBlock &MBB) const;
@@ -140,7 +150,7 @@ bool MipsCodeEmitter::runOnMachineFunction(MachineFunction &MF) {
TD = Target.getDataLayout();
Subtarget = &TM.getSubtarget<MipsSubtarget> ();
MCPEs = &MF.getConstantPool()->getConstants();
- MJTEs = 0;
+ MJTEs = nullptr;
if (MF.getJumpTableInfo()) MJTEs = &MF.getJumpTableInfo()->getJumpTables();
JTI->Initialize(MF, IsPIC, Subtarget->isLittle());
MCE.setModuleInfo(&getAnalysis<MachineModuleInfo> ());
@@ -203,6 +213,24 @@ unsigned MipsCodeEmitter::getBranchTargetOpValueMM(const MachineInstr &MI,
return 0;
}
+unsigned MipsCodeEmitter::getBranchTarget21OpValue(const MachineInstr &MI,
+ unsigned OpNo) const {
+ llvm_unreachable("Unimplemented function.");
+ return 0;
+}
+
+unsigned MipsCodeEmitter::getBranchTarget26OpValue(const MachineInstr &MI,
+ unsigned OpNo) const {
+ llvm_unreachable("Unimplemented function.");
+ return 0;
+}
+
+unsigned MipsCodeEmitter::getJumpOffset16OpValue(const MachineInstr &MI,
+ unsigned OpNo) const {
+ llvm_unreachable("Unimplemented function.");
+ return 0;
+}
+
unsigned MipsCodeEmitter::getBranchTargetOpValue(const MachineInstr &MI,
unsigned OpNo) const {
MachineOperand MO = MI.getOperand(OpNo);
@@ -224,6 +252,12 @@ unsigned MipsCodeEmitter::getMemEncodingMMImm12(const MachineInstr &MI,
return 0;
}
+unsigned MipsCodeEmitter::getMSAMemEncoding(const MachineInstr &MI,
+ unsigned OpNo) const {
+ llvm_unreachable("Unimplemented function.");
+ return 0;
+}
+
unsigned MipsCodeEmitter::getSizeExtEncoding(const MachineInstr &MI,
unsigned OpNo) const {
// size is encoded as size-1.
@@ -243,6 +277,18 @@ unsigned MipsCodeEmitter::getLSAImmEncoding(const MachineInstr &MI,
return 0;
}
+unsigned MipsCodeEmitter::getSimm18Lsl3Encoding(const MachineInstr &MI,
+ unsigned OpNo) const {
+ llvm_unreachable("Unimplemented function.");
+ return 0;
+}
+
+unsigned MipsCodeEmitter::getSimm19Lsl2Encoding(const MachineInstr &MI,
+ unsigned OpNo) const {
+ llvm_unreachable("Unimplemented function.");
+ return 0;
+}
+
/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned MipsCodeEmitter::getMachineOpValue(const MachineInstr &MI,
@@ -273,14 +319,6 @@ void MipsCodeEmitter::emitGlobalAddress(const GlobalValue *GV, unsigned Reloc,
MayNeedFarStub));
}
-void MipsCodeEmitter::emitGlobalAddressUnaligned(const GlobalValue *GV,
- unsigned Reloc, int Offset) const {
- MCE.addRelocation(MachineRelocation::getGV(MCE.getCurrentPCOffset(), Reloc,
- const_cast<GlobalValue *>(GV), 0, false));
- MCE.addRelocation(MachineRelocation::getGV(MCE.getCurrentPCOffset() + Offset,
- Reloc, const_cast<GlobalValue *>(GV), 0, false));
-}
-
void MipsCodeEmitter::
emitExternalSymbolAddress(const char *ES, unsigned Reloc) const {
MCE.addRelocation(MachineRelocation::getExtSym(MCE.getCurrentPCOffset(),
@@ -338,9 +376,44 @@ void MipsCodeEmitter::expandACCInstr(MachineBasicBlock::instr_iterator MI,
.addReg(MI->getOperand(1).getReg()).addReg(MI->getOperand(2).getReg());
}
+void MipsCodeEmitter::expandPseudoIndirectBranch(
+ MachineBasicBlock::instr_iterator MI, MachineBasicBlock &MBB) const {
+ // This logic is duplicated from MipsAsmPrinter::emitPseudoIndirectBranch()
+ bool HasLinkReg = false;
+ unsigned Opcode = 0;
+
+ if (Subtarget->hasMips64r6()) {
+ // MIPS64r6 should use (JALR64 ZERO_64, $rs)
+ Opcode = Mips::JALR64;
+ HasLinkReg = true;
+ } else if (Subtarget->hasMips32r6()) {
+ // MIPS32r6 should use (JALR ZERO, $rs)
+ Opcode = Mips::JALR;
+ HasLinkReg = true;
+ } else if (Subtarget->inMicroMipsMode())
+ // microMIPS should use (JR_MM $rs)
+ Opcode = Mips::JR_MM;
+ else {
+ // Everything else should use (JR $rs)
+ Opcode = Mips::JR;
+ }
+
+ auto MIB = BuildMI(MBB, &*MI, MI->getDebugLoc(), II->get(Opcode));
+
+ if (HasLinkReg) {
+ unsigned ZeroReg = Subtarget->isGP64bit() ? Mips::ZERO_64 : Mips::ZERO;
+ MIB.addReg(ZeroReg);
+ }
+
+ MIB.addReg(MI->getOperand(0).getReg());
+}
+
bool MipsCodeEmitter::expandPseudos(MachineBasicBlock::instr_iterator &MI,
MachineBasicBlock &MBB) const {
switch (MI->getOpcode()) {
+ default:
+ llvm_unreachable("Unhandled pseudo");
+ return false;
case Mips::NOP:
BuildMI(MBB, &*MI, MI->getDebugLoc(), II->get(Mips::SLL), Mips::ZERO)
.addReg(Mips::ZERO).addImm(0);
@@ -381,8 +454,17 @@ bool MipsCodeEmitter::expandPseudos(MachineBasicBlock::instr_iterator &MI,
case Mips::PseudoMSUBU:
expandACCInstr(MI, MBB, Mips::MSUBU);
break;
- default:
- return false;
+ case Mips::PseudoReturn:
+ case Mips::PseudoReturn64:
+ case Mips::PseudoIndirectBranch:
+ case Mips::PseudoIndirectBranch64:
+ expandPseudoIndirectBranch(MI, MBB);
+ break;
+ case TargetOpcode::CFI_INSTRUCTION:
+ case TargetOpcode::IMPLICIT_DEF:
+ case TargetOpcode::KILL:
+ // Do nothing
+ return false;
}
(MI--)->eraseFromBundle();
diff --git a/contrib/llvm/lib/Target/Mips/MipsCondMov.td b/contrib/llvm/lib/Target/Mips/MipsCondMov.td
index 2de1430..690f626 100644
--- a/contrib/llvm/lib/Target/Mips/MipsCondMov.td
+++ b/contrib/llvm/lib/Target/Mips/MipsCondMov.td
@@ -27,7 +27,7 @@ class CMov_I_I_FT<string opstr, RegisterOperand CRC, RegisterOperand DRC,
class CMov_I_F_FT<string opstr, RegisterOperand CRC, RegisterOperand DRC,
InstrItinClass Itin> :
InstSE<(outs DRC:$fd), (ins DRC:$fs, CRC:$rt, DRC:$F),
- !strconcat(opstr, "\t$fd, $fs, $rt"), [], Itin, FrmFR> {
+ !strconcat(opstr, "\t$fd, $fs, $rt"), [], Itin, FrmFR, opstr> {
let Constraints = "$F = $fd";
}
@@ -47,7 +47,7 @@ class CMov_F_F_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
InstSE<(outs RC:$fd), (ins RC:$fs, FCCRegsOpnd:$fcc, RC:$F),
!strconcat(opstr, "\t$fd, $fs, $fcc"),
[(set RC:$fd, (OpNode RC:$fs, FCCRegsOpnd:$fcc, RC:$F))],
- Itin, FrmFR> {
+ Itin, FrmFR, opstr> {
let Constraints = "$F = $fd";
}
@@ -103,143 +103,163 @@ multiclass MovnPats<RegisterClass CRC, RegisterClass DRC, Instruction MOVNInst,
}
// Instantiation of instructions.
-def MOVZ_I_I : MMRel, CMov_I_I_FT<"movz", GPR32Opnd, GPR32Opnd, IIArith>,
- ADD_FM<0, 0xa>;
-
-let Predicates = [HasStdEnc], isCodeGenOnly = 1 in {
- def MOVZ_I_I64 : CMov_I_I_FT<"movz", GPR32Opnd, GPR64Opnd, IIArith>,
- ADD_FM<0, 0xa>;
- def MOVZ_I64_I : CMov_I_I_FT<"movz", GPR64Opnd, GPR32Opnd, IIArith>,
- ADD_FM<0, 0xa>;
- def MOVZ_I64_I64 : CMov_I_I_FT<"movz", GPR64Opnd, GPR64Opnd, IIArith>,
- ADD_FM<0, 0xa>;
+def MOVZ_I_I : MMRel, CMov_I_I_FT<"movz", GPR32Opnd, GPR32Opnd, II_MOVZ>,
+ ADD_FM<0, 0xa>, INSN_MIPS4_32_NOT_32R6_64R6;
+
+let isCodeGenOnly = 1 in {
+ def MOVZ_I_I64 : CMov_I_I_FT<"movz", GPR32Opnd, GPR64Opnd, II_MOVZ>,
+ ADD_FM<0, 0xa>, INSN_MIPS4_32_NOT_32R6_64R6;
+ def MOVZ_I64_I : CMov_I_I_FT<"movz", GPR64Opnd, GPR32Opnd, II_MOVZ>,
+ ADD_FM<0, 0xa>, INSN_MIPS4_32_NOT_32R6_64R6;
+ def MOVZ_I64_I64 : CMov_I_I_FT<"movz", GPR64Opnd, GPR64Opnd, II_MOVZ>,
+ ADD_FM<0, 0xa>, INSN_MIPS4_32_NOT_32R6_64R6;
}
-def MOVN_I_I : MMRel, CMov_I_I_FT<"movn", GPR32Opnd, GPR32Opnd, IIArith>,
- ADD_FM<0, 0xb>;
+def MOVN_I_I : MMRel, CMov_I_I_FT<"movn", GPR32Opnd, GPR32Opnd, II_MOVN>,
+ ADD_FM<0, 0xb>, INSN_MIPS4_32_NOT_32R6_64R6;
-let Predicates = [HasStdEnc], isCodeGenOnly = 1 in {
- def MOVN_I_I64 : CMov_I_I_FT<"movn", GPR32Opnd, GPR64Opnd, IIArith>,
- ADD_FM<0, 0xb>;
- def MOVN_I64_I : CMov_I_I_FT<"movn", GPR64Opnd, GPR32Opnd, IIArith>,
- ADD_FM<0, 0xb>;
- def MOVN_I64_I64 : CMov_I_I_FT<"movn", GPR64Opnd, GPR64Opnd, IIArith>,
- ADD_FM<0, 0xb>;
+let isCodeGenOnly = 1 in {
+ def MOVN_I_I64 : CMov_I_I_FT<"movn", GPR32Opnd, GPR64Opnd, II_MOVN>,
+ ADD_FM<0, 0xb>, INSN_MIPS4_32_NOT_32R6_64R6;
+ def MOVN_I64_I : CMov_I_I_FT<"movn", GPR64Opnd, GPR32Opnd, II_MOVN>,
+ ADD_FM<0, 0xb>, INSN_MIPS4_32_NOT_32R6_64R6;
+ def MOVN_I64_I64 : CMov_I_I_FT<"movn", GPR64Opnd, GPR64Opnd, II_MOVN>,
+ ADD_FM<0, 0xb>, INSN_MIPS4_32_NOT_32R6_64R6;
}
-def MOVZ_I_S : CMov_I_F_FT<"movz.s", GPR32Opnd, FGR32Opnd, IIFmove>,
- CMov_I_F_FM<18, 16>;
+def MOVZ_I_S : MMRel, CMov_I_F_FT<"movz.s", GPR32Opnd, FGR32Opnd, II_MOVZ_S>,
+ CMov_I_F_FM<18, 16>, INSN_MIPS4_32_NOT_32R6_64R6;
let isCodeGenOnly = 1 in
-def MOVZ_I64_S : CMov_I_F_FT<"movz.s", GPR64Opnd, FGR32Opnd, IIFmove>,
- CMov_I_F_FM<18, 16>, Requires<[HasMips64, HasStdEnc]>;
+def MOVZ_I64_S : CMov_I_F_FT<"movz.s", GPR64Opnd, FGR32Opnd, II_MOVZ_S>,
+ CMov_I_F_FM<18, 16>, INSN_MIPS4_32_NOT_32R6_64R6,
+ AdditionalRequires<[HasMips64]>;
-def MOVN_I_S : CMov_I_F_FT<"movn.s", GPR32Opnd, FGR32Opnd, IIFmove>,
- CMov_I_F_FM<19, 16>;
+def MOVN_I_S : MMRel, CMov_I_F_FT<"movn.s", GPR32Opnd, FGR32Opnd, II_MOVN_S>,
+ CMov_I_F_FM<19, 16>, INSN_MIPS4_32_NOT_32R6_64R6;
let isCodeGenOnly = 1 in
-def MOVN_I64_S : CMov_I_F_FT<"movn.s", GPR64Opnd, FGR32Opnd, IIFmove>,
- CMov_I_F_FM<19, 16>, Requires<[HasMips64, HasStdEnc]>;
-
-let Predicates = [NotFP64bit, HasStdEnc] in {
- def MOVZ_I_D32 : CMov_I_F_FT<"movz.d", GPR32Opnd, AFGR64Opnd, IIFmove>,
- CMov_I_F_FM<18, 17>;
- def MOVN_I_D32 : CMov_I_F_FT<"movn.d", GPR32Opnd, AFGR64Opnd, IIFmove>,
- CMov_I_F_FM<19, 17>;
-}
+def MOVN_I64_S : CMov_I_F_FT<"movn.s", GPR64Opnd, FGR32Opnd, II_MOVN_S>,
+ CMov_I_F_FM<19, 16>, INSN_MIPS4_32_NOT_32R6_64R6,
+ AdditionalRequires<[IsGP64bit]>;
+
+def MOVZ_I_D32 : MMRel, CMov_I_F_FT<"movz.d", GPR32Opnd, AFGR64Opnd,
+ II_MOVZ_D>, CMov_I_F_FM<18, 17>,
+ INSN_MIPS4_32_NOT_32R6_64R6, FGR_32;
+def MOVN_I_D32 : MMRel, CMov_I_F_FT<"movn.d", GPR32Opnd, AFGR64Opnd,
+ II_MOVN_D>, CMov_I_F_FM<19, 17>,
+ INSN_MIPS4_32_NOT_32R6_64R6, FGR_32;
-let Predicates = [IsFP64bit, HasStdEnc], DecoderNamespace = "Mips64" in {
- def MOVZ_I_D64 : CMov_I_F_FT<"movz.d", GPR32Opnd, FGR64Opnd, IIFmove>,
- CMov_I_F_FM<18, 17>;
- def MOVN_I_D64 : CMov_I_F_FT<"movn.d", GPR32Opnd, FGR64Opnd, IIFmove>,
- CMov_I_F_FM<19, 17>;
+let DecoderNamespace = "Mips64" in {
+ def MOVZ_I_D64 : CMov_I_F_FT<"movz.d", GPR32Opnd, FGR64Opnd, II_MOVZ_D>,
+ CMov_I_F_FM<18, 17>, INSN_MIPS4_32_NOT_32R6_64R6, FGR_64;
+ def MOVN_I_D64 : CMov_I_F_FT<"movn.d", GPR32Opnd, FGR64Opnd, II_MOVN_D>,
+ CMov_I_F_FM<19, 17>, INSN_MIPS4_32_NOT_32R6_64R6, FGR_64;
let isCodeGenOnly = 1 in {
- def MOVZ_I64_D64 : CMov_I_F_FT<"movz.d", GPR64Opnd, FGR64Opnd,
- IIFmove>, CMov_I_F_FM<18, 17>;
- def MOVN_I64_D64 : CMov_I_F_FT<"movn.d", GPR64Opnd, FGR64Opnd,
- IIFmove>, CMov_I_F_FM<19, 17>;
+ def MOVZ_I64_D64 : CMov_I_F_FT<"movz.d", GPR64Opnd, FGR64Opnd, II_MOVZ_D>,
+ CMov_I_F_FM<18, 17>, INSN_MIPS4_32_NOT_32R6_64R6, FGR_64;
+ def MOVN_I64_D64 : CMov_I_F_FT<"movn.d", GPR64Opnd, FGR64Opnd, II_MOVN_D>,
+ CMov_I_F_FM<19, 17>, INSN_MIPS4_32_NOT_32R6_64R6, FGR_64;
}
}
-def MOVT_I : MMRel, CMov_F_I_FT<"movt", GPR32Opnd, IIArith, MipsCMovFP_T>,
- CMov_F_I_FM<1>;
+def MOVT_I : MMRel, CMov_F_I_FT<"movt", GPR32Opnd, II_MOVT, MipsCMovFP_T>,
+ CMov_F_I_FM<1>, INSN_MIPS4_32_NOT_32R6_64R6;
let isCodeGenOnly = 1 in
-def MOVT_I64 : CMov_F_I_FT<"movt", GPR64Opnd, IIArith, MipsCMovFP_T>,
- CMov_F_I_FM<1>, Requires<[HasMips64, HasStdEnc]>;
+def MOVT_I64 : CMov_F_I_FT<"movt", GPR64Opnd, II_MOVT, MipsCMovFP_T>,
+ CMov_F_I_FM<1>, INSN_MIPS4_32_NOT_32R6_64R6,
+ AdditionalRequires<[IsGP64bit]>;
-def MOVF_I : MMRel, CMov_F_I_FT<"movf", GPR32Opnd, IIArith, MipsCMovFP_F>,
- CMov_F_I_FM<0>;
+def MOVF_I : MMRel, CMov_F_I_FT<"movf", GPR32Opnd, II_MOVF, MipsCMovFP_F>,
+ CMov_F_I_FM<0>, INSN_MIPS4_32_NOT_32R6_64R6;
let isCodeGenOnly = 1 in
-def MOVF_I64 : CMov_F_I_FT<"movf", GPR64Opnd, IIArith, MipsCMovFP_F>,
- CMov_F_I_FM<0>, Requires<[HasMips64, HasStdEnc]>;
-
-def MOVT_S : CMov_F_F_FT<"movt.s", FGR32Opnd, IIFmove, MipsCMovFP_T>,
- CMov_F_F_FM<16, 1>;
-def MOVF_S : CMov_F_F_FT<"movf.s", FGR32Opnd, IIFmove, MipsCMovFP_F>,
- CMov_F_F_FM<16, 0>;
-
-let Predicates = [NotFP64bit, HasStdEnc] in {
- def MOVT_D32 : CMov_F_F_FT<"movt.d", AFGR64Opnd, IIFmove, MipsCMovFP_T>,
- CMov_F_F_FM<17, 1>;
- def MOVF_D32 : CMov_F_F_FT<"movf.d", AFGR64Opnd, IIFmove, MipsCMovFP_F>,
- CMov_F_F_FM<17, 0>;
-}
+def MOVF_I64 : CMov_F_I_FT<"movf", GPR64Opnd, II_MOVF, MipsCMovFP_F>,
+ CMov_F_I_FM<0>, INSN_MIPS4_32_NOT_32R6_64R6,
+ AdditionalRequires<[IsGP64bit]>;
+
+def MOVT_S : MMRel, CMov_F_F_FT<"movt.s", FGR32Opnd, II_MOVT_S, MipsCMovFP_T>,
+ CMov_F_F_FM<16, 1>, INSN_MIPS4_32_NOT_32R6_64R6;
+def MOVF_S : MMRel, CMov_F_F_FT<"movf.s", FGR32Opnd, II_MOVF_S, MipsCMovFP_F>,
+ CMov_F_F_FM<16, 0>, INSN_MIPS4_32_NOT_32R6_64R6;
-let Predicates = [IsFP64bit, HasStdEnc], DecoderNamespace = "Mips64" in {
- def MOVT_D64 : CMov_F_F_FT<"movt.d", FGR64Opnd, IIFmove, MipsCMovFP_T>,
- CMov_F_F_FM<17, 1>;
- def MOVF_D64 : CMov_F_F_FT<"movf.d", FGR64Opnd, IIFmove, MipsCMovFP_F>,
- CMov_F_F_FM<17, 0>;
+def MOVT_D32 : MMRel, CMov_F_F_FT<"movt.d", AFGR64Opnd, II_MOVT_D,
+ MipsCMovFP_T>, CMov_F_F_FM<17, 1>,
+ INSN_MIPS4_32_NOT_32R6_64R6, FGR_32;
+def MOVF_D32 : MMRel, CMov_F_F_FT<"movf.d", AFGR64Opnd, II_MOVF_D,
+ MipsCMovFP_F>, CMov_F_F_FM<17, 0>,
+ INSN_MIPS4_32_NOT_32R6_64R6, FGR_32;
+
+let DecoderNamespace = "Mips64" in {
+ def MOVT_D64 : CMov_F_F_FT<"movt.d", FGR64Opnd, II_MOVT_D, MipsCMovFP_T>,
+ CMov_F_F_FM<17, 1>, INSN_MIPS4_32_NOT_32R6_64R6, FGR_64;
+ def MOVF_D64 : CMov_F_F_FT<"movf.d", FGR64Opnd, II_MOVF_D, MipsCMovFP_F>,
+ CMov_F_F_FM<17, 0>, INSN_MIPS4_32_NOT_32R6_64R6, FGR_64;
}
// Instantiation of conditional move patterns.
-defm : MovzPats0<GPR32, GPR32, MOVZ_I_I, SLT, SLTu, SLTi, SLTiu>;
-defm : MovzPats1<GPR32, GPR32, MOVZ_I_I, XOR>;
-defm : MovzPats2<GPR32, GPR32, MOVZ_I_I, XORi>;
-let Predicates = [HasMips64, HasStdEnc] in {
- defm : MovzPats0<GPR32, GPR64, MOVZ_I_I64, SLT, SLTu, SLTi, SLTiu>;
- defm : MovzPats0<GPR64, GPR32, MOVZ_I_I, SLT64, SLTu64, SLTi64,
- SLTiu64>;
- defm : MovzPats0<GPR64, GPR64, MOVZ_I_I64, SLT64, SLTu64, SLTi64,
- SLTiu64>;
- defm : MovzPats1<GPR32, GPR64, MOVZ_I_I64, XOR>;
- defm : MovzPats1<GPR64, GPR32, MOVZ_I64_I, XOR64>;
- defm : MovzPats1<GPR64, GPR64, MOVZ_I64_I64, XOR64>;
- defm : MovzPats2<GPR32, GPR64, MOVZ_I_I64, XORi>;
- defm : MovzPats2<GPR64, GPR32, MOVZ_I64_I, XORi64>;
- defm : MovzPats2<GPR64, GPR64, MOVZ_I64_I64, XORi64>;
-}
+defm : MovzPats0<GPR32, GPR32, MOVZ_I_I, SLT, SLTu, SLTi, SLTiu>,
+ INSN_MIPS4_32_NOT_32R6_64R6;
+defm : MovzPats1<GPR32, GPR32, MOVZ_I_I, XOR>, INSN_MIPS4_32_NOT_32R6_64R6;
+defm : MovzPats2<GPR32, GPR32, MOVZ_I_I, XORi>, INSN_MIPS4_32_NOT_32R6_64R6;
-defm : MovnPats<GPR32, GPR32, MOVN_I_I, XOR>;
-let Predicates = [HasMips64, HasStdEnc] in {
- defm : MovnPats<GPR32, GPR64, MOVN_I_I64, XOR>;
- defm : MovnPats<GPR64, GPR32, MOVN_I64_I, XOR64>;
- defm : MovnPats<GPR64, GPR64, MOVN_I64_I64, XOR64>;
-}
+defm : MovzPats0<GPR32, GPR64, MOVZ_I_I64, SLT, SLTu, SLTi, SLTiu>,
+ INSN_MIPS4_32_NOT_32R6_64R6, GPR_64;
+defm : MovzPats0<GPR64, GPR32, MOVZ_I_I, SLT64, SLTu64, SLTi64, SLTiu64>,
+ INSN_MIPS4_32_NOT_32R6_64R6, GPR_64;
+defm : MovzPats0<GPR64, GPR64, MOVZ_I_I64, SLT64, SLTu64, SLTi64, SLTiu64>,
+ INSN_MIPS4_32_NOT_32R6_64R6, GPR_64;
+defm : MovzPats1<GPR32, GPR64, MOVZ_I_I64, XOR>,
+ INSN_MIPS4_32_NOT_32R6_64R6, GPR_64;
+defm : MovzPats1<GPR64, GPR32, MOVZ_I64_I, XOR64>,
+ INSN_MIPS4_32_NOT_32R6_64R6, GPR_64;
+defm : MovzPats1<GPR64, GPR64, MOVZ_I64_I64, XOR64>,
+ INSN_MIPS4_32_NOT_32R6_64R6, GPR_64;
+defm : MovzPats2<GPR32, GPR64, MOVZ_I_I64, XORi>,
+ INSN_MIPS4_32_NOT_32R6_64R6, GPR_64;
+defm : MovzPats2<GPR64, GPR32, MOVZ_I64_I, XORi64>,
+ INSN_MIPS4_32_NOT_32R6_64R6, GPR_64;
+defm : MovzPats2<GPR64, GPR64, MOVZ_I64_I64, XORi64>,
+ INSN_MIPS4_32_NOT_32R6_64R6, GPR_64;
-defm : MovzPats0<GPR32, FGR32, MOVZ_I_S, SLT, SLTu, SLTi, SLTiu>;
-defm : MovzPats1<GPR32, FGR32, MOVZ_I_S, XOR>;
-defm : MovnPats<GPR32, FGR32, MOVN_I_S, XOR>;
-let Predicates = [HasMips64, HasStdEnc] in {
- defm : MovzPats0<GPR64, FGR32, MOVZ_I_S, SLT64, SLTu64, SLTi64,
- SLTiu64>;
- defm : MovzPats1<GPR64, FGR32, MOVZ_I64_S, XOR64>;
- defm : MovnPats<GPR64, FGR32, MOVN_I64_S, XOR64>;
-}
+defm : MovnPats<GPR32, GPR32, MOVN_I_I, XOR>, INSN_MIPS4_32_NOT_32R6_64R6;
-let Predicates = [NotFP64bit, HasStdEnc] in {
- defm : MovzPats0<GPR32, AFGR64, MOVZ_I_D32, SLT, SLTu, SLTi, SLTiu>;
- defm : MovzPats1<GPR32, AFGR64, MOVZ_I_D32, XOR>;
- defm : MovnPats<GPR32, AFGR64, MOVN_I_D32, XOR>;
-}
-let Predicates = [IsFP64bit, HasStdEnc] in {
- defm : MovzPats0<GPR32, FGR64, MOVZ_I_D64, SLT, SLTu, SLTi, SLTiu>;
- defm : MovzPats0<GPR64, FGR64, MOVZ_I_D64, SLT64, SLTu64, SLTi64,
- SLTiu64>;
- defm : MovzPats1<GPR32, FGR64, MOVZ_I_D64, XOR>;
- defm : MovzPats1<GPR64, FGR64, MOVZ_I64_D64, XOR64>;
- defm : MovnPats<GPR32, FGR64, MOVN_I_D64, XOR>;
- defm : MovnPats<GPR64, FGR64, MOVN_I64_D64, XOR64>;
-}
+defm : MovnPats<GPR32, GPR64, MOVN_I_I64, XOR>, INSN_MIPS4_32_NOT_32R6_64R6,
+ GPR_64;
+defm : MovnPats<GPR64, GPR32, MOVN_I64_I, XOR64>, INSN_MIPS4_32_NOT_32R6_64R6,
+ GPR_64;
+defm : MovnPats<GPR64, GPR64, MOVN_I64_I64, XOR64>, INSN_MIPS4_32_NOT_32R6_64R6,
+ GPR_64;
+
+defm : MovzPats0<GPR32, FGR32, MOVZ_I_S, SLT, SLTu, SLTi, SLTiu>,
+ INSN_MIPS4_32_NOT_32R6_64R6;
+defm : MovzPats1<GPR32, FGR32, MOVZ_I_S, XOR>, INSN_MIPS4_32_NOT_32R6_64R6;
+defm : MovnPats<GPR32, FGR32, MOVN_I_S, XOR>, INSN_MIPS4_32_NOT_32R6_64R6;
+
+defm : MovzPats0<GPR64, FGR32, MOVZ_I_S, SLT64, SLTu64, SLTi64, SLTiu64>,
+ INSN_MIPS4_32_NOT_32R6_64R6, GPR_64;
+defm : MovzPats1<GPR64, FGR32, MOVZ_I64_S, XOR64>, INSN_MIPS4_32_NOT_32R6_64R6,
+ GPR_64;
+defm : MovnPats<GPR64, FGR32, MOVN_I64_S, XOR64>, INSN_MIPS4_32_NOT_32R6_64R6,
+ GPR_64;
+
+defm : MovzPats0<GPR32, AFGR64, MOVZ_I_D32, SLT, SLTu, SLTi, SLTiu>,
+ INSN_MIPS4_32_NOT_32R6_64R6, FGR_32;
+defm : MovzPats1<GPR32, AFGR64, MOVZ_I_D32, XOR>, INSN_MIPS4_32_NOT_32R6_64R6,
+ FGR_32;
+defm : MovnPats<GPR32, AFGR64, MOVN_I_D32, XOR>, INSN_MIPS4_32_NOT_32R6_64R6,
+ FGR_32;
+
+defm : MovzPats0<GPR32, FGR64, MOVZ_I_D64, SLT, SLTu, SLTi, SLTiu>,
+ INSN_MIPS4_32_NOT_32R6_64R6, FGR_64;
+defm : MovzPats0<GPR64, FGR64, MOVZ_I_D64, SLT64, SLTu64, SLTi64, SLTiu64>,
+ INSN_MIPS4_32_NOT_32R6_64R6, FGR_64;
+defm : MovzPats1<GPR32, FGR64, MOVZ_I_D64, XOR>, INSN_MIPS4_32_NOT_32R6_64R6,
+ FGR_64;
+defm : MovzPats1<GPR64, FGR64, MOVZ_I64_D64, XOR64>,
+ INSN_MIPS4_32_NOT_32R6_64R6, FGR_64;
+defm : MovnPats<GPR32, FGR64, MOVN_I_D64, XOR>, INSN_MIPS4_32_NOT_32R6_64R6,
+ FGR_64;
+defm : MovnPats<GPR64, FGR64, MOVN_I64_D64, XOR64>, INSN_MIPS4_32_NOT_32R6_64R6,
+ FGR_64;
diff --git a/contrib/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp b/contrib/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
index c46bbac..80bf573 100644
--- a/contrib/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
@@ -17,12 +17,10 @@
//
// The constants can be not just numbers but addresses of functions and labels.
// This can be particularly helpful in static relocation mode for embedded
-// non linux targets.
+// non-linux targets.
//
//
-#define DEBUG_TYPE "mips-constant-islands"
-
#include "Mips.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "Mips16InstrInfo.h"
@@ -34,19 +32,21 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/InstIterator.h"
+#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Support/Format.h"
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "mips-constant-islands"
+
STATISTIC(NumCPEs, "Number of constpool entries");
STATISTIC(NumSplit, "Number of uncond branches inserted");
STATISTIC(NumCBrFixed, "Number of cond branches fixed");
@@ -77,6 +77,113 @@ static cl::opt<bool> NoLoadRelaxation(
cl::desc("Don't relax loads to long loads - for testing purposes"),
cl::Hidden);
+static unsigned int branchTargetOperand(MachineInstr *MI) {
+ switch (MI->getOpcode()) {
+ case Mips::Bimm16:
+ case Mips::BimmX16:
+ case Mips::Bteqz16:
+ case Mips::BteqzX16:
+ case Mips::Btnez16:
+ case Mips::BtnezX16:
+ case Mips::JalB16:
+ return 0;
+ case Mips::BeqzRxImm16:
+ case Mips::BeqzRxImmX16:
+ case Mips::BnezRxImm16:
+ case Mips::BnezRxImmX16:
+ return 1;
+ }
+ llvm_unreachable("Unknown branch type");
+}
+
+static bool isUnconditionalBranch(unsigned int Opcode) {
+ switch (Opcode) {
+ default: return false;
+ case Mips::Bimm16:
+ case Mips::BimmX16:
+ case Mips::JalB16:
+ return true;
+ }
+}
+
+static unsigned int longformBranchOpcode(unsigned int Opcode) {
+ switch (Opcode) {
+ case Mips::Bimm16:
+ case Mips::BimmX16:
+ return Mips::BimmX16;
+ case Mips::Bteqz16:
+ case Mips::BteqzX16:
+ return Mips::BteqzX16;
+ case Mips::Btnez16:
+ case Mips::BtnezX16:
+ return Mips::BtnezX16;
+ case Mips::JalB16:
+ return Mips::JalB16;
+ case Mips::BeqzRxImm16:
+ case Mips::BeqzRxImmX16:
+ return Mips::BeqzRxImmX16;
+ case Mips::BnezRxImm16:
+ case Mips::BnezRxImmX16:
+ return Mips::BnezRxImmX16;
+ }
+ llvm_unreachable("Unknown branch type");
+}
+
+//
+// FIXME: Go through this whole constant-islands port and check the branch
+// range math; clean it up and factor the calculations that are repeated in
+// many places into helper functions. Some existing code should also be
+// refactored to call this routine.
+//
+static unsigned int branchMaxOffsets(unsigned int Opcode) {
+ unsigned Bits, Scale;
+ switch (Opcode) {
+ case Mips::Bimm16:
+ Bits = 11;
+ Scale = 2;
+ break;
+ case Mips::BimmX16:
+ Bits = 16;
+ Scale = 2;
+ break;
+ case Mips::BeqzRxImm16:
+ Bits = 8;
+ Scale = 2;
+ break;
+ case Mips::BeqzRxImmX16:
+ Bits = 16;
+ Scale = 2;
+ break;
+ case Mips::BnezRxImm16:
+ Bits = 8;
+ Scale = 2;
+ break;
+ case Mips::BnezRxImmX16:
+ Bits = 16;
+ Scale = 2;
+ break;
+ case Mips::Bteqz16:
+ Bits = 8;
+ Scale = 2;
+ break;
+ case Mips::BteqzX16:
+ Bits = 16;
+ Scale = 2;
+ break;
+ case Mips::Btnez16:
+ Bits = 8;
+ Scale = 2;
+ break;
+ case Mips::BtnezX16:
+ Bits = 16;
+ Scale = 2;
+ break;
+ default:
+ llvm_unreachable("Unknown branch type");
+ }
+ unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
+ return MaxOffs;
+}
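The helper above folds the per-opcode (Bits, Scale) table into one signed-range formula. A quick standalone check of that arithmetic (illustration only, not part of the patch): an 11-bit signed field scaled by 2, as used by Bimm16, reaches 2046 bytes; the extended 16-bit forms reach 65534 bytes; the short 8-bit conditional forms only 254 bytes.

#include <cassert>

// Mirrors branchMaxOffsets() for a few opcodes; the values follow directly
// from the Bits/Scale table above.
static unsigned maxOffs(unsigned Bits, unsigned Scale) {
  return ((1u << (Bits - 1)) - 1) * Scale;
}

int main() {
  assert(maxOffs(11, 2) == 2046);  // Mips::Bimm16
  assert(maxOffs(16, 2) == 65534); // Mips::BimmX16 and other X16 forms
  assert(maxOffs(8, 2) == 254);    // short conditional branches
  return 0;
}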
namespace {
@@ -258,17 +365,16 @@ namespace {
public:
static char ID;
MipsConstantIslands(TargetMachine &tm)
- : MachineFunctionPass(ID), TM(tm),
- IsPIC(TM.getRelocationModel() == Reloc::PIC_),
- ABI(TM.getSubtarget<MipsSubtarget>().getTargetABI()),
- STI(&TM.getSubtarget<MipsSubtarget>()), MF(0), MCP(0),
- PrescannedForConstants(false){}
+ : MachineFunctionPass(ID), TM(tm),
+ IsPIC(TM.getRelocationModel() == Reloc::PIC_),
+ ABI(TM.getSubtarget<MipsSubtarget>().getTargetABI()), STI(nullptr),
+ MF(nullptr), MCP(nullptr), PrescannedForConstants(false) {}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Mips Constant Islands";
}
- bool runOnMachineFunction(MachineFunction &F);
+ bool runOnMachineFunction(MachineFunction &F) override;
void doInitialPlacement(std::vector<MachineInstr*> &CPEMIs);
CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
@@ -277,16 +383,12 @@ namespace {
unsigned getOffsetOf(MachineInstr *MI) const;
unsigned getUserOffset(CPUser&) const;
void dumpBBs();
- void verify();
bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
unsigned Disp, bool NegativeOK);
bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
const CPUser &U);
- bool isLongFormOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
- const CPUser &U);
-
void computeBlockSize(MachineBasicBlock *MBB);
MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
void updateForInsertedWaterBlock(MachineBasicBlock *NewBB);
@@ -320,14 +422,6 @@ namespace {
char MipsConstantIslands::ID = 0;
} // end of anonymous namespace
-
-bool MipsConstantIslands::isLongFormOffsetInRange
- (unsigned UserOffset, unsigned TrialOffset,
- const CPUser &U) {
- return isOffsetInRange(UserOffset, TrialOffset,
- U.getLongFormMaxDisp(), U.NegOk);
-}
-
bool MipsConstantIslands::isOffsetInRange
(unsigned UserOffset, unsigned TrialOffset,
const CPUser &U) {
@@ -355,9 +449,9 @@ bool MipsConstantIslands::runOnMachineFunction(MachineFunction &mf) {
// FIXME:
MF = &mf;
MCP = mf.getConstantPool();
+ STI = &mf.getTarget().getSubtarget<MipsSubtarget>();
DEBUG(dbgs() << "constant island machine function " << "\n");
- if (!TM.getSubtarget<MipsSubtarget>().inMips16Mode() ||
- !MipsSubtarget::useConstantIslands()) {
+ if (!STI->inMips16Mode() || !MipsSubtarget::useConstantIslands()) {
return false;
}
TII = (const Mips16InstrInfo*)MF->getTarget().getInstrInfo();
@@ -509,10 +603,10 @@ static bool BBHasFallthrough(MachineBasicBlock *MBB) {
// Get the next machine basic block in the function.
MachineFunction::iterator MBBI = MBB;
// Can't fall off end of function.
- if (llvm::next(MBBI) == MBB->getParent()->end())
+ if (std::next(MBBI) == MBB->getParent()->end())
return false;
- MachineBasicBlock *NextBB = llvm::next(MBBI);
+ MachineBasicBlock *NextBB = std::next(MBBI);
for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
E = MBB->succ_end(); I != E; ++I)
if (*I == NextBB)
@@ -533,7 +627,7 @@ MipsConstantIslands::CPEntry
if (CPEs[i].CPEMI == CPEMI)
return &CPEs[i];
}
- return NULL;
+ return nullptr;
}
/// getCPELogAlign - Returns the required alignment of the constant pool entry
@@ -603,6 +697,55 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
Bits = 16;
Scale = 2;
isCond = false;
+ break;
+ case Mips::BeqzRxImm16:
+ UOpc=Mips::Bimm16;
+ Bits = 8;
+ Scale = 2;
+ isCond = true;
+ break;
+ case Mips::BeqzRxImmX16:
+ UOpc=Mips::Bimm16;
+ Bits = 16;
+ Scale = 2;
+ isCond = true;
+ break;
+ case Mips::BnezRxImm16:
+ UOpc=Mips::Bimm16;
+ Bits = 8;
+ Scale = 2;
+ isCond = true;
+ break;
+ case Mips::BnezRxImmX16:
+ UOpc=Mips::Bimm16;
+ Bits = 16;
+ Scale = 2;
+ isCond = true;
+ break;
+ case Mips::Bteqz16:
+ UOpc=Mips::Bimm16;
+ Bits = 8;
+ Scale = 2;
+ isCond = true;
+ break;
+ case Mips::BteqzX16:
+ UOpc=Mips::Bimm16;
+ Bits = 16;
+ Scale = 2;
+ isCond = true;
+ break;
+ case Mips::Btnez16:
+ UOpc=Mips::Bimm16;
+ Bits = 8;
+ Scale = 2;
+ isCond = true;
+ break;
+ case Mips::BtnezX16:
+ UOpc=Mips::Bimm16;
+ Bits = 16;
+ Scale = 2;
+ isCond = true;
+ break;
}
// Record this immediate branch.
unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
@@ -634,11 +777,11 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
Bits = 8;
Scale = 4;
LongFormOpcode = Mips::LwRxPcTcpX16;
- LongFormBits = 16;
+ LongFormBits = 14;
LongFormScale = 1;
break;
case Mips::LwRxPcTcpX16:
- Bits = 16;
+ Bits = 14;
Scale = 1;
NegOk = true;
break;
@@ -776,7 +919,7 @@ MachineBasicBlock *MipsConstantIslands::splitBlockBeforeInstr
CompareMBBNumbers);
MachineBasicBlock* WaterBB = *IP;
if (WaterBB == OrigBB)
- WaterList.insert(llvm::next(IP), NewBB);
+ WaterList.insert(std::next(IP), NewBB);
else
WaterList.insert(IP, OrigBB);
NewWaterList.insert(OrigBB);
@@ -921,7 +1064,7 @@ bool MipsConstantIslands::decrementCPEReferenceCount(unsigned CPI,
assert(CPE && "Unexpected!");
if (--CPE->RefCount == 0) {
removeDeadCPEMI(CPEMI);
- CPE->CPEMI = NULL;
+ CPE->CPEMI = nullptr;
--NumCPEs;
return true;
}
@@ -954,7 +1097,7 @@ int MipsConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset)
if (CPEs[i].CPEMI == CPEMI)
continue;
// Removing CPEs can leave empty entries, skip
- if (CPEs[i].CPEMI == NULL)
+ if (CPEs[i].CPEMI == nullptr)
continue;
if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getMaxDisp(),
U.NegOk)) {
@@ -1010,7 +1153,7 @@ int MipsConstantIslands::findLongFormInRangeCPEntry
if (CPEs[i].CPEMI == CPEMI)
continue;
// Removing CPEs can leave empty entries, skip
- if (CPEs[i].CPEMI == NULL)
+ if (CPEs[i].CPEMI == nullptr)
continue;
if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI,
U.getLongFormMaxDisp(), U.NegOk)) {
@@ -1062,7 +1205,7 @@ bool MipsConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
return false;
unsigned BestGrowth = ~0u;
- for (water_iterator IP = prior(WaterList.end()), B = WaterList.begin();;
+ for (water_iterator IP = std::prev(WaterList.end()), B = WaterList.begin();;
--IP) {
MachineBasicBlock* WaterBB = *IP;
// Check if water is in range and is either at a lower address than the
@@ -1121,7 +1264,7 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
if (isOffsetInRange(UserOffset, CPEOffset, U)) {
DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber()
<< format(", expected CPE offset %#x\n", CPEOffset));
- NewMBB = llvm::next(MachineFunction::iterator(UserMBB));
+ NewMBB = std::next(MachineFunction::iterator(UserMBB));
// Add an unconditional branch from UserMBB to fallthrough block. Record
// it for branch lengthening; this new branch will not get out of range,
// but if the preceding conditional branch is out of range, the targets
@@ -1174,8 +1317,7 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
//MachineInstr *LastIT = 0;
for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI);
Offset < BaseInsertOffset;
- Offset += TII->GetInstSizeInBytes(MI),
- MI = llvm::next(MI)) {
+ Offset += TII->GetInstSizeInBytes(MI), MI = std::next(MI)) {
assert(MI != UserMBB->end() && "Fell off end of block");
if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) {
CPUser &U = CPUsers[CPUIndex];
@@ -1232,7 +1374,7 @@ bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
NewWaterList.insert(NewIsland);
// The new CPE goes before the following block (NewMBB).
- NewMBB = llvm::next(MachineFunction::iterator(WaterBB));
+ NewMBB = std::next(MachineFunction::iterator(WaterBB));
} else {
// No water found.
@@ -1250,7 +1392,7 @@ bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
// next iteration for constant pools, but in this context, we don't want
// it. Check for this so it will be removed from the WaterList.
// Also remove any entry from NewWaterList.
- MachineBasicBlock *WaterBB = prior(MachineFunction::iterator(NewMBB));
+ MachineBasicBlock *WaterBB = std::prev(MachineFunction::iterator(NewMBB));
IP = std::find(WaterList.begin(), WaterList.end(), WaterBB);
if (IP != WaterList.end())
NewWaterList.erase(WaterBB);
@@ -1275,6 +1417,10 @@ bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
// Decrement the old entry, and remove it if refcount becomes 0.
decrementCPEReferenceCount(CPI, CPEMI);
+ // No existing clone of this CPE is within range.
+ // We will be generating a new clone. Get a UID for it.
+ unsigned ID = createPICLabelUId();
+
// Now that we have an island to add the CPE to, clone the original CPE and
// add it to the island.
U.HighWaterMark = NewIsland;
@@ -1288,11 +1434,9 @@ bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
// Increase the size of the island block to account for the new entry.
BBInfo[NewIsland->getNumber()].Size += Size;
- adjustBBOffsetsAfter(llvm::prior(MachineFunction::iterator(NewIsland)));
+ adjustBBOffsetsAfter(std::prev(MachineFunction::iterator(NewIsland)));
+
- // No existing clone of this CPE is within range.
- // We will be generating a new clone. Get a UID for it.
- unsigned ID = createPICLabelUId();
// Finally, change the CPI in the instruction operand to be ID.
for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i)
@@ -1341,7 +1485,7 @@ bool MipsConstantIslands::removeUnusedCPEntries() {
for (unsigned j = 0, ee = CPEs.size(); j != ee; ++j) {
if (CPEs[j].RefCount == 0 && CPEs[j].CPEMI) {
removeDeadCPEMI(CPEs[j].CPEMI);
- CPEs[j].CPEMI = NULL;
+ CPEs[j].CPEMI = nullptr;
MadeChange = true;
}
}
@@ -1380,7 +1524,8 @@ unsigned PCAdj = 4;
/// away to fit in its displacement field.
bool MipsConstantIslands::fixupImmediateBr(ImmBranch &Br) {
MachineInstr *MI = Br.MI;
- MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
+ unsigned TargetOperand = branchTargetOperand(MI);
+ MachineBasicBlock *DestBB = MI->getOperand(TargetOperand).getMBB();
// Check to see if the DestBB is already in-range.
if (isBBInRange(MI, DestBB, Br.MaxDisp))
@@ -1399,9 +1544,29 @@ bool
MipsConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
MachineInstr *MI = Br.MI;
MachineBasicBlock *MBB = MI->getParent();
+ MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
// Use BL to implement far jump.
- Br.MaxDisp = ((1 << 16)-1) * 2;
- MI->setDesc(TII->get(Mips::BimmX16));
+ unsigned BimmX16MaxDisp = ((1 << 16)-1) * 2;
+ if (isBBInRange(MI, DestBB, BimmX16MaxDisp)) {
+ Br.MaxDisp = BimmX16MaxDisp;
+ MI->setDesc(TII->get(Mips::BimmX16));
+ }
+ else {
+    // FIXME: The math here needs a more careful look. This is really a
+    // segment address, not a PC-relative address, but reducing the bits by
+    // one as done here should be correct. The basic block we are branching
+    // to must be longword aligned. We know that RA is saved because we
+    // always save it right now; that requirement will be relaxed later, and
+    // there is also an alternate implementation that does not need jal.
+    // We should have a way to back out this alignment restriction later if
+    // we can, but it is not harmful.
+ //
+ DestBB->setAlignment(2);
+ Br.MaxDisp = ((1<<24)-1) * 2;
+ MI->setDesc(TII->get(Mips::JalB16));
+ }
BBInfo[MBB->getNumber()].Size += 2;
adjustBBOffsetsAfter(MBB);
HasFarJump = true;
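For reference, the two constants chosen above imply these ranges: the BimmX16 path keeps the extended-branch reach, while the JalB16 fallback buys roughly 32 MiB at the cost of aligning the destination block (setAlignment(2) appears to take a log2 value in this LLVM version, i.e. 4-byte alignment). A small sketch of the arithmetic, taken straight from the expressions in the hunk:

// Illustration only; both values come from the constants above.
unsigned BimmX16Range = ((1u << 16) - 1) * 2; // 131070 bytes, ~128 KiB
unsigned JalB16Range  = ((1u << 24) - 1) * 2; // 33554430 bytes, ~32 MiB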
@@ -1412,23 +1577,33 @@ MipsConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
return true;
}
+
/// fixupConditionalBr - Fix up a conditional branch whose destination is too
/// far away to fit in its displacement field. It is converted to an inverse
/// conditional branch + an unconditional branch to the destination.
bool
MipsConstantIslands::fixupConditionalBr(ImmBranch &Br) {
MachineInstr *MI = Br.MI;
- MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
+ unsigned TargetOperand = branchTargetOperand(MI);
+ MachineBasicBlock *DestBB = MI->getOperand(TargetOperand).getMBB();
+ unsigned Opcode = MI->getOpcode();
+ unsigned LongFormOpcode = longformBranchOpcode(Opcode);
+ unsigned LongFormMaxOff = branchMaxOffsets(LongFormOpcode);
+
+ // Check to see if the DestBB is already in-range.
+ if (isBBInRange(MI, DestBB, LongFormMaxOff)) {
+ Br.MaxDisp = LongFormMaxOff;
+ MI->setDesc(TII->get(LongFormOpcode));
+ return true;
+ }
// Add an unconditional branch to the destination and invert the branch
// condition to jump over it:
- // blt L1
+ // bteqz L1
// =>
- // bge L2
+ // bnez L2
// b L1
// L2:
- unsigned CCReg = 0; // FIXME
- unsigned CC=0; //FIXME
// If the branch is at the end of its MBB and that has a fall-through block,
// direct the updated conditional branch to the fall-through block. Otherwise,
@@ -1436,29 +1611,34 @@ MipsConstantIslands::fixupConditionalBr(ImmBranch &Br) {
MachineBasicBlock *MBB = MI->getParent();
MachineInstr *BMI = &MBB->back();
bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);
-
+ unsigned OppositeBranchOpcode = TII->getOppositeBranchOpc(Opcode);
+
++NumCBrFixed;
if (BMI != MI) {
- if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) &&
- BMI->getOpcode() == Br.UncondBr) {
+ if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) &&
+ isUnconditionalBranch(BMI->getOpcode())) {
// Last MI in the BB is an unconditional branch. Can we simply invert the
// condition and swap destinations:
- // beq L1
+ // beqz L1
// b L2
// =>
- // bne L2
+ // bnez L2
// b L1
- MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
+ unsigned BMITargetOperand = branchTargetOperand(BMI);
+ MachineBasicBlock *NewDest =
+ BMI->getOperand(BMITargetOperand).getMBB();
if (isBBInRange(MI, NewDest, Br.MaxDisp)) {
DEBUG(dbgs() << " Invert Bcc condition and swap its destination with "
<< *BMI);
- BMI->getOperand(0).setMBB(DestBB);
- MI->getOperand(0).setMBB(NewDest);
+ MI->setDesc(TII->get(OppositeBranchOpcode));
+ BMI->getOperand(BMITargetOperand).setMBB(DestBB);
+ MI->getOperand(TargetOperand).setMBB(NewDest);
return true;
}
}
}
+
if (NeedSplit) {
splitBlockBeforeInstr(MI);
// No need for the branch to the next block. We're adding an unconditional
@@ -1468,7 +1648,7 @@ MipsConstantIslands::fixupConditionalBr(ImmBranch &Br) {
MBB->back().eraseFromParent();
// BBInfo[SplitBB].Offset is wrong temporarily, fixed below
}
- MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
+ MachineBasicBlock *NextBB = std::next(MachineFunction::iterator(MBB));
DEBUG(dbgs() << " Insert B to BB#" << DestBB->getNumber()
<< " also invert condition and change dest. to BB#"
@@ -1476,8 +1656,14 @@ MipsConstantIslands::fixupConditionalBr(ImmBranch &Br) {
// Insert a new conditional branch and a new unconditional branch.
// Also update the ImmBranch as well as adding a new entry for the new branch.
- BuildMI(MBB, DebugLoc(), TII->get(MI->getOpcode()))
- .addMBB(NextBB).addImm(CC).addReg(CCReg);
+ if (MI->getNumExplicitOperands() == 2) {
+ BuildMI(MBB, DebugLoc(), TII->get(OppositeBranchOpcode))
+ .addReg(MI->getOperand(0).getReg())
+ .addMBB(NextBB);
+ } else {
+ BuildMI(MBB, DebugLoc(), TII->get(OppositeBranchOpcode))
+ .addMBB(NextBB);
+ }
Br.MI = &MBB->back();
BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
@@ -1496,13 +1682,13 @@ MipsConstantIslands::fixupConditionalBr(ImmBranch &Br) {
void MipsConstantIslands::prescanForConstants() {
unsigned J = 0;
(void)J;
- PrescannedForConstants = true;
for (MachineFunction::iterator B =
MF->begin(), E = MF->end(); B != E; ++B) {
for (MachineBasicBlock::instr_iterator I =
B->instr_begin(), EB = B->instr_end(); I != EB; ++I) {
switch(I->getDesc().getOpcode()) {
case Mips::LwConstant32: {
+ PrescannedForConstants = true;
DEBUG(dbgs() << "constant island constant " << *I << "\n");
J = I->getNumOperands();
DEBUG(dbgs() << "num operands " << J << "\n");
diff --git a/contrib/llvm/lib/Target/Mips/MipsDSPInstrFormats.td b/contrib/llvm/lib/Target/Mips/MipsDSPInstrFormats.td
index cf09113..b5d52ce 100644
--- a/contrib/llvm/lib/Target/Mips/MipsDSPInstrFormats.td
+++ b/contrib/llvm/lib/Target/Mips/MipsDSPInstrFormats.td
@@ -7,9 +7,9 @@
//
//===----------------------------------------------------------------------===//
-def HasDSP : Predicate<"Subtarget.hasDSP()">,
+def HasDSP : Predicate<"Subtarget->hasDSP()">,
AssemblerPredicate<"FeatureDSP">;
-def HasDSPR2 : Predicate<"Subtarget.hasDSPR2()">,
+def HasDSPR2 : Predicate<"Subtarget->hasDSPR2()">,
AssemblerPredicate<"FeatureDSPR2">;
// Fields.
diff --git a/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp b/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
index ffbd83b..bcfbc12 100644
--- a/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
@@ -11,8 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "delay-slot-filler"
-
+#include "MCTargetDesc/MipsMCNaCl.h"
#include "Mips.h"
#include "MipsInstrInfo.h"
#include "MipsTargetMachine.h"
@@ -24,6 +23,7 @@
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -32,6 +32,8 @@
using namespace llvm;
+#define DEBUG_TYPE "delay-slot-filler"
+
STATISTIC(FilledSlots, "Number of delay slots filled");
STATISTIC(UsefulSlots, "Number of delay slots filled with instructions that"
" are not NOP.");
@@ -65,20 +67,6 @@ namespace {
typedef MachineBasicBlock::reverse_iterator ReverseIter;
typedef SmallDenseMap<MachineBasicBlock*, MachineInstr*, 2> BB2BrMap;
- /// \brief A functor comparing edge weight of two blocks.
- struct CmpWeight {
- CmpWeight(const MachineBasicBlock &S,
- const MachineBranchProbabilityInfo &P) : Src(S), Prob(P) {}
-
- bool operator()(const MachineBasicBlock *Dst0,
- const MachineBasicBlock *Dst1) const {
- return Prob.getEdgeWeight(&Src, Dst0) < Prob.getEdgeWeight(&Src, Dst1);
- }
-
- const MachineBasicBlock &Src;
- const MachineBranchProbabilityInfo &Prob;
- };
-
class RegDefsUses {
public:
RegDefsUses(TargetMachine &TM);
@@ -137,7 +125,7 @@ namespace {
public:
NoMemInstr() : InspectMemInstr(true) {}
private:
- virtual bool hasHazard_(const MachineInstr &MI) { return true; }
+ bool hasHazard_(const MachineInstr &MI) override { return true; }
};
/// This subclass accepts loads from stacks and constant loads.
@@ -145,7 +133,7 @@ namespace {
public:
LoadFromStackOrConst() : InspectMemInstr(false) {}
private:
- virtual bool hasHazard_(const MachineInstr &MI);
+ bool hasHazard_(const MachineInstr &MI) override;
};
/// This subclass uses memory dependence information to determine whether a
@@ -155,19 +143,21 @@ namespace {
MemDefsUses(const MachineFrameInfo *MFI);
private:
- virtual bool hasHazard_(const MachineInstr &MI);
+ typedef PointerUnion<const Value *, const PseudoSourceValue *> ValueType;
+
+ bool hasHazard_(const MachineInstr &MI) override;
/// Update Defs and Uses. Return true if there exist dependences that
/// disqualify the delay slot candidate between V and values in Uses and
/// Defs.
- bool updateDefsUses(const Value *V, bool MayStore);
+ bool updateDefsUses(ValueType V, bool MayStore);
/// Get the list of underlying objects of MI's memory operand.
bool getUnderlyingObjects(const MachineInstr &MI,
- SmallVectorImpl<const Value *> &Objects) const;
+ SmallVectorImpl<ValueType> &Objects) const;
const MachineFrameInfo *MFI;
- SmallPtrSet<const Value*, 4> Uses, Defs;
+ SmallPtrSet<ValueType, 4> Uses, Defs;
/// Flags indicating whether loads or stores with no underlying objects have
/// been seen.
@@ -179,19 +169,26 @@ namespace {
Filler(TargetMachine &tm)
: MachineFunctionPass(ID), TM(tm) { }
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Mips Delay Slot Filler";
}
- bool runOnMachineFunction(MachineFunction &F) {
+ bool runOnMachineFunction(MachineFunction &F) override {
bool Changed = false;
for (MachineFunction::iterator FI = F.begin(), FE = F.end();
FI != FE; ++FI)
Changed |= runOnMachineBasicBlock(*FI);
+
+ // This pass invalidates liveness information when it reorders
+    // instructions to fill delay slots. Without this, -verify-machineinstrs
+ // will fail.
+ if (Changed)
+ F.getRegInfo().invalidateLiveness();
+
return Changed;
}
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MachineBranchProbabilityInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -412,16 +409,15 @@ bool LoadFromStackOrConst::hasHazard_(const MachineInstr &MI) {
if (MI.mayStore())
return true;
- if (!MI.hasOneMemOperand() || !(*MI.memoperands_begin())->getValue())
+ if (!MI.hasOneMemOperand() || !(*MI.memoperands_begin())->getPseudoValue())
return true;
- const Value *V = (*MI.memoperands_begin())->getValue();
-
- if (isa<FixedStackPseudoSourceValue>(V))
- return false;
-
- if (const PseudoSourceValue *PSV = dyn_cast<const PseudoSourceValue>(V))
- return !PSV->isConstant(0) && V != PseudoSourceValue::getStack();
+ if (const PseudoSourceValue *PSV =
+ (*MI.memoperands_begin())->getPseudoValue()) {
+ if (isa<FixedStackPseudoSourceValue>(PSV))
+ return false;
+ return !PSV->isConstant(nullptr) && PSV != PseudoSourceValue::getStack();
+ }
return true;
}
@@ -432,11 +428,11 @@ MemDefsUses::MemDefsUses(const MachineFrameInfo *MFI_)
bool MemDefsUses::hasHazard_(const MachineInstr &MI) {
bool HasHazard = false;
- SmallVector<const Value *, 4> Objs;
+ SmallVector<ValueType, 4> Objs;
// Check underlying object list.
if (getUnderlyingObjects(MI, Objs)) {
- for (SmallVectorImpl<const Value *>::const_iterator I = Objs.begin();
+ for (SmallVectorImpl<ValueType>::const_iterator I = Objs.begin();
I != Objs.end(); ++I)
HasHazard |= updateDefsUses(*I, MI.mayStore());
@@ -453,7 +449,7 @@ bool MemDefsUses::hasHazard_(const MachineInstr &MI) {
return HasHazard;
}
-bool MemDefsUses::updateDefsUses(const Value *V, bool MayStore) {
+bool MemDefsUses::updateDefsUses(ValueType V, bool MayStore) {
if (MayStore)
return !Defs.insert(V) || Uses.count(V) || SeenNoObjStore || SeenNoObjLoad;
@@ -463,10 +459,20 @@ bool MemDefsUses::updateDefsUses(const Value *V, bool MayStore) {
bool MemDefsUses::
getUnderlyingObjects(const MachineInstr &MI,
- SmallVectorImpl<const Value *> &Objects) const {
- if (!MI.hasOneMemOperand() || !(*MI.memoperands_begin())->getValue())
+ SmallVectorImpl<ValueType> &Objects) const {
+ if (!MI.hasOneMemOperand() ||
+ (!(*MI.memoperands_begin())->getValue() &&
+ !(*MI.memoperands_begin())->getPseudoValue()))
return false;
+ if (const PseudoSourceValue *PSV =
+ (*MI.memoperands_begin())->getPseudoValue()) {
+ if (!PSV->isAliased(MFI))
+ return false;
+ Objects.push_back(PSV);
+ return true;
+ }
+
const Value *V = (*MI.memoperands_begin())->getValue();
SmallVector<Value *, 4> Objs;
@@ -474,10 +480,7 @@ getUnderlyingObjects(const MachineInstr &MI,
for (SmallVectorImpl<Value *>::iterator I = Objs.begin(), E = Objs.end();
I != E; ++I) {
- if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(*I)) {
- if (PSV->isAliased(MFI))
- return false;
- } else if (!isIdentifiedObject(V))
+ if (!isIdentifiedObject(V))
return false;
Objects.push_back(*I);
@@ -514,8 +517,8 @@ bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
// Bundle the NOP to the instruction with the delay slot.
const MipsInstrInfo *TII =
static_cast<const MipsInstrInfo*>(TM.getInstrInfo());
- BuildMI(MBB, llvm::next(I), I->getDebugLoc(), TII->get(Mips::NOP));
- MIBundleBuilder(MBB, I, llvm::next(llvm::next(I)));
+ BuildMI(MBB, std::next(I), I->getDebugLoc(), TII->get(Mips::NOP));
+ MIBundleBuilder(MBB, I, std::next(I, 2));
}
return Changed;
@@ -545,6 +548,18 @@ bool Filler::searchRange(MachineBasicBlock &MBB, IterTy Begin, IterTy End,
if (delayHasHazard(*I, RegDU, IM))
continue;
+ if (TM.getSubtarget<MipsSubtarget>().isTargetNaCl()) {
+ // In NaCl, instructions that must be masked are forbidden in delay slots.
+ // We only check for loads, stores and SP changes. Calls, returns and
+ // branches are not checked because non-NaCl targets never put them in
+ // delay slots.
+ unsigned AddrIdx;
+ if ((isBasePlusOffsetMemoryAccess(I->getOpcode(), &AddrIdx)
+ && baseRegNeedsLoadStoreMask(I->getOperand(AddrIdx).getReg()))
+ || I->modifiesRegister(Mips::SP, TM.getRegisterInfo()))
+ continue;
+ }
+
Filler = I;
return true;
}
@@ -565,8 +580,8 @@ bool Filler::searchBackward(MachineBasicBlock &MBB, Iter Slot) const {
if (!searchRange(MBB, ReverseIter(Slot), MBB.rend(), RegDU, MemDU, Filler))
return false;
- MBB.splice(llvm::next(Slot), &MBB, llvm::next(Filler).base());
- MIBundleBuilder(MBB, Slot, llvm::next(llvm::next(Slot)));
+ MBB.splice(std::next(Slot), &MBB, std::next(Filler).base());
+ MIBundleBuilder(MBB, Slot, std::next(Slot, 2));
++UsefulSlots;
return true;
}
@@ -582,11 +597,11 @@ bool Filler::searchForward(MachineBasicBlock &MBB, Iter Slot) const {
RegDU.setCallerSaved(*Slot);
- if (!searchRange(MBB, llvm::next(Slot), MBB.end(), RegDU, NM, Filler))
+ if (!searchRange(MBB, std::next(Slot), MBB.end(), RegDU, NM, Filler))
return false;
- MBB.splice(llvm::next(Slot), &MBB, Filler);
- MIBundleBuilder(MBB, Slot, llvm::next(llvm::next(Slot)));
+ MBB.splice(std::next(Slot), &MBB, Filler);
+ MIBundleBuilder(MBB, Slot, std::next(Slot, 2));
++UsefulSlots;
return true;
}
@@ -603,7 +618,7 @@ bool Filler::searchSuccBBs(MachineBasicBlock &MBB, Iter Slot) const {
RegDefsUses RegDU(TM);
bool HasMultipleSuccs = false;
BB2BrMap BrMap;
- OwningPtr<InspectMemInstr> IM;
+ std::unique_ptr<InspectMemInstr> IM;
Iter Filler;
// Iterate over SuccBB's predecessor list.
@@ -637,19 +652,23 @@ bool Filler::searchSuccBBs(MachineBasicBlock &MBB, Iter Slot) const {
MachineBasicBlock *Filler::selectSuccBB(MachineBasicBlock &B) const {
if (B.succ_empty())
- return NULL;
+ return nullptr;
  // Select the successor with the largest edge weight.
- CmpWeight Cmp(B, getAnalysis<MachineBranchProbabilityInfo>());
- MachineBasicBlock *S = *std::max_element(B.succ_begin(), B.succ_end(), Cmp);
- return S->isLandingPad() ? NULL : S;
+ auto &Prob = getAnalysis<MachineBranchProbabilityInfo>();
+ MachineBasicBlock *S = *std::max_element(B.succ_begin(), B.succ_end(),
+ [&](const MachineBasicBlock *Dst0,
+ const MachineBasicBlock *Dst1) {
+ return Prob.getEdgeWeight(&B, Dst0) < Prob.getEdgeWeight(&B, Dst1);
+ });
+ return S->isLandingPad() ? nullptr : S;
}
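The rewrite above replaces the hand-written CmpWeight functor with std::max_element and a capturing lambda, so the comparison state no longer needs dedicated member fields. A minimal, self-contained sketch of the same idiom with plain data (not LLVM code):

#include <algorithm>
#include <map>
#include <vector>

// Pick the successor id with the largest weight; the lambda captures the
// weight table by reference, just as the lambda above captures Prob and B.
static int heaviest(const std::vector<int> &Succs,
                    const std::map<int, int> &Weight) {
  auto It = std::max_element(Succs.begin(), Succs.end(),
                             [&](int A, int B) {
                               return Weight.at(A) < Weight.at(B);
                             });
  return It == Succs.end() ? -1 : *It;
}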
std::pair<MipsInstrInfo::BranchType, MachineInstr *>
Filler::getBranch(MachineBasicBlock &MBB, const MachineBasicBlock &Dst) const {
const MipsInstrInfo *TII =
static_cast<const MipsInstrInfo*>(TM.getInstrInfo());
- MachineBasicBlock *TrueBB = 0, *FalseBB = 0;
+ MachineBasicBlock *TrueBB = nullptr, *FalseBB = nullptr;
SmallVector<MachineInstr*, 2> BranchInstrs;
SmallVector<MachineOperand, 2> Cond;
@@ -657,11 +676,11 @@ Filler::getBranch(MachineBasicBlock &MBB, const MachineBasicBlock &Dst) const {
TII->AnalyzeBranch(MBB, TrueBB, FalseBB, Cond, false, BranchInstrs);
if ((R == MipsInstrInfo::BT_None) || (R == MipsInstrInfo::BT_NoBranch))
- return std::make_pair(R, (MachineInstr*)NULL);
+ return std::make_pair(R, nullptr);
if (R != MipsInstrInfo::BT_CondUncond) {
if (!hasUnoccupiedSlot(BranchInstrs[0]))
- return std::make_pair(MipsInstrInfo::BT_None, (MachineInstr*)NULL);
+ return std::make_pair(MipsInstrInfo::BT_None, nullptr);
assert(((R != MipsInstrInfo::BT_Uncond) || (TrueBB == &Dst)));
@@ -678,7 +697,7 @@ Filler::getBranch(MachineBasicBlock &MBB, const MachineBasicBlock &Dst) const {
if (hasUnoccupiedSlot(BranchInstrs[1]) && (FalseBB == &Dst))
return std::make_pair(MipsInstrInfo::BT_Uncond, BranchInstrs[1]);
- return std::make_pair(MipsInstrInfo::BT_None, (MachineInstr*)NULL);
+ return std::make_pair(MipsInstrInfo::BT_None, nullptr);
}
bool Filler::examinePred(MachineBasicBlock &Pred, const MachineBasicBlock &Succ,
@@ -714,6 +733,6 @@ bool Filler::delayHasHazard(const MachineInstr &Candidate, RegDefsUses &RegDU,
bool Filler::terminateSearch(const MachineInstr &Candidate) const {
return (Candidate.isTerminator() || Candidate.isCall() ||
- Candidate.isLabel() || Candidate.isInlineAsm() ||
+ Candidate.isPosition() || Candidate.isInlineAsm() ||
Candidate.hasUnmodeledSideEffects());
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsFastISel.cpp b/contrib/llvm/lib/Target/Mips/MipsFastISel.cpp
new file mode 100644
index 0000000..617801b
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsFastISel.cpp
@@ -0,0 +1,400 @@
+//===-- MipsFastISel.cpp - Mips FastISel implementation ------------------===//
+
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
+#include "llvm/CodeGen/FastISel.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetLibraryInfo.h"
+#include "MipsRegisterInfo.h"
+#include "MipsISelLowering.h"
+#include "MipsMachineFunction.h"
+#include "MipsSubtarget.h"
+#include "MipsTargetMachine.h"
+
+using namespace llvm;
+
+namespace {
+
+// All possible address modes.
+typedef struct Address {
+ enum { RegBase, FrameIndexBase } BaseType;
+
+ union {
+ unsigned Reg;
+ int FI;
+ } Base;
+
+ int64_t Offset;
+
+ // Innocuous defaults for our address.
+ Address() : BaseType(RegBase), Offset(0) { Base.Reg = 0; }
+} Address;
+
+class MipsFastISel final : public FastISel {
+
+ /// Subtarget - Keep a pointer to the MipsSubtarget around so that we can
+ /// make the right decision when generating code for different targets.
+ Module &M;
+ const TargetMachine &TM;
+ const TargetInstrInfo &TII;
+ const TargetLowering &TLI;
+ const MipsSubtarget *Subtarget;
+ MipsFunctionInfo *MFI;
+
+ // Convenience variables to avoid some queries.
+ LLVMContext *Context;
+
+ bool TargetSupported;
+
+public:
+ explicit MipsFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo)
+ : FastISel(funcInfo, libInfo),
+ M(const_cast<Module &>(*funcInfo.Fn->getParent())),
+ TM(funcInfo.MF->getTarget()), TII(*TM.getInstrInfo()),
+ TLI(*TM.getTargetLowering()),
+ Subtarget(&TM.getSubtarget<MipsSubtarget>()) {
+ MFI = funcInfo.MF->getInfo<MipsFunctionInfo>();
+ Context = &funcInfo.Fn->getContext();
+ TargetSupported = ((Subtarget->getRelocationModel() == Reloc::PIC_) &&
+ (Subtarget->hasMips32r2() && (Subtarget->isABI_O32())));
+ }
+
+ bool TargetSelectInstruction(const Instruction *I) override;
+ unsigned TargetMaterializeConstant(const Constant *C) override;
+
+ bool ComputeAddress(const Value *Obj, Address &Addr);
+
+private:
+ bool EmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
+ unsigned Alignment = 0);
+ bool EmitStore(MVT VT, unsigned SrcReg, Address &Addr,
+ unsigned Alignment = 0);
+ bool SelectLoad(const Instruction *I);
+ bool SelectRet(const Instruction *I);
+ bool SelectStore(const Instruction *I);
+
+ bool isTypeLegal(Type *Ty, MVT &VT);
+ bool isLoadTypeLegal(Type *Ty, MVT &VT);
+
+ unsigned MaterializeFP(const ConstantFP *CFP, MVT VT);
+ unsigned MaterializeGV(const GlobalValue *GV, MVT VT);
+ unsigned MaterializeInt(const Constant *C, MVT VT);
+ unsigned Materialize32BitInt(int64_t Imm, const TargetRegisterClass *RC);
+
+  // For some reason, this default is not generated by TableGen, so we
+  // explicitly generate it here.
+ //
+ unsigned FastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
+ unsigned Op0, bool Op0IsKill, uint64_t imm1,
+ uint64_t imm2, unsigned Op3, bool Op3IsKill) {
+ return 0;
+ }
+
+ MachineInstrBuilder EmitInst(unsigned Opc) {
+ return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
+ }
+
+ MachineInstrBuilder EmitInst(unsigned Opc, unsigned DstReg) {
+ return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
+ DstReg);
+ }
+
+ MachineInstrBuilder EmitInstStore(unsigned Opc, unsigned SrcReg,
+ unsigned MemReg, int64_t MemOffset) {
+ return EmitInst(Opc).addReg(SrcReg).addReg(MemReg).addImm(MemOffset);
+ }
+
+ MachineInstrBuilder EmitInstLoad(unsigned Opc, unsigned DstReg,
+ unsigned MemReg, int64_t MemOffset) {
+ return EmitInst(Opc, DstReg).addReg(MemReg).addImm(MemOffset);
+ }
+
+#include "MipsGenFastISel.inc"
+};
+
+bool MipsFastISel::isTypeLegal(Type *Ty, MVT &VT) {
+ EVT evt = TLI.getValueType(Ty, true);
+ // Only handle simple types.
+ if (evt == MVT::Other || !evt.isSimple())
+ return false;
+ VT = evt.getSimpleVT();
+
+ // Handle all legal types, i.e. a register that will directly hold this
+ // value.
+ return TLI.isTypeLegal(VT);
+}
+
+bool MipsFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
+ if (isTypeLegal(Ty, VT))
+ return true;
+ // We will extend this in a later patch:
+  // If this is a type that can be sign- or zero-extended to a basic operation,
+  // go ahead and accept it now.
+ if (VT == MVT::i8 || VT == MVT::i16)
+ return true;
+ return false;
+}
+
+bool MipsFastISel::ComputeAddress(const Value *Obj, Address &Addr) {
+  // This construct looks a bit awkward, but it is how other ports handle this,
+  // and as this function is fleshed out the cases that currently return false
+  // will gain additional code.
+ //
+ if (isa<Instruction>(Obj))
+ return false;
+ else if (isa<ConstantExpr>(Obj))
+ return false;
+ Addr.Base.Reg = getRegForValue(Obj);
+ return Addr.Base.Reg != 0;
+}
+
+bool MipsFastISel::EmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
+ unsigned Alignment) {
+ //
+ // more cases will be handled here in following patches.
+ //
+ unsigned Opc;
+ switch (VT.SimpleTy) {
+ case MVT::i32: {
+ ResultReg = createResultReg(&Mips::GPR32RegClass);
+ Opc = Mips::LW;
+ break;
+ }
+ case MVT::i16: {
+ ResultReg = createResultReg(&Mips::GPR32RegClass);
+ Opc = Mips::LHu;
+ break;
+ }
+ case MVT::i8: {
+ ResultReg = createResultReg(&Mips::GPR32RegClass);
+ Opc = Mips::LBu;
+ break;
+ }
+ case MVT::f32: {
+ ResultReg = createResultReg(&Mips::FGR32RegClass);
+ Opc = Mips::LWC1;
+ break;
+ }
+ case MVT::f64: {
+ ResultReg = createResultReg(&Mips::AFGR64RegClass);
+ Opc = Mips::LDC1;
+ break;
+ }
+ default:
+ return false;
+ }
+ EmitInstLoad(Opc, ResultReg, Addr.Base.Reg, Addr.Offset);
+ return true;
+}
+
+// Materialize a constant into a register, and return the register
+// number (or zero if we failed to handle it).
+unsigned MipsFastISel::TargetMaterializeConstant(const Constant *C) {
+ EVT CEVT = TLI.getValueType(C->getType(), true);
+
+ // Only handle simple types.
+ if (!CEVT.isSimple())
+ return 0;
+ MVT VT = CEVT.getSimpleVT();
+
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
+ return MaterializeFP(CFP, VT);
+ else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
+ return MaterializeGV(GV, VT);
+ else if (isa<ConstantInt>(C))
+ return MaterializeInt(C, VT);
+
+ return 0;
+}
+
+bool MipsFastISel::EmitStore(MVT VT, unsigned SrcReg, Address &Addr,
+ unsigned Alignment) {
+ //
+ // more cases will be handled here in following patches.
+ //
+ unsigned Opc;
+ switch (VT.SimpleTy) {
+ case MVT::i8:
+ Opc = Mips::SB;
+ break;
+ case MVT::i16:
+ Opc = Mips::SH;
+ break;
+ case MVT::i32:
+ Opc = Mips::SW;
+ break;
+ case MVT::f32:
+ Opc = Mips::SWC1;
+ break;
+ case MVT::f64:
+ Opc = Mips::SDC1;
+ break;
+ default:
+ return false;
+ }
+ EmitInstStore(Opc, SrcReg, Addr.Base.Reg, Addr.Offset);
+ return true;
+}
+
+bool MipsFastISel::SelectLoad(const Instruction *I) {
+ // Atomic loads need special handling.
+ if (cast<LoadInst>(I)->isAtomic())
+ return false;
+
+ // Verify we have a legal type before going any further.
+ MVT VT;
+ if (!isLoadTypeLegal(I->getType(), VT))
+ return false;
+
+ // See if we can handle this address.
+ Address Addr;
+ if (!ComputeAddress(I->getOperand(0), Addr))
+ return false;
+
+ unsigned ResultReg;
+ if (!EmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
+ return false;
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
+bool MipsFastISel::SelectStore(const Instruction *I) {
+ Value *Op0 = I->getOperand(0);
+ unsigned SrcReg = 0;
+
+ // Atomic stores need special handling.
+ if (cast<StoreInst>(I)->isAtomic())
+ return false;
+
+ // Verify we have a legal type before going any further.
+ MVT VT;
+ if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
+ return false;
+
+ // Get the value to be stored into a register.
+ SrcReg = getRegForValue(Op0);
+ if (SrcReg == 0)
+ return false;
+
+ // See if we can handle this address.
+ Address Addr;
+ if (!ComputeAddress(I->getOperand(1), Addr))
+ return false;
+
+ if (!EmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
+ return false;
+ return true;
+}
+
+bool MipsFastISel::SelectRet(const Instruction *I) {
+ const ReturnInst *Ret = cast<ReturnInst>(I);
+
+ if (!FuncInfo.CanLowerReturn)
+ return false;
+ if (Ret->getNumOperands() > 0) {
+ return false;
+ }
+ EmitInst(Mips::RetRA);
+ return true;
+}
+
+bool MipsFastISel::TargetSelectInstruction(const Instruction *I) {
+ if (!TargetSupported)
+ return false;
+ switch (I->getOpcode()) {
+ default:
+ break;
+ case Instruction::Load:
+ return SelectLoad(I);
+ case Instruction::Store:
+ return SelectStore(I);
+ case Instruction::Ret:
+ return SelectRet(I);
+ }
+ return false;
+}
+}
+
+unsigned MipsFastISel::MaterializeFP(const ConstantFP *CFP, MVT VT) {
+ int64_t Imm = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
+ if (VT == MVT::f32) {
+ const TargetRegisterClass *RC = &Mips::FGR32RegClass;
+ unsigned DestReg = createResultReg(RC);
+ unsigned TempReg = Materialize32BitInt(Imm, &Mips::GPR32RegClass);
+ EmitInst(Mips::MTC1, DestReg).addReg(TempReg);
+ return DestReg;
+ } else if (VT == MVT::f64) {
+ const TargetRegisterClass *RC = &Mips::AFGR64RegClass;
+ unsigned DestReg = createResultReg(RC);
+ unsigned TempReg1 = Materialize32BitInt(Imm >> 32, &Mips::GPR32RegClass);
+ unsigned TempReg2 =
+ Materialize32BitInt(Imm & 0xFFFFFFFF, &Mips::GPR32RegClass);
+ EmitInst(Mips::BuildPairF64, DestReg).addReg(TempReg2).addReg(TempReg1);
+ return DestReg;
+ }
+ return 0;
+}
+
+unsigned MipsFastISel::MaterializeGV(const GlobalValue *GV, MVT VT) {
+ // For now 32-bit only.
+ if (VT != MVT::i32)
+ return 0;
+ const TargetRegisterClass *RC = &Mips::GPR32RegClass;
+ unsigned DestReg = createResultReg(RC);
+ const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
+ bool IsThreadLocal = GVar && GVar->isThreadLocal();
+ // TLS not supported at this time.
+ if (IsThreadLocal)
+ return 0;
+ EmitInst(Mips::LW, DestReg).addReg(MFI->getGlobalBaseReg()).addGlobalAddress(
+ GV, 0, MipsII::MO_GOT);
+ return DestReg;
+}
+unsigned MipsFastISel::MaterializeInt(const Constant *C, MVT VT) {
+ if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
+ return 0;
+ const TargetRegisterClass *RC = &Mips::GPR32RegClass;
+ const ConstantInt *CI = cast<ConstantInt>(C);
+ int64_t Imm;
+ if (CI->isNegative())
+ Imm = CI->getSExtValue();
+ else
+ Imm = CI->getZExtValue();
+ return Materialize32BitInt(Imm, RC);
+}
+
+unsigned MipsFastISel::Materialize32BitInt(int64_t Imm,
+ const TargetRegisterClass *RC) {
+ unsigned ResultReg = createResultReg(RC);
+
+ if (isInt<16>(Imm)) {
+ unsigned Opc = Mips::ADDiu;
+ EmitInst(Opc, ResultReg).addReg(Mips::ZERO).addImm(Imm);
+ return ResultReg;
+ } else if (isUInt<16>(Imm)) {
+ EmitInst(Mips::ORi, ResultReg).addReg(Mips::ZERO).addImm(Imm);
+ return ResultReg;
+ }
+ unsigned Lo = Imm & 0xFFFF;
+ unsigned Hi = (Imm >> 16) & 0xFFFF;
+ if (Lo) {
+ // Both Lo and Hi have nonzero bits.
+ unsigned TmpReg = createResultReg(RC);
+ EmitInst(Mips::LUi, TmpReg).addImm(Hi);
+ EmitInst(Mips::ORi, ResultReg).addReg(TmpReg).addImm(Lo);
+ } else {
+ EmitInst(Mips::LUi, ResultReg).addImm(Hi);
+ }
+ return ResultReg;
+}
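A few worked examples of the materialization strategy above (the constants are assumptions chosen for illustration): values that fit a signed or unsigned 16-bit immediate take a single ADDiu or ORi against $zero; everything else loads the upper half with LUi and, if the lower half is nonzero, merges it in with ORi.

// Imm = -5          -> ADDiu r, $zero, -5        (fits isInt<16>)
// Imm = 0x00008000  -> ORi   r, $zero, 0x8000    (fits isUInt<16> only)
// Imm = 0x00120000  -> LUi   r, 0x12             (low half is zero)
// Imm = 0x12345678  -> LUi   t, 0x1234
//                      ORi   r, t, 0x5678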
+
+namespace llvm {
+FastISel *Mips::createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo) {
+ return new MipsFastISel(funcInfo, libInfo);
+}
+}
diff --git a/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp
index eb9d49f..61afe17 100644
--- a/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp
@@ -82,9 +82,8 @@ using namespace llvm;
//
//===----------------------------------------------------------------------===//
-const MipsFrameLowering *MipsFrameLowering::create(MipsTargetMachine &TM,
- const MipsSubtarget &ST) {
- if (TM.getSubtargetImpl()->inMips16Mode())
+const MipsFrameLowering *MipsFrameLowering::create(const MipsSubtarget &ST) {
+ if (ST.inMips16Mode())
return llvm::createMips16FrameLowering(ST);
return llvm::createMipsSEFrameLowering(ST);
@@ -110,7 +109,7 @@ uint64_t MipsFrameLowering::estimateStackSize(const MachineFunction &MF) const {
Offset = std::max(Offset, -MFI->getObjectOffset(I));
// Conservatively assume all callee-saved registers will be saved.
- for (const uint16_t *R = TRI.getCalleeSavedRegs(&MF); *R; ++R) {
+ for (const MCPhysReg *R = TRI.getCalleeSavedRegs(&MF); *R; ++R) {
unsigned Size = TRI.getMinimalPhysRegClass(*R)->getSize();
Offset = RoundUpToAlignment(Offset + Size, Size);
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h
index 6a5f79d..9d59309 100644
--- a/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h
+++ b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h
@@ -15,7 +15,6 @@
#define MIPS_FRAMEINFO_H
#include "Mips.h"
-#include "MipsSubtarget.h"
#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
@@ -29,10 +28,9 @@ public:
explicit MipsFrameLowering(const MipsSubtarget &sti, unsigned Alignment)
: TargetFrameLowering(StackGrowsDown, Alignment, 0, Alignment), STI(sti) {}
- static const MipsFrameLowering *create(MipsTargetMachine &TM,
- const MipsSubtarget &ST);
+ static const MipsFrameLowering *create(const MipsSubtarget &ST);
- bool hasFP(const MachineFunction &MF) const;
+ bool hasFP(const MachineFunction &MF) const override;
protected:
uint64_t estimateStackSize(const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
index c417bd5..0bdabf3 100644
--- a/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
@@ -11,31 +11,32 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mips-isel"
#include "MipsISelDAGToDAG.h"
-#include "Mips16ISelDAGToDAG.h"
-#include "MipsSEISelDAGToDAG.h"
-#include "Mips.h"
#include "MCTargetDesc/MipsBaseInfo.h"
+#include "Mips.h"
+#include "Mips16ISelDAGToDAG.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterInfo.h"
+#include "MipsSEISelDAGToDAG.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
+#define DEBUG_TYPE "mips-isel"
+
//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//
@@ -46,6 +47,7 @@ using namespace llvm;
//===----------------------------------------------------------------------===//
bool MipsDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
+ Subtarget = &TM.getSubtarget<MipsSubtarget>();
bool Ret = SelectionDAGISel::runOnMachineFunction(MF);
processFunctionAfterISel(MF);
@@ -93,6 +95,12 @@ bool MipsDAGToDAGISel::selectIntAddrMM(SDValue Addr, SDValue &Base,
return false;
}
+bool MipsDAGToDAGISel::selectIntAddrMSA(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
bool MipsDAGToDAGISel::selectAddr16(SDNode *Parent, SDValue N, SDValue &Base,
SDValue &Offset, SDValue &Alias) {
llvm_unreachable("Unimplemented function.");
@@ -176,7 +184,7 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
if (Node->isMachineOpcode()) {
DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
Node->setNodeId(-1);
- return NULL;
+ return nullptr;
}
// See if subclasses can handle this node.
@@ -195,8 +203,9 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
#ifndef NDEBUG
case ISD::LOAD:
case ISD::STORE:
- assert(cast<MemSDNode>(Node)->getMemoryVT().getSizeInBits() / 8 <=
- cast<MemSDNode>(Node)->getAlignment() &&
+ assert((Subtarget->systemSupportsUnalignedAccess() ||
+ cast<MemSDNode>(Node)->getMemoryVT().getSizeInBits() / 8 <=
+ cast<MemSDNode>(Node)->getAlignment()) &&
"Unexpected unaligned loads/stores.");
break;
#endif
@@ -206,7 +215,7 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
SDNode *ResNode = SelectCode(Node);
DEBUG(errs() << "=> ");
- if (ResNode == NULL || ResNode == Node)
+ if (ResNode == nullptr || ResNode == Node)
DEBUG(Node->dump(CurDAG));
else
DEBUG(ResNode->dump(CurDAG));
diff --git a/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.h b/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.h
index a4d9da5..52f4c0d 100644
--- a/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.h
+++ b/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.h
@@ -32,21 +32,21 @@ namespace llvm {
class MipsDAGToDAGISel : public SelectionDAGISel {
public:
explicit MipsDAGToDAGISel(MipsTargetMachine &TM)
- : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<MipsSubtarget>()) {}
+ : SelectionDAGISel(TM), Subtarget(nullptr) {}
// Pass Name
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "MIPS DAG->DAG Pattern Instruction Selection";
}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
protected:
SDNode *getGlobalBaseReg();
/// Keep a pointer to the MipsSubtarget around so that we can make the right
/// decision when generating code for different targets.
- const MipsSubtarget &Subtarget;
+ const MipsSubtarget *Subtarget;
private:
// Include the pieces autogenerated from the target description.
@@ -73,6 +73,10 @@ private:
virtual bool selectIntAddrMM(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
+ /// Match addr+simm10 and addr
+ virtual bool selectIntAddrMSA(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const;
+
virtual bool selectAddr16(SDNode *Parent, SDValue N, SDValue &Base,
SDValue &Offset, SDValue &Alias);
@@ -106,7 +110,7 @@ private:
/// starting at bit zero.
virtual bool selectVSplatMaskR(SDValue N, SDValue &Imm) const;
- virtual SDNode *Select(SDNode *N);
+ SDNode *Select(SDNode *N) override;
virtual std::pair<bool, SDNode*> selectNode(SDNode *Node) = 0;
@@ -117,9 +121,9 @@ private:
virtual void processFunctionAfterISel(MachineFunction &MF) = 0;
- virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
- char ConstraintCode,
- std::vector<SDValue> &OutOps);
+ bool SelectInlineAsmMemoryOperand(const SDValue &Op,
+ char ConstraintCode,
+ std::vector<SDValue> &OutOps) override;
};
/// createMipsISelDag - This pass converts a legalized DAG into a
diff --git a/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp b/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 1e8250c..40dc8e4 100644
--- a/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -11,7 +11,6 @@
// selection DAG.
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mips-lower"
#include "MipsISelLowering.h"
#include "InstPrinter/MipsInstPrinter.h"
#include "MCTargetDesc/MipsBaseInfo.h"
@@ -39,6 +38,8 @@
using namespace llvm;
+#define DEBUG_TYPE "mips-lower"
+
STATISTIC(NumTailCalls, "Number of tail calls");
static cl::opt<bool>
@@ -50,16 +51,21 @@ NoZeroDivCheck("mno-check-zero-division", cl::Hidden,
cl::desc("MIPS: Don't trap on integer division by zero."),
cl::init(false));
-static const uint16_t O32IntRegs[4] = {
+cl::opt<bool>
+EnableMipsFastISel("mips-fast-isel", cl::Hidden,
+ cl::desc("Allow mips-fast-isel to be used"),
+ cl::init(false));
+
+static const MCPhysReg O32IntRegs[4] = {
Mips::A0, Mips::A1, Mips::A2, Mips::A3
};
-static const uint16_t Mips64IntRegs[8] = {
+static const MCPhysReg Mips64IntRegs[8] = {
Mips::A0_64, Mips::A1_64, Mips::A2_64, Mips::A3_64,
Mips::T0_64, Mips::T1_64, Mips::T2_64, Mips::T3_64
};
-static const uint16_t Mips64DPRegs[8] = {
+static const MCPhysReg Mips64DPRegs[8] = {
Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
};
@@ -197,20 +203,23 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
case MipsISD::ILVR: return "MipsISD::ILVR";
case MipsISD::PCKEV: return "MipsISD::PCKEV";
case MipsISD::PCKOD: return "MipsISD::PCKOD";
- default: return NULL;
+ case MipsISD::INSVE: return "MipsISD::INSVE";
+ default: return nullptr;
}
}
-MipsTargetLowering::
-MipsTargetLowering(MipsTargetMachine &TM)
- : TargetLowering(TM, new MipsTargetObjectFile()),
- Subtarget(&TM.getSubtarget<MipsSubtarget>()),
- HasMips64(Subtarget->hasMips64()), IsN64(Subtarget->isABI_N64()),
- IsO32(Subtarget->isABI_O32()) {
+MipsTargetLowering::MipsTargetLowering(MipsTargetMachine &TM,
+ const MipsSubtarget &STI)
+ : TargetLowering(TM, new MipsTargetObjectFile()), Subtarget(STI) {
// Mips does not have i1 type, so use i32 for
// setcc operations results (slt, sgt, ...).
setBooleanContents(ZeroOrOneBooleanContent);
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
+ // The cmp.cond.fmt instruction in MIPS32r6/MIPS64r6 uses 0 and -1 like MSA
+ // does. Integer booleans still use 0 and 1.
+ if (Subtarget.hasMips32r6())
+ setBooleanContents(ZeroOrOneBooleanContent,
+ ZeroOrNegativeOneBooleanContent);
// Load extented operations for i1 types must be promoted
setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
@@ -247,12 +256,7 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
- if (!TM.Options.NoNaNsFPMath) {
- setOperationAction(ISD::FABS, MVT::f32, Custom);
- setOperationAction(ISD::FABS, MVT::f64, Custom);
- }
-
- if (HasMips64) {
+ if (Subtarget.isGP64bit()) {
setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
@@ -264,14 +268,14 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
}
- if (!HasMips64) {
+ if (!Subtarget.isGP64bit()) {
setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
}
setOperationAction(ISD::ADD, MVT::i32, Custom);
- if (HasMips64)
+ if (Subtarget.isGP64bit())
setOperationAction(ISD::ADD, MVT::i64, Custom);
setOperationAction(ISD::SDIV, MVT::i32, Expand);
@@ -288,14 +292,20 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::BR_CC, MVT::f64, Expand);
setOperationAction(ISD::BR_CC, MVT::i32, Expand);
setOperationAction(ISD::BR_CC, MVT::i64, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
- setOperationAction(ISD::CTPOP, MVT::i32, Expand);
- setOperationAction(ISD::CTPOP, MVT::i64, Expand);
+ if (Subtarget.hasCnMips()) {
+ setOperationAction(ISD::CTPOP, MVT::i32, Legal);
+ setOperationAction(ISD::CTPOP, MVT::i64, Legal);
+ } else {
+ setOperationAction(ISD::CTPOP, MVT::i32, Expand);
+ setOperationAction(ISD::CTPOP, MVT::i64, Expand);
+ }
setOperationAction(ISD::CTTZ, MVT::i32, Expand);
setOperationAction(ISD::CTTZ, MVT::i64, Expand);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
@@ -307,10 +317,10 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
- if (!Subtarget->hasMips32r2())
+ if (!Subtarget.hasMips32r2())
setOperationAction(ISD::ROTR, MVT::i32, Expand);
- if (!Subtarget->hasMips64r2())
+ if (!Subtarget.hasMips64r2())
setOperationAction(ISD::ROTR, MVT::i64, Expand);
setOperationAction(ISD::FSIN, MVT::f32, Expand);
@@ -331,11 +341,6 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::FREM, MVT::f32, Expand);
setOperationAction(ISD::FREM, MVT::f64, Expand);
- if (!TM.Options.NoNaNsFPMath) {
- setOperationAction(ISD::FNEG, MVT::f32, Expand);
- setOperationAction(ISD::FNEG, MVT::f64, Expand);
- }
-
setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
setOperationAction(ISD::VAARG, MVT::Other, Expand);
@@ -353,22 +358,23 @@ MipsTargetLowering(MipsTargetMachine &TM)
setInsertFencesForAtomic(true);
- if (!Subtarget->hasSEInReg()) {
+ if (!Subtarget.hasMips32r2()) {
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
}
- if (!Subtarget->hasBitCount()) {
+ // MIPS16 lacks MIPS32's clz and clo instructions.
+ if (!Subtarget.hasMips32() || Subtarget.inMips16Mode())
setOperationAction(ISD::CTLZ, MVT::i32, Expand);
+ if (!Subtarget.hasMips64())
setOperationAction(ISD::CTLZ, MVT::i64, Expand);
- }
- if (!Subtarget->hasSwap()) {
+ if (!Subtarget.hasMips32r2())
setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+ if (!Subtarget.hasMips64r2())
setOperationAction(ISD::BSWAP, MVT::i64, Expand);
- }
- if (HasMips64) {
+ if (Subtarget.isGP64bit()) {
setLoadExtAction(ISD::SEXTLOAD, MVT::i32, Custom);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, Custom);
setLoadExtAction(ISD::EXTLOAD, MVT::i32, Custom);
@@ -384,21 +390,34 @@ MipsTargetLowering(MipsTargetMachine &TM)
setTargetDAGCombine(ISD::OR);
setTargetDAGCombine(ISD::ADD);
- setMinFunctionAlignment(HasMips64 ? 3 : 2);
+ setMinFunctionAlignment(Subtarget.isGP64bit() ? 3 : 2);
- setStackPointerRegisterToSaveRestore(IsN64 ? Mips::SP_64 : Mips::SP);
+ setStackPointerRegisterToSaveRestore(Subtarget.isABI_N64() ? Mips::SP_64
+ : Mips::SP);
- setExceptionPointerRegister(IsN64 ? Mips::A0_64 : Mips::A0);
- setExceptionSelectorRegister(IsN64 ? Mips::A1_64 : Mips::A1);
+ setExceptionPointerRegister(Subtarget.isABI_N64() ? Mips::A0_64 : Mips::A0);
+ setExceptionSelectorRegister(Subtarget.isABI_N64() ? Mips::A1_64 : Mips::A1);
MaxStoresPerMemcpy = 16;
+
+ isMicroMips = Subtarget.inMicroMipsMode();
}
-const MipsTargetLowering *MipsTargetLowering::create(MipsTargetMachine &TM) {
- if (TM.getSubtargetImpl()->inMips16Mode())
- return llvm::createMips16TargetLowering(TM);
+const MipsTargetLowering *MipsTargetLowering::create(MipsTargetMachine &TM,
+ const MipsSubtarget &STI) {
+ if (STI.inMips16Mode())
+ return llvm::createMips16TargetLowering(TM, STI);
- return llvm::createMipsSETargetLowering(TM);
+ return llvm::createMipsSETargetLowering(TM, STI);
+}
+
+// Create a fast isel object.
+FastISel *
+MipsTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo) const {
+ if (!EnableMipsFastISel)
+ return TargetLowering::createFastISel(funcInfo, libInfo);
+ return Mips::createFastISel(funcInfo, libInfo);
}
EVT MipsTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
@@ -409,7 +428,7 @@ EVT MipsTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget *Subtarget) {
+ const MipsSubtarget &Subtarget) {
if (DCI.isBeforeLegalizeOps())
return SDValue();
@@ -519,7 +538,7 @@ static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True,
static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget *Subtarget) {
+ const MipsSubtarget &Subtarget) {
if (DCI.isBeforeLegalizeOps())
return SDValue();
@@ -535,28 +554,74 @@ static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
if (!FalseTy.isInteger())
return SDValue();
- ConstantSDNode *CN = dyn_cast<ConstantSDNode>(False);
+ ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(False);
- if (!CN || CN->getZExtValue())
+ // If the RHS (False) is 0, we swap the order of the operands
+ // of ISD::SELECT (obviously also inverting the condition) so that we can
+ // take advantage of conditional moves using the $0 register.
+ // Example:
+ // return (a != 0) ? x : 0;
+ // load $reg, x
+ // movz $reg, $0, a
+ if (!FalseC)
return SDValue();
const SDLoc DL(N);
- ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
+
+ if (!FalseC->getZExtValue()) {
+ ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
+ SDValue True = N->getOperand(1);
+
+ SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
+ SetCC.getOperand(1), ISD::getSetCCInverse(CC, true));
+
+ return DAG.getNode(ISD::SELECT, DL, FalseTy, SetCC, False, True);
+ }
+
+ // If both operands are integer constants there's a possibility that we
+ // can do some interesting optimizations.
SDValue True = N->getOperand(1);
+ ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(True);
+
+ if (!TrueC || !True.getValueType().isInteger())
+ return SDValue();
- SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
- SetCC.getOperand(1), ISD::getSetCCInverse(CC, true));
+ // We'll also ignore MVT::i64 operands as this optimization proves to be
+ // ineffective because of the required sign extensions, as the result of a
+ // SETCC operator is always MVT::i32 for non-vector types.
+ if (True.getValueType() == MVT::i64)
+ return SDValue();
+
+ int64_t Diff = TrueC->getSExtValue() - FalseC->getSExtValue();
+
+ // 1) (a < x) ? y : y-1
+ // slti $reg1, a, x
+ // addiu $reg2, $reg1, y-1
+ if (Diff == 1)
+ return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, False);
+
+ // 2) (a < x) ? y-1 : y
+ // slti $reg1, a, x
+ // xor $reg1, $reg1, 1
+ // addiu $reg2, $reg1, y-1
+ if (Diff == -1) {
+ ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
+ SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
+ SetCC.getOperand(1), ISD::getSetCCInverse(CC, true));
+ return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, True);
+ }
- return DAG.getNode(ISD::SELECT, DL, FalseTy, SetCC, False, True);
+ // Couldn't optimize.
+ return SDValue();
}
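
The two constant-difference cases above reduce to simple arithmetic identities: cond ? y : y-1 equals (y-1) + cond, and cond ? y-1 : y equals (y-1) + !cond. A minimal standalone sketch of this, where selectViaAdd and the test loop are illustrative names rather than anything from this patch:

#include <cassert>

static int selectViaAdd(bool Cond, int TrueVal, int FalseVal) {
  if (TrueVal - FalseVal == 1)
    return FalseVal + (Cond ? 1 : 0);  // maps to slti + addiu on MIPS
  if (TrueVal - FalseVal == -1)
    return TrueVal + (Cond ? 0 : 1);   // maps to slti + xori 1 + addiu
  return Cond ? TrueVal : FalseVal;    // no profitable rewrite
}

int main() {
  for (int a = -2; a <= 2; ++a) {
    assert(selectViaAdd(a < 1, 7, 6) == ((a < 1) ? 7 : 6));
    assert(selectViaAdd(a < 1, 6, 7) == ((a < 1) ? 6 : 7));
  }
  return 0;
}
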
static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget *Subtarget) {
+ const MipsSubtarget &Subtarget) {
// Pattern match EXT.
// $dst = and ((sra or srl) $src , pos), (2**size - 1)
// => ext $dst, $src, size, pos
- if (DCI.isBeforeLegalizeOps() || !Subtarget->hasExtractInsert())
+ if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
return SDValue();
SDValue ShiftRight = N->getOperand(0), Mask = N->getOperand(1);
@@ -592,12 +657,12 @@ static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget *Subtarget) {
+ const MipsSubtarget &Subtarget) {
// Pattern match INS.
// $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1),
// where mask1 = (2**size - 1) << pos, mask0 = ~mask1
// => ins $dst, $src, size, pos, $src1
- if (DCI.isBeforeLegalizeOps() || !Subtarget->hasExtractInsert())
+ if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
return SDValue();
SDValue And0 = N->getOperand(0), And1 = N->getOperand(1);
@@ -646,7 +711,7 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget *Subtarget) {
+ const MipsSubtarget &Subtarget) {
// (add v0, (add v1, abs_lo(tjt))) => (add (add v0, v1), abs_lo(tjt))
if (DCI.isBeforeLegalizeOps())
@@ -728,7 +793,6 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const
case ISD::SETCC: return lowerSETCC(Op, DAG);
case ISD::VASTART: return lowerVASTART(Op, DAG);
case ISD::FCOPYSIGN: return lowerFCOPYSIGN(Op, DAG);
- case ISD::FABS: return lowerFABS(Op, DAG);
case ISD::FRAMEADDR: return lowerFRAMEADDR(Op, DAG);
case ISD::RETURNADDR: return lowerRETURNADDR(Op, DAG);
case ISD::EH_RETURN: return lowerEH_RETURN(Op, DAG);
@@ -759,10 +823,10 @@ addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
return VReg;
}
-static MachineBasicBlock *expandPseudoDIV(MachineInstr *MI,
- MachineBasicBlock &MBB,
- const TargetInstrInfo &TII,
- bool Is64Bit) {
+static MachineBasicBlock *insertDivByZeroTrap(MachineInstr *MI,
+ MachineBasicBlock &MBB,
+ const TargetInstrInfo &TII,
+ bool Is64Bit) {
if (NoZeroDivCheck)
return &MBB;
@@ -770,7 +834,7 @@ static MachineBasicBlock *expandPseudoDIV(MachineInstr *MI,
MachineBasicBlock::iterator I(MI);
MachineInstrBuilder MIB;
MachineOperand &Divisor = MI->getOperand(2);
- MIB = BuildMI(MBB, llvm::next(I), MI->getDebugLoc(), TII.get(Mips::TEQ))
+ MIB = BuildMI(MBB, std::next(I), MI->getDebugLoc(), TII.get(Mips::TEQ))
.addReg(Divisor.getReg(), getKillRegState(Divisor.isKill()))
.addReg(Mips::ZERO).addImm(7);
@@ -780,6 +844,10 @@ static MachineBasicBlock *expandPseudoDIV(MachineInstr *MI,
// Clear Divisor's kill flag.
Divisor.setIsKill(false);
+
+ // We would normally delete the original instruction here but in this case
+ // we only needed to inject an additional instruction rather than replace it.
+
return &MBB;
}
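
insertDivByZeroTrap only appends a conditional trap (teq divisor, $zero, 7) after the divide; it does not expand or replace the divide itself. A rough host-side model of the behaviour being added, using the hypothetical name divWithZeroTrap:

#include <csignal>
#include <cstdint>

// Illustrative stand-in for the injected "teq $rt, $zero, 7": trap when the
// divisor is zero; otherwise the divide proceeds as before.
static int64_t divWithZeroTrap(int64_t Num, int64_t Den) {
  if (Den == 0)
    std::raise(SIGFPE);  // the hardware teq raises a trap exception instead
  return Num / Den;      // corresponds to the original DIV/MOD instruction
}
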
@@ -862,10 +930,22 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
return emitAtomicCmpSwap(MI, BB, 8);
case Mips::PseudoSDIV:
case Mips::PseudoUDIV:
- return expandPseudoDIV(MI, *BB, *getTargetMachine().getInstrInfo(), false);
+ case Mips::DIV:
+ case Mips::DIVU:
+ case Mips::MOD:
+ case Mips::MODU:
+ return insertDivByZeroTrap(MI, *BB, *getTargetMachine().getInstrInfo(),
+ false);
case Mips::PseudoDSDIV:
case Mips::PseudoDUDIV:
- return expandPseudoDIV(MI, *BB, *getTargetMachine().getInstrInfo(), true);
+ case Mips::DDIV:
+ case Mips::DDIVU:
+ case Mips::DMOD:
+ case Mips::DMODU:
+ return insertDivByZeroTrap(MI, *BB, *getTargetMachine().getInstrInfo(),
+ true);
+ case Mips::SEL_D:
+ return emitSEL_D(MI, BB);
}
}
@@ -885,16 +965,20 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
unsigned LL, SC, AND, NOR, ZERO, BEQ;
if (Size == 4) {
- LL = Mips::LL;
- SC = Mips::SC;
+ if (isMicroMips) {
+ LL = Mips::LL_MM;
+ SC = Mips::SC_MM;
+ } else {
+ LL = Subtarget.hasMips32r6() ? Mips::LL_R6 : Mips::LL;
+ SC = Subtarget.hasMips32r6() ? Mips::SC_R6 : Mips::SC;
+ }
AND = Mips::AND;
NOR = Mips::NOR;
ZERO = Mips::ZERO;
BEQ = Mips::BEQ;
- }
- else {
- LL = Mips::LLD;
- SC = Mips::SCD;
+ } else {
+ LL = Subtarget.hasMips64r6() ? Mips::LLD_R6 : Mips::LLD;
+ SC = Subtarget.hasMips64r6() ? Mips::SCD_R6 : Mips::SCD;
AND = Mips::AND64;
NOR = Mips::NOR64;
ZERO = Mips::ZERO_64;
@@ -920,7 +1004,7 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
// thisMBB:
@@ -956,11 +1040,39 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
return exitMBB;
}
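
The expansion above builds an ll/sc retry loop around the binary operation. As a conceptual approximation only (atomicBinOp and its CAS-based retry are illustrative and not how the emitted code actually works), the loop structure is roughly:

#include <atomic>
#include <cstdint>
#include <functional>

// Conceptual model: ll/sc approximated by an atomic load plus a
// compare-and-swap retry, mirroring load, compute, conditional store, retry.
static int32_t atomicBinOp(std::atomic<int32_t> &Mem, int32_t Incr,
                           const std::function<int32_t(int32_t, int32_t)> &Op,
                           bool Nand) {
  int32_t OldVal = Mem.load();
  int32_t StoreVal;
  do {                                                      // loopMBB
    StoreVal = Nand ? ~(OldVal & Incr) : Op(OldVal, Incr);  // and/nor or BinOpcode
  } while (!Mem.compare_exchange_weak(OldVal, StoreVal));   // sc + beq back to loop
  return OldVal;                                            // Dest receives the old value
}
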
-MachineBasicBlock *
-MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,
- MachineBasicBlock *BB,
- unsigned Size, unsigned BinOpcode,
- bool Nand) const {
+MachineBasicBlock *MipsTargetLowering::emitSignExtendToI32InReg(
+ MachineInstr *MI, MachineBasicBlock *BB, unsigned Size, unsigned DstReg,
+ unsigned SrcReg) const {
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ DebugLoc DL = MI->getDebugLoc();
+
+ if (Subtarget.hasMips32r2() && Size == 1) {
+ BuildMI(BB, DL, TII->get(Mips::SEB), DstReg).addReg(SrcReg);
+ return BB;
+ }
+
+ if (Subtarget.hasMips32r2() && Size == 2) {
+ BuildMI(BB, DL, TII->get(Mips::SEH), DstReg).addReg(SrcReg);
+ return BB;
+ }
+
+ MachineFunction *MF = BB->getParent();
+ MachineRegisterInfo &RegInfo = MF->getRegInfo();
+ const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
+ unsigned ScrReg = RegInfo.createVirtualRegister(RC);
+
+ assert(Size < 32);
+ int64_t ShiftImm = 32 - (Size * 8);
+
+ BuildMI(BB, DL, TII->get(Mips::SLL), ScrReg).addReg(SrcReg).addImm(ShiftImm);
+ BuildMI(BB, DL, TII->get(Mips::SRA), DstReg).addReg(ScrReg).addImm(ShiftImm);
+
+ return BB;
+}
+
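
emitSignExtendToI32InReg uses seb/seh where available and otherwise falls back to a shift pair. A portable sketch of that fallback, with signExtendInReg as an illustrative name:

#include <cassert>
#include <cstdint>

// Without seb/seh (pre-MIPS32r2), a sub-word value is sign-extended by a
// shift-left / arithmetic-shift-right pair, with ShiftImm = 32 - Size * 8
// exactly as computed above.
static int32_t signExtendInReg(int32_t Val, unsigned SizeInBytes) {
  assert(SizeInBytes < 4 && "only i8/i16 need in-register sign extension");
  unsigned ShiftImm = 32 - SizeInBytes * 8;
  uint32_t Shifted = static_cast<uint32_t>(Val) << ShiftImm; // sll
  return static_cast<int32_t>(Shifted) >> ShiftImm;          // sra
}
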
+MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
+ MachineInstr *MI, MachineBasicBlock *BB, unsigned Size, unsigned BinOpcode,
+ bool Nand) const {
assert((Size == 1 || Size == 2) &&
"Unsupported size for EmitAtomicBinaryPartial.");
@@ -990,7 +1102,6 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,
unsigned StoreVal = RegInfo.createVirtualRegister(RC);
unsigned MaskedOldVal1 = RegInfo.createVirtualRegister(RC);
unsigned SrlRes = RegInfo.createVirtualRegister(RC);
- unsigned SllRes = RegInfo.createVirtualRegister(RC);
unsigned Success = RegInfo.createVirtualRegister(RC);
// insert new blocks after the current block
@@ -1006,7 +1117,7 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
BB->addSuccessor(loopMBB);
@@ -1030,7 +1141,7 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,
BuildMI(BB, DL, TII->get(Mips::AND), AlignedAddr)
.addReg(Ptr).addReg(MaskLSB2);
BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2).addReg(Ptr).addImm(3);
- if (Subtarget->isLittle()) {
+ if (Subtarget.isLittle()) {
BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
} else {
unsigned Off = RegInfo.createVirtualRegister(RC);
@@ -1096,19 +1207,14 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,
// sinkMBB:
// and maskedoldval1,oldval,mask
// srl srlres,maskedoldval1,shiftamt
- // sll sllres,srlres,24
- // sra dest,sllres,24
+ // sign_extend dest,srlres
BB = sinkMBB;
- int64_t ShiftImm = (Size == 1) ? 24 : 16;
BuildMI(BB, DL, TII->get(Mips::AND), MaskedOldVal1)
.addReg(OldVal).addReg(Mask);
BuildMI(BB, DL, TII->get(Mips::SRLV), SrlRes)
.addReg(MaskedOldVal1).addReg(ShiftAmt);
- BuildMI(BB, DL, TII->get(Mips::SLL), SllRes)
- .addReg(SrlRes).addImm(ShiftImm);
- BuildMI(BB, DL, TII->get(Mips::SRA), Dest)
- .addReg(SllRes).addImm(ShiftImm);
+ BB = emitSignExtendToI32InReg(MI, BB, Size, Dest, SrlRes);
MI->eraseFromParent(); // The instruction is gone now.
@@ -1128,8 +1234,8 @@ MachineBasicBlock * MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
unsigned LL, SC, ZERO, BNE, BEQ;
if (Size == 4) {
- LL = Mips::LL;
- SC = Mips::SC;
+ LL = isMicroMips ? Mips::LL_MM : Mips::LL;
+ SC = isMicroMips ? Mips::SC_MM : Mips::SC;
ZERO = Mips::ZERO;
BNE = Mips::BNE;
BEQ = Mips::BEQ;
@@ -1161,7 +1267,7 @@ MachineBasicBlock * MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
// thisMBB:
@@ -1229,7 +1335,6 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
unsigned MaskedOldVal1 = RegInfo.createVirtualRegister(RC);
unsigned StoreVal = RegInfo.createVirtualRegister(RC);
unsigned SrlRes = RegInfo.createVirtualRegister(RC);
- unsigned SllRes = RegInfo.createVirtualRegister(RC);
unsigned Success = RegInfo.createVirtualRegister(RC);
// insert new blocks after the current block
@@ -1247,7 +1352,7 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
BB->addSuccessor(loop1MBB);
@@ -1276,7 +1381,7 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
BuildMI(BB, DL, TII->get(Mips::AND), AlignedAddr)
.addReg(Ptr).addReg(MaskLSB2);
BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2).addReg(Ptr).addImm(3);
- if (Subtarget->isLittle()) {
+ if (Subtarget.isLittle()) {
BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
} else {
unsigned Off = RegInfo.createVirtualRegister(RC);
@@ -1326,23 +1431,44 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
// sinkMBB:
// srl srlres,maskedoldval0,shiftamt
- // sll sllres,srlres,24
- // sra dest,sllres,24
+ // sign_extend dest,srlres
BB = sinkMBB;
- int64_t ShiftImm = (Size == 1) ? 24 : 16;
BuildMI(BB, DL, TII->get(Mips::SRLV), SrlRes)
.addReg(MaskedOldVal0).addReg(ShiftAmt);
- BuildMI(BB, DL, TII->get(Mips::SLL), SllRes)
- .addReg(SrlRes).addImm(ShiftImm);
- BuildMI(BB, DL, TII->get(Mips::SRA), Dest)
- .addReg(SllRes).addImm(ShiftImm);
+ BB = emitSignExtendToI32InReg(MI, BB, Size, Dest, SrlRes);
MI->eraseFromParent(); // The instruction is gone now.
return exitMBB;
}
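
Both partword expansions derive an aligned word address and a bit offset from the pointer's low two bits. A small sketch of that arithmetic for the little-endian case; computePartwordAccess is an illustrative name, and the Mask computation is an assumption since the instructions that build it fall outside this hunk:

#include <cstdint>

struct PartwordAccess {
  uintptr_t AlignedAddr; // word containing the byte or halfword
  unsigned ShiftAmt;     // bit offset of the operand inside that word
  uint32_t Mask;         // selects the operand's bits within the word
};

// SizeInBytes is 1 or 2 for the partword atomics handled above.
static PartwordAccess computePartwordAccess(uintptr_t Ptr, unsigned SizeInBytes) {
  PartwordAccess P;
  P.AlignedAddr = Ptr & ~uintptr_t(3);       // and alignedaddr, ptr, masklsb2
  P.ShiftAmt = unsigned(Ptr & 3) * 8;        // andi ptrlsb2, ptr, 3; sll by 3
  P.Mask = ((1u << (SizeInBytes * 8)) - 1u) << P.ShiftAmt; // assumed mask shape
  return P;
}
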
+MachineBasicBlock *MipsTargetLowering::emitSEL_D(MachineInstr *MI,
+ MachineBasicBlock *BB) const {
+ MachineFunction *MF = BB->getParent();
+ const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineRegisterInfo &RegInfo = MF->getRegInfo();
+ DebugLoc DL = MI->getDebugLoc();
+ MachineBasicBlock::iterator II(MI);
+
+ unsigned Fc = MI->getOperand(1).getReg();
+ const auto &FGR64RegClass = TRI->getRegClass(Mips::FGR64RegClassID);
+
+ unsigned Fc2 = RegInfo.createVirtualRegister(FGR64RegClass);
+
+ BuildMI(*BB, II, DL, TII->get(Mips::SUBREG_TO_REG), Fc2)
+ .addImm(0)
+ .addReg(Fc)
+ .addImm(Mips::sub_lo);
+
+ // We don't erase the original instruction; we just replace the condition
+ // register with the 64-bit super-register.
+ MI->getOperand(1).setReg(Fc2);
+
+ return BB;
+}
+
//===----------------------------------------------------------------------===//
// Misc Lower Operation implementation
//===----------------------------------------------------------------------===//
@@ -1365,7 +1491,8 @@ SDValue MipsTargetLowering::lowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
0);
Chain = Addr.getValue(1);
- if ((getTargetMachine().getRelocationModel() == Reloc::PIC_) || IsN64) {
+ if ((getTargetMachine().getRelocationModel() == Reloc::PIC_) ||
+ Subtarget.isABI_N64()) {
// For PIC, the sequence is:
// BRIND(load(Jumptable + index) + RelocBase)
// RelocBase can be JumpTable, GOT or some sort of global base.
@@ -1383,6 +1510,7 @@ SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
SDValue Dest = Op.getOperand(2);
SDLoc DL(Op);
+ assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
SDValue CondRes = createFPCmp(DAG, Op.getOperand(1));
// Return if flag is not set by a floating point comparison.
@@ -1402,6 +1530,7 @@ SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
SDValue MipsTargetLowering::
lowerSELECT(SDValue Op, SelectionDAG &DAG) const
{
+ assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
SDValue Cond = createFPCmp(DAG, Op.getOperand(0));
// Return if flag is not set by a floating point comparison.
@@ -1427,6 +1556,7 @@ lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
}
SDValue MipsTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const {
+ assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
SDValue Cond = createFPCmp(DAG, Op);
assert(Cond.getOpcode() == MipsISD::FPCmp &&
@@ -1446,7 +1576,8 @@ SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
const GlobalValue *GV = N->getGlobal();
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64) {
+ if (getTargetMachine().getRelocationModel() != Reloc::PIC_ &&
+ !Subtarget.isABI_N64()) {
const MipsTargetObjectFile &TLOF =
(const MipsTargetObjectFile&)getObjFileLowering();
@@ -1455,7 +1586,7 @@ SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, 0,
MipsII::MO_GPREL);
SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, DL,
- DAG.getVTList(MVT::i32), &GA, 1);
+ DAG.getVTList(MVT::i32), GA);
SDValue GPReg = DAG.getRegister(Mips::GP, MVT::i32);
return DAG.getNode(ISD::ADD, DL, MVT::i32, GPReg, GPRelNode);
}
@@ -1465,7 +1596,8 @@ SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
}
if (GV->hasInternalLinkage() || (GV->hasLocalLinkage() && !isa<Function>(GV)))
- return getAddrLocal(N, Ty, DAG, HasMips64);
+ return getAddrLocal(N, Ty, DAG,
+ Subtarget.isABI_N32() || Subtarget.isABI_N64());
if (LargeGOT)
return getAddrGlobalLargeGOT(N, Ty, DAG, MipsII::MO_GOT_HI16,
@@ -1473,7 +1605,9 @@ SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
MachinePointerInfo::getGOT());
return getAddrGlobal(N, Ty, DAG,
- HasMips64 ? MipsII::MO_GOT_DISP : MipsII::MO_GOT16,
+ (Subtarget.isABI_N32() || Subtarget.isABI_N64())
+ ? MipsII::MO_GOT_DISP
+ : MipsII::MO_GOT16,
DAG.getEntryNode(), MachinePointerInfo::getGOT());
}
@@ -1482,10 +1616,12 @@ SDValue MipsTargetLowering::lowerBlockAddress(SDValue Op,
BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
EVT Ty = Op.getValueType();
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64)
+ if (getTargetMachine().getRelocationModel() != Reloc::PIC_ &&
+ !Subtarget.isABI_N64())
return getAddrNonPIC(N, Ty, DAG);
- return getAddrLocal(N, Ty, DAG, HasMips64);
+ return getAddrLocal(N, Ty, DAG,
+ Subtarget.isABI_N32() || Subtarget.isABI_N64());
}
SDValue MipsTargetLowering::
@@ -1521,11 +1657,9 @@ lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
Entry.Ty = PtrTy;
Args.push_back(Entry);
- TargetLowering::CallLoweringInfo CLI(DAG.getEntryNode(), PtrTy,
- false, false, false, false, 0, CallingConv::C,
- /*IsTailCall=*/false, /*doesNotRet=*/false,
- /*isReturnValueUsed=*/true,
- TlsGetAddr, Args, DAG, DL);
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(DL).setChain(DAG.getEntryNode())
+ .setCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args), 0);
std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
SDValue Ret = CallResult.first;
@@ -1575,10 +1709,12 @@ lowerJumpTable(SDValue Op, SelectionDAG &DAG) const
JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
EVT Ty = Op.getValueType();
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64)
+ if (getTargetMachine().getRelocationModel() != Reloc::PIC_ &&
+ !Subtarget.isABI_N64())
return getAddrNonPIC(N, Ty, DAG);
- return getAddrLocal(N, Ty, DAG, HasMips64);
+ return getAddrLocal(N, Ty, DAG,
+ Subtarget.isABI_N32() || Subtarget.isABI_N64());
}
SDValue MipsTargetLowering::
@@ -1596,10 +1732,12 @@ lowerConstantPool(SDValue Op, SelectionDAG &DAG) const
ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
EVT Ty = Op.getValueType();
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64)
+ if (getTargetMachine().getRelocationModel() != Reloc::PIC_ &&
+ !Subtarget.isABI_N64())
return getAddrNonPIC(N, Ty, DAG);
- return getAddrLocal(N, Ty, DAG, HasMips64);
+ return getAddrLocal(N, Ty, DAG,
+ Subtarget.isABI_N32() || Subtarget.isABI_N64());
}
SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
@@ -1714,69 +1852,10 @@ static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG,
SDValue
MipsTargetLowering::lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
- if (Subtarget->hasMips64())
- return lowerFCOPYSIGN64(Op, DAG, Subtarget->hasExtractInsert());
-
- return lowerFCOPYSIGN32(Op, DAG, Subtarget->hasExtractInsert());
-}
-
-static SDValue lowerFABS32(SDValue Op, SelectionDAG &DAG,
- bool HasExtractInsert) {
- SDValue Res, Const1 = DAG.getConstant(1, MVT::i32);
- SDLoc DL(Op);
-
- // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
- // to i32.
- SDValue X = (Op.getValueType() == MVT::f32) ?
- DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0)) :
- DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
- Const1);
-
- // Clear MSB.
- if (HasExtractInsert)
- Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32,
- DAG.getRegister(Mips::ZERO, MVT::i32),
- DAG.getConstant(31, MVT::i32), Const1, X);
- else {
- SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
- Res = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
- }
+ if (Subtarget.isGP64bit())
+ return lowerFCOPYSIGN64(Op, DAG, Subtarget.hasExtractInsert());
- if (Op.getValueType() == MVT::f32)
- return DAG.getNode(ISD::BITCAST, DL, MVT::f32, Res);
-
- SDValue LowX = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
- Op.getOperand(0), DAG.getConstant(0, MVT::i32));
- return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
-}
-
-static SDValue lowerFABS64(SDValue Op, SelectionDAG &DAG,
- bool HasExtractInsert) {
- SDValue Res, Const1 = DAG.getConstant(1, MVT::i32);
- SDLoc DL(Op);
-
- // Bitcast to integer node.
- SDValue X = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(0));
-
- // Clear MSB.
- if (HasExtractInsert)
- Res = DAG.getNode(MipsISD::Ins, DL, MVT::i64,
- DAG.getRegister(Mips::ZERO_64, MVT::i64),
- DAG.getConstant(63, MVT::i32), Const1, X);
- else {
- SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i64, X, Const1);
- Res = DAG.getNode(ISD::SRL, DL, MVT::i64, SllX, Const1);
- }
-
- return DAG.getNode(ISD::BITCAST, DL, MVT::f64, Res);
-}
-
-SDValue
-MipsTargetLowering::lowerFABS(SDValue Op, SelectionDAG &DAG) const {
- if (Subtarget->hasMips64() && (Op.getValueType() == MVT::f64))
- return lowerFABS64(Op, DAG, Subtarget->hasExtractInsert());
-
- return lowerFABS32(Op, DAG, Subtarget->hasExtractInsert());
+ return lowerFCOPYSIGN32(Op, DAG, Subtarget.hasExtractInsert());
}
SDValue MipsTargetLowering::
@@ -1789,13 +1868,17 @@ lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
MFI->setFrameAddressIsTaken(true);
EVT VT = Op.getValueType();
SDLoc DL(Op);
- SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
- IsN64 ? Mips::FP_64 : Mips::FP, VT);
+ SDValue FrameAddr =
+ DAG.getCopyFromReg(DAG.getEntryNode(), DL,
+ Subtarget.isABI_N64() ? Mips::FP_64 : Mips::FP, VT);
return FrameAddr;
}
SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
SelectionDAG &DAG) const {
+ if (verifyReturnAddressArgumentIsConstant(Op, DAG))
+ return SDValue();
+
// check the depth
assert((cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() == 0) &&
"Return address can be determined only for current frame.");
@@ -1803,7 +1886,7 @@ SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MVT VT = Op.getSimpleValueType();
- unsigned RA = IsN64 ? Mips::RA_64 : Mips::RA;
+ unsigned RA = Subtarget.isABI_N64() ? Mips::RA_64 : Mips::RA;
MFI->setReturnAddressIsTaken(true);
// Return RA, which contains the return address. Mark it an implicit live-in.
@@ -1825,12 +1908,12 @@ SDValue MipsTargetLowering::lowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
SDValue Offset = Op.getOperand(1);
SDValue Handler = Op.getOperand(2);
SDLoc DL(Op);
- EVT Ty = IsN64 ? MVT::i64 : MVT::i32;
+ EVT Ty = Subtarget.isABI_N64() ? MVT::i64 : MVT::i32;
// Store stack offset in V1, store jump target in V0. Glue CopyToReg and
// EH_RETURN nodes, so that instructions are emitted back-to-back.
- unsigned OffsetReg = IsN64 ? Mips::V1_64 : Mips::V1;
- unsigned AddrReg = IsN64 ? Mips::V0_64 : Mips::V0;
+ unsigned OffsetReg = Subtarget.isABI_N64() ? Mips::V1_64 : Mips::V1;
+ unsigned AddrReg = Subtarget.isABI_N64() ? Mips::V0_64 : Mips::V0;
Chain = DAG.getCopyToReg(Chain, DL, OffsetReg, Offset, SDValue());
Chain = DAG.getCopyToReg(Chain, DL, AddrReg, Handler, Chain.getValue(1));
return DAG.getNode(MipsISD::EH_RETURN, DL, MVT::Other, Chain,
@@ -1877,7 +1960,7 @@ SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
Hi = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond, ShiftLeftLo, Or);
SDValue Ops[2] = {Lo, Hi};
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
@@ -1918,7 +2001,7 @@ SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
ShiftRightHi);
SDValue Ops[2] = {Lo, Hi};
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD,
@@ -1934,7 +2017,7 @@ static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD,
DAG.getConstant(Offset, BasePtrVT));
SDValue Ops[] = { Chain, Ptr, Src };
- return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, 3, MemVT,
+ return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
LD->getMemOperand());
}
@@ -1943,12 +2026,15 @@ SDValue MipsTargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
LoadSDNode *LD = cast<LoadSDNode>(Op);
EVT MemVT = LD->getMemoryVT();
+ if (Subtarget.systemSupportsUnalignedAccess())
+ return Op;
+
// Return if load is aligned or if MemVT is neither i32 nor i64.
if ((LD->getAlignment() >= MemVT.getSizeInBits() / 8) ||
((MemVT != MVT::i32) && (MemVT != MVT::i64)))
return SDValue();
- bool IsLittle = Subtarget->isLittle();
+ bool IsLittle = Subtarget.isLittle();
EVT VT = Op.getValueType();
ISD::LoadExtType ExtType = LD->getExtensionType();
SDValue Chain = LD->getChain(), Undef = DAG.getUNDEF(VT);
@@ -1997,7 +2083,7 @@ SDValue MipsTargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
SDValue SLL = DAG.getNode(ISD::SHL, DL, MVT::i64, LWR, Const32);
SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i64, SLL, Const32);
SDValue Ops[] = { SRL, LWR.getValue(1) };
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD,
@@ -2012,7 +2098,7 @@ static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD,
DAG.getConstant(Offset, BasePtrVT));
SDValue Ops[] = { Chain, Value, Ptr };
- return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, 3, MemVT,
+ return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
SD->getMemOperand());
}
@@ -2066,9 +2152,10 @@ SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
EVT MemVT = SD->getMemoryVT();
// Lower unaligned integer stores.
- if ((SD->getAlignment() < MemVT.getSizeInBits() / 8) &&
+ if (!Subtarget.systemSupportsUnalignedAccess() &&
+ (SD->getAlignment() < MemVT.getSizeInBits() / 8) &&
((MemVT == MVT::i32) || (MemVT == MVT::i64)))
- return lowerUnalignedIntStore(SD, DAG, Subtarget->isLittle());
+ return lowerUnalignedIntStore(SD, DAG, Subtarget.isLittle());
return lowerFP_TO_SINT_STORE(SD, DAG);
}
@@ -2123,12 +2210,12 @@ SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
- CCState &State, const uint16_t *F64Regs) {
+ CCState &State, const MCPhysReg *F64Regs) {
static const unsigned IntRegsSize = 4, FloatRegsSize = 2;
- static const uint16_t IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
- static const uint16_t F32Regs[] = { Mips::F12, Mips::F14 };
+ static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
+ static const MCPhysReg F32Regs[] = { Mips::F12, Mips::F14 };
// Do not process byval args here.
if (ArgFlags.isByVal())
@@ -2200,7 +2287,7 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT,
MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State) {
- static const uint16_t F64Regs[] = { Mips::D6, Mips::D7 };
+ static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };
return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
}
@@ -2208,7 +2295,7 @@ static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT,
static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT,
MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State) {
- static const uint16_t F64Regs[] = { Mips::D12_64, Mips::D14_64 };
+ static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };
return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
}
@@ -2254,8 +2341,8 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
// in PIC mode) allow symbols to be resolved via lazy binding.
// The lazy binding stub requires GP to point to the GOT.
if (IsPICCall && !InternalLinkage) {
- unsigned GPReg = IsN64 ? Mips::GP_64 : Mips::GP;
- EVT Ty = IsN64 ? MVT::i64 : MVT::i32;
+ unsigned GPReg = Subtarget.isABI_N64() ? Mips::GP_64 : Mips::GP;
+ EVT Ty = Subtarget.isABI_N64() ? MVT::i64 : MVT::i32;
RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));
}
@@ -2281,11 +2368,11 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
const uint32_t *Mask = TRI->getCallPreservedMask(CLI.CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
- if (Subtarget->inMips16HardFloat()) {
+ if (Subtarget.inMips16HardFloat()) {
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(CLI.Callee)) {
llvm::StringRef Sym = G->getGlobal()->getName();
Function *F = G->getGlobal()->getParent()->getFunction(Sym);
- if (F->hasFnAttribute("__Mips16RetHelper")) {
+ if (F && F->hasFnAttribute("__Mips16RetHelper")) {
Mask = MipsRegisterInfo::getMips16RetHelperMask();
}
}
@@ -2324,12 +2411,12 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
getTargetMachine(), ArgLocs, *DAG.getContext());
MipsCC::SpecialCallingConvType SpecialCallingConv =
getSpecialCallingConv(Callee);
- MipsCC MipsCCInfo(CallConv, IsO32, Subtarget->isFP64bit(), CCInfo,
- SpecialCallingConv);
+ MipsCC MipsCCInfo(CallConv, Subtarget.isABI_O32(), Subtarget.isFP64bit(),
+ CCInfo, SpecialCallingConv);
MipsCCInfo.analyzeCallOperands(Outs, IsVarArg,
- Subtarget->mipsSEUsesSoftFloat(),
- Callee.getNode(), CLI.Args);
+ Subtarget.abiUsesSoftFloat(),
+ Callee.getNode(), CLI.getArgs());
// Get a count of how many bytes are to be pushed on the stack.
unsigned NextStackOffset = CCInfo.getNextStackOffset();
@@ -2340,6 +2427,10 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
isEligibleForTailCallOptimization(MipsCCInfo, NextStackOffset,
*MF.getInfo<MipsFunctionInfo>());
+ if (!IsTailCall && CLI.CS && CLI.CS->isMustTailCall())
+ report_fatal_error("failed to perform tail call elimination on a call "
+ "site marked musttail");
+
if (IsTailCall)
++NumTailCalls;
@@ -2353,9 +2444,9 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (!IsTailCall)
Chain = DAG.getCALLSEQ_START(Chain, NextStackOffsetVal, DL);
- SDValue StackPtr = DAG.getCopyFromReg(Chain, DL,
- IsN64 ? Mips::SP_64 : Mips::SP,
- getPointerTy());
+ SDValue StackPtr = DAG.getCopyFromReg(
+ Chain, DL, Subtarget.isABI_N64() ? Mips::SP_64 : Mips::SP,
+ getPointerTy());
// With EABI is it possible to have 16 args on registers.
std::deque< std::pair<unsigned, SDValue> > RegsToPass;
@@ -2377,7 +2468,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
assert(!IsTailCall &&
"Do not tail-call optimize if there is a byval argument.");
passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
- MipsCCInfo, *ByValArg, Flags, Subtarget->isLittle());
+ MipsCCInfo, *ByValArg, Flags, Subtarget.isLittle());
++ByValArg;
continue;
}
@@ -2396,7 +2487,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
Arg, DAG.getConstant(0, MVT::i32));
SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
Arg, DAG.getConstant(1, MVT::i32));
- if (!Subtarget->isLittle())
+ if (!Subtarget.isLittle())
std::swap(Lo, Hi);
unsigned LocRegLo = VA.getLocReg();
unsigned LocRegHigh = getNextIntArgReg(LocRegLo);
@@ -2436,13 +2527,14 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Transform all store nodes into one single node because all store
// nodes are independent of each other.
if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
// If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
// direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
// node so that legalize doesn't hack it.
- bool IsPICCall = (IsN64 || IsPIC); // true if calls are translated to jalr $25
+ bool IsPICCall =
+ (Subtarget.isABI_N64() || IsPIC); // true if calls are translated to
+ // jalr $25
bool GlobalOrExternal = false, InternalLinkage = false;
SDValue CalleeLo;
EVT Ty = Callee.getValueType();
@@ -2453,7 +2545,8 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
InternalLinkage = Val->hasInternalLinkage();
if (InternalLinkage)
- Callee = getAddrLocal(G, Ty, DAG, HasMips64);
+ Callee = getAddrLocal(G, Ty, DAG,
+ Subtarget.isABI_N32() || Subtarget.isABI_N64());
else if (LargeGOT)
Callee = getAddrGlobalLargeGOT(G, Ty, DAG, MipsII::MO_CALL_HI16,
MipsII::MO_CALL_LO16, Chain,
@@ -2469,7 +2562,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
const char *Sym = S->getSymbol();
- if (!IsN64 && !IsPIC) // !N64 && static
+ if (!Subtarget.isABI_N64() && !IsPIC) // !N64 && static
Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(),
MipsII::MO_NO_FLAG);
else if (LargeGOT)
@@ -2490,9 +2583,9 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
CLI, Callee, Chain);
if (IsTailCall)
- return DAG.getNode(MipsISD::TailCall, DL, MVT::Other, &Ops[0], Ops.size());
+ return DAG.getNode(MipsISD::TailCall, DL, MVT::Other, Ops);
- Chain = DAG.getNode(MipsISD::JmpLink, DL, NodeTys, &Ops[0], Ops.size());
+ Chain = DAG.getNode(MipsISD::JmpLink, DL, NodeTys, Ops);
SDValue InFlag = Chain.getValue(1);
// Create the CALLSEQ_END node.
@@ -2520,9 +2613,10 @@ MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
getTargetMachine(), RVLocs, *DAG.getContext());
- MipsCC MipsCCInfo(CallConv, IsO32, Subtarget->isFP64bit(), CCInfo);
+ MipsCC MipsCCInfo(CallConv, Subtarget.isABI_O32(), Subtarget.isFP64bit(),
+ CCInfo);
- MipsCCInfo.analyzeCallResult(Ins, Subtarget->mipsSEUsesSoftFloat(),
+ MipsCCInfo.analyzeCallResult(Ins, Subtarget.abiUsesSoftFloat(),
CallNode, RetTy);
// Copy all of the result registers out of their specified physreg.
@@ -2567,10 +2661,11 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
getTargetMachine(), ArgLocs, *DAG.getContext());
- MipsCC MipsCCInfo(CallConv, IsO32, Subtarget->isFP64bit(), CCInfo);
+ MipsCC MipsCCInfo(CallConv, Subtarget.isABI_O32(), Subtarget.isFP64bit(),
+ CCInfo);
Function::const_arg_iterator FuncArg =
DAG.getMachineFunction().getFunction()->arg_begin();
- bool UseSoftFloat = Subtarget->mipsSEUsesSoftFloat();
+ bool UseSoftFloat = Subtarget.abiUsesSoftFloat();
MipsCCInfo.analyzeFormalArguments(Ins, UseSoftFloat, FuncArg);
MipsFI->setFormalArgInfo(CCInfo.getNextStackOffset(),
@@ -2629,11 +2724,12 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
(RegVT == MVT::i64 && ValVT == MVT::f64) ||
(RegVT == MVT::f64 && ValVT == MVT::i64))
ArgValue = DAG.getNode(ISD::BITCAST, DL, ValVT, ArgValue);
- else if (IsO32 && RegVT == MVT::i32 && ValVT == MVT::f64) {
+ else if (Subtarget.isABI_O32() && RegVT == MVT::i32 &&
+ ValVT == MVT::f64) {
unsigned Reg2 = addLiveIn(DAG.getMachineFunction(),
getNextIntArgReg(ArgReg), RC);
SDValue ArgValue2 = DAG.getCopyFromReg(Chain, DL, Reg2, RegVT);
- if (!Subtarget->isLittle())
+ if (!Subtarget.isLittle())
std::swap(ArgValue, ArgValue2);
ArgValue = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64,
ArgValue, ArgValue2);
@@ -2659,18 +2755,21 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
}
}
- // The mips ABIs for returning structs by value requires that we copy
- // the sret argument into $v0 for the return. Save the argument into
- // a virtual register so that we can access it from the return points.
- if (DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
- unsigned Reg = MipsFI->getSRetReturnReg();
- if (!Reg) {
- Reg = MF.getRegInfo().
- createVirtualRegister(getRegClassFor(IsN64 ? MVT::i64 : MVT::i32));
- MipsFI->setSRetReturnReg(Reg);
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ // The MIPS ABIs for returning structs by value require that we copy
+ // the sret argument into $v0 for the return. Save the argument into
+ // a virtual register so that we can access it from the return points.
+ if (Ins[i].Flags.isSRet()) {
+ unsigned Reg = MipsFI->getSRetReturnReg();
+ if (!Reg) {
+ Reg = MF.getRegInfo().createVirtualRegister(
+ getRegClassFor(Subtarget.isABI_N64() ? MVT::i64 : MVT::i32));
+ MipsFI->setSRetReturnReg(Reg);
+ }
+ SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]);
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
+ break;
}
- SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[0]);
- Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
}
if (IsVarArg)
@@ -2680,8 +2779,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
// the size of Ins and InVals. This only happens when on varg functions
if (!OutChains.empty()) {
OutChains.push_back(Chain);
- Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
- &OutChains[0], OutChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
}
return Chain;
@@ -2716,10 +2814,11 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
// CCState - Info about the registers and stack slot.
CCState CCInfo(CallConv, IsVarArg, MF, getTargetMachine(), RVLocs,
*DAG.getContext());
- MipsCC MipsCCInfo(CallConv, IsO32, Subtarget->isFP64bit(), CCInfo);
+ MipsCC MipsCCInfo(CallConv, Subtarget.isABI_O32(), Subtarget.isFP64bit(),
+ CCInfo);
// Analyze return values.
- MipsCCInfo.analyzeReturn(Outs, Subtarget->mipsSEUsesSoftFloat(),
+ MipsCCInfo.analyzeReturn(Outs, Subtarget.abiUsesSoftFloat(),
MF.getFunction()->getReturnType());
SDValue Flag;
@@ -2752,7 +2851,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
if (!Reg)
llvm_unreachable("sret virtual register not created in the entry block");
SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy());
- unsigned V0 = IsN64 ? Mips::V0_64 : Mips::V0;
+ unsigned V0 = Subtarget.isABI_N64() ? Mips::V0_64 : Mips::V0;
Chain = DAG.getCopyToReg(Chain, DL, V0, Val, Flag);
Flag = Chain.getValue(1);
@@ -2766,7 +2865,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
RetOps.push_back(Flag);
// Return on Mips is always a "jr $ra"
- return DAG.getNode(MipsISD::Ret, DL, MVT::Other, &RetOps[0], RetOps.size());
+ return DAG.getNode(MipsISD::Ret, DL, MVT::Other, RetOps);
}
//===----------------------------------------------------------------------===//
@@ -2816,7 +2915,7 @@ MipsTargetLowering::getSingleConstraintMatchWeight(
Value *CallOperandVal = info.CallOperandVal;
// If we don't have a value, we can't do a match,
// but allow it at the lowest weight.
- if (CallOperandVal == NULL)
+ if (!CallOperandVal)
return CW_Default;
Type *type = CallOperandVal->getType();
// Look at the constraint type.
@@ -2830,7 +2929,7 @@ MipsTargetLowering::getSingleConstraintMatchWeight(
weight = CW_Register;
break;
case 'f': // FPU or MSA register
- if (Subtarget->hasMSA() && type->isVectorTy() &&
+ if (Subtarget.hasMSA() && type->isVectorTy() &&
cast<VectorType>(type)->getBitWidth() == 128)
weight = CW_Register;
else if (type->isFloatTy())
@@ -2894,12 +2993,12 @@ parseRegForInlineAsmConstraint(const StringRef &C, MVT VT) const {
std::pair<bool, bool> R = parsePhysicalReg(C, Prefix, Reg);
if (!R.first)
- return std::make_pair((unsigned)0, (const TargetRegisterClass*)0);
+ return std::make_pair(0U, nullptr);
if ((Prefix == "hi" || Prefix == "lo")) { // Parse hi/lo.
// No numeric characters follow "hi" or "lo".
if (R.second)
- return std::make_pair((unsigned)0, (const TargetRegisterClass*)0);
+ return std::make_pair(0U, nullptr);
RC = TRI->getRegClass(Prefix == "hi" ?
Mips::HI32RegClassID : Mips::LO32RegClassID);
@@ -2909,7 +3008,7 @@ parseRegForInlineAsmConstraint(const StringRef &C, MVT VT) const {
// No numeric characters follow the name.
if (R.second)
- return std::make_pair((unsigned)0, (const TargetRegisterClass *)0);
+ return std::make_pair(0U, nullptr);
Reg = StringSwitch<unsigned long long>(Prefix)
.Case("$msair", Mips::MSAIR)
@@ -2923,20 +3022,20 @@ parseRegForInlineAsmConstraint(const StringRef &C, MVT VT) const {
.Default(0);
if (!Reg)
- return std::make_pair((unsigned)0, (const TargetRegisterClass *)0);
+ return std::make_pair(0U, nullptr);
RC = TRI->getRegClass(Mips::MSACtrlRegClassID);
return std::make_pair(Reg, RC);
}
if (!R.second)
- return std::make_pair((unsigned)0, (const TargetRegisterClass*)0);
+ return std::make_pair(0U, nullptr);
if (Prefix == "$f") { // Parse $f0-$f31.
// If the size of FP registers is 64-bit or Reg is an even number, select
// the 64-bit register class. Otherwise, select the 32-bit register class.
if (VT == MVT::Other)
- VT = (Subtarget->isFP64bit() || !(Reg % 2)) ? MVT::f64 : MVT::f32;
+ VT = (Subtarget.isFP64bit() || !(Reg % 2)) ? MVT::f64 : MVT::f32;
RC = getRegClassFor(VT);
@@ -2969,16 +3068,16 @@ getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const
case 'y': // Same as 'r'. Exists for compatibility.
case 'r':
if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
- if (Subtarget->inMips16Mode())
+ if (Subtarget.inMips16Mode())
return std::make_pair(0U, &Mips::CPU16RegsRegClass);
return std::make_pair(0U, &Mips::GPR32RegClass);
}
- if (VT == MVT::i64 && !HasMips64)
+ if (VT == MVT::i64 && !Subtarget.isGP64bit())
return std::make_pair(0U, &Mips::GPR32RegClass);
- if (VT == MVT::i64 && HasMips64)
+ if (VT == MVT::i64 && Subtarget.isGP64bit())
return std::make_pair(0U, &Mips::GPR64RegClass);
// This will generate an error message
- return std::make_pair(0u, static_cast<const TargetRegisterClass*>(0));
+ return std::make_pair(0U, nullptr);
case 'f': // FPU or MSA register
if (VT == MVT::v16i8)
return std::make_pair(0U, &Mips::MSA128BRegClass);
@@ -2990,8 +3089,8 @@ getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const
return std::make_pair(0U, &Mips::MSA128DRegClass);
else if (VT == MVT::f32)
return std::make_pair(0U, &Mips::FGR32RegClass);
- else if ((VT == MVT::f64) && (!Subtarget->isSingleFloat())) {
- if (Subtarget->isFP64bit())
+ else if ((VT == MVT::f64) && (!Subtarget.isSingleFloat())) {
+ if (Subtarget.isFP64bit())
return std::make_pair(0U, &Mips::FGR64RegClass);
return std::make_pair(0U, &Mips::AFGR64RegClass);
}
@@ -3008,7 +3107,7 @@ getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const
case 'x': // register suitable for indirect jump
// Fixme: Not triggering the use of both hi and low
// This will generate an error message
- return std::make_pair(0u, static_cast<const TargetRegisterClass*>(0));
+ return std::make_pair(0U, nullptr);
}
}
@@ -3027,7 +3126,7 @@ void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
std::string &Constraint,
std::vector<SDValue>&Ops,
SelectionDAG &DAG) const {
- SDValue Result(0, 0);
+ SDValue Result;
// Only support length 1 constraints for now.
if (Constraint.length() > 1) return;
@@ -3147,7 +3246,7 @@ EVT MipsTargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
bool IsMemset, bool ZeroMemset,
bool MemcpyStrSrc,
MachineFunction &MF) const {
- if (Subtarget->hasMips64())
+ if (Subtarget.hasMips64())
return MVT::i64;
return MVT::i32;
@@ -3162,7 +3261,7 @@ bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
}
unsigned MipsTargetLowering::getJumpTableEncoding() const {
- if (IsN64)
+ if (Subtarget.isABI_N64())
return MachineJumpTableInfo::EK_GPRel64BlockAddress;
return TargetLowering::getJumpTableEncoding();
@@ -3211,12 +3310,12 @@ static bool originalTypeIsF128(const Type *Ty, const SDNode *CallNode) {
MipsTargetLowering::MipsCC::SpecialCallingConvType
MipsTargetLowering::getSpecialCallingConv(SDValue Callee) const {
MipsCC::SpecialCallingConvType SpecialCallingConv =
- MipsCC::NoSpecialCallingConv;;
- if (Subtarget->inMips16HardFloat()) {
+ MipsCC::NoSpecialCallingConv;
+ if (Subtarget.inMips16HardFloat()) {
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
llvm::StringRef Sym = G->getGlobal()->getName();
Function *F = G->getGlobal()->getParent()->getFunction(Sym);
- if (F->hasFnAttribute("__Mips16RetHelper")) {
+ if (F && F->hasFnAttribute("__Mips16RetHelper")) {
SpecialCallingConv = MipsCC::Mips16RetHelperConv;
}
}
@@ -3267,7 +3366,7 @@ analyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Args,
dbgs() << "Call operand #" << I << " has unhandled type "
<< EVT(ArgVT).getEVTString();
#endif
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
}
@@ -3290,7 +3389,7 @@ analyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Args,
continue;
}
- MVT RegVT = getRegVT(ArgVT, FuncArg->getType(), 0, IsSoftFloat);
+ MVT RegVT = getRegVT(ArgVT, FuncArg->getType(), nullptr, IsSoftFloat);
if (!FixedFn(I, ArgVT, RegVT, CCValAssign::Full, ArgFlags, CCInfo))
continue;
@@ -3299,7 +3398,7 @@ analyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Args,
dbgs() << "Formal Arg #" << I << " has unhandled type "
<< EVT(ArgVT).getEVTString();
#endif
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
@@ -3324,7 +3423,7 @@ analyzeReturn(const SmallVectorImpl<Ty> &RetVals, bool IsSoftFloat,
dbgs() << "Call result #" << I << " has unhandled type "
<< EVT(VT).getEVTString() << '\n';
#endif
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
}
@@ -3338,7 +3437,7 @@ analyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins, bool IsSoftFloat,
void MipsTargetLowering::MipsCC::
analyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsSoftFloat,
const Type *RetTy) const {
- analyzeReturn(Outs, IsSoftFloat, 0, RetTy);
+ analyzeReturn(Outs, IsSoftFloat, nullptr, RetTy);
}
void MipsTargetLowering::MipsCC::handleByValArg(unsigned ValNo, MVT ValVT,
@@ -3372,7 +3471,7 @@ unsigned MipsTargetLowering::MipsCC::reservedArgArea() const {
return (IsO32 && (CallConv != CallingConv::Fast)) ? 16 : 0;
}
-const uint16_t *MipsTargetLowering::MipsCC::intArgRegs() const {
+const MCPhysReg *MipsTargetLowering::MipsCC::intArgRegs() const {
return IsO32 ? O32IntRegs : Mips64IntRegs;
}
@@ -3389,7 +3488,7 @@ llvm::CCAssignFn *MipsTargetLowering::MipsCC::varArgFn() const {
return IsO32 ? (IsFP64 ? CC_MipsO32_FP64 : CC_MipsO32_FP32) : CC_MipsN_VarArg;
}
-const uint16_t *MipsTargetLowering::MipsCC::shadowRegs() const {
+const MCPhysReg *MipsTargetLowering::MipsCC::shadowRegs() const {
return IsO32 ? O32IntRegs : Mips64DPRegs;
}
@@ -3397,7 +3496,7 @@ void MipsTargetLowering::MipsCC::allocateRegs(ByValArgInfo &ByVal,
unsigned ByValSize,
unsigned Align) {
unsigned RegSize = regSize(), NumIntArgRegs = numIntArgRegs();
- const uint16_t *IntArgRegs = intArgRegs(), *ShadowRegs = shadowRegs();
+ const MCPhysReg *IntArgRegs = intArgRegs(), *ShadowRegs = shadowRegs();
assert(!(ByValSize % RegSize) && !(Align % RegSize) &&
"Byval argument's size and alignment should be a multiple of"
"RegSize.");
@@ -3482,21 +3581,22 @@ passByValArg(SDValue Chain, SDLoc DL,
MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
const MipsCC &CC, const ByValArgInfo &ByVal,
const ISD::ArgFlagsTy &Flags, bool isLittle) const {
- unsigned ByValSize = Flags.getByValSize();
- unsigned Offset = 0; // Offset in # of bytes from the beginning of struct.
- unsigned RegSize = CC.regSize();
- unsigned Alignment = std::min(Flags.getByValAlign(), RegSize);
- EVT PtrTy = getPointerTy(), RegTy = MVT::getIntegerVT(RegSize * 8);
+ unsigned ByValSizeInBytes = Flags.getByValSize();
+ unsigned OffsetInBytes = 0; // From beginning of struct
+ unsigned RegSizeInBytes = CC.regSize();
+ unsigned Alignment = std::min(Flags.getByValAlign(), RegSizeInBytes);
+ EVT PtrTy = getPointerTy(), RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
if (ByVal.NumRegs) {
- const uint16_t *ArgRegs = CC.intArgRegs();
- bool LeftoverBytes = (ByVal.NumRegs * RegSize > ByValSize);
+ const MCPhysReg *ArgRegs = CC.intArgRegs();
+ bool LeftoverBytes = (ByVal.NumRegs * RegSizeInBytes > ByValSizeInBytes);
unsigned I = 0;
// Copy words to registers.
- for (; I < ByVal.NumRegs - LeftoverBytes; ++I, Offset += RegSize) {
+ for (; I < ByVal.NumRegs - LeftoverBytes;
+ ++I, OffsetInBytes += RegSizeInBytes) {
SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
- DAG.getConstant(Offset, PtrTy));
+ DAG.getConstant(OffsetInBytes, PtrTy));
SDValue LoadVal = DAG.getLoad(RegTy, DL, Chain, LoadPtr,
MachinePointerInfo(), false, false, false,
Alignment);
@@ -3506,38 +3606,38 @@ passByValArg(SDValue Chain, SDLoc DL,
}
// Return if the struct has been fully copied.
- if (ByValSize == Offset)
+ if (ByValSizeInBytes == OffsetInBytes)
return;
// Copy the remainder of the byval argument with sub-word loads and shifts.
if (LeftoverBytes) {
- assert((ByValSize > Offset) && (ByValSize < Offset + RegSize) &&
- "Size of the remainder should be smaller than RegSize.");
+ assert((ByValSizeInBytes > OffsetInBytes) &&
+ (ByValSizeInBytes < OffsetInBytes + RegSizeInBytes) &&
+ "Size of the remainder should be smaller than RegSizeInBytes.");
SDValue Val;
- for (unsigned LoadSize = RegSize / 2, TotalSizeLoaded = 0;
- Offset < ByValSize; LoadSize /= 2) {
- unsigned RemSize = ByValSize - Offset;
+ for (unsigned LoadSizeInBytes = RegSizeInBytes / 2, TotalBytesLoaded = 0;
+ OffsetInBytes < ByValSizeInBytes; LoadSizeInBytes /= 2) {
+ unsigned RemainingSizeInBytes = ByValSizeInBytes - OffsetInBytes;
- if (RemSize < LoadSize)
+ if (RemainingSizeInBytes < LoadSizeInBytes)
continue;
// Load subword.
SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
- DAG.getConstant(Offset, PtrTy));
- SDValue LoadVal =
- DAG.getExtLoad(ISD::ZEXTLOAD, DL, RegTy, Chain, LoadPtr,
- MachinePointerInfo(), MVT::getIntegerVT(LoadSize * 8),
- false, false, Alignment);
+ DAG.getConstant(OffsetInBytes, PtrTy));
+ SDValue LoadVal = DAG.getExtLoad(
+ ISD::ZEXTLOAD, DL, RegTy, Chain, LoadPtr, MachinePointerInfo(),
+ MVT::getIntegerVT(LoadSizeInBytes * 8), false, false, Alignment);
MemOpChains.push_back(LoadVal.getValue(1));
// Shift the loaded value.
unsigned Shamt;
if (isLittle)
- Shamt = TotalSizeLoaded;
+ Shamt = TotalBytesLoaded * 8;
else
- Shamt = (RegSize - (TotalSizeLoaded + LoadSize)) * 8;
+ Shamt = (RegSizeInBytes - (TotalBytesLoaded + LoadSizeInBytes)) * 8;
SDValue Shift = DAG.getNode(ISD::SHL, DL, RegTy, LoadVal,
DAG.getConstant(Shamt, MVT::i32));
@@ -3547,9 +3647,9 @@ passByValArg(SDValue Chain, SDLoc DL,
else
Val = Shift;
- Offset += LoadSize;
- TotalSizeLoaded += LoadSize;
- Alignment = std::min(Alignment, LoadSize);
+ OffsetInBytes += LoadSizeInBytes;
+ TotalBytesLoaded += LoadSizeInBytes;
+ Alignment = std::min(Alignment, LoadSizeInBytes);
}
unsigned ArgReg = ArgRegs[ByVal.FirstIdx + I];
@@ -3559,14 +3659,14 @@ passByValArg(SDValue Chain, SDLoc DL,
}
// Copy remainder of byval arg to it with memcpy.
- unsigned MemCpySize = ByValSize - Offset;
+ unsigned MemCpySize = ByValSizeInBytes - OffsetInBytes;
SDValue Src = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
- DAG.getConstant(Offset, PtrTy));
+ DAG.getConstant(OffsetInBytes, PtrTy));
SDValue Dst = DAG.getNode(ISD::ADD, DL, PtrTy, StackPtr,
DAG.getIntPtrConstant(ByVal.Address));
Chain = DAG.getMemcpy(Chain, DL, Dst, Src, DAG.getConstant(MemCpySize, PtrTy),
Alignment, /*isVolatile=*/false, /*AlwaysInline=*/false,
- MachinePointerInfo(0), MachinePointerInfo(0));
+ MachinePointerInfo(), MachinePointerInfo());
MemOpChains.push_back(Chain);
}
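
The rewritten leftover-bytes loop walks progressively smaller loads and shifts them into place before handing the packed value to an argument register. A host-side sketch of the little-endian case, assuming a little-endian host so that memcpy models a zero-extending load; packLeftoverBytes is an illustrative name:

#include <cstdint>
#include <cstring>

static uint64_t packLeftoverBytes(const uint8_t *Src, unsigned RemainingBytes,
                                  unsigned RegSizeInBytes) {
  uint64_t Val = 0;
  unsigned OffsetInBytes = 0;
  for (unsigned LoadSizeInBytes = RegSizeInBytes / 2;
       OffsetInBytes < RemainingBytes; LoadSizeInBytes /= 2) {
    if (RemainingBytes - OffsetInBytes < LoadSizeInBytes)
      continue;                                    // try the next smaller load
    uint64_t Piece = 0;
    std::memcpy(&Piece, Src + OffsetInBytes, LoadSizeInBytes); // ZEXTLOAD
    Val |= Piece << (OffsetInBytes * 8);           // Shamt = TotalBytesLoaded * 8
    OffsetInBytes += LoadSizeInBytes;
  }
  return Val;
}
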
@@ -3574,7 +3674,7 @@ void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
const MipsCC &CC, SDValue Chain,
SDLoc DL, SelectionDAG &DAG) const {
unsigned NumRegs = CC.numIntArgRegs();
- const uint16_t *ArgRegs = CC.intArgRegs();
+ const MCPhysReg *ArgRegs = CC.intArgRegs();
const CCState &CCInfo = CC.getCCInfo();
unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs, NumRegs);
unsigned RegSize = CC.regSize();
@@ -3608,7 +3708,8 @@ void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy());
SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
MachinePointerInfo(), false, false, 0);
- cast<StoreSDNode>(Store.getNode())->getMemOperand()->setValue(0);
+ cast<StoreSDNode>(Store.getNode())->getMemOperand()->setValue(
+ (Value *)nullptr);
OutChains.push_back(Store);
}
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsISelLowering.h b/contrib/llvm/lib/Target/Mips/MipsISelLowering.h
index 65f68f0..10e4e0b 100644
--- a/contrib/llvm/lib/Target/Mips/MipsISelLowering.h
+++ b/contrib/llvm/lib/Target/Mips/MipsISelLowering.h
@@ -15,9 +15,8 @@
#ifndef MipsISELLOWERING_H
#define MipsISELLOWERING_H
-#include "Mips.h"
-#include "MipsSubtarget.h"
#include "MCTargetDesc/MipsBaseInfo.h"
+#include "Mips.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/Function.h"
@@ -184,6 +183,9 @@ namespace llvm {
PCKEV, // Pack even elements
PCKOD, // Pack odd elements
+ // Vector Lane Copy
+ INSVE, // Copy element from one vector to another
+
// Combined (XOR (OR $a, $b), -1)
VNOR,
@@ -207,39 +209,49 @@ namespace llvm {
// TargetLowering Implementation
//===--------------------------------------------------------------------===//
class MipsFunctionInfo;
+ class MipsSubtarget;
class MipsTargetLowering : public TargetLowering {
+ bool isMicroMips;
public:
- explicit MipsTargetLowering(MipsTargetMachine &TM);
+ explicit MipsTargetLowering(MipsTargetMachine &TM,
+ const MipsSubtarget &STI);
- static const MipsTargetLowering *create(MipsTargetMachine &TM);
+ static const MipsTargetLowering *create(MipsTargetMachine &TM,
+ const MipsSubtarget &STI);
- virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i32; }
+ /// createFastISel - This method returns a target specific FastISel object,
+ /// or null if the target does not support "fast" ISel.
+ FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo) const override;
- virtual void LowerOperationWrapper(SDNode *N,
- SmallVectorImpl<SDValue> &Results,
- SelectionDAG &DAG) const;
+ MVT getScalarShiftAmountTy(EVT LHSTy) const override { return MVT::i32; }
+
+ void LowerOperationWrapper(SDNode *N,
+ SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const override;
/// LowerOperation - Provide custom lowering hooks for some operations.
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
///
- virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
- SelectionDAG &DAG) const;
+ void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
+ SelectionDAG &DAG) const override;
/// getTargetNodeName - This method returns the name of a target specific
// DAG node.
- virtual const char *getTargetNodeName(unsigned Opcode) const;
+ const char *getTargetNodeName(unsigned Opcode) const override;
/// getSetCCResultType - get the ISD::SETCC result ValueType
- EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
+ EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;
- virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
- virtual MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
+ MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *MBB) const override;
struct LTStr {
bool operator()(const char *S1, const char *S2) const {
@@ -254,17 +266,17 @@ namespace llvm {
// computing a local symbol's address:
//
// (add (load (wrapper $gp, %got(sym)), %lo(sym))
- template<class NodeTy>
+ template <class NodeTy>
SDValue getAddrLocal(NodeTy *N, EVT Ty, SelectionDAG &DAG,
- bool HasMips64) const {
+ bool IsN32OrN64) const {
SDLoc DL(N);
- unsigned GOTFlag = HasMips64 ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT;
+ unsigned GOTFlag = IsN32OrN64 ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT;
SDValue GOT = DAG.getNode(MipsISD::Wrapper, DL, Ty, getGlobalReg(DAG, Ty),
getTargetNode(N, Ty, DAG, GOTFlag));
SDValue Load = DAG.getLoad(Ty, DL, DAG.getEntryNode(), GOT,
MachinePointerInfo::getGOT(), false, false,
false, 0);
- unsigned LoFlag = HasMips64 ? MipsII::MO_GOT_OFST : MipsII::MO_ABS_LO;
+ unsigned LoFlag = IsN32OrN64 ? MipsII::MO_GOT_OFST : MipsII::MO_ABS_LO;
SDValue Lo = DAG.getNode(MipsISD::Lo, DL, Ty,
getTargetNode(N, Ty, DAG, LoFlag));
return DAG.getNode(ISD::ADD, DL, Ty, Load, Lo);
@@ -378,7 +390,7 @@ namespace llvm {
unsigned reservedArgArea() const;
/// Return pointer to array of integer argument registers.
- const uint16_t *intArgRegs() const;
+ const MCPhysReg *intArgRegs() const;
typedef SmallVectorImpl<ByValArgInfo>::const_iterator byval_iterator;
byval_iterator byval_begin() const { return ByValArgs.begin(); }
@@ -399,7 +411,7 @@ namespace llvm {
/// Return the function that analyzes variable argument list functions.
llvm::CCAssignFn *varArgFn() const;
- const uint16_t *shadowRegs() const;
+ const MCPhysReg *shadowRegs() const;
void allocateRegs(ByValArgInfo &ByVal, unsigned ByValSize,
unsigned Align);
@@ -426,9 +438,7 @@ namespace llvm {
SDValue lowerSTORE(SDValue Op, SelectionDAG &DAG) const;
// Subtarget Info
- const MipsSubtarget *Subtarget;
-
- bool HasMips64, IsN64, IsO32;
+ const MipsSubtarget &Subtarget;
private:
// Create a TargetGlobalAddress node.
@@ -515,41 +525,39 @@ namespace llvm {
void writeVarArgRegs(std::vector<SDValue> &OutChains, const MipsCC &CC,
SDValue Chain, SDLoc DL, SelectionDAG &DAG) const;
- virtual SDValue
+ SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
+ SmallVectorImpl<SDValue> &InVals) const override;
SDValue passArgOnStack(SDValue StackPtr, unsigned Offset, SDValue Chain,
SDValue Arg, SDLoc DL, bool IsTailCall,
SelectionDAG &DAG) const;
- virtual SDValue
- LowerCall(TargetLowering::CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const;
+ SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const override;
- virtual bool
- CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
- bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- LLVMContext &Context) const;
-
- virtual SDValue
- LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const;
+ bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext &Context) const override;
+
+ SDValue LowerReturn(SDValue Chain,
+ CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ SDLoc dl, SelectionDAG &DAG) const override;
// Inline asm support
- ConstraintType getConstraintType(const std::string &Constraint) const;
+ ConstraintType
+ getConstraintType(const std::string &Constraint) const override;
/// Examine constraint string and operand type and determine a weight value.
/// The operand object must already have been set up with the operand type.
ConstraintWeight getSingleConstraintMatchWeight(
- AsmOperandInfo &info, const char *constraint) const;
+ AsmOperandInfo &info, const char *constraint) const override;
/// This function parses registers that appear in inline-asm constraints.
/// It returns pair (0, 0) on failure.
@@ -558,33 +566,39 @@ namespace llvm {
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT VT) const;
+ MVT VT) const override;
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops. If hasMemory is
/// true it means one of the asm constraint of the inline asm instruction
/// being processed is 'm'.
- virtual void LowerAsmOperandForConstraint(SDValue Op,
- std::string &Constraint,
- std::vector<SDValue> &Ops,
- SelectionDAG &DAG) const;
+ void LowerAsmOperandForConstraint(SDValue Op,
+ std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const override;
- virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
+ bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;
- virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
+ bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
- virtual EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
- unsigned SrcAlign,
- bool IsMemset, bool ZeroMemset,
- bool MemcpyStrSrc,
- MachineFunction &MF) const;
+ EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
+ unsigned SrcAlign,
+ bool IsMemset, bool ZeroMemset,
+ bool MemcpyStrSrc,
+ MachineFunction &MF) const override;
/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
- virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
+ bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
- virtual unsigned getJumpTableEncoding() const;
+ unsigned getJumpTableEncoding() const override;
+
+ /// Emit a sign-extension using sll/sra, seb, or seh appropriately.
+ MachineBasicBlock *emitSignExtendToI32InReg(MachineInstr *MI,
+ MachineBasicBlock *BB,
+ unsigned Size, unsigned DstReg,
+ unsigned SrcRec) const;
MachineBasicBlock *emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
unsigned Size, unsigned BinOpcode, bool Nand = false) const;
@@ -595,11 +609,19 @@ namespace llvm {
MachineBasicBlock *BB, unsigned Size) const;
MachineBasicBlock *emitAtomicCmpSwapPartword(MachineInstr *MI,
MachineBasicBlock *BB, unsigned Size) const;
+ MachineBasicBlock *emitSEL_D(MachineInstr *MI, MachineBasicBlock *BB) const;
};
/// Create MipsTargetLowering objects.
- const MipsTargetLowering *createMips16TargetLowering(MipsTargetMachine &TM);
- const MipsTargetLowering *createMipsSETargetLowering(MipsTargetMachine &TM);
+ const MipsTargetLowering *
+ createMips16TargetLowering(MipsTargetMachine &TM, const MipsSubtarget &STI);
+ const MipsTargetLowering *
+ createMipsSETargetLowering(MipsTargetMachine &TM, const MipsSubtarget &STI);
+
+ namespace Mips {
+ FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo);
+ }
}
#endif // MipsISELLOWERING_H
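
The header changes above push the subtarget into the constructor and factory signatures (`create(MipsTargetMachine &TM, const MipsSubtarget &STI)`) and drop the cached `HasMips64/IsN64/IsO32` flags in favour of a `const MipsSubtarget &` member. The matching .cpp dispatch is not shown in this excerpt; the self-contained mock below only illustrates the pattern, mirroring the `inMips16Mode()` test that `MipsInstrInfo::create()` uses further down in this same patch. All class names here are stand-ins, not the real LLVM types.

    #include <iostream>
    #include <memory>

    struct Subtarget {
      bool Mips16;
      bool inMips16Mode() const { return Mips16; }
    };

    struct TargetLowering {
      virtual ~TargetLowering() = default;
      virtual const char *name() const = 0;
    };

    struct Mips16Lowering : TargetLowering {
      const char *name() const override { return "Mips16"; }
    };
    struct SELowering : TargetLowering {
      const char *name() const override { return "MipsSE"; }
    };

    // The subtarget, not the target machine, now decides which lowering to build.
    std::unique_ptr<TargetLowering> create(const Subtarget &STI) {
      if (STI.inMips16Mode())
        return std::make_unique<Mips16Lowering>();
      return std::make_unique<SELowering>();
    }

    int main() {
      Subtarget STI{false};
      std::cout << create(STI)->name() << "\n";  // prints "MipsSE"
    }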
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td b/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td
index 9f7ce9a..29d8e30 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td
@@ -57,15 +57,25 @@ let PrintMethod = "printFCCOperand", DecoderMethod = "DecodeCondCode" in
// Feature predicates.
//===----------------------------------------------------------------------===//
-def IsFP64bit : Predicate<"Subtarget.isFP64bit()">,
+def IsFP64bit : Predicate<"Subtarget->isFP64bit()">,
AssemblerPredicate<"FeatureFP64Bit">;
-def NotFP64bit : Predicate<"!Subtarget.isFP64bit()">,
+def NotFP64bit : Predicate<"!Subtarget->isFP64bit()">,
AssemblerPredicate<"!FeatureFP64Bit">;
-def IsSingleFloat : Predicate<"Subtarget.isSingleFloat()">,
+def IsSingleFloat : Predicate<"Subtarget->isSingleFloat()">,
AssemblerPredicate<"FeatureSingleFloat">;
-def IsNotSingleFloat : Predicate<"!Subtarget.isSingleFloat()">,
+def IsNotSingleFloat : Predicate<"!Subtarget->isSingleFloat()">,
AssemblerPredicate<"!FeatureSingleFloat">;
+//===----------------------------------------------------------------------===//
+// Mips FGR size adjectives.
+// They are mutually exclusive.
+//===----------------------------------------------------------------------===//
+
+class FGR_32 { list<Predicate> FGRPredicates = [NotFP64bit]; }
+class FGR_64 { list<Predicate> FGRPredicates = [IsFP64bit]; }
+
+//===----------------------------------------------------------------------===//
+
// FP immediate patterns.
def fpimm0 : PatLeaf<(fpimm), [{
return N->isExactlyValue(+0.0);
@@ -93,16 +103,17 @@ class ADDS_FT<string opstr, RegisterOperand RC, InstrItinClass Itin, bit IsComm,
SDPatternOperator OpNode= null_frag> :
InstSE<(outs RC:$fd), (ins RC:$fs, RC:$ft),
!strconcat(opstr, "\t$fd, $fs, $ft"),
- [(set RC:$fd, (OpNode RC:$fs, RC:$ft))], Itin, FrmFR> {
+ [(set RC:$fd, (OpNode RC:$fs, RC:$ft))], Itin, FrmFR, opstr> {
let isCommutable = IsComm;
}
multiclass ADDS_M<string opstr, InstrItinClass Itin, bit IsComm,
SDPatternOperator OpNode = null_frag> {
- def _D32 : ADDS_FT<opstr, AFGR64Opnd, Itin, IsComm, OpNode>,
- Requires<[NotFP64bit, HasStdEnc]>;
- def _D64 : ADDS_FT<opstr, FGR64Opnd, Itin, IsComm, OpNode>,
- Requires<[IsFP64bit, HasStdEnc]> {
+ def _D32 : MMRel, ADDS_FT<opstr, AFGR64Opnd, Itin, IsComm, OpNode>,
+ AdditionalRequires<[NotFP64bit]>;
+ def _D64 : ADDS_FT<opstr, FGR64Opnd, Itin,
+ IsComm, OpNode>,
+ AdditionalRequires<[IsFP64bit]> {
string DecoderNamespace = "Mips64";
}
}
@@ -110,24 +121,24 @@ multiclass ADDS_M<string opstr, InstrItinClass Itin, bit IsComm,
class ABSS_FT<string opstr, RegisterOperand DstRC, RegisterOperand SrcRC,
InstrItinClass Itin, SDPatternOperator OpNode= null_frag> :
InstSE<(outs DstRC:$fd), (ins SrcRC:$fs), !strconcat(opstr, "\t$fd, $fs"),
- [(set DstRC:$fd, (OpNode SrcRC:$fs))], Itin, FrmFR>,
+ [(set DstRC:$fd, (OpNode SrcRC:$fs))], Itin, FrmFR, opstr>,
NeverHasSideEffects;
multiclass ABSS_M<string opstr, InstrItinClass Itin,
SDPatternOperator OpNode= null_frag> {
- def _D32 : ABSS_FT<opstr, AFGR64Opnd, AFGR64Opnd, Itin, OpNode>,
- Requires<[NotFP64bit, HasStdEnc]>;
+ def _D32 : MMRel, ABSS_FT<opstr, AFGR64Opnd, AFGR64Opnd, Itin, OpNode>,
+ AdditionalRequires<[NotFP64bit]>;
def _D64 : ABSS_FT<opstr, FGR64Opnd, FGR64Opnd, Itin, OpNode>,
- Requires<[IsFP64bit, HasStdEnc]> {
+ AdditionalRequires<[IsFP64bit]> {
string DecoderNamespace = "Mips64";
}
}
multiclass ROUND_M<string opstr, InstrItinClass Itin> {
- def _D32 : ABSS_FT<opstr, FGR32Opnd, AFGR64Opnd, Itin>,
- Requires<[NotFP64bit, HasStdEnc]>;
+ def _D32 : MMRel, ABSS_FT<opstr, FGR32Opnd, AFGR64Opnd, Itin>,
+ AdditionalRequires<[NotFP64bit]>;
def _D64 : ABSS_FT<opstr, FGR32Opnd, FGR64Opnd, Itin>,
- Requires<[IsFP64bit, HasStdEnc]> {
+ AdditionalRequires<[IsFP64bit]> {
let DecoderNamespace = "Mips64";
}
}
@@ -135,17 +146,26 @@ multiclass ROUND_M<string opstr, InstrItinClass Itin> {
class MFC1_FT<string opstr, RegisterOperand DstRC, RegisterOperand SrcRC,
InstrItinClass Itin, SDPatternOperator OpNode= null_frag> :
InstSE<(outs DstRC:$rt), (ins SrcRC:$fs), !strconcat(opstr, "\t$rt, $fs"),
- [(set DstRC:$rt, (OpNode SrcRC:$fs))], Itin, FrmFR>;
+ [(set DstRC:$rt, (OpNode SrcRC:$fs))], Itin, FrmFR, opstr>;
class MTC1_FT<string opstr, RegisterOperand DstRC, RegisterOperand SrcRC,
InstrItinClass Itin, SDPatternOperator OpNode= null_frag> :
InstSE<(outs DstRC:$fs), (ins SrcRC:$rt), !strconcat(opstr, "\t$rt, $fs"),
- [(set DstRC:$fs, (OpNode SrcRC:$rt))], Itin, FrmFR>;
+ [(set DstRC:$fs, (OpNode SrcRC:$rt))], Itin, FrmFR, opstr>;
+
+class MTC1_64_FT<string opstr, RegisterOperand DstRC, RegisterOperand SrcRC,
+ InstrItinClass Itin> :
+ InstSE<(outs DstRC:$fs), (ins DstRC:$fs_in, SrcRC:$rt),
+ !strconcat(opstr, "\t$rt, $fs"), [], Itin, FrmFR, opstr> {
+ // $fs_in is part of a white lie to work around a widespread bug in the FPU
+ // implementation. See expandBuildPairF64 for details.
+ let Constraints = "$fs = $fs_in";
+}
class LW_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
SDPatternOperator OpNode= null_frag> :
InstSE<(outs RC:$rt), (ins mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
- [(set RC:$rt, (OpNode addrDefault:$addr))], Itin, FrmFI> {
+ [(set RC:$rt, (OpNode addrDefault:$addr))], Itin, FrmFI, opstr> {
let DecoderMethod = "DecodeFMem";
let mayLoad = 1;
}
@@ -153,7 +173,7 @@ class LW_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
class SW_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
SDPatternOperator OpNode= null_frag> :
InstSE<(outs), (ins RC:$rt, mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
- [(OpNode RC:$rt, addrDefault:$addr)], Itin, FrmFI> {
+ [(OpNode RC:$rt, addrDefault:$addr)], Itin, FrmFI, opstr> {
let DecoderMethod = "DecodeFMem";
let mayStore = 1;
}
@@ -162,20 +182,22 @@ class MADDS_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
SDPatternOperator OpNode = null_frag> :
InstSE<(outs RC:$fd), (ins RC:$fr, RC:$fs, RC:$ft),
!strconcat(opstr, "\t$fd, $fr, $fs, $ft"),
- [(set RC:$fd, (OpNode (fmul RC:$fs, RC:$ft), RC:$fr))], Itin, FrmFR>;
+ [(set RC:$fd, (OpNode (fmul RC:$fs, RC:$ft), RC:$fr))], Itin,
+ FrmFR, opstr>;
class NMADDS_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
SDPatternOperator OpNode = null_frag> :
InstSE<(outs RC:$fd), (ins RC:$fr, RC:$fs, RC:$ft),
!strconcat(opstr, "\t$fd, $fr, $fs, $ft"),
[(set RC:$fd, (fsub fpimm0, (OpNode (fmul RC:$fs, RC:$ft), RC:$fr)))],
- Itin, FrmFR>;
+ Itin, FrmFR, opstr>;
class LWXC1_FT<string opstr, RegisterOperand DRC,
InstrItinClass Itin, SDPatternOperator OpNode = null_frag> :
InstSE<(outs DRC:$fd), (ins PtrRC:$base, PtrRC:$index),
!strconcat(opstr, "\t$fd, ${index}(${base})"),
- [(set DRC:$fd, (OpNode (add iPTR:$base, iPTR:$index)))], Itin, FrmFI> {
+ [(set DRC:$fd, (OpNode (add iPTR:$base, iPTR:$index)))], Itin,
+ FrmFI, opstr> {
let AddedComplexity = 20;
}
@@ -183,15 +205,17 @@ class SWXC1_FT<string opstr, RegisterOperand DRC,
InstrItinClass Itin, SDPatternOperator OpNode = null_frag> :
InstSE<(outs), (ins DRC:$fs, PtrRC:$base, PtrRC:$index),
!strconcat(opstr, "\t$fs, ${index}(${base})"),
- [(OpNode DRC:$fs, (add iPTR:$base, iPTR:$index))], Itin, FrmFI> {
+ [(OpNode DRC:$fs, (add iPTR:$base, iPTR:$index))], Itin,
+ FrmFI, opstr> {
let AddedComplexity = 20;
}
-class BC1F_FT<string opstr, InstrItinClass Itin,
+class BC1F_FT<string opstr, DAGOperand opnd, InstrItinClass Itin,
SDPatternOperator Op = null_frag> :
- InstSE<(outs), (ins FCCRegsOpnd:$fcc, brtarget:$offset),
+ InstSE<(outs), (ins FCCRegsOpnd:$fcc, opnd:$offset),
!strconcat(opstr, "\t$fcc, $offset"),
- [(MipsFPBrcond Op, FCCRegsOpnd:$fcc, bb:$offset)], Itin, FrmFI> {
+ [(MipsFPBrcond Op, FCCRegsOpnd:$fcc, bb:$offset)], Itin,
+ FrmFI, opstr> {
let isBranch = 1;
let isTerminator = 1;
let hasDelaySlot = 1;
@@ -202,129 +226,129 @@ class CEQS_FT<string typestr, RegisterClass RC, InstrItinClass Itin,
SDPatternOperator OpNode = null_frag> :
InstSE<(outs), (ins RC:$fs, RC:$ft, condcode:$cond),
!strconcat("c.$cond.", typestr, "\t$fs, $ft"),
- [(OpNode RC:$fs, RC:$ft, imm:$cond)], Itin, FrmFR> {
+ [(OpNode RC:$fs, RC:$ft, imm:$cond)], Itin, FrmFR,
+ !strconcat("c.$cond.", typestr)> {
let Defs = [FCC0];
let isCodeGenOnly = 1;
}
-class C_COND_FT<string CondStr, string Typestr, RegisterOperand RC> :
+class C_COND_FT<string CondStr, string Typestr, RegisterOperand RC,
+ InstrItinClass itin> :
InstSE<(outs), (ins RC:$fs, RC:$ft),
- !strconcat("c.", CondStr, ".", Typestr, "\t$fs, $ft"), [], IIFcmp,
+ !strconcat("c.", CondStr, ".", Typestr, "\t$fs, $ft"), [], itin,
FrmFR>;
-multiclass C_COND_M<string TypeStr, RegisterOperand RC, bits<5> fmt> {
- def C_F_#NAME : C_COND_FT<"f", TypeStr, RC>, C_COND_FM<fmt, 0>;
- def C_UN_#NAME : C_COND_FT<"un", TypeStr, RC>, C_COND_FM<fmt, 1>;
- def C_EQ_#NAME : C_COND_FT<"eq", TypeStr, RC>, C_COND_FM<fmt, 2>;
- def C_UEQ_#NAME : C_COND_FT<"ueq", TypeStr, RC>, C_COND_FM<fmt, 3>;
- def C_OLT_#NAME : C_COND_FT<"olt", TypeStr, RC>, C_COND_FM<fmt, 4>;
- def C_ULT_#NAME : C_COND_FT<"ult", TypeStr, RC>, C_COND_FM<fmt, 5>;
- def C_OLE_#NAME : C_COND_FT<"ole", TypeStr, RC>, C_COND_FM<fmt, 6>;
- def C_ULE_#NAME : C_COND_FT<"ule", TypeStr, RC>, C_COND_FM<fmt, 7>;
- def C_SF_#NAME : C_COND_FT<"sf", TypeStr, RC>, C_COND_FM<fmt, 8>;
- def C_NGLE_#NAME : C_COND_FT<"ngle", TypeStr, RC>, C_COND_FM<fmt, 9>;
- def C_SEQ_#NAME : C_COND_FT<"seq", TypeStr, RC>, C_COND_FM<fmt, 10>;
- def C_NGL_#NAME : C_COND_FT<"ngl", TypeStr, RC>, C_COND_FM<fmt, 11>;
- def C_LT_#NAME : C_COND_FT<"lt", TypeStr, RC>, C_COND_FM<fmt, 12>;
- def C_NGE_#NAME : C_COND_FT<"nge", TypeStr, RC>, C_COND_FM<fmt, 13>;
- def C_LE_#NAME : C_COND_FT<"le", TypeStr, RC>, C_COND_FM<fmt, 14>;
- def C_NGT_#NAME : C_COND_FT<"ngt", TypeStr, RC>, C_COND_FM<fmt, 15>;
-}
-
-defm S : C_COND_M<"s", FGR32Opnd, 16>;
-defm D32 : C_COND_M<"d", AFGR64Opnd, 17>,
- Requires<[NotFP64bit, HasStdEnc]>;
+multiclass C_COND_M<string TypeStr, RegisterOperand RC, bits<5> fmt,
+ InstrItinClass itin> {
+ def C_F_#NAME : C_COND_FT<"f", TypeStr, RC, itin>, C_COND_FM<fmt, 0>;
+ def C_UN_#NAME : C_COND_FT<"un", TypeStr, RC, itin>, C_COND_FM<fmt, 1>;
+ def C_EQ_#NAME : C_COND_FT<"eq", TypeStr, RC, itin>, C_COND_FM<fmt, 2>;
+ def C_UEQ_#NAME : C_COND_FT<"ueq", TypeStr, RC, itin>, C_COND_FM<fmt, 3>;
+ def C_OLT_#NAME : C_COND_FT<"olt", TypeStr, RC, itin>, C_COND_FM<fmt, 4>;
+ def C_ULT_#NAME : C_COND_FT<"ult", TypeStr, RC, itin>, C_COND_FM<fmt, 5>;
+ def C_OLE_#NAME : C_COND_FT<"ole", TypeStr, RC, itin>, C_COND_FM<fmt, 6>;
+ def C_ULE_#NAME : C_COND_FT<"ule", TypeStr, RC, itin>, C_COND_FM<fmt, 7>;
+ def C_SF_#NAME : C_COND_FT<"sf", TypeStr, RC, itin>, C_COND_FM<fmt, 8>;
+ def C_NGLE_#NAME : C_COND_FT<"ngle", TypeStr, RC, itin>, C_COND_FM<fmt, 9>;
+ def C_SEQ_#NAME : C_COND_FT<"seq", TypeStr, RC, itin>, C_COND_FM<fmt, 10>;
+ def C_NGL_#NAME : C_COND_FT<"ngl", TypeStr, RC, itin>, C_COND_FM<fmt, 11>;
+ def C_LT_#NAME : C_COND_FT<"lt", TypeStr, RC, itin>, C_COND_FM<fmt, 12>;
+ def C_NGE_#NAME : C_COND_FT<"nge", TypeStr, RC, itin>, C_COND_FM<fmt, 13>;
+ def C_LE_#NAME : C_COND_FT<"le", TypeStr, RC, itin>, C_COND_FM<fmt, 14>;
+ def C_NGT_#NAME : C_COND_FT<"ngt", TypeStr, RC, itin>, C_COND_FM<fmt, 15>;
+}
+
+defm S : C_COND_M<"s", FGR32Opnd, 16, II_C_CC_S>, ISA_MIPS1_NOT_32R6_64R6;
+defm D32 : C_COND_M<"d", AFGR64Opnd, 17, II_C_CC_D>, ISA_MIPS1_NOT_32R6_64R6,
+ AdditionalRequires<[NotFP64bit]>;
let DecoderNamespace = "Mips64" in
-defm D64 : C_COND_M<"d", FGR64Opnd, 17>, Requires<[IsFP64bit, HasStdEnc]>;
+defm D64 : C_COND_M<"d", FGR64Opnd, 17, II_C_CC_D>, ISA_MIPS1_NOT_32R6_64R6,
+ AdditionalRequires<[IsFP64bit]>;
//===----------------------------------------------------------------------===//
// Floating Point Instructions
//===----------------------------------------------------------------------===//
-def ROUND_W_S : ABSS_FT<"round.w.s", FGR32Opnd, FGR32Opnd, IIFcvt>,
- ABSS_FM<0xc, 16>;
-def TRUNC_W_S : ABSS_FT<"trunc.w.s", FGR32Opnd, FGR32Opnd, IIFcvt>,
- ABSS_FM<0xd, 16>;
-def CEIL_W_S : ABSS_FT<"ceil.w.s", FGR32Opnd, FGR32Opnd, IIFcvt>,
- ABSS_FM<0xe, 16>;
-def FLOOR_W_S : ABSS_FT<"floor.w.s", FGR32Opnd, FGR32Opnd, IIFcvt>,
- ABSS_FM<0xf, 16>;
-def CVT_W_S : ABSS_FT<"cvt.w.s", FGR32Opnd, FGR32Opnd, IIFcvt>,
+def ROUND_W_S : MMRel, ABSS_FT<"round.w.s", FGR32Opnd, FGR32Opnd, II_ROUND>,
+ ABSS_FM<0xc, 16>, ISA_MIPS2;
+def TRUNC_W_S : MMRel, ABSS_FT<"trunc.w.s", FGR32Opnd, FGR32Opnd, II_TRUNC>,
+ ABSS_FM<0xd, 16>, ISA_MIPS2;
+def CEIL_W_S : MMRel, ABSS_FT<"ceil.w.s", FGR32Opnd, FGR32Opnd, II_CEIL>,
+ ABSS_FM<0xe, 16>, ISA_MIPS2;
+def FLOOR_W_S : MMRel, ABSS_FT<"floor.w.s", FGR32Opnd, FGR32Opnd, II_FLOOR>,
+ ABSS_FM<0xf, 16>, ISA_MIPS2;
+def CVT_W_S : MMRel, ABSS_FT<"cvt.w.s", FGR32Opnd, FGR32Opnd, II_CVT>,
ABSS_FM<0x24, 16>;
-defm ROUND_W : ROUND_M<"round.w.d", IIFcvt>, ABSS_FM<0xc, 17>;
-defm TRUNC_W : ROUND_M<"trunc.w.d", IIFcvt>, ABSS_FM<0xd, 17>;
-defm CEIL_W : ROUND_M<"ceil.w.d", IIFcvt>, ABSS_FM<0xe, 17>;
-defm FLOOR_W : ROUND_M<"floor.w.d", IIFcvt>, ABSS_FM<0xf, 17>;
-defm CVT_W : ROUND_M<"cvt.w.d", IIFcvt>, ABSS_FM<0x24, 17>;
-
-let Predicates = [IsFP64bit, HasStdEnc], DecoderNamespace = "Mips64" in {
- def ROUND_L_S : ABSS_FT<"round.l.s", FGR64Opnd, FGR32Opnd, IIFcvt>,
- ABSS_FM<0x8, 16>;
- def ROUND_L_D64 : ABSS_FT<"round.l.d", FGR64Opnd, FGR64Opnd, IIFcvt>,
- ABSS_FM<0x8, 17>;
- def TRUNC_L_S : ABSS_FT<"trunc.l.s", FGR64Opnd, FGR32Opnd, IIFcvt>,
- ABSS_FM<0x9, 16>;
- def TRUNC_L_D64 : ABSS_FT<"trunc.l.d", FGR64Opnd, FGR64Opnd, IIFcvt>,
- ABSS_FM<0x9, 17>;
- def CEIL_L_S : ABSS_FT<"ceil.l.s", FGR64Opnd, FGR32Opnd, IIFcvt>,
- ABSS_FM<0xa, 16>;
- def CEIL_L_D64 : ABSS_FT<"ceil.l.d", FGR64Opnd, FGR64Opnd, IIFcvt>,
- ABSS_FM<0xa, 17>;
- def FLOOR_L_S : ABSS_FT<"floor.l.s", FGR64Opnd, FGR32Opnd, IIFcvt>,
- ABSS_FM<0xb, 16>;
- def FLOOR_L_D64 : ABSS_FT<"floor.l.d", FGR64Opnd, FGR64Opnd, IIFcvt>,
- ABSS_FM<0xb, 17>;
-}
-
-def CVT_S_W : ABSS_FT<"cvt.s.w", FGR32Opnd, FGR32Opnd, IIFcvt>,
+defm ROUND_W : ROUND_M<"round.w.d", II_ROUND>, ABSS_FM<0xc, 17>, ISA_MIPS2;
+defm TRUNC_W : ROUND_M<"trunc.w.d", II_TRUNC>, ABSS_FM<0xd, 17>, ISA_MIPS2;
+defm CEIL_W : ROUND_M<"ceil.w.d", II_CEIL>, ABSS_FM<0xe, 17>, ISA_MIPS2;
+defm FLOOR_W : ROUND_M<"floor.w.d", II_FLOOR>, ABSS_FM<0xf, 17>, ISA_MIPS2;
+defm CVT_W : ROUND_M<"cvt.w.d", II_CVT>, ABSS_FM<0x24, 17>;
+
+let DecoderNamespace = "Mips64" in {
+ def ROUND_L_S : ABSS_FT<"round.l.s", FGR64Opnd, FGR32Opnd, II_ROUND>,
+ ABSS_FM<0x8, 16>, FGR_64;
+ def ROUND_L_D64 : ABSS_FT<"round.l.d", FGR64Opnd, FGR64Opnd, II_ROUND>,
+ ABSS_FM<0x8, 17>, FGR_64;
+ def TRUNC_L_S : ABSS_FT<"trunc.l.s", FGR64Opnd, FGR32Opnd, II_TRUNC>,
+ ABSS_FM<0x9, 16>, FGR_64;
+ def TRUNC_L_D64 : ABSS_FT<"trunc.l.d", FGR64Opnd, FGR64Opnd, II_TRUNC>,
+ ABSS_FM<0x9, 17>, FGR_64;
+ def CEIL_L_S : ABSS_FT<"ceil.l.s", FGR64Opnd, FGR32Opnd, II_CEIL>,
+ ABSS_FM<0xa, 16>, FGR_64;
+ def CEIL_L_D64 : ABSS_FT<"ceil.l.d", FGR64Opnd, FGR64Opnd, II_CEIL>,
+ ABSS_FM<0xa, 17>, FGR_64;
+ def FLOOR_L_S : ABSS_FT<"floor.l.s", FGR64Opnd, FGR32Opnd, II_FLOOR>,
+ ABSS_FM<0xb, 16>, FGR_64;
+ def FLOOR_L_D64 : ABSS_FT<"floor.l.d", FGR64Opnd, FGR64Opnd, II_FLOOR>,
+ ABSS_FM<0xb, 17>, FGR_64;
+}
+
+def CVT_S_W : MMRel, ABSS_FT<"cvt.s.w", FGR32Opnd, FGR32Opnd, II_CVT>,
ABSS_FM<0x20, 20>;
-def CVT_L_S : ABSS_FT<"cvt.l.s", FGR64Opnd, FGR32Opnd, IIFcvt>,
- ABSS_FM<0x25, 16>;
-def CVT_L_D64: ABSS_FT<"cvt.l.d", FGR64Opnd, FGR64Opnd, IIFcvt>,
- ABSS_FM<0x25, 17>;
-
-let Predicates = [NotFP64bit, HasStdEnc] in {
- def CVT_S_D32 : ABSS_FT<"cvt.s.d", FGR32Opnd, AFGR64Opnd, IIFcvt>,
- ABSS_FM<0x20, 17>;
- def CVT_D32_W : ABSS_FT<"cvt.d.w", AFGR64Opnd, FGR32Opnd, IIFcvt>,
- ABSS_FM<0x21, 20>;
- def CVT_D32_S : ABSS_FT<"cvt.d.s", AFGR64Opnd, FGR32Opnd, IIFcvt>,
- ABSS_FM<0x21, 16>;
-}
-
-let Predicates = [IsFP64bit, HasStdEnc], DecoderNamespace = "Mips64" in {
- def CVT_S_D64 : ABSS_FT<"cvt.s.d", FGR32Opnd, FGR64Opnd, IIFcvt>,
- ABSS_FM<0x20, 17>;
- def CVT_S_L : ABSS_FT<"cvt.s.l", FGR32Opnd, FGR64Opnd, IIFcvt>,
- ABSS_FM<0x20, 21>;
- def CVT_D64_W : ABSS_FT<"cvt.d.w", FGR64Opnd, FGR32Opnd, IIFcvt>,
- ABSS_FM<0x21, 20>;
- def CVT_D64_S : ABSS_FT<"cvt.d.s", FGR64Opnd, FGR32Opnd, IIFcvt>,
- ABSS_FM<0x21, 16>;
- def CVT_D64_L : ABSS_FT<"cvt.d.l", FGR64Opnd, FGR64Opnd, IIFcvt>,
- ABSS_FM<0x21, 21>;
+def CVT_L_S : MMRel, ABSS_FT<"cvt.l.s", FGR64Opnd, FGR32Opnd, II_CVT>,
+ ABSS_FM<0x25, 16>, INSN_MIPS3_32R2;
+def CVT_L_D64: MMRel, ABSS_FT<"cvt.l.d", FGR64Opnd, FGR64Opnd, II_CVT>,
+ ABSS_FM<0x25, 17>, INSN_MIPS3_32R2;
+
+def CVT_S_D32 : MMRel, ABSS_FT<"cvt.s.d", FGR32Opnd, AFGR64Opnd, II_CVT>,
+ ABSS_FM<0x20, 17>, FGR_32;
+def CVT_D32_W : MMRel, ABSS_FT<"cvt.d.w", AFGR64Opnd, FGR32Opnd, II_CVT>,
+ ABSS_FM<0x21, 20>, FGR_32;
+def CVT_D32_S : MMRel, ABSS_FT<"cvt.d.s", AFGR64Opnd, FGR32Opnd, II_CVT>,
+ ABSS_FM<0x21, 16>, FGR_32;
+
+let DecoderNamespace = "Mips64" in {
+ def CVT_S_D64 : ABSS_FT<"cvt.s.d", FGR32Opnd, FGR64Opnd, II_CVT>,
+ ABSS_FM<0x20, 17>, FGR_64;
+ def CVT_S_L : ABSS_FT<"cvt.s.l", FGR32Opnd, FGR64Opnd, II_CVT>,
+ ABSS_FM<0x20, 21>, FGR_64;
+ def CVT_D64_W : ABSS_FT<"cvt.d.w", FGR64Opnd, FGR32Opnd, II_CVT>,
+ ABSS_FM<0x21, 20>, FGR_64;
+ def CVT_D64_S : ABSS_FT<"cvt.d.s", FGR64Opnd, FGR32Opnd, II_CVT>,
+ ABSS_FM<0x21, 16>, FGR_64;
+ def CVT_D64_L : ABSS_FT<"cvt.d.l", FGR64Opnd, FGR64Opnd, II_CVT>,
+ ABSS_FM<0x21, 21>, FGR_64;
}
let isPseudo = 1, isCodeGenOnly = 1 in {
- def PseudoCVT_S_W : ABSS_FT<"", FGR32Opnd, GPR32Opnd, IIFcvt>;
- def PseudoCVT_D32_W : ABSS_FT<"", AFGR64Opnd, GPR32Opnd, IIFcvt>;
- def PseudoCVT_S_L : ABSS_FT<"", FGR64Opnd, GPR64Opnd, IIFcvt>;
- def PseudoCVT_D64_W : ABSS_FT<"", FGR64Opnd, GPR32Opnd, IIFcvt>;
- def PseudoCVT_D64_L : ABSS_FT<"", FGR64Opnd, GPR64Opnd, IIFcvt>;
+ def PseudoCVT_S_W : ABSS_FT<"", FGR32Opnd, GPR32Opnd, II_CVT>;
+ def PseudoCVT_D32_W : ABSS_FT<"", AFGR64Opnd, GPR32Opnd, II_CVT>;
+ def PseudoCVT_S_L : ABSS_FT<"", FGR64Opnd, GPR64Opnd, II_CVT>;
+ def PseudoCVT_D64_W : ABSS_FT<"", FGR64Opnd, GPR32Opnd, II_CVT>;
+ def PseudoCVT_D64_L : ABSS_FT<"", FGR64Opnd, GPR64Opnd, II_CVT>;
}
-let Predicates = [NoNaNsFPMath, HasStdEnc] in {
- def FABS_S : ABSS_FT<"abs.s", FGR32Opnd, FGR32Opnd, IIFcvt, fabs>,
- ABSS_FM<0x5, 16>;
- def FNEG_S : ABSS_FT<"neg.s", FGR32Opnd, FGR32Opnd, IIFcvt, fneg>,
- ABSS_FM<0x7, 16>;
- defm FABS : ABSS_M<"abs.d", IIFcvt, fabs>, ABSS_FM<0x5, 17>;
- defm FNEG : ABSS_M<"neg.d", IIFcvt, fneg>, ABSS_FM<0x7, 17>;
-}
+def FABS_S : MMRel, ABSS_FT<"abs.s", FGR32Opnd, FGR32Opnd, II_ABS, fabs>,
+ ABSS_FM<0x5, 16>;
+def FNEG_S : MMRel, ABSS_FT<"neg.s", FGR32Opnd, FGR32Opnd, II_NEG, fneg>,
+ ABSS_FM<0x7, 16>;
+defm FABS : ABSS_M<"abs.d", II_ABS, fabs>, ABSS_FM<0x5, 17>;
+defm FNEG : ABSS_M<"neg.d", II_NEG, fneg>, ABSS_FM<0x7, 17>;
-def FSQRT_S : ABSS_FT<"sqrt.s", FGR32Opnd, FGR32Opnd, IIFsqrtSingle,
- fsqrt>, ABSS_FM<0x4, 16>;
-defm FSQRT : ABSS_M<"sqrt.d", IIFsqrtDouble, fsqrt>, ABSS_FM<0x4, 17>;
+def FSQRT_S : MMRel, ABSS_FT<"sqrt.s", FGR32Opnd, FGR32Opnd, II_SQRT_S, fsqrt>,
+ ABSS_FM<0x4, 16>, ISA_MIPS2;
+defm FSQRT : ABSS_M<"sqrt.d", II_SQRT_D, fsqrt>, ABSS_FM<0x4, 17>, ISA_MIPS2;
// The odd-numbered registers are only referenced when doing loads,
// stores, and moves between floating-point and integer registers.
@@ -332,137 +356,168 @@ defm FSQRT : ABSS_M<"sqrt.d", IIFsqrtDouble, fsqrt>, ABSS_FM<0x4, 17>;
// regardless of register aliasing.
/// Move Control Registers From/To CPU Registers
-def CFC1 : MFC1_FT<"cfc1", GPR32Opnd, CCROpnd, IIFmove>, MFC1_FM<2>;
-def CTC1 : MTC1_FT<"ctc1", CCROpnd, GPR32Opnd, IIFmove>, MFC1_FM<6>;
-def MFC1 : MFC1_FT<"mfc1", GPR32Opnd, FGR32Opnd, IIFmoveC1, bitconvert>,
- MFC1_FM<0>;
-def MTC1 : MTC1_FT<"mtc1", FGR32Opnd, GPR32Opnd, IIFmoveC1, bitconvert>,
- MFC1_FM<4>;
-def MFHC1 : MFC1_FT<"mfhc1", GPR32Opnd, FGRH32Opnd, IIFmoveC1>,
- MFC1_FM<3>;
-def MTHC1 : MTC1_FT<"mthc1", FGRH32Opnd, GPR32Opnd, IIFmoveC1>,
- MFC1_FM<7>;
-def DMFC1 : MFC1_FT<"dmfc1", GPR64Opnd, FGR64Opnd, IIFmoveC1,
- bitconvert>, MFC1_FM<1>;
-def DMTC1 : MTC1_FT<"dmtc1", FGR64Opnd, GPR64Opnd, IIFmoveC1,
- bitconvert>, MFC1_FM<5>;
-
-def FMOV_S : ABSS_FT<"mov.s", FGR32Opnd, FGR32Opnd, IIFmove>,
+def CFC1 : MMRel, MFC1_FT<"cfc1", GPR32Opnd, CCROpnd, II_CFC1>, MFC1_FM<2>;
+def CTC1 : MMRel, MTC1_FT<"ctc1", CCROpnd, GPR32Opnd, II_CTC1>, MFC1_FM<6>;
+def MFC1 : MMRel, MFC1_FT<"mfc1", GPR32Opnd, FGR32Opnd, II_MFC1,
+ bitconvert>, MFC1_FM<0>;
+def MTC1 : MMRel, MTC1_FT<"mtc1", FGR32Opnd, GPR32Opnd, II_MTC1,
+ bitconvert>, MFC1_FM<4>;
+def MFHC1_D32 : MMRel, MFC1_FT<"mfhc1", GPR32Opnd, AFGR64Opnd, II_MFHC1>,
+ MFC1_FM<3>, ISA_MIPS32R2, AdditionalRequires<[NotFP64bit]>;
+def MFHC1_D64 : MFC1_FT<"mfhc1", GPR32Opnd, FGR64Opnd, II_MFHC1>,
+ MFC1_FM<3>, ISA_MIPS32R2, AdditionalRequires<[IsFP64bit]> {
+ let DecoderNamespace = "Mips64";
+}
+def MTHC1_D32 : MMRel, MTC1_64_FT<"mthc1", AFGR64Opnd, GPR32Opnd, II_MTHC1>,
+ MFC1_FM<7>, ISA_MIPS32R2, AdditionalRequires<[NotFP64bit]>;
+def MTHC1_D64 : MTC1_64_FT<"mthc1", FGR64Opnd, GPR32Opnd, II_MTHC1>,
+ MFC1_FM<7>, ISA_MIPS32R2, AdditionalRequires<[IsFP64bit]> {
+ let DecoderNamespace = "Mips64";
+}
+def DMFC1 : MFC1_FT<"dmfc1", GPR64Opnd, FGR64Opnd, II_DMFC1,
+ bitconvert>, MFC1_FM<1>, ISA_MIPS3;
+def DMTC1 : MTC1_FT<"dmtc1", FGR64Opnd, GPR64Opnd, II_DMTC1,
+ bitconvert>, MFC1_FM<5>, ISA_MIPS3;
+
+def FMOV_S : MMRel, ABSS_FT<"mov.s", FGR32Opnd, FGR32Opnd, II_MOV_S>,
ABSS_FM<0x6, 16>;
-def FMOV_D32 : ABSS_FT<"mov.d", AFGR64Opnd, AFGR64Opnd, IIFmove>,
- ABSS_FM<0x6, 17>, Requires<[NotFP64bit, HasStdEnc]>;
-def FMOV_D64 : ABSS_FT<"mov.d", FGR64Opnd, FGR64Opnd, IIFmove>,
- ABSS_FM<0x6, 17>, Requires<[IsFP64bit, HasStdEnc]> {
+def FMOV_D32 : MMRel, ABSS_FT<"mov.d", AFGR64Opnd, AFGR64Opnd, II_MOV_D>,
+ ABSS_FM<0x6, 17>, AdditionalRequires<[NotFP64bit]>;
+def FMOV_D64 : ABSS_FT<"mov.d", FGR64Opnd, FGR64Opnd, II_MOV_D>,
+ ABSS_FM<0x6, 17>, AdditionalRequires<[IsFP64bit]> {
let DecoderNamespace = "Mips64";
}
/// Floating Point Memory Instructions
-let Predicates = [HasStdEnc] in {
- def LWC1 : LW_FT<"lwc1", FGR32Opnd, IIFLoad, load>, LW_FM<0x31>;
- def SWC1 : SW_FT<"swc1", FGR32Opnd, IIFStore, store>, LW_FM<0x39>;
-}
-
-let Predicates = [IsFP64bit, HasStdEnc], DecoderNamespace = "Mips64" in {
- def LDC164 : LW_FT<"ldc1", FGR64Opnd, IIFLoad, load>, LW_FM<0x35>;
- def SDC164 : SW_FT<"sdc1", FGR64Opnd, IIFStore, store>, LW_FM<0x3d>;
-}
-
-let Predicates = [NotFP64bit, HasStdEnc] in {
- def LDC1 : LW_FT<"ldc1", AFGR64Opnd, IIFLoad, load>, LW_FM<0x35>;
- def SDC1 : SW_FT<"sdc1", AFGR64Opnd, IIFStore, store>, LW_FM<0x3d>;
-}
-
-/// Cop2 Memory Instructions
-let Predicates = [HasStdEnc] in {
- def LWC2 : LW_FT<"lwc2", COP2Opnd, NoItinerary, load>, LW_FM<0x32>;
- def SWC2 : SW_FT<"swc2", COP2Opnd, NoItinerary, store>, LW_FM<0x3a>;
- def LDC2 : LW_FT<"ldc2", COP2Opnd, NoItinerary, load>, LW_FM<0x36>;
- def SDC2 : SW_FT<"sdc2", COP2Opnd, NoItinerary, store>, LW_FM<0x3e>;
+def LWC1 : MMRel, LW_FT<"lwc1", FGR32Opnd, II_LWC1, load>, LW_FM<0x31>;
+def SWC1 : MMRel, SW_FT<"swc1", FGR32Opnd, II_SWC1, store>, LW_FM<0x39>;
+
+let DecoderNamespace = "Mips64" in {
+ def LDC164 : LW_FT<"ldc1", FGR64Opnd, II_LDC1, load>, LW_FM<0x35>, ISA_MIPS2,
+ FGR_64;
+ def SDC164 : SW_FT<"sdc1", FGR64Opnd, II_SDC1, store>, LW_FM<0x3d>, ISA_MIPS2,
+ FGR_64;
+}
+
+def LDC1 : MMRel, LW_FT<"ldc1", AFGR64Opnd, II_LDC1, load>, LW_FM<0x35>,
+ ISA_MIPS2, FGR_32;
+def SDC1 : MMRel, SW_FT<"sdc1", AFGR64Opnd, II_SDC1, store>, LW_FM<0x3d>,
+ ISA_MIPS2, FGR_32;
+
+// Cop2 Memory Instructions
+// FIXME: These aren't really FPU instructions and as such don't belong in this
+// file
+def LWC2 : LW_FT<"lwc2", COP2Opnd, NoItinerary, load>, LW_FM<0x32>,
+ ISA_MIPS1_NOT_32R6_64R6;
+def SWC2 : SW_FT<"swc2", COP2Opnd, NoItinerary, store>, LW_FM<0x3a>,
+ ISA_MIPS1_NOT_32R6_64R6;
+def LDC2 : LW_FT<"ldc2", COP2Opnd, NoItinerary, load>, LW_FM<0x36>,
+ ISA_MIPS2_NOT_32R6_64R6;
+def SDC2 : SW_FT<"sdc2", COP2Opnd, NoItinerary, store>, LW_FM<0x3e>,
+ ISA_MIPS2_NOT_32R6_64R6;
+
+// Cop3 Memory Instructions
+// FIXME: These aren't really FPU instructions and as such don't belong in this
+// file
+let DecoderNamespace = "COP3_" in {
+ def LWC3 : LW_FT<"lwc3", COP3Opnd, NoItinerary, load>, LW_FM<0x33>;
+ def SWC3 : SW_FT<"swc3", COP3Opnd, NoItinerary, store>, LW_FM<0x3b>;
+ def LDC3 : LW_FT<"ldc3", COP3Opnd, NoItinerary, load>, LW_FM<0x37>,
+ ISA_MIPS2;
+ def SDC3 : SW_FT<"sdc3", COP3Opnd, NoItinerary, store>, LW_FM<0x3f>,
+ ISA_MIPS2;
}
// Indexed loads and stores.
-let Predicates = [HasFPIdx, HasStdEnc] in {
- def LWXC1 : LWXC1_FT<"lwxc1", FGR32Opnd, IIFLoad, load>, LWXC1_FM<0>;
- def SWXC1 : SWXC1_FT<"swxc1", FGR32Opnd, IIFStore, store>, SWXC1_FM<8>;
+// Base register + offset register addressing mode (indicated by "x" in the
+// instruction mnemonic) is disallowed under NaCl.
+let AdditionalPredicates = [IsNotNaCl] in {
+ def LWXC1 : MMRel, LWXC1_FT<"lwxc1", FGR32Opnd, II_LWXC1, load>, LWXC1_FM<0>,
+ INSN_MIPS4_32R2_NOT_32R6_64R6;
+ def SWXC1 : MMRel, SWXC1_FT<"swxc1", FGR32Opnd, II_SWXC1, store>, SWXC1_FM<8>,
+ INSN_MIPS4_32R2_NOT_32R6_64R6;
}
-let Predicates = [HasFPIdx, NotFP64bit, HasStdEnc] in {
- def LDXC1 : LWXC1_FT<"ldxc1", AFGR64Opnd, IIFLoad, load>, LWXC1_FM<1>;
- def SDXC1 : SWXC1_FT<"sdxc1", AFGR64Opnd, IIFStore, store>, SWXC1_FM<9>;
+let AdditionalPredicates = [NotInMicroMips, IsNotNaCl] in {
+ def LDXC1 : LWXC1_FT<"ldxc1", AFGR64Opnd, II_LDXC1, load>, LWXC1_FM<1>,
+ INSN_MIPS4_32R2_NOT_32R6_64R6, FGR_32;
+ def SDXC1 : SWXC1_FT<"sdxc1", AFGR64Opnd, II_SDXC1, store>, SWXC1_FM<9>,
+ INSN_MIPS4_32R2_NOT_32R6_64R6, FGR_32;
}
-let Predicates = [HasFPIdx, IsFP64bit, HasStdEnc],
- DecoderNamespace="Mips64" in {
- def LDXC164 : LWXC1_FT<"ldxc1", FGR64Opnd, IIFLoad, load>, LWXC1_FM<1>;
- def SDXC164 : SWXC1_FT<"sdxc1", FGR64Opnd, IIFStore, store>, SWXC1_FM<9>;
+let DecoderNamespace="Mips64" in {
+ def LDXC164 : LWXC1_FT<"ldxc1", FGR64Opnd, II_LDXC1, load>, LWXC1_FM<1>,
+ INSN_MIPS4_32R2_NOT_32R6_64R6, FGR_64;
+ def SDXC164 : SWXC1_FT<"sdxc1", FGR64Opnd, II_SDXC1, store>, SWXC1_FM<9>,
+ INSN_MIPS4_32R2_NOT_32R6_64R6, FGR_64;
}
// Load/store doubleword indexed unaligned.
-let Predicates = [NotFP64bit, HasStdEnc] in {
- def LUXC1 : LWXC1_FT<"luxc1", AFGR64Opnd, IIFLoad>, LWXC1_FM<0x5>;
- def SUXC1 : SWXC1_FT<"suxc1", AFGR64Opnd, IIFStore>, SWXC1_FM<0xd>;
+let AdditionalPredicates = [IsNotNaCl] in {
+ def LUXC1 : MMRel, LWXC1_FT<"luxc1", AFGR64Opnd, II_LUXC1>, LWXC1_FM<0x5>,
+ INSN_MIPS5_32R2_NOT_32R6_64R6, FGR_32;
+ def SUXC1 : MMRel, SWXC1_FT<"suxc1", AFGR64Opnd, II_SUXC1>, SWXC1_FM<0xd>,
+ INSN_MIPS5_32R2_NOT_32R6_64R6, FGR_32;
}
-let Predicates = [IsFP64bit, HasStdEnc], DecoderNamespace="Mips64" in {
- def LUXC164 : LWXC1_FT<"luxc1", FGR64Opnd, IIFLoad>, LWXC1_FM<0x5>;
- def SUXC164 : SWXC1_FT<"suxc1", FGR64Opnd, IIFStore>, SWXC1_FM<0xd>;
+let DecoderNamespace="Mips64" in {
+ def LUXC164 : LWXC1_FT<"luxc1", FGR64Opnd, II_LUXC1>, LWXC1_FM<0x5>,
+ INSN_MIPS5_32R2_NOT_32R6_64R6, FGR_64;
+ def SUXC164 : SWXC1_FT<"suxc1", FGR64Opnd, II_SUXC1>, SWXC1_FM<0xd>,
+ INSN_MIPS5_32R2_NOT_32R6_64R6, FGR_64;
}
/// Floating-point Arithmetic
-def FADD_S : ADDS_FT<"add.s", FGR32Opnd, IIFadd, 1, fadd>,
+def FADD_S : MMRel, ADDS_FT<"add.s", FGR32Opnd, II_ADD_S, 1, fadd>,
ADDS_FM<0x00, 16>;
-defm FADD : ADDS_M<"add.d", IIFadd, 1, fadd>, ADDS_FM<0x00, 17>;
-def FDIV_S : ADDS_FT<"div.s", FGR32Opnd, IIFdivSingle, 0, fdiv>,
+defm FADD : ADDS_M<"add.d", II_ADD_D, 1, fadd>, ADDS_FM<0x00, 17>;
+def FDIV_S : MMRel, ADDS_FT<"div.s", FGR32Opnd, II_DIV_S, 0, fdiv>,
ADDS_FM<0x03, 16>;
-defm FDIV : ADDS_M<"div.d", IIFdivDouble, 0, fdiv>, ADDS_FM<0x03, 17>;
-def FMUL_S : ADDS_FT<"mul.s", FGR32Opnd, IIFmulSingle, 1, fmul>,
+defm FDIV : ADDS_M<"div.d", II_DIV_D, 0, fdiv>, ADDS_FM<0x03, 17>;
+def FMUL_S : MMRel, ADDS_FT<"mul.s", FGR32Opnd, II_MUL_S, 1, fmul>,
ADDS_FM<0x02, 16>;
-defm FMUL : ADDS_M<"mul.d", IIFmulDouble, 1, fmul>, ADDS_FM<0x02, 17>;
-def FSUB_S : ADDS_FT<"sub.s", FGR32Opnd, IIFadd, 0, fsub>,
+defm FMUL : ADDS_M<"mul.d", II_MUL_D, 1, fmul>, ADDS_FM<0x02, 17>;
+def FSUB_S : MMRel, ADDS_FT<"sub.s", FGR32Opnd, II_SUB_S, 0, fsub>,
ADDS_FM<0x01, 16>;
-defm FSUB : ADDS_M<"sub.d", IIFadd, 0, fsub>, ADDS_FM<0x01, 17>;
+defm FSUB : ADDS_M<"sub.d", II_SUB_D, 0, fsub>, ADDS_FM<0x01, 17>;
-let Predicates = [HasMips32r2, HasStdEnc] in {
- def MADD_S : MADDS_FT<"madd.s", FGR32Opnd, IIFmulSingle, fadd>,
- MADDS_FM<4, 0>;
- def MSUB_S : MADDS_FT<"msub.s", FGR32Opnd, IIFmulSingle, fsub>,
- MADDS_FM<5, 0>;
-}
+def MADD_S : MMRel, MADDS_FT<"madd.s", FGR32Opnd, II_MADD_S, fadd>,
+ MADDS_FM<4, 0>, ISA_MIPS32R2_NOT_32R6_64R6;
+def MSUB_S : MMRel, MADDS_FT<"msub.s", FGR32Opnd, II_MSUB_S, fsub>,
+ MADDS_FM<5, 0>, ISA_MIPS32R2_NOT_32R6_64R6;
-let Predicates = [HasMips32r2, NoNaNsFPMath, HasStdEnc] in {
- def NMADD_S : NMADDS_FT<"nmadd.s", FGR32Opnd, IIFmulSingle, fadd>,
- MADDS_FM<6, 0>;
- def NMSUB_S : NMADDS_FT<"nmsub.s", FGR32Opnd, IIFmulSingle, fsub>,
- MADDS_FM<7, 0>;
+let AdditionalPredicates = [NoNaNsFPMath] in {
+ def NMADD_S : MMRel, NMADDS_FT<"nmadd.s", FGR32Opnd, II_NMADD_S, fadd>,
+ MADDS_FM<6, 0>, ISA_MIPS32R2_NOT_32R6_64R6;
+ def NMSUB_S : MMRel, NMADDS_FT<"nmsub.s", FGR32Opnd, II_NMSUB_S, fsub>,
+ MADDS_FM<7, 0>, ISA_MIPS32R2_NOT_32R6_64R6;
}
-let Predicates = [HasMips32r2, NotFP64bit, HasStdEnc] in {
- def MADD_D32 : MADDS_FT<"madd.d", AFGR64Opnd, IIFmulDouble, fadd>,
- MADDS_FM<4, 1>;
- def MSUB_D32 : MADDS_FT<"msub.d", AFGR64Opnd, IIFmulDouble, fsub>,
- MADDS_FM<5, 1>;
-}
+def MADD_D32 : MMRel, MADDS_FT<"madd.d", AFGR64Opnd, II_MADD_D, fadd>,
+ MADDS_FM<4, 1>, ISA_MIPS32R2_NOT_32R6_64R6, FGR_32;
+def MSUB_D32 : MMRel, MADDS_FT<"msub.d", AFGR64Opnd, II_MSUB_D, fsub>,
+ MADDS_FM<5, 1>, ISA_MIPS32R2_NOT_32R6_64R6, FGR_32;
-let Predicates = [HasMips32r2, NotFP64bit, NoNaNsFPMath, HasStdEnc] in {
- def NMADD_D32 : NMADDS_FT<"nmadd.d", AFGR64Opnd, IIFmulDouble, fadd>,
- MADDS_FM<6, 1>;
- def NMSUB_D32 : NMADDS_FT<"nmsub.d", AFGR64Opnd, IIFmulDouble, fsub>,
- MADDS_FM<7, 1>;
+let AdditionalPredicates = [NoNaNsFPMath] in {
+ def NMADD_D32 : MMRel, NMADDS_FT<"nmadd.d", AFGR64Opnd, II_NMADD_D, fadd>,
+ MADDS_FM<6, 1>, ISA_MIPS32R2_NOT_32R6_64R6, FGR_32;
+ def NMSUB_D32 : MMRel, NMADDS_FT<"nmsub.d", AFGR64Opnd, II_NMSUB_D, fsub>,
+ MADDS_FM<7, 1>, ISA_MIPS32R2_NOT_32R6_64R6, FGR_32;
}
-let Predicates = [HasMips32r2, IsFP64bit, HasStdEnc], isCodeGenOnly=1 in {
- def MADD_D64 : MADDS_FT<"madd.d", FGR64Opnd, IIFmulDouble, fadd>,
- MADDS_FM<4, 1>;
- def MSUB_D64 : MADDS_FT<"msub.d", FGR64Opnd, IIFmulDouble, fsub>,
- MADDS_FM<5, 1>;
+let isCodeGenOnly=1 in {
+ def MADD_D64 : MADDS_FT<"madd.d", FGR64Opnd, II_MADD_D, fadd>,
+ MADDS_FM<4, 1>, ISA_MIPS32R2_NOT_32R6_64R6, FGR_64;
+ def MSUB_D64 : MADDS_FT<"msub.d", FGR64Opnd, II_MSUB_D, fsub>,
+ MADDS_FM<5, 1>, ISA_MIPS32R2_NOT_32R6_64R6, FGR_64;
}
-let Predicates = [HasMips32r2, IsFP64bit, NoNaNsFPMath, HasStdEnc],
+let AdditionalPredicates = [NoNaNsFPMath],
isCodeGenOnly=1 in {
- def NMADD_D64 : NMADDS_FT<"nmadd.d", FGR64Opnd, IIFmulDouble, fadd>,
- MADDS_FM<6, 1>;
- def NMSUB_D64 : NMADDS_FT<"nmsub.d", FGR64Opnd, IIFmulDouble, fsub>,
- MADDS_FM<7, 1>;
+ def NMADD_D64 : NMADDS_FT<"nmadd.d", FGR64Opnd, II_NMADD_D, fadd>,
+ MADDS_FM<6, 1>, ISA_MIPS32R2_NOT_32R6_64R6, FGR_64;
+ def NMSUB_D64 : NMADDS_FT<"nmsub.d", FGR64Opnd, II_NMSUB_D, fsub>,
+ MADDS_FM<7, 1>, ISA_MIPS32R2_NOT_32R6_64R6, FGR_64;
}
//===----------------------------------------------------------------------===//
@@ -473,8 +528,10 @@ let Predicates = [HasMips32r2, IsFP64bit, NoNaNsFPMath, HasStdEnc],
def MIPS_BRANCH_F : PatLeaf<(i32 0)>;
def MIPS_BRANCH_T : PatLeaf<(i32 1)>;
-def BC1F : BC1F_FT<"bc1f", IIBranch, MIPS_BRANCH_F>, BC1F_FM<0, 0>;
-def BC1T : BC1F_FT<"bc1t", IIBranch, MIPS_BRANCH_T>, BC1F_FM<0, 1>;
+def BC1F : MMRel, BC1F_FT<"bc1f", brtarget, IIBranch, MIPS_BRANCH_F>,
+ BC1F_FM<0, 0>, ISA_MIPS1_NOT_32R6_64R6;
+def BC1T : MMRel, BC1F_FT<"bc1t", brtarget, IIBranch, MIPS_BRANCH_T>,
+ BC1F_FM<0, 1>, ISA_MIPS1_NOT_32R6_64R6;
//===----------------------------------------------------------------------===//
// Floating Point Flag Conditions
@@ -499,12 +556,13 @@ def MIPS_FCOND_LE : PatLeaf<(i32 14)>;
def MIPS_FCOND_NGT : PatLeaf<(i32 15)>;
/// Floating Point Compare
-def FCMP_S32 : CEQS_FT<"s", FGR32, IIFcmp, MipsFPCmp>, CEQS_FM<16>;
-def FCMP_D32 : CEQS_FT<"d", AFGR64, IIFcmp, MipsFPCmp>, CEQS_FM<17>,
- Requires<[NotFP64bit, HasStdEnc]>;
+def FCMP_S32 : MMRel, CEQS_FT<"s", FGR32, II_C_CC_S, MipsFPCmp>, CEQS_FM<16>,
+ ISA_MIPS1_NOT_32R6_64R6;
+def FCMP_D32 : MMRel, CEQS_FT<"d", AFGR64, II_C_CC_D, MipsFPCmp>, CEQS_FM<17>,
+ ISA_MIPS1_NOT_32R6_64R6, AdditionalRequires<[NotFP64bit]>;
let DecoderNamespace = "Mips64" in
-def FCMP_D64 : CEQS_FT<"d", FGR64, IIFcmp, MipsFPCmp>, CEQS_FM<17>,
- Requires<[IsFP64bit, HasStdEnc]>;
+def FCMP_D64 : CEQS_FT<"d", FGR64, II_C_CC_D, MipsFPCmp>, CEQS_FM<17>,
+ ISA_MIPS1_NOT_32R6_64R6, AdditionalRequires<[IsFP64bit]>;
//===----------------------------------------------------------------------===//
// Floating Point Pseudo-Instructions
@@ -517,9 +575,9 @@ class BuildPairF64Base<RegisterOperand RO> :
[(set RO:$dst, (MipsBuildPairF64 GPR32Opnd:$lo, GPR32Opnd:$hi))]>;
def BuildPairF64 : BuildPairF64Base<AFGR64Opnd>,
- Requires<[NotFP64bit, HasStdEnc]>;
+ AdditionalRequires<[NotFP64bit]>;
def BuildPairF64_64 : BuildPairF64Base<FGR64Opnd>,
- Requires<[IsFP64bit, HasStdEnc]>;
+ AdditionalRequires<[IsFP64bit]>;
// This pseudo instr gets expanded into 2 mfc1 instrs after register
// allocation.
@@ -530,15 +588,17 @@ class ExtractElementF64Base<RegisterOperand RO> :
[(set GPR32Opnd:$dst, (MipsExtractElementF64 RO:$src, imm:$n))]>;
def ExtractElementF64 : ExtractElementF64Base<AFGR64Opnd>,
- Requires<[NotFP64bit, HasStdEnc]>;
+ AdditionalRequires<[NotFP64bit]>;
def ExtractElementF64_64 : ExtractElementF64Base<FGR64Opnd>,
- Requires<[IsFP64bit, HasStdEnc]>;
+ AdditionalRequires<[IsFP64bit]>;
//===----------------------------------------------------------------------===//
// InstAliases.
//===----------------------------------------------------------------------===//
-def : InstAlias<"bc1t $offset", (BC1T FCC0, brtarget:$offset)>;
-def : InstAlias<"bc1f $offset", (BC1F FCC0, brtarget:$offset)>;
+def : MipsInstAlias<"bc1t $offset", (BC1T FCC0, brtarget:$offset)>,
+ ISA_MIPS1_NOT_32R6_64R6;
+def : MipsInstAlias<"bc1f $offset", (BC1F FCC0, brtarget:$offset)>,
+ ISA_MIPS1_NOT_32R6_64R6;
//===----------------------------------------------------------------------===//
// Floating Point Patterns
@@ -551,55 +611,45 @@ def : MipsPat<(f32 (sint_to_fp GPR32Opnd:$src)),
def : MipsPat<(MipsTruncIntFP FGR32Opnd:$src),
(TRUNC_W_S FGR32Opnd:$src)>;
-let Predicates = [NotFP64bit, HasStdEnc] in {
- def : MipsPat<(f64 (sint_to_fp GPR32Opnd:$src)),
- (PseudoCVT_D32_W GPR32Opnd:$src)>;
- def : MipsPat<(MipsTruncIntFP AFGR64Opnd:$src),
- (TRUNC_W_D32 AFGR64Opnd:$src)>;
- def : MipsPat<(f32 (fround AFGR64Opnd:$src)),
- (CVT_S_D32 AFGR64Opnd:$src)>;
- def : MipsPat<(f64 (fextend FGR32Opnd:$src)),
- (CVT_D32_S FGR32Opnd:$src)>;
-}
+def : MipsPat<(f64 (sint_to_fp GPR32Opnd:$src)),
+ (PseudoCVT_D32_W GPR32Opnd:$src)>, FGR_32;
+def : MipsPat<(MipsTruncIntFP AFGR64Opnd:$src),
+ (TRUNC_W_D32 AFGR64Opnd:$src)>, FGR_32;
+def : MipsPat<(f32 (fround AFGR64Opnd:$src)),
+ (CVT_S_D32 AFGR64Opnd:$src)>, FGR_32;
+def : MipsPat<(f64 (fextend FGR32Opnd:$src)),
+ (CVT_D32_S FGR32Opnd:$src)>, FGR_32;
+
+def : MipsPat<(f64 fpimm0), (DMTC1 ZERO_64)>, FGR_64;
+def : MipsPat<(f64 fpimm0neg), (FNEG_D64 (DMTC1 ZERO_64))>, FGR_64;
+
+def : MipsPat<(f64 (sint_to_fp GPR32Opnd:$src)),
+ (PseudoCVT_D64_W GPR32Opnd:$src)>, FGR_64;
+def : MipsPat<(f32 (sint_to_fp GPR64Opnd:$src)),
+ (EXTRACT_SUBREG (PseudoCVT_S_L GPR64Opnd:$src), sub_lo)>, FGR_64;
+def : MipsPat<(f64 (sint_to_fp GPR64Opnd:$src)),
+ (PseudoCVT_D64_L GPR64Opnd:$src)>, FGR_64;
+
+def : MipsPat<(MipsTruncIntFP FGR64Opnd:$src),
+ (TRUNC_W_D64 FGR64Opnd:$src)>, FGR_64;
+def : MipsPat<(MipsTruncIntFP FGR32Opnd:$src),
+ (TRUNC_L_S FGR32Opnd:$src)>, FGR_64;
+def : MipsPat<(MipsTruncIntFP FGR64Opnd:$src),
+ (TRUNC_L_D64 FGR64Opnd:$src)>, FGR_64;
-let Predicates = [IsFP64bit, HasStdEnc] in {
- def : MipsPat<(f64 fpimm0), (DMTC1 ZERO_64)>;
- def : MipsPat<(f64 fpimm0neg), (FNEG_D64 (DMTC1 ZERO_64))>;
-
- def : MipsPat<(f64 (sint_to_fp GPR32Opnd:$src)),
- (PseudoCVT_D64_W GPR32Opnd:$src)>;
- def : MipsPat<(f32 (sint_to_fp GPR64Opnd:$src)),
- (EXTRACT_SUBREG (PseudoCVT_S_L GPR64Opnd:$src), sub_lo)>;
- def : MipsPat<(f64 (sint_to_fp GPR64Opnd:$src)),
- (PseudoCVT_D64_L GPR64Opnd:$src)>;
-
- def : MipsPat<(MipsTruncIntFP FGR64Opnd:$src),
- (TRUNC_W_D64 FGR64Opnd:$src)>;
- def : MipsPat<(MipsTruncIntFP FGR32Opnd:$src),
- (TRUNC_L_S FGR32Opnd:$src)>;
- def : MipsPat<(MipsTruncIntFP FGR64Opnd:$src),
- (TRUNC_L_D64 FGR64Opnd:$src)>;
-
- def : MipsPat<(f32 (fround FGR64Opnd:$src)),
- (CVT_S_D64 FGR64Opnd:$src)>;
- def : MipsPat<(f64 (fextend FGR32Opnd:$src)),
- (CVT_D64_S FGR32Opnd:$src)>;
-}
+def : MipsPat<(f32 (fround FGR64Opnd:$src)),
+ (CVT_S_D64 FGR64Opnd:$src)>, FGR_64;
+def : MipsPat<(f64 (fextend FGR32Opnd:$src)),
+ (CVT_D64_S FGR32Opnd:$src)>, FGR_64;
// Patterns for loads/stores with a reg+imm operand.
let AddedComplexity = 40 in {
- let Predicates = [HasStdEnc] in {
- def : LoadRegImmPat<LWC1, f32, load>;
- def : StoreRegImmPat<SWC1, f32>;
- }
+ def : LoadRegImmPat<LWC1, f32, load>;
+ def : StoreRegImmPat<SWC1, f32>;
- let Predicates = [IsFP64bit, HasStdEnc] in {
- def : LoadRegImmPat<LDC164, f64, load>;
- def : StoreRegImmPat<SDC164, f64>;
- }
+ def : LoadRegImmPat<LDC164, f64, load>, FGR_64;
+ def : StoreRegImmPat<SDC164, f64>, FGR_64;
- let Predicates = [NotFP64bit, HasStdEnc] in {
- def : LoadRegImmPat<LDC1, f64, load>;
- def : StoreRegImmPat<SDC1, f64>;
- }
+ def : LoadRegImmPat<LDC1, f64, load>, FGR_32;
+ def : StoreRegImmPat<SDC1, f64>, FGR_32;
}
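
The MipsInstrFPU.td hunks above replace the old ad-hoc `Requires<[..., HasStdEnc]>` lists with orthogonal "adjective" classes (`ISA_*`, `FGR_32`/`FGR_64`, `AdditionalRequires<[...]>`), each of which fills its own list on `PredicateControl`. Only `EncodingPredicates`, `FGRPredicates` and `AdditionalPredicates` are visible in this patch; the remaining members and the exact concatenation below are assumptions, so treat this as a rough TableGen sketch of how such a scheme composes (it relies on the `Predicate` class from Target.td):

    // Sketch only: each orthogonal axis gets its own predicate list, and the
    // final Predicates list is their concatenation, so adjectives can be mixed
    // freely, e.g. "def LDC164 : ..., ISA_MIPS2, FGR_64;".
    class PredicateControl {
      list<Predicate> EncodingPredicates = [];    // e.g. HasStdEnc, set by InstSE
      list<Predicate> InsnPredicates = [];        // ISA level, e.g. ISA_MIPS2 (assumed name)
      list<Predicate> FGRPredicates = [];         // FGR_32 / FGR_64
      list<Predicate> AdditionalPredicates = [];  // AdditionalRequires<[...]>
      list<Predicate> Predicates = !listconcat(EncodingPredicates, InsnPredicates,
                                               FGRPredicates, AdditionalPredicates);
    }

    class AdditionalRequires<list<Predicate> preds> {
      list<Predicate> AdditionalPredicates = preds;
    }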
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrFormats.td b/contrib/llvm/lib/Target/Mips/MipsInstrFormats.td
index 737a018..6a01ae5 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrFormats.td
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrFormats.td
@@ -93,8 +93,8 @@ class MipsInst<dag outs, dag ins, string asmstr, list<dag> pattern,
// Mips32/64 Instruction Format
class InstSE<dag outs, dag ins, string asmstr, list<dag> pattern,
InstrItinClass itin, Format f, string opstr = ""> :
- MipsInst<outs, ins, asmstr, pattern, itin, f> {
- let Predicates = [HasStdEnc];
+ MipsInst<outs, ins, asmstr, pattern, itin, f>, PredicateControl {
+ let EncodingPredicates = [HasStdEnc];
string BaseOpcode = opstr;
string Arch;
}
@@ -109,9 +109,9 @@ class MipsPseudo<dag outs, dag ins, list<dag> pattern,
// Mips32/64 Pseudo Instruction Format
class PseudoSE<dag outs, dag ins, list<dag> pattern,
- InstrItinClass itin = IIPseudo>:
- MipsPseudo<outs, ins, pattern, itin> {
- let Predicates = [HasStdEnc];
+ InstrItinClass itin = IIPseudo> :
+ MipsPseudo<outs, ins, pattern, itin>, PredicateControl {
+ let EncodingPredicates = [HasStdEnc];
}
// Pseudo-instructions for alternate assembly syntax (never used by codegen).
@@ -375,7 +375,7 @@ class LUI_FM : StdArch {
let Inst{15-0} = imm16;
}
-class JALR_FM : StdArch {
+class JALR_FM {
bits<5> rd;
bits<5> rs;
@@ -401,7 +401,7 @@ class BGEZAL_FM<bits<5> funct> : StdArch {
let Inst{15-0} = offset;
}
-class SYNC_FM {
+class SYNC_FM : StdArch {
bits<5> stype;
bits<32> Inst;
@@ -479,11 +479,91 @@ class TEQI_FM<bits<5> funct> : StdArch {
let Inst{20-16} = funct;
let Inst{15-0} = imm16;
}
+
+class WAIT_FM : StdArch {
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x10;
+ let Inst{25} = 1;
+ let Inst{24-6} = 0;
+ let Inst{5-0} = 0x20;
+}
+
+class EXTS_FM<bits<6> funct> : StdArch {
+ bits<5> rt;
+ bits<5> rs;
+ bits<5> pos;
+ bits<5> lenm1;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x1c;
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = lenm1;
+ let Inst{10-6} = pos;
+ let Inst{5-0} = funct;
+}
+
+class MTMR_FM<bits<6> funct> : StdArch {
+ bits<5> rs;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x1c;
+ let Inst{25-21} = rs;
+ let Inst{20-6} = 0;
+ let Inst{5-0} = funct;
+}
+
+class POP_FM<bits<6> funct> : StdArch {
+ bits<5> rd;
+ bits<5> rs;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x1c;
+ let Inst{25-21} = rs;
+ let Inst{20-16} = 0;
+ let Inst{15-11} = rd;
+ let Inst{10-6} = 0;
+ let Inst{5-0} = funct;
+}
+
+class SEQ_FM<bits<6> funct> : StdArch {
+ bits<5> rd;
+ bits<5> rs;
+ bits<5> rt;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x1c;
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = rd;
+ let Inst{10-6} = 0;
+ let Inst{5-0} = funct;
+}
+
+class SEQI_FM<bits<6> funct> : StdArch {
+ bits<5> rs;
+ bits<5> rt;
+ bits<10> imm10;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x1c;
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-6} = imm10;
+ let Inst{5-0} = funct;
+}
+
//===----------------------------------------------------------------------===//
// System calls format <op|code_|funct>
//===----------------------------------------------------------------------===//
-class SYS_FM<bits<6> funct>
+class SYS_FM<bits<6> funct> : StdArch
{
bits<20> code_;
bits<32> Inst;
@@ -496,7 +576,7 @@ class SYS_FM<bits<6> funct>
// Break instruction format <op|code_1|funct>
//===----------------------------------------------------------------------===//
-class BRK_FM<bits<6> funct>
+class BRK_FM<bits<6> funct> : StdArch
{
bits<10> code_1;
bits<10> code_2;
@@ -511,7 +591,7 @@ class BRK_FM<bits<6> funct>
// Exception return format <Cop0|1|0|funct>
//===----------------------------------------------------------------------===//
-class ER_FM<bits<6> funct>
+class ER_FM<bits<6> funct> : StdArch
{
bits<32> Inst;
let Inst{31-26} = 0x10;
@@ -525,7 +605,7 @@ class ER_FM<bits<6> funct>
// Enable/disable interrupt instruction format <Cop0|MFMC0|rt|12|0|sc|0|0>
//===----------------------------------------------------------------------===//
-class EI_FM<bits<1> sc>
+class EI_FM<bits<1> sc> : StdArch
{
bits<32> Inst;
bits<5> rt;
@@ -569,7 +649,7 @@ class FFI<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern>:
let Inst{15-0} = imm16;
}
-class ADDS_FM<bits<6> funct, bits<5> fmt> {
+class ADDS_FM<bits<6> funct, bits<5> fmt> : StdArch {
bits<5> fd;
bits<5> fs;
bits<5> ft;
@@ -584,7 +664,7 @@ class ADDS_FM<bits<6> funct, bits<5> fmt> {
let Inst{5-0} = funct;
}
-class ABSS_FM<bits<6> funct, bits<5> fmt> {
+class ABSS_FM<bits<6> funct, bits<5> fmt> : StdArch {
bits<5> fd;
bits<5> fs;
@@ -598,7 +678,7 @@ class ABSS_FM<bits<6> funct, bits<5> fmt> {
let Inst{5-0} = funct;
}
-class MFC1_FM<bits<5> funct> {
+class MFC1_FM<bits<5> funct> : StdArch {
bits<5> rt;
bits<5> fs;
@@ -623,7 +703,7 @@ class LW_FM<bits<6> op> : StdArch {
let Inst{15-0} = addr{15-0};
}
-class MADDS_FM<bits<3> funct, bits<3> fmt> {
+class MADDS_FM<bits<3> funct, bits<3> fmt> : StdArch {
bits<5> fd;
bits<5> fr;
bits<5> fs;
@@ -640,7 +720,7 @@ class MADDS_FM<bits<3> funct, bits<3> fmt> {
let Inst{2-0} = fmt;
}
-class LWXC1_FM<bits<6> funct> {
+class LWXC1_FM<bits<6> funct> : StdArch {
bits<5> fd;
bits<5> base;
bits<5> index;
@@ -655,7 +735,7 @@ class LWXC1_FM<bits<6> funct> {
let Inst{5-0} = funct;
}
-class SWXC1_FM<bits<6> funct> {
+class SWXC1_FM<bits<6> funct> : StdArch {
bits<5> fs;
bits<5> base;
bits<5> index;
@@ -670,7 +750,7 @@ class SWXC1_FM<bits<6> funct> {
let Inst{5-0} = funct;
}
-class BC1F_FM<bit nd, bit tf> {
+class BC1F_FM<bit nd, bit tf> : StdArch {
bits<3> fcc;
bits<16> offset;
@@ -684,7 +764,7 @@ class BC1F_FM<bit nd, bit tf> {
let Inst{15-0} = offset;
}
-class CEQS_FM<bits<5> fmt> {
+class CEQS_FM<bits<5> fmt> : StdArch {
bits<5> fs;
bits<5> ft;
bits<4> cond;
@@ -704,7 +784,7 @@ class C_COND_FM<bits<5> fmt, bits<4> c> : CEQS_FM<fmt> {
let cond = c;
}
-class CMov_I_F_FM<bits<6> funct, bits<5> fmt> {
+class CMov_I_F_FM<bits<6> funct, bits<5> fmt> : StdArch {
bits<5> fd;
bits<5> fs;
bits<5> rt;
@@ -736,7 +816,7 @@ class CMov_F_I_FM<bit tf> : StdArch {
let Inst{5-0} = 1;
}
-class CMov_F_F_FM<bits<5> fmt, bit tf> {
+class CMov_F_F_FM<bits<5> fmt, bit tf> : StdArch {
bits<5> fd;
bits<5> fs;
bits<3> fcc;
@@ -752,3 +832,75 @@ class CMov_F_F_FM<bits<5> fmt, bit tf> {
let Inst{10-6} = fd;
let Inst{5-0} = 0x11;
}
+
+class BARRIER_FM<bits<5> op> : StdArch {
+ bits<32> Inst;
+
+ let Inst{31-26} = 0; // SPECIAL
+ let Inst{25-21} = 0;
+ let Inst{20-16} = 0; // rt = 0
+ let Inst{15-11} = 0; // rd = 0
+ let Inst{10-6} = op; // Operation
+ let Inst{5-0} = 0; // SLL
+}
+
+class SDBBP_FM : StdArch {
+ bits<20> code_;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0b011100; // SPECIAL2
+ let Inst{25-6} = code_;
+ let Inst{5-0} = 0b111111; // SDBBP
+}
+
+class JR_HB_FM<bits<6> op> : StdArch{
+ bits<5> rs;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0; // SPECIAL
+ let Inst{25-21} = rs;
+ let Inst{20-11} = 0;
+ let Inst{10} = 1;
+ let Inst{9-6} = 0;
+ let Inst{5-0} = op;
+}
+
+class JALR_HB_FM<bits<6> op> : StdArch {
+ bits<5> rd;
+ bits<5> rs;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0; // SPECIAL
+ let Inst{25-21} = rs;
+ let Inst{20-16} = 0;
+ let Inst{15-11} = rd;
+ let Inst{10} = 1;
+ let Inst{9-6} = 0;
+ let Inst{5-0} = op;
+}
+
+class COP0_TLB_FM<bits<6> op> : StdArch {
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x10; // COP0
+ let Inst{25} = 1; // CO
+ let Inst{24-6} = 0;
+ let Inst{5-0} = op; // Operation
+}
+
+class CACHEOP_FM<bits<6> op> : StdArch {
+ bits<21> addr;
+ bits<5> hint;
+ bits<5> base = addr{20-16};
+ bits<16> offset = addr{15-0};
+
+ bits<32> Inst;
+
+ let Inst{31-26} = op;
+ let Inst{25-21} = base;
+ let Inst{20-16} = hint;
+ let Inst{15-0} = offset;
+}
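
The new *_FM classes added above are pure encoding descriptions: each names an instruction's fields and pins them to bit ranges of a 32-bit word. As a cross-check of the SEQ_FM layout (opcode 0x1c in bits 31-26, rs/rt/rd in 25-21/20-16/15-11, zeroes in 10-6, funct in 5-0), here is a small self-contained C++ helper that is not part of the patch; the funct value in the example call is illustrative only.

    #include <cstdint>
    #include <cstdio>

    // Packs a SPECIAL2-style R-type word following the SEQ_FM layout above.
    // Field widths: op=6, rs=5, rt=5, rd=5, zero=5, funct=6 (32 bits total).
    static uint32_t encodeSEQ(unsigned rs, unsigned rt, unsigned rd,
                              unsigned funct) {
      return (0x1cu << 26) |        // Inst{31-26} = 0x1c (SPECIAL2)
             ((rs & 0x1f) << 21) |  // Inst{25-21} = rs
             ((rt & 0x1f) << 16) |  // Inst{20-16} = rt
             ((rd & 0x1f) << 11) |  // Inst{15-11} = rd
             /* Inst{10-6} = 0 */
             (funct & 0x3f);        // Inst{5-0}  = funct
    }

    int main() {
      // Example: rs=$4, rt=$5, rd=$2; funct value is illustrative.
      std::printf("0x%08x\n", encodeSEQ(4, 5, 2, 0x2a));
      return 0;
    }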
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp
index 0ebad05..dcc0e24 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp
@@ -22,23 +22,23 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
#define GET_INSTRINFO_CTOR_DTOR
#include "MipsGenInstrInfo.inc"
-using namespace llvm;
-
// Pin the vtable to this file.
void MipsInstrInfo::anchor() {}
-MipsInstrInfo::MipsInstrInfo(MipsTargetMachine &tm, unsigned UncondBr)
- : MipsGenInstrInfo(Mips::ADJCALLSTACKDOWN, Mips::ADJCALLSTACKUP),
- TM(tm), UncondBrOpc(UncondBr) {}
+MipsInstrInfo::MipsInstrInfo(const MipsSubtarget &STI, unsigned UncondBr)
+ : MipsGenInstrInfo(Mips::ADJCALLSTACKDOWN, Mips::ADJCALLSTACKUP),
+ Subtarget(STI), UncondBrOpc(UncondBr) {}
-const MipsInstrInfo *MipsInstrInfo::create(MipsTargetMachine &TM) {
- if (TM.getSubtargetImpl()->inMips16Mode())
- return llvm::createMips16InstrInfo(TM);
+const MipsInstrInfo *MipsInstrInfo::create(MipsSubtarget &STI) {
+ if (STI.inMips16Mode())
+ return llvm::createMips16InstrInfo(STI);
- return llvm::createMipsSEInstrInfo(TM);
+ return llvm::createMipsSEInstrInfo(STI);
}
bool MipsInstrInfo::isZeroImm(const MachineOperand &op) const {
@@ -94,10 +94,10 @@ bool MipsInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
return (BT == BT_None) || (BT == BT_Indirect);
}
-void MipsInstrInfo::BuildCondBr(MachineBasicBlock &MBB,
- MachineBasicBlock *TBB, DebugLoc DL,
- const SmallVectorImpl<MachineOperand>& Cond)
- const {
+void
+MipsInstrInfo::BuildCondBr(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ DebugLoc DL,
+ const SmallVectorImpl<MachineOperand> &Cond) const {
unsigned Opc = Cond[0].getImm();
const MCInstrDesc &MCID = get(Opc);
MachineInstrBuilder MIB = BuildMI(&MBB, DL, MCID);
@@ -113,11 +113,9 @@ void MipsInstrInfo::BuildCondBr(MachineBasicBlock &MBB,
MIB.addMBB(TBB);
}
-unsigned MipsInstrInfo::
-InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const {
+unsigned MipsInstrInfo::InsertBranch(
+ MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
@@ -145,9 +143,7 @@ InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
return 1;
}
-unsigned MipsInstrInfo::
-RemoveBranch(MachineBasicBlock &MBB) const
-{
+unsigned MipsInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
MachineBasicBlock::reverse_iterator I = MBB.rbegin(), REnd = MBB.rend();
MachineBasicBlock::reverse_iterator FirstBr;
unsigned removed;
@@ -160,7 +156,7 @@ RemoveBranch(MachineBasicBlock &MBB) const
// Up to 2 branches are removed.
// Note that indirect branches are not removed.
- for(removed = 0; I != REnd && removed < 2; ++I, ++removed)
+ for (removed = 0; I != REnd && removed < 2; ++I, ++removed)
if (!getAnalyzableBrOpc(I->getOpcode()))
break;
@@ -171,20 +167,18 @@ RemoveBranch(MachineBasicBlock &MBB) const
/// ReverseBranchCondition - Return the inverse opcode of the
/// specified Branch instruction.
-bool MipsInstrInfo::
-ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const
-{
+bool MipsInstrInfo::ReverseBranchCondition(
+ SmallVectorImpl<MachineOperand> &Cond) const {
assert( (Cond.size() && Cond.size() <= 3) &&
"Invalid Mips branch condition!");
Cond[0].setImm(getOppositeBranchOpc(Cond[0].getImm()));
return false;
}
-MipsInstrInfo::BranchType MipsInstrInfo::
-AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB, SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify,
- SmallVectorImpl<MachineInstr*> &BranchInstrs) const {
+MipsInstrInfo::BranchType MipsInstrInfo::AnalyzeBranch(
+ MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond, bool AllowModify,
+ SmallVectorImpl<MachineInstr *> &BranchInstrs) const {
MachineBasicBlock::reverse_iterator I = MBB.rbegin(), REnd = MBB.rend();
@@ -195,7 +189,7 @@ AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
if (I == REnd || !isUnpredicatedTerminator(&*I)) {
// This block ends with no branches (it just falls through to its succ).
// Leave TBB/FBB null.
- TBB = FBB = NULL;
+ TBB = FBB = nullptr;
return BT_NoBranch;
}
@@ -209,7 +203,7 @@ AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
// Get the second to last instruction in the block.
unsigned SecondLastOpc = 0;
- MachineInstr *SecondLastInst = NULL;
+ MachineInstr *SecondLastInst = nullptr;
if (++I != REnd) {
SecondLastInst = &*I;
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h
index d9ac961..bdf2fd3 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h
@@ -9,6 +9,10 @@
//
// This file contains the Mips implementation of the TargetInstrInfo class.
//
+// FIXME: We need to override TargetInstrInfo::getInlineAsmLength method in
+// order for MipsLongBranch pass to work correctly when the code has inline
+// assembly. The returned value doesn't have to be the asm instruction's exact
+// size in bytes; MipsLongBranch only expects it to be the correct upper bound.
//===----------------------------------------------------------------------===//
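The FIXME above only asks for a conservative estimate, not an exact byte count. The sketch below is a hypothetical illustration of such an upper bound, with made-up names rather than LLVM's real TargetInstrInfo interface: count the inline-asm statements (separated by newlines or semicolons) and assume each assembles to at most the largest encoded instruction size.

#include <cctype>

// Hypothetical sketch only (illustrative name, not an LLVM API): a
// conservative upper bound on the encoded size of an inline-asm blob, which
// the FIXME above says is all MipsLongBranch needs.  Each statement is
// assumed to assemble to at most MaxInstSizeInBytes; a real implementation
// would also have to account for data directives such as .space.
unsigned inlineAsmLengthUpperBound(const char *Str, unsigned MaxInstSizeInBytes) {
  unsigned NumStatements = 0;
  bool AtStatementStart = true;
  for (; Str && *Str; ++Str) {
    if (*Str == '\n' || *Str == ';') {
      AtStatementStart = true;
    } else if (AtStatementStart &&
               !std::isspace(static_cast<unsigned char>(*Str))) {
      ++NumStatements;              // first visible character of a statement
      AtStatementStart = false;
    }
  }
  return NumStatements * MaxInstSizeInBytes;  // never smaller than the real size
}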
#ifndef MIPSINSTRUCTIONINFO_H
@@ -29,7 +33,7 @@ namespace llvm {
class MipsInstrInfo : public MipsGenInstrInfo {
virtual void anchor();
protected:
- MipsTargetMachine &TM;
+ const MipsSubtarget &Subtarget;
unsigned UncondBrOpc;
public:
@@ -42,25 +46,25 @@ public:
BT_Indirect // One indirect branch.
};
- explicit MipsInstrInfo(MipsTargetMachine &TM, unsigned UncondBrOpc);
+ explicit MipsInstrInfo(const MipsSubtarget &STI, unsigned UncondBrOpc);
- static const MipsInstrInfo *create(MipsTargetMachine &TM);
+ static const MipsInstrInfo *create(MipsSubtarget &STI);
/// Branch Analysis
- virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const;
+ bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const override;
- virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
+ unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
- virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const;
+ unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const override;
- virtual
- bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
+ bool
+ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
BranchType AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
@@ -69,8 +73,8 @@ public:
SmallVectorImpl<MachineInstr*> &BranchInstrs) const;
/// Insert nop instruction when hazard condition is found
- virtual void insertNoop(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const;
+ void insertNoop(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const override;
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
@@ -83,19 +87,19 @@ public:
/// Return the number of bytes of code the specified instruction may be.
unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
- virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
+ void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override {
storeRegToStack(MBB, MBBI, SrcReg, isKill, FrameIndex, RC, TRI, 0);
}
- virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
+ void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override {
loadRegFromStack(MBB, MBBI, DestReg, FrameIndex, RC, TRI, 0);
}
@@ -136,8 +140,8 @@ private:
};
/// Create MipsInstrInfo objects.
-const MipsInstrInfo *createMips16InstrInfo(MipsTargetMachine &TM);
-const MipsInstrInfo *createMipsSEInstrInfo(MipsTargetMachine &TM);
+const MipsInstrInfo *createMips16InstrInfo(const MipsSubtarget &STI);
+const MipsInstrInfo *createMipsSEInstrInfo(const MipsSubtarget &STI);
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td
index ebdbaa4..8e9472c 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td
@@ -146,52 +146,146 @@ def MipsSDR : SDNode<"MipsISD::SDR", SDTStore,
//===----------------------------------------------------------------------===//
// Mips Instruction Predicate Definitions.
//===----------------------------------------------------------------------===//
-def HasSEInReg : Predicate<"Subtarget.hasSEInReg()">,
- AssemblerPredicate<"FeatureSEInReg">;
-def HasBitCount : Predicate<"Subtarget.hasBitCount()">,
- AssemblerPredicate<"FeatureBitCount">;
-def HasSwap : Predicate<"Subtarget.hasSwap()">,
- AssemblerPredicate<"FeatureSwap">;
-def HasCondMov : Predicate<"Subtarget.hasCondMov()">,
- AssemblerPredicate<"FeatureCondMov">;
-def HasFPIdx : Predicate<"Subtarget.hasFPIdx()">,
- AssemblerPredicate<"FeatureFPIdx">;
-def HasMips32 : Predicate<"Subtarget.hasMips32()">,
+def HasMips2 : Predicate<"Subtarget->hasMips2()">,
+ AssemblerPredicate<"FeatureMips2">;
+def HasMips3_32 : Predicate<"Subtarget->hasMips3_32()">,
+ AssemblerPredicate<"FeatureMips3_32">;
+def HasMips3_32r2 : Predicate<"Subtarget->hasMips3_32r2()">,
+ AssemblerPredicate<"FeatureMips3_32r2">;
+def HasMips3 : Predicate<"Subtarget->hasMips3()">,
+ AssemblerPredicate<"FeatureMips3">;
+def HasMips4_32 : Predicate<"Subtarget->hasMips4_32()">,
+ AssemblerPredicate<"FeatureMips4_32">;
+def HasMips4_32r2 : Predicate<"Subtarget->hasMips4_32r2()">,
+ AssemblerPredicate<"FeatureMips4_32r2">;
+def HasMips5_32r2 : Predicate<"Subtarget->hasMips5_32r2()">,
+ AssemblerPredicate<"FeatureMips5_32r2">;
+def HasMips32 : Predicate<"Subtarget->hasMips32()">,
AssemblerPredicate<"FeatureMips32">;
-def HasMips32r2 : Predicate<"Subtarget.hasMips32r2()">,
+def HasMips32r2 : Predicate<"Subtarget->hasMips32r2()">,
AssemblerPredicate<"FeatureMips32r2">;
-def HasMips64 : Predicate<"Subtarget.hasMips64()">,
+def HasMips32r6 : Predicate<"Subtarget->hasMips32r6()">,
+ AssemblerPredicate<"FeatureMips32r6">;
+def NotMips32r6 : Predicate<"!Subtarget->hasMips32r6()">,
+ AssemblerPredicate<"!FeatureMips32r6">;
+def IsGP64bit : Predicate<"Subtarget->isGP64bit()">,
+ AssemblerPredicate<"FeatureGP64Bit">;
+def IsGP32bit : Predicate<"!Subtarget->isGP64bit()">,
+ AssemblerPredicate<"!FeatureGP64Bit">;
+def HasMips64 : Predicate<"Subtarget->hasMips64()">,
AssemblerPredicate<"FeatureMips64">;
-def NotMips64 : Predicate<"!Subtarget.hasMips64()">,
- AssemblerPredicate<"!FeatureMips64">;
-def HasMips64r2 : Predicate<"Subtarget.hasMips64r2()">,
+def HasMips64r2 : Predicate<"Subtarget->hasMips64r2()">,
AssemblerPredicate<"FeatureMips64r2">;
-def IsN64 : Predicate<"Subtarget.isABI_N64()">,
+def HasMips64r6 : Predicate<"Subtarget->hasMips64r6()">,
+ AssemblerPredicate<"FeatureMips64r6">;
+def NotMips64r6 : Predicate<"!Subtarget->hasMips64r6()">,
+ AssemblerPredicate<"!FeatureMips64r6">;
+def IsN64 : Predicate<"Subtarget->isABI_N64()">,
AssemblerPredicate<"FeatureN64">;
-def NotN64 : Predicate<"!Subtarget.isABI_N64()">,
- AssemblerPredicate<"!FeatureN64">;
-def InMips16Mode : Predicate<"Subtarget.inMips16Mode()">,
+def InMips16Mode : Predicate<"Subtarget->inMips16Mode()">,
AssemblerPredicate<"FeatureMips16">;
+def HasCnMips : Predicate<"Subtarget->hasCnMips()">,
+ AssemblerPredicate<"FeatureCnMips">;
def RelocStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">,
AssemblerPredicate<"FeatureMips32">;
def RelocPIC : Predicate<"TM.getRelocationModel() == Reloc::PIC_">,
AssemblerPredicate<"FeatureMips32">;
-def NoNaNsFPMath : Predicate<"TM.Options.NoNaNsFPMath">,
- AssemblerPredicate<"FeatureMips32">;
-def HasStdEnc : Predicate<"Subtarget.hasStandardEncoding()">,
+def NoNaNsFPMath : Predicate<"TM.Options.NoNaNsFPMath">;
+def HasStdEnc : Predicate<"Subtarget->hasStandardEncoding()">,
AssemblerPredicate<"!FeatureMips16">;
-def NotDSP : Predicate<"!Subtarget.hasDSP()">;
-def InMicroMips : Predicate<"Subtarget.inMicroMipsMode()">,
+def NotDSP : Predicate<"!Subtarget->hasDSP()">;
+def InMicroMips : Predicate<"Subtarget->inMicroMipsMode()">,
AssemblerPredicate<"FeatureMicroMips">;
-def NotInMicroMips : Predicate<"!Subtarget.inMicroMipsMode()">,
+def NotInMicroMips : Predicate<"!Subtarget->inMicroMipsMode()">,
AssemblerPredicate<"!FeatureMicroMips">;
-def IsLE : Predicate<"Subtarget.isLittle()">;
-def IsBE : Predicate<"!Subtarget.isLittle()">;
+def IsLE : Predicate<"Subtarget->isLittle()">;
+def IsBE : Predicate<"!Subtarget->isLittle()">;
+def IsNotNaCl : Predicate<"!Subtarget->isTargetNaCl()">;
+
+//===----------------------------------------------------------------------===//
+// Mips GPR size adjectives.
+// They are mutually exclusive.
+//===----------------------------------------------------------------------===//
+
+class GPR_32 { list<Predicate> GPRPredicates = [IsGP32bit]; }
+class GPR_64 { list<Predicate> GPRPredicates = [IsGP64bit]; }
+
+//===----------------------------------------------------------------------===//
+// Mips ISA/ASE membership and instruction group membership adjectives.
+// They are mutually exclusive.
+//===----------------------------------------------------------------------===//
+
+// FIXME: I'd prefer to use additive predicates to build the instruction sets
+// but we are short on assembler feature bits at the moment. Using a
+// subtractive predicate will hopefully keep us under the 32 predicate
+// limit long enough to develop an alternative way to handle P1||P2
+// predicates.
+class ISA_MIPS1_NOT_32R6_64R6 {
+ list<Predicate> InsnPredicates = [NotMips32r6, NotMips64r6];
+}
+class ISA_MIPS2 { list<Predicate> InsnPredicates = [HasMips2]; }
+class ISA_MIPS2_NOT_32R6_64R6 {
+ list<Predicate> InsnPredicates = [HasMips2, NotMips32r6, NotMips64r6];
+}
+class ISA_MIPS3 { list<Predicate> InsnPredicates = [HasMips3]; }
+class ISA_MIPS3_NOT_32R6_64R6 {
+ list<Predicate> InsnPredicates = [HasMips3, NotMips32r6, NotMips64r6];
+}
+class ISA_MIPS32 { list<Predicate> InsnPredicates = [HasMips32]; }
+class ISA_MIPS32_NOT_32R6_64R6 {
+ list<Predicate> InsnPredicates = [HasMips32, NotMips32r6, NotMips64r6];
+}
+class ISA_MIPS32R2 { list<Predicate> InsnPredicates = [HasMips32r2]; }
+class ISA_MIPS32R2_NOT_32R6_64R6 {
+ list<Predicate> InsnPredicates = [HasMips32r2, NotMips32r6, NotMips64r6];
+}
+class ISA_MIPS64 { list<Predicate> InsnPredicates = [HasMips64]; }
+class ISA_MIPS64_NOT_64R6 {
+ list<Predicate> InsnPredicates = [HasMips64, NotMips64r6];
+}
+class ISA_MIPS64R2 { list<Predicate> InsnPredicates = [HasMips64r2]; }
+class ISA_MIPS32R6 { list<Predicate> InsnPredicates = [HasMips32r6]; }
+class ISA_MIPS64R6 { list<Predicate> InsnPredicates = [HasMips64r6]; }
+
+// The portions of MIPS-III that were also added to MIPS32
+class INSN_MIPS3_32 { list<Predicate> InsnPredicates = [HasMips3_32]; }
-class MipsPat<dag pattern, dag result> : Pat<pattern, result> {
- let Predicates = [HasStdEnc];
+// The portions of MIPS-III that were also added to MIPS32 but were removed in
+// MIPS32r6 and MIPS64r6.
+class INSN_MIPS3_32_NOT_32R6_64R6 {
+ list<Predicate> InsnPredicates = [HasMips3_32, NotMips32r6, NotMips64r6];
}
+// The portions of MIPS-III that were also added to MIPS32
+class INSN_MIPS3_32R2 { list<Predicate> InsnPredicates = [HasMips3_32r2]; }
+
+// The portions of MIPS-IV that were also added to MIPS32 but were removed in
+// MIPS32r6 and MIPS64r6.
+class INSN_MIPS4_32_NOT_32R6_64R6 {
+ list<Predicate> InsnPredicates = [HasMips4_32, NotMips32r6, NotMips64r6];
+}
+
+// The portions of MIPS-IV that were also added to MIPS32r2 but were removed in
+// MIPS32r6 and MIPS64r6.
+class INSN_MIPS4_32R2_NOT_32R6_64R6 {
+ list<Predicate> InsnPredicates = [HasMips4_32r2, NotMips32r6, NotMips64r6];
+}
+
+// The portions of MIPS-V that were also added to MIPS32r2 but were removed in
+// MIPS32r6 and MIPS64r6.
+class INSN_MIPS5_32R2_NOT_32R6_64R6 {
+ list<Predicate> InsnPredicates = [HasMips5_32r2, NotMips32r6, NotMips64r6];
+}
+
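As a worked illustration of the subtractive scheme the FIXME above describes, the plain C++ sketch below (SubtargetBits and its members are made-up names) evaluates an ISA_MIPS32_NOT_32R6_64R6-style predicate list: "added in MIPS32, removed in the r6 revisions" becomes HasMips32 && !HasMips32r6 && !HasMips64r6 instead of a dedicated pre-r6 feature bit.

#include <cassert>

// Hypothetical illustration of the subtractive predicate encoding described
// in the FIXME above; the struct and its fields are made-up names.
struct SubtargetBits {
  bool HasMips32;
  bool HasMips32r6;
  bool HasMips64r6;
};

// Equivalent of the [HasMips32, NotMips32r6, NotMips64r6] predicate list.
static bool availableOnMips32NotR6(const SubtargetBits &S) {
  return S.HasMips32 && !S.HasMips32r6 && !S.HasMips64r6;
}

int main() {
  assert(availableOnMips32NotR6({true, false, false}));   // plain MIPS32: present
  assert(!availableOnMips32NotR6({true, true, false}));   // MIPS32r6: removed
  return 0;
}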
+//===----------------------------------------------------------------------===//
+
+class MipsPat<dag pattern, dag result> : Pat<pattern, result>, PredicateControl {
+ let EncodingPredicates = [HasStdEnc];
+}
+
+class MipsInstAlias<string Asm, dag Result, bit Emit = 0b1> :
+ InstAlias<Asm, Result, Emit>, PredicateControl;
+
class IsCommutable {
bit isCommutable = 1;
}
@@ -235,23 +329,49 @@ include "MipsInstrFormats.td"
// Mips Operand, Complex Patterns and Transformations Definitions.
//===----------------------------------------------------------------------===//
+def MipsJumpTargetAsmOperand : AsmOperandClass {
+ let Name = "JumpTarget";
+ let ParserMethod = "ParseJumpTarget";
+ let PredicateMethod = "isImm";
+ let RenderMethod = "addImmOperands";
+}
+
// Instruction operand types
def jmptarget : Operand<OtherVT> {
let EncoderMethod = "getJumpTargetOpValue";
+ let ParserMatchClass = MipsJumpTargetAsmOperand;
}
def brtarget : Operand<OtherVT> {
let EncoderMethod = "getBranchTargetOpValue";
let OperandType = "OPERAND_PCREL";
let DecoderMethod = "DecodeBranchTarget";
+ let ParserMatchClass = MipsJumpTargetAsmOperand;
}
def calltarget : Operand<iPTR> {
let EncoderMethod = "getJumpTargetOpValue";
+ let ParserMatchClass = MipsJumpTargetAsmOperand;
}
+def simm9 : Operand<i32>;
+def simm10 : Operand<i32>;
+def simm11 : Operand<i32>;
+
def simm16 : Operand<i32> {
let DecoderMethod= "DecodeSimm16";
}
+def simm19_lsl2 : Operand<i32> {
+ let EncoderMethod = "getSimm19Lsl2Encoding";
+ let DecoderMethod = "DecodeSimm19Lsl2";
+ let ParserMatchClass = MipsJumpTargetAsmOperand;
+}
+
+def simm18_lsl3 : Operand<i32> {
+ let EncoderMethod = "getSimm18Lsl3Encoding";
+ let DecoderMethod = "DecodeSimm18Lsl3";
+ let ParserMatchClass = MipsJumpTargetAsmOperand;
+}
+
def simm20 : Operand<i32> {
}
@@ -265,7 +385,20 @@ def simm16_64 : Operand<i64> {
let DecoderMethod = "DecodeSimm16";
}
+// Zero
+def uimmz : Operand<i32> {
+ let PrintMethod = "printUnsignedImm";
+}
+
// Unsigned Operand
+def uimm2 : Operand<i32> {
+ let PrintMethod = "printUnsignedImm";
+}
+
+def uimm3 : Operand<i32> {
+ let PrintMethod = "printUnsignedImm";
+}
+
def uimm5 : Operand<i32> {
let PrintMethod = "printUnsignedImm";
}
@@ -286,24 +419,30 @@ def MipsMemAsmOperand : AsmOperandClass {
let ParserMethod = "parseMemOperand";
}
+def MipsMemSimm11AsmOperand : AsmOperandClass {
+ let Name = "MemOffsetSimm11";
+ let SuperClasses = [MipsMemAsmOperand];
+ let RenderMethod = "addMemOperands";
+ let ParserMethod = "parseMemOperand";
+ let PredicateMethod = "isMemWithSimmOffset<11>";
+ //let DiagnosticType = "Simm11";
+}
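The PredicateMethod above implies a signed 11-bit range check on the memory operand's offset. A minimal sketch of what that check amounts to, assuming the parser ultimately only needs the [-1024, 1023] range test (the real code would presumably use LLVM's isInt<11>() from MathExtras.h rather than this hand-rolled version):

#include <cstdint>

// Hypothetical stand-in for an "offset fits in simm11" predicate: a signed
// 11-bit immediate can encode exactly the range [-1024, 1023].
static bool isSimm11Offset(int64_t Offset) {
  return Offset >= -1024 && Offset <= 1023;
}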
+
def MipsInvertedImmoperand : AsmOperandClass {
let Name = "InvNum";
let RenderMethod = "addImmOperands";
let ParserMethod = "parseInvNum";
}
-def PtrRegAsmOperand : AsmOperandClass {
- let Name = "PtrReg";
- let ParserMethod = "parsePtrReg";
+def InvertedImOperand : Operand<i32> {
+ let ParserMatchClass = MipsInvertedImmoperand;
}
-
-def InvertedImOperand : Operand<i32> {
+def InvertedImOperand64 : Operand<i64> {
let ParserMatchClass = MipsInvertedImmoperand;
}
-// Address operand
-def mem : Operand<iPTR> {
+class mem_generic : Operand<iPTR> {
let PrintMethod = "printMemOperand";
let MIOperandInfo = (ops ptr_rc, simm16);
let EncoderMethod = "getMemEncoding";
@@ -311,6 +450,26 @@ def mem : Operand<iPTR> {
let OperandType = "OPERAND_MEMORY";
}
+// Address operand
+def mem : mem_generic;
+
+// MSA specific address operand
+def mem_msa : mem_generic {
+ let MIOperandInfo = (ops ptr_rc, simm10);
+ let EncoderMethod = "getMSAMemEncoding";
+}
+
+def mem_simm9 : mem_generic {
+ let MIOperandInfo = (ops ptr_rc, simm9);
+ let EncoderMethod = "getMemEncoding";
+}
+
+def mem_simm11 : mem_generic {
+ let MIOperandInfo = (ops ptr_rc, simm11);
+ let EncoderMethod = "getMemEncoding";
+ let ParserMatchClass = MipsMemSimm11AsmOperand;
+}
+
def mem_ea : Operand<iPTR> {
let PrintMethod = "printMemOperandEA";
let MIOperandInfo = (ops ptr_rc, simm16);
@@ -321,7 +480,7 @@ def mem_ea : Operand<iPTR> {
def PtrRC : Operand<iPTR> {
let MIOperandInfo = (ops ptr_rc);
let DecoderMethod = "DecodePtrRegisterClass";
- let ParserMatchClass = PtrRegAsmOperand;
+ let ParserMatchClass = GPR32AsmOperand;
}
// size operand of ext instruction
@@ -349,6 +508,9 @@ def HI16 : SDNodeXForm<imm, [{
// Plus 1.
def Plus1 : SDNodeXForm<imm, [{ return getImm(N, N->getSExtValue() + 1); }]>;
+// Node immediate is zero (e.g. insve.d)
+def immz : PatLeaf<(imm), [{ return N->getSExtValue() == 0; }]>;
+
// Node immediate fits as 16-bit sign extended on target immediate.
// e.g. addi, andi
def immSExt8 : PatLeaf<(imm), [{ return isInt<8>(N->getSExtValue()); }]>;
@@ -400,6 +562,8 @@ def addrRegReg :
def addrDefault :
ComplexPattern<iPTR, 2, "selectAddrDefault", [frameindex]>;
+def addrimm10 : ComplexPattern<iPTR, 2, "selectIntAddrMSA", [frameindex]>;
+
//===----------------------------------------------------------------------===//
// Instructions specific format
//===----------------------------------------------------------------------===//
@@ -413,6 +577,7 @@ class ArithLogicR<string opstr, RegisterOperand RO, bit isComm = 0,
[(set RO:$rd, (OpNode RO:$rs, RO:$rt))], Itin, FrmR, opstr> {
let isCommutable = isComm;
let isReMaterializable = 1;
+ let TwoOperandAliasConstraint = "$rd = $rs";
}
// Arithmetic and logical instructions with 2 register operands.
@@ -429,9 +594,9 @@ class ArithLogicI<string opstr, Operand Od, RegisterOperand RO,
}
// Arithmetic Multiply ADD/SUB
-class MArithR<string opstr, bit isComm = 0> :
+class MArithR<string opstr, InstrItinClass itin, bit isComm = 0> :
InstSE<(outs), (ins GPR32Opnd:$rs, GPR32Opnd:$rt),
- !strconcat(opstr, "\t$rs, $rt"), [], IIImult, FrmR, opstr> {
+ !strconcat(opstr, "\t$rs, $rt"), [], itin, FrmR, opstr> {
let Defs = [HI0, LO0];
let Uses = [HI0, LO0];
let isCommutable = isComm;
@@ -441,28 +606,32 @@ class MArithR<string opstr, bit isComm = 0> :
class LogicNOR<string opstr, RegisterOperand RO>:
InstSE<(outs RO:$rd), (ins RO:$rs, RO:$rt),
!strconcat(opstr, "\t$rd, $rs, $rt"),
- [(set RO:$rd, (not (or RO:$rs, RO:$rt)))], IIArith, FrmR, opstr> {
+ [(set RO:$rd, (not (or RO:$rs, RO:$rt)))], II_NOR, FrmR, opstr> {
let isCommutable = 1;
}
// Shifts
class shift_rotate_imm<string opstr, Operand ImmOpnd,
- RegisterOperand RO, SDPatternOperator OpNode = null_frag,
+ RegisterOperand RO, InstrItinClass itin,
+ SDPatternOperator OpNode = null_frag,
SDPatternOperator PF = null_frag> :
InstSE<(outs RO:$rd), (ins RO:$rt, ImmOpnd:$shamt),
!strconcat(opstr, "\t$rd, $rt, $shamt"),
- [(set RO:$rd, (OpNode RO:$rt, PF:$shamt))], IIArith, FrmR, opstr>;
+ [(set RO:$rd, (OpNode RO:$rt, PF:$shamt))], itin, FrmR, opstr> {
+ let TwoOperandAliasConstraint = "$rt = $rd";
+}
-class shift_rotate_reg<string opstr, RegisterOperand RO,
+class shift_rotate_reg<string opstr, RegisterOperand RO, InstrItinClass itin,
SDPatternOperator OpNode = null_frag>:
InstSE<(outs RO:$rd), (ins RO:$rt, GPR32Opnd:$rs),
!strconcat(opstr, "\t$rd, $rt, $rs"),
- [(set RO:$rd, (OpNode RO:$rt, GPR32Opnd:$rs))], IIArith, FrmR, opstr>;
+ [(set RO:$rd, (OpNode RO:$rt, GPR32Opnd:$rs))], itin, FrmR,
+ opstr>;
// Load Upper Immediate
class LoadUpper<string opstr, RegisterOperand RO, Operand Imm>:
InstSE<(outs RO:$rt), (ins Imm:$imm16), !strconcat(opstr, "\t$rt, $imm16"),
- [], IIArith, FrmI, opstr>, IsAsCheapAsAMove {
+ [], II_LUI, FrmI, opstr>, IsAsCheapAsAMove {
let neverHasSideEffects = 1;
let isReMaterializable = 1;
}
@@ -533,14 +702,14 @@ class SetCC_R<string opstr, PatFrag cond_op, RegisterOperand RO> :
InstSE<(outs GPR32Opnd:$rd), (ins RO:$rs, RO:$rt),
!strconcat(opstr, "\t$rd, $rs, $rt"),
[(set GPR32Opnd:$rd, (cond_op RO:$rs, RO:$rt))],
- IIslt, FrmR, opstr>;
+ II_SLT_SLTU, FrmR, opstr>;
class SetCC_I<string opstr, PatFrag cond_op, Operand Od, PatLeaf imm_type,
RegisterOperand RO>:
InstSE<(outs GPR32Opnd:$rt), (ins RO:$rs, Od:$imm16),
!strconcat(opstr, "\t$rt, $rs, $imm16"),
[(set GPR32Opnd:$rt, (cond_op RO:$rs, imm_type:$imm16))],
- IIslt, FrmI, opstr>;
+ II_SLTI_SLTIU, FrmI, opstr>;
// Jump
class JumpFJ<DAGOperand opnd, string opstr, SDPatternOperator operator,
@@ -562,7 +731,7 @@ class UncondBranch<Instruction BEQInst> :
let isTerminator = 1;
let isBarrier = 1;
let hasDelaySlot = 1;
- let Predicates = [RelocPIC, HasStdEnc];
+ let AdditionalPredicates = [RelocPIC];
let Defs = [AT];
}
@@ -574,20 +743,11 @@ class JumpFR<string opstr, RegisterOperand RO,
FrmR, opstr>;
// Indirect branch
-class IndirectBranch<string opstr, RegisterOperand RO> :
- JumpFR<opstr, RO, brind> {
+class IndirectBranch<string opstr, RegisterOperand RO> : JumpFR<opstr, RO> {
let isBranch = 1;
let isIndirectBranch = 1;
}
-// Return instruction
-class RetBase<string opstr, RegisterOperand RO>: JumpFR<opstr, RO> {
- let isReturn = 1;
- let isCodeGenOnly = 1;
- let hasCtrlDep = 1;
- let hasExtraSrcRegAllocReq = 1;
-}
-
// Jump and Link (Call)
let isCall=1, hasDelaySlot=1, Defs = [RA] in {
class JumpLink<string opstr, DAGOperand opnd> :
@@ -603,7 +763,7 @@ let isCall=1, hasDelaySlot=1, Defs = [RA] in {
class JumpLinkReg<string opstr, RegisterOperand RO>:
InstSE<(outs RO:$rd), (ins RO:$rs), !strconcat(opstr, "\t$rd, $rs"),
- [], IIBranch, FrmR, opstr>;
+ [], IIBranch, FrmR>;
class BGEZAL_FT<string opstr, DAGOperand opnd, RegisterOperand RO> :
InstSE<(outs), (ins RO:$rs, opnd:$offset),
@@ -611,6 +771,18 @@ let isCall=1, hasDelaySlot=1, Defs = [RA] in {
}
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, hasDelaySlot = 1,
+ hasExtraSrcRegAllocReq = 1, Defs = [AT] in {
+ class TailCall<Instruction JumpInst> :
+ PseudoSE<(outs), (ins calltarget:$target), [], IIBranch>,
+ PseudoInstExpansion<(JumpInst jmptarget:$target)>;
+
+ class TailCallReg<RegisterOperand RO, Instruction JRInst,
+ RegisterOperand ResRO = RO> :
+ PseudoSE<(outs), (ins RO:$rs), [(MipsTailCall RO:$rs)], IIBranch>,
+ PseudoInstExpansion<(JRInst ResRO:$rs)>;
+}
+
class BAL_BR_Pseudo<Instruction RealInst> :
PseudoSE<(outs), (ins brtarget:$offset), [], IIBranch>,
PseudoInstExpansion<(RealInst ZERO, brtarget:$offset)> {
@@ -624,36 +796,32 @@ class BAL_BR_Pseudo<Instruction RealInst> :
// Syscall
class SYS_FT<string opstr> :
InstSE<(outs), (ins uimm20:$code_),
- !strconcat(opstr, "\t$code_"), [], NoItinerary, FrmI>;
+ !strconcat(opstr, "\t$code_"), [], NoItinerary, FrmI, opstr>;
// Break
class BRK_FT<string opstr> :
InstSE<(outs), (ins uimm10:$code_1, uimm10:$code_2),
- !strconcat(opstr, "\t$code_1, $code_2"), [], NoItinerary, FrmOther>;
+ !strconcat(opstr, "\t$code_1, $code_2"), [], NoItinerary,
+ FrmOther, opstr>;
// (D)Eret
class ER_FT<string opstr> :
InstSE<(outs), (ins),
- opstr, [], NoItinerary, FrmOther>;
+ opstr, [], NoItinerary, FrmOther, opstr>;
// Interrupts
class DEI_FT<string opstr, RegisterOperand RO> :
InstSE<(outs RO:$rt), (ins),
- !strconcat(opstr, "\t$rt"), [], NoItinerary, FrmOther>;
+ !strconcat(opstr, "\t$rt"), [], NoItinerary, FrmOther, opstr>;
// Wait
class WAIT_FT<string opstr> :
- InstSE<(outs), (ins), opstr, [], NoItinerary, FrmOther> {
- let Inst{31-26} = 0x10;
- let Inst{25} = 1;
- let Inst{24-6} = 0;
- let Inst{5-0} = 0x20;
-}
+ InstSE<(outs), (ins), opstr, [], NoItinerary, FrmOther, opstr>;
// Sync
let hasSideEffects = 1 in
-class SYNC_FT :
+class SYNC_FT<string opstr> :
InstSE<(outs), (ins i32imm:$stype), "sync $stype", [(MipsSync imm:$stype)],
- NoItinerary, FrmOther>;
+ NoItinerary, FrmOther, opstr>;
let hasSideEffects = 1 in
class TEQ_FT<string opstr, RegisterOperand RO> :
@@ -690,12 +858,13 @@ class MultDivPseudo<Instruction RealInst, RegisterClass R0, RegisterOperand R1,
// Pseudo multiply add/sub instruction with explicit accumulator register
// operands.
-class MAddSubPseudo<Instruction RealInst, SDPatternOperator OpNode>
+class MAddSubPseudo<Instruction RealInst, SDPatternOperator OpNode,
+ InstrItinClass itin>
: PseudoSE<(outs ACC64:$ac),
(ins GPR32Opnd:$rs, GPR32Opnd:$rt, ACC64:$acin),
[(set ACC64:$ac,
(OpNode GPR32Opnd:$rs, GPR32Opnd:$rt, ACC64:$acin))],
- IIImult>,
+ itin>,
PseudoInstExpansion<(RealInst GPR32Opnd:$rs, GPR32Opnd:$rt)> {
string Constraints = "$acin = $ac";
}
@@ -710,21 +879,22 @@ class Div<string opstr, InstrItinClass itin, RegisterOperand RO,
// Move from Hi/Lo
class PseudoMFLOHI<RegisterClass DstRC, RegisterClass SrcRC, SDNode OpNode>
: PseudoSE<(outs DstRC:$rd), (ins SrcRC:$hilo),
- [(set DstRC:$rd, (OpNode SrcRC:$hilo))], IIHiLo>;
+ [(set DstRC:$rd, (OpNode SrcRC:$hilo))], II_MFHI_MFLO>;
class MoveFromLOHI<string opstr, RegisterOperand RO, Register UseReg>:
- InstSE<(outs RO:$rd), (ins), !strconcat(opstr, "\t$rd"), [], IIHiLo, FrmR,
- opstr> {
+ InstSE<(outs RO:$rd), (ins), !strconcat(opstr, "\t$rd"), [], II_MFHI_MFLO,
+ FrmR, opstr> {
let Uses = [UseReg];
let neverHasSideEffects = 1;
}
class PseudoMTLOHI<RegisterClass DstRC, RegisterClass SrcRC>
: PseudoSE<(outs DstRC:$lohi), (ins SrcRC:$lo, SrcRC:$hi),
- [(set DstRC:$lohi, (MipsMTLOHI SrcRC:$lo, SrcRC:$hi))], IIHiLo>;
+ [(set DstRC:$lohi, (MipsMTLOHI SrcRC:$lo, SrcRC:$hi))],
+ II_MTHI_MTLO>;
class MoveToLOHI<string opstr, RegisterOperand RO, list<Register> DefRegs>:
- InstSE<(outs), (ins RO:$rs), !strconcat(opstr, "\t$rs"), [], IIHiLo,
+ InstSE<(outs), (ins RO:$rs), !strconcat(opstr, "\t$rs"), [], II_MTHI_MTLO,
FrmR, opstr> {
let Defs = DefRegs;
let neverHasSideEffects = 1;
@@ -732,7 +902,8 @@ class MoveToLOHI<string opstr, RegisterOperand RO, list<Register> DefRegs>:
class EffectiveAddress<string opstr, RegisterOperand RO> :
InstSE<(outs RO:$rt), (ins mem_ea:$addr), !strconcat(opstr, "\t$rt, $addr"),
- [(set RO:$rt, addr:$addr)], NoItinerary, FrmI> {
+ [(set RO:$rt, addr:$addr)], NoItinerary, FrmI,
+ !strconcat(opstr, "_lea")> {
let isCodeGenOnly = 1;
let DecoderMethod = "DecodeMem";
}
@@ -740,34 +911,29 @@ class EffectiveAddress<string opstr, RegisterOperand RO> :
// Count Leading Ones/Zeros in Word
class CountLeading0<string opstr, RegisterOperand RO>:
InstSE<(outs RO:$rd), (ins RO:$rs), !strconcat(opstr, "\t$rd, $rs"),
- [(set RO:$rd, (ctlz RO:$rs))], IIArith, FrmR, opstr>,
- Requires<[HasBitCount, HasStdEnc]>;
+ [(set RO:$rd, (ctlz RO:$rs))], II_CLZ, FrmR, opstr>;
class CountLeading1<string opstr, RegisterOperand RO>:
InstSE<(outs RO:$rd), (ins RO:$rs), !strconcat(opstr, "\t$rd, $rs"),
- [(set RO:$rd, (ctlz (not RO:$rs)))], IIArith, FrmR, opstr>,
- Requires<[HasBitCount, HasStdEnc]>;
-
+ [(set RO:$rd, (ctlz (not RO:$rs)))], II_CLO, FrmR, opstr>;
// Sign Extend in Register.
-class SignExtInReg<string opstr, ValueType vt, RegisterOperand RO> :
+class SignExtInReg<string opstr, ValueType vt, RegisterOperand RO,
+ InstrItinClass itin> :
InstSE<(outs RO:$rd), (ins RO:$rt), !strconcat(opstr, "\t$rd, $rt"),
- [(set RO:$rd, (sext_inreg RO:$rt, vt))], IIseb, FrmR, opstr> {
- let Predicates = [HasSEInReg, HasStdEnc];
-}
+ [(set RO:$rd, (sext_inreg RO:$rt, vt))], itin, FrmR, opstr>;
// Subword Swap
class SubwordSwap<string opstr, RegisterOperand RO>:
InstSE<(outs RO:$rd), (ins RO:$rt), !strconcat(opstr, "\t$rd, $rt"), [],
NoItinerary, FrmR, opstr> {
- let Predicates = [HasSwap, HasStdEnc];
let neverHasSideEffects = 1;
}
// Read Hardware
class ReadHardware<RegisterOperand CPURegOperand, RegisterOperand RO> :
InstSE<(outs CPURegOperand:$rt), (ins RO:$rd), "rdhwr\t$rt, $rd", [],
- IIArith, FrmR>;
+ II_RDHWR, FrmR>;
// Ext and Ins
class ExtBase<string opstr, RegisterOperand RO, Operand PosOpnd,
@@ -775,17 +941,14 @@ class ExtBase<string opstr, RegisterOperand RO, Operand PosOpnd,
InstSE<(outs RO:$rt), (ins RO:$rs, PosOpnd:$pos, size_ext:$size),
!strconcat(opstr, " $rt, $rs, $pos, $size"),
[(set RO:$rt, (Op RO:$rs, imm:$pos, imm:$size))], NoItinerary,
- FrmR, opstr> {
- let Predicates = [HasMips32r2, HasStdEnc];
-}
+ FrmR, opstr>, ISA_MIPS32R2;
class InsBase<string opstr, RegisterOperand RO, Operand PosOpnd,
SDPatternOperator Op = null_frag>:
InstSE<(outs RO:$rt), (ins RO:$rs, PosOpnd:$pos, size_ins:$size, RO:$src),
!strconcat(opstr, " $rt, $rs, $pos, $size"),
[(set RO:$rt, (Op RO:$rs, imm:$pos, imm:$size, RO:$src))],
- NoItinerary, FrmR, opstr> {
- let Predicates = [HasMips32r2, HasStdEnc];
+ NoItinerary, FrmR, opstr>, ISA_MIPS32R2 {
let Constraints = "$src = $rt";
}
@@ -876,6 +1039,18 @@ let isPseudo = 1, isCodeGenOnly = 1 in {
def STORE_ACC64 : Store<"", ACC64>;
}
+// We need these two pseudo instructions to avoid offset calculation for long
+// branches. See the comment in file MipsLongBranch.cpp for detailed
+// explanation.
+
+// Expands to: lui $dst, %hi($tgt - $baltgt)
+def LONG_BRANCH_LUi : PseudoSE<(outs GPR32Opnd:$dst),
+ (ins brtarget:$tgt, brtarget:$baltgt), []>;
+
+// Expands to: addiu $dst, $src, %lo($tgt - $baltgt)
+def LONG_BRANCH_ADDiu : PseudoSE<(outs GPR32Opnd:$dst),
+ (ins GPR32Opnd:$src, brtarget:$tgt, brtarget:$baltgt), []>;
+
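The expansions quoted in the comment above rely on the usual MIPS %hi/%lo split of a 32-bit difference: %lo is the low 16 bits treated as signed, and %hi is adjusted so that (%hi << 16) plus the sign-extended %lo reassembles the original value. A small self-contained check of that arithmetic (generic C++, not code from MipsLongBranch.cpp):

#include <cassert>
#include <cstdint>
#include <initializer_list>

// %lo: low 16 bits of the value, sign-extended.
static int32_t lo16(int32_t Value) {
  return static_cast<int16_t>(Value & 0xffff);
}

// %hi: high 16 bits, adjusted for the carry introduced by the signed %lo.
static uint32_t hi16(int32_t Value) {
  return (static_cast<uint32_t>(Value) -
          static_cast<uint32_t>(lo16(Value))) >> 16;
}

int main() {
  for (int32_t V : {0, 0x12345678, 0x00018000, -4}) {
    uint32_t Rebuilt = (hi16(V) << 16) + static_cast<uint32_t>(lo16(V));
    assert(Rebuilt == static_cast<uint32_t>(V));  // lui + addiu reassemble V
  }
  return 0;
}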
//===----------------------------------------------------------------------===//
// Instruction definition
//===----------------------------------------------------------------------===//
@@ -884,88 +1059,95 @@ let isPseudo = 1, isCodeGenOnly = 1 in {
//===----------------------------------------------------------------------===//
/// Arithmetic Instructions (ALU Immediate)
-def ADDiu : MMRel, ArithLogicI<"addiu", simm16, GPR32Opnd, IIArith, immSExt16,
+def ADDiu : MMRel, ArithLogicI<"addiu", simm16, GPR32Opnd, II_ADDIU, immSExt16,
add>,
ADDI_FM<0x9>, IsAsCheapAsAMove;
-def ADDi : MMRel, ArithLogicI<"addi", simm16, GPR32Opnd>, ADDI_FM<0x8>;
+def ADDi : MMRel, ArithLogicI<"addi", simm16, GPR32Opnd>, ADDI_FM<0x8>,
+ ISA_MIPS1_NOT_32R6_64R6;
def SLTi : MMRel, SetCC_I<"slti", setlt, simm16, immSExt16, GPR32Opnd>,
SLTI_FM<0xa>;
def SLTiu : MMRel, SetCC_I<"sltiu", setult, simm16, immSExt16, GPR32Opnd>,
SLTI_FM<0xb>;
-def ANDi : MMRel, ArithLogicI<"andi", uimm16, GPR32Opnd, IILogic, immZExt16,
+def ANDi : MMRel, ArithLogicI<"andi", uimm16, GPR32Opnd, II_ANDI, immZExt16,
and>,
ADDI_FM<0xc>;
-def ORi : MMRel, ArithLogicI<"ori", uimm16, GPR32Opnd, IILogic, immZExt16,
+def ORi : MMRel, ArithLogicI<"ori", uimm16, GPR32Opnd, II_ORI, immZExt16,
or>,
ADDI_FM<0xd>;
-def XORi : MMRel, ArithLogicI<"xori", uimm16, GPR32Opnd, IILogic, immZExt16,
+def XORi : MMRel, ArithLogicI<"xori", uimm16, GPR32Opnd, II_XORI, immZExt16,
xor>,
ADDI_FM<0xe>;
def LUi : MMRel, LoadUpper<"lui", GPR32Opnd, uimm16>, LUI_FM;
/// Arithmetic Instructions (3-Operand, R-Type)
-def ADDu : MMRel, ArithLogicR<"addu", GPR32Opnd, 1, IIArith, add>,
+def ADDu : MMRel, ArithLogicR<"addu", GPR32Opnd, 1, II_ADDU, add>,
ADD_FM<0, 0x21>;
-def SUBu : MMRel, ArithLogicR<"subu", GPR32Opnd, 0, IIArith, sub>,
+def SUBu : MMRel, ArithLogicR<"subu", GPR32Opnd, 0, II_SUBU, sub>,
ADD_FM<0, 0x23>;
let Defs = [HI0, LO0] in
-def MUL : MMRel, ArithLogicR<"mul", GPR32Opnd, 1, IIImul, mul>,
- ADD_FM<0x1c, 2>;
+def MUL : MMRel, ArithLogicR<"mul", GPR32Opnd, 1, II_MUL, mul>,
+ ADD_FM<0x1c, 2>, ISA_MIPS32_NOT_32R6_64R6;
def ADD : MMRel, ArithLogicR<"add", GPR32Opnd>, ADD_FM<0, 0x20>;
def SUB : MMRel, ArithLogicR<"sub", GPR32Opnd>, ADD_FM<0, 0x22>;
def SLT : MMRel, SetCC_R<"slt", setlt, GPR32Opnd>, ADD_FM<0, 0x2a>;
def SLTu : MMRel, SetCC_R<"sltu", setult, GPR32Opnd>, ADD_FM<0, 0x2b>;
-def AND : MMRel, ArithLogicR<"and", GPR32Opnd, 1, IILogic, and>,
+def AND : MMRel, ArithLogicR<"and", GPR32Opnd, 1, II_AND, and>,
ADD_FM<0, 0x24>;
-def OR : MMRel, ArithLogicR<"or", GPR32Opnd, 1, IILogic, or>,
+def OR : MMRel, ArithLogicR<"or", GPR32Opnd, 1, II_OR, or>,
ADD_FM<0, 0x25>;
-def XOR : MMRel, ArithLogicR<"xor", GPR32Opnd, 1, IILogic, xor>,
+def XOR : MMRel, ArithLogicR<"xor", GPR32Opnd, 1, II_XOR, xor>,
ADD_FM<0, 0x26>;
def NOR : MMRel, LogicNOR<"nor", GPR32Opnd>, ADD_FM<0, 0x27>;
/// Shift Instructions
-def SLL : MMRel, shift_rotate_imm<"sll", uimm5, GPR32Opnd, shl, immZExt5>,
- SRA_FM<0, 0>;
-def SRL : MMRel, shift_rotate_imm<"srl", uimm5, GPR32Opnd, srl, immZExt5>,
- SRA_FM<2, 0>;
-def SRA : MMRel, shift_rotate_imm<"sra", uimm5, GPR32Opnd, sra, immZExt5>,
- SRA_FM<3, 0>;
-def SLLV : MMRel, shift_rotate_reg<"sllv", GPR32Opnd, shl>, SRLV_FM<4, 0>;
-def SRLV : MMRel, shift_rotate_reg<"srlv", GPR32Opnd, srl>, SRLV_FM<6, 0>;
-def SRAV : MMRel, shift_rotate_reg<"srav", GPR32Opnd, sra>, SRLV_FM<7, 0>;
+def SLL : MMRel, shift_rotate_imm<"sll", uimm5, GPR32Opnd, II_SLL, shl,
+ immZExt5>, SRA_FM<0, 0>;
+def SRL : MMRel, shift_rotate_imm<"srl", uimm5, GPR32Opnd, II_SRL, srl,
+ immZExt5>, SRA_FM<2, 0>;
+def SRA : MMRel, shift_rotate_imm<"sra", uimm5, GPR32Opnd, II_SRA, sra,
+ immZExt5>, SRA_FM<3, 0>;
+def SLLV : MMRel, shift_rotate_reg<"sllv", GPR32Opnd, II_SLLV, shl>,
+ SRLV_FM<4, 0>;
+def SRLV : MMRel, shift_rotate_reg<"srlv", GPR32Opnd, II_SRLV, srl>,
+ SRLV_FM<6, 0>;
+def SRAV : MMRel, shift_rotate_reg<"srav", GPR32Opnd, II_SRAV, sra>,
+ SRLV_FM<7, 0>;
// Rotate Instructions
-let Predicates = [HasMips32r2, HasStdEnc] in {
- def ROTR : MMRel, shift_rotate_imm<"rotr", uimm5, GPR32Opnd, rotr,
- immZExt5>,
- SRA_FM<2, 1>;
- def ROTRV : MMRel, shift_rotate_reg<"rotrv", GPR32Opnd, rotr>,
- SRLV_FM<6, 1>;
-}
+def ROTR : MMRel, shift_rotate_imm<"rotr", uimm5, GPR32Opnd, II_ROTR, rotr,
+ immZExt5>,
+ SRA_FM<2, 1>, ISA_MIPS32R2;
+def ROTRV : MMRel, shift_rotate_reg<"rotrv", GPR32Opnd, II_ROTRV, rotr>,
+ SRLV_FM<6, 1>, ISA_MIPS32R2;
/// Load and Store Instructions
/// aligned
-def LB : Load<"lb", GPR32Opnd, sextloadi8, IILoad>, MMRel, LW_FM<0x20>;
-def LBu : Load<"lbu", GPR32Opnd, zextloadi8, IILoad, addrDefault>, MMRel,
+def LB : Load<"lb", GPR32Opnd, sextloadi8, II_LB>, MMRel, LW_FM<0x20>;
+def LBu : Load<"lbu", GPR32Opnd, zextloadi8, II_LBU, addrDefault>, MMRel,
LW_FM<0x24>;
-def LH : Load<"lh", GPR32Opnd, sextloadi16, IILoad, addrDefault>, MMRel,
+def LH : Load<"lh", GPR32Opnd, sextloadi16, II_LH, addrDefault>, MMRel,
LW_FM<0x21>;
-def LHu : Load<"lhu", GPR32Opnd, zextloadi16, IILoad>, MMRel, LW_FM<0x25>;
-def LW : Load<"lw", GPR32Opnd, load, IILoad, addrDefault>, MMRel,
+def LHu : Load<"lhu", GPR32Opnd, zextloadi16, II_LHU>, MMRel, LW_FM<0x25>;
+def LW : Load<"lw", GPR32Opnd, load, II_LW, addrDefault>, MMRel,
LW_FM<0x23>;
-def SB : Store<"sb", GPR32Opnd, truncstorei8, IIStore>, MMRel, LW_FM<0x28>;
-def SH : Store<"sh", GPR32Opnd, truncstorei16, IIStore>, MMRel, LW_FM<0x29>;
-def SW : Store<"sw", GPR32Opnd, store, IIStore>, MMRel, LW_FM<0x2b>;
+def SB : Store<"sb", GPR32Opnd, truncstorei8, II_SB>, MMRel, LW_FM<0x28>;
+def SH : Store<"sh", GPR32Opnd, truncstorei16, II_SH>, MMRel, LW_FM<0x29>;
+def SW : Store<"sw", GPR32Opnd, store, II_SW>, MMRel, LW_FM<0x2b>;
/// load/store left/right
-let Predicates = [NotInMicroMips] in {
-def LWL : LoadLeftRight<"lwl", MipsLWL, GPR32Opnd, IILoad>, LW_FM<0x22>;
-def LWR : LoadLeftRight<"lwr", MipsLWR, GPR32Opnd, IILoad>, LW_FM<0x26>;
-def SWL : StoreLeftRight<"swl", MipsSWL, GPR32Opnd, IIStore>, LW_FM<0x2a>;
-def SWR : StoreLeftRight<"swr", MipsSWR, GPR32Opnd, IIStore>, LW_FM<0x2e>;
-}
-
-def SYNC : SYNC_FT, SYNC_FM;
+let EncodingPredicates = []<Predicate>, // FIXME: Lack of HasStdEnc is probably a bug
+ AdditionalPredicates = [NotInMicroMips] in {
+def LWL : LoadLeftRight<"lwl", MipsLWL, GPR32Opnd, II_LWL>, LW_FM<0x22>,
+ ISA_MIPS1_NOT_32R6_64R6;
+def LWR : LoadLeftRight<"lwr", MipsLWR, GPR32Opnd, II_LWR>, LW_FM<0x26>,
+ ISA_MIPS1_NOT_32R6_64R6;
+def SWL : StoreLeftRight<"swl", MipsSWL, GPR32Opnd, II_SWL>, LW_FM<0x2a>,
+ ISA_MIPS1_NOT_32R6_64R6;
+def SWR : StoreLeftRight<"swr", MipsSWR, GPR32Opnd, II_SWR>, LW_FM<0x2e>,
+ ISA_MIPS1_NOT_32R6_64R6;
+}
+
+def SYNC : MMRel, SYNC_FT<"sync">, SYNC_FM, ISA_MIPS32;
def TEQ : MMRel, TEQ_FT<"teq", GPR32Opnd>, TEQ_FM<0x34>;
def TGE : MMRel, TEQ_FT<"tge", GPR32Opnd>, TEQ_FM<0x30>;
def TGEU : MMRel, TEQ_FT<"tgeu", GPR32Opnd>, TEQ_FM<0x31>;
@@ -973,32 +1155,42 @@ def TLT : MMRel, TEQ_FT<"tlt", GPR32Opnd>, TEQ_FM<0x32>;
def TLTU : MMRel, TEQ_FT<"tltu", GPR32Opnd>, TEQ_FM<0x33>;
def TNE : MMRel, TEQ_FT<"tne", GPR32Opnd>, TEQ_FM<0x36>;
-def TEQI : MMRel, TEQI_FT<"teqi", GPR32Opnd>, TEQI_FM<0xc>;
-def TGEI : MMRel, TEQI_FT<"tgei", GPR32Opnd>, TEQI_FM<0x8>;
-def TGEIU : MMRel, TEQI_FT<"tgeiu", GPR32Opnd>, TEQI_FM<0x9>;
-def TLTI : MMRel, TEQI_FT<"tlti", GPR32Opnd>, TEQI_FM<0xa>;
-def TTLTIU : MMRel, TEQI_FT<"tltiu", GPR32Opnd>, TEQI_FM<0xb>;
-def TNEI : MMRel, TEQI_FT<"tnei", GPR32Opnd>, TEQI_FM<0xe>;
-
-def BREAK : BRK_FT<"break">, BRK_FM<0xd>;
-def SYSCALL : SYS_FT<"syscall">, SYS_FM<0xc>;
+def TEQI : MMRel, TEQI_FT<"teqi", GPR32Opnd>, TEQI_FM<0xc>,
+ ISA_MIPS2_NOT_32R6_64R6;
+def TGEI : MMRel, TEQI_FT<"tgei", GPR32Opnd>, TEQI_FM<0x8>,
+ ISA_MIPS2_NOT_32R6_64R6;
+def TGEIU : MMRel, TEQI_FT<"tgeiu", GPR32Opnd>, TEQI_FM<0x9>,
+ ISA_MIPS2_NOT_32R6_64R6;
+def TLTI : MMRel, TEQI_FT<"tlti", GPR32Opnd>, TEQI_FM<0xa>,
+ ISA_MIPS2_NOT_32R6_64R6;
+def TTLTIU : MMRel, TEQI_FT<"tltiu", GPR32Opnd>, TEQI_FM<0xb>,
+ ISA_MIPS2_NOT_32R6_64R6;
+def TNEI : MMRel, TEQI_FT<"tnei", GPR32Opnd>, TEQI_FM<0xe>,
+ ISA_MIPS2_NOT_32R6_64R6;
+
+def BREAK : MMRel, BRK_FT<"break">, BRK_FM<0xd>;
+def SYSCALL : MMRel, SYS_FT<"syscall">, SYS_FM<0xc>;
def TRAP : TrapBase<BREAK>;
+def SDBBP : SYS_FT<"sdbbp">, SDBBP_FM, ISA_MIPS32_NOT_32R6_64R6;
-def ERET : ER_FT<"eret">, ER_FM<0x18>;
-def DERET : ER_FT<"deret">, ER_FM<0x1f>;
+def ERET : MMRel, ER_FT<"eret">, ER_FM<0x18>, INSN_MIPS3_32;
+def DERET : MMRel, ER_FT<"deret">, ER_FM<0x1f>, ISA_MIPS32;
-def EI : DEI_FT<"ei", GPR32Opnd>, EI_FM<1>;
-def DI : DEI_FT<"di", GPR32Opnd>, EI_FM<0>;
+def EI : MMRel, DEI_FT<"ei", GPR32Opnd>, EI_FM<1>, ISA_MIPS32R2;
+def DI : MMRel, DEI_FT<"di", GPR32Opnd>, EI_FM<0>, ISA_MIPS32R2;
-def WAIT : WAIT_FT<"wait">;
+let EncodingPredicates = []<Predicate>, // FIXME: Lack of HasStdEnc is probably a bug
+ AdditionalPredicates = [NotInMicroMips] in {
+def WAIT : WAIT_FT<"wait">, WAIT_FM;
/// Load-linked, Store-conditional
-def LL : LLBase<"ll", GPR32Opnd>, LW_FM<0x30>;
-def SC : SCBase<"sc", GPR32Opnd>, LW_FM<0x38>;
+def LL : LLBase<"ll", GPR32Opnd>, LW_FM<0x30>, ISA_MIPS2_NOT_32R6_64R6;
+def SC : SCBase<"sc", GPR32Opnd>, LW_FM<0x38>, ISA_MIPS2_NOT_32R6_64R6;
+}
/// Jump and Branch Instructions
def J : MMRel, JumpFJ<jmptarget, "j", br, bb, "j">, FJ<2>,
- Requires<[RelocStatic, HasStdEnc]>, IsBranch;
+ AdditionalRequires<[RelocStatic]>, IsBranch;
def JR : MMRel, IndirectBranch<"jr", GPR32Opnd>, MTLO_FM<8>;
def BEQ : MMRel, CBranch<"beq", brtarget, seteq, GPR32Opnd>, BEQ_FM<4>;
def BNE : MMRel, CBranch<"bne", brtarget, setne, GPR32Opnd>, BEQ_FM<5>;
@@ -1013,17 +1205,50 @@ def BLTZ : MMRel, CBranchZero<"bltz", brtarget, setlt, GPR32Opnd>,
def B : UncondBranch<BEQ>;
def JAL : MMRel, JumpLink<"jal", calltarget>, FJ<3>;
-def JALR : MMRel, JumpLinkReg<"jalr", GPR32Opnd>, JALR_FM;
-def JALRPseudo : JumpLinkRegPseudo<GPR32Opnd, JALR, RA>;
-def BGEZAL : MMRel, BGEZAL_FT<"bgezal", brtarget, GPR32Opnd>, BGEZAL_FM<0x11>;
-def BLTZAL : MMRel, BGEZAL_FT<"bltzal", brtarget, GPR32Opnd>, BGEZAL_FM<0x10>;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def JALR : JumpLinkReg<"jalr", GPR32Opnd>, JALR_FM;
+ def JALRPseudo : JumpLinkRegPseudo<GPR32Opnd, JALR, RA>;
+}
+
+// FIXME: JALX really requires either MIPS16 or microMIPS in addition to MIPS32.
+def JALX : JumpLink<"jalx", calltarget>, FJ<0x1D>, ISA_MIPS32_NOT_32R6_64R6;
+def BGEZAL : MMRel, BGEZAL_FT<"bgezal", brtarget, GPR32Opnd>, BGEZAL_FM<0x11>,
+ ISA_MIPS1_NOT_32R6_64R6;
+def BLTZAL : MMRel, BGEZAL_FT<"bltzal", brtarget, GPR32Opnd>, BGEZAL_FM<0x10>,
+ ISA_MIPS1_NOT_32R6_64R6;
def BAL_BR : BAL_BR_Pseudo<BGEZAL>;
-def TAILCALL : MMRel, JumpFJ<calltarget, "j", MipsTailCall, imm, "tcall">,
- FJ<2>, IsTailCall;
-def TAILCALL_R : MMRel, JumpFR<"tcallr", GPR32Opnd, MipsTailCall>, MTLO_FM<8>,
- IsTailCall;
+def TAILCALL : TailCall<J>;
+def TAILCALL_R : TailCallReg<GPR32Opnd, JR>;
+
+// Indirect branches are matched as PseudoIndirectBranch/PseudoIndirectBranch64
+// then are expanded to JR, JR64, JALR, or JALR64 depending on the ISA.
+class PseudoIndirectBranchBase<RegisterOperand RO> :
+ MipsPseudo<(outs), (ins RO:$rs), [(brind RO:$rs)], IIBranch> {
+ let isTerminator=1;
+ let isBarrier=1;
+ let hasDelaySlot = 1;
+ let isBranch = 1;
+ let isIndirectBranch = 1;
+}
+
+def PseudoIndirectBranch : PseudoIndirectBranchBase<GPR32Opnd>;
+
+// Return instructions are matched as a RetRA instruction, then are expanded
+// into PseudoReturn/PseudoReturn64 after register allocation. Finally,
+// MipsAsmPrinter expands this into JR, JR64, JALR, or JALR64 depending on the
+// ISA.
+class PseudoReturnBase<RegisterOperand RO> : MipsPseudo<(outs), (ins RO:$rs),
+ [], IIBranch> {
+ let isTerminator = 1;
+ let isBarrier = 1;
+ let hasDelaySlot = 1;
+ let isReturn = 1;
+ let isCodeGenOnly = 1;
+ let hasCtrlDep = 1;
+ let hasExtraSrcRegAllocReq = 1;
+}
-def RET : MMRel, RetBase<"ret", GPR32Opnd>, MTLO_FM<8>;
+def PseudoReturn : PseudoReturnBase<GPR32Opnd>;
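The comment above describes a three-stage lowering: RetRA is matched first, becomes PseudoReturn/PseudoReturn64 after register allocation, and is only turned into a concrete jump by MipsAsmPrinter. The C++ sketch below illustrates just the final ISA-dependent choice; it is not MipsAsmPrinter's actual code, and the rule that the r6 revisions use JALR where older ISAs used JR is an assumption, not something stated in this patch.

// Hypothetical sketch of the last step described in the comment above:
// choosing the concrete opcode for a return once the target ISA is known.
// Opcode names mirror the instruction names in the comment.
enum class ReturnOpcode { JR, JR64, JALR, JALR64 };

static ReturnOpcode pickReturnOpcode(bool IsGP64, bool IsR6) {
  if (IsR6)                                         // assumption: r6 uses JALR
    return IsGP64 ? ReturnOpcode::JALR64 : ReturnOpcode::JALR;
  return IsGP64 ? ReturnOpcode::JR64 : ReturnOpcode::JR;
}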
// Exception handling related node and instructions.
// The conversion sequence is:
@@ -1047,30 +1272,41 @@ let Uses = [V0, V1], isTerminator = 1, isReturn = 1, isBarrier = 1 in {
}
/// Multiply and Divide Instructions.
-def MULT : MMRel, Mult<"mult", IIImult, GPR32Opnd, [HI0, LO0]>,
- MULT_FM<0, 0x18>;
-def MULTu : MMRel, Mult<"multu", IIImult, GPR32Opnd, [HI0, LO0]>,
- MULT_FM<0, 0x19>;
-def SDIV : MMRel, Div<"div", IIIdiv, GPR32Opnd, [HI0, LO0]>,
- MULT_FM<0, 0x1a>;
-def UDIV : MMRel, Div<"divu", IIIdiv, GPR32Opnd, [HI0, LO0]>,
- MULT_FM<0, 0x1b>;
-
-def MTHI : MMRel, MoveToLOHI<"mthi", GPR32Opnd, [HI0]>, MTLO_FM<0x11>;
-def MTLO : MMRel, MoveToLOHI<"mtlo", GPR32Opnd, [LO0]>, MTLO_FM<0x13>;
-def MFHI : MMRel, MoveFromLOHI<"mfhi", GPR32Opnd, AC0>, MFLO_FM<0x10>;
-def MFLO : MMRel, MoveFromLOHI<"mflo", GPR32Opnd, AC0>, MFLO_FM<0x12>;
+def MULT : MMRel, Mult<"mult", II_MULT, GPR32Opnd, [HI0, LO0]>,
+ MULT_FM<0, 0x18>, ISA_MIPS1_NOT_32R6_64R6;
+def MULTu : MMRel, Mult<"multu", II_MULTU, GPR32Opnd, [HI0, LO0]>,
+ MULT_FM<0, 0x19>, ISA_MIPS1_NOT_32R6_64R6;
+def SDIV : MMRel, Div<"div", II_DIV, GPR32Opnd, [HI0, LO0]>,
+ MULT_FM<0, 0x1a>, ISA_MIPS1_NOT_32R6_64R6;
+def UDIV : MMRel, Div<"divu", II_DIVU, GPR32Opnd, [HI0, LO0]>,
+ MULT_FM<0, 0x1b>, ISA_MIPS1_NOT_32R6_64R6;
+
+def MTHI : MMRel, MoveToLOHI<"mthi", GPR32Opnd, [HI0]>, MTLO_FM<0x11>,
+ ISA_MIPS1_NOT_32R6_64R6;
+def MTLO : MMRel, MoveToLOHI<"mtlo", GPR32Opnd, [LO0]>, MTLO_FM<0x13>,
+ ISA_MIPS1_NOT_32R6_64R6;
+let EncodingPredicates = []<Predicate>, // FIXME: Lack of HasStdEnc is probably a bug
+ AdditionalPredicates = [NotInMicroMips] in {
+def MFHI : MMRel, MoveFromLOHI<"mfhi", GPR32Opnd, AC0>, MFLO_FM<0x10>,
+ ISA_MIPS1_NOT_32R6_64R6;
+def MFLO : MMRel, MoveFromLOHI<"mflo", GPR32Opnd, AC0>, MFLO_FM<0x12>,
+ ISA_MIPS1_NOT_32R6_64R6;
+}
/// Sign Ext In Register Instructions.
-def SEB : MMRel, SignExtInReg<"seb", i8, GPR32Opnd>, SEB_FM<0x10, 0x20>;
-def SEH : MMRel, SignExtInReg<"seh", i16, GPR32Opnd>, SEB_FM<0x18, 0x20>;
+def SEB : MMRel, SignExtInReg<"seb", i8, GPR32Opnd, II_SEB>,
+ SEB_FM<0x10, 0x20>, ISA_MIPS32R2;
+def SEH : MMRel, SignExtInReg<"seh", i16, GPR32Opnd, II_SEH>,
+ SEB_FM<0x18, 0x20>, ISA_MIPS32R2;
/// Count Leading
-def CLZ : MMRel, CountLeading0<"clz", GPR32Opnd>, CLO_FM<0x20>;
-def CLO : MMRel, CountLeading1<"clo", GPR32Opnd>, CLO_FM<0x21>;
+def CLZ : MMRel, CountLeading0<"clz", GPR32Opnd>, CLO_FM<0x20>,
+ ISA_MIPS32_NOT_32R6_64R6;
+def CLO : MMRel, CountLeading1<"clo", GPR32Opnd>, CLO_FM<0x21>,
+ ISA_MIPS32_NOT_32R6_64R6;
/// Word Swap Bytes Within Halfwords
-def WSBH : MMRel, SubwordSwap<"wsbh", GPR32Opnd>, SEB_FM<2, 0x20>;
+def WSBH : MMRel, SubwordSwap<"wsbh", GPR32Opnd>, SEB_FM<2, 0x20>, ISA_MIPS32R2;
/// No operation.
def NOP : PseudoSE<(outs), (ins), []>, PseudoInstExpansion<(SLL ZERO, ZERO, 0)>;
@@ -1079,30 +1315,40 @@ def NOP : PseudoSE<(outs), (ins), []>, PseudoInstExpansion<(SLL ZERO, ZERO, 0)>;
// instructions. The same does not happen for stack address copies, so an
// add op with mem ComplexPattern is used and the stack address copy
// can be matched. It's similar to Sparc LEA_ADDRi
-def LEA_ADDiu : EffectiveAddress<"addiu", GPR32Opnd>, LW_FM<9>;
+def LEA_ADDiu : MMRel, EffectiveAddress<"addiu", GPR32Opnd>, LW_FM<9>;
// MADD*/MSUB*
-def MADD : MMRel, MArithR<"madd", 1>, MULT_FM<0x1c, 0>;
-def MADDU : MMRel, MArithR<"maddu", 1>, MULT_FM<0x1c, 1>;
-def MSUB : MMRel, MArithR<"msub">, MULT_FM<0x1c, 4>;
-def MSUBU : MMRel, MArithR<"msubu">, MULT_FM<0x1c, 5>;
-
-let Predicates = [HasStdEnc, NotDSP] in {
-def PseudoMULT : MultDivPseudo<MULT, ACC64, GPR32Opnd, MipsMult, IIImult>;
-def PseudoMULTu : MultDivPseudo<MULTu, ACC64, GPR32Opnd, MipsMultu, IIImult>;
-def PseudoMFHI : PseudoMFLOHI<GPR32, ACC64, MipsMFHI>;
-def PseudoMFLO : PseudoMFLOHI<GPR32, ACC64, MipsMFLO>;
-def PseudoMTLOHI : PseudoMTLOHI<ACC64, GPR32>;
-def PseudoMADD : MAddSubPseudo<MADD, MipsMAdd>;
-def PseudoMADDU : MAddSubPseudo<MADDU, MipsMAddu>;
-def PseudoMSUB : MAddSubPseudo<MSUB, MipsMSub>;
-def PseudoMSUBU : MAddSubPseudo<MSUBU, MipsMSubu>;
-}
-
-def PseudoSDIV : MultDivPseudo<SDIV, ACC64, GPR32Opnd, MipsDivRem, IIIdiv,
- 0, 1, 1>;
-def PseudoUDIV : MultDivPseudo<UDIV, ACC64, GPR32Opnd, MipsDivRemU, IIIdiv,
- 0, 1, 1>;
+def MADD : MMRel, MArithR<"madd", II_MADD, 1>, MULT_FM<0x1c, 0>,
+ ISA_MIPS32_NOT_32R6_64R6;
+def MADDU : MMRel, MArithR<"maddu", II_MADDU, 1>, MULT_FM<0x1c, 1>,
+ ISA_MIPS32_NOT_32R6_64R6;
+def MSUB : MMRel, MArithR<"msub", II_MSUB>, MULT_FM<0x1c, 4>,
+ ISA_MIPS32_NOT_32R6_64R6;
+def MSUBU : MMRel, MArithR<"msubu", II_MSUBU>, MULT_FM<0x1c, 5>,
+ ISA_MIPS32_NOT_32R6_64R6;
+
+let AdditionalPredicates = [NotDSP] in {
+def PseudoMULT : MultDivPseudo<MULT, ACC64, GPR32Opnd, MipsMult, II_MULT>,
+ ISA_MIPS1_NOT_32R6_64R6;
+def PseudoMULTu : MultDivPseudo<MULTu, ACC64, GPR32Opnd, MipsMultu, II_MULTU>,
+ ISA_MIPS1_NOT_32R6_64R6;
+def PseudoMFHI : PseudoMFLOHI<GPR32, ACC64, MipsMFHI>, ISA_MIPS1_NOT_32R6_64R6;
+def PseudoMFLO : PseudoMFLOHI<GPR32, ACC64, MipsMFLO>, ISA_MIPS1_NOT_32R6_64R6;
+def PseudoMTLOHI : PseudoMTLOHI<ACC64, GPR32>, ISA_MIPS1_NOT_32R6_64R6;
+def PseudoMADD : MAddSubPseudo<MADD, MipsMAdd, II_MADD>,
+ ISA_MIPS32_NOT_32R6_64R6;
+def PseudoMADDU : MAddSubPseudo<MADDU, MipsMAddu, II_MADDU>,
+ ISA_MIPS32_NOT_32R6_64R6;
+def PseudoMSUB : MAddSubPseudo<MSUB, MipsMSub, II_MSUB>,
+ ISA_MIPS32_NOT_32R6_64R6;
+def PseudoMSUBU : MAddSubPseudo<MSUBU, MipsMSubu, II_MSUBU>,
+ ISA_MIPS32_NOT_32R6_64R6;
+}
+
+def PseudoSDIV : MultDivPseudo<SDIV, ACC64, GPR32Opnd, MipsDivRem, II_DIV,
+ 0, 1, 1>, ISA_MIPS1_NOT_32R6_64R6;
+def PseudoUDIV : MultDivPseudo<UDIV, ACC64, GPR32Opnd, MipsDivRemU, II_DIVU,
+ 0, 1, 1>, ISA_MIPS1_NOT_32R6_64R6;
def RDHWR : ReadHardware<GPR32Opnd, HWRegsOpnd>, RDHWR_FM;
@@ -1110,68 +1356,161 @@ def EXT : MMRel, ExtBase<"ext", GPR32Opnd, uimm5, MipsExt>, EXT_FM<0>;
def INS : MMRel, InsBase<"ins", GPR32Opnd, uimm5, MipsIns>, EXT_FM<4>;
/// Move Control Registers From/To CPU Registers
-def MFC0 : MFC3OP<"mfc0", GPR32Opnd>, MFC3OP_FM<0x10, 0>;
-def MTC0 : MFC3OP<"mtc0", GPR32Opnd>, MFC3OP_FM<0x10, 4>;
+def MFC0 : MFC3OP<"mfc0", GPR32Opnd>, MFC3OP_FM<0x10, 0>, ISA_MIPS32;
+def MTC0 : MFC3OP<"mtc0", GPR32Opnd>, MFC3OP_FM<0x10, 4>, ISA_MIPS32;
def MFC2 : MFC3OP<"mfc2", GPR32Opnd>, MFC3OP_FM<0x12, 0>;
def MTC2 : MFC3OP<"mtc2", GPR32Opnd>, MFC3OP_FM<0x12, 4>;
+class Barrier<string asmstr> : InstSE<(outs), (ins), asmstr, [], NoItinerary,
+ FrmOther>;
+def SSNOP : Barrier<"ssnop">, BARRIER_FM<1>;
+def EHB : Barrier<"ehb">, BARRIER_FM<3>;
+def PAUSE : Barrier<"pause">, BARRIER_FM<5>, ISA_MIPS32R2;
+
+// JR_HB and JALR_HB are defined here using the new style naming
+// scheme because some of this code is shared with Mips32r6InstrInfo.td
+// and because of that it doesn't follow the naming convention of the
+// rest of the file. To avoid a mixture of old vs new style, the new
+// style was chosen.
+class JR_HB_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
+ dag OutOperandList = (outs);
+ dag InOperandList = (ins GPROpnd:$rs);
+ string AsmString = !strconcat(instr_asm, "\t$rs");
+ list<dag> Pattern = [];
+}
+
+class JALR_HB_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
+ dag OutOperandList = (outs GPROpnd:$rd);
+ dag InOperandList = (ins GPROpnd:$rs);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $rs");
+ list<dag> Pattern = [];
+}
+
+class JR_HB_DESC : InstSE<(outs), (ins), "", [], NoItinerary, FrmJ>,
+ JR_HB_DESC_BASE<"jr.hb", GPR32Opnd> {
+ let isBranch=1;
+ let isIndirectBranch=1;
+ let hasDelaySlot=1;
+ let isTerminator=1;
+ let isBarrier=1;
+}
+
+class JALR_HB_DESC : InstSE<(outs), (ins), "", [], NoItinerary, FrmJ>,
+ JALR_HB_DESC_BASE<"jalr.hb", GPR32Opnd> {
+ let isIndirectBranch=1;
+ let hasDelaySlot=1;
+}
+
+class JR_HB_ENC : JR_HB_FM<8>;
+class JALR_HB_ENC : JALR_HB_FM<9>;
+
+def JR_HB : JR_HB_DESC, JR_HB_ENC, ISA_MIPS32_NOT_32R6_64R6;
+def JALR_HB : JALR_HB_DESC, JALR_HB_ENC, ISA_MIPS32;
+
+class TLB<string asmstr> : InstSE<(outs), (ins), asmstr, [], NoItinerary,
+ FrmOther>;
+def TLBP : TLB<"tlbp">, COP0_TLB_FM<0x08>;
+def TLBR : TLB<"tlbr">, COP0_TLB_FM<0x01>;
+def TLBWI : TLB<"tlbwi">, COP0_TLB_FM<0x02>;
+def TLBWR : TLB<"tlbwr">, COP0_TLB_FM<0x06>;
+
+class CacheOp<string instr_asm, Operand MemOpnd, RegisterOperand GPROpnd> :
+ InstSE<(outs), (ins MemOpnd:$addr, uimm5:$hint),
+ !strconcat(instr_asm, "\t$hint, $addr"), [], NoItinerary, FrmOther>;
+
+def CACHE : CacheOp<"cache", mem, GPR32Opnd>, CACHEOP_FM<0b101111>,
+ INSN_MIPS3_32_NOT_32R6_64R6;
+def PREF : CacheOp<"pref", mem, GPR32Opnd>, CACHEOP_FM<0b110011>,
+ INSN_MIPS3_32_NOT_32R6_64R6;
+
//===----------------------------------------------------------------------===//
// Instruction aliases
//===----------------------------------------------------------------------===//
-def : InstAlias<"move $dst, $src",
- (ADDu GPR32Opnd:$dst, GPR32Opnd:$src,ZERO), 1>,
- Requires<[NotMips64]>;
-def : InstAlias<"bal $offset", (BGEZAL ZERO, brtarget:$offset), 0>;
-def : InstAlias<"addu $rs, $rt, $imm",
- (ADDiu GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>;
-def : InstAlias<"add $rs, $rt, $imm",
- (ADDi GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>;
-def : InstAlias<"and $rs, $rt, $imm",
- (ANDi GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>;
-def : InstAlias<"j $rs", (JR GPR32Opnd:$rs), 0>;
-def : InstAlias<"jalr $rs", (JALR RA, GPR32Opnd:$rs), 0>;
-def : InstAlias<"jal $rs", (JALR RA, GPR32Opnd:$rs), 0>;
-def : InstAlias<"jal $rd,$rs", (JALR GPR32Opnd:$rd, GPR32Opnd:$rs), 0>;
-def : InstAlias<"not $rt, $rs",
- (NOR GPR32Opnd:$rt, GPR32Opnd:$rs, ZERO), 0>;
-def : InstAlias<"neg $rt, $rs",
- (SUB GPR32Opnd:$rt, ZERO, GPR32Opnd:$rs), 1>;
-def : InstAlias<"negu $rt, $rs",
- (SUBu GPR32Opnd:$rt, ZERO, GPR32Opnd:$rs), 1>;
-def : InstAlias<"slt $rs, $rt, $imm",
- (SLTi GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>;
-def : InstAlias<"xor $rs, $rt, $imm",
- (XORi GPR32Opnd:$rs, GPR32Opnd:$rt, uimm16:$imm), 0>;
-def : InstAlias<"or $rs, $rt, $imm",
- (ORi GPR32Opnd:$rs, GPR32Opnd:$rt, uimm16:$imm), 0>;
-def : InstAlias<"nop", (SLL ZERO, ZERO, 0), 1>;
-def : InstAlias<"mfc0 $rt, $rd", (MFC0 GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>;
-def : InstAlias<"mtc0 $rt, $rd", (MTC0 GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>;
-def : InstAlias<"mfc2 $rt, $rd", (MFC2 GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>;
-def : InstAlias<"mtc2 $rt, $rd", (MTC2 GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>;
-def : InstAlias<"b $offset", (BEQ ZERO, ZERO, brtarget:$offset), 0>;
-def : InstAlias<"bnez $rs,$offset",
- (BNE GPR32Opnd:$rs, ZERO, brtarget:$offset), 0>;
-def : InstAlias<"beqz $rs,$offset",
- (BEQ GPR32Opnd:$rs, ZERO, brtarget:$offset), 0>;
-def : InstAlias<"syscall", (SYSCALL 0), 1>;
-
-def : InstAlias<"break $imm", (BREAK uimm10:$imm, 0), 1>;
-def : InstAlias<"break", (BREAK 0, 0), 1>;
-def : InstAlias<"ei", (EI ZERO), 1>;
-def : InstAlias<"di", (DI ZERO), 1>;
-
-def : InstAlias<"teq $rs, $rt", (TEQ GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
-def : InstAlias<"tge $rs, $rt", (TGE GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
-def : InstAlias<"tgeu $rs, $rt", (TGEU GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
-def : InstAlias<"tlt $rs, $rt", (TLT GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
-def : InstAlias<"tltu $rs, $rt", (TLTU GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
-def : InstAlias<"tne $rs, $rt", (TNE GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
-def : InstAlias<"sub, $rd, $rs, $imm",
- (ADDi GPR32Opnd:$rd, GPR32Opnd:$rs, InvertedImOperand:$imm)>;
-def : InstAlias<"subu, $rd, $rs, $imm",
- (ADDiu GPR32Opnd:$rd, GPR32Opnd:$rs, InvertedImOperand:$imm)>;
-
+def : MipsInstAlias<"move $dst, $src",
+ (ADDu GPR32Opnd:$dst, GPR32Opnd:$src,ZERO), 1>,
+ GPR_32 {
+ let AdditionalPredicates = [NotInMicroMips];
+}
+def : MipsInstAlias<"bal $offset", (BGEZAL ZERO, brtarget:$offset), 0>,
+ ISA_MIPS1_NOT_32R6_64R6;
+def : MipsInstAlias<"addu $rs, $rt, $imm",
+ (ADDiu GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>;
+def : MipsInstAlias<"add $rs, $rt, $imm",
+ (ADDi GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>;
+def : MipsInstAlias<"and $rs, $rt, $imm",
+ (ANDi GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>;
+def : MipsInstAlias<"and $rs, $imm",
+ (ANDi GPR32Opnd:$rs, GPR32Opnd:$rs, simm16:$imm), 0>;
+def : MipsInstAlias<"j $rs", (JR GPR32Opnd:$rs), 0>;
+let Predicates = [NotInMicroMips] in {
+def : MipsInstAlias<"jalr $rs", (JALR RA, GPR32Opnd:$rs), 0>;
+}
+def : MipsInstAlias<"jal $rs", (JALR RA, GPR32Opnd:$rs), 0>;
+def : MipsInstAlias<"jal $rd,$rs", (JALR GPR32Opnd:$rd, GPR32Opnd:$rs), 0>;
+def : MipsInstAlias<"jalr.hb $rs", (JALR_HB RA, GPR32Opnd:$rs), 1>, ISA_MIPS32;
+def : MipsInstAlias<"not $rt, $rs",
+ (NOR GPR32Opnd:$rt, GPR32Opnd:$rs, ZERO), 0>;
+def : MipsInstAlias<"neg $rt, $rs",
+ (SUB GPR32Opnd:$rt, ZERO, GPR32Opnd:$rs), 1>;
+def : MipsInstAlias<"negu $rt",
+ (SUBu GPR32Opnd:$rt, ZERO, GPR32Opnd:$rt), 0>;
+def : MipsInstAlias<"negu $rt, $rs",
+ (SUBu GPR32Opnd:$rt, ZERO, GPR32Opnd:$rs), 1>;
+def : MipsInstAlias<"slt $rs, $rt, $imm",
+ (SLTi GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>;
+def : MipsInstAlias<"sltu $rt, $rs, $imm",
+ (SLTiu GPR32Opnd:$rt, GPR32Opnd:$rs, simm16:$imm), 0>;
+def : MipsInstAlias<"xor $rs, $rt, $imm",
+ (XORi GPR32Opnd:$rs, GPR32Opnd:$rt, uimm16:$imm), 0>;
+def : MipsInstAlias<"or $rs, $rt, $imm",
+ (ORi GPR32Opnd:$rs, GPR32Opnd:$rt, uimm16:$imm), 0>;
+def : MipsInstAlias<"or $rs, $imm",
+ (ORi GPR32Opnd:$rs, GPR32Opnd:$rs, uimm16:$imm), 0>;
+def : MipsInstAlias<"nop", (SLL ZERO, ZERO, 0), 1>;
+def : MipsInstAlias<"mfc0 $rt, $rd", (MFC0 GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>;
+def : MipsInstAlias<"mtc0 $rt, $rd", (MTC0 GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>;
+def : MipsInstAlias<"mfc2 $rt, $rd", (MFC2 GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>;
+def : MipsInstAlias<"mtc2 $rt, $rd", (MTC2 GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>;
+def : MipsInstAlias<"b $offset", (BEQ ZERO, ZERO, brtarget:$offset), 0>;
+def : MipsInstAlias<"bnez $rs,$offset",
+ (BNE GPR32Opnd:$rs, ZERO, brtarget:$offset), 0>;
+def : MipsInstAlias<"beqz $rs,$offset",
+ (BEQ GPR32Opnd:$rs, ZERO, brtarget:$offset), 0>;
+def : MipsInstAlias<"syscall", (SYSCALL 0), 1>;
+
+def : MipsInstAlias<"break", (BREAK 0, 0), 1>;
+def : MipsInstAlias<"break $imm", (BREAK uimm10:$imm, 0), 1>;
+def : MipsInstAlias<"ei", (EI ZERO), 1>;
+def : MipsInstAlias<"di", (DI ZERO), 1>;
+
+def : MipsInstAlias<"teq $rs, $rt", (TEQ GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+def : MipsInstAlias<"tge $rs, $rt", (TGE GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+def : MipsInstAlias<"tgeu $rs, $rt", (TGEU GPR32Opnd:$rs, GPR32Opnd:$rt, 0),
+ 1>;
+def : MipsInstAlias<"tlt $rs, $rt", (TLT GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+def : MipsInstAlias<"tltu $rs, $rt", (TLTU GPR32Opnd:$rs, GPR32Opnd:$rt, 0),
+ 1>;
+def : MipsInstAlias<"tne $rs, $rt", (TNE GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+def : MipsInstAlias<"sll $rd, $rt, $rs",
+ (SLLV GPR32Opnd:$rd, GPR32Opnd:$rt, GPR32Opnd:$rs), 0>;
+def : MipsInstAlias<"sub, $rd, $rs, $imm",
+ (ADDi GPR32Opnd:$rd, GPR32Opnd:$rs,
+ InvertedImOperand:$imm), 0>;
+def : MipsInstAlias<"sub $rs, $imm",
+ (ADDi GPR32Opnd:$rs, GPR32Opnd:$rs, InvertedImOperand:$imm),
+ 0>;
+def : MipsInstAlias<"subu, $rd, $rs, $imm",
+ (ADDiu GPR32Opnd:$rd, GPR32Opnd:$rs,
+ InvertedImOperand:$imm), 0>;
+def : MipsInstAlias<"subu $rs, $imm", (ADDiu GPR32Opnd:$rs, GPR32Opnd:$rs,
+ InvertedImOperand:$imm), 0>;
+def : MipsInstAlias<"sra $rd, $rt, $rs",
+ (SRAV GPR32Opnd:$rd, GPR32Opnd:$rt, GPR32Opnd:$rs), 0>;
+def : MipsInstAlias<"srl $rd, $rt, $rs",
+ (SRLV GPR32Opnd:$rd, GPR32Opnd:$rt, GPR32Opnd:$rs), 0>;
+def : MipsInstAlias<"sdbbp", (SDBBP 0)>, ISA_MIPS32_NOT_32R6_64R6;
+def : MipsInstAlias<"sync",
+ (SYNC 0), 1>, ISA_MIPS2;
//===----------------------------------------------------------------------===//
// Assembler Pseudo Instructions
//===----------------------------------------------------------------------===//
@@ -1217,13 +1556,17 @@ def : MipsPat<(i32 imm:$imm),
// Carry MipsPatterns
def : MipsPat<(subc GPR32:$lhs, GPR32:$rhs),
(SUBu GPR32:$lhs, GPR32:$rhs)>;
-let Predicates = [HasStdEnc, NotDSP] in {
+let AdditionalPredicates = [NotDSP] in {
def : MipsPat<(addc GPR32:$lhs, GPR32:$rhs),
(ADDu GPR32:$lhs, GPR32:$rhs)>;
def : MipsPat<(addc GPR32:$src, immSExt16:$imm),
(ADDiu GPR32:$src, imm:$imm)>;
}
+// SYNC
+def : MipsPat<(MipsSync (i32 immz)),
+ (SYNC 0)>, ISA_MIPS2;
+
// Call
def : MipsPat<(MipsJmpLink (i32 tglobaladdr:$dst)),
(JAL tglobaladdr:$dst)>;
@@ -1286,14 +1629,11 @@ def : MipsPat<(not GPR32:$in),
(NOR GPR32Opnd:$in, ZERO)>;
// extended loads
-let Predicates = [HasStdEnc] in {
- def : MipsPat<(i32 (extloadi1 addr:$src)), (LBu addr:$src)>;
- def : MipsPat<(i32 (extloadi8 addr:$src)), (LBu addr:$src)>;
- def : MipsPat<(i32 (extloadi16 addr:$src)), (LHu addr:$src)>;
-}
+def : MipsPat<(i32 (extloadi1 addr:$src)), (LBu addr:$src)>;
+def : MipsPat<(i32 (extloadi8 addr:$src)), (LBu addr:$src)>;
+def : MipsPat<(i32 (extloadi16 addr:$src)), (LHu addr:$src)>;
// peepholes
-let Predicates = [HasStdEnc] in
def : MipsPat<(store (i32 0), addr:$dst), (SW ZERO, addr:$dst)>;
// brcond patterns
@@ -1387,11 +1727,9 @@ def : MipsPat<(bswap GPR32:$rt), (ROTR (WSBH GPR32:$rt), 16)>;
// Load halfword/word patterns.
let AddedComplexity = 40 in {
- let Predicates = [HasStdEnc] in {
- def : LoadRegImmPat<LBu, i32, zextloadi8>;
- def : LoadRegImmPat<LH, i32, sextloadi16>;
- def : LoadRegImmPat<LW, i32, load>;
- }
+ def : LoadRegImmPat<LBu, i32, zextloadi8>;
+ def : LoadRegImmPat<LH, i32, sextloadi16>;
+ def : LoadRegImmPat<LW, i32, load>;
}
//===----------------------------------------------------------------------===//
@@ -1402,6 +1740,9 @@ include "MipsInstrFPU.td"
include "Mips64InstrInfo.td"
include "MipsCondMov.td"
+include "Mips32r6InstrInfo.td"
+include "Mips64r6InstrInfo.td"
+
//
// Mips16
@@ -1419,3 +1760,4 @@ include "MipsMSAInstrInfo.td"
// Micromips
include "MicroMipsInstrFormats.td"
include "MicroMipsInstrInfo.td"
+include "MicroMipsInstrFPU.td"
diff --git a/contrib/llvm/lib/Target/Mips/MipsJITInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsJITInfo.cpp
index d76cb1d..2072488 100644
--- a/contrib/llvm/lib/Target/Mips/MipsJITInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsJITInfo.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "jit"
#include "MipsJITInfo.h"
#include "MipsInstrInfo.h"
#include "MipsRelocations.h"
@@ -25,6 +24,8 @@
#include <cstdlib>
using namespace llvm;
+#define DEBUG_TYPE "jit"
+
void MipsJITInfo::replaceMachineCodeForFunction(void *Old, void *New) {
unsigned NewAddr = (intptr_t)New;
diff --git a/contrib/llvm/lib/Target/Mips/MipsJITInfo.h b/contrib/llvm/lib/Target/Mips/MipsJITInfo.h
index ecda310..c9dfd83 100644
--- a/contrib/llvm/lib/Target/Mips/MipsJITInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MipsJITInfo.h
@@ -37,26 +37,26 @@ class MipsJITInfo : public TargetJITInfo {
/// overwriting OLD with a branch to NEW. This is used for self-modifying
/// code.
///
- virtual void replaceMachineCodeForFunction(void *Old, void *New);
+ void replaceMachineCodeForFunction(void *Old, void *New) override;
// getStubLayout - Returns the size and alignment of the largest call stub
// on Mips.
- virtual StubLayout getStubLayout();
+ StubLayout getStubLayout() override;
/// emitFunctionStub - Use the specified JITCodeEmitter object to emit a
/// small native function that simply calls the function at the specified
/// address.
- virtual void *emitFunctionStub(const Function *F, void *Fn,
- JITCodeEmitter &JCE);
+ void *emitFunctionStub(const Function *F, void *Fn,
+ JITCodeEmitter &JCE) override;
/// getLazyResolverFunction - Expose the lazy resolver to the JIT.
- virtual LazyResolverFn getLazyResolverFunction(JITCompilerFn);
+ LazyResolverFn getLazyResolverFunction(JITCompilerFn) override;
/// relocate - Before the JIT can run a block of code that has been emitted,
/// it must rewrite the code to contain the actual addresses of any
/// referenced global symbols.
- virtual void relocate(void *Function, MachineRelocation *MR,
- unsigned NumRelocs, unsigned char *GOTBase);
+ void relocate(void *Function, MachineRelocation *MR,
+ unsigned NumRelocs, unsigned char *GOTBase) override;
/// Initialize - Initialize internal stage for the function being JITted.
void Initialize(const MachineFunction &MF, bool isPIC,
diff --git a/contrib/llvm/lib/Target/Mips/MipsLongBranch.cpp b/contrib/llvm/lib/Target/Mips/MipsLongBranch.cpp
index 2efe578..27110b6 100644
--- a/contrib/llvm/lib/Target/Mips/MipsLongBranch.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsLongBranch.cpp
@@ -10,16 +10,12 @@
// This pass expands a branch or jump instruction into a long branch if its
// offset is too large to fit into its immediate field.
//
-// FIXME:
-// 1. Fix pc-region jump instructions which cross 256MB segment boundaries.
-// 2. If program has inline assembly statements whose size cannot be
-// determined accurately, load branch target addresses from the GOT.
+// FIXME: Fix pc-region jump instructions which cross 256MB segment boundaries.
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mips-long-branch"
-
#include "Mips.h"
#include "MCTargetDesc/MipsBaseInfo.h"
+#include "MCTargetDesc/MipsMCNaCl.h"
#include "MipsTargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -33,6 +29,8 @@
using namespace llvm;
+#define DEBUG_TYPE "mips-long-branch"
+
STATISTIC(LongBranches, "Number of long branches.");
static cl::opt<bool> SkipLongBranch(
@@ -56,7 +54,7 @@ namespace {
bool HasLongBranch;
MachineInstr *Br;
- MBBInfo() : Size(0), HasLongBranch(false), Br(0) {}
+ MBBInfo() : Size(0), HasLongBranch(false), Br(nullptr) {}
};
class MipsLongBranch : public MachineFunctionPass {
@@ -67,13 +65,14 @@ namespace {
: MachineFunctionPass(ID), TM(tm),
IsPIC(TM.getRelocationModel() == Reloc::PIC_),
ABI(TM.getSubtarget<MipsSubtarget>().getTargetABI()),
- LongBranchSeqSize(!IsPIC ? 2 : (ABI == MipsSubtarget::N64 ? 13 : 9)) {}
+ LongBranchSeqSize(!IsPIC ? 2 : (ABI == MipsSubtarget::N64 ? 10 :
+ (!TM.getSubtarget<MipsSubtarget>().isTargetNaCl() ? 9 : 10))) {}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Mips Long Branch";
}
- bool runOnMachineFunction(MachineFunction &F);
+ bool runOnMachineFunction(MachineFunction &F) override;
private:
void splitMBB(MachineBasicBlock *MBB);
@@ -111,7 +110,7 @@ static MachineBasicBlock *getTargetMBB(const MachineInstr &Br) {
}
assert(false && "This instruction does not have an MBB operand.");
- return 0;
+ return nullptr;
}
// Traverse the list of instructions backwards until a non-debug instruction is
@@ -134,7 +133,7 @@ void MipsLongBranch::splitMBB(MachineBasicBlock *MBB) {
(!LastBr->isConditionalBranch() && !LastBr->isUnconditionalBranch()))
return;
- ReverseIter FirstBr = getNonDebugInstr(llvm::next(LastBr), End);
+ ReverseIter FirstBr = getNonDebugInstr(std::next(LastBr), End);
// MBB has only one branch instruction if FirstBr is not a branch
// instruction.
@@ -154,7 +153,7 @@ void MipsLongBranch::splitMBB(MachineBasicBlock *MBB) {
NewMBB->removeSuccessor(Tgt);
MBB->addSuccessor(NewMBB);
MBB->addSuccessor(Tgt);
- MF->insert(llvm::next(MachineFunction::iterator(MBB)), NewMBB);
+ MF->insert(std::next(MachineFunction::iterator(MBB)), NewMBB);
NewMBB->splice(NewMBB->end(), MBB, (++LastBr).base(), MBB->end());
}
@@ -267,20 +266,21 @@ void MipsLongBranch::expandToLongBranch(MBBInfo &I) {
LongBrMBB->addSuccessor(BalTgtMBB);
BalTgtMBB->addSuccessor(TgtMBB);
- int64_t TgtAddress = MBBInfos[TgtMBB->getNumber()].Address;
- unsigned BalTgtMBBSize = 5;
- int64_t Offset = TgtAddress - (I.Address + I.Size - BalTgtMBBSize * 4);
- int64_t Lo = SignExtend64<16>(Offset & 0xffff);
- int64_t Hi = SignExtend64<16>(((Offset + 0x8000) >> 16) & 0xffff);
+ // We must select between the MIPS32r6/MIPS64r6 BAL (which is a normal
+ // instruction) and the pre-MIPS32r6/MIPS64r6 definition (which is a
+ // pseudo-instruction wrapping BGEZAL).
+
+ const MipsSubtarget &Subtarget = TM.getSubtarget<MipsSubtarget>();
+ unsigned BalOp = Subtarget.hasMips32r6() ? Mips::BAL : Mips::BAL_BR;
if (ABI != MipsSubtarget::N64) {
// $longbr:
// addiu $sp, $sp, -8
// sw $ra, 0($sp)
- // bal $baltgt
// lui $at, %hi($tgt - $baltgt)
- // $baltgt:
+ // bal $baltgt
// addiu $at, $at, %lo($tgt - $baltgt)
+ // $baltgt:
// addu $at, $ra, $at
// lw $ra, 0($sp)
// jr $at
@@ -295,35 +295,64 @@ void MipsLongBranch::expandToLongBranch(MBBInfo &I) {
BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::SW)).addReg(Mips::RA)
.addReg(Mips::SP).addImm(0);
+ // LUi and ADDiu instructions create the 32-bit offset of the target basic
+ // block from the target of the BAL instruction. We cannot use an immediate
+ // value for this offset because it cannot be determined accurately when
+ // the program has inline assembly statements. We therefore use the
+ // relocation expressions %hi($tgt-$baltgt) and %lo($tgt-$baltgt) which
+ // are resolved during the fixup, so the values will always be correct.
+ //
+ // Since we cannot create %hi($tgt-$baltgt) and %lo($tgt-$baltgt)
+ // expressions at this point (it is possible only at the MC layer),
+ // we replace LUi and ADDiu with pseudo instructions
+ // LONG_BRANCH_LUi and LONG_BRANCH_ADDiu, and add both basic
+ // blocks as operands to these instructions. When lowering these pseudo
+ // instructions to LUi and ADDiu in the MC layer, we will create
+ // %hi($tgt-$baltgt) and %lo($tgt-$baltgt) expressions and add them as
+ // operands to lowered instructions.
+
+ BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::LONG_BRANCH_LUi), Mips::AT)
+ .addMBB(TgtMBB).addMBB(BalTgtMBB);
MIBundleBuilder(*LongBrMBB, Pos)
- .append(BuildMI(*MF, DL, TII->get(Mips::BAL_BR)).addMBB(BalTgtMBB))
- .append(BuildMI(*MF, DL, TII->get(Mips::LUi), Mips::AT).addImm(Hi));
+ .append(BuildMI(*MF, DL, TII->get(BalOp)).addMBB(BalTgtMBB))
+ .append(BuildMI(*MF, DL, TII->get(Mips::LONG_BRANCH_ADDiu), Mips::AT)
+ .addReg(Mips::AT)
+ .addMBB(TgtMBB)
+ .addMBB(BalTgtMBB));
Pos = BalTgtMBB->begin();
- BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::ADDiu), Mips::AT)
- .addReg(Mips::AT).addImm(Lo);
BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::ADDu), Mips::AT)
.addReg(Mips::RA).addReg(Mips::AT);
BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::LW), Mips::RA)
.addReg(Mips::SP).addImm(0);
- MIBundleBuilder(*BalTgtMBB, Pos)
- .append(BuildMI(*MF, DL, TII->get(Mips::JR)).addReg(Mips::AT))
- .append(BuildMI(*MF, DL, TII->get(Mips::ADDiu), Mips::SP)
- .addReg(Mips::SP).addImm(8));
+ if (!TM.getSubtarget<MipsSubtarget>().isTargetNaCl()) {
+ MIBundleBuilder(*BalTgtMBB, Pos)
+ .append(BuildMI(*MF, DL, TII->get(Mips::JR)).addReg(Mips::AT))
+ .append(BuildMI(*MF, DL, TII->get(Mips::ADDiu), Mips::SP)
+ .addReg(Mips::SP).addImm(8));
+ } else {
+ // In NaCl, modifying the sp is not allowed in a branch delay slot.
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::ADDiu), Mips::SP)
+ .addReg(Mips::SP).addImm(8);
+
+ MIBundleBuilder(*BalTgtMBB, Pos)
+ .append(BuildMI(*MF, DL, TII->get(Mips::JR)).addReg(Mips::AT))
+ .append(BuildMI(*MF, DL, TII->get(Mips::NOP)));
+
+ // Bundle-align the target of indirect branch JR.
+ TgtMBB->setAlignment(MIPS_NACL_BUNDLE_ALIGN);
+ }
} else {
// $longbr:
// daddiu $sp, $sp, -16
// sd $ra, 0($sp)
- // lui64 $at, %highest($tgt - $baltgt)
- // daddiu $at, $at, %higher($tgt - $baltgt)
+ // daddiu $at, $zero, %hi($tgt - $baltgt)
// dsll $at, $at, 16
- // daddiu $at, $at, %hi($tgt - $baltgt)
// bal $baltgt
- // dsll $at, $at, 16
- // $baltgt:
// daddiu $at, $at, %lo($tgt - $baltgt)
+ // $baltgt:
// daddu $at, $ra, $at
// ld $ra, 0($sp)
// jr64 $at
@@ -331,9 +360,20 @@ void MipsLongBranch::expandToLongBranch(MBBInfo &I) {
// $fallthrough:
//
- int64_t Higher = SignExtend64<16>(((Offset + 0x80008000) >> 32) & 0xffff);
- int64_t Highest =
- SignExtend64<16>(((Offset + 0x800080008000LL) >> 48) & 0xffff);
+ // We assume the branch is within-function, and that the offset is within
+ // +/- 2GB. High 32 bits will therefore always be zero.
+
+ // Note that this will work even if the offset is negative, because
+ // of the +1 modification that's added in that case. For example, if the
+ // offset is -1MB (0xFFFFFFFFFFF00000), the computation for %higher is
+ //
+ // 0xFFFFFFFFFFF00000 + 0x80008000 = 0x000000007FF08000
+ //
+ // and the bits [47:32] are zero. For %highest
+ //
+ // 0xFFFFFFFFFFF00000 + 0x800080008000 = 0x000080007FF08000
+ //
+ // and the bits [63:48] are zero.
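
As a side note, the decomposition that the deleted Lo/Hi/Higher/Highest lines performed, and that the worked example above reasons about, can be checked in isolation. The formulas and SignExtend64 (llvm/Support/MathExtras.h) come from the removed code; the driver, the chosen -1MB offset and the reconstruction check are illustrative additions, not part of the patch.

    // Standalone sketch only; mirrors the removed Lo/Hi/Higher/Highest lines.
    #include "llvm/Support/MathExtras.h"
    #include <cassert>
    #include <cstdint>

    int main() {
      const int64_t Offset = -0x100000; // -1MB, i.e. 0xFFFFFFFFFFF00000
      int64_t Lo      = llvm::SignExtend64<16>(Offset & 0xffff);
      int64_t Hi      = llvm::SignExtend64<16>(((Offset + 0x8000) >> 16) & 0xffff);
      int64_t Higher  = llvm::SignExtend64<16>(((Offset + 0x80008000LL) >> 32) & 0xffff);
      int64_t Highest = llvm::SignExtend64<16>(((Offset + 0x800080008000LL) >> 48) & 0xffff);
      // The 0x8000-style additions pre-compensate for the sign extension of the
      // lower pieces, so the pieces recombine to the original offset...
      assert(Highest * (1LL << 48) + Higher * (1LL << 32) + Hi * (1LL << 16) + Lo == Offset);
      // ...and, for this offset at least, the two upper pieces are zero, which
      // is why the new sequence only needs the %hi/%lo parts.
      assert(Higher == 0 && Highest == 0);
      return 0;
    }
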
Pos = LongBrMBB->begin();
@@ -341,24 +381,22 @@ void MipsLongBranch::expandToLongBranch(MBBInfo &I) {
.addReg(Mips::SP_64).addImm(-16);
BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::SD)).addReg(Mips::RA_64)
.addReg(Mips::SP_64).addImm(0);
- BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::LUi64), Mips::AT_64)
- .addImm(Highest);
- BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::DADDiu), Mips::AT_64)
- .addReg(Mips::AT_64).addImm(Higher);
+ BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::LONG_BRANCH_DADDiu),
+ Mips::AT_64).addReg(Mips::ZERO_64)
+ .addMBB(TgtMBB, MipsII::MO_ABS_HI).addMBB(BalTgtMBB);
BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::DSLL), Mips::AT_64)
.addReg(Mips::AT_64).addImm(16);
- BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::DADDiu), Mips::AT_64)
- .addReg(Mips::AT_64).addImm(Hi);
MIBundleBuilder(*LongBrMBB, Pos)
- .append(BuildMI(*MF, DL, TII->get(Mips::BAL_BR)).addMBB(BalTgtMBB))
- .append(BuildMI(*MF, DL, TII->get(Mips::DSLL), Mips::AT_64)
- .addReg(Mips::AT_64).addImm(16));
+ .append(BuildMI(*MF, DL, TII->get(BalOp)).addMBB(BalTgtMBB))
+ .append(
+ BuildMI(*MF, DL, TII->get(Mips::LONG_BRANCH_DADDiu), Mips::AT_64)
+ .addReg(Mips::AT_64)
+ .addMBB(TgtMBB, MipsII::MO_ABS_LO)
+ .addMBB(BalTgtMBB));
Pos = BalTgtMBB->begin();
- BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::DADDiu), Mips::AT_64)
- .addReg(Mips::AT_64).addImm(Lo);
BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::DADDu), Mips::AT_64)
.addReg(Mips::RA_64).addReg(Mips::AT_64);
BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::LD), Mips::RA_64)
@@ -370,8 +408,7 @@ void MipsLongBranch::expandToLongBranch(MBBInfo &I) {
.addReg(Mips::SP_64).addImm(16));
}
- assert(BalTgtMBBSize == BalTgtMBB->size());
- assert(LongBrMBB->size() + BalTgtMBBSize == LongBranchSeqSize);
+ assert(LongBrMBB->size() + BalTgtMBB->size() == LongBranchSeqSize);
} else {
// $longbr:
// j $tgt
@@ -412,7 +449,8 @@ bool MipsLongBranch::runOnMachineFunction(MachineFunction &F) {
const MipsInstrInfo *TII =
static_cast<const MipsInstrInfo*>(TM.getInstrInfo());
- if (TM.getSubtarget<MipsSubtarget>().inMips16Mode())
+ const MipsSubtarget &STI = TM.getSubtarget<MipsSubtarget>();
+ if (STI.inMips16Mode() || !STI.enableLongBranchPass())
return false;
if ((TM.getRelocationModel() == Reloc::PIC_) &&
TM.getSubtarget<MipsSubtarget>().isABI_O32() &&
@@ -438,9 +476,18 @@ bool MipsLongBranch::runOnMachineFunction(MachineFunction &F) {
continue;
int ShVal = TM.getSubtarget<MipsSubtarget>().inMicroMipsMode() ? 2 : 4;
+ int64_t Offset = computeOffset(I->Br) / ShVal;
+
+ if (TM.getSubtarget<MipsSubtarget>().isTargetNaCl()) {
+ // The offset calculation does not include sandboxing instructions
+ // that will be added later in the MC layer. Since at this point we
+ // don't know the exact amount of code that "sandboxing" will add, we
+ // conservatively estimate that code will not grow more than 100%.
+ Offset *= 2;
+ }
// Check if offset fits into 16-bit immediate field of branches.
- if (!ForceLongBranch && isInt<16>(computeOffset(I->Br) / ShVal))
+ if (!ForceLongBranch && isInt<16>(Offset))
continue;
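
To make the conservative estimate above concrete, a toy calculation (made-up numbers, not from the patch) using the same isInt<16> range check applied just above:

    // Sketch only: why doubling the offset for NaCl can force a long branch.
    #include "llvm/Support/MathExtras.h" // llvm::isInt
    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t Offset = 20000;            // hypothetical branch distance, in instructions
      assert(llvm::isInt<16>(Offset));   // fits a 16-bit branch offset as-is
      Offset *= 2;                       // assume sandboxing may double the code size
      assert(!llvm::isInt<16>(Offset));  // 40000 no longer fits, so expand to a long branch
      return 0;
    }
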
I->HasLongBranch = true;
diff --git a/contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp b/contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp
index b6dfadc..821392e 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp
@@ -18,10 +18,10 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
-#include "llvm/Target/Mangler.h"
using namespace llvm;
@@ -151,7 +151,75 @@ MCOperand MipsMCInstLower::LowerOperand(const MachineOperand &MO,
return MCOperand();
}
+MCOperand MipsMCInstLower::createSub(MachineBasicBlock *BB1,
+ MachineBasicBlock *BB2,
+ MCSymbolRefExpr::VariantKind Kind) const {
+ const MCSymbolRefExpr *Sym1 = MCSymbolRefExpr::Create(BB1->getSymbol(), *Ctx);
+ const MCSymbolRefExpr *Sym2 = MCSymbolRefExpr::Create(BB2->getSymbol(), *Ctx);
+ const MCBinaryExpr *Sub = MCBinaryExpr::CreateSub(Sym1, Sym2, *Ctx);
+
+ return MCOperand::CreateExpr(MipsMCExpr::Create(Kind, Sub, *Ctx));
+}
+
+void MipsMCInstLower::
+lowerLongBranchLUi(const MachineInstr *MI, MCInst &OutMI) const {
+ OutMI.setOpcode(Mips::LUi);
+
+ // Lower register operand.
+ OutMI.addOperand(LowerOperand(MI->getOperand(0)));
+
+ // Create %hi($tgt-$baltgt).
+ OutMI.addOperand(createSub(MI->getOperand(1).getMBB(),
+ MI->getOperand(2).getMBB(),
+ MCSymbolRefExpr::VK_Mips_ABS_HI));
+}
+
+void MipsMCInstLower::
+lowerLongBranchADDiu(const MachineInstr *MI, MCInst &OutMI, int Opcode,
+ MCSymbolRefExpr::VariantKind Kind) const {
+ OutMI.setOpcode(Opcode);
+
+ // Lower two register operands.
+ for (unsigned I = 0, E = 2; I != E; ++I) {
+ const MachineOperand &MO = MI->getOperand(I);
+ OutMI.addOperand(LowerOperand(MO));
+ }
+
+ // Create %lo($tgt-$baltgt) or %hi($tgt-$baltgt).
+ OutMI.addOperand(createSub(MI->getOperand(2).getMBB(),
+ MI->getOperand(3).getMBB(), Kind));
+}
+
+bool MipsMCInstLower::lowerLongBranch(const MachineInstr *MI,
+ MCInst &OutMI) const {
+ switch (MI->getOpcode()) {
+ default:
+ return false;
+ case Mips::LONG_BRANCH_LUi:
+ lowerLongBranchLUi(MI, OutMI);
+ return true;
+ case Mips::LONG_BRANCH_ADDiu:
+ lowerLongBranchADDiu(MI, OutMI, Mips::ADDiu,
+ MCSymbolRefExpr::VK_Mips_ABS_LO);
+ return true;
+ case Mips::LONG_BRANCH_DADDiu:
+ unsigned TargetFlags = MI->getOperand(2).getTargetFlags();
+ if (TargetFlags == MipsII::MO_ABS_HI)
+ lowerLongBranchADDiu(MI, OutMI, Mips::DADDiu,
+ MCSymbolRefExpr::VK_Mips_ABS_HI);
+ else if (TargetFlags == MipsII::MO_ABS_LO)
+ lowerLongBranchADDiu(MI, OutMI, Mips::DADDiu,
+ MCSymbolRefExpr::VK_Mips_ABS_LO);
+ else
+ report_fatal_error("Unexpected flags for LONG_BRANCH_DADDiu");
+ return true;
+ }
+}
+
void MipsMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
+ if (lowerLongBranch(MI, OutMI))
+ return;
+
OutMI.setOpcode(MI->getOpcode());
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
diff --git a/contrib/llvm/lib/Target/Mips/MipsMCInstLower.h b/contrib/llvm/lib/Target/Mips/MipsMCInstLower.h
index 4570bd9..269190f 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMCInstLower.h
+++ b/contrib/llvm/lib/Target/Mips/MipsMCInstLower.h
@@ -9,6 +9,7 @@
#ifndef MIPSMCINSTLOWER_H
#define MIPSMCINSTLOWER_H
+#include "MCTargetDesc/MipsMCExpr.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/Support/Compiler.h"
@@ -36,6 +37,13 @@ public:
private:
MCOperand LowerSymbolOperand(const MachineOperand &MO,
MachineOperandType MOTy, unsigned Offset) const;
+ MCOperand createSub(MachineBasicBlock *BB1, MachineBasicBlock *BB2,
+ MCSymbolRefExpr::VariantKind Kind) const;
+ void lowerLongBranchLUi(const MachineInstr *MI, MCInst &OutMI) const;
+ void lowerLongBranchADDiu(const MachineInstr *MI, MCInst &OutMI,
+ int Opcode,
+ MCSymbolRefExpr::VariantKind Kind) const;
+ bool lowerLongBranch(const MachineInstr *MI, MCInst &OutMI) const;
};
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsMSAInstrFormats.td b/contrib/llvm/lib/Target/Mips/MipsMSAInstrFormats.td
index 875dc0b..bff2d0f 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMSAInstrFormats.td
+++ b/contrib/llvm/lib/Target/Mips/MipsMSAInstrFormats.td
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
-def HasMSA : Predicate<"Subtarget.hasMSA()">,
+def HasMSA : Predicate<"Subtarget->hasMSA()">,
AssemblerPredicate<"FeatureMSA">;
class MSAInst : MipsInst<(outs), (ins), "", [], NoItinerary, FrmOther> {
@@ -15,6 +15,10 @@ class MSAInst : MipsInst<(outs), (ins), "", [], NoItinerary, FrmOther> {
let Inst{31-26} = 0b011110;
}
+class MSA64Inst : MSAInst {
+ let Predicates = [HasMSA, HasMips64];
+}
+
class MSACBranch : MSAInst {
let Inst{31-26} = 0b010001;
}
@@ -23,7 +27,11 @@ class MSASpecial : MSAInst {
let Inst{31-26} = 0b000000;
}
-class PseudoMSA<dag outs, dag ins, list<dag> pattern,
+class MSA64Special : MSA64Inst {
+ let Inst{31-26} = 0b000000;
+}
+
+class MSAPseudo<dag outs, dag ins, list<dag> pattern,
InstrItinClass itin = IIPseudo>:
MipsPseudo<outs, ins, pattern, itin> {
let Predicates = [HasMSA];
@@ -92,6 +100,17 @@ class MSA_2R_FILL_FMT<bits<8> major, bits<2> df, bits<6> minor>: MSAInst {
let Inst{5-0} = minor;
}
+class MSA_2R_FILL_D_FMT<bits<8> major, bits<2> df, bits<6> minor>: MSA64Inst {
+ bits<5> rs;
+ bits<5> wd;
+
+ let Inst{25-18} = major;
+ let Inst{17-16} = df;
+ let Inst{15-11} = rs;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
class MSA_2R_FMT<bits<8> major, bits<2> df, bits<6> minor>: MSAInst {
bits<5> ws;
bits<5> wd;
@@ -274,6 +293,19 @@ class MSA_ELM_COPY_W_FMT<bits<4> major, bits<6> minor>: MSAInst {
let Inst{5-0} = minor;
}
+class MSA_ELM_COPY_D_FMT<bits<4> major, bits<6> minor>: MSA64Inst {
+ bits<4> n;
+ bits<5> ws;
+ bits<5> rd;
+
+ let Inst{25-22} = major;
+ let Inst{21-17} = 0b11100;
+ let Inst{16} = n{0};
+ let Inst{15-11} = ws;
+ let Inst{10-6} = rd;
+ let Inst{5-0} = minor;
+}
+
class MSA_ELM_INSERT_B_FMT<bits<4> major, bits<6> minor>: MSAInst {
bits<6> n;
bits<5> rs;
@@ -313,6 +345,19 @@ class MSA_ELM_INSERT_W_FMT<bits<4> major, bits<6> minor>: MSAInst {
let Inst{5-0} = minor;
}
+class MSA_ELM_INSERT_D_FMT<bits<4> major, bits<6> minor>: MSA64Inst {
+ bits<6> n;
+ bits<5> rs;
+ bits<5> wd;
+
+ let Inst{25-22} = major;
+ let Inst{21-17} = 0b11100;
+ let Inst{16} = n{0};
+ let Inst{15-11} = rs;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
class MSA_I5_FMT<bits<3> major, bits<2> df, bits<6> minor>: MSAInst {
bits<5> imm;
bits<5> ws;
@@ -404,3 +449,17 @@ class SPECIAL_LSA_FMT<bits<6> minor>: MSASpecial {
let Inst{7-6} = sa;
let Inst{5-0} = minor;
}
+
+class SPECIAL_DLSA_FMT<bits<6> minor>: MSA64Special {
+ bits<5> rs;
+ bits<5> rt;
+ bits<5> rd;
+ bits<2> sa;
+
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = rd;
+ let Inst{10-8} = 0b000;
+ let Inst{7-6} = sa;
+ let Inst{5-0} = minor;
+}
diff --git a/contrib/llvm/lib/Target/Mips/MipsMSAInstrInfo.td b/contrib/llvm/lib/Target/Mips/MipsMSAInstrInfo.td
index 82c51a6..285bb14 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMSAInstrInfo.td
+++ b/contrib/llvm/lib/Target/Mips/MipsMSAInstrInfo.td
@@ -27,6 +27,9 @@ def SDT_SHF : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisVec<0>,
SDTCisVT<1, i32>, SDTCisSameAs<0, 2>]>;
def SDT_ILV : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisVec<0>,
SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>]>;
+def SDT_INSVE : SDTypeProfile<1, 4, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
+ SDTCisVT<2, i32>, SDTCisSameAs<0, 3>,
+ SDTCisVT<4, i32>]>;
def MipsVAllNonZero : SDNode<"MipsISD::VALL_NONZERO", SDT_MipsVecCond>;
def MipsVAnyNonZero : SDNode<"MipsISD::VANY_NONZERO", SDT_MipsVecCond>;
@@ -50,6 +53,7 @@ def MipsILVL : SDNode<"MipsISD::ILVL", SDT_ILV>;
def MipsILVR : SDNode<"MipsISD::ILVR", SDT_ILV>;
def MipsPCKEV : SDNode<"MipsISD::PCKEV", SDT_ILV>;
def MipsPCKOD : SDNode<"MipsISD::PCKOD", SDT_ILV>;
+def MipsINSVE : SDNode<"MipsISD::INSVE", SDT_INSVE>;
def vsetcc : SDNode<"ISD::SETCC", SDT_VSetCC>;
def vfsetcc : SDNode<"ISD::SETCC", SDT_VFSetCC>;
@@ -61,15 +65,11 @@ def MipsVExtractZExt : SDNode<"MipsISD::VEXTRACT_ZEXT_ELT",
// Operands
-def uimm2 : Operand<i32> {
- let PrintMethod = "printUnsignedImm";
-}
-
// The immediate of an LSA instruction needs special handling
// as the encoded value should be subtracted by one.
def uimm2LSAAsmOperand : AsmOperandClass {
let Name = "LSAImm";
- let ParserMethod = "parseLSAImm";
+ let ParserMethod = "ParseLSAImm";
let RenderMethod = "addImmOperands";
}
@@ -80,10 +80,6 @@ def LSAImm : Operand<i32> {
let ParserMatchClass = uimm2LSAAsmOperand;
}
-def uimm3 : Operand<i32> {
- let PrintMethod = "printUnsignedImm8";
-}
-
def uimm4 : Operand<i32> {
let PrintMethod = "printUnsignedImm8";
}
@@ -94,8 +90,6 @@ def uimm8 : Operand<i32> {
def simm5 : Operand<i32>;
-def simm10 : Operand<i32>;
-
def vsplat_uimm1 : Operand<vAny> {
let PrintMethod = "printUnsignedImm8";
}
@@ -137,6 +131,8 @@ def vextract_sext_i16 : PatFrag<(ops node:$vec, node:$idx),
(MipsVExtractSExt node:$vec, node:$idx, i16)>;
def vextract_sext_i32 : PatFrag<(ops node:$vec, node:$idx),
(MipsVExtractSExt node:$vec, node:$idx, i32)>;
+def vextract_sext_i64 : PatFrag<(ops node:$vec, node:$idx),
+ (MipsVExtractSExt node:$vec, node:$idx, i64)>;
def vextract_zext_i8 : PatFrag<(ops node:$vec, node:$idx),
(MipsVExtractZExt node:$vec, node:$idx, i8)>;
@@ -144,6 +140,8 @@ def vextract_zext_i16 : PatFrag<(ops node:$vec, node:$idx),
(MipsVExtractZExt node:$vec, node:$idx, i16)>;
def vextract_zext_i32 : PatFrag<(ops node:$vec, node:$idx),
(MipsVExtractZExt node:$vec, node:$idx, i32)>;
+def vextract_zext_i64 : PatFrag<(ops node:$vec, node:$idx),
+ (MipsVExtractZExt node:$vec, node:$idx, i64)>;
def vinsert_v16i8 : PatFrag<(ops node:$vec, node:$val, node:$idx),
(v16i8 (vector_insert node:$vec, node:$val, node:$idx))>;
@@ -151,6 +149,17 @@ def vinsert_v8i16 : PatFrag<(ops node:$vec, node:$val, node:$idx),
(v8i16 (vector_insert node:$vec, node:$val, node:$idx))>;
def vinsert_v4i32 : PatFrag<(ops node:$vec, node:$val, node:$idx),
(v4i32 (vector_insert node:$vec, node:$val, node:$idx))>;
+def vinsert_v2i64 : PatFrag<(ops node:$vec, node:$val, node:$idx),
+ (v2i64 (vector_insert node:$vec, node:$val, node:$idx))>;
+
+def insve_v16i8 : PatFrag<(ops node:$v1, node:$i1, node:$v2, node:$i2),
+ (v16i8 (MipsINSVE node:$v1, node:$i1, node:$v2, node:$i2))>;
+def insve_v8i16 : PatFrag<(ops node:$v1, node:$i1, node:$v2, node:$i2),
+ (v8i16 (MipsINSVE node:$v1, node:$i1, node:$v2, node:$i2))>;
+def insve_v4i32 : PatFrag<(ops node:$v1, node:$i1, node:$v2, node:$i2),
+ (v4i32 (MipsINSVE node:$v1, node:$i1, node:$v2, node:$i2))>;
+def insve_v2i64 : PatFrag<(ops node:$v1, node:$i1, node:$v2, node:$i2),
+ (v2i64 (MipsINSVE node:$v1, node:$i1, node:$v2, node:$i2))>;
class vfsetcc_type<ValueType ResTy, ValueType OpTy, CondCode CC> :
PatFrag<(ops node:$lhs, node:$rhs),
@@ -232,7 +241,7 @@ def vsplati32 : PatFrag<(ops node:$e0),
(v4i32 (build_vector node:$e0, node:$e0,
node:$e0, node:$e0))>;
def vsplati64 : PatFrag<(ops node:$e0),
- (v2i64 (build_vector:$v0 node:$e0, node:$e0))>;
+ (v2i64 (build_vector node:$e0, node:$e0))>;
def vsplatf32 : PatFrag<(ops node:$e0),
(v4f32 (build_vector node:$e0, node:$e0,
node:$e0, node:$e0))>;
@@ -614,10 +623,12 @@ class CLTI_U_D_ENC : MSA_I5_FMT<0b011, 0b11, 0b000111>;
class COPY_S_B_ENC : MSA_ELM_COPY_B_FMT<0b0010, 0b011001>;
class COPY_S_H_ENC : MSA_ELM_COPY_H_FMT<0b0010, 0b011001>;
class COPY_S_W_ENC : MSA_ELM_COPY_W_FMT<0b0010, 0b011001>;
+class COPY_S_D_ENC : MSA_ELM_COPY_D_FMT<0b0010, 0b011001>;
class COPY_U_B_ENC : MSA_ELM_COPY_B_FMT<0b0011, 0b011001>;
class COPY_U_H_ENC : MSA_ELM_COPY_H_FMT<0b0011, 0b011001>;
class COPY_U_W_ENC : MSA_ELM_COPY_W_FMT<0b0011, 0b011001>;
+class COPY_U_D_ENC : MSA_ELM_COPY_D_FMT<0b0011, 0b011001>;
class CTCMSA_ENC : MSA_ELM_CTCMSA_FMT<0b0000111110, 0b011001>;
@@ -724,6 +735,7 @@ class FFQR_D_ENC : MSA_2RF_FMT<0b110011011, 0b1, 0b011110>;
class FILL_B_ENC : MSA_2R_FILL_FMT<0b11000000, 0b00, 0b011110>;
class FILL_H_ENC : MSA_2R_FILL_FMT<0b11000000, 0b01, 0b011110>;
class FILL_W_ENC : MSA_2R_FILL_FMT<0b11000000, 0b10, 0b011110>;
+class FILL_D_ENC : MSA_2R_FILL_D_FMT<0b11000000, 0b11, 0b011110>;
class FLOG2_W_ENC : MSA_2RF_FMT<0b110010111, 0b0, 0b011110>;
class FLOG2_D_ENC : MSA_2RF_FMT<0b110010111, 0b1, 0b011110>;
@@ -851,6 +863,7 @@ class ILVR_D_ENC : MSA_3R_FMT<0b101, 0b11, 0b010100>;
class INSERT_B_ENC : MSA_ELM_INSERT_B_FMT<0b0100, 0b011001>;
class INSERT_H_ENC : MSA_ELM_INSERT_H_FMT<0b0100, 0b011001>;
class INSERT_W_ENC : MSA_ELM_INSERT_W_FMT<0b0100, 0b011001>;
+class INSERT_D_ENC : MSA_ELM_INSERT_D_FMT<0b0100, 0b011001>;
class INSVE_B_ENC : MSA_ELM_B_FMT<0b0101, 0b011001>;
class INSVE_H_ENC : MSA_ELM_H_FMT<0b0101, 0b011001>;
@@ -868,6 +881,7 @@ class LDI_W_ENC : MSA_I10_FMT<0b110, 0b10, 0b000111>;
class LDI_D_ENC : MSA_I10_FMT<0b110, 0b11, 0b000111>;
class LSA_ENC : SPECIAL_LSA_FMT<0b000101>;
+class DLSA_ENC : SPECIAL_DLSA_FMT<0b010101>;
class MADD_Q_H_ENC : MSA_3RF_FMT<0b0101, 0b0, 0b011100>;
class MADD_Q_W_ENC : MSA_3RF_FMT<0b0101, 0b1, 0b011100>;
@@ -1221,8 +1235,12 @@ class MSA_BIT_BINSXI_DESC_BASE<string instr_asm, ValueType Ty,
dag OutOperandList = (outs ROWD:$wd);
dag InOperandList = (ins ROWD:$wd_in, ROWS:$ws, vsplat_uimm8:$m);
string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
- list<dag> Pattern = [(set ROWD:$wd, (vselect (Ty Mask:$m), (Ty ROWD:$wd_in),
- ROWS:$ws))];
+ // Note that binsxi and vselect treat the condition operand the opposite
+ // way to each other.
+ // (vselect cond, if_set, if_clear)
+ // (BSEL_V cond, if_clear, if_set)
+ list<dag> Pattern = [(set ROWD:$wd, (vselect (Ty Mask:$m), (Ty ROWD:$ws),
+ ROWS:$wd_in))];
InstrItinClass Itinerary = itin;
string Constraints = "$wd = $wd_in";
}
@@ -1261,20 +1279,22 @@ class MSA_COPY_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass Itinerary = itin;
}
-class MSA_ELM_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
- InstrItinClass itin = NoItinerary> {
+class MSA_ELM_SLD_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs ROWD:$wd);
- dag InOperandList = (ins ROWS:$ws, uimm4:$n);
+ dag InOperandList = (ins ROWD:$wd_in, ROWS:$ws, uimm4:$n);
string AsmString = !strconcat(instr_asm, "\t$wd, $ws[$n]");
- list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, immZExt4:$n))];
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWD:$wd_in, ROWS:$ws,
+ immZExt4:$n))];
+ string Constraints = "$wd = $wd_in";
InstrItinClass Itinerary = itin;
}
class MSA_COPY_PSEUDO_BASE<SDPatternOperator OpNode, ValueType VecTy,
RegisterClass RCD, RegisterClass RCWS> :
- MipsPseudo<(outs RCD:$wd), (ins RCWS:$ws, uimm4:$n),
- [(set RCD:$wd, (OpNode (VecTy RCWS:$ws), immZExt4:$n))]> {
+ MSAPseudo<(outs RCD:$wd), (ins RCWS:$ws, uimm4:$n),
+ [(set RCD:$wd, (OpNode (VecTy RCWS:$ws), immZExt4:$n))]> {
bit usesCustomInserter = 1;
}
@@ -1300,17 +1320,6 @@ class MSA_I8_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass Itinerary = itin;
}
-// This class is deprecated and will be removed in the next few patches
-class MSA_I8_X_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
- InstrItinClass itin = NoItinerary> {
- dag OutOperandList = (outs ROWD:$wd);
- dag InOperandList = (ins ROWS:$ws, uimm8:$u8);
- string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $u8");
- list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, immZExt8:$u8))];
- InstrItinClass Itinerary = itin;
-}
-
class MSA_I8_SHF_DESC_BASE<string instr_asm, RegisterOperand ROWD,
RegisterOperand ROWS = ROWD,
InstrItinClass itin = NoItinerary> {
@@ -1355,8 +1364,8 @@ class MSA_2R_FILL_DESC_BASE<string instr_asm, ValueType VT,
class MSA_2R_FILL_PSEUDO_BASE<ValueType VT, SDPatternOperator OpNode,
RegisterClass RCWD, RegisterClass RCWS = RCWD> :
- MipsPseudo<(outs RCWD:$wd), (ins RCWS:$fs),
- [(set RCWD:$wd, (OpNode RCWS:$fs))]> {
+ MSAPseudo<(outs RCWD:$wd), (ins RCWS:$fs),
+ [(set RCWD:$wd, (OpNode RCWS:$fs))]> {
let usesCustomInserter = 1;
}
@@ -1398,9 +1407,9 @@ class MSA_3R_SPLAT_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs ROWD:$wd);
- dag InOperandList = (ins ROWS:$ws, GPR32:$rt);
+ dag InOperandList = (ins ROWS:$ws, GPR32Opnd:$rt);
string AsmString = !strconcat(instr_asm, "\t$wd, $ws[$rt]");
- list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, GPR32:$rt))];
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, GPR32Opnd:$rt))];
InstrItinClass Itinerary = itin;
}
@@ -1421,10 +1430,12 @@ class MSA_3R_SLD_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs ROWD:$wd);
- dag InOperandList = (ins ROWS:$ws, GPR32:$rt);
+ dag InOperandList = (ins ROWD:$wd_in, ROWS:$ws, GPR32Opnd:$rt);
string AsmString = !strconcat(instr_asm, "\t$wd, $ws[$rt]");
- list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, GPR32:$rt))];
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWD:$wd_in, ROWS:$ws,
+ GPR32Opnd:$rt))];
InstrItinClass Itinerary = itin;
+ string Constraints = "$wd = $wd_in";
}
class MSA_3R_4R_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
@@ -1434,8 +1445,8 @@ class MSA_3R_4R_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
dag OutOperandList = (outs ROWD:$wd);
dag InOperandList = (ins ROWD:$wd_in, ROWS:$ws, ROWT:$wt);
string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $wt");
- list<dag> Pattern = [(set ROWD:$wd,
- (OpNode ROWD:$wd_in, ROWS:$ws, ROWT:$wt))];
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWD:$wd_in, ROWS:$ws,
+ ROWT:$wt))];
InstrItinClass Itinerary = itin;
string Constraints = "$wd = $wd_in";
}
@@ -1479,22 +1490,32 @@ class MSA_INSERT_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
class MSA_INSERT_PSEUDO_BASE<SDPatternOperator OpNode, ValueType Ty,
RegisterOperand ROWD, RegisterOperand ROFS> :
- MipsPseudo<(outs ROWD:$wd), (ins ROWD:$wd_in, uimm6:$n, ROFS:$fs),
- [(set ROWD:$wd, (OpNode (Ty ROWD:$wd_in), ROFS:$fs,
+ MSAPseudo<(outs ROWD:$wd), (ins ROWD:$wd_in, uimm6:$n, ROFS:$fs),
+ [(set ROWD:$wd, (OpNode (Ty ROWD:$wd_in), ROFS:$fs,
immZExt6:$n))]> {
bit usesCustomInserter = 1;
string Constraints = "$wd = $wd_in";
}
+class MSA_INSERT_VIDX_PSEUDO_BASE<SDPatternOperator OpNode, ValueType Ty,
+ RegisterOperand ROWD, RegisterOperand ROFS> :
+ MSAPseudo<(outs ROWD:$wd), (ins ROWD:$wd_in, GPR32Opnd:$n, ROFS:$fs),
+ [(set ROWD:$wd, (OpNode (Ty ROWD:$wd_in), ROFS:$fs,
+ GPR32Opnd:$n))]> {
+ bit usesCustomInserter = 1;
+ string Constraints = "$wd = $wd_in";
+}
+
class MSA_INSVE_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs ROWD:$wd);
- dag InOperandList = (ins ROWD:$wd_in, uimm6:$n, ROWS:$ws);
- string AsmString = !strconcat(instr_asm, "\t$wd[$n], $ws[0]");
+ dag InOperandList = (ins ROWD:$wd_in, uimm6:$n, ROWS:$ws, uimmz:$n2);
+ string AsmString = !strconcat(instr_asm, "\t$wd[$n], $ws[$n2]");
list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWD:$wd_in,
immZExt6:$n,
- ROWS:$ws))];
+ ROWS:$ws,
+ immz:$n2))];
InstrItinClass Itinerary = itin;
string Constraints = "$wd = $wd_in";
}
@@ -1525,8 +1546,8 @@ class MSA_ELM_SPLAT_DESC_BASE<string instr_asm, SplatComplexPattern SplatImm,
class MSA_VEC_PSEUDO_BASE<SDPatternOperator OpNode, RegisterOperand ROWD,
RegisterOperand ROWS = ROWD,
RegisterOperand ROWT = ROWD> :
- MipsPseudo<(outs ROWD:$wd), (ins ROWS:$ws, ROWT:$wt),
- [(set ROWD:$wd, (OpNode ROWS:$ws, ROWT:$wt))]>;
+ MSAPseudo<(outs ROWD:$wd), (ins ROWS:$ws, ROWT:$wt),
+ [(set ROWD:$wd, (OpNode ROWS:$ws, ROWT:$wt))]>;
class ADD_A_B_DESC : MSA_3R_DESC_BASE<"add_a.b", int_mips_add_a_b, MSA128BOpnd>,
IsCommutable;
@@ -1735,10 +1756,14 @@ class BNEG_H_DESC : MSA_3R_DESC_BASE<"bneg.h", vbneg_h, MSA128HOpnd>;
class BNEG_W_DESC : MSA_3R_DESC_BASE<"bneg.w", vbneg_w, MSA128WOpnd>;
class BNEG_D_DESC : MSA_3R_DESC_BASE<"bneg.d", vbneg_d, MSA128DOpnd>;
-class BNEGI_B_DESC : MSA_BIT_B_DESC_BASE<"bnegi.b", xor, vsplat_uimm_pow2, MSA128BOpnd>;
-class BNEGI_H_DESC : MSA_BIT_H_DESC_BASE<"bnegi.h", xor, vsplat_uimm_pow2, MSA128HOpnd>;
-class BNEGI_W_DESC : MSA_BIT_W_DESC_BASE<"bnegi.w", xor, vsplat_uimm_pow2, MSA128WOpnd>;
-class BNEGI_D_DESC : MSA_BIT_D_DESC_BASE<"bnegi.d", xor, vsplat_uimm_pow2, MSA128DOpnd>;
+class BNEGI_B_DESC : MSA_BIT_B_DESC_BASE<"bnegi.b", xor, vsplat_uimm_pow2,
+ MSA128BOpnd>;
+class BNEGI_H_DESC : MSA_BIT_H_DESC_BASE<"bnegi.h", xor, vsplat_uimm_pow2,
+ MSA128HOpnd>;
+class BNEGI_W_DESC : MSA_BIT_W_DESC_BASE<"bnegi.w", xor, vsplat_uimm_pow2,
+ MSA128WOpnd>;
+class BNEGI_D_DESC : MSA_BIT_D_DESC_BASE<"bnegi.d", xor, vsplat_uimm_pow2,
+ MSA128DOpnd>;
class BNZ_B_DESC : MSA_CBRANCH_DESC_BASE<"bnz.b", MSA128BOpnd>;
class BNZ_H_DESC : MSA_CBRANCH_DESC_BASE<"bnz.h", MSA128HOpnd>;
@@ -1752,9 +1777,13 @@ class BSEL_V_DESC {
dag InOperandList = (ins MSA128BOpnd:$wd_in, MSA128BOpnd:$ws,
MSA128BOpnd:$wt);
string AsmString = "bsel.v\t$wd, $ws, $wt";
+ // Note that vselect and BSEL_V treat the condition operand the opposite way
+ // from each other.
+ // (vselect cond, if_set, if_clear)
+ // (BSEL_V cond, if_clear, if_set)
list<dag> Pattern = [(set MSA128BOpnd:$wd,
- (vselect MSA128BOpnd:$wd_in, MSA128BOpnd:$ws,
- MSA128BOpnd:$wt))];
+ (vselect MSA128BOpnd:$wd_in, MSA128BOpnd:$wt,
+ MSA128BOpnd:$ws))];
InstrItinClass Itinerary = NoItinerary;
string Constraints = "$wd = $wd_in";
}
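
As a side note, the operand swap can be checked with a small bit-level model. The BSEL.V behaviour assumed here (bits of $wd_in select $ws where clear and $wt where set) is my reading of the MSA instruction rather than anything stated in the patch, and the vselect model follows the usual ISD::VSELECT convention of taking the first value where the condition is set:

    // Illustrative model only; the BSEL.V semantics below are an assumption.
    #include <cassert>
    #include <cstdint>

    // vselect(cond, if_set, if_clear): take if_set where a cond bit is 1.
    static uint64_t vselect(uint64_t cond, uint64_t if_set, uint64_t if_clear) {
      return (cond & if_set) | (~cond & if_clear);
    }

    // bsel_v(wd_in, ws, wt): take ws where a wd_in bit is 0, wt where it is 1.
    static uint64_t bsel_v(uint64_t wd_in, uint64_t ws, uint64_t wt) {
      return (~wd_in & ws) | (wd_in & wt);
    }

    int main() {
      const uint64_t Cond = 0xFF00FF00FF00FF00ULL;
      const uint64_t A = 0x1111111111111111ULL, B = 0x2222222222222222ULL;
      // Matching (vselect cond, if_set, if_clear) therefore requires
      // (BSEL_V cond, if_clear, if_set): the last two operands change places,
      // exactly as the corrected pattern above encodes.
      assert(vselect(Cond, A, B) == bsel_v(Cond, B, A));
      return 0;
    }
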
@@ -1764,9 +1793,13 @@ class BSELI_B_DESC {
dag InOperandList = (ins MSA128BOpnd:$wd_in, MSA128BOpnd:$ws,
vsplat_uimm8:$u8);
string AsmString = "bseli.b\t$wd, $ws, $u8";
+ // Note that vselect and BSEL_V treat the condition operand the opposite way
+ // from each other.
+ // (vselect cond, if_set, if_clear)
+ // (BSEL_V cond, if_clear, if_set)
list<dag> Pattern = [(set MSA128BOpnd:$wd, (vselect MSA128BOpnd:$wd_in,
- MSA128BOpnd:$ws,
- vsplati8_uimm8:$u8))];
+ vsplati8_uimm8:$u8,
+ MSA128BOpnd:$ws))];
InstrItinClass Itinerary = NoItinerary;
string Constraints = "$wd = $wd_in";
}
@@ -1880,6 +1913,8 @@ class COPY_S_H_DESC : MSA_COPY_DESC_BASE<"copy_s.h", vextract_sext_i16, v8i16,
GPR32Opnd, MSA128HOpnd>;
class COPY_S_W_DESC : MSA_COPY_DESC_BASE<"copy_s.w", vextract_sext_i32, v4i32,
GPR32Opnd, MSA128WOpnd>;
+class COPY_S_D_DESC : MSA_COPY_DESC_BASE<"copy_s.d", vextract_sext_i64, v2i64,
+ GPR64Opnd, MSA128DOpnd>;
class COPY_U_B_DESC : MSA_COPY_DESC_BASE<"copy_u.b", vextract_zext_i8, v16i8,
GPR32Opnd, MSA128BOpnd>;
@@ -1887,6 +1922,8 @@ class COPY_U_H_DESC : MSA_COPY_DESC_BASE<"copy_u.h", vextract_zext_i16, v8i16,
GPR32Opnd, MSA128HOpnd>;
class COPY_U_W_DESC : MSA_COPY_DESC_BASE<"copy_u.w", vextract_zext_i32, v4i32,
GPR32Opnd, MSA128WOpnd>;
+class COPY_U_D_DESC : MSA_COPY_DESC_BASE<"copy_u.d", vextract_zext_i64, v2i64,
+ GPR64Opnd, MSA128DOpnd>;
class COPY_FW_PSEUDO_DESC : MSA_COPY_PSEUDO_BASE<vector_extract, v4f32, FGR32,
MSA128W>;
@@ -2047,11 +2084,11 @@ class FEXP2_W_DESC : MSA_3RF_DESC_BASE<"fexp2.w", mul_fexp2, MSA128WOpnd>;
class FEXP2_D_DESC : MSA_3RF_DESC_BASE<"fexp2.d", mul_fexp2, MSA128DOpnd>;
let usesCustomInserter = 1 in {
class FEXP2_W_1_PSEUDO_DESC :
- MipsPseudo<(outs MSA128W:$wd), (ins MSA128W:$ws),
- [(set MSA128W:$wd, (fexp2 MSA128W:$ws))]>;
+ MSAPseudo<(outs MSA128W:$wd), (ins MSA128W:$ws),
+ [(set MSA128W:$wd, (fexp2 MSA128W:$ws))]>;
class FEXP2_D_1_PSEUDO_DESC :
- MipsPseudo<(outs MSA128D:$wd), (ins MSA128D:$ws),
- [(set MSA128D:$wd, (fexp2 MSA128D:$ws))]>;
+ MSAPseudo<(outs MSA128D:$wd), (ins MSA128D:$ws),
+ [(set MSA128D:$wd, (fexp2 MSA128D:$ws))]>;
}
class FEXUPL_W_DESC : MSA_2RF_DESC_BASE<"fexupl.w", int_mips_fexupl_w,
@@ -2086,6 +2123,8 @@ class FILL_H_DESC : MSA_2R_FILL_DESC_BASE<"fill.h", v8i16, vsplati16,
MSA128HOpnd, GPR32Opnd>;
class FILL_W_DESC : MSA_2R_FILL_DESC_BASE<"fill.w", v4i32, vsplati32,
MSA128WOpnd, GPR32Opnd>;
+class FILL_D_DESC : MSA_2R_FILL_DESC_BASE<"fill.d", v2i64, vsplati64,
+ MSA128DOpnd, GPR64Opnd>;
class FILL_FW_PSEUDO_DESC : MSA_2R_FILL_PSEUDO_BASE<v4f32, vsplatf32, MSA128W,
FGR32>;
@@ -2259,24 +2298,40 @@ class INSERT_H_DESC : MSA_INSERT_DESC_BASE<"insert.h", vinsert_v8i16,
MSA128HOpnd, GPR32Opnd>;
class INSERT_W_DESC : MSA_INSERT_DESC_BASE<"insert.w", vinsert_v4i32,
MSA128WOpnd, GPR32Opnd>;
+class INSERT_D_DESC : MSA_INSERT_DESC_BASE<"insert.d", vinsert_v2i64,
+ MSA128DOpnd, GPR64Opnd>;
+
+class INSERT_B_VIDX_PSEUDO_DESC :
+ MSA_INSERT_VIDX_PSEUDO_BASE<vector_insert, v16i8, MSA128BOpnd, GPR32Opnd>;
+class INSERT_H_VIDX_PSEUDO_DESC :
+ MSA_INSERT_VIDX_PSEUDO_BASE<vector_insert, v8i16, MSA128HOpnd, GPR32Opnd>;
+class INSERT_W_VIDX_PSEUDO_DESC :
+ MSA_INSERT_VIDX_PSEUDO_BASE<vector_insert, v4i32, MSA128WOpnd, GPR32Opnd>;
+class INSERT_D_VIDX_PSEUDO_DESC :
+ MSA_INSERT_VIDX_PSEUDO_BASE<vector_insert, v2i64, MSA128DOpnd, GPR64Opnd>;
class INSERT_FW_PSEUDO_DESC : MSA_INSERT_PSEUDO_BASE<vector_insert, v4f32,
MSA128WOpnd, FGR32Opnd>;
class INSERT_FD_PSEUDO_DESC : MSA_INSERT_PSEUDO_BASE<vector_insert, v2f64,
MSA128DOpnd, FGR64Opnd>;
-class INSVE_B_DESC : MSA_INSVE_DESC_BASE<"insve.b", int_mips_insve_b,
+class INSERT_FW_VIDX_PSEUDO_DESC :
+ MSA_INSERT_VIDX_PSEUDO_BASE<vector_insert, v4f32, MSA128WOpnd, FGR32Opnd>;
+class INSERT_FD_VIDX_PSEUDO_DESC :
+ MSA_INSERT_VIDX_PSEUDO_BASE<vector_insert, v2f64, MSA128DOpnd, FGR64Opnd>;
+
+class INSVE_B_DESC : MSA_INSVE_DESC_BASE<"insve.b", insve_v16i8,
MSA128BOpnd>;
-class INSVE_H_DESC : MSA_INSVE_DESC_BASE<"insve.h", int_mips_insve_h,
+class INSVE_H_DESC : MSA_INSVE_DESC_BASE<"insve.h", insve_v8i16,
MSA128HOpnd>;
-class INSVE_W_DESC : MSA_INSVE_DESC_BASE<"insve.w", int_mips_insve_w,
+class INSVE_W_DESC : MSA_INSVE_DESC_BASE<"insve.w", insve_v4i32,
MSA128WOpnd>;
-class INSVE_D_DESC : MSA_INSVE_DESC_BASE<"insve.d", int_mips_insve_d,
+class INSVE_D_DESC : MSA_INSVE_DESC_BASE<"insve.d", insve_v2i64,
MSA128DOpnd>;
class LD_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
ValueType TyNode, RegisterOperand ROWD,
- Operand MemOpnd = mem, ComplexPattern Addr = addrRegImm,
+ Operand MemOpnd = mem_msa, ComplexPattern Addr = addrimm10,
InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs ROWD:$wd);
dag InOperandList = (ins MemOpnd:$addr);
@@ -2296,16 +2351,21 @@ class LDI_H_DESC : MSA_I10_LDI_DESC_BASE<"ldi.h", MSA128HOpnd>;
class LDI_W_DESC : MSA_I10_LDI_DESC_BASE<"ldi.w", MSA128WOpnd>;
class LDI_D_DESC : MSA_I10_LDI_DESC_BASE<"ldi.d", MSA128DOpnd>;
-class LSA_DESC {
- dag OutOperandList = (outs GPR32Opnd:$rd);
- dag InOperandList = (ins GPR32Opnd:$rs, GPR32Opnd:$rt, LSAImm:$sa);
- string AsmString = "lsa\t$rd, $rs, $rt, $sa";
- list<dag> Pattern = [(set GPR32Opnd:$rd, (add GPR32Opnd:$rs,
- (shl GPR32Opnd:$rt,
+class LSA_DESC_BASE<string instr_asm, RegisterOperand RORD,
+ RegisterOperand RORS = RORD, RegisterOperand RORT = RORD,
+ InstrItinClass itin = NoItinerary > {
+ dag OutOperandList = (outs RORD:$rd);
+ dag InOperandList = (ins RORS:$rs, RORT:$rt, LSAImm:$sa);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt, $sa");
+ list<dag> Pattern = [(set RORD:$rd, (add RORT:$rt,
+ (shl RORS:$rs,
immZExt2Lsa:$sa)))];
- InstrItinClass Itinerary = NoItinerary;
+ InstrItinClass Itinerary = itin;
}
+class LSA_DESC : LSA_DESC_BASE<"lsa", GPR32Opnd>;
+class DLSA_DESC : LSA_DESC_BASE<"dlsa", GPR64Opnd>;
+
class MADD_Q_H_DESC : MSA_3RF_4RF_DESC_BASE<"madd_q.h", int_mips_madd_q_h,
MSA128HOpnd>;
class MADD_Q_W_DESC : MSA_3RF_4RF_DESC_BASE<"madd_q.w", int_mips_madd_q_w,
@@ -2502,10 +2562,14 @@ class SLD_H_DESC : MSA_3R_SLD_DESC_BASE<"sld.h", int_mips_sld_h, MSA128HOpnd>;
class SLD_W_DESC : MSA_3R_SLD_DESC_BASE<"sld.w", int_mips_sld_w, MSA128WOpnd>;
class SLD_D_DESC : MSA_3R_SLD_DESC_BASE<"sld.d", int_mips_sld_d, MSA128DOpnd>;
-class SLDI_B_DESC : MSA_ELM_DESC_BASE<"sldi.b", int_mips_sldi_b, MSA128BOpnd>;
-class SLDI_H_DESC : MSA_ELM_DESC_BASE<"sldi.h", int_mips_sldi_h, MSA128HOpnd>;
-class SLDI_W_DESC : MSA_ELM_DESC_BASE<"sldi.w", int_mips_sldi_w, MSA128WOpnd>;
-class SLDI_D_DESC : MSA_ELM_DESC_BASE<"sldi.d", int_mips_sldi_d, MSA128DOpnd>;
+class SLDI_B_DESC : MSA_ELM_SLD_DESC_BASE<"sldi.b", int_mips_sldi_b,
+ MSA128BOpnd>;
+class SLDI_H_DESC : MSA_ELM_SLD_DESC_BASE<"sldi.h", int_mips_sldi_h,
+ MSA128HOpnd>;
+class SLDI_W_DESC : MSA_ELM_SLD_DESC_BASE<"sldi.w", int_mips_sldi_w,
+ MSA128WOpnd>;
+class SLDI_D_DESC : MSA_ELM_SLD_DESC_BASE<"sldi.d", int_mips_sldi_d,
+ MSA128DOpnd>;
class SLL_B_DESC : MSA_3R_DESC_BASE<"sll.b", shl, MSA128BOpnd>;
class SLL_H_DESC : MSA_3R_DESC_BASE<"sll.h", shl, MSA128HOpnd>;
@@ -2597,7 +2661,7 @@ class SRLRI_D_DESC : MSA_BIT_D_X_DESC_BASE<"srlri.d", int_mips_srlri_d,
class ST_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
ValueType TyNode, RegisterOperand ROWD,
- Operand MemOpnd = mem, ComplexPattern Addr = addrRegImm,
+ Operand MemOpnd = mem_msa, ComplexPattern Addr = addrimm10,
InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs);
dag InOperandList = (ins ROWD:$wd, MemOpnd:$addr);
@@ -2810,8 +2874,12 @@ def BNZ_V : BNZ_V_ENC, BNZ_V_DESC;
def BSEL_V : BSEL_V_ENC, BSEL_V_DESC;
class MSA_BSEL_PSEUDO_BASE<RegisterOperand RO, ValueType Ty> :
- MipsPseudo<(outs RO:$wd), (ins RO:$wd_in, RO:$ws, RO:$wt),
- [(set RO:$wd, (Ty (vselect RO:$wd_in, RO:$ws, RO:$wt)))]>,
+ MSAPseudo<(outs RO:$wd), (ins RO:$wd_in, RO:$ws, RO:$wt),
+ [(set RO:$wd, (Ty (vselect RO:$wd_in, RO:$wt, RO:$ws)))]>,
+ // Note that vselect and BSEL_V treat the condition operand the opposite way
+ // from each other.
+ // (vselect cond, if_set, if_clear)
+ // (BSEL_V cond, if_clear, if_set)
PseudoInstExpansion<(BSEL_V MSA128BOpnd:$wd, MSA128BOpnd:$wd_in,
MSA128BOpnd:$ws, MSA128BOpnd:$wt)> {
let Constraints = "$wd_in = $wd";
@@ -2897,10 +2965,12 @@ def CLTI_U_D : CLTI_U_D_ENC, CLTI_U_D_DESC;
def COPY_S_B : COPY_S_B_ENC, COPY_S_B_DESC;
def COPY_S_H : COPY_S_H_ENC, COPY_S_H_DESC;
def COPY_S_W : COPY_S_W_ENC, COPY_S_W_DESC;
+def COPY_S_D : COPY_S_D_ENC, COPY_S_D_DESC;
def COPY_U_B : COPY_U_B_ENC, COPY_U_B_DESC;
def COPY_U_H : COPY_U_H_ENC, COPY_U_H_DESC;
def COPY_U_W : COPY_U_W_ENC, COPY_U_W_DESC;
+def COPY_U_D : COPY_U_D_ENC, COPY_U_D_DESC;
def COPY_FW_PSEUDO : COPY_FW_PSEUDO_DESC;
def COPY_FD_PSEUDO : COPY_FD_PSEUDO_DESC;
@@ -3012,6 +3082,7 @@ def FFQR_D : FFQR_D_ENC, FFQR_D_DESC;
def FILL_B : FILL_B_ENC, FILL_B_DESC;
def FILL_H : FILL_H_ENC, FILL_H_DESC;
def FILL_W : FILL_W_ENC, FILL_W_DESC;
+def FILL_D : FILL_D_ENC, FILL_D_DESC;
def FILL_FW_PSEUDO : FILL_FW_PSEUDO_DESC;
def FILL_FD_PSEUDO : FILL_FD_PSEUDO_DESC;
@@ -3141,18 +3212,30 @@ def ILVR_D : ILVR_D_ENC, ILVR_D_DESC;
def INSERT_B : INSERT_B_ENC, INSERT_B_DESC;
def INSERT_H : INSERT_H_ENC, INSERT_H_DESC;
def INSERT_W : INSERT_W_ENC, INSERT_W_DESC;
+def INSERT_D : INSERT_D_ENC, INSERT_D_DESC;
// INSERT_FW_PSEUDO defined after INSVE_W
// INSERT_FD_PSEUDO defined after INSVE_D
-def INSVE_B : INSVE_B_ENC, INSVE_B_DESC;
-def INSVE_H : INSVE_H_ENC, INSVE_H_DESC;
-def INSVE_W : INSVE_W_ENC, INSVE_W_DESC;
-def INSVE_D : INSVE_D_ENC, INSVE_D_DESC;
+// There is a fourth operand that is not present in the encoding. Use a
+// custom decoder to get a chance to add it.
+let DecoderMethod = "DecodeINSVE_DF" in {
+ def INSVE_B : INSVE_B_ENC, INSVE_B_DESC;
+ def INSVE_H : INSVE_H_ENC, INSVE_H_DESC;
+ def INSVE_W : INSVE_W_ENC, INSVE_W_DESC;
+ def INSVE_D : INSVE_D_ENC, INSVE_D_DESC;
+}
def INSERT_FW_PSEUDO : INSERT_FW_PSEUDO_DESC;
def INSERT_FD_PSEUDO : INSERT_FD_PSEUDO_DESC;
+def INSERT_B_VIDX_PSEUDO : INSERT_B_VIDX_PSEUDO_DESC;
+def INSERT_H_VIDX_PSEUDO : INSERT_H_VIDX_PSEUDO_DESC;
+def INSERT_W_VIDX_PSEUDO : INSERT_W_VIDX_PSEUDO_DESC;
+def INSERT_D_VIDX_PSEUDO : INSERT_D_VIDX_PSEUDO_DESC;
+def INSERT_FW_VIDX_PSEUDO : INSERT_FW_VIDX_PSEUDO_DESC;
+def INSERT_FD_VIDX_PSEUDO : INSERT_FD_VIDX_PSEUDO_DESC;
+
def LD_B: LD_B_ENC, LD_B_DESC;
def LD_H: LD_H_ENC, LD_H_DESC;
def LD_W: LD_W_ENC, LD_W_DESC;
@@ -3164,6 +3247,7 @@ def LDI_W : LDI_W_ENC, LDI_W_DESC;
def LDI_D : LDI_D_ENC, LDI_D_DESC;
def LSA : LSA_ENC, LSA_DESC;
+def DLSA : DLSA_ENC, DLSA_DESC;
def MADD_Q_H : MADD_Q_H_ENC, MADD_Q_H_DESC;
def MADD_Q_W : MADD_Q_W_ENC, MADD_Q_W_DESC;
@@ -3464,46 +3548,23 @@ class MSAPat<dag pattern, dag result, list<Predicate> pred = [HasMSA]> :
def : MSAPat<(extractelt (v4i32 MSA128W:$ws), immZExt4:$idx),
(COPY_S_W MSA128W:$ws, immZExt4:$idx)>;
-def : MSAPat<(v16i8 (load addr:$addr)), (LD_B addr:$addr)>;
-def : MSAPat<(v8i16 (load addr:$addr)), (LD_H addr:$addr)>;
-def : MSAPat<(v4i32 (load addr:$addr)), (LD_W addr:$addr)>;
-def : MSAPat<(v2i64 (load addr:$addr)), (LD_D addr:$addr)>;
-def : MSAPat<(v8f16 (load addr:$addr)), (LD_H addr:$addr)>;
-def : MSAPat<(v4f32 (load addr:$addr)), (LD_W addr:$addr)>;
-def : MSAPat<(v2f64 (load addr:$addr)), (LD_D addr:$addr)>;
-
-def : MSAPat<(v8f16 (load addrRegImm:$addr)), (LD_H addrRegImm:$addr)>;
-def : MSAPat<(v4f32 (load addrRegImm:$addr)), (LD_W addrRegImm:$addr)>;
-def : MSAPat<(v2f64 (load addrRegImm:$addr)), (LD_D addrRegImm:$addr)>;
-
-def : MSAPat<(store (v16i8 MSA128B:$ws), addr:$addr),
- (ST_B MSA128B:$ws, addr:$addr)>;
-def : MSAPat<(store (v8i16 MSA128H:$ws), addr:$addr),
- (ST_H MSA128H:$ws, addr:$addr)>;
-def : MSAPat<(store (v4i32 MSA128W:$ws), addr:$addr),
- (ST_W MSA128W:$ws, addr:$addr)>;
-def : MSAPat<(store (v2i64 MSA128D:$ws), addr:$addr),
- (ST_D MSA128D:$ws, addr:$addr)>;
-def : MSAPat<(store (v8f16 MSA128H:$ws), addr:$addr),
- (ST_H MSA128H:$ws, addr:$addr)>;
-def : MSAPat<(store (v4f32 MSA128W:$ws), addr:$addr),
- (ST_W MSA128W:$ws, addr:$addr)>;
-def : MSAPat<(store (v2f64 MSA128D:$ws), addr:$addr),
- (ST_D MSA128D:$ws, addr:$addr)>;
-
-def ST_FH : MSAPat<(store (v8f16 MSA128H:$ws), addrRegImm:$addr),
- (ST_H MSA128H:$ws, addrRegImm:$addr)>;
-def ST_FW : MSAPat<(store (v4f32 MSA128W:$ws), addrRegImm:$addr),
- (ST_W MSA128W:$ws, addrRegImm:$addr)>;
-def ST_FD : MSAPat<(store (v2f64 MSA128D:$ws), addrRegImm:$addr),
- (ST_D MSA128D:$ws, addrRegImm:$addr)>;
+def : MSAPat<(v8f16 (load addrimm10:$addr)), (LD_H addrimm10:$addr)>;
+def : MSAPat<(v4f32 (load addrimm10:$addr)), (LD_W addrimm10:$addr)>;
+def : MSAPat<(v2f64 (load addrimm10:$addr)), (LD_D addrimm10:$addr)>;
+
+def ST_FH : MSAPat<(store (v8f16 MSA128H:$ws), addrimm10:$addr),
+ (ST_H MSA128H:$ws, addrimm10:$addr)>;
+def ST_FW : MSAPat<(store (v4f32 MSA128W:$ws), addrimm10:$addr),
+ (ST_W MSA128W:$ws, addrimm10:$addr)>;
+def ST_FD : MSAPat<(store (v2f64 MSA128D:$ws), addrimm10:$addr),
+ (ST_D MSA128D:$ws, addrimm10:$addr)>;
class MSA_FABS_PSEUDO_DESC_BASE<RegisterOperand ROWD,
RegisterOperand ROWS = ROWD,
InstrItinClass itin = NoItinerary> :
- MipsPseudo<(outs ROWD:$wd),
- (ins ROWS:$ws),
- [(set ROWD:$wd, (fabs ROWS:$ws))]> {
+ MSAPseudo<(outs ROWD:$wd),
+ (ins ROWS:$ws),
+ [(set ROWD:$wd, (fabs ROWS:$ws))]> {
InstrItinClass Itinerary = itin;
}
def FABS_W : MSA_FABS_PSEUDO_DESC_BASE<MSA128WOpnd>,
@@ -3518,7 +3579,7 @@ class MSABitconvertPat<ValueType DstVT, ValueType SrcVT,
MSAPat<(DstVT (bitconvert SrcVT:$src)),
(COPY_TO_REGCLASS SrcVT:$src, DstRC), preds>;
-// These are endian-independant because the element size doesnt change
+// These are endian-independent because the element size doesn't change
def : MSABitconvertPat<v8i16, v8f16, MSA128H>;
def : MSABitconvertPat<v4i32, v4f32, MSA128W>;
def : MSABitconvertPat<v2i64, v2f64, MSA128D>;
@@ -3692,3 +3753,55 @@ def SZ_D_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAllZero, v2i64,
MSA128D, NoItinerary>;
def SZ_V_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAnyZero, v16i8,
MSA128B, NoItinerary>;
+
+// Vector extraction with variable index
+def : MSAPat<(i32 (vextract_sext_i8 v16i8:$ws, i32:$idx)),
+ (SRA (COPY_TO_REGCLASS (i32 (EXTRACT_SUBREG (SPLAT_B v16i8:$ws,
+ i32:$idx),
+ sub_lo)),
+ GPR32), (i32 24))>;
+def : MSAPat<(i32 (vextract_sext_i16 v8i16:$ws, i32:$idx)),
+ (SRA (COPY_TO_REGCLASS (i32 (EXTRACT_SUBREG (SPLAT_H v8i16:$ws,
+ i32:$idx),
+ sub_lo)),
+ GPR32), (i32 16))>;
+def : MSAPat<(i32 (vextract_sext_i32 v4i32:$ws, i32:$idx)),
+ (COPY_TO_REGCLASS (i32 (EXTRACT_SUBREG (SPLAT_W v4i32:$ws,
+ i32:$idx),
+ sub_lo)),
+ GPR32)>;
+def : MSAPat<(i64 (vextract_sext_i64 v2i64:$ws, i32:$idx)),
+ (COPY_TO_REGCLASS (i64 (EXTRACT_SUBREG (SPLAT_D v2i64:$ws,
+ i32:$idx),
+ sub_64)),
+ GPR64), [HasMSA, IsGP64bit]>;
+
+def : MSAPat<(i32 (vextract_zext_i8 v16i8:$ws, i32:$idx)),
+ (SRL (COPY_TO_REGCLASS (i32 (EXTRACT_SUBREG (SPLAT_B v16i8:$ws,
+ i32:$idx),
+ sub_lo)),
+ GPR32), (i32 24))>;
+def : MSAPat<(i32 (vextract_zext_i16 v8i16:$ws, i32:$idx)),
+ (SRL (COPY_TO_REGCLASS (i32 (EXTRACT_SUBREG (SPLAT_H v8i16:$ws,
+ i32:$idx),
+ sub_lo)),
+ GPR32), (i32 16))>;
+def : MSAPat<(i32 (vextract_zext_i32 v4i32:$ws, i32:$idx)),
+ (COPY_TO_REGCLASS (i32 (EXTRACT_SUBREG (SPLAT_W v4i32:$ws,
+ i32:$idx),
+ sub_lo)),
+ GPR32)>;
+def : MSAPat<(i64 (vextract_zext_i64 v2i64:$ws, i32:$idx)),
+ (COPY_TO_REGCLASS (i64 (EXTRACT_SUBREG (SPLAT_D v2i64:$ws,
+ i32:$idx),
+ sub_64)),
+ GPR64), [HasMSA, IsGP64bit]>;
+
+def : MSAPat<(f32 (vector_extract v4f32:$ws, i32:$idx)),
+ (f32 (EXTRACT_SUBREG (SPLAT_W v4f32:$ws,
+ i32:$idx),
+ sub_lo))>;
+def : MSAPat<(f64 (vector_extract v2f64:$ws, i32:$idx)),
+ (f64 (EXTRACT_SUBREG (SPLAT_D v2f64:$ws,
+ i32:$idx),
+ sub_64))>;
diff --git a/contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp
index dedf802..bc896be 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp
@@ -27,7 +27,7 @@ FixGlobalBaseReg("mips-fix-global-base-reg", cl::Hidden, cl::init(true),
MipsCallEntry::MipsCallEntry(const StringRef &N) {
#ifndef NDEBUG
Name = N;
- Val = 0;
+ Val = nullptr;
#endif
}
@@ -65,9 +65,8 @@ MipsFunctionInfo::~MipsFunctionInfo() {
++I)
delete I->getValue();
- for (ValueMap<const GlobalValue *, const MipsCallEntry *>::iterator
- I = GlobalCallEntries.begin(), E = GlobalCallEntries.end(); I != E; ++I)
- delete I->second;
+ for (const auto &Entry : GlobalCallEntries)
+ delete Entry.second;
}
bool MipsFunctionInfo::globalBaseRegSet() const {
@@ -138,4 +137,12 @@ MachinePointerInfo MipsFunctionInfo::callPtrInfo(const GlobalValue *Val) {
return MachinePointerInfo(E);
}
+int MipsFunctionInfo::getMoveF64ViaSpillFI(const TargetRegisterClass *RC) {
+ if (MoveF64ViaSpillFI == -1) {
+ MoveF64ViaSpillFI = MF.getFrameInfo()->CreateStackObject(
+ RC->getSize(), RC->getAlignment(), false);
+ }
+ return MoveF64ViaSpillFI;
+}
+
void MipsFunctionInfo::anchor() { }
diff --git a/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h
index 43bf682..61260e5 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h
+++ b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h
@@ -14,16 +14,18 @@
#ifndef MIPS_MACHINE_FUNCTION_INFO_H
#define MIPS_MACHINE_FUNCTION_INFO_H
-#include "MipsSubtarget.h"
+#include "Mips16HardFloatInfo.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/ValueMap.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/ValueMap.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
+#include <map>
+#include <string>
#include <utility>
namespace llvm {
@@ -34,12 +36,12 @@ class MipsCallEntry : public PseudoSourceValue {
public:
explicit MipsCallEntry(const StringRef &N);
explicit MipsCallEntry(const GlobalValue *V);
- virtual bool isConstant(const MachineFrameInfo *) const;
- virtual bool isAliased(const MachineFrameInfo *) const;
- virtual bool mayAlias(const MachineFrameInfo *) const;
+ bool isConstant(const MachineFrameInfo *) const override;
+ bool isAliased(const MachineFrameInfo *) const override;
+ bool mayAlias(const MachineFrameInfo *) const override;
private:
- virtual void printCustom(raw_ostream &O) const;
+ void printCustom(raw_ostream &O) const override;
#ifndef NDEBUG
std::string Name;
const GlobalValue *Val;
@@ -50,10 +52,10 @@ private:
/// Mips target-specific information for each MachineFunction.
class MipsFunctionInfo : public MachineFunctionInfo {
public:
- MipsFunctionInfo(MachineFunction& MF)
- : MF(MF), SRetReturnReg(0), GlobalBaseReg(0), Mips16SPAliasReg(0),
- VarArgsFrameIndex(0), CallsEhReturn(false)
- {}
+ MipsFunctionInfo(MachineFunction &MF)
+ : MF(MF), SRetReturnReg(0), GlobalBaseReg(0), Mips16SPAliasReg(0),
+ VarArgsFrameIndex(0), CallsEhReturn(false), SaveS2(false),
+ MoveF64ViaSpillFI(-1) {}
~MipsFunctionInfo();
@@ -92,6 +94,14 @@ public:
/// representing a GOT entry for a global function.
MachinePointerInfo callPtrInfo(const GlobalValue *Val);
+ void setSaveS2() { SaveS2 = true; }
+ bool hasSaveS2() const { return SaveS2; }
+
+ int getMoveF64ViaSpillFI(const TargetRegisterClass *RC);
+
+ std::map<const char *, const llvm::Mips16HardFloatInfo::FuncSignature *>
+ StubsNeeded;
+
private:
virtual void anchor();
@@ -126,6 +136,13 @@ private:
/// Frame objects for spilling eh data registers.
int EhDataRegFI[4];
+ /// True if $s2 must be preserved for this function; set via setSaveS2().
+ bool SaveS2;
+
+ /// FrameIndex for expanding BuildPairF64 nodes to spill and reload when the
+ /// O32 FPXX ABI is enabled. -1 is used to denote an invalid index.
+ int MoveF64ViaSpillFI;
+
/// MipsCallEntry maps.
StringMap<const MipsCallEntry *> ExternalCallEntries;
ValueMap<const GlobalValue *, const MipsCallEntry *> GlobalCallEntries;
diff --git a/contrib/llvm/lib/Target/Mips/MipsModuleISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Mips/MipsModuleISelDAGToDAG.cpp
index c6abf17..b011e8f 100644
--- a/contrib/llvm/lib/Target/Mips/MipsModuleISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsModuleISelDAGToDAG.cpp
@@ -14,11 +14,13 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
+#define DEBUG_TYPE "mips-isel"
+
namespace llvm {
bool MipsModuleDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
DEBUG(errs() << "In MipsModuleDAGToDAGISel::runMachineFunction\n");
- const_cast<MipsSubtarget&>(Subtarget).resetSubtarget(&MF);
+ TM.resetSubtarget(&MF);
return false;
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsModuleISelDAGToDAG.h b/contrib/llvm/lib/Target/Mips/MipsModuleISelDAGToDAG.h
index fda35ae..f7a0310 100644
--- a/contrib/llvm/lib/Target/Mips/MipsModuleISelDAGToDAG.h
+++ b/contrib/llvm/lib/Target/Mips/MipsModuleISelDAGToDAG.h
@@ -37,25 +37,17 @@ public:
static char ID;
explicit MipsModuleDAGToDAGISel(MipsTargetMachine &TM_)
- : MachineFunctionPass(ID),
- TM(TM_), Subtarget(TM.getSubtarget<MipsSubtarget>()) {}
+ : MachineFunctionPass(ID), TM(TM_) {}
// Pass Name
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "MIPS DAG->DAG Pattern Instruction Selection";
}
- virtual bool runOnMachineFunction(MachineFunction &MF);
-
- virtual SDNode *Select(SDNode *N) {
- llvm_unreachable("unexpected");
- }
+ bool runOnMachineFunction(MachineFunction &MF) override;
protected:
- /// Keep a pointer to the MipsSubtarget around so that we can make the right
- /// decision when generating code for different targets.
- const TargetMachine &TM;
- const MipsSubtarget &Subtarget;
+ MipsTargetMachine &TM;
};
/// createMipsISelDag - This pass converts a legalized DAG into a
diff --git a/contrib/llvm/lib/Target/Mips/MipsOptimizePICCall.cpp b/contrib/llvm/lib/Target/Mips/MipsOptimizePICCall.cpp
new file mode 100644
index 0000000..c234049
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsOptimizePICCall.cpp
@@ -0,0 +1,301 @@
+//===--------- MipsOptimizePICCall.cpp - Optimize PIC Calls ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass eliminates unnecessary instructions that set up $gp and replaces
+// instructions that load target function addresses with copy instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Mips.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
+#include "MipsMachineFunction.h"
+#include "MipsTargetMachine.h"
+#include "llvm/ADT/ScopedHashTable.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "optimize-mips-pic-call"
+
+static cl::opt<bool> LoadTargetFromGOT("mips-load-target-from-got",
+ cl::init(true),
+ cl::desc("Load target address from GOT"),
+ cl::Hidden);
+
+static cl::opt<bool> EraseGPOpnd("mips-erase-gp-opnd",
+ cl::init(true), cl::desc("Erase GP Operand"),
+ cl::Hidden);
+
+namespace {
+typedef PointerUnion<const Value *, const PseudoSourceValue *> ValueType;
+
+typedef std::pair<unsigned, unsigned> CntRegP;
+typedef RecyclingAllocator<BumpPtrAllocator,
+ ScopedHashTableVal<ValueType, CntRegP> >
+AllocatorTy;
+typedef ScopedHashTable<ValueType, CntRegP, DenseMapInfo<ValueType>,
+ AllocatorTy> ScopedHTType;
+
+class MBBInfo {
+public:
+ MBBInfo(MachineDomTreeNode *N);
+ const MachineDomTreeNode *getNode() const;
+ bool isVisited() const;
+ void preVisit(ScopedHTType &ScopedHT);
+ void postVisit();
+
+private:
+ MachineDomTreeNode *Node;
+ ScopedHTType::ScopeTy *HTScope;
+};
+
+class OptimizePICCall : public MachineFunctionPass {
+public:
+ OptimizePICCall(TargetMachine &tm) : MachineFunctionPass(ID) {}
+
+ const char *getPassName() const override { return "Mips OptimizePICCall"; }
+
+ bool runOnMachineFunction(MachineFunction &F) override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<MachineDominatorTree>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+private:
+ /// \brief Visit MBB.
+ bool visitNode(MBBInfo &MBBI);
+
+ /// \brief Test if MI jumps to a function via a register.
+ ///
+ /// Also, return the virtual register containing the target function's address
+ /// and the underlying object in Reg and Val respectively, if the function's
+ /// address can be resolved lazily.
+ bool isCallViaRegister(MachineInstr &MI, unsigned &Reg,
+ ValueType &Val) const;
+
+ /// \brief Return the number of instructions that dominate the current
+ /// instruction and load the function address from object Entry.
+ unsigned getCount(ValueType Entry);
+
+ /// \brief Return the destination virtual register of the last instruction
+ /// that loads from object Entry.
+ unsigned getReg(ValueType Entry);
+
+ /// \brief Update ScopedHT.
+ void incCntAndSetReg(ValueType Entry, unsigned Reg);
+
+ ScopedHTType ScopedHT;
+ static char ID;
+};
+
+char OptimizePICCall::ID = 0;
+} // end of anonymous namespace
+
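Note: ScopedHT maps a call target (either an IR Value or a PseudoSourceValue, hence the PointerUnion) to a (call count, last register) pair, and the ScopedHashTable gives every dominator-tree node its own scope so entries disappear once traversal leaves that subtree. The simplified, self-contained model below uses string keys for brevity and is not the llvm::ScopedHashTable implementation; it only illustrates the undo-on-scope-exit behaviour the pass relies on.

    #include <cstdio>
    #include <optional>
    #include <string>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    class ScopedMap {
      using Value = std::pair<unsigned, unsigned>; // (call count, last register)
      std::unordered_map<std::string, Value> Map;
      // Per-scope undo log: key plus the value it had before (or nothing).
      std::vector<std::vector<std::pair<std::string, std::optional<Value>>>> Undo;

    public:
      void pushScope() { Undo.emplace_back(); }

      void popScope() {
        auto &Log = Undo.back();
        for (auto It = Log.rbegin(); It != Log.rend(); ++It) {
          if (It->second)
            Map[It->first] = *It->second; // restore the previous value
          else
            Map.erase(It->first);         // key did not exist before this scope
        }
        Undo.pop_back();
      }

      Value lookup(const std::string &K) const {
        auto It = Map.find(K);
        return It == Map.end() ? Value{0, 0} : It->second;
      }

      void insert(const std::string &K, Value V) {
        std::optional<Value> Old;
        auto It = Map.find(K);
        if (It != Map.end())
          Old = It->second;
        Undo.back().push_back({K, Old});
        Map[K] = V;
      }
    };

    int main() {
      ScopedMap HT;
      HT.pushScope();                     // entering a dominator-tree node
      HT.insert("callee", {HT.lookup("callee").first + 1, 7});
      std::printf("in scope: %u\n", HT.lookup("callee").first);    // 1
      HT.popScope();                      // leaving the subtree
      std::printf("after scope: %u\n", HT.lookup("callee").first); // 0
    }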
+/// Return the first MachineOperand of MI if it is a use of a virtual register.
+static MachineOperand *getCallTargetRegOpnd(MachineInstr &MI) {
+ if (MI.getNumOperands() == 0)
+ return nullptr;
+
+ MachineOperand &MO = MI.getOperand(0);
+
+ if (!MO.isReg() || !MO.isUse() ||
+ !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ return nullptr;
+
+ return &MO;
+}
+
+/// Return type of register Reg.
+static MVT::SimpleValueType getRegTy(unsigned Reg, MachineFunction &MF) {
+ const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(Reg);
+ assert(RC->vt_end() - RC->vt_begin() == 1);
+ return *RC->vt_begin();
+}
+
+/// Do the following transformation:
+///
+/// jalr $vreg
+/// =>
+/// copy $t9, $vreg
+/// jalr $t9
+static void setCallTargetReg(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I) {
+ MachineFunction &MF = *MBB->getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ unsigned SrcReg = I->getOperand(0).getReg();
+ unsigned DstReg = getRegTy(SrcReg, MF) == MVT::i32 ? Mips::T9 : Mips::T9_64;
+ BuildMI(*MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), DstReg)
+ .addReg(SrcReg);
+ I->getOperand(0).setReg(DstReg);
+}
+
+/// Search MI's operands for register GP and erase it.
+static void eraseGPOpnd(MachineInstr &MI) {
+ if (!EraseGPOpnd)
+ return;
+
+ MachineFunction &MF = *MI.getParent()->getParent();
+ MVT::SimpleValueType Ty = getRegTy(MI.getOperand(0).getReg(), MF);
+ unsigned Reg = Ty == MVT::i32 ? Mips::GP : Mips::GP_64;
+
+ for (unsigned I = 0; I < MI.getNumOperands(); ++I) {
+ MachineOperand &MO = MI.getOperand(I);
+ if (MO.isReg() && MO.getReg() == Reg) {
+ MI.RemoveOperand(I);
+ return;
+ }
+ }
+
+ llvm_unreachable(nullptr);
+}
+
+MBBInfo::MBBInfo(MachineDomTreeNode *N) : Node(N), HTScope(nullptr) {}
+
+const MachineDomTreeNode *MBBInfo::getNode() const { return Node; }
+
+bool MBBInfo::isVisited() const { return HTScope; }
+
+void MBBInfo::preVisit(ScopedHTType &ScopedHT) {
+ HTScope = new ScopedHTType::ScopeTy(ScopedHT);
+}
+
+void MBBInfo::postVisit() {
+ delete HTScope;
+}
+
+// OptimizePICCall methods.
+bool OptimizePICCall::runOnMachineFunction(MachineFunction &F) {
+ if (F.getTarget().getSubtarget<MipsSubtarget>().inMips16Mode())
+ return false;
+
+ // Do a pre-order traversal of the dominator tree.
+ MachineDominatorTree *MDT = &getAnalysis<MachineDominatorTree>();
+ bool Changed = false;
+
+ SmallVector<MBBInfo, 8> WorkList(1, MBBInfo(MDT->getRootNode()));
+
+ while (!WorkList.empty()) {
+ MBBInfo &MBBI = WorkList.back();
+
+ // If this MBB has already been visited, destroy the scope for the MBB and
+ // pop it from the work list.
+ if (MBBI.isVisited()) {
+ MBBI.postVisit();
+ WorkList.pop_back();
+ continue;
+ }
+
+ // Visit the MBB and add its children to the work list.
+ MBBI.preVisit(ScopedHT);
+ Changed |= visitNode(MBBI);
+ const MachineDomTreeNode *Node = MBBI.getNode();
+ const std::vector<MachineDomTreeNode *> &Children = Node->getChildren();
+ WorkList.append(Children.begin(), Children.end());
+ }
+
+ return Changed;
+}
+
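Note: runOnMachineFunction() walks the dominator tree pre-order with an explicit worklist: a node opens its scope and pushes its children on the first visit, and is only popped (closing the scope) after its whole subtree has been processed. The standalone sketch below reproduces that two-phase visit on a toy tree; the type and hook names are illustrative and, as in the pass itself, siblings are visited in reverse push order.

    #include <cstdio>
    #include <vector>

    struct Node {
      int Id;
      std::vector<Node *> Children;
    };

    struct WorkItem {
      Node *N;
      bool Visited = false;
    };

    static void walk(Node *Root) {
      std::vector<WorkItem> WorkList;
      WorkList.push_back({Root});
      while (!WorkList.empty()) {
        WorkItem &Item = WorkList.back();
        if (Item.Visited) {                 // postVisit(): subtree finished
          std::printf("leave %d\n", Item.N->Id);
          WorkList.pop_back();
          continue;
        }
        Item.Visited = true;                // preVisit(): open the scope
        Node *N = Item.N;                   // copy before push_back may reallocate
        std::printf("enter %d\n", N->Id);
        for (Node *C : N->Children)
          WorkList.push_back({C});
      }
    }

    int main() {
      Node C1{1, {}}, C2{2, {}}, Root{0, {&C1, &C2}};
      walk(&Root); // enter 0, enter 2, leave 2, enter 1, leave 1, leave 0
    }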
+bool OptimizePICCall::visitNode(MBBInfo &MBBI) {
+ bool Changed = false;
+ MachineBasicBlock *MBB = MBBI.getNode()->getBlock();
+
+ for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
+ ++I) {
+ unsigned Reg;
+ ValueType Entry;
+
+ // Skip instructions that are not call instructions via registers.
+ if (!isCallViaRegister(*I, Reg, Entry))
+ continue;
+
+ Changed = true;
+ unsigned N = getCount(Entry);
+
+ if (N != 0) {
+ // If a function has been called more than twice, we do not have to emit a
+ // load instruction to get the function address from the GOT, but can
+ // instead reuse the address that has been loaded before.
+ if (N >= 2 && !LoadTargetFromGOT)
+ getCallTargetRegOpnd(*I)->setReg(getReg(Entry));
+
+ // Erase the $gp operand if this isn't the first time a function has
+ // been called. $gp needs to be set up only if the function call can go
+ // through a lazy binding stub.
+ eraseGPOpnd(*I);
+ }
+
+ if (Entry)
+ incCntAndSetReg(Entry, Reg);
+
+ setCallTargetReg(MBB, I);
+ }
+
+ return Changed;
+}
+
+bool OptimizePICCall::isCallViaRegister(MachineInstr &MI, unsigned &Reg,
+ ValueType &Val) const {
+ if (!MI.isCall())
+ return false;
+
+ MachineOperand *MO = getCallTargetRegOpnd(MI);
+
+ // Return if MI is not a function call via a register.
+ if (!MO)
+ return false;
+
+ // Get the instruction that loads the function address from the GOT.
+ Reg = MO->getReg();
+ Val = (Value*)nullptr;
+ MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+ MachineInstr *DefMI = MRI.getVRegDef(Reg);
+
+ assert(DefMI);
+
+ // See if DefMI is an instruction that loads from a GOT entry that holds the
+ // address of a lazy binding stub.
+ if (!DefMI->mayLoad() || DefMI->getNumOperands() < 3)
+ return true;
+
+ unsigned Flags = DefMI->getOperand(2).getTargetFlags();
+
+ if (Flags != MipsII::MO_GOT_CALL && Flags != MipsII::MO_CALL_LO16)
+ return true;
+
+ // Return the underlying object for the GOT entry in Val.
+ assert(DefMI->hasOneMemOperand());
+ Val = (*DefMI->memoperands_begin())->getValue();
+ if (!Val)
+ Val = (*DefMI->memoperands_begin())->getPseudoValue();
+ return true;
+}
+
+unsigned OptimizePICCall::getCount(ValueType Entry) {
+ return ScopedHT.lookup(Entry).first;
+}
+
+unsigned OptimizePICCall::getReg(ValueType Entry) {
+ unsigned Reg = ScopedHT.lookup(Entry).second;
+ assert(Reg);
+ return Reg;
+}
+
+void OptimizePICCall::incCntAndSetReg(ValueType Entry, unsigned Reg) {
+ CntRegP P = ScopedHT.lookup(Entry);
+ ScopedHT.insert(Entry, std::make_pair(P.first + 1, Reg));
+}
+
+/// Return an OptimizePICCall object.
+FunctionPass *llvm::createMipsOptimizePICCallPass(MipsTargetMachine &TM) {
+ return new OptimizePICCall(TM);
+}
diff --git a/contrib/llvm/lib/Target/Mips/MipsOptionRecord.h b/contrib/llvm/lib/Target/Mips/MipsOptionRecord.h
new file mode 100644
index 0000000..c0abce3
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsOptionRecord.h
@@ -0,0 +1,80 @@
+//===-- MipsOptionRecord.h - Abstraction for storing information ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// MipsOptionRecord - Abstraction for storing arbitrary information in
+// ELF files. Arbitrary information (e.g. register usage) can be stored in Mips
+// specific ELF sections like .Mips.options. Specific records should subclass
+// MipsOptionRecord and provide an implementation to EmitMipsOptionRecord which
+// basically just dumps the information into an ELF section. More information
+// about .Mips.options can be found in the SysV ABI and the 64-bit ELF Object
+// specification.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MIPSOPTIONRECORD_H
+#define MIPSOPTIONRECORD_H
+
+#include "MipsMCTargetDesc.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCRegisterInfo.h"
+
+using namespace llvm;
+
+namespace llvm {
+class MipsELFStreamer;
+class MCSubtargetInfo;
+}
+
+class MipsOptionRecord {
+public:
+ virtual ~MipsOptionRecord() {}
+ virtual void EmitMipsOptionRecord() = 0;
+};
+
+class MipsRegInfoRecord : public MipsOptionRecord {
+public:
+ MipsRegInfoRecord(MipsELFStreamer *S, MCContext &Context,
+ const MCSubtargetInfo &STI)
+ : Streamer(S), Context(Context), STI(STI) {
+ ri_gprmask = 0;
+ ri_cprmask[0] = ri_cprmask[1] = ri_cprmask[2] = ri_cprmask[3] = 0;
+ ri_gp_value = 0;
+
+ const MCRegisterInfo *TRI = Context.getRegisterInfo();
+ GPR32RegClass = &(TRI->getRegClass(Mips::GPR32RegClassID));
+ GPR64RegClass = &(TRI->getRegClass(Mips::GPR64RegClassID));
+ FGR32RegClass = &(TRI->getRegClass(Mips::FGR32RegClassID));
+ FGR64RegClass = &(TRI->getRegClass(Mips::FGR64RegClassID));
+ AFGR64RegClass = &(TRI->getRegClass(Mips::AFGR64RegClassID));
+ MSA128BRegClass = &(TRI->getRegClass(Mips::MSA128BRegClassID));
+ COP2RegClass = &(TRI->getRegClass(Mips::COP2RegClassID));
+ COP3RegClass = &(TRI->getRegClass(Mips::COP3RegClassID));
+ }
+ ~MipsRegInfoRecord() {}
+
+ void EmitMipsOptionRecord();
+ void SetPhysRegUsed(unsigned Reg, const MCRegisterInfo *MCRegInfo);
+
+private:
+ MipsELFStreamer *Streamer;
+ MCContext &Context;
+ const MCSubtargetInfo &STI;
+ const MCRegisterClass *GPR32RegClass;
+ const MCRegisterClass *GPR64RegClass;
+ const MCRegisterClass *FGR32RegClass;
+ const MCRegisterClass *FGR64RegClass;
+ const MCRegisterClass *AFGR64RegClass;
+ const MCRegisterClass *MSA128BRegClass;
+ const MCRegisterClass *COP2RegClass;
+ const MCRegisterClass *COP3RegClass;
+ uint32_t ri_gprmask;
+ uint32_t ri_cprmask[4];
+ int64_t ri_gp_value;
+};
+#endif
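Note: MipsRegInfoRecord accumulates the register-usage masks (ri_gprmask plus one ri_cprmask per coprocessor) that end up in the register-info record of the .MIPS.options section. The bodies of SetPhysRegUsed() and EmitMipsOptionRecord() are not part of this header, so the following standalone sketch is only an assumption about the bookkeeping involved: one bit per register number in the matching 32-bit mask.

    #include <cstdint>
    #include <cstdio>

    struct RegInfoRecordModel {
      uint32_t ri_gprmask = 0;
      uint32_t ri_cprmask[4] = {0, 0, 0, 0};

      void setGPRUsed(unsigned RegNo) { ri_gprmask |= 1u << (RegNo & 31); }
      void setCopRegUsed(unsigned Cop, unsigned RegNo) {
        ri_cprmask[Cop & 3] |= 1u << (RegNo & 31);
      }
    };

    int main() {
      RegInfoRecordModel R;
      R.setGPRUsed(29);       // e.g. $sp
      R.setCopRegUsed(1, 12); // e.g. $f12 lives in coprocessor 1
      std::printf("gprmask=0x%08x cprmask[1]=0x%08x\n",
                  (unsigned)R.ri_gprmask, (unsigned)R.ri_cprmask[1]);
    }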
diff --git a/contrib/llvm/lib/Target/Mips/MipsOs16.cpp b/contrib/llvm/lib/Target/Mips/MipsOs16.cpp
index fe60841..7aae964 100644
--- a/contrib/llvm/lib/Target/Mips/MipsOs16.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsOs16.cpp
@@ -11,13 +11,14 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mips-os16"
#include "MipsOs16.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
+#define DEBUG_TYPE "mips-os16"
+
static cl::opt<std::string> Mips32FunctionMask(
"mips32-function-mask",
diff --git a/contrib/llvm/lib/Target/Mips/MipsOs16.h b/contrib/llvm/lib/Target/Mips/MipsOs16.h
index 21beef8..55e5a81 100644
--- a/contrib/llvm/lib/Target/Mips/MipsOs16.h
+++ b/contrib/llvm/lib/Target/Mips/MipsOs16.h
@@ -34,11 +34,11 @@ public:
}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "MIPS Os16 Optimization";
}
- virtual bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
};
diff --git a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
index 3105b02..084449b 100644
--- a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -11,8 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mips-reg-info"
-
#include "MipsRegisterInfo.h"
#include "Mips.h"
#include "MipsAnalyzeImmediate.h"
@@ -24,9 +22,9 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -37,11 +35,13 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "mips-reg-info"
+
#define GET_REGINFO_TARGET_DESC
#include "MipsGenRegisterInfo.inc"
-using namespace llvm;
-
MipsRegisterInfo::MipsRegisterInfo(const MipsSubtarget &ST)
: MipsGenRegisterInfo(Mips::RA), Subtarget(ST) {}
@@ -79,8 +79,8 @@ MipsRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
//===----------------------------------------------------------------------===//
/// Mips Callee Saved Registers
-const uint16_t* MipsRegisterInfo::
-getCalleeSavedRegs(const MachineFunction *MF) const {
+const MCPhysReg *
+MipsRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
if (Subtarget.isSingleFloat())
return CSR_SingleFloatOnly_SaveList;
@@ -93,6 +93,9 @@ getCalleeSavedRegs(const MachineFunction *MF) const {
if (Subtarget.isFP64bit())
return CSR_O32_FP64_SaveList;
+ if (Subtarget.isFPXX())
+ return CSR_O32_FPXX_SaveList;
+
return CSR_O32_SaveList;
}
@@ -110,6 +113,9 @@ MipsRegisterInfo::getCallPreservedMask(CallingConv::ID) const {
if (Subtarget.isFP64bit())
return CSR_O32_FP64_RegMask;
+ if (Subtarget.isFPXX())
+ return CSR_O32_FPXX_RegMask;
+
return CSR_O32_RegMask;
}
@@ -119,11 +125,11 @@ const uint32_t *MipsRegisterInfo::getMips16RetHelperMask() {
BitVector MipsRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
- static const uint16_t ReservedGPR32[] = {
+ static const MCPhysReg ReservedGPR32[] = {
Mips::ZERO, Mips::K0, Mips::K1, Mips::SP
};
- static const uint16_t ReservedGPR64[] = {
+ static const MCPhysReg ReservedGPR64[] = {
Mips::ZERO_64, Mips::K0_64, Mips::K1_64, Mips::SP_64
};
@@ -133,6 +139,13 @@ getReservedRegs(const MachineFunction &MF) const {
for (unsigned I = 0; I < array_lengthof(ReservedGPR32); ++I)
Reserved.set(ReservedGPR32[I]);
+ // Reserve registers for the NaCl sandbox.
+ if (Subtarget.isTargetNaCl()) {
+ Reserved.set(Mips::T6); // Reserved for control flow mask.
+ Reserved.set(Mips::T7); // Reserved for memory access mask.
+ Reserved.set(Mips::T8); // Reserved for thread pointer.
+ }
+
for (unsigned I = 0; I < array_lengthof(ReservedGPR64); ++I)
Reserved.set(ReservedGPR64[I]);
@@ -179,10 +192,13 @@ getReservedRegs(const MachineFunction &MF) const {
// Reserve RA if in mips16 mode.
if (Subtarget.inMips16Mode()) {
+ const MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
Reserved.set(Mips::RA);
Reserved.set(Mips::RA_64);
Reserved.set(Mips::T0);
Reserved.set(Mips::T1);
+ if (MF.getFunction()->hasFnAttribute("saveS2") || MipsFI->hasSaveS2())
+ Reserved.set(Mips::S2);
}
// Reserve GP if small section is used.
@@ -191,6 +207,11 @@ getReservedRegs(const MachineFunction &MF) const {
Reserved.set(Mips::GP_64);
}
+ if (Subtarget.isABI_O32() && !Subtarget.useOddSPReg()) {
+ for (const auto &Reg : Mips::OddSPRegClass)
+ Reserved.set(Reg);
+ }
+
return Reserved;
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h
index 0450c6f..b34496f 100644
--- a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h
@@ -43,30 +43,31 @@ public:
/// Code Generation virtual methods...
const TargetRegisterClass *getPointerRegClass(const MachineFunction &MF,
- unsigned Kind) const;
+ unsigned Kind) const override;
unsigned getRegPressureLimit(const TargetRegisterClass *RC,
- MachineFunction &MF) const;
- const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
- const uint32_t *getCallPreservedMask(CallingConv::ID) const;
+ MachineFunction &MF) const override;
+ const MCPhysReg *
+ getCalleeSavedRegs(const MachineFunction *MF = nullptr) const override;
+ const uint32_t *getCallPreservedMask(CallingConv::ID) const override;
static const uint32_t *getMips16RetHelperMask();
- BitVector getReservedRegs(const MachineFunction &MF) const;
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
- virtual bool requiresRegisterScavenging(const MachineFunction &MF) const;
+ bool requiresRegisterScavenging(const MachineFunction &MF) const override;
- virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const;
+ bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const override;
/// Stack Frame Processing Methods
void eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, unsigned FIOperandNum,
- RegScavenger *RS = NULL) const;
+ RegScavenger *RS = nullptr) const override;
void processFunctionBeforeFrameFinalized(MachineFunction &MF,
- RegScavenger *RS = NULL) const;
+ RegScavenger *RS = nullptr) const;
/// Debug information queries.
- unsigned getFrameRegister(const MachineFunction &MF) const;
+ unsigned getFrameRegister(const MachineFunction &MF) const override;
/// \brief Return GPR register class.
virtual const TargetRegisterClass *intRegClass(unsigned Size) const = 0;
diff --git a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td
index 3173d09..74dfa4f 100644
--- a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td
+++ b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td
@@ -205,11 +205,16 @@ let Namespace = "Mips" in {
foreach I = 0-31 in
def COP2#I : MipsReg<#I, ""#I>;
+ // COP3 registers.
+ foreach I = 0-31 in
+ def COP3#I : MipsReg<#I, ""#I>;
+
// PC register
def PC : Register<"pc">;
// Hardware register $29
- def HWR29 : MipsReg<29, "29">;
+ foreach I = 0-31 in
+ def HWR#I : MipsReg<#I, ""#I>;
// Accum registers
foreach I = 0-3 in
@@ -245,6 +250,15 @@ let Namespace = "Mips" in {
def MSARequest : MipsReg<5, "5">;
def MSAMap : MipsReg<6, "6">;
def MSAUnmap : MipsReg<7, "7">;
+
+ // Octeon multiplier and product registers
+ def MPL0 : MipsReg<0, "mpl0">;
+ def MPL1 : MipsReg<1, "mpl1">;
+ def MPL2 : MipsReg<2, "mpl2">;
+ def P0 : MipsReg<0, "p0">;
+ def P1 : MipsReg<1, "p1">;
+ def P2 : MipsReg<2, "p2">;
+
}
//===----------------------------------------------------------------------===//
@@ -326,6 +340,15 @@ def AFGR64 : RegisterClass<"Mips", [f64], 64, (add
def FGR64 : RegisterClass<"Mips", [f64], 64, (sequence "D%u_64", 0, 31)>;
+// Used to reserve odd registers when given -mattr=+nooddspreg
+// FIXME: Remove double precision registers from this set.
+def OddSP : RegisterClass<"Mips", [f32], 32,
+ (add (decimate (sequence "F%u", 1, 31), 2),
+ (decimate (sequence "F_HI%u", 1, 31), 2),
+ (decimate (sequence "D%u", 1, 15), 2),
+ (decimate (sequence "D%u_64", 1, 31), 2))>,
+ Unallocatable;
+
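Note: the OddSP class is built with decimate, which keeps every n-th element of a sequence starting from the first, so (decimate (sequence "F%u", 1, 31), 2) yields the odd single-precision registers F1, F3, ..., F31 that -mattr=+nooddspreg must reserve. A tiny standalone C++ sketch of that expansion, purely for illustration:

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<unsigned> Seq;
      for (unsigned I = 1; I <= 31; ++I)
        Seq.push_back(I);                  // sequence "F%u", 1, 31

      std::vector<unsigned> Decimated;
      for (std::size_t I = 0; I < Seq.size(); I += 2)
        Decimated.push_back(Seq[I]);       // decimate(..., 2): keep every 2nd entry

      for (unsigned R : Decimated)
        std::printf("F%u ", R);            // F1 F3 F5 ... F31
      std::printf("\n");
    }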
// FP control registers.
def CCR : RegisterClass<"Mips", [i32], 32, (sequence "FCR%u", 0, 31)>,
Unallocatable;
@@ -334,6 +357,10 @@ def CCR : RegisterClass<"Mips", [i32], 32, (sequence "FCR%u", 0, 31)>,
def FCC : RegisterClass<"Mips", [i32], 32, (sequence "FCC%u", 0, 7)>,
Unallocatable;
+// MIPS32r6/MIPS64r6 store FPU condition codes in normal FGR registers.
+// This class allows us to represent this in codegen patterns.
+def FGRCC : RegisterClass<"Mips", [i32], 32, (sequence "F%u", 0, 31)>;
+
def MSA128B: RegisterClass<"Mips", [v16i8], 128,
(sequence "W%u", 0, 31)>;
def MSA128H: RegisterClass<"Mips", [v8i16, v8f16], 128,
@@ -355,7 +382,8 @@ def LO64 : RegisterClass<"Mips", [i64], 64, (add LO0_64)>;
def HI64 : RegisterClass<"Mips", [i64], 64, (add HI0_64)>;
// Hardware registers
-def HWRegs : RegisterClass<"Mips", [i32], 32, (add HWR29)>, Unallocatable;
+def HWRegs : RegisterClass<"Mips", [i32], 32, (sequence "HWR%u", 0, 31)>,
+ Unallocatable;
// Accumulator Registers
def ACC64 : RegisterClass<"Mips", [untyped], 64, (add AC0)> {
@@ -376,89 +404,81 @@ def DSPCC : RegisterClass<"Mips", [v4i8, v2i16], 32, (add DSPCCond)>;
def COP2 : RegisterClass<"Mips", [i32], 32, (sequence "COP2%u", 0, 31)>,
Unallocatable;
+// Coprocessor 3 registers.
+def COP3 : RegisterClass<"Mips", [i32], 32, (sequence "COP3%u", 0, 31)>,
+ Unallocatable;
+
+// Octeon multiplier and product registers
+def OCTEON_MPL : RegisterClass<"Mips", [i64], 64, (add MPL0, MPL1, MPL2)>,
+ Unallocatable;
+def OCTEON_P : RegisterClass<"Mips", [i64], 64, (add P0, P1, P2)>,
+ Unallocatable;
+
// Register Operands.
class MipsAsmRegOperand : AsmOperandClass {
- let RenderMethod = "addRegAsmOperands";
-}
-def GPR32AsmOperand : MipsAsmRegOperand {
- let Name = "GPR32Asm";
- let ParserMethod = "parseGPR32";
+ let ParserMethod = "ParseAnyRegister";
}
def GPR64AsmOperand : MipsAsmRegOperand {
- let Name = "GPR64Asm";
- let ParserMethod = "parseGPR64";
+ let Name = "GPR64AsmReg";
+ let PredicateMethod = "isGPRAsmReg";
}
-def ACC64DSPAsmOperand : MipsAsmRegOperand {
- let Name = "ACC64DSPAsm";
- let ParserMethod = "parseACC64DSP";
+def GPR32AsmOperand : MipsAsmRegOperand {
+ let Name = "GPR32AsmReg";
+ let PredicateMethod = "isGPRAsmReg";
}
-def LO32DSPAsmOperand : MipsAsmRegOperand {
- let Name = "LO32DSPAsm";
- let ParserMethod = "parseLO32DSP";
+def ACC64DSPAsmOperand : MipsAsmRegOperand {
+ let Name = "ACC64DSPAsmReg";
+ let PredicateMethod = "isACCAsmReg";
}
def HI32DSPAsmOperand : MipsAsmRegOperand {
- let Name = "HI32DSPAsm";
- let ParserMethod = "parseHI32DSP";
+ let Name = "HI32DSPAsmReg";
+ let PredicateMethod = "isACCAsmReg";
+}
+
+def LO32DSPAsmOperand : MipsAsmRegOperand {
+ let Name = "LO32DSPAsmReg";
+ let PredicateMethod = "isACCAsmReg";
}
def CCRAsmOperand : MipsAsmRegOperand {
- let Name = "CCRAsm";
- let ParserMethod = "parseCCRRegs";
+ let Name = "CCRAsmReg";
}
def AFGR64AsmOperand : MipsAsmRegOperand {
- let Name = "AFGR64Asm";
- let ParserMethod = "parseAFGR64Regs";
+ let Name = "AFGR64AsmReg";
+ let PredicateMethod = "isFGRAsmReg";
}
def FGR64AsmOperand : MipsAsmRegOperand {
- let Name = "FGR64Asm";
- let ParserMethod = "parseFGR64Regs";
+ let Name = "FGR64AsmReg";
+ let PredicateMethod = "isFGRAsmReg";
}
def FGR32AsmOperand : MipsAsmRegOperand {
- let Name = "FGR32Asm";
- let ParserMethod = "parseFGR32Regs";
+ let Name = "FGR32AsmReg";
+ let PredicateMethod = "isFGRAsmReg";
}
def FGRH32AsmOperand : MipsAsmRegOperand {
- let Name = "FGRH32Asm";
- let ParserMethod = "parseFGRH32Regs";
+ let Name = "FGRH32AsmReg";
+ let PredicateMethod = "isFGRAsmReg";
}
def FCCRegsAsmOperand : MipsAsmRegOperand {
- let Name = "FCCRegsAsm";
- let ParserMethod = "parseFCCRegs";
-}
-
-def MSA128BAsmOperand : MipsAsmRegOperand {
- let Name = "MSA128BAsm";
- let ParserMethod = "parseMSA128BRegs";
-}
-
-def MSA128HAsmOperand : MipsAsmRegOperand {
- let Name = "MSA128HAsm";
- let ParserMethod = "parseMSA128HRegs";
-}
-
-def MSA128WAsmOperand : MipsAsmRegOperand {
- let Name = "MSA128WAsm";
- let ParserMethod = "parseMSA128WRegs";
+ let Name = "FCCAsmReg";
}
-def MSA128DAsmOperand : MipsAsmRegOperand {
- let Name = "MSA128DAsm";
- let ParserMethod = "parseMSA128DRegs";
+def MSA128AsmOperand : MipsAsmRegOperand {
+ let Name = "MSA128AsmReg";
}
-def MSA128CRAsmOperand : MipsAsmRegOperand {
- let Name = "MSA128CRAsm";
- let ParserMethod = "parseMSA128CtrlRegs";
+def MSACtrlAsmOperand : MipsAsmRegOperand {
+ let Name = "MSACtrlAsmReg";
}
def GPR32Opnd : RegisterOperand<GPR32> {
@@ -478,13 +498,15 @@ def CCROpnd : RegisterOperand<CCR> {
}
def HWRegsAsmOperand : MipsAsmRegOperand {
- let Name = "HWRegsAsm";
- let ParserMethod = "parseHWRegs";
+ let Name = "HWRegsAsmReg";
}
def COP2AsmOperand : MipsAsmRegOperand {
- let Name = "COP2Asm";
- let ParserMethod = "parseCOP2";
+ let Name = "COP2AsmReg";
+}
+
+def COP3AsmOperand : MipsAsmRegOperand {
+ let Name = "COP3AsmReg";
}
def HWRegsOpnd : RegisterOperand<HWRegs> {
@@ -503,6 +525,12 @@ def FGR32Opnd : RegisterOperand<FGR32> {
let ParserMatchClass = FGR32AsmOperand;
}
+def FGRCCOpnd : RegisterOperand<FGRCC> {
+ // The assembler doesn't use register classes so we can re-use
+ // FGR32AsmOperand.
+ let ParserMatchClass = FGR32AsmOperand;
+}
+
def FGRH32Opnd : RegisterOperand<FGRH32> {
let ParserMatchClass = FGRH32AsmOperand;
}
@@ -527,23 +555,27 @@ def COP2Opnd : RegisterOperand<COP2> {
let ParserMatchClass = COP2AsmOperand;
}
+def COP3Opnd : RegisterOperand<COP3> {
+ let ParserMatchClass = COP3AsmOperand;
+}
+
def MSA128BOpnd : RegisterOperand<MSA128B> {
- let ParserMatchClass = MSA128BAsmOperand;
+ let ParserMatchClass = MSA128AsmOperand;
}
def MSA128HOpnd : RegisterOperand<MSA128H> {
- let ParserMatchClass = MSA128HAsmOperand;
+ let ParserMatchClass = MSA128AsmOperand;
}
def MSA128WOpnd : RegisterOperand<MSA128W> {
- let ParserMatchClass = MSA128WAsmOperand;
+ let ParserMatchClass = MSA128AsmOperand;
}
def MSA128DOpnd : RegisterOperand<MSA128D> {
- let ParserMatchClass = MSA128DAsmOperand;
+ let ParserMatchClass = MSA128AsmOperand;
}
def MSA128CROpnd : RegisterOperand<MSACtrl> {
- let ParserMatchClass = MSA128CRAsmOperand;
+ let ParserMatchClass = MSACtrlAsmOperand;
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp b/contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
index 33ed4b3..d0a17cd 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
@@ -16,6 +16,7 @@
#include "MipsAnalyzeImmediate.h"
#include "MipsMachineFunction.h"
#include "MipsSEInstrInfo.h"
+#include "MipsSubtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -63,6 +64,10 @@ private:
bool expandCopy(MachineBasicBlock &MBB, Iter I);
bool expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
unsigned MFLoOpc);
+ bool expandBuildPairF64(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, bool FP64) const;
+ bool expandExtractElementF64(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, bool FP64) const;
MachineFunction &MF;
MachineRegisterInfo &MRI;
@@ -107,6 +112,22 @@ bool ExpandPseudo::expandInstr(MachineBasicBlock &MBB, Iter I) {
case Mips::STORE_ACC128:
expandStoreACC(MBB, I, Mips::PseudoMFHI64, Mips::PseudoMFLO64, 8);
break;
+ case Mips::BuildPairF64:
+ if (expandBuildPairF64(MBB, I, false))
+ MBB.erase(I);
+ return false;
+ case Mips::BuildPairF64_64:
+ if (expandBuildPairF64(MBB, I, true))
+ MBB.erase(I);
+ return false;
+ case Mips::ExtractElementF64:
+ if (expandExtractElementF64(MBB, I, false))
+ MBB.erase(I);
+ return false;
+ case Mips::ExtractElementF64_64:
+ if (expandExtractElementF64(MBB, I, true))
+ MBB.erase(I);
+ return false;
case TargetOpcode::COPY:
if (!expandCopy(MBB, I))
return false;
@@ -257,6 +278,123 @@ bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I,
return true;
}
+/// This method expands the same instruction that MipsSEInstrInfo::
+/// expandBuildPairF64 does, for the case when the ABI is FPXX and mthc1 is not
+/// available and the case where the ABI is FP64A. It is implemented here
+/// because frame indexes are eliminated before MipsSEInstrInfo::
+/// expandBuildPairF64 is called.
+bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ bool FP64) const {
+ // For fpxx and when mthc1 is not available, use:
+ // spill + reload via ldc1
+ //
+ // The case where dmtc1 is available doesn't need to be handled here
+ // because it never creates a BuildPairF64 node.
+ //
+ // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
+ // for odd-numbered double precision values (because the lower 32 bits are
+ // transferred with mtc1, which is redirected to the upper half of the even
+ // register). Unfortunately, we have to make this decision before register
+ // allocation, so for now we use a spill/reload sequence for all
+ // double-precision values, regardless of whether the register is odd or even.
+
+ const TargetMachine &TM = MF.getTarget();
+ const MipsSubtarget &Subtarget = TM.getSubtarget<MipsSubtarget>();
+ if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
+ (FP64 && !Subtarget.useOddSPReg())) {
+ const MipsSEInstrInfo &TII =
+ *static_cast<const MipsSEInstrInfo*>(TM.getInstrInfo());
+ const MipsRegisterInfo &TRI =
+ *static_cast<const MipsRegisterInfo*>(TM.getRegisterInfo());
+
+ unsigned DstReg = I->getOperand(0).getReg();
+ unsigned LoReg = I->getOperand(1).getReg();
+ unsigned HiReg = I->getOperand(2).getReg();
+
+ // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
+ // the cases where mthc1 is not available). 64-bit architectures and
+ // MIPS32r2 or later can use FGR64 though.
+ assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
+ !Subtarget.isFP64bit());
+
+ const TargetRegisterClass *RC = &Mips::GPR32RegClass;
+ const TargetRegisterClass *RC2 =
+ FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
+
+ // We re-use the same spill slot each time so that the stack frame doesn't
+ // grow too much in functions with a large number of moves.
+ int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC2);
+ TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC, &TRI,
+ 0);
+ TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC, &TRI,
+ 4);
+ TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &TRI, 0);
+ return true;
+ }
+
+ return false;
+}
+
+/// This method expands the same instruction that MipsSEInstrInfo::
+/// expandExtractElementF64 does, for the case when the ABI is FPXX and mfhc1 is not
+/// available and the case where the ABI is FP64A. It is implemented here
+/// because frame indexes are eliminated before MipsSEInstrInfo::
+/// expandExtractElementF64 is called.
+bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ bool FP64) const {
+ // For fpxx and when mfhc1 is not available, use:
+ // spill + reload via ldc1
+ //
+ // The case where dmfc1 is available doesn't need to be handled here
+ // because it never creates an ExtractElementF64 node.
+ //
+ // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
+ // for odd-numbered double precision values (because the lower 32 bits are
+ // transferred with mfc1, which is redirected to the upper half of the even
+ // register). Unfortunately, we have to make this decision before register
+ // allocation, so for now we use a spill/reload sequence for all
+ // double-precision values, regardless of whether the register is odd or even.
+
+ const TargetMachine &TM = MF.getTarget();
+ const MipsSubtarget &Subtarget = TM.getSubtarget<MipsSubtarget>();
+ if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
+ (FP64 && !Subtarget.useOddSPReg())) {
+ const MipsSEInstrInfo &TII =
+ *static_cast<const MipsSEInstrInfo *>(TM.getInstrInfo());
+ const MipsRegisterInfo &TRI =
+ *static_cast<const MipsRegisterInfo *>(TM.getRegisterInfo());
+
+ unsigned DstReg = I->getOperand(0).getReg();
+ unsigned SrcReg = I->getOperand(1).getReg();
+ unsigned N = I->getOperand(2).getImm();
+
+ // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
+ // the cases where mfhc1 is not available). 64-bit architectures and
+ // MIPS32r2 or later can use FGR64 though.
+ assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
+ !Subtarget.isFP64bit());
+
+ const TargetRegisterClass *RC =
+ FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
+ const TargetRegisterClass *RC2 = &Mips::GPR32RegClass;
+
+ // We re-use the same spill slot each time so that the stack frame doesn't
+ // grow too much in functions with a large number of moves.
+ int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC);
+ TII.storeRegToStack(MBB, I, SrcReg, I->getOperand(1).isKill(), FI, RC, &TRI,
+ 0);
+ TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &TRI, N * 4);
+ return true;
+ }
+
+ return false;
+}
+
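Note: both expansions rely on the fact that a 64-bit FP value spilled to the shared slot can be assembled from, or split into, 32-bit words at offsets 0 and 4: expandBuildPairF64 stores the two GPR halves and reloads a double, while expandExtractElementF64 stores the double and reloads the half at N * 4. The standalone sketch below shows the same memory view on a little-endian host; it illustrates only the layout, not the actual storeRegToStack/loadRegFromStack calls.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      unsigned char Slot[8];                     // the reused spill slot

      // BuildPairF64 direction: store two 32-bit halves, reload one double.
      uint32_t Lo = 0x00000000, Hi = 0x40090000; // halves of 3.125
      std::memcpy(Slot + 0, &Lo, 4);
      std::memcpy(Slot + 4, &Hi, 4);
      double D;
      std::memcpy(&D, Slot, 8);
      std::printf("built: %f\n", D);             // 3.125000

      // ExtractElementF64 direction: store the double, reload half N at N * 4.
      std::memcpy(Slot, &D, 8);
      uint32_t Half1;
      std::memcpy(&Half1, Slot + 1 * 4, 4);
      std::printf("half 1: 0x%08x\n", (unsigned)Half1); // 0x40090000
    }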
+MipsSEFrameLowering::MipsSEFrameLowering(const MipsSubtarget &STI)
+ : MipsFrameLowering(STI, STI.stackAlignment()) {}
+
unsigned MipsSEFrameLowering::ehDataReg(unsigned I) const {
static const unsigned EhDataReg[] = {
Mips::A0, Mips::A1, Mips::A2, Mips::A3
@@ -299,11 +437,10 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
TII.adjustStackPtr(SP, -StackSize, MBB, MBBI);
// emit ".cfi_def_cfa_offset StackSize"
- MCSymbol *AdjustSPLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl,
- TII.get(TargetOpcode::PROLOG_LABEL)).addSym(AdjustSPLabel);
- MMI.addFrameInst(
- MCCFIInstruction::createDefCfaOffset(AdjustSPLabel, -StackSize));
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, -StackSize));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
@@ -315,10 +452,6 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
// Iterate over list of callee-saved registers and emit .cfi_offset
// directives.
- MCSymbol *CSLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl,
- TII.get(TargetOpcode::PROLOG_LABEL)).addSym(CSLabel);
-
for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
E = CSI.end(); I != E; ++I) {
int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
@@ -335,14 +468,37 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
if (!STI.isLittle())
std::swap(Reg0, Reg1);
- MMI.addFrameInst(
- MCCFIInstruction::createOffset(CSLabel, Reg0, Offset));
- MMI.addFrameInst(
- MCCFIInstruction::createOffset(CSLabel, Reg1, Offset + 4));
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
+ CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ } else if (Mips::FGR64RegClass.contains(Reg)) {
+ unsigned Reg0 = MRI->getDwarfRegNum(Reg, true);
+ unsigned Reg1 = MRI->getDwarfRegNum(Reg, true) + 1;
+
+ if (!STI.isLittle())
+ std::swap(Reg0, Reg1);
+
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
+ CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
} else {
// Reg is either in GPR32 or FGR32.
- MMI.addFrameInst(MCCFIInstruction::createOffset(
- CSLabel, MRI->getDwarfRegNum(Reg, 1), Offset));
+ unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
+ nullptr, MRI->getDwarfRegNum(Reg, 1), Offset));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
}
}
}
@@ -360,27 +516,27 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
}
// Emit .cfi_offset directives for eh data registers.
- MCSymbol *CSLabel2 = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl,
- TII.get(TargetOpcode::PROLOG_LABEL)).addSym(CSLabel2);
for (int I = 0; I < 4; ++I) {
int64_t Offset = MFI->getObjectOffset(MipsFI->getEhDataRegFI(I));
unsigned Reg = MRI->getDwarfRegNum(ehDataReg(I), true);
- MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel2, Reg, Offset));
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createOffset(nullptr, Reg, Offset));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
}
}
// if framepointer enabled, set it to point to the stack pointer.
if (hasFP(MF)) {
// Insert instruction "move $fp, $sp" at this location.
- BuildMI(MBB, MBBI, dl, TII.get(ADDu), FP).addReg(SP).addReg(ZERO);
+ BuildMI(MBB, MBBI, dl, TII.get(ADDu), FP).addReg(SP).addReg(ZERO)
+ .setMIFlag(MachineInstr::FrameSetup);
// emit ".cfi_def_cfa_register $fp"
- MCSymbol *SetFPLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl,
- TII.get(TargetOpcode::PROLOG_LABEL)).addSym(SetFPLabel);
- MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(
- SetFPLabel, MRI->getDwarfRegNum(FP, true)));
+ unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(
+ nullptr, MRI->getDwarfRegNum(FP, true)));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
}
}
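Note: the prologue no longer emits PROLOG_LABEL temp symbols; instead each MCCFIInstruction is registered with MachineModuleInfo (addFrameInst returns its index) and a CFI_INSTRUCTION pseudo carrying that index is placed in the instruction stream for the streamer to resolve later. A minimal standalone model of that index-into-a-side-table scheme, with invented type names, is sketched below.

    #include <cstdio>
    #include <string>
    #include <vector>

    struct CFIDirective {
      std::string Text;
    };

    // Stand-in for MachineModuleInfo: owns the directives, hands out indexes.
    struct ModuleInfoModel {
      std::vector<CFIDirective> FrameInsts;
      unsigned addFrameInst(CFIDirective D) {
        FrameInsts.push_back(D);
        return static_cast<unsigned>(FrameInsts.size()) - 1; // index stored on the MI
      }
    };

    int main() {
      ModuleInfoModel MMI;
      std::vector<unsigned> Stream; // stand-in for CFI_INSTRUCTION operands
      Stream.push_back(MMI.addFrameInst({".cfi_def_cfa_offset 32"}));
      Stream.push_back(MMI.addFrameInst({".cfi_def_cfa_register $fp"}));
      for (unsigned Idx : Stream)   // the streamer resolves the indexes later
        std::printf("%s\n", MMI.FrameInsts[Idx].Text.c_str());
    }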
diff --git a/contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.h b/contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.h
index 8fa9e46..e832848 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.h
+++ b/contrib/llvm/lib/Target/Mips/MipsSEFrameLowering.h
@@ -20,27 +20,26 @@ namespace llvm {
class MipsSEFrameLowering : public MipsFrameLowering {
public:
- explicit MipsSEFrameLowering(const MipsSubtarget &STI)
- : MipsFrameLowering(STI, STI.stackAlignment()) {}
+ explicit MipsSEFrameLowering(const MipsSubtarget &STI);
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+ void emitPrologue(MachineFunction &MF) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const override;
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo *TRI) const override;
- bool hasReservedCallFrame(const MachineFunction &MF) const;
+ bool hasReservedCallFrame(const MachineFunction &MF) const override;
void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const;
+ RegScavenger *RS) const override;
unsigned ehDataReg(unsigned I) const;
};
diff --git a/contrib/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
index 737660e..47e1931 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -11,10 +11,9 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mips-isel"
#include "MipsSEISelDAGToDAG.h"
-#include "Mips.h"
#include "MCTargetDesc/MipsBaseInfo.h"
+#include "Mips.h"
#include "MipsAnalyzeImmediate.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterInfo.h"
@@ -24,19 +23,22 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
+#define DEBUG_TYPE "mips-isel"
+
bool MipsSEDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
- if (Subtarget.inMips16Mode())
+ Subtarget = &TM.getSubtarget<MipsSubtarget>();
+ if (Subtarget->inMips16Mode())
return false;
return MipsDAGToDAGISel::runOnMachineFunction(MF);
}
@@ -104,7 +106,7 @@ bool MipsSEDAGToDAGISel::replaceUsesWithZeroReg(MachineRegisterInfo *MRI,
// Replace uses with ZeroReg.
for (MachineRegisterInfo::use_iterator U = MRI->use_begin(DstReg),
E = MRI->use_end(); U != E;) {
- MachineOperand &MO = U.getOperand();
+ MachineOperand &MO = *U;
unsigned OpNo = U.getOperandNo();
MachineInstr *MI = MO.getParent();
++U;
@@ -133,7 +135,7 @@ void MipsSEDAGToDAGISel::initGlobalBaseReg(MachineFunction &MF) {
unsigned V0, V1, GlobalBaseReg = MipsFI->getGlobalBaseReg();
const TargetRegisterClass *RC;
- if (Subtarget.isABI_N64())
+ if (Subtarget->isABI_N64())
RC = (const TargetRegisterClass*)&Mips::GPR64RegClass;
else
RC = (const TargetRegisterClass*)&Mips::GPR32RegClass;
@@ -141,7 +143,7 @@ void MipsSEDAGToDAGISel::initGlobalBaseReg(MachineFunction &MF) {
V0 = RegInfo.createVirtualRegister(RC);
V1 = RegInfo.createVirtualRegister(RC);
- if (Subtarget.isABI_N64()) {
+ if (Subtarget->isABI_N64()) {
MF.getRegInfo().addLiveIn(Mips::T9_64);
MBB.addLiveIn(Mips::T9_64);
@@ -173,7 +175,7 @@ void MipsSEDAGToDAGISel::initGlobalBaseReg(MachineFunction &MF) {
MF.getRegInfo().addLiveIn(Mips::T9);
MBB.addLiveIn(Mips::T9);
- if (Subtarget.isABI_N32()) {
+ if (Subtarget->isABI_N32()) {
// lui $v0, %hi(%neg(%gp_rel(fname)))
// addu $v1, $v0, $t9
// addiu $globalbasereg, $v1, %lo(%neg(%gp_rel(fname)))
@@ -186,7 +188,7 @@ void MipsSEDAGToDAGISel::initGlobalBaseReg(MachineFunction &MF) {
return;
}
- assert(Subtarget.isABI_O32());
+ assert(Subtarget->isABI_O32());
// For O32 ABI, the following instruction sequence is emitted to initialize
// the global base register:
@@ -248,18 +250,49 @@ SDNode *MipsSEDAGToDAGISel::selectAddESubE(unsigned MOp, SDValue InFlag,
SDValue(AddCarry, 0));
}
+/// Match frameindex
+bool MipsSEDAGToDAGISel::selectAddrFrameIndex(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ EVT ValTy = Addr.getValueType();
+
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy);
+ Offset = CurDAG->getTargetConstant(0, ValTy);
+ return true;
+ }
+ return false;
+}
+
+/// Match frameindex+offset and frameindex|offset
+bool MipsSEDAGToDAGISel::selectAddrFrameIndexOffset(SDValue Addr, SDValue &Base,
+ SDValue &Offset,
+ unsigned OffsetBits) const {
+ if (CurDAG->isBaseWithConstantOffset(Addr)) {
+ ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1));
+ if (isIntN(OffsetBits, CN->getSExtValue())) {
+ EVT ValTy = Addr.getValueType();
+
+ // If the first operand is a FI, get the TargetFI Node
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>
+ (Addr.getOperand(0)))
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy);
+ else
+ Base = Addr.getOperand(0);
+
+ Offset = CurDAG->getTargetConstant(CN->getZExtValue(), ValTy);
+ return true;
+ }
+ }
+ return false;
+}
+
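Note: selectAddrFrameIndex() and selectAddrFrameIndexOffset() factor the frame-index and base-plus-constant cases out of the individual ComplexPatterns, so selectAddrRegImm, selectAddrRegImm10 and selectAddrRegImm12 differ only in the signed-immediate width they pass (16, 10 and 12 bits). The standalone helper below mirrors the isIntN() range check that gates the offset; it is an illustration, not the LLVM function.

    #include <cstdint>
    #include <cstdio>

    // An offset is usable only if it fits in a signed immediate of the given
    // width (16 bits for plain loads/stores, 10 for MSA, 12 for microMIPS).
    static bool fitsSignedBits(int64_t Value, unsigned Bits) {
      int64_t Min = -(int64_t(1) << (Bits - 1));
      int64_t Max = (int64_t(1) << (Bits - 1)) - 1;
      return Value >= Min && Value <= Max;
    }

    int main() {
      std::printf("%d %d %d\n",
                  fitsSignedBits(511, 10),     // 1: fits in simm10
                  fitsSignedBits(512, 10),     // 0: needs 11 bits
                  fitsSignedBits(-32768, 16)); // 1: fits in simm16
    }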
/// ComplexPattern used on MipsInstrInfo
/// Used on Mips Load/Store instructions
bool MipsSEDAGToDAGISel::selectAddrRegImm(SDValue Addr, SDValue &Base,
SDValue &Offset) const {
- EVT ValTy = Addr.getValueType();
-
// if Address is FI, get the TargetFrameIndex.
- if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
- Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy);
- Offset = CurDAG->getTargetConstant(0, ValTy);
+ if (selectAddrFrameIndex(Addr, Base, Offset))
return true;
- }
// on PIC code Load GA
if (Addr.getOpcode() == MipsISD::Wrapper) {
@@ -275,21 +308,8 @@ bool MipsSEDAGToDAGISel::selectAddrRegImm(SDValue Addr, SDValue &Base,
}
// Addresses of the form FI+const or FI|const
- if (CurDAG->isBaseWithConstantOffset(Addr)) {
- ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1));
- if (isInt<16>(CN->getSExtValue())) {
-
- // If the first operand is a FI, get the TargetFI Node
- if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>
- (Addr.getOperand(0)))
- Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy);
- else
- Base = Addr.getOperand(0);
-
- Offset = CurDAG->getTargetConstant(CN->getZExtValue(), ValTy);
- return true;
- }
- }
+ if (selectAddrFrameIndexOffset(Addr, Base, Offset, 16))
+ return true;
// Operand is a result from an ADD.
if (Addr.getOpcode() == ISD::ADD) {
@@ -343,27 +363,25 @@ bool MipsSEDAGToDAGISel::selectIntAddr(SDValue Addr, SDValue &Base,
selectAddrDefault(Addr, Base, Offset);
}
-/// Used on microMIPS Load/Store unaligned instructions (12-bit offset)
-bool MipsSEDAGToDAGISel::selectAddrRegImm12(SDValue Addr, SDValue &Base,
+bool MipsSEDAGToDAGISel::selectAddrRegImm10(SDValue Addr, SDValue &Base,
SDValue &Offset) const {
- EVT ValTy = Addr.getValueType();
+ if (selectAddrFrameIndex(Addr, Base, Offset))
+ return true;
- // Addresses of the form FI+const or FI|const
- if (CurDAG->isBaseWithConstantOffset(Addr)) {
- ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1));
- if (isInt<12>(CN->getSExtValue())) {
+ if (selectAddrFrameIndexOffset(Addr, Base, Offset, 10))
+ return true;
- // If the first operand is a FI then get the TargetFI Node
- if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>
- (Addr.getOperand(0)))
- Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy);
- else
- Base = Addr.getOperand(0);
+ return false;
+}
- Offset = CurDAG->getTargetConstant(CN->getZExtValue(), ValTy);
- return true;
- }
- }
+/// Used on microMIPS Load/Store unaligned instructions (12-bit offset)
+bool MipsSEDAGToDAGISel::selectAddrRegImm12(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ if (selectAddrFrameIndex(Addr, Base, Offset))
+ return true;
+
+ if (selectAddrFrameIndexOffset(Addr, Base, Offset, 12))
+ return true;
return false;
}
@@ -374,18 +392,29 @@ bool MipsSEDAGToDAGISel::selectIntAddrMM(SDValue Addr, SDValue &Base,
selectAddrDefault(Addr, Base, Offset);
}
+bool MipsSEDAGToDAGISel::selectIntAddrMSA(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ if (selectAddrRegImm10(Addr, Base, Offset))
+ return true;
+
+ if (selectAddrDefault(Addr, Base, Offset))
+ return true;
+
+ return false;
+}
+
// Select constant vector splats.
//
// Returns true and sets Imm if:
// * MSA is enabled
// * N is a ISD::BUILD_VECTOR representing a constant splat
bool MipsSEDAGToDAGISel::selectVSplat(SDNode *N, APInt &Imm) const {
- if (!Subtarget.hasMSA())
+ if (!Subtarget->hasMSA())
return false;
BuildVectorSDNode *Node = dyn_cast<BuildVectorSDNode>(N);
- if (Node == NULL)
+ if (!Node)
return false;
APInt SplatValue, SplatUndef;
@@ -394,7 +423,7 @@ bool MipsSEDAGToDAGISel::selectVSplat(SDNode *N, APInt &Imm) const {
if (!Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
HasAnyUndefs, 8,
- !Subtarget.isLittle()))
+ !Subtarget->isLittle()))
return false;
Imm = SplatValue;
@@ -620,7 +649,7 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
}
case ISD::ADDE: {
- if (Subtarget.hasDSP()) // Select DSP instructions, ADDSC and ADDWC.
+ if (Subtarget->hasDSP()) // Select DSP instructions, ADDSC and ADDWC.
break;
SDValue InFlag = Node->getOperand(2);
Result = selectAddESubE(Mips::ADDu, InFlag, InFlag.getValue(0), DL, Node);
@@ -630,11 +659,11 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
case ISD::ConstantFP: {
ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(Node);
if (Node->getValueType(0) == MVT::f64 && CN->isExactlyValue(+0.0)) {
- if (Subtarget.hasMips64()) {
+ if (Subtarget->isGP64bit()) {
SDValue Zero = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
Mips::ZERO_64, MVT::i64);
Result = CurDAG->getMachineNode(Mips::DMTC1, DL, MVT::f64, Zero);
- } else if (Subtarget.isFP64bit()) {
+ } else if (Subtarget->isFP64bit()) {
SDValue Zero = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
Mips::ZERO, MVT::i32);
Result = CurDAG->getMachineNode(Mips::BuildPairF64_64, DL, MVT::f64,
@@ -785,17 +814,17 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
EVT ResVecTy = BVN->getValueType(0);
EVT ViaVecTy;
- if (!Subtarget.hasMSA() || !BVN->getValueType(0).is128BitVector())
- return std::make_pair(false, (SDNode*)NULL);
+ if (!Subtarget->hasMSA() || !BVN->getValueType(0).is128BitVector())
+ return std::make_pair(false, nullptr);
if (!BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
HasAnyUndefs, 8,
- !Subtarget.isLittle()))
- return std::make_pair(false, (SDNode*)NULL);
+ !Subtarget->isLittle()))
+ return std::make_pair(false, nullptr);
switch (SplatBitSize) {
default:
- return std::make_pair(false, (SDNode*)NULL);
+ return std::make_pair(false, nullptr);
case 8:
LdiOp = Mips::LDI_B;
ViaVecTy = MVT::v16i8;
@@ -815,7 +844,7 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
}
if (!SplatValue.isSignedIntN(10))
- return std::make_pair(false, (SDNode*)NULL);
+ return std::make_pair(false, nullptr);
SDValue Imm = CurDAG->getTargetConstant(SplatValue,
ViaVecTy.getVectorElementType());
@@ -841,7 +870,7 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
}
- return std::make_pair(false, (SDNode*)NULL);
+ return std::make_pair(false, nullptr);
}
FunctionPass *llvm::createMipsSEISelDag(MipsTargetMachine &TM) {
diff --git a/contrib/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.h b/contrib/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.h
index dc52064..57328d2 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.h
+++ b/contrib/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.h
@@ -25,7 +25,7 @@ public:
private:
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
void addDSPCtrlRegOperands(bool IsDef, MachineInstr &MI,
MachineFunction &MF);
@@ -40,60 +40,70 @@ private:
SDNode *selectAddESubE(unsigned MOp, SDValue InFlag, SDValue CmpLHS,
SDLoc DL, SDNode *Node) const;
- virtual bool selectAddrRegImm(SDValue Addr, SDValue &Base,
- SDValue &Offset) const;
+ bool selectAddrFrameIndex(SDValue Addr, SDValue &Base, SDValue &Offset) const;
+ bool selectAddrFrameIndexOffset(SDValue Addr, SDValue &Base, SDValue &Offset,
+ unsigned OffsetBits) const;
- virtual bool selectAddrRegReg(SDValue Addr, SDValue &Base,
- SDValue &Offset) const;
+ bool selectAddrRegImm(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const override;
- virtual bool selectAddrDefault(SDValue Addr, SDValue &Base,
- SDValue &Offset) const;
+ bool selectAddrRegReg(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const override;
- virtual bool selectIntAddr(SDValue Addr, SDValue &Base,
- SDValue &Offset) const;
+ bool selectAddrDefault(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const override;
- virtual bool selectAddrRegImm12(SDValue Addr, SDValue &Base,
- SDValue &Offset) const;
+ bool selectIntAddr(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const override;
- virtual bool selectIntAddrMM(SDValue Addr, SDValue &Base,
- SDValue &Offset) const;
+ bool selectAddrRegImm10(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const;
+
+ bool selectAddrRegImm12(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const;
+
+ bool selectIntAddrMM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const override;
+
+ bool selectIntAddrMSA(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const override;
/// \brief Select constant vector splats.
- virtual bool selectVSplat(SDNode *N, APInt &Imm) const;
+ bool selectVSplat(SDNode *N, APInt &Imm) const override;
/// \brief Select constant vector splats whose value fits in a given integer.
- virtual bool selectVSplatCommon(SDValue N, SDValue &Imm, bool Signed,
+ bool selectVSplatCommon(SDValue N, SDValue &Imm, bool Signed,
unsigned ImmBitSize) const;
/// \brief Select constant vector splats whose value fits in a uimm1.
- virtual bool selectVSplatUimm1(SDValue N, SDValue &Imm) const;
+ bool selectVSplatUimm1(SDValue N, SDValue &Imm) const override;
/// \brief Select constant vector splats whose value fits in a uimm2.
- virtual bool selectVSplatUimm2(SDValue N, SDValue &Imm) const;
+ bool selectVSplatUimm2(SDValue N, SDValue &Imm) const override;
/// \brief Select constant vector splats whose value fits in a uimm3.
- virtual bool selectVSplatUimm3(SDValue N, SDValue &Imm) const;
+ bool selectVSplatUimm3(SDValue N, SDValue &Imm) const override;
/// \brief Select constant vector splats whose value fits in a uimm4.
- virtual bool selectVSplatUimm4(SDValue N, SDValue &Imm) const;
+ bool selectVSplatUimm4(SDValue N, SDValue &Imm) const override;
/// \brief Select constant vector splats whose value fits in a uimm5.
- virtual bool selectVSplatUimm5(SDValue N, SDValue &Imm) const;
+ bool selectVSplatUimm5(SDValue N, SDValue &Imm) const override;
/// \brief Select constant vector splats whose value fits in a uimm6.
- virtual bool selectVSplatUimm6(SDValue N, SDValue &Imm) const;
+ bool selectVSplatUimm6(SDValue N, SDValue &Imm) const override;
/// \brief Select constant vector splats whose value fits in a uimm8.
- virtual bool selectVSplatUimm8(SDValue N, SDValue &Imm) const;
+ bool selectVSplatUimm8(SDValue N, SDValue &Imm) const override;
/// \brief Select constant vector splats whose value fits in a simm5.
- virtual bool selectVSplatSimm5(SDValue N, SDValue &Imm) const;
+ bool selectVSplatSimm5(SDValue N, SDValue &Imm) const override;
/// \brief Select constant vector splats whose value is a power of 2.
- virtual bool selectVSplatUimmPow2(SDValue N, SDValue &Imm) const;
+ bool selectVSplatUimmPow2(SDValue N, SDValue &Imm) const override;
/// \brief Select constant vector splats whose value is the inverse of a
/// power of 2.
- virtual bool selectVSplatUimmInvPow2(SDValue N, SDValue &Imm) const;
+ bool selectVSplatUimmInvPow2(SDValue N, SDValue &Imm) const override;
/// \brief Select constant vector splats whose value is a run of set bits
/// ending at the most significant bit
- virtual bool selectVSplatMaskL(SDValue N, SDValue &Imm) const;
+ bool selectVSplatMaskL(SDValue N, SDValue &Imm) const override;
/// \brief Select constant vector splats whose value is a run of set bits
/// starting at bit zero.
- virtual bool selectVSplatMaskR(SDValue N, SDValue &Imm) const;
+ bool selectVSplatMaskR(SDValue N, SDValue &Imm) const override;
- virtual std::pair<bool, SDNode*> selectNode(SDNode *Node);
+ std::pair<bool, SDNode*> selectNode(SDNode *Node) override;
- virtual void processFunctionAfterISel(MachineFunction &MF);
+ void processFunctionAfterISel(MachineFunction &MF) override;
// Insert instructions to initialize the global base register in the
// first MBB of the function.
diff --git a/contrib/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/contrib/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
index 809adc0..8173615 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
@@ -10,7 +10,6 @@
// Subclass of MipsTargetLowering specialized for mips32/64.
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mips-isel"
#include "MipsSEISelLowering.h"
#include "MipsRegisterInfo.h"
#include "MipsTargetMachine.h"
@@ -24,6 +23,8 @@
using namespace llvm;
+#define DEBUG_TYPE "mips-isel"
+
static cl::opt<bool>
EnableMipsTailCalls("enable-mips-tail-calls", cl::Hidden,
cl::desc("MIPS: Enable tail calls."), cl::init(false));
@@ -33,15 +34,16 @@ static cl::opt<bool> NoDPLoadStore("mno-ldc1-sdc1", cl::init(false),
"stores to their single precision "
"counterparts"));
-MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM)
- : MipsTargetLowering(TM) {
+MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM,
+ const MipsSubtarget &STI)
+ : MipsTargetLowering(TM, STI) {
// Set up the register classes
addRegisterClass(MVT::i32, &Mips::GPR32RegClass);
- if (HasMips64)
+ if (Subtarget.isGP64bit())
addRegisterClass(MVT::i64, &Mips::GPR64RegClass);
- if (Subtarget->hasDSP() || Subtarget->hasMSA()) {
+ if (Subtarget.hasDSP() || Subtarget.hasMSA()) {
// Expand all truncating stores and extending loads.
unsigned FirstVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
unsigned LastVT = (unsigned)MVT::LAST_VECTOR_VALUETYPE;
@@ -57,7 +59,7 @@ MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM)
}
}
- if (Subtarget->hasDSP()) {
+ if (Subtarget.hasDSP()) {
MVT::SimpleValueType VecTys[2] = {MVT::v2i16, MVT::v4i8};
for (unsigned i = 0; i < array_lengthof(VecTys); ++i) {
@@ -81,10 +83,10 @@ MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM)
setTargetDAGCombine(ISD::VSELECT);
}
- if (Subtarget->hasDSPR2())
+ if (Subtarget.hasDSPR2())
setOperationAction(ISD::MUL, MVT::v2i16, Legal);
- if (Subtarget->hasMSA()) {
+ if (Subtarget.hasMSA()) {
addMSAIntType(MVT::v16i8, &Mips::MSA128BRegClass);
addMSAIntType(MVT::v8i16, &Mips::MSA128HRegClass);
addMSAIntType(MVT::v4i32, &Mips::MSA128WRegClass);
@@ -100,12 +102,12 @@ MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM)
setTargetDAGCombine(ISD::XOR);
}
- if (!Subtarget->mipsSEUsesSoftFloat()) {
+ if (!Subtarget.abiUsesSoftFloat()) {
addRegisterClass(MVT::f32, &Mips::FGR32RegClass);
// When dealing with single precision only, use libcalls
- if (!Subtarget->isSingleFloat()) {
- if (Subtarget->isFP64bit())
+ if (!Subtarget.isSingleFloat()) {
+ if (Subtarget.isFP64bit())
addRegisterClass(MVT::f64, &Mips::FGR64RegClass);
else
addRegisterClass(MVT::f64, &Mips::AFGR64RegClass);
@@ -117,10 +119,14 @@ MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::MULHS, MVT::i32, Custom);
setOperationAction(ISD::MULHU, MVT::i32, Custom);
- if (HasMips64) {
+ if (Subtarget.hasCnMips())
+ setOperationAction(ISD::MUL, MVT::i64, Legal);
+ else if (Subtarget.isGP64bit())
+ setOperationAction(ISD::MUL, MVT::i64, Custom);
+
+ if (Subtarget.isGP64bit()) {
setOperationAction(ISD::MULHS, MVT::i64, Custom);
setOperationAction(ISD::MULHU, MVT::i64, Custom);
- setOperationAction(ISD::MUL, MVT::i64, Custom);
}
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
@@ -147,12 +153,91 @@ MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::STORE, MVT::f64, Custom);
}
+ if (Subtarget.hasMips32r6()) {
+ // MIPS32r6 replaces the accumulator-based multiplies with a three register
+ // instruction.
+ setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
+ setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
+ setOperationAction(ISD::MUL, MVT::i32, Legal);
+ setOperationAction(ISD::MULHS, MVT::i32, Legal);
+ setOperationAction(ISD::MULHU, MVT::i32, Legal);
+
+ // MIPS32r6 replaces the accumulator-based division/remainder with separate
+ // three register division and remainder instructions.
+ setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
+ setOperationAction(ISD::SDIV, MVT::i32, Legal);
+ setOperationAction(ISD::UDIV, MVT::i32, Legal);
+ setOperationAction(ISD::SREM, MVT::i32, Legal);
+ setOperationAction(ISD::UREM, MVT::i32, Legal);
+
+ // MIPS32r6 replaces conditional moves with an equivalent that removes the
+ // need for three GPR read ports.
+ setOperationAction(ISD::SETCC, MVT::i32, Legal);
+ setOperationAction(ISD::SELECT, MVT::i32, Legal);
+ setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
+
+ setOperationAction(ISD::SETCC, MVT::f32, Legal);
+ setOperationAction(ISD::SELECT, MVT::f32, Legal);
+ setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
+
+ assert(Subtarget.isFP64bit() && "FR=1 is required for MIPS32r6");
+ setOperationAction(ISD::SETCC, MVT::f64, Legal);
+ setOperationAction(ISD::SELECT, MVT::f64, Legal);
+ setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
+
+ setOperationAction(ISD::BRCOND, MVT::Other, Legal);
+
+ // Floating point > and >= are supported via < and <=
+ setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETOGT, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETUGE, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
+
+ setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
+ setCondCodeAction(ISD::SETOGT, MVT::f64, Expand);
+ setCondCodeAction(ISD::SETUGE, MVT::f64, Expand);
+ setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
+ }
+
+ if (Subtarget.hasMips64r6()) {
+ // MIPS64r6 replaces the accumulator-based multiplies with a three register
+ // instruction.
+ setOperationAction(ISD::MUL, MVT::i64, Legal);
+ setOperationAction(ISD::MULHS, MVT::i64, Legal);
+ setOperationAction(ISD::MULHU, MVT::i64, Legal);
+
+ // MIPS64r6 replaces the accumulator-based division/remainder with separate
+ // three register division and remainder instructions.
+ setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
+ setOperationAction(ISD::SDIV, MVT::i64, Legal);
+ setOperationAction(ISD::UDIV, MVT::i64, Legal);
+ setOperationAction(ISD::SREM, MVT::i64, Legal);
+ setOperationAction(ISD::UREM, MVT::i64, Legal);
+
+ // MIPS64r6 replaces conditional moves with an equivalent that removes the
+ // need for three GPR read ports.
+ setOperationAction(ISD::SETCC, MVT::i64, Legal);
+ setOperationAction(ISD::SELECT, MVT::i64, Legal);
+ setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
+ }
+
computeRegisterProperties();
}
const MipsTargetLowering *
-llvm::createMipsSETargetLowering(MipsTargetMachine &TM) {
- return new MipsSETargetLowering(TM);
+llvm::createMipsSETargetLowering(MipsTargetMachine &TM,
+ const MipsSubtarget &STI) {
+ return new MipsSETargetLowering(TM, STI);
+}
+
+const TargetRegisterClass *
+MipsSETargetLowering::getRepRegClassFor(MVT VT) const {
+ if (VT == MVT::Untyped)
+ return Subtarget.hasDSP() ? &Mips::ACC64DSPRegClass : &Mips::ACC64RegClass;
+
+ return TargetLowering::getRepRegClassFor(VT);
}
// Enable MSA support for the given integer type and Register class.
@@ -244,9 +329,21 @@ addMSAFloatType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC) {
}
bool
-MipsSETargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
+MipsSETargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
+ unsigned,
+ bool *Fast) const {
MVT::SimpleValueType SVT = VT.getSimpleVT().SimpleTy;
+ if (Subtarget.systemSupportsUnalignedAccess()) {
+ // MIPS32r6/MIPS64r6 is required to support unaligned access. It's
+ // implementation-defined whether this is handled by hardware, software, or
+ // a hybrid of the two, but it's expected that most implementations will
+ // handle the majority of cases in hardware.
+ if (Fast)
+ *Fast = true;
+ return true;
+ }
+
switch (SVT) {
case MVT::i64:
case MVT::i32:
@@ -428,12 +525,12 @@ static bool selectMSUB(SDNode *SUBENode, SelectionDAG *CurDAG) {
static SDValue performADDECombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget *Subtarget) {
+ const MipsSubtarget &Subtarget) {
if (DCI.isBeforeLegalize())
return SDValue();
- if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 &&
- selectMADD(N, &DAG))
+ if (Subtarget.hasMips32() && !Subtarget.hasMips32r6() &&
+ N->getValueType(0) == MVT::i32 && selectMADD(N, &DAG))
return SDValue(N, 0);
return SDValue();
@@ -448,8 +545,8 @@ static SDValue performADDECombine(SDNode *N, SelectionDAG &DAG,
// - Removes redundant zero extensions performed by an ISD::AND.
static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget *Subtarget) {
- if (!Subtarget->hasMSA())
+ const MipsSubtarget &Subtarget) {
+ if (!Subtarget.hasMSA())
return SDValue();
SDValue Op0 = N->getOperand(0);
@@ -481,7 +578,8 @@ static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
Log2 == ExtendTySize) {
SDValue Ops[] = { Op0->getOperand(0), Op0->getOperand(1), Op0Op2 };
DAG.MorphNodeTo(Op0.getNode(), MipsISD::VEXTRACT_ZEXT_ELT,
- Op0->getVTList(), Ops, Op0->getNumOperands());
+ Op0->getVTList(),
+ makeArrayRef(Ops, Op0->getNumOperands()));
return Op0;
}
}
@@ -501,7 +599,7 @@ static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
static bool isVSplat(SDValue N, APInt &Imm, bool IsLittleEndian) {
BuildVectorSDNode *Node = dyn_cast<BuildVectorSDNode>(N.getNode());
- if (Node == NULL)
+ if (!Node)
return false;
APInt SplatValue, SplatUndef;
@@ -563,8 +661,8 @@ static bool isBitwiseInverse(SDValue N, SDValue OfNode) {
// vector type.
static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget *Subtarget) {
- if (!Subtarget->hasMSA())
+ const MipsSubtarget &Subtarget) {
+ if (!Subtarget.hasMSA())
return SDValue();
EVT Ty = N->getValueType(0);
@@ -580,7 +678,7 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
SDValue Op0Op1 = Op0->getOperand(1);
SDValue Op1Op0 = Op1->getOperand(0);
SDValue Op1Op1 = Op1->getOperand(1);
- bool IsLittleEndian = !Subtarget->isLittle();
+ bool IsLittleEndian = !Subtarget.isLittle();
SDValue IfSet, IfClr, Cond;
bool IsConstantMask = false;
@@ -675,7 +773,7 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
}
// Transform the DAG into an equivalent VSELECT.
- return DAG.getNode(ISD::VSELECT, SDLoc(N), Ty, Cond, IfClr, IfSet);
+ return DAG.getNode(ISD::VSELECT, SDLoc(N), Ty, Cond, IfSet, IfClr);
}
return SDValue();
@@ -683,11 +781,11 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
static SDValue performSUBECombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget *Subtarget) {
+ const MipsSubtarget &Subtarget) {
if (DCI.isBeforeLegalize())
return SDValue();
- if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 &&
+ if (Subtarget.hasMips32() && N->getValueType(0) == MVT::i32 &&
selectMSUB(N, &DAG))
return SDValue(N, 0);
@@ -747,7 +845,7 @@ static SDValue performMULCombine(SDNode *N, SelectionDAG &DAG,
static SDValue performDSPShiftCombine(unsigned Opc, SDNode *N, EVT Ty,
SelectionDAG &DAG,
- const MipsSubtarget *Subtarget) {
+ const MipsSubtarget &Subtarget) {
// See if this is a vector splat immediate node.
APInt SplatValue, SplatUndef;
unsigned SplatBitSize;
@@ -755,12 +853,12 @@ static SDValue performDSPShiftCombine(unsigned Opc, SDNode *N, EVT Ty,
unsigned EltSize = Ty.getVectorElementType().getSizeInBits();
BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
- if (!Subtarget->hasDSP())
+ if (!Subtarget.hasDSP())
return SDValue();
if (!BV ||
!BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
- EltSize, !Subtarget->isLittle()) ||
+ EltSize, !Subtarget.isLittle()) ||
(SplatBitSize != EltSize) ||
(SplatValue.getZExtValue() >= EltSize))
return SDValue();
@@ -771,7 +869,7 @@ static SDValue performDSPShiftCombine(unsigned Opc, SDNode *N, EVT Ty,
static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget *Subtarget) {
+ const MipsSubtarget &Subtarget) {
EVT Ty = N->getValueType(0);
if ((Ty != MVT::v2i16) && (Ty != MVT::v4i8))
@@ -794,10 +892,10 @@ static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG,
// used for DSPr2.
static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget *Subtarget) {
+ const MipsSubtarget &Subtarget) {
EVT Ty = N->getValueType(0);
- if (Subtarget->hasMSA()) {
+ if (Subtarget.hasMSA()) {
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
@@ -825,13 +923,14 @@ static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
SDValue Ops[] = { Op0Op0->getOperand(0), Op0Op0->getOperand(1),
Op0Op0->getOperand(2) };
DAG.MorphNodeTo(Op0Op0.getNode(), MipsISD::VEXTRACT_SEXT_ELT,
- Op0Op0->getVTList(), Ops, Op0Op0->getNumOperands());
+ Op0Op0->getVTList(),
+ makeArrayRef(Ops, Op0Op0->getNumOperands()));
return Op0Op0;
}
}
}
- if ((Ty != MVT::v2i16) && ((Ty != MVT::v4i8) || !Subtarget->hasDSPR2()))
+ if ((Ty != MVT::v2i16) && ((Ty != MVT::v4i8) || !Subtarget.hasDSPR2()))
return SDValue();
return performDSPShiftCombine(MipsISD::SHRA_DSP, N, Ty, DAG, Subtarget);
@@ -840,10 +939,10 @@ static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
static SDValue performSRLCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget *Subtarget) {
+ const MipsSubtarget &Subtarget) {
EVT Ty = N->getValueType(0);
- if (((Ty != MVT::v2i16) || !Subtarget->hasDSPR2()) && (Ty != MVT::v4i8))
+ if (((Ty != MVT::v2i16) || !Subtarget.hasDSPR2()) && (Ty != MVT::v4i8))
return SDValue();
return performDSPShiftCombine(MipsISD::SHRL_DSP, N, Ty, DAG, Subtarget);
@@ -937,10 +1036,10 @@ static SDValue performVSELECTCombine(SDNode *N, SelectionDAG &DAG) {
}
static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
- const MipsSubtarget *Subtarget) {
+ const MipsSubtarget &Subtarget) {
EVT Ty = N->getValueType(0);
- if (Subtarget->hasMSA() && Ty.is128BitVector() && Ty.isInteger()) {
+ if (Subtarget.hasMSA() && Ty.is128BitVector() && Ty.isInteger()) {
// Try the following combines:
// (xor (or $a, $b), (build_vector allones))
// (xor (or $a, $b), (bitcast (build_vector allones)))
@@ -1045,6 +1144,18 @@ MipsSETargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
return emitINSERT_FW(MI, BB);
case Mips::INSERT_FD_PSEUDO:
return emitINSERT_FD(MI, BB);
+ case Mips::INSERT_B_VIDX_PSEUDO:
+ return emitINSERT_DF_VIDX(MI, BB, 1, false);
+ case Mips::INSERT_H_VIDX_PSEUDO:
+ return emitINSERT_DF_VIDX(MI, BB, 2, false);
+ case Mips::INSERT_W_VIDX_PSEUDO:
+ return emitINSERT_DF_VIDX(MI, BB, 4, false);
+ case Mips::INSERT_D_VIDX_PSEUDO:
+ return emitINSERT_DF_VIDX(MI, BB, 8, false);
+ case Mips::INSERT_FW_VIDX_PSEUDO:
+ return emitINSERT_DF_VIDX(MI, BB, 4, true);
+ case Mips::INSERT_FD_VIDX_PSEUDO:
+ return emitINSERT_DF_VIDX(MI, BB, 8, true);
case Mips::FILL_FW_PSEUDO:
return emitFILL_FW(MI, BB);
case Mips::FILL_FD_PSEUDO:
@@ -1077,14 +1188,7 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const {
- // T9 should contain the address of the callee function if
- // -reloction-model=pic or it is an indirect call.
- if (IsPICCall || !GlobalOrExternal) {
- unsigned T9Reg = IsN64 ? Mips::T9_64 : Mips::T9;
- RegsToPass.push_front(std::make_pair(T9Reg, Callee));
- } else
- Ops.push_back(Callee);
-
+ Ops.push_back(Callee);
MipsTargetLowering::getOpndList(Ops, RegsToPass, IsPICCall, GlobalOrExternal,
InternalLinkage, CLI, Callee, Chain);
}
@@ -1113,12 +1217,12 @@ SDValue MipsSETargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
Nd.isNonTemporal(), Nd.isInvariant(),
std::min(Nd.getAlignment(), 4U));
- if (!Subtarget->isLittle())
+ if (!Subtarget.isLittle())
std::swap(Lo, Hi);
SDValue BP = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
SDValue Ops[2] = {BP, Hi.getValue(1)};
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
SDValue MipsSETargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
@@ -1136,7 +1240,7 @@ SDValue MipsSETargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
Val, DAG.getConstant(1, MVT::i32));
- if (!Subtarget->isLittle())
+ if (!Subtarget.isLittle())
std::swap(Lo, Hi);
// i32 store to lower address.
@@ -1154,6 +1258,9 @@ SDValue MipsSETargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
SDValue MipsSETargetLowering::lowerMulDiv(SDValue Op, unsigned NewOpc,
bool HasLo, bool HasHi,
SelectionDAG &DAG) const {
+ // MIPS32r6/MIPS64r6 removed accumulator based multiplies.
+ assert(!Subtarget.hasMips32r6());
+
EVT Ty = Op.getOperand(0).getValueType();
SDLoc DL(Op);
SDValue Mult = DAG.getNode(NewOpc, DL, MVT::Untyped,
@@ -1169,7 +1276,7 @@ SDValue MipsSETargetLowering::lowerMulDiv(SDValue Op, unsigned NewOpc,
return HasLo ? Lo : Hi;
SDValue Vals[] = { Lo, Hi };
- return DAG.getMergeValues(Vals, 2, DL);
+ return DAG.getMergeValues(Vals, DL);
}
@@ -1236,7 +1343,7 @@ static SDValue lowerDSPIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
ResTys.push_back((*I == MVT::i64) ? MVT::Untyped : *I);
// Create node.
- SDValue Val = DAG.getNode(Opc, DL, ResTys, &Ops[0], Ops.size());
+ SDValue Val = DAG.getNode(Opc, DL, ResTys, Ops);
SDValue Out = (ResTys[0] == MVT::Untyped) ? extractLOHI(Val, DL, DAG) : Val;
if (!HasChainIn)
@@ -1244,7 +1351,7 @@ static SDValue lowerDSPIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
assert(Val->getValueType(1) == MVT::Other);
SDValue Vals[] = { Out, SDValue(Val.getNode(), 1) };
- return DAG.getMergeValues(Vals, 2, DL);
+ return DAG.getMergeValues(Vals, DL);
}
// Lower an MSA copy intrinsic into the specified SelectionDAG node
@@ -1281,8 +1388,8 @@ static SDValue lowerMSASplatZExt(SDValue Op, unsigned OpNr, SelectionDAG &DAG) {
SDValue Ops[16] = { LaneA, LaneB, LaneA, LaneB, LaneA, LaneB, LaneA, LaneB,
LaneA, LaneB, LaneA, LaneB, LaneA, LaneB, LaneA, LaneB };
- SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, DL, ViaVecTy, Ops,
- ViaVecTy.getVectorNumElements());
+ SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, DL, ViaVecTy,
+ makeArrayRef(Ops, ViaVecTy.getVectorNumElements()));
if (ViaVecTy != ResVecTy)
Result = DAG.getNode(ISD::BITCAST, DL, ResVecTy, Result);
@@ -1321,8 +1428,8 @@ static SDValue getBuildVectorSplat(EVT VecTy, SDValue SplatValue,
SplatValueA, SplatValueB, SplatValueA, SplatValueB,
SplatValueA, SplatValueB, SplatValueA, SplatValueB };
- SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, DL, ViaVecTy, Ops,
- ViaVecTy.getVectorNumElements());
+ SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, DL, ViaVecTy,
+ makeArrayRef(Ops, ViaVecTy.getVectorNumElements()));
if (VecTy != ViaVecTy)
Result = DAG.getNode(ISD::BITCAST, DL, VecTy, Result);
@@ -1356,7 +1463,7 @@ static SDValue lowerMSABinaryBitImmIntr(SDValue Op, SelectionDAG &DAG,
}
}
- if (Exp2Imm.getNode() == NULL) {
+ if (!Exp2Imm.getNode()) {
// We couldn't constant fold, so do a vector shift instead
// Extend i32 to i64 if necessary. Sign or zero extend doesn't matter since
@@ -1464,25 +1571,27 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::mips_binsli_h:
case Intrinsic::mips_binsli_w:
case Intrinsic::mips_binsli_d: {
+ // binsli_x(IfClear, IfSet, nbits) -> (vselect LBitsMask, IfSet, IfClear)
EVT VecTy = Op->getValueType(0);
EVT EltTy = VecTy.getVectorElementType();
APInt Mask = APInt::getHighBitsSet(EltTy.getSizeInBits(),
Op->getConstantOperandVal(3));
return DAG.getNode(ISD::VSELECT, DL, VecTy,
- DAG.getConstant(Mask, VecTy, true), Op->getOperand(1),
- Op->getOperand(2));
+ DAG.getConstant(Mask, VecTy, true), Op->getOperand(2),
+ Op->getOperand(1));
}
case Intrinsic::mips_binsri_b:
case Intrinsic::mips_binsri_h:
case Intrinsic::mips_binsri_w:
case Intrinsic::mips_binsri_d: {
+ // binsri_x(IfClear, IfSet, nbits) -> (vselect RBitsMask, IfSet, IfClear)
EVT VecTy = Op->getValueType(0);
EVT EltTy = VecTy.getVectorElementType();
APInt Mask = APInt::getLowBitsSet(EltTy.getSizeInBits(),
Op->getConstantOperandVal(3));
return DAG.getNode(ISD::VSELECT, DL, VecTy,
- DAG.getConstant(Mask, VecTy, true), Op->getOperand(1),
- Op->getOperand(2));
+ DAG.getConstant(Mask, VecTy, true), Op->getOperand(2),
+ Op->getOperand(1));
}
case Intrinsic::mips_bmnz_v:
return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0), Op->getOperand(3),
@@ -1514,7 +1623,7 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::mips_bnegi_w:
case Intrinsic::mips_bnegi_d:
return lowerMSABinaryBitImmIntr(Op, DAG, ISD::XOR, Op->getOperand(2),
- !Subtarget->isLittle());
+ !Subtarget.isLittle());
case Intrinsic::mips_bnz_b:
case Intrinsic::mips_bnz_h:
case Intrinsic::mips_bnz_w:
@@ -1525,13 +1634,15 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
return DAG.getNode(MipsISD::VANY_NONZERO, DL, Op->getValueType(0),
Op->getOperand(1));
case Intrinsic::mips_bsel_v:
+ // bsel_v(Mask, IfClear, IfSet) -> (vselect Mask, IfSet, IfClear)
return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0),
- Op->getOperand(1), Op->getOperand(2),
- Op->getOperand(3));
+ Op->getOperand(1), Op->getOperand(3),
+ Op->getOperand(2));
case Intrinsic::mips_bseli_b:
+ // bseli_v(Mask, IfClear, IfSet) -> (vselect Mask, IfSet, IfClear)
return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0),
- Op->getOperand(1), Op->getOperand(2),
- lowerMSASplatImm(Op, 3, DAG));
+ Op->getOperand(1), lowerMSASplatImm(Op, 3, DAG),
+ Op->getOperand(2));
case Intrinsic::mips_bset_b:
case Intrinsic::mips_bset_h:
case Intrinsic::mips_bset_w:
@@ -1548,7 +1659,7 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::mips_bseti_w:
case Intrinsic::mips_bseti_d:
return lowerMSABinaryBitImmIntr(Op, DAG, ISD::OR, Op->getOperand(2),
- !Subtarget->isLittle());
+ !Subtarget.isLittle());
case Intrinsic::mips_bz_b:
case Intrinsic::mips_bz_h:
case Intrinsic::mips_bz_w:
@@ -1623,25 +1734,34 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::mips_copy_s_w:
return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_SEXT_ELT);
case Intrinsic::mips_copy_s_d:
- // Don't lower directly into VEXTRACT_SEXT_ELT since i64 might be illegal.
- // Instead lower to the generic EXTRACT_VECTOR_ELT node and let the type
- // legalizer and EXTRACT_VECTOR_ELT lowering sort it out.
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op), Op->getValueType(0),
- Op->getOperand(1), Op->getOperand(2));
+ if (Subtarget.hasMips64())
+ // Lower directly into VEXTRACT_SEXT_ELT since i64 is legal on Mips64.
+ return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_SEXT_ELT);
+ else {
+ // Lower into the generic EXTRACT_VECTOR_ELT node and let the type
+ // legalizer and EXTRACT_VECTOR_ELT lowering sort it out.
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op),
+ Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ }
case Intrinsic::mips_copy_u_b:
case Intrinsic::mips_copy_u_h:
case Intrinsic::mips_copy_u_w:
return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_ZEXT_ELT);
case Intrinsic::mips_copy_u_d:
- // Don't lower directly into VEXTRACT_ZEXT_ELT since i64 might be illegal.
- // Instead lower to the generic EXTRACT_VECTOR_ELT node and let the type
- // legalizer and EXTRACT_VECTOR_ELT lowering sort it out.
- //
- // Note: When i64 is illegal, this results in copy_s.w instructions instead
- // of copy_u.w instructions. This makes no difference to the behaviour
- // since i64 is only illegal when the register file is 32-bit.
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op), Op->getValueType(0),
- Op->getOperand(1), Op->getOperand(2));
+ if (Subtarget.hasMips64())
+ // Lower directly into VEXTRACT_ZEXT_ELT since i64 is legal on Mips64.
+ return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_ZEXT_ELT);
+ else {
+ // Lower into the generic EXTRACT_VECTOR_ELT node and let the type
+ // legalizer and EXTRACT_VECTOR_ELT lowering sort it out.
+ // Note: When i64 is illegal, this results in copy_s.w instructions
+ // instead of copy_u.w instructions. This makes no difference to the
+ // behaviour since i64 is only illegal when the register file is 32-bit.
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op),
+ Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ }
case Intrinsic::mips_div_s_b:
case Intrinsic::mips_div_s_h:
case Intrinsic::mips_div_s_w:
@@ -1723,7 +1843,7 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
// If ResTy is v2i64 then the type legalizer will break this node down into
// an equivalent v4i32.
- return DAG.getNode(ISD::BUILD_VECTOR, DL, ResTy, &Ops[0], Ops.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, DL, ResTy, Ops);
}
case Intrinsic::mips_fexp2_w:
case Intrinsic::mips_fexp2_d: {
@@ -1798,12 +1918,20 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::mips_insert_d:
return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), Op->getValueType(0),
Op->getOperand(1), Op->getOperand(3), Op->getOperand(2));
+ case Intrinsic::mips_insve_b:
+ case Intrinsic::mips_insve_h:
+ case Intrinsic::mips_insve_w:
+ case Intrinsic::mips_insve_d:
+ return DAG.getNode(MipsISD::INSVE, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2), Op->getOperand(3),
+ DAG.getConstant(0, MVT::i32));
case Intrinsic::mips_ldi_b:
case Intrinsic::mips_ldi_h:
case Intrinsic::mips_ldi_w:
case Intrinsic::mips_ldi_d:
return lowerMSASplatImm(Op, 1, DAG);
- case Intrinsic::mips_lsa: {
+ case Intrinsic::mips_lsa:
+ case Intrinsic::mips_dlsa: {
EVT ResTy = Op->getValueType(0);
return DAG.getNode(ISD::ADD, SDLoc(Op), ResTy, Op->getOperand(1),
DAG.getNode(ISD::SHL, SDLoc(Op), ResTy,
@@ -2198,12 +2326,12 @@ SDValue MipsSETargetLowering::lowerBUILD_VECTOR(SDValue Op,
unsigned SplatBitSize;
bool HasAnyUndefs;
- if (!Subtarget->hasMSA() || !ResTy.is128BitVector())
+ if (!Subtarget.hasMSA() || !ResTy.is128BitVector())
return SDValue();
if (Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
HasAnyUndefs, 8,
- !Subtarget->isLittle()) && SplatBitSize <= 64) {
+ !Subtarget.isLittle()) && SplatBitSize <= 64) {
// We can only cope with 8, 16, 32, or 64-bit elements
if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 &&
SplatBitSize != 64)
@@ -2540,8 +2668,7 @@ static SDValue lowerVECTOR_SHUFFLE_VSHF(SDValue Op, EVT ResTy,
++I)
Ops.push_back(DAG.getTargetConstant(*I, MaskEltTy));
- SDValue MaskVec = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskVecTy, &Ops[0],
- Ops.size());
+ SDValue MaskVec = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskVecTy, Ops);
if (Using1stVec && Using2ndVec) {
Op0 = Op->getOperand(0);
@@ -2553,7 +2680,14 @@ static SDValue lowerVECTOR_SHUFFLE_VSHF(SDValue Op, EVT ResTy,
else
llvm_unreachable("shuffle vector mask references neither vector operand?");
- return DAG.getNode(MipsISD::VSHF, DL, ResTy, MaskVec, Op0, Op1);
+ // VECTOR_SHUFFLE concatenates the vectors in a vectorwise fashion.
+ // <0b00, 0b01> + <0b10, 0b11> -> <0b00, 0b01, 0b10, 0b11>
+ // VSHF concatenates the vectors in a bitwise fashion:
+ // <0b00, 0b01> + <0b10, 0b11> ->
+ // 0b0100 + 0b1110 -> 0b01001110
+ // <0b10, 0b11, 0b00, 0b01>
+ // We must therefore swap the operands to get the correct result.
+ return DAG.getNode(MipsISD::VSHF, DL, ResTy, MaskVec, Op1, Op0);
}
// Lower VECTOR_SHUFFLE into one of a number of instructions depending on the
@@ -2616,7 +2750,7 @@ emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{
const TargetRegisterClass *RC = &Mips::GPR32RegClass;
DebugLoc DL = MI->getDebugLoc();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
- MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB));
+ MachineFunction::iterator It = std::next(MachineFunction::iterator(BB));
MachineFunction *F = BB->getParent();
MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB);
@@ -2626,7 +2760,7 @@ emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{
F->insert(It, Sink);
// Transfer the remainder of BB and its successor edges to Sink.
- Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)),
+ Sink->splice(Sink->begin(), BB, std::next(MachineBasicBlock::iterator(MI)),
BB->end());
Sink->transferSuccessorsAndUpdatePHIs(BB);
@@ -2681,7 +2815,7 @@ emitMSACBranchPseudo(MachineInstr *MI, MachineBasicBlock *BB,
const TargetRegisterClass *RC = &Mips::GPR32RegClass;
DebugLoc DL = MI->getDebugLoc();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
- MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB));
+ MachineFunction::iterator It = std::next(MachineFunction::iterator(BB));
MachineFunction *F = BB->getParent();
MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB);
@@ -2691,7 +2825,7 @@ emitMSACBranchPseudo(MachineInstr *MI, MachineBasicBlock *BB,
F->insert(It, Sink);
// Transfer the remainder of BB and its successor edges to Sink.
- Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)),
+ Sink->splice(Sink->begin(), BB, std::next(MachineBasicBlock::iterator(MI)),
BB->end());
Sink->transferSuccessorsAndUpdatePHIs(BB);
@@ -2750,7 +2884,7 @@ emitCOPY_FW(MachineInstr *MI, MachineBasicBlock *BB) const{
else {
unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
- BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_W), Wt).addReg(Ws).addImm(1);
+ BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_W), Wt).addReg(Ws).addImm(Lane);
BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_lo);
}
@@ -2770,7 +2904,7 @@ emitCOPY_FW(MachineInstr *MI, MachineBasicBlock *BB) const{
// valid because FR=1 mode which is the only supported mode in MSA.
MachineBasicBlock * MipsSETargetLowering::
emitCOPY_FD(MachineInstr *MI, MachineBasicBlock *BB) const{
- assert(Subtarget->isFP64bit());
+ assert(Subtarget.isFP64bit());
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
@@ -2817,7 +2951,8 @@ MipsSETargetLowering::emitINSERT_FW(MachineInstr *MI,
BuildMI(*BB, MI, DL, TII->get(Mips::INSVE_W), Wd)
.addReg(Wd_in)
.addImm(Lane)
- .addReg(Wt);
+ .addReg(Wt)
+ .addImm(0);
MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
@@ -2832,7 +2967,7 @@ MipsSETargetLowering::emitINSERT_FW(MachineInstr *MI,
MachineBasicBlock *
MipsSETargetLowering::emitINSERT_FD(MachineInstr *MI,
MachineBasicBlock *BB) const {
- assert(Subtarget->isFP64bit());
+ assert(Subtarget.isFP64bit());
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
@@ -2850,7 +2985,133 @@ MipsSETargetLowering::emitINSERT_FD(MachineInstr *MI,
BuildMI(*BB, MI, DL, TII->get(Mips::INSVE_D), Wd)
.addReg(Wd_in)
.addImm(Lane)
- .addReg(Wt);
+ .addReg(Wt)
+ .addImm(0);
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
+
+// Emit the INSERT_([BHWD]|F[WD])_VIDX pseudo instruction.
+//
+// For integer:
+// (INSERT_([BHWD]|F[WD])_PSEUDO $wd, $wd_in, $n, $rs)
+// =>
+// (SLL $lanetmp1, $lane, <log2size>)
+// (SLD_B $wdtmp1, $wd_in, $wd_in, $lanetmp1)
+// (INSERT_[BHWD], $wdtmp2, $wdtmp1, 0, $rs)
+// (NEG $lanetmp2, $lanetmp1)
+// (SLD_B $wd, $wdtmp2, $wdtmp2, $lanetmp2)
+//
+// For floating point:
+// (INSERT_([BHWD]|F[WD])_PSEUDO $wd, $wd_in, $n, $fs)
+// =>
+// (SUBREG_TO_REG $wt, $fs, <subreg>)
+// (SLL $lanetmp1, $lane, <log2size>)
+// (SLD_B $wdtmp1, $wd_in, $wd_in, $lanetmp1)
+// (INSVE_[WD], $wdtmp2, 0, $wdtmp1, 0)
+// (NEG $lanetmp2, $lanetmp1)
+// (SLD_B $wd, $wdtmp2, $wdtmp2, $lanetmp2)
+MachineBasicBlock *
+MipsSETargetLowering::emitINSERT_DF_VIDX(MachineInstr *MI,
+ MachineBasicBlock *BB,
+ unsigned EltSizeInBytes,
+ bool IsFP) const {
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned Wd = MI->getOperand(0).getReg();
+ unsigned SrcVecReg = MI->getOperand(1).getReg();
+ unsigned LaneReg = MI->getOperand(2).getReg();
+ unsigned SrcValReg = MI->getOperand(3).getReg();
+
+ const TargetRegisterClass *VecRC = nullptr;
+ const TargetRegisterClass *GPRRC =
+ Subtarget.isGP64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
+ unsigned EltLog2Size;
+ unsigned InsertOp = 0;
+ unsigned InsveOp = 0;
+ switch (EltSizeInBytes) {
+ default:
+ llvm_unreachable("Unexpected size");
+ case 1:
+ EltLog2Size = 0;
+ InsertOp = Mips::INSERT_B;
+ InsveOp = Mips::INSVE_B;
+ VecRC = &Mips::MSA128BRegClass;
+ break;
+ case 2:
+ EltLog2Size = 1;
+ InsertOp = Mips::INSERT_H;
+ InsveOp = Mips::INSVE_H;
+ VecRC = &Mips::MSA128HRegClass;
+ break;
+ case 4:
+ EltLog2Size = 2;
+ InsertOp = Mips::INSERT_W;
+ InsveOp = Mips::INSVE_W;
+ VecRC = &Mips::MSA128WRegClass;
+ break;
+ case 8:
+ EltLog2Size = 3;
+ InsertOp = Mips::INSERT_D;
+ InsveOp = Mips::INSVE_D;
+ VecRC = &Mips::MSA128DRegClass;
+ break;
+ }
+
+ if (IsFP) {
+ unsigned Wt = RegInfo.createVirtualRegister(VecRC);
+ BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt)
+ .addImm(0)
+ .addReg(SrcValReg)
+ .addImm(EltSizeInBytes == 8 ? Mips::sub_64 : Mips::sub_lo);
+ SrcValReg = Wt;
+ }
+
+ // Convert the lane index into a byte index
+ if (EltSizeInBytes != 1) {
+ unsigned LaneTmp1 = RegInfo.createVirtualRegister(GPRRC);
+ BuildMI(*BB, MI, DL, TII->get(Mips::SLL), LaneTmp1)
+ .addReg(LaneReg)
+ .addImm(EltLog2Size);
+ LaneReg = LaneTmp1;
+ }
+
+ // Rotate bytes around so that the desired lane is element zero
+ unsigned WdTmp1 = RegInfo.createVirtualRegister(VecRC);
+ BuildMI(*BB, MI, DL, TII->get(Mips::SLD_B), WdTmp1)
+ .addReg(SrcVecReg)
+ .addReg(SrcVecReg)
+ .addReg(LaneReg);
+
+ unsigned WdTmp2 = RegInfo.createVirtualRegister(VecRC);
+ if (IsFP) {
+ // Use insve.df to insert to element zero
+ BuildMI(*BB, MI, DL, TII->get(InsveOp), WdTmp2)
+ .addReg(WdTmp1)
+ .addImm(0)
+ .addReg(SrcValReg)
+ .addImm(0);
+ } else {
+ // Use insert.df to insert to element zero
+ BuildMI(*BB, MI, DL, TII->get(InsertOp), WdTmp2)
+ .addReg(WdTmp1)
+ .addReg(SrcValReg)
+ .addImm(0);
+ }
+
+ // Rotate elements the rest of the way for a full rotation.
+ // sld.df interprets $rt modulo the number of columns, so we only need to
+ // negate the lane index to do this.
+ unsigned LaneTmp2 = RegInfo.createVirtualRegister(GPRRC);
+ BuildMI(*BB, MI, DL, TII->get(Mips::SUB), LaneTmp2)
+ .addReg(Mips::ZERO)
+ .addReg(LaneReg);
+ BuildMI(*BB, MI, DL, TII->get(Mips::SLD_B), Wd)
+ .addReg(WdTmp2)
+ .addReg(WdTmp2)
+ .addReg(LaneTmp2);
MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
@@ -2895,7 +3156,7 @@ MipsSETargetLowering::emitFILL_FW(MachineInstr *MI,
MachineBasicBlock *
MipsSETargetLowering::emitFILL_FD(MachineInstr *MI,
MachineBasicBlock *BB) const {
- assert(Subtarget->isFP64bit());
+ assert(Subtarget.isFP64bit());
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
diff --git a/contrib/llvm/lib/Target/Mips/MipsSEISelLowering.h b/contrib/llvm/lib/Target/Mips/MipsSEISelLowering.h
index c5210d9..00d8683 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSEISelLowering.h
+++ b/contrib/llvm/lib/Target/Mips/MipsSEISelLowering.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef MipsSEISELLOWERING_H
-#define MipsSEISELLOWERING_H
+#ifndef MIPSSEISELLOWERING_H
+#define MIPSSEISELLOWERING_H
#include "MipsISelLowering.h"
#include "MipsRegisterInfo.h"
@@ -20,7 +20,8 @@
namespace llvm {
class MipsSETargetLowering : public MipsTargetLowering {
public:
- explicit MipsSETargetLowering(MipsTargetMachine &TM);
+ explicit MipsSETargetLowering(MipsTargetMachine &TM,
+ const MipsSubtarget &STI);
/// \brief Enable MSA support for the given integer type and Register
/// class.
@@ -30,39 +31,35 @@ namespace llvm {
void addMSAFloatType(MVT::SimpleValueType Ty,
const TargetRegisterClass *RC);
- virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const;
+ bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AS = 0,
+ bool *Fast = nullptr) const override;
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
- virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
- virtual MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
+ MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *MBB) const override;
- virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
- EVT VT) const {
+ bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
+ EVT VT) const override {
return false;
}
- virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
- if (VT == MVT::Untyped)
- return Subtarget->hasDSP() ? &Mips::ACC64DSPRegClass :
- &Mips::ACC64RegClass;
-
- return TargetLowering::getRepRegClassFor(VT);
- }
+ const TargetRegisterClass *getRepRegClassFor(MVT VT) const override;
private:
- virtual bool
- isEligibleForTailCallOptimization(const MipsCC &MipsCCInfo,
- unsigned NextStackOffset,
- const MipsFunctionInfo& FI) const;
+ bool isEligibleForTailCallOptimization(const MipsCC &MipsCCInfo,
+ unsigned NextStackOffset,
+ const MipsFunctionInfo& FI) const override;
- virtual void
+ void
getOpndList(SmallVectorImpl<SDValue> &Ops,
std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
- CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const;
+ CallLoweringInfo &CLI, SDValue Callee,
+ SDValue Chain) const override;
SDValue lowerLOAD(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerSTORE(SDValue Op, SelectionDAG &DAG) const;
@@ -96,6 +93,11 @@ namespace llvm {
/// \brief Emit the INSERT_FD pseudo instruction
MachineBasicBlock *emitINSERT_FD(MachineInstr *MI,
MachineBasicBlock *BB) const;
+ /// \brief Emit the INSERT_([BHWD]|F[WD])_VIDX pseudo instruction
+ MachineBasicBlock *emitINSERT_DF_VIDX(MachineInstr *MI,
+ MachineBasicBlock *BB,
+ unsigned EltSizeInBytes,
+ bool IsFP) const;
/// \brief Emit the FILL_FW pseudo instruction
MachineBasicBlock *emitFILL_FW(MachineInstr *MI,
MachineBasicBlock *BB) const;
diff --git a/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp
index 02931a3..69cb74c 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp
@@ -24,11 +24,10 @@
using namespace llvm;
-MipsSEInstrInfo::MipsSEInstrInfo(MipsTargetMachine &tm)
- : MipsInstrInfo(tm,
- tm.getRelocationModel() == Reloc::PIC_ ? Mips::B : Mips::J),
- RI(*tm.getSubtargetImpl()),
- IsN64(tm.getSubtarget<MipsSubtarget>().isABI_N64()) {}
+MipsSEInstrInfo::MipsSEInstrInfo(const MipsSubtarget &STI)
+ : MipsInstrInfo(STI, STI.getRelocationModel() == Reloc::PIC_ ? Mips::B
+ : Mips::J),
+ RI(STI), IsN64(STI.isABI_N64()) {}
const MipsRegisterInfo &MipsSEInstrInfo::getRegisterInfo() const {
return RI;
@@ -84,19 +83,25 @@ void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
unsigned Opc = 0, ZeroReg = 0;
+ bool isMicroMips = Subtarget.inMicroMipsMode();
if (Mips::GPR32RegClass.contains(DestReg)) { // Copy to CPU Reg.
- if (Mips::GPR32RegClass.contains(SrcReg))
- Opc = Mips::ADDu, ZeroReg = Mips::ZERO;
- else if (Mips::CCRRegClass.contains(SrcReg))
+ if (Mips::GPR32RegClass.contains(SrcReg)) {
+ if (isMicroMips)
+ Opc = Mips::MOVE16_MM;
+ else
+ Opc = Mips::ADDu, ZeroReg = Mips::ZERO;
+ } else if (Mips::CCRRegClass.contains(SrcReg))
Opc = Mips::CFC1;
else if (Mips::FGR32RegClass.contains(SrcReg))
Opc = Mips::MFC1;
- else if (Mips::HI32RegClass.contains(SrcReg))
- Opc = Mips::MFHI, SrcReg = 0;
- else if (Mips::LO32RegClass.contains(SrcReg))
- Opc = Mips::MFLO, SrcReg = 0;
- else if (Mips::HI32DSPRegClass.contains(SrcReg))
+ else if (Mips::HI32RegClass.contains(SrcReg)) {
+ Opc = isMicroMips ? Mips::MFHI16_MM : Mips::MFHI;
+ SrcReg = 0;
+ } else if (Mips::LO32RegClass.contains(SrcReg)) {
+ Opc = isMicroMips ? Mips::MFLO16_MM : Mips::MFLO;
+ SrcReg = 0;
+ } else if (Mips::HI32DSPRegClass.contains(SrcReg))
Opc = Mips::MFHI_DSP;
else if (Mips::LO32DSPRegClass.contains(SrcReg))
Opc = Mips::MFLO_DSP;
@@ -259,18 +264,22 @@ loadRegFromStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
bool MipsSEInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
MachineBasicBlock &MBB = *MI->getParent();
+ bool isMicroMips = Subtarget.inMicroMipsMode();
+ unsigned Opc;
switch(MI->getDesc().getOpcode()) {
default:
return false;
case Mips::RetRA:
- expandRetRA(MBB, MI, Mips::RET);
+ expandRetRA(MBB, MI);
break;
case Mips::PseudoMFHI:
- expandPseudoMFHiLo(MBB, MI, Mips::MFHI);
+ Opc = isMicroMips ? Mips::MFHI16_MM : Mips::MFHI;
+ expandPseudoMFHiLo(MBB, MI, Opc);
break;
case Mips::PseudoMFLO:
- expandPseudoMFHiLo(MBB, MI, Mips::MFLO);
+ Opc = isMicroMips ? Mips::MFLO16_MM : Mips::MFLO;
+ expandPseudoMFHiLo(MBB, MI, Opc);
break;
case Mips::PseudoMFHI64:
expandPseudoMFHiLo(MBB, MI, Mips::MFHI64);
@@ -350,7 +359,7 @@ unsigned MipsSEInstrInfo::getOppositeBranchOpc(unsigned Opc) const {
void MipsSEInstrInfo::adjustStackPtr(unsigned SP, int64_t Amount,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- const MipsSubtarget &STI = TM.getSubtarget<MipsSubtarget>();
+ const MipsSubtarget &STI = Subtarget;
DebugLoc DL = I != MBB.end() ? I->getDebugLoc() : DebugLoc();
unsigned ADDu = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;
unsigned ADDiu = STI.isABI_N64() ? Mips::DADDiu : Mips::ADDiu;
@@ -358,7 +367,7 @@ void MipsSEInstrInfo::adjustStackPtr(unsigned SP, int64_t Amount,
if (isInt<16>(Amount))// addi sp, sp, amount
BuildMI(MBB, I, DL, get(ADDiu), SP).addReg(SP).addImm(Amount);
else { // Expand immediate that doesn't fit in 16-bit.
- unsigned Reg = loadImmediate(Amount, MBB, I, DL, 0);
+ unsigned Reg = loadImmediate(Amount, MBB, I, DL, nullptr);
BuildMI(MBB, I, DL, get(ADDu), SP).addReg(SP).addReg(Reg, RegState::Kill);
}
}
@@ -370,7 +379,7 @@ MipsSEInstrInfo::loadImmediate(int64_t Imm, MachineBasicBlock &MBB,
MachineBasicBlock::iterator II, DebugLoc DL,
unsigned *NewImm) const {
MipsAnalyzeImmediate AnalyzeImm;
- const MipsSubtarget &STI = TM.getSubtarget<MipsSubtarget>();
+ const MipsSubtarget &STI = Subtarget;
MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
unsigned Size = STI.isABI_N64() ? 64 : 32;
unsigned LUi = STI.isABI_N64() ? Mips::LUi64 : Mips::LUi;
@@ -418,9 +427,12 @@ unsigned MipsSEInstrInfo::getAnalyzableBrOpc(unsigned Opc) const {
}
void MipsSEInstrInfo::expandRetRA(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned Opc) const {
- BuildMI(MBB, I, I->getDebugLoc(), get(Opc)).addReg(Mips::RA);
+ MachineBasicBlock::iterator I) const {
+ if (Subtarget.isGP64bit())
+ BuildMI(MBB, I, I->getDebugLoc(), get(Mips::PseudoReturn64))
+ .addReg(Mips::RA_64);
+ else
+ BuildMI(MBB, I, I->getDebugLoc(), get(Mips::PseudoReturn)).addReg(Mips::RA);
}
std::pair<bool, bool>
@@ -481,7 +493,8 @@ void MipsSEInstrInfo::expandCvtFPInt(MachineBasicBlock &MBB,
DebugLoc DL = I->getDebugLoc();
bool DstIsLarger, SrcIsLarger;
- tie(DstIsLarger, SrcIsLarger) = compareOpndSize(CvtOpc, *MBB.getParent());
+ std::tie(DstIsLarger, SrcIsLarger) =
+ compareOpndSize(CvtOpc, *MBB.getParent());
if (DstIsLarger)
TmpReg = getRegisterInfo().getSubReg(DstReg, Mips::sub_lo);
@@ -505,9 +518,30 @@ void MipsSEInstrInfo::expandExtractElementF64(MachineBasicBlock &MBB,
unsigned SubIdx = N ? Mips::sub_hi : Mips::sub_lo;
unsigned SubReg = getRegisterInfo().getSubReg(SrcReg, SubIdx);
- if (SubIdx == Mips::sub_hi && FP64)
- BuildMI(MBB, I, dl, get(Mips::MFHC1), DstReg).addReg(SubReg);
- else
+ // FPXX on MIPS-II or MIPS32r1 should have been handled with a spill/reload
+ // in MipsSEFrameLowering.cpp.
+ assert(!(Subtarget.isABI_FPXX() && !Subtarget.hasMips32r2()));
+
+ // FP64A (FP64 with nooddspreg) should have been handled with a spill/reload
+ // in MipsSEFrameLowering.cpp.
+ assert(!(Subtarget.isFP64bit() && !Subtarget.useOddSPReg()));
+
+ if (SubIdx == Mips::sub_hi && Subtarget.hasMTHC1()) {
+ // FIXME: Strictly speaking, MFHC1 only reads the top 32-bits; however, we
+ // claim to read the whole 64-bits as part of a white lie used to
+ // temporarily work around a widespread bug in the -mfp64 support.
+ // The problem is that none of the 32-bit fpu ops mention the fact
+ // that they clobber the upper 32-bits of the 64-bit FPR. Fixing that
+ // requires a major overhaul of the FPU implementation which can't
+ // be done right now due to time constraints.
+ // MFHC1 is one of two instructions that are affected since they are
+ // the only instructions that don't read the lower 32-bits.
+ // We therefore pretend that it reads the bottom 32-bits to
+ // artificially create a dependency and prevent the scheduler
+ // changing the behaviour of the code.
+ BuildMI(MBB, I, dl, get(FP64 ? Mips::MFHC1_D64 : Mips::MFHC1_D32), DstReg)
+ .addReg(SrcReg);
+ } else
BuildMI(MBB, I, dl, get(Mips::MFC1), DstReg).addReg(SubReg);
}
@@ -520,19 +554,49 @@ void MipsSEInstrInfo::expandBuildPairF64(MachineBasicBlock &MBB,
DebugLoc dl = I->getDebugLoc();
const TargetRegisterInfo &TRI = getRegisterInfo();
- // For FP32 mode:
- // mtc1 Lo, $fp
- // mtc1 Hi, $fp + 1
- // For FP64 mode:
+ // When mthc1 is available, use:
// mtc1 Lo, $fp
// mthc1 Hi, $fp
+ //
+ // Otherwise, for O32 FPXX ABI:
+ // spill + reload via ldc1
+ // This case is handled by the frame lowering code.
+ //
+ // Otherwise, for FP32:
+ // mtc1 Lo, $fp
+ // mtc1 Hi, $fp + 1
+ //
+ // The case where dmtc1 is available doesn't need to be handled here
+ // because it never creates a BuildPairF64 node.
+
+ // FPXX on MIPS-II or MIPS32r1 should have been handled with a spill/reload
+ // in MipsSEFrameLowering.cpp.
+ assert(!(Subtarget.isABI_FPXX() && !Subtarget.hasMips32r2()));
+
+ // FP64A (FP64 with nooddspreg) should have been handled with a spill/reload
+ // in MipsSEFrameLowering.cpp.
+ assert(!(Subtarget.isFP64bit() && !Subtarget.useOddSPReg()));
BuildMI(MBB, I, dl, Mtc1Tdd, TRI.getSubReg(DstReg, Mips::sub_lo))
.addReg(LoReg);
- if (FP64)
- BuildMI(MBB, I, dl, get(Mips::MTHC1), TRI.getSubReg(DstReg, Mips::sub_hi))
- .addReg(HiReg);
+ if (Subtarget.hasMTHC1()) {
+ // FIXME: The .addReg(DstReg) is a white lie used to temporarily work
+ // around a widespread bug in the -mfp64 support.
+ // The problem is that none of the 32-bit fpu ops mention the fact
+ // that they clobber the upper 32-bits of the 64-bit FPR. Fixing that
+ // requires a major overhaul of the FPU implementation which can't
+ // be done right now due to time constraints.
+ // MTHC1 is one of two instructions that are affected since they are
+ // the only instructions that don't read the lower 32-bits.
+ // We therefore pretend that it reads the bottom 32-bits to
+ // artificially create a dependency and prevent the scheduler
+ // changing the behaviour of the code.
+ BuildMI(MBB, I, dl, get(FP64 ? Mips::MTHC1_D64 : Mips::MTHC1_D32), DstReg)
+ .addReg(DstReg)
+ .addReg(HiReg);
+ } else if (Subtarget.isABI_FPXX())
+ llvm_unreachable("BuildPairF64 not expanded in frame lowering code!");
else
BuildMI(MBB, I, dl, Mtc1Tdd, TRI.getSubReg(DstReg, Mips::sub_hi))
.addReg(HiReg);
@@ -543,29 +607,31 @@ void MipsSEInstrInfo::expandEhReturn(MachineBasicBlock &MBB,
// This pseudo instruction is generated as part of the lowering of
// ISD::EH_RETURN. We convert it to a stack increment by OffsetReg, and
// indirect jump to TargetReg
- const MipsSubtarget &STI = TM.getSubtarget<MipsSubtarget>();
- unsigned ADDU = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;
- unsigned JR = STI.isABI_N64() ? Mips::JR64 : Mips::JR;
- unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP;
- unsigned RA = STI.isABI_N64() ? Mips::RA_64 : Mips::RA;
- unsigned T9 = STI.isABI_N64() ? Mips::T9_64 : Mips::T9;
- unsigned ZERO = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO;
+ unsigned ADDU = Subtarget.isABI_N64() ? Mips::DADDu : Mips::ADDu;
+ unsigned SP = Subtarget.isGP64bit() ? Mips::SP_64 : Mips::SP;
+ unsigned RA = Subtarget.isGP64bit() ? Mips::RA_64 : Mips::RA;
+ unsigned T9 = Subtarget.isGP64bit() ? Mips::T9_64 : Mips::T9;
+ unsigned ZERO = Subtarget.isGP64bit() ? Mips::ZERO_64 : Mips::ZERO;
unsigned OffsetReg = I->getOperand(0).getReg();
unsigned TargetReg = I->getOperand(1).getReg();
// addu $ra, $v0, $zero
// addu $sp, $sp, $v1
- // jr $ra
+ // jr $ra (via RetRA)
+ const TargetMachine &TM = MBB.getParent()->getTarget();
if (TM.getRelocationModel() == Reloc::PIC_)
BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(ADDU), T9)
- .addReg(TargetReg).addReg(ZERO);
+ .addReg(TargetReg)
+ .addReg(ZERO);
BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(ADDU), RA)
- .addReg(TargetReg).addReg(ZERO);
+ .addReg(TargetReg)
+ .addReg(ZERO);
BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(ADDU), SP)
- .addReg(SP).addReg(OffsetReg);
- BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(JR)).addReg(RA);
+ .addReg(SP)
+ .addReg(OffsetReg);
+ expandRetRA(MBB, I);
}
-const MipsInstrInfo *llvm::createMipsSEInstrInfo(MipsTargetMachine &TM) {
- return new MipsSEInstrInfo(TM);
+const MipsInstrInfo *llvm::createMipsSEInstrInfo(const MipsSubtarget &STI) {
+ return new MipsSEInstrInfo(STI);
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.h b/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.h
index 6d2dd90..9576fef 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MipsSEInstrInfo.h
@@ -24,48 +24,48 @@ class MipsSEInstrInfo : public MipsInstrInfo {
bool IsN64;
public:
- explicit MipsSEInstrInfo(MipsTargetMachine &TM);
+ explicit MipsSEInstrInfo(const MipsSubtarget &STI);
- virtual const MipsRegisterInfo &getRegisterInfo() const;
+ const MipsRegisterInfo &getRegisterInfo() const override;
/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
- virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
+ unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
- virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
+ unsigned isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
- virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const;
+ void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const override;
- virtual void storeRegToStack(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
- int64_t Offset) const;
+ void storeRegToStack(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI,
+ int64_t Offset) const override;
- virtual void loadRegFromStack(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
- int64_t Offset) const;
+ void loadRegFromStack(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI,
+ int64_t Offset) const override;
- virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;
+ bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
- virtual unsigned getOppositeBranchOpc(unsigned Opc) const;
+ unsigned getOppositeBranchOpc(unsigned Opc) const override;
/// Adjust SP by Amount bytes.
void adjustStackPtr(unsigned SP, int64_t Amount, MachineBasicBlock &MBB,
@@ -79,10 +79,9 @@ public:
unsigned *NewImm) const;
private:
- virtual unsigned getAnalyzableBrOpc(unsigned Opc) const;
+ unsigned getAnalyzableBrOpc(unsigned Opc) const override;
- void expandRetRA(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned Opc) const;
+ void expandRetRA(MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const;
std::pair<bool, bool> compareOpndSize(unsigned Opc,
const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp
index 2d44084..0af1a6b 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp
@@ -24,9 +24,8 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
@@ -40,6 +39,8 @@
using namespace llvm;
+#define DEBUG_TYPE "mips-reg-info"
+
MipsSERegisterInfo::MipsSERegisterInfo(const MipsSubtarget &ST)
: MipsRegisterInfo(ST) {}
@@ -62,21 +63,42 @@ MipsSERegisterInfo::intRegClass(unsigned Size) const {
return &Mips::GPR64RegClass;
}
-/// Determine whether a given opcode is an MSA load/store (supporting 10-bit
-/// offsets) or a non-MSA load/store (supporting 16-bit offsets).
-static inline bool isMSALoadOrStore(const unsigned Opcode) {
+/// Get the size of the offset supported by the given load/store.
+/// The result includes the effects of any scale factors applied to the
+/// instruction immediate.
+static inline unsigned getLoadStoreOffsetSizeInBits(const unsigned Opcode) {
switch (Opcode) {
case Mips::LD_B:
+ case Mips::ST_B:
+ return 10;
case Mips::LD_H:
+ case Mips::ST_H:
+ return 10 + 1 /* scale factor */;
case Mips::LD_W:
+ case Mips::ST_W:
+ return 10 + 2 /* scale factor */;
case Mips::LD_D:
- case Mips::ST_B:
+ case Mips::ST_D:
+ return 10 + 3 /* scale factor */;
+ default:
+ return 16;
+ }
+}
+
+/// Get the scale factor applied to the immediate in the given load/store.
+static inline unsigned getLoadStoreOffsetAlign(const unsigned Opcode) {
+ switch (Opcode) {
+ case Mips::LD_H:
case Mips::ST_H:
+ return 2;
+ case Mips::LD_W:
case Mips::ST_W:
+ return 4;
+ case Mips::LD_D:
case Mips::ST_D:
- return true;
+ return 8;
default:
- return false;
+ return 1;
}
}
@@ -131,13 +153,16 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
if (!MI.isDebugValue()) {
// Make sure Offset fits within the field available.
- // For MSA instructions, this is a 10-bit signed immediate, otherwise it is
- // a 16-bit signed immediate.
- unsigned OffsetBitSize = isMSALoadOrStore(MI.getOpcode()) ? 10 : 16;
-
- if (OffsetBitSize == 10 && !isInt<10>(Offset) && isInt<16>(Offset)) {
- // If we have an offset that needs to fit into a signed 10-bit immediate
- // and doesn't, but does fit into 16-bits then use an ADDiu
+ // For MSA instructions, this is a 10-bit signed immediate (scaled by
+ // element size), otherwise it is a 16-bit signed immediate.
+ unsigned OffsetBitSize = getLoadStoreOffsetSizeInBits(MI.getOpcode());
+ unsigned OffsetAlign = getLoadStoreOffsetAlign(MI.getOpcode());
+
+ if (OffsetBitSize < 16 && isInt<16>(Offset) &&
+ (!isIntN(OffsetBitSize, Offset) ||
+ OffsetToAlignment(Offset, OffsetAlign) != 0)) {
+ // If we have an offset that needs to fit into a signed n-bit immediate
+ // (where n < 16) and doesn't, but does fit into 16-bits then use an ADDiu
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc DL = II->getDebugLoc();
unsigned ADDiu = Subtarget.isABI_N64() ? Mips::DADDiu : Mips::ADDiu;
@@ -164,7 +189,7 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
*static_cast<const MipsSEInstrInfo *>(
MBB.getParent()->getTarget().getInstrInfo());
unsigned Reg = TII.loadImmediate(Offset, MBB, II, DL,
- OffsetBitSize == 16 ? &NewImm : NULL);
+ OffsetBitSize == 16 ? &NewImm : nullptr);
BuildMI(MBB, II, DL, TII.get(ADDu), Reg).addReg(FrameReg)
.addReg(Reg, RegState::Kill);
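[Editor's note] The scaled-immediate test added in this hunk is easiest to see with concrete numbers. Below is a standalone sketch that mirrors the logic of getLoadStoreOffsetSizeInBits/getLoadStoreOffsetAlign and the check in eliminateFI; it is an illustration, not part of the patch. For an MSA word store (ST_W) the 10-bit signed immediate is scaled by 4, so reachable byte offsets are -2048..2044 in steps of 4; anything out of range or misaligned forces the ADDiu/ADDu expansion above.

#include <cassert>
#include <cstdint>

// OffsetBitSize already includes the scale factor; OffsetAlign is the scale.
static bool fitsScaledImmediate(int64_t Offset, unsigned OffsetBitSize,
                                unsigned OffsetAlign) {
  int64_t Lo = -(INT64_C(1) << (OffsetBitSize - 1));    // e.g. -2048 for ST_W
  int64_t Hi = (INT64_C(1) << (OffsetBitSize - 1)) - 1; // e.g.  2047 for ST_W
  return Offset >= Lo && Offset <= Hi && (Offset % OffsetAlign) == 0;
}

int main() {
  // ST_W: 10-bit immediate scaled by 4 -> 12 usable bits, 4-byte aligned.
  assert(fitsScaledImmediate(2044, 12, 4));   // largest reachable offset
  assert(!fitsScaledImmediate(2046, 12, 4));  // in range but misaligned
  assert(!fitsScaledImmediate(4096, 12, 4));  // out of range
  return 0;
}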
diff --git a/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.h b/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.h
index 76cdd9d..f2f3a7e 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MipsSERegisterInfo.h
@@ -24,16 +24,16 @@ class MipsSERegisterInfo : public MipsRegisterInfo {
public:
MipsSERegisterInfo(const MipsSubtarget &Subtarget);
- bool requiresRegisterScavenging(const MachineFunction &MF) const;
+ bool requiresRegisterScavenging(const MachineFunction &MF) const override;
- bool requiresFrameIndexScavenging(const MachineFunction &MF) const;
+ bool requiresFrameIndexScavenging(const MachineFunction &MF) const override;
- virtual const TargetRegisterClass *intRegClass(unsigned Size) const;
+ const TargetRegisterClass *intRegClass(unsigned Size) const override;
private:
- virtual void eliminateFI(MachineBasicBlock::iterator II, unsigned OpNo,
- int FrameIndex, uint64_t StackSize,
- int64_t SPOffset) const;
+ void eliminateFI(MachineBasicBlock::iterator II, unsigned OpNo,
+ int FrameIndex, uint64_t StackSize,
+ int64_t SPOffset) const override;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/Mips/MipsSchedule.td b/contrib/llvm/lib/Target/Mips/MipsSchedule.td
index 2779064..ea98199 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSchedule.td
+++ b/contrib/llvm/lib/Target/Mips/MipsSchedule.td
@@ -17,60 +17,295 @@ def IMULDIV : FuncUnit;
// Instruction Itinerary classes used for Mips
//===----------------------------------------------------------------------===//
def IIAlu : InstrItinClass;
-def IIArith : InstrItinClass;
-def IILogic : InstrItinClass;
-def IILoad : InstrItinClass;
-def IIStore : InstrItinClass;
-def IIXfer : InstrItinClass;
def IIBranch : InstrItinClass;
-def IIHiLo : InstrItinClass;
-def IIImul : InstrItinClass;
-def IIImult : InstrItinClass;
-def IIIdiv : InstrItinClass;
-def IIseb : InstrItinClass;
-def IIslt : InstrItinClass;
-def IIFcvt : InstrItinClass;
-def IIFmove : InstrItinClass;
-def IIFcmp : InstrItinClass;
-def IIFadd : InstrItinClass;
-def IIFmulSingle : InstrItinClass;
-def IIFmulDouble : InstrItinClass;
-def IIFdivSingle : InstrItinClass;
-def IIFdivDouble : InstrItinClass;
-def IIFsqrtSingle : InstrItinClass;
-def IIFsqrtDouble : InstrItinClass;
-def IIFrecipFsqrtStep : InstrItinClass;
-def IIFLoad : InstrItinClass;
-def IIFStore : InstrItinClass;
-def IIFmoveC1 : InstrItinClass;
def IIPseudo : InstrItinClass;
+def II_ABS : InstrItinClass;
+def II_ADDI : InstrItinClass;
+def II_ADDIU : InstrItinClass;
+def II_ADDU : InstrItinClass;
+def II_ADD_D : InstrItinClass;
+def II_ADD_S : InstrItinClass;
+def II_AND : InstrItinClass;
+def II_ANDI : InstrItinClass;
+def II_BADDU : InstrItinClass;
+def II_CEIL : InstrItinClass;
+def II_CFC1 : InstrItinClass;
+def II_CLO : InstrItinClass;
+def II_CLZ : InstrItinClass;
+def II_CTC1 : InstrItinClass;
+def II_CVT : InstrItinClass;
+def II_C_CC_D : InstrItinClass; // Any c.<cc>.d instruction
+def II_C_CC_S : InstrItinClass; // Any c.<cc>.s instruction
+def II_DADDIU : InstrItinClass;
+def II_DADDU : InstrItinClass;
+def II_DADD : InstrItinClass;
+def II_DDIV : InstrItinClass;
+def II_DDIVU : InstrItinClass;
+def II_DIV : InstrItinClass;
+def II_DIVU : InstrItinClass;
+def II_DIV_D : InstrItinClass;
+def II_DIV_S : InstrItinClass;
+def II_DMFC1 : InstrItinClass;
+def II_DMTC1 : InstrItinClass;
+def II_DMUL : InstrItinClass;
+def II_DMULT : InstrItinClass;
+def II_DMULTU : InstrItinClass;
+def II_DROTR : InstrItinClass;
+def II_DROTR32 : InstrItinClass;
+def II_DROTRV : InstrItinClass;
+def II_DSLL : InstrItinClass;
+def II_DSLL32 : InstrItinClass;
+def II_DSLLV : InstrItinClass;
+def II_DSRA : InstrItinClass;
+def II_DSRA32 : InstrItinClass;
+def II_DSRAV : InstrItinClass;
+def II_DSRL : InstrItinClass;
+def II_DSRL32 : InstrItinClass;
+def II_DSRLV : InstrItinClass;
+def II_DSUBU : InstrItinClass;
+def II_DSUB : InstrItinClass;
+def II_FLOOR : InstrItinClass;
+def II_LB : InstrItinClass;
+def II_LBU : InstrItinClass;
+def II_LD : InstrItinClass;
+def II_LDC1 : InstrItinClass;
+def II_LDL : InstrItinClass;
+def II_LDR : InstrItinClass;
+def II_LDXC1 : InstrItinClass;
+def II_LH : InstrItinClass;
+def II_LHU : InstrItinClass;
+def II_LUI : InstrItinClass;
+def II_LUXC1 : InstrItinClass;
+def II_LW : InstrItinClass;
+def II_LWC1 : InstrItinClass;
+def II_LWL : InstrItinClass;
+def II_LWR : InstrItinClass;
+def II_LWU : InstrItinClass;
+def II_LWXC1 : InstrItinClass;
+def II_MADD : InstrItinClass;
+def II_MADDU : InstrItinClass;
+def II_MADD_D : InstrItinClass;
+def II_MADD_S : InstrItinClass;
+def II_MFC1 : InstrItinClass;
+def II_MFHC1 : InstrItinClass;
+def II_MFHI_MFLO : InstrItinClass; // mfhi and mflo
+def II_MOVF : InstrItinClass;
+def II_MOVF_D : InstrItinClass;
+def II_MOVF_S : InstrItinClass;
+def II_MOVN : InstrItinClass;
+def II_MOVN_D : InstrItinClass;
+def II_MOVN_S : InstrItinClass;
+def II_MOVT : InstrItinClass;
+def II_MOVT_D : InstrItinClass;
+def II_MOVT_S : InstrItinClass;
+def II_MOVZ : InstrItinClass;
+def II_MOVZ_D : InstrItinClass;
+def II_MOVZ_S : InstrItinClass;
+def II_MOV_D : InstrItinClass;
+def II_MOV_S : InstrItinClass;
+def II_MSUB : InstrItinClass;
+def II_MSUBU : InstrItinClass;
+def II_MSUB_D : InstrItinClass;
+def II_MSUB_S : InstrItinClass;
+def II_MTC1 : InstrItinClass;
+def II_MTHC1 : InstrItinClass;
+def II_MTHI_MTLO : InstrItinClass; // mthi and mtlo
+def II_MUL : InstrItinClass;
+def II_MULT : InstrItinClass;
+def II_MULTU : InstrItinClass;
+def II_MUL_D : InstrItinClass;
+def II_MUL_S : InstrItinClass;
+def II_NEG : InstrItinClass;
+def II_NMADD_D : InstrItinClass;
+def II_NMADD_S : InstrItinClass;
+def II_NMSUB_D : InstrItinClass;
+def II_NMSUB_S : InstrItinClass;
+def II_NOR : InstrItinClass;
+def II_OR : InstrItinClass;
+def II_ORI : InstrItinClass;
+def II_POP : InstrItinClass;
+def II_RDHWR : InstrItinClass;
+def II_RESTORE : InstrItinClass;
+def II_ROTR : InstrItinClass;
+def II_ROTRV : InstrItinClass;
+def II_ROUND : InstrItinClass;
+def II_SAVE : InstrItinClass;
+def II_SB : InstrItinClass;
+def II_SD : InstrItinClass;
+def II_SDC1 : InstrItinClass;
+def II_SDL : InstrItinClass;
+def II_SDR : InstrItinClass;
+def II_SDXC1 : InstrItinClass;
+def II_SEB : InstrItinClass;
+def II_SEH : InstrItinClass;
+def II_SEQ_SNE : InstrItinClass; // seq and sne
+def II_SEQI_SNEI : InstrItinClass; // seqi and snei
+def II_SH : InstrItinClass;
+def II_SLL : InstrItinClass;
+def II_SLLV : InstrItinClass;
+def II_SLTI_SLTIU : InstrItinClass; // slti and sltiu
+def II_SLT_SLTU : InstrItinClass; // slt and sltu
+def II_SQRT_D : InstrItinClass;
+def II_SQRT_S : InstrItinClass;
+def II_SRA : InstrItinClass;
+def II_SRAV : InstrItinClass;
+def II_SRL : InstrItinClass;
+def II_SRLV : InstrItinClass;
+def II_SUBU : InstrItinClass;
+def II_SUB_D : InstrItinClass;
+def II_SUB_S : InstrItinClass;
+def II_SUXC1 : InstrItinClass;
+def II_SW : InstrItinClass;
+def II_SWC1 : InstrItinClass;
+def II_SWL : InstrItinClass;
+def II_SWR : InstrItinClass;
+def II_SWXC1 : InstrItinClass;
+def II_TRUNC : InstrItinClass;
+def II_XOR : InstrItinClass;
+def II_XORI : InstrItinClass;
+
//===----------------------------------------------------------------------===//
// Mips Generic instruction itineraries.
//===----------------------------------------------------------------------===//
def MipsGenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [], [
InstrItinData<IIAlu , [InstrStage<1, [ALU]>]>,
- InstrItinData<IIArith , [InstrStage<1, [ALU]>]>,
- InstrItinData<IILogic , [InstrStage<1, [ALU]>]>,
- InstrItinData<IILoad , [InstrStage<3, [ALU]>]>,
- InstrItinData<IIStore , [InstrStage<1, [ALU]>]>,
- InstrItinData<IIXfer , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_ADDI , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_ADDIU , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_ADDU , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_AND , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_BADDU , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SLL , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SRA , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SRL , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_ROTR , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SLLV , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SRAV , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SRLV , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_ROTRV , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_CLO , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_CLZ , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DADDIU , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DADDU , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DADD , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DSLL , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DSRL , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DSRA , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DSLLV , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DSRLV , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DSRAV , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DSUBU , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DSUB , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DROTR , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DROTRV , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_LUI , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_MOVF , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_MOVN , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_MOVN_S , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_MOVN_D , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_MOVT , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_MOVZ , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_NOR , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_OR , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_POP , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_RDHWR , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SUBU , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_XOR , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_ANDI , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_ORI , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_XORI , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_LB , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LBU , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LH , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LHU , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LW , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LWL , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LWR , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LD , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LDL , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LDR , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_RESTORE , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_SB , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SH , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SW , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SWL , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SWR , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SDL , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SDR , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SD , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SAVE , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SEQ_SNE , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SEQI_SNEI , [InstrStage<1, [ALU]>]>,
InstrItinData<IIBranch , [InstrStage<1, [ALU]>]>,
- InstrItinData<IIHiLo , [InstrStage<1, [IMULDIV]>]>,
- InstrItinData<IIImul , [InstrStage<17, [IMULDIV]>]>,
- InstrItinData<IIIdiv , [InstrStage<38, [IMULDIV]>]>,
- InstrItinData<IIFcvt , [InstrStage<1, [ALU]>]>,
- InstrItinData<IIFmove , [InstrStage<2, [ALU]>]>,
- InstrItinData<IIFcmp , [InstrStage<3, [ALU]>]>,
- InstrItinData<IIFadd , [InstrStage<4, [ALU]>]>,
- InstrItinData<IIFmulSingle , [InstrStage<7, [ALU]>]>,
- InstrItinData<IIFmulDouble , [InstrStage<8, [ALU]>]>,
- InstrItinData<IIFdivSingle , [InstrStage<23, [ALU]>]>,
- InstrItinData<IIFdivDouble , [InstrStage<36, [ALU]>]>,
- InstrItinData<IIFsqrtSingle , [InstrStage<54, [ALU]>]>,
- InstrItinData<IIFsqrtDouble , [InstrStage<12, [ALU]>]>,
- InstrItinData<IIFrecipFsqrtStep , [InstrStage<5, [ALU]>]>,
- InstrItinData<IIFLoad , [InstrStage<3, [ALU]>]>,
- InstrItinData<IIFStore , [InstrStage<1, [ALU]>]>,
- InstrItinData<IIFmoveC1 , [InstrStage<2, [ALU]>]>
+ InstrItinData<II_DMUL , [InstrStage<17, [IMULDIV]>]>,
+ InstrItinData<II_DMULT , [InstrStage<17, [IMULDIV]>]>,
+ InstrItinData<II_DMULTU , [InstrStage<17, [IMULDIV]>]>,
+ InstrItinData<II_MADD , [InstrStage<17, [IMULDIV]>]>,
+ InstrItinData<II_MADDU , [InstrStage<17, [IMULDIV]>]>,
+ InstrItinData<II_MFHI_MFLO , [InstrStage<1, [IMULDIV]>]>,
+ InstrItinData<II_MSUB , [InstrStage<17, [IMULDIV]>]>,
+ InstrItinData<II_MSUBU , [InstrStage<17, [IMULDIV]>]>,
+ InstrItinData<II_MTHI_MTLO , [InstrStage<1, [IMULDIV]>]>,
+ InstrItinData<II_MUL , [InstrStage<17, [IMULDIV]>]>,
+ InstrItinData<II_MULT , [InstrStage<17, [IMULDIV]>]>,
+ InstrItinData<II_MULTU , [InstrStage<17, [IMULDIV]>]>,
+ InstrItinData<II_DIV , [InstrStage<38, [IMULDIV]>]>,
+ InstrItinData<II_DIVU , [InstrStage<38, [IMULDIV]>]>,
+ InstrItinData<II_DDIV , [InstrStage<38, [IMULDIV]>]>,
+ InstrItinData<II_DDIVU , [InstrStage<38, [IMULDIV]>]>,
+ InstrItinData<II_CEIL , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_CVT , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_ABS , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_FLOOR , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_NEG , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_ROUND , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_TRUNC , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_MOV_D , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_MOV_S , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_CFC1 , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_CTC1 , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_MOVF_D , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_MOVF_S , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_MOVT_D , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_MOVT_S , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_MOVZ_D , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_MOVZ_S , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_C_CC_S , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_C_CC_D , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_ADD_D , [InstrStage<4, [ALU]>]>,
+ InstrItinData<II_ADD_S , [InstrStage<4, [ALU]>]>,
+ InstrItinData<II_SUB_D , [InstrStage<4, [ALU]>]>,
+ InstrItinData<II_SUB_S , [InstrStage<4, [ALU]>]>,
+ InstrItinData<II_MUL_S , [InstrStage<7, [ALU]>]>,
+ InstrItinData<II_MADD_S , [InstrStage<7, [ALU]>]>,
+ InstrItinData<II_MSUB_S , [InstrStage<7, [ALU]>]>,
+ InstrItinData<II_NMADD_S , [InstrStage<7, [ALU]>]>,
+ InstrItinData<II_NMSUB_S , [InstrStage<7, [ALU]>]>,
+ InstrItinData<II_MUL_D , [InstrStage<8, [ALU]>]>,
+ InstrItinData<II_MADD_D , [InstrStage<8, [ALU]>]>,
+ InstrItinData<II_MSUB_D , [InstrStage<8, [ALU]>]>,
+ InstrItinData<II_NMADD_D , [InstrStage<8, [ALU]>]>,
+ InstrItinData<II_NMSUB_D , [InstrStage<8, [ALU]>]>,
+ InstrItinData<II_DIV_S , [InstrStage<23, [ALU]>]>,
+ InstrItinData<II_DIV_D , [InstrStage<36, [ALU]>]>,
+ InstrItinData<II_SQRT_S , [InstrStage<54, [ALU]>]>,
+ InstrItinData<II_SQRT_D , [InstrStage<12, [ALU]>]>,
+ InstrItinData<II_LDC1 , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LWC1 , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LDXC1 , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LWXC1 , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LUXC1 , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_SDC1 , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SWC1 , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SDXC1 , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SWXC1 , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SUXC1 , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DMFC1 , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_DMTC1 , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_MFC1 , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_MTC1 , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_MFHC1 , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_MTHC1 , [InstrStage<2, [ALU]>]>
]>;
diff --git a/contrib/llvm/lib/Target/Mips/MipsSelectionDAGInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsSelectionDAGInfo.cpp
index e4d70fc..edd8f67 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsSelectionDAGInfo.cpp
@@ -11,13 +11,13 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mips-selectiondag-info"
#include "MipsTargetMachine.h"
using namespace llvm;
-MipsSelectionDAGInfo::MipsSelectionDAGInfo(const MipsTargetMachine &TM)
- : TargetSelectionDAGInfo(TM) {
-}
+#define DEBUG_TYPE "mips-selectiondag-info"
+
+MipsSelectionDAGInfo::MipsSelectionDAGInfo(const DataLayout &DL)
+ : TargetSelectionDAGInfo(&DL) {}
MipsSelectionDAGInfo::~MipsSelectionDAGInfo() {
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsSelectionDAGInfo.h b/contrib/llvm/lib/Target/Mips/MipsSelectionDAGInfo.h
index 6cafb55..2b3d527 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSelectionDAGInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MipsSelectionDAGInfo.h
@@ -22,7 +22,7 @@ class MipsTargetMachine;
class MipsSelectionDAGInfo : public TargetSelectionDAGInfo {
public:
- explicit MipsSelectionDAGInfo(const MipsTargetMachine &TM);
+ explicit MipsSelectionDAGInfo(const DataLayout &DL);
~MipsSelectionDAGInfo();
};
diff --git a/contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp b/contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp
index 0a81072..5bf875d 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp
@@ -11,13 +11,11 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mips-subtarget"
-
#include "MipsMachineFunction.h"
-#include "MipsSubtarget.h"
-#include "MipsTargetMachine.h"
#include "Mips.h"
#include "MipsRegisterInfo.h"
+#include "MipsSubtarget.h"
+#include "MipsTargetMachine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
@@ -25,13 +23,14 @@
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "mips-subtarget"
+
#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "MipsGenSubtargetInfo.inc"
-
-using namespace llvm;
-
// FIXME: Maybe this should be on by default when Mips16 is specified
//
static cl::opt<bool> Mixed16_32(
@@ -55,44 +54,96 @@ Mips16HardFloat("mips16-hard-float", cl::NotHidden,
static cl::opt<bool>
Mips16ConstantIslands(
- "mips16-constant-islands", cl::Hidden,
- cl::desc("MIPS: mips16 constant islands enable. experimental feature"),
- cl::init(false));
+ "mips16-constant-islands", cl::NotHidden,
+ cl::desc("MIPS: mips16 constant islands enable."),
+ cl::init(true));
+
+/// Select the Mips CPU for the given triple and cpu name.
+/// FIXME: Merge with the copy in MipsMCTargetDesc.cpp
+static StringRef selectMipsCPU(Triple TT, StringRef CPU) {
+ if (CPU.empty() || CPU == "generic") {
+ if (TT.getArch() == Triple::mips || TT.getArch() == Triple::mipsel)
+ CPU = "mips32";
+ else
+ CPU = "mips64";
+ }
+ return CPU;
+}
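[Editor's note] For illustration, the helper above behaves as follows (the triple spellings are examples, not taken from the patch):

  selectMipsCPU(Triple("mipsel-unknown-linux-gnu"), "")          returns "mips32"
  selectMipsCPU(Triple("mips64el-unknown-linux-gnu"), "generic") returns "mips64"
  selectMipsCPU(Triple("mips-unknown-linux-gnu"), "mips32r2")    returns "mips32r2"  // explicit CPU wins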
void MipsSubtarget::anchor() { }
+static std::string computeDataLayout(const MipsSubtarget &ST) {
+ std::string Ret = "";
+
+ // There are both little and big endian mips.
+ if (ST.isLittle())
+ Ret += "e";
+ else
+ Ret += "E";
+
+ Ret += "-m:m";
+
+ // Pointers are 32 bit on some ABIs.
+ if (!ST.isABI_N64())
+ Ret += "-p:32:32";
+
+  // 8 and 16 bit integers only need to have natural alignment, but try to
+  // align them to 32 bits. 64 bit integers have natural alignment.
+ Ret += "-i8:8:32-i16:16:32-i64:64";
+
+ // 32 bit registers are always available and the stack is at least 64 bit
+ // aligned. On N64 64 bit registers are also available and the stack is
+ // 128 bit aligned.
+ if (ST.isABI_N64() || ST.isABI_N32())
+ Ret += "-n32:64-S128";
+ else
+ Ret += "-n32-S64";
+
+ return Ret;
+}
+
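[Editor's note] For reference, following the branches of computeDataLayout() yields these strings for two common configurations (derived from the code above, not quoted from the patch):

  // little-endian O32:  "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64"
  // big-endian   N64:   "E-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128"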
MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
const std::string &FS, bool little,
- Reloc::Model _RM, MipsTargetMachine *_TM) :
- MipsGenSubtargetInfo(TT, CPU, FS),
- MipsArchVersion(Mips32), MipsABI(UnknownABI), IsLittle(little),
- IsSingleFloat(false), IsFP64bit(false), IsGP64bit(false), HasVFPU(false),
- IsLinux(true), HasSEInReg(false), HasCondMov(false), HasSwap(false),
- HasBitCount(false), HasFPIdx(false),
- InMips16Mode(false), InMips16HardFloat(Mips16HardFloat),
- InMicroMipsMode(false), HasDSP(false), HasDSPR2(false),
- AllowMixed16_32(Mixed16_32 | Mips_Os16), Os16(Mips_Os16), HasMSA(false),
- RM(_RM), OverrideMode(NoOverride), TM(_TM)
-{
- std::string CPUName = CPU;
- if (CPUName.empty())
- CPUName = "mips32";
-
- // Parse features string.
- ParseSubtargetFeatures(CPUName, FS);
+ MipsTargetMachine *_TM)
+ : MipsGenSubtargetInfo(TT, CPU, FS), MipsArchVersion(Mips32),
+ MipsABI(UnknownABI), IsLittle(little), IsSingleFloat(false),
+ IsFPXX(false), IsFP64bit(false), UseOddSPReg(true), IsNaN2008bit(false),
+ IsGP64bit(false), HasVFPU(false), HasCnMips(false), IsLinux(true),
+ HasMips3_32(false), HasMips3_32r2(false), HasMips4_32(false),
+ HasMips4_32r2(false), HasMips5_32r2(false), InMips16Mode(false),
+ InMips16HardFloat(Mips16HardFloat), InMicroMipsMode(false), HasDSP(false),
+ HasDSPR2(false), AllowMixed16_32(Mixed16_32 | Mips_Os16), Os16(Mips_Os16),
+ HasMSA(false), TM(_TM), TargetTriple(TT),
+ DL(computeDataLayout(initializeSubtargetDependencies(CPU, FS, TM))),
+ TSInfo(DL), JITInfo(), InstrInfo(MipsInstrInfo::create(*this)),
+ FrameLowering(MipsFrameLowering::create(*this)),
+ TLInfo(MipsTargetLowering::create(*TM, *this)) {
PreviousInMips16Mode = InMips16Mode;
- // Initialize scheduling itinerary for the specified CPU.
- InstrItins = getInstrItineraryForCPU(CPUName);
+ // Don't even attempt to generate code for MIPS-I, MIPS-II, MIPS-III, and
+ // MIPS-V. They have not been tested and currently exist for the integrated
+ // assembler only.
+ if (MipsArchVersion == Mips1)
+ report_fatal_error("Code generation for MIPS-I is not implemented", false);
+ if (MipsArchVersion == Mips2)
+ report_fatal_error("Code generation for MIPS-II is not implemented", false);
+ if (MipsArchVersion == Mips3)
+ report_fatal_error("Code generation for MIPS-III is not implemented",
+ false);
+ if (MipsArchVersion == Mips5)
+ report_fatal_error("Code generation for MIPS-V is not implemented", false);
- // Set MipsABI if it hasn't been set yet.
- if (MipsABI == UnknownABI)
- MipsABI = hasMips64() ? N64 : O32;
+ // Assert exactly one ABI was chosen.
+ assert(MipsABI != UnknownABI);
+ assert((((getFeatureBits() & Mips::FeatureO32) != 0) +
+ ((getFeatureBits() & Mips::FeatureEABI) != 0) +
+ ((getFeatureBits() & Mips::FeatureN32) != 0) +
+ ((getFeatureBits() & Mips::FeatureN64) != 0)) == 1);
// Check if Architecture and ABI are compatible.
- assert(((!hasMips64() && (isABI_O32() || isABI_EABI())) ||
- (hasMips64() && (isABI_N32() || isABI_N64()))) &&
+ assert(((!isGP64bit() && (isABI_O32() || isABI_EABI())) ||
+ (isGP64bit() && (isABI_N32() || isABI_N64()))) &&
"Invalid Arch & ABI pair.");
if (hasMSA() && !isFP64bit())
@@ -100,73 +151,61 @@ MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
"See -mattr=+fp64.",
false);
+ if (!isABI_O32() && !useOddSPReg())
+ report_fatal_error("-mattr=+nooddspreg requires the O32 ABI.", false);
+
+ if (IsFPXX && (isABI_N32() || isABI_N64()))
+ report_fatal_error("FPXX is not permitted for the N32/N64 ABI's.", false);
+
+ if (hasMips32r6()) {
+ StringRef ISA = hasMips64r6() ? "MIPS64r6" : "MIPS32r6";
+
+ assert(isFP64bit());
+ assert(isNaN2008());
+ if (hasDSP())
+ report_fatal_error(ISA + " is not compatible with the DSP ASE", false);
+ }
+
// Is the target system Linux ?
if (TT.find("linux") == std::string::npos)
IsLinux = false;
// Set UseSmallSection.
- UseSmallSection = !IsLinux && (RM == Reloc::Static);
- // set some subtarget specific features
- if (inMips16Mode())
- HasBitCount=false;
+ // TODO: Investigate the IsLinux check. I suspect it's really checking for
+ // bare-metal.
+ UseSmallSection = !IsLinux && (TM->getRelocationModel() == Reloc::Static);
}
-bool
-MipsSubtarget::enablePostRAScheduler(CodeGenOpt::Level OptLevel,
- TargetSubtargetInfo::AntiDepBreakMode &Mode,
- RegClassVector &CriticalPathRCs) const {
- Mode = TargetSubtargetInfo::ANTIDEP_NONE;
+/// This overrides the PostRAScheduler bit in the SchedModel for any CPU.
+bool MipsSubtarget::enablePostMachineScheduler() const { return true; }
+
+void MipsSubtarget::getCriticalPathRCs(RegClassVector &CriticalPathRCs) const {
CriticalPathRCs.clear();
- CriticalPathRCs.push_back(hasMips64() ?
+ CriticalPathRCs.push_back(isGP64bit() ?
&Mips::GPR64RegClass : &Mips::GPR32RegClass);
- return OptLevel >= CodeGenOpt::Aggressive;
}
-//FIXME: This logic for reseting the subtarget along with
-// the helper classes can probably be simplified but there are a lot of
-// cases so we will defer rewriting this to later.
-//
-void MipsSubtarget::resetSubtarget(MachineFunction *MF) {
- bool ChangeToMips16 = false, ChangeToNoMips16 = false;
- DEBUG(dbgs() << "resetSubtargetFeatures" << "\n");
- AttributeSet FnAttrs = MF->getFunction()->getAttributes();
- ChangeToMips16 = FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
- "mips16");
- ChangeToNoMips16 = FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
- "nomips16");
- assert (!(ChangeToMips16 & ChangeToNoMips16) &&
- "mips16 and nomips16 specified on the same function");
- if (ChangeToMips16) {
- if (PreviousInMips16Mode)
- return;
- OverrideMode = Mips16Override;
- PreviousInMips16Mode = true;
- TM->setHelperClassesMips16();
- return;
- } else if (ChangeToNoMips16) {
- if (!PreviousInMips16Mode)
- return;
- OverrideMode = NoMips16Override;
- PreviousInMips16Mode = false;
- TM->setHelperClassesMipsSE();
- return;
- } else {
- if (OverrideMode == NoOverride)
- return;
- OverrideMode = NoOverride;
- DEBUG(dbgs() << "back to default" << "\n");
- if (inMips16Mode() && !PreviousInMips16Mode) {
- TM->setHelperClassesMips16();
- PreviousInMips16Mode = true;
- } else if (!inMips16Mode() && PreviousInMips16Mode) {
- TM->setHelperClassesMipsSE();
- PreviousInMips16Mode = false;
- }
- return;
- }
+CodeGenOpt::Level MipsSubtarget::getOptLevelToEnablePostRAScheduler() const {
+ return CodeGenOpt::Aggressive;
}
-bool MipsSubtarget::mipsSEUsesSoftFloat() const {
+MipsSubtarget &
+MipsSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS,
+ const TargetMachine *TM) {
+ std::string CPUName = selectMipsCPU(TargetTriple, CPU);
+
+ // Parse features string.
+ ParseSubtargetFeatures(CPUName, FS);
+ // Initialize scheduling itinerary for the specified CPU.
+ InstrItins = getInstrItineraryForCPU(CPUName);
+
+ if (InMips16Mode && !TM->Options.UseSoftFloat)
+ InMips16HardFloat = true;
+
+ return *this;
+}
+
+bool MipsSubtarget::abiUsesSoftFloat() const {
return TM->Options.UseSoftFloat && !InMips16HardFloat;
}
@@ -174,3 +213,7 @@ bool MipsSubtarget::useConstantIslands() {
DEBUG(dbgs() << "use constant islands " << Mips16ConstantIslands << "\n");
return Mips16ConstantIslands;
}
+
+Reloc::Model MipsSubtarget::getRelocationModel() const {
+ return TM->getRelocationModel();
+}
diff --git a/contrib/llvm/lib/Target/Mips/MipsSubtarget.h b/contrib/llvm/lib/Target/Mips/MipsSubtarget.h
index 6b2ab12..f326462 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSubtarget.h
+++ b/contrib/llvm/lib/Target/Mips/MipsSubtarget.h
@@ -14,11 +14,15 @@
#ifndef MIPSSUBTARGET_H
#define MIPSSUBTARGET_H
-#include "MCTargetDesc/MipsReginfo.h"
+#include "MipsFrameLowering.h"
+#include "MipsISelLowering.h"
+#include "MipsInstrInfo.h"
+#include "MipsJITInfo.h"
+#include "MipsSelectionDAGInfo.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetSubtargetInfo.h"
-
#include <string>
#define GET_SUBTARGETINFO_HEADER
@@ -39,9 +43,9 @@ public:
};
protected:
-
enum MipsArchEnum {
- Mips32, Mips32r2, Mips64, Mips64r2
+ Mips1, Mips2, Mips32, Mips32r2, Mips32r6, Mips3, Mips4, Mips5, Mips64,
+ Mips64r2, Mips64r6
};
// Mips architecture version
@@ -58,15 +62,28 @@ protected:
// floating point registers instead of only using even ones.
bool IsSingleFloat;
+ // IsFPXX - MIPS O32 modeless ABI.
+ bool IsFPXX;
+
// IsFP64bit - The target processor has 64-bit floating point registers.
bool IsFP64bit;
+ /// Are odd single-precision registers permitted?
+ /// This corresponds to -modd-spreg and -mno-odd-spreg
+ bool UseOddSPReg;
+
+  // IsNaN2008bit - IEEE 754-2008 NaN encoding.
+ bool IsNaN2008bit;
+
   // IsGP64bit - General-purpose registers are 64 bits wide
bool IsGP64bit;
// HasVFPU - Processor has a vector floating point unit.
bool HasVFPU;
+ // CPU supports cnMIPS (Cavium Networks Octeon CPU).
+ bool HasCnMips;
+
   // isLinux - Target system is Linux. If false, we consider ELFOS for now.
bool IsLinux;
@@ -75,20 +92,20 @@ protected:
/// Features related to the presence of specific instructions.
- // HasSEInReg - SEB and SEH (signext in register) instructions.
- bool HasSEInReg;
+ // HasMips3_32 - The subset of MIPS-III instructions added to MIPS32
+ bool HasMips3_32;
- // HasCondMov - Conditional mov (MOVZ, MOVN) instructions.
- bool HasCondMov;
+ // HasMips3_32r2 - The subset of MIPS-III instructions added to MIPS32r2
+ bool HasMips3_32r2;
- // HasSwap - Byte and half swap instructions.
- bool HasSwap;
+ // HasMips4_32 - Has the subset of MIPS-IV present in MIPS32
+ bool HasMips4_32;
- // HasBitCount - Count leading '1' and '0' bits.
- bool HasBitCount;
+ // HasMips4_32r2 - Has the subset of MIPS-IV present in MIPS32r2
+ bool HasMips4_32r2;
- // HasFPIdx -- Floating point indexed load/store instructions.
- bool HasFPIdx;
+ // HasMips5_32r2 - Has the subset of MIPS-V present in MIPS32r2
+ bool HasMips5_32r2;
// InMips16 -- can process Mips16 instructions
bool InMips16Mode;
@@ -118,68 +135,90 @@ protected:
InstrItineraryData InstrItins;
- // The instance to the register info section object
- MipsReginfo MRI;
-
- // Relocation Model
- Reloc::Model RM;
-
// We can override the determination of whether we are in mips16 mode
// as from the command line
enum {NoOverride, Mips16Override, NoMips16Override} OverrideMode;
MipsTargetMachine *TM;
+ Triple TargetTriple;
+
+ const DataLayout DL; // Calculates type size & alignment
+ const MipsSelectionDAGInfo TSInfo;
+ MipsJITInfo JITInfo;
+ std::unique_ptr<const MipsInstrInfo> InstrInfo;
+ std::unique_ptr<const MipsFrameLowering> FrameLowering;
+ std::unique_ptr<const MipsTargetLowering> TLInfo;
+
public:
- virtual bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
- AntiDepBreakMode& Mode,
- RegClassVector& CriticalPathRCs) const;
+ /// This overrides the PostRAScheduler bit in the SchedModel for each CPU.
+ bool enablePostMachineScheduler() const override;
+ void getCriticalPathRCs(RegClassVector &CriticalPathRCs) const override;
+ CodeGenOpt::Level getOptLevelToEnablePostRAScheduler() const override;
/// Only O32 and EABI supported right now.
bool isABI_EABI() const { return MipsABI == EABI; }
bool isABI_N64() const { return MipsABI == N64; }
bool isABI_N32() const { return MipsABI == N32; }
bool isABI_O32() const { return MipsABI == O32; }
+ bool isABI_FPXX() const { return isABI_O32() && IsFPXX; }
unsigned getTargetABI() const { return MipsABI; }
/// This constructor initializes the data members to match that
/// of the specified triple.
MipsSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, bool little, Reloc::Model RM,
- MipsTargetMachine *TM);
+ const std::string &FS, bool little, MipsTargetMachine *TM);
/// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
- bool hasMips32() const { return MipsArchVersion >= Mips32; }
- bool hasMips32r2() const { return MipsArchVersion == Mips32r2 ||
- MipsArchVersion == Mips64r2; }
+ bool hasMips1() const { return MipsArchVersion >= Mips1; }
+ bool hasMips2() const { return MipsArchVersion >= Mips2; }
+ bool hasMips3() const { return MipsArchVersion >= Mips3; }
+ bool hasMips4() const { return MipsArchVersion >= Mips4; }
+ bool hasMips5() const { return MipsArchVersion >= Mips5; }
+ bool hasMips4_32() const { return HasMips4_32; }
+ bool hasMips4_32r2() const { return HasMips4_32r2; }
+ bool hasMips32() const {
+ return MipsArchVersion >= Mips32 && MipsArchVersion != Mips3 &&
+ MipsArchVersion != Mips4 && MipsArchVersion != Mips5;
+ }
+ bool hasMips32r2() const {
+ return MipsArchVersion == Mips32r2 || MipsArchVersion == Mips32r6 ||
+ MipsArchVersion == Mips64r2 || MipsArchVersion == Mips64r6;
+ }
+ bool hasMips32r6() const {
+ return MipsArchVersion == Mips32r6 || MipsArchVersion == Mips64r6;
+ }
bool hasMips64() const { return MipsArchVersion >= Mips64; }
- bool hasMips64r2() const { return MipsArchVersion == Mips64r2; }
+ bool hasMips64r2() const {
+ return MipsArchVersion == Mips64r2 || MipsArchVersion == Mips64r6;
+ }
+ bool hasMips64r6() const { return MipsArchVersion == Mips64r6; }
+
+ bool hasCnMips() const { return HasCnMips; }
bool isLittle() const { return IsLittle; }
+ bool isFPXX() const { return IsFPXX; }
bool isFP64bit() const { return IsFP64bit; }
+ bool useOddSPReg() const { return UseOddSPReg; }
+ bool noOddSPReg() const { return !UseOddSPReg; }
+ bool isNaN2008() const { return IsNaN2008bit; }
bool isNotFP64bit() const { return !IsFP64bit; }
bool isGP64bit() const { return IsGP64bit; }
bool isGP32bit() const { return !IsGP64bit; }
bool isSingleFloat() const { return IsSingleFloat; }
bool isNotSingleFloat() const { return !IsSingleFloat; }
bool hasVFPU() const { return HasVFPU; }
- bool inMips16Mode() const {
- switch (OverrideMode) {
- case NoOverride:
- return InMips16Mode;
- case Mips16Override:
- return true;
- case NoMips16Override:
- return false;
- }
- llvm_unreachable("Unexpected mode");
- }
+ bool inMips16Mode() const { return InMips16Mode; }
bool inMips16ModeDefault() const {
return InMips16Mode;
}
+  // Hard float for mips16 essentially means compiling as soft float, but
+  // using a soft-float runtime library that is written with native mips32
+  // floating point instructions (those runtime routines run in mips32 hard
+  // float mode).
bool inMips16HardFloat() const {
return inMips16Mode() && InMips16HardFloat;
}
@@ -192,19 +231,15 @@ public:
bool hasStandardEncoding() const { return !inMips16Mode(); }
- bool mipsSEUsesSoftFloat() const;
+ bool abiUsesSoftFloat() const;
bool enableLongBranchPass() const {
return hasStandardEncoding() || allowMixed16_32();
}
/// Features related to the presence of specific instructions.
- bool hasSEInReg() const { return HasSEInReg; }
- bool hasCondMov() const { return HasCondMov; }
- bool hasSwap() const { return HasSwap; }
- bool hasBitCount() const { return HasBitCount; }
- bool hasFPIdx() const { return HasFPIdx; }
bool hasExtractInsert() const { return !inMips16Mode() && hasMips32r2(); }
+ bool hasMTHC1() const { return hasMips32r2(); }
const InstrItineraryData &getInstrItineraryData() const { return InstrItins; }
bool allowMixed16_32() const { return inMips16ModeDefault() |
@@ -212,23 +247,43 @@ public:
bool os16() const { return Os16;};
-// for now constant islands are on for the whole compilation unit but we only
-// really use them if in addition we are in mips16 mode
-//
-static bool useConstantIslands();
+ bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
+ bool isNotTargetNaCl() const { return !TargetTriple.isOSNaCl(); }
- unsigned stackAlignment() const { return hasMips64() ? 16 : 8; }
+  // For now, constant islands are on for the whole compilation unit, but we
+  // only really use them if, in addition, we are in mips16 mode.
+ static bool useConstantIslands();
- // Grab MipsRegInfo object
- const MipsReginfo &getMReginfo() const { return MRI; }
+ unsigned stackAlignment() const { return hasMips64() ? 16 : 8; }
// Grab relocation model
- Reloc::Model getRelocationModel() const {return RM;}
-
- /// \brief Reset the subtarget for the Mips target.
- void resetSubtarget(MachineFunction *MF);
-
-
+ Reloc::Model getRelocationModel() const;
+
+ MipsSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS,
+ const TargetMachine *TM);
+
+  /// Does the system support unaligned memory access?
+  ///
+  /// MIPS32r6/MIPS64r6 require full unaligned access support but do not
+  /// specify which component of the system provides it. Hardware, software,
+  /// and hybrid implementations are all valid.
+ bool systemSupportsUnalignedAccess() const { return hasMips32r6(); }
+
+ // Set helper classes
+ void setHelperClassesMips16();
+ void setHelperClassesMipsSE();
+
+ MipsJITInfo *getJITInfo() { return &JITInfo; }
+ const MipsSelectionDAGInfo *getSelectionDAGInfo() const { return &TSInfo; }
+ const DataLayout *getDataLayout() const { return &DL; }
+ const MipsInstrInfo *getInstrInfo() const { return InstrInfo.get(); }
+ const TargetFrameLowering *getFrameLowering() const {
+ return FrameLowering.get();
+ }
+ const MipsRegisterInfo *getRegisterInfo() const {
+ return &InstrInfo->getRegisterInfo();
+ }
+ const MipsTargetLowering *getTargetLowering() const { return TLInfo.get(); }
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp
index 5046c1b..bb1870e 100644
--- a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp
@@ -13,29 +13,29 @@
#include "MipsTargetMachine.h"
#include "Mips.h"
+#include "Mips16FrameLowering.h"
+#include "Mips16HardFloat.h"
+#include "Mips16ISelDAGToDAG.h"
+#include "Mips16ISelLowering.h"
+#include "Mips16InstrInfo.h"
#include "MipsFrameLowering.h"
#include "MipsInstrInfo.h"
#include "MipsModuleISelDAGToDAG.h"
#include "MipsOs16.h"
#include "MipsSEFrameLowering.h"
-#include "MipsSEInstrInfo.h"
-#include "MipsSEISelLowering.h"
#include "MipsSEISelDAGToDAG.h"
-#include "Mips16FrameLowering.h"
-#include "Mips16HardFloat.h"
-#include "Mips16InstrInfo.h"
-#include "Mips16ISelDAGToDAG.h"
-#include "Mips16ISelLowering.h"
+#include "MipsSEISelLowering.h"
+#include "MipsSEInstrInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/PassManager.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;
-
+#define DEBUG_TYPE "mips"
extern "C" void LLVMInitializeMipsTarget() {
// Register the target.
@@ -45,73 +45,26 @@ extern "C" void LLVMInitializeMipsTarget() {
RegisterTargetMachine<MipselTargetMachine> B(TheMips64elTarget);
}
-// DataLayout --> Big-endian, 32-bit pointer/ABI/alignment
-// The stack is always 8 byte aligned
 // On function prologue, the stack is created by decrementing
 // its pointer. Once decremented, all references are done with a positive
 // offset from the stack/frame pointer; using StackGrowsUp enables
 // easier handling.
 // Using CodeModel::Large enables different CALL behavior.
-MipsTargetMachine::
-MipsTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL,
- bool isLittle)
- : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- Subtarget(TT, CPU, FS, isLittle, RM, this),
- DL(isLittle ?
- (Subtarget.isABI_N64() ?
- "e-p:64:64:64-i8:8:32-i16:16:32-i64:64:64-f128:128:128-"
- "n32:64-S128" :
- "e-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32-S64") :
- (Subtarget.isABI_N64() ?
- "E-p:64:64:64-i8:8:32-i16:16:32-i64:64:64-f128:128:128-"
- "n32:64-S128" :
- "E-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32-S64")),
- InstrInfo(MipsInstrInfo::create(*this)),
- FrameLowering(MipsFrameLowering::create(*this, Subtarget)),
- TLInfo(MipsTargetLowering::create(*this)), TSInfo(*this),
- InstrItins(Subtarget.getInstrItineraryData()), JITInfo() {
+MipsTargetMachine::MipsTargetMachine(const Target &T, StringRef TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL, bool isLittle)
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ Subtarget(nullptr), DefaultSubtarget(TT, CPU, FS, isLittle, this),
+ NoMips16Subtarget(TT, CPU, FS.empty() ? "-mips16" : FS.str() + ",-mips16",
+ isLittle, this),
+ Mips16Subtarget(TT, CPU, FS.empty() ? "+mips16" : FS.str() + ",+mips16",
+ isLittle, this) {
+ Subtarget = &DefaultSubtarget;
initAsmInfo();
}
-
-void MipsTargetMachine::setHelperClassesMips16() {
- InstrInfoSE.swap(InstrInfo);
- FrameLoweringSE.swap(FrameLowering);
- TLInfoSE.swap(TLInfo);
- if (!InstrInfo16) {
- InstrInfo.reset(MipsInstrInfo::create(*this));
- FrameLowering.reset(MipsFrameLowering::create(*this, Subtarget));
- TLInfo.reset(MipsTargetLowering::create(*this));
- } else {
- InstrInfo16.swap(InstrInfo);
- FrameLowering16.swap(FrameLowering);
- TLInfo16.swap(TLInfo);
- }
- assert(TLInfo && "null target lowering 16");
- assert(InstrInfo && "null instr info 16");
- assert(FrameLowering && "null frame lowering 16");
-}
-
-void MipsTargetMachine::setHelperClassesMipsSE() {
- InstrInfo16.swap(InstrInfo);
- FrameLowering16.swap(FrameLowering);
- TLInfo16.swap(TLInfo);
- if (!InstrInfoSE) {
- InstrInfo.reset(MipsInstrInfo::create(*this));
- FrameLowering.reset(MipsFrameLowering::create(*this, Subtarget));
- TLInfo.reset(MipsTargetLowering::create(*this));
- } else {
- InstrInfoSE.swap(InstrInfo);
- FrameLoweringSE.swap(FrameLowering);
- TLInfoSE.swap(TLInfo);
- }
- assert(TLInfo && "null target lowering in SE");
- assert(InstrInfo && "null instr info SE");
- assert(FrameLowering && "null frame lowering SE");
-}
void MipsebTargetMachine::anchor() { }
MipsebTargetMachine::
@@ -130,6 +83,23 @@ MipselTargetMachine(const Target &T, StringRef TT,
CodeGenOpt::Level OL)
: MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
+void MipsTargetMachine::resetSubtarget(MachineFunction *MF) {
+ DEBUG(dbgs() << "resetSubtarget\n");
+ AttributeSet FnAttrs = MF->getFunction()->getAttributes();
+ bool Mips16Attr = FnAttrs.hasAttribute(AttributeSet::FunctionIndex, "mips16");
+ bool NoMips16Attr =
+ FnAttrs.hasAttribute(AttributeSet::FunctionIndex, "nomips16");
+ assert(!(Mips16Attr && NoMips16Attr) &&
+ "mips16 and nomips16 specified on the same function");
+ if (Mips16Attr)
+ Subtarget = &Mips16Subtarget;
+ else if (NoMips16Attr)
+ Subtarget = &NoMips16Subtarget;
+ else
+ Subtarget = &DefaultSubtarget;
+ return;
+}
+
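[Editor's note] The per-function switching above keys on the "mips16"/"nomips16" string function attributes. As an illustration only (this assumes Clang's MIPS function attributes, which are outside this patch), source like the following ends up selecting the Mips16, noMips16, and default subtargets respectively:

/* Compiled for a mips(el) triple; the attributes become the "mips16" /
   "nomips16" IR function attributes that resetSubtarget() inspects. */
__attribute__((mips16)) void small_helper(void) {}
__attribute__((nomips16)) void hot_loop(void) {}
void plain(void) {}   /* no attribute: DefaultSubtarget */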
namespace {
/// Mips Code Generator Pass Configuration Options.
class MipsPassConfig : public TargetPassConfig {
@@ -151,9 +121,13 @@ public:
return *getMipsTargetMachine().getSubtargetImpl();
}
- virtual void addIRPasses();
- virtual bool addInstSelector();
- virtual bool addPreEmitPass();
+ void addIRPasses() override;
+ bool addInstSelector() override;
+ void addMachineSSAOptimization() override;
+ bool addPreEmitPass() override;
+
+ bool addPreRegAlloc() override;
+
};
} // namespace
@@ -172,18 +146,28 @@ void MipsPassConfig::addIRPasses() {
// Install an instruction selector pass using
// the ISelDag to gen Mips code.
bool MipsPassConfig::addInstSelector() {
- if (getMipsSubtarget().allowMixed16_32()) {
- addPass(createMipsModuleISelDag(getMipsTargetMachine()));
- addPass(createMips16ISelDag(getMipsTargetMachine()));
- addPass(createMipsSEISelDag(getMipsTargetMachine()));
- } else {
- addPass(createMipsISelDag(getMipsTargetMachine()));
- }
+ addPass(createMipsModuleISelDag(getMipsTargetMachine()));
+ addPass(createMips16ISelDag(getMipsTargetMachine()));
+ addPass(createMipsSEISelDag(getMipsTargetMachine()));
return false;
}
+void MipsPassConfig::addMachineSSAOptimization() {
+ addPass(createMipsOptimizePICCallPass(getMipsTargetMachine()));
+ TargetPassConfig::addMachineSSAOptimization();
+}
+
+bool MipsPassConfig::addPreRegAlloc() {
+ if (getOptLevel() == CodeGenOpt::None) {
+ addPass(createMipsOptimizePICCallPass(getMipsTargetMachine()));
+ return true;
+  }
+
+  return false;
+}
+
void MipsTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
- if (Subtarget.allowMixed16_32()) {
+ if (Subtarget->allowMixed16_32()) {
DEBUG(errs() << "No ");
//FIXME: The Basic Target Transform Info
// pass needs to become a function pass instead of
@@ -200,15 +184,9 @@ void MipsTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
// print out the code after the passes.
bool MipsPassConfig::addPreEmitPass() {
MipsTargetMachine &TM = getMipsTargetMachine();
- const MipsSubtarget &Subtarget = TM.getSubtarget<MipsSubtarget>();
addPass(createMipsDelaySlotFillerPass(TM));
-
- if (Subtarget.enableLongBranchPass())
- addPass(createMipsLongBranchPass(TM));
- if (Subtarget.inMips16Mode() ||
- Subtarget.allowMixed16_32())
- addPass(createMipsConstantIslandPass(TM));
-
+ addPass(createMipsLongBranchPass(TM));
+ addPass(createMipsConstantIslandPass(TM));
return true;
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h
index 5a9a11d..bcf411f 100644
--- a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h
+++ b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h
@@ -14,16 +14,9 @@
#ifndef MIPSTARGETMACHINE_H
#define MIPSTARGETMACHINE_H
-#include "MipsFrameLowering.h"
-#include "MipsISelLowering.h"
-#include "MipsInstrInfo.h"
-#include "MipsJITInfo.h"
-#include "MipsSelectionDAGInfo.h"
#include "MipsSubtarget.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/IR/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
@@ -32,70 +25,57 @@ class formatted_raw_ostream;
class MipsRegisterInfo;
class MipsTargetMachine : public LLVMTargetMachine {
- MipsSubtarget Subtarget;
- const DataLayout DL; // Calculates type size & alignment
- OwningPtr<const MipsInstrInfo> InstrInfo;
- OwningPtr<const MipsFrameLowering> FrameLowering;
- OwningPtr<const MipsTargetLowering> TLInfo;
- OwningPtr<const MipsInstrInfo> InstrInfo16;
- OwningPtr<const MipsFrameLowering> FrameLowering16;
- OwningPtr<const MipsTargetLowering> TLInfo16;
- OwningPtr<const MipsInstrInfo> InstrInfoSE;
- OwningPtr<const MipsFrameLowering> FrameLoweringSE;
- OwningPtr<const MipsTargetLowering> TLInfoSE;
- MipsSelectionDAGInfo TSInfo;
- const InstrItineraryData &InstrItins;
- MipsJITInfo JITInfo;
+ MipsSubtarget *Subtarget;
+ MipsSubtarget DefaultSubtarget;
+ MipsSubtarget NoMips16Subtarget;
+ MipsSubtarget Mips16Subtarget;
public:
- MipsTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL,
- bool isLittle);
+ MipsTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS,
+ const TargetOptions &Options, Reloc::Model RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL, bool isLittle);
virtual ~MipsTargetMachine() {}
- virtual void addAnalysisPasses(PassManagerBase &PM);
+ void addAnalysisPasses(PassManagerBase &PM) override;
- virtual const MipsInstrInfo *getInstrInfo() const
- { return InstrInfo.get(); }
- virtual const TargetFrameLowering *getFrameLowering() const
- { return FrameLowering.get(); }
- virtual const MipsSubtarget *getSubtargetImpl() const
- { return &Subtarget; }
- virtual const DataLayout *getDataLayout() const
- { return &DL;}
-
- virtual const InstrItineraryData *getInstrItineraryData() const {
- return Subtarget.inMips16Mode() ? 0 : &InstrItins;
+ const MipsInstrInfo *getInstrInfo() const override {
+ return getSubtargetImpl()->getInstrInfo();
}
-
- virtual MipsJITInfo *getJITInfo()
- { return &JITInfo; }
-
- virtual const MipsRegisterInfo *getRegisterInfo() const {
- return &InstrInfo->getRegisterInfo();
+ const TargetFrameLowering *getFrameLowering() const override {
+ return getSubtargetImpl()->getFrameLowering();
}
-
- virtual const MipsTargetLowering *getTargetLowering() const {
- return TLInfo.get();
+ const MipsSubtarget *getSubtargetImpl() const override {
+ if (Subtarget)
+ return Subtarget;
+ return &DefaultSubtarget;
}
-
- virtual const MipsSelectionDAGInfo* getSelectionDAGInfo() const {
- return &TSInfo;
+ const InstrItineraryData *getInstrItineraryData() const override {
+ return Subtarget->inMips16Mode()
+ ? nullptr
+ : &getSubtargetImpl()->getInstrItineraryData();
+ }
+ MipsJITInfo *getJITInfo() override {
+ return Subtarget->getJITInfo();
+ }
+ const MipsRegisterInfo *getRegisterInfo() const override {
+ return getSubtargetImpl()->getRegisterInfo();
}
+ const MipsTargetLowering *getTargetLowering() const override {
+ return getSubtargetImpl()->getTargetLowering();
+ }
+ const DataLayout *getDataLayout() const override {
+ return getSubtargetImpl()->getDataLayout();
+ }
+ const MipsSelectionDAGInfo* getSelectionDAGInfo() const override {
+ return getSubtargetImpl()->getSelectionDAGInfo();
+ }
+ /// \brief Reset the subtarget for the Mips target.
+ void resetSubtarget(MachineFunction *MF);
// Pass Pipeline Configuration
- virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
- virtual bool addCodeEmitter(PassManagerBase &PM, JITCodeEmitter &JCE);
-
- // Set helper classes
- void setHelperClassesMips16();
-
- void setHelperClassesMipsSE();
-
-
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+ bool addCodeEmitter(PassManagerBase &PM, JITCodeEmitter &JCE) override;
};
/// MipsebTargetMachine - Mips32/64 big endian target machine.
diff --git a/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp b/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp
index 4c748c5..13f9408 100644
--- a/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp
@@ -37,21 +37,6 @@ void MipsTargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &TM){
getContext().getELFSection(".sbss", ELF::SHT_NOBITS,
ELF::SHF_WRITE |ELF::SHF_ALLOC,
SectionKind::getBSS());
-
- // Register info information
- const MipsSubtarget &Subtarget = TM.getSubtarget<MipsSubtarget>();
- if (Subtarget.isABI_N64() || Subtarget.isABI_N32())
- ReginfoSection =
- getContext().getELFSection(".MIPS.options",
- ELF::SHT_MIPS_OPTIONS,
- ELF::SHF_ALLOC |ELF::SHF_MIPS_NOSTRIP,
- SectionKind::getMetadata());
- else
- ReginfoSection =
- getContext().getELFSection(".reginfo",
- ELF::SHT_MIPS_REGINFO,
- ELF::SHF_ALLOC,
- SectionKind::getMetadata());
}
 // An address must be loaded from a small section if its size is less than the
@@ -103,7 +88,7 @@ IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM,
const MCSection *MipsTargetObjectFile::
SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler *Mang, const TargetMachine &TM) const {
+ Mangler &Mang, const TargetMachine &TM) const {
// TODO: Could also support "weak" symbols as well with ".gnu.linkonce.s.*"
// sections?
diff --git a/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.h b/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.h
index c0e9140..2bf5a75 100644
--- a/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.h
+++ b/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.h
@@ -17,10 +17,9 @@ namespace llvm {
class MipsTargetObjectFile : public TargetLoweringObjectFileELF {
const MCSection *SmallDataSection;
const MCSection *SmallBSSSection;
- const MCSection *ReginfoSection;
public:
- void Initialize(MCContext &Ctx, const TargetMachine &TM);
+ void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
/// IsGlobalInSmallSection - Return true if this global address should be
@@ -31,12 +30,8 @@ namespace llvm {
const TargetMachine &TM) const;
const MCSection *SelectSectionForGlobal(const GlobalValue *GV,
- SectionKind Kind,
- Mangler *Mang,
- const TargetMachine &TM) const;
-
- // TODO: Classify globals as mips wishes.
- const MCSection *getReginfoSection() const { return ReginfoSection; }
+ SectionKind Kind, Mangler &Mang,
+ const TargetMachine &TM) const override;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/Mips/MipsTargetStreamer.h b/contrib/llvm/lib/Target/Mips/MipsTargetStreamer.h
index 96966fd..99f7d4c 100644
--- a/contrib/llvm/lib/Target/Mips/MipsTargetStreamer.h
+++ b/contrib/llvm/lib/Target/Mips/MipsTargetStreamer.h
@@ -12,14 +12,83 @@
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCStreamer.h"
+#include "MCTargetDesc/MipsABIFlagsSection.h"
namespace llvm {
-class MipsTargetStreamer : public MCTargetStreamer {
- virtual void anchor();
+struct MipsABIFlagsSection;
+
+class MipsTargetStreamer : public MCTargetStreamer {
public:
- virtual void emitMipsHackELFFlags(unsigned Flags) = 0;
- virtual void emitMipsHackSTOCG(MCSymbol *Sym, unsigned Val) = 0;
+ MipsTargetStreamer(MCStreamer &S);
+ virtual void emitDirectiveSetMicroMips();
+ virtual void emitDirectiveSetNoMicroMips();
+ virtual void emitDirectiveSetMips16();
+ virtual void emitDirectiveSetNoMips16();
+
+ virtual void emitDirectiveSetReorder();
+ virtual void emitDirectiveSetNoReorder();
+ virtual void emitDirectiveSetMacro();
+ virtual void emitDirectiveSetNoMacro();
+ virtual void emitDirectiveSetAt();
+ virtual void emitDirectiveSetNoAt();
+ virtual void emitDirectiveEnd(StringRef Name);
+
+ virtual void emitDirectiveEnt(const MCSymbol &Symbol);
+ virtual void emitDirectiveAbiCalls();
+ virtual void emitDirectiveNaN2008();
+ virtual void emitDirectiveNaNLegacy();
+ virtual void emitDirectiveOptionPic0();
+ virtual void emitDirectiveOptionPic2();
+ virtual void emitFrame(unsigned StackReg, unsigned StackSize,
+ unsigned ReturnReg);
+ virtual void emitMask(unsigned CPUBitmask, int CPUTopSavedRegOff);
+ virtual void emitFMask(unsigned FPUBitmask, int FPUTopSavedRegOff);
+
+ virtual void emitDirectiveSetMips32R2();
+ virtual void emitDirectiveSetMips64();
+ virtual void emitDirectiveSetMips64R2();
+ virtual void emitDirectiveSetDsp();
+
+ // PIC support
+ virtual void emitDirectiveCpload(unsigned RegNo);
+ virtual void emitDirectiveCpsetup(unsigned RegNo, int RegOrOffset,
+ const MCSymbol &Sym, bool IsReg);
+
+ /// Emit a '.module fp=value' directive using the given values.
+ /// Updates the .MIPS.abiflags section
+ virtual void emitDirectiveModuleFP(MipsABIFlagsSection::FpABIKind Value,
+ bool Is32BitABI) {
+ ABIFlagsSection.setFpABI(Value, Is32BitABI);
+ }
+
+ /// Emit a '.module fp=value' directive using the current values of the
+ /// .MIPS.abiflags section.
+ void emitDirectiveModuleFP() {
+ emitDirectiveModuleFP(ABIFlagsSection.getFpABI(),
+ ABIFlagsSection.Is32BitABI);
+ }
+
+ virtual void emitDirectiveModuleOddSPReg(bool Enabled, bool IsO32ABI);
+  virtual void emitDirectiveSetFp(MipsABIFlagsSection::FpABIKind Value) {}
+  virtual void emitMipsAbiFlags() {}
+ void setCanHaveModuleDir(bool Can) { canHaveModuleDirective = Can; }
+ bool getCanHaveModuleDir() { return canHaveModuleDirective; }
+
+ // This method enables template classes to set internal abi flags
+ // structure values.
+ template <class PredicateLibrary>
+ void updateABIInfo(const PredicateLibrary &P) {
+ ABIFlagsSection.setAllFromPredicates(P);
+ }
+
+ MipsABIFlagsSection &getABIFlagsSection() { return ABIFlagsSection; }
+
+protected:
+ MipsABIFlagsSection ABIFlagsSection;
+
+private:
+ bool canHaveModuleDirective;
};
// This part is for ascii assembly output
@@ -27,18 +96,106 @@ class MipsTargetAsmStreamer : public MipsTargetStreamer {
formatted_raw_ostream &OS;
public:
- MipsTargetAsmStreamer(formatted_raw_ostream &OS);
- virtual void emitMipsHackELFFlags(unsigned Flags);
- virtual void emitMipsHackSTOCG(MCSymbol *Sym, unsigned Val);
+ MipsTargetAsmStreamer(MCStreamer &S, formatted_raw_ostream &OS);
+ void emitDirectiveSetMicroMips() override;
+ void emitDirectiveSetNoMicroMips() override;
+ void emitDirectiveSetMips16() override;
+ void emitDirectiveSetNoMips16() override;
+
+ void emitDirectiveSetReorder() override;
+ void emitDirectiveSetNoReorder() override;
+ void emitDirectiveSetMacro() override;
+ void emitDirectiveSetNoMacro() override;
+ void emitDirectiveSetAt() override;
+ void emitDirectiveSetNoAt() override;
+ void emitDirectiveEnd(StringRef Name) override;
+
+ void emitDirectiveEnt(const MCSymbol &Symbol) override;
+ void emitDirectiveAbiCalls() override;
+ void emitDirectiveNaN2008() override;
+ void emitDirectiveNaNLegacy() override;
+ void emitDirectiveOptionPic0() override;
+ void emitDirectiveOptionPic2() override;
+ void emitFrame(unsigned StackReg, unsigned StackSize,
+ unsigned ReturnReg) override;
+ void emitMask(unsigned CPUBitmask, int CPUTopSavedRegOff) override;
+ void emitFMask(unsigned FPUBitmask, int FPUTopSavedRegOff) override;
+
+ void emitDirectiveSetMips32R2() override;
+ void emitDirectiveSetMips64() override;
+ void emitDirectiveSetMips64R2() override;
+ void emitDirectiveSetDsp() override;
+
+ // PIC support
+ virtual void emitDirectiveCpload(unsigned RegNo);
+ void emitDirectiveCpsetup(unsigned RegNo, int RegOrOffset,
+ const MCSymbol &Sym, bool IsReg) override;
+
+ // ABI Flags
+ void emitDirectiveModuleFP(MipsABIFlagsSection::FpABIKind Value,
+ bool Is32BitABI) override;
+ void emitDirectiveModuleOddSPReg(bool Enabled, bool IsO32ABI) override;
+ void emitDirectiveSetFp(MipsABIFlagsSection::FpABIKind Value) override;
+ void emitMipsAbiFlags() override;
};
// This part is for ELF object output
class MipsTargetELFStreamer : public MipsTargetStreamer {
+ bool MicroMipsEnabled;
+ const MCSubtargetInfo &STI;
+ bool Pic;
+
public:
+ bool isMicroMipsEnabled() const { return MicroMipsEnabled; }
MCELFStreamer &getStreamer();
- virtual void emitMipsHackELFFlags(unsigned Flags);
- virtual void emitMipsHackSTOCG(MCSymbol *Sym, unsigned Val);
+ MipsTargetELFStreamer(MCStreamer &S, const MCSubtargetInfo &STI);
+
+ void emitLabel(MCSymbol *Symbol) override;
+ void emitAssignment(MCSymbol *Symbol, const MCExpr *Value) override;
+ void finish() override;
+
+ void emitDirectiveSetMicroMips() override;
+ void emitDirectiveSetNoMicroMips() override;
+ void emitDirectiveSetMips16() override;
+ void emitDirectiveSetNoMips16() override;
+
+ void emitDirectiveSetReorder() override;
+ void emitDirectiveSetNoReorder() override;
+ void emitDirectiveSetMacro() override;
+ void emitDirectiveSetNoMacro() override;
+ void emitDirectiveSetAt() override;
+ void emitDirectiveSetNoAt() override;
+ void emitDirectiveEnd(StringRef Name) override;
+
+ void emitDirectiveEnt(const MCSymbol &Symbol) override;
+ void emitDirectiveAbiCalls() override;
+ void emitDirectiveNaN2008() override;
+ void emitDirectiveNaNLegacy() override;
+ void emitDirectiveOptionPic0() override;
+ void emitDirectiveOptionPic2() override;
+ void emitFrame(unsigned StackReg, unsigned StackSize,
+ unsigned ReturnReg) override;
+ void emitMask(unsigned CPUBitmask, int CPUTopSavedRegOff) override;
+ void emitFMask(unsigned FPUBitmask, int FPUTopSavedRegOff) override;
+
+ void emitDirectiveSetMips32R2() override;
+ void emitDirectiveSetMips64() override;
+ void emitDirectiveSetMips64R2() override;
+ void emitDirectiveSetDsp() override;
+
+ // PIC support
+ virtual void emitDirectiveCpload(unsigned RegNo);
+ void emitDirectiveCpsetup(unsigned RegNo, int RegOrOffset,
+ const MCSymbol &Sym, bool IsReg) override;
+
+ // ABI Flags
+ void emitDirectiveModuleOddSPReg(bool Enabled, bool IsO32ABI) override;
+ void emitMipsAbiFlags() override;
+
+protected:
+ bool isO32() const { return STI.getFeatureBits() & Mips::FeatureO32; }
+ bool isN32() const { return STI.getFeatureBits() & Mips::FeatureN32; }
+ bool isN64() const { return STI.getFeatureBits() & Mips::FeatureN64; }
};
}
-
#endif
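
The new MipsTargetStreamer above owns a MipsABIFlagsSection and fills it through the updateABIInfo() template, so any object exposing the expected query methods (the subtarget, or the asm parser's option record) can populate the .MIPS.abiflags data without sharing a base class. A standalone sketch of that predicate-library pattern follows; it is not part of the commit, and the predicate names isABI_O32()/isFP64bit() are illustrative stand-ins, since the real setAllFromPredicates() interface lives in MipsABIFlagsSection.h and is not shown here.

// Standalone sketch (not LLVM code) of the predicate-library pattern behind
// updateABIInfo(): the flags record pulls everything it needs from whatever
// object is passed in, as long as that object provides the query methods.
#include <cstdio>

struct FlagsRecord {
  bool Is32BitABI = false;
  bool IsFP64 = false;

  // Mirrors the shape of setAllFromPredicates(): read all fields from the
  // predicate library P. The method names here are hypothetical.
  template <class PredicateLibrary>
  void setAllFromPredicates(const PredicateLibrary &P) {
    Is32BitABI = P.isABI_O32();
    IsFP64 = P.isFP64bit();
  }
};

// One possible predicate library; in LLVM this role is played by the
// subtarget or by the assembler's option object.
struct FakeSubtarget {
  bool isABI_O32() const { return true; }
  bool isFP64bit() const { return false; }
};

int main() {
  FlagsRecord Flags;
  FakeSubtarget STI;
  Flags.setAllFromPredicates(STI); // same call shape as updateABIInfo(STI)
  std::printf("O32=%d FP64=%d\n", Flags.Is32BitABI, Flags.IsFP64);
}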
diff --git a/contrib/llvm/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp b/contrib/llvm/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp
index d5be0e4..80b2f62 100644
--- a/contrib/llvm/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp
@@ -11,20 +11,21 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
#include "InstPrinter/NVPTXInstPrinter.h"
-#include "NVPTX.h"
#include "MCTargetDesc/NVPTXBaseInfo.h"
+#include "NVPTX.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include <cctype>
using namespace llvm;
+#define DEBUG_TYPE "asm-printer"
+
#include "NVPTXGenAsmWriter.inc"
@@ -56,13 +57,13 @@ void NVPTXInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
OS << "%r";
break;
case 4:
- OS << "%rl";
+ OS << "%rd";
break;
case 5:
OS << "%f";
break;
case 6:
- OS << "%fl";
+ OS << "%fd";
break;
}
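
This hunk renames the printed prefixes for the 64-bit register classes from %rl/%fl to %rd/%fd, matching the conventional PTX names for 64-bit integer and double virtual registers; the same renaming appears later in the commented register declarations in NVPTXAsmPrinter.cpp. A compressed sketch of the mapping, covering only the cases visible in this hunk:

// Minimal sketch of the prefix mapping after this change. Only the cases
// shown in the hunk are reproduced; the remaining register classes are
// collapsed into the default here for brevity.
#include <cstdio>

static const char *regPrefix(unsigned RCId) {
  switch (RCId) {
  case 4: return "%rd"; // 64-bit integer registers (previously "%rl")
  case 5: return "%f";  // 32-bit float registers
  case 6: return "%fd"; // 64-bit float registers (previously "%fl")
  default: return "%r"; // simplified; the real switch handles other classes
  }
}

int main() {
  std::printf("%s %s\n", regPrefix(4), regPrefix(6)); // prints: %rd %fd
}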
diff --git a/contrib/llvm/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.h b/contrib/llvm/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.h
index 93029ae..1fb3c57 100644
--- a/contrib/llvm/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.h
+++ b/contrib/llvm/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.h
@@ -27,8 +27,8 @@ public:
NVPTXInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
const MCRegisterInfo &MRI, const MCSubtargetInfo &STI);
- virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
- virtual void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot);
+ void printRegName(raw_ostream &OS, unsigned RegNo) const override;
+ void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot) override;
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &O);
@@ -37,15 +37,15 @@ public:
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printCvtMode(const MCInst *MI, int OpNum, raw_ostream &O,
- const char *Modifier = 0);
+ const char *Modifier = nullptr);
void printCmpMode(const MCInst *MI, int OpNum, raw_ostream &O,
- const char *Modifier = 0);
+ const char *Modifier = nullptr);
void printLdStCode(const MCInst *MI, int OpNum,
- raw_ostream &O, const char *Modifier = 0);
+ raw_ostream &O, const char *Modifier = nullptr);
void printMemOperand(const MCInst *MI, int OpNum,
- raw_ostream &O, const char *Modifier = 0);
+ raw_ostream &O, const char *Modifier = nullptr);
void printProtoIdent(const MCInst *MI, int OpNum,
- raw_ostream &O, const char *Modifier = 0);
+ raw_ostream &O, const char *Modifier = nullptr);
};
}
diff --git a/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h b/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h
index edf4a80..16ec19c 100644
--- a/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h
+++ b/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h
@@ -43,14 +43,16 @@ enum PropertyAnnotation {
PROPERTY_ISSAMPLER,
PROPERTY_ISREADONLY_IMAGE_PARAM,
PROPERTY_ISWRITEONLY_IMAGE_PARAM,
+ PROPERTY_ISREADWRITE_IMAGE_PARAM,
PROPERTY_ISKERNEL_FUNCTION,
PROPERTY_ALIGN,
+ PROPERTY_MANAGED,
// last property
PROPERTY_LAST
};
-const unsigned AnnotationNameLen = 8; // length of each annotation name
+const unsigned AnnotationNameLen = 9; // length of each annotation name
const char PropertyAnnotationNames[PROPERTY_LAST + 1][AnnotationNameLen + 1] = {
"maxntidx", // PROPERTY_MAXNTID_X
"maxntidy", // PROPERTY_MAXNTID_Y
@@ -64,8 +66,10 @@ const char PropertyAnnotationNames[PROPERTY_LAST + 1][AnnotationNameLen + 1] = {
"sampler", // PROPERTY_ISSAMPLER
"rdoimage", // PROPERTY_ISREADONLY_IMAGE_PARAM
"wroimage", // PROPERTY_ISWRITEONLY_IMAGE_PARAM
+ "rdwrimage", // PROPERTY_ISREADWRITE_IMAGE_PARAM
"kernel", // PROPERTY_ISKERNEL_FUNCTION
"align", // PROPERTY_ALIGN
+ "managed", // PROPERTY_MANAGED
// last property
"proplast", // PROPERTY_LAST
@@ -80,6 +84,17 @@ __attribute__((unused))
#endif
static const char *NamedMDForAnnotations = "nvvm.annotations";
+namespace NVPTXII {
+enum {
+ // These must be kept in sync with TSFlags in NVPTXInstrFormats.td
+ IsTexFlag = 0x80,
+ IsSuldMask = 0x300,
+ IsSuldShift = 8,
+ IsSustFlag = 0x400,
+ IsSurfTexQueryFlag = 0x800,
+ IsTexModeUnifiedFlag = 0x1000
+};
+}
}
#endif
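
The new NVPTXII enum packs instruction properties into TSFlags; the IsSuldMask bits are consumed later in this commit by NVPTXAsmPrinter::lowerImageHandleOperand to recover the vector width of a surface load. A self-contained sketch of that decoding follows: the constants are copied from the enum, the expression from the asm printer, and the log2-plus-one interpretation is inferred from that expression.

// Sketch of decoding the new TSFlags bits (not part of the commit).
#include <cassert>
#include <cstdint>

enum : uint64_t {
  IsTexFlag = 0x80,
  IsSuldMask = 0x300,
  IsSuldShift = 8,
  IsSustFlag = 0x400,
  IsSurfTexQueryFlag = 0x800,
  IsTexModeUnifiedFlag = 0x1000
};

// For surface loads, the two IsSuldMask bits appear to hold log2(width) + 1,
// so field values 1/2/3 correspond to v1/v2/v4 loads. This mirrors the
// expression used in lowerImageHandleOperand below.
static unsigned suldVectorSize(uint64_t TSFlags) {
  return 1u << (((TSFlags & IsSuldMask) >> IsSuldShift) - 1);
}

int main() {
  assert(suldVectorSize(0x100) == 1);
  assert(suldVectorSize(0x200) == 2);
  assert(suldVectorSize(0x300) == 4);
  assert((IsTexFlag & 0x80) != 0);
  return 0;
}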
diff --git a/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp b/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp
index f2784b8..366341a 100644
--- a/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp
@@ -33,8 +33,6 @@ NVPTXMCAsmInfo::NVPTXMCAsmInfo(const StringRef &TT) {
CommentString = "//";
- PrivateGlobalPrefix = "$L__";
-
HasSetDirective = false;
HasSingleParameterDotFile = false;
@@ -49,7 +47,6 @@ NVPTXMCAsmInfo::NVPTXMCAsmInfo(const StringRef &TT) {
Data16bitsDirective = " .b16 ";
Data32bitsDirective = " .b32 ";
Data64bitsDirective = " .b64 ";
- PrivateGlobalPrefix = "";
ZeroDirective = " .b8";
AsciiDirective = " .b8";
AscizDirective = " .b8";
diff --git a/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp b/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp
index 871bac9..158ca90 100644
--- a/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp
@@ -12,14 +12,16 @@
//===----------------------------------------------------------------------===//
#include "NVPTXMCTargetDesc.h"
-#include "NVPTXMCAsmInfo.h"
#include "InstPrinter/NVPTXInstPrinter.h"
+#include "NVPTXMCAsmInfo.h"
#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
#define GET_INSTRINFO_MC_DESC
#include "NVPTXGenInstrInfo.inc"
@@ -29,8 +31,6 @@
#define GET_REGINFO_MC_DESC
#include "NVPTXGenRegisterInfo.inc"
-using namespace llvm;
-
static MCInstrInfo *createNVPTXMCInstrInfo() {
MCInstrInfo *X = new MCInstrInfo();
InitNVPTXMCInstrInfo(X);
@@ -66,7 +66,7 @@ static MCInstPrinter *createNVPTXMCInstPrinter(const Target &T,
const MCSubtargetInfo &STI) {
if (SyntaxVariant == 0)
return new NVPTXInstPrinter(MAI, MII, MRI, STI);
- return 0;
+ return nullptr;
}
// Force static initialization.
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTX.h b/contrib/llvm/lib/Target/NVPTX/NVPTX.h
index 490b49d..e74c808 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTX.h
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTX.h
@@ -61,10 +61,14 @@ inline static const char *NVPTXCondCodeToString(NVPTXCC::CondCodes CC) {
FunctionPass *
createNVPTXISelDag(NVPTXTargetMachine &TM, llvm::CodeGenOpt::Level OptLevel);
+ModulePass *createNVPTXAssignValidGlobalNamesPass();
ModulePass *createGenericToNVVMPass();
+FunctionPass *createNVPTXFavorNonGenericAddrSpacesPass();
ModulePass *createNVVMReflectPass();
ModulePass *createNVVMReflectPass(const StringMap<int>& Mapping);
MachineFunctionPass *createNVPTXPrologEpilogPass();
+MachineFunctionPass *createNVPTXReplaceImageHandlesPass();
+FunctionPass *createNVPTXImageOptimizerPass();
bool isImageOrSamplerVal(const Value *, const Module *);
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTX.td b/contrib/llvm/lib/Target/NVPTX/NVPTX.td
index 6183a75..93fabf6 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTX.td
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTX.td
@@ -34,12 +34,18 @@ def SM30 : SubtargetFeature<"sm_30", "SmVersion", "30",
"Target SM 3.0">;
def SM35 : SubtargetFeature<"sm_35", "SmVersion", "35",
"Target SM 3.5">;
+def SM50 : SubtargetFeature<"sm_50", "SmVersion", "50",
+ "Target SM 5.0">;
// PTX Versions
def PTX30 : SubtargetFeature<"ptx30", "PTXVersion", "30",
"Use PTX version 3.0">;
def PTX31 : SubtargetFeature<"ptx31", "PTXVersion", "31",
"Use PTX version 3.1">;
+def PTX32 : SubtargetFeature<"ptx32", "PTXVersion", "32",
+ "Use PTX version 3.2">;
+def PTX40 : SubtargetFeature<"ptx40", "PTXVersion", "40",
+ "Use PTX version 4.0">;
//===----------------------------------------------------------------------===//
// NVPTX supported processors.
@@ -52,17 +58,12 @@ def : Proc<"sm_20", [SM20]>;
def : Proc<"sm_21", [SM21]>;
def : Proc<"sm_30", [SM30]>;
def : Proc<"sm_35", [SM35]>;
+def : Proc<"sm_50", [SM50]>;
def NVPTXInstrInfo : InstrInfo {
}
-def NVPTXAsmWriter : AsmWriter {
- bit isMCAsmWriter = 1;
- string AsmWriterClassName = "InstPrinter";
-}
-
def NVPTX : Target {
let InstructionSet = NVPTXInstrInfo;
- let AssemblyWriters = [NVPTXAsmWriter];
}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.h b/contrib/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.h
index 19d73c5..5b61068 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.h
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.h
@@ -30,16 +30,17 @@ public:
static char ID; // Pass ID
NVPTXAllocaHoisting() : FunctionPass(ID) {}
- void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<DataLayout>();
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<DataLayoutPass>();
+ AU.addPreserved("stack-protector");
AU.addPreserved<MachineFunctionAnalysis>();
}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "NVPTX specific alloca hoisting";
}
- virtual bool runOnFunction(Function &function);
+ bool runOnFunction(Function &function) override;
};
extern FunctionPass *createAllocaHoisting();
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index 7552fe7..187b88c 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -13,26 +13,27 @@
//===----------------------------------------------------------------------===//
#include "NVPTXAsmPrinter.h"
+#include "InstPrinter/NVPTXInstPrinter.h"
#include "MCTargetDesc/NVPTXMCAsmInfo.h"
#include "NVPTX.h"
#include "NVPTXInstrInfo.h"
+#include "NVPTXMachineFunctionInfo.h"
#include "NVPTXMCExpr.h"
#include "NVPTXRegisterInfo.h"
#include "NVPTXTargetMachine.h"
#include "NVPTXUtilities.h"
-#include "InstPrinter/NVPTXInstPrinter.h"
#include "cl_common_defines.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/MC/MCStreamer.h"
@@ -43,7 +44,6 @@
#include "llvm/Support/Path.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TimeValue.h"
-#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include <sstream>
using namespace llvm;
@@ -132,7 +132,7 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) {
return MCSymbolRefExpr::Create(AP.GetBlockAddressSymbol(BA), Ctx);
const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV);
- if (CE == 0)
+ if (!CE)
llvm_unreachable("Unknown constant value to lower!");
switch (CE->getOpcode()) {
@@ -149,10 +149,25 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) {
std::string S;
raw_string_ostream OS(S);
OS << "Unsupported expression in static initializer: ";
- WriteAsOperand(OS, CE, /*PrintType=*/ false,
- !AP.MF ? 0 : AP.MF->getFunction()->getParent());
+ CE->printAsOperand(OS, /*PrintType=*/ false,
+ !AP.MF ? nullptr : AP.MF->getFunction()->getParent());
report_fatal_error(OS.str());
}
+ case Instruction::AddrSpaceCast: {
+ // Strip any addrspace(1)->addrspace(0) addrspace casts. These will be
+ // handled by the generic() logic in the MCExpr printer
+ PointerType *DstTy = cast<PointerType>(CE->getType());
+ PointerType *SrcTy = cast<PointerType>(CE->getOperand(0)->getType());
+ if (SrcTy->getAddressSpace() == 1 && DstTy->getAddressSpace() == 0) {
+ return LowerConstant(cast<const Constant>(CE->getOperand(0)), AP);
+ }
+ std::string S;
+ raw_string_ostream OS(S);
+ OS << "Unsupported expression in static initializer: ";
+ CE->printAsOperand(OS, /*PrintType=*/ false,
+ !AP.MF ? nullptr : AP.MF->getFunction()->getParent());
+ report_fatal_error(OS.str());
+ }
case Instruction::GetElementPtr: {
const DataLayout &TD = *AP.TM.getDataLayout();
// Generate a symbolic expression for the byte address
@@ -308,16 +323,80 @@ void NVPTXAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCInst Inst;
lowerToMCInst(MI, Inst);
- OutStreamer.EmitInstruction(Inst);
+ EmitToStreamer(OutStreamer, Inst);
+}
+
+// Handle symbol backtracking for targets that do not support image handles
+bool NVPTXAsmPrinter::lowerImageHandleOperand(const MachineInstr *MI,
+ unsigned OpNo, MCOperand &MCOp) {
+ const MachineOperand &MO = MI->getOperand(OpNo);
+ const MCInstrDesc &MCID = MI->getDesc();
+
+ if (MCID.TSFlags & NVPTXII::IsTexFlag) {
+ // This is a texture fetch, so operand 4 is a texref and operand 5 is
+ // a samplerref
+ if (OpNo == 4 && MO.isImm()) {
+ lowerImageHandleSymbol(MO.getImm(), MCOp);
+ return true;
+ }
+ if (OpNo == 5 && MO.isImm() && !(MCID.TSFlags & NVPTXII::IsTexModeUnifiedFlag)) {
+ lowerImageHandleSymbol(MO.getImm(), MCOp);
+ return true;
+ }
+
+ return false;
+ } else if (MCID.TSFlags & NVPTXII::IsSuldMask) {
+ unsigned VecSize =
+ 1 << (((MCID.TSFlags & NVPTXII::IsSuldMask) >> NVPTXII::IsSuldShift) - 1);
+
+ // For a surface load of vector size N, the Nth operand will be the surfref
+ if (OpNo == VecSize && MO.isImm()) {
+ lowerImageHandleSymbol(MO.getImm(), MCOp);
+ return true;
+ }
+
+ return false;
+ } else if (MCID.TSFlags & NVPTXII::IsSustFlag) {
+ // This is a surface store, so operand 0 is a surfref
+ if (OpNo == 0 && MO.isImm()) {
+ lowerImageHandleSymbol(MO.getImm(), MCOp);
+ return true;
+ }
+
+ return false;
+ } else if (MCID.TSFlags & NVPTXII::IsSurfTexQueryFlag) {
+ // This is a query, so operand 1 is a surfref/texref
+ if (OpNo == 1 && MO.isImm()) {
+ lowerImageHandleSymbol(MO.getImm(), MCOp);
+ return true;
+ }
+
+ return false;
+ }
+
+ return false;
+}
+
+void NVPTXAsmPrinter::lowerImageHandleSymbol(unsigned Index, MCOperand &MCOp) {
+ // Ewwww
+ TargetMachine &TM = const_cast<TargetMachine&>(MF->getTarget());
+ NVPTXTargetMachine &nvTM = static_cast<NVPTXTargetMachine&>(TM);
+ const NVPTXMachineFunctionInfo *MFI = MF->getInfo<NVPTXMachineFunctionInfo>();
+ const char *Sym = MFI->getImageHandleSymbol(Index);
+ std::string *SymNamePtr =
+ nvTM.getManagedStrPool()->getManagedString(Sym);
+ MCOp = GetSymbolRef(OutContext.GetOrCreateSymbol(
+ StringRef(SymNamePtr->c_str())));
}
void NVPTXAsmPrinter::lowerToMCInst(const MachineInstr *MI, MCInst &OutMI) {
OutMI.setOpcode(MI->getOpcode());
+ const NVPTXSubtarget &ST = TM.getSubtarget<NVPTXSubtarget>();
// Special: Do not mangle symbol operand of CALL_PROTOTYPE
if (MI->getOpcode() == NVPTX::CALL_PROTOTYPE) {
const MachineOperand &MO = MI->getOperand(0);
- OutMI.addOperand(GetSymbolRef(MO,
+ OutMI.addOperand(GetSymbolRef(
OutContext.GetOrCreateSymbol(Twine(MO.getSymbolName()))));
return;
}
@@ -326,6 +405,13 @@ void NVPTXAsmPrinter::lowerToMCInst(const MachineInstr *MI, MCInst &OutMI) {
const MachineOperand &MO = MI->getOperand(i);
MCOperand MCOp;
+ if (!ST.hasImageHandles()) {
+ if (lowerImageHandleOperand(MI, i, MCOp)) {
+ OutMI.addOperand(MCOp);
+ continue;
+ }
+ }
+
if (lowerOperand(MO, MCOp))
OutMI.addOperand(MCOp);
}
@@ -346,10 +432,10 @@ bool NVPTXAsmPrinter::lowerOperand(const MachineOperand &MO,
MO.getMBB()->getSymbol(), OutContext));
break;
case MachineOperand::MO_ExternalSymbol:
- MCOp = GetSymbolRef(MO, GetExternalSymbolSymbol(MO.getSymbolName()));
+ MCOp = GetSymbolRef(GetExternalSymbolSymbol(MO.getSymbolName()));
break;
case MachineOperand::MO_GlobalAddress:
- MCOp = GetSymbolRef(MO, getSymbol(MO.getGlobal()));
+ MCOp = GetSymbolRef(getSymbol(MO.getGlobal()));
break;
case MachineOperand::MO_FPImmediate: {
const ConstantFP *Cnt = MO.getFPImm();
@@ -408,8 +494,7 @@ unsigned NVPTXAsmPrinter::encodeVirtualRegister(unsigned Reg) {
}
}
-MCOperand NVPTXAsmPrinter::GetSymbolRef(const MachineOperand &MO,
- const MCSymbol *Symbol) {
+MCOperand NVPTXAsmPrinter::GetSymbolRef(const MCSymbol *Symbol) {
const MCExpr *Expr;
Expr = MCSymbolRefExpr::Create(Symbol, MCSymbolRefExpr::VK_None,
OutContext);
@@ -430,7 +515,7 @@ void NVPTXAsmPrinter::printReturnValStr(const Function *F, raw_ostream &O) {
O << " (";
if (isABI) {
- if (Ty->isPrimitiveType() || Ty->isIntegerTy()) {
+ if (Ty->isFloatingPointTy() || Ty->isIntegerTy()) {
unsigned size = 0;
if (const IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
size = ITy->getBitWidth();
@@ -447,23 +532,7 @@ void NVPTXAsmPrinter::printReturnValStr(const Function *F, raw_ostream &O) {
<< " func_retval0";
} else {
if ((Ty->getTypeID() == Type::StructTyID) || isa<VectorType>(Ty)) {
- SmallVector<EVT, 16> vtparts;
- ComputeValueVTs(*TLI, Ty, vtparts);
- unsigned totalsz = 0;
- for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
- unsigned elems = 1;
- EVT elemtype = vtparts[i];
- if (vtparts[i].isVector()) {
- elems = vtparts[i].getVectorNumElements();
- elemtype = vtparts[i].getVectorElementType();
- }
- for (unsigned j = 0, je = elems; j != je; ++j) {
- unsigned sz = elemtype.getSizeInBits();
- if (elemtype.isInteger() && (sz < 8))
- sz = 8;
- totalsz += sz / 8;
- }
- }
+ unsigned totalsz = TD->getTypeAllocSize(Ty);
unsigned retAlignment = 0;
if (!llvm::getAlign(*F, 0, retAlignment))
retAlignment = TD->getABITypeAlignment(Ty);
@@ -700,12 +769,11 @@ static bool usedInGlobalVarDef(const Constant *C) {
return true;
}
- for (Value::const_use_iterator ui = C->use_begin(), ue = C->use_end();
- ui != ue; ++ui) {
- const Constant *C = dyn_cast<Constant>(*ui);
- if (usedInGlobalVarDef(C))
- return true;
- }
+ for (const User *U : C->users())
+ if (const Constant *C = dyn_cast<Constant>(U))
+ if (usedInGlobalVarDef(C))
+ return true;
+
return false;
}
@@ -731,11 +799,10 @@ static bool usedInOneFunc(const User *U, Function const *&oneFunc) {
(md->getName().str() == "llvm.dbg.sp")))
return true;
- for (User::const_use_iterator ui = U->use_begin(), ue = U->use_end();
- ui != ue; ++ui) {
- if (usedInOneFunc(*ui, oneFunc) == false)
+ for (const User *UU : U->users())
+ if (usedInOneFunc(UU, oneFunc) == false)
return false;
- }
+
return true;
}
@@ -753,7 +820,7 @@ static bool canDemoteGlobalVar(const GlobalVariable *gv, Function const *&f) {
if (Pty->getAddressSpace() != llvm::ADDRESS_SPACE_SHARED)
return false;
- const Function *oneFunc = 0;
+ const Function *oneFunc = nullptr;
bool flag = usedInOneFunc(gv, oneFunc);
if (flag == false)
@@ -766,12 +833,11 @@ static bool canDemoteGlobalVar(const GlobalVariable *gv, Function const *&f) {
static bool useFuncSeen(const Constant *C,
llvm::DenseMap<const Function *, bool> &seenMap) {
- for (Value::const_use_iterator ui = C->use_begin(), ue = C->use_end();
- ui != ue; ++ui) {
- if (const Constant *cu = dyn_cast<Constant>(*ui)) {
+ for (const User *U : C->users()) {
+ if (const Constant *cu = dyn_cast<Constant>(U)) {
if (useFuncSeen(cu, seenMap))
return true;
- } else if (const Instruction *I = dyn_cast<Instruction>(*ui)) {
+ } else if (const Instruction *I = dyn_cast<Instruction>(U)) {
const BasicBlock *bb = I->getParent();
if (!bb)
continue;
@@ -798,10 +864,8 @@ void NVPTXAsmPrinter::emitDeclarations(const Module &M, raw_ostream &O) {
emitDeclaration(F, O);
continue;
}
- for (Value::const_use_iterator iter = F->use_begin(),
- iterEnd = F->use_end();
- iter != iterEnd; ++iter) {
- if (const Constant *C = dyn_cast<Constant>(*iter)) {
+ for (const User *U : F->users()) {
+ if (const Constant *C = dyn_cast<Constant>(U)) {
if (usedInGlobalVarDef(C)) {
// The use is in the initialization of a global variable
// that is a function pointer, so print a declaration
@@ -817,9 +881,9 @@ void NVPTXAsmPrinter::emitDeclarations(const Module &M, raw_ostream &O) {
}
}
- if (!isa<Instruction>(*iter))
+ if (!isa<Instruction>(U))
continue;
- const Instruction *instr = cast<Instruction>(*iter);
+ const Instruction *instr = cast<Instruction>(U);
const BasicBlock *bb = instr->getParent();
if (!bb)
continue;
@@ -844,10 +908,7 @@ void NVPTXAsmPrinter::recordAndEmitFilenames(Module &M) {
DbgFinder.processModule(M);
unsigned i = 1;
- for (DebugInfoFinder::iterator I = DbgFinder.compile_unit_begin(),
- E = DbgFinder.compile_unit_end();
- I != E; ++I) {
- DICompileUnit DIUnit(*I);
+ for (DICompileUnit DIUnit : DbgFinder.compile_units()) {
StringRef Filename(DIUnit.getFilename());
StringRef Dirname(DIUnit.getDirectory());
SmallString<128> FullPathName = Dirname;
@@ -862,10 +923,7 @@ void NVPTXAsmPrinter::recordAndEmitFilenames(Module &M) {
++i;
}
- for (DebugInfoFinder::iterator I = DbgFinder.subprogram_begin(),
- E = DbgFinder.subprogram_end();
- I != E; ++I) {
- DISubprogram SP(*I);
+ for (DISubprogram SP : DbgFinder.subprograms()) {
StringRef Filename(SP.getFilename());
StringRef Dirname(SP.getDirectory());
SmallString<128> FullPathName = Dirname;
@@ -895,7 +953,7 @@ bool NVPTXAsmPrinter::doInitialization(Module &M) {
const_cast<TargetLoweringObjectFile &>(getObjFileLowering())
.Initialize(OutContext, TM);
- Mang = new Mangler(&TM);
+ Mang = new Mangler(TM.getDataLayout());
// Emit header before any dwarf directives are emitted below.
emitHeader(M, OS1);
@@ -1022,6 +1080,8 @@ bool NVPTXAsmPrinter::doFinalization(Module &M) {
for (i = 0; i < n; i++)
global_list.insert(global_list.end(), gv_array[i]);
+ clearAnnotationCache(&M);
+
delete[] gv_array;
return ret;
@@ -1043,6 +1103,10 @@ bool NVPTXAsmPrinter::doFinalization(Module &M) {
// external global variable with init -> .visible
// external without init -> .extern
// appending -> not allowed, assert.
+// for any linkage other than
+// internal, private, linker_private,
+// linker_private_weak, linker_private_weak_def_auto,
+// we emit -> .weak.
void NVPTXAsmPrinter::emitLinkageDirective(const GlobalValue *V,
raw_ostream &O) {
@@ -1068,6 +1132,9 @@ void NVPTXAsmPrinter::emitLinkageDirective(const GlobalValue *V,
msg.append(V->getName().str());
msg.append("has unsupported appending linkage type");
llvm_unreachable(msg.c_str());
+ } else if (!V->hasInternalLinkage() &&
+ !V->hasPrivateLinkage()) {
+ O << ".weak ";
}
}
}
@@ -1078,10 +1145,15 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
// Skip meta data
if (GVar->hasSection()) {
- if (GVar->getSection() == "llvm.metadata")
+ if (GVar->getSection() == StringRef("llvm.metadata"))
return;
}
+ // Skip LLVM intrinsic global variables
+ if (GVar->getName().startswith("llvm.") ||
+ GVar->getName().startswith("nvvm."))
+ return;
+
const DataLayout *TD = TM.getDataLayout();
// GlobalVariables are always constant pointers themselves.
@@ -1093,6 +1165,10 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
O << ".visible ";
else
O << ".extern ";
+ } else if (GVar->hasLinkOnceLinkage() || GVar->hasWeakLinkage() ||
+ GVar->hasAvailableExternallyLinkage() ||
+ GVar->hasCommonLinkage()) {
+ O << ".weak ";
}
if (llvm::isTexture(*GVar)) {
@@ -1117,10 +1193,10 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
if (llvm::isSampler(*GVar)) {
O << ".global .samplerref " << llvm::getSamplerName(*GVar);
- const Constant *Initializer = NULL;
+ const Constant *Initializer = nullptr;
if (GVar->hasInitializer())
Initializer = GVar->getInitializer();
- const ConstantInt *CI = NULL;
+ const ConstantInt *CI = nullptr;
if (Initializer)
CI = dyn_cast<ConstantInt>(Initializer);
if (CI) {
@@ -1160,7 +1236,7 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
O << "linear";
break;
case 2:
- assert(0 && "Anisotropic filtering is not supported");
+ llvm_unreachable("Anisotropic filtering is not supported");
default:
O << "nearest";
break;
@@ -1187,7 +1263,7 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
return;
}
- const Function *demotedFunc = 0;
+ const Function *demotedFunc = nullptr;
if (!processDemoted && canDemoteGlobalVar(GVar, demotedFunc)) {
O << "// " << GVar->getName().str() << " has been demoted\n";
if (localDecls.find(demotedFunc) != localDecls.end())
@@ -1202,12 +1278,17 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
O << ".";
emitPTXAddressSpace(PTy->getAddressSpace(), O);
+
+ if (isManaged(*GVar)) {
+ O << " .attribute(.managed)";
+ }
+
if (GVar->getAlignment() == 0)
O << " .align " << (int) TD->getPrefTypeAlignment(ETy);
else
O << " .align " << GVar->getAlignment();
- if (ETy->isPrimitiveType() || ETy->isIntegerTy() || isa<PointerType>(ETy)) {
+ if (ETy->isSingleValueType()) {
O << " .";
// Special case: ABI requires that we use .u8 for predicates
if (ETy->isIntegerTy(1))
@@ -1219,13 +1300,24 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
// Ptx allows variable initialization only for constant and global state
// spaces.
- if (((PTy->getAddressSpace() == llvm::ADDRESS_SPACE_GLOBAL) ||
- (PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST)) &&
- GVar->hasInitializer()) {
- const Constant *Initializer = GVar->getInitializer();
- if (!Initializer->isNullValue()) {
- O << " = ";
- printScalarConstant(Initializer, O);
+ if (GVar->hasInitializer()) {
+ if ((PTy->getAddressSpace() == llvm::ADDRESS_SPACE_GLOBAL) ||
+ (PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST)) {
+ const Constant *Initializer = GVar->getInitializer();
+ // 'undef' is treated as if no value was specified.
+ if (!Initializer->isNullValue() && !isa<UndefValue>(Initializer)) {
+ O << " = ";
+ printScalarConstant(Initializer, O);
+ }
+ } else {
+ // The frontend adds zero-initializer to variables that don't have an
+ // initial value, so skip warning for this case.
+ if (!GVar->getInitializer()->isNullValue()) {
+ std::string warnMsg = "initial value of '" + GVar->getName().str() +
+ "' is not allowed in addrspace(" +
+ llvm::utostr_32(PTy->getAddressSpace()) + ")";
+ report_fatal_error(warnMsg.c_str());
+ }
}
}
} else {
@@ -1284,7 +1376,7 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
}
break;
default:
- assert(0 && "type not supported yet");
+ llvm_unreachable("type not supported yet");
}
}
@@ -1359,7 +1451,7 @@ NVPTXAsmPrinter::getPTXFundamentalTypeStr(const Type *Ty, bool useB4PTR) const {
return "u32";
}
llvm_unreachable("unexpected type");
- return NULL;
+ return nullptr;
}
void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable *GVar,
@@ -1378,7 +1470,7 @@ void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable *GVar,
else
O << " .align " << GVar->getAlignment();
- if (ETy->isPrimitiveType() || ETy->isIntegerTy() || isa<PointerType>(ETy)) {
+ if (ETy->isSingleValueType()) {
O << " .";
O << getPTXFundamentalTypeStr(ETy);
O << " ";
@@ -1404,13 +1496,13 @@ void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable *GVar,
O << "]";
break;
default:
- assert(0 && "type not supported yet");
+ llvm_unreachable("type not supported yet");
}
return;
}
static unsigned int getOpenCLAlignment(const DataLayout *TD, Type *Ty) {
- if (Ty->isPrimitiveType() || Ty->isIntegerTy() || isa<PointerType>(Ty))
+ if (Ty->isSingleValueType())
return TD->getPrefTypeAlignment(Ty);
const ArrayType *ATy = dyn_cast<ArrayType>(Ty);
@@ -1507,24 +1599,38 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) {
first = false;
// Handle image/sampler parameters
- if (llvm::isSampler(*I) || llvm::isImage(*I)) {
- if (llvm::isImage(*I)) {
- std::string sname = I->getName();
- if (llvm::isImageWriteOnly(*I))
- O << "\t.param .surfref " << *getSymbol(F) << "_param_"
- << paramIndex;
- else // Default image is read_only
- O << "\t.param .texref " << *getSymbol(F) << "_param_"
- << paramIndex;
- } else // Should be llvm::isSampler(*I)
- O << "\t.param .samplerref " << *getSymbol(F) << "_param_"
- << paramIndex;
- continue;
+ if (isKernelFunction(*F)) {
+ if (isSampler(*I) || isImage(*I)) {
+ if (isImage(*I)) {
+ std::string sname = I->getName();
+ if (isImageWriteOnly(*I) || isImageReadWrite(*I)) {
+ if (nvptxSubtarget.hasImageHandles())
+ O << "\t.param .u64 .ptr .surfref ";
+ else
+ O << "\t.param .surfref ";
+ O << *CurrentFnSym << "_param_" << paramIndex;
+ }
+ else { // Default image is read_only
+ if (nvptxSubtarget.hasImageHandles())
+ O << "\t.param .u64 .ptr .texref ";
+ else
+ O << "\t.param .texref ";
+ O << *CurrentFnSym << "_param_" << paramIndex;
+ }
+ } else {
+ if (nvptxSubtarget.hasImageHandles())
+ O << "\t.param .u64 .ptr .samplerref ";
+ else
+ O << "\t.param .samplerref ";
+ O << *CurrentFnSym << "_param_" << paramIndex;
+ }
+ continue;
+ }
}
if (PAL.hasAttribute(paramIndex + 1, Attribute::ByVal) == false) {
- if (Ty->isVectorTy()) {
- // Just print .param .b8 .align <a> .param[size];
+ if (Ty->isAggregateType() || Ty->isVectorTy()) {
+ // Just print .param .align <a> .b8 .param[size];
// <a> = PAL.getparamalignment
// size = typeallocsize of element type
unsigned align = PAL.getParamAlignment(paramIndex + 1);
@@ -1580,7 +1686,7 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) {
continue;
}
// Non-kernel function, just print .param .b<size> for ABI
- // and .reg .b<size> for non ABY
+ // and .reg .b<size> for non-ABI
unsigned sz = 0;
if (isa<IntegerType>(Ty)) {
sz = cast<IntegerType>(Ty)->getBitWidth();
@@ -1604,7 +1710,7 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) {
Type *ETy = PTy->getElementType();
if (isABI || isKernelFunc) {
- // Just print .param .b8 .align <a> .param[size];
+ // Just print .param .align <a> .b8 .param[size];
// <a> = PAL.getparamalignment
// size = typeallocsize of element type
unsigned align = PAL.getParamAlignment(paramIndex + 1);
@@ -1702,9 +1808,9 @@ void NVPTXAsmPrinter::setAndEmitFunctionVirtualRegisters(
// O << "\t.reg .s16 %rc<" << NVPTXNumRegisters << ">;\n";
// O << "\t.reg .s16 %rs<" << NVPTXNumRegisters << ">;\n";
// O << "\t.reg .s32 %r<" << NVPTXNumRegisters << ">;\n";
- // O << "\t.reg .s64 %rl<" << NVPTXNumRegisters << ">;\n";
+ // O << "\t.reg .s64 %rd<" << NVPTXNumRegisters << ">;\n";
// O << "\t.reg .f32 %f<" << NVPTXNumRegisters << ">;\n";
- // O << "\t.reg .f64 %fl<" << NVPTXNumRegisters << ">;\n";
+ // O << "\t.reg .f64 %fd<" << NVPTXNumRegisters << ">;\n";
// Emit declaration of the virtual registers or 'physical' registers for
// each register class
@@ -1764,13 +1870,35 @@ void NVPTXAsmPrinter::printScalarConstant(const Constant *CPV, raw_ostream &O) {
return;
}
if (const GlobalValue *GVar = dyn_cast<GlobalValue>(CPV)) {
- O << *getSymbol(GVar);
+ PointerType *PTy = dyn_cast<PointerType>(GVar->getType());
+ bool IsNonGenericPointer = false;
+ if (PTy && PTy->getAddressSpace() != 0) {
+ IsNonGenericPointer = true;
+ }
+ if (EmitGeneric && !isa<Function>(CPV) && !IsNonGenericPointer) {
+ O << "generic(";
+ O << *getSymbol(GVar);
+ O << ")";
+ } else {
+ O << *getSymbol(GVar);
+ }
return;
}
if (const ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) {
const Value *v = Cexpr->stripPointerCasts();
+ PointerType *PTy = dyn_cast<PointerType>(Cexpr->getType());
+ bool IsNonGenericPointer = false;
+ if (PTy && PTy->getAddressSpace() != 0) {
+ IsNonGenericPointer = true;
+ }
if (const GlobalValue *GVar = dyn_cast<GlobalValue>(v)) {
- O << *getSymbol(GVar);
+ if (EmitGeneric && !isa<Function>(v) && !IsNonGenericPointer) {
+ O << "generic(";
+ O << *getSymbol(GVar);
+ O << ")";
+ } else {
+ O << *getSymbol(GVar);
+ }
return;
} else {
O << *LowerConstant(CPV, *this);
@@ -2087,21 +2215,6 @@ void NVPTXAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
O << *getSymbol(MO.getGlobal());
break;
- case MachineOperand::MO_ExternalSymbol: {
- const char *symbname = MO.getSymbolName();
- if (strstr(symbname, ".PARAM") == symbname) {
- unsigned index;
- sscanf(symbname + 6, "%u[];", &index);
- printParamName(index, O);
- } else if (strstr(symbname, ".HLPPARAM") == symbname) {
- unsigned index;
- sscanf(symbname + 9, "%u[];", &index);
- O << *CurrentFnSym << "_param_" << index << "_offset";
- } else
- O << symbname;
- break;
- }
-
case MachineOperand::MO_MachineBasicBlock:
O << *MO.getMBB()->getSymbol();
return;
@@ -2148,7 +2261,7 @@ void NVPTXAsmPrinter::emitSrcInText(StringRef filename, unsigned line) {
}
LineReader *NVPTXAsmPrinter::getReader(std::string filename) {
- if (reader == NULL) {
+ if (!reader) {
reader = new LineReader(filename);
}
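
Several of the NVPTXAsmPrinter changes above implement one policy: when the module uses the CUDA driver interface (EmitGeneric is set in the printer), symbol references emitted into scalar initializers are wrapped in generic(...), except for functions and for pointers already in a non-generic address space. A standalone restatement of that decision, with the LLVM types dropped so only the policy remains; the helper name is local to this sketch.

// Sketch of the generic() wrapping rule from printScalarConstant().
#include <iostream>
#include <string>

static std::string refInInitializer(const std::string &Sym, bool EmitGeneric,
                                    bool IsFunction, unsigned AddrSpace) {
  bool IsNonGenericPointer = (AddrSpace != 0); // non-zero = specific space
  if (EmitGeneric && !IsFunction && !IsNonGenericPointer)
    return "generic(" + Sym + ")";
  return Sym;
}

int main() {
  // CUDA-style module (EmitGeneric = true): plain globals get wrapped,
  // functions and addrspace(3) (shared) globals do not.
  std::cout << refInInitializer("a", true, false, 0) << "\n";  // generic(a)
  std::cout << refInInitializer("f", true, true, 0) << "\n";   // f
  std::cout << refInInitializer("s", true, false, 3) << "\n";  // s
  // OpenCL-style module (EmitGeneric = false): never wrapped.
  std::cout << refInInitializer("a", false, false, 0) << "\n"; // a
}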
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.h b/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.h
index 3abe5d1..a9f9bdd 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.h
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.h
@@ -27,7 +27,6 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
-#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetMachine.h"
#include <fstream>
@@ -97,6 +96,7 @@ class LLVM_LIBRARY_VISIBILITY NVPTXAsmPrinter : public AsmPrinter {
unsigned curpos;
raw_ostream &O;
NVPTXAsmPrinter &AP;
+ bool EmitGeneric;
public:
AggBuffer(unsigned _size, raw_ostream &_O, NVPTXAsmPrinter &_AP)
@@ -105,6 +105,7 @@ class LLVM_LIBRARY_VISIBILITY NVPTXAsmPrinter : public AsmPrinter {
size = _size;
curpos = 0;
numSymbols = 0;
+ EmitGeneric = AP.EmitGeneric;
}
~AggBuffer() { delete[] buffer; }
unsigned addBytes(unsigned char *Ptr, int Num, int Bytes) {
@@ -156,7 +157,18 @@ class LLVM_LIBRARY_VISIBILITY NVPTXAsmPrinter : public AsmPrinter {
const Value *v = Symbols[nSym];
if (const GlobalValue *GVar = dyn_cast<GlobalValue>(v)) {
MCSymbol *Name = AP.getSymbol(GVar);
- O << *Name;
+ PointerType *PTy = dyn_cast<PointerType>(GVar->getType());
+ bool IsNonGenericPointer = false;
+ if (PTy && PTy->getAddressSpace() != 0) {
+ IsNonGenericPointer = true;
+ }
+ if (EmitGeneric && !isa<Function>(v) && !IsNonGenericPointer) {
+ O << "generic(";
+ O << *Name;
+ O << ")";
+ } else {
+ O << *Name;
+ }
} else if (const ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(v)) {
O << *nvptx::LowerConstant(Cexpr, AP);
} else
@@ -177,35 +189,32 @@ class LLVM_LIBRARY_VISIBILITY NVPTXAsmPrinter : public AsmPrinter {
friend class AggBuffer;
- virtual void emitSrcInText(StringRef filename, unsigned line);
+ void emitSrcInText(StringRef filename, unsigned line);
private:
- virtual const char *getPassName() const { return "NVPTX Assembly Printer"; }
+ const char *getPassName() const override { return "NVPTX Assembly Printer"; }
const Function *F;
std::string CurrentFnName;
- void EmitFunctionEntryLabel();
- void EmitFunctionBodyStart();
- void EmitFunctionBodyEnd();
- void emitImplicitDef(const MachineInstr *MI) const;
+ void EmitFunctionEntryLabel() override;
+ void EmitFunctionBodyStart() override;
+ void EmitFunctionBodyEnd() override;
+ void emitImplicitDef(const MachineInstr *MI) const override;
- void EmitInstruction(const MachineInstr *);
+ void EmitInstruction(const MachineInstr *) override;
void lowerToMCInst(const MachineInstr *MI, MCInst &OutMI);
bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp);
- MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol);
+ MCOperand GetSymbolRef(const MCSymbol *Symbol);
unsigned encodeVirtualRegister(unsigned Reg);
- void EmitAlignment(unsigned NumBits, const GlobalValue *GV = 0) const {}
+ void EmitAlignment(unsigned NumBits, const GlobalValue *GV = nullptr) const {}
- void printGlobalVariable(const GlobalVariable *GVar);
void printVecModifiedImmediate(const MachineOperand &MO, const char *Modifier,
raw_ostream &O);
void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
- const char *Modifier = 0);
+ const char *Modifier = nullptr);
void printImplicitDef(const MachineInstr *MI, raw_ostream &O) const;
- // definition autogenerated.
- void printInstruction(const MachineInstr *MI, raw_ostream &O);
void printModuleLevelGV(const GlobalVariable *GVar, raw_ostream &O,
bool = false);
void printParamName(int paramIndex, raw_ostream &O);
@@ -225,15 +234,15 @@ private:
void printReturnValStr(const MachineFunction &MF, raw_ostream &O);
bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &);
+ raw_ostream &) override;
void printOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
- const char *Modifier = 0);
+ const char *Modifier = nullptr);
bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &);
+ raw_ostream &) override;
protected:
- bool doInitialization(Module &M);
- bool doFinalization(Module &M);
+ bool doInitialization(Module &M) override;
+ bool doFinalization(Module &M) override;
private:
std::string CurrentBankselLabelInBasicBlock;
@@ -278,14 +287,33 @@ private:
static const char *getRegisterName(unsigned RegNo);
void emitDemotedVars(const Function *, raw_ostream &);
+ bool lowerImageHandleOperand(const MachineInstr *MI, unsigned OpNo,
+ MCOperand &MCOp);
+ void lowerImageHandleSymbol(unsigned Index, MCOperand &MCOp);
+
LineReader *reader;
LineReader *getReader(std::string);
+
+ // Used to control the need to emit .generic() in the initializer of
+ // module scope variables.
+ // Although ptx supports the hybrid mode like the following,
+ // .global .u32 a;
+ // .global .u32 b;
+ // .global .u32 addr[] = {a, generic(b)}
+ // we have difficulty representing the difference in the NVVM IR.
+ //
+ // Since the address value should always be generic in CUDA C and always
+ // be specific in OpenCL, we use this simple control here.
+ //
+ bool EmitGeneric;
+
public:
NVPTXAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
: AsmPrinter(TM, Streamer),
nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) {
CurrentBankselLabelInBasicBlock = "";
- reader = NULL;
+ reader = nullptr;
+ EmitGeneric = (nvptxSubtarget.getDrvInterface() == NVPTX::CUDA);
}
~NVPTXAsmPrinter() {
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp
new file mode 100644
index 0000000..962b123
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp
@@ -0,0 +1,84 @@
+//===-- NVPTXAssignValidGlobalNames.cpp - Assign valid names to globals ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Clean up the names of global variables in the module to not contain symbols
+// that are invalid in PTX.
+//
+// Currently NVPTX, like other backends, relies on generic symbol name
+// sanitizing done by MC. However, the ptxas assembler is more stringent and
+// disallows some additional characters in symbol names. This pass makes sure
+// such names do not reach MC at all.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NVPTX.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Support/raw_ostream.h"
+#include <string>
+
+using namespace llvm;
+
+namespace {
+/// \brief NVPTXAssignValidGlobalNames
+class NVPTXAssignValidGlobalNames : public ModulePass {
+public:
+ static char ID;
+ NVPTXAssignValidGlobalNames() : ModulePass(ID) {}
+
+ bool runOnModule(Module &M) override;
+
+ /// \brief Clean up the name to remove symbols invalid in PTX.
+ std::string cleanUpName(StringRef Name);
+};
+}
+
+char NVPTXAssignValidGlobalNames::ID = 0;
+
+namespace llvm {
+void initializeNVPTXAssignValidGlobalNamesPass(PassRegistry &);
+}
+
+INITIALIZE_PASS(NVPTXAssignValidGlobalNames, "nvptx-assign-valid-global-names",
+ "Assign valid PTX names to globals", false, false)
+
+bool NVPTXAssignValidGlobalNames::runOnModule(Module &M) {
+ for (GlobalVariable &GV : M.globals()) {
+ // We are only allowed to rename local symbols.
+ if (GV.hasLocalLinkage()) {
+ // setName doesn't do extra work if the name does not change.
+ // Note: this does not create collisions - if setName is asked to set the
+ // name to something that already exists, it adds a proper postfix to
+ // avoid collisions.
+ GV.setName(cleanUpName(GV.getName()));
+ }
+ }
+
+ return true;
+}
+
+std::string NVPTXAssignValidGlobalNames::cleanUpName(StringRef Name) {
+ std::string ValidName;
+ raw_string_ostream ValidNameStream(ValidName);
+ for (unsigned I = 0, E = Name.size(); I != E; ++I) {
+ char C = Name[I];
+ if (C == '.' || C == '@') {
+ ValidNameStream << "_$_";
+ } else {
+ ValidNameStream << C;
+ }
+ }
+
+ return ValidNameStream.str();
+}
+
+ModulePass *llvm::createNVPTXAssignValidGlobalNamesPass() {
+ return new NVPTXAssignValidGlobalNames();
+}
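
For reference, the rewrite performed by cleanUpName(), reproduced as a runnable snippet outside the pass boilerplate: '.' and '@' (legal in LLVM IR names but rejected by ptxas) become the "_$_" sequence, and everything else is kept. Per runOnModule() above, this is only ever applied to globals with local linkage.

// Standalone replica of NVPTXAssignValidGlobalNames::cleanUpName().
#include <iostream>
#include <string>

static std::string cleanUpName(const std::string &Name) {
  std::string Valid;
  for (char C : Name) {
    if (C == '.' || C == '@')
      Valid += "_$_";   // characters ptxas rejects in symbol names
    else
      Valid += C;
  }
  return Valid;
}

int main() {
  std::cout << cleanUpName("myvar.0") << "\n";      // myvar_$_0
  std::cout << cleanUpName("str@internal") << "\n"; // str_$_internal
}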
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp
new file mode 100644
index 0000000..f3a095d
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp
@@ -0,0 +1,195 @@
+//===-- NVPTXFavorNonGenericAddrSpace.cpp - ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// When a load/store accesses the generic address space, checks whether the
+// address is casted from a non-generic address space. If so, remove this
+// addrspacecast because accessing non-generic address spaces is typically
+// faster. Besides seeking addrspacecasts, this optimization also traces into
+// the base pointer of a GEP.
+//
+// For instance, the code below loads a float from an array allocated in
+// addrspace(3).
+//
+// %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
+// %1 = gep [10 x float]* %0, i64 0, i64 %i
+// %2 = load float* %1 ; emits ld.f32
+//
+// First, function hoistAddrSpaceCastFromGEP reorders the addrspacecast
+// and the GEP to expose more optimization opportunities to function
+// optimizeMemoryInst. The intermediate code looks like:
+//
+// %0 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
+// %1 = addrspacecast float addrspace(3)* %0 to float*
+// %2 = load float* %1 ; still emits ld.f32, but will be optimized shortly
+//
+// Then, function optimizeMemoryInstruction detects a load from addrspacecast'ed
+// generic pointers, and folds the load and the addrspacecast into a load from
+// the original address space. The final code looks like:
+//
+// %0 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
+// %2 = load float addrspace(3)* %0 ; emits ld.shared.f32
+//
+// This pass may remove an addrspacecast in a different BB. Therefore, we
+// implement it as a FunctionPass.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NVPTX.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+
+// An option to disable this optimization. Enable it by default.
+static cl::opt<bool> DisableFavorNonGeneric(
+ "disable-nvptx-favor-non-generic",
+ cl::init(false),
+ cl::desc("Do not convert generic address space usage "
+ "to non-generic address space usage"),
+ cl::Hidden);
+
+namespace {
+/// \brief NVPTXFavorNonGenericAddrSpaces
+class NVPTXFavorNonGenericAddrSpaces : public FunctionPass {
+public:
+ static char ID;
+ NVPTXFavorNonGenericAddrSpaces() : FunctionPass(ID) {}
+
+ bool runOnFunction(Function &F) override;
+
+ /// Optimizes load/store instructions. Idx is the index of the pointer operand
+ /// (0 for load, and 1 for store). Returns true if it changes anything.
+ bool optimizeMemoryInstruction(Instruction *I, unsigned Idx);
+ /// Transforms "gep (addrspacecast X), indices" into "addrspacecast (gep X,
+ /// indices)". This reordering exposes to optimizeMemoryInstruction more
+ /// optimization opportunities on loads and stores. Returns true if it changes
+ /// the program.
+ bool hoistAddrSpaceCastFromGEP(GEPOperator *GEP);
+};
+}
+
+char NVPTXFavorNonGenericAddrSpaces::ID = 0;
+
+namespace llvm {
+void initializeNVPTXFavorNonGenericAddrSpacesPass(PassRegistry &);
+}
+INITIALIZE_PASS(NVPTXFavorNonGenericAddrSpaces, "nvptx-favor-non-generic",
+ "Remove unnecessary non-generic-to-generic addrspacecasts",
+ false, false)
+
+// Decides whether removing Cast is valid and beneficial. Cast can be an
+// instruction or a constant expression.
+static bool IsEliminableAddrSpaceCast(Operator *Cast) {
+ // Returns false if not even an addrspacecast.
+ if (Cast->getOpcode() != Instruction::AddrSpaceCast)
+ return false;
+
+ Value *Src = Cast->getOperand(0);
+ PointerType *SrcTy = cast<PointerType>(Src->getType());
+ PointerType *DestTy = cast<PointerType>(Cast->getType());
+ // TODO: For now, we only handle the case where the addrspacecast only changes
+ // the address space but not the type. If the type also changes, we could
+ // still get rid of the addrspacecast by adding an extra bitcast, but we
+ // rarely see such scenarios.
+ if (SrcTy->getElementType() != DestTy->getElementType())
+ return false;
+
+ // Checks whether the addrspacecast is from a non-generic address space to the
+ // generic address space.
+ return (SrcTy->getAddressSpace() != AddressSpace::ADDRESS_SPACE_GENERIC &&
+ DestTy->getAddressSpace() == AddressSpace::ADDRESS_SPACE_GENERIC);
+}
+
+bool NVPTXFavorNonGenericAddrSpaces::hoistAddrSpaceCastFromGEP(
+ GEPOperator *GEP) {
+ Operator *Cast = dyn_cast<Operator>(GEP->getPointerOperand());
+ if (!Cast)
+ return false;
+
+ if (!IsEliminableAddrSpaceCast(Cast))
+ return false;
+
+ SmallVector<Value *, 8> Indices(GEP->idx_begin(), GEP->idx_end());
+ if (Instruction *GEPI = dyn_cast<Instruction>(GEP)) {
+ // %1 = gep (addrspacecast X), indices
+ // =>
+ // %0 = gep X, indices
+ // %1 = addrspacecast %0
+ GetElementPtrInst *NewGEPI = GetElementPtrInst::Create(Cast->getOperand(0),
+ Indices,
+ GEP->getName(),
+ GEPI);
+ NewGEPI->setIsInBounds(GEP->isInBounds());
+ GEP->replaceAllUsesWith(
+ new AddrSpaceCastInst(NewGEPI, GEP->getType(), "", GEPI));
+ } else {
+ // GEP is a constant expression.
+ Constant *NewGEPCE = ConstantExpr::getGetElementPtr(
+ cast<Constant>(Cast->getOperand(0)),
+ Indices,
+ GEP->isInBounds());
+ GEP->replaceAllUsesWith(
+ ConstantExpr::getAddrSpaceCast(NewGEPCE, GEP->getType()));
+ }
+
+ return true;
+}
+
+bool NVPTXFavorNonGenericAddrSpaces::optimizeMemoryInstruction(Instruction *MI,
+ unsigned Idx) {
+ // If the pointer operand is a GEP, hoist the addrspacecast if any from the
+ // GEP to expose more optimization opportunities.
+ if (GEPOperator *GEP = dyn_cast<GEPOperator>(MI->getOperand(Idx))) {
+ hoistAddrSpaceCastFromGEP(GEP);
+ }
+
+ // load/store (addrspacecast X) => load/store X if shortcutting the
+ // addrspacecast is valid and can improve performance.
+ //
+ // e.g.,
+ // %1 = addrspacecast float addrspace(3)* %0 to float*
+ // %2 = load float* %1
+ // ->
+ // %2 = load float addrspace(3)* %0
+ //
+ // Note: the addrspacecast can also be a constant expression.
+ if (Operator *Cast = dyn_cast<Operator>(MI->getOperand(Idx))) {
+ if (IsEliminableAddrSpaceCast(Cast)) {
+ MI->setOperand(Idx, Cast->getOperand(0));
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool NVPTXFavorNonGenericAddrSpaces::runOnFunction(Function &F) {
+ if (DisableFavorNonGeneric)
+ return false;
+
+ bool Changed = false;
+ for (Function::iterator B = F.begin(), BE = F.end(); B != BE; ++B) {
+ for (BasicBlock::iterator I = B->begin(), IE = B->end(); I != IE; ++I) {
+ if (isa<LoadInst>(I)) {
+ // V = load P
+ Changed |= optimizeMemoryInstruction(I, 0);
+ } else if (isa<StoreInst>(I)) {
+ // store V, P
+ Changed |= optimizeMemoryInstruction(I, 1);
+ }
+ }
+ }
+ return Changed;
+}
+
+FunctionPass *llvm::createNVPTXFavorNonGenericAddrSpacesPass() {
+ return new NVPTXFavorNonGenericAddrSpaces();
+}
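
The pass hinges on IsEliminableAddrSpaceCast(): a cast is folded only when it goes from a concrete address space into the generic one and the pointee type is unchanged. The same test restated without the LLVM types follows; the assumption that the generic address space is number 0 matches the NVPTX convention used elsewhere in this commit.

// Sketch of the eliminability test, stripped of LLVM types.
#include <cassert>

static bool isEliminableAddrSpaceCast(unsigned SrcAS, unsigned DstAS,
                                      bool SamePointeeType) {
  const unsigned GenericAS = 0; // AddressSpace::ADDRESS_SPACE_GENERIC (assumed 0)
  if (!SamePointeeType)         // a type change would need an extra bitcast
    return false;
  return SrcAS != GenericAS && DstAS == GenericAS;
}

int main() {
  assert(isEliminableAddrSpaceCast(3, 0, true));   // shared -> generic: fold
  assert(!isEliminableAddrSpaceCast(0, 3, true));  // generic -> shared: keep
  assert(!isEliminableAddrSpaceCast(3, 0, false)); // pointee type differs: keep
  return 0;
}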
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXFrameLowering.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXFrameLowering.cpp
index 9030584f..8b08841 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXFrameLowering.cpp
@@ -26,6 +26,10 @@
using namespace llvm;
+NVPTXFrameLowering::NVPTXFrameLowering(NVPTXSubtarget &STI)
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsUp, 8, 0),
+ is64bit(STI.is64Bit()) {}
+
bool NVPTXFrameLowering::hasFP(const MachineFunction &MF) const { return true; }
void NVPTXFrameLowering::emitPrologue(MachineFunction &MF) const {
@@ -43,17 +47,21 @@ void NVPTXFrameLowering::emitPrologue(MachineFunction &MF) const {
// cvta.local %SP, %SPL;
if (is64bit) {
unsigned LocalReg = MRI.createVirtualRegister(&NVPTX::Int64RegsRegClass);
- MachineInstr *MI = BuildMI(
- MBB, MBBI, dl, tm.getInstrInfo()->get(NVPTX::cvta_local_yes_64),
- NVPTX::VRFrame).addReg(LocalReg);
- BuildMI(MBB, MI, dl, tm.getInstrInfo()->get(NVPTX::MOV_DEPOT_ADDR_64),
+ MachineInstr *MI =
+ BuildMI(MBB, MBBI, dl,
+ MF.getTarget().getInstrInfo()->get(NVPTX::cvta_local_yes_64),
+ NVPTX::VRFrame).addReg(LocalReg);
+ BuildMI(MBB, MI, dl,
+ MF.getTarget().getInstrInfo()->get(NVPTX::MOV_DEPOT_ADDR_64),
LocalReg).addImm(MF.getFunctionNumber());
} else {
unsigned LocalReg = MRI.createVirtualRegister(&NVPTX::Int32RegsRegClass);
- MachineInstr *MI = BuildMI(
- MBB, MBBI, dl, tm.getInstrInfo()->get(NVPTX::cvta_local_yes),
- NVPTX::VRFrame).addReg(LocalReg);
- BuildMI(MBB, MI, dl, tm.getInstrInfo()->get(NVPTX::MOV_DEPOT_ADDR),
+ MachineInstr *MI =
+ BuildMI(MBB, MBBI, dl,
+ MF.getTarget().getInstrInfo()->get(NVPTX::cvta_local_yes),
+ NVPTX::VRFrame).addReg(LocalReg);
+ BuildMI(MBB, MI, dl,
+ MF.getTarget().getInstrInfo()->get(NVPTX::MOV_DEPOT_ADDR),
LocalReg).addImm(MF.getFunctionNumber());
}
}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXFrameLowering.h b/contrib/llvm/lib/Target/NVPTX/NVPTXFrameLowering.h
index 819f1dd..56fb673 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXFrameLowering.h
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXFrameLowering.h
@@ -17,24 +17,20 @@
#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
-class NVPTXTargetMachine;
-
+class NVPTXSubtarget;
class NVPTXFrameLowering : public TargetFrameLowering {
- NVPTXTargetMachine &tm;
bool is64bit;
public:
- explicit NVPTXFrameLowering(NVPTXTargetMachine &_tm, bool _is64bit)
- : TargetFrameLowering(TargetFrameLowering::StackGrowsUp, 8, 0), tm(_tm),
- is64bit(_is64bit) {}
+ explicit NVPTXFrameLowering(NVPTXSubtarget &STI);
- virtual bool hasFP(const MachineFunction &MF) const;
- virtual void emitPrologue(MachineFunction &MF) const;
- virtual void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+ bool hasFP(const MachineFunction &MF) const override;
+ void emitPrologue(MachineFunction &MF) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const override;
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp
index 9fb0dd8..faa9fdb 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp
@@ -13,20 +13,19 @@
//===----------------------------------------------------------------------===//
#include "NVPTX.h"
-#include "NVPTXUtilities.h"
#include "MCTargetDesc/NVPTXBaseInfo.h"
-
-#include "llvm/PassManager.h"
+#include "NVPTXUtilities.h"
+#include "llvm/CodeGen/MachineFunctionAnalysis.h"
+#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
-#include "llvm/ADT/ValueMap.h"
-#include "llvm/CodeGen/MachineFunctionAnalysis.h"
-#include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/ValueMap.h"
+#include "llvm/PassManager.h"
using namespace llvm;
@@ -41,10 +40,9 @@ public:
GenericToNVVM() : ModulePass(ID) {}
- virtual bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- }
+ void getAnalysisUsage(AnalysisUsage &AU) const override {}
private:
Value *getOrInsertCVTA(Module *M, Function *F, GlobalVariable *GV,
@@ -64,7 +62,7 @@ private:
GVMapTy GVMap;
ConstantToValueMapTy ConstantToValueMap;
};
-}
+} // end namespace
char GenericToNVVM::ID = 0;
@@ -86,10 +84,11 @@ bool GenericToNVVM::runOnModule(Module &M) {
GlobalVariable *GV = I++;
if (GV->getType()->getAddressSpace() == llvm::ADDRESS_SPACE_GENERIC &&
!llvm::isTexture(*GV) && !llvm::isSurface(*GV) &&
- !GV->getName().startswith("llvm.")) {
+ !llvm::isSampler(*GV) && !GV->getName().startswith("llvm.")) {
GlobalVariable *NewGV = new GlobalVariable(
M, GV->getType()->getElementType(), GV->isConstant(),
- GV->getLinkage(), GV->hasInitializer() ? GV->getInitializer() : NULL,
+ GV->getLinkage(),
+ GV->hasInitializer() ? GV->getInitializer() : nullptr,
"", GV, GV->getThreadLocalMode(), llvm::ADDRESS_SPACE_GLOBAL);
NewGV->copyAttributesFrom(GV);
GVMap[GV] = NewGV;
@@ -147,10 +146,8 @@ bool GenericToNVVM::runOnModule(Module &M) {
  // variable initializers, as other uses have already been removed
// while walking through the instructions in function definitions.
for (Value::use_iterator UI = GV->use_begin(), UE = GV->use_end();
- UI != UE;) {
- Use &U = (UI++).getUse();
- U.set(BitCastNewGV);
- }
+ UI != UE;)
+ (UI++)->set(BitCastNewGV);
std::string Name = GV->getName();
GV->removeDeadConstantUsers();
GV->eraseFromParent();
@@ -165,7 +162,7 @@ Value *GenericToNVVM::getOrInsertCVTA(Module *M, Function *F,
GlobalVariable *GV,
IRBuilder<> &Builder) {
PointerType *GVType = GV->getType();
- Value *CVTA = NULL;
+ Value *CVTA = nullptr;
// See if the address space conversion requires the operand to be bitcast
// to i8 addrspace(n)* first.
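The runOnModule hunk above clones every eligible generic-address-space global into llvm::ADDRESS_SPACE_GLOBAL via the GlobalVariable constructor and then redirects the remaining uses to the clone. A minimal standalone sketch of that cloning step, assuming the LLVM 3.5-era C++ API used in this tree (the module and symbol names are made up; this is illustrative tool code, not part of the patch):

#include "llvm/IR/Constants.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("generic_to_global_demo", Ctx);
  Type *I32 = Type::getInt32Ty(Ctx);

  // A global in the generic address space (0), like those the pass visits.
  GlobalVariable *GV =
      new GlobalVariable(M, I32, /*isConstant=*/false,
                         GlobalValue::ExternalLinkage,
                         ConstantInt::get(I32, 0), "g");

  // Clone it into address space 1 (global), mirroring the constructor call in
  // the hunk above; the initializer and attributes are carried over.
  GlobalVariable *NewGV = new GlobalVariable(
      M, GV->getType()->getElementType(), GV->isConstant(), GV->getLinkage(),
      GV->hasInitializer() ? GV->getInitializer() : nullptr, "g.global", GV,
      GV->getThreadLocalMode(), /*AddressSpace=*/1);
  NewGV->copyAttributesFrom(GV);

  M.print(outs(), nullptr);
  return 0;
}

getType()->getElementType() matches the API of this tree; newer LLVM with opaque pointers drops PointerType::getElementType() in favor of GlobalValue::getValueType().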
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 4b8b306..05205fb 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -20,16 +20,9 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
-#undef DEBUG_TYPE
-#define DEBUG_TYPE "nvptx-isel"
-
using namespace llvm;
-static cl::opt<int>
-FMAContractLevel("nvptx-fma-level", cl::ZeroOrMore, cl::Hidden,
- cl::desc("NVPTX Specific: FMA contraction (0: don't do it"
- " 1: do it 2: do it aggressively"),
- cl::init(2));
+#define DEBUG_TYPE "nvptx-isel"
static cl::opt<int> UsePrecDivF32(
"nvptx-prec-divf32", cl::ZeroOrMore, cl::Hidden,
@@ -59,16 +52,6 @@ NVPTXDAGToDAGISel::NVPTXDAGToDAGISel(NVPTXTargetMachine &tm,
CodeGenOpt::Level OptLevel)
: SelectionDAGISel(tm, OptLevel),
Subtarget(tm.getSubtarget<NVPTXSubtarget>()) {
-
- doFMAF32 = (OptLevel > 0) && Subtarget.hasFMAF32() && (FMAContractLevel >= 1);
- doFMAF64 = (OptLevel > 0) && Subtarget.hasFMAF64() && (FMAContractLevel >= 1);
- doFMAF32AGG =
- (OptLevel > 0) && Subtarget.hasFMAF32() && (FMAContractLevel == 2);
- doFMAF64AGG =
- (OptLevel > 0) && Subtarget.hasFMAF64() && (FMAContractLevel == 2);
-
- allowFMA = (FMAContractLevel >= 1);
-
doMulWide = (OptLevel > 0);
}
@@ -114,16 +97,21 @@ bool NVPTXDAGToDAGISel::useF32FTZ() const {
}
}
+bool NVPTXDAGToDAGISel::allowFMA() const {
+ const NVPTXTargetLowering *TL = Subtarget.getTargetLowering();
+ return TL->allowFMA(*MF, OptLevel);
+}
+
/// Select - Select instructions not customized! Used for
/// expanded, promoted and normal instructions.
SDNode *NVPTXDAGToDAGISel::Select(SDNode *N) {
if (N->isMachineOpcode()) {
N->setNodeId(-1);
- return NULL; // Already selected.
+ return nullptr; // Already selected.
}
- SDNode *ResNode = NULL;
+ SDNode *ResNode = nullptr;
switch (N->getOpcode()) {
case ISD::LOAD:
ResNode = SelectLoad(N);
@@ -139,7 +127,7 @@ SDNode *NVPTXDAGToDAGISel::Select(SDNode *N) {
case NVPTXISD::LDGV4:
case NVPTXISD::LDUV2:
case NVPTXISD::LDUV4:
- ResNode = SelectLDGLDUVector(N);
+ ResNode = SelectLDGLDU(N);
break;
case NVPTXISD::StoreV2:
case NVPTXISD::StoreV4:
@@ -162,6 +150,358 @@ SDNode *NVPTXDAGToDAGISel::Select(SDNode *N) {
case NVPTXISD::StoreParamU32:
ResNode = SelectStoreParam(N);
break;
+ case ISD::INTRINSIC_WO_CHAIN:
+ ResNode = SelectIntrinsicNoChain(N);
+ break;
+ case ISD::INTRINSIC_W_CHAIN:
+ ResNode = SelectIntrinsicChain(N);
+ break;
+ case NVPTXISD::Tex1DFloatS32:
+ case NVPTXISD::Tex1DFloatFloat:
+ case NVPTXISD::Tex1DFloatFloatLevel:
+ case NVPTXISD::Tex1DFloatFloatGrad:
+ case NVPTXISD::Tex1DS32S32:
+ case NVPTXISD::Tex1DS32Float:
+ case NVPTXISD::Tex1DS32FloatLevel:
+ case NVPTXISD::Tex1DS32FloatGrad:
+ case NVPTXISD::Tex1DU32S32:
+ case NVPTXISD::Tex1DU32Float:
+ case NVPTXISD::Tex1DU32FloatLevel:
+ case NVPTXISD::Tex1DU32FloatGrad:
+ case NVPTXISD::Tex1DArrayFloatS32:
+ case NVPTXISD::Tex1DArrayFloatFloat:
+ case NVPTXISD::Tex1DArrayFloatFloatLevel:
+ case NVPTXISD::Tex1DArrayFloatFloatGrad:
+ case NVPTXISD::Tex1DArrayS32S32:
+ case NVPTXISD::Tex1DArrayS32Float:
+ case NVPTXISD::Tex1DArrayS32FloatLevel:
+ case NVPTXISD::Tex1DArrayS32FloatGrad:
+ case NVPTXISD::Tex1DArrayU32S32:
+ case NVPTXISD::Tex1DArrayU32Float:
+ case NVPTXISD::Tex1DArrayU32FloatLevel:
+ case NVPTXISD::Tex1DArrayU32FloatGrad:
+ case NVPTXISD::Tex2DFloatS32:
+ case NVPTXISD::Tex2DFloatFloat:
+ case NVPTXISD::Tex2DFloatFloatLevel:
+ case NVPTXISD::Tex2DFloatFloatGrad:
+ case NVPTXISD::Tex2DS32S32:
+ case NVPTXISD::Tex2DS32Float:
+ case NVPTXISD::Tex2DS32FloatLevel:
+ case NVPTXISD::Tex2DS32FloatGrad:
+ case NVPTXISD::Tex2DU32S32:
+ case NVPTXISD::Tex2DU32Float:
+ case NVPTXISD::Tex2DU32FloatLevel:
+ case NVPTXISD::Tex2DU32FloatGrad:
+ case NVPTXISD::Tex2DArrayFloatS32:
+ case NVPTXISD::Tex2DArrayFloatFloat:
+ case NVPTXISD::Tex2DArrayFloatFloatLevel:
+ case NVPTXISD::Tex2DArrayFloatFloatGrad:
+ case NVPTXISD::Tex2DArrayS32S32:
+ case NVPTXISD::Tex2DArrayS32Float:
+ case NVPTXISD::Tex2DArrayS32FloatLevel:
+ case NVPTXISD::Tex2DArrayS32FloatGrad:
+ case NVPTXISD::Tex2DArrayU32S32:
+ case NVPTXISD::Tex2DArrayU32Float:
+ case NVPTXISD::Tex2DArrayU32FloatLevel:
+ case NVPTXISD::Tex2DArrayU32FloatGrad:
+ case NVPTXISD::Tex3DFloatS32:
+ case NVPTXISD::Tex3DFloatFloat:
+ case NVPTXISD::Tex3DFloatFloatLevel:
+ case NVPTXISD::Tex3DFloatFloatGrad:
+ case NVPTXISD::Tex3DS32S32:
+ case NVPTXISD::Tex3DS32Float:
+ case NVPTXISD::Tex3DS32FloatLevel:
+ case NVPTXISD::Tex3DS32FloatGrad:
+ case NVPTXISD::Tex3DU32S32:
+ case NVPTXISD::Tex3DU32Float:
+ case NVPTXISD::Tex3DU32FloatLevel:
+ case NVPTXISD::Tex3DU32FloatGrad:
+ case NVPTXISD::TexCubeFloatFloat:
+ case NVPTXISD::TexCubeFloatFloatLevel:
+ case NVPTXISD::TexCubeS32Float:
+ case NVPTXISD::TexCubeS32FloatLevel:
+ case NVPTXISD::TexCubeU32Float:
+ case NVPTXISD::TexCubeU32FloatLevel:
+ case NVPTXISD::TexCubeArrayFloatFloat:
+ case NVPTXISD::TexCubeArrayFloatFloatLevel:
+ case NVPTXISD::TexCubeArrayS32Float:
+ case NVPTXISD::TexCubeArrayS32FloatLevel:
+ case NVPTXISD::TexCubeArrayU32Float:
+ case NVPTXISD::TexCubeArrayU32FloatLevel:
+ case NVPTXISD::Tld4R2DFloatFloat:
+ case NVPTXISD::Tld4G2DFloatFloat:
+ case NVPTXISD::Tld4B2DFloatFloat:
+ case NVPTXISD::Tld4A2DFloatFloat:
+ case NVPTXISD::Tld4R2DS64Float:
+ case NVPTXISD::Tld4G2DS64Float:
+ case NVPTXISD::Tld4B2DS64Float:
+ case NVPTXISD::Tld4A2DS64Float:
+ case NVPTXISD::Tld4R2DU64Float:
+ case NVPTXISD::Tld4G2DU64Float:
+ case NVPTXISD::Tld4B2DU64Float:
+ case NVPTXISD::Tld4A2DU64Float:
+ case NVPTXISD::TexUnified1DFloatS32:
+ case NVPTXISD::TexUnified1DFloatFloat:
+ case NVPTXISD::TexUnified1DFloatFloatLevel:
+ case NVPTXISD::TexUnified1DFloatFloatGrad:
+ case NVPTXISD::TexUnified1DS32S32:
+ case NVPTXISD::TexUnified1DS32Float:
+ case NVPTXISD::TexUnified1DS32FloatLevel:
+ case NVPTXISD::TexUnified1DS32FloatGrad:
+ case NVPTXISD::TexUnified1DU32S32:
+ case NVPTXISD::TexUnified1DU32Float:
+ case NVPTXISD::TexUnified1DU32FloatLevel:
+ case NVPTXISD::TexUnified1DU32FloatGrad:
+ case NVPTXISD::TexUnified1DArrayFloatS32:
+ case NVPTXISD::TexUnified1DArrayFloatFloat:
+ case NVPTXISD::TexUnified1DArrayFloatFloatLevel:
+ case NVPTXISD::TexUnified1DArrayFloatFloatGrad:
+ case NVPTXISD::TexUnified1DArrayS32S32:
+ case NVPTXISD::TexUnified1DArrayS32Float:
+ case NVPTXISD::TexUnified1DArrayS32FloatLevel:
+ case NVPTXISD::TexUnified1DArrayS32FloatGrad:
+ case NVPTXISD::TexUnified1DArrayU32S32:
+ case NVPTXISD::TexUnified1DArrayU32Float:
+ case NVPTXISD::TexUnified1DArrayU32FloatLevel:
+ case NVPTXISD::TexUnified1DArrayU32FloatGrad:
+ case NVPTXISD::TexUnified2DFloatS32:
+ case NVPTXISD::TexUnified2DFloatFloat:
+ case NVPTXISD::TexUnified2DFloatFloatLevel:
+ case NVPTXISD::TexUnified2DFloatFloatGrad:
+ case NVPTXISD::TexUnified2DS32S32:
+ case NVPTXISD::TexUnified2DS32Float:
+ case NVPTXISD::TexUnified2DS32FloatLevel:
+ case NVPTXISD::TexUnified2DS32FloatGrad:
+ case NVPTXISD::TexUnified2DU32S32:
+ case NVPTXISD::TexUnified2DU32Float:
+ case NVPTXISD::TexUnified2DU32FloatLevel:
+ case NVPTXISD::TexUnified2DU32FloatGrad:
+ case NVPTXISD::TexUnified2DArrayFloatS32:
+ case NVPTXISD::TexUnified2DArrayFloatFloat:
+ case NVPTXISD::TexUnified2DArrayFloatFloatLevel:
+ case NVPTXISD::TexUnified2DArrayFloatFloatGrad:
+ case NVPTXISD::TexUnified2DArrayS32S32:
+ case NVPTXISD::TexUnified2DArrayS32Float:
+ case NVPTXISD::TexUnified2DArrayS32FloatLevel:
+ case NVPTXISD::TexUnified2DArrayS32FloatGrad:
+ case NVPTXISD::TexUnified2DArrayU32S32:
+ case NVPTXISD::TexUnified2DArrayU32Float:
+ case NVPTXISD::TexUnified2DArrayU32FloatLevel:
+ case NVPTXISD::TexUnified2DArrayU32FloatGrad:
+ case NVPTXISD::TexUnified3DFloatS32:
+ case NVPTXISD::TexUnified3DFloatFloat:
+ case NVPTXISD::TexUnified3DFloatFloatLevel:
+ case NVPTXISD::TexUnified3DFloatFloatGrad:
+ case NVPTXISD::TexUnified3DS32S32:
+ case NVPTXISD::TexUnified3DS32Float:
+ case NVPTXISD::TexUnified3DS32FloatLevel:
+ case NVPTXISD::TexUnified3DS32FloatGrad:
+ case NVPTXISD::TexUnified3DU32S32:
+ case NVPTXISD::TexUnified3DU32Float:
+ case NVPTXISD::TexUnified3DU32FloatLevel:
+ case NVPTXISD::TexUnified3DU32FloatGrad:
+ case NVPTXISD::TexUnifiedCubeFloatFloat:
+ case NVPTXISD::TexUnifiedCubeFloatFloatLevel:
+ case NVPTXISD::TexUnifiedCubeS32Float:
+ case NVPTXISD::TexUnifiedCubeS32FloatLevel:
+ case NVPTXISD::TexUnifiedCubeU32Float:
+ case NVPTXISD::TexUnifiedCubeU32FloatLevel:
+ case NVPTXISD::TexUnifiedCubeArrayFloatFloat:
+ case NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel:
+ case NVPTXISD::TexUnifiedCubeArrayS32Float:
+ case NVPTXISD::TexUnifiedCubeArrayS32FloatLevel:
+ case NVPTXISD::TexUnifiedCubeArrayU32Float:
+ case NVPTXISD::TexUnifiedCubeArrayU32FloatLevel:
+ case NVPTXISD::Tld4UnifiedR2DFloatFloat:
+ case NVPTXISD::Tld4UnifiedG2DFloatFloat:
+ case NVPTXISD::Tld4UnifiedB2DFloatFloat:
+ case NVPTXISD::Tld4UnifiedA2DFloatFloat:
+ case NVPTXISD::Tld4UnifiedR2DS64Float:
+ case NVPTXISD::Tld4UnifiedG2DS64Float:
+ case NVPTXISD::Tld4UnifiedB2DS64Float:
+ case NVPTXISD::Tld4UnifiedA2DS64Float:
+ case NVPTXISD::Tld4UnifiedR2DU64Float:
+ case NVPTXISD::Tld4UnifiedG2DU64Float:
+ case NVPTXISD::Tld4UnifiedB2DU64Float:
+ case NVPTXISD::Tld4UnifiedA2DU64Float:
+ ResNode = SelectTextureIntrinsic(N);
+ break;
+ case NVPTXISD::Suld1DI8Clamp:
+ case NVPTXISD::Suld1DI16Clamp:
+ case NVPTXISD::Suld1DI32Clamp:
+ case NVPTXISD::Suld1DI64Clamp:
+ case NVPTXISD::Suld1DV2I8Clamp:
+ case NVPTXISD::Suld1DV2I16Clamp:
+ case NVPTXISD::Suld1DV2I32Clamp:
+ case NVPTXISD::Suld1DV2I64Clamp:
+ case NVPTXISD::Suld1DV4I8Clamp:
+ case NVPTXISD::Suld1DV4I16Clamp:
+ case NVPTXISD::Suld1DV4I32Clamp:
+ case NVPTXISD::Suld1DArrayI8Clamp:
+ case NVPTXISD::Suld1DArrayI16Clamp:
+ case NVPTXISD::Suld1DArrayI32Clamp:
+ case NVPTXISD::Suld1DArrayI64Clamp:
+ case NVPTXISD::Suld1DArrayV2I8Clamp:
+ case NVPTXISD::Suld1DArrayV2I16Clamp:
+ case NVPTXISD::Suld1DArrayV2I32Clamp:
+ case NVPTXISD::Suld1DArrayV2I64Clamp:
+ case NVPTXISD::Suld1DArrayV4I8Clamp:
+ case NVPTXISD::Suld1DArrayV4I16Clamp:
+ case NVPTXISD::Suld1DArrayV4I32Clamp:
+ case NVPTXISD::Suld2DI8Clamp:
+ case NVPTXISD::Suld2DI16Clamp:
+ case NVPTXISD::Suld2DI32Clamp:
+ case NVPTXISD::Suld2DI64Clamp:
+ case NVPTXISD::Suld2DV2I8Clamp:
+ case NVPTXISD::Suld2DV2I16Clamp:
+ case NVPTXISD::Suld2DV2I32Clamp:
+ case NVPTXISD::Suld2DV2I64Clamp:
+ case NVPTXISD::Suld2DV4I8Clamp:
+ case NVPTXISD::Suld2DV4I16Clamp:
+ case NVPTXISD::Suld2DV4I32Clamp:
+ case NVPTXISD::Suld2DArrayI8Clamp:
+ case NVPTXISD::Suld2DArrayI16Clamp:
+ case NVPTXISD::Suld2DArrayI32Clamp:
+ case NVPTXISD::Suld2DArrayI64Clamp:
+ case NVPTXISD::Suld2DArrayV2I8Clamp:
+ case NVPTXISD::Suld2DArrayV2I16Clamp:
+ case NVPTXISD::Suld2DArrayV2I32Clamp:
+ case NVPTXISD::Suld2DArrayV2I64Clamp:
+ case NVPTXISD::Suld2DArrayV4I8Clamp:
+ case NVPTXISD::Suld2DArrayV4I16Clamp:
+ case NVPTXISD::Suld2DArrayV4I32Clamp:
+ case NVPTXISD::Suld3DI8Clamp:
+ case NVPTXISD::Suld3DI16Clamp:
+ case NVPTXISD::Suld3DI32Clamp:
+ case NVPTXISD::Suld3DI64Clamp:
+ case NVPTXISD::Suld3DV2I8Clamp:
+ case NVPTXISD::Suld3DV2I16Clamp:
+ case NVPTXISD::Suld3DV2I32Clamp:
+ case NVPTXISD::Suld3DV2I64Clamp:
+ case NVPTXISD::Suld3DV4I8Clamp:
+ case NVPTXISD::Suld3DV4I16Clamp:
+ case NVPTXISD::Suld3DV4I32Clamp:
+ case NVPTXISD::Suld1DI8Trap:
+ case NVPTXISD::Suld1DI16Trap:
+ case NVPTXISD::Suld1DI32Trap:
+ case NVPTXISD::Suld1DI64Trap:
+ case NVPTXISD::Suld1DV2I8Trap:
+ case NVPTXISD::Suld1DV2I16Trap:
+ case NVPTXISD::Suld1DV2I32Trap:
+ case NVPTXISD::Suld1DV2I64Trap:
+ case NVPTXISD::Suld1DV4I8Trap:
+ case NVPTXISD::Suld1DV4I16Trap:
+ case NVPTXISD::Suld1DV4I32Trap:
+ case NVPTXISD::Suld1DArrayI8Trap:
+ case NVPTXISD::Suld1DArrayI16Trap:
+ case NVPTXISD::Suld1DArrayI32Trap:
+ case NVPTXISD::Suld1DArrayI64Trap:
+ case NVPTXISD::Suld1DArrayV2I8Trap:
+ case NVPTXISD::Suld1DArrayV2I16Trap:
+ case NVPTXISD::Suld1DArrayV2I32Trap:
+ case NVPTXISD::Suld1DArrayV2I64Trap:
+ case NVPTXISD::Suld1DArrayV4I8Trap:
+ case NVPTXISD::Suld1DArrayV4I16Trap:
+ case NVPTXISD::Suld1DArrayV4I32Trap:
+ case NVPTXISD::Suld2DI8Trap:
+ case NVPTXISD::Suld2DI16Trap:
+ case NVPTXISD::Suld2DI32Trap:
+ case NVPTXISD::Suld2DI64Trap:
+ case NVPTXISD::Suld2DV2I8Trap:
+ case NVPTXISD::Suld2DV2I16Trap:
+ case NVPTXISD::Suld2DV2I32Trap:
+ case NVPTXISD::Suld2DV2I64Trap:
+ case NVPTXISD::Suld2DV4I8Trap:
+ case NVPTXISD::Suld2DV4I16Trap:
+ case NVPTXISD::Suld2DV4I32Trap:
+ case NVPTXISD::Suld2DArrayI8Trap:
+ case NVPTXISD::Suld2DArrayI16Trap:
+ case NVPTXISD::Suld2DArrayI32Trap:
+ case NVPTXISD::Suld2DArrayI64Trap:
+ case NVPTXISD::Suld2DArrayV2I8Trap:
+ case NVPTXISD::Suld2DArrayV2I16Trap:
+ case NVPTXISD::Suld2DArrayV2I32Trap:
+ case NVPTXISD::Suld2DArrayV2I64Trap:
+ case NVPTXISD::Suld2DArrayV4I8Trap:
+ case NVPTXISD::Suld2DArrayV4I16Trap:
+ case NVPTXISD::Suld2DArrayV4I32Trap:
+ case NVPTXISD::Suld3DI8Trap:
+ case NVPTXISD::Suld3DI16Trap:
+ case NVPTXISD::Suld3DI32Trap:
+ case NVPTXISD::Suld3DI64Trap:
+ case NVPTXISD::Suld3DV2I8Trap:
+ case NVPTXISD::Suld3DV2I16Trap:
+ case NVPTXISD::Suld3DV2I32Trap:
+ case NVPTXISD::Suld3DV2I64Trap:
+ case NVPTXISD::Suld3DV4I8Trap:
+ case NVPTXISD::Suld3DV4I16Trap:
+ case NVPTXISD::Suld3DV4I32Trap:
+ case NVPTXISD::Suld1DI8Zero:
+ case NVPTXISD::Suld1DI16Zero:
+ case NVPTXISD::Suld1DI32Zero:
+ case NVPTXISD::Suld1DI64Zero:
+ case NVPTXISD::Suld1DV2I8Zero:
+ case NVPTXISD::Suld1DV2I16Zero:
+ case NVPTXISD::Suld1DV2I32Zero:
+ case NVPTXISD::Suld1DV2I64Zero:
+ case NVPTXISD::Suld1DV4I8Zero:
+ case NVPTXISD::Suld1DV4I16Zero:
+ case NVPTXISD::Suld1DV4I32Zero:
+ case NVPTXISD::Suld1DArrayI8Zero:
+ case NVPTXISD::Suld1DArrayI16Zero:
+ case NVPTXISD::Suld1DArrayI32Zero:
+ case NVPTXISD::Suld1DArrayI64Zero:
+ case NVPTXISD::Suld1DArrayV2I8Zero:
+ case NVPTXISD::Suld1DArrayV2I16Zero:
+ case NVPTXISD::Suld1DArrayV2I32Zero:
+ case NVPTXISD::Suld1DArrayV2I64Zero:
+ case NVPTXISD::Suld1DArrayV4I8Zero:
+ case NVPTXISD::Suld1DArrayV4I16Zero:
+ case NVPTXISD::Suld1DArrayV4I32Zero:
+ case NVPTXISD::Suld2DI8Zero:
+ case NVPTXISD::Suld2DI16Zero:
+ case NVPTXISD::Suld2DI32Zero:
+ case NVPTXISD::Suld2DI64Zero:
+ case NVPTXISD::Suld2DV2I8Zero:
+ case NVPTXISD::Suld2DV2I16Zero:
+ case NVPTXISD::Suld2DV2I32Zero:
+ case NVPTXISD::Suld2DV2I64Zero:
+ case NVPTXISD::Suld2DV4I8Zero:
+ case NVPTXISD::Suld2DV4I16Zero:
+ case NVPTXISD::Suld2DV4I32Zero:
+ case NVPTXISD::Suld2DArrayI8Zero:
+ case NVPTXISD::Suld2DArrayI16Zero:
+ case NVPTXISD::Suld2DArrayI32Zero:
+ case NVPTXISD::Suld2DArrayI64Zero:
+ case NVPTXISD::Suld2DArrayV2I8Zero:
+ case NVPTXISD::Suld2DArrayV2I16Zero:
+ case NVPTXISD::Suld2DArrayV2I32Zero:
+ case NVPTXISD::Suld2DArrayV2I64Zero:
+ case NVPTXISD::Suld2DArrayV4I8Zero:
+ case NVPTXISD::Suld2DArrayV4I16Zero:
+ case NVPTXISD::Suld2DArrayV4I32Zero:
+ case NVPTXISD::Suld3DI8Zero:
+ case NVPTXISD::Suld3DI16Zero:
+ case NVPTXISD::Suld3DI32Zero:
+ case NVPTXISD::Suld3DI64Zero:
+ case NVPTXISD::Suld3DV2I8Zero:
+ case NVPTXISD::Suld3DV2I16Zero:
+ case NVPTXISD::Suld3DV2I32Zero:
+ case NVPTXISD::Suld3DV2I64Zero:
+ case NVPTXISD::Suld3DV4I8Zero:
+ case NVPTXISD::Suld3DV4I16Zero:
+ case NVPTXISD::Suld3DV4I32Zero:
+ ResNode = SelectSurfaceIntrinsic(N);
+ break;
+ case ISD::AND:
+ case ISD::SRA:
+ case ISD::SRL:
+ // Try to select BFE
+ ResNode = SelectBFE(N);
+ break;
+ case ISD::ADDRSPACECAST:
+ ResNode = SelectAddrSpaceCast(N);
+ break;
default:
break;
}
@@ -170,9 +510,24 @@ SDNode *NVPTXDAGToDAGISel::Select(SDNode *N) {
return SelectCode(N);
}
+SDNode *NVPTXDAGToDAGISel::SelectIntrinsicChain(SDNode *N) {
+ unsigned IID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ switch (IID) {
+ default:
+    return nullptr;
+ case Intrinsic::nvvm_ldg_global_f:
+ case Intrinsic::nvvm_ldg_global_i:
+ case Intrinsic::nvvm_ldg_global_p:
+ case Intrinsic::nvvm_ldu_global_f:
+ case Intrinsic::nvvm_ldu_global_i:
+ case Intrinsic::nvvm_ldu_global_p:
+ return SelectLDGLDU(N);
+ }
+}
+
static unsigned int getCodeAddrSpace(MemSDNode *N,
const NVPTXSubtarget &Subtarget) {
- const Value *Src = N->getSrcValue();
+ const Value *Src = N->getMemOperand()->getValue();
if (!Src)
return NVPTX::PTXLdStInstCode::GENERIC;
@@ -191,18 +546,96 @@ static unsigned int getCodeAddrSpace(MemSDNode *N,
return NVPTX::PTXLdStInstCode::GENERIC;
}
+SDNode *NVPTXDAGToDAGISel::SelectIntrinsicNoChain(SDNode *N) {
+ unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ switch (IID) {
+ default:
+ return nullptr;
+ case Intrinsic::nvvm_texsurf_handle_internal:
+ return SelectTexSurfHandle(N);
+ }
+}
+
+SDNode *NVPTXDAGToDAGISel::SelectTexSurfHandle(SDNode *N) {
+ // Op 0 is the intrinsic ID
+ SDValue Wrapper = N->getOperand(1);
+ SDValue GlobalVal = Wrapper.getOperand(0);
+ return CurDAG->getMachineNode(NVPTX::texsurf_handles, SDLoc(N), MVT::i64,
+ GlobalVal);
+}
+
+SDNode *NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
+ SDValue Src = N->getOperand(0);
+ AddrSpaceCastSDNode *CastN = cast<AddrSpaceCastSDNode>(N);
+ unsigned SrcAddrSpace = CastN->getSrcAddressSpace();
+ unsigned DstAddrSpace = CastN->getDestAddressSpace();
+
+ assert(SrcAddrSpace != DstAddrSpace &&
+ "addrspacecast must be between different address spaces");
+
+ if (DstAddrSpace == ADDRESS_SPACE_GENERIC) {
+ // Specific to generic
+ unsigned Opc;
+ switch (SrcAddrSpace) {
+ default: report_fatal_error("Bad address space in addrspacecast");
+ case ADDRESS_SPACE_GLOBAL:
+ Opc = Subtarget.is64Bit() ? NVPTX::cvta_global_yes_64
+ : NVPTX::cvta_global_yes;
+ break;
+ case ADDRESS_SPACE_SHARED:
+ Opc = Subtarget.is64Bit() ? NVPTX::cvta_shared_yes_64
+ : NVPTX::cvta_shared_yes;
+ break;
+ case ADDRESS_SPACE_CONST:
+ Opc = Subtarget.is64Bit() ? NVPTX::cvta_const_yes_64
+ : NVPTX::cvta_const_yes;
+ break;
+ case ADDRESS_SPACE_LOCAL:
+ Opc = Subtarget.is64Bit() ? NVPTX::cvta_local_yes_64
+ : NVPTX::cvta_local_yes;
+ break;
+ }
+ return CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0), Src);
+ } else {
+ // Generic to specific
+ if (SrcAddrSpace != 0)
+ report_fatal_error("Cannot cast between two non-generic address spaces");
+ unsigned Opc;
+ switch (DstAddrSpace) {
+ default: report_fatal_error("Bad address space in addrspacecast");
+ case ADDRESS_SPACE_GLOBAL:
+ Opc = Subtarget.is64Bit() ? NVPTX::cvta_to_global_yes_64
+ : NVPTX::cvta_to_global_yes;
+ break;
+ case ADDRESS_SPACE_SHARED:
+ Opc = Subtarget.is64Bit() ? NVPTX::cvta_to_shared_yes_64
+ : NVPTX::cvta_to_shared_yes;
+ break;
+ case ADDRESS_SPACE_CONST:
+ Opc = Subtarget.is64Bit() ? NVPTX::cvta_to_const_yes_64
+ : NVPTX::cvta_to_const_yes;
+ break;
+ case ADDRESS_SPACE_LOCAL:
+ Opc = Subtarget.is64Bit() ? NVPTX::cvta_to_local_yes_64
+ : NVPTX::cvta_to_local_yes;
+ break;
+ }
+ return CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0), Src);
+ }
+}
+
SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
SDLoc dl(N);
LoadSDNode *LD = cast<LoadSDNode>(N);
EVT LoadedVT = LD->getMemoryVT();
- SDNode *NVPTXLD = NULL;
+ SDNode *NVPTXLD = nullptr;
// do not support pre/post inc/dec
if (LD->isIndexed())
- return NULL;
+ return nullptr;
if (!LoadedVT.isSimple())
- return NULL;
+ return nullptr;
// Address Space Setting
unsigned int codeAddrSpace = getCodeAddrSpace(LD, Subtarget);
@@ -225,7 +658,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
else if (num == 4)
vecType = NVPTX::PTXLdStInstCode::V4;
else
- return NULL;
+ return nullptr;
}
// Type Setting: fromType + fromTypeWidth
@@ -274,7 +707,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
Opcode = NVPTX::LD_f64_avar;
break;
default:
- return NULL;
+ return nullptr;
}
SDValue Ops[] = { getI32Imm(isVolatile), getI32Imm(codeAddrSpace),
getI32Imm(vecType), getI32Imm(fromType),
@@ -303,7 +736,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
Opcode = NVPTX::LD_f64_asi;
break;
default:
- return NULL;
+ return nullptr;
}
SDValue Ops[] = { getI32Imm(isVolatile), getI32Imm(codeAddrSpace),
getI32Imm(vecType), getI32Imm(fromType),
@@ -333,7 +766,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
Opcode = NVPTX::LD_f64_ari_64;
break;
default:
- return NULL;
+ return nullptr;
}
} else {
switch (TargetVT) {
@@ -356,7 +789,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
Opcode = NVPTX::LD_f64_ari;
break;
default:
- return NULL;
+ return nullptr;
}
}
SDValue Ops[] = { getI32Imm(isVolatile), getI32Imm(codeAddrSpace),
@@ -385,7 +818,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
Opcode = NVPTX::LD_f64_areg_64;
break;
default:
- return NULL;
+ return nullptr;
}
} else {
switch (TargetVT) {
@@ -408,7 +841,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
Opcode = NVPTX::LD_f64_areg;
break;
default:
- return NULL;
+ return nullptr;
}
}
SDValue Ops[] = { getI32Imm(isVolatile), getI32Imm(codeAddrSpace),
@@ -417,7 +850,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT, MVT::Other, Ops);
}
- if (NVPTXLD != NULL) {
+ if (NVPTXLD) {
MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1);
MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand();
cast<MachineSDNode>(NVPTXLD)->setMemRefs(MemRefs0, MemRefs0 + 1);
@@ -438,7 +871,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
EVT LoadedVT = MemSD->getMemoryVT();
if (!LoadedVT.isSimple())
- return NULL;
+ return nullptr;
// Address Space Setting
unsigned int CodeAddrSpace = getCodeAddrSpace(MemSD, Subtarget);
@@ -484,7 +917,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
VecType = NVPTX::PTXLdStInstCode::V4;
break;
default:
- return NULL;
+ return nullptr;
}
EVT EltVT = N->getValueType(0);
@@ -492,11 +925,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
if (SelectDirectAddr(Op1, Addr)) {
switch (N->getOpcode()) {
default:
- return NULL;
+ return nullptr;
case NVPTXISD::LoadV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v2_avar;
break;
@@ -520,7 +953,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
case NVPTXISD::LoadV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v4_avar;
break;
@@ -546,11 +979,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
: SelectADDRsi(Op1.getNode(), Op1, Base, Offset)) {
switch (N->getOpcode()) {
default:
- return NULL;
+ return nullptr;
case NVPTXISD::LoadV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v2_asi;
break;
@@ -574,7 +1007,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
case NVPTXISD::LoadV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v4_asi;
break;
@@ -601,11 +1034,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
if (Subtarget.is64Bit()) {
switch (N->getOpcode()) {
default:
- return NULL;
+ return nullptr;
case NVPTXISD::LoadV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v2_ari_64;
break;
@@ -629,7 +1062,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
case NVPTXISD::LoadV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v4_ari_64;
break;
@@ -648,11 +1081,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
} else {
switch (N->getOpcode()) {
default:
- return NULL;
+ return nullptr;
case NVPTXISD::LoadV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v2_ari;
break;
@@ -676,7 +1109,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
case NVPTXISD::LoadV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v4_ari;
break;
@@ -703,11 +1136,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
if (Subtarget.is64Bit()) {
switch (N->getOpcode()) {
default:
- return NULL;
+ return nullptr;
case NVPTXISD::LoadV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v2_areg_64;
break;
@@ -731,7 +1164,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
case NVPTXISD::LoadV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v4_areg_64;
break;
@@ -750,11 +1183,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
} else {
switch (N->getOpcode()) {
default:
- return NULL;
+ return nullptr;
case NVPTXISD::LoadV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v2_areg;
break;
@@ -778,7 +1211,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
case NVPTXISD::LoadV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v4_areg;
break;
@@ -809,26 +1242,105 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
return LD;
}
-SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
+SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
SDValue Chain = N->getOperand(0);
- SDValue Op1 = N->getOperand(1);
+ SDValue Op1;
+ MemSDNode *Mem;
+ bool IsLDG = true;
+
+  // If this is an LDG intrinsic, the address is the third operand. If it is
+  // an LDG/LDU SD node (from custom vector handling), it is the second operand.
+ if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
+ Op1 = N->getOperand(2);
+ Mem = cast<MemIntrinsicSDNode>(N);
+ unsigned IID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ switch (IID) {
+ default:
+      return nullptr;
+ case Intrinsic::nvvm_ldg_global_f:
+ case Intrinsic::nvvm_ldg_global_i:
+ case Intrinsic::nvvm_ldg_global_p:
+ IsLDG = true;
+ break;
+ case Intrinsic::nvvm_ldu_global_f:
+ case Intrinsic::nvvm_ldu_global_i:
+ case Intrinsic::nvvm_ldu_global_p:
+ IsLDG = false;
+ break;
+ }
+ } else {
+ Op1 = N->getOperand(1);
+ Mem = cast<MemSDNode>(N);
+ }
+
unsigned Opcode;
SDLoc DL(N);
SDNode *LD;
- MemSDNode *Mem = cast<MemSDNode>(N);
SDValue Base, Offset, Addr;
- EVT EltVT = Mem->getMemoryVT().getVectorElementType();
+ EVT EltVT = Mem->getMemoryVT();
+ if (EltVT.isVector()) {
+ EltVT = EltVT.getVectorElementType();
+ }
if (SelectDirectAddr(Op1, Addr)) {
switch (N->getOpcode()) {
default:
- return NULL;
+ return nullptr;
+ case ISD::INTRINSIC_W_CHAIN:
+ if (IsLDG) {
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return nullptr;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i8avar;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i16avar;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i32avar;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i64avar;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_f32avar;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_f64avar;
+ break;
+ }
+ } else {
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return nullptr;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i8avar;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i16avar;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i32avar;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i64avar;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_f32avar;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_f64avar;
+ break;
+ }
+ }
+ break;
case NVPTXISD::LDGV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_avar;
break;
@@ -852,7 +1364,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
case NVPTXISD::LDUV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_avar;
break;
@@ -876,7 +1388,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
case NVPTXISD::LDGV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_avar;
break;
@@ -894,7 +1406,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
case NVPTXISD::LDUV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_avar;
break;
@@ -912,19 +1424,67 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
}
SDValue Ops[] = { Addr, Chain };
- LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(),
- ArrayRef<SDValue>(Ops, 2));
+ LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops);
} else if (Subtarget.is64Bit()
? SelectADDRri64(Op1.getNode(), Op1, Base, Offset)
: SelectADDRri(Op1.getNode(), Op1, Base, Offset)) {
if (Subtarget.is64Bit()) {
switch (N->getOpcode()) {
default:
- return NULL;
+ return nullptr;
+ case ISD::INTRINSIC_W_CHAIN:
+ if (IsLDG) {
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return nullptr;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i8ari64;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i16ari64;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i32ari64;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i64ari64;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_f32ari64;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_f64ari64;
+ break;
+ }
+ } else {
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return nullptr;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i8ari64;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i16ari64;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i32ari64;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i64ari64;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_f32ari64;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_f64ari64;
+ break;
+ }
+ }
+ break;
case NVPTXISD::LDGV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_ari64;
break;
@@ -948,7 +1508,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
case NVPTXISD::LDUV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_ari64;
break;
@@ -972,7 +1532,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
case NVPTXISD::LDGV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_ari64;
break;
@@ -990,7 +1550,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
case NVPTXISD::LDUV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_ari64;
break;
@@ -1009,11 +1569,60 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
} else {
switch (N->getOpcode()) {
default:
- return NULL;
+ return nullptr;
+ case ISD::INTRINSIC_W_CHAIN:
+ if (IsLDG) {
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return nullptr;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i8ari;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i16ari;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i32ari;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i64ari;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_f32ari;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_f64ari;
+ break;
+ }
+ } else {
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return nullptr;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i8ari;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i16ari;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i32ari;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i64ari;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_f32ari;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_f64ari;
+ break;
+ }
+ }
+ break;
case NVPTXISD::LDGV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_ari32;
break;
@@ -1037,7 +1646,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
case NVPTXISD::LDUV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_ari32;
break;
@@ -1061,7 +1670,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
case NVPTXISD::LDGV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_ari32;
break;
@@ -1079,7 +1688,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
case NVPTXISD::LDUV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_ari32;
break;
@@ -1099,17 +1708,65 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
SDValue Ops[] = { Base, Offset, Chain };
- LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(),
- ArrayRef<SDValue>(Ops, 3));
+ LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops);
} else {
if (Subtarget.is64Bit()) {
switch (N->getOpcode()) {
default:
- return NULL;
+ return nullptr;
+ case ISD::INTRINSIC_W_CHAIN:
+ if (IsLDG) {
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return nullptr;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i8areg64;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i16areg64;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i32areg64;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i64areg64;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_f32areg64;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_f64areg64;
+ break;
+ }
+ } else {
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return nullptr;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i8areg64;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i16areg64;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i32areg64;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i64areg64;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_f32areg64;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_f64areg64;
+ break;
+ }
+ }
+ break;
case NVPTXISD::LDGV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_areg64;
break;
@@ -1133,7 +1790,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
case NVPTXISD::LDUV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_areg64;
break;
@@ -1157,7 +1814,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
case NVPTXISD::LDGV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_areg64;
break;
@@ -1175,7 +1832,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
case NVPTXISD::LDUV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_areg64;
break;
@@ -1194,11 +1851,60 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
} else {
switch (N->getOpcode()) {
default:
- return NULL;
+ return nullptr;
+ case ISD::INTRINSIC_W_CHAIN:
+ if (IsLDG) {
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return nullptr;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i8areg;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i16areg;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i32areg;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i64areg;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_f32areg;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::INT_PTX_LDG_GLOBAL_f64areg;
+ break;
+ }
+ } else {
+ switch (EltVT.getSimpleVT().SimpleTy) {
+ default:
+ return nullptr;
+ case MVT::i8:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i8areg;
+ break;
+ case MVT::i16:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i16areg;
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i32areg;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i64areg;
+ break;
+ case MVT::f32:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_f32areg;
+ break;
+ case MVT::f64:
+ Opcode = NVPTX::INT_PTX_LDU_GLOBAL_f64areg;
+ break;
+ }
+ }
+ break;
case NVPTXISD::LDGV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_areg32;
break;
@@ -1222,7 +1928,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
case NVPTXISD::LDUV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_areg32;
break;
@@ -1246,7 +1952,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
case NVPTXISD::LDGV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_areg32;
break;
@@ -1264,7 +1970,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
case NVPTXISD::LDUV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_areg32;
break;
@@ -1283,12 +1989,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) {
}
SDValue Ops[] = { Op1, Chain };
- LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(),
- ArrayRef<SDValue>(Ops, 2));
+ LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops);
}
MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1);
- MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand();
+ MemRefs0[0] = Mem->getMemOperand();
cast<MachineSDNode>(LD)->setMemRefs(MemRefs0, MemRefs0 + 1);
return LD;
@@ -1298,14 +2003,14 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
SDLoc dl(N);
StoreSDNode *ST = cast<StoreSDNode>(N);
EVT StoreVT = ST->getMemoryVT();
- SDNode *NVPTXST = NULL;
+ SDNode *NVPTXST = nullptr;
// do not support pre/post inc/dec
if (ST->isIndexed())
- return NULL;
+ return nullptr;
if (!StoreVT.isSimple())
- return NULL;
+ return nullptr;
// Address Space Setting
unsigned int codeAddrSpace = getCodeAddrSpace(ST, Subtarget);
@@ -1328,7 +2033,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
else if (num == 4)
vecType = NVPTX::PTXLdStInstCode::V4;
else
- return NULL;
+ return nullptr;
}
// Type Setting: toType + toTypeWidth
@@ -1372,7 +2077,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
Opcode = NVPTX::ST_f64_avar;
break;
default:
- return NULL;
+ return nullptr;
}
SDValue Ops[] = { N1, getI32Imm(isVolatile), getI32Imm(codeAddrSpace),
getI32Imm(vecType), getI32Imm(toType),
@@ -1401,7 +2106,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
Opcode = NVPTX::ST_f64_asi;
break;
default:
- return NULL;
+ return nullptr;
}
SDValue Ops[] = { N1, getI32Imm(isVolatile), getI32Imm(codeAddrSpace),
getI32Imm(vecType), getI32Imm(toType),
@@ -1431,7 +2136,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
Opcode = NVPTX::ST_f64_ari_64;
break;
default:
- return NULL;
+ return nullptr;
}
} else {
switch (SourceVT) {
@@ -1454,7 +2159,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
Opcode = NVPTX::ST_f64_ari;
break;
default:
- return NULL;
+ return nullptr;
}
}
SDValue Ops[] = { N1, getI32Imm(isVolatile), getI32Imm(codeAddrSpace),
@@ -1483,7 +2188,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
Opcode = NVPTX::ST_f64_areg_64;
break;
default:
- return NULL;
+ return nullptr;
}
} else {
switch (SourceVT) {
@@ -1506,7 +2211,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
Opcode = NVPTX::ST_f64_areg;
break;
default:
- return NULL;
+ return nullptr;
}
}
SDValue Ops[] = { N1, getI32Imm(isVolatile), getI32Imm(codeAddrSpace),
@@ -1515,7 +2220,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
NVPTXST = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops);
}
- if (NVPTXST != NULL) {
+ if (NVPTXST) {
MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1);
MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand();
cast<MachineSDNode>(NVPTXST)->setMemRefs(MemRefs0, MemRefs0 + 1);
@@ -1582,7 +2287,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
N2 = N->getOperand(5);
break;
default:
- return NULL;
+ return nullptr;
}
StOps.push_back(getI32Imm(IsVolatile));
@@ -1594,11 +2299,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
if (SelectDirectAddr(N2, Addr)) {
switch (N->getOpcode()) {
default:
- return NULL;
+ return nullptr;
case NVPTXISD::StoreV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::STV_i8_v2_avar;
break;
@@ -1622,7 +2327,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
case NVPTXISD::StoreV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::STV_i8_v4_avar;
break;
@@ -1644,11 +2349,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
: SelectADDRsi(N2.getNode(), N2, Base, Offset)) {
switch (N->getOpcode()) {
default:
- return NULL;
+ return nullptr;
case NVPTXISD::StoreV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::STV_i8_v2_asi;
break;
@@ -1672,7 +2377,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
case NVPTXISD::StoreV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::STV_i8_v4_asi;
break;
@@ -1696,11 +2401,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
if (Subtarget.is64Bit()) {
switch (N->getOpcode()) {
default:
- return NULL;
+ return nullptr;
case NVPTXISD::StoreV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::STV_i8_v2_ari_64;
break;
@@ -1724,7 +2429,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
case NVPTXISD::StoreV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::STV_i8_v4_ari_64;
break;
@@ -1743,11 +2448,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
} else {
switch (N->getOpcode()) {
default:
- return NULL;
+ return nullptr;
case NVPTXISD::StoreV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::STV_i8_v2_ari;
break;
@@ -1771,7 +2476,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
case NVPTXISD::StoreV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::STV_i8_v4_ari;
break;
@@ -1794,11 +2499,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
if (Subtarget.is64Bit()) {
switch (N->getOpcode()) {
default:
- return NULL;
+ return nullptr;
case NVPTXISD::StoreV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::STV_i8_v2_areg_64;
break;
@@ -1822,7 +2527,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
case NVPTXISD::StoreV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::STV_i8_v4_areg_64;
break;
@@ -1841,11 +2546,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
} else {
switch (N->getOpcode()) {
default:
- return NULL;
+ return nullptr;
case NVPTXISD::StoreV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::STV_i8_v2_areg;
break;
@@ -1869,7 +2574,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
case NVPTXISD::StoreV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i8:
Opcode = NVPTX::STV_i8_v4_areg;
break;
@@ -1910,7 +2615,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadParam(SDNode *Node) {
unsigned VecSize;
switch (Node->getOpcode()) {
default:
- return NULL;
+ return nullptr;
case NVPTXISD::LoadParam:
VecSize = 1;
break;
@@ -1929,11 +2634,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadParam(SDNode *Node) {
switch (VecSize) {
default:
- return NULL;
+ return nullptr;
case 1:
switch (MemVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i1:
Opc = NVPTX::LoadParamMemI8;
break;
@@ -1960,7 +2665,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadParam(SDNode *Node) {
case 2:
switch (MemVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i1:
Opc = NVPTX::LoadParamMemV2I8;
break;
@@ -1987,7 +2692,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadParam(SDNode *Node) {
case 4:
switch (MemVT.getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i1:
Opc = NVPTX::LoadParamMemV4I8;
break;
@@ -2014,7 +2719,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadParam(SDNode *Node) {
VTs = CurDAG->getVTList(EltVT, EltVT, MVT::Other, MVT::Glue);
} else {
EVT EVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other, MVT::Glue };
- VTs = CurDAG->getVTList(&EVTs[0], 5);
+ VTs = CurDAG->getVTList(EVTs);
}
unsigned OffsetVal = cast<ConstantSDNode>(Offset)->getZExtValue();
@@ -2040,7 +2745,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreRetval(SDNode *N) {
unsigned NumElts = 1;
switch (N->getOpcode()) {
default:
- return NULL;
+ return nullptr;
case NVPTXISD::StoreRetval:
NumElts = 1;
break;
@@ -2065,11 +2770,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreRetval(SDNode *N) {
unsigned Opcode = 0;
switch (NumElts) {
default:
- return NULL;
+ return nullptr;
case 1:
switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i1:
Opcode = NVPTX::StoreRetvalI8;
break;
@@ -2096,7 +2801,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreRetval(SDNode *N) {
case 2:
switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i1:
Opcode = NVPTX::StoreRetvalV2I8;
break;
@@ -2123,7 +2828,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreRetval(SDNode *N) {
case 4:
switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i1:
Opcode = NVPTX::StoreRetvalV4I8;
break;
@@ -2166,7 +2871,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreParam(SDNode *N) {
unsigned NumElts = 1;
switch (N->getOpcode()) {
default:
- return NULL;
+ return nullptr;
case NVPTXISD::StoreParamU32:
case NVPTXISD::StoreParamS32:
case NVPTXISD::StoreParam:
@@ -2197,11 +2902,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreParam(SDNode *N) {
default:
switch (NumElts) {
default:
- return NULL;
+ return nullptr;
case 1:
switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i1:
Opcode = NVPTX::StoreParamI8;
break;
@@ -2228,7 +2933,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreParam(SDNode *N) {
case 2:
switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i1:
Opcode = NVPTX::StoreParamV2I8;
break;
@@ -2255,7 +2960,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreParam(SDNode *N) {
case 4:
switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) {
default:
- return NULL;
+ return nullptr;
case MVT::i1:
Opcode = NVPTX::StoreParamV4I8;
break;
@@ -2308,6 +3013,1940 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreParam(SDNode *N) {
return Ret;
}
+SDNode *NVPTXDAGToDAGISel::SelectTextureIntrinsic(SDNode *N) {
+ SDValue Chain = N->getOperand(0);
+ SDNode *Ret = nullptr;
+ unsigned Opc = 0;
+ SmallVector<SDValue, 8> Ops;
+
+ switch (N->getOpcode()) {
+ default: return nullptr;
+ case NVPTXISD::Tex1DFloatS32:
+ Opc = NVPTX::TEX_1D_F32_S32;
+ break;
+ case NVPTXISD::Tex1DFloatFloat:
+ Opc = NVPTX::TEX_1D_F32_F32;
+ break;
+ case NVPTXISD::Tex1DFloatFloatLevel:
+ Opc = NVPTX::TEX_1D_F32_F32_LEVEL;
+ break;
+ case NVPTXISD::Tex1DFloatFloatGrad:
+ Opc = NVPTX::TEX_1D_F32_F32_GRAD;
+ break;
+ case NVPTXISD::Tex1DS32S32:
+ Opc = NVPTX::TEX_1D_S32_S32;
+ break;
+ case NVPTXISD::Tex1DS32Float:
+ Opc = NVPTX::TEX_1D_S32_F32;
+ break;
+ case NVPTXISD::Tex1DS32FloatLevel:
+ Opc = NVPTX::TEX_1D_S32_F32_LEVEL;
+ break;
+ case NVPTXISD::Tex1DS32FloatGrad:
+ Opc = NVPTX::TEX_1D_S32_F32_GRAD;
+ break;
+ case NVPTXISD::Tex1DU32S32:
+ Opc = NVPTX::TEX_1D_U32_S32;
+ break;
+ case NVPTXISD::Tex1DU32Float:
+ Opc = NVPTX::TEX_1D_U32_F32;
+ break;
+ case NVPTXISD::Tex1DU32FloatLevel:
+ Opc = NVPTX::TEX_1D_U32_F32_LEVEL;
+ break;
+ case NVPTXISD::Tex1DU32FloatGrad:
+ Opc = NVPTX::TEX_1D_U32_F32_GRAD;
+ break;
+ case NVPTXISD::Tex1DArrayFloatS32:
+ Opc = NVPTX::TEX_1D_ARRAY_F32_S32;
+ break;
+ case NVPTXISD::Tex1DArrayFloatFloat:
+ Opc = NVPTX::TEX_1D_ARRAY_F32_F32;
+ break;
+ case NVPTXISD::Tex1DArrayFloatFloatLevel:
+ Opc = NVPTX::TEX_1D_ARRAY_F32_F32_LEVEL;
+ break;
+ case NVPTXISD::Tex1DArrayFloatFloatGrad:
+ Opc = NVPTX::TEX_1D_ARRAY_F32_F32_GRAD;
+ break;
+ case NVPTXISD::Tex1DArrayS32S32:
+ Opc = NVPTX::TEX_1D_ARRAY_S32_S32;
+ break;
+ case NVPTXISD::Tex1DArrayS32Float:
+ Opc = NVPTX::TEX_1D_ARRAY_S32_F32;
+ break;
+ case NVPTXISD::Tex1DArrayS32FloatLevel:
+ Opc = NVPTX::TEX_1D_ARRAY_S32_F32_LEVEL;
+ break;
+ case NVPTXISD::Tex1DArrayS32FloatGrad:
+ Opc = NVPTX::TEX_1D_ARRAY_S32_F32_GRAD;
+ break;
+ case NVPTXISD::Tex1DArrayU32S32:
+ Opc = NVPTX::TEX_1D_ARRAY_U32_S32;
+ break;
+ case NVPTXISD::Tex1DArrayU32Float:
+ Opc = NVPTX::TEX_1D_ARRAY_U32_F32;
+ break;
+ case NVPTXISD::Tex1DArrayU32FloatLevel:
+ Opc = NVPTX::TEX_1D_ARRAY_U32_F32_LEVEL;
+ break;
+ case NVPTXISD::Tex1DArrayU32FloatGrad:
+ Opc = NVPTX::TEX_1D_ARRAY_U32_F32_GRAD;
+ break;
+ case NVPTXISD::Tex2DFloatS32:
+ Opc = NVPTX::TEX_2D_F32_S32;
+ break;
+ case NVPTXISD::Tex2DFloatFloat:
+ Opc = NVPTX::TEX_2D_F32_F32;
+ break;
+ case NVPTXISD::Tex2DFloatFloatLevel:
+ Opc = NVPTX::TEX_2D_F32_F32_LEVEL;
+ break;
+ case NVPTXISD::Tex2DFloatFloatGrad:
+ Opc = NVPTX::TEX_2D_F32_F32_GRAD;
+ break;
+ case NVPTXISD::Tex2DS32S32:
+ Opc = NVPTX::TEX_2D_S32_S32;
+ break;
+ case NVPTXISD::Tex2DS32Float:
+ Opc = NVPTX::TEX_2D_S32_F32;
+ break;
+ case NVPTXISD::Tex2DS32FloatLevel:
+ Opc = NVPTX::TEX_2D_S32_F32_LEVEL;
+ break;
+ case NVPTXISD::Tex2DS32FloatGrad:
+ Opc = NVPTX::TEX_2D_S32_F32_GRAD;
+ break;
+ case NVPTXISD::Tex2DU32S32:
+ Opc = NVPTX::TEX_2D_U32_S32;
+ break;
+ case NVPTXISD::Tex2DU32Float:
+ Opc = NVPTX::TEX_2D_U32_F32;
+ break;
+ case NVPTXISD::Tex2DU32FloatLevel:
+ Opc = NVPTX::TEX_2D_U32_F32_LEVEL;
+ break;
+ case NVPTXISD::Tex2DU32FloatGrad:
+ Opc = NVPTX::TEX_2D_U32_F32_GRAD;
+ break;
+ case NVPTXISD::Tex2DArrayFloatS32:
+ Opc = NVPTX::TEX_2D_ARRAY_F32_S32;
+ break;
+ case NVPTXISD::Tex2DArrayFloatFloat:
+ Opc = NVPTX::TEX_2D_ARRAY_F32_F32;
+ break;
+ case NVPTXISD::Tex2DArrayFloatFloatLevel:
+ Opc = NVPTX::TEX_2D_ARRAY_F32_F32_LEVEL;
+ break;
+ case NVPTXISD::Tex2DArrayFloatFloatGrad:
+ Opc = NVPTX::TEX_2D_ARRAY_F32_F32_GRAD;
+ break;
+ case NVPTXISD::Tex2DArrayS32S32:
+ Opc = NVPTX::TEX_2D_ARRAY_S32_S32;
+ break;
+ case NVPTXISD::Tex2DArrayS32Float:
+ Opc = NVPTX::TEX_2D_ARRAY_S32_F32;
+ break;
+ case NVPTXISD::Tex2DArrayS32FloatLevel:
+ Opc = NVPTX::TEX_2D_ARRAY_S32_F32_LEVEL;
+ break;
+ case NVPTXISD::Tex2DArrayS32FloatGrad:
+ Opc = NVPTX::TEX_2D_ARRAY_S32_F32_GRAD;
+ break;
+ case NVPTXISD::Tex2DArrayU32S32:
+ Opc = NVPTX::TEX_2D_ARRAY_U32_S32;
+ break;
+ case NVPTXISD::Tex2DArrayU32Float:
+ Opc = NVPTX::TEX_2D_ARRAY_U32_F32;
+ break;
+ case NVPTXISD::Tex2DArrayU32FloatLevel:
+ Opc = NVPTX::TEX_2D_ARRAY_U32_F32_LEVEL;
+ break;
+ case NVPTXISD::Tex2DArrayU32FloatGrad:
+ Opc = NVPTX::TEX_2D_ARRAY_U32_F32_GRAD;
+ break;
+ case NVPTXISD::Tex3DFloatS32:
+ Opc = NVPTX::TEX_3D_F32_S32;
+ break;
+ case NVPTXISD::Tex3DFloatFloat:
+ Opc = NVPTX::TEX_3D_F32_F32;
+ break;
+ case NVPTXISD::Tex3DFloatFloatLevel:
+ Opc = NVPTX::TEX_3D_F32_F32_LEVEL;
+ break;
+ case NVPTXISD::Tex3DFloatFloatGrad:
+ Opc = NVPTX::TEX_3D_F32_F32_GRAD;
+ break;
+ case NVPTXISD::Tex3DS32S32:
+ Opc = NVPTX::TEX_3D_S32_S32;
+ break;
+ case NVPTXISD::Tex3DS32Float:
+ Opc = NVPTX::TEX_3D_S32_F32;
+ break;
+ case NVPTXISD::Tex3DS32FloatLevel:
+ Opc = NVPTX::TEX_3D_S32_F32_LEVEL;
+ break;
+ case NVPTXISD::Tex3DS32FloatGrad:
+ Opc = NVPTX::TEX_3D_S32_F32_GRAD;
+ break;
+ case NVPTXISD::Tex3DU32S32:
+ Opc = NVPTX::TEX_3D_U32_S32;
+ break;
+ case NVPTXISD::Tex3DU32Float:
+ Opc = NVPTX::TEX_3D_U32_F32;
+ break;
+ case NVPTXISD::Tex3DU32FloatLevel:
+ Opc = NVPTX::TEX_3D_U32_F32_LEVEL;
+ break;
+ case NVPTXISD::Tex3DU32FloatGrad:
+ Opc = NVPTX::TEX_3D_U32_F32_GRAD;
+ break;
+ case NVPTXISD::TexCubeFloatFloat:
+ Opc = NVPTX::TEX_CUBE_F32_F32;
+ break;
+ case NVPTXISD::TexCubeFloatFloatLevel:
+ Opc = NVPTX::TEX_CUBE_F32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexCubeS32Float:
+ Opc = NVPTX::TEX_CUBE_S32_F32;
+ break;
+ case NVPTXISD::TexCubeS32FloatLevel:
+ Opc = NVPTX::TEX_CUBE_S32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexCubeU32Float:
+ Opc = NVPTX::TEX_CUBE_U32_F32;
+ break;
+ case NVPTXISD::TexCubeU32FloatLevel:
+ Opc = NVPTX::TEX_CUBE_U32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexCubeArrayFloatFloat:
+ Opc = NVPTX::TEX_CUBE_ARRAY_F32_F32;
+ break;
+ case NVPTXISD::TexCubeArrayFloatFloatLevel:
+ Opc = NVPTX::TEX_CUBE_ARRAY_F32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexCubeArrayS32Float:
+ Opc = NVPTX::TEX_CUBE_ARRAY_S32_F32;
+ break;
+ case NVPTXISD::TexCubeArrayS32FloatLevel:
+ Opc = NVPTX::TEX_CUBE_ARRAY_S32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexCubeArrayU32Float:
+ Opc = NVPTX::TEX_CUBE_ARRAY_U32_F32;
+ break;
+ case NVPTXISD::TexCubeArrayU32FloatLevel:
+ Opc = NVPTX::TEX_CUBE_ARRAY_U32_F32_LEVEL;
+ break;
+ case NVPTXISD::Tld4R2DFloatFloat:
+ Opc = NVPTX::TLD4_R_2D_F32_F32;
+ break;
+ case NVPTXISD::Tld4G2DFloatFloat:
+ Opc = NVPTX::TLD4_G_2D_F32_F32;
+ break;
+ case NVPTXISD::Tld4B2DFloatFloat:
+ Opc = NVPTX::TLD4_B_2D_F32_F32;
+ break;
+ case NVPTXISD::Tld4A2DFloatFloat:
+ Opc = NVPTX::TLD4_A_2D_F32_F32;
+ break;
+ case NVPTXISD::Tld4R2DS64Float:
+ Opc = NVPTX::TLD4_R_2D_S32_F32;
+ break;
+ case NVPTXISD::Tld4G2DS64Float:
+ Opc = NVPTX::TLD4_G_2D_S32_F32;
+ break;
+ case NVPTXISD::Tld4B2DS64Float:
+ Opc = NVPTX::TLD4_B_2D_S32_F32;
+ break;
+ case NVPTXISD::Tld4A2DS64Float:
+ Opc = NVPTX::TLD4_A_2D_S32_F32;
+ break;
+ case NVPTXISD::Tld4R2DU64Float:
+ Opc = NVPTX::TLD4_R_2D_U32_F32;
+ break;
+ case NVPTXISD::Tld4G2DU64Float:
+ Opc = NVPTX::TLD4_G_2D_U32_F32;
+ break;
+ case NVPTXISD::Tld4B2DU64Float:
+ Opc = NVPTX::TLD4_B_2D_U32_F32;
+ break;
+ case NVPTXISD::Tld4A2DU64Float:
+ Opc = NVPTX::TLD4_A_2D_U32_F32;
+ break;
+ case NVPTXISD::TexUnified1DFloatS32:
+ Opc = NVPTX::TEX_UNIFIED_1D_F32_S32;
+ break;
+ case NVPTXISD::TexUnified1DFloatFloat:
+ Opc = NVPTX::TEX_UNIFIED_1D_F32_F32;
+ break;
+ case NVPTXISD::TexUnified1DFloatFloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_1D_F32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexUnified1DFloatFloatGrad:
+ Opc = NVPTX::TEX_UNIFIED_1D_F32_F32_GRAD;
+ break;
+ case NVPTXISD::TexUnified1DS32S32:
+ Opc = NVPTX::TEX_UNIFIED_1D_S32_S32;
+ break;
+ case NVPTXISD::TexUnified1DS32Float:
+ Opc = NVPTX::TEX_UNIFIED_1D_S32_F32;
+ break;
+ case NVPTXISD::TexUnified1DS32FloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_1D_S32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexUnified1DS32FloatGrad:
+ Opc = NVPTX::TEX_UNIFIED_1D_S32_F32_GRAD;
+ break;
+ case NVPTXISD::TexUnified1DU32S32:
+ Opc = NVPTX::TEX_UNIFIED_1D_U32_S32;
+ break;
+ case NVPTXISD::TexUnified1DU32Float:
+ Opc = NVPTX::TEX_UNIFIED_1D_U32_F32;
+ break;
+ case NVPTXISD::TexUnified1DU32FloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_1D_U32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexUnified1DU32FloatGrad:
+ Opc = NVPTX::TEX_UNIFIED_1D_U32_F32_GRAD;
+ break;
+ case NVPTXISD::TexUnified1DArrayFloatS32:
+ Opc = NVPTX::TEX_UNIFIED_1D_ARRAY_F32_S32;
+ break;
+ case NVPTXISD::TexUnified1DArrayFloatFloat:
+ Opc = NVPTX::TEX_UNIFIED_1D_ARRAY_F32_F32;
+ break;
+ case NVPTXISD::TexUnified1DArrayFloatFloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_1D_ARRAY_F32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexUnified1DArrayFloatFloatGrad:
+ Opc = NVPTX::TEX_UNIFIED_1D_ARRAY_F32_F32_GRAD;
+ break;
+ case NVPTXISD::TexUnified1DArrayS32S32:
+ Opc = NVPTX::TEX_UNIFIED_1D_ARRAY_S32_S32;
+ break;
+ case NVPTXISD::TexUnified1DArrayS32Float:
+ Opc = NVPTX::TEX_UNIFIED_1D_ARRAY_S32_F32;
+ break;
+ case NVPTXISD::TexUnified1DArrayS32FloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_1D_ARRAY_S32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexUnified1DArrayS32FloatGrad:
+ Opc = NVPTX::TEX_UNIFIED_1D_ARRAY_S32_F32_GRAD;
+ break;
+ case NVPTXISD::TexUnified1DArrayU32S32:
+ Opc = NVPTX::TEX_UNIFIED_1D_ARRAY_U32_S32;
+ break;
+ case NVPTXISD::TexUnified1DArrayU32Float:
+ Opc = NVPTX::TEX_UNIFIED_1D_ARRAY_U32_F32;
+ break;
+ case NVPTXISD::TexUnified1DArrayU32FloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_1D_ARRAY_U32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexUnified1DArrayU32FloatGrad:
+ Opc = NVPTX::TEX_UNIFIED_1D_ARRAY_U32_F32_GRAD;
+ break;
+ case NVPTXISD::TexUnified2DFloatS32:
+ Opc = NVPTX::TEX_UNIFIED_2D_F32_S32;
+ break;
+ case NVPTXISD::TexUnified2DFloatFloat:
+ Opc = NVPTX::TEX_UNIFIED_2D_F32_F32;
+ break;
+ case NVPTXISD::TexUnified2DFloatFloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_2D_F32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexUnified2DFloatFloatGrad:
+ Opc = NVPTX::TEX_UNIFIED_2D_F32_F32_GRAD;
+ break;
+ case NVPTXISD::TexUnified2DS32S32:
+ Opc = NVPTX::TEX_UNIFIED_2D_S32_S32;
+ break;
+ case NVPTXISD::TexUnified2DS32Float:
+ Opc = NVPTX::TEX_UNIFIED_2D_S32_F32;
+ break;
+ case NVPTXISD::TexUnified2DS32FloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_2D_S32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexUnified2DS32FloatGrad:
+ Opc = NVPTX::TEX_UNIFIED_2D_S32_F32_GRAD;
+ break;
+ case NVPTXISD::TexUnified2DU32S32:
+ Opc = NVPTX::TEX_UNIFIED_2D_U32_S32;
+ break;
+ case NVPTXISD::TexUnified2DU32Float:
+ Opc = NVPTX::TEX_UNIFIED_2D_U32_F32;
+ break;
+ case NVPTXISD::TexUnified2DU32FloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_2D_U32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexUnified2DU32FloatGrad:
+ Opc = NVPTX::TEX_UNIFIED_2D_U32_F32_GRAD;
+ break;
+ case NVPTXISD::TexUnified2DArrayFloatS32:
+ Opc = NVPTX::TEX_UNIFIED_2D_ARRAY_F32_S32;
+ break;
+ case NVPTXISD::TexUnified2DArrayFloatFloat:
+ Opc = NVPTX::TEX_UNIFIED_2D_ARRAY_F32_F32;
+ break;
+ case NVPTXISD::TexUnified2DArrayFloatFloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_2D_ARRAY_F32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexUnified2DArrayFloatFloatGrad:
+ Opc = NVPTX::TEX_UNIFIED_2D_ARRAY_F32_F32_GRAD;
+ break;
+ case NVPTXISD::TexUnified2DArrayS32S32:
+ Opc = NVPTX::TEX_UNIFIED_2D_ARRAY_S32_S32;
+ break;
+ case NVPTXISD::TexUnified2DArrayS32Float:
+ Opc = NVPTX::TEX_UNIFIED_2D_ARRAY_S32_F32;
+ break;
+ case NVPTXISD::TexUnified2DArrayS32FloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_2D_ARRAY_S32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexUnified2DArrayS32FloatGrad:
+ Opc = NVPTX::TEX_UNIFIED_2D_ARRAY_S32_F32_GRAD;
+ break;
+ case NVPTXISD::TexUnified2DArrayU32S32:
+ Opc = NVPTX::TEX_UNIFIED_2D_ARRAY_U32_S32;
+ break;
+ case NVPTXISD::TexUnified2DArrayU32Float:
+ Opc = NVPTX::TEX_UNIFIED_2D_ARRAY_U32_F32;
+ break;
+ case NVPTXISD::TexUnified2DArrayU32FloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_2D_ARRAY_U32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexUnified2DArrayU32FloatGrad:
+ Opc = NVPTX::TEX_UNIFIED_2D_ARRAY_U32_F32_GRAD;
+ break;
+ case NVPTXISD::TexUnified3DFloatS32:
+ Opc = NVPTX::TEX_UNIFIED_3D_F32_S32;
+ break;
+ case NVPTXISD::TexUnified3DFloatFloat:
+ Opc = NVPTX::TEX_UNIFIED_3D_F32_F32;
+ break;
+ case NVPTXISD::TexUnified3DFloatFloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_3D_F32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexUnified3DFloatFloatGrad:
+ Opc = NVPTX::TEX_UNIFIED_3D_F32_F32_GRAD;
+ break;
+ case NVPTXISD::TexUnified3DS32S32:
+ Opc = NVPTX::TEX_UNIFIED_3D_S32_S32;
+ break;
+ case NVPTXISD::TexUnified3DS32Float:
+ Opc = NVPTX::TEX_UNIFIED_3D_S32_F32;
+ break;
+ case NVPTXISD::TexUnified3DS32FloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_3D_S32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexUnified3DS32FloatGrad:
+ Opc = NVPTX::TEX_UNIFIED_3D_S32_F32_GRAD;
+ break;
+ case NVPTXISD::TexUnified3DU32S32:
+ Opc = NVPTX::TEX_UNIFIED_3D_U32_S32;
+ break;
+ case NVPTXISD::TexUnified3DU32Float:
+ Opc = NVPTX::TEX_UNIFIED_3D_U32_F32;
+ break;
+ case NVPTXISD::TexUnified3DU32FloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_3D_U32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexUnified3DU32FloatGrad:
+ Opc = NVPTX::TEX_UNIFIED_3D_U32_F32_GRAD;
+ break;
+ case NVPTXISD::TexUnifiedCubeFloatFloat:
+ Opc = NVPTX::TEX_UNIFIED_CUBE_F32_F32;
+ break;
+ case NVPTXISD::TexUnifiedCubeFloatFloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_CUBE_F32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexUnifiedCubeS32Float:
+ Opc = NVPTX::TEX_UNIFIED_CUBE_S32_F32;
+ break;
+ case NVPTXISD::TexUnifiedCubeS32FloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_CUBE_S32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexUnifiedCubeU32Float:
+ Opc = NVPTX::TEX_UNIFIED_CUBE_U32_F32;
+ break;
+ case NVPTXISD::TexUnifiedCubeU32FloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_CUBE_U32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexUnifiedCubeArrayFloatFloat:
+ Opc = NVPTX::TEX_UNIFIED_CUBE_ARRAY_F32_F32;
+ break;
+ case NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_CUBE_ARRAY_F32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexUnifiedCubeArrayS32Float:
+ Opc = NVPTX::TEX_UNIFIED_CUBE_ARRAY_S32_F32;
+ break;
+ case NVPTXISD::TexUnifiedCubeArrayS32FloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_CUBE_ARRAY_S32_F32_LEVEL;
+ break;
+ case NVPTXISD::TexUnifiedCubeArrayU32Float:
+ Opc = NVPTX::TEX_UNIFIED_CUBE_ARRAY_U32_F32;
+ break;
+ case NVPTXISD::TexUnifiedCubeArrayU32FloatLevel:
+ Opc = NVPTX::TEX_UNIFIED_CUBE_ARRAY_U32_F32_LEVEL;
+ break;
+ case NVPTXISD::Tld4UnifiedR2DFloatFloat:
+ Opc = NVPTX::TLD4_UNIFIED_R_2D_F32_F32;
+ break;
+ case NVPTXISD::Tld4UnifiedG2DFloatFloat:
+ Opc = NVPTX::TLD4_UNIFIED_G_2D_F32_F32;
+ break;
+ case NVPTXISD::Tld4UnifiedB2DFloatFloat:
+ Opc = NVPTX::TLD4_UNIFIED_B_2D_F32_F32;
+ break;
+ case NVPTXISD::Tld4UnifiedA2DFloatFloat:
+ Opc = NVPTX::TLD4_UNIFIED_A_2D_F32_F32;
+ break;
+ case NVPTXISD::Tld4UnifiedR2DS64Float:
+ Opc = NVPTX::TLD4_UNIFIED_R_2D_S32_F32;
+ break;
+ case NVPTXISD::Tld4UnifiedG2DS64Float:
+ Opc = NVPTX::TLD4_UNIFIED_G_2D_S32_F32;
+ break;
+ case NVPTXISD::Tld4UnifiedB2DS64Float:
+ Opc = NVPTX::TLD4_UNIFIED_B_2D_S32_F32;
+ break;
+ case NVPTXISD::Tld4UnifiedA2DS64Float:
+ Opc = NVPTX::TLD4_UNIFIED_A_2D_S32_F32;
+ break;
+ case NVPTXISD::Tld4UnifiedR2DU64Float:
+ Opc = NVPTX::TLD4_UNIFIED_R_2D_U32_F32;
+ break;
+ case NVPTXISD::Tld4UnifiedG2DU64Float:
+ Opc = NVPTX::TLD4_UNIFIED_G_2D_U32_F32;
+ break;
+ case NVPTXISD::Tld4UnifiedB2DU64Float:
+ Opc = NVPTX::TLD4_UNIFIED_B_2D_U32_F32;
+ break;
+ case NVPTXISD::Tld4UnifiedA2DU64Float:
+ Opc = NVPTX::TLD4_UNIFIED_A_2D_U32_F32;
+ break;
+ }
+
+ // Copy over operands
+ for (unsigned i = 1; i < N->getNumOperands(); ++i) {
+ Ops.push_back(N->getOperand(i));
+ }
+
+ Ops.push_back(Chain);
+ Ret = CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
+ return Ret;
+}
+
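The tail of SelectTextureIntrinsic above is identical for every texture flavor: pick the machine opcode from the NVPTXISD opcode with the big switch, copy operands 1..N-1 unchanged, append the chain last, and build the machine node. Below is a minimal, self-contained C++ sketch of that dispatch-and-forward shape; the enums, node struct and operand values are made up for illustration and are not the SelectionDAG API.

```cpp
// Illustrative only: a miniature "switch on the abstract opcode, forward all
// operands but the chain, append the chain last" pattern.
#include <cstddef>
#include <cstdio>
#include <vector>

enum class ISDOp { Tex2DFloatFloat, Tex3DFloatFloat };           // made-up names
enum class MachineOp { TEX_2D_F32_F32, TEX_3D_F32_F32, INVALID }; // made-up names

struct Node {
  ISDOp Op;
  std::vector<int> Operands; // operand 0 plays the role of the chain
};

static MachineOp selectTexOpcode(ISDOp Op) {
  switch (Op) {
  case ISDOp::Tex2DFloatFloat: return MachineOp::TEX_2D_F32_F32;
  case ISDOp::Tex3DFloatFloat: return MachineOp::TEX_3D_F32_F32;
  }
  return MachineOp::INVALID;
}

int main() {
  Node N{ISDOp::Tex2DFloatFloat, {/*chain*/ 0, /*handle*/ 7, /*x*/ 1, /*y*/ 2}};
  std::vector<int> Ops;
  for (std::size_t i = 1; i < N.Operands.size(); ++i) // skip the chain ...
    Ops.push_back(N.Operands[i]);
  Ops.push_back(N.Operands[0]);                        // ... and append it last
  std::printf("machine opcode %d with %zu operands\n",
              static_cast<int>(selectTexOpcode(N.Op)), Ops.size());
}
```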
+SDNode *NVPTXDAGToDAGISel::SelectSurfaceIntrinsic(SDNode *N) {
+ SDValue Chain = N->getOperand(0);
+ SDValue TexHandle = N->getOperand(1);
+ SDNode *Ret = nullptr;
+ unsigned Opc = 0;
+ SmallVector<SDValue, 8> Ops;
+ switch (N->getOpcode()) {
+ default: return nullptr;
+ case NVPTXISD::Suld1DI8Clamp:
+ Opc = NVPTX::SULD_1D_I8_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DI16Clamp:
+ Opc = NVPTX::SULD_1D_I16_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DI32Clamp:
+ Opc = NVPTX::SULD_1D_I32_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DI64Clamp:
+ Opc = NVPTX::SULD_1D_I64_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV2I8Clamp:
+ Opc = NVPTX::SULD_1D_V2I8_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV2I16Clamp:
+ Opc = NVPTX::SULD_1D_V2I16_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV2I32Clamp:
+ Opc = NVPTX::SULD_1D_V2I32_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV2I64Clamp:
+ Opc = NVPTX::SULD_1D_V2I64_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV4I8Clamp:
+ Opc = NVPTX::SULD_1D_V4I8_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV4I16Clamp:
+ Opc = NVPTX::SULD_1D_V4I16_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV4I32Clamp:
+ Opc = NVPTX::SULD_1D_V4I32_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayI8Clamp:
+ Opc = NVPTX::SULD_1D_ARRAY_I8_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayI16Clamp:
+ Opc = NVPTX::SULD_1D_ARRAY_I16_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayI32Clamp:
+ Opc = NVPTX::SULD_1D_ARRAY_I32_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayI64Clamp:
+ Opc = NVPTX::SULD_1D_ARRAY_I64_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV2I8Clamp:
+ Opc = NVPTX::SULD_1D_ARRAY_V2I8_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV2I16Clamp:
+ Opc = NVPTX::SULD_1D_ARRAY_V2I16_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV2I32Clamp:
+ Opc = NVPTX::SULD_1D_ARRAY_V2I32_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV2I64Clamp:
+ Opc = NVPTX::SULD_1D_ARRAY_V2I64_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV4I8Clamp:
+ Opc = NVPTX::SULD_1D_ARRAY_V4I8_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV4I16Clamp:
+ Opc = NVPTX::SULD_1D_ARRAY_V4I16_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV4I32Clamp:
+ Opc = NVPTX::SULD_1D_ARRAY_V4I32_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DI8Clamp:
+ Opc = NVPTX::SULD_2D_I8_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DI16Clamp:
+ Opc = NVPTX::SULD_2D_I16_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DI32Clamp:
+ Opc = NVPTX::SULD_2D_I32_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DI64Clamp:
+ Opc = NVPTX::SULD_2D_I64_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV2I8Clamp:
+ Opc = NVPTX::SULD_2D_V2I8_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV2I16Clamp:
+ Opc = NVPTX::SULD_2D_V2I16_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV2I32Clamp:
+ Opc = NVPTX::SULD_2D_V2I32_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV2I64Clamp:
+ Opc = NVPTX::SULD_2D_V2I64_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV4I8Clamp:
+ Opc = NVPTX::SULD_2D_V4I8_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV4I16Clamp:
+ Opc = NVPTX::SULD_2D_V4I16_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV4I32Clamp:
+ Opc = NVPTX::SULD_2D_V4I32_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayI8Clamp:
+ Opc = NVPTX::SULD_2D_ARRAY_I8_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayI16Clamp:
+ Opc = NVPTX::SULD_2D_ARRAY_I16_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayI32Clamp:
+ Opc = NVPTX::SULD_2D_ARRAY_I32_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayI64Clamp:
+ Opc = NVPTX::SULD_2D_ARRAY_I64_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV2I8Clamp:
+ Opc = NVPTX::SULD_2D_ARRAY_V2I8_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV2I16Clamp:
+ Opc = NVPTX::SULD_2D_ARRAY_V2I16_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV2I32Clamp:
+ Opc = NVPTX::SULD_2D_ARRAY_V2I32_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV2I64Clamp:
+ Opc = NVPTX::SULD_2D_ARRAY_V2I64_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV4I8Clamp:
+ Opc = NVPTX::SULD_2D_ARRAY_V4I8_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV4I16Clamp:
+ Opc = NVPTX::SULD_2D_ARRAY_V4I16_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV4I32Clamp:
+ Opc = NVPTX::SULD_2D_ARRAY_V4I32_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DI8Clamp:
+ Opc = NVPTX::SULD_3D_I8_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DI16Clamp:
+ Opc = NVPTX::SULD_3D_I16_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DI32Clamp:
+ Opc = NVPTX::SULD_3D_I32_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DI64Clamp:
+ Opc = NVPTX::SULD_3D_I64_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV2I8Clamp:
+ Opc = NVPTX::SULD_3D_V2I8_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV2I16Clamp:
+ Opc = NVPTX::SULD_3D_V2I16_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV2I32Clamp:
+ Opc = NVPTX::SULD_3D_V2I32_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV2I64Clamp:
+ Opc = NVPTX::SULD_3D_V2I64_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV4I8Clamp:
+ Opc = NVPTX::SULD_3D_V4I8_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV4I16Clamp:
+ Opc = NVPTX::SULD_3D_V4I16_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV4I32Clamp:
+ Opc = NVPTX::SULD_3D_V4I32_CLAMP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DI8Trap:
+ Opc = NVPTX::SULD_1D_I8_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DI16Trap:
+ Opc = NVPTX::SULD_1D_I16_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DI32Trap:
+ Opc = NVPTX::SULD_1D_I32_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DI64Trap:
+ Opc = NVPTX::SULD_1D_I64_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV2I8Trap:
+ Opc = NVPTX::SULD_1D_V2I8_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV2I16Trap:
+ Opc = NVPTX::SULD_1D_V2I16_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV2I32Trap:
+ Opc = NVPTX::SULD_1D_V2I32_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV2I64Trap:
+ Opc = NVPTX::SULD_1D_V2I64_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV4I8Trap:
+ Opc = NVPTX::SULD_1D_V4I8_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV4I16Trap:
+ Opc = NVPTX::SULD_1D_V4I16_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV4I32Trap:
+ Opc = NVPTX::SULD_1D_V4I32_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayI8Trap:
+ Opc = NVPTX::SULD_1D_ARRAY_I8_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayI16Trap:
+ Opc = NVPTX::SULD_1D_ARRAY_I16_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayI32Trap:
+ Opc = NVPTX::SULD_1D_ARRAY_I32_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayI64Trap:
+ Opc = NVPTX::SULD_1D_ARRAY_I64_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV2I8Trap:
+ Opc = NVPTX::SULD_1D_ARRAY_V2I8_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV2I16Trap:
+ Opc = NVPTX::SULD_1D_ARRAY_V2I16_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV2I32Trap:
+ Opc = NVPTX::SULD_1D_ARRAY_V2I32_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV2I64Trap:
+ Opc = NVPTX::SULD_1D_ARRAY_V2I64_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV4I8Trap:
+ Opc = NVPTX::SULD_1D_ARRAY_V4I8_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV4I16Trap:
+ Opc = NVPTX::SULD_1D_ARRAY_V4I16_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV4I32Trap:
+ Opc = NVPTX::SULD_1D_ARRAY_V4I32_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DI8Trap:
+ Opc = NVPTX::SULD_2D_I8_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DI16Trap:
+ Opc = NVPTX::SULD_2D_I16_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DI32Trap:
+ Opc = NVPTX::SULD_2D_I32_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DI64Trap:
+ Opc = NVPTX::SULD_2D_I64_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV2I8Trap:
+ Opc = NVPTX::SULD_2D_V2I8_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV2I16Trap:
+ Opc = NVPTX::SULD_2D_V2I16_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV2I32Trap:
+ Opc = NVPTX::SULD_2D_V2I32_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV2I64Trap:
+ Opc = NVPTX::SULD_2D_V2I64_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV4I8Trap:
+ Opc = NVPTX::SULD_2D_V4I8_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV4I16Trap:
+ Opc = NVPTX::SULD_2D_V4I16_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV4I32Trap:
+ Opc = NVPTX::SULD_2D_V4I32_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayI8Trap:
+ Opc = NVPTX::SULD_2D_ARRAY_I8_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayI16Trap:
+ Opc = NVPTX::SULD_2D_ARRAY_I16_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayI32Trap:
+ Opc = NVPTX::SULD_2D_ARRAY_I32_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayI64Trap:
+ Opc = NVPTX::SULD_2D_ARRAY_I64_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV2I8Trap:
+ Opc = NVPTX::SULD_2D_ARRAY_V2I8_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV2I16Trap:
+ Opc = NVPTX::SULD_2D_ARRAY_V2I16_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV2I32Trap:
+ Opc = NVPTX::SULD_2D_ARRAY_V2I32_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV2I64Trap:
+ Opc = NVPTX::SULD_2D_ARRAY_V2I64_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV4I8Trap:
+ Opc = NVPTX::SULD_2D_ARRAY_V4I8_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV4I16Trap:
+ Opc = NVPTX::SULD_2D_ARRAY_V4I16_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV4I32Trap:
+ Opc = NVPTX::SULD_2D_ARRAY_V4I32_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DI8Trap:
+ Opc = NVPTX::SULD_3D_I8_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DI16Trap:
+ Opc = NVPTX::SULD_3D_I16_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DI32Trap:
+ Opc = NVPTX::SULD_3D_I32_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DI64Trap:
+ Opc = NVPTX::SULD_3D_I64_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV2I8Trap:
+ Opc = NVPTX::SULD_3D_V2I8_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV2I16Trap:
+ Opc = NVPTX::SULD_3D_V2I16_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV2I32Trap:
+ Opc = NVPTX::SULD_3D_V2I32_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV2I64Trap:
+ Opc = NVPTX::SULD_3D_V2I64_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV4I8Trap:
+ Opc = NVPTX::SULD_3D_V4I8_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV4I16Trap:
+ Opc = NVPTX::SULD_3D_V4I16_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV4I32Trap:
+ Opc = NVPTX::SULD_3D_V4I32_TRAP;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DI8Zero:
+ Opc = NVPTX::SULD_1D_I8_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DI16Zero:
+ Opc = NVPTX::SULD_1D_I16_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DI32Zero:
+ Opc = NVPTX::SULD_1D_I32_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DI64Zero:
+ Opc = NVPTX::SULD_1D_I64_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV2I8Zero:
+ Opc = NVPTX::SULD_1D_V2I8_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV2I16Zero:
+ Opc = NVPTX::SULD_1D_V2I16_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV2I32Zero:
+ Opc = NVPTX::SULD_1D_V2I32_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV2I64Zero:
+ Opc = NVPTX::SULD_1D_V2I64_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV4I8Zero:
+ Opc = NVPTX::SULD_1D_V4I8_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV4I16Zero:
+ Opc = NVPTX::SULD_1D_V4I16_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DV4I32Zero:
+ Opc = NVPTX::SULD_1D_V4I32_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayI8Zero:
+ Opc = NVPTX::SULD_1D_ARRAY_I8_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayI16Zero:
+ Opc = NVPTX::SULD_1D_ARRAY_I16_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayI32Zero:
+ Opc = NVPTX::SULD_1D_ARRAY_I32_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayI64Zero:
+ Opc = NVPTX::SULD_1D_ARRAY_I64_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV2I8Zero:
+ Opc = NVPTX::SULD_1D_ARRAY_V2I8_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV2I16Zero:
+ Opc = NVPTX::SULD_1D_ARRAY_V2I16_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV2I32Zero:
+ Opc = NVPTX::SULD_1D_ARRAY_V2I32_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV2I64Zero:
+ Opc = NVPTX::SULD_1D_ARRAY_V2I64_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV4I8Zero:
+ Opc = NVPTX::SULD_1D_ARRAY_V4I8_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV4I16Zero:
+ Opc = NVPTX::SULD_1D_ARRAY_V4I16_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld1DArrayV4I32Zero:
+ Opc = NVPTX::SULD_1D_ARRAY_V4I32_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DI8Zero:
+ Opc = NVPTX::SULD_2D_I8_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DI16Zero:
+ Opc = NVPTX::SULD_2D_I16_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DI32Zero:
+ Opc = NVPTX::SULD_2D_I32_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DI64Zero:
+ Opc = NVPTX::SULD_2D_I64_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV2I8Zero:
+ Opc = NVPTX::SULD_2D_V2I8_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV2I16Zero:
+ Opc = NVPTX::SULD_2D_V2I16_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV2I32Zero:
+ Opc = NVPTX::SULD_2D_V2I32_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV2I64Zero:
+ Opc = NVPTX::SULD_2D_V2I64_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV4I8Zero:
+ Opc = NVPTX::SULD_2D_V4I8_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV4I16Zero:
+ Opc = NVPTX::SULD_2D_V4I16_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DV4I32Zero:
+ Opc = NVPTX::SULD_2D_V4I32_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayI8Zero:
+ Opc = NVPTX::SULD_2D_ARRAY_I8_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayI16Zero:
+ Opc = NVPTX::SULD_2D_ARRAY_I16_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayI32Zero:
+ Opc = NVPTX::SULD_2D_ARRAY_I32_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayI64Zero:
+ Opc = NVPTX::SULD_2D_ARRAY_I64_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV2I8Zero:
+ Opc = NVPTX::SULD_2D_ARRAY_V2I8_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV2I16Zero:
+ Opc = NVPTX::SULD_2D_ARRAY_V2I16_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV2I32Zero:
+ Opc = NVPTX::SULD_2D_ARRAY_V2I32_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV2I64Zero:
+ Opc = NVPTX::SULD_2D_ARRAY_V2I64_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV4I8Zero:
+ Opc = NVPTX::SULD_2D_ARRAY_V4I8_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV4I16Zero:
+ Opc = NVPTX::SULD_2D_ARRAY_V4I16_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld2DArrayV4I32Zero:
+ Opc = NVPTX::SULD_2D_ARRAY_V4I32_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DI8Zero:
+ Opc = NVPTX::SULD_3D_I8_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DI16Zero:
+ Opc = NVPTX::SULD_3D_I16_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DI32Zero:
+ Opc = NVPTX::SULD_3D_I32_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DI64Zero:
+ Opc = NVPTX::SULD_3D_I64_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV2I8Zero:
+ Opc = NVPTX::SULD_3D_V2I8_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV2I16Zero:
+ Opc = NVPTX::SULD_3D_V2I16_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV2I32Zero:
+ Opc = NVPTX::SULD_3D_V2I32_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV2I64Zero:
+ Opc = NVPTX::SULD_3D_V2I64_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV4I8Zero:
+ Opc = NVPTX::SULD_3D_V4I8_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV4I16Zero:
+ Opc = NVPTX::SULD_3D_V4I16_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ case NVPTXISD::Suld3DV4I32Zero:
+ Opc = NVPTX::SULD_3D_V4I32_ZERO;
+ Ops.push_back(TexHandle);
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ Ops.push_back(N->getOperand(4));
+ Ops.push_back(Chain);
+ break;
+ }
+ Ret = CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
+ return Ret;
+}
+
+
+/// SelectBFE - Look for instruction sequences that can be made more efficient
+/// by using the 'bfe' (bit-field extract) PTX instruction
+SDNode *NVPTXDAGToDAGISel::SelectBFE(SDNode *N) {
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+ SDValue Len;
+ SDValue Start;
+ SDValue Val;
+ bool IsSigned = false;
+
+ if (N->getOpcode() == ISD::AND) {
+ // Canonicalize the operands
+ // We want 'and %val, %mask'
+ if (isa<ConstantSDNode>(LHS) && !isa<ConstantSDNode>(RHS)) {
+ std::swap(LHS, RHS);
+ }
+
+ ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS);
+ if (!Mask) {
+ // We need a constant mask on the RHS of the AND
+ return NULL;
+ }
+
+ // Extract the mask bits
+ uint64_t MaskVal = Mask->getZExtValue();
+ if (!isMask_64(MaskVal)) {
+ // We *could* handle shifted masks here, but doing so would require an
+    // 'and' operation to fix up the low-order bits, so we would trade
+    // shr+and for bfe+and, which has the same throughput.
+ return NULL;
+ }
+
+ // How many bits are in our mask?
+ uint64_t NumBits = CountTrailingOnes_64(MaskVal);
+ Len = CurDAG->getTargetConstant(NumBits, MVT::i32);
+
+ if (LHS.getOpcode() == ISD::SRL || LHS.getOpcode() == ISD::SRA) {
+ // We have a 'srl/and' pair, extract the effective start bit and length
+ Val = LHS.getNode()->getOperand(0);
+ Start = LHS.getNode()->getOperand(1);
+ ConstantSDNode *StartConst = dyn_cast<ConstantSDNode>(Start);
+ if (StartConst) {
+ uint64_t StartVal = StartConst->getZExtValue();
+ // How many "good" bits do we have left? "good" is defined here as bits
+ // that exist in the original value, not shifted in.
+ uint64_t GoodBits = Start.getValueType().getSizeInBits() - StartVal;
+ if (NumBits > GoodBits) {
+ // Do not handle the case where bits have been shifted in. In theory
+ // we could handle this, but the cost is likely higher than just
+ // emitting the srl/and pair.
+ return NULL;
+ }
+ Start = CurDAG->getTargetConstant(StartVal, MVT::i32);
+ } else {
+      // Do not handle the case where the shift amount (which can be zero if no
+      // srl was found) is not constant. We could handle this case, but it would
+ // require run-time logic that would be more expensive than just
+ // emitting the srl/and pair.
+ return NULL;
+ }
+ } else {
+ // Do not handle the case where the LHS of the and is not a shift. While
+ // it would be trivial to handle this case, it would just transform
+      // 'and' -> 'bfe', but 'and' has higher throughput.
+ return NULL;
+ }
+ } else if (N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) {
+ if (LHS->getOpcode() == ISD::AND) {
+ ConstantSDNode *ShiftCnst = dyn_cast<ConstantSDNode>(RHS);
+ if (!ShiftCnst) {
+ // Shift amount must be constant
+ return NULL;
+ }
+
+ uint64_t ShiftAmt = ShiftCnst->getZExtValue();
+
+ SDValue AndLHS = LHS->getOperand(0);
+ SDValue AndRHS = LHS->getOperand(1);
+
+ // Canonicalize the AND to have the mask on the RHS
+ if (isa<ConstantSDNode>(AndLHS)) {
+ std::swap(AndLHS, AndRHS);
+ }
+
+ ConstantSDNode *MaskCnst = dyn_cast<ConstantSDNode>(AndRHS);
+ if (!MaskCnst) {
+ // Mask must be constant
+ return NULL;
+ }
+
+ uint64_t MaskVal = MaskCnst->getZExtValue();
+ uint64_t NumZeros;
+ uint64_t NumBits;
+ if (isMask_64(MaskVal)) {
+ NumZeros = 0;
+ // The number of bits in the result bitfield will be the number of
+ // trailing ones (the AND) minus the number of bits we shift off
+ NumBits = CountTrailingOnes_64(MaskVal) - ShiftAmt;
+ } else if (isShiftedMask_64(MaskVal)) {
+ NumZeros = countTrailingZeros(MaskVal);
+ unsigned NumOnes = CountTrailingOnes_64(MaskVal >> NumZeros);
+ // The number of bits in the result bitfield will be the number of
+ // trailing zeros plus the number of set bits in the mask minus the
+ // number of bits we shift off
+ NumBits = NumZeros + NumOnes - ShiftAmt;
+ } else {
+ // This is not a mask we can handle
+ return NULL;
+ }
+
+ if (ShiftAmt < NumZeros) {
+ // Handling this case would require extra logic that would make this
+ // transformation non-profitable
+ return NULL;
+ }
+
+ Val = AndLHS;
+ Start = CurDAG->getTargetConstant(ShiftAmt, MVT::i32);
+ Len = CurDAG->getTargetConstant(NumBits, MVT::i32);
+ } else if (LHS->getOpcode() == ISD::SHL) {
+ // Here, we have a pattern like:
+ //
+ // (sra (shl val, NN), MM)
+ // or
+ // (srl (shl val, NN), MM)
+ //
+ // If MM >= NN, we can efficiently optimize this with bfe
+ Val = LHS->getOperand(0);
+
+ SDValue ShlRHS = LHS->getOperand(1);
+ ConstantSDNode *ShlCnst = dyn_cast<ConstantSDNode>(ShlRHS);
+ if (!ShlCnst) {
+ // Shift amount must be constant
+ return NULL;
+ }
+ uint64_t InnerShiftAmt = ShlCnst->getZExtValue();
+
+ SDValue ShrRHS = RHS;
+ ConstantSDNode *ShrCnst = dyn_cast<ConstantSDNode>(ShrRHS);
+ if (!ShrCnst) {
+ // Shift amount must be constant
+ return NULL;
+ }
+ uint64_t OuterShiftAmt = ShrCnst->getZExtValue();
+
+ // To avoid extra codegen and be profitable, we need Outer >= Inner
+ if (OuterShiftAmt < InnerShiftAmt) {
+ return NULL;
+ }
+
+ // If the outer shift is more than the type size, we have no bitfield to
+      // extract (since we also check that the inner shift is <= the outer
+      // shift, this also implies that the inner shift is < the type size).
+ if (OuterShiftAmt >= Val.getValueType().getSizeInBits()) {
+ return NULL;
+ }
+
+ Start =
+ CurDAG->getTargetConstant(OuterShiftAmt - InnerShiftAmt, MVT::i32);
+ Len =
+ CurDAG->getTargetConstant(Val.getValueType().getSizeInBits() -
+ OuterShiftAmt, MVT::i32);
+
+ if (N->getOpcode() == ISD::SRA) {
+      // If we have an arithmetic right shift, we need to use the signed bfe
+ // variant
+ IsSigned = true;
+ }
+ } else {
+ // No can do...
+ return NULL;
+ }
+ } else {
+ // No can do...
+ return NULL;
+ }
+
+
+ unsigned Opc;
+ // For the BFE operations we form here from "and" and "srl", always use the
+ // unsigned variants.
+ if (Val.getValueType() == MVT::i32) {
+ if (IsSigned) {
+ Opc = NVPTX::BFE_S32rii;
+ } else {
+ Opc = NVPTX::BFE_U32rii;
+ }
+ } else if (Val.getValueType() == MVT::i64) {
+ if (IsSigned) {
+ Opc = NVPTX::BFE_S64rii;
+ } else {
+ Opc = NVPTX::BFE_U64rii;
+ }
+ } else {
+ // We cannot handle this type
+ return NULL;
+ }
+
+ SDValue Ops[] = {
+ Val, Start, Len
+ };
+
+ SDNode *Ret =
+ CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
+
+ return Ret;
+}
+
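For reference, the shift/mask identities that SelectBFE relies on can be checked in isolation. The sketch below is plain, self-contained C++ and is not part of this change: bfe_u32/bfe_s32 only model the PTX bit-field extract for 0 < len and start + len <= 32, and the sample value, masks and shift amounts are made-up example inputs.

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Model of an unsigned bfe: Len bits of Val starting at bit Start, zero-extended.
static uint32_t bfe_u32(uint32_t Val, unsigned Start, unsigned Len) {
  return (Val >> Start) & ((Len == 32 ? 0u : (1u << Len)) - 1u);
}

// Model of a signed bfe: the same field, sign-extended from its top bit.
static int32_t bfe_s32(uint32_t Val, unsigned Start, unsigned Len) {
  uint32_t Field = bfe_u32(Val, Start, Len);
  uint32_t SignBit = 1u << (Len - 1);
  return static_cast<int32_t>((Field ^ SignBit) - SignBit);
}

int main() {
  uint32_t X = 0xDEADBEEF; // arbitrary sample value

  // (and (srl x, start), low-ones mask of Len bits) == unsigned bfe(x, start, Len)
  assert(((X >> 5) & 0xFFu) == bfe_u32(X, 5, 8));

  // (srl (and x, shifted-mask), shift) with shift >= trailing zeros of the mask
  //   == unsigned bfe(x, shift, zeros + ones - shift)
  uint32_t Mask = 0x0FF0u; // 4 trailing zeros, 8 ones
  unsigned Shift = 6;      // >= 4, so no fix-up 'and' is needed
  assert(((X & Mask) >> Shift) == bfe_u32(X, Shift, 4 + 8 - Shift));

  // (sra (shl x, NN), MM) with MM >= NN == signed bfe(x, MM - NN, 32 - MM)
  // (assumes two's complement and arithmetic >> on int32_t, as mainstream
  // compilers provide and C++20 guarantees)
  unsigned NN = 3, MM = 10;
  assert((static_cast<int32_t>(X << NN) >> MM) == bfe_s32(X, MM - NN, 32 - MM));

  std::printf("all bfe equivalences hold\n");
}
```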
// SelectDirectAddr - Match a direct address for DAG.
// A direct address could be a globaladdress or externalsymbol.
bool NVPTXDAGToDAGISel::SelectDirectAddr(SDValue N, SDValue &Address) {
@@ -2401,14 +5040,18 @@ bool NVPTXDAGToDAGISel::SelectADDRri64(SDNode *OpNode, SDValue Addr,
bool NVPTXDAGToDAGISel::ChkMemSDNodeAddressSpace(SDNode *N,
unsigned int spN) const {
- const Value *Src = NULL;
+ const Value *Src = nullptr;
   // Even though MemIntrinsicSDNode is a subclass of MemSDNode,
// the classof() for MemSDNode does not include MemIntrinsicSDNode
// (See SelectionDAGNodes.h). So we need to check for both.
if (MemSDNode *mN = dyn_cast<MemSDNode>(N)) {
- Src = mN->getSrcValue();
+ if (spN == 0 && mN->getMemOperand()->getPseudoValue())
+ return true;
+ Src = mN->getMemOperand()->getValue();
} else if (MemSDNode *mN = dyn_cast<MemIntrinsicSDNode>(N)) {
- Src = mN->getSrcValue();
+ if (spN == 0 && mN->getMemOperand()->getPseudoValue())
+ return true;
+ Src = mN->getMemOperand()->getValue();
}
if (!Src)
return false;
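The reason for the two dyn_cast attempts above is spelled out in the comment: LLVM-style RTTI only matches when the target class's classof() accepts the node's kind, so a cast to the base class can miss even though the subclass inherits from it. A self-contained sketch of that pitfall, using made-up classes rather than the real SelectionDAG types:

```cpp
// Illustrative only: a base class whose classof() deliberately does not cover
// a subclass's kind, so the base-class cast fails and the subclass cast is
// needed as well -- the situation described in the comment above.
#include <cstdio>

enum NodeKind { NK_Load, NK_MemIntrinsic };

struct BaseMemNode {
  NodeKind Kind;
  explicit BaseMemNode(NodeKind K) : Kind(K) {}
  static bool classof(const BaseMemNode *N) { return N->Kind == NK_Load; }
};

struct MemIntrinsicNode : BaseMemNode {
  MemIntrinsicNode() : BaseMemNode(NK_MemIntrinsic) {}
  static bool classof(const BaseMemNode *N) { return N->Kind == NK_MemIntrinsic; }
};

// Minimal stand-in for llvm::dyn_cast.
template <typename To, typename From> To *dyn_cast_like(From *N) {
  return To::classof(N) ? static_cast<To *>(N) : nullptr;
}

int main() {
  MemIntrinsicNode MI;
  BaseMemNode *N = &MI;
  // The base-class cast misses even though MI is-a BaseMemNode ...
  std::printf("as base:      %s\n", dyn_cast_like<BaseMemNode>(N) ? "hit" : "miss");
  // ... so a second cast against the subclass is required, and succeeds.
  std::printf("as intrinsic: %s\n",
              dyn_cast_like<MemIntrinsicNode>(N) ? "hit" : "miss");
}
```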
@@ -2440,24 +5083,3 @@ bool NVPTXDAGToDAGISel::SelectInlineAsmMemoryOperand(
}
return true;
}
-
-// Return true if N is a undef or a constant.
-// If N was undef, return a (i8imm 0) in Retval
-// If N was imm, convert it to i8imm and return in Retval
-// Note: The convert to i8imm is required, otherwise the
-// pattern matcher inserts a bunch of IMOVi8rr to convert
-// the imm to i8imm, and this causes instruction selection
-// to fail.
-bool NVPTXDAGToDAGISel::UndefOrImm(SDValue Op, SDValue N, SDValue &Retval) {
- if (!(N.getOpcode() == ISD::UNDEF) && !(N.getOpcode() == ISD::Constant))
- return false;
-
- if (N.getOpcode() == ISD::UNDEF)
- Retval = CurDAG->getTargetConstant(0, MVT::i8);
- else {
- ConstantSDNode *cn = cast<ConstantSDNode>(N.getNode());
- unsigned retval = cn->getZExtValue();
- Retval = CurDAG->getTargetConstant(retval, MVT::i8);
- }
- return true;
-}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h b/contrib/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
index d961e50..c62fc25 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
@@ -11,8 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "nvptx-isel"
-
#include "NVPTX.h"
#include "NVPTXISelLowering.h"
#include "NVPTXRegisterInfo.h"
@@ -26,47 +24,48 @@ namespace {
class LLVM_LIBRARY_VISIBILITY NVPTXDAGToDAGISel : public SelectionDAGISel {
- // If true, generate corresponding FPCONTRACT. This is
- // language dependent (i.e. CUDA and OpenCL works differently).
- bool doFMAF64;
- bool doFMAF32;
- bool doFMAF64AGG;
- bool doFMAF32AGG;
- bool allowFMA;
-
// If true, generate mul.wide from sext and mul
bool doMulWide;
int getDivF32Level() const;
bool usePrecSqrtF32() const;
bool useF32FTZ() const;
+ bool allowFMA() const;
public:
explicit NVPTXDAGToDAGISel(NVPTXTargetMachine &tm,
CodeGenOpt::Level OptLevel);
// Pass Name
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "NVPTX DAG->DAG Pattern Instruction Selection";
}
const NVPTXSubtarget &Subtarget;
- virtual bool SelectInlineAsmMemoryOperand(
- const SDValue &Op, char ConstraintCode, std::vector<SDValue> &OutOps);
+ bool SelectInlineAsmMemoryOperand(const SDValue &Op,
+ char ConstraintCode,
+ std::vector<SDValue> &OutOps) override;
private:
// Include the pieces autogenerated from the target description.
#include "NVPTXGenDAGISel.inc"
- SDNode *Select(SDNode *N);
+ SDNode *Select(SDNode *N) override;
+ SDNode *SelectIntrinsicNoChain(SDNode *N);
+ SDNode *SelectIntrinsicChain(SDNode *N);
+ SDNode *SelectTexSurfHandle(SDNode *N);
SDNode *SelectLoad(SDNode *N);
SDNode *SelectLoadVector(SDNode *N);
- SDNode *SelectLDGLDUVector(SDNode *N);
+ SDNode *SelectLDGLDU(SDNode *N);
SDNode *SelectStore(SDNode *N);
SDNode *SelectStoreVector(SDNode *N);
SDNode *SelectLoadParam(SDNode *N);
SDNode *SelectStoreRetval(SDNode *N);
SDNode *SelectStoreParam(SDNode *N);
+ SDNode *SelectAddrSpaceCast(SDNode *N);
+ SDNode *SelectTextureIntrinsic(SDNode *N);
+ SDNode *SelectSurfaceIntrinsic(SDNode *N);
+ SDNode *SelectBFE(SDNode *N);
inline SDValue getI32Imm(unsigned Imm) {
return CurDAG->getTargetConstant(Imm, MVT::i32);
@@ -91,7 +90,5 @@ private:
bool ChkMemSDNodeAddressSpace(SDNode *N, unsigned int spN) const;
- bool UndefOrImm(SDValue Op, SDValue N, SDValue &Retval);
-
};
}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 6a8be75..d76b20a 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -22,6 +22,7 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
@@ -29,10 +30,10 @@
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCSectionELF.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <sstream>
@@ -47,6 +48,12 @@ static cl::opt<bool> sched4reg(
"nvptx-sched4reg",
    cl::desc("NVPTX Specific: schedule for register pressure"), cl::init(false));
+static cl::opt<unsigned>
+FMAContractLevelOpt("nvptx-fma-level", cl::ZeroOrMore, cl::Hidden,
+                    cl::desc("NVPTX Specific: FMA contraction (0: don't do it,"
+                             " 1: do it, 2: do it aggressively)"),
+ cl::init(2));
+
static bool IsPTXVectorType(MVT VT) {
switch (VT.SimpleTy) {
default:
@@ -75,7 +82,7 @@ static bool IsPTXVectorType(MVT VT) {
/// LowerCall, and LowerReturn.
static void ComputePTXValueVTs(const TargetLowering &TLI, Type *Ty,
SmallVectorImpl<EVT> &ValueVTs,
- SmallVectorImpl<uint64_t> *Offsets = 0,
+ SmallVectorImpl<uint64_t> *Offsets = nullptr,
uint64_t StartingOffset = 0) {
SmallVector<EVT, 16> TempVTs;
SmallVector<uint64_t, 16> TempOffsets;
@@ -111,6 +118,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF;
setBooleanContents(ZeroOrNegativeOneBooleanContent);
+ setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
// Jump is Expensive. Don't create extra control flow for 'and', 'or'
// condition branches.
@@ -130,7 +138,13 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);
// Operations not directly supported by NVPTX.
- setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
setOperationAction(ISD::BR_CC, MVT::f32, Expand);
setOperationAction(ISD::BR_CC, MVT::f64, Expand);
setOperationAction(ISD::BR_CC, MVT::i1, Expand);
@@ -146,6 +160,13 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+ setOperationAction(ISD::SHL_PARTS, MVT::i32 , Custom);
+ setOperationAction(ISD::SRA_PARTS, MVT::i32 , Custom);
+ setOperationAction(ISD::SRL_PARTS, MVT::i32 , Custom);
+ setOperationAction(ISD::SHL_PARTS, MVT::i64 , Custom);
+ setOperationAction(ISD::SRA_PARTS, MVT::i64 , Custom);
+ setOperationAction(ISD::SRL_PARTS, MVT::i64 , Custom);
+
if (nvptxSubtarget.hasROT64()) {
setOperationAction(ISD::ROTL, MVT::i64, Legal);
setOperationAction(ISD::ROTR, MVT::i64, Legal);
@@ -182,8 +203,11 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
// Turn FP extload into load/fextend
+ setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
// Turn FP truncstore into trunc + store.
+ setTruncStoreAction(MVT::f32, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f16, Expand);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
// PTX does not support load / store predicate registers
@@ -237,6 +261,13 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
setOperationAction(ISD::CTPOP, MVT::i32, Legal);
setOperationAction(ISD::CTPOP, MVT::i64, Legal);
+ // We have some custom DAG combine patterns for these nodes
+ setTargetDAGCombine(ISD::ADD);
+ setTargetDAGCombine(ISD::AND);
+ setTargetDAGCombine(ISD::FADD);
+ setTargetDAGCombine(ISD::MUL);
+ setTargetDAGCombine(ISD::SHL);
+
// Now deduce the information based on the above mentioned
// actions
computeRegisterProperties();
@@ -245,7 +276,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
default:
- return 0;
+ return nullptr;
case NVPTXISD::CALL:
return "NVPTXISD::CALL";
case NVPTXISD::RET_FLAG:
@@ -328,11 +359,509 @@ const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
return "NVPTXISD::StoreV2";
case NVPTXISD::StoreV4:
return "NVPTXISD::StoreV4";
+ case NVPTXISD::FUN_SHFL_CLAMP:
+ return "NVPTXISD::FUN_SHFL_CLAMP";
+ case NVPTXISD::FUN_SHFR_CLAMP:
+ return "NVPTXISD::FUN_SHFR_CLAMP";
+ case NVPTXISD::IMAD:
+ return "NVPTXISD::IMAD";
+ case NVPTXISD::MUL_WIDE_SIGNED:
+ return "NVPTXISD::MUL_WIDE_SIGNED";
+ case NVPTXISD::MUL_WIDE_UNSIGNED:
+ return "NVPTXISD::MUL_WIDE_UNSIGNED";
+ case NVPTXISD::Tex1DFloatS32: return "NVPTXISD::Tex1DFloatS32";
+ case NVPTXISD::Tex1DFloatFloat: return "NVPTXISD::Tex1DFloatFloat";
+ case NVPTXISD::Tex1DFloatFloatLevel:
+ return "NVPTXISD::Tex1DFloatFloatLevel";
+ case NVPTXISD::Tex1DFloatFloatGrad:
+ return "NVPTXISD::Tex1DFloatFloatGrad";
+ case NVPTXISD::Tex1DS32S32: return "NVPTXISD::Tex1DS32S32";
+ case NVPTXISD::Tex1DS32Float: return "NVPTXISD::Tex1DS32Float";
+ case NVPTXISD::Tex1DS32FloatLevel:
+ return "NVPTXISD::Tex1DS32FloatLevel";
+ case NVPTXISD::Tex1DS32FloatGrad:
+ return "NVPTXISD::Tex1DS32FloatGrad";
+ case NVPTXISD::Tex1DU32S32: return "NVPTXISD::Tex1DU32S32";
+ case NVPTXISD::Tex1DU32Float: return "NVPTXISD::Tex1DU32Float";
+ case NVPTXISD::Tex1DU32FloatLevel:
+ return "NVPTXISD::Tex1DU32FloatLevel";
+ case NVPTXISD::Tex1DU32FloatGrad:
+ return "NVPTXISD::Tex1DU32FloatGrad";
+ case NVPTXISD::Tex1DArrayFloatS32: return "NVPTXISD::Tex1DArrayFloatS32";
+ case NVPTXISD::Tex1DArrayFloatFloat: return "NVPTXISD::Tex1DArrayFloatFloat";
+ case NVPTXISD::Tex1DArrayFloatFloatLevel:
+ return "NVPTXISD::Tex1DArrayFloatFloatLevel";
+ case NVPTXISD::Tex1DArrayFloatFloatGrad:
+ return "NVPTXISD::Tex1DArrayFloatFloatGrad";
+ case NVPTXISD::Tex1DArrayS32S32: return "NVPTXISD::Tex1DArrayS32S32";
+ case NVPTXISD::Tex1DArrayS32Float: return "NVPTXISD::Tex1DArrayS32Float";
+ case NVPTXISD::Tex1DArrayS32FloatLevel:
+ return "NVPTXISD::Tex1DArrayS32FloatLevel";
+ case NVPTXISD::Tex1DArrayS32FloatGrad:
+ return "NVPTXISD::Tex1DArrayS32FloatGrad";
+ case NVPTXISD::Tex1DArrayU32S32: return "NVPTXISD::Tex1DArrayU32S32";
+ case NVPTXISD::Tex1DArrayU32Float: return "NVPTXISD::Tex1DArrayU32Float";
+ case NVPTXISD::Tex1DArrayU32FloatLevel:
+ return "NVPTXISD::Tex1DArrayU32FloatLevel";
+ case NVPTXISD::Tex1DArrayU32FloatGrad:
+ return "NVPTXISD::Tex1DArrayU32FloatGrad";
+ case NVPTXISD::Tex2DFloatS32: return "NVPTXISD::Tex2DFloatS32";
+ case NVPTXISD::Tex2DFloatFloat: return "NVPTXISD::Tex2DFloatFloat";
+ case NVPTXISD::Tex2DFloatFloatLevel:
+ return "NVPTXISD::Tex2DFloatFloatLevel";
+ case NVPTXISD::Tex2DFloatFloatGrad:
+ return "NVPTXISD::Tex2DFloatFloatGrad";
+ case NVPTXISD::Tex2DS32S32: return "NVPTXISD::Tex2DS32S32";
+ case NVPTXISD::Tex2DS32Float: return "NVPTXISD::Tex2DS32Float";
+ case NVPTXISD::Tex2DS32FloatLevel:
+ return "NVPTXISD::Tex2DS32FloatLevel";
+ case NVPTXISD::Tex2DS32FloatGrad:
+ return "NVPTXISD::Tex2DS32FloatGrad";
+ case NVPTXISD::Tex2DU32S32: return "NVPTXISD::Tex2DU32S32";
+ case NVPTXISD::Tex2DU32Float: return "NVPTXISD::Tex2DU32Float";
+ case NVPTXISD::Tex2DU32FloatLevel:
+ return "NVPTXISD::Tex2DU32FloatLevel";
+ case NVPTXISD::Tex2DU32FloatGrad:
+ return "NVPTXISD::Tex2DU32FloatGrad";
+ case NVPTXISD::Tex2DArrayFloatS32: return "NVPTXISD::Tex2DArrayFloatS32";
+ case NVPTXISD::Tex2DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";
+ case NVPTXISD::Tex2DArrayFloatFloatLevel:
+ return "NVPTXISD::Tex2DArrayFloatFloatLevel";
+ case NVPTXISD::Tex2DArrayFloatFloatGrad:
+ return "NVPTXISD::Tex2DArrayFloatFloatGrad";
+ case NVPTXISD::Tex2DArrayS32S32: return "NVPTXISD::Tex2DArrayS32S32";
+ case NVPTXISD::Tex2DArrayS32Float: return "NVPTXISD::Tex2DArrayS32Float";
+ case NVPTXISD::Tex2DArrayS32FloatLevel:
+ return "NVPTXISD::Tex2DArrayS32FloatLevel";
+ case NVPTXISD::Tex2DArrayS32FloatGrad:
+ return "NVPTXISD::Tex2DArrayS32FloatGrad";
+ case NVPTXISD::Tex2DArrayU32S32: return "NVPTXISD::Tex2DArrayU32S32";
+ case NVPTXISD::Tex2DArrayU32Float: return "NVPTXISD::Tex2DArrayU32Float";
+ case NVPTXISD::Tex2DArrayU32FloatLevel:
+ return "NVPTXISD::Tex2DArrayU32FloatLevel";
+ case NVPTXISD::Tex2DArrayU32FloatGrad:
+ return "NVPTXISD::Tex2DArrayU32FloatGrad";
+ case NVPTXISD::Tex3DFloatS32: return "NVPTXISD::Tex3DFloatS32";
+ case NVPTXISD::Tex3DFloatFloat: return "NVPTXISD::Tex3DFloatFloat";
+ case NVPTXISD::Tex3DFloatFloatLevel:
+ return "NVPTXISD::Tex3DFloatFloatLevel";
+ case NVPTXISD::Tex3DFloatFloatGrad:
+ return "NVPTXISD::Tex3DFloatFloatGrad";
+ case NVPTXISD::Tex3DS32S32: return "NVPTXISD::Tex3DS32S32";
+ case NVPTXISD::Tex3DS32Float: return "NVPTXISD::Tex3DS32Float";
+ case NVPTXISD::Tex3DS32FloatLevel:
+ return "NVPTXISD::Tex3DS32FloatLevel";
+ case NVPTXISD::Tex3DS32FloatGrad:
+ return "NVPTXISD::Tex3DS32FloatGrad";
+ case NVPTXISD::Tex3DU32S32: return "NVPTXISD::Tex3DU32S32";
+ case NVPTXISD::Tex3DU32Float: return "NVPTXISD::Tex3DU32Float";
+ case NVPTXISD::Tex3DU32FloatLevel:
+ return "NVPTXISD::Tex3DU32FloatLevel";
+ case NVPTXISD::Tex3DU32FloatGrad:
+ return "NVPTXISD::Tex3DU32FloatGrad";
+ case NVPTXISD::TexCubeFloatFloat: return "NVPTXISD::TexCubeFloatFloat";
+ case NVPTXISD::TexCubeFloatFloatLevel:
+ return "NVPTXISD::TexCubeFloatFloatLevel";
+ case NVPTXISD::TexCubeS32Float: return "NVPTXISD::TexCubeS32Float";
+ case NVPTXISD::TexCubeS32FloatLevel:
+ return "NVPTXISD::TexCubeS32FloatLevel";
+ case NVPTXISD::TexCubeU32Float: return "NVPTXISD::TexCubeU32Float";
+ case NVPTXISD::TexCubeU32FloatLevel:
+ return "NVPTXISD::TexCubeU32FloatLevel";
+ case NVPTXISD::TexCubeArrayFloatFloat:
+ return "NVPTXISD::TexCubeArrayFloatFloat";
+ case NVPTXISD::TexCubeArrayFloatFloatLevel:
+ return "NVPTXISD::TexCubeArrayFloatFloatLevel";
+ case NVPTXISD::TexCubeArrayS32Float:
+ return "NVPTXISD::TexCubeArrayS32Float";
+ case NVPTXISD::TexCubeArrayS32FloatLevel:
+ return "NVPTXISD::TexCubeArrayS32FloatLevel";
+ case NVPTXISD::TexCubeArrayU32Float:
+ return "NVPTXISD::TexCubeArrayU32Float";
+ case NVPTXISD::TexCubeArrayU32FloatLevel:
+ return "NVPTXISD::TexCubeArrayU32FloatLevel";
+ case NVPTXISD::Tld4R2DFloatFloat:
+ return "NVPTXISD::Tld4R2DFloatFloat";
+ case NVPTXISD::Tld4G2DFloatFloat:
+ return "NVPTXISD::Tld4G2DFloatFloat";
+ case NVPTXISD::Tld4B2DFloatFloat:
+ return "NVPTXISD::Tld4B2DFloatFloat";
+ case NVPTXISD::Tld4A2DFloatFloat:
+ return "NVPTXISD::Tld4A2DFloatFloat";
+ case NVPTXISD::Tld4R2DS64Float:
+ return "NVPTXISD::Tld4R2DS64Float";
+ case NVPTXISD::Tld4G2DS64Float:
+ return "NVPTXISD::Tld4G2DS64Float";
+ case NVPTXISD::Tld4B2DS64Float:
+ return "NVPTXISD::Tld4B2DS64Float";
+ case NVPTXISD::Tld4A2DS64Float:
+ return "NVPTXISD::Tld4A2DS64Float";
+ case NVPTXISD::Tld4R2DU64Float:
+ return "NVPTXISD::Tld4R2DU64Float";
+ case NVPTXISD::Tld4G2DU64Float:
+ return "NVPTXISD::Tld4G2DU64Float";
+ case NVPTXISD::Tld4B2DU64Float:
+ return "NVPTXISD::Tld4B2DU64Float";
+ case NVPTXISD::Tld4A2DU64Float:
+ return "NVPTXISD::Tld4A2DU64Float";
+
+ case NVPTXISD::TexUnified1DFloatS32:
+ return "NVPTXISD::TexUnified1DFloatS32";
+ case NVPTXISD::TexUnified1DFloatFloat:
+ return "NVPTXISD::TexUnified1DFloatFloat";
+ case NVPTXISD::TexUnified1DFloatFloatLevel:
+ return "NVPTXISD::TexUnified1DFloatFloatLevel";
+ case NVPTXISD::TexUnified1DFloatFloatGrad:
+ return "NVPTXISD::TexUnified1DFloatFloatGrad";
+ case NVPTXISD::TexUnified1DS32S32:
+ return "NVPTXISD::TexUnified1DS32S32";
+ case NVPTXISD::TexUnified1DS32Float:
+ return "NVPTXISD::TexUnified1DS32Float";
+ case NVPTXISD::TexUnified1DS32FloatLevel:
+ return "NVPTXISD::TexUnified1DS32FloatLevel";
+ case NVPTXISD::TexUnified1DS32FloatGrad:
+ return "NVPTXISD::TexUnified1DS32FloatGrad";
+ case NVPTXISD::TexUnified1DU32S32:
+ return "NVPTXISD::TexUnified1DU32S32";
+ case NVPTXISD::TexUnified1DU32Float:
+ return "NVPTXISD::TexUnified1DU32Float";
+ case NVPTXISD::TexUnified1DU32FloatLevel:
+ return "NVPTXISD::TexUnified1DU32FloatLevel";
+ case NVPTXISD::TexUnified1DU32FloatGrad:
+ return "NVPTXISD::TexUnified1DU32FloatGrad";
+ case NVPTXISD::TexUnified1DArrayFloatS32:
+ return "NVPTXISD::TexUnified1DArrayFloatS32";
+ case NVPTXISD::TexUnified1DArrayFloatFloat:
+ return "NVPTXISD::TexUnified1DArrayFloatFloat";
+ case NVPTXISD::TexUnified1DArrayFloatFloatLevel:
+ return "NVPTXISD::TexUnified1DArrayFloatFloatLevel";
+ case NVPTXISD::TexUnified1DArrayFloatFloatGrad:
+ return "NVPTXISD::TexUnified1DArrayFloatFloatGrad";
+ case NVPTXISD::TexUnified1DArrayS32S32:
+ return "NVPTXISD::TexUnified1DArrayS32S32";
+ case NVPTXISD::TexUnified1DArrayS32Float:
+ return "NVPTXISD::TexUnified1DArrayS32Float";
+ case NVPTXISD::TexUnified1DArrayS32FloatLevel:
+ return "NVPTXISD::TexUnified1DArrayS32FloatLevel";
+ case NVPTXISD::TexUnified1DArrayS32FloatGrad:
+ return "NVPTXISD::TexUnified1DArrayS32FloatGrad";
+ case NVPTXISD::TexUnified1DArrayU32S32:
+ return "NVPTXISD::TexUnified1DArrayU32S32";
+ case NVPTXISD::TexUnified1DArrayU32Float:
+ return "NVPTXISD::TexUnified1DArrayU32Float";
+ case NVPTXISD::TexUnified1DArrayU32FloatLevel:
+ return "NVPTXISD::TexUnified1DArrayU32FloatLevel";
+ case NVPTXISD::TexUnified1DArrayU32FloatGrad:
+ return "NVPTXISD::TexUnified1DArrayU32FloatGrad";
+ case NVPTXISD::TexUnified2DFloatS32:
+ return "NVPTXISD::TexUnified2DFloatS32";
+ case NVPTXISD::TexUnified2DFloatFloat:
+ return "NVPTXISD::TexUnified2DFloatFloat";
+ case NVPTXISD::TexUnified2DFloatFloatLevel:
+ return "NVPTXISD::TexUnified2DFloatFloatLevel";
+ case NVPTXISD::TexUnified2DFloatFloatGrad:
+ return "NVPTXISD::TexUnified2DFloatFloatGrad";
+ case NVPTXISD::TexUnified2DS32S32:
+ return "NVPTXISD::TexUnified2DS32S32";
+ case NVPTXISD::TexUnified2DS32Float:
+ return "NVPTXISD::TexUnified2DS32Float";
+ case NVPTXISD::TexUnified2DS32FloatLevel:
+ return "NVPTXISD::TexUnified2DS32FloatLevel";
+ case NVPTXISD::TexUnified2DS32FloatGrad:
+ return "NVPTXISD::TexUnified2DS32FloatGrad";
+ case NVPTXISD::TexUnified2DU32S32:
+ return "NVPTXISD::TexUnified2DU32S32";
+ case NVPTXISD::TexUnified2DU32Float:
+ return "NVPTXISD::TexUnified2DU32Float";
+ case NVPTXISD::TexUnified2DU32FloatLevel:
+ return "NVPTXISD::TexUnified2DU32FloatLevel";
+ case NVPTXISD::TexUnified2DU32FloatGrad:
+ return "NVPTXISD::TexUnified2DU32FloatGrad";
+ case NVPTXISD::TexUnified2DArrayFloatS32:
+ return "NVPTXISD::TexUnified2DArrayFloatS32";
+ case NVPTXISD::TexUnified2DArrayFloatFloat:
+ return "NVPTXISD::TexUnified2DArrayFloatFloat";
+ case NVPTXISD::TexUnified2DArrayFloatFloatLevel:
+ return "NVPTXISD::TexUnified2DArrayFloatFloatLevel";
+ case NVPTXISD::TexUnified2DArrayFloatFloatGrad:
+ return "NVPTXISD::TexUnified2DArrayFloatFloatGrad";
+ case NVPTXISD::TexUnified2DArrayS32S32:
+ return "NVPTXISD::TexUnified2DArrayS32S32";
+ case NVPTXISD::TexUnified2DArrayS32Float:
+ return "NVPTXISD::TexUnified2DArrayS32Float";
+ case NVPTXISD::TexUnified2DArrayS32FloatLevel:
+ return "NVPTXISD::TexUnified2DArrayS32FloatLevel";
+ case NVPTXISD::TexUnified2DArrayS32FloatGrad:
+ return "NVPTXISD::TexUnified2DArrayS32FloatGrad";
+ case NVPTXISD::TexUnified2DArrayU32S32:
+ return "NVPTXISD::TexUnified2DArrayU32S32";
+ case NVPTXISD::TexUnified2DArrayU32Float:
+ return "NVPTXISD::TexUnified2DArrayU32Float";
+ case NVPTXISD::TexUnified2DArrayU32FloatLevel:
+ return "NVPTXISD::TexUnified2DArrayU32FloatLevel";
+ case NVPTXISD::TexUnified2DArrayU32FloatGrad:
+ return "NVPTXISD::TexUnified2DArrayU32FloatGrad";
+ case NVPTXISD::TexUnified3DFloatS32:
+ return "NVPTXISD::TexUnified3DFloatS32";
+ case NVPTXISD::TexUnified3DFloatFloat:
+ return "NVPTXISD::TexUnified3DFloatFloat";
+ case NVPTXISD::TexUnified3DFloatFloatLevel:
+ return "NVPTXISD::TexUnified3DFloatFloatLevel";
+ case NVPTXISD::TexUnified3DFloatFloatGrad:
+ return "NVPTXISD::TexUnified3DFloatFloatGrad";
+ case NVPTXISD::TexUnified3DS32S32:
+ return "NVPTXISD::TexUnified3DS32S32";
+ case NVPTXISD::TexUnified3DS32Float:
+ return "NVPTXISD::TexUnified3DS32Float";
+ case NVPTXISD::TexUnified3DS32FloatLevel:
+ return "NVPTXISD::TexUnified3DS32FloatLevel";
+ case NVPTXISD::TexUnified3DS32FloatGrad:
+ return "NVPTXISD::TexUnified3DS32FloatGrad";
+ case NVPTXISD::TexUnified3DU32S32:
+ return "NVPTXISD::TexUnified3DU32S32";
+ case NVPTXISD::TexUnified3DU32Float:
+ return "NVPTXISD::TexUnified3DU32Float";
+ case NVPTXISD::TexUnified3DU32FloatLevel:
+ return "NVPTXISD::TexUnified3DU32FloatLevel";
+ case NVPTXISD::TexUnified3DU32FloatGrad:
+ return "NVPTXISD::TexUnified3DU32FloatGrad";
+ case NVPTXISD::TexUnifiedCubeFloatFloat:
+ return "NVPTXISD::TexUnifiedCubeFloatFloat";
+ case NVPTXISD::TexUnifiedCubeFloatFloatLevel:
+ return "NVPTXISD::TexUnifiedCubeFloatFloatLevel";
+ case NVPTXISD::TexUnifiedCubeS32Float:
+ return "NVPTXISD::TexUnifiedCubeS32Float";
+ case NVPTXISD::TexUnifiedCubeS32FloatLevel:
+ return "NVPTXISD::TexUnifiedCubeS32FloatLevel";
+ case NVPTXISD::TexUnifiedCubeU32Float:
+ return "NVPTXISD::TexUnifiedCubeU32Float";
+ case NVPTXISD::TexUnifiedCubeU32FloatLevel:
+ return "NVPTXISD::TexUnifiedCubeU32FloatLevel";
+ case NVPTXISD::TexUnifiedCubeArrayFloatFloat:
+ return "NVPTXISD::TexUnifiedCubeArrayFloatFloat";
+ case NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel:
+ return "NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel";
+ case NVPTXISD::TexUnifiedCubeArrayS32Float:
+ return "NVPTXISD::TexUnifiedCubeArrayS32Float";
+ case NVPTXISD::TexUnifiedCubeArrayS32FloatLevel:
+ return "NVPTXISD::TexUnifiedCubeArrayS32FloatLevel";
+ case NVPTXISD::TexUnifiedCubeArrayU32Float:
+ return "NVPTXISD::TexUnifiedCubeArrayU32Float";
+ case NVPTXISD::TexUnifiedCubeArrayU32FloatLevel:
+ return "NVPTXISD::TexUnifiedCubeArrayU32FloatLevel";
+ case NVPTXISD::Tld4UnifiedR2DFloatFloat:
+ return "NVPTXISD::Tld4UnifiedR2DFloatFloat";
+ case NVPTXISD::Tld4UnifiedG2DFloatFloat:
+ return "NVPTXISD::Tld4UnifiedG2DFloatFloat";
+ case NVPTXISD::Tld4UnifiedB2DFloatFloat:
+ return "NVPTXISD::Tld4UnifiedB2DFloatFloat";
+ case NVPTXISD::Tld4UnifiedA2DFloatFloat:
+ return "NVPTXISD::Tld4UnifiedA2DFloatFloat";
+ case NVPTXISD::Tld4UnifiedR2DS64Float:
+ return "NVPTXISD::Tld4UnifiedR2DS64Float";
+ case NVPTXISD::Tld4UnifiedG2DS64Float:
+ return "NVPTXISD::Tld4UnifiedG2DS64Float";
+ case NVPTXISD::Tld4UnifiedB2DS64Float:
+ return "NVPTXISD::Tld4UnifiedB2DS64Float";
+ case NVPTXISD::Tld4UnifiedA2DS64Float:
+ return "NVPTXISD::Tld4UnifiedA2DS64Float";
+ case NVPTXISD::Tld4UnifiedR2DU64Float:
+ return "NVPTXISD::Tld4UnifiedR2DU64Float";
+ case NVPTXISD::Tld4UnifiedG2DU64Float:
+ return "NVPTXISD::Tld4UnifiedG2DU64Float";
+ case NVPTXISD::Tld4UnifiedB2DU64Float:
+ return "NVPTXISD::Tld4UnifiedB2DU64Float";
+ case NVPTXISD::Tld4UnifiedA2DU64Float:
+ return "NVPTXISD::Tld4UnifiedA2DU64Float";
+
+ case NVPTXISD::Suld1DI8Clamp: return "NVPTXISD::Suld1DI8Clamp";
+ case NVPTXISD::Suld1DI16Clamp: return "NVPTXISD::Suld1DI16Clamp";
+ case NVPTXISD::Suld1DI32Clamp: return "NVPTXISD::Suld1DI32Clamp";
+ case NVPTXISD::Suld1DI64Clamp: return "NVPTXISD::Suld1DI64Clamp";
+ case NVPTXISD::Suld1DV2I8Clamp: return "NVPTXISD::Suld1DV2I8Clamp";
+ case NVPTXISD::Suld1DV2I16Clamp: return "NVPTXISD::Suld1DV2I16Clamp";
+ case NVPTXISD::Suld1DV2I32Clamp: return "NVPTXISD::Suld1DV2I32Clamp";
+ case NVPTXISD::Suld1DV2I64Clamp: return "NVPTXISD::Suld1DV2I64Clamp";
+ case NVPTXISD::Suld1DV4I8Clamp: return "NVPTXISD::Suld1DV4I8Clamp";
+ case NVPTXISD::Suld1DV4I16Clamp: return "NVPTXISD::Suld1DV4I16Clamp";
+ case NVPTXISD::Suld1DV4I32Clamp: return "NVPTXISD::Suld1DV4I32Clamp";
+
+ case NVPTXISD::Suld1DArrayI8Clamp: return "NVPTXISD::Suld1DArrayI8Clamp";
+ case NVPTXISD::Suld1DArrayI16Clamp: return "NVPTXISD::Suld1DArrayI16Clamp";
+ case NVPTXISD::Suld1DArrayI32Clamp: return "NVPTXISD::Suld1DArrayI32Clamp";
+ case NVPTXISD::Suld1DArrayI64Clamp: return "NVPTXISD::Suld1DArrayI64Clamp";
+ case NVPTXISD::Suld1DArrayV2I8Clamp: return "NVPTXISD::Suld1DArrayV2I8Clamp";
+ case NVPTXISD::Suld1DArrayV2I16Clamp:return "NVPTXISD::Suld1DArrayV2I16Clamp";
+ case NVPTXISD::Suld1DArrayV2I32Clamp:return "NVPTXISD::Suld1DArrayV2I32Clamp";
+ case NVPTXISD::Suld1DArrayV2I64Clamp:return "NVPTXISD::Suld1DArrayV2I64Clamp";
+ case NVPTXISD::Suld1DArrayV4I8Clamp: return "NVPTXISD::Suld1DArrayV4I8Clamp";
+ case NVPTXISD::Suld1DArrayV4I16Clamp:return "NVPTXISD::Suld1DArrayV4I16Clamp";
+ case NVPTXISD::Suld1DArrayV4I32Clamp:return "NVPTXISD::Suld1DArrayV4I32Clamp";
+
+ case NVPTXISD::Suld2DI8Clamp: return "NVPTXISD::Suld2DI8Clamp";
+ case NVPTXISD::Suld2DI16Clamp: return "NVPTXISD::Suld2DI16Clamp";
+ case NVPTXISD::Suld2DI32Clamp: return "NVPTXISD::Suld2DI32Clamp";
+ case NVPTXISD::Suld2DI64Clamp: return "NVPTXISD::Suld2DI64Clamp";
+ case NVPTXISD::Suld2DV2I8Clamp: return "NVPTXISD::Suld2DV2I8Clamp";
+ case NVPTXISD::Suld2DV2I16Clamp: return "NVPTXISD::Suld2DV2I16Clamp";
+ case NVPTXISD::Suld2DV2I32Clamp: return "NVPTXISD::Suld2DV2I32Clamp";
+ case NVPTXISD::Suld2DV2I64Clamp: return "NVPTXISD::Suld2DV2I64Clamp";
+ case NVPTXISD::Suld2DV4I8Clamp: return "NVPTXISD::Suld2DV4I8Clamp";
+ case NVPTXISD::Suld2DV4I16Clamp: return "NVPTXISD::Suld2DV4I16Clamp";
+ case NVPTXISD::Suld2DV4I32Clamp: return "NVPTXISD::Suld2DV4I32Clamp";
+
+ case NVPTXISD::Suld2DArrayI8Clamp: return "NVPTXISD::Suld2DArrayI8Clamp";
+ case NVPTXISD::Suld2DArrayI16Clamp: return "NVPTXISD::Suld2DArrayI16Clamp";
+ case NVPTXISD::Suld2DArrayI32Clamp: return "NVPTXISD::Suld2DArrayI32Clamp";
+ case NVPTXISD::Suld2DArrayI64Clamp: return "NVPTXISD::Suld2DArrayI64Clamp";
+ case NVPTXISD::Suld2DArrayV2I8Clamp: return "NVPTXISD::Suld2DArrayV2I8Clamp";
+ case NVPTXISD::Suld2DArrayV2I16Clamp:return "NVPTXISD::Suld2DArrayV2I16Clamp";
+ case NVPTXISD::Suld2DArrayV2I32Clamp:return "NVPTXISD::Suld2DArrayV2I32Clamp";
+ case NVPTXISD::Suld2DArrayV2I64Clamp:return "NVPTXISD::Suld2DArrayV2I64Clamp";
+ case NVPTXISD::Suld2DArrayV4I8Clamp: return "NVPTXISD::Suld2DArrayV4I8Clamp";
+ case NVPTXISD::Suld2DArrayV4I16Clamp:return "NVPTXISD::Suld2DArrayV4I16Clamp";
+ case NVPTXISD::Suld2DArrayV4I32Clamp:return "NVPTXISD::Suld2DArrayV4I32Clamp";
+
+ case NVPTXISD::Suld3DI8Clamp: return "NVPTXISD::Suld3DI8Clamp";
+ case NVPTXISD::Suld3DI16Clamp: return "NVPTXISD::Suld3DI16Clamp";
+ case NVPTXISD::Suld3DI32Clamp: return "NVPTXISD::Suld3DI32Clamp";
+ case NVPTXISD::Suld3DI64Clamp: return "NVPTXISD::Suld3DI64Clamp";
+ case NVPTXISD::Suld3DV2I8Clamp: return "NVPTXISD::Suld3DV2I8Clamp";
+ case NVPTXISD::Suld3DV2I16Clamp: return "NVPTXISD::Suld3DV2I16Clamp";
+ case NVPTXISD::Suld3DV2I32Clamp: return "NVPTXISD::Suld3DV2I32Clamp";
+ case NVPTXISD::Suld3DV2I64Clamp: return "NVPTXISD::Suld3DV2I64Clamp";
+ case NVPTXISD::Suld3DV4I8Clamp: return "NVPTXISD::Suld3DV4I8Clamp";
+ case NVPTXISD::Suld3DV4I16Clamp: return "NVPTXISD::Suld3DV4I16Clamp";
+ case NVPTXISD::Suld3DV4I32Clamp: return "NVPTXISD::Suld3DV4I32Clamp";
+
+ case NVPTXISD::Suld1DI8Trap: return "NVPTXISD::Suld1DI8Trap";
+ case NVPTXISD::Suld1DI16Trap: return "NVPTXISD::Suld1DI16Trap";
+ case NVPTXISD::Suld1DI32Trap: return "NVPTXISD::Suld1DI32Trap";
+ case NVPTXISD::Suld1DI64Trap: return "NVPTXISD::Suld1DI64Trap";
+ case NVPTXISD::Suld1DV2I8Trap: return "NVPTXISD::Suld1DV2I8Trap";
+ case NVPTXISD::Suld1DV2I16Trap: return "NVPTXISD::Suld1DV2I16Trap";
+ case NVPTXISD::Suld1DV2I32Trap: return "NVPTXISD::Suld1DV2I32Trap";
+ case NVPTXISD::Suld1DV2I64Trap: return "NVPTXISD::Suld1DV2I64Trap";
+ case NVPTXISD::Suld1DV4I8Trap: return "NVPTXISD::Suld1DV4I8Trap";
+ case NVPTXISD::Suld1DV4I16Trap: return "NVPTXISD::Suld1DV4I16Trap";
+ case NVPTXISD::Suld1DV4I32Trap: return "NVPTXISD::Suld1DV4I32Trap";
+
+ case NVPTXISD::Suld1DArrayI8Trap: return "NVPTXISD::Suld1DArrayI8Trap";
+ case NVPTXISD::Suld1DArrayI16Trap: return "NVPTXISD::Suld1DArrayI16Trap";
+ case NVPTXISD::Suld1DArrayI32Trap: return "NVPTXISD::Suld1DArrayI32Trap";
+ case NVPTXISD::Suld1DArrayI64Trap: return "NVPTXISD::Suld1DArrayI64Trap";
+ case NVPTXISD::Suld1DArrayV2I8Trap: return "NVPTXISD::Suld1DArrayV2I8Trap";
+ case NVPTXISD::Suld1DArrayV2I16Trap: return "NVPTXISD::Suld1DArrayV2I16Trap";
+ case NVPTXISD::Suld1DArrayV2I32Trap: return "NVPTXISD::Suld1DArrayV2I32Trap";
+ case NVPTXISD::Suld1DArrayV2I64Trap: return "NVPTXISD::Suld1DArrayV2I64Trap";
+ case NVPTXISD::Suld1DArrayV4I8Trap: return "NVPTXISD::Suld1DArrayV4I8Trap";
+ case NVPTXISD::Suld1DArrayV4I16Trap: return "NVPTXISD::Suld1DArrayV4I16Trap";
+ case NVPTXISD::Suld1DArrayV4I32Trap: return "NVPTXISD::Suld1DArrayV4I32Trap";
+
+ case NVPTXISD::Suld2DI8Trap: return "NVPTXISD::Suld2DI8Trap";
+ case NVPTXISD::Suld2DI16Trap: return "NVPTXISD::Suld2DI16Trap";
+ case NVPTXISD::Suld2DI32Trap: return "NVPTXISD::Suld2DI32Trap";
+ case NVPTXISD::Suld2DI64Trap: return "NVPTXISD::Suld2DI64Trap";
+ case NVPTXISD::Suld2DV2I8Trap: return "NVPTXISD::Suld2DV2I8Trap";
+ case NVPTXISD::Suld2DV2I16Trap: return "NVPTXISD::Suld2DV2I16Trap";
+ case NVPTXISD::Suld2DV2I32Trap: return "NVPTXISD::Suld2DV2I32Trap";
+ case NVPTXISD::Suld2DV2I64Trap: return "NVPTXISD::Suld2DV2I64Trap";
+ case NVPTXISD::Suld2DV4I8Trap: return "NVPTXISD::Suld2DV4I8Trap";
+ case NVPTXISD::Suld2DV4I16Trap: return "NVPTXISD::Suld2DV4I16Trap";
+ case NVPTXISD::Suld2DV4I32Trap: return "NVPTXISD::Suld2DV4I32Trap";
+
+ case NVPTXISD::Suld2DArrayI8Trap: return "NVPTXISD::Suld2DArrayI8Trap";
+ case NVPTXISD::Suld2DArrayI16Trap: return "NVPTXISD::Suld2DArrayI16Trap";
+ case NVPTXISD::Suld2DArrayI32Trap: return "NVPTXISD::Suld2DArrayI32Trap";
+ case NVPTXISD::Suld2DArrayI64Trap: return "NVPTXISD::Suld2DArrayI64Trap";
+ case NVPTXISD::Suld2DArrayV2I8Trap: return "NVPTXISD::Suld2DArrayV2I8Trap";
+ case NVPTXISD::Suld2DArrayV2I16Trap: return "NVPTXISD::Suld2DArrayV2I16Trap";
+ case NVPTXISD::Suld2DArrayV2I32Trap: return "NVPTXISD::Suld2DArrayV2I32Trap";
+ case NVPTXISD::Suld2DArrayV2I64Trap: return "NVPTXISD::Suld2DArrayV2I64Trap";
+ case NVPTXISD::Suld2DArrayV4I8Trap: return "NVPTXISD::Suld2DArrayV4I8Trap";
+ case NVPTXISD::Suld2DArrayV4I16Trap: return "NVPTXISD::Suld2DArrayV4I16Trap";
+ case NVPTXISD::Suld2DArrayV4I32Trap: return "NVPTXISD::Suld2DArrayV4I32Trap";
+
+ case NVPTXISD::Suld3DI8Trap: return "NVPTXISD::Suld3DI8Trap";
+ case NVPTXISD::Suld3DI16Trap: return "NVPTXISD::Suld3DI16Trap";
+ case NVPTXISD::Suld3DI32Trap: return "NVPTXISD::Suld3DI32Trap";
+ case NVPTXISD::Suld3DI64Trap: return "NVPTXISD::Suld3DI64Trap";
+ case NVPTXISD::Suld3DV2I8Trap: return "NVPTXISD::Suld3DV2I8Trap";
+ case NVPTXISD::Suld3DV2I16Trap: return "NVPTXISD::Suld3DV2I16Trap";
+ case NVPTXISD::Suld3DV2I32Trap: return "NVPTXISD::Suld3DV2I32Trap";
+ case NVPTXISD::Suld3DV2I64Trap: return "NVPTXISD::Suld3DV2I64Trap";
+ case NVPTXISD::Suld3DV4I8Trap: return "NVPTXISD::Suld3DV4I8Trap";
+ case NVPTXISD::Suld3DV4I16Trap: return "NVPTXISD::Suld3DV4I16Trap";
+ case NVPTXISD::Suld3DV4I32Trap: return "NVPTXISD::Suld3DV4I32Trap";
+
+ case NVPTXISD::Suld1DI8Zero: return "NVPTXISD::Suld1DI8Zero";
+ case NVPTXISD::Suld1DI16Zero: return "NVPTXISD::Suld1DI16Zero";
+ case NVPTXISD::Suld1DI32Zero: return "NVPTXISD::Suld1DI32Zero";
+ case NVPTXISD::Suld1DI64Zero: return "NVPTXISD::Suld1DI64Zero";
+ case NVPTXISD::Suld1DV2I8Zero: return "NVPTXISD::Suld1DV2I8Zero";
+ case NVPTXISD::Suld1DV2I16Zero: return "NVPTXISD::Suld1DV2I16Zero";
+ case NVPTXISD::Suld1DV2I32Zero: return "NVPTXISD::Suld1DV2I32Zero";
+ case NVPTXISD::Suld1DV2I64Zero: return "NVPTXISD::Suld1DV2I64Zero";
+ case NVPTXISD::Suld1DV4I8Zero: return "NVPTXISD::Suld1DV4I8Zero";
+ case NVPTXISD::Suld1DV4I16Zero: return "NVPTXISD::Suld1DV4I16Zero";
+ case NVPTXISD::Suld1DV4I32Zero: return "NVPTXISD::Suld1DV4I32Zero";
+
+ case NVPTXISD::Suld1DArrayI8Zero: return "NVPTXISD::Suld1DArrayI8Zero";
+ case NVPTXISD::Suld1DArrayI16Zero: return "NVPTXISD::Suld1DArrayI16Zero";
+ case NVPTXISD::Suld1DArrayI32Zero: return "NVPTXISD::Suld1DArrayI32Zero";
+ case NVPTXISD::Suld1DArrayI64Zero: return "NVPTXISD::Suld1DArrayI64Zero";
+ case NVPTXISD::Suld1DArrayV2I8Zero: return "NVPTXISD::Suld1DArrayV2I8Zero";
+ case NVPTXISD::Suld1DArrayV2I16Zero: return "NVPTXISD::Suld1DArrayV2I16Zero";
+ case NVPTXISD::Suld1DArrayV2I32Zero: return "NVPTXISD::Suld1DArrayV2I32Zero";
+ case NVPTXISD::Suld1DArrayV2I64Zero: return "NVPTXISD::Suld1DArrayV2I64Zero";
+ case NVPTXISD::Suld1DArrayV4I8Zero: return "NVPTXISD::Suld1DArrayV4I8Zero";
+ case NVPTXISD::Suld1DArrayV4I16Zero: return "NVPTXISD::Suld1DArrayV4I16Zero";
+ case NVPTXISD::Suld1DArrayV4I32Zero: return "NVPTXISD::Suld1DArrayV4I32Zero";
+
+ case NVPTXISD::Suld2DI8Zero: return "NVPTXISD::Suld2DI8Zero";
+ case NVPTXISD::Suld2DI16Zero: return "NVPTXISD::Suld2DI16Zero";
+ case NVPTXISD::Suld2DI32Zero: return "NVPTXISD::Suld2DI32Zero";
+ case NVPTXISD::Suld2DI64Zero: return "NVPTXISD::Suld2DI64Zero";
+ case NVPTXISD::Suld2DV2I8Zero: return "NVPTXISD::Suld2DV2I8Zero";
+ case NVPTXISD::Suld2DV2I16Zero: return "NVPTXISD::Suld2DV2I16Zero";
+ case NVPTXISD::Suld2DV2I32Zero: return "NVPTXISD::Suld2DV2I32Zero";
+ case NVPTXISD::Suld2DV2I64Zero: return "NVPTXISD::Suld2DV2I64Zero";
+ case NVPTXISD::Suld2DV4I8Zero: return "NVPTXISD::Suld2DV4I8Zero";
+ case NVPTXISD::Suld2DV4I16Zero: return "NVPTXISD::Suld2DV4I16Zero";
+ case NVPTXISD::Suld2DV4I32Zero: return "NVPTXISD::Suld2DV4I32Zero";
+
+ case NVPTXISD::Suld2DArrayI8Zero: return "NVPTXISD::Suld2DArrayI8Zero";
+ case NVPTXISD::Suld2DArrayI16Zero: return "NVPTXISD::Suld2DArrayI16Zero";
+ case NVPTXISD::Suld2DArrayI32Zero: return "NVPTXISD::Suld2DArrayI32Zero";
+ case NVPTXISD::Suld2DArrayI64Zero: return "NVPTXISD::Suld2DArrayI64Zero";
+ case NVPTXISD::Suld2DArrayV2I8Zero: return "NVPTXISD::Suld2DArrayV2I8Zero";
+ case NVPTXISD::Suld2DArrayV2I16Zero: return "NVPTXISD::Suld2DArrayV2I16Zero";
+ case NVPTXISD::Suld2DArrayV2I32Zero: return "NVPTXISD::Suld2DArrayV2I32Zero";
+ case NVPTXISD::Suld2DArrayV2I64Zero: return "NVPTXISD::Suld2DArrayV2I64Zero";
+ case NVPTXISD::Suld2DArrayV4I8Zero: return "NVPTXISD::Suld2DArrayV4I8Zero";
+ case NVPTXISD::Suld2DArrayV4I16Zero: return "NVPTXISD::Suld2DArrayV4I16Zero";
+ case NVPTXISD::Suld2DArrayV4I32Zero: return "NVPTXISD::Suld2DArrayV4I32Zero";
+
+ case NVPTXISD::Suld3DI8Zero: return "NVPTXISD::Suld3DI8Zero";
+ case NVPTXISD::Suld3DI16Zero: return "NVPTXISD::Suld3DI16Zero";
+ case NVPTXISD::Suld3DI32Zero: return "NVPTXISD::Suld3DI32Zero";
+ case NVPTXISD::Suld3DI64Zero: return "NVPTXISD::Suld3DI64Zero";
+ case NVPTXISD::Suld3DV2I8Zero: return "NVPTXISD::Suld3DV2I8Zero";
+ case NVPTXISD::Suld3DV2I16Zero: return "NVPTXISD::Suld3DV2I16Zero";
+ case NVPTXISD::Suld3DV2I32Zero: return "NVPTXISD::Suld3DV2I32Zero";
+ case NVPTXISD::Suld3DV2I64Zero: return "NVPTXISD::Suld3DV2I64Zero";
+ case NVPTXISD::Suld3DV4I8Zero: return "NVPTXISD::Suld3DV4I8Zero";
+ case NVPTXISD::Suld3DV4I16Zero: return "NVPTXISD::Suld3DV4I16Zero";
+ case NVPTXISD::Suld3DV4I32Zero: return "NVPTXISD::Suld3DV4I32Zero";
}
}
-bool NVPTXTargetLowering::shouldSplitVectorElementType(EVT VT) const {
- return VT == MVT::i1;
+TargetLoweringBase::LegalizeTypeAction
+NVPTXTargetLowering::getPreferredVectorAction(EVT VT) const {
+ if (VT.getVectorNumElements() != 1 && VT.getScalarType() == MVT::i1)
+ return TypeSplitVector;
+
+ return TargetLoweringBase::getPreferredVectorAction(VT);
}
SDValue
@@ -361,7 +890,7 @@ NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
O << "()";
} else {
O << "(";
- if (retTy->isPrimitiveType() || retTy->isIntegerTy()) {
+ if (retTy->isFloatingPointTy() || retTy->isIntegerTy()) {
unsigned size = 0;
if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) {
size = ITy->getBitWidth();
@@ -377,26 +906,12 @@ NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
} else if (isa<PointerType>(retTy)) {
O << ".param .b" << getPointerTy().getSizeInBits() << " _";
} else {
- if ((retTy->getTypeID() == Type::StructTyID) || isa<VectorType>(retTy)) {
- SmallVector<EVT, 16> vtparts;
- ComputeValueVTs(*this, retTy, vtparts);
- unsigned totalsz = 0;
- for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
- unsigned elems = 1;
- EVT elemtype = vtparts[i];
- if (vtparts[i].isVector()) {
- elems = vtparts[i].getVectorNumElements();
- elemtype = vtparts[i].getVectorElementType();
- }
- // TODO: no need to loop
- for (unsigned j = 0, je = elems; j != je; ++j) {
- unsigned sz = elemtype.getSizeInBits();
- if (elemtype.isInteger() && (sz < 8))
- sz = 8;
- totalsz += sz / 8;
- }
- }
- O << ".param .align " << retAlignment << " .b8 _[" << totalsz << "]";
+ if((retTy->getTypeID() == Type::StructTyID) ||
+ isa<VectorType>(retTy)) {
+ O << ".param .align "
+ << retAlignment
+ << " .b8 _["
+ << getDataLayout()->getTypeAllocSize(retTy) << "]";
} else {
assert(false && "Unknown return type");
}
@@ -526,7 +1041,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &isTailCall = CLI.IsTailCall;
- ArgListTy &Args = CLI.Args;
+ ArgListTy &Args = CLI.getArgs();
Type *retTy = CLI.RetTy;
ImmutableCallSite *CS = CLI.CS;
@@ -565,7 +1080,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (Ty->isAggregateType()) {
// aggregate
SmallVector<EVT, 16> vtparts;
- ComputeValueVTs(*this, Ty, vtparts);
+ SmallVector<uint64_t, 16> Offsets;
+ ComputePTXValueVTs(*this, Ty, vtparts, &Offsets, 0);
unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
// declare .param .align <align> .b8 .param<n>[<size>];
@@ -575,36 +1091,28 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
DAG.getConstant(paramCount, MVT::i32),
DAG.getConstant(sz, MVT::i32), InFlag };
Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
- DeclareParamOps, 5);
+ DeclareParamOps);
InFlag = Chain.getValue(1);
- unsigned curOffset = 0;
for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
- unsigned elems = 1;
EVT elemtype = vtparts[j];
- if (vtparts[j].isVector()) {
- elems = vtparts[j].getVectorNumElements();
- elemtype = vtparts[j].getVectorElementType();
- }
- for (unsigned k = 0, ke = elems; k != ke; ++k) {
- unsigned sz = elemtype.getSizeInBits();
- if (elemtype.isInteger() && (sz < 8))
- sz = 8;
- SDValue StVal = OutVals[OIdx];
- if (elemtype.getSizeInBits() < 16) {
- StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);
- }
- SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CopyParamOps[] = { Chain,
- DAG.getConstant(paramCount, MVT::i32),
- DAG.getConstant(curOffset, MVT::i32),
- StVal, InFlag };
- Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
- CopyParamVTs, &CopyParamOps[0], 5,
- elemtype, MachinePointerInfo());
- InFlag = Chain.getValue(1);
- curOffset += sz / 8;
- ++OIdx;
+ unsigned ArgAlign = GreatestCommonDivisor64(align, Offsets[j]);
+ if (elemtype.isInteger() && (sz < 8))
+ sz = 8;
+ SDValue StVal = OutVals[OIdx];
+ if (elemtype.getSizeInBits() < 16) {
+ StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);
}
+ SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CopyParamOps[] = { Chain,
+ DAG.getConstant(paramCount, MVT::i32),
+ DAG.getConstant(Offsets[j], MVT::i32),
+ StVal, InFlag };
+ Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
+ CopyParamVTs, CopyParamOps,
+ elemtype, MachinePointerInfo(),
+ ArgAlign);
+ InFlag = Chain.getValue(1);
+ ++OIdx;
}
if (vtparts.size() > 0)
--OIdx;
@@ -621,7 +1129,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
DAG.getConstant(paramCount, MVT::i32),
DAG.getConstant(sz, MVT::i32), InFlag };
Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
- DeclareParamOps, 5);
+ DeclareParamOps);
InFlag = Chain.getValue(1);
unsigned NumElts = ObjectVT.getVectorNumElements();
EVT EltVT = ObjectVT.getVectorElementType();
@@ -644,7 +1152,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
DAG.getConstant(0, MVT::i32), Elt,
InFlag };
Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
- CopyParamVTs, &CopyParamOps[0], 5,
+ CopyParamVTs, CopyParamOps,
MemVT, MachinePointerInfo());
InFlag = Chain.getValue(1);
} else if (NumElts == 2) {
@@ -661,7 +1169,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
DAG.getConstant(0, MVT::i32), Elt0, Elt1,
InFlag };
Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParamV2, dl,
- CopyParamVTs, &CopyParamOps[0], 6,
+ CopyParamVTs, CopyParamOps,
MemVT, MachinePointerInfo());
InFlag = Chain.getValue(1);
} else {
@@ -735,9 +1243,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
Ops.push_back(InFlag);
SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- Chain = DAG.getMemIntrinsicNode(Opc, dl, CopyParamVTs, &Ops[0],
- Ops.size(), MemVT,
- MachinePointerInfo());
+ Chain = DAG.getMemIntrinsicNode(Opc, dl, CopyParamVTs, Ops,
+ MemVT, MachinePointerInfo());
InFlag = Chain.getValue(1);
curOffset += PerStoreOffset;
}
@@ -762,7 +1269,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
DAG.getConstant(sz, MVT::i32),
DAG.getConstant(0, MVT::i32), InFlag };
Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
- DeclareParamOps, 5);
+ DeclareParamOps);
InFlag = Chain.getValue(1);
SDValue OutV = OutVals[OIdx];
if (needExtend) {
@@ -781,7 +1288,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
opcode = NVPTXISD::StoreParamU32;
else if (Outs[OIdx].Flags.isSExt())
opcode = NVPTXISD::StoreParamS32;
- Chain = DAG.getMemIntrinsicNode(opcode, dl, CopyParamVTs, CopyParamOps, 5,
+ Chain = DAG.getMemIntrinsicNode(opcode, dl, CopyParamVTs, CopyParamOps,
VT, MachinePointerInfo());
InFlag = Chain.getValue(1);
@@ -790,13 +1297,15 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
// struct or vector
SmallVector<EVT, 16> vtparts;
+ SmallVector<uint64_t, 16> Offsets;
const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty);
assert(PTy && "Type of a byval parameter should be pointer");
- ComputeValueVTs(*this, PTy->getElementType(), vtparts);
+ ComputePTXValueVTs(*this, PTy->getElementType(), vtparts, &Offsets, 0);
// declare .param .align <align> .b8 .param<n>[<size>];
unsigned sz = Outs[OIdx].Flags.getByValSize();
SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ unsigned ArgAlign = Outs[OIdx].Flags.getByValAlign();
// The ByValAlign in the Outs[OIdx].Flags is alway set at this point,
// so we don't need to worry about natural alignment or not.
// See TargetLowering::LowerCallTo().
@@ -806,40 +1315,30 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
InFlag
};
Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
- DeclareParamOps, 5);
+ DeclareParamOps);
InFlag = Chain.getValue(1);
- unsigned curOffset = 0;
for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
- unsigned elems = 1;
EVT elemtype = vtparts[j];
- if (vtparts[j].isVector()) {
- elems = vtparts[j].getVectorNumElements();
- elemtype = vtparts[j].getVectorElementType();
+ int curOffset = Offsets[j];
+ unsigned PartAlign = GreatestCommonDivisor64(ArgAlign, curOffset);
+ SDValue srcAddr =
+ DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[OIdx],
+ DAG.getConstant(curOffset, getPointerTy()));
+ SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
+ MachinePointerInfo(), false, false, false,
+ PartAlign);
+ if (elemtype.getSizeInBits() < 16) {
+ theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
}
- for (unsigned k = 0, ke = elems; k != ke; ++k) {
- unsigned sz = elemtype.getSizeInBits();
- if (elemtype.isInteger() && (sz < 8))
- sz = 8;
- SDValue srcAddr =
- DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[OIdx],
- DAG.getConstant(curOffset, getPointerTy()));
- SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
- MachinePointerInfo(), false, false, false,
- 0);
- if (elemtype.getSizeInBits() < 16) {
- theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
- }
- SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
- DAG.getConstant(curOffset, MVT::i32), theVal,
- InFlag };
- Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
- CopyParamOps, 5, elemtype,
- MachinePointerInfo());
+ SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
+ DAG.getConstant(curOffset, MVT::i32), theVal,
+ InFlag };
+ Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
+ CopyParamOps, elemtype,
+ MachinePointerInfo());
- InFlag = Chain.getValue(1);
- curOffset += sz / 8;
- }
+ InFlag = Chain.getValue(1);
}
++paramCount;
}
@@ -856,8 +1355,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// .param .align 16 .b8 retval0[<size-in-bytes>], or
// .param .b<size-in-bits> retval0
unsigned resultsz = TD->getTypeAllocSizeInBits(retTy);
- if (retTy->isPrimitiveType() || retTy->isIntegerTy() ||
- retTy->isPointerTy()) {
+ if (retTy->isSingleValueType()) {
// Scalar needs to be at least 32bit wide
if (resultsz < 32)
resultsz = 32;
@@ -866,7 +1364,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
DAG.getConstant(resultsz, MVT::i32),
DAG.getConstant(0, MVT::i32), InFlag };
Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
- DeclareRetOps, 5);
+ DeclareRetOps);
InFlag = Chain.getValue(1);
} else {
retAlignment = getArgumentAlignment(Callee, CS, retTy, 0);
@@ -876,7 +1374,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
DAG.getConstant(resultsz / 8, MVT::i32),
DAG.getConstant(0, MVT::i32), InFlag };
Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
- DeclareRetOps, 5);
+ DeclareRetOps);
InFlag = Chain.getValue(1);
}
}
@@ -896,7 +1394,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SDValue ProtoOps[] = {
Chain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32), InFlag,
};
- Chain = DAG.getNode(NVPTXISD::CallPrototype, dl, ProtoVTs, &ProtoOps[0], 3);
+ Chain = DAG.getNode(NVPTXISD::CallPrototype, dl, ProtoVTs, ProtoOps);
InFlag = Chain.getValue(1);
}
// Op to just print "call"
@@ -905,20 +1403,20 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, MVT::i32), InFlag
};
Chain = DAG.getNode(Func ? (NVPTXISD::PrintCallUni) : (NVPTXISD::PrintCall),
- dl, PrintCallVTs, PrintCallOps, 3);
+ dl, PrintCallVTs, PrintCallOps);
InFlag = Chain.getValue(1);
// Ops to print out the function name
SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue CallVoidOps[] = { Chain, Callee, InFlag };
- Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps, 3);
+ Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps);
InFlag = Chain.getValue(1);
// Ops to print out the param list
SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue CallArgBeginOps[] = { Chain, InFlag };
Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,
- CallArgBeginOps, 2);
+ CallArgBeginOps);
InFlag = Chain.getValue(1);
for (unsigned i = 0, e = paramCount; i != e; ++i) {
@@ -930,27 +1428,25 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue CallArgOps[] = { Chain, DAG.getConstant(1, MVT::i32),
DAG.getConstant(i, MVT::i32), InFlag };
- Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps, 4);
+ Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps);
InFlag = Chain.getValue(1);
}
SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue CallArgEndOps[] = { Chain, DAG.getConstant(Func ? 1 : 0, MVT::i32),
InFlag };
- Chain =
- DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps, 3);
+ Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps);
InFlag = Chain.getValue(1);
if (!Func) {
SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue PrototypeOps[] = { Chain, DAG.getConstant(uniqueCallSite, MVT::i32),
InFlag };
- Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps, 3);
+ Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps);
InFlag = Chain.getValue(1);
}
// Generate loads from param memory/moves from registers for result
if (Ins.size() > 0) {
- unsigned resoffset = 0;
if (retTy && retTy->isVectorTy()) {
EVT ObjectVT = getValueType(retTy);
unsigned NumElts = ObjectVT.getVectorNumElements();
@@ -959,29 +1455,29 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
ObjectVT) == NumElts &&
"Vector was not scalarized");
unsigned sz = EltVT.getSizeInBits();
- bool needTruncate = sz < 16 ? true : false;
+ bool needTruncate = sz < 8 ? true : false;
if (NumElts == 1) {
// Just a simple load
- std::vector<EVT> LoadRetVTs;
- if (needTruncate) {
- // If loading i1 result, generate
- // load i16
+ SmallVector<EVT, 4> LoadRetVTs;
+ if (EltVT == MVT::i1 || EltVT == MVT::i8) {
+ // If loading i1/i8 result, generate
+ // load.b8 i16
+ // if i1
// trunc i16 to i1
LoadRetVTs.push_back(MVT::i16);
} else
LoadRetVTs.push_back(EltVT);
LoadRetVTs.push_back(MVT::Other);
LoadRetVTs.push_back(MVT::Glue);
- std::vector<SDValue> LoadRetOps;
+ SmallVector<SDValue, 4> LoadRetOps;
LoadRetOps.push_back(Chain);
LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
LoadRetOps.push_back(InFlag);
SDValue retval = DAG.getMemIntrinsicNode(
NVPTXISD::LoadParam, dl,
- DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()), &LoadRetOps[0],
- LoadRetOps.size(), EltVT, MachinePointerInfo());
+ DAG.getVTList(LoadRetVTs), LoadRetOps, EltVT, MachinePointerInfo());
Chain = retval.getValue(1);
InFlag = retval.getValue(2);
SDValue Ret0 = retval;
@@ -990,10 +1486,11 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
InVals.push_back(Ret0);
} else if (NumElts == 2) {
// LoadV2
- std::vector<EVT> LoadRetVTs;
- if (needTruncate) {
- // If loading i1 result, generate
- // load i16
+ SmallVector<EVT, 4> LoadRetVTs;
+ if (EltVT == MVT::i1 || EltVT == MVT::i8) {
+ // If loading i1/i8 result, generate
+ // load.b8 i16
+ // if i1
// trunc i16 to i1
LoadRetVTs.push_back(MVT::i16);
LoadRetVTs.push_back(MVT::i16);
@@ -1003,15 +1500,14 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
LoadRetVTs.push_back(MVT::Other);
LoadRetVTs.push_back(MVT::Glue);
- std::vector<SDValue> LoadRetOps;
+ SmallVector<SDValue, 4> LoadRetOps;
LoadRetOps.push_back(Chain);
LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
LoadRetOps.push_back(InFlag);
SDValue retval = DAG.getMemIntrinsicNode(
NVPTXISD::LoadParamV2, dl,
- DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()), &LoadRetOps[0],
- LoadRetOps.size(), EltVT, MachinePointerInfo());
+ DAG.getVTList(LoadRetVTs), LoadRetOps, EltVT, MachinePointerInfo());
Chain = retval.getValue(2);
InFlag = retval.getValue(3);
SDValue Ret0 = retval.getValue(0);
@@ -1037,9 +1533,10 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize);
for (unsigned i = 0; i < NumElts; i += VecSize) {
SmallVector<EVT, 8> LoadRetVTs;
- if (needTruncate) {
- // If loading i1 result, generate
- // load i16
+ if (EltVT == MVT::i1 || EltVT == MVT::i8) {
+ // If loading i1/i8 result, generate
+ // load.b8 i16
+ // if i1
// trunc i16 to i1
for (unsigned j = 0; j < VecSize; ++j)
LoadRetVTs.push_back(MVT::i16);
@@ -1055,8 +1552,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
LoadRetOps.push_back(DAG.getConstant(Ofst, MVT::i32));
LoadRetOps.push_back(InFlag);
SDValue retval = DAG.getMemIntrinsicNode(
- Opc, dl, DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()),
- &LoadRetOps[0], LoadRetOps.size(), EltVT, MachinePointerInfo());
+ Opc, dl, DAG.getVTList(LoadRetVTs),
+ LoadRetOps, EltVT, MachinePointerInfo());
if (VecSize == 2) {
Chain = retval.getValue(2);
InFlag = retval.getValue(3);
@@ -1078,10 +1575,13 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
} else {
SmallVector<EVT, 16> VTs;
- ComputePTXValueVTs(*this, retTy, VTs);
+ SmallVector<uint64_t, 16> Offsets;
+ ComputePTXValueVTs(*this, retTy, VTs, &Offsets, 0);
assert(VTs.size() == Ins.size() && "Bad value decomposition");
+ unsigned RetAlign = getArgumentAlignment(Callee, CS, retTy, 0);
for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
unsigned sz = VTs[i].getSizeInBits();
+ unsigned AlignI = GreatestCommonDivisor64(RetAlign, Offsets[i]);
bool needTruncate = sz < 8 ? true : false;
if (VTs[i].isInteger() && (sz < 8))
sz = 8;
@@ -1107,19 +1607,18 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVector<SDValue, 4> LoadRetOps;
LoadRetOps.push_back(Chain);
LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
- LoadRetOps.push_back(DAG.getConstant(resoffset, MVT::i32));
+ LoadRetOps.push_back(DAG.getConstant(Offsets[i], MVT::i32));
LoadRetOps.push_back(InFlag);
SDValue retval = DAG.getMemIntrinsicNode(
NVPTXISD::LoadParam, dl,
- DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()), &LoadRetOps[0],
- LoadRetOps.size(), TheLoadType, MachinePointerInfo());
+ DAG.getVTList(LoadRetVTs), LoadRetOps,
+ TheLoadType, MachinePointerInfo(), AlignI);
Chain = retval.getValue(1);
InFlag = retval.getValue(2);
SDValue Ret0 = retval.getValue(0);
if (needTruncate)
Ret0 = DAG.getNode(ISD::TRUNCATE, dl, Ins[i].VT, Ret0);
InVals.push_back(Ret0);
- resoffset += sz / 8;
}
}
}
@@ -1154,8 +1653,128 @@ NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
DAG.getIntPtrConstant(j)));
}
}
- return DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), &Ops[0],
- Ops.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), Ops);
+}
+
+/// LowerShiftRightParts - Lower SRL_PARTS, SRA_PARTS, which
+/// 1) returns two i32 values and take a 2 x i32 value to shift plus a shift
+/// amount, or
+/// 2) returns two i64 values and take a 2 x i64 value to shift plus a shift
+/// amount.
+SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
+ SelectionDAG &DAG) const {
+ assert(Op.getNumOperands() == 3 && "Not a double-shift!");
+ assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
+
+ EVT VT = Op.getValueType();
+ unsigned VTBits = VT.getSizeInBits();
+ SDLoc dl(Op);
+ SDValue ShOpLo = Op.getOperand(0);
+ SDValue ShOpHi = Op.getOperand(1);
+ SDValue ShAmt = Op.getOperand(2);
+ unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
+
+ if (VTBits == 32 && nvptxSubtarget.getSmVersion() >= 35) {
+
+ // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
+ // {dHi, dLo} = {aHi, aLo} >> Amt
+ // dHi = aHi >> Amt
+ // dLo = shf.r.clamp aLo, aHi, Amt
+
+ SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
+ SDValue Lo = DAG.getNode(NVPTXISD::FUN_SHFR_CLAMP, dl, VT, ShOpLo, ShOpHi,
+ ShAmt);
+
+ SDValue Ops[2] = { Lo, Hi };
+ return DAG.getMergeValues(Ops, dl);
+ }
+ else {
+
+ // {dHi, dLo} = {aHi, aLo} >> Amt
+ // - if (Amt>=size) then
+ // dLo = aHi >> (Amt-size)
+ // dHi = aHi >> Amt (this is either all 0 or all 1)
+ // else
+ // dLo = (aLo >>logic Amt) | (aHi << (size-Amt))
+ // dHi = aHi >> Amt
+
+ SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
+ DAG.getConstant(VTBits, MVT::i32), ShAmt);
+ SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
+ SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
+ DAG.getConstant(VTBits, MVT::i32));
+ SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
+ SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
+ SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
+
+ SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
+ DAG.getConstant(VTBits, MVT::i32), ISD::SETGE);
+ SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
+ SDValue Lo = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
+
+ SDValue Ops[2] = { Lo, Hi };
+ return DAG.getMergeValues(Ops, dl);
+ }
+}
+
+/// LowerShiftLeftParts - Lower SHL_PARTS, which
+/// 1) returns two i32 values and take a 2 x i32 value to shift plus a shift
+/// amount, or
+/// 2) returns two i64 values and take a 2 x i64 value to shift plus a shift
+/// amount.
+SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,
+ SelectionDAG &DAG) const {
+ assert(Op.getNumOperands() == 3 && "Not a double-shift!");
+ assert(Op.getOpcode() == ISD::SHL_PARTS);
+
+ EVT VT = Op.getValueType();
+ unsigned VTBits = VT.getSizeInBits();
+ SDLoc dl(Op);
+ SDValue ShOpLo = Op.getOperand(0);
+ SDValue ShOpHi = Op.getOperand(1);
+ SDValue ShAmt = Op.getOperand(2);
+
+ if (VTBits == 32 && nvptxSubtarget.getSmVersion() >= 35) {
+
+ // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
+ // {dHi, dLo} = {aHi, aLo} << Amt
+ // dHi = shf.l.clamp aLo, aHi, Amt
+ // dLo = aLo << Amt
+
+ SDValue Hi = DAG.getNode(NVPTXISD::FUN_SHFL_CLAMP, dl, VT, ShOpLo, ShOpHi,
+ ShAmt);
+ SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
+
+ SDValue Ops[2] = { Lo, Hi };
+ return DAG.getMergeValues(Ops, dl);
+ }
+ else {
+
+ // {dHi, dLo} = {aHi, aLo} << Amt
+ // - if (Amt>=size) then
+ // dLo = aLo << Amt (all 0)
+ // dLo = aLo << (Amt-size)
+ // else
+ // dLo = aLo << Amt
+ // dHi = (aHi << Amt) | (aLo >> (size-Amt))
+
+ SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
+ DAG.getConstant(VTBits, MVT::i32), ShAmt);
+ SDValue Tmp1 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
+ SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
+ DAG.getConstant(VTBits, MVT::i32));
+ SDValue Tmp2 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
+ SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
+ SDValue TrueVal = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
+
+ SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
+ DAG.getConstant(VTBits, MVT::i32), ISD::SETGE);
+ SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
+ SDValue Hi = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
+
+ SDValue Ops[2] = { Lo, Hi };
+ return DAG.getMergeValues(Ops, dl);
+ }
}
SDValue
@@ -1178,6 +1797,11 @@ NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
return LowerSTORE(Op, DAG);
case ISD::LOAD:
return LowerLOAD(Op, DAG);
+ case ISD::SHL_PARTS:
+ return LowerShiftLeftParts(Op, DAG);
+ case ISD::SRA_PARTS:
+ case ISD::SRL_PARTS:
+ return LowerShiftRightParts(Op, DAG);
default:
llvm_unreachable("Custom lowering not defined for operation");
}
@@ -1210,7 +1834,7 @@ SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
// load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
// in LegalizeDAG.cpp which also uses MergeValues.
SDValue Ops[] = { result, LD->getChain() };
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, dl);
}
SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
@@ -1253,13 +1877,28 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
break;
}
+ MemSDNode *MemSD = cast<MemSDNode>(N);
+ const DataLayout *TD = getDataLayout();
+
+ unsigned Align = MemSD->getAlignment();
+ unsigned PrefAlign =
+ TD->getPrefTypeAlignment(ValVT.getTypeForEVT(*DAG.getContext()));
+ if (Align < PrefAlign) {
+ // This store is not sufficiently aligned, so bail out and let this vector
+ // store be scalarized. Note that we may still be able to emit smaller
+ // vector stores. For example, if we are storing a <4 x float> with an
+ // alignment of 8, this check will fail but the legalizer will try again
+ // with 2 x <2 x float>, which will succeed with an alignment of 8.
+ return SDValue();
+ }
+
unsigned Opcode = 0;
EVT EltVT = ValVT.getVectorElementType();
unsigned NumElts = ValVT.getVectorNumElements();
// Since StoreV2 is a target node, we cannot rely on DAG type legalization.
// Therefore, we must ensure the type is legal. For i1 and i8, we set the
- // stored type to i16 and propogate the "real" type as the memory type.
+ // stored type to i16 and propagate the "real" type as the memory type.
bool NeedExt = false;
if (EltVT.getSizeInBits() < 16)
NeedExt = true;
@@ -1295,10 +1934,8 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
Ops.push_back(N->getOperand(i));
}
- MemSDNode *MemSD = cast<MemSDNode>(N);
-
SDValue NewSt = DAG.getMemIntrinsicNode(
- Opcode, DL, DAG.getVTList(MVT::Other), &Ops[0], Ops.size(),
+ Opcode, DL, DAG.getVTList(MVT::Other), Ops,
MemSD->getMemoryVT(), MemSD->getMemOperand());
//return DCI.CombineTo(N, NewSt, true);
@@ -1391,7 +2028,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
const Function *F = MF.getFunction();
const AttributeSet &PAL = F->getAttributes();
- const TargetLowering *TLI = nvTM->getTargetLowering();
+ const TargetLowering *TLI = DAG.getTarget().getTargetLowering();
SDValue Root = DAG.getRoot();
std::vector<SDValue> OutChains;
@@ -1430,7 +2067,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
if (isImageOrSamplerVal(
theArgs[i],
(theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent()
- : 0))) {
+ : nullptr))) {
assert(isKernel && "Only kernels can have image/sampler params");
InVals.push_back(DAG.getConstant(i + 1, MVT::i32));
continue;
@@ -1445,8 +2082,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
assert(vtparts.size() > 0 && "empty aggregate type not expected");
for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
++parti) {
- EVT partVT = vtparts[parti];
- InVals.push_back(DAG.getNode(ISD::UNDEF, dl, partVT));
+ InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
++InsIdx;
}
if (vtparts.size() > 0)
@@ -1684,8 +2320,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
//}
if (!OutChains.empty())
- DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &OutChains[0],
- OutChains.size()));
+ DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains));
return Chain;
}
@@ -1727,7 +2362,7 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal };
Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
- DAG.getVTList(MVT::Other), &Ops[0], 3,
+ DAG.getVTList(MVT::Other), Ops,
EltVT, MachinePointerInfo());
} else if (NumElts == 2) {
@@ -1743,7 +2378,7 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal0,
StoreVal1 };
Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetvalV2, dl,
- DAG.getVTList(MVT::Other), &Ops[0], 4,
+ DAG.getVTList(MVT::Other), Ops,
EltVT, MachinePointerInfo());
} else {
// V4 stores
@@ -1763,7 +2398,7 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
unsigned Offset = 0;
EVT VecVT =
- EVT::getVectorVT(F->getContext(), OutVals[0].getValueType(), VecSize);
+ EVT::getVectorVT(F->getContext(), EltVT, VecSize);
unsigned PerStoreOffset =
TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
@@ -1815,19 +2450,17 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
// Chain = DAG.getNode(Opc, dl, MVT::Other, &Ops[0], Ops.size());
Chain =
- DAG.getMemIntrinsicNode(Opc, dl, DAG.getVTList(MVT::Other), &Ops[0],
- Ops.size(), EltVT, MachinePointerInfo());
+ DAG.getMemIntrinsicNode(Opc, dl, DAG.getVTList(MVT::Other), Ops,
+ EltVT, MachinePointerInfo());
Offset += PerStoreOffset;
}
}
} else {
SmallVector<EVT, 16> ValVTs;
- // const_cast is necessary since we are still using an LLVM version from
- // before the type system re-write.
- ComputePTXValueVTs(*this, RetTy, ValVTs);
+ SmallVector<uint64_t, 16> Offsets;
+ ComputePTXValueVTs(*this, RetTy, ValVTs, &Offsets, 0);
assert(ValVTs.size() == OutVals.size() && "Bad return value decomposition");
- unsigned SizeSoFar = 0;
for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
SDValue theVal = OutVals[i];
EVT TheValType = theVal.getValueType();
@@ -1851,16 +2484,14 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
else if (TmpVal.getValueType().getSizeInBits() < 16)
TmpVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, TmpVal);
- SDValue Ops[] = { Chain, DAG.getConstant(SizeSoFar, MVT::i32), TmpVal };
+ SDValue Ops[] = {
+ Chain,
+ DAG.getConstant(Offsets[i], MVT::i32),
+ TmpVal };
Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
- DAG.getVTList(MVT::Other), &Ops[0],
- 3, TheStoreType,
+ DAG.getVTList(MVT::Other), Ops,
+ TheStoreType,
MachinePointerInfo());
- if(TheValType.isVector())
- SizeSoFar +=
- TheStoreType.getVectorElementType().getStoreSizeInBits() / 8;
- else
- SizeSoFar += TheStoreType.getStoreSizeInBits()/8;
}
}
}
@@ -1892,6 +2523,702 @@ bool NVPTXTargetLowering::isTypeSupportedInIntrinsic(MVT VT) const {
return false;
}
+static unsigned getOpcForTextureInstr(unsigned Intrinsic) {
+ switch (Intrinsic) {
+ default:
+ return 0;
+
+ case Intrinsic::nvvm_tex_1d_v4f32_s32:
+ return NVPTXISD::Tex1DFloatS32;
+ case Intrinsic::nvvm_tex_1d_v4f32_f32:
+ return NVPTXISD::Tex1DFloatFloat;
+ case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
+ return NVPTXISD::Tex1DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
+ return NVPTXISD::Tex1DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_1d_v4s32_s32:
+ return NVPTXISD::Tex1DS32S32;
+ case Intrinsic::nvvm_tex_1d_v4s32_f32:
+ return NVPTXISD::Tex1DS32Float;
+ case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
+ return NVPTXISD::Tex1DS32FloatLevel;
+ case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
+ return NVPTXISD::Tex1DS32FloatGrad;
+ case Intrinsic::nvvm_tex_1d_v4u32_s32:
+ return NVPTXISD::Tex1DU32S32;
+ case Intrinsic::nvvm_tex_1d_v4u32_f32:
+ return NVPTXISD::Tex1DU32Float;
+ case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
+ return NVPTXISD::Tex1DU32FloatLevel;
+ case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
+ return NVPTXISD::Tex1DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
+ return NVPTXISD::Tex1DArrayFloatS32;
+ case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
+ return NVPTXISD::Tex1DArrayFloatFloat;
+ case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
+ return NVPTXISD::Tex1DArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
+ return NVPTXISD::Tex1DArrayFloatFloatGrad;
+ case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
+ return NVPTXISD::Tex1DArrayS32S32;
+ case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
+ return NVPTXISD::Tex1DArrayS32Float;
+ case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
+ return NVPTXISD::Tex1DArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
+ return NVPTXISD::Tex1DArrayS32FloatGrad;
+ case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
+ return NVPTXISD::Tex1DArrayU32S32;
+ case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
+ return NVPTXISD::Tex1DArrayU32Float;
+ case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
+ return NVPTXISD::Tex1DArrayU32FloatLevel;
+ case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
+ return NVPTXISD::Tex1DArrayU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_2d_v4f32_s32:
+ return NVPTXISD::Tex2DFloatS32;
+ case Intrinsic::nvvm_tex_2d_v4f32_f32:
+ return NVPTXISD::Tex2DFloatFloat;
+ case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
+ return NVPTXISD::Tex2DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
+ return NVPTXISD::Tex2DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_2d_v4s32_s32:
+ return NVPTXISD::Tex2DS32S32;
+ case Intrinsic::nvvm_tex_2d_v4s32_f32:
+ return NVPTXISD::Tex2DS32Float;
+ case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
+ return NVPTXISD::Tex2DS32FloatLevel;
+ case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
+ return NVPTXISD::Tex2DS32FloatGrad;
+ case Intrinsic::nvvm_tex_2d_v4u32_s32:
+ return NVPTXISD::Tex2DU32S32;
+ case Intrinsic::nvvm_tex_2d_v4u32_f32:
+ return NVPTXISD::Tex2DU32Float;
+ case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
+ return NVPTXISD::Tex2DU32FloatLevel;
+ case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
+ return NVPTXISD::Tex2DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
+ return NVPTXISD::Tex2DArrayFloatS32;
+ case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
+ return NVPTXISD::Tex2DArrayFloatFloat;
+ case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
+ return NVPTXISD::Tex2DArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
+ return NVPTXISD::Tex2DArrayFloatFloatGrad;
+ case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
+ return NVPTXISD::Tex2DArrayS32S32;
+ case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
+ return NVPTXISD::Tex2DArrayS32Float;
+ case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
+ return NVPTXISD::Tex2DArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
+ return NVPTXISD::Tex2DArrayS32FloatGrad;
+ case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
+ return NVPTXISD::Tex2DArrayU32S32;
+ case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
+ return NVPTXISD::Tex2DArrayU32Float;
+ case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
+ return NVPTXISD::Tex2DArrayU32FloatLevel;
+ case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
+ return NVPTXISD::Tex2DArrayU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_3d_v4f32_s32:
+ return NVPTXISD::Tex3DFloatS32;
+ case Intrinsic::nvvm_tex_3d_v4f32_f32:
+ return NVPTXISD::Tex3DFloatFloat;
+ case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
+ return NVPTXISD::Tex3DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
+ return NVPTXISD::Tex3DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_3d_v4s32_s32:
+ return NVPTXISD::Tex3DS32S32;
+ case Intrinsic::nvvm_tex_3d_v4s32_f32:
+ return NVPTXISD::Tex3DS32Float;
+ case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
+ return NVPTXISD::Tex3DS32FloatLevel;
+ case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
+ return NVPTXISD::Tex3DS32FloatGrad;
+ case Intrinsic::nvvm_tex_3d_v4u32_s32:
+ return NVPTXISD::Tex3DU32S32;
+ case Intrinsic::nvvm_tex_3d_v4u32_f32:
+ return NVPTXISD::Tex3DU32Float;
+ case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
+ return NVPTXISD::Tex3DU32FloatLevel;
+ case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
+ return NVPTXISD::Tex3DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_cube_v4f32_f32:
+ return NVPTXISD::TexCubeFloatFloat;
+ case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
+ return NVPTXISD::TexCubeFloatFloatLevel;
+ case Intrinsic::nvvm_tex_cube_v4s32_f32:
+ return NVPTXISD::TexCubeS32Float;
+ case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
+ return NVPTXISD::TexCubeS32FloatLevel;
+ case Intrinsic::nvvm_tex_cube_v4u32_f32:
+ return NVPTXISD::TexCubeU32Float;
+ case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
+ return NVPTXISD::TexCubeU32FloatLevel;
+
+ case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
+ return NVPTXISD::TexCubeArrayFloatFloat;
+ case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
+ return NVPTXISD::TexCubeArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
+ return NVPTXISD::TexCubeArrayS32Float;
+ case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
+ return NVPTXISD::TexCubeArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
+ return NVPTXISD::TexCubeArrayU32Float;
+ case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
+ return NVPTXISD::TexCubeArrayU32FloatLevel;
+
+ case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
+ return NVPTXISD::Tld4R2DFloatFloat;
+ case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
+ return NVPTXISD::Tld4G2DFloatFloat;
+ case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
+ return NVPTXISD::Tld4B2DFloatFloat;
+ case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
+ return NVPTXISD::Tld4A2DFloatFloat;
+ case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
+ return NVPTXISD::Tld4R2DS64Float;
+ case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
+ return NVPTXISD::Tld4G2DS64Float;
+ case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
+ return NVPTXISD::Tld4B2DS64Float;
+ case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
+ return NVPTXISD::Tld4A2DS64Float;
+ case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
+ return NVPTXISD::Tld4R2DU64Float;
+ case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
+ return NVPTXISD::Tld4G2DU64Float;
+ case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
+ return NVPTXISD::Tld4B2DU64Float;
+ case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
+ return NVPTXISD::Tld4A2DU64Float;
+
+ case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
+ return NVPTXISD::TexUnified1DFloatS32;
+ case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
+ return NVPTXISD::TexUnified1DFloatFloat;
+ case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
+ return NVPTXISD::TexUnified1DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
+ return NVPTXISD::TexUnified1DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
+ return NVPTXISD::TexUnified1DS32S32;
+ case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
+ return NVPTXISD::TexUnified1DS32Float;
+ case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
+ return NVPTXISD::TexUnified1DS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
+ return NVPTXISD::TexUnified1DS32FloatGrad;
+ case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
+ return NVPTXISD::TexUnified1DU32S32;
+ case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
+ return NVPTXISD::TexUnified1DU32Float;
+ case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
+ return NVPTXISD::TexUnified1DU32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
+ return NVPTXISD::TexUnified1DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
+ return NVPTXISD::TexUnified1DArrayFloatS32;
+ case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
+ return NVPTXISD::TexUnified1DArrayFloatFloat;
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
+ return NVPTXISD::TexUnified1DArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
+ return NVPTXISD::TexUnified1DArrayFloatFloatGrad;
+ case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
+ return NVPTXISD::TexUnified1DArrayS32S32;
+ case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
+ return NVPTXISD::TexUnified1DArrayS32Float;
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
+ return NVPTXISD::TexUnified1DArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
+ return NVPTXISD::TexUnified1DArrayS32FloatGrad;
+ case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
+ return NVPTXISD::TexUnified1DArrayU32S32;
+ case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
+ return NVPTXISD::TexUnified1DArrayU32Float;
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
+ return NVPTXISD::TexUnified1DArrayU32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
+ return NVPTXISD::TexUnified1DArrayU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
+ return NVPTXISD::TexUnified2DFloatS32;
+ case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
+ return NVPTXISD::TexUnified2DFloatFloat;
+ case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
+ return NVPTXISD::TexUnified2DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
+ return NVPTXISD::TexUnified2DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
+ return NVPTXISD::TexUnified2DS32S32;
+ case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
+ return NVPTXISD::TexUnified2DS32Float;
+ case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
+ return NVPTXISD::TexUnified2DS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
+ return NVPTXISD::TexUnified2DS32FloatGrad;
+ case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
+ return NVPTXISD::TexUnified2DU32S32;
+ case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
+ return NVPTXISD::TexUnified2DU32Float;
+ case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
+ return NVPTXISD::TexUnified2DU32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
+ return NVPTXISD::TexUnified2DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
+ return NVPTXISD::TexUnified2DArrayFloatS32;
+ case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
+ return NVPTXISD::TexUnified2DArrayFloatFloat;
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
+ return NVPTXISD::TexUnified2DArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
+ return NVPTXISD::TexUnified2DArrayFloatFloatGrad;
+ case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
+ return NVPTXISD::TexUnified2DArrayS32S32;
+ case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
+ return NVPTXISD::TexUnified2DArrayS32Float;
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
+ return NVPTXISD::TexUnified2DArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
+ return NVPTXISD::TexUnified2DArrayS32FloatGrad;
+ case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
+ return NVPTXISD::TexUnified2DArrayU32S32;
+ case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
+ return NVPTXISD::TexUnified2DArrayU32Float;
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
+ return NVPTXISD::TexUnified2DArrayU32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
+ return NVPTXISD::TexUnified2DArrayU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
+ return NVPTXISD::TexUnified3DFloatS32;
+ case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
+ return NVPTXISD::TexUnified3DFloatFloat;
+ case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
+ return NVPTXISD::TexUnified3DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
+ return NVPTXISD::TexUnified3DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
+ return NVPTXISD::TexUnified3DS32S32;
+ case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
+ return NVPTXISD::TexUnified3DS32Float;
+ case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
+ return NVPTXISD::TexUnified3DS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
+ return NVPTXISD::TexUnified3DS32FloatGrad;
+ case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
+ return NVPTXISD::TexUnified3DU32S32;
+ case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
+ return NVPTXISD::TexUnified3DU32Float;
+ case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
+ return NVPTXISD::TexUnified3DU32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
+ return NVPTXISD::TexUnified3DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
+ return NVPTXISD::TexUnifiedCubeFloatFloat;
+ case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
+ return NVPTXISD::TexUnifiedCubeFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
+ return NVPTXISD::TexUnifiedCubeS32Float;
+ case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
+ return NVPTXISD::TexUnifiedCubeS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
+ return NVPTXISD::TexUnifiedCubeU32Float;
+ case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
+ return NVPTXISD::TexUnifiedCubeU32FloatLevel;
+
+ case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayFloatFloat;
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayS32Float;
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayU32Float;
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayU32FloatLevel;
+
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
+ return NVPTXISD::Tld4UnifiedR2DFloatFloat;
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
+ return NVPTXISD::Tld4UnifiedG2DFloatFloat;
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
+ return NVPTXISD::Tld4UnifiedB2DFloatFloat;
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
+ return NVPTXISD::Tld4UnifiedA2DFloatFloat;
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
+ return NVPTXISD::Tld4UnifiedR2DS64Float;
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
+ return NVPTXISD::Tld4UnifiedG2DS64Float;
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
+ return NVPTXISD::Tld4UnifiedB2DS64Float;
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
+ return NVPTXISD::Tld4UnifiedA2DS64Float;
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
+ return NVPTXISD::Tld4UnifiedR2DU64Float;
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
+ return NVPTXISD::Tld4UnifiedG2DU64Float;
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
+ return NVPTXISD::Tld4UnifiedB2DU64Float;
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
+ return NVPTXISD::Tld4UnifiedA2DU64Float;
+ }
+}
+
+static unsigned getOpcForSurfaceInstr(unsigned Intrinsic) {
+ switch (Intrinsic) {
+ default:
+ return 0;
+ case Intrinsic::nvvm_suld_1d_i8_clamp:
+ return NVPTXISD::Suld1DI8Clamp;
+ case Intrinsic::nvvm_suld_1d_i16_clamp:
+ return NVPTXISD::Suld1DI16Clamp;
+ case Intrinsic::nvvm_suld_1d_i32_clamp:
+ return NVPTXISD::Suld1DI32Clamp;
+ case Intrinsic::nvvm_suld_1d_i64_clamp:
+ return NVPTXISD::Suld1DI64Clamp;
+ case Intrinsic::nvvm_suld_1d_v2i8_clamp:
+ return NVPTXISD::Suld1DV2I8Clamp;
+ case Intrinsic::nvvm_suld_1d_v2i16_clamp:
+ return NVPTXISD::Suld1DV2I16Clamp;
+ case Intrinsic::nvvm_suld_1d_v2i32_clamp:
+ return NVPTXISD::Suld1DV2I32Clamp;
+ case Intrinsic::nvvm_suld_1d_v2i64_clamp:
+ return NVPTXISD::Suld1DV2I64Clamp;
+ case Intrinsic::nvvm_suld_1d_v4i8_clamp:
+ return NVPTXISD::Suld1DV4I8Clamp;
+ case Intrinsic::nvvm_suld_1d_v4i16_clamp:
+ return NVPTXISD::Suld1DV4I16Clamp;
+ case Intrinsic::nvvm_suld_1d_v4i32_clamp:
+ return NVPTXISD::Suld1DV4I32Clamp;
+ case Intrinsic::nvvm_suld_1d_array_i8_clamp:
+ return NVPTXISD::Suld1DArrayI8Clamp;
+ case Intrinsic::nvvm_suld_1d_array_i16_clamp:
+ return NVPTXISD::Suld1DArrayI16Clamp;
+ case Intrinsic::nvvm_suld_1d_array_i32_clamp:
+ return NVPTXISD::Suld1DArrayI32Clamp;
+ case Intrinsic::nvvm_suld_1d_array_i64_clamp:
+ return NVPTXISD::Suld1DArrayI64Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
+ return NVPTXISD::Suld1DArrayV2I8Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
+ return NVPTXISD::Suld1DArrayV2I16Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
+ return NVPTXISD::Suld1DArrayV2I32Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
+ return NVPTXISD::Suld1DArrayV2I64Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
+ return NVPTXISD::Suld1DArrayV4I8Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
+ return NVPTXISD::Suld1DArrayV4I16Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
+ return NVPTXISD::Suld1DArrayV4I32Clamp;
+ case Intrinsic::nvvm_suld_2d_i8_clamp:
+ return NVPTXISD::Suld2DI8Clamp;
+ case Intrinsic::nvvm_suld_2d_i16_clamp:
+ return NVPTXISD::Suld2DI16Clamp;
+ case Intrinsic::nvvm_suld_2d_i32_clamp:
+ return NVPTXISD::Suld2DI32Clamp;
+ case Intrinsic::nvvm_suld_2d_i64_clamp:
+ return NVPTXISD::Suld2DI64Clamp;
+ case Intrinsic::nvvm_suld_2d_v2i8_clamp:
+ return NVPTXISD::Suld2DV2I8Clamp;
+ case Intrinsic::nvvm_suld_2d_v2i16_clamp:
+ return NVPTXISD::Suld2DV2I16Clamp;
+ case Intrinsic::nvvm_suld_2d_v2i32_clamp:
+ return NVPTXISD::Suld2DV2I32Clamp;
+ case Intrinsic::nvvm_suld_2d_v2i64_clamp:
+ return NVPTXISD::Suld2DV2I64Clamp;
+ case Intrinsic::nvvm_suld_2d_v4i8_clamp:
+ return NVPTXISD::Suld2DV4I8Clamp;
+ case Intrinsic::nvvm_suld_2d_v4i16_clamp:
+ return NVPTXISD::Suld2DV4I16Clamp;
+ case Intrinsic::nvvm_suld_2d_v4i32_clamp:
+ return NVPTXISD::Suld2DV4I32Clamp;
+ case Intrinsic::nvvm_suld_2d_array_i8_clamp:
+ return NVPTXISD::Suld2DArrayI8Clamp;
+ case Intrinsic::nvvm_suld_2d_array_i16_clamp:
+ return NVPTXISD::Suld2DArrayI16Clamp;
+ case Intrinsic::nvvm_suld_2d_array_i32_clamp:
+ return NVPTXISD::Suld2DArrayI32Clamp;
+ case Intrinsic::nvvm_suld_2d_array_i64_clamp:
+ return NVPTXISD::Suld2DArrayI64Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
+ return NVPTXISD::Suld2DArrayV2I8Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
+ return NVPTXISD::Suld2DArrayV2I16Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
+ return NVPTXISD::Suld2DArrayV2I32Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
+ return NVPTXISD::Suld2DArrayV2I64Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
+ return NVPTXISD::Suld2DArrayV4I8Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
+ return NVPTXISD::Suld2DArrayV4I16Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
+ return NVPTXISD::Suld2DArrayV4I32Clamp;
+ case Intrinsic::nvvm_suld_3d_i8_clamp:
+ return NVPTXISD::Suld3DI8Clamp;
+ case Intrinsic::nvvm_suld_3d_i16_clamp:
+ return NVPTXISD::Suld3DI16Clamp;
+ case Intrinsic::nvvm_suld_3d_i32_clamp:
+ return NVPTXISD::Suld3DI32Clamp;
+ case Intrinsic::nvvm_suld_3d_i64_clamp:
+ return NVPTXISD::Suld3DI64Clamp;
+ case Intrinsic::nvvm_suld_3d_v2i8_clamp:
+ return NVPTXISD::Suld3DV2I8Clamp;
+ case Intrinsic::nvvm_suld_3d_v2i16_clamp:
+ return NVPTXISD::Suld3DV2I16Clamp;
+ case Intrinsic::nvvm_suld_3d_v2i32_clamp:
+ return NVPTXISD::Suld3DV2I32Clamp;
+ case Intrinsic::nvvm_suld_3d_v2i64_clamp:
+ return NVPTXISD::Suld3DV2I64Clamp;
+ case Intrinsic::nvvm_suld_3d_v4i8_clamp:
+ return NVPTXISD::Suld3DV4I8Clamp;
+ case Intrinsic::nvvm_suld_3d_v4i16_clamp:
+ return NVPTXISD::Suld3DV4I16Clamp;
+ case Intrinsic::nvvm_suld_3d_v4i32_clamp:
+ return NVPTXISD::Suld3DV4I32Clamp;
+ case Intrinsic::nvvm_suld_1d_i8_trap:
+ return NVPTXISD::Suld1DI8Trap;
+ case Intrinsic::nvvm_suld_1d_i16_trap:
+ return NVPTXISD::Suld1DI16Trap;
+ case Intrinsic::nvvm_suld_1d_i32_trap:
+ return NVPTXISD::Suld1DI32Trap;
+ case Intrinsic::nvvm_suld_1d_i64_trap:
+ return NVPTXISD::Suld1DI64Trap;
+ case Intrinsic::nvvm_suld_1d_v2i8_trap:
+ return NVPTXISD::Suld1DV2I8Trap;
+ case Intrinsic::nvvm_suld_1d_v2i16_trap:
+ return NVPTXISD::Suld1DV2I16Trap;
+ case Intrinsic::nvvm_suld_1d_v2i32_trap:
+ return NVPTXISD::Suld1DV2I32Trap;
+ case Intrinsic::nvvm_suld_1d_v2i64_trap:
+ return NVPTXISD::Suld1DV2I64Trap;
+ case Intrinsic::nvvm_suld_1d_v4i8_trap:
+ return NVPTXISD::Suld1DV4I8Trap;
+ case Intrinsic::nvvm_suld_1d_v4i16_trap:
+ return NVPTXISD::Suld1DV4I16Trap;
+ case Intrinsic::nvvm_suld_1d_v4i32_trap:
+ return NVPTXISD::Suld1DV4I32Trap;
+ case Intrinsic::nvvm_suld_1d_array_i8_trap:
+ return NVPTXISD::Suld1DArrayI8Trap;
+ case Intrinsic::nvvm_suld_1d_array_i16_trap:
+ return NVPTXISD::Suld1DArrayI16Trap;
+ case Intrinsic::nvvm_suld_1d_array_i32_trap:
+ return NVPTXISD::Suld1DArrayI32Trap;
+ case Intrinsic::nvvm_suld_1d_array_i64_trap:
+ return NVPTXISD::Suld1DArrayI64Trap;
+ case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
+ return NVPTXISD::Suld1DArrayV2I8Trap;
+ case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
+ return NVPTXISD::Suld1DArrayV2I16Trap;
+ case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
+ return NVPTXISD::Suld1DArrayV2I32Trap;
+ case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
+ return NVPTXISD::Suld1DArrayV2I64Trap;
+ case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
+ return NVPTXISD::Suld1DArrayV4I8Trap;
+ case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
+ return NVPTXISD::Suld1DArrayV4I16Trap;
+ case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
+ return NVPTXISD::Suld1DArrayV4I32Trap;
+ case Intrinsic::nvvm_suld_2d_i8_trap:
+ return NVPTXISD::Suld2DI8Trap;
+ case Intrinsic::nvvm_suld_2d_i16_trap:
+ return NVPTXISD::Suld2DI16Trap;
+ case Intrinsic::nvvm_suld_2d_i32_trap:
+ return NVPTXISD::Suld2DI32Trap;
+ case Intrinsic::nvvm_suld_2d_i64_trap:
+ return NVPTXISD::Suld2DI64Trap;
+ case Intrinsic::nvvm_suld_2d_v2i8_trap:
+ return NVPTXISD::Suld2DV2I8Trap;
+ case Intrinsic::nvvm_suld_2d_v2i16_trap:
+ return NVPTXISD::Suld2DV2I16Trap;
+ case Intrinsic::nvvm_suld_2d_v2i32_trap:
+ return NVPTXISD::Suld2DV2I32Trap;
+ case Intrinsic::nvvm_suld_2d_v2i64_trap:
+ return NVPTXISD::Suld2DV2I64Trap;
+ case Intrinsic::nvvm_suld_2d_v4i8_trap:
+ return NVPTXISD::Suld2DV4I8Trap;
+ case Intrinsic::nvvm_suld_2d_v4i16_trap:
+ return NVPTXISD::Suld2DV4I16Trap;
+ case Intrinsic::nvvm_suld_2d_v4i32_trap:
+ return NVPTXISD::Suld2DV4I32Trap;
+ case Intrinsic::nvvm_suld_2d_array_i8_trap:
+ return NVPTXISD::Suld2DArrayI8Trap;
+ case Intrinsic::nvvm_suld_2d_array_i16_trap:
+ return NVPTXISD::Suld2DArrayI16Trap;
+ case Intrinsic::nvvm_suld_2d_array_i32_trap:
+ return NVPTXISD::Suld2DArrayI32Trap;
+ case Intrinsic::nvvm_suld_2d_array_i64_trap:
+ return NVPTXISD::Suld2DArrayI64Trap;
+ case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
+ return NVPTXISD::Suld2DArrayV2I8Trap;
+ case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
+ return NVPTXISD::Suld2DArrayV2I16Trap;
+ case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
+ return NVPTXISD::Suld2DArrayV2I32Trap;
+ case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
+ return NVPTXISD::Suld2DArrayV2I64Trap;
+ case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
+ return NVPTXISD::Suld2DArrayV4I8Trap;
+ case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
+ return NVPTXISD::Suld2DArrayV4I16Trap;
+ case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
+ return NVPTXISD::Suld2DArrayV4I32Trap;
+ case Intrinsic::nvvm_suld_3d_i8_trap:
+ return NVPTXISD::Suld3DI8Trap;
+ case Intrinsic::nvvm_suld_3d_i16_trap:
+ return NVPTXISD::Suld3DI16Trap;
+ case Intrinsic::nvvm_suld_3d_i32_trap:
+ return NVPTXISD::Suld3DI32Trap;
+ case Intrinsic::nvvm_suld_3d_i64_trap:
+ return NVPTXISD::Suld3DI64Trap;
+ case Intrinsic::nvvm_suld_3d_v2i8_trap:
+ return NVPTXISD::Suld3DV2I8Trap;
+ case Intrinsic::nvvm_suld_3d_v2i16_trap:
+ return NVPTXISD::Suld3DV2I16Trap;
+ case Intrinsic::nvvm_suld_3d_v2i32_trap:
+ return NVPTXISD::Suld3DV2I32Trap;
+ case Intrinsic::nvvm_suld_3d_v2i64_trap:
+ return NVPTXISD::Suld3DV2I64Trap;
+ case Intrinsic::nvvm_suld_3d_v4i8_trap:
+ return NVPTXISD::Suld3DV4I8Trap;
+ case Intrinsic::nvvm_suld_3d_v4i16_trap:
+ return NVPTXISD::Suld3DV4I16Trap;
+ case Intrinsic::nvvm_suld_3d_v4i32_trap:
+ return NVPTXISD::Suld3DV4I32Trap;
+ case Intrinsic::nvvm_suld_1d_i8_zero:
+ return NVPTXISD::Suld1DI8Zero;
+ case Intrinsic::nvvm_suld_1d_i16_zero:
+ return NVPTXISD::Suld1DI16Zero;
+ case Intrinsic::nvvm_suld_1d_i32_zero:
+ return NVPTXISD::Suld1DI32Zero;
+ case Intrinsic::nvvm_suld_1d_i64_zero:
+ return NVPTXISD::Suld1DI64Zero;
+ case Intrinsic::nvvm_suld_1d_v2i8_zero:
+ return NVPTXISD::Suld1DV2I8Zero;
+ case Intrinsic::nvvm_suld_1d_v2i16_zero:
+ return NVPTXISD::Suld1DV2I16Zero;
+ case Intrinsic::nvvm_suld_1d_v2i32_zero:
+ return NVPTXISD::Suld1DV2I32Zero;
+ case Intrinsic::nvvm_suld_1d_v2i64_zero:
+ return NVPTXISD::Suld1DV2I64Zero;
+ case Intrinsic::nvvm_suld_1d_v4i8_zero:
+ return NVPTXISD::Suld1DV4I8Zero;
+ case Intrinsic::nvvm_suld_1d_v4i16_zero:
+ return NVPTXISD::Suld1DV4I16Zero;
+ case Intrinsic::nvvm_suld_1d_v4i32_zero:
+ return NVPTXISD::Suld1DV4I32Zero;
+ case Intrinsic::nvvm_suld_1d_array_i8_zero:
+ return NVPTXISD::Suld1DArrayI8Zero;
+ case Intrinsic::nvvm_suld_1d_array_i16_zero:
+ return NVPTXISD::Suld1DArrayI16Zero;
+ case Intrinsic::nvvm_suld_1d_array_i32_zero:
+ return NVPTXISD::Suld1DArrayI32Zero;
+ case Intrinsic::nvvm_suld_1d_array_i64_zero:
+ return NVPTXISD::Suld1DArrayI64Zero;
+ case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
+ return NVPTXISD::Suld1DArrayV2I8Zero;
+ case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
+ return NVPTXISD::Suld1DArrayV2I16Zero;
+ case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
+ return NVPTXISD::Suld1DArrayV2I32Zero;
+ case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
+ return NVPTXISD::Suld1DArrayV2I64Zero;
+ case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
+ return NVPTXISD::Suld1DArrayV4I8Zero;
+ case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
+ return NVPTXISD::Suld1DArrayV4I16Zero;
+ case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
+ return NVPTXISD::Suld1DArrayV4I32Zero;
+ case Intrinsic::nvvm_suld_2d_i8_zero:
+ return NVPTXISD::Suld2DI8Zero;
+ case Intrinsic::nvvm_suld_2d_i16_zero:
+ return NVPTXISD::Suld2DI16Zero;
+ case Intrinsic::nvvm_suld_2d_i32_zero:
+ return NVPTXISD::Suld2DI32Zero;
+ case Intrinsic::nvvm_suld_2d_i64_zero:
+ return NVPTXISD::Suld2DI64Zero;
+ case Intrinsic::nvvm_suld_2d_v2i8_zero:
+ return NVPTXISD::Suld2DV2I8Zero;
+ case Intrinsic::nvvm_suld_2d_v2i16_zero:
+ return NVPTXISD::Suld2DV2I16Zero;
+ case Intrinsic::nvvm_suld_2d_v2i32_zero:
+ return NVPTXISD::Suld2DV2I32Zero;
+ case Intrinsic::nvvm_suld_2d_v2i64_zero:
+ return NVPTXISD::Suld2DV2I64Zero;
+ case Intrinsic::nvvm_suld_2d_v4i8_zero:
+ return NVPTXISD::Suld2DV4I8Zero;
+ case Intrinsic::nvvm_suld_2d_v4i16_zero:
+ return NVPTXISD::Suld2DV4I16Zero;
+ case Intrinsic::nvvm_suld_2d_v4i32_zero:
+ return NVPTXISD::Suld2DV4I32Zero;
+ case Intrinsic::nvvm_suld_2d_array_i8_zero:
+ return NVPTXISD::Suld2DArrayI8Zero;
+ case Intrinsic::nvvm_suld_2d_array_i16_zero:
+ return NVPTXISD::Suld2DArrayI16Zero;
+ case Intrinsic::nvvm_suld_2d_array_i32_zero:
+ return NVPTXISD::Suld2DArrayI32Zero;
+ case Intrinsic::nvvm_suld_2d_array_i64_zero:
+ return NVPTXISD::Suld2DArrayI64Zero;
+ case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
+ return NVPTXISD::Suld2DArrayV2I8Zero;
+ case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
+ return NVPTXISD::Suld2DArrayV2I16Zero;
+ case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
+ return NVPTXISD::Suld2DArrayV2I32Zero;
+ case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
+ return NVPTXISD::Suld2DArrayV2I64Zero;
+ case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
+ return NVPTXISD::Suld2DArrayV4I8Zero;
+ case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
+ return NVPTXISD::Suld2DArrayV4I16Zero;
+ case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
+ return NVPTXISD::Suld2DArrayV4I32Zero;
+ case Intrinsic::nvvm_suld_3d_i8_zero:
+ return NVPTXISD::Suld3DI8Zero;
+ case Intrinsic::nvvm_suld_3d_i16_zero:
+ return NVPTXISD::Suld3DI16Zero;
+ case Intrinsic::nvvm_suld_3d_i32_zero:
+ return NVPTXISD::Suld3DI32Zero;
+ case Intrinsic::nvvm_suld_3d_i64_zero:
+ return NVPTXISD::Suld3DI64Zero;
+ case Intrinsic::nvvm_suld_3d_v2i8_zero:
+ return NVPTXISD::Suld3DV2I8Zero;
+ case Intrinsic::nvvm_suld_3d_v2i16_zero:
+ return NVPTXISD::Suld3DV2I16Zero;
+ case Intrinsic::nvvm_suld_3d_v2i32_zero:
+ return NVPTXISD::Suld3DV2I32Zero;
+ case Intrinsic::nvvm_suld_3d_v2i64_zero:
+ return NVPTXISD::Suld3DV2I64Zero;
+ case Intrinsic::nvvm_suld_3d_v4i8_zero:
+ return NVPTXISD::Suld3DV4I8Zero;
+ case Intrinsic::nvvm_suld_3d_v4i16_zero:
+ return NVPTXISD::Suld3DV4I16Zero;
+ case Intrinsic::nvvm_suld_3d_v4i32_zero:
+ return NVPTXISD::Suld3DV4I32Zero;
+ }
+}
+
// llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
// TgtMemIntrinsic
// because we need the information that is only available in the "Value" type
@@ -1928,23 +3255,456 @@ bool NVPTXTargetLowering::getTgtMemIntrinsic(
case Intrinsic::nvvm_ldu_global_i:
case Intrinsic::nvvm_ldu_global_f:
- case Intrinsic::nvvm_ldu_global_p:
+ case Intrinsic::nvvm_ldu_global_p: {
Info.opc = ISD::INTRINSIC_W_CHAIN;
if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
Info.memVT = getValueType(I.getType());
- else if (Intrinsic == Intrinsic::nvvm_ldu_global_p)
+ else if(Intrinsic == Intrinsic::nvvm_ldu_global_p)
+ Info.memVT = getPointerTy();
+ else
Info.memVT = getValueType(I.getType());
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.vol = 0;
+ Info.readMem = true;
+ Info.writeMem = false;
+
+ // alignment is available as metadata.
+ // Grab it and set the alignment.
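+  // Illustrative example (editorial note, not from the original source): if
+  // the call carries "align" metadata whose single operand is the constant 8,
+  // the code below reads that operand as a ConstantInt and Info.align ends
+  // up as 8.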
+ assert(I.hasMetadataOtherThanDebugLoc() && "Must have alignment metadata");
+ MDNode *AlignMD = I.getMetadata("align");
+ assert(AlignMD && "Must have a non-null MDNode");
+ assert(AlignMD->getNumOperands() == 1 && "Must have a single operand");
+ Value *Align = AlignMD->getOperand(0);
+ int64_t Alignment = cast<ConstantInt>(Align)->getZExtValue();
+ Info.align = Alignment;
+
+ return true;
+ }
+ case Intrinsic::nvvm_ldg_global_i:
+ case Intrinsic::nvvm_ldg_global_f:
+ case Intrinsic::nvvm_ldg_global_p: {
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ if (Intrinsic == Intrinsic::nvvm_ldg_global_i)
+ Info.memVT = getValueType(I.getType());
+    else if (Intrinsic == Intrinsic::nvvm_ldg_global_p)
+ Info.memVT = getPointerTy();
else
- Info.memVT = MVT::f32;
+ Info.memVT = getValueType(I.getType());
Info.ptrVal = I.getArgOperand(0);
Info.offset = 0;
Info.vol = 0;
Info.readMem = true;
Info.writeMem = false;
- Info.align = 0;
+
+ // alignment is available as metadata.
+ // Grab it and set the alignment.
+ assert(I.hasMetadataOtherThanDebugLoc() && "Must have alignment metadata");
+ MDNode *AlignMD = I.getMetadata("align");
+ assert(AlignMD && "Must have a non-null MDNode");
+ assert(AlignMD->getNumOperands() == 1 && "Must have a single operand");
+ Value *Align = AlignMD->getOperand(0);
+ int64_t Alignment = cast<ConstantInt>(Align)->getZExtValue();
+ Info.align = Alignment;
+
return true;
+ }
+ case Intrinsic::nvvm_tex_1d_v4f32_s32:
+ case Intrinsic::nvvm_tex_1d_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
+ case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_v4f32_s32:
+ case Intrinsic::nvvm_tex_2d_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
+ case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_3d_v4f32_s32:
+ case Intrinsic::nvvm_tex_3d_v4f32_f32:
+ case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_cube_v4f32_f32:
+ case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32: {
+ Info.opc = getOpcForTextureInstr(Intrinsic);
+ Info.memVT = MVT::v4f32;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = 0;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+ }
+ case Intrinsic::nvvm_tex_1d_v4s32_s32:
+ case Intrinsic::nvvm_tex_1d_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
+ case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_v4s32_s32:
+ case Intrinsic::nvvm_tex_2d_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
+ case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_3d_v4s32_s32:
+ case Intrinsic::nvvm_tex_3d_v4s32_f32:
+ case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_v4u32_f32:
+ case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_v4u32_s32:
+ case Intrinsic::nvvm_tex_1d_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
+ case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_v4u32_s32:
+ case Intrinsic::nvvm_tex_2d_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
+ case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_3d_v4u32_s32:
+ case Intrinsic::nvvm_tex_3d_v4u32_f32:
+ case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32: {
+ Info.opc = getOpcForTextureInstr(Intrinsic);
+ Info.memVT = MVT::v4i32;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = 0;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+ }
+ case Intrinsic::nvvm_suld_1d_i8_clamp:
+ case Intrinsic::nvvm_suld_1d_v2i8_clamp:
+ case Intrinsic::nvvm_suld_1d_v4i8_clamp:
+ case Intrinsic::nvvm_suld_1d_array_i8_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
+ case Intrinsic::nvvm_suld_2d_i8_clamp:
+ case Intrinsic::nvvm_suld_2d_v2i8_clamp:
+ case Intrinsic::nvvm_suld_2d_v4i8_clamp:
+ case Intrinsic::nvvm_suld_2d_array_i8_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
+ case Intrinsic::nvvm_suld_3d_i8_clamp:
+ case Intrinsic::nvvm_suld_3d_v2i8_clamp:
+ case Intrinsic::nvvm_suld_3d_v4i8_clamp:
+ case Intrinsic::nvvm_suld_1d_i8_trap:
+ case Intrinsic::nvvm_suld_1d_v2i8_trap:
+ case Intrinsic::nvvm_suld_1d_v4i8_trap:
+ case Intrinsic::nvvm_suld_1d_array_i8_trap:
+ case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
+ case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
+ case Intrinsic::nvvm_suld_2d_i8_trap:
+ case Intrinsic::nvvm_suld_2d_v2i8_trap:
+ case Intrinsic::nvvm_suld_2d_v4i8_trap:
+ case Intrinsic::nvvm_suld_2d_array_i8_trap:
+ case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
+ case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
+ case Intrinsic::nvvm_suld_3d_i8_trap:
+ case Intrinsic::nvvm_suld_3d_v2i8_trap:
+ case Intrinsic::nvvm_suld_3d_v4i8_trap:
+ case Intrinsic::nvvm_suld_1d_i8_zero:
+ case Intrinsic::nvvm_suld_1d_v2i8_zero:
+ case Intrinsic::nvvm_suld_1d_v4i8_zero:
+ case Intrinsic::nvvm_suld_1d_array_i8_zero:
+ case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
+ case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
+ case Intrinsic::nvvm_suld_2d_i8_zero:
+ case Intrinsic::nvvm_suld_2d_v2i8_zero:
+ case Intrinsic::nvvm_suld_2d_v4i8_zero:
+ case Intrinsic::nvvm_suld_2d_array_i8_zero:
+ case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
+ case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
+ case Intrinsic::nvvm_suld_3d_i8_zero:
+ case Intrinsic::nvvm_suld_3d_v2i8_zero:
+ case Intrinsic::nvvm_suld_3d_v4i8_zero: {
+ Info.opc = getOpcForSurfaceInstr(Intrinsic);
+ Info.memVT = MVT::i8;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = 0;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+ }
+ case Intrinsic::nvvm_suld_1d_i16_clamp:
+ case Intrinsic::nvvm_suld_1d_v2i16_clamp:
+ case Intrinsic::nvvm_suld_1d_v4i16_clamp:
+ case Intrinsic::nvvm_suld_1d_array_i16_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
+ case Intrinsic::nvvm_suld_2d_i16_clamp:
+ case Intrinsic::nvvm_suld_2d_v2i16_clamp:
+ case Intrinsic::nvvm_suld_2d_v4i16_clamp:
+ case Intrinsic::nvvm_suld_2d_array_i16_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
+ case Intrinsic::nvvm_suld_3d_i16_clamp:
+ case Intrinsic::nvvm_suld_3d_v2i16_clamp:
+ case Intrinsic::nvvm_suld_3d_v4i16_clamp:
+ case Intrinsic::nvvm_suld_1d_i16_trap:
+ case Intrinsic::nvvm_suld_1d_v2i16_trap:
+ case Intrinsic::nvvm_suld_1d_v4i16_trap:
+ case Intrinsic::nvvm_suld_1d_array_i16_trap:
+ case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
+ case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
+ case Intrinsic::nvvm_suld_2d_i16_trap:
+ case Intrinsic::nvvm_suld_2d_v2i16_trap:
+ case Intrinsic::nvvm_suld_2d_v4i16_trap:
+ case Intrinsic::nvvm_suld_2d_array_i16_trap:
+ case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
+ case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
+ case Intrinsic::nvvm_suld_3d_i16_trap:
+ case Intrinsic::nvvm_suld_3d_v2i16_trap:
+ case Intrinsic::nvvm_suld_3d_v4i16_trap:
+ case Intrinsic::nvvm_suld_1d_i16_zero:
+ case Intrinsic::nvvm_suld_1d_v2i16_zero:
+ case Intrinsic::nvvm_suld_1d_v4i16_zero:
+ case Intrinsic::nvvm_suld_1d_array_i16_zero:
+ case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
+ case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
+ case Intrinsic::nvvm_suld_2d_i16_zero:
+ case Intrinsic::nvvm_suld_2d_v2i16_zero:
+ case Intrinsic::nvvm_suld_2d_v4i16_zero:
+ case Intrinsic::nvvm_suld_2d_array_i16_zero:
+ case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
+ case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
+ case Intrinsic::nvvm_suld_3d_i16_zero:
+ case Intrinsic::nvvm_suld_3d_v2i16_zero:
+ case Intrinsic::nvvm_suld_3d_v4i16_zero: {
+ Info.opc = getOpcForSurfaceInstr(Intrinsic);
+ Info.memVT = MVT::i16;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = 0;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+ }
+ case Intrinsic::nvvm_suld_1d_i32_clamp:
+ case Intrinsic::nvvm_suld_1d_v2i32_clamp:
+ case Intrinsic::nvvm_suld_1d_v4i32_clamp:
+ case Intrinsic::nvvm_suld_1d_array_i32_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
+ case Intrinsic::nvvm_suld_2d_i32_clamp:
+ case Intrinsic::nvvm_suld_2d_v2i32_clamp:
+ case Intrinsic::nvvm_suld_2d_v4i32_clamp:
+ case Intrinsic::nvvm_suld_2d_array_i32_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
+ case Intrinsic::nvvm_suld_3d_i32_clamp:
+ case Intrinsic::nvvm_suld_3d_v2i32_clamp:
+ case Intrinsic::nvvm_suld_3d_v4i32_clamp:
+ case Intrinsic::nvvm_suld_1d_i32_trap:
+ case Intrinsic::nvvm_suld_1d_v2i32_trap:
+ case Intrinsic::nvvm_suld_1d_v4i32_trap:
+ case Intrinsic::nvvm_suld_1d_array_i32_trap:
+ case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
+ case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
+ case Intrinsic::nvvm_suld_2d_i32_trap:
+ case Intrinsic::nvvm_suld_2d_v2i32_trap:
+ case Intrinsic::nvvm_suld_2d_v4i32_trap:
+ case Intrinsic::nvvm_suld_2d_array_i32_trap:
+ case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
+ case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
+ case Intrinsic::nvvm_suld_3d_i32_trap:
+ case Intrinsic::nvvm_suld_3d_v2i32_trap:
+ case Intrinsic::nvvm_suld_3d_v4i32_trap:
+ case Intrinsic::nvvm_suld_1d_i32_zero:
+ case Intrinsic::nvvm_suld_1d_v2i32_zero:
+ case Intrinsic::nvvm_suld_1d_v4i32_zero:
+ case Intrinsic::nvvm_suld_1d_array_i32_zero:
+ case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
+ case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
+ case Intrinsic::nvvm_suld_2d_i32_zero:
+ case Intrinsic::nvvm_suld_2d_v2i32_zero:
+ case Intrinsic::nvvm_suld_2d_v4i32_zero:
+ case Intrinsic::nvvm_suld_2d_array_i32_zero:
+ case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
+ case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
+ case Intrinsic::nvvm_suld_3d_i32_zero:
+ case Intrinsic::nvvm_suld_3d_v2i32_zero:
+ case Intrinsic::nvvm_suld_3d_v4i32_zero: {
+ Info.opc = getOpcForSurfaceInstr(Intrinsic);
+ Info.memVT = MVT::i32;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = 0;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+ }
+ case Intrinsic::nvvm_suld_1d_i64_clamp:
+ case Intrinsic::nvvm_suld_1d_v2i64_clamp:
+ case Intrinsic::nvvm_suld_1d_array_i64_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
+ case Intrinsic::nvvm_suld_2d_i64_clamp:
+ case Intrinsic::nvvm_suld_2d_v2i64_clamp:
+ case Intrinsic::nvvm_suld_2d_array_i64_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
+ case Intrinsic::nvvm_suld_3d_i64_clamp:
+ case Intrinsic::nvvm_suld_3d_v2i64_clamp:
+ case Intrinsic::nvvm_suld_1d_i64_trap:
+ case Intrinsic::nvvm_suld_1d_v2i64_trap:
+ case Intrinsic::nvvm_suld_1d_array_i64_trap:
+ case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
+ case Intrinsic::nvvm_suld_2d_i64_trap:
+ case Intrinsic::nvvm_suld_2d_v2i64_trap:
+ case Intrinsic::nvvm_suld_2d_array_i64_trap:
+ case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
+ case Intrinsic::nvvm_suld_3d_i64_trap:
+ case Intrinsic::nvvm_suld_3d_v2i64_trap:
+ case Intrinsic::nvvm_suld_1d_i64_zero:
+ case Intrinsic::nvvm_suld_1d_v2i64_zero:
+ case Intrinsic::nvvm_suld_1d_array_i64_zero:
+ case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
+ case Intrinsic::nvvm_suld_2d_i64_zero:
+ case Intrinsic::nvvm_suld_2d_v2i64_zero:
+ case Intrinsic::nvvm_suld_2d_array_i64_zero:
+ case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
+ case Intrinsic::nvvm_suld_3d_i64_zero:
+ case Intrinsic::nvvm_suld_3d_v2i64_zero: {
+ Info.opc = getOpcForSurfaceInstr(Intrinsic);
+ Info.memVT = MVT::i64;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = 0;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+ }
}
return false;
}
@@ -1999,6 +3759,7 @@ NVPTXTargetLowering::getConstraintType(const std::string &Constraint) const {
switch (Constraint[0]) {
default:
break;
+ case 'b':
case 'r':
case 'h':
case 'c':
@@ -2018,6 +3779,8 @@ NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
MVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
+ case 'b':
+ return std::make_pair(0U, &NVPTX::Int1RegsRegClass);
case 'c':
return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
case 'h':
@@ -2041,8 +3804,434 @@ unsigned NVPTXTargetLowering::getFunctionAlignment(const Function *) const {
return 4;
}
+//===----------------------------------------------------------------------===//
+// NVPTX DAG Combining
+//===----------------------------------------------------------------------===//
+
+bool NVPTXTargetLowering::allowFMA(MachineFunction &MF,
+ CodeGenOpt::Level OptLevel) const {
+ const Function *F = MF.getFunction();
+ const TargetOptions &TO = MF.getTarget().Options;
+
+ // Always honor command-line argument
+ if (FMAContractLevelOpt.getNumOccurrences() > 0) {
+ return FMAContractLevelOpt > 0;
+ } else if (OptLevel == 0) {
+ // Do not contract if we're not optimizing the code
+ return false;
+ } else if (TO.AllowFPOpFusion == FPOpFusion::Fast || TO.UnsafeFPMath) {
+ // Honor TargetOptions flags that explicitly say fusion is okay
+ return true;
+ } else if (F->hasFnAttribute("unsafe-fp-math")) {
+ // Check for unsafe-fp-math=true coming from Clang
+ Attribute Attr = F->getFnAttribute("unsafe-fp-math");
+ StringRef Val = Attr.getValueAsString();
+ if (Val == "true")
+ return true;
+ }
+
+ // We did not have a clear indication that fusion is allowed, so assume not
+ return false;
+}
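+
+// In short (summarizing the checks above): an explicit FMA-contraction level
+// given on the command line decides by itself, an unoptimized build never
+// contracts, and otherwise fusion requires either fast FP-op fusion /
+// unsafe-fp-math in TargetOptions or an "unsafe-fp-math"="true" function
+// attribute.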
+
+/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
+/// operands N0 and N1. This is a helper for PerformADDCombine that is
+/// called with the default operands, and if that fails, with commuted
+/// operands.
+static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const NVPTXSubtarget &Subtarget,
+ CodeGenOpt::Level OptLevel) {
+ SelectionDAG &DAG = DCI.DAG;
+ // Skip non-integer, non-scalar case
+  EVT VT = N0.getValueType();
+ if (VT.isVector())
+ return SDValue();
+
+ // fold (add (mul a, b), c) -> (mad a, b, c)
+ //
+ if (N0.getOpcode() == ISD::MUL) {
+    assert(VT.isInteger());
+ // For integer:
+ // Since integer multiply-add costs the same as integer multiply
+ // but is more costly than integer add, do the fusion only when
+ // the mul is only used in the add.
+    if (OptLevel == CodeGenOpt::None || VT != MVT::i32 ||
+ !N0.getNode()->hasOneUse())
+ return SDValue();
+
+ // Do the folding
+ return DAG.getNode(NVPTXISD::IMAD, SDLoc(N), VT,
+ N0.getOperand(0), N0.getOperand(1), N1);
+ }
+ else if (N0.getOpcode() == ISD::FMUL) {
+ if (VT == MVT::f32 || VT == MVT::f64) {
+ NVPTXTargetLowering *TLI =
+ (NVPTXTargetLowering *)&DAG.getTargetLoweringInfo();
+ if (!TLI->allowFMA(DAG.getMachineFunction(), OptLevel))
+ return SDValue();
+
+ // For floating point:
+      // Do the fusion only when the mul has fewer than 5 uses and all of
+      // them are adds.
+      // The heuristic is that if a use is not an add, that use cannot be
+      // fused into an fma, so the mul is still needed anyway.
+      // If there are more than 4 uses, even if they are all adds, fusing
+      // them will increase register pressure.
+ //
+ int numUses = 0;
+ int nonAddCount = 0;
+ for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
+ UE = N0.getNode()->use_end();
+ UI != UE; ++UI) {
+ numUses++;
+ SDNode *User = *UI;
+ if (User->getOpcode() != ISD::FADD)
+ ++nonAddCount;
+ }
+ if (numUses >= 5)
+ return SDValue();
+ if (nonAddCount) {
+ int orderNo = N->getIROrder();
+ int orderNo2 = N0.getNode()->getIROrder();
+        // Simple heuristic for gauging potential register pressure: the
+        // difference in IR order is used to measure the distance between
+        // def and use, and a longer distance is more likely to cause
+        // register pressure.
+ if (orderNo - orderNo2 < 500)
+ return SDValue();
+
+        // Now, check if at least one of the FMUL's operands is live beyond
+        // node N, which guarantees that the FMA will not increase register
+        // pressure at node N.
+ bool opIsLive = false;
+ const SDNode *left = N0.getOperand(0).getNode();
+ const SDNode *right = N0.getOperand(1).getNode();
+
+        if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
+ opIsLive = true;
+
+ if (!opIsLive)
+ for (SDNode::use_iterator UI = left->use_begin(), UE = left->use_end(); UI != UE; ++UI) {
+ SDNode *User = *UI;
+ int orderNo3 = User->getIROrder();
+ if (orderNo3 > orderNo) {
+ opIsLive = true;
+ break;
+ }
+ }
+
+ if (!opIsLive)
+ for (SDNode::use_iterator UI = right->use_begin(), UE = right->use_end(); UI != UE; ++UI) {
+ SDNode *User = *UI;
+ int orderNo3 = User->getIROrder();
+ if (orderNo3 > orderNo) {
+ opIsLive = true;
+ break;
+ }
+ }
+
+ if (!opIsLive)
+ return SDValue();
+ }
+
+ return DAG.getNode(ISD::FMA, SDLoc(N), VT,
+ N0.getOperand(0), N0.getOperand(1), N1);
+ }
+ }
+
+ return SDValue();
+}
+
+/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
+///
+static SDValue PerformADDCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const NVPTXSubtarget &Subtarget,
+ CodeGenOpt::Level OptLevel) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+
+ // First try with the default operand order.
+ SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget,
+ OptLevel);
+ if (Result.getNode())
+ return Result;
+
+ // If that didn't work, try again with the operands commuted.
+ return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget, OptLevel);
+}
+
+static SDValue PerformANDCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ // The type legalizer turns a vector load of i8 values into a zextload to i16
+ // registers, optionally ANY_EXTENDs it (if target type is integer),
+ // and ANDs off the high 8 bits. Since we turn this load into a
+ // target-specific DAG node, the DAG combiner fails to eliminate these AND
+ // nodes. Do that here.
+ SDValue Val = N->getOperand(0);
+ SDValue Mask = N->getOperand(1);
+
+ if (isa<ConstantSDNode>(Val)) {
+ std::swap(Val, Mask);
+ }
+
+ SDValue AExt;
+ // Generally, we will see zextload -> IMOV16rr -> ANY_EXTEND -> and
+ if (Val.getOpcode() == ISD::ANY_EXTEND) {
+ AExt = Val;
+ Val = Val->getOperand(0);
+ }
+
+ if (Val->isMachineOpcode() && Val->getMachineOpcode() == NVPTX::IMOV16rr) {
+ Val = Val->getOperand(0);
+ }
+
+ if (Val->getOpcode() == NVPTXISD::LoadV2 ||
+ Val->getOpcode() == NVPTXISD::LoadV4) {
+ ConstantSDNode *MaskCnst = dyn_cast<ConstantSDNode>(Mask);
+ if (!MaskCnst) {
+ // Not an AND with a constant
+ return SDValue();
+ }
+
+ uint64_t MaskVal = MaskCnst->getZExtValue();
+ if (MaskVal != 0xff) {
+ // Not an AND that chops off top 8 bits
+ return SDValue();
+ }
+
+ MemSDNode *Mem = dyn_cast<MemSDNode>(Val);
+ if (!Mem) {
+ // Not a MemSDNode?!?
+ return SDValue();
+ }
+
+ EVT MemVT = Mem->getMemoryVT();
+ if (MemVT != MVT::v2i8 && MemVT != MVT::v4i8) {
+ // We only handle the i8 case
+ return SDValue();
+ }
+
+ unsigned ExtType =
+ cast<ConstantSDNode>(Val->getOperand(Val->getNumOperands()-1))->
+ getZExtValue();
+ if (ExtType == ISD::SEXTLOAD) {
+ // If for some reason the load is a sextload, the and is needed to zero
+ // out the high 8 bits
+ return SDValue();
+ }
+
+ bool AddTo = false;
+ if (AExt.getNode() != 0) {
+ // Re-insert the ext as a zext.
+ Val = DCI.DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N),
+ AExt.getValueType(), Val);
+ AddTo = true;
+ }
+
+ // If we get here, the AND is unnecessary. Just replace it with the load
+ DCI.CombineTo(N, Val, AddTo);
+ }
+
+ return SDValue();
+}
+
+enum OperandSignedness {
+ Signed = 0,
+ Unsigned,
+ Unknown
+};
+
+/// IsMulWideOperandDemotable - Checks if the provided DAG node is an operand
+/// that can be demoted to \p OptSize bits without loss of information. The
+/// signedness of the operand, if determinable, is placed in \p S.
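+/// For example, (sext i16 %x to i32) is demotable to 16 bits with Signed
+/// signedness, and (zext i16 %x to i32) is demotable with Unsigned signedness.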
+static bool IsMulWideOperandDemotable(SDValue Op,
+ unsigned OptSize,
+ OperandSignedness &S) {
+ S = Unknown;
+
+ if (Op.getOpcode() == ISD::SIGN_EXTEND ||
+ Op.getOpcode() == ISD::SIGN_EXTEND_INREG) {
+ EVT OrigVT = Op.getOperand(0).getValueType();
+ if (OrigVT.getSizeInBits() == OptSize) {
+ S = Signed;
+ return true;
+ }
+ } else if (Op.getOpcode() == ISD::ZERO_EXTEND) {
+ EVT OrigVT = Op.getOperand(0).getValueType();
+ if (OrigVT.getSizeInBits() == OptSize) {
+ S = Unsigned;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/// AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can
+/// be demoted to \p OptSize bits without loss of information. If the operands
+/// contain a constant, it should appear as the RHS operand. The signedness of
+/// the operands is placed in \p IsSigned.
+static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS,
+ unsigned OptSize,
+ bool &IsSigned) {
+
+ OperandSignedness LHSSign;
+
+ // The LHS operand must be a demotable op
+ if (!IsMulWideOperandDemotable(LHS, OptSize, LHSSign))
+ return false;
+
+ // We should have been able to determine the signedness from the LHS
+ if (LHSSign == Unknown)
+ return false;
+
+ IsSigned = (LHSSign == Signed);
+
+ // The RHS can be a demotable op or a constant
+ if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(RHS)) {
+ APInt Val = CI->getAPIntValue();
+ if (LHSSign == Unsigned) {
+ if (Val.isIntN(OptSize)) {
+ return true;
+ }
+ return false;
+ } else {
+ if (Val.isSignedIntN(OptSize)) {
+ return true;
+ }
+ return false;
+ }
+ } else {
+ OperandSignedness RHSSign;
+ if (!IsMulWideOperandDemotable(RHS, OptSize, RHSSign))
+ return false;
+
+ if (LHSSign != RHSSign)
+ return false;
+
+ return true;
+ }
+}
+
+/// TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply
+/// of M/2 bits that produces an M-bit result (i.e. mul.wide). This transform
+/// works on both multiply DAG nodes and SHL DAG nodes with a constant shift
+/// amount.
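+/// For example, an i32 multiply of two values sign-extended from i16 can be
+/// selected as mul.wide.s16, and (shl (zext i16), C) is handled by first
+/// rewriting the shift amount C as the constant multiplier 1 << C.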
+static SDValue TryMULWIDECombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ EVT MulType = N->getValueType(0);
+ if (MulType != MVT::i32 && MulType != MVT::i64) {
+ return SDValue();
+ }
+
+ unsigned OptSize = MulType.getSizeInBits() >> 1;
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+
+ // Canonicalize the multiply so the constant (if any) is on the right
+ if (N->getOpcode() == ISD::MUL) {
+ if (isa<ConstantSDNode>(LHS)) {
+ std::swap(LHS, RHS);
+ }
+ }
+
+ // If we have a SHL, determine the actual multiply amount
+ if (N->getOpcode() == ISD::SHL) {
+ ConstantSDNode *ShlRHS = dyn_cast<ConstantSDNode>(RHS);
+ if (!ShlRHS) {
+ return SDValue();
+ }
+
+ APInt ShiftAmt = ShlRHS->getAPIntValue();
+ unsigned BitWidth = MulType.getSizeInBits();
+ if (ShiftAmt.sge(0) && ShiftAmt.slt(BitWidth)) {
+ APInt MulVal = APInt(BitWidth, 1) << ShiftAmt;
+ RHS = DCI.DAG.getConstant(MulVal, MulType);
+ } else {
+ return SDValue();
+ }
+ }
+
+ bool Signed;
+ // Verify that our operands are demotable
+ if (!AreMulWideOperandsDemotable(LHS, RHS, OptSize, Signed)) {
+ return SDValue();
+ }
+
+ EVT DemotedVT;
+ if (MulType == MVT::i32) {
+ DemotedVT = MVT::i16;
+ } else {
+ DemotedVT = MVT::i32;
+ }
+
+ // Truncate the operands to the correct size. Note that these are just for
+ // type consistency and will (likely) be eliminated in later phases.
+ SDValue TruncLHS =
+ DCI.DAG.getNode(ISD::TRUNCATE, SDLoc(N), DemotedVT, LHS);
+ SDValue TruncRHS =
+ DCI.DAG.getNode(ISD::TRUNCATE, SDLoc(N), DemotedVT, RHS);
+
+ unsigned Opc;
+ if (Signed) {
+ Opc = NVPTXISD::MUL_WIDE_SIGNED;
+ } else {
+ Opc = NVPTXISD::MUL_WIDE_UNSIGNED;
+ }
+
+ return DCI.DAG.getNode(Opc, SDLoc(N), MulType, TruncLHS, TruncRHS);
+}
+
+/// PerformMULCombine - Runs PTX-specific DAG combine patterns on MUL nodes.
+static SDValue PerformMULCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ CodeGenOpt::Level OptLevel) {
+ if (OptLevel > 0) {
+ // Try mul.wide combining at OptLevel > 0
+ SDValue Ret = TryMULWIDECombine(N, DCI);
+ if (Ret.getNode())
+ return Ret;
+ }
+
+ return SDValue();
+}
+
+/// PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
+static SDValue PerformSHLCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ CodeGenOpt::Level OptLevel) {
+ if (OptLevel > 0) {
+ // Try mul.wide combining at OptLevel > 0
+ SDValue Ret = TryMULWIDECombine(N, DCI);
+ if (Ret.getNode())
+ return Ret;
+ }
+
+ return SDValue();
+}
+
+SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ CodeGenOpt::Level OptLevel = getTargetMachine().getOptLevel();
+ switch (N->getOpcode()) {
+ default: break;
+ case ISD::ADD:
+ case ISD::FADD:
+ return PerformADDCombine(N, DCI, nvptxSubtarget, OptLevel);
+ case ISD::MUL:
+ return PerformMULCombine(N, DCI, OptLevel);
+ case ISD::SHL:
+ return PerformSHLCombine(N, DCI, OptLevel);
+ case ISD::AND:
+ return PerformANDCombine(N, DCI);
+ }
+ return SDValue();
+}
+
/// ReplaceLoadVector - Convert vector loads into multi-output scalar loads.
static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
+ const DataLayout *TD,
SmallVectorImpl<SDValue> &Results) {
EVT ResVT = N->getValueType(0);
SDLoc DL(N);
@@ -2070,12 +4259,26 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
break;
}
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+
+ unsigned Align = LD->getAlignment();
+ unsigned PrefAlign =
+ TD->getPrefTypeAlignment(ResVT.getTypeForEVT(*DAG.getContext()));
+ if (Align < PrefAlign) {
+ // This load is not sufficiently aligned, so bail out and let this vector
+ // load be scalarized. Note that we may still be able to emit smaller
+ // vector loads. For example, if we are loading a <4 x float> with an
+ // alignment of 8, this check will fail but the legalizer will try again
+ // with 2 x <2 x float>, which will succeed with an alignment of 8.
+ return;
+ }
+
EVT EltVT = ResVT.getVectorElementType();
unsigned NumElts = ResVT.getVectorNumElements();
// Since LoadV2 is a target node, we cannot rely on DAG type legalization.
// Therefore, we must ensure the type is legal. For i1 and i8, we set the
- // loaded type to i16 and propogate the "real" type as the memory type.
+ // loaded type to i16 and propagate the "real" type as the memory type.
bool NeedTrunc = false;
if (EltVT.getSizeInBits() < 16) {
EltVT = MVT::i16;
@@ -2095,7 +4298,7 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
case 4: {
Opcode = NVPTXISD::LoadV4;
EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
- LdResVTs = DAG.getVTList(ListVTs, 5);
+ LdResVTs = DAG.getVTList(ListVTs);
break;
}
}
@@ -2106,14 +4309,12 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
OtherOps.push_back(N->getOperand(i));
- LoadSDNode *LD = cast<LoadSDNode>(N);
-
// The select routine does not have access to the LoadSDNode instance, so
// pass along the extension information
OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType()));
- SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, &OtherOps[0],
- OtherOps.size(), LD->getMemoryVT(),
+ SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
+ LD->getMemoryVT(),
LD->getMemOperand());
SmallVector<SDValue, 4> ScalarRes;
@@ -2127,8 +4328,7 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
SDValue LoadChain = NewLD.getValue(NumElts);
- SDValue BuildVec =
- DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, &ScalarRes[0], NumElts);
+ SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
Results.push_back(BuildVec);
Results.push_back(LoadChain);
@@ -2162,7 +4362,7 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
// Since LDU/LDG are target nodes, we cannot rely on DAG type
// legalization.
// Therefore, we must ensure the type is legal. For i1 and i8, we set the
- // loaded type to i16 and propogate the "real" type as the memory type.
+ // loaded type to i16 and propagate the "real" type as the memory type.
bool NeedTrunc = false;
if (EltVT.getSizeInBits() < 16) {
EltVT = MVT::i16;
@@ -2208,7 +4408,7 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
break;
}
EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
- LdResVTs = DAG.getVTList(ListVTs, 5);
+ LdResVTs = DAG.getVTList(ListVTs);
break;
}
}
@@ -2225,9 +4425,9 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
- SDValue NewLD = DAG.getMemIntrinsicNode(
- Opcode, DL, LdResVTs, &OtherOps[0], OtherOps.size(),
- MemSD->getMemoryVT(), MemSD->getMemOperand());
+ SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
+ MemSD->getMemoryVT(),
+ MemSD->getMemOperand());
SmallVector<SDValue, 4> ScalarRes;
@@ -2242,7 +4442,7 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
SDValue LoadChain = NewLD.getValue(NumElts);
SDValue BuildVec =
- DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, &ScalarRes[0], NumElts);
+ DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
Results.push_back(BuildVec);
Results.push_back(LoadChain);
@@ -2264,8 +4464,8 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
// We make sure the memory type is i8, which will be used during isel
// to select the proper instruction.
SDValue NewLD =
- DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, &Ops[0],
- Ops.size(), MVT::i8, MemSD->getMemOperand());
+ DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, Ops,
+ MVT::i8, MemSD->getMemOperand());
Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
NewLD.getValue(0)));
@@ -2281,7 +4481,7 @@ void NVPTXTargetLowering::ReplaceNodeResults(
default:
report_fatal_error("Unhandled custom legalization");
case ISD::LOAD:
- ReplaceLoadVector(N, DAG, Results);
+ ReplaceLoadVector(N, DAG, getDataLayout(), Results);
return;
case ISD::INTRINSIC_W_CHAIN:
ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.h b/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
index 66e708f..bef6ed9 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
@@ -16,7 +16,6 @@
#define NVPTXISELLOWERING_H
#include "NVPTX.h"
-#include "NVPTXSubtarget.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
@@ -50,6 +49,11 @@ enum NodeType {
CallSeqBegin,
CallSeqEnd,
CallPrototype,
+ FUN_SHFL_CLAMP,
+ FUN_SHFR_CLAMP,
+ MUL_WIDE_SIGNED,
+ MUL_WIDE_UNSIGNED,
+ IMAD,
Dummy,
LoadV2 = ISD::FIRST_TARGET_MEMORY_OPCODE,
@@ -70,78 +74,440 @@ enum NodeType {
StoreParamU32, // to zext and store a <32bit value, not used currently
StoreRetval,
StoreRetvalV2,
- StoreRetvalV4
+ StoreRetvalV4,
+
+ // Texture intrinsics
+ Tex1DFloatS32,
+ Tex1DFloatFloat,
+ Tex1DFloatFloatLevel,
+ Tex1DFloatFloatGrad,
+ Tex1DS32S32,
+ Tex1DS32Float,
+ Tex1DS32FloatLevel,
+ Tex1DS32FloatGrad,
+ Tex1DU32S32,
+ Tex1DU32Float,
+ Tex1DU32FloatLevel,
+ Tex1DU32FloatGrad,
+ Tex1DArrayFloatS32,
+ Tex1DArrayFloatFloat,
+ Tex1DArrayFloatFloatLevel,
+ Tex1DArrayFloatFloatGrad,
+ Tex1DArrayS32S32,
+ Tex1DArrayS32Float,
+ Tex1DArrayS32FloatLevel,
+ Tex1DArrayS32FloatGrad,
+ Tex1DArrayU32S32,
+ Tex1DArrayU32Float,
+ Tex1DArrayU32FloatLevel,
+ Tex1DArrayU32FloatGrad,
+ Tex2DFloatS32,
+ Tex2DFloatFloat,
+ Tex2DFloatFloatLevel,
+ Tex2DFloatFloatGrad,
+ Tex2DS32S32,
+ Tex2DS32Float,
+ Tex2DS32FloatLevel,
+ Tex2DS32FloatGrad,
+ Tex2DU32S32,
+ Tex2DU32Float,
+ Tex2DU32FloatLevel,
+ Tex2DU32FloatGrad,
+ Tex2DArrayFloatS32,
+ Tex2DArrayFloatFloat,
+ Tex2DArrayFloatFloatLevel,
+ Tex2DArrayFloatFloatGrad,
+ Tex2DArrayS32S32,
+ Tex2DArrayS32Float,
+ Tex2DArrayS32FloatLevel,
+ Tex2DArrayS32FloatGrad,
+ Tex2DArrayU32S32,
+ Tex2DArrayU32Float,
+ Tex2DArrayU32FloatLevel,
+ Tex2DArrayU32FloatGrad,
+ Tex3DFloatS32,
+ Tex3DFloatFloat,
+ Tex3DFloatFloatLevel,
+ Tex3DFloatFloatGrad,
+ Tex3DS32S32,
+ Tex3DS32Float,
+ Tex3DS32FloatLevel,
+ Tex3DS32FloatGrad,
+ Tex3DU32S32,
+ Tex3DU32Float,
+ Tex3DU32FloatLevel,
+ Tex3DU32FloatGrad,
+ TexCubeFloatFloat,
+ TexCubeFloatFloatLevel,
+ TexCubeS32Float,
+ TexCubeS32FloatLevel,
+ TexCubeU32Float,
+ TexCubeU32FloatLevel,
+ TexCubeArrayFloatFloat,
+ TexCubeArrayFloatFloatLevel,
+ TexCubeArrayS32Float,
+ TexCubeArrayS32FloatLevel,
+ TexCubeArrayU32Float,
+ TexCubeArrayU32FloatLevel,
+ Tld4R2DFloatFloat,
+ Tld4G2DFloatFloat,
+ Tld4B2DFloatFloat,
+ Tld4A2DFloatFloat,
+ Tld4R2DS64Float,
+ Tld4G2DS64Float,
+ Tld4B2DS64Float,
+ Tld4A2DS64Float,
+ Tld4R2DU64Float,
+ Tld4G2DU64Float,
+ Tld4B2DU64Float,
+ Tld4A2DU64Float,
+ TexUnified1DFloatS32,
+ TexUnified1DFloatFloat,
+ TexUnified1DFloatFloatLevel,
+ TexUnified1DFloatFloatGrad,
+ TexUnified1DS32S32,
+ TexUnified1DS32Float,
+ TexUnified1DS32FloatLevel,
+ TexUnified1DS32FloatGrad,
+ TexUnified1DU32S32,
+ TexUnified1DU32Float,
+ TexUnified1DU32FloatLevel,
+ TexUnified1DU32FloatGrad,
+ TexUnified1DArrayFloatS32,
+ TexUnified1DArrayFloatFloat,
+ TexUnified1DArrayFloatFloatLevel,
+ TexUnified1DArrayFloatFloatGrad,
+ TexUnified1DArrayS32S32,
+ TexUnified1DArrayS32Float,
+ TexUnified1DArrayS32FloatLevel,
+ TexUnified1DArrayS32FloatGrad,
+ TexUnified1DArrayU32S32,
+ TexUnified1DArrayU32Float,
+ TexUnified1DArrayU32FloatLevel,
+ TexUnified1DArrayU32FloatGrad,
+ TexUnified2DFloatS32,
+ TexUnified2DFloatFloat,
+ TexUnified2DFloatFloatLevel,
+ TexUnified2DFloatFloatGrad,
+ TexUnified2DS32S32,
+ TexUnified2DS32Float,
+ TexUnified2DS32FloatLevel,
+ TexUnified2DS32FloatGrad,
+ TexUnified2DU32S32,
+ TexUnified2DU32Float,
+ TexUnified2DU32FloatLevel,
+ TexUnified2DU32FloatGrad,
+ TexUnified2DArrayFloatS32,
+ TexUnified2DArrayFloatFloat,
+ TexUnified2DArrayFloatFloatLevel,
+ TexUnified2DArrayFloatFloatGrad,
+ TexUnified2DArrayS32S32,
+ TexUnified2DArrayS32Float,
+ TexUnified2DArrayS32FloatLevel,
+ TexUnified2DArrayS32FloatGrad,
+ TexUnified2DArrayU32S32,
+ TexUnified2DArrayU32Float,
+ TexUnified2DArrayU32FloatLevel,
+ TexUnified2DArrayU32FloatGrad,
+ TexUnified3DFloatS32,
+ TexUnified3DFloatFloat,
+ TexUnified3DFloatFloatLevel,
+ TexUnified3DFloatFloatGrad,
+ TexUnified3DS32S32,
+ TexUnified3DS32Float,
+ TexUnified3DS32FloatLevel,
+ TexUnified3DS32FloatGrad,
+ TexUnified3DU32S32,
+ TexUnified3DU32Float,
+ TexUnified3DU32FloatLevel,
+ TexUnified3DU32FloatGrad,
+ TexUnifiedCubeFloatFloat,
+ TexUnifiedCubeFloatFloatLevel,
+ TexUnifiedCubeS32Float,
+ TexUnifiedCubeS32FloatLevel,
+ TexUnifiedCubeU32Float,
+ TexUnifiedCubeU32FloatLevel,
+ TexUnifiedCubeArrayFloatFloat,
+ TexUnifiedCubeArrayFloatFloatLevel,
+ TexUnifiedCubeArrayS32Float,
+ TexUnifiedCubeArrayS32FloatLevel,
+ TexUnifiedCubeArrayU32Float,
+ TexUnifiedCubeArrayU32FloatLevel,
+ Tld4UnifiedR2DFloatFloat,
+ Tld4UnifiedG2DFloatFloat,
+ Tld4UnifiedB2DFloatFloat,
+ Tld4UnifiedA2DFloatFloat,
+ Tld4UnifiedR2DS64Float,
+ Tld4UnifiedG2DS64Float,
+ Tld4UnifiedB2DS64Float,
+ Tld4UnifiedA2DS64Float,
+ Tld4UnifiedR2DU64Float,
+ Tld4UnifiedG2DU64Float,
+ Tld4UnifiedB2DU64Float,
+ Tld4UnifiedA2DU64Float,
+
+ // Surface intrinsics
+ Suld1DI8Clamp,
+ Suld1DI16Clamp,
+ Suld1DI32Clamp,
+ Suld1DI64Clamp,
+ Suld1DV2I8Clamp,
+ Suld1DV2I16Clamp,
+ Suld1DV2I32Clamp,
+ Suld1DV2I64Clamp,
+ Suld1DV4I8Clamp,
+ Suld1DV4I16Clamp,
+ Suld1DV4I32Clamp,
+
+ Suld1DArrayI8Clamp,
+ Suld1DArrayI16Clamp,
+ Suld1DArrayI32Clamp,
+ Suld1DArrayI64Clamp,
+ Suld1DArrayV2I8Clamp,
+ Suld1DArrayV2I16Clamp,
+ Suld1DArrayV2I32Clamp,
+ Suld1DArrayV2I64Clamp,
+ Suld1DArrayV4I8Clamp,
+ Suld1DArrayV4I16Clamp,
+ Suld1DArrayV4I32Clamp,
+
+ Suld2DI8Clamp,
+ Suld2DI16Clamp,
+ Suld2DI32Clamp,
+ Suld2DI64Clamp,
+ Suld2DV2I8Clamp,
+ Suld2DV2I16Clamp,
+ Suld2DV2I32Clamp,
+ Suld2DV2I64Clamp,
+ Suld2DV4I8Clamp,
+ Suld2DV4I16Clamp,
+ Suld2DV4I32Clamp,
+
+ Suld2DArrayI8Clamp,
+ Suld2DArrayI16Clamp,
+ Suld2DArrayI32Clamp,
+ Suld2DArrayI64Clamp,
+ Suld2DArrayV2I8Clamp,
+ Suld2DArrayV2I16Clamp,
+ Suld2DArrayV2I32Clamp,
+ Suld2DArrayV2I64Clamp,
+ Suld2DArrayV4I8Clamp,
+ Suld2DArrayV4I16Clamp,
+ Suld2DArrayV4I32Clamp,
+
+ Suld3DI8Clamp,
+ Suld3DI16Clamp,
+ Suld3DI32Clamp,
+ Suld3DI64Clamp,
+ Suld3DV2I8Clamp,
+ Suld3DV2I16Clamp,
+ Suld3DV2I32Clamp,
+ Suld3DV2I64Clamp,
+ Suld3DV4I8Clamp,
+ Suld3DV4I16Clamp,
+ Suld3DV4I32Clamp,
+
+ Suld1DI8Trap,
+ Suld1DI16Trap,
+ Suld1DI32Trap,
+ Suld1DI64Trap,
+ Suld1DV2I8Trap,
+ Suld1DV2I16Trap,
+ Suld1DV2I32Trap,
+ Suld1DV2I64Trap,
+ Suld1DV4I8Trap,
+ Suld1DV4I16Trap,
+ Suld1DV4I32Trap,
+
+ Suld1DArrayI8Trap,
+ Suld1DArrayI16Trap,
+ Suld1DArrayI32Trap,
+ Suld1DArrayI64Trap,
+ Suld1DArrayV2I8Trap,
+ Suld1DArrayV2I16Trap,
+ Suld1DArrayV2I32Trap,
+ Suld1DArrayV2I64Trap,
+ Suld1DArrayV4I8Trap,
+ Suld1DArrayV4I16Trap,
+ Suld1DArrayV4I32Trap,
+
+ Suld2DI8Trap,
+ Suld2DI16Trap,
+ Suld2DI32Trap,
+ Suld2DI64Trap,
+ Suld2DV2I8Trap,
+ Suld2DV2I16Trap,
+ Suld2DV2I32Trap,
+ Suld2DV2I64Trap,
+ Suld2DV4I8Trap,
+ Suld2DV4I16Trap,
+ Suld2DV4I32Trap,
+
+ Suld2DArrayI8Trap,
+ Suld2DArrayI16Trap,
+ Suld2DArrayI32Trap,
+ Suld2DArrayI64Trap,
+ Suld2DArrayV2I8Trap,
+ Suld2DArrayV2I16Trap,
+ Suld2DArrayV2I32Trap,
+ Suld2DArrayV2I64Trap,
+ Suld2DArrayV4I8Trap,
+ Suld2DArrayV4I16Trap,
+ Suld2DArrayV4I32Trap,
+
+ Suld3DI8Trap,
+ Suld3DI16Trap,
+ Suld3DI32Trap,
+ Suld3DI64Trap,
+ Suld3DV2I8Trap,
+ Suld3DV2I16Trap,
+ Suld3DV2I32Trap,
+ Suld3DV2I64Trap,
+ Suld3DV4I8Trap,
+ Suld3DV4I16Trap,
+ Suld3DV4I32Trap,
+
+ Suld1DI8Zero,
+ Suld1DI16Zero,
+ Suld1DI32Zero,
+ Suld1DI64Zero,
+ Suld1DV2I8Zero,
+ Suld1DV2I16Zero,
+ Suld1DV2I32Zero,
+ Suld1DV2I64Zero,
+ Suld1DV4I8Zero,
+ Suld1DV4I16Zero,
+ Suld1DV4I32Zero,
+
+ Suld1DArrayI8Zero,
+ Suld1DArrayI16Zero,
+ Suld1DArrayI32Zero,
+ Suld1DArrayI64Zero,
+ Suld1DArrayV2I8Zero,
+ Suld1DArrayV2I16Zero,
+ Suld1DArrayV2I32Zero,
+ Suld1DArrayV2I64Zero,
+ Suld1DArrayV4I8Zero,
+ Suld1DArrayV4I16Zero,
+ Suld1DArrayV4I32Zero,
+
+ Suld2DI8Zero,
+ Suld2DI16Zero,
+ Suld2DI32Zero,
+ Suld2DI64Zero,
+ Suld2DV2I8Zero,
+ Suld2DV2I16Zero,
+ Suld2DV2I32Zero,
+ Suld2DV2I64Zero,
+ Suld2DV4I8Zero,
+ Suld2DV4I16Zero,
+ Suld2DV4I32Zero,
+
+ Suld2DArrayI8Zero,
+ Suld2DArrayI16Zero,
+ Suld2DArrayI32Zero,
+ Suld2DArrayI64Zero,
+ Suld2DArrayV2I8Zero,
+ Suld2DArrayV2I16Zero,
+ Suld2DArrayV2I32Zero,
+ Suld2DArrayV2I64Zero,
+ Suld2DArrayV4I8Zero,
+ Suld2DArrayV4I16Zero,
+ Suld2DArrayV4I32Zero,
+
+ Suld3DI8Zero,
+ Suld3DI16Zero,
+ Suld3DI32Zero,
+ Suld3DI64Zero,
+ Suld3DV2I8Zero,
+ Suld3DV2I16Zero,
+ Suld3DV2I32Zero,
+ Suld3DV2I64Zero,
+ Suld3DV4I8Zero,
+ Suld3DV4I16Zero,
+ Suld3DV4I32Zero
};
}
+class NVPTXSubtarget;
+
//===--------------------------------------------------------------------===//
// TargetLowering Implementation
//===--------------------------------------------------------------------===//
class NVPTXTargetLowering : public TargetLowering {
public:
explicit NVPTXTargetLowering(NVPTXTargetMachine &TM);
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(const GlobalValue *GV, int64_t Offset,
SelectionDAG &DAG) const;
- virtual const char *getTargetNodeName(unsigned Opcode) const;
+ const char *getTargetNodeName(unsigned Opcode) const override;
bool isTypeSupportedInIntrinsic(MVT VT) const;
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
- unsigned Intrinsic) const;
+ unsigned Intrinsic) const override;
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type
/// Used to guide target specific optimizations, like loop strength
/// reduction (LoopStrengthReduce.cpp) and memory optimization for
/// address mode (CodeGenPrepare.cpp)
- virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
+ bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;
/// getFunctionAlignment - Return the Log2 alignment of this function.
- virtual unsigned getFunctionAlignment(const Function *F) const;
+ unsigned getFunctionAlignment(const Function *F) const;
- virtual EVT getSetCCResultType(LLVMContext &, EVT VT) const {
+ EVT getSetCCResultType(LLVMContext &Ctx, EVT VT) const override {
if (VT.isVector())
- return MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
+ return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
return MVT::i1;
}
- ConstraintType getConstraintType(const std::string &Constraint) const;
+ ConstraintType
+ getConstraintType(const std::string &Constraint) const override;
std::pair<unsigned, const TargetRegisterClass *>
- getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const;
+ getRegForInlineAsmConstraint(const std::string &Constraint,
+ MVT VT) const override;
- virtual SDValue LowerFormalArguments(
+ SDValue LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
+ SmallVectorImpl<SDValue> &InVals) const override;
- virtual SDValue
- LowerCall(CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const;
+ SDValue LowerCall(CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const override;
std::string getPrototype(Type *, const ArgListTy &,
const SmallVectorImpl<ISD::OutputArg> &,
unsigned retAlignment,
const ImmutableCallSite *CS) const;
- virtual SDValue
+ SDValue
LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals, SDLoc dl,
- SelectionDAG &DAG) const;
+ SelectionDAG &DAG) const override;
- virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
- std::vector<SDValue> &Ops,
- SelectionDAG &DAG) const;
+ void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const override;
NVPTXTargetMachine *nvTM;
// PTX always uses 32-bit shift amounts
- virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i32; }
+ MVT getScalarShiftAmountTy(EVT LHSTy) const override { return MVT::i32; }
+
+ TargetLoweringBase::LegalizeTypeAction
+ getPreferredVectorAction(EVT VT) const override;
- virtual bool shouldSplitVectorElementType(EVT VT) const;
+ bool allowFMA(MachineFunction &MF, CodeGenOpt::Level OptLevel) const;
+
+ virtual bool isFMAFasterThanFMulAndFAdd(EVT) const {
+ return true;
+ }
private:
const NVPTXSubtarget &nvptxSubtarget; // cache the subtarget here
@@ -160,8 +526,12 @@ private:
SDValue LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const;
- virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
- SelectionDAG &DAG) const;
+ SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
+
+ void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const override;
+ SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
unsigned getArgumentAlignment(SDValue Callee, const ImmutableCallSite *CS,
Type *Ty, unsigned Idx) const;
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXImageOptimizer.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXImageOptimizer.cpp
new file mode 100644
index 0000000..a98fb37
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXImageOptimizer.cpp
@@ -0,0 +1,178 @@
+//===-- NVPTXImageOptimizer.cpp - Image optimization pass -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass implements IR-level optimizations of image access code,
+// including:
+//
+// 1. Eliminate istypep intrinsics when image access qualifier is known
+//
+//===----------------------------------------------------------------------===//
+
+#include "NVPTX.h"
+#include "NVPTXUtilities.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Analysis/ConstantFolding.h"
+
+using namespace llvm;
+
+namespace {
+class NVPTXImageOptimizer : public FunctionPass {
+private:
+ static char ID;
+ SmallVector<Instruction*, 4> InstrToDelete;
+
+public:
+ NVPTXImageOptimizer();
+
+ bool runOnFunction(Function &F) override;
+
+private:
+ bool replaceIsTypePSampler(Instruction &I);
+ bool replaceIsTypePSurface(Instruction &I);
+ bool replaceIsTypePTexture(Instruction &I);
+ Value *cleanupValue(Value *V);
+ void replaceWith(Instruction *From, ConstantInt *To);
+};
+}
+
+char NVPTXImageOptimizer::ID = 0;
+
+NVPTXImageOptimizer::NVPTXImageOptimizer()
+ : FunctionPass(ID) {}
+
+bool NVPTXImageOptimizer::runOnFunction(Function &F) {
+ bool Changed = false;
+ InstrToDelete.clear();
+
+ // Look for call instructions in the function
+ for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE;
+ ++BI) {
+ for (BasicBlock::iterator I = (*BI).begin(), E = (*BI).end();
+ I != E; ++I) {
+ Instruction &Instr = *I;
+ if (CallInst *CI = dyn_cast<CallInst>(I)) {
+ Function *CalledF = CI->getCalledFunction();
+ if (CalledF && CalledF->isIntrinsic()) {
+ // This is an intrinsic function call; check if it's an istypep
+ switch (CalledF->getIntrinsicID()) {
+ default: break;
+ case Intrinsic::nvvm_istypep_sampler:
+ Changed |= replaceIsTypePSampler(Instr);
+ break;
+ case Intrinsic::nvvm_istypep_surface:
+ Changed |= replaceIsTypePSurface(Instr);
+ break;
+ case Intrinsic::nvvm_istypep_texture:
+ Changed |= replaceIsTypePTexture(Instr);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ // Delete any istypep instances we replaced in the IR
+ for (unsigned i = 0, e = InstrToDelete.size(); i != e; ++i)
+ InstrToDelete[i]->eraseFromParent();
+
+ return Changed;
+}
+
+bool NVPTXImageOptimizer::replaceIsTypePSampler(Instruction &I) {
+ Value *TexHandle = cleanupValue(I.getOperand(0));
+ if (isSampler(*TexHandle)) {
+ // This is an OpenCL sampler, so it must be a samplerref
+ replaceWith(&I, ConstantInt::getTrue(I.getContext()));
+ return true;
+ } else if (isImageWriteOnly(*TexHandle) ||
+ isImageReadWrite(*TexHandle) ||
+ isImageReadOnly(*TexHandle)) {
+ // This is an OpenCL image, so it cannot be a samplerref
+ replaceWith(&I, ConstantInt::getFalse(I.getContext()));
+ return true;
+ } else {
+ // The image type is unknown, so we cannot eliminate the intrinsic
+ return false;
+ }
+}
+
+bool NVPTXImageOptimizer::replaceIsTypePSurface(Instruction &I) {
+ Value *TexHandle = cleanupValue(I.getOperand(0));
+ if (isImageReadWrite(*TexHandle) ||
+ isImageWriteOnly(*TexHandle)) {
+ // This is an OpenCL read-write/write-only image, so it must be a surfref
+ replaceWith(&I, ConstantInt::getTrue(I.getContext()));
+ return true;
+ } else if (isImageReadOnly(*TexHandle) ||
+ isSampler(*TexHandle)) {
+ // This is an OpenCL read-only image or a sampler, so it cannot be
+ // a surfref
+ replaceWith(&I, ConstantInt::getFalse(I.getContext()));
+ return true;
+ } else {
+ // The image type is unknown, so we cannot eliminate the intrinsic
+ return false;
+ }
+}
+
+bool NVPTXImageOptimizer::replaceIsTypePTexture(Instruction &I) {
+ Value *TexHandle = cleanupValue(I.getOperand(0));
+ if (isImageReadOnly(*TexHandle)) {
+ // This is an OpenCL read-only image, so it must be a texref
+ replaceWith(&I, ConstantInt::getTrue(I.getContext()));
+ return true;
+ } else if (isImageWriteOnly(*TexHandle) ||
+ isImageReadWrite(*TexHandle) ||
+ isSampler(*TexHandle)) {
+ // This is an OpenCL read-write/write-only image or a sampler, so it
+ // cannot be a texref
+ replaceWith(&I, ConstantInt::getFalse(I.getContext()));
+ return true;
+ } else {
+ // The image type is unknown, so we cannot eliminate the intrinsic
+ return false;
+ }
+}
+
+void NVPTXImageOptimizer::replaceWith(Instruction *From, ConstantInt *To) {
+ // We implement "poor man's DCE" here to make sure any code that is no longer
+ // live is actually unreachable and can be trivially eliminated by the
+ // unreachable block elimination pass.
+ for (CallInst::use_iterator UI = From->use_begin(), UE = From->use_end();
+ UI != UE; ++UI) {
+ if (BranchInst *BI = dyn_cast<BranchInst>(*UI)) {
+ if (BI->isUnconditional()) continue;
+ BasicBlock *Dest;
+ if (To->isZero())
+ // Get false block
+ Dest = BI->getSuccessor(1);
+ else
+ // Get true block
+ Dest = BI->getSuccessor(0);
+ BranchInst::Create(Dest, BI);
+ InstrToDelete.push_back(BI);
+ }
+ }
+ From->replaceAllUsesWith(To);
+ InstrToDelete.push_back(From);
+}
+
+Value *NVPTXImageOptimizer::cleanupValue(Value *V) {
+ if (ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
+ return cleanupValue(EVI->getAggregateOperand());
+ }
+ return V;
+}
+
+FunctionPass *llvm::createNVPTXImageOptimizerPass() {
+ return new NVPTXImageOptimizer();
+}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXInstrFormats.td b/contrib/llvm/lib/Target/NVPTX/NVPTXInstrFormats.td
index f11f1b8..ffcb5d5 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXInstrFormats.td
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXInstrFormats.td
@@ -36,8 +36,24 @@ class NVPTXInst<dag outs, dag ins, string asmstr, list<dag> pattern>
bit IsLoad = 0;
bit IsStore = 0;
- let TSFlags{3-0} = VecInstType;
- let TSFlags{4-4} = IsSimpleMove;
- let TSFlags{5-5} = IsLoad;
- let TSFlags{6-6} = IsStore;
+ bit IsTex = 0;
+ bit IsSust = 0;
+ bit IsSurfTexQuery = 0;
+ bit IsTexModeUnified = 0;
+
+ // The following field is encoded as log2 of the vector size plus one,
+ // with 0 meaning the operation is not a surface instruction. For example,
+ // if IsSuld == 2, then the instruction is a suld instruction with vector size
+ // 2**(2-1) = 2.
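+ // So IsSuld == 1 denotes a scalar suld, 2 a v2 suld, and 3 a v4 suld.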
+ bits<2> IsSuld = 0;
+
+ let TSFlags{3-0} = VecInstType;
+ let TSFlags{4-4} = IsSimpleMove;
+ let TSFlags{5-5} = IsLoad;
+ let TSFlags{6-6} = IsStore;
+ let TSFlags{7} = IsTex;
+ let TSFlags{9-8} = IsSuld;
+ let TSFlags{10} = IsSust;
+ let TSFlags{11} = IsSurfTexQuery;
+ let TSFlags{12} = IsTexModeUnified;
}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp
index 86ddd38..b5b4fbe 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp
@@ -14,8 +14,6 @@
#include "NVPTX.h"
#include "NVPTXInstrInfo.h"
#include "NVPTXTargetMachine.h"
-#define GET_INSTRINFO_CTOR_DTOR
-#include "NVPTXGenInstrInfo.inc"
#include "llvm/IR/Function.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -24,12 +22,15 @@
using namespace llvm;
+#define GET_INSTRINFO_CTOR_DTOR
+#include "NVPTXGenInstrInfo.inc"
+
// Pin the vtable to this file.
void NVPTXInstrInfo::anchor() {}
// FIXME: Add the subtarget support on this constructor.
-NVPTXInstrInfo::NVPTXInstrInfo(NVPTXTargetMachine &tm)
- : NVPTXGenInstrInfo(), TM(tm), RegInfo(*TM.getSubtargetImpl()) {}
+NVPTXInstrInfo::NVPTXInstrInfo(NVPTXSubtarget &STI)
+ : NVPTXGenInstrInfo(), RegInfo(STI) {}
void NVPTXInstrInfo::copyPhysReg(
MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
@@ -256,7 +257,7 @@ unsigned NVPTXInstrInfo::InsertBranch(
"NVPTX branch conditions have two components!");
// One-way branch.
- if (FBB == 0) {
+ if (!FBB) {
if (Cond.empty()) // Unconditional branch
BuildMI(&MBB, DL, get(NVPTX::GOTO)).addMBB(TBB);
else // Conditional branch
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.h b/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.h
index 600fc5c..2ac2974 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.h
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.h
@@ -24,13 +24,12 @@
namespace llvm {
class NVPTXInstrInfo : public NVPTXGenInstrInfo {
- NVPTXTargetMachine &TM;
const NVPTXRegisterInfo RegInfo;
virtual void anchor();
public:
- explicit NVPTXInstrInfo(NVPTXTargetMachine &TM);
+ explicit NVPTXInstrInfo(NVPTXSubtarget &STI);
- virtual const NVPTXRegisterInfo &getRegisterInfo() const { return RegInfo; }
+ const NVPTXRegisterInfo &getRegisterInfo() const { return RegInfo; }
/* The following virtual functions are used in register allocation.
* They are not implemented because the existing interface and the logic
@@ -50,9 +49,9 @@ public:
* const TargetRegisterClass *RC) const;
*/
- virtual void copyPhysReg(
+ void copyPhysReg(
MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg, bool KillSrc) const;
+ unsigned DestReg, unsigned SrcReg, bool KillSrc) const override;
virtual bool isMoveInstr(const MachineInstr &MI, unsigned &SrcReg,
unsigned &DestReg) const;
bool isLoadInstr(const MachineInstr &MI, unsigned &AddrSpace) const;
@@ -61,13 +60,13 @@ public:
virtual bool CanTailMerge(const MachineInstr *MI) const;
// Branch analysis.
- virtual bool AnalyzeBranch(
+ bool AnalyzeBranch(
MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond, bool AllowModify) const;
- virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
- virtual unsigned InsertBranch(
+ SmallVectorImpl<MachineOperand> &Cond, bool AllowModify) const override;
+ unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
+ unsigned InsertBranch(
MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const;
+ const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const override;
unsigned getLdStCodeAddrSpace(const MachineInstr &MI) const {
return MI.getOperand(2).getImm();
}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index b23f1e4..9900b8c 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -139,17 +139,10 @@ def hasGenericLdSt : Predicate<"Subtarget.hasGenericLdSt()">;
def doF32FTZ : Predicate<"useF32FTZ()">;
def doNoF32FTZ : Predicate<"!useF32FTZ()">;
-def doFMAF32 : Predicate<"doFMAF32">;
-def doFMAF32_ftz : Predicate<"(doFMAF32 && useF32FTZ())">;
-def doFMAF32AGG : Predicate<"doFMAF32AGG">;
-def doFMAF32AGG_ftz : Predicate<"(doFMAF32AGG && useF32FTZ())">;
-def doFMAF64 : Predicate<"doFMAF64">;
-def doFMAF64AGG : Predicate<"doFMAF64AGG">;
-
def doMulWide : Predicate<"doMulWide">;
-def allowFMA : Predicate<"allowFMA">;
-def allowFMA_ftz : Predicate<"(allowFMA && useF32FTZ())">;
+def allowFMA : Predicate<"allowFMA()">;
+def noFMA : Predicate<"!allowFMA()">;
def do_DIVF32_APPROX : Predicate<"getDivF32Level()==0">;
def do_DIVF32_FULL : Predicate<"getDivF32Level()==1">;
@@ -158,9 +151,12 @@ def do_SQRTF32_APPROX : Predicate<"!usePrecSqrtF32()">;
def do_SQRTF32_RN : Predicate<"usePrecSqrtF32()">;
def hasHWROT32 : Predicate<"Subtarget.hasHWROT32()">;
+def noHWROT32 : Predicate<"!Subtarget.hasHWROT32()">;
def true : Predicate<"1">;
+def hasPTX31 : Predicate<"Subtarget.getPTXVersion() >= 31">;
+
//===----------------------------------------------------------------------===//
// Some Common Instruction Class Templates
@@ -219,13 +215,13 @@ multiclass F3<string OpcStr, SDNode OpNode> {
!strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
[(set Float32Regs:$dst,
(OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[allowFMA_ftz]>;
+ Requires<[allowFMA, doF32FTZ]>;
def f32ri_ftz : NVPTXInst<(outs Float32Regs:$dst),
(ins Float32Regs:$a, f32imm:$b),
!strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
[(set Float32Regs:$dst,
(OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[allowFMA_ftz]>;
+ Requires<[allowFMA, doF32FTZ]>;
def f32rr : NVPTXInst<(outs Float32Regs:$dst),
(ins Float32Regs:$a, Float32Regs:$b),
!strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
@@ -245,34 +241,38 @@ multiclass F3_rn<string OpcStr, SDNode OpNode> {
(ins Float64Regs:$a, Float64Regs:$b),
!strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
[(set Float64Regs:$dst,
- (OpNode Float64Regs:$a, Float64Regs:$b))]>;
+ (OpNode Float64Regs:$a, Float64Regs:$b))]>,
+ Requires<[noFMA]>;
def f64ri : NVPTXInst<(outs Float64Regs:$dst),
(ins Float64Regs:$a, f64imm:$b),
!strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
[(set Float64Regs:$dst,
- (OpNode Float64Regs:$a, fpimm:$b))]>;
+ (OpNode Float64Regs:$a, fpimm:$b))]>,
+ Requires<[noFMA]>;
def f32rr_ftz : NVPTXInst<(outs Float32Regs:$dst),
(ins Float32Regs:$a, Float32Regs:$b),
!strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
[(set Float32Regs:$dst,
(OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[doF32FTZ]>;
+ Requires<[noFMA, doF32FTZ]>;
def f32ri_ftz : NVPTXInst<(outs Float32Regs:$dst),
(ins Float32Regs:$a, f32imm:$b),
!strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
[(set Float32Regs:$dst,
(OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[doF32FTZ]>;
+ Requires<[noFMA, doF32FTZ]>;
def f32rr : NVPTXInst<(outs Float32Regs:$dst),
(ins Float32Regs:$a, Float32Regs:$b),
!strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
[(set Float32Regs:$dst,
- (OpNode Float32Regs:$a, Float32Regs:$b))]>;
+ (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[noFMA]>;
def f32ri : NVPTXInst<(outs Float32Regs:$dst),
(ins Float32Regs:$a, f32imm:$b),
!strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
[(set Float32Regs:$dst,
- (OpNode Float32Regs:$a, fpimm:$b))]>;
+ (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[noFMA]>;
}
multiclass F2<string OpcStr, SDNode OpNode> {
@@ -461,33 +461,45 @@ def SHL2MUL16 : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(temp.shl(v), MVT::i16);
}]>;
-def MULWIDES64 : NVPTXInst<(outs Int64Regs:$dst),
- (ins Int32Regs:$a, Int32Regs:$b),
+def MULWIDES64
+ : NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ "mul.wide.s32 \t$dst, $a, $b;", []>;
+def MULWIDES64Imm
+ : NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
"mul.wide.s32 \t$dst, $a, $b;", []>;
-def MULWIDES64Imm : NVPTXInst<(outs Int64Regs:$dst),
- (ins Int32Regs:$a, i64imm:$b),
+def MULWIDES64Imm64
+ : NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
"mul.wide.s32 \t$dst, $a, $b;", []>;
-def MULWIDEU64 : NVPTXInst<(outs Int64Regs:$dst),
- (ins Int32Regs:$a, Int32Regs:$b),
+def MULWIDEU64
+ : NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ "mul.wide.u32 \t$dst, $a, $b;", []>;
+def MULWIDEU64Imm
+ : NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
"mul.wide.u32 \t$dst, $a, $b;", []>;
-def MULWIDEU64Imm : NVPTXInst<(outs Int64Regs:$dst),
- (ins Int32Regs:$a, i64imm:$b),
+def MULWIDEU64Imm64
+ : NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
"mul.wide.u32 \t$dst, $a, $b;", []>;
-def MULWIDES32 : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int16Regs:$a, Int16Regs:$b),
+def MULWIDES32
+ : NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
"mul.wide.s16 \t$dst, $a, $b;", []>;
-def MULWIDES32Imm : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int16Regs:$a, i32imm:$b),
+def MULWIDES32Imm
+ : NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ "mul.wide.s16 \t$dst, $a, $b;", []>;
+def MULWIDES32Imm32
+ : NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
"mul.wide.s16 \t$dst, $a, $b;", []>;
-def MULWIDEU32 : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int16Regs:$a, Int16Regs:$b),
- "mul.wide.u16 \t$dst, $a, $b;", []>;
-def MULWIDEU32Imm : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int16Regs:$a, i32imm:$b),
+def MULWIDEU32
+ : NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ "mul.wide.u16 \t$dst, $a, $b;", []>;
+def MULWIDEU32Imm
+ : NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
"mul.wide.u16 \t$dst, $a, $b;", []>;
+def MULWIDEU32Imm32
+ : NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
+ "mul.wide.u16 \t$dst, $a, $b;", []>;
def : Pat<(shl (sext Int32Regs:$a), (i32 Int5Const:$b)),
(MULWIDES64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
@@ -507,25 +519,63 @@ def : Pat<(mul (sext Int32Regs:$a), (sext Int32Regs:$b)),
(MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
Requires<[doMulWide]>;
def : Pat<(mul (sext Int32Regs:$a), (i64 SInt32Const:$b)),
- (MULWIDES64Imm Int32Regs:$a, (i64 SInt32Const:$b))>,
+ (MULWIDES64Imm64 Int32Regs:$a, (i64 SInt32Const:$b))>,
Requires<[doMulWide]>;
def : Pat<(mul (zext Int32Regs:$a), (zext Int32Regs:$b)),
- (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>, Requires<[doMulWide]>;
+ (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
+ Requires<[doMulWide]>;
def : Pat<(mul (zext Int32Regs:$a), (i64 UInt32Const:$b)),
- (MULWIDEU64Imm Int32Regs:$a, (i64 UInt32Const:$b))>,
+ (MULWIDEU64Imm64 Int32Regs:$a, (i64 UInt32Const:$b))>,
Requires<[doMulWide]>;
def : Pat<(mul (sext Int16Regs:$a), (sext Int16Regs:$b)),
- (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>, Requires<[doMulWide]>;
+ (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
+ Requires<[doMulWide]>;
def : Pat<(mul (sext Int16Regs:$a), (i32 SInt16Const:$b)),
- (MULWIDES32Imm Int16Regs:$a, (i32 SInt16Const:$b))>,
+ (MULWIDES32Imm32 Int16Regs:$a, (i32 SInt16Const:$b))>,
Requires<[doMulWide]>;
def : Pat<(mul (zext Int16Regs:$a), (zext Int16Regs:$b)),
- (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>, Requires<[doMulWide]>;
+ (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
+ Requires<[doMulWide]>;
def : Pat<(mul (zext Int16Regs:$a), (i32 UInt16Const:$b)),
- (MULWIDEU32Imm Int16Regs:$a, (i32 UInt16Const:$b))>,
+ (MULWIDEU32Imm32 Int16Regs:$a, (i32 UInt16Const:$b))>,
+ Requires<[doMulWide]>;
+
+
+def SDTMulWide
+ : SDTypeProfile<1, 2, [SDTCisSameAs<1, 2>]>;
+def mul_wide_signed
+ : SDNode<"NVPTXISD::MUL_WIDE_SIGNED", SDTMulWide>;
+def mul_wide_unsigned
+ : SDNode<"NVPTXISD::MUL_WIDE_UNSIGNED", SDTMulWide>;
+
+def : Pat<(i32 (mul_wide_signed Int16Regs:$a, Int16Regs:$b)),
+ (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i32 (mul_wide_signed Int16Regs:$a, imm:$b)),
+ (MULWIDES32Imm Int16Regs:$a, imm:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, Int16Regs:$b)),
+ (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, imm:$b)),
+ (MULWIDEU32Imm Int16Regs:$a, imm:$b)>,
+ Requires<[doMulWide]>;
+
+
+def : Pat<(i64 (mul_wide_signed Int32Regs:$a, Int32Regs:$b)),
+ (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i64 (mul_wide_signed Int32Regs:$a, imm:$b)),
+ (MULWIDES64Imm Int32Regs:$a, imm:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, Int32Regs:$b)),
+ (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, imm:$b)),
+ (MULWIDEU64Imm Int32Regs:$a, imm:$b)>,
Requires<[doMulWide]>;
defm MULT : I3<"mul.lo.s", mul>;
@@ -541,69 +591,75 @@ defm SREM : I3<"rem.s", srem>;
defm UREM : I3<"rem.u", urem>;
// The ri version will not be selected as DAGCombiner::visitUREM will lower it.
+def SDTIMAD
+ : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisInt<0>,
+ SDTCisInt<2>, SDTCisSameAs<0, 2>,
+ SDTCisSameAs<0, 3>]>;
+def imad
+ : SDNode<"NVPTXISD::IMAD", SDTIMAD>;
+
def MAD16rrr : NVPTXInst<(outs Int16Regs:$dst),
(ins Int16Regs:$a, Int16Regs:$b, Int16Regs:$c),
"mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int16Regs:$dst, (add
- (mul Int16Regs:$a, Int16Regs:$b), Int16Regs:$c))]>;
+ [(set Int16Regs:$dst,
+ (imad Int16Regs:$a, Int16Regs:$b, Int16Regs:$c))]>;
def MAD16rri : NVPTXInst<(outs Int16Regs:$dst),
(ins Int16Regs:$a, Int16Regs:$b, i16imm:$c),
"mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int16Regs:$dst, (add
- (mul Int16Regs:$a, Int16Regs:$b), imm:$c))]>;
+ [(set Int16Regs:$dst,
+ (imad Int16Regs:$a, Int16Regs:$b, imm:$c))]>;
def MAD16rir : NVPTXInst<(outs Int16Regs:$dst),
(ins Int16Regs:$a, i16imm:$b, Int16Regs:$c),
"mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int16Regs:$dst, (add
- (mul Int16Regs:$a, imm:$b), Int16Regs:$c))]>;
+ [(set Int16Regs:$dst,
+ (imad Int16Regs:$a, imm:$b, Int16Regs:$c))]>;
def MAD16rii : NVPTXInst<(outs Int16Regs:$dst),
(ins Int16Regs:$a, i16imm:$b, i16imm:$c),
"mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int16Regs:$dst, (add (mul Int16Regs:$a, imm:$b),
- imm:$c))]>;
+ [(set Int16Regs:$dst,
+ (imad Int16Regs:$a, imm:$b, imm:$c))]>;
def MAD32rrr : NVPTXInst<(outs Int32Regs:$dst),
(ins Int32Regs:$a, Int32Regs:$b, Int32Regs:$c),
"mad.lo.s32 \t$dst, $a, $b, $c;",
- [(set Int32Regs:$dst, (add
- (mul Int32Regs:$a, Int32Regs:$b), Int32Regs:$c))]>;
+ [(set Int32Regs:$dst,
+ (imad Int32Regs:$a, Int32Regs:$b, Int32Regs:$c))]>;
def MAD32rri : NVPTXInst<(outs Int32Regs:$dst),
(ins Int32Regs:$a, Int32Regs:$b, i32imm:$c),
"mad.lo.s32 \t$dst, $a, $b, $c;",
- [(set Int32Regs:$dst, (add
- (mul Int32Regs:$a, Int32Regs:$b), imm:$c))]>;
+ [(set Int32Regs:$dst,
+ (imad Int32Regs:$a, Int32Regs:$b, imm:$c))]>;
def MAD32rir : NVPTXInst<(outs Int32Regs:$dst),
(ins Int32Regs:$a, i32imm:$b, Int32Regs:$c),
"mad.lo.s32 \t$dst, $a, $b, $c;",
- [(set Int32Regs:$dst, (add
- (mul Int32Regs:$a, imm:$b), Int32Regs:$c))]>;
+ [(set Int32Regs:$dst,
+ (imad Int32Regs:$a, imm:$b, Int32Regs:$c))]>;
def MAD32rii : NVPTXInst<(outs Int32Regs:$dst),
(ins Int32Regs:$a, i32imm:$b, i32imm:$c),
"mad.lo.s32 \t$dst, $a, $b, $c;",
- [(set Int32Regs:$dst, (add
- (mul Int32Regs:$a, imm:$b), imm:$c))]>;
+ [(set Int32Regs:$dst,
+ (imad Int32Regs:$a, imm:$b, imm:$c))]>;
def MAD64rrr : NVPTXInst<(outs Int64Regs:$dst),
(ins Int64Regs:$a, Int64Regs:$b, Int64Regs:$c),
"mad.lo.s64 \t$dst, $a, $b, $c;",
- [(set Int64Regs:$dst, (add
- (mul Int64Regs:$a, Int64Regs:$b), Int64Regs:$c))]>;
+ [(set Int64Regs:$dst,
+ (imad Int64Regs:$a, Int64Regs:$b, Int64Regs:$c))]>;
def MAD64rri : NVPTXInst<(outs Int64Regs:$dst),
(ins Int64Regs:$a, Int64Regs:$b, i64imm:$c),
"mad.lo.s64 \t$dst, $a, $b, $c;",
- [(set Int64Regs:$dst, (add
- (mul Int64Regs:$a, Int64Regs:$b), imm:$c))]>;
+ [(set Int64Regs:$dst,
+ (imad Int64Regs:$a, Int64Regs:$b, imm:$c))]>;
def MAD64rir : NVPTXInst<(outs Int64Regs:$dst),
(ins Int64Regs:$a, i64imm:$b, Int64Regs:$c),
"mad.lo.s64 \t$dst, $a, $b, $c;",
- [(set Int64Regs:$dst, (add
- (mul Int64Regs:$a, imm:$b), Int64Regs:$c))]>;
+ [(set Int64Regs:$dst,
+ (imad Int64Regs:$a, imm:$b, Int64Regs:$c))]>;
def MAD64rii : NVPTXInst<(outs Int64Regs:$dst),
(ins Int64Regs:$a, i64imm:$b, i64imm:$c),
"mad.lo.s64 \t$dst, $a, $b, $c;",
- [(set Int64Regs:$dst, (add
- (mul Int64Regs:$a, imm:$b), imm:$c))]>;
-
+ [(set Int64Regs:$dst,
+ (imad Int64Regs:$a, imm:$b, imm:$c))]>;
def INEG16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
"neg.s16 \t$dst, $src;",
@@ -689,12 +745,24 @@ def FDIV32approxrr_ftz : NVPTXInst<(outs Float32Regs:$dst),
[(set Float32Regs:$dst,
(fdiv Float32Regs:$a, Float32Regs:$b))]>,
Requires<[do_DIVF32_APPROX, doF32FTZ]>;
+def FDIV32approxri_ftz : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.approx.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst,
+ (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[do_DIVF32_APPROX, doF32FTZ]>;
def FDIV32approxrr : NVPTXInst<(outs Float32Regs:$dst),
(ins Float32Regs:$a, Float32Regs:$b),
"div.approx.f32 \t$dst, $a, $b;",
[(set Float32Regs:$dst,
(fdiv Float32Regs:$a, Float32Regs:$b))]>,
Requires<[do_DIVF32_APPROX]>;
+def FDIV32approxri : NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.approx.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst,
+ (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[do_DIVF32_APPROX]>;
//
// F32 Semi-accurate reciprocal
//
@@ -797,36 +865,26 @@ multiclass FPCONTRACT32<string OpcStr, Predicate Pred> {
def rrr : NVPTXInst<(outs Float32Regs:$dst),
(ins Float32Regs:$a, Float32Regs:$b, Float32Regs:$c),
!strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set Float32Regs:$dst, (fadd
- (fmul Float32Regs:$a, Float32Regs:$b),
- Float32Regs:$c))]>, Requires<[Pred]>;
- // This is to WAR a weird bug in Tablegen that does not automatically
- // generate the following permutated rule rrr2 from the above rrr.
- // So we explicitly add it here. This happens to FMA32 only.
- // See the comments at FMAD32 and FMA32 for more information.
- def rrr2 : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b, Float32Regs:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set Float32Regs:$dst, (fadd Float32Regs:$c,
- (fmul Float32Regs:$a, Float32Regs:$b)))]>,
+ [(set Float32Regs:$dst,
+ (fma Float32Regs:$a, Float32Regs:$b, Float32Regs:$c))]>,
Requires<[Pred]>;
def rri : NVPTXInst<(outs Float32Regs:$dst),
(ins Float32Regs:$a, Float32Regs:$b, f32imm:$c),
!strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set Float32Regs:$dst, (fadd
- (fmul Float32Regs:$a, Float32Regs:$b), fpimm:$c))]>,
+ [(set Float32Regs:$dst,
+ (fma Float32Regs:$a, Float32Regs:$b, fpimm:$c))]>,
Requires<[Pred]>;
def rir : NVPTXInst<(outs Float32Regs:$dst),
(ins Float32Regs:$a, f32imm:$b, Float32Regs:$c),
!strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set Float32Regs:$dst, (fadd
- (fmul Float32Regs:$a, fpimm:$b), Float32Regs:$c))]>,
+ [(set Float32Regs:$dst,
+ (fma Float32Regs:$a, fpimm:$b, Float32Regs:$c))]>,
Requires<[Pred]>;
def rii : NVPTXInst<(outs Float32Regs:$dst),
(ins Float32Regs:$a, f32imm:$b, f32imm:$c),
!strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set Float32Regs:$dst, (fadd
- (fmul Float32Regs:$a, fpimm:$b), fpimm:$c))]>,
+ [(set Float32Regs:$dst,
+ (fma Float32Regs:$a, fpimm:$b, fpimm:$c))]>,
Requires<[Pred]>;
}
@@ -834,73 +892,32 @@ multiclass FPCONTRACT64<string OpcStr, Predicate Pred> {
def rrr : NVPTXInst<(outs Float64Regs:$dst),
(ins Float64Regs:$a, Float64Regs:$b, Float64Regs:$c),
!strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set Float64Regs:$dst, (fadd
- (fmul Float64Regs:$a, Float64Regs:$b),
- Float64Regs:$c))]>, Requires<[Pred]>;
+ [(set Float64Regs:$dst,
+ (fma Float64Regs:$a, Float64Regs:$b, Float64Regs:$c))]>,
+ Requires<[Pred]>;
def rri : NVPTXInst<(outs Float64Regs:$dst),
(ins Float64Regs:$a, Float64Regs:$b, f64imm:$c),
!strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set Float64Regs:$dst, (fadd (fmul Float64Regs:$a,
- Float64Regs:$b), fpimm:$c))]>, Requires<[Pred]>;
+ [(set Float64Regs:$dst,
+ (fma Float64Regs:$a, Float64Regs:$b, fpimm:$c))]>,
+ Requires<[Pred]>;
def rir : NVPTXInst<(outs Float64Regs:$dst),
(ins Float64Regs:$a, f64imm:$b, Float64Regs:$c),
!strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set Float64Regs:$dst, (fadd
- (fmul Float64Regs:$a, fpimm:$b), Float64Regs:$c))]>,
+ [(set Float64Regs:$dst,
+ (fma Float64Regs:$a, fpimm:$b, Float64Regs:$c))]>,
Requires<[Pred]>;
def rii : NVPTXInst<(outs Float64Regs:$dst),
(ins Float64Regs:$a, f64imm:$b, f64imm:$c),
!strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set Float64Regs:$dst, (fadd
- (fmul Float64Regs:$a, fpimm:$b), fpimm:$c))]>,
+ [(set Float64Regs:$dst,
+ (fma Float64Regs:$a, fpimm:$b, fpimm:$c))]>,
Requires<[Pred]>;
}
-// Due to a unknown reason (most likely a bug in tablegen), tablegen does not
-// automatically generate the rrr2 rule from
-// the rrr rule (see FPCONTRACT32) for FMA32, though it does for FMAD32.
-// If we reverse the order of the following two lines, then rrr2 rule will be
-// generated for FMA32, but not for rrr.
-// Therefore, we manually write the rrr2 rule in FPCONTRACT32.
-defm FMA32_ftz : FPCONTRACT32<"fma.rn.ftz.f32", doFMAF32_ftz>;
-defm FMA32 : FPCONTRACT32<"fma.rn.f32", doFMAF32>;
-defm FMA64 : FPCONTRACT64<"fma.rn.f64", doFMAF64>;
-
-// b*c-a => fmad(b, c, -a)
-multiclass FPCONTRACT32_SUB_PAT_MAD<NVPTXInst Inst, Predicate Pred> {
- def : Pat<(fsub (fmul Float32Regs:$b, Float32Regs:$c), Float32Regs:$a),
- (Inst Float32Regs:$b, Float32Regs:$c, (FNEGf32 Float32Regs:$a))>,
- Requires<[Pred]>;
-}
-
-// a-b*c => fmad(-b,c, a)
-// - legal because a-b*c <=> a+(-b*c) <=> a+(-b)*c
-// b*c-a => fmad(b, c, -a)
-// - legal because b*c-a <=> b*c+(-a)
-multiclass FPCONTRACT32_SUB_PAT<NVPTXInst Inst, Predicate Pred> {
- def : Pat<(fsub Float32Regs:$a, (fmul Float32Regs:$b, Float32Regs:$c)),
- (Inst (FNEGf32 Float32Regs:$b), Float32Regs:$c, Float32Regs:$a)>,
- Requires<[Pred]>;
- def : Pat<(fsub (fmul Float32Regs:$b, Float32Regs:$c), Float32Regs:$a),
- (Inst Float32Regs:$b, Float32Regs:$c, (FNEGf32 Float32Regs:$a))>,
- Requires<[Pred]>;
-}
-
-// a-b*c => fmad(-b,c, a)
-// b*c-a => fmad(b, c, -a)
-multiclass FPCONTRACT64_SUB_PAT<NVPTXInst Inst, Predicate Pred> {
- def : Pat<(fsub Float64Regs:$a, (fmul Float64Regs:$b, Float64Regs:$c)),
- (Inst (FNEGf64 Float64Regs:$b), Float64Regs:$c, Float64Regs:$a)>,
- Requires<[Pred]>;
-
- def : Pat<(fsub (fmul Float64Regs:$b, Float64Regs:$c), Float64Regs:$a),
- (Inst Float64Regs:$b, Float64Regs:$c, (FNEGf64 Float64Regs:$a))>,
- Requires<[Pred]>;
-}
-
-defm FMAF32ext_ftz : FPCONTRACT32_SUB_PAT<FMA32_ftzrrr, doFMAF32AGG_ftz>;
-defm FMAF32ext : FPCONTRACT32_SUB_PAT<FMA32rrr, doFMAF32AGG>;
-defm FMAF64ext : FPCONTRACT64_SUB_PAT<FMA64rrr, doFMAF64AGG>;
+defm FMA32_ftz : FPCONTRACT32<"fma.rn.ftz.f32", doF32FTZ>;
+defm FMA32 : FPCONTRACT32<"fma.rn.f32", true>;
+defm FMA64 : FPCONTRACT64<"fma.rn.f64", true>;
def SINF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
"sin.approx.f32 \t$dst, $src;",
@@ -1071,6 +1088,43 @@ multiclass RSHIFT_FORMAT<string OpcStr, SDNode OpNode> {
defm SRA : RSHIFT_FORMAT<"shr.s", sra>;
defm SRL : RSHIFT_FORMAT<"shr.u", srl>;
+//
+// Rotate: use ptx shf instruction if available.
+//
+
+// 32 bit r2 = rotl r1, n
+// =>
+// r2 = shf.l r1, r1, n
+def ROTL32imm_hw : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$src, i32imm:$amt),
+ "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotl Int32Regs:$src, (i32 imm:$amt)))]>,
+ Requires<[hasHWROT32]> ;
+
+def ROTL32reg_hw : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$src, Int32Regs:$amt),
+ "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[hasHWROT32]>;
+
+// 32 bit r2 = rotr r1, n
+// =>
+// r2 = shf.r r1, r1, n
+def ROTR32imm_hw : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$src, i32imm:$amt),
+ "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotr Int32Regs:$src, (i32 imm:$amt)))]>,
+ Requires<[hasHWROT32]>;
+
+def ROTR32reg_hw : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$src, Int32Regs:$amt),
+ "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[hasHWROT32]>;
+
+//
+// Rotate: if ptx shf instruction is not available, then use shift+add
+//
// 32bit
def ROT32imm_sw : NVPTXInst<(outs Int32Regs:$dst),
(ins Int32Regs:$src, i32imm:$amt1, i32imm:$amt2),
@@ -1088,9 +1142,11 @@ def SUB_FRM_32 : SDNodeXForm<imm, [{
}]>;
def : Pat<(rotl Int32Regs:$src, (i32 imm:$amt)),
- (ROT32imm_sw Int32Regs:$src, imm:$amt, (SUB_FRM_32 node:$amt))>;
+ (ROT32imm_sw Int32Regs:$src, imm:$amt, (SUB_FRM_32 node:$amt))>,
+ Requires<[noHWROT32]>;
def : Pat<(rotr Int32Regs:$src, (i32 imm:$amt)),
- (ROT32imm_sw Int32Regs:$src, (SUB_FRM_32 node:$amt), imm:$amt)>;
+ (ROT32imm_sw Int32Regs:$src, (SUB_FRM_32 node:$amt), imm:$amt)>,
+ Requires<[noHWROT32]>;
def ROTL32reg_sw : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src,
Int32Regs:$amt),
@@ -1103,7 +1159,8 @@ def ROTL32reg_sw : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src,
!strconcat("shr.b32 \t%rhs, $src, %amt2;\n\t",
!strconcat("add.u32 \t$dst, %lhs, %rhs;\n\t",
!strconcat("}}", ""))))))))),
- [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>;
+ [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[noHWROT32]>;
def ROTR32reg_sw : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src,
Int32Regs:$amt),
@@ -1116,7 +1173,8 @@ def ROTR32reg_sw : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src,
!strconcat("shl.b32 \t%rhs, $src, %amt2;\n\t",
!strconcat("add.u32 \t$dst, %lhs, %rhs;\n\t",
!strconcat("}}", ""))))))))),
- [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>;
+ [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[noHWROT32]>;
// 64bit
def ROT64imm_sw : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src,
@@ -1165,6 +1223,29 @@ def ROTR64reg_sw : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src,
!strconcat("}}", ""))))))))),
[(set Int64Regs:$dst, (rotr Int64Regs:$src, Int32Regs:$amt))]>;
+// BFE - bit-field extract
+
+multiclass BFE<string TyStr, RegisterClass RC> {
+ // BFE supports both 32-bit and 64-bit values, but the start and length
+ // operands are always 32-bit
+ def rrr
+ : NVPTXInst<(outs RC:$d),
+ (ins RC:$a, Int32Regs:$b, Int32Regs:$c),
+ !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
+ def rri
+ : NVPTXInst<(outs RC:$d),
+ (ins RC:$a, Int32Regs:$b, i32imm:$c),
+ !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
+ def rii
+ : NVPTXInst<(outs RC:$d),
+ (ins RC:$a, i32imm:$b, i32imm:$c),
+ !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
+}
+
+defm BFE_S32 : BFE<"s32", Int32Regs>;
+defm BFE_U32 : BFE<"u32", Int32Regs>;
+defm BFE_S64 : BFE<"s64", Int64Regs>;
+defm BFE_U64 : BFE<"u64", Int64Regs>;
//-----------------------------------
// General Comparison
@@ -1280,6 +1361,32 @@ def : Pat<(i1 (select Int1Regs:$p, Int1Regs:$a, Int1Regs:$b)),
(ORb1rr (ANDb1rr Int1Regs:$p, Int1Regs:$a),
(ANDb1rr (NOT1 Int1Regs:$p), Int1Regs:$b))>;
+//
+// Funnel shift in clamp mode
+//
+// - SDNodes are created so they can be used in the DAG code,
+// e.g. NVPTXISelLowering (LowerShiftLeftParts and LowerShiftRightParts)
+//
+def SDTIntShiftDOp: SDTypeProfile<1, 3,
+ [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
+ SDTCisInt<0>, SDTCisInt<3>]>;
+def FUN_SHFL_CLAMP : SDNode<"NVPTXISD::FUN_SHFL_CLAMP", SDTIntShiftDOp, []>;
+def FUN_SHFR_CLAMP : SDNode<"NVPTXISD::FUN_SHFR_CLAMP", SDTIntShiftDOp, []>;
+
+def FUNSHFLCLAMP : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
+ "shf.l.clamp.b32 \t$dst, $lo, $hi, $amt;",
+ [(set Int32Regs:$dst,
+ (FUN_SHFL_CLAMP Int32Regs:$lo,
+ Int32Regs:$hi, Int32Regs:$amt))]>;
+
+def FUNSHFRCLAMP : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
+ "shf.r.clamp.b32 \t$dst, $lo, $hi, $amt;",
+ [(set Int32Regs:$dst,
+ (FUN_SHFR_CLAMP Int32Regs:$lo,
+ Int32Regs:$hi, Int32Regs:$amt))]>;
+
//-----------------------------------
// Data Movement (Load / Store, Move)
//-----------------------------------
@@ -1807,7 +1914,7 @@ def StoreParamV2I8 : StoreParamV2Inst<Int16Regs, ".b8">;
def StoreParamV4I32 : NVPTXInst<(outs), (ins Int32Regs:$val, Int32Regs:$val2,
Int32Regs:$val3, Int32Regs:$val4,
i32imm:$a, i32imm:$b),
- "st.param.b32\t[param$a+$b], {{$val, $val2, $val3, $val4}};",
+ "st.param.v4.b32\t[param$a+$b], {{$val, $val2, $val3, $val4}};",
[]>;
def StoreParamV4I16 : NVPTXInst<(outs), (ins Int16Regs:$val, Int16Regs:$val2,
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/contrib/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index 14049b1..14e51aa 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -792,13 +792,18 @@ def INT_NVVM_H2F : F_MATH_1<!strconcat("{{\n\t",
"}}")))),
Float32Regs, Int16Regs, int_nvvm_h2f>;
-def : Pat<(f32 (f16_to_f32 Int16Regs:$a)),
+def : Pat<(f32 (f16_to_fp Int16Regs:$a)),
(CVT_f32_f16 Int16Regs:$a, CvtNONE)>;
-def : Pat<(i16 (f32_to_f16 Float32Regs:$a)),
+def : Pat<(i16 (fp_to_f16 Float32Regs:$a)),
(CVT_f16_f32 Float32Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i16 (f32_to_f16 Float32Regs:$a)),
+def : Pat<(i16 (fp_to_f16 Float32Regs:$a)),
(CVT_f16_f32 Float32Regs:$a, CvtRN)>;
+def : Pat<(f64 (f16_to_fp Int16Regs:$a)),
+ (CVT_f64_f16 Int16Regs:$a, CvtNONE)>;
+def : Pat<(i16 (fp_to_f16 Float64Regs:$a)),
+ (CVT_f16_f64 Float64Regs:$a, CvtRN)>;
+
//
// Bitcast
//
@@ -1057,12 +1062,24 @@ def atomic_load_max_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
(atomic_load_max_32 node:$a, node:$b)>;
def atomic_load_max_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
(atomic_load_max_32 node:$a, node:$b)>;
+def atomic_load_max_64_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b)
+ , (atomic_load_max_64 node:$a, node:$b)>;
+def atomic_load_max_64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_load_max_64 node:$a, node:$b)>;
+def atomic_load_max_64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_load_max_64 node:$a, node:$b)>;
def atomic_load_umax_32_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
(atomic_load_umax_32 node:$a, node:$b)>;
def atomic_load_umax_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
(atomic_load_umax_32 node:$a, node:$b)>;
def atomic_load_umax_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
(atomic_load_umax_32 node:$a, node:$b)>;
+def atomic_load_umax_64_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (atomic_load_umax_64 node:$a, node:$b)>;
+def atomic_load_umax_64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_load_umax_64 node:$a, node:$b)>;
+def atomic_load_umax_64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_load_umax_64 node:$a, node:$b)>;
defm INT_PTX_ATOM_LOAD_MAX_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".s32",
".max", atomic_load_max_32_g, i32imm, imm, hasAtomRedG32>;
@@ -1072,6 +1089,14 @@ defm INT_PTX_ATOM_LOAD_MAX_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".s32", ".max",
atomic_load_max_32_gen, i32imm, imm, hasAtomRedGen32>;
defm INT_PTX_ATOM_LOAD_MAX_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global",
".s32", ".max", atomic_load_max_32_gen, i32imm, imm, useAtomRedG32forGen32>;
+defm INT_PTX_ATOM_LOAD_MAX_G_64 : F_ATOMIC_2<Int64Regs, ".global", ".s64",
+ ".max", atomic_load_max_64_g, i64imm, imm, hasAtomRedG64>;
+defm INT_PTX_ATOM_LOAD_MAX_S_64 : F_ATOMIC_2<Int64Regs, ".shared", ".s64",
+ ".max", atomic_load_max_64_s, i64imm, imm, hasAtomRedS64>;
+defm INT_PTX_ATOM_LOAD_MAX_GEN_64 : F_ATOMIC_2<Int64Regs, "", ".s64", ".max",
+ atomic_load_max_64_gen, i64imm, imm, hasAtomRedGen64>;
+defm INT_PTX_ATOM_LOAD_MAX_GEN_64_USE_G : F_ATOMIC_2<Int64Regs, ".global",
+ ".s64", ".max", atomic_load_max_64_gen, i64imm, imm, useAtomRedG64forGen64>;
defm INT_PTX_ATOM_LOAD_UMAX_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".u32",
".max", atomic_load_umax_32_g, i32imm, imm, hasAtomRedG32>;
defm INT_PTX_ATOM_LOAD_UMAX_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".u32",
@@ -1080,6 +1105,14 @@ defm INT_PTX_ATOM_LOAD_UMAX_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".u32", ".max",
atomic_load_umax_32_gen, i32imm, imm, hasAtomRedGen32>;
defm INT_PTX_ATOM_LOAD_UMAX_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global",
".u32", ".max", atomic_load_umax_32_gen, i32imm, imm, useAtomRedG32forGen32>;
+defm INT_PTX_ATOM_LOAD_UMAX_G_64 : F_ATOMIC_2<Int64Regs, ".global", ".u64",
+ ".max", atomic_load_umax_64_g, i64imm, imm, hasAtomRedG64>;
+defm INT_PTX_ATOM_LOAD_UMAX_S_64 : F_ATOMIC_2<Int64Regs, ".shared", ".u64",
+ ".max", atomic_load_umax_64_s, i64imm, imm, hasAtomRedS64>;
+defm INT_PTX_ATOM_LOAD_UMAX_GEN_64 : F_ATOMIC_2<Int64Regs, "", ".u64", ".max",
+ atomic_load_umax_64_gen, i64imm, imm, hasAtomRedGen64>;
+defm INT_PTX_ATOM_LOAD_UMAX_GEN_64_USE_G : F_ATOMIC_2<Int64Regs, ".global",
+ ".u64", ".max", atomic_load_umax_64_gen, i64imm, imm, useAtomRedG64forGen64>;
// atom_min
@@ -1089,12 +1122,24 @@ def atomic_load_min_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
(atomic_load_min_32 node:$a, node:$b)>;
def atomic_load_min_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
(atomic_load_min_32 node:$a, node:$b)>;
+def atomic_load_min_64_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (atomic_load_min_64 node:$a, node:$b)>;
+def atomic_load_min_64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_load_min_64 node:$a, node:$b)>;
+def atomic_load_min_64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_load_min_64 node:$a, node:$b)>;
def atomic_load_umin_32_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
(atomic_load_umin_32 node:$a, node:$b)>;
def atomic_load_umin_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
(atomic_load_umin_32 node:$a, node:$b)>;
def atomic_load_umin_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
(atomic_load_umin_32 node:$a, node:$b)>;
+def atomic_load_umin_64_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (atomic_load_umin_64 node:$a, node:$b)>;
+def atomic_load_umin_64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_load_umin_64 node:$a, node:$b)>;
+def atomic_load_umin_64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_load_umin_64 node:$a, node:$b)>;
defm INT_PTX_ATOM_LOAD_MIN_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".s32",
".min", atomic_load_min_32_g, i32imm, imm, hasAtomRedG32>;
@@ -1104,6 +1149,14 @@ defm INT_PTX_ATOM_LOAD_MIN_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".s32", ".min",
atomic_load_min_32_gen, i32imm, imm, hasAtomRedGen32>;
defm INT_PTX_ATOM_LOAD_MIN_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global",
".s32", ".min", atomic_load_min_32_gen, i32imm, imm, useAtomRedG32forGen32>;
+defm INT_PTX_ATOM_LOAD_MIN_G_64 : F_ATOMIC_2<Int64Regs, ".global", ".s64",
+ ".min", atomic_load_min_64_g, i64imm, imm, hasAtomRedG64>;
+defm INT_PTX_ATOM_LOAD_MIN_S_64 : F_ATOMIC_2<Int64Regs, ".shared", ".s64",
+ ".min", atomic_load_min_64_s, i64imm, imm, hasAtomRedS64>;
+defm INT_PTX_ATOM_LOAD_MIN_GEN_64 : F_ATOMIC_2<Int64Regs, "", ".s64", ".min",
+ atomic_load_min_64_gen, i64imm, imm, hasAtomRedGen64>;
+defm INT_PTX_ATOM_LOAD_MIN_GEN_64_USE_G : F_ATOMIC_2<Int64Regs, ".global",
+ ".s64", ".min", atomic_load_min_64_gen, i64imm, imm, useAtomRedG64forGen64>;
defm INT_PTX_ATOM_LOAD_UMIN_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".u32",
".min", atomic_load_umin_32_g, i32imm, imm, hasAtomRedG32>;
defm INT_PTX_ATOM_LOAD_UMIN_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".u32",
@@ -1112,6 +1165,14 @@ defm INT_PTX_ATOM_LOAD_UMIN_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".u32", ".min",
atomic_load_umin_32_gen, i32imm, imm, hasAtomRedGen32>;
defm INT_PTX_ATOM_LOAD_UMIN_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global",
".u32", ".min", atomic_load_umin_32_gen, i32imm, imm, useAtomRedG32forGen32>;
+defm INT_PTX_ATOM_LOAD_UMIN_G_64 : F_ATOMIC_2<Int64Regs, ".global", ".u64",
+ ".min", atomic_load_umin_64_g, i64imm, imm, hasAtomRedG64>;
+defm INT_PTX_ATOM_LOAD_UMIN_S_64 : F_ATOMIC_2<Int64Regs, ".shared", ".u64",
+ ".min", atomic_load_umin_64_s, i64imm, imm, hasAtomRedS64>;
+defm INT_PTX_ATOM_LOAD_UMIN_GEN_64 : F_ATOMIC_2<Int64Regs, "", ".u64", ".min",
+ atomic_load_umin_64_gen, i64imm, imm, hasAtomRedGen64>;
+defm INT_PTX_ATOM_LOAD_UMIN_GEN_64_USE_G : F_ATOMIC_2<Int64Regs, ".global",
+ ".u64", ".min", atomic_load_umin_64_gen, i64imm, imm, useAtomRedG64forGen64>;
// atom_inc atom_dec
@@ -1153,6 +1214,12 @@ def atomic_load_and_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
(atomic_load_and_32 node:$a, node:$b)>;
def atomic_load_and_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
(atomic_load_and_32 node:$a, node:$b)>;
+def atomic_load_and_64_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (atomic_load_and_64 node:$a, node:$b)>;
+def atomic_load_and_64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_load_and_64 node:$a, node:$b)>;
+def atomic_load_and_64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_load_and_64 node:$a, node:$b)>;
defm INT_PTX_ATOM_AND_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".b32", ".and",
atomic_load_and_32_g, i32imm, imm, hasAtomRedG32>;
@@ -1162,6 +1229,14 @@ defm INT_PTX_ATOM_AND_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".b32", ".and",
atomic_load_and_32_gen, i32imm, imm, hasAtomRedGen32>;
defm INT_PTX_ATOM_AND_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global", ".b32",
".and", atomic_load_and_32_gen, i32imm, imm, useAtomRedG32forGen32>;
+defm INT_PTX_ATOM_AND_G_64 : F_ATOMIC_2<Int64Regs, ".global", ".b64", ".and",
+ atomic_load_and_64_g, i64imm, imm, hasAtomRedG64>;
+defm INT_PTX_ATOM_AND_S_64 : F_ATOMIC_2<Int64Regs, ".shared", ".b64", ".and",
+ atomic_load_and_64_s, i64imm, imm, hasAtomRedS64>;
+defm INT_PTX_ATOM_AND_GEN_64 : F_ATOMIC_2<Int64Regs, "", ".b64", ".and",
+ atomic_load_and_64_gen, i64imm, imm, hasAtomRedGen64>;
+defm INT_PTX_ATOM_AND_GEN_64_USE_G : F_ATOMIC_2<Int64Regs, ".global", ".b64",
+ ".and", atomic_load_and_64_gen, i64imm, imm, useAtomRedG64forGen64>;
// atom_or
@@ -1171,6 +1246,12 @@ def atomic_load_or_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
(atomic_load_or_32 node:$a, node:$b)>;
def atomic_load_or_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
(atomic_load_or_32 node:$a, node:$b)>;
+def atomic_load_or_64_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (atomic_load_or_64 node:$a, node:$b)>;
+def atomic_load_or_64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_load_or_64 node:$a, node:$b)>;
+def atomic_load_or_64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_load_or_64 node:$a, node:$b)>;
defm INT_PTX_ATOM_OR_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".b32", ".or",
atomic_load_or_32_g, i32imm, imm, hasAtomRedG32>;
@@ -1180,6 +1261,14 @@ defm INT_PTX_ATOM_OR_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global", ".b32",
".or", atomic_load_or_32_gen, i32imm, imm, useAtomRedG32forGen32>;
defm INT_PTX_ATOM_OR_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".b32", ".or",
atomic_load_or_32_s, i32imm, imm, hasAtomRedS32>;
+defm INT_PTX_ATOM_OR_G_64 : F_ATOMIC_2<Int64Regs, ".global", ".b64", ".or",
+ atomic_load_or_64_g, i64imm, imm, hasAtomRedG64>;
+defm INT_PTX_ATOM_OR_GEN_64 : F_ATOMIC_2<Int64Regs, "", ".b64", ".or",
+ atomic_load_or_64_gen, i64imm, imm, hasAtomRedGen64>;
+defm INT_PTX_ATOM_OR_GEN_64_USE_G : F_ATOMIC_2<Int64Regs, ".global", ".b64",
+ ".or", atomic_load_or_64_gen, i64imm, imm, useAtomRedG64forGen64>;
+defm INT_PTX_ATOM_OR_S_64 : F_ATOMIC_2<Int64Regs, ".shared", ".b64", ".or",
+ atomic_load_or_64_s, i64imm, imm, hasAtomRedS64>;
// atom_xor
@@ -1189,6 +1278,12 @@ def atomic_load_xor_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
(atomic_load_xor_32 node:$a, node:$b)>;
def atomic_load_xor_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
(atomic_load_xor_32 node:$a, node:$b)>;
+def atomic_load_xor_64_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+ (atomic_load_xor_64 node:$a, node:$b)>;
+def atomic_load_xor_64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+ (atomic_load_xor_64 node:$a, node:$b)>;
+def atomic_load_xor_64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+ (atomic_load_xor_64 node:$a, node:$b)>;
defm INT_PTX_ATOM_XOR_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".b32", ".xor",
atomic_load_xor_32_g, i32imm, imm, hasAtomRedG32>;
@@ -1198,6 +1293,14 @@ defm INT_PTX_ATOM_XOR_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".b32", ".xor",
atomic_load_xor_32_gen, i32imm, imm, hasAtomRedGen32>;
defm INT_PTX_ATOM_XOR_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global", ".b32",
".xor", atomic_load_xor_32_gen, i32imm, imm, useAtomRedG32forGen32>;
+defm INT_PTX_ATOM_XOR_G_64 : F_ATOMIC_2<Int64Regs, ".global", ".b64", ".xor",
+ atomic_load_xor_64_g, i64imm, imm, hasAtomRedG64>;
+defm INT_PTX_ATOM_XOR_S_64 : F_ATOMIC_2<Int64Regs, ".shared", ".b64", ".xor",
+ atomic_load_xor_64_s, i64imm, imm, hasAtomRedS64>;
+defm INT_PTX_ATOM_XOR_GEN_64 : F_ATOMIC_2<Int64Regs, "", ".b64", ".xor",
+ atomic_load_xor_64_gen, i64imm, imm, hasAtomRedGen64>;
+defm INT_PTX_ATOM_XOR_GEN_64_USE_G : F_ATOMIC_2<Int64Regs, ".global", ".b64",
+ ".xor", atomic_load_xor_64_gen, i64imm, imm, useAtomRedG64forGen64>;
// atom_cas
@@ -1276,67 +1379,33 @@ def INT_PTX_SREG_WARPSIZE : F_SREG<"mov.u32 \t$dst, WARP_SZ;", Int32Regs,
// Support for ldu on sm_20 or later
//-----------------------------------
-def ldu_i8 : PatFrag<(ops node:$ptr), (int_nvvm_ldu_global_i node:$ptr), [{
- MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
- return M->getMemoryVT() == MVT::i8;
-}]>;
-
// Scalar
-// @TODO: Revisit this, Changed imemAny to imem
-multiclass LDU_G<string TyStr, NVPTXRegClass regclass, Intrinsic IntOp> {
+multiclass LDU_G<string TyStr, NVPTXRegClass regclass> {
def areg: NVPTXInst<(outs regclass:$result), (ins Int32Regs:$src),
!strconcat("ldu.global.", TyStr),
- [(set regclass:$result, (IntOp Int32Regs:$src))]>, Requires<[hasLDU]>;
+ []>, Requires<[hasLDU]>;
def areg64: NVPTXInst<(outs regclass:$result), (ins Int64Regs:$src),
!strconcat("ldu.global.", TyStr),
- [(set regclass:$result, (IntOp Int64Regs:$src))]>, Requires<[hasLDU]>;
- def avar: NVPTXInst<(outs regclass:$result), (ins imem:$src),
+ []>, Requires<[hasLDU]>;
+ def avar: NVPTXInst<(outs regclass:$result), (ins imemAny:$src),
!strconcat("ldu.global.", TyStr),
- [(set regclass:$result, (IntOp (Wrapper tglobaladdr:$src)))]>,
- Requires<[hasLDU]>;
+ []>, Requires<[hasLDU]>;
def ari : NVPTXInst<(outs regclass:$result), (ins MEMri:$src),
!strconcat("ldu.global.", TyStr),
- [(set regclass:$result, (IntOp ADDRri:$src))]>, Requires<[hasLDU]>;
+ []>, Requires<[hasLDU]>;
def ari64 : NVPTXInst<(outs regclass:$result), (ins MEMri64:$src),
!strconcat("ldu.global.", TyStr),
- [(set regclass:$result, (IntOp ADDRri64:$src))]>, Requires<[hasLDU]>;
+ []>, Requires<[hasLDU]>;
}
-multiclass LDU_G_NOINTRIN<string TyStr, NVPTXRegClass regclass, PatFrag IntOp> {
- def areg: NVPTXInst<(outs regclass:$result), (ins Int32Regs:$src),
- !strconcat("ldu.global.", TyStr),
- [(set regclass:$result, (IntOp Int32Regs:$src))]>, Requires<[hasLDU]>;
- def areg64: NVPTXInst<(outs regclass:$result), (ins Int64Regs:$src),
- !strconcat("ldu.global.", TyStr),
- [(set regclass:$result, (IntOp Int64Regs:$src))]>, Requires<[hasLDU]>;
- def avar: NVPTXInst<(outs regclass:$result), (ins imem:$src),
- !strconcat("ldu.global.", TyStr),
- [(set regclass:$result, (IntOp (Wrapper tglobaladdr:$src)))]>,
- Requires<[hasLDU]>;
- def ari : NVPTXInst<(outs regclass:$result), (ins MEMri:$src),
- !strconcat("ldu.global.", TyStr),
- [(set regclass:$result, (IntOp ADDRri:$src))]>, Requires<[hasLDU]>;
- def ari64 : NVPTXInst<(outs regclass:$result), (ins MEMri64:$src),
- !strconcat("ldu.global.", TyStr),
- [(set regclass:$result, (IntOp ADDRri64:$src))]>, Requires<[hasLDU]>;
-}
-
-defm INT_PTX_LDU_GLOBAL_i8 : LDU_G_NOINTRIN<"u8 \t$result, [$src];", Int16Regs,
- ldu_i8>;
-defm INT_PTX_LDU_GLOBAL_i16 : LDU_G<"u16 \t$result, [$src];", Int16Regs,
-int_nvvm_ldu_global_i>;
-defm INT_PTX_LDU_GLOBAL_i32 : LDU_G<"u32 \t$result, [$src];", Int32Regs,
-int_nvvm_ldu_global_i>;
-defm INT_PTX_LDU_GLOBAL_i64 : LDU_G<"u64 \t$result, [$src];", Int64Regs,
-int_nvvm_ldu_global_i>;
-defm INT_PTX_LDU_GLOBAL_f32 : LDU_G<"f32 \t$result, [$src];", Float32Regs,
-int_nvvm_ldu_global_f>;
-defm INT_PTX_LDU_GLOBAL_f64 : LDU_G<"f64 \t$result, [$src];", Float64Regs,
-int_nvvm_ldu_global_f>;
-defm INT_PTX_LDU_GLOBAL_p32 : LDU_G<"u32 \t$result, [$src];", Int32Regs,
-int_nvvm_ldu_global_p>;
-defm INT_PTX_LDU_GLOBAL_p64 : LDU_G<"u64 \t$result, [$src];", Int64Regs,
-int_nvvm_ldu_global_p>;
+defm INT_PTX_LDU_GLOBAL_i8 : LDU_G<"u8 \t$result, [$src];", Int16Regs>;
+defm INT_PTX_LDU_GLOBAL_i16 : LDU_G<"u16 \t$result, [$src];", Int16Regs>;
+defm INT_PTX_LDU_GLOBAL_i32 : LDU_G<"u32 \t$result, [$src];", Int32Regs>;
+defm INT_PTX_LDU_GLOBAL_i64 : LDU_G<"u64 \t$result, [$src];", Int64Regs>;
+defm INT_PTX_LDU_GLOBAL_f32 : LDU_G<"f32 \t$result, [$src];", Float32Regs>;
+defm INT_PTX_LDU_GLOBAL_f64 : LDU_G<"f64 \t$result, [$src];", Float64Regs>;
+defm INT_PTX_LDU_GLOBAL_p32 : LDU_G<"u32 \t$result, [$src];", Int32Regs>;
+defm INT_PTX_LDU_GLOBAL_p64 : LDU_G<"u64 \t$result, [$src];", Int64Regs>;
// vector
@@ -1406,65 +1475,40 @@ defm INT_PTX_LDU_G_v4f32_ELE
// Support for ldg on sm_35 or later
//-----------------------------------
-def ldg_i8 : PatFrag<(ops node:$ptr), (int_nvvm_ldg_global_i node:$ptr), [{
- MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
- return M->getMemoryVT() == MVT::i8;
-}]>;
-
-multiclass LDG_G<string TyStr, NVPTXRegClass regclass, Intrinsic IntOp> {
+multiclass LDG_G<string TyStr, NVPTXRegClass regclass> {
def areg: NVPTXInst<(outs regclass:$result), (ins Int32Regs:$src),
!strconcat("ld.global.nc.", TyStr),
- [(set regclass:$result, (IntOp Int32Regs:$src))]>, Requires<[hasLDG]>;
+ []>, Requires<[hasLDG]>;
def areg64: NVPTXInst<(outs regclass:$result), (ins Int64Regs:$src),
!strconcat("ld.global.nc.", TyStr),
- [(set regclass:$result, (IntOp Int64Regs:$src))]>, Requires<[hasLDG]>;
- def avar: NVPTXInst<(outs regclass:$result), (ins imem:$src),
+ []>, Requires<[hasLDG]>;
+ def avar: NVPTXInst<(outs regclass:$result), (ins imemAny:$src),
!strconcat("ld.global.nc.", TyStr),
- [(set regclass:$result, (IntOp (Wrapper tglobaladdr:$src)))]>,
- Requires<[hasLDG]>;
+ []>, Requires<[hasLDG]>;
def ari : NVPTXInst<(outs regclass:$result), (ins MEMri:$src),
!strconcat("ld.global.nc.", TyStr),
- [(set regclass:$result, (IntOp ADDRri:$src))]>, Requires<[hasLDG]>;
+ []>, Requires<[hasLDG]>;
def ari64 : NVPTXInst<(outs regclass:$result), (ins MEMri64:$src),
!strconcat("ld.global.nc.", TyStr),
- [(set regclass:$result, (IntOp ADDRri64:$src))]>, Requires<[hasLDG]>;
-}
-
-multiclass LDG_G_NOINTRIN<string TyStr, NVPTXRegClass regclass, PatFrag IntOp> {
- def areg: NVPTXInst<(outs regclass:$result), (ins Int32Regs:$src),
- !strconcat("ld.global.nc.", TyStr),
- [(set regclass:$result, (IntOp Int32Regs:$src))]>, Requires<[hasLDG]>;
- def areg64: NVPTXInst<(outs regclass:$result), (ins Int64Regs:$src),
- !strconcat("ld.global.nc.", TyStr),
- [(set regclass:$result, (IntOp Int64Regs:$src))]>, Requires<[hasLDG]>;
- def avar: NVPTXInst<(outs regclass:$result), (ins imem:$src),
- !strconcat("ld.global.nc.", TyStr),
- [(set regclass:$result, (IntOp (Wrapper tglobaladdr:$src)))]>,
- Requires<[hasLDG]>;
- def ari : NVPTXInst<(outs regclass:$result), (ins MEMri:$src),
- !strconcat("ld.global.nc.", TyStr),
- [(set regclass:$result, (IntOp ADDRri:$src))]>, Requires<[hasLDG]>;
- def ari64 : NVPTXInst<(outs regclass:$result), (ins MEMri64:$src),
- !strconcat("ld.global.nc.", TyStr),
- [(set regclass:$result, (IntOp ADDRri64:$src))]>, Requires<[hasLDG]>;
+ []>, Requires<[hasLDG]>;
}
defm INT_PTX_LDG_GLOBAL_i8
- : LDG_G_NOINTRIN<"u8 \t$result, [$src];", Int16Regs, ldg_i8>;
+ : LDG_G<"u8 \t$result, [$src];", Int16Regs>;
defm INT_PTX_LDG_GLOBAL_i16
- : LDG_G<"u16 \t$result, [$src];", Int16Regs, int_nvvm_ldg_global_i>;
+ : LDG_G<"u16 \t$result, [$src];", Int16Regs>;
defm INT_PTX_LDG_GLOBAL_i32
- : LDG_G<"u32 \t$result, [$src];", Int32Regs, int_nvvm_ldg_global_i>;
+ : LDG_G<"u32 \t$result, [$src];", Int32Regs>;
defm INT_PTX_LDG_GLOBAL_i64
- : LDG_G<"u64 \t$result, [$src];", Int64Regs, int_nvvm_ldg_global_i>;
+ : LDG_G<"u64 \t$result, [$src];", Int64Regs>;
defm INT_PTX_LDG_GLOBAL_f32
- : LDG_G<"f32 \t$result, [$src];", Float32Regs, int_nvvm_ldg_global_f>;
+ : LDG_G<"f32 \t$result, [$src];", Float32Regs>;
defm INT_PTX_LDG_GLOBAL_f64
- : LDG_G<"f64 \t$result, [$src];", Float64Regs, int_nvvm_ldg_global_f>;
+ : LDG_G<"f64 \t$result, [$src];", Float64Regs>;
defm INT_PTX_LDG_GLOBAL_p32
- : LDG_G<"u32 \t$result, [$src];", Int32Regs, int_nvvm_ldg_global_p>;
+ : LDG_G<"u32 \t$result, [$src];", Int32Regs>;
defm INT_PTX_LDG_GLOBAL_p64
- : LDG_G<"u64 \t$result, [$src];", Int64Regs, int_nvvm_ldg_global_p>;
+ : LDG_G<"u64 \t$result, [$src];", Int64Regs>;
// vector
@@ -1666,6 +1710,9 @@ def : Pat<(i32 (int_nvvm_ptr_gen_to_local (int_nvvm_ptr_local_to_gen
(MoveParam texternalsym:$src)))),
(nvvm_move_ptr32 texternalsym:$src)>;
+def texsurf_handles
+ : NVPTXInst<(outs Int64Regs:$result), (ins imem:$src),
+ "mov.u64 \t$result, $src;", []>;
//-----------------------------------
// Compiler Error Warn
@@ -1686,6 +1733,5224 @@ def INT_NVVM_COMPILER_ERROR_64 : NVPTXInst<(outs), (ins Int64Regs:$a),
[(int_nvvm_compiler_error Int64Regs:$a)]>;
+// isspacep
+
+def ISSPACEP_CONST_32
+ : NVPTXInst<(outs Int1Regs:$d), (ins Int32Regs:$a),
+ "isspacep.const \t$d, $a;",
+ [(set Int1Regs:$d, (int_nvvm_isspacep_const Int32Regs:$a))]>,
+ Requires<[hasPTX31]>;
+def ISSPACEP_CONST_64
+ : NVPTXInst<(outs Int1Regs:$d), (ins Int64Regs:$a),
+ "isspacep.const \t$d, $a;",
+ [(set Int1Regs:$d, (int_nvvm_isspacep_const Int64Regs:$a))]>,
+ Requires<[hasPTX31]>;
+def ISSPACEP_GLOBAL_32
+ : NVPTXInst<(outs Int1Regs:$d), (ins Int32Regs:$a),
+ "isspacep.global \t$d, $a;",
+ [(set Int1Regs:$d, (int_nvvm_isspacep_global Int32Regs:$a))]>;
+def ISSPACEP_GLOBAL_64
+ : NVPTXInst<(outs Int1Regs:$d), (ins Int64Regs:$a),
+ "isspacep.global \t$d, $a;",
+ [(set Int1Regs:$d, (int_nvvm_isspacep_global Int64Regs:$a))]>;
+def ISSPACEP_LOCAL_32
+ : NVPTXInst<(outs Int1Regs:$d), (ins Int32Regs:$a),
+ "isspacep.local \t$d, $a;",
+ [(set Int1Regs:$d, (int_nvvm_isspacep_local Int32Regs:$a))]>;
+def ISSPACEP_LOCAL_64
+ : NVPTXInst<(outs Int1Regs:$d), (ins Int64Regs:$a),
+ "isspacep.local \t$d, $a;",
+ [(set Int1Regs:$d, (int_nvvm_isspacep_local Int64Regs:$a))]>;
+def ISSPACEP_SHARED_32
+ : NVPTXInst<(outs Int1Regs:$d), (ins Int32Regs:$a),
+ "isspacep.shared \t$d, $a;",
+ [(set Int1Regs:$d, (int_nvvm_isspacep_shared Int32Regs:$a))]>;
+def ISSPACEP_SHARED_64
+ : NVPTXInst<(outs Int1Regs:$d), (ins Int64Regs:$a),
+ "isspacep.shared \t$d, $a;",
+ [(set Int1Regs:$d, (int_nvvm_isspacep_shared Int64Regs:$a))]>;
+
+
+// Special register reads
+def MOV_SPECIAL : NVPTXInst<(outs Int32Regs:$d),
+ (ins SpecialRegs:$r),
+ "mov.b32\t$d, $r;", []>;
+
+def : Pat<(int_nvvm_read_ptx_sreg_envreg0), (MOV_SPECIAL ENVREG0)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg1), (MOV_SPECIAL ENVREG1)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg2), (MOV_SPECIAL ENVREG2)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg3), (MOV_SPECIAL ENVREG3)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg4), (MOV_SPECIAL ENVREG4)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg5), (MOV_SPECIAL ENVREG5)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg6), (MOV_SPECIAL ENVREG6)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg7), (MOV_SPECIAL ENVREG7)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg8), (MOV_SPECIAL ENVREG8)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg9), (MOV_SPECIAL ENVREG9)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg10), (MOV_SPECIAL ENVREG10)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg11), (MOV_SPECIAL ENVREG11)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg12), (MOV_SPECIAL ENVREG12)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg13), (MOV_SPECIAL ENVREG13)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg14), (MOV_SPECIAL ENVREG14)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg15), (MOV_SPECIAL ENVREG15)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg16), (MOV_SPECIAL ENVREG16)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg17), (MOV_SPECIAL ENVREG17)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg18), (MOV_SPECIAL ENVREG18)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg19), (MOV_SPECIAL ENVREG19)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg20), (MOV_SPECIAL ENVREG20)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg21), (MOV_SPECIAL ENVREG21)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg22), (MOV_SPECIAL ENVREG22)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg23), (MOV_SPECIAL ENVREG23)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg24), (MOV_SPECIAL ENVREG24)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg25), (MOV_SPECIAL ENVREG25)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg26), (MOV_SPECIAL ENVREG26)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg27), (MOV_SPECIAL ENVREG27)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg28), (MOV_SPECIAL ENVREG28)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg29), (MOV_SPECIAL ENVREG29)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg30), (MOV_SPECIAL ENVREG30)>;
+def : Pat<(int_nvvm_read_ptx_sreg_envreg31), (MOV_SPECIAL ENVREG31)>;
+
+
+// rotate builtin support
+
+def ROTATE_B32_HW_IMM
+ : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$src, i32imm:$amt),
+ "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst,
+ (int_nvvm_rotate_b32 Int32Regs:$src, (i32 imm:$amt)))]>,
+ Requires<[hasHWROT32]> ;
+
+def ROTATE_B32_HW_REG
+ : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$src, Int32Regs:$amt),
+ "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst,
+ (int_nvvm_rotate_b32 Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[hasHWROT32]> ;
+
+def : Pat<(int_nvvm_rotate_b32 Int32Regs:$src, (i32 imm:$amt)),
+ (ROT32imm_sw Int32Regs:$src, imm:$amt, (SUB_FRM_32 node:$amt))>,
+ Requires<[noHWROT32]> ;
+
+def : Pat<(int_nvvm_rotate_b32 Int32Regs:$src, Int32Regs:$amt),
+ (ROTL32reg_sw Int32Regs:$src, Int32Regs:$amt)>,
+ Requires<[noHWROT32]> ;
+
+def GET_LO_INT64
+ : NVPTXInst<(outs Int32Regs:$dst), (ins Int64Regs:$src),
+ !strconcat("{{\n\t",
+ !strconcat(".reg .b32 %dummy;\n\t",
+ !strconcat("mov.b64 \t{$dst,%dummy}, $src;\n\t",
+ !strconcat("}}", "")))),
+ []> ;
+
+def GET_HI_INT64
+ : NVPTXInst<(outs Int32Regs:$dst), (ins Int64Regs:$src),
+ !strconcat("{{\n\t",
+ !strconcat(".reg .b32 %dummy;\n\t",
+ !strconcat("mov.b64 \t{%dummy,$dst}, $src;\n\t",
+ !strconcat("}}", "")))),
+ []> ;
+
+def PACK_TWO_INT32
+ : NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$lo, Int32Regs:$hi),
+ "mov.b64 \t$dst, {{$lo, $hi}};", []> ;
+
+def : Pat<(int_nvvm_swap_lo_hi_b64 Int64Regs:$src),
+ (PACK_TWO_INT32 (GET_HI_INT64 Int64Regs:$src),
+ (GET_LO_INT64 Int64Regs:$src))> ;
+
+// funnel shift, requires >= sm_32
+def SHF_L_WRAP_B32_IMM
+ : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, i32imm:$amt),
+ "shf.l.wrap.b32 \t$dst, $lo, $hi, $amt;",[]>,
+ Requires<[hasHWROT32]>;
+
+def SHF_L_WRAP_B32_REG
+ : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
+ "shf.l.wrap.b32 \t$dst, $lo, $hi, $amt;",[]>,
+ Requires<[hasHWROT32]>;
+
+def SHF_R_WRAP_B32_IMM
+ : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, i32imm:$amt),
+ "shf.r.wrap.b32 \t$dst, $lo, $hi, $amt;",[]>,
+ Requires<[hasHWROT32]>;
+
+def SHF_R_WRAP_B32_REG
+ : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
+ "shf.r.wrap.b32 \t$dst, $lo, $hi, $amt;",[]>,
+ Requires<[hasHWROT32]>;
+
+// HW version of rotate 64
+def : Pat<(int_nvvm_rotate_b64 Int64Regs:$src, (i32 imm:$amt)),
+ (PACK_TWO_INT32
+ (SHF_L_WRAP_B32_IMM (GET_HI_INT64 Int64Regs:$src),
+ (GET_LO_INT64 Int64Regs:$src), imm:$amt),
+ (SHF_L_WRAP_B32_IMM (GET_LO_INT64 Int64Regs:$src),
+ (GET_HI_INT64 Int64Regs:$src), imm:$amt))>,
+ Requires<[hasHWROT32]>;
+
+def : Pat<(int_nvvm_rotate_b64 Int64Regs:$src, Int32Regs:$amt),
+ (PACK_TWO_INT32
+ (SHF_L_WRAP_B32_REG (GET_HI_INT64 Int64Regs:$src),
+ (GET_LO_INT64 Int64Regs:$src), Int32Regs:$amt),
+ (SHF_L_WRAP_B32_REG (GET_LO_INT64 Int64Regs:$src),
+ (GET_HI_INT64 Int64Regs:$src), Int32Regs:$amt))>,
+ Requires<[hasHWROT32]>;
+
+
+def : Pat<(int_nvvm_rotate_right_b64 Int64Regs:$src, (i32 imm:$amt)),
+ (PACK_TWO_INT32
+ (SHF_R_WRAP_B32_IMM (GET_LO_INT64 Int64Regs:$src),
+ (GET_HI_INT64 Int64Regs:$src), imm:$amt),
+ (SHF_R_WRAP_B32_IMM (GET_HI_INT64 Int64Regs:$src),
+ (GET_LO_INT64 Int64Regs:$src), imm:$amt))>,
+ Requires<[hasHWROT32]>;
+
+def : Pat<(int_nvvm_rotate_right_b64 Int64Regs:$src, Int32Regs:$amt),
+ (PACK_TWO_INT32
+ (SHF_R_WRAP_B32_REG (GET_LO_INT64 Int64Regs:$src),
+ (GET_HI_INT64 Int64Regs:$src), Int32Regs:$amt),
+ (SHF_R_WRAP_B32_REG (GET_HI_INT64 Int64Regs:$src),
+ (GET_LO_INT64 Int64Regs:$src), Int32Regs:$amt))>,
+ Requires<[hasHWROT32]>;
+
+// SW version of rotate 64
+def : Pat<(int_nvvm_rotate_b64 Int64Regs:$src, (i32 imm:$amt)),
+ (ROT64imm_sw Int64Regs:$src, imm:$amt, (SUB_FRM_32 node:$amt))>,
+ Requires<[noHWROT32]>;
+def : Pat<(int_nvvm_rotate_b64 Int64Regs:$src, Int32Regs:$amt),
+ (ROTL64reg_sw Int64Regs:$src, Int32Regs:$amt)>,
+ Requires<[noHWROT32]>;
+def : Pat<(int_nvvm_rotate_right_b64 Int64Regs:$src, (i32 imm:$amt)),
+ (ROT64imm_sw Int64Regs:$src, (SUB_FRM_64 node:$amt), imm:$amt)>,
+ Requires<[noHWROT32]>;
+def : Pat<(int_nvvm_rotate_right_b64 Int64Regs:$src, Int32Regs:$amt),
+ (ROTR64reg_sw Int64Regs:$src, Int32Regs:$amt)>,
+ Requires<[noHWROT32]>;
+
+
+//-----------------------------------
+// Texture Intrinsics
+//-----------------------------------
+
+// NOTE: For Fermi support, any new texture/surface/sampler intrinsics must be
+// also defined in NVPTXReplaceImageHandles.cpp
+
+// texmode_independent
+let IsTex = 1, IsTexModeUnified = 0 in {
+// Texture fetch instructions using handles
+def TEX_1D_F32_S32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$x),
+ "tex.1d.v4.f32.s32\t\\{$r, $g, $b, $a\\}, [$t, $s, \\{$x\\}];",
+ []>;
+def TEX_1D_F32_F32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x),
+ "tex.1d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, [$t, $s, \\{$x\\}];",
+ []>;
+def TEX_1D_F32_F32_LEVEL
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$lod),
+ "tex.level.1d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x\\}], $lod;",
+ []>;
+def TEX_1D_F32_F32_GRAD
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x,
+ Float32Regs:$gradx, Float32Regs:$grady),
+ "tex.grad.1d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x\\}], \\{$gradx\\}, \\{$grady\\};",
+ []>;
+def TEX_1D_S32_S32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$x),
+ "tex.1d.v4.s32.s32\t\\{$r, $g, $b, $a\\}, [$t, $s, \\{$x\\}];",
+ []>;
+def TEX_1D_S32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x),
+ "tex.1d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, [$t, $s, \\{$x\\}];",
+ []>;
+def TEX_1D_S32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x,
+ Float32Regs:$lod),
+ "tex.level.1d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x\\}], $lod;",
+ []>;
+def TEX_1D_S32_F32_GRAD
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x,
+ Float32Regs:$gradx, Float32Regs:$grady),
+ "tex.grad.1d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x\\}], \\{$gradx\\}, \\{$grady\\};",
+ []>;
+def TEX_1D_U32_S32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$x),
+ "tex.1d.v4.u32.s32\t\\{$r, $g, $b, $a\\}, [$t, $s, \\{$x\\}];",
+ []>;
+def TEX_1D_U32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x),
+ "tex.1d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, [$t, $s, \\{$x\\}];",
+ []>;
+def TEX_1D_U32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x,
+ Float32Regs:$lod),
+ "tex.level.1d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x\\}], $lod;",
+ []>;
+def TEX_1D_U32_F32_GRAD
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x,
+ Float32Regs:$gradx, Float32Regs:$grady),
+ "tex.grad.1d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x\\}], \\{$gradx\\}, \\{$grady\\};",
+ []>;
+
+def TEX_1D_ARRAY_F32_S32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "tex.a1d.v4.f32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x\\}];",
+ []>;
+def TEX_1D_ARRAY_F32_F32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Float32Regs:$x),
+ "tex.a1d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x\\}];",
+ []>;
+def TEX_1D_ARRAY_F32_F32_LEVEL
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$lod),
+ "tex.level.a1d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x\\}], $lod;",
+ []>;
+def TEX_1D_ARRAY_F32_F32_GRAD
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$gradx, Float32Regs:$grady),
+ "tex.grad.a1d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x\\}], \\{$gradx\\}, \\{$grady\\};",
+ []>;
+def TEX_1D_ARRAY_S32_S32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "tex.a1d.v4.s32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x\\}];",
+ []>;
+def TEX_1D_ARRAY_S32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Float32Regs:$x),
+ "tex.a1d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x\\}];",
+ []>;
+def TEX_1D_ARRAY_S32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$lod),
+ "tex.level.a1d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x\\}], $lod;",
+ []>;
+def TEX_1D_ARRAY_S32_F32_GRAD
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$gradx, Float32Regs:$grady),
+ "tex.grad.a1d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x\\}], \\{$gradx\\}, \\{$grady\\};",
+ []>;
+def TEX_1D_ARRAY_U32_S32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "tex.a1d.v4.u32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x\\}];",
+ []>;
+def TEX_1D_ARRAY_U32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Float32Regs:$x),
+ "tex.a1d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x\\}];",
+ []>;
+def TEX_1D_ARRAY_U32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$lod),
+ "tex.level.a1d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x\\}], $lod;",
+ []>;
+def TEX_1D_ARRAY_U32_F32_GRAD
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$gradx, Float32Regs:$grady),
+ "tex.grad.a1d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x\\}], \\{$gradx\\}, \\{$grady\\};",
+ []>;
+
+def TEX_2D_F32_S32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "tex.2d.v4.f32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y\\}];",
+ []>;
+def TEX_2D_F32_F32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y),
+ "tex.2d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y\\}];",
+ []>;
+def TEX_2D_F32_F32_LEVEL
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$lod),
+ "tex.level.2d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y\\}], $lod;",
+ []>;
+def TEX_2D_F32_F32_GRAD
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$gradx0, Float32Regs:$gradx1,
+ Float32Regs:$grady0, Float32Regs:$grady1),
+ "tex.grad.2d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y\\}], \\{$gradx0, $gradx1\\}, "
+ "\\{$grady0, $grady1\\};",
+ []>;
+def TEX_2D_S32_S32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "tex.2d.v4.s32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y\\}];",
+ []>;
+def TEX_2D_S32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y),
+ "tex.2d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y\\}];",
+ []>;
+def TEX_2D_S32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$lod),
+ "tex.level.2d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y\\}], $lod;",
+ []>;
+def TEX_2D_S32_F32_GRAD
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$gradx0, Float32Regs:$gradx1,
+ Float32Regs:$grady0, Float32Regs:$grady1),
+ "tex.grad.2d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y\\}], \\{$gradx0, $gradx1\\}, "
+ "\\{$grady0, $grady1\\};",
+ []>;
+def TEX_2D_U32_S32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "tex.2d.v4.u32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y\\}];",
+ []>;
+def TEX_2D_U32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y),
+ "tex.2d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y\\}];",
+ []>;
+def TEX_2D_U32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$lod),
+ "tex.level.2d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y\\}], $lod;",
+ []>;
+def TEX_2D_U32_F32_GRAD
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$gradx0, Float32Regs:$gradx1,
+ Float32Regs:$grady0, Float32Regs:$grady1),
+ "tex.grad.2d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y\\}], \\{$gradx0, $gradx1\\}, "
+ "\\{$grady0, $grady1\\};",
+ []>;
+
+def TEX_2D_ARRAY_F32_S32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$y),
+ "tex.a2d.v4.f32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def TEX_2D_ARRAY_F32_F32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$y),
+ "tex.a2d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def TEX_2D_ARRAY_F32_F32_LEVEL
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$y, Float32Regs:$lod),
+ "tex.level.a2d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x, $y, $y\\}], $lod;",
+ []>;
+def TEX_2D_ARRAY_F32_F32_GRAD
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$y, Float32Regs:$gradx0, Float32Regs:$gradx1,
+ Float32Regs:$grady0, Float32Regs:$grady1),
+ "tex.grad.a2d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x, $y, $y\\}], \\{$gradx0, $gradx1\\}, "
+ "\\{$grady0, $grady1\\};",
+ []>;
+def TEX_2D_ARRAY_S32_S32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$y),
+ "tex.a2d.v4.s32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def TEX_2D_ARRAY_S32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$y),
+ "tex.a2d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def TEX_2D_ARRAY_S32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$y, Float32Regs:$lod),
+ "tex.level.a2d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x, $y, $y\\}], $lod;",
+ []>;
+def TEX_2D_ARRAY_S32_F32_GRAD
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$y,
+ Float32Regs:$gradx0, Float32Regs:$gradx1,
+ Float32Regs:$grady0, Float32Regs:$grady1),
+ "tex.grad.a2d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x, $y, $y\\}], \\{$gradx0, $gradx1\\}, "
+ "\\{$grady0, $grady1\\};",
+ []>;
+def TEX_2D_ARRAY_U32_S32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$y),
+ "tex.a2d.v4.u32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def TEX_2D_ARRAY_U32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$y),
+ "tex.a2d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def TEX_2D_ARRAY_U32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$y, Float32Regs:$lod),
+ "tex.level.a2d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x, $y, $y\\}], $lod;",
+ []>;
+def TEX_2D_ARRAY_U32_F32_GRAD
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$y,
+ Float32Regs:$gradx0, Float32Regs:$gradx1,
+ Float32Regs:$grady0, Float32Regs:$grady1),
+ "tex.grad.a2d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x, $y, $y\\}], \\{$gradx0, $gradx1\\}, "
+ "\\{$grady0, $grady1\\};",
+ []>;
+
+def TEX_3D_F32_S32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$z),
+ "tex.3d.v4.f32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def TEX_3D_F32_F32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$z),
+ "tex.3d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def TEX_3D_F32_F32_LEVEL
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$z, Float32Regs:$lod),
+ "tex.level.3d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y, $z, $z\\}], $lod;",
+ []>;
+def TEX_3D_F32_F32_GRAD
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$z,
+ Float32Regs:$gradx0, Float32Regs:$gradx1,
+ Float32Regs:$gradx2, Float32Regs:$grady0,
+ Float32Regs:$grady1, Float32Regs:$grady2),
+ "tex.grad.3d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y, $z, $z\\}], "
+ "\\{$gradx0, $gradx1, $gradx2, $gradx2\\}, "
+ "\\{$grady0, $grady1, $grady2, $grady2\\};",
+ []>;
+def TEX_3D_S32_S32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$z),
+ "tex.3d.v4.s32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def TEX_3D_S32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$z),
+ "tex.3d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def TEX_3D_S32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$z, Float32Regs:$lod),
+ "tex.level.3d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y, $z, $z\\}], $lod;",
+ []>;
+def TEX_3D_S32_F32_GRAD
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$z,
+ Float32Regs:$gradx0, Float32Regs:$gradx1,
+ Float32Regs:$gradx2, Float32Regs:$grady0,
+ Float32Regs:$grady1, Float32Regs:$grady2),
+ "tex.grad.3d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y, $z, $z\\}], "
+ "\\{$gradx0, $gradx1, $gradx2, $gradx2\\}, "
+ "\\{$grady0, $grady1, $grady2, $grady2\\};",
+ []>;
+def TEX_3D_U32_S32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$z),
+ "tex.3d.v4.u32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def TEX_3D_U32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$z),
+ "tex.3d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def TEX_3D_U32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$z, Float32Regs:$lod),
+ "tex.level.3d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y, $z, $z\\}], $lod;",
+ []>;
+def TEX_3D_U32_F32_GRAD
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$z,
+ Float32Regs:$gradx0, Float32Regs:$gradx1,
+ Float32Regs:$gradx2, Float32Regs:$grady0,
+ Float32Regs:$grady1, Float32Regs:$grady2),
+ "tex.grad.3d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y, $z, $z\\}], "
+ "\\{$gradx0, $gradx1, $gradx2, $gradx2\\}, "
+ "\\{$grady0, $grady1, $grady2, $grady2\\};",
+ []>;
+
+def TEX_CUBE_F32_F32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z),
+ "tex.cube.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def TEX_CUBE_F32_F32_LEVEL
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z,
+ Float32Regs:$lod),
+ "tex.level.cube.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y, $z, $z\\}], $lod;",
+ []>;
+def TEX_CUBE_S32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z),
+ "tex.cube.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def TEX_CUBE_S32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z,
+ Float32Regs:$lod),
+ "tex.level.cube.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y, $z, $z\\}], $lod;",
+ []>;
+def TEX_CUBE_U32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z),
+ "tex.cube.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def TEX_CUBE_U32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z,
+ Float32Regs:$lod),
+ "tex.level.cube.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$x, $y, $z, $z\\}], $lod;",
+ []>;
+
+def TEX_CUBE_ARRAY_F32_F32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z),
+ "tex.acube.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x, $y, $z\\}];",
+ []>;
+def TEX_CUBE_ARRAY_F32_F32_LEVEL
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z,
+ Float32Regs:$lod),
+ "tex.level.acube.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x, $y, $z\\}], $lod;",
+ []>;
+def TEX_CUBE_ARRAY_S32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z),
+ "tex.acube.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x, $y, $z\\}];",
+ []>;
+def TEX_CUBE_ARRAY_S32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z,
+ Float32Regs:$lod),
+ "tex.level.acube.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x, $y, $z\\}], $lod;",
+ []>;
+def TEX_CUBE_ARRAY_U32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z),
+ "tex.acube.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x, $y, $z\\}];",
+ []>;
+def TEX_CUBE_ARRAY_U32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int64Regs:$s, Int32Regs:$l,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z,
+ Float32Regs:$lod),
+ "tex.level.acube.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, $s, \\{$l, $x, $y, $z\\}], $lod;",
+ []>;
+
+def TLD4_R_2D_F32_F32
+ : NVPTXInst<(outs Float32Regs:$v0, Float32Regs:$v1,
+ Float32Regs:$v2, Float32Regs:$v3),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y),
+ "tld4.r.2d.v4.f32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, $s, \\{$x, $y\\}];",
+ []>;
+def TLD4_G_2D_F32_F32
+ : NVPTXInst<(outs Float32Regs:$v0, Float32Regs:$v1,
+ Float32Regs:$v2, Float32Regs:$v3),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y),
+ "tld4.g.2d.v4.f32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, $s, \\{$x, $y\\}];",
+ []>;
+def TLD4_B_2D_F32_F32
+ : NVPTXInst<(outs Float32Regs:$v0, Float32Regs:$v1,
+ Float32Regs:$v2, Float32Regs:$v3),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y),
+ "tld4.b.2d.v4.f32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, $s, \\{$x, $y\\}];",
+ []>;
+def TLD4_A_2D_F32_F32
+ : NVPTXInst<(outs Float32Regs:$v0, Float32Regs:$v1,
+ Float32Regs:$v2, Float32Regs:$v3),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y),
+ "tld4.a.2d.v4.f32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, $s, \\{$x, $y\\}];",
+ []>;
+def TLD4_R_2D_S32_F32
+ : NVPTXInst<(outs Int32Regs:$v0, Int32Regs:$v1,
+ Int32Regs:$v2, Int32Regs:$v3),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y),
+ "tld4.r.2d.v4.s32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, $s, \\{$x, $y\\}];",
+ []>;
+def TLD4_G_2D_S32_F32
+ : NVPTXInst<(outs Int32Regs:$v0, Int32Regs:$v1,
+ Int32Regs:$v2, Int32Regs:$v3),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y),
+ "tld4.g.2d.v4.s32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, $s, \\{$x, $y\\}];",
+ []>;
+def TLD4_B_2D_S32_F32
+ : NVPTXInst<(outs Int32Regs:$v0, Int32Regs:$v1,
+ Int32Regs:$v2, Int32Regs:$v3),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y),
+ "tld4.b.2d.v4.s32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, $s, \\{$x, $y\\}];",
+ []>;
+def TLD4_A_2D_S32_F32
+ : NVPTXInst<(outs Int32Regs:$v0, Int32Regs:$v1,
+ Int32Regs:$v2, Int32Regs:$v3),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y),
+ "tld4.a.2d.v4.s32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, $s, \\{$x, $y\\}];",
+ []>;
+def TLD4_R_2D_U32_F32
+ : NVPTXInst<(outs Int32Regs:$v0, Int32Regs:$v1,
+ Int32Regs:$v2, Int32Regs:$v3),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y),
+ "tld4.r.2d.v4.u32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, $s, \\{$x, $y\\}];",
+ []>;
+def TLD4_G_2D_U32_F32
+ : NVPTXInst<(outs Int32Regs:$v0, Int32Regs:$v1,
+ Int32Regs:$v2, Int32Regs:$v3),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y),
+ "tld4.g.2d.v4.u32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, $s, \\{$x, $y\\}];",
+ []>;
+def TLD4_B_2D_U32_F32
+ : NVPTXInst<(outs Int32Regs:$v0, Int32Regs:$v1,
+ Int32Regs:$v2, Int32Regs:$v3),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y),
+ "tld4.b.2d.v4.u32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, $s, \\{$x, $y\\}];",
+ []>;
+def TLD4_A_2D_U32_F32
+ : NVPTXInst<(outs Int32Regs:$v0, Int32Regs:$v1,
+ Int32Regs:$v2, Int32Regs:$v3),
+ (ins Int64Regs:$t, Int64Regs:$s, Float32Regs:$x, Float32Regs:$y),
+ "tld4.a.2d.v4.u32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, $s, \\{$x, $y\\}];",
+ []>;
+}
+
+
+// texmode_unified
+let IsTex = 1, IsTexModeUnified = 1 in {
+// Texture fetch instructions using handles
+def TEX_UNIFIED_1D_F32_S32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$x),
+ "tex.1d.v4.f32.s32\t\\{$r, $g, $b, $a\\}, [$t, \\{$x\\}];",
+ []>;
+def TEX_UNIFIED_1D_F32_F32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x),
+ "tex.1d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, [$t, \\{$x\\}];",
+ []>;
+def TEX_UNIFIED_1D_F32_F32_LEVEL
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$lod),
+ "tex.level.1d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x\\}], $lod;",
+ []>;
+def TEX_UNIFIED_1D_F32_F32_GRAD
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x,
+ Float32Regs:$gradx, Float32Regs:$grady),
+ "tex.grad.1d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x\\}], \\{$gradx\\}, \\{$grady\\};",
+ []>;
+def TEX_UNIFIED_1D_S32_S32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$x),
+ "tex.1d.v4.s32.s32\t\\{$r, $g, $b, $a\\}, [$t, \\{$x\\}];",
+ []>;
+def TEX_UNIFIED_1D_S32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x),
+ "tex.1d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, [$t, \\{$x\\}];",
+ []>;
+def TEX_UNIFIED_1D_S32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x,
+ Float32Regs:$lod),
+ "tex.level.1d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x\\}], $lod;",
+ []>;
+def TEX_UNIFIED_1D_S32_F32_GRAD
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x,
+ Float32Regs:$gradx, Float32Regs:$grady),
+ "tex.grad.1d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x\\}], \\{$gradx\\}, \\{$grady\\};",
+ []>;
+def TEX_UNIFIED_1D_U32_S32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$x),
+ "tex.1d.v4.u32.s32\t\\{$r, $g, $b, $a\\}, [$t, \\{$x\\}];",
+ []>;
+def TEX_UNIFIED_1D_U32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x),
+ "tex.1d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, [$t, \\{$x\\}];",
+ []>;
+def TEX_UNIFIED_1D_U32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x,
+ Float32Regs:$lod),
+ "tex.level.1d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x\\}], $lod;",
+ []>;
+def TEX_UNIFIED_1D_U32_F32_GRAD
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x,
+ Float32Regs:$gradx, Float32Regs:$grady),
+ "tex.grad.1d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x\\}], \\{$gradx\\}, \\{$grady\\};",
+ []>;
+
+def TEX_UNIFIED_1D_ARRAY_F32_S32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Int32Regs:$x),
+ "tex.a1d.v4.f32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x\\}];",
+ []>;
+def TEX_UNIFIED_1D_ARRAY_F32_F32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Float32Regs:$x),
+ "tex.a1d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x\\}];",
+ []>;
+def TEX_UNIFIED_1D_ARRAY_F32_F32_LEVEL
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$lod),
+ "tex.level.a1d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x\\}], $lod;",
+ []>;
+def TEX_UNIFIED_1D_ARRAY_F32_F32_GRAD
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$gradx, Float32Regs:$grady),
+ "tex.grad.a1d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x\\}], \\{$gradx\\}, \\{$grady\\};",
+ []>;
+def TEX_UNIFIED_1D_ARRAY_S32_S32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Int32Regs:$x),
+ "tex.a1d.v4.s32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x\\}];",
+ []>;
+def TEX_UNIFIED_1D_ARRAY_S32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Float32Regs:$x),
+ "tex.a1d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x\\}];",
+ []>;
+def TEX_UNIFIED_1D_ARRAY_S32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$lod),
+ "tex.level.a1d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x\\}], $lod;",
+ []>;
+def TEX_UNIFIED_1D_ARRAY_S32_F32_GRAD
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$gradx, Float32Regs:$grady),
+ "tex.grad.a1d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x\\}], \\{$gradx\\}, \\{$grady\\};",
+ []>;
+def TEX_UNIFIED_1D_ARRAY_U32_S32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Int32Regs:$x),
+ "tex.a1d.v4.u32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x\\}];",
+ []>;
+def TEX_UNIFIED_1D_ARRAY_U32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Float32Regs:$x),
+ "tex.a1d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x\\}];",
+ []>;
+def TEX_UNIFIED_1D_ARRAY_U32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$lod),
+ "tex.level.a1d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x\\}], $lod;",
+ []>;
+def TEX_UNIFIED_1D_ARRAY_U32_F32_GRAD
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$gradx, Float32Regs:$grady),
+ "tex.grad.a1d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x\\}], \\{$gradx\\}, \\{$grady\\};",
+ []>;
+
+def TEX_UNIFIED_2D_F32_S32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$x, Int32Regs:$y),
+ "tex.2d.v4.f32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y\\}];",
+ []>;
+def TEX_UNIFIED_2D_F32_F32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y),
+ "tex.2d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y\\}];",
+ []>;
+def TEX_UNIFIED_2D_F32_F32_LEVEL
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$lod),
+ "tex.level.2d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y\\}], $lod;",
+ []>;
+def TEX_UNIFIED_2D_F32_F32_GRAD
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$gradx0, Float32Regs:$gradx1,
+ Float32Regs:$grady0, Float32Regs:$grady1),
+ "tex.grad.2d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y\\}], \\{$gradx0, $gradx1\\}, "
+ "\\{$grady0, $grady1\\};",
+ []>;
+def TEX_UNIFIED_2D_S32_S32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$x, Int32Regs:$y),
+ "tex.2d.v4.s32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y\\}];",
+ []>;
+def TEX_UNIFIED_2D_S32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y),
+ "tex.2d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y\\}];",
+ []>;
+def TEX_UNIFIED_2D_S32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$lod),
+ "tex.level.2d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y\\}], $lod;",
+ []>;
+def TEX_UNIFIED_2D_S32_F32_GRAD
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$gradx0, Float32Regs:$gradx1,
+ Float32Regs:$grady0, Float32Regs:$grady1),
+ "tex.grad.2d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y\\}], \\{$gradx0, $gradx1\\}, "
+ "\\{$grady0, $grady1\\};",
+ []>;
+def TEX_UNIFIED_2D_U32_S32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$x, Int32Regs:$y),
+ "tex.2d.v4.u32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y\\}];",
+ []>;
+def TEX_UNIFIED_2D_U32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y),
+ "tex.2d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y\\}];",
+ []>;
+def TEX_UNIFIED_2D_U32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$lod),
+ "tex.level.2d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y\\}], $lod;",
+ []>;
+def TEX_UNIFIED_2D_U32_F32_GRAD
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$gradx0, Float32Regs:$gradx1,
+ Float32Regs:$grady0, Float32Regs:$grady1),
+ "tex.grad.2d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y\\}], \\{$gradx0, $gradx1\\}, "
+ "\\{$grady0, $grady1\\};",
+ []>;
+
+def TEX_UNIFIED_2D_ARRAY_F32_S32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$y),
+ "tex.a2d.v4.f32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x, $y, $y\\}];",
+ []>;
+def TEX_UNIFIED_2D_ARRAY_F32_F32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$y),
+ "tex.a2d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x, $y, $y\\}];",
+ []>;
+def TEX_UNIFIED_2D_ARRAY_F32_F32_LEVEL
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$y, Float32Regs:$lod),
+ "tex.level.a2d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x, $y, $y\\}], $lod;",
+ []>;
+def TEX_UNIFIED_2D_ARRAY_F32_F32_GRAD
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$y, Float32Regs:$gradx0, Float32Regs:$gradx1,
+ Float32Regs:$grady0, Float32Regs:$grady1),
+ "tex.grad.a2d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x, $y, $y\\}], \\{$gradx0, $gradx1\\}, "
+ "\\{$grady0, $grady1\\};",
+ []>;
+def TEX_UNIFIED_2D_ARRAY_S32_S32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$y),
+ "tex.a2d.v4.s32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x, $y, $y\\}];",
+ []>;
+def TEX_UNIFIED_2D_ARRAY_S32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$y),
+ "tex.a2d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x, $y, $y\\}];",
+ []>;
+def TEX_UNIFIED_2D_ARRAY_S32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$y, Float32Regs:$lod),
+ "tex.level.a2d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x, $y, $y\\}], $lod;",
+ []>;
+def TEX_UNIFIED_2D_ARRAY_S32_F32_GRAD
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$y,
+ Float32Regs:$gradx0, Float32Regs:$gradx1,
+ Float32Regs:$grady0, Float32Regs:$grady1),
+ "tex.grad.a2d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x, $y, $y\\}], \\{$gradx0, $gradx1\\}, "
+ "\\{$grady0, $grady1\\};",
+ []>;
+def TEX_UNIFIED_2D_ARRAY_U32_S32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$y),
+ "tex.a2d.v4.u32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x, $y, $y\\}];",
+ []>;
+def TEX_UNIFIED_2D_ARRAY_U32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$y),
+ "tex.a2d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x, $y, $y\\}];",
+ []>;
+def TEX_UNIFIED_2D_ARRAY_U32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$y, Float32Regs:$lod),
+ "tex.level.a2d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x, $y, $y\\}], $lod;",
+ []>;
+def TEX_UNIFIED_2D_ARRAY_U32_F32_GRAD
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l, Float32Regs:$x,
+ Float32Regs:$y,
+ Float32Regs:$gradx0, Float32Regs:$gradx1,
+ Float32Regs:$grady0, Float32Regs:$grady1),
+ "tex.grad.a2d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x, $y, $y\\}], \\{$gradx0, $gradx1\\}, "
+ "\\{$grady0, $grady1\\};",
+ []>;
+
+def TEX_UNIFIED_3D_F32_S32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$z),
+ "tex.3d.v4.f32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y, $z, $z\\}];",
+ []>;
+def TEX_UNIFIED_3D_F32_F32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$z),
+ "tex.3d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y, $z, $z\\}];",
+ []>;
+def TEX_UNIFIED_3D_F32_F32_LEVEL
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$z, Float32Regs:$lod),
+ "tex.level.3d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y, $z, $z\\}], $lod;",
+ []>;
+def TEX_UNIFIED_3D_F32_F32_GRAD
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$z,
+ Float32Regs:$gradx0, Float32Regs:$gradx1,
+ Float32Regs:$gradx2, Float32Regs:$grady0,
+ Float32Regs:$grady1, Float32Regs:$grady2),
+ "tex.grad.3d.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y, $z, $z\\}], "
+ "\\{$gradx0, $gradx1, $gradx2, $gradx2\\}, "
+ "\\{$grady0, $grady1, $grady2, $grady2\\};",
+ []>;
+def TEX_UNIFIED_3D_S32_S32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$z),
+ "tex.3d.v4.s32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y, $z, $z\\}];",
+ []>;
+def TEX_UNIFIED_3D_S32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$z),
+ "tex.3d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y, $z, $z\\}];",
+ []>;
+def TEX_UNIFIED_3D_S32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$z, Float32Regs:$lod),
+ "tex.level.3d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y, $z, $z\\}], $lod;",
+ []>;
+def TEX_UNIFIED_3D_S32_F32_GRAD
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$z,
+ Float32Regs:$gradx0, Float32Regs:$gradx1,
+ Float32Regs:$gradx2, Float32Regs:$grady0,
+ Float32Regs:$grady1, Float32Regs:$grady2),
+ "tex.grad.3d.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y, $z, $z\\}], "
+ "\\{$gradx0, $gradx1, $gradx2, $gradx2\\}, "
+ "\\{$grady0, $grady1, $grady2, $grady2\\};",
+ []>;
+def TEX_UNIFIED_3D_U32_S32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$z),
+ "tex.3d.v4.u32.s32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y, $z, $z\\}];",
+ []>;
+def TEX_UNIFIED_3D_U32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$z),
+ "tex.3d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y, $z, $z\\}];",
+ []>;
+def TEX_UNIFIED_3D_U32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$z, Float32Regs:$lod),
+ "tex.level.3d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y, $z, $z\\}], $lod;",
+ []>;
+def TEX_UNIFIED_3D_U32_F32_GRAD
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y,
+ Float32Regs:$z,
+ Float32Regs:$gradx0, Float32Regs:$gradx1,
+ Float32Regs:$gradx2, Float32Regs:$grady0,
+ Float32Regs:$grady1, Float32Regs:$grady2),
+ "tex.grad.3d.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y, $z, $z\\}], "
+ "\\{$gradx0, $gradx1, $gradx2, $gradx2\\}, "
+ "\\{$grady0, $grady1, $grady2, $grady2\\};",
+ []>;
+
+def TEX_UNIFIED_CUBE_F32_F32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z),
+ "tex.cube.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y, $z, $z\\}];",
+ []>;
+def TEX_UNIFIED_CUBE_F32_F32_LEVEL
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z,
+ Float32Regs:$lod),
+ "tex.level.cube.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y, $z, $z\\}], $lod;",
+ []>;
+def TEX_UNIFIED_CUBE_S32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z),
+ "tex.cube.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y, $z, $z\\}];",
+ []>;
+def TEX_UNIFIED_CUBE_S32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z,
+ Float32Regs:$lod),
+ "tex.level.cube.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y, $z, $z\\}], $lod;",
+ []>;
+def TEX_UNIFIED_CUBE_U32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z),
+ "tex.cube.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y, $z, $z\\}];",
+ []>;
+def TEX_UNIFIED_CUBE_U32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z,
+ Float32Regs:$lod),
+ "tex.level.cube.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$x, $y, $z, $z\\}], $lod;",
+ []>;
+
+def TEX_UNIFIED_CUBE_ARRAY_F32_F32
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z),
+ "tex.acube.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x, $y, $z\\}];",
+ []>;
+def TEX_UNIFIED_CUBE_ARRAY_F32_F32_LEVEL
+ : NVPTXInst<(outs Float32Regs:$r, Float32Regs:$g,
+ Float32Regs:$b, Float32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z,
+ Float32Regs:$lod),
+ "tex.level.acube.v4.f32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x, $y, $z\\}], $lod;",
+ []>;
+def TEX_UNIFIED_CUBE_ARRAY_S32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z),
+ "tex.acube.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x, $y, $z\\}];",
+ []>;
+def TEX_UNIFIED_CUBE_ARRAY_S32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z,
+ Float32Regs:$lod),
+ "tex.level.acube.v4.s32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x, $y, $z\\}], $lod;",
+ []>;
+def TEX_UNIFIED_CUBE_ARRAY_U32_F32
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z),
+ "tex.acube.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x, $y, $z\\}];",
+ []>;
+def TEX_UNIFIED_CUBE_ARRAY_U32_F32_LEVEL
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$t, Int32Regs:$l,
+ Float32Regs:$x, Float32Regs:$y, Float32Regs:$z,
+ Float32Regs:$lod),
+ "tex.level.acube.v4.u32.f32\t\\{$r, $g, $b, $a\\}, "
+ "[$t, \\{$l, $x, $y, $z\\}], $lod;",
+ []>;
+
+def TLD4_UNIFIED_R_2D_F32_F32
+ : NVPTXInst<(outs Float32Regs:$v0, Float32Regs:$v1,
+ Float32Regs:$v2, Float32Regs:$v3),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y),
+ "tld4.r.2d.v4.f32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, \\{$x, $y\\}];",
+ []>;
+def TLD4_UNIFIED_G_2D_F32_F32
+ : NVPTXInst<(outs Float32Regs:$v0, Float32Regs:$v1,
+ Float32Regs:$v2, Float32Regs:$v3),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y),
+ "tld4.g.2d.v4.f32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, \\{$x, $y\\}];",
+ []>;
+def TLD4_UNIFIED_B_2D_F32_F32
+ : NVPTXInst<(outs Float32Regs:$v0, Float32Regs:$v1,
+ Float32Regs:$v2, Float32Regs:$v3),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y),
+ "tld4.b.2d.v4.f32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, \\{$x, $y\\}];",
+ []>;
+def TLD4_UNIFIED_A_2D_F32_F32
+ : NVPTXInst<(outs Float32Regs:$v0, Float32Regs:$v1,
+ Float32Regs:$v2, Float32Regs:$v3),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y),
+ "tld4.a.2d.v4.f32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, \\{$x, $y\\}];",
+ []>;
+def TLD4_UNIFIED_R_2D_S32_F32
+ : NVPTXInst<(outs Int32Regs:$v0, Int32Regs:$v1,
+ Int32Regs:$v2, Int32Regs:$v3),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y),
+ "tld4.r.2d.v4.s32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, \\{$x, $y\\}];",
+ []>;
+def TLD4_UNIFIED_G_2D_S32_F32
+ : NVPTXInst<(outs Int32Regs:$v0, Int32Regs:$v1,
+ Int32Regs:$v2, Int32Regs:$v3),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y),
+ "tld4.g.2d.v4.s32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, \\{$x, $y\\}];",
+ []>;
+def TLD4_UNIFIED_B_2D_S32_F32
+ : NVPTXInst<(outs Int32Regs:$v0, Int32Regs:$v1,
+ Int32Regs:$v2, Int32Regs:$v3),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y),
+ "tld4.b.2d.v4.s32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, \\{$x, $y\\}];",
+ []>;
+def TLD4_UNIFIED_A_2D_S32_F32
+ : NVPTXInst<(outs Int32Regs:$v0, Int32Regs:$v1,
+ Int32Regs:$v2, Int32Regs:$v3),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y),
+ "tld4.a.2d.v4.s32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, \\{$x, $y\\}];",
+ []>;
+def TLD4_UNIFIED_R_2D_U32_F32
+ : NVPTXInst<(outs Int32Regs:$v0, Int32Regs:$v1,
+ Int32Regs:$v2, Int32Regs:$v3),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y),
+ "tld4.r.2d.v4.u32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, \\{$x, $y\\}];",
+ []>;
+def TLD4_UNIFIED_G_2D_U32_F32
+ : NVPTXInst<(outs Int32Regs:$v0, Int32Regs:$v1,
+ Int32Regs:$v2, Int32Regs:$v3),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y),
+ "tld4.g.2d.v4.u32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, \\{$x, $y\\}];",
+ []>;
+def TLD4_UNIFIED_B_2D_U32_F32
+ : NVPTXInst<(outs Int32Regs:$v0, Int32Regs:$v1,
+ Int32Regs:$v2, Int32Regs:$v3),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y),
+ "tld4.b.2d.v4.u32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, \\{$x, $y\\}];",
+ []>;
+def TLD4_UNIFIED_A_2D_U32_F32
+ : NVPTXInst<(outs Int32Regs:$v0, Int32Regs:$v1,
+ Int32Regs:$v2, Int32Regs:$v3),
+ (ins Int64Regs:$t, Float32Regs:$x, Float32Regs:$y),
+ "tld4.a.2d.v4.u32.f32\t\\{$v0, $v1, $v2, $v3\\}, "
+ "[$t, \\{$x, $y\\}];",
+ []>;
+}
+
+
+
+//=== Surface load instructions
+// .clamp variant
+let IsSuld = 1 in {
+def SULD_1D_I8_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.b8.clamp \\{$r\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_I16_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.b16.clamp \\{$r\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_I32_CLAMP
+ : NVPTXInst<(outs Int32Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.b32.clamp \\{$r\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_I64_CLAMP
+ : NVPTXInst<(outs Int64Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.b64.clamp \\{$r\\}, [$s, \\{$x\\}];",
+ []>;
+
+def SULD_1D_ARRAY_I8_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.b8.clamp \\{$r\\}, [$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_I16_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.b16.clamp \\{$r\\}, [$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_I32_CLAMP
+ : NVPTXInst<(outs Int32Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.b32.clamp \\{$r\\}, [$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_I64_CLAMP
+ : NVPTXInst<(outs Int64Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.b64.clamp \\{$r\\}, [$s, \\{$l, $x\\}];",
+ []>;
+
+def SULD_2D_I8_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.b8.clamp \\{$r\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_I16_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.b16.clamp \\{$r\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_I32_CLAMP
+ : NVPTXInst<(outs Int32Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.b32.clamp \\{$r\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_I64_CLAMP
+ : NVPTXInst<(outs Int64Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.b64.clamp \\{$r\\}, [$s, \\{$x, $y\\}];",
+ []>;
+
+def SULD_2D_ARRAY_I8_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.b8.clamp \\{$r\\}, [$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_I16_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.b16.clamp \\{$r\\}, [$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_I32_CLAMP
+ : NVPTXInst<(outs Int32Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.b32.clamp \\{$r\\}, [$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_I64_CLAMP
+ : NVPTXInst<(outs Int64Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.b64.clamp \\{$r\\}, [$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+
+def SULD_3D_I8_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.b8.clamp \\{$r\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_I16_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.b16.clamp \\{$r\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_I32_CLAMP
+ : NVPTXInst<(outs Int32Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.b32.clamp \\{$r\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_I64_CLAMP
+ : NVPTXInst<(outs Int64Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.b64.clamp \\{$r\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+}
+
+let IsSuld = 2 in {
+def SULD_1D_V2I8_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v2.b8.clamp \\{$r, $g\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_V2I16_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v2.b16.clamp \\{$r, $g\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_V2I32_CLAMP
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v2.b32.clamp \\{$r, $g\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_V2I64_CLAMP
+ : NVPTXInst<(outs Int64Regs:$r, Int64Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v2.b64.clamp \\{$r, $g\\}, [$s, \\{$x\\}];",
+ []>;
+
+def SULD_1D_ARRAY_V2I8_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v2.b8.clamp \\{$r, $g\\}, [$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_V2I16_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v2.b16.clamp \\{$r, $g\\}, [$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_V2I32_CLAMP
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v2.b32.clamp \\{$r, $g\\}, [$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_V2I64_CLAMP
+ : NVPTXInst<(outs Int64Regs:$r, Int64Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v2.b64.clamp \\{$r, $g\\}, [$s, \\{$l, $x\\}];",
+ []>;
+
+def SULD_2D_V2I8_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v2.b8.clamp \\{$r, $g\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_V2I16_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v2.b16.clamp \\{$r, $g\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_V2I32_CLAMP
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v2.b32.clamp \\{$r, $g\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_V2I64_CLAMP
+ : NVPTXInst<(outs Int64Regs:$r, Int64Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v2.b64.clamp \\{$r, $g\\}, [$s, \\{$x, $y\\}];",
+ []>;
+
+def SULD_2D_ARRAY_V2I8_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v2.b8.clamp \\{$r, $g\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_V2I16_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v2.b16.clamp \\{$r, $g\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_V2I32_CLAMP
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v2.b32.clamp \\{$r, $g\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_V2I64_CLAMP
+ : NVPTXInst<(outs Int64Regs:$r, Int64Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v2.b64.clamp \\{$r, $g\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+
+def SULD_3D_V2I8_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v2.b8.clamp \\{$r, $g\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_V2I16_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v2.b16.clamp \\{$r, $g\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_V2I32_CLAMP
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v2.b32.clamp \\{$r, $g\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_V2I64_CLAMP
+ : NVPTXInst<(outs Int64Regs:$r, Int64Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v2.b64.clamp \\{$r, $g\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+}
+
+let IsSuld = 3 in {
+def SULD_1D_V4I8_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v4.b8.clamp \\{$r, $g, $b, $a\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_V4I16_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v4.b16.clamp \\{$r, $g, $b, $a\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_V4I32_CLAMP
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v4.b32.clamp \\{$r, $g, $b, $a\\}, [$s, \\{$x\\}];",
+ []>;
+
+def SULD_1D_ARRAY_V4I8_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v4.b8.clamp \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_V4I16_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v4.b16.clamp \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_V4I32_CLAMP
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v4.b32.clamp \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$l, $x\\}];",
+ []>;
+
+def SULD_2D_V4I8_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v4.b8.clamp \\{$r, $g, $b, $a\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_V4I16_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v4.b16.clamp \\{$r, $g, $b, $a\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_V4I32_CLAMP
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v4.b32.clamp \\{$r, $g, $b, $a\\}, [$s, \\{$x, $y\\}];",
+ []>;
+
+def SULD_2D_ARRAY_V4I8_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v4.b8.clamp \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_V4I16_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v4.b16.clamp \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_V4I32_CLAMP
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v4.b32.clamp \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+
+
+def SULD_3D_V4I8_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v4.b8.clamp \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_V4I16_CLAMP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v4.b16.clamp \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_V4I32_CLAMP
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v4.b32.clamp \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+}
+
+
+// .trap variant
+let IsSuld = 1 in {
+def SULD_1D_I8_TRAP
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.b8.trap \\{$r\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_I16_TRAP
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.b16.trap \\{$r\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_I32_TRAP
+ : NVPTXInst<(outs Int32Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.b32.trap \\{$r\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_I64_TRAP
+ : NVPTXInst<(outs Int64Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.b64.trap \\{$r\\}, [$s, \\{$x\\}];",
+ []>;
+
+def SULD_1D_ARRAY_I8_TRAP
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.b8.trap \\{$r\\}, [$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_I16_TRAP
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.b16.trap \\{$r\\}, [$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_I32_TRAP
+ : NVPTXInst<(outs Int32Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.b32.trap \\{$r\\}, [$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_I64_TRAP
+ : NVPTXInst<(outs Int64Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.b64.trap \\{$r\\}, [$s, \\{$l, $x\\}];",
+ []>;
+
+def SULD_2D_I8_TRAP
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.b8.trap \\{$r\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_I16_TRAP
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.b16.trap \\{$r\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_I32_TRAP
+ : NVPTXInst<(outs Int32Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.b32.trap \\{$r\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_I64_TRAP
+ : NVPTXInst<(outs Int64Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.b64.trap \\{$r\\}, [$s, \\{$x, $y\\}];",
+ []>;
+
+def SULD_2D_ARRAY_I8_TRAP
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.b8.trap \\{$r\\}, [$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_I16_TRAP
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.b16.trap \\{$r\\}, [$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_I32_TRAP
+ : NVPTXInst<(outs Int32Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.b32.trap \\{$r\\}, [$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_I64_TRAP
+ : NVPTXInst<(outs Int64Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.b64.trap \\{$r\\}, [$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+
+def SULD_3D_I8_TRAP
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.b8.trap \\{$r\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_I16_TRAP
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.b16.trap \\{$r\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_I32_TRAP
+ : NVPTXInst<(outs Int32Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.b32.trap \\{$r\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_I64_TRAP
+ : NVPTXInst<(outs Int64Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.b64.trap \\{$r\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+}
+
+let IsSuld = 2 in {
+def SULD_1D_V2I8_TRAP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v2.b8.trap \\{$r, $g\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_V2I16_TRAP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v2.b16.trap \\{$r, $g\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_V2I32_TRAP
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v2.b32.trap \\{$r, $g\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_V2I64_TRAP
+ : NVPTXInst<(outs Int64Regs:$r, Int64Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v2.b64.trap \\{$r, $g\\}, [$s, \\{$x\\}];",
+ []>;
+
+def SULD_1D_ARRAY_V2I8_TRAP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v2.b8.trap \\{$r, $g\\}, [$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_V2I16_TRAP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v2.b16.trap \\{$r, $g\\}, [$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_V2I32_TRAP
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v2.b32.trap \\{$r, $g\\}, [$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_V2I64_TRAP
+ : NVPTXInst<(outs Int64Regs:$r, Int64Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v2.b64.trap \\{$r, $g\\}, [$s, \\{$l, $x\\}];",
+ []>;
+
+def SULD_2D_V2I8_TRAP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v2.b8.trap \\{$r, $g\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_V2I16_TRAP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v2.b16.trap \\{$r, $g\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_V2I32_TRAP
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v2.b32.trap \\{$r, $g\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_V2I64_TRAP
+ : NVPTXInst<(outs Int64Regs:$r, Int64Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v2.b64.trap \\{$r, $g\\}, [$s, \\{$x, $y\\}];",
+ []>;
+
+def SULD_2D_ARRAY_V2I8_TRAP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v2.b8.trap \\{$r, $g\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_V2I16_TRAP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v2.b16.trap \\{$r, $g\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_V2I32_TRAP
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v2.b32.trap \\{$r, $g\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_V2I64_TRAP
+ : NVPTXInst<(outs Int64Regs:$r, Int64Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v2.b64.trap \\{$r, $g\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+
+def SULD_3D_V2I8_TRAP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v2.b8.trap \\{$r, $g\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_V2I16_TRAP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v2.b16.trap \\{$r, $g\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_V2I32_TRAP
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v2.b32.trap \\{$r, $g\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_V2I64_TRAP
+ : NVPTXInst<(outs Int64Regs:$r, Int64Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v2.b64.trap \\{$r, $g\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+}
+
+let IsSuld = 3 in {
+def SULD_1D_V4I8_TRAP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v4.b8.trap \\{$r, $g, $b, $a\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_V4I16_TRAP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v4.b16.trap \\{$r, $g, $b, $a\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_V4I32_TRAP
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v4.b32.trap \\{$r, $g, $b, $a\\}, [$s, \\{$x\\}];",
+ []>;
+
+def SULD_1D_ARRAY_V4I8_TRAP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v4.b8.trap \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_V4I16_TRAP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v4.b16.trap \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_V4I32_TRAP
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v4.b32.trap \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$l, $x\\}];",
+ []>;
+
+def SULD_2D_V4I8_TRAP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v4.b8.trap \\{$r, $g, $b, $a\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_V4I16_TRAP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v4.b16.trap \\{$r, $g, $b, $a\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_V4I32_TRAP
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v4.b32.trap \\{$r, $g, $b, $a\\}, [$s, \\{$x, $y\\}];",
+ []>;
+
+def SULD_2D_ARRAY_V4I8_TRAP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v4.b8.trap \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_V4I16_TRAP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v4.b16.trap \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_V4I32_TRAP
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v4.b32.trap \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+
+
+def SULD_3D_V4I8_TRAP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v4.b8.trap \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_V4I16_TRAP
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v4.b16.trap \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_V4I32_TRAP
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v4.b32.trap \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+}
+
+// .zero variant
+let IsSuld = 1 in {
+def SULD_1D_I8_ZERO
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.b8.zero \\{$r\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_I16_ZERO
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.b16.zero \\{$r\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_I32_ZERO
+ : NVPTXInst<(outs Int32Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.b32.zero \\{$r\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_I64_ZERO
+ : NVPTXInst<(outs Int64Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.b64.zero \\{$r\\}, [$s, \\{$x\\}];",
+ []>;
+
+def SULD_1D_ARRAY_I8_ZERO
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.b8.zero \\{$r\\}, [$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_I16_ZERO
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.b16.zero \\{$r\\}, [$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_I32_ZERO
+ : NVPTXInst<(outs Int32Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.b32.zero \\{$r\\}, [$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_I64_ZERO
+ : NVPTXInst<(outs Int64Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.b64.zero \\{$r\\}, [$s, \\{$l, $x\\}];",
+ []>;
+
+def SULD_2D_I8_ZERO
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.b8.zero \\{$r\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_I16_ZERO
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.b16.zero \\{$r\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_I32_ZERO
+ : NVPTXInst<(outs Int32Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.b32.zero \\{$r\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_I64_ZERO
+ : NVPTXInst<(outs Int64Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.b64.zero \\{$r\\}, [$s, \\{$x, $y\\}];",
+ []>;
+
+def SULD_2D_ARRAY_I8_ZERO
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.b8.zero \\{$r\\}, [$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_I16_ZERO
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.b16.zero \\{$r\\}, [$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_I32_ZERO
+ : NVPTXInst<(outs Int32Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.b32.zero \\{$r\\}, [$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_I64_ZERO
+ : NVPTXInst<(outs Int64Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.b64.zero \\{$r\\}, [$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+
+def SULD_3D_I8_ZERO
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.b8.zero \\{$r\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_I16_ZERO
+ : NVPTXInst<(outs Int16Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.b16.zero \\{$r\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_I32_ZERO
+ : NVPTXInst<(outs Int32Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.b32.zero \\{$r\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_I64_ZERO
+ : NVPTXInst<(outs Int64Regs:$r),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.b64.zero \\{$r\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+}
+
+let IsSuld = 2 in {
+def SULD_1D_V2I8_ZERO
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v2.b8.zero \\{$r, $g\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_V2I16_ZERO
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v2.b16.zero \\{$r, $g\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_V2I32_ZERO
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v2.b32.zero \\{$r, $g\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_V2I64_ZERO
+ : NVPTXInst<(outs Int64Regs:$r, Int64Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v2.b64.zero \\{$r, $g\\}, [$s, \\{$x\\}];",
+ []>;
+
+def SULD_1D_ARRAY_V2I8_ZERO
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v2.b8.zero \\{$r, $g\\}, [$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_V2I16_ZERO
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v2.b16.zero \\{$r, $g\\}, [$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_V2I32_ZERO
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v2.b32.zero \\{$r, $g\\}, [$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_V2I64_ZERO
+ : NVPTXInst<(outs Int64Regs:$r, Int64Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v2.b64.zero \\{$r, $g\\}, [$s, \\{$l, $x\\}];",
+ []>;
+
+def SULD_2D_V2I8_ZERO
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v2.b8.zero \\{$r, $g\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_V2I16_ZERO
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v2.b16.zero \\{$r, $g\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_V2I32_ZERO
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v2.b32.zero \\{$r, $g\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_V2I64_ZERO
+ : NVPTXInst<(outs Int64Regs:$r, Int64Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v2.b64.zero \\{$r, $g\\}, [$s, \\{$x, $y\\}];",
+ []>;
+
+def SULD_2D_ARRAY_V2I8_ZERO
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v2.b8.zero \\{$r, $g\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_V2I16_ZERO
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v2.b16.zero \\{$r, $g\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_V2I32_ZERO
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v2.b32.zero \\{$r, $g\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_V2I64_ZERO
+ : NVPTXInst<(outs Int64Regs:$r, Int64Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v2.b64.zero \\{$r, $g\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+
+def SULD_3D_V2I8_ZERO
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v2.b8.zero \\{$r, $g\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_V2I16_ZERO
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v2.b16.zero \\{$r, $g\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_V2I32_ZERO
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v2.b32.zero \\{$r, $g\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_V2I64_ZERO
+ : NVPTXInst<(outs Int64Regs:$r, Int64Regs:$g),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v2.b64.zero \\{$r, $g\\}, [$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+}
+
+let IsSuld = 3 in {
+def SULD_1D_V4I8_ZERO
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v4.b8.zero \\{$r, $g, $b, $a\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_V4I16_ZERO
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v4.b16.zero \\{$r, $g, $b, $a\\}, [$s, \\{$x\\}];",
+ []>;
+def SULD_1D_V4I32_ZERO
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x),
+ "suld.b.1d.v4.b32.zero \\{$r, $g, $b, $a\\}, [$s, \\{$x\\}];",
+ []>;
+
+def SULD_1D_ARRAY_V4I8_ZERO
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v4.b8.zero \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_V4I16_ZERO
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v4.b16.zero \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$l, $x\\}];",
+ []>;
+def SULD_1D_ARRAY_V4I32_ZERO
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x),
+ "suld.b.a1d.v4.b32.zero \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$l, $x\\}];",
+ []>;
+
+def SULD_2D_V4I8_ZERO
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v4.b8.zero \\{$r, $g, $b, $a\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_V4I16_ZERO
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v4.b16.zero \\{$r, $g, $b, $a\\}, [$s, \\{$x, $y\\}];",
+ []>;
+def SULD_2D_V4I32_ZERO
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.2d.v4.b32.zero \\{$r, $g, $b, $a\\}, [$s, \\{$x, $y\\}];",
+ []>;
+
+def SULD_2D_ARRAY_V4I8_ZERO
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v4.b8.zero \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_V4I16_ZERO
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v4.b16.zero \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+def SULD_2D_ARRAY_V4I32_ZERO
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y),
+ "suld.b.a2d.v4.b32.zero \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$l, $x, $y, $y\\}];",
+ []>;
+
+
+def SULD_3D_V4I8_ZERO
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v4.b8.zero \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_V4I16_ZERO
+ : NVPTXInst<(outs Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v4.b16.zero \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+def SULD_3D_V4I32_ZERO
+ : NVPTXInst<(outs Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z),
+ "suld.b.3d.v4.b32.zero \\{$r, $g, $b, $a\\}, "
+ "[$s, \\{$x, $y, $z, $z\\}];",
+ []>;
+}
+
+//-----------------------------------
+// Texture Query Intrinsics
+//-----------------------------------
+
+let IsSurfTexQuery = 1 in {
+def TXQ_CHANNEL_ORDER
+ : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "txq.channel_order.b32 \t$d, [$a];",
+ []>;
+def TXQ_CHANNEL_DATA_TYPE
+ : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "txq.channel_data_type.b32 \t$d, [$a];",
+ []>;
+def TXQ_WIDTH
+ : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "txq.width.b32 \t$d, [$a];",
+ []>;
+def TXQ_HEIGHT
+ : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "txq.height.b32 \t$d, [$a];",
+ []>;
+def TXQ_DEPTH
+ : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "txq.depth.b32 \t$d, [$a];",
+ []>;
+def TXQ_ARRAY_SIZE
+ : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "txq.array_size.b32 \t$d, [$a];",
+ []>;
+def TXQ_NUM_SAMPLES
+ : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "txq.num_samples.b32 \t$d, [$a];",
+ []>;
+def TXQ_NUM_MIPMAP_LEVELS
+ : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "txq.num_mipmap_levels.b32 \t$d, [$a];",
+ []>;
+}
+
+def : Pat<(int_nvvm_txq_channel_order Int64Regs:$a),
+ (TXQ_CHANNEL_ORDER Int64Regs:$a)>;
+def : Pat<(int_nvvm_txq_channel_data_type Int64Regs:$a),
+ (TXQ_CHANNEL_DATA_TYPE Int64Regs:$a)>;
+def : Pat<(int_nvvm_txq_width Int64Regs:$a),
+ (TXQ_WIDTH Int64Regs:$a)>;
+def : Pat<(int_nvvm_txq_height Int64Regs:$a),
+ (TXQ_HEIGHT Int64Regs:$a)>;
+def : Pat<(int_nvvm_txq_depth Int64Regs:$a),
+ (TXQ_DEPTH Int64Regs:$a)>;
+def : Pat<(int_nvvm_txq_array_size Int64Regs:$a),
+ (TXQ_ARRAY_SIZE Int64Regs:$a)>;
+def : Pat<(int_nvvm_txq_num_samples Int64Regs:$a),
+ (TXQ_NUM_SAMPLES Int64Regs:$a)>;
+def : Pat<(int_nvvm_txq_num_mipmap_levels Int64Regs:$a),
+ (TXQ_NUM_MIPMAP_LEVELS Int64Regs:$a)>;
+
+
+//-----------------------------------
+// Surface Query Intrinsics
+//-----------------------------------
+
+let IsSurfTexQuery = 1 in {
+def SUQ_CHANNEL_ORDER
+ : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "suq.channel_order.b32 \t$d, [$a];",
+ []>;
+def SUQ_CHANNEL_DATA_TYPE
+ : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "suq.channel_data_type.b32 \t$d, [$a];",
+ []>;
+def SUQ_WIDTH
+ : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "suq.width.b32 \t$d, [$a];",
+ []>;
+def SUQ_HEIGHT
+ : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "suq.height.b32 \t$d, [$a];",
+ []>;
+def SUQ_DEPTH
+ : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "suq.depth.b32 \t$d, [$a];",
+ []>;
+def SUQ_ARRAY_SIZE
+ : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "suq.array_size.b32 \t$d, [$a];",
+ []>;
+}
+
+def : Pat<(int_nvvm_suq_channel_order Int64Regs:$a),
+ (SUQ_CHANNEL_ORDER Int64Regs:$a)>;
+def : Pat<(int_nvvm_suq_channel_data_type Int64Regs:$a),
+ (SUQ_CHANNEL_DATA_TYPE Int64Regs:$a)>;
+def : Pat<(int_nvvm_suq_width Int64Regs:$a),
+ (SUQ_WIDTH Int64Regs:$a)>;
+def : Pat<(int_nvvm_suq_height Int64Regs:$a),
+ (SUQ_HEIGHT Int64Regs:$a)>;
+def : Pat<(int_nvvm_suq_depth Int64Regs:$a),
+ (SUQ_DEPTH Int64Regs:$a)>;
+def : Pat<(int_nvvm_suq_array_size Int64Regs:$a),
+ (SUQ_ARRAY_SIZE Int64Regs:$a)>;
+
+
+//===- Handle Query -------------------------------------------------------===//
+
+// TODO: These intrinsics are not yet finalized, pending PTX ISA design work
+def ISTYPEP_SAMPLER
+ : NVPTXInst<(outs Int1Regs:$d), (ins Int64Regs:$a),
+ "istypep.samplerref \t$d, $a;",
+ [(set Int1Regs:$d, (int_nvvm_istypep_sampler Int64Regs:$a))]>;
+def ISTYPEP_SURFACE
+ : NVPTXInst<(outs Int1Regs:$d), (ins Int64Regs:$a),
+ "istypep.surfref \t$d, $a;",
+ [(set Int1Regs:$d, (int_nvvm_istypep_surface Int64Regs:$a))]>;
+def ISTYPEP_TEXTURE
+ : NVPTXInst<(outs Int1Regs:$d), (ins Int64Regs:$a),
+ "istypep.texref \t$d, $a;",
+ [(set Int1Regs:$d, (int_nvvm_istypep_texture Int64Regs:$a))]>;
+
+//===- Surface Stores -----------------------------------------------------===//
+
+let IsSust = 1 in {
+// Unformatted
+// .clamp variant
+def SUST_B_1D_B8_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r),
+ "sust.b.1d.b8.clamp \t[$s, \\{$x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_B16_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r),
+ "sust.b.1d.b16.clamp \t[$s, \\{$x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_B32_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$r),
+ "sust.b.1d.b32.clamp \t[$s, \\{$x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_B64_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int64Regs:$r),
+ "sust.b.1d.b64.clamp \t[$s, \\{$x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_V2B8_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ "sust.b.1d.v2.b8.clamp \t[$s, \\{$x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_V2B16_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ "sust.b.1d.v2.b16.clamp \t[$s, \\{$x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_V2B32_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$r, Int32Regs:$g),
+ "sust.b.1d.v2.b32.clamp \t[$s, \\{$x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_V2B64_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int64Regs:$r, Int64Regs:$g),
+ "sust.b.1d.v2.b64.clamp \t[$s, \\{$x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_V4B8_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g,
+ Int16Regs:$b, Int16Regs:$a),
+ "sust.b.1d.v4.b8.clamp \t[$s, \\{$x\\}], \\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_1D_V4B16_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g,
+ Int16Regs:$b, Int16Regs:$a),
+ "sust.b.1d.v4.b16.clamp \t[$s, \\{$x\\}], \\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_1D_V4B32_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ "sust.b.1d.v4.b32.clamp \t[$s, \\{$x\\}], \\{$r, $g, $b, $a\\};",
+ []>;
+
+
+def SUST_B_1D_ARRAY_B8_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r),
+ "sust.b.a1d.b8.clamp \t[$s, \\{$idx, $x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_ARRAY_B16_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r),
+ "sust.b.a1d.b16.clamp \t[$s, \\{$idx, $x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_ARRAY_B32_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$r),
+ "sust.b.a1d.b32.clamp \t[$s, \\{$idx, $x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_ARRAY_B64_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int64Regs:$r),
+ "sust.b.a1d.b64.clamp \t[$s, \\{$idx, $x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_ARRAY_V2B8_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r,
+ Int16Regs:$g),
+ "sust.b.a1d.v2.b8.clamp \t[$s, \\{$idx, $x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_ARRAY_V2B16_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r,
+ Int16Regs:$g),
+ "sust.b.a1d.v2.b16.clamp \t[$s, \\{$idx, $x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_ARRAY_V2B32_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$r,
+ Int32Regs:$g),
+ "sust.b.a1d.v2.b32.clamp \t[$s, \\{$idx, $x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_ARRAY_V2B64_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int64Regs:$r,
+ Int64Regs:$g),
+ "sust.b.a1d.v2.b64.clamp \t[$s, \\{$idx, $x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_ARRAY_V4B8_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r,
+ Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.a1d.v4.b8.clamp \t[$s, \\{$idx, $x\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_1D_ARRAY_V4B16_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r,
+ Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.a1d.v4.b16.clamp \t[$s, \\{$idx, $x\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_1D_ARRAY_V4B32_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$r,
+ Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ "sust.b.a1d.v4.b32.clamp \t[$s, \\{$idx, $x\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+
+
+def SUST_B_2D_B8_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ "sust.b.2d.b8.clamp \t[$s, \\{$x, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_B16_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ "sust.b.2d.b16.clamp \t[$s, \\{$x, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_B32_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r),
+ "sust.b.2d.b32.clamp \t[$s, \\{$x, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_B64_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int64Regs:$r),
+ "sust.b.2d.b64.clamp \t[$s, \\{$x, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_V2B8_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r,
+ Int16Regs:$g),
+ "sust.b.2d.v2.b8.clamp \t[$s, \\{$x, $y\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_2D_V2B16_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r,
+ Int16Regs:$g),
+ "sust.b.2d.v2.b16.clamp \t[$s, \\{$x, $y\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_2D_V2B32_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r,
+ Int32Regs:$g),
+ "sust.b.2d.v2.b32.clamp \t[$s, \\{$x, $y\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_2D_V2B64_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int64Regs:$r,
+ Int64Regs:$g),
+ "sust.b.2d.v2.b64.clamp \t[$s, \\{$x, $y\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_2D_V4B8_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r,
+ Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.2d.v4.b8.clamp \t[$s, \\{$x, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_2D_V4B16_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r,
+ Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.2d.v4.b16.clamp \t[$s, \\{$x, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_2D_V4B32_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r,
+ Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ "sust.b.2d.v4.b32.clamp \t[$s, \\{$x, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+
+
+def SUST_B_2D_ARRAY_B8_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r),
+ "sust.b.a2d.b8.clamp \t[$s, \\{$idx, $x, $y, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_ARRAY_B16_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r),
+ "sust.b.a2d.b16.clamp \t[$s, \\{$idx, $x, $y, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_ARRAY_B32_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r),
+ "sust.b.a2d.b32.clamp \t[$s, \\{$idx, $x, $y, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_ARRAY_B64_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int64Regs:$r),
+ "sust.b.a2d.b64.clamp \t[$s, \\{$idx, $x, $y, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_ARRAY_V2B8_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g),
+ "sust.b.a2d.v2.b8.clamp \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_2D_ARRAY_V2B16_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g),
+ "sust.b.a2d.v2.b16.clamp \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_2D_ARRAY_V2B32_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g),
+ "sust.b.a2d.v2.b32.clamp \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_2D_ARRAY_V2B64_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int64Regs:$r, Int64Regs:$g),
+ "sust.b.a2d.v2.b64.clamp \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_2D_ARRAY_V4B8_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.a2d.v4.b8.clamp \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_2D_ARRAY_V4B16_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.a2d.v4.b16.clamp \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_2D_ARRAY_V4B32_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ "sust.b.a2d.v4.b32.clamp \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+
+
+def SUST_B_3D_B8_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r),
+ "sust.b.3d.b8.clamp \t[$s, \\{$x, $y, $z, $z\\}], \\{$r\\};",
+ []>;
+def SUST_B_3D_B16_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r),
+ "sust.b.3d.b16.clamp \t[$s, \\{$x, $y, $z, $z\\}], \\{$r\\};",
+ []>;
+def SUST_B_3D_B32_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r),
+ "sust.b.3d.b32.clamp \t[$s, \\{$x, $y, $z, $z\\}], \\{$r\\};",
+ []>;
+def SUST_B_3D_B64_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int64Regs:$r),
+ "sust.b.3d.b64.clamp \t[$s, \\{$x, $y, $z, $z\\}], \\{$r\\};",
+ []>;
+def SUST_B_3D_V2B8_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g),
+ "sust.b.3d.v2.b8.clamp \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_3D_V2B16_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g),
+ "sust.b.3d.v2.b16.clamp \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_3D_V2B32_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g),
+ "sust.b.3d.v2.b32.clamp \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_3D_V2B64_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int64Regs:$r, Int64Regs:$g),
+ "sust.b.3d.v2.b64.clamp \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_3D_V4B8_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.3d.v4.b8.clamp \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_3D_V4B16_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.3d.v4.b16.clamp \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_3D_V4B32_CLAMP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ "sust.b.3d.v4.b32.clamp \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+
+
+// .trap variant
+def SUST_B_1D_B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r),
+ "sust.b.1d.b8.trap \t[$s, \\{$x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r),
+ "sust.b.1d.b16.trap \t[$s, \\{$x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$r),
+ "sust.b.1d.b32.trap \t[$s, \\{$x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_B64_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int64Regs:$r),
+ "sust.b.1d.b64.trap \t[$s, \\{$x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_V2B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ "sust.b.1d.v2.b8.trap \t[$s, \\{$x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_V2B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ "sust.b.1d.v2.b16.trap \t[$s, \\{$x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_V2B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$r, Int32Regs:$g),
+ "sust.b.1d.v2.b32.trap \t[$s, \\{$x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_V2B64_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int64Regs:$r, Int64Regs:$g),
+ "sust.b.1d.v2.b64.trap \t[$s, \\{$x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_V4B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g,
+ Int16Regs:$b, Int16Regs:$a),
+ "sust.b.1d.v4.b8.trap \t[$s, \\{$x\\}], \\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_1D_V4B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g,
+ Int16Regs:$b, Int16Regs:$a),
+ "sust.b.1d.v4.b16.trap \t[$s, \\{$x\\}], \\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_1D_V4B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ "sust.b.1d.v4.b32.trap \t[$s, \\{$x\\}], \\{$r, $g, $b, $a\\};",
+ []>;
+
+
+def SUST_B_1D_ARRAY_B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r),
+ "sust.b.a1d.b8.trap \t[$s, \\{$idx, $x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_ARRAY_B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r),
+ "sust.b.a1d.b16.trap \t[$s, \\{$idx, $x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_ARRAY_B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$r),
+ "sust.b.a1d.b32.trap \t[$s, \\{$idx, $x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_ARRAY_B64_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int64Regs:$r),
+ "sust.b.a1d.b64.trap \t[$s, \\{$idx, $x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_ARRAY_V2B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r,
+ Int16Regs:$g),
+ "sust.b.a1d.v2.b8.trap \t[$s, \\{$idx, $x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_ARRAY_V2B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r,
+ Int16Regs:$g),
+ "sust.b.a1d.v2.b16.trap \t[$s, \\{$idx, $x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_ARRAY_V2B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$r,
+ Int32Regs:$g),
+ "sust.b.a1d.v2.b32.trap \t[$s, \\{$idx, $x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_ARRAY_V2B64_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int64Regs:$r,
+ Int64Regs:$g),
+ "sust.b.a1d.v2.b64.trap \t[$s, \\{$idx, $x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_ARRAY_V4B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r,
+ Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.a1d.v4.b8.trap \t[$s, \\{$idx, $x\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_1D_ARRAY_V4B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r,
+ Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.a1d.v4.b16.trap \t[$s, \\{$idx, $x\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_1D_ARRAY_V4B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$r,
+ Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ "sust.b.a1d.v4.b32.trap \t[$s, \\{$idx, $x\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+
+
+def SUST_B_2D_B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ "sust.b.2d.b8.trap \t[$s, \\{$x, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ "sust.b.2d.b16.trap \t[$s, \\{$x, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r),
+ "sust.b.2d.b32.trap \t[$s, \\{$x, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_B64_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int64Regs:$r),
+ "sust.b.2d.b64.trap \t[$s, \\{$x, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_V2B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r,
+ Int16Regs:$g),
+ "sust.b.2d.v2.b8.trap \t[$s, \\{$x, $y\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_2D_V2B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r,
+ Int16Regs:$g),
+ "sust.b.2d.v2.b16.trap \t[$s, \\{$x, $y\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_2D_V2B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r,
+ Int32Regs:$g),
+ "sust.b.2d.v2.b32.trap \t[$s, \\{$x, $y\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_2D_V2B64_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int64Regs:$r,
+ Int64Regs:$g),
+ "sust.b.2d.v2.b64.trap \t[$s, \\{$x, $y\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_2D_V4B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r,
+ Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.2d.v4.b8.trap \t[$s, \\{$x, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_2D_V4B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r,
+ Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.2d.v4.b16.trap \t[$s, \\{$x, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_2D_V4B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r,
+ Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ "sust.b.2d.v4.b32.trap \t[$s, \\{$x, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+
+
+def SUST_B_2D_ARRAY_B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r),
+ "sust.b.a2d.b8.trap \t[$s, \\{$idx, $x, $y, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_ARRAY_B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r),
+ "sust.b.a2d.b16.trap \t[$s, \\{$idx, $x, $y, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_ARRAY_B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r),
+ "sust.b.a2d.b32.trap \t[$s, \\{$idx, $x, $y, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_ARRAY_B64_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int64Regs:$r),
+ "sust.b.a2d.b64.trap \t[$s, \\{$idx, $x, $y, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_ARRAY_V2B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g),
+ "sust.b.a2d.v2.b8.trap \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_2D_ARRAY_V2B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g),
+ "sust.b.a2d.v2.b16.trap \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_2D_ARRAY_V2B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g),
+ "sust.b.a2d.v2.b32.trap \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_2D_ARRAY_V2B64_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int64Regs:$r, Int64Regs:$g),
+ "sust.b.a2d.v2.b64.trap \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_2D_ARRAY_V4B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.a2d.v4.b8.trap \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_2D_ARRAY_V4B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.a2d.v4.b16.trap \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_2D_ARRAY_V4B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ "sust.b.a2d.v4.b32.trap \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+
+
+def SUST_B_3D_B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r),
+ "sust.b.3d.b8.trap \t[$s, \\{$x, $y, $z, $z\\}], \\{$r\\};",
+ []>;
+def SUST_B_3D_B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r),
+ "sust.b.3d.b16.trap \t[$s, \\{$x, $y, $z, $z\\}], \\{$r\\};",
+ []>;
+def SUST_B_3D_B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r),
+ "sust.b.3d.b32.trap \t[$s, \\{$x, $y, $z, $z\\}], \\{$r\\};",
+ []>;
+def SUST_B_3D_B64_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int64Regs:$r),
+ "sust.b.3d.b64.trap \t[$s, \\{$x, $y, $z, $z\\}], \\{$r\\};",
+ []>;
+def SUST_B_3D_V2B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g),
+ "sust.b.3d.v2.b8.trap \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_3D_V2B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g),
+ "sust.b.3d.v2.b16.trap \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_3D_V2B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g),
+ "sust.b.3d.v2.b32.trap \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_3D_V2B64_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int64Regs:$r, Int64Regs:$g),
+ "sust.b.3d.v2.b64.trap \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_3D_V4B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.3d.v4.b8.trap \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_3D_V4B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.3d.v4.b16.trap \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_3D_V4B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ "sust.b.3d.v4.b32.trap \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+
+
+// .zero variant
+def SUST_B_1D_B8_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r),
+ "sust.b.1d.b8.zero \t[$s, \\{$x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_B16_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r),
+ "sust.b.1d.b16.zero \t[$s, \\{$x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_B32_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$r),
+ "sust.b.1d.b32.zero \t[$s, \\{$x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_B64_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int64Regs:$r),
+ "sust.b.1d.b64.zero \t[$s, \\{$x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_V2B8_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ "sust.b.1d.v2.b8.zero \t[$s, \\{$x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_V2B16_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ "sust.b.1d.v2.b16.zero \t[$s, \\{$x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_V2B32_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$r, Int32Regs:$g),
+ "sust.b.1d.v2.b32.zero \t[$s, \\{$x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_V2B64_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int64Regs:$r, Int64Regs:$g),
+ "sust.b.1d.v2.b64.zero \t[$s, \\{$x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_V4B8_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g,
+ Int16Regs:$b, Int16Regs:$a),
+ "sust.b.1d.v4.b8.zero \t[$s, \\{$x\\}], \\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_1D_V4B16_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g,
+ Int16Regs:$b, Int16Regs:$a),
+ "sust.b.1d.v4.b16.zero \t[$s, \\{$x\\}], \\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_1D_V4B32_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ "sust.b.1d.v4.b32.zero \t[$s, \\{$x\\}], \\{$r, $g, $b, $a\\};",
+ []>;
+
+
+def SUST_B_1D_ARRAY_B8_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r),
+ "sust.b.a1d.b8.zero \t[$s, \\{$idx, $x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_ARRAY_B16_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r),
+ "sust.b.a1d.b16.zero \t[$s, \\{$idx, $x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_ARRAY_B32_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$r),
+ "sust.b.a1d.b32.zero \t[$s, \\{$idx, $x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_ARRAY_B64_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int64Regs:$r),
+ "sust.b.a1d.b64.zero \t[$s, \\{$idx, $x\\}], \\{$r\\};",
+ []>;
+def SUST_B_1D_ARRAY_V2B8_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r,
+ Int16Regs:$g),
+ "sust.b.a1d.v2.b8.zero \t[$s, \\{$idx, $x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_ARRAY_V2B16_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r,
+ Int16Regs:$g),
+ "sust.b.a1d.v2.b16.zero \t[$s, \\{$idx, $x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_ARRAY_V2B32_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$r,
+ Int32Regs:$g),
+ "sust.b.a1d.v2.b32.zero \t[$s, \\{$idx, $x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_ARRAY_V2B64_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int64Regs:$r,
+ Int64Regs:$g),
+ "sust.b.a1d.v2.b64.zero \t[$s, \\{$idx, $x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_1D_ARRAY_V4B8_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r,
+ Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.a1d.v4.b8.zero \t[$s, \\{$idx, $x\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_1D_ARRAY_V4B16_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r,
+ Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.a1d.v4.b16.zero \t[$s, \\{$idx, $x\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_1D_ARRAY_V4B32_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$r,
+ Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ "sust.b.a1d.v4.b32.zero \t[$s, \\{$idx, $x\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+
+
+def SUST_B_2D_B8_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ "sust.b.2d.b8.zero \t[$s, \\{$x, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_B16_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ "sust.b.2d.b16.zero \t[$s, \\{$x, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_B32_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r),
+ "sust.b.2d.b32.zero \t[$s, \\{$x, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_B64_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int64Regs:$r),
+ "sust.b.2d.b64.zero \t[$s, \\{$x, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_V2B8_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r,
+ Int16Regs:$g),
+ "sust.b.2d.v2.b8.zero \t[$s, \\{$x, $y\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_2D_V2B16_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r,
+ Int16Regs:$g),
+ "sust.b.2d.v2.b16.zero \t[$s, \\{$x, $y\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_2D_V2B32_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r,
+ Int32Regs:$g),
+ "sust.b.2d.v2.b32.zero \t[$s, \\{$x, $y\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_2D_V2B64_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int64Regs:$r,
+ Int64Regs:$g),
+ "sust.b.2d.v2.b64.zero \t[$s, \\{$x, $y\\}], \\{$r, $g\\};",
+ []>;
+def SUST_B_2D_V4B8_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r,
+ Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.2d.v4.b8.zero \t[$s, \\{$x, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_2D_V4B16_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r,
+ Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.2d.v4.b16.zero \t[$s, \\{$x, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_2D_V4B32_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r,
+ Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ "sust.b.2d.v4.b32.zero \t[$s, \\{$x, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+
+
+def SUST_B_2D_ARRAY_B8_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r),
+ "sust.b.a2d.b8.zero \t[$s, \\{$idx, $x, $y, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_ARRAY_B16_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r),
+ "sust.b.a2d.b16.zero \t[$s, \\{$idx, $x, $y, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_ARRAY_B32_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r),
+ "sust.b.a2d.b32.zero \t[$s, \\{$idx, $x, $y, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_ARRAY_B64_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int64Regs:$r),
+ "sust.b.a2d.b64.zero \t[$s, \\{$idx, $x, $y, $y\\}], \\{$r\\};",
+ []>;
+def SUST_B_2D_ARRAY_V2B8_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g),
+ "sust.b.a2d.v2.b8.zero \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_2D_ARRAY_V2B16_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g),
+ "sust.b.a2d.v2.b16.zero \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_2D_ARRAY_V2B32_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g),
+ "sust.b.a2d.v2.b32.zero \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_2D_ARRAY_V2B64_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int64Regs:$r, Int64Regs:$g),
+ "sust.b.a2d.v2.b64.zero \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_2D_ARRAY_V4B8_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.a2d.v4.b8.zero \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_2D_ARRAY_V4B16_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.a2d.v4.b16.zero \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_2D_ARRAY_V4B32_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ "sust.b.a2d.v4.b32.zero \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+
+
+def SUST_B_3D_B8_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r),
+ "sust.b.3d.b8.zero \t[$s, \\{$x, $y, $z, $z\\}], \\{$r\\};",
+ []>;
+def SUST_B_3D_B16_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r),
+ "sust.b.3d.b16.zero \t[$s, \\{$x, $y, $z, $z\\}], \\{$r\\};",
+ []>;
+def SUST_B_3D_B32_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r),
+ "sust.b.3d.b32.zero \t[$s, \\{$x, $y, $z, $z\\}], \\{$r\\};",
+ []>;
+def SUST_B_3D_B64_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int64Regs:$r),
+ "sust.b.3d.b64.zero \t[$s, \\{$x, $y, $z, $z\\}], \\{$r\\};",
+ []>;
+def SUST_B_3D_V2B8_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g),
+ "sust.b.3d.v2.b8.zero \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_3D_V2B16_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g),
+ "sust.b.3d.v2.b16.zero \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_3D_V2B32_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g),
+ "sust.b.3d.v2.b32.zero \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_3D_V2B64_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int64Regs:$r, Int64Regs:$g),
+ "sust.b.3d.v2.b64.zero \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_B_3D_V4B8_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.3d.v4.b8.zero \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_3D_V4B16_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.b.3d.v4.b16.zero \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_B_3D_V4B32_ZERO
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ "sust.b.3d.v4.b32.zero \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+
+
+
+// Formatted
+
+def SUST_P_1D_B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r),
+ "sust.p.1d.b8.trap \t[$s, \\{$x\\}], \\{$r\\};",
+ []>;
+def SUST_P_1D_B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r),
+ "sust.p.1d.b16.trap \t[$s, \\{$x\\}], \\{$r\\};",
+ []>;
+def SUST_P_1D_B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$r),
+ "sust.p.1d.b32.trap \t[$s, \\{$x\\}], \\{$r\\};",
+ []>;
+def SUST_P_1D_V2B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ "sust.p.1d.v2.b8.trap \t[$s, \\{$x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_P_1D_V2B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ "sust.p.1d.v2.b16.trap \t[$s, \\{$x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_P_1D_V2B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$r, Int32Regs:$g),
+ "sust.p.1d.v2.b32.trap \t[$s, \\{$x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_P_1D_V4B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g,
+ Int16Regs:$b, Int16Regs:$a),
+ "sust.p.1d.v4.b8.trap \t[$s, \\{$x\\}], \\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_P_1D_V4B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g,
+ Int16Regs:$b, Int16Regs:$a),
+ "sust.p.1d.v4.b16.trap \t[$s, \\{$x\\}], \\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_P_1D_V4B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$r, Int32Regs:$g,
+ Int32Regs:$b, Int32Regs:$a),
+ "sust.p.1d.v4.b32.trap \t[$s, \\{$x\\}], \\{$r, $g, $b, $a\\};",
+ []>;
+
+
+def SUST_P_1D_ARRAY_B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r),
+ "sust.p.a1d.b8.trap \t[$s, \\{$idx, $x\\}], \\{$r\\};",
+ []>;
+def SUST_P_1D_ARRAY_B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r),
+ "sust.p.a1d.b16.trap \t[$s, \\{$idx, $x\\}], \\{$r\\};",
+ []>;
+def SUST_P_1D_ARRAY_B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$r),
+ "sust.p.a1d.b32.trap \t[$s, \\{$idx, $x\\}], \\{$r\\};",
+ []>;
+def SUST_P_1D_ARRAY_V2B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r,
+ Int16Regs:$g),
+ "sust.p.a1d.v2.b8.trap \t[$s, \\{$idx, $x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_P_1D_ARRAY_V2B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r,
+ Int16Regs:$g),
+ "sust.p.a1d.v2.b16.trap \t[$s, \\{$idx, $x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_P_1D_ARRAY_V2B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$r,
+ Int32Regs:$g),
+ "sust.p.a1d.v2.b32.trap \t[$s, \\{$idx, $x\\}], \\{$r, $g\\};",
+ []>;
+def SUST_P_1D_ARRAY_V4B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r,
+ Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.p.a1d.v4.b8.trap \t[$s, \\{$idx, $x\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_P_1D_ARRAY_V4B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int16Regs:$r,
+ Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.p.a1d.v4.b16.trap \t[$s, \\{$idx, $x\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_P_1D_ARRAY_V4B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$r,
+ Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ "sust.p.a1d.v4.b32.trap \t[$s, \\{$idx, $x\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+
+
+def SUST_P_2D_B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ "sust.p.2d.b8.trap \t[$s, \\{$x, $y\\}], \\{$r\\};",
+ []>;
+def SUST_P_2D_B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ "sust.p.2d.b16.trap \t[$s, \\{$x, $y\\}], \\{$r\\};",
+ []>;
+def SUST_P_2D_B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r),
+ "sust.p.2d.b32.trap \t[$s, \\{$x, $y\\}], \\{$r\\};",
+ []>;
+def SUST_P_2D_V2B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r,
+ Int16Regs:$g),
+ "sust.p.2d.v2.b8.trap \t[$s, \\{$x, $y\\}], \\{$r, $g\\};",
+ []>;
+def SUST_P_2D_V2B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r,
+ Int16Regs:$g),
+ "sust.p.2d.v2.b16.trap \t[$s, \\{$x, $y\\}], \\{$r, $g\\};",
+ []>;
+def SUST_P_2D_V2B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r,
+ Int32Regs:$g),
+ "sust.p.2d.v2.b32.trap \t[$s, \\{$x, $y\\}], \\{$r, $g\\};",
+ []>;
+def SUST_P_2D_V4B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r,
+ Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.p.2d.v4.b8.trap \t[$s, \\{$x, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_P_2D_V4B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r,
+ Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.p.2d.v4.b16.trap \t[$s, \\{$x, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_P_2D_V4B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r,
+ Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ "sust.p.2d.v4.b32.trap \t[$s, \\{$x, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+
+
+def SUST_P_2D_ARRAY_B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r),
+ "sust.p.a2d.b8.trap \t[$s, \\{$idx, $x, $y, $y\\}], \\{$r\\};",
+ []>;
+def SUST_P_2D_ARRAY_B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r),
+ "sust.p.a2d.b16.trap \t[$s, \\{$idx, $x, $y, $y\\}], \\{$r\\};",
+ []>;
+def SUST_P_2D_ARRAY_B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r),
+ "sust.p.a2d.b32.trap \t[$s, \\{$idx, $x, $y, $y\\}], \\{$r\\};",
+ []>;
+def SUST_P_2D_ARRAY_V2B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g),
+ "sust.p.a2d.v2.b8.trap \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_P_2D_ARRAY_V2B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g),
+ "sust.p.a2d.v2.b16.trap \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_P_2D_ARRAY_V2B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g),
+ "sust.p.a2d.v2.b32.trap \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_P_2D_ARRAY_V4B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.p.a2d.v4.b8.trap \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_P_2D_ARRAY_V4B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.p.a2d.v4.b16.trap \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_P_2D_ARRAY_V4B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$idx, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ "sust.p.a2d.v4.b32.trap \t[$s, \\{$idx, $x, $y, $y\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+
+
+def SUST_P_3D_B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r),
+ "sust.p.3d.b8.trap \t[$s, \\{$x, $y, $z, $z\\}], \\{$r\\};",
+ []>;
+def SUST_P_3D_B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r),
+ "sust.p.3d.b16.trap \t[$s, \\{$x, $y, $z, $z\\}], \\{$r\\};",
+ []>;
+def SUST_P_3D_B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r),
+ "sust.p.3d.b32.trap \t[$s, \\{$x, $y, $z, $z\\}], \\{$r\\};",
+ []>;
+def SUST_P_3D_V2B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g),
+ "sust.p.3d.v2.b8.trap \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_P_3D_V2B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g),
+ "sust.p.3d.v2.b16.trap \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_P_3D_V2B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g),
+ "sust.p.3d.v2.b32.trap \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g\\};",
+ []>;
+def SUST_P_3D_V4B8_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.p.3d.v4.b8.trap \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_P_3D_V4B16_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ "sust.p.3d.v4.b16.trap \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+def SUST_P_3D_V4B32_TRAP
+ : NVPTXInst<(outs),
+ (ins Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ "sust.p.3d.v4.b32.trap \t[$s, \\{$x, $y, $z, $z\\}], "
+ "\\{$r, $g, $b, $a\\};",
+ []>;
+}
+
+// Surface store instruction patterns
+// It is not clear why these patterns cannot simply be included in the
+// instruction definitions; TableGen complains of type errors when they are.
+
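+// As a sketch of how these patterns are exercised (assuming the usual NVVM
+// intrinsic signatures), an IR call such as
+//   call void @llvm.nvvm.sust.b.1d.i8.clamp(i64 %surf, i32 %x, i16 %val)
+// is matched by the first pattern below and selected to SUST_B_1D_B8_CLAMP,
+// whose asm string then prints roughly as
+//   sust.b.1d.b8.clamp   [%rd1, {%r1}], {%rs1};
+// Keeping the patterns separate from the NVPTXInst definitions sidesteps the
+// TableGen type errors noted above while yielding the same selection.
+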
+// .clamp variant
+def : Pat<(int_nvvm_sust_b_1d_i8_clamp
+ Int64Regs:$s, Int32Regs:$x, Int16Regs:$r),
+ (SUST_B_1D_B8_CLAMP Int64Regs:$s, Int32Regs:$x, Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_i16_clamp
+ Int64Regs:$s, Int32Regs:$x, Int16Regs:$r),
+ (SUST_B_1D_B16_CLAMP Int64Regs:$s, Int32Regs:$x, Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_i32_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$r),
+ (SUST_B_1D_B32_CLAMP Int64Regs:$s, Int32Regs:$x, Int32Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_i64_clamp
+ Int64Regs:$s, Int32Regs:$x, Int64Regs:$r),
+ (SUST_B_1D_B64_CLAMP Int64Regs:$s, Int32Regs:$x, Int64Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v2i8_clamp
+ Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_1D_V2B8_CLAMP Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v2i16_clamp
+ Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_1D_V2B16_CLAMP Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v2i32_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$r, Int32Regs:$g),
+ (SUST_B_1D_V2B32_CLAMP Int64Regs:$s, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v2i64_clamp
+ Int64Regs:$s, Int32Regs:$x, Int64Regs:$r, Int64Regs:$g),
+ (SUST_B_1D_V2B64_CLAMP Int64Regs:$s, Int32Regs:$x,
+ Int64Regs:$r, Int64Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v4i8_clamp
+ Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_1D_V4B8_CLAMP Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v4i16_clamp
+ Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_1D_V4B16_CLAMP Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v4i32_clamp
+ Int64Regs:$s, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (SUST_B_1D_V4B32_CLAMP Int64Regs:$s, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+
+
+
+def : Pat<(int_nvvm_sust_b_1d_array_i8_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int16Regs:$r),
+ (SUST_B_1D_ARRAY_B8_CLAMP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_i16_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int16Regs:$r),
+ (SUST_B_1D_ARRAY_B16_CLAMP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_i32_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$r),
+ (SUST_B_1D_ARRAY_B32_CLAMP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_i64_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int64Regs:$r),
+ (SUST_B_1D_ARRAY_B64_CLAMP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int64Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v2i8_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_1D_ARRAY_V2B8_CLAMP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v2i16_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_1D_ARRAY_V2B16_CLAMP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v2i32_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$r, Int32Regs:$g),
+ (SUST_B_1D_ARRAY_V2B32_CLAMP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v2i64_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int64Regs:$r, Int64Regs:$g),
+ (SUST_B_1D_ARRAY_V2B64_CLAMP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int64Regs:$r, Int64Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v4i8_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_1D_ARRAY_V4B8_CLAMP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v4i16_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_1D_ARRAY_V4B16_CLAMP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v4i32_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (SUST_B_1D_ARRAY_V4B32_CLAMP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+
+
+
+def : Pat<(int_nvvm_sust_b_2d_i8_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ (SUST_B_2D_B8_CLAMP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_i16_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ (SUST_B_2D_B16_CLAMP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_i32_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r),
+ (SUST_B_2D_B32_CLAMP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_i64_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int64Regs:$r),
+ (SUST_B_2D_B64_CLAMP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int64Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v2i8_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_2D_V2B8_CLAMP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v2i16_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_2D_V2B16_CLAMP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v2i32_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r, Int32Regs:$g),
+ (SUST_B_2D_V2B32_CLAMP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v2i64_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int64Regs:$r, Int64Regs:$g),
+ (SUST_B_2D_V2B64_CLAMP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int64Regs:$r, Int64Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v4i8_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_2D_V4B8_CLAMP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v4i16_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_2D_V4B16_CLAMP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v4i32_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (SUST_B_2D_V4B32_CLAMP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+
+
+
+def : Pat<(int_nvvm_sust_b_2d_array_i8_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ (SUST_B_2D_ARRAY_B8_CLAMP Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_i16_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ (SUST_B_2D_ARRAY_B16_CLAMP Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_i32_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r),
+ (SUST_B_2D_ARRAY_B32_CLAMP Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_i64_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int64Regs:$r),
+ (SUST_B_2D_ARRAY_B64_CLAMP Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int64Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v2i8_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_2D_ARRAY_V2B8_CLAMP Int64Regs:$s, Int32Regs:$l,
+ Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v2i16_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_2D_ARRAY_V2B16_CLAMP Int64Regs:$s, Int32Regs:$l,
+ Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v2i32_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r,
+ Int32Regs:$g),
+ (SUST_B_2D_ARRAY_V2B32_CLAMP Int64Regs:$s, Int32Regs:$l,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$r, Int32Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v2i64_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int64Regs:$r,
+ Int64Regs:$g),
+ (SUST_B_2D_ARRAY_V2B64_CLAMP Int64Regs:$s, Int32Regs:$l,
+ Int32Regs:$x, Int32Regs:$y, Int64Regs:$r, Int64Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v4i8_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_2D_ARRAY_V4B8_CLAMP Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v4i16_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_2D_ARRAY_V4B16_CLAMP Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v4i32_clamp
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (SUST_B_2D_ARRAY_V4B32_CLAMP Int64Regs:$s, Int32Regs:$l,
+ Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+
+
+
+def : Pat<(int_nvvm_sust_b_3d_i8_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r),
+ (SUST_B_3D_B8_CLAMP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_3d_i16_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r),
+ (SUST_B_3D_B16_CLAMP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_3d_i32_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r),
+ (SUST_B_3D_B32_CLAMP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_3d_i64_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int64Regs:$r),
+ (SUST_B_3D_B64_CLAMP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int64Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v2i8_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_3D_V2B8_CLAMP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v2i16_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_3D_V2B16_CLAMP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v2i32_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g),
+ (SUST_B_3D_V2B32_CLAMP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v2i64_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int64Regs:$r, Int64Regs:$g),
+ (SUST_B_3D_V2B64_CLAMP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int64Regs:$r, Int64Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v4i8_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_3D_V4B8_CLAMP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v4i16_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_3D_V4B16_CLAMP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v4i32_clamp
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (SUST_B_3D_V4B32_CLAMP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+
+
+// .trap variant
+def : Pat<(int_nvvm_sust_b_1d_i8_trap
+ Int64Regs:$s, Int32Regs:$x, Int16Regs:$r),
+ (SUST_B_1D_B8_TRAP Int64Regs:$s, Int32Regs:$x, Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_i16_trap
+ Int64Regs:$s, Int32Regs:$x, Int16Regs:$r),
+ (SUST_B_1D_B16_TRAP Int64Regs:$s, Int32Regs:$x, Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_i32_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$r),
+ (SUST_B_1D_B32_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_i64_trap
+ Int64Regs:$s, Int32Regs:$x, Int64Regs:$r),
+ (SUST_B_1D_B64_TRAP Int64Regs:$s, Int32Regs:$x, Int64Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v2i8_trap
+ Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_1D_V2B8_TRAP Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v2i16_trap
+ Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_1D_V2B16_TRAP Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v2i32_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$r, Int32Regs:$g),
+ (SUST_B_1D_V2B32_TRAP Int64Regs:$s, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v2i64_trap
+ Int64Regs:$s, Int32Regs:$x, Int64Regs:$r, Int64Regs:$g),
+ (SUST_B_1D_V2B64_TRAP Int64Regs:$s, Int32Regs:$x,
+ Int64Regs:$r, Int64Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v4i8_trap
+ Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_1D_V4B8_TRAP Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v4i16_trap
+ Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_1D_V4B16_TRAP Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v4i32_trap
+ Int64Regs:$s, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (SUST_B_1D_V4B32_TRAP Int64Regs:$s, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+
+
+
+def : Pat<(int_nvvm_sust_b_1d_array_i8_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int16Regs:$r),
+ (SUST_B_1D_ARRAY_B8_TRAP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_i16_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int16Regs:$r),
+ (SUST_B_1D_ARRAY_B16_TRAP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_i32_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$r),
+ (SUST_B_1D_ARRAY_B32_TRAP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_i64_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int64Regs:$r),
+ (SUST_B_1D_ARRAY_B64_TRAP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int64Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v2i8_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_1D_ARRAY_V2B8_TRAP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v2i16_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_1D_ARRAY_V2B16_TRAP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v2i32_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$r, Int32Regs:$g),
+ (SUST_B_1D_ARRAY_V2B32_TRAP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v2i64_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int64Regs:$r, Int64Regs:$g),
+ (SUST_B_1D_ARRAY_V2B64_TRAP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int64Regs:$r, Int64Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v4i8_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_1D_ARRAY_V4B8_TRAP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v4i16_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_1D_ARRAY_V4B16_TRAP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v4i32_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (SUST_B_1D_ARRAY_V4B32_TRAP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+
+
+
+def : Pat<(int_nvvm_sust_b_2d_i8_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ (SUST_B_2D_B8_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_i16_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ (SUST_B_2D_B16_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_i32_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r),
+ (SUST_B_2D_B32_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_i64_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int64Regs:$r),
+ (SUST_B_2D_B64_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int64Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v2i8_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_2D_V2B8_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v2i16_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_2D_V2B16_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v2i32_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r, Int32Regs:$g),
+ (SUST_B_2D_V2B32_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v2i64_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int64Regs:$r, Int64Regs:$g),
+ (SUST_B_2D_V2B64_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int64Regs:$r, Int64Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v4i8_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_2D_V4B8_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v4i16_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_2D_V4B16_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v4i32_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (SUST_B_2D_V4B32_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+
+
+
+def : Pat<(int_nvvm_sust_b_2d_array_i8_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ (SUST_B_2D_ARRAY_B8_TRAP Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_i16_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ (SUST_B_2D_ARRAY_B16_TRAP Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_i32_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r),
+ (SUST_B_2D_ARRAY_B32_TRAP Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_i64_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int64Regs:$r),
+ (SUST_B_2D_ARRAY_B64_TRAP Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int64Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v2i8_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_2D_ARRAY_V2B8_TRAP Int64Regs:$s, Int32Regs:$l,
+ Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v2i16_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_2D_ARRAY_V2B16_TRAP Int64Regs:$s, Int32Regs:$l,
+ Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v2i32_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r,
+ Int32Regs:$g),
+ (SUST_B_2D_ARRAY_V2B32_TRAP Int64Regs:$s, Int32Regs:$l,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$r, Int32Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v2i64_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int64Regs:$r,
+ Int64Regs:$g),
+ (SUST_B_2D_ARRAY_V2B64_TRAP Int64Regs:$s, Int32Regs:$l,
+ Int32Regs:$x, Int32Regs:$y, Int64Regs:$r, Int64Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v4i8_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_2D_ARRAY_V4B8_TRAP Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v4i16_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_2D_ARRAY_V4B16_TRAP Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v4i32_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (SUST_B_2D_ARRAY_V4B32_TRAP Int64Regs:$s, Int32Regs:$l,
+ Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+
+
+
+def : Pat<(int_nvvm_sust_b_3d_i8_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r),
+ (SUST_B_3D_B8_TRAP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_3d_i16_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r),
+ (SUST_B_3D_B16_TRAP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_3d_i32_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r),
+ (SUST_B_3D_B32_TRAP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_3d_i64_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int64Regs:$r),
+ (SUST_B_3D_B64_TRAP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int64Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v2i8_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_3D_V2B8_TRAP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v2i16_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_3D_V2B16_TRAP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v2i32_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g),
+ (SUST_B_3D_V2B32_TRAP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v2i64_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int64Regs:$r, Int64Regs:$g),
+ (SUST_B_3D_V2B64_TRAP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int64Regs:$r, Int64Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v4i8_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_3D_V4B8_TRAP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v4i16_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_3D_V4B16_TRAP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v4i32_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (SUST_B_3D_V4B32_TRAP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+
+
+// .zero variant
+def : Pat<(int_nvvm_sust_b_1d_i8_zero
+ Int64Regs:$s, Int32Regs:$x, Int16Regs:$r),
+ (SUST_B_1D_B8_ZERO Int64Regs:$s, Int32Regs:$x, Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_i16_zero
+ Int64Regs:$s, Int32Regs:$x, Int16Regs:$r),
+ (SUST_B_1D_B16_ZERO Int64Regs:$s, Int32Regs:$x, Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_i32_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$r),
+ (SUST_B_1D_B32_ZERO Int64Regs:$s, Int32Regs:$x, Int32Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_i64_zero
+ Int64Regs:$s, Int32Regs:$x, Int64Regs:$r),
+ (SUST_B_1D_B64_ZERO Int64Regs:$s, Int32Regs:$x, Int64Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v2i8_zero
+ Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_1D_V2B8_ZERO Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v2i16_zero
+ Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_1D_V2B16_ZERO Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v2i32_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$r, Int32Regs:$g),
+ (SUST_B_1D_V2B32_ZERO Int64Regs:$s, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v2i64_zero
+ Int64Regs:$s, Int32Regs:$x, Int64Regs:$r, Int64Regs:$g),
+ (SUST_B_1D_V2B64_ZERO Int64Regs:$s, Int32Regs:$x,
+ Int64Regs:$r, Int64Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v4i8_zero
+ Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_1D_V4B8_ZERO Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v4i16_zero
+ Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_1D_V4B16_ZERO Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_1d_v4i32_zero
+ Int64Regs:$s, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (SUST_B_1D_V4B32_ZERO Int64Regs:$s, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+
+
+
+def : Pat<(int_nvvm_sust_b_1d_array_i8_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int16Regs:$r),
+ (SUST_B_1D_ARRAY_B8_ZERO Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_i16_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int16Regs:$r),
+ (SUST_B_1D_ARRAY_B16_ZERO Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_i32_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$r),
+ (SUST_B_1D_ARRAY_B32_ZERO Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_i64_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int64Regs:$r),
+ (SUST_B_1D_ARRAY_B64_ZERO Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int64Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v2i8_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_1D_ARRAY_V2B8_ZERO Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v2i16_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_1D_ARRAY_V2B16_ZERO Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v2i32_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$r, Int32Regs:$g),
+ (SUST_B_1D_ARRAY_V2B32_ZERO Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v2i64_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int64Regs:$r, Int64Regs:$g),
+ (SUST_B_1D_ARRAY_V2B64_ZERO Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int64Regs:$r, Int64Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v4i8_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_1D_ARRAY_V4B8_ZERO Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v4i16_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_1D_ARRAY_V4B16_ZERO Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_1d_array_v4i32_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (SUST_B_1D_ARRAY_V4B32_ZERO Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+
+
+
+def : Pat<(int_nvvm_sust_b_2d_i8_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ (SUST_B_2D_B8_ZERO Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_i16_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ (SUST_B_2D_B16_ZERO Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_i32_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r),
+ (SUST_B_2D_B32_ZERO Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_i64_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int64Regs:$r),
+ (SUST_B_2D_B64_ZERO Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int64Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v2i8_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_2D_V2B8_ZERO Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v2i16_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_2D_V2B16_ZERO Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v2i32_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r, Int32Regs:$g),
+ (SUST_B_2D_V2B32_ZERO Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v2i64_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int64Regs:$r, Int64Regs:$g),
+ (SUST_B_2D_V2B64_ZERO Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int64Regs:$r, Int64Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v4i8_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_2D_V4B8_ZERO Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v4i16_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_2D_V4B16_ZERO Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_2d_v4i32_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (SUST_B_2D_V4B32_ZERO Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+
+
+
+def : Pat<(int_nvvm_sust_b_2d_array_i8_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ (SUST_B_2D_ARRAY_B8_ZERO Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_i16_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ (SUST_B_2D_ARRAY_B16_ZERO Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_i32_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r),
+ (SUST_B_2D_ARRAY_B32_ZERO Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_i64_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int64Regs:$r),
+ (SUST_B_2D_ARRAY_B64_ZERO Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int64Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v2i8_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_2D_ARRAY_V2B8_ZERO Int64Regs:$s, Int32Regs:$l,
+ Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v2i16_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_2D_ARRAY_V2B16_ZERO Int64Regs:$s, Int32Regs:$l,
+ Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v2i32_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r,
+ Int32Regs:$g),
+ (SUST_B_2D_ARRAY_V2B32_ZERO Int64Regs:$s, Int32Regs:$l,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$r, Int32Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v2i64_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int64Regs:$r,
+ Int64Regs:$g),
+ (SUST_B_2D_ARRAY_V2B64_ZERO Int64Regs:$s, Int32Regs:$l,
+ Int32Regs:$x, Int32Regs:$y, Int64Regs:$r, Int64Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v4i8_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_2D_ARRAY_V4B8_ZERO Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v4i16_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_2D_ARRAY_V4B16_ZERO Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_2d_array_v4i32_zero
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (SUST_B_2D_ARRAY_V4B32_ZERO Int64Regs:$s, Int32Regs:$l,
+ Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+
+
+
+def : Pat<(int_nvvm_sust_b_3d_i8_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r),
+ (SUST_B_3D_B8_ZERO Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_3d_i16_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r),
+ (SUST_B_3D_B16_ZERO Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_3d_i32_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r),
+ (SUST_B_3D_B32_ZERO Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_3d_i64_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int64Regs:$r),
+ (SUST_B_3D_B64_ZERO Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int64Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v2i8_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_3D_V2B8_ZERO Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v2i16_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g),
+ (SUST_B_3D_V2B16_ZERO Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v2i32_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g),
+ (SUST_B_3D_V2B32_ZERO Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v2i64_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int64Regs:$r, Int64Regs:$g),
+ (SUST_B_3D_V2B64_ZERO Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int64Regs:$r, Int64Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v4i8_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_3D_V4B8_ZERO Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v4i16_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_B_3D_V4B16_ZERO Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_b_3d_v4i32_zero
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (SUST_B_3D_V4B32_ZERO Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+
+
+
+
+def : Pat<(int_nvvm_sust_p_1d_i8_trap
+ Int64Regs:$s, Int32Regs:$x, Int16Regs:$r),
+ (SUST_P_1D_B8_TRAP Int64Regs:$s, Int32Regs:$x, Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_p_1d_i16_trap
+ Int64Regs:$s, Int32Regs:$x, Int16Regs:$r),
+ (SUST_P_1D_B16_TRAP Int64Regs:$s, Int32Regs:$x, Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_p_1d_i32_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$r),
+ (SUST_P_1D_B32_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_p_1d_v2i8_trap
+ Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ (SUST_P_1D_V2B8_TRAP Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_p_1d_v2i16_trap
+ Int64Regs:$s, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ (SUST_P_1D_V2B16_TRAP Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_p_1d_v2i32_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$r, Int32Regs:$g),
+ (SUST_P_1D_V2B32_TRAP Int64Regs:$s, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_p_1d_v4i8_trap
+ Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_P_1D_V4B8_TRAP Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_p_1d_v4i16_trap
+ Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_P_1D_V4B16_TRAP Int64Regs:$s, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_p_1d_v4i32_trap
+ Int64Regs:$s, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (SUST_P_1D_V4B32_TRAP Int64Regs:$s, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+
+
+
+def : Pat<(int_nvvm_sust_p_1d_array_i8_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int16Regs:$r),
+ (SUST_P_1D_ARRAY_B8_TRAP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_p_1d_array_i16_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int16Regs:$r),
+ (SUST_P_1D_ARRAY_B16_TRAP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_p_1d_array_i32_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$r),
+ (SUST_P_1D_ARRAY_B32_TRAP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_p_1d_array_v2i8_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ (SUST_P_1D_ARRAY_V2B8_TRAP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_p_1d_array_v2i16_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int16Regs:$r, Int16Regs:$g),
+ (SUST_P_1D_ARRAY_V2B16_TRAP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_p_1d_array_v2i32_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$r, Int32Regs:$g),
+ (SUST_P_1D_ARRAY_V2B32_TRAP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_p_1d_array_v4i8_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_P_1D_ARRAY_V4B8_TRAP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_p_1d_array_v4i16_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_P_1D_ARRAY_V4B16_TRAP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_p_1d_array_v4i32_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (SUST_P_1D_ARRAY_V4B32_TRAP Int64Regs:$s, Int32Regs:$l, Int32Regs:$x,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+
+
+
+def : Pat<(int_nvvm_sust_p_2d_i8_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ (SUST_P_2D_B8_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_p_2d_i16_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ (SUST_P_2D_B16_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_p_2d_i32_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r),
+ (SUST_P_2D_B32_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_p_2d_v2i8_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r, Int16Regs:$g),
+ (SUST_P_2D_V2B8_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_p_2d_v2i16_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r, Int16Regs:$g),
+ (SUST_P_2D_V2B16_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_p_2d_v2i32_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r, Int32Regs:$g),
+ (SUST_P_2D_V2B32_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_p_2d_v4i8_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_P_2D_V4B8_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_p_2d_v4i16_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_P_2D_V4B16_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_p_2d_v4i32_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (SUST_P_2D_V4B32_TRAP Int64Regs:$s, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+
+
+
+def : Pat<(int_nvvm_sust_p_2d_array_i8_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ (SUST_P_2D_ARRAY_B8_TRAP Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_p_2d_array_i16_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int16Regs:$r),
+ (SUST_P_2D_ARRAY_B16_TRAP Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_p_2d_array_i32_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r),
+ (SUST_P_2D_ARRAY_B32_TRAP Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_p_2d_array_v2i8_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g),
+ (SUST_P_2D_ARRAY_V2B8_TRAP Int64Regs:$s, Int32Regs:$l,
+ Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_p_2d_array_v2i16_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g),
+ (SUST_P_2D_ARRAY_V2B16_TRAP Int64Regs:$s, Int32Regs:$l,
+ Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_p_2d_array_v2i32_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y, Int32Regs:$r,
+ Int32Regs:$g),
+ (SUST_P_2D_ARRAY_V2B32_TRAP Int64Regs:$s, Int32Regs:$l,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$r, Int32Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_p_2d_array_v4i8_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_P_2D_ARRAY_V4B8_TRAP Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_p_2d_array_v4i16_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_P_2D_ARRAY_V4B16_TRAP Int64Regs:$s,
+ Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_p_2d_array_v4i32_trap
+ Int64Regs:$s, Int32Regs:$l, Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (SUST_P_2D_ARRAY_V4B32_TRAP Int64Regs:$s, Int32Regs:$l,
+ Int32Regs:$x, Int32Regs:$y,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+
+
+
+def : Pat<(int_nvvm_sust_p_3d_i8_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r),
+ (SUST_P_3D_B8_TRAP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_p_3d_i16_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r),
+ (SUST_P_3D_B16_TRAP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_p_3d_i32_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r),
+ (SUST_P_3D_B32_TRAP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r)>;
+
+def : Pat<(int_nvvm_sust_p_3d_v2i8_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g),
+ (SUST_P_3D_V2B8_TRAP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_p_3d_v2i16_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g),
+ (SUST_P_3D_V2B16_TRAP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_p_3d_v2i32_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g),
+ (SUST_P_3D_V2B32_TRAP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g)>;
+
+def : Pat<(int_nvvm_sust_p_3d_v4i8_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_P_3D_V4B8_TRAP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_p_3d_v4i16_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a),
+ (SUST_P_3D_V4B16_TRAP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int16Regs:$r, Int16Regs:$g, Int16Regs:$b, Int16Regs:$a)>;
+
+def : Pat<(int_nvvm_sust_p_3d_v4i32_trap
+ Int64Regs:$s, Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a),
+ (SUST_P_3D_V4B32_TRAP Int64Regs:$s,
+ Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
+ Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+
+
//===-- Old PTX Back-end Intrinsics ---------------------------------------===//
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
index 7c257b4..f0c3663 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
@@ -16,12 +16,12 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
-#include "llvm/Support/InstIterator.h"
using namespace llvm;
@@ -104,7 +104,7 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
SmallVector<MemTransferInst *, 4> aggrMemcpys;
SmallVector<MemSetInst *, 4> aggrMemsets;
- DataLayout *TD = &getAnalysis<DataLayout>();
+ const DataLayout *DL = &getAnalysis<DataLayoutPass>().getDataLayout();
LLVMContext &Context = F.getParent()->getContext();
//
@@ -120,10 +120,10 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
if (load->hasOneUse() == false)
continue;
- if (TD->getTypeStoreSize(load->getType()) < MaxAggrCopySize)
+ if (DL->getTypeStoreSize(load->getType()) < MaxAggrCopySize)
continue;
- User *use = *(load->use_begin());
+ User *use = load->user_back();
if (StoreInst *store = dyn_cast<StoreInst>(use)) {
if (store->getOperand(0) != load) //getValueOperand
continue;
@@ -163,10 +163,10 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
//
for (unsigned i = 0, e = aggrLoads.size(); i != e; ++i) {
LoadInst *load = aggrLoads[i];
- StoreInst *store = dyn_cast<StoreInst>(*load->use_begin());
+ StoreInst *store = dyn_cast<StoreInst>(*load->user_begin());
Value *srcAddr = load->getOperand(0);
Value *dstAddr = store->getOperand(1);
- unsigned numLoads = TD->getTypeStoreSize(load->getType());
+ unsigned numLoads = DL->getTypeStoreSize(load->getType());
Value *len = ConstantInt::get(Type::getInt32Ty(Context), numLoads);
convertTransferToLoop(store, srcAddr, dstAddr, len, load->isVolatile(),
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.h b/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.h
index 286e753..5ec1fc9 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.h
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.h
@@ -27,16 +27,17 @@ struct NVPTXLowerAggrCopies : public FunctionPass {
NVPTXLowerAggrCopies() : FunctionPass(ID) {}
- void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<DataLayout>();
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<DataLayoutPass>();
+ AU.addPreserved("stack-protector");
AU.addPreserved<MachineFunctionAnalysis>();
}
- virtual bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
static const unsigned MaxAggrCopySize = 128;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Lower aggregate copies/intrinsics into loops";
}
};
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXMCExpr.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXMCExpr.cpp
index ca24764..137248b 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXMCExpr.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXMCExpr.cpp
@@ -7,13 +7,14 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "nvptx-mcexpr"
#include "NVPTXMCExpr.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
using namespace llvm;
+#define DEBUG_TYPE "nvptx-mcexpr"
+
const NVPTXFloatMCExpr*
NVPTXFloatMCExpr::Create(VariantKind Kind, APFloat Flt, MCContext &Ctx) {
return new (Ctx) NVPTXFloatMCExpr(Kind, Flt);
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXMCExpr.h b/contrib/llvm/lib/Target/NVPTX/NVPTXMCExpr.h
index 0efb231..5547649 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXMCExpr.h
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXMCExpr.h
@@ -61,18 +61,18 @@ public:
/// @}
- void PrintImpl(raw_ostream &OS) const;
+ void PrintImpl(raw_ostream &OS) const override;
bool EvaluateAsRelocatableImpl(MCValue &Res,
- const MCAsmLayout *Layout) const {
+ const MCAsmLayout *Layout) const override {
return false;
}
- void AddValueSymbols(MCAssembler *) const {};
- const MCSection *FindAssociatedSection() const {
- return NULL;
+ void visitUsedExpr(MCStreamer &Streamer) const override {};
+ const MCSection *FindAssociatedSection() const override {
+ return nullptr;
}
// There are no TLS NVPTXMCExprs at the moment.
- void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {}
+ void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const override {}
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::Target;
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXMachineFunctionInfo.h b/contrib/llvm/lib/Target/NVPTX/NVPTXMachineFunctionInfo.h
new file mode 100644
index 0000000..67fb390
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXMachineFunctionInfo.h
@@ -0,0 +1,46 @@
+//===-- NVPTXMachineFunctionInfo.h - NVPTX-specific Function Info --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class is attached to a MachineFunction instance and tracks target-
+// dependent information
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/MachineFunction.h"
+
+namespace llvm {
+class NVPTXMachineFunctionInfo : public MachineFunctionInfo {
+private:
+ /// Stores a mapping from index to symbol name for removing image handles
+ /// on Fermi.
+ SmallVector<std::string, 8> ImageHandleList;
+
+public:
+ NVPTXMachineFunctionInfo(MachineFunction &MF) {}
+
+ /// Returns the index for the symbol \p Symbol. If the symbol was previously
+ /// added, the same index is returned. Otherwise, the symbol is added and the
+ /// new index is returned.
+ unsigned getImageHandleSymbolIndex(const char *Symbol) {
+ // Is the symbol already present?
+ for (unsigned i = 0, e = ImageHandleList.size(); i != e; ++i)
+ if (ImageHandleList[i] == std::string(Symbol))
+ return i;
+ // Nope, insert it
+ ImageHandleList.push_back(Symbol);
+ return ImageHandleList.size()-1;
+ }
+
+ /// Returns the symbol name at the given index.
+ const char *getImageHandleSymbol(unsigned Idx) const {
+ assert(ImageHandleList.size() > Idx && "Bad index");
+ return ImageHandleList[Idx].c_str();
+ }
+};
+}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp
index 843ebed..348ab0c 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp
@@ -14,24 +14,26 @@
//===----------------------------------------------------------------------===//
#include "NVPTX.h"
-#include "llvm/Pass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/Target/TargetFrameLowering.h"
-#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "nvptx-prolog-epilog"
+
namespace {
class NVPTXPrologEpilogPass : public MachineFunctionPass {
public:
static char ID;
NVPTXPrologEpilogPass() : MachineFunctionPass(ID) {}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
private:
void calculateFrameObjectOffsets(MachineFunction &Fn);
@@ -58,7 +60,7 @@ bool NVPTXPrologEpilogPass::runOnMachineFunction(MachineFunction &MF) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
if (!MI->getOperand(i).isFI())
continue;
- TRI.eliminateFrameIndex(MI, 0, i, NULL);
+ TRI.eliminateFrameIndex(MI, 0, i, nullptr);
Modified = true;
}
}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.cpp
index 4d3a1d9..358ccce 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.cpp
@@ -11,8 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "nvptx-reg-info"
-
#include "NVPTXRegisterInfo.h"
#include "NVPTX.h"
#include "NVPTXSubtarget.h"
@@ -25,6 +23,8 @@
using namespace llvm;
+#define DEBUG_TYPE "nvptx-reg-info"
+
namespace llvm {
std::string getNVPTXRegClassName(TargetRegisterClass const *RC) {
if (RC == &NVPTX::Float32RegsRegClass) {
@@ -53,9 +53,9 @@ std::string getNVPTXRegClassStr(TargetRegisterClass const *RC) {
return "%f";
}
if (RC == &NVPTX::Float64RegsRegClass) {
- return "%fl";
+ return "%fd";
} else if (RC == &NVPTX::Int64RegsRegClass) {
- return "%rl";
+ return "%rd";
} else if (RC == &NVPTX::Int32RegsRegClass) {
return "%r";
} else if (RC == &NVPTX::Int16RegsRegClass) {
@@ -78,19 +78,12 @@ NVPTXRegisterInfo::NVPTXRegisterInfo(const NVPTXSubtarget &st)
#include "NVPTXGenRegisterInfo.inc"
/// NVPTX Callee Saved Registers
-const uint16_t *
+const MCPhysReg *
NVPTXRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
- static const uint16_t CalleeSavedRegs[] = { 0 };
+ static const MCPhysReg CalleeSavedRegs[] = { 0 };
return CalleeSavedRegs;
}
-// NVPTX Callee Saved Reg Classes
-const TargetRegisterClass *const *
-NVPTXRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
- static const TargetRegisterClass *const CalleeSavedRegClasses[] = { 0 };
- return CalleeSavedRegClasses;
-}
-
BitVector NVPTXRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
return Reserved;
@@ -113,12 +106,6 @@ void NVPTXRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}
-int NVPTXRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
- return 0;
-}
-
unsigned NVPTXRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
return NVPTX::VRFrame;
}
-
-unsigned NVPTXRegisterInfo::getRARegister() const { return 0; }
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.h b/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.h
index 0a20f29..a7594be 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.h
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.h
@@ -16,11 +16,10 @@
#include "ManagedStringPool.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include <sstream>
#define GET_REGINFO_HEADER
#include "NVPTXGenRegisterInfo.inc"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include <sstream>
namespace llvm {
@@ -42,22 +41,16 @@ public:
//------------------------------------------------------
// NVPTX callee saved registers
- virtual const uint16_t *
- getCalleeSavedRegs(const MachineFunction *MF = 0) const;
-
- // NVPTX callee saved register classes
- virtual const TargetRegisterClass *const *
- getCalleeSavedRegClasses(const MachineFunction *MF) const;
+ const MCPhysReg *
+ getCalleeSavedRegs(const MachineFunction *MF = nullptr) const override;
- virtual BitVector getReservedRegs(const MachineFunction &MF) const;
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
- virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
- unsigned FIOperandNum,
- RegScavenger *RS = NULL) const;
+ void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
+ unsigned FIOperandNum,
+ RegScavenger *RS = nullptr) const override;
- virtual int getDwarfRegNum(unsigned RegNum, bool isEH) const;
- virtual unsigned getFrameRegister(const MachineFunction &MF) const;
- virtual unsigned getRARegister() const;
+ unsigned getFrameRegister(const MachineFunction &MF) const override;
ManagedStringPool *getStrPool() const {
return const_cast<ManagedStringPool *>(&ManagedStrPool);
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td b/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
index 7a38a66..efcee6b 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
@@ -35,9 +35,9 @@ foreach i = 0-4 in {
def P#i : NVPTXReg<"%p"#i>; // Predicate
def RS#i : NVPTXReg<"%rs"#i>; // 16-bit
def R#i : NVPTXReg<"%r"#i>; // 32-bit
- def RL#i : NVPTXReg<"%rl"#i>; // 64-bit
+ def RL#i : NVPTXReg<"%rd"#i>; // 64-bit
def F#i : NVPTXReg<"%f"#i>; // 32-bit float
- def FL#i : NVPTXReg<"%fl"#i>; // 64-bit float
+ def FL#i : NVPTXReg<"%fd"#i>; // 64-bit float
// Arguments
def ia#i : NVPTXReg<"%ia"#i>;
@@ -46,6 +46,10 @@ foreach i = 0-4 in {
def da#i : NVPTXReg<"%da"#i>;
}
+foreach i = 0-31 in {
+ def ENVREG#i : NVPTXReg<"%envreg"#i>;
+}
+
//===----------------------------------------------------------------------===//
// Register classes
//===----------------------------------------------------------------------===//
@@ -61,4 +65,5 @@ def Float32ArgRegs : NVPTXRegClass<[f32], 32, (add (sequence "fa%u", 0, 4))>;
def Float64ArgRegs : NVPTXRegClass<[f64], 64, (add (sequence "da%u", 0, 4))>;
// Read NVPTXRegisterInfo.cpp to see how VRFrame and VRDepot are used.
-def SpecialRegs : NVPTXRegClass<[i32], 32, (add VRFrame, VRDepot)>;
+def SpecialRegs : NVPTXRegClass<[i32], 32, (add VRFrame, VRDepot,
+ (sequence "ENVREG%u", 0, 31))>;
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp
new file mode 100644
index 0000000..20d4e27
--- /dev/null
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp
@@ -0,0 +1,189 @@
+//===-- NVPTXReplaceImageHandles.cpp - Replace image handles for Fermi ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// On Fermi, image handles are not supported. To work around this, we traverse
+// the machine code and replace image handles with concrete symbols. For this
+// to work reliably, inlining of all function calls must be performed.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NVPTX.h"
+#include "NVPTXMachineFunctionInfo.h"
+#include "NVPTXSubtarget.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/DenseSet.h"
+
+using namespace llvm;
+
+namespace {
+class NVPTXReplaceImageHandles : public MachineFunctionPass {
+private:
+ static char ID;
+ DenseSet<MachineInstr *> InstrsToRemove;
+
+public:
+ NVPTXReplaceImageHandles();
+
+ bool runOnMachineFunction(MachineFunction &MF);
+
+ virtual const char *getPassName() const {
+ return "NVPTX Replace Image Handles";
+ }
+private:
+ bool processInstr(MachineInstr &MI);
+ void replaceImageHandle(MachineOperand &Op, MachineFunction &MF);
+ bool findIndexForHandle(MachineOperand &Op, MachineFunction &MF,
+ unsigned &Idx);
+};
+}
+
+char NVPTXReplaceImageHandles::ID = 0;
+
+NVPTXReplaceImageHandles::NVPTXReplaceImageHandles()
+ : MachineFunctionPass(ID) {}
+
+bool NVPTXReplaceImageHandles::runOnMachineFunction(MachineFunction &MF) {
+ bool Changed = false;
+ InstrsToRemove.clear();
+
+ for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); BI != BE;
+ ++BI) {
+ for (MachineBasicBlock::iterator I = (*BI).begin(), E = (*BI).end();
+ I != E; ++I) {
+ MachineInstr &MI = *I;
+ Changed |= processInstr(MI);
+ }
+ }
+
+ // Now clean up any handle-access instructions
+ // This is needed in debug mode when code cleanup passes are not executed,
+  // but we need the handle accesses to be eliminated because they are not
+ // valid instructions when image handles are disabled.
+ for (DenseSet<MachineInstr *>::iterator I = InstrsToRemove.begin(),
+ E = InstrsToRemove.end(); I != E; ++I) {
+ (*I)->eraseFromParent();
+ }
+ return Changed;
+}
+
+bool NVPTXReplaceImageHandles::processInstr(MachineInstr &MI) {
+ MachineFunction &MF = *MI.getParent()->getParent();
+ const MCInstrDesc &MCID = MI.getDesc();
+
+ if (MCID.TSFlags & NVPTXII::IsTexFlag) {
+ // This is a texture fetch, so operand 4 is a texref and operand 5 is
+ // a samplerref
+ MachineOperand &TexHandle = MI.getOperand(4);
+ replaceImageHandle(TexHandle, MF);
+
+ if (!(MCID.TSFlags & NVPTXII::IsTexModeUnifiedFlag)) {
+ MachineOperand &SampHandle = MI.getOperand(5);
+ replaceImageHandle(SampHandle, MF);
+ }
+
+ return true;
+ } else if (MCID.TSFlags & NVPTXII::IsSuldMask) {
+ unsigned VecSize =
+ 1 << (((MCID.TSFlags & NVPTXII::IsSuldMask) >> NVPTXII::IsSuldShift) - 1);
+
+ // For a surface load of vector size N, the Nth operand will be the surfref
+ MachineOperand &SurfHandle = MI.getOperand(VecSize);
+
+ replaceImageHandle(SurfHandle, MF);
+
+ return true;
+ } else if (MCID.TSFlags & NVPTXII::IsSustFlag) {
+ // This is a surface store, so operand 0 is a surfref
+ MachineOperand &SurfHandle = MI.getOperand(0);
+
+ replaceImageHandle(SurfHandle, MF);
+
+ return true;
+ } else if (MCID.TSFlags & NVPTXII::IsSurfTexQueryFlag) {
+ // This is a query, so operand 1 is a surfref/texref
+ MachineOperand &Handle = MI.getOperand(1);
+
+ replaceImageHandle(Handle, MF);
+
+ return true;
+ }
+
+ return false;
+}
+
+void NVPTXReplaceImageHandles::
+replaceImageHandle(MachineOperand &Op, MachineFunction &MF) {
+ unsigned Idx;
+ if (findIndexForHandle(Op, MF, Idx)) {
+ Op.ChangeToImmediate(Idx);
+ }
+}
+
+bool NVPTXReplaceImageHandles::
+findIndexForHandle(MachineOperand &Op, MachineFunction &MF, unsigned &Idx) {
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ NVPTXMachineFunctionInfo *MFI = MF.getInfo<NVPTXMachineFunctionInfo>();
+
+ assert(Op.isReg() && "Handle is not in a reg?");
+
+ // Which instruction defines the handle?
+ MachineInstr &TexHandleDef = *MRI.getVRegDef(Op.getReg());
+
+ switch (TexHandleDef.getOpcode()) {
+ case NVPTX::LD_i64_avar: {
+ // The handle is a parameter value being loaded, replace with the
+ // parameter symbol
+ const NVPTXSubtarget &ST = MF.getTarget().getSubtarget<NVPTXSubtarget>();
+ if (ST.getDrvInterface() == NVPTX::CUDA) {
+ // For CUDA, we preserve the param loads coming from function arguments
+ return false;
+ }
+
+ assert(TexHandleDef.getOperand(6).isSymbol() && "Load is not a symbol!");
+ StringRef Sym = TexHandleDef.getOperand(6).getSymbolName();
+ std::string ParamBaseName = MF.getName();
+ ParamBaseName += "_param_";
+ assert(Sym.startswith(ParamBaseName) && "Invalid symbol reference");
+ unsigned Param = atoi(Sym.data()+ParamBaseName.size());
+ std::string NewSym;
+ raw_string_ostream NewSymStr(NewSym);
+ NewSymStr << MF.getFunction()->getName() << "_param_" << Param;
+
+ InstrsToRemove.insert(&TexHandleDef);
+ Idx = MFI->getImageHandleSymbolIndex(NewSymStr.str().c_str());
+ return true;
+ }
+ case NVPTX::texsurf_handles: {
+ // The handle is a global variable, replace with the global variable name
+ assert(TexHandleDef.getOperand(1).isGlobal() && "Load is not a global!");
+ const GlobalValue *GV = TexHandleDef.getOperand(1).getGlobal();
+ assert(GV->hasName() && "Global sampler must be named!");
+ InstrsToRemove.insert(&TexHandleDef);
+ Idx = MFI->getImageHandleSymbolIndex(GV->getName().data());
+ return true;
+ }
+ case NVPTX::nvvm_move_i64:
+ case TargetOpcode::COPY: {
+ bool Res = findIndexForHandle(TexHandleDef.getOperand(1), MF, Idx);
+ if (Res) {
+ InstrsToRemove.insert(&TexHandleDef);
+ }
+ return Res;
+ }
+ default:
+ llvm_unreachable("Unknown instruction operating on handle");
+ }
+}
+
+MachineFunctionPass *llvm::createNVPTXReplaceImageHandlesPass() {
+ return new NVPTXReplaceImageHandles();
+}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXSection.h b/contrib/llvm/lib/Target/NVPTX/NVPTXSection.h
index f8a692e..aa0436b 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXSection.h
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXSection.h
@@ -31,16 +31,16 @@ public:
/// Override this as NVPTX has its own way of printing switching
/// to a section.
- virtual void PrintSwitchToSection(const MCAsmInfo &MAI,
- raw_ostream &OS,
- const MCExpr *Subsection) const {}
+ void PrintSwitchToSection(const MCAsmInfo &MAI,
+ raw_ostream &OS,
+ const MCExpr *Subsection) const override {}
/// Base address of PTX sections is zero.
- virtual bool isBaseAddressKnownZero() const { return true; }
- virtual bool UseCodeAlign() const { return false; }
- virtual bool isVirtualSection() const { return false; }
- virtual std::string getLabelBeginName() const { return ""; }
- virtual std::string getLabelEndName() const { return ""; }
+ bool isBaseAddressKnownZero() const override { return true; }
+ bool UseCodeAlign() const override { return false; }
+ bool isVirtualSection() const override { return false; }
+ std::string getLabelBeginName() const override { return ""; }
+ std::string getLabelEndName() const override { return ""; }
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXSplitBBatBar.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXSplitBBatBar.cpp
deleted file mode 100644
index b64c308..0000000
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXSplitBBatBar.cpp
+++ /dev/null
@@ -1,73 +0,0 @@
-//===- NVPTXSplitBBatBar.cpp - Split BB at Barrier --*- C++ -*--===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// Split basic blocks so that a basic block that contains a barrier instruction
-// only contains the barrier instruction.
-//
-//===----------------------------------------------------------------------===//
-
-#include "NVPTXSplitBBatBar.h"
-#include "NVPTXUtilities.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/IR/Intrinsics.h"
-#include "llvm/Support/InstIterator.h"
-
-using namespace llvm;
-
-namespace llvm { FunctionPass *createSplitBBatBarPass(); }
-
-char NVPTXSplitBBatBar::ID = 0;
-
-bool NVPTXSplitBBatBar::runOnFunction(Function &F) {
-
- SmallVector<Instruction *, 4> SplitPoints;
- bool changed = false;
-
- // Collect all the split points in SplitPoints
- for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
- BasicBlock::iterator IB = BI->begin();
- BasicBlock::iterator II = IB;
- BasicBlock::iterator IE = BI->end();
-
- // Skit the first instruction. No splitting is needed at this
- // point even if this is a bar.
- while (II != IE) {
- if (IntrinsicInst *inst = dyn_cast<IntrinsicInst>(II)) {
- Intrinsic::ID id = inst->getIntrinsicID();
- // If this is a barrier, split at this instruction
- // and the next instruction.
- if (llvm::isBarrierIntrinsic(id)) {
- if (II != IB)
- SplitPoints.push_back(II);
- II++;
- if ((II != IE) && (!II->isTerminator())) {
- SplitPoints.push_back(II);
- II++;
- }
- continue;
- }
- }
- II++;
- }
- }
-
- for (unsigned i = 0; i != SplitPoints.size(); i++) {
- changed = true;
- Instruction *inst = SplitPoints[i];
- inst->getParent()->splitBasicBlock(inst, "bar_split");
- }
-
- return changed;
-}
-
-// This interface will most likely not be necessary, because this pass will
-// not be invoked by the driver, but will be used as a prerequisite to
-// another pass.
-FunctionPass *llvm::createSplitBBatBarPass() { return new NVPTXSplitBBatBar(); }
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXSplitBBatBar.h b/contrib/llvm/lib/Target/NVPTX/NVPTXSplitBBatBar.h
deleted file mode 100644
index bdafba9..0000000
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXSplitBBatBar.h
+++ /dev/null
@@ -1,41 +0,0 @@
-//===-- llvm/lib/Target/NVPTX/NVPTXSplitBBatBar.h ---------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the declaration of the NVIDIA specific declarations
-// for splitting basic blocks at barrier instructions.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef NVPTX_SPLIT_BB_AT_BAR_H
-#define NVPTX_SPLIT_BB_AT_BAR_H
-
-#include "llvm/CodeGen/MachineFunctionAnalysis.h"
-#include "llvm/Pass.h"
-
-namespace llvm {
-
-// actual analysis class, which is a functionpass
-struct NVPTXSplitBBatBar : public FunctionPass {
- static char ID;
-
- NVPTXSplitBBatBar() : FunctionPass(ID) {}
- void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addPreserved<MachineFunctionAnalysis>();
- }
- virtual bool runOnFunction(Function &F);
-
- virtual const char *getPassName() const {
- return "Split basic blocks at barrier";
- }
-};
-
-extern FunctionPass *createSplitBBatBarPass();
-}
-
-#endif //NVPTX_SPLIT_BB_AT_BAR_H
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp
index 9771a17..d5cded2 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp
@@ -12,21 +12,54 @@
//===----------------------------------------------------------------------===//
#include "NVPTXSubtarget.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "nvptx-subtarget"
+
#define GET_SUBTARGETINFO_ENUM
#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "NVPTXGenSubtargetInfo.inc"
-using namespace llvm;
-
-
// Pin the vtable to this file.
void NVPTXSubtarget::anchor() {}
+static std::string computeDataLayout(bool is64Bit) {
+ std::string Ret = "e";
+
+ if (!is64Bit)
+ Ret += "-p:32:32";
+
+ Ret += "-i64:64-v16:16-v32:32-n16:32:64";
+
+ return Ret;
+}
+
+NVPTXSubtarget &NVPTXSubtarget::initializeSubtargetDependencies(StringRef CPU,
+ StringRef FS) {
+ // Provide the default CPU if we don't have one.
+ if (CPU.empty() && FS.size())
+ llvm_unreachable("we are not using FeatureStr");
+ TargetName = CPU.empty() ? "sm_20" : CPU;
+
+ ParseSubtargetFeatures(TargetName, FS);
+
+ // Set default to PTX 3.2 (CUDA 5.5)
+ if (PTXVersion == 0) {
+ PTXVersion = 32;
+ }
+
+ return *this;
+}
+
NVPTXSubtarget::NVPTXSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, bool is64Bit)
+ const std::string &FS, const TargetMachine &TM,
+ bool is64Bit)
: NVPTXGenSubtargetInfo(TT, CPU, FS), Is64Bit(is64Bit), PTXVersion(0),
- SmVersion(20) {
+ SmVersion(20), DL(computeDataLayout(is64Bit)),
+ InstrInfo(initializeSubtargetDependencies(CPU, FS)),
+ TLInfo((NVPTXTargetMachine &)TM), TSInfo(&DL), FrameLowering(*this) {
Triple T(TT);
@@ -34,26 +67,4 @@ NVPTXSubtarget::NVPTXSubtarget(const std::string &TT, const std::string &CPU,
drvInterface = NVPTX::NVCL;
else
drvInterface = NVPTX::CUDA;
-
- // Provide the default CPU if none
- std::string defCPU = "sm_20";
-
- ParseSubtargetFeatures((CPU.empty() ? defCPU : CPU), FS);
-
- // Get the TargetName from the FS if available
- if (FS.empty() && CPU.empty())
- TargetName = defCPU;
- else if (!CPU.empty())
- TargetName = CPU;
- else
- llvm_unreachable("we are not using FeatureStr");
-
- // We default to PTX 3.1, but we cannot just default to it in the initializer
- // since the attribute parser checks if the given option is >= the default.
- // So if we set ptx31 as the default, the ptx30 attribute would never match.
- // Instead, we use 0 as the default and manually set 31 if the default is
- // used.
- if (PTXVersion == 0) {
- PTXVersion = 31;
- }
}
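
A minimal standalone sketch of what the computeDataLayout() helper introduced above produces, assuming it is called exactly as in the new constructor; the assertions spell out the resulting strings for the 32-bit and 64-bit cases.

#include <cassert>
#include <string>

// Mirrors the helper added in NVPTXSubtarget.cpp above, purely to show the
// resulting layout strings; not a drop-in for the real function.
static std::string computeDataLayoutSketch(bool is64Bit) {
  std::string Ret = "e";
  if (!is64Bit)
    Ret += "-p:32:32";                      // 32-bit pointers for the 32-bit ABI
  Ret += "-i64:64-v16:16-v32:32-n16:32:64";
  return Ret;
}

int main() {
  assert(computeDataLayoutSketch(false) ==
         "e-p:32:32-i64:64-v16:16-v32:32-n16:32:64");
  assert(computeDataLayoutSketch(true) == "e-i64:64-v16:16-v32:32-n16:32:64");
  return 0;
}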
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.h b/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.h
index 004be11..4c41e4e 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.h
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXSubtarget.h
@@ -15,13 +15,18 @@
#define NVPTXSUBTARGET_H
#include "NVPTX.h"
+#include "NVPTXFrameLowering.h"
+#include "NVPTXISelLowering.h"
+#include "NVPTXInstrInfo.h"
+#include "NVPTXRegisterInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/Target/TargetSelectionDAGInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
+#include <string>
#define GET_SUBTARGETINFO_HEADER
#include "NVPTXGenSubtargetInfo.inc"
-#include <string>
-
namespace llvm {
class NVPTXSubtarget : public NVPTXGenSubtargetInfo {
@@ -36,12 +41,30 @@ class NVPTXSubtarget : public NVPTXGenSubtargetInfo {
// SM version x.y is represented as 10*x+y, e.g. 3.1 == 31
unsigned int SmVersion;
+ const DataLayout DL; // Calculates type size & alignment
+ NVPTXInstrInfo InstrInfo;
+ NVPTXTargetLowering TLInfo;
+ TargetSelectionDAGInfo TSInfo;
+
+  // NVPTX does not have any call stack frame, but needs an NVPTX-specific
+ // FrameLowering class because TargetFrameLowering is abstract.
+ NVPTXFrameLowering FrameLowering;
+
public:
/// This constructor initializes the data members to match that
/// of the specified module.
///
NVPTXSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, bool is64Bit);
+ const std::string &FS, const TargetMachine &TM, bool is64Bit);
+
+ const TargetFrameLowering *getFrameLowering() const { return &FrameLowering; }
+ const NVPTXInstrInfo *getInstrInfo() const { return &InstrInfo; }
+ const DataLayout *getDataLayout() const { return &DL; }
+ const NVPTXRegisterInfo *getRegisterInfo() const {
+ return &InstrInfo.getRegisterInfo();
+ }
+ const NVPTXTargetLowering *getTargetLowering() const { return &TLInfo; }
+ const TargetSelectionDAGInfo *getSelectionDAGInfo() const { return &TSInfo; }
bool hasBrkPt() const { return SmVersion >= 11; }
bool hasAtomRedG32() const { return SmVersion >= 11; }
@@ -58,13 +81,24 @@ public:
bool hasFMAF32() const { return SmVersion >= 20; }
bool hasFMAF64() const { return SmVersion >= 13; }
bool hasLDG() const { return SmVersion >= 32; }
- bool hasLDU() const { return SmVersion >= 20; }
+ bool hasLDU() const { return ((SmVersion >= 20) && (SmVersion < 30)); }
bool hasGenericLdSt() const { return SmVersion >= 20; }
- inline bool hasHWROT32() const { return false; }
- inline bool hasSWROT32() const { return true; }
+ inline bool hasHWROT32() const { return SmVersion >= 32; }
+ inline bool hasSWROT32() const {
+ return ((SmVersion >= 20) && (SmVersion < 32));
+ }
inline bool hasROT32() const { return hasHWROT32() || hasSWROT32(); }
inline bool hasROT64() const { return SmVersion >= 20; }
+ bool hasImageHandles() const {
+ // Enable handles for Kepler+, where CUDA supports indirect surfaces and
+ // textures
+ if (getDrvInterface() == NVPTX::CUDA)
+ return (SmVersion >= 30);
+
+ // Disabled, otherwise
+ return false;
+ }
bool is64Bit() const { return Is64Bit; }
unsigned int getSmVersion() const { return SmVersion; }
@@ -73,22 +107,8 @@ public:
unsigned getPTXVersion() const { return PTXVersion; }
+ NVPTXSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
-
- std::string getDataLayout() const {
- const char *p;
- if (is64Bit())
- p = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-"
- "f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-"
- "n16:32:64";
- else
- p = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-"
- "f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-"
- "n16:32:64";
-
- return std::string(p);
- }
-
};
} // End llvm namespace
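
A minimal sketch of how the SmVersion-gated predicates above resolve for a few common targets, assuming the CUDA driver interface (the only case in which hasImageHandles() can return true); it mirrors the predicates rather than reusing the real subtarget class.

#include <cassert>

struct SmFeatureSketch {
  unsigned SmVersion;  // SM x.y encoded as 10*x + y, e.g. sm_35 -> 35
  bool hasLDU() const { return SmVersion >= 20 && SmVersion < 30; }
  bool hasHWROT32() const { return SmVersion >= 32; }
  bool hasImageHandles() const { return SmVersion >= 30; }  // CUDA interface assumed
};

int main() {
  SmFeatureSketch SM20{20}, SM30{30}, SM35{35};
  assert(SM20.hasLDU() && !SM20.hasHWROT32() && !SM20.hasImageHandles());
  assert(!SM30.hasLDU() && !SM30.hasHWROT32() && SM30.hasImageHandles());
  assert(!SM35.hasLDU() && SM35.hasHWROT32() && SM35.hasImageHandles());
  return 0;
}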
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index 46edd6d..069a1b9 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -16,16 +16,14 @@
#include "NVPTX.h"
#include "NVPTXAllocaHoisting.h"
#include "NVPTXLowerAggrCopies.h"
-#include "NVPTXSplitBBatBar.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/Analysis/Passes.h"
-#include "llvm/Analysis/Verifier.h"
-#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/IR/Verifier.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCStreamer.h"
@@ -50,6 +48,8 @@ using namespace llvm;
namespace llvm {
void initializeNVVMReflectPass(PassRegistry&);
void initializeGenericToNVVMPass(PassRegistry&);
+void initializeNVPTXAssignValidGlobalNamesPass(PassRegistry&);
+void initializeNVPTXFavorNonGenericAddrSpacesPass(PassRegistry &);
}
extern "C" void LLVMInitializeNVPTXTarget() {
@@ -61,17 +61,18 @@ extern "C" void LLVMInitializeNVPTXTarget() {
// but it's very NVPTX-specific.
initializeNVVMReflectPass(*PassRegistry::getPassRegistry());
initializeGenericToNVVMPass(*PassRegistry::getPassRegistry());
+ initializeNVPTXAssignValidGlobalNamesPass(*PassRegistry::getPassRegistry());
+ initializeNVPTXFavorNonGenericAddrSpacesPass(
+ *PassRegistry::getPassRegistry());
}
-NVPTXTargetMachine::NVPTXTargetMachine(
- const Target &T, StringRef TT, StringRef CPU, StringRef FS,
- const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL, bool is64bit)
+NVPTXTargetMachine::NVPTXTargetMachine(const Target &T, StringRef TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL, bool is64bit)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- Subtarget(TT, CPU, FS, is64bit), DL(Subtarget.getDataLayout()),
- InstrInfo(*this), TLInfo(*this), TSInfo(*this),
- FrameLowering(
- *this, is64bit) /*FrameInfo(TargetFrameInfo::StackGrowsUp, 8, 0)*/ {
+ Subtarget(TT, CPU, FS, *this, is64bit) {
initAsmInfo();
}
@@ -101,14 +102,15 @@ public:
return getTM<NVPTXTargetMachine>();
}
- virtual void addIRPasses();
- virtual bool addInstSelector();
- virtual bool addPreRegAlloc();
- virtual bool addPostRegAlloc();
+ void addIRPasses() override;
+ bool addInstSelector() override;
+ bool addPreRegAlloc() override;
+ bool addPostRegAlloc() override;
+ void addMachineSSAOptimization() override;
- virtual FunctionPass *createTargetRegisterAllocator(bool) LLVM_OVERRIDE;
- virtual void addFastRegAlloc(FunctionPass *RegAllocPass);
- virtual void addOptimizedRegAlloc(FunctionPass *RegAllocPass);
+ FunctionPass *createTargetRegisterAllocator(bool) override;
+ void addFastRegAlloc(FunctionPass *RegAllocPass) override;
+ void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
};
} // end anonymous namespace
@@ -128,15 +130,42 @@ void NVPTXPassConfig::addIRPasses() {
disablePass(&BranchFolderPassID);
disablePass(&TailDuplicateID);
+ addPass(createNVPTXImageOptimizerPass());
TargetPassConfig::addIRPasses();
+ addPass(createNVPTXAssignValidGlobalNamesPass());
addPass(createGenericToNVVMPass());
+ addPass(createNVPTXFavorNonGenericAddrSpacesPass());
+ addPass(createSeparateConstOffsetFromGEPPass());
+ // The SeparateConstOffsetFromGEP pass creates variadic bases that can be used
+ // by multiple GEPs. Run GVN or EarlyCSE to really reuse them. GVN generates
+ // significantly better code than EarlyCSE for some of our benchmarks.
+ if (getOptLevel() == CodeGenOpt::Aggressive)
+ addPass(createGVNPass());
+ else
+ addPass(createEarlyCSEPass());
+ // Both FavorNonGenericAddrSpaces and SeparateConstOffsetFromGEP may leave
+ // some dead code. We could remove dead code in an ad-hoc manner, but that
+ // requires manual work and might be error-prone.
+ //
+ // The FavorNonGenericAddrSpaces pass shortcuts unnecessary addrspacecasts,
+  // and leaves them unused.
+ //
+ // SeparateConstOffsetFromGEP rebuilds a new index from the old index, and the
+ // old index and some of its intermediate results may become unused.
+ addPass(createDeadCodeEliminationPass());
}
bool NVPTXPassConfig::addInstSelector() {
+ const NVPTXSubtarget &ST =
+ getTM<NVPTXTargetMachine>().getSubtarget<NVPTXSubtarget>();
+
addPass(createLowerAggrCopies());
- addPass(createSplitBBatBarPass());
addPass(createAllocaHoisting());
addPass(createNVPTXISelDag(getNVPTXTargetMachine(), getOptLevel()));
+
+ if (!ST.hasImageHandles())
+ addPass(createNVPTXReplaceImageHandlesPass());
+
return false;
}
@@ -147,7 +176,7 @@ bool NVPTXPassConfig::addPostRegAlloc() {
}
FunctionPass *NVPTXPassConfig::createTargetRegisterAllocator(bool) {
- return 0; // No reg alloc
+ return nullptr; // No reg alloc
}
void NVPTXPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
@@ -179,3 +208,43 @@ void NVPTXPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
printAndVerify("After StackSlotColoring");
}
+
+void NVPTXPassConfig::addMachineSSAOptimization() {
+ // Pre-ra tail duplication.
+ if (addPass(&EarlyTailDuplicateID))
+ printAndVerify("After Pre-RegAlloc TailDuplicate");
+
+ // Optimize PHIs before DCE: removing dead PHI cycles may make more
+ // instructions dead.
+ addPass(&OptimizePHIsID);
+
+ // This pass merges large allocas. StackSlotColoring is a different pass
+ // which merges spill slots.
+ addPass(&StackColoringID);
+
+ // If the target requests it, assign local variables to stack slots relative
+ // to one another and simplify frame index references where possible.
+ addPass(&LocalStackSlotAllocationID);
+
+ // With optimization, dead code should already be eliminated. However
+ // there is one known exception: lowered code for arguments that are only
+ // used by tail calls, where the tail calls reuse the incoming stack
+ // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
+ addPass(&DeadMachineInstructionElimID);
+ printAndVerify("After codegen DCE pass");
+
+ // Allow targets to insert passes that improve instruction level parallelism,
+ // like if-conversion. Such passes will typically need dominator trees and
+ // loop info, just like LICM and CSE below.
+ if (addILPOpts())
+ printAndVerify("After ILP optimizations");
+
+ addPass(&MachineLICMID);
+ addPass(&MachineCSEID);
+
+ addPass(&MachineSinkingID);
+ printAndVerify("After Machine LICM, CSE and Sinking passes");
+
+ addPass(&PeepholeOptimizerID);
+ printAndVerify("After codegen peephole optimization pass");
+}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h b/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h
index 5fbcf73..a7a1c8f 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h
@@ -14,13 +14,8 @@
#ifndef NVPTX_TARGETMACHINE_H
#define NVPTX_TARGETMACHINE_H
-#include "ManagedStringPool.h"
-#include "NVPTXFrameLowering.h"
-#include "NVPTXISelLowering.h"
-#include "NVPTXInstrInfo.h"
-#include "NVPTXRegisterInfo.h"
#include "NVPTXSubtarget.h"
-#include "llvm/IR/DataLayout.h"
+#include "ManagedStringPool.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
@@ -31,65 +26,52 @@ namespace llvm {
///
class NVPTXTargetMachine : public LLVMTargetMachine {
NVPTXSubtarget Subtarget;
- const DataLayout DL; // Calculates type size & alignment
- NVPTXInstrInfo InstrInfo;
- NVPTXTargetLowering TLInfo;
- TargetSelectionDAGInfo TSInfo;
-
- // NVPTX does not have any call stack frame, but need a NVPTX specific
- // FrameLowering class because TargetFrameLowering is abstract.
- NVPTXFrameLowering FrameLowering;
// Hold Strings that can be free'd all together with NVPTXTargetMachine
ManagedStringPool ManagedStrPool;
- //bool addCommonCodeGenPasses(PassManagerBase &, CodeGenOpt::Level,
- // bool DisableVerify, MCContext *&OutCtx);
-
public:
NVPTXTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS,
const TargetOptions &Options, Reloc::Model RM,
CodeModel::Model CM, CodeGenOpt::Level OP, bool is64bit);
- virtual const TargetFrameLowering *getFrameLowering() const {
- return &FrameLowering;
+ const TargetFrameLowering *getFrameLowering() const override {
+ return getSubtargetImpl()->getFrameLowering();
}
- virtual const NVPTXInstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const DataLayout *getDataLayout() const { return &DL; }
- virtual const NVPTXSubtarget *getSubtargetImpl() const { return &Subtarget; }
-
- virtual const NVPTXRegisterInfo *getRegisterInfo() const {
- return &(InstrInfo.getRegisterInfo());
+ const NVPTXInstrInfo *getInstrInfo() const override {
+ return getSubtargetImpl()->getInstrInfo();
}
-
- virtual NVPTXTargetLowering *getTargetLowering() const {
- return const_cast<NVPTXTargetLowering *>(&TLInfo);
+ const DataLayout *getDataLayout() const override {
+ return getSubtargetImpl()->getDataLayout();
}
-
- virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const {
- return &TSInfo;
+ const NVPTXSubtarget *getSubtargetImpl() const override { return &Subtarget; }
+ const NVPTXRegisterInfo *getRegisterInfo() const override {
+ return getSubtargetImpl()->getRegisterInfo();
}
- //virtual bool addInstSelector(PassManagerBase &PM,
- // CodeGenOpt::Level OptLevel);
+ const NVPTXTargetLowering *getTargetLowering() const override {
+ return getSubtargetImpl()->getTargetLowering();
+ }
- //virtual bool addPreRegAlloc(PassManagerBase &, CodeGenOpt::Level);
+ const TargetSelectionDAGInfo *getSelectionDAGInfo() const override {
+ return getSubtargetImpl()->getSelectionDAGInfo();
+ }
ManagedStringPool *getManagedStrPool() const {
return const_cast<ManagedStringPool *>(&ManagedStrPool);
}
- virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
// Emission of machine code through JITCodeEmitter is not supported.
- virtual bool addPassesToEmitMachineCode(PassManagerBase &, JITCodeEmitter &,
- bool = true) {
+ bool addPassesToEmitMachineCode(PassManagerBase &, JITCodeEmitter &,
+ bool = true) override {
return true;
}
// Emission of machine code through MCJIT is not supported.
- virtual bool addPassesToEmitMC(PassManagerBase &, MCContext *&, raw_ostream &,
- bool = true) {
+ bool addPassesToEmitMC(PassManagerBase &, MCContext *&, raw_ostream &,
+ bool = true) override {
return true;
}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXTargetObjectFile.h b/contrib/llvm/lib/Target/NVPTX/NVPTXTargetObjectFile.h
index 2a7394b..ba8086d 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXTargetObjectFile.h
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXTargetObjectFile.h
@@ -22,31 +22,31 @@ class NVPTXTargetObjectFile : public TargetLoweringObjectFile {
public:
NVPTXTargetObjectFile() {
- TextSection = 0;
- DataSection = 0;
- BSSSection = 0;
- ReadOnlySection = 0;
+ TextSection = nullptr;
+ DataSection = nullptr;
+ BSSSection = nullptr;
+ ReadOnlySection = nullptr;
- StaticCtorSection = 0;
- StaticDtorSection = 0;
- LSDASection = 0;
- EHFrameSection = 0;
- DwarfAbbrevSection = 0;
- DwarfInfoSection = 0;
- DwarfLineSection = 0;
- DwarfFrameSection = 0;
- DwarfPubTypesSection = 0;
- DwarfDebugInlineSection = 0;
- DwarfStrSection = 0;
- DwarfLocSection = 0;
- DwarfARangesSection = 0;
- DwarfRangesSection = 0;
- DwarfMacroInfoSection = 0;
+ StaticCtorSection = nullptr;
+ StaticDtorSection = nullptr;
+ LSDASection = nullptr;
+ EHFrameSection = nullptr;
+ DwarfAbbrevSection = nullptr;
+ DwarfInfoSection = nullptr;
+ DwarfLineSection = nullptr;
+ DwarfFrameSection = nullptr;
+ DwarfPubTypesSection = nullptr;
+ DwarfDebugInlineSection = nullptr;
+ DwarfStrSection = nullptr;
+ DwarfLocSection = nullptr;
+ DwarfARangesSection = nullptr;
+ DwarfRangesSection = nullptr;
+ DwarfMacroInfoSection = nullptr;
}
virtual ~NVPTXTargetObjectFile();
- virtual void Initialize(MCContext &ctx, const TargetMachine &TM) {
+ void Initialize(MCContext &ctx, const TargetMachine &TM) override {
TargetLoweringObjectFile::Initialize(ctx, TM);
TextSection = new NVPTXSection(MCSection::SV_ELF, SectionKind::getText());
DataSection =
@@ -87,13 +87,14 @@ public:
new NVPTXSection(MCSection::SV_ELF, SectionKind::getMetadata());
}
- virtual const MCSection *getSectionForConstant(SectionKind Kind) const {
+ const MCSection *getSectionForConstant(SectionKind Kind,
+ const Constant *C) const override {
return ReadOnlySection;
}
- virtual const MCSection *
- getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler *Mang, const TargetMachine &TM) const {
+ const MCSection *getExplicitSectionGlobal(const GlobalValue *GV,
+ SectionKind Kind, Mangler &Mang,
+ const TargetMachine &TM) const override {
return DataSection;
}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp
index 6786eb0..a9fd190 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp
@@ -22,9 +22,9 @@
#include <map>
#include <string>
#include <vector>
-//#include <iostream>
#include "llvm/Support/ManagedStatic.h"
-#include "llvm/Support/InstIterator.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/Support/MutexGuard.h"
using namespace llvm;
@@ -33,8 +33,15 @@ typedef std::map<const GlobalValue *, key_val_pair_t> global_val_annot_t;
typedef std::map<const Module *, global_val_annot_t> per_module_annot_t;
ManagedStatic<per_module_annot_t> annotationCache;
+static sys::Mutex Lock;
+
+void llvm::clearAnnotationCache(const llvm::Module *Mod) {
+ MutexGuard Guard(Lock);
+ annotationCache->erase(Mod);
+}
static void cacheAnnotationFromMD(const MDNode *md, key_val_pair_t &retval) {
+ MutexGuard Guard(Lock);
assert(md && "Invalid mdnode for annotation");
assert((md->getNumOperands() % 2) == 1 && "Invalid number of operands");
// start index = 1, to skip the global variable key
@@ -60,6 +67,7 @@ static void cacheAnnotationFromMD(const MDNode *md, key_val_pair_t &retval) {
}
static void cacheAnnotationFromMD(const Module *m, const GlobalValue *gv) {
+ MutexGuard Guard(Lock);
NamedMDNode *NMD = m->getNamedMetadata(llvm::NamedMDForAnnotations);
if (!NMD)
return;
@@ -92,6 +100,7 @@ static void cacheAnnotationFromMD(const Module *m, const GlobalValue *gv) {
bool llvm::findOneNVVMAnnotation(const GlobalValue *gv, std::string prop,
unsigned &retval) {
+ MutexGuard Guard(Lock);
const Module *m = gv->getParent();
if ((*annotationCache).find(m) == (*annotationCache).end())
cacheAnnotationFromMD(m, gv);
@@ -105,6 +114,7 @@ bool llvm::findOneNVVMAnnotation(const GlobalValue *gv, std::string prop,
bool llvm::findAllNVVMAnnotation(const GlobalValue *gv, std::string prop,
std::vector<unsigned> &retval) {
+ MutexGuard Guard(Lock);
const Module *m = gv->getParent();
if ((*annotationCache).find(m) == (*annotationCache).end())
cacheAnnotationFromMD(m, gv);
@@ -195,8 +205,37 @@ bool llvm::isImageWriteOnly(const llvm::Value &val) {
return false;
}
+bool llvm::isImageReadWrite(const llvm::Value &val) {
+ if (const Argument *arg = dyn_cast<Argument>(&val)) {
+ const Function *func = arg->getParent();
+ std::vector<unsigned> annot;
+ if (llvm::findAllNVVMAnnotation(func,
+ llvm::PropertyAnnotationNames[
+ llvm::PROPERTY_ISREADWRITE_IMAGE_PARAM],
+ annot)) {
+ if (std::find(annot.begin(), annot.end(), arg->getArgNo()) != annot.end())
+ return true;
+ }
+ }
+ return false;
+}
+
bool llvm::isImage(const llvm::Value &val) {
- return llvm::isImageReadOnly(val) || llvm::isImageWriteOnly(val);
+ return llvm::isImageReadOnly(val) || llvm::isImageWriteOnly(val) ||
+ llvm::isImageReadWrite(val);
+}
+
+bool llvm::isManaged(const llvm::Value &val) {
+ if(const GlobalValue *gv = dyn_cast<GlobalValue>(&val)) {
+ unsigned annot;
+ if(llvm::findOneNVVMAnnotation(gv,
+ llvm::PropertyAnnotationNames[llvm::PROPERTY_MANAGED],
+ annot)) {
+ assert((annot == 1) && "Unexpected annotation on a managed symbol");
+ return true;
+ }
+ }
+ return false;
}
std::string llvm::getTextureName(const llvm::Value &val) {
@@ -354,12 +393,12 @@ llvm::skipPointerTransfer(const Value *V, bool ignore_GEP_indices) {
const Value *
llvm::skipPointerTransfer(const Value *V, std::set<const Value *> &processed) {
if (processed.find(V) != processed.end())
- return NULL;
+ return nullptr;
processed.insert(V);
const Value *V2 = V->stripPointerCasts();
if (V2 != V && processed.find(V2) != processed.end())
- return NULL;
+ return nullptr;
processed.insert(V2);
V = V2;
@@ -375,20 +414,20 @@ llvm::skipPointerTransfer(const Value *V, std::set<const Value *> &processed) {
continue;
} else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
if (V != V2 && processed.find(V) != processed.end())
- return NULL;
+ return nullptr;
processed.insert(PN);
- const Value *common = 0;
+ const Value *common = nullptr;
for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
const Value *pv = PN->getIncomingValue(i);
const Value *base = skipPointerTransfer(pv, processed);
if (base) {
- if (common == 0)
+ if (!common)
common = base;
else if (common != base)
return PN;
}
}
- if (common == 0)
+ if (!common)
return PN;
V = common;
}
@@ -406,7 +445,7 @@ BasicBlock *llvm::getParentBlock(Value *v) {
if (Instruction *I = dyn_cast<Instruction>(v))
return I->getParent();
- return 0;
+ return nullptr;
}
Function *llvm::getParentFunction(Value *v) {
@@ -419,13 +458,13 @@ Function *llvm::getParentFunction(Value *v) {
if (BasicBlock *B = dyn_cast<BasicBlock>(v))
return B->getParent();
- return 0;
+ return nullptr;
}
// Dump a block by name
void llvm::dumpBlock(Value *v, char *blockName) {
Function *F = getParentFunction(v);
- if (F == 0)
+ if (!F)
return;
for (Function::iterator it = F->begin(), ie = F->end(); it != ie; ++it) {
@@ -440,8 +479,8 @@ void llvm::dumpBlock(Value *v, char *blockName) {
// Find an instruction by name
Instruction *llvm::getInst(Value *base, char *instName) {
Function *F = getParentFunction(base);
- if (F == 0)
- return 0;
+ if (!F)
+ return nullptr;
for (inst_iterator it = inst_begin(F), ie = inst_end(F); it != ie; ++it) {
Instruction *I = &*it;
@@ -450,7 +489,7 @@ Instruction *llvm::getInst(Value *base, char *instName) {
}
}
- return 0;
+ return nullptr;
}
// Dump an instruction by name
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXUtilities.h b/contrib/llvm/lib/Target/NVPTX/NVPTXUtilities.h
index a208004..446bfa1 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXUtilities.h
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXUtilities.h
@@ -28,6 +28,8 @@ namespace llvm {
#define NVCL_IMAGE2D_READONLY_FUNCNAME "__is_image2D_readonly"
#define NVCL_IMAGE3D_READONLY_FUNCNAME "__is_image3D_readonly"
+void clearAnnotationCache(const llvm::Module *);
+
bool findOneNVVMAnnotation(const llvm::GlobalValue *, std::string, unsigned &);
bool findAllNVVMAnnotation(const llvm::GlobalValue *, std::string,
std::vector<unsigned> &);
@@ -38,6 +40,8 @@ bool isSampler(const llvm::Value &);
bool isImage(const llvm::Value &);
bool isImageReadOnly(const llvm::Value &);
bool isImageWriteOnly(const llvm::Value &);
+bool isImageReadWrite(const llvm::Value &);
+bool isManaged(const llvm::Value &);
std::string getTextureName(const llvm::Value &);
std::string getSurfaceName(const llvm::Value &);
diff --git a/contrib/llvm/lib/Target/NVPTX/NVVMReflect.cpp b/contrib/llvm/lib/Target/NVPTX/NVVMReflect.cpp
index 7406207..a8d6b95 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVVMReflect.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVVMReflect.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This pass replaces occurences of __nvvm_reflect("string") with an
+// This pass replaces occurrences of __nvvm_reflect("string") with an
// integer based on -nvvm-reflect-list string=<int> option given to this pass.
// If an undefined string value is seen in a call to __nvvm_reflect("string"),
// a default value of 0 will be used.
@@ -18,13 +18,14 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/Pass.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
-#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/IR/Constants.h"
+#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_os_ostream.h"
@@ -38,6 +39,8 @@
using namespace llvm;
+#define DEBUG_TYPE "nvptx-reflect"
+
namespace llvm { void initializeNVVMReflectPass(PassRegistry &); }
namespace {
@@ -45,17 +48,16 @@ class NVVMReflect : public ModulePass {
private:
StringMap<int> VarMap;
typedef DenseMap<std::string, int>::iterator VarMapIter;
- Function *ReflectFunction;
public:
static char ID;
- NVVMReflect() : ModulePass(ID), ReflectFunction(0) {
+ NVVMReflect() : ModulePass(ID) {
initializeNVVMReflectPass(*PassRegistry::getPassRegistry());
VarMap.clear();
}
NVVMReflect(const StringMap<int> &Mapping)
- : ModulePass(ID), ReflectFunction(0) {
+ : ModulePass(ID) {
initializeNVVMReflectPass(*PassRegistry::getPassRegistry());
for (StringMap<int>::const_iterator I = Mapping.begin(), E = Mapping.end();
I != E; ++I) {
@@ -63,9 +65,13 @@ public:
}
}
- void getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); }
- virtual bool runOnModule(Module &);
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+ bool runOnModule(Module &) override;
+private:
+ bool handleFunction(Function *ReflectFunction);
void setVarMap();
};
}
@@ -84,7 +90,7 @@ NVVMReflectEnabled("nvvm-reflect-enable", cl::init(true), cl::Hidden,
char NVVMReflect::ID = 0;
INITIALIZE_PASS(NVVMReflect, "nvvm-reflect",
- "Replace occurences of __nvvm_reflect() calls with 0/1", false,
+ "Replace occurrences of __nvvm_reflect() calls with 0/1", false,
false)
static cl::list<std::string>
@@ -116,19 +122,7 @@ void NVVMReflect::setVarMap() {
}
}
-bool NVVMReflect::runOnModule(Module &M) {
- if (!NVVMReflectEnabled)
- return false;
-
- setVarMap();
-
- ReflectFunction = M.getFunction(NVVM_REFLECT_FUNCTION);
-
- // If reflect function is not used, then there will be
- // no entry in the module.
- if (ReflectFunction == 0)
- return false;
-
+bool NVVMReflect::handleFunction(Function *ReflectFunction) {
// Validate _reflect function
assert(ReflectFunction->isDeclaration() &&
"_reflect function should not have a body");
@@ -143,23 +137,23 @@ bool NVVMReflect::runOnModule(Module &M) {
// ConstantArray can be found successfully, see if it can be
// found in VarMap. If so, replace the uses of CallInst with the
// value found in VarMap. If not, replace the use with value 0.
- for (Value::use_iterator I = ReflectFunction->use_begin(),
- E = ReflectFunction->use_end();
- I != E; ++I) {
- assert(isa<CallInst>(*I) && "Only a call instruction can use _reflect");
- CallInst *Reflect = cast<CallInst>(*I);
+ for (User *U : ReflectFunction->users()) {
+ assert(isa<CallInst>(U) && "Only a call instruction can use _reflect");
+ CallInst *Reflect = cast<CallInst>(U);
assert((Reflect->getNumOperands() == 2) &&
"Only one operand expect for _reflect function");
// In cuda, we will have an extra constant-to-generic conversion of
// the string.
- const Value *conv = Reflect->getArgOperand(0);
- assert(isa<CallInst>(conv) && "Expected a const-to-gen conversion");
- const CallInst *ConvCall = cast<CallInst>(conv);
- const Value *str = ConvCall->getArgOperand(0);
- assert(isa<ConstantExpr>(str) &&
+ const Value *Str = Reflect->getArgOperand(0);
+ if (isa<CallInst>(Str)) {
+ // CUDA path
+ const CallInst *ConvCall = cast<CallInst>(Str);
+ Str = ConvCall->getArgOperand(0);
+ }
+ assert(isa<ConstantExpr>(Str) &&
"Format of _reflect function not recognized");
- const ConstantExpr *GEP = cast<ConstantExpr>(str);
+ const ConstantExpr *GEP = cast<ConstantExpr>(Str);
const Value *Sym = GEP->getOperand(0);
assert(isa<Constant>(Sym) && "Format of _reflect function not recognized");
@@ -193,3 +187,36 @@ bool NVVMReflect::runOnModule(Module &M) {
ToRemove[i]->eraseFromParent();
return true;
}
+
+bool NVVMReflect::runOnModule(Module &M) {
+ if (!NVVMReflectEnabled)
+ return false;
+
+ setVarMap();
+
+
+ bool Res = false;
+ std::string Name;
+ Type *Tys[1];
+ Type *I8Ty = Type::getInt8Ty(M.getContext());
+ Function *ReflectFunction;
+
+ // Check for standard overloaded versions of llvm.nvvm.reflect
+
+ for (unsigned i = 0; i != 5; ++i) {
+ Tys[0] = PointerType::get(I8Ty, i);
+ Name = Intrinsic::getName(Intrinsic::nvvm_reflect, Tys);
+ ReflectFunction = M.getFunction(Name);
+ if(ReflectFunction != 0) {
+ Res |= handleFunction(ReflectFunction);
+ }
+ }
+
+ ReflectFunction = M.getFunction(NVVM_REFLECT_FUNCTION);
+ // If reflect function is not used, then there will be
+ // no entry in the module.
+ if (ReflectFunction != 0)
+ Res |= handleFunction(ReflectFunction);
+
+ return Res;
+}
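
A minimal sketch of the lookup behaviour the file header above describes: strings supplied via -nvvm-reflect-list map to their configured integer and any other string falls back to 0. The "foo"/"bar" keys are placeholders, not real reflect strings.

#include <cassert>
#include <map>
#include <string>

// Stand-in for the pass's VarMap lookup; undefined strings default to 0.
static int reflectValueSketch(const std::map<std::string, int> &VarMap,
                              const std::string &Str) {
  std::map<std::string, int>::const_iterator It = VarMap.find(Str);
  return It == VarMap.end() ? 0 : It->second;
}

int main() {
  std::map<std::string, int> VarMap;
  VarMap["foo"] = 1;                        // e.g. -nvvm-reflect-list foo=1
  assert(reflectValueSketch(VarMap, "foo") == 1);
  assert(reflectValueSketch(VarMap, "bar") == 0);
  return 0;
}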
diff --git a/contrib/llvm/lib/Target/NVPTX/cl_common_defines.h b/contrib/llvm/lib/Target/NVPTX/cl_common_defines.h
index 45cc0b8..02c5a94 100644
--- a/contrib/llvm/lib/Target/NVPTX/cl_common_defines.h
+++ b/contrib/llvm/lib/Target/NVPTX/cl_common_defines.h
@@ -1,5 +1,5 @@
-#ifndef __CL_COMMON_DEFINES_H__
-#define __CL_COMMON_DEFINES_H__
+#ifndef CL_COMMON_DEFINES_H
+#define CL_COMMON_DEFINES_H
// This file includes defines that are common to both kernel code and
// the NVPTX back-end.
@@ -119,4 +119,4 @@ typedef enum clk_sampler_type {
#define CLK_LOCAL_MEM_FENCE (1 << 0)
#define CLK_GLOBAL_MEM_FENCE (1 << 1)
-#endif // __CL_COMMON_DEFINES_H__
+#endif // CL_COMMON_DEFINES_H
diff --git a/contrib/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp b/contrib/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
index fe83fe1..d7066d5 100644
--- a/contrib/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
@@ -9,21 +9,23 @@
#include "MCTargetDesc/PPCMCTargetDesc.h"
#include "MCTargetDesc/PPCMCExpr.h"
-#include "llvm/MC/MCTargetAsmParser.h"
-#include "llvm/MC/MCStreamer.h"
+#include "PPCTargetStreamer.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
@@ -94,6 +96,44 @@ static unsigned VRegs[32] = {
PPC::V24, PPC::V25, PPC::V26, PPC::V27,
PPC::V28, PPC::V29, PPC::V30, PPC::V31
};
+static unsigned VSRegs[64] = {
+ PPC::VSL0, PPC::VSL1, PPC::VSL2, PPC::VSL3,
+ PPC::VSL4, PPC::VSL5, PPC::VSL6, PPC::VSL7,
+ PPC::VSL8, PPC::VSL9, PPC::VSL10, PPC::VSL11,
+ PPC::VSL12, PPC::VSL13, PPC::VSL14, PPC::VSL15,
+ PPC::VSL16, PPC::VSL17, PPC::VSL18, PPC::VSL19,
+ PPC::VSL20, PPC::VSL21, PPC::VSL22, PPC::VSL23,
+ PPC::VSL24, PPC::VSL25, PPC::VSL26, PPC::VSL27,
+ PPC::VSL28, PPC::VSL29, PPC::VSL30, PPC::VSL31,
+
+ PPC::VSH0, PPC::VSH1, PPC::VSH2, PPC::VSH3,
+ PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7,
+ PPC::VSH8, PPC::VSH9, PPC::VSH10, PPC::VSH11,
+ PPC::VSH12, PPC::VSH13, PPC::VSH14, PPC::VSH15,
+ PPC::VSH16, PPC::VSH17, PPC::VSH18, PPC::VSH19,
+ PPC::VSH20, PPC::VSH21, PPC::VSH22, PPC::VSH23,
+ PPC::VSH24, PPC::VSH25, PPC::VSH26, PPC::VSH27,
+ PPC::VSH28, PPC::VSH29, PPC::VSH30, PPC::VSH31
+};
+static unsigned VSFRegs[64] = {
+ PPC::F0, PPC::F1, PPC::F2, PPC::F3,
+ PPC::F4, PPC::F5, PPC::F6, PPC::F7,
+ PPC::F8, PPC::F9, PPC::F10, PPC::F11,
+ PPC::F12, PPC::F13, PPC::F14, PPC::F15,
+ PPC::F16, PPC::F17, PPC::F18, PPC::F19,
+ PPC::F20, PPC::F21, PPC::F22, PPC::F23,
+ PPC::F24, PPC::F25, PPC::F26, PPC::F27,
+ PPC::F28, PPC::F29, PPC::F30, PPC::F31,
+
+ PPC::VF0, PPC::VF1, PPC::VF2, PPC::VF3,
+ PPC::VF4, PPC::VF5, PPC::VF6, PPC::VF7,
+ PPC::VF8, PPC::VF9, PPC::VF10, PPC::VF11,
+ PPC::VF12, PPC::VF13, PPC::VF14, PPC::VF15,
+ PPC::VF16, PPC::VF17, PPC::VF18, PPC::VF19,
+ PPC::VF20, PPC::VF21, PPC::VF22, PPC::VF23,
+ PPC::VF24, PPC::VF25, PPC::VF26, PPC::VF27,
+ PPC::VF28, PPC::VF29, PPC::VF30, PPC::VF31
+};
static unsigned CRBITRegs[32] = {
PPC::CR0LT, PPC::CR0GT, PPC::CR0EQ, PPC::CR0UN,
PPC::CR1LT, PPC::CR1GT, PPC::CR1EQ, PPC::CR1UN,
@@ -177,6 +217,7 @@ class PPCAsmParser : public MCTargetAsmParser {
MCAsmParser &Parser;
const MCInstrInfo &MII;
bool IsPPC64;
+ bool IsDarwin;
MCAsmParser &getParser() const { return Parser; }
MCAsmLexer &getLexer() const { return Parser.getLexer(); }
@@ -185,30 +226,34 @@ class PPCAsmParser : public MCTargetAsmParser {
bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
bool isPPC64() const { return IsPPC64; }
+ bool isDarwin() const { return IsDarwin; }
bool MatchRegisterName(const AsmToken &Tok,
unsigned &RegNo, int64_t &IntVal);
- virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
+ bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
const MCExpr *ExtractModifierFromExpr(const MCExpr *E,
PPCMCExpr::VariantKind &Variant);
const MCExpr *FixupVariantKind(const MCExpr *E);
bool ParseExpression(const MCExpr *&EVal);
+ bool ParseDarwinExpression(const MCExpr *&EVal);
- bool ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ bool ParseOperand(OperandVector &Operands);
bool ParseDirectiveWord(unsigned Size, SMLoc L);
bool ParseDirectiveTC(unsigned Size, SMLoc L);
bool ParseDirectiveMachine(SMLoc L);
+ bool ParseDarwinDirectiveMachine(SMLoc L);
+ bool ParseDirectiveAbiVersion(SMLoc L);
+ bool ParseDirectiveLocalEntry(SMLoc L);
bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm);
+ OperandVector &Operands, MCStreamer &Out,
+ unsigned &ErrorInfo,
+ bool MatchingInlineAsm) override;
- void ProcessInstruction(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
+ void ProcessInstruction(MCInst &Inst, const OperandVector &Ops);
/// @name Auto-generated Match Functions
/// {
@@ -221,27 +266,29 @@ class PPCAsmParser : public MCTargetAsmParser {
public:
PPCAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
- const MCInstrInfo &_MII)
+ const MCInstrInfo &_MII,
+ const MCTargetOptions &Options)
: MCTargetAsmParser(), STI(_STI), Parser(_Parser), MII(_MII) {
// Check for 64-bit vs. 32-bit pointer mode.
Triple TheTriple(STI.getTargetTriple());
IsPPC64 = (TheTriple.getArch() == Triple::ppc64 ||
TheTriple.getArch() == Triple::ppc64le);
+ IsDarwin = TheTriple.isMacOSX();
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
}
- virtual bool ParseInstruction(ParseInstructionInfo &Info,
- StringRef Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+ SMLoc NameLoc, OperandVector &Operands) override;
- virtual bool ParseDirective(AsmToken DirectiveID);
+ bool ParseDirective(AsmToken DirectiveID) override;
- unsigned validateTargetOperandClass(MCParsedAsmOperand *Op, unsigned Kind);
+ unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
+ unsigned Kind) override;
- virtual const MCExpr *applyModifierToExpr(const MCExpr *E,
- MCSymbolRefExpr::VariantKind,
- MCContext &Ctx);
+ const MCExpr *applyModifierToExpr(const MCExpr *E,
+ MCSymbolRefExpr::VariantKind,
+ MCContext &Ctx) override;
};
/// PPCOperand - Instances of this class represent a parsed PowerPC machine
@@ -306,10 +353,10 @@ public:
}
/// getStartLoc - Get the location of the first token of this operand.
- SMLoc getStartLoc() const { return StartLoc; }
+ SMLoc getStartLoc() const override { return StartLoc; }
/// getEndLoc - Get the location of the last token of this operand.
- SMLoc getEndLoc() const { return EndLoc; }
+ SMLoc getEndLoc() const override { return EndLoc; }
/// isPPC64 - True if this operand is for an instruction in 64-bit mode.
bool isPPC64() const { return IsPPC64; }
@@ -334,11 +381,16 @@ public:
return TLSReg.Sym;
}
- unsigned getReg() const {
+ unsigned getReg() const override {
assert(isRegNumber() && "Invalid access!");
return (unsigned) Imm.Val;
}
+ unsigned getVSReg() const {
+ assert(isVSRegNumber() && "Invalid access!");
+ return (unsigned) Imm.Val;
+ }
+
unsigned getCCReg() const {
assert(isCCRegNumber() && "Invalid access!");
return (unsigned) (Kind == Immediate ? Imm.Val : Expr.CRVal);
@@ -354,8 +406,9 @@ public:
return 7 - countTrailingZeros<uint64_t>(Imm.Val);
}
- bool isToken() const { return Kind == Token; }
- bool isImm() const { return Kind == Immediate || Kind == Expression; }
+ bool isToken() const override { return Kind == Token; }
+ bool isImm() const override { return Kind == Immediate || Kind == Expression; }
+ bool isU2Imm() const { return Kind == Immediate && isUInt<2>(getImm()); }
bool isU5Imm() const { return Kind == Immediate && isUInt<5>(getImm()); }
bool isS5Imm() const { return Kind == Immediate && isInt<5>(getImm()); }
bool isU6Imm() const { return Kind == Immediate && isUInt<6>(getImm()); }
@@ -376,6 +429,7 @@ public:
(Kind == Immediate && isInt<16>(getImm()) &&
(getImm() & 3) == 0); }
bool isRegNumber() const { return Kind == Immediate && isUInt<5>(getImm()); }
+ bool isVSRegNumber() const { return Kind == Immediate && isUInt<6>(getImm()); }
bool isCCRegNumber() const { return (Kind == Expression
&& isUInt<3>(getExprCRVal())) ||
(Kind == Immediate
@@ -386,8 +440,8 @@ public:
&& isUInt<5>(getImm())); }
bool isCRBitMask() const { return Kind == Immediate && isUInt<8>(getImm()) &&
isPowerOf2_32(getImm()); }
- bool isMem() const { return false; }
- bool isReg() const { return false; }
+ bool isMem() const override { return false; }
+ bool isReg() const override { return false; }
void addRegOperands(MCInst &Inst, unsigned N) const {
llvm_unreachable("addRegOperands");
@@ -442,6 +496,16 @@ public:
Inst.addOperand(MCOperand::CreateReg(VRegs[getReg()]));
}
+ void addRegVSRCOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(VSRegs[getVSReg()]));
+ }
+
+ void addRegVSFRCOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(VSFRegs[getVSReg()]));
+ }
+
void addRegCRBITRCOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::CreateReg(CRBITRegs[getCRBit()]));
@@ -483,10 +547,11 @@ public:
return StringRef(Tok.Data, Tok.Length);
}
- virtual void print(raw_ostream &OS) const;
+ void print(raw_ostream &OS) const override;
- static PPCOperand *CreateToken(StringRef Str, SMLoc S, bool IsPPC64) {
- PPCOperand *Op = new PPCOperand(Token);
+ static std::unique_ptr<PPCOperand> CreateToken(StringRef Str, SMLoc S,
+ bool IsPPC64) {
+ auto Op = make_unique<PPCOperand>(Token);
Op->Tok.Data = Str.data();
Op->Tok.Length = Str.size();
Op->StartLoc = S;
@@ -495,22 +560,27 @@ public:
return Op;
}
- static PPCOperand *CreateTokenWithStringCopy(StringRef Str, SMLoc S,
- bool IsPPC64) {
+ static std::unique_ptr<PPCOperand>
+ CreateTokenWithStringCopy(StringRef Str, SMLoc S, bool IsPPC64) {
// Allocate extra memory for the string and copy it.
+ // FIXME: This is incorrect, Operands are owned by unique_ptr with a default
+ // deleter which will destroy them by simply using "delete", not correctly
+ // calling operator delete on this extra memory after calling the dtor
+ // explicitly.
void *Mem = ::operator new(sizeof(PPCOperand) + Str.size());
- PPCOperand *Op = new (Mem) PPCOperand(Token);
- Op->Tok.Data = (const char *)(Op + 1);
+ std::unique_ptr<PPCOperand> Op(new (Mem) PPCOperand(Token));
+ Op->Tok.Data = (const char *)(Op.get() + 1);
Op->Tok.Length = Str.size();
- std::memcpy((char *)(Op + 1), Str.data(), Str.size());
+ std::memcpy((void *)Op->Tok.Data, Str.data(), Str.size());
Op->StartLoc = S;
Op->EndLoc = S;
Op->IsPPC64 = IsPPC64;
return Op;
}
- static PPCOperand *CreateImm(int64_t Val, SMLoc S, SMLoc E, bool IsPPC64) {
- PPCOperand *Op = new PPCOperand(Immediate);
+ static std::unique_ptr<PPCOperand> CreateImm(int64_t Val, SMLoc S, SMLoc E,
+ bool IsPPC64) {
+ auto Op = make_unique<PPCOperand>(Immediate);
Op->Imm.Val = Val;
Op->StartLoc = S;
Op->EndLoc = E;
@@ -518,9 +588,9 @@ public:
return Op;
}
- static PPCOperand *CreateExpr(const MCExpr *Val,
- SMLoc S, SMLoc E, bool IsPPC64) {
- PPCOperand *Op = new PPCOperand(Expression);
+ static std::unique_ptr<PPCOperand> CreateExpr(const MCExpr *Val, SMLoc S,
+ SMLoc E, bool IsPPC64) {
+ auto Op = make_unique<PPCOperand>(Expression);
Op->Expr.Val = Val;
Op->Expr.CRVal = EvaluateCRExpr(Val);
Op->StartLoc = S;
@@ -529,9 +599,9 @@ public:
return Op;
}
- static PPCOperand *CreateTLSReg(const MCSymbolRefExpr *Sym,
- SMLoc S, SMLoc E, bool IsPPC64) {
- PPCOperand *Op = new PPCOperand(TLSRegister);
+ static std::unique_ptr<PPCOperand>
+ CreateTLSReg(const MCSymbolRefExpr *Sym, SMLoc S, SMLoc E, bool IsPPC64) {
+ auto Op = make_unique<PPCOperand>(TLSRegister);
Op->TLSReg.Sym = Sym;
Op->StartLoc = S;
Op->EndLoc = E;
@@ -539,8 +609,8 @@ public:
return Op;
}
- static PPCOperand *CreateFromMCExpr(const MCExpr *Val,
- SMLoc S, SMLoc E, bool IsPPC64) {
+ static std::unique_ptr<PPCOperand>
+ CreateFromMCExpr(const MCExpr *Val, SMLoc S, SMLoc E, bool IsPPC64) {
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val))
return CreateImm(CE->getValue(), S, E, IsPPC64);
@@ -571,10 +641,8 @@ void PPCOperand::print(raw_ostream &OS) const {
}
}
-
-void PPCAsmParser::
-ProcessInstruction(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+void PPCAsmParser::ProcessInstruction(MCInst &Inst,
+ const OperandVector &Operands) {
int Opcode = Inst.getOpcode();
switch (Opcode) {
case PPC::LAx: {
@@ -854,11 +922,10 @@ ProcessInstruction(MCInst &Inst,
}
}
-bool PPCAsmParser::
-MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm) {
+bool PPCAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ OperandVector &Operands,
+ MCStreamer &Out, unsigned &ErrorInfo,
+ bool MatchingInlineAsm) {
MCInst Inst;
switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
@@ -867,7 +934,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
// Post-process instructions (typically extended mnemonics)
ProcessInstruction(Inst, Operands);
Inst.setLoc(IDLoc);
- Out.EmitInstruction(Inst);
+ Out.EmitInstruction(Inst, STI);
return false;
case Match_MissingFeature:
return Error(IDLoc, "instruction use requires an option to be enabled");
@@ -879,7 +946,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
if (ErrorInfo >= Operands.size())
return Error(IDLoc, "too few operands for instruction");
- ErrorLoc = ((PPCOperand*)Operands[ErrorInfo])->getStartLoc();
+ ErrorLoc = ((PPCOperand &)*Operands[ErrorInfo]).getStartLoc();
if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
}
@@ -960,7 +1027,7 @@ ExtractModifierFromExpr(const MCExpr *E,
switch (E->getKind()) {
case MCExpr::Target:
case MCExpr::Constant:
- return 0;
+ return nullptr;
case MCExpr::SymbolRef: {
const MCSymbolRefExpr *SRE = cast<MCSymbolRefExpr>(E);
@@ -988,7 +1055,7 @@ ExtractModifierFromExpr(const MCExpr *E,
Variant = PPCMCExpr::VK_PPC_HIGHESTA;
break;
default:
- return 0;
+ return nullptr;
}
return MCSymbolRefExpr::Create(&SRE->getSymbol(), Context);
@@ -998,7 +1065,7 @@ ExtractModifierFromExpr(const MCExpr *E,
const MCUnaryExpr *UE = cast<MCUnaryExpr>(E);
const MCExpr *Sub = ExtractModifierFromExpr(UE->getSubExpr(), Variant);
if (!Sub)
- return 0;
+ return nullptr;
return MCUnaryExpr::Create(UE->getOpcode(), Sub, Context);
}
@@ -1009,7 +1076,7 @@ ExtractModifierFromExpr(const MCExpr *E,
const MCExpr *RHS = ExtractModifierFromExpr(BE->getRHS(), RHSVariant);
if (!LHS && !RHS)
- return 0;
+ return nullptr;
if (!LHS) LHS = BE->getLHS();
if (!RHS) RHS = BE->getRHS();
@@ -1021,7 +1088,7 @@ ExtractModifierFromExpr(const MCExpr *E,
else if (LHSVariant == RHSVariant)
Variant = LHSVariant;
else
- return 0;
+ return nullptr;
return MCBinaryExpr::Create(BE->getOpcode(), LHS, RHS, Context);
}
@@ -1081,10 +1148,16 @@ FixupVariantKind(const MCExpr *E) {
llvm_unreachable("Invalid expression kind!");
}
-/// Parse an expression. This differs from the default "parseExpression"
-/// in that it handles complex \code @l/@ha \endcode modifiers.
+/// ParseExpression. This differs from the default "parseExpression" in that
+/// it handles modifiers.
bool PPCAsmParser::
ParseExpression(const MCExpr *&EVal) {
+
+ if (isDarwin())
+ return ParseDarwinExpression(EVal);
+
+ // (ELF Platforms)
+ // Handle \code @l/@ha \endcode
if (getParser().parseExpression(EVal))
return true;
@@ -1098,12 +1171,59 @@ ParseExpression(const MCExpr *&EVal) {
return false;
}
+/// ParseDarwinExpression.  (MachO Platforms)
+/// This differs from the default "parseExpression" in that it handles detection
+/// of the \code hi16(), ha16() and lo16() \endcode modifiers.  At present,
+/// parseExpression() does not recognize these modifiers in the Darwin/MachO
+/// syntax form, so they are handled here.  TODO: Determine whether it is worth
+/// arranging for this to be done at a higher level.
bool PPCAsmParser::
-ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ParseDarwinExpression(const MCExpr *&EVal) {
+ PPCMCExpr::VariantKind Variant = PPCMCExpr::VK_PPC_None;
+ switch (getLexer().getKind()) {
+ default:
+ break;
+ case AsmToken::Identifier:
+    // Compiler-generated Darwin identifiers begin with 'L', 'l', '_' or '"';
+    // anything starting with another character should be part of the asm
+    // syntax.  If handwritten asm includes an identifier like lo16, then all
+    // bets are off, but no one would do that, right?
+ StringRef poss = Parser.getTok().getString();
+ if (poss.equals_lower("lo16")) {
+ Variant = PPCMCExpr::VK_PPC_LO;
+ } else if (poss.equals_lower("hi16")) {
+ Variant = PPCMCExpr::VK_PPC_HI;
+ } else if (poss.equals_lower("ha16")) {
+ Variant = PPCMCExpr::VK_PPC_HA;
+ }
+ if (Variant != PPCMCExpr::VK_PPC_None) {
+ Parser.Lex(); // Eat the xx16
+ if (getLexer().isNot(AsmToken::LParen))
+ return Error(Parser.getTok().getLoc(), "expected '('");
+ Parser.Lex(); // Eat the '('
+ }
+ break;
+ }
+
+ if (getParser().parseExpression(EVal))
+ return true;
+
+ if (Variant != PPCMCExpr::VK_PPC_None) {
+ if (getLexer().isNot(AsmToken::RParen))
+ return Error(Parser.getTok().getLoc(), "expected ')'");
+ Parser.Lex(); // Eat the ')'
+ EVal = PPCMCExpr::Create(Variant, EVal, false, getParser().getContext());
+ }
+ return false;
+}
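
For reference, the Darwin hi16()/ha16()/lo16() modifiers select 16-bit halves of a value in the same way the ELF @h/@ha/@l modifiers do: ha16() pre-biases by 0x8000 so that the high half plus the sign-extended low half reconstructs the original value. A minimal sketch of that arithmetic, with illustrative helper names that are not part of this patch:

#include <cstdint>

// Illustrative only; these helpers do not exist in PPCAsmParser.
static uint16_t lo16(uint32_t V) { return V & 0xFFFF; }
static uint16_t hi16(uint32_t V) { return (V >> 16) & 0xFFFF; }
// Pre-bias by 0x8000 so (ha16(V) << 16) + int16_t(lo16(V)) == V.
static uint16_t ha16(uint32_t V) { return ((V + 0x8000) >> 16) & 0xFFFF; }

// Example: V = 0x1234FFF0 gives lo16 = 0xFFF0 (sign-extends to -16),
// hi16 = 0x1234 and ha16 = 0x1235; (0x1235 << 16) - 16 == 0x1234FFF0.
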
+
+/// ParseOperand
+/// This handles registers in the form 'NN', '%rNN' for ELF platforms and
+/// rNN for MachO.
+bool PPCAsmParser::ParseOperand(OperandVector &Operands) {
SMLoc S = Parser.getTok().getLoc();
SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
const MCExpr *EVal;
- PPCOperand *Op;
// Attempt to parse the next token as an immediate
switch (getLexer().getKind()) {
@@ -1115,20 +1235,35 @@ ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
int64_t IntVal;
if (!MatchRegisterName(Parser.getTok(), RegNo, IntVal)) {
Parser.Lex(); // Eat the identifier token.
- Op = PPCOperand::CreateImm(IntVal, S, E, isPPC64());
- Operands.push_back(Op);
+ Operands.push_back(PPCOperand::CreateImm(IntVal, S, E, isPPC64()));
return false;
}
return Error(S, "invalid register name");
+ case AsmToken::Identifier:
+ // Note that non-register-name identifiers from the compiler will begin
+ // with '_', 'L'/'l' or '"'. Of course, handwritten asm could include
+ // identifiers like r31foo - so we fall through in the event that parsing
+ // a register name fails.
+ if (isDarwin()) {
+ unsigned RegNo;
+ int64_t IntVal;
+ if (!MatchRegisterName(Parser.getTok(), RegNo, IntVal)) {
+ Parser.Lex(); // Eat the identifier token.
+ Operands.push_back(PPCOperand::CreateImm(IntVal, S, E, isPPC64()));
+ return false;
+ }
+ }
+ // Fall-through to process non-register-name identifiers as expression.
// All other expressions
case AsmToken::LParen:
case AsmToken::Plus:
case AsmToken::Minus:
case AsmToken::Integer:
- case AsmToken::Identifier:
case AsmToken::Dot:
case AsmToken::Dollar:
+ case AsmToken::Exclaim:
+ case AsmToken::Tilde:
if (!ParseExpression(EVal))
break;
/* fall through */
@@ -1137,8 +1272,7 @@ ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
}
// Push the parsed operand into the list of operands
- Op = PPCOperand::CreateFromMCExpr(EVal, S, E, isPPC64());
- Operands.push_back(Op);
+ Operands.push_back(PPCOperand::CreateFromMCExpr(EVal, S, E, isPPC64()));
// Check whether this is a TLS call expression
bool TLSCall = false;
@@ -1157,8 +1291,7 @@ ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
E = Parser.getTok().getLoc();
Parser.Lex(); // Eat the ')'.
- Op = PPCOperand::CreateFromMCExpr(TLSSym, S, E, isPPC64());
- Operands.push_back(Op);
+ Operands.push_back(PPCOperand::CreateFromMCExpr(TLSSym, S, E, isPPC64()));
}
// Otherwise, check for D-form memory operands
@@ -1177,11 +1310,25 @@ ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
break;
case AsmToken::Integer:
- if (getParser().parseAbsoluteExpression(IntVal) ||
+ if (!isDarwin()) {
+ if (getParser().parseAbsoluteExpression(IntVal) ||
IntVal < 0 || IntVal > 31)
return Error(S, "invalid register number");
+ } else {
+ return Error(S, "unexpected integer value");
+ }
break;
+ case AsmToken::Identifier:
+ if (isDarwin()) {
+ unsigned RegNo;
+ if (!MatchRegisterName(Parser.getTok(), RegNo, IntVal)) {
+ Parser.Lex(); // Eat the identifier token.
+ break;
+ }
+ }
+      // Fall through.
+
default:
return Error(S, "invalid memory operand");
}
@@ -1191,17 +1338,15 @@ ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
E = Parser.getTok().getLoc();
Parser.Lex(); // Eat the ')'.
- Op = PPCOperand::CreateImm(IntVal, S, E, isPPC64());
- Operands.push_back(Op);
+ Operands.push_back(PPCOperand::CreateImm(IntVal, S, E, isPPC64()));
}
return false;
}
/// Parse an instruction mnemonic followed by its operands.
-bool PPCAsmParser::
-ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+bool PPCAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+ SMLoc NameLoc, OperandVector &Operands) {
// The first operand is the token for the instruction name.
// If the next character is a '+' or '-', we need to add it to the
// instruction name, to match what TableGen is doing.
@@ -1261,14 +1406,23 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
/// ParseDirective parses the PPC specific directives
bool PPCAsmParser::ParseDirective(AsmToken DirectiveID) {
StringRef IDVal = DirectiveID.getIdentifier();
- if (IDVal == ".word")
- return ParseDirectiveWord(2, DirectiveID.getLoc());
- if (IDVal == ".llong")
- return ParseDirectiveWord(8, DirectiveID.getLoc());
- if (IDVal == ".tc")
- return ParseDirectiveTC(isPPC64()? 8 : 4, DirectiveID.getLoc());
- if (IDVal == ".machine")
- return ParseDirectiveMachine(DirectiveID.getLoc());
+ if (!isDarwin()) {
+ if (IDVal == ".word")
+ return ParseDirectiveWord(2, DirectiveID.getLoc());
+ if (IDVal == ".llong")
+ return ParseDirectiveWord(8, DirectiveID.getLoc());
+ if (IDVal == ".tc")
+ return ParseDirectiveTC(isPPC64()? 8 : 4, DirectiveID.getLoc());
+ if (IDVal == ".machine")
+ return ParseDirectiveMachine(DirectiveID.getLoc());
+ if (IDVal == ".abiversion")
+ return ParseDirectiveAbiVersion(DirectiveID.getLoc());
+ if (IDVal == ".localentry")
+ return ParseDirectiveLocalEntry(DirectiveID.getLoc());
+ } else {
+ if (IDVal == ".machine")
+ return ParseDarwinDirectiveMachine(DirectiveID.getLoc());
+ }
return true;
}
@@ -1279,7 +1433,7 @@ bool PPCAsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
for (;;) {
const MCExpr *Value;
if (getParser().parseExpression(Value))
- return true;
+ return false;
getParser().getStreamer().EmitValue(Value, Size);
@@ -1303,8 +1457,10 @@ bool PPCAsmParser::ParseDirectiveTC(unsigned Size, SMLoc L) {
while (getLexer().isNot(AsmToken::EndOfStatement)
&& getLexer().isNot(AsmToken::Comma))
Parser.Lex();
- if (getLexer().isNot(AsmToken::Comma))
- return Error(L, "unexpected token in directive");
+ if (getLexer().isNot(AsmToken::Comma)) {
+ Error(L, "unexpected token in directive");
+ return false;
+ }
Parser.Lex();
// Align to word size.
@@ -1314,12 +1470,14 @@ bool PPCAsmParser::ParseDirectiveTC(unsigned Size, SMLoc L) {
return ParseDirectiveWord(Size, L);
}
-/// ParseDirectiveMachine
+/// ParseDirectiveMachine (ELF platforms)
/// ::= .machine [ cpu | "push" | "pop" ]
bool PPCAsmParser::ParseDirectiveMachine(SMLoc L) {
if (getLexer().isNot(AsmToken::Identifier) &&
- getLexer().isNot(AsmToken::String))
- return Error(L, "unexpected token in directive");
+ getLexer().isNot(AsmToken::String)) {
+ Error(L, "unexpected token in directive");
+ return false;
+ }
StringRef CPU = Parser.getTok().getIdentifier();
Parser.Lex();
@@ -1329,15 +1487,118 @@ bool PPCAsmParser::ParseDirectiveMachine(SMLoc L) {
// Implement ".machine any" (by doing nothing) for the benefit
// of existing assembler code. Likewise, we can then implement
// ".machine push" and ".machine pop" as no-op.
- if (CPU != "any" && CPU != "push" && CPU != "pop")
- return Error(L, "unrecognized machine type");
+ if (CPU != "any" && CPU != "push" && CPU != "pop") {
+ Error(L, "unrecognized machine type");
+ return false;
+ }
+
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ Error(L, "unexpected token in directive");
+ return false;
+ }
+ PPCTargetStreamer &TStreamer =
+ *static_cast<PPCTargetStreamer *>(
+ getParser().getStreamer().getTargetStreamer());
+ TStreamer.emitMachine(CPU);
+
+ return false;
+}
+
+/// ParseDarwinDirectiveMachine (Mach-O platforms)
+/// ::= .machine cpu-identifier
+bool PPCAsmParser::ParseDarwinDirectiveMachine(SMLoc L) {
+ if (getLexer().isNot(AsmToken::Identifier) &&
+ getLexer().isNot(AsmToken::String)) {
+ Error(L, "unexpected token in directive");
+ return false;
+ }
+
+ StringRef CPU = Parser.getTok().getIdentifier();
+ Parser.Lex();
+
+  // FIXME: this is only the 'default' set of cpu variants.
+  // However, we don't act on this information at present; it simply allows
+  // parsing to proceed with minimal sanity checking.
+ if (CPU != "ppc7400" && CPU != "ppc" && CPU != "ppc64") {
+ Error(L, "unrecognized cpu type");
+ return false;
+ }
+
+ if (isPPC64() && (CPU == "ppc7400" || CPU == "ppc")) {
+ Error(L, "wrong cpu type specified for 64bit");
+ return false;
+ }
+ if (!isPPC64() && CPU == "ppc64") {
+ Error(L, "wrong cpu type specified for 32bit");
+ return false;
+ }
- if (getLexer().isNot(AsmToken::EndOfStatement))
- return Error(L, "unexpected token in directive");
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ Error(L, "unexpected token in directive");
+ return false;
+ }
return false;
}
+/// ParseDirectiveAbiVersion
+/// ::= .abiversion constant-expression
+bool PPCAsmParser::ParseDirectiveAbiVersion(SMLoc L) {
+ int64_t AbiVersion;
+  if (getParser().parseAbsoluteExpression(AbiVersion)) {
+ Error(L, "expected constant expression");
+ return false;
+ }
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ Error(L, "unexpected token in directive");
+ return false;
+ }
+
+ PPCTargetStreamer &TStreamer =
+ *static_cast<PPCTargetStreamer *>(
+ getParser().getStreamer().getTargetStreamer());
+ TStreamer.emitAbiVersion(AbiVersion);
+
+ return false;
+}
+
+/// ParseDirectiveLocalEntry
+/// ::= .localentry symbol, expression
+bool PPCAsmParser::ParseDirectiveLocalEntry(SMLoc L) {
+ StringRef Name;
+ if (getParser().parseIdentifier(Name)) {
+ Error(L, "expected identifier in directive");
+ return false;
+ }
+ MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
+
+ if (getLexer().isNot(AsmToken::Comma)) {
+ Error(L, "unexpected token in directive");
+ return false;
+ }
+ Lex();
+
+ const MCExpr *Expr;
+ if (getParser().parseExpression(Expr)) {
+ Error(L, "expected expression");
+ return false;
+ }
+
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ Error(L, "unexpected token in directive");
+ return false;
+ }
+
+ PPCTargetStreamer &TStreamer =
+ *static_cast<PPCTargetStreamer *>(
+ getParser().getStreamer().getTargetStreamer());
+ TStreamer.emitLocalEntry(Sym, Expr);
+
+ return false;
+}
+
/// Force static initialization.
extern "C" void LLVMInitializePowerPCAsmParser() {
RegisterMCAsmParser<PPCAsmParser> A(ThePPC32Target);
@@ -1351,7 +1612,7 @@ extern "C" void LLVMInitializePowerPCAsmParser() {
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
-unsigned PPCAsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
+unsigned PPCAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
unsigned Kind) {
// If the kind is a token for a literal immediate, check if our asm
// operand matches. This is for InstAliases which have a fixed-value
@@ -1365,8 +1626,8 @@ unsigned PPCAsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
default: return Match_InvalidOperand;
}
- PPCOperand *Op = static_cast<PPCOperand*>(AsmOp);
- if (Op->isImm() && Op->getImm() == ImmVal)
+ PPCOperand &Op = static_cast<PPCOperand &>(AsmOp);
+ if (Op.isImm() && Op.getImm() == ImmVal)
return Match_Success;
return Match_InvalidOperand;
@@ -1392,6 +1653,6 @@ PPCAsmParser::applyModifierToExpr(const MCExpr *E,
case MCSymbolRefExpr::VK_PPC_HIGHESTA:
return PPCMCExpr::Create(PPCMCExpr::VK_PPC_HIGHESTA, E, false, Ctx);
default:
- return 0;
+ return nullptr;
}
}
diff --git a/contrib/llvm/lib/Target/PowerPC/Disassembler/CMakeLists.txt b/contrib/llvm/lib/Target/PowerPC/Disassembler/CMakeLists.txt
new file mode 100644
index 0000000..ca457df
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/Disassembler/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_llvm_library(LLVMPowerPCDisassembler
+ PPCDisassembler.cpp
+ )
diff --git a/contrib/llvm/lib/Target/PowerPC/Disassembler/LLVMBuild.txt b/contrib/llvm/lib/Target/PowerPC/Disassembler/LLVMBuild.txt
new file mode 100644
index 0000000..b0978c2
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/Disassembler/LLVMBuild.txt
@@ -0,0 +1,23 @@
+;===-- ./lib/Target/PowerPC/Disassembler/LLVMBuild.txt ---------*- Conf -*--===;
+;
+; The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+; http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[component_0]
+type = Library
+name = PowerPCDisassembler
+parent = PowerPC
+required_libraries = MC PowerPCInfo Support
+add_to_library_groups = PowerPC
diff --git a/contrib/llvm/lib/Target/PowerPC/Disassembler/Makefile b/contrib/llvm/lib/Target/PowerPC/Disassembler/Makefile
new file mode 100644
index 0000000..86e3b47
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/Disassembler/Makefile
@@ -0,0 +1,16 @@
+##===-- lib/Target/PowerPC/Disassembler/Makefile -----------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME = LLVMPowerPCDisassembler
+
+# Hack: we need to include 'main' PPC target directory to grab private headers
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+
+include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp b/contrib/llvm/lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp
new file mode 100644
index 0000000..a2305a9
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp
@@ -0,0 +1,348 @@
+//===------ PPCDisassembler.cpp - Disassembler for PowerPC ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PPC.h"
+#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCFixedLenDisassembler.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/MemoryObject.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "ppc-disassembler"
+
+typedef MCDisassembler::DecodeStatus DecodeStatus;
+
+namespace {
+class PPCDisassembler : public MCDisassembler {
+public:
+ PPCDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx)
+ : MCDisassembler(STI, Ctx) {}
+ virtual ~PPCDisassembler() {}
+
+ // Override MCDisassembler.
+ virtual DecodeStatus getInstruction(MCInst &instr,
+ uint64_t &size,
+ const MemoryObject &region,
+ uint64_t address,
+ raw_ostream &vStream,
+ raw_ostream &cStream) const override;
+};
+} // end anonymous namespace
+
+static MCDisassembler *createPPCDisassembler(const Target &T,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new PPCDisassembler(STI, Ctx);
+}
+
+extern "C" void LLVMInitializePowerPCDisassembler() {
+ // Register the disassembler for each target.
+ TargetRegistry::RegisterMCDisassembler(ThePPC32Target,
+ createPPCDisassembler);
+ TargetRegistry::RegisterMCDisassembler(ThePPC64Target,
+ createPPCDisassembler);
+ TargetRegistry::RegisterMCDisassembler(ThePPC64LETarget,
+ createPPCDisassembler);
+}
+
+// FIXME: These can be generated by TableGen from the existing register
+// encoding values!
+
+static const unsigned CRRegs[] = {
+ PPC::CR0, PPC::CR1, PPC::CR2, PPC::CR3,
+ PPC::CR4, PPC::CR5, PPC::CR6, PPC::CR7
+};
+
+static const unsigned CRBITRegs[] = {
+ PPC::CR0LT, PPC::CR0GT, PPC::CR0EQ, PPC::CR0UN,
+ PPC::CR1LT, PPC::CR1GT, PPC::CR1EQ, PPC::CR1UN,
+ PPC::CR2LT, PPC::CR2GT, PPC::CR2EQ, PPC::CR2UN,
+ PPC::CR3LT, PPC::CR3GT, PPC::CR3EQ, PPC::CR3UN,
+ PPC::CR4LT, PPC::CR4GT, PPC::CR4EQ, PPC::CR4UN,
+ PPC::CR5LT, PPC::CR5GT, PPC::CR5EQ, PPC::CR5UN,
+ PPC::CR6LT, PPC::CR6GT, PPC::CR6EQ, PPC::CR6UN,
+ PPC::CR7LT, PPC::CR7GT, PPC::CR7EQ, PPC::CR7UN
+};
+
+static const unsigned FRegs[] = {
+ PPC::F0, PPC::F1, PPC::F2, PPC::F3,
+ PPC::F4, PPC::F5, PPC::F6, PPC::F7,
+ PPC::F8, PPC::F9, PPC::F10, PPC::F11,
+ PPC::F12, PPC::F13, PPC::F14, PPC::F15,
+ PPC::F16, PPC::F17, PPC::F18, PPC::F19,
+ PPC::F20, PPC::F21, PPC::F22, PPC::F23,
+ PPC::F24, PPC::F25, PPC::F26, PPC::F27,
+ PPC::F28, PPC::F29, PPC::F30, PPC::F31
+};
+
+static const unsigned VRegs[] = {
+ PPC::V0, PPC::V1, PPC::V2, PPC::V3,
+ PPC::V4, PPC::V5, PPC::V6, PPC::V7,
+ PPC::V8, PPC::V9, PPC::V10, PPC::V11,
+ PPC::V12, PPC::V13, PPC::V14, PPC::V15,
+ PPC::V16, PPC::V17, PPC::V18, PPC::V19,
+ PPC::V20, PPC::V21, PPC::V22, PPC::V23,
+ PPC::V24, PPC::V25, PPC::V26, PPC::V27,
+ PPC::V28, PPC::V29, PPC::V30, PPC::V31
+};
+
+static const unsigned VSRegs[] = {
+ PPC::VSL0, PPC::VSL1, PPC::VSL2, PPC::VSL3,
+ PPC::VSL4, PPC::VSL5, PPC::VSL6, PPC::VSL7,
+ PPC::VSL8, PPC::VSL9, PPC::VSL10, PPC::VSL11,
+ PPC::VSL12, PPC::VSL13, PPC::VSL14, PPC::VSL15,
+ PPC::VSL16, PPC::VSL17, PPC::VSL18, PPC::VSL19,
+ PPC::VSL20, PPC::VSL21, PPC::VSL22, PPC::VSL23,
+ PPC::VSL24, PPC::VSL25, PPC::VSL26, PPC::VSL27,
+ PPC::VSL28, PPC::VSL29, PPC::VSL30, PPC::VSL31,
+
+ PPC::VSH0, PPC::VSH1, PPC::VSH2, PPC::VSH3,
+ PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7,
+ PPC::VSH8, PPC::VSH9, PPC::VSH10, PPC::VSH11,
+ PPC::VSH12, PPC::VSH13, PPC::VSH14, PPC::VSH15,
+ PPC::VSH16, PPC::VSH17, PPC::VSH18, PPC::VSH19,
+ PPC::VSH20, PPC::VSH21, PPC::VSH22, PPC::VSH23,
+ PPC::VSH24, PPC::VSH25, PPC::VSH26, PPC::VSH27,
+ PPC::VSH28, PPC::VSH29, PPC::VSH30, PPC::VSH31
+};
+
+static const unsigned VSFRegs[] = {
+ PPC::F0, PPC::F1, PPC::F2, PPC::F3,
+ PPC::F4, PPC::F5, PPC::F6, PPC::F7,
+ PPC::F8, PPC::F9, PPC::F10, PPC::F11,
+ PPC::F12, PPC::F13, PPC::F14, PPC::F15,
+ PPC::F16, PPC::F17, PPC::F18, PPC::F19,
+ PPC::F20, PPC::F21, PPC::F22, PPC::F23,
+ PPC::F24, PPC::F25, PPC::F26, PPC::F27,
+ PPC::F28, PPC::F29, PPC::F30, PPC::F31,
+
+ PPC::VF0, PPC::VF1, PPC::VF2, PPC::VF3,
+ PPC::VF4, PPC::VF5, PPC::VF6, PPC::VF7,
+ PPC::VF8, PPC::VF9, PPC::VF10, PPC::VF11,
+ PPC::VF12, PPC::VF13, PPC::VF14, PPC::VF15,
+ PPC::VF16, PPC::VF17, PPC::VF18, PPC::VF19,
+ PPC::VF20, PPC::VF21, PPC::VF22, PPC::VF23,
+ PPC::VF24, PPC::VF25, PPC::VF26, PPC::VF27,
+ PPC::VF28, PPC::VF29, PPC::VF30, PPC::VF31
+};
+
+static const unsigned GPRegs[] = {
+ PPC::R0, PPC::R1, PPC::R2, PPC::R3,
+ PPC::R4, PPC::R5, PPC::R6, PPC::R7,
+ PPC::R8, PPC::R9, PPC::R10, PPC::R11,
+ PPC::R12, PPC::R13, PPC::R14, PPC::R15,
+ PPC::R16, PPC::R17, PPC::R18, PPC::R19,
+ PPC::R20, PPC::R21, PPC::R22, PPC::R23,
+ PPC::R24, PPC::R25, PPC::R26, PPC::R27,
+ PPC::R28, PPC::R29, PPC::R30, PPC::R31
+};
+
+static const unsigned GP0Regs[] = {
+ PPC::ZERO, PPC::R1, PPC::R2, PPC::R3,
+ PPC::R4, PPC::R5, PPC::R6, PPC::R7,
+ PPC::R8, PPC::R9, PPC::R10, PPC::R11,
+ PPC::R12, PPC::R13, PPC::R14, PPC::R15,
+ PPC::R16, PPC::R17, PPC::R18, PPC::R19,
+ PPC::R20, PPC::R21, PPC::R22, PPC::R23,
+ PPC::R24, PPC::R25, PPC::R26, PPC::R27,
+ PPC::R28, PPC::R29, PPC::R30, PPC::R31
+};
+
+static const unsigned G8Regs[] = {
+ PPC::X0, PPC::X1, PPC::X2, PPC::X3,
+ PPC::X4, PPC::X5, PPC::X6, PPC::X7,
+ PPC::X8, PPC::X9, PPC::X10, PPC::X11,
+ PPC::X12, PPC::X13, PPC::X14, PPC::X15,
+ PPC::X16, PPC::X17, PPC::X18, PPC::X19,
+ PPC::X20, PPC::X21, PPC::X22, PPC::X23,
+ PPC::X24, PPC::X25, PPC::X26, PPC::X27,
+ PPC::X28, PPC::X29, PPC::X30, PPC::X31
+};
+
+template <std::size_t N>
+static DecodeStatus decodeRegisterClass(MCInst &Inst, uint64_t RegNo,
+ const unsigned (&Regs)[N]) {
+ assert(RegNo < N && "Invalid register number");
+ Inst.addOperand(MCOperand::CreateReg(Regs[RegNo]));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeCRRCRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, CRRegs);
+}
+
+static DecodeStatus DecodeCRBITRCRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, CRBITRegs);
+}
+
+static DecodeStatus DecodeF4RCRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, FRegs);
+}
+
+static DecodeStatus DecodeF8RCRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, FRegs);
+}
+
+static DecodeStatus DecodeVRRCRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, VRegs);
+}
+
+static DecodeStatus DecodeVSRCRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, VSRegs);
+}
+
+static DecodeStatus DecodeVSFRCRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, VSFRegs);
+}
+
+static DecodeStatus DecodeGPRCRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, GPRegs);
+}
+
+static DecodeStatus DecodeGPRC_NOR0RegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, GP0Regs);
+}
+
+static DecodeStatus DecodeG8RCRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, G8Regs);
+}
+
+#define DecodePointerLikeRegClass0 DecodeGPRCRegisterClass
+#define DecodePointerLikeRegClass1 DecodeGPRC_NOR0RegisterClass
+
+template<unsigned N>
+static DecodeStatus decodeUImmOperand(MCInst &Inst, uint64_t Imm,
+ int64_t Address, const void *Decoder) {
+ assert(isUInt<N>(Imm) && "Invalid immediate");
+ Inst.addOperand(MCOperand::CreateImm(Imm));
+ return MCDisassembler::Success;
+}
+
+template<unsigned N>
+static DecodeStatus decodeSImmOperand(MCInst &Inst, uint64_t Imm,
+ int64_t Address, const void *Decoder) {
+ assert(isUInt<N>(Imm) && "Invalid immediate");
+ Inst.addOperand(MCOperand::CreateImm(SignExtend64<N>(Imm)));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus decodeMemRIOperands(MCInst &Inst, uint64_t Imm,
+ int64_t Address, const void *Decoder) {
+  // Decode the memri field (imm, reg), which has the low 16 bits as the
+  // displacement and the next 5 bits as the register #.
+
+ uint64_t Base = Imm >> 16;
+ uint64_t Disp = Imm & 0xFFFF;
+
+ assert(Base < 32 && "Invalid base register");
+
+ switch (Inst.getOpcode()) {
+ default: break;
+ case PPC::LBZU:
+ case PPC::LHAU:
+ case PPC::LHZU:
+ case PPC::LWZU:
+ case PPC::LFSU:
+ case PPC::LFDU:
+ // Add the tied output operand.
+ Inst.addOperand(MCOperand::CreateReg(GP0Regs[Base]));
+ break;
+ case PPC::STBU:
+ case PPC::STHU:
+ case PPC::STWU:
+ case PPC::STFSU:
+ case PPC::STFDU:
+ Inst.insert(Inst.begin(), MCOperand::CreateReg(GP0Regs[Base]));
+ break;
+ }
+
+ Inst.addOperand(MCOperand::CreateImm(SignExtend64<16>(Disp)));
+ Inst.addOperand(MCOperand::CreateReg(GP0Regs[Base]));
+ return MCDisassembler::Success;
+}
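
As a worked example of the memri split above (illustrative, not part of the patch): a D-form load such as lwz r3, -8(r1) carries base register 1 in the upper bits of the field and displacement 0xFFF8 in the low 16 bits, which must be sign-extended back to -8:

#include <cstdint>

// Hypothetical stand-alone version of the split performed in
// decodeMemRIOperands.
static void splitMemRI(uint64_t Imm, uint64_t &Base, int64_t &Disp) {
  Base = Imm >> 16;              // next 5 bits: base register number
  Disp = int16_t(Imm & 0xFFFF);  // low 16 bits: signed byte displacement
}

// splitMemRI(0x1FFF8, Base, Disp) yields Base == 1 and Disp == -8,
// matching "lwz r3, -8(r1)".
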
+
+static DecodeStatus decodeMemRIXOperands(MCInst &Inst, uint64_t Imm,
+ int64_t Address, const void *Decoder) {
+  // Decode the memrix field (imm, reg), which has the low 14 bits as the
+  // displacement and the next 5 bits as the register #.
+
+ uint64_t Base = Imm >> 14;
+ uint64_t Disp = Imm & 0x3FFF;
+
+ assert(Base < 32 && "Invalid base register");
+
+ if (Inst.getOpcode() == PPC::LDU)
+ // Add the tied output operand.
+ Inst.addOperand(MCOperand::CreateReg(GP0Regs[Base]));
+ else if (Inst.getOpcode() == PPC::STDU)
+ Inst.insert(Inst.begin(), MCOperand::CreateReg(GP0Regs[Base]));
+
+ Inst.addOperand(MCOperand::CreateImm(SignExtend64<16>(Disp << 2)));
+ Inst.addOperand(MCOperand::CreateReg(GP0Regs[Base]));
+ return MCDisassembler::Success;
+}
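
The DS-form (memrix) variant stores only the upper 14 bits of the displacement, since DS-form displacements must be multiples of 4; the decoder restores the scaling with Disp << 2 before sign-extending 16 bits. A minimal illustration with a hypothetical helper:

#include <cstdint>

// Recover the byte displacement from a 14-bit DS-form field.
static int64_t dsFormDisp(uint64_t Field14) {
  // Scale by 4, then sign-extend the resulting 16-bit value.
  return int64_t(int16_t(uint16_t(Field14 << 2)));
}

// dsFormDisp(0x3FFE) == -8, so an "ld r3, -8(r1)" round-trips correctly.
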
+
+static DecodeStatus decodeCRBitMOperand(MCInst &Inst, uint64_t Imm,
+ int64_t Address, const void *Decoder) {
+ // The cr bit encoding is 0x80 >> cr_reg_num.
+
+ unsigned Zeros = countTrailingZeros(Imm);
+ assert(Zeros < 8 && "Invalid CR bit value");
+
+ Inst.addOperand(MCOperand::CreateReg(CRRegs[7 - Zeros]));
+ return MCDisassembler::Success;
+}
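
The crbitm field is a one-hot mask with CR0 in its most significant bit, so counting trailing zeros maps 0x80 to CR0 and 0x01 to CR7. A small sanity-check sketch, assuming a countTrailingZeros equivalent such as the GCC/Clang __builtin_ctz:

// Map a one-hot crbitm mask (0x80 >> n) back to the CR field number n.
static unsigned crFieldFromMask(unsigned Mask) {
  return 7 - __builtin_ctz(Mask);
}

// crFieldFromMask(0x80) == 0 (CR0) and crFieldFromMask(0x01) == 7 (CR7).
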
+
+#include "PPCGenDisassemblerTables.inc"
+
+DecodeStatus PPCDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
+ const MemoryObject &Region,
+ uint64_t Address,
+ raw_ostream &os,
+ raw_ostream &cs) const {
+ // Get the four bytes of the instruction.
+ uint8_t Bytes[4];
+ Size = 4;
+ if (Region.readBytes(Address, Size, Bytes) == -1) {
+ Size = 0;
+ return MCDisassembler::Fail;
+ }
+
+ // The instruction is big-endian encoded.
+ uint32_t Inst = (Bytes[0] << 24) |
+ (Bytes[1] << 16) |
+ (Bytes[2] << 8) |
+ (Bytes[3] << 0);
+
+ return decodeInstruction(DecoderTable32, MI, Inst, Address, this, STI);
+}
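
The word assembly above assumes a big-endian byte stream; a little-endian PowerPC target would read the same four bytes in the opposite order. A sketch of an endian-agnostic helper (not part of this patch, which only handles big-endian):

#include <cstdint>

// Assemble a 4-byte PPC instruction word from raw memory bytes.
static uint32_t readInst32(const uint8_t Bytes[4], bool IsLittleEndian) {
  if (IsLittleEndian)
    return (uint32_t(Bytes[3]) << 24) | (uint32_t(Bytes[2]) << 16) |
           (uint32_t(Bytes[1]) << 8) | uint32_t(Bytes[0]);
  return (uint32_t(Bytes[0]) << 24) | (uint32_t(Bytes[1]) << 16) |
         (uint32_t(Bytes[2]) << 8) | uint32_t(Bytes[3]);
}
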
+
diff --git a/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp b/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
index fd268e4..771b6f5 100644
--- a/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
#include "PPCInstPrinter.h"
#include "MCTargetDesc/PPCMCTargetDesc.h"
#include "MCTargetDesc/PPCPredicates.h"
@@ -24,6 +23,8 @@
#include "llvm/Target/TargetOpcodes.h"
using namespace llvm;
+#define DEBUG_TYPE "asm-printer"
+
// FIXME: Once the integrated assembler supports full register names, tie this
// to the verbose-asm setting.
static cl::opt<bool>
@@ -150,6 +151,9 @@ void PPCInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNo,
case PPC::PRED_NU:
O << "nu";
return;
+ case PPC::PRED_BIT_SET:
+ case PPC::PRED_BIT_UNSET:
+ llvm_unreachable("Invalid use of bit predicate code");
}
llvm_unreachable("Invalid predicate code");
}
@@ -185,6 +189,9 @@ void PPCInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNo,
case PPC::PRED_NU_PLUS:
O << "+";
return;
+ case PPC::PRED_BIT_SET:
+ case PPC::PRED_BIT_UNSET:
+ llvm_unreachable("Invalid use of bit predicate code");
}
llvm_unreachable("Invalid predicate code");
}
@@ -194,6 +201,13 @@ void PPCInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNo,
printOperand(MI, OpNo+1, O);
}
+void PPCInstPrinter::printU2ImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned int Value = MI->getOperand(OpNo).getImm();
+ assert(Value <= 3 && "Invalid u2imm argument!");
+ O << (unsigned int)Value;
+}
+
void PPCInstPrinter::printS5ImmOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
int Value = MI->getOperand(OpNo).getImm();
@@ -317,7 +331,10 @@ static const char *stripRegisterPrefix(const char *RegName) {
switch (RegName[0]) {
case 'r':
case 'f':
- case 'v': return RegName + 1;
+ case 'v':
+ if (RegName[1] == 's')
+ return RegName + 2;
+ return RegName + 1;
case 'c': if (RegName[1] == 'r') return RegName + 2;
}
diff --git a/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h b/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
index 8a4c03d..211a628 100644
--- a/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
+++ b/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
@@ -31,8 +31,8 @@ public:
return IsDarwin;
}
- virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
- virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
+ void printRegName(raw_ostream &OS, unsigned RegNo) const override;
+ void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot) override;
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &O);
@@ -41,9 +41,9 @@ public:
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printPredicateOperand(const MCInst *MI, unsigned OpNo,
- raw_ostream &O, const char *Modifier = 0);
-
+ raw_ostream &O, const char *Modifier = nullptr);
+ void printU2ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printS5ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printU5ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printU6ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
index 0d42081..c54d5e7 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
@@ -9,7 +9,9 @@
#include "MCTargetDesc/PPCMCTargetDesc.h"
#include "MCTargetDesc/PPCFixupKinds.h"
+#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCELF.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCMachObjectWriter.h"
@@ -71,14 +73,18 @@ static unsigned getFixupKindNumBytes(unsigned Kind) {
namespace {
class PPCAsmBackend : public MCAsmBackend {
-const Target &TheTarget;
+ const Target &TheTarget;
+ bool IsLittleEndian;
public:
- PPCAsmBackend(const Target &T) : MCAsmBackend(), TheTarget(T) {}
+ PPCAsmBackend(const Target &T, bool isLittle) : MCAsmBackend(), TheTarget(T),
+ IsLittleEndian(isLittle) {}
- unsigned getNumFixupKinds() const { return PPC::NumTargetFixupKinds; }
+ unsigned getNumFixupKinds() const override {
+ return PPC::NumTargetFixupKinds;
+ }
- const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
- const static MCFixupKindInfo Infos[PPC::NumTargetFixupKinds] = {
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
+ const static MCFixupKindInfo InfosBE[PPC::NumTargetFixupKinds] = {
// name offset bits flags
{ "fixup_ppc_br24", 6, 24, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_ppc_brcond14", 16, 14, MCFixupKindInfo::FKF_IsPCRel },
@@ -88,17 +94,27 @@ public:
{ "fixup_ppc_half16ds", 0, 14, 0 },
{ "fixup_ppc_nofixup", 0, 0, 0 }
};
+ const static MCFixupKindInfo InfosLE[PPC::NumTargetFixupKinds] = {
+ // name offset bits flags
+ { "fixup_ppc_br24", 2, 24, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_ppc_brcond14", 2, 14, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_ppc_br24abs", 2, 24, 0 },
+ { "fixup_ppc_brcond14abs", 2, 14, 0 },
+ { "fixup_ppc_half16", 0, 16, 0 },
+ { "fixup_ppc_half16ds", 2, 14, 0 },
+ { "fixup_ppc_nofixup", 0, 0, 0 }
+ };
if (Kind < FirstTargetFixupKind)
return MCAsmBackend::getFixupKindInfo(Kind);
assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
"Invalid kind!");
- return Infos[Kind - FirstTargetFixupKind];
+ return (IsLittleEndian? InfosLE : InfosBE)[Kind - FirstTargetFixupKind];
}
void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
- uint64_t Value) const {
+ uint64_t Value, bool IsPCRel) const override {
Value = adjustFixupValue(Fixup.getKind(), Value);
if (!Value) return; // Doesn't change encoding.
@@ -108,11 +124,37 @@ public:
// For each byte of the fragment that the fixup touches, mask in the bits
// from the fixup value. The Value has been "split up" into the appropriate
// bitfields above.
- for (unsigned i = 0; i != NumBytes; ++i)
- Data[Offset + i] |= uint8_t((Value >> ((NumBytes - i - 1)*8)) & 0xff);
+ for (unsigned i = 0; i != NumBytes; ++i) {
+ unsigned Idx = IsLittleEndian ? i : (NumBytes - 1 - i);
+ Data[Offset + i] |= uint8_t((Value >> (Idx * 8)) & 0xff);
+ }
+ }
+
+ void processFixupValue(const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFixup &Fixup, const MCFragment *DF,
+ const MCValue &Target, uint64_t &Value,
+ bool &IsResolved) override {
+ switch ((PPC::Fixups)Fixup.getKind()) {
+ default: break;
+ case PPC::fixup_ppc_br24:
+ case PPC::fixup_ppc_br24abs:
+ // If the target symbol has a local entry point we must not attempt
+ // to resolve the fixup directly. Emit a relocation and leave
+ // resolution of the final target address to the linker.
+ if (const MCSymbolRefExpr *A = Target.getSymA()) {
+ const MCSymbolData &Data = Asm.getSymbolData(A->getSymbol());
+ // The "other" values are stored in the last 6 bits of the second byte.
+ // The traditional defines for STO values assume the full byte and thus
+ // the shift to pack it.
+ unsigned Other = MCELF::getOther(Data) << 2;
+ if ((Other & ELF::STO_PPC64_LOCAL_MASK) != 0)
+ IsResolved = false;
+ }
+ break;
+ }
}
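
The same ELFv2 check reappears in PPCELFObjectWriter below: a non-zero STO_PPC64_LOCAL field in st_other means the callee's local entry point differs from its global entry point, so a direct branch fixup must be left to the linker. A compact restatement of the test, using the same getOther/STO names as the code above (illustrative only):

// True if the symbol declares a separate ELFv2 local entry point.
static bool hasLocalEntryPoint(const MCSymbolData &SD) {
  unsigned Other = MCELF::getOther(SD) << 2;  // re-widen the packed 6-bit field
  return (Other & ELF::STO_PPC64_LOCAL_MASK) != 0;
}
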
- bool mayNeedRelaxation(const MCInst &Inst) const {
+ bool mayNeedRelaxation(const MCInst &Inst) const override {
// FIXME.
return false;
}
@@ -120,18 +162,18 @@ public:
bool fixupNeedsRelaxation(const MCFixup &Fixup,
uint64_t Value,
const MCRelaxableFragment *DF,
- const MCAsmLayout &Layout) const {
+ const MCAsmLayout &Layout) const override {
// FIXME.
llvm_unreachable("relaxInstruction() unimplemented");
}
- void relaxInstruction(const MCInst &Inst, MCInst &Res) const {
+ void relaxInstruction(const MCInst &Inst, MCInst &Res) const override {
// FIXME.
llvm_unreachable("relaxInstruction() unimplemented");
}
- bool writeNopData(uint64_t Count, MCObjectWriter *OW) const {
+ bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override {
uint64_t NumNops = Count / 4;
for (uint64_t i = 0; i != NumNops; ++i)
OW->Write32(0x60000000);
@@ -152,6 +194,10 @@ public:
assert(Name == "ppc32" && "Unknown target name!");
return 4;
}
+
+ bool isLittleEndian() const {
+ return IsLittleEndian;
+ }
};
} // end anonymous namespace
@@ -160,9 +206,9 @@ public:
namespace {
class DarwinPPCAsmBackend : public PPCAsmBackend {
public:
- DarwinPPCAsmBackend(const Target &T) : PPCAsmBackend(T) { }
+ DarwinPPCAsmBackend(const Target &T) : PPCAsmBackend(T, false) { }
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
bool is64 = getPointerSize() == 8;
return createPPCMachObjectWriter(
OS,
@@ -170,26 +216,18 @@ namespace {
(is64 ? MachO::CPU_TYPE_POWERPC64 : MachO::CPU_TYPE_POWERPC),
MachO::CPU_SUBTYPE_POWERPC_ALL);
}
-
- virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
- return false;
- }
};
class ELFPPCAsmBackend : public PPCAsmBackend {
uint8_t OSABI;
public:
- ELFPPCAsmBackend(const Target &T, uint8_t OSABI) :
- PPCAsmBackend(T), OSABI(OSABI) { }
+ ELFPPCAsmBackend(const Target &T, bool IsLittleEndian, uint8_t OSABI) :
+ PPCAsmBackend(T, IsLittleEndian), OSABI(OSABI) { }
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
bool is64 = getPointerSize() == 8;
- return createPPCELFObjectWriter(OS, is64, OSABI);
- }
-
- virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
- return false;
+ return createPPCELFObjectWriter(OS, is64, isLittleEndian(), OSABI);
}
};
@@ -202,5 +240,6 @@ MCAsmBackend *llvm::createPPCAsmBackend(const Target &T,
return new DarwinPPCAsmBackend(T);
uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(Triple(TT).getOS());
- return new ELFPPCAsmBackend(T, OSABI);
+ bool IsLittleEndian = Triple(TT).getArch() == Triple::ppc64le;
+ return new ELFPPCAsmBackend(T, IsLittleEndian, OSABI);
}
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp
index 0e34f6a..ca81317 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp
@@ -9,7 +9,9 @@
#include "MCTargetDesc/PPCMCTargetDesc.h"
#include "MCTargetDesc/PPCFixupKinds.h"
+#include "MCTargetDesc/PPCMCExpr.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/MC/MCELF.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCValue.h"
@@ -27,17 +29,11 @@ namespace {
virtual unsigned getRelocTypeInner(const MCValue &Target,
const MCFixup &Fixup,
bool IsPCRel) const;
- virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel, bool IsRelocWithSymbol,
- int64_t Addend) const;
- virtual const MCSymbol *ExplicitRelSym(const MCAssembler &Asm,
- const MCValue &Target,
- const MCFragment &F,
- const MCFixup &Fixup,
- bool IsPCRel) const;
- virtual const MCSymbol *undefinedExplicitRelSym(const MCValue &Target,
- const MCFixup &Fixup,
- bool IsPCRel) const;
+ unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel) const override;
+
+ bool needsRelocateWithSymbol(const MCSymbolData &SD,
+ unsigned Type) const override;
};
}
@@ -49,12 +45,39 @@ PPCELFObjectWriter::PPCELFObjectWriter(bool Is64Bit, uint8_t OSABI)
PPCELFObjectWriter::~PPCELFObjectWriter() {
}
+static MCSymbolRefExpr::VariantKind getAccessVariant(const MCValue &Target,
+ const MCFixup &Fixup) {
+ const MCExpr *Expr = Fixup.getValue();
+
+ if (Expr->getKind() != MCExpr::Target)
+ return Target.getAccessVariant();
+
+ switch (cast<PPCMCExpr>(Expr)->getKind()) {
+ case PPCMCExpr::VK_PPC_None:
+ return MCSymbolRefExpr::VK_None;
+ case PPCMCExpr::VK_PPC_LO:
+ return MCSymbolRefExpr::VK_PPC_LO;
+ case PPCMCExpr::VK_PPC_HI:
+ return MCSymbolRefExpr::VK_PPC_HI;
+ case PPCMCExpr::VK_PPC_HA:
+ return MCSymbolRefExpr::VK_PPC_HA;
+ case PPCMCExpr::VK_PPC_HIGHERA:
+ return MCSymbolRefExpr::VK_PPC_HIGHERA;
+ case PPCMCExpr::VK_PPC_HIGHER:
+ return MCSymbolRefExpr::VK_PPC_HIGHER;
+ case PPCMCExpr::VK_PPC_HIGHEST:
+ return MCSymbolRefExpr::VK_PPC_HIGHEST;
+ case PPCMCExpr::VK_PPC_HIGHESTA:
+ return MCSymbolRefExpr::VK_PPC_HIGHESTA;
+ }
+ llvm_unreachable("unknown PPCMCExpr kind");
+}
+
unsigned PPCELFObjectWriter::getRelocTypeInner(const MCValue &Target,
const MCFixup &Fixup,
bool IsPCRel) const
{
- MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ?
- MCSymbolRefExpr::VK_None : Target.getSymA()->getKind();
+ MCSymbolRefExpr::VariantKind Modifier = getAccessVariant(Target, Fixup);
// determine the type of the relocation
unsigned Type;
@@ -379,64 +402,31 @@ unsigned PPCELFObjectWriter::getRelocTypeInner(const MCValue &Target,
unsigned PPCELFObjectWriter::GetRelocType(const MCValue &Target,
const MCFixup &Fixup,
- bool IsPCRel,
- bool IsRelocWithSymbol,
- int64_t Addend) const {
+ bool IsPCRel) const {
return getRelocTypeInner(Target, Fixup, IsPCRel);
}
-const MCSymbol *PPCELFObjectWriter::ExplicitRelSym(const MCAssembler &Asm,
- const MCValue &Target,
- const MCFragment &F,
- const MCFixup &Fixup,
- bool IsPCRel) const {
- assert(Target.getSymA() && "SymA cannot be 0");
- MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ?
- MCSymbolRefExpr::VK_None : Target.getSymA()->getKind();
-
- bool EmitThisSym;
- switch (Modifier) {
- // GOT references always need a relocation, even if the
- // target symbol is local.
- case MCSymbolRefExpr::VK_GOT:
- case MCSymbolRefExpr::VK_PPC_GOT_LO:
- case MCSymbolRefExpr::VK_PPC_GOT_HI:
- case MCSymbolRefExpr::VK_PPC_GOT_HA:
- EmitThisSym = true;
- break;
- default:
- EmitThisSym = false;
- break;
- }
-
- if (EmitThisSym)
- return &Target.getSymA()->getSymbol().AliasedSymbol();
- return NULL;
-}
-
-const MCSymbol *PPCELFObjectWriter::undefinedExplicitRelSym(const MCValue &Target,
- const MCFixup &Fixup,
- bool IsPCRel) const {
- assert(Target.getSymA() && "SymA cannot be 0");
- const MCSymbol &Symbol = Target.getSymA()->getSymbol().AliasedSymbol();
-
- unsigned RelocType = getRelocTypeInner(Target, Fixup, IsPCRel);
-
- // The .odp creation emits a relocation against the symbol ".TOC." which
- // create a R_PPC64_TOC relocation. However the relocation symbol name
- // in final object creation should be NULL, since the symbol does not
- // really exist, it is just the reference to TOC base for the current
- // object file.
- bool EmitThisSym = RelocType != ELF::R_PPC64_TOC;
+bool PPCELFObjectWriter::needsRelocateWithSymbol(const MCSymbolData &SD,
+ unsigned Type) const {
+ switch (Type) {
+ default:
+ return false;
- if (EmitThisSym && !Symbol.isTemporary())
- return &Symbol;
- return NULL;
+ case ELF::R_PPC_REL24:
+ // If the target symbol has a local entry point, we must keep the
+ // target symbol to preserve that information for the linker.
+ // The "other" values are stored in the last 6 bits of the second byte.
+ // The traditional defines for STO values assume the full byte and thus
+ // the shift to pack it.
+ unsigned Other = MCELF::getOther(SD) << 2;
+ return (Other & ELF::STO_PPC64_LOCAL_MASK) != 0;
+ }
}
MCObjectWriter *llvm::createPPCELFObjectWriter(raw_ostream &OS,
bool Is64Bit,
+ bool IsLittleEndian,
uint8_t OSABI) {
MCELFObjectTargetWriter *MOTW = new PPCELFObjectWriter(Is64Bit, OSABI);
- return createELFObjectWriter(MOTW, OS, /*IsLittleEndian=*/false);
+ return createELFObjectWriter(MOTW, OS, IsLittleEndian);
}
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
index 1d9c064..b95a2ac 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
@@ -18,24 +18,6 @@ using namespace llvm;
void PPCMCAsmInfoDarwin::anchor() { }
-/// This version of the constructor is here to maintain ABI compatibility with
-/// LLVM 3.4.0
-PPCMCAsmInfoDarwin::PPCMCAsmInfoDarwin(bool is64Bit) {
- if (is64Bit) {
- PointerSize = CalleeSaveStackSlotSize = 8;
- }
- IsLittleEndian = false;
-
- CommentString = ";";
- ExceptionsType = ExceptionHandling::DwarfCFI;
-
- if (!is64Bit)
- Data64bitsDirective = 0; // We can't emit a 64-bit unit in PPC32 mode.
-
- AssemblerDialect = 1; // New-Style mnemonics.
- SupportsDebugInformation= true; // Debug information.
-}
-
PPCMCAsmInfoDarwin::PPCMCAsmInfoDarwin(bool is64Bit, const Triple& T) {
if (is64Bit) {
PointerSize = CalleeSaveStackSlotSize = 8;
@@ -46,32 +28,32 @@ PPCMCAsmInfoDarwin::PPCMCAsmInfoDarwin(bool is64Bit, const Triple& T) {
ExceptionsType = ExceptionHandling::DwarfCFI;
if (!is64Bit)
- Data64bitsDirective = 0; // We can't emit a 64-bit unit in PPC32 mode.
+ Data64bitsDirective = nullptr; // We can't emit a 64-bit unit in PPC32 mode.
AssemblerDialect = 1; // New-Style mnemonics.
SupportsDebugInformation= true; // Debug information.
- // old assembler lacks some directives
+ // The installed assembler for OSX < 10.6 lacks some directives.
// FIXME: this should really be a check on the assembler characteristics
// rather than OS version
if (T.isMacOSX() && T.isMacOSXVersionLT(10, 6))
HasWeakDefCanBeHiddenDirective = false;
+
+ UseIntegratedAssembler = true;
}
void PPCLinuxMCAsmInfo::anchor() { }
-PPCLinuxMCAsmInfo::PPCLinuxMCAsmInfo(bool is64Bit) {
+PPCLinuxMCAsmInfo::PPCLinuxMCAsmInfo(bool is64Bit, const Triple& T) {
if (is64Bit) {
PointerSize = CalleeSaveStackSlotSize = 8;
}
- IsLittleEndian = false;
+ IsLittleEndian = T.getArch() == Triple::ppc64le;
// ".comm align is in bytes but .align is pow-2."
AlignmentIsInBytes = false;
CommentString = "#";
- GlobalPrefix = "";
- PrivateGlobalPrefix = ".L";
// Uses '.section' before '.bss' directive
UsesELFSectionDirectiveForBSS = true;
@@ -89,7 +71,12 @@ PPCLinuxMCAsmInfo::PPCLinuxMCAsmInfo(bool is64Bit) {
ExceptionsType = ExceptionHandling::DwarfCFI;
ZeroDirective = "\t.space\t";
- Data64bitsDirective = is64Bit ? "\t.quad\t" : 0;
+ Data64bitsDirective = is64Bit ? "\t.quad\t" : nullptr;
AssemblerDialect = 1; // New-Style mnemonics.
+
+ if (T.getOS() == llvm::Triple::FreeBSD ||
+ (T.getOS() == llvm::Triple::NetBSD && !is64Bit) ||
+ (T.getOS() == llvm::Triple::OpenBSD && !is64Bit))
+ UseIntegratedAssembler = true;
}
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h
index 633970c..754330b 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h
@@ -21,18 +21,15 @@ namespace llvm {
class Triple;
class PPCMCAsmInfoDarwin : public MCAsmInfoDarwin {
- virtual void anchor();
+ void anchor() override;
public:
- /// This version of the constructor is here to maintain ABI compatibility
- /// with LLVM 3.4.0.
- explicit PPCMCAsmInfoDarwin(bool is64Bit);
explicit PPCMCAsmInfoDarwin(bool is64Bit, const Triple&);
};
class PPCLinuxMCAsmInfo : public MCAsmInfoELF {
- virtual void anchor();
+ void anchor() override;
public:
- explicit PPCLinuxMCAsmInfo(bool is64Bit);
+ explicit PPCLinuxMCAsmInfo(bool is64Bit, const Triple&);
};
} // namespace llvm
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
index 346a9be..435a93f 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mccodeemitter"
#include "MCTargetDesc/PPCMCTargetDesc.h"
#include "MCTargetDesc/PPCFixupKinds.h"
#include "llvm/ADT/Statistic.h"
@@ -26,6 +25,8 @@
#include "llvm/Target/TargetOpcodes.h"
using namespace llvm;
+#define DEBUG_TYPE "mccodeemitter"
+
STATISTIC(MCNumEmitted, "Number of MC instructions emitted");
namespace {
@@ -33,70 +34,113 @@ class PPCMCCodeEmitter : public MCCodeEmitter {
PPCMCCodeEmitter(const PPCMCCodeEmitter &) LLVM_DELETED_FUNCTION;
void operator=(const PPCMCCodeEmitter &) LLVM_DELETED_FUNCTION;
- const MCSubtargetInfo &STI;
+ const MCInstrInfo &MCII;
const MCContext &CTX;
- Triple TT;
+ bool IsLittleEndian;
public:
- PPCMCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti,
- MCContext &ctx)
- : STI(sti), CTX(ctx), TT(STI.getTargetTriple()) {
+ PPCMCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx, bool isLittle)
+ : MCII(mcii), CTX(ctx), IsLittleEndian(isLittle) {
}
~PPCMCCodeEmitter() {}
unsigned getDirectBrEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getCondBrEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getAbsDirectBrEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getAbsCondBrEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getImm16Encoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getMemRIEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getMemRIXEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getTLSRegEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getTLSCallEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned get_crbitm_encoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
// getBinaryCodeForInstr - TableGen'erated function for getting the
// binary encoding for an instruction.
uint64_t getBinaryCodeForInstr(const MCInst &MI,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const override {
// For fast-isel, a float COPY_TO_REGCLASS can survive this long.
// It's just a nop to keep the register classes happy, so don't
// generate anything.
unsigned Opcode = MI.getOpcode();
+ const MCInstrDesc &Desc = MCII.get(Opcode);
if (Opcode == TargetOpcode::COPY_TO_REGCLASS)
return;
- uint64_t Bits = getBinaryCodeForInstr(MI, Fixups);
+ uint64_t Bits = getBinaryCodeForInstr(MI, Fixups, STI);
- // BL8_NOP etc. all have a size of 8 because of the following 'nop'.
- unsigned Size = 4; // FIXME: Have Desc.getSize() return the correct value!
- if (Opcode == PPC::BL8_NOP || Opcode == PPC::BLA8_NOP ||
- Opcode == PPC::BL8_NOP_TLS)
- Size = 8;
-
- // Output the constant in big endian byte order.
- int ShiftValue = (Size * 8) - 8;
- for (unsigned i = 0; i != Size; ++i) {
- OS << (char)(Bits >> ShiftValue);
- Bits <<= 8;
+ // Output the constant in big/little endian byte order.
+ unsigned Size = Desc.getSize();
+ switch (Size) {
+ case 4:
+ if (IsLittleEndian) {
+ OS << (char)(Bits);
+ OS << (char)(Bits >> 8);
+ OS << (char)(Bits >> 16);
+ OS << (char)(Bits >> 24);
+ } else {
+ OS << (char)(Bits >> 24);
+ OS << (char)(Bits >> 16);
+ OS << (char)(Bits >> 8);
+ OS << (char)(Bits);
+ }
+ break;
+ case 8:
+ // If we emit a pair of instructions, the first one is
+ // always in the top 32 bits, even on little-endian.
+ if (IsLittleEndian) {
+ OS << (char)(Bits >> 32);
+ OS << (char)(Bits >> 40);
+ OS << (char)(Bits >> 48);
+ OS << (char)(Bits >> 56);
+ OS << (char)(Bits);
+ OS << (char)(Bits >> 8);
+ OS << (char)(Bits >> 16);
+ OS << (char)(Bits >> 24);
+ } else {
+ OS << (char)(Bits >> 56);
+ OS << (char)(Bits >> 48);
+ OS << (char)(Bits >> 40);
+ OS << (char)(Bits >> 32);
+ OS << (char)(Bits >> 24);
+ OS << (char)(Bits >> 16);
+ OS << (char)(Bits >> 8);
+ OS << (char)(Bits);
+ }
+ break;
+ default:
+ llvm_unreachable ("Invalid instruction size");
}
++MCNumEmitted; // Keep track of the # of mi's emitted.
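
The eight-byte case emits an instruction pair (e.g. BL8_NOP plus its trailing nop) with the first instruction always taken from the top 32 bits of Bits; only the bytes within each 32-bit word are swapped for little-endian. A hypothetical refactor that makes this explicit (not part of the patch):

// Emit one 32-bit instruction word in the requested byte order.
static void write32(raw_ostream &OS, uint32_t Word, bool IsLittleEndian) {
  if (IsLittleEndian)
    OS << char(Word) << char(Word >> 8) << char(Word >> 16) << char(Word >> 24);
  else
    OS << char(Word >> 24) << char(Word >> 16) << char(Word >> 8) << char(Word);
}
// For Size == 8: write32(OS, Bits >> 32, IsLittleEndian);
//                write32(OS, uint32_t(Bits), IsLittleEndian);
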
@@ -110,14 +154,17 @@ MCCodeEmitter *llvm::createPPCMCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
MCContext &Ctx) {
- return new PPCMCCodeEmitter(MCII, STI, Ctx);
+ Triple TT(STI.getTargetTriple());
+ bool IsLittleEndian = TT.getArch() == Triple::ppc64le;
+ return new PPCMCCodeEmitter(MCII, Ctx, IsLittleEndian);
}
unsigned PPCMCCodeEmitter::
getDirectBrEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpNo);
- if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups);
+ if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups, STI);
// Add a fixup for the branch target.
Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
@@ -126,9 +173,10 @@ getDirectBrEncoding(const MCInst &MI, unsigned OpNo,
}
unsigned PPCMCCodeEmitter::getCondBrEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpNo);
- if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups);
+ if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups, STI);
// Add a fixup for the branch target.
Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
@@ -138,9 +186,10 @@ unsigned PPCMCCodeEmitter::getCondBrEncoding(const MCInst &MI, unsigned OpNo,
unsigned PPCMCCodeEmitter::
getAbsDirectBrEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpNo);
- if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups);
+ if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups, STI);
// Add a fixup for the branch target.
Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
@@ -150,9 +199,10 @@ getAbsDirectBrEncoding(const MCInst &MI, unsigned OpNo,
unsigned PPCMCCodeEmitter::
getAbsCondBrEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpNo);
- if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups);
+ if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups, STI);
// Add a fixup for the branch target.
Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
@@ -161,79 +211,87 @@ getAbsCondBrEncoding(const MCInst &MI, unsigned OpNo,
}
unsigned PPCMCCodeEmitter::getImm16Encoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpNo);
- if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups);
+ if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups, STI);
// Add a fixup for the immediate field.
- Fixups.push_back(MCFixup::Create(2, MO.getExpr(),
+ Fixups.push_back(MCFixup::Create(IsLittleEndian? 0 : 2, MO.getExpr(),
(MCFixupKind)PPC::fixup_ppc_half16));
return 0;
}
unsigned PPCMCCodeEmitter::getMemRIEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// Encode (imm, reg) as a memri, which has the low 16-bits as the
// displacement and the next 5 bits as the register #.
assert(MI.getOperand(OpNo+1).isReg());
- unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups) << 16;
+ unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups, STI) << 16;
const MCOperand &MO = MI.getOperand(OpNo);
if (MO.isImm())
- return (getMachineOpValue(MI, MO, Fixups) & 0xFFFF) | RegBits;
+ return (getMachineOpValue(MI, MO, Fixups, STI) & 0xFFFF) | RegBits;
// Add a fixup for the displacement field.
- Fixups.push_back(MCFixup::Create(2, MO.getExpr(),
+ Fixups.push_back(MCFixup::Create(IsLittleEndian? 0 : 2, MO.getExpr(),
(MCFixupKind)PPC::fixup_ppc_half16));
return RegBits;
}
unsigned PPCMCCodeEmitter::getMemRIXEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// Encode (imm, reg) as a memrix, which has the low 14-bits as the
// displacement and the next 5 bits as the register #.
assert(MI.getOperand(OpNo+1).isReg());
- unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups) << 14;
+ unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups, STI) << 14;
const MCOperand &MO = MI.getOperand(OpNo);
if (MO.isImm())
- return ((getMachineOpValue(MI, MO, Fixups) >> 2) & 0x3FFF) | RegBits;
+ return ((getMachineOpValue(MI, MO, Fixups, STI) >> 2) & 0x3FFF) | RegBits;
// Add a fixup for the displacement field.
- Fixups.push_back(MCFixup::Create(2, MO.getExpr(),
+ Fixups.push_back(MCFixup::Create(IsLittleEndian? 0 : 2, MO.getExpr(),
(MCFixupKind)PPC::fixup_ppc_half16ds));
return RegBits;
}
unsigned PPCMCCodeEmitter::getTLSRegEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpNo);
- if (MO.isReg()) return getMachineOpValue(MI, MO, Fixups);
+ if (MO.isReg()) return getMachineOpValue(MI, MO, Fixups, STI);
// Add a fixup for the TLS register, which simply provides a relocation
// hint to the linker that this statement is part of a relocation sequence.
// Return the thread-pointer register's encoding.
Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
(MCFixupKind)PPC::fixup_ppc_nofixup));
- return CTX.getRegisterInfo()->getEncodingValue(PPC::X13);
+ Triple TT(STI.getTargetTriple());
+ bool isPPC64 = TT.getArch() == Triple::ppc64 || TT.getArch() == Triple::ppc64le;
+ return CTX.getRegisterInfo()->getEncodingValue(isPPC64 ? PPC::X13 : PPC::R2);
}
unsigned PPCMCCodeEmitter::getTLSCallEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// For special TLS calls, we need two fixups; one for the branch target
// (__tls_get_addr), which we create via getDirectBrEncoding as usual,
// and one for the TLSGD or TLSLD symbol, which is emitted here.
const MCOperand &MO = MI.getOperand(OpNo+1);
Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
(MCFixupKind)PPC::fixup_ppc_nofixup));
- return getDirectBrEncoding(MI, OpNo, Fixups);
+ return getDirectBrEncoding(MI, OpNo, Fixups, STI);
}
unsigned PPCMCCodeEmitter::
get_crbitm_encoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpNo);
assert((MI.getOpcode() == PPC::MTOCRF || MI.getOpcode() == PPC::MTOCRF8 ||
MI.getOpcode() == PPC::MFOCRF || MI.getOpcode() == PPC::MFOCRF8) &&
@@ -244,7 +302,8 @@ get_crbitm_encoding(const MCInst &MI, unsigned OpNo,
unsigned PPCMCCodeEmitter::
getMachineOpValue(const MCInst &MI, const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
if (MO.isReg()) {
// MTOCRF/MFOCRF should go through get_crbitm_encoding for the CR operand.
// The GPR operand should come through here though.
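
Aside: the IsLittleEndian plumbing above moves the half16/half16ds fixups from byte offset 2 to byte offset 0. PowerPC instructions are 4 bytes wide and the 16-bit immediate occupies the low-order half of the instruction word, so it starts at offset 0 in a little-endian object file and two bytes in on a big-endian one. A minimal stand-alone sketch of that offset choice (plain C++ with illustrative names, not the LLVM MC API):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Byte offset of a 16-bit immediate inside a 4-byte PowerPC instruction,
// as seen by the object writer when applying a half16-style fixup.
static unsigned halfwordFixupOffset(bool IsLittleEndian) {
  // The immediate is the least-significant halfword of the instruction.
  // LE object files store that halfword first; BE files store it last.
  return IsLittleEndian ? 0 : 2;
}

int main() {
  // li r3, 0x1234: opcode and registers in the high halfword, 0x1234 in the low one.
  uint32_t Inst = 0x38601234;
  uint8_t BE[4] = { uint8_t(Inst >> 24), uint8_t(Inst >> 16),
                    uint8_t(Inst >> 8),  uint8_t(Inst) };
  uint8_t LE[4] = { uint8_t(Inst),       uint8_t(Inst >> 8),
                    uint8_t(Inst >> 16), uint8_t(Inst >> 24) };

  // The fixup offset must point at the bytes holding 0x1234 in either layout.
  unsigned OffBE = halfwordFixupOffset(false);
  unsigned OffLE = halfwordFixupOffset(true);
  assert(BE[OffBE] == 0x12 && BE[OffBE + 1] == 0x34);
  assert(LE[OffLE] == 0x34 && LE[OffLE + 1] == 0x12);
  printf("BE offset %u, LE offset %u\n", OffBE, OffLE);
  return 0;
}
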
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp
index d7e8402..3ac0aca 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.cpp
@@ -7,14 +7,16 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "ppcmcexpr"
#include "PPCMCExpr.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCObjectStreamer.h"
using namespace llvm;
+#define DEBUG_TYPE "ppcmcexpr"
+
const PPCMCExpr*
PPCMCExpr::Create(VariantKind Kind, const MCExpr *Expr,
bool isDarwin, MCContext &Ctx) {
@@ -54,7 +56,7 @@ PPCMCExpr::EvaluateAsRelocatableImpl(MCValue &Res,
const MCAsmLayout *Layout) const {
MCValue Value;
- if (!Layout || !getSubExpr()->EvaluateAsRelocatable(Value, *Layout))
+ if (!getSubExpr()->EvaluateAsRelocatable(Value, Layout))
return false;
if (Value.isAbsolute()) {
@@ -86,6 +88,9 @@ PPCMCExpr::EvaluateAsRelocatableImpl(MCValue &Res,
}
Res = MCValue::get(Result);
} else {
+ if (!Layout)
+ return false;
+
MCContext &Context = Layout->getAssembler().getContext();
const MCSymbolRefExpr *Sym = Value.getSymA();
MCSymbolRefExpr::VariantKind Modifier = Sym->getKind();
@@ -123,33 +128,6 @@ PPCMCExpr::EvaluateAsRelocatableImpl(MCValue &Res,
return true;
}
-// FIXME: This basically copies MCObjectStreamer::AddValueSymbols. Perhaps
-// that method should be made public?
-static void AddValueSymbols_(const MCExpr *Value, MCAssembler *Asm) {
- switch (Value->getKind()) {
- case MCExpr::Target:
- llvm_unreachable("Can't handle nested target expr!");
-
- case MCExpr::Constant:
- break;
-
- case MCExpr::Binary: {
- const MCBinaryExpr *BE = cast<MCBinaryExpr>(Value);
- AddValueSymbols_(BE->getLHS(), Asm);
- AddValueSymbols_(BE->getRHS(), Asm);
- break;
- }
-
- case MCExpr::SymbolRef:
- Asm->getOrCreateSymbolData(cast<MCSymbolRefExpr>(Value)->getSymbol());
- break;
-
- case MCExpr::Unary:
- AddValueSymbols_(cast<MCUnaryExpr>(Value)->getSubExpr(), Asm);
- break;
- }
-}
-
-void PPCMCExpr::AddValueSymbols(MCAssembler *Asm) const {
- AddValueSymbols_(getSubExpr(), Asm);
+void PPCMCExpr::visitUsedExpr(MCStreamer &Streamer) const {
+ Streamer.visitUsedExpr(*getSubExpr());
}
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.h b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.h
index e44c7c1..bca4085 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.h
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCExpr.h
@@ -10,9 +10,9 @@
#ifndef PPCMCEXPR_H
#define PPCMCEXPR_H
+#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCValue.h"
-#include "llvm/MC/MCAsmLayout.h"
namespace llvm {
@@ -76,16 +76,16 @@ public:
/// @}
- void PrintImpl(raw_ostream &OS) const;
+ void PrintImpl(raw_ostream &OS) const override;
bool EvaluateAsRelocatableImpl(MCValue &Res,
- const MCAsmLayout *Layout) const;
- void AddValueSymbols(MCAssembler *) const;
- const MCSection *FindAssociatedSection() const {
+ const MCAsmLayout *Layout) const override;
+ void visitUsedExpr(MCStreamer &Streamer) const override;
+ const MCSection *FindAssociatedSection() const override {
return getSubExpr()->FindAssociatedSection();
}
// There are no TLS PPCMCExprs at the moment.
- void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {}
+ void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const override {}
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::Target;
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
index 6a50518..4c6780f 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
@@ -16,16 +16,22 @@
#include "PPCMCAsmInfo.h"
#include "PPCTargetStreamer.h"
#include "llvm/MC/MCCodeGenInfo.h"
+#include "llvm/MC/MCELF.h"
+#include "llvm/MC/MCELFStreamer.h"
+#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MachineLocation.h"
+#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
#define GET_INSTRINFO_MC_DESC
#include "PPCGenInstrInfo.inc"
@@ -35,10 +41,9 @@
#define GET_REGINFO_MC_DESC
#include "PPCGenRegisterInfo.inc"
-using namespace llvm;
-
// Pin the vtable to this file.
PPCTargetStreamer::~PPCTargetStreamer() {}
+PPCTargetStreamer::PPCTargetStreamer(MCStreamer &S) : MCTargetStreamer(S) {}
static MCInstrInfo *createPPCMCInstrInfo() {
MCInstrInfo *X = new MCInstrInfo();
@@ -74,12 +79,12 @@ static MCAsmInfo *createPPCMCAsmInfo(const MCRegisterInfo &MRI, StringRef TT) {
if (TheTriple.isOSDarwin())
MAI = new PPCMCAsmInfoDarwin(isPPC64, TheTriple);
else
- MAI = new PPCLinuxMCAsmInfo(isPPC64);
+ MAI = new PPCLinuxMCAsmInfo(isPPC64, TheTriple);
// Initial state of the frame pointer is R1.
unsigned Reg = isPPC64 ? PPC::X1 : PPC::R1;
MCCFIInstruction Inst =
- MCCFIInstruction::createDefCfa(0, MRI.getDwarfRegNum(Reg, true), 0);
+ MCCFIInstruction::createDefCfa(nullptr, MRI.getDwarfRegNum(Reg, true), 0);
MAI->addInitialFrameState(Inst);
return MAI;
@@ -112,20 +117,90 @@ class PPCTargetAsmStreamer : public PPCTargetStreamer {
formatted_raw_ostream &OS;
public:
- PPCTargetAsmStreamer(formatted_raw_ostream &OS) : OS(OS) {}
- virtual void emitTCEntry(const MCSymbol &S) {
+ PPCTargetAsmStreamer(MCStreamer &S, formatted_raw_ostream &OS)
+ : PPCTargetStreamer(S), OS(OS) {}
+ void emitTCEntry(const MCSymbol &S) override {
OS << "\t.tc ";
OS << S.getName();
OS << "[TC],";
OS << S.getName();
OS << '\n';
}
+ void emitMachine(StringRef CPU) override {
+ OS << "\t.machine " << CPU << '\n';
+ }
+ virtual void emitAbiVersion(int AbiVersion) override {
+ OS << "\t.abiversion " << AbiVersion << '\n';
+ }
+ virtual void emitLocalEntry(MCSymbol *S, const MCExpr *LocalOffset) {
+ OS << "\t.localentry\t" << *S << ", " << *LocalOffset << '\n';
+ }
};
class PPCTargetELFStreamer : public PPCTargetStreamer {
- virtual void emitTCEntry(const MCSymbol &S) {
+public:
+ PPCTargetELFStreamer(MCStreamer &S) : PPCTargetStreamer(S) {}
+ MCELFStreamer &getStreamer() {
+ return static_cast<MCELFStreamer &>(Streamer);
+ }
+ virtual void emitTCEntry(const MCSymbol &S) override {
// Creates a R_PPC64_TOC relocation
- Streamer->EmitSymbolValue(&S, 8);
+ Streamer.EmitSymbolValue(&S, 8);
+ }
+ void emitMachine(StringRef CPU) override {
+ // FIXME: Is there anything to do in here or does this directive only
+ // limit the parser?
+ }
+ virtual void emitAbiVersion(int AbiVersion) override {
+ MCAssembler &MCA = getStreamer().getAssembler();
+ unsigned Flags = MCA.getELFHeaderEFlags();
+ Flags &= ~ELF::EF_PPC64_ABI;
+ Flags |= (AbiVersion & ELF::EF_PPC64_ABI);
+ MCA.setELFHeaderEFlags(Flags);
+ }
+ virtual void emitLocalEntry(MCSymbol *S, const MCExpr *LocalOffset) {
+ MCAssembler &MCA = getStreamer().getAssembler();
+ MCSymbolData &Data = getStreamer().getOrCreateSymbolData(S);
+
+ int64_t Res;
+ if (!LocalOffset->EvaluateAsAbsolute(Res, MCA))
+ report_fatal_error(".localentry expression must be absolute.");
+
+ unsigned Encoded = ELF::encodePPC64LocalEntryOffset(Res);
+ if (Res != ELF::decodePPC64LocalEntryOffset(Encoded))
+ report_fatal_error(".localentry expression cannot be encoded.");
+
+ // The "other" values are stored in the last 6 bits of the second byte.
+ // The traditional defines for STO values assume the full byte and thus
+ // the shift to pack it.
+ unsigned Other = MCELF::getOther(Data) << 2;
+ Other &= ~ELF::STO_PPC64_LOCAL_MASK;
+ Other |= Encoded;
+ MCELF::setOther(Data, Other >> 2);
+
+ // For GAS compatibility, unless we already saw a .abiversion directive,
+ // set e_flags to indicate ELFv2 ABI.
+ unsigned Flags = MCA.getELFHeaderEFlags();
+ if ((Flags & ELF::EF_PPC64_ABI) == 0)
+ MCA.setELFHeaderEFlags(Flags | 2);
+ }
+};
+
+class PPCTargetMachOStreamer : public PPCTargetStreamer {
+public:
+ PPCTargetMachOStreamer(MCStreamer &S) : PPCTargetStreamer(S) {}
+ void emitTCEntry(const MCSymbol &S) override {
+ llvm_unreachable("Unknown pseudo-op: .tc");
+ }
+ void emitMachine(StringRef CPU) override {
+ // FIXME: We should update the CPUType, CPUSubType in the Object file if
+ // the new values are different from the defaults.
+ }
+ virtual void emitAbiVersion(int AbiVersion) override {
+ llvm_unreachable("Unknown pseudo-op: .abiversion");
+ }
+ virtual void emitLocalEntry(MCSymbol *S, const MCExpr *LocalOffset) {
+ llvm_unreachable("Unknown pseudo-op: .localentry");
}
};
}
@@ -135,25 +210,31 @@ static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
MCContext &Ctx, MCAsmBackend &MAB,
raw_ostream &OS,
MCCodeEmitter *Emitter,
+ const MCSubtargetInfo &STI,
bool RelaxAll,
bool NoExecStack) {
- if (Triple(TT).isOSDarwin())
- return createMachOStreamer(Ctx, MAB, OS, Emitter, RelaxAll);
+ if (Triple(TT).isOSDarwin()) {
+ MCStreamer *S = createMachOStreamer(Ctx, MAB, OS, Emitter, RelaxAll);
+ new PPCTargetMachOStreamer(*S);
+ return S;
+ }
- PPCTargetStreamer *S = new PPCTargetELFStreamer();
- return createELFStreamer(Ctx, S, MAB, OS, Emitter, RelaxAll, NoExecStack);
+ MCStreamer *S =
+ createELFStreamer(Ctx, MAB, OS, Emitter, RelaxAll, NoExecStack);
+ new PPCTargetELFStreamer(*S);
+ return S;
}
static MCStreamer *
createMCAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
- bool isVerboseAsm, bool useLoc, bool useCFI,
- bool useDwarfDirectory, MCInstPrinter *InstPrint,
- MCCodeEmitter *CE, MCAsmBackend *TAB, bool ShowInst) {
- PPCTargetStreamer *S = new PPCTargetAsmStreamer(OS);
-
- return llvm::createAsmStreamer(Ctx, S, OS, isVerboseAsm, useLoc, useCFI,
- useDwarfDirectory, InstPrint, CE, TAB,
- ShowInst);
+ bool isVerboseAsm, bool useDwarfDirectory,
+ MCInstPrinter *InstPrint, MCCodeEmitter *CE,
+ MCAsmBackend *TAB, bool ShowInst) {
+
+ MCStreamer *S = llvm::createAsmStreamer(
+ Ctx, OS, isVerboseAsm, useDwarfDirectory, InstPrint, CE, TAB, ShowInst);
+ new PPCTargetAsmStreamer(*S, OS);
+ return S;
}
static MCInstPrinter *createPPCMCInstPrinter(const Target &T,
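
Aside: the .localentry handling above packs the distance between a function's global and local entry points into the top three bits of the symbol's st_other field. The ELFv2 ABI only allows power-of-two distances, so the encoder stores an exponent and the decoder expands it. A rough stand-alone reimplementation of that scheme for illustration, assuming the usual 1 << N expansion with values below 2 decoding to offset 0 (the authoritative helpers are ELF::encodePPC64LocalEntryOffset and ELF::decodePPC64LocalEntryOffset used in the streamer above):

#include <cassert>
#include <cstdint>

// ELFv2 keeps the local-entry offset in bits 5..7 of st_other.
static const unsigned STO_PPC64_LOCAL_BIT  = 5;
static const unsigned STO_PPC64_LOCAL_MASK = 0xE0;

// Decode: stored value N expands to (1 << N) bytes; N < 2 decodes to 0,
// meaning there is no separate local entry point.
static uint64_t decodeLocalEntry(uint8_t Other) {
  unsigned N = (Other & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT;
  return ((1u << N) >> 2) << 2;
}

// Encode: only 0, 4, 8, 16, 32 and 64 byte offsets are representable.
static unsigned encodeLocalEntry(uint64_t Offset) {
  for (unsigned N = 0; N <= 6; ++N)
    if (decodeLocalEntry(N << STO_PPC64_LOCAL_BIT) == Offset)
      return N << STO_PPC64_LOCAL_BIT;
  return ~0u; // Not encodable; the real streamer reports a fatal error.
}

int main() {
  // The usual two-instruction global-entry prologue is 8 bytes long.
  assert(decodeLocalEntry(encodeLocalEntry(8)) == 8);
  assert(encodeLocalEntry(12) == ~0u);   // Non-power-of-two: rejected.
  assert(decodeLocalEntry(0) == 0);      // No local entry at all.
  return 0;
}
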
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h
index 0b0ca24..474395b 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h
@@ -46,6 +46,7 @@ MCAsmBackend *createPPCAsmBackend(const Target &T, const MCRegisterInfo &MRI,
/// createPPCELFObjectWriter - Construct an PPC ELF object writer.
MCObjectWriter *createPPCELFObjectWriter(raw_ostream &OS,
bool Is64Bit,
+ bool IsLittleEndian,
uint8_t OSABI);
 /// createPPCMachObjectWriter - Construct a PPC Mach-O object writer.
MCObjectWriter *createPPCMachObjectWriter(raw_ostream &OS, bool Is64Bit,

diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp
index bbafe2e..cff27ba 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp
@@ -44,7 +44,7 @@ public:
void RecordRelocation(MachObjectWriter *Writer, const MCAssembler &Asm,
const MCAsmLayout &Layout, const MCFragment *Fragment,
const MCFixup &Fixup, MCValue Target,
- uint64_t &FixedValue) {
+ uint64_t &FixedValue) override {
if (Writer->is64Bit()) {
report_fatal_error("Relocation emission for MachO/PPC64 unimplemented.");
} else
@@ -206,7 +206,7 @@ bool PPCMachObjectWriter::RecordScatteredRelocation(
// See <reloc.h>.
const MCSymbol *A = &Target.getSymA()->getSymbol();
- MCSymbolData *A_SD = &Asm.getSymbolData(*A);
+ const MCSymbolData *A_SD = &Asm.getSymbolData(*A);
if (!A_SD->getFragment())
report_fatal_error("symbol '" + A->getName() +
@@ -219,7 +219,7 @@ bool PPCMachObjectWriter::RecordScatteredRelocation(
uint32_t Value2 = 0;
if (const MCSymbolRefExpr *B = Target.getSymB()) {
- MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol());
+ const MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol());
if (!B_SD->getFragment())
report_fatal_error("symbol '" + B->getSymbol().getName() +
@@ -324,7 +324,7 @@ void PPCMachObjectWriter::RecordPPCRelocation(
// this doesn't seem right for RIT_PPC_BR24
// Get the symbol data, if any.
- MCSymbolData *SD = 0;
+ const MCSymbolData *SD = nullptr;
if (Target.getSymA())
SD = &Asm.getSymbolData(Target.getSymA()->getSymbol());
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp
index 63facc5..c2987b6 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.cpp
@@ -42,6 +42,10 @@ PPC::Predicate PPC::InvertPredicate(PPC::Predicate Opcode) {
case PPC::PRED_LE_PLUS: return PPC::PRED_GT_MINUS;
case PPC::PRED_NU_PLUS: return PPC::PRED_UN_MINUS;
case PPC::PRED_UN_PLUS: return PPC::PRED_NU_MINUS;
+
+ // Simple predicates for single condition-register bits.
+ case PPC::PRED_BIT_SET: return PPC::PRED_BIT_UNSET;
+ case PPC::PRED_BIT_UNSET: return PPC::PRED_BIT_SET;
}
llvm_unreachable("Unknown PPC branch opcode!");
}
@@ -72,6 +76,10 @@ PPC::Predicate PPC::getSwappedPredicate(PPC::Predicate Opcode) {
case PPC::PRED_LE_PLUS: return PPC::PRED_GE_PLUS;
case PPC::PRED_NU_PLUS: return PPC::PRED_NU_PLUS;
case PPC::PRED_UN_PLUS: return PPC::PRED_UN_PLUS;
+
+ case PPC::PRED_BIT_SET:
+ case PPC::PRED_BIT_UNSET:
+ llvm_unreachable("Invalid use of bit predicate code");
}
llvm_unreachable("Unknown PPC branch opcode!");
}
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h
index d498c2f..10e328a 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h
@@ -48,7 +48,12 @@ namespace PPC {
PRED_GT_PLUS = (1 << 5) | 15,
PRED_NE_PLUS = (2 << 5) | 7,
PRED_UN_PLUS = (3 << 5) | 15,
- PRED_NU_PLUS = (3 << 5) | 7
+ PRED_NU_PLUS = (3 << 5) | 7,
+
+ // When dealing with individual condition-register bits, we have simple set
+ // and unset predicates.
+ PRED_BIT_SET = 1024,
+ PRED_BIT_UNSET = 1025
};
/// Invert the specified predicate. != -> ==, < -> >=.
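
Aside: apart from the two new bit predicates, every value in this enum packs a branch encoding; the low five bits are the BO field (branch if the tested bit is true or false, plus prediction hints) and the upper bits select which bit of the CR field to test, which is why InvertPredicate and getSwappedPredicate can be simple switches over these values. PRED_BIT_SET and PRED_BIT_UNSET are deliberately given values (1024, 1025) outside that range. A small sketch of unpacking the encoding, with illustrative helper names that are not part of the backend:

#include <cassert>

// PPC::Predicate values (other than the BIT ones) are (BI << 5) | BO, where
// BI picks the bit inside a CR field (0=LT, 1=GT, 2=EQ, 3=SO/UN) and BO is
// the branch-options field of a conditional branch.
static unsigned predicateBO(unsigned Pred) { return Pred & 0x1F; }
static unsigned predicateBI(unsigned Pred) { return Pred >> 5; }

int main() {
  const unsigned PRED_GT_PLUS = (1 << 5) | 15; // values from PPCPredicates.h above
  const unsigned PRED_NU_PLUS = (3 << 5) | 7;

  assert(predicateBI(PRED_GT_PLUS) == 1);  // tests the GT bit
  assert(predicateBO(PRED_GT_PLUS) == 15); // branch-if-true, predicted taken
  assert(predicateBI(PRED_NU_PLUS) == 3);  // tests the SO/UN bit
  assert(predicateBO(PRED_NU_PLUS) == 7);  // branch-if-false, predicted taken
  return 0;
}
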
diff --git a/contrib/llvm/lib/Target/PowerPC/PPC.h b/contrib/llvm/lib/Target/PowerPC/PPC.h
index 216e321..ba5fa4f 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPC.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPC.h
@@ -23,6 +23,7 @@
namespace llvm {
class PPCTargetMachine;
+ class PassRegistry;
class FunctionPass;
class ImmutablePass;
class JITCodeEmitter;
@@ -35,6 +36,9 @@ namespace llvm {
FunctionPass *createPPCCTRLoopsVerify();
#endif
FunctionPass *createPPCEarlyReturnPass();
+ FunctionPass *createPPCVSXCopyPass();
+ FunctionPass *createPPCVSXCopyCleanupPass();
+ FunctionPass *createPPCVSXFMAMutatePass();
FunctionPass *createPPCBranchSelectionPass();
FunctionPass *createPPCISelDag(PPCTargetMachine &TM);
FunctionPass *createPPCJITCodeEmitterPass(PPCTargetMachine &TM,
@@ -45,6 +49,9 @@ namespace llvm {
/// \brief Creates an PPC-specific Target Transformation Info pass.
ImmutablePass *createPPCTargetTransformInfoPass(const PPCTargetMachine *TM);
+ void initializePPCVSXFMAMutatePass(PassRegistry&);
+ extern char &PPCVSXFMAMutateID;
+
namespace PPCII {
/// Target Operand Flag enum.
diff --git a/contrib/llvm/lib/Target/PowerPC/PPC.td b/contrib/llvm/lib/Target/PowerPC/PPC.td
index 54e3d40..a9842b2 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPC.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPC.td
@@ -46,11 +46,14 @@ def DirectivePwr5x: SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR5X", ""
def DirectivePwr6: SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR6", "">;
def DirectivePwr6x: SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR6X", "">;
def DirectivePwr7: SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR7", "">;
+def DirectivePwr8: SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR8", "">;
def Feature64Bit : SubtargetFeature<"64bit","Has64BitSupport", "true",
"Enable 64-bit instructions">;
def Feature64BitRegs : SubtargetFeature<"64bitregs","Use64BitRegs", "true",
"Enable 64-bit registers usage for ppc32 [beta]">;
+def FeatureCRBits : SubtargetFeature<"crbits", "UseCRBits", "true",
+ "Use condition-register bits individually">;
def FeatureAltivec : SubtargetFeature<"altivec","HasAltivec", "true",
"Enable Altivec instructions">;
def FeatureMFOCRF : SubtargetFeature<"mfocrf","HasMFOCRF", "true",
@@ -88,7 +91,8 @@ def FeatureBookE : SubtargetFeature<"booke", "IsBookE", "true",
def FeatureQPX : SubtargetFeature<"qpx","HasQPX", "true",
"Enable QPX instructions">;
def FeatureVSX : SubtargetFeature<"vsx","HasVSX", "true",
- "Enable VSX instructions">;
+ "Enable VSX instructions",
+ [FeatureAltivec]>;
def DeprecatedMFTB : SubtargetFeature<"", "DeprecatedMFTB", "true",
"Treat mftb as deprecated">;
@@ -110,6 +114,12 @@ def DeprecatedDST : SubtargetFeature<"", "DeprecatedDST", "true",
// their record-form variants.
class RecFormRel;
+// AltVSXFMARel - Filter class used to relate the primary addend-killing VSX
+// FMA instruction forms with their corresponding factor-killing forms.
+class AltVSXFMARel {
+ bit IsVSXFMAAlt = 0;
+}
+
//===----------------------------------------------------------------------===//
// Relation Map Definitions.
//===----------------------------------------------------------------------===//
@@ -140,6 +150,19 @@ def getNonRecordFormOpcode : InstrMapping {
let ValueCols = [["0"]];
}
+def getAltVSXFMAOpcode : InstrMapping {
+ let FilterClass = "AltVSXFMARel";
+ // Instructions with the same BaseName and Interpretation64Bit values
+ // form a row.
+ let RowFields = ["BaseName"];
+ // Instructions with the same RC value form a column.
+ let ColFields = ["IsVSXFMAAlt"];
+  // The key column holds the (default) addend-killing instructions.
+ let KeyCol = ["0"];
+ // Value columns IsVSXFMAAlt=1
+ let ValueCols = [["1"]];
+}
+
//===----------------------------------------------------------------------===//
// Register File Description
//===----------------------------------------------------------------------===//
@@ -153,12 +176,12 @@ include "PPCInstrInfo.td"
//
def : Processor<"generic", G3Itineraries, [Directive32]>;
-def : Processor<"440", PPC440Itineraries, [Directive440, FeatureISEL,
- FeatureFRES, FeatureFRSQRTE,
- FeatureBookE, DeprecatedMFTB]>;
-def : Processor<"450", PPC440Itineraries, [Directive440, FeatureISEL,
- FeatureFRES, FeatureFRSQRTE,
- FeatureBookE, DeprecatedMFTB]>;
+def : ProcessorModel<"440", PPC440Model, [Directive440, FeatureISEL,
+ FeatureFRES, FeatureFRSQRTE,
+ FeatureBookE, DeprecatedMFTB]>;
+def : ProcessorModel<"450", PPC440Model, [Directive440, FeatureISEL,
+ FeatureFRES, FeatureFRSQRTE,
+ FeatureBookE, DeprecatedMFTB]>;
def : Processor<"601", G3Itineraries, [Directive601]>;
def : Processor<"602", G3Itineraries, [Directive602]>;
def : Processor<"603", G3Itineraries, [Directive603,
@@ -254,7 +277,7 @@ def : ProcessorModel<"pwr6x", G5Model,
FeatureSTFIWX, FeatureLFIWAX,
FeatureFPRND, Feature64Bit,
DeprecatedMFTB, DeprecatedDST]>;
-def : ProcessorModel<"pwr7", G5Model,
+def : ProcessorModel<"pwr7", P7Model,
[DirectivePwr7, FeatureAltivec,
FeatureMFOCRF, FeatureFCPSGN, FeatureFSqrt, FeatureFRE,
FeatureFRES, FeatureFRSQRTE, FeatureFRSQRTES,
@@ -263,6 +286,15 @@ def : ProcessorModel<"pwr7", G5Model,
FeaturePOPCNTD, FeatureLDBRX,
Feature64Bit /*, Feature64BitRegs */,
DeprecatedMFTB, DeprecatedDST]>;
+def : ProcessorModel<"pwr8", P7Model /* FIXME: Update to P8Model when available */,
+ [DirectivePwr8, FeatureAltivec,
+ FeatureMFOCRF, FeatureFCPSGN, FeatureFSqrt, FeatureFRE,
+ FeatureFRES, FeatureFRSQRTE, FeatureFRSQRTES,
+ FeatureRecipPrec, FeatureSTFIWX, FeatureLFIWAX,
+ FeatureFPRND, FeatureFPCVT, FeatureISEL,
+ FeaturePOPCNTD, FeatureLDBRX,
+ Feature64Bit /*, Feature64BitRegs */,
+ DeprecatedMFTB, DeprecatedDST]>;
def : Processor<"ppc", G3Itineraries, [Directive32]>;
def : ProcessorModel<"ppc64", G5Model,
[Directive64, FeatureAltivec,
@@ -283,11 +315,11 @@ include "PPCCallingConv.td"
def PPCInstrInfo : InstrInfo {
let isLittleEndianEncoding = 1;
-}
-def PPCAsmWriter : AsmWriter {
- string AsmWriterClassName = "InstPrinter";
- bit isMCAsmWriter = 1;
+ // FIXME: Unset this when no longer needed!
+ let decodePositionallyEncodedOperands = 1;
+
+ let noNamedPositionallyEncodedOperands = 1;
}
def PPCAsmParser : AsmParser {
@@ -306,8 +338,7 @@ def PPCAsmParserVariant : AsmParserVariant {
def PPC : Target {
// Information about the instructions.
let InstructionSet = PPCInstrInfo;
-
- let AssemblyWriters = [PPCAsmWriter];
+
let AssemblyParsers = [PPCAsmParser];
let AssemblyParserVariants = [PPCAsmParserVariant];
}
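
Aside: the getAltVSXFMAOpcode relation map above is consumed from C++; TableGen's instruction-relation backend derives a lookup that maps each addend-killing VSX FMA opcode to its factor-killing alternative, and the generated code lands in PPCGenInstrInfo.inc. As a hedged illustration only (the real table and opcode values are generated, the numbers below are placeholders), the interface conventionally has the shape of a query returning the alternative opcode or -1:

#include <cstdint>
#include <cstdio>

// Mock of the table derived from the getAltVSXFMAOpcode InstrMapping: each
// row pairs an addend-killing VSX FMA opcode with its factor-killing
// alternative. Opcode numbers here are placeholders, not real enum values.
struct FMARelation { uint16_t Primary, Alt; };
static const FMARelation AltVSXFMATable[] = {
  { /*XSMADDADP*/ 100, /*XSMADDMDP*/ 101 },
  { /*XVMADDADP*/ 102, /*XVMADDMDP*/ 103 },
};

// Assumed shape of the generated query: alternative opcode, or -1 if the
// instruction has no factor-killing form.
static int getAltVSXFMAOpcode(uint16_t Opcode) {
  for (const FMARelation &R : AltVSXFMATable)
    if (R.Primary == Opcode)
      return R.Alt;
  return -1;
}

int main() {
  printf("%d\n", getAltVSXFMAOpcode(100)); // 101
  printf("%d\n", getAltVSXFMAOpcode(999)); // -1: no factor-killing form
  return 0;
}

Such a lookup is what lets a pass like the PPCVSXFMAMutate pass declared in PPC.h above rewrite an FMA to clobber a factor register instead of the addend when that frees a copy.
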
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp b/contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
index 484de19..1384022 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -16,29 +16,29 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asmprinter"
#include "PPC.h"
#include "InstPrinter/PPCInstPrinter.h"
#include "PPCMachineFunctionInfo.h"
-#include "MCTargetDesc/PPCPredicates.h"
#include "MCTargetDesc/PPCMCExpr.h"
+#include "MCTargetDesc/PPCPredicates.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "PPCTargetStreamer.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
@@ -56,12 +56,13 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "asmprinter"
+
namespace {
class PPCAsmPrinter : public AsmPrinter {
protected:
@@ -73,22 +74,22 @@ namespace {
: AsmPrinter(TM, Streamer),
Subtarget(TM.getSubtarget<PPCSubtarget>()), TOCLabelID(0) {}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "PowerPC Assembly Printer";
}
MCSymbol *lookUpOrCreateTOCEntry(MCSymbol *Sym);
- virtual void EmitInstruction(const MachineInstr *MI);
+ void EmitInstruction(const MachineInstr *MI) override;
void printOperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &O);
bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O);
+ raw_ostream &O) override;
bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O);
+ raw_ostream &O) override;
};
/// PPCLinuxAsmPrinter - PowerPC assembly printer, customized for Linux
@@ -97,16 +98,17 @@ namespace {
explicit PPCLinuxAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
: PPCAsmPrinter(TM, Streamer) {}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Linux PPC Assembly Printer";
}
- bool doFinalization(Module &M);
- void EmitStartOfAsmFile(Module &M);
+ bool doFinalization(Module &M) override;
+ void EmitStartOfAsmFile(Module &M) override;
- virtual void EmitFunctionEntryLabel();
+ void EmitFunctionEntryLabel() override;
- void EmitFunctionBodyEnd();
+ void EmitFunctionBodyStart() override;
+ void EmitFunctionBodyEnd() override;
};
/// PPCDarwinAsmPrinter - PowerPC assembly printer, customized for Darwin/Mac
@@ -116,12 +118,12 @@ namespace {
explicit PPCDarwinAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
: PPCAsmPrinter(TM, Streamer) {}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Darwin PPC Assembly Printer";
}
- bool doFinalization(Module &M);
- void EmitStartOfAsmFile(Module &M);
+ bool doFinalization(Module &M) override;
+ void EmitStartOfAsmFile(Module &M) override;
void EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs);
};
@@ -133,7 +135,10 @@ static const char *stripRegisterPrefix(const char *RegName) {
switch (RegName[0]) {
case 'r':
case 'f':
- case 'v': return RegName + 1;
+ case 'v':
+ if (RegName[1] == 's')
+ return RegName + 2;
+ return RegName + 1;
case 'c': if (RegName[1] == 'r') return RegName + 2;
}
@@ -142,6 +147,7 @@ static const char *stripRegisterPrefix(const char *RegName) {
void PPCAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
raw_ostream &O) {
+ const DataLayout *DL = TM.getDataLayout();
const MachineOperand &MO = MI->getOperand(OpNo);
switch (MO.getType()) {
@@ -160,37 +166,13 @@ void PPCAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
case MachineOperand::MO_MachineBasicBlock:
O << *MO.getMBB()->getSymbol();
return;
- case MachineOperand::MO_JumpTableIndex:
- O << MAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber()
- << '_' << MO.getIndex();
- // FIXME: PIC relocation model
- return;
case MachineOperand::MO_ConstantPoolIndex:
- O << MAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber()
+ O << DL->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber()
<< '_' << MO.getIndex();
return;
case MachineOperand::MO_BlockAddress:
O << *GetBlockAddressSymbol(MO.getBlockAddress());
return;
- case MachineOperand::MO_ExternalSymbol: {
- // Computing the address of an external symbol, not calling it.
- if (TM.getRelocationModel() == Reloc::Static) {
- O << *GetExternalSymbolSymbol(MO.getSymbolName());
- return;
- }
-
- MCSymbol *NLPSym =
- OutContext.GetOrCreateSymbol(StringRef(MAI->getGlobalPrefix())+
- MO.getSymbolName()+"$non_lazy_ptr");
- MachineModuleInfoImpl::StubValueTy &StubSym =
- MMI->getObjFileInfo<MachineModuleInfoMachO>().getGVStubEntry(NLPSym);
- if (StubSym.getPointer() == 0)
- StubSym = MachineModuleInfoImpl::
- StubValueTy(GetExternalSymbolSymbol(MO.getSymbolName()), true);
-
- O << *NLPSym;
- return;
- }
case MachineOperand::MO_GlobalAddress: {
// Computing the address of a global symbol, not calling it.
const GlobalValue *GV = MO.getGlobal();
@@ -200,21 +182,21 @@ void PPCAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
if (TM.getRelocationModel() != Reloc::Static &&
(GV->isDeclaration() || GV->isWeakForLinker())) {
if (!GV->hasHiddenVisibility()) {
- SymToPrint = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
+ SymToPrint = getSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
MachineModuleInfoImpl::StubValueTy &StubSym =
MMI->getObjFileInfo<MachineModuleInfoMachO>()
.getGVStubEntry(SymToPrint);
- if (StubSym.getPointer() == 0)
+ if (!StubSym.getPointer())
StubSym = MachineModuleInfoImpl::
StubValueTy(getSymbol(GV), !GV->hasInternalLinkage());
} else if (GV->isDeclaration() || GV->hasCommonLinkage() ||
GV->hasAvailableExternallyLinkage()) {
- SymToPrint = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
+ SymToPrint = getSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
MachineModuleInfoImpl::StubValueTy &StubSym =
MMI->getObjFileInfo<MachineModuleInfoMachO>().
getHiddenGVStubEntry(SymToPrint);
- if (StubSym.getPointer() == 0)
+ if (!StubSym.getPointer())
StubSym = MachineModuleInfoImpl::
StubValueTy(getSymbol(GV), !GV->hasInternalLinkage());
} else {
@@ -231,7 +213,7 @@ void PPCAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
}
default:
- O << "<unknown operand type: " << MO.getType() << ">";
+ O << "<unknown operand type: " << (unsigned)MO.getType() << ">";
return;
}
}
@@ -308,13 +290,13 @@ bool PPCAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
/// exists for it. If not, create one. Then return a symbol that references
/// the TOC entry.
MCSymbol *PPCAsmPrinter::lookUpOrCreateTOCEntry(MCSymbol *Sym) {
-
+ const DataLayout *DL = TM.getDataLayout();
MCSymbol *&TOCEntry = TOC[Sym];
// To avoid name clash check if the name already exists.
- while (TOCEntry == 0) {
- if (OutContext.LookupSymbol(Twine(MAI->getPrivateGlobalPrefix()) +
- "C" + Twine(TOCLabelID++)) == 0) {
+ while (!TOCEntry) {
+ if (OutContext.LookupSymbol(Twine(DL->getPrivateGlobalPrefix()) +
+ "C" + Twine(TOCLabelID++)) == nullptr) {
TOCEntry = GetTempSymbol("C", TOCLabelID);
}
}
@@ -344,7 +326,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCSymbol *PICBase = MF->getPICBaseSymbol();
// Emit the 'bl'.
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::BL)
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::BL)
// FIXME: We would like an efficient form for this, so we don't have to do
// a lot of extra uniquing.
.addExpr(MCSymbolRefExpr::Create(PICBase, OutContext)));
@@ -369,7 +351,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
PB,
OutContext));
TmpInst.addOperand(MO);
- OutStreamer.EmitInstruction(TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
return;
}
case PPC::UpdateGBR: {
@@ -378,7 +360,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, Subtarget.isDarwin());
TmpInst.setOpcode(PPC::ADD4);
TmpInst.addOperand(TmpInst.getOperand(0));
- OutStreamer.EmitInstruction(TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
return;
}
case PPC::LWZtoc: {
@@ -392,7 +374,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// Map symbol -> label of TOC entry
assert(MO.isGlobal() || MO.isCPI() || MO.isJTI());
- MCSymbol *MOSymbol = NULL;
+ MCSymbol *MOSymbol = nullptr;
if (MO.isGlobal())
MOSymbol = getSymbol(MO.getGlobal());
else if (MO.isCPI())
@@ -410,7 +392,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
OutContext);
Exp = MCBinaryExpr::CreateSub(Exp, PB, OutContext);
TmpInst.getOperand(1) = MCOperand::CreateExpr(Exp);
- OutStreamer.EmitInstruction(TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
return;
}
case PPC::LDtocJTI:
@@ -426,7 +408,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// Map symbol -> label of TOC entry
assert(MO.isGlobal() || MO.isCPI() || MO.isJTI());
- MCSymbol *MOSymbol = 0;
+ MCSymbol *MOSymbol = nullptr;
if (MO.isGlobal())
MOSymbol = getSymbol(MO.getGlobal());
else if (MO.isCPI())
@@ -440,7 +422,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCSymbolRefExpr::Create(TOCEntry, MCSymbolRefExpr::VK_PPC_TOC,
OutContext);
TmpInst.getOperand(1) = MCOperand::CreateExpr(Exp);
- OutStreamer.EmitInstruction(TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
return;
}
@@ -448,45 +430,42 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// Transform %Xd = ADDIStocHA %X2, <ga:@sym>
LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, Subtarget.isDarwin());
- // Change the opcode to ADDIS8. If the global address is external,
- // has common linkage, is a function address, or is a jump table
+ // Change the opcode to ADDIS8. If the global address is external, has
+ // common linkage, is a non-local function address, or is a jump table
// address, then generate a TOC entry and reference that. Otherwise
// reference the symbol directly.
TmpInst.setOpcode(PPC::ADDIS8);
const MachineOperand &MO = MI->getOperand(2);
assert((MO.isGlobal() || MO.isCPI() || MO.isJTI()) &&
"Invalid operand for ADDIStocHA!");
- MCSymbol *MOSymbol = 0;
+ MCSymbol *MOSymbol = nullptr;
bool IsExternal = false;
- bool IsFunction = false;
+ bool IsNonLocalFunction = false;
bool IsCommon = false;
bool IsAvailExt = false;
if (MO.isGlobal()) {
- const GlobalValue *GValue = MO.getGlobal();
- const GlobalAlias *GAlias = dyn_cast<GlobalAlias>(GValue);
- const GlobalValue *RealGValue = GAlias ?
- GAlias->resolveAliasedGlobal(false) : GValue;
- MOSymbol = getSymbol(RealGValue);
- const GlobalVariable *GVar = dyn_cast<GlobalVariable>(RealGValue);
- IsExternal = GVar && !GVar->hasInitializer();
- IsCommon = GVar && RealGValue->hasCommonLinkage();
- IsFunction = !GVar;
- IsAvailExt = GVar && RealGValue->hasAvailableExternallyLinkage();
+ const GlobalValue *GV = MO.getGlobal();
+ MOSymbol = getSymbol(GV);
+ IsExternal = GV->isDeclaration();
+ IsCommon = GV->hasCommonLinkage();
+ IsNonLocalFunction = GV->getType()->getElementType()->isFunctionTy() &&
+ (GV->isDeclaration() || GV->isWeakForLinker());
+ IsAvailExt = GV->hasAvailableExternallyLinkage();
} else if (MO.isCPI())
MOSymbol = GetCPISymbol(MO.getIndex());
else if (MO.isJTI())
MOSymbol = GetJTISymbol(MO.getIndex());
- if (IsExternal || IsFunction || IsCommon || IsAvailExt || MO.isJTI() ||
- TM.getCodeModel() == CodeModel::Large)
+ if (IsExternal || IsNonLocalFunction || IsCommon || IsAvailExt ||
+ MO.isJTI() || TM.getCodeModel() == CodeModel::Large)
MOSymbol = lookUpOrCreateTOCEntry(MOSymbol);
const MCExpr *Exp =
MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TOC_HA,
OutContext);
TmpInst.getOperand(2) = MCOperand::CreateExpr(Exp);
- OutStreamer.EmitInstruction(TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
return;
}
case PPC::LDtocL: {
@@ -500,7 +479,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
const MachineOperand &MO = MI->getOperand(1);
assert((MO.isGlobal() || MO.isJTI() || MO.isCPI()) &&
"Invalid operand for LDtocL!");
- MCSymbol *MOSymbol = 0;
+ MCSymbol *MOSymbol = nullptr;
if (MO.isJTI())
MOSymbol = lookUpOrCreateTOCEntry(GetJTISymbol(MO.getIndex()));
@@ -511,14 +490,10 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
else if (MO.isGlobal()) {
const GlobalValue *GValue = MO.getGlobal();
- const GlobalAlias *GAlias = dyn_cast<GlobalAlias>(GValue);
- const GlobalValue *RealGValue = GAlias ?
- GAlias->resolveAliasedGlobal(false) : GValue;
- MOSymbol = getSymbol(RealGValue);
- const GlobalVariable *GVar = dyn_cast<GlobalVariable>(RealGValue);
-
- if (!GVar || !GVar->hasInitializer() || RealGValue->hasCommonLinkage() ||
- RealGValue->hasAvailableExternallyLinkage() ||
+ MOSymbol = getSymbol(GValue);
+ if (GValue->getType()->getElementType()->isFunctionTy() ||
+ GValue->isDeclaration() || GValue->hasCommonLinkage() ||
+ GValue->hasAvailableExternallyLinkage() ||
TM.getCodeModel() == CodeModel::Large)
MOSymbol = lookUpOrCreateTOCEntry(MOSymbol);
}
@@ -527,7 +502,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TOC_LO,
OutContext);
TmpInst.getOperand(1) = MCOperand::CreateExpr(Exp);
- OutStreamer.EmitInstruction(TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
return;
}
case PPC::ADDItocL: {
@@ -540,30 +515,28 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
TmpInst.setOpcode(PPC::ADDI8);
const MachineOperand &MO = MI->getOperand(2);
assert((MO.isGlobal() || MO.isCPI()) && "Invalid operand for ADDItocL");
- MCSymbol *MOSymbol = 0;
+ MCSymbol *MOSymbol = nullptr;
bool IsExternal = false;
- bool IsFunction = false;
+ bool IsNonLocalFunction = false;
if (MO.isGlobal()) {
- const GlobalValue *GValue = MO.getGlobal();
- const GlobalAlias *GAlias = dyn_cast<GlobalAlias>(GValue);
- const GlobalValue *RealGValue = GAlias ?
- GAlias->resolveAliasedGlobal(false) : GValue;
- MOSymbol = getSymbol(RealGValue);
- const GlobalVariable *GVar = dyn_cast<GlobalVariable>(RealGValue);
- IsExternal = GVar && !GVar->hasInitializer();
- IsFunction = !GVar;
+ const GlobalValue *GV = MO.getGlobal();
+ MOSymbol = getSymbol(GV);
+ IsExternal = GV->isDeclaration();
+ IsNonLocalFunction = GV->getType()->getElementType()->isFunctionTy() &&
+ (GV->isDeclaration() || GV->isWeakForLinker());
} else if (MO.isCPI())
MOSymbol = GetCPISymbol(MO.getIndex());
- if (IsFunction || IsExternal || TM.getCodeModel() == CodeModel::Large)
+ if (IsNonLocalFunction || IsExternal ||
+ TM.getCodeModel() == CodeModel::Large)
MOSymbol = lookUpOrCreateTOCEntry(MOSymbol);
const MCExpr *Exp =
MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TOC_LO,
OutContext);
TmpInst.getOperand(2) = MCOperand::CreateExpr(Exp);
- OutStreamer.EmitInstruction(TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
return;
}
case PPC::ADDISgotTprelHA: {
@@ -576,7 +549,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
const MCExpr *SymGotTprel =
MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TPREL_HA,
OutContext);
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDIS8)
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::ADDIS8)
.addReg(MI->getOperand(0).getReg())
.addReg(PPC::X2)
.addExpr(SymGotTprel));
@@ -596,7 +569,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TPREL_LO,
OutContext);
TmpInst.getOperand(1) = MCOperand::CreateExpr(Exp);
- OutStreamer.EmitInstruction(TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
return;
}
@@ -605,7 +578,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCSymbol *GOTRef = OutContext.CreateTempSymbol();
MCSymbol *NextInstr = OutContext.CreateTempSymbol();
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::BL)
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::BL)
// FIXME: We would like an efficient form for this, so we don't have to do
// a lot of extra uniquing.
.addExpr(MCSymbolRefExpr::Create(NextInstr, OutContext)));
@@ -616,13 +589,13 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
OutStreamer.EmitLabel(GOTRef);
OutStreamer.EmitValue(OffsExpr, 4);
OutStreamer.EmitLabel(NextInstr);
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::MFLR)
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::MFLR)
.addReg(MI->getOperand(0).getReg()));
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::LWZ)
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::LWZ)
.addReg(MI->getOperand(1).getReg())
.addImm(0)
.addReg(MI->getOperand(0).getReg()));
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADD4)
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::ADD4)
.addReg(MI->getOperand(0).getReg())
.addReg(MI->getOperand(1).getReg())
.addReg(MI->getOperand(0).getReg()));
@@ -636,10 +609,10 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
const MCExpr *SymGotTlsHA =
MCSymbolRefExpr::Create(GOTSymbol, MCSymbolRefExpr::VK_PPC_HA,
OutContext);
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::LI)
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::LI)
.addReg(MI->getOperand(0).getReg())
.addExpr(SymGotTlsL));
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDIS)
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::ADDIS)
.addReg(MI->getOperand(0).getReg())
.addReg(MI->getOperand(0).getReg())
.addExpr(SymGotTlsHA));
@@ -655,7 +628,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
const MCExpr *SymGotTlsGD =
MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSGD_HA,
OutContext);
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDIS8)
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::ADDIS8)
.addReg(MI->getOperand(0).getReg())
.addReg(PPC::X2)
.addExpr(SymGotTlsGD));
@@ -675,7 +648,8 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCSymbolRefExpr::VK_PPC_GOT_TLSGD_LO :
MCSymbolRefExpr::VK_PPC_GOT_TLSGD,
OutContext);
- OutStreamer.EmitInstruction(MCInstBuilder(Subtarget.isPPC64() ? PPC::ADDI8 : PPC::ADDI)
+ EmitToStreamer(OutStreamer,
+ MCInstBuilder(Subtarget.isPPC64() ? PPC::ADDI8 : PPC::ADDI)
.addReg(MI->getOperand(0).getReg())
.addReg(MI->getOperand(1).getReg())
.addExpr(SymGotTlsGD));
@@ -703,10 +677,11 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
const MCExpr *SymVar =
MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TLSGD,
OutContext);
- OutStreamer.EmitInstruction(MCInstBuilder(Subtarget.isPPC64() ?
+ EmitToStreamer(OutStreamer,
+ MCInstBuilder(Subtarget.isPPC64() ?
PPC::BL8_NOP_TLS : PPC::BL_TLS)
- .addExpr(TlsRef)
- .addExpr(SymVar));
+ .addExpr(TlsRef)
+ .addExpr(SymVar));
return;
}
case PPC::ADDIStlsldHA: {
@@ -719,7 +694,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
const MCExpr *SymGotTlsLD =
MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSLD_HA,
OutContext);
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDIS8)
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::ADDIS8)
.addReg(MI->getOperand(0).getReg())
.addReg(PPC::X2)
.addExpr(SymGotTlsLD));
@@ -731,7 +706,6 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
case PPC::ADDItlsldL32: {
// Transform: %Rd = ADDItlsldL32 %Rs, <ga:@sym>
// Into: %Rd = ADDI %Rs, sym@got@tlsld
-
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
MCSymbol *MOSymbol = getSymbol(GValue);
@@ -740,10 +714,11 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCSymbolRefExpr::VK_PPC_GOT_TLSLD_LO :
MCSymbolRefExpr::VK_PPC_GOT_TLSLD,
OutContext);
- OutStreamer.EmitInstruction(MCInstBuilder(Subtarget.isPPC64() ? PPC::ADDI8 : PPC::ADDI)
- .addReg(MI->getOperand(0).getReg())
- .addReg(MI->getOperand(1).getReg())
- .addExpr(SymGotTlsLD));
+ EmitToStreamer(OutStreamer,
+ MCInstBuilder(Subtarget.isPPC64() ? PPC::ADDI8 : PPC::ADDI)
+ .addReg(MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(1).getReg())
+ .addExpr(SymGotTlsLD));
return;
}
case PPC::GETtlsldADDR:
@@ -769,10 +744,11 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
const MCExpr *SymVar =
MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TLSLD,
OutContext);
- OutStreamer.EmitInstruction(MCInstBuilder(Subtarget.isPPC64() ?
+ EmitToStreamer(OutStreamer,
+ MCInstBuilder(Subtarget.isPPC64() ?
PPC::BL8_NOP_TLS : PPC::BL_TLS)
- .addExpr(TlsRef)
- .addExpr(SymVar));
+ .addExpr(TlsRef)
+ .addExpr(SymVar));
return;
}
case PPC::ADDISdtprelHA:
@@ -781,17 +757,17 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
case PPC::ADDISdtprelHA32: {
// Transform: %Rd = ADDISdtprelHA32 %R3, <ga:@sym>
// Into: %Rd = ADDIS %R3, sym@dtprel@ha
-
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
MCSymbol *MOSymbol = getSymbol(GValue);
const MCExpr *SymDtprel =
MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_DTPREL_HA,
OutContext);
- OutStreamer.EmitInstruction(MCInstBuilder(Subtarget.isPPC64() ? PPC::ADDIS8 : PPC::ADDIS)
- .addReg(MI->getOperand(0).getReg())
- .addReg(PPC::X3)
- .addExpr(SymDtprel));
+ EmitToStreamer(OutStreamer,
+ MCInstBuilder(Subtarget.isPPC64() ? PPC::ADDIS8 : PPC::ADDIS)
+ .addReg(MI->getOperand(0).getReg())
+ .addReg(Subtarget.isPPC64() ? PPC::X3 : PPC::R3)
+ .addExpr(SymDtprel));
return;
}
case PPC::ADDIdtprelL:
@@ -806,10 +782,11 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
const MCExpr *SymDtprel =
MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_DTPREL_LO,
OutContext);
- OutStreamer.EmitInstruction(MCInstBuilder(Subtarget.isPPC64() ? PPC::ADDI8 : PPC::ADDI)
- .addReg(MI->getOperand(0).getReg())
- .addReg(MI->getOperand(1).getReg())
- .addExpr(SymDtprel));
+ EmitToStreamer(OutStreamer,
+ MCInstBuilder(Subtarget.isPPC64() ? PPC::ADDI8 : PPC::ADDI)
+ .addReg(MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(1).getReg())
+ .addExpr(SymDtprel));
return;
}
case PPC::MFOCRF:
@@ -821,7 +798,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MI->getOpcode() == PPC::MFOCRF ? PPC::MFCR : PPC::MFCR8;
OutStreamer.AddComment(PPCInstPrinter::
getRegisterName(MI->getOperand(1).getReg()));
- OutStreamer.EmitInstruction(MCInstBuilder(NewOpcode)
+ EmitToStreamer(OutStreamer, MCInstBuilder(NewOpcode)
.addReg(MI->getOperand(0).getReg()));
return;
}
@@ -837,7 +814,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
->getEncodingValue(MI->getOperand(0).getReg());
OutStreamer.AddComment(PPCInstPrinter::
getRegisterName(MI->getOperand(0).getReg()));
- OutStreamer.EmitInstruction(MCInstBuilder(NewOpcode)
+ EmitToStreamer(OutStreamer, MCInstBuilder(NewOpcode)
.addImm(Mask)
.addReg(MI->getOperand(1).getReg()));
return;
@@ -865,10 +842,18 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, Subtarget.isDarwin());
- OutStreamer.EmitInstruction(TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
}
void PPCLinuxAsmPrinter::EmitStartOfAsmFile(Module &M) {
+ if (Subtarget.isELFv2ABI()) {
+ PPCTargetStreamer *TS =
+ static_cast<PPCTargetStreamer *>(OutStreamer.getTargetStreamer());
+
+ if (TS)
+ TS->emitAbiVersion(2);
+ }
+
if (Subtarget.isPPC64() || TM.getRelocationModel() != Reloc::PIC_)
return AsmPrinter::EmitStartOfAsmFile(M);
@@ -905,7 +890,7 @@ void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() {
if (!Subtarget.isPPC64()) {
const PPCFunctionInfo *PPCFI = MF->getInfo<PPCFunctionInfo>();
- if (PPCFI->usesPICBase()) {
+ if (PPCFI->usesPICBase()) {
MCSymbol *RelocSymbol = PPCFI->getPICOffsetSymbol();
MCSymbol *PICBase = MF->getPICBaseSymbol();
OutStreamer.EmitLabel(RelocSymbol);
@@ -922,7 +907,11 @@ void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() {
} else
return AsmPrinter::EmitFunctionEntryLabel();
}
-
+
+ // ELFv2 ABI - Normal entry label.
+ if (Subtarget.isELFv2ABI())
+ return AsmPrinter::EmitFunctionEntryLabel();
+
// Emit an official procedure descriptor.
MCSectionSubPair Current = OutStreamer.getCurrentSection();
const MCSectionELF *Section = OutStreamer.getContext().getELFSection(".opd",
@@ -959,7 +948,7 @@ bool PPCLinuxAsmPrinter::doFinalization(Module &M) {
bool isPPC64 = TD->getPointerSizeInBits() == 64;
PPCTargetStreamer &TS =
- static_cast<PPCTargetStreamer &>(OutStreamer.getTargetStreamer());
+ static_cast<PPCTargetStreamer &>(*OutStreamer.getTargetStreamer());
if (!TOC.empty()) {
const MCSectionELF *Section;
@@ -968,7 +957,7 @@ bool PPCLinuxAsmPrinter::doFinalization(Module &M) {
Section = OutStreamer.getContext().getELFSection(".toc",
ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC,
SectionKind::getReadOnly());
- else
+ else
Section = OutStreamer.getContext().getELFSection(".got2",
ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC,
SectionKind::getReadOnly());
@@ -1007,6 +996,68 @@ bool PPCLinuxAsmPrinter::doFinalization(Module &M) {
return AsmPrinter::doFinalization(M);
}
+/// EmitFunctionBodyStart - Emit a global entry point prefix for ELFv2.
+void PPCLinuxAsmPrinter::EmitFunctionBodyStart() {
+ // In the ELFv2 ABI, in functions that use the TOC register, we need to
+ // provide two entry points. The ABI guarantees that when calling the
+ // local entry point, r2 is set up by the caller to contain the TOC base
+ // for this function, and when calling the global entry point, r12 is set
+ // up by the caller to hold the address of the global entry point. We
+ // thus emit a prefix sequence along the following lines:
+ //
+ // func:
+ // # global entry point
+ // addis r2,r12,(.TOC.-func)@ha
+ // addi r2,r2,(.TOC.-func)@l
+ // .localentry func, .-func
+ // # local entry point, followed by function body
+ //
+ // This ensures we have r2 set up correctly while executing the function
+ // body, no matter which entry point is called.
+ if (Subtarget.isELFv2ABI()
+ // Only do all that if the function uses r2 in the first place.
+ && !MF->getRegInfo().use_empty(PPC::X2)) {
+
+ MCSymbol *GlobalEntryLabel = OutContext.CreateTempSymbol();
+ OutStreamer.EmitLabel(GlobalEntryLabel);
+ const MCSymbolRefExpr *GlobalEntryLabelExp =
+ MCSymbolRefExpr::Create(GlobalEntryLabel, OutContext);
+
+ MCSymbol *TOCSymbol = OutContext.GetOrCreateSymbol(StringRef(".TOC."));
+ const MCExpr *TOCDeltaExpr =
+ MCBinaryExpr::CreateSub(MCSymbolRefExpr::Create(TOCSymbol, OutContext),
+ GlobalEntryLabelExp, OutContext);
+
+ const MCExpr *TOCDeltaHi =
+ PPCMCExpr::CreateHa(TOCDeltaExpr, false, OutContext);
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::ADDIS)
+ .addReg(PPC::X2)
+ .addReg(PPC::X12)
+ .addExpr(TOCDeltaHi));
+
+ const MCExpr *TOCDeltaLo =
+ PPCMCExpr::CreateLo(TOCDeltaExpr, false, OutContext);
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::ADDI)
+ .addReg(PPC::X2)
+ .addReg(PPC::X2)
+ .addExpr(TOCDeltaLo));
+
+ MCSymbol *LocalEntryLabel = OutContext.CreateTempSymbol();
+ OutStreamer.EmitLabel(LocalEntryLabel);
+ const MCSymbolRefExpr *LocalEntryLabelExp =
+ MCSymbolRefExpr::Create(LocalEntryLabel, OutContext);
+ const MCExpr *LocalOffsetExp =
+ MCBinaryExpr::CreateSub(LocalEntryLabelExp,
+ GlobalEntryLabelExp, OutContext);
+
+ PPCTargetStreamer *TS =
+ static_cast<PPCTargetStreamer *>(OutStreamer.getTargetStreamer());
+
+ if (TS)
+ TS->emitLocalEntry(CurrentFnSym, LocalOffsetExp);
+ }
+}
+
/// EmitFunctionBodyEnd - Print the traceback table before the .size
/// directive.
///
@@ -1057,13 +1108,12 @@ void PPCDarwinAsmPrinter::EmitStartOfAsmFile(Module &M) {
if (Subtarget.isPPC64() && Directive < PPC::DIR_64)
Directive = PPC::DIR_64;
assert(Directive <= PPC::DIR_64 && "Directive out of range.");
-
- // FIXME: This is a total hack, finish mc'izing the PPC backend.
- if (OutStreamer.hasRawTextSupport()) {
- assert(Directive < array_lengthof(CPUDirectives) &&
- "CPUDirectives[] might not be up-to-date!");
- OutStreamer.EmitRawText("\t.machine " + Twine(CPUDirectives[Directive]));
- }
+
+ assert(Directive < array_lengthof(CPUDirectives) &&
+ "CPUDirectives[] might not be up-to-date!");
+ PPCTargetStreamer &TStreamer =
+ *static_cast<PPCTargetStreamer *>(OutStreamer.getTargetStreamer());
+ TStreamer.emitMachine(CPUDirectives[Directive]);
// Prime text sections so they are adjacent. This reduces the likelihood a
// large data or debug section causes a branch to exceed 16M limit.
@@ -1073,14 +1123,14 @@ void PPCDarwinAsmPrinter::EmitStartOfAsmFile(Module &M) {
if (TM.getRelocationModel() == Reloc::PIC_) {
OutStreamer.SwitchSection(
OutContext.getMachOSection("__TEXT", "__picsymbolstub1",
- MCSectionMachO::S_SYMBOL_STUBS |
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
+ MachO::S_SYMBOL_STUBS |
+ MachO::S_ATTR_PURE_INSTRUCTIONS,
32, SectionKind::getText()));
} else if (TM.getRelocationModel() == Reloc::DynamicNoPIC) {
OutStreamer.SwitchSection(
OutContext.getMachOSection("__TEXT","__symbol_stub1",
- MCSectionMachO::S_SYMBOL_STUBS |
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
+ MachO::S_SYMBOL_STUBS |
+ MachO::S_ATTR_PURE_INSTRUCTIONS,
16, SectionKind::getText()));
}
OutStreamer.SwitchSection(getObjFileLowering().getTextSection());
@@ -1112,8 +1162,8 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
if (TM.getRelocationModel() == Reloc::PIC_) {
const MCSection *StubSection =
OutContext.getMachOSection("__TEXT", "__picsymbolstub1",
- MCSectionMachO::S_SYMBOL_STUBS |
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
+ MachO::S_SYMBOL_STUBS |
+ MachO::S_ATTR_PURE_INSTRUCTIONS,
32, SectionKind::getText());
for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
OutStreamer.SwitchSection(StubSection);
@@ -1133,32 +1183,32 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
MCBinaryExpr::CreateSub(LazyPtrExpr, Anon, OutContext);
// mflr r0
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::MFLR).addReg(PPC::R0));
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::MFLR).addReg(PPC::R0));
// bcl 20, 31, AnonSymbol
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::BCLalways).addExpr(Anon));
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::BCLalways).addExpr(Anon));
OutStreamer.EmitLabel(AnonSymbol);
// mflr r11
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::MFLR).addReg(PPC::R11));
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::MFLR).addReg(PPC::R11));
// addis r11, r11, ha16(LazyPtr - AnonSymbol)
const MCExpr *SubHa16 = PPCMCExpr::CreateHa(Sub, isDarwin, OutContext);
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDIS)
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::ADDIS)
.addReg(PPC::R11)
.addReg(PPC::R11)
.addExpr(SubHa16));
// mtlr r0
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::MTLR).addReg(PPC::R0));
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::MTLR).addReg(PPC::R0));
// ldu r12, lo16(LazyPtr - AnonSymbol)(r11)
// lwzu r12, lo16(LazyPtr - AnonSymbol)(r11)
const MCExpr *SubLo16 = PPCMCExpr::CreateLo(Sub, isDarwin, OutContext);
- OutStreamer.EmitInstruction(MCInstBuilder(isPPC64 ? PPC::LDU : PPC::LWZU)
+ EmitToStreamer(OutStreamer, MCInstBuilder(isPPC64 ? PPC::LDU : PPC::LWZU)
.addReg(PPC::R12)
.addExpr(SubLo16).addExpr(SubLo16)
.addReg(PPC::R11));
// mtctr r12
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::MTCTR).addReg(PPC::R12));
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::MTCTR).addReg(PPC::R12));
// bctr
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::BCTR));
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::BCTR));
OutStreamer.SwitchSection(LSPSection);
OutStreamer.EmitLabel(LazyPtr);
@@ -1180,8 +1230,8 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
const MCSection *StubSection =
OutContext.getMachOSection("__TEXT","__symbol_stub1",
- MCSectionMachO::S_SYMBOL_STUBS |
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
+ MachO::S_SYMBOL_STUBS |
+ MachO::S_ATTR_PURE_INSTRUCTIONS,
16, SectionKind::getText());
for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
MCSymbol *Stub = Stubs[i].first;
@@ -1197,7 +1247,7 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
// lis r11, ha16(LazyPtr)
const MCExpr *LazyPtrHa16 =
PPCMCExpr::CreateHa(LazyPtrExpr, isDarwin, OutContext);
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::LIS)
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::LIS)
.addReg(PPC::R11)
.addExpr(LazyPtrHa16));
@@ -1205,15 +1255,15 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
// lwzu r12, lo16(LazyPtr)(r11)
const MCExpr *LazyPtrLo16 =
PPCMCExpr::CreateLo(LazyPtrExpr, isDarwin, OutContext);
- OutStreamer.EmitInstruction(MCInstBuilder(isPPC64 ? PPC::LDU : PPC::LWZU)
+ EmitToStreamer(OutStreamer, MCInstBuilder(isPPC64 ? PPC::LDU : PPC::LWZU)
.addReg(PPC::R12)
.addExpr(LazyPtrLo16).addExpr(LazyPtrLo16)
.addReg(PPC::R11));
// mtctr r12
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::MTCTR).addReg(PPC::R12));
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::MTCTR).addReg(PPC::R12));
// bctr
- OutStreamer.EmitInstruction(MCInstBuilder(PPC::BCTR));
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::BCTR));
OutStreamer.SwitchSection(LSPSection);
OutStreamer.EmitLabel(LazyPtr);
@@ -1254,7 +1304,7 @@ bool PPCDarwinAsmPrinter::doFinalization(Module &M) {
for (std::vector<const Function*>::const_iterator I = Personalities.begin(),
E = Personalities.end(); I != E; ++I) {
if (*I) {
- MCSymbol *NLPSym = GetSymbolWithGlobalValueBase(*I, "$non_lazy_ptr");
+ MCSymbol *NLPSym = getSymbolWithGlobalValueBase(*I, "$non_lazy_ptr");
MachineModuleInfoImpl::StubValueTy &StubSym =
MMIMacho.getGVStubEntry(NLPSym);
StubSym = MachineModuleInfoImpl::StubValueTy(getSymbol(*I), true);
@@ -1343,4 +1393,5 @@ static AsmPrinter *createPPCAsmPrinterPass(TargetMachine &tm,
extern "C" void LLVMInitializePowerPCAsmPrinter() {
TargetRegistry::RegisterAsmPrinter(ThePPC32Target, createPPCAsmPrinterPass);
TargetRegistry::RegisterAsmPrinter(ThePPC64Target, createPPCAsmPrinterPass);
+ TargetRegistry::RegisterAsmPrinter(ThePPC64LETarget, createPPCAsmPrinterPass);
}
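The PPCAsmPrinter hunks above route the ".machine" directive through the PPC target streamer and replace direct OutStreamer.EmitInstruction() calls with the AsmPrinter::EmitToStreamer() helper. A minimal sketch of both patterns, assuming OutStreamer and the PowerPC MC headers are in scope as in the file above; the directive string is only illustrative:

    // Emit ".machine <cpu>" through the target streamer instead of raw text,
    // so both the assembly and object streamers can handle it.
    PPCTargetStreamer &TS =
        *static_cast<PPCTargetStreamer *>(OutStreamer.getTargetStreamer());
    TS.emitMachine("ppc64");  // illustrative CPU directive

    // Route MCInsts through the AsmPrinter helper rather than the raw streamer.
    EmitToStreamer(OutStreamer, MCInstBuilder(PPC::MFLR).addReg(PPC::R0));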
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp b/contrib/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
index 3e608ca..ee90671 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
@@ -15,7 +15,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "ppc-branch-select"
#include "PPC.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPCInstrBuilder.h"
@@ -26,6 +25,8 @@
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
+#define DEBUG_TYPE "ppc-branch-select"
+
STATISTIC(NumExpanded, "Number of branches expanded to long format");
namespace llvm {
@@ -42,9 +43,9 @@ namespace {
/// BlockSizes - The sizes of the basic blocks in the function.
std::vector<unsigned> BlockSizes;
- virtual bool runOnMachineFunction(MachineFunction &Fn);
+ bool runOnMachineFunction(MachineFunction &Fn) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "PowerPC Branch Selector";
}
};
@@ -112,9 +113,12 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
unsigned MBBStartOffset = 0;
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
I != E; ++I) {
- MachineBasicBlock *Dest = 0;
+ MachineBasicBlock *Dest = nullptr;
if (I->getOpcode() == PPC::BCC && !I->getOperand(2).isImm())
Dest = I->getOperand(2).getMBB();
+ else if ((I->getOpcode() == PPC::BC || I->getOpcode() == PPC::BCn) &&
+ !I->getOperand(1).isImm())
+ Dest = I->getOperand(1).getMBB();
else if ((I->getOpcode() == PPC::BDNZ8 || I->getOpcode() == PPC::BDNZ ||
I->getOpcode() == PPC::BDZ8 || I->getOpcode() == PPC::BDZ) &&
!I->getOperand(0).isImm())
@@ -166,6 +170,12 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
// Jump over the uncond branch inst (i.e. $PC+8) on opposite condition.
BuildMI(MBB, I, dl, TII->get(PPC::BCC))
.addImm(PPC::InvertPredicate(Pred)).addReg(CRReg).addImm(2);
+ } else if (I->getOpcode() == PPC::BC) {
+ unsigned CRBit = I->getOperand(0).getReg();
+ BuildMI(MBB, I, dl, TII->get(PPC::BCn)).addReg(CRBit).addImm(2);
+ } else if (I->getOpcode() == PPC::BCn) {
+ unsigned CRBit = I->getOperand(0).getReg();
+ BuildMI(MBB, I, dl, TII->get(PPC::BC)).addReg(CRBit).addImm(2);
} else if (I->getOpcode() == PPC::BDNZ) {
BuildMI(MBB, I, dl, TII->get(PPC::BDZ)).addImm(2);
} else if (I->getOpcode() == PPC::BDNZ8) {
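The branch selector hunks above teach long-branch expansion about the CR-bit conditional forms BC/BCn: when the target is out of displacement range, the condition is inverted (BC becomes BCn and vice versa) so the short branch hops over a long unconditional branch inserted after it. A rough sketch of the inversion, assuming MBB, I, dl and TII as in the surrounding pass:

    // Out-of-range CR-bit branch: invert it so it skips the long unconditional
    // branch that the pass emits immediately afterwards (not shown here).
    if (I->getOpcode() == PPC::BC) {
      unsigned CRBit = I->getOperand(0).getReg();
      BuildMI(MBB, I, dl, TII->get(PPC::BCn)).addReg(CRBit).addImm(2);
    } else if (I->getOpcode() == PPC::BCn) {
      unsigned CRBit = I->getOperand(0).getReg();
      BuildMI(MBB, I, dl, TII->get(PPC::BC)).addReg(CRBit).addImm(2);
    }

The immediate operand 2 is the displacement in instruction words, i.e. $PC+8, which lands just past the unconditional branch emitted next.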
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp b/contrib/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp
index e419b9b..ec1e34d 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp
@@ -23,31 +23,29 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "ctrloops"
-
#include "llvm/Transforms/Scalar.h"
-#include "llvm/ADT/Statistic.h"
+#include "PPC.h"
+#include "PPCTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/Analysis/Dominators.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/PassSupport.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
-#include "llvm/Target/TargetLibraryInfo.h"
-#include "PPCTargetMachine.h"
-#include "PPC.h"
#ifndef NDEBUG
#include "llvm/CodeGen/MachineDominators.h"
@@ -61,6 +59,8 @@
using namespace llvm;
+#define DEBUG_TYPE "ctrloops"
+
#ifndef NDEBUG
static cl::opt<int> CTRLoopLimit("ppc-max-ctrloop", cl::Hidden, cl::init(-1));
#endif
@@ -84,20 +84,20 @@ namespace {
public:
static char ID;
- PPCCTRLoops() : FunctionPass(ID), TM(0) {
+ PPCCTRLoops() : FunctionPass(ID), TM(nullptr) {
initializePPCCTRLoopsPass(*PassRegistry::getPassRegistry());
}
PPCCTRLoops(PPCTargetMachine &TM) : FunctionPass(ID), TM(&TM) {
initializePPCCTRLoopsPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<LoopInfo>();
AU.addPreserved<LoopInfo>();
- AU.addRequired<DominatorTree>();
- AU.addPreserved<DominatorTree>();
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
AU.addRequired<ScalarEvolution>();
}
@@ -109,7 +109,7 @@ namespace {
PPCTargetMachine *TM;
LoopInfo *LI;
ScalarEvolution *SE;
- DataLayout *TD;
+ const DataLayout *DL;
DominatorTree *DT;
const TargetLibraryInfo *LibInfo;
};
@@ -128,12 +128,12 @@ namespace {
initializePPCCTRLoopsVerifyPass(*PassRegistry::getPassRegistry());
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MachineDominatorTree>();
MachineFunctionPass::getAnalysisUsage(AU);
}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
private:
MachineDominatorTree *MDT;
@@ -145,7 +145,7 @@ namespace {
INITIALIZE_PASS_BEGIN(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops",
false, false)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_END(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops",
@@ -170,8 +170,9 @@ FunctionPass *llvm::createPPCCTRLoopsVerify() {
bool PPCCTRLoops::runOnFunction(Function &F) {
LI = &getAnalysis<LoopInfo>();
SE = &getAnalysis<ScalarEvolution>();
- DT = &getAnalysis<DominatorTree>();
- TD = getAnalysisIfAvailable<DataLayout>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
LibInfo = getAnalysisIfAvailable<TargetLibraryInfo>();
bool MadeChange = false;
@@ -188,7 +189,7 @@ bool PPCCTRLoops::runOnFunction(Function &F) {
static bool isLargeIntegerTy(bool Is32Bit, Type *Ty) {
if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
- return ITy->getBitWidth() > (Is32Bit ? 32 : 64);
+ return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);
return false;
}
@@ -369,6 +370,14 @@ bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
J->getOpcode() == Instruction::URem ||
J->getOpcode() == Instruction::SRem)) {
return true;
+ } else if (TT.isArch32Bit() &&
+ isLargeIntegerTy(false, J->getType()->getScalarType()) &&
+ (J->getOpcode() == Instruction::Shl ||
+ J->getOpcode() == Instruction::AShr ||
+ J->getOpcode() == Instruction::LShr)) {
+ // Only on PPC32, for 128-bit integers (specifically not 64-bit
+ // integers), these might be runtime calls.
+ return true;
} else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
// On PowerPC, indirect jumps use the counter register.
return true;
@@ -423,9 +432,9 @@ bool PPCCTRLoops::convertToCTRLoop(Loop *L) {
SmallVector<BasicBlock*, 4> ExitingBlocks;
L->getExitingBlocks(ExitingBlocks);
- BasicBlock *CountedExitBlock = 0;
- const SCEV *ExitCount = 0;
- BranchInst *CountedExitBranch = 0;
+ BasicBlock *CountedExitBlock = nullptr;
+ const SCEV *ExitCount = nullptr;
+ BranchInst *CountedExitBranch = nullptr;
for (SmallVectorImpl<BasicBlock *>::iterator I = ExitingBlocks.begin(),
IE = ExitingBlocks.end(); I != IE; ++I) {
const SCEV *EC = SE->getExitCount(L, *I);
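The PPCCTRLoops hunks above track the LLVM 3.5 analysis reshuffle: DominatorTree is now reached through DominatorTreeWrapperPass, and DataLayout through the optional DataLayoutPass. A minimal sketch of the updated runOnFunction prologue, using only the accessors visible in the hunk:

    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
    DL = DLP ? &DLP->getDataLayout() : nullptr;  // DL may legitimately be null
    LibInfo = getAnalysisIfAvailable<TargetLibraryInfo>();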
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCCallingConv.td b/contrib/llvm/lib/Target/PowerPC/PPCCallingConv.td
index e8e7f4c..222760a 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCCallingConv.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCCallingConv.td
@@ -15,6 +15,8 @@
/// CCIfSubtarget - Match if the current subtarget has a feature F.
class CCIfSubtarget<string F, CCAction A>
: CCIf<!strconcat("State.getTarget().getSubtarget<PPCSubtarget>().", F), A>;
+class CCIfNotSubtarget<string F, CCAction A>
+ : CCIf<!strconcat("!State.getTarget().getSubtarget<PPCSubtarget>().", F), A>;
//===----------------------------------------------------------------------===//
// Return Value Calling Convention
@@ -23,17 +25,24 @@ class CCIfSubtarget<string F, CCAction A>
// Return-value convention for PowerPC
def RetCC_PPC : CallingConv<[
// On PPC64, integer return values are always promoted to i64
- CCIfType<[i32], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
+ CCIfType<[i32, i1], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
+ CCIfType<[i1], CCIfNotSubtarget<"isPPC64()", CCPromoteToType<i32>>>,
CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>,
CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6]>>,
CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,
+
+ // Floating point types returned as "direct" go into F1 .. F8; note that
+ // only the ELFv2 ABI fully utilizes all these registers.
+ CCIfType<[f32], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
+ CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
- CCIfType<[f32], CCAssignToReg<[F1, F2]>>,
- CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4]>>,
-
- // Vector types are always returned in V2.
- CCIfType<[v16i8, v8i16, v4i32, v4f32], CCAssignToReg<[V2]>>
+ // Vector types returned as "direct" go into V2 .. V9; note that only the
+ // ELFv2 ABI fully utilizes all these registers.
+ CCIfType<[v16i8, v8i16, v4i32, v4f32],
+ CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>,
+ CCIfType<[v2f64, v2i64],
+ CCAssignToReg<[VSH2, VSH3, VSH4, VSH5, VSH6, VSH7, VSH8, VSH9]>>
]>;
@@ -46,6 +55,7 @@ def RetCC_PPC : CallingConv<[
// Only handle ints and floats. All ints are promoted to i64.
// Vector types and quadword ints are not handled.
def CC_PPC64_ELF_FIS : CallingConv<[
+ CCIfType<[i1], CCPromoteToType<i64>>,
CCIfType<[i8], CCPromoteToType<i64>>,
CCIfType<[i16], CCPromoteToType<i64>>,
CCIfType<[i32], CCPromoteToType<i64>>,
@@ -58,14 +68,18 @@ def CC_PPC64_ELF_FIS : CallingConv<[
// and multiple register returns are "supported" to avoid compile
// errors, but none are handled by the fast selector.
def RetCC_PPC64_ELF_FIS : CallingConv<[
+ CCIfType<[i1], CCPromoteToType<i64>>,
CCIfType<[i8], CCPromoteToType<i64>>,
CCIfType<[i16], CCPromoteToType<i64>>,
CCIfType<[i32], CCPromoteToType<i64>>,
CCIfType<[i64], CCAssignToReg<[X3, X4]>>,
CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,
- CCIfType<[f32], CCAssignToReg<[F1, F2]>>,
- CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4]>>,
- CCIfType<[v16i8, v8i16, v4i32, v4f32], CCAssignToReg<[V2]>>
+ CCIfType<[f32], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
+ CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
+ CCIfType<[v16i8, v8i16, v4i32, v4f32],
+ CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>,
+ CCIfType<[v2f64, v2i64],
+ CCAssignToReg<[VSH2, VSH3, VSH4, VSH5, VSH6, VSH7, VSH8, VSH9]>>
]>;
//===----------------------------------------------------------------------===//
@@ -73,6 +87,8 @@ def RetCC_PPC64_ELF_FIS : CallingConv<[
//===----------------------------------------------------------------------===//
def CC_PPC32_SVR4_Common : CallingConv<[
+ CCIfType<[i1], CCPromoteToType<i32>>,
+
// The ABI requires i64 to be passed in two adjacent registers with the first
// register having an odd register number.
CCIfType<[i32], CCIfSplit<CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>,
@@ -97,7 +113,7 @@ def CC_PPC32_SVR4_Common : CallingConv<[
CCIfType<[f32,f64], CCAssignToStack<8, 8>>,
// Vectors get 16-byte stack slots that are 16-byte aligned.
- CCIfType<[v16i8, v8i16, v4i32, v4f32], CCAssignToStack<16, 16>>
+ CCIfType<[v16i8, v8i16, v4i32, v4f32, v2f64, v2i64], CCAssignToStack<16, 16>>
]>;
// This calling convention puts vector arguments always on the stack. It is used
@@ -113,6 +129,9 @@ def CC_PPC32_SVR4 : CallingConv<[
// The first 12 Vector arguments are passed in AltiVec registers.
CCIfType<[v16i8, v8i16, v4i32, v4f32],
CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9, V10, V11, V12, V13]>>,
+ CCIfType<[v2f64, v2i64],
+ CCAssignToReg<[VSH2, VSH3, VSH4, VSH5, VSH6, VSH7, VSH8, VSH9,
+ VSH10, VSH11, VSH12, VSH13]>>,
CCDelegateTo<CC_PPC32_SVR4_Common>
]>;
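The calling-convention hunks above promote i1 values before register assignment and let "direct" float and vector return values use F1-F8 / V2-V9 (plus the VSH* VSX registers), which the ELFv2 ABI can fully exploit. On the C++ side these TableGen-generated conventions are consumed through CCState, as the fast-isel hunks below do; a minimal sketch, assuming CC, IsVarArg, FuncInfo, TM, Context, ArgVTs and ArgFlags are already populated as in that file:

    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, TM, ArgLocs, *Context);
    // CC_PPC64_ELF_FIS is the TableGen-generated routine defined above;
    // with this change it promotes i1 to i64 before assigning registers.
    CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_PPC64_ELF_FIS);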
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCCodeEmitter.cpp b/contrib/llvm/lib/Target/PowerPC/PPCCodeEmitter.cpp
index 418736e..0875523 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCCodeEmitter.cpp
@@ -32,7 +32,7 @@ namespace {
JITCodeEmitter &MCE;
MachineModuleInfo *MMI;
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MachineModuleInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -73,11 +73,13 @@ namespace {
unsigned getTLSRegEncoding(const MachineInstr &MI, unsigned OpNo) const;
unsigned getTLSCallEncoding(const MachineInstr &MI, unsigned OpNo) const;
- const char *getPassName() const { return "PowerPC Machine Code Emitter"; }
+ const char *getPassName() const override {
+ return "PowerPC Machine Code Emitter";
+ }
/// runOnMachineFunction - emits the given MachineFunction to memory
///
- bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
/// emitBasicBlock - emits the given MachineBasicBlock to memory
///
@@ -102,7 +104,7 @@ bool PPCCodeEmitter::runOnMachineFunction(MachineFunction &MF) {
MMI = &getAnalysis<MachineModuleInfo>();
MCE.setModuleInfo(MMI);
do {
- MovePCtoLROffset = 0;
+ MovePCtoLROffset = nullptr;
MCE.startFunction(MF);
for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB)
emitBasicBlock(*BB);
@@ -121,7 +123,8 @@ void PPCCodeEmitter::emitBasicBlock(MachineBasicBlock &MBB) {
default:
MCE.emitWordBE(getBinaryCodeForInstr(MI));
break;
- case TargetOpcode::PROLOG_LABEL:
+ case TargetOpcode::CFI_INSTRUCTION:
+ break;
case TargetOpcode::EH_LABEL:
MCE.emitLabel(MI.getOperand(0).getMCSymbol());
break;
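The old JIT code emitter above simply skips CFI_INSTRUCTION, the pseudo-op that replaced PROLOG_LABEL in LLVM 3.5. On the regular code-gen path, frame moves are now attached as CFI indices on such pseudo-instructions rather than hung off temporary labels, as the PPCFrameLowering hunks further down show. A condensed sketch of that pattern, assuming MMI, MBB, MBBI, dl, TII and NegFrameSize as in those hunks:

    unsigned CFIIndex = MMI.addFrameInst(
        MCCFIInstruction::createDefCfaOffset(nullptr, NegFrameSize));
    // The CFI_INSTRUCTION pseudo carries the index; it produces no machine code.
    BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);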
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/contrib/llvm/lib/Target/PowerPC/PPCFastISel.cpp
index 970c804..2e524d6 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -13,12 +13,11 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "ppcfastisel"
#include "PPC.h"
+#include "MCTargetDesc/PPCPredicates.h"
#include "PPCISelLowering.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
-#include "MCTargetDesc/PPCPredicates.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FastISel.h"
@@ -28,12 +27,12 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
@@ -58,6 +57,8 @@
//===----------------------------------------------------------------------===//
using namespace llvm;
+#define DEBUG_TYPE "ppcfastisel"
+
namespace {
typedef struct Address {
@@ -80,12 +81,12 @@ typedef struct Address {
}
} Address;
-class PPCFastISel : public FastISel {
+class PPCFastISel final : public FastISel {
const TargetMachine &TM;
const TargetInstrInfo &TII;
const TargetLowering &TLI;
- const PPCSubtarget &PPCSubTarget;
+ const PPCSubtarget *PPCSubTarget;
LLVMContext *Context;
public:
@@ -95,31 +96,29 @@ class PPCFastISel : public FastISel {
TM(FuncInfo.MF->getTarget()),
TII(*TM.getInstrInfo()),
TLI(*TM.getTargetLowering()),
- PPCSubTarget(
- *((static_cast<const PPCTargetMachine *>(&TM))->getSubtargetImpl())
- ),
+ PPCSubTarget(&TM.getSubtarget<PPCSubtarget>()),
Context(&FuncInfo.Fn->getContext()) { }
// Backend specific FastISel code.
private:
- virtual bool TargetSelectInstruction(const Instruction *I);
- virtual unsigned TargetMaterializeConstant(const Constant *C);
- virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
- virtual bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
- const LoadInst *LI);
- virtual bool FastLowerArguments();
- virtual unsigned FastEmit_i(MVT Ty, MVT RetTy, unsigned Opc, uint64_t Imm);
- virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC,
- unsigned Op0, bool Op0IsKill,
- uint64_t Imm);
- virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC,
- unsigned Op0, bool Op0IsKill);
- virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC,
- unsigned Op0, bool Op0IsKill,
- unsigned Op1, bool Op1IsKill);
+ bool TargetSelectInstruction(const Instruction *I) override;
+ unsigned TargetMaterializeConstant(const Constant *C) override;
+ unsigned TargetMaterializeAlloca(const AllocaInst *AI) override;
+ bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
+ const LoadInst *LI) override;
+ bool FastLowerArguments() override;
+ unsigned FastEmit_i(MVT Ty, MVT RetTy, unsigned Opc, uint64_t Imm) override;
+ unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ unsigned Op0, bool Op0IsKill,
+ uint64_t Imm);
+ unsigned FastEmitInst_r(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ unsigned Op0, bool Op0IsKill);
+ unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ unsigned Op0, bool Op0IsKill,
+ unsigned Op1, bool Op1IsKill);
// Instruction selection routines.
private:
@@ -127,7 +126,6 @@ class PPCFastISel : public FastISel {
bool SelectStore(const Instruction *I);
bool SelectBranch(const Instruction *I);
bool SelectIndirectBr(const Instruction *I);
- bool SelectCmp(const Instruction *I);
bool SelectFPExt(const Instruction *I);
bool SelectFPTrunc(const Instruction *I);
bool SelectIToFP(const Instruction *I, bool IsSigned);
@@ -283,7 +281,7 @@ bool PPCFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
// Given a value Obj, create an Address object Addr that represents its
// address. Return false if we can't handle it.
bool PPCFastISel::PPCComputeAddress(const Value *Obj, Address &Addr) {
- const User *U = NULL;
+ const User *U = nullptr;
unsigned Opcode = Instruction::UserOp1;
if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
// Don't walk into other basic blocks unless the object is an alloca from
@@ -325,11 +323,11 @@ bool PPCFastISel::PPCComputeAddress(const Value *Obj, Address &Addr) {
II != IE; ++II, ++GTI) {
const Value *Op = *II;
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
- const StructLayout *SL = TD.getStructLayout(STy);
+ const StructLayout *SL = DL.getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
TmpOffset += SL->getElementOffset(Idx);
} else {
- uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
+ uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
for (;;) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
// Constant-offset addressing.
@@ -407,7 +405,7 @@ void PPCFastISel::PPCSimplifyAddress(Address &Addr, MVT VT, bool &UseOffset,
// register and continue. This should almost never happen.
if (!UseOffset && Addr.BaseType == Address::FrameIndexBase) {
unsigned ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDI8),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDI8),
ResultReg).addFrameIndex(Addr.Base.FI).addImm(0);
Addr.Base.Reg = ResultReg;
Addr.BaseType = Address::RegBase;
@@ -499,13 +497,13 @@ bool PPCFastISel::PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
MachineMemOperand::MOLoad, MFI.getObjectSize(Addr.Base.FI),
MFI.getObjectAlignment(Addr.Base.FI));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addImm(Addr.Offset).addFrameIndex(Addr.Base.FI).addMemOperand(MMO);
// Base reg with offset in range.
} else if (UseOffset) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addImm(Addr.Offset).addReg(Addr.Base.Reg);
// Indexed form.
@@ -529,7 +527,7 @@ bool PPCFastISel::PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
case PPC::LFS: Opc = PPC::LFSX; break;
case PPC::LFD: Opc = PPC::LFDX; break;
}
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(Addr.Base.Reg).addReg(IndexReg);
}
@@ -557,7 +555,7 @@ bool PPCFastISel::SelectLoad(const Instruction *I) {
// to constrain RA from using R0/X0 when this is not legal.
unsigned AssignedReg = FuncInfo.ValueMap[I];
const TargetRegisterClass *RC =
- AssignedReg ? MRI.getRegClass(AssignedReg) : 0;
+ AssignedReg ? MRI.getRegClass(AssignedReg) : nullptr;
unsigned ResultReg = 0;
if (!PPCEmitLoad(VT, ResultReg, Addr, RC))
@@ -615,12 +613,15 @@ bool PPCFastISel::PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr) {
MachineMemOperand::MOStore, MFI.getObjectSize(Addr.Base.FI),
MFI.getObjectAlignment(Addr.Base.FI));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc)).addReg(SrcReg)
- .addImm(Addr.Offset).addFrameIndex(Addr.Base.FI).addMemOperand(MMO);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
+ .addReg(SrcReg)
+ .addImm(Addr.Offset)
+ .addFrameIndex(Addr.Base.FI)
+ .addMemOperand(MMO);
// Base reg with offset in range.
} else if (UseOffset)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
.addReg(SrcReg).addImm(Addr.Offset).addReg(Addr.Base.Reg);
// Indexed form.
@@ -640,7 +641,7 @@ bool PPCFastISel::PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr) {
case PPC::STFS: Opc = PPC::STFSX; break;
case PPC::STFD: Opc = PPC::STFDX; break;
}
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
.addReg(SrcReg).addReg(Addr.Base.Reg).addReg(IndexReg);
}
@@ -704,9 +705,9 @@ bool PPCFastISel::SelectBranch(const Instruction *I) {
CondReg))
return false;
- BuildMI(*BrBB, FuncInfo.InsertPt, DL, TII.get(PPC::BCC))
+ BuildMI(*BrBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::BCC))
.addImm(PPCPred).addReg(CondReg).addMBB(TBB);
- FastEmitBranch(FBB, DL);
+ FastEmitBranch(FBB, DbgLoc);
FuncInfo.MBB->addSuccessor(TBB);
return true;
@@ -714,7 +715,7 @@ bool PPCFastISel::SelectBranch(const Instruction *I) {
dyn_cast<ConstantInt>(BI->getCondition())) {
uint64_t Imm = CI->getZExtValue();
MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
- FastEmitBranch(Target, DL);
+ FastEmitBranch(Target, DbgLoc);
return true;
}
@@ -737,6 +738,9 @@ bool PPCFastISel::PPCEmitCmp(const Value *SrcValue1, const Value *SrcValue2,
return false;
MVT SrcVT = SrcEVT.getSimpleVT();
+ if (SrcVT == MVT::i1 && PPCSubTarget->useCRBits())
+ return false;
+
// See if operand 2 is an immediate encodeable in the compare.
// FIXME: Operands are not in canonical order at -O0, so an immediate
// operand in position 1 is a lost opportunity for now. We are
@@ -811,10 +815,10 @@ bool PPCFastISel::PPCEmitCmp(const Value *SrcValue1, const Value *SrcValue2,
}
if (!UseImm)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc), DestReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc), DestReg)
.addReg(SrcReg1).addReg(SrcReg2);
else
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc), DestReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc), DestReg)
.addReg(SrcReg1).addImm(Imm);
return true;
@@ -853,7 +857,7 @@ bool PPCFastISel::SelectFPTrunc(const Instruction *I) {
// Round the result to single precision.
unsigned DestReg = createResultReg(&PPC::F4RCRegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::FRSP), DestReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::FRSP), DestReg)
.addReg(SrcReg);
UpdateValueMap(I, DestReg);
@@ -895,7 +899,7 @@ unsigned PPCFastISel::PPCMoveToFPReg(MVT SrcVT, unsigned SrcReg,
if (!IsSigned) {
LoadOpc = PPC::LFIWZX;
Addr.Offset = 4;
- } else if (PPCSubTarget.hasLFIWAX()) {
+ } else if (PPCSubTarget->hasLFIWAX()) {
LoadOpc = PPC::LFIWAX;
Addr.Offset = 4;
}
@@ -936,7 +940,7 @@ bool PPCFastISel::SelectIToFP(const Instruction *I, bool IsSigned) {
// We can only lower an unsigned convert if we have the newer
// floating-point conversion operations.
- if (!IsSigned && !PPCSubTarget.hasFPCVT())
+ if (!IsSigned && !PPCSubTarget->hasFPCVT())
return false;
// FIXME: For now we require the newer floating-point conversion operations
@@ -944,7 +948,7 @@ bool PPCFastISel::SelectIToFP(const Instruction *I, bool IsSigned) {
// to single-precision float. Otherwise we have to generate a lot of
// fiddly code to avoid double rounding. If necessary, the fiddly code
// can be found in PPCTargetLowering::LowerINT_TO_FP().
- if (DstVT == MVT::f32 && !PPCSubTarget.hasFPCVT())
+ if (DstVT == MVT::f32 && !PPCSubTarget->hasFPCVT())
return false;
// Extend the input if necessary.
@@ -972,7 +976,7 @@ bool PPCFastISel::SelectIToFP(const Instruction *I, bool IsSigned) {
Opc = IsSigned ? PPC::FCFID : PPC::FCFIDU;
// Generate the convert.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
.addReg(FPReg);
UpdateValueMap(I, DestReg);
@@ -1007,7 +1011,7 @@ unsigned PPCFastISel::PPCMoveToIntReg(const Instruction *I, MVT VT,
// to determine the required register class.
unsigned AssignedReg = FuncInfo.ValueMap[I];
const TargetRegisterClass *RC =
- AssignedReg ? MRI.getRegClass(AssignedReg) : 0;
+ AssignedReg ? MRI.getRegClass(AssignedReg) : nullptr;
unsigned ResultReg = 0;
if (!PPCEmitLoad(VT, ResultReg, Addr, RC, !IsSigned))
@@ -1027,7 +1031,7 @@ bool PPCFastISel::SelectFPToI(const Instruction *I, bool IsSigned) {
return false;
// If we don't have FCTIDUZ and we need it, punt to SelectionDAG.
- if (DstVT == MVT::i64 && !IsSigned && !PPCSubTarget.hasFPCVT())
+ if (DstVT == MVT::i64 && !IsSigned && !PPCSubTarget->hasFPCVT())
return false;
Value *Src = I->getOperand(0);
@@ -1048,7 +1052,7 @@ bool PPCFastISel::SelectFPToI(const Instruction *I, bool IsSigned) {
const TargetRegisterClass *InRC = MRI.getRegClass(SrcReg);
if (InRC == &PPC::F4RCRegClass) {
unsigned TmpReg = createResultReg(&PPC::F8RCRegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY_TO_REGCLASS), TmpReg)
.addReg(SrcReg).addImm(PPC::F8RCRegClassID);
SrcReg = TmpReg;
@@ -1063,12 +1067,12 @@ bool PPCFastISel::SelectFPToI(const Instruction *I, bool IsSigned) {
if (IsSigned)
Opc = PPC::FCTIWZ;
else
- Opc = PPCSubTarget.hasFPCVT() ? PPC::FCTIWUZ : PPC::FCTIDZ;
+ Opc = PPCSubTarget->hasFPCVT() ? PPC::FCTIWUZ : PPC::FCTIDZ;
else
Opc = IsSigned ? PPC::FCTIDZ : PPC::FCTIDUZ;
// Generate the convert.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
.addReg(SrcReg);
// Now move the integer value from a float register to an integer register.
@@ -1161,8 +1165,10 @@ bool PPCFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
}
if (UseImm) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
- .addReg(SrcReg1).addImm(Imm);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
+ ResultReg)
+ .addReg(SrcReg1)
+ .addImm(Imm);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -1177,7 +1183,7 @@ bool PPCFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
if (ISDOpcode == ISD::SUB)
std::swap(SrcReg1, SrcReg2);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(SrcReg1).addReg(SrcReg2);
UpdateValueMap(I, ResultReg);
return true;
@@ -1195,6 +1201,13 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
bool IsVarArg) {
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, TM, ArgLocs, *Context);
+
+ // Reserve space for the linkage area on the stack.
+ bool isELFv2ABI = PPCSubTarget->isELFv2ABI();
+ unsigned LinkageSize = PPCFrameLowering::getLinkageSize(true, false,
+ isELFv2ABI);
+ CCInfo.AllocateStack(LinkageSize, 8);
+
CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_PPC64_ELF_FIS);
// Bail out if we can't handle any of the arguments.
@@ -1204,7 +1217,7 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
// Skip vector arguments for now, as well as long double and
// uint128_t, and anything that isn't passed in a register.
- if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64 ||
+ if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64 || ArgVT == MVT::i1 ||
!VA.isRegLoc() || VA.needsCustom())
return false;
@@ -1216,8 +1229,16 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
// Get a count of how many bytes are to be pushed onto the stack.
NumBytes = CCInfo.getNextStackOffset();
+ // The prolog code of the callee may store up to 8 GPR argument registers to
+  // the stack, allowing va_start to index over them in memory if it is varargs.
+ // Because we cannot tell if this is needed on the caller side, we have to
+ // conservatively assume that it is needed. As such, make sure we have at
+ // least enough stack space for the caller to store the 8 GPRs.
+ // FIXME: On ELFv2, it may be unnecessary to allocate the parameter area.
+ NumBytes = std::max(NumBytes, LinkageSize + 64);
+
// Issue CALLSEQ_START.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TII.getCallFrameSetupOpcode()))
.addImm(NumBytes);
@@ -1276,9 +1297,9 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
++NextGPR;
} else
ArgReg = NextGPR++;
-
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ArgReg).addReg(Arg);
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ArgReg).addReg(Arg);
RegArgs.push_back(ArgReg);
}
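The processCallArgs hunks above reserve the linkage area up front and then pad the outgoing argument area to cover the eight GPR home slots, because the caller cannot know whether a varargs callee will spill them. A condensed sketch of that sizing logic, reusing only the calls shown in the hunks:

    bool isELFv2ABI = PPCSubTarget->isELFv2ABI();
    unsigned LinkageSize =
        PPCFrameLowering::getLinkageSize(true, false, isELFv2ABI);
    CCInfo.AllocateStack(LinkageSize, 8);   // linkage area comes first
    CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_PPC64_ELF_FIS);
    NumBytes = CCInfo.getNextStackOffset();
    // Conservatively leave room for the callee to home the 8 GPR arg registers.
    NumBytes = std::max(NumBytes, LinkageSize + 64);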
@@ -1291,7 +1312,7 @@ void PPCFastISel::finishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
const Instruction *I, CallingConv::ID CC,
unsigned &NumBytes, bool IsVarArg) {
// Issue CallSEQ_END.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TII.getCallFrameDestroyOpcode()))
.addImm(NumBytes).addImm(0);
@@ -1321,14 +1342,14 @@ void PPCFastISel::finishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
const TargetRegisterClass *CpyRC = TLI.getRegClassFor(CopyVT);
ResultReg = createResultReg(CpyRC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(SourcePhysReg);
// If necessary, round the floating result to single precision.
} else if (CopyVT == MVT::f64) {
ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::FRSP),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::FRSP),
ResultReg).addReg(SourcePhysReg);
// If only the low half of a general register is needed, generate
@@ -1339,7 +1360,7 @@ void PPCFastISel::finishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
ResultReg = createResultReg(&PPC::GPRCRegClass);
// Convert physical register from G8RC to GPRC.
SourcePhysReg -= PPC::X0 - PPC::R0;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(SourcePhysReg);
}
@@ -1446,7 +1467,7 @@ bool PPCFastISel::SelectCall(const Instruction *I) {
if (Arg == 0)
return false;
- unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
+ unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
Flags.setOrigAlign(OriginalAlignment);
Args.push_back(*II);
@@ -1471,7 +1492,7 @@ bool PPCFastISel::SelectCall(const Instruction *I) {
// Build direct call with NOP for TOC restore.
// FIXME: We can and should optimize away the NOP for local calls.
- MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(PPC::BL8_NOP));
// Add callee.
MIB.addGlobalAddress(GV);
@@ -1480,6 +1501,10 @@ bool PPCFastISel::SelectCall(const Instruction *I) {
for (unsigned II = 0, IE = RegArgs.size(); II != IE; ++II)
MIB.addReg(RegArgs[II], RegState::Implicit);
+ // Direct calls in the ELFv2 ABI need the TOC register live into the call.
+ if (PPCSubTarget->isELFv2ABI())
+ MIB.addReg(PPC::X2, RegState::Implicit);
+
// Add a register mask with the call-preserved registers. Proper
// defs for return values will be added by setPhysRegsDeadExcept().
MIB.addRegMask(TRI.getCallPreservedMask(CC));
@@ -1528,8 +1553,8 @@ bool PPCFastISel::SelectRet(const Instruction *I) {
const Constant *C = cast<Constant>(RV);
unsigned SrcReg = PPCMaterializeInt(C, MVT::i64);
unsigned RetReg = ValLocs[0].getLocReg();
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- RetReg).addReg(SrcReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), RetReg).addReg(SrcReg);
RetRegs.push_back(RetReg);
} else {
@@ -1584,14 +1609,14 @@ bool PPCFastISel::SelectRet(const Instruction *I) {
}
}
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), RetRegs[i])
.addReg(SrcReg);
}
}
}
- MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(PPC::BLR));
for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
@@ -1621,7 +1646,7 @@ bool PPCFastISel::PPCEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
assert(DestVT == MVT::i64 && "Signed extend from i32 to i32??");
Opc = PPC::EXTSW_32_64;
}
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
.addReg(SrcReg);
// Unsigned 32-bit extensions use RLWINM.
@@ -1633,7 +1658,7 @@ bool PPCFastISel::PPCEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
assert(SrcVT == MVT::i16 && "Unsigned extend from i32 to i32??");
MB = 16;
}
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::RLWINM),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::RLWINM),
DestReg)
.addReg(SrcReg).addImm(/*SH=*/0).addImm(MB).addImm(/*ME=*/31);
@@ -1646,7 +1671,7 @@ bool PPCFastISel::PPCEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
MB = 48;
else
MB = 32;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(PPC::RLDICL_32_64), DestReg)
.addReg(SrcReg).addImm(/*SH=*/0).addImm(MB);
}
@@ -1660,9 +1685,9 @@ bool PPCFastISel::SelectIndirectBr(const Instruction *I) {
if (AddrReg == 0)
return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::MTCTR8))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::MTCTR8))
.addReg(AddrReg);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::BCTR8));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::BCTR8));
const IndirectBrInst *IB = cast<IndirectBrInst>(I);
for (unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i)
@@ -1690,7 +1715,8 @@ bool PPCFastISel::SelectTrunc(const Instruction *I) {
// The only interesting case is when we need to switch register classes.
if (SrcVT == MVT::i64) {
unsigned ResultReg = createResultReg(&PPC::GPRCRegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY),
ResultReg).addReg(SrcReg, 0, PPC::sub_32);
SrcReg = ResultReg;
}
@@ -1797,7 +1823,7 @@ unsigned PPCFastISel::PPCMaterializeFP(const ConstantFP *CFP, MVT VT) {
return 0;
// All FP constants are loaded from the constant pool.
- unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
+ unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
assert(Align > 0 && "Unexpectedly missing alignment information!");
unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
@@ -1813,25 +1839,25 @@ unsigned PPCFastISel::PPCMaterializeFP(const ConstantFP *CFP, MVT VT) {
// For small code model, generate a LF[SD](0, LDtocCPT(Idx, X2)).
if (CModel == CodeModel::Small || CModel == CodeModel::JITDefault) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::LDtocCPT),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtocCPT),
TmpReg)
.addConstantPoolIndex(Idx).addReg(PPC::X2);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
.addImm(0).addReg(TmpReg).addMemOperand(MMO);
} else {
// Otherwise we generate LF[SD](Idx[lo], ADDIStocHA(X2, Idx)).
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDIStocHA),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDIStocHA),
TmpReg).addReg(PPC::X2).addConstantPoolIndex(Idx);
// But for large code model, we must generate a LDtocL followed
// by the LF[SD].
if (CModel == CodeModel::Large) {
unsigned TmpReg2 = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::LDtocL),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtocL),
TmpReg2).addConstantPoolIndex(Idx).addReg(TmpReg);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
.addImm(0).addReg(TmpReg2);
} else
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
.addConstantPoolIndex(Idx, 0, PPCII::MO_TOC_LO)
.addReg(TmpReg)
.addMemOperand(MMO);
@@ -1855,25 +1881,20 @@ unsigned PPCFastISel::PPCMaterializeGV(const GlobalValue *GV, MVT VT) {
// FIXME: Jump tables are not yet required because fast-isel doesn't
// handle switches; if that changes, we need them as well. For now,
// what follows assumes everything's a generic (or TLS) global address.
- const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
- if (!GVar) {
- // If GV is an alias, use the aliasee for determining thread-locality.
- if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
- GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false));
- }
// FIXME: We don't yet handle the complexity of TLS.
- bool IsTLS = GVar && GVar->isThreadLocal();
- if (IsTLS)
+ if (GV->isThreadLocal())
return 0;
// For small code model, generate a simple TOC load.
if (CModel == CodeModel::Small || CModel == CodeModel::JITDefault)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::LDtoc), DestReg)
- .addGlobalAddress(GV).addReg(PPC::X2);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtoc),
+ DestReg)
+ .addGlobalAddress(GV)
+ .addReg(PPC::X2);
else {
- // If the address is an externally defined symbol, a symbol with
- // common or externally available linkage, a function address, or a
+ // If the address is an externally defined symbol, a symbol with common
+ // or externally available linkage, a non-local function address, or a
// jump table address (not yet needed), or if we are generating code
// for large code model, we generate:
// LDtocL(GV, ADDIStocHA(%X2, GV))
@@ -1881,20 +1902,21 @@ unsigned PPCFastISel::PPCMaterializeGV(const GlobalValue *GV, MVT VT) {
// ADDItocL(ADDIStocHA(%X2, GV), GV)
// Either way, start with the ADDIStocHA:
unsigned HighPartReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDIStocHA),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDIStocHA),
HighPartReg).addReg(PPC::X2).addGlobalAddress(GV);
- // !GVar implies a function address. An external variable is one
- // without an initializer.
// If/when switches are implemented, jump tables should be handled
// on the "if" path here.
- if (CModel == CodeModel::Large || !GVar || !GVar->hasInitializer() ||
- GVar->hasCommonLinkage() || GVar->hasAvailableExternallyLinkage())
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::LDtocL),
+ if (CModel == CodeModel::Large ||
+ (GV->getType()->getElementType()->isFunctionTy() &&
+ (GV->isDeclaration() || GV->isWeakForLinker())) ||
+ GV->isDeclaration() || GV->hasCommonLinkage() ||
+ GV->hasAvailableExternallyLinkage())
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtocL),
DestReg).addGlobalAddress(GV).addReg(HighPartReg);
else
// Otherwise generate the ADDItocL.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDItocL),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDItocL),
DestReg).addReg(HighPartReg).addGlobalAddress(GV);
}
@@ -1912,21 +1934,21 @@ unsigned PPCFastISel::PPCMaterialize32BitInt(int64_t Imm,
bool IsGPRC = RC->hasSuperClassEq(&PPC::GPRCRegClass);
if (isInt<16>(Imm))
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(IsGPRC ? PPC::LI : PPC::LI8), ResultReg)
.addImm(Imm);
else if (Lo) {
// Both Lo and Hi have nonzero bits.
unsigned TmpReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(IsGPRC ? PPC::LIS : PPC::LIS8), TmpReg)
.addImm(Hi);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(IsGPRC ? PPC::ORI : PPC::ORI8), ResultReg)
.addReg(TmpReg).addImm(Lo);
} else
// Just Hi bits.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(IsGPRC ? PPC::LIS : PPC::LIS8), ResultReg)
.addImm(Hi);
@@ -1966,7 +1988,7 @@ unsigned PPCFastISel::PPCMaterialize64BitInt(int64_t Imm,
unsigned TmpReg2;
if (Imm) {
TmpReg2 = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::RLDICR),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::RLDICR),
TmpReg2).addReg(TmpReg1).addImm(Shift).addImm(63 - Shift);
} else
TmpReg2 = TmpReg1;
@@ -1974,14 +1996,14 @@ unsigned PPCFastISel::PPCMaterialize64BitInt(int64_t Imm,
unsigned TmpReg3, Hi, Lo;
if ((Hi = (Remainder >> 16) & 0xFFFF)) {
TmpReg3 = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ORIS8),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ORIS8),
TmpReg3).addReg(TmpReg2).addImm(Hi);
} else
TmpReg3 = TmpReg2;
if ((Lo = Remainder & 0xFFFF)) {
unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ORI8),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ORI8),
ResultReg).addReg(TmpReg3).addImm(Lo);
return ResultReg;
}
@@ -1993,6 +2015,15 @@ unsigned PPCFastISel::PPCMaterialize64BitInt(int64_t Imm,
// Materialize an integer constant into a register, and return
// the register number (or zero if we failed to handle it).
unsigned PPCFastISel::PPCMaterializeInt(const Constant *C, MVT VT) {
+ // If we're using CR bit registers for i1 values, handle that as a special
+ // case first.
+ if (VT == MVT::i1 && PPCSubTarget->useCRBits()) {
+ const ConstantInt *CI = cast<ConstantInt>(C);
+ unsigned ImmReg = createResultReg(&PPC::CRBITRCRegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(CI->isZero() ? PPC::CRUNSET : PPC::CRSET), ImmReg);
+ return ImmReg;
+ }
if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16 &&
VT != MVT::i8 && VT != MVT::i1)
@@ -2006,7 +2037,7 @@ unsigned PPCFastISel::PPCMaterializeInt(const Constant *C, MVT VT) {
if (isInt<16>(CI->getSExtValue())) {
unsigned Opc = (VT == MVT::i64) ? PPC::LI8 : PPC::LI;
unsigned ImmReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ImmReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ImmReg)
.addImm(CI->getSExtValue());
return ImmReg;
}
@@ -2055,7 +2086,7 @@ unsigned PPCFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
if (SI != FuncInfo.StaticAllocaMap.end()) {
unsigned ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDI8),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDI8),
ResultReg).addFrameIndex(SI->second).addImm(0);
return ResultReg;
}
@@ -2134,7 +2165,7 @@ bool PPCFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
unsigned ResultReg = MI->getOperand(0).getReg();
- if (!PPCEmitLoad(VT, ResultReg, Addr, 0, IsZExt))
+ if (!PPCEmitLoad(VT, ResultReg, Addr, nullptr, IsZExt))
return false;
MI->eraseFromParent();
@@ -2158,6 +2189,15 @@ unsigned PPCFastISel::FastEmit_i(MVT Ty, MVT VT, unsigned Opc, uint64_t Imm) {
if (Opc != ISD::Constant)
return 0;
+ // If we're using CR bit registers for i1 values, handle that as a special
+ // case first.
+ if (VT == MVT::i1 && PPCSubTarget->useCRBits()) {
+ unsigned ImmReg = createResultReg(&PPC::CRBITRCRegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Imm == 0 ? PPC::CRUNSET : PPC::CRSET), ImmReg);
+ return ImmReg;
+ }
+
if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16 &&
VT != MVT::i8 && VT != MVT::i1)
return 0;
@@ -2237,6 +2277,6 @@ namespace llvm {
if (Subtarget->isPPC64() && Subtarget->isSVR4ABI())
return new PPCFastISel(FuncInfo, LibInfo);
- return 0;
+ return nullptr;
}
}
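Within the fast-isel hunks above, the recurring DL to DbgLoc and TD to DL renames follow upstream API changes; the main functional addition is that i1 values can now live in CR bit registers. A minimal sketch of that constant-materialization path, assuming CR-bit mode is enabled on the subtarget, as in PPCMaterializeInt above:

    if (VT == MVT::i1 && PPCSubTarget->useCRBits()) {
      const ConstantInt *CI = cast<ConstantInt>(C);
      unsigned ImmReg = createResultReg(&PPC::CRBITRCRegClass);
      // CRSET sets the bit for a true constant, CRUNSET clears it for false.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(CI->isZero() ? PPC::CRUNSET : PPC::CRSET), ImmReg);
      return ImmReg;
    }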
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
index 407fdc6..b2577a9 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -15,6 +15,7 @@
#include "PPCInstrBuilder.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
+#include "PPCSubtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -35,6 +36,167 @@ static const uint16_t VRRegNo[] = {
PPC::V24, PPC::V25, PPC::V26, PPC::V27, PPC::V28, PPC::V29, PPC::V30, PPC::V31
};
+PPCFrameLowering::PPCFrameLowering(const PPCSubtarget &STI)
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown,
+ (STI.hasQPX() || STI.isBGQ()) ? 32 : 16, 0),
+ Subtarget(STI) {}
+
+// With the SVR4 ABI, callee-saved registers have fixed offsets on the stack.
+const PPCFrameLowering::SpillSlot *PPCFrameLowering::getCalleeSavedSpillSlots(
+ unsigned &NumEntries) const {
+ if (Subtarget.isDarwinABI()) {
+ NumEntries = 1;
+ if (Subtarget.isPPC64()) {
+ static const SpillSlot darwin64Offsets = {PPC::X31, -8};
+ return &darwin64Offsets;
+ } else {
+ static const SpillSlot darwinOffsets = {PPC::R31, -4};
+ return &darwinOffsets;
+ }
+ }
+
+ // Early exit if not using the SVR4 ABI.
+ if (!Subtarget.isSVR4ABI()) {
+ NumEntries = 0;
+ return nullptr;
+ }
+
+ // Note that the offsets here overlap, but this is fixed up in
+ // processFunctionBeforeFrameFinalized.
+
+ static const SpillSlot Offsets[] = {
+ // Floating-point register save area offsets.
+ {PPC::F31, -8},
+ {PPC::F30, -16},
+ {PPC::F29, -24},
+ {PPC::F28, -32},
+ {PPC::F27, -40},
+ {PPC::F26, -48},
+ {PPC::F25, -56},
+ {PPC::F24, -64},
+ {PPC::F23, -72},
+ {PPC::F22, -80},
+ {PPC::F21, -88},
+ {PPC::F20, -96},
+ {PPC::F19, -104},
+ {PPC::F18, -112},
+ {PPC::F17, -120},
+ {PPC::F16, -128},
+ {PPC::F15, -136},
+ {PPC::F14, -144},
+
+ // General register save area offsets.
+ {PPC::R31, -4},
+ {PPC::R30, -8},
+ {PPC::R29, -12},
+ {PPC::R28, -16},
+ {PPC::R27, -20},
+ {PPC::R26, -24},
+ {PPC::R25, -28},
+ {PPC::R24, -32},
+ {PPC::R23, -36},
+ {PPC::R22, -40},
+ {PPC::R21, -44},
+ {PPC::R20, -48},
+ {PPC::R19, -52},
+ {PPC::R18, -56},
+ {PPC::R17, -60},
+ {PPC::R16, -64},
+ {PPC::R15, -68},
+ {PPC::R14, -72},
+
+ // CR save area offset. We map each of the nonvolatile CR fields
+ // to the slot for CR2, which is the first of the nonvolatile CR
+ // fields to be assigned, so that we only allocate one save slot.
+ // See PPCRegisterInfo::hasReservedSpillSlot() for more information.
+ {PPC::CR2, -4},
+
+ // VRSAVE save area offset.
+ {PPC::VRSAVE, -4},
+
+ // Vector register save area
+ {PPC::V31, -16},
+ {PPC::V30, -32},
+ {PPC::V29, -48},
+ {PPC::V28, -64},
+ {PPC::V27, -80},
+ {PPC::V26, -96},
+ {PPC::V25, -112},
+ {PPC::V24, -128},
+ {PPC::V23, -144},
+ {PPC::V22, -160},
+ {PPC::V21, -176},
+ {PPC::V20, -192}};
+
+ static const SpillSlot Offsets64[] = {
+ // Floating-point register save area offsets.
+ {PPC::F31, -8},
+ {PPC::F30, -16},
+ {PPC::F29, -24},
+ {PPC::F28, -32},
+ {PPC::F27, -40},
+ {PPC::F26, -48},
+ {PPC::F25, -56},
+ {PPC::F24, -64},
+ {PPC::F23, -72},
+ {PPC::F22, -80},
+ {PPC::F21, -88},
+ {PPC::F20, -96},
+ {PPC::F19, -104},
+ {PPC::F18, -112},
+ {PPC::F17, -120},
+ {PPC::F16, -128},
+ {PPC::F15, -136},
+ {PPC::F14, -144},
+
+ // General register save area offsets.
+ {PPC::X31, -8},
+ {PPC::X30, -16},
+ {PPC::X29, -24},
+ {PPC::X28, -32},
+ {PPC::X27, -40},
+ {PPC::X26, -48},
+ {PPC::X25, -56},
+ {PPC::X24, -64},
+ {PPC::X23, -72},
+ {PPC::X22, -80},
+ {PPC::X21, -88},
+ {PPC::X20, -96},
+ {PPC::X19, -104},
+ {PPC::X18, -112},
+ {PPC::X17, -120},
+ {PPC::X16, -128},
+ {PPC::X15, -136},
+ {PPC::X14, -144},
+
+ // VRSAVE save area offset.
+ {PPC::VRSAVE, -4},
+
+ // Vector register save area
+ {PPC::V31, -16},
+ {PPC::V30, -32},
+ {PPC::V29, -48},
+ {PPC::V28, -64},
+ {PPC::V27, -80},
+ {PPC::V26, -96},
+ {PPC::V25, -112},
+ {PPC::V24, -128},
+ {PPC::V23, -144},
+ {PPC::V22, -160},
+ {PPC::V21, -176},
+ {PPC::V20, -192}};
+
+ if (Subtarget.isPPC64()) {
+ NumEntries = array_lengthof(Offsets64);
+
+ return Offsets64;
+ } else {
+ NumEntries = array_lengthof(Offsets);
+
+ return Offsets;
+ }
+}
+
/// RemoveVRSaveCode - We have found that this function does not need any code
/// to manipulate the VRSAVE register, even though it uses vector registers.
/// This can happen when the only registers used are known to be live in or out
@@ -222,7 +384,7 @@ unsigned PPCFrameLowering::determineFrameLayout(MachineFunction &MF,
if (!DisableRedZone &&
(Subtarget.isPPC64() || // 32-bit SVR4, no stack-
!Subtarget.isSVR4ABI() || // allocated locals.
- FrameSize == 0) &&
+ FrameSize == 0) &&
FrameSize <= 224 && // Fits in red zone.
!MFI->hasVarSizedObjects() && // No dynamic alloca.
!MFI->adjustsStack() && // No calls.
@@ -236,9 +398,10 @@ unsigned PPCFrameLowering::determineFrameLayout(MachineFunction &MF,
// Get the maximum call frame size of all the calls.
unsigned maxCallFrameSize = MFI->getMaxCallFrameSize();
- // Maximum call frame needs to be at least big enough for linkage and 8 args.
- unsigned minCallFrameSize = getMinCallFrameSize(Subtarget.isPPC64(),
- Subtarget.isDarwinABI());
+ // Maximum call frame needs to be at least big enough for linkage area.
+ unsigned minCallFrameSize = getLinkageSize(Subtarget.isPPC64(),
+ Subtarget.isDarwinABI(),
+ Subtarget.isELFv2ABI());
maxCallFrameSize = std::max(maxCallFrameSize, minCallFrameSize);
// If we have dynamic alloca then maxCallFrameSize needs to be aligned so
@@ -281,8 +444,8 @@ bool PPCFrameLowering::needsFP(const MachineFunction &MF) const {
// Naked functions have no stack frame pushed, so we don't have a frame
// pointer.
- if (MF.getFunction()->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
- Attribute::Naked))
+ if (MF.getFunction()->getAttributes().hasAttribute(
+ AttributeSet::FunctionIndex, Attribute::Naked))
return false;
return MF.getTarget().Options.DisableFramePointerElim(MF) ||
@@ -299,7 +462,7 @@ void PPCFrameLowering::replaceFPWithRealFP(MachineFunction &MF) const {
const PPCRegisterInfo *RegInfo =
static_cast<const PPCRegisterInfo*>(MF.getTarget().getRegisterInfo());
bool HasBP = RegInfo->hasBasePointer(MF);
- unsigned BPReg = HasBP ? (unsigned) RegInfo->getBaseRegister(MF): FPReg;
+ unsigned BPReg = HasBP ? (unsigned) RegInfo->getBaseRegister(MF) : FPReg;
unsigned BP8Reg = HasBP ? (unsigned) PPC::X30 : FPReg;
for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
@@ -351,12 +514,10 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
// Get the ABI.
bool isDarwinABI = Subtarget.isDarwinABI();
bool isSVR4ABI = Subtarget.isSVR4ABI();
+ bool isELFv2ABI = Subtarget.isELFv2ABI();
assert((isDarwinABI || isSVR4ABI) &&
"Currently only Darwin and SVR4 ABIs are supported for PowerPC.");
- // Prepare for frame info.
- MCSymbol *FrameLabel = 0;
-
// Scan the prolog, looking for an UPDATE_VRSAVE instruction. If we find it,
// process it.
if (!isSVR4ABI)
@@ -430,7 +591,8 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
assert(FPIndex && "No Frame Pointer Save Slot!");
FPOffset = FFI->getObjectOffset(FPIndex);
} else {
- FPOffset = PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI);
+ FPOffset =
+ PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI);
}
}
@@ -466,6 +628,9 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
"Prologue CR saving supported only in 64-bit mode");
if (!MustSaveCRs.empty()) { // will only occur for PPC64
+ // FIXME: In the ELFv2 ABI, we are not required to save all CR fields.
+ // If only one or two CR fields are clobbered, it could be more
+ // efficient to use mfocrf to selectively save just those fields.
MachineInstrBuilder MIB =
BuildMI(MBB, MBBI, dl, TII.get(PPC::MFCR8), TempReg);
for (unsigned i = 0, e = MustSaveCRs.size(); i != e; ++i)
@@ -564,36 +729,38 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
// Add the "machine moves" for the instructions we generated above, but in
// reverse order.
if (needsFrameMoves) {
- // Mark effective beginning of when frame pointer becomes valid.
- FrameLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl, TII.get(PPC::PROLOG_LABEL)).addSym(FrameLabel);
-
// Show update of SP.
assert(NegFrameSize);
- MMI.addFrameInst(
- MCCFIInstruction::createDefCfaOffset(FrameLabel, NegFrameSize));
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, NegFrameSize));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
if (HasFP) {
unsigned Reg = MRI->getDwarfRegNum(FPReg, true);
- MMI.addFrameInst(
- MCCFIInstruction::createOffset(FrameLabel, Reg, FPOffset));
+ CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createOffset(nullptr, Reg, FPOffset));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
}
if (HasBP) {
unsigned Reg = MRI->getDwarfRegNum(BPReg, true);
- MMI.addFrameInst(
- MCCFIInstruction::createOffset(FrameLabel, Reg, BPOffset));
+ CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createOffset(nullptr, Reg, BPOffset));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
}
if (MustSaveLR) {
unsigned Reg = MRI->getDwarfRegNum(LRReg, true);
- MMI.addFrameInst(
- MCCFIInstruction::createOffset(FrameLabel, Reg, LROffset));
+ CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createOffset(nullptr, Reg, LROffset));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
}
}
- MCSymbol *ReadyLabel = 0;
-
// If there is a frame pointer, copy R1 into R31
if (HasFP) {
BuildMI(MBB, MBBI, dl, OrInst, FPReg)
@@ -601,19 +768,17 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
.addReg(SPReg);
if (needsFrameMoves) {
- ReadyLabel = MMI.getContext().CreateTempSymbol();
-
// Mark effective beginning of when frame pointer is ready.
- BuildMI(MBB, MBBI, dl, TII.get(PPC::PROLOG_LABEL)).addSym(ReadyLabel);
-
unsigned Reg = MRI->getDwarfRegNum(FPReg, true);
- MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(ReadyLabel, Reg));
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaRegister(nullptr, Reg));
+
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
}
}
if (needsFrameMoves) {
- MCSymbol *Label = HasFP ? ReadyLabel : FrameLabel;
-
// Add callee saved registers to move list.
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
@@ -634,14 +799,22 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
// For 64-bit SVR4 when we have spilled CRs, the spill location
// is SP+8, not a frame-relative slot.
if (isSVR4ABI && isPPC64 && (PPC::CR2 <= Reg && Reg <= PPC::CR4)) {
- MMI.addFrameInst(MCCFIInstruction::createOffset(
- Label, MRI->getDwarfRegNum(PPC::CR2, true), 8));
+ // In the ELFv1 ABI, only CR2 is noted in CFI and stands in for
+ // the whole CR word. In the ELFv2 ABI, every CR that was
+ // actually saved gets its own CFI record.
+ unsigned CRReg = isELFv2ABI? Reg : (unsigned) PPC::CR2;
+ unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
+ nullptr, MRI->getDwarfRegNum(CRReg, true), 8));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
continue;
}
int Offset = MFI->getObjectOffset(CSI[I].getFrameIdx());
- MMI.addFrameInst(MCCFIInstruction::createOffset(
- Label, MRI->getDwarfRegNum(Reg, true), Offset));
+ unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
+ nullptr, MRI->getDwarfRegNum(Reg, true), Offset));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
}
}
}
@@ -716,7 +889,8 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
assert(FPIndex && "No Frame Pointer Save Slot!");
FPOffset = FFI->getObjectOffset(FPIndex);
} else {
- FPOffset = PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI);
+ FPOffset =
+ PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI);
}
}
@@ -937,9 +1111,9 @@ PPCFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
MFI->CreateFixedObject(-1 * TCSPDelta, TCSPDelta, true);
}
- // For 32-bit SVR4, allocate the nonvolatile CR spill slot iff the
+ // For 32-bit SVR4, allocate the nonvolatile CR spill slot iff the
// function uses CR 2, 3, or 4.
- if (!isPPC64 && !isDarwinABI &&
+ if (!isPPC64 && !isDarwinABI &&
(MRI.isPhysRegUsed(PPC::CR2) ||
MRI.isPhysRegUsed(PPC::CR3) ||
MRI.isPhysRegUsed(PPC::CR4))) {
@@ -1113,10 +1287,10 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF,
unsigned Reg = CSI[i].getReg();
if ((Subtarget.isSVR4ABI() && Reg == PPC::CR2)
- // Leave Darwin logic as-is.
- || (!Subtarget.isSVR4ABI() &&
- (PPC::CRBITRCRegClass.contains(Reg) ||
- PPC::CRRCRegClass.contains(Reg)))) {
+ // Leave Darwin logic as-is.
+ || (!Subtarget.isSVR4ABI() &&
+ (PPC::CRBITRCRegClass.contains(Reg) ||
+ PPC::CRRCRegClass.contains(Reg)))) {
int FI = CSI[i].getFrameIdx();
FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
@@ -1197,11 +1371,11 @@ PPCFrameLowering::addScavengingSpillSlot(MachineFunction &MF,
}
}
-bool
+bool
PPCFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
// Currently, this function only handles SVR4 32- and 64-bit ABIs.
// Return false otherwise to maintain pre-existing behavior.
@@ -1214,7 +1388,7 @@ PPCFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
DebugLoc DL;
bool CRSpilled = false;
MachineInstrBuilder CRMIB;
-
+
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
// Only Darwin actually uses the VRSAVE register, but it can still appear
@@ -1244,21 +1418,21 @@ PPCFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
CRSpilled = true;
FuncInfo->setSpillsCR();
- // 32-bit: FP-relative. Note that we made sure CR2-CR4 all have
- // the same frame index in PPCRegisterInfo::hasReservedSpillSlot.
- CRMIB = BuildMI(*MF, DL, TII.get(PPC::MFCR), PPC::R12)
+ // 32-bit: FP-relative. Note that we made sure CR2-CR4 all have
+ // the same frame index in PPCRegisterInfo::hasReservedSpillSlot.
+ CRMIB = BuildMI(*MF, DL, TII.get(PPC::MFCR), PPC::R12)
.addReg(Reg, RegState::ImplicitKill);
- MBB.insert(MI, CRMIB);
- MBB.insert(MI, addFrameReference(BuildMI(*MF, DL, TII.get(PPC::STW))
- .addReg(PPC::R12,
- getKillRegState(true)),
- CSI[i].getFrameIdx()));
+ MBB.insert(MI, CRMIB);
+ MBB.insert(MI, addFrameReference(BuildMI(*MF, DL, TII.get(PPC::STW))
+ .addReg(PPC::R12,
+ getKillRegState(true)),
+ CSI[i].getFrameIdx()));
}
} else {
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
TII.storeRegToStackSlot(MBB, MI, Reg, true,
- CSI[i].getFrameIdx(), RC, TRI);
+ CSI[i].getFrameIdx(), RC, TRI);
}
}
return true;
@@ -1267,8 +1441,8 @@ PPCFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
static void
restoreCRs(bool isPPC64, bool is31,
bool CR2Spilled, bool CR3Spilled, bool CR4Spilled,
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI, unsigned CSIIndex) {
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI, unsigned CSIIndex) {
MachineFunction *MF = MBB.getParent();
const PPCInstrInfo &TII =
@@ -1282,12 +1456,12 @@ restoreCRs(bool isPPC64, bool is31,
else {
// 32-bit: FP-relative
MBB.insert(MI, addFrameReference(BuildMI(*MF, DL, TII.get(PPC::LWZ),
- PPC::R12),
- CSI[CSIIndex].getFrameIdx()));
+ PPC::R12),
+ CSI[CSIIndex].getFrameIdx()));
RestoreOp = PPC::MTOCRF;
MoveReg = PPC::R12;
}
-
+
if (CR2Spilled)
MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR2)
.addReg(MoveReg, getKillRegState(!CR3Spilled && !CR4Spilled)));
@@ -1342,11 +1516,11 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MBB.erase(I);
}
-bool
+bool
PPCFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
// Currently, this function only handles SVR4 32- and 64-bit ABIs.
// Return false otherwise to maintain pre-existing behavior.
@@ -1394,20 +1568,20 @@ PPCFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
// When we first encounter a non-CR register after seeing at
// least one CR register, restore all spilled CRs together.
if ((CR2Spilled || CR3Spilled || CR4Spilled)
- && !(PPC::CR2 <= Reg && Reg <= PPC::CR4)) {
+ && !(PPC::CR2 <= Reg && Reg <= PPC::CR4)) {
bool is31 = needsFP(*MF);
restoreCRs(Subtarget.isPPC64(), is31,
CR2Spilled, CR3Spilled, CR4Spilled,
- MBB, I, CSI, CSIIndex);
- CR2Spilled = CR3Spilled = CR4Spilled = false;
+ MBB, I, CSI, CSIIndex);
+ CR2Spilled = CR3Spilled = CR4Spilled = false;
}
// Default behavior for non-CR saves.
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
TII.loadRegFromStackSlot(MBB, I, Reg, CSI[i].getFrameIdx(),
- RC, TRI);
+ RC, TRI);
assert(I != MBB.begin() &&
- "loadRegFromStackSlot didn't insert any code!");
+ "loadRegFromStackSlot didn't insert any code!");
}
// Insert in reverse order.
@@ -1416,16 +1590,15 @@ PPCFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
else {
I = BeforeI;
++I;
- }
+ }
}
// If we haven't yet spilled the CRs, do so now.
if (CR2Spilled || CR3Spilled || CR4Spilled) {
- bool is31 = needsFP(*MF);
+ bool is31 = needsFP(*MF);
restoreCRs(Subtarget.isPPC64(), is31, CR2Spilled, CR3Spilled, CR4Spilled,
- MBB, I, CSI, CSIIndex);
+ MBB, I, CSI, CSIIndex);
}
return true;
}
-
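
For orientation, the hunk at the top of this file's diff preserves the test that lets small leaf functions skip frame allocation entirely and keep locals in the 224-byte red zone. Below is a minimal standalone sketch of that eligibility check; it is illustrative only, covers just the conditions visible in that hunk, and the struct and function names are hypothetical rather than LLVM's.

#include <cstdint>
#include <iostream>

// Hypothetical, simplified inputs; the real check reads MachineFrameInfo.
struct FrameFacts {
  bool isPPC64;            // 64-bit targets always have a red zone
  bool isSVR4ABI;          // 32-bit SVR4 has no red zone for locals
  uint64_t frameSize;      // bytes of stack-allocated locals
  bool hasVarSizedObjects; // dynamic alloca present
  bool adjustsStack;       // function makes calls
};

// True when no stack frame needs to be allocated: there are no stack-allocated
// locals (or the ABI provides a red zone), the frame fits in the 224-byte red
// zone, and there is no dynamic alloca and no call.
static bool canSkipFrameAllocation(const FrameFacts &F) {
  return (F.isPPC64 || !F.isSVR4ABI || F.frameSize == 0) &&
         F.frameSize <= 224 && !F.hasVarSizedObjects && !F.adjustsStack;
}

int main() {
  FrameFacts Leaf{/*isPPC64=*/true, /*isSVR4ABI=*/true, /*frameSize=*/128,
                  /*hasVarSizedObjects=*/false, /*adjustsStack=*/false};
  std::cout << std::boolalpha << canSkipFrameAllocation(Leaf) << '\n'; // true
}
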
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.h b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.h
index bd7350e..c0c7d24 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.h
@@ -14,23 +14,18 @@
#define POWERPC_FRAMEINFO_H
#include "PPC.h"
-#include "PPCSubtarget.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
namespace llvm {
- class PPCSubtarget;
+class PPCSubtarget;
class PPCFrameLowering: public TargetFrameLowering {
const PPCSubtarget &Subtarget;
public:
- PPCFrameLowering(const PPCSubtarget &sti)
- : TargetFrameLowering(TargetFrameLowering::StackGrowsDown,
- (sti.hasQPX() || sti.isBGQ()) ? 32 : 16, 0),
- Subtarget(sti) {
- }
+ PPCFrameLowering(const PPCSubtarget &STI);
unsigned determineFrameLayout(MachineFunction &MF,
bool UpdateMF = true,
@@ -38,37 +33,37 @@ public:
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+ void emitPrologue(MachineFunction &MF) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
- bool hasFP(const MachineFunction &MF) const;
+ bool hasFP(const MachineFunction &MF) const override;
bool needsFP(const MachineFunction &MF) const;
void replaceFPWithRealFP(MachineFunction &MF) const;
void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS = NULL) const;
+ RegScavenger *RS = nullptr) const override;
void processFunctionBeforeFrameFinalized(MachineFunction &MF,
- RegScavenger *RS = NULL) const;
+ RegScavenger *RS = nullptr) const override;
void addScavengingSpillSlot(MachineFunction &MF, RegScavenger *RS) const;
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo *TRI) const override;
void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const override;
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const override;
/// targetHandlesStackFrameRounding - Returns true if the target is
/// responsible for rounding up the stack frame (probably at emitPrologue
/// time).
- bool targetHandlesStackFrameRounding() const { return true; }
+ bool targetHandlesStackFrameRounding() const override { return true; }
/// getReturnSaveOffset - Return the previous frame offset to save the
/// return address.
@@ -79,6 +74,12 @@ public:
return isPPC64 ? 16 : 4;
}
+ /// getTOCSaveOffset - Return the previous frame offset to save the
+ /// TOC register -- 64-bit SVR4 ABI only.
+ static unsigned getTOCSaveOffset(bool isELFv2ABI) {
+ return isELFv2ABI ? 24 : 40;
+ }
+
/// getFramePointerSaveOffset - Return the previous frame offset to save the
/// frame pointer.
static unsigned getFramePointerSaveOffset(bool isPPC64, bool isDarwinABI) {
@@ -108,198 +109,18 @@ public:
/// getLinkageSize - Return the size of the PowerPC ABI linkage area.
///
- static unsigned getLinkageSize(bool isPPC64, bool isDarwinABI) {
+ static unsigned getLinkageSize(bool isPPC64, bool isDarwinABI,
+ bool isELFv2ABI) {
if (isDarwinABI || isPPC64)
- return 6 * (isPPC64 ? 8 : 4);
+ return (isELFv2ABI ? 4 : 6) * (isPPC64 ? 8 : 4);
// SVR4 ABI:
return 8;
}
- /// getMinCallArgumentsSize - Return the size of the minium PowerPC ABI
- /// argument area.
- static unsigned getMinCallArgumentsSize(bool isPPC64, bool isDarwinABI) {
- // For the Darwin ABI / 64-bit SVR4 ABI:
- // The prolog code of the callee may store up to 8 GPR argument registers to
- // the stack, allowing va_start to index over them in memory if its varargs.
- // Because we cannot tell if this is needed on the caller side, we have to
- // conservatively assume that it is needed. As such, make sure we have at
- // least enough stack space for the caller to store the 8 GPRs.
- if (isDarwinABI || isPPC64)
- return 8 * (isPPC64 ? 8 : 4);
-
- // 32-bit SVR4 ABI:
- // There is no default stack allocated for the 8 first GPR arguments.
- return 0;
- }
-
- /// getMinCallFrameSize - Return the minimum size a call frame can be using
- /// the PowerPC ABI.
- static unsigned getMinCallFrameSize(bool isPPC64, bool isDarwinABI) {
- // The call frame needs to be at least big enough for linkage and 8 args.
- return getLinkageSize(isPPC64, isDarwinABI) +
- getMinCallArgumentsSize(isPPC64, isDarwinABI);
- }
-
- // With the SVR4 ABI, callee-saved registers have fixed offsets on the stack.
const SpillSlot *
- getCalleeSavedSpillSlots(unsigned &NumEntries) const {
- if (Subtarget.isDarwinABI()) {
- NumEntries = 1;
- if (Subtarget.isPPC64()) {
- static const SpillSlot darwin64Offsets = {PPC::X31, -8};
- return &darwin64Offsets;
- } else {
- static const SpillSlot darwinOffsets = {PPC::R31, -4};
- return &darwinOffsets;
- }
- }
-
- // Early exit if not using the SVR4 ABI.
- if (!Subtarget.isSVR4ABI()) {
- NumEntries = 0;
- return 0;
- }
-
- // Note that the offsets here overlap, but this is fixed up in
- // processFunctionBeforeFrameFinalized.
-
- static const SpillSlot Offsets[] = {
- // Floating-point register save area offsets.
- {PPC::F31, -8},
- {PPC::F30, -16},
- {PPC::F29, -24},
- {PPC::F28, -32},
- {PPC::F27, -40},
- {PPC::F26, -48},
- {PPC::F25, -56},
- {PPC::F24, -64},
- {PPC::F23, -72},
- {PPC::F22, -80},
- {PPC::F21, -88},
- {PPC::F20, -96},
- {PPC::F19, -104},
- {PPC::F18, -112},
- {PPC::F17, -120},
- {PPC::F16, -128},
- {PPC::F15, -136},
- {PPC::F14, -144},
-
- // General register save area offsets.
- {PPC::R31, -4},
- {PPC::R30, -8},
- {PPC::R29, -12},
- {PPC::R28, -16},
- {PPC::R27, -20},
- {PPC::R26, -24},
- {PPC::R25, -28},
- {PPC::R24, -32},
- {PPC::R23, -36},
- {PPC::R22, -40},
- {PPC::R21, -44},
- {PPC::R20, -48},
- {PPC::R19, -52},
- {PPC::R18, -56},
- {PPC::R17, -60},
- {PPC::R16, -64},
- {PPC::R15, -68},
- {PPC::R14, -72},
-
- // CR save area offset. We map each of the nonvolatile CR fields
- // to the slot for CR2, which is the first of the nonvolatile CR
- // fields to be assigned, so that we only allocate one save slot.
- // See PPCRegisterInfo::hasReservedSpillSlot() for more information.
- {PPC::CR2, -4},
-
- // VRSAVE save area offset.
- {PPC::VRSAVE, -4},
-
- // Vector register save area
- {PPC::V31, -16},
- {PPC::V30, -32},
- {PPC::V29, -48},
- {PPC::V28, -64},
- {PPC::V27, -80},
- {PPC::V26, -96},
- {PPC::V25, -112},
- {PPC::V24, -128},
- {PPC::V23, -144},
- {PPC::V22, -160},
- {PPC::V21, -176},
- {PPC::V20, -192}
- };
-
- static const SpillSlot Offsets64[] = {
- // Floating-point register save area offsets.
- {PPC::F31, -8},
- {PPC::F30, -16},
- {PPC::F29, -24},
- {PPC::F28, -32},
- {PPC::F27, -40},
- {PPC::F26, -48},
- {PPC::F25, -56},
- {PPC::F24, -64},
- {PPC::F23, -72},
- {PPC::F22, -80},
- {PPC::F21, -88},
- {PPC::F20, -96},
- {PPC::F19, -104},
- {PPC::F18, -112},
- {PPC::F17, -120},
- {PPC::F16, -128},
- {PPC::F15, -136},
- {PPC::F14, -144},
-
- // General register save area offsets.
- {PPC::X31, -8},
- {PPC::X30, -16},
- {PPC::X29, -24},
- {PPC::X28, -32},
- {PPC::X27, -40},
- {PPC::X26, -48},
- {PPC::X25, -56},
- {PPC::X24, -64},
- {PPC::X23, -72},
- {PPC::X22, -80},
- {PPC::X21, -88},
- {PPC::X20, -96},
- {PPC::X19, -104},
- {PPC::X18, -112},
- {PPC::X17, -120},
- {PPC::X16, -128},
- {PPC::X15, -136},
- {PPC::X14, -144},
-
- // VRSAVE save area offset.
- {PPC::VRSAVE, -4},
-
- // Vector register save area
- {PPC::V31, -16},
- {PPC::V30, -32},
- {PPC::V29, -48},
- {PPC::V28, -64},
- {PPC::V27, -80},
- {PPC::V26, -96},
- {PPC::V25, -112},
- {PPC::V24, -128},
- {PPC::V23, -144},
- {PPC::V22, -160},
- {PPC::V21, -176},
- {PPC::V20, -192}
- };
-
- if (Subtarget.isPPC64()) {
- NumEntries = array_lengthof(Offsets64);
-
- return Offsets64;
- } else {
- NumEntries = array_lengthof(Offsets);
-
- return Offsets;
- }
- }
+ getCalleeSavedSpillSlots(unsigned &NumEntries) const override;
};
-
} // End llvm namespace
#endif
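
For reference, a small standalone program reproducing the linkage-area and TOC-save arithmetic introduced in the header above: the ELFv2 ABI shrinks the 64-bit linkage area from 6 to 4 doublewords (48 to 32 bytes) and moves the TOC save slot from offset 40 to offset 24. This is only a sketch mirroring getLinkageSize() and getTOCSaveOffset(); it is not part of the patch.

#include <iostream>

static unsigned linkageSize(bool isPPC64, bool isDarwinABI, bool isELFv2ABI) {
  if (isDarwinABI || isPPC64)
    return (isELFv2ABI ? 4 : 6) * (isPPC64 ? 8 : 4);
  return 8; // 32-bit SVR4
}

static unsigned tocSaveOffset(bool isELFv2ABI) {
  return isELFv2ABI ? 24 : 40;
}

int main() {
  std::cout << "ELFv1 64-bit linkage area: " << linkageSize(true, false, false)
            << " bytes, TOC saved at offset " << tocSaveOffset(false) << '\n'; // 48, 40
  std::cout << "ELFv2 64-bit linkage area: " << linkageSize(true, false, true)
            << " bytes, TOC saved at offset " << tocSaveOffset(true) << '\n';  // 32, 24
}
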
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp b/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
index 0df50e1..d9b242c 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
@@ -11,38 +11,226 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "pre-RA-sched"
#include "PPCHazardRecognizers.h"
#include "PPC.h"
#include "PPCInstrInfo.h"
+#include "PPCTargetMachine.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-//===----------------------------------------------------------------------===//
-// PowerPC Scoreboard Hazard Recognizer
-void PPCScoreboardHazardRecognizer::EmitInstruction(SUnit *SU) {
+#define DEBUG_TYPE "pre-RA-sched"
+
+bool PPCDispatchGroupSBHazardRecognizer::isLoadAfterStore(SUnit *SU) {
+ // FIXME: Move this.
+ if (isBCTRAfterSet(SU))
+ return true;
+
const MCInstrDesc *MCID = DAG->getInstrDesc(SU);
if (!MCID)
- // This is a PPC pseudo-instruction.
- return;
+ return false;
+
+ if (!MCID->mayLoad())
+ return false;
+
+ // SU is a load; for any predecessors in this dispatch group that are stores
+ // and with which we have an ordering dependency, return true.
+ for (unsigned i = 0, ie = (unsigned) SU->Preds.size(); i != ie; ++i) {
+ const MCInstrDesc *PredMCID = DAG->getInstrDesc(SU->Preds[i].getSUnit());
+ if (!PredMCID || !PredMCID->mayStore())
+ continue;
+
+ if (!SU->Preds[i].isNormalMemory() && !SU->Preds[i].isBarrier())
+ continue;
+
+ for (unsigned j = 0, je = CurGroup.size(); j != je; ++j)
+ if (SU->Preds[i].getSUnit() == CurGroup[j])
+ return true;
+ }
+
+ return false;
+}
+
+bool PPCDispatchGroupSBHazardRecognizer::isBCTRAfterSet(SUnit *SU) {
+ const MCInstrDesc *MCID = DAG->getInstrDesc(SU);
+ if (!MCID)
+ return false;
+
+ if (!MCID->isBranch())
+ return false;
+
+ // SU is a branch; for any predecessors in this dispatch group with which we
+ // have a data dependence and which set the counter register, return true.
+ for (unsigned i = 0, ie = (unsigned) SU->Preds.size(); i != ie; ++i) {
+ const MCInstrDesc *PredMCID = DAG->getInstrDesc(SU->Preds[i].getSUnit());
+ if (!PredMCID || PredMCID->getSchedClass() != PPC::Sched::IIC_SprMTSPR)
+ continue;
+
+ if (SU->Preds[i].isCtrl())
+ continue;
+
+ for (unsigned j = 0, je = CurGroup.size(); j != je; ++j)
+ if (SU->Preds[i].getSUnit() == CurGroup[j])
+ return true;
+ }
- ScoreboardHazardRecognizer::EmitInstruction(SU);
+ return false;
+}
+
+// FIXME: Remove this when we don't need this:
+namespace llvm { namespace PPC { extern int getNonRecordFormOpcode(uint16_t); } }
+
+// FIXME: A lot of code in PPCDispatchGroupSBHazardRecognizer is P7 specific.
+
+bool PPCDispatchGroupSBHazardRecognizer::mustComeFirst(const MCInstrDesc *MCID,
+ unsigned &NSlots) {
+ // FIXME: Indirectly, this information is contained in the itinerary, and
+ // we should derive it from there instead of separately specifying it
+ // here.
+ unsigned IIC = MCID->getSchedClass();
+ switch (IIC) {
+ default:
+ NSlots = 1;
+ break;
+ case PPC::Sched::IIC_IntDivW:
+ case PPC::Sched::IIC_IntDivD:
+ case PPC::Sched::IIC_LdStLoadUpd:
+ case PPC::Sched::IIC_LdStLDU:
+ case PPC::Sched::IIC_LdStLFDU:
+ case PPC::Sched::IIC_LdStLFDUX:
+ case PPC::Sched::IIC_LdStLHA:
+ case PPC::Sched::IIC_LdStLHAU:
+ case PPC::Sched::IIC_LdStLWA:
+ case PPC::Sched::IIC_LdStSTDU:
+ case PPC::Sched::IIC_LdStSTFDU:
+ NSlots = 2;
+ break;
+ case PPC::Sched::IIC_LdStLoadUpdX:
+ case PPC::Sched::IIC_LdStLDUX:
+ case PPC::Sched::IIC_LdStLHAUX:
+ case PPC::Sched::IIC_LdStLWARX:
+ case PPC::Sched::IIC_LdStLDARX:
+ case PPC::Sched::IIC_LdStSTDUX:
+ case PPC::Sched::IIC_LdStSTDCX:
+ case PPC::Sched::IIC_LdStSTWCX:
+ case PPC::Sched::IIC_BrMCRX: // mtcr
+ // FIXME: Add sync/isync (here and in the itinerary).
+ NSlots = 4;
+ break;
+ }
+
+ // FIXME: record-form instructions need a different itinerary class.
+ if (NSlots == 1 && PPC::getNonRecordFormOpcode(MCID->getOpcode()) != -1)
+ NSlots = 2;
+
+ switch (IIC) {
+ default:
+ // All multi-slot instructions must come first.
+ return NSlots > 1;
+ case PPC::Sched::IIC_BrCR: // cr logicals
+ case PPC::Sched::IIC_SprMFCR:
+ case PPC::Sched::IIC_SprMFCRF:
+ case PPC::Sched::IIC_SprMTSPR:
+ return true;
+ }
}
ScheduleHazardRecognizer::HazardType
-PPCScoreboardHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
+PPCDispatchGroupSBHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
+ if (Stalls == 0 && isLoadAfterStore(SU))
+ return NoopHazard;
+
return ScoreboardHazardRecognizer::getHazardType(SU, Stalls);
}
-void PPCScoreboardHazardRecognizer::AdvanceCycle() {
- ScoreboardHazardRecognizer::AdvanceCycle();
+bool PPCDispatchGroupSBHazardRecognizer::ShouldPreferAnother(SUnit *SU) {
+ const MCInstrDesc *MCID = DAG->getInstrDesc(SU);
+ unsigned NSlots;
+ if (MCID && mustComeFirst(MCID, NSlots) && CurSlots)
+ return true;
+
+ return ScoreboardHazardRecognizer::ShouldPreferAnother(SU);
+}
+
+unsigned PPCDispatchGroupSBHazardRecognizer::PreEmitNoops(SUnit *SU) {
+ // We only need to fill out a maximum of 5 slots here: The 6th slot could
+ // only be a second branch, and otherwise the next instruction will start a
+ // new group.
+ if (isLoadAfterStore(SU) && CurSlots < 6) {
+ unsigned Directive =
+ DAG->TM.getSubtarget<PPCSubtarget>().getDarwinDirective();
+ // If we're using a special group-terminating nop, then we need only one.
+ if (Directive == PPC::DIR_PWR6 || Directive == PPC::DIR_PWR7 ||
+ Directive == PPC::DIR_PWR8 )
+ return 1;
+
+ return 5 - CurSlots;
+ }
+
+ return ScoreboardHazardRecognizer::PreEmitNoops(SU);
}
-void PPCScoreboardHazardRecognizer::Reset() {
- ScoreboardHazardRecognizer::Reset();
+void PPCDispatchGroupSBHazardRecognizer::EmitInstruction(SUnit *SU) {
+ const MCInstrDesc *MCID = DAG->getInstrDesc(SU);
+ if (MCID) {
+ if (CurSlots == 5 || (MCID->isBranch() && CurBranches == 1)) {
+ CurGroup.clear();
+ CurSlots = CurBranches = 0;
+ } else {
+ DEBUG(dbgs() << "**** Adding to dispatch group: SU(" <<
+ SU->NodeNum << "): ");
+ DEBUG(DAG->dumpNode(SU));
+
+ unsigned NSlots;
+ bool MustBeFirst = mustComeFirst(MCID, NSlots);
+
+ // If this instruction must come first, but does not, then it starts a
+ // new group.
+ if (MustBeFirst && CurSlots) {
+ CurSlots = CurBranches = 0;
+ CurGroup.clear();
+ }
+
+ CurSlots += NSlots;
+ CurGroup.push_back(SU);
+
+ if (MCID->isBranch())
+ ++CurBranches;
+ }
+ }
+
+ return ScoreboardHazardRecognizer::EmitInstruction(SU);
+}
+
+void PPCDispatchGroupSBHazardRecognizer::AdvanceCycle() {
+ return ScoreboardHazardRecognizer::AdvanceCycle();
+}
+
+void PPCDispatchGroupSBHazardRecognizer::RecedeCycle() {
+ llvm_unreachable("Bottom-up scheduling not supported");
+}
+
+void PPCDispatchGroupSBHazardRecognizer::Reset() {
+ CurGroup.clear();
+ CurSlots = CurBranches = 0;
+ return ScoreboardHazardRecognizer::Reset();
+}
+
+void PPCDispatchGroupSBHazardRecognizer::EmitNoop() {
+ unsigned Directive =
+ DAG->TM.getSubtarget<PPCSubtarget>().getDarwinDirective();
+ // If the group has now filled all of its slots, or if we're using a special
+ // group-terminating nop, the group is complete.
+ if (Directive == PPC::DIR_PWR6 || Directive == PPC::DIR_PWR7 ||
+ Directive == PPC::DIR_PWR8 || CurSlots == 6) {
+ CurGroup.clear();
+ CurSlots = CurBranches = 0;
+ } else {
+ CurGroup.push_back(nullptr);
+ ++CurSlots;
+ }
}
//===----------------------------------------------------------------------===//
@@ -71,8 +259,8 @@ void PPCScoreboardHazardRecognizer::Reset() {
// 3. Handling of the esoteric cases in "Resource-based Instruction Grouping".
//
-PPCHazardRecognizer970::PPCHazardRecognizer970(const TargetMachine &TM)
- : TM(TM) {
+PPCHazardRecognizer970::PPCHazardRecognizer970(const ScheduleDAG &DAG)
+ : DAG(DAG) {
EndDispatchGroup();
}
@@ -91,7 +279,7 @@ PPCHazardRecognizer970::GetInstrType(unsigned Opcode,
bool &isFirst, bool &isSingle,
bool &isCracked,
bool &isLoad, bool &isStore) {
- const MCInstrDesc &MCID = TM.getInstrInfo()->get(Opcode);
+ const MCInstrDesc &MCID = DAG.TII->get(Opcode);
isLoad = MCID.mayLoad();
isStore = MCID.mayStore();
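
The PreEmitNoops() logic added above pads a dispatch group whenever a load would issue in the same group as a store it depends on, so that the load starts a fresh group. The following is a standalone model of that padding policy with the slot counts hard-coded as in the hunk; it is a sketch only and does not use LLVM's types or API.

#include <iostream>

enum class Core { Generic, Power6, Power7, Power8 };

// Number of nops needed to push the next instruction into a new dispatch
// group: one group-terminating nop on POWER6/7/8, otherwise enough to fill
// the remaining non-branch slots (5 per group); none if the group is already
// full or there is no load-after-store hazard.
static unsigned noopsForNewGroup(Core C, unsigned curSlots, bool loadAfterStore) {
  if (!loadAfterStore || curSlots >= 6)
    return 0;
  if (C == Core::Power6 || C == Core::Power7 || C == Core::Power8)
    return 1;
  return 5 - curSlots;
}

int main() {
  std::cout << noopsForNewGroup(Core::Power7, 3, true) << '\n';   // 1
  std::cout << noopsForNewGroup(Core::Generic, 3, true) << '\n';  // 2
  std::cout << noopsForNewGroup(Core::Generic, 2, false) << '\n'; // 0
}
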
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.h b/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.h
index 84b8e6de..23f76c16 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.h
@@ -21,19 +21,30 @@
namespace llvm {
-/// PPCScoreboardHazardRecognizer - This class implements a scoreboard-based
-/// hazard recognizer for generic PPC processors.
-class PPCScoreboardHazardRecognizer : public ScoreboardHazardRecognizer {
+/// PPCDispatchGroupSBHazardRecognizer - This class implements a scoreboard-based
+/// hazard recognizer for PPC ooo processors with dispatch-group hazards.
+class PPCDispatchGroupSBHazardRecognizer : public ScoreboardHazardRecognizer {
const ScheduleDAG *DAG;
+ SmallVector<SUnit *, 7> CurGroup;
+ unsigned CurSlots, CurBranches;
+
+ bool isLoadAfterStore(SUnit *SU);
+ bool isBCTRAfterSet(SUnit *SU);
+ bool mustComeFirst(const MCInstrDesc *MCID, unsigned &NSlots);
public:
- PPCScoreboardHazardRecognizer(const InstrItineraryData *ItinData,
+ PPCDispatchGroupSBHazardRecognizer(const InstrItineraryData *ItinData,
const ScheduleDAG *DAG_) :
- ScoreboardHazardRecognizer(ItinData, DAG_), DAG(DAG_) {}
-
- virtual HazardType getHazardType(SUnit *SU, int Stalls);
- virtual void EmitInstruction(SUnit *SU);
- virtual void AdvanceCycle();
- virtual void Reset();
+ ScoreboardHazardRecognizer(ItinData, DAG_), DAG(DAG_),
+ CurSlots(0), CurBranches(0) {}
+
+ HazardType getHazardType(SUnit *SU, int Stalls) override;
+ bool ShouldPreferAnother(SUnit* SU) override;
+ unsigned PreEmitNoops(SUnit *SU) override;
+ void EmitInstruction(SUnit *SU) override;
+ void AdvanceCycle() override;
+ void RecedeCycle() override;
+ void Reset() override;
+ void EmitNoop() override;
};
/// PPCHazardRecognizer970 - This class defines a finite state automata that
@@ -43,7 +54,7 @@ public:
/// setting the CTR register then branching through it within a dispatch group),
/// or storing then loading from the same address within a dispatch group.
class PPCHazardRecognizer970 : public ScheduleHazardRecognizer {
- const TargetMachine &TM;
+ const ScheduleDAG &DAG;
unsigned NumIssued; // Number of insts issued, including advanced cycles.
@@ -64,11 +75,11 @@ class PPCHazardRecognizer970 : public ScheduleHazardRecognizer {
unsigned NumStores;
public:
- PPCHazardRecognizer970(const TargetMachine &TM);
- virtual HazardType getHazardType(SUnit *SU, int Stalls);
- virtual void EmitInstruction(SUnit *SU);
- virtual void AdvanceCycle();
- virtual void Reset();
+ PPCHazardRecognizer970(const ScheduleDAG &DAG);
+ virtual HazardType getHazardType(SUnit *SU, int Stalls) override;
+ virtual void EmitInstruction(SUnit *SU) override;
+ virtual void AdvanceCycle() override;
+ virtual void Reset() override;
private:
/// EndDispatchGroup - Called when we are finishing a new dispatch group.
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 733b4ad..490f6d2 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "ppc-codegen"
#include "PPC.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPCMachineFunctionInfo.h"
@@ -28,6 +27,7 @@
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
@@ -35,6 +35,12 @@
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
+#define DEBUG_TYPE "ppc-codegen"
+
+// FIXME: Remove this once the bug has been fixed!
+cl::opt<bool> ANDIGlueBug("expose-ppc-andi-glue-bug",
+cl::desc("expose the ANDI glue bug on PPC"), cl::Hidden);
+
namespace llvm {
void initializePPCDAGToDAGISelPass(PassRegistry&);
}
@@ -46,29 +52,31 @@ namespace {
///
class PPCDAGToDAGISel : public SelectionDAGISel {
const PPCTargetMachine &TM;
- const PPCTargetLowering &PPCLowering;
- const PPCSubtarget &PPCSubTarget;
+ const PPCTargetLowering *PPCLowering;
+ const PPCSubtarget *PPCSubTarget;
unsigned GlobalBaseReg;
public:
explicit PPCDAGToDAGISel(PPCTargetMachine &tm)
: SelectionDAGISel(tm), TM(tm),
- PPCLowering(*TM.getTargetLowering()),
- PPCSubTarget(*TM.getSubtargetImpl()) {
+ PPCLowering(TM.getTargetLowering()),
+ PPCSubTarget(TM.getSubtargetImpl()) {
initializePPCDAGToDAGISelPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnMachineFunction(MachineFunction &MF) {
+ bool runOnMachineFunction(MachineFunction &MF) override {
// Make sure we re-emit a set of the global base reg if necessary
GlobalBaseReg = 0;
+ PPCLowering = TM.getTargetLowering();
+ PPCSubTarget = TM.getSubtargetImpl();
SelectionDAGISel::runOnMachineFunction(MF);
- if (!PPCSubTarget.isSVR4ABI())
+ if (!PPCSubTarget->isSVR4ABI())
InsertVRSaveCode(MF);
return true;
}
- virtual void PostprocessISelDAG();
+ void PostprocessISelDAG() override;
/// getI32Imm - Return a target constant with the specified value, of type
/// i32.
@@ -84,7 +92,7 @@ namespace {
/// getSmallIPtrImm - Return a target constant of pointer type.
inline SDValue getSmallIPtrImm(unsigned Imm) {
- return CurDAG->getTargetConstant(Imm, PPCLowering.getPointerTy());
+ return CurDAG->getTargetConstant(Imm, PPCLowering->getPointerTy());
}
/// isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s
@@ -105,7 +113,7 @@ namespace {
// Select - Convert the specified operand from a target-independent to a
// target-specific node if it hasn't already been changed.
- SDNode *Select(SDNode *N);
+ SDNode *Select(SDNode *N) override;
SDNode *SelectBitfieldInsert(SDNode *N);
@@ -117,7 +125,7 @@ namespace {
/// a base register plus a signed 16-bit displacement [r+imm].
bool SelectAddrImm(SDValue N, SDValue &Disp,
SDValue &Base) {
- return PPCLowering.SelectAddressRegImm(N, Disp, Base, *CurDAG, false);
+ return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, false);
}
/// SelectAddrImmOffs - Return true if the operand is valid for a preinc
@@ -137,20 +145,20 @@ namespace {
/// represented as an indexed [r+r] operation. Returns false if it can
/// be represented by [r+imm], which are preferred.
bool SelectAddrIdx(SDValue N, SDValue &Base, SDValue &Index) {
- return PPCLowering.SelectAddressRegReg(N, Base, Index, *CurDAG);
+ return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG);
}
/// SelectAddrIdxOnly - Given the specified addressed, force it to be
/// represented as an indexed [r+r] operation.
bool SelectAddrIdxOnly(SDValue N, SDValue &Base, SDValue &Index) {
- return PPCLowering.SelectAddressRegRegOnly(N, Base, Index, *CurDAG);
+ return PPCLowering->SelectAddressRegRegOnly(N, Base, Index, *CurDAG);
}
/// SelectAddrImmX4 - Returns true if the address N can be represented by
/// a base register plus a signed 16-bit displacement that is a multiple of 4.
/// Suitable for use by STD and friends.
bool SelectAddrImmX4(SDValue N, SDValue &Disp, SDValue &Base) {
- return PPCLowering.SelectAddressRegImm(N, Disp, Base, *CurDAG, true);
+ return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, true);
}
// Select an address into a single register.
@@ -164,16 +172,16 @@ namespace {
/// a register. The case of adding a (possibly relocatable) constant to a
/// register can be improved, but it is wrong to substitute Reg+Reg for
/// Reg in an asm, because the load or store opcode would have to change.
- virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
- char ConstraintCode,
- std::vector<SDValue> &OutOps) {
+ bool SelectInlineAsmMemoryOperand(const SDValue &Op,
+ char ConstraintCode,
+ std::vector<SDValue> &OutOps) override {
OutOps.push_back(Op);
return false;
}
void InsertVRSaveCode(MachineFunction &MF);
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "PowerPC DAG->DAG Pattern Instruction Selection";
}
@@ -182,6 +190,12 @@ namespace {
private:
SDNode *SelectSETCC(SDNode *N);
+
+ void PeepholePPC64();
+ void PeepholeCROps();
+
+ bool AllUsersSelectZero(SDNode *N);
+ void SwapAllSelectUsers(SDNode *N);
};
}
@@ -261,15 +275,15 @@ SDNode *PPCDAGToDAGISel::getGlobalBaseReg() {
MachineBasicBlock::iterator MBBI = FirstMBB.begin();
DebugLoc dl;
- if (PPCLowering.getPointerTy() == MVT::i32) {
- if (PPCSubTarget.isTargetELF())
+ if (PPCLowering->getPointerTy() == MVT::i32) {
+ if (PPCSubTarget->isTargetELF())
GlobalBaseReg = PPC::R30;
else
GlobalBaseReg =
RegInfo->createVirtualRegister(&PPC::GPRC_NOR0RegClass);
BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR));
BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg);
- if (PPCSubTarget.isTargetELF()) {
+ if (PPCSubTarget->isTargetELF()) {
unsigned TempReg = RegInfo->createVirtualRegister(&PPC::GPRCRegClass);
BuildMI(FirstMBB, MBBI, dl,
TII.get(PPC::GetGBRO), TempReg).addReg(GlobalBaseReg);
@@ -284,7 +298,7 @@ SDNode *PPCDAGToDAGISel::getGlobalBaseReg() {
}
}
return CurDAG->getRegister(GlobalBaseReg,
- PPCLowering.getPointerTy()).getNode();
+ PPCLowering->getPointerTy()).getNode();
}
/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
@@ -416,8 +430,8 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
SDLoc dl(N);
APInt LKZ, LKO, RKZ, RKO;
- CurDAG->ComputeMaskedBits(Op0, LKZ, LKO);
- CurDAG->ComputeMaskedBits(Op1, RKZ, RKO);
+ CurDAG->computeKnownBits(Op0, LKZ, LKO);
+ CurDAG->computeKnownBits(Op1, RKZ, RKO);
unsigned TargetMask = LKZ.getZExtValue();
unsigned InsertMask = RKZ.getZExtValue();
@@ -460,11 +474,18 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
SH = (Op1Opc == ISD::SHL) ? Value : 32 - Value;
}
if (Op1Opc == ISD::AND) {
+ // The AND mask might not be a constant, and we need to make sure that
+ // if we're going to fold the masking with the insert, all bits not
+ // known to be zero in the mask are known to be one.
+ APInt MKZ, MKO;
+ CurDAG->computeKnownBits(Op1.getOperand(1), MKZ, MKO);
+ bool CanFoldMask = InsertMask == MKO.getZExtValue();
+
unsigned SHOpc = Op1.getOperand(0).getOpcode();
- if ((SHOpc == ISD::SHL || SHOpc == ISD::SRL) &&
+ if ((SHOpc == ISD::SHL || SHOpc == ISD::SRL) && CanFoldMask &&
isInt32Immediate(Op1.getOperand(0).getOperand(1), Value)) {
- // Note that Value must be in range here (less than 32) because
- // otherwise there would not be any bits set in InsertMask.
+ // Note that Value must be in range here (less than 32) because
+ // otherwise there would not be any bits set in InsertMask.
Op1 = Op1.getOperand(0).getOperand(0);
SH = (SHOpc == ISD::SHL) ? Value : 32 - Value;
}
@@ -476,7 +497,7 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
return CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops);
}
}
- return 0;
+ return nullptr;
}
/// SelectCC - Select a comparison of the specified values with the specified
@@ -574,7 +595,7 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
Opc = PPC::FCMPUS;
} else {
assert(LHS.getValueType() == MVT::f64 && "Unknown vt!");
- Opc = PPC::FCMPUD;
+ Opc = PPCSubTarget->hasVSX() ? PPC::XSCMPUDP : PPC::FCMPUD;
}
return SDValue(CurDAG->getMachineNode(Opc, dl, MVT::i32, LHS, RHS), 0);
}
@@ -642,85 +663,108 @@ static unsigned getCRIdxForSetCC(ISD::CondCode CC, bool &Invert) {
// getVCmpInst: return the vector compare instruction for the specified
// vector type and condition code. Since this is for altivec specific code,
// only support the altivec types (v16i8, v8i16, v4i32, and v4f32).
-static unsigned int getVCmpInst(MVT::SimpleValueType VecVT, ISD::CondCode CC) {
- switch (CC) {
- case ISD::SETEQ:
- case ISD::SETUEQ:
- case ISD::SETNE:
- case ISD::SETUNE:
- if (VecVT == MVT::v16i8)
- return PPC::VCMPEQUB;
- else if (VecVT == MVT::v8i16)
- return PPC::VCMPEQUH;
- else if (VecVT == MVT::v4i32)
- return PPC::VCMPEQUW;
- // v4f32 != v4f32 could be translate to unordered not equal
- else if (VecVT == MVT::v4f32)
- return PPC::VCMPEQFP;
- break;
- case ISD::SETLT:
- case ISD::SETGT:
- case ISD::SETLE:
- case ISD::SETGE:
- if (VecVT == MVT::v16i8)
- return PPC::VCMPGTSB;
- else if (VecVT == MVT::v8i16)
- return PPC::VCMPGTSH;
- else if (VecVT == MVT::v4i32)
- return PPC::VCMPGTSW;
- else if (VecVT == MVT::v4f32)
- return PPC::VCMPGTFP;
- break;
- case ISD::SETULT:
- case ISD::SETUGT:
- case ISD::SETUGE:
- case ISD::SETULE:
- if (VecVT == MVT::v16i8)
- return PPC::VCMPGTUB;
- else if (VecVT == MVT::v8i16)
- return PPC::VCMPGTUH;
- else if (VecVT == MVT::v4i32)
- return PPC::VCMPGTUW;
- break;
- case ISD::SETOEQ:
- if (VecVT == MVT::v4f32)
- return PPC::VCMPEQFP;
- break;
- case ISD::SETOLT:
- case ISD::SETOGT:
- case ISD::SETOLE:
- if (VecVT == MVT::v4f32)
- return PPC::VCMPGTFP;
- break;
- case ISD::SETOGE:
- if (VecVT == MVT::v4f32)
- return PPC::VCMPGEFP;
- break;
- default:
- break;
- }
- llvm_unreachable("Invalid integer vector compare condition");
-}
+static unsigned int getVCmpInst(MVT VecVT, ISD::CondCode CC,
+ bool HasVSX, bool &Swap, bool &Negate) {
+ Swap = false;
+ Negate = false;
-// getVCmpEQInst: return the equal compare instruction for the specified vector
-// type. Since this is for altivec specific code, only support the altivec
-// types (v16i8, v8i16, v4i32, and v4f32).
-static unsigned int getVCmpEQInst(MVT::SimpleValueType VecVT) {
- switch (VecVT) {
- case MVT::v16i8:
- return PPC::VCMPEQUB;
- case MVT::v8i16:
- return PPC::VCMPEQUH;
- case MVT::v4i32:
- return PPC::VCMPEQUW;
- case MVT::v4f32:
- return PPC::VCMPEQFP;
- default:
- llvm_unreachable("Invalid integer vector compare condition");
+ if (VecVT.isFloatingPoint()) {
+ /* Handle some cases by swapping input operands. */
+ switch (CC) {
+ case ISD::SETLE: CC = ISD::SETGE; Swap = true; break;
+ case ISD::SETLT: CC = ISD::SETGT; Swap = true; break;
+ case ISD::SETOLE: CC = ISD::SETOGE; Swap = true; break;
+ case ISD::SETOLT: CC = ISD::SETOGT; Swap = true; break;
+ case ISD::SETUGE: CC = ISD::SETULE; Swap = true; break;
+ case ISD::SETUGT: CC = ISD::SETULT; Swap = true; break;
+ default: break;
+ }
+ /* Handle some cases by negating the result. */
+ switch (CC) {
+ case ISD::SETNE: CC = ISD::SETEQ; Negate = true; break;
+ case ISD::SETUNE: CC = ISD::SETOEQ; Negate = true; break;
+ case ISD::SETULE: CC = ISD::SETOGT; Negate = true; break;
+ case ISD::SETULT: CC = ISD::SETOGE; Negate = true; break;
+ default: break;
+ }
+ /* We have instructions implementing the remaining cases. */
+ switch (CC) {
+ case ISD::SETEQ:
+ case ISD::SETOEQ:
+ if (VecVT == MVT::v4f32)
+ return HasVSX ? PPC::XVCMPEQSP : PPC::VCMPEQFP;
+ else if (VecVT == MVT::v2f64)
+ return PPC::XVCMPEQDP;
+ break;
+ case ISD::SETGT:
+ case ISD::SETOGT:
+ if (VecVT == MVT::v4f32)
+ return HasVSX ? PPC::XVCMPGTSP : PPC::VCMPGTFP;
+ else if (VecVT == MVT::v2f64)
+ return PPC::XVCMPGTDP;
+ break;
+ case ISD::SETGE:
+ case ISD::SETOGE:
+ if (VecVT == MVT::v4f32)
+ return HasVSX ? PPC::XVCMPGESP : PPC::VCMPGEFP;
+ else if (VecVT == MVT::v2f64)
+ return PPC::XVCMPGEDP;
+ break;
+ default:
+ break;
+ }
+ llvm_unreachable("Invalid floating-point vector compare condition");
+ } else {
+ /* Handle some cases by swapping input operands. */
+ switch (CC) {
+ case ISD::SETGE: CC = ISD::SETLE; Swap = true; break;
+ case ISD::SETLT: CC = ISD::SETGT; Swap = true; break;
+ case ISD::SETUGE: CC = ISD::SETULE; Swap = true; break;
+ case ISD::SETULT: CC = ISD::SETUGT; Swap = true; break;
+ default: break;
+ }
+ /* Handle some cases by negating the result. */
+ switch (CC) {
+ case ISD::SETNE: CC = ISD::SETEQ; Negate = true; break;
+ case ISD::SETUNE: CC = ISD::SETUEQ; Negate = true; break;
+ case ISD::SETLE: CC = ISD::SETGT; Negate = true; break;
+ case ISD::SETULE: CC = ISD::SETUGT; Negate = true; break;
+ default: break;
+ }
+ /* We have instructions implementing the remaining cases. */
+ switch (CC) {
+ case ISD::SETEQ:
+ case ISD::SETUEQ:
+ if (VecVT == MVT::v16i8)
+ return PPC::VCMPEQUB;
+ else if (VecVT == MVT::v8i16)
+ return PPC::VCMPEQUH;
+ else if (VecVT == MVT::v4i32)
+ return PPC::VCMPEQUW;
+ break;
+ case ISD::SETGT:
+ if (VecVT == MVT::v16i8)
+ return PPC::VCMPGTSB;
+ else if (VecVT == MVT::v8i16)
+ return PPC::VCMPGTSH;
+ else if (VecVT == MVT::v4i32)
+ return PPC::VCMPGTSW;
+ break;
+ case ISD::SETUGT:
+ if (VecVT == MVT::v16i8)
+ return PPC::VCMPGTUB;
+ else if (VecVT == MVT::v8i16)
+ return PPC::VCMPGTUH;
+ else if (VecVT == MVT::v4i32)
+ return PPC::VCMPGTUW;
+ break;
+ default:
+ break;
+ }
+ llvm_unreachable("Invalid integer vector compare condition");
}
}
-
SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
SDLoc dl(N);
unsigned Imm;
@@ -728,7 +772,8 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
EVT PtrVT = CurDAG->getTargetLoweringInfo().getPointerTy();
bool isPPC64 = (PtrVT == MVT::i64);
- if (isInt32Immediate(N->getOperand(1), Imm)) {
+ if (!PPCSubTarget->useCRBits() &&
+ isInt32Immediate(N->getOperand(1), Imm)) {
// We can codegen setcc op, imm very efficiently compared to a brcond.
// Check for those cases here.
// setcc op, 0
@@ -739,7 +784,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
case ISD::SETEQ: {
Op = SDValue(CurDAG->getMachineNode(PPC::CNTLZW, dl, MVT::i32, Op), 0);
SDValue Ops[] = { Op, getI32Imm(27), getI32Imm(5), getI32Imm(31) };
- return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
+ return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
}
case ISD::SETNE: {
if (isPPC64) break;
@@ -751,14 +796,14 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
}
case ISD::SETLT: {
SDValue Ops[] = { Op, getI32Imm(1), getI32Imm(31), getI32Imm(31) };
- return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
+ return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
}
case ISD::SETGT: {
SDValue T =
SDValue(CurDAG->getMachineNode(PPC::NEG, dl, MVT::i32, Op), 0);
T = SDValue(CurDAG->getMachineNode(PPC::ANDC, dl, MVT::i32, T, Op), 0);
SDValue Ops[] = { T, getI32Imm(1), getI32Imm(31), getI32Imm(31) };
- return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
+ return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
}
}
} else if (Imm == ~0U) { // setcc op, -1
@@ -788,7 +833,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
SDValue AN = SDValue(CurDAG->getMachineNode(PPC::AND, dl, MVT::i32, AD,
Op), 0);
SDValue Ops[] = { AN, getI32Imm(1), getI32Imm(31), getI32Imm(31) };
- return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
+ return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
}
case ISD::SETGT: {
SDValue Ops[] = { Op, getI32Imm(1), getI32Imm(31), getI32Imm(31) };
@@ -808,56 +853,25 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
// vector compare operations return the same type as the operands.
if (LHS.getValueType().isVector()) {
EVT VecVT = LHS.getValueType();
- MVT::SimpleValueType VT = VecVT.getSimpleVT().SimpleTy;
- unsigned int VCmpInst = getVCmpInst(VT, CC);
-
- switch (CC) {
- case ISD::SETEQ:
- case ISD::SETOEQ:
- case ISD::SETUEQ:
- return CurDAG->SelectNodeTo(N, VCmpInst, VecVT, LHS, RHS);
- case ISD::SETNE:
- case ISD::SETONE:
- case ISD::SETUNE: {
- SDValue VCmp(CurDAG->getMachineNode(VCmpInst, dl, VecVT, LHS, RHS), 0);
- return CurDAG->SelectNodeTo(N, PPC::VNOR, VecVT, VCmp, VCmp);
- }
- case ISD::SETLT:
- case ISD::SETOLT:
- case ISD::SETULT:
- return CurDAG->SelectNodeTo(N, VCmpInst, VecVT, RHS, LHS);
- case ISD::SETGT:
- case ISD::SETOGT:
- case ISD::SETUGT:
- return CurDAG->SelectNodeTo(N, VCmpInst, VecVT, LHS, RHS);
- case ISD::SETGE:
- case ISD::SETOGE:
- case ISD::SETUGE: {
- // Small optimization: Altivec provides a 'Vector Compare Greater Than
- // or Equal To' instruction (vcmpgefp), so in this case there is no
- // need for extra logic for the equal compare.
- if (VecVT.getSimpleVT().isFloatingPoint()) {
- return CurDAG->SelectNodeTo(N, VCmpInst, VecVT, LHS, RHS);
- } else {
- SDValue VCmpGT(CurDAG->getMachineNode(VCmpInst, dl, VecVT, LHS, RHS), 0);
- unsigned int VCmpEQInst = getVCmpEQInst(VT);
- SDValue VCmpEQ(CurDAG->getMachineNode(VCmpEQInst, dl, VecVT, LHS, RHS), 0);
- return CurDAG->SelectNodeTo(N, PPC::VOR, VecVT, VCmpGT, VCmpEQ);
- }
- }
- case ISD::SETLE:
- case ISD::SETOLE:
- case ISD::SETULE: {
- SDValue VCmpLE(CurDAG->getMachineNode(VCmpInst, dl, VecVT, RHS, LHS), 0);
- unsigned int VCmpEQInst = getVCmpEQInst(VT);
- SDValue VCmpEQ(CurDAG->getMachineNode(VCmpEQInst, dl, VecVT, LHS, RHS), 0);
- return CurDAG->SelectNodeTo(N, PPC::VOR, VecVT, VCmpLE, VCmpEQ);
- }
- default:
- llvm_unreachable("Invalid vector compare type: should be expanded by legalize");
+ bool Swap, Negate;
+ unsigned int VCmpInst = getVCmpInst(VecVT.getSimpleVT(), CC,
+ PPCSubTarget->hasVSX(), Swap, Negate);
+ if (Swap)
+ std::swap(LHS, RHS);
+
+ if (Negate) {
+ SDValue VCmp(CurDAG->getMachineNode(VCmpInst, dl, VecVT, LHS, RHS), 0);
+ return CurDAG->SelectNodeTo(N, PPCSubTarget->hasVSX() ? PPC::XXLNOR :
+ PPC::VNOR,
+ VecVT, VCmp, VCmp);
}
+
+ return CurDAG->SelectNodeTo(N, VCmpInst, VecVT, LHS, RHS);
}
+ if (PPCSubTarget->useCRBits())
+ return nullptr;
+
bool Inv;
unsigned Idx = getCRIdxForSetCC(CC, Inv);
SDValue CCReg = SelectCC(LHS, RHS, CC, dl);
@@ -866,7 +880,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
// Force the ccreg into CR7.
SDValue CR7Reg = CurDAG->getRegister(PPC::CR7, MVT::i32);
- SDValue InFlag(0, 0); // Null incoming flag value.
+ SDValue InFlag(nullptr, 0); // Null incoming flag value.
CCReg = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, CR7Reg, CCReg,
InFlag).getValue(1);
@@ -876,7 +890,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
SDValue Ops[] = { IntCR, getI32Imm((32-(3-Idx)) & 31),
getI32Imm(31), getI32Imm(31) };
if (!Inv)
- return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
+ return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
// Get the specified bit.
SDValue Tmp =
@@ -891,7 +905,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDLoc dl(N);
if (N->isMachineOpcode()) {
N->setNodeId(-1);
- return NULL; // Already selected.
+ return nullptr; // Already selected.
}
switch (N->getOpcode()) {
@@ -972,8 +986,12 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
break;
}
- case ISD::SETCC:
- return SelectSETCC(N);
+ case ISD::SETCC: {
+ SDNode *SN = SelectSETCC(N);
+ if (SN)
+ return SN;
+ break;
+ }
case PPCISD::GlobalBaseReg:
return getGlobalBaseReg();
@@ -1069,7 +1087,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue Base = LD->getBasePtr();
SDValue Ops[] = { Offset, Base, Chain };
return CurDAG->getMachineNode(Opcode, dl, LD->getValueType(0),
- PPCLowering.getPointerTy(),
+ PPCLowering->getPointerTy(),
MVT::Other, Ops);
} else {
unsigned Opcode;
@@ -1104,7 +1122,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue Base = LD->getBasePtr();
SDValue Ops[] = { Base, Offset, Chain };
return CurDAG->getMachineNode(Opcode, dl, LD->getValueType(0),
- PPCLowering.getPointerTy(),
+ PPCLowering->getPointerTy(),
MVT::Other, Ops);
}
}
@@ -1119,7 +1137,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
isRotateAndMask(N->getOperand(0).getNode(), Imm, false, SH, MB, ME)) {
SDValue Val = N->getOperand(0).getOperand(0);
SDValue Ops[] = { Val, getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) };
- return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
+ return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
}
// If this is just a masked value where the input is not handled above, and
// is not a rotate-left (handled by a pattern in the .td file), emit rlwinm
@@ -1128,20 +1146,34 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
N->getOperand(0).getOpcode() != ISD::ROTL) {
SDValue Val = N->getOperand(0);
SDValue Ops[] = { Val, getI32Imm(0), getI32Imm(MB), getI32Imm(ME) };
- return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
+ return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
}
// If this is a 64-bit zero-extension mask, emit rldicl.
if (isInt64Immediate(N->getOperand(1).getNode(), Imm64) &&
isMask_64(Imm64)) {
SDValue Val = N->getOperand(0);
MB = 64 - CountTrailingOnes_64(Imm64);
- SDValue Ops[] = { Val, getI32Imm(0), getI32Imm(MB) };
- return CurDAG->SelectNodeTo(N, PPC::RLDICL, MVT::i64, Ops, 3);
+ SH = 0;
+
+ // If the operand is a logical right shift, we can fold it into this
+ // instruction: rldicl(rldicl(x, 64-n, n), 0, mb) -> rldicl(x, 64-n, mb)
+ // for n <= mb. The right shift is really a left rotate followed by a
+ // mask, and this mask is a more-restrictive sub-mask of the mask implied
+ // by the shift.
+ if (Val.getOpcode() == ISD::SRL &&
+ isInt32Immediate(Val.getOperand(1).getNode(), Imm) && Imm <= MB) {
+ assert(Imm < 64 && "Illegal shift amount");
+ Val = Val.getOperand(0);
+ SH = 64 - Imm;
+ }
+
+ SDValue Ops[] = { Val, getI32Imm(SH), getI32Imm(MB) };
+ return CurDAG->SelectNodeTo(N, PPC::RLDICL, MVT::i64, Ops);
}
// AND X, 0 -> 0, not "rlwinm 32".
if (isInt32Immediate(N->getOperand(1), Imm) && (Imm == 0)) {
ReplaceUses(SDValue(N, 0), N->getOperand(1));
- return NULL;
+ return nullptr;
}
// ISD::OR doesn't get all the bitfield insertion fun.
// (and (or x, c1), c2) where isRunOfOnes(~(c1^c2)) is a bitfield insert
@@ -1174,7 +1206,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
isRotateAndMask(N, Imm, true, SH, MB, ME)) {
SDValue Ops[] = { N->getOperand(0).getOperand(0),
getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) };
- return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
+ return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
}
// Other cases are autogenerated.
@@ -1186,17 +1218,45 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
isRotateAndMask(N, Imm, true, SH, MB, ME)) {
SDValue Ops[] = { N->getOperand(0).getOperand(0),
getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) };
- return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
+ return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
}
// Other cases are autogenerated.
break;
}
+ // FIXME: Remove this once the ANDI glue bug is fixed:
+ case PPCISD::ANDIo_1_EQ_BIT:
+ case PPCISD::ANDIo_1_GT_BIT: {
+ if (!ANDIGlueBug)
+ break;
+
+ EVT InVT = N->getOperand(0).getValueType();
+ assert((InVT == MVT::i64 || InVT == MVT::i32) &&
+ "Invalid input type for ANDIo_1_EQ_BIT");
+
+ unsigned Opcode = (InVT == MVT::i64) ? PPC::ANDIo8 : PPC::ANDIo;
+ SDValue AndI(CurDAG->getMachineNode(Opcode, dl, InVT, MVT::Glue,
+ N->getOperand(0),
+ CurDAG->getTargetConstant(1, InVT)), 0);
+ SDValue CR0Reg = CurDAG->getRegister(PPC::CR0, MVT::i32);
+ SDValue SRIdxVal =
+ CurDAG->getTargetConstant(N->getOpcode() == PPCISD::ANDIo_1_EQ_BIT ?
+ PPC::sub_eq : PPC::sub_gt, MVT::i32);
+
+ return CurDAG->SelectNodeTo(N, TargetOpcode::EXTRACT_SUBREG, MVT::i1,
+ CR0Reg, SRIdxVal,
+ SDValue(AndI.getNode(), 1) /* glue */);
+ }
case ISD::SELECT_CC: {
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get();
EVT PtrVT = CurDAG->getTargetLoweringInfo().getPointerTy();
bool isPPC64 = (PtrVT == MVT::i64);
+ // If this is a select of i1 operands, we'll pattern match it.
+ if (PPCSubTarget->useCRBits() &&
+ N->getOperand(0).getValueType() == MVT::i1)
+ break;
+
// Handle the setcc cases here. select_cc lhs, 0, 1, 0, cc
if (!isPPC64)
if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
@@ -1215,6 +1275,36 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
}
SDValue CCReg = SelectCC(N->getOperand(0), N->getOperand(1), CC, dl);
+
+ if (N->getValueType(0) == MVT::i1) {
+ // An i1 select is: (c & t) | (!c & f).
+ bool Inv;
+ unsigned Idx = getCRIdxForSetCC(CC, Inv);
+
+ unsigned SRI;
+ switch (Idx) {
+ default: llvm_unreachable("Invalid CC index");
+ case 0: SRI = PPC::sub_lt; break;
+ case 1: SRI = PPC::sub_gt; break;
+ case 2: SRI = PPC::sub_eq; break;
+ case 3: SRI = PPC::sub_un; break;
+ }
+
+ SDValue CCBit = CurDAG->getTargetExtractSubreg(SRI, dl, MVT::i1, CCReg);
+
+ SDValue NotCCBit(CurDAG->getMachineNode(PPC::CRNOR, dl, MVT::i1,
+ CCBit, CCBit), 0);
+ SDValue C = Inv ? NotCCBit : CCBit,
+ NotC = Inv ? CCBit : NotCCBit;
+
+ SDValue CAndT(CurDAG->getMachineNode(PPC::CRAND, dl, MVT::i1,
+ C, N->getOperand(2)), 0);
+ SDValue NotCAndF(CurDAG->getMachineNode(PPC::CRAND, dl, MVT::i1,
+ NotC, N->getOperand(3)), 0);
+
+ return CurDAG->SelectNodeTo(N, PPC::CROR, MVT::i1, CAndT, NotCAndF);
+ }
+
unsigned BROpc = getPredicateForSetCC(CC);
unsigned SelectCCOp;
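
The i1 SELECT_CC expansion added in the hunk above uses the Boolean identity select(c, t, f) = (c & t) | (!c & f), realized with CRAND/CRNOR/CROR on condition-register bits. A trivial exhaustive check of the identity (illustrative C++ only):

#include <cassert>

int main() {
  for (int c = 0; c <= 1; ++c)
    for (int t = 0; t <= 1; ++t)
      for (int f = 0; f <= 1; ++f)
        assert((c ? t : f) == ((c & t) | ((!c) & f)));
  return 0;
}
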
@@ -1231,16 +1321,60 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue Ops[] = { CCReg, N->getOperand(2), N->getOperand(3),
getI32Imm(BROpc) };
- return CurDAG->SelectNodeTo(N, SelectCCOp, N->getValueType(0), Ops, 4);
+ return CurDAG->SelectNodeTo(N, SelectCCOp, N->getValueType(0), Ops);
}
+ case ISD::VSELECT:
+ if (PPCSubTarget->hasVSX()) {
+ SDValue Ops[] = { N->getOperand(2), N->getOperand(1), N->getOperand(0) };
+ return CurDAG->SelectNodeTo(N, PPC::XXSEL, N->getValueType(0), Ops);
+ }
+
+ break;
+ case ISD::VECTOR_SHUFFLE:
+ if (PPCSubTarget->hasVSX() && (N->getValueType(0) == MVT::v2f64 ||
+ N->getValueType(0) == MVT::v2i64)) {
+ ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
+
+ SDValue Op1 = N->getOperand(SVN->getMaskElt(0) < 2 ? 0 : 1),
+ Op2 = N->getOperand(SVN->getMaskElt(1) < 2 ? 0 : 1);
+ unsigned DM[2];
+
+ for (int i = 0; i < 2; ++i)
+ if (SVN->getMaskElt(i) <= 0 || SVN->getMaskElt(i) == 2)
+ DM[i] = 0;
+ else
+ DM[i] = 1;
+
+ SDValue DMV = CurDAG->getTargetConstant(DM[1] | (DM[0] << 1), MVT::i32);
+
+ if (Op1 == Op2 && DM[0] == 0 && DM[1] == 0 &&
+ Op1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
+ isa<LoadSDNode>(Op1.getOperand(0))) {
+ LoadSDNode *LD = cast<LoadSDNode>(Op1.getOperand(0));
+ SDValue Base, Offset;
+
+ if (LD->isUnindexed() &&
+ SelectAddrIdxOnly(LD->getBasePtr(), Base, Offset)) {
+ SDValue Chain = LD->getChain();
+ SDValue Ops[] = { Base, Offset, Chain };
+ return CurDAG->SelectNodeTo(N, PPC::LXVDSX,
+ N->getValueType(0), Ops);
+ }
+ }
+
+ SDValue Ops[] = { Op1, Op2, DMV };
+ return CurDAG->SelectNodeTo(N, PPC::XXPERMDI, N->getValueType(0), Ops);
+ }
+
+ break;
case PPCISD::BDNZ:
case PPCISD::BDZ: {
- bool IsPPC64 = PPCSubTarget.isPPC64();
+ bool IsPPC64 = PPCSubTarget->isPPC64();
SDValue Ops[] = { N->getOperand(1), N->getOperand(0) };
return CurDAG->SelectNodeTo(N, N->getOpcode() == PPCISD::BDNZ ?
(IsPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
(IsPPC64 ? PPC::BDZ8 : PPC::BDZ),
- MVT::Other, Ops, 2);
+ MVT::Other, Ops);
}
case PPCISD::COND_BRANCH: {
// Op #0 is the Chain.
@@ -1253,14 +1387,36 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
getI32Imm(cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
SDValue Ops[] = { Pred, N->getOperand(2), N->getOperand(3),
N->getOperand(0), N->getOperand(4) };
- return CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops, 5);
+ return CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops);
}
case ISD::BR_CC: {
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
+ unsigned PCC = getPredicateForSetCC(CC);
+
+ if (N->getOperand(2).getValueType() == MVT::i1) {
+ unsigned Opc;
+ bool Swap;
+ switch (PCC) {
+ default: llvm_unreachable("Unexpected Boolean-operand predicate");
+ case PPC::PRED_LT: Opc = PPC::CRANDC; Swap = true; break;
+ case PPC::PRED_LE: Opc = PPC::CRORC; Swap = true; break;
+ case PPC::PRED_EQ: Opc = PPC::CREQV; Swap = false; break;
+ case PPC::PRED_GE: Opc = PPC::CRORC; Swap = false; break;
+ case PPC::PRED_GT: Opc = PPC::CRANDC; Swap = false; break;
+ case PPC::PRED_NE: Opc = PPC::CRXOR; Swap = false; break;
+ }
+
+ SDValue BitComp(CurDAG->getMachineNode(Opc, dl, MVT::i1,
+ N->getOperand(Swap ? 3 : 2),
+ N->getOperand(Swap ? 2 : 3)), 0);
+ return CurDAG->SelectNodeTo(N, PPC::BC, MVT::Other,
+ BitComp, N->getOperand(4), N->getOperand(0));
+ }
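The table above turns a branch on an i1 comparison into a single CR logical op. A standalone check of those encodings, treating the i1 operands as unsigned 0/1 values (that interpretation, and all names here, are assumptions of this sketch, not taken from the patch); andc(x, y) = x & ~y, orc(x, y) = x | ~y, and the swapped cases pass the operands as (b, a):

#include <cassert>

static bool andc(bool X, bool Y) { return X && !Y; }
static bool orc (bool X, bool Y) { return X || !Y; }

int main() {
  for (int A = 0; A < 2; ++A)
    for (int B = 0; B < 2; ++B) {
      assert((A <  B) == andc(B, A));     // PRED_LT, swapped -> CRANDC(b, a)
      assert((A <= B) == orc (B, A));     // PRED_LE, swapped -> CRORC(b, a)
      assert((A == B) == !(A ^ B));       // PRED_EQ          -> CREQV(a, b)
      assert((A >= B) == orc (A, B));     // PRED_GE          -> CRORC(a, b)
      assert((A >  B) == andc(A, B));     // PRED_GT          -> CRANDC(a, b)
      assert((A != B) == ((A ^ B) != 0)); // PRED_NE          -> CRXOR(a, b)
    }
  return 0;
}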
+
SDValue CondCode = SelectCC(N->getOperand(2), N->getOperand(3), CC, dl);
- SDValue Ops[] = { getI32Imm(getPredicateForSetCC(CC)), CondCode,
+ SDValue Ops[] = { getI32Imm(PCC), CondCode,
N->getOperand(4), N->getOperand(0) };
- return CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops, 4);
+ return CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops);
}
case ISD::BRIND: {
// FIXME: Should custom lower this.
@@ -1273,12 +1429,12 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
return CurDAG->SelectNodeTo(N, Reg, MVT::Other, Chain);
}
case PPCISD::TOC_ENTRY: {
- if (PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) {
+ if (PPCSubTarget->isSVR4ABI() && !PPCSubTarget->isPPC64()) {
SDValue GA = N->getOperand(0);
return CurDAG->getMachineNode(PPC::LWZtoc, dl, MVT::i32, GA,
N->getOperand(1));
- }
- assert (PPCSubTarget.isPPC64() &&
+ }
+ assert (PPCSubTarget->isPPC64() &&
"Only supported for 64-bit ABI and 32-bit SVR4");
// For medium and large code model, we generate two instructions as
@@ -1288,10 +1444,10 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
if (CModel != CodeModel::Medium && CModel != CodeModel::Large)
break;
- // The first source operand is a TargetGlobalAddress or a
- // TargetJumpTable. If it is an externally defined symbol, a symbol
- // with common linkage, a function address, or a jump table address,
- // or if we are generating code for large code model, we generate:
+ // The first source operand is a TargetGlobalAddress or a TargetJumpTable.
+ // If it is an externally defined symbol, a symbol with common linkage,
+ // a non-local function address, or a jump table address, or if we are
+ // generating code for large code model, we generate:
// LDtocL(<ga:@sym>, ADDIStocHA(%X2, <ga:@sym>))
// Otherwise we generate:
// ADDItocL(ADDIStocHA(%X2, <ga:@sym>), <ga:@sym>)
@@ -1306,18 +1462,10 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA)) {
const GlobalValue *GValue = G->getGlobal();
- const GlobalAlias *GAlias = dyn_cast<GlobalAlias>(GValue);
- const GlobalValue *RealGValue = GAlias ?
- GAlias->resolveAliasedGlobal(false) : GValue;
- const GlobalVariable *GVar = dyn_cast<GlobalVariable>(RealGValue);
- assert((GVar || isa<Function>(RealGValue)) &&
- "Unexpected global value subclass!");
-
- // An external variable is one without an initializer. For these,
- // for variables with common linkage, and for Functions, generate
- // the LDtocL form.
- if (!GVar || !GVar->hasInitializer() || RealGValue->hasCommonLinkage() ||
- RealGValue->hasAvailableExternallyLinkage())
+ if ((GValue->getType()->getElementType()->isFunctionTy() &&
+ (GValue->isDeclaration() || GValue->isWeakForLinker())) ||
+ GValue->isDeclaration() || GValue->hasCommonLinkage() ||
+ GValue->hasAvailableExternallyLinkage())
return CurDAG->getMachineNode(PPC::LDtocL, dl, MVT::i64, GA,
SDValue(Tmp, 0));
}
@@ -1327,9 +1475,9 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
}
case PPCISD::PPC32_PICGOT: {
// Generate a PIC-safe GOT reference.
- assert(!PPCSubTarget.isPPC64() && PPCSubTarget.isSVR4ABI() &&
+ assert(!PPCSubTarget->isPPC64() && PPCSubTarget->isSVR4ABI() &&
"PPCISD::PPC32_PICGOT is only supported for 32-bit SVR4");
- return CurDAG->SelectNodeTo(N, PPC::PPC32PICGOT, PPCLowering.getPointerTy(), MVT::i32);
+ return CurDAG->SelectNodeTo(N, PPC::PPC32PICGOT, PPCLowering->getPointerTy(), MVT::i32);
}
case PPCISD::VADD_SPLAT: {
// This expands into one of three sequences, depending on whether
@@ -1407,7 +1555,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
return SelectCode(N);
}
-/// PostProcessISelDAG - Perform some late peephole optimizations
+/// PostprocessISelDAG - Perform some late peephole optimizations
/// on the DAG representation.
void PPCDAGToDAGISel::PostprocessISelDAG() {
@@ -1415,8 +1563,480 @@ void PPCDAGToDAGISel::PostprocessISelDAG() {
if (TM.getOptLevel() == CodeGenOpt::None)
return;
+ PeepholePPC64();
+ PeepholeCROps();
+}
+
+// Check if all users of this node will become isel where the second operand
+// is the constant zero. If this is so, and if we can negate the condition,
+// then we can flip the true and false operands. This will allow the zero to
+// be folded with the isel so that we don't need to materialize a register
+// containing zero.
+bool PPCDAGToDAGISel::AllUsersSelectZero(SDNode *N) {
+ // If we're not using isel, then this does not matter.
+ if (!PPCSubTarget->hasISEL())
+ return false;
+
+ for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
+ UI != UE; ++UI) {
+ SDNode *User = *UI;
+ if (!User->isMachineOpcode())
+ return false;
+ if (User->getMachineOpcode() != PPC::SELECT_I4 &&
+ User->getMachineOpcode() != PPC::SELECT_I8)
+ return false;
+
+ SDNode *Op2 = User->getOperand(2).getNode();
+ if (!Op2->isMachineOpcode())
+ return false;
+
+ if (Op2->getMachineOpcode() != PPC::LI &&
+ Op2->getMachineOpcode() != PPC::LI8)
+ return false;
+
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op2->getOperand(0));
+ if (!C)
+ return false;
+
+ if (!C->isNullValue())
+ return false;
+ }
+
+ return true;
+}
+
+void PPCDAGToDAGISel::SwapAllSelectUsers(SDNode *N) {
+ SmallVector<SDNode *, 4> ToReplace;
+ for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
+ UI != UE; ++UI) {
+ SDNode *User = *UI;
+ assert((User->getMachineOpcode() == PPC::SELECT_I4 ||
+ User->getMachineOpcode() == PPC::SELECT_I8) &&
+ "Must have all select users");
+ ToReplace.push_back(User);
+ }
+
+ for (SmallVector<SDNode *, 4>::iterator UI = ToReplace.begin(),
+ UE = ToReplace.end(); UI != UE; ++UI) {
+ SDNode *User = *UI;
+ SDNode *ResNode =
+ CurDAG->getMachineNode(User->getMachineOpcode(), SDLoc(User),
+ User->getValueType(0), User->getOperand(0),
+ User->getOperand(2),
+ User->getOperand(1));
+
+ DEBUG(dbgs() << "CR Peephole replacing:\nOld: ");
+ DEBUG(User->dump(CurDAG));
+ DEBUG(dbgs() << "\nNew: ");
+ DEBUG(ResNode->dump(CurDAG));
+ DEBUG(dbgs() << "\n");
+
+ ReplaceUses(User, ResNode);
+ }
+}
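AllUsersSelectZero and SwapAllSelectUsers together implement a small rewrite: when every user is a SELECT_I4/SELECT_I8 whose second operand is the constant zero, the feeding CR computation is inverted and each select's value operands are swapped, so the zero lands where isel can fold it. A scalar sketch of the equivalence being exploited (illustrative names, not LLVM code):

#include <cassert>
#include <cstdint>
#include <initializer_list>

static int64_t isel(bool Cond, int64_t T, int64_t F) { return Cond ? T : F; }

int main() {
  for (int C = 0; C < 2; ++C)
    for (int64_t T : {-5, 0, 42})
      // Inverting the condition and swapping the operands preserves the
      // result, and moves the literal zero into the foldable position.
      assert(isel(C, T, 0) == isel(!C, 0, T));
  return 0;
}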
+
+void PPCDAGToDAGISel::PeepholeCROps() {
+ bool IsModified;
+ do {
+ IsModified = false;
+ for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
+ E = CurDAG->allnodes_end(); I != E; ++I) {
+ MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
+ if (!MachineNode || MachineNode->use_empty())
+ continue;
+ SDNode *ResNode = MachineNode;
+
+ bool Op1Set = false, Op1Unset = false,
+ Op1Not = false,
+ Op2Set = false, Op2Unset = false,
+ Op2Not = false;
+
+ unsigned Opcode = MachineNode->getMachineOpcode();
+ switch (Opcode) {
+ default: break;
+ case PPC::CRAND:
+ case PPC::CRNAND:
+ case PPC::CROR:
+ case PPC::CRXOR:
+ case PPC::CRNOR:
+ case PPC::CREQV:
+ case PPC::CRANDC:
+ case PPC::CRORC: {
+ SDValue Op = MachineNode->getOperand(1);
+ if (Op.isMachineOpcode()) {
+ if (Op.getMachineOpcode() == PPC::CRSET)
+ Op2Set = true;
+ else if (Op.getMachineOpcode() == PPC::CRUNSET)
+ Op2Unset = true;
+ else if (Op.getMachineOpcode() == PPC::CRNOR &&
+ Op.getOperand(0) == Op.getOperand(1))
+ Op2Not = true;
+ }
+ } // fallthrough
+ case PPC::BC:
+ case PPC::BCn:
+ case PPC::SELECT_I4:
+ case PPC::SELECT_I8:
+ case PPC::SELECT_F4:
+ case PPC::SELECT_F8:
+ case PPC::SELECT_VRRC: {
+ SDValue Op = MachineNode->getOperand(0);
+ if (Op.isMachineOpcode()) {
+ if (Op.getMachineOpcode() == PPC::CRSET)
+ Op1Set = true;
+ else if (Op.getMachineOpcode() == PPC::CRUNSET)
+ Op1Unset = true;
+ else if (Op.getMachineOpcode() == PPC::CRNOR &&
+ Op.getOperand(0) == Op.getOperand(1))
+ Op1Not = true;
+ }
+ }
+ break;
+ }
+
+ bool SelectSwap = false;
+ switch (Opcode) {
+ default: break;
+ case PPC::CRAND:
+ if (MachineNode->getOperand(0) == MachineNode->getOperand(1))
+ // x & x = x
+ ResNode = MachineNode->getOperand(0).getNode();
+ else if (Op1Set)
+ // 1 & y = y
+ ResNode = MachineNode->getOperand(1).getNode();
+ else if (Op2Set)
+ // x & 1 = x
+ ResNode = MachineNode->getOperand(0).getNode();
+ else if (Op1Unset || Op2Unset)
+ // x & 0 = 0 & y = 0
+ ResNode = CurDAG->getMachineNode(PPC::CRUNSET, SDLoc(MachineNode),
+ MVT::i1);
+ else if (Op1Not)
+ // ~x & y = andc(y, x)
+ ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(1),
+ MachineNode->getOperand(0).
+ getOperand(0));
+ else if (Op2Not)
+ // x & ~y = andc(x, y)
+ ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0),
+ MachineNode->getOperand(1).
+ getOperand(0));
+ else if (AllUsersSelectZero(MachineNode))
+ ResNode = CurDAG->getMachineNode(PPC::CRNAND, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0),
+ MachineNode->getOperand(1)),
+ SelectSwap = true;
+ break;
+ case PPC::CRNAND:
+ if (MachineNode->getOperand(0) == MachineNode->getOperand(1))
+ // nand(x, x) -> nor(x, x)
+ ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0),
+ MachineNode->getOperand(0));
+ else if (Op1Set)
+ // nand(1, y) -> nor(y, y)
+ ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(1),
+ MachineNode->getOperand(1));
+ else if (Op2Set)
+ // nand(x, 1) -> nor(x, x)
+ ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0),
+ MachineNode->getOperand(0));
+ else if (Op1Unset || Op2Unset)
+ // nand(x, 0) = nand(0, y) = 1
+ ResNode = CurDAG->getMachineNode(PPC::CRSET, SDLoc(MachineNode),
+ MVT::i1);
+ else if (Op1Not)
+ // nand(~x, y) = ~(~x & y) = x | ~y = orc(x, y)
+ ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0).
+ getOperand(0),
+ MachineNode->getOperand(1));
+ else if (Op2Not)
+ // nand(x, ~y) = ~x | y = orc(y, x)
+ ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(1).
+ getOperand(0),
+ MachineNode->getOperand(0));
+ else if (AllUsersSelectZero(MachineNode))
+ ResNode = CurDAG->getMachineNode(PPC::CRAND, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0),
+ MachineNode->getOperand(1)),
+ SelectSwap = true;
+ break;
+ case PPC::CROR:
+ if (MachineNode->getOperand(0) == MachineNode->getOperand(1))
+ // x | x = x
+ ResNode = MachineNode->getOperand(0).getNode();
+ else if (Op1Set || Op2Set)
+ // x | 1 = 1 | y = 1
+ ResNode = CurDAG->getMachineNode(PPC::CRSET, SDLoc(MachineNode),
+ MVT::i1);
+ else if (Op1Unset)
+ // 0 | y = y
+ ResNode = MachineNode->getOperand(1).getNode();
+ else if (Op2Unset)
+ // x | 0 = x
+ ResNode = MachineNode->getOperand(0).getNode();
+ else if (Op1Not)
+ // ~x | y = orc(y, x)
+ ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(1),
+ MachineNode->getOperand(0).
+ getOperand(0));
+ else if (Op2Not)
+ // x | ~y = orc(x, y)
+ ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0),
+ MachineNode->getOperand(1).
+ getOperand(0));
+ else if (AllUsersSelectZero(MachineNode))
+ ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0),
+ MachineNode->getOperand(1)),
+ SelectSwap = true;
+ break;
+ case PPC::CRXOR:
+ if (MachineNode->getOperand(0) == MachineNode->getOperand(1))
+ // xor(x, x) = 0
+ ResNode = CurDAG->getMachineNode(PPC::CRUNSET, SDLoc(MachineNode),
+ MVT::i1);
+ else if (Op1Set)
+ // xor(1, y) -> nor(y, y)
+ ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(1),
+ MachineNode->getOperand(1));
+ else if (Op2Set)
+ // xor(x, 1) -> nor(x, x)
+ ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0),
+ MachineNode->getOperand(0));
+ else if (Op1Unset)
+ // xor(0, y) = y
+ ResNode = MachineNode->getOperand(1).getNode();
+ else if (Op2Unset)
+ // xor(x, 0) = x
+ ResNode = MachineNode->getOperand(0).getNode();
+ else if (Op1Not)
+ // xor(~x, y) = eqv(x, y)
+ ResNode = CurDAG->getMachineNode(PPC::CREQV, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0).
+ getOperand(0),
+ MachineNode->getOperand(1));
+ else if (Op2Not)
+ // xor(x, ~y) = eqv(x, y)
+ ResNode = CurDAG->getMachineNode(PPC::CREQV, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0),
+ MachineNode->getOperand(1).
+ getOperand(0));
+ else if (AllUsersSelectZero(MachineNode))
+ ResNode = CurDAG->getMachineNode(PPC::CREQV, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0),
+ MachineNode->getOperand(1)),
+ SelectSwap = true;
+ break;
+ case PPC::CRNOR:
+ if (Op1Set || Op2Set)
+ // nor(1, y) -> 0
+ ResNode = CurDAG->getMachineNode(PPC::CRUNSET, SDLoc(MachineNode),
+ MVT::i1);
+ else if (Op1Unset)
+ // nor(0, y) = ~y -> nor(y, y)
+ ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(1),
+ MachineNode->getOperand(1));
+ else if (Op2Unset)
+ // nor(x, 0) = ~x
+ ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0),
+ MachineNode->getOperand(0));
+ else if (Op1Not)
+ // nor(~x, y) = andc(x, y)
+ ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0).
+ getOperand(0),
+ MachineNode->getOperand(1));
+ else if (Op2Not)
+ // nor(x, ~y) = andc(y, x)
+ ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(1).
+ getOperand(0),
+ MachineNode->getOperand(0));
+ else if (AllUsersSelectZero(MachineNode))
+ ResNode = CurDAG->getMachineNode(PPC::CROR, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0),
+ MachineNode->getOperand(1)),
+ SelectSwap = true;
+ break;
+ case PPC::CREQV:
+ if (MachineNode->getOperand(0) == MachineNode->getOperand(1))
+ // eqv(x, x) = 1
+ ResNode = CurDAG->getMachineNode(PPC::CRSET, SDLoc(MachineNode),
+ MVT::i1);
+ else if (Op1Set)
+ // eqv(1, y) = y
+ ResNode = MachineNode->getOperand(1).getNode();
+ else if (Op2Set)
+ // eqv(x, 1) = x
+ ResNode = MachineNode->getOperand(0).getNode();
+ else if (Op1Unset)
+ // eqv(0, y) = ~y -> nor(y, y)
+ ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(1),
+ MachineNode->getOperand(1));
+ else if (Op2Unset)
+ // eqv(x, 0) = ~x
+ ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0),
+ MachineNode->getOperand(0));
+ else if (Op1Not)
+ // eqv(~x, y) = xor(x, y)
+ ResNode = CurDAG->getMachineNode(PPC::CRXOR, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0).
+ getOperand(0),
+ MachineNode->getOperand(1));
+ else if (Op2Not)
+ // eqv(x, ~y) = xor(x, y)
+ ResNode = CurDAG->getMachineNode(PPC::CRXOR, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0),
+ MachineNode->getOperand(1).
+ getOperand(0));
+ else if (AllUsersSelectZero(MachineNode))
+ ResNode = CurDAG->getMachineNode(PPC::CRXOR, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0),
+ MachineNode->getOperand(1)),
+ SelectSwap = true;
+ break;
+ case PPC::CRANDC:
+ if (MachineNode->getOperand(0) == MachineNode->getOperand(1))
+ // andc(x, x) = 0
+ ResNode = CurDAG->getMachineNode(PPC::CRUNSET, SDLoc(MachineNode),
+ MVT::i1);
+ else if (Op1Set)
+ // andc(1, y) = ~y
+ ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(1),
+ MachineNode->getOperand(1));
+ else if (Op1Unset || Op2Set)
+ // andc(0, y) = andc(x, 1) = 0
+ ResNode = CurDAG->getMachineNode(PPC::CRUNSET, SDLoc(MachineNode),
+ MVT::i1);
+ else if (Op2Unset)
+ // andc(x, 0) = x
+ ResNode = MachineNode->getOperand(0).getNode();
+ else if (Op1Not)
+ // andc(~x, y) = ~(x | y) = nor(x, y)
+ ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0).
+ getOperand(0),
+ MachineNode->getOperand(1));
+ else if (Op2Not)
+ // andc(x, ~y) = x & y
+ ResNode = CurDAG->getMachineNode(PPC::CRAND, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0),
+ MachineNode->getOperand(1).
+ getOperand(0));
+ else if (AllUsersSelectZero(MachineNode))
+ ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(1),
+ MachineNode->getOperand(0)),
+ SelectSwap = true;
+ break;
+ case PPC::CRORC:
+ if (MachineNode->getOperand(0) == MachineNode->getOperand(1))
+ // orc(x, x) = 1
+ ResNode = CurDAG->getMachineNode(PPC::CRSET, SDLoc(MachineNode),
+ MVT::i1);
+ else if (Op1Set || Op2Unset)
+ // orc(1, y) = orc(x, 0) = 1
+ ResNode = CurDAG->getMachineNode(PPC::CRSET, SDLoc(MachineNode),
+ MVT::i1);
+ else if (Op2Set)
+ // orc(x, 1) = x
+ ResNode = MachineNode->getOperand(0).getNode();
+ else if (Op1Unset)
+ // orc(0, y) = ~y
+ ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(1),
+ MachineNode->getOperand(1));
+ else if (Op1Not)
+ // orc(~x, y) = ~(x & y) = nand(x, y)
+ ResNode = CurDAG->getMachineNode(PPC::CRNAND, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0).
+ getOperand(0),
+ MachineNode->getOperand(1));
+ else if (Op2Not)
+ // orc(x, ~y) = x | y
+ ResNode = CurDAG->getMachineNode(PPC::CROR, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(0),
+ MachineNode->getOperand(1).
+ getOperand(0));
+ else if (AllUsersSelectZero(MachineNode))
+ ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode),
+ MVT::i1, MachineNode->getOperand(1),
+ MachineNode->getOperand(0)),
+ SelectSwap = true;
+ break;
+ case PPC::SELECT_I4:
+ case PPC::SELECT_I8:
+ case PPC::SELECT_F4:
+ case PPC::SELECT_F8:
+ case PPC::SELECT_VRRC:
+ if (Op1Set)
+ ResNode = MachineNode->getOperand(1).getNode();
+ else if (Op1Unset)
+ ResNode = MachineNode->getOperand(2).getNode();
+ else if (Op1Not)
+ ResNode = CurDAG->getMachineNode(MachineNode->getMachineOpcode(),
+ SDLoc(MachineNode),
+ MachineNode->getValueType(0),
+ MachineNode->getOperand(0).
+ getOperand(0),
+ MachineNode->getOperand(2),
+ MachineNode->getOperand(1));
+ break;
+ case PPC::BC:
+ case PPC::BCn:
+ if (Op1Not)
+ ResNode = CurDAG->getMachineNode(Opcode == PPC::BC ? PPC::BCn :
+ PPC::BC,
+ SDLoc(MachineNode),
+ MVT::Other,
+ MachineNode->getOperand(0).
+ getOperand(0),
+ MachineNode->getOperand(1),
+ MachineNode->getOperand(2));
+ // FIXME: Handle Op1Set, Op1Unset here too.
+ break;
+ }
+
+ // If we're inverting this node because it is used only by selects that
+ // we'd like to swap, then swap the selects before the node replacement.
+ if (SelectSwap)
+ SwapAllSelectUsers(MachineNode);
+
+ if (ResNode != MachineNode) {
+ DEBUG(dbgs() << "CR Peephole replacing:\nOld: ");
+ DEBUG(MachineNode->dump(CurDAG));
+ DEBUG(dbgs() << "\nNew: ");
+ DEBUG(ResNode->dump(CurDAG));
+ DEBUG(dbgs() << "\n");
+
+ ReplaceUses(MachineNode, ResNode);
+ IsModified = true;
+ }
+ }
+ if (IsModified)
+ CurDAG->RemoveDeadNodes();
+ } while (IsModified);
+}
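PeepholeCROps leans on ordinary two-valued Boolean identities to rewrite chains of CR logical ops, e.g. nand(~x, y) = orc(x, y) and nor(~x, y) = andc(x, y). A self-contained check of several of the identities used above, written as plain C++ rather than DAG nodes (the function names are only stand-ins for the CR* opcodes):

#include <cassert>

static bool crand (bool X, bool Y) { return X && Y;    }
static bool crnand(bool X, bool Y) { return !(X && Y); }
static bool crnor (bool X, bool Y) { return !(X || Y); }
static bool crandc(bool X, bool Y) { return X && !Y;   }
static bool crorc (bool X, bool Y) { return X || !Y;   }
static bool creqv (bool X, bool Y) { return X == Y;    }

int main() {
  for (int Xi = 0; Xi < 2; ++Xi)
    for (int Yi = 0; Yi < 2; ++Yi) {
      bool X = Xi, Y = Yi;
      assert(crand (!X, Y) == crandc(Y, X)); // ~x & y      = andc(y, x)
      assert(crnand(!X, Y) == crorc (X, Y)); // nand(~x, y) = orc(x, y)
      assert(crnor (!X, Y) == crandc(X, Y)); // nor(~x, y)  = andc(x, y)
      assert(crandc(!X, Y) == crnor (X, Y)); // andc(~x, y) = nor(x, y)
      assert(crorc (!X, Y) == crnand(X, Y)); // orc(~x, y)  = nand(x, y)
      assert(creqv (!X, Y) == (X != Y));     // eqv(~x, y)  = xor(x, y)
    }
  return 0;
}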
+
+void PPCDAGToDAGISel::PeepholePPC64() {
// These optimizations are currently supported only for 64-bit SVR4.
- if (PPCSubTarget.isDarwin() || !PPCSubTarget.isPPC64())
+ if (PPCSubTarget->isDarwin() || !PPCSubTarget->isPPC64())
return;
SelectionDAG::allnodes_iterator Position(CurDAG->getRoot().getNode());
@@ -1574,8 +2194,8 @@ FunctionPass *llvm::createPPCISelDag(PPCTargetMachine &TM) {
static void initializePassOnce(PassRegistry &Registry) {
const char *Name = "PowerPC DAG->DAG Pattern Instruction Selection";
- PassInfo *PI = new PassInfo(Name, "ppc-codegen", &SelectionDAGISel::ID, 0,
- false, false);
+ PassInfo *PI = new PassInfo(Name, "ppc-codegen", &SelectionDAGISel::ID,
+ nullptr, false, false);
Registry.registerPass(*PI, true);
}
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 52f18d9..1247e86 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -18,6 +18,8 @@
#include "PPCTargetMachine.h"
#include "PPCTargetObjectFile.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -46,20 +48,21 @@ cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hi
static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);
-static TargetLoweringObjectFile *CreateTLOF(const PPCTargetMachine &TM) {
- if (TM.getSubtargetImpl()->isDarwin())
- return new TargetLoweringObjectFileMachO();
+// FIXME: Remove this once the bug has been fixed!
+extern cl::opt<bool> ANDIGlueBug;
- if (TM.getSubtargetImpl()->isSVR4ABI())
- return new PPC64LinuxTargetObjectFile();
+static TargetLoweringObjectFile *createTLOF(const Triple &TT) {
+ // If it isn't a Mach-O file then it's going to be a linux ELF
+ // object file.
+ if (TT.isOSDarwin())
+ return new TargetLoweringObjectFileMachO();
- return new TargetLoweringObjectFileELF();
+ return new PPC64LinuxTargetObjectFile();
}
PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
- : TargetLowering(TM, CreateTLOF(TM)), PPCSubTarget(*TM.getSubtargetImpl()) {
- const PPCSubtarget *Subtarget = &TM.getSubtarget<PPCSubtarget>();
-
+ : TargetLowering(TM, createTLOF(Triple(TM.getTargetTriple()))),
+ Subtarget(*TM.getSubtargetImpl()) {
setPow2DivIsCheap();
// Use _setjmp/_longjmp instead of setjmp/longjmp.
@@ -68,7 +71,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
// On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
// arguments are at least 4/8 bytes aligned.
- bool isPPC64 = Subtarget->isPPC64();
+ bool isPPC64 = Subtarget.isPPC64();
setMinStackArgumentAlignment(isPPC64 ? 8:4);
// Set up the register classes.
@@ -94,6 +97,39 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
+ if (Subtarget.useCRBits()) {
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+
+ if (isPPC64 || Subtarget.hasFPCVT()) {
+ setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
+ AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
+ isPPC64 ? MVT::i64 : MVT::i32);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
+ AddPromotedToType (ISD::UINT_TO_FP, MVT::i1,
+ isPPC64 ? MVT::i64 : MVT::i32);
+ } else {
+ setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
+ }
+
+ // PowerPC does not support direct load / store of condition registers
+ setOperationAction(ISD::LOAD, MVT::i1, Custom);
+ setOperationAction(ISD::STORE, MVT::i1, Custom);
+
+ // FIXME: Remove this once the ANDI glue bug is fixed:
+ if (ANDIGlueBug)
+ setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
+
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
+ setTruncStoreAction(MVT::i64, MVT::i1, Expand);
+ setTruncStoreAction(MVT::i32, MVT::i1, Expand);
+ setTruncStoreAction(MVT::i16, MVT::i1, Expand);
+ setTruncStoreAction(MVT::i8, MVT::i1, Expand);
+
+ addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
+ }
+
// This is used in the ppcf128->int sequence. Note it has different semantics
// from FP_ROUND: that rounds to nearest, this rounds to zero.
setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);
@@ -139,17 +175,17 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
// If we're enabling GP optimizations, use hardware square root
- if (!Subtarget->hasFSQRT() &&
+ if (!Subtarget.hasFSQRT() &&
!(TM.Options.UnsafeFPMath &&
- Subtarget->hasFRSQRTE() && Subtarget->hasFRE()))
+ Subtarget.hasFRSQRTE() && Subtarget.hasFRE()))
setOperationAction(ISD::FSQRT, MVT::f64, Expand);
- if (!Subtarget->hasFSQRT() &&
+ if (!Subtarget.hasFSQRT() &&
!(TM.Options.UnsafeFPMath &&
- Subtarget->hasFRSQRTES() && Subtarget->hasFRES()))
+ Subtarget.hasFRSQRTES() && Subtarget.hasFRES()))
setOperationAction(ISD::FSQRT, MVT::f32, Expand);
- if (Subtarget->hasFCPSGN()) {
+ if (Subtarget.hasFCPSGN()) {
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
} else {
@@ -157,7 +193,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
}
- if (Subtarget->hasFPRND()) {
+ if (Subtarget.hasFPRND()) {
setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
setOperationAction(ISD::FCEIL, MVT::f64, Legal);
setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
@@ -179,7 +215,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
- if (Subtarget->hasPOPCNTD()) {
+ if (Subtarget.hasPOPCNTD()) {
setOperationAction(ISD::CTPOP, MVT::i32 , Legal);
setOperationAction(ISD::CTPOP, MVT::i64 , Legal);
} else {
@@ -191,21 +227,25 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::ROTR, MVT::i32 , Expand);
setOperationAction(ISD::ROTR, MVT::i64 , Expand);
- // PowerPC does not have Select
- setOperationAction(ISD::SELECT, MVT::i32, Expand);
- setOperationAction(ISD::SELECT, MVT::i64, Expand);
- setOperationAction(ISD::SELECT, MVT::f32, Expand);
- setOperationAction(ISD::SELECT, MVT::f64, Expand);
+ if (!Subtarget.useCRBits()) {
+ // PowerPC does not have Select
+ setOperationAction(ISD::SELECT, MVT::i32, Expand);
+ setOperationAction(ISD::SELECT, MVT::i64, Expand);
+ setOperationAction(ISD::SELECT, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT, MVT::f64, Expand);
+ }
// PowerPC wants to turn select_cc of FP into fsel when possible.
setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
// PowerPC wants to optimize integer setcc a bit
- setOperationAction(ISD::SETCC, MVT::i32, Custom);
+ if (!Subtarget.useCRBits())
+ setOperationAction(ISD::SETCC, MVT::i32, Custom);
// PowerPC does not have BRCOND which requires SetCC
- setOperationAction(ISD::BRCOND, MVT::Other, Expand);
+ if (!Subtarget.useCRBits())
+ setOperationAction(ISD::BRCOND, MVT::Other, Expand);
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
@@ -256,7 +296,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);
- if (Subtarget->isSVR4ABI()) {
+ if (Subtarget.isSVR4ABI()) {
if (isPPC64) {
// VAARG always uses double-word chunks, so promote anything smaller.
setOperationAction(ISD::VAARG, MVT::i1, Promote);
@@ -276,7 +316,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
} else
setOperationAction(ISD::VAARG, MVT::Other, Expand);
- if (Subtarget->isSVR4ABI() && !isPPC64)
+ if (Subtarget.isSVR4ABI() && !isPPC64)
// VACOPY is custom lowered with the 32-bit SVR4 ABI.
setOperationAction(ISD::VACOPY , MVT::Other, Custom);
else
@@ -309,7 +349,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
- if (Subtarget->has64BitSupport()) {
+ if (Subtarget.has64BitSupport()) {
// They also have instructions for converting between i64 and fp.
setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
@@ -319,7 +359,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
// We cannot do this with Promote because i64 is not a legal type.
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
- if (PPCSubTarget.hasLFIWAX() || Subtarget->isPPC64())
+ if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
} else {
// PowerPC does not have FP_TO_UINT on 32-bit implementations.
@@ -327,8 +367,8 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
}
// With the instructions enabled under FPCVT, we can do everything.
- if (PPCSubTarget.hasFPCVT()) {
- if (Subtarget->has64BitSupport()) {
+ if (Subtarget.hasFPCVT()) {
+ if (Subtarget.has64BitSupport()) {
setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
@@ -341,7 +381,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
}
- if (Subtarget->use64BitRegs()) {
+ if (Subtarget.use64BitRegs()) {
// 64-bit PowerPC implementations can support i64 types directly
addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
// BUILD_PAIR can't be handled natively, and should be expanded to shl/or
@@ -357,7 +397,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
}
- if (Subtarget->hasAltivec()) {
+ if (Subtarget.hasAltivec()) {
// First set operation action for all vector types to expand. Then we
// will selectively turn on ones that can be effectively codegen'd.
for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
@@ -413,12 +453,15 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
+ setOperationAction(ISD::MULHU, VT, Expand);
+ setOperationAction(ISD::MULHS, VT, Expand);
setOperationAction(ISD::UMUL_LOHI, VT, Expand);
setOperationAction(ISD::SMUL_LOHI, VT, Expand);
setOperationAction(ISD::UDIVREM, VT, Expand);
setOperationAction(ISD::SDIVREM, VT, Expand);
setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
setOperationAction(ISD::FPOW, VT, Expand);
+ setOperationAction(ISD::BSWAP, VT, Expand);
setOperationAction(ISD::CTPOP, VT, Expand);
setOperationAction(ISD::CTLZ, VT, Expand);
setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
@@ -445,7 +488,8 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::OR , MVT::v4i32, Legal);
setOperationAction(ISD::XOR , MVT::v4i32, Legal);
setOperationAction(ISD::LOAD , MVT::v4i32, Legal);
- setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
+ setOperationAction(ISD::SELECT, MVT::v4i32,
+ Subtarget.useCRBits() ? Legal : Expand);
setOperationAction(ISD::STORE , MVT::v4i32, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
@@ -464,7 +508,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::MUL, MVT::v4f32, Legal);
setOperationAction(ISD::FMA, MVT::v4f32, Legal);
- if (TM.Options.UnsafeFPMath) {
+ if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
}
@@ -484,16 +528,83 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
// Altivec does not contain unordered floating-point compare instructions
setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
- setCondCodeAction(ISD::SETUGT, MVT::v4f32, Expand);
- setCondCodeAction(ISD::SETUGE, MVT::v4f32, Expand);
- setCondCodeAction(ISD::SETULT, MVT::v4f32, Expand);
- setCondCodeAction(ISD::SETULE, MVT::v4f32, Expand);
-
setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);
+
+ if (Subtarget.hasVSX()) {
+ setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
+
+ setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
+ setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
+ setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
+ setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
+ setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
+
+ setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
+
+ setOperationAction(ISD::MUL, MVT::v2f64, Legal);
+ setOperationAction(ISD::FMA, MVT::v2f64, Legal);
+
+ setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
+ setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
+
+ setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
+ setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
+ setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
+ setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
+ setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);
+
+ // Share the Altivec comparison restrictions.
+ setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
+ setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
+ setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
+ setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);
+
+ setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
+ setOperationAction(ISD::STORE, MVT::v2f64, Legal);
+
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);
+
+ addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);
+
+ addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
+ addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);
+
+ // VSX v2i64 only supports non-arithmetic operations.
+ setOperationAction(ISD::ADD, MVT::v2i64, Expand);
+ setOperationAction(ISD::SUB, MVT::v2i64, Expand);
+
+ setOperationAction(ISD::SHL, MVT::v2i64, Expand);
+ setOperationAction(ISD::SRA, MVT::v2i64, Expand);
+ setOperationAction(ISD::SRL, MVT::v2i64, Expand);
+
+ setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
+
+ setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
+ AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
+ setOperationAction(ISD::STORE, MVT::v2i64, Promote);
+ AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);
+
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);
+
+ setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
+ setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
+ setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
+ setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
+
+ // Vector operation legalization checks the result type of
+ // SIGN_EXTEND_INREG, overall legalization checks the inner type.
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
+
+ addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
+ }
}
- if (Subtarget->has64BitSupport()) {
+ if (Subtarget.has64BitSupport()) {
setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
}
@@ -507,6 +618,13 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
// Altivec instructions set fields to all zeros or all ones.
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
+ if (!isPPC64) {
+ // These libcalls are not available in 32-bit.
+ setLibcallName(RTLIB::SHL_I128, nullptr);
+ setLibcallName(RTLIB::SRL_I128, nullptr);
+ setLibcallName(RTLIB::SRA_I128, nullptr);
+ }
+
if (isPPC64) {
setStackPointerRegisterToSaveRestore(PPC::X1);
setExceptionPointerRegister(PPC::X3);
@@ -522,9 +640,21 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setTargetDAGCombine(ISD::LOAD);
setTargetDAGCombine(ISD::STORE);
setTargetDAGCombine(ISD::BR_CC);
+ if (Subtarget.useCRBits())
+ setTargetDAGCombine(ISD::BRCOND);
setTargetDAGCombine(ISD::BSWAP);
setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
+ setTargetDAGCombine(ISD::SIGN_EXTEND);
+ setTargetDAGCombine(ISD::ZERO_EXTEND);
+ setTargetDAGCombine(ISD::ANY_EXTEND);
+
+ if (Subtarget.useCRBits()) {
+ setTargetDAGCombine(ISD::TRUNCATE);
+ setTargetDAGCombine(ISD::SETCC);
+ setTargetDAGCombine(ISD::SELECT_CC);
+ }
+
// Use reciprocal estimates.
if (TM.Options.UnsafeFPMath) {
setTargetDAGCombine(ISD::FDIV);
@@ -532,7 +662,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
}
// Darwin long double math library functions have $LDBL128 appended.
- if (Subtarget->isDarwin()) {
+ if (Subtarget.isDarwin()) {
setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
@@ -545,18 +675,23 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
}
+ // With 32 condition bits, we don't need to sink (and duplicate) compares
+ // aggressively in CodeGenPrep.
+ if (Subtarget.useCRBits())
+ setHasMultipleConditionRegisters();
+
setMinFunctionAlignment(2);
- if (PPCSubTarget.isDarwin())
+ if (Subtarget.isDarwin())
setPrefFunctionAlignment(4);
- if (isPPC64 && Subtarget->isJITCodeModel())
+ if (isPPC64 && Subtarget.isJITCodeModel())
// Temporary workaround for the inability of PPC64 JIT to handle jump
// tables.
setSupportJumpTables(false);
setInsertFencesForAtomic(true);
- if (Subtarget->enableMachineScheduler())
+ if (Subtarget.enableMachineScheduler())
setSchedulingPreference(Sched::Source);
else
setSchedulingPreference(Sched::Hybrid);
@@ -565,8 +700,8 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
- if (Subtarget->getDarwinDirective() == PPC::DIR_E500mc ||
- Subtarget->getDarwinDirective() == PPC::DIR_E5500) {
+ if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
+ Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
MaxStoresPerMemset = 32;
MaxStoresPerMemsetOptSize = 16;
MaxStoresPerMemcpy = 32;
@@ -610,20 +745,20 @@ static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty) const {
// Darwin passes everything on 4 byte boundary.
- if (PPCSubTarget.isDarwin())
+ if (Subtarget.isDarwin())
return 4;
// 16byte and wider vectors are passed on 16byte boundary.
// The rest is 8 on PPC64 and 4 on PPC32 boundary.
- unsigned Align = PPCSubTarget.isPPC64() ? 8 : 4;
- if (PPCSubTarget.hasAltivec() || PPCSubTarget.hasQPX())
- getMaxByValAlign(Ty, Align, PPCSubTarget.hasQPX() ? 32 : 16);
+ unsigned Align = Subtarget.isPPC64() ? 8 : 4;
+ if (Subtarget.hasAltivec() || Subtarget.hasQPX())
+ getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
return Align;
}
const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
- default: return 0;
+ default: return nullptr;
case PPCISD::FSEL: return "PPCISD::FSEL";
case PPCISD::FCFID: return "PPCISD::FCFID";
case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
@@ -637,7 +772,6 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
case PPCISD::Hi: return "PPCISD::Hi";
case PPCISD::Lo: return "PPCISD::Lo";
case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
- case PPCISD::TOC_RESTORE: return "PPCISD::TOC_RESTORE";
case PPCISD::LOAD: return "PPCISD::LOAD";
case PPCISD::LOAD_TOC: return "PPCISD::LOAD_TOC";
case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
@@ -689,7 +823,7 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
EVT PPCTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
if (!VT.isVector())
- return MVT::i32;
+ return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
return VT.changeVectorElementTypeToInteger();
}
@@ -718,15 +852,29 @@ static bool isConstantOrUndef(int Op, int Val) {
/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
-bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
- if (!isUnary) {
+/// The ShuffleKind distinguishes between big-endian operations with
+/// two different inputs (0), either-endian operations with two identical
+/// inputs (1), and little-endian operations with two different inputs (2).
+/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
+bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
+ SelectionDAG &DAG) {
+ if (ShuffleKind == 0) {
+ if (DAG.getTarget().getDataLayout()->isLittleEndian())
+ return false;
for (unsigned i = 0; i != 16; ++i)
- if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
+ if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
return false;
- } else {
+ } else if (ShuffleKind == 2) {
+ if (!DAG.getTarget().getDataLayout()->isLittleEndian())
+ return false;
+ for (unsigned i = 0; i != 16; ++i)
+ if (!isConstantOrUndef(N->getMaskElt(i), i*2))
+ return false;
+ } else if (ShuffleKind == 1) {
+ unsigned j = DAG.getTarget().getDataLayout()->isLittleEndian() ? 0 : 1;
for (unsigned i = 0; i != 8; ++i)
- if (!isConstantOrUndef(N->getMaskElt(i), i*2+1) ||
- !isConstantOrUndef(N->getMaskElt(i+8), i*2+1))
+ if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) ||
+ !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
return false;
}
return true;
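isVPKUHUMShuffleMask now keys off a ShuffleKind plus the target's endianness instead of a single isUnary flag. A standalone sketch of the same byte-selection rule over a plain mask array, where -1 marks an undefined element (the helper and names are illustrative, not the LLVM API):

#include <cassert>

static bool constantOrUndef(int Op, int Val) { return Op < 0 || Op == Val; }

// Kind 0 = big-endian, two different inputs; kind 1 = unary, either
// endianness; kind 2 = little-endian with swapped inputs.
static bool isVPKUHUMMask(const int Mask[16], unsigned Kind, bool IsLE) {
  if (Kind == 0) {
    if (IsLE) return false;
    for (int i = 0; i != 16; ++i)
      if (!constantOrUndef(Mask[i], i * 2 + 1)) return false;
  } else if (Kind == 2) {
    if (!IsLE) return false;
    for (int i = 0; i != 16; ++i)
      if (!constantOrUndef(Mask[i], i * 2)) return false;
  } else if (Kind == 1) {
    int j = IsLE ? 0 : 1;
    for (int i = 0; i != 8; ++i)
      if (!constantOrUndef(Mask[i], i * 2 + j) ||
          !constantOrUndef(Mask[i + 8], i * 2 + j)) return false;
  }
  return true;
}

int main() {
  int BE[16], LE[16];
  for (int i = 0; i != 16; ++i) { BE[i] = i * 2 + 1; LE[i] = i * 2; }
  assert(isVPKUHUMMask(BE, 0, /*IsLE=*/false)); // odd bytes on big-endian
  assert(isVPKUHUMMask(LE, 2, /*IsLE=*/true));  // even bytes, swapped inputs
  return 0;
}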
@@ -734,18 +882,33 @@ bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
-bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
- if (!isUnary) {
+/// The ShuffleKind distinguishes between big-endian operations with
+/// two different inputs (0), either-endian operations with two identical
+/// inputs (1), and little-endian operations with two different inputs (2).
+/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
+bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
+ SelectionDAG &DAG) {
+ if (ShuffleKind == 0) {
+ if (DAG.getTarget().getDataLayout()->isLittleEndian())
+ return false;
for (unsigned i = 0; i != 16; i += 2)
if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) ||
!isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
return false;
- } else {
+ } else if (ShuffleKind == 2) {
+ if (!DAG.getTarget().getDataLayout()->isLittleEndian())
+ return false;
+ for (unsigned i = 0; i != 16; i += 2)
+ if (!isConstantOrUndef(N->getMaskElt(i ), i*2) ||
+ !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
+ return false;
+ } else if (ShuffleKind == 1) {
+ unsigned j = DAG.getTarget().getDataLayout()->isLittleEndian() ? 0 : 2;
for (unsigned i = 0; i != 8; i += 2)
- if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) ||
- !isConstantOrUndef(N->getMaskElt(i+1), i*2+3) ||
- !isConstantOrUndef(N->getMaskElt(i+8), i*2+2) ||
- !isConstantOrUndef(N->getMaskElt(i+9), i*2+3))
+ if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) ||
+ !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
+ !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) ||
+ !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
return false;
}
return true;
@@ -755,8 +918,8 @@ bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
unsigned LHSStart, unsigned RHSStart) {
- assert(N->getValueType(0) == MVT::v16i8 &&
- "PPC only supports shuffles by bytes!");
+ if (N->getValueType(0) != MVT::v16i8)
+ return false;
assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
"Unsupported merge size!");
@@ -772,29 +935,66 @@ static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
}
/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
-/// a VRGL* instruction with the specified unit size (1,2 or 4 bytes).
+/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
+/// The ShuffleKind distinguishes between big-endian merges with two
+/// different inputs (0), either-endian merges with two identical inputs (1),
+/// and little-endian merges with two different inputs (2). For the latter,
+/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
- bool isUnary) {
- if (!isUnary)
- return isVMerge(N, UnitSize, 8, 24);
- return isVMerge(N, UnitSize, 8, 8);
+ unsigned ShuffleKind, SelectionDAG &DAG) {
+ if (DAG.getTarget().getDataLayout()->isLittleEndian()) {
+ if (ShuffleKind == 1) // unary
+ return isVMerge(N, UnitSize, 0, 0);
+ else if (ShuffleKind == 2) // swapped
+ return isVMerge(N, UnitSize, 0, 16);
+ else
+ return false;
+ } else {
+ if (ShuffleKind == 1) // unary
+ return isVMerge(N, UnitSize, 8, 8);
+ else if (ShuffleKind == 0) // normal
+ return isVMerge(N, UnitSize, 8, 24);
+ else
+ return false;
+ }
}
/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
-/// a VRGH* instruction with the specified unit size (1,2 or 4 bytes).
+/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
+/// The ShuffleKind distinguishes between big-endian merges with two
+/// different inputs (0), either-endian merges with two identical inputs (1),
+/// and little-endian merges with two different inputs (2). For the latter,
+/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
- bool isUnary) {
- if (!isUnary)
- return isVMerge(N, UnitSize, 0, 16);
- return isVMerge(N, UnitSize, 0, 0);
+ unsigned ShuffleKind, SelectionDAG &DAG) {
+ if (DAG.getTarget().getDataLayout()->isLittleEndian()) {
+ if (ShuffleKind == 1) // unary
+ return isVMerge(N, UnitSize, 8, 8);
+ else if (ShuffleKind == 2) // swapped
+ return isVMerge(N, UnitSize, 8, 24);
+ else
+ return false;
+ } else {
+ if (ShuffleKind == 1) // unary
+ return isVMerge(N, UnitSize, 0, 0);
+ else if (ShuffleKind == 0) // normal
+ return isVMerge(N, UnitSize, 0, 16);
+ else
+ return false;
+ }
}
/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
-int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
- assert(N->getValueType(0) == MVT::v16i8 &&
- "PPC only supports shuffles by bytes!");
+/// The ShuffleKind distinguishes between big-endian operations with two
+/// different inputs (0), either-endian operations with two identical inputs
+/// (1), and little-endian operations with two different inputs (2). For the
+/// latter, the input operands are swapped (see PPCInstrAltivec.td).
+int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
+ SelectionDAG &DAG) {
+ if (N->getValueType(0) != MVT::v16i8)
+ return -1;
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
@@ -809,19 +1009,26 @@ int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
// numbered from this value.
unsigned ShiftAmt = SVOp->getMaskElt(i);
if (ShiftAmt < i) return -1;
+
ShiftAmt -= i;
+ bool isLE = DAG.getTarget().getDataLayout()->isLittleEndian();
- if (!isUnary) {
+ if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
// Check the rest of the elements to see if they are consecutive.
for (++i; i != 16; ++i)
if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
return -1;
- } else {
+ } else if (ShuffleKind == 1) {
// Check the rest of the elements to see if they are consecutive.
for (++i; i != 16; ++i)
if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
return -1;
- }
+ } else
+ return -1;
+
+ if (ShuffleKind == 2 && isLE)
+ ShiftAmt = 16 - ShiftAmt;
+
return ShiftAmt;
}
@@ -873,10 +1080,14 @@ bool PPC::isAllNegativeZeroVector(SDNode *N) {
/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
-unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
+unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
+ SelectionDAG &DAG) {
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
assert(isSplatShuffleMask(SVOp, EltSize));
- return SVOp->getMaskElt(0) / EltSize;
+ if (DAG.getTarget().getDataLayout()->isLittleEndian())
+ return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
+ else
+ return SVOp->getMaskElt(0) / EltSize;
}
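On little-endian targets getVSPLTImmediate flips the splat index because the lanes are numbered from the opposite end of the 16-byte register. A small arithmetic sketch of that flip (assuming 16-byte vectors, as in the code above; the function name is illustrative):

#include <cassert>

static unsigned splatImmediate(unsigned FirstMaskElt, unsigned EltSize,
                               bool IsLE) {
  unsigned Elt = FirstMaskElt / EltSize;
  return IsLE ? (16 / EltSize) - 1 - Elt : Elt;
}

int main() {
  // Word element 1 of a v4i32: big-endian keeps index 1, little-endian
  // renumbers it to 2.
  assert(splatImmediate(4, 4, /*IsLE=*/false) == 1);
  assert(splatImmediate(4, 4, /*IsLE=*/true)  == 2);
  // Halfword element 0 of a v8i16 becomes element 7 on little-endian.
  assert(splatImmediate(0, 2, /*IsLE=*/true)  == 7);
  return 0;
}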
/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
@@ -884,7 +1095,7 @@ unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
/// the constant being splatted. The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
- SDValue OpVal(0, 0);
+ SDValue OpVal(nullptr, 0);
// If ByteSize of the splat is bigger than the element size of the
// build_vector, then we have a case where we are checking for a splat where
@@ -903,7 +1114,7 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
- if (UniquedVals[i&(Multiple-1)].getNode() == 0)
+ if (!UniquedVals[i&(Multiple-1)].getNode())
UniquedVals[i&(Multiple-1)] = N->getOperand(i);
else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
return SDValue(); // no match.
@@ -918,21 +1129,21 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
bool LeadingZero = true;
bool LeadingOnes = true;
for (unsigned i = 0; i != Multiple-1; ++i) {
- if (UniquedVals[i].getNode() == 0) continue; // Must have been undefs.
+ if (!UniquedVals[i].getNode()) continue; // Must have been undefs.
LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
}
// Finally, check the least significant entry.
if (LeadingZero) {
- if (UniquedVals[Multiple-1].getNode() == 0)
+ if (!UniquedVals[Multiple-1].getNode())
return DAG.getTargetConstant(0, MVT::i32); // 0,0,0,undef
int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
if (Val < 16)
return DAG.getTargetConstant(Val, MVT::i32); // 0,0,0,4 -> vspltisw(4)
}
if (LeadingOnes) {
- if (UniquedVals[Multiple-1].getNode() == 0)
+ if (!UniquedVals[Multiple-1].getNode())
return DAG.getTargetConstant(~0U, MVT::i32); // -1,-1,-1,undef
int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2)
@@ -945,13 +1156,13 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
// Check to see if this buildvec has a single non-undef value in its elements.
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
- if (OpVal.getNode() == 0)
+ if (!OpVal.getNode())
OpVal = N->getOperand(i);
else if (OpVal != N->getOperand(i))
return SDValue();
}
- if (OpVal.getNode() == 0) return SDValue(); // All UNDEF: use implicit def.
+ if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def.
unsigned ValSizeInBytes = EltSize;
uint64_t Value = 0;
@@ -1000,7 +1211,7 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
/// sign extension from a 16-bit value. If so, this returns true and the
/// immediate.
static bool isIntS16Immediate(SDNode *N, short &Imm) {
- if (N->getOpcode() != ISD::Constant)
+ if (!isa<ConstantSDNode>(N))
return false;
Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
@@ -1039,12 +1250,12 @@ bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
// disjoint.
APInt LHSKnownZero, LHSKnownOne;
APInt RHSKnownZero, RHSKnownOne;
- DAG.ComputeMaskedBits(N.getOperand(0),
- LHSKnownZero, LHSKnownOne);
+ DAG.computeKnownBits(N.getOperand(0),
+ LHSKnownZero, LHSKnownOne);
if (LHSKnownZero.getBoolValue()) {
- DAG.ComputeMaskedBits(N.getOperand(1),
- RHSKnownZero, RHSKnownOne);
+ DAG.computeKnownBits(N.getOperand(1),
+ RHSKnownZero, RHSKnownOne);
// If all of the bits are known zero on the LHS or RHS, the add won't
// carry.
if (~(LHSKnownZero | RHSKnownZero) == 0) {
@@ -1144,12 +1355,18 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
// (for better address arithmetic) if the LHS and RHS of the OR are
// provably disjoint.
APInt LHSKnownZero, LHSKnownOne;
- DAG.ComputeMaskedBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);
+ DAG.computeKnownBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);
if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
// If all of the bits are known zero on the LHS or RHS, the add won't
// carry.
- Base = N.getOperand(0);
+ if (FrameIndexSDNode *FI =
+ dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
+ Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
+ fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
+ } else {
+ Base = N.getOperand(0);
+ }
Disp = DAG.getTargetConstant(imm, N.getValueType());
return true;
}
@@ -1162,7 +1379,7 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
short Imm;
if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) {
Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
- Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
+ Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
CN->getValueType(0));
return true;
}
@@ -1213,7 +1430,7 @@ bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
}
// Otherwise, do it the hard way, using R0 as the base register.
- Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
+ Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
N.getValueType());
Index = N;
return true;
@@ -1304,11 +1521,14 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
/// GetLabelAccessInfo - Return true if we should reference labels using a
/// PICBase, set the HiOpFlags and LoOpFlags to the target MO flags.
static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags,
- unsigned &LoOpFlags, const GlobalValue *GV = 0) {
+ unsigned &LoOpFlags,
+ const GlobalValue *GV = nullptr) {
HiOpFlags = PPCII::MO_HA;
LoOpFlags = PPCII::MO_LO;
+ // Don't use the pic base if not in PIC relocation model.
bool isPIC = TM.getRelocationModel() == Reloc::PIC_;
+
if (isPIC) {
HiOpFlags |= PPCII::MO_PIC_FLAG;
LoOpFlags |= PPCII::MO_PIC_FLAG;
@@ -1356,7 +1576,7 @@ SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
// 64-bit SVR4 ABI code is always position-independent.
// The actual address of the GlobalValue is stored in the TOC.
- if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
+ if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(CP), MVT::i64, GA,
DAG.getRegister(PPC::X2, MVT::i64));
@@ -1365,11 +1585,11 @@ SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
unsigned MOHiFlag, MOLoFlag;
bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
- if (isPIC && PPCSubTarget.isSVR4ABI()) {
+ if (isPIC && Subtarget.isSVR4ABI()) {
SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(),
PPCII::MO_PIC_FLAG);
SDLoc DL(CP);
- return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(CP), MVT::i32, GA,
+ return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i32, GA,
DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT));
}
@@ -1386,7 +1606,7 @@ SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
// 64-bit SVR4 ABI code is always position-independent.
// The actual address of the GlobalValue is stored in the TOC.
- if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
+ if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(JT), MVT::i64, GA,
DAG.getRegister(PPC::X2, MVT::i64));
@@ -1395,7 +1615,7 @@ SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
unsigned MOHiFlag, MOLoFlag;
bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
- if (isPIC && PPCSubTarget.isSVR4ABI()) {
+ if (isPIC && Subtarget.isSVR4ABI()) {
SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
PPCII::MO_PIC_FLAG);
SDLoc DL(GA);
@@ -1416,7 +1636,6 @@ SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
unsigned MOHiFlag, MOLoFlag;
bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
-
SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG);
@@ -1433,7 +1652,7 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
SDLoc dl(GA);
const GlobalValue *GV = GA->getGlobal();
EVT PtrVT = getPointerTy();
- bool is64bit = PPCSubTarget.isPPC64();
+ bool is64bit = Subtarget.isPPC64();
TLSModel::Model Model = getTargetMachine().getTLSModel(GV);
@@ -1538,7 +1757,7 @@ SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
// 64-bit SVR4 ABI code is always position-independent.
// The actual address of the GlobalValue is stored in the TOC.
- if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
+ if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i64, GA,
DAG.getRegister(PPC::X2, MVT::i64));
@@ -1547,7 +1766,7 @@ SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
unsigned MOHiFlag, MOLoFlag;
bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag, GV);
- if (isPIC && PPCSubTarget.isSVR4ABI()) {
+ if (isPIC && Subtarget.isSVR4ABI()) {
SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
GSDN->getOffset(),
PPCII::MO_PIC_FLAG);
@@ -1574,6 +1793,27 @@ SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
SDLoc dl(Op);
+ if (Op.getValueType() == MVT::v2i64) {
+ // When the operands themselves are v2i64 values, we need to do something
+ // special because VSX has no underlying comparison operations for these.
+ if (Op.getOperand(0).getValueType() == MVT::v2i64) {
+ // Equality can be handled by casting to the legal type for Altivec
+ // comparisons; everything else needs to be expanded.
+ if (CC == ISD::SETEQ || CC == ISD::SETNE) {
+ return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
+ DAG.getSetCC(dl, MVT::v4i32,
+ DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
+ DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
+ CC));
+ }
+
+ return SDValue();
+ }
+
+ // We handle most of these in the usual way.
+ return Op;
+ }
+
// If we're comparing for equality to zero, expose the fact that this is
// implemented as a ctlz/srl pair on PPC, so that the DAG combiner can
// fold the new nodes.
@@ -1767,17 +2007,13 @@ SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
Entry.Node = Nest; Args.push_back(Entry);
// Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
- TargetLowering::CallLoweringInfo CLI(Chain,
- Type::getVoidTy(*DAG.getContext()),
- false, false, false, false, 0,
- CallingConv::C,
- /*isTailCall=*/false,
- /*doesNotRet=*/false,
- /*isReturnValueUsed=*/true,
- DAG.getExternalSymbol("__trampoline_setup", PtrVT),
- Args, DAG, dl);
- std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl).setChain(Chain)
+ .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
+ DAG.getExternalSymbol("__trampoline_setup", PtrVT),
+ std::move(Args), 0);
+ std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
return CallResult.second;
}
@@ -1898,7 +2134,7 @@ bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags,
CCState &State) {
- static const uint16_t ArgRegs[] = {
+ static const MCPhysReg ArgRegs[] = {
PPC::R3, PPC::R4, PPC::R5, PPC::R6,
PPC::R7, PPC::R8, PPC::R9, PPC::R10,
};
@@ -1925,7 +2161,7 @@ bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags,
CCState &State) {
- static const uint16_t ArgRegs[] = {
+ static const MCPhysReg ArgRegs[] = {
PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
PPC::F8
};
@@ -1949,8 +2185,8 @@ bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
/// GetFPR - Get the set of FP registers that should be allocated for arguments,
/// on Darwin.
-static const uint16_t *GetFPR() {
- static const uint16_t FPR[] = {
+static const MCPhysReg *GetFPR() {
+ static const MCPhysReg FPR[] = {
PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
};
@@ -1962,14 +2198,119 @@ static const uint16_t *GetFPR() {
/// the stack.
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
unsigned PtrByteSize) {
- unsigned ArgSize = ArgVT.getSizeInBits()/8;
+ unsigned ArgSize = ArgVT.getStoreSize();
if (Flags.isByVal())
ArgSize = Flags.getByValSize();
- ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
+
+ // Round up to multiples of the pointer size, except for array members,
+ // which are always packed.
+ if (!Flags.isInConsecutiveRegs())
+ ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
return ArgSize;
}
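To make the rounding rule above concrete, here is a minimal standalone sketch using plain byte counts in place of EVT/ArgFlagsTy; the three example values are assumptions for illustration, not taken from this patch.

#include <cassert>

// Simplified model of CalculateStackSlotSize: StoreSize/ByValSize are byte counts.
static unsigned StackSlotSizeSketch(unsigned StoreSize, bool IsByVal,
                                    unsigned ByValSize, bool InConsecutiveRegs,
                                    unsigned PtrByteSize) {
  unsigned ArgSize = IsByVal ? ByValSize : StoreSize;
  // Round up to the pointer size unless the value is a packed array member.
  if (!InConsecutiveRegs)
    ArgSize = ((ArgSize + PtrByteSize - 1) / PtrByteSize) * PtrByteSize;
  return ArgSize;
}

int main() {
  assert(StackSlotSizeSketch(4, false, 0, false, 8) == 8);   // lone f32 takes a full doubleword
  assert(StackSlotSizeSketch(4, false, 0, true, 8) == 4);    // f32 array member stays packed
  assert(StackSlotSizeSketch(0, true, 12, false, 8) == 16);  // 12-byte byval rounds up to 16
  return 0;
}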
+/// CalculateStackSlotAlignment - Calculates the alignment of this argument
+/// on the stack.
+static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
+ ISD::ArgFlagsTy Flags,
+ unsigned PtrByteSize) {
+ unsigned Align = PtrByteSize;
+
+ // Altivec parameters are padded to a 16 byte boundary.
+ if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
+ ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
+ ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64)
+ Align = 16;
+
+ // ByVal parameters are aligned as requested.
+ if (Flags.isByVal()) {
+ unsigned BVAlign = Flags.getByValAlign();
+ if (BVAlign > PtrByteSize) {
+ if (BVAlign % PtrByteSize != 0)
+ llvm_unreachable(
+ "ByVal alignment is not a multiple of the pointer size");
+
+ Align = BVAlign;
+ }
+ }
+
+ // Array members are always packed to their original alignment.
+ if (Flags.isInConsecutiveRegs()) {
+ // If the array member was split into multiple registers, the first
+ // needs to be aligned to the size of the full type. (Except for
+ // ppcf128, which is aligned only to its f64 components.)
+ if (Flags.isSplit() && OrigVT != MVT::ppcf128)
+ Align = OrigVT.getStoreSize();
+ else
+ Align = ArgVT.getStoreSize();
+ }
+
+ return Align;
+}
+
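As a rough worked example of the alignment rules above (the concrete cases are assumed for illustration, with PtrByteSize = 8):

//   i64 scalar                                  -> 8  (pointer size)
//   v4f32 / v2i64 vector                        -> 16 (Altivec/VSX padding)
//   byval aggregate declared with align 32      -> 32 (requested byval alignment)
//   f32 member of a packed float array          -> 4  (its own store size)
//   first piece of a split 16-byte array member -> 16 (the full type's store size)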
+/// CalculateStackSlotUsed - Return whether this argument will use its
+/// stack slot (instead of being passed in registers). ArgOffset,
+/// AvailableFPRs, and AvailableVRs must hold the current argument
+/// position, and will be updated to account for this argument.
+static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
+ ISD::ArgFlagsTy Flags,
+ unsigned PtrByteSize,
+ unsigned LinkageSize,
+ unsigned ParamAreaSize,
+ unsigned &ArgOffset,
+ unsigned &AvailableFPRs,
+ unsigned &AvailableVRs) {
+ bool UseMemory = false;
+
+ // Respect alignment of argument on the stack.
+ unsigned Align =
+ CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
+ ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
+ // If there's no space left in the argument save area, we must
+ // use memory (this check also catches zero-sized arguments).
+ if (ArgOffset >= LinkageSize + ParamAreaSize)
+ UseMemory = true;
+
+ // Allocate argument on the stack.
+ ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
+ if (Flags.isInConsecutiveRegsLast())
+ ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
+ // If we overran the argument save area, we must use memory
+ // (this check catches arguments passed partially in memory)
+ if (ArgOffset > LinkageSize + ParamAreaSize)
+ UseMemory = true;
+
+ // However, if the argument is actually passed in an FPR or a VR,
+ // we don't use memory after all.
+ if (!Flags.isByVal()) {
+ if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
+ if (AvailableFPRs > 0) {
+ --AvailableFPRs;
+ return false;
+ }
+ if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
+ ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
+ ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64)
+ if (AvailableVRs > 0) {
+ --AvailableVRs;
+ return false;
+ }
+ }
+
+ return UseMemory;
+}
+
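A worked trace of this helper, assuming ELFv2-style numbers taken from elsewhere in this patch (LinkageSize = 32, ParamAreaSize = 8 GPRs x 8 bytes = 64, so the save area covers byte offsets 32..96); the argument list is made up for illustration:

//   args 1..8, all i64 : offsets 32..96, each starts inside the area -> returns false (GPRs)
//   arg 9, i64         : would start at offset 96, area exhausted    -> returns true (stack slot)
//   arg 10, double     : also past the area, but an FPR is still free,
//                        so AvailableFPRs is decremented             -> returns false (FPR)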
+/// EnsureStackAlignment - Round stack frame size up from NumBytes to
+/// ensure minimum alignment required for target.
+static unsigned EnsureStackAlignment(const TargetMachine &Target,
+ unsigned NumBytes) {
+ unsigned TargetAlign = Target.getFrameLowering()->getStackAlignment();
+ unsigned AlignMask = TargetAlign - 1;
+ NumBytes = (NumBytes + AlignMask) & ~AlignMask;
+ return NumBytes;
+}
+
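The rounding above is the usual power-of-two mask trick; a tiny standalone check with assumed values (a 16-byte stack alignment):

#include <cassert>

int main() {
  unsigned TargetAlign = 16;            // assumed getStackAlignment() result
  unsigned AlignMask = TargetAlign - 1;
  assert(((112 + AlignMask) & ~AlignMask) == 112);  // already aligned: unchanged
  assert(((116 + AlignMask) & ~AlignMask) == 128);  // rounded up to the next multiple of 16
  return 0;
}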
SDValue
PPCTargetLowering::LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
@@ -1978,8 +2319,8 @@ PPCTargetLowering::LowerFormalArguments(SDValue Chain,
SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals)
const {
- if (PPCSubTarget.isSVR4ABI()) {
- if (PPCSubTarget.isPPC64())
+ if (Subtarget.isSVR4ABI()) {
+ if (Subtarget.isPPC64())
return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
dl, DAG, InVals);
else
@@ -2045,7 +2386,8 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
getTargetMachine(), ArgLocs, *DAG.getContext());
// Reserve space for the linkage area on the stack.
- CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize);
+ unsigned LinkageSize = PPCFrameLowering::getLinkageSize(false, false, false);
+ CCInfo.AllocateStack(LinkageSize, PtrByteSize);
CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
@@ -2060,6 +2402,7 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
switch (ValVT.getSimpleVT().SimpleTy) {
default:
llvm_unreachable("ValVT not supported by formal arguments Lowering");
+ case MVT::i1:
case MVT::i32:
RC = &PPC::GPRCRegClass;
break;
@@ -2067,7 +2410,10 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
RC = &PPC::F4RCRegClass;
break;
case MVT::f64:
- RC = &PPC::F8RCRegClass;
+ if (Subtarget.hasVSX())
+ RC = &PPC::VSFRCRegClass;
+ else
+ RC = &PPC::F8RCRegClass;
break;
case MVT::v16i8:
case MVT::v8i16:
@@ -2075,18 +2421,26 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
case MVT::v4f32:
RC = &PPC::VRRCRegClass;
break;
+ case MVT::v2f64:
+ case MVT::v2i64:
+ RC = &PPC::VSHRCRegClass;
+ break;
}
// Transform the arguments stored in physical registers into virtual ones.
unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
- SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, ValVT);
+ SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
+ ValVT == MVT::i1 ? MVT::i32 : ValVT);
+
+ if (ValVT == MVT::i1)
+ ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
InVals.push_back(ArgValue);
} else {
// Argument stored in memory.
assert(VA.isMemLoc());
- unsigned ArgSize = VA.getLocVT().getSizeInBits() / 8;
+ unsigned ArgSize = VA.getLocVT().getStoreSize();
int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(),
isImmutable);
@@ -2112,36 +2466,27 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
// Area that is at least reserved in the caller of this function.
unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
+ MinReservedArea = std::max(MinReservedArea, LinkageSize);
// Set the size that is at least reserved in caller of this function. Tail
// call optimized function's reserved stack space needs to be aligned so that
// taking the difference between two stack areas will result in an aligned
// stack.
- PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
-
- MinReservedArea =
- std::max(MinReservedArea,
- PPCFrameLowering::getMinCallFrameSize(false, false));
-
- unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameLowering()->
- getStackAlignment();
- unsigned AlignMask = TargetAlign-1;
- MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
-
- FI->setMinReservedArea(MinReservedArea);
+ MinReservedArea = EnsureStackAlignment(MF.getTarget(), MinReservedArea);
+ FuncInfo->setMinReservedArea(MinReservedArea);
SmallVector<SDValue, 8> MemOps;
// If the function takes variable number of arguments, make a frame index for
// the start of the first vararg value... for expansion of llvm.va_start.
if (isVarArg) {
- static const uint16_t GPArgRegs[] = {
+ static const MCPhysReg GPArgRegs[] = {
PPC::R3, PPC::R4, PPC::R5, PPC::R6,
PPC::R7, PPC::R8, PPC::R9, PPC::R10,
};
const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
- static const uint16_t FPArgRegs[] = {
+ static const MCPhysReg FPArgRegs[] = {
PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
PPC::F8
};
@@ -2203,8 +2548,7 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
}
if (!MemOps.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl,
- MVT::Other, &MemOps[0], MemOps.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
return Chain;
}
@@ -2222,33 +2566,7 @@ PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
DAG.getValueType(ObjectVT));
- return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
-}
-
-// Set the size that is at least reserved in caller of this function. Tail
-// call optimized functions' reserved stack space needs to be aligned so that
-// taking the difference between two stack areas will result in an aligned
-// stack.
-void
-PPCTargetLowering::setMinReservedArea(MachineFunction &MF, SelectionDAG &DAG,
- unsigned nAltivecParamsAtEnd,
- unsigned MinReservedArea,
- bool isPPC64) const {
- PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
- // Add the Altivec parameters at the end, if needed.
- if (nAltivecParamsAtEnd) {
- MinReservedArea = ((MinReservedArea+15)/16)*16;
- MinReservedArea += 16*nAltivecParamsAtEnd;
- }
- MinReservedArea =
- std::max(MinReservedArea,
- PPCFrameLowering::getMinCallFrameSize(isPPC64, true));
- unsigned TargetAlign
- = DAG.getMachineFunction().getTarget().getFrameLowering()->
- getStackAlignment();
- unsigned AlignMask = TargetAlign-1;
- MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
- FI->setMinReservedArea(MinReservedArea);
+ return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
}
SDValue
@@ -2261,6 +2579,8 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
SmallVectorImpl<SDValue> &InVals) const {
// TODO: add description of PPC stack frame format, or at least some docs.
//
+ bool isELFv2ABI = Subtarget.isELFv2ABI();
+ bool isLittleEndian = Subtarget.isLittleEndian();
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
@@ -2271,63 +2591,75 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
(CallConv == CallingConv::Fast));
unsigned PtrByteSize = 8;
- unsigned ArgOffset = PPCFrameLowering::getLinkageSize(true, true);
- // Area that is at least reserved in caller of this function.
- unsigned MinReservedArea = ArgOffset;
+ unsigned LinkageSize = PPCFrameLowering::getLinkageSize(true, false,
+ isELFv2ABI);
- static const uint16_t GPR[] = {
+ static const MCPhysReg GPR[] = {
PPC::X3, PPC::X4, PPC::X5, PPC::X6,
PPC::X7, PPC::X8, PPC::X9, PPC::X10,
};
- static const uint16_t *FPR = GetFPR();
+ static const MCPhysReg *FPR = GetFPR();
- static const uint16_t VR[] = {
+ static const MCPhysReg VR[] = {
PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
};
+ static const MCPhysReg VSRH[] = {
+ PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8,
+ PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13
+ };
const unsigned Num_GPR_Regs = array_lengthof(GPR);
const unsigned Num_FPR_Regs = 13;
const unsigned Num_VR_Regs = array_lengthof(VR);
- unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
+ // Do a first pass over the arguments to determine whether the ABI
+ // guarantees that our caller has allocated the parameter save area
+ // on its stack frame. In the ELFv1 ABI, this is always the case;
+ // in the ELFv2 ABI, it is true if this is a vararg function or if
+ // any parameter is located in a stack slot.
+
+ bool HasParameterArea = !isELFv2ABI || isVarArg;
+ unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
+ unsigned NumBytes = LinkageSize;
+ unsigned AvailableFPRs = Num_FPR_Regs;
+ unsigned AvailableVRs = Num_VR_Regs;
+ for (unsigned i = 0, e = Ins.size(); i != e; ++i)
+ if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
+ PtrByteSize, LinkageSize, ParamAreaSize,
+ NumBytes, AvailableFPRs, AvailableVRs))
+ HasParameterArea = true;
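// For illustration (assumed examples, not from this change): under ELFv2 a
// non-vararg function taking just (double, double) keeps both values in FPRs,
// no argument ever reports UseMemory, and HasParameterArea stays false, so
// the ABI does not guarantee a parameter save area in the caller's frame.
// A function taking nine i64 values overflows the eight GPR doublewords, the
// ninth argument reports UseMemory, and HasParameterArea becomes true. Under
// ELFv1, or for any vararg function, the area is assumed present from the start.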
// Add DAG nodes to load the arguments or copy them out of registers. On
// entry to a function on PPC, the arguments start after the linkage area,
// although the first ones are often in registers.
+ unsigned ArgOffset = LinkageSize;
+ unsigned GPR_idx, FPR_idx = 0, VR_idx = 0;
SmallVector<SDValue, 8> MemOps;
- unsigned nAltivecParamsAtEnd = 0;
Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
unsigned CurArgIdx = 0;
for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
SDValue ArgVal;
bool needsLoad = false;
EVT ObjectVT = Ins[ArgNo].VT;
- unsigned ObjSize = ObjectVT.getSizeInBits()/8;
+ EVT OrigVT = Ins[ArgNo].ArgVT;
+ unsigned ObjSize = ObjectVT.getStoreSize();
unsigned ArgSize = ObjSize;
ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx);
CurArgIdx = Ins[ArgNo].OrigArgIndex;
+ /* Respect alignment of argument on the stack. */
+ unsigned Align =
+ CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
+ ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
unsigned CurArgOffset = ArgOffset;
- // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
- if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
- ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
- if (isVarArg) {
- MinReservedArea = ((MinReservedArea+15)/16)*16;
- MinReservedArea += CalculateStackSlotSize(ObjectVT,
- Flags,
- PtrByteSize);
- } else
- nAltivecParamsAtEnd++;
- } else
- // Calculate min reserved area.
- MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
- Flags,
- PtrByteSize);
+ /* Compute GPR index associated with argument offset. */
+ GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
+ GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
// FIXME the codegen can be much improved in some cases.
// We do not have to keep everything in memory.
@@ -2349,21 +2681,31 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
continue;
}
- unsigned BVAlign = Flags.getByValAlign();
- if (BVAlign > 8) {
- ArgOffset = ((ArgOffset+BVAlign-1)/BVAlign)*BVAlign;
- CurArgOffset = ArgOffset;
- }
-
- // All aggregates smaller than 8 bytes must be passed right-justified.
- if (ObjSize < PtrByteSize)
- CurArgOffset = CurArgOffset + (PtrByteSize - ObjSize);
- // The value of the object is its address.
- int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true);
+ // Create a stack object covering all stack doublewords occupied
+ // by the argument. If the argument is (fully or partially) on
+ // the stack, or if the argument is fully in registers but the
+ // caller has allocated the parameter save area anyway, we can refer
+ // directly to the caller's stack frame. Otherwise, create a
+ // local copy in our own frame.
+ int FI;
+ if (HasParameterArea ||
+ ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
+ FI = MFI->CreateFixedObject(ArgSize, ArgOffset, true);
+ else
+ FI = MFI->CreateStackObject(ArgSize, Align, false);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
- InVals.push_back(FIN);
- if (ObjSize < 8) {
+ // Handle aggregates smaller than 8 bytes.
+ if (ObjSize < PtrByteSize) {
+ // The value of the object is its address, which differs from the
+ // address of the enclosing doubleword on big-endian systems.
+ SDValue Arg = FIN;
+ if (!isLittleEndian) {
+ SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, PtrVT);
+ Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
+ }
+ InVals.push_back(Arg);
+
if (GPR_idx != Num_GPR_Regs) {
unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
@@ -2372,25 +2714,19 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
EVT ObjType = (ObjSize == 1 ? MVT::i8 :
(ObjSize == 2 ? MVT::i16 : MVT::i32));
- Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
+ Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
MachinePointerInfo(FuncArg),
ObjType, false, false, 0);
} else {
// For sizes that don't fit a truncating store (3, 5, 6, 7),
// store the whole register as-is to the parameter save area
- // slot. The address of the parameter was already calculated
- // above (InVals.push_back(FIN)) to be the right-justified
- // offset within the slot. For this store, we need a new
- // frame index that points at the beginning of the slot.
- int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
- SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
+ // slot.
Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
MachinePointerInfo(FuncArg),
false, false, 0);
}
MemOps.push_back(Store);
- ++GPR_idx;
}
// Whether we copied from a register or not, advance the offset
// into the parameter save area by a full doubleword.
@@ -2398,44 +2734,48 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
continue;
}
+ // The value of the object is its address, which is the address of
+ // its first stack doubleword.
+ InVals.push_back(FIN);
+
+ // Store whatever pieces of the object are in registers to memory.
for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
- // Store whatever pieces of the object are in registers
- // to memory. ArgOffset will be the address of the beginning
- // of the object.
- if (GPR_idx != Num_GPR_Regs) {
- unsigned VReg;
- VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
- int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
- SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
- SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
- SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
- MachinePointerInfo(FuncArg, j),
- false, false, 0);
- MemOps.push_back(Store);
- ++GPR_idx;
- ArgOffset += PtrByteSize;
- } else {
- ArgOffset += ArgSize - j;
+ if (GPR_idx == Num_GPR_Regs)
break;
+
+ unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
+ SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
+ SDValue Addr = FIN;
+ if (j) {
+ SDValue Off = DAG.getConstant(j, PtrVT);
+ Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
}
+ SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
+ MachinePointerInfo(FuncArg, j),
+ false, false, 0);
+ MemOps.push_back(Store);
+ ++GPR_idx;
}
+ ArgOffset += ArgSize;
continue;
}
switch (ObjectVT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unhandled argument type!");
+ case MVT::i1:
case MVT::i32:
case MVT::i64:
+ // These can be scalar arguments or elements of an integer array type
+ // passed directly. Clang may use those instead of "byval" aggregate
+ // types to avoid forcing arguments to memory unnecessarily.
if (GPR_idx != Num_GPR_Regs) {
unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
- if (ObjectVT == MVT::i32)
+ if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
// PPC64 passes i8, i16, and i32 values in i64 registers. Promote
// value to MVT::i64 and then truncate to the correct register size.
ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
-
- ++GPR_idx;
} else {
needsLoad = true;
ArgSize = PtrByteSize;
@@ -2445,63 +2785,76 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
case MVT::f32:
case MVT::f64:
- // Every 8 bytes of argument space consumes one of the GPRs available for
- // argument passing.
- if (GPR_idx != Num_GPR_Regs) {
- ++GPR_idx;
- }
+ // These can be scalar arguments or elements of a float array type
+ // passed directly. The latter are used to implement ELFv2 homogeneous
+ // float aggregates.
if (FPR_idx != Num_FPR_Regs) {
unsigned VReg;
if (ObjectVT == MVT::f32)
VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
else
- VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
+ VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() ?
+ &PPC::VSFRCRegClass :
+ &PPC::F8RCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
++FPR_idx;
+ } else if (GPR_idx != Num_GPR_Regs) {
+ // This can only ever happen in the presence of f32 array types,
+ // since otherwise we never run out of FPRs before running out
+ // of GPRs.
+ unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
+ ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
+
+ if (ObjectVT == MVT::f32) {
+ if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
+ ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
+ DAG.getConstant(32, MVT::i32));
+ ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
+ }
+
+ ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
} else {
needsLoad = true;
- ArgSize = PtrByteSize;
}
- ArgOffset += 8;
+ // When passing an array of floats, the array occupies consecutive
+ // space in the argument area; only round up to the next doubleword
+ // at the end of the array. Otherwise, each float takes 8 bytes.
+ ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
+ ArgOffset += ArgSize;
+ if (Flags.isInConsecutiveRegsLast())
+ ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
break;
case MVT::v4f32:
case MVT::v4i32:
case MVT::v8i16:
case MVT::v16i8:
- // Note that vector arguments in registers don't reserve stack space,
- // except in varargs functions.
+ case MVT::v2f64:
+ case MVT::v2i64:
+ // These can be scalar arguments or elements of a vector array type
+ // passed directly. The latter are used to implement ELFv2 homogeneous
+ // vector aggregates.
if (VR_idx != Num_VR_Regs) {
- unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
+ unsigned VReg = (ObjectVT == MVT::v2f64 || ObjectVT == MVT::v2i64) ?
+ MF.addLiveIn(VSRH[VR_idx], &PPC::VSHRCRegClass) :
+ MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
- if (isVarArg) {
- while ((ArgOffset % 16) != 0) {
- ArgOffset += PtrByteSize;
- if (GPR_idx != Num_GPR_Regs)
- GPR_idx++;
- }
- ArgOffset += 16;
- GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
- }
++VR_idx;
} else {
- // Vectors are aligned.
- ArgOffset = ((ArgOffset+15)/16)*16;
- CurArgOffset = ArgOffset;
- ArgOffset += 16;
needsLoad = true;
}
+ ArgOffset += 16;
break;
}
// We need to load the argument to a virtual register if we determined
// above that we ran out of physical registers of the appropriate type.
if (needsLoad) {
- int FI = MFI->CreateFixedObject(ObjSize,
- CurArgOffset + (ArgSize - ObjSize),
- isImmutable);
+ if (ObjSize < ArgSize && !isLittleEndian)
+ CurArgOffset += ArgSize - ObjSize;
+ int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
false, false, false, 0);
@@ -2510,11 +2863,19 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
InVals.push_back(ArgVal);
}
+ // Area that is at least reserved in the caller of this function.
+ unsigned MinReservedArea;
+ if (HasParameterArea)
+ MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
+ else
+ MinReservedArea = LinkageSize;
+
// Set the size that is at least reserved in caller of this function. Tail
// call optimized functions' reserved stack space needs to be aligned so that
// taking the difference between two stack areas will result in an aligned
// stack.
- setMinReservedArea(MF, DAG, nAltivecParamsAtEnd, MinReservedArea, true);
+ MinReservedArea = EnsureStackAlignment(MF.getTarget(), MinReservedArea);
+ FuncInfo->setMinReservedArea(MinReservedArea);
// If the function takes variable number of arguments, make a frame index for
// the start of the first vararg value... for expansion of llvm.va_start.
@@ -2528,7 +2889,8 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
// If this function is vararg, store any remaining integer argument regs
// to their spots on the stack so that they may be loaded by dereferencing the
// result of va_next.
- for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
+ for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
+ GPR_idx < Num_GPR_Regs; ++GPR_idx) {
unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
@@ -2541,8 +2903,7 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
}
if (!MemOps.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl,
- MVT::Other, &MemOps[0], MemOps.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
return Chain;
}
@@ -2568,22 +2929,24 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
(CallConv == CallingConv::Fast));
unsigned PtrByteSize = isPPC64 ? 8 : 4;
- unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true);
+ unsigned LinkageSize = PPCFrameLowering::getLinkageSize(isPPC64, true,
+ false);
+ unsigned ArgOffset = LinkageSize;
// Area that is at least reserved in caller of this function.
unsigned MinReservedArea = ArgOffset;
- static const uint16_t GPR_32[] = { // 32-bit registers.
+ static const MCPhysReg GPR_32[] = { // 32-bit registers.
PPC::R3, PPC::R4, PPC::R5, PPC::R6,
PPC::R7, PPC::R8, PPC::R9, PPC::R10,
};
- static const uint16_t GPR_64[] = { // 64-bit registers.
+ static const MCPhysReg GPR_64[] = { // 64-bit registers.
PPC::X3, PPC::X4, PPC::X5, PPC::X6,
PPC::X7, PPC::X8, PPC::X9, PPC::X10,
};
- static const uint16_t *FPR = GetFPR();
+ static const MCPhysReg *FPR = GetFPR();
- static const uint16_t VR[] = {
+ static const MCPhysReg VR[] = {
PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
};
@@ -2594,7 +2957,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
- const uint16_t *GPR = isPPC64 ? GPR_64 : GPR_32;
+ const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
// In 32-bit non-varargs functions, the stack space for vectors is after the
// stack space for non-vectors. We do not use this space unless we have
@@ -2621,6 +2984,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
switch(ObjectVT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unhandled argument type!");
+ case MVT::i1:
case MVT::i32:
case MVT::f32:
VecArgOffset += 4;
@@ -2744,11 +3108,16 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
switch (ObjectVT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unhandled argument type!");
+ case MVT::i1:
case MVT::i32:
if (!isPPC64) {
if (GPR_idx != Num_GPR_Regs) {
unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
+
+ if (ObjectVT == MVT::i1)
+ ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);
+
++GPR_idx;
} else {
needsLoad = true;
@@ -2764,7 +3133,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
- if (ObjectVT == MVT::i32)
+ if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
// PPC64 passes i8, i16, and i32 values in i64 registers. Promote
// value to MVT::i64 and then truncate to the correct register size.
ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
@@ -2853,11 +3222,21 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
InVals.push_back(ArgVal);
}
+ // Allow for Altivec parameters at the end, if needed.
+ if (nAltivecParamsAtEnd) {
+ MinReservedArea = ((MinReservedArea+15)/16)*16;
+ MinReservedArea += 16*nAltivecParamsAtEnd;
+ }
+
+ // Area that is at least reserved in the caller of this function.
+ MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
+
// Set the size that is at least reserved in caller of this function. Tail
// call optimized functions' reserved stack space needs to be aligned so that
// taking the difference between two stack areas will result in an aligned
// stack.
- setMinReservedArea(MF, DAG, nAltivecParamsAtEnd, MinReservedArea, isPPC64);
+ MinReservedArea = EnsureStackAlignment(MF.getTarget(), MinReservedArea);
+ FuncInfo->setMinReservedArea(MinReservedArea);
// If the function takes variable number of arguments, make a frame index for
// the start of the first vararg value... for expansion of llvm.va_start.
@@ -2891,80 +3270,11 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
}
if (!MemOps.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl,
- MVT::Other, &MemOps[0], MemOps.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
return Chain;
}
-/// CalculateParameterAndLinkageAreaSize - Get the size of the parameter plus
-/// linkage area for the Darwin ABI, or the 64-bit SVR4 ABI.
-static unsigned
-CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
- bool isPPC64,
- bool isVarArg,
- unsigned CC,
- const SmallVectorImpl<ISD::OutputArg>
- &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- unsigned &nAltivecParamsAtEnd) {
- // Count how many bytes are to be pushed on the stack, including the linkage
- // area, and parameter passing area. We start with 24/48 bytes, which is
- // prereserved space for [SP][CR][LR][3 x unused].
- unsigned NumBytes = PPCFrameLowering::getLinkageSize(isPPC64, true);
- unsigned NumOps = Outs.size();
- unsigned PtrByteSize = isPPC64 ? 8 : 4;
-
- // Add up all the space actually used.
- // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
- // they all go in registers, but we must reserve stack space for them for
- // possible use by the caller. In varargs or 64-bit calls, parameters are
- // assigned stack space in order, with padding so Altivec parameters are
- // 16-byte aligned.
- nAltivecParamsAtEnd = 0;
- for (unsigned i = 0; i != NumOps; ++i) {
- ISD::ArgFlagsTy Flags = Outs[i].Flags;
- EVT ArgVT = Outs[i].VT;
- // Varargs Altivec parameters are padded to a 16 byte boundary.
- if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 ||
- ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) {
- if (!isVarArg && !isPPC64) {
- // Non-varargs Altivec parameters go after all the non-Altivec
- // parameters; handle those later so we know how much padding we need.
- nAltivecParamsAtEnd++;
- continue;
- }
- // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
- NumBytes = ((NumBytes+15)/16)*16;
- }
- NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
- }
-
- // Allow for Altivec parameters at the end, if needed.
- if (nAltivecParamsAtEnd) {
- NumBytes = ((NumBytes+15)/16)*16;
- NumBytes += 16*nAltivecParamsAtEnd;
- }
-
- // The prolog code of the callee may store up to 8 GPR argument registers to
- // the stack, allowing va_start to index over them in memory if its varargs.
- // Because we cannot tell if this is needed on the caller side, we have to
- // conservatively assume that it is needed. As such, make sure we have at
- // least enough stack space for the caller to store the 8 GPRs.
- NumBytes = std::max(NumBytes,
- PPCFrameLowering::getMinCallFrameSize(isPPC64, true));
-
- // Tail call needs the stack to be aligned.
- if (CC == CallingConv::Fast && DAG.getTarget().Options.GuaranteedTailCallOpt){
- unsigned TargetAlign = DAG.getMachineFunction().getTarget().
- getFrameLowering()->getStackAlignment();
- unsigned AlignMask = TargetAlign-1;
- NumBytes = (NumBytes + AlignMask) & ~AlignMask;
- }
-
- return NumBytes;
-}
-
/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
/// adjusted to accommodate the arguments for the tailcall.
static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
@@ -3007,7 +3317,7 @@ PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
if (Flags.isByVal()) return false;
}
- // Non PIC/GOT tail calls are supported.
+ // Non-PIC/GOT tail calls are supported.
if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
return true;
@@ -3025,12 +3335,12 @@ PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
/// 32-bit value is representable in the immediate field of a BxA instruction.
static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
- if (!C) return 0;
+ if (!C) return nullptr;
int Addr = C->getZExtValue();
if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
SignExtend32<26>(Addr) != Addr)
- return 0; // Top 6 bits have to be sext of immediate.
+ return nullptr; // Top 6 bits have to be sext of immediate.
return DAG.getConstant((int)C->getZExtValue() >> 2,
DAG.getTargetLoweringInfo().getPointerTy()).getNode();
@@ -3136,7 +3446,7 @@ SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
SDLoc dl) const {
if (SPDiff) {
// Load the LR and FP stack slot for later adjusting.
- EVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32;
+ EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
LROpOut = getReturnAddrFrameIndex(DAG);
LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(),
false, false, false, 0);
@@ -3166,8 +3476,8 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
SDLoc dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
- false, false, MachinePointerInfo(0),
- MachinePointerInfo(0));
+ false, false, MachinePointerInfo(),
+ MachinePointerInfo());
}
/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
@@ -3212,8 +3522,7 @@ void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
MemOpChains2, dl);
if (!MemOpChains2.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOpChains2[0], MemOpChains2.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
// Store the return address to the appropriate stack slot.
Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff,
@@ -3230,10 +3539,11 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
SDValue &Chain, SDLoc dl, int SPDiff, bool isTailCall,
SmallVectorImpl<std::pair<unsigned, SDValue> > &RegsToPass,
SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys,
- const PPCSubtarget &PPCSubTarget) {
+ const PPCSubtarget &Subtarget) {
- bool isPPC64 = PPCSubTarget.isPPC64();
- bool isSVR4ABI = PPCSubTarget.isSVR4ABI();
+ bool isPPC64 = Subtarget.isPPC64();
+ bool isSVR4ABI = Subtarget.isSVR4ABI();
+ bool isELFv2ABI = Subtarget.isELFv2ABI();
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
NodeTys.push_back(MVT::Other); // Returns a chain
@@ -3242,11 +3552,12 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
unsigned CallOpc = PPCISD::CALL;
bool needIndirectCall = true;
- if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) {
- // If this is an absolute destination address, use the munged value.
- Callee = SDValue(Dest, 0);
- needIndirectCall = false;
- }
+ if (!isSVR4ABI || !isPPC64)
+ if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) {
+ // If this is an absolute destination address, use the munged value.
+ Callee = SDValue(Dest, 0);
+ needIndirectCall = false;
+ }
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
// XXX Work around for http://llvm.org/bugs/show_bug.cgi?id=5201
@@ -3255,11 +3566,11 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
if (!DAG.getTarget().getSubtarget<PPCSubtarget>().isJITCodeModel()) {
unsigned OpFlags = 0;
if ((DAG.getTarget().getRelocationModel() != Reloc::Static &&
- (PPCSubTarget.getTargetTriple().isMacOSX() &&
- PPCSubTarget.getTargetTriple().isMacOSXVersionLT(10, 5)) &&
+ (Subtarget.getTargetTriple().isMacOSX() &&
+ Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5)) &&
(G->getGlobal()->isDeclaration() ||
G->getGlobal()->isWeakForLinker())) ||
- (PPCSubTarget.isTargetELF() && !isPPC64 &&
+ (Subtarget.isTargetELF() && !isPPC64 &&
!G->getGlobal()->hasLocalLinkage() &&
DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
// PC-relative references to external symbols should go through $stub,
@@ -3281,9 +3592,11 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
unsigned char OpFlags = 0;
- if (DAG.getTarget().getRelocationModel() != Reloc::Static &&
- (PPCSubTarget.getTargetTriple().isMacOSX() &&
- PPCSubTarget.getTargetTriple().isMacOSXVersionLT(10, 5))) {
+ if ((DAG.getTarget().getRelocationModel() != Reloc::Static &&
+ (Subtarget.getTargetTriple().isMacOSX() &&
+ Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5))) ||
+ (Subtarget.isTargetELF() && !isPPC64 &&
+ DAG.getTarget().getRelocationModel() == Reloc::PIC_) ) {
// PC-relative references to external symbols should go through $stub,
// unless we're building with the leopard linker or later, which
// automatically synthesizes these stubs.
@@ -3300,7 +3613,7 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
// to do the call, we can't use PPCISD::CALL.
SDValue MTCTROps[] = {Chain, Callee, InFlag};
- if (isSVR4ABI && isPPC64) {
+ if (isSVR4ABI && isPPC64 && !isELFv2ABI) {
// Function pointers in the 64-bit SVR4 ABI do not point to the function
// entry point, but to the function descriptor (the function entry point
// address is part of the function descriptor though).
@@ -3330,8 +3643,8 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
// Load the address of the function entry point from the function
// descriptor.
SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other, MVT::Glue);
- SDValue LoadFuncPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, MTCTROps,
- InFlag.getNode() ? 3 : 2);
+ SDValue LoadFuncPtr = DAG.getNode(PPCISD::LOAD, dl, VTs,
+ makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2));
Chain = LoadFuncPtr.getValue(1);
InFlag = LoadFuncPtr.getValue(2);
@@ -3357,8 +3670,10 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
// additional register being allocated and an unnecessary move instruction
// being generated.
VTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue TOCOff = DAG.getIntPtrConstant(8);
+ SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff);
SDValue LoadTOCPtr = DAG.getNode(PPCISD::LOAD_TOC, dl, VTs, Chain,
- Callee, InFlag);
+ AddTOC, InFlag);
Chain = LoadTOCPtr.getValue(0);
InFlag = LoadTOCPtr.getValue(1);
@@ -3367,8 +3682,8 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
MTCTROps[2] = InFlag;
}
- Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, MTCTROps,
- 2 + (InFlag.getNode() != 0));
+ Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys,
+ makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2));
InFlag = Chain.getValue(1);
NodeTys.clear();
@@ -3376,9 +3691,9 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
NodeTys.push_back(MVT::Glue);
Ops.push_back(Chain);
CallOpc = PPCISD::BCTRL;
- Callee.setNode(0);
+ Callee.setNode(nullptr);
// Add use of X11 (holding environment pointer)
- if (isSVR4ABI && isPPC64)
+ if (isSVR4ABI && isPPC64 && !isELFv2ABI)
Ops.push_back(DAG.getRegister(PPC::X11, PtrVT));
// Add CTR register as callee so a bctr can be emitted later.
if (isTailCall)
@@ -3400,6 +3715,10 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
Ops.push_back(DAG.getRegister(RegsToPass[i].first,
RegsToPass[i].second.getValueType()));
+ // Direct calls in the ELFv2 ABI need the TOC register live into the call.
+ if (Callee.getNode() && isELFv2ABI)
+ Ops.push_back(DAG.getRegister(PPC::X2, PtrVT));
+
return CallOpc;
}
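For context on the indirect-call path above, a rough sketch of the ELFv1 function descriptor that the loads at offsets 0 and 8 (and the X11 environment pointer) walk; the struct is illustrative only, not a type used by this code, and the environment-pointer slot is an assumption from the ABI rather than something shown in this diff:

#include <cstdint>

struct ELFv1FunctionDescriptorSketch {
  uint64_t EntryPoint;   // loaded first and moved to CTR for the indirect branch
  uint64_t TOCPointer;   // loaded from descriptor + 8 via PPCISD::LOAD_TOC
  uint64_t Environment;  // made available to the callee in X11
};
// Under ELFv2 there is no descriptor: the callee is entered directly, and the
// caller instead keeps X2 (the TOC pointer) live across direct calls, as the
// added code at the end of PrepareCall does.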
@@ -3469,14 +3788,16 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl,
int SPDiff, unsigned NumBytes,
const SmallVectorImpl<ISD::InputArg> &Ins,
SmallVectorImpl<SDValue> &InVals) const {
+
+ bool isELFv2ABI = Subtarget.isELFv2ABI();
std::vector<EVT> NodeTys;
SmallVector<SDValue, 8> Ops;
unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff,
isTailCall, RegsToPass, Ops, NodeTys,
- PPCSubTarget);
+ Subtarget);
// Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
- if (isVarArg && PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64())
+ if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64())
Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
// When performing tail call optimization the callee pops its arguments off
@@ -3504,7 +3825,7 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl,
isa<ConstantSDNode>(Callee)) &&
"Expecting a global address, external symbol, absolute value or register");
- return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, &Ops[0], Ops.size());
+ return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
}
// Add a NOP immediately after the branch instruction when using the 64-bit
@@ -3517,7 +3838,7 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl,
// same TOC), the NOP will remain unchanged.
bool needsTOCRestore = false;
- if (!isTailCall && PPCSubTarget.isSVR4ABI()&& PPCSubTarget.isPPC64()) {
+ if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
if (CallOpc == PPCISD::BCTRL) {
// This is a call through a function pointer.
// Restore the caller TOC from the save area into R2.
@@ -3537,12 +3858,17 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl,
}
}
- Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
+ Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
InFlag = Chain.getValue(1);
if (needsTOCRestore) {
SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
- Chain = DAG.getNode(PPCISD::TOC_RESTORE, dl, VTs, Chain, InFlag);
+ EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
+ unsigned TOCSaveOffset = PPCFrameLowering::getTOCSaveOffset(isELFv2ABI);
+ SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset);
+ SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);
+ Chain = DAG.getNode(PPCISD::LOAD_TOC, dl, VTs, Chain, AddTOC, InFlag);
InFlag = Chain.getValue(1);
}
@@ -3574,8 +3900,12 @@ PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
Ins, DAG);
- if (PPCSubTarget.isSVR4ABI()) {
- if (PPCSubTarget.isPPC64())
+ if (!isTailCall && CLI.CS && CLI.CS->isMustTailCall())
+ report_fatal_error("failed to perform tail call elimination on a call "
+ "site marked musttail");
+
+ if (Subtarget.isSVR4ABI()) {
+ if (Subtarget.isPPC64())
return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
isTailCall, Outs, OutVals, Ins,
dl, DAG, InVals);
@@ -3628,7 +3958,8 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
getTargetMachine(), ArgLocs, *DAG.getContext());
// Reserve space for the linkage area on the stack.
- CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize);
+ CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false, false),
+ PtrByteSize);
if (isVarArg) {
// Handle fixed and variable vector arguments differently.
@@ -3654,7 +3985,7 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
errs() << "Call operand #" << i << " has unhandled type "
<< EVT(ArgVT).getEVTString() << "\n";
#endif
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
} else {
@@ -3748,6 +4079,9 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
}
if (VA.isRegLoc()) {
+ if (Arg.getValueType() == MVT::i1)
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg);
+
seenFloatArg |= VA.getLocVT().isFloatingPoint();
// Put argument in a physical register.
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
@@ -3772,8 +4106,7 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
}
if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
// Build a sequence of copy-to-reg nodes chained together with token chain
// and flag operands which copy the outgoing args into the appropriate regs.
@@ -3791,7 +4124,7 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
SDValue Ops[] = { Chain, InFlag };
Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
- dl, VTs, Ops, InFlag.getNode() ? 2 : 1);
+ dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
InFlag = Chain.getValue(1);
}
@@ -3835,6 +4168,8 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
+ bool isELFv2ABI = Subtarget.isELFv2ABI();
+ bool isLittleEndian = Subtarget.isLittleEndian();
unsigned NumOps = Outs.size();
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
@@ -3851,16 +4186,44 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
CallConv == CallingConv::Fast)
MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
- unsigned nAltivecParamsAtEnd = 0;
-
// Count how many bytes are to be pushed on the stack, including the linkage
- // area, and parameter passing area. We start with at least 48 bytes, which
- // is reserved space for [SP][CR][LR][3 x unused].
- // NOTE: For PPC64, nAltivecParamsAtEnd always remains zero as a result
- // of this call.
- unsigned NumBytes =
- CalculateParameterAndLinkageAreaSize(DAG, true, isVarArg, CallConv,
- Outs, OutVals, nAltivecParamsAtEnd);
+ // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
+ // of reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, it is
+ // 32 bytes of reserved space for [SP][CR][LR][TOC].
+ unsigned LinkageSize = PPCFrameLowering::getLinkageSize(true, false,
+ isELFv2ABI);
+ unsigned NumBytes = LinkageSize;
+
+ // Add up all the space actually used.
+ for (unsigned i = 0; i != NumOps; ++i) {
+ ISD::ArgFlagsTy Flags = Outs[i].Flags;
+ EVT ArgVT = Outs[i].VT;
+ EVT OrigVT = Outs[i].ArgVT;
+
+ /* Respect alignment of argument on the stack. */
+ unsigned Align =
+ CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
+ NumBytes = ((NumBytes + Align - 1) / Align) * Align;
+
+ NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
+ if (Flags.isInConsecutiveRegsLast())
+ NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
+ }
+
+ unsigned NumBytesActuallyUsed = NumBytes;
+
+ // The prolog code of the callee may store up to 8 GPR argument registers to
+ // the stack, allowing va_start to index over them in memory if it is varargs.
+ // Because we cannot tell if this is needed on the caller side, we have to
+ // conservatively assume that it is needed. As such, make sure we have at
+ // least enough stack space for the caller to store the 8 GPRs.
+ // FIXME: On ELFv2, it may be unnecessary to allocate the parameter area.
+ NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
+
+ // Tail call needs the stack to be aligned.
+ if (getTargetMachine().Options.GuaranteedTailCallOpt &&
+ CallConv == CallingConv::Fast)
+ NumBytes = EnsureStackAlignment(MF.getTarget(), NumBytes);
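// Worked instance of the computation above, assuming ELFv2 (LinkageSize = 32)
// and an illustrative call passing (i64, double, <4 x i32>):
//   NumBytes starts at 32 (linkage area)
//   i64   : align 8,  32 -> 40
//   double: align 8,  40 -> 48
//   v4i32 : align 16, 48 -> 64
//   NumBytesActuallyUsed = 64, but the caller still conservatively reserves
//   room for all eight GPR doublewords: NumBytes = max(64, 32 + 8*8) = 96.
//   EnsureStackAlignment only runs for guaranteed fast-CC tail calls.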
// Calculate by how many bytes the stack has to be adjusted in case of tail
// call optimization.
@@ -3892,19 +4255,24 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
// memory. Also, if this is a vararg function, floating point operations
// must be stored to our stack, and loaded into integer regs as well, if
// any integer regs are available for argument passing.
- unsigned ArgOffset = PPCFrameLowering::getLinkageSize(true, true);
- unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
+ unsigned ArgOffset = LinkageSize;
+ unsigned GPR_idx, FPR_idx = 0, VR_idx = 0;
- static const uint16_t GPR[] = {
+ static const MCPhysReg GPR[] = {
PPC::X3, PPC::X4, PPC::X5, PPC::X6,
PPC::X7, PPC::X8, PPC::X9, PPC::X10,
};
- static const uint16_t *FPR = GetFPR();
+ static const MCPhysReg *FPR = GetFPR();
- static const uint16_t VR[] = {
+ static const MCPhysReg VR[] = {
PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
};
+ static const MCPhysReg VSRH[] = {
+ PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8,
+ PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13
+ };
+
const unsigned NumGPRs = array_lengthof(GPR);
const unsigned NumFPRs = 13;
const unsigned NumVRs = array_lengthof(VR);
@@ -3916,6 +4284,17 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
for (unsigned i = 0; i != NumOps; ++i) {
SDValue Arg = OutVals[i];
ISD::ArgFlagsTy Flags = Outs[i].Flags;
+ EVT ArgVT = Outs[i].VT;
+ EVT OrigVT = Outs[i].ArgVT;
+
+ /* Respect alignment of argument on the stack. */
+ unsigned Align =
+ CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
+ ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
+
+ /* Compute GPR index associated with argument offset. */
+ GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
+ GPR_idx = std::min(GPR_idx, NumGPRs);
// PtrOff will be used to store the current argument to the stack if a
// register cannot be found for it.
@@ -3926,7 +4305,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
// Promote integers to 64-bit values.
- if (Arg.getValueType() == MVT::i32) {
+ if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
// FIXME: Should this use ANY_EXTEND if neither sext nor zext?
unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
@@ -3948,15 +4327,6 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
if (Size == 0)
continue;
- unsigned BVAlign = Flags.getByValAlign();
- if (BVAlign > 8) {
- if (BVAlign % PtrByteSize != 0)
- llvm_unreachable(
- "ByVal alignment is not a multiple of the pointer size");
-
- ArgOffset = ((ArgOffset+BVAlign-1)/BVAlign)*BVAlign;
- }
-
// All aggregates smaller than 8 bytes must be passed right-justified.
if (Size==1 || Size==2 || Size==4) {
EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
@@ -3965,7 +4335,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
MachinePointerInfo(), VT,
false, false, 0);
MemOpChains.push_back(Load.getValue(1));
- RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
+ RegsToPass.push_back(std::make_pair(GPR[GPR_idx], Load));
ArgOffset += PtrByteSize;
continue;
@@ -3973,9 +4343,12 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
}
if (GPR_idx == NumGPRs && Size < 8) {
- SDValue Const = DAG.getConstant(PtrByteSize - Size,
- PtrOff.getValueType());
- SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
+ SDValue AddPtr = PtrOff;
+ if (!isLittleEndian) {
+ SDValue Const = DAG.getConstant(PtrByteSize - Size,
+ PtrOff.getValueType());
+ AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
+ }
Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
CallSeqStart,
Flags, DAG, dl);
@@ -4010,8 +4383,11 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
// small aggregates, particularly for packed ones.
// FIXME: It would be preferable to use the slot in the
// parameter save area instead of a new local variable.
- SDValue Const = DAG.getConstant(8 - Size, PtrOff.getValueType());
- SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
+ SDValue AddPtr = PtrOff;
+ if (!isLittleEndian) {
+ SDValue Const = DAG.getConstant(8 - Size, PtrOff.getValueType());
+ AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
+ }
Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
CallSeqStart,
Flags, DAG, dl);
@@ -4021,7 +4397,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
MachinePointerInfo(),
false, false, false, 0);
MemOpChains.push_back(Load.getValue(1));
- RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
+ RegsToPass.push_back(std::make_pair(GPR[GPR_idx], Load));
// Done with this argument.
ArgOffset += PtrByteSize;
@@ -4050,10 +4426,14 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
switch (Arg.getSimpleValueType().SimpleTy) {
default: llvm_unreachable("Unexpected ValueType for argument!");
+ case MVT::i1:
case MVT::i32:
case MVT::i64:
+ // These can be scalar arguments or elements of an integer array type
+ // passed directly. Clang may use those instead of "byval" aggregate
+ // types to avoid forcing arguments to memory unnecessarily.
if (GPR_idx != NumGPRs) {
- RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
+ RegsToPass.push_back(std::make_pair(GPR[GPR_idx], Arg));
} else {
LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
true, isTailCall, false, MemOpChains,
@@ -4062,40 +4442,70 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
ArgOffset += PtrByteSize;
break;
case MVT::f32:
- case MVT::f64:
- if (FPR_idx != NumFPRs) {
+ case MVT::f64: {
+ // These can be scalar arguments or elements of a float array type
+ // passed directly. The latter are used to implement ELFv2 homogeneous
+ // float aggregates.
+
+ // Named arguments go into FPRs first, and once they overflow, the
+ // remaining arguments go into GPRs and then the parameter save area.
+ // Unnamed arguments for vararg functions always go to GPRs and
+ // then the parameter save area. For now, put all arguments to vararg
+ // routines always in both locations (FPR *and* GPR or stack slot).
+ bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
+
+ // First load the argument into the next available FPR.
+ if (FPR_idx != NumFPRs)
RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
- if (isVarArg) {
- // A single float or an aggregate containing only a single float
- // must be passed right-justified in the stack doubleword, and
- // in the GPR, if one is available.
- SDValue StoreOff;
- if (Arg.getSimpleValueType().SimpleTy == MVT::f32) {
- SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType());
- StoreOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
- } else
- StoreOff = PtrOff;
-
- SDValue Store = DAG.getStore(Chain, dl, Arg, StoreOff,
- MachinePointerInfo(), false, false, 0);
- MemOpChains.push_back(Store);
-
- // Float varargs are always shadowed in available integer registers
- if (GPR_idx != NumGPRs) {
- SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
- MachinePointerInfo(), false, false,
- false, 0);
- MemOpChains.push_back(Load.getValue(1));
- RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
- }
- } else if (GPR_idx != NumGPRs)
- // If we have any FPRs remaining, we may also have GPRs remaining.
- ++GPR_idx;
+ // Next, load the argument into GPR or stack slot if needed.
+ if (!NeedGPROrStack)
+ ;
+ else if (GPR_idx != NumGPRs) {
+ // In the non-vararg case, this can only ever happen in the
+ // presence of f32 array types, since otherwise we never run
+ // out of FPRs before running out of GPRs.
+ SDValue ArgVal;
+
+ // Double values are always passed in a single GPR.
+ if (Arg.getValueType() != MVT::f32) {
+ ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
+
+ // Non-array float values are extended and passed in a GPR.
+ } else if (!Flags.isInConsecutiveRegs()) {
+ ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
+ ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
+
+ // If we have an array of floats, we collect every odd element
+ // together with its predecessor into one GPR.
+ } else if (ArgOffset % PtrByteSize != 0) {
+ SDValue Lo, Hi;
+ Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
+ Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
+ if (!isLittleEndian)
+ std::swap(Lo, Hi);
+ ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
+
+ // The final element, if even, goes into the first half of a GPR.
+ } else if (Flags.isInConsecutiveRegsLast()) {
+ ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
+ ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
+ if (!isLittleEndian)
+ ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
+ DAG.getConstant(32, MVT::i32));
+
+ // Non-final even elements are skipped; they will be handled
+          // together with the subsequent argument on the next go-around.
+ } else
+ ArgVal = SDValue();
+
+ if (ArgVal.getNode())
+ RegsToPass.push_back(std::make_pair(GPR[GPR_idx], ArgVal));
} else {
// Single-precision floating-point values are mapped to the
// second (rightmost) word of the stack doubleword.
- if (Arg.getValueType() == MVT::f32) {
+ if (Arg.getValueType() == MVT::f32 &&
+ !isLittleEndian && !Flags.isInConsecutiveRegs()) {
SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType());
PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
}
@@ -4104,27 +4514,32 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
true, isTailCall, false, MemOpChains,
TailCallArguments, dl);
}
- ArgOffset += 8;
+ // When passing an array of floats, the array occupies consecutive
+ // space in the argument area; only round up to the next doubleword
+ // at the end of the array. Otherwise, each float takes 8 bytes.
+ ArgOffset += (Arg.getValueType() == MVT::f32 &&
+ Flags.isInConsecutiveRegs()) ? 4 : 8;
+ if (Flags.isInConsecutiveRegsLast())
+ ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
break;
+ }
case MVT::v4f32:
case MVT::v4i32:
case MVT::v8i16:
case MVT::v16i8:
+ case MVT::v2f64:
+ case MVT::v2i64:
+ // These can be scalar arguments or elements of a vector array type
+      // passed directly. The latter are used to implement ELFv2 homogeneous
+ // vector aggregates.
+
+ // For a varargs call, named arguments go into VRs or on the stack as
+ // usual; unnamed arguments always go to the stack or the corresponding
+ // GPRs when within range. For now, we always put the value in both
+ // locations (or even all three).
if (isVarArg) {
- // These go aligned on the stack, or in the corresponding R registers
- // when within range. The Darwin PPC ABI doc claims they also go in
- // V registers; in fact gcc does this only for arguments that are
- // prototyped, not for those that match the ... We do it for all
- // arguments, seems to work.
- while (ArgOffset % 16 !=0) {
- ArgOffset += PtrByteSize;
- if (GPR_idx != NumGPRs)
- GPR_idx++;
- }
// We could elide this store in the case where the object fits
// entirely in R registers. Maybe later.
- PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
- DAG.getConstant(ArgOffset, PtrVT));
SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
MachinePointerInfo(), false, false, 0);
MemOpChains.push_back(Store);
@@ -4133,7 +4548,13 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
MachinePointerInfo(),
false, false, false, 0);
MemOpChains.push_back(Load.getValue(1));
- RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
+
+ unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 ||
+ Arg.getSimpleValueType() == MVT::v2i64) ?
+ VSRH[VR_idx] : VR[VR_idx];
+ ++VR_idx;
+
+ RegsToPass.push_back(std::make_pair(VReg, Load));
}
ArgOffset += 16;
for (unsigned i=0; i<16; i+=PtrByteSize) {
@@ -4149,43 +4570,49 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
break;
}
- // Non-varargs Altivec params generally go in registers, but have
- // stack space allocated at the end.
+ // Non-varargs Altivec params go into VRs or on the stack.
if (VR_idx != NumVRs) {
- // Doesn't have GPR space allocated.
- RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
+ unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 ||
+ Arg.getSimpleValueType() == MVT::v2i64) ?
+ VSRH[VR_idx] : VR[VR_idx];
+ ++VR_idx;
+
+ RegsToPass.push_back(std::make_pair(VReg, Arg));
} else {
LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
true, isTailCall, true, MemOpChains,
TailCallArguments, dl);
- ArgOffset += 16;
}
+ ArgOffset += 16;
break;
}
}
+ assert(NumBytesActuallyUsed == ArgOffset);
+ (void)NumBytesActuallyUsed;
+
if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
// Check if this is an indirect call (MTCTR/BCTRL).
// See PrepareCall() for more information about calls through function
// pointers in the 64-bit SVR4 ABI.
if (!isTailCall &&
!dyn_cast<GlobalAddressSDNode>(Callee) &&
- !dyn_cast<ExternalSymbolSDNode>(Callee) &&
- !isBLACompatibleAddress(Callee, DAG)) {
+ !dyn_cast<ExternalSymbolSDNode>(Callee)) {
// Load r2 into a virtual register and store it to the TOC save area.
SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
// TOC save area offset.
- SDValue PtrOff = DAG.getIntPtrConstant(40);
+ unsigned TOCSaveOffset = PPCFrameLowering::getTOCSaveOffset(isELFv2ABI);
+ SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset);
SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, MachinePointerInfo(),
false, false, 0);
- // R12 must contain the address of an indirect callee. This does not
- // mean the MTCTR instruction must use R12; it's easier to model this
- // as an extra parameter, so do that.
- RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
+ // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
+ // This does not mean the MTCTR instruction must use R12; it's easier
+ // to model this as an extra parameter, so do that.
+ if (isELFv2ABI)
+ RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
}
// Build a sequence of copy-to-reg nodes chained together with token chain
@@ -4233,15 +4660,56 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
CallConv == CallingConv::Fast)
MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
- unsigned nAltivecParamsAtEnd = 0;
-
// Count how many bytes are to be pushed on the stack, including the linkage
// area, and parameter passing area. We start with 24/48 bytes, which is
// prereserved space for [SP][CR][LR][3 x unused].
- unsigned NumBytes =
- CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isVarArg, CallConv,
- Outs, OutVals,
- nAltivecParamsAtEnd);
+ unsigned LinkageSize = PPCFrameLowering::getLinkageSize(isPPC64, true,
+ false);
+ unsigned NumBytes = LinkageSize;
+
+ // Add up all the space actually used.
+ // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
+ // they all go in registers, but we must reserve stack space for them for
+ // possible use by the caller. In varargs or 64-bit calls, parameters are
+ // assigned stack space in order, with padding so Altivec parameters are
+ // 16-byte aligned.
+ unsigned nAltivecParamsAtEnd = 0;
+ for (unsigned i = 0; i != NumOps; ++i) {
+ ISD::ArgFlagsTy Flags = Outs[i].Flags;
+ EVT ArgVT = Outs[i].VT;
+ // Varargs Altivec parameters are padded to a 16 byte boundary.
+ if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
+ ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
+ ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
+ if (!isVarArg && !isPPC64) {
+ // Non-varargs Altivec parameters go after all the non-Altivec
+ // parameters; handle those later so we know how much padding we need.
+ nAltivecParamsAtEnd++;
+ continue;
+ }
+      // Varargs and 64-bit Altivec parameters are padded to a 16-byte boundary.
+ NumBytes = ((NumBytes+15)/16)*16;
+ }
+ NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
+ }
+
+ // Allow for Altivec parameters at the end, if needed.
+ if (nAltivecParamsAtEnd) {
+ NumBytes = ((NumBytes+15)/16)*16;
+ NumBytes += 16*nAltivecParamsAtEnd;
+ }
+
+ // The prolog code of the callee may store up to 8 GPR argument registers to
+  // the stack, allowing va_start to index over them in memory if it is varargs.
+ // Because we cannot tell if this is needed on the caller side, we have to
+ // conservatively assume that it is needed. As such, make sure we have at
+ // least enough stack space for the caller to store the 8 GPRs.
+ NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
+
+ // Tail call needs the stack to be aligned.
+ if (getTargetMachine().Options.GuaranteedTailCallOpt &&
+ CallConv == CallingConv::Fast)
+ NumBytes = EnsureStackAlignment(MF.getTarget(), NumBytes);
// Calculate by how many bytes the stack has to be adjusted in case of tail
// call optimization.
@@ -4277,20 +4745,20 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// memory. Also, if this is a vararg function, floating point operations
// must be stored to our stack, and loaded into integer regs as well, if
// any integer regs are available for argument passing.
- unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true);
+ unsigned ArgOffset = LinkageSize;
unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
- static const uint16_t GPR_32[] = { // 32-bit registers.
+ static const MCPhysReg GPR_32[] = { // 32-bit registers.
PPC::R3, PPC::R4, PPC::R5, PPC::R6,
PPC::R7, PPC::R8, PPC::R9, PPC::R10,
};
- static const uint16_t GPR_64[] = { // 64-bit registers.
+ static const MCPhysReg GPR_64[] = { // 64-bit registers.
PPC::X3, PPC::X4, PPC::X5, PPC::X6,
PPC::X7, PPC::X8, PPC::X9, PPC::X10,
};
- static const uint16_t *FPR = GetFPR();
+ static const MCPhysReg *FPR = GetFPR();
- static const uint16_t VR[] = {
+ static const MCPhysReg VR[] = {
PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
};
@@ -4298,7 +4766,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
const unsigned NumFPRs = 13;
const unsigned NumVRs = array_lengthof(VR);
- const uint16_t *GPR = isPPC64 ? GPR_64 : GPR_32;
+ const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
@@ -4381,9 +4849,13 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
switch (Arg.getSimpleValueType().SimpleTy) {
default: llvm_unreachable("Unexpected ValueType for argument!");
+ case MVT::i1:
case MVT::i32:
case MVT::i64:
if (GPR_idx != NumGPRs) {
+ if (Arg.getValueType() == MVT::i1)
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
+
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
} else {
LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
@@ -4524,8 +4996,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
}
if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
// On Darwin, R12 must contain the address of an indirect callee. This does
// not mean the MTCTR instruction must use R12; it's easier to model this as
@@ -4613,8 +5084,7 @@ PPCTargetLowering::LowerReturn(SDValue Chain,
if (Flag.getNode())
RetOps.push_back(Flag);
- return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other,
- &RetOps[0], RetOps.size());
+ return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
}
SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
@@ -4652,8 +5122,8 @@ SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
SDValue
PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
- bool isPPC64 = PPCSubTarget.isPPC64();
- bool isDarwinABI = PPCSubTarget.isDarwinABI();
+ bool isPPC64 = Subtarget.isPPC64();
+ bool isDarwinABI = Subtarget.isDarwinABI();
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
// Get current frame pointer save index. The users of this index will be
@@ -4676,8 +5146,8 @@ PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
SDValue
PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
- bool isPPC64 = PPCSubTarget.isPPC64();
- bool isDarwinABI = PPCSubTarget.isDarwinABI();
+ bool isPPC64 = Subtarget.isPPC64();
+ bool isDarwinABI = Subtarget.isDarwinABI();
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
// Get current frame pointer save index. The users of this index will be
@@ -4717,7 +5187,7 @@ SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
// Build a DYNALLOC node.
SDValue Ops[3] = { Chain, NegSize, FPSIdx };
SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
- return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops, 3);
+ return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
}
SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
@@ -4735,6 +5205,55 @@ SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
Op.getOperand(0), Op.getOperand(1));
}
+SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
+ assert(Op.getValueType() == MVT::i1 &&
+ "Custom lowering only for i1 loads");
+
+ // First, load 8 bits into 32 bits, then truncate to 1 bit.
+
+ SDLoc dl(Op);
+ LoadSDNode *LD = cast<LoadSDNode>(Op);
+
+ SDValue Chain = LD->getChain();
+ SDValue BasePtr = LD->getBasePtr();
+ MachineMemOperand *MMO = LD->getMemOperand();
+
+ SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(), Chain,
+ BasePtr, MVT::i8, MMO);
+ SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
+
+ SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
+ return DAG.getMergeValues(Ops, dl);
+}
+
+SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
+ assert(Op.getOperand(1).getValueType() == MVT::i1 &&
+ "Custom lowering only for i1 stores");
+
+ // First, zero extend to 32 bits, then use a truncating store to 8 bits.
+
+ SDLoc dl(Op);
+ StoreSDNode *ST = cast<StoreSDNode>(Op);
+
+ SDValue Chain = ST->getChain();
+ SDValue BasePtr = ST->getBasePtr();
+ SDValue Value = ST->getValue();
+ MachineMemOperand *MMO = ST->getMemOperand();
+
+ Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(), Value);
+ return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
+}
+
+// FIXME: Remove this once the ANDI glue bug is fixed:
+SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
+ assert(Op.getValueType() == MVT::i1 &&
+ "Custom lowering only for i1 results");
+
+ SDLoc DL(Op);
+ return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1,
+ Op.getOperand(0));
+}
+
/// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
/// possible.
SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
@@ -4848,12 +5367,12 @@ SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
case MVT::i32:
Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ :
- (PPCSubTarget.hasFPCVT() ? PPCISD::FCTIWUZ :
+ (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ :
PPCISD::FCTIDZ),
dl, MVT::f64, Src);
break;
case MVT::i64:
- assert((Op.getOpcode() == ISD::FP_TO_SINT || PPCSubTarget.hasFPCVT()) &&
+ assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
"i64 FP_TO_UINT is supported only with FPCVT");
Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
PPCISD::FCTIDUZ,
@@ -4862,8 +5381,8 @@ SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
}
// Convert the FP value to an int value through memory.
- bool i32Stack = Op.getValueType() == MVT::i32 && PPCSubTarget.hasSTFIWX() &&
- (Op.getOpcode() == ISD::FP_TO_SINT || PPCSubTarget.hasFPCVT());
+ bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
+ (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT());
SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(FI);
@@ -4876,8 +5395,7 @@ SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4);
SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr };
Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
- DAG.getVTList(MVT::Other), Ops, array_lengthof(Ops),
- MVT::i32, MMO);
+ DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
} else
Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr,
MPI, false, false, 0);
@@ -4901,17 +5419,22 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
return SDValue();
- assert((Op.getOpcode() == ISD::SINT_TO_FP || PPCSubTarget.hasFPCVT()) &&
+ if (Op.getOperand(0).getValueType() == MVT::i1)
+ return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
+ DAG.getConstantFP(1.0, Op.getValueType()),
+ DAG.getConstantFP(0.0, Op.getValueType()));
+
+ assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
"UINT_TO_FP is supported only with FPCVT");
// If we have FCFIDS, then use it when converting to single-precision.
// Otherwise, convert to double-precision and then round.
- unsigned FCFOp = (PPCSubTarget.hasFPCVT() && Op.getValueType() == MVT::f32) ?
+ unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) ?
(Op.getOpcode() == ISD::UINT_TO_FP ?
PPCISD::FCFIDUS : PPCISD::FCFIDS) :
(Op.getOpcode() == ISD::UINT_TO_FP ?
PPCISD::FCFIDU : PPCISD::FCFID);
- MVT FCFTy = (PPCSubTarget.hasFPCVT() && Op.getValueType() == MVT::f32) ?
+ MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) ?
MVT::f32 : MVT::f64;
if (Op.getOperand(0).getValueType() == MVT::i64) {
@@ -4927,7 +5450,7 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
// However, if -enable-unsafe-fp-math is in effect, accept double
// rounding to avoid the extra overhead.
if (Op.getValueType() == MVT::f32 &&
- !PPCSubTarget.hasFPCVT() &&
+ !Subtarget.hasFPCVT() &&
!DAG.getTarget().Options.UnsafeFPMath) {
// Twiddle input to make sure the low 11 bits are zero. (If this
@@ -4965,7 +5488,7 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
SDValue Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits);
- if (Op.getValueType() == MVT::f32 && !PPCSubTarget.hasFPCVT())
+ if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
FP = DAG.getNode(ISD::FP_ROUND, dl,
MVT::f32, FP, DAG.getIntPtrConstant(0));
return FP;
@@ -4982,7 +5505,7 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
SDValue Ld;
- if (PPCSubTarget.hasLFIWAX() || PPCSubTarget.hasFPCVT()) {
+ if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
int FrameIdx = FrameInfo->CreateStackObject(4, 4, false);
SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
@@ -4999,9 +5522,9 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ?
PPCISD::LFIWZX : PPCISD::LFIWAX,
dl, DAG.getVTList(MVT::f64, MVT::Other),
- Ops, 2, MVT::i32, MMO);
+ Ops, MVT::i32, MMO);
} else {
- assert(PPCSubTarget.isPPC64() &&
+ assert(Subtarget.isPPC64() &&
"i32->FP without LFIWAX supported only on PPC64");
int FrameIdx = FrameInfo->CreateStackObject(8, 8, false);
@@ -5023,7 +5546,7 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
// FCFID it and return it.
SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
- if (Op.getValueType() == MVT::f32 && !PPCSubTarget.hasFPCVT())
+ if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0));
return FP;
}
@@ -5053,14 +5576,13 @@ SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
MachineFunction &MF = DAG.getMachineFunction();
EVT VT = Op.getValueType();
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
- SDValue MFFSreg, InFlag;
// Save FP Control Word to register
EVT NodeTys[] = {
MVT::f64, // return register
MVT::Glue // unused in this context
};
- SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0);
+ SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);
// Save FP register to stack slot
int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false);
@@ -5119,7 +5641,7 @@ SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
SDValue OutOps[] = { OutLo, OutHi };
- return DAG.getMergeValues(OutOps, 2, dl);
+ return DAG.getMergeValues(OutOps, dl);
}
SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
@@ -5148,7 +5670,7 @@ SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
SDValue OutOps[] = { OutLo, OutHi };
- return DAG.getMergeValues(OutOps, 2, dl);
+ return DAG.getMergeValues(OutOps, dl);
}
SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
@@ -5177,7 +5699,7 @@ SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, AmtVT),
Tmp4, Tmp6, ISD::SETLE);
SDValue OutOps[] = { OutLo, OutHi };
- return DAG.getMergeValues(OutOps, 2, dl);
+ return DAG.getMergeValues(OutOps, dl);
}
//===----------------------------------------------------------------------===//
@@ -5206,8 +5728,7 @@ static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
SDValue Elt = DAG.getConstant(Val, MVT::i32);
SmallVector<SDValue, 8> Ops;
Ops.assign(CanonicalVT.getVectorNumElements(), Elt);
- SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT,
- &Ops[0], Ops.size());
+ SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, Ops);
return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res);
}
@@ -5266,7 +5787,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
- assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
+ assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
// Check if this is a splat of a constant value.
APInt APSplatBits, APSplatUndef;
@@ -5314,10 +5835,14 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
// we convert to a pseudo that will be expanded later into one of
// the above forms.
SDValue Elt = DAG.getConstant(SextVal, MVT::i32);
- EVT VT = Op.getValueType();
- int Size = VT == MVT::v16i8 ? 1 : (VT == MVT::v8i16 ? 2 : 4);
- SDValue EltSize = DAG.getConstant(Size, MVT::i32);
- return DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
+ EVT VT = (SplatSize == 1 ? MVT::v16i8 :
+ (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
+ SDValue EltSize = DAG.getConstant(SplatSize, MVT::i32);
+ SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
+ if (VT == Op.getValueType())
+ return RetVal;
+ else
+ return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
}
// If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
@@ -5336,6 +5861,22 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
}
+ // The remaining cases assume either big endian element order or
+ // a splat-size that equates to the element size of the vector
+ // to be built. An example that doesn't work for little endian is
+ // {0, -1, 0, -1, 0, -1, 0, -1} which has a splat size of 32 bits
+ // and a vector element size of 16 bits. The code below will
+ // produce the vector in big endian element order, which for little
+ // endian is {-1, 0, -1, 0, -1, 0, -1, 0}.
+
+ // For now, just avoid these optimizations in that case.
+ // FIXME: Develop correct optimizations for LE with mismatched
+ // splat and element sizes.
+
+ if (Subtarget.isLittleEndian() &&
+ SplatSize != Op.getValueType().getVectorElementType().getSizeInBits())
+ return SDValue();
+
// Check to see if this is a wide variety of vsplti*, binop self cases.
static const signed char SplatCsts[] = {
-1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
@@ -5504,6 +6045,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
SDValue V2 = Op.getOperand(1);
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
EVT VT = Op.getValueType();
+ bool isLittleEndian = Subtarget.isLittleEndian();
// Cases that are handled by instructions that take permute immediates
// (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
@@ -5512,15 +6054,15 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
if (PPC::isSplatShuffleMask(SVOp, 1) ||
PPC::isSplatShuffleMask(SVOp, 2) ||
PPC::isSplatShuffleMask(SVOp, 4) ||
- PPC::isVPKUWUMShuffleMask(SVOp, true) ||
- PPC::isVPKUHUMShuffleMask(SVOp, true) ||
- PPC::isVSLDOIShuffleMask(SVOp, true) != -1 ||
- PPC::isVMRGLShuffleMask(SVOp, 1, true) ||
- PPC::isVMRGLShuffleMask(SVOp, 2, true) ||
- PPC::isVMRGLShuffleMask(SVOp, 4, true) ||
- PPC::isVMRGHShuffleMask(SVOp, 1, true) ||
- PPC::isVMRGHShuffleMask(SVOp, 2, true) ||
- PPC::isVMRGHShuffleMask(SVOp, 4, true)) {
+ PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
+ PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
+ PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
+ PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
+ PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
+ PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
+ PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
+ PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
+ PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG)) {
return Op;
}
}
@@ -5528,15 +6070,16 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
// Altivec has a variety of "shuffle immediates" that take two vector inputs
// and produce a fixed permutation. If any of these match, do not lower to
// VPERM.
- if (PPC::isVPKUWUMShuffleMask(SVOp, false) ||
- PPC::isVPKUHUMShuffleMask(SVOp, false) ||
- PPC::isVSLDOIShuffleMask(SVOp, false) != -1 ||
- PPC::isVMRGLShuffleMask(SVOp, 1, false) ||
- PPC::isVMRGLShuffleMask(SVOp, 2, false) ||
- PPC::isVMRGLShuffleMask(SVOp, 4, false) ||
- PPC::isVMRGHShuffleMask(SVOp, 1, false) ||
- PPC::isVMRGHShuffleMask(SVOp, 2, false) ||
- PPC::isVMRGHShuffleMask(SVOp, 4, false))
+ unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
+ if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
+ PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
+ PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
+ PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
+ PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
+ PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
+ PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
+ PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
+ PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG))
return Op;
// Check to see if this is a shuffle of 4-byte values. If so, we can use our
@@ -5570,7 +6113,9 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
// If this shuffle can be expressed as a shuffle of 4-byte elements, use the
// perfect shuffle vector to determine if it is cost effective to do this as
// discrete instructions, or whether we should use a vperm.
- if (isFourElementShuffle) {
+ // For now, we skip this for little endian until such time as we have a
+ // little-endian perfect shuffle table.
+ if (isFourElementShuffle && !isLittleEndian) {
// Compute the index in the perfect shuffle table.
unsigned PFTableIndex =
PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
@@ -5599,6 +6144,11 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
// The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
// that it is in input element units, not in bytes. Convert now.
+
+ // For little endian, the order of the input vectors is reversed, and
+ // the permutation mask is complemented with respect to 31. This is
+ // necessary to produce proper semantics with the big-endian-biased vperm
+ // instruction.
EVT EltVT = V1.getValueType().getVectorElementType();
unsigned BytesPerElement = EltVT.getSizeInBits()/8;
@@ -5607,13 +6157,22 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
for (unsigned j = 0; j != BytesPerElement; ++j)
- ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
- MVT::i32));
+ if (isLittleEndian)
+ ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement+j),
+ MVT::i32));
+ else
+ ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
+ MVT::i32));
}
SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
- &ResultMask[0], ResultMask.size());
- return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), V1, V2, VPermMask);
+ ResultMask);
+ if (isLittleEndian)
+ return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
+ V2, V1, VPermMask);
+ else
+ return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
+ V1, V2, VPermMask);
}
/// getAltivecCompareInfo - Given an intrinsic, return false if it is not an
@@ -5687,7 +6246,7 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
DAG.getConstant(CompareOpc, MVT::i32)
};
EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
- SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3);
+ SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
// Now that we have the comparison, emit a copy from the CR to a GPR.
// This is flagged to the above dot comparison.
@@ -5728,6 +6287,30 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
return Flags;
}
+SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc dl(Op);
+  // For v2i64 (VSX), we can pattern match the v2i32 case (using fp <-> int
+  // instructions), but for smaller types, we need to first extend up to v2i32
+  // before going any further.
+ if (Op.getValueType() == MVT::v2i64) {
+ EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+ if (ExtVT != MVT::v2i32) {
+ Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0));
+ Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op,
+ DAG.getValueType(EVT::getVectorVT(*DAG.getContext(),
+ ExtVT.getVectorElementType(), 4)));
+ Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op);
+ Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op,
+ DAG.getValueType(MVT::v2i32));
+ }
+
+ return Op;
+ }
+
+ return SDValue();
+}
+
SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
@@ -5782,6 +6365,7 @@ SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
LHS, RHS, Zero, DAG, dl);
} else if (Op.getValueType() == MVT::v16i8) {
SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
+ bool isLittleEndian = Subtarget.isLittleEndian();
// Multiply the even 8-bit parts, producing 16-bit sums.
SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
@@ -5793,13 +6377,24 @@ SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
LHS, RHS, DAG, dl, MVT::v8i16);
OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
- // Merge the results together.
+ // Merge the results together. Because vmuleub and vmuloub are
+ // instructions with a big-endian bias, we must reverse the
+ // element numbering and reverse the meaning of "odd" and "even"
+ // when generating little endian code.
int Ops[16];
for (unsigned i = 0; i != 8; ++i) {
- Ops[i*2 ] = 2*i+1;
- Ops[i*2+1] = 2*i+1+16;
+ if (isLittleEndian) {
+ Ops[i*2 ] = 2*i;
+ Ops[i*2+1] = 2*i+16;
+ } else {
+ Ops[i*2 ] = 2*i+1;
+ Ops[i*2+1] = 2*i+1+16;
+ }
}
- return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
+ if (isLittleEndian)
+ return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
+ else
+ return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
} else {
llvm_unreachable("Unknown mul to lower!");
}
@@ -5819,21 +6414,24 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
case ISD::VASTART:
- return LowerVASTART(Op, DAG, PPCSubTarget);
+ return LowerVASTART(Op, DAG, Subtarget);
case ISD::VAARG:
- return LowerVAARG(Op, DAG, PPCSubTarget);
+ return LowerVAARG(Op, DAG, Subtarget);
case ISD::VACOPY:
- return LowerVACOPY(Op, DAG, PPCSubTarget);
+ return LowerVACOPY(Op, DAG, Subtarget);
- case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget);
+ case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, Subtarget);
case ISD::DYNAMIC_STACKALLOC:
- return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget);
+ return LowerDYNAMIC_STACKALLOC(Op, DAG, Subtarget);
case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
+ case ISD::LOAD: return LowerLOAD(Op, DAG);
+ case ISD::STORE: return LowerSTORE(Op, DAG);
+ case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
case ISD::FP_TO_UINT:
case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG,
@@ -5852,6 +6450,7 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
+ case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
case ISD::MUL: return LowerMUL(Op, DAG);
// For counter-based loop handling.
@@ -5895,7 +6494,7 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
EVT VT = N->getValueType(0);
if (VT == MVT::i64) {
- SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG, PPCSubTarget);
+ SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG, Subtarget);
Results.push_back(NewNode);
Results.push_back(NewNode.getValue(1));
@@ -5957,8 +6556,7 @@ PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
F->insert(It, loopMBB);
F->insert(It, exitMBB);
exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
MachineRegisterInfo &RegInfo = F->getRegInfo();
@@ -6007,7 +6605,7 @@ PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
// lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address
// registers without caring whether they're 32 or 64, but here we're
// doing actual arithmetic on the addresses.
- bool is64bit = PPCSubTarget.isPPC64();
+ bool is64bit = Subtarget.isPPC64();
unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
const BasicBlock *LLVM_BB = BB->getBasicBlock();
@@ -6026,8 +6624,7 @@ PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
F->insert(It, loopMBB);
F->insert(It, exitMBB);
exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
MachineRegisterInfo &RegInfo = F->getRegInfo();
@@ -6179,7 +6776,7 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), MBB,
- llvm::next(MachineBasicBlock::iterator(MI)), MBB->end());
+ std::next(MachineBasicBlock::iterator(MI)), MBB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
// Note that the structure of the jmp_buf used here is not compatible
@@ -6203,7 +6800,7 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
unsigned LabelReg = MRI.createVirtualRegister(PtrRC);
unsigned BufReg = MI->getOperand(1).getReg();
- if (PPCSubTarget.isPPC64() && PPCSubTarget.isSVR4ABI()) {
+ if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) {
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
.addReg(PPC::X2)
.addImm(TOCOffset)
@@ -6216,12 +6813,12 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
unsigned BaseReg;
if (MF->getFunction()->getAttributes().hasAttribute(
AttributeSet::FunctionIndex, Attribute::Naked))
- BaseReg = PPCSubTarget.isPPC64() ? PPC::X1 : PPC::R1;
+ BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
else
- BaseReg = PPCSubTarget.isPPC64() ? PPC::BP8 : PPC::BP;
+ BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
MIB = BuildMI(*thisMBB, MI, DL,
- TII->get(PPCSubTarget.isPPC64() ? PPC::STD : PPC::STW))
+ TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
.addReg(BaseReg)
.addImm(BPOffset)
.addReg(BufReg);
@@ -6245,10 +6842,10 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
// mainMBB:
// mainDstReg = 0
MIB = BuildMI(mainMBB, DL,
- TII->get(PPCSubTarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
+ TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
// Store IP
- if (PPCSubTarget.isPPC64()) {
+ if (Subtarget.isPPC64()) {
MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
.addReg(LabelReg)
.addImm(LabelOffset)
@@ -6299,7 +6896,7 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
unsigned BP = (PVT == MVT::i64) ? PPC::X30 :
- (PPCSubTarget.isSVR4ABI() &&
+ (Subtarget.isSVR4ABI() &&
MF->getTarget().getRelocationModel() == Reloc::PIC_ ?
PPC::R29 : PPC::R30);
@@ -6363,7 +6960,7 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
MIB.setMemRefs(MMOBegin, MMOEnd);
// Reload TOC
- if (PVT == MVT::i64 && PPCSubTarget.isSVR4ABI()) {
+ if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
.addImm(TOCOffset)
.addReg(BufReg);
@@ -6401,10 +6998,16 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineFunction *F = BB->getParent();
- if (PPCSubTarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 ||
- MI->getOpcode() == PPC::SELECT_CC_I8)) {
+ if (Subtarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 ||
+ MI->getOpcode() == PPC::SELECT_CC_I8 ||
+ MI->getOpcode() == PPC::SELECT_I4 ||
+ MI->getOpcode() == PPC::SELECT_I8)) {
SmallVector<MachineOperand, 2> Cond;
- Cond.push_back(MI->getOperand(4));
+ if (MI->getOpcode() == PPC::SELECT_CC_I4 ||
+ MI->getOpcode() == PPC::SELECT_CC_I8)
+ Cond.push_back(MI->getOperand(4));
+ else
+ Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
Cond.push_back(MI->getOperand(1));
DebugLoc dl = MI->getDebugLoc();
@@ -6416,9 +7019,12 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MI->getOpcode() == PPC::SELECT_CC_I8 ||
MI->getOpcode() == PPC::SELECT_CC_F4 ||
MI->getOpcode() == PPC::SELECT_CC_F8 ||
- MI->getOpcode() == PPC::SELECT_CC_VRRC) {
-
-
+ MI->getOpcode() == PPC::SELECT_CC_VRRC ||
+ MI->getOpcode() == PPC::SELECT_I4 ||
+ MI->getOpcode() == PPC::SELECT_I8 ||
+ MI->getOpcode() == PPC::SELECT_F4 ||
+ MI->getOpcode() == PPC::SELECT_F8 ||
+ MI->getOpcode() == PPC::SELECT_VRRC) {
// The incoming instruction knows the destination vreg to set, the
// condition code register to branch on, the true/false values to
// select between, and a branch opcode to use.
@@ -6432,23 +7038,31 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *thisMBB = BB;
MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
- unsigned SelectPred = MI->getOperand(4).getImm();
DebugLoc dl = MI->getDebugLoc();
F->insert(It, copy0MBB);
F->insert(It, sinkMBB);
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
// Next, add the true and fallthrough blocks as its successors.
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
- BuildMI(BB, dl, TII->get(PPC::BCC))
- .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
+ if (MI->getOpcode() == PPC::SELECT_I4 ||
+ MI->getOpcode() == PPC::SELECT_I8 ||
+ MI->getOpcode() == PPC::SELECT_F4 ||
+ MI->getOpcode() == PPC::SELECT_F8 ||
+ MI->getOpcode() == PPC::SELECT_VRRC) {
+ BuildMI(BB, dl, TII->get(PPC::BC))
+ .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
+ } else {
+ unsigned SelectPred = MI->getOperand(4).getImm();
+ BuildMI(BB, dl, TII->get(PPC::BCC))
+ .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
+ }
// copy0MBB:
// %FalseValue = ...
@@ -6504,13 +7118,13 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
BB = EmitAtomicBinary(MI, BB, true, PPC::XOR8);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
- BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ANDC);
+ BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
- BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ANDC);
+ BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
- BB = EmitAtomicBinary(MI, BB, false, PPC::ANDC);
+ BB = EmitAtomicBinary(MI, BB, false, PPC::NAND);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
- BB = EmitAtomicBinary(MI, BB, true, PPC::ANDC8);
+ BB = EmitAtomicBinary(MI, BB, true, PPC::NAND8);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
@@ -6550,8 +7164,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
F->insert(It, midMBB);
F->insert(It, exitMBB);
exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
// thisMBB:
@@ -6602,7 +7215,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// We must use 64-bit registers for addresses when targeting 64-bit,
// since we're actually doing arithmetic on them. Other registers
// can be 32-bit.
- bool is64bit = PPCSubTarget.isPPC64();
+ bool is64bit = Subtarget.isPPC64();
bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
unsigned dest = MI->getOperand(0).getReg();
@@ -6621,8 +7234,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
F->insert(It, midMBB);
F->insert(It, exitMBB);
exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
MachineRegisterInfo &RegInfo = F->getRegInfo();
@@ -6771,6 +7383,27 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// Restore FPSCR value.
BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF)).addImm(1).addReg(MFFSReg);
+ } else if (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT ||
+ MI->getOpcode() == PPC::ANDIo_1_GT_BIT ||
+ MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
+ MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) {
+ unsigned Opcode = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
+ MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) ?
+ PPC::ANDIo8 : PPC::ANDIo;
+ bool isEQ = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT ||
+ MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8);
+
+ MachineRegisterInfo &RegInfo = F->getRegInfo();
+ unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ?
+ &PPC::GPRCRegClass :
+ &PPC::G8RCRegClass);
+
+ DebugLoc dl = MI->getDebugLoc();
+ BuildMI(*BB, MI, dl, TII->get(Opcode), Dest)
+ .addReg(MI->getOperand(1).getReg()).addImm(1);
+ BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY),
+ MI->getOperand(0).getReg())
+ .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT);
} else {
llvm_unreachable("Unexpected instr type to insert");
}
@@ -6790,9 +7423,10 @@ SDValue PPCTargetLowering::DAGCombineFastRecip(SDValue Op,
EVT VT = Op.getValueType();
- if ((VT == MVT::f32 && PPCSubTarget.hasFRES()) ||
- (VT == MVT::f64 && PPCSubTarget.hasFRE()) ||
- (VT == MVT::v4f32 && PPCSubTarget.hasAltivec())) {
+ if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
+ (VT == MVT::f64 && Subtarget.hasFRE()) ||
+ (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
+ (VT == MVT::v2f64 && Subtarget.hasVSX())) {
// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
// For the reciprocal, we need to find the zero of the function:
@@ -6805,7 +7439,7 @@ SDValue PPCTargetLowering::DAGCombineFastRecip(SDValue Op,
// correct after every iteration. The minimum architected relative
// accuracy is 2^-5. When hasRecipPrec(), this is 2^-14. IEEE float has
// 23 digits and double has 52 digits.
- int Iterations = PPCSubTarget.hasRecipPrec() ? 1 : 3;
+ int Iterations = Subtarget.hasRecipPrec() ? 1 : 3;
if (VT.getScalarType() == MVT::f64)
++Iterations;
@@ -6852,9 +7486,10 @@ SDValue PPCTargetLowering::DAGCombineFastRecipFSQRT(SDValue Op,
EVT VT = Op.getValueType();
- if ((VT == MVT::f32 && PPCSubTarget.hasFRSQRTES()) ||
- (VT == MVT::f64 && PPCSubTarget.hasFRSQRTE()) ||
- (VT == MVT::v4f32 && PPCSubTarget.hasAltivec())) {
+ if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
+ (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
+ (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
+ (VT == MVT::v2f64 && Subtarget.hasVSX())) {
// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
// For the reciprocal sqrt, we need to find the zero of the function:
@@ -6867,7 +7502,7 @@ SDValue PPCTargetLowering::DAGCombineFastRecipFSQRT(SDValue Op,
// correct after every iteration. The minimum architected relative
// accuracy is 2^-5. When hasRecipPrec(), this is 2^-14. IEEE float has
// 23 digits and double has 52 digits.
- int Iterations = PPCSubTarget.hasRecipPrec() ? 1 : 3;
+ int Iterations = Subtarget.hasRecipPrec() ? 1 : 3;
if (VT.getScalarType() == MVT::f64)
++Iterations;
@@ -6945,8 +7580,8 @@ static bool isConsecutiveLS(LSBaseSDNode *LS, LSBaseSDNode *Base,
return true;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- const GlobalValue *GV1 = NULL;
- const GlobalValue *GV2 = NULL;
+ const GlobalValue *GV1 = nullptr;
+ const GlobalValue *GV2 = nullptr;
int64_t Offset1 = 0;
int64_t Offset2 = 0;
bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
@@ -6984,10 +7619,9 @@ static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
if (!Visited.count(ChainLD->getChain().getNode()))
Queue.push_back(ChainLD->getChain().getNode());
} else if (ChainNext->getOpcode() == ISD::TokenFactor) {
- for (SDNode::op_iterator O = ChainNext->op_begin(),
- OE = ChainNext->op_end(); O != OE; ++O)
- if (!Visited.count(O->getNode()))
- Queue.push_back(O->getNode());
+ for (const SDUse &O : ChainNext->ops())
+ if (!Visited.count(O.getNode()))
+ Queue.push_back(O.getNode());
} else
LoadRoots.insert(ChainNext);
}
@@ -7025,6 +7659,534 @@ static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
return false;
}
+SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc dl(N);
+
+ assert(Subtarget.useCRBits() &&
+ "Expecting to be tracking CR bits");
+ // If we're tracking CR bits, we need to be careful that we don't have:
+ // trunc(binary-ops(zext(x), zext(y)))
+ // or
+ // trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
+ // such that we're unnecessarily moving things into GPRs when it would be
+ // better to keep them in CR bits.
+
+ // Note that trunc here can be an actual i1 trunc, or can be the effective
+ // truncation that comes from a setcc or select_cc.
+ if (N->getOpcode() == ISD::TRUNCATE &&
+ N->getValueType(0) != MVT::i1)
+ return SDValue();
+
+ if (N->getOperand(0).getValueType() != MVT::i32 &&
+ N->getOperand(0).getValueType() != MVT::i64)
+ return SDValue();
+
+ if (N->getOpcode() == ISD::SETCC ||
+ N->getOpcode() == ISD::SELECT_CC) {
+ // If we're looking at a comparison, then we need to make sure that the
+    // high bits (all except for the first) don't affect the result.
+ ISD::CondCode CC =
+ cast<CondCodeSDNode>(N->getOperand(
+ N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
+ unsigned OpBits = N->getOperand(0).getValueSizeInBits();
+
+ if (ISD::isSignedIntSetCC(CC)) {
+ if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
+ DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
+ return SDValue();
+ } else if (ISD::isUnsignedIntSetCC(CC)) {
+ if (!DAG.MaskedValueIsZero(N->getOperand(0),
+ APInt::getHighBitsSet(OpBits, OpBits-1)) ||
+ !DAG.MaskedValueIsZero(N->getOperand(1),
+ APInt::getHighBitsSet(OpBits, OpBits-1)))
+ return SDValue();
+ } else {
+      // This is neither a signed nor an unsigned comparison; just make sure
+ // that the high bits are equal.
+ APInt Op1Zero, Op1One;
+ APInt Op2Zero, Op2One;
+ DAG.computeKnownBits(N->getOperand(0), Op1Zero, Op1One);
+ DAG.computeKnownBits(N->getOperand(1), Op2Zero, Op2One);
+
+ // We don't really care about what is known about the first bit (if
+ // anything), so clear it in all masks prior to comparing them.
+ Op1Zero.clearBit(0); Op1One.clearBit(0);
+ Op2Zero.clearBit(0); Op2One.clearBit(0);
+
+ if (Op1Zero != Op2Zero || Op1One != Op2One)
+ return SDValue();
+ }
+ }
+
+ // We now know that the higher-order bits are irrelevant, we just need to
+ // make sure that all of the intermediate operations are bit operations, and
+ // all inputs are extensions.
+ if (N->getOperand(0).getOpcode() != ISD::AND &&
+ N->getOperand(0).getOpcode() != ISD::OR &&
+ N->getOperand(0).getOpcode() != ISD::XOR &&
+ N->getOperand(0).getOpcode() != ISD::SELECT &&
+ N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
+ N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
+ N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
+ N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
+ N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
+ return SDValue();
+
+ if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
+ N->getOperand(1).getOpcode() != ISD::AND &&
+ N->getOperand(1).getOpcode() != ISD::OR &&
+ N->getOperand(1).getOpcode() != ISD::XOR &&
+ N->getOperand(1).getOpcode() != ISD::SELECT &&
+ N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
+ N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
+ N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
+ N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
+ N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
+ return SDValue();
+
+ SmallVector<SDValue, 4> Inputs;
+ SmallVector<SDValue, 8> BinOps, PromOps;
+ SmallPtrSet<SDNode *, 16> Visited;
+
+ for (unsigned i = 0; i < 2; ++i) {
+ if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
+ N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
+ N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
+ N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
+ isa<ConstantSDNode>(N->getOperand(i)))
+ Inputs.push_back(N->getOperand(i));
+ else
+ BinOps.push_back(N->getOperand(i));
+
+ if (N->getOpcode() == ISD::TRUNCATE)
+ break;
+ }
+
+ // Visit all inputs, collect all binary operations (and, or, xor and
+ // select) that are all fed by extensions.
+ while (!BinOps.empty()) {
+ SDValue BinOp = BinOps.back();
+ BinOps.pop_back();
+
+ if (!Visited.insert(BinOp.getNode()))
+ continue;
+
+ PromOps.push_back(BinOp);
+
+ for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
+ // The condition of the select is not promoted.
+ if (BinOp.getOpcode() == ISD::SELECT && i == 0)
+ continue;
+ if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
+ continue;
+
+ if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
+ BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
+ BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
+ BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
+ isa<ConstantSDNode>(BinOp.getOperand(i))) {
+ Inputs.push_back(BinOp.getOperand(i));
+ } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
+ BinOp.getOperand(i).getOpcode() == ISD::OR ||
+ BinOp.getOperand(i).getOpcode() == ISD::XOR ||
+ BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
+ BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
+ BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
+ BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
+ BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
+ BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
+ BinOps.push_back(BinOp.getOperand(i));
+ } else {
+ // We have an input that is not an extension or another binary
+ // operation; we'll abort this transformation.
+ return SDValue();
+ }
+ }
+ }
+
+ // Make sure that this is a self-contained cluster of operations (which
+ // is not quite the same thing as saying that everything has only one
+ // use).
+ for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
+ if (isa<ConstantSDNode>(Inputs[i]))
+ continue;
+
+ for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
+ UE = Inputs[i].getNode()->use_end();
+ UI != UE; ++UI) {
+ SDNode *User = *UI;
+ if (User != N && !Visited.count(User))
+ return SDValue();
+
+ // Make sure that we're not going to promote the non-output-value
+ // operand(s) or SELECT or SELECT_CC.
+ // FIXME: Although we could sometimes handle this, and it does occur in
+ // practice that one of the condition inputs to the select is also one of
+ // the outputs, we currently can't deal with this.
+ if (User->getOpcode() == ISD::SELECT) {
+ if (User->getOperand(0) == Inputs[i])
+ return SDValue();
+ } else if (User->getOpcode() == ISD::SELECT_CC) {
+ if (User->getOperand(0) == Inputs[i] ||
+ User->getOperand(1) == Inputs[i])
+ return SDValue();
+ }
+ }
+ }
+
+ for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
+ for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
+ UE = PromOps[i].getNode()->use_end();
+ UI != UE; ++UI) {
+ SDNode *User = *UI;
+ if (User != N && !Visited.count(User))
+ return SDValue();
+
+ // Make sure that we're not going to promote the non-output-value
+ // operand(s) or SELECT or SELECT_CC.
+ // FIXME: Although we could sometimes handle this, and it does occur in
+ // practice that one of the condition inputs to the select is also one of
+ // the outputs, we currently can't deal with this.
+ if (User->getOpcode() == ISD::SELECT) {
+ if (User->getOperand(0) == PromOps[i])
+ return SDValue();
+ } else if (User->getOpcode() == ISD::SELECT_CC) {
+ if (User->getOperand(0) == PromOps[i] ||
+ User->getOperand(1) == PromOps[i])
+ return SDValue();
+ }
+ }
+ }
+
+ // Replace all inputs with the extension operand.
+ for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
+ // Constants may have users outside the cluster of to-be-promoted nodes,
+ // and so we need to replace those as we do the promotions.
+ if (isa<ConstantSDNode>(Inputs[i]))
+ continue;
+ else
+ DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
+ }
+
+ // Replace all operations (these are all the same, but have a different
+ // (i1) return type). DAG.getNode will validate that the types of
+ // a binary operator match, so go through the list in reverse so that
+ // we've likely promoted both operands first. Any intermediate truncations or
+ // extensions disappear.
+ while (!PromOps.empty()) {
+ SDValue PromOp = PromOps.back();
+ PromOps.pop_back();
+
+ if (PromOp.getOpcode() == ISD::TRUNCATE ||
+ PromOp.getOpcode() == ISD::SIGN_EXTEND ||
+ PromOp.getOpcode() == ISD::ZERO_EXTEND ||
+ PromOp.getOpcode() == ISD::ANY_EXTEND) {
+ if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
+ PromOp.getOperand(0).getValueType() != MVT::i1) {
+ // The operand is not yet ready (see comment below).
+ PromOps.insert(PromOps.begin(), PromOp);
+ continue;
+ }
+
+ SDValue RepValue = PromOp.getOperand(0);
+ if (isa<ConstantSDNode>(RepValue))
+ RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
+
+ DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
+ continue;
+ }
+
+ unsigned C;
+ switch (PromOp.getOpcode()) {
+ default: C = 0; break;
+ case ISD::SELECT: C = 1; break;
+ case ISD::SELECT_CC: C = 2; break;
+ }
+
+ if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
+ PromOp.getOperand(C).getValueType() != MVT::i1) ||
+ (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
+ PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
+ // The to-be-promoted operands of this node have not yet been
+ // promoted (this should be rare because we're going through the
+ // list backward, but if one of the operands has several users in
+ // this cluster of to-be-promoted nodes, it is possible).
+ PromOps.insert(PromOps.begin(), PromOp);
+ continue;
+ }
+
+ SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
+ PromOp.getNode()->op_end());
+
+ // If there are any constant inputs, make sure they're replaced now.
+ for (unsigned i = 0; i < 2; ++i)
+ if (isa<ConstantSDNode>(Ops[C+i]))
+ Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
+
+ DAG.ReplaceAllUsesOfValueWith(PromOp,
+ DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
+ }
+
+ // Now we're left with the initial truncation itself.
+ if (N->getOpcode() == ISD::TRUNCATE)
+ return N->getOperand(0);
+
+ // Otherwise, this is a comparison. The operands to be compared have just
+ // changed type (to i1), but everything else is the same.
+ return SDValue(N, 0);
+}
+
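A minimal sketch, not part of the patch, of the identity DAGCombineTruncBoolExt above relies on: it gathers a self-contained cluster of and/or/xor/select nodes whose leaves are extensions of i1 values (or constants), rewrites the cluster directly on i1, and lets the intermediate extensions and the final truncation disappear. Plain C++ integers stand in for SelectionDAG nodes here:

#include <cassert>
#include <cstdint>

int main() {
  // For every pair of i1 inputs, combining their (sign-)extended forms and
  // truncating the result back to i1 gives the same bit as combining the i1
  // values directly; xor stands in for any of the promoted operations.
  for (unsigned a = 0; a <= 1; ++a)
    for (unsigned b = 0; b <= 1; ++b) {
      uint32_t Wide = (a ? 0xFFFFFFFFu : 0u) ^ (b ? 0xFFFFFFFFu : 0u);
      bool Truncated = (Wide & 1u) != 0;   // trunc ... to i1
      bool Direct = (a != 0) ^ (b != 0);   // the same xor on the i1 values
      assert(Truncated == Direct);
    }
  return 0;
}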
+SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc dl(N);
+
+ // If we're tracking CR bits, we need to be careful that we don't have:
+ // zext(binary-ops(trunc(x), trunc(y)))
+ // or
+  //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...))
+ // such that we're unnecessarily moving things into CR bits that can more
+ // efficiently stay in GPRs. Note that if we're not certain that the high
+ // bits are set as required by the final extension, we still may need to do
+ // some masking to get the proper behavior.
+
+ // This same functionality is important on PPC64 when dealing with
+ // 32-to-64-bit extensions; these occur often when 32-bit values are used as
+ // the return values of functions. Because it is so similar, it is handled
+ // here as well.
+
+ if (N->getValueType(0) != MVT::i32 &&
+ N->getValueType(0) != MVT::i64)
+ return SDValue();
+
+ if (!((N->getOperand(0).getValueType() == MVT::i1 &&
+ Subtarget.useCRBits()) ||
+ (N->getOperand(0).getValueType() == MVT::i32 &&
+ Subtarget.isPPC64())))
+ return SDValue();
+
+ if (N->getOperand(0).getOpcode() != ISD::AND &&
+ N->getOperand(0).getOpcode() != ISD::OR &&
+ N->getOperand(0).getOpcode() != ISD::XOR &&
+ N->getOperand(0).getOpcode() != ISD::SELECT &&
+ N->getOperand(0).getOpcode() != ISD::SELECT_CC)
+ return SDValue();
+
+ SmallVector<SDValue, 4> Inputs;
+ SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
+ SmallPtrSet<SDNode *, 16> Visited;
+
+ // Visit all inputs, collect all binary operations (and, or, xor and
+ // select) that are all fed by truncations.
+ while (!BinOps.empty()) {
+ SDValue BinOp = BinOps.back();
+ BinOps.pop_back();
+
+ if (!Visited.insert(BinOp.getNode()))
+ continue;
+
+ PromOps.push_back(BinOp);
+
+ for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
+ // The condition of the select is not promoted.
+ if (BinOp.getOpcode() == ISD::SELECT && i == 0)
+ continue;
+ if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
+ continue;
+
+ if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
+ isa<ConstantSDNode>(BinOp.getOperand(i))) {
+ Inputs.push_back(BinOp.getOperand(i));
+ } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
+ BinOp.getOperand(i).getOpcode() == ISD::OR ||
+ BinOp.getOperand(i).getOpcode() == ISD::XOR ||
+ BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
+ BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
+ BinOps.push_back(BinOp.getOperand(i));
+ } else {
+ // We have an input that is not a truncation or another binary
+ // operation; we'll abort this transformation.
+ return SDValue();
+ }
+ }
+ }
+
+ // Make sure that this is a self-contained cluster of operations (which
+ // is not quite the same thing as saying that everything has only one
+ // use).
+ for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
+ if (isa<ConstantSDNode>(Inputs[i]))
+ continue;
+
+ for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
+ UE = Inputs[i].getNode()->use_end();
+ UI != UE; ++UI) {
+ SDNode *User = *UI;
+ if (User != N && !Visited.count(User))
+ return SDValue();
+
+ // Make sure that we're not going to promote the non-output-value
+      // operand(s) of SELECT or SELECT_CC.
+ // FIXME: Although we could sometimes handle this, and it does occur in
+ // practice that one of the condition inputs to the select is also one of
+ // the outputs, we currently can't deal with this.
+ if (User->getOpcode() == ISD::SELECT) {
+ if (User->getOperand(0) == Inputs[i])
+ return SDValue();
+ } else if (User->getOpcode() == ISD::SELECT_CC) {
+ if (User->getOperand(0) == Inputs[i] ||
+ User->getOperand(1) == Inputs[i])
+ return SDValue();
+ }
+ }
+ }
+
+ for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
+ for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
+ UE = PromOps[i].getNode()->use_end();
+ UI != UE; ++UI) {
+ SDNode *User = *UI;
+ if (User != N && !Visited.count(User))
+ return SDValue();
+
+ // Make sure that we're not going to promote the non-output-value
+      // operand(s) of SELECT or SELECT_CC.
+ // FIXME: Although we could sometimes handle this, and it does occur in
+ // practice that one of the condition inputs to the select is also one of
+ // the outputs, we currently can't deal with this.
+ if (User->getOpcode() == ISD::SELECT) {
+ if (User->getOperand(0) == PromOps[i])
+ return SDValue();
+ } else if (User->getOpcode() == ISD::SELECT_CC) {
+ if (User->getOperand(0) == PromOps[i] ||
+ User->getOperand(1) == PromOps[i])
+ return SDValue();
+ }
+ }
+ }
+
+ unsigned PromBits = N->getOperand(0).getValueSizeInBits();
+ bool ReallyNeedsExt = false;
+ if (N->getOpcode() != ISD::ANY_EXTEND) {
+    // If not all of the inputs are already sign/zero extended, then
+    // we'll still need to do that at the end.
+ for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
+ if (isa<ConstantSDNode>(Inputs[i]))
+ continue;
+
+ unsigned OpBits =
+ Inputs[i].getOperand(0).getValueSizeInBits();
+ assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
+
+ if ((N->getOpcode() == ISD::ZERO_EXTEND &&
+ !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
+ APInt::getHighBitsSet(OpBits,
+ OpBits-PromBits))) ||
+ (N->getOpcode() == ISD::SIGN_EXTEND &&
+ DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
+ (OpBits-(PromBits-1)))) {
+ ReallyNeedsExt = true;
+ break;
+ }
+ }
+ }
+
+ // Replace all inputs, either with the truncation operand, or a
+ // truncation or extension to the final output type.
+ for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
+ // Constant inputs need to be replaced with the to-be-promoted nodes that
+ // use them because they might have users outside of the cluster of
+ // promoted nodes.
+ if (isa<ConstantSDNode>(Inputs[i]))
+ continue;
+
+ SDValue InSrc = Inputs[i].getOperand(0);
+ if (Inputs[i].getValueType() == N->getValueType(0))
+ DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
+ else if (N->getOpcode() == ISD::SIGN_EXTEND)
+ DAG.ReplaceAllUsesOfValueWith(Inputs[i],
+ DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
+ else if (N->getOpcode() == ISD::ZERO_EXTEND)
+ DAG.ReplaceAllUsesOfValueWith(Inputs[i],
+ DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
+ else
+ DAG.ReplaceAllUsesOfValueWith(Inputs[i],
+ DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
+ }
+
+ // Replace all operations (these are all the same, but have a different
+ // (promoted) return type). DAG.getNode will validate that the types of
+ // a binary operator match, so go through the list in reverse so that
+ // we've likely promoted both operands first.
+ while (!PromOps.empty()) {
+ SDValue PromOp = PromOps.back();
+ PromOps.pop_back();
+
+ unsigned C;
+ switch (PromOp.getOpcode()) {
+ default: C = 0; break;
+ case ISD::SELECT: C = 1; break;
+ case ISD::SELECT_CC: C = 2; break;
+ }
+
+ if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
+ PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
+ (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
+ PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
+ // The to-be-promoted operands of this node have not yet been
+ // promoted (this should be rare because we're going through the
+ // list backward, but if one of the operands has several users in
+ // this cluster of to-be-promoted nodes, it is possible).
+ PromOps.insert(PromOps.begin(), PromOp);
+ continue;
+ }
+
+ SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
+ PromOp.getNode()->op_end());
+
+ // If this node has constant inputs, then they'll need to be promoted here.
+ for (unsigned i = 0; i < 2; ++i) {
+ if (!isa<ConstantSDNode>(Ops[C+i]))
+ continue;
+ if (Ops[C+i].getValueType() == N->getValueType(0))
+ continue;
+
+ if (N->getOpcode() == ISD::SIGN_EXTEND)
+ Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
+ else if (N->getOpcode() == ISD::ZERO_EXTEND)
+ Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
+ else
+ Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
+ }
+
+ DAG.ReplaceAllUsesOfValueWith(PromOp,
+ DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
+ }
+
+ // Now we're left with the initial extension itself.
+ if (!ReallyNeedsExt)
+ return N->getOperand(0);
+
+ // To zero extend, just mask off everything except for the first bit (in the
+ // i1 case).
+ if (N->getOpcode() == ISD::ZERO_EXTEND)
+ return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
+ DAG.getConstant(APInt::getLowBitsSet(
+ N->getValueSizeInBits(0), PromBits),
+ N->getValueType(0)));
+
+ assert(N->getOpcode() == ISD::SIGN_EXTEND &&
+ "Invalid extension type");
+ EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0));
+ SDValue ShiftCst =
+ DAG.getConstant(N->getValueSizeInBits(0)-PromBits, ShiftAmountTy);
+ return DAG.getNode(ISD::SRA, dl, N->getValueType(0),
+ DAG.getNode(ISD::SHL, dl, N->getValueType(0),
+ N->getOperand(0), ShiftCst), ShiftCst);
+}
+
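When ReallyNeedsExt is set, the code above restores the required extension semantics on the already-promoted value: zero extension turns into an AND with a mask of the low PromBits bits, and sign extension turns into a shift left followed by an arithmetic shift right. A small scalar sketch of those two fix-ups, not part of the patch, assuming an 8-bit promoted width, a 32-bit result, and two's-complement arithmetic shifts (true on all LLVM host platforms):

#include <cassert>
#include <cstdint>

int main() {
  const unsigned PromBits = 8;   // width of the promoted value
  const unsigned OpBits = 32;    // width of the final result
  int32_t x = -109;              // bit pattern 0xFFFFFF93; only the low 8 bits are "real"

  // Zero extend: mask off everything above the promoted bits.
  uint32_t ZeroExt = (uint32_t)x & ((1u << PromBits) - 1);
  assert(ZeroExt == 0x93u);

  // Sign extend: move the promoted bits to the top, then shift back
  // arithmetically so the sign bit is replicated into the high bits.
  int32_t SignExt = (int32_t)((uint32_t)x << (OpBits - PromBits)) >> (OpBits - PromBits);
  assert(SignExt == -109);
  return 0;
}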
SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
const TargetMachine &TM = getTargetMachine();
@@ -7051,6 +8213,14 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
return N->getOperand(0);
}
break;
+ case ISD::SIGN_EXTEND:
+ case ISD::ZERO_EXTEND:
+ case ISD::ANY_EXTEND:
+ return DAGCombineExtBoolTrunc(N, DCI);
+ case ISD::TRUNCATE:
+ case ISD::SETCC:
+ case ISD::SELECT_CC:
+ return DAGCombineTruncBoolExt(N, DCI);
case ISD::FDIV: {
assert(TM.Options.UnsafeFPMath &&
"Reciprocal estimates require UnsafeFPMath");
@@ -7058,7 +8228,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
if (N->getOperand(1).getOpcode() == ISD::FSQRT) {
SDValue RV =
DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0), DCI);
- if (RV.getNode() != 0) {
+ if (RV.getNode()) {
DCI.AddToWorklist(RV.getNode());
return DAG.getNode(ISD::FMUL, dl, N->getValueType(0),
N->getOperand(0), RV);
@@ -7068,7 +8238,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
SDValue RV =
DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0).getOperand(0),
DCI);
- if (RV.getNode() != 0) {
+ if (RV.getNode()) {
DCI.AddToWorklist(RV.getNode());
RV = DAG.getNode(ISD::FP_EXTEND, SDLoc(N->getOperand(1)),
N->getValueType(0), RV);
@@ -7081,7 +8251,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
SDValue RV =
DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0).getOperand(0),
DCI);
- if (RV.getNode() != 0) {
+ if (RV.getNode()) {
DCI.AddToWorklist(RV.getNode());
RV = DAG.getNode(ISD::FP_ROUND, SDLoc(N->getOperand(1)),
N->getValueType(0), RV,
@@ -7093,7 +8263,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
}
SDValue RV = DAGCombineFastRecip(N->getOperand(1), DCI);
- if (RV.getNode() != 0) {
+ if (RV.getNode()) {
DCI.AddToWorklist(RV.getNode());
return DAG.getNode(ISD::FMUL, dl, N->getValueType(0),
N->getOperand(0), RV);
@@ -7108,12 +8278,12 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
// Compute this as 1/(1/sqrt(X)), which is the reciprocal of the
// reciprocal sqrt.
SDValue RV = DAGCombineFastRecipFSQRT(N->getOperand(0), DCI);
- if (RV.getNode() != 0) {
+ if (RV.getNode()) {
DCI.AddToWorklist(RV.getNode());
RV = DAGCombineFastRecip(RV, DCI);
- if (RV.getNode() != 0) {
- // Unfortunately, RV is now NaN if the input was exactly 0. Select out
- // this case and force the answer to 0.
+ if (RV.getNode()) {
+ // Unfortunately, RV is now NaN if the input was exactly 0. Select out
+ // this case and force the answer to 0.
EVT VT = RV.getValueType();
@@ -7189,7 +8359,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
};
Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
- DAG.getVTList(MVT::Other), Ops, array_lengthof(Ops),
+ DAG.getVTList(MVT::Other), Ops,
cast<StoreSDNode>(N)->getMemoryVT(),
cast<StoreSDNode>(N)->getMemOperand());
DCI.AddToWorklist(Val.getNode());
@@ -7216,8 +8386,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
};
return
DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
- Ops, array_lengthof(Ops),
- cast<StoreSDNode>(N)->getMemoryVT(),
+ Ops, cast<StoreSDNode>(N)->getMemoryVT(),
cast<StoreSDNode>(N)->getMemOperand());
}
break;
@@ -7234,6 +8403,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
// This is a type-legal unaligned Altivec load.
SDValue Chain = LD->getChain();
SDValue Ptr = LD->getBasePtr();
+ bool isLittleEndian = Subtarget.isLittleEndian();
// This implements the loading of unaligned vectors as described in
// the venerable Apple Velocity Engine overview. Specifically:
@@ -7241,25 +8411,28 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
// https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
//
// The general idea is to expand a sequence of one or more unaligned
- // loads into a alignment-based permutation-control instruction (lvsl),
- // a series of regular vector loads (which always truncate their
- // input address to an aligned address), and a series of permutations.
- // The results of these permutations are the requested loaded values.
- // The trick is that the last "extra" load is not taken from the address
- // you might suspect (sizeof(vector) bytes after the last requested
- // load), but rather sizeof(vector) - 1 bytes after the last
- // requested vector. The point of this is to avoid a page fault if the
- // base address happend to be aligned. This works because if the base
- // address is aligned, then adding less than a full vector length will
- // cause the last vector in the sequence to be (re)loaded. Otherwise,
- // the next vector will be fetched as you might suspect was necessary.
+ // loads into an alignment-based permutation-control instruction (lvsl
+ // or lvsr), a series of regular vector loads (which always truncate
+ // their input address to an aligned address), and a series of
+ // permutations. The results of these permutations are the requested
+ // loaded values. The trick is that the last "extra" load is not taken
+ // from the address you might suspect (sizeof(vector) bytes after the
+ // last requested load), but rather sizeof(vector) - 1 bytes after the
+ // last requested vector. The point of this is to avoid a page fault if
+ // the base address happened to be aligned. This works because if the
+ // base address is aligned, then adding less than a full vector length
+ // will cause the last vector in the sequence to be (re)loaded.
+ // Otherwise, the next vector will be fetched as you might suspect was
+ // necessary.
// We might be able to reuse the permutation generation from
// a different base address offset from this one by an aligned amount.
// The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
// optimization later.
- SDValue PermCntl = BuildIntrinsicOp(Intrinsic::ppc_altivec_lvsl, Ptr,
- DAG, dl, MVT::v16i8);
+ Intrinsic::ID Intr = (isLittleEndian ?
+ Intrinsic::ppc_altivec_lvsr :
+ Intrinsic::ppc_altivec_lvsl);
+ SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, MVT::v16i8);
// Refine the alignment of the original load (a "new" load created here
// which was identical to the first except for the alignment would be
@@ -7308,8 +8481,18 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
if (ExtraLoad.getValueType() != MVT::v4i32)
ExtraLoad = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, ExtraLoad);
- SDValue Perm = BuildIntrinsicOp(Intrinsic::ppc_altivec_vperm,
- BaseLoad, ExtraLoad, PermCntl, DAG, dl);
+ // Because vperm has a big-endian bias, we must reverse the order
+ // of the input vectors and complement the permute control vector
+ // when generating little endian code. We have already handled the
+ // latter by using lvsr instead of lvsl, so just reverse BaseLoad
+ // and ExtraLoad here.
+ SDValue Perm;
+ if (isLittleEndian)
+ Perm = BuildIntrinsicOp(Intrinsic::ppc_altivec_vperm,
+ ExtraLoad, BaseLoad, PermCntl, DAG, dl);
+ else
+ Perm = BuildIntrinsicOp(Intrinsic::ppc_altivec_vperm,
+ BaseLoad, ExtraLoad, PermCntl, DAG, dl);
if (VT != MVT::v4i32)
Perm = DAG.getNode(ISD::BITCAST, dl, VT, Perm);
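The comment in the preceding hunk explains the expansion of unaligned Altivec loads into a permute-control load (lvsl/lvsr), aligned vector loads, and vperm, and in particular why the extra load is taken sizeof(vector) - 1 bytes past the last requested address rather than a full vector later. A self-contained sketch of that address arithmetic, not part of the patch, assuming 16-byte vectors:

#include <cassert>
#include <cstdint>

// Aligned Altivec loads (lvx) truncate their effective address to a 16-byte
// boundary; model only that truncation here.
static uint64_t AlignedLoadAddr(uint64_t Addr) { return Addr & ~uint64_t(15); }

int main() {
  // Misaligned base: the extra load at base + 15 truncates to the next
  // aligned vector, which really is needed to assemble the result.
  assert(AlignedLoadAddr(0x1004) == 0x1000);
  assert(AlignedLoadAddr(0x1004 + 15) == 0x1010);

  // Aligned base: base + 15 truncates back to the same vector, so the
  // sequence never touches the next (possibly unmapped) page. Using
  // base + 16 instead would read one vector too far in this case.
  assert(AlignedLoadAddr(0x1000 + 15) == 0x1000);
  assert(AlignedLoadAddr(0x1000 + 16) == 0x1010);
  return 0;
}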
@@ -7334,24 +8517,26 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
++UI;
SmallVector<SDValue, 8> Ops;
- for (SDNode::op_iterator O = User->op_begin(),
- OE = User->op_end(); O != OE; ++O) {
- if (*O == Use)
+ for (const SDUse &O : User->ops()) {
+ if (O == Use)
Ops.push_back(To);
else
- Ops.push_back(*O);
+ Ops.push_back(O);
}
- DAG.UpdateNodeOperands(User, Ops.data(), Ops.size());
+ DAG.UpdateNodeOperands(User, Ops);
}
return SDValue(N, 0);
}
}
break;
- case ISD::INTRINSIC_WO_CHAIN:
- if (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue() ==
- Intrinsic::ppc_altivec_lvsl &&
+ case ISD::INTRINSIC_WO_CHAIN: {
+ bool isLittleEndian = Subtarget.isLittleEndian();
+ Intrinsic::ID Intr = (isLittleEndian ?
+ Intrinsic::ppc_altivec_lvsr :
+ Intrinsic::ppc_altivec_lvsl);
+ if (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue() == Intr &&
N->getOperand(1)->getOpcode() == ISD::ADD) {
SDValue Add = N->getOperand(1);
@@ -7363,8 +8548,8 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
UE = BasePtr->use_end(); UI != UE; ++UI) {
if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() ==
- Intrinsic::ppc_altivec_lvsl) {
- // We've found another LVSL, and this address if an aligned
+ Intr) {
+ // We've found another LVSL/LVSR, and this address is an aligned
// multiple of that one. The results will be the same, so use the
// one we've just found instead.
@@ -7373,6 +8558,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
}
}
}
+ }
break;
case ISD::BSWAP:
@@ -7395,7 +8581,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
DAG.getVTList(N->getValueType(0) == MVT::i64 ?
MVT::i64 : MVT::i32, MVT::Other),
- Ops, 3, LD->getMemoryVT(), LD->getMemOperand());
+ Ops, LD->getMemoryVT(), LD->getMemOperand());
// If this is an i16 load, insert the truncate.
SDValue ResVal = BSLoad;
@@ -7425,7 +8611,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
!N->getOperand(2).hasOneUse()) {
// Scan all of the users of the LHS, looking for VCMPo's that match.
- SDNode *VCMPoNode = 0;
+ SDNode *VCMPoNode = nullptr;
SDNode *LHSN = N->getOperand(0).getNode();
for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
@@ -7446,9 +8632,9 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
// Look at the (necessarily single) use of the flag value. If it has a
// chain, this transformation is more complex. Note that multiple things
// could use the value result, which we should ignore.
- SDNode *FlagUser = 0;
+ SDNode *FlagUser = nullptr;
for (SDNode::use_iterator UI = VCMPoNode->use_begin();
- FlagUser == 0; ++UI) {
+ FlagUser == nullptr; ++UI) {
assert(UI != VCMPoNode->use_end() && "Didn't find user!");
SDNode *User = *UI;
for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
@@ -7466,6 +8652,25 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
}
break;
}
+ case ISD::BRCOND: {
+ SDValue Cond = N->getOperand(1);
+ SDValue Target = N->getOperand(2);
+
+ if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
+ cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
+ Intrinsic::ppc_is_decremented_ctr_nonzero) {
+
+ // We now need to make the intrinsic dead (it cannot be instruction
+ // selected).
+ DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
+ assert(Cond.getNode()->hasOneUse() &&
+ "Counter decrement has more than one use");
+
+ return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
+ N->getOperand(0), Target);
+ }
+ }
+ break;
case ISD::BR_CC: {
// If this is a branch on an altivec predicate comparison, lower this so
// that we don't have to do a MFOCRF: instead, branch directly on CR6. This
@@ -7534,7 +8739,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
DAG.getConstant(CompareOpc, MVT::i32)
};
EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
- SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3);
+ SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
// Unpack the result based on how the target uses it.
PPC::Predicate CompOpc;
@@ -7570,11 +8775,11 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
// Inline Assembly Support
//===----------------------------------------------------------------------===//
-void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) const {
+void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth) const {
KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
switch (Op.getOpcode()) {
default: break;
@@ -7630,6 +8835,11 @@ PPCTargetLowering::getConstraintType(const std::string &Constraint) const {
// suboptimal.
return C_Memory;
}
+ } else if (Constraint == "wc") { // individual CR bits.
+ return C_RegisterClass;
+ } else if (Constraint == "wa" || Constraint == "wd" ||
+ Constraint == "wf" || Constraint == "ws") {
+ return C_RegisterClass; // VSX registers.
}
return TargetLowering::getConstraintType(Constraint);
}
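The constraint-handling hunks in this area add support for the "wc" constraint (an individual CR bit) and the "wa"/"wd"/"wf"/"ws" VSX register constraints. A hypothetical use of the VSX constraints from C inline assembly, shown only for orientation; the xvadddp mnemonic and the %x operand modifier (which prints the full 0-63 VSX register number) follow the usual GCC PowerPC conventions and are not defined by this patch:

// Requires a VSX-enabled target (e.g. -mcpu=pwr7 -mvsx).
typedef __vector double v2df;

static v2df vsx_add(v2df a, v2df b) {
  v2df c;
  // "wa" asks for any VSX register for each operand.
  __asm__("xvadddp %x0, %x1, %x2" : "=wa"(c) : "wa"(a), "wa"(b));
  return c;
}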
@@ -7644,10 +8854,21 @@ PPCTargetLowering::getSingleConstraintMatchWeight(
Value *CallOperandVal = info.CallOperandVal;
// If we don't have a value, we can't do a match,
// but allow it at the lowest weight.
- if (CallOperandVal == NULL)
+ if (!CallOperandVal)
return CW_Default;
Type *type = CallOperandVal->getType();
+
// Look at the constraint type.
+ if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
+ return CW_Register; // an individual CR bit.
+ else if ((StringRef(constraint) == "wa" ||
+ StringRef(constraint) == "wd" ||
+ StringRef(constraint) == "wf") &&
+ type->isVectorTy())
+ return CW_Register;
+ else if (StringRef(constraint) == "ws" && type->isDoubleTy())
+ return CW_Register;
+
switch (*constraint) {
default:
weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
@@ -7685,11 +8906,11 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
// GCC RS6000 Constraint Letters
switch (Constraint[0]) {
case 'b': // R1-R31
- if (VT == MVT::i64 && PPCSubTarget.isPPC64())
+ if (VT == MVT::i64 && Subtarget.isPPC64())
return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
case 'r': // R0-R31
- if (VT == MVT::i64 && PPCSubTarget.isPPC64())
+ if (VT == MVT::i64 && Subtarget.isPPC64())
return std::make_pair(0U, &PPC::G8RCRegClass);
return std::make_pair(0U, &PPC::GPRCRegClass);
case 'f':
@@ -7703,6 +8924,13 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
case 'y': // crrc
return std::make_pair(0U, &PPC::CRRCRegClass);
}
+ } else if (Constraint == "wc") { // an individual CR bit.
+ return std::make_pair(0U, &PPC::CRBITRCRegClass);
+ } else if (Constraint == "wa" || Constraint == "wd" ||
+ Constraint == "wf") {
+ return std::make_pair(0U, &PPC::VSRCRegClass);
+ } else if (Constraint == "ws") {
+ return std::make_pair(0U, &PPC::VSFRCRegClass);
}
std::pair<unsigned, const TargetRegisterClass*> R =
@@ -7714,7 +8942,7 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
// register.
// FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
// the AsmName field from *RegisterInfo.td, then this would not be necessary.
- if (R.first && VT == MVT::i64 && PPCSubTarget.isPPC64() &&
+ if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
PPC::GPRCRegClass.contains(R.first)) {
const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
return std::make_pair(TRI->getMatchingSuperReg(R.first,
@@ -7732,7 +8960,7 @@ void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
std::string &Constraint,
std::vector<SDValue>&Ops,
SelectionDAG &DAG) const {
- SDValue Result(0,0);
+ SDValue Result;
// Only support length 1 constraints.
if (Constraint.length() > 1) return;
@@ -7838,6 +9066,9 @@ SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
MachineFrameInfo *MFI = MF.getFrameInfo();
MFI->setReturnAddressIsTaken(true);
+ if (verifyReturnAddressArgumentIsConstant(Op, DAG))
+ return SDValue();
+
SDLoc dl(Op);
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
@@ -7845,8 +9076,8 @@ SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
// the stack.
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
FuncInfo->setLRStoreRequired();
- bool isPPC64 = PPCSubTarget.isPPC64();
- bool isDarwinABI = PPCSubTarget.isDarwinABI();
+ bool isPPC64 = Subtarget.isPPC64();
+ bool isDarwinABI = Subtarget.isDarwinABI();
if (Depth > 0) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
@@ -7896,6 +9127,30 @@ SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
return FrameAddr;
}
+// FIXME? Maybe this could be a TableGen attribute on some registers and
+// this table could be generated automatically from RegInfo.
+unsigned PPCTargetLowering::getRegisterByName(const char* RegName,
+ EVT VT) const {
+ bool isPPC64 = Subtarget.isPPC64();
+ bool isDarwinABI = Subtarget.isDarwinABI();
+
+ if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) ||
+ (!isPPC64 && VT != MVT::i32))
+ report_fatal_error("Invalid register global variable type");
+
+ bool is64Bit = isPPC64 && VT == MVT::i64;
+ unsigned Reg = StringSwitch<unsigned>(RegName)
+ .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
+ .Case("r2", isDarwinABI ? 0 : (is64Bit ? PPC::X2 : PPC::R2))
+ .Case("r13", (!isPPC64 && isDarwinABI) ? 0 :
+ (is64Bit ? PPC::X13 : PPC::R13))
+ .Default(0);
+
+ if (Reg)
+ return Reg;
+ report_fatal_error("Invalid register name global variable");
+}
+
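getRegisterByName above is what backs named-register global variables (and the llvm.read_register/llvm.write_register intrinsics) for the small set of registers it accepts: r1, r2, and r13, subject to the ABI checks in the table. A hypothetical use from C, reading the stack pointer through a global register variable, assuming the usual GCC/Clang syntax:

// r1 is the PowerPC stack pointer and one of the names accepted above.
register void *stack_pointer __asm__("r1");

void *current_sp(void) { return stack_pointer; }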
bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
// The PowerPC target isn't yet aware of offsets.
@@ -7918,14 +9173,51 @@ EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
bool IsMemset, bool ZeroMemset,
bool MemcpyStrSrc,
MachineFunction &MF) const {
- if (this->PPCSubTarget.isPPC64()) {
+ if (Subtarget.isPPC64()) {
return MVT::i64;
} else {
return MVT::i32;
}
}
+/// \brief Returns true if it is beneficial to convert a load of a constant
+/// to just the constant itself.
+bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
+ Type *Ty) const {
+ assert(Ty->isIntegerTy());
+
+ unsigned BitSize = Ty->getPrimitiveSizeInBits();
+ if (BitSize == 0 || BitSize > 64)
+ return false;
+ return true;
+}
+
+bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
+ if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
+ return false;
+ unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
+ unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
+ return NumBits1 == 64 && NumBits2 == 32;
+}
+
+bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
+ if (!VT1.isInteger() || !VT2.isInteger())
+ return false;
+ unsigned NumBits1 = VT1.getSizeInBits();
+ unsigned NumBits2 = VT2.getSizeInBits();
+ return NumBits1 == 64 && NumBits2 == 32;
+}
+
+bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
+ return isInt<16>(Imm) || isUInt<16>(Imm);
+}
+
+bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
+ return isInt<16>(Imm) || isUInt<16>(Imm);
+}
+
bool PPCTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
+ unsigned,
bool *Fast) const {
if (DisablePPCUnaligned)
return false;
@@ -7939,8 +9231,14 @@ bool PPCTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
if (!VT.isSimple())
return false;
- if (VT.getSimpleVT().isVector())
- return false;
+ if (VT.getSimpleVT().isVector()) {
+ if (Subtarget.hasVSX()) {
+ if (VT != MVT::v2f64 && VT != MVT::v2i64)
+ return false;
+ } else {
+ return false;
+ }
+ }
if (VT == MVT::ppcf128)
return false;
@@ -7968,8 +9266,17 @@ bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
return false;
}
+bool
+PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
+ EVT VT , unsigned DefinedValues) const {
+ if (VT == MVT::v2i64)
+ return false;
+
+ return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
+}
+
Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
- if (DisableILPPref || PPCSubTarget.enableMachineScheduler())
+ if (DisableILPPref || Subtarget.enableMachineScheduler())
return TargetLowering::getSchedulingPreference(N);
return Sched::ILP;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 09b20cb..74d3c65 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -18,9 +18,8 @@
#include "PPC.h"
#include "PPCInstrInfo.h"
#include "PPCRegisterInfo.h"
-#include "PPCSubtarget.h"
-#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
namespace llvm {
@@ -71,19 +70,14 @@ namespace llvm {
TOC_ENTRY,
- /// The following three target-specific nodes are used for calls through
+ /// The following two target-specific nodes are used for calls through
/// function pointers in the 64-bit SVR4 ABI.
- /// Restore the TOC from the TOC save area of the current stack frame.
- /// This is basically a hard coded load instruction which additionally
- /// takes/produces a flag.
- TOC_RESTORE,
-
/// Like a regular LOAD but additionally taking/producing a flag.
LOAD,
- /// LOAD into r2 (also taking/producing a flag). Like TOC_RESTORE, this is
- /// a hard coded load instruction.
+ /// Like LOAD (taking/producing a flag), but using r2 as hard-coded
+ /// destination.
LOAD_TOC,
/// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX)
@@ -121,6 +115,12 @@ namespace llvm {
/// resultant GPR. Bits corresponding to other CR regs are undefined.
MFOCRF,
+ // FIXME: Remove these once the ANDI glue bug is fixed:
+ /// i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the
+ /// eq or gt bit of CR0 after executing andi. x, 1. This is used to
+ /// implement truncation of i32 or i64 to i1.
+ ANDIo_1_EQ_BIT, ANDIo_1_GT_BIT,
+
// EH_SJLJ_SETJMP - SjLj exception handling setjmp.
EH_SJLJ_SETJMP,
@@ -177,6 +177,8 @@ namespace llvm {
CR6SET,
CR6UNSET,
+ /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by initial-exec TLS
+ /// on PPC32.
PPC32_GOT,
/// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by general dynamic and
@@ -299,25 +301,28 @@ namespace llvm {
namespace PPC {
/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
- bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary);
+ bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
+ SelectionDAG &DAG);
/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
- bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary);
+ bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
+ SelectionDAG &DAG);
/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VRGL* instruction with the specified unit size (1,2 or 4 bytes).
bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
- bool isUnary);
+ unsigned ShuffleKind, SelectionDAG &DAG);
/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VRGH* instruction with the specified unit size (1,2 or 4 bytes).
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
- bool isUnary);
+ unsigned ShuffleKind, SelectionDAG &DAG);
- /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
- /// amount, otherwise return -1.
- int isVSLDOIShuffleMask(SDNode *N, bool isUnary);
+ /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the
+ /// shift amount, otherwise return -1.
+ int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
+ SelectionDAG &DAG);
/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
@@ -330,7 +335,7 @@ namespace llvm {
/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
- unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize);
+ unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG);
/// get_VSPLTI_elt - If this is a build_vector of constants which can be
/// formed by using a vspltis[bhw] instruction of the specified element
@@ -339,28 +344,29 @@ namespace llvm {
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
}
+ class PPCSubtarget;
class PPCTargetLowering : public TargetLowering {
- const PPCSubtarget &PPCSubTarget;
+ const PPCSubtarget &Subtarget;
public:
explicit PPCTargetLowering(PPCTargetMachine &TM);
/// getTargetNodeName() - This method returns the name of a target specific
/// DAG node.
- virtual const char *getTargetNodeName(unsigned Opcode) const;
+ const char *getTargetNodeName(unsigned Opcode) const override;
- virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i32; }
+ MVT getScalarShiftAmountTy(EVT LHSTy) const override { return MVT::i32; }
/// getSetCCResultType - Return the ISD::SETCC ValueType
- virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
+ EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;
/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
- virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
- SDValue &Offset,
- ISD::MemIndexedMode &AM,
- SelectionDAG &DAG) const;
+ bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
+ SDValue &Offset,
+ ISD::MemIndexedMode &AM,
+ SelectionDAG &DAG) const override;
/// SelectAddressRegReg - Given the specified addressed, check to see if it
/// can be represented as an indexed [r+r] operation. Returns false if it
@@ -380,29 +386,31 @@ namespace llvm {
bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
SelectionDAG &DAG) const;
- Sched::Preference getSchedulingPreference(SDNode *N) const;
+ Sched::Preference getSchedulingPreference(SDNode *N) const override;
/// LowerOperation - Provide custom lowering hooks for some operations.
///
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
///
- virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
- SelectionDAG &DAG) const;
+ void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
+ SelectionDAG &DAG) const override;
+
+ SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
- virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ unsigned getRegisterByName(const char* RegName, EVT VT) const override;
- virtual void computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth = 0) const;
+ void computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth = 0) const override;
- virtual MachineBasicBlock *
+ MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB) const;
+ MachineBasicBlock *MBB) const override;
MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
MachineBasicBlock *MBB, bool is64Bit,
unsigned BinOpcode) const;
@@ -416,34 +424,58 @@ namespace llvm {
MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI,
MachineBasicBlock *MBB) const;
- ConstraintType getConstraintType(const std::string &Constraint) const;
+ ConstraintType
+ getConstraintType(const std::string &Constraint) const override;
/// Examine constraint string and operand type and determine a weight value.
/// The operand object must already have been set up with the operand type.
ConstraintWeight getSingleConstraintMatchWeight(
- AsmOperandInfo &info, const char *constraint) const;
+ AsmOperandInfo &info, const char *constraint) const override;
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT VT) const;
+ MVT VT) const override;
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
- unsigned getByValTypeAlignment(Type *Ty) const;
+ unsigned getByValTypeAlignment(Type *Ty) const override;
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
- virtual void LowerAsmOperandForConstraint(SDValue Op,
- std::string &Constraint,
- std::vector<SDValue> &Ops,
- SelectionDAG &DAG) const;
+ void LowerAsmOperandForConstraint(SDValue Op,
+ std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const override;
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
- virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const;
+ bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;
+
+ /// isLegalICmpImmediate - Return true if the specified immediate is legal
+ /// icmp immediate, that is the target has icmp instructions which can
+ /// compare a register against the immediate without having to materialize
+ /// the immediate into a register.
+ bool isLegalICmpImmediate(int64_t Imm) const override;
+
+ /// isLegalAddImmediate - Return true if the specified immediate is legal
+ /// add immediate, that is the target has add instructions which can
+ /// add a register and the immediate without having to materialize
+ /// the immediate into a register.
+ bool isLegalAddImmediate(int64_t Imm) const override;
+
+ /// isTruncateFree - Return true if it's free to truncate a value of
+    /// type Ty1 to type Ty2. e.g. On PPC it's free to truncate an i64 value in
+ /// register X1 to i32 by referencing its sub-register R1.
+ bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
+ bool isTruncateFree(EVT VT1, EVT VT2) const override;
+
+ /// \brief Returns true if it is beneficial to convert a load of a constant
+ /// to just the constant itself.
+ bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
+ Type *Ty) const override;
- virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
+ bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
@@ -456,25 +488,46 @@ namespace llvm {
/// source is constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
- virtual EVT
+ EVT
getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
- MachineFunction &MF) const;
+ MachineFunction &MF) const override;
/// Is unaligned memory access allowed for the given type, and is it fast
/// relative to software emulation.
- virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast = 0) const;
+ bool allowsUnalignedMemoryAccesses(EVT VT,
+ unsigned AddrSpace,
+ bool *Fast = nullptr) const override;
/// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
/// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
/// expanded to FMAs when this method returns true, otherwise fmuladd is
/// expanded to fmul + fadd.
- virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
+ bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
+
+ // Should we expand the build vector with shuffles?
+ bool
+ shouldExpandBuildVectorWithShuffles(EVT VT,
+ unsigned DefinedValues) const override;
/// createFastISel - This method returns a target-specific FastISel object,
/// or null if the target does not support "fast" instruction selection.
- virtual FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
- const TargetLibraryInfo *LibInfo) const;
+ FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
+ const TargetLibraryInfo *LibInfo) const override;
+
+ /// \brief Returns true if an argument of type Ty needs to be passed in a
+ /// contiguous block of registers in calling convention CallConv.
+ bool functionArgumentNeedsConsecutiveRegisters(
+ Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override {
+      // We support any array type as a "consecutive" block in the parameter
+ // save area. The element type defines the alignment requirement and
+ // whether the argument should go in GPRs, FPRs, or VRs if available.
+ //
+ // Note that clang uses this capability both to implement the ELFv2
+ // homogeneous float/vector aggregate ABI, and to avoid having to use
+ // "byval" when passing aggregates that might fully fit in registers.
+ return Ty->isArrayTy();
+ }
private:
SDValue getFramePointerFrameIndex(SelectionDAG & DAG) const;
@@ -515,6 +568,9 @@ namespace llvm {
const PPCSubtarget &Subtarget) const;
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
const PPCSubtarget &Subtarget) const;
+ SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, SDLoc dl) const;
SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
@@ -526,6 +582,7 @@ namespace llvm {
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
@@ -544,39 +601,34 @@ namespace llvm {
const SmallVectorImpl<ISD::InputArg> &Ins,
SmallVectorImpl<SDValue> &InVals) const;
- virtual SDValue
+ SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
+ SmallVectorImpl<SDValue> &InVals) const override;
- virtual SDValue
+ SDValue
LowerCall(TargetLowering::CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const;
+ SmallVectorImpl<SDValue> &InVals) const override;
- virtual bool
+ bool
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
- LLVMContext &Context) const;
+ LLVMContext &Context) const override;
- virtual SDValue
+ SDValue
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const;
+ SDLoc dl, SelectionDAG &DAG) const override;
SDValue
extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, SelectionDAG &DAG,
SDValue ArgVal, SDLoc dl) const;
- void
- setMinReservedArea(MachineFunction &MF, SelectionDAG &DAG,
- unsigned nAltivecParamsAtEnd,
- unsigned MinReservedArea, bool isPPC64) const;
-
SDValue
LowerFormalArguments_Darwin(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
@@ -631,6 +683,8 @@ namespace llvm {
SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
+ SDValue DAGCombineExtBoolTrunc(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue DAGCombineTruncBoolExt(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue DAGCombineFastRecip(SDValue Op, DAGCombinerInfo &DCI) const;
SDValue DAGCombineFastRecipFSQRT(SDValue Op, DAGCombinerInfo &DCI) const;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td b/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
index 36d3a7d..9ed384f 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
@@ -19,11 +19,13 @@ def s16imm64 : Operand<i64> {
let PrintMethod = "printS16ImmOperand";
let EncoderMethod = "getImm16Encoding";
let ParserMatchClass = PPCS16ImmAsmOperand;
+ let DecoderMethod = "decodeSImmOperand<16>";
}
def u16imm64 : Operand<i64> {
let PrintMethod = "printU16ImmOperand";
let EncoderMethod = "getImm16Encoding";
let ParserMatchClass = PPCU16ImmAsmOperand;
+ let DecoderMethod = "decodeUImmOperand<16>";
}
def s17imm64 : Operand<i64> {
// This operand type is used for addis/lis to allow the assembler parser
@@ -32,6 +34,7 @@ def s17imm64 : Operand<i64> {
let PrintMethod = "printS16ImmOperand";
let EncoderMethod = "getImm16Encoding";
let ParserMatchClass = PPCS17ImmAsmOperand;
+ let DecoderMethod = "decodeSImmOperand<16>";
}
def tocentry : Operand<iPTR> {
let MIOperandInfo = (ops i64imm:$imm);
@@ -76,15 +79,22 @@ def HI48_64 : SDNodeXForm<imm, [{
// Calls.
//
-let Interpretation64Bit = 1 in {
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7 in {
let isBranch = 1, isIndirectBranch = 1, Uses = [CTR8] in {
- def BCTR8 : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", BrB, []>,
+ def BCTR8 : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", IIC_BrB,
+ []>,
+ Requires<[In64BitMode]>;
+ def BCCCTR8 : XLForm_2_br<19, 528, 0, (outs), (ins pred:$cond),
+ "b${cond:cc}ctr${cond:pm} ${cond:reg}", IIC_BrB,
+ []>,
Requires<[In64BitMode]>;
- let isCodeGenOnly = 1 in
- def BCCTR8 : XLForm_2_br<19, 528, 0, (outs), (ins pred:$cond),
- "b${cond:cc}ctr${cond:pm} ${cond:reg}", BrB, []>,
+ def BCCTR8 : XLForm_2_br2<19, 528, 12, 0, (outs), (ins crbitrc:$bi),
+ "bcctr 12, $bi, 0", IIC_BrB, []>,
+ Requires<[In64BitMode]>;
+ def BCCTR8n : XLForm_2_br2<19, 528, 4, 0, (outs), (ins crbitrc:$bi),
+ "bcctr 4, $bi, 0", IIC_BrB, []>,
Requires<[In64BitMode]>;
}
}
@@ -103,9 +113,9 @@ let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in {
let isReturn = 1, Defs = [CTR8], Uses = [CTR8, LR8, RM] in {
def BDZLR8 : XLForm_2_ext<19, 16, 18, 0, 0, (outs), (ins),
- "bdzlr", BrB, []>;
+ "bdzlr", IIC_BrB, []>;
def BDNZLR8 : XLForm_2_ext<19, 16, 16, 0, 0, (outs), (ins),
- "bdnzlr", BrB, []>;
+ "bdnzlr", IIC_BrB, []>;
}
}
@@ -115,41 +125,58 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR8] in {
// Convenient aliases for call instructions
let Uses = [RM] in {
def BL8 : IForm<18, 0, 1, (outs), (ins calltarget:$func),
- "bl $func", BrB, []>; // See Pat patterns below.
+ "bl $func", IIC_BrB, []>; // See Pat patterns below.
def BL8_TLS : IForm<18, 0, 1, (outs), (ins tlscall:$func),
- "bl $func", BrB, []>;
+ "bl $func", IIC_BrB, []>;
def BLA8 : IForm<18, 1, 1, (outs), (ins abscalltarget:$func),
- "bla $func", BrB, [(PPCcall (i64 imm:$func))]>;
+ "bla $func", IIC_BrB, [(PPCcall (i64 imm:$func))]>;
}
let Uses = [RM], isCodeGenOnly = 1 in {
def BL8_NOP : IForm_and_DForm_4_zero<18, 0, 1, 24,
(outs), (ins calltarget:$func),
- "bl $func\n\tnop", BrB, []>;
+ "bl $func\n\tnop", IIC_BrB, []>;
def BL8_NOP_TLS : IForm_and_DForm_4_zero<18, 0, 1, 24,
(outs), (ins tlscall:$func),
- "bl $func\n\tnop", BrB, []>;
+ "bl $func\n\tnop", IIC_BrB, []>;
def BLA8_NOP : IForm_and_DForm_4_zero<18, 1, 1, 24,
(outs), (ins abscalltarget:$func),
- "bla $func\n\tnop", BrB,
+ "bla $func\n\tnop", IIC_BrB,
[(PPCcall_nop (i64 imm:$func))]>;
}
let Uses = [CTR8, RM] in {
def BCTRL8 : XLForm_2_ext<19, 528, 20, 0, 1, (outs), (ins),
- "bctrl", BrB, [(PPCbctrl)]>,
+ "bctrl", IIC_BrB, [(PPCbctrl)]>,
Requires<[In64BitMode]>;
- let isCodeGenOnly = 1 in
- def BCCTRL8 : XLForm_2_br<19, 528, 1, (outs), (ins pred:$cond),
- "b${cond:cc}ctrl${cond:pm} ${cond:reg}", BrB, []>,
- Requires<[In64BitMode]>;
+ let isCodeGenOnly = 1 in {
+ def BCCCTRL8 : XLForm_2_br<19, 528, 1, (outs), (ins pred:$cond),
+ "b${cond:cc}ctrl${cond:pm} ${cond:reg}", IIC_BrB,
+ []>,
+ Requires<[In64BitMode]>;
+
+ def BCCTRL8 : XLForm_2_br2<19, 528, 12, 1, (outs), (ins crbitrc:$bi),
+ "bcctrl 12, $bi, 0", IIC_BrB, []>,
+ Requires<[In64BitMode]>;
+ def BCCTRL8n : XLForm_2_br2<19, 528, 4, 1, (outs), (ins crbitrc:$bi),
+ "bcctrl 4, $bi, 0", IIC_BrB, []>,
+ Requires<[In64BitMode]>;
+ }
}
}
} // Interpretation64Bit
+// FIXME: Duplicating this for the asm parser should be unnecessary, but the
+// previous definition must be marked as CodeGen only to prevent decoding
+// conflicts.
+let Interpretation64Bit = 1, isAsmParserOnly = 1 in
+let isCall = 1, PPC970_Unit = 7, Defs = [LR8], Uses = [RM] in
+def BL8_TLS_ : IForm<18, 0, 1, (outs), (ins tlscall:$func),
+ "bl $func", IIC_BrB, []>;
+
// Calls
def : Pat<(PPCcall (i64 tglobaladdr:$dst)),
(BL8 tglobaladdr:$dst)>;
@@ -195,16 +222,16 @@ let usesCustomInserter = 1 in {
// Instructions to support atomic operations
def LDARX : XForm_1<31, 84, (outs g8rc:$rD), (ins memrr:$ptr),
- "ldarx $rD, $ptr", LdStLDARX,
+ "ldarx $rD, $ptr", IIC_LdStLDARX,
[(set i64:$rD, (PPClarx xoaddr:$ptr))]>;
let Defs = [CR0] in
def STDCX : XForm_1<31, 214, (outs), (ins g8rc:$rS, memrr:$dst),
- "stdcx. $rS, $dst", LdStSTDCX,
+ "stdcx. $rS, $dst", IIC_LdStSTDCX,
[(PPCstcx i64:$rS, xoaddr:$dst)]>,
isDOT;
-let Interpretation64Bit = 1 in {
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [RM] in
def TCRETURNdi8 :Pseudo< (outs),
(ins calltarget:$dst, i32imm:$offset),
@@ -221,28 +248,23 @@ def TCRETURNri8 : Pseudo<(outs), (ins CTRRC8:$dst, i32imm:$offset),
"#TC_RETURNr8 $dst $offset",
[]>;
-let isCodeGenOnly = 1 in {
-
let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7, isBranch = 1,
isIndirectBranch = 1, isCall = 1, isReturn = 1, Uses = [CTR8, RM] in
-def TAILBCTR8 : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", BrB, []>,
+def TAILBCTR8 : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", IIC_BrB,
+ []>,
Requires<[In64BitMode]>;
-
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7,
isBarrier = 1, isCall = 1, isReturn = 1, Uses = [RM] in
def TAILB8 : IForm<18, 0, 0, (outs), (ins calltarget:$dst),
- "b $dst", BrB,
+ "b $dst", IIC_BrB,
[]>;
-
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7,
isBarrier = 1, isCall = 1, isReturn = 1, Uses = [RM] in
def TAILBA8 : IForm<18, 0, 0, (outs), (ins abscalltarget:$dst),
- "ba $dst", BrB,
+ "ba $dst", IIC_BrB,
[]>;
-
-}
} // Interpretation64Bit
def : Pat<(PPCtc_return (i64 tglobaladdr:$dst), imm:$imm),
@@ -256,23 +278,23 @@ def : Pat<(PPCtc_return CTRRC8:$dst, imm:$imm),
// 64-bit CR instructions
-let Interpretation64Bit = 1 in {
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
let neverHasSideEffects = 1 in {
def MTOCRF8: XFXForm_5a<31, 144, (outs crbitm:$FXM), (ins g8rc:$ST),
- "mtocrf $FXM, $ST", BrMCRX>,
+ "mtocrf $FXM, $ST", IIC_BrMCRX>,
PPC970_DGroup_First, PPC970_Unit_CRU;
def MTCRF8 : XFXForm_5<31, 144, (outs), (ins i32imm:$FXM, g8rc:$rS),
- "mtcrf $FXM, $rS", BrMCRX>,
+ "mtcrf $FXM, $rS", IIC_BrMCRX>,
PPC970_MicroCode, PPC970_Unit_CRU;
let hasExtraSrcRegAllocReq = 1 in // to enable post-ra anti-dep breaking.
def MFOCRF8: XFXForm_5a<31, 19, (outs g8rc:$rT), (ins crbitm:$FXM),
- "mfocrf $rT, $FXM", SprMFCR>,
+ "mfocrf $rT, $FXM", IIC_SprMFCRF>,
PPC970_DGroup_First, PPC970_Unit_CRU;
def MFCR8 : XFXForm_3<31, 19, (outs g8rc:$rT), (ins),
- "mfcr $rT", SprMFCR>,
+ "mfcr $rT", IIC_SprMFCR>,
PPC970_MicroCode, PPC970_Unit_CRU;
} // neverHasSideEffects = 1
@@ -294,24 +316,24 @@ let hasSideEffects = 1, isBarrier = 1, usesCustomInserter = 1 in {
let Uses = [CTR8] in {
def MFCTR8 : XFXForm_1_ext<31, 339, 9, (outs g8rc:$rT), (ins),
- "mfctr $rT", SprMFSPR>,
+ "mfctr $rT", IIC_SprMFSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
let Pattern = [(PPCmtctr i64:$rS)], Defs = [CTR8] in {
def MTCTR8 : XFXForm_7_ext<31, 467, 9, (outs), (ins g8rc:$rS),
- "mtctr $rS", SprMTSPR>,
+ "mtctr $rS", IIC_SprMTSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
-let hasSideEffects = 1, isCodeGenOnly = 1, Defs = [CTR8] in {
+let hasSideEffects = 1, Defs = [CTR8] in {
let Pattern = [(int_ppc_mtctr i64:$rS)] in
def MTCTR8loop : XFXForm_7_ext<31, 467, 9, (outs), (ins g8rc:$rS),
- "mtctr $rS", SprMTSPR>,
+ "mtctr $rS", IIC_SprMTSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
-let isCodeGenOnly = 1, Pattern = [(set i64:$rT, readcyclecounter)] in
+let Pattern = [(set i64:$rT, readcyclecounter)] in
def MFTB8 : XFXForm_1_ext<31, 339, 268, (outs g8rc:$rT), (ins),
- "mfspr $rT, 268", SprMFTB>,
+ "mfspr $rT, 268", IIC_SprMFTB>,
PPC970_DGroup_First, PPC970_Unit_FXU;
// Note that encoding mftb using mfspr is now the preferred form,
// and has been since at least ISA v2.03. The mftb instruction has
@@ -325,12 +347,12 @@ def DYNALLOC8 : Pseudo<(outs g8rc:$result), (ins g8rc:$negsize, memri:$fpsi),"#D
let Defs = [LR8] in {
def MTLR8 : XFXForm_7_ext<31, 467, 8, (outs), (ins g8rc:$rS),
- "mtlr $rS", SprMTSPR>,
+ "mtlr $rS", IIC_SprMTSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
let Uses = [LR8] in {
def MFLR8 : XFXForm_1_ext<31, 339, 8, (outs g8rc:$rT), (ins),
- "mflr $rT", SprMFSPR>,
+ "mflr $rT", IIC_SprMFSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
} // Interpretation64Bit
@@ -342,213 +364,236 @@ def MFLR8 : XFXForm_1_ext<31, 339, 8, (outs g8rc:$rT), (ins),
let PPC970_Unit = 1 in { // FXU Operations.
let Interpretation64Bit = 1 in {
let neverHasSideEffects = 1 in {
+let isCodeGenOnly = 1 in {
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
def LI8 : DForm_2_r0<14, (outs g8rc:$rD), (ins s16imm64:$imm),
- "li $rD, $imm", IntSimple,
+ "li $rD, $imm", IIC_IntSimple,
[(set i64:$rD, imm64SExt16:$imm)]>;
def LIS8 : DForm_2_r0<15, (outs g8rc:$rD), (ins s17imm64:$imm),
- "lis $rD, $imm", IntSimple,
+ "lis $rD, $imm", IIC_IntSimple,
[(set i64:$rD, imm16ShiftedSExt:$imm)]>;
}
// Logical ops.
+let isCommutable = 1 in {
defm NAND8: XForm_6r<31, 476, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
- "nand", "$rA, $rS, $rB", IntSimple,
+ "nand", "$rA, $rS, $rB", IIC_IntSimple,
[(set i64:$rA, (not (and i64:$rS, i64:$rB)))]>;
defm AND8 : XForm_6r<31, 28, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
- "and", "$rA, $rS, $rB", IntSimple,
+ "and", "$rA, $rS, $rB", IIC_IntSimple,
[(set i64:$rA, (and i64:$rS, i64:$rB))]>;
+} // isCommutable
defm ANDC8: XForm_6r<31, 60, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
- "andc", "$rA, $rS, $rB", IntSimple,
+ "andc", "$rA, $rS, $rB", IIC_IntSimple,
[(set i64:$rA, (and i64:$rS, (not i64:$rB)))]>;
+let isCommutable = 1 in {
defm OR8 : XForm_6r<31, 444, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
- "or", "$rA, $rS, $rB", IntSimple,
+ "or", "$rA, $rS, $rB", IIC_IntSimple,
[(set i64:$rA, (or i64:$rS, i64:$rB))]>;
defm NOR8 : XForm_6r<31, 124, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
- "nor", "$rA, $rS, $rB", IntSimple,
+ "nor", "$rA, $rS, $rB", IIC_IntSimple,
[(set i64:$rA, (not (or i64:$rS, i64:$rB)))]>;
+} // isCommutable
defm ORC8 : XForm_6r<31, 412, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
- "orc", "$rA, $rS, $rB", IntSimple,
+ "orc", "$rA, $rS, $rB", IIC_IntSimple,
[(set i64:$rA, (or i64:$rS, (not i64:$rB)))]>;
+let isCommutable = 1 in {
defm EQV8 : XForm_6r<31, 284, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
- "eqv", "$rA, $rS, $rB", IntSimple,
+ "eqv", "$rA, $rS, $rB", IIC_IntSimple,
[(set i64:$rA, (not (xor i64:$rS, i64:$rB)))]>;
defm XOR8 : XForm_6r<31, 316, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
- "xor", "$rA, $rS, $rB", IntSimple,
+ "xor", "$rA, $rS, $rB", IIC_IntSimple,
[(set i64:$rA, (xor i64:$rS, i64:$rB))]>;
+} // let isCommutable = 1
// Logical ops with immediate.
let Defs = [CR0] in {
-def ANDIo8 : DForm_4<28, (outs g8rc:$dst), (ins g8rc:$src1, u16imm:$src2),
- "andi. $dst, $src1, $src2", IntGeneral,
+def ANDIo8 : DForm_4<28, (outs g8rc:$dst), (ins g8rc:$src1, u16imm64:$src2),
+ "andi. $dst, $src1, $src2", IIC_IntGeneral,
[(set i64:$dst, (and i64:$src1, immZExt16:$src2))]>,
isDOT;
-def ANDISo8 : DForm_4<29, (outs g8rc:$dst), (ins g8rc:$src1, u16imm:$src2),
- "andis. $dst, $src1, $src2", IntGeneral,
+def ANDISo8 : DForm_4<29, (outs g8rc:$dst), (ins g8rc:$src1, u16imm64:$src2),
+ "andis. $dst, $src1, $src2", IIC_IntGeneral,
[(set i64:$dst, (and i64:$src1, imm16ShiftedZExt:$src2))]>,
isDOT;
}
-def ORI8 : DForm_4<24, (outs g8rc:$dst), (ins g8rc:$src1, u16imm:$src2),
- "ori $dst, $src1, $src2", IntSimple,
+def ORI8 : DForm_4<24, (outs g8rc:$dst), (ins g8rc:$src1, u16imm64:$src2),
+ "ori $dst, $src1, $src2", IIC_IntSimple,
[(set i64:$dst, (or i64:$src1, immZExt16:$src2))]>;
-def ORIS8 : DForm_4<25, (outs g8rc:$dst), (ins g8rc:$src1, u16imm:$src2),
- "oris $dst, $src1, $src2", IntSimple,
+def ORIS8 : DForm_4<25, (outs g8rc:$dst), (ins g8rc:$src1, u16imm64:$src2),
+ "oris $dst, $src1, $src2", IIC_IntSimple,
[(set i64:$dst, (or i64:$src1, imm16ShiftedZExt:$src2))]>;
-def XORI8 : DForm_4<26, (outs g8rc:$dst), (ins g8rc:$src1, u16imm:$src2),
- "xori $dst, $src1, $src2", IntSimple,
+def XORI8 : DForm_4<26, (outs g8rc:$dst), (ins g8rc:$src1, u16imm64:$src2),
+ "xori $dst, $src1, $src2", IIC_IntSimple,
[(set i64:$dst, (xor i64:$src1, immZExt16:$src2))]>;
-def XORIS8 : DForm_4<27, (outs g8rc:$dst), (ins g8rc:$src1, u16imm:$src2),
- "xoris $dst, $src1, $src2", IntSimple,
+def XORIS8 : DForm_4<27, (outs g8rc:$dst), (ins g8rc:$src1, u16imm64:$src2),
+ "xoris $dst, $src1, $src2", IIC_IntSimple,
[(set i64:$dst, (xor i64:$src1, imm16ShiftedZExt:$src2))]>;
+let isCommutable = 1 in
defm ADD8 : XOForm_1r<31, 266, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
- "add", "$rT, $rA, $rB", IntSimple,
+ "add", "$rT, $rA, $rB", IIC_IntSimple,
[(set i64:$rT, (add i64:$rA, i64:$rB))]>;
// ADD8 has a special form: reg = ADD8(reg, sym@tls) for use by the
// initial-exec thread-local storage model.
def ADD8TLS : XOForm_1<31, 266, 0, (outs g8rc:$rT), (ins g8rc:$rA, tlsreg:$rB),
- "add $rT, $rA, $rB", IntSimple,
+ "add $rT, $rA, $rB", IIC_IntSimple,
[(set i64:$rT, (add i64:$rA, tglobaltlsaddr:$rB))]>;
+let isCommutable = 1 in
defm ADDC8 : XOForm_1rc<31, 10, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
- "addc", "$rT, $rA, $rB", IntGeneral,
+ "addc", "$rT, $rA, $rB", IIC_IntGeneral,
[(set i64:$rT, (addc i64:$rA, i64:$rB))]>,
PPC970_DGroup_Cracked;
+
let Defs = [CARRY] in
def ADDIC8 : DForm_2<12, (outs g8rc:$rD), (ins g8rc:$rA, s16imm64:$imm),
- "addic $rD, $rA, $imm", IntGeneral,
+ "addic $rD, $rA, $imm", IIC_IntGeneral,
[(set i64:$rD, (addc i64:$rA, imm64SExt16:$imm))]>;
def ADDI8 : DForm_2<14, (outs g8rc:$rD), (ins g8rc_nox0:$rA, s16imm64:$imm),
- "addi $rD, $rA, $imm", IntSimple,
+ "addi $rD, $rA, $imm", IIC_IntSimple,
[(set i64:$rD, (add i64:$rA, imm64SExt16:$imm))]>;
def ADDIS8 : DForm_2<15, (outs g8rc:$rD), (ins g8rc_nox0:$rA, s17imm64:$imm),
- "addis $rD, $rA, $imm", IntSimple,
+ "addis $rD, $rA, $imm", IIC_IntSimple,
[(set i64:$rD, (add i64:$rA, imm16ShiftedSExt:$imm))]>;
let Defs = [CARRY] in {
def SUBFIC8: DForm_2< 8, (outs g8rc:$rD), (ins g8rc:$rA, s16imm64:$imm),
- "subfic $rD, $rA, $imm", IntGeneral,
+ "subfic $rD, $rA, $imm", IIC_IntGeneral,
[(set i64:$rD, (subc imm64SExt16:$imm, i64:$rA))]>;
defm SUBFC8 : XOForm_1r<31, 8, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
- "subfc", "$rT, $rA, $rB", IntGeneral,
+ "subfc", "$rT, $rA, $rB", IIC_IntGeneral,
[(set i64:$rT, (subc i64:$rB, i64:$rA))]>,
PPC970_DGroup_Cracked;
}
defm SUBF8 : XOForm_1r<31, 40, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
- "subf", "$rT, $rA, $rB", IntGeneral,
+ "subf", "$rT, $rA, $rB", IIC_IntGeneral,
[(set i64:$rT, (sub i64:$rB, i64:$rA))]>;
defm NEG8 : XOForm_3r<31, 104, 0, (outs g8rc:$rT), (ins g8rc:$rA),
- "neg", "$rT, $rA", IntSimple,
+ "neg", "$rT, $rA", IIC_IntSimple,
[(set i64:$rT, (ineg i64:$rA))]>;
let Uses = [CARRY] in {
+let isCommutable = 1 in
defm ADDE8 : XOForm_1rc<31, 138, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
- "adde", "$rT, $rA, $rB", IntGeneral,
+ "adde", "$rT, $rA, $rB", IIC_IntGeneral,
[(set i64:$rT, (adde i64:$rA, i64:$rB))]>;
defm ADDME8 : XOForm_3rc<31, 234, 0, (outs g8rc:$rT), (ins g8rc:$rA),
- "addme", "$rT, $rA", IntGeneral,
+ "addme", "$rT, $rA", IIC_IntGeneral,
[(set i64:$rT, (adde i64:$rA, -1))]>;
defm ADDZE8 : XOForm_3rc<31, 202, 0, (outs g8rc:$rT), (ins g8rc:$rA),
- "addze", "$rT, $rA", IntGeneral,
+ "addze", "$rT, $rA", IIC_IntGeneral,
[(set i64:$rT, (adde i64:$rA, 0))]>;
defm SUBFE8 : XOForm_1rc<31, 136, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
- "subfe", "$rT, $rA, $rB", IntGeneral,
+ "subfe", "$rT, $rA, $rB", IIC_IntGeneral,
[(set i64:$rT, (sube i64:$rB, i64:$rA))]>;
defm SUBFME8 : XOForm_3rc<31, 232, 0, (outs g8rc:$rT), (ins g8rc:$rA),
- "subfme", "$rT, $rA", IntGeneral,
+ "subfme", "$rT, $rA", IIC_IntGeneral,
[(set i64:$rT, (sube -1, i64:$rA))]>;
defm SUBFZE8 : XOForm_3rc<31, 200, 0, (outs g8rc:$rT), (ins g8rc:$rA),
- "subfze", "$rT, $rA", IntGeneral,
+ "subfze", "$rT, $rA", IIC_IntGeneral,
[(set i64:$rT, (sube 0, i64:$rA))]>;
}
+} // isCodeGenOnly
+// FIXME: Duplicating this for the asm parser should be unnecessary, but the
+// previous definition must be marked as CodeGen only to prevent decoding
+// conflicts.
+let isAsmParserOnly = 1 in
+def ADD8TLS_ : XOForm_1<31, 266, 0, (outs g8rc:$rT), (ins g8rc:$rA, tlsreg:$rB),
+ "add $rT, $rA, $rB", IIC_IntSimple, []>;
+let isCommutable = 1 in {
defm MULHD : XOForm_1r<31, 73, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
- "mulhd", "$rT, $rA, $rB", IntMulHW,
+ "mulhd", "$rT, $rA, $rB", IIC_IntMulHW,
[(set i64:$rT, (mulhs i64:$rA, i64:$rB))]>;
defm MULHDU : XOForm_1r<31, 9, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
- "mulhdu", "$rT, $rA, $rB", IntMulHWU,
+ "mulhdu", "$rT, $rA, $rB", IIC_IntMulHWU,
[(set i64:$rT, (mulhu i64:$rA, i64:$rB))]>;
+} // isCommutable
}
} // Interpretation64Bit
let isCompare = 1, neverHasSideEffects = 1 in {
def CMPD : XForm_16_ext<31, 0, (outs crrc:$crD), (ins g8rc:$rA, g8rc:$rB),
- "cmpd $crD, $rA, $rB", IntCompare>, isPPC64;
+ "cmpd $crD, $rA, $rB", IIC_IntCompare>, isPPC64;
def CMPLD : XForm_16_ext<31, 32, (outs crrc:$crD), (ins g8rc:$rA, g8rc:$rB),
- "cmpld $crD, $rA, $rB", IntCompare>, isPPC64;
- def CMPDI : DForm_5_ext<11, (outs crrc:$crD), (ins g8rc:$rA, s16imm:$imm),
- "cmpdi $crD, $rA, $imm", IntCompare>, isPPC64;
- def CMPLDI : DForm_6_ext<10, (outs crrc:$dst), (ins g8rc:$src1, u16imm:$src2),
- "cmpldi $dst, $src1, $src2", IntCompare>, isPPC64;
+ "cmpld $crD, $rA, $rB", IIC_IntCompare>, isPPC64;
+ def CMPDI : DForm_5_ext<11, (outs crrc:$crD), (ins g8rc:$rA, s16imm64:$imm),
+ "cmpdi $crD, $rA, $imm", IIC_IntCompare>, isPPC64;
+ def CMPLDI : DForm_6_ext<10, (outs crrc:$dst), (ins g8rc:$src1, u16imm64:$src2),
+ "cmpldi $dst, $src1, $src2",
+ IIC_IntCompare>, isPPC64;
}
let neverHasSideEffects = 1 in {
defm SLD : XForm_6r<31, 27, (outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB),
- "sld", "$rA, $rS, $rB", IntRotateD,
+ "sld", "$rA, $rS, $rB", IIC_IntRotateD,
[(set i64:$rA, (PPCshl i64:$rS, i32:$rB))]>, isPPC64;
defm SRD : XForm_6r<31, 539, (outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB),
- "srd", "$rA, $rS, $rB", IntRotateD,
+ "srd", "$rA, $rS, $rB", IIC_IntRotateD,
[(set i64:$rA, (PPCsrl i64:$rS, i32:$rB))]>, isPPC64;
defm SRAD : XForm_6rc<31, 794, (outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB),
- "srad", "$rA, $rS, $rB", IntRotateD,
+ "srad", "$rA, $rS, $rB", IIC_IntRotateD,
[(set i64:$rA, (PPCsra i64:$rS, i32:$rB))]>, isPPC64;
-let Interpretation64Bit = 1 in {
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
defm EXTSB8 : XForm_11r<31, 954, (outs g8rc:$rA), (ins g8rc:$rS),
- "extsb", "$rA, $rS", IntSimple,
+ "extsb", "$rA, $rS", IIC_IntSimple,
[(set i64:$rA, (sext_inreg i64:$rS, i8))]>;
defm EXTSH8 : XForm_11r<31, 922, (outs g8rc:$rA), (ins g8rc:$rS),
- "extsh", "$rA, $rS", IntSimple,
+ "extsh", "$rA, $rS", IIC_IntSimple,
[(set i64:$rA, (sext_inreg i64:$rS, i16))]>;
} // Interpretation64Bit
// For fast-isel:
let isCodeGenOnly = 1 in {
def EXTSB8_32_64 : XForm_11<31, 954, (outs g8rc:$rA), (ins gprc:$rS),
- "extsb $rA, $rS", IntSimple, []>, isPPC64;
+ "extsb $rA, $rS", IIC_IntSimple, []>, isPPC64;
def EXTSH8_32_64 : XForm_11<31, 922, (outs g8rc:$rA), (ins gprc:$rS),
- "extsh $rA, $rS", IntSimple, []>, isPPC64;
+ "extsh $rA, $rS", IIC_IntSimple, []>, isPPC64;
} // isCodeGenOnly for fast-isel
defm EXTSW : XForm_11r<31, 986, (outs g8rc:$rA), (ins g8rc:$rS),
- "extsw", "$rA, $rS", IntSimple,
+ "extsw", "$rA, $rS", IIC_IntSimple,
[(set i64:$rA, (sext_inreg i64:$rS, i32))]>, isPPC64;
-let Interpretation64Bit = 1 in
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm EXTSW_32_64 : XForm_11r<31, 986, (outs g8rc:$rA), (ins gprc:$rS),
- "extsw", "$rA, $rS", IntSimple,
+ "extsw", "$rA, $rS", IIC_IntSimple,
[(set i64:$rA, (sext i32:$rS))]>, isPPC64;
defm SRADI : XSForm_1rc<31, 413, (outs g8rc:$rA), (ins g8rc:$rS, u6imm:$SH),
- "sradi", "$rA, $rS, $SH", IntRotateDI,
+ "sradi", "$rA, $rS, $SH", IIC_IntRotateDI,
[(set i64:$rA, (sra i64:$rS, (i32 imm:$SH)))]>, isPPC64;
defm CNTLZD : XForm_11r<31, 58, (outs g8rc:$rA), (ins g8rc:$rS),
- "cntlzd", "$rA, $rS", IntGeneral,
+ "cntlzd", "$rA, $rS", IIC_IntGeneral,
[(set i64:$rA, (ctlz i64:$rS))]>;
def POPCNTD : XForm_11<31, 506, (outs g8rc:$rA), (ins g8rc:$rS),
- "popcntd $rA, $rS", IntGeneral,
+ "popcntd $rA, $rS", IIC_IntGeneral,
[(set i64:$rA, (ctpop i64:$rS))]>;
// popcntw also does a population count on the high 32 bits (storing the
// results in the high 32-bits of the output). We'll ignore that here (which is
// safe because we never separately use the high part of the 64-bit registers).
def POPCNTW : XForm_11<31, 378, (outs gprc:$rA), (ins gprc:$rS),
- "popcntw $rA, $rS", IntGeneral,
+ "popcntw $rA, $rS", IIC_IntGeneral,
[(set i32:$rA, (ctpop i32:$rS))]>;
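As a hedged illustration of the POPCNTW definition above (function name is mine; assumes a subtarget with the popcntd feature, e.g. -mcpu=pwr7): a 32-bit population count selects popcntw, and the extra count the hardware deposits in the high word of the destination is simply never read, per the comment above.

    /* sketch only */
    unsigned popcount32(unsigned x) {
      return __builtin_popcount(x); /* expected to select POPCNTW, roughly "popcntw 3, 3" */
    }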
defm DIVD : XOForm_1r<31, 489, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
- "divd", "$rT, $rA, $rB", IntDivD,
+ "divd", "$rT, $rA, $rB", IIC_IntDivD,
[(set i64:$rT, (sdiv i64:$rA, i64:$rB))]>, isPPC64,
PPC970_DGroup_First, PPC970_DGroup_Cracked;
defm DIVDU : XOForm_1r<31, 457, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
- "divdu", "$rT, $rA, $rB", IntDivD,
+ "divdu", "$rT, $rA, $rB", IIC_IntDivD,
[(set i64:$rT, (udiv i64:$rA, i64:$rB))]>, isPPC64,
PPC970_DGroup_First, PPC970_DGroup_Cracked;
+let isCommutable = 1 in
defm MULLD : XOForm_1r<31, 233, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
- "mulld", "$rT, $rA, $rB", IntMulHD,
+ "mulld", "$rT, $rA, $rB", IIC_IntMulHD,
[(set i64:$rT, (mul i64:$rA, i64:$rB))]>, isPPC64;
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in
def MULLI8 : DForm_2<7, (outs g8rc:$rD), (ins g8rc:$rA, s16imm64:$imm),
- "mulli $rD, $rA, $imm", IntMulLI,
+ "mulli $rD, $rA, $imm", IIC_IntMulLI,
[(set i64:$rD, (mul i64:$rA, imm64SExt16:$imm))]>;
}
@@ -556,7 +601,7 @@ let neverHasSideEffects = 1 in {
let isCommutable = 1 in {
defm RLDIMI : MDForm_1r<30, 3, (outs g8rc:$rA),
(ins g8rc:$rSi, g8rc:$rS, u6imm:$SH, u6imm:$MBE),
- "rldimi", "$rA, $rS, $SH, $MBE", IntRotateDI,
+ "rldimi", "$rA, $rS, $SH, $MBE", IIC_IntRotateDI,
[]>, isPPC64, RegConstraint<"$rSi = $rA">,
NoEncode<"$rSi">;
}
@@ -564,43 +609,53 @@ defm RLDIMI : MDForm_1r<30, 3, (outs g8rc:$rA),
// Rotate instructions.
defm RLDCL : MDSForm_1r<30, 8,
(outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB, u6imm:$MBE),
- "rldcl", "$rA, $rS, $rB, $MBE", IntRotateD,
+ "rldcl", "$rA, $rS, $rB, $MBE", IIC_IntRotateD,
[]>, isPPC64;
defm RLDCR : MDSForm_1r<30, 9,
(outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB, u6imm:$MBE),
- "rldcr", "$rA, $rS, $rB, $MBE", IntRotateD,
+ "rldcr", "$rA, $rS, $rB, $MBE", IIC_IntRotateD,
[]>, isPPC64;
defm RLDICL : MDForm_1r<30, 0,
(outs g8rc:$rA), (ins g8rc:$rS, u6imm:$SH, u6imm:$MBE),
- "rldicl", "$rA, $rS, $SH, $MBE", IntRotateDI,
+ "rldicl", "$rA, $rS, $SH, $MBE", IIC_IntRotateDI,
[]>, isPPC64;
// For fast-isel:
let isCodeGenOnly = 1 in
def RLDICL_32_64 : MDForm_1<30, 0,
(outs g8rc:$rA),
(ins gprc:$rS, u6imm:$SH, u6imm:$MBE),
- "rldicl $rA, $rS, $SH, $MBE", IntRotateDI,
+ "rldicl $rA, $rS, $SH, $MBE", IIC_IntRotateDI,
[]>, isPPC64;
// End fast-isel.
defm RLDICR : MDForm_1r<30, 1,
(outs g8rc:$rA), (ins g8rc:$rS, u6imm:$SH, u6imm:$MBE),
- "rldicr", "$rA, $rS, $SH, $MBE", IntRotateDI,
+ "rldicr", "$rA, $rS, $SH, $MBE", IIC_IntRotateDI,
[]>, isPPC64;
defm RLDIC : MDForm_1r<30, 2,
(outs g8rc:$rA), (ins g8rc:$rS, u6imm:$SH, u6imm:$MBE),
- "rldic", "$rA, $rS, $SH, $MBE", IntRotateDI,
+ "rldic", "$rA, $rS, $SH, $MBE", IIC_IntRotateDI,
[]>, isPPC64;
-let Interpretation64Bit = 1 in {
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
defm RLWINM8 : MForm_2r<21, (outs g8rc:$rA),
(ins g8rc:$rS, u5imm:$SH, u5imm:$MB, u5imm:$ME),
- "rlwinm", "$rA, $rS, $SH, $MB, $ME", IntGeneral,
+ "rlwinm", "$rA, $rS, $SH, $MB, $ME", IIC_IntGeneral,
[]>;
+let isCommutable = 1 in {
+// RLWIMI can be commuted if the rotate amount is zero.
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in
+defm RLWIMI8 : MForm_2r<20, (outs g8rc:$rA),
+ (ins g8rc:$rSi, g8rc:$rS, u5imm:$SH, u5imm:$MB,
+ u5imm:$ME), "rlwimi", "$rA, $rS, $SH, $MB, $ME",
+ IIC_IntRotate, []>, PPC970_DGroup_Cracked,
+ RegConstraint<"$rSi = $rA">, NoEncode<"$rSi">;
+}
+
let isSelect = 1 in
def ISEL8 : AForm_4<31, 15,
(outs g8rc:$rT), (ins g8rc_nox0:$rA, g8rc:$rB, crbitrc:$cond),
- "isel $rT, $rA, $rB, $cond", IntGeneral,
+ "isel $rT, $rA, $rB, $cond", IIC_IntGeneral,
[]>;
} // Interpretation64Bit
} // neverHasSideEffects = 1
@@ -614,111 +669,111 @@ def ISEL8 : AForm_4<31, 15,
// Sign extending loads.
let canFoldAsLoad = 1, PPC970_Unit = 2 in {
-let Interpretation64Bit = 1 in
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in
def LHA8: DForm_1<42, (outs g8rc:$rD), (ins memri:$src),
- "lha $rD, $src", LdStLHA,
+ "lha $rD, $src", IIC_LdStLHA,
[(set i64:$rD, (sextloadi16 iaddr:$src))]>,
PPC970_DGroup_Cracked;
def LWA : DSForm_1<58, 2, (outs g8rc:$rD), (ins memrix:$src),
- "lwa $rD, $src", LdStLWA,
+ "lwa $rD, $src", IIC_LdStLWA,
[(set i64:$rD,
(aligned4sextloadi32 ixaddr:$src))]>, isPPC64,
PPC970_DGroup_Cracked;
-let Interpretation64Bit = 1 in
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in
def LHAX8: XForm_1<31, 343, (outs g8rc:$rD), (ins memrr:$src),
- "lhax $rD, $src", LdStLHA,
+ "lhax $rD, $src", IIC_LdStLHA,
[(set i64:$rD, (sextloadi16 xaddr:$src))]>,
PPC970_DGroup_Cracked;
def LWAX : XForm_1<31, 341, (outs g8rc:$rD), (ins memrr:$src),
- "lwax $rD, $src", LdStLHA,
+ "lwax $rD, $src", IIC_LdStLHA,
[(set i64:$rD, (sextloadi32 xaddr:$src))]>, isPPC64,
PPC970_DGroup_Cracked;
// For fast-isel:
let isCodeGenOnly = 1, mayLoad = 1 in {
def LWA_32 : DSForm_1<58, 2, (outs gprc:$rD), (ins memrix:$src),
- "lwa $rD, $src", LdStLWA, []>, isPPC64,
+ "lwa $rD, $src", IIC_LdStLWA, []>, isPPC64,
PPC970_DGroup_Cracked;
def LWAX_32 : XForm_1<31, 341, (outs gprc:$rD), (ins memrr:$src),
- "lwax $rD, $src", LdStLHA, []>, isPPC64,
+ "lwax $rD, $src", IIC_LdStLHA, []>, isPPC64,
PPC970_DGroup_Cracked;
} // end fast-isel isCodeGenOnly
// Update forms.
let mayLoad = 1, neverHasSideEffects = 1 in {
-let Interpretation64Bit = 1 in
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in
def LHAU8 : DForm_1<43, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memri:$addr),
- "lhau $rD, $addr", LdStLHAU,
+ "lhau $rD, $addr", IIC_LdStLHAU,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
// NO LWAU!
-let Interpretation64Bit = 1 in
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in
def LHAUX8 : XForm_1<31, 375, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
- "lhaux $rD, $addr", LdStLHAU,
+ "lhaux $rD, $addr", IIC_LdStLHAUX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">;
def LWAUX : XForm_1<31, 373, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
- "lwaux $rD, $addr", LdStLHAU,
+ "lwaux $rD, $addr", IIC_LdStLHAUX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">, isPPC64;
}
}
-let Interpretation64Bit = 1 in {
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
// Zero extending loads.
let canFoldAsLoad = 1, PPC970_Unit = 2 in {
def LBZ8 : DForm_1<34, (outs g8rc:$rD), (ins memri:$src),
- "lbz $rD, $src", LdStLoad,
+ "lbz $rD, $src", IIC_LdStLoad,
[(set i64:$rD, (zextloadi8 iaddr:$src))]>;
def LHZ8 : DForm_1<40, (outs g8rc:$rD), (ins memri:$src),
- "lhz $rD, $src", LdStLoad,
+ "lhz $rD, $src", IIC_LdStLoad,
[(set i64:$rD, (zextloadi16 iaddr:$src))]>;
def LWZ8 : DForm_1<32, (outs g8rc:$rD), (ins memri:$src),
- "lwz $rD, $src", LdStLoad,
+ "lwz $rD, $src", IIC_LdStLoad,
[(set i64:$rD, (zextloadi32 iaddr:$src))]>, isPPC64;
def LBZX8 : XForm_1<31, 87, (outs g8rc:$rD), (ins memrr:$src),
- "lbzx $rD, $src", LdStLoad,
+ "lbzx $rD, $src", IIC_LdStLoad,
[(set i64:$rD, (zextloadi8 xaddr:$src))]>;
def LHZX8 : XForm_1<31, 279, (outs g8rc:$rD), (ins memrr:$src),
- "lhzx $rD, $src", LdStLoad,
+ "lhzx $rD, $src", IIC_LdStLoad,
[(set i64:$rD, (zextloadi16 xaddr:$src))]>;
def LWZX8 : XForm_1<31, 23, (outs g8rc:$rD), (ins memrr:$src),
- "lwzx $rD, $src", LdStLoad,
+ "lwzx $rD, $src", IIC_LdStLoad,
[(set i64:$rD, (zextloadi32 xaddr:$src))]>;
// Update forms.
let mayLoad = 1, neverHasSideEffects = 1 in {
def LBZU8 : DForm_1<35, (outs g8rc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr),
- "lbzu $rD, $addr", LdStLoadUpd,
+ "lbzu $rD, $addr", IIC_LdStLoadUpd,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LHZU8 : DForm_1<41, (outs g8rc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr),
- "lhzu $rD, $addr", LdStLoadUpd,
+ "lhzu $rD, $addr", IIC_LdStLoadUpd,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LWZU8 : DForm_1<33, (outs g8rc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr),
- "lwzu $rD, $addr", LdStLoadUpd,
+ "lwzu $rD, $addr", IIC_LdStLoadUpd,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LBZUX8 : XForm_1<31, 119, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
- "lbzux $rD, $addr", LdStLoadUpd,
+ "lbzux $rD, $addr", IIC_LdStLoadUpdX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">;
def LHZUX8 : XForm_1<31, 311, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
- "lhzux $rD, $addr", LdStLoadUpd,
+ "lhzux $rD, $addr", IIC_LdStLoadUpdX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">;
def LWZUX8 : XForm_1<31, 55, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
- "lwzux $rD, $addr", LdStLoadUpd,
+ "lwzux $rD, $addr", IIC_LdStLoadUpdX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">;
}
@@ -729,7 +784,7 @@ def LWZUX8 : XForm_1<31, 55, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
// Full 8-byte loads.
let canFoldAsLoad = 1, PPC970_Unit = 2 in {
def LD : DSForm_1<58, 0, (outs g8rc:$rD), (ins memrix:$src),
- "ld $rD, $src", LdStLD,
+ "ld $rD, $src", IIC_LdStLD,
[(set i64:$rD, (aligned4load ixaddr:$src))]>, isPPC64;
// The following three definitions are selected for small code model only.
// Otherwise, we need to create two instructions to form a 32-bit offset,
@@ -747,33 +802,27 @@ def LDtocCPT: Pseudo<(outs g8rc:$rD), (ins tocentry:$disp, g8rc:$reg),
[(set i64:$rD,
(PPCtoc_entry tconstpool:$disp, i64:$reg))]>, isPPC64;
-let hasSideEffects = 1, isCodeGenOnly = 1 in {
-let RST = 2, DS = 2 in
-def LDinto_toc: DSForm_1a<58, 0, (outs), (ins g8rc:$reg),
- "ld 2, 8($reg)", LdStLD,
- [(PPCload_toc i64:$reg)]>, isPPC64;
-
-let RST = 2, DS = 10, RA = 1 in
-def LDtoc_restore : DSForm_1a<58, 0, (outs), (ins),
- "ld 2, 40(1)", LdStLD,
- [(PPCtoc_restore)]>, isPPC64;
-}
+let hasSideEffects = 1, isCodeGenOnly = 1, RST = 2, Defs = [X2] in
+def LDinto_toc: DSForm_1<58, 0, (outs), (ins memrix:$src),
+ "ld 2, $src", IIC_LdStLD,
+ [(PPCload_toc ixaddr:$src)]>, isPPC64;
+
def LDX : XForm_1<31, 21, (outs g8rc:$rD), (ins memrr:$src),
- "ldx $rD, $src", LdStLD,
+ "ldx $rD, $src", IIC_LdStLD,
[(set i64:$rD, (load xaddr:$src))]>, isPPC64;
def LDBRX : XForm_1<31, 532, (outs g8rc:$rD), (ins memrr:$src),
- "ldbrx $rD, $src", LdStLoad,
+ "ldbrx $rD, $src", IIC_LdStLoad,
[(set i64:$rD, (PPClbrx xoaddr:$src, i64))]>, isPPC64;
let mayLoad = 1, neverHasSideEffects = 1 in {
def LDU : DSForm_1<58, 1, (outs g8rc:$rD, ptr_rc_nor0:$ea_result), (ins memrix:$addr),
- "ldu $rD, $addr", LdStLDU,
+ "ldu $rD, $addr", IIC_LdStLDU,
[]>, RegConstraint<"$addr.reg = $ea_result">, isPPC64,
NoEncode<"$ea_result">;
def LDUX : XForm_1<31, 53, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
- "ldux $rD, $addr", LdStLDU,
+ "ldux $rD, $addr", IIC_LdStLDUX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">, isPPC64;
}
@@ -856,78 +905,79 @@ def ADDIdtprelL : Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
isPPC64;
let PPC970_Unit = 2 in {
-let Interpretation64Bit = 1 in {
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
// Truncating stores.
def STB8 : DForm_1<38, (outs), (ins g8rc:$rS, memri:$src),
- "stb $rS, $src", LdStStore,
+ "stb $rS, $src", IIC_LdStStore,
[(truncstorei8 i64:$rS, iaddr:$src)]>;
def STH8 : DForm_1<44, (outs), (ins g8rc:$rS, memri:$src),
- "sth $rS, $src", LdStStore,
+ "sth $rS, $src", IIC_LdStStore,
[(truncstorei16 i64:$rS, iaddr:$src)]>;
def STW8 : DForm_1<36, (outs), (ins g8rc:$rS, memri:$src),
- "stw $rS, $src", LdStStore,
+ "stw $rS, $src", IIC_LdStStore,
[(truncstorei32 i64:$rS, iaddr:$src)]>;
def STBX8 : XForm_8<31, 215, (outs), (ins g8rc:$rS, memrr:$dst),
- "stbx $rS, $dst", LdStStore,
+ "stbx $rS, $dst", IIC_LdStStore,
[(truncstorei8 i64:$rS, xaddr:$dst)]>,
PPC970_DGroup_Cracked;
def STHX8 : XForm_8<31, 407, (outs), (ins g8rc:$rS, memrr:$dst),
- "sthx $rS, $dst", LdStStore,
+ "sthx $rS, $dst", IIC_LdStStore,
[(truncstorei16 i64:$rS, xaddr:$dst)]>,
PPC970_DGroup_Cracked;
def STWX8 : XForm_8<31, 151, (outs), (ins g8rc:$rS, memrr:$dst),
- "stwx $rS, $dst", LdStStore,
+ "stwx $rS, $dst", IIC_LdStStore,
[(truncstorei32 i64:$rS, xaddr:$dst)]>,
PPC970_DGroup_Cracked;
} // Interpretation64Bit
// Normal 8-byte stores.
def STD : DSForm_1<62, 0, (outs), (ins g8rc:$rS, memrix:$dst),
- "std $rS, $dst", LdStSTD,
+ "std $rS, $dst", IIC_LdStSTD,
[(aligned4store i64:$rS, ixaddr:$dst)]>, isPPC64;
def STDX : XForm_8<31, 149, (outs), (ins g8rc:$rS, memrr:$dst),
- "stdx $rS, $dst", LdStSTD,
+ "stdx $rS, $dst", IIC_LdStSTD,
[(store i64:$rS, xaddr:$dst)]>, isPPC64,
PPC970_DGroup_Cracked;
def STDBRX: XForm_8<31, 660, (outs), (ins g8rc:$rS, memrr:$dst),
- "stdbrx $rS, $dst", LdStStore,
+ "stdbrx $rS, $dst", IIC_LdStStore,
[(PPCstbrx i64:$rS, xoaddr:$dst, i64)]>, isPPC64,
PPC970_DGroup_Cracked;
}
// Stores with Update (pre-inc).
let PPC970_Unit = 2, mayStore = 1 in {
-let Interpretation64Bit = 1 in {
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
def STBU8 : DForm_1<39, (outs ptr_rc_nor0:$ea_res), (ins g8rc:$rS, memri:$dst),
- "stbu $rS, $dst", LdStStoreUpd, []>,
+ "stbu $rS, $dst", IIC_LdStStoreUpd, []>,
RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">;
def STHU8 : DForm_1<45, (outs ptr_rc_nor0:$ea_res), (ins g8rc:$rS, memri:$dst),
- "sthu $rS, $dst", LdStStoreUpd, []>,
+ "sthu $rS, $dst", IIC_LdStStoreUpd, []>,
RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">;
def STWU8 : DForm_1<37, (outs ptr_rc_nor0:$ea_res), (ins g8rc:$rS, memri:$dst),
- "stwu $rS, $dst", LdStStoreUpd, []>,
+ "stwu $rS, $dst", IIC_LdStStoreUpd, []>,
RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">;
-def STDU : DSForm_1<62, 1, (outs ptr_rc_nor0:$ea_res), (ins g8rc:$rS, memrix:$dst),
- "stdu $rS, $dst", LdStSTDU, []>,
- RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">,
- isPPC64;
def STBUX8: XForm_8<31, 247, (outs ptr_rc_nor0:$ea_res), (ins g8rc:$rS, memrr:$dst),
- "stbux $rS, $dst", LdStStoreUpd, []>,
+ "stbux $rS, $dst", IIC_LdStStoreUpd, []>,
RegConstraint<"$dst.ptrreg = $ea_res">, NoEncode<"$ea_res">,
PPC970_DGroup_Cracked;
def STHUX8: XForm_8<31, 439, (outs ptr_rc_nor0:$ea_res), (ins g8rc:$rS, memrr:$dst),
- "sthux $rS, $dst", LdStStoreUpd, []>,
+ "sthux $rS, $dst", IIC_LdStStoreUpd, []>,
RegConstraint<"$dst.ptrreg = $ea_res">, NoEncode<"$ea_res">,
PPC970_DGroup_Cracked;
def STWUX8: XForm_8<31, 183, (outs ptr_rc_nor0:$ea_res), (ins g8rc:$rS, memrr:$dst),
- "stwux $rS, $dst", LdStStoreUpd, []>,
+ "stwux $rS, $dst", IIC_LdStStoreUpd, []>,
RegConstraint<"$dst.ptrreg = $ea_res">, NoEncode<"$ea_res">,
PPC970_DGroup_Cracked;
} // Interpretation64Bit
+def STDU : DSForm_1<62, 1, (outs ptr_rc_nor0:$ea_res), (ins g8rc:$rS, memrix:$dst),
+ "stdu $rS, $dst", IIC_LdStSTDU, []>,
+ RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">,
+ isPPC64;
+
def STDUX : XForm_8<31, 181, (outs ptr_rc_nor0:$ea_res), (ins g8rc:$rS, memrr:$dst),
- "stdux $rS, $dst", LdStSTDU, []>,
+ "stdux $rS, $dst", IIC_LdStSTDUX, []>,
RegConstraint<"$dst.ptrreg = $ea_res">, NoEncode<"$ea_res">,
PPC970_DGroup_Cracked, isPPC64;
}
@@ -962,29 +1012,29 @@ def : Pat<(pre_store i64:$rS, iPTR:$ptrreg, iPTR:$ptroff),
let PPC970_Unit = 3, neverHasSideEffects = 1,
Uses = [RM] in { // FPU Operations.
defm FCFID : XForm_26r<63, 846, (outs f8rc:$frD), (ins f8rc:$frB),
- "fcfid", "$frD, $frB", FPGeneral,
+ "fcfid", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (PPCfcfid f64:$frB))]>, isPPC64;
defm FCTID : XForm_26r<63, 814, (outs f8rc:$frD), (ins f8rc:$frB),
- "fctid", "$frD, $frB", FPGeneral,
+ "fctid", "$frD, $frB", IIC_FPGeneral,
[]>, isPPC64;
defm FCTIDZ : XForm_26r<63, 815, (outs f8rc:$frD), (ins f8rc:$frB),
- "fctidz", "$frD, $frB", FPGeneral,
+ "fctidz", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (PPCfctidz f64:$frB))]>, isPPC64;
defm FCFIDU : XForm_26r<63, 974, (outs f8rc:$frD), (ins f8rc:$frB),
- "fcfidu", "$frD, $frB", FPGeneral,
+ "fcfidu", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (PPCfcfidu f64:$frB))]>, isPPC64;
defm FCFIDS : XForm_26r<59, 846, (outs f4rc:$frD), (ins f8rc:$frB),
- "fcfids", "$frD, $frB", FPGeneral,
+ "fcfids", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (PPCfcfids f64:$frB))]>, isPPC64;
defm FCFIDUS : XForm_26r<59, 974, (outs f4rc:$frD), (ins f8rc:$frB),
- "fcfidus", "$frD, $frB", FPGeneral,
+ "fcfidus", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (PPCfcfidus f64:$frB))]>, isPPC64;
defm FCTIDUZ : XForm_26r<63, 943, (outs f8rc:$frD), (ins f8rc:$frB),
- "fctiduz", "$frD, $frB", FPGeneral,
+ "fctiduz", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (PPCfctiduz f64:$frB))]>, isPPC64;
defm FCTIWUZ : XForm_26r<63, 143, (outs f8rc:$frD), (ins f8rc:$frB),
- "fctiwuz", "$frD, $frB", FPGeneral,
+ "fctiwuz", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (PPCfctiwuz f64:$frB))]>, isPPC64;
}
@@ -1002,6 +1052,14 @@ def : Pat<(i64 (anyext i32:$in)),
def : Pat<(i32 (trunc i64:$in)),
(EXTRACT_SUBREG $in, sub_32)>;
+// Implement the 'not' operation with the NOR instruction.
+// (we could use the default xori pattern, but nor has lower latency on some
+// cores (such as the A2)).
+def i64not : OutPatFrag<(ops node:$in),
+ (NOR8 $in, $in)>;
+def : Pat<(not i64:$in),
+ (i64not $in)>;
+
// Extending loads with i64 targets.
def : Pat<(zextloadi1 iaddr:$src),
(LBZ8 iaddr:$src)>;
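As a hedged illustration of the i64not fragment added above (function name is mine; register numbers assume the usual PPC64 convention of passing the first argument and returning the result in r3): a 64-bit bitwise complement now selects a single nor rather than going through the default xori pattern, for the latency reason given in the comment.

    /* sketch only; assumes a ppc64 target */
    long long flip(long long x) {
      return ~x; /* expected to select NOR8, roughly "nor 3, 3, 3" */
    }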
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrAltivec.td b/contrib/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
index a55abe3..b271b5d 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
@@ -22,111 +22,160 @@ def vnot_ppc : PatFrag<(ops node:$in),
def vpkuhum_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
- return PPC::isVPKUHUMShuffleMask(cast<ShuffleVectorSDNode>(N), false);
+ return PPC::isVPKUHUMShuffleMask(cast<ShuffleVectorSDNode>(N), 0, *CurDAG);
}]>;
def vpkuwum_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
- return PPC::isVPKUWUMShuffleMask(cast<ShuffleVectorSDNode>(N), false);
+ return PPC::isVPKUWUMShuffleMask(cast<ShuffleVectorSDNode>(N), 0, *CurDAG);
}]>;
def vpkuhum_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
- return PPC::isVPKUHUMShuffleMask(cast<ShuffleVectorSDNode>(N), true);
+ return PPC::isVPKUHUMShuffleMask(cast<ShuffleVectorSDNode>(N), 1, *CurDAG);
}]>;
def vpkuwum_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
- return PPC::isVPKUWUMShuffleMask(cast<ShuffleVectorSDNode>(N), true);
+ return PPC::isVPKUWUMShuffleMask(cast<ShuffleVectorSDNode>(N), 1, *CurDAG);
}]>;
+// These fragments are provided for little-endian, where the inputs must be
+// swapped for correct semantics.
+def vpkuhum_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVPKUHUMShuffleMask(cast<ShuffleVectorSDNode>(N), 2, *CurDAG);
+}]>;
+def vpkuwum_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVPKUWUMShuffleMask(cast<ShuffleVectorSDNode>(N), 2, *CurDAG);
+}]>;
def vmrglb_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
- return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, false);
+ return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 0, *CurDAG);
}]>;
def vmrglh_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
- return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, false);
+ return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 0, *CurDAG);
}]>;
def vmrglw_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
- return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, false);
+ return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 0, *CurDAG);
}]>;
def vmrghb_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
- return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, false);
+ return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 0, *CurDAG);
}]>;
def vmrghh_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
- return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, false);
+ return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 0, *CurDAG);
}]>;
def vmrghw_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
- return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, false);
+ return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 0, *CurDAG);
}]>;
def vmrglb_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
- return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, true);
+ return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 1, *CurDAG);
}]>;
def vmrglh_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
- return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, true);
+ return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 1, *CurDAG);
}]>;
def vmrglw_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
- return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, true);
+ return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 1, *CurDAG);
}]>;
def vmrghb_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
- return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, true);
+ return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 1, *CurDAG);
}]>;
def vmrghh_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
- return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, true);
+ return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 1, *CurDAG);
}]>;
def vmrghw_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
- return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, true);
+ return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 1, *CurDAG);
+}]>;
+
+
+// These fragments are provided for little-endian, where the inputs must be
+// swapped for correct semantics.
+def vmrglb_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
+ return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 2, *CurDAG);
+}]>;
+def vmrglh_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 2, *CurDAG);
+}]>;
+def vmrglw_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 2, *CurDAG);
+}]>;
+def vmrghb_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 2, *CurDAG);
+}]>;
+def vmrghh_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 2, *CurDAG);
+}]>;
+def vmrghw_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 2, *CurDAG);
}]>;
def VSLDOI_get_imm : SDNodeXForm<vector_shuffle, [{
- return getI32Imm(PPC::isVSLDOIShuffleMask(N, false));
+ return getI32Imm(PPC::isVSLDOIShuffleMask(N, 0, *CurDAG));
}]>;
def vsldoi_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
- return PPC::isVSLDOIShuffleMask(N, false) != -1;
+ return PPC::isVSLDOIShuffleMask(N, 0, *CurDAG) != -1;
}], VSLDOI_get_imm>;
/// VSLDOI_unary* - These are used to match vsldoi(X,X), which is turned into
/// vector_shuffle(X,undef,mask) by the dag combiner.
def VSLDOI_unary_get_imm : SDNodeXForm<vector_shuffle, [{
- return getI32Imm(PPC::isVSLDOIShuffleMask(N, true));
+ return getI32Imm(PPC::isVSLDOIShuffleMask(N, 1, *CurDAG));
}]>;
def vsldoi_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
- return PPC::isVSLDOIShuffleMask(N, true) != -1;
+ return PPC::isVSLDOIShuffleMask(N, 1, *CurDAG) != -1;
}], VSLDOI_unary_get_imm>;
+/// VSLDOI_swapped* - These fragments are provided for little-endian, where
+/// the inputs must be swapped for correct semantics.
+def VSLDOI_swapped_get_imm : SDNodeXForm<vector_shuffle, [{
+ return getI32Imm(PPC::isVSLDOIShuffleMask(N, 2, *CurDAG));
+}]>;
+def vsldoi_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVSLDOIShuffleMask(N, 2, *CurDAG) != -1;
+}], VSLDOI_get_imm>;
+
+
// VSPLT*_get_imm xform function: convert vector_shuffle mask to VSPLT* imm.
def VSPLTB_get_imm : SDNodeXForm<vector_shuffle, [{
- return getI32Imm(PPC::getVSPLTImmediate(N, 1));
+ return getI32Imm(PPC::getVSPLTImmediate(N, 1, *CurDAG));
}]>;
def vspltb_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 1);
}], VSPLTB_get_imm>;
def VSPLTH_get_imm : SDNodeXForm<vector_shuffle, [{
- return getI32Imm(PPC::getVSPLTImmediate(N, 2));
+ return getI32Imm(PPC::getVSPLTImmediate(N, 2, *CurDAG));
}]>;
def vsplth_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 2);
}], VSPLTH_get_imm>;
def VSPLTW_get_imm : SDNodeXForm<vector_shuffle, [{
- return getI32Imm(PPC::getVSPLTImmediate(N, 4));
+ return getI32Imm(PPC::getVSPLTImmediate(N, 4, *CurDAG));
}]>;
def vspltw_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
@@ -164,7 +213,7 @@ def vecspltisw : PatLeaf<(build_vector), [{
// VA1a_Int_Ty - A VAForm_1a intrinsic definition of specific type.
class VA1a_Int_Ty<bits<6> xo, string opc, Intrinsic IntID, ValueType Ty>
: VAForm_1a<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, vrrc:$vC),
- !strconcat(opc, " $vD, $vA, $vB, $vC"), VecFP,
+ !strconcat(opc, " $vD, $vA, $vB, $vC"), IIC_VecFP,
[(set Ty:$vD, (IntID Ty:$vA, Ty:$vB, Ty:$vC))]>;
// VA1a_Int_Ty2 - A VAForm_1a intrinsic definition where the type of the
@@ -172,7 +221,7 @@ class VA1a_Int_Ty<bits<6> xo, string opc, Intrinsic IntID, ValueType Ty>
class VA1a_Int_Ty2<bits<6> xo, string opc, Intrinsic IntID, ValueType OutTy,
ValueType InTy>
: VAForm_1a<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, vrrc:$vC),
- !strconcat(opc, " $vD, $vA, $vB, $vC"), VecFP,
+ !strconcat(opc, " $vD, $vA, $vB, $vC"), IIC_VecFP,
[(set OutTy:$vD, (IntID InTy:$vA, InTy:$vB, InTy:$vC))]>;
// VA1a_Int_Ty3 - A VAForm_1a intrinsic definition where there are two
@@ -180,14 +229,14 @@ class VA1a_Int_Ty2<bits<6> xo, string opc, Intrinsic IntID, ValueType OutTy,
class VA1a_Int_Ty3<bits<6> xo, string opc, Intrinsic IntID, ValueType OutTy,
ValueType In1Ty, ValueType In2Ty>
: VAForm_1a<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, vrrc:$vC),
- !strconcat(opc, " $vD, $vA, $vB, $vC"), VecFP,
+ !strconcat(opc, " $vD, $vA, $vB, $vC"), IIC_VecFP,
[(set OutTy:$vD,
(IntID In1Ty:$vA, In1Ty:$vB, In2Ty:$vC))]>;
// VX1_Int_Ty - A VXForm_1 intrinsic definition of specific type.
class VX1_Int_Ty<bits<11> xo, string opc, Intrinsic IntID, ValueType Ty>
: VXForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- !strconcat(opc, " $vD, $vA, $vB"), VecFP,
+ !strconcat(opc, " $vD, $vA, $vB"), IIC_VecFP,
[(set Ty:$vD, (IntID Ty:$vA, Ty:$vB))]>;
// VX1_Int_Ty2 - A VXForm_1 intrinsic definition where the type of the
@@ -195,7 +244,7 @@ class VX1_Int_Ty<bits<11> xo, string opc, Intrinsic IntID, ValueType Ty>
class VX1_Int_Ty2<bits<11> xo, string opc, Intrinsic IntID, ValueType OutTy,
ValueType InTy>
: VXForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- !strconcat(opc, " $vD, $vA, $vB"), VecFP,
+ !strconcat(opc, " $vD, $vA, $vB"), IIC_VecFP,
[(set OutTy:$vD, (IntID InTy:$vA, InTy:$vB))]>;
// VX1_Int_Ty3 - A VXForm_1 intrinsic definition where there are two
@@ -203,13 +252,13 @@ class VX1_Int_Ty2<bits<11> xo, string opc, Intrinsic IntID, ValueType OutTy,
class VX1_Int_Ty3<bits<11> xo, string opc, Intrinsic IntID, ValueType OutTy,
ValueType In1Ty, ValueType In2Ty>
: VXForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- !strconcat(opc, " $vD, $vA, $vB"), VecFP,
+ !strconcat(opc, " $vD, $vA, $vB"), IIC_VecFP,
[(set OutTy:$vD, (IntID In1Ty:$vA, In2Ty:$vB))]>;
// VX2_Int_SP - A VXForm_2 intrinsic definition of vector single-precision type.
class VX2_Int_SP<bits<11> xo, string opc, Intrinsic IntID>
: VXForm_2<xo, (outs vrrc:$vD), (ins vrrc:$vB),
- !strconcat(opc, " $vD, $vB"), VecFP,
+ !strconcat(opc, " $vD, $vB"), IIC_VecFP,
[(set v4f32:$vD, (IntID v4f32:$vB))]>;
// VX2_Int_Ty2 - A VXForm_2 intrinsic definition where the type of the
@@ -217,128 +266,130 @@ class VX2_Int_SP<bits<11> xo, string opc, Intrinsic IntID>
class VX2_Int_Ty2<bits<11> xo, string opc, Intrinsic IntID, ValueType OutTy,
ValueType InTy>
: VXForm_2<xo, (outs vrrc:$vD), (ins vrrc:$vB),
- !strconcat(opc, " $vD, $vB"), VecFP,
+ !strconcat(opc, " $vD, $vB"), IIC_VecFP,
[(set OutTy:$vD, (IntID InTy:$vB))]>;
//===----------------------------------------------------------------------===//
// Instruction Definitions.
-def HasAltivec : Predicate<"PPCSubTarget.hasAltivec()">;
+def HasAltivec : Predicate<"PPCSubTarget->hasAltivec()">;
let Predicates = [HasAltivec] in {
let isCodeGenOnly = 1 in {
def DSS : DSS_Form<822, (outs),
(ins u5imm:$ZERO0, u5imm:$STRM,u5imm:$ZERO1,u5imm:$ZERO2),
- "dss $STRM", LdStLoad /*FIXME*/, []>,
+ "dss $STRM", IIC_LdStLoad /*FIXME*/, []>,
Deprecated<DeprecatedDST>;
def DSSALL : DSS_Form<822, (outs),
(ins u5imm:$ONE, u5imm:$ZERO0,u5imm:$ZERO1,u5imm:$ZERO2),
- "dssall", LdStLoad /*FIXME*/, []>,
+ "dssall", IIC_LdStLoad /*FIXME*/, []>,
Deprecated<DeprecatedDST>;
def DST : DSS_Form<342, (outs),
(ins u5imm:$ZERO, u5imm:$STRM, gprc:$rA, gprc:$rB),
- "dst $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>,
+ "dst $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/, []>,
Deprecated<DeprecatedDST>;
def DSTT : DSS_Form<342, (outs),
(ins u5imm:$ONE, u5imm:$STRM, gprc:$rA, gprc:$rB),
- "dstt $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>,
+ "dstt $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/, []>,
Deprecated<DeprecatedDST>;
def DSTST : DSS_Form<374, (outs),
(ins u5imm:$ZERO, u5imm:$STRM, gprc:$rA, gprc:$rB),
- "dstst $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>,
+ "dstst $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/, []>,
Deprecated<DeprecatedDST>;
def DSTSTT : DSS_Form<374, (outs),
(ins u5imm:$ONE, u5imm:$STRM, gprc:$rA, gprc:$rB),
- "dststt $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>,
+ "dststt $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/, []>,
Deprecated<DeprecatedDST>;
def DST64 : DSS_Form<342, (outs),
(ins u5imm:$ZERO, u5imm:$STRM, g8rc:$rA, gprc:$rB),
- "dst $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>,
+ "dst $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/, []>,
Deprecated<DeprecatedDST>;
def DSTT64 : DSS_Form<342, (outs),
(ins u5imm:$ONE, u5imm:$STRM, g8rc:$rA, gprc:$rB),
- "dstt $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>,
+ "dstt $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/, []>,
Deprecated<DeprecatedDST>;
def DSTST64 : DSS_Form<374, (outs),
(ins u5imm:$ZERO, u5imm:$STRM, g8rc:$rA, gprc:$rB),
- "dstst $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>,
+ "dstst $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/, []>,
Deprecated<DeprecatedDST>;
def DSTSTT64 : DSS_Form<374, (outs),
(ins u5imm:$ONE, u5imm:$STRM, g8rc:$rA, gprc:$rB),
- "dststt $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>,
+ "dststt $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/, []>,
Deprecated<DeprecatedDST>;
}
def MFVSCR : VXForm_4<1540, (outs vrrc:$vD), (ins),
- "mfvscr $vD", LdStStore,
+ "mfvscr $vD", IIC_LdStStore,
[(set v8i16:$vD, (int_ppc_altivec_mfvscr))]>;
def MTVSCR : VXForm_5<1604, (outs), (ins vrrc:$vB),
- "mtvscr $vB", LdStLoad,
+ "mtvscr $vB", IIC_LdStLoad,
[(int_ppc_altivec_mtvscr v4i32:$vB)]>;
let canFoldAsLoad = 1, PPC970_Unit = 2 in { // Loads.
def LVEBX: XForm_1<31, 7, (outs vrrc:$vD), (ins memrr:$src),
- "lvebx $vD, $src", LdStLoad,
+ "lvebx $vD, $src", IIC_LdStLoad,
[(set v16i8:$vD, (int_ppc_altivec_lvebx xoaddr:$src))]>;
def LVEHX: XForm_1<31, 39, (outs vrrc:$vD), (ins memrr:$src),
- "lvehx $vD, $src", LdStLoad,
+ "lvehx $vD, $src", IIC_LdStLoad,
[(set v8i16:$vD, (int_ppc_altivec_lvehx xoaddr:$src))]>;
def LVEWX: XForm_1<31, 71, (outs vrrc:$vD), (ins memrr:$src),
- "lvewx $vD, $src", LdStLoad,
+ "lvewx $vD, $src", IIC_LdStLoad,
[(set v4i32:$vD, (int_ppc_altivec_lvewx xoaddr:$src))]>;
def LVX : XForm_1<31, 103, (outs vrrc:$vD), (ins memrr:$src),
- "lvx $vD, $src", LdStLoad,
+ "lvx $vD, $src", IIC_LdStLoad,
[(set v4i32:$vD, (int_ppc_altivec_lvx xoaddr:$src))]>;
def LVXL : XForm_1<31, 359, (outs vrrc:$vD), (ins memrr:$src),
- "lvxl $vD, $src", LdStLoad,
+ "lvxl $vD, $src", IIC_LdStLoad,
[(set v4i32:$vD, (int_ppc_altivec_lvxl xoaddr:$src))]>;
}
def LVSL : XForm_1<31, 6, (outs vrrc:$vD), (ins memrr:$src),
- "lvsl $vD, $src", LdStLoad,
+ "lvsl $vD, $src", IIC_LdStLoad,
[(set v16i8:$vD, (int_ppc_altivec_lvsl xoaddr:$src))]>,
PPC970_Unit_LSU;
def LVSR : XForm_1<31, 38, (outs vrrc:$vD), (ins memrr:$src),
- "lvsr $vD, $src", LdStLoad,
+ "lvsr $vD, $src", IIC_LdStLoad,
[(set v16i8:$vD, (int_ppc_altivec_lvsr xoaddr:$src))]>,
PPC970_Unit_LSU;
let PPC970_Unit = 2 in { // Stores.
def STVEBX: XForm_8<31, 135, (outs), (ins vrrc:$rS, memrr:$dst),
- "stvebx $rS, $dst", LdStStore,
+ "stvebx $rS, $dst", IIC_LdStStore,
[(int_ppc_altivec_stvebx v16i8:$rS, xoaddr:$dst)]>;
def STVEHX: XForm_8<31, 167, (outs), (ins vrrc:$rS, memrr:$dst),
- "stvehx $rS, $dst", LdStStore,
+ "stvehx $rS, $dst", IIC_LdStStore,
[(int_ppc_altivec_stvehx v8i16:$rS, xoaddr:$dst)]>;
def STVEWX: XForm_8<31, 199, (outs), (ins vrrc:$rS, memrr:$dst),
- "stvewx $rS, $dst", LdStStore,
+ "stvewx $rS, $dst", IIC_LdStStore,
[(int_ppc_altivec_stvewx v4i32:$rS, xoaddr:$dst)]>;
def STVX : XForm_8<31, 231, (outs), (ins vrrc:$rS, memrr:$dst),
- "stvx $rS, $dst", LdStStore,
+ "stvx $rS, $dst", IIC_LdStStore,
[(int_ppc_altivec_stvx v4i32:$rS, xoaddr:$dst)]>;
def STVXL : XForm_8<31, 487, (outs), (ins vrrc:$rS, memrr:$dst),
- "stvxl $rS, $dst", LdStStore,
+ "stvxl $rS, $dst", IIC_LdStStore,
[(int_ppc_altivec_stvxl v4i32:$rS, xoaddr:$dst)]>;
}
let PPC970_Unit = 5 in { // VALU Operations.
// VA-Form instructions. 3-input AltiVec ops.
+let isCommutable = 1 in {
def VMADDFP : VAForm_1<46, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vC, vrrc:$vB),
- "vmaddfp $vD, $vA, $vC, $vB", VecFP,
+ "vmaddfp $vD, $vA, $vC, $vB", IIC_VecFP,
[(set v4f32:$vD,
(fma v4f32:$vA, v4f32:$vC, v4f32:$vB))]>;
// FIXME: The fma+fneg pattern won't match because fneg is not legal.
def VNMSUBFP: VAForm_1<47, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vC, vrrc:$vB),
- "vnmsubfp $vD, $vA, $vC, $vB", VecFP,
+ "vnmsubfp $vD, $vA, $vC, $vB", IIC_VecFP,
[(set v4f32:$vD, (fneg (fma v4f32:$vA, v4f32:$vC,
- (fneg v4f32:$vB))))]>;
+ (fneg v4f32:$vB))))]>;
def VMHADDSHS : VA1a_Int_Ty<32, "vmhaddshs", int_ppc_altivec_vmhaddshs, v8i16>;
def VMHRADDSHS : VA1a_Int_Ty<33, "vmhraddshs", int_ppc_altivec_vmhraddshs,
v8i16>;
def VMLADDUHM : VA1a_Int_Ty<34, "vmladduhm", int_ppc_altivec_vmladduhm, v8i16>;
+} // isCommutable
def VPERM : VA1a_Int_Ty3<43, "vperm", int_ppc_altivec_vperm,
v4i32, v4i32, v16i8>;
@@ -346,23 +397,24 @@ def VSEL : VA1a_Int_Ty<42, "vsel", int_ppc_altivec_vsel, v4i32>;
// Shuffles.
def VSLDOI : VAForm_2<44, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, u5imm:$SH),
- "vsldoi $vD, $vA, $vB, $SH", VecFP,
+ "vsldoi $vD, $vA, $vB, $SH", IIC_VecFP,
[(set v16i8:$vD,
(vsldoi_shuffle:$SH v16i8:$vA, v16i8:$vB))]>;
// VX-Form instructions. AltiVec arithmetic ops.
+let isCommutable = 1 in {
def VADDFP : VXForm_1<10, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vaddfp $vD, $vA, $vB", VecFP,
+ "vaddfp $vD, $vA, $vB", IIC_VecFP,
[(set v4f32:$vD, (fadd v4f32:$vA, v4f32:$vB))]>;
def VADDUBM : VXForm_1<0, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vaddubm $vD, $vA, $vB", VecGeneral,
+ "vaddubm $vD, $vA, $vB", IIC_VecGeneral,
[(set v16i8:$vD, (add v16i8:$vA, v16i8:$vB))]>;
def VADDUHM : VXForm_1<64, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vadduhm $vD, $vA, $vB", VecGeneral,
+ "vadduhm $vD, $vA, $vB", IIC_VecGeneral,
[(set v8i16:$vD, (add v8i16:$vA, v8i16:$vB))]>;
def VADDUWM : VXForm_1<128, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vadduwm $vD, $vA, $vB", VecGeneral,
+ "vadduwm $vD, $vA, $vB", IIC_VecGeneral,
[(set v4i32:$vD, (add v4i32:$vA, v4i32:$vB))]>;
def VADDCUW : VX1_Int_Ty<384, "vaddcuw", int_ppc_altivec_vaddcuw, v4i32>;
@@ -372,30 +424,31 @@ def VADDSWS : VX1_Int_Ty<896, "vaddsws", int_ppc_altivec_vaddsws, v4i32>;
def VADDUBS : VX1_Int_Ty<512, "vaddubs", int_ppc_altivec_vaddubs, v16i8>;
def VADDUHS : VX1_Int_Ty<576, "vadduhs", int_ppc_altivec_vadduhs, v8i16>;
def VADDUWS : VX1_Int_Ty<640, "vadduws", int_ppc_altivec_vadduws, v4i32>;
-
-
+} // isCommutable
+
+let isCommutable = 1 in
def VAND : VXForm_1<1028, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vand $vD, $vA, $vB", VecFP,
+ "vand $vD, $vA, $vB", IIC_VecFP,
[(set v4i32:$vD, (and v4i32:$vA, v4i32:$vB))]>;
def VANDC : VXForm_1<1092, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vandc $vD, $vA, $vB", VecFP,
+ "vandc $vD, $vA, $vB", IIC_VecFP,
[(set v4i32:$vD, (and v4i32:$vA,
(vnot_ppc v4i32:$vB)))]>;
def VCFSX : VXForm_1<842, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
- "vcfsx $vD, $vB, $UIMM", VecFP,
+ "vcfsx $vD, $vB, $UIMM", IIC_VecFP,
[(set v4f32:$vD,
(int_ppc_altivec_vcfsx v4i32:$vB, imm:$UIMM))]>;
def VCFUX : VXForm_1<778, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
- "vcfux $vD, $vB, $UIMM", VecFP,
+ "vcfux $vD, $vB, $UIMM", IIC_VecFP,
[(set v4f32:$vD,
(int_ppc_altivec_vcfux v4i32:$vB, imm:$UIMM))]>;
def VCTSXS : VXForm_1<970, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
- "vctsxs $vD, $vB, $UIMM", VecFP,
+ "vctsxs $vD, $vB, $UIMM", IIC_VecFP,
[(set v4i32:$vD,
(int_ppc_altivec_vctsxs v4f32:$vB, imm:$UIMM))]>;
def VCTUXS : VXForm_1<906, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
- "vctuxs $vD, $vB, $UIMM", VecFP,
+ "vctuxs $vD, $vB, $UIMM", IIC_VecFP,
[(set v4i32:$vD,
(int_ppc_altivec_vctuxs v4f32:$vB, imm:$UIMM))]>;
@@ -404,25 +457,26 @@ def VCTUXS : VXForm_1<906, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
// to floating-point (sint_to_fp/uint_to_fp) conversions.
let isCodeGenOnly = 1, VA = 0 in {
def VCFSX_0 : VXForm_1<842, (outs vrrc:$vD), (ins vrrc:$vB),
- "vcfsx $vD, $vB, 0", VecFP,
+ "vcfsx $vD, $vB, 0", IIC_VecFP,
[(set v4f32:$vD,
(int_ppc_altivec_vcfsx v4i32:$vB, 0))]>;
def VCTUXS_0 : VXForm_1<906, (outs vrrc:$vD), (ins vrrc:$vB),
- "vctuxs $vD, $vB, 0", VecFP,
+ "vctuxs $vD, $vB, 0", IIC_VecFP,
[(set v4i32:$vD,
(int_ppc_altivec_vctuxs v4f32:$vB, 0))]>;
def VCFUX_0 : VXForm_1<778, (outs vrrc:$vD), (ins vrrc:$vB),
- "vcfux $vD, $vB, 0", VecFP,
+ "vcfux $vD, $vB, 0", IIC_VecFP,
[(set v4f32:$vD,
(int_ppc_altivec_vcfux v4i32:$vB, 0))]>;
def VCTSXS_0 : VXForm_1<970, (outs vrrc:$vD), (ins vrrc:$vB),
- "vctsxs $vD, $vB, 0", VecFP,
+ "vctsxs $vD, $vB, 0", IIC_VecFP,
[(set v4i32:$vD,
(int_ppc_altivec_vctsxs v4f32:$vB, 0))]>;
}
def VEXPTEFP : VX2_Int_SP<394, "vexptefp", int_ppc_altivec_vexptefp>;
def VLOGEFP : VX2_Int_SP<458, "vlogefp", int_ppc_altivec_vlogefp>;
+let isCommutable = 1 in {
def VAVGSB : VX1_Int_Ty<1282, "vavgsb", int_ppc_altivec_vavgsb, v16i8>;
def VAVGSH : VX1_Int_Ty<1346, "vavgsh", int_ppc_altivec_vavgsh, v8i16>;
def VAVGSW : VX1_Int_Ty<1410, "vavgsw", int_ppc_altivec_vavgsw, v4i32>;
@@ -444,24 +498,25 @@ def VMINSW : VX1_Int_Ty< 898, "vminsw", int_ppc_altivec_vminsw, v4i32>;
def VMINUB : VX1_Int_Ty< 514, "vminub", int_ppc_altivec_vminub, v16i8>;
def VMINUH : VX1_Int_Ty< 578, "vminuh", int_ppc_altivec_vminuh, v8i16>;
def VMINUW : VX1_Int_Ty< 642, "vminuw", int_ppc_altivec_vminuw, v4i32>;
+} // isCommutable
def VMRGHB : VXForm_1< 12, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vmrghb $vD, $vA, $vB", VecFP,
+ "vmrghb $vD, $vA, $vB", IIC_VecFP,
[(set v16i8:$vD, (vmrghb_shuffle v16i8:$vA, v16i8:$vB))]>;
def VMRGHH : VXForm_1< 76, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vmrghh $vD, $vA, $vB", VecFP,
+ "vmrghh $vD, $vA, $vB", IIC_VecFP,
[(set v16i8:$vD, (vmrghh_shuffle v16i8:$vA, v16i8:$vB))]>;
def VMRGHW : VXForm_1<140, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vmrghw $vD, $vA, $vB", VecFP,
+ "vmrghw $vD, $vA, $vB", IIC_VecFP,
[(set v16i8:$vD, (vmrghw_shuffle v16i8:$vA, v16i8:$vB))]>;
def VMRGLB : VXForm_1<268, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vmrglb $vD, $vA, $vB", VecFP,
+ "vmrglb $vD, $vA, $vB", IIC_VecFP,
[(set v16i8:$vD, (vmrglb_shuffle v16i8:$vA, v16i8:$vB))]>;
def VMRGLH : VXForm_1<332, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vmrglh $vD, $vA, $vB", VecFP,
+ "vmrglh $vD, $vA, $vB", IIC_VecFP,
[(set v16i8:$vD, (vmrglh_shuffle v16i8:$vA, v16i8:$vB))]>;
def VMRGLW : VXForm_1<396, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vmrglw $vD, $vA, $vB", VecFP,
+ "vmrglw $vD, $vA, $vB", IIC_VecFP,
[(set v16i8:$vD, (vmrglw_shuffle v16i8:$vA, v16i8:$vB))]>;
def VMSUMMBM : VA1a_Int_Ty3<37, "vmsummbm", int_ppc_altivec_vmsummbm,
@@ -477,6 +532,7 @@ def VMSUMUHM : VA1a_Int_Ty3<38, "vmsumuhm", int_ppc_altivec_vmsumuhm,
def VMSUMUHS : VA1a_Int_Ty3<39, "vmsumuhs", int_ppc_altivec_vmsumuhs,
v4i32, v8i16, v4i32>;
+let isCommutable = 1 in {
def VMULESB : VX1_Int_Ty2<776, "vmulesb", int_ppc_altivec_vmulesb,
v8i16, v16i8>;
def VMULESH : VX1_Int_Ty2<840, "vmulesh", int_ppc_altivec_vmulesh,
@@ -493,6 +549,7 @@ def VMULOUB : VX1_Int_Ty2< 8, "vmuloub", int_ppc_altivec_vmuloub,
v8i16, v16i8>;
def VMULOUH : VX1_Int_Ty2< 72, "vmulouh", int_ppc_altivec_vmulouh,
v4i32, v8i16>;
+} // isCommutable
def VREFP : VX2_Int_SP<266, "vrefp", int_ppc_altivec_vrefp>;
def VRFIM : VX2_Int_SP<714, "vrfim", int_ppc_altivec_vrfim>;
@@ -504,16 +561,16 @@ def VRSQRTEFP : VX2_Int_SP<330, "vrsqrtefp", int_ppc_altivec_vrsqrtefp>;
def VSUBCUW : VX1_Int_Ty<1408, "vsubcuw", int_ppc_altivec_vsubcuw, v4i32>;
def VSUBFP : VXForm_1<74, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vsubfp $vD, $vA, $vB", VecGeneral,
+ "vsubfp $vD, $vA, $vB", IIC_VecGeneral,
[(set v4f32:$vD, (fsub v4f32:$vA, v4f32:$vB))]>;
def VSUBUBM : VXForm_1<1024, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vsububm $vD, $vA, $vB", VecGeneral,
+ "vsububm $vD, $vA, $vB", IIC_VecGeneral,
[(set v16i8:$vD, (sub v16i8:$vA, v16i8:$vB))]>;
def VSUBUHM : VXForm_1<1088, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vsubuhm $vD, $vA, $vB", VecGeneral,
+ "vsubuhm $vD, $vA, $vB", IIC_VecGeneral,
[(set v8i16:$vD, (sub v8i16:$vA, v8i16:$vB))]>;
def VSUBUWM : VXForm_1<1152, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vsubuwm $vD, $vA, $vB", VecGeneral,
+ "vsubuwm $vD, $vA, $vB", IIC_VecGeneral,
[(set v4i32:$vD, (sub v4i32:$vA, v4i32:$vB))]>;
def VSUBSBS : VX1_Int_Ty<1792, "vsubsbs" , int_ppc_altivec_vsubsbs, v16i8>;
@@ -534,15 +591,17 @@ def VSUM4UBS: VX1_Int_Ty3<1544, "vsum4ubs", int_ppc_altivec_vsum4ubs,
v4i32, v16i8, v4i32>;
def VNOR : VXForm_1<1284, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vnor $vD, $vA, $vB", VecFP,
+ "vnor $vD, $vA, $vB", IIC_VecFP,
[(set v4i32:$vD, (vnot_ppc (or v4i32:$vA,
v4i32:$vB)))]>;
+let isCommutable = 1 in {
def VOR : VXForm_1<1156, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vor $vD, $vA, $vB", VecFP,
+ "vor $vD, $vA, $vB", IIC_VecFP,
[(set v4i32:$vD, (or v4i32:$vA, v4i32:$vB))]>;
def VXOR : VXForm_1<1220, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vxor $vD, $vA, $vB", VecFP,
+ "vxor $vD, $vA, $vB", IIC_VecFP,
[(set v4i32:$vD, (xor v4i32:$vA, v4i32:$vB))]>;
+} // isCommutable
def VRLB : VX1_Int_Ty< 4, "vrlb", int_ppc_altivec_vrlb, v16i8>;
def VRLH : VX1_Int_Ty< 68, "vrlh", int_ppc_altivec_vrlh, v8i16>;
@@ -556,15 +615,15 @@ def VSLH : VX1_Int_Ty< 324, "vslh", int_ppc_altivec_vslh, v8i16>;
def VSLW : VX1_Int_Ty< 388, "vslw", int_ppc_altivec_vslw, v4i32>;
def VSPLTB : VXForm_1<524, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
- "vspltb $vD, $vB, $UIMM", VecPerm,
+ "vspltb $vD, $vB, $UIMM", IIC_VecPerm,
[(set v16i8:$vD,
(vspltb_shuffle:$UIMM v16i8:$vB, (undef)))]>;
def VSPLTH : VXForm_1<588, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
- "vsplth $vD, $vB, $UIMM", VecPerm,
+ "vsplth $vD, $vB, $UIMM", IIC_VecPerm,
[(set v16i8:$vD,
(vsplth_shuffle:$UIMM v16i8:$vB, (undef)))]>;
def VSPLTW : VXForm_1<652, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
- "vspltw $vD, $vB, $UIMM", VecPerm,
+ "vspltw $vD, $vB, $UIMM", IIC_VecPerm,
[(set v16i8:$vD,
(vspltw_shuffle:$UIMM v16i8:$vB, (undef)))]>;
@@ -580,13 +639,13 @@ def VSRW : VX1_Int_Ty< 644, "vsrw" , int_ppc_altivec_vsrw , v4i32>;
def VSPLTISB : VXForm_3<780, (outs vrrc:$vD), (ins s5imm:$SIMM),
- "vspltisb $vD, $SIMM", VecPerm,
+ "vspltisb $vD, $SIMM", IIC_VecPerm,
[(set v16i8:$vD, (v16i8 vecspltisb:$SIMM))]>;
def VSPLTISH : VXForm_3<844, (outs vrrc:$vD), (ins s5imm:$SIMM),
- "vspltish $vD, $SIMM", VecPerm,
+ "vspltish $vD, $SIMM", IIC_VecPerm,
[(set v8i16:$vD, (v8i16 vecspltish:$SIMM))]>;
def VSPLTISW : VXForm_3<908, (outs vrrc:$vD), (ins s5imm:$SIMM),
- "vspltisw $vD, $SIMM", VecPerm,
+ "vspltisw $vD, $SIMM", IIC_VecPerm,
[(set v4i32:$vD, (v4i32 vecspltisw:$SIMM))]>;
// Vector Pack.
@@ -601,13 +660,13 @@ def VPKSWSS : VX1_Int_Ty2<462, "vpkswss", int_ppc_altivec_vpkswss,
def VPKSWUS : VX1_Int_Ty2<334, "vpkswus", int_ppc_altivec_vpkswus,
v8i16, v4i32>;
def VPKUHUM : VXForm_1<14, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vpkuhum $vD, $vA, $vB", VecFP,
+ "vpkuhum $vD, $vA, $vB", IIC_VecFP,
[(set v16i8:$vD,
(vpkuhum_shuffle v16i8:$vA, v16i8:$vB))]>;
def VPKUHUS : VX1_Int_Ty2<142, "vpkuhus", int_ppc_altivec_vpkuhus,
v16i8, v8i16>;
def VPKUWUM : VXForm_1<78, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
- "vpkuwum $vD, $vA, $vB", VecFP,
+ "vpkuwum $vD, $vA, $vB", IIC_VecFP,
[(set v16i8:$vD,
(vpkuwum_shuffle v16i8:$vA, v16i8:$vB))]>;
def VPKUWUS : VX1_Int_Ty2<206, "vpkuwus", int_ppc_altivec_vpkuwus,
@@ -631,10 +690,12 @@ def VUPKLSH : VX2_Int_Ty2<718, "vupklsh", int_ppc_altivec_vupklsh,
// Altivec Comparisons.
class VCMP<bits<10> xo, string asmstr, ValueType Ty>
- : VXRForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),asmstr,VecFPCompare,
+ : VXRForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), asmstr,
+ IIC_VecFPCompare,
[(set Ty:$vD, (Ty (PPCvcmp Ty:$vA, Ty:$vB, xo)))]>;
class VCMPo<bits<10> xo, string asmstr, ValueType Ty>
- : VXRForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),asmstr,VecFPCompare,
+ : VXRForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), asmstr,
+ IIC_VecFPCompare,
[(set Ty:$vD, (Ty (PPCvcmp_o Ty:$vA, Ty:$vB, xo)))]> {
let Defs = [CR6];
let RC = 1;
@@ -676,24 +737,24 @@ def VCMPGTUWo : VCMPo<646, "vcmpgtuw. $vD, $vA, $vB", v4i32>;
let isCodeGenOnly = 1 in {
def V_SET0B : VXForm_setzero<1220, (outs vrrc:$vD), (ins),
- "vxor $vD, $vD, $vD", VecFP,
+ "vxor $vD, $vD, $vD", IIC_VecFP,
[(set v16i8:$vD, (v16i8 immAllZerosV))]>;
def V_SET0H : VXForm_setzero<1220, (outs vrrc:$vD), (ins),
- "vxor $vD, $vD, $vD", VecFP,
+ "vxor $vD, $vD, $vD", IIC_VecFP,
[(set v8i16:$vD, (v8i16 immAllZerosV))]>;
def V_SET0 : VXForm_setzero<1220, (outs vrrc:$vD), (ins),
- "vxor $vD, $vD, $vD", VecFP,
+ "vxor $vD, $vD, $vD", IIC_VecFP,
[(set v4i32:$vD, (v4i32 immAllZerosV))]>;
let IMM=-1 in {
def V_SETALLONESB : VXForm_3<908, (outs vrrc:$vD), (ins),
- "vspltisw $vD, -1", VecFP,
+ "vspltisw $vD, -1", IIC_VecFP,
[(set v16i8:$vD, (v16i8 immAllOnesV))]>;
def V_SETALLONESH : VXForm_3<908, (outs vrrc:$vD), (ins),
- "vspltisw $vD, -1", VecFP,
+ "vspltisw $vD, -1", IIC_VecFP,
[(set v8i16:$vD, (v8i16 immAllOnesV))]>;
def V_SETALLONES : VXForm_3<908, (outs vrrc:$vD), (ins),
- "vspltisw $vD, -1", VecFP,
+ "vspltisw $vD, -1", IIC_VecFP,
[(set v4i32:$vD, (v4i32 immAllOnesV))]>;
}
}
@@ -761,6 +822,16 @@ def:Pat<(vpkuwum_unary_shuffle v16i8:$vA, undef),
def:Pat<(vpkuhum_unary_shuffle v16i8:$vA, undef),
(VPKUHUM $vA, $vA)>;
+// Match vsldoi(y,x), vpkuwum(y,x), vpkuhum(y,x), i.e., swapped operands.
+// These fragments are matched for little-endian, where the inputs must
+// be swapped for correct semantics.
+def:Pat<(vsldoi_swapped_shuffle:$in v16i8:$vA, v16i8:$vB),
+ (VSLDOI $vB, $vA, (VSLDOI_swapped_get_imm $in))>;
+def:Pat<(vpkuwum_swapped_shuffle v16i8:$vA, v16i8:$vB),
+ (VPKUWUM $vB, $vA)>;
+def:Pat<(vpkuhum_swapped_shuffle v16i8:$vA, v16i8:$vB),
+ (VPKUHUM $vB, $vA)>;
+
// Match vmrg*(x,x)
def:Pat<(vmrglb_unary_shuffle v16i8:$vA, undef),
(VMRGLB $vA, $vA)>;
@@ -775,6 +846,22 @@ def:Pat<(vmrghh_unary_shuffle v16i8:$vA, undef),
def:Pat<(vmrghw_unary_shuffle v16i8:$vA, undef),
(VMRGHW $vA, $vA)>;
+// Match vmrg*(y,x), i.e., swapped operands. These fragments
+// are matched for little-endian, where the inputs must be
+// swapped for correct semantics.
+def:Pat<(vmrglb_swapped_shuffle v16i8:$vA, v16i8:$vB),
+ (VMRGLB $vB, $vA)>;
+def:Pat<(vmrglh_swapped_shuffle v16i8:$vA, v16i8:$vB),
+ (VMRGLH $vB, $vA)>;
+def:Pat<(vmrglw_swapped_shuffle v16i8:$vA, v16i8:$vB),
+ (VMRGLW $vB, $vA)>;
+def:Pat<(vmrghb_swapped_shuffle v16i8:$vA, v16i8:$vB),
+ (VMRGHB $vB, $vA)>;
+def:Pat<(vmrghh_swapped_shuffle v16i8:$vA, v16i8:$vB),
+ (VMRGHH $vB, $vA)>;
+def:Pat<(vmrghw_swapped_shuffle v16i8:$vA, v16i8:$vB),
+ (VMRGHW $vB, $vA)>;
+
// Logical Operations
def : Pat<(vnot_ppc v4i32:$vA), (VNOR $vA, $vA)>;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td b/contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td
index 29233d4..1e4396c 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td
@@ -14,6 +14,8 @@
class I<bits<6> opcode, dag OOL, dag IOL, string asmstr, InstrItinClass itin>
: Instruction {
field bits<32> Inst;
+ field bits<32> SoftFail = 0;
+ let Size = 4;
bit PPC64 = 0; // Default value, override with isPPC64
@@ -67,6 +69,8 @@ class I2<bits<6> opcode1, bits<6> opcode2, dag OOL, dag IOL, string asmstr,
InstrItinClass itin>
: Instruction {
field bits<64> Inst;
+ field bits<64> SoftFail = 0;
+ let Size = 8;
bit PPC64 = 0; // Default value, override with isPPC64
@@ -109,7 +113,7 @@ class IForm<bits<6> opcode, bit aa, bit lk, dag OOL, dag IOL, string asmstr,
// 1.7.2 B-Form
class BForm<bits<6> opcode, bit aa, bit lk, dag OOL, dag IOL, string asmstr>
- : I<opcode, OOL, IOL, asmstr, BrB> {
+ : I<opcode, OOL, IOL, asmstr, IIC_BrB> {
bits<7> BIBO; // 2 bits of BI and 5 bits of BO.
bits<3> CR;
bits<14> BD;
@@ -135,7 +139,7 @@ class BForm_1<bits<6> opcode, bits<5> bo, bit aa, bit lk, dag OOL, dag IOL,
class BForm_2<bits<6> opcode, bits<5> bo, bits<5> bi, bit aa, bit lk,
dag OOL, dag IOL, string asmstr>
- : I<opcode, OOL, IOL, asmstr, BrB> {
+ : I<opcode, OOL, IOL, asmstr, IIC_BrB> {
bits<14> BD;
let Inst{6-10} = bo;
@@ -147,7 +151,7 @@ class BForm_2<bits<6> opcode, bits<5> bo, bits<5> bi, bit aa, bit lk,
class BForm_3<bits<6> opcode, bit aa, bit lk,
dag OOL, dag IOL, string asmstr>
- : I<opcode, OOL, IOL, asmstr, BrB> {
+ : I<opcode, OOL, IOL, asmstr, IIC_BrB> {
bits<5> BO;
bits<5> BI;
bits<14> BD;
@@ -159,6 +163,19 @@ class BForm_3<bits<6> opcode, bit aa, bit lk,
let Inst{31} = lk;
}
+class BForm_4<bits<6> opcode, bits<5> bo, bit aa, bit lk,
+ dag OOL, dag IOL, string asmstr>
+ : I<opcode, OOL, IOL, asmstr, IIC_BrB> {
+ bits<5> BI;
+ bits<14> BD;
+
+ let Inst{6-10} = bo;
+ let Inst{11-15} = BI;
+ let Inst{16-29} = BD;
+ let Inst{30} = aa;
+ let Inst{31} = lk;
+}
+
// 1.7.3 SC-Form
class SCForm<bits<6> opcode, bits<1> xo,
dag OOL, dag IOL, string asmstr, InstrItinClass itin,
@@ -258,6 +275,15 @@ class DForm_4_zero<bits<6> opcode, dag OOL, dag IOL, string asmstr,
let Addr = 0;
}
+class DForm_4_fixedreg_zero<bits<6> opcode, bits<5> R, dag OOL, dag IOL,
+ string asmstr, InstrItinClass itin,
+ list<dag> pattern>
+ : DForm_4<opcode, OOL, IOL, asmstr, itin, pattern> {
+ let A = R;
+ let B = R;
+ let C = 0;
+}
+
class IForm_and_DForm_1<bits<6> opcode1, bit aa, bit lk, bits<6> opcode2,
dag OOL, dag IOL, string asmstr,
InstrItinClass itin, list<dag> pattern>
@@ -334,20 +360,6 @@ class DSForm_1<bits<6> opcode, bits<2> xo, dag OOL, dag IOL, string asmstr,
let Inst{30-31} = xo;
}
-class DSForm_1a<bits<6> opcode, bits<2> xo, dag OOL, dag IOL, string asmstr,
- InstrItinClass itin, list<dag> pattern>
- : I<opcode, OOL, IOL, asmstr, itin> {
- bits<5> RST;
- bits<14> DS;
- bits<5> RA;
-
- let Pattern = pattern;
-
- let Inst{6-10} = RST;
- let Inst{11-15} = RA;
- let Inst{16-29} = DS;
- let Inst{30-31} = xo;
-}
// 1.7.6 X-Form
class XForm_base_r3xo<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
@@ -567,6 +579,173 @@ class XForm_16b<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
let A = 0;
}
+// XX*-Form (VSX)
+class XX1Form<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<6> XT;
+ bits<5> A;
+ bits<5> B;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = XT{4-0};
+ let Inst{11-15} = A;
+ let Inst{16-20} = B;
+ let Inst{21-30} = xo;
+ let Inst{31} = XT{5};
+}
+
+class XX2Form<bits<6> opcode, bits<9> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<6> XT;
+ bits<6> XB;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = XT{4-0};
+ let Inst{11-15} = 0;
+ let Inst{16-20} = XB{4-0};
+ let Inst{21-29} = xo;
+ let Inst{30} = XB{5};
+ let Inst{31} = XT{5};
+}
+
+class XX2Form_1<bits<6> opcode, bits<9> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<3> CR;
+ bits<6> XB;
+
+ let Pattern = pattern;
+
+ let Inst{6-8} = CR;
+ let Inst{9-15} = 0;
+ let Inst{16-20} = XB{4-0};
+ let Inst{21-29} = xo;
+ let Inst{30} = XB{5};
+ let Inst{31} = 0;
+}
+
+class XX2Form_2<bits<6> opcode, bits<9> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<6> XT;
+ bits<6> XB;
+ bits<2> D;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = XT{4-0};
+ let Inst{11-13} = 0;
+ let Inst{14-15} = D;
+ let Inst{16-20} = XB{4-0};
+ let Inst{21-29} = xo;
+ let Inst{30} = XB{5};
+ let Inst{31} = XT{5};
+}
+
+class XX3Form<bits<6> opcode, bits<8> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<6> XT;
+ bits<6> XA;
+ bits<6> XB;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = XT{4-0};
+ let Inst{11-15} = XA{4-0};
+ let Inst{16-20} = XB{4-0};
+ let Inst{21-28} = xo;
+ let Inst{29} = XA{5};
+ let Inst{30} = XB{5};
+ let Inst{31} = XT{5};
+}
+
+class XX3Form_1<bits<6> opcode, bits<8> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<3> CR;
+ bits<6> XA;
+ bits<6> XB;
+
+ let Pattern = pattern;
+
+ let Inst{6-8} = CR;
+ let Inst{9-10} = 0;
+ let Inst{11-15} = XA{4-0};
+ let Inst{16-20} = XB{4-0};
+ let Inst{21-28} = xo;
+ let Inst{29} = XA{5};
+ let Inst{30} = XB{5};
+ let Inst{31} = 0;
+}
+
+class XX3Form_2<bits<6> opcode, bits<5> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<6> XT;
+ bits<6> XA;
+ bits<6> XB;
+ bits<2> D;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = XT{4-0};
+ let Inst{11-15} = XA{4-0};
+ let Inst{16-20} = XB{4-0};
+ let Inst{21} = 0;
+ let Inst{22-23} = D;
+ let Inst{24-28} = xo;
+ let Inst{29} = XA{5};
+ let Inst{30} = XB{5};
+ let Inst{31} = XT{5};
+}
+
+class XX3Form_Rc<bits<6> opcode, bits<7> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<6> XT;
+ bits<6> XA;
+ bits<6> XB;
+
+ let Pattern = pattern;
+
+ bit RC = 0; // set by isDOT
+
+ let Inst{6-10} = XT{4-0};
+ let Inst{11-15} = XA{4-0};
+ let Inst{16-20} = XB{4-0};
+ let Inst{21} = RC;
+ let Inst{22-28} = xo;
+ let Inst{29} = XA{5};
+ let Inst{30} = XB{5};
+ let Inst{31} = XT{5};
+}
+
+class XX4Form<bits<6> opcode, bits<2> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<6> XT;
+ bits<6> XA;
+ bits<6> XB;
+ bits<6> XC;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = XT{4-0};
+ let Inst{11-15} = XA{4-0};
+ let Inst{16-20} = XB{4-0};
+ let Inst{21-25} = XC{4-0};
+ let Inst{26-27} = xo;
+ let Inst{28} = XC{5};
+ let Inst{29} = XA{5};
+ let Inst{30} = XB{5};
+ let Inst{31} = XT{5};
+}
+
// DCB_Form - Form X instruction, used for dcb* instructions.
class DCB_Form<bits<10> xo, bits<5> immfield, dag OOL, dag IOL, string asmstr,
InstrItinClass itin, list<dag> pattern>
@@ -664,6 +843,12 @@ class XLForm_2_br<bits<6> opcode, bits<10> xo, bit lk,
let BH = 0;
}
+class XLForm_2_br2<bits<6> opcode, bits<10> xo, bits<5> bo, bit lk,
+ dag OOL, dag IOL, string asmstr, InstrItinClass itin, list<dag> pattern>
+ : XLForm_2<opcode, xo, lk, OOL, IOL, asmstr, itin, pattern> {
+ let BO = bo;
+ let BH = 0;
+}
class XLForm_2_ext<bits<6> opcode, bits<10> xo, bits<5> bo, bits<5> bi, bit lk,
dag OOL, dag IOL, string asmstr, InstrItinClass itin, list<dag> pattern>
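As an illustrative aside on the new XX*-Form classes above: VSX instructions address 64 vector-scalar registers, so a register number needs six bits; the low five bits stay in the classic 5-bit fields and the sixth bit is placed in the low end of the word. Below is a minimal C++ sketch of how the XX3Form assignments pack into a 32-bit word, assuming the usual PPC TableGen convention that Inst{0} is the most-significant bit; the helper names are illustrative and not part of the patch.

#include <cstdint>

// Mirrors the XX3Form 'let Inst{...}' assignments above.  With Inst{0} as the
// most-significant bit, a field whose last bit is b is shifted left by 31 - b.
static uint32_t packXX3Form(uint32_t opcode, uint32_t XT, uint32_t XA,
                            uint32_t XB, uint32_t xo) {
  auto put = [](uint32_t val, unsigned msb, unsigned lsb) {
    return (val & ((1u << (lsb - msb + 1)) - 1)) << (31 - lsb);
  };
  return put(opcode, 0, 5) |
         put(XT & 0x1F, 6, 10) |      // Inst{6-10}  = XT{4-0}
         put(XA & 0x1F, 11, 15) |     // Inst{11-15} = XA{4-0}
         put(XB & 0x1F, 16, 20) |     // Inst{16-20} = XB{4-0}
         put(xo, 21, 28) |            // Inst{21-28} = xo
         put((XA >> 5) & 1, 29, 29) | // Inst{29}    = XA{5}
         put((XB >> 5) & 1, 30, 30) | // Inst{30}    = XB{5}
         put((XT >> 5) & 1, 31, 31);  // Inst{31}    = XT{5}
}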
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 80bc27a..9bac91d 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -18,26 +18,32 @@
#include "PPCInstrBuilder.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCTargetMachine.h"
-#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "ppc-instr-info"
+
#define GET_INSTRMAP_INFO
#define GET_INSTRINFO_CTOR_DTOR
#include "PPCGenInstrInfo.inc"
-using namespace llvm;
-
static cl::
opt<bool> DisableCTRLoopAnal("disable-ppc-ctrloop-analysis", cl::Hidden,
cl::desc("Disable analysis for CTR loops"));
@@ -45,26 +51,35 @@ opt<bool> DisableCTRLoopAnal("disable-ppc-ctrloop-analysis", cl::Hidden,
static cl::opt<bool> DisableCmpOpt("disable-ppc-cmp-opt",
cl::desc("Disable compare instruction optimization"), cl::Hidden);
+static cl::opt<bool> DisableVSXFMAMutate("disable-ppc-vsx-fma-mutation",
+cl::desc("Disable VSX FMA instruction mutation"), cl::Hidden);
+
+static cl::opt<bool> VSXSelfCopyCrash("crash-on-ppc-vsx-self-copy",
+cl::desc("Causes the backend to crash instead of generating a nop VSX copy"),
+cl::Hidden);
+
// Pin the vtable to this file.
void PPCInstrInfo::anchor() {}
-PPCInstrInfo::PPCInstrInfo(PPCTargetMachine &tm)
- : PPCGenInstrInfo(PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP),
- TM(tm), RI(*TM.getSubtargetImpl()) {}
+PPCInstrInfo::PPCInstrInfo(PPCSubtarget &STI)
+ : PPCGenInstrInfo(PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP),
+ Subtarget(STI), RI(STI) {}
/// CreateTargetHazardRecognizer - Return the hazard recognizer to use for
/// this target when scheduling the DAG.
-ScheduleHazardRecognizer *PPCInstrInfo::CreateTargetHazardRecognizer(
- const TargetMachine *TM,
- const ScheduleDAG *DAG) const {
- unsigned Directive = TM->getSubtarget<PPCSubtarget>().getDarwinDirective();
+ScheduleHazardRecognizer *
+PPCInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
+ const ScheduleDAG *DAG) const {
+ unsigned Directive =
+ static_cast<const PPCSubtarget *>(STI)->getDarwinDirective();
if (Directive == PPC::DIR_440 || Directive == PPC::DIR_A2 ||
Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500) {
- const InstrItineraryData *II = TM->getInstrItineraryData();
- return new PPCScoreboardHazardRecognizer(II, DAG);
+ const InstrItineraryData *II =
+ &static_cast<const PPCSubtarget *>(STI)->getInstrItineraryData();
+ return new ScoreboardHazardRecognizer(II, DAG);
}
- return TargetInstrInfo::CreateTargetHazardRecognizer(TM, DAG);
+ return TargetInstrInfo::CreateTargetHazardRecognizer(STI, DAG);
}
/// CreateTargetPostRAHazardRecognizer - Return the postRA hazard recognizer
@@ -72,17 +87,72 @@ ScheduleHazardRecognizer *PPCInstrInfo::CreateTargetHazardRecognizer(
ScheduleHazardRecognizer *PPCInstrInfo::CreateTargetPostRAHazardRecognizer(
const InstrItineraryData *II,
const ScheduleDAG *DAG) const {
- unsigned Directive = TM.getSubtarget<PPCSubtarget>().getDarwinDirective();
+ unsigned Directive =
+ DAG->TM.getSubtarget<PPCSubtarget>().getDarwinDirective();
+
+ if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8)
+ return new PPCDispatchGroupSBHazardRecognizer(II, DAG);
// Most subtargets use a PPC970 recognizer.
if (Directive != PPC::DIR_440 && Directive != PPC::DIR_A2 &&
Directive != PPC::DIR_E500mc && Directive != PPC::DIR_E5500) {
- assert(TM.getInstrInfo() && "No InstrInfo?");
+ assert(DAG->TII && "No InstrInfo?");
+
+ return new PPCHazardRecognizer970(*DAG);
+ }
+
+ return new ScoreboardHazardRecognizer(II, DAG);
+}
+
+
+int PPCInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI,
+ unsigned UseIdx) const {
+ int Latency = PPCGenInstrInfo::getOperandLatency(ItinData, DefMI, DefIdx,
+ UseMI, UseIdx);
+
+ const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
+ unsigned Reg = DefMO.getReg();
+
+ const TargetRegisterInfo *TRI = &getRegisterInfo();
+ bool IsRegCR;
+ if (TRI->isVirtualRegister(Reg)) {
+ const MachineRegisterInfo *MRI =
+ &DefMI->getParent()->getParent()->getRegInfo();
+ IsRegCR = MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRRCRegClass) ||
+ MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRBITRCRegClass);
+ } else {
+ IsRegCR = PPC::CRRCRegClass.contains(Reg) ||
+ PPC::CRBITRCRegClass.contains(Reg);
+ }
- return new PPCHazardRecognizer970(TM);
+ if (UseMI->isBranch() && IsRegCR) {
+ if (Latency < 0)
+ Latency = getInstrLatency(ItinData, DefMI);
+
+    // On some cores, there is an additional delay between writing to a
+    // condition register and using it from a branch.
+ unsigned Directive = Subtarget.getDarwinDirective();
+ switch (Directive) {
+ default: break;
+ case PPC::DIR_7400:
+ case PPC::DIR_750:
+ case PPC::DIR_970:
+ case PPC::DIR_E5500:
+ case PPC::DIR_PWR4:
+ case PPC::DIR_PWR5:
+ case PPC::DIR_PWR5X:
+ case PPC::DIR_PWR6:
+ case PPC::DIR_PWR6X:
+ case PPC::DIR_PWR7:
+ case PPC::DIR_PWR8:
+ Latency += 2;
+ break;
+ }
}
- return new PPCScoreboardHazardRecognizer(II, DAG);
+ return Latency;
}
// Detect 32 -> 64-bit extensions where we may reuse the low sub-register.
@@ -110,7 +180,9 @@ unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
case PPC::LFS:
case PPC::LFD:
case PPC::RESTORE_CR:
+ case PPC::RESTORE_CRBIT:
case PPC::LVX:
+ case PPC::LXVD2X:
case PPC::RESTORE_VRSAVE:
// Check for the operands added by addFrameReference (the immediate is the
// offset which defaults to 0).
@@ -134,7 +206,9 @@ unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
case PPC::STFS:
case PPC::STFD:
case PPC::SPILL_CR:
+ case PPC::SPILL_CRBIT:
case PPC::STVX:
+ case PPC::STXVD2X:
case PPC::SPILL_VRSAVE:
// Check for the operands added by addFrameReference (the immediate is the
// offset which defaults to 0).
@@ -156,12 +230,14 @@ PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
// Normal instructions can be commuted the obvious way.
if (MI->getOpcode() != PPC::RLWIMI &&
- MI->getOpcode() != PPC::RLWIMIo)
+ MI->getOpcode() != PPC::RLWIMIo &&
+ MI->getOpcode() != PPC::RLWIMI8 &&
+ MI->getOpcode() != PPC::RLWIMI8o)
return TargetInstrInfo::commuteInstruction(MI, NewMI);
// Cannot commute if it has a non-zero rotate count.
if (MI->getOperand(3).getImm() != 0)
- return 0;
+ return nullptr;
// If we have a zero rotate count, we have:
// M = mask(MB,ME)
@@ -174,6 +250,8 @@ PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
unsigned Reg0 = MI->getOperand(0).getReg();
unsigned Reg1 = MI->getOperand(1).getReg();
unsigned Reg2 = MI->getOperand(2).getReg();
+ unsigned SubReg1 = MI->getOperand(1).getSubReg();
+ unsigned SubReg2 = MI->getOperand(2).getSubReg();
bool Reg1IsKill = MI->getOperand(1).isKill();
bool Reg2IsKill = MI->getOperand(2).isKill();
bool ChangeReg0 = false;
@@ -183,6 +261,7 @@ PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
// Must be two address instruction!
assert(MI->getDesc().getOperandConstraint(0, MCOI::TIED_TO) &&
"Expecting a two-address instruction!");
+ assert(MI->getOperand(0).getSubReg() == SubReg1 && "Tied subreg mismatch");
Reg2IsKill = false;
ChangeReg0 = true;
}
@@ -203,10 +282,14 @@ PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
.addImm((MB-1) & 31);
}
- if (ChangeReg0)
+ if (ChangeReg0) {
MI->getOperand(0).setReg(Reg2);
+ MI->getOperand(0).setSubReg(SubReg2);
+ }
MI->getOperand(2).setReg(Reg1);
MI->getOperand(1).setReg(Reg2);
+ MI->getOperand(2).setSubReg(SubReg1);
+ MI->getOperand(1).setSubReg(SubReg2);
MI->getOperand(2).setIsKill(Reg1IsKill);
MI->getOperand(1).setIsKill(Reg2IsKill);
@@ -216,13 +299,38 @@ PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
return MI;
}
+bool PPCInstrInfo::findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
+ unsigned &SrcOpIdx2) const {
+  // For VSX A-Type FMA instructions, it is the first two operands that can be
+  // commuted; however, because the non-encoded tied input operand is listed
+  // first, the operands to swap are actually the second and third.
+
+ int AltOpc = PPC::getAltVSXFMAOpcode(MI->getOpcode());
+ if (AltOpc == -1)
+ return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
+
+ SrcOpIdx1 = 2;
+ SrcOpIdx2 = 3;
+ return true;
+}
+
void PPCInstrInfo::insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const {
+ // This function is used for scheduling, and the nop wanted here is the type
+ // that terminates dispatch groups on the POWER cores.
+ unsigned Directive = Subtarget.getDarwinDirective();
+ unsigned Opcode;
+ switch (Directive) {
+ default: Opcode = PPC::NOP; break;
+ case PPC::DIR_PWR6: Opcode = PPC::NOP_GT_PWR6; break;
+ case PPC::DIR_PWR7: Opcode = PPC::NOP_GT_PWR7; break;
+  case PPC::DIR_PWR8: Opcode = PPC::NOP_GT_PWR7; break; // FIXME: Update when the P8 scheduling model is ready.
+ }
+
DebugLoc DL;
- BuildMI(MBB, MI, DL, get(PPC::NOP));
+ BuildMI(MBB, MI, DL, get(Opcode));
}
-
// Branch analysis.
// Note: If the condition register is set to CTR or CTR8 then this is a
// BDNZ (imm == 1) or BDZ (imm == 0) branch.
@@ -230,7 +338,7 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const {
- bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
+ bool isPPC64 = Subtarget.isPPC64();
// If the block has no terminators, it just falls into the block after it.
MachineBasicBlock::iterator I = MBB.end();
@@ -263,6 +371,22 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
Cond.push_back(LastInst->getOperand(0));
Cond.push_back(LastInst->getOperand(1));
return false;
+ } else if (LastInst->getOpcode() == PPC::BC) {
+ if (!LastInst->getOperand(1).isMBB())
+ return true;
+ // Block ends with fall-through condbranch.
+ TBB = LastInst->getOperand(1).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
+ Cond.push_back(LastInst->getOperand(0));
+ return false;
+ } else if (LastInst->getOpcode() == PPC::BCn) {
+ if (!LastInst->getOperand(1).isMBB())
+ return true;
+ // Block ends with fall-through condbranch.
+ TBB = LastInst->getOperand(1).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_UNSET));
+ Cond.push_back(LastInst->getOperand(0));
+ return false;
} else if (LastInst->getOpcode() == PPC::BDNZ8 ||
LastInst->getOpcode() == PPC::BDNZ) {
if (!LastInst->getOperand(0).isMBB())
@@ -310,6 +434,26 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
Cond.push_back(SecondLastInst->getOperand(1));
FBB = LastInst->getOperand(0).getMBB();
return false;
+ } else if (SecondLastInst->getOpcode() == PPC::BC &&
+ LastInst->getOpcode() == PPC::B) {
+ if (!SecondLastInst->getOperand(1).isMBB() ||
+ !LastInst->getOperand(0).isMBB())
+ return true;
+ TBB = SecondLastInst->getOperand(1).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
+ Cond.push_back(SecondLastInst->getOperand(0));
+ FBB = LastInst->getOperand(0).getMBB();
+ return false;
+ } else if (SecondLastInst->getOpcode() == PPC::BCn &&
+ LastInst->getOpcode() == PPC::B) {
+ if (!SecondLastInst->getOperand(1).isMBB() ||
+ !LastInst->getOperand(0).isMBB())
+ return true;
+ TBB = SecondLastInst->getOperand(1).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_UNSET));
+ Cond.push_back(SecondLastInst->getOperand(0));
+ FBB = LastInst->getOperand(0).getMBB();
+ return false;
} else if ((SecondLastInst->getOpcode() == PPC::BDNZ8 ||
SecondLastInst->getOpcode() == PPC::BDNZ) &&
LastInst->getOpcode() == PPC::B) {
@@ -367,6 +511,7 @@ unsigned PPCInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
--I;
}
if (I->getOpcode() != PPC::B && I->getOpcode() != PPC::BCC &&
+ I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn &&
I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)
return 0;
@@ -379,6 +524,7 @@ unsigned PPCInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
if (I == MBB.begin()) return 1;
--I;
if (I->getOpcode() != PPC::BCC &&
+ I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn &&
I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)
return 1;
@@ -398,19 +544,23 @@ PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
assert((Cond.size() == 2 || Cond.size() == 0) &&
"PPC branch conditions have two components!");
- bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
+ bool isPPC64 = Subtarget.isPPC64();
// One-way branch.
- if (FBB == 0) {
+ if (!FBB) {
if (Cond.empty()) // Unconditional branch
BuildMI(&MBB, DL, get(PPC::B)).addMBB(TBB);
else if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
BuildMI(&MBB, DL, get(Cond[0].getImm() ?
(isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
(isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
+ else if (Cond[0].getImm() == PPC::PRED_BIT_SET)
+ BuildMI(&MBB, DL, get(PPC::BC)).addOperand(Cond[1]).addMBB(TBB);
+ else if (Cond[0].getImm() == PPC::PRED_BIT_UNSET)
+ BuildMI(&MBB, DL, get(PPC::BCn)).addOperand(Cond[1]).addMBB(TBB);
else // Conditional branch
BuildMI(&MBB, DL, get(PPC::BCC))
- .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
+ .addImm(Cond[0].getImm()).addOperand(Cond[1]).addMBB(TBB);
return 1;
}
@@ -419,9 +569,13 @@ PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
BuildMI(&MBB, DL, get(Cond[0].getImm() ?
(isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
(isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
+ else if (Cond[0].getImm() == PPC::PRED_BIT_SET)
+ BuildMI(&MBB, DL, get(PPC::BC)).addOperand(Cond[1]).addMBB(TBB);
+ else if (Cond[0].getImm() == PPC::PRED_BIT_UNSET)
+ BuildMI(&MBB, DL, get(PPC::BCn)).addOperand(Cond[1]).addMBB(TBB);
else
BuildMI(&MBB, DL, get(PPC::BCC))
- .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
+ .addImm(Cond[0].getImm()).addOperand(Cond[1]).addMBB(TBB);
BuildMI(&MBB, DL, get(PPC::B)).addMBB(FBB);
return 2;
}
@@ -431,7 +585,7 @@ bool PPCInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
const SmallVectorImpl<MachineOperand> &Cond,
unsigned TrueReg, unsigned FalseReg,
int &CondCycles, int &TrueCycles, int &FalseCycles) const {
- if (!TM.getSubtargetImpl()->hasISEL())
+ if (!Subtarget.hasISEL())
return false;
if (Cond.size() != 2)
@@ -475,7 +629,7 @@ void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB,
assert(Cond.size() == 2 &&
"PPC branch conditions have two components!");
- assert(TM.getSubtargetImpl()->hasISEL() &&
+ assert(Subtarget.hasISEL() &&
"Cannot insert select on target without ISEL support");
// Get the register classes.
@@ -506,6 +660,8 @@ void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB,
case PPC::PRED_LE: SubIdx = PPC::sub_gt; SwapOps = true; break;
case PPC::PRED_UN: SubIdx = PPC::sub_un; SwapOps = false; break;
case PPC::PRED_NU: SubIdx = PPC::sub_un; SwapOps = true; break;
+ case PPC::PRED_BIT_SET: SubIdx = 0; SwapOps = false; break;
+ case PPC::PRED_BIT_UNSET: SubIdx = 0; SwapOps = true; break;
}
unsigned FirstReg = SwapOps ? FalseReg : TrueReg,
@@ -534,6 +690,47 @@ void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
+ // We can end up with self copies and similar things as a result of VSX copy
+ // legalization. Promote them here.
+ const TargetRegisterInfo *TRI = &getRegisterInfo();
+ if (PPC::F8RCRegClass.contains(DestReg) &&
+ PPC::VSLRCRegClass.contains(SrcReg)) {
+ unsigned SuperReg =
+ TRI->getMatchingSuperReg(DestReg, PPC::sub_64, &PPC::VSRCRegClass);
+
+ if (VSXSelfCopyCrash && SrcReg == SuperReg)
+ llvm_unreachable("nop VSX copy");
+
+ DestReg = SuperReg;
+ } else if (PPC::VRRCRegClass.contains(DestReg) &&
+ PPC::VSHRCRegClass.contains(SrcReg)) {
+ unsigned SuperReg =
+ TRI->getMatchingSuperReg(DestReg, PPC::sub_128, &PPC::VSRCRegClass);
+
+ if (VSXSelfCopyCrash && SrcReg == SuperReg)
+ llvm_unreachable("nop VSX copy");
+
+ DestReg = SuperReg;
+ } else if (PPC::F8RCRegClass.contains(SrcReg) &&
+ PPC::VSLRCRegClass.contains(DestReg)) {
+ unsigned SuperReg =
+ TRI->getMatchingSuperReg(SrcReg, PPC::sub_64, &PPC::VSRCRegClass);
+
+ if (VSXSelfCopyCrash && DestReg == SuperReg)
+ llvm_unreachable("nop VSX copy");
+
+ SrcReg = SuperReg;
+ } else if (PPC::VRRCRegClass.contains(SrcReg) &&
+ PPC::VSHRCRegClass.contains(DestReg)) {
+ unsigned SuperReg =
+ TRI->getMatchingSuperReg(SrcReg, PPC::sub_128, &PPC::VSRCRegClass);
+
+ if (VSXSelfCopyCrash && DestReg == SuperReg)
+ llvm_unreachable("nop VSX copy");
+
+ SrcReg = SuperReg;
+ }
+
unsigned Opc;
if (PPC::GPRCRegClass.contains(DestReg, SrcReg))
Opc = PPC::OR;
@@ -545,6 +742,18 @@ void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
Opc = PPC::MCRF;
else if (PPC::VRRCRegClass.contains(DestReg, SrcReg))
Opc = PPC::VOR;
+ else if (PPC::VSRCRegClass.contains(DestReg, SrcReg))
+ // There are two different ways this can be done:
+ // 1. xxlor : This has lower latency (on the P7), 2 cycles, but can only
+ // issue in VSU pipeline 0.
+ // 2. xmovdp/xmovsp: This has higher latency (on the P7), 6 cycles, but
+ // can go to either pipeline.
+ // We'll always use xxlor here, because in practically all cases where
+ // copies are generated, they are close enough to some use that the
+ // lower-latency form is preferable.
+ Opc = PPC::XXLOR;
+ else if (PPC::VSFRCRegClass.contains(DestReg, SrcReg))
+ Opc = PPC::XXLORf;
else if (PPC::CRBITRCRegClass.contains(DestReg, SrcReg))
Opc = PPC::CROR;
else
@@ -599,47 +808,31 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
FrameIdx));
return true;
} else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
- // FIXME: We use CRi here because there is no mtcrf on a bit. Since the
- // backend currently only uses CR1EQ as an individual bit, this should
- // not cause any bug. If we need other uses of CR bits, the following
- // code may be invalid.
- unsigned Reg = 0;
- if (SrcReg == PPC::CR0LT || SrcReg == PPC::CR0GT ||
- SrcReg == PPC::CR0EQ || SrcReg == PPC::CR0UN)
- Reg = PPC::CR0;
- else if (SrcReg == PPC::CR1LT || SrcReg == PPC::CR1GT ||
- SrcReg == PPC::CR1EQ || SrcReg == PPC::CR1UN)
- Reg = PPC::CR1;
- else if (SrcReg == PPC::CR2LT || SrcReg == PPC::CR2GT ||
- SrcReg == PPC::CR2EQ || SrcReg == PPC::CR2UN)
- Reg = PPC::CR2;
- else if (SrcReg == PPC::CR3LT || SrcReg == PPC::CR3GT ||
- SrcReg == PPC::CR3EQ || SrcReg == PPC::CR3UN)
- Reg = PPC::CR3;
- else if (SrcReg == PPC::CR4LT || SrcReg == PPC::CR4GT ||
- SrcReg == PPC::CR4EQ || SrcReg == PPC::CR4UN)
- Reg = PPC::CR4;
- else if (SrcReg == PPC::CR5LT || SrcReg == PPC::CR5GT ||
- SrcReg == PPC::CR5EQ || SrcReg == PPC::CR5UN)
- Reg = PPC::CR5;
- else if (SrcReg == PPC::CR6LT || SrcReg == PPC::CR6GT ||
- SrcReg == PPC::CR6EQ || SrcReg == PPC::CR6UN)
- Reg = PPC::CR6;
- else if (SrcReg == PPC::CR7LT || SrcReg == PPC::CR7GT ||
- SrcReg == PPC::CR7EQ || SrcReg == PPC::CR7UN)
- Reg = PPC::CR7;
-
- return StoreRegToStackSlot(MF, Reg, isKill, FrameIdx,
- &PPC::CRRCRegClass, NewMIs, NonRI, SpillsVRS);
-
+ NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::SPILL_CRBIT))
+ .addReg(SrcReg,
+ getKillRegState(isKill)),
+ FrameIdx));
+ return true;
} else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STVX))
.addReg(SrcReg,
getKillRegState(isKill)),
FrameIdx));
NonRI = true;
+ } else if (PPC::VSRCRegClass.hasSubClassEq(RC)) {
+ NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STXVD2X))
+ .addReg(SrcReg,
+ getKillRegState(isKill)),
+ FrameIdx));
+ NonRI = true;
+ } else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) {
+ NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STXSDX))
+ .addReg(SrcReg,
+ getKillRegState(isKill)),
+ FrameIdx));
+ NonRI = true;
} else if (PPC::VRSAVERCRegClass.hasSubClassEq(RC)) {
- assert(TM.getSubtargetImpl()->isDarwin() &&
+ assert(Subtarget.isDarwin() &&
"VRSAVE only needs spill/restore on Darwin");
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::SPILL_VRSAVE))
.addReg(SrcReg,
@@ -717,42 +910,24 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
FrameIdx));
return true;
} else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
-
- unsigned Reg = 0;
- if (DestReg == PPC::CR0LT || DestReg == PPC::CR0GT ||
- DestReg == PPC::CR0EQ || DestReg == PPC::CR0UN)
- Reg = PPC::CR0;
- else if (DestReg == PPC::CR1LT || DestReg == PPC::CR1GT ||
- DestReg == PPC::CR1EQ || DestReg == PPC::CR1UN)
- Reg = PPC::CR1;
- else if (DestReg == PPC::CR2LT || DestReg == PPC::CR2GT ||
- DestReg == PPC::CR2EQ || DestReg == PPC::CR2UN)
- Reg = PPC::CR2;
- else if (DestReg == PPC::CR3LT || DestReg == PPC::CR3GT ||
- DestReg == PPC::CR3EQ || DestReg == PPC::CR3UN)
- Reg = PPC::CR3;
- else if (DestReg == PPC::CR4LT || DestReg == PPC::CR4GT ||
- DestReg == PPC::CR4EQ || DestReg == PPC::CR4UN)
- Reg = PPC::CR4;
- else if (DestReg == PPC::CR5LT || DestReg == PPC::CR5GT ||
- DestReg == PPC::CR5EQ || DestReg == PPC::CR5UN)
- Reg = PPC::CR5;
- else if (DestReg == PPC::CR6LT || DestReg == PPC::CR6GT ||
- DestReg == PPC::CR6EQ || DestReg == PPC::CR6UN)
- Reg = PPC::CR6;
- else if (DestReg == PPC::CR7LT || DestReg == PPC::CR7GT ||
- DestReg == PPC::CR7EQ || DestReg == PPC::CR7UN)
- Reg = PPC::CR7;
-
- return LoadRegFromStackSlot(MF, DL, Reg, FrameIdx,
- &PPC::CRRCRegClass, NewMIs, NonRI, SpillsVRS);
-
+ NewMIs.push_back(addFrameReference(BuildMI(MF, DL,
+ get(PPC::RESTORE_CRBIT), DestReg),
+ FrameIdx));
+ return true;
} else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LVX), DestReg),
FrameIdx));
NonRI = true;
+ } else if (PPC::VSRCRegClass.hasSubClassEq(RC)) {
+ NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LXVD2X), DestReg),
+ FrameIdx));
+ NonRI = true;
+ } else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) {
+ NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LXSDX), DestReg),
+ FrameIdx));
+ NonRI = true;
} else if (PPC::VRSAVERCRegClass.hasSubClassEq(RC)) {
- assert(TM.getSubtargetImpl()->isDarwin() &&
+ assert(Subtarget.isDarwin() &&
"VRSAVE only needs spill/restore on Darwin");
NewMIs.push_back(addFrameReference(BuildMI(MF, DL,
get(PPC::RESTORE_VRSAVE),
@@ -866,7 +1041,7 @@ bool PPCInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
unsigned ZeroReg;
if (UseInfo->isLookupPtrRegClass()) {
- bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
+ bool isPPC64 = Subtarget.isPPC64();
ZeroReg = isPPC64 ? PPC::ZERO8 : PPC::ZERO;
} else {
ZeroReg = UseInfo->RegClass == PPC::G8RC_NOX0RegClassID ?
@@ -933,13 +1108,21 @@ bool PPCInstrInfo::PredicateInstruction(
unsigned OpC = MI->getOpcode();
if (OpC == PPC::BLR) {
if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
- bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
+ bool isPPC64 = Subtarget.isPPC64();
MI->setDesc(get(Pred[0].getImm() ?
(isPPC64 ? PPC::BDNZLR8 : PPC::BDNZLR) :
(isPPC64 ? PPC::BDZLR8 : PPC::BDZLR)));
- } else {
+ } else if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
MI->setDesc(get(PPC::BCLR));
MachineInstrBuilder(*MI->getParent()->getParent(), MI)
+ .addReg(Pred[1].getReg());
+ } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
+ MI->setDesc(get(PPC::BCLRn));
+ MachineInstrBuilder(*MI->getParent()->getParent(), MI)
+ .addReg(Pred[1].getReg());
+ } else {
+ MI->setDesc(get(PPC::BCCLR));
+ MachineInstrBuilder(*MI->getParent()->getParent(), MI)
.addImm(Pred[0].getImm())
.addReg(Pred[1].getReg());
}
@@ -947,10 +1130,26 @@ bool PPCInstrInfo::PredicateInstruction(
return true;
} else if (OpC == PPC::B) {
if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
- bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
+ bool isPPC64 = Subtarget.isPPC64();
MI->setDesc(get(Pred[0].getImm() ?
(isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
(isPPC64 ? PPC::BDZ8 : PPC::BDZ)));
+ } else if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
+ MachineBasicBlock *MBB = MI->getOperand(0).getMBB();
+ MI->RemoveOperand(0);
+
+ MI->setDesc(get(PPC::BC));
+ MachineInstrBuilder(*MI->getParent()->getParent(), MI)
+ .addReg(Pred[1].getReg())
+ .addMBB(MBB);
+ } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
+ MachineBasicBlock *MBB = MI->getOperand(0).getMBB();
+ MI->RemoveOperand(0);
+
+ MI->setDesc(get(PPC::BCn));
+ MachineInstrBuilder(*MI->getParent()->getParent(), MI)
+ .addReg(Pred[1].getReg())
+ .addMBB(MBB);
} else {
MachineBasicBlock *MBB = MI->getOperand(0).getMBB();
MI->RemoveOperand(0);
@@ -969,9 +1168,24 @@ bool PPCInstrInfo::PredicateInstruction(
llvm_unreachable("Cannot predicate bctr[l] on the ctr register");
bool setLR = OpC == PPC::BCTRL || OpC == PPC::BCTRL8;
- bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
- MI->setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8 : PPC::BCCTR8) :
- (setLR ? PPC::BCCTRL : PPC::BCCTR)));
+ bool isPPC64 = Subtarget.isPPC64();
+
+ if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
+ MI->setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8 : PPC::BCCTR8) :
+ (setLR ? PPC::BCCTRL : PPC::BCCTR)));
+ MachineInstrBuilder(*MI->getParent()->getParent(), MI)
+ .addReg(Pred[1].getReg());
+ return true;
+ } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
+ MI->setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8n : PPC::BCCTR8n) :
+ (setLR ? PPC::BCCTRLn : PPC::BCCTRn)));
+ MachineInstrBuilder(*MI->getParent()->getParent(), MI)
+ .addReg(Pred[1].getReg());
+ return true;
+ }
+
+ MI->setDesc(get(isPPC64 ? (setLR ? PPC::BCCCTRL8 : PPC::BCCCTR8) :
+ (setLR ? PPC::BCCCTRL : PPC::BCCCTR)));
MachineInstrBuilder(*MI->getParent()->getParent(), MI)
.addImm(Pred[0].getImm())
.addReg(Pred[1].getReg());
@@ -1115,7 +1329,7 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
// for equality checks (as those don't depend on the sign). On PPC64,
// we are restricted to equality for unsigned 64-bit comparisons and for
// signed 32-bit comparisons the applicability is more restricted.
- bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
+ bool isPPC64 = Subtarget.isPPC64();
bool is32BitSignedCompare = OpC == PPC::CMPWI || OpC == PPC::CMPW;
bool is32BitUnsignedCompare = OpC == PPC::CMPLWI || OpC == PPC::CMPLW;
bool is64BitUnsignedCompare = OpC == PPC::CMPLDI || OpC == PPC::CMPLD;
@@ -1156,8 +1370,8 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
if (equalityOnly) {
// We need to check the uses of the condition register in order to reject
// non-equality comparisons.
- for (MachineRegisterInfo::use_iterator I = MRI->use_begin(CRReg),
- IE = MRI->use_end(); I != IE; ++I) {
+    for (MachineRegisterInfo::use_instr_iterator I = MRI->use_instr_begin(CRReg),
+ IE = MRI->use_instr_end(); I != IE; ++I) {
MachineInstr *UseMI = &*I;
if (UseMI->getOpcode() == PPC::BCC) {
unsigned Pred = UseMI->getOperand(0).getImm();
@@ -1179,8 +1393,8 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
for (MachineBasicBlock::iterator EL = CmpInstr->getParent()->end();
I != EL; ++I) {
bool FoundUse = false;
- for (MachineRegisterInfo::use_iterator J = MRI->use_begin(CRReg),
- JE = MRI->use_end(); J != JE; ++J)
+    for (MachineRegisterInfo::use_instr_iterator J = MRI->use_instr_begin(CRReg),
+ JE = MRI->use_instr_end(); J != JE; ++J)
if (&*J == &*I) {
FoundUse = true;
break;
@@ -1193,10 +1407,10 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
// There are two possible candidates which can be changed to set CR[01].
// One is MI, the other is a SUB instruction.
// For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1).
- MachineInstr *Sub = NULL;
+ MachineInstr *Sub = nullptr;
if (SrcReg2 != 0)
// MI is not a candidate for CMPrr.
- MI = NULL;
+ MI = nullptr;
// FIXME: Conservatively refuse to convert an instruction which isn't in the
// same BB as the comparison. This is to allow the check below to avoid calls
// (and other explicit clobbers); instead we should really check for these
@@ -1289,15 +1503,16 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
}
if (ShouldSwap)
- for (MachineRegisterInfo::use_iterator I = MRI->use_begin(CRReg),
- IE = MRI->use_end(); I != IE; ++I) {
+ for (MachineRegisterInfo::use_instr_iterator
+ I = MRI->use_instr_begin(CRReg), IE = MRI->use_instr_end();
+ I != IE; ++I) {
MachineInstr *UseMI = &*I;
if (UseMI->getOpcode() == PPC::BCC) {
PPC::Predicate Pred = (PPC::Predicate) UseMI->getOperand(0).getImm();
assert((!equalityOnly ||
Pred == PPC::PRED_EQ || Pred == PPC::PRED_NE) &&
"Invalid predicate for equality-only optimization");
- PredsToUpdate.push_back(std::make_pair(&((*I).getOperand(0)),
+ PredsToUpdate.push_back(std::make_pair(&(UseMI->getOperand(0)),
PPC::getSwappedPredicate(Pred)));
} else if (UseMI->getOpcode() == PPC::ISEL ||
UseMI->getOpcode() == PPC::ISEL8) {
@@ -1310,7 +1525,7 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
else if (NewSubReg == PPC::sub_gt)
NewSubReg = PPC::sub_lt;
- SubRegsToUpdate.push_back(std::make_pair(&((*I).getOperand(3)),
+ SubRegsToUpdate.push_back(std::make_pair(&(UseMI->getOperand(3)),
NewSubReg));
} else // We need to abort on a user we don't understand.
return false;
@@ -1322,7 +1537,7 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
CmpInstr->eraseFromParent();
MachineBasicBlock::iterator MII = MI;
- BuildMI(*MI->getParent(), llvm::next(MII), MI->getDebugLoc(),
+ BuildMI(*MI->getParent(), std::next(MII), MI->getDebugLoc(),
get(TargetOpcode::COPY), CRReg)
.addReg(PPC::CR0, MIOpC != NewOpC ? RegState::Kill : 0);
@@ -1367,26 +1582,508 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
/// instruction may be. This returns the maximum number of bytes.
///
unsigned PPCInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
- switch (MI->getOpcode()) {
- case PPC::INLINEASM: { // Inline Asm: Variable size.
+ unsigned Opcode = MI->getOpcode();
+
+ if (Opcode == PPC::INLINEASM) {
const MachineFunction *MF = MI->getParent()->getParent();
const char *AsmStr = MI->getOperand(0).getSymbolName();
return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
- }
- case PPC::PROLOG_LABEL:
- case PPC::EH_LABEL:
- case PPC::GC_LABEL:
- case PPC::DBG_VALUE:
- return 0;
- case PPC::BL8_NOP:
- case PPC::BLA8_NOP:
- return 8;
- default:
- return 4; // PowerPC instructions are all 4 bytes
+ } else {
+ const MCInstrDesc &Desc = get(Opcode);
+ return Desc.getSize();
}
}
#undef DEBUG_TYPE
+#define DEBUG_TYPE "ppc-vsx-fma-mutate"
+
+namespace {
+  // PPCVSXFMAMutate pass - After register coalescing, mutate A-type VSX FMA
+  // instructions into their M-type forms when doing so allows the copy that
+  // feeds the addend to be removed.
+ struct PPCVSXFMAMutate : public MachineFunctionPass {
+ static char ID;
+ PPCVSXFMAMutate() : MachineFunctionPass(ID) {
+ initializePPCVSXFMAMutatePass(*PassRegistry::getPassRegistry());
+ }
+
+ LiveIntervals *LIS;
+
+ const PPCTargetMachine *TM;
+ const PPCInstrInfo *TII;
+
+protected:
+ bool processBlock(MachineBasicBlock &MBB) {
+ bool Changed = false;
+
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+ for (MachineBasicBlock::iterator I = MBB.begin(), IE = MBB.end();
+ I != IE; ++I) {
+ MachineInstr *MI = I;
+
+ // The default (A-type) VSX FMA form kills the addend (it is taken from
+ // the target register, which is then updated to reflect the result of
+ // the FMA). If the instruction, however, kills one of the registers
+ // used for the product, then we can use the M-form instruction (which
+ // will take that value from the to-be-defined register).
+
+ int AltOpc = PPC::getAltVSXFMAOpcode(MI->getOpcode());
+ if (AltOpc == -1)
+ continue;
+
+ // This pass is run after register coalescing, and so we're looking for
+ // a situation like this:
+ // ...
+ // %vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9
+ // %vreg5<def,tied1> = XSMADDADP %vreg5<tied0>, %vreg17, %vreg16,
+ // %RM<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
+ // ...
+ // %vreg9<def,tied1> = XSMADDADP %vreg9<tied0>, %vreg17, %vreg19,
+ // %RM<imp-use>; VSLRC:%vreg9,%vreg17,%vreg19
+ // ...
+ // Where we can eliminate the copy by changing from the A-type to the
+ // M-type instruction. Specifically, for this example, this means:
+ // %vreg5<def,tied1> = XSMADDADP %vreg5<tied0>, %vreg17, %vreg16,
+ // %RM<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
+ // is replaced by:
+ // %vreg16<def,tied1> = XSMADDMDP %vreg16<tied0>, %vreg18, %vreg9,
+ // %RM<imp-use>; VSLRC:%vreg16,%vreg18,%vreg9
+ // and we remove: %vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9
+
+ SlotIndex FMAIdx = LIS->getInstructionIndex(MI);
+
+ VNInfo *AddendValNo =
+ LIS->getInterval(MI->getOperand(1).getReg()).Query(FMAIdx).valueIn();
+ MachineInstr *AddendMI = LIS->getInstructionFromIndex(AddendValNo->def);
+
+ // The addend and this instruction must be in the same block.
+
+ if (!AddendMI || AddendMI->getParent() != MI->getParent())
+ continue;
+
+ // The addend must be a full copy within the same register class.
+
+ if (!AddendMI->isFullCopy())
+ continue;
+
+ unsigned AddendSrcReg = AddendMI->getOperand(1).getReg();
+ if (TargetRegisterInfo::isVirtualRegister(AddendSrcReg)) {
+ if (MRI.getRegClass(AddendMI->getOperand(0).getReg()) !=
+ MRI.getRegClass(AddendSrcReg))
+ continue;
+ } else {
+ // If AddendSrcReg is a physical register, make sure the destination
+ // register class contains it.
+ if (!MRI.getRegClass(AddendMI->getOperand(0).getReg())
+ ->contains(AddendSrcReg))
+ continue;
+ }
+
+ // In theory, there could be other uses of the addend copy before this
+ // fma. We could deal with this, but that would require additional
+ // logic below and I suspect it will not occur in any relevant
+ // situations.
+ bool OtherUsers = false;
+ for (auto J = std::prev(I), JE = MachineBasicBlock::iterator(AddendMI);
+ J != JE; --J)
+ if (J->readsVirtualRegister(AddendMI->getOperand(0).getReg())) {
+ OtherUsers = true;
+ break;
+ }
+
+ if (OtherUsers)
+ continue;
+
+ // Find one of the product operands that is killed by this instruction.
+
+ unsigned KilledProdOp = 0, OtherProdOp = 0;
+ if (LIS->getInterval(MI->getOperand(2).getReg())
+ .Query(FMAIdx).isKill()) {
+ KilledProdOp = 2;
+ OtherProdOp = 3;
+ } else if (LIS->getInterval(MI->getOperand(3).getReg())
+ .Query(FMAIdx).isKill()) {
+ KilledProdOp = 3;
+ OtherProdOp = 2;
+ }
+
+ // If there are no killed product operands, then this transformation is
+ // likely not profitable.
+ if (!KilledProdOp)
+ continue;
+
+ // In order to replace the addend here with the source of the copy,
+ // it must still be live here.
+ if (!LIS->getInterval(AddendMI->getOperand(1).getReg()).liveAt(FMAIdx))
+ continue;
+
+ // Transform: (O2 * O3) + O1 -> (O2 * O1) + O3.
+
+ unsigned AddReg = AddendMI->getOperand(1).getReg();
+ unsigned KilledProdReg = MI->getOperand(KilledProdOp).getReg();
+ unsigned OtherProdReg = MI->getOperand(OtherProdOp).getReg();
+
+ unsigned AddSubReg = AddendMI->getOperand(1).getSubReg();
+ unsigned KilledProdSubReg = MI->getOperand(KilledProdOp).getSubReg();
+ unsigned OtherProdSubReg = MI->getOperand(OtherProdOp).getSubReg();
+
+ bool AddRegKill = AddendMI->getOperand(1).isKill();
+ bool KilledProdRegKill = MI->getOperand(KilledProdOp).isKill();
+ bool OtherProdRegKill = MI->getOperand(OtherProdOp).isKill();
+
+ bool AddRegUndef = AddendMI->getOperand(1).isUndef();
+ bool KilledProdRegUndef = MI->getOperand(KilledProdOp).isUndef();
+ bool OtherProdRegUndef = MI->getOperand(OtherProdOp).isUndef();
+
+ unsigned OldFMAReg = MI->getOperand(0).getReg();
+
+ assert(OldFMAReg == AddendMI->getOperand(0).getReg() &&
+ "Addend copy not tied to old FMA output!");
+
+ DEBUG(dbgs() << "VSX FMA Mutation:\n " << *MI;);
+
+ MI->getOperand(0).setReg(KilledProdReg);
+ MI->getOperand(1).setReg(KilledProdReg);
+ MI->getOperand(3).setReg(AddReg);
+ MI->getOperand(2).setReg(OtherProdReg);
+
+ MI->getOperand(0).setSubReg(KilledProdSubReg);
+ MI->getOperand(1).setSubReg(KilledProdSubReg);
+ MI->getOperand(3).setSubReg(AddSubReg);
+ MI->getOperand(2).setSubReg(OtherProdSubReg);
+
+ MI->getOperand(1).setIsKill(KilledProdRegKill);
+ MI->getOperand(3).setIsKill(AddRegKill);
+ MI->getOperand(2).setIsKill(OtherProdRegKill);
+
+ MI->getOperand(1).setIsUndef(KilledProdRegUndef);
+ MI->getOperand(3).setIsUndef(AddRegUndef);
+ MI->getOperand(2).setIsUndef(OtherProdRegUndef);
+
+ MI->setDesc(TII->get(AltOpc));
+
+ DEBUG(dbgs() << " -> " << *MI);
+
+        // The killed product operand dies here, so its register can now be
+        // reused for the result of the fma.
+
+ LiveInterval &FMAInt = LIS->getInterval(OldFMAReg);
+ VNInfo *FMAValNo = FMAInt.getVNInfoAt(FMAIdx.getRegSlot());
+ for (auto UI = MRI.reg_nodbg_begin(OldFMAReg), UE = MRI.reg_nodbg_end();
+ UI != UE;) {
+ MachineOperand &UseMO = *UI;
+ MachineInstr *UseMI = UseMO.getParent();
+ ++UI;
+
+ // Don't replace the result register of the copy we're about to erase.
+ if (UseMI == AddendMI)
+ continue;
+
+ UseMO.setReg(KilledProdReg);
+ UseMO.setSubReg(KilledProdSubReg);
+ }
+
+ // Extend the live intervals of the killed product operand to hold the
+ // fma result.
+
+ LiveInterval &NewFMAInt = LIS->getInterval(KilledProdReg);
+ for (LiveInterval::iterator AI = FMAInt.begin(), AE = FMAInt.end();
+ AI != AE; ++AI) {
+ // Don't add the segment that corresponds to the original copy.
+ if (AI->valno == AddendValNo)
+ continue;
+
+ VNInfo *NewFMAValNo =
+ NewFMAInt.getNextValue(AI->start,
+ LIS->getVNInfoAllocator());
+
+ NewFMAInt.addSegment(LiveInterval::Segment(AI->start, AI->end,
+ NewFMAValNo));
+ }
+ DEBUG(dbgs() << " extended: " << NewFMAInt << '\n');
+
+ FMAInt.removeValNo(FMAValNo);
+ DEBUG(dbgs() << " trimmed: " << FMAInt << '\n');
+
+ // Remove the (now unused) copy.
+
+ DEBUG(dbgs() << " removing: " << *AddendMI << '\n');
+ LIS->RemoveMachineInstrFromMaps(AddendMI);
+ AddendMI->eraseFromParent();
+
+ Changed = true;
+ }
+
+ return Changed;
+ }
+
+public:
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ TM = static_cast<const PPCTargetMachine *>(&MF.getTarget());
+    // If we don't have VSX on this subtarget, there is nothing for this
+    // pass to do.
+ if (!TM->getSubtargetImpl()->hasVSX())
+ return false;
+
+ LIS = &getAnalysis<LiveIntervals>();
+
+ TII = TM->getInstrInfo();
+
+ bool Changed = false;
+
+ if (DisableVSXFMAMutate)
+ return Changed;
+
+ for (MachineFunction::iterator I = MF.begin(); I != MF.end();) {
+ MachineBasicBlock &B = *I++;
+ if (processBlock(B))
+ Changed = true;
+ }
+
+ return Changed;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<LiveIntervals>();
+ AU.addPreserved<LiveIntervals>();
+ AU.addRequired<SlotIndexes>();
+ AU.addPreserved<SlotIndexes>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+ };
+}
+
+INITIALIZE_PASS_BEGIN(PPCVSXFMAMutate, DEBUG_TYPE,
+ "PowerPC VSX FMA Mutation", false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_END(PPCVSXFMAMutate, DEBUG_TYPE,
+ "PowerPC VSX FMA Mutation", false, false)
+
+char &llvm::PPCVSXFMAMutateID = PPCVSXFMAMutate::ID;
+
+char PPCVSXFMAMutate::ID = 0;
+FunctionPass*
+llvm::createPPCVSXFMAMutatePass() { return new PPCVSXFMAMutate(); }
+
+#undef DEBUG_TYPE
+#define DEBUG_TYPE "ppc-vsx-copy"
+
+namespace llvm {
+ void initializePPCVSXCopyPass(PassRegistry&);
+}
+
+namespace {
+ // PPCVSXCopy pass - For copies between VSX registers and non-VSX registers
+ // (Altivec and scalar floating-point registers), we need to transform the
+ // copies into subregister copies with other restrictions.
+ struct PPCVSXCopy : public MachineFunctionPass {
+ static char ID;
+ PPCVSXCopy() : MachineFunctionPass(ID) {
+ initializePPCVSXCopyPass(*PassRegistry::getPassRegistry());
+ }
+
+ const PPCTargetMachine *TM;
+ const PPCInstrInfo *TII;
+
+ bool IsRegInClass(unsigned Reg, const TargetRegisterClass *RC,
+ MachineRegisterInfo &MRI) {
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ return RC->hasSubClassEq(MRI.getRegClass(Reg));
+ } else if (RC->contains(Reg)) {
+ return true;
+ }
+
+ return false;
+ }
+
+ bool IsVSReg(unsigned Reg, MachineRegisterInfo &MRI) {
+ return IsRegInClass(Reg, &PPC::VSRCRegClass, MRI);
+ }
+
+ bool IsVRReg(unsigned Reg, MachineRegisterInfo &MRI) {
+ return IsRegInClass(Reg, &PPC::VRRCRegClass, MRI);
+ }
+
+ bool IsF8Reg(unsigned Reg, MachineRegisterInfo &MRI) {
+ return IsRegInClass(Reg, &PPC::F8RCRegClass, MRI);
+ }
+
+protected:
+ bool processBlock(MachineBasicBlock &MBB) {
+ bool Changed = false;
+
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+ for (MachineBasicBlock::iterator I = MBB.begin(), IE = MBB.end();
+ I != IE; ++I) {
+ MachineInstr *MI = I;
+ if (!MI->isFullCopy())
+ continue;
+
+ MachineOperand &DstMO = MI->getOperand(0);
+ MachineOperand &SrcMO = MI->getOperand(1);
+
+        if (IsVSReg(DstMO.getReg(), MRI) &&
+            !IsVSReg(SrcMO.getReg(), MRI)) {
+ // This is a copy *to* a VSX register from a non-VSX register.
+ Changed = true;
+
+ const TargetRegisterClass *SrcRC =
+ IsVRReg(SrcMO.getReg(), MRI) ? &PPC::VSHRCRegClass :
+ &PPC::VSLRCRegClass;
+ assert((IsF8Reg(SrcMO.getReg(), MRI) ||
+ IsVRReg(SrcMO.getReg(), MRI)) &&
+ "Unknown source for a VSX copy");
+
+ unsigned NewVReg = MRI.createVirtualRegister(SrcRC);
+ BuildMI(MBB, MI, MI->getDebugLoc(),
+ TII->get(TargetOpcode::SUBREG_TO_REG), NewVReg)
+ .addImm(1) // add 1, not 0, because there is no implicit clearing
+ // of the high bits.
+ .addOperand(SrcMO)
+ .addImm(IsVRReg(SrcMO.getReg(), MRI) ? PPC::sub_128 :
+ PPC::sub_64);
+
+ // The source of the original copy is now the new virtual register.
+ SrcMO.setReg(NewVReg);
+ } else if (!IsVSReg(DstMO.getReg(), MRI) &&
+ IsVSReg(SrcMO.getReg(), MRI)) {
+ // This is a copy *from* a VSX register to a non-VSX register.
+ Changed = true;
+
+ const TargetRegisterClass *DstRC =
+ IsVRReg(DstMO.getReg(), MRI) ? &PPC::VSHRCRegClass :
+ &PPC::VSLRCRegClass;
+ assert((IsF8Reg(DstMO.getReg(), MRI) ||
+ IsVRReg(DstMO.getReg(), MRI)) &&
+ "Unknown destination for a VSX copy");
+
+ // Copy the VSX value into a new VSX register of the correct subclass.
+ unsigned NewVReg = MRI.createVirtualRegister(DstRC);
+ BuildMI(MBB, MI, MI->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), NewVReg)
+ .addOperand(SrcMO);
+
+ // Transform the original copy into a subregister extraction copy.
+ SrcMO.setReg(NewVReg);
+ SrcMO.setSubReg(IsVRReg(DstMO.getReg(), MRI) ? PPC::sub_128 :
+ PPC::sub_64);
+ }
+ }
+
+ return Changed;
+ }
+
+public:
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ TM = static_cast<const PPCTargetMachine *>(&MF.getTarget());
+ // If we don't have VSX on the subtarget, don't do anything.
+ if (!TM->getSubtargetImpl()->hasVSX())
+ return false;
+ TII = TM->getInstrInfo();
+
+ bool Changed = false;
+
+ for (MachineFunction::iterator I = MF.begin(); I != MF.end();) {
+ MachineBasicBlock &B = *I++;
+ if (processBlock(B))
+ Changed = true;
+ }
+
+ return Changed;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+ };
+}
+
+INITIALIZE_PASS(PPCVSXCopy, DEBUG_TYPE,
+ "PowerPC VSX Copy Legalization", false, false)
+
+char PPCVSXCopy::ID = 0;
+FunctionPass*
+llvm::createPPCVSXCopyPass() { return new PPCVSXCopy(); }
+
+#undef DEBUG_TYPE
+#define DEBUG_TYPE "ppc-vsx-copy-cleanup"
+
+namespace llvm {
+ void initializePPCVSXCopyCleanupPass(PassRegistry&);
+}
+
+namespace {
+ // PPCVSXCopyCleanup pass - We sometimes end up generating self copies of VSX
+ // registers (mostly because the ABI code still places all values into the
+ // "traditional" floating-point and vector registers). Remove them here.
+ struct PPCVSXCopyCleanup : public MachineFunctionPass {
+ static char ID;
+ PPCVSXCopyCleanup() : MachineFunctionPass(ID) {
+ initializePPCVSXCopyCleanupPass(*PassRegistry::getPassRegistry());
+ }
+
+ const PPCTargetMachine *TM;
+ const PPCInstrInfo *TII;
+
+protected:
+ bool processBlock(MachineBasicBlock &MBB) {
+ bool Changed = false;
+
+ SmallVector<MachineInstr *, 4> ToDelete;
+ for (MachineBasicBlock::iterator I = MBB.begin(), IE = MBB.end();
+ I != IE; ++I) {
+ MachineInstr *MI = I;
+ if (MI->getOpcode() == PPC::XXLOR &&
+ MI->getOperand(0).getReg() == MI->getOperand(1).getReg() &&
+ MI->getOperand(0).getReg() == MI->getOperand(2).getReg())
+ ToDelete.push_back(MI);
+ }
+
+ if (!ToDelete.empty())
+ Changed = true;
+
+ for (unsigned i = 0, ie = ToDelete.size(); i != ie; ++i) {
+ DEBUG(dbgs() << "Removing VSX self-copy: " << *ToDelete[i]);
+ ToDelete[i]->eraseFromParent();
+ }
+
+ return Changed;
+ }
+
+public:
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ TM = static_cast<const PPCTargetMachine *>(&MF.getTarget());
+    // If we don't have VSX, don't bother doing anything here.
+ if (!TM->getSubtargetImpl()->hasVSX())
+ return false;
+ TII = TM->getInstrInfo();
+
+ bool Changed = false;
+
+ for (MachineFunction::iterator I = MF.begin(); I != MF.end();) {
+ MachineBasicBlock &B = *I++;
+ if (processBlock(B))
+ Changed = true;
+ }
+
+ return Changed;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+ };
+}
+
+INITIALIZE_PASS(PPCVSXCopyCleanup, DEBUG_TYPE,
+ "PowerPC VSX Copy Cleanup", false, false)
+
+char PPCVSXCopyCleanup::ID = 0;
+FunctionPass*
+llvm::createPPCVSXCopyCleanupPass() { return new PPCVSXCopyCleanup(); }
+
+#undef DEBUG_TYPE
#define DEBUG_TYPE "ppc-early-ret"
STATISTIC(NumBCLR, "Number of early conditional returns");
STATISTIC(NumBLR, "Number of early returns");
@@ -1428,7 +2125,7 @@ protected:
if (J->getOpcode() == PPC::B) {
if (J->getOperand(0).getMBB() == &ReturnMBB) {
// This is an unconditional branch to the return. Replace the
- // branch with a blr.
+ // branch with a blr.
BuildMI(**PI, J, J->getDebugLoc(), TII->get(PPC::BLR));
MachineBasicBlock::iterator K = J--;
K->eraseFromParent();
@@ -1440,7 +2137,7 @@ protected:
if (J->getOperand(2).getMBB() == &ReturnMBB) {
// This is a conditional branch to the return. Replace the branch
// with a bclr.
- BuildMI(**PI, J, J->getDebugLoc(), TII->get(PPC::BCLR))
+ BuildMI(**PI, J, J->getDebugLoc(), TII->get(PPC::BCCLR))
.addImm(J->getOperand(0).getImm())
.addReg(J->getOperand(1).getReg());
MachineBasicBlock::iterator K = J--;
@@ -1449,6 +2146,20 @@ protected:
++NumBCLR;
continue;
}
+ } else if (J->getOpcode() == PPC::BC || J->getOpcode() == PPC::BCn) {
+ if (J->getOperand(1).getMBB() == &ReturnMBB) {
+ // This is a conditional branch to the return. Replace the branch
+ // with a bclr.
+ BuildMI(**PI, J, J->getDebugLoc(),
+ TII->get(J->getOpcode() == PPC::BC ?
+ PPC::BCLR : PPC::BCLRn))
+ .addReg(J->getOperand(0).getReg());
+ MachineBasicBlock::iterator K = J--;
+ K->eraseFromParent();
+ BlockChanged = true;
+ ++NumBCLR;
+ continue;
+ }
} else if (J->isBranch()) {
if (J->isIndirectBranch()) {
if (ReturnMBB.hasAddressTaken())
@@ -1470,7 +2181,7 @@ protected:
if ((*PI)->canFallThrough() && (*PI)->isLayoutSuccessor(&ReturnMBB))
OtherReference = true;
- // Predecessors are stored in a vector and can't be removed here.
+ // Predecessors are stored in a vector and can't be removed here.
if (!OtherReference && BlockChanged) {
PredToRemove.push_back(*PI);
}
@@ -1501,7 +2212,7 @@ protected:
}
public:
- virtual bool runOnMachineFunction(MachineFunction &MF) {
+ bool runOnMachineFunction(MachineFunction &MF) override {
TM = static_cast<const PPCTargetMachine *>(&MF.getTarget());
TII = TM->getInstrInfo();
@@ -1513,7 +2224,7 @@ public:
return Changed;
for (MachineFunction::iterator I = MF.begin(); I != MF.end();) {
- MachineBasicBlock &B = *I++;
+ MachineBasicBlock &B = *I++;
if (processBlock(B))
Changed = true;
}
@@ -1521,7 +2232,7 @@ public:
return Changed;
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
MachineFunctionPass::getAnalysisUsage(AU);
}
};
@@ -1533,4 +2244,3 @@ INITIALIZE_PASS(PPCEarlyReturn, DEBUG_TYPE,
char PPCEarlyReturn::ID = 0;
FunctionPass*
llvm::createPPCEarlyReturnPass() { return new PPCEarlyReturn(); }
-
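
The PPCVSXCopyCleanup pass added above erases XXLOR instructions whose destination and both sources are the same register. A minimal, self-contained sketch of that self-copy test follows; the Instr struct and the XXLOR placeholder value are illustrative assumptions, not LLVM types or the TableGen-generated opcode enum.

    #include <cassert>

    // Simplified stand-in for a machine instruction; not an LLVM type.
    struct Instr {
      unsigned Opcode;
      unsigned Ops[3];   // [0] = destination, [1] and [2] = sources
    };

    // Placeholder opcode value for XXLOR (the real value comes from TableGen).
    constexpr unsigned XXLOR = 0x200;

    // An XXLOR whose destination and both sources name the same register does
    // no work, so the cleanup pass can erase it.
    static bool isVSXSelfCopy(const Instr &MI) {
      return MI.Opcode == XXLOR &&
             MI.Ops[0] == MI.Ops[1] &&
             MI.Ops[0] == MI.Ops[2];
    }

    int main() {
      assert(isVSXSelfCopy({XXLOR, {7, 7, 7}}));   // reg7 = reg7 | reg7 -> removable
      assert(!isVSXSelfCopy({XXLOR, {7, 7, 8}}));  // genuine OR of two registers
      return 0;
    }
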
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h
index f140c41..83f14c6 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h
@@ -65,7 +65,7 @@ enum PPC970_Unit {
class PPCInstrInfo : public PPCGenInstrInfo {
- PPCTargetMachine &TM;
+ PPCSubtarget &Subtarget;
const PPCRegisterInfo RI;
bool StoreRegToStackSlot(MachineFunction &MF,
@@ -80,142 +80,154 @@ class PPCInstrInfo : public PPCGenInstrInfo {
bool &NonRI, bool &SpillsVRS) const;
virtual void anchor();
public:
- explicit PPCInstrInfo(PPCTargetMachine &TM);
+ explicit PPCInstrInfo(PPCSubtarget &STI);
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
- virtual const PPCRegisterInfo &getRegisterInfo() const { return RI; }
+ const PPCRegisterInfo &getRegisterInfo() const { return RI; }
ScheduleHazardRecognizer *
- CreateTargetHazardRecognizer(const TargetMachine *TM,
- const ScheduleDAG *DAG) const;
+ CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
+ const ScheduleDAG *DAG) const override;
ScheduleHazardRecognizer *
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
- const ScheduleDAG *DAG) const;
+ const ScheduleDAG *DAG) const override;
+
+ int getOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI,
+ unsigned UseIdx) const override;
+ int getOperandLatency(const InstrItineraryData *ItinData,
+ SDNode *DefNode, unsigned DefIdx,
+ SDNode *UseNode, unsigned UseIdx) const override {
+ return PPCGenInstrInfo::getOperandLatency(ItinData, DefNode, DefIdx,
+ UseNode, UseIdx);
+ }
bool isCoalescableExtInstr(const MachineInstr &MI,
unsigned &SrcReg, unsigned &DstReg,
- unsigned &SubIdx) const;
+ unsigned &SubIdx) const override;
unsigned isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
+ int &FrameIndex) const override;
unsigned isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
+ int &FrameIndex) const override;
// commuteInstruction - We can commute rlwimi instructions, but only if the
// rotate amt is zero. We also have to munge the immediates a bit.
- virtual MachineInstr *commuteInstruction(MachineInstr *MI, bool NewMI) const;
+ MachineInstr *commuteInstruction(MachineInstr *MI, bool NewMI) const override;
+
+ bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
+ unsigned &SrcOpIdx2) const override;
- virtual void insertNoop(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const;
+ void insertNoop(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const override;
// Branch analysis.
- virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const;
- virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
- virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const;
+ bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const override;
+ unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
+ unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const override;
// Select analysis.
- virtual bool canInsertSelect(const MachineBasicBlock&,
- const SmallVectorImpl<MachineOperand> &Cond,
- unsigned, unsigned, int&, int&, int&) const;
- virtual void insertSelect(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DstReg,
- const SmallVectorImpl<MachineOperand> &Cond,
- unsigned TrueReg, unsigned FalseReg) const;
-
- virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const;
-
- virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
-
- virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
-
- virtual
- bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
-
- virtual bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
- unsigned Reg, MachineRegisterInfo *MRI) const;
+ bool canInsertSelect(const MachineBasicBlock&,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ unsigned, unsigned, int&, int&, int&) const override;
+ void insertSelect(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DstReg,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ unsigned TrueReg, unsigned FalseReg) const override;
+
+ void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const override;
+
+ void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+
+ void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+
+ bool
+ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
+
+ bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
+ unsigned Reg, MachineRegisterInfo *MRI) const override;
  // If-conversion by predication (only supported by some branch instructions).
// All of the profitability checks always return true; it is always
// profitable to use the predicated branches.
- virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB,
- unsigned NumCycles, unsigned ExtraPredCycles,
- const BranchProbability &Probability) const {
+ bool isProfitableToIfCvt(MachineBasicBlock &MBB,
+ unsigned NumCycles, unsigned ExtraPredCycles,
+ const BranchProbability &Probability) const override {
return true;
}
- virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
- unsigned NumT, unsigned ExtraT,
- MachineBasicBlock &FMBB,
- unsigned NumF, unsigned ExtraF,
- const BranchProbability &Probability) const;
+ bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
+ unsigned NumT, unsigned ExtraT,
+ MachineBasicBlock &FMBB,
+ unsigned NumF, unsigned ExtraF,
+ const BranchProbability &Probability) const override;
- virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
- unsigned NumCycles,
- const BranchProbability
- &Probability) const {
+ bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
+ unsigned NumCycles,
+ const BranchProbability
+ &Probability) const override {
return true;
}
- virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
- MachineBasicBlock &FMBB) const {
+ bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
+ MachineBasicBlock &FMBB) const override {
return false;
}
// Predication support.
- bool isPredicated(const MachineInstr *MI) const;
+ bool isPredicated(const MachineInstr *MI) const override;
- virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const;
+ bool isUnpredicatedTerminator(const MachineInstr *MI) const override;
- virtual
bool PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Pred) const;
+ const SmallVectorImpl<MachineOperand> &Pred) const override;
- virtual
bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2) const;
+ const SmallVectorImpl<MachineOperand> &Pred2) const override;
- virtual bool DefinesPredicate(MachineInstr *MI,
- std::vector<MachineOperand> &Pred) const;
+ bool DefinesPredicate(MachineInstr *MI,
+ std::vector<MachineOperand> &Pred) const override;
- virtual bool isPredicable(MachineInstr *MI) const;
+ bool isPredicable(MachineInstr *MI) const override;
// Comparison optimization.
- virtual bool analyzeCompare(const MachineInstr *MI,
- unsigned &SrcReg, unsigned &SrcReg2,
- int &Mask, int &Value) const;
+ bool analyzeCompare(const MachineInstr *MI,
+ unsigned &SrcReg, unsigned &SrcReg2,
+ int &Mask, int &Value) const override;
- virtual bool optimizeCompareInstr(MachineInstr *CmpInstr,
- unsigned SrcReg, unsigned SrcReg2,
- int Mask, int Value,
- const MachineRegisterInfo *MRI) const;
+ bool optimizeCompareInstr(MachineInstr *CmpInstr,
+ unsigned SrcReg, unsigned SrcReg2,
+ int Mask, int Value,
+ const MachineRegisterInfo *MRI) const override;
  /// GetInstSize - Return the number of bytes of code the specified
  /// instruction may occupy. This returns the maximum number of bytes.
///
- virtual unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
+ unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
};
}
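
Most of the header changes above replace explicit virtual declarations with the override specifier. A small, self-contained illustration of what that buys (the Base/Derived classes below are made up for this example and are unrelated to the PPC types): with override, the compiler rejects a declaration whose signature has drifted away from the base class instead of silently introducing a new virtual function.

    #include <iostream>

    struct Base {
      virtual unsigned size(bool Exact) const { return 0; }
      virtual ~Base() = default;
    };

    struct Derived : Base {
      // Checked by the compiler: this really overrides Base::size.
      unsigned size(bool Exact) const override { return Exact ? 4u : 8u; }

      // unsigned size(int Exact) const override { return 1; }
      //   ^ would be a hard error: no matching virtual function in Base.
    };

    int main() {
      Base *B = new Derived();
      std::cout << B->size(true) << "\n";  // prints 4 via the override
      delete B;
      return 0;
    }
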
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td
index 3a3acdd..636ac5d 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td
@@ -144,9 +144,6 @@ def PPCload : SDNode<"PPCISD::LOAD", SDTypeProfile<1, 1, []>,
def PPCload_toc : SDNode<"PPCISD::LOAD_TOC", SDTypeProfile<0, 1, []>,
[SDNPHasChain, SDNPSideEffect,
SDNPInGlue, SDNPOutGlue]>;
-def PPCtoc_restore : SDNode<"PPCISD::TOC_RESTORE", SDTypeProfile<0, 0, []>,
- [SDNPHasChain, SDNPSideEffect,
- SDNPInGlue, SDNPOutGlue]>;
def PPCmtctr : SDNode<"PPCISD::MTCTR", SDT_PPCCall,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def PPCbctrl : SDNode<"PPCISD::BCTRL", SDTNone,
@@ -293,6 +290,12 @@ def imm16ShiftedSExt : PatLeaf<(imm), [{
return N->getZExtValue() == (uint64_t)(int)N->getZExtValue();
}], HI16>;
+def imm64ZExt32 : Operand<i64>, ImmLeaf<i64, [{
+ // imm64ZExt32 predicate - True if the i64 immediate fits in a 32-bit
+ // zero extended field.
+ return isUInt<32>(Imm);
+}]>;
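
The imm64ZExt32 predicate defined above accepts an i64 immediate only when isUInt<32>(Imm) holds, i.e. when the value fits in a zero-extended 32-bit field. A self-contained restatement of that check, using a local helper instead of LLVM's isUInt from MathExtras.h:

    #include <cassert>
    #include <cstdint>

    // True when the 64-bit value can be represented as a zero-extended 32-bit field.
    static bool fitsZeroExt32(int64_t Imm) {
      return static_cast<uint64_t>(Imm) <= 0xFFFFFFFFULL;
    }

    int main() {
      assert(fitsZeroExt32(0));
      assert(fitsZeroExt32(0xFFFFFFFFLL));    // largest value that fits
      assert(!fitsZeroExt32(0x100000000LL));  // needs bit 32
      assert(!fitsZeroExt32(-1));             // sign-extended: high bits are set
      return 0;
    }
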
+
// Some r+i load/store instructions (such as LD, STD, LDU, etc.) that require
// restricted memrix (4-aligned) constants are alignment sensitive. If these
// offsets are hidden behind TOC entries then the values of the lower-order
@@ -409,6 +412,14 @@ def crrc : RegisterOperand<CRRC> {
let ParserMatchClass = PPCRegCRRCAsmOperand;
}
+def PPCU2ImmAsmOperand : AsmOperandClass {
+ let Name = "U2Imm"; let PredicateMethod = "isU2Imm";
+ let RenderMethod = "addImmOperands";
+}
+def u2imm : Operand<i32> {
+ let PrintMethod = "printU2ImmOperand";
+ let ParserMatchClass = PPCU2ImmAsmOperand;
+}
def PPCS5ImmAsmOperand : AsmOperandClass {
let Name = "S5Imm"; let PredicateMethod = "isS5Imm";
let RenderMethod = "addImmOperands";
@@ -416,6 +427,7 @@ def PPCS5ImmAsmOperand : AsmOperandClass {
def s5imm : Operand<i32> {
let PrintMethod = "printS5ImmOperand";
let ParserMatchClass = PPCS5ImmAsmOperand;
+ let DecoderMethod = "decodeSImmOperand<5>";
}
def PPCU5ImmAsmOperand : AsmOperandClass {
let Name = "U5Imm"; let PredicateMethod = "isU5Imm";
@@ -424,6 +436,7 @@ def PPCU5ImmAsmOperand : AsmOperandClass {
def u5imm : Operand<i32> {
let PrintMethod = "printU5ImmOperand";
let ParserMatchClass = PPCU5ImmAsmOperand;
+ let DecoderMethod = "decodeUImmOperand<5>";
}
def PPCU6ImmAsmOperand : AsmOperandClass {
let Name = "U6Imm"; let PredicateMethod = "isU6Imm";
@@ -432,6 +445,7 @@ def PPCU6ImmAsmOperand : AsmOperandClass {
def u6imm : Operand<i32> {
let PrintMethod = "printU6ImmOperand";
let ParserMatchClass = PPCU6ImmAsmOperand;
+ let DecoderMethod = "decodeUImmOperand<6>";
}
def PPCS16ImmAsmOperand : AsmOperandClass {
let Name = "S16Imm"; let PredicateMethod = "isS16Imm";
@@ -441,6 +455,7 @@ def s16imm : Operand<i32> {
let PrintMethod = "printS16ImmOperand";
let EncoderMethod = "getImm16Encoding";
let ParserMatchClass = PPCS16ImmAsmOperand;
+ let DecoderMethod = "decodeSImmOperand<16>";
}
def PPCU16ImmAsmOperand : AsmOperandClass {
let Name = "U16Imm"; let PredicateMethod = "isU16Imm";
@@ -450,6 +465,7 @@ def u16imm : Operand<i32> {
let PrintMethod = "printU16ImmOperand";
let EncoderMethod = "getImm16Encoding";
let ParserMatchClass = PPCU16ImmAsmOperand;
+ let DecoderMethod = "decodeUImmOperand<16>";
}
def PPCS17ImmAsmOperand : AsmOperandClass {
let Name = "S17Imm"; let PredicateMethod = "isS17Imm";
@@ -462,6 +478,7 @@ def s17imm : Operand<i32> {
let PrintMethod = "printS16ImmOperand";
let EncoderMethod = "getImm16Encoding";
let ParserMatchClass = PPCS17ImmAsmOperand;
+ let DecoderMethod = "decodeSImmOperand<16>";
}
def PPCDirectBrAsmOperand : AsmOperandClass {
let Name = "DirectBr"; let PredicateMethod = "isDirectBr";
@@ -507,6 +524,7 @@ def PPCCRBitMaskOperand : AsmOperandClass {
def crbitm: Operand<i8> {
let PrintMethod = "printcrbitm";
let EncoderMethod = "get_crbitm_encoding";
+ let DecoderMethod = "decodeCRBitMOperand";
let ParserMatchClass = PPCCRBitMaskOperand;
}
// Address operands
@@ -544,6 +562,7 @@ def memri : Operand<iPTR> {
let PrintMethod = "printMemRegImm";
let MIOperandInfo = (ops dispRI:$imm, ptr_rc_nor0:$reg);
let EncoderMethod = "getMemRIEncoding";
+ let DecoderMethod = "decodeMemRIOperands";
}
def memrr : Operand<iPTR> {
let PrintMethod = "printMemRegReg";
@@ -553,6 +572,7 @@ def memrix : Operand<iPTR> { // memri where the imm is 4-aligned.
let PrintMethod = "printMemRegImm";
let MIOperandInfo = (ops dispRIX:$imm, ptr_rc_nor0:$reg);
let EncoderMethod = "getMemRIXEncoding";
+ let DecoderMethod = "decodeMemRIXOperands";
}
// A single-register address. This is used with the SjLj
@@ -596,10 +616,10 @@ def iaddroff : ComplexPattern<iPTR, 1, "SelectAddrImmOffs", [], []>;
//===----------------------------------------------------------------------===//
// PowerPC Instruction Predicate Definitions.
-def In32BitMode : Predicate<"!PPCSubTarget.isPPC64()">;
-def In64BitMode : Predicate<"PPCSubTarget.isPPC64()">;
-def IsBookE : Predicate<"PPCSubTarget.isBookE()">;
-def IsNotBookE : Predicate<"!PPCSubTarget.isBookE()">;
+def In32BitMode : Predicate<"!PPCSubTarget->isPPC64()">;
+def In64BitMode : Predicate<"PPCSubTarget->isPPC64()">;
+def IsBookE : Predicate<"PPCSubTarget->isBookE()">;
+def IsNotBookE : Predicate<"!PPCSubTarget->isBookE()">;
//===----------------------------------------------------------------------===//
// PowerPC Multiclass Definitions.
@@ -633,20 +653,6 @@ multiclass XForm_6rc<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
}
}
-multiclass XForm_10r<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
- string asmbase, string asmstr, InstrItinClass itin,
- list<dag> pattern> {
- let BaseName = asmbase in {
- def NAME : XForm_10<opcode, xo, OOL, IOL,
- !strconcat(asmbase, !strconcat(" ", asmstr)), itin,
- pattern>, RecFormRel;
- let Defs = [CR0] in
- def o : XForm_10<opcode, xo, OOL, IOL,
- !strconcat(asmbase, !strconcat(". ", asmstr)), itin,
- []>, isDOT, RecFormRel;
- }
-}
-
multiclass XForm_10rc<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
string asmbase, string asmstr, InstrItinClass itin,
list<dag> pattern> {
@@ -907,30 +913,63 @@ let usesCustomInserter = 1, // Expanded after instruction selection.
def SELECT_CC_VRRC: Pseudo<(outs vrrc:$dst), (ins crrc:$cond, vrrc:$T, vrrc:$F,
i32imm:$BROPC), "#SELECT_CC_VRRC",
[]>;
+
+  // SELECT_* pseudo instructions, like SELECT_CC_* but taking the condition
+  // register bit directly.
+ def SELECT_I4 : Pseudo<(outs gprc:$dst), (ins crbitrc:$cond,
+ gprc_nor0:$T, gprc_nor0:$F), "#SELECT_I4",
+ [(set i32:$dst, (select i1:$cond, i32:$T, i32:$F))]>;
+ def SELECT_I8 : Pseudo<(outs g8rc:$dst), (ins crbitrc:$cond,
+ g8rc_nox0:$T, g8rc_nox0:$F), "#SELECT_I8",
+ [(set i64:$dst, (select i1:$cond, i64:$T, i64:$F))]>;
+ def SELECT_F4 : Pseudo<(outs f4rc:$dst), (ins crbitrc:$cond,
+ f4rc:$T, f4rc:$F), "#SELECT_F4",
+ [(set f32:$dst, (select i1:$cond, f32:$T, f32:$F))]>;
+ def SELECT_F8 : Pseudo<(outs f8rc:$dst), (ins crbitrc:$cond,
+ f8rc:$T, f8rc:$F), "#SELECT_F8",
+ [(set f64:$dst, (select i1:$cond, f64:$T, f64:$F))]>;
+ def SELECT_VRRC: Pseudo<(outs vrrc:$dst), (ins crbitrc:$cond,
+ vrrc:$T, vrrc:$F), "#SELECT_VRRC",
+ [(set v4i32:$dst,
+ (select i1:$cond, v4i32:$T, v4i32:$F))]>;
}
// SPILL_CR - Indicate that we're dumping the CR register, so we'll need to
// scavenge a register for it.
-let mayStore = 1 in
+let mayStore = 1 in {
def SPILL_CR : Pseudo<(outs), (ins crrc:$cond, memri:$F),
"#SPILL_CR", []>;
+def SPILL_CRBIT : Pseudo<(outs), (ins crbitrc:$cond, memri:$F),
+ "#SPILL_CRBIT", []>;
+}
// RESTORE_CR - Indicate that we're restoring the CR register (previously
// spilled), so we'll need to scavenge a register for it.
-let mayLoad = 1 in
+let mayLoad = 1 in {
def RESTORE_CR : Pseudo<(outs crrc:$cond), (ins memri:$F),
"#RESTORE_CR", []>;
+def RESTORE_CRBIT : Pseudo<(outs crbitrc:$cond), (ins memri:$F),
+ "#RESTORE_CRBIT", []>;
+}
let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7 in {
let isReturn = 1, Uses = [LR, RM] in
- def BLR : XLForm_2_ext<19, 16, 20, 0, 0, (outs), (ins), "blr", BrB,
+ def BLR : XLForm_2_ext<19, 16, 20, 0, 0, (outs), (ins), "blr", IIC_BrB,
[(retflag)]>;
let isBranch = 1, isIndirectBranch = 1, Uses = [CTR] in {
- def BCTR : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", BrB, []>;
+ def BCTR : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", IIC_BrB,
+ []>;
- let isCodeGenOnly = 1 in
- def BCCTR : XLForm_2_br<19, 528, 0, (outs), (ins pred:$cond),
- "b${cond:cc}ctr${cond:pm} ${cond:reg}", BrB, []>;
+ let isCodeGenOnly = 1 in {
+ def BCCCTR : XLForm_2_br<19, 528, 0, (outs), (ins pred:$cond),
+ "b${cond:cc}ctr${cond:pm} ${cond:reg}", IIC_BrB,
+ []>;
+
+ def BCCTR : XLForm_2_br2<19, 528, 12, 0, (outs), (ins crbitrc:$bi),
+ "bcctr 12, $bi, 0", IIC_BrB, []>;
+ def BCCTRn : XLForm_2_br2<19, 528, 4, 0, (outs), (ins crbitrc:$bi),
+ "bcctr 4, $bi, 0", IIC_BrB, []>;
+ }
}
}
@@ -941,10 +980,10 @@ let Defs = [LR] in
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in {
let isBarrier = 1 in {
def B : IForm<18, 0, 0, (outs), (ins directbrtarget:$dst),
- "b $dst", BrB,
+ "b $dst", IIC_BrB,
[(br bb:$dst)]>;
def BA : IForm<18, 1, 0, (outs), (ins absdirectbrtarget:$dst),
- "ba $dst", BrB, []>;
+ "ba $dst", IIC_BrB, []>;
}
// BCC represents an arbitrary conditional branch on a predicate.
@@ -958,23 +997,39 @@ let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in {
"b${cond:cc}a${cond:pm} ${cond:reg}, $dst">;
let isReturn = 1, Uses = [LR, RM] in
- def BCLR : XLForm_2_br<19, 16, 0, (outs), (ins pred:$cond),
- "b${cond:cc}lr${cond:pm} ${cond:reg}", BrB, []>;
+ def BCCLR : XLForm_2_br<19, 16, 0, (outs), (ins pred:$cond),
+ "b${cond:cc}lr${cond:pm} ${cond:reg}", IIC_BrB, []>;
+ }
+
+ let isCodeGenOnly = 1 in {
+ let Pattern = [(brcond i1:$bi, bb:$dst)] in
+ def BC : BForm_4<16, 12, 0, 0, (outs), (ins crbitrc:$bi, condbrtarget:$dst),
+ "bc 12, $bi, $dst">;
+
+ let Pattern = [(brcond (not i1:$bi), bb:$dst)] in
+ def BCn : BForm_4<16, 4, 0, 0, (outs), (ins crbitrc:$bi, condbrtarget:$dst),
+ "bc 4, $bi, $dst">;
+
+ let isReturn = 1, Uses = [LR, RM] in
+ def BCLR : XLForm_2_br2<19, 16, 12, 0, (outs), (ins crbitrc:$bi),
+ "bclr 12, $bi, 0", IIC_BrB, []>;
+ def BCLRn : XLForm_2_br2<19, 16, 4, 0, (outs), (ins crbitrc:$bi),
+ "bclr 4, $bi, 0", IIC_BrB, []>;
}
let isReturn = 1, Defs = [CTR], Uses = [CTR, LR, RM] in {
def BDZLR : XLForm_2_ext<19, 16, 18, 0, 0, (outs), (ins),
- "bdzlr", BrB, []>;
+ "bdzlr", IIC_BrB, []>;
def BDNZLR : XLForm_2_ext<19, 16, 16, 0, 0, (outs), (ins),
- "bdnzlr", BrB, []>;
+ "bdnzlr", IIC_BrB, []>;
def BDZLRp : XLForm_2_ext<19, 16, 27, 0, 0, (outs), (ins),
- "bdzlr+", BrB, []>;
+ "bdzlr+", IIC_BrB, []>;
def BDNZLRp: XLForm_2_ext<19, 16, 25, 0, 0, (outs), (ins),
- "bdnzlr+", BrB, []>;
+ "bdnzlr+", IIC_BrB, []>;
def BDZLRm : XLForm_2_ext<19, 16, 26, 0, 0, (outs), (ins),
- "bdzlr-", BrB, []>;
+ "bdzlr-", IIC_BrB, []>;
def BDNZLRm: XLForm_2_ext<19, 16, 24, 0, 0, (outs), (ins),
- "bdnzlr-", BrB, []>;
+ "bdnzlr-", IIC_BrB, []>;
}
let Defs = [CTR], Uses = [CTR] in {
@@ -1017,35 +1072,56 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR] in {
// Convenient aliases for call instructions
let Uses = [RM] in {
def BL : IForm<18, 0, 1, (outs), (ins calltarget:$func),
- "bl $func", BrB, []>; // See Pat patterns below.
+ "bl $func", IIC_BrB, []>; // See Pat patterns below.
def BLA : IForm<18, 1, 1, (outs), (ins abscalltarget:$func),
- "bla $func", BrB, [(PPCcall (i32 imm:$func))]>;
+ "bla $func", IIC_BrB, [(PPCcall (i32 imm:$func))]>;
let isCodeGenOnly = 1 in {
def BL_TLS : IForm<18, 0, 1, (outs), (ins tlscall32:$func),
- "bl $func", BrB, []>;
+ "bl $func", IIC_BrB, []>;
def BCCL : BForm<16, 0, 1, (outs), (ins pred:$cond, condbrtarget:$dst),
"b${cond:cc}l${cond:pm} ${cond:reg}, $dst">;
def BCCLA : BForm<16, 1, 1, (outs), (ins pred:$cond, abscondbrtarget:$dst),
"b${cond:cc}la${cond:pm} ${cond:reg}, $dst">;
+
+ def BCL : BForm_4<16, 12, 0, 1, (outs),
+ (ins crbitrc:$bi, condbrtarget:$dst),
+ "bcl 12, $bi, $dst">;
+ def BCLn : BForm_4<16, 4, 0, 1, (outs),
+ (ins crbitrc:$bi, condbrtarget:$dst),
+ "bcl 4, $bi, $dst">;
}
}
let Uses = [CTR, RM] in {
def BCTRL : XLForm_2_ext<19, 528, 20, 0, 1, (outs), (ins),
- "bctrl", BrB, [(PPCbctrl)]>,
+ "bctrl", IIC_BrB, [(PPCbctrl)]>,
Requires<[In32BitMode]>;
- let isCodeGenOnly = 1 in
- def BCCTRL : XLForm_2_br<19, 528, 1, (outs), (ins pred:$cond),
- "b${cond:cc}ctrl${cond:pm} ${cond:reg}", BrB, []>;
+ let isCodeGenOnly = 1 in {
+ def BCCCTRL : XLForm_2_br<19, 528, 1, (outs), (ins pred:$cond),
+ "b${cond:cc}ctrl${cond:pm} ${cond:reg}", IIC_BrB,
+ []>;
+
+ def BCCTRL : XLForm_2_br2<19, 528, 12, 1, (outs), (ins crbitrc:$bi),
+ "bcctrl 12, $bi, 0", IIC_BrB, []>;
+ def BCCTRLn : XLForm_2_br2<19, 528, 4, 1, (outs), (ins crbitrc:$bi),
+ "bcctrl 4, $bi, 0", IIC_BrB, []>;
+ }
}
let Uses = [LR, RM] in {
def BLRL : XLForm_2_ext<19, 16, 20, 0, 1, (outs), (ins),
- "blrl", BrB, []>;
+ "blrl", IIC_BrB, []>;
+
+ let isCodeGenOnly = 1 in {
+ def BCCLRL : XLForm_2_br<19, 16, 1, (outs), (ins pred:$cond),
+ "b${cond:cc}lrl${cond:pm} ${cond:reg}", IIC_BrB,
+ []>;
- let isCodeGenOnly = 1 in
- def BCLRL : XLForm_2_br<19, 16, 1, (outs), (ins pred:$cond),
- "b${cond:cc}lrl${cond:pm} ${cond:reg}", BrB, []>;
+ def BCLRL : XLForm_2_br2<19, 16, 12, 1, (outs), (ins crbitrc:$bi),
+ "bclrl 12, $bi, 0", IIC_BrB, []>;
+ def BCLRLn : XLForm_2_br2<19, 16, 4, 1, (outs), (ins crbitrc:$bi),
+ "bclrl 4, $bi, 0", IIC_BrB, []>;
+ }
}
let Defs = [CTR], Uses = [CTR, RM] in {
def BDZL : BForm_1<16, 18, 0, 1, (outs), (ins condbrtarget:$dst),
@@ -1075,17 +1151,17 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR] in {
}
let Defs = [CTR], Uses = [CTR, LR, RM] in {
def BDZLRL : XLForm_2_ext<19, 16, 18, 0, 1, (outs), (ins),
- "bdzlrl", BrB, []>;
+ "bdzlrl", IIC_BrB, []>;
def BDNZLRL : XLForm_2_ext<19, 16, 16, 0, 1, (outs), (ins),
- "bdnzlrl", BrB, []>;
+ "bdnzlrl", IIC_BrB, []>;
def BDZLRLp : XLForm_2_ext<19, 16, 27, 0, 1, (outs), (ins),
- "bdzlrl+", BrB, []>;
+ "bdzlrl+", IIC_BrB, []>;
def BDNZLRLp: XLForm_2_ext<19, 16, 25, 0, 1, (outs), (ins),
- "bdnzlrl+", BrB, []>;
+ "bdnzlrl+", IIC_BrB, []>;
def BDZLRLm : XLForm_2_ext<19, 16, 26, 0, 1, (outs), (ins),
- "bdzlrl-", BrB, []>;
+ "bdzlrl-", IIC_BrB, []>;
def BDNZLRLm: XLForm_2_ext<19, 16, 24, 0, 1, (outs), (ins),
- "bdnzlrl-", BrB, []>;
+ "bdnzlrl-", IIC_BrB, []>;
}
}
@@ -1111,19 +1187,19 @@ let isCodeGenOnly = 1 in {
let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7, isBranch = 1,
isIndirectBranch = 1, isCall = 1, isReturn = 1, Uses = [CTR, RM] in
-def TAILBCTR : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", BrB, []>,
- Requires<[In32BitMode]>;
+def TAILBCTR : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", IIC_BrB,
+ []>, Requires<[In32BitMode]>;
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7,
isBarrier = 1, isCall = 1, isReturn = 1, Uses = [RM] in
def TAILB : IForm<18, 0, 0, (outs), (ins calltarget:$dst),
- "b $dst", BrB,
+ "b $dst", IIC_BrB,
[]>;
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7,
isBarrier = 1, isCall = 1, isReturn = 1, Uses = [RM] in
def TAILBA : IForm<18, 0, 0, (outs), (ins abscalltarget:$dst),
- "ba $dst", BrB,
+ "ba $dst", IIC_BrB,
[]>;
}
@@ -1149,33 +1225,33 @@ let isBranch = 1, isTerminator = 1 in {
// System call.
let PPC970_Unit = 7 in {
def SC : SCForm<17, 1, (outs), (ins i32imm:$lev),
- "sc $lev", BrB, [(PPCsc (i32 imm:$lev))]>;
+ "sc $lev", IIC_BrB, [(PPCsc (i32 imm:$lev))]>;
}
// DCB* instructions.
-def DCBA : DCB_Form<758, 0, (outs), (ins memrr:$dst),
- "dcba $dst", LdStDCBF, [(int_ppc_dcba xoaddr:$dst)]>,
+def DCBA : DCB_Form<758, 0, (outs), (ins memrr:$dst), "dcba $dst",
+ IIC_LdStDCBF, [(int_ppc_dcba xoaddr:$dst)]>,
PPC970_DGroup_Single;
-def DCBF : DCB_Form<86, 0, (outs), (ins memrr:$dst),
- "dcbf $dst", LdStDCBF, [(int_ppc_dcbf xoaddr:$dst)]>,
+def DCBF : DCB_Form<86, 0, (outs), (ins memrr:$dst), "dcbf $dst",
+ IIC_LdStDCBF, [(int_ppc_dcbf xoaddr:$dst)]>,
PPC970_DGroup_Single;
-def DCBI : DCB_Form<470, 0, (outs), (ins memrr:$dst),
- "dcbi $dst", LdStDCBF, [(int_ppc_dcbi xoaddr:$dst)]>,
+def DCBI : DCB_Form<470, 0, (outs), (ins memrr:$dst), "dcbi $dst",
+ IIC_LdStDCBF, [(int_ppc_dcbi xoaddr:$dst)]>,
PPC970_DGroup_Single;
-def DCBST : DCB_Form<54, 0, (outs), (ins memrr:$dst),
- "dcbst $dst", LdStDCBF, [(int_ppc_dcbst xoaddr:$dst)]>,
+def DCBST : DCB_Form<54, 0, (outs), (ins memrr:$dst), "dcbst $dst",
+ IIC_LdStDCBF, [(int_ppc_dcbst xoaddr:$dst)]>,
PPC970_DGroup_Single;
-def DCBT : DCB_Form<278, 0, (outs), (ins memrr:$dst),
- "dcbt $dst", LdStDCBF, [(int_ppc_dcbt xoaddr:$dst)]>,
+def DCBT : DCB_Form<278, 0, (outs), (ins memrr:$dst), "dcbt $dst",
+ IIC_LdStDCBF, [(int_ppc_dcbt xoaddr:$dst)]>,
PPC970_DGroup_Single;
-def DCBTST : DCB_Form<246, 0, (outs), (ins memrr:$dst),
- "dcbtst $dst", LdStDCBF, [(int_ppc_dcbtst xoaddr:$dst)]>,
+def DCBTST : DCB_Form<246, 0, (outs), (ins memrr:$dst), "dcbtst $dst",
+ IIC_LdStDCBF, [(int_ppc_dcbtst xoaddr:$dst)]>,
PPC970_DGroup_Single;
-def DCBZ : DCB_Form<1014, 0, (outs), (ins memrr:$dst),
- "dcbz $dst", LdStDCBF, [(int_ppc_dcbz xoaddr:$dst)]>,
+def DCBZ : DCB_Form<1014, 0, (outs), (ins memrr:$dst), "dcbz $dst",
+ IIC_LdStDCBF, [(int_ppc_dcbz xoaddr:$dst)]>,
PPC970_DGroup_Single;
-def DCBZL : DCB_Form<1014, 1, (outs), (ins memrr:$dst),
- "dcbzl $dst", LdStDCBF, [(int_ppc_dcbzl xoaddr:$dst)]>,
+def DCBZL : DCB_Form<1014, 1, (outs), (ins memrr:$dst), "dcbzl $dst",
+ IIC_LdStDCBF, [(int_ppc_dcbzl xoaddr:$dst)]>,
PPC970_DGroup_Single;
def : Pat<(prefetch xoaddr:$dst, (i32 0), imm, (i32 1)),
@@ -1263,26 +1339,26 @@ let usesCustomInserter = 1 in {
// Instructions to support atomic operations
def LWARX : XForm_1<31, 20, (outs gprc:$rD), (ins memrr:$src),
- "lwarx $rD, $src", LdStLWARX,
+ "lwarx $rD, $src", IIC_LdStLWARX,
[(set i32:$rD, (PPClarx xoaddr:$src))]>;
let Defs = [CR0] in
def STWCX : XForm_1<31, 150, (outs), (ins gprc:$rS, memrr:$dst),
- "stwcx. $rS, $dst", LdStSTWCX,
+ "stwcx. $rS, $dst", IIC_LdStSTWCX,
[(PPCstcx i32:$rS, xoaddr:$dst)]>,
isDOT;
let isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in
-def TRAP : XForm_24<31, 4, (outs), (ins), "trap", LdStLoad, [(trap)]>;
+def TRAP : XForm_24<31, 4, (outs), (ins), "trap", IIC_LdStLoad, [(trap)]>;
def TWI : DForm_base<3, (outs), (ins u5imm:$to, gprc:$rA, s16imm:$imm),
- "twi $to, $rA, $imm", IntTrapW, []>;
+ "twi $to, $rA, $imm", IIC_IntTrapW, []>;
def TW : XForm_1<31, 4, (outs), (ins u5imm:$to, gprc:$rA, gprc:$rB),
- "tw $to, $rA, $rB", IntTrapW, []>;
+ "tw $to, $rA, $rB", IIC_IntTrapW, []>;
def TDI : DForm_base<2, (outs), (ins u5imm:$to, g8rc:$rA, s16imm:$imm),
- "tdi $to, $rA, $imm", IntTrapD, []>;
+ "tdi $to, $rA, $imm", IIC_IntTrapD, []>;
def TD : XForm_1<31, 68, (outs), (ins u5imm:$to, g8rc:$rA, g8rc:$rB),
- "td $to, $rA, $rB", IntTrapD, []>;
+ "td $to, $rA, $rB", IIC_IntTrapD, []>;
//===----------------------------------------------------------------------===//
// PPC32 Load Instructions.
@@ -1291,56 +1367,56 @@ def TD : XForm_1<31, 68, (outs), (ins u5imm:$to, g8rc:$rA, g8rc:$rB),
// Unindexed (r+i) Loads.
let canFoldAsLoad = 1, PPC970_Unit = 2 in {
def LBZ : DForm_1<34, (outs gprc:$rD), (ins memri:$src),
- "lbz $rD, $src", LdStLoad,
+ "lbz $rD, $src", IIC_LdStLoad,
[(set i32:$rD, (zextloadi8 iaddr:$src))]>;
def LHA : DForm_1<42, (outs gprc:$rD), (ins memri:$src),
- "lha $rD, $src", LdStLHA,
+ "lha $rD, $src", IIC_LdStLHA,
[(set i32:$rD, (sextloadi16 iaddr:$src))]>,
PPC970_DGroup_Cracked;
def LHZ : DForm_1<40, (outs gprc:$rD), (ins memri:$src),
- "lhz $rD, $src", LdStLoad,
+ "lhz $rD, $src", IIC_LdStLoad,
[(set i32:$rD, (zextloadi16 iaddr:$src))]>;
def LWZ : DForm_1<32, (outs gprc:$rD), (ins memri:$src),
- "lwz $rD, $src", LdStLoad,
+ "lwz $rD, $src", IIC_LdStLoad,
[(set i32:$rD, (load iaddr:$src))]>;
def LFS : DForm_1<48, (outs f4rc:$rD), (ins memri:$src),
- "lfs $rD, $src", LdStLFD,
+ "lfs $rD, $src", IIC_LdStLFD,
[(set f32:$rD, (load iaddr:$src))]>;
def LFD : DForm_1<50, (outs f8rc:$rD), (ins memri:$src),
- "lfd $rD, $src", LdStLFD,
+ "lfd $rD, $src", IIC_LdStLFD,
[(set f64:$rD, (load iaddr:$src))]>;
// Unindexed (r+i) Loads with Update (preinc).
let mayLoad = 1, neverHasSideEffects = 1 in {
def LBZU : DForm_1<35, (outs gprc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr),
- "lbzu $rD, $addr", LdStLoadUpd,
+ "lbzu $rD, $addr", IIC_LdStLoadUpd,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LHAU : DForm_1<43, (outs gprc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr),
- "lhau $rD, $addr", LdStLHAU,
+ "lhau $rD, $addr", IIC_LdStLHAU,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LHZU : DForm_1<41, (outs gprc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr),
- "lhzu $rD, $addr", LdStLoadUpd,
+ "lhzu $rD, $addr", IIC_LdStLoadUpd,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LWZU : DForm_1<33, (outs gprc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr),
- "lwzu $rD, $addr", LdStLoadUpd,
+ "lwzu $rD, $addr", IIC_LdStLoadUpd,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LFSU : DForm_1<49, (outs f4rc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr),
- "lfsu $rD, $addr", LdStLFDU,
+ "lfsu $rD, $addr", IIC_LdStLFDU,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LFDU : DForm_1<51, (outs f8rc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr),
- "lfdu $rD, $addr", LdStLFDU,
+ "lfdu $rD, $addr", IIC_LdStLFDU,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
@@ -1348,37 +1424,37 @@ def LFDU : DForm_1<51, (outs f8rc:$rD, ptr_rc_nor0:$ea_result), (ins memri:$addr
// Indexed (r+r) Loads with Update (preinc).
def LBZUX : XForm_1<31, 119, (outs gprc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
- "lbzux $rD, $addr", LdStLoadUpd,
+ "lbzux $rD, $addr", IIC_LdStLoadUpdX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">;
def LHAUX : XForm_1<31, 375, (outs gprc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
- "lhaux $rD, $addr", LdStLHAU,
+ "lhaux $rD, $addr", IIC_LdStLHAUX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">;
def LHZUX : XForm_1<31, 311, (outs gprc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
- "lhzux $rD, $addr", LdStLoadUpd,
+ "lhzux $rD, $addr", IIC_LdStLoadUpdX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">;
def LWZUX : XForm_1<31, 55, (outs gprc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
- "lwzux $rD, $addr", LdStLoadUpd,
+ "lwzux $rD, $addr", IIC_LdStLoadUpdX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">;
def LFSUX : XForm_1<31, 567, (outs f4rc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
- "lfsux $rD, $addr", LdStLFDU,
+ "lfsux $rD, $addr", IIC_LdStLFDUX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">;
def LFDUX : XForm_1<31, 631, (outs f8rc:$rD, ptr_rc_nor0:$ea_result),
(ins memrr:$addr),
- "lfdux $rD, $addr", LdStLFDU,
+ "lfdux $rD, $addr", IIC_LdStLFDUX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">;
}
@@ -1388,45 +1464,45 @@ def LFDUX : XForm_1<31, 631, (outs f8rc:$rD, ptr_rc_nor0:$ea_result),
//
let canFoldAsLoad = 1, PPC970_Unit = 2 in {
def LBZX : XForm_1<31, 87, (outs gprc:$rD), (ins memrr:$src),
- "lbzx $rD, $src", LdStLoad,
+ "lbzx $rD, $src", IIC_LdStLoad,
[(set i32:$rD, (zextloadi8 xaddr:$src))]>;
def LHAX : XForm_1<31, 343, (outs gprc:$rD), (ins memrr:$src),
- "lhax $rD, $src", LdStLHA,
+ "lhax $rD, $src", IIC_LdStLHA,
[(set i32:$rD, (sextloadi16 xaddr:$src))]>,
PPC970_DGroup_Cracked;
def LHZX : XForm_1<31, 279, (outs gprc:$rD), (ins memrr:$src),
- "lhzx $rD, $src", LdStLoad,
+ "lhzx $rD, $src", IIC_LdStLoad,
[(set i32:$rD, (zextloadi16 xaddr:$src))]>;
def LWZX : XForm_1<31, 23, (outs gprc:$rD), (ins memrr:$src),
- "lwzx $rD, $src", LdStLoad,
+ "lwzx $rD, $src", IIC_LdStLoad,
[(set i32:$rD, (load xaddr:$src))]>;
def LHBRX : XForm_1<31, 790, (outs gprc:$rD), (ins memrr:$src),
- "lhbrx $rD, $src", LdStLoad,
+ "lhbrx $rD, $src", IIC_LdStLoad,
[(set i32:$rD, (PPClbrx xoaddr:$src, i16))]>;
def LWBRX : XForm_1<31, 534, (outs gprc:$rD), (ins memrr:$src),
- "lwbrx $rD, $src", LdStLoad,
+ "lwbrx $rD, $src", IIC_LdStLoad,
[(set i32:$rD, (PPClbrx xoaddr:$src, i32))]>;
def LFSX : XForm_25<31, 535, (outs f4rc:$frD), (ins memrr:$src),
- "lfsx $frD, $src", LdStLFD,
+ "lfsx $frD, $src", IIC_LdStLFD,
[(set f32:$frD, (load xaddr:$src))]>;
def LFDX : XForm_25<31, 599, (outs f8rc:$frD), (ins memrr:$src),
- "lfdx $frD, $src", LdStLFD,
+ "lfdx $frD, $src", IIC_LdStLFD,
[(set f64:$frD, (load xaddr:$src))]>;
def LFIWAX : XForm_25<31, 855, (outs f8rc:$frD), (ins memrr:$src),
- "lfiwax $frD, $src", LdStLFD,
+ "lfiwax $frD, $src", IIC_LdStLFD,
[(set f64:$frD, (PPClfiwax xoaddr:$src))]>;
def LFIWZX : XForm_25<31, 887, (outs f8rc:$frD), (ins memrr:$src),
- "lfiwzx $frD, $src", LdStLFD,
+ "lfiwzx $frD, $src", IIC_LdStLFD,
[(set f64:$frD, (PPClfiwzx xoaddr:$src))]>;
}
// Load Multiple
def LMW : DForm_1<46, (outs gprc:$rD), (ins memri:$src),
- "lmw $rD, $src", LdStLMW, []>;
+ "lmw $rD, $src", IIC_LdStLMW, []>;
//===----------------------------------------------------------------------===//
// PPC32 Store Instructions.
@@ -1435,38 +1511,38 @@ def LMW : DForm_1<46, (outs gprc:$rD), (ins memri:$src),
// Unindexed (r+i) Stores.
let PPC970_Unit = 2 in {
def STB : DForm_1<38, (outs), (ins gprc:$rS, memri:$src),
- "stb $rS, $src", LdStStore,
+ "stb $rS, $src", IIC_LdStStore,
[(truncstorei8 i32:$rS, iaddr:$src)]>;
def STH : DForm_1<44, (outs), (ins gprc:$rS, memri:$src),
- "sth $rS, $src", LdStStore,
+ "sth $rS, $src", IIC_LdStStore,
[(truncstorei16 i32:$rS, iaddr:$src)]>;
def STW : DForm_1<36, (outs), (ins gprc:$rS, memri:$src),
- "stw $rS, $src", LdStStore,
+ "stw $rS, $src", IIC_LdStStore,
[(store i32:$rS, iaddr:$src)]>;
def STFS : DForm_1<52, (outs), (ins f4rc:$rS, memri:$dst),
- "stfs $rS, $dst", LdStSTFD,
+ "stfs $rS, $dst", IIC_LdStSTFD,
[(store f32:$rS, iaddr:$dst)]>;
def STFD : DForm_1<54, (outs), (ins f8rc:$rS, memri:$dst),
- "stfd $rS, $dst", LdStSTFD,
+ "stfd $rS, $dst", IIC_LdStSTFD,
[(store f64:$rS, iaddr:$dst)]>;
}
// Unindexed (r+i) Stores with Update (preinc).
let PPC970_Unit = 2, mayStore = 1 in {
def STBU : DForm_1<39, (outs ptr_rc_nor0:$ea_res), (ins gprc:$rS, memri:$dst),
- "stbu $rS, $dst", LdStStoreUpd, []>,
+ "stbu $rS, $dst", IIC_LdStStoreUpd, []>,
RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">;
def STHU : DForm_1<45, (outs ptr_rc_nor0:$ea_res), (ins gprc:$rS, memri:$dst),
- "sthu $rS, $dst", LdStStoreUpd, []>,
+ "sthu $rS, $dst", IIC_LdStStoreUpd, []>,
RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">;
def STWU : DForm_1<37, (outs ptr_rc_nor0:$ea_res), (ins gprc:$rS, memri:$dst),
- "stwu $rS, $dst", LdStStoreUpd, []>,
+ "stwu $rS, $dst", IIC_LdStStoreUpd, []>,
RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">;
def STFSU : DForm_1<53, (outs ptr_rc_nor0:$ea_res), (ins f4rc:$rS, memri:$dst),
- "stfsu $rS, $dst", LdStSTFDU, []>,
+ "stfsu $rS, $dst", IIC_LdStSTFDU, []>,
RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">;
def STFDU : DForm_1<55, (outs ptr_rc_nor0:$ea_res), (ins f8rc:$rS, memri:$dst),
- "stfdu $rS, $dst", LdStSTFDU, []>,
+ "stfdu $rS, $dst", IIC_LdStSTFDU, []>,
RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">;
}
@@ -1487,59 +1563,59 @@ def : Pat<(pre_store f64:$rS, iPTR:$ptrreg, iaddroff:$ptroff),
// Indexed (r+r) Stores.
let PPC970_Unit = 2 in {
def STBX : XForm_8<31, 215, (outs), (ins gprc:$rS, memrr:$dst),
- "stbx $rS, $dst", LdStStore,
+ "stbx $rS, $dst", IIC_LdStStore,
[(truncstorei8 i32:$rS, xaddr:$dst)]>,
PPC970_DGroup_Cracked;
def STHX : XForm_8<31, 407, (outs), (ins gprc:$rS, memrr:$dst),
- "sthx $rS, $dst", LdStStore,
+ "sthx $rS, $dst", IIC_LdStStore,
[(truncstorei16 i32:$rS, xaddr:$dst)]>,
PPC970_DGroup_Cracked;
def STWX : XForm_8<31, 151, (outs), (ins gprc:$rS, memrr:$dst),
- "stwx $rS, $dst", LdStStore,
+ "stwx $rS, $dst", IIC_LdStStore,
[(store i32:$rS, xaddr:$dst)]>,
PPC970_DGroup_Cracked;
def STHBRX: XForm_8<31, 918, (outs), (ins gprc:$rS, memrr:$dst),
- "sthbrx $rS, $dst", LdStStore,
+ "sthbrx $rS, $dst", IIC_LdStStore,
[(PPCstbrx i32:$rS, xoaddr:$dst, i16)]>,
PPC970_DGroup_Cracked;
def STWBRX: XForm_8<31, 662, (outs), (ins gprc:$rS, memrr:$dst),
- "stwbrx $rS, $dst", LdStStore,
+ "stwbrx $rS, $dst", IIC_LdStStore,
[(PPCstbrx i32:$rS, xoaddr:$dst, i32)]>,
PPC970_DGroup_Cracked;
def STFIWX: XForm_28<31, 983, (outs), (ins f8rc:$frS, memrr:$dst),
- "stfiwx $frS, $dst", LdStSTFD,
+ "stfiwx $frS, $dst", IIC_LdStSTFD,
[(PPCstfiwx f64:$frS, xoaddr:$dst)]>;
def STFSX : XForm_28<31, 663, (outs), (ins f4rc:$frS, memrr:$dst),
- "stfsx $frS, $dst", LdStSTFD,
+ "stfsx $frS, $dst", IIC_LdStSTFD,
[(store f32:$frS, xaddr:$dst)]>;
def STFDX : XForm_28<31, 727, (outs), (ins f8rc:$frS, memrr:$dst),
- "stfdx $frS, $dst", LdStSTFD,
+ "stfdx $frS, $dst", IIC_LdStSTFD,
[(store f64:$frS, xaddr:$dst)]>;
}
// Indexed (r+r) Stores with Update (preinc).
let PPC970_Unit = 2, mayStore = 1 in {
def STBUX : XForm_8<31, 247, (outs ptr_rc_nor0:$ea_res), (ins gprc:$rS, memrr:$dst),
- "stbux $rS, $dst", LdStStoreUpd, []>,
+ "stbux $rS, $dst", IIC_LdStStoreUpd, []>,
RegConstraint<"$dst.ptrreg = $ea_res">, NoEncode<"$ea_res">,
PPC970_DGroup_Cracked;
def STHUX : XForm_8<31, 439, (outs ptr_rc_nor0:$ea_res), (ins gprc:$rS, memrr:$dst),
- "sthux $rS, $dst", LdStStoreUpd, []>,
+ "sthux $rS, $dst", IIC_LdStStoreUpd, []>,
RegConstraint<"$dst.ptrreg = $ea_res">, NoEncode<"$ea_res">,
PPC970_DGroup_Cracked;
def STWUX : XForm_8<31, 183, (outs ptr_rc_nor0:$ea_res), (ins gprc:$rS, memrr:$dst),
- "stwux $rS, $dst", LdStStoreUpd, []>,
+ "stwux $rS, $dst", IIC_LdStStoreUpd, []>,
RegConstraint<"$dst.ptrreg = $ea_res">, NoEncode<"$ea_res">,
PPC970_DGroup_Cracked;
def STFSUX: XForm_8<31, 695, (outs ptr_rc_nor0:$ea_res), (ins f4rc:$rS, memrr:$dst),
- "stfsux $rS, $dst", LdStSTFDU, []>,
+ "stfsux $rS, $dst", IIC_LdStSTFDU, []>,
RegConstraint<"$dst.ptrreg = $ea_res">, NoEncode<"$ea_res">,
PPC970_DGroup_Cracked;
def STFDUX: XForm_8<31, 759, (outs ptr_rc_nor0:$ea_res), (ins f8rc:$rS, memrr:$dst),
- "stfdux $rS, $dst", LdStSTFDU, []>,
+ "stfdux $rS, $dst", IIC_LdStSTFDU, []>,
RegConstraint<"$dst.ptrreg = $ea_res">, NoEncode<"$ea_res">,
PPC970_DGroup_Cracked;
}
@@ -1560,14 +1636,14 @@ def : Pat<(pre_store f64:$rS, iPTR:$ptrreg, iPTR:$ptroff),
// Store Multiple
def STMW : DForm_1<47, (outs), (ins gprc:$rS, memri:$dst),
- "stmw $rS, $dst", LdStLMW, []>;
+ "stmw $rS, $dst", IIC_LdStLMW, []>;
def SYNC : XForm_24_sync<31, 598, (outs), (ins i32imm:$L),
- "sync $L", LdStSync, []>, Requires<[IsNotBookE]>;
+ "sync $L", IIC_LdStSync, []>, Requires<[IsNotBookE]>;
let isCodeGenOnly = 1 in {
def MSYNC : XForm_24_sync<31, 598, (outs), (ins),
- "msync", LdStSync, []>, Requires<[IsBookE]> {
+ "msync", IIC_LdStSync, []>, Requires<[IsBookE]> {
let L = 0;
}
}
@@ -1581,41 +1657,41 @@ def : Pat<(int_ppc_sync), (MSYNC)>, Requires<[IsBookE]>;
let PPC970_Unit = 1 in { // FXU Operations.
def ADDI : DForm_2<14, (outs gprc:$rD), (ins gprc_nor0:$rA, s16imm:$imm),
- "addi $rD, $rA, $imm", IntSimple,
+ "addi $rD, $rA, $imm", IIC_IntSimple,
[(set i32:$rD, (add i32:$rA, imm32SExt16:$imm))]>;
let BaseName = "addic" in {
let Defs = [CARRY] in
def ADDIC : DForm_2<12, (outs gprc:$rD), (ins gprc:$rA, s16imm:$imm),
- "addic $rD, $rA, $imm", IntGeneral,
+ "addic $rD, $rA, $imm", IIC_IntGeneral,
[(set i32:$rD, (addc i32:$rA, imm32SExt16:$imm))]>,
RecFormRel, PPC970_DGroup_Cracked;
let Defs = [CARRY, CR0] in
def ADDICo : DForm_2<13, (outs gprc:$rD), (ins gprc:$rA, s16imm:$imm),
- "addic. $rD, $rA, $imm", IntGeneral,
+ "addic. $rD, $rA, $imm", IIC_IntGeneral,
[]>, isDOT, RecFormRel;
}
def ADDIS : DForm_2<15, (outs gprc:$rD), (ins gprc_nor0:$rA, s17imm:$imm),
- "addis $rD, $rA, $imm", IntSimple,
+ "addis $rD, $rA, $imm", IIC_IntSimple,
[(set i32:$rD, (add i32:$rA, imm16ShiftedSExt:$imm))]>;
let isCodeGenOnly = 1 in
def LA : DForm_2<14, (outs gprc:$rD), (ins gprc_nor0:$rA, s16imm:$sym),
- "la $rD, $sym($rA)", IntGeneral,
+ "la $rD, $sym($rA)", IIC_IntGeneral,
[(set i32:$rD, (add i32:$rA,
(PPClo tglobaladdr:$sym, 0)))]>;
def MULLI : DForm_2< 7, (outs gprc:$rD), (ins gprc:$rA, s16imm:$imm),
- "mulli $rD, $rA, $imm", IntMulLI,
+ "mulli $rD, $rA, $imm", IIC_IntMulLI,
[(set i32:$rD, (mul i32:$rA, imm32SExt16:$imm))]>;
let Defs = [CARRY] in
def SUBFIC : DForm_2< 8, (outs gprc:$rD), (ins gprc:$rA, s16imm:$imm),
- "subfic $rD, $rA, $imm", IntGeneral,
+ "subfic $rD, $rA, $imm", IIC_IntGeneral,
[(set i32:$rD, (subc imm32SExt16:$imm, i32:$rA))]>;
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
def LI : DForm_2_r0<14, (outs gprc:$rD), (ins s16imm:$imm),
- "li $rD, $imm", IntSimple,
+ "li $rD, $imm", IIC_IntSimple,
[(set i32:$rD, imm32SExt16:$imm)]>;
def LIS : DForm_2_r0<15, (outs gprc:$rD), (ins s17imm:$imm),
- "lis $rD, $imm", IntSimple,
+ "lis $rD, $imm", IIC_IntSimple,
[(set i32:$rD, imm16ShiftedSExt:$imm)]>;
}
}
@@ -1623,154 +1699,170 @@ let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
let PPC970_Unit = 1 in { // FXU Operations.
let Defs = [CR0] in {
def ANDIo : DForm_4<28, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2),
- "andi. $dst, $src1, $src2", IntGeneral,
+ "andi. $dst, $src1, $src2", IIC_IntGeneral,
[(set i32:$dst, (and i32:$src1, immZExt16:$src2))]>,
isDOT;
def ANDISo : DForm_4<29, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2),
- "andis. $dst, $src1, $src2", IntGeneral,
+ "andis. $dst, $src1, $src2", IIC_IntGeneral,
[(set i32:$dst, (and i32:$src1, imm16ShiftedZExt:$src2))]>,
isDOT;
}
def ORI : DForm_4<24, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2),
- "ori $dst, $src1, $src2", IntSimple,
+ "ori $dst, $src1, $src2", IIC_IntSimple,
[(set i32:$dst, (or i32:$src1, immZExt16:$src2))]>;
def ORIS : DForm_4<25, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2),
- "oris $dst, $src1, $src2", IntSimple,
+ "oris $dst, $src1, $src2", IIC_IntSimple,
[(set i32:$dst, (or i32:$src1, imm16ShiftedZExt:$src2))]>;
def XORI : DForm_4<26, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2),
- "xori $dst, $src1, $src2", IntSimple,
+ "xori $dst, $src1, $src2", IIC_IntSimple,
[(set i32:$dst, (xor i32:$src1, immZExt16:$src2))]>;
def XORIS : DForm_4<27, (outs gprc:$dst), (ins gprc:$src1, u16imm:$src2),
- "xoris $dst, $src1, $src2", IntSimple,
+ "xoris $dst, $src1, $src2", IIC_IntSimple,
[(set i32:$dst, (xor i32:$src1, imm16ShiftedZExt:$src2))]>;
-def NOP : DForm_4_zero<24, (outs), (ins), "nop", IntSimple,
+
+def NOP : DForm_4_zero<24, (outs), (ins), "nop", IIC_IntSimple,
[]>;
+let isCodeGenOnly = 1 in {
+// The POWER6 and POWER7 have special group-terminating nops.
+def NOP_GT_PWR6 : DForm_4_fixedreg_zero<24, 1, (outs), (ins),
+ "ori 1, 1, 0", IIC_IntSimple, []>;
+def NOP_GT_PWR7 : DForm_4_fixedreg_zero<24, 2, (outs), (ins),
+ "ori 2, 2, 0", IIC_IntSimple, []>;
+}
+
let isCompare = 1, neverHasSideEffects = 1 in {
def CMPWI : DForm_5_ext<11, (outs crrc:$crD), (ins gprc:$rA, s16imm:$imm),
- "cmpwi $crD, $rA, $imm", IntCompare>;
+ "cmpwi $crD, $rA, $imm", IIC_IntCompare>;
def CMPLWI : DForm_6_ext<10, (outs crrc:$dst), (ins gprc:$src1, u16imm:$src2),
- "cmplwi $dst, $src1, $src2", IntCompare>;
+ "cmplwi $dst, $src1, $src2", IIC_IntCompare>;
}
}
let PPC970_Unit = 1, neverHasSideEffects = 1 in { // FXU Operations.
+let isCommutable = 1 in {
defm NAND : XForm_6r<31, 476, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
- "nand", "$rA, $rS, $rB", IntSimple,
+ "nand", "$rA, $rS, $rB", IIC_IntSimple,
[(set i32:$rA, (not (and i32:$rS, i32:$rB)))]>;
defm AND : XForm_6r<31, 28, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
- "and", "$rA, $rS, $rB", IntSimple,
+ "and", "$rA, $rS, $rB", IIC_IntSimple,
[(set i32:$rA, (and i32:$rS, i32:$rB))]>;
+} // isCommutable
defm ANDC : XForm_6r<31, 60, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
- "andc", "$rA, $rS, $rB", IntSimple,
+ "andc", "$rA, $rS, $rB", IIC_IntSimple,
[(set i32:$rA, (and i32:$rS, (not i32:$rB)))]>;
+let isCommutable = 1 in {
defm OR : XForm_6r<31, 444, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
- "or", "$rA, $rS, $rB", IntSimple,
+ "or", "$rA, $rS, $rB", IIC_IntSimple,
[(set i32:$rA, (or i32:$rS, i32:$rB))]>;
defm NOR : XForm_6r<31, 124, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
- "nor", "$rA, $rS, $rB", IntSimple,
+ "nor", "$rA, $rS, $rB", IIC_IntSimple,
[(set i32:$rA, (not (or i32:$rS, i32:$rB)))]>;
+} // isCommutable
defm ORC : XForm_6r<31, 412, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
- "orc", "$rA, $rS, $rB", IntSimple,
+ "orc", "$rA, $rS, $rB", IIC_IntSimple,
[(set i32:$rA, (or i32:$rS, (not i32:$rB)))]>;
+let isCommutable = 1 in {
defm EQV : XForm_6r<31, 284, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
- "eqv", "$rA, $rS, $rB", IntSimple,
+ "eqv", "$rA, $rS, $rB", IIC_IntSimple,
[(set i32:$rA, (not (xor i32:$rS, i32:$rB)))]>;
defm XOR : XForm_6r<31, 316, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
- "xor", "$rA, $rS, $rB", IntSimple,
+ "xor", "$rA, $rS, $rB", IIC_IntSimple,
[(set i32:$rA, (xor i32:$rS, i32:$rB))]>;
+} // isCommutable
defm SLW : XForm_6r<31, 24, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
- "slw", "$rA, $rS, $rB", IntGeneral,
+ "slw", "$rA, $rS, $rB", IIC_IntGeneral,
[(set i32:$rA, (PPCshl i32:$rS, i32:$rB))]>;
defm SRW : XForm_6r<31, 536, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
- "srw", "$rA, $rS, $rB", IntGeneral,
+ "srw", "$rA, $rS, $rB", IIC_IntGeneral,
[(set i32:$rA, (PPCsrl i32:$rS, i32:$rB))]>;
defm SRAW : XForm_6rc<31, 792, (outs gprc:$rA), (ins gprc:$rS, gprc:$rB),
- "sraw", "$rA, $rS, $rB", IntShift,
+ "sraw", "$rA, $rS, $rB", IIC_IntShift,
[(set i32:$rA, (PPCsra i32:$rS, i32:$rB))]>;
}
let PPC970_Unit = 1 in { // FXU Operations.
let neverHasSideEffects = 1 in {
defm SRAWI : XForm_10rc<31, 824, (outs gprc:$rA), (ins gprc:$rS, u5imm:$SH),
- "srawi", "$rA, $rS, $SH", IntShift,
+ "srawi", "$rA, $rS, $SH", IIC_IntShift,
[(set i32:$rA, (sra i32:$rS, (i32 imm:$SH)))]>;
defm CNTLZW : XForm_11r<31, 26, (outs gprc:$rA), (ins gprc:$rS),
- "cntlzw", "$rA, $rS", IntGeneral,
+ "cntlzw", "$rA, $rS", IIC_IntGeneral,
[(set i32:$rA, (ctlz i32:$rS))]>;
defm EXTSB : XForm_11r<31, 954, (outs gprc:$rA), (ins gprc:$rS),
- "extsb", "$rA, $rS", IntSimple,
+ "extsb", "$rA, $rS", IIC_IntSimple,
[(set i32:$rA, (sext_inreg i32:$rS, i8))]>;
defm EXTSH : XForm_11r<31, 922, (outs gprc:$rA), (ins gprc:$rS),
- "extsh", "$rA, $rS", IntSimple,
+ "extsh", "$rA, $rS", IIC_IntSimple,
[(set i32:$rA, (sext_inreg i32:$rS, i16))]>;
}
let isCompare = 1, neverHasSideEffects = 1 in {
def CMPW : XForm_16_ext<31, 0, (outs crrc:$crD), (ins gprc:$rA, gprc:$rB),
- "cmpw $crD, $rA, $rB", IntCompare>;
+ "cmpw $crD, $rA, $rB", IIC_IntCompare>;
def CMPLW : XForm_16_ext<31, 32, (outs crrc:$crD), (ins gprc:$rA, gprc:$rB),
- "cmplw $crD, $rA, $rB", IntCompare>;
+ "cmplw $crD, $rA, $rB", IIC_IntCompare>;
}
}
let PPC970_Unit = 3 in { // FPU Operations.
//def FCMPO : XForm_17<63, 32, (outs CRRC:$crD), (ins FPRC:$fA, FPRC:$fB),
-// "fcmpo $crD, $fA, $fB", FPCompare>;
+// "fcmpo $crD, $fA, $fB", IIC_FPCompare>;
let isCompare = 1, neverHasSideEffects = 1 in {
def FCMPUS : XForm_17<63, 0, (outs crrc:$crD), (ins f4rc:$fA, f4rc:$fB),
- "fcmpu $crD, $fA, $fB", FPCompare>;
+ "fcmpu $crD, $fA, $fB", IIC_FPCompare>;
+ let Interpretation64Bit = 1, isCodeGenOnly = 1 in
def FCMPUD : XForm_17<63, 0, (outs crrc:$crD), (ins f8rc:$fA, f8rc:$fB),
- "fcmpu $crD, $fA, $fB", FPCompare>;
+ "fcmpu $crD, $fA, $fB", IIC_FPCompare>;
}
let Uses = [RM] in {
let neverHasSideEffects = 1 in {
defm FCTIW : XForm_26r<63, 14, (outs f8rc:$frD), (ins f8rc:$frB),
- "fctiw", "$frD, $frB", FPGeneral,
+ "fctiw", "$frD, $frB", IIC_FPGeneral,
[]>;
defm FCTIWZ : XForm_26r<63, 15, (outs f8rc:$frD), (ins f8rc:$frB),
- "fctiwz", "$frD, $frB", FPGeneral,
+ "fctiwz", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (PPCfctiwz f64:$frB))]>;
defm FRSP : XForm_26r<63, 12, (outs f4rc:$frD), (ins f8rc:$frB),
- "frsp", "$frD, $frB", FPGeneral,
+ "frsp", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (fround f64:$frB))]>;
- let Interpretation64Bit = 1 in
+ let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm FRIND : XForm_26r<63, 392, (outs f8rc:$frD), (ins f8rc:$frB),
- "frin", "$frD, $frB", FPGeneral,
+ "frin", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (frnd f64:$frB))]>;
defm FRINS : XForm_26r<63, 392, (outs f4rc:$frD), (ins f4rc:$frB),
- "frin", "$frD, $frB", FPGeneral,
+ "frin", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (frnd f32:$frB))]>;
}
let neverHasSideEffects = 1 in {
- let Interpretation64Bit = 1 in
+ let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm FRIPD : XForm_26r<63, 456, (outs f8rc:$frD), (ins f8rc:$frB),
- "frip", "$frD, $frB", FPGeneral,
+ "frip", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (fceil f64:$frB))]>;
defm FRIPS : XForm_26r<63, 456, (outs f4rc:$frD), (ins f4rc:$frB),
- "frip", "$frD, $frB", FPGeneral,
+ "frip", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (fceil f32:$frB))]>;
- let Interpretation64Bit = 1 in
+ let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm FRIZD : XForm_26r<63, 424, (outs f8rc:$frD), (ins f8rc:$frB),
- "friz", "$frD, $frB", FPGeneral,
+ "friz", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (ftrunc f64:$frB))]>;
defm FRIZS : XForm_26r<63, 424, (outs f4rc:$frD), (ins f4rc:$frB),
- "friz", "$frD, $frB", FPGeneral,
+ "friz", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (ftrunc f32:$frB))]>;
- let Interpretation64Bit = 1 in
+ let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm FRIMD : XForm_26r<63, 488, (outs f8rc:$frD), (ins f8rc:$frB),
- "frim", "$frD, $frB", FPGeneral,
+ "frim", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (ffloor f64:$frB))]>;
defm FRIMS : XForm_26r<63, 488, (outs f4rc:$frD), (ins f4rc:$frB),
- "frim", "$frD, $frB", FPGeneral,
+ "frim", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (ffloor f32:$frB))]>;
defm FSQRT : XForm_26r<63, 22, (outs f8rc:$frD), (ins f8rc:$frB),
- "fsqrt", "$frD, $frB", FPSqrt,
+ "fsqrt", "$frD, $frB", IIC_FPSqrtD,
[(set f64:$frD, (fsqrt f64:$frB))]>;
defm FSQRTS : XForm_26r<59, 22, (outs f4rc:$frD), (ins f4rc:$frB),
- "fsqrts", "$frD, $frB", FPSqrt,
+ "fsqrts", "$frD, $frB", IIC_FPSqrtS,
[(set f32:$frD, (fsqrt f32:$frB))]>;
}
}
@@ -1782,54 +1874,54 @@ let Uses = [RM] in {
/// sneak into a d-group with a store).
let neverHasSideEffects = 1 in
defm FMR : XForm_26r<63, 72, (outs f4rc:$frD), (ins f4rc:$frB),
- "fmr", "$frD, $frB", FPGeneral,
+ "fmr", "$frD, $frB", IIC_FPGeneral,
[]>, // (set f32:$frD, f32:$frB)
PPC970_Unit_Pseudo;
let PPC970_Unit = 3, neverHasSideEffects = 1 in { // FPU Operations.
// These are artificially split into two different forms, for 4/8 byte FP.
defm FABSS : XForm_26r<63, 264, (outs f4rc:$frD), (ins f4rc:$frB),
- "fabs", "$frD, $frB", FPGeneral,
+ "fabs", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (fabs f32:$frB))]>;
-let Interpretation64Bit = 1 in
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm FABSD : XForm_26r<63, 264, (outs f8rc:$frD), (ins f8rc:$frB),
- "fabs", "$frD, $frB", FPGeneral,
+ "fabs", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (fabs f64:$frB))]>;
defm FNABSS : XForm_26r<63, 136, (outs f4rc:$frD), (ins f4rc:$frB),
- "fnabs", "$frD, $frB", FPGeneral,
+ "fnabs", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (fneg (fabs f32:$frB)))]>;
-let Interpretation64Bit = 1 in
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm FNABSD : XForm_26r<63, 136, (outs f8rc:$frD), (ins f8rc:$frB),
- "fnabs", "$frD, $frB", FPGeneral,
+ "fnabs", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (fneg (fabs f64:$frB)))]>;
defm FNEGS : XForm_26r<63, 40, (outs f4rc:$frD), (ins f4rc:$frB),
- "fneg", "$frD, $frB", FPGeneral,
+ "fneg", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (fneg f32:$frB))]>;
-let Interpretation64Bit = 1 in
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm FNEGD : XForm_26r<63, 40, (outs f8rc:$frD), (ins f8rc:$frB),
- "fneg", "$frD, $frB", FPGeneral,
+ "fneg", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (fneg f64:$frB))]>;
defm FCPSGNS : XForm_28r<63, 8, (outs f4rc:$frD), (ins f4rc:$frA, f4rc:$frB),
- "fcpsgn", "$frD, $frA, $frB", FPGeneral,
+ "fcpsgn", "$frD, $frA, $frB", IIC_FPGeneral,
[(set f32:$frD, (fcopysign f32:$frB, f32:$frA))]>;
-let Interpretation64Bit = 1 in
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm FCPSGND : XForm_28r<63, 8, (outs f8rc:$frD), (ins f8rc:$frA, f8rc:$frB),
- "fcpsgn", "$frD, $frA, $frB", FPGeneral,
+ "fcpsgn", "$frD, $frA, $frB", IIC_FPGeneral,
[(set f64:$frD, (fcopysign f64:$frB, f64:$frA))]>;
// Reciprocal estimates.
defm FRE : XForm_26r<63, 24, (outs f8rc:$frD), (ins f8rc:$frB),
- "fre", "$frD, $frB", FPGeneral,
+ "fre", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (PPCfre f64:$frB))]>;
defm FRES : XForm_26r<59, 24, (outs f4rc:$frD), (ins f4rc:$frB),
- "fres", "$frD, $frB", FPGeneral,
+ "fres", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (PPCfre f32:$frB))]>;
defm FRSQRTE : XForm_26r<63, 26, (outs f8rc:$frD), (ins f8rc:$frB),
- "frsqrte", "$frD, $frB", FPGeneral,
+ "frsqrte", "$frD, $frB", IIC_FPGeneral,
[(set f64:$frD, (PPCfrsqrte f64:$frB))]>;
defm FRSQRTES : XForm_26r<59, 26, (outs f4rc:$frD), (ins f4rc:$frB),
- "frsqrtes", "$frD, $frB", FPGeneral,
+ "frsqrtes", "$frD, $frB", IIC_FPGeneral,
[(set f32:$frD, (PPCfrsqrte f32:$frB))]>;
}
@@ -1837,57 +1929,67 @@ defm FRSQRTES : XForm_26r<59, 26, (outs f4rc:$frD), (ins f4rc:$frB),
//
let neverHasSideEffects = 1 in
def MCRF : XLForm_3<19, 0, (outs crrc:$BF), (ins crrc:$BFA),
- "mcrf $BF, $BFA", BrMCR>,
+ "mcrf $BF, $BFA", IIC_BrMCR>,
PPC970_DGroup_First, PPC970_Unit_CRU;
+let isCommutable = 1 in {
def CRAND : XLForm_1<19, 257, (outs crbitrc:$CRD),
(ins crbitrc:$CRA, crbitrc:$CRB),
- "crand $CRD, $CRA, $CRB", BrCR, []>;
+ "crand $CRD, $CRA, $CRB", IIC_BrCR,
+ [(set i1:$CRD, (and i1:$CRA, i1:$CRB))]>;
def CRNAND : XLForm_1<19, 225, (outs crbitrc:$CRD),
(ins crbitrc:$CRA, crbitrc:$CRB),
- "crnand $CRD, $CRA, $CRB", BrCR, []>;
+ "crnand $CRD, $CRA, $CRB", IIC_BrCR,
+ [(set i1:$CRD, (not (and i1:$CRA, i1:$CRB)))]>;
def CROR : XLForm_1<19, 449, (outs crbitrc:$CRD),
(ins crbitrc:$CRA, crbitrc:$CRB),
- "cror $CRD, $CRA, $CRB", BrCR, []>;
+ "cror $CRD, $CRA, $CRB", IIC_BrCR,
+ [(set i1:$CRD, (or i1:$CRA, i1:$CRB))]>;
def CRXOR : XLForm_1<19, 193, (outs crbitrc:$CRD),
(ins crbitrc:$CRA, crbitrc:$CRB),
- "crxor $CRD, $CRA, $CRB", BrCR, []>;
+ "crxor $CRD, $CRA, $CRB", IIC_BrCR,
+ [(set i1:$CRD, (xor i1:$CRA, i1:$CRB))]>;
def CRNOR : XLForm_1<19, 33, (outs crbitrc:$CRD),
(ins crbitrc:$CRA, crbitrc:$CRB),
- "crnor $CRD, $CRA, $CRB", BrCR, []>;
+ "crnor $CRD, $CRA, $CRB", IIC_BrCR,
+ [(set i1:$CRD, (not (or i1:$CRA, i1:$CRB)))]>;
def CREQV : XLForm_1<19, 289, (outs crbitrc:$CRD),
(ins crbitrc:$CRA, crbitrc:$CRB),
- "creqv $CRD, $CRA, $CRB", BrCR, []>;
+ "creqv $CRD, $CRA, $CRB", IIC_BrCR,
+ [(set i1:$CRD, (not (xor i1:$CRA, i1:$CRB)))]>;
+} // isCommutable
def CRANDC : XLForm_1<19, 129, (outs crbitrc:$CRD),
(ins crbitrc:$CRA, crbitrc:$CRB),
- "crandc $CRD, $CRA, $CRB", BrCR, []>;
+ "crandc $CRD, $CRA, $CRB", IIC_BrCR,
+ [(set i1:$CRD, (and i1:$CRA, (not i1:$CRB)))]>;
def CRORC : XLForm_1<19, 417, (outs crbitrc:$CRD),
(ins crbitrc:$CRA, crbitrc:$CRB),
- "crorc $CRD, $CRA, $CRB", BrCR, []>;
+ "crorc $CRD, $CRA, $CRB", IIC_BrCR,
+ [(set i1:$CRD, (or i1:$CRA, (not i1:$CRB)))]>;
let isCodeGenOnly = 1 in {
def CRSET : XLForm_1_ext<19, 289, (outs crbitrc:$dst), (ins),
- "creqv $dst, $dst, $dst", BrCR,
- []>;
+ "creqv $dst, $dst, $dst", IIC_BrCR,
+ [(set i1:$dst, 1)]>;
def CRUNSET: XLForm_1_ext<19, 193, (outs crbitrc:$dst), (ins),
- "crxor $dst, $dst, $dst", BrCR,
- []>;
+ "crxor $dst, $dst, $dst", IIC_BrCR,
+ [(set i1:$dst, 0)]>;
let Defs = [CR1EQ], CRD = 6 in {
def CR6SET : XLForm_1_ext<19, 289, (outs), (ins),
- "creqv 6, 6, 6", BrCR,
+ "creqv 6, 6, 6", IIC_BrCR,
[(PPCcr6set)]>;
def CR6UNSET: XLForm_1_ext<19, 193, (outs), (ins),
- "crxor 6, 6, 6", BrCR,
+ "crxor 6, 6, 6", IIC_BrCR,
[(PPCcr6unset)]>;
}
}
@@ -1896,38 +1998,38 @@ def CR6UNSET: XLForm_1_ext<19, 193, (outs), (ins),
//
def MFSPR : XFXForm_1<31, 339, (outs gprc:$RT), (ins i32imm:$SPR),
- "mfspr $RT, $SPR", SprMFSPR>;
+ "mfspr $RT, $SPR", IIC_SprMFSPR>;
def MTSPR : XFXForm_1<31, 467, (outs), (ins i32imm:$SPR, gprc:$RT),
- "mtspr $SPR, $RT", SprMTSPR>;
+ "mtspr $SPR, $RT", IIC_SprMTSPR>;
def MFTB : XFXForm_1<31, 371, (outs gprc:$RT), (ins i32imm:$SPR),
- "mftb $RT, $SPR", SprMFTB>, Deprecated<DeprecatedMFTB>;
+ "mftb $RT, $SPR", IIC_SprMFTB>, Deprecated<DeprecatedMFTB>;
let Uses = [CTR] in {
def MFCTR : XFXForm_1_ext<31, 339, 9, (outs gprc:$rT), (ins),
- "mfctr $rT", SprMFSPR>,
+ "mfctr $rT", IIC_SprMFSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
let Defs = [CTR], Pattern = [(PPCmtctr i32:$rS)] in {
def MTCTR : XFXForm_7_ext<31, 467, 9, (outs), (ins gprc:$rS),
- "mtctr $rS", SprMTSPR>,
+ "mtctr $rS", IIC_SprMTSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
let hasSideEffects = 1, isCodeGenOnly = 1, Defs = [CTR] in {
let Pattern = [(int_ppc_mtctr i32:$rS)] in
def MTCTRloop : XFXForm_7_ext<31, 467, 9, (outs), (ins gprc:$rS),
- "mtctr $rS", SprMTSPR>,
+ "mtctr $rS", IIC_SprMTSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
let Defs = [LR] in {
def MTLR : XFXForm_7_ext<31, 467, 8, (outs), (ins gprc:$rS),
- "mtlr $rS", SprMTSPR>,
+ "mtlr $rS", IIC_SprMTSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
let Uses = [LR] in {
def MFLR : XFXForm_1_ext<31, 339, 8, (outs gprc:$rT), (ins),
- "mflr $rT", SprMFSPR>,
+ "mflr $rT", IIC_SprMFSPR>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
@@ -1936,19 +2038,19 @@ let isCodeGenOnly = 1 in {
// like a GPR on the PPC970. As such, copies in and out have the same
// performance characteristics as an OR instruction.
def MTVRSAVE : XFXForm_7_ext<31, 467, 256, (outs), (ins gprc:$rS),
- "mtspr 256, $rS", IntGeneral>,
+ "mtspr 256, $rS", IIC_IntGeneral>,
PPC970_DGroup_Single, PPC970_Unit_FXU;
def MFVRSAVE : XFXForm_1_ext<31, 339, 256, (outs gprc:$rT), (ins),
- "mfspr $rT, 256", IntGeneral>,
+ "mfspr $rT, 256", IIC_IntGeneral>,
PPC970_DGroup_First, PPC970_Unit_FXU;
def MTVRSAVEv : XFXForm_7_ext<31, 467, 256,
(outs VRSAVERC:$reg), (ins gprc:$rS),
- "mtspr 256, $rS", IntGeneral>,
+ "mtspr 256, $rS", IIC_IntGeneral>,
PPC970_DGroup_Single, PPC970_Unit_FXU;
def MFVRSAVEv : XFXForm_1_ext<31, 339, 256, (outs gprc:$rT),
(ins VRSAVERC:$reg),
- "mfspr $rT, 256", IntGeneral>,
+ "mfspr $rT, 256", IIC_IntGeneral>,
PPC970_DGroup_First, PPC970_Unit_FXU;
}
@@ -1966,20 +2068,20 @@ def RESTORE_VRSAVE : Pseudo<(outs VRSAVERC:$vrsave), (ins memri:$F),
let neverHasSideEffects = 1 in {
def MTOCRF: XFXForm_5a<31, 144, (outs crbitm:$FXM), (ins gprc:$ST),
- "mtocrf $FXM, $ST", BrMCRX>,
+ "mtocrf $FXM, $ST", IIC_BrMCRX>,
PPC970_DGroup_First, PPC970_Unit_CRU;
def MTCRF : XFXForm_5<31, 144, (outs), (ins i32imm:$FXM, gprc:$rS),
- "mtcrf $FXM, $rS", BrMCRX>,
+ "mtcrf $FXM, $rS", IIC_BrMCRX>,
PPC970_MicroCode, PPC970_Unit_CRU;
let hasExtraSrcRegAllocReq = 1 in // to enable post-ra anti-dep breaking.
def MFOCRF: XFXForm_5a<31, 19, (outs gprc:$rT), (ins crbitm:$FXM),
- "mfocrf $rT, $FXM", SprMFCR>,
+ "mfocrf $rT, $FXM", IIC_SprMFCRF>,
PPC970_DGroup_First, PPC970_Unit_CRU;
def MFCR : XFXForm_3<31, 19, (outs gprc:$rT), (ins),
- "mfcr $rT", SprMFCR>,
+ "mfcr $rT", IIC_SprMFCR>,
PPC970_MicroCode, PPC970_Unit_CRU;
} // neverHasSideEffects = 1
@@ -1993,18 +2095,18 @@ let usesCustomInserter = 1, Uses = [RM] in {
// to manipulate FPSCR. Note that FPSCR is not modeled at the DAG level.
let Uses = [RM], Defs = [RM] in {
def MTFSB0 : XForm_43<63, 70, (outs), (ins u5imm:$FM),
- "mtfsb0 $FM", IntMTFSB0, []>,
+ "mtfsb0 $FM", IIC_IntMTFSB0, []>,
PPC970_DGroup_Single, PPC970_Unit_FPU;
def MTFSB1 : XForm_43<63, 38, (outs), (ins u5imm:$FM),
- "mtfsb1 $FM", IntMTFSB0, []>,
+ "mtfsb1 $FM", IIC_IntMTFSB0, []>,
PPC970_DGroup_Single, PPC970_Unit_FPU;
def MTFSF : XFLForm<63, 711, (outs), (ins i32imm:$FM, f8rc:$rT),
- "mtfsf $FM, $rT", IntMTFSB0, []>,
+ "mtfsf $FM, $rT", IIC_IntMTFSB0, []>,
PPC970_DGroup_Single, PPC970_Unit_FPU;
}
let Uses = [RM] in {
def MFFS : XForm_42<63, 583, (outs f8rc:$rT), (ins),
- "mffs $rT", IntMFFS,
+ "mffs $rT", IIC_IntMFFS,
[(set f64:$rT, (PPCmffs))]>,
PPC970_DGroup_Single, PPC970_Unit_FPU;
}
@@ -2012,63 +2114,68 @@ let Uses = [RM] in {
let PPC970_Unit = 1, neverHasSideEffects = 1 in { // FXU Operations.
// XO-Form instructions. Arithmetic instructions that can set overflow bit
-//
+let isCommutable = 1 in
defm ADD4 : XOForm_1r<31, 266, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
- "add", "$rT, $rA, $rB", IntSimple,
+ "add", "$rT, $rA, $rB", IIC_IntSimple,
[(set i32:$rT, (add i32:$rA, i32:$rB))]>;
let isCodeGenOnly = 1 in
def ADD4TLS : XOForm_1<31, 266, 0, (outs gprc:$rT), (ins gprc:$rA, tlsreg32:$rB),
- "add $rT, $rA, $rB", IntSimple,
+ "add $rT, $rA, $rB", IIC_IntSimple,
[(set i32:$rT, (add i32:$rA, tglobaltlsaddr:$rB))]>;
+let isCommutable = 1 in
defm ADDC : XOForm_1rc<31, 10, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
- "addc", "$rT, $rA, $rB", IntGeneral,
+ "addc", "$rT, $rA, $rB", IIC_IntGeneral,
[(set i32:$rT, (addc i32:$rA, i32:$rB))]>,
PPC970_DGroup_Cracked;
+
defm DIVW : XOForm_1r<31, 491, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
- "divw", "$rT, $rA, $rB", IntDivW,
+ "divw", "$rT, $rA, $rB", IIC_IntDivW,
[(set i32:$rT, (sdiv i32:$rA, i32:$rB))]>,
PPC970_DGroup_First, PPC970_DGroup_Cracked;
defm DIVWU : XOForm_1r<31, 459, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
- "divwu", "$rT, $rA, $rB", IntDivW,
+ "divwu", "$rT, $rA, $rB", IIC_IntDivW,
[(set i32:$rT, (udiv i32:$rA, i32:$rB))]>,
PPC970_DGroup_First, PPC970_DGroup_Cracked;
+let isCommutable = 1 in {
defm MULHW : XOForm_1r<31, 75, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
- "mulhw", "$rT, $rA, $rB", IntMulHW,
+ "mulhw", "$rT, $rA, $rB", IIC_IntMulHW,
[(set i32:$rT, (mulhs i32:$rA, i32:$rB))]>;
defm MULHWU : XOForm_1r<31, 11, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
- "mulhwu", "$rT, $rA, $rB", IntMulHWU,
+ "mulhwu", "$rT, $rA, $rB", IIC_IntMulHWU,
[(set i32:$rT, (mulhu i32:$rA, i32:$rB))]>;
defm MULLW : XOForm_1r<31, 235, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
- "mullw", "$rT, $rA, $rB", IntMulHW,
+ "mullw", "$rT, $rA, $rB", IIC_IntMulHW,
[(set i32:$rT, (mul i32:$rA, i32:$rB))]>;
+} // isCommutable
defm SUBF : XOForm_1r<31, 40, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
- "subf", "$rT, $rA, $rB", IntGeneral,
+ "subf", "$rT, $rA, $rB", IIC_IntGeneral,
[(set i32:$rT, (sub i32:$rB, i32:$rA))]>;
defm SUBFC : XOForm_1rc<31, 8, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
- "subfc", "$rT, $rA, $rB", IntGeneral,
+ "subfc", "$rT, $rA, $rB", IIC_IntGeneral,
[(set i32:$rT, (subc i32:$rB, i32:$rA))]>,
PPC970_DGroup_Cracked;
defm NEG : XOForm_3r<31, 104, 0, (outs gprc:$rT), (ins gprc:$rA),
- "neg", "$rT, $rA", IntSimple,
+ "neg", "$rT, $rA", IIC_IntSimple,
[(set i32:$rT, (ineg i32:$rA))]>;
let Uses = [CARRY] in {
+let isCommutable = 1 in
defm ADDE : XOForm_1rc<31, 138, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
- "adde", "$rT, $rA, $rB", IntGeneral,
+ "adde", "$rT, $rA, $rB", IIC_IntGeneral,
[(set i32:$rT, (adde i32:$rA, i32:$rB))]>;
defm ADDME : XOForm_3rc<31, 234, 0, (outs gprc:$rT), (ins gprc:$rA),
- "addme", "$rT, $rA", IntGeneral,
+ "addme", "$rT, $rA", IIC_IntGeneral,
[(set i32:$rT, (adde i32:$rA, -1))]>;
defm ADDZE : XOForm_3rc<31, 202, 0, (outs gprc:$rT), (ins gprc:$rA),
- "addze", "$rT, $rA", IntGeneral,
+ "addze", "$rT, $rA", IIC_IntGeneral,
[(set i32:$rT, (adde i32:$rA, 0))]>;
defm SUBFE : XOForm_1rc<31, 136, 0, (outs gprc:$rT), (ins gprc:$rA, gprc:$rB),
- "subfe", "$rT, $rA, $rB", IntGeneral,
+ "subfe", "$rT, $rA, $rB", IIC_IntGeneral,
[(set i32:$rT, (sube i32:$rB, i32:$rA))]>;
defm SUBFME : XOForm_3rc<31, 232, 0, (outs gprc:$rT), (ins gprc:$rA),
- "subfme", "$rT, $rA", IntGeneral,
+ "subfme", "$rT, $rA", IIC_IntGeneral,
[(set i32:$rT, (sube -1, i32:$rA))]>;
defm SUBFZE : XOForm_3rc<31, 200, 0, (outs gprc:$rT), (ins gprc:$rA),
- "subfze", "$rT, $rA", IntGeneral,
+ "subfze", "$rT, $rA", IIC_IntGeneral,
[(set i32:$rT, (sube 0, i32:$rA))]>;
}
}
@@ -2078,90 +2185,96 @@ defm SUBFZE : XOForm_3rc<31, 200, 0, (outs gprc:$rT), (ins gprc:$rA),
//
let PPC970_Unit = 3, neverHasSideEffects = 1 in { // FPU Operations.
let Uses = [RM] in {
+let isCommutable = 1 in {
defm FMADD : AForm_1r<63, 29,
(outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRC, f8rc:$FRB),
- "fmadd", "$FRT, $FRA, $FRC, $FRB", FPFused,
+ "fmadd", "$FRT, $FRA, $FRC, $FRB", IIC_FPFused,
[(set f64:$FRT, (fma f64:$FRA, f64:$FRC, f64:$FRB))]>;
defm FMADDS : AForm_1r<59, 29,
(outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRC, f4rc:$FRB),
- "fmadds", "$FRT, $FRA, $FRC, $FRB", FPGeneral,
+ "fmadds", "$FRT, $FRA, $FRC, $FRB", IIC_FPGeneral,
[(set f32:$FRT, (fma f32:$FRA, f32:$FRC, f32:$FRB))]>;
defm FMSUB : AForm_1r<63, 28,
(outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRC, f8rc:$FRB),
- "fmsub", "$FRT, $FRA, $FRC, $FRB", FPFused,
+ "fmsub", "$FRT, $FRA, $FRC, $FRB", IIC_FPFused,
[(set f64:$FRT,
(fma f64:$FRA, f64:$FRC, (fneg f64:$FRB)))]>;
defm FMSUBS : AForm_1r<59, 28,
(outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRC, f4rc:$FRB),
- "fmsubs", "$FRT, $FRA, $FRC, $FRB", FPGeneral,
+ "fmsubs", "$FRT, $FRA, $FRC, $FRB", IIC_FPGeneral,
[(set f32:$FRT,
(fma f32:$FRA, f32:$FRC, (fneg f32:$FRB)))]>;
defm FNMADD : AForm_1r<63, 31,
(outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRC, f8rc:$FRB),
- "fnmadd", "$FRT, $FRA, $FRC, $FRB", FPFused,
+ "fnmadd", "$FRT, $FRA, $FRC, $FRB", IIC_FPFused,
[(set f64:$FRT,
(fneg (fma f64:$FRA, f64:$FRC, f64:$FRB)))]>;
defm FNMADDS : AForm_1r<59, 31,
(outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRC, f4rc:$FRB),
- "fnmadds", "$FRT, $FRA, $FRC, $FRB", FPGeneral,
+ "fnmadds", "$FRT, $FRA, $FRC, $FRB", IIC_FPGeneral,
[(set f32:$FRT,
(fneg (fma f32:$FRA, f32:$FRC, f32:$FRB)))]>;
defm FNMSUB : AForm_1r<63, 30,
(outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRC, f8rc:$FRB),
- "fnmsub", "$FRT, $FRA, $FRC, $FRB", FPFused,
+ "fnmsub", "$FRT, $FRA, $FRC, $FRB", IIC_FPFused,
[(set f64:$FRT, (fneg (fma f64:$FRA, f64:$FRC,
(fneg f64:$FRB))))]>;
defm FNMSUBS : AForm_1r<59, 30,
(outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRC, f4rc:$FRB),
- "fnmsubs", "$FRT, $FRA, $FRC, $FRB", FPGeneral,
+ "fnmsubs", "$FRT, $FRA, $FRC, $FRB", IIC_FPGeneral,
[(set f32:$FRT, (fneg (fma f32:$FRA, f32:$FRC,
(fneg f32:$FRB))))]>;
+} // isCommutable
}
// FSEL is artificially split into 4 and 8-byte forms for the result. To avoid
// having 4 of these, force the comparison to always be an 8-byte double (code
// should use an FMRSD if the input comparison value really wants to be a float)
// and use 4/8-byte forms for the result and operand type.
-let Interpretation64Bit = 1 in
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in
defm FSELD : AForm_1r<63, 23,
(outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRC, f8rc:$FRB),
- "fsel", "$FRT, $FRA, $FRC, $FRB", FPGeneral,
+ "fsel", "$FRT, $FRA, $FRC, $FRB", IIC_FPGeneral,
[(set f64:$FRT, (PPCfsel f64:$FRA, f64:$FRC, f64:$FRB))]>;
defm FSELS : AForm_1r<63, 23,
(outs f4rc:$FRT), (ins f8rc:$FRA, f4rc:$FRC, f4rc:$FRB),
- "fsel", "$FRT, $FRA, $FRC, $FRB", FPGeneral,
+ "fsel", "$FRT, $FRA, $FRC, $FRB", IIC_FPGeneral,
[(set f32:$FRT, (PPCfsel f64:$FRA, f32:$FRC, f32:$FRB))]>;
let Uses = [RM] in {
+ let isCommutable = 1 in {
defm FADD : AForm_2r<63, 21,
(outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRB),
- "fadd", "$FRT, $FRA, $FRB", FPAddSub,
+ "fadd", "$FRT, $FRA, $FRB", IIC_FPAddSub,
[(set f64:$FRT, (fadd f64:$FRA, f64:$FRB))]>;
defm FADDS : AForm_2r<59, 21,
(outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRB),
- "fadds", "$FRT, $FRA, $FRB", FPGeneral,
+ "fadds", "$FRT, $FRA, $FRB", IIC_FPGeneral,
[(set f32:$FRT, (fadd f32:$FRA, f32:$FRB))]>;
+ } // isCommutable
defm FDIV : AForm_2r<63, 18,
(outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRB),
- "fdiv", "$FRT, $FRA, $FRB", FPDivD,
+ "fdiv", "$FRT, $FRA, $FRB", IIC_FPDivD,
[(set f64:$FRT, (fdiv f64:$FRA, f64:$FRB))]>;
defm FDIVS : AForm_2r<59, 18,
(outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRB),
- "fdivs", "$FRT, $FRA, $FRB", FPDivS,
+ "fdivs", "$FRT, $FRA, $FRB", IIC_FPDivS,
[(set f32:$FRT, (fdiv f32:$FRA, f32:$FRB))]>;
+ let isCommutable = 1 in {
defm FMUL : AForm_3r<63, 25,
(outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRC),
- "fmul", "$FRT, $FRA, $FRC", FPFused,
+ "fmul", "$FRT, $FRA, $FRC", IIC_FPFused,
[(set f64:$FRT, (fmul f64:$FRA, f64:$FRC))]>;
defm FMULS : AForm_3r<59, 25,
(outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRC),
- "fmuls", "$FRT, $FRA, $FRC", FPGeneral,
+ "fmuls", "$FRT, $FRA, $FRC", IIC_FPGeneral,
[(set f32:$FRT, (fmul f32:$FRA, f32:$FRC))]>;
+ } // isCommutable
defm FSUB : AForm_2r<63, 20,
(outs f8rc:$FRT), (ins f8rc:$FRA, f8rc:$FRB),
- "fsub", "$FRT, $FRA, $FRB", FPAddSub,
+ "fsub", "$FRT, $FRA, $FRB", IIC_FPAddSub,
[(set f64:$FRT, (fsub f64:$FRA, f64:$FRB))]>;
defm FSUBS : AForm_2r<59, 20,
(outs f4rc:$FRT), (ins f4rc:$FRA, f4rc:$FRB),
- "fsubs", "$FRT, $FRA, $FRB", FPGeneral,
+ "fsubs", "$FRT, $FRA, $FRB", IIC_FPGeneral,
[(set f32:$FRT, (fsub f32:$FRA, f32:$FRB))]>;
}
}
@@ -2171,7 +2284,7 @@ let PPC970_Unit = 1 in { // FXU Operations.
let isSelect = 1 in
def ISEL : AForm_4<31, 15,
(outs gprc:$rT), (ins gprc_nor0:$rA, gprc:$rB, crbitrc:$cond),
- "isel $rT, $rA, $rB, $cond", IntGeneral,
+ "isel $rT, $rA, $rB, $cond", IIC_IntGeneral,
[]>;
}
@@ -2182,24 +2295,24 @@ let isCommutable = 1 in {
// RLWIMI can be commuted if the rotate amount is zero.
defm RLWIMI : MForm_2r<20, (outs gprc:$rA),
(ins gprc:$rSi, gprc:$rS, u5imm:$SH, u5imm:$MB,
- u5imm:$ME), "rlwimi", "$rA, $rS, $SH, $MB, $ME", IntRotate,
- []>, PPC970_DGroup_Cracked, RegConstraint<"$rSi = $rA">,
- NoEncode<"$rSi">;
+ u5imm:$ME), "rlwimi", "$rA, $rS, $SH, $MB, $ME",
+ IIC_IntRotate, []>, PPC970_DGroup_Cracked,
+ RegConstraint<"$rSi = $rA">, NoEncode<"$rSi">;
}
let BaseName = "rlwinm" in {
def RLWINM : MForm_2<21,
(outs gprc:$rA), (ins gprc:$rS, u5imm:$SH, u5imm:$MB, u5imm:$ME),
- "rlwinm $rA, $rS, $SH, $MB, $ME", IntGeneral,
+ "rlwinm $rA, $rS, $SH, $MB, $ME", IIC_IntGeneral,
[]>, RecFormRel;
let Defs = [CR0] in
def RLWINMo : MForm_2<21,
(outs gprc:$rA), (ins gprc:$rS, u5imm:$SH, u5imm:$MB, u5imm:$ME),
- "rlwinm. $rA, $rS, $SH, $MB, $ME", IntGeneral,
+ "rlwinm. $rA, $rS, $SH, $MB, $ME", IIC_IntGeneral,
[]>, isDOT, RecFormRel, PPC970_DGroup_Cracked;
}
defm RLWNM : MForm_2r<23, (outs gprc:$rA),
(ins gprc:$rS, gprc:$rB, u5imm:$MB, u5imm:$ME),
- "rlwnm", "$rA, $rS, $rB, $MB, $ME", IntGeneral,
+ "rlwnm", "$rA, $rS, $rB, $MB, $ME", IIC_IntGeneral,
[]>;
}
} // neverHasSideEffects = 1
@@ -2213,8 +2326,10 @@ def : Pat<(i32 imm:$imm),
(ORI (LIS (HI16 imm:$imm)), (LO16 imm:$imm))>;
// Implement the 'not' operation with the NOR instruction.
-def NOT : Pat<(not i32:$in),
- (NOR $in, $in)>;
+def i32not : OutPatFrag<(ops node:$in),
+ (NOR $in, $in)>;
+def : Pat<(not i32:$in),
+ (i32not $in)>;
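The switch from a named Pat to an OutPatFrag is what lets later patterns reuse this NOR-based inversion: an OutPatFrag is a named output-pattern fragment that can be nested inside other output dags, which a plain Pat cannot. For instance, the ExtSetCCPat&lt;SETNE, ...&gt; instantiation further down effectively expands to a sketch like

  def : Pat<(i32 (zext (i1 (setcc i32:$s1, 0, SETNE)))),
            (RLWINM (i32not (CNTLZW $s1)), 27, 31, 31)>;

with i32not appearing as an inner node of the result dag.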
// ADD an arbitrary immediate.
def : Pat<(add i32:$in, imm:$imm),
@@ -2285,18 +2400,6 @@ def : Pat<(add i32:$in, (PPChi tjumptable:$g, 0)),
def : Pat<(add i32:$in, (PPChi tblockaddress:$g, 0)),
(ADDIS $in, tblockaddress:$g)>;
-// Support for Position-independent code
-def LWZtoc: Pseudo<(outs gprc:$rD), (ins tocentry32:$disp, gprc:$reg),
- "#LWZtoc",
- [(set i32:$rD,
- (PPCtoc_entry tglobaladdr:$disp, i32:$reg))]>;
-// Get Global (GOT) Base Register offset, from the word immediately preceding
-// the function label.
-def GetGBRO: Pseudo<(outs gprc:$rT), (ins gprc:$rI), "#GetGBRO", []>;
-// Update the Global(GOT) Base Register with the above offset.
-def UpdateGBR: Pseudo<(outs gprc:$rT), (ins gprc:$rI), "#UpdateGBR", []>;
-
-
// Support for thread-local storage.
def PPC32GOT: Pseudo<(outs gprc:$rD), (ins), "#PPC32GOT",
[(set i32:$rD, (PPCppc32GOT))]>;
@@ -2313,6 +2416,7 @@ def LDgotTprelL32: Pseudo<(outs gprc:$rD), (ins s16imm:$disp, gprc_nor0:$reg),
(PPCldGotTprelL tglobaltlsaddr:$disp, i32:$reg))]>;
def : Pat<(PPCaddTls i32:$in, tglobaltlsaddr:$g),
(ADD4TLS $in, tglobaltlsaddr:$g)>;
+
def ADDItlsgdL32 : Pseudo<(outs gprc:$rD), (ins gprc_nor0:$reg, s16imm:$disp),
"#ADDItlsgdL32",
[(set i32:$rD,
@@ -2339,6 +2443,17 @@ def ADDISdtprelHA32 : Pseudo<(outs gprc:$rD), (ins gprc_nor0:$reg, s16imm:$disp)
(PPCaddisDtprelHA i32:$reg,
tglobaltlsaddr:$disp))]>;
+// Support for Position-independent code
+def LWZtoc: Pseudo<(outs gprc:$rD), (ins tocentry32:$disp, gprc:$reg),
+ "#LWZtoc",
+ [(set i32:$rD,
+ (PPCtoc_entry tglobaladdr:$disp, i32:$reg))]>;
+// Get Global (GOT) Base Register offset, from the word immediately preceding
+// the function label.
+def GetGBRO: Pseudo<(outs gprc:$rT), (ins gprc:$rI), "#GetGBRO", []>;
+// Update the Global(GOT) Base Register with the above offset.
+// Update the Global (GOT) Base Register with the above offset.
+def UpdateGBR: Pseudo<(outs gprc:$rT), (ins gprc:$rI), "#UpdateGBR", []>;
+
// Standard shifts. These are represented separately from the real shifts above
// so that we can distinguish between shifts that allow 5-bit and 6-bit shift
@@ -2395,52 +2510,561 @@ def : Pat<(fcopysign f32:$frB, f64:$frA),
include "PPCInstrAltivec.td"
include "PPCInstr64Bit.td"
+include "PPCInstrVSX.td"
+
+def crnot : OutPatFrag<(ops node:$in),
+ (CRNOR $in, $in)>;
+def : Pat<(not i1:$in),
+ (crnot $in)>;
+
+// Patterns for arithmetic i1 operations.
+def : Pat<(add i1:$a, i1:$b),
+ (CRXOR $a, $b)>;
+def : Pat<(sub i1:$a, i1:$b),
+ (CRXOR $a, $b)>;
+def : Pat<(mul i1:$a, i1:$b),
+ (CRAND $a, $b)>;
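These three patterns are simply 1-bit (mod-2) arithmetic spelled out with CR-bit logic: addition and subtraction modulo 2 are both exclusive-or (1 + 1 = 0 = 1 XOR 1, 1 - 0 = 1 = 1 XOR 0), and multiplication is logical and (1 * 1 = 1 = 1 AND 1).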
+
+// We're sometimes asked to materialize i1 -1, which is just 1 in this case
+// (-1 is used to mean all bits set).
+def : Pat<(i1 -1), (CRSET)>;
+
+// i1 extensions, implemented in terms of isel.
+def : Pat<(i32 (zext i1:$in)),
+ (SELECT_I4 $in, (LI 1), (LI 0))>;
+def : Pat<(i32 (sext i1:$in)),
+ (SELECT_I4 $in, (LI -1), (LI 0))>;
+
+def : Pat<(i64 (zext i1:$in)),
+ (SELECT_I8 $in, (LI8 1), (LI8 0))>;
+def : Pat<(i64 (sext i1:$in)),
+ (SELECT_I8 $in, (LI8 -1), (LI8 0))>;
+
+// FIXME: We should choose either a zext or a sext based on other constants
+// already around.
+def : Pat<(i32 (anyext i1:$in)),
+ (SELECT_I4 $in, (LI 1), (LI 0))>;
+def : Pat<(i64 (anyext i1:$in)),
+ (SELECT_I8 $in, (LI8 1), (LI8 0))>;
+
+// match setcc on i1 variables.
+def : Pat<(i1 (setcc i1:$s1, i1:$s2, SETLT)),
+ (CRANDC $s2, $s1)>;
+def : Pat<(i1 (setcc i1:$s1, i1:$s2, SETULT)),
+ (CRANDC $s2, $s1)>;
+def : Pat<(i1 (setcc i1:$s1, i1:$s2, SETLE)),
+ (CRORC $s2, $s1)>;
+def : Pat<(i1 (setcc i1:$s1, i1:$s2, SETULE)),
+ (CRORC $s2, $s1)>;
+def : Pat<(i1 (setcc i1:$s1, i1:$s2, SETEQ)),
+ (CREQV $s1, $s2)>;
+def : Pat<(i1 (setcc i1:$s1, i1:$s2, SETGE)),
+ (CRORC $s1, $s2)>;
+def : Pat<(i1 (setcc i1:$s1, i1:$s2, SETUGE)),
+ (CRORC $s1, $s2)>;
+def : Pat<(i1 (setcc i1:$s1, i1:$s2, SETGT)),
+ (CRANDC $s1, $s2)>;
+def : Pat<(i1 (setcc i1:$s1, i1:$s2, SETUGT)),
+ (CRANDC $s1, $s2)>;
+def : Pat<(i1 (setcc i1:$s1, i1:$s2, SETNE)),
+ (CRXOR $s1, $s2)>;
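These mappings treat each i1 operand as the unsigned value 0 or 1, which is why the signed and unsigned condition codes map identically. For example, lhs < rhs is true only for (lhs, rhs) = (0, 1), i.e. (NOT lhs) AND rhs; since crandc d, a, b computes a AND (NOT b), that is (CRANDC $s2, $s1) with the operands swapped. Likewise lhs <= rhs is (NOT lhs) OR rhs, giving CRORC with swapped operands, equality is eqv (xnor), and inequality is xor.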
+
+// match setcc on non-i1 (non-vector) variables. Note that SETUEQ, SETOGE,
+// SETOLE, SETONE, SETULT and SETUGT should be expanded by legalize for
+// floating-point types.
+
+multiclass CRNotPat<dag pattern, dag result> {
+ def : Pat<pattern, (crnot result)>;
+ def : Pat<(not pattern), result>;
+
+ // We can also fold the crnot into an extension:
+ def : Pat<(i32 (zext pattern)),
+ (SELECT_I4 result, (LI 0), (LI 1))>;
+ def : Pat<(i32 (sext pattern)),
+ (SELECT_I4 result, (LI 0), (LI -1))>;
+
+ // We can also fold the crnot into an extension:
+ def : Pat<(i64 (zext pattern)),
+ (SELECT_I8 result, (LI8 0), (LI8 1))>;
+ def : Pat<(i64 (sext pattern)),
+ (SELECT_I8 result, (LI8 0), (LI8 -1))>;
+
+ // FIXME: We should choose either a zext or a sext based on other constants
+ // already around.
+ def : Pat<(i32 (anyext pattern)),
+ (SELECT_I4 result, (LI 0), (LI 1))>;
+
+ def : Pat<(i64 (anyext pattern)),
+ (SELECT_I8 result, (LI8 0), (LI8 1))>;
+}
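Roughly, each CRNotPat instantiation says "this comparison is the inverse of that CR bit". Taking the 32-bit SETGE instantiation used below,

  defm : CRNotPat<(i1 (setcc i32:$s1, i32:$s2, SETGE)),
                  (EXTRACT_SUBREG (CMPW $s1, $s2), sub_lt)>;

the first two generated patterns are

  def : Pat<(i1 (setcc i32:$s1, i32:$s2, SETGE)),
            (crnot (EXTRACT_SUBREG (CMPW $s1, $s2), sub_lt))>;
  def : Pat<(not (i1 (setcc i32:$s1, i32:$s2, SETGE))),
            (EXTRACT_SUBREG (CMPW $s1, $s2), sub_lt)>;

so "a >= b" is the inverted LT bit of the compare, and an explicitly negated compare drops the crnot entirely; the remaining entries fold the inversion into the zext/sext/anyext selects.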
+
+// FIXME: Because of what seems like a bug in TableGen's type-inference code,
+// we need to write imm:$imm in the output patterns below, not just $imm, or
+// else the resulting matcher will not correctly add the immediate operand
+// (making it a register operand instead).
+
+// extended SETCC.
+multiclass ExtSetCCPat<CondCode cc, PatFrag pfrag,
+ OutPatFrag rfrag, OutPatFrag rfrag8> {
+ def : Pat<(i32 (zext (i1 (pfrag i32:$s1, cc)))),
+ (rfrag $s1)>;
+ def : Pat<(i64 (zext (i1 (pfrag i64:$s1, cc)))),
+ (rfrag8 $s1)>;
+ def : Pat<(i64 (zext (i1 (pfrag i32:$s1, cc)))),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (rfrag $s1), sub_32)>;
+ def : Pat<(i32 (zext (i1 (pfrag i64:$s1, cc)))),
+ (EXTRACT_SUBREG (rfrag8 $s1), sub_32)>;
+
+ def : Pat<(i32 (anyext (i1 (pfrag i32:$s1, cc)))),
+ (rfrag $s1)>;
+ def : Pat<(i64 (anyext (i1 (pfrag i64:$s1, cc)))),
+ (rfrag8 $s1)>;
+ def : Pat<(i64 (anyext (i1 (pfrag i32:$s1, cc)))),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (rfrag $s1), sub_32)>;
+ def : Pat<(i32 (anyext (i1 (pfrag i64:$s1, cc)))),
+ (EXTRACT_SUBREG (rfrag8 $s1), sub_32)>;
+}
+
+// Note that we do all inversions below with i(32|64)not, instead of using
+// (xori x, 1) because on the A2 nor has single-cycle latency while xori
+// has 2-cycle latency.
+
+defm : ExtSetCCPat<SETEQ,
+ PatFrag<(ops node:$in, node:$cc),
+ (setcc $in, 0, $cc)>,
+ OutPatFrag<(ops node:$in),
+ (RLWINM (CNTLZW $in), 27, 31, 31)>,
+ OutPatFrag<(ops node:$in),
+ (RLDICL (CNTLZD $in), 58, 63)> >;
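The SETEQ-against-zero expansion is the classic count-leading-zeros trick: cntlzw yields 32 only for a zero input (0-31 otherwise), and RLWINM ..., 27, 31, 31 rotates left by 27 (equivalently right by 5) and keeps only the low bit, so it computes (count >> 5) & 1, which is 1 exactly when the count is 32. The 64-bit form does the same with cntlzd (64 only for zero) and RLDICL ..., 58, 63 extracting bit 6 of the count.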
+
+defm : ExtSetCCPat<SETNE,
+ PatFrag<(ops node:$in, node:$cc),
+ (setcc $in, 0, $cc)>,
+ OutPatFrag<(ops node:$in),
+ (RLWINM (i32not (CNTLZW $in)), 27, 31, 31)>,
+ OutPatFrag<(ops node:$in),
+ (RLDICL (i64not (CNTLZD $in)), 58, 63)> >;
+
+defm : ExtSetCCPat<SETLT,
+ PatFrag<(ops node:$in, node:$cc),
+ (setcc $in, 0, $cc)>,
+ OutPatFrag<(ops node:$in),
+ (RLWINM $in, 1, 31, 31)>,
+ OutPatFrag<(ops node:$in),
+ (RLDICL $in, 1, 63)> >;
+
+defm : ExtSetCCPat<SETGE,
+ PatFrag<(ops node:$in, node:$cc),
+ (setcc $in, 0, $cc)>,
+ OutPatFrag<(ops node:$in),
+ (RLWINM (i32not $in), 1, 31, 31)>,
+ OutPatFrag<(ops node:$in),
+ (RLDICL (i64not $in), 1, 63)> >;
+
+defm : ExtSetCCPat<SETGT,
+ PatFrag<(ops node:$in, node:$cc),
+ (setcc $in, 0, $cc)>,
+ OutPatFrag<(ops node:$in),
+ (RLWINM (ANDC (NEG $in), $in), 1, 31, 31)>,
+ OutPatFrag<(ops node:$in),
+ (RLDICL (ANDC8 (NEG8 $in), $in), 1, 63)> >;
+
+defm : ExtSetCCPat<SETLE,
+ PatFrag<(ops node:$in, node:$cc),
+ (setcc $in, 0, $cc)>,
+ OutPatFrag<(ops node:$in),
+ (RLWINM (ORC $in, (NEG $in)), 1, 31, 31)>,
+ OutPatFrag<(ops node:$in),
+ (RLDICL (ORC8 $in, (NEG8 $in)), 1, 63)> >;
+
+defm : ExtSetCCPat<SETLT,
+ PatFrag<(ops node:$in, node:$cc),
+ (setcc $in, -1, $cc)>,
+ OutPatFrag<(ops node:$in),
+ (RLWINM (AND $in, (ADDI $in, 1)), 1, 31, 31)>,
+ OutPatFrag<(ops node:$in),
+ (RLDICL (AND8 $in, (ADDI8 $in, 1)), 1, 63)> >;
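The SETLT-against-(-1) form works because the sign bit of (x AND (x + 1)) is set exactly when x <= -2: for x >= 0 the sign bit of x is clear; for x = -1 the increment wraps to 0 and the AND is 0; and for x <= -2 both x and x + 1 are still negative, so both sign bits are set. The RLWINM ..., 1, 31, 31 and RLDICL ..., 1, 63 wrappers simply move that sign bit into the low bit position.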
+
+defm : ExtSetCCPat<SETGE,
+ PatFrag<(ops node:$in, node:$cc),
+ (setcc $in, -1, $cc)>,
+ OutPatFrag<(ops node:$in),
+ (RLWINM (NAND $in, (ADDI $in, 1)), 1, 31, 31)>,
+ OutPatFrag<(ops node:$in),
+ (RLDICL (NAND8 $in, (ADDI8 $in, 1)), 1, 63)> >;
+
+defm : ExtSetCCPat<SETGT,
+ PatFrag<(ops node:$in, node:$cc),
+ (setcc $in, -1, $cc)>,
+ OutPatFrag<(ops node:$in),
+ (RLWINM (i32not $in), 1, 31, 31)>,
+ OutPatFrag<(ops node:$in),
+ (RLDICL (i64not $in), 1, 63)> >;
+
+defm : ExtSetCCPat<SETLE,
+ PatFrag<(ops node:$in, node:$cc),
+ (setcc $in, -1, $cc)>,
+ OutPatFrag<(ops node:$in),
+ (RLWINM $in, 1, 31, 31)>,
+ OutPatFrag<(ops node:$in),
+ (RLDICL $in, 1, 63)> >;
+
+// SETCC for i32.
+def : Pat<(i1 (setcc i32:$s1, immZExt16:$imm, SETULT)),
+ (EXTRACT_SUBREG (CMPLWI $s1, imm:$imm), sub_lt)>;
+def : Pat<(i1 (setcc i32:$s1, imm32SExt16:$imm, SETLT)),
+ (EXTRACT_SUBREG (CMPWI $s1, imm:$imm), sub_lt)>;
+def : Pat<(i1 (setcc i32:$s1, immZExt16:$imm, SETUGT)),
+ (EXTRACT_SUBREG (CMPLWI $s1, imm:$imm), sub_gt)>;
+def : Pat<(i1 (setcc i32:$s1, imm32SExt16:$imm, SETGT)),
+ (EXTRACT_SUBREG (CMPWI $s1, imm:$imm), sub_gt)>;
+def : Pat<(i1 (setcc i32:$s1, imm32SExt16:$imm, SETEQ)),
+ (EXTRACT_SUBREG (CMPWI $s1, imm:$imm), sub_eq)>;
+def : Pat<(i1 (setcc i32:$s1, immZExt16:$imm, SETEQ)),
+ (EXTRACT_SUBREG (CMPLWI $s1, imm:$imm), sub_eq)>;
+
+// For non-equality comparisons, the default code would materialize the
+// constant, then compare against it, like this:
+// lis r2, 4660
+// ori r2, r2, 22136
+// cmpw cr0, r3, r2
+// beq cr0,L6
+// Since we are just comparing for equality, we can emit this instead:
+// xoris r0,r3,0x1234
+// cmplwi cr0,r0,0x5678
+// beq cr0,L6
+
+def : Pat<(i1 (setcc i32:$s1, imm:$imm, SETEQ)),
+ (EXTRACT_SUBREG (CMPLWI (XORIS $s1, (HI16 imm:$imm)),
+ (LO16 imm:$imm)), sub_eq)>;
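Making the numbers in the comment concrete: lis r2, 4660 / ori r2, r2, 22136 builds 0x12345678 (4660 = 0x1234, 22136 = 0x5678). The replacement relies on the identity r3 == 0x12345678 if and only if (r3 XOR 0x12340000) == 0x5678; after xoris r0, r3, 0x1234 the value to compare against is just the low halfword, which fits cmplwi's zero-extended 16-bit immediate, so a single compare suffices. The CMPLDI/XORIS8 pattern below applies the same identity to 64-bit comparisons whose constant fits in 32 bits (imm64ZExt32).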
+
+defm : CRNotPat<(i1 (setcc i32:$s1, immZExt16:$imm, SETUGE)),
+ (EXTRACT_SUBREG (CMPLWI $s1, imm:$imm), sub_lt)>;
+defm : CRNotPat<(i1 (setcc i32:$s1, imm32SExt16:$imm, SETGE)),
+ (EXTRACT_SUBREG (CMPWI $s1, imm:$imm), sub_lt)>;
+defm : CRNotPat<(i1 (setcc i32:$s1, immZExt16:$imm, SETULE)),
+ (EXTRACT_SUBREG (CMPLWI $s1, imm:$imm), sub_gt)>;
+defm : CRNotPat<(i1 (setcc i32:$s1, imm32SExt16:$imm, SETLE)),
+ (EXTRACT_SUBREG (CMPWI $s1, imm:$imm), sub_gt)>;
+defm : CRNotPat<(i1 (setcc i32:$s1, imm32SExt16:$imm, SETNE)),
+ (EXTRACT_SUBREG (CMPWI $s1, imm:$imm), sub_eq)>;
+defm : CRNotPat<(i1 (setcc i32:$s1, immZExt16:$imm, SETNE)),
+ (EXTRACT_SUBREG (CMPLWI $s1, imm:$imm), sub_eq)>;
+
+defm : CRNotPat<(i1 (setcc i32:$s1, imm:$imm, SETNE)),
+ (EXTRACT_SUBREG (CMPLWI (XORIS $s1, (HI16 imm:$imm)),
+ (LO16 imm:$imm)), sub_eq)>;
+
+def : Pat<(i1 (setcc i32:$s1, i32:$s2, SETULT)),
+ (EXTRACT_SUBREG (CMPLW $s1, $s2), sub_lt)>;
+def : Pat<(i1 (setcc i32:$s1, i32:$s2, SETLT)),
+ (EXTRACT_SUBREG (CMPW $s1, $s2), sub_lt)>;
+def : Pat<(i1 (setcc i32:$s1, i32:$s2, SETUGT)),
+ (EXTRACT_SUBREG (CMPLW $s1, $s2), sub_gt)>;
+def : Pat<(i1 (setcc i32:$s1, i32:$s2, SETGT)),
+ (EXTRACT_SUBREG (CMPW $s1, $s2), sub_gt)>;
+def : Pat<(i1 (setcc i32:$s1, i32:$s2, SETEQ)),
+ (EXTRACT_SUBREG (CMPW $s1, $s2), sub_eq)>;
+
+defm : CRNotPat<(i1 (setcc i32:$s1, i32:$s2, SETUGE)),
+ (EXTRACT_SUBREG (CMPLW $s1, $s2), sub_lt)>;
+defm : CRNotPat<(i1 (setcc i32:$s1, i32:$s2, SETGE)),
+ (EXTRACT_SUBREG (CMPW $s1, $s2), sub_lt)>;
+defm : CRNotPat<(i1 (setcc i32:$s1, i32:$s2, SETULE)),
+ (EXTRACT_SUBREG (CMPLW $s1, $s2), sub_gt)>;
+defm : CRNotPat<(i1 (setcc i32:$s1, i32:$s2, SETLE)),
+ (EXTRACT_SUBREG (CMPW $s1, $s2), sub_gt)>;
+defm : CRNotPat<(i1 (setcc i32:$s1, i32:$s2, SETNE)),
+ (EXTRACT_SUBREG (CMPW $s1, $s2), sub_eq)>;
+
+// SETCC for i64.
+def : Pat<(i1 (setcc i64:$s1, immZExt16:$imm, SETULT)),
+ (EXTRACT_SUBREG (CMPLDI $s1, imm:$imm), sub_lt)>;
+def : Pat<(i1 (setcc i64:$s1, imm64SExt16:$imm, SETLT)),
+ (EXTRACT_SUBREG (CMPDI $s1, imm:$imm), sub_lt)>;
+def : Pat<(i1 (setcc i64:$s1, immZExt16:$imm, SETUGT)),
+ (EXTRACT_SUBREG (CMPLDI $s1, imm:$imm), sub_gt)>;
+def : Pat<(i1 (setcc i64:$s1, imm64SExt16:$imm, SETGT)),
+ (EXTRACT_SUBREG (CMPDI $s1, imm:$imm), sub_gt)>;
+def : Pat<(i1 (setcc i64:$s1, imm64SExt16:$imm, SETEQ)),
+ (EXTRACT_SUBREG (CMPDI $s1, imm:$imm), sub_eq)>;
+def : Pat<(i1 (setcc i64:$s1, immZExt16:$imm, SETEQ)),
+ (EXTRACT_SUBREG (CMPLDI $s1, imm:$imm), sub_eq)>;
+
+// For non-equality comparisons, the default code would materialize the
+// constant, then compare against it, like this:
+// lis r2, 4660
+// ori r2, r2, 22136
+// cmpd cr0, r3, r2
+// beq cr0,L6
+// Since we are just comparing for equality, we can emit this instead:
+// xoris r0,r3,0x1234
+// cmpldi cr0,r0,0x5678
+// beq cr0,L6
+
+def : Pat<(i1 (setcc i64:$s1, imm64ZExt32:$imm, SETEQ)),
+ (EXTRACT_SUBREG (CMPLDI (XORIS8 $s1, (HI16 imm:$imm)),
+ (LO16 imm:$imm)), sub_eq)>;
+
+defm : CRNotPat<(i1 (setcc i64:$s1, immZExt16:$imm, SETUGE)),
+ (EXTRACT_SUBREG (CMPLDI $s1, imm:$imm), sub_lt)>;
+defm : CRNotPat<(i1 (setcc i64:$s1, imm64SExt16:$imm, SETGE)),
+ (EXTRACT_SUBREG (CMPDI $s1, imm:$imm), sub_lt)>;
+defm : CRNotPat<(i1 (setcc i64:$s1, immZExt16:$imm, SETULE)),
+ (EXTRACT_SUBREG (CMPLDI $s1, imm:$imm), sub_gt)>;
+defm : CRNotPat<(i1 (setcc i64:$s1, imm64SExt16:$imm, SETLE)),
+ (EXTRACT_SUBREG (CMPDI $s1, imm:$imm), sub_gt)>;
+defm : CRNotPat<(i1 (setcc i64:$s1, imm64SExt16:$imm, SETNE)),
+ (EXTRACT_SUBREG (CMPDI $s1, imm:$imm), sub_eq)>;
+defm : CRNotPat<(i1 (setcc i64:$s1, immZExt16:$imm, SETNE)),
+ (EXTRACT_SUBREG (CMPLDI $s1, imm:$imm), sub_eq)>;
+
+defm : CRNotPat<(i1 (setcc i64:$s1, imm64ZExt32:$imm, SETNE)),
+ (EXTRACT_SUBREG (CMPLDI (XORIS8 $s1, (HI16 imm:$imm)),
+ (LO16 imm:$imm)), sub_eq)>;
+
+def : Pat<(i1 (setcc i64:$s1, i64:$s2, SETULT)),
+ (EXTRACT_SUBREG (CMPLD $s1, $s2), sub_lt)>;
+def : Pat<(i1 (setcc i64:$s1, i64:$s2, SETLT)),
+ (EXTRACT_SUBREG (CMPD $s1, $s2), sub_lt)>;
+def : Pat<(i1 (setcc i64:$s1, i64:$s2, SETUGT)),
+ (EXTRACT_SUBREG (CMPLD $s1, $s2), sub_gt)>;
+def : Pat<(i1 (setcc i64:$s1, i64:$s2, SETGT)),
+ (EXTRACT_SUBREG (CMPD $s1, $s2), sub_gt)>;
+def : Pat<(i1 (setcc i64:$s1, i64:$s2, SETEQ)),
+ (EXTRACT_SUBREG (CMPD $s1, $s2), sub_eq)>;
+
+defm : CRNotPat<(i1 (setcc i64:$s1, i64:$s2, SETUGE)),
+ (EXTRACT_SUBREG (CMPLD $s1, $s2), sub_lt)>;
+defm : CRNotPat<(i1 (setcc i64:$s1, i64:$s2, SETGE)),
+ (EXTRACT_SUBREG (CMPD $s1, $s2), sub_lt)>;
+defm : CRNotPat<(i1 (setcc i64:$s1, i64:$s2, SETULE)),
+ (EXTRACT_SUBREG (CMPLD $s1, $s2), sub_gt)>;
+defm : CRNotPat<(i1 (setcc i64:$s1, i64:$s2, SETLE)),
+ (EXTRACT_SUBREG (CMPD $s1, $s2), sub_gt)>;
+defm : CRNotPat<(i1 (setcc i64:$s1, i64:$s2, SETNE)),
+ (EXTRACT_SUBREG (CMPD $s1, $s2), sub_eq)>;
+
+// SETCC for f32.
+def : Pat<(i1 (setcc f32:$s1, f32:$s2, SETOLT)),
+ (EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_lt)>;
+def : Pat<(i1 (setcc f32:$s1, f32:$s2, SETLT)),
+ (EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_lt)>;
+def : Pat<(i1 (setcc f32:$s1, f32:$s2, SETOGT)),
+ (EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_gt)>;
+def : Pat<(i1 (setcc f32:$s1, f32:$s2, SETGT)),
+ (EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_gt)>;
+def : Pat<(i1 (setcc f32:$s1, f32:$s2, SETOEQ)),
+ (EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_eq)>;
+def : Pat<(i1 (setcc f32:$s1, f32:$s2, SETEQ)),
+ (EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_eq)>;
+def : Pat<(i1 (setcc f32:$s1, f32:$s2, SETUO)),
+ (EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_un)>;
+
+defm : CRNotPat<(i1 (setcc f32:$s1, f32:$s2, SETUGE)),
+ (EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_lt)>;
+defm : CRNotPat<(i1 (setcc f32:$s1, f32:$s2, SETGE)),
+ (EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_lt)>;
+defm : CRNotPat<(i1 (setcc f32:$s1, f32:$s2, SETULE)),
+ (EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_gt)>;
+defm : CRNotPat<(i1 (setcc f32:$s1, f32:$s2, SETLE)),
+ (EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_gt)>;
+defm : CRNotPat<(i1 (setcc f32:$s1, f32:$s2, SETUNE)),
+ (EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_eq)>;
+defm : CRNotPat<(i1 (setcc f32:$s1, f32:$s2, SETNE)),
+ (EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_eq)>;
+defm : CRNotPat<(i1 (setcc f32:$s1, f32:$s2, SETO)),
+ (EXTRACT_SUBREG (FCMPUS $s1, $s2), sub_un)>;
+
+// SETCC for f64.
+def : Pat<(i1 (setcc f64:$s1, f64:$s2, SETOLT)),
+ (EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_lt)>;
+def : Pat<(i1 (setcc f64:$s1, f64:$s2, SETLT)),
+ (EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_lt)>;
+def : Pat<(i1 (setcc f64:$s1, f64:$s2, SETOGT)),
+ (EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_gt)>;
+def : Pat<(i1 (setcc f64:$s1, f64:$s2, SETGT)),
+ (EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_gt)>;
+def : Pat<(i1 (setcc f64:$s1, f64:$s2, SETOEQ)),
+ (EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_eq)>;
+def : Pat<(i1 (setcc f64:$s1, f64:$s2, SETEQ)),
+ (EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_eq)>;
+def : Pat<(i1 (setcc f64:$s1, f64:$s2, SETUO)),
+ (EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_un)>;
+
+defm : CRNotPat<(i1 (setcc f64:$s1, f64:$s2, SETUGE)),
+ (EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_lt)>;
+defm : CRNotPat<(i1 (setcc f64:$s1, f64:$s2, SETGE)),
+ (EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_lt)>;
+defm : CRNotPat<(i1 (setcc f64:$s1, f64:$s2, SETULE)),
+ (EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_gt)>;
+defm : CRNotPat<(i1 (setcc f64:$s1, f64:$s2, SETLE)),
+ (EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_gt)>;
+defm : CRNotPat<(i1 (setcc f64:$s1, f64:$s2, SETUNE)),
+ (EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_eq)>;
+defm : CRNotPat<(i1 (setcc f64:$s1, f64:$s2, SETNE)),
+ (EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_eq)>;
+defm : CRNotPat<(i1 (setcc f64:$s1, f64:$s2, SETO)),
+ (EXTRACT_SUBREG (FCMPUD $s1, $s2), sub_un)>;
+
+// match select on i1 variables:
+def : Pat<(i1 (select i1:$cond, i1:$tval, i1:$fval)),
+ (CROR (CRAND $cond , $tval),
+ (CRAND (crnot $cond), $fval))>;
+
+// match selectcc on i1 variables:
+// select (lhs == rhs), tval, fval is:
+// ((lhs == rhs) & tval) | (!(lhs == rhs) & fval)
+def : Pat <(i1 (selectcc i1:$lhs, i1:$rhs, i1:$tval, i1:$fval, SETLT)),
+ (CROR (CRAND (CRANDC $rhs, $lhs), $tval),
+ (CRAND (CRORC $lhs, $rhs), $fval))>;
+def : Pat <(i1 (selectcc i1:$lhs, i1:$rhs, i1:$tval, i1:$fval, SETLE)),
+ (CROR (CRAND (CRORC $rhs, $lhs), $tval),
+ (CRAND (CRANDC $lhs, $rhs), $fval))>;
+def : Pat <(i1 (selectcc i1:$lhs, i1:$rhs, i1:$tval, i1:$fval, SETEQ)),
+ (CROR (CRAND (CREQV $lhs, $rhs), $tval),
+ (CRAND (CRXOR $lhs, $rhs), $fval))>;
+def : Pat <(i1 (selectcc i1:$lhs, i1:$rhs, i1:$tval, i1:$fval, SETGE)),
+ (CROR (CRAND (CRORC $lhs, $rhs), $tval),
+ (CRAND (CRANDC $rhs, $lhs), $fval))>;
+def : Pat <(i1 (selectcc i1:$lhs, i1:$rhs, i1:$tval, i1:$fval, SETGT)),
+ (CROR (CRAND (CRANDC $lhs, $rhs), $tval),
+ (CRAND (CRORC $rhs, $lhs), $fval))>;
+def : Pat <(i1 (selectcc i1:$lhs, i1:$rhs, i1:$tval, i1:$fval, SETNE)),
+ (CROR (CRAND (CREQV $lhs, $rhs), $fval),
+ (CRAND (CRXOR $lhs, $rhs), $tval))>;
+
+// match selectcc on i1 variables with non-i1 output.
+def : Pat<(i32 (selectcc i1:$lhs, i1:$rhs, i32:$tval, i32:$fval, SETLT)),
+ (SELECT_I4 (CRANDC $rhs, $lhs), $tval, $fval)>;
+def : Pat<(i32 (selectcc i1:$lhs, i1:$rhs, i32:$tval, i32:$fval, SETLE)),
+ (SELECT_I4 (CRORC $rhs, $lhs), $tval, $fval)>;
+def : Pat<(i32 (selectcc i1:$lhs, i1:$rhs, i32:$tval, i32:$fval, SETEQ)),
+ (SELECT_I4 (CREQV $lhs, $rhs), $tval, $fval)>;
+def : Pat<(i32 (selectcc i1:$lhs, i1:$rhs, i32:$tval, i32:$fval, SETGE)),
+ (SELECT_I4 (CRORC $lhs, $rhs), $tval, $fval)>;
+def : Pat<(i32 (selectcc i1:$lhs, i1:$rhs, i32:$tval, i32:$fval, SETGT)),
+ (SELECT_I4 (CRANDC $lhs, $rhs), $tval, $fval)>;
+def : Pat<(i32 (selectcc i1:$lhs, i1:$rhs, i32:$tval, i32:$fval, SETNE)),
+ (SELECT_I4 (CRXOR $lhs, $rhs), $tval, $fval)>;
+
+def : Pat<(i64 (selectcc i1:$lhs, i1:$rhs, i64:$tval, i64:$fval, SETLT)),
+ (SELECT_I8 (CRANDC $rhs, $lhs), $tval, $fval)>;
+def : Pat<(i64 (selectcc i1:$lhs, i1:$rhs, i64:$tval, i64:$fval, SETLE)),
+ (SELECT_I8 (CRORC $rhs, $lhs), $tval, $fval)>;
+def : Pat<(i64 (selectcc i1:$lhs, i1:$rhs, i64:$tval, i64:$fval, SETEQ)),
+ (SELECT_I8 (CREQV $lhs, $rhs), $tval, $fval)>;
+def : Pat<(i64 (selectcc i1:$lhs, i1:$rhs, i64:$tval, i64:$fval, SETGE)),
+ (SELECT_I8 (CRORC $lhs, $rhs), $tval, $fval)>;
+def : Pat<(i64 (selectcc i1:$lhs, i1:$rhs, i64:$tval, i64:$fval, SETGT)),
+ (SELECT_I8 (CRANDC $lhs, $rhs), $tval, $fval)>;
+def : Pat<(i64 (selectcc i1:$lhs, i1:$rhs, i64:$tval, i64:$fval, SETNE)),
+ (SELECT_I8 (CRXOR $lhs, $rhs), $tval, $fval)>;
+
+def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETLT)),
+ (SELECT_F4 (CRANDC $rhs, $lhs), $tval, $fval)>;
+def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETLE)),
+ (SELECT_F4 (CRORC $rhs, $lhs), $tval, $fval)>;
+def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETEQ)),
+ (SELECT_F4 (CREQV $lhs, $rhs), $tval, $fval)>;
+def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETGE)),
+ (SELECT_F4 (CRORC $lhs, $rhs), $tval, $fval)>;
+def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETGT)),
+ (SELECT_F4 (CRANDC $lhs, $rhs), $tval, $fval)>;
+def : Pat<(f32 (selectcc i1:$lhs, i1:$rhs, f32:$tval, f32:$fval, SETNE)),
+ (SELECT_F4 (CRXOR $lhs, $rhs), $tval, $fval)>;
+
+def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETLT)),
+ (SELECT_F8 (CRANDC $rhs, $lhs), $tval, $fval)>;
+def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETLE)),
+ (SELECT_F8 (CRORC $rhs, $lhs), $tval, $fval)>;
+def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETEQ)),
+ (SELECT_F8 (CREQV $lhs, $rhs), $tval, $fval)>;
+def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETGE)),
+ (SELECT_F8 (CRORC $lhs, $rhs), $tval, $fval)>;
+def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETGT)),
+ (SELECT_F8 (CRANDC $lhs, $rhs), $tval, $fval)>;
+def : Pat<(f64 (selectcc i1:$lhs, i1:$rhs, f64:$tval, f64:$fval, SETNE)),
+ (SELECT_F8 (CRXOR $lhs, $rhs), $tval, $fval)>;
+
+def : Pat<(v4i32 (selectcc i1:$lhs, i1:$rhs, v4i32:$tval, v4i32:$fval, SETLT)),
+ (SELECT_VRRC (CRANDC $rhs, $lhs), $tval, $fval)>;
+def : Pat<(v4i32 (selectcc i1:$lhs, i1:$rhs, v4i32:$tval, v4i32:$fval, SETLE)),
+ (SELECT_VRRC (CRORC $rhs, $lhs), $tval, $fval)>;
+def : Pat<(v4i32 (selectcc i1:$lhs, i1:$rhs, v4i32:$tval, v4i32:$fval, SETEQ)),
+ (SELECT_VRRC (CREQV $lhs, $rhs), $tval, $fval)>;
+def : Pat<(v4i32 (selectcc i1:$lhs, i1:$rhs, v4i32:$tval, v4i32:$fval, SETGE)),
+ (SELECT_VRRC (CRORC $lhs, $rhs), $tval, $fval)>;
+def : Pat<(v4i32 (selectcc i1:$lhs, i1:$rhs, v4i32:$tval, v4i32:$fval, SETGT)),
+ (SELECT_VRRC (CRANDC $lhs, $rhs), $tval, $fval)>;
+def : Pat<(v4i32 (selectcc i1:$lhs, i1:$rhs, v4i32:$tval, v4i32:$fval, SETNE)),
+ (SELECT_VRRC (CRXOR $lhs, $rhs), $tval, $fval)>;
+let usesCustomInserter = 1 in {
+def ANDIo_1_EQ_BIT : Pseudo<(outs crbitrc:$dst), (ins gprc:$in),
+ "#ANDIo_1_EQ_BIT",
+ [(set i1:$dst, (trunc (not i32:$in)))]>;
+def ANDIo_1_GT_BIT : Pseudo<(outs crbitrc:$dst), (ins gprc:$in),
+ "#ANDIo_1_GT_BIT",
+ [(set i1:$dst, (trunc i32:$in))]>;
+
+def ANDIo_1_EQ_BIT8 : Pseudo<(outs crbitrc:$dst), (ins g8rc:$in),
+ "#ANDIo_1_EQ_BIT8",
+ [(set i1:$dst, (trunc (not i64:$in)))]>;
+def ANDIo_1_GT_BIT8 : Pseudo<(outs crbitrc:$dst), (ins g8rc:$in),
+ "#ANDIo_1_GT_BIT8",
+ [(set i1:$dst, (trunc i64:$in))]>;
+}
+
+def : Pat<(i1 (not (trunc i32:$in))),
+ (ANDIo_1_EQ_BIT $in)>;
+def : Pat<(i1 (not (trunc i64:$in))),
+ (ANDIo_1_EQ_BIT8 $in)>;
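The ANDIo_1_* pseudos expose the CR0 bits of a recording and-immediate: the names suggest each is eventually expanded by the custom inserter into an andi. of the low bit, after which CR0's GT bit is 1 exactly when the masked result is 1 and the EQ bit is 1 exactly when it is 0, so GT_BIT implements trunc-to-i1 and EQ_BIT its negation. That reading is an inference from the names and patterns here rather than something spelled out in this file.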
//===----------------------------------------------------------------------===//
// PowerPC Instructions used for assembler/disassembler only
//
def ISYNC : XLForm_2_ext<19, 150, 0, 0, 0, (outs), (ins),
- "isync", SprISYNC, []>;
+ "isync", IIC_SprISYNC, []>;
def ICBI : XForm_1a<31, 982, (outs), (ins memrr:$src),
- "icbi $src", LdStICBI, []>;
+ "icbi $src", IIC_LdStICBI, []>;
def EIEIO : XForm_24_eieio<31, 854, (outs), (ins),
- "eieio", LdStLoad, []>;
+ "eieio", IIC_LdStLoad, []>;
def WAIT : XForm_24_sync<31, 62, (outs), (ins i32imm:$L),
- "wait $L", LdStLoad, []>;
+ "wait $L", IIC_LdStLoad, []>;
def MTMSR: XForm_mtmsr<31, 146, (outs), (ins gprc:$RS, i32imm:$L),
- "mtmsr $RS, $L", SprMTMSR>;
+ "mtmsr $RS, $L", IIC_SprMTMSR>;
def MFMSR : XForm_rs<31, 83, (outs gprc:$RT), (ins),
- "mfmsr $RT", SprMFMSR, []>;
+ "mfmsr $RT", IIC_SprMFMSR, []>;
def MTMSRD : XForm_mtmsr<31, 178, (outs), (ins gprc:$RS, i32imm:$L),
- "mtmsrd $RS, $L", SprMTMSRD>;
+ "mtmsrd $RS, $L", IIC_SprMTMSRD>;
def SLBIE : XForm_16b<31, 434, (outs), (ins gprc:$RB),
- "slbie $RB", SprSLBIE, []>;
+ "slbie $RB", IIC_SprSLBIE, []>;
def SLBMTE : XForm_26<31, 402, (outs), (ins gprc:$RS, gprc:$RB),
- "slbmte $RS, $RB", SprSLBMTE, []>;
+ "slbmte $RS, $RB", IIC_SprSLBMTE, []>;
def SLBMFEE : XForm_26<31, 915, (outs gprc:$RT), (ins gprc:$RB),
- "slbmfee $RT, $RB", SprSLBMFEE, []>;
+ "slbmfee $RT, $RB", IIC_SprSLBMFEE, []>;
-def SLBIA : XForm_0<31, 498, (outs), (ins), "slbia", SprSLBIA, []>;
+def SLBIA : XForm_0<31, 498, (outs), (ins), "slbia", IIC_SprSLBIA, []>;
def TLBSYNC : XForm_0<31, 566, (outs), (ins),
- "tlbsync", SprTLBSYNC, []>;
+ "tlbsync", IIC_SprTLBSYNC, []>;
def TLBIEL : XForm_16b<31, 274, (outs), (ins gprc:$RB),
- "tlbiel $RB", SprTLBIEL, []>;
+ "tlbiel $RB", IIC_SprTLBIEL, []>;
def TLBIE : XForm_26<31, 306, (outs), (ins gprc:$RS, gprc:$RB),
- "tlbie $RB,$RS", SprTLBIE, []>;
+ "tlbie $RB,$RS", IIC_SprTLBIE, []>;
//===----------------------------------------------------------------------===//
// PowerPC Assembler Instruction Aliases
@@ -2656,19 +3280,19 @@ let PPC970_Unit = 7 in {
let Defs = [CTR], Uses = [CTR, LR, RM] in
def gBCLR : XLForm_2<19, 16, 0, (outs),
(ins u5imm:$bo, crbitrc:$bi, i32imm:$bh),
- "bclr $bo, $bi, $bh", BrB, []>;
+ "bclr $bo, $bi, $bh", IIC_BrB, []>;
let Defs = [LR, CTR], Uses = [CTR, LR, RM] in
def gBCLRL : XLForm_2<19, 16, 1, (outs),
(ins u5imm:$bo, crbitrc:$bi, i32imm:$bh),
- "bclrl $bo, $bi, $bh", BrB, []>;
+ "bclrl $bo, $bi, $bh", IIC_BrB, []>;
let Defs = [CTR], Uses = [CTR, LR, RM] in
def gBCCTR : XLForm_2<19, 528, 0, (outs),
(ins u5imm:$bo, crbitrc:$bi, i32imm:$bh),
- "bcctr $bo, $bi, $bh", BrB, []>;
+ "bcctr $bo, $bi, $bh", IIC_BrB, []>;
let Defs = [LR, CTR], Uses = [CTR, LR, RM] in
def gBCCTRL : XLForm_2<19, 528, 1, (outs),
(ins u5imm:$bo, crbitrc:$bi, i32imm:$bh),
- "bcctrl $bo, $bi, $bh", BrB, []>;
+ "bcctrl $bo, $bi, $bh", IIC_BrB, []>;
}
def : InstAlias<"bclr $bo, $bi", (gBCLR u5imm:$bo, crbitrc:$bi, 0)>;
def : InstAlias<"bclrl $bo, $bi", (gBCLRL u5imm:$bo, crbitrc:$bi, 0)>;
@@ -2711,14 +3335,14 @@ multiclass BranchExtendedMnemonicPM<string name, string pm, int bibo> {
(BCCA bibo, CR0, abscondbrtarget:$dst)>;
def : InstAlias<"b"#name#"lr"#pm#" $cc",
- (BCLR bibo, crrc:$cc)>;
+ (BCCLR bibo, crrc:$cc)>;
def : InstAlias<"b"#name#"lr"#pm,
- (BCLR bibo, CR0)>;
+ (BCCLR bibo, CR0)>;
def : InstAlias<"b"#name#"ctr"#pm#" $cc",
- (BCCTR bibo, crrc:$cc)>;
+ (BCCCTR bibo, crrc:$cc)>;
def : InstAlias<"b"#name#"ctr"#pm,
- (BCCTR bibo, CR0)>;
+ (BCCCTR bibo, CR0)>;
def : InstAlias<"b"#name#"l"#pm#" $cc, $dst",
(BCCL bibo, crrc:$cc, condbrtarget:$dst)>;
@@ -2731,14 +3355,14 @@ multiclass BranchExtendedMnemonicPM<string name, string pm, int bibo> {
(BCCLA bibo, CR0, abscondbrtarget:$dst)>;
def : InstAlias<"b"#name#"lrl"#pm#" $cc",
- (BCLRL bibo, crrc:$cc)>;
+ (BCCLRL bibo, crrc:$cc)>;
def : InstAlias<"b"#name#"lrl"#pm,
- (BCLRL bibo, CR0)>;
+ (BCCLRL bibo, CR0)>;
def : InstAlias<"b"#name#"ctrl"#pm#" $cc",
- (BCCTRL bibo, crrc:$cc)>;
+ (BCCCTRL bibo, crrc:$cc)>;
def : InstAlias<"b"#name#"ctrl"#pm,
- (BCCTRL bibo, CR0)>;
+ (BCCCTRL bibo, CR0)>;
}
multiclass BranchExtendedMnemonic<string name, int bibo> {
defm : BranchExtendedMnemonicPM<name, "", bibo>;
@@ -2762,18 +3386,18 @@ def : InstAlias<"cmpwi $rA, $imm", (CMPWI CR0, gprc:$rA, s16imm:$imm)>;
def : InstAlias<"cmpw $rA, $rB", (CMPW CR0, gprc:$rA, gprc:$rB)>;
def : InstAlias<"cmplwi $rA, $imm", (CMPLWI CR0, gprc:$rA, u16imm:$imm)>;
def : InstAlias<"cmplw $rA, $rB", (CMPLW CR0, gprc:$rA, gprc:$rB)>;
-def : InstAlias<"cmpdi $rA, $imm", (CMPDI CR0, g8rc:$rA, s16imm:$imm)>;
+def : InstAlias<"cmpdi $rA, $imm", (CMPDI CR0, g8rc:$rA, s16imm64:$imm)>;
def : InstAlias<"cmpd $rA, $rB", (CMPD CR0, g8rc:$rA, g8rc:$rB)>;
-def : InstAlias<"cmpldi $rA, $imm", (CMPLDI CR0, g8rc:$rA, u16imm:$imm)>;
+def : InstAlias<"cmpldi $rA, $imm", (CMPLDI CR0, g8rc:$rA, u16imm64:$imm)>;
def : InstAlias<"cmpld $rA, $rB", (CMPLD CR0, g8rc:$rA, g8rc:$rB)>;
def : InstAlias<"cmpi $bf, 0, $rA, $imm", (CMPWI crrc:$bf, gprc:$rA, s16imm:$imm)>;
def : InstAlias<"cmp $bf, 0, $rA, $rB", (CMPW crrc:$bf, gprc:$rA, gprc:$rB)>;
def : InstAlias<"cmpli $bf, 0, $rA, $imm", (CMPLWI crrc:$bf, gprc:$rA, u16imm:$imm)>;
def : InstAlias<"cmpl $bf, 0, $rA, $rB", (CMPLW crrc:$bf, gprc:$rA, gprc:$rB)>;
-def : InstAlias<"cmpi $bf, 1, $rA, $imm", (CMPDI crrc:$bf, g8rc:$rA, s16imm:$imm)>;
+def : InstAlias<"cmpi $bf, 1, $rA, $imm", (CMPDI crrc:$bf, g8rc:$rA, s16imm64:$imm)>;
def : InstAlias<"cmp $bf, 1, $rA, $rB", (CMPD crrc:$bf, g8rc:$rA, g8rc:$rB)>;
-def : InstAlias<"cmpli $bf, 1, $rA, $imm", (CMPLDI crrc:$bf, g8rc:$rA, u16imm:$imm)>;
+def : InstAlias<"cmpli $bf, 1, $rA, $imm", (CMPLDI crrc:$bf, g8rc:$rA, u16imm64:$imm)>;
def : InstAlias<"cmpl $bf, 1, $rA, $rB", (CMPLD crrc:$bf, g8rc:$rA, g8rc:$rB)>;
multiclass TrapExtendedMnemonic<string name, int to> {
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrVSX.td b/contrib/llvm/lib/Target/PowerPC/PPCInstrVSX.td
new file mode 100644
index 0000000..49bcc48
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrVSX.td
@@ -0,0 +1,816 @@
+//===- PPCInstrVSX.td - The PowerPC VSX Extension --*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the VSX extension to the PowerPC instruction set.
+//
+//===----------------------------------------------------------------------===//
+
+def PPCRegVSRCAsmOperand : AsmOperandClass {
+ let Name = "RegVSRC"; let PredicateMethod = "isVSRegNumber";
+}
+def vsrc : RegisterOperand<VSRC> {
+ let ParserMatchClass = PPCRegVSRCAsmOperand;
+}
+
+def PPCRegVSFRCAsmOperand : AsmOperandClass {
+ let Name = "RegVSFRC"; let PredicateMethod = "isVSRegNumber";
+}
+def vsfrc : RegisterOperand<VSFRC> {
+ let ParserMatchClass = PPCRegVSFRCAsmOperand;
+}
+
+multiclass XX3Form_Rcr<bits<6> opcode, bits<7> xo, dag OOL, dag IOL,
+ string asmbase, string asmstr, InstrItinClass itin,
+ list<dag> pattern> {
+ let BaseName = asmbase in {
+ def NAME : XX3Form_Rc<opcode, xo, OOL, IOL,
+ !strconcat(asmbase, !strconcat(" ", asmstr)), itin,
+ pattern>;
+ let Defs = [CR6] in
+ def o : XX3Form_Rc<opcode, xo, OOL, IOL,
+ !strconcat(asmbase, !strconcat(". ", asmstr)), itin,
+ []>, isDOT;
+ }
+}
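Like the _r multiclasses in PPCInstrInfo.td, XX3Form_Rcr emits a plain form and a record (".") form under a shared BaseName. As a sketch, a hypothetical defm XSFOO : XX3Form_Rcr<...> would produce XSFOO with assembly "xsfoo ..." and XSFOOo with "xsfoo. ...", the latter marked isDOT and defining CR6 (mirroring how the Altivec/VSX compare record forms report into CR6 rather than CR0).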
+
+def HasVSX : Predicate<"PPCSubTarget->hasVSX()">;
+let Predicates = [HasVSX] in {
+let AddedComplexity = 400 in { // Prefer VSX patterns over non-VSX patterns.
+let neverHasSideEffects = 1 in { // VSX instructions don't have side effects.
+let Uses = [RM] in {
+
+ // Load indexed instructions
+ let mayLoad = 1, canFoldAsLoad = 1 in {
+ def LXSDX : XForm_1<31, 588,
+ (outs vsfrc:$XT), (ins memrr:$src),
+ "lxsdx $XT, $src", IIC_LdStLFD,
+ [(set f64:$XT, (load xoaddr:$src))]>;
+
+ def LXVD2X : XForm_1<31, 844,
+ (outs vsrc:$XT), (ins memrr:$src),
+ "lxvd2x $XT, $src", IIC_LdStLFD,
+ [(set v2f64:$XT, (load xoaddr:$src))]>;
+
+ def LXVDSX : XForm_1<31, 332,
+ (outs vsrc:$XT), (ins memrr:$src),
+ "lxvdsx $XT, $src", IIC_LdStLFD, []>;
+
+ def LXVW4X : XForm_1<31, 780,
+ (outs vsrc:$XT), (ins memrr:$src),
+ "lxvw4x $XT, $src", IIC_LdStLFD, []>;
+ }
+
+ // Store indexed instructions
+ let mayStore = 1 in {
+ def STXSDX : XX1Form<31, 716,
+ (outs), (ins vsfrc:$XT, memrr:$dst),
+ "stxsdx $XT, $dst", IIC_LdStSTFD,
+ [(store f64:$XT, xoaddr:$dst)]>;
+
+ def STXVD2X : XX1Form<31, 972,
+ (outs), (ins vsrc:$XT, memrr:$dst),
+ "stxvd2x $XT, $dst", IIC_LdStSTFD,
+ [(store v2f64:$XT, xoaddr:$dst)]>;
+
+ def STXVW4X : XX1Form<31, 908,
+ (outs), (ins vsrc:$XT, memrr:$dst),
+ "stxvw4x $XT, $dst", IIC_LdStSTFD, []>;
+ }
+
+ // Add/Mul Instructions
+ let isCommutable = 1 in {
+ def XSADDDP : XX3Form<60, 32,
+ (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
+ "xsadddp $XT, $XA, $XB", IIC_VecFP,
+ [(set f64:$XT, (fadd f64:$XA, f64:$XB))]>;
+ def XSMULDP : XX3Form<60, 48,
+ (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
+ "xsmuldp $XT, $XA, $XB", IIC_VecFP,
+ [(set f64:$XT, (fmul f64:$XA, f64:$XB))]>;
+
+ def XVADDDP : XX3Form<60, 96,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvadddp $XT, $XA, $XB", IIC_VecFP,
+ [(set v2f64:$XT, (fadd v2f64:$XA, v2f64:$XB))]>;
+
+ def XVADDSP : XX3Form<60, 64,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvaddsp $XT, $XA, $XB", IIC_VecFP,
+ [(set v4f32:$XT, (fadd v4f32:$XA, v4f32:$XB))]>;
+
+ def XVMULDP : XX3Form<60, 112,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvmuldp $XT, $XA, $XB", IIC_VecFP,
+ [(set v2f64:$XT, (fmul v2f64:$XA, v2f64:$XB))]>;
+
+ def XVMULSP : XX3Form<60, 80,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvmulsp $XT, $XA, $XB", IIC_VecFP,
+ [(set v4f32:$XT, (fmul v4f32:$XA, v4f32:$XB))]>;
+ }
+
+ // Subtract Instructions
+ def XSSUBDP : XX3Form<60, 40,
+ (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
+ "xssubdp $XT, $XA, $XB", IIC_VecFP,
+ [(set f64:$XT, (fsub f64:$XA, f64:$XB))]>;
+
+ def XVSUBDP : XX3Form<60, 104,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvsubdp $XT, $XA, $XB", IIC_VecFP,
+ [(set v2f64:$XT, (fsub v2f64:$XA, v2f64:$XB))]>;
+ def XVSUBSP : XX3Form<60, 72,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvsubsp $XT, $XA, $XB", IIC_VecFP,
+ [(set v4f32:$XT, (fsub v4f32:$XA, v4f32:$XB))]>;
+
+ // FMA Instructions
+ let BaseName = "XSMADDADP" in {
+ let isCommutable = 1 in
+ def XSMADDADP : XX3Form<60, 33,
+ (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
+ "xsmaddadp $XT, $XA, $XB", IIC_VecFP,
+ [(set f64:$XT, (fma f64:$XA, f64:$XB, f64:$XTi))]>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ let IsVSXFMAAlt = 1 in
+ def XSMADDMDP : XX3Form<60, 41,
+ (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
+ "xsmaddmdp $XT, $XA, $XB", IIC_VecFP, []>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ }
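The a/m pairing here follows the ISA: the "a" form overwrites the addend (XT <- XA*XB + XT) while the "m" form overwrites a multiplicand (XT <- XA*XT + XB), so only the "a" form carries an fma selection pattern, with its addend tied to the destination via RegConstraint. The "m" variant is kept as an IsVSXFMAAlt twin, presumably so a later pass can swap between the two encodings when that avoids a register copy. The same shape repeats for all of the FMA blocks below.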
+
+ let BaseName = "XSMSUBADP" in {
+ let isCommutable = 1 in
+ def XSMSUBADP : XX3Form<60, 49,
+ (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
+ "xsmsubadp $XT, $XA, $XB", IIC_VecFP,
+ [(set f64:$XT, (fma f64:$XA, f64:$XB, (fneg f64:$XTi)))]>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ let IsVSXFMAAlt = 1 in
+ def XSMSUBMDP : XX3Form<60, 57,
+ (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
+ "xsmsubmdp $XT, $XA, $XB", IIC_VecFP, []>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ }
+
+ let BaseName = "XSNMADDADP" in {
+ let isCommutable = 1 in
+ def XSNMADDADP : XX3Form<60, 161,
+ (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
+ "xsnmaddadp $XT, $XA, $XB", IIC_VecFP,
+ [(set f64:$XT, (fneg (fma f64:$XA, f64:$XB, f64:$XTi)))]>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ let IsVSXFMAAlt = 1 in
+ def XSNMADDMDP : XX3Form<60, 169,
+ (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
+ "xsnmaddmdp $XT, $XA, $XB", IIC_VecFP, []>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ }
+
+ let BaseName = "XSNMSUBADP" in {
+ let isCommutable = 1 in
+ def XSNMSUBADP : XX3Form<60, 177,
+ (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
+ "xsnmsubadp $XT, $XA, $XB", IIC_VecFP,
+ [(set f64:$XT, (fneg (fma f64:$XA, f64:$XB, (fneg f64:$XTi))))]>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ let IsVSXFMAAlt = 1 in
+ def XSNMSUBMDP : XX3Form<60, 185,
+ (outs vsfrc:$XT), (ins vsfrc:$XTi, vsfrc:$XA, vsfrc:$XB),
+ "xsnmsubmdp $XT, $XA, $XB", IIC_VecFP, []>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ }
+
+ let BaseName = "XVMADDADP" in {
+ let isCommutable = 1 in
+ def XVMADDADP : XX3Form<60, 97,
+ (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
+ "xvmaddadp $XT, $XA, $XB", IIC_VecFP,
+ [(set v2f64:$XT, (fma v2f64:$XA, v2f64:$XB, v2f64:$XTi))]>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ let IsVSXFMAAlt = 1 in
+ def XVMADDMDP : XX3Form<60, 105,
+ (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
+ "xvmaddmdp $XT, $XA, $XB", IIC_VecFP, []>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ }
+
+ let BaseName = "XVMADDASP" in {
+ let isCommutable = 1 in
+ def XVMADDASP : XX3Form<60, 65,
+ (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
+ "xvmaddasp $XT, $XA, $XB", IIC_VecFP,
+ [(set v4f32:$XT, (fma v4f32:$XA, v4f32:$XB, v4f32:$XTi))]>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ let IsVSXFMAAlt = 1 in
+ def XVMADDMSP : XX3Form<60, 73,
+ (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
+ "xvmaddmsp $XT, $XA, $XB", IIC_VecFP, []>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ }
+
+ let BaseName = "XVMSUBADP" in {
+ let isCommutable = 1 in
+ def XVMSUBADP : XX3Form<60, 113,
+ (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
+ "xvmsubadp $XT, $XA, $XB", IIC_VecFP,
+ [(set v2f64:$XT, (fma v2f64:$XA, v2f64:$XB, (fneg v2f64:$XTi)))]>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ let IsVSXFMAAlt = 1 in
+ def XVMSUBMDP : XX3Form<60, 121,
+ (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
+ "xvmsubmdp $XT, $XA, $XB", IIC_VecFP, []>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ }
+
+ let BaseName = "XVMSUBASP" in {
+ let isCommutable = 1 in
+ def XVMSUBASP : XX3Form<60, 81,
+ (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
+ "xvmsubasp $XT, $XA, $XB", IIC_VecFP,
+ [(set v4f32:$XT, (fma v4f32:$XA, v4f32:$XB, (fneg v4f32:$XTi)))]>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ let IsVSXFMAAlt = 1 in
+ def XVMSUBMSP : XX3Form<60, 89,
+ (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
+ "xvmsubmsp $XT, $XA, $XB", IIC_VecFP, []>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ }
+
+ let BaseName = "XVNMADDADP" in {
+ let isCommutable = 1 in
+ def XVNMADDADP : XX3Form<60, 225,
+ (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
+ "xvnmaddadp $XT, $XA, $XB", IIC_VecFP,
+ [(set v2f64:$XT, (fneg (fma v2f64:$XA, v2f64:$XB, v2f64:$XTi)))]>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ let IsVSXFMAAlt = 1 in
+ def XVNMADDMDP : XX3Form<60, 233,
+ (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
+ "xvnmaddmdp $XT, $XA, $XB", IIC_VecFP, []>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ }
+
+ let BaseName = "XVNMADDASP" in {
+ let isCommutable = 1 in
+ def XVNMADDASP : XX3Form<60, 193,
+ (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
+ "xvnmaddasp $XT, $XA, $XB", IIC_VecFP,
+ [(set v4f32:$XT, (fneg (fma v4f32:$XA, v4f32:$XB, v4f32:$XTi)))]>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ let IsVSXFMAAlt = 1 in
+ def XVNMADDMSP : XX3Form<60, 201,
+ (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
+ "xvnmaddmsp $XT, $XA, $XB", IIC_VecFP, []>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ }
+
+ let BaseName = "XVNMSUBADP" in {
+ let isCommutable = 1 in
+ def XVNMSUBADP : XX3Form<60, 241,
+ (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
+ "xvnmsubadp $XT, $XA, $XB", IIC_VecFP,
+ [(set v2f64:$XT, (fneg (fma v2f64:$XA, v2f64:$XB, (fneg v2f64:$XTi))))]>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ let IsVSXFMAAlt = 1 in
+ def XVNMSUBMDP : XX3Form<60, 249,
+ (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
+ "xvnmsubmdp $XT, $XA, $XB", IIC_VecFP, []>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ }
+
+ let BaseName = "XVNMSUBASP" in {
+ let isCommutable = 1 in
+ def XVNMSUBASP : XX3Form<60, 209,
+ (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
+ "xvnmsubasp $XT, $XA, $XB", IIC_VecFP,
+ [(set v4f32:$XT, (fneg (fma v4f32:$XA, v4f32:$XB, (fneg v4f32:$XTi))))]>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ let IsVSXFMAAlt = 1 in
+ def XVNMSUBMSP : XX3Form<60, 217,
+ (outs vsrc:$XT), (ins vsrc:$XTi, vsrc:$XA, vsrc:$XB),
+ "xvnmsubmsp $XT, $XA, $XB", IIC_VecFP, []>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+ AltVSXFMARel;
+ }
+
+ // Division Instructions
+ def XSDIVDP : XX3Form<60, 56,
+ (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
+ "xsdivdp $XT, $XA, $XB", IIC_FPDivD,
+ [(set f64:$XT, (fdiv f64:$XA, f64:$XB))]>;
+ def XSSQRTDP : XX2Form<60, 75,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xssqrtdp $XT, $XB", IIC_FPSqrtD,
+ [(set f64:$XT, (fsqrt f64:$XB))]>;
+
+ def XSREDP : XX2Form<60, 90,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xsredp $XT, $XB", IIC_VecFP,
+ [(set f64:$XT, (PPCfre f64:$XB))]>;
+ def XSRSQRTEDP : XX2Form<60, 74,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xsrsqrtedp $XT, $XB", IIC_VecFP,
+ [(set f64:$XT, (PPCfrsqrte f64:$XB))]>;
+
+ def XSTDIVDP : XX3Form_1<60, 61,
+ (outs crrc:$crD), (ins vsfrc:$XA, vsfrc:$XB),
+ "xstdivdp $crD, $XA, $XB", IIC_FPCompare, []>;
+ def XSTSQRTDP : XX2Form_1<60, 106,
+ (outs crrc:$crD), (ins vsfrc:$XB),
+ "xstsqrtdp $crD, $XB", IIC_FPCompare, []>;
+
+ def XVDIVDP : XX3Form<60, 120,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvdivdp $XT, $XA, $XB", IIC_FPDivD,
+ [(set v2f64:$XT, (fdiv v2f64:$XA, v2f64:$XB))]>;
+ def XVDIVSP : XX3Form<60, 88,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvdivsp $XT, $XA, $XB", IIC_FPDivS,
+ [(set v4f32:$XT, (fdiv v4f32:$XA, v4f32:$XB))]>;
+
+ def XVSQRTDP : XX2Form<60, 203,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvsqrtdp $XT, $XB", IIC_FPSqrtD,
+ [(set v2f64:$XT, (fsqrt v2f64:$XB))]>;
+ def XVSQRTSP : XX2Form<60, 139,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvsqrtsp $XT, $XB", IIC_FPSqrtS,
+ [(set v4f32:$XT, (fsqrt v4f32:$XB))]>;
+
+ def XVTDIVDP : XX3Form_1<60, 125,
+ (outs crrc:$crD), (ins vsrc:$XA, vsrc:$XB),
+ "xvtdivdp $crD, $XA, $XB", IIC_FPCompare, []>;
+ def XVTDIVSP : XX3Form_1<60, 93,
+ (outs crrc:$crD), (ins vsrc:$XA, vsrc:$XB),
+ "xvtdivsp $crD, $XA, $XB", IIC_FPCompare, []>;
+
+ def XVTSQRTDP : XX2Form_1<60, 234,
+ (outs crrc:$crD), (ins vsrc:$XB),
+ "xvtsqrtdp $crD, $XB", IIC_FPCompare, []>;
+ def XVTSQRTSP : XX2Form_1<60, 170,
+ (outs crrc:$crD), (ins vsrc:$XB),
+ "xvtsqrtsp $crD, $XB", IIC_FPCompare, []>;
+
+ def XVREDP : XX2Form<60, 218,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvredp $XT, $XB", IIC_VecFP,
+ [(set v2f64:$XT, (PPCfre v2f64:$XB))]>;
+ def XVRESP : XX2Form<60, 154,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvresp $XT, $XB", IIC_VecFP,
+ [(set v4f32:$XT, (PPCfre v4f32:$XB))]>;
+
+ def XVRSQRTEDP : XX2Form<60, 202,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvrsqrtedp $XT, $XB", IIC_VecFP,
+ [(set v2f64:$XT, (PPCfrsqrte v2f64:$XB))]>;
+ def XVRSQRTESP : XX2Form<60, 138,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvrsqrtesp $XT, $XB", IIC_VecFP,
+ [(set v4f32:$XT, (PPCfrsqrte v4f32:$XB))]>;
+
+ // Compare Instructions
+ def XSCMPODP : XX3Form_1<60, 43,
+ (outs crrc:$crD), (ins vsfrc:$XA, vsfrc:$XB),
+ "xscmpodp $crD, $XA, $XB", IIC_FPCompare, []>;
+ def XSCMPUDP : XX3Form_1<60, 35,
+ (outs crrc:$crD), (ins vsfrc:$XA, vsfrc:$XB),
+ "xscmpudp $crD, $XA, $XB", IIC_FPCompare, []>;
+
+ defm XVCMPEQDP : XX3Form_Rcr<60, 99,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvcmpeqdp", "$XT, $XA, $XB", IIC_VecFPCompare, []>;
+ defm XVCMPEQSP : XX3Form_Rcr<60, 67,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvcmpeqsp", "$XT, $XA, $XB", IIC_VecFPCompare, []>;
+ defm XVCMPGEDP : XX3Form_Rcr<60, 115,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvcmpgedp", "$XT, $XA, $XB", IIC_VecFPCompare, []>;
+ defm XVCMPGESP : XX3Form_Rcr<60, 83,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvcmpgesp", "$XT, $XA, $XB", IIC_VecFPCompare, []>;
+ defm XVCMPGTDP : XX3Form_Rcr<60, 107,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvcmpgtdp", "$XT, $XA, $XB", IIC_VecFPCompare, []>;
+ defm XVCMPGTSP : XX3Form_Rcr<60, 75,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvcmpgtsp", "$XT, $XA, $XB", IIC_VecFPCompare, []>;
+
+ // Move Instructions
+ def XSABSDP : XX2Form<60, 345,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xsabsdp $XT, $XB", IIC_VecFP,
+ [(set f64:$XT, (fabs f64:$XB))]>;
+ def XSNABSDP : XX2Form<60, 361,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xsnabsdp $XT, $XB", IIC_VecFP,
+ [(set f64:$XT, (fneg (fabs f64:$XB)))]>;
+ def XSNEGDP : XX2Form<60, 377,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xsnegdp $XT, $XB", IIC_VecFP,
+ [(set f64:$XT, (fneg f64:$XB))]>;
+ def XSCPSGNDP : XX3Form<60, 176,
+ (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
+ "xscpsgndp $XT, $XA, $XB", IIC_VecFP,
+ [(set f64:$XT, (fcopysign f64:$XB, f64:$XA))]>;
+
+ def XVABSDP : XX2Form<60, 473,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvabsdp $XT, $XB", IIC_VecFP,
+ [(set v2f64:$XT, (fabs v2f64:$XB))]>;
+
+ def XVABSSP : XX2Form<60, 409,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvabssp $XT, $XB", IIC_VecFP,
+ [(set v4f32:$XT, (fabs v4f32:$XB))]>;
+
+ def XVCPSGNDP : XX3Form<60, 240,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvcpsgndp $XT, $XA, $XB", IIC_VecFP,
+ [(set v2f64:$XT, (fcopysign v2f64:$XB, v2f64:$XA))]>;
+ def XVCPSGNSP : XX3Form<60, 208,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvcpsgnsp $XT, $XA, $XB", IIC_VecFP,
+ [(set v4f32:$XT, (fcopysign v4f32:$XB, v4f32:$XA))]>;
+
+ def XVNABSDP : XX2Form<60, 489,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvnabsdp $XT, $XB", IIC_VecFP,
+ [(set v2f64:$XT, (fneg (fabs v2f64:$XB)))]>;
+ def XVNABSSP : XX2Form<60, 425,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvnabssp $XT, $XB", IIC_VecFP,
+ [(set v4f32:$XT, (fneg (fabs v4f32:$XB)))]>;
+
+ def XVNEGDP : XX2Form<60, 505,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvnegdp $XT, $XB", IIC_VecFP,
+ [(set v2f64:$XT, (fneg v2f64:$XB))]>;
+ def XVNEGSP : XX2Form<60, 441,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvnegsp $XT, $XB", IIC_VecFP,
+ [(set v4f32:$XT, (fneg v4f32:$XB))]>;
+
+ // Conversion Instructions
+ def XSCVDPSP : XX2Form<60, 265,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xscvdpsp $XT, $XB", IIC_VecFP, []>;
+ def XSCVDPSXDS : XX2Form<60, 344,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xscvdpsxds $XT, $XB", IIC_VecFP,
+ [(set f64:$XT, (PPCfctidz f64:$XB))]>;
+ def XSCVDPSXWS : XX2Form<60, 88,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xscvdpsxws $XT, $XB", IIC_VecFP,
+ [(set f64:$XT, (PPCfctiwz f64:$XB))]>;
+ def XSCVDPUXDS : XX2Form<60, 328,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xscvdpuxds $XT, $XB", IIC_VecFP,
+ [(set f64:$XT, (PPCfctiduz f64:$XB))]>;
+ def XSCVDPUXWS : XX2Form<60, 72,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xscvdpuxws $XT, $XB", IIC_VecFP,
+ [(set f64:$XT, (PPCfctiwuz f64:$XB))]>;
+ def XSCVSPDP : XX2Form<60, 329,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xscvspdp $XT, $XB", IIC_VecFP, []>;
+ def XSCVSXDDP : XX2Form<60, 376,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xscvsxddp $XT, $XB", IIC_VecFP,
+ [(set f64:$XT, (PPCfcfid f64:$XB))]>;
+ def XSCVUXDDP : XX2Form<60, 360,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xscvuxddp $XT, $XB", IIC_VecFP,
+ [(set f64:$XT, (PPCfcfidu f64:$XB))]>;
+
+ def XVCVDPSP : XX2Form<60, 393,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvcvdpsp $XT, $XB", IIC_VecFP, []>;
+ def XVCVDPSXDS : XX2Form<60, 472,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvcvdpsxds $XT, $XB", IIC_VecFP,
+ [(set v2i64:$XT, (fp_to_sint v2f64:$XB))]>;
+ def XVCVDPSXWS : XX2Form<60, 216,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvcvdpsxws $XT, $XB", IIC_VecFP, []>;
+ def XVCVDPUXDS : XX2Form<60, 456,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvcvdpuxds $XT, $XB", IIC_VecFP,
+ [(set v2i64:$XT, (fp_to_uint v2f64:$XB))]>;
+ def XVCVDPUXWS : XX2Form<60, 200,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvcvdpuxws $XT, $XB", IIC_VecFP, []>;
+
+ def XVCVSPDP : XX2Form<60, 457,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvcvspdp $XT, $XB", IIC_VecFP, []>;
+ def XVCVSPSXDS : XX2Form<60, 408,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvcvspsxds $XT, $XB", IIC_VecFP, []>;
+ def XVCVSPSXWS : XX2Form<60, 152,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvcvspsxws $XT, $XB", IIC_VecFP, []>;
+ def XVCVSPUXDS : XX2Form<60, 392,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvcvspuxds $XT, $XB", IIC_VecFP, []>;
+ def XVCVSPUXWS : XX2Form<60, 136,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvcvspuxws $XT, $XB", IIC_VecFP, []>;
+ def XVCVSXDDP : XX2Form<60, 504,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvcvsxddp $XT, $XB", IIC_VecFP,
+ [(set v2f64:$XT, (sint_to_fp v2i64:$XB))]>;
+ def XVCVSXDSP : XX2Form<60, 440,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvcvsxdsp $XT, $XB", IIC_VecFP, []>;
+ def XVCVSXWDP : XX2Form<60, 248,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvcvsxwdp $XT, $XB", IIC_VecFP, []>;
+ def XVCVSXWSP : XX2Form<60, 184,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvcvsxwsp $XT, $XB", IIC_VecFP, []>;
+ def XVCVUXDDP : XX2Form<60, 488,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvcvuxddp $XT, $XB", IIC_VecFP,
+ [(set v2f64:$XT, (uint_to_fp v2i64:$XB))]>;
+ def XVCVUXDSP : XX2Form<60, 424,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvcvuxdsp $XT, $XB", IIC_VecFP, []>;
+ def XVCVUXWDP : XX2Form<60, 232,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvcvuxwdp $XT, $XB", IIC_VecFP, []>;
+ def XVCVUXWSP : XX2Form<60, 168,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvcvuxwsp $XT, $XB", IIC_VecFP, []>;
+
+ // Rounding Instructions
+ def XSRDPI : XX2Form<60, 73,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xsrdpi $XT, $XB", IIC_VecFP,
+ [(set f64:$XT, (frnd f64:$XB))]>;
+ def XSRDPIC : XX2Form<60, 107,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xsrdpic $XT, $XB", IIC_VecFP,
+ [(set f64:$XT, (fnearbyint f64:$XB))]>;
+ def XSRDPIM : XX2Form<60, 121,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xsrdpim $XT, $XB", IIC_VecFP,
+ [(set f64:$XT, (ffloor f64:$XB))]>;
+ def XSRDPIP : XX2Form<60, 105,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xsrdpip $XT, $XB", IIC_VecFP,
+ [(set f64:$XT, (fceil f64:$XB))]>;
+ def XSRDPIZ : XX2Form<60, 89,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xsrdpiz $XT, $XB", IIC_VecFP,
+ [(set f64:$XT, (ftrunc f64:$XB))]>;
+
+ def XVRDPI : XX2Form<60, 201,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvrdpi $XT, $XB", IIC_VecFP,
+ [(set v2f64:$XT, (frnd v2f64:$XB))]>;
+ def XVRDPIC : XX2Form<60, 235,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvrdpic $XT, $XB", IIC_VecFP,
+ [(set v2f64:$XT, (fnearbyint v2f64:$XB))]>;
+ def XVRDPIM : XX2Form<60, 249,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvrdpim $XT, $XB", IIC_VecFP,
+ [(set v2f64:$XT, (ffloor v2f64:$XB))]>;
+ def XVRDPIP : XX2Form<60, 233,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvrdpip $XT, $XB", IIC_VecFP,
+ [(set v2f64:$XT, (fceil v2f64:$XB))]>;
+ def XVRDPIZ : XX2Form<60, 217,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvrdpiz $XT, $XB", IIC_VecFP,
+ [(set v2f64:$XT, (ftrunc v2f64:$XB))]>;
+
+ def XVRSPI : XX2Form<60, 137,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvrspi $XT, $XB", IIC_VecFP,
+ [(set v4f32:$XT, (frnd v4f32:$XB))]>;
+ def XVRSPIC : XX2Form<60, 171,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvrspic $XT, $XB", IIC_VecFP,
+ [(set v4f32:$XT, (fnearbyint v4f32:$XB))]>;
+ def XVRSPIM : XX2Form<60, 185,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvrspim $XT, $XB", IIC_VecFP,
+ [(set v4f32:$XT, (ffloor v4f32:$XB))]>;
+ def XVRSPIP : XX2Form<60, 169,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvrspip $XT, $XB", IIC_VecFP,
+ [(set v4f32:$XT, (fceil v4f32:$XB))]>;
+ def XVRSPIZ : XX2Form<60, 153,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvrspiz $XT, $XB", IIC_VecFP,
+ [(set v4f32:$XT, (ftrunc v4f32:$XB))]>;
+
+ // Max/Min Instructions
+ let isCommutable = 1 in {
+ def XSMAXDP : XX3Form<60, 160,
+ (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
+ "xsmaxdp $XT, $XA, $XB", IIC_VecFP, []>;
+ def XSMINDP : XX3Form<60, 168,
+ (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
+ "xsmindp $XT, $XA, $XB", IIC_VecFP, []>;
+
+ def XVMAXDP : XX3Form<60, 224,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvmaxdp $XT, $XA, $XB", IIC_VecFP, []>;
+ def XVMINDP : XX3Form<60, 232,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvmindp $XT, $XA, $XB", IIC_VecFP, []>;
+
+ def XVMAXSP : XX3Form<60, 192,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvmaxsp $XT, $XA, $XB", IIC_VecFP, []>;
+ def XVMINSP : XX3Form<60, 200,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvminsp $XT, $XA, $XB", IIC_VecFP, []>;
+ } // isCommutable
+} // Uses = [RM]
+
+ // Logical Instructions
+ let isCommutable = 1 in
+ def XXLAND : XX3Form<60, 130,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xxland $XT, $XA, $XB", IIC_VecGeneral,
+ [(set v4i32:$XT, (and v4i32:$XA, v4i32:$XB))]>;
+ def XXLANDC : XX3Form<60, 138,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xxlandc $XT, $XA, $XB", IIC_VecGeneral,
+ [(set v4i32:$XT, (and v4i32:$XA,
+ (vnot_ppc v4i32:$XB)))]>;
+ let isCommutable = 1 in {
+ def XXLNOR : XX3Form<60, 162,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xxlnor $XT, $XA, $XB", IIC_VecGeneral,
+ [(set v4i32:$XT, (vnot_ppc (or v4i32:$XA,
+ v4i32:$XB)))]>;
+ def XXLOR : XX3Form<60, 146,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xxlor $XT, $XA, $XB", IIC_VecGeneral,
+ [(set v4i32:$XT, (or v4i32:$XA, v4i32:$XB))]>;
+ let isCodeGenOnly = 1 in
+ def XXLORf: XX3Form<60, 146,
+ (outs vsfrc:$XT), (ins vsfrc:$XA, vsfrc:$XB),
+ "xxlor $XT, $XA, $XB", IIC_VecGeneral, []>;
+ def XXLXOR : XX3Form<60, 154,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xxlxor $XT, $XA, $XB", IIC_VecGeneral,
+ [(set v4i32:$XT, (xor v4i32:$XA, v4i32:$XB))]>;
+ } // isCommutable
+
+ // Permutation Instructions
+ def XXMRGHW : XX3Form<60, 18,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xxmrghw $XT, $XA, $XB", IIC_VecPerm, []>;
+ def XXMRGLW : XX3Form<60, 50,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xxmrglw $XT, $XA, $XB", IIC_VecPerm, []>;
+
+ def XXPERMDI : XX3Form_2<60, 10,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB, u2imm:$DM),
+ "xxpermdi $XT, $XA, $XB, $DM", IIC_VecPerm, []>;
+ def XXSEL : XX4Form<60, 3,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB, vsrc:$XC),
+ "xxsel $XT, $XA, $XB, $XC", IIC_VecPerm, []>;
+
+ def XXSLDWI : XX3Form_2<60, 2,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB, u2imm:$SHW),
+ "xxsldwi $XT, $XA, $XB, $SHW", IIC_VecPerm, []>;
+ def XXSPLTW : XX2Form_2<60, 164,
+ (outs vsrc:$XT), (ins vsrc:$XB, u2imm:$UIM),
+ "xxspltw $XT, $XB, $UIM", IIC_VecPerm, []>;
+} // neverHasSideEffects
+} // AddedComplexity
+
+def : InstAlias<"xvmovdp $XT, $XB",
+ (XVCPSGNDP vsrc:$XT, vsrc:$XB, vsrc:$XB)>;
+def : InstAlias<"xvmovsp $XT, $XB",
+ (XVCPSGNSP vsrc:$XT, vsrc:$XB, vsrc:$XB)>;
+
+def : InstAlias<"xxspltd $XT, $XB, 0",
+ (XXPERMDI vsrc:$XT, vsrc:$XB, vsrc:$XB, 0)>;
+def : InstAlias<"xxspltd $XT, $XB, 1",
+ (XXPERMDI vsrc:$XT, vsrc:$XB, vsrc:$XB, 3)>;
+def : InstAlias<"xxmrghd $XT, $XA, $XB",
+ (XXPERMDI vsrc:$XT, vsrc:$XA, vsrc:$XB, 0)>;
+def : InstAlias<"xxmrgld $XT, $XA, $XB",
+ (XXPERMDI vsrc:$XT, vsrc:$XA, vsrc:$XB, 3)>;
+def : InstAlias<"xxswapd $XT, $XB",
+ (XXPERMDI vsrc:$XT, vsrc:$XB, vsrc:$XB, 2)>;
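The DM immediates chosen in these aliases follow directly from how xxpermdi selects doublewords. A minimal C++ sketch of that selection (illustrative only, not part of the vendor sources; the helper name xxpermdi_model is made up):

#include <array>
#include <cassert>
#include <cstdint>

// xxpermdi XT,XA,XB,DM: result doubleword 0 (big-endian numbering) is XA's
// doubleword selected by the high bit of DM; result doubleword 1 is XB's
// doubleword selected by the low bit of DM.
static std::array<uint64_t, 2> xxpermdi_model(std::array<uint64_t, 2> XA,
                                              std::array<uint64_t, 2> XB,
                                              unsigned DM) {
  return {XA[(DM >> 1) & 1], XB[DM & 1]};
}

int main() {
  std::array<uint64_t, 2> A = {0x11, 0x22}, B = {0xAA, 0xBB};
  assert((xxpermdi_model(B, B, 0) == std::array<uint64_t, 2>{0xAA, 0xAA})); // xxspltd ..., 0
  assert((xxpermdi_model(B, B, 3) == std::array<uint64_t, 2>{0xBB, 0xBB})); // xxspltd ..., 1
  assert((xxpermdi_model(A, B, 0) == std::array<uint64_t, 2>{0x11, 0xAA})); // xxmrghd
  assert((xxpermdi_model(A, B, 3) == std::array<uint64_t, 2>{0x22, 0xBB})); // xxmrgld
  assert((xxpermdi_model(B, B, 2) == std::array<uint64_t, 2>{0xBB, 0xAA})); // xxswapd
  return 0;
}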
+
+let AddedComplexity = 400 in { // Prefer VSX patterns over non-VSX patterns.
+def : Pat<(v2f64 (scalar_to_vector f64:$A)),
+ (v2f64 (SUBREG_TO_REG (i64 1), $A, sub_64))>;
+
+def : Pat<(f64 (vector_extract v2f64:$S, 0)),
+ (f64 (EXTRACT_SUBREG $S, sub_64))>;
+def : Pat<(f64 (vector_extract v2f64:$S, 1)),
+ (f64 (EXTRACT_SUBREG (XXPERMDI $S, $S, 2), sub_64))>;
+
+// Additional fnmsub patterns: -a*c + b == -(a*c - b)
+def : Pat<(fma (fneg f64:$A), f64:$C, f64:$B),
+ (XSNMSUBADP $B, $C, $A)>;
+def : Pat<(fma f64:$A, (fneg f64:$C), f64:$B),
+ (XSNMSUBADP $B, $C, $A)>;
+
+def : Pat<(fma (fneg v2f64:$A), v2f64:$C, v2f64:$B),
+ (XVNMSUBADP $B, $C, $A)>;
+def : Pat<(fma v2f64:$A, (fneg v2f64:$C), v2f64:$B),
+ (XVNMSUBADP $B, $C, $A)>;
+
+def : Pat<(fma (fneg v4f32:$A), v4f32:$C, v4f32:$B),
+ (XVNMSUBASP $B, $C, $A)>;
+def : Pat<(fma v4f32:$A, (fneg v4f32:$C), v4f32:$B),
+ (XVNMSUBASP $B, $C, $A)>;
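The comment above states the algebraic identity that justifies selecting the negative multiply-subtract forms for these patterns. A tiny numeric check (illustrative only; the chosen values are exact, so no rounding enters the comparison):

#include <cassert>
#include <cmath>

// fma(-a, c, b) computes -a*c + b, which equals -(a*c - b), i.e. the nmsub
// form that the patterns above map onto (XSNMSUBADP/XVNMSUBADP/XVNMSUBASP).
int main() {
  double a = 3.0, b = 2.0, c = 5.0;
  double lhs = std::fma(-a, c, b); // -a*c + b
  double rhs = -(a * c - b);       // nmsub(a, c, b)
  assert(lhs == rhs);
  return 0;
}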
+
+def : Pat<(v2f64 (bitconvert v4f32:$A)),
+ (COPY_TO_REGCLASS $A, VSRC)>;
+def : Pat<(v2f64 (bitconvert v4i32:$A)),
+ (COPY_TO_REGCLASS $A, VSRC)>;
+def : Pat<(v2f64 (bitconvert v8i16:$A)),
+ (COPY_TO_REGCLASS $A, VSRC)>;
+def : Pat<(v2f64 (bitconvert v16i8:$A)),
+ (COPY_TO_REGCLASS $A, VSRC)>;
+
+def : Pat<(v4f32 (bitconvert v2f64:$A)),
+ (COPY_TO_REGCLASS $A, VRRC)>;
+def : Pat<(v4i32 (bitconvert v2f64:$A)),
+ (COPY_TO_REGCLASS $A, VRRC)>;
+def : Pat<(v8i16 (bitconvert v2f64:$A)),
+ (COPY_TO_REGCLASS $A, VRRC)>;
+def : Pat<(v16i8 (bitconvert v2f64:$A)),
+ (COPY_TO_REGCLASS $A, VRRC)>;
+
+def : Pat<(v2i64 (bitconvert v4f32:$A)),
+ (COPY_TO_REGCLASS $A, VSRC)>;
+def : Pat<(v2i64 (bitconvert v4i32:$A)),
+ (COPY_TO_REGCLASS $A, VSRC)>;
+def : Pat<(v2i64 (bitconvert v8i16:$A)),
+ (COPY_TO_REGCLASS $A, VSRC)>;
+def : Pat<(v2i64 (bitconvert v16i8:$A)),
+ (COPY_TO_REGCLASS $A, VSRC)>;
+
+def : Pat<(v4f32 (bitconvert v2i64:$A)),
+ (COPY_TO_REGCLASS $A, VRRC)>;
+def : Pat<(v4i32 (bitconvert v2i64:$A)),
+ (COPY_TO_REGCLASS $A, VRRC)>;
+def : Pat<(v8i16 (bitconvert v2i64:$A)),
+ (COPY_TO_REGCLASS $A, VRRC)>;
+def : Pat<(v16i8 (bitconvert v2i64:$A)),
+ (COPY_TO_REGCLASS $A, VRRC)>;
+
+def : Pat<(v2f64 (bitconvert v2i64:$A)),
+ (COPY_TO_REGCLASS $A, VRRC)>;
+def : Pat<(v2i64 (bitconvert v2f64:$A)),
+ (COPY_TO_REGCLASS $A, VRRC)>;
+
+// sign extension patterns
+// To extend "in place" from v2i32 to v2i64, we have input data like:
+// | undef | i32 | undef | i32 |
+// but xvcvsxwdp expects the input in big-endian format:
+// | i32 | undef | i32 | undef |
+// so we need to shift everything to the left by one i32 (word) before
+// the conversion.
+def : Pat<(sext_inreg v2i64:$C, v2i32),
+ (XVCVDPSXDS (XVCVSXWDP (XXSLDWI $C, $C, 1)))>;
+def : Pat<(v2f64 (sint_to_fp (sext_inreg v2i64:$C, v2i32))),
+ (XVCVSXWDP (XXSLDWI $C, $C, 1))>;
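A minimal C++ model of the word rotation described in the comment above (illustrative only, not part of the vendor sources; helper names are made up):

#include <array>
#include <cassert>
#include <cstdint>

// xxsldwi XT,XA,XB,SHW: result word i = word (i + SHW) of XA concatenated
// with XB, using big-endian word numbering 0..3.
static std::array<uint32_t, 4> xxsldwi_model(std::array<uint32_t, 4> A,
                                             std::array<uint32_t, 4> B,
                                             unsigned SHW) {
  std::array<uint32_t, 4> R{};
  for (unsigned i = 0; i < 4; ++i) {
    unsigned j = i + SHW;
    R[i] = j < 4 ? A[j] : B[j - 4];
  }
  return R;
}

int main() {
  // | undef | i32 | undef | i32 |  -- the payload sits in words 1 and 3.
  std::array<uint32_t, 4> In = {0xDEAD, 0xFFFFFFFE, 0xBEEF, 7};
  std::array<uint32_t, 4> Shifted = xxsldwi_model(In, In, 1);
  // | i32 | undef | i32 | undef |  -- the payload now sits in words 0 and 2,
  // which is where xvcvsxwdp reads its two inputs from.
  assert(Shifted[0] == 0xFFFFFFFE && Shifted[2] == 7);
  double D0 = static_cast<int32_t>(Shifted[0]); // sign-extended: -2.0
  double D2 = static_cast<int32_t>(Shifted[2]); //                 7.0
  assert(D0 == -2.0 && D2 == 7.0);
  return 0;
}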
+
+} // AddedComplexity
+} // HasVSX
+
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.cpp
index 5e3a48d..e5f113a 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.cpp
@@ -11,10 +11,9 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "jit"
#include "PPCJITInfo.h"
#include "PPCRelocations.h"
-#include "PPCTargetMachine.h"
+#include "PPCSubtarget.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -22,8 +21,15 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "jit"
+
static TargetJITInfo::JITCompilerFn JITCompilerFunction;
+PPCJITInfo::PPCJITInfo(PPCSubtarget &STI)
+ : Subtarget(STI), is64Bit(STI.isPPC64()) {
+ useGOT = 0;
+}
+
#define BUILD_ADDIS(RD,RS,IMM16) \
((15 << 26) | ((RD) << 21) | ((RS) << 16) | ((IMM16) & 65535))
#define BUILD_ORI(RD,RS,UIMM16) \
@@ -214,6 +220,10 @@ asm(
".text\n"
".align 2\n"
".globl PPC64CompilationCallback\n"
+#if _CALL_ELF == 2
+ ".type PPC64CompilationCallback,@function\n"
+"PPC64CompilationCallback:\n"
+#else
".section \".opd\",\"aw\",@progbits\n"
".align 3\n"
"PPC64CompilationCallback:\n"
@@ -223,6 +233,7 @@ asm(
".align 4\n"
".type PPC64CompilationCallback,@function\n"
".L.PPC64CompilationCallback:\n"
+#endif
# else
asm(
".text\n"
@@ -387,7 +398,7 @@ void *PPCJITInfo::emitFunctionStub(const Function* F, void *Fn,
JCE.emitWordBE(0xf821ffb1); // stdu r1,-80(r1)
JCE.emitWordBE(0x7d6802a6); // mflr r11
JCE.emitWordBE(0xf9610060); // std r11, 96(r1)
- } else if (TM.getSubtargetImpl()->isDarwinABI()){
+ } else if (Subtarget.isDarwinABI()){
JCE.emitWordBE(0x9421ffe0); // stwu r1,-32(r1)
JCE.emitWordBE(0x7d6802a6); // mflr r11
JCE.emitWordBE(0x91610028); // stw r11, 40(r1)
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.h b/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.h
index 46d4a08..b6b37ff 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.h
@@ -18,32 +18,29 @@
#include "llvm/Target/TargetJITInfo.h"
namespace llvm {
- class PPCTargetMachine;
+class PPCSubtarget;
+class PPCJITInfo : public TargetJITInfo {
+protected:
+ PPCSubtarget &Subtarget;
+ bool is64Bit;
- class PPCJITInfo : public TargetJITInfo {
- protected:
- PPCTargetMachine &TM;
- bool is64Bit;
- public:
- PPCJITInfo(PPCTargetMachine &tm, bool tmIs64Bit) : TM(tm) {
- useGOT = 0;
- is64Bit = tmIs64Bit;
- }
+public:
+ PPCJITInfo(PPCSubtarget &STI);
- virtual StubLayout getStubLayout();
- virtual void *emitFunctionStub(const Function* F, void *Fn,
- JITCodeEmitter &JCE);
- virtual LazyResolverFn getLazyResolverFunction(JITCompilerFn);
- virtual void relocate(void *Function, MachineRelocation *MR,
- unsigned NumRelocs, unsigned char* GOTBase);
-
- /// replaceMachineCodeForFunction - Make it so that calling the function
- /// whose machine code is at OLD turns into a call to NEW, perhaps by
- /// overwriting OLD with a branch to NEW. This is used for self-modifying
- /// code.
- ///
- virtual void replaceMachineCodeForFunction(void *Old, void *New);
- };
+ StubLayout getStubLayout() override;
+ void *emitFunctionStub(const Function *F, void *Fn,
+ JITCodeEmitter &JCE) override;
+ LazyResolverFn getLazyResolverFunction(JITCompilerFn) override;
+ void relocate(void *Function, MachineRelocation *MR, unsigned NumRelocs,
+ unsigned char *GOTBase) override;
+
+ /// replaceMachineCodeForFunction - Make it so that calling the function
+ /// whose machine code is at OLD turns into a call to NEW, perhaps by
+ /// overwriting OLD with a branch to NEW. This is used for self-modifying
+ /// code.
+ ///
+ void replaceMachineCodeForFunction(void *Old, void *New) override;
+};
}
#endif
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp b/contrib/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp
index 83f5703..6680413 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp
@@ -20,12 +20,14 @@
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
-#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
using namespace llvm;
static MachineModuleInfoMachO &getMachOMMI(AsmPrinter &AP) {
@@ -35,36 +37,41 @@ static MachineModuleInfoMachO &getMachOMMI(AsmPrinter &AP) {
static MCSymbol *GetSymbolFromOperand(const MachineOperand &MO, AsmPrinter &AP){
const TargetMachine &TM = AP.TM;
+ Mangler *Mang = AP.Mang;
+ const DataLayout *DL = TM.getDataLayout();
MCContext &Ctx = AP.OutContext;
bool isDarwin = TM.getSubtarget<PPCSubtarget>().isDarwin();
SmallString<128> Name;
+ StringRef Suffix;
+ if (MO.getTargetFlags() == PPCII::MO_PLT_OR_STUB) {
+ if (isDarwin)
+ Suffix = "$stub";
+ } else if (MO.getTargetFlags() & PPCII::MO_NLP_FLAG)
+ Suffix = "$non_lazy_ptr";
+
+ if (!Suffix.empty())
+ Name += DL->getPrivateGlobalPrefix();
+
+ unsigned PrefixLen = Name.size();
+
if (!MO.isGlobal()) {
assert(MO.isSymbol() && "Isn't a symbol reference");
- Name += AP.MAI->getGlobalPrefix();
- Name += MO.getSymbolName();
- } else {
+ Mang->getNameWithPrefix(Name, MO.getSymbolName());
+ } else {
const GlobalValue *GV = MO.getGlobal();
- bool isImplicitlyPrivate = false;
- if (MO.getTargetFlags() == PPCII::MO_PLT_OR_STUB ||
- (MO.getTargetFlags() & PPCII::MO_NLP_FLAG))
- isImplicitlyPrivate = true;
-
- AP.Mang->getNameWithPrefix(Name, GV, isImplicitlyPrivate);
+ TM.getNameWithPrefix(Name, GV, *Mang);
}
-
+
+ unsigned OrigLen = Name.size() - PrefixLen;
+
+ Name += Suffix;
+ MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());
+ StringRef OrigName = StringRef(Name).substr(PrefixLen, OrigLen);
+
// If the target flags on the operand changes the name of the symbol, do that
// before we return the symbol.
if (MO.getTargetFlags() == PPCII::MO_PLT_OR_STUB && isDarwin) {
- Name += "$stub";
- const char *PGP = AP.MAI->getPrivateGlobalPrefix();
- const char *Prefix = "";
- if (!Name.startswith(PGP)) {
- // http://llvm.org/bugs/show_bug.cgi?id=15763
- // all stubs and lazy_ptrs should be local symbols, which need leading 'L'
- Prefix = PGP;
- }
- MCSymbol *Sym = Ctx.GetOrCreateSymbol(Twine(Prefix) + Twine(Name));
MachineModuleInfoImpl::StubValueTy &StubSym =
getMachOMMI(AP).getFnStubEntry(Sym);
if (StubSym.getPointer())
@@ -76,10 +83,9 @@ static MCSymbol *GetSymbolFromOperand(const MachineOperand &MO, AsmPrinter &AP){
StubValueTy(AP.getSymbol(MO.getGlobal()),
!MO.getGlobal()->hasInternalLinkage());
} else {
- Name.erase(Name.end()-5, Name.end());
StubSym =
MachineModuleInfoImpl::
- StubValueTy(Ctx.GetOrCreateSymbol(Name.str()), false);
+ StubValueTy(Ctx.GetOrCreateSymbol(OrigName), false);
}
return Sym;
}
@@ -87,16 +93,13 @@ static MCSymbol *GetSymbolFromOperand(const MachineOperand &MO, AsmPrinter &AP){
// If the symbol reference is actually to a non_lazy_ptr, not to the symbol,
// then add the suffix.
if (MO.getTargetFlags() & PPCII::MO_NLP_FLAG) {
- Name += "$non_lazy_ptr";
- MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());
-
MachineModuleInfoMachO &MachO = getMachOMMI(AP);
MachineModuleInfoImpl::StubValueTy &StubSym =
(MO.getTargetFlags() & PPCII::MO_NLP_HIDDEN_FLAG) ?
MachO.getHiddenGVStubEntry(Sym) : MachO.getGVStubEntry(Sym);
- if (StubSym.getPointer() == 0) {
+ if (!StubSym.getPointer()) {
assert(MO.isGlobal() && "Extern symbol not handled yet");
StubSym = MachineModuleInfoImpl::
StubValueTy(AP.getSymbol(MO.getGlobal()),
@@ -105,7 +108,7 @@ static MCSymbol *GetSymbolFromOperand(const MachineOperand &MO, AsmPrinter &AP){
return Sym;
}
- return Ctx.GetOrCreateSymbol(Name.str());
+ return Sym;
}
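The PrefixLen/OrigLen arithmetic introduced above builds the suffixed, private-label stub symbol while remembering the original mangled name for the stub's target. A small standalone sketch of that string handling (illustrative only; the "L" private prefix and "_" mangling are assumed Darwin conventions):

#include <cassert>
#include <string>

int main() {
  std::string PrivatePrefix = "L";   // assumed DataLayout private global prefix
  std::string Mangled = "_foo";      // assumed mangled name of foo
  std::string Suffix = "$stub";      // MO_PLT_OR_STUB on Darwin

  std::string Name = PrivatePrefix;  // prefix goes on first (only when a suffix is used)
  unsigned PrefixLen = Name.size();
  Name += Mangled;                   // the mangled symbol itself
  unsigned OrigLen = Name.size() - PrefixLen;
  Name += Suffix;                    // then the stub/non-lazy-ptr suffix

  std::string OrigName = Name.substr(PrefixLen, OrigLen);
  assert(Name == "L_foo$stub");      // the local stub symbol that gets emitted
  assert(OrigName == "_foo");        // what the stub entry ultimately points at
  return 0;
}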
static MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.cpp
index 027ae3e..9da1b1b 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
#include "PPCMachineFunctionInfo.h"
-#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Target/TargetMachine.h"
@@ -17,7 +17,7 @@ using namespace llvm;
void PPCFunctionInfo::anchor() { }
MCSymbol *PPCFunctionInfo::getPICOffsetSymbol() const {
- const MCAsmInfo *MAI = MF.getTarget().getMCAsmInfo();
- return MF.getContext().GetOrCreateSymbol(Twine(MAI->getPrivateGlobalPrefix())+
+ const DataLayout *DL = MF.getTarget().getDataLayout();
+ return MF.getContext().GetOrCreateSymbol(Twine(DL->getPrivateGlobalPrefix())+
Twine(MF.getFunctionNumber())+"$poff");
}
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 71495da0..9895ee6 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "reginfo"
#include "PPCRegisterInfo.h"
#include "PPC.h"
#include "PPCFrameLowering.h"
@@ -27,7 +26,6 @@
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
-#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
@@ -43,11 +41,13 @@
#include "llvm/Target/TargetOptions.h"
#include <cstdlib>
+using namespace llvm;
+
+#define DEBUG_TYPE "reginfo"
+
#define GET_REGINFO_TARGET_DESC
#include "PPCGenRegisterInfo.inc"
-using namespace llvm;
-
static cl::opt<bool>
EnableBasePointer("ppc-use-base-pointer", cl::Hidden, cl::init(true),
cl::desc("Enable use of a base pointer for complex stack frames"));
@@ -97,7 +97,7 @@ PPCRegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
return &PPC::GPRCRegClass;
}
-const uint16_t*
+const MCPhysReg*
PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
if (Subtarget.isDarwinABI())
return Subtarget.isPPC64() ? (Subtarget.hasAltivec() ?
@@ -200,7 +200,7 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
Reserved.set(PPC::R31);
if (hasBasePointer(MF)) {
- if (Subtarget.isSVR4ABI() && !Subtarget.isPPC64() &&
+ if (Subtarget.isSVR4ABI() && !Subtarget.isPPC64() &&
MF.getTarget().getRelocationModel() == Reloc::PIC_)
Reserved.set(PPC::R29);
else
@@ -239,12 +239,33 @@ PPCRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
case PPC::F8RCRegClassID:
case PPC::F4RCRegClassID:
case PPC::VRRCRegClassID:
+ case PPC::VFRCRegClassID:
+ case PPC::VSLRCRegClassID:
+ case PPC::VSHRCRegClassID:
return 32 - DefaultSafety;
+ case PPC::VSRCRegClassID:
+ case PPC::VSFRCRegClassID:
+ return 64 - DefaultSafety;
case PPC::CRRCRegClassID:
return 8 - DefaultSafety;
}
}
+const TargetRegisterClass*
+PPCRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC)const {
+ if (Subtarget.hasVSX()) {
+ // With VSX, we can inflate various sub-register classes to the full VSX
+ // register set.
+
+ if (RC == &PPC::F8RCRegClass)
+ return &PPC::VSFRCRegClass;
+ else if (RC == &PPC::VRRCRegClass)
+ return &PPC::VSRCRegClass;
+ }
+
+ return TargetRegisterInfo::getLargestLegalSuperClass(RC);
+}
+
//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//
@@ -461,6 +482,127 @@ void PPCRegisterInfo::lowerCRRestore(MachineBasicBlock::iterator II,
MBB.erase(II);
}
+static unsigned getCRFromCRBit(unsigned SrcReg) {
+ unsigned Reg = 0;
+ if (SrcReg == PPC::CR0LT || SrcReg == PPC::CR0GT ||
+ SrcReg == PPC::CR0EQ || SrcReg == PPC::CR0UN)
+ Reg = PPC::CR0;
+ else if (SrcReg == PPC::CR1LT || SrcReg == PPC::CR1GT ||
+ SrcReg == PPC::CR1EQ || SrcReg == PPC::CR1UN)
+ Reg = PPC::CR1;
+ else if (SrcReg == PPC::CR2LT || SrcReg == PPC::CR2GT ||
+ SrcReg == PPC::CR2EQ || SrcReg == PPC::CR2UN)
+ Reg = PPC::CR2;
+ else if (SrcReg == PPC::CR3LT || SrcReg == PPC::CR3GT ||
+ SrcReg == PPC::CR3EQ || SrcReg == PPC::CR3UN)
+ Reg = PPC::CR3;
+ else if (SrcReg == PPC::CR4LT || SrcReg == PPC::CR4GT ||
+ SrcReg == PPC::CR4EQ || SrcReg == PPC::CR4UN)
+ Reg = PPC::CR4;
+ else if (SrcReg == PPC::CR5LT || SrcReg == PPC::CR5GT ||
+ SrcReg == PPC::CR5EQ || SrcReg == PPC::CR5UN)
+ Reg = PPC::CR5;
+ else if (SrcReg == PPC::CR6LT || SrcReg == PPC::CR6GT ||
+ SrcReg == PPC::CR6EQ || SrcReg == PPC::CR6UN)
+ Reg = PPC::CR6;
+ else if (SrcReg == PPC::CR7LT || SrcReg == PPC::CR7GT ||
+ SrcReg == PPC::CR7EQ || SrcReg == PPC::CR7UN)
+ Reg = PPC::CR7;
+
+ assert(Reg != 0 && "Invalid CR bit register");
+ return Reg;
+}
+
+void PPCRegisterInfo::lowerCRBitSpilling(MachineBasicBlock::iterator II,
+ unsigned FrameIndex) const {
+ // Get the instruction.
+ MachineInstr &MI = *II; // ; SPILL_CRBIT <SrcReg>, <offset>
+ // Get the instruction's basic block.
+ MachineBasicBlock &MBB = *MI.getParent();
+ MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ DebugLoc dl = MI.getDebugLoc();
+
+ bool LP64 = Subtarget.isPPC64();
+ const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
+ const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
+
+ unsigned Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
+ unsigned SrcReg = MI.getOperand(0).getReg();
+
+ BuildMI(MBB, II, dl, TII.get(TargetOpcode::KILL),
+ getCRFromCRBit(SrcReg))
+ .addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill()));
+
+ BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg)
+ .addReg(getCRFromCRBit(SrcReg));
+
+ // If the saved register wasn't CR0LT, shift the bits left so that the bit to
+ // store is the first one. Mask all but that bit.
+ unsigned Reg1 = Reg;
+ Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
+
+ // rlwinm rA, rA, ShiftBits, 0, 0.
+ BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
+ .addReg(Reg1, RegState::Kill)
+ .addImm(getEncodingValue(SrcReg))
+ .addImm(0).addImm(0);
+
+ addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::STW8 : PPC::STW))
+ .addReg(Reg, RegState::Kill),
+ FrameIndex);
+
+ // Discard the pseudo instruction.
+ MBB.erase(II);
+}
+
+void PPCRegisterInfo::lowerCRBitRestore(MachineBasicBlock::iterator II,
+ unsigned FrameIndex) const {
+ // Get the instruction.
+ MachineInstr &MI = *II; // ; <DestReg> = RESTORE_CRBIT <offset>
+ // Get the instruction's basic block.
+ MachineBasicBlock &MBB = *MI.getParent();
+ MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ DebugLoc dl = MI.getDebugLoc();
+
+ bool LP64 = Subtarget.isPPC64();
+ const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
+ const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
+
+ unsigned Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
+ unsigned DestReg = MI.getOperand(0).getReg();
+ assert(MI.definesRegister(DestReg) &&
+ "RESTORE_CRBIT does not define its destination");
+
+ addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LWZ8 : PPC::LWZ),
+ Reg), FrameIndex);
+
+ BuildMI(MBB, II, dl, TII.get(TargetOpcode::IMPLICIT_DEF), DestReg);
+
+ unsigned RegO = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
+ BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), RegO)
+ .addReg(getCRFromCRBit(DestReg));
+
+ unsigned ShiftBits = getEncodingValue(DestReg);
+ // rlwimi r11, r10, 32-ShiftBits, ..., ...
+ BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWIMI8 : PPC::RLWIMI), RegO)
+ .addReg(RegO, RegState::Kill).addReg(Reg, RegState::Kill)
+ .addImm(ShiftBits ? 32-ShiftBits : 0)
+ .addImm(ShiftBits).addImm(ShiftBits);
+
+ BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTOCRF8 : PPC::MTOCRF),
+ getCRFromCRBit(DestReg))
+ .addReg(RegO, RegState::Kill)
+ // Make sure we have a use dependency all the way through this
+ // sequence of instructions. We can't have the other bits in the CR
+ // modified in between the mfocrf and the mtocrf.
+ .addReg(getCRFromCRBit(DestReg), RegState::Implicit);
+
+ // Discard the pseudo instruction.
+ MBB.erase(II);
+}
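The spill sequence above parks the selected CR bit in the MSB of a GPR (mfocrf followed by rlwinm ...,0,0), and the restore sequence rotates it back and reinserts just that one bit (rlwimi) before mtocrf. A minimal C++ model of that bit movement (illustrative only; big-endian bit numbering, helper names made up):

#include <cassert>
#include <cstdint>

// Big-endian bit b (0 = MSB) of the 32-bit CR sits at LSB-numbered position 31-b.
static uint32_t rotl32(uint32_t V, unsigned N) {
  N &= 31;
  return N ? (V << N) | (V >> (32 - N)) : V;
}

// rlwinm rA, rS, ShiftBits, 0, 0: rotate left so the wanted bit becomes the
// MSB, then mask everything else off. This is what gets stored to the slot.
static uint32_t spillWord(uint32_t CR, unsigned BEBit) {
  return rotl32(CR, BEBit) & 0x80000000u;
}

// rlwimi rA, rS, 32-ShiftBits, ShiftBits, ShiftBits: rotate the loaded word
// back and insert only that one bit into the current CR field contents.
static uint32_t restoreCR(uint32_t CurCR, uint32_t Spilled, unsigned BEBit) {
  uint32_t Mask = 0x80000000u >> BEBit;
  uint32_t Rot = rotl32(Spilled, (32 - BEBit) & 31);
  return (CurCR & ~Mask) | (Rot & Mask);
}

int main() {
  unsigned BEBit = 10;               // e.g. the CR2EQ bit
  uint32_t CR = 1u << (31 - BEBit);  // CR with only that bit set
  uint32_t Spilled = spillWord(CR, BEBit);
  assert(Spilled == 0x80000000u);    // the bit is parked in the stored word's MSB
  assert(restoreCR(0, Spilled, BEBit) == CR);
  assert(spillWord(~CR, BEBit) == 0); // a clear bit spills as zero
  return 0;
}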
+
void PPCRegisterInfo::lowerVRSAVESpilling(MachineBasicBlock::iterator II,
unsigned FrameIndex) const {
// Get the instruction.
@@ -604,6 +746,12 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
} else if (OpC == PPC::RESTORE_CR) {
lowerCRRestore(II, FrameIndex);
return;
+ } else if (OpC == PPC::SPILL_CRBIT) {
+ lowerCRBitSpilling(II, FrameIndex);
+ return;
+ } else if (OpC == PPC::RESTORE_CRBIT) {
+ lowerCRBitRestore(II, FrameIndex);
+ return;
} else if (OpC == PPC::SPILL_VRSAVE) {
lowerVRSAVESpilling(II, FrameIndex);
return;
@@ -753,16 +901,6 @@ bool PPCRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
assert(Offset < 0 && "Local offset must be negative");
- unsigned FIOperandNum = 0;
- while (!MI->getOperand(FIOperandNum).isFI()) {
- ++FIOperandNum;
- assert(FIOperandNum < MI->getNumOperands() &&
- "Instr doesn't have FrameIndex operand!");
- }
-
- unsigned OffsetOperandNo = getOffsetONFromFION(*MI, FIOperandNum);
- Offset += MI->getOperand(OffsetOperandNo).getImm();
-
// It's the load/store FI references that cause issues, as it can be difficult
// to materialize the offset if it won't fit in the literal field. Estimate
// based on the size of the local frame and some conservative assumptions
@@ -828,11 +966,8 @@ materializeFrameBaseRegister(MachineBasicBlock *MBB,
.addFrameIndex(FrameIdx).addImm(Offset);
}
-void
-PPCRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
- unsigned BaseReg, int64_t Offset) const {
- MachineInstr &MI = *I;
-
+void PPCRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
+ int64_t Offset) const {
unsigned FIOperandNum = 0;
while (!MI.getOperand(FIOperandNum).isFI()) {
++FIOperandNum;
@@ -844,10 +979,28 @@ PPCRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum);
Offset += MI.getOperand(OffsetOperandNo).getImm();
MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
+
+ MachineBasicBlock &MBB = *MI.getParent();
+ MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ const MCInstrDesc &MCID = MI.getDesc();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ MRI.constrainRegClass(BaseReg,
+ TII.getRegClass(MCID, FIOperandNum, this, MF));
}
bool PPCRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
int64_t Offset) const {
+ unsigned FIOperandNum = 0;
+ while (!MI->getOperand(FIOperandNum).isFI()) {
+ ++FIOperandNum;
+ assert(FIOperandNum < MI->getNumOperands() &&
+ "Instr doesn't have FrameIndex operand!");
+ }
+
+ unsigned OffsetOperandNo = getOffsetONFromFION(*MI, FIOperandNum);
+ Offset += MI->getOperand(OffsetOperandNo).getImm();
+
return MI->getOpcode() == PPC::DBG_VALUE || // DBG_VALUE is always Reg+Imm
(isInt<16>(Offset) && (!usesIXAddr(*MI) || (Offset & 3) == 0));
}
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
index dd3bb40..13a35f6 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
@@ -15,8 +15,8 @@
#ifndef POWERPC32_REGISTERINFO_H
#define POWERPC32_REGISTERINFO_H
-#include "llvm/ADT/DenseMap.h"
#include "PPC.h"
+#include "llvm/ADT/DenseMap.h"
#define GET_REGINFO_HEADER
#include "PPCGenRegisterInfo.inc"
@@ -34,33 +34,37 @@ public:
/// getPointerRegClass - Return the register class to use to hold pointers.
/// This is used for addressing modes.
- virtual const TargetRegisterClass *
- getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const;
+ const TargetRegisterClass *
+ getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const override;
unsigned getRegPressureLimit(const TargetRegisterClass *RC,
- MachineFunction &MF) const;
+ MachineFunction &MF) const override;
+
+ const TargetRegisterClass*
+ getLargestLegalSuperClass(const TargetRegisterClass *RC) const override;
/// Code Generation virtual methods...
- const uint16_t *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
- const uint32_t *getCallPreservedMask(CallingConv::ID CC) const;
+ const MCPhysReg *
+ getCalleeSavedRegs(const MachineFunction* MF =nullptr) const override;
+ const uint32_t *getCallPreservedMask(CallingConv::ID CC) const override;
const uint32_t *getNoPreservedMask() const;
- BitVector getReservedRegs(const MachineFunction &MF) const;
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
/// We require the register scavenger.
- bool requiresRegisterScavenging(const MachineFunction &MF) const {
+ bool requiresRegisterScavenging(const MachineFunction &MF) const override {
return true;
}
- bool requiresFrameIndexScavenging(const MachineFunction &MF) const {
+ bool requiresFrameIndexScavenging(const MachineFunction &MF) const override {
return true;
}
- bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
+ bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const override {
return true;
}
- virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const {
+ bool requiresVirtualBaseRegisters(const MachineFunction &MF) const override {
return true;
}
@@ -69,34 +73,39 @@ public:
unsigned FrameIndex) const;
void lowerCRRestore(MachineBasicBlock::iterator II,
unsigned FrameIndex) const;
+ void lowerCRBitSpilling(MachineBasicBlock::iterator II,
+ unsigned FrameIndex) const;
+ void lowerCRBitRestore(MachineBasicBlock::iterator II,
+ unsigned FrameIndex) const;
void lowerVRSAVESpilling(MachineBasicBlock::iterator II,
unsigned FrameIndex) const;
void lowerVRSAVERestore(MachineBasicBlock::iterator II,
unsigned FrameIndex) const;
bool hasReservedSpillSlot(const MachineFunction &MF, unsigned Reg,
- int &FrameIdx) const;
+ int &FrameIdx) const override;
void eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, unsigned FIOperandNum,
- RegScavenger *RS = NULL) const;
+ RegScavenger *RS = nullptr) const override;
// Support for virtual base registers.
- bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const;
+ bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override;
void materializeFrameBaseRegister(MachineBasicBlock *MBB,
unsigned BaseReg, int FrameIdx,
- int64_t Offset) const;
- void resolveFrameIndex(MachineBasicBlock::iterator I,
- unsigned BaseReg, int64_t Offset) const;
- bool isFrameOffsetLegal(const MachineInstr *MI, int64_t Offset) const;
+ int64_t Offset) const override;
+ void resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
+ int64_t Offset) const override;
+ bool isFrameOffsetLegal(const MachineInstr *MI,
+ int64_t Offset) const override;
// Debug information queries.
- unsigned getFrameRegister(const MachineFunction &MF) const;
+ unsigned getFrameRegister(const MachineFunction &MF) const override;
// Base pointer (stack realignment) support.
unsigned getBaseRegister(const MachineFunction &MF) const;
bool hasBasePointer(const MachineFunction &MF) const;
bool canRealignStack(const MachineFunction &MF) const;
- bool needsStackRealignment(const MachineFunction &MF) const;
+ bool needsStackRealignment(const MachineFunction &MF) const override;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.td b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
index 43663ce..b3d145b 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
@@ -16,6 +16,8 @@ def sub_gt : SubRegIndex<1, 1>;
def sub_eq : SubRegIndex<1, 2>;
def sub_un : SubRegIndex<1, 3>;
def sub_32 : SubRegIndex<32>;
+def sub_64 : SubRegIndex<64>;
+def sub_128 : SubRegIndex<128>;
}
@@ -47,9 +49,36 @@ class FPR<bits<5> num, string n> : PPCReg<n> {
let HWEncoding{4-0} = num;
}
-// VR - One of the 32 128-bit vector registers
-class VR<bits<5> num, string n> : PPCReg<n> {
+// VF - One of the 32 64-bit floating-point subregisters of the vector
+// registers (used by VSX).
+class VF<bits<5> num, string n> : PPCReg<n> {
let HWEncoding{4-0} = num;
+ let HWEncoding{5} = 1;
+}
+
+// VR - One of the 32 128-bit vector registers
+class VR<VF SubReg, string n> : PPCReg<n> {
+ let HWEncoding{4-0} = SubReg.HWEncoding{4-0};
+ let HWEncoding{5} = 0;
+ let SubRegs = [SubReg];
+ let SubRegIndices = [sub_64];
+}
+
+// VSRL - One of the 32 128-bit VSX registers that overlap with the scalar
+// floating-point registers.
+class VSRL<FPR SubReg, string n> : PPCReg<n> {
+ let HWEncoding = SubReg.HWEncoding;
+ let SubRegs = [SubReg];
+ let SubRegIndices = [sub_64];
+}
+
+// VSRH - One of the 32 128-bit VSX registers that overlap with the vector
+// registers.
+class VSRH<VR SubReg, string n> : PPCReg<n> {
+ let HWEncoding{4-0} = SubReg.HWEncoding{4-0};
+ let HWEncoding{5} = 1;
+ let SubRegs = [SubReg];
+ let SubRegIndices = [sub_128];
}
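Each 128-bit VSX register carries a 6-bit number: bit 5 selects which half of the unified file it lives in, and bits 4-0 index within that half, which is why vs0-vs31 overlap the scalar FPRs and vs32-vs63 overlap the Altivec VRs. A tiny C++ illustration (not part of the vendor sources; the helper name is made up):

#include <cassert>
#include <string>

// Map a 6-bit VSX register number onto the register it overlaps.
static std::string overlappedReg(unsigned VSXNum) {
  unsigned Idx = VSXNum & 31;        // HWEncoding{4-0}
  bool HighHalf = (VSXNum >> 5) & 1; // HWEncoding{5}
  return std::string(HighHalf ? "v" : "f") + std::to_string(Idx);
}

int main() {
  assert(overlappedReg(5) == "f5");   // vs5 is the VSL register aliasing F5
  assert(overlappedReg(37) == "v5");  // vs37 is the VSH register aliasing V5
  return 0;
}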
// CR - One of the 8 4-bit condition registers
@@ -80,12 +109,27 @@ foreach Index = 0-31 in {
DwarfRegNum<[!add(Index, 32), !add(Index, 32)]>;
}
+// Floating-point vector subregisters (for VSX)
+foreach Index = 0-31 in {
+ def VF#Index : VF<Index, "vs" # !add(Index, 32)>;
+}
+
// Vector registers
foreach Index = 0-31 in {
- def V#Index : VR<Index, "v"#Index>,
+ def V#Index : VR<!cast<VF>("VF"#Index), "v"#Index>,
DwarfRegNum<[!add(Index, 77), !add(Index, 77)]>;
}
+// VSX registers
+foreach Index = 0-31 in {
+ def VSL#Index : VSRL<!cast<FPR>("F"#Index), "vs"#Index>,
+ DwarfRegAlias<!cast<FPR>("F"#Index)>;
+}
+foreach Index = 0-31 in {
+ def VSH#Index : VSRH<!cast<VR>("V"#Index), "vs" # !add(Index, 32)>,
+ DwarfRegAlias<!cast<VR>("V"#Index)>;
+}
+
// The representation of r0 when treated as the constant 0.
def ZERO : GPR<0, "0">;
def ZERO8 : GP8<ZERO, "0">;
@@ -211,17 +255,39 @@ def VRRC : RegisterClass<"PPC", [v16i8,v8i16,v4i32,v4f32], 128,
V12, V13, V14, V15, V16, V17, V18, V19, V31, V30,
V29, V28, V27, V26, V25, V24, V23, V22, V21, V20)>;
-def CRBITRC : RegisterClass<"PPC", [i32], 32,
- (add CR0LT, CR0GT, CR0EQ, CR0UN,
- CR1LT, CR1GT, CR1EQ, CR1UN,
- CR2LT, CR2GT, CR2EQ, CR2UN,
+// VSX register classes (the allocation order mirrors that of the corresponding
+// subregister classes).
+def VSLRC : RegisterClass<"PPC", [v4i32,v4f32,v2f64,v2i64], 128,
+ (add (sequence "VSL%u", 0, 13),
+ (sequence "VSL%u", 31, 14))>;
+def VSHRC : RegisterClass<"PPC", [v4i32,v4f32,v2f64,v2i64], 128,
+ (add VSH2, VSH3, VSH4, VSH5, VSH0, VSH1, VSH6, VSH7,
+ VSH8, VSH9, VSH10, VSH11, VSH12, VSH13, VSH14,
+ VSH15, VSH16, VSH17, VSH18, VSH19, VSH31, VSH30,
+ VSH29, VSH28, VSH27, VSH26, VSH25, VSH24, VSH23,
+ VSH22, VSH21, VSH20)>;
+def VSRC : RegisterClass<"PPC", [v4i32,v4f32,v2f64,v2i64], 128,
+ (add VSLRC, VSHRC)>;
+
+// Register classes for the 64-bit "scalar" VSX subregisters.
+def VFRC : RegisterClass<"PPC", [f64], 64,
+ (add VF2, VF3, VF4, VF5, VF0, VF1, VF6, VF7,
+ VF8, VF9, VF10, VF11, VF12, VF13, VF14,
+ VF15, VF16, VF17, VF18, VF19, VF31, VF30,
+ VF29, VF28, VF27, VF26, VF25, VF24, VF23,
+ VF22, VF21, VF20)>;
+def VSFRC : RegisterClass<"PPC", [f64], 64, (add F8RC, VFRC)>;
+
+def CRBITRC : RegisterClass<"PPC", [i1], 32,
+ (add CR2LT, CR2GT, CR2EQ, CR2UN,
CR3LT, CR3GT, CR3EQ, CR3UN,
CR4LT, CR4GT, CR4EQ, CR4UN,
CR5LT, CR5GT, CR5EQ, CR5UN,
CR6LT, CR6GT, CR6EQ, CR6UN,
- CR7LT, CR7GT, CR7EQ, CR7UN)>
-{
- let CopyCost = -1;
+ CR7LT, CR7GT, CR7EQ, CR7UN,
+ CR1LT, CR1GT, CR1EQ, CR1UN,
+ CR0LT, CR0GT, CR0EQ, CR0UN)> {
+ let Size = 32;
}
def CRRC : RegisterClass<"PPC", [i32], 32, (add CR0, CR1, CR5, CR6,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCSchedule.td b/contrib/llvm/lib/Target/PowerPC/PPCSchedule.td
index 92ba69c..1221d41 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCSchedule.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCSchedule.td
@@ -8,114 +8,106 @@
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
-// Functional units across PowerPC chips sets
-//
-def BPU : FuncUnit; // Branch unit
-def SLU : FuncUnit; // Store/load unit
-def SRU : FuncUnit; // special register unit
-def IU1 : FuncUnit; // integer unit 1 (simple)
-def IU2 : FuncUnit; // integer unit 2 (complex)
-def FPU1 : FuncUnit; // floating point unit 1
-def FPU2 : FuncUnit; // floating point unit 2
-def VPU : FuncUnit; // vector permutation unit
-def VIU1 : FuncUnit; // vector integer unit 1 (simple)
-def VIU2 : FuncUnit; // vector integer unit 2 (complex)
-def VFPU : FuncUnit; // vector floating point unit
-
-//===----------------------------------------------------------------------===//
// Instruction Itinerary classes used for PowerPC
//
-def IntSimple : InstrItinClass;
-def IntGeneral : InstrItinClass;
-def IntCompare : InstrItinClass;
-def IntDivD : InstrItinClass;
-def IntDivW : InstrItinClass;
-def IntMFFS : InstrItinClass;
-def IntMFVSCR : InstrItinClass;
-def IntMTFSB0 : InstrItinClass;
-def IntMTSRD : InstrItinClass;
-def IntMulHD : InstrItinClass;
-def IntMulHW : InstrItinClass;
-def IntMulHWU : InstrItinClass;
-def IntMulLI : InstrItinClass;
-def IntRFID : InstrItinClass;
-def IntRotateD : InstrItinClass;
-def IntRotateDI : InstrItinClass;
-def IntRotate : InstrItinClass;
-def IntShift : InstrItinClass;
-def IntTrapD : InstrItinClass;
-def IntTrapW : InstrItinClass;
-def BrB : InstrItinClass;
-def BrCR : InstrItinClass;
-def BrMCR : InstrItinClass;
-def BrMCRX : InstrItinClass;
-def LdStDCBA : InstrItinClass;
-def LdStDCBF : InstrItinClass;
-def LdStDCBI : InstrItinClass;
-def LdStLoad : InstrItinClass;
-def LdStLoadUpd : InstrItinClass;
-def LdStStore : InstrItinClass;
-def LdStStoreUpd : InstrItinClass;
-def LdStDSS : InstrItinClass;
-def LdStICBI : InstrItinClass;
-def LdStLD : InstrItinClass;
-def LdStLDU : InstrItinClass;
-def LdStLDARX : InstrItinClass;
-def LdStLFD : InstrItinClass;
-def LdStLFDU : InstrItinClass;
-def LdStLHA : InstrItinClass;
-def LdStLHAU : InstrItinClass;
-def LdStLMW : InstrItinClass;
-def LdStLVecX : InstrItinClass;
-def LdStLWA : InstrItinClass;
-def LdStLWARX : InstrItinClass;
-def LdStSLBIA : InstrItinClass;
-def LdStSLBIE : InstrItinClass;
-def LdStSTD : InstrItinClass;
-def LdStSTDCX : InstrItinClass;
-def LdStSTDU : InstrItinClass;
-def LdStSTFD : InstrItinClass;
-def LdStSTFDU : InstrItinClass;
-def LdStSTVEBX : InstrItinClass;
-def LdStSTWCX : InstrItinClass;
-def LdStSync : InstrItinClass;
-def SprISYNC : InstrItinClass;
-def SprMFSR : InstrItinClass;
-def SprMTMSR : InstrItinClass;
-def SprMTSR : InstrItinClass;
-def SprTLBSYNC : InstrItinClass;
-def SprMFCR : InstrItinClass;
-def SprMFMSR : InstrItinClass;
-def SprMFSPR : InstrItinClass;
-def SprMFTB : InstrItinClass;
-def SprMTSPR : InstrItinClass;
-def SprMTSRIN : InstrItinClass;
-def SprRFI : InstrItinClass;
-def SprSC : InstrItinClass;
-def FPGeneral : InstrItinClass;
-def FPAddSub : InstrItinClass;
-def FPCompare : InstrItinClass;
-def FPDivD : InstrItinClass;
-def FPDivS : InstrItinClass;
-def FPFused : InstrItinClass;
-def FPRes : InstrItinClass;
-def FPSqrt : InstrItinClass;
-def VecGeneral : InstrItinClass;
-def VecFP : InstrItinClass;
-def VecFPCompare : InstrItinClass;
-def VecComplex : InstrItinClass;
-def VecPerm : InstrItinClass;
-def VecFPRound : InstrItinClass;
-def VecVSL : InstrItinClass;
-def VecVSR : InstrItinClass;
-def SprMTMSRD : InstrItinClass;
-def SprSLIE : InstrItinClass;
-def SprSLBIE : InstrItinClass;
-def SprSLBMTE : InstrItinClass;
-def SprSLBMFEE : InstrItinClass;
-def SprSLBIA : InstrItinClass;
-def SprTLBIEL : InstrItinClass;
-def SprTLBIE : InstrItinClass;
+def IIC_IntSimple : InstrItinClass;
+def IIC_IntGeneral : InstrItinClass;
+def IIC_IntCompare : InstrItinClass;
+def IIC_IntDivD : InstrItinClass;
+def IIC_IntDivW : InstrItinClass;
+def IIC_IntMFFS : InstrItinClass;
+def IIC_IntMFVSCR : InstrItinClass;
+def IIC_IntMTFSB0 : InstrItinClass;
+def IIC_IntMTSRD : InstrItinClass;
+def IIC_IntMulHD : InstrItinClass;
+def IIC_IntMulHW : InstrItinClass;
+def IIC_IntMulHWU : InstrItinClass;
+def IIC_IntMulLI : InstrItinClass;
+def IIC_IntRFID : InstrItinClass;
+def IIC_IntRotateD : InstrItinClass;
+def IIC_IntRotateDI : InstrItinClass;
+def IIC_IntRotate : InstrItinClass;
+def IIC_IntShift : InstrItinClass;
+def IIC_IntTrapD : InstrItinClass;
+def IIC_IntTrapW : InstrItinClass;
+def IIC_BrB : InstrItinClass;
+def IIC_BrCR : InstrItinClass;
+def IIC_BrMCR : InstrItinClass;
+def IIC_BrMCRX : InstrItinClass;
+def IIC_LdStDCBA : InstrItinClass;
+def IIC_LdStDCBF : InstrItinClass;
+def IIC_LdStDCBI : InstrItinClass;
+def IIC_LdStLoad : InstrItinClass;
+def IIC_LdStLoadUpd : InstrItinClass;
+def IIC_LdStLoadUpdX : InstrItinClass;
+def IIC_LdStStore : InstrItinClass;
+def IIC_LdStStoreUpd : InstrItinClass;
+def IIC_LdStDSS : InstrItinClass;
+def IIC_LdStICBI : InstrItinClass;
+def IIC_LdStLD : InstrItinClass;
+def IIC_LdStLDU : InstrItinClass;
+def IIC_LdStLDUX : InstrItinClass;
+def IIC_LdStLDARX : InstrItinClass;
+def IIC_LdStLFD : InstrItinClass;
+def IIC_LdStLFDU : InstrItinClass;
+def IIC_LdStLFDUX : InstrItinClass;
+def IIC_LdStLHA : InstrItinClass;
+def IIC_LdStLHAU : InstrItinClass;
+def IIC_LdStLHAUX : InstrItinClass;
+def IIC_LdStLMW : InstrItinClass;
+def IIC_LdStLVecX : InstrItinClass;
+def IIC_LdStLWA : InstrItinClass;
+def IIC_LdStLWARX : InstrItinClass;
+def IIC_LdStSLBIA : InstrItinClass;
+def IIC_LdStSLBIE : InstrItinClass;
+def IIC_LdStSTD : InstrItinClass;
+def IIC_LdStSTDCX : InstrItinClass;
+def IIC_LdStSTDU : InstrItinClass;
+def IIC_LdStSTDUX : InstrItinClass;
+def IIC_LdStSTFD : InstrItinClass;
+def IIC_LdStSTFDU : InstrItinClass;
+def IIC_LdStSTVEBX : InstrItinClass;
+def IIC_LdStSTWCX : InstrItinClass;
+def IIC_LdStSync : InstrItinClass;
+def IIC_SprISYNC : InstrItinClass;
+def IIC_SprMFSR : InstrItinClass;
+def IIC_SprMTMSR : InstrItinClass;
+def IIC_SprMTSR : InstrItinClass;
+def IIC_SprTLBSYNC : InstrItinClass;
+def IIC_SprMFCR : InstrItinClass;
+def IIC_SprMFCRF : InstrItinClass;
+def IIC_SprMFMSR : InstrItinClass;
+def IIC_SprMFSPR : InstrItinClass;
+def IIC_SprMFTB : InstrItinClass;
+def IIC_SprMTSPR : InstrItinClass;
+def IIC_SprMTSRIN : InstrItinClass;
+def IIC_SprRFI : InstrItinClass;
+def IIC_SprSC : InstrItinClass;
+def IIC_FPGeneral : InstrItinClass;
+def IIC_FPAddSub : InstrItinClass;
+def IIC_FPCompare : InstrItinClass;
+def IIC_FPDivD : InstrItinClass;
+def IIC_FPDivS : InstrItinClass;
+def IIC_FPFused : InstrItinClass;
+def IIC_FPRes : InstrItinClass;
+def IIC_FPSqrtD : InstrItinClass;
+def IIC_FPSqrtS : InstrItinClass;
+def IIC_VecGeneral : InstrItinClass;
+def IIC_VecFP : InstrItinClass;
+def IIC_VecFPCompare : InstrItinClass;
+def IIC_VecComplex : InstrItinClass;
+def IIC_VecPerm : InstrItinClass;
+def IIC_VecFPRound : InstrItinClass;
+def IIC_VecVSL : InstrItinClass;
+def IIC_VecVSR : InstrItinClass;
+def IIC_SprMTMSRD : InstrItinClass;
+def IIC_SprSLIE : InstrItinClass;
+def IIC_SprSLBIE : InstrItinClass;
+def IIC_SprSLBMTE : InstrItinClass;
+def IIC_SprSLBMFEE : InstrItinClass;
+def IIC_SprSLBIA : InstrItinClass;
+def IIC_SprTLBIEL : InstrItinClass;
+def IIC_SprTLBIE : InstrItinClass;
//===----------------------------------------------------------------------===//
// Processor instruction itineraries.
@@ -125,6 +117,7 @@ include "PPCSchedule440.td"
include "PPCScheduleG4.td"
include "PPCScheduleG4Plus.td"
include "PPCScheduleG5.td"
+include "PPCScheduleP7.td"
include "PPCScheduleA2.td"
include "PPCScheduleE500mc.td"
include "PPCScheduleE5500.td"
@@ -136,392 +129,392 @@ include "PPCScheduleE5500.td"
//
// opcode itinerary class
// ====== ===============
-// add IntSimple
-// addc IntGeneral
-// adde IntGeneral
-// addi IntSimple
-// addic IntGeneral
-// addic. IntGeneral
-// addis IntSimple
-// addme IntGeneral
-// addze IntGeneral
-// and IntSimple
-// andc IntSimple
-// andi. IntGeneral
-// andis. IntGeneral
-// b BrB
-// bc BrB
-// bcctr BrB
-// bclr BrB
-// cmp IntCompare
-// cmpi IntCompare
-// cmpl IntCompare
-// cmpli IntCompare
-// cntlzd IntRotateD
-// cntlzw IntGeneral
-// crand BrCR
-// crandc BrCR
-// creqv BrCR
-// crnand BrCR
-// crnor BrCR
-// cror BrCR
-// crorc BrCR
-// crxor BrCR
-// dcba LdStDCBA
-// dcbf LdStDCBF
-// dcbi LdStDCBI
-// dcbst LdStDCBF
-// dcbt LdStLoad
-// dcbtst LdStLoad
-// dcbz LdStDCBF
-// divd IntDivD
-// divdu IntDivD
-// divw IntDivW
-// divwu IntDivW
-// dss LdStDSS
-// dst LdStDSS
-// dstst LdStDSS
-// eciwx LdStLoad
-// ecowx LdStLoad
-// eieio LdStLoad
-// eqv IntSimple
-// extsb IntSimple
-// extsh IntSimple
-// extsw IntSimple
-// fabs FPGeneral
-// fadd FPAddSub
-// fadds FPGeneral
-// fcfid FPGeneral
-// fcmpo FPCompare
-// fcmpu FPCompare
-// fctid FPGeneral
-// fctidz FPGeneral
-// fctiw FPGeneral
-// fctiwz FPGeneral
-// fdiv FPDivD
-// fdivs FPDivS
-// fmadd FPFused
-// fmadds FPGeneral
-// fmr FPGeneral
-// fmsub FPFused
-// fmsubs FPGeneral
-// fmul FPFused
-// fmuls FPGeneral
-// fnabs FPGeneral
-// fneg FPGeneral
-// fnmadd FPFused
-// fnmadds FPGeneral
-// fnmsub FPFused
-// fnmsubs FPGeneral
-// fres FPRes
-// frsp FPGeneral
-// frsqrte FPGeneral
-// fsel FPGeneral
-// fsqrt FPSqrt
-// fsqrts FPSqrt
-// fsub FPAddSub
-// fsubs FPGeneral
-// icbi LdStICBI
-// isync SprISYNC
-// lbz LdStLoad
-// lbzu LdStLoadUpd
-// lbzux LdStLoadUpd
-// lbzx LdStLoad
-// ld LdStLD
-// ldarx LdStLDARX
-// ldu LdStLDU
-// ldux LdStLDU
-// ldx LdStLD
-// lfd LdStLFD
-// lfdu LdStLFDU
-// lfdux LdStLFDU
-// lfdx LdStLFD
-// lfs LdStLFD
-// lfsu LdStLFDU
-// lfsux LdStLFDU
-// lfsx LdStLFD
-// lha LdStLHA
-// lhau LdStLHAU
-// lhaux LdStLHAU
-// lhax LdStLHA
-// lhbrx LdStLoad
-// lhz LdStLoad
-// lhzu LdStLoadUpd
-// lhzux LdStLoadUpd
-// lhzx LdStLoad
-// lmw LdStLMW
-// lswi LdStLMW
-// lswx LdStLMW
-// lvebx LdStLVecX
-// lvehx LdStLVecX
-// lvewx LdStLVecX
-// lvsl LdStLVecX
-// lvsr LdStLVecX
-// lvx LdStLVecX
-// lvxl LdStLVecX
-// lwa LdStLWA
-// lwarx LdStLWARX
-// lwaux LdStLHAU
-// lwax LdStLHA
-// lwbrx LdStLoad
-// lwz LdStLoad
-// lwzu LdStLoadUpd
-// lwzux LdStLoadUpd
-// lwzx LdStLoad
-// mcrf BrMCR
-// mcrfs FPGeneral
-// mcrxr BrMCRX
-// mfcr SprMFCR
-// mffs IntMFFS
-// mfmsr SprMFMSR
-// mfspr SprMFSPR
-// mfsr SprMFSR
-// mfsrin SprMFSR
-// mftb SprMFTB
-// mfvscr IntMFVSCR
-// mtcrf BrMCRX
-// mtfsb0 IntMTFSB0
-// mtfsb1 IntMTFSB0
-// mtfsf IntMTFSB0
-// mtfsfi IntMTFSB0
-// mtmsr SprMTMSR
-// mtmsrd LdStLD
-// mtspr SprMTSPR
-// mtsr SprMTSR
-// mtsrd IntMTSRD
-// mtsrdin IntMTSRD
-// mtsrin SprMTSRIN
-// mtvscr IntMFVSCR
-// mulhd IntMulHD
-// mulhdu IntMulHD
-// mulhw IntMulHW
-// mulhwu IntMulHWU
-// mulld IntMulHD
-// mulli IntMulLI
-// mullw IntMulHW
-// nand IntSimple
-// neg IntSimple
-// nor IntSimple
-// or IntSimple
-// orc IntSimple
-// ori IntSimple
-// oris IntSimple
-// rfi SprRFI
-// rfid IntRFID
-// rldcl IntRotateD
-// rldcr IntRotateD
-// rldic IntRotateDI
-// rldicl IntRotateDI
-// rldicr IntRotateDI
-// rldimi IntRotateDI
-// rlwimi IntRotate
-// rlwinm IntGeneral
-// rlwnm IntGeneral
-// sc SprSC
-// slbia LdStSLBIA
-// slbie LdStSLBIE
-// sld IntRotateD
-// slw IntGeneral
-// srad IntRotateD
-// sradi IntRotateDI
-// sraw IntShift
-// srawi IntShift
-// srd IntRotateD
-// srw IntGeneral
-// stb LdStStore
-// stbu LdStStoreUpd
-// stbux LdStStoreUpd
-// stbx LdStStore
-// std LdStSTD
-// stdcx. LdStSTDCX
-// stdu LdStSTDU
-// stdux LdStSTDU
-// stdx LdStSTD
-// stfd LdStSTFD
-// stfdu LdStSTFDU
-// stfdux LdStSTFDU
-// stfdx LdStSTFD
-// stfiwx LdStSTFD
-// stfs LdStSTFD
-// stfsu LdStSTFDU
-// stfsux LdStSTFDU
-// stfsx LdStSTFD
-// sth LdStStore
-// sthbrx LdStStore
-// sthu LdStStoreUpd
-// sthux LdStStoreUpd
-// sthx LdStStore
-// stmw LdStLMW
-// stswi LdStLMW
-// stswx LdStLMW
-// stvebx LdStSTVEBX
-// stvehx LdStSTVEBX
-// stvewx LdStSTVEBX
-// stvx LdStSTVEBX
-// stvxl LdStSTVEBX
-// stw LdStStore
-// stwbrx LdStStore
-// stwcx. LdStSTWCX
-// stwu LdStStoreUpd
-// stwux LdStStoreUpd
-// stwx LdStStore
-// subf IntGeneral
-// subfc IntGeneral
-// subfe IntGeneral
-// subfic IntGeneral
-// subfme IntGeneral
-// subfze IntGeneral
-// sync LdStSync
-// td IntTrapD
-// tdi IntTrapD
-// tlbia LdStSLBIA
-// tlbie LdStDCBF
-// tlbsync SprTLBSYNC
-// tw IntTrapW
-// twi IntTrapW
-// vaddcuw VecGeneral
-// vaddfp VecFP
-// vaddsbs VecGeneral
-// vaddshs VecGeneral
-// vaddsws VecGeneral
-// vaddubm VecGeneral
-// vaddubs VecGeneral
-// vadduhm VecGeneral
-// vadduhs VecGeneral
-// vadduwm VecGeneral
-// vadduws VecGeneral
-// vand VecGeneral
-// vandc VecGeneral
-// vavgsb VecGeneral
-// vavgsh VecGeneral
-// vavgsw VecGeneral
-// vavgub VecGeneral
-// vavguh VecGeneral
-// vavguw VecGeneral
-// vcfsx VecFP
-// vcfux VecFP
-// vcmpbfp VecFPCompare
-// vcmpeqfp VecFPCompare
-// vcmpequb VecGeneral
-// vcmpequh VecGeneral
-// vcmpequw VecGeneral
-// vcmpgefp VecFPCompare
-// vcmpgtfp VecFPCompare
-// vcmpgtsb VecGeneral
-// vcmpgtsh VecGeneral
-// vcmpgtsw VecGeneral
-// vcmpgtub VecGeneral
-// vcmpgtuh VecGeneral
-// vcmpgtuw VecGeneral
-// vctsxs VecFP
-// vctuxs VecFP
-// vexptefp VecFP
-// vlogefp VecFP
-// vmaddfp VecFP
-// vmaxfp VecFPCompare
-// vmaxsb VecGeneral
-// vmaxsh VecGeneral
-// vmaxsw VecGeneral
-// vmaxub VecGeneral
-// vmaxuh VecGeneral
-// vmaxuw VecGeneral
-// vmhaddshs VecComplex
-// vmhraddshs VecComplex
-// vminfp VecFPCompare
-// vminsb VecGeneral
-// vminsh VecGeneral
-// vminsw VecGeneral
-// vminub VecGeneral
-// vminuh VecGeneral
-// vminuw VecGeneral
-// vmladduhm VecComplex
-// vmrghb VecPerm
-// vmrghh VecPerm
-// vmrghw VecPerm
-// vmrglb VecPerm
-// vmrglh VecPerm
-// vmrglw VecPerm
-// vmsubfp VecFP
-// vmsummbm VecComplex
-// vmsumshm VecComplex
-// vmsumshs VecComplex
-// vmsumubm VecComplex
-// vmsumuhm VecComplex
-// vmsumuhs VecComplex
-// vmulesb VecComplex
-// vmulesh VecComplex
-// vmuleub VecComplex
-// vmuleuh VecComplex
-// vmulosb VecComplex
-// vmulosh VecComplex
-// vmuloub VecComplex
-// vmulouh VecComplex
-// vnor VecGeneral
-// vor VecGeneral
-// vperm VecPerm
-// vpkpx VecPerm
-// vpkshss VecPerm
-// vpkshus VecPerm
-// vpkswss VecPerm
-// vpkswus VecPerm
-// vpkuhum VecPerm
-// vpkuhus VecPerm
-// vpkuwum VecPerm
-// vpkuwus VecPerm
-// vrefp VecFPRound
-// vrfim VecFPRound
-// vrfin VecFPRound
-// vrfip VecFPRound
-// vrfiz VecFPRound
-// vrlb VecGeneral
-// vrlh VecGeneral
-// vrlw VecGeneral
-// vrsqrtefp VecFP
-// vsel VecGeneral
-// vsl VecVSL
-// vslb VecGeneral
-// vsldoi VecPerm
-// vslh VecGeneral
-// vslo VecPerm
-// vslw VecGeneral
-// vspltb VecPerm
-// vsplth VecPerm
-// vspltisb VecPerm
-// vspltish VecPerm
-// vspltisw VecPerm
-// vspltw VecPerm
-// vsr VecVSR
-// vsrab VecGeneral
-// vsrah VecGeneral
-// vsraw VecGeneral
-// vsrb VecGeneral
-// vsrh VecGeneral
-// vsro VecPerm
-// vsrw VecGeneral
-// vsubcuw VecGeneral
-// vsubfp VecFP
-// vsubsbs VecGeneral
-// vsubshs VecGeneral
-// vsubsws VecGeneral
-// vsububm VecGeneral
-// vsububs VecGeneral
-// vsubuhm VecGeneral
-// vsubuhs VecGeneral
-// vsubuwm VecGeneral
-// vsubuws VecGeneral
-// vsum2sws VecComplex
-// vsum4sbs VecComplex
-// vsum4shs VecComplex
-// vsum4ubs VecComplex
-// vsumsws VecComplex
-// vupkhpx VecPerm
-// vupkhsb VecPerm
-// vupkhsh VecPerm
-// vupklpx VecPerm
-// vupklsb VecPerm
-// vupklsh VecPerm
-// vxor VecGeneral
-// xor IntSimple
-// xori IntSimple
-// xoris IntSimple
+// add IIC_IntSimple
+// addc IIC_IntGeneral
+// adde IIC_IntGeneral
+// addi IIC_IntSimple
+// addic IIC_IntGeneral
+// addic. IIC_IntGeneral
+// addis IIC_IntSimple
+// addme IIC_IntGeneral
+// addze IIC_IntGeneral
+// and IIC_IntSimple
+// andc IIC_IntSimple
+// andi. IIC_IntGeneral
+// andis. IIC_IntGeneral
+// b IIC_BrB
+// bc IIC_BrB
+// bcctr IIC_BrB
+// bclr IIC_BrB
+// cmp IIC_IntCompare
+// cmpi IIC_IntCompare
+// cmpl IIC_IntCompare
+// cmpli IIC_IntCompare
+// cntlzd IIC_IntRotateD
+// cntlzw IIC_IntGeneral
+// crand IIC_BrCR
+// crandc IIC_BrCR
+// creqv IIC_BrCR
+// crnand IIC_BrCR
+// crnor IIC_BrCR
+// cror IIC_BrCR
+// crorc IIC_BrCR
+// crxor IIC_BrCR
+// dcba IIC_LdStDCBA
+// dcbf IIC_LdStDCBF
+// dcbi IIC_LdStDCBI
+// dcbst IIC_LdStDCBF
+// dcbt IIC_LdStLoad
+// dcbtst IIC_LdStLoad
+// dcbz IIC_LdStDCBF
+// divd IIC_IntDivD
+// divdu IIC_IntDivD
+// divw IIC_IntDivW
+// divwu IIC_IntDivW
+// dss IIC_LdStDSS
+// dst IIC_LdStDSS
+// dstst IIC_LdStDSS
+// eciwx IIC_LdStLoad
+// ecowx IIC_LdStLoad
+// eieio IIC_LdStLoad
+// eqv IIC_IntSimple
+// extsb IIC_IntSimple
+// extsh IIC_IntSimple
+// extsw IIC_IntSimple
+// fabs IIC_FPGeneral
+// fadd IIC_FPAddSub
+// fadds IIC_FPGeneral
+// fcfid IIC_FPGeneral
+// fcmpo IIC_FPCompare
+// fcmpu IIC_FPCompare
+// fctid IIC_FPGeneral
+// fctidz IIC_FPGeneral
+// fctiw IIC_FPGeneral
+// fctiwz IIC_FPGeneral
+// fdiv IIC_FPDivD
+// fdivs IIC_FPDivS
+// fmadd IIC_FPFused
+// fmadds IIC_FPGeneral
+// fmr IIC_FPGeneral
+// fmsub IIC_FPFused
+// fmsubs IIC_FPGeneral
+// fmul IIC_FPFused
+// fmuls IIC_FPGeneral
+// fnabs IIC_FPGeneral
+// fneg IIC_FPGeneral
+// fnmadd IIC_FPFused
+// fnmadds IIC_FPGeneral
+// fnmsub IIC_FPFused
+// fnmsubs IIC_FPGeneral
+// fres IIC_FPRes
+// frsp IIC_FPGeneral
+// frsqrte IIC_FPGeneral
+// fsel IIC_FPGeneral
+// fsqrt IIC_FPSqrtD
+// fsqrts IIC_FPSqrtS
+// fsub IIC_FPAddSub
+// fsubs IIC_FPGeneral
+// icbi IIC_LdStICBI
+// isync IIC_SprISYNC
+// lbz IIC_LdStLoad
+// lbzu IIC_LdStLoadUpd
+// lbzux IIC_LdStLoadUpdX
+// lbzx IIC_LdStLoad
+// ld IIC_LdStLD
+// ldarx IIC_LdStLDARX
+// ldu IIC_LdStLDU
+// ldux IIC_LdStLDUX
+// ldx IIC_LdStLD
+// lfd IIC_LdStLFD
+// lfdu IIC_LdStLFDU
+// lfdux IIC_LdStLFDUX
+// lfdx IIC_LdStLFD
+// lfs IIC_LdStLFD
+// lfsu IIC_LdStLFDU
+// lfsux IIC_LdStLFDUX
+// lfsx IIC_LdStLFD
+// lha IIC_LdStLHA
+// lhau IIC_LdStLHAU
+// lhaux IIC_LdStLHAUX
+// lhax IIC_LdStLHA
+// lhbrx IIC_LdStLoad
+// lhz IIC_LdStLoad
+// lhzu IIC_LdStLoadUpd
+// lhzux IIC_LdStLoadUpdX
+// lhzx IIC_LdStLoad
+// lmw IIC_LdStLMW
+// lswi IIC_LdStLMW
+// lswx IIC_LdStLMW
+// lvebx IIC_LdStLVecX
+// lvehx IIC_LdStLVecX
+// lvewx IIC_LdStLVecX
+// lvsl IIC_LdStLVecX
+// lvsr IIC_LdStLVecX
+// lvx IIC_LdStLVecX
+// lvxl IIC_LdStLVecX
+// lwa IIC_LdStLWA
+// lwarx IIC_LdStLWARX
+// lwaux IIC_LdStLHAUX
+// lwax IIC_LdStLHA
+// lwbrx IIC_LdStLoad
+// lwz IIC_LdStLoad
+// lwzu IIC_LdStLoadUpd
+// lwzux IIC_LdStLoadUpdX
+// lwzx IIC_LdStLoad
+// mcrf IIC_BrMCR
+// mcrfs IIC_FPGeneral
+// mcrxr IIC_BrMCRX
+// mfcr IIC_SprMFCR
+// mffs IIC_IntMFFS
+// mfmsr IIC_SprMFMSR
+// mfspr IIC_SprMFSPR
+// mfsr IIC_SprMFSR
+// mfsrin IIC_SprMFSR
+// mftb IIC_SprMFTB
+// mfvscr IIC_IntMFVSCR
+// mtcrf IIC_BrMCRX
+// mtfsb0 IIC_IntMTFSB0
+// mtfsb1 IIC_IntMTFSB0
+// mtfsf IIC_IntMTFSB0
+// mtfsfi IIC_IntMTFSB0
+// mtmsr IIC_SprMTMSR
+// mtmsrd IIC_LdStLD
+// mtspr IIC_SprMTSPR
+// mtsr IIC_SprMTSR
+// mtsrd IIC_IntMTSRD
+// mtsrdin IIC_IntMTSRD
+// mtsrin IIC_SprMTSRIN
+// mtvscr IIC_IntMFVSCR
+// mulhd IIC_IntMulHD
+// mulhdu IIC_IntMulHD
+// mulhw IIC_IntMulHW
+// mulhwu IIC_IntMulHWU
+// mulld IIC_IntMulHD
+// mulli IIC_IntMulLI
+// mullw IIC_IntMulHW
+// nand IIC_IntSimple
+// neg IIC_IntSimple
+// nor IIC_IntSimple
+// or IIC_IntSimple
+// orc IIC_IntSimple
+// ori IIC_IntSimple
+// oris IIC_IntSimple
+// rfi IIC_SprRFI
+// rfid IIC_IntRFID
+// rldcl IIC_IntRotateD
+// rldcr IIC_IntRotateD
+// rldic IIC_IntRotateDI
+// rldicl IIC_IntRotateDI
+// rldicr IIC_IntRotateDI
+// rldimi IIC_IntRotateDI
+// rlwimi IIC_IntRotate
+// rlwinm IIC_IntGeneral
+// rlwnm IIC_IntGeneral
+// sc IIC_SprSC
+// slbia IIC_LdStSLBIA
+// slbie IIC_LdStSLBIE
+// sld IIC_IntRotateD
+// slw IIC_IntGeneral
+// srad IIC_IntRotateD
+// sradi IIC_IntRotateDI
+// sraw IIC_IntShift
+// srawi IIC_IntShift
+// srd IIC_IntRotateD
+// srw IIC_IntGeneral
+// stb IIC_LdStStore
+// stbu IIC_LdStStoreUpd
+// stbux IIC_LdStStoreUpd
+// stbx IIC_LdStStore
+// std IIC_LdStSTD
+// stdcx. IIC_LdStSTDCX
+// stdu IIC_LdStSTDU
+// stdux IIC_LdStSTDUX
+// stdx IIC_LdStSTD
+// stfd IIC_LdStSTFD
+// stfdu IIC_LdStSTFDU
+// stfdux IIC_LdStSTFDU
+// stfdx IIC_LdStSTFD
+// stfiwx IIC_LdStSTFD
+// stfs IIC_LdStSTFD
+// stfsu IIC_LdStSTFDU
+// stfsux IIC_LdStSTFDU
+// stfsx IIC_LdStSTFD
+// sth IIC_LdStStore
+// sthbrx IIC_LdStStore
+// sthu IIC_LdStStoreUpd
+// sthux IIC_LdStStoreUpd
+// sthx IIC_LdStStore
+// stmw IIC_LdStLMW
+// stswi IIC_LdStLMW
+// stswx IIC_LdStLMW
+// stvebx IIC_LdStSTVEBX
+// stvehx IIC_LdStSTVEBX
+// stvewx IIC_LdStSTVEBX
+// stvx IIC_LdStSTVEBX
+// stvxl IIC_LdStSTVEBX
+// stw IIC_LdStStore
+// stwbrx IIC_LdStStore
+// stwcx. IIC_LdStSTWCX
+// stwu IIC_LdStStoreUpd
+// stwux IIC_LdStStoreUpd
+// stwx IIC_LdStStore
+// subf IIC_IntGeneral
+// subfc IIC_IntGeneral
+// subfe IIC_IntGeneral
+// subfic IIC_IntGeneral
+// subfme IIC_IntGeneral
+// subfze IIC_IntGeneral
+// sync IIC_LdStSync
+// td IIC_IntTrapD
+// tdi IIC_IntTrapD
+// tlbia IIC_LdStSLBIA
+// tlbie IIC_LdStDCBF
+// tlbsync IIC_SprTLBSYNC
+// tw IIC_IntTrapW
+// twi IIC_IntTrapW
+// vaddcuw IIC_VecGeneral
+// vaddfp IIC_VecFP
+// vaddsbs IIC_VecGeneral
+// vaddshs IIC_VecGeneral
+// vaddsws IIC_VecGeneral
+// vaddubm IIC_VecGeneral
+// vaddubs IIC_VecGeneral
+// vadduhm IIC_VecGeneral
+// vadduhs IIC_VecGeneral
+// vadduwm IIC_VecGeneral
+// vadduws IIC_VecGeneral
+// vand IIC_VecGeneral
+// vandc IIC_VecGeneral
+// vavgsb IIC_VecGeneral
+// vavgsh IIC_VecGeneral
+// vavgsw IIC_VecGeneral
+// vavgub IIC_VecGeneral
+// vavguh IIC_VecGeneral
+// vavguw IIC_VecGeneral
+// vcfsx IIC_VecFP
+// vcfux IIC_VecFP
+// vcmpbfp IIC_VecFPCompare
+// vcmpeqfp IIC_VecFPCompare
+// vcmpequb IIC_VecGeneral
+// vcmpequh IIC_VecGeneral
+// vcmpequw IIC_VecGeneral
+// vcmpgefp IIC_VecFPCompare
+// vcmpgtfp IIC_VecFPCompare
+// vcmpgtsb IIC_VecGeneral
+// vcmpgtsh IIC_VecGeneral
+// vcmpgtsw IIC_VecGeneral
+// vcmpgtub IIC_VecGeneral
+// vcmpgtuh IIC_VecGeneral
+// vcmpgtuw IIC_VecGeneral
+// vctsxs IIC_VecFP
+// vctuxs IIC_VecFP
+// vexptefp IIC_VecFP
+// vlogefp IIC_VecFP
+// vmaddfp IIC_VecFP
+// vmaxfp IIC_VecFPCompare
+// vmaxsb IIC_VecGeneral
+// vmaxsh IIC_VecGeneral
+// vmaxsw IIC_VecGeneral
+// vmaxub IIC_VecGeneral
+// vmaxuh IIC_VecGeneral
+// vmaxuw IIC_VecGeneral
+// vmhaddshs IIC_VecComplex
+// vmhraddshs IIC_VecComplex
+// vminfp IIC_VecFPCompare
+// vminsb IIC_VecGeneral
+// vminsh IIC_VecGeneral
+// vminsw IIC_VecGeneral
+// vminub IIC_VecGeneral
+// vminuh IIC_VecGeneral
+// vminuw IIC_VecGeneral
+// vmladduhm IIC_VecComplex
+// vmrghb IIC_VecPerm
+// vmrghh IIC_VecPerm
+// vmrghw IIC_VecPerm
+// vmrglb IIC_VecPerm
+// vmrglh IIC_VecPerm
+// vmrglw IIC_VecPerm
+// vmsubfp IIC_VecFP
+// vmsummbm IIC_VecComplex
+// vmsumshm IIC_VecComplex
+// vmsumshs IIC_VecComplex
+// vmsumubm IIC_VecComplex
+// vmsumuhm IIC_VecComplex
+// vmsumuhs IIC_VecComplex
+// vmulesb IIC_VecComplex
+// vmulesh IIC_VecComplex
+// vmuleub IIC_VecComplex
+// vmuleuh IIC_VecComplex
+// vmulosb IIC_VecComplex
+// vmulosh IIC_VecComplex
+// vmuloub IIC_VecComplex
+// vmulouh IIC_VecComplex
+// vnor IIC_VecGeneral
+// vor IIC_VecGeneral
+// vperm IIC_VecPerm
+// vpkpx IIC_VecPerm
+// vpkshss IIC_VecPerm
+// vpkshus IIC_VecPerm
+// vpkswss IIC_VecPerm
+// vpkswus IIC_VecPerm
+// vpkuhum IIC_VecPerm
+// vpkuhus IIC_VecPerm
+// vpkuwum IIC_VecPerm
+// vpkuwus IIC_VecPerm
+// vrefp IIC_VecFPRound
+// vrfim IIC_VecFPRound
+// vrfin IIC_VecFPRound
+// vrfip IIC_VecFPRound
+// vrfiz IIC_VecFPRound
+// vrlb IIC_VecGeneral
+// vrlh IIC_VecGeneral
+// vrlw IIC_VecGeneral
+// vrsqrtefp IIC_VecFP
+// vsel IIC_VecGeneral
+// vsl IIC_VecVSL
+// vslb IIC_VecGeneral
+// vsldoi IIC_VecPerm
+// vslh IIC_VecGeneral
+// vslo IIC_VecPerm
+// vslw IIC_VecGeneral
+// vspltb IIC_VecPerm
+// vsplth IIC_VecPerm
+// vspltisb IIC_VecPerm
+// vspltish IIC_VecPerm
+// vspltisw IIC_VecPerm
+// vspltw IIC_VecPerm
+// vsr IIC_VecVSR
+// vsrab IIC_VecGeneral
+// vsrah IIC_VecGeneral
+// vsraw IIC_VecGeneral
+// vsrb IIC_VecGeneral
+// vsrh IIC_VecGeneral
+// vsro IIC_VecPerm
+// vsrw IIC_VecGeneral
+// vsubcuw IIC_VecGeneral
+// vsubfp IIC_VecFP
+// vsubsbs IIC_VecGeneral
+// vsubshs IIC_VecGeneral
+// vsubsws IIC_VecGeneral
+// vsububm IIC_VecGeneral
+// vsububs IIC_VecGeneral
+// vsubuhm IIC_VecGeneral
+// vsubuhs IIC_VecGeneral
+// vsubuwm IIC_VecGeneral
+// vsubuws IIC_VecGeneral
+// vsum2sws IIC_VecComplex
+// vsum4sbs IIC_VecComplex
+// vsum4shs IIC_VecComplex
+// vsum4ubs IIC_VecComplex
+// vsumsws IIC_VecComplex
+// vupkhpx IIC_VecPerm
+// vupkhsb IIC_VecPerm
+// vupkhsh IIC_VecPerm
+// vupklpx IIC_VecPerm
+// vupklsb IIC_VecPerm
+// vupklsh IIC_VecPerm
+// vxor IIC_VecGeneral
+// xor IIC_IntSimple
+// xori IIC_IntSimple
+// xoris IIC_IntSimple
//
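The IIC_* names in the table above are bare InstrItinClass tags; each per-processor
scheduling file then binds them to pipeline stages and operand read/write cycles,
as the PPCSchedule440.td and PPCScheduleA2.td hunks below do. A minimal sketch of
that pattern, assuming a hypothetical single-unit processor (HYPO_XU and
HypoItineraries are illustrative names, not from this tree):

  def IIC_IntSimple   : InstrItinClass;  // itinerary class tag, as declared above
  def HYPO_XU         : FuncUnit;        // hypothetical execution unit
  def HypoItineraries : ProcessorItineraries<
    [HYPO_XU], [], [
    // IIC_IntSimple occupies HYPO_XU for one cycle; the result is ready at
    // cycle 1 and both register operands are read at cycle 0.
    InstrItinData<IIC_IntSimple, [InstrStage<1, [HYPO_XU]>], [1, 0, 0]>
  ]>;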
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCSchedule440.td b/contrib/llvm/lib/Target/PowerPC/PPCSchedule440.td
index 37b6eac..218fed2 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCSchedule440.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCSchedule440.td
@@ -26,43 +26,39 @@
//===----------------------------------------------------------------------===//
// Functional units on the PowerPC 440/450 chip sets
//
-def IFTH1 : FuncUnit; // Fetch unit 1
-def IFTH2 : FuncUnit; // Fetch unit 2
-def PDCD1 : FuncUnit; // Decode unit 1
-def PDCD2 : FuncUnit; // Decode unit 2
-def DISS1 : FuncUnit; // Issue unit 1
-def DISS2 : FuncUnit; // Issue unit 2
-def LRACC : FuncUnit; // Register access and dispatch for
- // the simple integer (J-pipe) and
- // load/store (L-pipe) pipelines
-def IRACC : FuncUnit; // Register access and dispatch for
- // the complex integer (I-pipe) pipeline
-def FRACC : FuncUnit; // Register access and dispatch for
- // the floating-point execution (F-pipe) pipeline
-def IEXE1 : FuncUnit; // Execution stage 1 for the I pipeline
-def IEXE2 : FuncUnit; // Execution stage 2 for the I pipeline
-def IWB : FuncUnit; // Write-back unit for the I pipeline
-def JEXE1 : FuncUnit; // Execution stage 1 for the J pipeline
-def JEXE2 : FuncUnit; // Execution stage 2 for the J pipeline
-def JWB : FuncUnit; // Write-back unit for the J pipeline
-def AGEN : FuncUnit; // Address generation for the L pipeline
-def CRD : FuncUnit; // D-cache access for the L pipeline
-def LWB : FuncUnit; // Write-back unit for the L pipeline
-def FEXE1 : FuncUnit; // Execution stage 1 for the F pipeline
-def FEXE2 : FuncUnit; // Execution stage 2 for the F pipeline
-def FEXE3 : FuncUnit; // Execution stage 3 for the F pipeline
-def FEXE4 : FuncUnit; // Execution stage 4 for the F pipeline
-def FEXE5 : FuncUnit; // Execution stage 5 for the F pipeline
-def FEXE6 : FuncUnit; // Execution stage 6 for the F pipeline
-def FWB : FuncUnit; // Write-back unit for the F pipeline
+def P440_DISS1 : FuncUnit; // Issue unit 1
+def P440_DISS2 : FuncUnit; // Issue unit 2
+def P440_LRACC : FuncUnit; // Register access and dispatch for
+ // the simple integer (J-pipe) and
+ // load/store (L-pipe) pipelines
+def P440_IRACC : FuncUnit; // Register access and dispatch for
+ // the complex integer (I-pipe) pipeline
+def P440_FRACC : FuncUnit; // Register access and dispatch for
+ // the floating-point execution (F-pipe) pipeline
+def P440_IEXE1 : FuncUnit; // Execution stage 1 for the I pipeline
+def P440_IEXE2 : FuncUnit; // Execution stage 2 for the I pipeline
+def P440_IWB : FuncUnit; // Write-back unit for the I pipeline
+def P440_JEXE1 : FuncUnit; // Execution stage 1 for the J pipeline
+def P440_JEXE2 : FuncUnit; // Execution stage 2 for the J pipeline
+def P440_JWB : FuncUnit; // Write-back unit for the J pipeline
+def P440_AGEN : FuncUnit; // Address generation for the L pipeline
+def P440_CRD : FuncUnit; // D-cache access for the L pipeline
+def P440_LWB : FuncUnit; // Write-back unit for the L pipeline
+def P440_FEXE1 : FuncUnit; // Execution stage 1 for the F pipeline
+def P440_FEXE2 : FuncUnit; // Execution stage 2 for the F pipeline
+def P440_FEXE3 : FuncUnit; // Execution stage 3 for the F pipeline
+def P440_FEXE4 : FuncUnit; // Execution stage 4 for the F pipeline
+def P440_FEXE5 : FuncUnit; // Execution stage 5 for the F pipeline
+def P440_FEXE6 : FuncUnit; // Execution stage 6 for the F pipeline
+def P440_FWB : FuncUnit; // Write-back unit for the F pipeline
-def LWARX_Hold : FuncUnit; // This is a pseudo-unit which is used
- // to make sure that no lwarx/stwcx.
- // instructions are issued while another
- // lwarx/stwcx. is in the L pipe.
+def P440_LWARX_Hold : FuncUnit; // This is a pseudo-unit which is used
+ // to make sure that no lwarx/stwcx.
+ // instructions are issued while another
+ // lwarx/stwcx. is in the L pipe.
-def GPR_Bypass : Bypass; // The bypass for general-purpose regs.
-def FPR_Bypass : Bypass; // The bypass for floating-point regs.
+def P440_GPR_Bypass : Bypass; // The bypass for general-purpose regs.
+def P440_FPR_Bypass : Bypass; // The bypass for floating-point regs.
// Notes:
// Instructions are held in the FRACC, LRACC and IRACC pipeline
@@ -104,560 +100,500 @@ def FPR_Bypass : Bypass; // The bypass for floating-point regs.
def PPC440Itineraries : ProcessorItineraries<
- [IFTH1, IFTH2, PDCD1, PDCD2, DISS1, DISS2, FRACC,
- IRACC, IEXE1, IEXE2, IWB, LRACC, JEXE1, JEXE2, JWB, AGEN, CRD, LWB,
- FEXE1, FEXE2, FEXE3, FEXE4, FEXE5, FEXE6, FWB, LWARX_Hold],
- [GPR_Bypass, FPR_Bypass], [
- InstrItinData<IntSimple , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC, LRACC]>,
- InstrStage<1, [IEXE1, JEXE1]>,
- InstrStage<1, [IEXE2, JEXE2]>,
- InstrStage<1, [IWB, JWB]>],
- [6, 4, 4],
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntGeneral , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC, LRACC]>,
- InstrStage<1, [IEXE1, JEXE1]>,
- InstrStage<1, [IEXE2, JEXE2]>,
- InstrStage<1, [IWB, JWB]>],
- [6, 4, 4],
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntCompare , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC, LRACC]>,
- InstrStage<1, [IEXE1, JEXE1]>,
- InstrStage<1, [IEXE2, JEXE2]>,
- InstrStage<1, [IWB, JWB]>],
- [6, 4, 4],
- [NoBypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntDivW , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<33, [IWB]>],
- [40, 4, 4],
- [NoBypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntMFFS , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<1, [IWB]>],
- [7, 4, 4],
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntMTFSB0 , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<1, [IWB]>],
- [7, 4, 4],
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntMulHW , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<1, [IWB]>],
- [8, 4, 4],
- [NoBypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntMulHWU , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<1, [IWB]>],
- [8, 4, 4],
- [NoBypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntMulLI , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<1, [IWB]>],
- [8, 4, 4],
- [NoBypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntRotate , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC, LRACC]>,
- InstrStage<1, [IEXE1, JEXE1]>,
- InstrStage<1, [IEXE2, JEXE2]>,
- InstrStage<1, [IWB, JWB]>],
- [6, 4, 4],
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntShift , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC, LRACC]>,
- InstrStage<1, [IEXE1, JEXE1]>,
- InstrStage<1, [IEXE2, JEXE2]>,
- InstrStage<1, [IWB, JWB]>],
- [6, 4, 4],
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntTrapW , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<1, [IWB]>],
- [6, 4],
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<BrB , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<1, [IWB]>],
- [8, 4],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<BrCR , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<1, [IWB]>],
- [8, 4, 4],
- [NoBypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<BrMCR , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<1, [IWB]>],
- [8, 4, 4],
- [NoBypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<BrMCRX , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<1, [IWB]>],
- [8, 4, 4],
- [NoBypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStDCBA , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<1, [LWB]>],
- [8, 5],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStDCBF , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<1, [LWB]>],
- [8, 5],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStDCBI , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<1, [LWB]>],
- [8, 5],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStLoad , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<2, [LWB]>],
- [9, 5],
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStLoadUpd , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<2, [LWB]>],
- [9, 5],
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStStore , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<2, [LWB]>],
- [8, 5],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStStoreUpd, [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<2, [LWB]>],
- [8, 5],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStICBI , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<1, [LWB]>],
- [8, 5],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStSTFD , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<1, [LWB]>],
- [8, 5, 5],
- [NoBypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStSTFDU , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<1, [LWB]>],
- [8, 5, 5],
- [NoBypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStLFD , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<2, [LWB]>],
- [9, 5, 5],
- [NoBypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStLFDU , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<1, [LWB]>],
- [9, 5, 5],
- [NoBypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStLHA , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<1, [LWB]>],
- [8, 5],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStLHAU , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<1, [LWB]>],
- [8, 5],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStLMW , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<1, [LWB]>],
- [8, 5],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStLWARX , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1]>,
- InstrStage<1, [IRACC], 0>,
- InstrStage<4, [LWARX_Hold], 0>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<1, [LWB]>],
- [8, 5],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStSTD , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<2, [LWB]>],
- [8, 5],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStSTDU , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<2, [LWB]>],
- [8, 5],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStSTDCX , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1]>,
- InstrStage<1, [IRACC], 0>,
- InstrStage<4, [LWARX_Hold], 0>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<1, [LWB]>],
- [8, 5],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStSTWCX , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1]>,
- InstrStage<1, [IRACC], 0>,
- InstrStage<4, [LWARX_Hold], 0>,
- InstrStage<1, [LRACC]>,
- InstrStage<1, [AGEN]>,
- InstrStage<1, [CRD]>,
- InstrStage<1, [LWB]>],
- [8, 5],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStSync , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [LRACC]>,
- InstrStage<3, [AGEN], 1>,
- InstrStage<2, [CRD], 1>,
- InstrStage<1, [LWB]>]>,
- InstrItinData<SprISYNC , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [FRACC], 0>,
- InstrStage<1, [LRACC], 0>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [FEXE1], 0>,
- InstrStage<1, [AGEN], 0>,
- InstrStage<1, [JEXE1], 0>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [FEXE2], 0>,
- InstrStage<1, [CRD], 0>,
- InstrStage<1, [JEXE2], 0>,
- InstrStage<1, [IEXE2]>,
- InstrStage<6, [FEXE3], 0>,
- InstrStage<6, [LWB], 0>,
- InstrStage<6, [JWB], 0>,
- InstrStage<6, [IWB]>]>,
- InstrItinData<SprMFSR , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<1, [IWB]>],
- [6, 4],
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<SprMTMSR , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<1, [IWB]>],
- [6, 4],
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<SprMTSR , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<3, [IWB]>],
- [9, 4],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<SprTLBSYNC , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<1, [IWB]>]>,
- InstrItinData<SprMFCR , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<1, [IWB]>],
- [8, 4],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<SprMFMSR , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<1, [IWB]>],
- [7, 4],
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<SprMFSPR , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<3, [IWB]>],
- [10, 4],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<SprMFTB , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<3, [IWB]>],
- [10, 4],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<SprMTSPR , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<3, [IWB]>],
- [10, 4],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<SprMTSRIN , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<3, [IWB]>],
- [10, 4],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<SprRFI , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<1, [IWB]>],
- [8, 4],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<SprSC , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [IRACC]>,
- InstrStage<1, [IEXE1]>,
- InstrStage<1, [IEXE2]>,
- InstrStage<1, [IWB]>],
- [8, 4],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<FPGeneral , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [FRACC]>,
- InstrStage<1, [FEXE1]>,
- InstrStage<1, [FEXE2]>,
- InstrStage<1, [FEXE3]>,
- InstrStage<1, [FEXE4]>,
- InstrStage<1, [FEXE5]>,
- InstrStage<1, [FEXE6]>,
- InstrStage<1, [FWB]>],
- [10, 4, 4],
- [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPAddSub , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [FRACC]>,
- InstrStage<1, [FEXE1]>,
- InstrStage<1, [FEXE2]>,
- InstrStage<1, [FEXE3]>,
- InstrStage<1, [FEXE4]>,
- InstrStage<1, [FEXE5]>,
- InstrStage<1, [FEXE6]>,
- InstrStage<1, [FWB]>],
- [10, 4, 4],
- [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPCompare , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [FRACC]>,
- InstrStage<1, [FEXE1]>,
- InstrStage<1, [FEXE2]>,
- InstrStage<1, [FEXE3]>,
- InstrStage<1, [FEXE4]>,
- InstrStage<1, [FEXE5]>,
- InstrStage<1, [FEXE6]>,
- InstrStage<1, [FWB]>],
- [10, 4, 4],
- [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPDivD , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [FRACC]>,
- InstrStage<1, [FEXE1]>,
- InstrStage<1, [FEXE2]>,
- InstrStage<1, [FEXE3]>,
- InstrStage<1, [FEXE4]>,
- InstrStage<1, [FEXE5]>,
- InstrStage<1, [FEXE6]>,
- InstrStage<25, [FWB]>],
- [35, 4, 4],
- [NoBypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPDivS , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [FRACC]>,
- InstrStage<1, [FEXE1]>,
- InstrStage<1, [FEXE2]>,
- InstrStage<1, [FEXE3]>,
- InstrStage<1, [FEXE4]>,
- InstrStage<1, [FEXE5]>,
- InstrStage<1, [FEXE6]>,
- InstrStage<13, [FWB]>],
- [23, 4, 4],
- [NoBypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPFused , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [FRACC]>,
- InstrStage<1, [FEXE1]>,
- InstrStage<1, [FEXE2]>,
- InstrStage<1, [FEXE3]>,
- InstrStage<1, [FEXE4]>,
- InstrStage<1, [FEXE5]>,
- InstrStage<1, [FEXE6]>,
- InstrStage<1, [FWB]>],
- [10, 4, 4, 4],
- [FPR_Bypass, FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPRes , [InstrStage<1, [IFTH1, IFTH2]>,
- InstrStage<1, [PDCD1, PDCD2]>,
- InstrStage<1, [DISS1, DISS2]>,
- InstrStage<1, [FRACC]>,
- InstrStage<1, [FEXE1]>,
- InstrStage<1, [FEXE2]>,
- InstrStage<1, [FEXE3]>,
- InstrStage<1, [FEXE4]>,
- InstrStage<1, [FEXE5]>,
- InstrStage<1, [FEXE6]>,
- InstrStage<1, [FWB]>],
- [10, 4],
- [FPR_Bypass, FPR_Bypass]>
+ [P440_DISS1, P440_DISS2, P440_FRACC, P440_IRACC, P440_IEXE1, P440_IEXE2,
+ P440_IWB, P440_LRACC, P440_JEXE1, P440_JEXE2, P440_JWB, P440_AGEN, P440_CRD,
+ P440_LWB, P440_FEXE1, P440_FEXE2, P440_FEXE3, P440_FEXE4, P440_FEXE5,
+ P440_FEXE6, P440_FWB, P440_LWARX_Hold],
+ [P440_GPR_Bypass, P440_FPR_Bypass], [
+ InstrItinData<IIC_IntSimple, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC, P440_LRACC]>,
+ InstrStage<1, [P440_IEXE1, P440_JEXE1]>,
+ InstrStage<1, [P440_IEXE2, P440_JEXE2]>,
+ InstrStage<1, [P440_IWB, P440_JWB]>],
+ [2, 0, 0],
+ [P440_GPR_Bypass,
+ P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_IntGeneral, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC, P440_LRACC]>,
+ InstrStage<1, [P440_IEXE1, P440_JEXE1]>,
+ InstrStage<1, [P440_IEXE2, P440_JEXE2]>,
+ InstrStage<1, [P440_IWB, P440_JWB]>],
+ [2, 0, 0],
+ [P440_GPR_Bypass,
+ P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_IntCompare, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC, P440_LRACC]>,
+ InstrStage<1, [P440_IEXE1, P440_JEXE1]>,
+ InstrStage<1, [P440_IEXE2, P440_JEXE2]>,
+ InstrStage<1, [P440_IWB, P440_JWB]>],
+ [2, 0, 0],
+ [NoBypass, P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_IntDivW, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<33, [P440_IWB]>],
+ [36, 0, 0],
+ [NoBypass, P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_IntMFFS, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<1, [P440_IWB]>],
+ [3, 0, 0],
+ [P440_GPR_Bypass,
+ P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_IntMTFSB0, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<1, [P440_IWB]>],
+ [3, 0, 0],
+ [P440_GPR_Bypass,
+ P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_IntMulHW, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<1, [P440_IWB]>],
+ [4, 0, 0],
+ [NoBypass, P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_IntMulHWU, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<1, [P440_IWB]>],
+ [4, 0, 0],
+ [NoBypass, P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_IntMulLI, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<1, [P440_IWB]>],
+ [4, 0, 0],
+ [NoBypass, P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_IntRotate, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC, P440_LRACC]>,
+ InstrStage<1, [P440_IEXE1, P440_JEXE1]>,
+ InstrStage<1, [P440_IEXE2, P440_JEXE2]>,
+ InstrStage<1, [P440_IWB, P440_JWB]>],
+ [2, 0, 0],
+ [P440_GPR_Bypass,
+ P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_IntShift, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC, P440_LRACC]>,
+ InstrStage<1, [P440_IEXE1, P440_JEXE1]>,
+ InstrStage<1, [P440_IEXE2, P440_JEXE2]>,
+ InstrStage<1, [P440_IWB, P440_JWB]>],
+ [2, 0, 0],
+ [P440_GPR_Bypass,
+ P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_IntTrapW, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<1, [P440_IWB]>],
+ [2, 0],
+ [P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_BrB, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<1, [P440_IWB]>],
+ [4, 0],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_BrCR, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<1, [P440_IWB]>],
+ [4, 0, 0],
+ [NoBypass, P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_BrMCR, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<1, [P440_IWB]>],
+ [4, 0, 0],
+ [NoBypass, P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_BrMCRX, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<1, [P440_IWB]>],
+ [4, 0, 0],
+ [NoBypass, P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStDCBA, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<1, [P440_LWB]>],
+ [1, 1],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStDCBF, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<1, [P440_LWB]>],
+ [1, 1],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStDCBI, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<1, [P440_LWB]>],
+ [1, 1],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLoad, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<2, [P440_LWB]>],
+ [5, 1, 1],
+ [P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLoadUpd,[InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<2, [P440_LWB]>],
+ [5, 2, 1, 1],
+ [P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLoadUpdX,[InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<2, [P440_LWB]>],
+ [5, 2, 1, 1],
+ [P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStStore, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<2, [P440_LWB]>],
+ [1, 1, 1],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStStoreUpd,[InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<2, [P440_LWB]>],
+ [2, 1, 1, 1],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStICBI, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<1, [P440_LWB]>],
+ [4, 1, 1],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStSTFD, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<1, [P440_LWB]>],
+ [1, 1, 1],
+ [NoBypass, P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStSTFDU, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<1, [P440_LWB]>],
+ [2, 1, 1, 1],
+ [NoBypass, P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLFD, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<2, [P440_LWB]>],
+ [5, 1, 1],
+ [NoBypass, P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLFDU, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<1, [P440_LWB]>],
+ [5, 2, 1, 1],
+ [NoBypass, P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLFDUX, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<1, [P440_LWB]>],
+ [5, 2, 1, 1],
+ [NoBypass, P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLHA, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<1, [P440_LWB]>],
+ [4, 1, 1],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLHAU, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<1, [P440_LWB]>],
+ [4, 1, 1],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLHAUX, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<1, [P440_LWB]>],
+ [4, 1, 1],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLMW, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<1, [P440_LWB]>],
+ [4, 1, 1],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLWARX, [InstrStage<1, [P440_DISS1]>,
+ InstrStage<1, [P440_IRACC], 0>,
+ InstrStage<4, [P440_LWARX_Hold], 0>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<1, [P440_LWB]>],
+ [4, 1, 1],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStSTD, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<2, [P440_LWB]>],
+ [4, 1, 1],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStSTDU, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<2, [P440_LWB]>],
+ [2, 1, 1, 1],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStSTDUX, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<2, [P440_LWB]>],
+ [2, 1, 1, 1],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStSTDCX, [InstrStage<1, [P440_DISS1]>,
+ InstrStage<1, [P440_IRACC], 0>,
+ InstrStage<4, [P440_LWARX_Hold], 0>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<1, [P440_LWB]>],
+ [4, 1, 1],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStSTWCX, [InstrStage<1, [P440_DISS1]>,
+ InstrStage<1, [P440_IRACC], 0>,
+ InstrStage<4, [P440_LWARX_Hold], 0>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<1, [P440_AGEN]>,
+ InstrStage<1, [P440_CRD]>,
+ InstrStage<1, [P440_LWB]>],
+ [4, 1, 1],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_LdStSync, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_LRACC]>,
+ InstrStage<3, [P440_AGEN], 1>,
+ InstrStage<2, [P440_CRD], 1>,
+ InstrStage<1, [P440_LWB]>]>,
+ InstrItinData<IIC_SprISYNC, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_FRACC], 0>,
+ InstrStage<1, [P440_LRACC], 0>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_FEXE1], 0>,
+ InstrStage<1, [P440_AGEN], 0>,
+ InstrStage<1, [P440_JEXE1], 0>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_FEXE2], 0>,
+ InstrStage<1, [P440_CRD], 0>,
+ InstrStage<1, [P440_JEXE2], 0>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<6, [P440_FEXE3], 0>,
+ InstrStage<6, [P440_LWB], 0>,
+ InstrStage<6, [P440_JWB], 0>,
+ InstrStage<6, [P440_IWB]>]>,
+ InstrItinData<IIC_SprMFSR, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<1, [P440_IWB]>],
+ [2, 0],
+ [P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_SprMTMSR, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<1, [P440_IWB]>],
+ [2, 0],
+ [P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_SprMTSR, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<3, [P440_IWB]>],
+ [5, 0],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_SprTLBSYNC, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<1, [P440_IWB]>]>,
+ InstrItinData<IIC_SprMFCR, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<1, [P440_IWB]>],
+ [4, 0],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_SprMFMSR, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<1, [P440_IWB]>],
+ [3, 0],
+ [P440_GPR_Bypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_SprMFSPR, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<3, [P440_IWB]>],
+ [6, 0],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_SprMFTB, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<3, [P440_IWB]>],
+ [6, 0],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_SprMTSPR, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<3, [P440_IWB]>],
+ [6, 0],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_SprMTSRIN, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<3, [P440_IWB]>],
+ [6, 0],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_SprRFI, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<1, [P440_IWB]>],
+ [4, 0],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_SprSC, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_IRACC]>,
+ InstrStage<1, [P440_IEXE1]>,
+ InstrStage<1, [P440_IEXE2]>,
+ InstrStage<1, [P440_IWB]>],
+ [4, 0],
+ [NoBypass, P440_GPR_Bypass]>,
+ InstrItinData<IIC_FPGeneral, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_FRACC]>,
+ InstrStage<1, [P440_FEXE1]>,
+ InstrStage<1, [P440_FEXE2]>,
+ InstrStage<1, [P440_FEXE3]>,
+ InstrStage<1, [P440_FEXE4]>,
+ InstrStage<1, [P440_FEXE5]>,
+ InstrStage<1, [P440_FEXE6]>,
+ InstrStage<1, [P440_FWB]>],
+ [6, 0, 0],
+ [P440_FPR_Bypass,
+ P440_FPR_Bypass, P440_FPR_Bypass]>,
+ InstrItinData<IIC_FPAddSub, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_FRACC]>,
+ InstrStage<1, [P440_FEXE1]>,
+ InstrStage<1, [P440_FEXE2]>,
+ InstrStage<1, [P440_FEXE3]>,
+ InstrStage<1, [P440_FEXE4]>,
+ InstrStage<1, [P440_FEXE5]>,
+ InstrStage<1, [P440_FEXE6]>,
+ InstrStage<1, [P440_FWB]>],
+ [6, 0, 0],
+ [P440_FPR_Bypass,
+ P440_FPR_Bypass, P440_FPR_Bypass]>,
+ InstrItinData<IIC_FPCompare, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_FRACC]>,
+ InstrStage<1, [P440_FEXE1]>,
+ InstrStage<1, [P440_FEXE2]>,
+ InstrStage<1, [P440_FEXE3]>,
+ InstrStage<1, [P440_FEXE4]>,
+ InstrStage<1, [P440_FEXE5]>,
+ InstrStage<1, [P440_FEXE6]>,
+ InstrStage<1, [P440_FWB]>],
+ [6, 0, 0],
+ [P440_FPR_Bypass, P440_FPR_Bypass,
+ P440_FPR_Bypass]>,
+ InstrItinData<IIC_FPDivD, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_FRACC]>,
+ InstrStage<1, [P440_FEXE1]>,
+ InstrStage<1, [P440_FEXE2]>,
+ InstrStage<1, [P440_FEXE3]>,
+ InstrStage<1, [P440_FEXE4]>,
+ InstrStage<1, [P440_FEXE5]>,
+ InstrStage<1, [P440_FEXE6]>,
+ InstrStage<25, [P440_FWB]>],
+ [31, 0, 0],
+ [NoBypass, P440_FPR_Bypass, P440_FPR_Bypass]>,
+ InstrItinData<IIC_FPDivS, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_FRACC]>,
+ InstrStage<1, [P440_FEXE1]>,
+ InstrStage<1, [P440_FEXE2]>,
+ InstrStage<1, [P440_FEXE3]>,
+ InstrStage<1, [P440_FEXE4]>,
+ InstrStage<1, [P440_FEXE5]>,
+ InstrStage<1, [P440_FEXE6]>,
+ InstrStage<13, [P440_FWB]>],
+ [19, 0, 0],
+ [NoBypass, P440_FPR_Bypass, P440_FPR_Bypass]>,
+ InstrItinData<IIC_FPFused, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_FRACC]>,
+ InstrStage<1, [P440_FEXE1]>,
+ InstrStage<1, [P440_FEXE2]>,
+ InstrStage<1, [P440_FEXE3]>,
+ InstrStage<1, [P440_FEXE4]>,
+ InstrStage<1, [P440_FEXE5]>,
+ InstrStage<1, [P440_FEXE6]>,
+ InstrStage<1, [P440_FWB]>],
+ [6, 0, 0, 0],
+ [P440_FPR_Bypass,
+ P440_FPR_Bypass, P440_FPR_Bypass,
+ P440_FPR_Bypass]>,
+ InstrItinData<IIC_FPRes, [InstrStage<1, [P440_DISS1, P440_DISS2]>,
+ InstrStage<1, [P440_FRACC]>,
+ InstrStage<1, [P440_FEXE1]>,
+ InstrStage<1, [P440_FEXE2]>,
+ InstrStage<1, [P440_FEXE3]>,
+ InstrStage<1, [P440_FEXE4]>,
+ InstrStage<1, [P440_FEXE5]>,
+ InstrStage<1, [P440_FEXE6]>,
+ InstrStage<1, [P440_FWB]>],
+ [6, 0],
+ [P440_FPR_Bypass, P440_FPR_Bypass]>
]>;
+
+// ===---------------------------------------------------------------------===//
+// PPC440 machine model for scheduling and other instruction cost heuristics.
+
+def PPC440Model : SchedMachineModel {
+ let IssueWidth = 2; // 2 instructions are dispatched per cycle.
+ let MinLatency = -1; // OperandCycles are interpreted as MinLatency.
+ let LoadLatency = 5; // Optimistic load latency assuming bypass.
+                       // This is overridden by OperandCycles if the
+ // Itineraries are queried instead.
+
+ let Itineraries = PPC440Itineraries;
+}
+
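The PPC440Model definition above is the per-CPU SchedMachineModel consulted for
scheduling and cost heuristics, with PPC440Itineraries still supplying the detailed
OperandCycles. A plausible way such a model is attached to a CPU name, using the
generic ProcessorModel class (the CPU/feature pairing shown is an assumption, not
taken from this commit):

  // Hypothetical binding of the 440 CPU name to the new machine model.
  def : ProcessorModel<"440", PPC440Model, [FeatureISEL]>;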
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleA2.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleA2.td
index 1612cd2..1447696 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleA2.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleA2.td
@@ -14,8 +14,8 @@
//===----------------------------------------------------------------------===//
// Functional units on the PowerPC A2 chip sets
//
-def XU : FuncUnit; // XU pipeline
-def FU : FuncUnit; // FI pipeline
+def A2_XU : FuncUnit; // A2_XU pipeline
+def A2_FU : FuncUnit; // FI pipeline
//
// This file defines the itinerary class data for the PPC A2 processor.
@@ -24,126 +24,140 @@ def FU : FuncUnit; // FI pipeline
def PPCA2Itineraries : ProcessorItineraries<
- [XU, FU], [], [
- InstrItinData<IntSimple , [InstrStage<1, [XU]>],
- [1, 1, 1]>,
- InstrItinData<IntGeneral , [InstrStage<1, [XU]>],
- [2, 1, 1]>,
- InstrItinData<IntCompare , [InstrStage<1, [XU]>],
- [2, 1, 1]>,
- InstrItinData<IntDivW , [InstrStage<1, [XU]>],
- [39, 1, 1]>,
- InstrItinData<IntDivD , [InstrStage<1, [XU]>],
- [71, 1, 1]>,
- InstrItinData<IntMulHW , [InstrStage<1, [XU]>],
- [5, 1, 1]>,
- InstrItinData<IntMulHWU , [InstrStage<1, [XU]>],
- [5, 1, 1]>,
- InstrItinData<IntMulLI , [InstrStage<1, [XU]>],
- [6, 1, 1]>,
- InstrItinData<IntRotate , [InstrStage<1, [XU]>],
- [2, 1, 1]>,
- InstrItinData<IntRotateD , [InstrStage<1, [XU]>],
- [2, 1, 1]>,
- InstrItinData<IntRotateDI , [InstrStage<1, [XU]>],
- [2, 1, 1]>,
- InstrItinData<IntShift , [InstrStage<1, [XU]>],
- [2, 1, 1]>,
- InstrItinData<IntTrapW , [InstrStage<1, [XU]>],
- [2, 1]>,
- InstrItinData<IntTrapD , [InstrStage<1, [XU]>],
- [2, 1]>,
- InstrItinData<BrB , [InstrStage<1, [XU]>],
- [6, 1, 1]>,
- InstrItinData<BrCR , [InstrStage<1, [XU]>],
- [1, 1, 1]>,
- InstrItinData<BrMCR , [InstrStage<1, [XU]>],
- [5, 1, 1]>,
- InstrItinData<BrMCRX , [InstrStage<1, [XU]>],
- [1, 1, 1]>,
- InstrItinData<LdStDCBA , [InstrStage<1, [XU]>],
- [1, 1, 1]>,
- InstrItinData<LdStDCBF , [InstrStage<1, [XU]>],
- [1, 1, 1]>,
- InstrItinData<LdStDCBI , [InstrStage<1, [XU]>],
- [1, 1, 1]>,
- InstrItinData<LdStLoad , [InstrStage<1, [XU]>],
- [6, 1, 1]>,
- InstrItinData<LdStLoadUpd , [InstrStage<1, [XU]>],
- [6, 8, 1, 1]>,
- InstrItinData<LdStLDU , [InstrStage<1, [XU]>],
- [6, 1, 1]>,
- InstrItinData<LdStStore , [InstrStage<1, [XU]>],
- [1, 1, 1]>,
- InstrItinData<LdStStoreUpd, [InstrStage<1, [XU]>],
- [2, 1, 1, 1]>,
- InstrItinData<LdStICBI, [InstrStage<1, [XU]>],
- [16, 1, 1]>,
- InstrItinData<LdStSTFD , [InstrStage<1, [XU]>],
- [1, 1, 1]>,
- InstrItinData<LdStSTFDU , [InstrStage<1, [XU]>],
- [2, 1, 1, 1]>,
- InstrItinData<LdStLFD , [InstrStage<1, [XU]>],
- [7, 1, 1]>,
- InstrItinData<LdStLFDU , [InstrStage<1, [XU]>],
- [7, 9, 1, 1]>,
- InstrItinData<LdStLHA , [InstrStage<1, [XU]>],
- [6, 1, 1]>,
- InstrItinData<LdStLHAU , [InstrStage<1, [XU]>],
- [6, 8, 1, 1]>,
- InstrItinData<LdStLWARX , [InstrStage<1, [XU]>],
- [82, 1, 1]>, // L2 latency
- InstrItinData<LdStSTD , [InstrStage<1, [XU]>],
- [1, 1, 1]>,
- InstrItinData<LdStSTDU , [InstrStage<1, [XU]>],
- [2, 1, 1, 1]>,
- InstrItinData<LdStSTDCX , [InstrStage<1, [XU]>],
- [82, 1, 1]>, // L2 latency
- InstrItinData<LdStSTWCX , [InstrStage<1, [XU]>],
- [82, 1, 1]>, // L2 latency
- InstrItinData<LdStSync , [InstrStage<1, [XU]>],
- [6]>,
- InstrItinData<SprISYNC , [InstrStage<1, [XU]>],
- [16]>,
- InstrItinData<SprMTMSR , [InstrStage<1, [XU]>],
- [16, 1]>,
- InstrItinData<SprMFCR , [InstrStage<1, [XU]>],
- [6, 1]>,
- InstrItinData<SprMFMSR , [InstrStage<1, [XU]>],
- [4, 1]>,
- InstrItinData<SprMFSPR , [InstrStage<1, [XU]>],
- [6, 1]>,
- InstrItinData<SprMFTB , [InstrStage<1, [XU]>],
- [4, 1]>,
- InstrItinData<SprMTSPR , [InstrStage<1, [XU]>],
- [6, 1]>,
- InstrItinData<SprRFI , [InstrStage<1, [XU]>],
- [16]>,
- InstrItinData<SprSC , [InstrStage<1, [XU]>],
- [16]>,
- InstrItinData<FPGeneral , [InstrStage<1, [FU]>],
- [6, 1, 1]>,
- InstrItinData<FPAddSub , [InstrStage<1, [FU]>],
- [6, 1, 1]>,
- InstrItinData<FPCompare , [InstrStage<1, [FU]>],
- [5, 1, 1]>,
- InstrItinData<FPDivD , [InstrStage<1, [FU]>],
- [72, 1, 1]>,
- InstrItinData<FPDivS , [InstrStage<1, [FU]>],
- [59, 1, 1]>,
- InstrItinData<FPSqrt , [InstrStage<1, [FU]>],
- [69, 1, 1]>,
- InstrItinData<FPFused , [InstrStage<1, [FU]>],
- [6, 1, 1, 1]>,
- InstrItinData<FPRes , [InstrStage<1, [FU]>],
- [6, 1]>
+ [A2_XU, A2_FU], [], [
+ InstrItinData<IIC_IntSimple, [InstrStage<1, [A2_XU]>],
+ [1, 0, 0]>,
+ InstrItinData<IIC_IntGeneral, [InstrStage<1, [A2_XU]>],
+ [2, 0, 0]>,
+ InstrItinData<IIC_IntCompare, [InstrStage<1, [A2_XU]>],
+ [2, 0, 0]>,
+ InstrItinData<IIC_IntDivW, [InstrStage<1, [A2_XU]>],
+ [39, 0, 0]>,
+ InstrItinData<IIC_IntDivD, [InstrStage<1, [A2_XU]>],
+ [71, 0, 0]>,
+ InstrItinData<IIC_IntMulHW, [InstrStage<1, [A2_XU]>],
+ [5, 0, 0]>,
+ InstrItinData<IIC_IntMulHWU, [InstrStage<1, [A2_XU]>],
+ [5, 0, 0]>,
+ InstrItinData<IIC_IntMulLI, [InstrStage<1, [A2_XU]>],
+ [6, 0, 0]>,
+ InstrItinData<IIC_IntRotate, [InstrStage<1, [A2_XU]>],
+ [2, 0, 0]>,
+ InstrItinData<IIC_IntRotateD, [InstrStage<1, [A2_XU]>],
+ [2, 0, 0]>,
+ InstrItinData<IIC_IntRotateDI, [InstrStage<1, [A2_XU]>],
+ [2, 0, 0]>,
+ InstrItinData<IIC_IntShift, [InstrStage<1, [A2_XU]>],
+ [2, 0, 0]>,
+ InstrItinData<IIC_IntTrapW, [InstrStage<1, [A2_XU]>],
+ [2, 0]>,
+ InstrItinData<IIC_IntTrapD, [InstrStage<1, [A2_XU]>],
+ [2, 0]>,
+ InstrItinData<IIC_BrB, [InstrStage<1, [A2_XU]>],
+ [6, 0, 0]>,
+ InstrItinData<IIC_BrCR, [InstrStage<1, [A2_XU]>],
+ [1, 0, 0]>,
+ InstrItinData<IIC_BrMCR, [InstrStage<1, [A2_XU]>],
+ [5, 0, 0]>,
+ InstrItinData<IIC_BrMCRX, [InstrStage<1, [A2_XU]>],
+ [1, 0, 0]>,
+ InstrItinData<IIC_LdStDCBA, [InstrStage<1, [A2_XU]>],
+ [1, 0, 0]>,
+ InstrItinData<IIC_LdStDCBF, [InstrStage<1, [A2_XU]>],
+ [1, 0, 0]>,
+ InstrItinData<IIC_LdStDCBI, [InstrStage<1, [A2_XU]>],
+ [1, 0, 0]>,
+ InstrItinData<IIC_LdStLoad, [InstrStage<1, [A2_XU]>],
+ [6, 0, 0]>,
+ InstrItinData<IIC_LdStLoadUpd, [InstrStage<1, [A2_XU]>],
+ [6, 8, 0, 0]>,
+ InstrItinData<IIC_LdStLoadUpdX,[InstrStage<1, [A2_XU]>],
+ [6, 8, 0, 0]>,
+ InstrItinData<IIC_LdStLDU, [InstrStage<1, [A2_XU]>],
+ [6, 0, 0]>,
+ InstrItinData<IIC_LdStLDUX, [InstrStage<1, [A2_XU]>],
+ [6, 0, 0]>,
+ InstrItinData<IIC_LdStStore, [InstrStage<1, [A2_XU]>],
+ [0, 0, 0]>,
+ InstrItinData<IIC_LdStStoreUpd,[InstrStage<1, [A2_XU]>],
+ [2, 0, 0, 0]>,
+ InstrItinData<IIC_LdStICBI, [InstrStage<1, [A2_XU]>],
+ [16, 0, 0]>,
+ InstrItinData<IIC_LdStSTFD, [InstrStage<1, [A2_XU]>],
+ [0, 0, 0]>,
+ InstrItinData<IIC_LdStSTFDU, [InstrStage<1, [A2_XU]>],
+ [2, 0, 0, 0]>,
+ InstrItinData<IIC_LdStLFD, [InstrStage<1, [A2_XU]>],
+ [7, 0, 0]>,
+ InstrItinData<IIC_LdStLFDU, [InstrStage<1, [A2_XU]>],
+ [7, 9, 0, 0]>,
+ InstrItinData<IIC_LdStLFDUX, [InstrStage<1, [A2_XU]>],
+ [7, 9, 0, 0]>,
+ InstrItinData<IIC_LdStLHA, [InstrStage<1, [A2_XU]>],
+ [6, 0, 0]>,
+ InstrItinData<IIC_LdStLHAU, [InstrStage<1, [A2_XU]>],
+ [6, 8, 0, 0]>,
+ InstrItinData<IIC_LdStLHAUX, [InstrStage<1, [A2_XU]>],
+ [6, 8, 0, 0]>,
+ InstrItinData<IIC_LdStLWARX, [InstrStage<1, [A2_XU]>],
+ [82, 0, 0]>, // L2 latency
+ InstrItinData<IIC_LdStSTD, [InstrStage<1, [A2_XU]>],
+ [0, 0, 0]>,
+ InstrItinData<IIC_LdStSTDU, [InstrStage<1, [A2_XU]>],
+ [2, 0, 0, 0]>,
+ InstrItinData<IIC_LdStSTDUX, [InstrStage<1, [A2_XU]>],
+ [2, 0, 0, 0]>,
+ InstrItinData<IIC_LdStSTDCX, [InstrStage<1, [A2_XU]>],
+ [82, 0, 0]>, // L2 latency
+ InstrItinData<IIC_LdStSTWCX, [InstrStage<1, [A2_XU]>],
+ [82, 0, 0]>, // L2 latency
+ InstrItinData<IIC_LdStSync, [InstrStage<1, [A2_XU]>],
+ [6]>,
+ InstrItinData<IIC_SprISYNC, [InstrStage<1, [A2_XU]>],
+ [16]>,
+ InstrItinData<IIC_SprMTMSR, [InstrStage<1, [A2_XU]>],
+ [16, 0]>,
+ InstrItinData<IIC_SprMFCR, [InstrStage<1, [A2_XU]>],
+ [6, 0]>,
+ InstrItinData<IIC_SprMFCRF, [InstrStage<1, [A2_XU]>],
+ [1, 0]>,
+ InstrItinData<IIC_SprMFMSR, [InstrStage<1, [A2_XU]>],
+ [4, 0]>,
+ InstrItinData<IIC_SprMFSPR, [InstrStage<1, [A2_XU]>],
+ [6, 0]>,
+ InstrItinData<IIC_SprMFTB, [InstrStage<1, [A2_XU]>],
+ [4, 0]>,
+ InstrItinData<IIC_SprMTSPR, [InstrStage<1, [A2_XU]>],
+ [6, 0]>,
+ InstrItinData<IIC_SprRFI, [InstrStage<1, [A2_XU]>],
+ [16]>,
+ InstrItinData<IIC_SprSC, [InstrStage<1, [A2_XU]>],
+ [16]>,
+ InstrItinData<IIC_FPGeneral, [InstrStage<1, [A2_FU]>],
+ [6, 0, 0]>,
+ InstrItinData<IIC_FPAddSub, [InstrStage<1, [A2_FU]>],
+ [6, 0, 0]>,
+ InstrItinData<IIC_FPCompare, [InstrStage<1, [A2_FU]>],
+ [5, 0, 0]>,
+ InstrItinData<IIC_FPDivD, [InstrStage<1, [A2_FU]>],
+ [72, 0, 0]>,
+ InstrItinData<IIC_FPDivS, [InstrStage<1, [A2_FU]>],
+ [59, 0, 0]>,
+ InstrItinData<IIC_FPSqrtD, [InstrStage<1, [A2_FU]>],
+ [69, 0, 0]>,
+ InstrItinData<IIC_FPSqrtS, [InstrStage<1, [A2_FU]>],
+ [65, 0, 0]>,
+ InstrItinData<IIC_FPFused, [InstrStage<1, [A2_FU]>],
+ [6, 0, 0, 0]>,
+ InstrItinData<IIC_FPRes, [InstrStage<1, [A2_FU]>],
+ [6, 0]>
]>;
// ===---------------------------------------------------------------------===//
// A2 machine model for scheduling and other instruction cost heuristics.
def PPCA2Model : SchedMachineModel {
- let IssueWidth = 1; // 2 micro-ops are dispatched per cycle.
+ let IssueWidth = 1; // 1 instruction is dispatched per cycle.
let MinLatency = -1; // OperandCycles are interpreted as MinLatency.
let LoadLatency = 6; // Optimistic load latency assuming bypass.
// This is overriden by OperandCycles if the
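Across this file and the ones that follow, the substantive change is uniform: every
FuncUnit and Bypass def gains a per-core prefix (A2_, E500_, E5500_, G3_, G4_) and
every itinerary class gains the IIC_ prefix. The per-core schedule files are all
pulled into the same TableGen scope, so unprefixed names such as XU or FPU_0
defined by two different cores would collide. A minimal sketch of the failure mode
and the fix, with CPUA/CPUB standing in for real cores:

    // Unprefixed unit names collide once both schedule files are included:
    //   def XU : FuncUnit;   // core A's schedule file
    //   def XU : FuncUnit;   // core B's schedule file -> redefinition
    //
    // Prefixed units keep the shared IIC_* classes common while the pipelines
    // stay per-core (all names here are illustrative):
    def CPUA_XU : FuncUnit;
    def CPUB_XU : FuncUnit;
    def CPUAItineraries : ProcessorItineraries<[CPUA_XU], [], [
      InstrItinData<IIC_IntSimple, [InstrStage<1, [CPUA_XU]>], [1, 0, 0]>
    ]>;
    def CPUBItineraries : ProcessorItineraries<[CPUB_XU], [], [
      InstrItinData<IIC_IntSimple, [InstrStage<2, [CPUB_XU]>], [2, 0, 0]>
    ]>;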
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleE500mc.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleE500mc.td
index c189b9e..dab89e3 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleE500mc.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleE500mc.td
@@ -19,238 +19,285 @@
// * Decode & Dispatch
// Can dispatch up to 2 instructions per clock cycle to either the GPR Issue
// queues (GIQx), FP Issue Queue (FIQ), or Branch issue queue (BIQ).
-def DIS0 : FuncUnit; // Dispatch stage - insn 1
-def DIS1 : FuncUnit; // Dispatch stage - insn 2
+def E500_DIS0 : FuncUnit; // Dispatch stage - insn 1
+def E500_DIS1 : FuncUnit; // Dispatch stage - insn 2
// * Execute
// 6 pipelined execution units: SFX0, SFX1, BU, FPU, LSU, CFX.
// Some instructions can only execute in SFX0 but not SFX1.
// The CFX has a bypass path, allowing non-divide instructions to execute
// while a divide instruction is executed.
-def SFX0 : FuncUnit; // Simple unit 0
-def SFX1 : FuncUnit; // Simple unit 1
-def BU : FuncUnit; // Branch unit
-def CFX_DivBypass
- : FuncUnit; // CFX divide bypass path
-def CFX_0 : FuncUnit; // CFX pipeline
-def LSU_0 : FuncUnit; // LSU pipeline
-def FPU_0 : FuncUnit; // FPU pipeline
+def E500_SFX0 : FuncUnit; // Simple unit 0
+def E500_SFX1 : FuncUnit; // Simple unit 1
+def E500_BU : FuncUnit; // Branch unit
+def E500_CFX_DivBypass
+ : FuncUnit; // CFX divide bypass path
+def E500_CFX_0 : FuncUnit; // CFX pipeline
+def E500_LSU_0 : FuncUnit; // LSU pipeline
+def E500_FPU_0 : FuncUnit; // FPU pipeline
-def CR_Bypass : Bypass;
+def E500_GPR_Bypass : Bypass;
+def E500_FPR_Bypass : Bypass;
+def E500_CR_Bypass : Bypass;
def PPCE500mcItineraries : ProcessorItineraries<
- [DIS0, DIS1, SFX0, SFX1, BU, CFX_DivBypass, CFX_0, LSU_0, FPU_0],
- [CR_Bypass, GPR_Bypass, FPR_Bypass], [
- InstrItinData<IntSimple , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1]>],
- [4, 1, 1], // Latency = 1
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntGeneral , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1]>],
- [4, 1, 1], // Latency = 1
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntCompare , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1]>],
- [5, 1, 1], // Latency = 1 or 2
- [CR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntDivW , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [CFX_0], 0>,
- InstrStage<14, [CFX_DivBypass]>],
- [17, 1, 1], // Latency=4..35, Repeat= 4..35
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntMFFS , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<8, [FPU_0]>],
- [11], // Latency = 8
- [FPR_Bypass]>,
- InstrItinData<IntMTFSB0 , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<8, [FPU_0]>],
- [11, 1, 1], // Latency = 8
- [NoBypass, NoBypass, NoBypass]>,
- InstrItinData<IntMulHW , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [CFX_0]>],
- [7, 1, 1], // Latency = 4, Repeat rate = 1
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntMulHWU , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [CFX_0]>],
- [7, 1, 1], // Latency = 4, Repeat rate = 1
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntMulLI , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [CFX_0]>],
- [7, 1, 1], // Latency = 4, Repeat rate = 1
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntRotate , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1]>],
- [4, 1, 1], // Latency = 1
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntShift , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1]>],
- [4, 1, 1], // Latency = 1
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntTrapW , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<2, [SFX0]>],
- [5, 1], // Latency = 2, Repeat rate = 2
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<BrB , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [BU]>],
- [4, 1], // Latency = 1
- [NoBypass, GPR_Bypass]>,
- InstrItinData<BrCR , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [BU]>],
- [4, 1, 1], // Latency = 1
- [CR_Bypass, CR_Bypass, CR_Bypass]>,
- InstrItinData<BrMCR , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [BU]>],
- [4, 1], // Latency = 1
- [CR_Bypass, CR_Bypass]>,
- InstrItinData<BrMCRX , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1]>],
- [4, 1, 1], // Latency = 1
- [CR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStDCBA , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [6, 1], // Latency = 3, Repeat rate = 1
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStDCBF , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [6, 1], // Latency = 3
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStDCBI , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [6, 1], // Latency = 3
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStLoad , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [6, 1], // Latency = 3
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStLoadUpd , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1], 0>,
- InstrStage<1, [LSU_0]>],
- [6, 1], // Latency = 3
- [GPR_Bypass, GPR_Bypass],
- 2>, // 2 micro-ops
- InstrItinData<LdStStore , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [6, 1], // Latency = 3
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStStoreUpd, [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1], 0>,
- InstrStage<1, [LSU_0]>],
- [6, 1], // Latency = 3
- [NoBypass, GPR_Bypass],
- 2>, // 2 micro-ops
- InstrItinData<LdStICBI , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [6, 1], // Latency = 3
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStSTFD , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [6, 1, 1], // Latency = 3
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStSTFDU , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1], 0>,
- InstrStage<1, [LSU_0]>],
- [6, 1, 1], // Latency = 3
- [GPR_Bypass, GPR_Bypass, GPR_Bypass],
- 2>, // 2 micro-ops
- InstrItinData<LdStLFD , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 1, 1], // Latency = 4
- [FPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStLFDU , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 1, 1], // Latency = 4
- [FPR_Bypass, GPR_Bypass, GPR_Bypass],
- 2>, // 2 micro-ops
- InstrItinData<LdStLHA , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [6, 1], // Latency = 3
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStLHAU , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1], 0>,
- InstrStage<1, [LSU_0]>],
- [6, 1], // Latency = 3
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStLMW , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 1], // Latency = r+3
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStLWARX , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<3, [LSU_0]>],
- [6, 1, 1], // Latency = 3, Repeat rate = 3
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStSTWCX , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [6, 1], // Latency = 3
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStSync , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>]>,
- InstrItinData<SprMFSR , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<4, [SFX0]>],
- [7, 1],
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<SprMTMSR , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<2, [SFX0, SFX1]>],
- [5, 1], // Latency = 2, Repeat rate = 4
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<SprMTSR , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0]>],
- [5, 1],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<SprTLBSYNC , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0], 0>]>,
- InstrItinData<SprMFCR , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<5, [SFX0]>],
- [8, 1],
- [GPR_Bypass, CR_Bypass]>,
- InstrItinData<SprMFMSR , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<4, [SFX0]>],
- [7, 1], // Latency = 4, Repeat rate = 4
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<SprMFSPR , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1]>],
- [4, 1], // Latency = 1, Repeat rate = 1
- [GPR_Bypass, CR_Bypass]>,
- InstrItinData<SprMFTB , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<4, [SFX0]>],
- [7, 1], // Latency = 4, Repeat rate = 4
- [NoBypass, GPR_Bypass]>,
- InstrItinData<SprMTSPR , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1]>],
- [4, 1], // Latency = 1, Repeat rate = 1
- [CR_Bypass, GPR_Bypass]>,
- InstrItinData<SprMTSRIN , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0]>],
- [4, 1],
- [NoBypass, GPR_Bypass]>,
- InstrItinData<FPGeneral , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<2, [FPU_0]>],
- [11, 1, 1], // Latency = 8, Repeat rate = 2
- [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPAddSub , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<4, [FPU_0]>],
- [13, 1, 1], // Latency = 10, Repeat rate = 4
- [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPCompare , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<2, [FPU_0]>],
- [11, 1, 1], // Latency = 8, Repeat rate = 2
- [CR_Bypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPDivD , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<68, [FPU_0]>],
- [71, 1, 1], // Latency = 68, Repeat rate = 68
- [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPDivS , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<38, [FPU_0]>],
- [41, 1, 1], // Latency = 38, Repeat rate = 38
- [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPFused , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<4, [FPU_0]>],
- [13, 1, 1, 1], // Latency = 10, Repeat rate = 4
- [FPR_Bypass, FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPRes , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<38, [FPU_0]>],
- [41, 1], // Latency = 38, Repeat rate = 38
- [FPR_Bypass, FPR_Bypass]>
+ [E500_DIS0, E500_DIS1, E500_SFX0, E500_SFX1, E500_BU, E500_CFX_DivBypass,
+ E500_CFX_0, E500_LSU_0, E500_FPU_0],
+ [E500_CR_Bypass, E500_GPR_Bypass, E500_FPR_Bypass], [
+ InstrItinData<IIC_IntSimple, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_SFX0, E500_SFX1]>],
+ [4, 1, 1], // Latency = 1
+ [E500_GPR_Bypass,
+ E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_IntGeneral, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_SFX0, E500_SFX1]>],
+ [4, 1, 1], // Latency = 1
+ [E500_GPR_Bypass,
+ E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_IntCompare, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_SFX0, E500_SFX1]>],
+ [5, 1, 1], // Latency = 1 or 2
+ [E500_CR_Bypass,
+ E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_IntDivW, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_CFX_0], 0>,
+ InstrStage<14, [E500_CFX_DivBypass]>],
+ [17, 1, 1], // Latency=4..35, Repeat= 4..35
+ [E500_GPR_Bypass,
+ E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_IntMFFS, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<8, [E500_FPU_0]>],
+ [11], // Latency = 8
+ [E500_FPR_Bypass]>,
+ InstrItinData<IIC_IntMTFSB0, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<8, [E500_FPU_0]>],
+ [11, 1, 1], // Latency = 8
+ [NoBypass, NoBypass, NoBypass]>,
+ InstrItinData<IIC_IntMulHW, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_CFX_0]>],
+ [7, 1, 1], // Latency = 4, Repeat rate = 1
+ [E500_GPR_Bypass,
+ E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_IntMulHWU, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_CFX_0]>],
+ [7, 1, 1], // Latency = 4, Repeat rate = 1
+ [E500_GPR_Bypass,
+ E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_IntMulLI, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_CFX_0]>],
+ [7, 1, 1], // Latency = 4, Repeat rate = 1
+ [E500_GPR_Bypass,
+ E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_IntRotate, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_SFX0, E500_SFX1]>],
+ [4, 1, 1], // Latency = 1
+ [E500_GPR_Bypass,
+ E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_IntShift, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_SFX0, E500_SFX1]>],
+ [4, 1, 1], // Latency = 1
+ [E500_GPR_Bypass,
+ E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_IntTrapW, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<2, [E500_SFX0]>],
+ [5, 1], // Latency = 2, Repeat rate = 2
+ [E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_BrB, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_BU]>],
+ [4, 1], // Latency = 1
+ [NoBypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_BrCR, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_BU]>],
+ [4, 1, 1], // Latency = 1
+ [E500_CR_Bypass,
+ E500_CR_Bypass, E500_CR_Bypass]>,
+ InstrItinData<IIC_BrMCR, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_BU]>],
+ [4, 1], // Latency = 1
+ [E500_CR_Bypass, E500_CR_Bypass]>,
+ InstrItinData<IIC_BrMCRX, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_SFX0, E500_SFX1]>],
+ [4, 1, 1], // Latency = 1
+ [E500_CR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStDCBA, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_LSU_0]>],
+ [6, 1], // Latency = 3, Repeat rate = 1
+ [E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStDCBF, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_LSU_0]>],
+ [6, 1], // Latency = 3
+ [E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStDCBI, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_LSU_0]>],
+ [6, 1], // Latency = 3
+ [E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLoad, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_LSU_0]>],
+ [6, 1], // Latency = 3
+ [E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLoadUpd, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_SFX0, E500_SFX1], 0>,
+ InstrStage<1, [E500_LSU_0]>],
+ [6, 1], // Latency = 3
+ [E500_GPR_Bypass, E500_GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<IIC_LdStLoadUpdX,[InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_SFX0, E500_SFX1], 0>,
+ InstrStage<1, [E500_LSU_0]>],
+ [6, 1], // Latency = 3
+ [E500_GPR_Bypass, E500_GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<IIC_LdStStore, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_LSU_0]>],
+ [6, 1], // Latency = 3
+ [NoBypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStStoreUpd,[InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_SFX0, E500_SFX1], 0>,
+ InstrStage<1, [E500_LSU_0]>],
+ [6, 1], // Latency = 3
+ [NoBypass, E500_GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<IIC_LdStICBI, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_LSU_0]>],
+ [6, 1], // Latency = 3
+ [NoBypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStSTFD, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_LSU_0]>],
+ [6, 1, 1], // Latency = 3
+ [E500_GPR_Bypass,
+ E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStSTFDU, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_SFX0, E500_SFX1], 0>,
+ InstrStage<1, [E500_LSU_0]>],
+ [6, 1, 1], // Latency = 3
+ [E500_GPR_Bypass,
+ E500_GPR_Bypass, E500_GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<IIC_LdStLFD, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_LSU_0]>],
+ [7, 1, 1], // Latency = 4
+ [E500_FPR_Bypass,
+ E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLFDU, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_SFX0, E500_SFX1], 0>,
+ InstrStage<1, [E500_LSU_0]>],
+ [7, 1, 1], // Latency = 4
+ [E500_FPR_Bypass,
+ E500_GPR_Bypass, E500_GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<IIC_LdStLFDUX, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_SFX0, E500_SFX1], 0>,
+ InstrStage<1, [E500_LSU_0]>],
+ [7, 1, 1], // Latency = 4
+ [E500_FPR_Bypass,
+ E500_GPR_Bypass, E500_GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<IIC_LdStLHA, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_LSU_0]>],
+ [6, 1], // Latency = 3
+ [E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLHAU, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_SFX0, E500_SFX1], 0>,
+ InstrStage<1, [E500_LSU_0]>],
+ [6, 1], // Latency = 3
+ [E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLHAUX, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_SFX0, E500_SFX1], 0>,
+ InstrStage<1, [E500_LSU_0]>],
+ [6, 1], // Latency = 3
+ [E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLMW, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_LSU_0]>],
+ [7, 1], // Latency = r+3
+ [NoBypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLWARX, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<3, [E500_LSU_0]>],
+ [6, 1, 1], // Latency = 3, Repeat rate = 3
+ [E500_GPR_Bypass,
+ E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStSTWCX, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_LSU_0]>],
+ [6, 1], // Latency = 3
+ [NoBypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStSync, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_LSU_0]>]>,
+ InstrItinData<IIC_SprMFSR, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<4, [E500_SFX0]>],
+ [7, 1],
+ [E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_SprMTMSR, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<2, [E500_SFX0, E500_SFX1]>],
+ [5, 1], // Latency = 2, Repeat rate = 4
+ [E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_SprMTSR, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_SFX0]>],
+ [5, 1],
+ [NoBypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_SprTLBSYNC, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_LSU_0], 0>]>,
+ InstrItinData<IIC_SprMFCR, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<5, [E500_SFX0]>],
+ [8, 1],
+ [E500_GPR_Bypass, E500_CR_Bypass]>,
+ InstrItinData<IIC_SprMFCRF, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<5, [E500_SFX0]>],
+ [8, 1],
+ [E500_GPR_Bypass, E500_CR_Bypass]>,
+ InstrItinData<IIC_SprMFMSR, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<4, [E500_SFX0]>],
+ [7, 1], // Latency = 4, Repeat rate = 4
+ [E500_GPR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_SprMFSPR, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_SFX0, E500_SFX1]>],
+ [4, 1], // Latency = 1, Repeat rate = 1
+ [E500_GPR_Bypass, E500_CR_Bypass]>,
+ InstrItinData<IIC_SprMFTB, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<4, [E500_SFX0]>],
+ [7, 1], // Latency = 4, Repeat rate = 4
+ [NoBypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_SprMTSPR, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_SFX0, E500_SFX1]>],
+ [4, 1], // Latency = 1, Repeat rate = 1
+ [E500_CR_Bypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_SprMTSRIN, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<1, [E500_SFX0]>],
+ [4, 1],
+ [NoBypass, E500_GPR_Bypass]>,
+ InstrItinData<IIC_FPGeneral, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<2, [E500_FPU_0]>],
+ [11, 1, 1], // Latency = 8, Repeat rate = 2
+ [E500_FPR_Bypass,
+ E500_FPR_Bypass, E500_FPR_Bypass]>,
+ InstrItinData<IIC_FPAddSub, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<4, [E500_FPU_0]>],
+ [13, 1, 1], // Latency = 10, Repeat rate = 4
+ [E500_FPR_Bypass,
+ E500_FPR_Bypass, E500_FPR_Bypass]>,
+ InstrItinData<IIC_FPCompare, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<2, [E500_FPU_0]>],
+ [11, 1, 1], // Latency = 8, Repeat rate = 2
+ [E500_CR_Bypass,
+ E500_FPR_Bypass, E500_FPR_Bypass]>,
+ InstrItinData<IIC_FPDivD, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<68, [E500_FPU_0]>],
+ [71, 1, 1], // Latency = 68, Repeat rate = 68
+ [E500_FPR_Bypass,
+ E500_FPR_Bypass, E500_FPR_Bypass]>,
+ InstrItinData<IIC_FPDivS, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<38, [E500_FPU_0]>],
+ [41, 1, 1], // Latency = 38, Repeat rate = 38
+ [E500_FPR_Bypass,
+ E500_FPR_Bypass, E500_FPR_Bypass]>,
+ InstrItinData<IIC_FPFused, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<4, [E500_FPU_0]>],
+ [13, 1, 1, 1], // Latency = 10, Repeat rate = 4
+ [E500_FPR_Bypass,
+ E500_FPR_Bypass, E500_FPR_Bypass,
+ E500_FPR_Bypass]>,
+ InstrItinData<IIC_FPRes, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
+ InstrStage<38, [E500_FPU_0]>],
+ [41, 1], // Latency = 38, Repeat rate = 38
+ [E500_FPR_Bypass, E500_FPR_Bypass]>
]>;
// ===---------------------------------------------------------------------===//
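The E500mc entries above use the full five-field InstrItinData form visible in this
hunk: the itinerary class, the InstrStage list, the per-operand cycle list, the
per-operand bypass list, and an optional micro-op count. An InstrStage whose third
operand is 0 releases to the next stage in the same cycle, which is how the dispatch
slots (E500_DIS0/E500_DIS1) are overlapped with the execution unit. A hedged sketch
of one such entry with illustrative unit and bypass names:

    def X_DIS  : FuncUnit;   // dispatch slot
    def X_LSU  : FuncUnit;   // load/store pipeline
    def X_GPRB : Bypass;     // GPR forwarding path

    def XItineraries : ProcessorItineraries<
      [X_DIS, X_LSU], [X_GPRB], [
      // Dispatch overlaps with execution (timeinc 0), then one cycle in the LSU.
      InstrItinData<IIC_LdStLoadUpd, [InstrStage<1, [X_DIS], 0>,
                                      InstrStage<1, [X_LSU]>],
                    [6, 1],               // operand read/def cycles
                    [X_GPRB, X_GPRB],     // bypass used by each operand
                    2>                    // decodes to 2 micro-ops
    ]>;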
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleE5500.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleE5500.td
index 7a24d20..de097d9 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleE5500.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleE5500.td
@@ -20,280 +20,344 @@
// * Decode & Dispatch
// Can dispatch up to 2 instructions per clock cycle to either the GPR Issue
// queues (GIQx), FP Issue Queue (FIQ), or Branch issue queue (BIQ).
-// def DIS0 : FuncUnit;
-// def DIS1 : FuncUnit;
+def E5500_DIS0 : FuncUnit;
+def E5500_DIS1 : FuncUnit;
// * Execute
// 6 pipelined execution units: SFX0, SFX1, BU, FPU, LSU, CFX.
// The CFX has a bypass path, allowing non-divide instructions to execute
// while a divide instruction is being executed.
-// def SFX0 : FuncUnit; // Simple unit 0
-// def SFX1 : FuncUnit; // Simple unit 1
-// def BU : FuncUnit; // Branch unit
-// def CFX_DivBypass
-// : FuncUnit; // CFX divide bypass path
-// def CFX_0 : FuncUnit; // CFX pipeline stage 0
+def E5500_SFX0 : FuncUnit; // Simple unit 0
+def E5500_SFX1 : FuncUnit; // Simple unit 1
+def E5500_BU : FuncUnit; // Branch unit
+def E5500_CFX_DivBypass
+ : FuncUnit; // CFX divide bypass path
+def E5500_CFX_0 : FuncUnit; // CFX pipeline stage 0
-def CFX_1 : FuncUnit; // CFX pipeline stage 1
+def E5500_CFX_1 : FuncUnit; // CFX pipeline stage 1
-// def LSU_0 : FuncUnit; // LSU pipeline
-// def FPU_0 : FuncUnit; // FPU pipeline
+def E5500_LSU_0 : FuncUnit; // LSU pipeline
+def E5500_FPU_0 : FuncUnit; // FPU pipeline
-// def CR_Bypass : Bypass;
+def E5500_GPR_Bypass : Bypass;
+def E5500_FPR_Bypass : Bypass;
+def E5500_CR_Bypass : Bypass;
def PPCE5500Itineraries : ProcessorItineraries<
- [DIS0, DIS1, SFX0, SFX1, BU, CFX_DivBypass, CFX_0, CFX_1,
- LSU_0, FPU_0],
- [CR_Bypass, GPR_Bypass, FPR_Bypass], [
- InstrItinData<IntSimple , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1]>],
- [5, 2, 2], // Latency = 1
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntGeneral , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1]>],
- [5, 2, 2], // Latency = 1
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntCompare , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1]>],
- [6, 2, 2], // Latency = 1 or 2
- [CR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntDivD , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [CFX_0], 0>,
- InstrStage<26, [CFX_DivBypass]>],
- [30, 2, 2], // Latency= 4..26, Repeat rate= 4..26
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntDivW , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [CFX_0], 0>,
- InstrStage<16, [CFX_DivBypass]>],
- [20, 2, 2], // Latency= 4..16, Repeat rate= 4..16
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntMFFS , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [FPU_0]>],
- [11], // Latency = 7, Repeat rate = 1
- [FPR_Bypass]>,
- InstrItinData<IntMTFSB0 , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<7, [FPU_0]>],
- [11, 2, 2], // Latency = 7, Repeat rate = 7
- [NoBypass, NoBypass, NoBypass]>,
- InstrItinData<IntMulHD , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [CFX_0], 0>,
- InstrStage<2, [CFX_1]>],
- [9, 2, 2], // Latency = 4..7, Repeat rate = 2..4
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntMulHW , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [CFX_0], 0>,
- InstrStage<1, [CFX_1]>],
- [8, 2, 2], // Latency = 4, Repeat rate = 1
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntMulHWU , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [CFX_0], 0>,
- InstrStage<1, [CFX_1]>],
- [8, 2, 2], // Latency = 4, Repeat rate = 1
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntMulLI , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [CFX_0], 0>,
- InstrStage<2, [CFX_1]>],
- [8, 2, 2], // Latency = 4 or 5, Repeat = 2
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntRotate , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1]>],
- [5, 2, 2], // Latency = 1
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntRotateD , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<2, [SFX0, SFX1]>],
- [6, 2, 2], // Latency = 2, Repeat rate = 2
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntRotateDI , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1]>],
- [5, 2, 2], // Latency = 1, Repeat rate = 1
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntShift , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<2, [SFX0, SFX1]>],
- [6, 2, 2], // Latency = 2, Repeat rate = 2
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<IntTrapW , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<2, [SFX0]>],
- [6, 2], // Latency = 2, Repeat rate = 2
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<BrB , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [BU]>],
- [5, 2], // Latency = 1
- [NoBypass, GPR_Bypass]>,
- InstrItinData<BrCR , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [BU]>],
- [5, 2, 2], // Latency = 1
- [CR_Bypass, CR_Bypass, CR_Bypass]>,
- InstrItinData<BrMCR , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [BU]>],
- [5, 2], // Latency = 1
- [CR_Bypass, CR_Bypass]>,
- InstrItinData<BrMCRX , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [CFX_0]>],
- [5, 2, 2], // Latency = 1
- [CR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStDCBA , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 2], // Latency = 3, Repeat rate = 1
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStDCBF , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 2], // Latency = 3, Repeat rate = 1
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStDCBI , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 2], // Latency = 3, Repeat rate = 1
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStLoad , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 2], // Latency = 3
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStLoadUpd , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 2], // Latency = 3, Repeat rate = 1
- [GPR_Bypass, GPR_Bypass],
- 2>, // 2 micro-ops
- InstrItinData<LdStLD , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 2], // Latency = 3, Repeat rate = 1
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStLDARX , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<3, [LSU_0]>],
- [7, 2], // Latency = 3, Repeat rate = 3
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStLDU , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 2], // Latency = 3, Repeat rate = 1
- [GPR_Bypass, GPR_Bypass],
- 2>, // 2 micro-ops
- InstrItinData<LdStStore , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 2], // Latency = 3, Repeat rate = 1
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStStoreUpd, [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 2], // Latency = 3, Repeat rate = 1
- [NoBypass, GPR_Bypass],
- 2>, // 2 micro-ops
- InstrItinData<LdStICBI , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 2], // Latency = 3, Repeat rate = 1
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStSTFD , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 2, 2], // Latency = 3, Repeat rate = 1
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStSTFDU , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 2, 2], // Latency = 3, Repeat rate = 1
- [GPR_Bypass, GPR_Bypass, GPR_Bypass],
- 2>, // 2 micro-ops
- InstrItinData<LdStLFD , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [8, 2, 2], // Latency = 4, Repeat rate = 1
- [FPR_Bypass, GPR_Bypass, GPR_Bypass],
- 2>, // 2 micro-ops
- InstrItinData<LdStLFDU , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1], 0>,
- InstrStage<1, [LSU_0]>],
- [8, 2, 2], // Latency = 4, Repeat rate = 1
- [FPR_Bypass, GPR_Bypass, GPR_Bypass],
- 2>, // 2 micro-ops
- InstrItinData<LdStLHA , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 2], // Latency = 3
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStLHAU , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 2], // Latency = 3, Repeat rate = 1
- [GPR_Bypass, GPR_Bypass],
- 2>, // 2 micro-ops
- InstrItinData<LdStLMW , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<4, [LSU_0]>],
- [8, 2], // Latency = r+3, Repeat rate = r+3
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStLWARX , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<3, [LSU_0]>],
- [7, 2, 2], // Latency = 3, Repeat rate = 3
- [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
- InstrItinData<LdStSTD , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 2], // Latency = 3, Repeat rate = 1
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStSTDCX , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 2], // Latency = 3, Repeat rate = 1
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStSTDU , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 2], // Latency = 3, Repeat rate = 1
- [NoBypass, GPR_Bypass],
- 2>, // 2 micro-ops
- InstrItinData<LdStSTWCX , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>],
- [7, 2], // Latency = 3, Repeat rate = 1
- [NoBypass, GPR_Bypass]>,
- InstrItinData<LdStSync , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0]>]>,
- InstrItinData<SprMTMSR , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<2, [CFX_0]>],
- [6, 2], // Latency = 2, Repeat rate = 4
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<SprTLBSYNC , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [LSU_0], 0>]>,
- InstrItinData<SprMFCR , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<5, [CFX_0]>],
- [9, 2], // Latency = 5, Repeat rate = 5
- [GPR_Bypass, CR_Bypass]>,
- InstrItinData<SprMFMSR , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<4, [SFX0]>],
- [8, 2], // Latency = 4, Repeat rate = 4
- [GPR_Bypass, GPR_Bypass]>,
- InstrItinData<SprMFSPR , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [CFX_0]>],
- [5], // Latency = 1, Repeat rate = 1
- [GPR_Bypass]>,
- InstrItinData<SprMFTB , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<4, [CFX_0]>],
- [8, 2], // Latency = 4, Repeat rate = 4
- [NoBypass, GPR_Bypass]>,
- InstrItinData<SprMTSPR , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [SFX0, SFX1]>],
- [5], // Latency = 1, Repeat rate = 1
- [GPR_Bypass]>,
- InstrItinData<FPGeneral , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [FPU_0]>],
- [11, 2, 2], // Latency = 7, Repeat rate = 1
- [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPAddSub , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [FPU_0]>],
- [11, 2, 2], // Latency = 7, Repeat rate = 1
- [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPCompare , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [FPU_0]>],
- [11, 2, 2], // Latency = 7, Repeat rate = 1
- [CR_Bypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPDivD , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<31, [FPU_0]>],
- [39, 2, 2], // Latency = 35, Repeat rate = 31
- [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPDivS , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<16, [FPU_0]>],
- [24, 2, 2], // Latency = 20, Repeat rate = 16
- [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPFused , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<1, [FPU_0]>],
- [11, 2, 2, 2], // Latency = 7, Repeat rate = 1
- [FPR_Bypass, FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
- InstrItinData<FPRes , [InstrStage<1, [DIS0, DIS1], 0>,
- InstrStage<2, [FPU_0]>],
- [12, 2], // Latency = 8, Repeat rate = 2
- [FPR_Bypass, FPR_Bypass]>
+ [E5500_DIS0, E5500_DIS1, E5500_SFX0, E5500_SFX1, E5500_BU,
+ E5500_CFX_DivBypass, E5500_CFX_0, E5500_CFX_1,
+ E5500_LSU_0, E5500_FPU_0],
+ [E5500_CR_Bypass, E5500_GPR_Bypass, E5500_FPR_Bypass], [
+ InstrItinData<IIC_IntSimple, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_SFX0, E5500_SFX1]>],
+ [5, 2, 2], // Latency = 1
+ [E5500_GPR_Bypass,
+ E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_IntGeneral, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_SFX0, E5500_SFX1]>],
+ [5, 2, 2], // Latency = 1
+ [E5500_GPR_Bypass,
+ E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_IntCompare, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_SFX0, E5500_SFX1]>],
+ [6, 2, 2], // Latency = 1 or 2
+ [E5500_CR_Bypass,
+ E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_IntDivD, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_CFX_0], 0>,
+ InstrStage<26, [E5500_CFX_DivBypass]>],
+ [30, 2, 2], // Latency= 4..26, Repeat rate= 4..26
+ [E5500_GPR_Bypass,
+ E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_IntDivW, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_CFX_0], 0>,
+ InstrStage<16, [E5500_CFX_DivBypass]>],
+ [20, 2, 2], // Latency= 4..16, Repeat rate= 4..16
+ [E5500_GPR_Bypass,
+ E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_IntMFFS, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_FPU_0]>],
+ [11], // Latency = 7, Repeat rate = 1
+ [E5500_FPR_Bypass]>,
+ InstrItinData<IIC_IntMTFSB0, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<7, [E5500_FPU_0]>],
+ [11, 2, 2], // Latency = 7, Repeat rate = 7
+ [NoBypass, NoBypass, NoBypass]>,
+ InstrItinData<IIC_IntMulHD, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_CFX_0], 0>,
+ InstrStage<2, [E5500_CFX_1]>],
+ [9, 2, 2], // Latency = 4..7, Repeat rate = 2..4
+ [E5500_GPR_Bypass,
+ E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_IntMulHW, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_CFX_0], 0>,
+ InstrStage<1, [E5500_CFX_1]>],
+ [8, 2, 2], // Latency = 4, Repeat rate = 1
+ [E5500_GPR_Bypass,
+ E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_IntMulHWU, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_CFX_0], 0>,
+ InstrStage<1, [E5500_CFX_1]>],
+ [8, 2, 2], // Latency = 4, Repeat rate = 1
+ [E5500_GPR_Bypass,
+ E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_IntMulLI, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_CFX_0], 0>,
+ InstrStage<2, [E5500_CFX_1]>],
+ [8, 2, 2], // Latency = 4 or 5, Repeat = 2
+ [E5500_GPR_Bypass,
+ E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_IntRotate, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_SFX0, E5500_SFX1]>],
+ [5, 2, 2], // Latency = 1
+ [E5500_GPR_Bypass,
+ E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_IntRotateD, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<2, [E5500_SFX0, E5500_SFX1]>],
+ [6, 2, 2], // Latency = 2, Repeat rate = 2
+ [E5500_GPR_Bypass,
+ E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_IntRotateDI, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_SFX0, E5500_SFX1]>],
+ [5, 2, 2], // Latency = 1, Repeat rate = 1
+ [E5500_GPR_Bypass,
+ E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_IntShift, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<2, [E5500_SFX0, E5500_SFX1]>],
+ [6, 2, 2], // Latency = 2, Repeat rate = 2
+ [E5500_GPR_Bypass,
+ E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_IntTrapW, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<2, [E5500_SFX0]>],
+ [6, 2], // Latency = 2, Repeat rate = 2
+ [E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_BrB, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_BU]>],
+ [5, 2], // Latency = 1
+ [NoBypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_BrCR, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_BU]>],
+ [5, 2, 2], // Latency = 1
+ [E5500_CR_Bypass,
+ E5500_CR_Bypass, E5500_CR_Bypass]>,
+ InstrItinData<IIC_BrMCR, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_BU]>],
+ [5, 2], // Latency = 1
+ [E5500_CR_Bypass, E5500_CR_Bypass]>,
+ InstrItinData<IIC_BrMCRX, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_CFX_0]>],
+ [5, 2, 2], // Latency = 1
+ [E5500_CR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStDCBA, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStDCBF, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStDCBI, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLoad, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3
+ [E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLoadUpd, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_SFX0, E5500_SFX1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [E5500_GPR_Bypass, E5500_GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<IIC_LdStLoadUpdX,[InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_SFX0, E5500_SFX1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [E5500_GPR_Bypass, E5500_GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<IIC_LdStLD, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLDARX, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<3, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 3
+ [E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLDU, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_SFX0, E5500_SFX1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [E5500_GPR_Bypass, E5500_GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<IIC_LdStLDUX, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_SFX0, E5500_SFX1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [E5500_GPR_Bypass, E5500_GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<IIC_LdStStore, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [NoBypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStStoreUpd,[InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_SFX0, E5500_SFX1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [NoBypass, E5500_GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<IIC_LdStICBI, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [NoBypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStSTFD, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2, 2], // Latency = 3, Repeat rate = 1
+ [E5500_GPR_Bypass,
+ E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStSTFDU, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_SFX0, E5500_SFX1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2, 2], // Latency = 3, Repeat rate = 1
+ [E5500_GPR_Bypass,
+ E5500_GPR_Bypass, E5500_GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<IIC_LdStLFD, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [8, 2, 2], // Latency = 4, Repeat rate = 1
+ [E5500_FPR_Bypass,
+ E5500_GPR_Bypass, E5500_GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<IIC_LdStLFDU, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_SFX0, E5500_SFX1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [8, 2, 2], // Latency = 4, Repeat rate = 1
+ [E5500_FPR_Bypass,
+ E5500_GPR_Bypass, E5500_GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<IIC_LdStLFDUX, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_SFX0, E5500_SFX1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [8, 2, 2], // Latency = 4, Repeat rate = 1
+ [E5500_FPR_Bypass,
+ E5500_GPR_Bypass, E5500_GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<IIC_LdStLHA, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3
+ [E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLHAU, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_SFX0, E5500_SFX1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [E5500_GPR_Bypass, E5500_GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<IIC_LdStLHAUX, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_SFX0, E5500_SFX1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [E5500_GPR_Bypass, E5500_GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<IIC_LdStLMW, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<4, [E5500_LSU_0]>],
+ [8, 2], // Latency = r+3, Repeat rate = r+3
+ [NoBypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStLWARX, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<3, [E5500_LSU_0]>],
+ [7, 2, 2], // Latency = 3, Repeat rate = 3
+ [E5500_GPR_Bypass,
+ E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStSTD, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [NoBypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStSTDCX, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [NoBypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStSTDU, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_SFX0, E5500_SFX1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [NoBypass, E5500_GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<IIC_LdStSTDUX, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_SFX0, E5500_SFX1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [NoBypass, E5500_GPR_Bypass],
+ 2>, // 2 micro-ops
+ InstrItinData<IIC_LdStSTWCX, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_LSU_0]>],
+ [7, 2], // Latency = 3, Repeat rate = 1
+ [NoBypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_LdStSync, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_LSU_0]>]>,
+ InstrItinData<IIC_SprMTMSR, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<2, [E5500_CFX_0]>],
+ [6, 2], // Latency = 2, Repeat rate = 4
+ [E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_SprTLBSYNC, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_LSU_0], 0>]>,
+ InstrItinData<IIC_SprMFCR, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<5, [E5500_CFX_0]>],
+ [9, 2], // Latency = 5, Repeat rate = 5
+ [E5500_GPR_Bypass, E5500_CR_Bypass]>,
+ InstrItinData<IIC_SprMFCRF, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<5, [E5500_CFX_0]>],
+ [9, 2], // Latency = 5, Repeat rate = 5
+ [E5500_GPR_Bypass, E5500_CR_Bypass]>,
+ InstrItinData<IIC_SprMFMSR, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<4, [E5500_SFX0]>],
+ [8, 2], // Latency = 4, Repeat rate = 4
+ [E5500_GPR_Bypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_SprMFSPR, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_CFX_0]>],
+ [5], // Latency = 1, Repeat rate = 1
+ [E5500_GPR_Bypass]>,
+ InstrItinData<IIC_SprMFTB, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<4, [E5500_CFX_0]>],
+ [8, 2], // Latency = 4, Repeat rate = 4
+ [NoBypass, E5500_GPR_Bypass]>,
+ InstrItinData<IIC_SprMTSPR, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_SFX0, E5500_SFX1]>],
+ [5], // Latency = 1, Repeat rate = 1
+ [E5500_GPR_Bypass]>,
+ InstrItinData<IIC_FPGeneral, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_FPU_0]>],
+ [11, 2, 2], // Latency = 7, Repeat rate = 1
+ [E5500_FPR_Bypass,
+ E5500_FPR_Bypass, E5500_FPR_Bypass]>,
+ InstrItinData<IIC_FPAddSub, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_FPU_0]>],
+ [11, 2, 2], // Latency = 7, Repeat rate = 1
+ [E5500_FPR_Bypass,
+ E5500_FPR_Bypass, E5500_FPR_Bypass]>,
+ InstrItinData<IIC_FPCompare, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_FPU_0]>],
+ [11, 2, 2], // Latency = 7, Repeat rate = 1
+ [E5500_CR_Bypass,
+ E5500_FPR_Bypass, E5500_FPR_Bypass]>,
+ InstrItinData<IIC_FPDivD, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<31, [E5500_FPU_0]>],
+ [39, 2, 2], // Latency = 35, Repeat rate = 31
+ [E5500_FPR_Bypass,
+ E5500_FPR_Bypass, E5500_FPR_Bypass]>,
+ InstrItinData<IIC_FPDivS, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<16, [E5500_FPU_0]>],
+ [24, 2, 2], // Latency = 20, Repeat rate = 16
+ [E5500_FPR_Bypass,
+ E5500_FPR_Bypass, E5500_FPR_Bypass]>,
+ InstrItinData<IIC_FPFused, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<1, [E5500_FPU_0]>],
+ [11, 2, 2, 2], // Latency = 7, Repeat rate = 1
+ [E5500_FPR_Bypass,
+ E5500_FPR_Bypass, E5500_FPR_Bypass,
+ E5500_FPR_Bypass]>,
+ InstrItinData<IIC_FPRes, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
+ InstrStage<2, [E5500_FPU_0]>],
+ [12, 2], // Latency = 8, Repeat rate = 2
+ [E5500_FPR_Bypass, E5500_FPR_Bypass]>
]>;
// ===---------------------------------------------------------------------===//
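One modeling trick worth calling out from the E500mc and E5500 hunks: integer
divides reserve the real CFX pipeline for zero additional cycles and then sit on a
dedicated divide-bypass unit for the long repeat interval, so other CFX
instructions keep issuing while a divide drains. A hedged sketch of the pattern
with illustrative names:

    def Y_CFX    : FuncUnit;   // complex integer pipeline
    def Y_CFXDiv : FuncUnit;   // divide bypass path

    def YItineraries : ProcessorItineraries<
      [Y_CFX, Y_CFXDiv], [], [
      // The divide holds Y_CFX only to issue (timeinc 0), then occupies the
      // bypass unit for the long repeat interval.
      InstrItinData<IIC_IntDivW, [InstrStage<1, [Y_CFX], 0>,
                                  InstrStage<16, [Y_CFXDiv]>],
                    [20, 2, 2]>,
      // Ordinary complex-integer ops contend only for Y_CFX itself.
      InstrItinData<IIC_IntMulHW, [InstrStage<1, [Y_CFX]>],
                    [8, 2, 2]>
    ]>;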
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td
index 72a0a39..21efd8f 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td
@@ -11,61 +11,70 @@
//
//===----------------------------------------------------------------------===//
+def G3_BPU : FuncUnit; // Branch unit
+def G3_SLU : FuncUnit; // Store/load unit
+def G3_SRU : FuncUnit; // special register unit
+def G3_IU1 : FuncUnit; // integer unit 1 (simple)
+def G3_IU2 : FuncUnit; // integer unit 2 (complex)
+def G3_FPU1 : FuncUnit; // floating point unit 1
def G3Itineraries : ProcessorItineraries<
- [IU1, IU2, FPU1, BPU, SRU, SLU], [], [
- InstrItinData<IntSimple , [InstrStage<1, [IU1, IU2]>]>,
- InstrItinData<IntGeneral , [InstrStage<1, [IU1, IU2]>]>,
- InstrItinData<IntCompare , [InstrStage<1, [IU1, IU2]>]>,
- InstrItinData<IntDivW , [InstrStage<19, [IU1]>]>,
- InstrItinData<IntMFFS , [InstrStage<1, [FPU1]>]>,
- InstrItinData<IntMTFSB0 , [InstrStage<3, [FPU1]>]>,
- InstrItinData<IntMulHW , [InstrStage<5, [IU1]>]>,
- InstrItinData<IntMulHWU , [InstrStage<6, [IU1]>]>,
- InstrItinData<IntMulLI , [InstrStage<3, [IU1]>]>,
- InstrItinData<IntRotate , [InstrStage<1, [IU1, IU2]>]>,
- InstrItinData<IntShift , [InstrStage<1, [IU1, IU2]>]>,
- InstrItinData<IntTrapW , [InstrStage<2, [IU1, IU2]>]>,
- InstrItinData<BrB , [InstrStage<1, [BPU]>]>,
- InstrItinData<BrCR , [InstrStage<1, [SRU]>]>,
- InstrItinData<BrMCR , [InstrStage<1, [SRU]>]>,
- InstrItinData<BrMCRX , [InstrStage<1, [SRU]>]>,
- InstrItinData<LdStDCBA , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStDCBF , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStDCBI , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStLoad , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStLoadUpd , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStStore , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStStoreUpd, [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStICBI , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStSTFD , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStSTFDU , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStLFD , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStLFDU , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStLHA , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStLHAU , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStLMW , [InstrStage<34, [SLU]>]>,
- InstrItinData<LdStLWARX , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStSTWCX , [InstrStage<8, [SLU]>]>,
- InstrItinData<LdStSync , [InstrStage<3, [SLU]>]>,
- InstrItinData<SprISYNC , [InstrStage<2, [SRU]>]>,
- InstrItinData<SprMFSR , [InstrStage<3, [SRU]>]>,
- InstrItinData<SprMTMSR , [InstrStage<1, [SRU]>]>,
- InstrItinData<SprMTSR , [InstrStage<2, [SRU]>]>,
- InstrItinData<SprTLBSYNC , [InstrStage<3, [SRU]>]>,
- InstrItinData<SprMFCR , [InstrStage<1, [SRU]>]>,
- InstrItinData<SprMFMSR , [InstrStage<1, [SRU]>]>,
- InstrItinData<SprMFSPR , [InstrStage<3, [SRU]>]>,
- InstrItinData<SprMFTB , [InstrStage<3, [SRU]>]>,
- InstrItinData<SprMTSPR , [InstrStage<2, [SRU]>]>,
- InstrItinData<SprMTSRIN , [InstrStage<2, [SRU]>]>,
- InstrItinData<SprRFI , [InstrStage<2, [SRU]>]>,
- InstrItinData<SprSC , [InstrStage<2, [SRU]>]>,
- InstrItinData<FPGeneral , [InstrStage<1, [FPU1]>]>,
- InstrItinData<FPAddSub , [InstrStage<1, [FPU1]>]>,
- InstrItinData<FPCompare , [InstrStage<1, [FPU1]>]>,
- InstrItinData<FPDivD , [InstrStage<31, [FPU1]>]>,
- InstrItinData<FPDivS , [InstrStage<17, [FPU1]>]>,
- InstrItinData<FPFused , [InstrStage<2, [FPU1]>]>,
- InstrItinData<FPRes , [InstrStage<10, [FPU1]>]>
+ [G3_IU1, G3_IU2, G3_FPU1, G3_BPU, G3_SRU, G3_SLU], [], [
+ InstrItinData<IIC_IntSimple , [InstrStage<1, [G3_IU1, G3_IU2]>]>,
+ InstrItinData<IIC_IntGeneral , [InstrStage<1, [G3_IU1, G3_IU2]>]>,
+ InstrItinData<IIC_IntCompare , [InstrStage<1, [G3_IU1, G3_IU2]>]>,
+ InstrItinData<IIC_IntDivW , [InstrStage<19, [G3_IU1]>]>,
+ InstrItinData<IIC_IntMFFS , [InstrStage<1, [G3_FPU1]>]>,
+ InstrItinData<IIC_IntMTFSB0 , [InstrStage<3, [G3_FPU1]>]>,
+ InstrItinData<IIC_IntMulHW , [InstrStage<5, [G3_IU1]>]>,
+ InstrItinData<IIC_IntMulHWU , [InstrStage<6, [G3_IU1]>]>,
+ InstrItinData<IIC_IntMulLI , [InstrStage<3, [G3_IU1]>]>,
+ InstrItinData<IIC_IntRotate , [InstrStage<1, [G3_IU1, G3_IU2]>]>,
+ InstrItinData<IIC_IntShift , [InstrStage<1, [G3_IU1, G3_IU2]>]>,
+ InstrItinData<IIC_IntTrapW , [InstrStage<2, [G3_IU1, G3_IU2]>]>,
+ InstrItinData<IIC_BrB , [InstrStage<1, [G3_BPU]>]>,
+ InstrItinData<IIC_BrCR , [InstrStage<1, [G3_SRU]>]>,
+ InstrItinData<IIC_BrMCR , [InstrStage<1, [G3_SRU]>]>,
+ InstrItinData<IIC_BrMCRX , [InstrStage<1, [G3_SRU]>]>,
+ InstrItinData<IIC_LdStDCBA , [InstrStage<2, [G3_SLU]>]>,
+ InstrItinData<IIC_LdStDCBF , [InstrStage<3, [G3_SLU]>]>,
+ InstrItinData<IIC_LdStDCBI , [InstrStage<3, [G3_SLU]>]>,
+ InstrItinData<IIC_LdStLoad , [InstrStage<2, [G3_SLU]>]>,
+ InstrItinData<IIC_LdStLoadUpd , [InstrStage<2, [G3_SLU]>]>,
+ InstrItinData<IIC_LdStLoadUpdX, [InstrStage<2, [G3_SLU]>]>,
+ InstrItinData<IIC_LdStStore , [InstrStage<2, [G3_SLU]>]>,
+ InstrItinData<IIC_LdStStoreUpd, [InstrStage<2, [G3_SLU]>]>,
+ InstrItinData<IIC_LdStICBI , [InstrStage<3, [G3_SLU]>]>,
+ InstrItinData<IIC_LdStSTFD , [InstrStage<2, [G3_SLU]>]>,
+ InstrItinData<IIC_LdStSTFDU , [InstrStage<2, [G3_SLU]>]>,
+ InstrItinData<IIC_LdStLFD , [InstrStage<2, [G3_SLU]>]>,
+ InstrItinData<IIC_LdStLFDU , [InstrStage<2, [G3_SLU]>]>,
+ InstrItinData<IIC_LdStLFDUX , [InstrStage<2, [G3_SLU]>]>,
+ InstrItinData<IIC_LdStLHA , [InstrStage<2, [G3_SLU]>]>,
+ InstrItinData<IIC_LdStLHAU , [InstrStage<2, [G3_SLU]>]>,
+ InstrItinData<IIC_LdStLHAUX , [InstrStage<2, [G3_SLU]>]>,
+ InstrItinData<IIC_LdStLMW , [InstrStage<34, [G3_SLU]>]>,
+ InstrItinData<IIC_LdStLWARX , [InstrStage<3, [G3_SLU]>]>,
+ InstrItinData<IIC_LdStSTWCX , [InstrStage<8, [G3_SLU]>]>,
+ InstrItinData<IIC_LdStSync , [InstrStage<3, [G3_SLU]>]>,
+ InstrItinData<IIC_SprISYNC , [InstrStage<2, [G3_SRU]>]>,
+ InstrItinData<IIC_SprMFSR , [InstrStage<3, [G3_SRU]>]>,
+ InstrItinData<IIC_SprMTMSR , [InstrStage<1, [G3_SRU]>]>,
+ InstrItinData<IIC_SprMTSR , [InstrStage<2, [G3_SRU]>]>,
+ InstrItinData<IIC_SprTLBSYNC , [InstrStage<3, [G3_SRU]>]>,
+ InstrItinData<IIC_SprMFCR , [InstrStage<1, [G3_SRU]>]>,
+ InstrItinData<IIC_SprMFMSR , [InstrStage<1, [G3_SRU]>]>,
+ InstrItinData<IIC_SprMFSPR , [InstrStage<3, [G3_SRU]>]>,
+ InstrItinData<IIC_SprMFTB , [InstrStage<3, [G3_SRU]>]>,
+ InstrItinData<IIC_SprMTSPR , [InstrStage<2, [G3_SRU]>]>,
+ InstrItinData<IIC_SprMTSRIN , [InstrStage<2, [G3_SRU]>]>,
+ InstrItinData<IIC_SprRFI , [InstrStage<2, [G3_SRU]>]>,
+ InstrItinData<IIC_SprSC , [InstrStage<2, [G3_SRU]>]>,
+ InstrItinData<IIC_FPGeneral , [InstrStage<1, [G3_FPU1]>]>,
+ InstrItinData<IIC_FPAddSub , [InstrStage<1, [G3_FPU1]>]>,
+ InstrItinData<IIC_FPCompare , [InstrStage<1, [G3_FPU1]>]>,
+ InstrItinData<IIC_FPDivD , [InstrStage<31, [G3_FPU1]>]>,
+ InstrItinData<IIC_FPDivS , [InstrStage<17, [G3_FPU1]>]>,
+ InstrItinData<IIC_FPFused , [InstrStage<2, [G3_FPU1]>]>,
+ InstrItinData<IIC_FPRes , [InstrStage<10, [G3_FPU1]>]>
]>;
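The change repeated across these schedule files is mechanical: every functional unit is now defined inside the schedule file that uses it, under a per-CPU prefix (G3_, G4_, G4P_, G5_, ...), so the names no longer collide between processor models, and every itinerary class picks up the IIC_ prefix. A minimal sketch of the shape the files now share, with a hypothetical Foo_ prefix and illustrative cycle counts:

    // Hypothetical per-CPU fragment; only the pattern is real, the Foo_ unit
    // names and the cycle counts are made up for illustration.
    def Foo_IU  : FuncUnit;   // integer unit, private to this model
    def Foo_SLU : FuncUnit;   // store/load unit, private to this model

    def FooItineraries : ProcessorItineraries<
      [Foo_IU, Foo_SLU], [], [
      // A simple integer op issues on the integer unit and holds it one cycle.
      InstrItinData<IIC_IntGeneral, [InstrStage<1, [Foo_IU]>]>,
      // A load occupies the store/load unit for two cycles.
      InstrItinData<IIC_LdStLoad  , [InstrStage<2, [Foo_SLU]>]>
    ]>;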
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td
index fc9120d..340773e 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td
@@ -11,71 +11,86 @@
//
//===----------------------------------------------------------------------===//
+def G4_BPU : FuncUnit; // Branch unit
+def G4_SLU : FuncUnit; // Store/load unit
+def G4_SRU : FuncUnit; // special register unit
+def G4_IU1 : FuncUnit; // integer unit 1 (simple)
+def G4_IU2 : FuncUnit; // integer unit 2 (complex)
+def G4_FPU1 : FuncUnit; // floating point unit 1
+def G4_VPU : FuncUnit; // vector permutation unit
+def G4_VIU1 : FuncUnit; // vector integer unit 1 (simple)
+def G4_VIU2 : FuncUnit; // vector integer unit 2 (complex)
+def G4_VFPU : FuncUnit; // vector floating point unit
+
def G4Itineraries : ProcessorItineraries<
- [IU1, IU2, SLU, SRU, BPU, FPU1, VIU1, VIU2, VPU, VFPU], [], [
- InstrItinData<IntSimple , [InstrStage<1, [IU1, IU2]>]>,
- InstrItinData<IntGeneral , [InstrStage<1, [IU1, IU2]>]>,
- InstrItinData<IntCompare , [InstrStage<1, [IU1, IU2]>]>,
- InstrItinData<IntDivW , [InstrStage<19, [IU1]>]>,
- InstrItinData<IntMFFS , [InstrStage<3, [FPU1]>]>,
- InstrItinData<IntMFVSCR , [InstrStage<1, [VIU1]>]>,
- InstrItinData<IntMTFSB0 , [InstrStage<3, [FPU1]>]>,
- InstrItinData<IntMulHW , [InstrStage<5, [IU1]>]>,
- InstrItinData<IntMulHWU , [InstrStage<6, [IU1]>]>,
- InstrItinData<IntMulLI , [InstrStage<3, [IU1]>]>,
- InstrItinData<IntRotate , [InstrStage<1, [IU1, IU2]>]>,
- InstrItinData<IntShift , [InstrStage<1, [IU1, IU2]>]>,
- InstrItinData<IntTrapW , [InstrStage<2, [IU1, IU2]>]>,
- InstrItinData<BrB , [InstrStage<1, [BPU]>]>,
- InstrItinData<BrCR , [InstrStage<1, [SRU]>]>,
- InstrItinData<BrMCR , [InstrStage<1, [SRU]>]>,
- InstrItinData<BrMCRX , [InstrStage<1, [SRU]>]>,
- InstrItinData<LdStDCBF , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStDCBI , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStLoad , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStLoadUpd , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStStore , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStStoreUpd, [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStDSS , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStICBI , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStSTFD , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStSTFDU , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStLFD , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStLFDU , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStLHA , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStLHAU , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStLMW , [InstrStage<34, [SLU]>]>,
- InstrItinData<LdStLVecX , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStLWARX , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStSTVEBX , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStSTWCX , [InstrStage<5, [SLU]>]>,
- InstrItinData<LdStSync , [InstrStage<8, [SLU]>]>,
- InstrItinData<SprISYNC , [InstrStage<2, [SRU]>]>,
- InstrItinData<SprMFSR , [InstrStage<3, [SRU]>]>,
- InstrItinData<SprMTMSR , [InstrStage<1, [SRU]>]>,
- InstrItinData<SprMTSR , [InstrStage<2, [SRU]>]>,
- InstrItinData<SprTLBSYNC , [InstrStage<8, [SRU]>]>,
- InstrItinData<SprMFCR , [InstrStage<1, [SRU]>]>,
- InstrItinData<SprMFMSR , [InstrStage<1, [SRU]>]>,
- InstrItinData<SprMFSPR , [InstrStage<3, [SRU]>]>,
- InstrItinData<SprMFTB , [InstrStage<1, [SRU]>]>,
- InstrItinData<SprMTSPR , [InstrStage<2, [SRU]>]>,
- InstrItinData<SprMTSRIN , [InstrStage<2, [SRU]>]>,
- InstrItinData<SprRFI , [InstrStage<2, [SRU]>]>,
- InstrItinData<SprSC , [InstrStage<2, [SRU]>]>,
- InstrItinData<FPGeneral , [InstrStage<1, [FPU1]>]>,
- InstrItinData<FPAddSub , [InstrStage<1, [FPU1]>]>,
- InstrItinData<FPCompare , [InstrStage<1, [FPU1]>]>,
- InstrItinData<FPDivD , [InstrStage<31, [FPU1]>]>,
- InstrItinData<FPDivS , [InstrStage<17, [FPU1]>]>,
- InstrItinData<FPFused , [InstrStage<1, [FPU1]>]>,
- InstrItinData<FPRes , [InstrStage<10, [FPU1]>]>,
- InstrItinData<VecGeneral , [InstrStage<1, [VIU1]>]>,
- InstrItinData<VecFP , [InstrStage<4, [VFPU]>]>,
- InstrItinData<VecFPCompare, [InstrStage<1, [VIU1]>]>,
- InstrItinData<VecComplex , [InstrStage<3, [VIU2]>]>,
- InstrItinData<VecPerm , [InstrStage<1, [VPU]>]>,
- InstrItinData<VecFPRound , [InstrStage<4, [VFPU]>]>,
- InstrItinData<VecVSL , [InstrStage<1, [VIU1]>]>,
- InstrItinData<VecVSR , [InstrStage<1, [VIU1]>]>
+ [G4_IU1, G4_IU2, G4_SLU, G4_SRU, G4_BPU, G4_FPU1,
+ G4_VIU1, G4_VIU2, G4_VPU, G4_VFPU], [], [
+ InstrItinData<IIC_IntSimple , [InstrStage<1, [G4_IU1, G4_IU2]>]>,
+ InstrItinData<IIC_IntGeneral , [InstrStage<1, [G4_IU1, G4_IU2]>]>,
+ InstrItinData<IIC_IntCompare , [InstrStage<1, [G4_IU1, G4_IU2]>]>,
+ InstrItinData<IIC_IntDivW , [InstrStage<19, [G4_IU1]>]>,
+ InstrItinData<IIC_IntMFFS , [InstrStage<3, [G4_FPU1]>]>,
+ InstrItinData<IIC_IntMFVSCR , [InstrStage<1, [G4_VIU1]>]>,
+ InstrItinData<IIC_IntMTFSB0 , [InstrStage<3, [G4_FPU1]>]>,
+ InstrItinData<IIC_IntMulHW , [InstrStage<5, [G4_IU1]>]>,
+ InstrItinData<IIC_IntMulHWU , [InstrStage<6, [G4_IU1]>]>,
+ InstrItinData<IIC_IntMulLI , [InstrStage<3, [G4_IU1]>]>,
+ InstrItinData<IIC_IntRotate , [InstrStage<1, [G4_IU1, G4_IU2]>]>,
+ InstrItinData<IIC_IntShift , [InstrStage<1, [G4_IU1, G4_IU2]>]>,
+ InstrItinData<IIC_IntTrapW , [InstrStage<2, [G4_IU1, G4_IU2]>]>,
+ InstrItinData<IIC_BrB , [InstrStage<1, [G4_BPU]>]>,
+ InstrItinData<IIC_BrCR , [InstrStage<1, [G4_SRU]>]>,
+ InstrItinData<IIC_BrMCR , [InstrStage<1, [G4_SRU]>]>,
+ InstrItinData<IIC_BrMCRX , [InstrStage<1, [G4_SRU]>]>,
+ InstrItinData<IIC_LdStDCBF , [InstrStage<2, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStDCBI , [InstrStage<2, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStLoad , [InstrStage<2, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStLoadUpd , [InstrStage<2, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStLoadUpdX, [InstrStage<2, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStStore , [InstrStage<2, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStStoreUpd, [InstrStage<2, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStDSS , [InstrStage<2, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStICBI , [InstrStage<2, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStSTFD , [InstrStage<2, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStSTFDU , [InstrStage<2, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStLFD , [InstrStage<2, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStLFDU , [InstrStage<2, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStLFDUX , [InstrStage<2, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStLHA , [InstrStage<2, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStLHAU , [InstrStage<2, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStLHAUX , [InstrStage<2, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStLMW , [InstrStage<34, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStLVecX , [InstrStage<2, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStLWARX , [InstrStage<3, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStSTVEBX , [InstrStage<2, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStSTWCX , [InstrStage<5, [G4_SLU]>]>,
+ InstrItinData<IIC_LdStSync , [InstrStage<8, [G4_SLU]>]>,
+ InstrItinData<IIC_SprISYNC , [InstrStage<2, [G4_SRU]>]>,
+ InstrItinData<IIC_SprMFSR , [InstrStage<3, [G4_SRU]>]>,
+ InstrItinData<IIC_SprMTMSR , [InstrStage<1, [G4_SRU]>]>,
+ InstrItinData<IIC_SprMTSR , [InstrStage<2, [G4_SRU]>]>,
+ InstrItinData<IIC_SprTLBSYNC , [InstrStage<8, [G4_SRU]>]>,
+ InstrItinData<IIC_SprMFCR , [InstrStage<1, [G4_SRU]>]>,
+ InstrItinData<IIC_SprMFMSR , [InstrStage<1, [G4_SRU]>]>,
+ InstrItinData<IIC_SprMFSPR , [InstrStage<3, [G4_SRU]>]>,
+ InstrItinData<IIC_SprMFTB , [InstrStage<1, [G4_SRU]>]>,
+ InstrItinData<IIC_SprMTSPR , [InstrStage<2, [G4_SRU]>]>,
+ InstrItinData<IIC_SprMTSRIN , [InstrStage<2, [G4_SRU]>]>,
+ InstrItinData<IIC_SprRFI , [InstrStage<2, [G4_SRU]>]>,
+ InstrItinData<IIC_SprSC , [InstrStage<2, [G4_SRU]>]>,
+ InstrItinData<IIC_FPGeneral , [InstrStage<1, [G4_FPU1]>]>,
+ InstrItinData<IIC_FPAddSub , [InstrStage<1, [G4_FPU1]>]>,
+ InstrItinData<IIC_FPCompare , [InstrStage<1, [G4_FPU1]>]>,
+ InstrItinData<IIC_FPDivD , [InstrStage<31, [G4_FPU1]>]>,
+ InstrItinData<IIC_FPDivS , [InstrStage<17, [G4_FPU1]>]>,
+ InstrItinData<IIC_FPFused , [InstrStage<1, [G4_FPU1]>]>,
+ InstrItinData<IIC_FPRes , [InstrStage<10, [G4_FPU1]>]>,
+ InstrItinData<IIC_VecGeneral , [InstrStage<1, [G4_VIU1]>]>,
+ InstrItinData<IIC_VecFP , [InstrStage<4, [G4_VFPU]>]>,
+ InstrItinData<IIC_VecFPCompare, [InstrStage<1, [G4_VIU1]>]>,
+ InstrItinData<IIC_VecComplex , [InstrStage<3, [G4_VIU2]>]>,
+ InstrItinData<IIC_VecPerm , [InstrStage<1, [G4_VPU]>]>,
+ InstrItinData<IIC_VecFPRound , [InstrStage<4, [G4_VFPU]>]>,
+ InstrItinData<IIC_VecVSL , [InstrStage<1, [G4_VIU1]>]>,
+ InstrItinData<IIC_VecVSR , [InstrStage<1, [G4_VIU1]>]>
]>;
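These itinerary records are attached to concrete CPU definitions in PPC.td (not shown in the hunks here). Roughly, a CPU entry names its itineraries directly, or, when a SchedMachineModel such as the new P7Model added below exists, names the model, which reaches the itineraries through its Itineraries field. The entries below are illustrative only, not quotes of PPC.td:

    // Illustrative hookup only; the real PPC.td entries carry the full
    // per-CPU directive and feature lists.
    def : Processor<"g4", G4Itineraries, [FeatureAltivec]>;
    def : ProcessorModel<"pwr7", P7Model, [FeatureAltivec]>;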
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td
index a4e82ce..1d9f13f 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td
@@ -11,78 +11,102 @@
//
//===----------------------------------------------------------------------===//
-def IU3 : FuncUnit; // integer unit 3 (7450 simple)
-def IU4 : FuncUnit; // integer unit 4 (7450 simple)
+def G4P_BPU : FuncUnit; // Branch unit
+def G4P_SLU : FuncUnit; // Store/load unit
+def G4P_SRU : FuncUnit; // special register unit
+def G4P_IU1 : FuncUnit; // integer unit 1 (simple)
+def G4P_IU2 : FuncUnit; // integer unit 2 (complex)
+def G4P_IU3 : FuncUnit; // integer unit 3 (simple)
+def G4P_IU4 : FuncUnit; // integer unit 4 (simple)
+def G4P_FPU1 : FuncUnit; // floating point unit 1
+def G4P_VPU : FuncUnit; // vector permutation unit
+def G4P_VIU1 : FuncUnit; // vector integer unit 1 (simple)
+def G4P_VIU2 : FuncUnit; // vector integer unit 2 (complex)
+def G4P_VFPU : FuncUnit; // vector floating point unit
def G4PlusItineraries : ProcessorItineraries<
- [IU1, IU2, IU3, IU4, BPU, SLU, FPU1, VFPU, VIU1, VIU2, VPU], [], [
- InstrItinData<IntSimple , [InstrStage<1, [IU1, IU2, IU3, IU4]>]>,
- InstrItinData<IntGeneral , [InstrStage<1, [IU1, IU2, IU3, IU4]>]>,
- InstrItinData<IntCompare , [InstrStage<1, [IU1, IU2, IU3, IU4]>]>,
- InstrItinData<IntDivW , [InstrStage<23, [IU2]>]>,
- InstrItinData<IntMFFS , [InstrStage<5, [FPU1]>]>,
- InstrItinData<IntMFVSCR , [InstrStage<2, [VFPU]>]>,
- InstrItinData<IntMTFSB0 , [InstrStage<5, [FPU1]>]>,
- InstrItinData<IntMulHW , [InstrStage<4, [IU2]>]>,
- InstrItinData<IntMulHWU , [InstrStage<4, [IU2]>]>,
- InstrItinData<IntMulLI , [InstrStage<3, [IU2]>]>,
- InstrItinData<IntRotate , [InstrStage<1, [IU1, IU2, IU3, IU4]>]>,
- InstrItinData<IntShift , [InstrStage<2, [IU1, IU2, IU3, IU4]>]>,
- InstrItinData<IntTrapW , [InstrStage<2, [IU1, IU2, IU3, IU4]>]>,
- InstrItinData<BrB , [InstrStage<1, [BPU]>]>,
- InstrItinData<BrCR , [InstrStage<2, [IU2]>]>,
- InstrItinData<BrMCR , [InstrStage<2, [IU2]>]>,
- InstrItinData<BrMCRX , [InstrStage<2, [IU2]>]>,
- InstrItinData<LdStDCBF , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStDCBI , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStLoad , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStLoadUpd , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStStore , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStStoreUpd, [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStDSS , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStICBI , [InstrStage<3, [IU2]>]>,
- InstrItinData<LdStSTFD , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStSTFDU , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStLFD , [InstrStage<4, [SLU]>]>,
- InstrItinData<LdStLFDU , [InstrStage<4, [SLU]>]>,
- InstrItinData<LdStLHA , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStLHAU , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStLMW , [InstrStage<37, [SLU]>]>,
- InstrItinData<LdStLVecX , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStLWA , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStLWARX , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStSTD , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStSTDCX , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStSTDU , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStSTVEBX , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStSTWCX , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStSync , [InstrStage<35, [SLU]>]>,
- InstrItinData<SprISYNC , [InstrStage<0, [IU1, IU2, IU3, IU4]>]>,
- InstrItinData<SprMFSR , [InstrStage<4, [IU2]>]>,
- InstrItinData<SprMTMSR , [InstrStage<2, [IU2]>]>,
- InstrItinData<SprMTSR , [InstrStage<2, [IU2]>]>,
- InstrItinData<SprTLBSYNC , [InstrStage<3, [SLU]>]>,
- InstrItinData<SprMFCR , [InstrStage<2, [IU2]>]>,
- InstrItinData<SprMFMSR , [InstrStage<3, [IU2]>]>,
- InstrItinData<SprMFSPR , [InstrStage<4, [IU2]>]>,
- InstrItinData<SprMFTB , [InstrStage<5, [IU2]>]>,
- InstrItinData<SprMTSPR , [InstrStage<2, [IU2]>]>,
- InstrItinData<SprMTSRIN , [InstrStage<2, [IU2]>]>,
- InstrItinData<SprRFI , [InstrStage<1, [IU1, IU2, IU3, IU4]>]>,
- InstrItinData<SprSC , [InstrStage<0, [IU1, IU2, IU3, IU4]>]>,
- InstrItinData<FPGeneral , [InstrStage<5, [FPU1]>]>,
- InstrItinData<FPAddSub , [InstrStage<5, [FPU1]>]>,
- InstrItinData<FPCompare , [InstrStage<5, [FPU1]>]>,
- InstrItinData<FPDivD , [InstrStage<35, [FPU1]>]>,
- InstrItinData<FPDivS , [InstrStage<21, [FPU1]>]>,
- InstrItinData<FPFused , [InstrStage<5, [FPU1]>]>,
- InstrItinData<FPRes , [InstrStage<14, [FPU1]>]>,
- InstrItinData<VecGeneral , [InstrStage<1, [VIU1]>]>,
- InstrItinData<VecFP , [InstrStage<4, [VFPU]>]>,
- InstrItinData<VecFPCompare, [InstrStage<2, [VFPU]>]>,
- InstrItinData<VecComplex , [InstrStage<4, [VIU2]>]>,
- InstrItinData<VecPerm , [InstrStage<2, [VPU]>]>,
- InstrItinData<VecFPRound , [InstrStage<4, [VIU1]>]>,
- InstrItinData<VecVSL , [InstrStage<2, [VPU]>]>,
- InstrItinData<VecVSR , [InstrStage<2, [VPU]>]>
+ [G4P_IU1, G4P_IU2, G4P_IU3, G4P_IU4, G4P_BPU, G4P_SLU, G4P_FPU1,
+ G4P_VFPU, G4P_VIU1, G4P_VIU2, G4P_VPU], [], [
+ InstrItinData<IIC_IntSimple , [InstrStage<1, [G4P_IU1, G4P_IU2,
+ G4P_IU3, G4P_IU4]>]>,
+ InstrItinData<IIC_IntGeneral , [InstrStage<1, [G4P_IU1, G4P_IU2,
+ G4P_IU3, G4P_IU4]>]>,
+ InstrItinData<IIC_IntCompare , [InstrStage<1, [G4P_IU1, G4P_IU2,
+ G4P_IU3, G4P_IU4]>]>,
+ InstrItinData<IIC_IntDivW , [InstrStage<23, [G4P_IU2]>]>,
+ InstrItinData<IIC_IntMFFS , [InstrStage<5, [G4P_FPU1]>]>,
+ InstrItinData<IIC_IntMFVSCR , [InstrStage<2, [G4P_VFPU]>]>,
+ InstrItinData<IIC_IntMTFSB0 , [InstrStage<5, [G4P_FPU1]>]>,
+ InstrItinData<IIC_IntMulHW , [InstrStage<4, [G4P_IU2]>]>,
+ InstrItinData<IIC_IntMulHWU , [InstrStage<4, [G4P_IU2]>]>,
+ InstrItinData<IIC_IntMulLI , [InstrStage<3, [G4P_IU2]>]>,
+ InstrItinData<IIC_IntRotate , [InstrStage<1, [G4P_IU1, G4P_IU2,
+ G4P_IU3, G4P_IU4]>]>,
+ InstrItinData<IIC_IntShift , [InstrStage<2, [G4P_IU1, G4P_IU2,
+ G4P_IU3, G4P_IU4]>]>,
+ InstrItinData<IIC_IntTrapW , [InstrStage<2, [G4P_IU1, G4P_IU2,
+ G4P_IU3, G4P_IU4]>]>,
+ InstrItinData<IIC_BrB , [InstrStage<1, [G4P_BPU]>]>,
+ InstrItinData<IIC_BrCR , [InstrStage<2, [G4P_IU2]>]>,
+ InstrItinData<IIC_BrMCR , [InstrStage<2, [G4P_IU2]>]>,
+ InstrItinData<IIC_BrMCRX , [InstrStage<2, [G4P_IU2]>]>,
+ InstrItinData<IIC_LdStDCBF , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStDCBI , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStLoad , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStLoadUpd , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStLoadUpdX, [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStStore , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStStoreUpd, [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStDSS , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStICBI , [InstrStage<3, [G4P_IU2]>]>,
+ InstrItinData<IIC_LdStSTFD , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStSTFDU , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStLFD , [InstrStage<4, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStLFDU , [InstrStage<4, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStLFDUX , [InstrStage<4, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStLHA , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStLHAU , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStLHAUX , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStLMW , [InstrStage<37, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStLVecX , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStLWA , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStLWARX , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStSTD , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStSTDCX , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStSTDU , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStSTDUX , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStSTVEBX , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStSTWCX , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_LdStSync , [InstrStage<35, [G4P_SLU]>]>,
+ InstrItinData<IIC_SprISYNC , [InstrStage<0, [G4P_IU1, G4P_IU2,
+ G4P_IU3, G4P_IU4]>]>,
+ InstrItinData<IIC_SprMFSR , [InstrStage<4, [G4P_IU2]>]>,
+ InstrItinData<IIC_SprMTMSR , [InstrStage<2, [G4P_IU2]>]>,
+ InstrItinData<IIC_SprMTSR , [InstrStage<2, [G4P_IU2]>]>,
+ InstrItinData<IIC_SprTLBSYNC , [InstrStage<3, [G4P_SLU]>]>,
+ InstrItinData<IIC_SprMFCR , [InstrStage<2, [G4P_IU2]>]>,
+ InstrItinData<IIC_SprMFMSR , [InstrStage<3, [G4P_IU2]>]>,
+ InstrItinData<IIC_SprMFSPR , [InstrStage<4, [G4P_IU2]>]>,
+ InstrItinData<IIC_SprMFTB , [InstrStage<5, [G4P_IU2]>]>,
+ InstrItinData<IIC_SprMTSPR , [InstrStage<2, [G4P_IU2]>]>,
+ InstrItinData<IIC_SprMTSRIN , [InstrStage<2, [G4P_IU2]>]>,
+ InstrItinData<IIC_SprRFI , [InstrStage<1, [G4P_IU1, G4P_IU2,
+ G4P_IU3, G4P_IU4]>]>,
+ InstrItinData<IIC_SprSC , [InstrStage<0, [G4P_IU1, G4P_IU2,
+ G4P_IU3, G4P_IU4]>]>,
+ InstrItinData<IIC_FPGeneral , [InstrStage<5, [G4P_FPU1]>]>,
+ InstrItinData<IIC_FPAddSub , [InstrStage<5, [G4P_FPU1]>]>,
+ InstrItinData<IIC_FPCompare , [InstrStage<5, [G4P_FPU1]>]>,
+ InstrItinData<IIC_FPDivD , [InstrStage<35, [G4P_FPU1]>]>,
+ InstrItinData<IIC_FPDivS , [InstrStage<21, [G4P_FPU1]>]>,
+ InstrItinData<IIC_FPFused , [InstrStage<5, [G4P_FPU1]>]>,
+ InstrItinData<IIC_FPRes , [InstrStage<14, [G4P_FPU1]>]>,
+ InstrItinData<IIC_VecGeneral , [InstrStage<1, [G4P_VIU1]>]>,
+ InstrItinData<IIC_VecFP , [InstrStage<4, [G4P_VFPU]>]>,
+ InstrItinData<IIC_VecFPCompare, [InstrStage<2, [G4P_VFPU]>]>,
+ InstrItinData<IIC_VecComplex , [InstrStage<4, [G4P_VIU2]>]>,
+ InstrItinData<IIC_VecPerm , [InstrStage<2, [G4P_VPU]>]>,
+ InstrItinData<IIC_VecFPRound , [InstrStage<4, [G4P_VIU1]>]>,
+ InstrItinData<IIC_VecVSL , [InstrStage<2, [G4P_VPU]>]>,
+ InstrItinData<IIC_VecVSR , [InstrStage<2, [G4P_VPU]>]>
]>;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td
index c64998d..a3b73ab 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td
@@ -11,90 +11,110 @@
//
//===----------------------------------------------------------------------===//
+def G5_BPU : FuncUnit; // Branch unit
+def G5_SLU : FuncUnit; // Store/load unit
+def G5_SRU : FuncUnit; // special register unit
+def G5_IU1 : FuncUnit; // integer unit 1 (simple)
+def G5_IU2 : FuncUnit; // integer unit 2 (complex)
+def G5_FPU1 : FuncUnit; // floating point unit 1
+def G5_FPU2 : FuncUnit; // floating point unit 2
+def G5_VPU : FuncUnit; // vector permutation unit
+def G5_VIU1 : FuncUnit; // vector integer unit 1 (simple)
+def G5_VIU2 : FuncUnit; // vector integer unit 2 (complex)
+def G5_VFPU : FuncUnit; // vector floating point unit
+
def G5Itineraries : ProcessorItineraries<
- [IU1, IU2, SLU, BPU, FPU1, FPU2, VFPU, VIU1, VIU2, VPU], [], [
- InstrItinData<IntSimple , [InstrStage<2, [IU1, IU2]>]>,
- InstrItinData<IntGeneral , [InstrStage<2, [IU1, IU2]>]>,
- InstrItinData<IntCompare , [InstrStage<3, [IU1, IU2]>]>,
- InstrItinData<IntDivD , [InstrStage<68, [IU1]>]>,
- InstrItinData<IntDivW , [InstrStage<36, [IU1]>]>,
- InstrItinData<IntMFFS , [InstrStage<6, [IU2]>]>,
- InstrItinData<IntMFVSCR , [InstrStage<1, [VFPU]>]>,
- InstrItinData<IntMTFSB0 , [InstrStage<6, [FPU1, FPU2]>]>,
- InstrItinData<IntMulHD , [InstrStage<7, [IU1, IU2]>]>,
- InstrItinData<IntMulHW , [InstrStage<5, [IU1, IU2]>]>,
- InstrItinData<IntMulHWU , [InstrStage<5, [IU1, IU2]>]>,
- InstrItinData<IntMulLI , [InstrStage<4, [IU1, IU2]>]>,
- InstrItinData<IntRFID , [InstrStage<1, [IU2]>]>,
- InstrItinData<IntRotateD , [InstrStage<2, [IU1, IU2]>]>,
- InstrItinData<IntRotateDI , [InstrStage<2, [IU1, IU2]>]>,
- InstrItinData<IntRotate , [InstrStage<4, [IU1, IU2]>]>,
- InstrItinData<IntShift , [InstrStage<2, [IU1, IU2]>]>,
- InstrItinData<IntTrapD , [InstrStage<1, [IU1, IU2]>]>,
- InstrItinData<IntTrapW , [InstrStage<1, [IU1, IU2]>]>,
- InstrItinData<BrB , [InstrStage<1, [BPU]>]>,
- InstrItinData<BrCR , [InstrStage<4, [BPU]>]>,
- InstrItinData<BrMCR , [InstrStage<2, [BPU]>]>,
- InstrItinData<BrMCRX , [InstrStage<3, [BPU]>]>,
- InstrItinData<LdStDCBF , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStLoad , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStLoadUpd , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStStore , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStStoreUpd, [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStDSS , [InstrStage<10, [SLU]>]>,
- InstrItinData<LdStICBI , [InstrStage<40, [SLU]>]>,
- InstrItinData<LdStSTFD , [InstrStage<4, [SLU]>]>,
- InstrItinData<LdStSTFDU , [InstrStage<4, [SLU]>]>,
- InstrItinData<LdStLD , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStLDU , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStLDARX , [InstrStage<11, [SLU]>]>,
- InstrItinData<LdStLFD , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStLFDU , [InstrStage<5, [SLU]>]>,
- InstrItinData<LdStLHA , [InstrStage<5, [SLU]>]>,
- InstrItinData<LdStLHAU , [InstrStage<5, [SLU]>]>,
- InstrItinData<LdStLMW , [InstrStage<64, [SLU]>]>,
- InstrItinData<LdStLVecX , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStLWA , [InstrStage<5, [SLU]>]>,
- InstrItinData<LdStLWARX , [InstrStage<11, [SLU]>]>,
- InstrItinData<LdStSLBIA , [InstrStage<40, [SLU]>]>, // needs work
- InstrItinData<LdStSLBIE , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStSTD , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStSTDU , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStSTDCX , [InstrStage<11, [SLU]>]>,
- InstrItinData<LdStSTVEBX , [InstrStage<5, [SLU]>]>,
- InstrItinData<LdStSTWCX , [InstrStage<11, [SLU]>]>,
- InstrItinData<LdStSync , [InstrStage<35, [SLU]>]>,
- InstrItinData<SprISYNC , [InstrStage<40, [SLU]>]>, // needs work
- InstrItinData<SprMFSR , [InstrStage<3, [SLU]>]>,
- InstrItinData<SprMTMSR , [InstrStage<3, [SLU]>]>,
- InstrItinData<SprMTSR , [InstrStage<3, [SLU]>]>,
- InstrItinData<SprTLBSYNC , [InstrStage<3, [SLU]>]>,
- InstrItinData<SprMFCR , [InstrStage<2, [IU2]>]>,
- InstrItinData<SprMFMSR , [InstrStage<3, [IU2]>]>,
- InstrItinData<SprMFSPR , [InstrStage<3, [IU2]>]>,
- InstrItinData<SprMFTB , [InstrStage<10, [IU2]>]>,
- InstrItinData<SprMTSPR , [InstrStage<8, [IU2]>]>,
- InstrItinData<SprSC , [InstrStage<1, [IU2]>]>,
- InstrItinData<FPGeneral , [InstrStage<6, [FPU1, FPU2]>]>,
- InstrItinData<FPAddSub , [InstrStage<6, [FPU1, FPU2]>]>,
- InstrItinData<FPCompare , [InstrStage<8, [FPU1, FPU2]>]>,
- InstrItinData<FPDivD , [InstrStage<33, [FPU1, FPU2]>]>,
- InstrItinData<FPDivS , [InstrStage<33, [FPU1, FPU2]>]>,
- InstrItinData<FPFused , [InstrStage<6, [FPU1, FPU2]>]>,
- InstrItinData<FPRes , [InstrStage<6, [FPU1, FPU2]>]>,
- InstrItinData<FPSqrt , [InstrStage<40, [FPU1, FPU2]>]>,
- InstrItinData<VecGeneral , [InstrStage<2, [VIU1]>]>,
- InstrItinData<VecFP , [InstrStage<8, [VFPU]>]>,
- InstrItinData<VecFPCompare, [InstrStage<2, [VFPU]>]>,
- InstrItinData<VecComplex , [InstrStage<5, [VIU2]>]>,
- InstrItinData<VecPerm , [InstrStage<3, [VPU]>]>,
- InstrItinData<VecFPRound , [InstrStage<8, [VFPU]>]>,
- InstrItinData<VecVSL , [InstrStage<2, [VIU1]>]>,
- InstrItinData<VecVSR , [InstrStage<3, [VPU]>]>
+ [G5_IU1, G5_IU2, G5_SLU, G5_BPU, G5_FPU1, G5_FPU2,
+ G5_VFPU, G5_VIU1, G5_VIU2, G5_VPU], [], [
+ InstrItinData<IIC_IntSimple , [InstrStage<2, [G5_IU1, G5_IU2]>]>,
+ InstrItinData<IIC_IntGeneral , [InstrStage<2, [G5_IU1, G5_IU2]>]>,
+ InstrItinData<IIC_IntCompare , [InstrStage<3, [G5_IU1, G5_IU2]>]>,
+ InstrItinData<IIC_IntDivD , [InstrStage<68, [G5_IU1]>]>,
+ InstrItinData<IIC_IntDivW , [InstrStage<36, [G5_IU1]>]>,
+ InstrItinData<IIC_IntMFFS , [InstrStage<6, [G5_IU2]>]>,
+ InstrItinData<IIC_IntMFVSCR , [InstrStage<1, [G5_VFPU]>]>,
+ InstrItinData<IIC_IntMTFSB0 , [InstrStage<6, [G5_FPU1, G5_FPU2]>]>,
+ InstrItinData<IIC_IntMulHD , [InstrStage<7, [G5_IU1, G5_IU2]>]>,
+ InstrItinData<IIC_IntMulHW , [InstrStage<5, [G5_IU1, G5_IU2]>]>,
+ InstrItinData<IIC_IntMulHWU , [InstrStage<5, [G5_IU1, G5_IU2]>]>,
+ InstrItinData<IIC_IntMulLI , [InstrStage<4, [G5_IU1, G5_IU2]>]>,
+ InstrItinData<IIC_IntRFID , [InstrStage<1, [G5_IU2]>]>,
+ InstrItinData<IIC_IntRotateD , [InstrStage<2, [G5_IU1, G5_IU2]>]>,
+ InstrItinData<IIC_IntRotateDI , [InstrStage<2, [G5_IU1, G5_IU2]>]>,
+ InstrItinData<IIC_IntRotate , [InstrStage<4, [G5_IU1, G5_IU2]>]>,
+ InstrItinData<IIC_IntShift , [InstrStage<2, [G5_IU1, G5_IU2]>]>,
+ InstrItinData<IIC_IntTrapD , [InstrStage<1, [G5_IU1, G5_IU2]>]>,
+ InstrItinData<IIC_IntTrapW , [InstrStage<1, [G5_IU1, G5_IU2]>]>,
+ InstrItinData<IIC_BrB , [InstrStage<1, [G5_BPU]>]>,
+ InstrItinData<IIC_BrCR , [InstrStage<4, [G5_BPU]>]>,
+ InstrItinData<IIC_BrMCR , [InstrStage<2, [G5_BPU]>]>,
+ InstrItinData<IIC_BrMCRX , [InstrStage<3, [G5_BPU]>]>,
+ InstrItinData<IIC_LdStDCBF , [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLoad , [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLoadUpd , [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLoadUpdX, [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStStore , [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStStoreUpd, [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStDSS , [InstrStage<10, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStICBI , [InstrStage<40, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStSTFD , [InstrStage<4, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStSTFDU , [InstrStage<4, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLD , [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLDU , [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLDUX , [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLDARX , [InstrStage<11, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLFD , [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLFDU , [InstrStage<5, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLFDUX , [InstrStage<5, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLHA , [InstrStage<5, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLHAU , [InstrStage<5, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLHAUX , [InstrStage<5, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLMW , [InstrStage<64, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLVecX , [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLWA , [InstrStage<5, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLWARX , [InstrStage<11, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStSLBIA , [InstrStage<40, [G5_SLU]>]>, // needs work
+ InstrItinData<IIC_LdStSLBIE , [InstrStage<2, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStSTD , [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStSTDU , [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStSTDUX , [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStSTDCX , [InstrStage<11, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStSTVEBX , [InstrStage<5, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStSTWCX , [InstrStage<11, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStSync , [InstrStage<35, [G5_SLU]>]>,
+ InstrItinData<IIC_SprISYNC , [InstrStage<40, [G5_SLU]>]>, // needs work
+ InstrItinData<IIC_SprMFSR , [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_SprMTMSR , [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_SprMTSR , [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_SprTLBSYNC , [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_SprMFCR , [InstrStage<2, [G5_IU2]>]>,
+ InstrItinData<IIC_SprMFCRF , [InstrStage<2, [G5_IU2]>]>,
+ InstrItinData<IIC_SprMFMSR , [InstrStage<3, [G5_IU2]>]>,
+ InstrItinData<IIC_SprMFSPR , [InstrStage<3, [G5_IU2]>]>,
+ InstrItinData<IIC_SprMFTB , [InstrStage<10, [G5_IU2]>]>,
+ InstrItinData<IIC_SprMTSPR , [InstrStage<8, [G5_IU2]>]>,
+ InstrItinData<IIC_SprSC , [InstrStage<1, [G5_IU2]>]>,
+ InstrItinData<IIC_FPGeneral , [InstrStage<6, [G5_FPU1, G5_FPU2]>]>,
+ InstrItinData<IIC_FPAddSub , [InstrStage<6, [G5_FPU1, G5_FPU2]>]>,
+ InstrItinData<IIC_FPCompare , [InstrStage<8, [G5_FPU1, G5_FPU2]>]>,
+ InstrItinData<IIC_FPDivD , [InstrStage<33, [G5_FPU1, G5_FPU2]>]>,
+ InstrItinData<IIC_FPDivS , [InstrStage<33, [G5_FPU1, G5_FPU2]>]>,
+ InstrItinData<IIC_FPFused , [InstrStage<6, [G5_FPU1, G5_FPU2]>]>,
+ InstrItinData<IIC_FPRes , [InstrStage<6, [G5_FPU1, G5_FPU2]>]>,
+ InstrItinData<IIC_FPSqrtD , [InstrStage<40, [G5_FPU1, G5_FPU2]>]>,
+ InstrItinData<IIC_FPSqrtS , [InstrStage<40, [G5_FPU1, G5_FPU2]>]>,
+ InstrItinData<IIC_VecGeneral , [InstrStage<2, [G5_VIU1]>]>,
+ InstrItinData<IIC_VecFP , [InstrStage<8, [G5_VFPU]>]>,
+ InstrItinData<IIC_VecFPCompare, [InstrStage<2, [G5_VFPU]>]>,
+ InstrItinData<IIC_VecComplex , [InstrStage<5, [G5_VIU2]>]>,
+ InstrItinData<IIC_VecPerm , [InstrStage<3, [G5_VPU]>]>,
+ InstrItinData<IIC_VecFPRound , [InstrStage<8, [G5_VFPU]>]>,
+ InstrItinData<IIC_VecVSL , [InstrStage<2, [G5_VIU1]>]>,
+ InstrItinData<IIC_VecVSR , [InstrStage<3, [G5_VPU]>]>
]>;
// ===---------------------------------------------------------------------===//
-// e5500 machine model for scheduling and other instruction cost heuristics.
+// G5 machine model for scheduling and other instruction cost heuristics.
def G5Model : SchedMachineModel {
let IssueWidth = 4; // 4 (non-branch) instructions are dispatched per cycle.
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleP7.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleP7.td
new file mode 100644
index 0000000..d3e4269
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleP7.td
@@ -0,0 +1,385 @@
+//===-- PPCScheduleP7.td - PPC P7 Scheduling Definitions ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the itinerary class data for the POWER7 processor.
+//
+//===----------------------------------------------------------------------===//
+
+// Primary reference:
+// IBM POWER7 multicore server processor
+// B. Sinharoy, et al.
+// IBM J. Res. & Dev. (55) 3. May/June 2011.
+
+// Scheduling for the P7 involves tracking two types of resources:
+// 1. The dispatch bundle slots
+// 2. The functional unit resources
+
+// Dispatch units:
+def P7_DU1 : FuncUnit;
+def P7_DU2 : FuncUnit;
+def P7_DU3 : FuncUnit;
+def P7_DU4 : FuncUnit;
+def P7_DU5 : FuncUnit;
+def P7_DU6 : FuncUnit;
+
+def P7_LS1 : FuncUnit; // Load/Store pipeline 1
+def P7_LS2 : FuncUnit; // Load/Store pipeline 2
+
+def P7_FX1 : FuncUnit; // FX pipeline 1
+def P7_FX2 : FuncUnit; // FX pipeline 2
+
+// VS pipeline 1 (vector integer ops. always here)
+def P7_VS1 : FuncUnit; // VS pipeline 1
+// VS pipeline 2 (128-bit stores and perms. here)
+def P7_VS2 : FuncUnit; // VS pipeline 2
+
+def P7_CRU : FuncUnit; // CR unit (CR logicals and move-from-SPRs)
+def P7_BRU : FuncUnit; // BR unit
+
+// Notes:
+// Each LSU pipeline can also execute FX add and logical instructions.
+// Each LSU pipeline can complete a load or store in one cycle.
+//
+// Each store is broken into two parts: AGEN goes to the LSU while a
+// "data steering" op. goes to the FXU or VSU.
+//
+// FX loads have a two cycle load-to-use latency (so one "bubble" cycle).
+// VSU loads have a three cycle load-to-use latency (so two "bubble" cycles).
+//
+// Frequent FX ops. take only one cycle and results can be used again in the
+// next cycle (there is a self-bypass). Getting results from the other FX
+// pipeline takes an additional cycle.
+//
+// The VSU XS is similar to the POWER6, but with a pipeline length of 2 cycles
+// (instead of 3 cycles on the POWER6). VSU XS handles vector FX-style ops.
+// Dispatch of an instruction to VS1 that uses four single prec. inputs
+// (either to a float or XC op) prevents dispatch in that cycle to VS2 of any
+// floating point instruction.
+//
+// The VSU PM is similar to the POWER6, but with a pipeline length of 3 cycles
+// (instead of 4 cycles on the POWER6). vsel is handled by the PM pipeline
+// (unlike on the POWER6).
+//
+// FMA from the VSUs can forward results in 6 cycles. VS1 XS and vector FP
+// share the same write-back, and have a 5-cycle latency difference, so the
+// IFU/IDU will not dispatch an XS instruction 5 cycles after a vector FP
+// op. has been dispatched to VS1.
+//
+// Three cycles after an L1 cache hit, a dependent VSU instruction can issue.
+//
+// Instruction dispatch groups have (at most) four non-branch instructions, and
+// two branches. Unlike on the POWER4/5, a branch does not automatically
+// end the dispatch group, but a second branch must be the last in the group.
+
+def P7Itineraries : ProcessorItineraries<
+ [P7_DU1, P7_DU2, P7_DU3, P7_DU4, P7_DU5, P7_DU6,
+ P7_LS1, P7_LS2, P7_FX1, P7_FX2, P7_VS1, P7_VS2, P7_CRU, P7_BRU], [], [
+ InstrItinData<IIC_IntSimple , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2,
+ P7_LS1, P7_LS2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_IntGeneral , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_IntCompare , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [1, 1, 1]>,
+ // FIXME: Add record-form itinerary data.
+ InstrItinData<IIC_IntDivW , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_DU2], 0>,
+ InstrStage<36, [P7_FX1, P7_FX2]>],
+ [36, 1, 1]>,
+ InstrItinData<IIC_IntDivD , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_DU2], 0>,
+ InstrStage<68, [P7_FX1, P7_FX2]>],
+ [68, 1, 1]>,
+ InstrItinData<IIC_IntMulHW , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [4, 1, 1]>,
+ InstrItinData<IIC_IntMulHWU , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [4, 1, 1]>,
+ InstrItinData<IIC_IntMulLI , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [4, 1, 1]>,
+ InstrItinData<IIC_IntRotate , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_IntRotateD , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_IntShift , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_IntTrapW , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [1, 1]>,
+ InstrItinData<IIC_IntTrapD , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [1, 1]>,
+ InstrItinData<IIC_BrB , [InstrStage<1, [P7_DU5, P7_DU6], 0>,
+ InstrStage<1, [P7_BRU]>],
+ [3, 1, 1]>,
+ InstrItinData<IIC_BrCR , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_CRU]>],
+ [3, 1, 1]>,
+ InstrItinData<IIC_BrMCR , [InstrStage<1, [P7_DU5, P7_DU6], 0>,
+ InstrStage<1, [P7_BRU]>],
+ [3, 1, 1]>,
+ InstrItinData<IIC_BrMCRX , [InstrStage<1, [P7_DU5, P7_DU6], 0>,
+ InstrStage<1, [P7_BRU]>],
+ [3, 1, 1]>,
+ InstrItinData<IIC_LdStLoad , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2]>],
+ [2, 1, 1]>,
+ InstrItinData<IIC_LdStLoadUpd , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_DU2], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [2, 2, 1, 1]>,
+ InstrItinData<IIC_LdStLoadUpdX, [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_DU2], 0>,
+ InstrStage<1, [P7_DU3], 0>,
+ InstrStage<1, [P7_DU4], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>,
+ InstrStage<1, [P7_LS1, P7_LS2], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [3, 3, 1, 1]>,
+ InstrItinData<IIC_LdStLD , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2]>],
+ [2, 1, 1]>,
+ InstrItinData<IIC_LdStLDU , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_DU2], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [2, 2, 1, 1]>,
+ InstrItinData<IIC_LdStLDUX , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_DU2], 0>,
+ InstrStage<1, [P7_DU3], 0>,
+ InstrStage<1, [P7_DU4], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>,
+ InstrStage<1, [P7_LS1, P7_LS2], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [3, 3, 1, 1]>,
+ InstrItinData<IIC_LdStLFD , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2]>],
+ [3, 1, 1]>,
+ InstrItinData<IIC_LdStLVecX , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2]>],
+ [3, 1, 1]>,
+ InstrItinData<IIC_LdStLFDU , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_DU2], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [3, 3, 1, 1]>,
+ InstrItinData<IIC_LdStLFDUX , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_DU2], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [3, 3, 1, 1]>,
+ InstrItinData<IIC_LdStLHA , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_DU2], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2]>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [3, 1, 1]>,
+ InstrItinData<IIC_LdStLHAU , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_DU2], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [4, 4, 1, 1]>,
+ InstrItinData<IIC_LdStLHAUX , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_DU2], 0>,
+ InstrStage<1, [P7_DU3], 0>,
+ InstrStage<1, [P7_DU4], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>,
+ InstrStage<1, [P7_LS1, P7_LS2], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [4, 4, 1, 1]>,
+ InstrItinData<IIC_LdStLWA , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_DU2], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2]>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [3, 1, 1]>,
+ InstrItinData<IIC_LdStLWARX, [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_DU2], 0>,
+ InstrStage<1, [P7_DU3], 0>,
+ InstrStage<1, [P7_DU4], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2]>],
+ [3, 1, 1]>,
+ InstrItinData<IIC_LdStLDARX, [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_DU2], 0>,
+ InstrStage<1, [P7_DU3], 0>,
+ InstrStage<1, [P7_DU4], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2]>],
+ [3, 1, 1]>,
+ InstrItinData<IIC_LdStLMW , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2]>],
+ [2, 1, 1]>,
+ InstrItinData<IIC_LdStStore , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_LdStSTD , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_LdStSTDU , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_DU2], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [2, 1, 1, 1]>,
+ InstrItinData<IIC_LdStSTDUX , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_DU2], 0>,
+ InstrStage<1, [P7_DU3], 0>,
+ InstrStage<1, [P7_DU4], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2]>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [2, 1, 1, 1]>,
+ InstrItinData<IIC_LdStSTFD , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2], 0>,
+ InstrStage<1, [P7_VS1, P7_VS2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_LdStSTFDU , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_DU2], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2], 0>,
+ InstrStage<1, [P7_FX1, P7_FX2], 0>,
+ InstrStage<1, [P7_VS1, P7_VS2]>],
+ [2, 1, 1, 1]>,
+ InstrItinData<IIC_LdStSTVEBX , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2], 0>,
+ InstrStage<1, [P7_VS2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_LdStSTDCX , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_DU2], 0>,
+ InstrStage<1, [P7_DU3], 0>,
+ InstrStage<1, [P7_DU4], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_LdStSTWCX , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_DU2], 0>,
+ InstrStage<1, [P7_DU3], 0>,
+ InstrStage<1, [P7_DU4], 0>,
+ InstrStage<1, [P7_LS1, P7_LS2]>],
+ [1, 1, 1]>,
+ InstrItinData<IIC_BrMCRX , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_DU2], 0>,
+ InstrStage<1, [P7_DU3], 0>,
+ InstrStage<1, [P7_DU4], 0>,
+ InstrStage<1, [P7_CRU]>,
+ InstrStage<1, [P7_FX1, P7_FX2]>],
+ [3, 1]>, // mtcr
+ InstrItinData<IIC_SprMFCR , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_CRU]>],
+ [6, 1]>,
+ InstrItinData<IIC_SprMFCRF , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_CRU]>],
+ [3, 1]>,
+ InstrItinData<IIC_SprMTSPR , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_FX1]>],
+ [4, 1]>, // mtctr
+ InstrItinData<IIC_FPGeneral , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_VS1, P7_VS2]>],
+ [5, 1, 1]>,
+ InstrItinData<IIC_FPCompare , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_VS1, P7_VS2]>],
+ [8, 1, 1]>,
+ InstrItinData<IIC_FPDivD , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_VS1, P7_VS2]>],
+ [33, 1, 1]>,
+ InstrItinData<IIC_FPDivS , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_VS1, P7_VS2]>],
+ [27, 1, 1]>,
+ InstrItinData<IIC_FPSqrtD , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_VS1, P7_VS2]>],
+ [44, 1, 1]>,
+ InstrItinData<IIC_FPSqrtS , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_VS1, P7_VS2]>],
+ [32, 1, 1]>,
+ InstrItinData<IIC_FPFused , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_VS1, P7_VS2]>],
+ [5, 1, 1, 1]>,
+ InstrItinData<IIC_FPRes , [InstrStage<1, [P7_DU1, P7_DU2,
+ P7_DU3, P7_DU4], 0>,
+ InstrStage<1, [P7_VS1, P7_VS2]>],
+ [5, 1, 1]>,
+ InstrItinData<IIC_VecGeneral , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_VS1]>],
+ [2, 1, 1]>,
+ InstrItinData<IIC_VecVSL , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_VS1]>],
+ [2, 1, 1]>,
+ InstrItinData<IIC_VecVSR , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_VS1]>],
+ [2, 1, 1]>,
+ InstrItinData<IIC_VecFP , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_VS1, P7_VS2]>],
+ [6, 1, 1]>,
+ InstrItinData<IIC_VecFPCompare, [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_VS1, P7_VS2]>],
+ [6, 1, 1]>,
+ InstrItinData<IIC_VecFPRound , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_VS1, P7_VS2]>],
+ [6, 1, 1]>,
+ InstrItinData<IIC_VecComplex , [InstrStage<1, [P7_DU1], 0>,
+ InstrStage<1, [P7_VS1]>],
+ [7, 1, 1]>,
+ InstrItinData<IIC_VecPerm , [InstrStage<1, [P7_DU1, P7_DU2], 0>,
+ InstrStage<1, [P7_VS2]>],
+ [3, 1, 1]>
+]>;
+
+// ===---------------------------------------------------------------------===//
+// P7 machine model for scheduling and other instruction cost heuristics.
+
+def P7Model : SchedMachineModel {
+ let IssueWidth = 6; // 4 (non-branch) instructions are dispatched per cycle.
+ // Note that the dispatch bundle size is 6 (including
+ // branches), but the total internal issue bandwidth per
+ // cycle (from all queues) is 8.
+
+ let MinLatency = 0; // Out-of-order dispatch.
+ let LoadLatency = 3; // Optimistic load latency assuming bypass.
+                        // This is overridden by OperandCycles if the
+ // Itineraries are queried instead.
+ let MispredictPenalty = 16;
+
+ let Itineraries = P7Itineraries;
+}
+
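In the new POWER7 itineraries above, most entries track two levels of resources at once: stages with a trailing 0 that claim dispatch-group slots (the P7_DU* units) without delaying the next stage, ordinary stages that occupy the execution pipelines, and a final operand-cycle list giving def and use latencies. An annotated sketch of that shape, mirroring the IIC_LdStLoad data above (the inline comments are explanatory additions, not part of the file):

    // One element of the ProcessorItineraries list, annotated.
    InstrItinData<IIC_LdStLoad , [InstrStage<1, [P7_DU1, P7_DU2,
                                                 P7_DU3, P7_DU4], 0>,
                                  // ^ consumes one dispatch slot; the trailing
                                  //   0 lets the next stage begin the same cycle
                                  InstrStage<1, [P7_LS1, P7_LS2]>],
                                  // ^ one cycle in either load/store pipeline
                                 [2, 1, 1]>
                                  // ^ operand cycles: the result is ready two
                                  //   cycles after issue, the address operands
                                  //   are read at cycle one

The P7Model definition at the end of the file then supplies the coarser per-CPU numbers (issue width, optimistic load latency, mispredict penalty) and points back at these itineraries through its Itineraries field.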
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp
index d4258b4..dc16742 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp
@@ -11,13 +11,12 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "powerpc-selectiondag-info"
#include "PPCTargetMachine.h"
using namespace llvm;
-PPCSelectionDAGInfo::PPCSelectionDAGInfo(const PPCTargetMachine &TM)
- : TargetSelectionDAGInfo(TM) {
-}
+#define DEBUG_TYPE "powerpc-selectiondag-info"
+
+PPCSelectionDAGInfo::PPCSelectionDAGInfo(const DataLayout *DL)
+ : TargetSelectionDAGInfo(DL) {}
-PPCSelectionDAGInfo::~PPCSelectionDAGInfo() {
-}
+PPCSelectionDAGInfo::~PPCSelectionDAGInfo() {}
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.h b/contrib/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.h
index 341b69c..b2e7f3b 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.h
@@ -22,7 +22,7 @@ class PPCTargetMachine;
class PPCSelectionDAGInfo : public TargetSelectionDAGInfo {
public:
- explicit PPCSelectionDAGInfo(const PPCTargetMachine &TM);
+ explicit PPCSelectionDAGInfo(const DataLayout *DL);
~PPCSelectionDAGInfo();
};
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp b/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp
index 7231ab1..b51512d 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp
@@ -17,28 +17,72 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/IR/Attributes.h"
-#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetMachine.h"
#include <cstdlib>
+using namespace llvm;
+
+#define DEBUG_TYPE "ppc-subtarget"
+
#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "PPCGenSubtargetInfo.inc"
-using namespace llvm;
+/// Return the datalayout string of a subtarget.
+static std::string getDataLayoutString(const PPCSubtarget &ST) {
+ const Triple &T = ST.getTargetTriple();
-PPCSubtarget::PPCSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, bool is64Bit)
- : PPCGenSubtargetInfo(TT, CPU, FS)
- , IsPPC64(is64Bit)
- , TargetTriple(TT) {
+ std::string Ret;
+
+  // Most PPC* platforms are big endian; PPC64LE is little endian.
+ if (ST.isLittleEndian())
+ Ret = "e";
+ else
+ Ret = "E";
+
+ Ret += DataLayout::getManglingComponent(T);
+
+ // PPC32 has 32 bit pointers. The PS3 (OS Lv2) is a PPC64 machine with 32 bit
+ // pointers.
+ if (!ST.isPPC64() || T.getOS() == Triple::Lv2)
+ Ret += "-p:32:32";
+
+ // Note, the alignment values for f64 and i64 on ppc64 in Darwin
+ // documentation are wrong; these are correct (i.e. "what gcc does").
+ if (ST.isPPC64() || ST.isSVR4ABI())
+ Ret += "-i64:64";
+ else
+ Ret += "-f64:32:64";
+
+ // PPC64 has 32 and 64 bit registers, PPC32 has only 32 bit ones.
+ if (ST.isPPC64())
+ Ret += "-n32:64";
+ else
+ Ret += "-n32";
+
+ return Ret;
+}
+
+PPCSubtarget &PPCSubtarget::initializeSubtargetDependencies(StringRef CPU,
+ StringRef FS) {
initializeEnvironment();
resetSubtargetFeatures(CPU, FS);
+ return *this;
}
+PPCSubtarget::PPCSubtarget(const std::string &TT, const std::string &CPU,
+ const std::string &FS, PPCTargetMachine &TM,
+ bool is64Bit, CodeGenOpt::Level OptLevel)
+ : PPCGenSubtargetInfo(TT, CPU, FS), IsPPC64(is64Bit), TargetTriple(TT),
+ OptLevel(OptLevel),
+ FrameLowering(initializeSubtargetDependencies(CPU, FS)),
+ DL(getDataLayoutString(*this)), InstrInfo(*this), JITInfo(*this),
+ TLInfo(TM), TSInfo(&DL) {}
+
/// SetJITMode - This is called to inform the subtarget info that we are
/// producing code for the JIT.
void PPCSubtarget::SetJITMode() {
@@ -73,8 +117,10 @@ void PPCSubtarget::initializeEnvironment() {
HasMFOCRF = false;
Has64BitSupport = false;
Use64BitRegs = false;
+ UseCRBits = false;
HasAltivec = false;
HasQPX = false;
+ HasVSX = false;
HasFCPSGN = false;
HasFSQRT = false;
HasFRE = false;
@@ -124,6 +170,14 @@ void PPCSubtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) {
FullFS = "+64bit";
}
+ // At -O2 and above, track CR bits as individual registers.
+ if (OptLevel >= CodeGenOpt::Default) {
+ if (!FullFS.empty())
+ FullFS = "+crbits," + FullFS;
+ else
+ FullFS = "+crbits";
+ }
+
// Parse features string.
ParseSubtargetFeatures(CPUName, FullFS);
@@ -144,6 +198,11 @@ void PPCSubtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) {
// Determine endianness.
IsLittleEndian = (TargetTriple.getArch() == Triple::ppc64le);
+
+ // FIXME: For now, we disable VSX in little-endian mode until endian
+ // issues in those instructions can be addressed.
+ if (IsLittleEndian)
+ HasVSX = false;
}
/// hasLazyResolverStub - Return true if accesses to the specified global have
@@ -163,23 +222,7 @@ bool PPCSubtarget::hasLazyResolverStub(const GlobalValue *GV,
GV->hasCommonLinkage() || isDecl;
}
-bool PPCSubtarget::enablePostRAScheduler(
- CodeGenOpt::Level OptLevel,
- TargetSubtargetInfo::AntiDepBreakMode& Mode,
- RegClassVector& CriticalPathRCs) const {
- Mode = TargetSubtargetInfo::ANTIDEP_ALL;
-
- CriticalPathRCs.clear();
-
- if (isPPC64())
- CriticalPathRCs.push_back(&PPC::G8RCRegClass);
- else
- CriticalPathRCs.push_back(&PPC::GPRCRegClass);
-
- return OptLevel >= CodeGenOpt::Default;
-}
-
-// Embedded cores need aggressive scheduling.
+// Embedded cores need aggressive scheduling (and some others also benefit).
static bool needsAggressiveScheduling(unsigned Directive) {
switch (Directive) {
default: return false;
@@ -187,6 +230,8 @@ static bool needsAggressiveScheduling(unsigned Directive) {
case PPC::DIR_A2:
case PPC::DIR_E500mc:
case PPC::DIR_E5500:
+ case PPC::DIR_PWR7:
+ case PPC::DIR_PWR8:
return true;
}
}
@@ -198,6 +243,19 @@ bool PPCSubtarget::enableMachineScheduler() const {
return needsAggressiveScheduling(DarwinDirective);
}
+// This overrides the PostRAScheduler bit in the SchedModel for each CPU.
+bool PPCSubtarget::enablePostMachineScheduler() const { return true; }
+
+PPCGenSubtargetInfo::AntiDepBreakMode PPCSubtarget::getAntiDepBreakMode() const {
+ return TargetSubtargetInfo::ANTIDEP_ALL;
+}
+
+void PPCSubtarget::getCriticalPathRCs(RegClassVector &CriticalPathRCs) const {
+ CriticalPathRCs.clear();
+ CriticalPathRCs.push_back(isPPC64() ?
+ &PPC::G8RCRegClass : &PPC::GPRCRegClass);
+}
+
void PPCSubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
MachineInstr *begin,
MachineInstr *end,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.h b/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.h
index 0b8b1b3..a3cedaf 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.h
@@ -14,7 +14,13 @@
#ifndef POWERPCSUBTARGET_H
#define POWERPCSUBTARGET_H
+#include "PPCFrameLowering.h"
+#include "PPCInstrInfo.h"
+#include "PPCISelLowering.h"
+#include "PPCJITInfo.h"
+#include "PPCSelectionDAGInfo.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <string>
@@ -50,6 +56,7 @@ namespace PPC {
DIR_PWR6,
DIR_PWR6X,
DIR_PWR7,
+ DIR_PWR8,
DIR_64
};
}
@@ -73,6 +80,7 @@ protected:
bool HasMFOCRF;
bool Has64BitSupport;
bool Use64BitRegs;
+ bool UseCRBits;
bool IsPPC64;
bool HasAltivec;
bool HasQPX;
@@ -98,12 +106,23 @@ protected:
/// TargetTriple - What processor and OS we're targeting.
Triple TargetTriple;
+ /// OptLevel - What default optimization level we're emitting code for.
+ CodeGenOpt::Level OptLevel;
+
+ PPCFrameLowering FrameLowering;
+ const DataLayout DL;
+ PPCInstrInfo InstrInfo;
+ PPCJITInfo JITInfo;
+ PPCTargetLowering TLInfo;
+ PPCSelectionDAGInfo TSInfo;
+
public:
/// This constructor initializes the data members to match that
/// of the specified triple.
///
PPCSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, bool is64Bit);
+ const std::string &FS, PPCTargetMachine &TM, bool is64Bit,
+ CodeGenOpt::Level OptLevel);
/// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
@@ -122,12 +141,23 @@ public:
///
unsigned getDarwinDirective() const { return DarwinDirective; }
- /// getInstrItins - Return the instruction itineraies based on subtarget
+ /// getInstrItins - Return the instruction itineraries based on subtarget
/// selection.
const InstrItineraryData &getInstrItineraryData() const { return InstrItins; }
+ const PPCFrameLowering *getFrameLowering() const { return &FrameLowering; }
+ const DataLayout *getDataLayout() const { return &DL; }
+ const PPCInstrInfo *getInstrInfo() const { return &InstrInfo; }
+ PPCJITInfo *getJITInfo() { return &JITInfo; }
+ const PPCTargetLowering *getTargetLowering() const { return &TLInfo; }
+ const PPCSelectionDAGInfo *getSelectionDAGInfo() const { return &TSInfo; }
+
+ /// initializeSubtargetDependencies - Initializes using a CPU and feature string
+ /// so that we can use initializer lists for subtarget initialization.
+ PPCSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
+
/// \brief Reset the features for the PowerPC target.
- virtual void resetSubtargetFeatures(const MachineFunction *MF);
+ void resetSubtargetFeatures(const MachineFunction *MF) override;
private:
void initializeEnvironment();
void resetSubtargetFeatures(StringRef CPU, StringRef FS);
@@ -146,6 +176,10 @@ public:
/// has64BitSupport() returns true.
bool use64BitRegs() const { return Use64BitRegs; }
+ /// useCRBits - Return true if we should store and manipulate i1 values in
+ /// the individual condition register bits.
+ bool useCRBits() const { return UseCRBits; }
+
/// hasLazyResolverStub - Return true if accesses to the specified global have
/// to go through a dyld lazy resolution stub. This means that an extra load
/// is required to get the address of the global.
@@ -172,6 +206,7 @@ public:
bool hasFPCVT() const { return HasFPCVT; }
bool hasAltivec() const { return HasAltivec; }
bool hasQPX() const { return HasQPX; }
+ bool hasVSX() const { return HasVSX; }
bool hasMFOCRF() const { return HasMFOCRF; }
bool hasISEL() const { return HasISEL; }
bool hasPOPCNTD() const { return HasPOPCNTD; }
@@ -184,29 +219,32 @@ public:
/// isDarwin - True if this is any darwin platform.
bool isDarwin() const { return TargetTriple.isMacOSX(); }
- /// isBGP - True if this is a BG/P platform.
- bool isBGP() const { return TargetTriple.getVendor() == Triple::BGP; }
/// isBGQ - True if this is a BG/Q platform.
bool isBGQ() const { return TargetTriple.getVendor() == Triple::BGQ; }
bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
-// bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
+ bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
bool isDarwinABI() const { return isDarwin(); }
bool isSVR4ABI() const { return !isDarwin(); }
+ /// FIXME: Should use a command-line option.
+ bool isELFv2ABI() const { return isPPC64() && isSVR4ABI() &&
+ isLittleEndian(); }
- /// enablePostRAScheduler - True at 'More' optimization.
- bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
- TargetSubtargetInfo::AntiDepBreakMode& Mode,
- RegClassVector& CriticalPathRCs) const;
+ bool enableEarlyIfConversion() const override { return hasISEL(); }
// Scheduling customization.
- bool enableMachineScheduler() const;
+ bool enableMachineScheduler() const override;
+ // This overrides the PostRAScheduler bit in the SchedModel for each CPU.
+ bool enablePostMachineScheduler() const override;
+ AntiDepBreakMode getAntiDepBreakMode() const override;
+ void getCriticalPathRCs(RegClassVector &CriticalPathRCs) const override;
+
void overrideSchedPolicy(MachineSchedPolicy &Policy,
MachineInstr *begin,
MachineInstr *end,
- unsigned NumRegionInstrs) const;
- bool useAA() const;
+ unsigned NumRegionInstrs) const override;
+ bool useAA() const override;
};
} // End llvm namespace
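
Editorial note: the new initializeSubtargetDependencies() hook above exists so that feature-string parsing can run before members that depend on its results are constructed, which is what lets the subtarget own the frame lowering, instruction info, and related objects through an initializer list. A minimal sketch of the idiom follows; the class and member names are invented for illustration and are not the real PPCSubtarget ones.

#include <string>

// Sketch of the initializeSubtargetDependencies() idiom only.
struct FrameLoweringExample {
  int RedZoneBytes;
  explicit FrameLoweringExample(bool Is64Bit) : RedZoneBytes(Is64Bit ? 288 : 0) {}
};

class SubtargetExample {
  bool Is64Bit = false;          // normally set by ParseSubtargetFeatures()
  FrameLoweringExample Frame;    // needs the parsed feature flags

  SubtargetExample &initializeSubtargetDependencies(const std::string &FS) {
    Is64Bit = FS.find("+64bit") != std::string::npos;  // stand-in for tblgen parsing
    return *this;
  }

public:
  explicit SubtargetExample(const std::string &FS)
      // The helper runs as part of Frame's initializer, so Frame already sees
      // the fully parsed feature flags.
      : Frame(initializeSubtargetDependencies(FS).Is64Bit) {}
};
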
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp b/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
index d6767d5..9563b90 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -26,6 +26,10 @@ static cl::
opt<bool> DisableCTRLoops("disable-ppc-ctrloops", cl::Hidden,
cl::desc("Disable CTR loops for PPC"));
+static cl::opt<bool>
+VSXFMAMutateEarly("schedule-ppc-vsx-fma-mutation-early",
+ cl::Hidden, cl::desc("Schedule VSX FMA instruction mutation early"));
+
extern "C" void LLVMInitializePowerPCTarget() {
// Register the targets
RegisterTargetMachine<PPC32TargetMachine> A(ThePPC32Target);
@@ -33,59 +37,12 @@ extern "C" void LLVMInitializePowerPCTarget() {
RegisterTargetMachine<PPC64TargetMachine> C(ThePPC64LETarget);
}
-/// Return the datalayout string of a subtarget.
-static std::string getDataLayoutString(const PPCSubtarget &ST) {
- const Triple &T = ST.getTargetTriple();
-
- // PPC is big endian
- std::string Ret = "E";
-
- // PPC64 has 64 bit pointers, PPC32 has 32 bit pointers.
- if (ST.isPPC64())
- Ret += "-p:64:64";
- else
- Ret += "-p:32:32";
-
- // Note, the alignment values for f64 and i64 on ppc64 in Darwin
- // documentation are wrong; these are correct (i.e. "what gcc does").
- if (ST.isPPC64() || ST.isSVR4ABI())
- Ret += "-f64:64:64-i64:64:64";
- else
- Ret += "-f64:32:64";
-
- // Set support for 128 floats depending on the ABI.
- if (!ST.isPPC64() && ST.isSVR4ABI())
- Ret += "-f128:64:128";
-
- // Some ABIs support 128 bit vectors.
- if (ST.isPPC64() && ST.isSVR4ABI())
- Ret += "-v128:128:128";
-
- // PPC64 has 32 and 64 bit register, PPC32 has only 32 bit ones.
- if (ST.isPPC64())
- Ret += "-n32:64";
- else
- Ret += "-n32";
-
- return Ret;
-}
-
-PPCTargetMachine::PPCTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- const TargetOptions &Options,
+PPCTargetMachine::PPCTargetMachine(const Target &T, StringRef TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL,
- bool is64Bit)
- : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- Subtarget(TT, CPU, FS, is64Bit),
- DL(getDataLayoutString(Subtarget)), InstrInfo(*this),
- FrameLowering(Subtarget), JITInfo(*this, is64Bit),
- TLInfo(*this), TSInfo(*this),
- InstrItins(Subtarget.getInstrItineraryData()) {
-
- // The binutils for the BG/P are too old for CFI.
- if (Subtarget.isBGP())
- setMCUseCFI(false);
+ CodeGenOpt::Level OL, bool is64Bit)
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ Subtarget(TT, CPU, FS, *this, is64Bit, OL) {
initAsmInfo();
}
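
Editorial note: the deleted getDataLayoutString() helper no longer appears in this file (the string is now assembled when the subtarget is constructed), so it may help to trace by hand what the removed branches produced for two common configurations; the strings below are worked out from the deleted code above, not quoted from elsewhere in the patch.

// Worked from the removed helper above:
//   64-bit SVR4 (e.g. powerpc64-unknown-linux-gnu):
//     "E" "-p:64:64" "-f64:64:64-i64:64:64" "-v128:128:128" "-n32:64"
//      => "E-p:64:64-f64:64:64-i64:64:64-v128:128:128-n32:64"
//   32-bit Darwin (e.g. powerpc-apple-darwin):
//     "E" "-p:32:32" "-f64:32:64" "-n32"
//      => "E-p:32:32-f64:32:64-n32"
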
@@ -129,11 +86,12 @@ public:
return *getPPCTargetMachine().getSubtargetImpl();
}
- virtual bool addPreISel();
- virtual bool addILPOpts();
- virtual bool addInstSelector();
- virtual bool addPreSched2();
- virtual bool addPreEmitPass();
+ bool addPreISel() override;
+ bool addILPOpts() override;
+ bool addInstSelector() override;
+ bool addPreRegAlloc() override;
+ bool addPreSched2() override;
+ bool addPreEmitPass() override;
};
} // namespace
@@ -149,12 +107,8 @@ bool PPCPassConfig::addPreISel() {
}
bool PPCPassConfig::addILPOpts() {
- if (getPPCSubtarget().hasISEL()) {
- addPass(&EarlyIfConverterID);
- return true;
- }
-
- return false;
+ addPass(&EarlyIfConverterID);
+ return true;
}
bool PPCPassConfig::addInstSelector() {
@@ -166,10 +120,20 @@ bool PPCPassConfig::addInstSelector() {
addPass(createPPCCTRLoopsVerify());
#endif
+ addPass(createPPCVSXCopyPass());
+ return false;
+}
+
+bool PPCPassConfig::addPreRegAlloc() {
+ initializePPCVSXFMAMutatePass(*PassRegistry::getPassRegistry());
+ insertPass(VSXFMAMutateEarly ? &RegisterCoalescerID : &MachineSchedulerID,
+ &PPCVSXFMAMutateID);
return false;
}
bool PPCPassConfig::addPreSched2() {
+ addPass(createPPCVSXCopyCleanupPass());
+
if (getOptLevel() != CodeGenOpt::None)
addPass(&IfConverterID);
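
Editorial note: the new addPreRegAlloc() override above inserts the VSX FMA mutation either before the machine scheduler (the default) or before the register coalescer when the hidden flag defined at the top of this file is set. As a usage sketch, assuming an llc built from this tree (the -mattr spelling of the VSX feature is an assumption, since PPC.td is not shown in this hunk):

llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=+vsx \
    -schedule-ppc-vsx-fma-mutation-early test.ll -o test.s
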
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.h b/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.h
index 606ccb3..4c7029c 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.h
@@ -14,11 +14,7 @@
#ifndef PPC_TARGETMACHINE_H
#define PPC_TARGETMACHINE_H
-#include "PPCFrameLowering.h"
-#include "PPCISelLowering.h"
#include "PPCInstrInfo.h"
-#include "PPCJITInfo.h"
-#include "PPCSelectionDAGInfo.h"
#include "PPCSubtarget.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
@@ -29,13 +25,6 @@ namespace llvm {
///
class PPCTargetMachine : public LLVMTargetMachine {
PPCSubtarget Subtarget;
- const DataLayout DL; // Calculates type size & alignment
- PPCInstrInfo InstrInfo;
- PPCFrameLowering FrameLowering;
- PPCJITInfo JITInfo;
- PPCTargetLowering TLInfo;
- PPCSelectionDAGInfo TSInfo;
- InstrItineraryData InstrItins;
public:
PPCTargetMachine(const Target &T, StringRef TT,
@@ -43,34 +32,38 @@ public:
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL, bool is64Bit);
- virtual const PPCInstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const PPCFrameLowering *getFrameLowering() const {
- return &FrameLowering;
+ const PPCInstrInfo *getInstrInfo() const override {
+ return getSubtargetImpl()->getInstrInfo();
}
- virtual PPCJITInfo *getJITInfo() { return &JITInfo; }
- virtual const PPCTargetLowering *getTargetLowering() const {
- return &TLInfo;
+ const PPCFrameLowering *getFrameLowering() const override {
+ return getSubtargetImpl()->getFrameLowering();
}
- virtual const PPCSelectionDAGInfo* getSelectionDAGInfo() const {
- return &TSInfo;
+ PPCJITInfo *getJITInfo() override { return Subtarget.getJITInfo(); }
+ const PPCTargetLowering *getTargetLowering() const override {
+ return getSubtargetImpl()->getTargetLowering();
}
- virtual const PPCRegisterInfo *getRegisterInfo() const {
- return &InstrInfo.getRegisterInfo();
+ const PPCSelectionDAGInfo* getSelectionDAGInfo() const override {
+ return getSubtargetImpl()->getSelectionDAGInfo();
+ }
+ const PPCRegisterInfo *getRegisterInfo() const override {
+ return &getInstrInfo()->getRegisterInfo();
}
- virtual const DataLayout *getDataLayout() const { return &DL; }
- virtual const PPCSubtarget *getSubtargetImpl() const { return &Subtarget; }
- virtual const InstrItineraryData *getInstrItineraryData() const {
- return &InstrItins;
+ const DataLayout *getDataLayout() const override {
+ return getSubtargetImpl()->getDataLayout();
+ }
+ const PPCSubtarget *getSubtargetImpl() const override { return &Subtarget; }
+ const InstrItineraryData *getInstrItineraryData() const override {
+ return &getSubtargetImpl()->getInstrItineraryData();
}
// Pass Pipeline Configuration
- virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
- virtual bool addCodeEmitter(PassManagerBase &PM,
- JITCodeEmitter &JCE);
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+ bool addCodeEmitter(PassManagerBase &PM,
+ JITCodeEmitter &JCE) override;
/// \brief Register PPC analysis passes with a pass manager.
- virtual void addAnalysisPasses(PassManagerBase &PM);
+ void addAnalysisPasses(PassManagerBase &PM) override;
};
/// PPC32TargetMachine - PowerPC 32-bit target machine.
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCTargetObjectFile.cpp b/contrib/llvm/lib/Target/PowerPC/PPCTargetObjectFile.cpp
index ec1e606..2903cc1 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCTargetObjectFile.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCTargetObjectFile.cpp
@@ -8,10 +8,10 @@
//===----------------------------------------------------------------------===//
#include "PPCTargetObjectFile.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSectionELF.h"
-#include "llvm/Target/Mangler.h"
using namespace llvm;
@@ -22,16 +22,9 @@ Initialize(MCContext &Ctx, const TargetMachine &TM) {
InitializeELF(TM.Options.UseInitArray);
}
-const MCSection * PPC64LinuxTargetObjectFile::
-SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler *Mang, const TargetMachine &TM) const {
-
- const MCSection *DefaultSection =
- TargetLoweringObjectFileELF::SelectSectionForGlobal(GV, Kind, Mang, TM);
-
- if (DefaultSection != ReadOnlySection)
- return DefaultSection;
-
+const MCSection *PPC64LinuxTargetObjectFile::SelectSectionForGlobal(
+ const GlobalValue *GV, SectionKind Kind, Mangler &Mang,
+ const TargetMachine &TM) const {
// Here override ReadOnlySection to DataRelROSection for PPC64 SVR4 ABI
// when we have a constant that contains global relocations. This is
// necessary because of this ABI's handling of pointers to functions in
@@ -46,14 +39,17 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
// linker, so we must use DataRelROSection instead of ReadOnlySection.
// For more information, see the description of ELIMINATE_COPY_RELOCS in
// GNU ld.
- const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
+ if (Kind.isReadOnly()) {
+ const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
- if (GVar && GVar->isConstant() &&
- (GVar->getInitializer()->getRelocationInfo() ==
- Constant::GlobalRelocations))
- return DataRelROSection;
+ if (GVar && GVar->isConstant() &&
+ (GVar->getInitializer()->getRelocationInfo() ==
+ Constant::GlobalRelocations))
+ Kind = SectionKind::getReadOnlyWithRel();
+ }
- return DefaultSection;
+ return TargetLoweringObjectFileELF::SelectSectionForGlobal(GV, Kind,
+ Mang, TM);
}
const MCExpr *PPC64LinuxTargetObjectFile::
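
Editorial note: the Kind rewrite above targets globals that are 'const' at the source level but whose initializers still need dynamic relocations. A small illustrative C++ example of such a global (not taken from the patch):

// 'Handlers' is a constant at the source level, but each entry requires a
// dynamic relocation, so under the PPC64 SVR4 rules described above it is
// emitted as read-only-with-relocations (.data.rel.ro) rather than .rodata.
extern void handleRead();
extern void handleWrite();
void (*const Handlers[])() = { &handleRead, &handleWrite };
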
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCTargetObjectFile.h b/contrib/llvm/lib/Target/PowerPC/PPCTargetObjectFile.h
index 262c522..3e71bbc 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCTargetObjectFile.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCTargetObjectFile.h
@@ -20,14 +20,14 @@ namespace llvm {
/// 64-bit PowerPC Linux.
class PPC64LinuxTargetObjectFile : public TargetLoweringObjectFileELF {
- virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
+ void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
- virtual const MCSection *
- SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler *Mang, const TargetMachine &TM) const;
+ const MCSection *SelectSectionForGlobal(const GlobalValue *GV,
+ SectionKind Kind, Mangler &Mang,
+ const TargetMachine &TM) const override;
/// \brief Describe a TLS variable address within debug info.
- virtual const MCExpr *getDebugThreadLocalSymbol(const MCSymbol *Sym) const;
+ const MCExpr *getDebugThreadLocalSymbol(const MCSymbol *Sym) const override;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCTargetStreamer.h b/contrib/llvm/lib/Target/PowerPC/PPCTargetStreamer.h
index e876be1..73fb691 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCTargetStreamer.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCTargetStreamer.h
@@ -15,8 +15,12 @@
namespace llvm {
class PPCTargetStreamer : public MCTargetStreamer {
public:
+ PPCTargetStreamer(MCStreamer &S);
virtual ~PPCTargetStreamer();
virtual void emitTCEntry(const MCSymbol &S) = 0;
+ virtual void emitMachine(StringRef CPU) = 0;
+ virtual void emitAbiVersion(int AbiVersion) = 0;
+ virtual void emitLocalEntry(MCSymbol *S, const MCExpr *LocalOffset) = 0;
};
}
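
Editorial note: every concrete streamer now has to implement the three added hooks as well. The sketch below shows roughly what a textual-assembly implementation could look like; the class name and directive spellings are an approximation, and the in-tree implementations live with the PowerPC MC target descriptions.

// Rough sketch of an assembly-text PPCTargetStreamer; illustrative only.
class ExamplePPCAsmStreamer : public PPCTargetStreamer {
  formatted_raw_ostream &OS;
public:
  ExamplePPCAsmStreamer(MCStreamer &S, formatted_raw_ostream &OS)
      : PPCTargetStreamer(S), OS(OS) {}
  void emitTCEntry(const MCSymbol &S) override {
    // TOC entry for 64-bit SVR4, e.g. ".tc sym[TC],sym".
  }
  void emitMachine(StringRef CPU) override {
    OS << "\t.machine " << CPU << '\n';
  }
  void emitAbiVersion(int AbiVersion) override {
    OS << "\t.abiversion " << AbiVersion << '\n';
  }
  void emitLocalEntry(MCSymbol *S, const MCExpr *LocalOffset) override {
    // ".localentry sym, offset" marks the ELFv2 local entry point.
  }
};
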
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
index 8879630..007901b 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
@@ -14,17 +14,22 @@
///
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "ppctti"
#include "PPC.h"
#include "PPCTargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/CostTable.h"
+#include "llvm/Target/TargetLowering.h"
using namespace llvm;
+#define DEBUG_TYPE "ppctti"
+
+static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
+cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);
+
// Declare the pass initialization routine locally as target-specific passes
-// don't havve a target-wide initialization entry point, and so we rely on the
+// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializePPCTTIPass(PassRegistry &);
@@ -32,35 +37,26 @@ void initializePPCTTIPass(PassRegistry &);
namespace {
-class PPCTTI : public ImmutablePass, public TargetTransformInfo {
- const PPCTargetMachine *TM;
+class PPCTTI final : public ImmutablePass, public TargetTransformInfo {
const PPCSubtarget *ST;
const PPCTargetLowering *TLI;
- /// Estimate the overhead of scalarizing an instruction. Insert and Extract
- /// are set if the result needs to be inserted and/or extracted from vectors.
- unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;
-
public:
- PPCTTI() : ImmutablePass(ID), TM(0), ST(0), TLI(0) {
+ PPCTTI() : ImmutablePass(ID), ST(nullptr), TLI(nullptr) {
llvm_unreachable("This pass cannot be directly constructed");
}
PPCTTI(const PPCTargetMachine *TM)
- : ImmutablePass(ID), TM(TM), ST(TM->getSubtargetImpl()),
+ : ImmutablePass(ID), ST(TM->getSubtargetImpl()),
TLI(TM->getTargetLowering()) {
initializePPCTTIPass(*PassRegistry::getPassRegistry());
}
- virtual void initializePass() {
+ virtual void initializePass() override {
pushTTIStack(this);
}
- virtual void finalizePass() {
- popTTIStack();
- }
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const override {
TargetTransformInfo::getAnalysisUsage(AU);
}
@@ -68,7 +64,7 @@ public:
static char ID;
/// Provide necessary pointer adjustments for the two base classes.
- virtual void *getAdjustedAnalysisPointer(const void *ID) {
+ virtual void *getAdjustedAnalysisPointer(const void *ID) override {
if (ID == &TargetTransformInfo::ID)
return (TargetTransformInfo*)this;
return this;
@@ -76,31 +72,40 @@ public:
/// \name Scalar TTI Implementations
/// @{
- virtual PopcntSupportKind getPopcntSupport(unsigned TyWidth) const;
- virtual void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) const;
+ unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override;
+
+ unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
+ Type *Ty) const override;
+ unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
+ Type *Ty) const override;
+
+ virtual PopcntSupportKind
+ getPopcntSupport(unsigned TyWidth) const override;
+ virtual void getUnrollingPreferences(
+ Loop *L, UnrollingPreferences &UP) const override;
/// @}
/// \name Vector TTI Implementations
/// @{
- virtual unsigned getNumberOfRegisters(bool Vector) const;
- virtual unsigned getRegisterBitWidth(bool Vector) const;
- virtual unsigned getMaximumUnrollFactor() const;
+ virtual unsigned getNumberOfRegisters(bool Vector) const override;
+ virtual unsigned getRegisterBitWidth(bool Vector) const override;
+ virtual unsigned getMaximumUnrollFactor() const override;
virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
OperandValueKind,
- OperandValueKind) const;
+ OperandValueKind) const override;
virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
- int Index, Type *SubTp) const;
+ int Index, Type *SubTp) const override;
virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
- Type *Src) const;
+ Type *Src) const override;
virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
- Type *CondTy) const;
+ Type *CondTy) const override;
virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
- unsigned Index) const;
+ unsigned Index) const override;
virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
unsigned Alignment,
- unsigned AddressSpace) const;
+ unsigned AddressSpace) const override;
/// @}
};
@@ -130,6 +135,142 @@ PPCTTI::PopcntSupportKind PPCTTI::getPopcntSupport(unsigned TyWidth) const {
return PSK_Software;
}
+unsigned PPCTTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
+ if (DisablePPCConstHoist)
+ return TargetTransformInfo::getIntImmCost(Imm, Ty);
+
+ assert(Ty->isIntegerTy());
+
+ unsigned BitSize = Ty->getPrimitiveSizeInBits();
+ if (BitSize == 0)
+ return ~0U;
+
+ if (Imm == 0)
+ return TCC_Free;
+
+ if (Imm.getBitWidth() <= 64) {
+ if (isInt<16>(Imm.getSExtValue()))
+ return TCC_Basic;
+
+ if (isInt<32>(Imm.getSExtValue())) {
+ // A constant that can be materialized using lis.
+ if ((Imm.getZExtValue() & 0xFFFF) == 0)
+ return TCC_Basic;
+
+ return 2 * TCC_Basic;
+ }
+ }
+
+ return 4 * TCC_Basic;
+}
+
+unsigned PPCTTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
+ const APInt &Imm, Type *Ty) const {
+ if (DisablePPCConstHoist)
+ return TargetTransformInfo::getIntImmCost(IID, Idx, Imm, Ty);
+
+ assert(Ty->isIntegerTy());
+
+ unsigned BitSize = Ty->getPrimitiveSizeInBits();
+ if (BitSize == 0)
+ return ~0U;
+
+ switch (IID) {
+ default: return TCC_Free;
+ case Intrinsic::sadd_with_overflow:
+ case Intrinsic::uadd_with_overflow:
+ case Intrinsic::ssub_with_overflow:
+ case Intrinsic::usub_with_overflow:
+ if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
+ return TCC_Free;
+ break;
+ }
+ return PPCTTI::getIntImmCost(Imm, Ty);
+}
+
+unsigned PPCTTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
+ Type *Ty) const {
+ if (DisablePPCConstHoist)
+ return TargetTransformInfo::getIntImmCost(Opcode, Idx, Imm, Ty);
+
+ assert(Ty->isIntegerTy());
+
+ unsigned BitSize = Ty->getPrimitiveSizeInBits();
+ if (BitSize == 0)
+ return ~0U;
+
+ unsigned ImmIdx = ~0U;
+ bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
+ ZeroFree = false;
+ switch (Opcode) {
+ default: return TCC_Free;
+ case Instruction::GetElementPtr:
+ // Always hoist the base address of a GetElementPtr. This prevents the
+ // creation of new constants for every base constant that gets constant
+ // folded with the offset.
+ if (Idx == 0)
+ return 2 * TCC_Basic;
+ return TCC_Free;
+ case Instruction::And:
+ RunFree = true; // (for the rotate-and-mask instructions)
+ // Fallthrough...
+ case Instruction::Add:
+ case Instruction::Or:
+ case Instruction::Xor:
+ ShiftedFree = true;
+ // Fallthrough...
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ ImmIdx = 1;
+ break;
+ case Instruction::ICmp:
+ UnsignedFree = true;
+ ImmIdx = 1;
+ // Fallthrough... (zero comparisons can use record-form instructions)
+ case Instruction::Select:
+ ZeroFree = true;
+ break;
+ case Instruction::PHI:
+ case Instruction::Call:
+ case Instruction::Ret:
+ case Instruction::Load:
+ case Instruction::Store:
+ break;
+ }
+
+ if (ZeroFree && Imm == 0)
+ return TCC_Free;
+
+ if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
+ if (isInt<16>(Imm.getSExtValue()))
+ return TCC_Free;
+
+ if (RunFree) {
+ if (Imm.getBitWidth() <= 32 &&
+ (isShiftedMask_32(Imm.getZExtValue()) ||
+ isShiftedMask_32(~Imm.getZExtValue())))
+ return TCC_Free;
+
+
+ if (ST->isPPC64() &&
+ (isShiftedMask_64(Imm.getZExtValue()) ||
+ isShiftedMask_64(~Imm.getZExtValue())))
+ return TCC_Free;
+ }
+
+ if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
+ return TCC_Free;
+
+ if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
+ return TCC_Free;
+ }
+
+ return PPCTTI::getIntImmCost(Imm, Ty);
+}
+
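
Editorial note: to make the materialization cost model above concrete, here are a few immediates traced by hand through getIntImmCost(Imm, Ty); TCC_Free is 0 and TCC_Basic is 1 in the generic cost model, so these are roughly instruction counts.

// Worked by hand from PPCTTI::getIntImmCost(Imm, Ty) above:
//   Imm = 0           -> TCC_Free        (nothing to materialize)
//   Imm = 42          -> TCC_Basic       (fits a 16-bit signed 'li')
//   Imm = 0x12340000  -> TCC_Basic       (low 16 bits are zero: one 'lis')
//   Imm = 0x12345678  -> 2 * TCC_Basic   ('lis' + 'ori' pair)
//   Imm = 1 << 40     -> 4 * TCC_Basic   (does not fit in 32 bits)
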
void PPCTTI::getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) const {
if (ST->getDarwinDirective() == PPC::DIR_A2) {
// The A2 is in-order with a deep pipeline, and concatenation unrolling
@@ -141,7 +282,7 @@ void PPCTTI::getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) const {
unsigned PPCTTI::getNumberOfRegisters(bool Vector) const {
if (Vector && !ST->hasAltivec())
return 0;
- return 32;
+ return ST->hasVSX() ? 64 : 32;
}
unsigned PPCTTI::getRegisterBitWidth(bool Vector) const {
@@ -210,11 +351,21 @@ unsigned PPCTTI::getVectorInstrCost(unsigned Opcode, Type *Val,
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
+ if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
+ // Double-precision scalars are already located in index #0.
+ if (Index == 0)
+ return 0;
+
+ return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
+ }
+
// Estimated cost of a load-hit-store delay. This was obtained
// experimentally as a minimum needed to prevent unprofitable
// vectorization for the paq8p benchmark. It may need to be
// raised further if other unprofitable cases remain.
- unsigned LHSPenalty = 12;
+ unsigned LHSPenalty = 2;
+ if (ISD == ISD::INSERT_VECTOR_ELT)
+ LHSPenalty += 7;
// Vector element insert/extract with Altivec is very expensive,
// because they require store and reload with the attendant
@@ -235,14 +386,34 @@ unsigned PPCTTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
"Invalid Opcode");
- // Each load/store unit costs 1.
- unsigned Cost = LT.first * 1;
+ unsigned Cost =
+ TargetTransformInfo::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
+
+ // VSX loads/stores support unaligned access.
+ if (ST->hasVSX()) {
+ if (LT.second == MVT::v2f64 || LT.second == MVT::v2i64)
+ return Cost;
+ }
+
+ bool UnalignedAltivec =
+ Src->isVectorTy() &&
+ Src->getPrimitiveSizeInBits() >= LT.second.getSizeInBits() &&
+ LT.second.getSizeInBits() == 128 &&
+ Opcode == Instruction::Load;
// PPC in general does not support unaligned loads and stores. They'll need
// to be decomposed based on the alignment factor.
unsigned SrcBytes = LT.second.getStoreSize();
- if (SrcBytes && Alignment && Alignment < SrcBytes)
- Cost *= (SrcBytes/Alignment);
+ if (SrcBytes && Alignment && Alignment < SrcBytes && !UnalignedAltivec) {
+ Cost += LT.first*(SrcBytes/Alignment-1);
+
+ // For a vector type, there is also scalarization overhead (only for
+ // stores, loads are expanded using the vector-load + permutation sequence,
+ // which is much less expensive).
+ if (Src->isVectorTy() && Opcode == Instruction::Store)
+ for (int i = 0, e = Src->getVectorNumElements(); i < e; ++i)
+ Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);
+ }
return Cost;
}
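
Editorial note: a concrete trace of the new unaligned-access adjustment above, assuming no VSX and not the unaligned-Altivec-load exception.

// Worked example for the path above: a store of <4 x i32> (SrcBytes = 16)
// with 4-byte alignment adds LT.first * (16/4 - 1) = 3 * LT.first on top of
// the generic base cost, plus one ExtractElement cost per element (four in
// total), because unaligned vector stores are scalarized while unaligned
// vector loads are expanded as load + permute and stay comparatively cheap.
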
diff --git a/contrib/llvm/lib/Target/R600/AMDGPU.h b/contrib/llvm/lib/Target/R600/AMDGPU.h
index 025b28e..d7e94f7 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPU.h
+++ b/contrib/llvm/lib/Target/R600/AMDGPU.h
@@ -17,6 +17,7 @@
namespace llvm {
class AMDGPUInstrPrinter;
+class AMDGPUSubtarget;
class AMDGPUTargetMachine;
class FunctionPass;
class MCAsmInfo;
@@ -28,31 +29,49 @@ class TargetMachine;
FunctionPass *createR600VectorRegMerger(TargetMachine &tm);
FunctionPass *createR600TextureIntrinsicsReplacer();
FunctionPass *createR600ExpandSpecialInstrsPass(TargetMachine &tm);
-FunctionPass *createR600EmitClauseMarkers(TargetMachine &tm);
+FunctionPass *createR600EmitClauseMarkers();
FunctionPass *createR600ClauseMergePass(TargetMachine &tm);
FunctionPass *createR600Packetizer(TargetMachine &tm);
FunctionPass *createR600ControlFlowFinalizer(TargetMachine &tm);
-FunctionPass *createAMDGPUCFGStructurizerPass(TargetMachine &tm);
+FunctionPass *createAMDGPUCFGStructurizerPass();
// SI Passes
FunctionPass *createSITypeRewriter();
FunctionPass *createSIAnnotateControlFlowPass();
+FunctionPass *createSILowerI1CopiesPass();
+FunctionPass *createSIShrinkInstructionsPass();
FunctionPass *createSILowerControlFlowPass(TargetMachine &tm);
FunctionPass *createSIFixSGPRCopiesPass(TargetMachine &tm);
+FunctionPass *createSIFixSGPRLiveRangesPass();
FunctionPass *createSICodeEmitterPass(formatted_raw_ostream &OS);
FunctionPass *createSIInsertWaits(TargetMachine &tm);
+void initializeSILowerI1CopiesPass(PassRegistry &);
+extern char &SILowerI1CopiesID;
+
// Passes common to R600 and SI
+FunctionPass *createAMDGPUPromoteAlloca(const AMDGPUSubtarget &ST);
Pass *createAMDGPUStructurizeCFGPass();
-FunctionPass *createAMDGPUConvertToISAPass(TargetMachine &tm);
FunctionPass *createAMDGPUISelDag(TargetMachine &tm);
/// \brief Creates an AMDGPU-specific Target Transformation Info pass.
ImmutablePass *
createAMDGPUTargetTransformInfoPass(const AMDGPUTargetMachine *TM);
+void initializeSIFixSGPRLiveRangesPass(PassRegistry&);
+extern char &SIFixSGPRLiveRangesID;
+
+
extern Target TheAMDGPUTarget;
+namespace AMDGPU {
+enum TargetIndex {
+ TI_CONSTDATA_START
+};
+}
+
+#define END_OF_TEXT_LABEL_NAME "EndOfTextLabel"
+
} // End namespace llvm
namespace ShaderType {
@@ -68,7 +87,7 @@ namespace ShaderType {
/// various memory regions on the hardware. On the CPU
/// all of the address spaces point to the same memory,
/// however on the GPU, each address space points to
-/// a seperate piece of memory that is unique from other
+/// a separate piece of memory that is unique from other
/// memory locations.
namespace AMDGPUAS {
enum AddressSpaces {
@@ -76,8 +95,8 @@ enum AddressSpaces {
GLOBAL_ADDRESS = 1, ///< Address space for global memory (RAT0, VTX0).
CONSTANT_ADDRESS = 2, ///< Address space for constant memory
LOCAL_ADDRESS = 3, ///< Address space for local memory.
- REGION_ADDRESS = 4, ///< Address space for region memory.
- ADDRESS_NONE = 5, ///< Address space for unknown memory.
+ FLAT_ADDRESS = 4, ///< Address space for flat memory.
+ REGION_ADDRESS = 5, ///< Address space for region memory.
PARAM_D_ADDRESS = 6, ///< Address space for direct addressible parameter memory (CONST0)
PARAM_I_ADDRESS = 7, ///< Address space for indirect addressible parameter memory (VTX1)
@@ -102,7 +121,8 @@ enum AddressSpaces {
CONSTANT_BUFFER_13 = 21,
CONSTANT_BUFFER_14 = 22,
CONSTANT_BUFFER_15 = 23,
- LAST_ADDRESS = 24
+ ADDRESS_NONE = 24, ///< Address space for unknown memory.
+ LAST_ADDRESS = ADDRESS_NONE
};
} // namespace AMDGPUAS
diff --git a/contrib/llvm/lib/Target/R600/AMDGPU.td b/contrib/llvm/lib/Target/R600/AMDGPU.td
index 182235b..5645f1a 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPU.td
+++ b/contrib/llvm/lib/Target/R600/AMDGPU.td
@@ -7,8 +7,7 @@
//
//==-----------------------------------------------------------------------===//
-// Include AMDIL TD files
-include "AMDILBase.td"
+include "llvm/Target/Target.td"
//===----------------------------------------------------------------------===//
// Subtarget Features
@@ -26,6 +25,11 @@ def FeatureIRStructurizer : SubtargetFeature <"disable-irstructurizer",
"false",
"Disable IR Structurizer">;
+def FeaturePromoteAlloca : SubtargetFeature <"promote-alloca",
+ "EnablePromoteAlloca",
+ "true",
+ "Enable promote alloca pass">;
+
// Target features
def FeatureIfCvt : SubtargetFeature <"disable-ifcvt",
@@ -33,36 +37,50 @@ def FeatureIfCvt : SubtargetFeature <"disable-ifcvt",
"false",
"Disable the if conversion pass">;
-def FeatureFP64 : SubtargetFeature<"fp64",
+def FeatureFP64 : SubtargetFeature<"fp64",
"FP64",
"true",
- "Enable 64bit double precision operations">;
+ "Enable double precision operations">;
+
+def FeatureFP64Denormals : SubtargetFeature<"fp64-denormals",
+ "FP64Denormals",
+ "true",
+ "Enable double precision denormal handling",
+ [FeatureFP64]>;
+
+// Some instructions do not support denormals despite this flag. Using
+// fp32 denormals also causes instructions to run at the double
+// precision rate for the device.
+def FeatureFP32Denormals : SubtargetFeature<"fp32-denormals",
+ "FP32Denormals",
+ "true",
+ "Enable single precision denormal handling">;
def Feature64BitPtr : SubtargetFeature<"64BitPtr",
"Is64bit",
"true",
- "Specify if 64bit addressing should be used.">;
-
-def Feature32on64BitPtr : SubtargetFeature<"64on32BitPtr",
- "Is32on64bit",
- "false",
- "Specify if 64bit sized pointers with 32bit addressing should be used.">;
+ "Specify if 64-bit addressing should be used">;
def FeatureR600ALUInst : SubtargetFeature<"R600ALUInst",
"R600ALUInst",
"false",
- "Older version of ALU instructions encoding.">;
+ "Older version of ALU instructions encoding">;
def FeatureVertexCache : SubtargetFeature<"HasVertexCache",
"HasVertexCache",
"true",
- "Specify use of dedicated vertex cache.">;
+ "Specify use of dedicated vertex cache">;
def FeatureCaymanISA : SubtargetFeature<"caymanISA",
"CaymanISA",
"true",
"Use Cayman ISA">;
+def FeatureCFALUBug : SubtargetFeature<"cfalubug",
+ "CFALUBug",
+ "true",
+ "GPU has CF_ALU bug">;
+
class SubtargetFeatureFetchLimit <string Value> :
SubtargetFeature <"fetch"#Value,
"TexVTXClauseSize",
@@ -72,47 +90,76 @@ class SubtargetFeatureFetchLimit <string Value> :
def FeatureFetchLimit8 : SubtargetFeatureFetchLimit <"8">;
def FeatureFetchLimit16 : SubtargetFeatureFetchLimit <"16">;
+class SubtargetFeatureWavefrontSize <int Value> : SubtargetFeature<
+ "wavefrontsize"#Value,
+ "WavefrontSize",
+ !cast<string>(Value),
+ "The number of threads per wavefront">;
+
+def FeatureWavefrontSize16 : SubtargetFeatureWavefrontSize<16>;
+def FeatureWavefrontSize32 : SubtargetFeatureWavefrontSize<32>;
+def FeatureWavefrontSize64 : SubtargetFeatureWavefrontSize<64>;
+
+class SubtargetFeatureLocalMemorySize <int Value> : SubtargetFeature<
+ "localmemorysize"#Value,
+ "LocalMemorySize",
+ !cast<string>(Value),
+ "The size of local memory in bytes">;
+
class SubtargetFeatureGeneration <string Value,
list<SubtargetFeature> Implies> :
SubtargetFeature <Value, "Gen", "AMDGPUSubtarget::"#Value,
Value#" GPU generation", Implies>;
+def FeatureLocalMemorySize0 : SubtargetFeatureLocalMemorySize<0>;
+def FeatureLocalMemorySize32768 : SubtargetFeatureLocalMemorySize<32768>;
+def FeatureLocalMemorySize65536 : SubtargetFeatureLocalMemorySize<65536>;
+
def FeatureR600 : SubtargetFeatureGeneration<"R600",
- [FeatureR600ALUInst, FeatureFetchLimit8]>;
+ [FeatureR600ALUInst, FeatureFetchLimit8, FeatureLocalMemorySize0]>;
def FeatureR700 : SubtargetFeatureGeneration<"R700",
- [FeatureFetchLimit16]>;
+ [FeatureFetchLimit16, FeatureLocalMemorySize0]>;
def FeatureEvergreen : SubtargetFeatureGeneration<"EVERGREEN",
- [FeatureFetchLimit16]>;
+ [FeatureFetchLimit16, FeatureLocalMemorySize32768]>;
def FeatureNorthernIslands : SubtargetFeatureGeneration<"NORTHERN_ISLANDS",
- [FeatureFetchLimit16]>;
+ [FeatureFetchLimit16, FeatureWavefrontSize64,
+ FeatureLocalMemorySize32768]
+>;
def FeatureSouthernIslands : SubtargetFeatureGeneration<"SOUTHERN_ISLANDS",
- [Feature64BitPtr, FeatureFP64]>;
+ [Feature64BitPtr, FeatureFP64, FeatureLocalMemorySize32768,
+ FeatureWavefrontSize64]>;
def FeatureSeaIslands : SubtargetFeatureGeneration<"SEA_ISLANDS",
- [Feature64BitPtr, FeatureFP64]>;
+ [Feature64BitPtr, FeatureFP64, FeatureLocalMemorySize65536,
+ FeatureWavefrontSize64]>;
//===----------------------------------------------------------------------===//
def AMDGPUInstrInfo : InstrInfo {
let guessInstructionProperties = 1;
}
-//===----------------------------------------------------------------------===//
-// Declare the target which we are implementing
-//===----------------------------------------------------------------------===//
-def AMDGPUAsmWriter : AsmWriter {
- string AsmWriterClassName = "InstPrinter";
- int Variant = 0;
- bit isMCAsmWriter = 1;
-}
-
def AMDGPU : Target {
// Pull in Instruction Info:
let InstructionSet = AMDGPUInstrInfo;
- let AssemblyWriters = [AMDGPUAsmWriter];
+}
+
+// Dummy Instruction itineraries for pseudo instructions
+def ALU_NULL : FuncUnit;
+def NullALU : InstrItinClass;
+
+//===----------------------------------------------------------------------===//
+// Predicate helper class
+//===----------------------------------------------------------------------===//
+
+class PredicateControl {
+ Predicate SubtargetPredicate;
+ list<Predicate> OtherPredicates = [];
+ list<Predicate> Predicates = !listconcat([SubtargetPredicate],
+ OtherPredicates);
}
// Include AMDGPU TD files
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUAsmPrinter.cpp b/contrib/llvm/lib/Target/R600/AMDGPUAsmPrinter.cpp
index 67bdba2..73faaa1 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/R600/AMDGPUAsmPrinter.cpp
@@ -16,15 +16,16 @@
//===----------------------------------------------------------------------===//
//
-
#include "AMDGPUAsmPrinter.h"
#include "AMDGPU.h"
+#include "AMDGPUSubtarget.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCStreamer.h"
@@ -35,6 +36,41 @@
using namespace llvm;
+// TODO: This should get the default rounding mode from the kernel. We just set
+// the default here, but this could change if the OpenCL rounding mode pragmas
+// are used.
+//
+// The denormal mode here should match what is reported by the OpenCL runtime
+// for the CL_FP_DENORM bit from CL_DEVICE_{HALF|SINGLE|DOUBLE}_FP_CONFIG, but
+// can also be overridden to flush with the -cl-denorms-are-zero compiler flag.
+//
+// AMD OpenCL only sets flush none and reports CL_FP_DENORM for double
+// precision, and leaves single precision to flush all and does not report
+// CL_FP_DENORM for CL_DEVICE_SINGLE_FP_CONFIG. Mesa's OpenCL currently reports
+// CL_FP_DENORM for both.
+//
+// FIXME: It seems some instructions do not support single precision denormals
+// regardless of the mode (exp_*_f32, rcp_*_f32, rsq_*_f32, rsq_*f32, sqrt_f32,
+// and sin_f32, cos_f32 on most parts).
+
+// We want to use these instructions, and using fp32 denormals also causes
+// instructions to run at the double precision rate for the device so it's
+// probably best to just report no single precision denormals.
+static uint32_t getFPMode(const MachineFunction &F) {
+ const AMDGPUSubtarget& ST = F.getTarget().getSubtarget<AMDGPUSubtarget>();
+ // TODO: Is there any real use for the flush in only / flush out only modes?
+
+ uint32_t FP32Denormals =
+ ST.hasFP32Denormals() ? FP_DENORM_FLUSH_NONE : FP_DENORM_FLUSH_IN_FLUSH_OUT;
+
+ uint32_t FP64Denormals =
+ ST.hasFP64Denormals() ? FP_DENORM_FLUSH_NONE : FP_DENORM_FLUSH_IN_FLUSH_OUT;
+
+ return FP_ROUND_MODE_SP(FP_ROUND_ROUND_TO_NEAREST) |
+ FP_ROUND_MODE_DP(FP_ROUND_ROUND_TO_NEAREST) |
+ FP_DENORM_MODE_SP(FP32Denormals) |
+ FP_DENORM_MODE_DP(FP64Denormals);
+}
static AsmPrinter *createAMDGPUAsmPrinterPass(TargetMachine &tm,
MCStreamer &Streamer) {
@@ -46,28 +82,36 @@ extern "C" void LLVMInitializeR600AsmPrinter() {
}
AMDGPUAsmPrinter::AMDGPUAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer)
-{
- DisasmEnabled = TM.getSubtarget<AMDGPUSubtarget>().dumpCode() &&
- ! Streamer.hasRawTextSupport();
+ : AsmPrinter(TM, Streamer) {
+ DisasmEnabled = TM.getSubtarget<AMDGPUSubtarget>().dumpCode();
+}
+
+void AMDGPUAsmPrinter::EmitEndOfAsmFile(Module &M) {
+
+ // This label is used to mark the end of the .text section.
+ const TargetLoweringObjectFile &TLOF = getObjFileLowering();
+ OutStreamer.SwitchSection(TLOF.getTextSection());
+ MCSymbol *EndOfTextLabel =
+ OutContext.GetOrCreateSymbol(StringRef(END_OF_TEXT_LABEL_NAME));
+ OutStreamer.EmitLabel(EndOfTextLabel);
}
-/// We need to override this function so we can avoid
-/// the call to EmitFunctionHeader(), which the MCPureStreamer can't handle.
bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
SetupMachineFunction(MF);
- if (OutStreamer.hasRawTextSupport()) {
- OutStreamer.EmitRawText("@" + MF.getName() + ":");
- }
+
+ OutStreamer.emitRawComment(Twine('@') + MF.getName() + Twine(':'));
MCContext &Context = getObjFileLowering().getContext();
const MCSectionELF *ConfigSection = Context.getELFSection(".AMDGPU.config",
ELF::SHT_PROGBITS, 0,
SectionKind::getReadOnly());
OutStreamer.SwitchSection(ConfigSection);
+
const AMDGPUSubtarget &STM = TM.getSubtarget<AMDGPUSubtarget>();
+ SIProgramInfo KernelInfo;
if (STM.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
- EmitProgramInfoSI(MF);
+ getSIProgramInfo(KernelInfo, MF);
+ EmitProgramInfoSI(MF, KernelInfo);
} else {
EmitProgramInfoR600(MF);
}
@@ -79,6 +123,34 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
OutStreamer.SwitchSection(getObjFileLowering().getTextSection());
EmitFunctionBody();
+ if (isVerbose()) {
+ const MCSectionELF *CommentSection
+ = Context.getELFSection(".AMDGPU.csdata",
+ ELF::SHT_PROGBITS, 0,
+ SectionKind::getReadOnly());
+ OutStreamer.SwitchSection(CommentSection);
+
+ if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
+ OutStreamer.emitRawComment(" Kernel info:", false);
+ OutStreamer.emitRawComment(" codeLenInByte = " + Twine(KernelInfo.CodeLen),
+ false);
+ OutStreamer.emitRawComment(" NumSgprs: " + Twine(KernelInfo.NumSGPR),
+ false);
+ OutStreamer.emitRawComment(" NumVgprs: " + Twine(KernelInfo.NumVGPR),
+ false);
+ OutStreamer.emitRawComment(" FloatMode: " + Twine(KernelInfo.FloatMode),
+ false);
+ OutStreamer.emitRawComment(" IeeeMode: " + Twine(KernelInfo.IEEEMode),
+ false);
+ OutStreamer.emitRawComment(" ScratchSize: " + Twine(KernelInfo.ScratchSize),
+ false);
+ } else {
+ R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
+ OutStreamer.emitRawComment(
+ Twine("SQ_PGM_RESOURCES:STACK_SIZE = " + Twine(MFI->StackSize)));
+ }
+ }
+
if (STM.dumpCode()) {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
MF.dump();
@@ -102,25 +174,21 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
return false;
}
-void AMDGPUAsmPrinter::EmitProgramInfoR600(MachineFunction &MF) {
+void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) {
unsigned MaxGPR = 0;
bool killPixel = false;
- const R600RegisterInfo * RI =
- static_cast<const R600RegisterInfo*>(TM.getRegisterInfo());
- R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
+ const R600RegisterInfo *RI
+ = static_cast<const R600RegisterInfo*>(TM.getRegisterInfo());
+ const R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
const AMDGPUSubtarget &STM = TM.getSubtarget<AMDGPUSubtarget>();
- for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
- BB != BB_E; ++BB) {
- MachineBasicBlock &MBB = *BB;
- for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
- I != E; ++I) {
- MachineInstr &MI = *I;
+ for (const MachineBasicBlock &MBB : MF) {
+ for (const MachineInstr &MI : MBB) {
if (MI.getOpcode() == AMDGPU::KILLGT)
killPixel = true;
unsigned numOperands = MI.getNumOperands();
for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {
- MachineOperand & MO = MI.getOperand(op_idx);
+ const MachineOperand &MO = MI.getOperand(op_idx);
if (!MO.isReg())
continue;
unsigned HWReg = RI->getEncodingValue(MO.getReg()) & 0xff;
@@ -136,7 +204,7 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(MachineFunction &MF) {
unsigned RsrcReg;
if (STM.getGeneration() >= AMDGPUSubtarget::EVERGREEN) {
// Evergreen / Northern Islands
- switch (MFI->ShaderType) {
+ switch (MFI->getShaderType()) {
default: // Fall through
case ShaderType::COMPUTE: RsrcReg = R_0288D4_SQ_PGM_RESOURCES_LS; break;
case ShaderType::GEOMETRY: RsrcReg = R_028878_SQ_PGM_RESOURCES_GS; break;
@@ -145,7 +213,7 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(MachineFunction &MF) {
}
} else {
// R600 / R700
- switch (MFI->ShaderType) {
+ switch (MFI->getShaderType()) {
default: // Fall through
case ShaderType::GEOMETRY: // Fall through
case ShaderType::COMPUTE: // Fall through
@@ -160,40 +228,38 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(MachineFunction &MF) {
OutStreamer.EmitIntValue(R_02880C_DB_SHADER_CONTROL, 4);
OutStreamer.EmitIntValue(S_02880C_KILL_ENABLE(killPixel), 4);
- if (MFI->ShaderType == ShaderType::COMPUTE) {
+ if (MFI->getShaderType() == ShaderType::COMPUTE) {
OutStreamer.EmitIntValue(R_0288E8_SQ_LDS_ALLOC, 4);
OutStreamer.EmitIntValue(RoundUpToAlignment(MFI->LDSSize, 4) >> 2, 4);
}
}
-void AMDGPUAsmPrinter::EmitProgramInfoSI(MachineFunction &MF) {
- const AMDGPUSubtarget &STM = TM.getSubtarget<AMDGPUSubtarget>();
+void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
+ const MachineFunction &MF) const {
+ uint64_t CodeSize = 0;
unsigned MaxSGPR = 0;
unsigned MaxVGPR = 0;
bool VCCUsed = false;
- const SIRegisterInfo * RI =
- static_cast<const SIRegisterInfo*>(TM.getRegisterInfo());
+ const SIRegisterInfo *RI
+ = static_cast<const SIRegisterInfo*>(TM.getRegisterInfo());
- for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
- BB != BB_E; ++BB) {
- MachineBasicBlock &MBB = *BB;
- for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
- I != E; ++I) {
- MachineInstr &MI = *I;
+ for (const MachineBasicBlock &MBB : MF) {
+ for (const MachineInstr &MI : MBB) {
+ // TODO: CodeSize should account for multiple functions.
+ CodeSize += MI.getDesc().Size;
unsigned numOperands = MI.getNumOperands();
for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {
- MachineOperand &MO = MI.getOperand(op_idx);
- unsigned maxUsed;
+ const MachineOperand &MO = MI.getOperand(op_idx);
unsigned width = 0;
bool isSGPR = false;
- unsigned reg;
- unsigned hwReg;
+
if (!MO.isReg()) {
continue;
}
- reg = MO.getReg();
- if (reg == AMDGPU::VCC) {
+ unsigned reg = MO.getReg();
+ if (reg == AMDGPU::VCC || reg == AMDGPU::VCC_LO ||
+ reg == AMDGPU::VCC_HI) {
VCCUsed = true;
continue;
}
@@ -240,10 +306,10 @@ void AMDGPUAsmPrinter::EmitProgramInfoSI(MachineFunction &MF) {
isSGPR = false;
width = 16;
} else {
- assert(!"Unknown register class");
+ llvm_unreachable("Unknown register class");
}
- hwReg = RI->getEncodingValue(reg) & 0xff;
- maxUsed = hwReg + width - 1;
+ unsigned hwReg = RI->getEncodingValue(reg) & 0xff;
+ unsigned maxUsed = hwReg + width - 1;
if (isSGPR) {
MaxSGPR = maxUsed > MaxSGPR ? maxUsed : MaxSGPR;
} else {
@@ -252,12 +318,36 @@ void AMDGPUAsmPrinter::EmitProgramInfoSI(MachineFunction &MF) {
}
}
}
- if (VCCUsed) {
+
+ if (VCCUsed)
MaxSGPR += 2;
- }
- SIMachineFunctionInfo * MFI = MF.getInfo<SIMachineFunctionInfo>();
+
+ ProgInfo.NumVGPR = MaxVGPR;
+ ProgInfo.NumSGPR = MaxSGPR;
+
+ // Set the value to initialize FP_ROUND and FP_DENORM parts of the mode
+ // register.
+ ProgInfo.FloatMode = getFPMode(MF);
+
+ // XXX: Not quite sure what this does, but sc seems to unset this.
+ ProgInfo.IEEEMode = 0;
+
+ // Do not clamp NAN to 0.
+ ProgInfo.DX10Clamp = 0;
+
+ const MachineFrameInfo *FrameInfo = MF.getFrameInfo();
+ ProgInfo.ScratchSize = FrameInfo->estimateStackSize(MF);
+
+ ProgInfo.CodeLen = CodeSize;
+}
+
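
Editorial note: a single worked pass of the register-counting loop above, with invented encoding values for illustration.

// Worked example: if the widest SGPR operand seen is a 64-bit pair whose
// encoding value is 10 (width = 2), then
//   maxUsed = 10 + 2 - 1 = 11  and  MaxSGPR = 11.
// If any instruction also reads VCC / VCC_LO / VCC_HI, the final
//   MaxSGPR += 2  gives 13, which is what ends up in ProgInfo.NumSGPR.
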
+void AMDGPUAsmPrinter::EmitProgramInfoSI(const MachineFunction &MF,
+ const SIProgramInfo &KernelInfo) {
+ const AMDGPUSubtarget &STM = TM.getSubtarget<AMDGPUSubtarget>();
+ const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
+
unsigned RsrcReg;
- switch (MFI->ShaderType) {
+ switch (MFI->getShaderType()) {
default: // Fall through
case ShaderType::COMPUTE: RsrcReg = R_00B848_COMPUTE_PGM_RSRC1; break;
case ShaderType::GEOMETRY: RsrcReg = R_00B228_SPI_SHADER_PGM_RSRC1_GS; break;
@@ -265,25 +355,58 @@ void AMDGPUAsmPrinter::EmitProgramInfoSI(MachineFunction &MF) {
case ShaderType::VERTEX: RsrcReg = R_00B128_SPI_SHADER_PGM_RSRC1_VS; break;
}
- OutStreamer.EmitIntValue(RsrcReg, 4);
- OutStreamer.EmitIntValue(S_00B028_VGPRS(MaxVGPR / 4) | S_00B028_SGPRS(MaxSGPR / 8), 4);
-
unsigned LDSAlignShift;
if (STM.getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
- // LDS is allocated in 64 dword blocks
+ // LDS is allocated in 64 dword blocks.
LDSAlignShift = 8;
} else {
- // LDS is allocated in 128 dword blocks
+ // LDS is allocated in 128 dword blocks.
LDSAlignShift = 9;
}
+
unsigned LDSBlocks =
- RoundUpToAlignment(MFI->LDSSize, 1 << LDSAlignShift) >> LDSAlignShift;
+ RoundUpToAlignment(MFI->LDSSize, 1 << LDSAlignShift) >> LDSAlignShift;
+
+ // Scratch is allocated in 256 dword blocks.
+ unsigned ScratchAlignShift = 10;
+ // We need to program the hardware with the amount of scratch memory that
+ // is used by the entire wave. KernelInfo.ScratchSize is the amount of
+ // scratch memory used per thread.
+ unsigned ScratchBlocks =
+ RoundUpToAlignment(KernelInfo.ScratchSize * STM.getWavefrontSize(),
+ 1 << ScratchAlignShift) >> ScratchAlignShift;
+
+ if (MFI->getShaderType() == ShaderType::COMPUTE) {
+ OutStreamer.EmitIntValue(R_00B848_COMPUTE_PGM_RSRC1, 4);
+
+ const uint32_t ComputePGMRSrc1 =
+ S_00B848_VGPRS(KernelInfo.NumVGPR / 4) |
+ S_00B848_SGPRS(KernelInfo.NumSGPR / 8) |
+ S_00B848_PRIORITY(KernelInfo.Priority) |
+ S_00B848_FLOAT_MODE(KernelInfo.FloatMode) |
+ S_00B848_PRIV(KernelInfo.Priv) |
+ S_00B848_DX10_CLAMP(KernelInfo.DX10Clamp) |
+ S_00B848_IEEE_MODE(KernelInfo.DebugMode) |
+ S_00B848_IEEE_MODE(KernelInfo.IEEEMode);
+
+ OutStreamer.EmitIntValue(ComputePGMRSrc1, 4);
- if (MFI->ShaderType == ShaderType::COMPUTE) {
OutStreamer.EmitIntValue(R_00B84C_COMPUTE_PGM_RSRC2, 4);
- OutStreamer.EmitIntValue(S_00B84C_LDS_SIZE(LDSBlocks), 4);
+ const uint32_t ComputePGMRSrc2 =
+ S_00B84C_LDS_SIZE(LDSBlocks) |
+ S_00B02C_SCRATCH_EN(ScratchBlocks > 0);
+
+ OutStreamer.EmitIntValue(ComputePGMRSrc2, 4);
+
+ OutStreamer.EmitIntValue(R_00B860_COMPUTE_TMPRING_SIZE, 4);
+ OutStreamer.EmitIntValue(S_00B860_WAVESIZE(ScratchBlocks), 4);
+ } else {
+ OutStreamer.EmitIntValue(RsrcReg, 4);
+ OutStreamer.EmitIntValue(S_00B028_VGPRS(KernelInfo.NumVGPR / 4) |
+ S_00B028_SGPRS(KernelInfo.NumSGPR / 8), 4);
}
- if (MFI->ShaderType == ShaderType::PIXEL) {
+
+ if (MFI->getShaderType() == ShaderType::PIXEL) {
OutStreamer.EmitIntValue(R_00B02C_SPI_SHADER_PGM_RSRC2_PS, 4);
OutStreamer.EmitIntValue(S_00B02C_EXTRA_LDS_SIZE(LDSBlocks), 4);
OutStreamer.EmitIntValue(R_0286CC_SPI_PS_INPUT_ENA, 4);
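
Editorial note: tracing the block computations above with concrete numbers, worked directly from the code rather than from hardware documentation.

// Worked example for the sizes programmed above:
//   Scratch: KernelInfo.ScratchSize = 68 bytes/thread, wavefront size 64
//     -> 68 * 64 = 4352 bytes per wave, rounded up to 1024-byte blocks
//        (ScratchAlignShift = 10) gives 5120, so ScratchBlocks = 5120 >> 10 = 5,
//        which is written via S_00B860_WAVESIZE.
//   LDS on Southern Islands (generation < SEA_ISLANDS, LDSAlignShift = 8,
//   i.e. 64-dword = 256-byte blocks):
//     MFI->LDSSize = 9000 -> rounded up to 9216 -> LDSBlocks = 9216 >> 8 = 36.
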
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUAsmPrinter.h b/contrib/llvm/lib/Target/R600/AMDGPUAsmPrinter.h
index 05dc9bb..19907cf 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUAsmPrinter.h
+++ b/contrib/llvm/lib/Target/R600/AMDGPUAsmPrinter.h
@@ -16,29 +16,63 @@
#define AMDGPU_ASMPRINTER_H
#include "llvm/CodeGen/AsmPrinter.h"
-#include <string>
#include <vector>
namespace llvm {
class AMDGPUAsmPrinter : public AsmPrinter {
+private:
+ struct SIProgramInfo {
+ SIProgramInfo() :
+ NumVGPR(0),
+ NumSGPR(0),
+ Priority(0),
+ FloatMode(0),
+ Priv(0),
+ DX10Clamp(0),
+ DebugMode(0),
+ IEEEMode(0),
+ ScratchSize(0),
+ CodeLen(0) {}
+
+ // Fields set in PGM_RSRC1 pm4 packet.
+ uint32_t NumVGPR;
+ uint32_t NumSGPR;
+ uint32_t Priority;
+ uint32_t FloatMode;
+ uint32_t Priv;
+ uint32_t DX10Clamp;
+ uint32_t DebugMode;
+ uint32_t IEEEMode;
+ uint32_t ScratchSize;
+
+ // Bonus information for debugging.
+ uint64_t CodeLen;
+ };
+
+ void getSIProgramInfo(SIProgramInfo &Out, const MachineFunction &MF) const;
+ void findNumUsedRegistersSI(const MachineFunction &MF,
+ unsigned &NumSGPR,
+ unsigned &NumVGPR) const;
+
+ /// \brief Emit register usage information so that the GPU driver
+ /// can correctly setup the GPU state.
+ void EmitProgramInfoR600(const MachineFunction &MF);
+ void EmitProgramInfoSI(const MachineFunction &MF, const SIProgramInfo &KernelInfo);
public:
explicit AMDGPUAsmPrinter(TargetMachine &TM, MCStreamer &Streamer);
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "AMDGPU Assembly Printer";
}
- /// \brief Emit register usage information so that the GPU driver
- /// can correctly setup the GPU state.
- void EmitProgramInfoR600(MachineFunction &MF);
- void EmitProgramInfoSI(MachineFunction &MF);
-
/// Implemented in AMDGPUMCInstLower.cpp
- virtual void EmitInstruction(const MachineInstr *MI);
+ void EmitInstruction(const MachineInstr *MI) override;
+
+ void EmitEndOfAsmFile(Module &M) override;
protected:
bool DisasmEnabled;
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUCallingConv.td b/contrib/llvm/lib/Target/R600/AMDGPUCallingConv.td
index 65cdb24..3586c88 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUCallingConv.td
+++ b/contrib/llvm/lib/Target/R600/AMDGPUCallingConv.td
@@ -20,7 +20,7 @@ def CC_SI : CallingConv<[
CCIfInReg<CCIfType<[f32, i32] , CCAssignToReg<[
SGPR0, SGPR1, SGPR2, SGPR3, SGPR4, SGPR5, SGPR6, SGPR7,
SGPR8, SGPR9, SGPR10, SGPR11, SGPR12, SGPR13, SGPR14, SGPR15,
- SGPR16
+ SGPR16, SGPR17, SGPR18, SGPR19, SGPR20, SGPR21
]>>>,
CCIfInReg<CCIfType<[i64] , CCAssignToRegWithShadow<
@@ -62,11 +62,11 @@ def CC_AMDGPU : CallingConv<[
CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>().getGeneration() >= "
"AMDGPUSubtarget::SOUTHERN_ISLANDS && "
"State.getMachineFunction().getInfo<SIMachineFunctionInfo>()->"#
- "ShaderType == ShaderType::COMPUTE", CCDelegateTo<CC_AMDGPU_Kernel>>,
+ "getShaderType() == ShaderType::COMPUTE", CCDelegateTo<CC_AMDGPU_Kernel>>,
CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>().getGeneration() < "
"AMDGPUSubtarget::SOUTHERN_ISLANDS && "
"State.getMachineFunction().getInfo<R600MachineFunctionInfo>()->"
- "ShaderType == ShaderType::COMPUTE", CCDelegateTo<CC_AMDGPU_Kernel>>,
+ "getShaderType() == ShaderType::COMPUTE", CCDelegateTo<CC_AMDGPU_Kernel>>,
CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>()"#
".getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS", CCDelegateTo<CC_SI>>,
CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>()"#
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUConvertToISA.cpp b/contrib/llvm/lib/Target/R600/AMDGPUConvertToISA.cpp
deleted file mode 100644
index 50297d1..0000000
--- a/contrib/llvm/lib/Target/R600/AMDGPUConvertToISA.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-//===-- AMDGPUConvertToISA.cpp - Lower AMDIL to HW ISA --------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief This pass lowers AMDIL machine instructions to the appropriate
-/// hardware instructions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "AMDGPUInstrInfo.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-
-using namespace llvm;
-
-namespace {
-
-class AMDGPUConvertToISAPass : public MachineFunctionPass {
-
-private:
- static char ID;
- TargetMachine &TM;
-
-public:
- AMDGPUConvertToISAPass(TargetMachine &tm) :
- MachineFunctionPass(ID), TM(tm) { }
-
- virtual bool runOnMachineFunction(MachineFunction &MF);
-
- virtual const char *getPassName() const {return "AMDGPU Convert to ISA";}
-
-};
-
-} // End anonymous namespace
-
-char AMDGPUConvertToISAPass::ID = 0;
-
-FunctionPass *llvm::createAMDGPUConvertToISAPass(TargetMachine &tm) {
- return new AMDGPUConvertToISAPass(tm);
-}
-
-bool AMDGPUConvertToISAPass::runOnMachineFunction(MachineFunction &MF) {
- const AMDGPUInstrInfo * TII =
- static_cast<const AMDGPUInstrInfo*>(TM.getInstrInfo());
-
- for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
- BB != BB_E; ++BB) {
- MachineBasicBlock &MBB = *BB;
- for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
- I != E; ++I) {
- MachineInstr &MI = *I;
- TII->convertToISA(MI, MF, MBB.findDebugLoc(I));
- }
- }
- return false;
-}
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUFrameLowering.cpp b/contrib/llvm/lib/Target/R600/AMDGPUFrameLowering.cpp
index 40f14d2..9e8302e 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/R600/AMDGPUFrameLowering.cpp
@@ -74,20 +74,30 @@ unsigned AMDGPUFrameLowering::getStackWidth(const MachineFunction &MF) const {
int AMDGPUFrameLowering::getFrameIndexOffset(const MachineFunction &MF,
int FI) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
- unsigned Offset = 0;
+ // Start the offset at 2 stack slots (2 * stack width * 4 bytes) so we don't
+ // overwrite work group information.
+ // XXX: We should only do this when the shader actually uses this
+ // information.
+ unsigned OffsetBytes = 2 * (getStackWidth(MF) * 4);
int UpperBound = FI == -1 ? MFI->getNumObjects() : FI;
for (int i = MFI->getObjectIndexBegin(); i < UpperBound; ++i) {
- unsigned Size = MFI->getObjectSize(i);
- Offset += (Size / (getStackWidth(MF) * 4));
+ OffsetBytes = RoundUpToAlignment(OffsetBytes, MFI->getObjectAlignment(i));
+ OffsetBytes += MFI->getObjectSize(i);
+ // Each register holds 4 bytes, so we must always align the offset to at
+ // least 4 bytes, so that 2 frame objects won't share the same register.
+ OffsetBytes = RoundUpToAlignment(OffsetBytes, 4);
}
- return Offset;
+
+ if (FI != -1)
+ OffsetBytes = RoundUpToAlignment(OffsetBytes, MFI->getObjectAlignment(FI));
+
+ return OffsetBytes / (getStackWidth(MF) * 4);
}
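
Editorial note: a worked trace of the rewritten offset computation above, assuming getStackWidth(MF) == 1, i.e. one 32-bit channel so each register slot is 4 bytes.

// Worked example for getFrameIndexOffset() above with getStackWidth(MF) == 1:
//   OffsetBytes starts at 2 * (1 * 4) = 8 (skips the work-group info slots).
//   Object 0: size 4, align 4 -> align(8, 4) = 8, +4 = 12, align(12, 4) = 12.
//   Asking for FI = 1 with alignment 8: align(12, 8) = 16,
//   so the function returns 16 / (1 * 4) = 4, i.e. register slot #4.
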
const TargetFrameLowering::SpillSlot *
AMDGPUFrameLowering::getCalleeSavedSpillSlots(unsigned &NumEntries) const {
NumEntries = 0;
- return 0;
+ return nullptr;
}
void
AMDGPUFrameLowering::emitPrologue(MachineFunction &MF) const {
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUFrameLowering.h b/contrib/llvm/lib/Target/R600/AMDGPUFrameLowering.h
index cf5742e..d18ede5 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUFrameLowering.h
+++ b/contrib/llvm/lib/Target/R600/AMDGPUFrameLowering.h
@@ -33,12 +33,13 @@ public:
/// \returns The number of 32-bit sub-registers that are used when storing
/// values to the stack.
- virtual unsigned getStackWidth(const MachineFunction &MF) const;
- virtual int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
- virtual const SpillSlot *getCalleeSavedSpillSlots(unsigned &NumEntries) const;
- virtual void emitPrologue(MachineFunction &MF) const;
- virtual void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
- virtual bool hasFP(const MachineFunction &MF) const;
+ unsigned getStackWidth(const MachineFunction &MF) const;
+ int getFrameIndexOffset(const MachineFunction &MF, int FI) const override;
+ const SpillSlot *
+ getCalleeSavedSpillSlots(unsigned &NumEntries) const override;
+ void emitPrologue(MachineFunction &MF) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+ bool hasFP(const MachineFunction &MF) const override;
};
} // namespace llvm
#endif // AMDILFRAME_LOWERING_H
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUISelDAGToDAG.cpp b/contrib/llvm/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
index a989135..cc17b7e 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
@@ -14,17 +14,18 @@
#include "AMDGPUInstrInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPURegisterInfo.h"
+#include "AMDGPUSubtarget.h"
#include "R600InstrInfo.h"
+#include "SIDefines.h"
#include "SIISelLowering.h"
-#include "llvm/ADT/ValueMap.h"
-#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "SIMachineFunctionInfo.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/Support/Compiler.h"
-#include <list>
-#include <queue>
+#include "llvm/IR/Function.h"
using namespace llvm;
@@ -43,11 +44,12 @@ public:
AMDGPUDAGToDAGISel(TargetMachine &TM);
virtual ~AMDGPUDAGToDAGISel();
- SDNode *Select(SDNode *N);
- virtual const char *getPassName() const;
- virtual void PostprocessISelDAG();
+ SDNode *Select(SDNode *N) override;
+ const char *getPassName() const override;
+ void PostprocessISelDAG() override;
private:
+ bool isInlineImmediate(SDNode *N) const;
inline SDValue getSmallIPtrImm(unsigned Imm);
bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
const R600InstrInfo *TII);
@@ -58,11 +60,9 @@ private:
bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
- SDValue SimplifyI24(SDValue &Op);
- bool SelectI24(SDValue Addr, SDValue &Op);
- bool SelectU24(SDValue Addr, SDValue &Op);
static bool checkType(const Value *ptr, unsigned int addrspace);
+ static bool checkPrivateAddress(const MachineMemOperand *Op);
static bool isGlobalStore(const StoreSDNode *N);
static bool isPrivateStore(const StoreSDNode *N);
@@ -77,12 +77,28 @@ private:
bool isLocalLoad(const LoadSDNode *N) const;
bool isRegionLoad(const LoadSDNode *N) const;
+  /// \returns True if the current basic block being selected is at control
+  /// flow depth 0, meaning that the current block dominates the exit block.
+ bool isCFDepth0() const;
+
const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
- bool SelectGlobalValueVariableOffset(SDValue Addr,
- SDValue &BaseReg, SDValue& Offset);
+ bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
+ SDValue& Offset);
bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
+ bool SelectMUBUFAddr64(SDValue Addr, SDValue &Ptr, SDValue &Offset,
+ SDValue &ImmOffset) const;
+ bool SelectMUBUFScratch(SDValue Addr, SDValue &RSrc, SDValue &VAddr,
+ SDValue &SOffset, SDValue &ImmOffset) const;
+ bool SelectMUBUFAddr32(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
+ SDValue &SOffset, SDValue &Offset, SDValue &Offen,
+ SDValue &Idxen, SDValue &GLC, SDValue &SLC,
+ SDValue &TFE) const;
+
+ SDNode *SelectADD_SUB_I64(SDNode *N);
+ SDNode *SelectDIV_SCALE(SDNode *N);
// Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
@@ -91,8 +107,7 @@ private:
/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
-FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM
- ) {
+FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
return new AMDGPUDAGToDAGISel(TM);
}
@@ -103,32 +118,39 @@ AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}
+bool AMDGPUDAGToDAGISel::isInlineImmediate(SDNode *N) const {
+ const SITargetLowering *TL
+ = static_cast<const SITargetLowering *>(getTargetLowering());
+ return TL->analyzeImmediate(N) == 0;
+}
+
/// \brief Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
unsigned OpNo) const {
- if (!N->isMachineOpcode()) {
- return NULL;
- }
+ if (!N->isMachineOpcode())
+ return nullptr;
+
switch (N->getMachineOpcode()) {
default: {
const MCInstrDesc &Desc = TM.getInstrInfo()->get(N->getMachineOpcode());
unsigned OpIdx = Desc.getNumDefs() + OpNo;
if (OpIdx >= Desc.getNumOperands())
- return NULL;
+ return nullptr;
int RegClass = Desc.OpInfo[OpIdx].RegClass;
- if (RegClass == -1) {
- return NULL;
- }
+ if (RegClass == -1)
+ return nullptr;
+
return TM.getRegisterInfo()->getRegClass(RegClass);
}
case AMDGPU::REG_SEQUENCE: {
- const TargetRegisterClass *SuperRC = TM.getRegisterInfo()->getRegClass(
- cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
- unsigned SubRegIdx =
- dyn_cast<ConstantSDNode>(N->getOperand(OpNo + 1))->getZExtValue();
+ unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ const TargetRegisterClass *SuperRC = TM.getRegisterInfo()->getRegClass(RCID);
+
+ SDValue SubRegOp = N->getOperand(OpNo + 1);
+ unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
return TM.getRegisterInfo()->getSubClassWithSubReg(SuperRC, SubRegIdx);
}
}
@@ -139,7 +161,7 @@ SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
}
bool AMDGPUDAGToDAGISel::SelectADDRParam(
- SDValue Addr, SDValue& R1, SDValue& R2) {
+ SDValue Addr, SDValue& R1, SDValue& R2) {
if (Addr.getOpcode() == ISD::FrameIndex) {
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
@@ -196,20 +218,35 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
unsigned int Opc = N->getOpcode();
if (N->isMachineOpcode()) {
N->setNodeId(-1);
- return NULL; // Already selected.
+ return nullptr; // Already selected.
}
+
+ const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
switch (Opc) {
default: break;
+  // We are selecting i64 ADD here instead of custom lowering it during
+ // DAG legalization, so we can fold some i64 ADDs used for address
+ // calculation into the LOAD and STORE instructions.
+ case ISD::ADD:
+ case ISD::SUB: {
+ if (N->getValueType(0) != MVT::i64 ||
+ ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
+ break;
+
+ return SelectADD_SUB_I64(N);
+ }
+ case ISD::SCALAR_TO_VECTOR:
+ case AMDGPUISD::BUILD_VERTICAL_VECTOR:
case ISD::BUILD_VECTOR: {
unsigned RegClassID;
- const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
const AMDGPURegisterInfo *TRI =
static_cast<const AMDGPURegisterInfo*>(TM.getRegisterInfo());
const SIRegisterInfo *SIRI =
static_cast<const SIRegisterInfo*>(TM.getRegisterInfo());
EVT VT = N->getValueType(0);
unsigned NumVectorElts = VT.getVectorNumElements();
- assert(VT.getVectorElementType().bitsEq(MVT::i32));
+ EVT EltVT = VT.getVectorElementType();
+ assert(EltVT.bitsEq(MVT::i32));
if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
bool UseVReg = true;
for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
@@ -250,7 +287,12 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
// can't be bundled by our scheduler.
switch(NumVectorElts) {
case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
- case 4: RegClassID = AMDGPU::R600_Reg128RegClassID; break;
+ case 4:
+ if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
+ RegClassID = AMDGPU::R600_Reg128VerticalRegClassID;
+ else
+ RegClassID = AMDGPU::R600_Reg128RegClassID;
+ break;
default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
}
}
@@ -258,8 +300,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);
if (NumVectorElts == 1) {
- return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS,
- VT.getVectorElementType(),
+ return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT,
N->getOperand(0), RegClass);
}
@@ -268,11 +309,12 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
// 16 = Max Num Vector Elements
// 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
// 1 = Vector Register Class
- SDValue RegSeqArgs[16 * 2 + 1];
+ SmallVector<SDValue, 16 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);
RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
bool IsRegSeq = true;
- for (unsigned i = 0; i < N->getNumOperands(); i++) {
+ unsigned NOps = N->getNumOperands();
+ for (unsigned i = 0; i < NOps; i++) {
// XXX: Why is this here?
if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
IsRegSeq = false;
@@ -282,14 +324,27 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
RegSeqArgs[1 + (2 * i) + 1] =
CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
}
+
+ if (NOps != NumVectorElts) {
+ // Fill in the missing undef elements if this was a scalar_to_vector.
+ assert(Opc == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);
+
+ MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
+ SDLoc(N), EltVT);
+ for (unsigned i = NOps; i < NumVectorElts; ++i) {
+ RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
+ RegSeqArgs[1 + (2 * i) + 1] =
+ CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
+ }
+ }
+
if (!IsRegSeq)
break;
return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
- RegSeqArgs, 2 * N->getNumOperands() + 1);
+ RegSeqArgs);
}
case ISD::BUILD_PAIR: {
SDValue RC, SubReg0, SubReg1;
- const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
break;
}
@@ -298,7 +353,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
} else if (N->getValueType(0) == MVT::i64) {
- RC = CurDAG->getTargetConstant(AMDGPU::VSrc_64RegClassID, MVT::i32);
+ RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
} else {
@@ -309,8 +364,37 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
SDLoc(N), N->getValueType(0), Ops);
}
- case AMDGPUISD::REGISTER_LOAD: {
+
+ case ISD::Constant:
+ case ISD::ConstantFP: {
const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
+ if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
+ N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
+ break;
+
+ uint64_t Imm;
+ if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
+ Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
+ else {
+ ConstantSDNode *C = cast<ConstantSDNode>(N);
+ Imm = C->getZExtValue();
+ }
+
+ SDNode *Lo = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
+ CurDAG->getConstant(Imm & 0xFFFFFFFF, MVT::i32));
+ SDNode *Hi = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
+ CurDAG->getConstant(Imm >> 32, MVT::i32));
+ const SDValue Ops[] = {
+ CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
+ SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
+ SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32)
+ };
+
+ return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, SDLoc(N),
+ N->getValueType(0), Ops);
+ }
+
+ case AMDGPUISD::REGISTER_LOAD: {
if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
break;
SDValue Addr, Offset;
@@ -327,7 +411,6 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
Ops);
}
case AMDGPUISD::REGISTER_STORE: {
- const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
break;
SDValue Addr, Offset;
@@ -343,42 +426,98 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
CurDAG->getVTList(MVT::Other),
Ops);
}
+
+ case AMDGPUISD::BFE_I32:
+ case AMDGPUISD::BFE_U32: {
+ if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
+ break;
+
+ // There is a scalar version available, but unlike the vector version which
+ // has a separate operand for the offset and width, the scalar version packs
+ // the width and offset into a single operand. Try to move to the scalar
+ // version if the offsets are constant, so that we can try to keep extended
+ // loads of kernel arguments in SGPRs.
+
+ // TODO: Technically we could try to pattern match scalar bitshifts of
+ // dynamic values, but it's probably not useful.
+ ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
+ if (!Offset)
+ break;
+
+ ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
+ if (!Width)
+ break;
+
+ bool Signed = Opc == AMDGPUISD::BFE_I32;
+
+  // Transformation function: pack the offset and width of a BFE into
+  // the format expected by S_BFE_I32 / S_BFE_U32. In the second
+ // source, bits [5:0] contain the offset and bits [22:16] the width.
+
+ uint32_t OffsetVal = Offset->getZExtValue();
+ uint32_t WidthVal = Width->getZExtValue();
+
+ uint32_t PackedVal = OffsetVal | WidthVal << 16;
+
+ SDValue PackedOffsetWidth = CurDAG->getTargetConstant(PackedVal, MVT::i32);
+ return CurDAG->getMachineNode(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
+ SDLoc(N),
+ MVT::i32,
+ N->getOperand(0),
+ PackedOffsetWidth);
+
+ }
+ case AMDGPUISD::DIV_SCALE: {
+ return SelectDIV_SCALE(N);
+ }
}
return SelectCode(N);
}
-bool AMDGPUDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) {
- if (!ptr) {
+bool AMDGPUDAGToDAGISel::checkType(const Value *Ptr, unsigned AS) {
+ assert(AS != 0 && "Use checkPrivateAddress instead.");
+ if (!Ptr)
return false;
- }
- Type *ptrType = ptr->getType();
- return dyn_cast<PointerType>(ptrType)->getAddressSpace() == addrspace;
+
+ return Ptr->getType()->getPointerAddressSpace() == AS;
+}
+
+bool AMDGPUDAGToDAGISel::checkPrivateAddress(const MachineMemOperand *Op) {
+ if (Op->getPseudoValue())
+ return true;
+
+ if (PointerType *PT = dyn_cast<PointerType>(Op->getValue()->getType()))
+ return PT->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
+
+ return false;
}
bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
- return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
+ return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}
bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
- return (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS));
+ const Value *MemVal = N->getMemOperand()->getValue();
+ return (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
+ !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
+ !checkType(MemVal, AMDGPUAS::REGION_ADDRESS));
}
bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
- return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
+ return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}
bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
- return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
+ return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}
bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
- if (CbId == -1) {
- return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS);
- }
- return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
+ const Value *MemVal = N->getMemOperand()->getValue();
+ if (CbId == -1)
+ return checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS);
+
+ return checkType(MemVal, AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
}
bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
@@ -389,27 +528,26 @@ bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
return true;
}
}
- return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
+ return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}
bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
- return checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS);
+ return checkType(N->getMemOperand()->getValue(), AMDGPUAS::PARAM_I_ADDRESS);
}
bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
- return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
+ return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}
bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
- return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
+ return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}
bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
MachineMemOperand *MMO = N->getMemOperand();
- if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
+ if (checkPrivateAddress(N->getMemOperand())) {
if (MMO) {
- const Value *V = MMO->getValue();
- const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V);
+ const PseudoSourceValue *PSV = MMO->getPseudoValue();
if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
return true;
}
@@ -419,24 +557,34 @@ bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
}
bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
- if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
+ if (checkPrivateAddress(N->getMemOperand())) {
// Check to make sure we are not a constant pool load or a constant load
// that is marked as a private load
if (isCPLoad(N) || isConstantLoad(N, -1)) {
return false;
}
}
- if (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_D_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS)) {
+
+ const Value *MemVal = N->getMemOperand()->getValue();
+ if (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
+ !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
+ !checkType(MemVal, AMDGPUAS::REGION_ADDRESS) &&
+ !checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS) &&
+ !checkType(MemVal, AMDGPUAS::PARAM_D_ADDRESS) &&
+ !checkType(MemVal, AMDGPUAS::PARAM_I_ADDRESS)){
return true;
}
return false;
}
+bool AMDGPUDAGToDAGISel::isCFDepth0() const {
+ // FIXME: Figure out a way to use DominatorTree analysis here.
+ const BasicBlock *CurBlock = FuncInfo->MBB->getBasicBlock();
+ const Function *Fn = FuncInfo->Fn;
+ return &Fn->front() == CurBlock || &Fn->back() == CurBlock;
+}
+
+
const char *AMDGPUDAGToDAGISel::getPassName() const {
return "AMDGPU DAG->DAG Pattern Instruction Selection";
}
@@ -451,7 +599,7 @@ const char *AMDGPUDAGToDAGISel::getPassName() const {
//===----------------------------------------------------------------------===//
bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
- SDValue& IntPtr) {
+ SDValue& IntPtr) {
if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
return true;
@@ -461,7 +609,7 @@ bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
SDValue& BaseReg, SDValue &Offset) {
- if (!dyn_cast<ConstantSDNode>(Addr)) {
+ if (!isa<ConstantSDNode>(Addr)) {
BaseReg = Addr;
Offset = CurDAG->getIntPtrConstant(0, true);
return true;
@@ -471,7 +619,7 @@ bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
SDValue &Offset) {
- ConstantSDNode * IMMOffset;
+ ConstantSDNode *IMMOffset;
if (Addr.getOpcode() == ISD::ADD
&& (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
@@ -515,52 +663,225 @@ bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
return true;
}
-SDValue AMDGPUDAGToDAGISel::SimplifyI24(SDValue &Op) {
- APInt Demanded = APInt(32, 0x00FFFFFF);
- APInt KnownZero, KnownOne;
- TargetLowering::TargetLoweringOpt TLO(*CurDAG, true, true);
- const TargetLowering *TLI = getTargetLowering();
- if (TLI->SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) {
- CurDAG->ReplaceAllUsesWith(Op, TLO.New);
- CurDAG->RepositionNode(Op.getNode(), TLO.New.getNode());
- return SimplifyI24(TLO.New);
- } else {
- return Op;
+SDNode *AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
+ SDLoc DL(N);
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+
+ bool IsAdd = (N->getOpcode() == ISD::ADD);
+
+ SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
+ SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
+
+ SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
+ DL, MVT::i32, LHS, Sub0);
+ SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
+ DL, MVT::i32, LHS, Sub1);
+
+ SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
+ DL, MVT::i32, RHS, Sub0);
+ SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
+ DL, MVT::i32, RHS, Sub1);
+
+ SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);
+ SDValue AddLoArgs[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };
+
+
+ unsigned Opc = IsAdd ? AMDGPU::S_ADD_I32 : AMDGPU::S_SUB_I32;
+ unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
+
+ if (!isCFDepth0()) {
+ Opc = IsAdd ? AMDGPU::V_ADD_I32_e32 : AMDGPU::V_SUB_I32_e32;
+ CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e32 : AMDGPU::V_SUBB_U32_e32;
}
+
+ SDNode *AddLo = CurDAG->getMachineNode( Opc, DL, VTList, AddLoArgs);
+ SDValue Carry(AddLo, 1);
+ SDNode *AddHi
+ = CurDAG->getMachineNode(CarryOpc, DL, MVT::i32,
+ SDValue(Hi0, 0), SDValue(Hi1, 0), Carry);
+
+ SDValue Args[5] = {
+ CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
+ SDValue(AddLo,0),
+ Sub0,
+ SDValue(AddHi,0),
+ Sub1,
+ };
+ return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
+}
+
+SDNode *AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
+ SDLoc SL(N);
+ EVT VT = N->getValueType(0);
+
+ assert(VT == MVT::f32 || VT == MVT::f64);
+
+ unsigned Opc
+ = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;
+
+ const SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32);
+
+ SDValue Ops[] = {
+ N->getOperand(0),
+ N->getOperand(1),
+ N->getOperand(2),
+ Zero,
+ Zero,
+ Zero,
+ Zero
+ };
+
+ return CurDAG->SelectNodeTo(N, Opc, VT, MVT::i1, Ops);
}
-bool AMDGPUDAGToDAGISel::SelectI24(SDValue Op, SDValue &I24) {
+static SDValue wrapAddr64Rsrc(SelectionDAG *DAG, SDLoc DL, SDValue Ptr) {
+ return SDValue(DAG->getMachineNode(AMDGPU::SI_ADDR64_RSRC, DL, MVT::v4i32,
+ Ptr), 0);
+}
- assert(Op.getValueType() == MVT::i32);
+static bool isLegalMUBUFImmOffset(const ConstantSDNode *Imm) {
+ return isUInt<12>(Imm->getZExtValue());
+}
- if (CurDAG->ComputeNumSignBits(Op) == 9) {
- I24 = SimplifyI24(Op);
+bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &Ptr,
+ SDValue &Offset,
+ SDValue &ImmOffset) const {
+ SDLoc DL(Addr);
+
+ if (CurDAG->isBaseWithConstantOffset(Addr)) {
+ SDValue N0 = Addr.getOperand(0);
+ SDValue N1 = Addr.getOperand(1);
+ ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
+
+ if (isLegalMUBUFImmOffset(C1)) {
+
+ if (N0.getOpcode() == ISD::ADD) {
+ // (add (add N2, N3), C1)
+ SDValue N2 = N0.getOperand(0);
+ SDValue N3 = N0.getOperand(1);
+ Ptr = wrapAddr64Rsrc(CurDAG, DL, N2);
+ Offset = N3;
+ ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
+ return true;
+ }
+
+ // (add N0, C1)
+    Ptr = wrapAddr64Rsrc(CurDAG, DL, CurDAG->getTargetConstant(0, MVT::i64));
+ Offset = N0;
+ ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
+ return true;
+ }
+ }
+ if (Addr.getOpcode() == ISD::ADD) {
+ // (add N0, N1)
+ SDValue N0 = Addr.getOperand(0);
+ SDValue N1 = Addr.getOperand(1);
+ Ptr = wrapAddr64Rsrc(CurDAG, DL, N0);
+ Offset = N1;
+ ImmOffset = CurDAG->getTargetConstant(0, MVT::i16);
return true;
}
- return false;
+
+ // default case
+ Ptr = wrapAddr64Rsrc(CurDAG, DL, CurDAG->getConstant(0, MVT::i64));
+ Offset = Addr;
+ ImmOffset = CurDAG->getTargetConstant(0, MVT::i16);
+ return true;
}
-bool AMDGPUDAGToDAGISel::SelectU24(SDValue Op, SDValue &U24) {
- APInt KnownZero;
- APInt KnownOne;
- CurDAG->ComputeMaskedBits(Op, KnownZero, KnownOne);
+/// \brief Return a resource descriptor with the 'Add TID' bit enabled.
+/// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
+/// of the resource descriptor) to create an offset, which is added to the
+/// resource pointer.
+static SDValue buildScratchRSRC(SelectionDAG *DAG, SDLoc DL, SDValue Ptr) {
+
+ uint64_t Rsrc = AMDGPU::RSRC_DATA_FORMAT | AMDGPU::RSRC_TID_ENABLE |
+ 0xffffffff;
- assert (Op.getValueType() == MVT::i32);
+ SDValue PtrLo = DAG->getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
+ SDValue PtrHi = DAG->getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
+ SDValue DataLo = DAG->getTargetConstant(
+ Rsrc & APInt::getAllOnesValue(32).getZExtValue(), MVT::i32);
+ SDValue DataHi = DAG->getTargetConstant(Rsrc >> 32, MVT::i32);
+
+ const SDValue Ops[] = { PtrLo, PtrHi, DataLo, DataHi };
+ return SDValue(DAG->getMachineNode(AMDGPU::SI_BUFFER_RSRC, DL,
+ MVT::v4i32, Ops), 0);
+}
- // ANY_EXTEND and EXTLOAD operations can only be done on types smaller than
- // i32. These smaller types are legal to use with the i24 instructions.
- if ((KnownZero & APInt(KnownZero.getBitWidth(), 0xFF000000)) == 0xFF000000 ||
- Op.getOpcode() == ISD::ANY_EXTEND ||
- ISD::isEXTLoad(Op.getNode())) {
- U24 = SimplifyI24(Op);
+bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
+ SDValue &VAddr, SDValue &SOffset,
+ SDValue &ImmOffset) const {
+
+ SDLoc DL(Addr);
+ MachineFunction &MF = CurDAG->getMachineFunction();
+  const SIRegisterInfo *TRI =
+      static_cast<const SIRegisterInfo*>(MF.getTarget().getRegisterInfo());
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+
+ unsigned ScratchPtrReg =
+ TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_PTR);
+ unsigned ScratchOffsetReg =
+ TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_WAVE_OFFSET);
+
+  Rsrc = buildScratchRSRC(CurDAG, DL,
+      CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
+                             MRI.getLiveInVirtReg(ScratchPtrReg), MVT::i64));
+ SOffset = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
+ MRI.getLiveInVirtReg(ScratchOffsetReg), MVT::i32);
+
+ // (add n0, c1)
+ if (CurDAG->isBaseWithConstantOffset(Addr)) {
+ SDValue N1 = Addr.getOperand(1);
+ ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
+
+ if (isLegalMUBUFImmOffset(C1)) {
+ VAddr = Addr.getOperand(0);
+ ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
+ return true;
+ }
+ }
+
+ // (add FI, n0)
+ if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
+ isa<FrameIndexSDNode>(Addr.getOperand(0))) {
+ VAddr = Addr.getOperand(1);
+ ImmOffset = Addr.getOperand(0);
return true;
}
- return false;
+
+ // (FI)
+ if (isa<FrameIndexSDNode>(Addr)) {
+ VAddr = SDValue(CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32, DL, MVT::i32,
+ CurDAG->getConstant(0, MVT::i32)), 0);
+ ImmOffset = Addr;
+ return true;
+ }
+
+ // (node)
+ VAddr = Addr;
+ ImmOffset = CurDAG->getTargetConstant(0, MVT::i16);
+ return true;
+}
+
+bool AMDGPUDAGToDAGISel::SelectMUBUFAddr32(SDValue Addr, SDValue &SRsrc,
+ SDValue &VAddr, SDValue &SOffset,
+ SDValue &Offset, SDValue &Offen,
+ SDValue &Idxen, SDValue &GLC,
+ SDValue &SLC, SDValue &TFE) const {
+
+ GLC = CurDAG->getTargetConstant(0, MVT::i1);
+ SLC = CurDAG->getTargetConstant(0, MVT::i1);
+ TFE = CurDAG->getTargetConstant(0, MVT::i1);
+
+ Idxen = CurDAG->getTargetConstant(0, MVT::i1);
+ Offen = CurDAG->getTargetConstant(1, MVT::i1);
+
+ return SelectMUBUFScratch(Addr, SRsrc, VAddr, SOffset, Offset);
}
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
const AMDGPUTargetLowering& Lowering =
- (*(const AMDGPUTargetLowering*)getTargetLowering());
+ *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
bool IsModified = false;
do {
IsModified = false;
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUISelLowering.cpp b/contrib/llvm/lib/Target/R600/AMDGPUISelLowering.cpp
index 1029f30..5a46297b 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUISelLowering.cpp
+++ b/contrib/llvm/lib/Target/R600/AMDGPUISelLowering.cpp
@@ -16,9 +16,9 @@
#include "AMDGPUISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUFrameLowering.h"
+#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
-#include "AMDILIntrinsicInfo.h"
#include "R600MachineFunctionInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
@@ -27,24 +27,93 @@
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
using namespace llvm;
+
+namespace {
+
+/// Diagnostic information for unimplemented or unsupported feature reporting.
+class DiagnosticInfoUnsupported : public DiagnosticInfo {
+private:
+ const Twine &Description;
+ const Function &Fn;
+
+ static int KindID;
+
+ static int getKindID() {
+ if (KindID == 0)
+ KindID = llvm::getNextAvailablePluginDiagnosticKind();
+ return KindID;
+ }
+
+public:
+ DiagnosticInfoUnsupported(const Function &Fn, const Twine &Desc,
+ DiagnosticSeverity Severity = DS_Error)
+ : DiagnosticInfo(getKindID(), Severity),
+ Description(Desc),
+ Fn(Fn) { }
+
+ const Function &getFunction() const { return Fn; }
+ const Twine &getDescription() const { return Description; }
+
+ void print(DiagnosticPrinter &DP) const override {
+ DP << "unsupported " << getDescription() << " in " << Fn.getName();
+ }
+
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == getKindID();
+ }
+};
+
+int DiagnosticInfoUnsupported::KindID = 0;
+}
+
+
static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State) {
- unsigned Offset = State.AllocateStack(ValVT.getSizeInBits() / 8, ArgFlags.getOrigAlign());
- State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+ unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
+ ArgFlags.getOrigAlign());
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
return true;
}
#include "AMDGPUGenCallingConv.inc"
+// Find a larger type to do a load / store of a vector with.
+EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
+ unsigned StoreSize = VT.getStoreSizeInBits();
+ if (StoreSize <= 32)
+ return EVT::getIntegerVT(Ctx, StoreSize);
+
+ assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
+ return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
+}
+
+// Type for a vector that will be loaded to.
+EVT AMDGPUTargetLowering::getEquivalentLoadRegType(LLVMContext &Ctx, EVT VT) {
+ unsigned StoreSize = VT.getStoreSizeInBits();
+ if (StoreSize <= 32)
+ return EVT::getIntegerVT(Ctx, 32);
+
+ return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
+}
+
AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
TargetLowering(TM, new TargetLoweringObjectFileELF()) {
- // Initialize target lowering borrowed from AMDIL
- InitAMDILLowering();
+ Subtarget = &TM.getSubtarget<AMDGPUSubtarget>();
+
+ setOperationAction(ISD::Constant, MVT::i32, Legal);
+ setOperationAction(ISD::Constant, MVT::i64, Legal);
+ setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
+ setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
+
+ setOperationAction(ISD::BR_JT, MVT::Other, Expand);
+ setOperationAction(ISD::BRIND, MVT::Other, Expand);
// We need to custom lower some of the intrinsics
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
@@ -59,9 +128,7 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
setOperationAction(ISD::FRINT, MVT::f32, Legal);
setOperationAction(ISD::FROUND, MVT::f32, Legal);
-
- // The hardware supports ROTR, but not ROTL
- setOperationAction(ISD::ROTL, MVT::i32, Expand);
+ setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
// Lower floating point store/load to integer store/load to reduce the number
// of patterns in tablegen.
@@ -71,6 +138,9 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
setOperationAction(ISD::STORE, MVT::v2f32, Promote);
AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);
+ setOperationAction(ISD::STORE, MVT::i64, Promote);
+ AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
+
setOperationAction(ISD::STORE, MVT::v4f32, Promote);
AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);
@@ -83,6 +153,9 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
setOperationAction(ISD::STORE, MVT::f64, Promote);
AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);
+ setOperationAction(ISD::STORE, MVT::v2f64, Promote);
+ AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v2i64);
+
// Custom lowering of vector stores is required for local address space
// stores.
setOperationAction(ISD::STORE, MVT::v4i32, Custom);
@@ -93,16 +166,27 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);
+
// XXX: This can be change to Custom, once ExpandVectorStores can
// handle 64-bit stores.
setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
+ setTruncStoreAction(MVT::i64, MVT::i16, Expand);
+ setTruncStoreAction(MVT::i64, MVT::i8, Expand);
+ setTruncStoreAction(MVT::i64, MVT::i1, Expand);
+ setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
+ setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);
+
+
setOperationAction(ISD::LOAD, MVT::f32, Promote);
AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);
setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);
+ setOperationAction(ISD::LOAD, MVT::i64, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
+
setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);
@@ -115,10 +199,19 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
setOperationAction(ISD::LOAD, MVT::f64, Promote);
AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);
+ setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v2i64);
+
setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
- setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);
setLoadExtAction(ISD::EXTLOAD, MVT::v2i8, Expand);
setLoadExtAction(ISD::SEXTLOAD, MVT::v2i8, Expand);
@@ -135,27 +228,74 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
setOperationAction(ISD::BR_CC, MVT::i1, Expand);
- setOperationAction(ISD::FNEG, MVT::v2f32, Expand);
- setOperationAction(ISD::FNEG, MVT::v4f32, Expand);
+ if (Subtarget->getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
+ setOperationAction(ISD::FCEIL, MVT::f64, Custom);
+ setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
+ setOperationAction(ISD::FRINT, MVT::f64, Custom);
+ setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
+ }
+
+ if (!Subtarget->hasBFI()) {
+ // fcopysign can be done in a single instruction with BFI.
+ setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
+ }
- setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
+ setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
- setOperationAction(ISD::MUL, MVT::i64, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f32, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f16, Expand);
+
+ const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
+ for (MVT VT : ScalarIntVTs) {
+ setOperationAction(ISD::SREM, VT, Expand);
+ setOperationAction(ISD::SDIV, VT, Expand);
+
+ // GPU does not have divrem function for signed or unsigned.
+ setOperationAction(ISD::SDIVREM, VT, Custom);
+ setOperationAction(ISD::UDIVREM, VT, Custom);
+
+ // GPU does not have [S|U]MUL_LOHI functions as a single instruction.
+ setOperationAction(ISD::SMUL_LOHI, VT, Expand);
+ setOperationAction(ISD::UMUL_LOHI, VT, Expand);
+
+ setOperationAction(ISD::BSWAP, VT, Expand);
+ setOperationAction(ISD::CTTZ, VT, Expand);
+ setOperationAction(ISD::CTLZ, VT, Expand);
+ }
+
+ if (!Subtarget->hasBCNT(32))
+ setOperationAction(ISD::CTPOP, MVT::i32, Expand);
+
+ if (!Subtarget->hasBCNT(64))
+ setOperationAction(ISD::CTPOP, MVT::i64, Expand);
+ // The hardware supports 32-bit ROTR, but not ROTL.
+ setOperationAction(ISD::ROTL, MVT::i32, Expand);
+ setOperationAction(ISD::ROTL, MVT::i64, Expand);
+ setOperationAction(ISD::ROTR, MVT::i64, Expand);
+
+ setOperationAction(ISD::MUL, MVT::i64, Expand);
+ setOperationAction(ISD::MULHU, MVT::i64, Expand);
+ setOperationAction(ISD::MULHS, MVT::i64, Expand);
setOperationAction(ISD::UDIV, MVT::i32, Expand);
- setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
setOperationAction(ISD::UREM, MVT::i32, Expand);
- setOperationAction(ISD::VSELECT, MVT::v2f32, Expand);
- setOperationAction(ISD::VSELECT, MVT::v4f32, Expand);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
+ setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
- static const MVT::SimpleValueType IntTypes[] = {
+ if (!Subtarget->hasFFBH())
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
+
+ if (!Subtarget->hasFFBL())
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
+
+ static const MVT::SimpleValueType VectorIntTypes[] = {
MVT::v2i32, MVT::v4i32
};
- const size_t NumIntTypes = array_lengthof(IntTypes);
- for (unsigned int x = 0; x < NumIntTypes; ++x) {
- MVT::SimpleValueType VT = IntTypes[x];
- //Expand the following operations for the current type by default
+ for (MVT VT : VectorIntTypes) {
+ // Expand the following operations for the current type by default.
setOperationAction(ISD::ADD, VT, Expand);
setOperationAction(ISD::AND, VT, Expand);
setOperationAction(ISD::FP_TO_SINT, VT, Expand);
@@ -163,33 +303,94 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
setOperationAction(ISD::MUL, VT, Expand);
setOperationAction(ISD::OR, VT, Expand);
setOperationAction(ISD::SHL, VT, Expand);
- setOperationAction(ISD::SINT_TO_FP, VT, Expand);
- setOperationAction(ISD::SRL, VT, Expand);
setOperationAction(ISD::SRA, VT, Expand);
+ setOperationAction(ISD::SRL, VT, Expand);
+ setOperationAction(ISD::ROTL, VT, Expand);
+ setOperationAction(ISD::ROTR, VT, Expand);
setOperationAction(ISD::SUB, VT, Expand);
- setOperationAction(ISD::UDIV, VT, Expand);
+ setOperationAction(ISD::SINT_TO_FP, VT, Expand);
setOperationAction(ISD::UINT_TO_FP, VT, Expand);
+ // TODO: Implement custom UREM / SREM routines.
+ setOperationAction(ISD::SDIV, VT, Expand);
+ setOperationAction(ISD::UDIV, VT, Expand);
+ setOperationAction(ISD::SREM, VT, Expand);
setOperationAction(ISD::UREM, VT, Expand);
+ setOperationAction(ISD::SMUL_LOHI, VT, Expand);
+ setOperationAction(ISD::UMUL_LOHI, VT, Expand);
+ setOperationAction(ISD::SDIVREM, VT, Custom);
+ setOperationAction(ISD::UDIVREM, VT, Custom);
+ setOperationAction(ISD::ADDC, VT, Expand);
+ setOperationAction(ISD::SUBC, VT, Expand);
+ setOperationAction(ISD::ADDE, VT, Expand);
+ setOperationAction(ISD::SUBE, VT, Expand);
+ setOperationAction(ISD::SELECT, VT, Expand);
setOperationAction(ISD::VSELECT, VT, Expand);
+ setOperationAction(ISD::SELECT_CC, VT, Expand);
setOperationAction(ISD::XOR, VT, Expand);
+ setOperationAction(ISD::BSWAP, VT, Expand);
+ setOperationAction(ISD::CTPOP, VT, Expand);
+ setOperationAction(ISD::CTTZ, VT, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
+ setOperationAction(ISD::CTLZ, VT, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
+ setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
}
- static const MVT::SimpleValueType FloatTypes[] = {
+ static const MVT::SimpleValueType FloatVectorTypes[] = {
MVT::v2f32, MVT::v4f32
};
- const size_t NumFloatTypes = array_lengthof(FloatTypes);
- for (unsigned int x = 0; x < NumFloatTypes; ++x) {
- MVT::SimpleValueType VT = FloatTypes[x];
+ for (MVT VT : FloatVectorTypes) {
setOperationAction(ISD::FABS, VT, Expand);
setOperationAction(ISD::FADD, VT, Expand);
+ setOperationAction(ISD::FCEIL, VT, Expand);
+ setOperationAction(ISD::FCOS, VT, Expand);
setOperationAction(ISD::FDIV, VT, Expand);
+ setOperationAction(ISD::FEXP2, VT, Expand);
+ setOperationAction(ISD::FLOG2, VT, Expand);
+ setOperationAction(ISD::FPOW, VT, Expand);
setOperationAction(ISD::FFLOOR, VT, Expand);
+ setOperationAction(ISD::FTRUNC, VT, Expand);
setOperationAction(ISD::FMUL, VT, Expand);
+ setOperationAction(ISD::FMA, VT, Expand);
setOperationAction(ISD::FRINT, VT, Expand);
+ setOperationAction(ISD::FNEARBYINT, VT, Expand);
setOperationAction(ISD::FSQRT, VT, Expand);
+ setOperationAction(ISD::FSIN, VT, Expand);
setOperationAction(ISD::FSUB, VT, Expand);
+ setOperationAction(ISD::FNEG, VT, Expand);
+ setOperationAction(ISD::SELECT, VT, Expand);
+ setOperationAction(ISD::VSELECT, VT, Expand);
+ setOperationAction(ISD::SELECT_CC, VT, Expand);
+ setOperationAction(ISD::FCOPYSIGN, VT, Expand);
+ setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
}
+
+ setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
+ setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);
+
+ setTargetDAGCombine(ISD::MUL);
+ setTargetDAGCombine(ISD::SELECT_CC);
+ setTargetDAGCombine(ISD::STORE);
+
+ setSchedulingPreference(Sched::RegPressure);
+ setJumpIsExpensive(true);
+
+ setSelectIsExpensive(false);
+ PredictableSelectIsExpensive = false;
+
+ // There are no integer divide instructions, and these expand to a pretty
+ // large sequence of instructions.
+ setIntDivIsCheap(false);
+ setPow2DivIsCheap(false);
+
+ // TODO: Investigate this when 64-bit divides are implemented.
+ addBypassSlowDiv(64, 32);
+
+ // FIXME: Need to really handle these.
+ MaxStoresPerMemcpy = 4096;
+ MaxStoresPerMemmove = 4096;
+ MaxStoresPerMemset = 4096;
}
//===----------------------------------------------------------------------===//
@@ -200,6 +401,23 @@ MVT AMDGPUTargetLowering::getVectorIdxTy() const {
return MVT::i32;
}
+bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
+ return true;
+}
+
+// The backend supports 32 and 64 bit floating point immediates.
+// FIXME: Why are we reporting vectors of FP immediates as legal?
+bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
+ EVT ScalarVT = VT.getScalarType();
+ return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64);
+}
+
+// We don't want to shrink f64 / f32 constants.
+bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
+ EVT ScalarVT = VT.getScalarType();
+ return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
+}
+
bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
EVT CastTy) const {
if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
@@ -227,6 +445,47 @@ bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
return VT == MVT::f32;
}
+bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
+ // Truncate is just accessing a subregister.
+ return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
+}
+
+bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
+ // Truncate is just accessing a subregister.
+ return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
+ (Dest->getPrimitiveSizeInBits() % 32 == 0);
+}
+
+bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
+ const DataLayout *DL = getDataLayout();
+ unsigned SrcSize = DL->getTypeSizeInBits(Src->getScalarType());
+ unsigned DestSize = DL->getTypeSizeInBits(Dest->getScalarType());
+
+ return SrcSize == 32 && DestSize == 64;
+}
+
+bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
+ // Any register load of a 64-bit value really requires 2 32-bit moves. For all
+ // practical purposes, the extra mov 0 to load a 64-bit is free. As used,
+  // this will enable reducing 64-bit operations to 32-bit, which is always
+ // good.
+ return Src == MVT::i32 && Dest == MVT::i64;
+}
+
+bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
+ return isZExtFree(Val.getValueType(), VT2);
+}
+
+bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
+ // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
+ // limited number of native 64-bit operations. Shrinking an operation to fit
+ // in a single 32-bit register should always be helpful. As currently used,
+ // this is much less general than the name suggests, and is only used in
+ // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
+ // not profitable, and may actually be harmful.
+ return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
+}
+
//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//
@@ -251,67 +510,243 @@ SDValue AMDGPUTargetLowering::LowerReturn(
// Target specific lowering
//===---------------------------------------------------------------------===//
-SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
- const {
+SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const {
+ SDValue Callee = CLI.Callee;
+ SelectionDAG &DAG = CLI.DAG;
+
+ const Function &Fn = *DAG.getMachineFunction().getFunction();
+
+ StringRef FuncName("<unknown>");
+
+ if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
+ FuncName = G->getSymbol();
+ else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
+ FuncName = G->getGlobal()->getName();
+
+ DiagnosticInfoUnsupported NoCalls(Fn, "call to function " + FuncName);
+ DAG.getContext()->diagnose(NoCalls);
+ return SDValue();
+}
+
+SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
+ SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default:
Op.getNode()->dump();
- assert(0 && "Custom lowering code for this"
- "instruction is not implemented yet!");
+ llvm_unreachable("Custom lowering code for this"
+ "instruction is not implemented yet!");
break;
- // AMDIL DAG lowering
- case ISD::SDIV: return LowerSDIV(Op, DAG);
- case ISD::SREM: return LowerSREM(Op, DAG);
case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
- case ISD::BRCOND: return LowerBRCOND(Op, DAG);
- // AMDGPU DAG lowering
case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
+ case ISD::SDIV: return LowerSDIV(Op, DAG);
+ case ISD::SREM: return LowerSREM(Op, DAG);
case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
+ case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
+ case ISD::FCEIL: return LowerFCEIL(Op, DAG);
+ case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
+ case ISD::FRINT: return LowerFRINT(Op, DAG);
+ case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
+ case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
}
return Op;
}
+void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
+ SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const {
+ switch (N->getOpcode()) {
+ case ISD::SIGN_EXTEND_INREG:
+ // Different parts of legalization seem to interpret which type of
+ // sign_extend_inreg is the one to check for custom lowering. The extended
+ // from type is what really matters, but some places check for custom
+ // lowering of the result type. This results in trying to use
+ // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
+ // nothing here and let the illegal result integer be handled normally.
+ return;
+ case ISD::LOAD: {
+ SDNode *Node = LowerLOAD(SDValue(N, 0), DAG).getNode();
+ if (!Node)
+ return;
+
+ Results.push_back(SDValue(Node, 0));
+ Results.push_back(SDValue(Node, 1));
+ // XXX: LLVM seems not to replace Chain Value inside CustomWidenLowerNode
+ // function
+ DAG.ReplaceAllUsesOfValueWith(SDValue(N,1), SDValue(Node, 1));
+ return;
+ }
+ case ISD::STORE: {
+ SDValue Lowered = LowerSTORE(SDValue(N, 0), DAG);
+ if (Lowered.getNode())
+ Results.push_back(Lowered);
+ return;
+ }
+ default:
+ return;
+ }
+}
+
+// FIXME: This implements accesses to initialized globals in the constant
+// address space by copying them to private and accessing that. It does not
+// properly handle illegal types or vectors. The private vector loads are not
+// scalarized, and the illegal scalars hit an assertion. This technique will not
+// work well with large initializers, and this should eventually be
+// removed. Initialized globals should be placed into a data section that the
+// runtime will load into a buffer before the kernel is executed. Uses of the
+// global need to be replaced with a pointer loaded from an implicit kernel
+// argument into this buffer holding the copy of the data, which will remove the
+// need for any of this.
+SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
+ const GlobalValue *GV,
+ const SDValue &InitPtr,
+ SDValue Chain,
+ SelectionDAG &DAG) const {
+ const DataLayout *TD = getTargetMachine().getDataLayout();
+ SDLoc DL(InitPtr);
+ Type *InitTy = Init->getType();
+
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
+ EVT VT = EVT::getEVT(InitTy);
+ PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
+ return DAG.getStore(Chain, DL, DAG.getConstant(*CI, VT), InitPtr,
+ MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
+ TD->getPrefTypeAlignment(InitTy));
+ }
+
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
+ EVT VT = EVT::getEVT(CFP->getType());
+ PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
+ return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, VT), InitPtr,
+ MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
+ TD->getPrefTypeAlignment(CFP->getType()));
+ }
+
+ if (StructType *ST = dyn_cast<StructType>(InitTy)) {
+ const StructLayout *SL = TD->getStructLayout(ST);
+
+ EVT PtrVT = InitPtr.getValueType();
+ SmallVector<SDValue, 8> Chains;
+
+ for (unsigned I = 0, N = ST->getNumElements(); I != N; ++I) {
+ SDValue Offset = DAG.getConstant(SL->getElementOffset(I), PtrVT);
+ SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);
+
+ Constant *Elt = Init->getAggregateElement(I);
+ Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
+ }
+
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
+ }
+
+ if (SequentialType *SeqTy = dyn_cast<SequentialType>(InitTy)) {
+ EVT PtrVT = InitPtr.getValueType();
+
+ unsigned NumElements;
+ if (ArrayType *AT = dyn_cast<ArrayType>(SeqTy))
+ NumElements = AT->getNumElements();
+ else if (VectorType *VT = dyn_cast<VectorType>(SeqTy))
+ NumElements = VT->getNumElements();
+ else
+ llvm_unreachable("Unexpected type");
+
+ unsigned EltSize = TD->getTypeAllocSize(SeqTy->getElementType());
+ SmallVector<SDValue, 8> Chains;
+ for (unsigned i = 0; i < NumElements; ++i) {
+ SDValue Offset = DAG.getConstant(i * EltSize, PtrVT);
+ SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);
+
+ Constant *Elt = Init->getAggregateElement(i);
+ Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
+ }
+
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
+ }
+
+ if (isa<UndefValue>(Init)) {
+ EVT VT = EVT::getEVT(InitTy);
+ PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
+ return DAG.getStore(Chain, DL, DAG.getUNDEF(VT), InitPtr,
+ MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
+ TD->getPrefTypeAlignment(InitTy));
+ }
+
+ Init->dump();
+ llvm_unreachable("Unhandled constant initializer");
+}
+
SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
SDValue Op,
SelectionDAG &DAG) const {
const DataLayout *TD = getTargetMachine().getDataLayout();
GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
+ const GlobalValue *GV = G->getGlobal();
- assert(G->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS);
- // XXX: What does the value of G->getOffset() mean?
- assert(G->getOffset() == 0 &&
+ switch (G->getAddressSpace()) {
+ default: llvm_unreachable("Global Address lowering not implemented for this "
+ "address space");
+ case AMDGPUAS::LOCAL_ADDRESS: {
+ // XXX: What does the value of G->getOffset() mean?
+ assert(G->getOffset() == 0 &&
"Do not know what to do with an non-zero offset");
- const GlobalValue *GV = G->getGlobal();
+ unsigned Offset;
+ if (MFI->LocalMemoryObjects.count(GV) == 0) {
+ uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
+ Offset = MFI->LDSSize;
+ MFI->LocalMemoryObjects[GV] = Offset;
+ // XXX: Account for alignment?
+ MFI->LDSSize += Size;
+ } else {
+ Offset = MFI->LocalMemoryObjects[GV];
+ }
- unsigned Offset;
- if (MFI->LocalMemoryObjects.count(GV) == 0) {
- uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
- Offset = MFI->LDSSize;
- MFI->LocalMemoryObjects[GV] = Offset;
- // XXX: Account for alignment?
- MFI->LDSSize += Size;
- } else {
- Offset = MFI->LocalMemoryObjects[GV];
+ return DAG.getConstant(Offset, getPointerTy(G->getAddressSpace()));
}
+ case AMDGPUAS::CONSTANT_ADDRESS: {
+ MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
+ Type *EltType = GV->getType()->getElementType();
+ unsigned Size = TD->getTypeAllocSize(EltType);
+ unsigned Alignment = TD->getPrefTypeAlignment(EltType);
+
+ MVT PrivPtrVT = getPointerTy(AMDGPUAS::PRIVATE_ADDRESS);
+ MVT ConstPtrVT = getPointerTy(AMDGPUAS::CONSTANT_ADDRESS);
+
+ int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
+ SDValue InitPtr = DAG.getFrameIndex(FI, PrivPtrVT);
+
+ const GlobalVariable *Var = cast<GlobalVariable>(GV);
+ if (!Var->hasInitializer()) {
+ // This has no use, but bugpoint will hit it.
+ return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
+ }
- return DAG.getConstant(Offset, getPointerTy(G->getAddressSpace()));
-}
+ const Constant *Init = Var->getInitializer();
+ SmallVector<SDNode*, 8> WorkList;
-void AMDGPUTargetLowering::ExtractVectorElements(SDValue Op, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &Args,
- unsigned Start,
- unsigned Count) const {
- EVT VT = Op.getValueType();
- for (unsigned i = Start, e = Start + Count; i != e; ++i) {
- Args.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op),
- VT.getVectorElementType(),
- Op, DAG.getConstant(i, MVT::i32)));
+ for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
+ E = DAG.getEntryNode()->use_end(); I != E; ++I) {
+ if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
+ continue;
+ WorkList.push_back(*I);
+ }
+ SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
+ for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
+ E = WorkList.end(); I != E; ++I) {
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(Chain);
+ for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
+ Ops.push_back((*I)->getOperand(i));
+ }
+ DAG.UpdateNodeOperands(*I, Ops);
+ }
+ return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
+ }
}
}
@@ -321,26 +756,22 @@ SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
SDValue A = Op.getOperand(0);
SDValue B = Op.getOperand(1);
- ExtractVectorElements(A, DAG, Args, 0,
- A.getValueType().getVectorNumElements());
- ExtractVectorElements(B, DAG, Args, 0,
- B.getValueType().getVectorNumElements());
+ DAG.ExtractVectorElements(A, Args);
+ DAG.ExtractVectorElements(B, Args);
- return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
- &Args[0], Args.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}
SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
SelectionDAG &DAG) const {
SmallVector<SDValue, 8> Args;
- EVT VT = Op.getValueType();
unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
- ExtractVectorElements(Op.getOperand(0), DAG, Args, Start,
- VT.getVectorNumElements());
+ EVT VT = Op.getValueType();
+ DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
+ VT.getVectorNumElements());
- return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
- &Args[0], Args.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}
SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
@@ -350,8 +781,7 @@ SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
const AMDGPUFrameLowering *TFL =
static_cast<const AMDGPUFrameLowering*>(getTargetMachine().getFrameLowering());
- FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op);
- assert(FIN);
+ FrameIndexSDNode *FIN = cast<FrameIndexSDNode>(Op);
unsigned FrameIndex = FIN->getIndex();
unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
@@ -367,41 +797,140 @@ SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
switch (IntrinsicID) {
default: return Op;
- case AMDGPUIntrinsic::AMDIL_abs:
+ case AMDGPUIntrinsic::AMDGPU_abs:
+ case AMDGPUIntrinsic::AMDIL_abs: // Legacy name.
return LowerIntrinsicIABS(Op, DAG);
- case AMDGPUIntrinsic::AMDIL_exp:
- return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
case AMDGPUIntrinsic::AMDGPU_lrp:
return LowerIntrinsicLRP(Op, DAG);
- case AMDGPUIntrinsic::AMDIL_fraction:
+ case AMDGPUIntrinsic::AMDGPU_fract:
+ case AMDGPUIntrinsic::AMDIL_fraction: // Legacy name.
return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
- case AMDGPUIntrinsic::AMDIL_max:
- return DAG.getNode(AMDGPUISD::FMAX, DL, VT, Op.getOperand(1),
- Op.getOperand(2));
+
+ case AMDGPUIntrinsic::AMDGPU_clamp:
+ case AMDGPUIntrinsic::AMDIL_clamp: // Legacy name.
+ return DAG.getNode(AMDGPUISD::CLAMP, DL, VT,
+ Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
+
+ case Intrinsic::AMDGPU_div_scale: {
+ // 3rd parameter required to be a constant.
+ const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
+ if (!Param)
+ return DAG.getUNDEF(VT);
+
+ // Translate to the operands expected by the machine instruction. The
+ // first parameter must be the same as the first instruction.
+ SDValue Numerator = Op.getOperand(1);
+ SDValue Denominator = Op.getOperand(2);
+ SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;
+
+ return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, VT,
+ Src0, Denominator, Numerator);
+ }
+
+ case Intrinsic::AMDGPU_div_fmas:
+ return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
+ Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
+
+ case Intrinsic::AMDGPU_div_fixup:
+ return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
+ Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
+
+ case Intrinsic::AMDGPU_trig_preop:
+ return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
+ Op.getOperand(1), Op.getOperand(2));
+
+ case Intrinsic::AMDGPU_rcp:
+ return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
+
+ case Intrinsic::AMDGPU_rsq:
+ return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
+
+ case AMDGPUIntrinsic::AMDGPU_legacy_rsq:
+ return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
+
+ case Intrinsic::AMDGPU_rsq_clamped:
+ return DAG.getNode(AMDGPUISD::RSQ_CLAMPED, DL, VT, Op.getOperand(1));
+
case AMDGPUIntrinsic::AMDGPU_imax:
return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Op.getOperand(1),
Op.getOperand(2));
case AMDGPUIntrinsic::AMDGPU_umax:
return DAG.getNode(AMDGPUISD::UMAX, DL, VT, Op.getOperand(1),
Op.getOperand(2));
- case AMDGPUIntrinsic::AMDIL_min:
- return DAG.getNode(AMDGPUISD::FMIN, DL, VT, Op.getOperand(1),
- Op.getOperand(2));
case AMDGPUIntrinsic::AMDGPU_imin:
return DAG.getNode(AMDGPUISD::SMIN, DL, VT, Op.getOperand(1),
Op.getOperand(2));
case AMDGPUIntrinsic::AMDGPU_umin:
return DAG.getNode(AMDGPUISD::UMIN, DL, VT, Op.getOperand(1),
Op.getOperand(2));
- case AMDGPUIntrinsic::AMDIL_round_nearest:
+
+ case AMDGPUIntrinsic::AMDGPU_umul24:
+ return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT,
+ Op.getOperand(1), Op.getOperand(2));
+
+ case AMDGPUIntrinsic::AMDGPU_imul24:
+ return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT,
+ Op.getOperand(1), Op.getOperand(2));
+
+ case AMDGPUIntrinsic::AMDGPU_umad24:
+ return DAG.getNode(AMDGPUISD::MAD_U24, DL, VT,
+ Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
+
+ case AMDGPUIntrinsic::AMDGPU_imad24:
+ return DAG.getNode(AMDGPUISD::MAD_I24, DL, VT,
+ Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
+
+ case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte0:
+ return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Op.getOperand(1));
+
+ case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte1:
+ return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE1, DL, VT, Op.getOperand(1));
+
+ case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte2:
+ return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE2, DL, VT, Op.getOperand(1));
+
+ case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte3:
+ return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE3, DL, VT, Op.getOperand(1));
+
+ case AMDGPUIntrinsic::AMDGPU_bfe_i32:
+ return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
+ Op.getOperand(1),
+ Op.getOperand(2),
+ Op.getOperand(3));
+
+ case AMDGPUIntrinsic::AMDGPU_bfe_u32:
+ return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
+ Op.getOperand(1),
+ Op.getOperand(2),
+ Op.getOperand(3));
+
+ case AMDGPUIntrinsic::AMDGPU_bfi:
+ return DAG.getNode(AMDGPUISD::BFI, DL, VT,
+ Op.getOperand(1),
+ Op.getOperand(2),
+ Op.getOperand(3));
+
+ case AMDGPUIntrinsic::AMDGPU_bfm:
+ return DAG.getNode(AMDGPUISD::BFM, DL, VT,
+ Op.getOperand(1),
+ Op.getOperand(2));
+
+ case AMDGPUIntrinsic::AMDGPU_brev:
+ return DAG.getNode(AMDGPUISD::BREV, DL, VT, Op.getOperand(1));
+
+ case AMDGPUIntrinsic::AMDIL_exp: // Legacy name.
+ return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
+
+ case AMDGPUIntrinsic::AMDIL_round_nearest: // Legacy name.
return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
+ case AMDGPUIntrinsic::AMDGPU_trunc: // Legacy name.
+ return DAG.getNode(ISD::FTRUNC, DL, VT, Op.getOperand(1));
}
}
///IABS(a) = SMAX(sub(0, a), a)
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
- SelectionDAG &DAG) const {
-
+ SelectionDAG &DAG) const {
SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
@@ -413,7 +942,7 @@ SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
- SelectionDAG &DAG) const {
+ SelectionDAG &DAG) const {
SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
@@ -427,16 +956,16 @@ SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
}
/// \brief Generate Min/Max node
-SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op,
- SelectionDAG &DAG) const {
- SDLoc DL(Op);
- EVT VT = Op.getValueType();
+SDValue AMDGPUTargetLowering::CombineMinMax(SDNode *N,
+ SelectionDAG &DAG) const {
+ SDLoc DL(N);
+ EVT VT = N->getValueType(0);
- SDValue LHS = Op.getOperand(0);
- SDValue RHS = Op.getOperand(1);
- SDValue True = Op.getOperand(2);
- SDValue False = Op.getOperand(3);
- SDValue CC = Op.getOperand(4);
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+ SDValue True = N->getOperand(2);
+ SDValue False = N->getOperand(3);
+ SDValue CC = N->getOperand(4);
if (VT != MVT::f32 ||
!((LHS == True && RHS == False) || (LHS == False && RHS == True))) {
@@ -457,17 +986,15 @@ SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op,
case ISD::SETTRUE2:
case ISD::SETUO:
case ISD::SETO:
- assert(0 && "Operation should already be optimised !");
+ llvm_unreachable("Operation should already be optimised!");
case ISD::SETULE:
case ISD::SETULT:
case ISD::SETOLE:
case ISD::SETOLT:
case ISD::SETLE:
case ISD::SETLT: {
- if (LHS == True)
- return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
- else
- return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
+ unsigned Opc = (LHS == True) ? AMDGPUISD::FMIN : AMDGPUISD::FMAX;
+ return DAG.getNode(Opc, DL, VT, LHS, RHS);
}
case ISD::SETGT:
case ISD::SETGE:
@@ -475,89 +1002,105 @@ SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op,
case ISD::SETOGE:
case ISD::SETUGT:
case ISD::SETOGT: {
- if (LHS == True)
- return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
- else
- return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
+ unsigned Opc = (LHS == True) ? AMDGPUISD::FMAX : AMDGPUISD::FMIN;
+ return DAG.getNode(Opc, DL, VT, LHS, RHS);
}
case ISD::SETCC_INVALID:
- assert(0 && "Invalid setcc condcode !");
+ llvm_unreachable("Invalid setcc condcode!");
}
- return Op;
+ return SDValue();
}
SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue &Op,
SelectionDAG &DAG) const {
LoadSDNode *Load = dyn_cast<LoadSDNode>(Op);
EVT MemEltVT = Load->getMemoryVT().getVectorElementType();
+ EVT LoadVT = Op.getValueType();
EVT EltVT = Op.getValueType().getVectorElementType();
EVT PtrVT = Load->getBasePtr().getValueType();
+
unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
SmallVector<SDValue, 8> Loads;
+ SmallVector<SDValue, 8> Chains;
+
SDLoc SL(Op);
for (unsigned i = 0, e = NumElts; i != e; ++i) {
SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8), PtrVT));
- Loads.push_back(DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
- Load->getChain(), Ptr,
- MachinePointerInfo(Load->getMemOperand()->getValue()),
- MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
- Load->getAlignment()));
+
+ SDValue NewLoad
+ = DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
+ Load->getChain(), Ptr,
+ MachinePointerInfo(Load->getMemOperand()->getValue()),
+ MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
+ Load->getAlignment());
+ Loads.push_back(NewLoad.getValue(0));
+ Chains.push_back(NewLoad.getValue(1));
}
- return DAG.getNode(ISD::BUILD_VECTOR, SL, Op.getValueType(), &Loads[0],
- Loads.size());
+
+ SDValue Ops[] = {
+ DAG.getNode(ISD::BUILD_VECTOR, SL, LoadVT, Loads),
+ DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains)
+ };
+
+ return DAG.getMergeValues(Ops, SL);
}
SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
SelectionDAG &DAG) const {
- StoreSDNode *Store = dyn_cast<StoreSDNode>(Op);
+ StoreSDNode *Store = cast<StoreSDNode>(Op);
EVT MemVT = Store->getMemoryVT();
unsigned MemBits = MemVT.getSizeInBits();
- // Byte stores are really expensive, so if possible, try to pack
- // 32-bit vector truncatating store into an i32 store.
- // XXX: We could also handle optimize other vector bitwidths
+  // Byte stores are really expensive, so if possible, try to pack a 32-bit vector
+  // truncating store into an i32 store.
+  // XXX: We could also optimize other vector bitwidths.
if (!MemVT.isVector() || MemBits > 32) {
return SDValue();
}
SDLoc DL(Op);
- const SDValue &Value = Store->getValue();
+ SDValue Value = Store->getValue();
EVT VT = Value.getValueType();
- const SDValue &Ptr = Store->getBasePtr();
+ EVT ElemVT = VT.getVectorElementType();
+ SDValue Ptr = Store->getBasePtr();
EVT MemEltVT = MemVT.getVectorElementType();
unsigned MemEltBits = MemEltVT.getSizeInBits();
unsigned MemNumElements = MemVT.getVectorNumElements();
- EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
- SDValue Mask;
- switch(MemEltBits) {
- case 8:
- Mask = DAG.getConstant(0xFF, PackedVT);
- break;
- case 16:
- Mask = DAG.getConstant(0xFFFF, PackedVT);
- break;
- default:
- llvm_unreachable("Cannot lower this vector store");
- }
+ unsigned PackedSize = MemVT.getStoreSizeInBits();
+ SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, MVT::i32);
+
+ assert(Value.getValueType().getScalarSizeInBits() >= 32);
+
SDValue PackedValue;
for (unsigned i = 0; i < MemNumElements; ++i) {
- EVT ElemVT = VT.getVectorElementType();
SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
DAG.getConstant(i, MVT::i32));
- Elt = DAG.getZExtOrTrunc(Elt, DL, PackedVT);
- Elt = DAG.getNode(ISD::AND, DL, PackedVT, Elt, Mask);
- SDValue Shift = DAG.getConstant(MemEltBits * i, PackedVT);
- Elt = DAG.getNode(ISD::SHL, DL, PackedVT, Elt, Shift);
+ Elt = DAG.getZExtOrTrunc(Elt, DL, MVT::i32);
+ Elt = DAG.getNode(ISD::AND, DL, MVT::i32, Elt, Mask); // getZeroExtendInReg
+
+ SDValue Shift = DAG.getConstant(MemEltBits * i, MVT::i32);
+ Elt = DAG.getNode(ISD::SHL, DL, MVT::i32, Elt, Shift);
+
if (i == 0) {
PackedValue = Elt;
} else {
- PackedValue = DAG.getNode(ISD::OR, DL, PackedVT, PackedValue, Elt);
+ PackedValue = DAG.getNode(ISD::OR, DL, MVT::i32, PackedValue, Elt);
}
}
+
+ if (PackedSize < 32) {
+ EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), PackedSize);
+ return DAG.getTruncStore(Store->getChain(), DL, PackedValue, Ptr,
+ Store->getMemOperand()->getPointerInfo(),
+ PackedVT,
+ Store->isNonTemporal(), Store->isVolatile(),
+ Store->getAlignment());
+ }
+
return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
- MachinePointerInfo(Store->getMemOperand()->getValue()),
+ Store->getMemOperand()->getPointerInfo(),
Store->isVolatile(), Store->isNonTemporal(),
Store->getAlignment());
}
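A minimal standalone sketch of the packing done by MergeVectorStore above, assuming a v4i8-style truncating store; packVector is an illustrative helper name, not an LLVM API.

#include <cassert>
#include <cstdint>

// Pack 'numElts' elements into one 32-bit word, keeping the low 'eltBits' bits of
// each element and placing element i at bit offset i * eltBits, mirroring the
// zext/trunc + AND + SHL + OR loop in MergeVectorStore.
static uint32_t packVector(const uint32_t *elts, unsigned numElts, unsigned eltBits) {
  assert(numElts * eltBits <= 32 && eltBits < 32 && "packed value must fit in an i32");
  const uint32_t mask = (1u << eltBits) - 1;
  uint32_t packed = 0;
  for (unsigned i = 0; i != numElts; ++i)
    packed |= (elts[i] & mask) << (eltBits * i);
  return packed;
}

int main() {
  uint32_t v[4] = {0x1AA, 0x2BB, 0x3CC, 0x4DD}; // high bits are dropped by the mask
  assert(packVector(v, 4, 8) == 0xDDCCBBAAu);
  return 0;
}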
@@ -585,34 +1128,404 @@ SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
MemEltVT, Store->isVolatile(), Store->isNonTemporal(),
Store->getAlignment()));
}
- return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, &Chains[0], NumElts);
+ return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains);
+}
+
+SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ LoadSDNode *Load = cast<LoadSDNode>(Op);
+ ISD::LoadExtType ExtType = Load->getExtensionType();
+ EVT VT = Op.getValueType();
+ EVT MemVT = Load->getMemoryVT();
+
+ if (ExtType != ISD::NON_EXTLOAD && !VT.isVector() && VT.getSizeInBits() > 32) {
+ // We can do the extload to 32-bits, and then need to separately extend to
+ // 64-bits.
+
+ SDValue ExtLoad32 = DAG.getExtLoad(ExtType, DL, MVT::i32,
+ Load->getChain(),
+ Load->getBasePtr(),
+ MemVT,
+ Load->getMemOperand());
+
+ SDValue Ops[] = {
+ DAG.getNode(ISD::getExtForLoadExtType(ExtType), DL, VT, ExtLoad32),
+ ExtLoad32.getValue(1)
+ };
+
+ return DAG.getMergeValues(Ops, DL);
+ }
+
+ if (ExtType == ISD::NON_EXTLOAD && VT.getSizeInBits() < 32) {
+ assert(VT == MVT::i1 && "Only i1 non-extloads expected");
+ // FIXME: Copied from PPC
+ // First, load into 32 bits, then truncate to 1 bit.
+
+ SDValue Chain = Load->getChain();
+ SDValue BasePtr = Load->getBasePtr();
+ MachineMemOperand *MMO = Load->getMemOperand();
+
+ SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
+ BasePtr, MVT::i8, MMO);
+
+ SDValue Ops[] = {
+ DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD),
+ NewLD.getValue(1)
+ };
+
+ return DAG.getMergeValues(Ops, DL);
+ }
+
+ if (Subtarget->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS ||
+ Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
+ ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
+ return SDValue();
+
+
+ SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
+ DAG.getConstant(2, MVT::i32));
+ SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
+ Load->getChain(), Ptr,
+ DAG.getTargetConstant(0, MVT::i32),
+ Op.getOperand(2));
+ SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
+ Load->getBasePtr(),
+ DAG.getConstant(0x3, MVT::i32));
+ SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
+ DAG.getConstant(3, MVT::i32));
+
+ Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);
+
+ EVT MemEltVT = MemVT.getScalarType();
+ if (ExtType == ISD::SEXTLOAD) {
+ SDValue MemEltVTNode = DAG.getValueType(MemEltVT);
+
+ SDValue Ops[] = {
+ DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode),
+ Load->getChain()
+ };
+
+ return DAG.getMergeValues(Ops, DL);
+ }
+
+ SDValue Ops[] = {
+ DAG.getZeroExtendInReg(Ret, DL, MemEltVT),
+ Load->getChain()
+ };
+
+ return DAG.getMergeValues(Ops, DL);
}
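The private-address path in LowerLOAD above is easier to follow scalar-side. A sketch under the assumption of a word-addressed register file; loadByte and regfile are illustrative stand-ins for the AMDGPUISD::REGISTER_LOAD result, not real APIs.

#include <cassert>
#include <cstdint>

// Emulate loading an 8-bit value from a register file addressed in 32-bit words,
// mirroring the SRL/AND/SHL/SRL sequence above: word index = addr >> 2, byte index
// = addr & 3, shift = byte index * 8, then sign- or zero-extend the extracted byte.
static int32_t loadByte(const uint32_t *regfile, uint32_t byteAddr, bool isSigned) {
  uint32_t dword = regfile[byteAddr >> 2];   // Ptr = BasePtr >> 2
  uint32_t shift = (byteAddr & 0x3) * 8;     // ShiftAmt = (BasePtr & 3) << 3
  uint32_t bits = (dword >> shift) & 0xFF;   // Ret >> ShiftAmt, masked to the mem type
  return isSigned ? (int32_t)(int8_t)bits : (int32_t)bits;
}

int main() {
  uint32_t regs[1] = {0x80FF7F01u};
  assert(loadByte(regs, 0, false) == 0x01);
  assert(loadByte(regs, 3, true) == -128);   // sign-extended 0x80
  return 0;
}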
SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
if (Result.getNode()) {
return Result;
}
StoreSDNode *Store = cast<StoreSDNode>(Op);
+ SDValue Chain = Store->getChain();
if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
Store->getValue().getValueType().isVector()) {
return SplitVectorStore(Op, DAG);
}
+
+ EVT MemVT = Store->getMemoryVT();
+ if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
+ MemVT.bitsLT(MVT::i32)) {
+ unsigned Mask = 0;
+ if (Store->getMemoryVT() == MVT::i8) {
+ Mask = 0xff;
+ } else if (Store->getMemoryVT() == MVT::i16) {
+ Mask = 0xffff;
+ }
+ SDValue BasePtr = Store->getBasePtr();
+ SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
+ DAG.getConstant(2, MVT::i32));
+ SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
+ Chain, Ptr, DAG.getTargetConstant(0, MVT::i32));
+
+ SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
+ DAG.getConstant(0x3, MVT::i32));
+
+ SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
+ DAG.getConstant(3, MVT::i32));
+
+ SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
+ Store->getValue());
+
+ SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);
+
+ SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
+ MaskedValue, ShiftAmt);
+
+ SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32, DAG.getConstant(Mask, MVT::i32),
+ ShiftAmt);
+ DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
+ DAG.getConstant(0xffffffff, MVT::i32));
+ Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);
+
+ SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
+ return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
+ Chain, Value, Ptr, DAG.getTargetConstant(0, MVT::i32));
+ }
return SDValue();
}
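The sub-dword private store above is a read-modify-write of one 32-bit register word. A small sketch of that masking, assuming an i8 store; storeByte is an illustrative name only.

#include <cassert>
#include <cstdint>

// Read-modify-write of one byte inside a 32-bit word, following the REGISTER_LOAD,
// mask, shift, OR, REGISTER_STORE sequence in LowerSTORE.
static void storeByte(uint32_t *regfile, uint32_t byteAddr, uint8_t value) {
  uint32_t &dst = regfile[byteAddr >> 2];
  uint32_t shift = (byteAddr & 0x3) * 8;
  uint32_t dstMask = ~(0xFFu << shift);      // (Mask << ShiftAmt) ^ 0xffffffff
  dst = (dst & dstMask) | ((uint32_t)value << shift);
}

int main() {
  uint32_t regs[1] = {0x11223344u};
  storeByte(regs, 2, 0xAB);
  assert(regs[0] == 0x11AB3344u);
  return 0;
}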
+SDValue AMDGPUTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT OVT = Op.getValueType();
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+ MVT INTTY;
+ MVT FLTTY;
+ if (!OVT.isVector()) {
+ INTTY = MVT::i32;
+ FLTTY = MVT::f32;
+ } else if (OVT.getVectorNumElements() == 2) {
+ INTTY = MVT::v2i32;
+ FLTTY = MVT::v2f32;
+ } else if (OVT.getVectorNumElements() == 4) {
+ INTTY = MVT::v4i32;
+ FLTTY = MVT::v4f32;
+ }
+ unsigned bitsize = OVT.getScalarType().getSizeInBits();
+ // char|short jq = ia ^ ib;
+ SDValue jq = DAG.getNode(ISD::XOR, DL, OVT, LHS, RHS);
+
+ // jq = jq >> (bitsize - 2)
+ jq = DAG.getNode(ISD::SRA, DL, OVT, jq, DAG.getConstant(bitsize - 2, OVT));
+
+ // jq = jq | 0x1
+ jq = DAG.getNode(ISD::OR, DL, OVT, jq, DAG.getConstant(1, OVT));
+
+ // jq = (int)jq
+ jq = DAG.getSExtOrTrunc(jq, DL, INTTY);
+
+ // int ia = (int)LHS;
+ SDValue ia = DAG.getSExtOrTrunc(LHS, DL, INTTY);
+
+  // int ib = (int)RHS;
+ SDValue ib = DAG.getSExtOrTrunc(RHS, DL, INTTY);
+
+ // float fa = (float)ia;
+ SDValue fa = DAG.getNode(ISD::SINT_TO_FP, DL, FLTTY, ia);
+
+ // float fb = (float)ib;
+ SDValue fb = DAG.getNode(ISD::SINT_TO_FP, DL, FLTTY, ib);
+
+ // float fq = native_divide(fa, fb);
+ SDValue fq = DAG.getNode(ISD::FMUL, DL, FLTTY,
+ fa, DAG.getNode(AMDGPUISD::RCP, DL, FLTTY, fb));
+
+ // fq = trunc(fq);
+ fq = DAG.getNode(ISD::FTRUNC, DL, FLTTY, fq);
+
+ // float fqneg = -fq;
+ SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FLTTY, fq);
+
+ // float fr = mad(fqneg, fb, fa);
+ SDValue fr = DAG.getNode(ISD::FADD, DL, FLTTY,
+ DAG.getNode(ISD::MUL, DL, FLTTY, fqneg, fb), fa);
+
+ // int iq = (int)fq;
+ SDValue iq = DAG.getNode(ISD::FP_TO_SINT, DL, INTTY, fq);
+
+ // fr = fabs(fr);
+ fr = DAG.getNode(ISD::FABS, DL, FLTTY, fr);
+
+ // fb = fabs(fb);
+ fb = DAG.getNode(ISD::FABS, DL, FLTTY, fb);
+
+ // int cv = fr >= fb;
+ SDValue cv;
+ if (INTTY == MVT::i32) {
+ cv = DAG.getSetCC(DL, INTTY, fr, fb, ISD::SETOGE);
+ } else {
+ cv = DAG.getSetCC(DL, INTTY, fr, fb, ISD::SETOGE);
+ }
+ // jq = (cv ? jq : 0);
+ jq = DAG.getNode(ISD::SELECT, DL, OVT, cv, jq,
+ DAG.getConstant(0, OVT));
+ // dst = iq + jq;
+ iq = DAG.getSExtOrTrunc(iq, DL, OVT);
+ iq = DAG.getNode(ISD::ADD, DL, OVT, iq, jq);
+ return iq;
+}
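The commented pseudocode above can be transcribed into a scalar routine to see how the correction term jq works. This is only a sketch for 16-bit operands (which, as noted later, are not actually legal types here); sdiv24ish is an illustrative name, and hardware rcp/mad rounding is not modelled.

#include <cassert>
#include <cmath>
#include <cstdint>

// Scalar transcription of the LowerSDIV24 sequence with bitsize = 16.
static int16_t sdiv24ish(int16_t ia, int16_t ib) {
  const unsigned bitsize = 16;
  // jq = ((ia ^ ib) >> (bitsize - 2)) | 1 : +1 or -1, with the sign of the quotient.
  int jq = ((ia ^ ib) >> (bitsize - 2)) | 1;
  float fa = (float)ia, fb = (float)ib;
  float fq = std::truncf(fa / fb);            // fq = trunc(native_divide(fa, fb))
  float fr = std::fabsf(-fq * fb + fa);       // |mad(fqneg, fb, fa)|
  int iq = (int)fq;
  if (!(fr >= std::fabsf(fb)))                // cv = fr >= fb; jq = cv ? jq : 0
    jq = 0;
  return (int16_t)(iq + jq);
}

int main() {
  assert(sdiv24ish(100, 7) == 100 / 7);
  assert(sdiv24ish(-100, 7) == -100 / 7);
  assert(sdiv24ish(99, 3) == 33);
  return 0;
}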
+
+SDValue AMDGPUTargetLowering::LowerSDIV32(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT OVT = Op.getValueType();
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+  // The LowerSDIV32 function generates code equivalent to the following IL.
+ // mov r0, LHS
+ // mov r1, RHS
+ // ilt r10, r0, 0
+ // ilt r11, r1, 0
+ // iadd r0, r0, r10
+ // iadd r1, r1, r11
+ // ixor r0, r0, r10
+ // ixor r1, r1, r11
+ // udiv r0, r0, r1
+ // ixor r10, r10, r11
+ // iadd r0, r0, r10
+ // ixor DST, r0, r10
+
+ // mov r0, LHS
+ SDValue r0 = LHS;
+
+ // mov r1, RHS
+ SDValue r1 = RHS;
+
+ // ilt r10, r0, 0
+ SDValue r10 = DAG.getSelectCC(DL,
+ r0, DAG.getConstant(0, OVT),
+ DAG.getConstant(-1, OVT),
+ DAG.getConstant(0, OVT),
+ ISD::SETLT);
+
+ // ilt r11, r1, 0
+ SDValue r11 = DAG.getSelectCC(DL,
+ r1, DAG.getConstant(0, OVT),
+ DAG.getConstant(-1, OVT),
+ DAG.getConstant(0, OVT),
+ ISD::SETLT);
+
+ // iadd r0, r0, r10
+ r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
+
+ // iadd r1, r1, r11
+ r1 = DAG.getNode(ISD::ADD, DL, OVT, r1, r11);
+
+ // ixor r0, r0, r10
+ r0 = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
+
+ // ixor r1, r1, r11
+ r1 = DAG.getNode(ISD::XOR, DL, OVT, r1, r11);
+
+ // udiv r0, r0, r1
+ r0 = DAG.getNode(ISD::UDIV, DL, OVT, r0, r1);
+
+ // ixor r10, r10, r11
+ r10 = DAG.getNode(ISD::XOR, DL, OVT, r10, r11);
+
+ // iadd r0, r0, r10
+ r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
+
+ // ixor DST, r0, r10
+ SDValue DST = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
+ return DST;
+}
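A C transcription of the IL sequence above, as a way to check the add/xor sign tricks: take absolute values, divide unsigned, then restore the quotient sign. INT_MIN inputs are out of scope here, as in the pseudocode; sdiv32ish is an illustrative name only.

#include <cassert>
#include <cstdint>

static int32_t sdiv32ish(int32_t lhs, int32_t rhs) {
  int32_t r10 = lhs < 0 ? -1 : 0;                 // ilt r10, r0, 0
  int32_t r11 = rhs < 0 ? -1 : 0;                 // ilt r11, r1, 0
  uint32_t r0 = (uint32_t)((lhs + r10) ^ r10);    // (x - 1) ^ -1 == -x, i.e. |lhs|
  uint32_t r1 = (uint32_t)((rhs + r11) ^ r11);    // |rhs|
  uint32_t q  = r0 / r1;                          // udiv r0, r0, r1
  int32_t sign = r10 ^ r11;                       // ixor r10, r10, r11
  return ((int32_t)q + sign) ^ sign;              // iadd, ixor: conditionally negate q
}

int main() {
  assert(sdiv32ish(-37, 5) == -37 / 5);
  assert(sdiv32ish(37, -5) == 37 / -5);
  assert(sdiv32ish(-37, -5) == -37 / -5);
  return 0;
}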
+
+SDValue AMDGPUTargetLowering::LowerSDIV64(SDValue Op, SelectionDAG &DAG) const {
+ return SDValue(Op.getNode(), 0);
+}
+
+SDValue AMDGPUTargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const {
+ EVT OVT = Op.getValueType().getScalarType();
+
+ if (OVT == MVT::i64)
+ return LowerSDIV64(Op, DAG);
+
+ if (OVT.getScalarType() == MVT::i32)
+ return LowerSDIV32(Op, DAG);
+
+ if (OVT == MVT::i16 || OVT == MVT::i8) {
+ // FIXME: We should be checking for the masked bits. This isn't reached
+ // because i8 and i16 are not legal types.
+ return LowerSDIV24(Op, DAG);
+ }
+
+ return SDValue(Op.getNode(), 0);
+}
+
+SDValue AMDGPUTargetLowering::LowerSREM32(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT OVT = Op.getValueType();
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+  // The LowerSREM32 function generates code equivalent to the following IL.
+ // mov r0, LHS
+ // mov r1, RHS
+ // ilt r10, r0, 0
+ // ilt r11, r1, 0
+ // iadd r0, r0, r10
+ // iadd r1, r1, r11
+ // ixor r0, r0, r10
+ // ixor r1, r1, r11
+ // udiv r20, r0, r1
+ // umul r20, r20, r1
+ // sub r0, r0, r20
+ // iadd r0, r0, r10
+ // ixor DST, r0, r10
+
+ // mov r0, LHS
+ SDValue r0 = LHS;
+
+ // mov r1, RHS
+ SDValue r1 = RHS;
+
+ // ilt r10, r0, 0
+ SDValue r10 = DAG.getSetCC(DL, OVT, r0, DAG.getConstant(0, OVT), ISD::SETLT);
+
+ // ilt r11, r1, 0
+ SDValue r11 = DAG.getSetCC(DL, OVT, r1, DAG.getConstant(0, OVT), ISD::SETLT);
+
+ // iadd r0, r0, r10
+ r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
+
+ // iadd r1, r1, r11
+ r1 = DAG.getNode(ISD::ADD, DL, OVT, r1, r11);
+
+ // ixor r0, r0, r10
+ r0 = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
+
+ // ixor r1, r1, r11
+ r1 = DAG.getNode(ISD::XOR, DL, OVT, r1, r11);
+
+ // udiv r20, r0, r1
+ SDValue r20 = DAG.getNode(ISD::UREM, DL, OVT, r0, r1);
+
+ // umul r20, r20, r1
+ r20 = DAG.getNode(AMDGPUISD::UMUL, DL, OVT, r20, r1);
+
+ // sub r0, r0, r20
+ r0 = DAG.getNode(ISD::SUB, DL, OVT, r0, r20);
+
+ // iadd r0, r0, r10
+ r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
+
+ // ixor DST, r0, r10
+ SDValue DST = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
+ return DST;
+}
+
+SDValue AMDGPUTargetLowering::LowerSREM64(SDValue Op, SelectionDAG &DAG) const {
+ return SDValue(Op.getNode(), 0);
+}
+
+SDValue AMDGPUTargetLowering::LowerSREM(SDValue Op, SelectionDAG &DAG) const {
+ EVT OVT = Op.getValueType();
+
+ if (OVT.getScalarType() == MVT::i64)
+ return LowerSREM64(Op, DAG);
+
+ if (OVT.getScalarType() == MVT::i32)
+ return LowerSREM32(Op, DAG);
+
+ return SDValue(Op.getNode(), 0);
+}
+
SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
- SelectionDAG &DAG) const {
+ SelectionDAG &DAG) const {
SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue Num = Op.getOperand(0);
SDValue Den = Op.getOperand(1);
- SmallVector<SDValue, 8> Results;
-
// RCP = URECIP(Den) = 2^32 / Den + e
// e is rounding error.
SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);
@@ -702,10 +1615,182 @@ SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
// Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
Remainder_A_Den, Rem, ISD::SETEQ);
- SDValue Ops[2];
- Ops[0] = Div;
- Ops[1] = Rem;
- return DAG.getMergeValues(Ops, 2, DL);
+ SDValue Ops[2] = {
+ Div,
+ Rem
+ };
+ return DAG.getMergeValues(Ops, DL);
+}
+
+SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+
+ SDValue Zero = DAG.getConstant(0, VT);
+ SDValue NegOne = DAG.getConstant(-1, VT);
+
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+
+ SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
+ SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
+ SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
+ SDValue RSign = LHSign; // Remainder sign is the same as LHS
+
+ LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
+ RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);
+
+ LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
+ RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);
+
+ SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
+ SDValue Rem = Div.getValue(1);
+
+ Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
+ Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);
+
+ Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
+ Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);
+
+ SDValue Res[2] = {
+ Div,
+ Rem
+ };
+ return DAG.getMergeValues(Res, DL);
+}
+
+SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc SL(Op);
+ SDValue Src = Op.getOperand(0);
+
+ // result = trunc(src)
+ // if (src > 0.0 && src != result)
+ // result += 1.0
+
+ SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
+
+ const SDValue Zero = DAG.getConstantFP(0.0, MVT::f64);
+ const SDValue One = DAG.getConstantFP(1.0, MVT::f64);
+
+ EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);
+
+ SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
+ SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
+ SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
+
+ SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
+ return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
+}
+
+SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc SL(Op);
+ SDValue Src = Op.getOperand(0);
+
+ assert(Op.getValueType() == MVT::f64);
+
+ const SDValue Zero = DAG.getConstant(0, MVT::i32);
+ const SDValue One = DAG.getConstant(1, MVT::i32);
+
+ SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
+
+ // Extract the upper half, since this is where we will find the sign and
+ // exponent.
+ SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);
+
+ const unsigned FractBits = 52;
+ const unsigned ExpBits = 11;
+
+ // Extract the exponent.
+ SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_I32, SL, MVT::i32,
+ Hi,
+ DAG.getConstant(FractBits - 32, MVT::i32),
+ DAG.getConstant(ExpBits, MVT::i32));
+ SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
+ DAG.getConstant(1023, MVT::i32));
+
+ // Extract the sign bit.
+ const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, MVT::i32);
+ SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
+
+  // Extend back to 64-bits.
+ SDValue SignBit64 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
+ Zero, SignBit);
+ SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);
+
+ SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
+ const SDValue FractMask
+ = DAG.getConstant((UINT64_C(1) << FractBits) - 1, MVT::i64);
+
+ SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
+ SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
+ SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);
+
+ EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::i32);
+
+ const SDValue FiftyOne = DAG.getConstant(FractBits - 1, MVT::i32);
+
+ SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
+ SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
+
+ SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
+ SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);
+
+ return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
+}
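A bit-level sketch of the f64 trunc expansion above: extract the unbiased exponent, then clear every mantissa bit below the binary point. The DAG version computes all of the intermediates and selects between them; this sketch uses ordinary branches for the |x| < 1 and already-integral cases, and ftruncBits is an illustrative name.

#include <cassert>
#include <cstdint>
#include <cstring>

static double ftruncBits(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));

  const unsigned fractBits = 52;
  int exp = (int)((bits >> fractBits) & 0x7FF) - 1023;    // BFE of the exponent field
  uint64_t signBit = bits & (UINT64_C(1) << 63);

  uint64_t result;
  if (exp < 0)
    result = signBit;                                     // |x| < 1.0 -> +/-0.0
  else if (exp > 51)
    result = bits;                                        // already integral (or inf/nan)
  else {
    uint64_t fractMask = (UINT64_C(1) << fractBits) - 1;
    result = bits & ~(fractMask >> exp);                  // drop sub-integer fraction bits
  }

  double out;
  std::memcpy(&out, &result, sizeof(out));
  return out;
}

int main() {
  assert(ftruncBits(3.75) == 3.0);
  assert(ftruncBits(-2.5) == -2.0);
  assert(ftruncBits(0.25) == 0.0);
  return 0;
}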
+
+SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc SL(Op);
+ SDValue Src = Op.getOperand(0);
+
+ assert(Op.getValueType() == MVT::f64);
+
+ APFloat C1Val(APFloat::IEEEdouble, "0x1.0p+52");
+ SDValue C1 = DAG.getConstantFP(C1Val, MVT::f64);
+ SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);
+
+ SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
+ SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);
+
+ SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);
+
+ APFloat C2Val(APFloat::IEEEdouble, "0x1.fffffffffffffp+51");
+ SDValue C2 = DAG.getConstantFP(C2Val, MVT::f64);
+
+ EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);
+ SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);
+
+ return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
+}
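The same trick in plain floating point, to make the magic constants above concrete: adding and subtracting 2^52 (with the input's sign) rounds to an integer in the current round-to-nearest-even mode, and inputs whose magnitude already exceeds 2^52 - 1 ulp are passed through. frintSketch is an illustrative name only.

#include <cassert>
#include <cmath>

static double frintSketch(double x) {
  const double c1 = 0x1.0p+52;
  double copySign = std::copysign(c1, x);
  double rounded = (x + copySign) - copySign;
  return std::fabs(x) > 0x1.fffffffffffffp+51 ? x : rounded;
}

int main() {
  assert(frintSketch(2.5) == 2.0);   // ties round to even
  assert(frintSketch(3.5) == 4.0);
  assert(frintSketch(-1.25) == -1.0);
  return 0;
}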
+
+SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const {
+ // FNEARBYINT and FRINT are the same, except in their handling of FP
+ // exceptions. Those aren't really meaningful for us, and OpenCL only has
+ // rint, so just treat them as equivalent.
+ return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
+}
+
+SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc SL(Op);
+ SDValue Src = Op.getOperand(0);
+
+ // result = trunc(src);
+ // if (src < 0.0 && src != result)
+ // result += -1.0.
+
+ SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
+
+ const SDValue Zero = DAG.getConstantFP(0.0, MVT::f64);
+ const SDValue NegOne = DAG.getConstantFP(-1.0, MVT::f64);
+
+ EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);
+
+ SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
+ SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
+ SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
+
+ SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
+ return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}
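LowerFCEIL and LowerFFLOOR share one pattern: truncate, then add +1.0 or -1.0 when the input was on the wrong side of the truncation. A scalar sketch of both, using std::trunc in place of the FTRUNC node (which LowerFTRUNC expands separately for f64); the function names are illustrative.

#include <cassert>
#include <cmath>

static double ceilSketch(double x) {
  double t = std::trunc(x);
  return (x > 0.0 && x != t) ? t + 1.0 : t;   // result += 1.0
}

static double floorSketch(double x) {
  double t = std::trunc(x);
  return (x < 0.0 && x != t) ? t - 1.0 : t;   // result += -1.0
}

int main() {
  assert(ceilSketch(1.25) == 2.0 && ceilSketch(-1.25) == -1.0);
  assert(floorSketch(1.25) == 1.0 && floorSketch(-1.25) == -2.0);
  return 0;
}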
SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
@@ -725,7 +1810,275 @@ SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
DAG.getConstantFP(4294967296.0f, MVT::f32)); // 2^32
return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
+}
+SDValue AMDGPUTargetLowering::ExpandSIGN_EXTEND_INREG(SDValue Op,
+ unsigned BitsDiff,
+ SelectionDAG &DAG) const {
+ MVT VT = Op.getSimpleValueType();
+ SDLoc DL(Op);
+ SDValue Shift = DAG.getConstant(BitsDiff, VT);
+ // Shift left by 'Shift' bits.
+ SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Op.getOperand(0), Shift);
+ // Signed shift Right by 'Shift' bits.
+ return DAG.getNode(ISD::SRA, DL, VT, Shl, Shift);
+}
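The shl/sra pair above is the standard way to sign-extend a narrow field held in a wider register. A tiny sketch; it relies on arithmetic right shift of a signed value, which is what ISD::SRA models, and signExtendInReg is an illustrative name.

#include <cassert>
#include <cstdint>

// Sign-extend the low (32 - bitsDiff) bits of x: shift them up to the top, then
// arithmetic-shift back down.
static int32_t signExtendInReg(int32_t x, unsigned bitsDiff) {
  return (int32_t)((uint32_t)x << bitsDiff) >> bitsDiff;
}

int main() {
  assert(signExtendInReg(0x000000FF, 24) == -1);   // i8 value -1, sign-extended
  assert(signExtendInReg(0x0000007F, 24) == 127);
  return 0;
}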
+
+SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
+ SelectionDAG &DAG) const {
+ EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+ MVT VT = Op.getSimpleValueType();
+ MVT ScalarVT = VT.getScalarType();
+
+ if (!VT.isVector())
+ return SDValue();
+
+ SDValue Src = Op.getOperand(0);
+ SDLoc DL(Op);
+
+ // TODO: Don't scalarize on Evergreen?
+ unsigned NElts = VT.getVectorNumElements();
+ SmallVector<SDValue, 8> Args;
+ DAG.ExtractVectorElements(Src, Args, 0, NElts);
+
+ SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
+ for (unsigned I = 0; I < NElts; ++I)
+ Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
+
+ return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args);
+}
+
+//===----------------------------------------------------------------------===//
+// Custom DAG optimizations
+//===----------------------------------------------------------------------===//
+
+static bool isU24(SDValue Op, SelectionDAG &DAG) {
+ APInt KnownZero, KnownOne;
+ EVT VT = Op.getValueType();
+ DAG.computeKnownBits(Op, KnownZero, KnownOne);
+
+ return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24;
+}
+
+static bool isI24(SDValue Op, SelectionDAG &DAG) {
+ EVT VT = Op.getValueType();
+
+ // In order for this to be a signed 24-bit value, bit 23, must
+ // be a sign bit.
+ return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
+ // as unsigned 24-bit values.
+ (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24;
+}
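Restated for compile-time constants, the checks above say: a value is a u24 if its top 8 bits are known zero, and an i24 if it has at least 9 redundant sign bits (ComputeNumSignBits >= 32 - 24 + 1). A small sketch under that assumption; fitsU24 and fitsI24 are illustrative names standing in for the known-bits and sign-bit analyses.

#include <cassert>
#include <cstdint>

static bool fitsU24(uint32_t v) { return (v >> 24) == 0; }
static bool fitsI24(int32_t v)  { return v >= -(1 << 23) && v < (1 << 23); }

int main() {
  assert(fitsU24(0x00FFFFFFu) && !fitsU24(0x01000000u));
  assert(fitsI24(-8388608) && !fitsI24(8388608));
  return 0;
}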
+
+static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {
+
+ SelectionDAG &DAG = DCI.DAG;
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ EVT VT = Op.getValueType();
+
+ APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
+ APInt KnownZero, KnownOne;
+ TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
+ if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
+ DCI.CommitTargetLoweringOpt(TLO);
+}
+
+template <typename IntTy>
+static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0,
+ uint32_t Offset, uint32_t Width) {
+ if (Width + Offset < 32) {
+ IntTy Result = (Src0 << (32 - Offset - Width)) >> (32 - Width);
+ return DAG.getConstant(Result, MVT::i32);
+ }
+
+ return DAG.getConstant(Src0 >> Offset, MVT::i32);
+}
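The arithmetic in constantFoldBFE is worth seeing with numbers: extracting 'width' bits at 'offset' is a left shift that drops the high bits followed by a sign- or zero-extending right shift, and when offset + width reaches bit 31 the left shift is unnecessary. A sketch of the signed instance; bfeI32 is an illustrative name, and it assumes arithmetic right shift of negative values as ISD::SRA does.

#include <cassert>
#include <cstdint>

static int32_t bfeI32(int32_t src, unsigned offset, unsigned width) {
  if (width + offset < 32)
    return (int32_t)((uint32_t)src << (32 - offset - width)) >> (32 - width);
  return src >> offset;
}

int main() {
  assert(bfeI32(0x00000F80, 7, 5) == -1);     // bits 7..11 are 0b11111 -> signed -1
  assert(bfeI32(0x00000780, 7, 5) == 15);     // bits 7..11 are 0b01111
  return 0;
}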
+
+static bool usesAllNormalStores(SDNode *LoadVal) {
+ for (SDNode::use_iterator I = LoadVal->use_begin(); !I.atEnd(); ++I) {
+ if (!ISD::isNormalStore(*I))
+ return false;
+ }
+
+ return true;
+}
+
+// If we have a copy of an illegal type, replace it with a load / store of an
+// equivalently sized legal type. This avoids intermediate bit pack / unpack
+// instructions emitted when handling extloads and truncstores. Ideally we could
+// recognize the pack / unpack pattern to eliminate it.
+SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ if (!DCI.isBeforeLegalize())
+ return SDValue();
+
+ StoreSDNode *SN = cast<StoreSDNode>(N);
+ SDValue Value = SN->getValue();
+ EVT VT = Value.getValueType();
+
+ if (isTypeLegal(VT) || SN->isVolatile() || !ISD::isNormalLoad(Value.getNode()))
+ return SDValue();
+
+ LoadSDNode *LoadVal = cast<LoadSDNode>(Value);
+ if (LoadVal->isVolatile() || !usesAllNormalStores(LoadVal))
+ return SDValue();
+
+ EVT MemVT = LoadVal->getMemoryVT();
+
+ SDLoc SL(N);
+ SelectionDAG &DAG = DCI.DAG;
+ EVT LoadVT = getEquivalentMemType(*DAG.getContext(), MemVT);
+
+ SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
+ LoadVT, SL,
+ LoadVal->getChain(),
+ LoadVal->getBasePtr(),
+ LoadVal->getOffset(),
+ LoadVT,
+ LoadVal->getMemOperand());
+
+ SDValue CastLoad = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad.getValue(0));
+ DCI.CombineTo(LoadVal, CastLoad, NewLoad.getValue(1), false);
+
+ return DAG.getStore(SN->getChain(), SL, NewLoad,
+ SN->getBasePtr(), SN->getMemOperand());
+}
+
+SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ EVT VT = N->getValueType(0);
+
+ if (VT.isVector() || VT.getSizeInBits() > 32)
+ return SDValue();
+
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc DL(N);
+
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ SDValue Mul;
+
+ if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
+ N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
+ N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
+ Mul = DAG.getNode(AMDGPUISD::MUL_U24, DL, MVT::i32, N0, N1);
+ } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
+ N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
+ N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
+ Mul = DAG.getNode(AMDGPUISD::MUL_I24, DL, MVT::i32, N0, N1);
+ } else {
+ return SDValue();
+ }
+
+ // We need to use sext even for MUL_U24, because MUL_U24 is used
+ // for signed multiply of 8 and 16-bit types.
+ return DAG.getSExtOrTrunc(Mul, DL, VT);
+}
+
+SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc DL(N);
+
+ switch(N->getOpcode()) {
+ default: break;
+ case ISD::MUL:
+ return performMulCombine(N, DCI);
+ case AMDGPUISD::MUL_I24:
+ case AMDGPUISD::MUL_U24: {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ simplifyI24(N0, DCI);
+ simplifyI24(N1, DCI);
+ return SDValue();
+ }
+ case ISD::SELECT_CC: {
+ return CombineMinMax(N, DAG);
+ }
+ case AMDGPUISD::BFE_I32:
+ case AMDGPUISD::BFE_U32: {
+ assert(!N->getValueType(0).isVector() &&
+ "Vector handling of BFE not implemented");
+ ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
+ if (!Width)
+ break;
+
+ uint32_t WidthVal = Width->getZExtValue() & 0x1f;
+ if (WidthVal == 0)
+ return DAG.getConstant(0, MVT::i32);
+
+ ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
+ if (!Offset)
+ break;
+
+ SDValue BitsFrom = N->getOperand(0);
+ uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;
+
+ bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;
+
+ if (OffsetVal == 0) {
+ // This is already sign / zero extended, so try to fold away extra BFEs.
+ unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
+
+ unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
+ if (OpSignBits >= SignBits)
+ return BitsFrom;
+
+ EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
+ if (Signed) {
+ // This is a sign_extend_inreg. Replace it to take advantage of existing
+ // DAG Combines. If not eliminated, we will match back to BFE during
+ // selection.
+
+        // TODO: The sext_inreg of extended types ends, although we could
+        // handle them in a single BFE.
+ return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
+ DAG.getValueType(SmallVT));
+ }
+
+ return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
+ }
+
+ if (ConstantSDNode *Val = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
+ if (Signed) {
+ return constantFoldBFE<int32_t>(DAG,
+ Val->getSExtValue(),
+ OffsetVal,
+ WidthVal);
+ }
+
+ return constantFoldBFE<uint32_t>(DAG,
+ Val->getZExtValue(),
+ OffsetVal,
+ WidthVal);
+ }
+
+ APInt Demanded = APInt::getBitsSet(32,
+ OffsetVal,
+ OffsetVal + WidthVal);
+
+ if ((OffsetVal + WidthVal) >= 32) {
+ SDValue ShiftVal = DAG.getConstant(OffsetVal, MVT::i32);
+ return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
+ BitsFrom, ShiftVal);
+ }
+
+ APInt KnownZero, KnownOne;
+ TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
+ !DCI.isBeforeLegalizeOps());
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (TLO.ShrinkDemandedConstant(BitsFrom, Demanded) ||
+ TLI.SimplifyDemandedBits(BitsFrom, Demanded, KnownZero, KnownOne, TLO)) {
+ DCI.CommitTargetLoweringOpt(TLO);
+ }
+
+ break;
+ }
+
+ case ISD::STORE:
+ return performStoreCombine(N, DCI);
+ }
+ return SDValue();
}
//===----------------------------------------------------------------------===//
@@ -803,17 +2156,17 @@ SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
- default: return 0;
+ default: return nullptr;
// AMDIL DAG nodes
NODE_NAME_CASE(CALL);
NODE_NAME_CASE(UMUL);
- NODE_NAME_CASE(DIV_INF);
NODE_NAME_CASE(RET_FLAG);
NODE_NAME_CASE(BRANCH_COND);
// AMDGPU DAG nodes
NODE_NAME_CASE(DWORDADDR)
NODE_NAME_CASE(FRACT)
+ NODE_NAME_CASE(CLAMP)
NODE_NAME_CASE(FMAX)
NODE_NAME_CASE(SMAX)
NODE_NAME_CASE(UMAX)
@@ -821,6 +2174,24 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(SMIN)
NODE_NAME_CASE(UMIN)
NODE_NAME_CASE(URECIP)
+ NODE_NAME_CASE(DIV_SCALE)
+ NODE_NAME_CASE(DIV_FMAS)
+ NODE_NAME_CASE(DIV_FIXUP)
+ NODE_NAME_CASE(TRIG_PREOP)
+ NODE_NAME_CASE(RCP)
+ NODE_NAME_CASE(RSQ)
+ NODE_NAME_CASE(RSQ_LEGACY)
+ NODE_NAME_CASE(RSQ_CLAMPED)
+ NODE_NAME_CASE(DOT4)
+ NODE_NAME_CASE(BFE_U32)
+ NODE_NAME_CASE(BFE_I32)
+ NODE_NAME_CASE(BFI)
+ NODE_NAME_CASE(BFM)
+ NODE_NAME_CASE(BREV)
+ NODE_NAME_CASE(MUL_U24)
+ NODE_NAME_CASE(MUL_I24)
+ NODE_NAME_CASE(MAD_U24)
+ NODE_NAME_CASE(MAD_I24)
NODE_NAME_CASE(EXPORT)
NODE_NAME_CASE(CONST_ADDRESS)
NODE_NAME_CASE(REGISTER_LOAD)
@@ -831,7 +2202,124 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(SAMPLEB)
NODE_NAME_CASE(SAMPLED)
NODE_NAME_CASE(SAMPLEL)
+ NODE_NAME_CASE(CVT_F32_UBYTE0)
+ NODE_NAME_CASE(CVT_F32_UBYTE1)
+ NODE_NAME_CASE(CVT_F32_UBYTE2)
+ NODE_NAME_CASE(CVT_F32_UBYTE3)
+ NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
+ NODE_NAME_CASE(CONST_DATA_PTR)
NODE_NAME_CASE(STORE_MSKOR)
NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
}
}
+
+static void computeKnownBitsForMinMax(const SDValue Op0,
+ const SDValue Op1,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth) {
+ APInt Op0Zero, Op0One;
+ APInt Op1Zero, Op1One;
+ DAG.computeKnownBits(Op0, Op0Zero, Op0One, Depth);
+ DAG.computeKnownBits(Op1, Op1Zero, Op1One, Depth);
+
+ KnownZero = Op0Zero & Op1Zero;
+ KnownOne = Op0One & Op1One;
+}
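The intersection above encodes a simple fact: whichever operand a min/max selects, a bit is only known if it is known, with the same value, in both operands. A small concrete check with fully known constants, purely illustrative.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t a = 0x0000FF0Fu, b = 0x0000F0FFu;
  uint32_t knownOne  = a & b;            // bits set in both operands
  uint32_t knownZero = ~a & ~b;          // bits clear in both operands
  assert(knownOne  == 0x0000F00Fu);
  assert(knownZero == 0xFFFF0000u);
  // min(a, b) and max(a, b) both agree with the intersected masks.
  uint32_t mn = a < b ? a : b, mx = a < b ? b : a;
  assert((mn & knownOne) == knownOne && (mn & knownZero) == 0);
  assert((mx & knownOne) == knownOne && (mx & knownZero) == 0);
  return 0;
}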
+
+void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
+ const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth) const {
+
+ KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything.
+
+ APInt KnownZero2;
+ APInt KnownOne2;
+ unsigned Opc = Op.getOpcode();
+
+ switch (Opc) {
+ default:
+ break;
+ case ISD::INTRINSIC_WO_CHAIN: {
+ // FIXME: The intrinsic should just use the node.
+ switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
+ case AMDGPUIntrinsic::AMDGPU_imax:
+ case AMDGPUIntrinsic::AMDGPU_umax:
+ case AMDGPUIntrinsic::AMDGPU_imin:
+ case AMDGPUIntrinsic::AMDGPU_umin:
+ computeKnownBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
+ KnownZero, KnownOne, DAG, Depth);
+ break;
+ default:
+ break;
+ }
+
+ break;
+ }
+ case AMDGPUISD::SMAX:
+ case AMDGPUISD::UMAX:
+ case AMDGPUISD::SMIN:
+ case AMDGPUISD::UMIN:
+ computeKnownBitsForMinMax(Op.getOperand(0), Op.getOperand(1),
+ KnownZero, KnownOne, DAG, Depth);
+ break;
+
+ case AMDGPUISD::BFE_I32:
+ case AMDGPUISD::BFE_U32: {
+ ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
+ if (!CWidth)
+ return;
+
+ unsigned BitWidth = 32;
+ uint32_t Width = CWidth->getZExtValue() & 0x1f;
+ if (Width == 0) {
+ KnownZero = APInt::getAllOnesValue(BitWidth);
+ KnownOne = APInt::getNullValue(BitWidth);
+ return;
+ }
+
+ // FIXME: This could do a lot more. If offset is 0, should be the same as
+ // sign_extend_inreg implementation, but that involves duplicating it.
+ if (Opc == AMDGPUISD::BFE_I32)
+ KnownOne = APInt::getHighBitsSet(BitWidth, BitWidth - Width);
+ else
+ KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - Width);
+
+ break;
+ }
+ }
+}
+
+unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
+ SDValue Op,
+ const SelectionDAG &DAG,
+ unsigned Depth) const {
+ switch (Op.getOpcode()) {
+ case AMDGPUISD::BFE_I32: {
+ ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
+ if (!Width)
+ return 1;
+
+ unsigned SignBits = 32 - Width->getZExtValue() + 1;
+ ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(Op.getOperand(1));
+ if (!Offset || !Offset->isNullValue())
+ return SignBits;
+
+ // TODO: Could probably figure something out with non-0 offsets.
+ unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
+ return std::max(SignBits, Op0SignBits);
+ }
+
+ case AMDGPUISD::BFE_U32: {
+ ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
+ return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
+ }
+
+ default:
+ return 1;
+ }
+}
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUISelLowering.h b/contrib/llvm/lib/Target/R600/AMDGPUISelLowering.h
index 2dfd3cf..624d4e0 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUISelLowering.h
+++ b/contrib/llvm/lib/Target/R600/AMDGPUISelLowering.h
@@ -21,13 +21,18 @@
namespace llvm {
class AMDGPUMachineFunction;
+class AMDGPUSubtarget;
class MachineRegisterInfo;
class AMDGPUTargetLowering : public TargetLowering {
+protected:
+ const AMDGPUSubtarget *Subtarget;
+
private:
- void ExtractVectorElements(SDValue Op, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &Args,
- unsigned Start, unsigned Count) const;
+ SDValue LowerConstantInitializer(const Constant* Init, const GlobalValue *GV,
+ const SDValue &InitPtr,
+ SDValue Chain,
+ SelectionDAG &DAG) const;
SDValue LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
@@ -36,25 +41,44 @@ private:
/// of the same bitwidth.
SDValue MergeVectorStore(const SDValue &Op, SelectionDAG &DAG) const;
/// \brief Split a vector store into multiple scalar stores.
- /// \returns The resulting chain.
+ /// \returns The resulting chain.
+
+ SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSDIV24(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSDIV32(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSDIV64(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSREM(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSREM32(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSREM64(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFCEIL(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFRINT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const;
+
SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
+ SDValue ExpandSIGN_EXTEND_INREG(SDValue Op,
+ unsigned BitsDiff,
+ SelectionDAG &DAG) const;
+ SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
+
+ SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+
protected:
+ static EVT getEquivalentMemType(LLVMContext &Context, EVT VT);
+ static EVT getEquivalentLoadRegType(LLVMContext &Context, EVT VT);
- /// \brief Helper function that adds Reg to the LiveIn list of the DAG's
- /// MachineFunction.
- ///
- /// \returns a RegisterSDNode representing Reg.
- virtual SDValue CreateLiveInRegister(SelectionDAG &DAG,
- const TargetRegisterClass *RC,
- unsigned Reg, EVT VT) const;
- SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
- SelectionDAG &DAG) const;
+ virtual SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
+ SelectionDAG &DAG) const;
/// \brief Split a vector load into multiple scalar loads.
SDValue SplitVectorLoad(const SDValue &Op, SelectionDAG &DAG) const;
SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
bool isHWTrueValue(SDValue Op) const;
bool isHWFalseValue(SDValue Op) const;
@@ -74,67 +98,69 @@ protected:
public:
AMDGPUTargetLowering(TargetMachine &TM);
- virtual bool isFAbsFree(EVT VT) const;
- virtual bool isFNegFree(EVT VT) const;
- virtual MVT getVectorIdxTy() const;
- virtual bool isLoadBitCastBeneficial(EVT, EVT) const LLVM_OVERRIDE;
- virtual SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- SDLoc DL, SelectionDAG &DAG) const;
- virtual SDValue LowerCall(CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const {
- CLI.Callee.dump();
- llvm_unreachable("Undefined function");
- }
+ bool isFAbsFree(EVT VT) const override;
+ bool isFNegFree(EVT VT) const override;
+ bool isTruncateFree(EVT Src, EVT Dest) const override;
+ bool isTruncateFree(Type *Src, Type *Dest) const override;
+
+ bool isZExtFree(Type *Src, Type *Dest) const override;
+ bool isZExtFree(EVT Src, EVT Dest) const override;
+ bool isZExtFree(SDValue Val, EVT VT2) const override;
+
+ bool isNarrowingProfitable(EVT VT1, EVT VT2) const override;
+
+ MVT getVectorIdxTy() const override;
+ bool isSelectSupported(SelectSupportKind) const override;
+
+ bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
+ bool ShouldShrinkFPConstant(EVT VT) const override;
+
+ bool isLoadBitCastBeneficial(EVT, EVT) const override;
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ SDLoc DL, SelectionDAG &DAG) const override;
+ SDValue LowerCall(CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const override;
+
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+ SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
+ void ReplaceNodeResults(SDNode * N,
+ SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const override;
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerIntrinsicIABS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerIntrinsicLRP(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerMinMax(SDValue Op, SelectionDAG &DAG) const;
- virtual const char* getTargetNodeName(unsigned Opcode) const;
+ SDValue CombineMinMax(SDNode *N, SelectionDAG &DAG) const;
+ const char* getTargetNodeName(unsigned Opcode) const override;
- virtual SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const {
+ virtual SDNode *PostISelFolding(MachineSDNode *N,
+ SelectionDAG &DAG) const {
return N;
}
-// Functions defined in AMDILISelLowering.cpp
-public:
-
/// \brief Determine which of the bits specified in \p Mask are known to be
/// either zero or one and return them in the \p KnownZero and \p KnownOne
/// bitsets.
- virtual void computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth = 0) const;
-
- virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info,
- const CallInst &I, unsigned Intrinsic) const;
-
- /// We want to mark f32/f64 floating point values as legal.
- bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
+ void computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth = 0) const override;
- /// We don't want to shrink f64/f32 constants.
- bool ShouldShrinkFPConstant(EVT VT) const;
+ virtual unsigned ComputeNumSignBitsForTargetNode(
+ SDValue Op,
+ const SelectionDAG &DAG,
+ unsigned Depth = 0) const override;
-private:
- void InitAMDILLowering();
- SDValue LowerSREM(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSREM8(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSREM16(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSREM32(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSREM64(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSDIV24(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSDIV32(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSDIV64(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
- EVT genIntType(uint32_t size = 32, uint32_t numEle = 1) const;
- SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
+ /// \brief Helper function that adds Reg to the LiveIn list of the DAG's
+ /// MachineFunction.
+ ///
+ /// \returns a RegisterSDNode representing Reg.
+ virtual SDValue CreateLiveInRegister(SelectionDAG &DAG,
+ const TargetRegisterClass *RC,
+ unsigned Reg, EVT VT) const;
};
namespace AMDGPUISD {
@@ -144,12 +170,15 @@ enum {
FIRST_NUMBER = ISD::BUILTIN_OP_END,
CALL, // Function call based on a single integer
UMUL, // 32bit unsigned multiplication
- DIV_INF, // Divide with infinity returned on zero divisor
RET_FLAG,
BRANCH_COND,
// End AMDIL ISD Opcodes
DWORDADDR,
FRACT,
+ CLAMP,
+
+ // SIN_HW, COS_HW - f32 for SI, 1 ULP max error, valid from -100 pi to 100 pi.
+ // Denormals handled on some parts.
COS_HW,
SIN_HW,
FMAX,
@@ -159,7 +188,27 @@ enum {
SMIN,
UMIN,
URECIP,
+ DIV_SCALE,
+ DIV_FMAS,
+ DIV_FIXUP,
+ TRIG_PREOP, // 1 ULP max error for f64
+
+ // RCP, RSQ - For f32, 1 ULP max error, no denormal handling.
+ // For f64, max error 2^29 ULP, handles denormals.
+ RCP,
+ RSQ,
+ RSQ_LEGACY,
+ RSQ_CLAMPED,
DOT4,
+ BFE_U32, // Extract range of bits with zero extension to 32-bits.
+ BFE_I32, // Extract range of bits with sign extension to 32-bits.
+ BFI, // (src0 & src1) | (~src0 & src2)
+ BFM, // Insert a range of bits into a 32-bit word.
+ BREV, // Reverse bits.
+ MUL_U24,
+ MUL_I24,
+ MAD_U24,
+ MAD_I24,
TEXTURE_FETCH,
EXPORT,
CONST_ADDRESS,
@@ -170,6 +219,23 @@ enum {
SAMPLEB,
SAMPLED,
SAMPLEL,
+
+ // These cvt_f32_ubyte* nodes need to remain consecutive and in order.
+ CVT_F32_UBYTE0,
+ CVT_F32_UBYTE1,
+ CVT_F32_UBYTE2,
+ CVT_F32_UBYTE3,
+ /// This node is for VLIW targets and it is used to represent a vector
+ /// that is stored in consecutive registers with the same channel.
+ /// For example:
+  ///   |X  |Y|Z|W|
+ /// T0|v.x| | | |
+ /// T1|v.y| | | |
+ /// T2|v.z| | | |
+ /// T3|v.w| | | |
+ BUILD_VERTICAL_VECTOR,
+ /// Pointer to the start of the shader's constant data.
+ CONST_DATA_PTR,
FIRST_MEM_OPCODE_NUMBER = ISD::FIRST_TARGET_MEMORY_OPCODE,
STORE_MSKOR,
LOAD_CONSTANT,
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUInstrInfo.cpp b/contrib/llvm/lib/Target/R600/AMDGPUInstrInfo.cpp
index 4f7084b..fef5b8c 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/R600/AMDGPUInstrInfo.cpp
@@ -20,19 +20,18 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+using namespace llvm;
+
#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"
-using namespace llvm;
-
-
// Pin the vtable to this file.
void AMDGPUInstrInfo::anchor() {}
-AMDGPUInstrInfo::AMDGPUInstrInfo(TargetMachine &tm)
- : AMDGPUGenInstrInfo(-1,-1), RI(tm), TM(tm) { }
+AMDGPUInstrInfo::AMDGPUInstrInfo(const AMDGPUSubtarget &st)
+ : AMDGPUGenInstrInfo(-1,-1), RI(st), ST(st) { }
const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
return RI;
@@ -85,7 +84,7 @@ AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI,
LiveVariables *LV) const {
// TODO: Implement this function
- return NULL;
+ return nullptr;
}
bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
MachineBasicBlock &MBB) const {
@@ -110,7 +109,7 @@ AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
- assert(!"Not Implemented");
+ llvm_unreachable("Not Implemented");
}
void
@@ -119,22 +118,21 @@ AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
- assert(!"Not Implemented");
+ llvm_unreachable("Not Implemented");
}
bool AMDGPUInstrInfo::expandPostRAPseudo (MachineBasicBlock::iterator MI) const {
MachineBasicBlock *MBB = MI->getParent();
- int OffsetOpIdx =
- AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::addr);
+ int OffsetOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+ AMDGPU::OpName::addr);
// addr is a custom operand with multiple MI operands, and only the
// first MI operand is given a name.
int RegOpIdx = OffsetOpIdx + 1;
- int ChanOpIdx =
- AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::chan);
-
+ int ChanOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+ AMDGPU::OpName::chan);
if (isRegisterLoad(*MI)) {
- int DstOpIdx =
- AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
+ int DstOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+ AMDGPU::OpName::dst);
unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
unsigned Address = calculateIndirectAddress(RegIndex, Channel);
@@ -147,8 +145,8 @@ bool AMDGPUInstrInfo::expandPostRAPseudo (MachineBasicBlock::iterator MI) const
Address, OffsetReg);
}
} else if (isRegisterStore(*MI)) {
- int ValOpIdx =
- AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::val);
+ int ValOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+ AMDGPU::OpName::val);
AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
@@ -177,7 +175,7 @@ AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
// TODO: Implement this function
- return 0;
+ return nullptr;
}
MachineInstr*
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
@@ -185,7 +183,7 @@ AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr *LoadMI) const {
// TODO: Implement this function
- return 0;
+ return nullptr;
}
bool
AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
@@ -322,33 +320,11 @@ int AMDGPUInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
return -1;
}
- Offset = TM.getFrameLowering()->getFrameIndexOffset(MF, -1);
+ Offset = MF.getTarget().getFrameLowering()->getFrameIndexOffset(MF, -1);
return getIndirectIndexBegin(MF) + Offset;
}
-
-void AMDGPUInstrInfo::convertToISA(MachineInstr & MI, MachineFunction &MF,
- DebugLoc DL) const {
- MachineRegisterInfo &MRI = MF.getRegInfo();
- const AMDGPURegisterInfo & RI = getRegisterInfo();
-
- for (unsigned i = 0; i < MI.getNumOperands(); i++) {
- MachineOperand &MO = MI.getOperand(i);
- // Convert dst regclass to one that is supported by the ISA
- if (MO.isReg() && MO.isDef()) {
- if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
- const TargetRegisterClass * oldRegClass = MRI.getRegClass(MO.getReg());
- const TargetRegisterClass * newRegClass = RI.getISARegClass(oldRegClass);
-
- assert(newRegClass);
-
- MRI.setRegClass(MO.getReg(), newRegClass);
- }
- }
- }
-}
-
int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
switch (Channels) {
default: return Opcode;
@@ -357,3 +333,14 @@ int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
case 3: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_3);
}
}
+
+// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
+// header files, so we need to wrap it in a function that takes unsigned
+// instead.
+namespace llvm {
+namespace AMDGPU {
+int getMCOpcode(uint16_t Opcode, unsigned Gen) {
+ return getMCOpcode(Opcode);
+}
+}
+}
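The comment above is the point of this hunk: keep the TableGen'd Subtarget enum out of public headers by exposing a plain unsigned parameter. A minimal, self-contained sketch of the same idiom follows; every name in it is invented for illustration and is not taken from this patch.

#include <cstdint>

namespace gen {
// Normally emitted into a generated *.inc file and never exposed in headers.
enum Subtarget { SI = 0 };
inline int getMCOpcodeGen(uint16_t Op, Subtarget) { return Op; } // stand-in body
}

// Public wrapper: callers pass a plain unsigned, so the enum never leaks out.
inline int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return gen::getMCOpcodeGen(Opcode, static_cast<gen::Subtarget>(Gen));
}

int main() { return getMCOpcode(42, 0) == 42 ? 0 : 1; }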
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUInstrInfo.h b/contrib/llvm/lib/Target/R600/AMDGPUInstrInfo.h
index ce5b58c..d5041f5 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUInstrInfo.h
+++ b/contrib/llvm/lib/Target/R600/AMDGPUInstrInfo.h
@@ -33,7 +33,7 @@
namespace llvm {
-class AMDGPUTargetMachine;
+class AMDGPUSubtarget;
class MachineFunction;
class MachineInstr;
class MachineInstrBuilder;
@@ -45,21 +45,22 @@ private:
MachineBasicBlock &MBB) const;
virtual void anchor();
protected:
- TargetMachine &TM;
+ const AMDGPUSubtarget &ST;
public:
- explicit AMDGPUInstrInfo(TargetMachine &tm);
+ explicit AMDGPUInstrInfo(const AMDGPUSubtarget &st);
virtual const AMDGPURegisterInfo &getRegisterInfo() const = 0;
bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
- unsigned &DstReg, unsigned &SubIdx) const;
+ unsigned &DstReg, unsigned &SubIdx) const override;
- unsigned isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
+ unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
- int &FrameIndex) const;
+ int &FrameIndex) const override;
bool hasLoadFromStackSlot(const MachineInstr *MI,
const MachineMemOperand *&MMO,
- int &FrameIndex) const;
+ int &FrameIndex) const override;
unsigned isStoreFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
unsigned isStoreFromStackSlotPostFE(const MachineInstr *MI,
int &FrameIndex) const;
@@ -70,7 +71,7 @@ public:
MachineInstr *
convertToThreeAddress(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI,
- LiveVariables *LV) const;
+ LiveVariables *LV) const override;
virtual void copyPhysReg(MachineBasicBlock &MBB,
@@ -78,71 +79,64 @@ public:
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const = 0;
+ bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
+
void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, bool isKill, int FrameIndex,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo *TRI) const override;
void loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
- virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;
-
+ const TargetRegisterInfo *TRI) const override;
protected:
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const;
+ int FrameIndex) const override;
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops,
- MachineInstr *LoadMI) const;
+ MachineInstr *LoadMI) const override;
/// \returns the smallest register index that will be accessed by an indirect
/// read or write, or -1 if indirect addressing is not used by this program.
- virtual int getIndirectIndexBegin(const MachineFunction &MF) const;
+ int getIndirectIndexBegin(const MachineFunction &MF) const;
/// \returns the largest register index that will be accessed by an indirect
/// read or write, or -1 if indirect addressing is not used by this program.
- virtual int getIndirectIndexEnd(const MachineFunction &MF) const;
+ int getIndirectIndexEnd(const MachineFunction &MF) const;
public:
bool canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const;
+ const SmallVectorImpl<unsigned> &Ops) const override;
bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
- unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
- SmallVectorImpl<MachineInstr *> &NewMIs) const;
+ unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
+ SmallVectorImpl<MachineInstr *> &NewMIs) const override;
bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
- SmallVectorImpl<SDNode *> &NewNodes) const;
+ SmallVectorImpl<SDNode *> &NewNodes) const override;
unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
- bool UnfoldLoad, bool UnfoldStore,
- unsigned *LoadRegIndex = 0) const;
+ bool UnfoldLoad, bool UnfoldStore,
+ unsigned *LoadRegIndex = nullptr) const override;
bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
int64_t Offset1, int64_t Offset2,
- unsigned NumLoads) const;
+ unsigned NumLoads) const override;
- bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
+ bool
+ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
void insertNoop(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const;
- bool isPredicated(const MachineInstr *MI) const;
+ MachineBasicBlock::iterator MI) const override;
+ bool isPredicated(const MachineInstr *MI) const override;
bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2) const;
+ const SmallVectorImpl<MachineOperand> &Pred2) const override;
bool DefinesPredicate(MachineInstr *MI,
- std::vector<MachineOperand> &Pred) const;
- bool isPredicable(MachineInstr *MI) const;
- bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const;
+ std::vector<MachineOperand> &Pred) const override;
+ bool isPredicable(MachineInstr *MI) const override;
+ bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override;
// Helper functions that check the opcode for status information
- bool isLoadInst(llvm::MachineInstr *MI) const;
- bool isExtLoadInst(llvm::MachineInstr *MI) const;
- bool isSWSExtLoadInst(llvm::MachineInstr *MI) const;
- bool isSExtLoadInst(llvm::MachineInstr *MI) const;
- bool isZExtLoadInst(llvm::MachineInstr *MI) const;
- bool isAExtLoadInst(llvm::MachineInstr *MI) const;
- bool isStoreInst(llvm::MachineInstr *MI) const;
- bool isTruncStoreInst(llvm::MachineInstr *MI) const;
bool isRegisterStore(const MachineInstr &MI) const;
bool isRegisterLoad(const MachineInstr &MI) const;
@@ -150,7 +144,6 @@ public:
// Pure virtual functions to be implemented by sub-classes.
//===---------------------------------------------------------------------===//
- virtual unsigned getIEQOpcode() const = 0;
virtual bool isMov(unsigned opcode) const = 0;
/// \brief Calculate the "Indirect Address" for the given \p RegIndex and
@@ -183,12 +176,6 @@ public:
unsigned ValueReg, unsigned Address,
unsigned OffsetReg) const = 0;
-
- /// \brief Convert the AMDIL MachineInstr to a supported ISA
- /// MachineInstr
- virtual void convertToISA(MachineInstr & MI, MachineFunction &MF,
- DebugLoc DL) const;
-
/// \brief Build a MOV instruction.
virtual MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
MachineBasicBlock::iterator I,
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUInstrInfo.td b/contrib/llvm/lib/Target/R600/AMDGPUInstrInfo.td
index fccede0..820f1a8 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUInstrInfo.td
+++ b/contrib/llvm/lib/Target/R600/AMDGPUInstrInfo.td
@@ -19,6 +19,14 @@ def AMDGPUDTIntTernaryOp : SDTypeProfile<1, 3, [
SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisInt<0>, SDTCisInt<3>
]>;
+def AMDGPUTrigPreOp : SDTypeProfile<1, 2,
+ [SDTCisSameAs<0, 1>, SDTCisFP<0>, SDTCisInt<2>]
+>;
+
+def AMDGPUDivScaleOp : SDTypeProfile<2, 3,
+ [SDTCisFP<0>, SDTCisInt<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisSameAs<0, 4>]
+>;
+
//===----------------------------------------------------------------------===//
// AMDGPU DAG Nodes
//
@@ -26,14 +34,31 @@ def AMDGPUDTIntTernaryOp : SDTypeProfile<1, 3, [
// The argument to this node is a dword address.
def AMDGPUdwordaddr : SDNode<"AMDGPUISD::DWORDADDR", SDTIntUnaryOp>;
+def AMDGPUcos : SDNode<"AMDGPUISD::COS_HW", SDTFPUnaryOp>;
+def AMDGPUsin : SDNode<"AMDGPUISD::SIN_HW", SDTFPUnaryOp>;
+
// out = a - floor(a)
def AMDGPUfract : SDNode<"AMDGPUISD::FRACT", SDTFPUnaryOp>;
+// out = 1.0 / a
+def AMDGPUrcp : SDNode<"AMDGPUISD::RCP", SDTFPUnaryOp>;
+
+// out = 1.0 / sqrt(a)
+def AMDGPUrsq : SDNode<"AMDGPUISD::RSQ", SDTFPUnaryOp>;
+
+// out = 1.0 / sqrt(a)
+def AMDGPUrsq_legacy : SDNode<"AMDGPUISD::RSQ_LEGACY", SDTFPUnaryOp>;
+
+// out = 1.0 / sqrt(a) result clamped to +/- max_float.
+def AMDGPUrsq_clamped : SDNode<"AMDGPUISD::RSQ_CLAMPED", SDTFPUnaryOp>;
+
// out = max(a, b) a and b are floats
def AMDGPUfmax : SDNode<"AMDGPUISD::FMAX", SDTFPBinOp,
[SDNPCommutative, SDNPAssociative]
>;
+def AMDGPUclamp : SDNode<"AMDGPUISD::CLAMP", SDTFPTernaryOp, []>;
+
// out = max(a, b) a and b are signed ints
def AMDGPUsmax : SDNode<"AMDGPUISD::SMAX", SDTIntBinOp,
[SDNPCommutative, SDNPAssociative]
@@ -59,12 +84,38 @@ def AMDGPUumin : SDNode<"AMDGPUISD::UMIN", SDTIntBinOp,
[SDNPCommutative, SDNPAssociative]
>;
+
+def AMDGPUcvt_f32_ubyte0 : SDNode<"AMDGPUISD::CVT_F32_UBYTE0",
+ SDTIntToFPOp, []>;
+def AMDGPUcvt_f32_ubyte1 : SDNode<"AMDGPUISD::CVT_F32_UBYTE1",
+ SDTIntToFPOp, []>;
+def AMDGPUcvt_f32_ubyte2 : SDNode<"AMDGPUISD::CVT_F32_UBYTE2",
+ SDTIntToFPOp, []>;
+def AMDGPUcvt_f32_ubyte3 : SDNode<"AMDGPUISD::CVT_F32_UBYTE3",
+ SDTIntToFPOp, []>;
+
+
+// urecip - This operation is a helper for integer division; it returns the
// result of 1 / a as a fractional unsigned integer.
// out = (2^32 / a) + e
// e is rounding error
def AMDGPUurecip : SDNode<"AMDGPUISD::URECIP", SDTIntUnaryOp>;
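As a quick cross-check of the fixed-point reciprocal described above, the arithmetic can be reproduced on the host. This is an illustration only, with made-up helper names, not code from the patch.

#include <cstdint>
#include <cstdio>

// 0.32 fixed-point reciprocal: floor(2^32 / a); the hardware adds a small
// rounding error e on top of this.
static uint32_t urecip(uint32_t a) {
  return static_cast<uint32_t>((static_cast<uint64_t>(1) << 32) / a);
}

int main() {
  uint32_t r = urecip(10);                               // ~0.1 in 0.32 fixed point
  uint64_t q = (static_cast<uint64_t>(r) * 1000) >> 32;  // approximates 1000 / 10, off by at most 1
  std::printf("recip=0x%08X q=%llu\n", static_cast<unsigned>(r),
              static_cast<unsigned long long>(q));
  return 0;
}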
+// Special case divide preop and flags.
+def AMDGPUdiv_scale : SDNode<"AMDGPUISD::DIV_SCALE", AMDGPUDivScaleOp>;
+
+// Special case divide FMA with scale and flags (src0 = Quotient,
+// src1 = Denominator, src2 = Numerator).
+def AMDGPUdiv_fmas : SDNode<"AMDGPUISD::DIV_FMAS", SDTFPTernaryOp>;
+
+// Single or double precision division fixup.
+// Special case divide fixup and flags (src0 = Quotient, src1 =
+// Denominator, src2 = Numerator).
+def AMDGPUdiv_fixup : SDNode<"AMDGPUISD::DIV_FIXUP", SDTFPTernaryOp>;
+
+// Look Up 2.0 / pi src0 with segment select src1[4:0]
+def AMDGPUtrig_preop : SDNode<"AMDGPUISD::TRIG_PREOP", AMDGPUTrigPreOp>;
+
def AMDGPUregister_load : SDNode<"AMDGPUISD::REGISTER_LOAD",
SDTypeProfile<1, 2, [SDTCisPtrTy<1>, SDTCisInt<2>]>,
[SDNPHasChain, SDNPMayLoad]>;
@@ -86,3 +137,45 @@ def AMDGPUstore_mskor : SDNode<"AMDGPUISD::STORE_MSKOR",
def AMDGPUround : SDNode<"ISD::FROUND",
SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>>;
+
+def AMDGPUbfe_u32 : SDNode<"AMDGPUISD::BFE_U32", AMDGPUDTIntTernaryOp>;
+def AMDGPUbfe_i32 : SDNode<"AMDGPUISD::BFE_I32", AMDGPUDTIntTernaryOp>;
+def AMDGPUbfi : SDNode<"AMDGPUISD::BFI", AMDGPUDTIntTernaryOp>;
+def AMDGPUbfm : SDNode<"AMDGPUISD::BFM", SDTIntBinOp>;
+
+def AMDGPUbrev : SDNode<"AMDGPUISD::BREV", SDTIntUnaryOp>;
+
+// Signed and unsigned 24-bit multiply. The highest 8 bits are ignored when
+// performing the multiply. The result is a 32-bit value.
+def AMDGPUmul_u24 : SDNode<"AMDGPUISD::MUL_U24", SDTIntBinOp,
+ [SDNPCommutative]
+>;
+def AMDGPUmul_i24 : SDNode<"AMDGPUISD::MUL_I24", SDTIntBinOp,
+ [SDNPCommutative]
+>;
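The 24-bit multiply semantics in the comment above can be mirrored in plain C++ for reference. The sketch below is illustrative only and is not part of the commit.

#include <cassert>
#include <cstdint>

// Sign-extend the low 24 bits of v (the hardware ignores the top 8 bits).
static int32_t sext24(uint32_t v) {
  v &= 0xFFFFFF;
  return (v & 0x800000) ? static_cast<int32_t>(v) - 0x1000000
                        : static_cast<int32_t>(v);
}

static uint32_t mul_u24(uint32_t a, uint32_t b) {
  return (a & 0xFFFFFF) * (b & 0xFFFFFF);   // full 32-bit result, wraps mod 2^32
}

static int32_t mul_i24(int32_t a, int32_t b) {
  int64_t p = static_cast<int64_t>(sext24(a)) * sext24(b);
  return static_cast<int32_t>(p);           // keep the low 32 bits
}

int main() {
  assert(mul_u24(0xFF000003u, 5u) == 15u);  // high byte of src0 is ignored
  assert(mul_i24(0x00FFFFFF, 2) == -2);     // 0xFFFFFF reads as -1 in 24 bits
  return 0;
}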
+
+def AMDGPUmad_u24 : SDNode<"AMDGPUISD::MAD_U24", AMDGPUDTIntTernaryOp,
+ []
+>;
+def AMDGPUmad_i24 : SDNode<"AMDGPUISD::MAD_I24", AMDGPUDTIntTernaryOp,
+ []
+>;
+
+//===----------------------------------------------------------------------===//
+// Flow Control Profile Types
+//===----------------------------------------------------------------------===//
+// Branch instruction where second and third are basic blocks
+def SDTIL_BRCond : SDTypeProfile<0, 2, [
+ SDTCisVT<0, OtherVT>
+ ]>;
+
+//===----------------------------------------------------------------------===//
+// Flow Control DAG Nodes
+//===----------------------------------------------------------------------===//
+def IL_brcond : SDNode<"AMDGPUISD::BRANCH_COND", SDTIL_BRCond, [SDNPHasChain]>;
+
+//===----------------------------------------------------------------------===//
+// Call/Return DAG Nodes
+//===----------------------------------------------------------------------===//
+def IL_retflag : SDNode<"AMDGPUISD::RET_FLAG", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue]>;
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUInstructions.td b/contrib/llvm/lib/Target/R600/AMDGPUInstructions.td
index 7acd673..cd35603 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUInstructions.td
+++ b/contrib/llvm/lib/Target/R600/AMDGPUInstructions.td
@@ -34,9 +34,34 @@ class AMDGPUShaderInst <dag outs, dag ins, string asm, list<dag> pattern>
}
+def FP32Denormals : Predicate<"Subtarget.hasFP32Denormals()">;
+def FP64Denormals : Predicate<"Subtarget.hasFP64Denormals()">;
+def UnsafeFPMath : Predicate<"TM.Options.UnsafeFPMath">;
+
def InstFlag : OperandWithDefaultOps <i32, (ops (i32 0))>;
def ADDRIndirect : ComplexPattern<iPTR, 2, "SelectADDRIndirect", [], []>;
+let OperandType = "OPERAND_IMMEDIATE" in {
+
+def u32imm : Operand<i32> {
+ let PrintMethod = "printU32ImmOperand";
+}
+
+def u16imm : Operand<i16> {
+ let PrintMethod = "printU16ImmOperand";
+}
+
+def u8imm : Operand<i8> {
+ let PrintMethod = "printU8ImmOperand";
+}
+
+} // End OperandType = "OPERAND_IMMEDIATE"
+
+//===--------------------------------------------------------------------===//
+// Custom Operands
+//===--------------------------------------------------------------------===//
+def brtarget : Operand<OtherVT>;
+
//===----------------------------------------------------------------------===//
// PatLeafs for floating-point comparisons
//===----------------------------------------------------------------------===//
@@ -115,6 +140,43 @@ def COND_NULL : PatLeaf <
// Load/Store Pattern Fragments
//===----------------------------------------------------------------------===//
+class PrivateMemOp <dag ops, dag frag> : PatFrag <ops, frag, [{
+ return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
+}]>;
+
+class PrivateLoad <SDPatternOperator op> : PrivateMemOp <
+ (ops node:$ptr), (op node:$ptr)
+>;
+
+class PrivateStore <SDPatternOperator op> : PrivateMemOp <
+ (ops node:$value, node:$ptr), (op node:$value, node:$ptr)
+>;
+
+def extloadi8_private : PrivateLoad <extloadi8>;
+def sextloadi8_private : PrivateLoad <sextloadi8>;
+def extloadi16_private : PrivateLoad <extloadi16>;
+def sextloadi16_private : PrivateLoad <sextloadi16>;
+def load_private : PrivateLoad <load>;
+
+def truncstorei8_private : PrivateStore <truncstorei8>;
+def truncstorei16_private : PrivateStore <truncstorei16>;
+def store_private : PrivateStore <store>;
+
+def global_store : PatFrag<(ops node:$val, node:$ptr),
+ (store node:$val, node:$ptr), [{
+ return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+
+// Global address space loads
+def global_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ return isGlobalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+// Constant address space loads
+def constant_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
+}]>;
+
def az_extload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
LoadSDNode *L = cast<LoadSDNode>(N);
return L->getExtensionType() == ISD::ZEXTLOAD ||
@@ -220,26 +282,55 @@ def local_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;
-def atomic_load_add_local : PatFrag<(ops node:$ptr, node:$value),
- (atomic_load_add node:$ptr, node:$value), [{
- return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
-}]>;
-def atomic_load_sub_local : PatFrag<(ops node:$ptr, node:$value),
- (atomic_load_sub node:$ptr, node:$value), [{
- return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
+class local_binary_atomic_op<SDNode atomic_op> :
+ PatFrag<(ops node:$ptr, node:$value),
+ (atomic_op node:$ptr, node:$value), [{
+ return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;
+
+def atomic_swap_local : local_binary_atomic_op<atomic_swap>;
+def atomic_load_add_local : local_binary_atomic_op<atomic_load_add>;
+def atomic_load_sub_local : local_binary_atomic_op<atomic_load_sub>;
+def atomic_load_and_local : local_binary_atomic_op<atomic_load_and>;
+def atomic_load_or_local : local_binary_atomic_op<atomic_load_or>;
+def atomic_load_xor_local : local_binary_atomic_op<atomic_load_xor>;
+def atomic_load_nand_local : local_binary_atomic_op<atomic_load_nand>;
+def atomic_load_min_local : local_binary_atomic_op<atomic_load_min>;
+def atomic_load_max_local : local_binary_atomic_op<atomic_load_max>;
+def atomic_load_umin_local : local_binary_atomic_op<atomic_load_umin>;
+def atomic_load_umax_local : local_binary_atomic_op<atomic_load_umax>;
+
def mskor_global : PatFrag<(ops node:$val, node:$ptr),
(AMDGPUstore_mskor node:$val, node:$ptr), [{
return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}]>;
+def atomic_cmp_swap_32_local :
+ PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
+ (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
+ AtomicSDNode *AN = cast<AtomicSDNode>(N);
+ return AN->getMemoryVT() == MVT::i32 &&
+ AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
+}]>;
+
+def atomic_cmp_swap_64_local :
+ PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
+ (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
+ AtomicSDNode *AN = cast<AtomicSDNode>(N);
+ return AN->getMemoryVT() == MVT::i64 &&
+ AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
+}]>;
+
+
class Constants {
int TWO_PI = 0x40c90fdb;
int PI = 0x40490fdb;
int TWO_PI_INV = 0x3e22f983;
int FP_UINT_MAX_PLUS_1 = 0x4f800000; // 1 << 32 in floating point encoding
+int FP32_NEG_ONE = 0xbf800000;
+int FP32_ONE = 0x3f800000;
}
def CONST : Constants;
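For anyone cross-checking the hex literals in the Constants class, a throwaway host program can confirm they are the expected IEEE-754 single-precision bit patterns. This is illustrative only, not part of the patch, and it assumes a <cmath> that provides M_PI.

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

static uint32_t bitsOf(float f) {
  uint32_t u;
  std::memcpy(&u, &f, sizeof(u));
  return u;
}

int main() {
  assert(bitsOf(static_cast<float>(2.0 * M_PI)) == 0x40c90fdbu);  // TWO_PI
  assert(bitsOf(static_cast<float>(M_PI))       == 0x40490fdbu);  // PI
  assert(bitsOf(4294967296.0f)                  == 0x4f800000u);  // 1 << 32 as a float
  assert(bitsOf(1.0f)                           == 0x3f800000u);  // FP32_ONE
  assert(bitsOf(-1.0f)                          == 0xbf800000u);  // FP32_NEG_ONE
  return 0;
}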
@@ -253,9 +344,6 @@ def FP_ONE : PatLeaf <
[{return N->isExactlyValue(1.0);}]
>;
-def U24 : ComplexPattern<i32, 1, "SelectU24", [], []>;
-def I24 : ComplexPattern<i32, 1, "SelectI24", [], []>;
-
let isCodeGenOnly = 1, isPseudo = 1 in {
let usesCustomInserter = 1 in {
@@ -264,7 +352,7 @@ class CLAMP <RegisterClass rc> : AMDGPUShaderInst <
(outs rc:$dst),
(ins rc:$src0),
"CLAMP $dst, $src0",
- [(set f32:$dst, (int_AMDIL_clamp f32:$src0, (f32 FP_ZERO), (f32 FP_ONE)))]
+ [(set f32:$dst, (AMDGPUclamp f32:$src0, (f32 FP_ZERO), (f32 FP_ONE)))]
>;
class FABS <RegisterClass rc> : AMDGPUShaderInst <
@@ -322,7 +410,7 @@ class POW_Common <AMDGPUInst log_ieee, AMDGPUInst exp_ieee, AMDGPUInst mul>
/* --------------------- */
/* Extract element pattern */
-class Extract_Element <ValueType sub_type, ValueType vec_type, int sub_idx,
+class Extract_Element <ValueType sub_type, ValueType vec_type, int sub_idx,
SubRegIndex sub_reg>
: Pat<
(sub_type (vector_extract vec_type:$src, sub_idx)),
@@ -337,12 +425,6 @@ class Insert_Element <ValueType elem_type, ValueType vec_type,
(INSERT_SUBREG $vec, $elem, sub_reg)
>;
-class Vector4_Build <ValueType vecType, ValueType elemType> : Pat <
- (vecType (build_vector elemType:$x, elemType:$y, elemType:$z, elemType:$w)),
- (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG
- (vecType (IMPLICIT_DEF)), $x, sub0), $y, sub1), $z, sub2), $w, sub3)
->;
-
// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
// bitconvert pattern
@@ -360,7 +442,7 @@ class DwordAddrPat<ValueType vt, RegisterClass rc> : Pat <
// BFI_INT patterns
-multiclass BFIPatterns <Instruction BFI_INT> {
+multiclass BFIPatterns <Instruction BFI_INT, Instruction LoadImm32> {
// Definition from ISA doc:
// (y & x) | (z & ~x)
@@ -376,6 +458,19 @@ multiclass BFIPatterns <Instruction BFI_INT> {
(BFI_INT $x, $y, $z)
>;
+ def : Pat <
+ (fcopysign f32:$src0, f32:$src1),
+ (BFI_INT (LoadImm32 0x7fffffff), $src0, $src1)
+ >;
+
+ def : Pat <
+ (f64 (fcopysign f64:$src0, f64:$src1)),
+ (INSERT_SUBREG (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+ (i32 (EXTRACT_SUBREG $src0, sub0)), sub0),
+ (BFI_INT (LoadImm32 0x7fffffff),
+ (i32 (EXTRACT_SUBREG $src0, sub1)),
+ (i32 (EXTRACT_SUBREG $src1, sub1))), sub1)
+ >;
}
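The fcopysign patterns above rely on the BFI identity from the ISA doc comment, (y & x) | (z & ~x): with the mask 0x7fffffff the magnitude bits come from src0 and the sign bit from src1. A small host-side check of that identity, illustrative only and not part of the patch:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// BFI as defined in the ISA doc comment: mask x selects bits from y, else from z.
static uint32_t bfi(uint32_t x, uint32_t y, uint32_t z) {
  return (y & x) | (z & ~x);
}

int main() {
  float mag = 2.5f, sign = -1.0f, out;
  uint32_t mb, sb;
  std::memcpy(&mb, &mag, sizeof(mb));
  std::memcpy(&sb, &sign, sizeof(sb));
  uint32_t ob = bfi(0x7fffffffu, mb, sb);   // magnitude from mag, sign bit from sign
  std::memcpy(&out, &ob, sizeof(out));
  assert(out == std::copysign(mag, sign));  // -2.5f
  return 0;
}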
// SHA-256 Ma patterns
@@ -420,7 +515,61 @@ class UMUL24Pattern <Instruction UMUL24> : Pat <
>;
*/
+class IMad24Pat<Instruction Inst> : Pat <
+ (add (AMDGPUmul_i24 i32:$src0, i32:$src1), i32:$src2),
+ (Inst $src0, $src1, $src2)
+>;
+
+class UMad24Pat<Instruction Inst> : Pat <
+ (add (AMDGPUmul_u24 i32:$src0, i32:$src1), i32:$src2),
+ (Inst $src0, $src1, $src2)
+>;
+
+multiclass Expand24IBitOps<Instruction MulInst, Instruction AddInst> {
+ def _expand_imad24 : Pat <
+ (AMDGPUmad_i24 i32:$src0, i32:$src1, i32:$src2),
+ (AddInst (MulInst $src0, $src1), $src2)
+ >;
+
+ def _expand_imul24 : Pat <
+ (AMDGPUmul_i24 i32:$src0, i32:$src1),
+ (MulInst $src0, $src1)
+ >;
+}
+
+multiclass Expand24UBitOps<Instruction MulInst, Instruction AddInst> {
+ def _expand_umad24 : Pat <
+ (AMDGPUmad_u24 i32:$src0, i32:$src1, i32:$src2),
+ (AddInst (MulInst $src0, $src1), $src2)
+ >;
+
+ def _expand_umul24 : Pat <
+ (AMDGPUmul_u24 i32:$src0, i32:$src1),
+ (MulInst $src0, $src1)
+ >;
+}
+
+class RcpPat<Instruction RcpInst, ValueType vt> : Pat <
+ (fdiv FP_ONE, vt:$src),
+ (RcpInst $src)
+>;
+
+multiclass RsqPat<Instruction RsqInst, ValueType vt> {
+ def : Pat <
+ (fdiv FP_ONE, (fsqrt vt:$src)),
+ (RsqInst $src)
+ >;
+
+ def : Pat <
+ (AMDGPUrcp (fsqrt vt:$src)),
+ (RsqInst $src)
+ >;
+}
+
include "R600Instructions.td"
+include "R700Instructions.td"
+include "EvergreenInstructions.td"
+include "CaymanInstructions.td"
include "SIInstrInfo.td"
diff --git a/contrib/llvm/lib/Target/R600/AMDILIntrinsicInfo.cpp b/contrib/llvm/lib/Target/R600/AMDGPUIntrinsicInfo.cpp
index 762ee39..58916a9 100644
--- a/contrib/llvm/lib/Target/R600/AMDILIntrinsicInfo.cpp
+++ b/contrib/llvm/lib/Target/R600/AMDGPUIntrinsicInfo.cpp
@@ -1,4 +1,4 @@
-//===- AMDILIntrinsicInfo.cpp - AMDGPU Intrinsic Information ------*- C++ -*-===//
+//===- AMDGPUIntrinsicInfo.cpp - AMDGPU Intrinsic Information ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,7 +12,7 @@
//
//===-----------------------------------------------------------------------===//
-#include "AMDILIntrinsicInfo.h"
+#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Intrinsics.h"
@@ -24,39 +24,37 @@ using namespace llvm;
#include "AMDGPUGenIntrinsics.inc"
#undef GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN
-AMDGPUIntrinsicInfo::AMDGPUIntrinsicInfo(TargetMachine *tm)
- : TargetIntrinsicInfo() {
-}
+AMDGPUIntrinsicInfo::AMDGPUIntrinsicInfo(TargetMachine *tm)
+ : TargetIntrinsicInfo() {}
-std::string
-AMDGPUIntrinsicInfo::getName(unsigned int IntrID, Type **Tys,
- unsigned int numTys) const {
- static const char* const names[] = {
+std::string AMDGPUIntrinsicInfo::getName(unsigned IntrID, Type **Tys,
+ unsigned numTys) const {
+ static const char *const names[] = {
#define GET_INTRINSIC_NAME_TABLE
#include "AMDGPUGenIntrinsics.inc"
#undef GET_INTRINSIC_NAME_TABLE
};
if (IntrID < Intrinsic::num_intrinsics) {
- return 0;
+ return nullptr;
}
- assert(IntrID < AMDGPUIntrinsic::num_AMDGPU_intrinsics
- && "Invalid intrinsic ID");
+ assert(IntrID < AMDGPUIntrinsic::num_AMDGPU_intrinsics &&
+ "Invalid intrinsic ID");
std::string Result(names[IntrID - Intrinsic::num_intrinsics]);
return Result;
}
-unsigned int
-AMDGPUIntrinsicInfo::lookupName(const char *Name, unsigned int Len) const {
+unsigned AMDGPUIntrinsicInfo::lookupName(const char *Name,
+ unsigned Len) const {
if (!StringRef(Name, Len).startswith("llvm."))
return 0; // All intrinsics start with 'llvm.'
#define GET_FUNCTION_RECOGNIZER
#include "AMDGPUGenIntrinsics.inc"
#undef GET_FUNCTION_RECOGNIZER
- AMDGPUIntrinsic::ID IntrinsicID
- = (AMDGPUIntrinsic::ID)Intrinsic::not_intrinsic;
+ AMDGPUIntrinsic::ID IntrinsicID =
+ (AMDGPUIntrinsic::ID)Intrinsic::not_intrinsic;
IntrinsicID = getIntrinsicForGCCBuiltin("AMDGPU", Name);
if (IntrinsicID != (AMDGPUIntrinsic::ID)Intrinsic::not_intrinsic) {
@@ -65,17 +63,15 @@ AMDGPUIntrinsicInfo::lookupName(const char *Name, unsigned int Len) const {
return 0;
}
-bool
-AMDGPUIntrinsicInfo::isOverloaded(unsigned id) const {
- // Overload Table
+bool AMDGPUIntrinsicInfo::isOverloaded(unsigned id) const {
+// Overload Table
#define GET_INTRINSIC_OVERLOAD_TABLE
#include "AMDGPUGenIntrinsics.inc"
#undef GET_INTRINSIC_OVERLOAD_TABLE
}
-Function*
-AMDGPUIntrinsicInfo::getDeclaration(Module *M, unsigned IntrID,
- Type **Tys,
- unsigned numTys) const {
+Function *AMDGPUIntrinsicInfo::getDeclaration(Module *M, unsigned IntrID,
+ Type **Tys,
+ unsigned numTys) const {
llvm_unreachable("Not implemented");
}
diff --git a/contrib/llvm/lib/Target/R600/AMDILIntrinsicInfo.h b/contrib/llvm/lib/Target/R600/AMDGPUIntrinsicInfo.h
index 35559e2..5be68a2 100644
--- a/contrib/llvm/lib/Target/R600/AMDILIntrinsicInfo.h
+++ b/contrib/llvm/lib/Target/R600/AMDGPUIntrinsicInfo.h
@@ -1,4 +1,4 @@
-//===- AMDILIntrinsicInfo.h - AMDGPU Intrinsic Information ------*- C++ -*-===//
+//===- AMDGPUIntrinsicInfo.h - AMDGPU Intrinsic Information ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,8 +11,8 @@
/// \brief Interface for the AMDGPU Implementation of the Intrinsic Info class.
//
//===-----------------------------------------------------------------------===//
-#ifndef AMDIL_INTRINSICS_H
-#define AMDIL_INTRINSICS_H
+#ifndef AMDGPU_INTRINSICINFO_H
+#define AMDGPU_INTRINSICINFO_H
#include "llvm/IR/Intrinsics.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
@@ -34,16 +34,15 @@ enum ID {
class AMDGPUIntrinsicInfo : public TargetIntrinsicInfo {
public:
AMDGPUIntrinsicInfo(TargetMachine *tm);
- std::string getName(unsigned int IntrId, Type **Tys = 0,
- unsigned int numTys = 0) const;
- unsigned int lookupName(const char *Name, unsigned int Len) const;
- bool isOverloaded(unsigned int IID) const;
- Function *getDeclaration(Module *M, unsigned int ID,
- Type **Tys = 0,
- unsigned int numTys = 0) const;
+ std::string getName(unsigned IntrId, Type **Tys = nullptr,
+ unsigned numTys = 0) const override;
+ unsigned lookupName(const char *Name, unsigned Len) const override;
+ bool isOverloaded(unsigned IID) const override;
+ Function *getDeclaration(Module *M, unsigned ID,
+ Type **Tys = nullptr,
+ unsigned numTys = 0) const override;
};
} // end namespace llvm
-#endif // AMDIL_INTRINSICS_H
-
+#endif // AMDGPU_INTRINSICINFO_H
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUIntrinsics.td b/contrib/llvm/lib/Target/R600/AMDGPUIntrinsics.td
index 9f975bf..eee9c29 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUIntrinsics.td
+++ b/contrib/llvm/lib/Target/R600/AMDGPUIntrinsics.td
@@ -13,23 +13,28 @@
let TargetPrefix = "AMDGPU", isTarget = 1 in {
- def int_AMDGPU_load_const : Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_load_imm : Intrinsic<[llvm_v4f32_ty], [llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_reserve_reg : Intrinsic<[], [llvm_i32_ty], [IntrNoMem]>;
def int_AMDGPU_store_output : Intrinsic<[], [llvm_float_ty, llvm_i32_ty], []>;
def int_AMDGPU_swizzle : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty], [IntrNoMem]>;
-
+ def int_AMDGPU_abs : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_AMDGPU_arl : Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
def int_AMDGPU_cndlt : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
def int_AMDGPU_div : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+ def int_AMDGPU_fract : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+ def int_AMDGPU_clamp : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
+
+ // This is named backwards (instead of rsq_legacy) so we don't have
+ // to define it with the public builtins intrinsics. This is a
+ // workaround for how intrinsic names are parsed. If the name is
+ // llvm.AMDGPU.rsq.legacy, the parser assumes that you meant
+  // llvm.AMDGPU.rsq.{f32 | f64} and incorrectly mangles the name.
+ def int_AMDGPU_legacy_rsq : Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
def int_AMDGPU_dp4 : Intrinsic<[llvm_float_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_AMDGPU_kill : Intrinsic<[], [llvm_float_ty], []>;
def int_AMDGPU_kilp : Intrinsic<[], [], []>;
def int_AMDGPU_lrp : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
def int_AMDGPU_mul : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
def int_AMDGPU_pow : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
- def int_AMDGPU_rcp : Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
- def int_AMDGPU_rsq : Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
def int_AMDGPU_seq : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
def int_AMDGPU_sgt : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
def int_AMDGPU_sge : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
@@ -49,9 +54,31 @@ let TargetPrefix = "AMDGPU", isTarget = 1 in {
def int_AMDGPU_imin : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_AMDGPU_umax : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_AMDGPU_umin : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_AMDGPU_umul24 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_AMDGPU_imul24 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_AMDGPU_imad24 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_AMDGPU_umad24 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_AMDGPU_cvt_f32_ubyte0 : Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_AMDGPU_cvt_f32_ubyte1 : Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_AMDGPU_cvt_f32_ubyte2 : Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_AMDGPU_cvt_f32_ubyte3 : Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_AMDGPU_cube : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
-
+ def int_AMDGPU_bfi : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_AMDGPU_bfe_i32 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_AMDGPU_bfe_u32 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_AMDGPU_bfm : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_AMDGPU_brev : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_AMDGPU_barrier_local : Intrinsic<[], [], []>;
+ def int_AMDGPU_barrier_global : Intrinsic<[], [], []>;
+}
+
+// Legacy names for compatibility.
+let TargetPrefix = "AMDIL", isTarget = 1 in {
+ def int_AMDIL_abs : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+ def int_AMDIL_fraction : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+ def int_AMDIL_clamp : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
+ def int_AMDIL_exp : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+ def int_AMDIL_round_nearest : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
}
let TargetPrefix = "TGSI", isTarget = 1 in {
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUMCInstLower.cpp b/contrib/llvm/lib/Target/R600/AMDGPUMCInstLower.cpp
index 0ed598e..ce5c41c 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUMCInstLower.cpp
+++ b/contrib/llvm/lib/Target/R600/AMDGPUMCInstLower.cpp
@@ -15,12 +15,16 @@
#include "AMDGPUMCInstLower.h"
#include "AMDGPUAsmPrinter.h"
+#include "AMDGPUTargetMachine.h"
#include "InstPrinter/AMDGPUInstPrinter.h"
#include "R600InstrInfo.h"
+#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/GlobalVariable.h"
#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectStreamer.h"
@@ -31,16 +35,30 @@
using namespace llvm;
-AMDGPUMCInstLower::AMDGPUMCInstLower(MCContext &ctx):
- Ctx(ctx)
+AMDGPUMCInstLower::AMDGPUMCInstLower(MCContext &ctx, const AMDGPUSubtarget &st):
+ Ctx(ctx), ST(st)
{ }
+enum AMDGPUMCInstLower::SISubtarget
+AMDGPUMCInstLower::AMDGPUSubtargetToSISubtarget(unsigned) const {
+ return AMDGPUMCInstLower::SI;
+}
+
+unsigned AMDGPUMCInstLower::getMCOpcode(unsigned MIOpcode) const {
+
+ int MCOpcode = AMDGPU::getMCOpcode(MIOpcode,
+ AMDGPUSubtargetToSISubtarget(ST.getGeneration()));
+ if (MCOpcode == -1)
+ MCOpcode = MIOpcode;
+
+ return MCOpcode;
+}
+
void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
- OutMI.setOpcode(MI->getOpcode());
- for (unsigned i = 0, e = MI->getNumExplicitOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
+ OutMI.setOpcode(getMCOpcode(MI->getOpcode()));
+ for (const MachineOperand &MO : MI->explicit_operands()) {
MCOperand MCOp;
switch (MO.getType()) {
default:
@@ -61,14 +79,36 @@ void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
case MachineOperand::MO_MachineBasicBlock:
MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
MO.getMBB()->getSymbol(), Ctx));
+ break;
+ case MachineOperand::MO_GlobalAddress: {
+ const GlobalValue *GV = MO.getGlobal();
+ MCSymbol *Sym = Ctx.GetOrCreateSymbol(StringRef(GV->getName()));
+ MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(Sym, Ctx));
+ break;
+ }
+ case MachineOperand::MO_TargetIndex: {
+ assert(MO.getIndex() == AMDGPU::TI_CONSTDATA_START);
+ MCSymbol *Sym = Ctx.GetOrCreateSymbol(StringRef(END_OF_TEXT_LABEL_NAME));
+ const MCSymbolRefExpr *Expr = MCSymbolRefExpr::Create(Sym, Ctx);
+ MCOp = MCOperand::CreateExpr(Expr);
+ break;
+ }
}
OutMI.addOperand(MCOp);
}
}
void AMDGPUAsmPrinter::EmitInstruction(const MachineInstr *MI) {
- AMDGPUMCInstLower MCInstLowering(OutContext);
+ AMDGPUMCInstLower MCInstLowering(OutContext,
+ MF->getTarget().getSubtarget<AMDGPUSubtarget>());
+#ifdef _DEBUG
+ StringRef Err;
+ if (!TM.getInstrInfo()->verifyInstruction(MI, Err)) {
+ errs() << "Warning: Illegal instruction detected: " << Err << "\n";
+ MI->dump();
+ }
+#endif
if (MI->isBundle()) {
const MachineBasicBlock *MBB = MI->getParent();
MachineBasicBlock::const_instr_iterator I = MI;
@@ -80,7 +120,7 @@ void AMDGPUAsmPrinter::EmitInstruction(const MachineInstr *MI) {
} else {
MCInst TmpInst;
MCInstLowering.lower(MI, TmpInst);
- OutStreamer.EmitInstruction(TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
if (DisasmEnabled) {
// Disassemble instruction/operands to text.
@@ -99,7 +139,8 @@ void AMDGPUAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCObjectStreamer &ObjStreamer = (MCObjectStreamer &)OutStreamer;
MCCodeEmitter &InstEmitter = ObjStreamer.getAssembler().getEmitter();
- InstEmitter.EncodeInstruction(TmpInst, CodeStream, Fixups);
+ InstEmitter.EncodeInstruction(TmpInst, CodeStream, Fixups,
+ TM.getSubtarget<MCSubtargetInfo>());
CodeStream.flush();
HexLines.resize(HexLines.size() + 1);
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUMCInstLower.h b/contrib/llvm/lib/Target/R600/AMDGPUMCInstLower.h
index d7d538e..58fe34d 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUMCInstLower.h
+++ b/contrib/llvm/lib/Target/R600/AMDGPUMCInstLower.h
@@ -13,16 +13,30 @@
namespace llvm {
-class MCInst;
-class MCContext;
+class AMDGPUSubtarget;
class MachineInstr;
+class MCContext;
+class MCInst;
class AMDGPUMCInstLower {
+ // This must be kept in sync with the SISubtarget class in SIInstrInfo.td
+ enum SISubtarget {
+ SI = 0
+ };
+
MCContext &Ctx;
+ const AMDGPUSubtarget &ST;
+
+ /// Convert a member of the AMDGPUSubtarget::Generation enum to the
+ /// SISubtarget enum.
+ enum SISubtarget AMDGPUSubtargetToSISubtarget(unsigned Gen) const;
+
+ /// Get the MC opcode for this MachineInstr.
+ unsigned getMCOpcode(unsigned MIOpcode) const;
public:
- AMDGPUMCInstLower(MCContext &ctx);
+ AMDGPUMCInstLower(MCContext &ctx, const AMDGPUSubtarget &ST);
/// \brief Lower a MachineInstr to an MCInst
void lower(const MachineInstr *MI, MCInst &OutMI) const;
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUMachineFunction.cpp b/contrib/llvm/lib/Target/R600/AMDGPUMachineFunction.cpp
index 14171f4..90af801 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUMachineFunction.cpp
+++ b/contrib/llvm/lib/Target/R600/AMDGPUMachineFunction.cpp
@@ -10,9 +10,9 @@ static const char *const ShaderTypeAttribute = "ShaderType";
void AMDGPUMachineFunction::anchor() {}
AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF) :
- MachineFunctionInfo() {
- ShaderType = ShaderType::COMPUTE;
- LDSSize = 0;
+ MachineFunctionInfo(),
+ ShaderType(ShaderType::COMPUTE),
+ LDSSize(0) {
AttributeSet Set = MF.getFunction()->getAttributes();
Attribute A = Set.getAttribute(AttributeSet::FunctionIndex,
ShaderTypeAttribute);
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUMachineFunction.h b/contrib/llvm/lib/Target/R600/AMDGPUMachineFunction.h
index fea0b39..0854d58 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUMachineFunction.h
+++ b/contrib/llvm/lib/Target/R600/AMDGPUMachineFunction.h
@@ -20,14 +20,19 @@ namespace llvm {
class AMDGPUMachineFunction : public MachineFunctionInfo {
virtual void anchor();
+ unsigned ShaderType;
+
public:
AMDGPUMachineFunction(const MachineFunction &MF);
- unsigned ShaderType;
/// A map to keep track of local memory objects and their offsets within
/// the local memory space.
std::map<const GlobalValue *, unsigned> LocalMemoryObjects;
/// Number of bytes in the LDS that are being used.
unsigned LDSSize;
+
+ unsigned getShaderType() const {
+ return ShaderType;
+ }
};
}
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUPromoteAlloca.cpp b/contrib/llvm/lib/Target/R600/AMDGPUPromoteAlloca.cpp
new file mode 100644
index 0000000..218750d
--- /dev/null
+++ b/contrib/llvm/lib/Target/R600/AMDGPUPromoteAlloca.cpp
@@ -0,0 +1,387 @@
+//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass eliminates allocas by either converting them into vectors or
+// by migrating them to local address space.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "AMDGPUSubtarget.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "amdgpu-promote-alloca"
+
+using namespace llvm;
+
+namespace {
+
+class AMDGPUPromoteAlloca : public FunctionPass,
+ public InstVisitor<AMDGPUPromoteAlloca> {
+
+ static char ID;
+ Module *Mod;
+ const AMDGPUSubtarget &ST;
+ int LocalMemAvailable;
+
+public:
+ AMDGPUPromoteAlloca(const AMDGPUSubtarget &st) : FunctionPass(ID), ST(st),
+ LocalMemAvailable(0) { }
+ virtual bool doInitialization(Module &M);
+ virtual bool runOnFunction(Function &F);
+ virtual const char *getPassName() const {
+ return "AMDGPU Promote Alloca";
+ }
+ void visitAlloca(AllocaInst &I);
+};
+
+} // End anonymous namespace
+
+char AMDGPUPromoteAlloca::ID = 0;
+
+bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
+ Mod = &M;
+ return false;
+}
+
+bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
+
+ const FunctionType *FTy = F.getFunctionType();
+
+ LocalMemAvailable = ST.getLocalMemorySize();
+
+ // If the function has any arguments in the local address space, then it's
+ // possible these arguments require the entire local memory space, so
+ // we cannot use local memory in the pass.
+ for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
+ const Type *ParamTy = FTy->getParamType(i);
+ if (ParamTy->isPointerTy() &&
+ ParamTy->getPointerAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
+ LocalMemAvailable = 0;
+ DEBUG(dbgs() << "Function has local memory argument. Promoting to "
+ "local memory disabled.\n");
+ break;
+ }
+ }
+
+ if (LocalMemAvailable > 0) {
+ // Check how much local memory is being used by global objects
+ for (Module::global_iterator I = Mod->global_begin(),
+ E = Mod->global_end(); I != E; ++I) {
+ GlobalVariable *GV = I;
+ PointerType *GVTy = GV->getType();
+ if (GVTy->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
+ continue;
+ for (Value::use_iterator U = GV->use_begin(),
+ UE = GV->use_end(); U != UE; ++U) {
+ Instruction *Use = dyn_cast<Instruction>(*U);
+ if (!Use)
+ continue;
+ if (Use->getParent()->getParent() == &F)
+ LocalMemAvailable -=
+ Mod->getDataLayout()->getTypeAllocSize(GVTy->getElementType());
+ }
+ }
+ }
+
+ LocalMemAvailable = std::max(0, LocalMemAvailable);
+  DEBUG(dbgs() << LocalMemAvailable << " bytes free in local memory.\n");
+
+ visit(F);
+
+ return false;
+}
+
+static VectorType *arrayTypeToVecType(const Type *ArrayTy) {
+ return VectorType::get(ArrayTy->getArrayElementType(),
+ ArrayTy->getArrayNumElements());
+}
+
+static Value* calculateVectorIndex(Value *Ptr,
+ std::map<GetElementPtrInst*, Value*> GEPIdx) {
+ if (isa<AllocaInst>(Ptr))
+ return Constant::getNullValue(Type::getInt32Ty(Ptr->getContext()));
+
+ GetElementPtrInst *GEP = cast<GetElementPtrInst>(Ptr);
+
+ return GEPIdx[GEP];
+}
+
+static Value* GEPToVectorIndex(GetElementPtrInst *GEP) {
+ // FIXME we only support simple cases
+ if (GEP->getNumOperands() != 3)
+ return NULL;
+
+ ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
+ if (!I0 || !I0->isZero())
+ return NULL;
+
+ return GEP->getOperand(2);
+}
+
+// Returns true if this instruction is one of the kinds handled below when
+// turning an alloca into a vector access.
+//
+// TODO: Check isTriviallyVectorizable for calls and handle other
+// instructions.
+static bool canVectorizeInst(Instruction *Inst) {
+ switch (Inst->getOpcode()) {
+ case Instruction::Load:
+ case Instruction::Store:
+ case Instruction::BitCast:
+ case Instruction::AddrSpaceCast:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {
+ Type *AllocaTy = Alloca->getAllocatedType();
+
+ DEBUG(dbgs() << "Alloca Candidate for vectorization \n");
+
+  // FIXME: There is no reason why we can't support larger arrays; we are
+  // just being conservative for now.
+ if (!AllocaTy->isArrayTy() ||
+ AllocaTy->getArrayElementType()->isVectorTy() ||
+ AllocaTy->getArrayNumElements() > 4) {
+
+ DEBUG(dbgs() << " Cannot convert type to vector");
+ return false;
+ }
+
+ std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
+ std::vector<Value*> WorkList;
+ for (User *AllocaUser : Alloca->users()) {
+ GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
+ if (!GEP) {
+ if (!canVectorizeInst(cast<Instruction>(AllocaUser)))
+ return false;
+
+ WorkList.push_back(AllocaUser);
+ continue;
+ }
+
+ Value *Index = GEPToVectorIndex(GEP);
+
+ // If we can't compute a vector index from this GEP, then we can't
+ // promote this alloca to vector.
+ if (!Index) {
+ DEBUG(dbgs() << " Cannot compute vector index for GEP " << *GEP << '\n');
+ return false;
+ }
+
+ GEPVectorIdx[GEP] = Index;
+ for (User *GEPUser : AllocaUser->users()) {
+ if (!canVectorizeInst(cast<Instruction>(GEPUser)))
+ return false;
+
+ WorkList.push_back(GEPUser);
+ }
+ }
+
+ VectorType *VectorTy = arrayTypeToVecType(AllocaTy);
+
+ DEBUG(dbgs() << " Converting alloca to vector "
+ << *AllocaTy << " -> " << *VectorTy << '\n');
+
+ for (std::vector<Value*>::iterator I = WorkList.begin(),
+ E = WorkList.end(); I != E; ++I) {
+ Instruction *Inst = cast<Instruction>(*I);
+ IRBuilder<> Builder(Inst);
+ switch (Inst->getOpcode()) {
+ case Instruction::Load: {
+ Value *Ptr = Inst->getOperand(0);
+ Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
+ Value *BitCast = Builder.CreateBitCast(Alloca, VectorTy->getPointerTo(0));
+ Value *VecValue = Builder.CreateLoad(BitCast);
+ Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
+ Inst->replaceAllUsesWith(ExtractElement);
+ Inst->eraseFromParent();
+ break;
+ }
+ case Instruction::Store: {
+ Value *Ptr = Inst->getOperand(1);
+ Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
+ Value *BitCast = Builder.CreateBitCast(Alloca, VectorTy->getPointerTo(0));
+ Value *VecValue = Builder.CreateLoad(BitCast);
+ Value *NewVecValue = Builder.CreateInsertElement(VecValue,
+ Inst->getOperand(0),
+ Index);
+ Builder.CreateStore(NewVecValue, BitCast);
+ Inst->eraseFromParent();
+ break;
+ }
+ case Instruction::BitCast:
+ case Instruction::AddrSpaceCast:
+ break;
+
+ default:
+ Inst->dump();
+ llvm_unreachable("Inconsistency in instructions promotable to vector");
+ }
+ }
+ return true;
+}
+
+static void collectUsesWithPtrTypes(Value *Val, std::vector<Value*> &WorkList) {
+ for (User *User : Val->users()) {
+ if(std::find(WorkList.begin(), WorkList.end(), User) != WorkList.end())
+ continue;
+ if (isa<CallInst>(User)) {
+ WorkList.push_back(User);
+ continue;
+ }
+ if (!User->getType()->isPointerTy())
+ continue;
+ WorkList.push_back(User);
+ collectUsesWithPtrTypes(User, WorkList);
+ }
+}
+
+void AMDGPUPromoteAlloca::visitAlloca(AllocaInst &I) {
+ IRBuilder<> Builder(&I);
+
+ // First try to replace the alloca with a vector
+ Type *AllocaTy = I.getAllocatedType();
+
+ DEBUG(dbgs() << "Trying to promote " << I << '\n');
+
+ if (tryPromoteAllocaToVector(&I))
+ return;
+
+ DEBUG(dbgs() << " alloca is not a candidate for vectorization.\n");
+
+  // FIXME: This is the maximum work group size. We should try to get the
+  // value from the reqd_work_group_size function attribute if it is
+ // available.
+ unsigned WorkGroupSize = 256;
+ int AllocaSize = WorkGroupSize *
+ Mod->getDataLayout()->getTypeAllocSize(AllocaTy);
+
+ if (AllocaSize > LocalMemAvailable) {
+ DEBUG(dbgs() << " Not enough local memory to promote alloca.\n");
+ return;
+ }
+
+ DEBUG(dbgs() << "Promoting alloca to local memory\n");
+ LocalMemAvailable -= AllocaSize;
+
+ GlobalVariable *GV = new GlobalVariable(
+ *Mod, ArrayType::get(I.getAllocatedType(), 256), false,
+ GlobalValue::ExternalLinkage, 0, I.getName(), 0,
+ GlobalVariable::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS);
+
+ FunctionType *FTy = FunctionType::get(
+ Type::getInt32Ty(Mod->getContext()), false);
+ AttributeSet AttrSet;
+  // AttributeSet::addAttribute returns an updated set rather than mutating
+  // this one in place, so keep the result.
+  AttrSet = AttrSet.addAttribute(Mod->getContext(), 0, Attribute::ReadNone);
+
+ Value *ReadLocalSizeY = Mod->getOrInsertFunction(
+ "llvm.r600.read.local.size.y", FTy, AttrSet);
+ Value *ReadLocalSizeZ = Mod->getOrInsertFunction(
+ "llvm.r600.read.local.size.z", FTy, AttrSet);
+ Value *ReadTIDIGX = Mod->getOrInsertFunction(
+ "llvm.r600.read.tidig.x", FTy, AttrSet);
+ Value *ReadTIDIGY = Mod->getOrInsertFunction(
+ "llvm.r600.read.tidig.y", FTy, AttrSet);
+ Value *ReadTIDIGZ = Mod->getOrInsertFunction(
+ "llvm.r600.read.tidig.z", FTy, AttrSet);
+
+ Value *TCntY = Builder.CreateCall(ReadLocalSizeY);
+ Value *TCntZ = Builder.CreateCall(ReadLocalSizeZ);
+ Value *TIdX = Builder.CreateCall(ReadTIDIGX);
+ Value *TIdY = Builder.CreateCall(ReadTIDIGY);
+ Value *TIdZ = Builder.CreateCall(ReadTIDIGZ);
+
+ Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ);
+ Tmp0 = Builder.CreateMul(Tmp0, TIdX);
+ Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ);
+ Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
+ TID = Builder.CreateAdd(TID, TIdZ);
+
+ std::vector<Value*> Indices;
+ Indices.push_back(Constant::getNullValue(Type::getInt32Ty(Mod->getContext())));
+ Indices.push_back(TID);
+
+ Value *Offset = Builder.CreateGEP(GV, Indices);
+ I.mutateType(Offset->getType());
+ I.replaceAllUsesWith(Offset);
+ I.eraseFromParent();
+
+ std::vector<Value*> WorkList;
+
+ collectUsesWithPtrTypes(Offset, WorkList);
+
+ for (std::vector<Value*>::iterator i = WorkList.begin(),
+ e = WorkList.end(); i != e; ++i) {
+ Value *V = *i;
+ CallInst *Call = dyn_cast<CallInst>(V);
+ if (!Call) {
+ Type *EltTy = V->getType()->getPointerElementType();
+ PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);
+ V->mutateType(NewTy);
+ continue;
+ }
+
+ IntrinsicInst *Intr = dyn_cast<IntrinsicInst>(Call);
+ if (!Intr) {
+ std::vector<Type*> ArgTypes;
+ for (unsigned ArgIdx = 0, ArgEnd = Call->getNumArgOperands();
+ ArgIdx != ArgEnd; ++ArgIdx) {
+ ArgTypes.push_back(Call->getArgOperand(ArgIdx)->getType());
+ }
+ Function *F = Call->getCalledFunction();
+ FunctionType *NewType = FunctionType::get(Call->getType(), ArgTypes,
+ F->isVarArg());
+ Constant *C = Mod->getOrInsertFunction(StringRef(F->getName().str() + ".local"), NewType,
+ F->getAttributes());
+ Function *NewF = cast<Function>(C);
+ Call->setCalledFunction(NewF);
+ continue;
+ }
+
+ Builder.SetInsertPoint(Intr);
+ switch (Intr->getIntrinsicID()) {
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ // These intrinsics are for address space 0 only
+ Intr->eraseFromParent();
+ continue;
+ case Intrinsic::memcpy: {
+ MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
+ Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getRawSource(),
+ MemCpy->getLength(), MemCpy->getAlignment(),
+ MemCpy->isVolatile());
+ Intr->eraseFromParent();
+ continue;
+ }
+ case Intrinsic::memset: {
+ MemSetInst *MemSet = cast<MemSetInst>(Intr);
+ Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
+ MemSet->getLength(), MemSet->getAlignment(),
+ MemSet->isVolatile());
+ Intr->eraseFromParent();
+ continue;
+ }
+ default:
+ Intr->dump();
+ llvm_unreachable("Don't know how to promote alloca intrinsic use.");
+ }
+ }
+}
+
+FunctionPass *llvm::createAMDGPUPromoteAlloca(const AMDGPUSubtarget &ST) {
+ return new AMDGPUPromoteAlloca(ST);
+}
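For readers following visitAlloca above: the pass flattens the 3D local id to tid.x * cnt.y * cnt.z + tid.y * cnt.z + tid.z and indexes the shared LDS array with it, so every workitem gets a private slot. A rough host-side sketch of that arithmetic, illustrative only and not code from the patch:

#include <cstdio>

struct Dim3 { unsigned x, y, z; };

// Flatten a 3D local id into a linear slot, mirroring the TID computation above.
static unsigned flatLocalId(Dim3 tid, Dim3 cnt) {
  return tid.x * cnt.y * cnt.z + tid.y * cnt.z + tid.z;
}

int main() {
  Dim3 cnt = {8, 4, 2};                   // local work-group dimensions
  Dim3 tid = {3, 1, 1};
  unsigned slot = flatLocalId(tid, cnt);  // 3*8 + 1*2 + 1 = 27
  std::printf("workitem slot %u of %u\n", slot, cnt.x * cnt.y * cnt.z);
  return 0;
}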
diff --git a/contrib/llvm/lib/Target/R600/AMDGPURegisterInfo.cpp b/contrib/llvm/lib/Target/R600/AMDGPURegisterInfo.cpp
index 47617a7..3433280 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPURegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/R600/AMDGPURegisterInfo.cpp
@@ -17,9 +17,9 @@
using namespace llvm;
-AMDGPURegisterInfo::AMDGPURegisterInfo(TargetMachine &tm)
+AMDGPURegisterInfo::AMDGPURegisterInfo(const AMDGPUSubtarget &st)
: AMDGPUGenRegisterInfo(0),
- TM(tm)
+ ST(st)
{ }
//===----------------------------------------------------------------------===//
@@ -27,10 +27,10 @@ AMDGPURegisterInfo::AMDGPURegisterInfo(TargetMachine &tm)
// they are not supported at this time.
//===----------------------------------------------------------------------===//
-const uint16_t AMDGPURegisterInfo::CalleeSavedReg = AMDGPU::NoRegister;
+const MCPhysReg AMDGPURegisterInfo::CalleeSavedReg = AMDGPU::NoRegister;
-const uint16_t* AMDGPURegisterInfo::getCalleeSavedRegs(const MachineFunction *MF)
- const {
+const MCPhysReg*
+AMDGPURegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
return &CalleeSavedReg;
}
@@ -38,7 +38,7 @@ void AMDGPURegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
int SPAdj,
unsigned FIOperandNum,
RegScavenger *RS) const {
- assert(!"Subroutines not supported yet");
+ llvm_unreachable("Subroutines not supported yet");
}
unsigned AMDGPURegisterInfo::getFrameRegister(const MachineFunction &MF) const {
@@ -54,7 +54,7 @@ unsigned AMDGPURegisterInfo::getSubRegFromChannel(unsigned Channel) const {
AMDGPU::sub15
};
- assert (Channel < array_lengthof(SubRegs));
+ assert(Channel < array_lengthof(SubRegs));
return SubRegs[Channel];
}
diff --git a/contrib/llvm/lib/Target/R600/AMDGPURegisterInfo.h b/contrib/llvm/lib/Target/R600/AMDGPURegisterInfo.h
index 688e1a0..46aa7a1 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPURegisterInfo.h
+++ b/contrib/llvm/lib/Target/R600/AMDGPURegisterInfo.h
@@ -25,29 +25,21 @@
namespace llvm {
-class AMDGPUTargetMachine;
+class AMDGPUSubtarget;
class TargetInstrInfo;
struct AMDGPURegisterInfo : public AMDGPUGenRegisterInfo {
- TargetMachine &TM;
- static const uint16_t CalleeSavedReg;
+ static const MCPhysReg CalleeSavedReg;
+ const AMDGPUSubtarget &ST;
- AMDGPURegisterInfo(TargetMachine &tm);
+ AMDGPURegisterInfo(const AMDGPUSubtarget &st);
- virtual BitVector getReservedRegs(const MachineFunction &MF) const {
+ BitVector getReservedRegs(const MachineFunction &MF) const override {
assert(!"Unimplemented"); return BitVector();
}
- /// \param RC is an AMDIL reg class.
- ///
- /// \returns The ISA reg class that is equivalent to \p RC.
- virtual const TargetRegisterClass * getISARegClass(
- const TargetRegisterClass * RC) const {
- assert(!"Unimplemented"); return NULL;
- }
-
virtual const TargetRegisterClass* getCFGStructurizerRegClass(MVT VT) const {
- assert(!"Unimplemented"); return NULL;
+ assert(!"Unimplemented"); return nullptr;
}
virtual unsigned getHWRegIndex(unsigned Reg) const {
@@ -58,11 +50,11 @@ struct AMDGPURegisterInfo : public AMDGPUGenRegisterInfo {
/// (e.g. getSubRegFromChannel(0) -> AMDGPU::sub0)
unsigned getSubRegFromChannel(unsigned Channel) const;
- const uint16_t* getCalleeSavedRegs(const MachineFunction *MF) const;
- void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
+ const MCPhysReg* getCalleeSavedRegs(const MachineFunction *MF) const override;
+ virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
unsigned FIOperandNum,
- RegScavenger *RS) const;
- unsigned getFrameRegister(const MachineFunction &MF) const;
+ RegScavenger *RS) const override;
+ unsigned getFrameRegister(const MachineFunction &MF) const override;
unsigned getIndirectSubReg(unsigned IndirectIndex) const;
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUSubtarget.cpp b/contrib/llvm/lib/Target/R600/AMDGPUSubtarget.cpp
index 061793a..e3c2a50 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUSubtarget.cpp
+++ b/contrib/llvm/lib/Target/R600/AMDGPUSubtarget.cpp
@@ -13,108 +13,77 @@
//===----------------------------------------------------------------------===//
#include "AMDGPUSubtarget.h"
+#include "R600InstrInfo.h"
+#include "SIInstrInfo.h"
+#include "llvm/ADT/SmallString.h"
+
using namespace llvm;
+#define DEBUG_TYPE "amdgpu-subtarget"
+
#define GET_SUBTARGETINFO_ENUM
#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "AMDGPUGenSubtargetInfo.inc"
-AMDGPUSubtarget::AMDGPUSubtarget(StringRef TT, StringRef CPU, StringRef FS) :
- AMDGPUGenSubtargetInfo(TT, CPU, FS), DumpCode(false) {
- InstrItins = getInstrItineraryForCPU(CPU);
+AMDGPUSubtarget::AMDGPUSubtarget(StringRef TT, StringRef GPU, StringRef FS) :
+ AMDGPUGenSubtargetInfo(TT, GPU, FS),
+ DevName(GPU),
+ Is64bit(false),
+ DumpCode(false),
+ R600ALUInst(false),
+ HasVertexCache(false),
+ TexVTXClauseSize(0),
+ Gen(AMDGPUSubtarget::R600),
+ FP64(false),
+ FP64Denormals(false),
+ FP32Denormals(false),
+ CaymanISA(false),
+ EnableIRStructurizer(true),
+ EnablePromoteAlloca(false),
+ EnableIfCvt(true),
+ WavefrontSize(0),
+ CFALUBug(false),
+ LocalMemorySize(0),
+ InstrItins(getInstrItineraryForCPU(GPU)) {
+ // On SI+, we want FP64 denormals to be on by default. FP32 denormals can be
+ // enabled, but some instructions do not respect them and they run at the
+ // double precision rate, so don't enable by default.
+ //
+ // We want to be able to turn these off, but making this a subtarget feature
+ // for SI has the unhelpful behavior that it unsets everything else if you
+ // disable it.
- // Default card
- StringRef GPU = CPU;
- Is64bit = false;
- DefaultSize[0] = 64;
- DefaultSize[1] = 1;
- DefaultSize[2] = 1;
- HasVertexCache = false;
- TexVTXClauseSize = 0;
- Gen = AMDGPUSubtarget::R600;
- FP64 = false;
- CaymanISA = false;
- EnableIRStructurizer = true;
- EnableIfCvt = true;
- ParseSubtargetFeatures(GPU, FS);
- DevName = GPU;
-}
+ SmallString<256> FullFS("+promote-alloca,+fp64-denormals,");
+ FullFS += FS;
-bool
-AMDGPUSubtarget::is64bit() const {
- return Is64bit;
-}
-bool
-AMDGPUSubtarget::hasVertexCache() const {
- return HasVertexCache;
-}
-short
-AMDGPUSubtarget::getTexVTXClauseSize() const {
- return TexVTXClauseSize;
-}
-enum AMDGPUSubtarget::Generation
-AMDGPUSubtarget::getGeneration() const {
- return Gen;
-}
-bool
-AMDGPUSubtarget::hasHWFP64() const {
- return FP64;
-}
-bool
-AMDGPUSubtarget::hasCaymanISA() const {
- return CaymanISA;
-}
-bool
-AMDGPUSubtarget::IsIRStructurizerEnabled() const {
- return EnableIRStructurizer;
-}
-bool
-AMDGPUSubtarget::isIfCvtEnabled() const {
- return EnableIfCvt;
-}
-bool
-AMDGPUSubtarget::isTargetELF() const {
- return false;
-}
-size_t
-AMDGPUSubtarget::getDefaultSize(uint32_t dim) const {
- if (dim > 3) {
- return 1;
- } else {
- return DefaultSize[dim];
- }
-}
-
-std::string
-AMDGPUSubtarget::getDataLayout() const {
- std::string DataLayout = std::string(
- "e"
- "-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32"
- "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128"
- "-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048"
- "-n32:64"
- );
+ ParseSubtargetFeatures(GPU, FullFS);
- if (hasHWFP64()) {
- DataLayout.append("-f64:64:64");
- }
+ if (getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
+ InstrInfo.reset(new R600InstrInfo(*this));
- if (is64bit()) {
- DataLayout.append("-p:64:64:64");
+    // FIXME: I don't think Evergreen has any useful support for
+    // denormals, but this should be checked. Should we issue a warning somewhere if
+ // someone tries to enable these?
+ FP32Denormals = false;
+ FP64Denormals = false;
} else {
- DataLayout.append("-p:32:32:32");
- }
-
- if (Gen >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
- DataLayout.append("-p3:32:32:32");
+ InstrInfo.reset(new SIInstrInfo(*this));
}
-
- return DataLayout;
}
-std::string
-AMDGPUSubtarget::getDeviceName() const {
- return DevName;
+unsigned AMDGPUSubtarget::getStackEntrySize() const {
+ assert(getGeneration() <= NORTHERN_ISLANDS);
+ switch(getWavefrontSize()) {
+ case 16:
+ return 8;
+ case 32:
+ return hasCaymanISA() ? 4 : 8;
+ case 64:
+ return 4;
+ default:
+ llvm_unreachable("Illegal wavefront size.");
+ }
}
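
A minimal sketch, not part of the patch, of the feature-string pattern the new constructor uses: defaults are prepended so that flags in the caller-supplied string, which are applied after them, can still disable them (assuming LLVM's usual left-to-right, last-one-wins handling of "+"/"-" subtarget features; the helper name below is hypothetical).

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"

// Prepend the subtarget defaults, then append the user feature string; a later
// "-fp64-denormals" from the user overrides the "+fp64-denormals" default.
static llvm::SmallString<256> buildFeatureString(llvm::StringRef UserFS) {
  llvm::SmallString<256> FullFS("+promote-alloca,+fp64-denormals,");
  FullFS += UserFS;   // e.g. UserFS == "-promote-alloca,-fp64-denormals"
  return FullFS;      // this is what gets handed to ParseSubtargetFeatures()
}
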
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUSubtarget.h b/contrib/llvm/lib/Target/R600/AMDGPUSubtarget.h
index 4288d27..a844b37 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUSubtarget.h
+++ b/contrib/llvm/lib/Target/R600/AMDGPUSubtarget.h
@@ -15,6 +15,7 @@
#ifndef AMDGPUSUBTARGET_H
#define AMDGPUSUBTARGET_H
#include "AMDGPU.h"
+#include "AMDGPUInstrInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Target/TargetSubtargetInfo.h"
@@ -27,6 +28,9 @@
namespace llvm {
class AMDGPUSubtarget : public AMDGPUGenSubtargetInfo {
+
+ std::unique_ptr<AMDGPUInstrInfo> InstrInfo;
+
public:
enum Generation {
R600 = 0,
@@ -38,49 +42,156 @@ public:
};
private:
- size_t DefaultSize[3];
std::string DevName;
bool Is64bit;
- bool Is32on64bit;
bool DumpCode;
bool R600ALUInst;
bool HasVertexCache;
short TexVTXClauseSize;
- enum Generation Gen;
+ Generation Gen;
bool FP64;
+ bool FP64Denormals;
+ bool FP32Denormals;
bool CaymanISA;
bool EnableIRStructurizer;
+ bool EnablePromoteAlloca;
bool EnableIfCvt;
+ unsigned WavefrontSize;
+ bool CFALUBug;
+ int LocalMemorySize;
InstrItineraryData InstrItins;
public:
AMDGPUSubtarget(StringRef TT, StringRef CPU, StringRef FS);
- const InstrItineraryData &getInstrItineraryData() const { return InstrItins; }
- virtual void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
+ const AMDGPUInstrInfo *getInstrInfo() const {
+ return InstrInfo.get();
+ }
+
+ const InstrItineraryData &getInstrItineraryData() const {
+ return InstrItins;
+ }
+
+ void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
+
+ bool is64bit() const {
+ return Is64bit;
+ }
+
+ bool hasVertexCache() const {
+ return HasVertexCache;
+ }
+
+ short getTexVTXClauseSize() const {
+ return TexVTXClauseSize;
+ }
+
+ Generation getGeneration() const {
+ return Gen;
+ }
+
+ bool hasHWFP64() const {
+ return FP64;
+ }
+
+ bool hasCaymanISA() const {
+ return CaymanISA;
+ }
+
+ bool hasFP32Denormals() const {
+ return FP32Denormals;
+ }
+
+ bool hasFP64Denormals() const {
+ return FP64Denormals;
+ }
+
+ bool hasBFE() const {
+ return (getGeneration() >= EVERGREEN);
+ }
+
+ bool hasBFI() const {
+ return (getGeneration() >= EVERGREEN);
+ }
+
+ bool hasBFM() const {
+ return hasBFE();
+ }
+
+ bool hasBCNT(unsigned Size) const {
+ if (Size == 32)
+ return (getGeneration() >= EVERGREEN);
- bool is64bit() const;
- bool hasVertexCache() const;
- short getTexVTXClauseSize() const;
- enum Generation getGeneration() const;
- bool hasHWFP64() const;
- bool hasCaymanISA() const;
- bool IsIRStructurizerEnabled() const;
- bool isIfCvtEnabled() const;
+ if (Size == 64)
+ return (getGeneration() >= SOUTHERN_ISLANDS);
- virtual bool enableMachineScheduler() const {
+ return false;
+ }
+
+ bool hasMulU24() const {
+ return (getGeneration() >= EVERGREEN);
+ }
+
+ bool hasMulI24() const {
+ return (getGeneration() >= SOUTHERN_ISLANDS ||
+ hasCaymanISA());
+ }
+
+ bool hasFFBL() const {
+ return (getGeneration() >= EVERGREEN);
+ }
+
+ bool hasFFBH() const {
+ return (getGeneration() >= EVERGREEN);
+ }
+
+ bool IsIRStructurizerEnabled() const {
+ return EnableIRStructurizer;
+ }
+
+ bool isPromoteAllocaEnabled() const {
+ return EnablePromoteAlloca;
+ }
+
+ bool isIfCvtEnabled() const {
+ return EnableIfCvt;
+ }
+
+ unsigned getWavefrontSize() const {
+ return WavefrontSize;
+ }
+
+ unsigned getStackEntrySize() const;
+
+ bool hasCFAluBug() const {
+ assert(getGeneration() <= NORTHERN_ISLANDS);
+ return CFALUBug;
+ }
+
+ int getLocalMemorySize() const {
+ return LocalMemorySize;
+ }
+
+ bool enableMachineScheduler() const override {
return getGeneration() <= NORTHERN_ISLANDS;
}
// Helper functions to simplify if statements
- bool isTargetELF() const;
- std::string getDataLayout() const;
- std::string getDeviceName() const;
- virtual size_t getDefaultSize(uint32_t dim) const;
- bool dumpCode() const { return DumpCode; }
- bool r600ALUEncoding() const { return R600ALUInst; }
+ bool isTargetELF() const {
+ return false;
+ }
+ StringRef getDeviceName() const {
+ return DevName;
+ }
+
+ bool dumpCode() const {
+ return DumpCode;
+ }
+ bool r600ALUEncoding() const {
+ return R600ALUInst;
+ }
};
} // End namespace llvm
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUTargetMachine.cpp b/contrib/llvm/lib/Target/R600/AMDGPUTargetMachine.cpp
index bc4f5d7..56ba719 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/R600/AMDGPUTargetMachine.cpp
@@ -21,10 +21,10 @@
#include "SIISelLowering.h"
#include "SIInstrInfo.h"
#include "llvm/Analysis/Passes.h"
-#include "llvm/Analysis/Verifier.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/IR/Verifier.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/PassManager.h"
#include "llvm/Support/TargetRegistry.h"
@@ -33,7 +33,6 @@
#include "llvm/Transforms/Scalar.h"
#include <llvm/CodeGen/Passes.h>
-
using namespace llvm;
extern "C" void LLVMInitializeR600Target() {
@@ -42,13 +41,27 @@ extern "C" void LLVMInitializeR600Target() {
}
static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
- return new ScheduleDAGMI(C, new R600SchedStrategy());
+ return new ScheduleDAGMILive(C, make_unique<R600SchedStrategy>());
}
static MachineSchedRegistry
SchedCustomRegistry("r600", "Run R600's custom scheduler",
createR600MachineScheduler);
+static std::string computeDataLayout(const AMDGPUSubtarget &ST) {
+ std::string Ret = "e-p:32:32";
+
+ if (ST.is64bit()) {
+ // 32-bit local, and region pointers. 64-bit private, global, and constant.
+ Ret += "-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64";
+ }
+
+ Ret += "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256"
+ "-v512:512-v1024:1024-v2048:2048-n32:64";
+
+ return Ret;
+}
+
AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
TargetOptions Options,
@@ -58,7 +71,7 @@ AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, StringRef TT,
:
LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OptLevel),
Subtarget(TT, CPU, FS),
- Layout(Subtarget.getDataLayout()),
+ Layout(computeDataLayout(Subtarget)),
FrameLowering(TargetFrameLowering::StackGrowsUp,
64 * 16 // Maximum stack alignment (long16)
, 0),
@@ -66,12 +79,11 @@ AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, StringRef TT,
InstrItins(&Subtarget.getInstrItineraryData()) {
// TLInfo uses InstrInfo so it must be initialized after.
if (Subtarget.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
- InstrInfo.reset(new R600InstrInfo(*this));
TLInfo.reset(new R600TargetLowering(*this));
} else {
- InstrInfo.reset(new SIInstrInfo(*this));
TLInfo.reset(new SITargetLowering(*this));
}
+ setRequiresStructuredCFG(true);
initAsmInfo();
}
@@ -88,20 +100,21 @@ public:
return getTM<AMDGPUTargetMachine>();
}
- virtual ScheduleDAGInstrs *
- createMachineScheduler(MachineSchedContext *C) const {
+ ScheduleDAGInstrs *
+ createMachineScheduler(MachineSchedContext *C) const override {
const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
return createR600MachineScheduler(C);
- return 0;
+ return nullptr;
}
- virtual bool addPreISel();
- virtual bool addInstSelector();
- virtual bool addPreRegAlloc();
- virtual bool addPostRegAlloc();
- virtual bool addPreSched2();
- virtual bool addPreEmitPass();
+ virtual void addCodeGenPrepare();
+ bool addPreISel() override;
+ bool addInstSelector() override;
+ bool addPreRegAlloc() override;
+ bool addPostRegAlloc() override;
+ bool addPreSched2() override;
+ bool addPreEmitPass() override;
};
} // End of anonymous namespace
@@ -121,13 +134,23 @@ void AMDGPUTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
PM.add(createAMDGPUTargetTransformInfoPass(this));
}
+void AMDGPUPassConfig::addCodeGenPrepare() {
+ const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
+ if (ST.isPromoteAllocaEnabled()) {
+ addPass(createAMDGPUPromoteAlloca(ST));
+ addPass(createSROAPass());
+ }
+
+ TargetPassConfig::addCodeGenPrepare();
+}
+
bool
AMDGPUPassConfig::addPreISel() {
const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
addPass(createFlattenCFGPass());
if (ST.IsIRStructurizerEnabled())
addPass(createStructurizeCFGPass());
- if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
+ if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
addPass(createSinkingPass());
addPass(createSITypeRewriter());
addPass(createSIAnnotateControlFlowPass());
@@ -139,17 +162,23 @@ AMDGPUPassConfig::addPreISel() {
bool AMDGPUPassConfig::addInstSelector() {
addPass(createAMDGPUISelDag(getAMDGPUTargetMachine()));
+ addPass(createSILowerI1CopiesPass());
return false;
}
bool AMDGPUPassConfig::addPreRegAlloc() {
- addPass(createAMDGPUConvertToISAPass(*TM));
const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
addPass(createR600VectorRegMerger(*TM));
} else {
addPass(createSIFixSGPRCopiesPass(*TM));
+ // SIFixSGPRCopies can generate a lot of duplicate instructions,
+ // so we need to run MachineCSE afterwards.
+ addPass(&MachineCSEID);
+ addPass(createSIShrinkInstructionsPass());
+ initializeSIFixSGPRLiveRangesPass(*PassRegistry::getPassRegistry());
+ insertPass(&RegisterCoalescerID, &SIFixSGPRLiveRangesID);
}
return false;
}
@@ -157,6 +186,7 @@ bool AMDGPUPassConfig::addPreRegAlloc() {
bool AMDGPUPassConfig::addPostRegAlloc() {
const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
+ addPass(createSIShrinkInstructionsPass());
if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
addPass(createSIInsertWaits(*TM));
}
@@ -167,7 +197,7 @@ bool AMDGPUPassConfig::addPreSched2() {
const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
- addPass(createR600EmitClauseMarkers(*TM));
+ addPass(createR600EmitClauseMarkers());
if (ST.isIfCvtEnabled())
addPass(&IfConverterID);
if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
@@ -178,7 +208,7 @@ bool AMDGPUPassConfig::addPreSched2() {
bool AMDGPUPassConfig::addPreEmitPass() {
const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
- addPass(createAMDGPUCFGStructurizerPass(*TM));
+ addPass(createAMDGPUCFGStructurizerPass());
addPass(createR600ExpandSpecialInstrsPass(*TM));
addPass(&FinalizeMachineBundlesID);
addPass(createR600Packetizer(*TM));
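
For reference, a sketch, not taken from the diff, of the layout string the new computeDataLayout() helper above produces when ST.is64bit() is true; it is simply the concatenation of the string literals shown in that function (the variable name here is hypothetical).

// Data layout for a 64-bit subtarget: the "e-p:32:32" base, the pointer sizes
// added under the is64bit() check, and the common vector/integer tail.
static const char AMDGPU64BitDataLayout[] =
    "e-p:32:32"
    "-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64"
    "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256"
    "-v512:512-v1024:1024-v2048:2048-n32:64";
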
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUTargetMachine.h b/contrib/llvm/lib/Target/R600/AMDGPUTargetMachine.h
index f942614..3bb15be 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUTargetMachine.h
+++ b/contrib/llvm/lib/Target/R600/AMDGPUTargetMachine.h
@@ -17,10 +17,9 @@
#include "AMDGPUFrameLowering.h"
#include "AMDGPUInstrInfo.h"
+#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
-#include "AMDILIntrinsicInfo.h"
#include "R600ISelLowering.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/IR/DataLayout.h"
namespace llvm {
@@ -31,8 +30,7 @@ class AMDGPUTargetMachine : public LLVMTargetMachine {
const DataLayout Layout;
AMDGPUFrameLowering FrameLowering;
AMDGPUIntrinsicInfo IntrinsicInfo;
- OwningPtr<AMDGPUInstrInfo> InstrInfo;
- OwningPtr<AMDGPUTargetLowering> TLInfo;
+ std::unique_ptr<AMDGPUTargetLowering> TLInfo;
const InstrItineraryData *InstrItins;
public:
@@ -40,30 +38,32 @@ public:
StringRef CPU, TargetOptions Options, Reloc::Model RM,
CodeModel::Model CM, CodeGenOpt::Level OL);
~AMDGPUTargetMachine();
- virtual const AMDGPUFrameLowering *getFrameLowering() const {
+ const AMDGPUFrameLowering *getFrameLowering() const override {
return &FrameLowering;
}
- virtual const AMDGPUIntrinsicInfo *getIntrinsicInfo() const {
+ const AMDGPUIntrinsicInfo *getIntrinsicInfo() const override {
return &IntrinsicInfo;
}
- virtual const AMDGPUInstrInfo *getInstrInfo() const {
- return InstrInfo.get();
+ const AMDGPUInstrInfo *getInstrInfo() const override {
+ return getSubtargetImpl()->getInstrInfo();
}
- virtual const AMDGPUSubtarget *getSubtargetImpl() const { return &Subtarget; }
- virtual const AMDGPURegisterInfo *getRegisterInfo() const {
- return &InstrInfo->getRegisterInfo();
+ const AMDGPUSubtarget *getSubtargetImpl() const override {
+ return &Subtarget;
}
- virtual AMDGPUTargetLowering *getTargetLowering() const {
+ const AMDGPURegisterInfo *getRegisterInfo() const override {
+ return &getInstrInfo()->getRegisterInfo();
+ }
+ AMDGPUTargetLowering *getTargetLowering() const override {
return TLInfo.get();
}
- virtual const InstrItineraryData *getInstrItineraryData() const {
+ const InstrItineraryData *getInstrItineraryData() const override {
return InstrItins;
}
- virtual const DataLayout *getDataLayout() const { return &Layout; }
- virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+ const DataLayout *getDataLayout() const override { return &Layout; }
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
/// \brief Register R600 analysis passes with a pass manager.
- virtual void addAnalysisPasses(PassManagerBase &PM);
+ void addAnalysisPasses(PassManagerBase &PM) override;
};
} // End namespace llvm
diff --git a/contrib/llvm/lib/Target/R600/AMDGPUTargetTransformInfo.cpp b/contrib/llvm/lib/Target/R600/AMDGPUTargetTransformInfo.cpp
index 8db319c..88934b6 100644
--- a/contrib/llvm/lib/Target/R600/AMDGPUTargetTransformInfo.cpp
+++ b/contrib/llvm/lib/Target/R600/AMDGPUTargetTransformInfo.cpp
@@ -15,15 +15,18 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "AMDGPUtti"
#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
+#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/CostTable.h"
+#include "llvm/Target/TargetLowering.h"
using namespace llvm;
+#define DEBUG_TYPE "AMDGPUtti"
+
// Declare the pass initialization routine locally as target-specific passes
// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
@@ -33,7 +36,7 @@ void initializeAMDGPUTTIPass(PassRegistry &);
namespace {
-class AMDGPUTTI : public ImmutablePass, public TargetTransformInfo {
+class AMDGPUTTI final : public ImmutablePass, public TargetTransformInfo {
const AMDGPUTargetMachine *TM;
const AMDGPUSubtarget *ST;
const AMDGPUTargetLowering *TLI;
@@ -43,7 +46,7 @@ class AMDGPUTTI : public ImmutablePass, public TargetTransformInfo {
unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;
public:
- AMDGPUTTI() : ImmutablePass(ID), TM(0), ST(0), TLI(0) {
+ AMDGPUTTI() : ImmutablePass(ID), TM(nullptr), ST(nullptr), TLI(nullptr) {
llvm_unreachable("This pass cannot be directly constructed");
}
@@ -53,11 +56,9 @@ public:
initializeAMDGPUTTIPass(*PassRegistry::getPassRegistry());
}
- virtual void initializePass() { pushTTIStack(this); }
-
- virtual void finalizePass() { popTTIStack(); }
+ void initializePass() override { pushTTIStack(this); }
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
TargetTransformInfo::getAnalysisUsage(AU);
}
@@ -65,13 +66,22 @@ public:
static char ID;
/// Provide necessary pointer adjustments for the two base classes.
- virtual void *getAdjustedAnalysisPointer(const void *ID) {
+ void *getAdjustedAnalysisPointer(const void *ID) override {
if (ID == &TargetTransformInfo::ID)
return (TargetTransformInfo *)this;
return this;
}
- virtual bool hasBranchDivergence() const;
+ bool hasBranchDivergence() const override;
+
+ void getUnrollingPreferences(Loop *L,
+ UnrollingPreferences &UP) const override;
+
+ PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const override;
+
+ unsigned getNumberOfRegisters(bool Vector) const override;
+ unsigned getRegisterBitWidth(bool Vector) const override;
+ unsigned getMaximumUnrollFactor() const override;
/// @}
};
@@ -88,3 +98,56 @@ llvm::createAMDGPUTargetTransformInfoPass(const AMDGPUTargetMachine *TM) {
}
bool AMDGPUTTI::hasBranchDivergence() const { return true; }
+
+void AMDGPUTTI::getUnrollingPreferences(Loop *L,
+ UnrollingPreferences &UP) const {
+ for (const BasicBlock *BB : L->getBlocks()) {
+ for (const Instruction &I : *BB) {
+ const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
+ if (!GEP || GEP->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
+ continue;
+
+ const Value *Ptr = GEP->getPointerOperand();
+ const AllocaInst *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr));
+ if (Alloca) {
+ // We want to do whatever we can to limit the number of alloca
+ // instructions that make it through to the code generator. allocas
+ // require us to use indirect addressing, which is slow and prone to
+ // compiler bugs. If this loop does an address calculation on an
+ // alloca ptr, then we want to use a higher than normal loop unroll
+ // threshold. This will give SROA a better chance to eliminate these
+ // allocas.
+ //
+ // Don't use the maximum allowed value here as it will make some
+ // programs way too big.
+ UP.Threshold = 500;
+ }
+ }
+ }
+}
+
+AMDGPUTTI::PopcntSupportKind
+AMDGPUTTI::getPopcntSupport(unsigned TyWidth) const {
+ assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
+ return ST->hasBCNT(TyWidth) ? PSK_FastHardware : PSK_Software;
+}
+
+unsigned AMDGPUTTI::getNumberOfRegisters(bool Vec) const {
+ if (Vec)
+ return 0;
+
+ // Number of VGPRs on SI.
+ if (ST->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)
+ return 256;
+
+ return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
+}
+
+unsigned AMDGPUTTI::getRegisterBitWidth(bool) const {
+ return 32;
+}
+
+unsigned AMDGPUTTI::getMaximumUnrollFactor() const {
+ // Semi-arbitrary large amount.
+ return 64;
+}
diff --git a/contrib/llvm/lib/Target/R600/AMDILBase.td b/contrib/llvm/lib/Target/R600/AMDILBase.td
deleted file mode 100644
index 5dcd478..0000000
--- a/contrib/llvm/lib/Target/R600/AMDILBase.td
+++ /dev/null
@@ -1,25 +0,0 @@
-//===- AMDIL.td - AMDIL Target Machine -------------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// Target-independent interfaces which we are implementing
-//===----------------------------------------------------------------------===//
-
-include "llvm/Target/Target.td"
-
-// Dummy Instruction itineraries for pseudo instructions
-def ALU_NULL : FuncUnit;
-def NullALU : InstrItinClass;
-
-//===----------------------------------------------------------------------===//
-// Register File, Calling Conv, Instruction Descriptions
-//===----------------------------------------------------------------------===//
-
-
-include "AMDILRegisterInfo.td"
-include "AMDILInstrInfo.td"
-
diff --git a/contrib/llvm/lib/Target/R600/AMDILCFGStructurizer.cpp b/contrib/llvm/lib/Target/R600/AMDILCFGStructurizer.cpp
index 507570f..f3a0391 100644
--- a/contrib/llvm/lib/Target/R600/AMDILCFGStructurizer.cpp
+++ b/contrib/llvm/lib/Target/R600/AMDILCFGStructurizer.cpp
@@ -8,19 +8,13 @@
/// \file
//==-----------------------------------------------------------------------===//
-#define DEBUG_TYPE "structcfg"
-
#include "AMDGPU.h"
#include "AMDGPUInstrInfo.h"
#include "R600InstrInfo.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/Analysis/DominatorInternals.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
@@ -30,11 +24,16 @@
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
+#define DEBUG_TYPE "structcfg"
+
#define DEFAULT_VEC_SLOTS 8
// TODO: move-begin.
@@ -54,6 +53,10 @@ STATISTIC(numLoopcontPatternMatch, "CFGStructurizer number of loop-continue "
STATISTIC(numClonedBlock, "CFGStructurizer cloned blocks");
STATISTIC(numClonedInstr, "CFGStructurizer cloned instructions");
+namespace llvm {
+ void initializeAMDGPUCFGStructurizerPass(PassRegistry&);
+}
+
//===----------------------------------------------------------------------===//
//
// Miscellaneous utility for CFGStructurizer.
@@ -131,16 +134,16 @@ public:
static char ID;
- AMDGPUCFGStructurizer(TargetMachine &tm) :
- MachineFunctionPass(ID), TM(tm),
- TII(static_cast<const R600InstrInfo *>(tm.getInstrInfo())),
- TRI(&TII->getRegisterInfo()) { }
+ AMDGPUCFGStructurizer() :
+ MachineFunctionPass(ID), TII(nullptr), TRI(nullptr) {
+ initializeAMDGPUCFGStructurizerPass(*PassRegistry::getPassRegistry());
+ }
- const char *getPassName() const {
- return "AMD IL Control Flow Graph structurizer Pass";
+ const char *getPassName() const override {
+ return "AMDGPU Control Flow Graph structurizer Pass";
}
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addPreserved<MachineFunctionAnalysis>();
AU.addRequired<MachineFunctionAnalysis>();
AU.addRequired<MachineDominatorTree>();
@@ -156,14 +159,16 @@ public:
/// sure all loops have an exit block
bool prepare();
- bool runOnMachineFunction(MachineFunction &MF) {
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ TII = static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());
+ TRI = &TII->getRegisterInfo();
DEBUG(MF.dump(););
OrderedBlks.clear();
FuncRep = &MF;
MLI = &getAnalysis<MachineLoopInfo>();
DEBUG(dbgs() << "LoopInfo:\n"; PrintLoopinfo(*MLI););
MDT = &getAnalysis<MachineDominatorTree>();
- DEBUG(MDT->print(dbgs(), (const llvm::Module*)0););
+ DEBUG(MDT->print(dbgs(), (const llvm::Module*)nullptr););
PDT = &getAnalysis<MachinePostDominatorTree>();
DEBUG(PDT->print(dbgs()););
prepare();
@@ -173,7 +178,6 @@ public:
}
protected:
- TargetMachine &TM;
MachineDominatorTree *MDT;
MachinePostDominatorTree *PDT;
MachineLoopInfo *MLI;
@@ -220,7 +224,7 @@ protected:
/// Compute the reversed DFS post order of Blocks
void orderBlocks(MachineFunction *MF);
- // Function originaly from CFGStructTraits
+ // Function originally from CFGStructTraits
void insertInstrEnd(MachineBasicBlock *MBB, int NewOpcode,
DebugLoc DL = DebugLoc());
MachineInstr *insertInstrBefore(MachineBasicBlock *MBB, int NewOpcode,
@@ -330,7 +334,7 @@ protected:
MachineBasicBlock *DstMBB, MachineBasicBlock::iterator I);
void recordSccnum(MachineBasicBlock *MBB, int SCCNum);
void retireBlock(MachineBasicBlock *MBB);
- void setLoopLandBlock(MachineLoop *LoopRep, MachineBasicBlock *MBB = NULL);
+ void setLoopLandBlock(MachineLoop *LoopRep, MachineBasicBlock *MBB = nullptr);
MachineBasicBlock *findNearestCommonPostDom(std::set<MachineBasicBlock *>&);
  /// This is a workaround solution for findNearestCommonDominator not available
@@ -357,7 +361,7 @@ MachineBasicBlock *AMDGPUCFGStructurizer::getLoopLandInfo(MachineLoop *LoopRep)
const {
LoopLandInfoMap::const_iterator It = LLInfoMap.find(LoopRep);
if (It == LLInfoMap.end())
- return NULL;
+ return nullptr;
return (*It).second;
}
@@ -628,7 +632,7 @@ MachineInstr *AMDGPUCFGStructurizer::getNormalBlockBranchInstr(
MachineInstr *MI = &*It;
if (MI && (isCondBranch(MI) || isUncondBranch(MI)))
return MI;
- return NULL;
+ return nullptr;
}
MachineInstr *AMDGPUCFGStructurizer::getLoopendBlockBranchInstr(
@@ -644,7 +648,7 @@ MachineInstr *AMDGPUCFGStructurizer::getLoopendBlockBranchInstr(
break;
}
}
- return NULL;
+ return nullptr;
}
MachineInstr *AMDGPUCFGStructurizer::getReturnInstr(MachineBasicBlock *MBB) {
@@ -654,7 +658,7 @@ MachineInstr *AMDGPUCFGStructurizer::getReturnInstr(MachineBasicBlock *MBB) {
if (instr->getOpcode() == AMDGPU::RETURN)
return instr;
}
- return NULL;
+ return nullptr;
}
MachineInstr *AMDGPUCFGStructurizer::getContinueInstr(MachineBasicBlock *MBB) {
@@ -664,7 +668,7 @@ MachineInstr *AMDGPUCFGStructurizer::getContinueInstr(MachineBasicBlock *MBB) {
if (MI->getOpcode() == AMDGPU::CONTINUE)
return MI;
}
- return NULL;
+ return nullptr;
}
bool AMDGPUCFGStructurizer::isReturnBlock(MachineBasicBlock *MBB) {
@@ -786,7 +790,7 @@ bool AMDGPUCFGStructurizer::prepare() {
bool AMDGPUCFGStructurizer::run() {
//Assume reducible CFG...
- DEBUG(dbgs() << "AMDGPUCFGStructurizer::run\n";FuncRep->viewCFG(););
+ DEBUG(dbgs() << "AMDGPUCFGStructurizer::run\n");
#ifdef STRESSTEST
//Use the worse block ordering to test the algorithm.
@@ -815,7 +819,7 @@ bool AMDGPUCFGStructurizer::run() {
SmallVectorImpl<MachineBasicBlock *>::const_iterator SccBeginIter =
It;
- MachineBasicBlock *SccBeginMBB = NULL;
+ MachineBasicBlock *SccBeginMBB = nullptr;
int SccNumBlk = 0; // The number of active blocks, init to a
// maximum possible number.
int SccNumIter; // Number of iteration in this SCC.
@@ -858,8 +862,7 @@ bool AMDGPUCFGStructurizer::run() {
ContNextScc = false;
DEBUG(
dbgs() << "repeat processing SCC" << getSCCNum(MBB)
- << "sccNumIter = " << SccNumIter << "\n";
- FuncRep->viewCFG();
+ << "sccNumIter = " << SccNumIter << '\n';
);
} else {
// Finish the current scc.
@@ -871,7 +874,7 @@ bool AMDGPUCFGStructurizer::run() {
}
if (ContNextScc)
- SccBeginMBB = NULL;
+ SccBeginMBB = nullptr;
} //while, "one iteration" over the function.
MachineBasicBlock *EntryMBB =
@@ -915,12 +918,10 @@ bool AMDGPUCFGStructurizer::run() {
BlockInfoMap.clear();
LLInfoMap.clear();
- DEBUG(
- FuncRep->viewCFG();
- );
-
- if (!Finish)
- llvm_unreachable("IRREDUCIBL_CF");
+ if (!Finish) {
+ DEBUG(FuncRep->viewCFG());
+ llvm_unreachable("IRREDUCIBLE_CFG");
+ }
return true;
}
@@ -930,9 +931,9 @@ bool AMDGPUCFGStructurizer::run() {
void AMDGPUCFGStructurizer::orderBlocks(MachineFunction *MF) {
int SccNum = 0;
MachineBasicBlock *MBB;
- for (scc_iterator<MachineFunction *> It = scc_begin(MF), E = scc_end(MF);
- It != E; ++It, ++SccNum) {
- std::vector<MachineBasicBlock *> &SccNext = *It;
+ for (scc_iterator<MachineFunction *> It = scc_begin(MF); !It.isAtEnd();
+ ++It, ++SccNum) {
+ const std::vector<MachineBasicBlock *> &SccNext = *It;
for (std::vector<MachineBasicBlock *>::const_iterator
blockIter = SccNext.begin(), blockEnd = SccNext.end();
blockIter != blockEnd; ++blockIter) {
@@ -1025,7 +1026,7 @@ int AMDGPUCFGStructurizer::ifPatternMatch(MachineBasicBlock *MBB) {
} else if (TrueMBB->succ_size() == 1 && *TrueMBB->succ_begin() == FalseMBB) {
// Triangle pattern, false is empty
LandBlk = FalseMBB;
- FalseMBB = NULL;
+ FalseMBB = nullptr;
} else if (FalseMBB->succ_size() == 1
&& *FalseMBB->succ_begin() == TrueMBB) {
// Triangle pattern, true is empty
@@ -1033,7 +1034,7 @@ int AMDGPUCFGStructurizer::ifPatternMatch(MachineBasicBlock *MBB) {
std::swap(TrueMBB, FalseMBB);
reversePredicateSetter(MBB->end());
LandBlk = FalseMBB;
- FalseMBB = NULL;
+ FalseMBB = nullptr;
} else if (FalseMBB->succ_size() == 1
&& isSameloopDetachedContbreak(TrueMBB, FalseMBB)) {
LandBlk = *FalseMBB->succ_begin();
@@ -1074,13 +1075,11 @@ int AMDGPUCFGStructurizer::ifPatternMatch(MachineBasicBlock *MBB) {
int AMDGPUCFGStructurizer::loopendPatternMatch() {
std::vector<MachineLoop *> NestedLoops;
- for (MachineLoopInfo::iterator It = MLI->begin(), E = MLI->end();
- It != E; ++It) {
- df_iterator<MachineLoop *> LpIt = df_begin(*It),
- LpE = df_end(*It);
- for (; LpIt != LpE; ++LpIt)
- NestedLoops.push_back(*LpIt);
- }
+ for (MachineLoopInfo::iterator It = MLI->begin(), E = MLI->end(); It != E;
+ ++It)
+ for (MachineLoop *ML : depth_first(*It))
+ NestedLoops.push_back(ML);
+
if (NestedLoops.size() == 0)
return 0;
@@ -1234,7 +1233,7 @@ int AMDGPUCFGStructurizer::handleJumpintoIfImp(MachineBasicBlock *HeadMBB,
numClonedBlock += Num;
Num += serialPatternMatch(*HeadMBB->succ_begin());
- Num += serialPatternMatch(*llvm::next(HeadMBB->succ_begin()));
+ Num += serialPatternMatch(*std::next(HeadMBB->succ_begin()));
Num += ifPatternMatch(HeadMBB);
assert(Num > 0);
@@ -1243,7 +1242,7 @@ int AMDGPUCFGStructurizer::handleJumpintoIfImp(MachineBasicBlock *HeadMBB,
DEBUG(
dbgs() << " not working\n";
);
- DownBlk = (DownBlk->succ_size() == 1) ? (*DownBlk->succ_begin()) : NULL;
+ DownBlk = (DownBlk->succ_size() == 1) ? (*DownBlk->succ_begin()) : nullptr;
} // walk down the postDomTree
return Num;
@@ -1722,11 +1721,11 @@ AMDGPUCFGStructurizer::normalizeInfiniteLoopExit(MachineLoop* LoopRep) {
const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
if (!LoopHeader || !LoopLatch)
- return NULL;
+ return nullptr;
MachineInstr *BranchMI = getLoopendBlockBranchInstr(LoopLatch);
// Is LoopRep an infinite loop ?
if (!BranchMI || !isUncondBranch(BranchMI))
- return NULL;
+ return nullptr;
MachineBasicBlock *DummyExitBlk = FuncRep->CreateMachineBasicBlock();
FuncRep->push_back(DummyExitBlk); //insert to function
@@ -1763,7 +1762,7 @@ void AMDGPUCFGStructurizer::removeRedundantConditionalBranch(
if (MBB->succ_size() != 2)
return;
MachineBasicBlock *MBB1 = *MBB->succ_begin();
- MachineBasicBlock *MBB2 = *llvm::next(MBB->succ_begin());
+ MachineBasicBlock *MBB2 = *std::next(MBB->succ_begin());
if (MBB1 != MBB2)
return;
@@ -1859,7 +1858,7 @@ AMDGPUCFGStructurizer::findNearestCommonPostDom(MachineBasicBlock *MBB1,
return findNearestCommonPostDom(MBB1, *MBB2->succ_begin());
if (!Node1 || !Node2)
- return NULL;
+ return nullptr;
Node1 = Node1->getIDom();
while (Node1) {
@@ -1868,7 +1867,7 @@ AMDGPUCFGStructurizer::findNearestCommonPostDom(MachineBasicBlock *MBB1,
Node1 = Node1->getIDom();
}
- return NULL;
+ return nullptr;
}
MachineBasicBlock *
@@ -1899,6 +1898,14 @@ char AMDGPUCFGStructurizer::ID = 0;
} // end anonymous namespace
-FunctionPass *llvm::createAMDGPUCFGStructurizerPass(TargetMachine &tm) {
- return new AMDGPUCFGStructurizer(tm);
+INITIALIZE_PASS_BEGIN(AMDGPUCFGStructurizer, "amdgpustructurizer",
+ "AMDGPU CFG Structurizer", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_END(AMDGPUCFGStructurizer, "amdgpustructurizer",
+ "AMDGPU CFG Structurizer", false, false)
+
+FunctionPass *llvm::createAMDGPUCFGStructurizerPass() {
+ return new AMDGPUCFGStructurizer();
}
diff --git a/contrib/llvm/lib/Target/R600/AMDILISelLowering.cpp b/contrib/llvm/lib/Target/R600/AMDILISelLowering.cpp
deleted file mode 100644
index 970787e..0000000
--- a/contrib/llvm/lib/Target/R600/AMDILISelLowering.cpp
+++ /dev/null
@@ -1,642 +0,0 @@
-//===-- AMDILISelLowering.cpp - AMDIL DAG Lowering Implementation ---------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief TargetLowering functions borrowed from AMDIL.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPUISelLowering.h"
-#include "AMDGPURegisterInfo.h"
-#include "AMDGPUSubtarget.h"
-#include "AMDILIntrinsicInfo.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/CodeGen/SelectionDAGNodes.h"
-#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-#include "llvm/IR/CallingConv.h"
-#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/IR/Intrinsics.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetOptions.h"
-
-using namespace llvm;
-//===----------------------------------------------------------------------===//
-// TargetLowering Implementation Help Functions End
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// TargetLowering Class Implementation Begins
-//===----------------------------------------------------------------------===//
-void AMDGPUTargetLowering::InitAMDILLowering() {
- static const int types[] = {
- (int)MVT::i8,
- (int)MVT::i16,
- (int)MVT::i32,
- (int)MVT::f32,
- (int)MVT::f64,
- (int)MVT::i64,
- (int)MVT::v2i8,
- (int)MVT::v4i8,
- (int)MVT::v2i16,
- (int)MVT::v4i16,
- (int)MVT::v4f32,
- (int)MVT::v4i32,
- (int)MVT::v2f32,
- (int)MVT::v2i32,
- (int)MVT::v2f64,
- (int)MVT::v2i64
- };
-
- static const int IntTypes[] = {
- (int)MVT::i8,
- (int)MVT::i16,
- (int)MVT::i32,
- (int)MVT::i64
- };
-
- static const int FloatTypes[] = {
- (int)MVT::f32,
- (int)MVT::f64
- };
-
- static const int VectorTypes[] = {
- (int)MVT::v2i8,
- (int)MVT::v4i8,
- (int)MVT::v2i16,
- (int)MVT::v4i16,
- (int)MVT::v4f32,
- (int)MVT::v4i32,
- (int)MVT::v2f32,
- (int)MVT::v2i32,
- (int)MVT::v2f64,
- (int)MVT::v2i64
- };
- const size_t NumTypes = array_lengthof(types);
- const size_t NumFloatTypes = array_lengthof(FloatTypes);
- const size_t NumIntTypes = array_lengthof(IntTypes);
- const size_t NumVectorTypes = array_lengthof(VectorTypes);
-
- const AMDGPUSubtarget &STM = getTargetMachine().getSubtarget<AMDGPUSubtarget>();
- // These are the current register classes that are
- // supported
-
- for (unsigned int x = 0; x < NumTypes; ++x) {
- MVT::SimpleValueType VT = (MVT::SimpleValueType)types[x];
-
- //FIXME: SIGN_EXTEND_INREG is not meaningful for floating point types
- // We cannot sextinreg, expand to shifts
- setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
- setOperationAction(ISD::SUBE, VT, Expand);
- setOperationAction(ISD::SUBC, VT, Expand);
- setOperationAction(ISD::ADDE, VT, Expand);
- setOperationAction(ISD::ADDC, VT, Expand);
- setOperationAction(ISD::BRCOND, VT, Custom);
- setOperationAction(ISD::BR_JT, VT, Expand);
- setOperationAction(ISD::BRIND, VT, Expand);
- // TODO: Implement custom UREM/SREM routines
- setOperationAction(ISD::SREM, VT, Expand);
- setOperationAction(ISD::SMUL_LOHI, VT, Expand);
- setOperationAction(ISD::UMUL_LOHI, VT, Expand);
- if (VT != MVT::i64 && VT != MVT::v2i64) {
- setOperationAction(ISD::SDIV, VT, Custom);
- }
- }
- for (unsigned int x = 0; x < NumFloatTypes; ++x) {
- MVT::SimpleValueType VT = (MVT::SimpleValueType)FloatTypes[x];
-
- // IL does not have these operations for floating point types
- setOperationAction(ISD::FP_ROUND_INREG, VT, Expand);
- setOperationAction(ISD::SETOLT, VT, Expand);
- setOperationAction(ISD::SETOGE, VT, Expand);
- setOperationAction(ISD::SETOGT, VT, Expand);
- setOperationAction(ISD::SETOLE, VT, Expand);
- setOperationAction(ISD::SETULT, VT, Expand);
- setOperationAction(ISD::SETUGE, VT, Expand);
- setOperationAction(ISD::SETUGT, VT, Expand);
- setOperationAction(ISD::SETULE, VT, Expand);
- }
-
- for (unsigned int x = 0; x < NumIntTypes; ++x) {
- MVT::SimpleValueType VT = (MVT::SimpleValueType)IntTypes[x];
-
- // GPU also does not have divrem function for signed or unsigned
- setOperationAction(ISD::SDIVREM, VT, Expand);
-
- // GPU does not have [S|U]MUL_LOHI functions as a single instruction
- setOperationAction(ISD::SMUL_LOHI, VT, Expand);
- setOperationAction(ISD::UMUL_LOHI, VT, Expand);
-
- setOperationAction(ISD::BSWAP, VT, Expand);
-
- // GPU doesn't have any counting operators
- setOperationAction(ISD::CTPOP, VT, Expand);
- setOperationAction(ISD::CTTZ, VT, Expand);
- setOperationAction(ISD::CTLZ, VT, Expand);
- }
-
- for (unsigned int ii = 0; ii < NumVectorTypes; ++ii) {
- MVT::SimpleValueType VT = (MVT::SimpleValueType)VectorTypes[ii];
-
- setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
- setOperationAction(ISD::SDIVREM, VT, Expand);
- setOperationAction(ISD::SMUL_LOHI, VT, Expand);
- // setOperationAction(ISD::VSETCC, VT, Expand);
- setOperationAction(ISD::SELECT_CC, VT, Expand);
-
- }
- setOperationAction(ISD::MULHU, MVT::i64, Expand);
- setOperationAction(ISD::MULHU, MVT::v2i64, Expand);
- setOperationAction(ISD::MULHS, MVT::i64, Expand);
- setOperationAction(ISD::MULHS, MVT::v2i64, Expand);
- setOperationAction(ISD::ADD, MVT::v2i64, Expand);
- setOperationAction(ISD::SREM, MVT::v2i64, Expand);
- setOperationAction(ISD::Constant , MVT::i64 , Legal);
- setOperationAction(ISD::SDIV, MVT::v2i64, Expand);
- setOperationAction(ISD::TRUNCATE, MVT::v2i64, Expand);
- setOperationAction(ISD::SIGN_EXTEND, MVT::v2i64, Expand);
- setOperationAction(ISD::ZERO_EXTEND, MVT::v2i64, Expand);
- setOperationAction(ISD::ANY_EXTEND, MVT::v2i64, Expand);
- if (STM.hasHWFP64()) {
- // we support loading/storing v2f64 but not operations on the type
- setOperationAction(ISD::FADD, MVT::v2f64, Expand);
- setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
- setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
- setOperationAction(ISD::FP_ROUND_INREG, MVT::v2f64, Expand);
- setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand);
- setOperationAction(ISD::ConstantFP , MVT::f64 , Legal);
- // We want to expand vector conversions into their scalar
- // counterparts.
- setOperationAction(ISD::TRUNCATE, MVT::v2f64, Expand);
- setOperationAction(ISD::SIGN_EXTEND, MVT::v2f64, Expand);
- setOperationAction(ISD::ZERO_EXTEND, MVT::v2f64, Expand);
- setOperationAction(ISD::ANY_EXTEND, MVT::v2f64, Expand);
- setOperationAction(ISD::FABS, MVT::f64, Expand);
- setOperationAction(ISD::FABS, MVT::v2f64, Expand);
- }
- // TODO: Fix the UDIV24 algorithm so it works for these
- // types correctly. This needs vector comparisons
- // for this to work correctly.
- setOperationAction(ISD::UDIV, MVT::v2i8, Expand);
- setOperationAction(ISD::UDIV, MVT::v4i8, Expand);
- setOperationAction(ISD::UDIV, MVT::v2i16, Expand);
- setOperationAction(ISD::UDIV, MVT::v4i16, Expand);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Custom);
- setOperationAction(ISD::SUBC, MVT::Other, Expand);
- setOperationAction(ISD::ADDE, MVT::Other, Expand);
- setOperationAction(ISD::ADDC, MVT::Other, Expand);
- setOperationAction(ISD::BRCOND, MVT::Other, Custom);
- setOperationAction(ISD::BR_JT, MVT::Other, Expand);
- setOperationAction(ISD::BRIND, MVT::Other, Expand);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Expand);
-
-
- // Use the default implementation.
- setOperationAction(ISD::ConstantFP , MVT::f32 , Legal);
- setOperationAction(ISD::Constant , MVT::i32 , Legal);
-
- setSchedulingPreference(Sched::RegPressure);
- setPow2DivIsCheap(false);
- setSelectIsExpensive(true);
- setJumpIsExpensive(true);
-
- MaxStoresPerMemcpy = 4096;
- MaxStoresPerMemmove = 4096;
- MaxStoresPerMemset = 4096;
-
-}
-
-bool
-AMDGPUTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
- const CallInst &I, unsigned Intrinsic) const {
- return false;
-}
-
-// The backend supports 32 and 64 bit floating point immediates
-bool
-AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
- if (VT.getScalarType().getSimpleVT().SimpleTy == MVT::f32
- || VT.getScalarType().getSimpleVT().SimpleTy == MVT::f64) {
- return true;
- } else {
- return false;
- }
-}
-
-bool
-AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
- if (VT.getScalarType().getSimpleVT().SimpleTy == MVT::f32
- || VT.getScalarType().getSimpleVT().SimpleTy == MVT::f64) {
- return false;
- } else {
- return true;
- }
-}
-
-
-// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
-// be zero. Op is expected to be a target specific node. Used by DAG
-// combiner.
-
-void
-AMDGPUTargetLowering::computeMaskedBitsForTargetNode(
- const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) const {
- APInt KnownZero2;
- APInt KnownOne2;
- KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything
- switch (Op.getOpcode()) {
- default: break;
- case ISD::SELECT_CC:
- DAG.ComputeMaskedBits(
- Op.getOperand(1),
- KnownZero,
- KnownOne,
- Depth + 1
- );
- DAG.ComputeMaskedBits(
- Op.getOperand(0),
- KnownZero2,
- KnownOne2
- );
- assert((KnownZero & KnownOne) == 0
- && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0
- && "Bits known to be one AND zero?");
- // Only known if known in both the LHS and RHS
- KnownOne &= KnownOne2;
- KnownZero &= KnownZero2;
- break;
- };
-}
-
-//===----------------------------------------------------------------------===//
-// Other Lowering Hooks
-//===----------------------------------------------------------------------===//
-
-SDValue
-AMDGPUTargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const {
- EVT OVT = Op.getValueType();
- SDValue DST;
- if (OVT.getScalarType() == MVT::i64) {
- DST = LowerSDIV64(Op, DAG);
- } else if (OVT.getScalarType() == MVT::i32) {
- DST = LowerSDIV32(Op, DAG);
- } else if (OVT.getScalarType() == MVT::i16
- || OVT.getScalarType() == MVT::i8) {
- DST = LowerSDIV24(Op, DAG);
- } else {
- DST = SDValue(Op.getNode(), 0);
- }
- return DST;
-}
-
-SDValue
-AMDGPUTargetLowering::LowerSREM(SDValue Op, SelectionDAG &DAG) const {
- EVT OVT = Op.getValueType();
- SDValue DST;
- if (OVT.getScalarType() == MVT::i64) {
- DST = LowerSREM64(Op, DAG);
- } else if (OVT.getScalarType() == MVT::i32) {
- DST = LowerSREM32(Op, DAG);
- } else if (OVT.getScalarType() == MVT::i16) {
- DST = LowerSREM16(Op, DAG);
- } else if (OVT.getScalarType() == MVT::i8) {
- DST = LowerSREM8(Op, DAG);
- } else {
- DST = SDValue(Op.getNode(), 0);
- }
- return DST;
-}
-
-SDValue
-AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const {
- SDValue Data = Op.getOperand(0);
- VTSDNode *BaseType = cast<VTSDNode>(Op.getOperand(1));
- SDLoc DL(Op);
- EVT DVT = Data.getValueType();
- EVT BVT = BaseType->getVT();
- unsigned baseBits = BVT.getScalarType().getSizeInBits();
- unsigned srcBits = DVT.isSimple() ? DVT.getScalarType().getSizeInBits() : 1;
- unsigned shiftBits = srcBits - baseBits;
- if (srcBits < 32) {
- // If the op is less than 32 bits, then it needs to extend to 32bits
- // so it can properly keep the upper bits valid.
- EVT IVT = genIntType(32, DVT.isVector() ? DVT.getVectorNumElements() : 1);
- Data = DAG.getNode(ISD::ZERO_EXTEND, DL, IVT, Data);
- shiftBits = 32 - baseBits;
- DVT = IVT;
- }
- SDValue Shift = DAG.getConstant(shiftBits, DVT);
- // Shift left by 'Shift' bits.
- Data = DAG.getNode(ISD::SHL, DL, DVT, Data, Shift);
- // Signed shift Right by 'Shift' bits.
- Data = DAG.getNode(ISD::SRA, DL, DVT, Data, Shift);
- if (srcBits < 32) {
- // Once the sign extension is done, the op needs to be converted to
- // its original type.
- Data = DAG.getSExtOrTrunc(Data, DL, Op.getOperand(0).getValueType());
- }
- return Data;
-}
-EVT
-AMDGPUTargetLowering::genIntType(uint32_t size, uint32_t numEle) const {
- int iSize = (size * numEle);
- int vEle = (iSize >> ((size == 64) ? 6 : 5));
- if (!vEle) {
- vEle = 1;
- }
- if (size == 64) {
- if (vEle == 1) {
- return EVT(MVT::i64);
- } else {
- return EVT(MVT::getVectorVT(MVT::i64, vEle));
- }
- } else {
- if (vEle == 1) {
- return EVT(MVT::i32);
- } else {
- return EVT(MVT::getVectorVT(MVT::i32, vEle));
- }
- }
-}
-
-SDValue
-AMDGPUTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
- SDValue Chain = Op.getOperand(0);
- SDValue Cond = Op.getOperand(1);
- SDValue Jump = Op.getOperand(2);
- SDValue Result;
- Result = DAG.getNode(
- AMDGPUISD::BRANCH_COND,
- SDLoc(Op),
- Op.getValueType(),
- Chain, Jump, Cond);
- return Result;
-}
-
-SDValue
-AMDGPUTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const {
- SDLoc DL(Op);
- EVT OVT = Op.getValueType();
- SDValue LHS = Op.getOperand(0);
- SDValue RHS = Op.getOperand(1);
- MVT INTTY;
- MVT FLTTY;
- if (!OVT.isVector()) {
- INTTY = MVT::i32;
- FLTTY = MVT::f32;
- } else if (OVT.getVectorNumElements() == 2) {
- INTTY = MVT::v2i32;
- FLTTY = MVT::v2f32;
- } else if (OVT.getVectorNumElements() == 4) {
- INTTY = MVT::v4i32;
- FLTTY = MVT::v4f32;
- }
- unsigned bitsize = OVT.getScalarType().getSizeInBits();
- // char|short jq = ia ^ ib;
- SDValue jq = DAG.getNode(ISD::XOR, DL, OVT, LHS, RHS);
-
- // jq = jq >> (bitsize - 2)
- jq = DAG.getNode(ISD::SRA, DL, OVT, jq, DAG.getConstant(bitsize - 2, OVT));
-
- // jq = jq | 0x1
- jq = DAG.getNode(ISD::OR, DL, OVT, jq, DAG.getConstant(1, OVT));
-
- // jq = (int)jq
- jq = DAG.getSExtOrTrunc(jq, DL, INTTY);
-
- // int ia = (int)LHS;
- SDValue ia = DAG.getSExtOrTrunc(LHS, DL, INTTY);
-
- // int ib, (int)RHS;
- SDValue ib = DAG.getSExtOrTrunc(RHS, DL, INTTY);
-
- // float fa = (float)ia;
- SDValue fa = DAG.getNode(ISD::SINT_TO_FP, DL, FLTTY, ia);
-
- // float fb = (float)ib;
- SDValue fb = DAG.getNode(ISD::SINT_TO_FP, DL, FLTTY, ib);
-
- // float fq = native_divide(fa, fb);
- SDValue fq = DAG.getNode(AMDGPUISD::DIV_INF, DL, FLTTY, fa, fb);
-
- // fq = trunc(fq);
- fq = DAG.getNode(ISD::FTRUNC, DL, FLTTY, fq);
-
- // float fqneg = -fq;
- SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FLTTY, fq);
-
- // float fr = mad(fqneg, fb, fa);
- SDValue fr = DAG.getNode(ISD::FADD, DL, FLTTY,
- DAG.getNode(ISD::MUL, DL, FLTTY, fqneg, fb), fa);
-
- // int iq = (int)fq;
- SDValue iq = DAG.getNode(ISD::FP_TO_SINT, DL, INTTY, fq);
-
- // fr = fabs(fr);
- fr = DAG.getNode(ISD::FABS, DL, FLTTY, fr);
-
- // fb = fabs(fb);
- fb = DAG.getNode(ISD::FABS, DL, FLTTY, fb);
-
- // int cv = fr >= fb;
- SDValue cv;
- if (INTTY == MVT::i32) {
- cv = DAG.getSetCC(DL, INTTY, fr, fb, ISD::SETOGE);
- } else {
- cv = DAG.getSetCC(DL, INTTY, fr, fb, ISD::SETOGE);
- }
- // jq = (cv ? jq : 0);
- jq = DAG.getNode(ISD::SELECT, DL, OVT, cv, jq,
- DAG.getConstant(0, OVT));
- // dst = iq + jq;
- iq = DAG.getSExtOrTrunc(iq, DL, OVT);
- iq = DAG.getNode(ISD::ADD, DL, OVT, iq, jq);
- return iq;
-}
-
-SDValue
-AMDGPUTargetLowering::LowerSDIV32(SDValue Op, SelectionDAG &DAG) const {
- SDLoc DL(Op);
- EVT OVT = Op.getValueType();
- SDValue LHS = Op.getOperand(0);
- SDValue RHS = Op.getOperand(1);
- // The LowerSDIV32 function generates equivalent to the following IL.
- // mov r0, LHS
- // mov r1, RHS
- // ilt r10, r0, 0
- // ilt r11, r1, 0
- // iadd r0, r0, r10
- // iadd r1, r1, r11
- // ixor r0, r0, r10
- // ixor r1, r1, r11
- // udiv r0, r0, r1
- // ixor r10, r10, r11
- // iadd r0, r0, r10
- // ixor DST, r0, r10
-
- // mov r0, LHS
- SDValue r0 = LHS;
-
- // mov r1, RHS
- SDValue r1 = RHS;
-
- // ilt r10, r0, 0
- SDValue r10 = DAG.getSelectCC(DL,
- r0, DAG.getConstant(0, OVT),
- DAG.getConstant(-1, MVT::i32),
- DAG.getConstant(0, MVT::i32),
- ISD::SETLT);
-
- // ilt r11, r1, 0
- SDValue r11 = DAG.getSelectCC(DL,
- r1, DAG.getConstant(0, OVT),
- DAG.getConstant(-1, MVT::i32),
- DAG.getConstant(0, MVT::i32),
- ISD::SETLT);
-
- // iadd r0, r0, r10
- r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
-
- // iadd r1, r1, r11
- r1 = DAG.getNode(ISD::ADD, DL, OVT, r1, r11);
-
- // ixor r0, r0, r10
- r0 = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
-
- // ixor r1, r1, r11
- r1 = DAG.getNode(ISD::XOR, DL, OVT, r1, r11);
-
- // udiv r0, r0, r1
- r0 = DAG.getNode(ISD::UDIV, DL, OVT, r0, r1);
-
- // ixor r10, r10, r11
- r10 = DAG.getNode(ISD::XOR, DL, OVT, r10, r11);
-
- // iadd r0, r0, r10
- r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
-
- // ixor DST, r0, r10
- SDValue DST = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
- return DST;
-}
-
-SDValue
-AMDGPUTargetLowering::LowerSDIV64(SDValue Op, SelectionDAG &DAG) const {
- return SDValue(Op.getNode(), 0);
-}
-
-SDValue
-AMDGPUTargetLowering::LowerSREM8(SDValue Op, SelectionDAG &DAG) const {
- SDLoc DL(Op);
- EVT OVT = Op.getValueType();
- MVT INTTY = MVT::i32;
- if (OVT == MVT::v2i8) {
- INTTY = MVT::v2i32;
- } else if (OVT == MVT::v4i8) {
- INTTY = MVT::v4i32;
- }
- SDValue LHS = DAG.getSExtOrTrunc(Op.getOperand(0), DL, INTTY);
- SDValue RHS = DAG.getSExtOrTrunc(Op.getOperand(1), DL, INTTY);
- LHS = DAG.getNode(ISD::SREM, DL, INTTY, LHS, RHS);
- LHS = DAG.getSExtOrTrunc(LHS, DL, OVT);
- return LHS;
-}
-
-SDValue
-AMDGPUTargetLowering::LowerSREM16(SDValue Op, SelectionDAG &DAG) const {
- SDLoc DL(Op);
- EVT OVT = Op.getValueType();
- MVT INTTY = MVT::i32;
- if (OVT == MVT::v2i16) {
- INTTY = MVT::v2i32;
- } else if (OVT == MVT::v4i16) {
- INTTY = MVT::v4i32;
- }
- SDValue LHS = DAG.getSExtOrTrunc(Op.getOperand(0), DL, INTTY);
- SDValue RHS = DAG.getSExtOrTrunc(Op.getOperand(1), DL, INTTY);
- LHS = DAG.getNode(ISD::SREM, DL, INTTY, LHS, RHS);
- LHS = DAG.getSExtOrTrunc(LHS, DL, OVT);
- return LHS;
-}
-
-SDValue
-AMDGPUTargetLowering::LowerSREM32(SDValue Op, SelectionDAG &DAG) const {
- SDLoc DL(Op);
- EVT OVT = Op.getValueType();
- SDValue LHS = Op.getOperand(0);
- SDValue RHS = Op.getOperand(1);
- // The LowerSREM32 function generates equivalent to the following IL.
- // mov r0, LHS
- // mov r1, RHS
- // ilt r10, r0, 0
- // ilt r11, r1, 0
- // iadd r0, r0, r10
- // iadd r1, r1, r11
- // ixor r0, r0, r10
- // ixor r1, r1, r11
- // udiv r20, r0, r1
- // umul r20, r20, r1
- // sub r0, r0, r20
- // iadd r0, r0, r10
- // ixor DST, r0, r10
-
- // mov r0, LHS
- SDValue r0 = LHS;
-
- // mov r1, RHS
- SDValue r1 = RHS;
-
- // ilt r10, r0, 0
- SDValue r10 = DAG.getSetCC(DL, OVT, r0, DAG.getConstant(0, OVT), ISD::SETLT);
-
- // ilt r11, r1, 0
- SDValue r11 = DAG.getSetCC(DL, OVT, r1, DAG.getConstant(0, OVT), ISD::SETLT);
-
- // iadd r0, r0, r10
- r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
-
- // iadd r1, r1, r11
- r1 = DAG.getNode(ISD::ADD, DL, OVT, r1, r11);
-
- // ixor r0, r0, r10
- r0 = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
-
- // ixor r1, r1, r11
- r1 = DAG.getNode(ISD::XOR, DL, OVT, r1, r11);
-
- // udiv r20, r0, r1
- SDValue r20 = DAG.getNode(ISD::UREM, DL, OVT, r0, r1);
-
- // umul r20, r20, r1
- r20 = DAG.getNode(AMDGPUISD::UMUL, DL, OVT, r20, r1);
-
- // sub r0, r0, r20
- r0 = DAG.getNode(ISD::SUB, DL, OVT, r0, r20);
-
- // iadd r0, r0, r10
- r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
-
- // ixor DST, r0, r10
- SDValue DST = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
- return DST;
-}
-
-SDValue
-AMDGPUTargetLowering::LowerSREM64(SDValue Op, SelectionDAG &DAG) const {
- return SDValue(Op.getNode(), 0);
-}
diff --git a/contrib/llvm/lib/Target/R600/AMDILInstrInfo.td b/contrib/llvm/lib/Target/R600/AMDILInstrInfo.td
deleted file mode 100644
index 0f0c88d..0000000
--- a/contrib/llvm/lib/Target/R600/AMDILInstrInfo.td
+++ /dev/null
@@ -1,150 +0,0 @@
-//===------------ AMDILInstrInfo.td - AMDIL Target ------*-tablegen-*------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-// This file describes the AMDIL instructions in TableGen format.
-//
-//===----------------------------------------------------------------------===//
-//===--------------------------------------------------------------------===//
-// Custom Operands
-//===--------------------------------------------------------------------===//
-def brtarget : Operand<OtherVT>;
-
-//===--------------------------------------------------------------------===//
-// Custom Selection DAG Type Profiles
-//===--------------------------------------------------------------------===//
-//===----------------------------------------------------------------------===//
-// Generic Profile Types
-//===----------------------------------------------------------------------===//
-
-def SDTIL_GenBinaryOp : SDTypeProfile<1, 2, [
- SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>
- ]>;
-def SDTIL_GenTernaryOp : SDTypeProfile<1, 3, [
- SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>, SDTCisSameAs<2, 3>
- ]>;
-def SDTIL_GenVecBuild : SDTypeProfile<1, 1, [
- SDTCisEltOfVec<1, 0>
- ]>;
-
-//===----------------------------------------------------------------------===//
-// Flow Control Profile Types
-//===----------------------------------------------------------------------===//
-// Branch instruction where second and third are basic blocks
-def SDTIL_BRCond : SDTypeProfile<0, 2, [
- SDTCisVT<0, OtherVT>
- ]>;
-
-//===--------------------------------------------------------------------===//
-// Custom Selection DAG Nodes
-//===--------------------------------------------------------------------===//
-//===----------------------------------------------------------------------===//
-// Flow Control DAG Nodes
-//===----------------------------------------------------------------------===//
-def IL_brcond : SDNode<"AMDGPUISD::BRANCH_COND", SDTIL_BRCond, [SDNPHasChain]>;
-
-//===----------------------------------------------------------------------===//
-// Call/Return DAG Nodes
-//===----------------------------------------------------------------------===//
-def IL_retflag : SDNode<"AMDGPUISD::RET_FLAG", SDTNone,
- [SDNPHasChain, SDNPOptInGlue]>;
-
-//===--------------------------------------------------------------------===//
-// Instructions
-//===--------------------------------------------------------------------===//
-// Floating point math functions
-def IL_div_inf : SDNode<"AMDGPUISD::DIV_INF", SDTIL_GenBinaryOp>;
-
-//===----------------------------------------------------------------------===//
-// Integer functions
-//===----------------------------------------------------------------------===//
-def IL_umul : SDNode<"AMDGPUISD::UMUL" , SDTIntBinOp,
- [SDNPCommutative, SDNPAssociative]>;
-
-//===--------------------------------------------------------------------===//
-// Custom Pattern DAG Nodes
-//===--------------------------------------------------------------------===//
-def global_store : PatFrag<(ops node:$val, node:$ptr),
- (store node:$val, node:$ptr), [{
- return isGlobalStore(dyn_cast<StoreSDNode>(N));
-}]>;
-
-//===----------------------------------------------------------------------===//
-// Load pattern fragments
-//===----------------------------------------------------------------------===//
-// Global address space loads
-def global_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
- return isGlobalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-// Constant address space loads
-def constant_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
- return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
-}]>;
-
-//===----------------------------------------------------------------------===//
-// Complex addressing mode patterns
-//===----------------------------------------------------------------------===//
-def ADDR : ComplexPattern<i32, 2, "SelectADDR", [], []>;
-def ADDRF : ComplexPattern<i32, 2, "SelectADDR", [frameindex], []>;
-def ADDR64 : ComplexPattern<i64, 2, "SelectADDR64", [], []>;
-def ADDR64F : ComplexPattern<i64, 2, "SelectADDR64", [frameindex], []>;
-
-//===----------------------------------------------------------------------===//
-// Instruction format classes
-//===----------------------------------------------------------------------===//
-class ILFormat<dag outs, dag ins, string asmstr, list<dag> pattern>
-: Instruction {
-
- let Namespace = "AMDGPU";
- dag OutOperandList = outs;
- dag InOperandList = ins;
- let Pattern = pattern;
- let AsmString = !strconcat(asmstr, "\n");
- let isPseudo = 1;
- let Itinerary = NullALU;
- bit hasIEEEFlag = 0;
- bit hasZeroOpFlag = 0;
- let mayLoad = 0;
- let mayStore = 0;
- let hasSideEffects = 0;
-}
-
-//===--------------------------------------------------------------------===//
-// Multiclass Instruction formats
-//===--------------------------------------------------------------------===//
-// Multiclass that handles branch instructions
-multiclass BranchConditional<SDNode Op, RegisterClass rci, RegisterClass rcf> {
- def _i32 : ILFormat<(outs),
- (ins brtarget:$target, rci:$src0),
- "; i32 Pseudo branch instruction",
- [(Op bb:$target, (i32 rci:$src0))]>;
- def _f32 : ILFormat<(outs),
- (ins brtarget:$target, rcf:$src0),
- "; f32 Pseudo branch instruction",
- [(Op bb:$target, (f32 rcf:$src0))]>;
-}
-
-// Only scalar types should generate flow control
-multiclass BranchInstr<string name> {
- def _i32 : ILFormat<(outs), (ins GPRI32:$src),
- !strconcat(name, " $src"), []>;
- def _f32 : ILFormat<(outs), (ins GPRF32:$src),
- !strconcat(name, " $src"), []>;
-}
-// Only scalar types should generate flow control
-multiclass BranchInstr2<string name> {
- def _i32 : ILFormat<(outs), (ins GPRI32:$src0, GPRI32:$src1),
- !strconcat(name, " $src0, $src1"), []>;
- def _f32 : ILFormat<(outs), (ins GPRF32:$src0, GPRF32:$src1),
- !strconcat(name, " $src0, $src1"), []>;
-}
-
-//===--------------------------------------------------------------------===//
-// Intrinsics support
-//===--------------------------------------------------------------------===//
-include "AMDILIntrinsics.td"
diff --git a/contrib/llvm/lib/Target/R600/AMDILIntrinsics.td b/contrib/llvm/lib/Target/R600/AMDILIntrinsics.td
deleted file mode 100644
index 6ec3559..0000000
--- a/contrib/llvm/lib/Target/R600/AMDILIntrinsics.td
+++ /dev/null
@@ -1,232 +0,0 @@
-//===- AMDILIntrinsics.td - Defines AMDIL Intrinsics -*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-// This file defines all of the amdil-specific intrinsics
-//
-//===---------------------------------------------------------------===//
-//===--------------------------------------------------------------------===//
-// Intrinsic classes
-// Generic versions of the above classes but for Target specific intrinsics
-// instead of SDNode patterns.
-//===--------------------------------------------------------------------===//
-let TargetPrefix = "AMDIL", isTarget = 1 in {
- class VoidIntLong :
- Intrinsic<[llvm_i64_ty], [], []>;
- class VoidIntInt :
- Intrinsic<[llvm_i32_ty], [], []>;
- class VoidIntBool :
- Intrinsic<[llvm_i32_ty], [], []>;
- class UnaryIntInt :
- Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrNoMem]>;
- class UnaryIntFloat :
- Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
- class ConvertIntFTOI :
- Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
- class ConvertIntITOF :
- Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty], [IntrNoMem]>;
- class UnaryIntNoRetInt :
- Intrinsic<[], [llvm_anyint_ty], []>;
- class UnaryIntNoRetFloat :
- Intrinsic<[], [llvm_anyfloat_ty], []>;
- class BinaryIntInt :
- Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
- class BinaryIntFloat :
- Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
- class BinaryIntNoRetInt :
- Intrinsic<[], [llvm_anyint_ty, LLVMMatchType<0>], []>;
- class BinaryIntNoRetFloat :
- Intrinsic<[], [llvm_anyfloat_ty, LLVMMatchType<0>], []>;
- class TernaryIntInt :
- Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
- LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
- class TernaryIntFloat :
- Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>,
- LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
- class QuaternaryIntInt :
- Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
- LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
- class UnaryAtomicInt :
- Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
- class BinaryAtomicInt :
- Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
- class TernaryAtomicInt :
- Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty]>;
- class UnaryAtomicIntNoRet :
- Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
- class BinaryAtomicIntNoRet :
- Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
- class TernaryAtomicIntNoRet :
- Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
-}
-
-let TargetPrefix = "AMDIL", isTarget = 1 in {
- def int_AMDIL_abs : GCCBuiltin<"__amdil_abs">, UnaryIntInt;
-
- def int_AMDIL_bit_extract_i32 : GCCBuiltin<"__amdil_ibit_extract">,
- TernaryIntInt;
- def int_AMDIL_bit_extract_u32 : GCCBuiltin<"__amdil_ubit_extract">,
- TernaryIntInt;
- def int_AMDIL_bit_reverse_u32 : GCCBuiltin<"__amdil_ubit_reverse">,
- UnaryIntInt;
- def int_AMDIL_bit_count_i32 : GCCBuiltin<"__amdil_count_bits">,
- UnaryIntInt;
- def int_AMDIL_bit_find_first_lo : GCCBuiltin<"__amdil_ffb_lo">,
- UnaryIntInt;
- def int_AMDIL_bit_find_first_hi : GCCBuiltin<"__amdil_ffb_hi">,
- UnaryIntInt;
- def int_AMDIL_bit_find_first_sgn : GCCBuiltin<"__amdil_ffb_signed">,
- UnaryIntInt;
- def int_AMDIL_media_bitalign : GCCBuiltin<"__amdil_bitalign">,
- TernaryIntInt;
- def int_AMDIL_media_bytealign : GCCBuiltin<"__amdil_bytealign">,
- TernaryIntInt;
- def int_AMDIL_bit_insert_u32 : GCCBuiltin<"__amdil_ubit_insert">,
- QuaternaryIntInt;
- def int_AMDIL_bfi : GCCBuiltin<"__amdil_bfi">,
- TernaryIntInt;
- def int_AMDIL_bfm : GCCBuiltin<"__amdil_bfm">,
- BinaryIntInt;
- def int_AMDIL_mulhi_i32 : GCCBuiltin<"__amdil_imul_high">,
- BinaryIntInt;
- def int_AMDIL_mulhi_u32 : GCCBuiltin<"__amdil_umul_high">,
- BinaryIntInt;
- def int_AMDIL_mul24_i32 : GCCBuiltin<"__amdil_imul24">,
- BinaryIntInt;
- def int_AMDIL_mul24_u32 : GCCBuiltin<"__amdil_umul24">,
- BinaryIntInt;
- def int_AMDIL_mulhi24_i32 : GCCBuiltin<"__amdil_imul24_high">,
- BinaryIntInt;
- def int_AMDIL_mulhi24_u32 : GCCBuiltin<"__amdil_umul24_high">,
- BinaryIntInt;
- def int_AMDIL_carry_i32 : GCCBuiltin<"__amdil_carry">,
- BinaryIntInt;
- def int_AMDIL_borrow_i32 : GCCBuiltin<"__amdil_borrow">,
- BinaryIntInt;
- def int_AMDIL_min_i32 : GCCBuiltin<"__amdil_imin">,
- BinaryIntInt;
- def int_AMDIL_min_u32 : GCCBuiltin<"__amdil_umin">,
- BinaryIntInt;
- def int_AMDIL_min : GCCBuiltin<"__amdil_min">,
- BinaryIntFloat;
- def int_AMDIL_max_i32 : GCCBuiltin<"__amdil_imax">,
- BinaryIntInt;
- def int_AMDIL_max_u32 : GCCBuiltin<"__amdil_umax">,
- BinaryIntInt;
- def int_AMDIL_max : GCCBuiltin<"__amdil_max">,
- BinaryIntFloat;
- def int_AMDIL_media_lerp_u4 : GCCBuiltin<"__amdil_u4lerp">,
- TernaryIntInt;
- def int_AMDIL_media_sad : GCCBuiltin<"__amdil_sad">,
- TernaryIntInt;
- def int_AMDIL_media_sad_hi : GCCBuiltin<"__amdil_sadhi">,
- TernaryIntInt;
- def int_AMDIL_fraction : GCCBuiltin<"__amdil_fraction">,
- UnaryIntFloat;
- def int_AMDIL_clamp : GCCBuiltin<"__amdil_clamp">,
- TernaryIntFloat;
- def int_AMDIL_pireduce : GCCBuiltin<"__amdil_pireduce">,
- UnaryIntFloat;
- def int_AMDIL_round_nearest : GCCBuiltin<"__amdil_round_nearest">,
- UnaryIntFloat;
- def int_AMDIL_round_neginf : GCCBuiltin<"__amdil_round_neginf">,
- UnaryIntFloat;
- def int_AMDIL_round_zero : GCCBuiltin<"__amdil_round_zero">,
- UnaryIntFloat;
- def int_AMDIL_acos : GCCBuiltin<"__amdil_acos">,
- UnaryIntFloat;
- def int_AMDIL_atan : GCCBuiltin<"__amdil_atan">,
- UnaryIntFloat;
- def int_AMDIL_asin : GCCBuiltin<"__amdil_asin">,
- UnaryIntFloat;
- def int_AMDIL_cos : GCCBuiltin<"__amdil_cos">,
- UnaryIntFloat;
- def int_AMDIL_cos_vec : GCCBuiltin<"__amdil_cos_vec">,
- UnaryIntFloat;
- def int_AMDIL_tan : GCCBuiltin<"__amdil_tan">,
- UnaryIntFloat;
- def int_AMDIL_sin : GCCBuiltin<"__amdil_sin">,
- UnaryIntFloat;
- def int_AMDIL_sin_vec : GCCBuiltin<"__amdil_sin_vec">,
- UnaryIntFloat;
- def int_AMDIL_pow : GCCBuiltin<"__amdil_pow">, BinaryIntFloat;
- def int_AMDIL_div : GCCBuiltin<"__amdil_div">, BinaryIntFloat;
- def int_AMDIL_udiv : GCCBuiltin<"__amdil_udiv">, BinaryIntInt;
- def int_AMDIL_sqrt: GCCBuiltin<"__amdil_sqrt">,
- UnaryIntFloat;
- def int_AMDIL_sqrt_vec: GCCBuiltin<"__amdil_sqrt_vec">,
- UnaryIntFloat;
- def int_AMDIL_exp : GCCBuiltin<"__amdil_exp">,
- UnaryIntFloat;
- def int_AMDIL_exp_vec : GCCBuiltin<"__amdil_exp_vec">,
- UnaryIntFloat;
- def int_AMDIL_exn : GCCBuiltin<"__amdil_exn">,
- UnaryIntFloat;
- def int_AMDIL_log_vec : GCCBuiltin<"__amdil_log_vec">,
- UnaryIntFloat;
- def int_AMDIL_ln : GCCBuiltin<"__amdil_ln">,
- UnaryIntFloat;
- def int_AMDIL_sign: GCCBuiltin<"__amdil_sign">,
- UnaryIntFloat;
- def int_AMDIL_fma: GCCBuiltin<"__amdil_fma">,
- TernaryIntFloat;
- def int_AMDIL_rsq : GCCBuiltin<"__amdil_rsq">,
- UnaryIntFloat;
- def int_AMDIL_rsq_vec : GCCBuiltin<"__amdil_rsq_vec">,
- UnaryIntFloat;
- def int_AMDIL_length : GCCBuiltin<"__amdil_length">,
- UnaryIntFloat;
- def int_AMDIL_lerp : GCCBuiltin<"__amdil_lerp">,
- TernaryIntFloat;
- def int_AMDIL_media_sad4 : GCCBuiltin<"__amdil_sad4">,
- Intrinsic<[llvm_i32_ty], [llvm_v4i32_ty,
- llvm_v4i32_ty, llvm_i32_ty], []>;
-
- def int_AMDIL_frexp_f64 : GCCBuiltin<"__amdil_frexp">,
- Intrinsic<[llvm_v2i64_ty], [llvm_double_ty], []>;
- def int_AMDIL_ldexp : GCCBuiltin<"__amdil_ldexp">,
- Intrinsic<[llvm_anyfloat_ty], [llvm_anyfloat_ty, llvm_anyint_ty], []>;
- def int_AMDIL_drcp : GCCBuiltin<"__amdil_rcp">,
- Intrinsic<[llvm_double_ty], [llvm_double_ty], []>;
- def int_AMDIL_convert_f16_f32 : GCCBuiltin<"__amdil_half_to_float">,
- ConvertIntITOF;
- def int_AMDIL_convert_f32_f16 : GCCBuiltin<"__amdil_float_to_half">,
- ConvertIntFTOI;
- def int_AMDIL_convert_f32_i32_rpi : GCCBuiltin<"__amdil_float_to_int_rpi">,
- ConvertIntFTOI;
- def int_AMDIL_convert_f32_i32_flr : GCCBuiltin<"__amdil_float_to_int_flr">,
- ConvertIntFTOI;
- def int_AMDIL_convert_f32_f16_near : GCCBuiltin<"__amdil_float_to_half_near">,
- ConvertIntFTOI;
- def int_AMDIL_convert_f32_f16_neg_inf : GCCBuiltin<"__amdil_float_to_half_neg_inf">,
- ConvertIntFTOI;
- def int_AMDIL_convert_f32_f16_plus_inf : GCCBuiltin<"__amdil_float_to_half_plus_inf">,
- ConvertIntFTOI;
- def int_AMDIL_media_convert_f2v4u8 : GCCBuiltin<"__amdil_f_2_u4">,
- Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty], []>;
- def int_AMDIL_media_unpack_byte_0 : GCCBuiltin<"__amdil_unpack_0">,
- ConvertIntITOF;
- def int_AMDIL_media_unpack_byte_1 : GCCBuiltin<"__amdil_unpack_1">,
- ConvertIntITOF;
- def int_AMDIL_media_unpack_byte_2 : GCCBuiltin<"__amdil_unpack_2">,
- ConvertIntITOF;
- def int_AMDIL_media_unpack_byte_3 : GCCBuiltin<"__amdil_unpack_3">,
- ConvertIntITOF;
- def int_AMDIL_dp2_add : GCCBuiltin<"__amdil_dp2_add">,
- Intrinsic<[llvm_float_ty], [llvm_v2f32_ty,
- llvm_v2f32_ty, llvm_float_ty], []>;
- def int_AMDIL_dp2 : GCCBuiltin<"__amdil_dp2">,
- Intrinsic<[llvm_float_ty], [llvm_v2f32_ty,
- llvm_v2f32_ty], []>;
- def int_AMDIL_dp3 : GCCBuiltin<"__amdil_dp3">,
- Intrinsic<[llvm_float_ty], [llvm_v4f32_ty,
- llvm_v4f32_ty], []>;
- def int_AMDIL_dp4 : GCCBuiltin<"__amdil_dp4">,
- Intrinsic<[llvm_float_ty], [llvm_v4f32_ty,
- llvm_v4f32_ty], []>;
-}
diff --git a/contrib/llvm/lib/Target/R600/AMDILRegisterInfo.td b/contrib/llvm/lib/Target/R600/AMDILRegisterInfo.td
deleted file mode 100644
index b9d0334..0000000
--- a/contrib/llvm/lib/Target/R600/AMDILRegisterInfo.td
+++ /dev/null
@@ -1,107 +0,0 @@
-//===- AMDILRegisterInfo.td - AMDIL Register defs ----------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-// Declarations that describe the AMDIL register file
-//
-//===----------------------------------------------------------------------===//
-
-class AMDILReg<bits<16> num, string n> : Register<n> {
- field bits<16> Value;
- let Value = num;
- let Namespace = "AMDGPU";
-}
-
-// We will start with 8 registers for each class before expanding to more
-// Since the swizzle is added based on the register class, we can leave it
-// off here and just specify different registers for different register classes
-def R1 : AMDILReg<1, "r1">, DwarfRegNum<[1]>;
-def R2 : AMDILReg<2, "r2">, DwarfRegNum<[2]>;
-def R3 : AMDILReg<3, "r3">, DwarfRegNum<[3]>;
-def R4 : AMDILReg<4, "r4">, DwarfRegNum<[4]>;
-def R5 : AMDILReg<5, "r5">, DwarfRegNum<[5]>;
-def R6 : AMDILReg<6, "r6">, DwarfRegNum<[6]>;
-def R7 : AMDILReg<7, "r7">, DwarfRegNum<[7]>;
-def R8 : AMDILReg<8, "r8">, DwarfRegNum<[8]>;
-def R9 : AMDILReg<9, "r9">, DwarfRegNum<[9]>;
-def R10 : AMDILReg<10, "r10">, DwarfRegNum<[10]>;
-def R11 : AMDILReg<11, "r11">, DwarfRegNum<[11]>;
-def R12 : AMDILReg<12, "r12">, DwarfRegNum<[12]>;
-def R13 : AMDILReg<13, "r13">, DwarfRegNum<[13]>;
-def R14 : AMDILReg<14, "r14">, DwarfRegNum<[14]>;
-def R15 : AMDILReg<15, "r15">, DwarfRegNum<[15]>;
-def R16 : AMDILReg<16, "r16">, DwarfRegNum<[16]>;
-def R17 : AMDILReg<17, "r17">, DwarfRegNum<[17]>;
-def R18 : AMDILReg<18, "r18">, DwarfRegNum<[18]>;
-def R19 : AMDILReg<19, "r19">, DwarfRegNum<[19]>;
-def R20 : AMDILReg<20, "r20">, DwarfRegNum<[20]>;
-
-// All registers between 1000 and 1024 are reserved and cannot be used
-// unless commented in this section
-// r1021-r1025 are used to dynamically calculate the local/group/thread/region/region_local ID's
-// r1020 is used to hold the frame index for local arrays
-// r1019 is used to hold the dynamic stack allocation pointer
-// r1018 is used as a temporary register for handwritten code
-// r1017 is used as a temporary register for handwritten code
-// r1016 is used as a temporary register for load/store code
-// r1015 is used as a temporary register for data segment offset
-// r1014 is used as a temporary register for store code
-// r1013 is used as the section data pointer register
-// r1012-r1010 and r1001-r1008 are used for temporary I/O registers
-// r1009 is used as the frame pointer register
-// r999 is used as the mem register.
-// r998 is used as the return address register.
-//def R1025 : AMDILReg<1025, "r1025">, DwarfRegNum<[1025]>;
-//def R1024 : AMDILReg<1024, "r1024">, DwarfRegNum<[1024]>;
-//def R1023 : AMDILReg<1023, "r1023">, DwarfRegNum<[1023]>;
-//def R1022 : AMDILReg<1022, "r1022">, DwarfRegNum<[1022]>;
-//def R1021 : AMDILReg<1021, "r1021">, DwarfRegNum<[1021]>;
-//def R1020 : AMDILReg<1020, "r1020">, DwarfRegNum<[1020]>;
-def SP : AMDILReg<1019, "r1019">, DwarfRegNum<[1019]>;
-def T1 : AMDILReg<1018, "r1018">, DwarfRegNum<[1018]>;
-def T2 : AMDILReg<1017, "r1017">, DwarfRegNum<[1017]>;
-def T3 : AMDILReg<1016, "r1016">, DwarfRegNum<[1016]>;
-def T4 : AMDILReg<1015, "r1015">, DwarfRegNum<[1015]>;
-def T5 : AMDILReg<1014, "r1014">, DwarfRegNum<[1014]>;
-def SDP : AMDILReg<1013, "r1013">, DwarfRegNum<[1013]>;
-def R1012: AMDILReg<1012, "r1012">, DwarfRegNum<[1012]>;
-def R1011: AMDILReg<1011, "r1011">, DwarfRegNum<[1011]>;
-def R1010: AMDILReg<1010, "r1010">, DwarfRegNum<[1010]>;
-def DFP : AMDILReg<1009, "r1009">, DwarfRegNum<[1009]>;
-def R1008: AMDILReg<1008, "r1008">, DwarfRegNum<[1008]>;
-def R1007: AMDILReg<1007, "r1007">, DwarfRegNum<[1007]>;
-def R1006: AMDILReg<1006, "r1006">, DwarfRegNum<[1006]>;
-def R1005: AMDILReg<1005, "r1005">, DwarfRegNum<[1005]>;
-def R1004: AMDILReg<1004, "r1004">, DwarfRegNum<[1004]>;
-def R1003: AMDILReg<1003, "r1003">, DwarfRegNum<[1003]>;
-def R1002: AMDILReg<1002, "r1002">, DwarfRegNum<[1002]>;
-def R1001: AMDILReg<1001, "r1001">, DwarfRegNum<[1001]>;
-def MEM : AMDILReg<999, "mem">, DwarfRegNum<[999]>;
-def RA : AMDILReg<998, "r998">, DwarfRegNum<[998]>;
-def FP : AMDILReg<997, "r997">, DwarfRegNum<[997]>;
-def GPRI16 : RegisterClass<"AMDGPU", [i16], 16,
- (add (sequence "R%u", 1, 20), RA, SP, T1, T2, T3, T4, T5, SDP, R1010, R1011, R1001, R1002, R1003, R1004, R1005, R1006, R1007, R1008, MEM, R1012)> {
- let AltOrders = [(add (sequence "R%u", 1, 20))];
- let AltOrderSelect = [{
- return 1;
- }];
- }
-def GPRI32 : RegisterClass<"AMDGPU", [i32], 32,
- (add (sequence "R%u", 1, 20), RA, SP, T1, T2, T3, T4, T5, SDP, R1010, R1011, R1001, R1002, R1003, R1004, R1005, R1006, R1007, R1008, MEM, R1012)> {
- let AltOrders = [(add (sequence "R%u", 1, 20))];
- let AltOrderSelect = [{
- return 1;
- }];
- }
-def GPRF32 : RegisterClass<"AMDGPU", [f32], 32,
- (add (sequence "R%u", 1, 20), RA, SP, T1, T2, T3, T4, T5, SDP, R1010, R1011, R1001, R1002, R1003, R1004, R1005, R1006, R1007, R1008, MEM, R1012)> {
- let AltOrders = [(add (sequence "R%u", 1, 20))];
- let AltOrderSelect = [{
- return 1;
- }];
- }
diff --git a/contrib/llvm/lib/Target/R600/CaymanInstructions.td b/contrib/llvm/lib/Target/R600/CaymanInstructions.td
new file mode 100644
index 0000000..2630345
--- /dev/null
+++ b/contrib/llvm/lib/Target/R600/CaymanInstructions.td
@@ -0,0 +1,224 @@
+//===-- CaymanInstructions.td - CM Instruction defs -------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// TableGen definitions for instructions which are available only on Cayman
+// family GPUs.
+//
+//===----------------------------------------------------------------------===//
+
+def isCayman : Predicate<"Subtarget.hasCaymanISA()">;
+
+//===----------------------------------------------------------------------===//
+// Cayman Instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [isCayman] in {
+
+def MULADD_INT24_cm : R600_3OP <0x08, "MULADD_INT24",
+ [(set i32:$dst, (AMDGPUmad_i24 i32:$src0, i32:$src1, i32:$src2))], VecALU
+>;
+def MUL_INT24_cm : R600_2OP <0x5B, "MUL_INT24",
+ [(set i32:$dst, (AMDGPUmul_i24 i32:$src0, i32:$src1))], VecALU
+>;
+
+def : IMad24Pat<MULADD_INT24_cm>;
+
+let isVector = 1 in {
+
+def RECIP_IEEE_cm : RECIP_IEEE_Common<0x86>;
+
+def MULLO_INT_cm : MULLO_INT_Common<0x8F>;
+def MULHI_INT_cm : MULHI_INT_Common<0x90>;
+def MULLO_UINT_cm : MULLO_UINT_Common<0x91>;
+def MULHI_UINT_cm : MULHI_UINT_Common<0x92>;
+def RECIPSQRT_CLAMPED_cm : RECIPSQRT_CLAMPED_Common<0x87>;
+def EXP_IEEE_cm : EXP_IEEE_Common<0x81>;
+def LOG_IEEE_cm : LOG_IEEE_Common<0x83>;
+def RECIP_CLAMPED_cm : RECIP_CLAMPED_Common<0x84>;
+def RECIPSQRT_IEEE_cm : RECIPSQRT_IEEE_Common<0x89>;
+def SIN_cm : SIN_Common<0x8D>;
+def COS_cm : COS_Common<0x8E>;
+} // End isVector = 1
+
+def : POW_Common <LOG_IEEE_cm, EXP_IEEE_cm, MUL>;
+
+defm DIV_cm : DIV_Common<RECIP_IEEE_cm>;
+defm : Expand24UBitOps<MULLO_UINT_cm, ADD_INT>;
+
+// RECIP_UINT emulation for Cayman
+// The multiplication scales from [0,1] to the unsigned integer range
+def : Pat <
+ (AMDGPUurecip i32:$src0),
+ (FLT_TO_UINT_eg (MUL_IEEE (RECIP_IEEE_cm (UINT_TO_FLT_eg $src0)),
+ (MOV_IMM_I32 CONST.FP_UINT_MAX_PLUS_1)))
+>;
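A minimal host-side C++ sketch of what the RECIP_UINT emulation above computes; this is illustrative only and assumes CONST.FP_UINT_MAX_PLUS_1 encodes the float 4294967296.0f (2^32):

    #include <cstdint>

    // urecip(x) ~= floor(2^32 / x), built from a float reciprocal exactly as
    // the pattern does: UINT_TO_FLT -> RECIP_IEEE -> MUL_IEEE -> FLT_TO_UINT.
    uint32_t urecip_emulated(uint32_t x) {
      float r = 1.0f / static_cast<float>(x);           // RECIP_IEEE_cm
      return static_cast<uint32_t>(r * 4294967296.0f);  // MUL_IEEE + FLT_TO_UINT_eg
    }
    // e.g. urecip_emulated(3) is roughly 0x55555555; x >= 2 is assumed here,
    // since for x == 1 the product 2^32 does not fit in a uint32_t.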
+
+def CF_END_CM : CF_CLAUSE_EG<32, (ins), "CF_END"> {
+  let ADDR = 0;
+  let POP_COUNT = 0;
+  let COUNT = 0;
+}
+
+
+def : Pat<(fsqrt f32:$src), (MUL R600_Reg32:$src, (RECIPSQRT_CLAMPED_cm $src))>;
+
+class RAT_STORE_DWORD <RegisterClass rc, ValueType vt, bits<4> mask> :
+ CF_MEM_RAT_CACHELESS <0x14, 0, mask,
+ (ins rc:$rw_gpr, R600_TReg32_X:$index_gpr),
+ "STORE_DWORD $rw_gpr, $index_gpr",
+ [(global_store vt:$rw_gpr, i32:$index_gpr)]> {
+ let eop = 0; // This bit is not used on Cayman.
+}
+
+def RAT_STORE_DWORD32 : RAT_STORE_DWORD <R600_TReg32_X, i32, 0x1>;
+def RAT_STORE_DWORD64 : RAT_STORE_DWORD <R600_Reg64, v2i32, 0x3>;
+def RAT_STORE_DWORD128 : RAT_STORE_DWORD <R600_Reg128, v4i32, 0xf>;
+
+class VTX_READ_cm <string name, bits<8> buffer_id, dag outs, list<dag> pattern>
+ : VTX_WORD0_cm, VTX_READ<name, buffer_id, outs, pattern> {
+
+ // Static fields
+ let VC_INST = 0;
+ let FETCH_TYPE = 2;
+ let FETCH_WHOLE_QUAD = 0;
+ let BUFFER_ID = buffer_id;
+ let SRC_REL = 0;
+ // XXX: We can infer this field based on the SRC_GPR. This would allow us
+ // to store vertex addresses in any channel, not just X.
+ let SRC_SEL_X = 0;
+ let SRC_SEL_Y = 0;
+ let STRUCTURED_READ = 0;
+ let LDS_REQ = 0;
+ let COALESCED_READ = 0;
+
+ let Inst{31-0} = Word0;
+}
+
+class VTX_READ_8_cm <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_cm <"VTX_READ_8 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_TReg32_X:$dst_gpr), pattern> {
+
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 7; // Masked
+ let DST_SEL_Z = 7; // Masked
+ let DST_SEL_W = 7; // Masked
+ let DATA_FORMAT = 1; // FMT_8
+}
+
+class VTX_READ_16_cm <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_cm <"VTX_READ_16 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_TReg32_X:$dst_gpr), pattern> {
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 7; // Masked
+ let DST_SEL_Z = 7; // Masked
+ let DST_SEL_W = 7; // Masked
+ let DATA_FORMAT = 5; // FMT_16
+
+}
+
+class VTX_READ_32_cm <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_cm <"VTX_READ_32 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_TReg32_X:$dst_gpr), pattern> {
+
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 7; // Masked
+ let DST_SEL_Z = 7; // Masked
+ let DST_SEL_W = 7; // Masked
+ let DATA_FORMAT = 0xD; // COLOR_32
+
+ // This is not really necessary, but there were some GPU hangs that appeared
+ // to be caused by ALU instructions in the next instruction group that wrote
+ // to the $src_gpr registers of the VTX_READ.
+ // e.g.
+ // %T3_X<def> = VTX_READ_PARAM_32_eg %T2_X<kill>, 24
+ // %T2_X<def> = MOV %ZERO
+ // Adding this constraint prevents this from happening.
+ let Constraints = "$src_gpr.ptr = $dst_gpr";
+}
+
+class VTX_READ_64_cm <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_cm <"VTX_READ_64 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_Reg64:$dst_gpr), pattern> {
+
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 1;
+ let DST_SEL_Z = 7;
+ let DST_SEL_W = 7;
+ let DATA_FORMAT = 0x1D; // COLOR_32_32
+}
+
+class VTX_READ_128_cm <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_cm <"VTX_READ_128 $dst_gpr.XYZW, $src_gpr", buffer_id,
+ (outs R600_Reg128:$dst_gpr), pattern> {
+
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 1;
+ let DST_SEL_Z = 2;
+ let DST_SEL_W = 3;
+ let DATA_FORMAT = 0x22; // COLOR_32_32_32_32
+
+ // XXX: Need to force VTX_READ_128 instructions to write to the same register
+ // that holds its buffer address to avoid potential hangs. We can't use
+ // the same constraint as VTX_READ_32_eg, because the $src_gpr.ptr and $dst
+ // registers are different sizes.
+}
+
+//===----------------------------------------------------------------------===//
+// VTX Read from parameter memory space
+//===----------------------------------------------------------------------===//
+def VTX_READ_PARAM_8_cm : VTX_READ_8_cm <0,
+ [(set i32:$dst_gpr, (load_param_exti8 ADDRVTX_READ:$src_gpr))]
+>;
+
+def VTX_READ_PARAM_16_cm : VTX_READ_16_cm <0,
+ [(set i32:$dst_gpr, (load_param_exti16 ADDRVTX_READ:$src_gpr))]
+>;
+
+def VTX_READ_PARAM_32_cm : VTX_READ_32_cm <0,
+ [(set i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
+>;
+
+def VTX_READ_PARAM_64_cm : VTX_READ_64_cm <0,
+ [(set v2i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
+>;
+
+def VTX_READ_PARAM_128_cm : VTX_READ_128_cm <0,
+ [(set v4i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
+>;
+
+//===----------------------------------------------------------------------===//
+// VTX Read from global memory space
+//===----------------------------------------------------------------------===//
+
+// 8-bit reads
+def VTX_READ_GLOBAL_8_cm : VTX_READ_8_cm <1,
+ [(set i32:$dst_gpr, (az_extloadi8_global ADDRVTX_READ:$src_gpr))]
+>;
+
+def VTX_READ_GLOBAL_16_cm : VTX_READ_16_cm <1,
+ [(set i32:$dst_gpr, (az_extloadi16_global ADDRVTX_READ:$src_gpr))]
+>;
+
+// 32-bit reads
+def VTX_READ_GLOBAL_32_cm : VTX_READ_32_cm <1,
+ [(set i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
+>;
+
+// 64-bit reads
+def VTX_READ_GLOBAL_64_cm : VTX_READ_64_cm <1,
+ [(set v2i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
+>;
+
+// 128-bit reads
+def VTX_READ_GLOBAL_128_cm : VTX_READ_128_cm <1,
+ [(set v4i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
+>;
+
+} // End isCayman
+
diff --git a/contrib/llvm/lib/Target/R600/EvergreenInstructions.td b/contrib/llvm/lib/Target/R600/EvergreenInstructions.td
new file mode 100644
index 0000000..484e522
--- /dev/null
+++ b/contrib/llvm/lib/Target/R600/EvergreenInstructions.td
@@ -0,0 +1,609 @@
+//===-- EvergreenInstructions.td - EG Instruction defs ----*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// TableGen definitions for instructions which are:
+// - Available to Evergreen and newer VLIW4/VLIW5 GPUs
+// - Available only on Evergreen family GPUs.
+//
+//===----------------------------------------------------------------------===//
+
+def isEG : Predicate<
+ "Subtarget.getGeneration() >= AMDGPUSubtarget::EVERGREEN && "
+ "Subtarget.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS && "
+ "!Subtarget.hasCaymanISA()"
+>;
+
+def isEGorCayman : Predicate<
+ "Subtarget.getGeneration() == AMDGPUSubtarget::EVERGREEN || "
+ "Subtarget.getGeneration() == AMDGPUSubtarget::NORTHERN_ISLANDS"
+>;
+
+//===----------------------------------------------------------------------===//
+// Evergreen / Cayman store instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [isEGorCayman] in {
+
+class CF_MEM_RAT_CACHELESS <bits<6> rat_inst, bits<4> rat_id, bits<4> mask, dag ins,
+ string name, list<dag> pattern>
+ : EG_CF_RAT <0x57, rat_inst, rat_id, mask, (outs), ins,
+ "MEM_RAT_CACHELESS "#name, pattern>;
+
+class CF_MEM_RAT <bits<6> rat_inst, bits<4> rat_id, dag ins, string name,
+ list<dag> pattern>
+ : EG_CF_RAT <0x56, rat_inst, rat_id, 0xf /* mask */, (outs), ins,
+ "MEM_RAT "#name, pattern>;
+
+def RAT_MSKOR : CF_MEM_RAT <0x11, 0,
+ (ins R600_Reg128:$rw_gpr, R600_TReg32_X:$index_gpr),
+ "MSKOR $rw_gpr.XW, $index_gpr",
+ [(mskor_global v4i32:$rw_gpr, i32:$index_gpr)]
+> {
+ let eop = 0;
+}
+
+} // End let Predicates = [isEGorCayman]
+
+//===----------------------------------------------------------------------===//
+// Evergreen Only instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [isEG] in {
+
+def RECIP_IEEE_eg : RECIP_IEEE_Common<0x86>;
+defm DIV_eg : DIV_Common<RECIP_IEEE_eg>;
+
+def MULLO_INT_eg : MULLO_INT_Common<0x8F>;
+def MULHI_INT_eg : MULHI_INT_Common<0x90>;
+def MULLO_UINT_eg : MULLO_UINT_Common<0x91>;
+def MULHI_UINT_eg : MULHI_UINT_Common<0x92>;
+def RECIP_UINT_eg : RECIP_UINT_Common<0x94>;
+def RECIPSQRT_CLAMPED_eg : RECIPSQRT_CLAMPED_Common<0x87>;
+def EXP_IEEE_eg : EXP_IEEE_Common<0x81>;
+def LOG_IEEE_eg : LOG_IEEE_Common<0x83>;
+def RECIP_CLAMPED_eg : RECIP_CLAMPED_Common<0x84>;
+def RECIPSQRT_IEEE_eg : RECIPSQRT_IEEE_Common<0x89>;
+def SIN_eg : SIN_Common<0x8D>;
+def COS_eg : COS_Common<0x8E>;
+
+def : POW_Common <LOG_IEEE_eg, EXP_IEEE_eg, MUL>;
+def : Pat<(fsqrt f32:$src), (MUL $src, (RECIPSQRT_CLAMPED_eg $src))>;
+
+defm : Expand24IBitOps<MULLO_INT_eg, ADD_INT>;
+
+//===----------------------------------------------------------------------===//
+// Memory read/write instructions
+//===----------------------------------------------------------------------===//
+
+let usesCustomInserter = 1 in {
+
+// 32-bit store
+def RAT_WRITE_CACHELESS_32_eg : CF_MEM_RAT_CACHELESS <0x2, 0, 0x1,
+ (ins R600_TReg32_X:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop),
+ "STORE_RAW $rw_gpr, $index_gpr, $eop",
+ [(global_store i32:$rw_gpr, i32:$index_gpr)]
+>;
+
+// 64-bit store
+def RAT_WRITE_CACHELESS_64_eg : CF_MEM_RAT_CACHELESS <0x2, 0, 0x3,
+ (ins R600_Reg64:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop),
+ "STORE_RAW $rw_gpr.XY, $index_gpr, $eop",
+ [(global_store v2i32:$rw_gpr, i32:$index_gpr)]
+>;
+
+// 128-bit store
+def RAT_WRITE_CACHELESS_128_eg : CF_MEM_RAT_CACHELESS <0x2, 0, 0xf,
+ (ins R600_Reg128:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop),
+ "STORE_RAW $rw_gpr.XYZW, $index_gpr, $eop",
+ [(global_store v4i32:$rw_gpr, i32:$index_gpr)]
+>;
+
+} // End usesCustomInserter = 1
+
+class VTX_READ_eg <string name, bits<8> buffer_id, dag outs, list<dag> pattern>
+ : VTX_WORD0_eg, VTX_READ<name, buffer_id, outs, pattern> {
+
+ // Static fields
+ let VC_INST = 0;
+ let FETCH_TYPE = 2;
+ let FETCH_WHOLE_QUAD = 0;
+ let BUFFER_ID = buffer_id;
+ let SRC_REL = 0;
+ // XXX: We can infer this field based on the SRC_GPR. This would allow us
+ // to store vertex addresses in any channel, not just X.
+ let SRC_SEL_X = 0;
+
+ let Inst{31-0} = Word0;
+}
+
+class VTX_READ_8_eg <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_eg <"VTX_READ_8 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_TReg32_X:$dst_gpr), pattern> {
+
+ let MEGA_FETCH_COUNT = 1;
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 7; // Masked
+ let DST_SEL_Z = 7; // Masked
+ let DST_SEL_W = 7; // Masked
+ let DATA_FORMAT = 1; // FMT_8
+}
+
+class VTX_READ_16_eg <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_eg <"VTX_READ_16 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_TReg32_X:$dst_gpr), pattern> {
+ let MEGA_FETCH_COUNT = 2;
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 7; // Masked
+ let DST_SEL_Z = 7; // Masked
+ let DST_SEL_W = 7; // Masked
+ let DATA_FORMAT = 5; // FMT_16
+
+}
+
+class VTX_READ_32_eg <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_eg <"VTX_READ_32 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_TReg32_X:$dst_gpr), pattern> {
+
+ let MEGA_FETCH_COUNT = 4;
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 7; // Masked
+ let DST_SEL_Z = 7; // Masked
+ let DST_SEL_W = 7; // Masked
+ let DATA_FORMAT = 0xD; // COLOR_32
+
+ // This is not really necessary, but there were some GPU hangs that appeared
+ // to be caused by ALU instructions in the next instruction group that wrote
+ // to the $src_gpr registers of the VTX_READ.
+ // e.g.
+ // %T3_X<def> = VTX_READ_PARAM_32_eg %T2_X<kill>, 24
+ // %T2_X<def> = MOV %ZERO
+ // Adding this constraint prevents this from happening.
+ let Constraints = "$src_gpr.ptr = $dst_gpr";
+}
+
+class VTX_READ_64_eg <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_eg <"VTX_READ_64 $dst_gpr.XY, $src_gpr", buffer_id,
+ (outs R600_Reg64:$dst_gpr), pattern> {
+
+ let MEGA_FETCH_COUNT = 8;
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 1;
+ let DST_SEL_Z = 7;
+ let DST_SEL_W = 7;
+ let DATA_FORMAT = 0x1D; // COLOR_32_32
+}
+
+class VTX_READ_128_eg <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_eg <"VTX_READ_128 $dst_gpr.XYZW, $src_gpr", buffer_id,
+ (outs R600_Reg128:$dst_gpr), pattern> {
+
+ let MEGA_FETCH_COUNT = 16;
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 1;
+ let DST_SEL_Z = 2;
+ let DST_SEL_W = 3;
+ let DATA_FORMAT = 0x22; // COLOR_32_32_32_32
+
+ // XXX: Need to force VTX_READ_128 instructions to write to the same register
+ // that holds its buffer address to avoid potential hangs. We can't use
+ // the same constraint as VTX_READ_32_eg, because the $src_gpr.ptr and $dst
+ // registers are different sizes.
+}
+
+//===----------------------------------------------------------------------===//
+// VTX Read from parameter memory space
+//===----------------------------------------------------------------------===//
+
+def VTX_READ_PARAM_8_eg : VTX_READ_8_eg <0,
+ [(set i32:$dst_gpr, (load_param_exti8 ADDRVTX_READ:$src_gpr))]
+>;
+
+def VTX_READ_PARAM_16_eg : VTX_READ_16_eg <0,
+ [(set i32:$dst_gpr, (load_param_exti16 ADDRVTX_READ:$src_gpr))]
+>;
+
+def VTX_READ_PARAM_32_eg : VTX_READ_32_eg <0,
+ [(set i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
+>;
+
+def VTX_READ_PARAM_64_eg : VTX_READ_64_eg <0,
+ [(set v2i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
+>;
+
+def VTX_READ_PARAM_128_eg : VTX_READ_128_eg <0,
+ [(set v4i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
+>;
+
+//===----------------------------------------------------------------------===//
+// VTX Read from global memory space
+//===----------------------------------------------------------------------===//
+
+// 8-bit reads
+def VTX_READ_GLOBAL_8_eg : VTX_READ_8_eg <1,
+ [(set i32:$dst_gpr, (az_extloadi8_global ADDRVTX_READ:$src_gpr))]
+>;
+
+def VTX_READ_GLOBAL_16_eg : VTX_READ_16_eg <1,
+ [(set i32:$dst_gpr, (az_extloadi16_global ADDRVTX_READ:$src_gpr))]
+>;
+
+// 32-bit reads
+def VTX_READ_GLOBAL_32_eg : VTX_READ_32_eg <1,
+ [(set i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
+>;
+
+// 64-bit reads
+def VTX_READ_GLOBAL_64_eg : VTX_READ_64_eg <1,
+ [(set v2i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
+>;
+
+// 128-bit reads
+def VTX_READ_GLOBAL_128_eg : VTX_READ_128_eg <1,
+ [(set v4i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
+>;
+
+} // End Predicates = [isEG]
+
+//===----------------------------------------------------------------------===//
+// Evergreen / Cayman Instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [isEGorCayman] in {
+
+// BFE_UINT - bit_extract, an optimization for mask and shift
+// Src0 = Input
+// Src1 = Offset
+// Src2 = Width
+//
+// bit_extract = (Input << (32 - Offset - Width)) >> (32 - Width)
+//
+// Example Usage:
+// (Offset, Width)
+//
+// (0, 8) = (Input << 24) >> 24 = (Input & 0xff) >> 0
+// (8, 8) = (Input << 16) >> 24 = (Input & 0xffff) >> 8
+// (16, 8) = (Input << 8) >> 24 = (Input & 0xffffff) >> 16
+// (24, 8) = (Input << 0) >> 24 = (Input & 0xffffffff) >> 24
+def BFE_UINT_eg : R600_3OP <0x4, "BFE_UINT",
+ [(set i32:$dst, (AMDGPUbfe_u32 i32:$src0, i32:$src1, i32:$src2))],
+ VecALU
+>;
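A small C++ sketch of the mask-and-shift identity quoted in the comment above; illustrative only, and it assumes 0 < Width and Offset + Width <= 32 so no shift count reaches 32:

    #include <cassert>
    #include <cstdint>

    // bit_extract = (Input << (32 - Offset - Width)) >> (32 - Width)
    uint32_t bfe_u32(uint32_t Input, uint32_t Offset, uint32_t Width) {
      assert(Width > 0 && Offset + Width <= 32);
      return (Input << (32 - Offset - Width)) >> (32 - Width);
    }
    // e.g. bfe_u32(x, 8, 8) == (x >> 8) & 0xff, matching the (8, 8) row above.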
+
+def BFE_INT_eg : R600_3OP <0x5, "BFE_INT",
+ [(set i32:$dst, (AMDGPUbfe_i32 i32:$src0, i32:$src1, i32:$src2))],
+ VecALU
+>;
+
+// XXX: This pattern is broken, disabling for now. See comment in
+// AMDGPUInstructions.td for more info.
+// def : BFEPattern <BFE_UINT_eg>;
+def BFI_INT_eg : R600_3OP <0x06, "BFI_INT",
+ [(set i32:$dst, (AMDGPUbfi i32:$src0, i32:$src1, i32:$src2))],
+ VecALU
+>;
+
+def : Pat<(i32 (sext_inreg i32:$src, i1)),
+ (BFE_INT_eg i32:$src, (i32 ZERO), (i32 ONE_INT))>;
+def : Pat<(i32 (sext_inreg i32:$src, i8)),
+ (BFE_INT_eg i32:$src, (i32 ZERO), (MOV_IMM_I32 8))>;
+def : Pat<(i32 (sext_inreg i32:$src, i16)),
+ (BFE_INT_eg i32:$src, (i32 ZERO), (MOV_IMM_I32 16))>;
+
+defm : BFIPatterns <BFI_INT_eg, MOV_IMM_I32>;
+
+def BFM_INT_eg : R600_2OP <0xA0, "BFM_INT",
+ [(set i32:$dst, (AMDGPUbfm i32:$src0, i32:$src1))],
+ VecALU
+>;
+
+def MULADD_UINT24_eg : R600_3OP <0x10, "MULADD_UINT24",
+ [(set i32:$dst, (AMDGPUmad_u24 i32:$src0, i32:$src1, i32:$src2))], VecALU
+>;
+
+def : UMad24Pat<MULADD_UINT24_eg>;
+
+def BIT_ALIGN_INT_eg : R600_3OP <0xC, "BIT_ALIGN_INT", [], VecALU>;
+def : ROTRPattern <BIT_ALIGN_INT_eg>;
+def MULADD_eg : MULADD_Common<0x14>;
+def MULADD_IEEE_eg : MULADD_IEEE_Common<0x18>;
+def ASHR_eg : ASHR_Common<0x15>;
+def LSHR_eg : LSHR_Common<0x16>;
+def LSHL_eg : LSHL_Common<0x17>;
+def CNDE_eg : CNDE_Common<0x19>;
+def CNDGT_eg : CNDGT_Common<0x1A>;
+def CNDGE_eg : CNDGE_Common<0x1B>;
+def MUL_LIT_eg : MUL_LIT_Common<0x1F>;
+def LOG_CLAMPED_eg : LOG_CLAMPED_Common<0x82>;
+def MUL_UINT24_eg : R600_2OP <0xB5, "MUL_UINT24",
+ [(set i32:$dst, (AMDGPUmul_u24 i32:$src0, i32:$src1))], VecALU
+>;
+def DOT4_eg : DOT4_Common<0xBE>;
+defm CUBE_eg : CUBE_Common<0xC0>;
+
+def BCNT_INT : R600_1OP_Helper <0xAA, "BCNT_INT", ctpop, VecALU>;
+
+def FFBH_UINT : R600_1OP_Helper <0xAB, "FFBH_UINT", ctlz_zero_undef, VecALU>;
+def FFBL_INT : R600_1OP_Helper <0xAC, "FFBL_INT", cttz_zero_undef, VecALU>;
+
+let hasSideEffects = 1 in {
+ def MOVA_INT_eg : R600_1OP <0xCC, "MOVA_INT", [], VecALU>;
+}
+
+def TGSI_LIT_Z_eg : TGSI_LIT_Z_Common<MUL_LIT_eg, LOG_CLAMPED_eg, EXP_IEEE_eg>;
+
+def FLT_TO_INT_eg : FLT_TO_INT_Common<0x50> {
+ let Pattern = [];
+ let Itinerary = AnyALU;
+}
+
+def INT_TO_FLT_eg : INT_TO_FLT_Common<0x9B>;
+
+def FLT_TO_UINT_eg : FLT_TO_UINT_Common<0x9A> {
+ let Pattern = [];
+}
+
+def UINT_TO_FLT_eg : UINT_TO_FLT_Common<0x9C>;
+
+def GROUP_BARRIER : InstR600 <
+ (outs), (ins), " GROUP_BARRIER", [(int_AMDGPU_barrier_local), (int_AMDGPU_barrier_global)], AnyALU>,
+ R600ALU_Word0,
+ R600ALU_Word1_OP2 <0x54> {
+
+ let dst = 0;
+ let dst_rel = 0;
+ let src0 = 0;
+ let src0_rel = 0;
+ let src0_neg = 0;
+ let src0_abs = 0;
+ let src1 = 0;
+ let src1_rel = 0;
+ let src1_neg = 0;
+ let src1_abs = 0;
+ let write = 0;
+ let omod = 0;
+ let clamp = 0;
+ let last = 1;
+ let bank_swizzle = 0;
+ let pred_sel = 0;
+ let update_exec_mask = 0;
+ let update_pred = 0;
+
+ let Inst{31-0} = Word0;
+ let Inst{63-32} = Word1;
+
+ let ALUInst = 1;
+}
+
+def : Pat <
+ (int_AMDGPU_barrier_global),
+ (GROUP_BARRIER)
+>;
+
+//===----------------------------------------------------------------------===//
+// LDS Instructions
+//===----------------------------------------------------------------------===//
+class R600_LDS <bits<6> op, dag outs, dag ins, string asm,
+ list<dag> pattern = []> :
+
+ InstR600 <outs, ins, asm, pattern, XALU>,
+ R600_ALU_LDS_Word0,
+ R600LDS_Word1 {
+
+ bits<6> offset = 0;
+ let lds_op = op;
+
+ let Word1{27} = offset{0};
+ let Word1{12} = offset{1};
+ let Word1{28} = offset{2};
+ let Word1{31} = offset{3};
+ let Word0{12} = offset{4};
+ let Word0{25} = offset{5};
+
+
+ let Inst{31-0} = Word0;
+ let Inst{63-32} = Word1;
+
+ let ALUInst = 1;
+ let HasNativeOperands = 1;
+ let UseNamedOperandTable = 1;
+}
+
+class R600_LDS_1A <bits<6> lds_op, string name, list<dag> pattern> : R600_LDS <
+ lds_op,
+ (outs R600_Reg32:$dst),
+ (ins R600_Reg32:$src0, REL:$src0_rel, SEL:$src0_sel,
+ LAST:$last, R600_Pred:$pred_sel,
+ BANK_SWIZZLE:$bank_swizzle),
+ " "#name#" $last OQAP, $src0$src0_rel $pred_sel",
+ pattern
+ > {
+
+ let src1 = 0;
+ let src1_rel = 0;
+ let src2 = 0;
+ let src2_rel = 0;
+
+ let usesCustomInserter = 1;
+ let LDS_1A = 1;
+ let DisableEncoding = "$dst";
+}
+
+class R600_LDS_1A1D <bits<6> lds_op, dag outs, string name, list<dag> pattern,
+ string dst =""> :
+ R600_LDS <
+ lds_op, outs,
+ (ins R600_Reg32:$src0, REL:$src0_rel, SEL:$src0_sel,
+ R600_Reg32:$src1, REL:$src1_rel, SEL:$src1_sel,
+ LAST:$last, R600_Pred:$pred_sel,
+ BANK_SWIZZLE:$bank_swizzle),
+ " "#name#" $last "#dst#"$src0$src0_rel, $src1$src1_rel, $pred_sel",
+ pattern
+ > {
+
+ field string BaseOp;
+
+ let src2 = 0;
+ let src2_rel = 0;
+ let LDS_1A1D = 1;
+}
+
+class R600_LDS_1A1D_NORET <bits<6> lds_op, string name, list<dag> pattern> :
+ R600_LDS_1A1D <lds_op, (outs), name, pattern> {
+ let BaseOp = name;
+}
+
+class R600_LDS_1A1D_RET <bits<6> lds_op, string name, list<dag> pattern> :
+ R600_LDS_1A1D <lds_op, (outs R600_Reg32:$dst), name##"_RET", pattern, "OQAP, "> {
+
+ let BaseOp = name;
+ let usesCustomInserter = 1;
+ let DisableEncoding = "$dst";
+}
+
+class R600_LDS_1A2D <bits<6> lds_op, string name, list<dag> pattern> :
+ R600_LDS <
+ lds_op,
+ (outs),
+ (ins R600_Reg32:$src0, REL:$src0_rel, SEL:$src0_sel,
+ R600_Reg32:$src1, REL:$src1_rel, SEL:$src1_sel,
+ R600_Reg32:$src2, REL:$src2_rel, SEL:$src2_sel,
+ LAST:$last, R600_Pred:$pred_sel, BANK_SWIZZLE:$bank_swizzle),
+ " "#name# "$last $src0$src0_rel, $src1$src1_rel, $src2$src2_rel, $pred_sel",
+ pattern> {
+ let LDS_1A2D = 1;
+}
+
+def LDS_ADD : R600_LDS_1A1D_NORET <0x0, "LDS_ADD", [] >;
+def LDS_SUB : R600_LDS_1A1D_NORET <0x1, "LDS_SUB", [] >;
+def LDS_WRITE : R600_LDS_1A1D_NORET <0xD, "LDS_WRITE",
+ [(local_store (i32 R600_Reg32:$src1), R600_Reg32:$src0)]
+>;
+def LDS_BYTE_WRITE : R600_LDS_1A1D_NORET<0x12, "LDS_BYTE_WRITE",
+ [(truncstorei8_local i32:$src1, i32:$src0)]
+>;
+def LDS_SHORT_WRITE : R600_LDS_1A1D_NORET<0x13, "LDS_SHORT_WRITE",
+ [(truncstorei16_local i32:$src1, i32:$src0)]
+>;
+def LDS_ADD_RET : R600_LDS_1A1D_RET <0x20, "LDS_ADD",
+ [(set i32:$dst, (atomic_load_add_local i32:$src0, i32:$src1))]
+>;
+def LDS_SUB_RET : R600_LDS_1A1D_RET <0x21, "LDS_SUB",
+ [(set i32:$dst, (atomic_load_sub_local i32:$src0, i32:$src1))]
+>;
+def LDS_READ_RET : R600_LDS_1A <0x32, "LDS_READ_RET",
+ [(set (i32 R600_Reg32:$dst), (local_load R600_Reg32:$src0))]
+>;
+def LDS_BYTE_READ_RET : R600_LDS_1A <0x36, "LDS_BYTE_READ_RET",
+ [(set i32:$dst, (sextloadi8_local i32:$src0))]
+>;
+def LDS_UBYTE_READ_RET : R600_LDS_1A <0x37, "LDS_UBYTE_READ_RET",
+ [(set i32:$dst, (az_extloadi8_local i32:$src0))]
+>;
+def LDS_SHORT_READ_RET : R600_LDS_1A <0x38, "LDS_SHORT_READ_RET",
+ [(set i32:$dst, (sextloadi16_local i32:$src0))]
+>;
+def LDS_USHORT_READ_RET : R600_LDS_1A <0x39, "LDS_USHORT_READ_RET",
+ [(set i32:$dst, (az_extloadi16_local i32:$src0))]
+>;
+
+// TRUNC is used for the FLT_TO_INT instructions to work around a
+// perceived problem where the rounding modes are applied differently
+// depending on the instruction and the slot they are in.
+// See:
+// https://bugs.freedesktop.org/show_bug.cgi?id=50232
+// Mesa commit: a1a0974401c467cb86ef818f22df67c21774a38c
+//
+// XXX: Lowering SELECT_CC will sometimes generate fp_to_[su]int nodes,
+// which do not need to be truncated since the fp values are 0.0f or 1.0f.
+// We should look into handling these cases separately.
+def : Pat<(fp_to_sint f32:$src0), (FLT_TO_INT_eg (TRUNC $src0))>;
+
+def : Pat<(fp_to_uint f32:$src0), (FLT_TO_UINT_eg (TRUNC $src0))>;
+
+// SHA-256 Patterns
+def : SHA256MaPattern <BFI_INT_eg, XOR_INT>;
+
+def : FROUNDPat <CNDGE_eg>;
+
+def EG_ExportSwz : ExportSwzInst {
+ let Word1{19-16} = 0; // BURST_COUNT
+ let Word1{20} = 0; // VALID_PIXEL_MODE
+ let Word1{21} = eop;
+ let Word1{29-22} = inst;
+ let Word1{30} = 0; // MARK
+ let Word1{31} = 1; // BARRIER
+}
+defm : ExportPattern<EG_ExportSwz, 83>;
+
+def EG_ExportBuf : ExportBufInst {
+ let Word1{19-16} = 0; // BURST_COUNT
+ let Word1{20} = 0; // VALID_PIXEL_MODE
+ let Word1{21} = eop;
+ let Word1{29-22} = inst;
+ let Word1{30} = 0; // MARK
+ let Word1{31} = 1; // BARRIER
+}
+defm : SteamOutputExportPattern<EG_ExportBuf, 0x40, 0x41, 0x42, 0x43>;
+
+def CF_TC_EG : CF_CLAUSE_EG<1, (ins i32imm:$ADDR, i32imm:$COUNT),
+ "TEX $COUNT @$ADDR"> {
+ let POP_COUNT = 0;
+}
+def CF_VC_EG : CF_CLAUSE_EG<2, (ins i32imm:$ADDR, i32imm:$COUNT),
+ "VTX $COUNT @$ADDR"> {
+ let POP_COUNT = 0;
+}
+def WHILE_LOOP_EG : CF_CLAUSE_EG<6, (ins i32imm:$ADDR),
+ "LOOP_START_DX10 @$ADDR"> {
+ let POP_COUNT = 0;
+ let COUNT = 0;
+}
+def END_LOOP_EG : CF_CLAUSE_EG<5, (ins i32imm:$ADDR), "END_LOOP @$ADDR"> {
+ let POP_COUNT = 0;
+ let COUNT = 0;
+}
+def LOOP_BREAK_EG : CF_CLAUSE_EG<9, (ins i32imm:$ADDR),
+ "LOOP_BREAK @$ADDR"> {
+ let POP_COUNT = 0;
+ let COUNT = 0;
+}
+def CF_CONTINUE_EG : CF_CLAUSE_EG<8, (ins i32imm:$ADDR),
+ "CONTINUE @$ADDR"> {
+ let POP_COUNT = 0;
+ let COUNT = 0;
+}
+def CF_JUMP_EG : CF_CLAUSE_EG<10, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
+ "JUMP @$ADDR POP:$POP_COUNT"> {
+ let COUNT = 0;
+}
+def CF_PUSH_EG : CF_CLAUSE_EG<11, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
+ "PUSH @$ADDR POP:$POP_COUNT"> {
+ let COUNT = 0;
+}
+def CF_ELSE_EG : CF_CLAUSE_EG<13, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
+ "ELSE @$ADDR POP:$POP_COUNT"> {
+ let COUNT = 0;
+}
+def CF_CALL_FS_EG : CF_CLAUSE_EG<19, (ins), "CALL_FS"> {
+ let ADDR = 0;
+ let COUNT = 0;
+ let POP_COUNT = 0;
+}
+def POP_EG : CF_CLAUSE_EG<14, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
+ "POP @$ADDR POP:$POP_COUNT"> {
+ let COUNT = 0;
+}
+def CF_END_EG : CF_CLAUSE_EG<0, (ins), "CF_END"> {
+ let COUNT = 0;
+ let POP_COUNT = 0;
+ let ADDR = 0;
+ let END_OF_PROGRAM = 1;
+}
+
+} // End Predicates = [isEGorCayman]
diff --git a/contrib/llvm/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp b/contrib/llvm/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp
index 99e1377..0927040 100644
--- a/contrib/llvm/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp
@@ -12,6 +12,8 @@
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Support/MathExtras.h"
using namespace llvm;
@@ -23,6 +25,21 @@ void AMDGPUInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
printAnnotation(OS, Annot);
}
+void AMDGPUInstPrinter::printU8ImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ O << formatHex(MI->getOperand(OpNo).getImm() & 0xff);
+}
+
+void AMDGPUInstPrinter::printU16ImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ O << formatHex(MI->getOperand(OpNo).getImm() & 0xffff);
+}
+
+void AMDGPUInstPrinter::printU32ImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ O << formatHex(MI->getOperand(OpNo).getImm() & 0xffffffff);
+}
+
void AMDGPUInstPrinter::printRegOperand(unsigned reg, raw_ostream &O) {
switch (reg) {
case AMDGPU::VCC:
@@ -41,43 +58,78 @@ void AMDGPUInstPrinter::printRegOperand(unsigned reg, raw_ostream &O) {
break;
}
- // It seems there's no way to use SIRegisterInfo here, and dealing with the
- // giant enum of all the different shifted sets of registers is pretty
- // unmanageable, so parse the name and reformat it to be prettier.
- StringRef Name(getRegisterName(reg));
-
- std::pair<StringRef, StringRef> Split = Name.split('_');
- StringRef SubRegName = Split.first;
- StringRef Rest = Split.second;
+ char Type;
+ unsigned NumRegs;
+
+ if (MRI.getRegClass(AMDGPU::VGPR_32RegClassID).contains(reg)) {
+ Type = 'v';
+ NumRegs = 1;
+ } else if (MRI.getRegClass(AMDGPU::SGPR_32RegClassID).contains(reg)) {
+ Type = 's';
+ NumRegs = 1;
+ } else if (MRI.getRegClass(AMDGPU::VReg_64RegClassID).contains(reg)) {
+ Type = 'v';
+ NumRegs = 2;
+ } else if (MRI.getRegClass(AMDGPU::SReg_64RegClassID).contains(reg)) {
+ Type = 's';
+ NumRegs = 2;
+ } else if (MRI.getRegClass(AMDGPU::VReg_128RegClassID).contains(reg)) {
+ Type = 'v';
+ NumRegs = 4;
+ } else if (MRI.getRegClass(AMDGPU::SReg_128RegClassID).contains(reg)) {
+ Type = 's';
+ NumRegs = 4;
+ } else if (MRI.getRegClass(AMDGPU::VReg_96RegClassID).contains(reg)) {
+ Type = 'v';
+ NumRegs = 3;
+ } else if (MRI.getRegClass(AMDGPU::VReg_256RegClassID).contains(reg)) {
+ Type = 'v';
+ NumRegs = 8;
+ } else if (MRI.getRegClass(AMDGPU::SReg_256RegClassID).contains(reg)) {
+ Type = 's';
+ NumRegs = 8;
+ } else if (MRI.getRegClass(AMDGPU::VReg_512RegClassID).contains(reg)) {
+ Type = 'v';
+ NumRegs = 16;
+ } else if (MRI.getRegClass(AMDGPU::SReg_512RegClassID).contains(reg)) {
+ Type = 's';
+ NumRegs = 16;
+ } else {
+ O << getRegisterName(reg);
+ return;
+ }
- if (SubRegName.size() <= 4) { // Must at least be as long as "SGPR"/"VGPR".
- O << Name;
+ // The low 8 bits of the encoding value are the register index, for both
+ // VGPRs and SGPRs.
+ unsigned RegIdx = MRI.getEncodingValue(reg) & ((1 << 8) - 1);
+ if (NumRegs == 1) {
+ O << Type << RegIdx;
return;
}
- unsigned RegIndex;
- StringRef RegIndexStr = SubRegName.drop_front(4);
+ O << Type << '[' << RegIdx << ':' << (RegIdx + NumRegs - 1) << ']';
+}
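A compact sketch of the register-name scheme implemented above, for illustration only; RegIdx is the low byte of the encoding value and NumRegs is the tuple width, as in the code:

    #include <cstdio>

    // Prints "v2" for a single VGPR with index 2, and "s[4:7]" for a
    // four-dword SGPR tuple starting at index 4, mirroring printRegOperand.
    void printGpuRegName(char Type, unsigned RegIdx, unsigned NumRegs) {
      if (NumRegs == 1)
        std::printf("%c%u", Type, RegIdx);
      else
        std::printf("%c[%u:%u]", Type, RegIdx, RegIdx + NumRegs - 1);
    }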
- if (RegIndexStr.getAsInteger(10, RegIndex)) {
- O << Name;
+void AMDGPUInstPrinter::printImmediate(uint32_t Imm, raw_ostream &O) {
+ int32_t SImm = static_cast<int32_t>(Imm);
+ if (SImm >= -16 && SImm <= 64) {
+ O << SImm;
return;
}
- if (SubRegName.front() == 'V')
- O << 'v';
- else if (SubRegName.front() == 'S')
- O << 's';
- else {
- O << Name;
+ if (Imm == FloatToBits(1.0f) ||
+ Imm == FloatToBits(-1.0f) ||
+ Imm == FloatToBits(0.5f) ||
+ Imm == FloatToBits(-0.5f) ||
+ Imm == FloatToBits(2.0f) ||
+ Imm == FloatToBits(-2.0f) ||
+ Imm == FloatToBits(4.0f) ||
+ Imm == FloatToBits(-4.0f)) {
+ O << BitsToFloat(Imm);
return;
}
- if (Rest.empty()) // Only 1 32-bit register
- O << RegIndex;
- else {
- unsigned NumReg = Rest.count('_') + 2;
- O << '[' << RegIndex << ':' << (RegIndex + NumReg - 1) << ']';
- }
+ O << formatHex(static_cast<uint64_t>(Imm));
}
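A few sample classifications under the printImmediate logic above, for illustration:

    // 0xFFFFFFF0 -> SImm == -16, inside [-16, 64]: printed as the integer -16
    // 0x40000000 -> FloatToBits(2.0f): printed back through BitsToFloat
    // 0xDEADBEEF -> matches neither case: falls through to formatHex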
void AMDGPUInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
@@ -95,7 +147,7 @@ void AMDGPUInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
break;
}
} else if (Op.isImm()) {
- O << Op.getImm();
+ printImmediate(Op.getImm(), O);
} else if (Op.isFPImm()) {
O << Op.getFPImm();
} else if (Op.isExpr()) {
@@ -106,6 +158,18 @@ void AMDGPUInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
}
}
+void AMDGPUInstPrinter::printOperandAndMods(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned InputModifiers = MI->getOperand(OpNo).getImm();
+ if (InputModifiers & 0x1)
+ O << "-";
+ if (InputModifiers & 0x2)
+ O << "|";
+ printOperand(MI, OpNo + 1, O);
+ if (InputModifiers & 0x2)
+ O << "|";
+}
+
void AMDGPUInstPrinter::printInterpSlot(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
unsigned Imm = MI->getOperand(OpNum).getImm();
@@ -152,13 +216,8 @@ void AMDGPUInstPrinter::printClamp(const MCInst *MI, unsigned OpNo,
void AMDGPUInstPrinter::printLiteral(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- union Literal {
- float f;
- int32_t i;
- } L;
-
- L.i = MI->getOperand(OpNo).getImm();
- O << L.i << "(" << L.f << ")";
+ int32_t Imm = MI->getOperand(OpNo).getImm();
+ O << Imm << '(' << BitsToFloat(Imm) << ')';
}
void AMDGPUInstPrinter::printLast(const MCInst *MI, unsigned OpNo,
@@ -316,6 +375,37 @@ void AMDGPUInstPrinter::printKCache(const MCInst *MI, unsigned OpNo,
}
}
+void AMDGPUInstPrinter::printSendMsg(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned SImm16 = MI->getOperand(OpNo).getImm();
+ unsigned Msg = SImm16 & 0xF;
+ if (Msg == 2 || Msg == 3) {
+ unsigned Op = (SImm16 >> 4) & 0xF;
+ if (Msg == 3)
+ O << "Gs_done(";
+ else
+ O << "Gs(";
+ if (Op == 0) {
+ O << "nop";
+ } else {
+ unsigned Stream = (SImm16 >> 8) & 0x3;
+ if (Op == 1)
+ O << "cut";
+ else if (Op == 2)
+ O << "emit";
+ else if (Op == 3)
+ O << "emit-cut";
+ O << " stream " << Stream;
+ }
+ O << "), [m0] ";
+ } else if (Msg == 1)
+ O << "interrupt ";
+ else if (Msg == 15)
+ O << "system ";
+ else
+ O << "unknown(" << Msg << ") ";
+}
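A worked decode of the SIMM16 layout handled above, for illustration; the message type sits in bits 3:0, the GS op in bits 7:4 and the stream id in bits 9:8, exactly as the shifts above extract them:

    // e.g. SImm16 = 0x122: Msg = 2 (GS), Op = 2 (emit), Stream = 1, which the
    // printer renders as "Gs(emit stream 1), [m0] ".
    void decodeSendMsg(unsigned SImm16, unsigned &Msg, unsigned &Op,
                       unsigned &Stream) {
      Msg = SImm16 & 0xF;
      Op = (SImm16 >> 4) & 0xF;
      Stream = (SImm16 >> 8) & 0x3;
    }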
+
void AMDGPUInstPrinter::printWaitFlag(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
// Note: Mask values are taken from SIInsertWaits.cpp and not from ISA docs
diff --git a/contrib/llvm/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h b/contrib/llvm/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h
index 77af942..6ca7170 100644
--- a/contrib/llvm/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h
+++ b/contrib/llvm/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h
@@ -29,31 +29,38 @@ public:
void printInstruction(const MCInst *MI, raw_ostream &O);
static const char *getRegisterName(unsigned RegNo);
- virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
+ void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot) override;
private:
+ void printU8ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printU16ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printU32ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printRegOperand(unsigned RegNo, raw_ostream &O);
+ void printImmediate(uint32_t Imm, raw_ostream &O);
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printInterpSlot(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printOperandAndMods(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ static void printInterpSlot(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printMemOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printIfSet(const MCInst *MI, unsigned OpNo, raw_ostream &O,
- StringRef Asm, StringRef Default = "");
- void printAbs(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printClamp(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printLiteral(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printLast(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printNeg(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printOMOD(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printRel(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printUpdateExecMask(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printUpdatePred(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printWrite(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printSel(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printBankSwizzle(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printRSel(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printCT(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printKCache(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printWaitFlag(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ static void printIfSet(const MCInst *MI, unsigned OpNo, raw_ostream &O,
+ StringRef Asm, StringRef Default = "");
+ static void printAbs(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ static void printClamp(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ static void printLiteral(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ static void printLast(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ static void printNeg(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ static void printOMOD(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ static void printRel(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ static void printUpdateExecMask(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O);
+ static void printUpdatePred(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ static void printWrite(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ static void printSel(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ static void printBankSwizzle(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ static void printRSel(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ static void printCT(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ static void printKCache(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ static void printSendMsg(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ static void printWaitFlag(const MCInst *MI, unsigned OpNo, raw_ostream &O);
};
} // End namespace llvm
diff --git a/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp b/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp
index 29d0acf..d55f27b 100644
--- a/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp
+++ b/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp
@@ -9,9 +9,11 @@
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
+#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/TargetRegistry.h"
@@ -23,19 +25,18 @@ namespace {
class AMDGPUMCObjectWriter : public MCObjectWriter {
public:
AMDGPUMCObjectWriter(raw_ostream &OS) : MCObjectWriter(OS, true) { }
- virtual void ExecutePostLayoutBinding(MCAssembler &Asm,
- const MCAsmLayout &Layout) {
+ void ExecutePostLayoutBinding(MCAssembler &Asm,
+ const MCAsmLayout &Layout) override {
//XXX: Implement if necessary.
}
- virtual void RecordRelocation(const MCAssembler &Asm,
- const MCAsmLayout &Layout,
- const MCFragment *Fragment,
- const MCFixup &Fixup,
- MCValue Target, uint64_t &FixedValue) {
+ void RecordRelocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFragment *Fragment, const MCFixup &Fixup,
+ MCValue Target, bool &IsPCRel,
+ uint64_t &FixedValue) override {
assert(!"Not implemented");
}
- virtual void WriteObject(MCAssembler &Asm, const MCAsmLayout &Layout);
+ void WriteObject(MCAssembler &Asm, const MCAsmLayout &Layout) override;
};
@@ -44,21 +45,23 @@ public:
AMDGPUAsmBackend(const Target &T)
: MCAsmBackend() {}
- virtual unsigned getNumFixupKinds() const { return 0; };
- virtual void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
- uint64_t Value) const;
- virtual bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
- const MCRelaxableFragment *DF,
- const MCAsmLayout &Layout) const {
+ unsigned getNumFixupKinds() const override { return AMDGPU::NumTargetFixupKinds; };
+ void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ uint64_t Value, bool IsPCRel) const override;
+ bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout) const override {
return false;
}
- virtual void relaxInstruction(const MCInst &Inst, MCInst &Res) const {
+ void relaxInstruction(const MCInst &Inst, MCInst &Res) const override {
assert(!"Not implemented");
}
- virtual bool mayNeedRelaxation(const MCInst &Inst) const { return false; }
- virtual bool writeNopData(uint64_t Count, MCObjectWriter *OW) const {
+ bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }
+ bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override {
return true;
}
+
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
};
} //End anonymous namespace
@@ -71,11 +74,46 @@ void AMDGPUMCObjectWriter::WriteObject(MCAssembler &Asm,
}
void AMDGPUAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
- unsigned DataSize, uint64_t Value) const {
+ unsigned DataSize, uint64_t Value,
+ bool IsPCRel) const {
+
+ switch ((unsigned)Fixup.getKind()) {
+ default: llvm_unreachable("Unknown fixup kind");
+ case AMDGPU::fixup_si_sopp_br: {
+ uint16_t *Dst = (uint16_t*)(Data + Fixup.getOffset());
+ *Dst = (Value - 4) / 4;
+ break;
+ }
+
+ case AMDGPU::fixup_si_rodata: {
+ uint32_t *Dst = (uint32_t*)(Data + Fixup.getOffset());
+ *Dst = Value;
+ break;
+ }
+
+ case AMDGPU::fixup_si_end_of_text: {
+ uint32_t *Dst = (uint32_t*)(Data + Fixup.getOffset());
+ // The value points to the last instruction in the text section, so we
+ // need to add 4 bytes to get to the start of the constants.
+ *Dst = Value + 4;
+ break;
+ }
+ }
+}
+
+const MCFixupKindInfo &AMDGPUAsmBackend::getFixupKindInfo(
+ MCFixupKind Kind) const {
+ const static MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
+ // name offset bits flags
+ { "fixup_si_sopp_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_si_rodata", 0, 32, 0 },
+ { "fixup_si_end_of_text", 0, 32, MCFixupKindInfo::FKF_IsPCRel }
+ };
+
+ if (Kind < FirstTargetFixupKind)
+ return MCAsmBackend::getFixupKindInfo(Kind);
- uint16_t *Dst = (uint16_t*)(Data + Fixup.getOffset());
- assert(Fixup.getKind() == FK_PCRel_4);
- *Dst = (Value - 4) / 4;
+ return Infos[Kind - FirstTargetFixupKind];
}
//===----------------------------------------------------------------------===//
@@ -88,7 +126,7 @@ class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
public:
ELFAMDGPUAsmBackend(const Target &T) : AMDGPUAsmBackend(T) { }
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
return createAMDGPUELFObjectWriter(OS);
}
};
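
The new applyFixup() above resolves each target fixup by patching bytes in place at the fixup's offset. Below is a minimal standalone sketch of that byte-patching pattern; the helper names are hypothetical and the LLVM MC types are replaced with plain integers, so it only illustrates the arithmetic, not the real API.

#include <cstdint>
#include <cstring>

// fixup_si_sopp_br stores (Value - 4) / 4: a PC-relative offset measured in
// 4-byte words, matching the arithmetic in applyFixup() above.
static void patchSoppBranch(uint8_t *Data, unsigned Offset, uint64_t Value) {
  uint16_t Encoded = static_cast<uint16_t>((Value - 4) / 4);
  std::memcpy(Data + Offset, &Encoded, sizeof(Encoded)); // portable unaligned store
}

// fixup_si_end_of_text points at the last instruction of .text, so the start
// of the constant data is 4 bytes further on.
static void patchEndOfText(uint8_t *Data, unsigned Offset, uint64_t Value) {
  uint32_t Encoded = static_cast<uint32_t>(Value + 4);
  std::memcpy(Data + Offset, &Encoded, sizeof(Encoded));
}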
diff --git a/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUELFObjectWriter.cpp b/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUELFObjectWriter.cpp
index 48fac9f..5fb94d5 100644
--- a/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUELFObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUELFObjectWriter.cpp
@@ -10,6 +10,7 @@
#include "AMDGPUMCTargetDesc.h"
#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCFixup.h"
using namespace llvm;
@@ -19,10 +20,9 @@ class AMDGPUELFObjectWriter : public MCELFObjectTargetWriter {
public:
AMDGPUELFObjectWriter();
protected:
- virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel, bool IsRelocWithSymbol,
- int64_t Addend) const {
- llvm_unreachable("Not implemented");
+ unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel) const override {
+ return Fixup.getKind();
}
};
diff --git a/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUFixupKinds.h b/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUFixupKinds.h
new file mode 100644
index 0000000..4b12e54
--- /dev/null
+++ b/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUFixupKinds.h
@@ -0,0 +1,34 @@
+//===-- AMDGPUFixupKinds.h - AMDGPU Specific Fixup Entries ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_AMDGPUFIXUPKINDS_H
+#define LLVM_AMDGPUFIXUPKINDS_H
+
+#include "llvm/MC/MCFixup.h"
+
+namespace llvm {
+namespace AMDGPU {
+enum Fixups {
+ /// 16-bit PC relative fixup for SOPP branch instructions.
+ fixup_si_sopp_br = FirstTargetFixupKind,
+
+ /// fixup for global addresses with constant initializers
+ fixup_si_rodata,
+
+ /// fixup for offset from instruction to end of text section
+ fixup_si_end_of_text,
+
+ // Marker
+ LastTargetFixupKind,
+ NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
+};
+}
+}
+
+#endif // LLVM_AMDGPUFIXUPKINDS_H
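
The enum above follows the usual MC convention: target-specific kinds start at FirstTargetFixupKind, so a backend's info table is indexed with Kind - FirstTargetFixupKind and sized by NumTargetFixupKinds. A small self-contained sketch of that indexing follows; the numeric constants and flag value are made up for illustration.

#include <cassert>

enum { FirstTargetFixupKind = 128 };            // illustrative value; generic kinds live below it
enum Fixups {
  fixup_si_sopp_br = FirstTargetFixupKind,
  fixup_si_rodata,
  fixup_si_end_of_text,
  LastTargetFixupKind,
  NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
};

struct FixupInfo { const char *Name; unsigned Offset, Bits, Flags; };

static const FixupInfo Infos[NumTargetFixupKinds] = {
  {"fixup_si_sopp_br",     0, 16, 1},           // flag 1 standing in for FKF_IsPCRel
  {"fixup_si_rodata",      0, 32, 0},
  {"fixup_si_end_of_text", 0, 32, 1},
};

static const FixupInfo &getFixupInfo(unsigned Kind) {
  assert(Kind >= FirstTargetFixupKind && Kind < LastTargetFixupKind);
  return Infos[Kind - FirstTargetFixupKind];    // same indexing as getFixupKindInfo() above
}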
diff --git a/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp b/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp
index 9b26af7..78bbe0a 100644
--- a/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp
@@ -21,12 +21,8 @@ AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(StringRef &TT) : MCAsmInfo() {
LinkerRequiresNonEmptyDwarfLines = true;
MaxInstLength = 16;
SeparatorString = "\n";
- CommentColumn = 40;
CommentString = ";";
LabelSuffix = ":";
- GlobalPrefix = "@";
- PrivateGlobalPrefix = ";.";
- LinkerPrivateGlobalPrefix = "!";
InlineAsmStart = ";#ASMSTART";
InlineAsmEnd = ";#ASMEND";
AssemblerDialect = 0;
@@ -39,13 +35,11 @@ AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(StringRef &TT) : MCAsmInfo() {
Data16bitsDirective = ".short\t";
Data32bitsDirective = ".long\t";
Data64bitsDirective = ".quad\t";
- GPRel32Directive = 0;
+ GPRel32Directive = nullptr;
SunStyleELFSectionSwitchSyntax = true;
UsesELFSectionDirectiveForBSS = true;
- HasMicrosoftFastStdCallMangling = false;
//===--- Alignment Information ----------------------------------------===//
- AlignDirective = ".align\t";
AlignmentIsInBytes = true;
TextAlignFillValue = 0;
@@ -64,5 +58,5 @@ AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(StringRef &TT) : MCAsmInfo() {
const MCSection*
AMDGPUMCAsmInfo::getNonexecutableStackSection(MCContext &CTX) const {
- return 0;
+ return nullptr;
}
diff --git a/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h b/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h
index 22afd63..59aebec 100644
--- a/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h
@@ -22,7 +22,7 @@ class StringRef;
class AMDGPUMCAsmInfo : public MCAsmInfo {
public:
explicit AMDGPUMCAsmInfo(StringRef &TT);
- const MCSection* getNonexecutableStackSection(MCContext &CTX) const;
+ const MCSection* getNonexecutableStackSection(MCContext &CTX) const override;
};
} // namespace llvm
#endif // AMDGPUMCASMINFO_H
diff --git a/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h b/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h
index d8cf64a..d5e432d 100644
--- a/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h
+++ b/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h
@@ -1,4 +1,4 @@
-//===-- AMDGPUCodeEmitter.h - AMDGPU Code Emitter interface -----------------===//
+//===-- AMDGPUCodeEmitter.h - AMDGPU Code Emitter interface -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -22,16 +22,25 @@ namespace llvm {
class MCInst;
class MCOperand;
+class MCSubtargetInfo;
class AMDGPUMCCodeEmitter : public MCCodeEmitter {
virtual void anchor();
public:
uint64_t getBinaryCodeForInstr(const MCInst &MI,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
virtual uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ return 0;
+ }
+
+ virtual unsigned getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
return 0;
}
};
diff --git a/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp b/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp
index a1bec28..38a2956 100644
--- a/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp
@@ -24,6 +24,8 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
#define GET_INSTRINFO_MC_DESC
#include "AMDGPUGenInstrInfo.inc"
@@ -33,8 +35,6 @@
#define GET_REGINFO_MC_DESC
#include "AMDGPUGenRegisterInfo.inc"
-using namespace llvm;
-
static MCInstrInfo *createAMDGPUMCInstrInfo() {
MCInstrInfo *X = new MCInstrInfo();
InitAMDGPUMCInstrInfo(X);
@@ -86,9 +86,10 @@ static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
MCContext &Ctx, MCAsmBackend &MAB,
raw_ostream &_OS,
MCCodeEmitter *_Emitter,
+ const MCSubtargetInfo &STI,
bool RelaxAll,
bool NoExecStack) {
- return createELFStreamer(Ctx, 0, MAB, _OS, _Emitter, false, false);
+ return createELFStreamer(Ctx, MAB, _OS, _Emitter, false, false);
}
extern "C" void LLVMInitializeR600TargetMC() {
diff --git a/contrib/llvm/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp b/contrib/llvm/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp
index dd8df65..dc1344f 100644
--- a/contrib/llvm/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp
@@ -34,21 +34,21 @@ class R600MCCodeEmitter : public AMDGPUMCCodeEmitter {
void operator=(const R600MCCodeEmitter &) LLVM_DELETED_FUNCTION;
const MCInstrInfo &MCII;
const MCRegisterInfo &MRI;
- const MCSubtargetInfo &STI;
public:
- R600MCCodeEmitter(const MCInstrInfo &mcii, const MCRegisterInfo &mri,
- const MCSubtargetInfo &sti)
- : MCII(mcii), MRI(mri), STI(sti) { }
+ R600MCCodeEmitter(const MCInstrInfo &mcii, const MCRegisterInfo &mri)
+ : MCII(mcii), MRI(mri) { }
/// \brief Encode the instruction and write it to the OS.
- virtual void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const override;
/// \returns the encoding for an MCOperand.
- virtual uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const override;
private:
void EmitByte(unsigned int byte, raw_ostream &OS) const;
@@ -83,11 +83,12 @@ enum FCInstr {
MCCodeEmitter *llvm::createR600MCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI) {
- return new R600MCCodeEmitter(MCII, MRI, STI);
+ return new R600MCCodeEmitter(MCII, MRI);
}
void R600MCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
if (MI.getOpcode() == AMDGPU::RETURN ||
MI.getOpcode() == AMDGPU::FETCH_CLAUSE ||
@@ -96,7 +97,7 @@ void R600MCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS,
MI.getOpcode() == AMDGPU::KILL) {
return;
} else if (IS_VTX(Desc)) {
- uint64_t InstWord01 = getBinaryCodeForInstr(MI, Fixups);
+ uint64_t InstWord01 = getBinaryCodeForInstr(MI, Fixups, STI);
uint32_t InstWord2 = MI.getOperand(2).getImm(); // Offset
if (!(STI.getFeatureBits() & AMDGPU::FeatureCaymanISA)) {
InstWord2 |= 1 << 19; // Mega-Fetch bit
@@ -120,7 +121,7 @@ void R600MCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS,
MI.getOperand(8).getImm() & 0x1F
};
- uint64_t Word01 = getBinaryCodeForInstr(MI, Fixups);
+ uint64_t Word01 = getBinaryCodeForInstr(MI, Fixups, STI);
uint32_t Word2 = Sampler << 15 | SrcSelect[ELEMENT_X] << 20 |
SrcSelect[ELEMENT_Y] << 23 | SrcSelect[ELEMENT_Z] << 26 |
SrcSelect[ELEMENT_W] << 29 | Offsets[0] << 0 | Offsets[1] << 5 |
@@ -130,7 +131,7 @@ void R600MCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS,
Emit(Word2, OS);
Emit((uint32_t) 0, OS);
} else {
- uint64_t Inst = getBinaryCodeForInstr(MI, Fixups);
+ uint64_t Inst = getBinaryCodeForInstr(MI, Fixups, STI);
if ((STI.getFeatureBits() & AMDGPU::FeatureR600ALUInst) &&
((Desc.TSFlags & R600_InstFlag::OP1) ||
Desc.TSFlags & R600_InstFlag::OP2)) {
@@ -168,19 +169,16 @@ unsigned R600MCCodeEmitter::getHWReg(unsigned RegNo) const {
uint64_t R600MCCodeEmitter::getMachineOpValue(const MCInst &MI,
const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixup) const {
+ SmallVectorImpl<MCFixup> &Fixup,
+ const MCSubtargetInfo &STI) const {
if (MO.isReg()) {
- if (HAS_NATIVE_OPERANDS(MCII.get(MI.getOpcode()).TSFlags)) {
+ if (HAS_NATIVE_OPERANDS(MCII.get(MI.getOpcode()).TSFlags))
return MRI.getEncodingValue(MO.getReg());
- } else {
- return getHWReg(MO.getReg());
- }
- } else if (MO.isImm()) {
- return MO.getImm();
- } else {
- assert(0);
- return 0;
+ return getHWReg(MO.getReg());
}
+
+ assert(MO.isImm());
+ return MO.getImm();
}
#include "AMDGPUGenMCCodeEmitter.inc"
diff --git a/contrib/llvm/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp b/contrib/llvm/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
index 5af8320..78776c1 100644
--- a/contrib/llvm/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
@@ -13,8 +13,10 @@
//
//===----------------------------------------------------------------------===//
+#include "AMDGPU.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
+#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixup.h"
@@ -39,6 +41,7 @@ class SIMCCodeEmitter : public AMDGPUMCCodeEmitter {
void operator=(const SIMCCodeEmitter &) LLVM_DELETED_FUNCTION;
const MCInstrInfo &MCII;
const MCRegisterInfo &MRI;
+ MCContext &Ctx;
/// \brief Can this operand also contain immediate values?
bool isSrcOperand(const MCInstrDesc &Desc, unsigned OpNo) const;
@@ -48,18 +51,26 @@ class SIMCCodeEmitter : public AMDGPUMCCodeEmitter {
public:
SIMCCodeEmitter(const MCInstrInfo &mcii, const MCRegisterInfo &mri,
- const MCSubtargetInfo &sti, MCContext &ctx)
- : MCII(mcii), MRI(mri) { }
+ MCContext &ctx)
+ : MCII(mcii), MRI(mri), Ctx(ctx) { }
~SIMCCodeEmitter() { }
- /// \breif Encode the instruction and write it to the OS.
- virtual void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ /// \brief Encode the instruction and write it to the OS.
+ void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const override;
/// \returns the encoding for an MCOperand.
- virtual uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const override;
+
+ /// \brief Use a fixup to encode the simm16 field for SOPP branch
+ /// instructions.
+ unsigned getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const override;
};
} // End anonymous namespace
@@ -68,7 +79,7 @@ MCCodeEmitter *llvm::createSIMCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
MCContext &Ctx) {
- return new SIMCCodeEmitter(MCII, MRI, STI, Ctx);
+ return new SIMCCodeEmitter(MCII, MRI, Ctx);
}
bool SIMCCodeEmitter::isSrcOperand(const MCInstrDesc &Desc,
@@ -88,6 +99,8 @@ uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO) const {
Imm.I = MO.getImm();
else if (MO.isFPImm())
Imm.F = MO.getFPImm();
+ else if (MO.isExpr())
+ return 255;
else
return ~0;
@@ -125,9 +138,10 @@ uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO) const {
}
void SIMCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
- uint64_t Encoding = getBinaryCodeForInstr(MI, Fixups);
+ uint64_t Encoding = getBinaryCodeForInstr(MI, Fixups, STI);
const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
unsigned bytes = Desc.getSize();
@@ -154,8 +168,13 @@ void SIMCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS,
IntFloatUnion Imm;
if (Op.isImm())
Imm.I = Op.getImm();
- else
+ else if (Op.isFPImm())
Imm.F = Op.getFPImm();
+ else {
+ assert(Op.isExpr());
+ // This will be replaced with a fixup value.
+ Imm.I = 0;
+ }
for (unsigned j = 0; j < 4; j++) {
OS.write((uint8_t) ((Imm.I >> (8 * j)) & 0xff));
@@ -166,17 +185,42 @@ void SIMCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS,
}
}
+unsigned SIMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpNo);
+
+ if (MO.isExpr()) {
+ const MCExpr *Expr = MO.getExpr();
+ MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br;
+ Fixups.push_back(MCFixup::Create(0, Expr, Kind, MI.getLoc()));
+ return 0;
+ }
+
+ return getMachineOpValue(MI, MO, Fixups, STI);
+}
+
uint64_t SIMCCodeEmitter::getMachineOpValue(const MCInst &MI,
const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
if (MO.isReg())
return MRI.getEncodingValue(MO.getReg());
if (MO.isExpr()) {
- const MCExpr *Expr = MO.getExpr();
- MCFixupKind Kind = MCFixupKind(FK_PCRel_4);
- Fixups.push_back(MCFixup::Create(0, Expr, Kind, MI.getLoc()));
- return 0;
+ const MCSymbolRefExpr *Expr = cast<MCSymbolRefExpr>(MO.getExpr());
+ MCFixupKind Kind;
+ const MCSymbol *Sym =
+ Ctx.GetOrCreateSymbol(StringRef(END_OF_TEXT_LABEL_NAME));
+
+ if (&Expr->getSymbol() == Sym) {
+ // Add the offset to the beginning of the constant values.
+ Kind = (MCFixupKind)AMDGPU::fixup_si_end_of_text;
+ } else {
+ // This is used for constant data stored in .rodata.
+ Kind = (MCFixupKind)AMDGPU::fixup_si_rodata;
+ }
+ Fixups.push_back(MCFixup::Create(4, Expr, Kind, MI.getLoc()));
}
// Figure out the operand number, needed for isSrcOperand check
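
getSOPPBrEncoding() above uses the standard MC pattern for operands that are still symbolic at encode time: emit a zero placeholder and record a fixup that the assembler resolves later. A reduced sketch of that pattern, with the LLVM types replaced by plain structs and the kind value made up:

#include <cstdint>
#include <vector>

struct Fixup { unsigned Offset; unsigned Kind; const void *Expr; };

static uint64_t encodeBranchOperand(bool IsExpr, const void *Expr,
                                    uint64_t KnownValue,
                                    std::vector<Fixup> &Fixups) {
  const unsigned fixup_si_sopp_br = 128;  // hypothetical kind number
  if (IsExpr) {
    // Target not known yet: encode 0 now and let applyFixup() patch it later.
    Fixups.push_back({/*Offset=*/0, fixup_si_sopp_br, Expr});
    return 0;
  }
  return KnownValue;                      // already-resolved immediates encode directly
}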
diff --git a/contrib/llvm/lib/Target/R600/Processors.td b/contrib/llvm/lib/Target/R600/Processors.td
index ee190e4..ce17d7c 100644
--- a/contrib/llvm/lib/Target/R600/Processors.td
+++ b/contrib/llvm/lib/Target/R600/Processors.td
@@ -9,46 +9,102 @@
class Proc<string Name, ProcessorItineraries itin, list<SubtargetFeature> Features>
: Processor<Name, itin, Features>;
+
+//===----------------------------------------------------------------------===//
+// R600
+//===----------------------------------------------------------------------===//
def : Proc<"", R600_VLIW5_Itin,
[FeatureR600, FeatureVertexCache]>;
+
def : Proc<"r600", R600_VLIW5_Itin,
- [FeatureR600 , FeatureVertexCache]>;
+ [FeatureR600 , FeatureVertexCache, FeatureWavefrontSize64]>;
+
+def : Proc<"r630", R600_VLIW5_Itin,
+ [FeatureR600, FeatureVertexCache, FeatureWavefrontSize32]>;
+
def : Proc<"rs880", R600_VLIW5_Itin,
- [FeatureR600]>;
+ [FeatureR600, FeatureWavefrontSize16]>;
+
def : Proc<"rv670", R600_VLIW5_Itin,
- [FeatureR600, FeatureFP64, FeatureVertexCache]>;
+ [FeatureR600, FeatureFP64, FeatureVertexCache, FeatureWavefrontSize64]>;
+
+//===----------------------------------------------------------------------===//
+// R700
+//===----------------------------------------------------------------------===//
+
def : Proc<"rv710", R600_VLIW5_Itin,
- [FeatureR700, FeatureVertexCache]>;
+ [FeatureR700, FeatureVertexCache, FeatureWavefrontSize32]>;
+
def : Proc<"rv730", R600_VLIW5_Itin,
- [FeatureR700, FeatureVertexCache]>;
+ [FeatureR700, FeatureVertexCache, FeatureWavefrontSize32]>;
+
def : Proc<"rv770", R600_VLIW5_Itin,
- [FeatureR700, FeatureFP64, FeatureVertexCache]>;
+ [FeatureR700, FeatureFP64, FeatureVertexCache, FeatureWavefrontSize64]>;
+
+//===----------------------------------------------------------------------===//
+// Evergreen
+//===----------------------------------------------------------------------===//
+
def : Proc<"cedar", R600_VLIW5_Itin,
- [FeatureEvergreen, FeatureVertexCache]>;
+ [FeatureEvergreen, FeatureVertexCache, FeatureWavefrontSize32,
+ FeatureCFALUBug]>;
+
def : Proc<"redwood", R600_VLIW5_Itin,
- [FeatureEvergreen, FeatureVertexCache]>;
+ [FeatureEvergreen, FeatureVertexCache, FeatureWavefrontSize64,
+ FeatureCFALUBug]>;
+
def : Proc<"sumo", R600_VLIW5_Itin,
- [FeatureEvergreen]>;
+ [FeatureEvergreen, FeatureWavefrontSize64, FeatureCFALUBug]>;
+
def : Proc<"juniper", R600_VLIW5_Itin,
- [FeatureEvergreen, FeatureVertexCache]>;
+ [FeatureEvergreen, FeatureVertexCache, FeatureWavefrontSize64]>;
+
def : Proc<"cypress", R600_VLIW5_Itin,
- [FeatureEvergreen, FeatureFP64, FeatureVertexCache]>;
+ [FeatureEvergreen, FeatureFP64, FeatureVertexCache,
+ FeatureWavefrontSize64]>;
+
+//===----------------------------------------------------------------------===//
+// Northern Islands
+//===----------------------------------------------------------------------===//
+
def : Proc<"barts", R600_VLIW5_Itin,
- [FeatureNorthernIslands, FeatureVertexCache]>;
+ [FeatureNorthernIslands, FeatureVertexCache, FeatureCFALUBug]>;
+
def : Proc<"turks", R600_VLIW5_Itin,
- [FeatureNorthernIslands, FeatureVertexCache]>;
+ [FeatureNorthernIslands, FeatureVertexCache, FeatureCFALUBug]>;
+
def : Proc<"caicos", R600_VLIW5_Itin,
- [FeatureNorthernIslands]>;
+ [FeatureNorthernIslands, FeatureCFALUBug]>;
+
def : Proc<"cayman", R600_VLIW4_Itin,
[FeatureNorthernIslands, FeatureFP64, FeatureCaymanISA]>;
+//===----------------------------------------------------------------------===//
+// Southern Islands
+//===----------------------------------------------------------------------===//
+
def : Proc<"SI", SI_Itin, [FeatureSouthernIslands]>;
+
def : Proc<"tahiti", SI_Itin, [FeatureSouthernIslands]>;
+
def : Proc<"pitcairn", SI_Itin, [FeatureSouthernIslands]>;
+
def : Proc<"verde", SI_Itin, [FeatureSouthernIslands]>;
+
def : Proc<"oland", SI_Itin, [FeatureSouthernIslands]>;
+
def : Proc<"hainan", SI_Itin, [FeatureSouthernIslands]>;
+
+//===----------------------------------------------------------------------===//
+// Sea Islands
+//===----------------------------------------------------------------------===//
+
def : Proc<"bonaire", SI_Itin, [FeatureSeaIslands]>;
+
def : Proc<"kabini", SI_Itin, [FeatureSeaIslands]>;
+
def : Proc<"kaveri", SI_Itin, [FeatureSeaIslands]>;
+
def : Proc<"hawaii", SI_Itin, [FeatureSeaIslands]>;
+
+def : Proc<"mullins", SI_Itin, [FeatureSeaIslands]>;
diff --git a/contrib/llvm/lib/Target/R600/R600ClauseMergePass.cpp b/contrib/llvm/lib/Target/R600/R600ClauseMergePass.cpp
index 33d2ca3..92bf0df 100644
--- a/contrib/llvm/lib/Target/R600/R600ClauseMergePass.cpp
+++ b/contrib/llvm/lib/Target/R600/R600ClauseMergePass.cpp
@@ -13,7 +13,6 @@
/// It needs to be called after IfCvt for best results.
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "r600mergeclause"
#include "AMDGPU.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
@@ -27,6 +26,8 @@
using namespace llvm;
+#define DEBUG_TYPE "r600mergeclause"
+
namespace {
static bool isCFAlu(const MachineInstr *MI) {
@@ -50,7 +51,7 @@ private:
/// IfCvt pass can generate "disabled" ALU clause marker that need to be
/// removed and their content affected to the previous alu clause.
- /// This function parse instructions after CFAlu untill it find a disabled
+  /// This function parses instructions after CFAlu until it finds a disabled
/// CFAlu and merge the content, or an enabled CFAlu.
void cleanPotentialDisabledCFAlu(MachineInstr *CFAlu) const;
@@ -62,9 +63,9 @@ private:
public:
R600ClauseMergePass(TargetMachine &tm) : MachineFunctionPass(ID) { }
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- const char *getPassName() const;
+ const char *getPassName() const override;
};
char R600ClauseMergePass::ID = 0;
diff --git a/contrib/llvm/lib/Target/R600/R600ControlFlowFinalizer.cpp b/contrib/llvm/lib/Target/R600/R600ControlFlowFinalizer.cpp
index 2a8276b..e37767a 100644
--- a/contrib/llvm/lib/Target/R600/R600ControlFlowFinalizer.cpp
+++ b/contrib/llvm/lib/Target/R600/R600ControlFlowFinalizer.cpp
@@ -12,9 +12,9 @@
/// computing their address on the fly ; it also sets STACK_SIZE info.
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "r600cf"
#include "llvm/Support/Debug.h"
#include "AMDGPU.h"
+#include "AMDGPUSubtarget.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
@@ -26,8 +26,176 @@
using namespace llvm;
+#define DEBUG_TYPE "r600cf"
+
namespace {
+struct CFStack {
+
+ enum StackItem {
+ ENTRY = 0,
+ SUB_ENTRY = 1,
+ FIRST_NON_WQM_PUSH = 2,
+ FIRST_NON_WQM_PUSH_W_FULL_ENTRY = 3
+ };
+
+ const AMDGPUSubtarget &ST;
+ std::vector<StackItem> BranchStack;
+ std::vector<StackItem> LoopStack;
+ unsigned MaxStackSize;
+ unsigned CurrentEntries;
+ unsigned CurrentSubEntries;
+
+ CFStack(const AMDGPUSubtarget &st, unsigned ShaderType) : ST(st),
+ // We need to reserve a stack entry for CALL_FS in vertex shaders.
+ MaxStackSize(ShaderType == ShaderType::VERTEX ? 1 : 0),
+ CurrentEntries(0), CurrentSubEntries(0) { }
+
+ unsigned getLoopDepth();
+ bool branchStackContains(CFStack::StackItem);
+ bool requiresWorkAroundForInst(unsigned Opcode);
+ unsigned getSubEntrySize(CFStack::StackItem Item);
+ void updateMaxStackSize();
+ void pushBranch(unsigned Opcode, bool isWQM = false);
+ void pushLoop();
+ void popBranch();
+ void popLoop();
+};
+
+unsigned CFStack::getLoopDepth() {
+ return LoopStack.size();
+}
+
+bool CFStack::branchStackContains(CFStack::StackItem Item) {
+ for (std::vector<CFStack::StackItem>::const_iterator I = BranchStack.begin(),
+ E = BranchStack.end(); I != E; ++I) {
+ if (*I == Item)
+ return true;
+ }
+ return false;
+}
+
+bool CFStack::requiresWorkAroundForInst(unsigned Opcode) {
+ if (Opcode == AMDGPU::CF_ALU_PUSH_BEFORE && ST.hasCaymanISA() &&
+ getLoopDepth() > 1)
+ return true;
+
+ if (!ST.hasCFAluBug())
+ return false;
+
+ switch(Opcode) {
+ default: return false;
+ case AMDGPU::CF_ALU_PUSH_BEFORE:
+ case AMDGPU::CF_ALU_ELSE_AFTER:
+ case AMDGPU::CF_ALU_BREAK:
+ case AMDGPU::CF_ALU_CONTINUE:
+ if (CurrentSubEntries == 0)
+ return false;
+ if (ST.getWavefrontSize() == 64) {
+ // We are being conservative here. We only require this work-around if
+ // CurrentSubEntries > 3 &&
+ // (CurrentSubEntries % 4 == 3 || CurrentSubEntries % 4 == 0)
+ //
+ // We have to be conservative, because we don't know for certain that
+ // our stack allocation algorithm for Evergreen/NI is correct. Applying this
+ // work-around when CurrentSubEntries > 3 allows us to over-allocate stack
+ // resources without any problems.
+ return CurrentSubEntries > 3;
+ } else {
+ assert(ST.getWavefrontSize() == 32);
+ // We are being conservative here. We only require the work-around if
+ // CurrentSubEntries > 7 &&
+ // (CurrentSubEntries % 8 == 7 || CurrentSubEntries % 8 == 0)
+ // See the comment on the wavefront size == 64 case for why we are
+ // being conservative.
+ return CurrentSubEntries > 7;
+ }
+ }
+}
+
+unsigned CFStack::getSubEntrySize(CFStack::StackItem Item) {
+ switch(Item) {
+ default:
+ return 0;
+ case CFStack::FIRST_NON_WQM_PUSH:
+ assert(!ST.hasCaymanISA());
+ if (ST.getGeneration() <= AMDGPUSubtarget::R700) {
+ // +1 For the push operation.
+ // +2 Extra space required.
+ return 3;
+ } else {
+ // Some documentation says that this is not necessary on Evergreen,
+      // but experimentation has shown that we need to allocate 1 extra
+ // sub-entry for the first non-WQM push.
+ // +1 For the push operation.
+ // +1 Extra space required.
+ return 2;
+ }
+ case CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY:
+ assert(ST.getGeneration() >= AMDGPUSubtarget::EVERGREEN);
+ // +1 For the push operation.
+ // +1 Extra space required.
+ return 2;
+ case CFStack::SUB_ENTRY:
+ return 1;
+ }
+}
+
+void CFStack::updateMaxStackSize() {
+ unsigned CurrentStackSize = CurrentEntries +
+ (RoundUpToAlignment(CurrentSubEntries, 4) / 4);
+ MaxStackSize = std::max(CurrentStackSize, MaxStackSize);
+}
+
+void CFStack::pushBranch(unsigned Opcode, bool isWQM) {
+ CFStack::StackItem Item = CFStack::ENTRY;
+ switch(Opcode) {
+ case AMDGPU::CF_PUSH_EG:
+ case AMDGPU::CF_ALU_PUSH_BEFORE:
+ if (!isWQM) {
+ if (!ST.hasCaymanISA() && !branchStackContains(CFStack::FIRST_NON_WQM_PUSH))
+ Item = CFStack::FIRST_NON_WQM_PUSH; // May not be required on Evergreen/NI
+ // See comment in
+ // CFStack::getSubEntrySize()
+ else if (CurrentEntries > 0 &&
+ ST.getGeneration() > AMDGPUSubtarget::EVERGREEN &&
+ !ST.hasCaymanISA() &&
+ !branchStackContains(CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY))
+ Item = CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY;
+ else
+ Item = CFStack::SUB_ENTRY;
+ } else
+ Item = CFStack::ENTRY;
+ break;
+ }
+ BranchStack.push_back(Item);
+ if (Item == CFStack::ENTRY)
+ CurrentEntries++;
+ else
+ CurrentSubEntries += getSubEntrySize(Item);
+ updateMaxStackSize();
+}
+
+void CFStack::pushLoop() {
+ LoopStack.push_back(CFStack::ENTRY);
+ CurrentEntries++;
+ updateMaxStackSize();
+}
+
+void CFStack::popBranch() {
+ CFStack::StackItem Top = BranchStack.back();
+ if (Top == CFStack::ENTRY)
+ CurrentEntries--;
+ else
+ CurrentSubEntries-= getSubEntrySize(Top);
+ BranchStack.pop_back();
+}
+
+void CFStack::popLoop() {
+ CurrentEntries--;
+ LoopStack.pop_back();
+}
+
class R600ControlFlowFinalizer : public MachineFunctionPass {
private:
@@ -300,51 +468,30 @@ private:
}
}
- unsigned getHWStackSize(unsigned StackSubEntry, bool hasPush) const {
- switch (ST.getGeneration()) {
- case AMDGPUSubtarget::R600:
- case AMDGPUSubtarget::R700:
- if (hasPush)
- StackSubEntry += 2;
- break;
- case AMDGPUSubtarget::EVERGREEN:
- if (hasPush)
- StackSubEntry ++;
- case AMDGPUSubtarget::NORTHERN_ISLANDS:
- StackSubEntry += 2;
- break;
- default: llvm_unreachable("Not a VLIW4/VLIW5 GPU");
- }
- return (StackSubEntry + 3)/4; // Need ceil value of StackSubEntry/4
- }
-
public:
R600ControlFlowFinalizer(TargetMachine &tm) : MachineFunctionPass(ID),
- TII (0), TRI(0),
+ TII (nullptr), TRI(nullptr),
ST(tm.getSubtarget<AMDGPUSubtarget>()) {
const AMDGPUSubtarget &ST = tm.getSubtarget<AMDGPUSubtarget>();
MaxFetchInst = ST.getTexVTXClauseSize();
}
- virtual bool runOnMachineFunction(MachineFunction &MF) {
+ bool runOnMachineFunction(MachineFunction &MF) override {
TII=static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());
TRI=static_cast<const R600RegisterInfo *>(MF.getTarget().getRegisterInfo());
+ R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
- unsigned MaxStack = 0;
- unsigned CurrentStack = 0;
- bool HasPush = false;
+ CFStack CFStack(ST, MFI->getShaderType());
for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
++MB) {
MachineBasicBlock &MBB = *MB;
unsigned CfCount = 0;
std::vector<std::pair<unsigned, std::set<MachineInstr *> > > LoopStack;
std::vector<MachineInstr * > IfThenElseStack;
- R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
- if (MFI->ShaderType == 1) {
+ if (MFI->getShaderType() == ShaderType::VERTEX) {
BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
getHWInstrDesc(CF_CALL_FS));
CfCount++;
- MaxStack = 1;
}
std::vector<ClauseFile> FetchClauses, AluClauses;
std::vector<MachineInstr *> LastAlu(1);
@@ -356,21 +503,31 @@ public:
DEBUG(dbgs() << CfCount << ":"; I->dump(););
FetchClauses.push_back(MakeFetchClause(MBB, I));
CfCount++;
- LastAlu.back() = 0;
+ LastAlu.back() = nullptr;
continue;
}
MachineBasicBlock::iterator MI = I;
if (MI->getOpcode() != AMDGPU::ENDIF)
- LastAlu.back() = 0;
+ LastAlu.back() = nullptr;
if (MI->getOpcode() == AMDGPU::CF_ALU)
LastAlu.back() = MI;
I++;
+ bool RequiresWorkAround =
+ CFStack.requiresWorkAroundForInst(MI->getOpcode());
switch (MI->getOpcode()) {
case AMDGPU::CF_ALU_PUSH_BEFORE:
- CurrentStack++;
- MaxStack = std::max(MaxStack, CurrentStack);
- HasPush = true;
+ if (RequiresWorkAround) {
+ DEBUG(dbgs() << "Applying bug work-around for ALU_PUSH_BEFORE\n");
+ BuildMI(MBB, MI, MBB.findDebugLoc(MI), TII->get(AMDGPU::CF_PUSH_EG))
+ .addImm(CfCount + 1)
+ .addImm(1);
+ MI->setDesc(TII->get(AMDGPU::CF_ALU));
+ CfCount++;
+ CFStack.pushBranch(AMDGPU::CF_PUSH_EG);
+ } else
+ CFStack.pushBranch(AMDGPU::CF_ALU_PUSH_BEFORE);
+
case AMDGPU::CF_ALU:
I = MI;
AluClauses.push_back(MakeALUClause(MBB, I));
@@ -378,8 +535,7 @@ public:
CfCount++;
break;
case AMDGPU::WHILELOOP: {
- CurrentStack+=4;
- MaxStack = std::max(MaxStack, CurrentStack);
+ CFStack.pushLoop();
MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
getHWInstrDesc(CF_WHILE_LOOP))
.addImm(1);
@@ -392,7 +548,7 @@ public:
break;
}
case AMDGPU::ENDLOOP: {
- CurrentStack-=4;
+ CFStack.popLoop();
std::pair<unsigned, std::set<MachineInstr *> > Pair =
LoopStack.back();
LoopStack.pop_back();
@@ -404,7 +560,7 @@ public:
break;
}
case AMDGPU::IF_PREDICATE_SET: {
- LastAlu.push_back(0);
+ LastAlu.push_back(nullptr);
MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
getHWInstrDesc(CF_JUMP))
.addImm(0)
@@ -430,7 +586,7 @@ public:
break;
}
case AMDGPU::ENDIF: {
- CurrentStack--;
+ CFStack.popBranch();
if (LastAlu.back()) {
ToPopAfter.push_back(LastAlu.back());
} else {
@@ -505,13 +661,13 @@ public:
.addImm(Alu->getOperand(8).getImm());
Alu->eraseFromParent();
}
- MFI->StackSize = getHWStackSize(MaxStack, HasPush);
+ MFI->StackSize = CFStack.MaxStackSize;
}
return false;
}
- const char *getPassName() const {
+ const char *getPassName() const override {
return "R600 Control Flow Finalizer Pass";
}
};
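
The CFStack bookkeeping introduced above replaces the old getHWStackSize() heuristic: each full entry occupies one hardware stack slot, and sub-entries are packed four to a slot, as updateMaxStackSize() computes. A self-contained sketch of that arithmetic, with the function names made up for illustration:

#include <algorithm>
#include <cassert>

// One hardware stack entry holds four sub-entries, hence the rounding.
static unsigned hwStackSize(unsigned Entries, unsigned SubEntries) {
  return Entries + (SubEntries + 3) / 4;
}

int main() {
  // 2 full entries plus 5 sub-entries need 2 + ceil(5/4) = 4 slots.
  unsigned MaxStackSize = 0;
  MaxStackSize = std::max(MaxStackSize, hwStackSize(2, 5));
  assert(MaxStackSize == 4);
  return 0;
}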
diff --git a/contrib/llvm/lib/Target/R600/R600Defines.h b/contrib/llvm/lib/Target/R600/R600Defines.h
index 1781f2a..f2f28fe 100644
--- a/contrib/llvm/lib/Target/R600/R600Defines.h
+++ b/contrib/llvm/lib/Target/R600/R600Defines.h
@@ -52,7 +52,7 @@ namespace R600_InstFlag {
#define HAS_NATIVE_OPERANDS(Flags) ((Flags) & R600_InstFlag::NATIVE_OPERANDS)
-/// \brief Defines for extracting register infomation from register encoding
+/// \brief Defines for extracting register information from register encoding
#define HW_REG_MASK 0x1ff
#define HW_CHAN_SHIFT 9
diff --git a/contrib/llvm/lib/Target/R600/R600EmitClauseMarkers.cpp b/contrib/llvm/lib/Target/R600/R600EmitClauseMarkers.cpp
index 1bbfd2b..38afebe 100644
--- a/contrib/llvm/lib/Target/R600/R600EmitClauseMarkers.cpp
+++ b/contrib/llvm/lib/Target/R600/R600EmitClauseMarkers.cpp
@@ -25,12 +25,15 @@
using namespace llvm;
+namespace llvm {
+ void initializeR600EmitClauseMarkersPass(PassRegistry&);
+}
+
namespace {
-class R600EmitClauseMarkersPass : public MachineFunctionPass {
+class R600EmitClauseMarkers : public MachineFunctionPass {
private:
- static char ID;
const R600InstrInfo *TII;
int Address;
@@ -287,10 +290,13 @@ private:
}
public:
- R600EmitClauseMarkersPass(TargetMachine &tm) : MachineFunctionPass(ID),
- TII(0), Address(0) { }
+ static char ID;
+ R600EmitClauseMarkers() : MachineFunctionPass(ID), TII(nullptr), Address(0) {
+
+ initializeR600EmitClauseMarkersPass(*PassRegistry::getPassRegistry());
+ }
- virtual bool runOnMachineFunction(MachineFunction &MF) {
+ bool runOnMachineFunction(MachineFunction &MF) override {
TII = static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());
for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
@@ -309,17 +315,21 @@ public:
return false;
}
- const char *getPassName() const {
+ const char *getPassName() const override {
return "R600 Emit Clause Markers Pass";
}
};
-char R600EmitClauseMarkersPass::ID = 0;
+char R600EmitClauseMarkers::ID = 0;
} // end anonymous namespace
+INITIALIZE_PASS_BEGIN(R600EmitClauseMarkers, "emitclausemarkers",
+ "R600 Emit Clause Markters", false, false)
+INITIALIZE_PASS_END(R600EmitClauseMarkers, "emitclausemarkers",
+ "R600 Emit Clause Markters", false, false)
-llvm::FunctionPass *llvm::createR600EmitClauseMarkers(TargetMachine &TM) {
- return new R600EmitClauseMarkersPass(TM);
+llvm::FunctionPass *llvm::createR600EmitClauseMarkers() {
+ return new R600EmitClauseMarkers();
}
diff --git a/contrib/llvm/lib/Target/R600/R600ExpandSpecialInstrs.cpp b/contrib/llvm/lib/Target/R600/R600ExpandSpecialInstrs.cpp
index aeee4aa..732b06d 100644
--- a/contrib/llvm/lib/Target/R600/R600ExpandSpecialInstrs.cpp
+++ b/contrib/llvm/lib/Target/R600/R600ExpandSpecialInstrs.cpp
@@ -33,16 +33,16 @@ private:
static char ID;
const R600InstrInfo *TII;
- bool ExpandInputPerspective(MachineInstr& MI);
- bool ExpandInputConstant(MachineInstr& MI);
+ void SetFlagInNewMI(MachineInstr *NewMI, const MachineInstr *OldMI,
+ unsigned Op);
public:
R600ExpandSpecialInstrsPass(TargetMachine &tm) : MachineFunctionPass(ID),
- TII(0) { }
+ TII(nullptr) { }
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- const char *getPassName() const {
+ const char *getPassName() const override {
return "R600 Expand special instructions pass";
}
};
@@ -55,6 +55,15 @@ FunctionPass *llvm::createR600ExpandSpecialInstrsPass(TargetMachine &TM) {
return new R600ExpandSpecialInstrsPass(TM);
}
+void R600ExpandSpecialInstrsPass::SetFlagInNewMI(MachineInstr *NewMI,
+ const MachineInstr *OldMI, unsigned Op) {
+ int OpIdx = TII->getOperandIdx(*OldMI, Op);
+ if (OpIdx > -1) {
+ uint64_t Val = OldMI->getOperand(OpIdx).getImm();
+ TII->setImmOperand(NewMI, Op, Val);
+ }
+}
+
bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
TII = static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());
@@ -66,7 +75,7 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
MachineBasicBlock::iterator I = MBB.begin();
while (I != MBB.end()) {
MachineInstr &MI = *I;
- I = llvm::next(I);
+ I = std::next(I);
// Expand LDS_*_RET instructions
if (TII->isLDSRetInstr(MI.getOpcode())) {
@@ -325,6 +334,12 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
if (NotLast) {
TII->addFlag(NewMI, 0, MO_FLAG_NOT_LAST);
}
+ SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::clamp);
+ SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::literal);
+ SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::src0_abs);
+ SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::src1_abs);
+ SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::src0_neg);
+ SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::src1_neg);
}
MI.eraseFromParent();
}
diff --git a/contrib/llvm/lib/Target/R600/R600ISelLowering.cpp b/contrib/llvm/lib/Target/R600/R600ISelLowering.cpp
index 0fcb488..52315bf 100644
--- a/contrib/llvm/lib/Target/R600/R600ISelLowering.cpp
+++ b/contrib/llvm/lib/Target/R600/R600ISelLowering.cpp
@@ -13,9 +13,13 @@
//===----------------------------------------------------------------------===//
#include "R600ISelLowering.h"
+#include "AMDGPUFrameLowering.h"
+#include "AMDGPUIntrinsicInfo.h"
+#include "AMDGPUSubtarget.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -65,6 +69,7 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
setOperationAction(ISD::BR_CC, MVT::i32, Expand);
setOperationAction(ISD::BR_CC, MVT::f32, Expand);
+ setOperationAction(ISD::BRCOND, MVT::Other, Custom);
setOperationAction(ISD::FSUB, MVT::f32, Expand);
@@ -78,13 +83,37 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
setOperationAction(ISD::SETCC, MVT::i32, Expand);
setOperationAction(ISD::SETCC, MVT::f32, Expand);
setOperationAction(ISD::FP_TO_UINT, MVT::i1, Custom);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
+ setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
setOperationAction(ISD::SELECT, MVT::i32, Expand);
setOperationAction(ISD::SELECT, MVT::f32, Expand);
setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
- setOperationAction(ISD::SELECT, MVT::v2f32, Expand);
setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
- setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
+
+ // Expand sign extension of vectors
+ if (!Subtarget->hasBFE())
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Expand);
+
+ if (!Subtarget->hasBFE())
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Expand);
+
+ if (!Subtarget->hasBFE())
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Expand);
+
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Expand);
+
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Expand);
+
// Legalize loads and stores to the private address space.
setOperationAction(ISD::LOAD, MVT::i32, Custom);
@@ -111,14 +140,47 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f32, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
+
+ setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i32, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f32, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
+
setTargetDAGCombine(ISD::FP_ROUND);
setTargetDAGCombine(ISD::FP_TO_SINT);
setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
setTargetDAGCombine(ISD::SELECT_CC);
setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
+ setOperationAction(ISD::SUB, MVT::i64, Expand);
+
+ // These should be replaced by UDVIREM, but it does not happen automatically
+ // during Type Legalization
+ setOperationAction(ISD::UDIV, MVT::i64, Custom);
+ setOperationAction(ISD::UREM, MVT::i64, Custom);
+ setOperationAction(ISD::SDIV, MVT::i64, Custom);
+ setOperationAction(ISD::SREM, MVT::i64, Custom);
+
+ // We don't have 64-bit shifts. Thus we need either SHX i64 or SHX_PARTS i32
+ // to be Legal/Custom in order to avoid library calls.
+ setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
+ setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
+ setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
+
setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
+ const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
+ for (MVT VT : ScalarIntVTs) {
+ setOperationAction(ISD::ADDC, VT, Expand);
+ setOperationAction(ISD::SUBC, VT, Expand);
+ setOperationAction(ISD::ADDE, VT, Expand);
+ setOperationAction(ISD::SUBE, VT, Expand);
+ }
+
setBooleanContents(ZeroOrNegativeOneBooleanContent);
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
setSchedulingPreference(Sched::Source);
@@ -207,7 +269,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
case AMDGPU::RAT_WRITE_CACHELESS_64_eg:
case AMDGPU::RAT_WRITE_CACHELESS_128_eg: {
- unsigned EOP = (llvm::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0;
+ unsigned EOP = (std::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0;
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
.addOperand(MI->getOperand(0))
@@ -457,9 +519,9 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
// Instruction is left unmodified if its not the last one of its type
bool isLastInstructionOfItsType = true;
unsigned InstExportType = MI->getOperand(1).getImm();
- for (MachineBasicBlock::iterator NextExportInst = llvm::next(I),
+ for (MachineBasicBlock::iterator NextExportInst = std::next(I),
EndBlock = BB->end(); NextExportInst != EndBlock;
- NextExportInst = llvm::next(NextExportInst)) {
+ NextExportInst = std::next(NextExportInst)) {
if (NextExportInst->getOpcode() == AMDGPU::EG_ExportSwz ||
NextExportInst->getOpcode() == AMDGPU::R600_ExportSwz) {
unsigned CurrentInstExportType = NextExportInst->getOperand(1)
@@ -470,7 +532,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
}
}
}
- bool EOP = (llvm::next(I)->getOpcode() == AMDGPU::RETURN)? 1 : 0;
+ bool EOP = (std::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0;
if (!EOP && !isLastInstructionOfItsType)
return BB;
unsigned CfInst = (MI->getOpcode() == AMDGPU::EG_ExportSwz)? 84 : 40;
@@ -510,11 +572,24 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
switch (Op.getOpcode()) {
default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
+ case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
+ case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
+ case ISD::SHL_PARTS: return LowerSHLParts(Op, DAG);
+ case ISD::SRA_PARTS:
+ case ISD::SRL_PARTS: return LowerSRXParts(Op, DAG);
case ISD::FCOS:
case ISD::FSIN: return LowerTrig(Op, DAG);
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
case ISD::STORE: return LowerSTORE(Op, DAG);
- case ISD::LOAD: return LowerLOAD(Op, DAG);
+ case ISD::LOAD: {
+ SDValue Result = LowerLOAD(Op, DAG);
+ assert((!Result.getNode() ||
+ Result.getNode()->getNumValues() == 2) &&
+ "Load should return a value and a chain");
+ return Result;
+ }
+
+ case ISD::BRCOND: return LowerBRCOND(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(MFI, Op, DAG);
case ISD::INTRINSIC_VOID: {
SDValue Chain = Op.getOperand(0);
@@ -538,8 +613,7 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
DAG.getConstant(2, MVT::i32), // SWZ_Z
DAG.getConstant(3, MVT::i32) // SWZ_W
};
- return DAG.getNode(AMDGPUISD::EXPORT, SDLoc(Op), Op.getValueType(),
- Args, 8);
+ return DAG.getNode(AMDGPUISD::EXPORT, SDLoc(Op), Op.getValueType(), Args);
}
// default for switch(IntrinsicID)
@@ -689,7 +763,7 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
Op.getOperand(9),
Op.getOperand(10)
};
- return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, MVT::v4f32, TexArgs, 19);
+ return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, MVT::v4f32, TexArgs);
}
case AMDGPUIntrinsic::AMDGPU_dp4: {
SDValue Args[8] = {
@@ -710,7 +784,7 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
DAG.getConstant(3, MVT::i32))
};
- return DAG.getNode(AMDGPUISD::DOT4, DL, MVT::f32, Args, 8);
+ return DAG.getNode(AMDGPUISD::DOT4, DL, MVT::f32, Args);
}
case Intrinsic::r600_read_ngroups_x:
@@ -750,6 +824,9 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
case Intrinsic::r600_read_tidig_z:
return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
AMDGPU::T0_Z, VT);
+ case Intrinsic::AMDGPU_rsq:
+ // XXX - I'm assuming SI's RSQ_LEGACY matches R600's behavior.
+ return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
}
// break out of case ISD::INTRINSIC_WO_CHAIN in switch(Op.getOpcode())
break;
@@ -762,23 +839,189 @@ void R600TargetLowering::ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const {
switch (N->getOpcode()) {
- default: return;
- case ISD::FP_TO_UINT: Results.push_back(LowerFPTOUINT(N->getOperand(0), DAG));
+ default:
+ AMDGPUTargetLowering::ReplaceNodeResults(N, Results, DAG);
return;
- case ISD::LOAD: {
- SDNode *Node = LowerLOAD(SDValue(N, 0), DAG).getNode();
- Results.push_back(SDValue(Node, 0));
- Results.push_back(SDValue(Node, 1));
- // XXX: LLVM seems not to replace Chain Value inside CustomWidenLowerNode
- // function
- DAG.ReplaceAllUsesOfValueWith(SDValue(N,1), SDValue(Node, 1));
+ case ISD::FP_TO_UINT:
+ if (N->getValueType(0) == MVT::i1) {
+ Results.push_back(LowerFPTOUINT(N->getOperand(0), DAG));
+ return;
+ }
+ // Fall-through. Since we don't care about out of bounds values
+ // we can use FP_TO_SINT for uints too. The DAGLegalizer code for uint
+ // considers some extra cases which are not necessary here.
+ case ISD::FP_TO_SINT: {
+ SDValue Result;
+ if (expandFP_TO_SINT(N, Result, DAG))
+ Results.push_back(Result);
return;
}
- case ISD::STORE:
- SDNode *Node = LowerSTORE(SDValue(N, 0), DAG).getNode();
- Results.push_back(SDValue(Node, 0));
- return;
+ case ISD::UDIV: {
+ SDValue Op = SDValue(N, 0);
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+ SDValue UDIVREM = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT),
+ N->getOperand(0), N->getOperand(1));
+ Results.push_back(UDIVREM);
+ break;
+ }
+ case ISD::UREM: {
+ SDValue Op = SDValue(N, 0);
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+ SDValue UDIVREM = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT),
+ N->getOperand(0), N->getOperand(1));
+ Results.push_back(UDIVREM.getValue(1));
+ break;
+ }
+ case ISD::SDIV: {
+ SDValue Op = SDValue(N, 0);
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+ SDValue SDIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(VT, VT),
+ N->getOperand(0), N->getOperand(1));
+ Results.push_back(SDIVREM);
+ break;
+ }
+ case ISD::SREM: {
+ SDValue Op = SDValue(N, 0);
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+ SDValue SDIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(VT, VT),
+ N->getOperand(0), N->getOperand(1));
+ Results.push_back(SDIVREM.getValue(1));
+ break;
+ }
+ case ISD::SDIVREM: {
+ SDValue Op = SDValue(N, 1);
+ SDValue RES = LowerSDIVREM(Op, DAG);
+ Results.push_back(RES);
+ Results.push_back(RES.getValue(1));
+ break;
+ }
+ case ISD::UDIVREM: {
+ SDValue Op = SDValue(N, 0);
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+ EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
+
+ SDValue one = DAG.getConstant(1, HalfVT);
+ SDValue zero = DAG.getConstant(0, HalfVT);
+
+ //HiLo split
+ SDValue LHS = N->getOperand(0);
+ SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, zero);
+ SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, one);
+
+ SDValue RHS = N->getOperand(1);
+ SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, zero);
+ SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, one);
+
+ // Get Speculative values
+ SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
+ SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
+
+ SDValue REM_Hi = zero;
+ SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, zero, REM_Part, LHS_Hi, ISD::SETEQ);
+
+ SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, zero, DIV_Part, zero, ISD::SETEQ);
+ SDValue DIV_Lo = zero;
+
+ const unsigned halfBitWidth = HalfVT.getSizeInBits();
+
+ for (unsigned i = 0; i < halfBitWidth; ++i) {
+ SDValue POS = DAG.getConstant(halfBitWidth - i - 1, HalfVT);
+ // Get Value of high bit
+ SDValue HBit;
+ if (halfBitWidth == 32 && Subtarget->hasBFE()) {
+ HBit = DAG.getNode(AMDGPUISD::BFE_U32, DL, HalfVT, LHS_Lo, POS, one);
+ } else {
+ HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
+ HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, one);
+ }
+
+ SDValue Carry = DAG.getNode(ISD::SRL, DL, HalfVT, REM_Lo,
+ DAG.getConstant(halfBitWidth - 1, HalfVT));
+ REM_Hi = DAG.getNode(ISD::SHL, DL, HalfVT, REM_Hi, one);
+ REM_Hi = DAG.getNode(ISD::OR, DL, HalfVT, REM_Hi, Carry);
+
+ REM_Lo = DAG.getNode(ISD::SHL, DL, HalfVT, REM_Lo, one);
+ REM_Lo = DAG.getNode(ISD::OR, DL, HalfVT, REM_Lo, HBit);
+
+
+ SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, REM_Lo, REM_Hi);
+
+ SDValue BIT = DAG.getConstant(1 << (halfBitWidth - i - 1), HalfVT);
+ SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, zero, ISD::SETGE);
+
+ DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
+
+ // Update REM
+
+ SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
+
+ REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETGE);
+ REM_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, REM, zero);
+ REM_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, REM, one);
+ }
+
+ SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, REM_Lo, REM_Hi);
+ SDValue DIV = DAG.getNode(ISD::BUILD_PAIR, DL, VT, DIV_Lo, DIV_Hi);
+ Results.push_back(DIV);
+ Results.push_back(REM);
+ break;
+ }
+ }
+}
+
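+
The ISD::UDIVREM expansion above is a classic restoring (shift-and-subtract) division built from 32-bit halves: a speculative divide of the high half when the divisor fits in 32 bits, then one quotient bit per low-half bit. The standalone sketch below mirrors that algorithm on plain uint64_t values and checks it against the native operators; it illustrates only the math, not the DAG code itself.

#include <cassert>
#include <cstdint>

static void udivrem64(uint64_t LHS, uint64_t RHS, uint64_t &Div, uint64_t &Rem) {
  // RHS must be nonzero.
  uint32_t LHS_Lo = static_cast<uint32_t>(LHS), LHS_Hi = static_cast<uint32_t>(LHS >> 32);
  uint32_t RHS_Lo = static_cast<uint32_t>(RHS), RHS_Hi = static_cast<uint32_t>(RHS >> 32);

  // Speculative values, valid only when the divisor fits in the low half.
  Div = (RHS_Hi == 0) ? (static_cast<uint64_t>(LHS_Hi / RHS_Lo) << 32) : 0;
  Rem = (RHS_Hi == 0) ? (LHS_Hi % RHS_Lo) : LHS_Hi;

  for (int i = 31; i >= 0; --i) {            // fold in LHS_Lo, MSB first
    uint64_t Carry = Rem >> 63;              // the bit the DAG code shifts into REM_Hi
    Rem = (Rem << 1) | ((LHS_Lo >> i) & 1);
    if (Carry || Rem >= RHS) {               // 65-bit compare against the divisor
      Rem -= RHS;                            // wrapping subtraction is exact here
      Div |= uint64_t(1) << i;
    }
  }
}

int main() {
  uint64_t D, R;
  udivrem64(0x1234567890abcdefull, 0x1f2e3d4cull, D, R);     // 32-bit divisor
  assert(D == 0x1234567890abcdefull / 0x1f2e3d4cull &&
         R == 0x1234567890abcdefull % 0x1f2e3d4cull);
  udivrem64(0xfedcba9876543210ull, 0x123456789abull, D, R);  // divisor wider than 32 bits
  assert(D == 0xfedcba9876543210ull / 0x123456789abull &&
         R == 0xfedcba9876543210ull % 0x123456789abull);
  return 0;
}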
+SDValue R600TargetLowering::vectorToVerticalVector(SelectionDAG &DAG,
+ SDValue Vector) const {
+
+ SDLoc DL(Vector);
+ EVT VecVT = Vector.getValueType();
+ EVT EltVT = VecVT.getVectorElementType();
+ SmallVector<SDValue, 8> Args;
+
+ for (unsigned i = 0, e = VecVT.getVectorNumElements();
+ i != e; ++i) {
+ Args.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
+ Vector, DAG.getConstant(i, getVectorIdxTy())));
}
+
+ return DAG.getNode(AMDGPUISD::BUILD_VERTICAL_VECTOR, DL, VecVT, Args);
+}
+
+SDValue R600TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
+ SelectionDAG &DAG) const {
+
+ SDLoc DL(Op);
+ SDValue Vector = Op.getOperand(0);
+ SDValue Index = Op.getOperand(1);
+
+ if (isa<ConstantSDNode>(Index) ||
+ Vector.getOpcode() == AMDGPUISD::BUILD_VERTICAL_VECTOR)
+ return Op;
+
+ Vector = vectorToVerticalVector(DAG, Vector);
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getValueType(),
+ Vector, Index);
+}
+
+SDValue R600TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ SDValue Vector = Op.getOperand(0);
+ SDValue Value = Op.getOperand(1);
+ SDValue Index = Op.getOperand(2);
+
+ if (isa<ConstantSDNode>(Index) ||
+ Vector.getOpcode() == AMDGPUISD::BUILD_VERTICAL_VECTOR)
+ return Op;
+
+ Vector = vectorToVerticalVector(DAG, Vector);
+ SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, Op.getValueType(),
+ Vector, Value, Index);
+ return vectorToVerticalVector(DAG, Insert);
}
SDValue R600TargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
@@ -812,6 +1055,80 @@ SDValue R600TargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
DAG.getConstantFP(3.14159265359, MVT::f32));
}
+SDValue R600TargetLowering::LowerSHLParts(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+
+ SDValue Lo = Op.getOperand(0);
+ SDValue Hi = Op.getOperand(1);
+ SDValue Shift = Op.getOperand(2);
+ SDValue Zero = DAG.getConstant(0, VT);
+ SDValue One = DAG.getConstant(1, VT);
+
+ SDValue Width = DAG.getConstant(VT.getSizeInBits(), VT);
+ SDValue Width1 = DAG.getConstant(VT.getSizeInBits() - 1, VT);
+ SDValue BigShift = DAG.getNode(ISD::SUB, DL, VT, Shift, Width);
+ SDValue CompShift = DAG.getNode(ISD::SUB, DL, VT, Width1, Shift);
+
+  // The dance around Width1 is necessary for the 0 special case.
+  // Without it the CompShift might be 32, producing incorrect results in
+  // Overflow. So we do the shift in two steps; the alternative is to
+  // add a conditional to filter the special case.
+
+ SDValue Overflow = DAG.getNode(ISD::SRL, DL, VT, Lo, CompShift);
+ Overflow = DAG.getNode(ISD::SRL, DL, VT, Overflow, One);
+
+ SDValue HiSmall = DAG.getNode(ISD::SHL, DL, VT, Hi, Shift);
+ HiSmall = DAG.getNode(ISD::OR, DL, VT, HiSmall, Overflow);
+ SDValue LoSmall = DAG.getNode(ISD::SHL, DL, VT, Lo, Shift);
+
+ SDValue HiBig = DAG.getNode(ISD::SHL, DL, VT, Lo, BigShift);
+ SDValue LoBig = Zero;
+
+ Hi = DAG.getSelectCC(DL, Shift, Width, HiSmall, HiBig, ISD::SETULT);
+ Lo = DAG.getSelectCC(DL, Shift, Width, LoSmall, LoBig, ISD::SETULT);
+
+ return DAG.getNode(ISD::MERGE_VALUES, DL, DAG.getVTList(VT,VT), Lo, Hi);
+}
+
+SDValue R600TargetLowering::LowerSRXParts(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+
+ SDValue Lo = Op.getOperand(0);
+ SDValue Hi = Op.getOperand(1);
+ SDValue Shift = Op.getOperand(2);
+ SDValue Zero = DAG.getConstant(0, VT);
+ SDValue One = DAG.getConstant(1, VT);
+
+ const bool SRA = Op.getOpcode() == ISD::SRA_PARTS;
+
+ SDValue Width = DAG.getConstant(VT.getSizeInBits(), VT);
+ SDValue Width1 = DAG.getConstant(VT.getSizeInBits() - 1, VT);
+ SDValue BigShift = DAG.getNode(ISD::SUB, DL, VT, Shift, Width);
+ SDValue CompShift = DAG.getNode(ISD::SUB, DL, VT, Width1, Shift);
+
+  // The dance around Width1 is necessary for the 0 special case.
+  // Without it the CompShift might be 32, producing incorrect results in
+  // Overflow. So we do the shift in two steps; the alternative is to
+  // add a conditional to filter the special case.
+
+ SDValue Overflow = DAG.getNode(ISD::SHL, DL, VT, Hi, CompShift);
+ Overflow = DAG.getNode(ISD::SHL, DL, VT, Overflow, One);
+
+ SDValue HiSmall = DAG.getNode(SRA ? ISD::SRA : ISD::SRL, DL, VT, Hi, Shift);
+ SDValue LoSmall = DAG.getNode(ISD::SRL, DL, VT, Lo, Shift);
+ LoSmall = DAG.getNode(ISD::OR, DL, VT, LoSmall, Overflow);
+
+ SDValue LoBig = DAG.getNode(SRA ? ISD::SRA : ISD::SRL, DL, VT, Hi, BigShift);
+ SDValue HiBig = SRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, Width1) : Zero;
+
+ Hi = DAG.getSelectCC(DL, Shift, Width, HiSmall, HiBig, ISD::SETULT);
+ Lo = DAG.getSelectCC(DL, Shift, Width, LoSmall, LoBig, ISD::SETULT);
+
+ return DAG.getNode(ISD::MERGE_VALUES, DL, DAG.getVTList(VT,VT), Lo, Hi);
+}
+
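+
LowerSHLParts() above builds a 64-bit shift from two 32-bit halves: a "small shift" path whose carry into the high half uses the two-step Width1-then-1 shift so it stays well defined at a shift amount of 0, and a "big shift" path for amounts of 32 and above, selected with SETULT. The standalone C++ check below exercises the same decomposition; it uses a branch where the DAG version computes both paths and selects.

#include <cassert>
#include <cstdint>

static void shlParts(uint32_t Lo, uint32_t Hi, uint32_t Shift,
                     uint32_t &OutLo, uint32_t &OutHi) {
  if (Shift < 32) {                             // "small shift" path
    uint32_t CompShift = 31 - Shift;            // Width1 - Shift
    uint32_t Overflow = (Lo >> CompShift) >> 1; // two steps: defined even when Shift == 0
    OutHi = (Hi << Shift) | Overflow;
    OutLo = Lo << Shift;
  } else {                                      // "big shift" path: BigShift = Shift - Width
    OutHi = Lo << (Shift - 32);
    OutLo = 0;
  }
}

int main() {
  const uint64_t X = 0x123456789abcdef0ull;
  for (uint32_t S = 0; S < 64; ++S) {
    uint32_t Lo, Hi;
    shlParts(static_cast<uint32_t>(X), static_cast<uint32_t>(X >> 32), S, Lo, Hi);
    assert(((static_cast<uint64_t>(Hi) << 32) | Lo) == (X << S));
  }
  return 0;
}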
SDValue R600TargetLowering::LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(
ISD::SETCC,
@@ -958,13 +1275,6 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
return DAG.getNode(ISD::BITCAST, DL, VT, SelectNode);
}
-
- // Possible Min/Max pattern
- SDValue MinMax = LowerMinMax(Op, DAG);
- if (MinMax.getNode()) {
- return MinMax;
- }
-
// If we make it this for it means we have no native instructions to handle
// this SELECT_CC, so we must lower it.
SDValue HWTrue, HWFalse;
@@ -977,7 +1287,7 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
HWFalse = DAG.getConstant(0, CompareVT);
}
else {
- assert(!"Unhandled value type in LowerSELECT_CC");
+ llvm_unreachable("Unhandled value type in LowerSELECT_CC");
}
// Lower this unsupported SELECT_CC into a combination of two supported
@@ -990,7 +1300,7 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
DAG.getCondCode(ISD::SETNE));
}
-/// LLVM generates byte-addresed pointers. For indirect addressing, we need to
+/// LLVM generates byte-addressed pointers. For indirect addressing, we need to
/// convert these pointers to a register index. Each register holds
 /// 16 bytes (4 x 32-bit sub-registers), but we need to take into account the
 /// \p StackWidth, which tells us how many of the 4 sub-registers will be used
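
A hedged sketch of the conversion this comment describes (the exact formula is an assumption for illustration: one stack slot is taken to span StackWidth 32-bit sub-registers, i.e. 4 * StackWidth bytes):

    // Byte address -> register index; with StackWidth in {1, 2, 4} the divisor
    // is a power of two, so the DAG can lower the division to a right shift.
    unsigned regIndexFromBytePtr(unsigned ByteAddr, unsigned StackWidth) {
      return ByteAddr / (4 * StackWidth);
    }
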
@@ -1086,10 +1396,10 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
DAG.getConstant(0, MVT::i32),
Mask
};
- SDValue Input = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i32, Src, 4);
+ SDValue Input = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i32, Src);
SDValue Args[3] = { Chain, Input, DWordAddr };
return DAG.getMemIntrinsicNode(AMDGPUISD::STORE_MSKOR, DL,
- Op->getVTList(), Args, 3, MemVT,
+ Op->getVTList(), Args, MemVT,
StoreNode->getMemOperand());
} else if (Ptr->getOpcode() != AMDGPUISD::DWORDADDR &&
Value.getValueType().bitsGE(MVT::i32)) {
@@ -1099,7 +1409,7 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
Ptr, DAG.getConstant(2, MVT::i32)));
if (StoreNode->isTruncatingStore() || StoreNode->isIndexed()) {
- assert(!"Truncated and indexed stores not supported yet");
+ llvm_unreachable("Truncated and indexed stores not supported yet");
} else {
Chain = DAG.getStore(Chain, DL, Value, Ptr, StoreNode->getMemOperand());
}
@@ -1113,6 +1423,10 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
return SDValue();
}
+ SDValue Ret = AMDGPUTargetLowering::LowerSTORE(Op, DAG);
+ if (Ret.getNode()) {
+ return Ret;
+ }
// Lowering for indirect addressing
const MachineFunction &MF = DAG.getMachineFunction();
@@ -1125,7 +1439,7 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
if (ValueVT.isVector()) {
unsigned NumElemVT = ValueVT.getVectorNumElements();
EVT ElemVT = ValueVT.getVectorElementType();
- SDValue Stores[4];
+ SmallVector<SDValue, 4> Stores(NumElemVT);
assert(NumElemVT >= StackWidth && "Stack width cannot be greater than "
"vector width in load");
@@ -1142,7 +1456,7 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
Chain, Elem, Ptr,
DAG.getTargetConstant(Channel, MVT::i32));
}
- Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores, NumElemVT);
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
} else {
if (ValueVT == MVT::i8) {
Value = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Value);
@@ -1204,12 +1518,35 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
SDValue Ptr = Op.getOperand(1);
SDValue LoweredLoad;
+ SDValue Ret = AMDGPUTargetLowering::LowerLOAD(Op, DAG);
+ if (Ret.getNode()) {
+ SDValue Ops[2] = {
+ Ret,
+ Chain
+ };
+ return DAG.getMergeValues(Ops, DL);
+ }
+
+ // Lower loads of global variables in the constant address space
+ if (LoadNode->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
+ isa<GlobalVariable>(
+ GetUnderlyingObject(LoadNode->getMemOperand()->getValue()))) {
+
+ SDValue Ptr = DAG.getZExtOrTrunc(LoadNode->getBasePtr(), DL,
+ getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
+ Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
+ DAG.getConstant(2, MVT::i32));
+ return DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op->getVTList(),
+ LoadNode->getChain(), Ptr,
+ DAG.getTargetConstant(0, MVT::i32), Op.getOperand(2));
+ }
+
if (LoadNode->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS && VT.isVector()) {
SDValue MergedValues[2] = {
SplitVectorLoad(Op, DAG),
Chain
};
- return DAG.getMergeValues(MergedValues, 2, DL);
+ return DAG.getMergeValues(MergedValues, DL);
}
int ConstantBlock = ConstantAddressBlock(LoadNode->getAddressSpace());
@@ -1217,8 +1554,8 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
((LoadNode->getExtensionType() == ISD::NON_EXTLOAD) ||
(LoadNode->getExtensionType() == ISD::ZEXTLOAD))) {
SDValue Result;
- if (isa<ConstantExpr>(LoadNode->getSrcValue()) ||
- isa<Constant>(LoadNode->getSrcValue()) ||
+ if (isa<ConstantExpr>(LoadNode->getMemOperand()->getValue()) ||
+ isa<Constant>(LoadNode->getMemOperand()->getValue()) ||
isa<ConstantSDNode>(Ptr)) {
SDValue Slots[4];
for (unsigned i = 0; i < 4; i++) {
@@ -1237,9 +1574,10 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
NewVT = VT;
NumElements = VT.getVectorNumElements();
}
- Result = DAG.getNode(ISD::BUILD_VECTOR, DL, NewVT, Slots, NumElements);
+ Result = DAG.getNode(ISD::BUILD_VECTOR, DL, NewVT,
+ makeArrayRef(Slots, NumElements));
} else {
- // non constant ptr cant be folded, keeps it as a v4f32 load
+ // non-constant ptr can't be folded, keeps it as a v4f32 load
Result = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::v4i32,
DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr, DAG.getConstant(4, MVT::i32)),
DAG.getConstant(LoadNode->getAddressSpace() -
@@ -1253,10 +1591,10 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
}
SDValue MergedValues[2] = {
- Result,
- Chain
+ Result,
+ Chain
};
- return DAG.getMergeValues(MergedValues, 2, DL);
+ return DAG.getMergeValues(MergedValues, DL);
}
// For most operations returning SDValue() will result in the node being
@@ -1280,7 +1618,7 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, Shl, ShiftAmount);
SDValue MergedValues[2] = { Sra, Chain };
- return DAG.getMergeValues(MergedValues, 2, DL);
+ return DAG.getMergeValues(MergedValues, DL);
}
if (LoadNode->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS) {
@@ -1317,7 +1655,7 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
Loads[i] = DAG.getUNDEF(ElemVT);
}
EVT TargetVT = EVT::getVectorVT(*DAG.getContext(), ElemVT, 4);
- LoweredLoad = DAG.getNode(ISD::BUILD_VECTOR, DL, TargetVT, Loads, 4);
+ LoweredLoad = DAG.getNode(ISD::BUILD_VECTOR, DL, TargetVT, Loads);
} else {
LoweredLoad = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, VT,
Chain, Ptr,
@@ -1325,11 +1663,21 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
Op.getOperand(2));
}
- SDValue Ops[2];
- Ops[0] = LoweredLoad;
- Ops[1] = Chain;
+ SDValue Ops[2] = {
+ LoweredLoad,
+ Chain
+ };
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
+}
+
+SDValue R600TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
+ SDValue Chain = Op.getOperand(0);
+ SDValue Cond = Op.getOperand(1);
+ SDValue Jump = Op.getOperand(2);
+
+ return DAG.getNode(AMDGPUISD::BRANCH_COND, SDLoc(Op), Op.getValueType(),
+ Chain, Jump, Cond);
}
/// XXX Only kernel functions are supported, so we can assume for now that
@@ -1346,12 +1694,11 @@ SDValue R600TargetLowering::LowerFormalArguments(
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
getTargetMachine(), ArgLocs, *DAG.getContext());
MachineFunction &MF = DAG.getMachineFunction();
- unsigned ShaderType = MF.getInfo<R600MachineFunctionInfo>()->ShaderType;
+ unsigned ShaderType = MF.getInfo<R600MachineFunctionInfo>()->getShaderType();
SmallVector<ISD::InputArg, 8> LocalIns;
- getOriginalFunctionArgs(DAG, DAG.getMachineFunction().getFunction(), Ins,
- LocalIns);
+ getOriginalFunctionArgs(DAG, MF.getFunction(), Ins, LocalIns);
AnalyzeFormalArguments(CCInfo, LocalIns);
@@ -1370,34 +1717,45 @@ SDValue R600TargetLowering::LowerFormalArguments(
PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
AMDGPUAS::CONSTANT_BUFFER_0);
+ // i64 isn't a legal type, so the register type used ends up as i32, which
+ // isn't expected here. It attempts to create this sextload, but it ends up
+ // being invalid. Somehow this seems to work with i64 arguments, but breaks
+ // for <1 x i64>.
+
 // The first 36 bytes of the input buffer contain information about
// thread group and global sizes.
- SDValue Arg = DAG.getExtLoad(ISD::SEXTLOAD, DL, VT, Chain,
+
+ // FIXME: This should really check the extload type, but the handling of
+ // extload vector parameters seems to be broken.
+ //ISD::LoadExtType Ext = Ins[i].Flags.isSExt() ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
+ ISD::LoadExtType Ext = ISD::SEXTLOAD;
+ SDValue Arg = DAG.getExtLoad(Ext, DL, VT, Chain,
DAG.getConstant(36 + VA.getLocMemOffset(), MVT::i32),
MachinePointerInfo(UndefValue::get(PtrTy)),
MemVT, false, false, 4);
- // 4 is the prefered alignment for
- // the CONSTANT memory space.
+
+ // 4 is the preferred alignment for the CONSTANT memory space.
InVals.push_back(Arg);
}
return Chain;
}
EVT R600TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
- if (!VT.isVector()) return MVT::i32;
+ if (!VT.isVector())
+ return MVT::i32;
return VT.changeVectorElementTypeToInteger();
}
-static SDValue
-CompactSwizzlableVector(SelectionDAG &DAG, SDValue VectorEntry,
- DenseMap<unsigned, unsigned> &RemapSwizzle) {
+static SDValue CompactSwizzlableVector(
+ SelectionDAG &DAG, SDValue VectorEntry,
+ DenseMap<unsigned, unsigned> &RemapSwizzle) {
assert(VectorEntry.getOpcode() == ISD::BUILD_VECTOR);
assert(RemapSwizzle.empty());
SDValue NewBldVec[4] = {
- VectorEntry.getOperand(0),
- VectorEntry.getOperand(1),
- VectorEntry.getOperand(2),
- VectorEntry.getOperand(3)
+ VectorEntry.getOperand(0),
+ VectorEntry.getOperand(1),
+ VectorEntry.getOperand(2),
+ VectorEntry.getOperand(3)
};
for (unsigned i = 0; i < 4; i++) {
@@ -1428,7 +1786,7 @@ CompactSwizzlableVector(SelectionDAG &DAG, SDValue VectorEntry,
}
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(VectorEntry),
- VectorEntry.getValueType(), NewBldVec, 4);
+ VectorEntry.getValueType(), NewBldVec);
}
static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry,
@@ -1442,17 +1800,20 @@ static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry,
VectorEntry.getOperand(3)
};
bool isUnmovable[4] = { false, false, false, false };
- for (unsigned i = 0; i < 4; i++)
+ for (unsigned i = 0; i < 4; i++) {
RemapSwizzle[i] = i;
+ if (NewBldVec[i].getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
+ unsigned Idx = dyn_cast<ConstantSDNode>(NewBldVec[i].getOperand(1))
+ ->getZExtValue();
+ if (i == Idx)
+ isUnmovable[Idx] = true;
+ }
+ }
for (unsigned i = 0; i < 4; i++) {
if (NewBldVec[i].getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
unsigned Idx = dyn_cast<ConstantSDNode>(NewBldVec[i].getOperand(1))
->getZExtValue();
- if (i == Idx) {
- isUnmovable[Idx] = true;
- continue;
- }
if (isUnmovable[Idx])
continue;
// Swap i and Idx
@@ -1463,7 +1824,7 @@ static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry,
}
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(VectorEntry),
- VectorEntry.getValueType(), NewBldVec, 4);
+ VectorEntry.getValueType(), NewBldVec);
}
@@ -1501,6 +1862,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
SelectionDAG &DAG = DCI.DAG;
switch (N->getOpcode()) {
+ default: return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
// (f32 fp_round (f64 uint_to_fp a)) -> (f32 uint_to_fp a)
case ISD::FP_ROUND: {
SDValue Arg = N->getOperand(0);
@@ -1590,8 +1952,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
}
// Return the new vector
- return DAG.getNode(ISD::BUILD_VECTOR, dl,
- VT, &Ops[0], Ops.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
// Extract_vec (Build_vector) generated by custom lowering
@@ -1615,6 +1976,11 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
}
case ISD::SELECT_CC: {
+ // Try common optimizations
+ SDValue Ret = AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
+ if (Ret.getNode())
+ return Ret;
+
// fold selectcc (selectcc x, y, a, b, cc), b, a, b, seteq ->
// selectcc x, y, a, b, inv(cc)
//
@@ -1674,7 +2040,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
};
SDLoc DL(N);
NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[4], DAG);
- return DAG.getNode(AMDGPUISD::EXPORT, DL, N->getVTList(), NewArgs, 8);
+ return DAG.getNode(AMDGPUISD::EXPORT, DL, N->getVTList(), NewArgs);
}
case AMDGPUISD::TEXTURE_FETCH: {
SDValue Arg = N->getOperand(1);
@@ -1704,10 +2070,11 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
};
NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[2], DAG);
return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, SDLoc(N), N->getVTList(),
- NewArgs, 19);
+ NewArgs);
}
}
- return SDValue();
+
+ return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
}
static bool
@@ -1756,8 +2123,7 @@ FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src, SDValue &Neg,
TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
};
std::vector<unsigned> Consts;
- for (unsigned i = 0; i < sizeof(SrcIndices) / sizeof(int); i++) {
- int OtherSrcIdx = SrcIndices[i];
+ for (int OtherSrcIdx : SrcIndices) {
int OtherSelIdx = TII->getSelIdx(Opcode, OtherSrcIdx);
if (OtherSrcIdx < 0 || OtherSelIdx < 0)
continue;
@@ -1768,14 +2134,14 @@ FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src, SDValue &Neg,
if (RegisterSDNode *Reg =
dyn_cast<RegisterSDNode>(ParentNode->getOperand(OtherSrcIdx))) {
if (Reg->getReg() == AMDGPU::ALU_CONST) {
- ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(
- ParentNode->getOperand(OtherSelIdx));
+ ConstantSDNode *Cst
+ = cast<ConstantSDNode>(ParentNode->getOperand(OtherSelIdx));
Consts.push_back(Cst->getZExtValue());
}
}
}
- ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(CstOffset);
+ ConstantSDNode *Cst = cast<ConstantSDNode>(CstOffset);
Consts.push_back(Cst->getZExtValue());
if (!TII->fitsConstReadLimitations(Consts)) {
return false;
@@ -1847,9 +2213,8 @@ SDNode *R600TargetLowering::PostISelFolding(MachineSDNode *Node,
SDValue FakeOp;
std::vector<SDValue> Ops;
- for(SDNode::op_iterator I = Node->op_begin(), E = Node->op_end();
- I != E; ++I)
- Ops.push_back(*I);
+ for (const SDUse &I : Node->ops())
+ Ops.push_back(I);
if (Opcode == AMDGPU::DOT_4) {
int OperandIdx[] = {
diff --git a/contrib/llvm/lib/Target/R600/R600ISelLowering.h b/contrib/llvm/lib/Target/R600/R600ISelLowering.h
index c10257e..d22c8c9 100644
--- a/contrib/llvm/lib/Target/R600/R600ISelLowering.h
+++ b/contrib/llvm/lib/Target/R600/R600ISelLowering.h
@@ -24,26 +24,26 @@ class R600InstrInfo;
class R600TargetLowering : public AMDGPUTargetLowering {
public:
R600TargetLowering(TargetMachine &TM);
- virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock * BB) const;
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
- virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock * BB) const override;
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+ SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
void ReplaceNodeResults(SDNode * N,
- SmallVectorImpl<SDValue> &Results,
- SelectionDAG &DAG) const;
- virtual SDValue LowerFormalArguments(
- SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc DL, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
- virtual EVT getSetCCResultType(LLVMContext &, EVT VT) const;
+ SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const override;
+ SDValue LowerFormalArguments(
+ SDValue Chain,
+ CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ SDLoc DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
+ EVT getSetCCResultType(LLVMContext &, EVT VT) const override;
private:
unsigned Gen;
/// Each OpenCL kernel has nine implicit parameters that are stored in the
/// first nine dwords of a Vertex Buffer. These implicit parameters are
- /// lowered to load instructions which retreive the values from the Vertex
+ /// lowered to load instructions which retrieve the values from the Vertex
/// Buffer.
SDValue LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
SDLoc DL, unsigned DwordOffset) const;
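
The nine implicit dwords are also where the 36-byte offset used in LowerFormalArguments comes from: 9 dwords x 4 bytes = 36 bytes, so explicit kernel arguments start at byte offset 36 of CONSTANT_BUFFER_0.
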
@@ -51,22 +51,25 @@ private:
void lowerImplicitParameter(MachineInstr *MI, MachineBasicBlock &BB,
MachineRegisterInfo & MRI, unsigned dword_offset) const;
SDValue OptimizeSwizzle(SDValue BuildVector, SDValue Swz[], SelectionDAG &DAG) const;
+ SDValue vectorToVerticalVector(SelectionDAG &DAG, SDValue Vector) const;
- /// \brief Lower ROTL opcode to BITALIGN
- SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const;
-
+ SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerTrig(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSHLParts(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSRXParts(SDValue Op, SelectionDAG &DAG) const;
SDValue stackPtrToRegIndex(SDValue Ptr, unsigned StackWidth,
SelectionDAG &DAG) const;
void getStackAddress(unsigned StackWidth, unsigned ElemIdx,
unsigned &Channel, unsigned &PtrIncr) const;
bool isZero(SDValue Op) const;
- virtual SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const;
+ SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const override;
};
} // End namespace llvm;
diff --git a/contrib/llvm/lib/Target/R600/R600InstrInfo.cpp b/contrib/llvm/lib/Target/R600/R600InstrInfo.cpp
index 2eca6cf..99920b7 100644
--- a/contrib/llvm/lib/Target/R600/R600InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/R600/R600InstrInfo.cpp
@@ -23,15 +23,14 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+using namespace llvm;
+
#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenDFAPacketizer.inc"
-using namespace llvm;
-
-R600InstrInfo::R600InstrInfo(AMDGPUTargetMachine &tm)
- : AMDGPUInstrInfo(tm),
- RI(tm),
- ST(tm.getSubtarget<AMDGPUSubtarget>())
+R600InstrInfo::R600InstrInfo(const AMDGPUSubtarget &st)
+ : AMDGPUInstrInfo(st),
+ RI(st)
{ }
const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
@@ -52,11 +51,15 @@ R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
unsigned VectorComponents = 0;
- if (AMDGPU::R600_Reg128RegClass.contains(DestReg) &&
- AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
+ if ((AMDGPU::R600_Reg128RegClass.contains(DestReg) ||
+ AMDGPU::R600_Reg128VerticalRegClass.contains(DestReg)) &&
+ (AMDGPU::R600_Reg128RegClass.contains(SrcReg) ||
+ AMDGPU::R600_Reg128VerticalRegClass.contains(SrcReg))) {
VectorComponents = 4;
- } else if(AMDGPU::R600_Reg64RegClass.contains(DestReg) &&
- AMDGPU::R600_Reg64RegClass.contains(SrcReg)) {
+ } else if((AMDGPU::R600_Reg64RegClass.contains(DestReg) ||
+ AMDGPU::R600_Reg64VerticalRegClass.contains(DestReg)) &&
+ (AMDGPU::R600_Reg64RegClass.contains(SrcReg) ||
+ AMDGPU::R600_Reg64VerticalRegClass.contains(SrcReg))) {
VectorComponents = 2;
}
@@ -89,10 +92,6 @@ bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
return true;
}
-unsigned R600InstrInfo::getIEQOpcode() const {
- return AMDGPU::SETE_INT;
-}
-
bool R600InstrInfo::isMov(unsigned Opcode) const {
@@ -206,8 +205,10 @@ bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
}
bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const {
- const R600MachineFunctionInfo *MFI = MI->getParent()->getParent()->getInfo<R600MachineFunctionInfo>();
- return MFI->ShaderType != ShaderType::COMPUTE && usesVertexCache(MI->getOpcode());
+ const MachineFunction *MF = MI->getParent()->getParent();
+ const R600MachineFunctionInfo *MFI = MF->getInfo<R600MachineFunctionInfo>();
+ return MFI->getShaderType() != ShaderType::COMPUTE &&
+ usesVertexCache(MI->getOpcode());
}
bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
@@ -215,9 +216,11 @@ bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
}
bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
- const R600MachineFunctionInfo *MFI = MI->getParent()->getParent()->getInfo<R600MachineFunctionInfo>();
- return (MFI->ShaderType == ShaderType::COMPUTE && usesVertexCache(MI->getOpcode())) ||
- usesTextureCache(MI->getOpcode());
+ const MachineFunction *MF = MI->getParent()->getParent();
+ const R600MachineFunctionInfo *MFI = MF->getInfo<R600MachineFunctionInfo>();
+ return (MFI->getShaderType() == ShaderType::COMPUTE &&
+ usesVertexCache(MI->getOpcode())) ||
+ usesTextureCache(MI->getOpcode());
}
bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
@@ -316,7 +319,7 @@ R600InstrInfo::getSrcs(MachineInstr *MI) const {
Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
continue;
}
-
+
}
return Result;
}
@@ -677,7 +680,7 @@ findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
return MI;
}
- return NULL;
+ return nullptr;
}
static
@@ -717,8 +720,8 @@ R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
}
// Remove successive JUMP
- while (I != MBB.begin() && llvm::prior(I)->getOpcode() == AMDGPU::JUMP) {
- MachineBasicBlock::iterator PriorI = llvm::prior(I);
+ while (I != MBB.begin() && std::prev(I)->getOpcode() == AMDGPU::JUMP) {
+ MachineBasicBlock::iterator PriorI = std::prev(I);
if (AllowModify)
I->removeFromParent();
I = PriorI;
@@ -768,23 +771,13 @@ R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
return true;
}
-int R600InstrInfo::getBranchInstr(const MachineOperand &op) const {
- const MachineInstr *MI = op.getParent();
-
- switch (MI->getDesc().OpInfo->RegClass) {
- default: // FIXME: fallthrough??
- case AMDGPU::GPRI32RegClassID: return AMDGPU::BRANCH_COND_i32;
- case AMDGPU::GPRF32RegClassID: return AMDGPU::BRANCH_COND_f32;
- };
-}
-
static
MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
It != E; ++It) {
if (It->getOpcode() == AMDGPU::CF_ALU ||
It->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
- return llvm::prior(It.base());
+ return std::prev(It.base());
}
return MBB.end();
}
@@ -797,7 +790,7 @@ R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
DebugLoc DL) const {
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
- if (FBB == 0) {
+ if (!FBB) {
if (Cond.empty()) {
BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
return 1;
@@ -1064,10 +1057,34 @@ unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
return 2;
}
+bool R600InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
+
+ switch(MI->getOpcode()) {
+ default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);
+ case AMDGPU::R600_EXTRACT_ELT_V2:
+ case AMDGPU::R600_EXTRACT_ELT_V4:
+ buildIndirectRead(MI->getParent(), MI, MI->getOperand(0).getReg(),
+ RI.getHWRegIndex(MI->getOperand(1).getReg()), // Address
+ MI->getOperand(2).getReg(),
+ RI.getHWRegChan(MI->getOperand(1).getReg()));
+ break;
+ case AMDGPU::R600_INSERT_ELT_V2:
+ case AMDGPU::R600_INSERT_ELT_V4:
+ buildIndirectWrite(MI->getParent(), MI, MI->getOperand(2).getReg(), // Value
+ RI.getHWRegIndex(MI->getOperand(1).getReg()), // Address
+ MI->getOperand(3).getReg(), // Offset
+ RI.getHWRegChan(MI->getOperand(1).getReg())); // Channel
+ break;
+ }
+ MI->eraseFromParent();
+ return true;
+}
+
void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
const MachineFunction &MF) const {
const AMDGPUFrameLowering *TFL =
- static_cast<const AMDGPUFrameLowering*>(TM.getFrameLowering());
+ static_cast<const AMDGPUFrameLowering*>(
+ MF.getTarget().getFrameLowering());
unsigned StackWidth = TFL->getStackWidth(MF);
int End = getIndirectIndexEnd(MF);
@@ -1100,7 +1117,22 @@ MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
MachineBasicBlock::iterator I,
unsigned ValueReg, unsigned Address,
unsigned OffsetReg) const {
- unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
+ return buildIndirectWrite(MBB, I, ValueReg, Address, OffsetReg, 0);
+}
+
+MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I,
+ unsigned ValueReg, unsigned Address,
+ unsigned OffsetReg,
+ unsigned AddrChan) const {
+ unsigned AddrReg;
+ switch (AddrChan) {
+ default: llvm_unreachable("Invalid Channel");
+ case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break;
+ case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break;
+ case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break;
+ case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break;
+ }
MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
AMDGPU::AR_X, OffsetReg);
setImmOperand(MOVA, AMDGPU::OpName::write, 0);
@@ -1117,7 +1149,22 @@ MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
MachineBasicBlock::iterator I,
unsigned ValueReg, unsigned Address,
unsigned OffsetReg) const {
- unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
+ return buildIndirectRead(MBB, I, ValueReg, Address, OffsetReg, 0);
+}
+
+MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I,
+ unsigned ValueReg, unsigned Address,
+ unsigned OffsetReg,
+ unsigned AddrChan) const {
+ unsigned AddrReg;
+ switch (AddrChan) {
+ default: llvm_unreachable("Invalid Channel");
+ case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break;
+ case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break;
+ case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break;
+ case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break;
+ }
MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
AMDGPU::AR_X,
OffsetReg);
@@ -1220,7 +1267,6 @@ MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
const {
assert (MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented");
unsigned Opcode;
- const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
if (ST.getGeneration() <= AMDGPUSubtarget::R700)
Opcode = AMDGPU::DOT4_r600;
else
diff --git a/contrib/llvm/lib/Target/R600/R600InstrInfo.h b/contrib/llvm/lib/Target/R600/R600InstrInfo.h
index 13d9810..1c3cb63 100644
--- a/contrib/llvm/lib/Target/R600/R600InstrInfo.h
+++ b/contrib/llvm/lib/Target/R600/R600InstrInfo.h
@@ -32,12 +32,22 @@ namespace llvm {
class R600InstrInfo : public AMDGPUInstrInfo {
private:
const R600RegisterInfo RI;
- const AMDGPUSubtarget &ST;
- int getBranchInstr(const MachineOperand &op) const;
std::vector<std::pair<int, unsigned> >
ExtractSrcs(MachineInstr *MI, const DenseMap<unsigned, unsigned> &PV, unsigned &ConstCount) const;
+
+ MachineInstrBuilder buildIndirectRead(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I,
+ unsigned ValueReg, unsigned Address,
+ unsigned OffsetReg,
+ unsigned AddrChan) const;
+
+ MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I,
+ unsigned ValueReg, unsigned Address,
+ unsigned OffsetReg,
+ unsigned AddrChan) const;
public:
enum BankSwizzle {
ALU_VEC_012_SCL_210 = 0,
@@ -48,15 +58,15 @@ namespace llvm {
ALU_VEC_210
};
- explicit R600InstrInfo(AMDGPUTargetMachine &tm);
+ explicit R600InstrInfo(const AMDGPUSubtarget &st);
- const R600RegisterInfo &getRegisterInfo() const;
- virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const;
+ const R600RegisterInfo &getRegisterInfo() const override;
+ void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const override;
bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI) const;
+ MachineBasicBlock::iterator MBBI) const override;
bool isTrig(const MachineInstr &MI) const;
bool isPlaceHolderOpcode(unsigned opcode) const;
@@ -138,83 +148,84 @@ namespace llvm {
/// Same but using const index set instead of MI set.
bool fitsConstReadLimitations(const std::vector<unsigned>&) const;
- /// \breif Vector instructions are instructions that must fill all
+ /// \brief Vector instructions are instructions that must fill all
/// instruction slots within an instruction group.
bool isVector(const MachineInstr &MI) const;
- virtual unsigned getIEQOpcode() const;
- virtual bool isMov(unsigned Opcode) const;
+ bool isMov(unsigned Opcode) const override;
DFAPacketizer *CreateTargetScheduleState(const TargetMachine *TM,
- const ScheduleDAG *DAG) const;
+ const ScheduleDAG *DAG) const override;
- bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
+ bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond, bool AllowModify) const;
+ SmallVectorImpl<MachineOperand> &Cond, bool AllowModify) const override;
- unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const;
+ unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const override;
- unsigned RemoveBranch(MachineBasicBlock &MBB) const;
+ unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
- bool isPredicated(const MachineInstr *MI) const;
+ bool isPredicated(const MachineInstr *MI) const override;
- bool isPredicable(MachineInstr *MI) const;
+ bool isPredicable(MachineInstr *MI) const override;
bool
isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCyles,
- const BranchProbability &Probability) const;
+ const BranchProbability &Probability) const override;
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCyles,
unsigned ExtraPredCycles,
- const BranchProbability &Probability) const ;
+ const BranchProbability &Probability) const override ;
bool
isProfitableToIfCvt(MachineBasicBlock &TMBB,
unsigned NumTCycles, unsigned ExtraTCycles,
MachineBasicBlock &FMBB,
unsigned NumFCycles, unsigned ExtraFCycles,
- const BranchProbability &Probability) const;
+ const BranchProbability &Probability) const override;
bool DefinesPredicate(MachineInstr *MI,
- std::vector<MachineOperand> &Pred) const;
+ std::vector<MachineOperand> &Pred) const override;
bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2) const;
+ const SmallVectorImpl<MachineOperand> &Pred2) const override;
bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
- MachineBasicBlock &FMBB) const;
+ MachineBasicBlock &FMBB) const override;
bool PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Pred) const;
+ const SmallVectorImpl<MachineOperand> &Pred) const override;
- unsigned int getPredicationCost(const MachineInstr *) const;
+ unsigned int getPredicationCost(const MachineInstr *) const override;
unsigned int getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
- unsigned *PredCost = 0) const;
+ unsigned *PredCost = nullptr) const override;
+
+ int getInstrLatency(const InstrItineraryData *ItinData,
+ SDNode *Node) const override { return 1;}
- virtual int getInstrLatency(const InstrItineraryData *ItinData,
- SDNode *Node) const { return 1;}
+ virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;
 /// \brief Reserve the registers that may be accessed using indirect addressing.
void reserveIndirectRegisters(BitVector &Reserved,
const MachineFunction &MF) const;
- virtual unsigned calculateIndirectAddress(unsigned RegIndex,
- unsigned Channel) const;
+ unsigned calculateIndirectAddress(unsigned RegIndex,
+ unsigned Channel) const override;
- virtual const TargetRegisterClass *getIndirectAddrRegClass() const;
+ const TargetRegisterClass *getIndirectAddrRegClass() const override;
- virtual MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator I,
- unsigned ValueReg, unsigned Address,
- unsigned OffsetReg) const;
+ MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I,
+ unsigned ValueReg, unsigned Address,
+ unsigned OffsetReg) const override;
- virtual MachineInstrBuilder buildIndirectRead(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator I,
- unsigned ValueReg, unsigned Address,
- unsigned OffsetReg) const;
+ MachineInstrBuilder buildIndirectRead(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I,
+ unsigned ValueReg, unsigned Address,
+ unsigned OffsetReg) const override;
unsigned getMaxAlusPerClause() const;
@@ -244,7 +255,7 @@ namespace llvm {
MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
MachineBasicBlock::iterator I,
- unsigned DstReg, unsigned SrcReg) const;
+ unsigned DstReg, unsigned SrcReg) const override;
/// \brief Get the index of Op in the MachineInstr.
///
diff --git a/contrib/llvm/lib/Target/R600/R600Instructions.td b/contrib/llvm/lib/Target/R600/R600Instructions.td
index 74c65da..704507d 100644
--- a/contrib/llvm/lib/Target/R600/R600Instructions.td
+++ b/contrib/llvm/lib/Target/R600/R600Instructions.td
@@ -7,7 +7,8 @@
//
//===----------------------------------------------------------------------===//
//
-// R600 Tablegen instruction definitions
+// TableGen definitions for instructions which are available on R600 family
+// GPUs.
//
//===----------------------------------------------------------------------===//
@@ -124,7 +125,7 @@ class R600_1OP <bits<11> inst, string opName, list<dag> pattern,
class R600_1OP_Helper <bits<11> inst, string opName, SDPatternOperator node,
InstrItinClass itin = AnyALU> :
R600_1OP <inst, opName,
- [(set R600_Reg32:$dst, (node R600_Reg32:$src0))]
+ [(set R600_Reg32:$dst, (node R600_Reg32:$src0))], itin
>;
// If you add or change the operands for R600_2OP instructions, you must
@@ -160,10 +161,10 @@ class R600_2OP <bits<11> inst, string opName, list<dag> pattern,
}
class R600_2OP_Helper <bits<11> inst, string opName, SDPatternOperator node,
- InstrItinClass itim = AnyALU> :
+ InstrItinClass itin = AnyALU> :
R600_2OP <inst, opName,
[(set R600_Reg32:$dst, (node R600_Reg32:$src0,
- R600_Reg32:$src1))]
+ R600_Reg32:$src1))], itin
>;
 // If you add or change the operands for R600_3OP instructions, you must
@@ -215,7 +216,7 @@ class R600_REDUCTION <bits<11> inst, dag ins, string asm, list<dag> pattern,
def TEX_SHADOW : PatLeaf<
(imm),
[{uint32_t TType = (uint32_t)N->getZExtValue();
- return (TType >= 6 && TType <= 8) || (TType >= 11 && TType <= 13);
+ return (TType >= 6 && TType <= 8) || TType == 13;
}]
>;
@@ -335,17 +336,6 @@ def load_param_exti8 : LoadParamFrag<az_extloadi8>;
def load_param_exti16 : LoadParamFrag<az_extloadi16>;
def isR600 : Predicate<"Subtarget.getGeneration() <= AMDGPUSubtarget::R700">;
-def isR700 : Predicate<"Subtarget.getGeneration() == AMDGPUSubtarget::R700">;
-def isEG : Predicate<
- "Subtarget.getGeneration() >= AMDGPUSubtarget::EVERGREEN && "
- "Subtarget.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS && "
- "!Subtarget.hasCaymanISA()">;
-
-def isCayman : Predicate<"Subtarget.hasCaymanISA()">;
-def isEGorCayman : Predicate<"Subtarget.getGeneration() == "
- "AMDGPUSubtarget::EVERGREEN"
- "|| Subtarget.getGeneration() =="
- "AMDGPUSubtarget::NORTHERN_ISLANDS">;
def isR600toCayman : Predicate<
"Subtarget.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS">;
@@ -642,6 +632,9 @@ ins, AsmPrint, [] >, CF_WORD0_EG, CF_WORD1_EG {
def CF_ALU : ALU_CLAUSE<8, "ALU">;
def CF_ALU_PUSH_BEFORE : ALU_CLAUSE<9, "ALU_PUSH_BEFORE">;
def CF_ALU_POP_AFTER : ALU_CLAUSE<10, "ALU_POP_AFTER">;
+def CF_ALU_CONTINUE : ALU_CLAUSE<13, "ALU_CONTINUE">;
+def CF_ALU_BREAK : ALU_CLAUSE<14, "ALU_BREAK">;
+def CF_ALU_ELSE_AFTER : ALU_CLAUSE<15, "ALU_ELSE_AFTER">;
def FETCH_CLAUSE : AMDGPUInst <(outs),
(ins i32imm:$addr), "Fetch clause starting at $addr:", [] > {
@@ -728,7 +721,7 @@ def SETNE_DX10 : R600_2OP <
>;
def FRACT : R600_1OP_Helper <0x10, "FRACT", AMDGPUfract>;
-def TRUNC : R600_1OP_Helper <0x11, "TRUNC", int_AMDGPU_trunc>;
+def TRUNC : R600_1OP_Helper <0x11, "TRUNC", ftrunc>;
def CEIL : R600_1OP_Helper <0x12, "CEIL", fceil>;
def RNDNE : R600_1OP_Helper <0x13, "RNDNE", frint>;
def FLOOR : R600_1OP_Helper <0x14, "FLOOR", ffloor>;
@@ -1086,18 +1079,21 @@ class RECIP_UINT_Common <bits<11> inst> : R600_1OP_Helper <
let Itinerary = TransALU;
}
+// Clamped to maximum.
class RECIPSQRT_CLAMPED_Common <bits<11> inst> : R600_1OP_Helper <
- inst, "RECIPSQRT_CLAMPED", int_AMDGPU_rsq
+ inst, "RECIPSQRT_CLAMPED", AMDGPUrsq_clamped
> {
let Itinerary = TransALU;
}
-class RECIPSQRT_IEEE_Common <bits<11> inst> : R600_1OP <
- inst, "RECIPSQRT_IEEE", []
+class RECIPSQRT_IEEE_Common <bits<11> inst> : R600_1OP_Helper <
+ inst, "RECIPSQRT_IEEE", AMDGPUrsq_legacy
> {
let Itinerary = TransALU;
}
+// TODO: There is also RECIPSQRT_FF which clamps to zero.
+
class SIN_Common <bits<11> inst> : R600_1OP <
inst, "SIN", [(set f32:$dst, (SIN_HW f32:$src0))]>{
let Trig = 1;
@@ -1235,6 +1231,10 @@ let Predicates = [isR600] in {
"JUMP @$ADDR POP:$POP_COUNT"> {
let CNT = 0;
}
+ def CF_PUSH_ELSE_R600 : CF_CLAUSE_R600<12, (ins i32imm:$ADDR),
+ "PUSH_ELSE @$ADDR"> {
+ let CNT = 0;
+ }
def CF_ELSE_R600 : CF_CLAUSE_R600<13, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
"ELSE @$ADDR POP:$POP_COUNT"> {
let CNT = 0;
@@ -1257,561 +1257,6 @@ let Predicates = [isR600] in {
}
-//===----------------------------------------------------------------------===//
-// R700 Only instructions
-//===----------------------------------------------------------------------===//
-
-let Predicates = [isR700] in {
- def SIN_r700 : SIN_Common<0x6E>;
- def COS_r700 : COS_Common<0x6F>;
-}
-
-//===----------------------------------------------------------------------===//
-// Evergreen / Cayman store instructions
-//===----------------------------------------------------------------------===//
-
-let Predicates = [isEGorCayman] in {
-
-class CF_MEM_RAT_CACHELESS <bits<6> rat_inst, bits<4> rat_id, bits<4> mask, dag ins,
- string name, list<dag> pattern>
- : EG_CF_RAT <0x57, rat_inst, rat_id, mask, (outs), ins,
- "MEM_RAT_CACHELESS "#name, pattern>;
-
-class CF_MEM_RAT <bits<6> rat_inst, bits<4> rat_id, dag ins, string name,
- list<dag> pattern>
- : EG_CF_RAT <0x56, rat_inst, rat_id, 0xf /* mask */, (outs), ins,
- "MEM_RAT "#name, pattern>;
-
-def RAT_MSKOR : CF_MEM_RAT <0x11, 0,
- (ins R600_Reg128:$rw_gpr, R600_TReg32_X:$index_gpr),
- "MSKOR $rw_gpr.XW, $index_gpr",
- [(mskor_global v4i32:$rw_gpr, i32:$index_gpr)]
-> {
- let eop = 0;
-}
-
-} // End Predicates = [isEGorCayman]
-
-
-//===----------------------------------------------------------------------===//
-// Evergreen Only instructions
-//===----------------------------------------------------------------------===//
-
-let Predicates = [isEG] in {
-
-def RECIP_IEEE_eg : RECIP_IEEE_Common<0x86>;
-defm DIV_eg : DIV_Common<RECIP_IEEE_eg>;
-
-def MULLO_INT_eg : MULLO_INT_Common<0x8F>;
-def MULHI_INT_eg : MULHI_INT_Common<0x90>;
-def MULLO_UINT_eg : MULLO_UINT_Common<0x91>;
-def MULHI_UINT_eg : MULHI_UINT_Common<0x92>;
-def RECIP_UINT_eg : RECIP_UINT_Common<0x94>;
-def RECIPSQRT_CLAMPED_eg : RECIPSQRT_CLAMPED_Common<0x87>;
-def EXP_IEEE_eg : EXP_IEEE_Common<0x81>;
-def LOG_IEEE_eg : LOG_IEEE_Common<0x83>;
-def RECIP_CLAMPED_eg : RECIP_CLAMPED_Common<0x84>;
-def RECIPSQRT_IEEE_eg : RECIPSQRT_IEEE_Common<0x89>;
-def SIN_eg : SIN_Common<0x8D>;
-def COS_eg : COS_Common<0x8E>;
-
-def : POW_Common <LOG_IEEE_eg, EXP_IEEE_eg, MUL>;
-def : Pat<(fsqrt f32:$src), (MUL $src, (RECIPSQRT_CLAMPED_eg $src))>;
-
-//===----------------------------------------------------------------------===//
-// Memory read/write instructions
-//===----------------------------------------------------------------------===//
-
-let usesCustomInserter = 1 in {
-
-// 32-bit store
-def RAT_WRITE_CACHELESS_32_eg : CF_MEM_RAT_CACHELESS <0x2, 0, 0x1,
- (ins R600_TReg32_X:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop),
- "STORE_RAW $rw_gpr, $index_gpr, $eop",
- [(global_store i32:$rw_gpr, i32:$index_gpr)]
->;
-
-// 64-bit store
-def RAT_WRITE_CACHELESS_64_eg : CF_MEM_RAT_CACHELESS <0x2, 0, 0x3,
- (ins R600_Reg64:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop),
- "STORE_RAW $rw_gpr.XY, $index_gpr, $eop",
- [(global_store v2i32:$rw_gpr, i32:$index_gpr)]
->;
-
-//128-bit store
-def RAT_WRITE_CACHELESS_128_eg : CF_MEM_RAT_CACHELESS <0x2, 0, 0xf,
- (ins R600_Reg128:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop),
- "STORE_RAW $rw_gpr.XYZW, $index_gpr, $eop",
- [(global_store v4i32:$rw_gpr, i32:$index_gpr)]
->;
-
-} // End usesCustomInserter = 1
-
-class VTX_READ_eg <string name, bits<8> buffer_id, dag outs, list<dag> pattern>
- : VTX_WORD0_eg, VTX_READ<name, buffer_id, outs, pattern> {
-
- // Static fields
- let VC_INST = 0;
- let FETCH_TYPE = 2;
- let FETCH_WHOLE_QUAD = 0;
- let BUFFER_ID = buffer_id;
- let SRC_REL = 0;
- // XXX: We can infer this field based on the SRC_GPR. This would allow us
- // to store vertex addresses in any channel, not just X.
- let SRC_SEL_X = 0;
-
- let Inst{31-0} = Word0;
-}
-
-class VTX_READ_8_eg <bits<8> buffer_id, list<dag> pattern>
- : VTX_READ_eg <"VTX_READ_8 $dst_gpr, $src_gpr", buffer_id,
- (outs R600_TReg32_X:$dst_gpr), pattern> {
-
- let MEGA_FETCH_COUNT = 1;
- let DST_SEL_X = 0;
- let DST_SEL_Y = 7; // Masked
- let DST_SEL_Z = 7; // Masked
- let DST_SEL_W = 7; // Masked
- let DATA_FORMAT = 1; // FMT_8
-}
-
-class VTX_READ_16_eg <bits<8> buffer_id, list<dag> pattern>
- : VTX_READ_eg <"VTX_READ_16 $dst_gpr, $src_gpr", buffer_id,
- (outs R600_TReg32_X:$dst_gpr), pattern> {
- let MEGA_FETCH_COUNT = 2;
- let DST_SEL_X = 0;
- let DST_SEL_Y = 7; // Masked
- let DST_SEL_Z = 7; // Masked
- let DST_SEL_W = 7; // Masked
- let DATA_FORMAT = 5; // FMT_16
-
-}
-
-class VTX_READ_32_eg <bits<8> buffer_id, list<dag> pattern>
- : VTX_READ_eg <"VTX_READ_32 $dst_gpr, $src_gpr", buffer_id,
- (outs R600_TReg32_X:$dst_gpr), pattern> {
-
- let MEGA_FETCH_COUNT = 4;
- let DST_SEL_X = 0;
- let DST_SEL_Y = 7; // Masked
- let DST_SEL_Z = 7; // Masked
- let DST_SEL_W = 7; // Masked
- let DATA_FORMAT = 0xD; // COLOR_32
-
- // This is not really necessary, but there were some GPU hangs that appeared
- // to be caused by ALU instructions in the next instruction group that wrote
- // to the $src_gpr registers of the VTX_READ.
- // e.g.
- // %T3_X<def> = VTX_READ_PARAM_32_eg %T2_X<kill>, 24
- // %T2_X<def> = MOV %ZERO
- //Adding this constraint prevents this from happening.
- let Constraints = "$src_gpr.ptr = $dst_gpr";
-}
-
-class VTX_READ_64_eg <bits<8> buffer_id, list<dag> pattern>
- : VTX_READ_eg <"VTX_READ_64 $dst_gpr.XY, $src_gpr", buffer_id,
- (outs R600_Reg64:$dst_gpr), pattern> {
-
- let MEGA_FETCH_COUNT = 8;
- let DST_SEL_X = 0;
- let DST_SEL_Y = 1;
- let DST_SEL_Z = 7;
- let DST_SEL_W = 7;
- let DATA_FORMAT = 0x1D; // COLOR_32_32
-}
-
-class VTX_READ_128_eg <bits<8> buffer_id, list<dag> pattern>
- : VTX_READ_eg <"VTX_READ_128 $dst_gpr.XYZW, $src_gpr", buffer_id,
- (outs R600_Reg128:$dst_gpr), pattern> {
-
- let MEGA_FETCH_COUNT = 16;
- let DST_SEL_X = 0;
- let DST_SEL_Y = 1;
- let DST_SEL_Z = 2;
- let DST_SEL_W = 3;
- let DATA_FORMAT = 0x22; // COLOR_32_32_32_32
-
- // XXX: Need to force VTX_READ_128 instructions to write to the same register
- // that holds its buffer address to avoid potential hangs. We can't use
- // the same constraint as VTX_READ_32_eg, because the $src_gpr.ptr and $dst
- // registers are different sizes.
-}
-
-//===----------------------------------------------------------------------===//
-// VTX Read from parameter memory space
-//===----------------------------------------------------------------------===//
-
-def VTX_READ_PARAM_8_eg : VTX_READ_8_eg <0,
- [(set i32:$dst_gpr, (load_param_exti8 ADDRVTX_READ:$src_gpr))]
->;
-
-def VTX_READ_PARAM_16_eg : VTX_READ_16_eg <0,
- [(set i32:$dst_gpr, (load_param_exti16 ADDRVTX_READ:$src_gpr))]
->;
-
-def VTX_READ_PARAM_32_eg : VTX_READ_32_eg <0,
- [(set i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
->;
-
-def VTX_READ_PARAM_64_eg : VTX_READ_64_eg <0,
- [(set v2i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
->;
-
-def VTX_READ_PARAM_128_eg : VTX_READ_128_eg <0,
- [(set v4i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
->;
-
-//===----------------------------------------------------------------------===//
-// VTX Read from global memory space
-//===----------------------------------------------------------------------===//
-
-// 8-bit reads
-def VTX_READ_GLOBAL_8_eg : VTX_READ_8_eg <1,
- [(set i32:$dst_gpr, (az_extloadi8_global ADDRVTX_READ:$src_gpr))]
->;
-
-def VTX_READ_GLOBAL_16_eg : VTX_READ_16_eg <1,
- [(set i32:$dst_gpr, (az_extloadi16_global ADDRVTX_READ:$src_gpr))]
->;
-
-// 32-bit reads
-def VTX_READ_GLOBAL_32_eg : VTX_READ_32_eg <1,
- [(set i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
->;
-
-// 64-bit reads
-def VTX_READ_GLOBAL_64_eg : VTX_READ_64_eg <1,
- [(set v2i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
->;
-
-// 128-bit reads
-def VTX_READ_GLOBAL_128_eg : VTX_READ_128_eg <1,
- [(set v4i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
->;
-
-} // End Predicates = [isEG]
-
-//===----------------------------------------------------------------------===//
-// Evergreen / Cayman Instructions
-//===----------------------------------------------------------------------===//
-
-let Predicates = [isEGorCayman] in {
-
- // BFE_UINT - bit_extract, an optimization for mask and shift
- // Src0 = Input
- // Src1 = Offset
- // Src2 = Width
- //
- // bit_extract = (Input << (32 - Offset - Width)) >> (32 - Width)
- //
- // Example Usage:
- // (Offset, Width)
- //
- // (0, 8) = (Input << 24) >> 24 = (Input & 0xff) >> 0
- // (8, 8) = (Input << 16) >> 24 = (Input & 0xffff) >> 8
- // (16,8) = (Input << 8) >> 24 = (Input & 0xffffff) >> 16
- // (24,8) = (Input << 0) >> 24 = (Input & 0xffffffff) >> 24
- def BFE_UINT_eg : R600_3OP <0x4, "BFE_UINT",
- [(set i32:$dst, (int_AMDIL_bit_extract_u32 i32:$src0, i32:$src1,
- i32:$src2))],
- VecALU
- >;
-// XXX: This pattern is broken, disabling for now. See comment in
-// AMDGPUInstructions.td for more info.
-// def : BFEPattern <BFE_UINT_eg>;
-
- def BFI_INT_eg : R600_3OP <0x06, "BFI_INT", [], VecALU>;
- defm : BFIPatterns <BFI_INT_eg>;
-
- def MULADD_UINT24_eg : R600_3OP <0x10, "MULADD_UINT24",
- [(set i32:$dst, (add (mul U24:$src0, U24:$src1), i32:$src2))], VecALU
- >;
- def BIT_ALIGN_INT_eg : R600_3OP <0xC, "BIT_ALIGN_INT", [], VecALU>;
- def : ROTRPattern <BIT_ALIGN_INT_eg>;
-
- def MULADD_eg : MULADD_Common<0x14>;
- def MULADD_IEEE_eg : MULADD_IEEE_Common<0x18>;
- def ASHR_eg : ASHR_Common<0x15>;
- def LSHR_eg : LSHR_Common<0x16>;
- def LSHL_eg : LSHL_Common<0x17>;
- def CNDE_eg : CNDE_Common<0x19>;
- def CNDGT_eg : CNDGT_Common<0x1A>;
- def CNDGE_eg : CNDGE_Common<0x1B>;
- def MUL_LIT_eg : MUL_LIT_Common<0x1F>;
- def LOG_CLAMPED_eg : LOG_CLAMPED_Common<0x82>;
- def MUL_UINT24_eg : R600_2OP <0xB5, "MUL_UINT24",
- [(set i32:$dst, (mul U24:$src0, U24:$src1))], VecALU
- >;
- def DOT4_eg : DOT4_Common<0xBE>;
- defm CUBE_eg : CUBE_Common<0xC0>;
-
-let hasSideEffects = 1 in {
- def MOVA_INT_eg : R600_1OP <0xCC, "MOVA_INT", []>;
-}
-
- def TGSI_LIT_Z_eg : TGSI_LIT_Z_Common<MUL_LIT_eg, LOG_CLAMPED_eg, EXP_IEEE_eg>;
-
- def FLT_TO_INT_eg : FLT_TO_INT_Common<0x50> {
- let Pattern = [];
- let Itinerary = AnyALU;
- }
-
- def INT_TO_FLT_eg : INT_TO_FLT_Common<0x9B>;
-
- def FLT_TO_UINT_eg : FLT_TO_UINT_Common<0x9A> {
- let Pattern = [];
- }
-
- def UINT_TO_FLT_eg : UINT_TO_FLT_Common<0x9C>;
-
-def GROUP_BARRIER : InstR600 <
- (outs), (ins), " GROUP_BARRIER", [(int_AMDGPU_barrier_local)], AnyALU>,
- R600ALU_Word0,
- R600ALU_Word1_OP2 <0x54> {
-
- let dst = 0;
- let dst_rel = 0;
- let src0 = 0;
- let src0_rel = 0;
- let src0_neg = 0;
- let src0_abs = 0;
- let src1 = 0;
- let src1_rel = 0;
- let src1_neg = 0;
- let src1_abs = 0;
- let write = 0;
- let omod = 0;
- let clamp = 0;
- let last = 1;
- let bank_swizzle = 0;
- let pred_sel = 0;
- let update_exec_mask = 0;
- let update_pred = 0;
-
- let Inst{31-0} = Word0;
- let Inst{63-32} = Word1;
-
- let ALUInst = 1;
-}
-
-//===----------------------------------------------------------------------===//
-// LDS Instructions
-//===----------------------------------------------------------------------===//
-class R600_LDS <bits<6> op, dag outs, dag ins, string asm,
- list<dag> pattern = []> :
-
- InstR600 <outs, ins, asm, pattern, XALU>,
- R600_ALU_LDS_Word0,
- R600LDS_Word1 {
-
- bits<6> offset = 0;
- let lds_op = op;
-
- let Word1{27} = offset{0};
- let Word1{12} = offset{1};
- let Word1{28} = offset{2};
- let Word1{31} = offset{3};
- let Word0{12} = offset{4};
- let Word0{25} = offset{5};
-
-
- let Inst{31-0} = Word0;
- let Inst{63-32} = Word1;
-
- let ALUInst = 1;
- let HasNativeOperands = 1;
- let UseNamedOperandTable = 1;
-}
-
-class R600_LDS_1A <bits<6> lds_op, string name, list<dag> pattern> : R600_LDS <
- lds_op,
- (outs R600_Reg32:$dst),
- (ins R600_Reg32:$src0, REL:$src0_rel, SEL:$src0_sel,
- LAST:$last, R600_Pred:$pred_sel,
- BANK_SWIZZLE:$bank_swizzle),
- " "#name#" $last OQAP, $src0$src0_rel $pred_sel",
- pattern
- > {
-
- let src1 = 0;
- let src1_rel = 0;
- let src2 = 0;
- let src2_rel = 0;
-
- let usesCustomInserter = 1;
- let LDS_1A = 1;
- let DisableEncoding = "$dst";
-}
-
-class R600_LDS_1A1D <bits<6> lds_op, dag outs, string name, list<dag> pattern,
- string dst =""> :
- R600_LDS <
- lds_op, outs,
- (ins R600_Reg32:$src0, REL:$src0_rel, SEL:$src0_sel,
- R600_Reg32:$src1, REL:$src1_rel, SEL:$src1_sel,
- LAST:$last, R600_Pred:$pred_sel,
- BANK_SWIZZLE:$bank_swizzle),
- " "#name#" $last "#dst#"$src0$src0_rel, $src1$src1_rel, $pred_sel",
- pattern
- > {
-
- field string BaseOp;
-
- let src2 = 0;
- let src2_rel = 0;
- let LDS_1A1D = 1;
-}
-
-class R600_LDS_1A1D_NORET <bits<6> lds_op, string name, list<dag> pattern> :
- R600_LDS_1A1D <lds_op, (outs), name, pattern> {
- let BaseOp = name;
-}
-
-class R600_LDS_1A1D_RET <bits<6> lds_op, string name, list<dag> pattern> :
- R600_LDS_1A1D <lds_op, (outs R600_Reg32:$dst), name##"_RET", pattern, "OQAP, "> {
-
- let BaseOp = name;
- let usesCustomInserter = 1;
- let DisableEncoding = "$dst";
-}
-
-class R600_LDS_1A2D <bits<6> lds_op, string name, list<dag> pattern> :
- R600_LDS <
- lds_op,
- (outs),
- (ins R600_Reg32:$src0, REL:$src0_rel, SEL:$src0_sel,
- R600_Reg32:$src1, REL:$src1_rel, SEL:$src1_sel,
- R600_Reg32:$src2, REL:$src2_rel, SEL:$src2_sel,
- LAST:$last, R600_Pred:$pred_sel, BANK_SWIZZLE:$bank_swizzle),
- " "#name# "$last $src0$src0_rel, $src1$src1_rel, $src2$src2_rel, $pred_sel",
- pattern> {
- let LDS_1A2D = 1;
-}
-
-def LDS_ADD : R600_LDS_1A1D_NORET <0x0, "LDS_ADD", [] >;
-def LDS_SUB : R600_LDS_1A1D_NORET <0x1, "LDS_SUB", [] >;
-def LDS_WRITE : R600_LDS_1A1D_NORET <0xD, "LDS_WRITE",
- [(local_store (i32 R600_Reg32:$src1), R600_Reg32:$src0)]
->;
-def LDS_BYTE_WRITE : R600_LDS_1A1D_NORET<0x12, "LDS_BYTE_WRITE",
- [(truncstorei8_local i32:$src1, i32:$src0)]
->;
-def LDS_SHORT_WRITE : R600_LDS_1A1D_NORET<0x13, "LDS_SHORT_WRITE",
- [(truncstorei16_local i32:$src1, i32:$src0)]
->;
-def LDS_ADD_RET : R600_LDS_1A1D_RET <0x20, "LDS_ADD",
- [(set i32:$dst, (atomic_load_add_local i32:$src0, i32:$src1))]
->;
-def LDS_SUB_RET : R600_LDS_1A1D_RET <0x21, "LDS_SUB",
- [(set i32:$dst, (atomic_load_sub_local i32:$src0, i32:$src1))]
->;
-def LDS_READ_RET : R600_LDS_1A <0x32, "LDS_READ_RET",
- [(set (i32 R600_Reg32:$dst), (local_load R600_Reg32:$src0))]
->;
-def LDS_BYTE_READ_RET : R600_LDS_1A <0x36, "LDS_BYTE_READ_RET",
- [(set i32:$dst, (sextloadi8_local i32:$src0))]
->;
-def LDS_UBYTE_READ_RET : R600_LDS_1A <0x37, "LDS_UBYTE_READ_RET",
- [(set i32:$dst, (az_extloadi8_local i32:$src0))]
->;
-def LDS_SHORT_READ_RET : R600_LDS_1A <0x38, "LDS_SHORT_READ_RET",
- [(set i32:$dst, (sextloadi16_local i32:$src0))]
->;
-def LDS_USHORT_READ_RET : R600_LDS_1A <0x39, "LDS_USHORT_READ_RET",
- [(set i32:$dst, (az_extloadi16_local i32:$src0))]
->;
-
- // TRUNC is used for the FLT_TO_INT instructions to work around a
- // perceived problem where the rounding modes are applied differently
- // depending on the instruction and the slot they are in.
- // See:
- // https://bugs.freedesktop.org/show_bug.cgi?id=50232
- // Mesa commit: a1a0974401c467cb86ef818f22df67c21774a38c
- //
- // XXX: Lowering SELECT_CC will sometimes generate fp_to_[su]int nodes,
- // which do not need to be truncated since the fp values are 0.0f or 1.0f.
- // We should look into handling these cases separately.
- def : Pat<(fp_to_sint f32:$src0), (FLT_TO_INT_eg (TRUNC $src0))>;
-
- def : Pat<(fp_to_uint f32:$src0), (FLT_TO_UINT_eg (TRUNC $src0))>;
-
- // SHA-256 Patterns
- def : SHA256MaPattern <BFI_INT_eg, XOR_INT>;
-
- def : FROUNDPat <CNDGE_eg>;
-
- def EG_ExportSwz : ExportSwzInst {
- let Word1{19-16} = 0; // BURST_COUNT
- let Word1{20} = 0; // VALID_PIXEL_MODE
- let Word1{21} = eop;
- let Word1{29-22} = inst;
- let Word1{30} = 0; // MARK
- let Word1{31} = 1; // BARRIER
- }
- defm : ExportPattern<EG_ExportSwz, 83>;
-
- def EG_ExportBuf : ExportBufInst {
- let Word1{19-16} = 0; // BURST_COUNT
- let Word1{20} = 0; // VALID_PIXEL_MODE
- let Word1{21} = eop;
- let Word1{29-22} = inst;
- let Word1{30} = 0; // MARK
- let Word1{31} = 1; // BARRIER
- }
- defm : SteamOutputExportPattern<EG_ExportBuf, 0x40, 0x41, 0x42, 0x43>;
-
- def CF_TC_EG : CF_CLAUSE_EG<1, (ins i32imm:$ADDR, i32imm:$COUNT),
- "TEX $COUNT @$ADDR"> {
- let POP_COUNT = 0;
- }
- def CF_VC_EG : CF_CLAUSE_EG<2, (ins i32imm:$ADDR, i32imm:$COUNT),
- "VTX $COUNT @$ADDR"> {
- let POP_COUNT = 0;
- }
- def WHILE_LOOP_EG : CF_CLAUSE_EG<6, (ins i32imm:$ADDR),
- "LOOP_START_DX10 @$ADDR"> {
- let POP_COUNT = 0;
- let COUNT = 0;
- }
- def END_LOOP_EG : CF_CLAUSE_EG<5, (ins i32imm:$ADDR), "END_LOOP @$ADDR"> {
- let POP_COUNT = 0;
- let COUNT = 0;
- }
- def LOOP_BREAK_EG : CF_CLAUSE_EG<9, (ins i32imm:$ADDR),
- "LOOP_BREAK @$ADDR"> {
- let POP_COUNT = 0;
- let COUNT = 0;
- }
- def CF_CONTINUE_EG : CF_CLAUSE_EG<8, (ins i32imm:$ADDR),
- "CONTINUE @$ADDR"> {
- let POP_COUNT = 0;
- let COUNT = 0;
- }
- def CF_JUMP_EG : CF_CLAUSE_EG<10, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
- "JUMP @$ADDR POP:$POP_COUNT"> {
- let COUNT = 0;
- }
- def CF_ELSE_EG : CF_CLAUSE_EG<13, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
- "ELSE @$ADDR POP:$POP_COUNT"> {
- let COUNT = 0;
- }
- def CF_CALL_FS_EG : CF_CLAUSE_EG<19, (ins), "CALL_FS"> {
- let ADDR = 0;
- let COUNT = 0;
- let POP_COUNT = 0;
- }
- def POP_EG : CF_CLAUSE_EG<14, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
- "POP @$ADDR POP:$POP_COUNT"> {
- let COUNT = 0;
- }
- def CF_END_EG : CF_CLAUSE_EG<0, (ins), "CF_END"> {
- let COUNT = 0;
- let POP_COUNT = 0;
- let ADDR = 0;
- let END_OF_PROGRAM = 1;
- }
-
-} // End Predicates = [isEGorCayman]
//===----------------------------------------------------------------------===//
 // Register loads and stores - for indirect addressing
@@ -1819,217 +1264,6 @@ def LDS_USHORT_READ_RET : R600_LDS_1A <0x39, "LDS_USHORT_READ_RET",
defm R600_ : RegisterLoadStore <R600_Reg32, FRAMEri, ADDRIndirect>;
-//===----------------------------------------------------------------------===//
-// Cayman Instructions
-//===----------------------------------------------------------------------===//
-
-let Predicates = [isCayman] in {
-
-def MULADD_INT24_cm : R600_3OP <0x08, "MULADD_INT24",
- [(set i32:$dst, (add (mul I24:$src0, I24:$src1), i32:$src2))], VecALU
->;
-def MUL_INT24_cm : R600_2OP <0x5B, "MUL_INT24",
- [(set i32:$dst, (mul I24:$src0, I24:$src1))], VecALU
->;
-
-let isVector = 1 in {
-
-def RECIP_IEEE_cm : RECIP_IEEE_Common<0x86>;
-
-def MULLO_INT_cm : MULLO_INT_Common<0x8F>;
-def MULHI_INT_cm : MULHI_INT_Common<0x90>;
-def MULLO_UINT_cm : MULLO_UINT_Common<0x91>;
-def MULHI_UINT_cm : MULHI_UINT_Common<0x92>;
-def RECIPSQRT_CLAMPED_cm : RECIPSQRT_CLAMPED_Common<0x87>;
-def EXP_IEEE_cm : EXP_IEEE_Common<0x81>;
-def LOG_IEEE_cm : LOG_IEEE_Common<0x83>;
-def RECIP_CLAMPED_cm : RECIP_CLAMPED_Common<0x84>;
-def RECIPSQRT_IEEE_cm : RECIPSQRT_IEEE_Common<0x89>;
-def SIN_cm : SIN_Common<0x8D>;
-def COS_cm : COS_Common<0x8E>;
-} // End isVector = 1
-
-def : POW_Common <LOG_IEEE_cm, EXP_IEEE_cm, MUL>;
-
-defm DIV_cm : DIV_Common<RECIP_IEEE_cm>;
-
-// RECIP_UINT emulation for Cayman
-// The multiplication scales from [0,1] to the unsigned integer range
-def : Pat <
- (AMDGPUurecip i32:$src0),
- (FLT_TO_UINT_eg (MUL_IEEE (RECIP_IEEE_cm (UINT_TO_FLT_eg $src0)),
- (MOV_IMM_I32 CONST.FP_UINT_MAX_PLUS_1)))
->;
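-
- In scalar terms the pattern above computes (up to single-precision rounding) a
- 0.32 fixed-point reciprocal, roughly floor(2^32 / x): CONST.FP_UINT_MAX_PLUS_1
- supplies the 2^32 scale, so urecip(x) ~= FLT_TO_UINT(2^32 * (1.0f / float(x))).
- For example, x = 3 yields approximately 0x55555555 (1431655765).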
-
- def CF_END_CM : CF_CLAUSE_EG<32, (ins), "CF_END"> {
- let ADDR = 0;
- let POP_COUNT = 0;
- let COUNT = 0;
- }
-
-def : Pat<(fsqrt f32:$src), (MUL R600_Reg32:$src, (RECIPSQRT_CLAMPED_cm $src))>;
-
-class RAT_STORE_DWORD <RegisterClass rc, ValueType vt, bits<4> mask> :
- CF_MEM_RAT_CACHELESS <0x14, 0, mask,
- (ins rc:$rw_gpr, R600_TReg32_X:$index_gpr),
- "STORE_DWORD $rw_gpr, $index_gpr",
- [(global_store vt:$rw_gpr, i32:$index_gpr)]> {
- let eop = 0; // This bit is not used on Cayman.
-}
-
-def RAT_STORE_DWORD32 : RAT_STORE_DWORD <R600_TReg32_X, i32, 0x1>;
-def RAT_STORE_DWORD64 : RAT_STORE_DWORD <R600_Reg64, v2i32, 0x3>;
-def RAT_STORE_DWORD128 : RAT_STORE_DWORD <R600_Reg128, v4i32, 0xf>;
-
-class VTX_READ_cm <string name, bits<8> buffer_id, dag outs, list<dag> pattern>
- : VTX_WORD0_cm, VTX_READ<name, buffer_id, outs, pattern> {
-
- // Static fields
- let VC_INST = 0;
- let FETCH_TYPE = 2;
- let FETCH_WHOLE_QUAD = 0;
- let BUFFER_ID = buffer_id;
- let SRC_REL = 0;
- // XXX: We can infer this field based on the SRC_GPR. This would allow us
- // to store vertex addresses in any channel, not just X.
- let SRC_SEL_X = 0;
- let SRC_SEL_Y = 0;
- let STRUCTURED_READ = 0;
- let LDS_REQ = 0;
- let COALESCED_READ = 0;
-
- let Inst{31-0} = Word0;
-}
-
-class VTX_READ_8_cm <bits<8> buffer_id, list<dag> pattern>
- : VTX_READ_cm <"VTX_READ_8 $dst_gpr, $src_gpr", buffer_id,
- (outs R600_TReg32_X:$dst_gpr), pattern> {
-
- let DST_SEL_X = 0;
- let DST_SEL_Y = 7; // Masked
- let DST_SEL_Z = 7; // Masked
- let DST_SEL_W = 7; // Masked
- let DATA_FORMAT = 1; // FMT_8
-}
-
-class VTX_READ_16_cm <bits<8> buffer_id, list<dag> pattern>
- : VTX_READ_cm <"VTX_READ_16 $dst_gpr, $src_gpr", buffer_id,
- (outs R600_TReg32_X:$dst_gpr), pattern> {
- let DST_SEL_X = 0;
- let DST_SEL_Y = 7; // Masked
- let DST_SEL_Z = 7; // Masked
- let DST_SEL_W = 7; // Masked
- let DATA_FORMAT = 5; // FMT_16
-
-}
-
-class VTX_READ_32_cm <bits<8> buffer_id, list<dag> pattern>
- : VTX_READ_cm <"VTX_READ_32 $dst_gpr, $src_gpr", buffer_id,
- (outs R600_TReg32_X:$dst_gpr), pattern> {
-
- let DST_SEL_X = 0;
- let DST_SEL_Y = 7; // Masked
- let DST_SEL_Z = 7; // Masked
- let DST_SEL_W = 7; // Masked
- let DATA_FORMAT = 0xD; // COLOR_32
-
- // This is not really necessary, but there were some GPU hangs that appeared
- // to be caused by ALU instructions in the next instruction group that wrote
- // to the $src_gpr registers of the VTX_READ.
- // e.g.
- // %T3_X<def> = VTX_READ_PARAM_32_eg %T2_X<kill>, 24
- // %T2_X<def> = MOV %ZERO
-  // Adding this constraint prevents this from happening.
- let Constraints = "$src_gpr.ptr = $dst_gpr";
-}
-
-class VTX_READ_64_cm <bits<8> buffer_id, list<dag> pattern>
- : VTX_READ_cm <"VTX_READ_64 $dst_gpr, $src_gpr", buffer_id,
- (outs R600_Reg64:$dst_gpr), pattern> {
-
- let DST_SEL_X = 0;
- let DST_SEL_Y = 1;
- let DST_SEL_Z = 7;
- let DST_SEL_W = 7;
- let DATA_FORMAT = 0x1D; // COLOR_32_32
-}
-
-class VTX_READ_128_cm <bits<8> buffer_id, list<dag> pattern>
- : VTX_READ_cm <"VTX_READ_128 $dst_gpr.XYZW, $src_gpr", buffer_id,
- (outs R600_Reg128:$dst_gpr), pattern> {
-
- let DST_SEL_X = 0;
- let DST_SEL_Y = 1;
- let DST_SEL_Z = 2;
- let DST_SEL_W = 3;
- let DATA_FORMAT = 0x22; // COLOR_32_32_32_32
-
- // XXX: Need to force VTX_READ_128 instructions to write to the same register
- // that holds its buffer address to avoid potential hangs. We can't use
- // the same constraint as VTX_READ_32_eg, because the $src_gpr.ptr and $dst
- // registers are different sizes.
-}
-
-//===----------------------------------------------------------------------===//
-// VTX Read from parameter memory space
-//===----------------------------------------------------------------------===//
-def VTX_READ_PARAM_8_cm : VTX_READ_8_cm <0,
- [(set i32:$dst_gpr, (load_param_exti8 ADDRVTX_READ:$src_gpr))]
->;
-
-def VTX_READ_PARAM_16_cm : VTX_READ_16_cm <0,
- [(set i32:$dst_gpr, (load_param_exti16 ADDRVTX_READ:$src_gpr))]
->;
-
-def VTX_READ_PARAM_32_cm : VTX_READ_32_cm <0,
- [(set i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
->;
-
-def VTX_READ_PARAM_64_cm : VTX_READ_64_cm <0,
- [(set v2i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
->;
-
-def VTX_READ_PARAM_128_cm : VTX_READ_128_cm <0,
- [(set v4i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
->;
-
-//===----------------------------------------------------------------------===//
-// VTX Read from global memory space
-//===----------------------------------------------------------------------===//
-
-// 8-bit reads
-def VTX_READ_GLOBAL_8_cm : VTX_READ_8_cm <1,
- [(set i32:$dst_gpr, (az_extloadi8_global ADDRVTX_READ:$src_gpr))]
->;
-
-def VTX_READ_GLOBAL_16_cm : VTX_READ_16_cm <1,
- [(set i32:$dst_gpr, (az_extloadi16_global ADDRVTX_READ:$src_gpr))]
->;
-
-// 32-bit reads
-def VTX_READ_GLOBAL_32_cm : VTX_READ_32_cm <1,
- [(set i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
->;
-
-// 64-bit reads
-def VTX_READ_GLOBAL_64_cm : VTX_READ_64_cm <1,
- [(set v2i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
->;
-
-// 128-bit reads
-def VTX_READ_GLOBAL_128_cm : VTX_READ_128_cm <1,
- [(set v4i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
->;
-
-} // End isCayman
-
-//===----------------------------------------------------------------------===//
-// Branch Instructions
-//===----------------------------------------------------------------------===//
-
-
-def IF_PREDICATE_SET : ILFormat<(outs), (ins GPRI32:$src),
- "IF_PREDICATE_SET $src", []>;
//===----------------------------------------------------------------------===//
// Pseudo instructions
@@ -2104,15 +1338,6 @@ def TXD_SHADOW: InstR600 <
} // End isPseudo = 1
} // End usesCustomInserter = 1
-//===---------------------------------------------------------------------===//
-// Return instruction
-//===---------------------------------------------------------------------===//
-let isTerminator = 1, isReturn = 1, hasCtrlDep = 1,
- usesCustomInserter = 1 in {
- def RETURN : ILFormat<(outs), (ins variable_ops),
- "RETURN", [(IL_retflag)]>;
-}
-
//===----------------------------------------------------------------------===//
// Constant Buffer Addressing Support
@@ -2239,14 +1464,55 @@ let Inst{63-32} = Word1;
let VTXInst = 1;
}
+//===---------------------------------------------------------------------===//
+// Flow and Program control Instructions
+//===---------------------------------------------------------------------===//
+class ILFormat<dag outs, dag ins, string asmstr, list<dag> pattern>
+: Instruction {
+
+ let Namespace = "AMDGPU";
+ dag OutOperandList = outs;
+ dag InOperandList = ins;
+ let Pattern = pattern;
+ let AsmString = !strconcat(asmstr, "\n");
+ let isPseudo = 1;
+ let Itinerary = NullALU;
+ bit hasIEEEFlag = 0;
+ bit hasZeroOpFlag = 0;
+ let mayLoad = 0;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+}
+
+multiclass BranchConditional<SDNode Op, RegisterClass rci, RegisterClass rcf> {
+ def _i32 : ILFormat<(outs),
+ (ins brtarget:$target, rci:$src0),
+ "; i32 Pseudo branch instruction",
+ [(Op bb:$target, (i32 rci:$src0))]>;
+ def _f32 : ILFormat<(outs),
+ (ins brtarget:$target, rcf:$src0),
+ "; f32 Pseudo branch instruction",
+ [(Op bb:$target, (f32 rcf:$src0))]>;
+}
+
+// Only scalar types should generate flow control
+multiclass BranchInstr<string name> {
+ def _i32 : ILFormat<(outs), (ins R600_Reg32:$src),
+ !strconcat(name, " $src"), []>;
+ def _f32 : ILFormat<(outs), (ins R600_Reg32:$src),
+ !strconcat(name, " $src"), []>;
+}
+// Only scalar types should generate flow control
+multiclass BranchInstr2<string name> {
+ def _i32 : ILFormat<(outs), (ins R600_Reg32:$src0, R600_Reg32:$src1),
+ !strconcat(name, " $src0, $src1"), []>;
+ def _f32 : ILFormat<(outs), (ins R600_Reg32:$src0, R600_Reg32:$src1),
+ !strconcat(name, " $src0, $src1"), []>;
+}
-
-//===--------------------------------------------------------------------===//
-// Instructions support
-//===--------------------------------------------------------------------===//
//===---------------------------------------------------------------------===//
// Custom Inserter for Branches and returns, this eventually will be a
-// seperate pass
+// separate pass
//===---------------------------------------------------------------------===//
let isTerminator = 1, usesCustomInserter = 1, isBranch = 1, isBarrier = 1 in {
def BRANCH : ILFormat<(outs), (ins brtarget:$target),
@@ -2256,13 +1522,22 @@ let isTerminator = 1, usesCustomInserter = 1, isBranch = 1, isBarrier = 1 in {
}
//===---------------------------------------------------------------------===//
-// Flow and Program control Instructions
+// Return instruction
//===---------------------------------------------------------------------===//
+let isTerminator = 1, isReturn = 1, hasCtrlDep = 1,
+ usesCustomInserter = 1 in {
+ def RETURN : ILFormat<(outs), (ins variable_ops),
+ "RETURN", [(IL_retflag)]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Branch Instructions
+//===----------------------------------------------------------------------===//
+
+def IF_PREDICATE_SET : ILFormat<(outs), (ins R600_Reg32:$src),
+ "IF_PREDICATE_SET $src", []>;
+
let isTerminator=1 in {
- def SWITCH : ILFormat< (outs), (ins GPRI32:$src),
- !strconcat("SWITCH", " $src"), []>;
- def CASE : ILFormat< (outs), (ins GPRI32:$src),
- !strconcat("CASE", " $src"), []>;
def BREAK : ILFormat< (outs), (ins),
"BREAK", []>;
def CONTINUE : ILFormat< (outs), (ins),
@@ -2307,6 +1582,60 @@ let isTerminator=1 in {
}
//===----------------------------------------------------------------------===//
+// Indirect addressing pseudo instructions
+//===----------------------------------------------------------------------===//
+
+let isPseudo = 1 in {
+
+class ExtractVertical <RegisterClass vec_rc> : InstR600 <
+ (outs R600_Reg32:$dst),
+ (ins vec_rc:$vec, R600_Reg32:$index), "",
+ [],
+ AnyALU
+>;
+
+let Constraints = "$dst = $vec" in {
+
+class InsertVertical <RegisterClass vec_rc> : InstR600 <
+ (outs vec_rc:$dst),
+ (ins vec_rc:$vec, R600_Reg32:$value, R600_Reg32:$index), "",
+ [],
+ AnyALU
+>;
+
+} // End Constraints = "$dst = $vec"
+
+} // End isPseudo = 1
+
+def R600_EXTRACT_ELT_V2 : ExtractVertical <R600_Reg64Vertical>;
+def R600_EXTRACT_ELT_V4 : ExtractVertical <R600_Reg128Vertical>;
+
+def R600_INSERT_ELT_V2 : InsertVertical <R600_Reg64Vertical>;
+def R600_INSERT_ELT_V4 : InsertVertical <R600_Reg128Vertical>;
+
+class ExtractVerticalPat <Instruction inst, ValueType vec_ty,
+ ValueType scalar_ty> : Pat <
+ (scalar_ty (extractelt vec_ty:$vec, i32:$index)),
+ (inst $vec, $index)
+>;
+
+def : ExtractVerticalPat <R600_EXTRACT_ELT_V2, v2i32, i32>;
+def : ExtractVerticalPat <R600_EXTRACT_ELT_V2, v2f32, f32>;
+def : ExtractVerticalPat <R600_EXTRACT_ELT_V4, v4i32, i32>;
+def : ExtractVerticalPat <R600_EXTRACT_ELT_V4, v4f32, f32>;
+
+class InsertVerticalPat <Instruction inst, ValueType vec_ty,
+ ValueType scalar_ty> : Pat <
+ (vec_ty (insertelt vec_ty:$vec, scalar_ty:$value, i32:$index)),
+ (inst $vec, $value, $index)
+>;
+
+def : InsertVerticalPat <R600_INSERT_ELT_V2, v2i32, i32>;
+def : InsertVerticalPat <R600_INSERT_ELT_V2, v2f32, f32>;
+def : InsertVerticalPat <R600_INSERT_ELT_V4, v4i32, i32>;
+def : InsertVerticalPat <R600_INSERT_ELT_V4, v4f32, f32>;
+
+//===----------------------------------------------------------------------===//
// ISel Patterns
//===----------------------------------------------------------------------===//
@@ -2358,9 +1687,6 @@ def : Insert_Element <i32, v4i32, 1, sub1>;
def : Insert_Element <i32, v4i32, 2, sub2>;
def : Insert_Element <i32, v4i32, 3, sub3>;
-def : Vector4_Build <v4f32, f32>;
-def : Vector4_Build <v4i32, i32>;
-
def : Extract_Element <f32, v2f32, 0, sub0>;
def : Extract_Element <f32, v2f32, 1, sub1>;
@@ -2387,6 +1713,12 @@ def : DwordAddrPat <i32, R600_Reg32>;
} // End isR600toCayman Predicate
+let Predicates = [isR600] in {
+// Intrinsic patterns
+defm : Expand24IBitOps<MULLO_INT_r600, ADD_INT>;
+defm : Expand24UBitOps<MULLO_UINT_r600, ADD_INT>;
+} // End isR600
+
def getLDSNoRetOp : InstrMapping {
let FilterClass = "R600_LDS_1A1D";
let RowFields = ["BaseOp"];
diff --git a/contrib/llvm/lib/Target/R600/R600MachineFunctionInfo.h b/contrib/llvm/lib/Target/R600/R600MachineFunctionInfo.h
index c1bec0a..b0ae22e 100644
--- a/contrib/llvm/lib/Target/R600/R600MachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/R600/R600MachineFunctionInfo.h
@@ -21,7 +21,7 @@
namespace llvm {
class R600MachineFunctionInfo : public AMDGPUMachineFunction {
- virtual void anchor();
+ void anchor() override;
public:
R600MachineFunctionInfo(const MachineFunction &MF);
SmallVector<unsigned, 4> LiveOuts;
diff --git a/contrib/llvm/lib/Target/R600/R600MachineScheduler.cpp b/contrib/llvm/lib/Target/R600/R600MachineScheduler.cpp
index da2a4d8..7ea654c 100644
--- a/contrib/llvm/lib/Target/R600/R600MachineScheduler.cpp
+++ b/contrib/llvm/lib/Target/R600/R600MachineScheduler.cpp
@@ -12,9 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "misched"
-
#include "R600MachineScheduler.h"
+#include "AMDGPUSubtarget.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Pass.h"
@@ -23,9 +22,11 @@
using namespace llvm;
-void R600SchedStrategy::initialize(ScheduleDAGMI *dag) {
+#define DEBUG_TYPE "misched"
- DAG = dag;
+void R600SchedStrategy::initialize(ScheduleDAGMI *dag) {
+ assert(dag->hasVRegLiveness() && "R600SchedStrategy needs vreg liveness");
+ DAG = static_cast<ScheduleDAGMILive*>(dag);
TII = static_cast<const R600InstrInfo*>(DAG->TII);
TRI = static_cast<const R600RegisterInfo*>(DAG->TRI);
VLIW5 = !DAG->MF.getTarget().getSubtarget<AMDGPUSubtarget>().hasCaymanISA();
@@ -56,7 +57,7 @@ unsigned getWFCountLimitedByGPR(unsigned GPRCount) {
}
SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
- SUnit *SU = 0;
+ SUnit *SU = nullptr;
NextInstKind = IDOther;
IsTopNode = false;
@@ -72,7 +73,7 @@ SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
// OpenCL Programming Guide :
// The approx. number of WF that allows TEX inst to hide ALU inst is :
// 500 (cycles for TEX) / (AluFetchRatio * 8 (cycles for ALU))
- float ALUFetchRationEstimate =
+ float ALUFetchRationEstimate =
(AluInstCount + AvailablesAluCount() + Pending[IDAlu].size()) /
(FetchInstCount + Available[IDFetch].size());
unsigned NeededWF = 62.5f / ALUFetchRationEstimate;
@@ -316,7 +317,7 @@ int R600SchedStrategy::getInstKind(SUnit* SU) {
SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q, bool AnyALU) {
if (Q.empty())
- return NULL;
+ return nullptr;
for (std::vector<SUnit *>::reverse_iterator It = Q.rbegin(), E = Q.rend();
It != E; ++It) {
SUnit *SU = *It;
@@ -331,7 +332,7 @@ SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q, bool AnyALU) {
InstructionsGroupCandidate.pop_back();
}
}
- return NULL;
+ return nullptr;
}
void R600SchedStrategy::LoadAlu() {
@@ -448,11 +449,11 @@ SUnit* R600SchedStrategy::pickAlu() {
}
PrepareNextSlot();
}
- return NULL;
+ return nullptr;
}
SUnit* R600SchedStrategy::pickOther(int QID) {
- SUnit *SU = 0;
+ SUnit *SU = nullptr;
std::vector<SUnit *> &AQ = Available[QID];
if (AQ.empty()) {
@@ -464,4 +465,3 @@ SUnit* R600SchedStrategy::pickOther(int QID) {
}
return SU;
}
-
diff --git a/contrib/llvm/lib/Target/R600/R600MachineScheduler.h b/contrib/llvm/lib/Target/R600/R600MachineScheduler.h
index 97c8cde..fd475af 100644
--- a/contrib/llvm/lib/Target/R600/R600MachineScheduler.h
+++ b/contrib/llvm/lib/Target/R600/R600MachineScheduler.h
@@ -26,7 +26,7 @@ namespace llvm {
class R600SchedStrategy : public MachineSchedStrategy {
- const ScheduleDAGMI *DAG;
+ const ScheduleDAGMILive *DAG;
const R600InstrInfo *TII;
const R600RegisterInfo *TRI;
MachineRegisterInfo *MRI;
@@ -68,17 +68,16 @@ class R600SchedStrategy : public MachineSchedStrategy {
public:
R600SchedStrategy() :
- DAG(0), TII(0), TRI(0), MRI(0) {
+ DAG(nullptr), TII(nullptr), TRI(nullptr), MRI(nullptr) {
}
- virtual ~R600SchedStrategy() {
- }
+ virtual ~R600SchedStrategy() {}
- virtual void initialize(ScheduleDAGMI *dag);
- virtual SUnit *pickNode(bool &IsTopNode);
- virtual void schedNode(SUnit *SU, bool IsTopNode);
- virtual void releaseTopNode(SUnit *SU);
- virtual void releaseBottomNode(SUnit *SU);
+ void initialize(ScheduleDAGMI *dag) override;
+ SUnit *pickNode(bool &IsTopNode) override;
+ void schedNode(SUnit *SU, bool IsTopNode) override;
+ void releaseTopNode(SUnit *SU) override;
+ void releaseBottomNode(SUnit *SU) override;
private:
std::vector<MachineInstr *> InstructionsGroupCandidate;
diff --git a/contrib/llvm/lib/Target/R600/R600OptimizeVectorRegisters.cpp b/contrib/llvm/lib/Target/R600/R600OptimizeVectorRegisters.cpp
index cf719c0..2314136 100644
--- a/contrib/llvm/lib/Target/R600/R600OptimizeVectorRegisters.cpp
+++ b/contrib/llvm/lib/Target/R600/R600OptimizeVectorRegisters.cpp
@@ -27,27 +27,28 @@
/// to reduce MOV count.
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "vec-merger"
#include "llvm/Support/Debug.h"
#include "AMDGPU.h"
#include "R600InstrInfo.h"
#include "llvm/CodeGen/DFAPacketizer.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "vec-merger"
+
namespace {
static bool
isImplicitlyDef(MachineRegisterInfo &MRI, unsigned Reg) {
- for (MachineRegisterInfo::def_iterator It = MRI.def_begin(Reg),
- E = MRI.def_end(); It != E; ++It) {
+ for (MachineRegisterInfo::def_instr_iterator It = MRI.def_instr_begin(Reg),
+ E = MRI.def_instr_end(); It != E; ++It) {
return (*It).isImplicitDef();
}
if (MRI.isReserved(Reg)) {
@@ -63,7 +64,7 @@ public:
DenseMap<unsigned, unsigned> RegToChan;
std::vector<unsigned> UndefReg;
RegSeqInfo(MachineRegisterInfo &MRI, MachineInstr *MI) : Instr(MI) {
- assert (MI->getOpcode() == AMDGPU::REG_SEQUENCE);
+ assert(MI->getOpcode() == AMDGPU::REG_SEQUENCE);
for (unsigned i = 1, e = Instr->getNumOperands(); i < e; i+=2) {
MachineOperand &MO = Instr->getOperand(i);
unsigned Chan = Instr->getOperand(i + 1).getImm();
@@ -107,9 +108,9 @@ private:
public:
static char ID;
R600VectorRegMerger(TargetMachine &tm) : MachineFunctionPass(ID),
- TII(0) { }
+ TII(nullptr) { }
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<MachineDominatorTree>();
AU.addPreserved<MachineDominatorTree>();
@@ -118,11 +119,11 @@ public:
MachineFunctionPass::getAnalysisUsage(AU);
}
- const char *getPassName() const {
+ const char *getPassName() const override {
return "R600 Vector Registers Merge Pass";
}
- bool runOnMachineFunction(MachineFunction &Fn);
+ bool runOnMachineFunction(MachineFunction &Fn) override;
};
char R600VectorRegMerger::ID = 0;
@@ -213,8 +214,8 @@ MachineInstr *R600VectorRegMerger::RebuildVector(
DEBUG(dbgs() << " ->"; Pos->dump(););
DEBUG(dbgs() << " Updating Swizzle:\n");
- for (MachineRegisterInfo::use_iterator It = MRI->use_begin(Reg),
- E = MRI->use_end(); It != E; ++It) {
+ for (MachineRegisterInfo::use_instr_iterator It = MRI->use_instr_begin(Reg),
+ E = MRI->use_instr_end(); It != E; ++It) {
DEBUG(dbgs() << " ";(*It).dump(); dbgs() << " ->");
SwizzleInput(*It, RemapChan);
DEBUG((*It).dump());
@@ -261,8 +262,8 @@ void R600VectorRegMerger::SwizzleInput(MachineInstr &MI,
}
bool R600VectorRegMerger::areAllUsesSwizzeable(unsigned Reg) const {
- for (MachineRegisterInfo::use_iterator It = MRI->use_begin(Reg),
- E = MRI->use_end(); It != E; ++It) {
+ for (MachineRegisterInfo::use_instr_iterator It = MRI->use_instr_begin(Reg),
+ E = MRI->use_instr_end(); It != E; ++It) {
if (!canSwizzle(*It))
return false;
}
@@ -328,8 +329,9 @@ bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) {
if (MI->getOpcode() != AMDGPU::REG_SEQUENCE) {
if (TII->get(MI->getOpcode()).TSFlags & R600_InstFlag::TEX_INST) {
unsigned Reg = MI->getOperand(1).getReg();
- for (MachineRegisterInfo::def_iterator It = MRI->def_begin(Reg),
- E = MRI->def_end(); It != E; ++It) {
+ for (MachineRegisterInfo::def_instr_iterator
+ It = MRI->def_instr_begin(Reg), E = MRI->def_instr_end();
+ It != E; ++It) {
RemoveMI(&(*It));
}
}
diff --git a/contrib/llvm/lib/Target/R600/R600Packetizer.cpp b/contrib/llvm/lib/Target/R600/R600Packetizer.cpp
index cd9b6ea..74cf309 100644
--- a/contrib/llvm/lib/Target/R600/R600Packetizer.cpp
+++ b/contrib/llvm/lib/Target/R600/R600Packetizer.cpp
@@ -14,9 +14,9 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "packets"
#include "llvm/Support/Debug.h"
#include "AMDGPU.h"
+#include "AMDGPUSubtarget.h"
#include "R600InstrInfo.h"
#include "llvm/CodeGen/DFAPacketizer.h"
#include "llvm/CodeGen/MachineDominators.h"
@@ -28,6 +28,8 @@
using namespace llvm;
+#define DEBUG_TYPE "packets"
+
namespace {
class R600Packetizer : public MachineFunctionPass {
@@ -36,7 +38,7 @@ public:
static char ID;
R600Packetizer(const TargetMachine &TM) : MachineFunctionPass(ID) {}
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<MachineDominatorTree>();
AU.addPreserved<MachineDominatorTree>();
@@ -45,11 +47,11 @@ public:
MachineFunctionPass::getAnalysisUsage(AU);
}
- const char *getPassName() const {
+ const char *getPassName() const override {
return "R600 Packetizer";
}
- bool runOnMachineFunction(MachineFunction &Fn);
+ bool runOnMachineFunction(MachineFunction &Fn) override;
};
char R600Packetizer::ID = 0;
@@ -66,7 +68,7 @@ private:
}
/// \returns register to PV chan mapping for bundle/single instructions that
- /// immediatly precedes I.
+ /// immediately precedes I.
DenseMap<unsigned, unsigned> getPreviousVector(MachineBasicBlock::iterator I)
const {
DenseMap<unsigned, unsigned> Result;
@@ -155,18 +157,19 @@ public:
}
// initPacketizerState - initialize some internal flags.
- void initPacketizerState() {
+ void initPacketizerState() override {
ConsideredInstUsesAlreadyWrittenVectorElement = false;
}
// ignorePseudoInstruction - Ignore bundling of pseudo instructions.
- bool ignorePseudoInstruction(MachineInstr *MI, MachineBasicBlock *MBB) {
+ bool ignorePseudoInstruction(MachineInstr *MI,
+ MachineBasicBlock *MBB) override {
return false;
}
// isSoloInstruction - return true if instruction MI can not be packetized
// with any other instruction, which means that MI itself is a packet.
- bool isSoloInstruction(MachineInstr *MI) {
+ bool isSoloInstruction(MachineInstr *MI) override {
if (TII->isVector(*MI))
return true;
if (!TII->isALUInstr(MI->getOpcode()))
@@ -182,7 +185,7 @@ public:
// isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ
// together.
- bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
+ bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) override {
MachineInstr *MII = SUI->getInstr(), *MIJ = SUJ->getInstr();
if (getSlot(MII) == getSlot(MIJ))
ConsideredInstUsesAlreadyWrittenVectorElement = true;
@@ -219,7 +222,9 @@ public:
// isLegalToPruneDependencies - Is it legal to prune dependece between SUI
// and SUJ.
- bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {return false;}
+ bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) override {
+ return false;
+ }
void setIsLastBit(MachineInstr *MI, unsigned Bit) const {
unsigned LastOp = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::last);
@@ -288,7 +293,7 @@ public:
return true;
}
- MachineBasicBlock::iterator addToPacket(MachineInstr *MI) {
+ MachineBasicBlock::iterator addToPacket(MachineInstr *MI) override {
MachineBasicBlock::iterator FirstInBundle =
CurrentPacketMIs.empty() ? MI : CurrentPacketMIs.front();
const DenseMap<unsigned, unsigned> &PV =
@@ -311,7 +316,7 @@ public:
substitutePV(MI, PV);
MachineBasicBlock::iterator It = VLIWPacketizerList::addToPacket(MI);
if (isTransSlot) {
- endPacket(llvm::next(It)->getParent(), llvm::next(It));
+ endPacket(std::next(It)->getParent(), std::next(It));
}
return It;
}
@@ -371,20 +376,20 @@ bool R600Packetizer::runOnMachineFunction(MachineFunction &Fn) {
// instruction stream until we find the nearest boundary.
MachineBasicBlock::iterator I = RegionEnd;
for(;I != MBB->begin(); --I, --RemainingCount) {
- if (TII->isSchedulingBoundary(llvm::prior(I), MBB, Fn))
+ if (TII->isSchedulingBoundary(std::prev(I), MBB, Fn))
break;
}
I = MBB->begin();
// Skip empty scheduling regions.
if (I == RegionEnd) {
- RegionEnd = llvm::prior(RegionEnd);
+ RegionEnd = std::prev(RegionEnd);
--RemainingCount;
continue;
}
// Skip regions with one instruction.
- if (I == llvm::prior(RegionEnd)) {
- RegionEnd = llvm::prior(RegionEnd);
+ if (I == std::prev(RegionEnd)) {
+ RegionEnd = std::prev(RegionEnd);
continue;
}
diff --git a/contrib/llvm/lib/Target/R600/R600RegisterInfo.cpp b/contrib/llvm/lib/Target/R600/R600RegisterInfo.cpp
index f3bb88b..dc95675 100644
--- a/contrib/llvm/lib/Target/R600/R600RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/R600/R600RegisterInfo.cpp
@@ -20,15 +20,14 @@
using namespace llvm;
-R600RegisterInfo::R600RegisterInfo(AMDGPUTargetMachine &tm)
-: AMDGPURegisterInfo(tm),
- TM(tm)
+R600RegisterInfo::R600RegisterInfo(const AMDGPUSubtarget &st)
+: AMDGPURegisterInfo(st)
{ RCW.RegWeight = 0; RCW.WeightLimit = 0;}
BitVector R600RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
- const R600InstrInfo *TII = static_cast<const R600InstrInfo*>(TM.getInstrInfo());
+ const R600InstrInfo *TII = static_cast<const R600InstrInfo*>(ST.getInstrInfo());
Reserved.set(AMDGPU::ZERO);
Reserved.set(AMDGPU::HALF);
@@ -55,16 +54,6 @@ BitVector R600RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
return Reserved;
}
-const TargetRegisterClass *
-R600RegisterInfo::getISARegClass(const TargetRegisterClass * rc) const {
- switch (rc->getID()) {
- case AMDGPU::GPRF32RegClassID:
- case AMDGPU::GPRI32RegClassID:
- return &AMDGPU::R600_Reg32RegClass;
- default: return rc;
- }
-}
-
unsigned R600RegisterInfo::getHWRegChan(unsigned reg) const {
return this->getEncodingValue(reg) >> HW_CHAN_SHIFT;
}
diff --git a/contrib/llvm/lib/Target/R600/R600RegisterInfo.h b/contrib/llvm/lib/Target/R600/R600RegisterInfo.h
index c74c49e..247808b 100644
--- a/contrib/llvm/lib/Target/R600/R600RegisterInfo.h
+++ b/contrib/llvm/lib/Target/R600/R600RegisterInfo.h
@@ -16,39 +16,32 @@
#define R600REGISTERINFO_H_
#include "AMDGPURegisterInfo.h"
-#include "AMDGPUTargetMachine.h"
namespace llvm {
-class R600TargetMachine;
+class AMDGPUSubtarget;
struct R600RegisterInfo : public AMDGPURegisterInfo {
- AMDGPUTargetMachine &TM;
RegClassWeight RCW;
- R600RegisterInfo(AMDGPUTargetMachine &tm);
+ R600RegisterInfo(const AMDGPUSubtarget &st);
- virtual BitVector getReservedRegs(const MachineFunction &MF) const;
-
- /// \param RC is an AMDIL reg class.
- ///
- /// \returns the R600 reg class that is equivalent to \p RC.
- virtual const TargetRegisterClass *getISARegClass(
- const TargetRegisterClass *RC) const;
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
/// \brief get the HW encoding for a register's channel.
unsigned getHWRegChan(unsigned reg) const;
- virtual unsigned getHWRegIndex(unsigned Reg) const;
+ unsigned getHWRegIndex(unsigned Reg) const override;
/// \brief get the register class of the specified type to use in the
/// CFGStructurizer
- virtual const TargetRegisterClass * getCFGStructurizerRegClass(MVT VT) const;
+ const TargetRegisterClass * getCFGStructurizerRegClass(MVT VT) const override;
- virtual const RegClassWeight &getRegClassWeight(const TargetRegisterClass *RC) const;
+ const RegClassWeight &
+ getRegClassWeight(const TargetRegisterClass *RC) const override;
  // \returns true if \p Reg can be defined in one ALU clause and used in another.
- virtual bool isPhysRegLiveAcrossClauses(unsigned Reg) const;
+ bool isPhysRegLiveAcrossClauses(unsigned Reg) const;
};
} // End namespace llvm
diff --git a/contrib/llvm/lib/Target/R600/R600RegisterInfo.td b/contrib/llvm/lib/Target/R600/R600RegisterInfo.td
index 68bcd20..cc667d9 100644
--- a/contrib/llvm/lib/Target/R600/R600RegisterInfo.td
+++ b/contrib/llvm/lib/Target/R600/R600RegisterInfo.td
@@ -18,18 +18,28 @@ class R600RegWithChan <string name, bits<9> sel, string chan> :
class R600Reg_128<string n, list<Register> subregs, bits<16> encoding> :
RegisterWithSubRegs<n, subregs> {
+ field bits<2> chan_encoding = 0;
let Namespace = "AMDGPU";
let SubRegIndices = [sub0, sub1, sub2, sub3];
- let HWEncoding = encoding;
+ let HWEncoding{8-0} = encoding{8-0};
+ let HWEncoding{10-9} = chan_encoding;
}
class R600Reg_64<string n, list<Register> subregs, bits<16> encoding> :
RegisterWithSubRegs<n, subregs> {
+ field bits<2> chan_encoding = 0;
let Namespace = "AMDGPU";
let SubRegIndices = [sub0, sub1];
let HWEncoding = encoding;
+ let HWEncoding{8-0} = encoding{8-0};
+ let HWEncoding{10-9} = chan_encoding;
}
+class R600Reg_64Vertical<int lo, int hi, string chan> : R600Reg_64 <
+ "V"#lo#hi#"_"#chan,
+ [!cast<Register>("T"#lo#"_"#chan), !cast<Register>("T"#hi#"_"#chan)],
+ lo
+>;
foreach Index = 0-127 in {
foreach Chan = [ "X", "Y", "Z", "W" ] in {
@@ -54,6 +64,24 @@ foreach Index = 0-127 in {
Index>;
}
+foreach Chan = [ "X", "Y", "Z", "W"] in {
+
+ let chan_encoding = !if(!eq(Chan, "X"), 0,
+ !if(!eq(Chan, "Y"), 1,
+ !if(!eq(Chan, "Z"), 2,
+ !if(!eq(Chan, "W"), 3, 0)))) in {
+ def V0123_#Chan : R600Reg_128 <"V0123_"#Chan,
+ [!cast<Register>("T0_"#Chan),
+ !cast<Register>("T1_"#Chan),
+ !cast<Register>("T2_"#Chan),
+ !cast<Register>("T3_"#Chan)],
+ 0>;
+ def V01_#Chan : R600Reg_64Vertical<0, 1, Chan>;
+ def V23_#Chan : R600Reg_64Vertical<2, 3, Chan>;
+ }
+}
+
+
// KCACHE_BANK0
foreach Index = 159-128 in {
foreach Chan = [ "X", "Y", "Z", "W" ] in {
@@ -130,8 +158,14 @@ def ALU_PARAM : R600Reg<"Param", 0>;
let isAllocatable = 0 in {
-// XXX: Only use the X channel, until we support wider stack widths
-def R600_Addr : RegisterClass <"AMDGPU", [i32], 127, (add (sequence "Addr%u_X", 0, 127))>;
+def R600_Addr : RegisterClass <"AMDGPU", [i32], 32, (add (sequence "Addr%u_X", 0, 127))>;
+
+// We only use Addr_[YZW] for vertical vectors.
+// FIXME if we add more vertical vector registers we will need to add more
+// registers to these classes.
+def R600_Addr_Y : RegisterClass <"AMDGPU", [i32], 32, (add Addr0_Y)>;
+def R600_Addr_Z : RegisterClass <"AMDGPU", [i32], 32, (add Addr0_Z)>;
+def R600_Addr_W : RegisterClass <"AMDGPU", [i32], 32, (add Addr0_W)>;
def R600_LDS_SRC_REG : RegisterClass<"AMDGPU", [i32], 32,
(add OQA, OQB, OQAP, OQBP, LDS_DIRECT_A, LDS_DIRECT_B)>;
@@ -206,5 +240,13 @@ def R600_Reg128 : RegisterClass<"AMDGPU", [v4f32, v4i32], 128,
let CopyCost = -1;
}
+def R600_Reg128Vertical : RegisterClass<"AMDGPU", [v4f32, v4i32], 128,
+ (add V0123_W, V0123_Z, V0123_Y, V0123_X)
+>;
+
def R600_Reg64 : RegisterClass<"AMDGPU", [v2f32, v2i32], 64,
(add (sequence "T%u_XY", 0, 63))>;
+
+def R600_Reg64Vertical : RegisterClass<"AMDGPU", [v2f32, v2i32], 64,
+ (add V01_X, V01_Y, V01_Z, V01_W,
+ V23_X, V23_Y, V23_Z, V23_W)>;
diff --git a/contrib/llvm/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp b/contrib/llvm/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp
index 3258894..419ec8b 100644
--- a/contrib/llvm/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp
+++ b/contrib/llvm/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp
@@ -18,7 +18,7 @@
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
-#include "llvm/InstVisitor.h"
+#include "llvm/IR/InstVisitor.h"
using namespace llvm;
@@ -209,7 +209,7 @@ public:
FunctionPass(ID) {
}
- virtual bool doInitialization(Module &M) {
+ bool doInitialization(Module &M) override {
LLVMContext &Ctx = M.getContext();
Mod = &M;
FloatType = Type::getFloatTy(Ctx);
@@ -245,16 +245,16 @@ public:
return false;
}
- virtual bool runOnFunction(Function &F) {
+ bool runOnFunction(Function &F) override {
visit(F);
return false;
}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "R600 Texture Intrinsics Replacer";
}
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
}
void visitCallInst(CallInst &I) {
diff --git a/contrib/llvm/lib/Target/R600/R700Instructions.td b/contrib/llvm/lib/Target/R600/R700Instructions.td
new file mode 100644
index 0000000..9aad85d
--- /dev/null
+++ b/contrib/llvm/lib/Target/R600/R700Instructions.td
@@ -0,0 +1,21 @@
+//===-- R700Instructions.td - R700 Instruction defs -------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// TableGen definitions for instructions which are:
+// - Available to R700 and newer VLIW4/VLIW5 GPUs
+// - Available only on R700 family GPUs.
+//
+//===----------------------------------------------------------------------===//
+
+def isR700 : Predicate<"Subtarget.getGeneration() == AMDGPUSubtarget::R700">;
+
+let Predicates = [isR700] in {
+ def SIN_r700 : SIN_Common<0x6E>;
+ def COS_r700 : COS_Common<0x6F>;
+}
diff --git a/contrib/llvm/lib/Target/R600/SIAnnotateControlFlow.cpp b/contrib/llvm/lib/Target/R600/SIAnnotateControlFlow.cpp
index 6bbdf59..91eb60b 100644
--- a/contrib/llvm/lib/Target/R600/SIAnnotateControlFlow.cpp
+++ b/contrib/llvm/lib/Target/R600/SIAnnotateControlFlow.cpp
@@ -14,8 +14,8 @@
#include "AMDGPU.h"
#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
@@ -24,6 +24,8 @@
using namespace llvm;
+#define DEBUG_TYPE "si-annotate-control-flow"
+
namespace {
// Complex types used in this pass
@@ -63,7 +65,6 @@ class SIAnnotateControlFlow : public FunctionPass {
DominatorTree *DT;
StackVector Stack;
- SSAUpdater PhiInserter;
bool isTopOfStack(BasicBlock *BB);
@@ -79,7 +80,7 @@ class SIAnnotateControlFlow : public FunctionPass {
void insertElse(BranchInst *Term);
- void handleLoopCondition(Value *Cond);
+ Value *handleLoopCondition(Value *Cond, PHINode *Broken);
void handleLoop(BranchInst *Term);
@@ -89,17 +90,17 @@ public:
SIAnnotateControlFlow():
FunctionPass(ID) { }
- virtual bool doInitialization(Module &M);
+ bool doInitialization(Module &M) override;
- virtual bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "SI annotate control flow";
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<DominatorTree>();
- AU.addPreserved<DominatorTree>();
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
FunctionPass::getAnalysisUsage(AU);
}
@@ -116,7 +117,7 @@ bool SIAnnotateControlFlow::doInitialization(Module &M) {
Void = Type::getVoidTy(Context);
Boolean = Type::getInt1Ty(Context);
Int64 = Type::getInt64Ty(Context);
- ReturnStruct = StructType::get(Boolean, Int64, (Type *)0);
+ ReturnStruct = StructType::get(Boolean, Int64, (Type *)nullptr);
BoolTrue = ConstantInt::getTrue(Context);
BoolFalse = ConstantInt::getFalse(Context);
@@ -124,25 +125,25 @@ bool SIAnnotateControlFlow::doInitialization(Module &M) {
Int64Zero = ConstantInt::get(Int64, 0);
If = M.getOrInsertFunction(
- IfIntrinsic, ReturnStruct, Boolean, (Type *)0);
+ IfIntrinsic, ReturnStruct, Boolean, (Type *)nullptr);
Else = M.getOrInsertFunction(
- ElseIntrinsic, ReturnStruct, Int64, (Type *)0);
+ ElseIntrinsic, ReturnStruct, Int64, (Type *)nullptr);
Break = M.getOrInsertFunction(
- BreakIntrinsic, Int64, Int64, (Type *)0);
+ BreakIntrinsic, Int64, Int64, (Type *)nullptr);
IfBreak = M.getOrInsertFunction(
- IfBreakIntrinsic, Int64, Boolean, Int64, (Type *)0);
+ IfBreakIntrinsic, Int64, Boolean, Int64, (Type *)nullptr);
ElseBreak = M.getOrInsertFunction(
- ElseBreakIntrinsic, Int64, Int64, Int64, (Type *)0);
+ ElseBreakIntrinsic, Int64, Int64, Int64, (Type *)nullptr);
Loop = M.getOrInsertFunction(
- LoopIntrinsic, Boolean, Int64, (Type *)0);
+ LoopIntrinsic, Boolean, Int64, (Type *)nullptr);
EndCf = M.getOrInsertFunction(
- EndCfIntrinsic, Void, Int64, (Type *)0);
+ EndCfIntrinsic, Void, Int64, (Type *)nullptr);
return false;
}
@@ -175,7 +176,7 @@ bool SIAnnotateControlFlow::isElse(PHINode *Phi) {
} else {
if (Phi->getIncomingValue(i) != BoolFalse)
return false;
-
+
}
}
return true;
@@ -202,20 +203,26 @@ void SIAnnotateControlFlow::insertElse(BranchInst *Term) {
}
/// \brief Recursively handle the condition leading to a loop
-void SIAnnotateControlFlow::handleLoopCondition(Value *Cond) {
+Value *SIAnnotateControlFlow::handleLoopCondition(Value *Cond, PHINode *Broken) {
if (PHINode *Phi = dyn_cast<PHINode>(Cond)) {
+ BasicBlock *Parent = Phi->getParent();
+ PHINode *NewPhi = PHINode::Create(Int64, 0, "", &Parent->front());
+ Value *Ret = NewPhi;
- // Handle all non constant incoming values first
+ // Handle all non-constant incoming values first
for (unsigned i = 0, e = Phi->getNumIncomingValues(); i != e; ++i) {
Value *Incoming = Phi->getIncomingValue(i);
- if (isa<ConstantInt>(Incoming))
+ BasicBlock *From = Phi->getIncomingBlock(i);
+ if (isa<ConstantInt>(Incoming)) {
+ NewPhi->addIncoming(Broken, From);
continue;
+ }
Phi->setIncomingValue(i, BoolFalse);
- handleLoopCondition(Incoming);
+ Value *PhiArg = handleLoopCondition(Incoming, Broken);
+ NewPhi->addIncoming(PhiArg, From);
}
- BasicBlock *Parent = Phi->getParent();
BasicBlock *IDom = DT->getNode(Parent)->getIDom()->getBlock();
for (unsigned i = 0, e = Phi->getNumIncomingValues(); i != e; ++i) {
@@ -228,33 +235,28 @@ void SIAnnotateControlFlow::handleLoopCondition(Value *Cond) {
if (From == IDom) {
CallInst *OldEnd = dyn_cast<CallInst>(Parent->getFirstInsertionPt());
if (OldEnd && OldEnd->getCalledFunction() == EndCf) {
- Value *Args[] = {
- OldEnd->getArgOperand(0),
- PhiInserter.GetValueAtEndOfBlock(Parent)
- };
- Value *Ret = CallInst::Create(ElseBreak, Args, "", OldEnd);
- PhiInserter.AddAvailableValue(Parent, Ret);
+ Value *Args[] = { OldEnd->getArgOperand(0), NewPhi };
+ Ret = CallInst::Create(ElseBreak, Args, "", OldEnd);
continue;
}
}
-
TerminatorInst *Insert = From->getTerminator();
- Value *Arg = PhiInserter.GetValueAtEndOfBlock(From);
- Value *Ret = CallInst::Create(Break, Arg, "", Insert);
- PhiInserter.AddAvailableValue(From, Ret);
+ Value *PhiArg = CallInst::Create(Break, Broken, "", Insert);
+ NewPhi->setIncomingValue(i, PhiArg);
}
eraseIfUnused(Phi);
+ return Ret;
} else if (Instruction *Inst = dyn_cast<Instruction>(Cond)) {
BasicBlock *Parent = Inst->getParent();
TerminatorInst *Insert = Parent->getTerminator();
- Value *Args[] = { Cond, PhiInserter.GetValueAtEndOfBlock(Parent) };
- Value *Ret = CallInst::Create(IfBreak, Args, "", Insert);
- PhiInserter.AddAvailableValue(Parent, Ret);
+ Value *Args[] = { Cond, Broken };
+ return CallInst::Create(IfBreak, Args, "", Insert);
} else {
- assert(0 && "Unhandled loop condition!");
+ llvm_unreachable("Unhandled loop condition!");
}
+ return 0;
}
/// \brief Handle a back edge (loop)
@@ -262,15 +264,11 @@ void SIAnnotateControlFlow::handleLoop(BranchInst *Term) {
BasicBlock *Target = Term->getSuccessor(1);
PHINode *Broken = PHINode::Create(Int64, 0, "", &Target->front());
- PhiInserter.Initialize(Int64, "");
- PhiInserter.AddAvailableValue(Target, Broken);
-
Value *Cond = Term->getCondition();
Term->setCondition(BoolTrue);
- handleLoopCondition(Cond);
+ Value *Arg = handleLoopCondition(Cond, Broken);
BasicBlock *BB = Term->getParent();
- Value *Arg = PhiInserter.GetValueAtEndOfBlock(BB);
for (pred_iterator PI = pred_begin(Target), PE = pred_end(Target);
PI != PE; ++PI) {
@@ -289,7 +287,7 @@ void SIAnnotateControlFlow::closeControlFlow(BasicBlock *BB) {
/// \brief Annotate the control flow with intrinsics so the backend can
/// recognize if/then/else and loops.
bool SIAnnotateControlFlow::runOnFunction(Function &F) {
- DT = &getAnalysis<DominatorTree>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
for (df_iterator<BasicBlock *> I = df_begin(&F.getEntryBlock()),
E = df_end(&F.getEntryBlock()); I != E; ++I) {
diff --git a/contrib/llvm/lib/Target/R600/SIDefines.h b/contrib/llvm/lib/Target/R600/SIDefines.h
index 2cbce28..b7e7a2d 100644
--- a/contrib/llvm/lib/Target/R600/SIDefines.h
+++ b/contrib/llvm/lib/Target/R600/SIDefines.h
@@ -32,7 +32,61 @@ enum {
#define S_00B028_VGPRS(x) (((x) & 0x3F) << 0)
#define S_00B028_SGPRS(x) (((x) & 0x0F) << 6)
#define R_00B84C_COMPUTE_PGM_RSRC2 0x00B84C
+#define S_00B02C_SCRATCH_EN(x) (((x) & 0x1) << 0)
#define S_00B84C_LDS_SIZE(x) (((x) & 0x1FF) << 15)
#define R_0286CC_SPI_PS_INPUT_ENA 0x0286CC
+
+#define R_00B848_COMPUTE_PGM_RSRC1 0x00B848
+#define S_00B848_VGPRS(x) (((x) & 0x3F) << 0)
+#define G_00B848_VGPRS(x) (((x) >> 0) & 0x3F)
+#define C_00B848_VGPRS 0xFFFFFFC0
+#define S_00B848_SGPRS(x) (((x) & 0x0F) << 6)
+#define G_00B848_SGPRS(x) (((x) >> 6) & 0x0F)
+#define C_00B848_SGPRS 0xFFFFFC3F
+#define S_00B848_PRIORITY(x) (((x) & 0x03) << 10)
+#define G_00B848_PRIORITY(x) (((x) >> 10) & 0x03)
+#define C_00B848_PRIORITY 0xFFFFF3FF
+#define S_00B848_FLOAT_MODE(x) (((x) & 0xFF) << 12)
+#define G_00B848_FLOAT_MODE(x) (((x) >> 12) & 0xFF)
+#define C_00B848_FLOAT_MODE 0xFFF00FFF
+#define S_00B848_PRIV(x) (((x) & 0x1) << 20)
+#define G_00B848_PRIV(x) (((x) >> 20) & 0x1)
+#define C_00B848_PRIV 0xFFEFFFFF
+#define S_00B848_DX10_CLAMP(x) (((x) & 0x1) << 21)
+#define G_00B848_DX10_CLAMP(x) (((x) >> 21) & 0x1)
+#define C_00B848_DX10_CLAMP 0xFFDFFFFF
+#define S_00B848_DEBUG_MODE(x) (((x) & 0x1) << 22)
+#define G_00B848_DEBUG_MODE(x) (((x) >> 22) & 0x1)
+#define C_00B848_DEBUG_MODE 0xFFBFFFFF
+#define S_00B848_IEEE_MODE(x) (((x) & 0x1) << 23)
+#define G_00B848_IEEE_MODE(x) (((x) >> 23) & 0x1)
+#define C_00B848_IEEE_MODE 0xFF7FFFFF
+
+
+// Helpers for setting FLOAT_MODE
+#define FP_ROUND_ROUND_TO_NEAREST 0
+#define FP_ROUND_ROUND_TO_INF 1
+#define FP_ROUND_ROUND_TO_NEGINF 2
+#define FP_ROUND_ROUND_TO_ZERO 3
+
+// Bits 3:0 control rounding mode. 1:0 control single precision, 3:2 double
+// precision.
+#define FP_ROUND_MODE_SP(x) ((x) & 0x3)
+#define FP_ROUND_MODE_DP(x) (((x) & 0x3) << 2)
+
+#define FP_DENORM_FLUSH_IN_FLUSH_OUT 0
+#define FP_DENORM_FLUSH_OUT 1
+#define FP_DENORM_FLUSH_IN 2
+#define FP_DENORM_FLUSH_NONE 3
+
+
+// Bits 7:4 control denormal handling. 5:4 control single precision, 7:6 double
+// precision.
+#define FP_DENORM_MODE_SP(x) (((x) & 0x3) << 4)
+#define FP_DENORM_MODE_DP(x) (((x) & 0x3) << 6)
+
+#define R_00B860_COMPUTE_TMPRING_SIZE 0x00B860
+#define S_00B860_WAVESIZE(x) (((x) & 0x1FFF) << 12)
+
#endif // SIDEFINES_H_
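The new COMPUTE_PGM_RSRC1 helpers above follow the S_*/G_*/C_* set/get/clear macro convention, and the FP_ROUND_*/FP_DENORM_* values are meant to be OR-ed together into the 8-bit FLOAT_MODE field (bits 19:12 of that register). The following is a minimal C++ sketch of that composition; the handful of macros are restated verbatim from the hunk above so the fragment stands alone, and the particular choice of rounding and denormal modes is arbitrary, purely for illustration.

  // Restated from the SIDefines.h hunk above so this fragment is self-contained.
  #define FP_ROUND_ROUND_TO_NEAREST 0
  #define FP_ROUND_ROUND_TO_ZERO 3
  #define FP_ROUND_MODE_SP(x) ((x) & 0x3)
  #define FP_ROUND_MODE_DP(x) (((x) & 0x3) << 2)
  #define FP_DENORM_FLUSH_IN_FLUSH_OUT 0
  #define FP_DENORM_MODE_SP(x) (((x) & 0x3) << 4)
  #define FP_DENORM_MODE_DP(x) (((x) & 0x3) << 6)
  #define S_00B848_FLOAT_MODE(x) (((x) & 0xFF) << 12)

  // Example only: round-to-nearest for single precision, round-to-zero for
  // double, flush denormals on input and output for both precisions.
  unsigned FloatMode = FP_ROUND_MODE_SP(FP_ROUND_ROUND_TO_NEAREST) |
                       FP_ROUND_MODE_DP(FP_ROUND_ROUND_TO_ZERO) |
                       FP_DENORM_MODE_SP(FP_DENORM_FLUSH_IN_FLUSH_OUT) |
                       FP_DENORM_MODE_DP(FP_DENORM_FLUSH_IN_FLUSH_OUT);
  unsigned Rsrc1FloatMode = S_00B848_FLOAT_MODE(FloatMode); // lands in bits 19:12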
diff --git a/contrib/llvm/lib/Target/R600/SIFixSGPRCopies.cpp b/contrib/llvm/lib/Target/R600/SIFixSGPRCopies.cpp
index f0065ea..5f71453 100644
--- a/contrib/llvm/lib/Target/R600/SIFixSGPRCopies.cpp
+++ b/contrib/llvm/lib/Target/R600/SIFixSGPRCopies.cpp
@@ -65,7 +65,6 @@
/// ultimately led to the creation of an illegal COPY.
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "sgpr-copies"
#include "AMDGPU.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -77,6 +76,8 @@
using namespace llvm;
+#define DEBUG_TYPE "sgpr-copies"
+
namespace {
class SIFixSGPRCopies : public MachineFunctionPass {
@@ -97,9 +98,9 @@ private:
public:
SIFixSGPRCopies(TargetMachine &tm) : MachineFunctionPass(ID) { }
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- const char *getPassName() const {
+ const char *getPassName() const override {
return "SI Fix SGPR copies";
}
@@ -141,8 +142,8 @@ const TargetRegisterClass *SIFixSGPRCopies::inferRegClassFromUses(
const TargetRegisterClass *RC = MRI.getRegClass(Reg);
RC = TRI->getSubRegClass(RC, SubReg);
- for (MachineRegisterInfo::use_iterator I = MRI.use_begin(Reg),
- E = MRI.use_end(); I != E; ++I) {
+ for (MachineRegisterInfo::use_instr_iterator
+ I = MRI.use_instr_begin(Reg), E = MRI.use_instr_end(); I != E; ++I) {
switch (I->getOpcode()) {
case AMDGPU::COPY:
RC = TRI->getCommonSubClass(RC, inferRegClassFromUses(TRI, MRI,
@@ -184,7 +185,8 @@ bool SIFixSGPRCopies::isVGPRToSGPRCopy(const MachineInstr &Copy,
const TargetRegisterClass *SrcRC;
if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
- DstRC == &AMDGPU::M0RegRegClass)
+ DstRC == &AMDGPU::M0RegRegClass ||
+ MRI.getRegClass(SrcReg) == &AMDGPU::VReg_1RegClass)
return false;
SrcRC = TRI->getSubRegClass(MRI.getRegClass(SrcReg), SrcSubReg);
@@ -256,6 +258,19 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
TII->moveToVALU(MI);
break;
}
+ case AMDGPU::INSERT_SUBREG: {
+ const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
+ DstRC = MRI.getRegClass(MI.getOperand(0).getReg());
+ Src0RC = MRI.getRegClass(MI.getOperand(1).getReg());
+ Src1RC = MRI.getRegClass(MI.getOperand(2).getReg());
+ if (TRI->isSGPRClass(DstRC) &&
+ (TRI->hasVGPRs(Src0RC) || TRI->hasVGPRs(Src1RC))) {
+ DEBUG(dbgs() << " Fixing INSERT_SUBREG:\n");
+ DEBUG(MI.print(dbgs()));
+ TII->moveToVALU(MI);
+ }
+ break;
+ }
}
}
}
diff --git a/contrib/llvm/lib/Target/R600/SIFixSGPRLiveRanges.cpp b/contrib/llvm/lib/Target/R600/SIFixSGPRLiveRanges.cpp
new file mode 100644
index 0000000..7d116ee
--- /dev/null
+++ b/contrib/llvm/lib/Target/R600/SIFixSGPRLiveRanges.cpp
@@ -0,0 +1,110 @@
+//===-- SIFixSGPRLiveRanges.cpp - Fix SGPR live ranges ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// SALU instructions ignore control flow, so we need to modify the live ranges
+/// of the registers they define.
+///
+/// The strategy is to view the entire program as if it were a single basic
+/// block and calculate the intervals accordingly. We implement this
+/// by walking this list of segments for each LiveRange and setting the
+/// end of each segment equal to the start of the segment that immediately
+/// follows it.
+
+#include "AMDGPU.h"
+#include "SIRegisterInfo.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "si-fix-sgpr-live-ranges"
+
+namespace {
+
+class SIFixSGPRLiveRanges : public MachineFunctionPass {
+public:
+ static char ID;
+
+public:
+ SIFixSGPRLiveRanges() : MachineFunctionPass(ID) {
+ initializeSIFixSGPRLiveRangesPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual bool runOnMachineFunction(MachineFunction &MF) override;
+
+ virtual const char *getPassName() const override {
+ return "SI Fix SGPR live ranges";
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<LiveIntervals>();
+ AU.addPreserved<LiveIntervals>();
+ AU.addPreserved<SlotIndexes>();
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+};
+
+} // End anonymous namespace.
+
+INITIALIZE_PASS_BEGIN(SIFixSGPRLiveRanges, DEBUG_TYPE,
+ "SI Fix SGPR Live Ranges", false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_END(SIFixSGPRLiveRanges, DEBUG_TYPE,
+ "SI Fix SGPR Live Ranges", false, false)
+
+char SIFixSGPRLiveRanges::ID = 0;
+
+char &llvm::SIFixSGPRLiveRangesID = SIFixSGPRLiveRanges::ID;
+
+FunctionPass *llvm::createSIFixSGPRLiveRangesPass() {
+ return new SIFixSGPRLiveRanges();
+}
+
+bool SIFixSGPRLiveRanges::runOnMachineFunction(MachineFunction &MF) {
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(
+ MF.getTarget().getRegisterInfo());
+ LiveIntervals *LIS = &getAnalysis<LiveIntervals>();
+
+ for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
+ BI != BE; ++BI) {
+
+ MachineBasicBlock &MBB = *BI;
+ for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
+ I != E; ++I) {
+ MachineInstr &MI = *I;
+ MachineOperand *ExecUse = MI.findRegisterUseOperand(AMDGPU::EXEC);
+ if (ExecUse)
+ continue;
+
+ for (const MachineOperand &Def : MI.operands()) {
+        if (!Def.isReg() || !Def.isDef() || !TargetRegisterInfo::isVirtualRegister(Def.getReg()))
+ continue;
+
+ const TargetRegisterClass *RC = MRI.getRegClass(Def.getReg());
+
+ if (!TRI->isSGPRClass(RC))
+ continue;
+ LiveInterval &LI = LIS->getInterval(Def.getReg());
+ for (unsigned i = 0, e = LI.size() - 1; i != e; ++i) {
+ LiveRange::Segment &Seg = LI.segments[i];
+ LiveRange::Segment &Next = LI.segments[i + 1];
+ Seg.end = Next.start;
+ }
+ }
+ }
+ }
+
+ return false;
+}
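The heart of the new pass is the loop at the bottom of runOnMachineFunction: for each virtual SGPR it walks the LiveInterval's segments and sets every segment's end to the start of the segment that follows, which is what "view the entire program as if it were a single basic block" amounts to in practice. Below is a stripped-down sketch of just that stitching step on plain data, using hypothetical Segment/stitchSegments names rather than the LLVM LiveRange types.

  #include <vector>

  // Simplified stand-in for LiveRange::Segment: a half-open [start, end)
  // range of slot indexes, kept sorted by start.
  struct Segment { unsigned start; unsigned end; };

  // Mirror of the per-interval loop above: extend each segment so it ends
  // exactly where the next one begins, closing every gap in between.
  void stitchSegments(std::vector<Segment> &Segs) {
    for (size_t I = 0; I + 1 < Segs.size(); ++I)
      Segs[I].end = Segs[I + 1].start;
  }

As the full pass above shows, this stitching is only applied to intervals of virtual registers in SGPR classes, and instructions that read EXEC are skipped entirely.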
diff --git a/contrib/llvm/lib/Target/R600/SIISelLowering.cpp b/contrib/llvm/lib/Target/R600/SIISelLowering.cpp
index d5d2b68..5a148a2 100644
--- a/contrib/llvm/lib/Target/R600/SIISelLowering.cpp
+++ b/contrib/llvm/lib/Target/R600/SIISelLowering.cpp
@@ -12,9 +12,16 @@
//
//===----------------------------------------------------------------------===//
+#ifdef _MSC_VER
+// Provide M_PI.
+#define _USE_MATH_DEFINES
+#include <cmath>
+#endif
+
#include "SIISelLowering.h"
#include "AMDGPU.h"
-#include "AMDILIntrinsicInfo.h"
+#include "AMDGPUIntrinsicInfo.h"
+#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
@@ -23,30 +30,27 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/Function.h"
-
-const uint64_t RSRC_DATA_FORMAT = 0xf00000000000LL;
+#include "llvm/ADT/SmallString.h"
using namespace llvm;
SITargetLowering::SITargetLowering(TargetMachine &TM) :
AMDGPUTargetLowering(TM) {
-
- addRegisterClass(MVT::i1, &AMDGPU::SReg_64RegClass);
- addRegisterClass(MVT::i64, &AMDGPU::VSrc_64RegClass);
+ addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
+ addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
addRegisterClass(MVT::v32i8, &AMDGPU::SReg_256RegClass);
addRegisterClass(MVT::v64i8, &AMDGPU::SReg_512RegClass);
- addRegisterClass(MVT::i32, &AMDGPU::VSrc_32RegClass);
- addRegisterClass(MVT::f32, &AMDGPU::VSrc_32RegClass);
+ addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass);
+ addRegisterClass(MVT::f32, &AMDGPU::VReg_32RegClass);
- addRegisterClass(MVT::f64, &AMDGPU::VSrc_64RegClass);
- addRegisterClass(MVT::v2i32, &AMDGPU::VSrc_64RegClass);
- addRegisterClass(MVT::v2f32, &AMDGPU::VSrc_64RegClass);
+ addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
+ addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
+ addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);
- addRegisterClass(MVT::v4i32, &AMDGPU::VReg_128RegClass);
+ addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
- addRegisterClass(MVT::i128, &AMDGPU::SReg_128RegClass);
addRegisterClass(MVT::v8i32, &AMDGPU::VReg_256RegClass);
addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);
@@ -76,15 +80,16 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);
- setOperationAction(ISD::ADD, MVT::i64, Legal);
setOperationAction(ISD::ADD, MVT::i32, Legal);
setOperationAction(ISD::ADDC, MVT::i32, Legal);
setOperationAction(ISD::ADDE, MVT::i32, Legal);
+ setOperationAction(ISD::SUBC, MVT::i32, Legal);
+ setOperationAction(ISD::SUBE, MVT::i32, Legal);
- setOperationAction(ISD::BITCAST, MVT::i128, Legal);
+ setOperationAction(ISD::FSIN, MVT::f32, Custom);
+ setOperationAction(ISD::FCOS, MVT::f32, Custom);
// We need to custom lower vector stores from local memory
- setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
@@ -92,30 +97,40 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
setOperationAction(ISD::STORE, MVT::v8i32, Custom);
setOperationAction(ISD::STORE, MVT::v16i32, Custom);
- // We need to custom lower loads/stores from private memory
- setOperationAction(ISD::LOAD, MVT::i32, Custom);
- setOperationAction(ISD::LOAD, MVT::i64, Custom);
- setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
- setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
-
+ setOperationAction(ISD::STORE, MVT::i1, Custom);
setOperationAction(ISD::STORE, MVT::i32, Custom);
- setOperationAction(ISD::STORE, MVT::i64, Custom);
- setOperationAction(ISD::STORE, MVT::i128, Custom);
setOperationAction(ISD::STORE, MVT::v2i32, Custom);
setOperationAction(ISD::STORE, MVT::v4i32, Custom);
+ setOperationAction(ISD::SELECT, MVT::f32, Promote);
+ AddPromotedToType(ISD::SELECT, MVT::f32, MVT::i32);
+ setOperationAction(ISD::SELECT, MVT::i64, Custom);
+ setOperationAction(ISD::SELECT, MVT::f64, Promote);
+ AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);
- setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
- setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
-
- setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
- setOperationAction(ISD::ANY_EXTEND, MVT::i64, Custom);
- setOperationAction(ISD::SIGN_EXTEND, MVT::i64, Custom);
- setOperationAction(ISD::ZERO_EXTEND, MVT::i64, Custom);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
+
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
+
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
+
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Custom);
+
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
@@ -123,26 +138,101 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
+ setOperationAction(ISD::BRCOND, MVT::Other, Custom);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Custom);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Custom);
setLoadExtAction(ISD::SEXTLOAD, MVT::i32, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::i32, Expand);
setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, Expand);
setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i8, Custom);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Custom);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, Expand);
+
+ setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::EXTLOAD, MVT::i8, Custom);
+ setLoadExtAction(ISD::EXTLOAD, MVT::i16, Custom);
+ setLoadExtAction(ISD::EXTLOAD, MVT::i32, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
+
+ setTruncStoreAction(MVT::i32, MVT::i8, Custom);
+ setTruncStoreAction(MVT::i32, MVT::i16, Custom);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
setTruncStoreAction(MVT::i64, MVT::i32, Expand);
- setTruncStoreAction(MVT::i128, MVT::i64, Expand);
setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
+ setOperationAction(ISD::LOAD, MVT::i1, Custom);
+
+ setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand);
+ setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
+
setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
- setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
+ setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
+ setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
- setTargetDAGCombine(ISD::SELECT_CC);
+ // These should use UDIVREM, so set them to expand
+ setOperationAction(ISD::UDIV, MVT::i64, Expand);
+ setOperationAction(ISD::UREM, MVT::i64, Expand);
+
+ // We only support LOAD/STORE and vector manipulation ops for vectors
+ // with > 4 elements.
+ MVT VecTypes[] = {
+ MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32
+ };
+
+ setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
+ setOperationAction(ISD::SELECT, MVT::i1, Promote);
+
+ for (MVT VT : VecTypes) {
+ for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
+ switch(Op) {
+ case ISD::LOAD:
+ case ISD::STORE:
+ case ISD::BUILD_VECTOR:
+ case ISD::BITCAST:
+ case ISD::EXTRACT_VECTOR_ELT:
+ case ISD::INSERT_VECTOR_ELT:
+ case ISD::CONCAT_VECTORS:
+ case ISD::INSERT_SUBVECTOR:
+ case ISD::EXTRACT_SUBVECTOR:
+ break;
+ default:
+ setOperationAction(Op, VT, Expand);
+ break;
+ }
+ }
+ }
+
+ for (int I = MVT::v1f64; I <= MVT::v8f64; ++I) {
+ MVT::SimpleValueType VT = static_cast<MVT::SimpleValueType>(I);
+ setOperationAction(ISD::FTRUNC, VT, Expand);
+ setOperationAction(ISD::FCEIL, VT, Expand);
+ setOperationAction(ISD::FFLOOR, VT, Expand);
+ }
+
+ if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
+ setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
+ setOperationAction(ISD::FCEIL, MVT::f64, Legal);
+ setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
+ setOperationAction(ISD::FRINT, MVT::f64, Legal);
+ }
+
+  // FIXME: These should be removed and handled the same way as f32 fneg. Source
+ // modifiers also work for the double instructions.
+ setOperationAction(ISD::FNEG, MVT::f64, Expand);
+ setOperationAction(ISD::FABS, MVT::f64, Expand);
+ setOperationAction(ISD::FDIV, MVT::f32, Custom);
+
+ setTargetDAGCombine(ISD::SELECT_CC);
setTargetDAGCombine(ISD::SETCC);
+ setTargetDAGCombine(ISD::UINT_TO_FP);
+
setSchedulingPreference(Sched::RegPressure);
}
@@ -151,21 +241,55 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
//===----------------------------------------------------------------------===//
bool SITargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
+ unsigned AddrSpace,
bool *IsFast) const {
+ if (IsFast)
+ *IsFast = false;
+
  // XXX: This depends on the address space and also we may want to revisit
// the alignment values we specify in the DataLayout.
+
+ // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
+ // which isn't a simple VT.
if (!VT.isSimple() || VT == MVT::Other)
return false;
+
+ // XXX - CI changes say "Support for unaligned memory accesses" but I don't
+ // see what for specifically. The wording everywhere else seems to be the
+ // same.
+
+ // XXX - The only mention I see of this in the ISA manual is for LDS direct
+ // reads the "byte address and must be dword aligned". Is it also true for the
+ // normal loads and stores?
+ if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS)
+ return false;
+
+ // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
+ // byte-address are ignored, thus forcing Dword alignment.
+ // This applies to private, global, and constant memory.
+ if (IsFast)
+ *IsFast = true;
return VT.bitsGT(MVT::i32);
}
-bool SITargetLowering::shouldSplitVectorElementType(EVT VT) const {
- return VT.bitsLE(MVT::i16);
+TargetLoweringBase::LegalizeTypeAction
+SITargetLowering::getPreferredVectorAction(EVT VT) const {
+ if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
+ return TypeSplitVector;
+
+ return TargetLoweringBase::getPreferredVectorAction(VT);
+}
+
+bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
+ Type *Ty) const {
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
+ return TII->isInlineConstant(Imm);
}
SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
SDLoc DL, SDValue Chain,
- unsigned Offset) const {
+ unsigned Offset, bool Signed) const {
MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
AMDGPUAS::CONSTANT_ADDRESS);
@@ -173,7 +297,7 @@ SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
MRI.getLiveInVirtReg(AMDGPU::SGPR0_SGPR1), MVT::i64);
SDValue Ptr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
DAG.getConstant(Offset, MVT::i64));
- return DAG.getExtLoad(ISD::SEXTLOAD, DL, VT, Chain, Ptr,
+ return DAG.getExtLoad(Signed ? ISD::SEXTLOAD : ISD::ZEXTLOAD, DL, VT, Chain, Ptr,
MachinePointerInfo(UndefValue::get(PtrTy)), MemVT,
false, false, MemVT.getSizeInBits() >> 3);
@@ -202,7 +326,7 @@ SDValue SITargetLowering::LowerFormalArguments(
const ISD::InputArg &Arg = Ins[i];
// First check if it's a PS input addr
- if (Info->ShaderType == ShaderType::PIXEL && !Arg.Flags.isInReg() &&
+ if (Info->getShaderType() == ShaderType::PIXEL && !Arg.Flags.isInReg() &&
!Arg.Flags.isByVal()) {
assert((PSInputNum <= 15) && "Too many PS inputs!");
@@ -218,7 +342,7 @@ SDValue SITargetLowering::LowerFormalArguments(
}
// Second split vertices into their elements
- if (Info->ShaderType != ShaderType::COMPUTE && Arg.VT.isVector()) {
+ if (Info->getShaderType() != ShaderType::COMPUTE && Arg.VT.isVector()) {
ISD::InputArg NewArg = Arg;
NewArg.Flags.setSplit();
NewArg.VT = Arg.VT.getVectorElementType();
@@ -234,7 +358,7 @@ SDValue SITargetLowering::LowerFormalArguments(
NewArg.PartOffset += NewArg.VT.getStoreSize();
}
- } else if (Info->ShaderType != ShaderType::COMPUTE) {
+ } else if (Info->getShaderType() != ShaderType::COMPUTE) {
Splits.push_back(Arg);
}
}
@@ -244,20 +368,26 @@ SDValue SITargetLowering::LowerFormalArguments(
getTargetMachine(), ArgLocs, *DAG.getContext());
// At least one interpolation mode must be enabled or else the GPU will hang.
- if (Info->ShaderType == ShaderType::PIXEL && (Info->PSInputAddr & 0x7F) == 0) {
+ if (Info->getShaderType() == ShaderType::PIXEL &&
+ (Info->PSInputAddr & 0x7F) == 0) {
Info->PSInputAddr |= 1;
CCInfo.AllocateReg(AMDGPU::VGPR0);
CCInfo.AllocateReg(AMDGPU::VGPR1);
}
// The pointer to the list of arguments is stored in SGPR0, SGPR1
- if (Info->ShaderType == ShaderType::COMPUTE) {
+ // The pointer to the scratch buffer is stored in SGPR2, SGPR3
+ if (Info->getShaderType() == ShaderType::COMPUTE) {
+ Info->NumUserSGPRs = 4;
CCInfo.AllocateReg(AMDGPU::SGPR0);
CCInfo.AllocateReg(AMDGPU::SGPR1);
+ CCInfo.AllocateReg(AMDGPU::SGPR2);
+ CCInfo.AllocateReg(AMDGPU::SGPR3);
MF.addLiveIn(AMDGPU::SGPR0_SGPR1, &AMDGPU::SReg_64RegClass);
+ MF.addLiveIn(AMDGPU::SGPR2_SGPR3, &AMDGPU::SReg_64RegClass);
}
- if (Info->ShaderType == ShaderType::COMPUTE) {
+ if (Info->getShaderType() == ShaderType::COMPUTE) {
getOriginalFunctionArgs(DAG, DAG.getMachineFunction().getFunction(), Ins,
Splits);
}
@@ -281,7 +411,8 @@ SDValue SITargetLowering::LowerFormalArguments(
    // The first 36 bytes of the input buffer contain information about
// thread group and global sizes.
SDValue Arg = LowerParameter(DAG, VT, MemVT, DL, DAG.getRoot(),
- 36 + VA.getLocMemOffset());
+ 36 + VA.getLocMemOffset(),
+ Ins[i].Flags.isSExt());
InVals.push_back(Arg);
continue;
}
@@ -322,8 +453,7 @@ SDValue SITargetLowering::LowerFormalArguments(
for (unsigned j = 0; j != NumElements; ++j)
Regs.push_back(DAG.getUNDEF(VT));
- InVals.push_back(DAG.getNode(ISD::BUILD_VECTOR, DL, Arg.VT,
- Regs.data(), Regs.size()));
+ InVals.push_back(DAG.getNode(ISD::BUILD_VECTOR, DL, Arg.VT, Regs));
continue;
}
@@ -336,26 +466,26 @@ MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
MachineInstr * MI, MachineBasicBlock * BB) const {
MachineBasicBlock::iterator I = *MI;
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
+ MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
switch (MI->getOpcode()) {
default:
return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
case AMDGPU::BRANCH: return BB;
case AMDGPU::SI_ADDR64_RSRC: {
- const SIInstrInfo *TII =
- static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
- MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
unsigned SuperReg = MI->getOperand(0).getReg();
- unsigned SubRegLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
- unsigned SubRegHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
- unsigned SubRegHiHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
- unsigned SubRegHiLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ unsigned SubRegLo = MRI.createVirtualRegister(&AMDGPU::SGPR_64RegClass);
+ unsigned SubRegHi = MRI.createVirtualRegister(&AMDGPU::SGPR_64RegClass);
+ unsigned SubRegHiHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
+ unsigned SubRegHiLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B64), SubRegLo)
.addOperand(MI->getOperand(1));
BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B32), SubRegHiLo)
.addImm(0);
BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B32), SubRegHiHi)
- .addImm(RSRC_DATA_FORMAT >> 32);
+ .addImm(AMDGPU::RSRC_DATA_FORMAT >> 32);
BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::REG_SEQUENCE), SubRegHi)
.addReg(SubRegHiLo)
.addImm(AMDGPU::sub0)
@@ -369,25 +499,52 @@ MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
MI->eraseFromParent();
break;
}
+ case AMDGPU::SI_BUFFER_RSRC: {
+ unsigned SuperReg = MI->getOperand(0).getReg();
+ unsigned Args[4];
+ for (unsigned i = 0, e = 4; i < e; ++i) {
+ MachineOperand &Arg = MI->getOperand(i + 1);
+
+ if (Arg.isReg()) {
+ Args[i] = Arg.getReg();
+ continue;
+ }
+
+ assert(Arg.isImm());
+ unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B32), Reg)
+ .addImm(Arg.getImm());
+ Args[i] = Reg;
+ }
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::REG_SEQUENCE),
+ SuperReg)
+ .addReg(Args[0])
+ .addImm(AMDGPU::sub0)
+ .addReg(Args[1])
+ .addImm(AMDGPU::sub1)
+ .addReg(Args[2])
+ .addImm(AMDGPU::sub2)
+ .addReg(Args[3])
+ .addImm(AMDGPU::sub3);
+ MI->eraseFromParent();
+ break;
+ }
case AMDGPU::V_SUB_F64: {
- const SIInstrInfo *TII =
- static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
- BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::V_ADD_F64),
- MI->getOperand(0).getReg())
- .addReg(MI->getOperand(1).getReg())
- .addReg(MI->getOperand(2).getReg())
- .addImm(0) /* src2 */
- .addImm(0) /* ABS */
- .addImm(0) /* CLAMP */
- .addImm(0) /* OMOD */
- .addImm(2); /* NEG */
+ unsigned DestReg = MI->getOperand(0).getReg();
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::V_ADD_F64), DestReg)
+ .addImm(0) // SRC0 modifiers
+ .addReg(MI->getOperand(1).getReg())
+ .addImm(1) // SRC1 modifiers
+ .addReg(MI->getOperand(2).getReg())
+ .addImm(0) // SRC2 modifiers
+ .addImm(0) // src2
+ .addImm(0) // CLAMP
+ .addImm(0); // OMOD
MI->eraseFromParent();
break;
}
case AMDGPU::SI_RegisterStorePseudo: {
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
- const SIInstrInfo *TII =
- static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
MachineInstrBuilder MIB =
BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::SI_RegisterStore),
@@ -396,6 +553,50 @@ MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
MIB.addOperand(MI->getOperand(i));
MI->eraseFromParent();
+ break;
+ }
+ case AMDGPU::FABS_SI: {
+ MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
+ unsigned Reg = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::V_MOV_B32_e32),
+ Reg)
+ .addImm(0x7fffffff);
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::V_AND_B32_e32),
+ MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(1).getReg())
+ .addReg(Reg);
+ MI->eraseFromParent();
+ break;
+ }
+ case AMDGPU::FNEG_SI: {
+ MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
+ unsigned Reg = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::V_MOV_B32_e32),
+ Reg)
+ .addImm(0x80000000);
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::V_XOR_B32_e32),
+ MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(1).getReg())
+ .addReg(Reg);
+ MI->eraseFromParent();
+ break;
+ }
+ case AMDGPU::FCLAMP_SI: {
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::V_ADD_F32_e64),
+ MI->getOperand(0).getReg())
+ .addImm(0) // SRC0 modifiers
+ .addOperand(MI->getOperand(1))
+ .addImm(0) // SRC1 modifiers
+ .addImm(0) // SRC1
+ .addImm(1) // CLAMP
+ .addImm(0); // OMOD
+ MI->eraseFromParent();
}
}
return BB;
@@ -439,65 +640,57 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
switch (Op.getOpcode()) {
default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
- case ISD::ADD: return LowerADD(Op, DAG);
+ case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
case ISD::BRCOND: return LowerBRCOND(Op, DAG);
case ISD::LOAD: {
- LoadSDNode *Load = dyn_cast<LoadSDNode>(Op);
- if ((Load->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
- Load->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
- Op.getValueType().isVector()) {
- SDValue MergedValues[2] = {
- SplitVectorLoad(Op, DAG),
- Load->getChain()
- };
- return DAG.getMergeValues(MergedValues, 2, SDLoc(Op));
- } else {
- return LowerLOAD(Op, DAG);
- }
+ SDValue Result = LowerLOAD(Op, DAG);
+ assert((!Result.getNode() ||
+ Result.getNode()->getNumValues() == 2) &&
+ "Load should return a value and a chain");
+ return Result;
}
- case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
- case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, DAG);
+ case ISD::FSIN:
+ case ISD::FCOS:
+ return LowerTrig(Op, DAG);
+ case ISD::SELECT: return LowerSELECT(Op, DAG);
+ case ISD::FDIV: return LowerFDIV(Op, DAG);
case ISD::STORE: return LowerSTORE(Op, DAG);
- case ISD::ANY_EXTEND: // Fall-through
- case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(MFI, Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: {
unsigned IntrinsicID =
cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
EVT VT = Op.getValueType();
SDLoc DL(Op);
- //XXX: Hardcoded we only use two to store the pointer to the parameters.
- unsigned NumUserSGPRs = 2;
switch (IntrinsicID) {
default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
case Intrinsic::r600_read_ngroups_x:
- return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 0);
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 0, false);
case Intrinsic::r600_read_ngroups_y:
- return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4);
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4, false);
case Intrinsic::r600_read_ngroups_z:
- return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 8);
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 8, false);
case Intrinsic::r600_read_global_size_x:
- return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 12);
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 12, false);
case Intrinsic::r600_read_global_size_y:
- return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 16);
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 16, false);
case Intrinsic::r600_read_global_size_z:
- return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 20);
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 20, false);
case Intrinsic::r600_read_local_size_x:
- return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 24);
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 24, false);
case Intrinsic::r600_read_local_size_y:
- return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 28);
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 28, false);
case Intrinsic::r600_read_local_size_z:
- return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 32);
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 32, false);
case Intrinsic::r600_read_tgid_x:
return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
- AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 0), VT);
+ AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 0), VT);
case Intrinsic::r600_read_tgid_y:
return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
- AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 1), VT);
+ AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 1), VT);
case Intrinsic::r600_read_tgid_z:
return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
- AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 2), VT);
+ AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 2), VT);
case Intrinsic::r600_read_tidig_x:
return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass,
AMDGPU::VGPR0, VT);
@@ -509,7 +702,7 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
AMDGPU::VGPR2, VT);
case AMDGPUIntrinsic::SI_load_const: {
SDValue Ops [] = {
- ResourceDescriptorToi128(Op.getOperand(1), DAG),
+ Op.getOperand(1),
Op.getOperand(2)
};
@@ -518,7 +711,7 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant,
VT.getSizeInBits() / 8, 4);
return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
- Op->getVTList(), Ops, 2, VT, MMO);
+ Op->getVTList(), Ops, VT, MMO);
}
case AMDGPUIntrinsic::SI_sample:
return LowerSampleIntrinsic(AMDGPUISD::SAMPLE, Op, DAG);
@@ -530,7 +723,7 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
return LowerSampleIntrinsic(AMDGPUISD::SAMPLEL, Op, DAG);
case AMDGPUIntrinsic::SI_vs_load_input:
return DAG.getNode(AMDGPUISD::LOAD_INPUT, DL, VT,
- ResourceDescriptorToi128(Op.getOperand(1), DAG),
+ Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
}
@@ -545,7 +738,7 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
SDLoc DL(Op);
SDValue Ops [] = {
Chain,
- ResourceDescriptorToi128(Op.getOperand(2), DAG),
+ Op.getOperand(2),
Op.getOperand(3),
Op.getOperand(4),
Op.getOperand(5),
@@ -566,8 +759,7 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
MachineMemOperand::MOStore,
VT.getSizeInBits() / 8, 4);
return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL,
- Op->getVTList(), Ops,
- sizeof(Ops)/sizeof(Ops[0]), VT, MMO);
+ Op->getVTList(), Ops, VT, MMO);
}
default:
break;
@@ -576,33 +768,6 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
return SDValue();
}
-SDValue SITargetLowering::LowerADD(SDValue Op,
- SelectionDAG &DAG) const {
- if (Op.getValueType() != MVT::i64)
- return SDValue();
-
- SDLoc DL(Op);
- SDValue LHS = Op.getOperand(0);
- SDValue RHS = Op.getOperand(1);
-
- SDValue Zero = DAG.getConstant(0, MVT::i32);
- SDValue One = DAG.getConstant(1, MVT::i32);
-
- SDValue Lo0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS, Zero);
- SDValue Hi0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS, One);
-
- SDValue Lo1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS, Zero);
- SDValue Hi1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS, One);
-
- SDVTList VTList = DAG.getVTList(MVT::i32, MVT::Glue);
-
- SDValue AddLo = DAG.getNode(ISD::ADDC, DL, VTList, Lo0, Lo1);
- SDValue Carry = AddLo.getValue(1);
- SDValue AddHi = DAG.getNode(ISD::ADDE, DL, VTList, Hi0, Hi1, Carry);
-
- return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, AddLo, AddHi.getValue(0));
-}
-
/// \brief Helper function for LowerBRCOND
static SDNode *findUser(SDValue Value, unsigned Opcode) {
@@ -616,7 +781,22 @@ static SDNode *findUser(SDValue Value, unsigned Opcode) {
if (I->getOpcode() == Opcode)
return *I;
}
- return 0;
+ return nullptr;
+}
+
+SDValue SITargetLowering::LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const {
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
+ const SIRegisterInfo &TRI = TII->getRegisterInfo();
+ FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Op);
+ unsigned FrameIndex = FINode->getIndex();
+
+ CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
+ TRI.getPreloadedValue(MF, SIRegisterInfo::SCRATCH_WAVE_OFFSET), MVT::i32);
+
+ return DAG.getTargetFrameIndex(FrameIndex, MVT::i32);
}
/// This transforms the control flow intrinsics to get the branch destination as
@@ -628,7 +808,7 @@ SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
SDNode *Intr = BRCOND.getOperand(1).getNode();
SDValue Target = BRCOND.getOperand(2);
- SDNode *BR = 0;
+ SDNode *BR = nullptr;
if (Intr->getOpcode() == ISD::SETCC) {
// As long as we negate the condition everything is fine
@@ -661,7 +841,7 @@ SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
// build the new intrinsic call
SDNode *Result = DAG.getNode(
Res.size() > 1 ? ISD::INTRINSIC_W_CHAIN : ISD::INTRINSIC_VOID, DL,
- DAG.getVTList(Res.data(), Res.size()), Ops.data(), Ops.size()).getNode();
+ DAG.getVTList(Res), Ops).getNode();
if (BR) {
// Give the branch instruction our target
@@ -669,7 +849,7 @@ SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
BR->getOperand(0),
BRCOND.getOperand(2)
};
- DAG.MorphNodeTo(BR, ISD::BR, BR->getVTList(), Ops, 2);
+ DAG.MorphNodeTo(BR, ISD::BR, BR->getVTList(), Ops);
}
SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
@@ -697,42 +877,57 @@ SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
return Chain;
}
-SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
- SDLoc DL(Op);
- LoadSDNode *Load = cast<LoadSDNode>(Op);
+SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
+ SDValue Op,
+ SelectionDAG &DAG) const {
+ GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
- if (Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
- return SDValue();
+ if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
+ return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
- SDValue TruncPtr = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32,
- Load->getBasePtr(), DAG.getConstant(0, MVT::i32));
- SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, TruncPtr,
- DAG.getConstant(2, MVT::i32));
-
- SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
- Load->getChain(), Ptr,
- DAG.getTargetConstant(0, MVT::i32),
- Op.getOperand(2));
- SDValue MergedValues[2] = {
- Ret,
- Load->getChain()
- };
- return DAG.getMergeValues(MergedValues, 2, DL);
+ SDLoc DL(GSD);
+ const GlobalValue *GV = GSD->getGlobal();
+ MVT PtrVT = getPointerTy(GSD->getAddressSpace());
+
+ SDValue Ptr = DAG.getNode(AMDGPUISD::CONST_DATA_PTR, DL, PtrVT);
+ SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32);
+
+ SDValue PtrLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Ptr,
+ DAG.getConstant(0, MVT::i32));
+ SDValue PtrHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Ptr,
+ DAG.getConstant(1, MVT::i32));
+ SDValue Lo = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i32, MVT::Glue),
+ PtrLo, GA);
+ SDValue Hi = DAG.getNode(ISD::ADDE, DL, DAG.getVTList(MVT::i32, MVT::Glue),
+ PtrHi, DAG.getConstant(0, MVT::i32),
+ SDValue(Lo.getNode(), 1));
+ return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi);
}
-SDValue SITargetLowering::ResourceDescriptorToi128(SDValue Op,
- SelectionDAG &DAG) const {
+SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ LoadSDNode *Load = cast<LoadSDNode>(Op);
- if (Op.getValueType() == MVT::i128) {
- return Op;
+ if (Op.getValueType().isVector()) {
+ assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
+ "Custom lowering for non-i32 vectors hasn't been implemented.");
+ unsigned NumElements = Op.getValueType().getVectorNumElements();
+ assert(NumElements != 2 && "v2 loads are supported for all address spaces.");
+ switch (Load->getAddressSpace()) {
+ default: break;
+ case AMDGPUAS::GLOBAL_ADDRESS:
+ case AMDGPUAS::PRIVATE_ADDRESS:
+ // v4 loads are supported for private and global memory.
+ if (NumElements <= 4)
+ break;
+ // fall-through
+ case AMDGPUAS::LOCAL_ADDRESS:
+ return SplitVectorLoad(Op, DAG);
+ }
}
- assert(Op.getOpcode() == ISD::UNDEF);
-
- return DAG.getNode(ISD::BUILD_PAIR, SDLoc(Op), MVT::i128,
- DAG.getConstant(0, MVT::i64),
- DAG.getConstant(0, MVT::i64));
+ return AMDGPUTargetLowering::LowerLOAD(Op, DAG);
}
SDValue SITargetLowering::LowerSampleIntrinsic(unsigned Opcode,
@@ -740,42 +935,129 @@ SDValue SITargetLowering::LowerSampleIntrinsic(unsigned Opcode,
SelectionDAG &DAG) const {
return DAG.getNode(Opcode, SDLoc(Op), Op.getValueType(), Op.getOperand(1),
Op.getOperand(2),
- ResourceDescriptorToi128(Op.getOperand(3), DAG),
+ Op.getOperand(3),
Op.getOperand(4));
}
-SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
+SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
+ if (Op.getValueType() != MVT::i64)
+ return SDValue();
+
+ SDLoc DL(Op);
+ SDValue Cond = Op.getOperand(0);
+
+ SDValue Zero = DAG.getConstant(0, MVT::i32);
+ SDValue One = DAG.getConstant(1, MVT::i32);
+
+ SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
+ SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
+
+ SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
+ SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
+
+ SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
+
+ SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
+ SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
+
+ SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
+
+ SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i32, Lo, Hi);
+ return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res);
+}
+
+// Catch division cases where we can use shortcuts with rcp and rsq
+// instructions.
+SDValue SITargetLowering::LowerFastFDIV(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc SL(Op);
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
- SDValue True = Op.getOperand(2);
- SDValue False = Op.getOperand(3);
- SDValue CC = Op.getOperand(4);
EVT VT = Op.getValueType();
- SDLoc DL(Op);
+ bool Unsafe = DAG.getTarget().Options.UnsafeFPMath;
+
+ if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
+ if ((Unsafe || (VT == MVT::f32 && !Subtarget->hasFP32Denormals())) &&
+ CLHS->isExactlyValue(1.0)) {
+      // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
+      // the CI documentation they have a worst case error of 1 ulp.
+ // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
+ // use it as long as we aren't trying to use denormals.
+
+ // 1.0 / sqrt(x) -> rsq(x)
+ //
+ // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
+ // error seems really high at 2^29 ULP.
+ if (RHS.getOpcode() == ISD::FSQRT)
+ return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
+
+ // 1.0 / x -> rcp(x)
+ return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
+ }
+ }
- // Possible Min/Max pattern
- SDValue MinMax = LowerMinMax(Op, DAG);
- if (MinMax.getNode()) {
- return MinMax;
+ if (Unsafe) {
+ // Turn into multiply by the reciprocal.
+ // x / y -> x * (1.0 / y)
+ SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
+ return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip);
}
- SDValue Cond = DAG.getNode(ISD::SETCC, DL, MVT::i1, LHS, RHS, CC);
- return DAG.getNode(ISD::SELECT, DL, VT, Cond, True, False);
+ return SDValue();
}
-SDValue SITargetLowering::LowerSIGN_EXTEND(SDValue Op,
- SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- SDLoc DL(Op);
+SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
+ SDValue FastLowered = LowerFastFDIV(Op, DAG);
+ if (FastLowered.getNode())
+ return FastLowered;
- if (VT != MVT::i64) {
+ // This uses v_rcp_f32 which does not handle denormals. Let this hit a
+ // selection error for now rather than do something incorrect.
+ if (Subtarget->hasFP32Denormals())
return SDValue();
- }
- SDValue Hi = DAG.getNode(ISD::SRA, DL, MVT::i32, Op.getOperand(0),
- DAG.getConstant(31, MVT::i32));
+ SDLoc SL(Op);
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+
+ SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
- return DAG.getNode(ISD::BUILD_PAIR, DL, VT, Op.getOperand(0), Hi);
+ const APFloat K0Val(BitsToFloat(0x6f800000));
+ const SDValue K0 = DAG.getConstantFP(K0Val, MVT::f32);
+
+ const APFloat K1Val(BitsToFloat(0x2f800000));
+ const SDValue K1 = DAG.getConstantFP(K1Val, MVT::f32);
+
+ const SDValue One = DAG.getTargetConstantFP(1.0, MVT::f32);
+
+ EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f32);
+
+ SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
+
+ SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
+
+ r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
+
+ SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
+
+ SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
+
+ return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
+}
+
+SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
+ return SDValue();
+}
+
+SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
+ EVT VT = Op.getValueType();
+
+ if (VT == MVT::f32)
+ return LowerFDIV32(Op, DAG);
+
+ if (VT == MVT::f64)
+ return LowerFDIV64(Op, DAG);
+
+ llvm_unreachable("Unexpected type for fdiv");
}
SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
@@ -783,6 +1065,18 @@ SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
StoreSDNode *Store = cast<StoreSDNode>(Op);
EVT VT = Store->getMemoryVT();
+ // These stores are legal.
+ if (Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS &&
+ VT.isVector() && VT.getVectorNumElements() == 2 &&
+ VT.getVectorElementType() == MVT::i32)
+ return SDValue();
+
+ if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) {
+ if (VT.isVector() && VT.getVectorNumElements() > 4)
+ return SplitVectorStore(Op, DAG);
+ return SDValue();
+ }
+
SDValue Ret = AMDGPUTargetLowering::LowerSTORE(Op, DAG);
if (Ret.getNode())
return Ret;
@@ -790,61 +1084,125 @@ SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
if (VT.isVector() && VT.getVectorNumElements() >= 8)
return SplitVectorStore(Op, DAG);
- if (Store->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
+ if (VT == MVT::i1)
+ return DAG.getTruncStore(Store->getChain(), DL,
+ DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
+ Store->getBasePtr(), MVT::i1, Store->getMemOperand());
+
+ return SDValue();
+}
+
+SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
+ EVT VT = Op.getValueType();
+ SDValue Arg = Op.getOperand(0);
+ SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, SDLoc(Op), VT,
+ DAG.getNode(ISD::FMUL, SDLoc(Op), VT, Arg,
+ DAG.getConstantFP(0.5 / M_PI, VT)));
+
+ switch (Op.getOpcode()) {
+ case ISD::FCOS:
+ return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart);
+ case ISD::FSIN:
+ return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart);
+ default:
+ llvm_unreachable("Wrong trig opcode");
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Custom DAG optimizations
+//===----------------------------------------------------------------------===//
+
+SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
+ DAGCombinerInfo &DCI) {
+ EVT VT = N->getValueType(0);
+ EVT ScalarVT = VT.getScalarType();
+ if (ScalarVT != MVT::f32)
return SDValue();
- SDValue TruncPtr = DAG.getZExtOrTrunc(Store->getBasePtr(), DL, MVT::i32);
- SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, TruncPtr,
- DAG.getConstant(2, MVT::i32));
- SDValue Chain = Store->getChain();
- SmallVector<SDValue, 8> Values;
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc DL(N);
- if (VT == MVT::i64) {
- for (unsigned i = 0; i < 2; ++i) {
- Values.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32,
- Store->getValue(), DAG.getConstant(i, MVT::i32)));
- }
- } else if (VT == MVT::i128) {
- for (unsigned i = 0; i < 2; ++i) {
- for (unsigned j = 0; j < 2; ++j) {
- Values.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32,
- DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64,
- Store->getValue(), DAG.getConstant(i, MVT::i32)),
- DAG.getConstant(j, MVT::i32)));
- }
+ SDValue Src = N->getOperand(0);
+ EVT SrcVT = Src.getValueType();
+
+ // TODO: We could try to match extracting the higher bytes, which would be
+ // easier if i8 vectors weren't promoted to i32 vectors, particularly after
+ // types are legalized. v4i8 -> v4f32 is probably the only case to worry
+ // about in practice.
+ if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) {
+ if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
+ SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
+ DCI.AddToWorklist(Cvt.getNode());
+ return Cvt;
}
- } else {
- Values.push_back(Store->getValue());
}
- for (unsigned i = 0; i < Values.size(); ++i) {
- SDValue PartPtr = DAG.getNode(ISD::ADD, DL, MVT::i32,
- Ptr, DAG.getConstant(i, MVT::i32));
- Chain = DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
- Chain, Values[i], PartPtr,
- DAG.getTargetConstant(0, MVT::i32));
+ // We are primarily trying to catch operations on illegal vector types
+ // before they are expanded.
+ // For scalars, we can use the more flexible method of checking masked bits
+ // after legalization.
+ if (!DCI.isBeforeLegalize() ||
+ !SrcVT.isVector() ||
+ SrcVT.getVectorElementType() != MVT::i8) {
+ return SDValue();
}
- return Chain;
-}
-
-SDValue SITargetLowering::LowerZERO_EXTEND(SDValue Op,
- SelectionDAG &DAG) const {
- EVT VT = Op.getValueType();
- SDLoc DL(Op);
+ assert(DCI.isBeforeLegalize() && "Unexpected legal type");
- if (VT != MVT::i64) {
+ // Weird sized vectors are a pain to handle, but we know 3 is really the same
+ // size as 4.
+ unsigned NElts = SrcVT.getVectorNumElements();
+ if (!SrcVT.isSimple() && NElts != 3)
return SDValue();
+
+ // Handle v4i8 -> v4f32 extload. Replace the v4i8 with a legal i32 load to
+ // prevent a mess from expanding to v4i32 and repacking.
+ if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
+ EVT LoadVT = getEquivalentMemType(*DAG.getContext(), SrcVT);
+ EVT RegVT = getEquivalentLoadRegType(*DAG.getContext(), SrcVT);
+ EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f32, NElts);
+
+ LoadSDNode *Load = cast<LoadSDNode>(Src);
+ SDValue NewLoad = DAG.getExtLoad(ISD::ZEXTLOAD, DL, RegVT,
+ Load->getChain(),
+ Load->getBasePtr(),
+ LoadVT,
+ Load->getMemOperand());
+
+ // Make sure successors of the original load stay after it by updating
+ // them to use the new Chain.
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), NewLoad.getValue(1));
+
+ SmallVector<SDValue, 4> Elts;
+ if (RegVT.isVector())
+ DAG.ExtractVectorElements(NewLoad, Elts);
+ else
+ Elts.push_back(NewLoad);
+
+ SmallVector<SDValue, 4> Ops;
+
+ unsigned EltIdx = 0;
+ for (SDValue Elt : Elts) {
+ unsigned ComponentsInElt = std::min(4u, NElts - 4 * EltIdx);
+ for (unsigned I = 0; I < ComponentsInElt; ++I) {
+ unsigned Opc = AMDGPUISD::CVT_F32_UBYTE0 + I;
+ SDValue Cvt = DAG.getNode(Opc, DL, MVT::f32, Elt);
+ DCI.AddToWorklist(Cvt.getNode());
+ Ops.push_back(Cvt);
+ }
+
+ ++EltIdx;
+ }
+
+ assert(Ops.size() == NElts);
+
+ return DAG.getNode(ISD::BUILD_VECTOR, DL, FloatVT, Ops);
}
- return DAG.getNode(ISD::BUILD_PAIR, DL, VT, Op.getOperand(0),
- DAG.getConstant(0, MVT::i32));
+ return SDValue();
}
-//===----------------------------------------------------------------------===//
-// Custom DAG optimizations
-//===----------------------------------------------------------------------===//
-
SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
@@ -852,26 +1210,12 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
EVT VT = N->getValueType(0);
switch (N->getOpcode()) {
- default: break;
- case ISD::SELECT_CC: {
- ConstantSDNode *True, *False;
- // i1 selectcc(l, r, -1, 0, cc) -> i1 setcc(l, r, cc)
- if ((True = dyn_cast<ConstantSDNode>(N->getOperand(2)))
- && (False = dyn_cast<ConstantSDNode>(N->getOperand(3)))
- && True->isAllOnesValue()
- && False->isNullValue()
- && VT == MVT::i1) {
- return DAG.getNode(ISD::SETCC, DL, VT, N->getOperand(0),
- N->getOperand(1), N->getOperand(4));
-
- }
- break;
- }
+ default: return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
case ISD::SETCC: {
SDValue Arg0 = N->getOperand(0);
SDValue Arg1 = N->getOperand(1);
SDValue CC = N->getOperand(2);
- ConstantSDNode * C = NULL;
+ ConstantSDNode * C = nullptr;
ISD::CondCode CCOp = dyn_cast<CondCodeSDNode>(CC)->get();
// i1 setcc (sext(i1), 0, setne) -> i1 setcc(i1, 0, setne)
@@ -886,8 +1230,34 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
}
break;
}
+
+ case AMDGPUISD::CVT_F32_UBYTE0:
+ case AMDGPUISD::CVT_F32_UBYTE1:
+ case AMDGPUISD::CVT_F32_UBYTE2:
+ case AMDGPUISD::CVT_F32_UBYTE3: {
+ unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
+
+ SDValue Src = N->getOperand(0);
+ APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
+
+ APInt KnownZero, KnownOne;
+ TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
+ !DCI.isBeforeLegalizeOps());
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (TLO.ShrinkDemandedConstant(Src, Demanded) ||
+ TLI.SimplifyDemandedBits(Src, Demanded, KnownZero, KnownOne, TLO)) {
+ DCI.CommitTargetLoweringOpt(TLO);
+ }
+
+ break;
}
- return SDValue();
+
+ case ISD::UINT_TO_FP: {
+ return performUCharToFloatCombine(N, DCI);
+ }
+ }
+
+ return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
}
/// \brief Test if RegClass is one of the VSrc classes
@@ -918,9 +1288,11 @@ int32_t SITargetLowering::analyzeImmediate(const SDNode *N) const {
return -1;
}
Imm.I = Node->getSExtValue();
- } else if (const ConstantFPSDNode *Node = dyn_cast<ConstantFPSDNode>(N))
+ } else if (const ConstantFPSDNode *Node = dyn_cast<ConstantFPSDNode>(N)) {
+ if (N->getValueType(0) != MVT::f32)
+ return -1;
Imm.F = Node->getValueAPF().convertToFloat();
- else
+ } else
return -1; // It isn't an immediate
if ((Imm.I >= -16 && Imm.I <= 64) ||
@@ -940,7 +1312,7 @@ bool SITargetLowering::foldImm(SDValue &Operand, int32_t &Immediate,
MachineSDNode *Mov = dyn_cast<MachineSDNode>(Operand);
const SIInstrInfo *TII =
static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
- if (Mov == 0 || !TII->isMov(Mov->getMachineOpcode()))
+ if (!Mov || !TII->isMov(Mov->getMachineOpcode()))
return false;
const SDValue &Op = Mov->getOperand(0);
@@ -987,7 +1359,7 @@ const TargetRegisterClass *SITargetLowering::getRegClassForNode(
}
return TRI.getPhysRegClass(Reg);
}
- default: return NULL;
+ default: return nullptr;
}
}
const MCInstrDesc &Desc = TII->get(Op->getMachineOpcode());
@@ -1047,7 +1419,7 @@ void SITargetLowering::ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand,
else
return;
- // Nothing todo if they fit naturaly
+ // Nothing to do if they fit naturally
if (fitsRegClass(DAG, Operand, RegClass))
return;
@@ -1059,9 +1431,19 @@ void SITargetLowering::ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand,
  // This is a conservative approach. It is possible that we can't determine the
// correct register class and copy too often, but better safe than sorry.
- SDValue RC = DAG.getTargetConstant(RegClass, MVT::i32);
- SDNode *Node = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, SDLoc(),
- Operand.getValueType(), Operand, RC);
+
+ SDNode *Node;
+ // We can't use COPY_TO_REGCLASS with FrameIndex arguments.
+ if (isa<FrameIndexSDNode>(Operand)) {
+ unsigned Opcode = Operand.getValueType() == MVT::i32 ?
+ AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
+ Node = DAG.getMachineNode(Opcode, SDLoc(), Operand.getValueType(),
+ Operand);
+ } else {
+ SDValue RC = DAG.getTargetConstant(RegClass, MVT::i32);
+ Node = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, SDLoc(),
+ Operand.getValueType(), Operand, RC);
+ }
Operand = SDValue(Node, 0);
}
@@ -1091,22 +1473,22 @@ SDNode *SITargetLowering::foldOperands(MachineSDNode *Node,
// Commuted opcode if available
int OpcodeRev = Desc->isCommutable() ? TII->commuteOpcode(Opcode) : -1;
- const MCInstrDesc *DescRev = OpcodeRev == -1 ? 0 : &TII->get(OpcodeRev);
+ const MCInstrDesc *DescRev = OpcodeRev == -1 ? nullptr : &TII->get(OpcodeRev);
assert(!DescRev || DescRev->getNumDefs() == NumDefs);
assert(!DescRev || DescRev->getNumOperands() == NumOps);
// e64 version if available, -1 otherwise
int OpcodeE64 = AMDGPU::getVOPe64(Opcode);
- const MCInstrDesc *DescE64 = OpcodeE64 == -1 ? 0 : &TII->get(OpcodeE64);
+ const MCInstrDesc *DescE64 = OpcodeE64 == -1 ? nullptr : &TII->get(OpcodeE64);
+ int InputModifiers[3] = {0};
assert(!DescE64 || DescE64->getNumDefs() == NumDefs);
- assert(!DescE64 || DescE64->getNumOperands() == (NumOps + 4));
int32_t Immediate = Desc->getSize() == 4 ? 0 : -1;
bool HaveVSrc = false, HaveSSrc = false;
- // First figure out what we alread have in this instruction
+ // First figure out what we already have in this instruction.
for (unsigned i = 0, e = Node->getNumOperands(), Op = NumDefs;
i != e && Op < NumOps; ++i, ++Op) {
@@ -1125,7 +1507,7 @@ SDNode *SITargetLowering::foldOperands(MachineSDNode *Node,
}
}
- // If we neither have VSrc nor SSrc it makes no sense to continue
+ // If we neither have VSrc nor SSrc, it makes no sense to continue.
if (!HaveVSrc && !HaveSSrc)
return Node;
@@ -1141,20 +1523,28 @@ SDNode *SITargetLowering::foldOperands(MachineSDNode *Node,
const SDValue &Operand = Node->getOperand(i);
Ops.push_back(Operand);
- // Already folded immediate ?
+ // Already folded immediate?
if (isa<ConstantSDNode>(Operand.getNode()) ||
isa<ConstantFPSDNode>(Operand.getNode()))
continue;
- // Is this a VSrc or SSrc operand ?
+ // Is this a VSrc or SSrc operand?
unsigned RegClass = Desc->OpInfo[Op].RegClass;
if (isVSrc(RegClass) || isSSrc(RegClass)) {
// Try to fold the immediates
if (!foldImm(Ops[i], Immediate, ScalarSlotUsed)) {
- // Folding didn't worked, make sure we don't hit the SReg limit
+ // Folding didn't work, make sure we don't hit the SReg limit.
ensureSRegLimit(DAG, Ops[i], RegClass, ScalarSlotUsed);
}
continue;
+ } else {
+      // If it's not a VSrc or SSrc operand, check if we have a GlobalAddress.
+ // These will be lowered to immediates, so we will need to insert a MOV.
+ if (isa<GlobalAddressSDNode>(Ops[i])) {
+ SDNode *Node = DAG.getMachineNode(AMDGPU::V_MOV_B32_e32, SDLoc(),
+ Operand.getValueType(), Operand);
+ Ops[i] = SDValue(Node, 0);
+ }
}
if (i == 1 && DescRev && fitsRegClass(DAG, Ops[0], RegClass)) {
@@ -1168,18 +1558,18 @@ SDNode *SITargetLowering::foldOperands(MachineSDNode *Node,
fitsRegClass(DAG, Ops[1], OtherRegClass))) {
// Swap commutable operands
- SDValue Tmp = Ops[1];
- Ops[1] = Ops[0];
- Ops[0] = Tmp;
+ std::swap(Ops[0], Ops[1]);
Desc = DescRev;
- DescRev = 0;
+ DescRev = nullptr;
continue;
}
}
- if (DescE64 && !Immediate) {
+ if (Immediate)
+ continue;
+ if (DescE64) {
// Test if it makes sense to switch to e64 encoding
unsigned OtherRegClass = DescE64->OpInfo[Op].RegClass;
if (!isVSrc(OtherRegClass) && !isSSrc(OtherRegClass))
@@ -1194,14 +1584,46 @@ SDNode *SITargetLowering::foldOperands(MachineSDNode *Node,
Immediate = -1;
Promote2e64 = true;
Desc = DescE64;
- DescE64 = 0;
+ DescE64 = nullptr;
}
}
+
+ if (!DescE64 && !Promote2e64)
+ continue;
+ if (!Operand.isMachineOpcode())
+ continue;
+ if (Operand.getMachineOpcode() == AMDGPU::FNEG_SI) {
+ Ops.pop_back();
+ Ops.push_back(Operand.getOperand(0));
+ InputModifiers[i] = 1;
+ Promote2e64 = true;
+ if (!DescE64)
+ continue;
+ Desc = DescE64;
+ DescE64 = nullptr;
+ }
+ else if (Operand.getMachineOpcode() == AMDGPU::FABS_SI) {
+ Ops.pop_back();
+ Ops.push_back(Operand.getOperand(0));
+ InputModifiers[i] = 2;
+ Promote2e64 = true;
+ if (!DescE64)
+ continue;
+ Desc = DescE64;
+ DescE64 = nullptr;
+ }
}
if (Promote2e64) {
+ std::vector<SDValue> OldOps(Ops);
+ Ops.clear();
+ for (unsigned i = 0; i < OldOps.size(); ++i) {
+ // src_modifier
+ Ops.push_back(DAG.getTargetConstant(InputModifiers[i], MVT::i32));
+ Ops.push_back(OldOps[i]);
+ }
// Add the modifier flags while promoting
- for (unsigned i = 0; i < 4; ++i)
+ for (unsigned i = 0; i < 2; ++i)
Ops.push_back(DAG.getTargetConstant(0, MVT::i32));
}
@@ -1279,7 +1701,7 @@ void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
Ops.push_back(DAG.getTargetConstant(NewDmask, MVT::i32));
for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
Ops.push_back(Node->getOperand(i));
- Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops.data(), Ops.size());
+ Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops);
// If we only got one lane, replace it with a copy
// (if NewDmask has only one bit set...)
@@ -1311,7 +1733,7 @@ void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
}
}
-/// \brief Fold the instructions after slecting them
+/// \brief Fold the instructions after selecting them.
SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
SelectionDAG &DAG) const {
const SIInstrInfo *TII =
diff --git a/contrib/llvm/lib/Target/R600/SIISelLowering.h b/contrib/llvm/lib/Target/R600/SIISelLowering.h
index 9933ece..d106d4a 100644
--- a/contrib/llvm/lib/Target/R600/SIISelLowering.h
+++ b/contrib/llvm/lib/Target/R600/SIISelLowering.h
@@ -22,18 +22,22 @@ namespace llvm {
class SITargetLowering : public AMDGPUTargetLowering {
SDValue LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT, SDLoc DL,
- SDValue Chain, unsigned Offset) const;
+ SDValue Chain, unsigned Offset, bool Signed) const;
SDValue LowerSampleIntrinsic(unsigned Opcode, const SDValue &Op,
SelectionDAG &DAG) const;
+ SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
+ SelectionDAG &DAG) const override;
+ SDValue LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFastFDIV(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFDIV32(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFDIV64(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFDIV(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerADD(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerTrig(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
- SDValue ResourceDescriptorToi128(SDValue Op, SelectionDAG &DAG) const;
bool foldImm(SDValue &Operand, int32_t &Immediate,
bool &ScalarSlotUsed) const;
const TargetRegisterClass *getRegClassForNode(SelectionDAG &DAG,
@@ -47,31 +51,40 @@ class SITargetLowering : public AMDGPUTargetLowering {
void adjustWritemask(MachineSDNode *&N, SelectionDAG &DAG) const;
MachineSDNode *AdjustRegClass(MachineSDNode *N, SelectionDAG &DAG) const;
+ static SDValue performUCharToFloatCombine(SDNode *N,
+ DAGCombinerInfo &DCI);
+
public:
SITargetLowering(TargetMachine &tm);
- bool allowsUnalignedMemoryAccesses(EVT VT, bool *IsFast) const;
- virtual bool shouldSplitVectorElementType(EVT VT) const;
+ bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AS,
+ bool *IsFast) const override;
+
+ TargetLoweringBase::LegalizeTypeAction
+ getPreferredVectorAction(EVT VT) const override;
+
+ bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
+ Type *Ty) const override;
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc DL, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
+ SmallVectorImpl<SDValue> &InVals) const override;
- virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr * MI,
- MachineBasicBlock * BB) const;
- virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
- virtual MVT getScalarShiftAmountTy(EVT VT) const;
- virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
- virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
- virtual SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const;
- virtual void AdjustInstrPostInstrSelection(MachineInstr *MI,
- SDNode *Node) const;
+ MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr * MI,
+ MachineBasicBlock * BB) const override;
+ EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;
+ MVT getScalarShiftAmountTy(EVT VT) const override;
+ bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+ SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
+ SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const override;
+ void AdjustInstrPostInstrSelection(MachineInstr *MI,
+ SDNode *Node) const override;
int32_t analyzeImmediate(const SDNode *N) const;
SDValue CreateLiveInRegister(SelectionDAG &DAG, const TargetRegisterClass *RC,
- unsigned Reg, EVT VT) const;
+ unsigned Reg, EVT VT) const override;
};
} // End namespace llvm
diff --git a/contrib/llvm/lib/Target/R600/SIInsertWaits.cpp b/contrib/llvm/lib/Target/R600/SIInsertWaits.cpp
index 695ec40..7dfc31b 100644
--- a/contrib/llvm/lib/Target/R600/SIInsertWaits.cpp
+++ b/contrib/llvm/lib/Target/R600/SIInsertWaits.cpp
@@ -97,13 +97,13 @@ private:
public:
SIInsertWaits(TargetMachine &tm) :
MachineFunctionPass(ID),
- TII(0),
- TRI(0),
+ TII(nullptr),
+ TRI(nullptr),
ExpInstrTypesSeen(0) { }
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- const char *getPassName() const {
+ const char *getPassName() const override {
return "SI insert wait instructions";
}
@@ -273,17 +273,17 @@ bool SIInsertWaits::insertWait(MachineBasicBlock &MBB,
continue;
NeedWait = true;
-
+
if (Ordered[i]) {
unsigned Value = LastIssued.Array[i] - Required.Array[i];
- // adjust the value to the real hardware posibilities
+ // Adjust the value to the real hardware possibilities.
Counts.Array[i] = std::min(Value, WaitCounts.Array[i]);
} else
Counts.Array[i] = 0;
- // Remember on what we have waited on
+ // Remember on what we have waited on.
WaitedOn.Array[i] = LastIssued.Array[i] - Counts.Array[i];
}
@@ -341,6 +341,8 @@ Counters SIInsertWaits::handleOperands(MachineInstr &MI) {
return Result;
}
+// FIXME: Insert waits listed in Table 4.2 "Required User-Inserted Wait States"
+// around other non-memory instructions.
bool SIInsertWaits::runOnMachineFunction(MachineFunction &MF) {
bool Changes = false;
diff --git a/contrib/llvm/lib/Target/R600/SIInstrFormats.td b/contrib/llvm/lib/Target/R600/SIInstrFormats.td
index 53ebaaf..00e69dd 100644
--- a/contrib/llvm/lib/Target/R600/SIInstrFormats.td
+++ b/contrib/llvm/lib/Target/R600/SIInstrFormats.td
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
class InstSI <dag outs, dag ins, string asm, list<dag> pattern> :
- AMDGPUInst<outs, ins, asm, pattern> {
+ AMDGPUInst<outs, ins, asm, pattern>, PredicateControl {
field bits<1> VM_CNT = 0;
field bits<1> EXP_CNT = 0;
@@ -37,26 +37,35 @@ class InstSI <dag outs, dag ins, string asm, list<dag> pattern> :
let TSFlags{9} = SALU;
}
-class Enc32 <dag outs, dag ins, string asm, list<dag> pattern> :
- InstSI <outs, ins, asm, pattern> {
+class Enc32 {
field bits<32> Inst;
- let Size = 4;
+ int Size = 4;
}
-class Enc64 <dag outs, dag ins, string asm, list<dag> pattern> :
- InstSI <outs, ins, asm, pattern> {
+class Enc64 {
field bits<64> Inst;
- let Size = 8;
+ int Size = 8;
+}
+
+class VOP3Common <dag outs, dag ins, string asm, list<dag> pattern> :
+ InstSI <outs, ins, asm, pattern> {
+
+ let mayLoad = 0;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let UseNamedOperandTable = 1;
+ let VOP3 = 1;
+
+ int Size = 8;
}
//===----------------------------------------------------------------------===//
// Scalar operations
//===----------------------------------------------------------------------===//
-class SOP1 <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> :
- Enc32<outs, ins, asm, pattern> {
+class SOP1e <bits<8> op> : Enc32 {
bits<7> SDST;
bits<8> SSRC0;
@@ -65,16 +74,10 @@ class SOP1 <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> :
let Inst{15-8} = op;
let Inst{22-16} = SDST;
let Inst{31-23} = 0x17d; //encoding;
-
- let mayLoad = 0;
- let mayStore = 0;
- let hasSideEffects = 0;
- let SALU = 1;
}
-class SOP2 <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
- Enc32 <outs, ins, asm, pattern> {
-
+class SOP2e <bits<7> op> : Enc32 {
+
bits<7> SDST;
bits<8> SSRC0;
bits<8> SSRC1;
@@ -84,15 +87,9 @@ class SOP2 <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
let Inst{22-16} = SDST;
let Inst{29-23} = op;
let Inst{31-30} = 0x2; // encoding
-
- let mayLoad = 0;
- let mayStore = 0;
- let hasSideEffects = 0;
- let SALU = 1;
}
-class SOPC <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
- Enc32<outs, ins, asm, pattern> {
+class SOPCe <bits<7> op> : Enc32 {
bits<8> SSRC0;
bits<8> SSRC1;
@@ -101,62 +98,90 @@ class SOPC <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
let Inst{15-8} = SSRC1;
let Inst{22-16} = op;
let Inst{31-23} = 0x17e;
-
- let DisableEncoding = "$dst";
- let mayLoad = 0;
- let mayStore = 0;
- let hasSideEffects = 0;
- let SALU = 1;
}
-class SOPK <bits<5> op, dag outs, dag ins, string asm, list<dag> pattern> :
- Enc32 <outs, ins , asm, pattern> {
+class SOPKe <bits<5> op> : Enc32 {
bits <7> SDST;
bits <16> SIMM16;
-
+
let Inst{15-0} = SIMM16;
let Inst{22-16} = SDST;
let Inst{27-23} = op;
let Inst{31-28} = 0xb; //encoding
-
- let mayLoad = 0;
- let mayStore = 0;
- let hasSideEffects = 0;
- let SALU = 1;
}
-class SOPP <bits<7> op, dag ins, string asm, list<dag> pattern> : Enc32 <
- (outs),
- ins,
- asm,
- pattern > {
+class SOPPe <bits<7> op> : Enc32 {
- bits <16> SIMM16;
+ bits <16> simm16;
- let Inst{15-0} = SIMM16;
+ let Inst{15-0} = simm16;
let Inst{22-16} = op;
let Inst{31-23} = 0x17f; // encoding
-
- let mayLoad = 0;
- let mayStore = 0;
- let hasSideEffects = 0;
- let SALU = 1;
}
-class SMRD <bits<5> op, bits<1> imm, dag outs, dag ins, string asm,
- list<dag> pattern> : Enc32<outs, ins, asm, pattern> {
+class SMRDe <bits<5> op, bits<1> imm> : Enc32 {
bits<7> SDST;
bits<7> SBASE;
bits<8> OFFSET;
-
+
let Inst{7-0} = OFFSET;
let Inst{8} = imm;
let Inst{14-9} = SBASE{6-1};
let Inst{21-15} = SDST;
let Inst{26-22} = op;
let Inst{31-27} = 0x18; //encoding
+}
+
+class SOP1 <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> :
+ InstSI<outs, ins, asm, pattern>, SOP1e <op> {
+
+ let mayLoad = 0;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let SALU = 1;
+}
+
+class SOP2 <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
+ InstSI <outs, ins, asm, pattern>, SOP2e<op> {
+
+ let mayLoad = 0;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let SALU = 1;
+}
+
+class SOPC <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
+ InstSI<outs, ins, asm, pattern>, SOPCe <op> {
+
+ let DisableEncoding = "$dst";
+ let mayLoad = 0;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let SALU = 1;
+}
+
+class SOPK <bits<5> op, dag outs, dag ins, string asm, list<dag> pattern> :
+ InstSI <outs, ins , asm, pattern>, SOPKe<op> {
+
+ let mayLoad = 0;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let SALU = 1;
+}
+
+class SOPP <bits<7> op, dag ins, string asm, list<dag> pattern> :
+ InstSI <(outs), ins, asm, pattern >, SOPPe <op> {
+
+ let mayLoad = 0;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let SALU = 1;
+}
+
+class SMRD <bits<5> op, bits<1> imm, dag outs, dag ins, string asm,
+ list<dag> pattern> : InstSI<outs, ins, asm, pattern>, SMRDe<op, imm> {
let LGKM_CNT = 1;
let SMRD = 1;
@@ -165,61 +190,47 @@ class SMRD <bits<5> op, bits<1> imm, dag outs, dag ins, string asm,
//===----------------------------------------------------------------------===//
// Vector ALU operations
//===----------------------------------------------------------------------===//
-
-let Uses = [EXEC] in {
-class VOP1 <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> :
- Enc32 <outs, ins, asm, pattern> {
+class VOP1e <bits<8> op> : Enc32 {
bits<8> VDST;
bits<9> SRC0;
-
+
let Inst{8-0} = SRC0;
let Inst{16-9} = op;
let Inst{24-17} = VDST;
let Inst{31-25} = 0x3f; //encoding
-
- let mayLoad = 0;
- let mayStore = 0;
- let hasSideEffects = 0;
- let UseNamedOperandTable = 1;
- let VOP1 = 1;
}
-class VOP2 <bits<6> op, dag outs, dag ins, string asm, list<dag> pattern> :
- Enc32 <outs, ins, asm, pattern> {
+class VOP2e <bits<6> op> : Enc32 {
bits<8> VDST;
bits<9> SRC0;
bits<8> VSRC1;
-
+
let Inst{8-0} = SRC0;
let Inst{16-9} = VSRC1;
let Inst{24-17} = VDST;
let Inst{30-25} = op;
let Inst{31} = 0x0; //encoding
-
- let mayLoad = 0;
- let mayStore = 0;
- let hasSideEffects = 0;
- let UseNamedOperandTable = 1;
- let VOP2 = 1;
}
-class VOP3 <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern> :
- Enc64 <outs, ins, asm, pattern> {
+class VOP3e <bits<9> op> : Enc64 {
bits<8> dst;
+ bits<2> src0_modifiers;
bits<9> src0;
+ bits<2> src1_modifiers;
bits<9> src1;
+ bits<2> src2_modifiers;
bits<9> src2;
- bits<3> abs;
bits<1> clamp;
bits<2> omod;
- bits<3> neg;
let Inst{7-0} = dst;
- let Inst{10-8} = abs;
+ let Inst{8} = src0_modifiers{1};
+ let Inst{9} = src1_modifiers{1};
+ let Inst{10} = src2_modifiers{1};
let Inst{11} = clamp;
let Inst{25-17} = op;
let Inst{31-26} = 0x34; //encoding
@@ -227,25 +238,22 @@ class VOP3 <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern> :
let Inst{49-41} = src1;
let Inst{58-50} = src2;
let Inst{60-59} = omod;
- let Inst{63-61} = neg;
-
- let mayLoad = 0;
- let mayStore = 0;
- let hasSideEffects = 0;
- let UseNamedOperandTable = 1;
- let VOP3 = 1;
+ let Inst{61} = src0_modifiers{0};
+ let Inst{62} = src1_modifiers{0};
+ let Inst{63} = src2_modifiers{0};
}
-class VOP3b <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern> :
- Enc64 <outs, ins, asm, pattern> {
+class VOP3be <bits<9> op> : Enc64 {
bits<8> dst;
+ bits<2> src0_modifiers;
bits<9> src0;
+ bits<2> src1_modifiers;
bits<9> src1;
+ bits<2> src2_modifiers;
bits<9> src2;
bits<7> sdst;
bits<2> omod;
- bits<3> neg;
let Inst{7-0} = dst;
let Inst{14-8} = sdst;
@@ -255,17 +263,12 @@ class VOP3b <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern> :
let Inst{49-41} = src1;
let Inst{58-50} = src2;
let Inst{60-59} = omod;
- let Inst{63-61} = neg;
-
- let mayLoad = 0;
- let mayStore = 0;
- let hasSideEffects = 0;
- let UseNamedOperandTable = 1;
- let VOP3 = 1;
+ let Inst{61} = src0_modifiers{0};
+ let Inst{62} = src1_modifiers{0};
+ let Inst{63} = src2_modifiers{0};
}
-class VOPC <bits<8> op, dag ins, string asm, list<dag> pattern> :
- Enc32 <(outs VCCReg:$dst), ins, asm, pattern> {
+class VOPCe <bits<8> op> : Enc32 {
bits<9> SRC0;
bits<8> VSRC1;
@@ -274,16 +277,9 @@ class VOPC <bits<8> op, dag ins, string asm, list<dag> pattern> :
let Inst{16-9} = VSRC1;
let Inst{24-17} = op;
let Inst{31-25} = 0x3e;
-
- let DisableEncoding = "$dst";
- let mayLoad = 0;
- let mayStore = 0;
- let hasSideEffects = 0;
- let VOPC = 1;
}
-class VINTRP <bits <2> op, dag outs, dag ins, string asm, list<dag> pattern> :
- Enc32 <outs, ins, asm, pattern> {
+class VINTRPe <bits<2> op> : Enc32 {
bits<8> VDST;
bits<8> VSRC;
@@ -296,22 +292,9 @@ class VINTRP <bits <2> op, dag outs, dag ins, string asm, list<dag> pattern> :
let Inst{17-16} = op;
let Inst{25-18} = VDST;
let Inst{31-26} = 0x32; // encoding
-
- let neverHasSideEffects = 1;
- let mayLoad = 1;
- let mayStore = 0;
}
-} // End Uses = [EXEC]
-
-//===----------------------------------------------------------------------===//
-// Vector I/O operations
-//===----------------------------------------------------------------------===//
-
-let Uses = [EXEC] in {
-
-class DS <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> :
- Enc64 <outs, ins, asm, pattern> {
+class DSe <bits<8> op> : Enc64 {
bits<8> vdst;
bits<1> gds;
@@ -330,12 +313,9 @@ class DS <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> :
let Inst{47-40} = data0;
let Inst{55-48} = data1;
let Inst{63-56} = vdst;
-
- let LGKM_CNT = 1;
}
-class MUBUF <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
- Enc64<outs, ins, asm, pattern> {
+class MUBUFe <bits<7> op> : Enc64 {
bits<12> offset;
bits<1> offen;
@@ -364,15 +344,9 @@ class MUBUF <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
let Inst{54} = slc;
let Inst{55} = tfe;
let Inst{63-56} = soffset;
-
- let VM_CNT = 1;
- let EXP_CNT = 1;
-
- let neverHasSideEffects = 1;
}
-class MTBUF <bits<3> op, dag outs, dag ins, string asm, list<dag> pattern> :
- Enc64<outs, ins, asm, pattern> {
+class MTBUFe <bits<3> op> : Enc64 {
bits<8> VDATA;
bits<12> OFFSET;
@@ -403,15 +377,9 @@ class MTBUF <bits<3> op, dag outs, dag ins, string asm, list<dag> pattern> :
let Inst{54} = SLC;
let Inst{55} = TFE;
let Inst{63-56} = SOFFSET;
-
- let VM_CNT = 1;
- let EXP_CNT = 1;
-
- let neverHasSideEffects = 1;
}
-class MIMG <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
- Enc64 <outs, ins, asm, pattern> {
+class MIMGe <bits<7> op> : Enc64 {
bits<8> VDATA;
bits<4> DMASK;
@@ -424,7 +392,7 @@ class MIMG <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
bits<1> SLC;
bits<8> VADDR;
bits<7> SRSRC;
- bits<7> SSAMP;
+ bits<7> SSAMP;
let Inst{11-8} = DMASK;
let Inst{12} = UNORM;
@@ -440,18 +408,9 @@ class MIMG <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
let Inst{47-40} = VDATA;
let Inst{52-48} = SRSRC{6-2};
let Inst{57-53} = SSAMP{6-2};
-
- let VM_CNT = 1;
- let EXP_CNT = 1;
- let MIMG = 1;
}
-def EXP : Enc64<
- (outs),
- (ins i32imm:$en, i32imm:$tgt, i32imm:$compr, i32imm:$done, i32imm:$vm,
- VReg_32:$src0, VReg_32:$src1, VReg_32:$src2, VReg_32:$src3),
- "EXP $en, $tgt, $compr, $done, $vm, $src0, $src1, $src2, $src3",
- [] > {
+class EXPe : Enc64 {
bits<4> EN;
bits<6> TGT;
@@ -473,6 +432,102 @@ def EXP : Enc64<
let Inst{47-40} = VSRC1;
let Inst{55-48} = VSRC2;
let Inst{63-56} = VSRC3;
+}
+
+let Uses = [EXEC] in {
+
+class VOP1 <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> :
+ InstSI <outs, ins, asm, pattern>, VOP1e<op> {
+
+ let mayLoad = 0;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let UseNamedOperandTable = 1;
+ let VOP1 = 1;
+}
+
+class VOP2 <bits<6> op, dag outs, dag ins, string asm, list<dag> pattern> :
+ InstSI <outs, ins, asm, pattern>, VOP2e<op> {
+
+ let mayLoad = 0;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let UseNamedOperandTable = 1;
+ let VOP2 = 1;
+}
+
+class VOP3 <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern> :
+ VOP3Common <outs, ins, asm, pattern>, VOP3e<op>;
+
+class VOP3b <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern> :
+ VOP3Common <outs, ins, asm, pattern>, VOP3be<op>;
+
+class VOPC <bits<8> op, dag ins, string asm, list<dag> pattern> :
+ InstSI <(outs VCCReg:$dst), ins, asm, pattern>, VOPCe <op> {
+
+ let DisableEncoding = "$dst";
+ let mayLoad = 0;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let UseNamedOperandTable = 1;
+ let VOPC = 1;
+}
+
+class VINTRP <bits <2> op, dag outs, dag ins, string asm, list<dag> pattern> :
+ InstSI <outs, ins, asm, pattern>, VINTRPe<op> {
+
+ let neverHasSideEffects = 1;
+ let mayLoad = 1;
+ let mayStore = 0;
+}
+
+} // End Uses = [EXEC]
+
+//===----------------------------------------------------------------------===//
+// Vector I/O operations
+//===----------------------------------------------------------------------===//
+
+let Uses = [EXEC] in {
+
+class DS <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> :
+ InstSI <outs, ins, asm, pattern> , DSe<op> {
+
+ let LGKM_CNT = 1;
+}
+
+class MUBUF <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
+ InstSI<outs, ins, asm, pattern>, MUBUFe <op> {
+
+ let VM_CNT = 1;
+ let EXP_CNT = 1;
+
+ let neverHasSideEffects = 1;
+ let UseNamedOperandTable = 1;
+}
+
+class MTBUF <bits<3> op, dag outs, dag ins, string asm, list<dag> pattern> :
+ InstSI<outs, ins, asm, pattern>, MTBUFe <op> {
+
+ let VM_CNT = 1;
+ let EXP_CNT = 1;
+
+ let neverHasSideEffects = 1;
+}
+
+class MIMG <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
+ InstSI <outs, ins, asm, pattern>, MIMGe <op> {
+
+ let VM_CNT = 1;
+ let EXP_CNT = 1;
+ let MIMG = 1;
+}
+
+def EXP : InstSI<
+ (outs),
+ (ins i32imm:$en, i32imm:$tgt, i32imm:$compr, i32imm:$done, i32imm:$vm,
+ VReg_32:$src0, VReg_32:$src1, VReg_32:$src2, VReg_32:$src3),
+ "EXP $en, $tgt, $compr, $done, $vm, $src0, $src1, $src2, $src3",
+ [] >, EXPe {
let EXP_CNT = 1;
}
diff --git a/contrib/llvm/lib/Target/R600/SIInstrInfo.cpp b/contrib/llvm/lib/Target/R600/SIInstrInfo.cpp
index ab55c1b..51f4532 100644
--- a/contrib/llvm/lib/Target/R600/SIInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/R600/SIInstrInfo.cpp
@@ -16,20 +16,17 @@
#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
+#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/Function.h"
#include "llvm/MC/MCInstrDesc.h"
using namespace llvm;
-SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm)
- : AMDGPUInstrInfo(tm),
- RI(tm)
- { }
-
-const SIRegisterInfo &SIInstrInfo::getRegisterInfo() const {
- return RI;
-}
+SIInstrInfo::SIInstrInfo(const AMDGPUSubtarget &st)
+ : AMDGPUInstrInfo(st),
+ RI(st) { }
//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
@@ -185,23 +182,226 @@ unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {
return Opcode;
}
+void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned SrcReg, bool isKill,
+ int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ MachineFunction *MF = MBB.getParent();
+ SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+ DebugLoc DL = MBB.findDebugLoc(MI);
+ unsigned KillFlag = isKill ? RegState::Kill : 0;
+
+ if (RI.hasVGPRs(RC)) {
+ LLVMContext &Ctx = MF->getFunction()->getContext();
+ Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Can't spill VGPR!");
+ BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), AMDGPU::VGPR0)
+ .addReg(SrcReg);
+ } else if (TRI->getCommonSubClass(RC, &AMDGPU::SGPR_32RegClass)) {
+ unsigned Lane = MFI->SpillTracker.reserveLanes(MRI, MF);
+ unsigned TgtReg = MFI->SpillTracker.LaneVGPR;
+
+ BuildMI(MBB, MI, DL, get(AMDGPU::V_WRITELANE_B32), TgtReg)
+ .addReg(SrcReg, KillFlag)
+ .addImm(Lane);
+ MFI->SpillTracker.addSpilledReg(FrameIndex, TgtReg, Lane);
+ } else if (RI.isSGPRClass(RC)) {
+ // We are only allowed to create one new instruction when spilling
+    // registers, so we need to use a pseudo instruction for vector
+ // registers.
+ //
+ // Reserve a spot in the spill tracker for each sub-register of
+ // the vector register.
+ unsigned NumSubRegs = RC->getSize() / 4;
+ unsigned FirstLane = MFI->SpillTracker.reserveLanes(MRI, MF, NumSubRegs);
+ MFI->SpillTracker.addSpilledReg(FrameIndex, MFI->SpillTracker.LaneVGPR,
+ FirstLane);
+
+ unsigned Opcode;
+ switch (RC->getSize() * 8) {
+ case 64: Opcode = AMDGPU::SI_SPILL_S64_SAVE; break;
+ case 128: Opcode = AMDGPU::SI_SPILL_S128_SAVE; break;
+ case 256: Opcode = AMDGPU::SI_SPILL_S256_SAVE; break;
+ case 512: Opcode = AMDGPU::SI_SPILL_S512_SAVE; break;
+ default: llvm_unreachable("Cannot spill register class");
+ }
+
+ BuildMI(MBB, MI, DL, get(Opcode), MFI->SpillTracker.LaneVGPR)
+ .addReg(SrcReg)
+ .addImm(FrameIndex);
+ } else {
+ llvm_unreachable("VGPR spilling not supported");
+ }
+}
+
+void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ MachineFunction *MF = MBB.getParent();
+ SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
+ DebugLoc DL = MBB.findDebugLoc(MI);
+
+ if (RI.hasVGPRs(RC)) {
+ LLVMContext &Ctx = MF->getFunction()->getContext();
+ Ctx.emitError("SIInstrInfo::loadRegToStackSlot - Can't retrieve spilled VGPR!");
+ BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
+ .addImm(0);
+ } else if (RI.isSGPRClass(RC)){
+ unsigned Opcode;
+ switch(RC->getSize() * 8) {
+ case 32: Opcode = AMDGPU::SI_SPILL_S32_RESTORE; break;
+ case 64: Opcode = AMDGPU::SI_SPILL_S64_RESTORE; break;
+ case 128: Opcode = AMDGPU::SI_SPILL_S128_RESTORE; break;
+ case 256: Opcode = AMDGPU::SI_SPILL_S256_RESTORE; break;
+ case 512: Opcode = AMDGPU::SI_SPILL_S512_RESTORE; break;
+ default: llvm_unreachable("Cannot spill register class");
+ }
+
+ SIMachineFunctionInfo::SpilledReg Spill =
+ MFI->SpillTracker.getSpilledReg(FrameIndex);
+
+ BuildMI(MBB, MI, DL, get(Opcode), DestReg)
+ .addReg(Spill.VGPR)
+ .addImm(FrameIndex);
+ } else {
+ llvm_unreachable("VGPR spilling not supported");
+ }
+}
+
+static unsigned getNumSubRegsForSpillOp(unsigned Op) {
+
+ switch (Op) {
+ case AMDGPU::SI_SPILL_S512_SAVE:
+ case AMDGPU::SI_SPILL_S512_RESTORE:
+ return 16;
+ case AMDGPU::SI_SPILL_S256_SAVE:
+ case AMDGPU::SI_SPILL_S256_RESTORE:
+ return 8;
+ case AMDGPU::SI_SPILL_S128_SAVE:
+ case AMDGPU::SI_SPILL_S128_RESTORE:
+ return 4;
+ case AMDGPU::SI_SPILL_S64_SAVE:
+ case AMDGPU::SI_SPILL_S64_RESTORE:
+ return 2;
+ case AMDGPU::SI_SPILL_S32_RESTORE:
+ return 1;
+ default: llvm_unreachable("Invalid spill opcode");
+ }
+}
+
+void SIInstrInfo::insertNOPs(MachineBasicBlock::iterator MI,
+ int Count) const {
+ while (Count > 0) {
+ int Arg;
+ if (Count >= 8)
+ Arg = 7;
+ else
+ Arg = Count - 1;
+ Count -= 8;
+ BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(AMDGPU::S_NOP))
+ .addImm(Arg);
+ }
+}
+
+bool SIInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
+ SIMachineFunctionInfo *MFI =
+ MI->getParent()->getParent()->getInfo<SIMachineFunctionInfo>();
+ MachineBasicBlock &MBB = *MI->getParent();
+ DebugLoc DL = MBB.findDebugLoc(MI);
+ switch (MI->getOpcode()) {
+ default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);
+
+ // SGPR register spill
+ case AMDGPU::SI_SPILL_S512_SAVE:
+ case AMDGPU::SI_SPILL_S256_SAVE:
+ case AMDGPU::SI_SPILL_S128_SAVE:
+ case AMDGPU::SI_SPILL_S64_SAVE: {
+ unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
+ unsigned FrameIndex = MI->getOperand(2).getImm();
+
+ for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
+ SIMachineFunctionInfo::SpilledReg Spill;
+ unsigned SubReg = RI.getPhysRegSubReg(MI->getOperand(1).getReg(),
+ &AMDGPU::SGPR_32RegClass, i);
+ Spill = MFI->SpillTracker.getSpilledReg(FrameIndex);
+
+ BuildMI(MBB, MI, DL, get(AMDGPU::V_WRITELANE_B32),
+ MI->getOperand(0).getReg())
+ .addReg(SubReg)
+ .addImm(Spill.Lane + i);
+ }
+ MI->eraseFromParent();
+ break;
+ }
+
+ // SGPR register restore
+ case AMDGPU::SI_SPILL_S512_RESTORE:
+ case AMDGPU::SI_SPILL_S256_RESTORE:
+ case AMDGPU::SI_SPILL_S128_RESTORE:
+ case AMDGPU::SI_SPILL_S64_RESTORE:
+ case AMDGPU::SI_SPILL_S32_RESTORE: {
+ unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
+
+ for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
+ SIMachineFunctionInfo::SpilledReg Spill;
+ unsigned FrameIndex = MI->getOperand(2).getImm();
+ unsigned SubReg = RI.getPhysRegSubReg(MI->getOperand(0).getReg(),
+ &AMDGPU::SGPR_32RegClass, i);
+ Spill = MFI->SpillTracker.getSpilledReg(FrameIndex);
+
+ BuildMI(MBB, MI, DL, get(AMDGPU::V_READLANE_B32), SubReg)
+ .addReg(MI->getOperand(1).getReg())
+ .addImm(Spill.Lane + i);
+ }
+ insertNOPs(MI, 3);
+ MI->eraseFromParent();
+ break;
+ }
+ case AMDGPU::SI_CONSTDATA_PTR: {
+ unsigned Reg = MI->getOperand(0).getReg();
+ unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
+ unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1);
+
+ BuildMI(MBB, MI, DL, get(AMDGPU::S_GETPC_B64), Reg);
+
+ // Add 32-bit offset from this instruction to the start of the constant data.
+ BuildMI(MBB, MI, DL, get(AMDGPU::S_ADD_I32), RegLo)
+ .addReg(RegLo)
+ .addTargetIndex(AMDGPU::TI_CONSTDATA_START)
+ .addReg(AMDGPU::SCC, RegState::Define | RegState::Implicit);
+ BuildMI(MBB, MI, DL, get(AMDGPU::S_ADDC_U32), RegHi)
+ .addReg(RegHi)
+ .addImm(0)
+ .addReg(AMDGPU::SCC, RegState::Define | RegState::Implicit)
+ .addReg(AMDGPU::SCC, RegState::Implicit);
+ MI->eraseFromParent();
+ break;
+ }
+ }
+ return true;
+}
+
MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
bool NewMI) const {
MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg())
- return 0;
+ return nullptr;
// Cannot commute VOP2 if src0 is SGPR.
if (isVOP2(MI->getOpcode()) && MI->getOperand(1).isReg() &&
RI.isSGPRClass(MRI.getRegClass(MI->getOperand(1).getReg())))
- return 0;
+ return nullptr;
if (!MI->getOperand(2).isReg()) {
// XXX: Commute instructions with FPImm operands
if (NewMI || MI->getOperand(2).isFPImm() ||
(!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))) {
- return 0;
+ return nullptr;
}
// XXX: Commute VOP3 instructions with abs and neg set.
@@ -210,11 +410,13 @@ MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
AMDGPU::OpName::abs)).getImm() ||
MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
AMDGPU::OpName::neg)).getImm()))
- return 0;
+ return nullptr;
unsigned Reg = MI->getOperand(1).getReg();
+ unsigned SubReg = MI->getOperand(1).getSubReg();
MI->getOperand(1).ChangeToImmediate(MI->getOperand(2).getImm());
MI->getOperand(2).ChangeToRegister(Reg, false);
+ MI->getOperand(2).setSubReg(SubReg);
} else {
MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
}
@@ -249,6 +451,30 @@ SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
return RC != &AMDGPU::EXECRegRegClass;
}
+bool
+SIInstrInfo::isTriviallyReMaterializable(const MachineInstr *MI,
+ AliasAnalysis *AA) const {
+ switch(MI->getOpcode()) {
+ default: return AMDGPUInstrInfo::isTriviallyReMaterializable(MI, AA);
+ case AMDGPU::S_MOV_B32:
+ case AMDGPU::S_MOV_B64:
+ case AMDGPU::V_MOV_B32_e32:
+ return MI->getOperand(1).isImm();
+ }
+}
+
+namespace llvm {
+namespace AMDGPU {
+// Helper function generated by tablegen. We are wrapping this with
+// an SIInstrInfo function that returns bool rather than int.
+int isDS(uint16_t Opcode);
+}
+}
+
+bool SIInstrInfo::isDS(uint16_t Opcode) const {
+ return ::AMDGPU::isDS(Opcode) != -1;
+}
+
int SIInstrInfo::isMIMG(uint16_t Opcode) const {
return get(Opcode).TSFlags & SIInstrFlags::MIMG;
}
@@ -277,21 +503,40 @@ bool SIInstrInfo::isSALUInstr(const MachineInstr &MI) const {
return get(MI.getOpcode()).TSFlags & SIInstrFlags::SALU;
}
+bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
+ int32_t Val = Imm.getSExtValue();
+ if (Val >= -16 && Val <= 64)
+ return true;
+
+ // The actual type of the operand does not seem to matter as long
+ // as the bits match one of the inline immediate values. For example:
+ //
+ // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
+ // so it is a legal inline immediate.
+ //
+ // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
+ // floating-point, so it is a legal inline immediate.
+
+ return (APInt::floatToBits(0.0f) == Imm) ||
+ (APInt::floatToBits(1.0f) == Imm) ||
+ (APInt::floatToBits(-1.0f) == Imm) ||
+ (APInt::floatToBits(0.5f) == Imm) ||
+ (APInt::floatToBits(-0.5f) == Imm) ||
+ (APInt::floatToBits(2.0f) == Imm) ||
+ (APInt::floatToBits(-2.0f) == Imm) ||
+ (APInt::floatToBits(4.0f) == Imm) ||
+ (APInt::floatToBits(-4.0f) == Imm);
+}
+
bool SIInstrInfo::isInlineConstant(const MachineOperand &MO) const {
- if(MO.isImm()) {
- return MO.getImm() >= -16 && MO.getImm() <= 64;
- }
+ if (MO.isImm())
+ return isInlineConstant(APInt(32, MO.getImm(), true));
+
if (MO.isFPImm()) {
- return MO.getFPImm()->isExactlyValue(0.0) ||
- MO.getFPImm()->isExactlyValue(0.5) ||
- MO.getFPImm()->isExactlyValue(-0.5) ||
- MO.getFPImm()->isExactlyValue(1.0) ||
- MO.getFPImm()->isExactlyValue(-1.0) ||
- MO.getFPImm()->isExactlyValue(2.0) ||
- MO.getFPImm()->isExactlyValue(-2.0) ||
- MO.getFPImm()->isExactlyValue(4.0) ||
- MO.getFPImm()->isExactlyValue(-4.0);
+ APFloat FpImm = MO.getFPImm()->getValueAPF();
+ return isInlineConstant(FpImm.bitcastToAPInt());
}
+
return false;
}
@@ -299,6 +544,42 @@ bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO) const {
return (MO.isImm() || MO.isFPImm()) && !isInlineConstant(MO);
}
+static bool compareMachineOp(const MachineOperand &Op0,
+ const MachineOperand &Op1) {
+ if (Op0.getType() != Op1.getType())
+ return false;
+
+ switch (Op0.getType()) {
+ case MachineOperand::MO_Register:
+ return Op0.getReg() == Op1.getReg();
+ case MachineOperand::MO_Immediate:
+ return Op0.getImm() == Op1.getImm();
+ case MachineOperand::MO_FPImmediate:
+ return Op0.getFPImm() == Op1.getFPImm();
+ default:
+ llvm_unreachable("Didn't expect to be comparing these operand types");
+ }
+}
+
+bool SIInstrInfo::isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
+ const MachineOperand &MO) const {
+ const MCOperandInfo &OpInfo = get(MI->getOpcode()).OpInfo[OpNo];
+
+ assert(MO.isImm() || MO.isFPImm());
+
+ if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
+ return true;
+
+ if (OpInfo.RegClass < 0)
+ return false;
+
+ return RI.regClassCanUseImmediate(OpInfo.RegClass);
+}
+
+bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
+ return AMDGPU::getVOPe32(Opcode) != -1;
+}
+
bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
StringRef &ErrInfo) const {
uint16_t Opcode = MI->getOpcode();
@@ -306,6 +587,58 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
+ // Make sure the number of operands is correct.
+ const MCInstrDesc &Desc = get(Opcode);
+ if (!Desc.isVariadic() &&
+ Desc.getNumOperands() != MI->getNumExplicitOperands()) {
+ ErrInfo = "Instruction has wrong number of operands.";
+ return false;
+ }
+
+ // Make sure the register classes are correct
+ for (unsigned i = 0, e = Desc.getNumOperands(); i != e; ++i) {
+ switch (Desc.OpInfo[i].OperandType) {
+ case MCOI::OPERAND_REGISTER: {
+ int RegClass = Desc.OpInfo[i].RegClass;
+ if (!RI.regClassCanUseImmediate(RegClass) &&
+ (MI->getOperand(i).isImm() || MI->getOperand(i).isFPImm())) {
+ ErrInfo = "Expected register, but got immediate";
+ return false;
+ }
+ }
+ break;
+ case MCOI::OPERAND_IMMEDIATE:
+ // Check if this operand is an immediate.
+ // FrameIndex operands will be replaced by immediates, so they are
+ // allowed.
+ if (!MI->getOperand(i).isImm() && !MI->getOperand(i).isFPImm() &&
+ !MI->getOperand(i).isFI()) {
+ ErrInfo = "Expected immediate, but got non-immediate";
+ return false;
+ }
+ // Fall-through
+ default:
+ continue;
+ }
+
+ if (!MI->getOperand(i).isReg())
+ continue;
+
+ int RegClass = Desc.OpInfo[i].RegClass;
+ if (RegClass != -1) {
+ unsigned Reg = MI->getOperand(i).getReg();
+ if (TargetRegisterInfo::isVirtualRegister(Reg))
+ continue;
+
+ const TargetRegisterClass *RC = RI.getRegClass(RegClass);
+ if (!RC->contains(Reg)) {
+ ErrInfo = "Operand has incorrect register class.";
+ return false;
+ }
+ }
+ }
+
+
// Verify VOP*
if (isVOP1(Opcode) || isVOP2(Opcode) || isVOP3(Opcode) || isVOPC(Opcode)) {
unsigned ConstantBusCount = 0;
@@ -364,6 +697,24 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
return false;
}
}
+
+ // Verify misc. restrictions on specific instructions.
+ if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 ||
+ Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) {
+ MI->dump();
+
+ const MachineOperand &Src0 = MI->getOperand(2);
+ const MachineOperand &Src1 = MI->getOperand(3);
+ const MachineOperand &Src2 = MI->getOperand(4);
+ if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
+ if (!compareMachineOp(Src0, Src1) &&
+ !compareMachineOp(Src0, Src2)) {
+ ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
+ return false;
+ }
+ }
+ }
+
return true;
}
@@ -373,16 +724,49 @@ unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
case AMDGPU::COPY: return AMDGPU::COPY;
case AMDGPU::PHI: return AMDGPU::PHI;
+ case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
+ case AMDGPU::S_MOV_B32:
+ return MI.getOperand(1).isReg() ?
+ AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
case AMDGPU::S_ADD_I32: return AMDGPU::V_ADD_I32_e32;
case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
case AMDGPU::S_SUB_I32: return AMDGPU::V_SUB_I32_e32;
case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
+ case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e32;
+ case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e32;
+ case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e32;
+ case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e32;
+ case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e32;
+ case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e32;
+ case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e32;
case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
+ case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
+ case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
+ case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
+ case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
+ case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
+ case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
+ case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
+ case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
+ case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
+ case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
+ case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
+ case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
+ case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
+ case AMDGPU::S_LOAD_DWORD_IMM:
+ case AMDGPU::S_LOAD_DWORD_SGPR: return AMDGPU::BUFFER_LOAD_DWORD_ADDR64;
+ case AMDGPU::S_LOAD_DWORDX2_IMM:
+ case AMDGPU::S_LOAD_DWORDX2_SGPR: return AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64;
+ case AMDGPU::S_LOAD_DWORDX4_IMM:
+ case AMDGPU::S_LOAD_DWORDX4_SGPR: return AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64;
+ case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e32;
+ case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
+ case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
}
}
@@ -406,6 +790,8 @@ bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
switch (MI.getOpcode()) {
case AMDGPU::COPY:
case AMDGPU::REG_SEQUENCE:
+ case AMDGPU::PHI:
+ case AMDGPU::INSERT_SUBREG:
return RI.hasVGPRs(getOpRegClass(MI, 0));
default:
return RI.hasVGPRs(getOpRegClass(MI, OpNo));
@@ -432,6 +818,84 @@ void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
MO.ChangeToRegister(Reg, false);
}
+unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
+ MachineRegisterInfo &MRI,
+ MachineOperand &SuperReg,
+ const TargetRegisterClass *SuperRC,
+ unsigned SubIdx,
+ const TargetRegisterClass *SubRC)
+ const {
+ assert(SuperReg.isReg());
+
+ unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC);
+ unsigned SubReg = MRI.createVirtualRegister(SubRC);
+
+ // Just in case the super register is itself a sub-register, copy it to a new
+ // value so we don't need to worry about merging its subreg index with the
+ // SubIdx passed to this function. The register coalescer should be able to
+ // eliminate this extra copy.
+ BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(TargetOpcode::COPY),
+ NewSuperReg)
+ .addOperand(SuperReg);
+
+ BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(TargetOpcode::COPY),
+ SubReg)
+ .addReg(NewSuperReg, 0, SubIdx);
+ return SubReg;
+}
+
+MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
+ MachineBasicBlock::iterator MII,
+ MachineRegisterInfo &MRI,
+ MachineOperand &Op,
+ const TargetRegisterClass *SuperRC,
+ unsigned SubIdx,
+ const TargetRegisterClass *SubRC) const {
+ if (Op.isImm()) {
+ // XXX - Is there a better way to do this?
+ if (SubIdx == AMDGPU::sub0)
+ return MachineOperand::CreateImm(Op.getImm() & 0xFFFFFFFF);
+ if (SubIdx == AMDGPU::sub1)
+ return MachineOperand::CreateImm(Op.getImm() >> 32);
+
+ llvm_unreachable("Unhandled register index for immediate");
+ }
+
+ unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
+ SubIdx, SubRC);
+ return MachineOperand::CreateReg(SubReg, false);
+}
+
+unsigned SIInstrInfo::split64BitImm(SmallVectorImpl<MachineInstr *> &Worklist,
+ MachineBasicBlock::iterator MI,
+ MachineRegisterInfo &MRI,
+ const TargetRegisterClass *RC,
+ const MachineOperand &Op) const {
+ MachineBasicBlock *MBB = MI->getParent();
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned LoDst = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
+ unsigned HiDst = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
+ unsigned Dst = MRI.createVirtualRegister(RC);
+
+ MachineInstr *Lo = BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32),
+ LoDst)
+ .addImm(Op.getImm() & 0xFFFFFFFF);
+ MachineInstr *Hi = BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32),
+ HiDst)
+ .addImm(Op.getImm() >> 32);
+
+ BuildMI(*MBB, MI, DL, get(TargetOpcode::REG_SEQUENCE), Dst)
+ .addReg(LoDst)
+ .addImm(AMDGPU::sub0)
+ .addReg(HiDst)
+ .addImm(AMDGPU::sub1);
+
+ Worklist.push_back(Lo);
+ Worklist.push_back(Hi);
+
+ return Dst;
+}
+
void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
@@ -506,11 +970,12 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
}
}
- // Legalize REG_SEQUENCE
+ // Legalize REG_SEQUENCE and PHI
  // The register class of the operands must be the same type as the register
// class of the output.
- if (MI->getOpcode() == AMDGPU::REG_SEQUENCE) {
- const TargetRegisterClass *RC = NULL, *SRC = NULL, *VRC = NULL;
+ if (MI->getOpcode() == AMDGPU::REG_SEQUENCE ||
+ MI->getOpcode() == AMDGPU::PHI) {
+ const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
if (!MI->getOperand(i).isReg() ||
!TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
@@ -543,12 +1008,209 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
!TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
continue;
unsigned DstReg = MRI.createVirtualRegister(RC);
- BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
+ MachineBasicBlock *InsertBB;
+ MachineBasicBlock::iterator Insert;
+ if (MI->getOpcode() == AMDGPU::REG_SEQUENCE) {
+ InsertBB = MI->getParent();
+ Insert = MI;
+ } else {
+ // MI is a PHI instruction.
+ InsertBB = MI->getOperand(i + 1).getMBB();
+ Insert = InsertBB->getFirstTerminator();
+ }
+ BuildMI(*InsertBB, Insert, MI->getDebugLoc(),
get(AMDGPU::COPY), DstReg)
.addOperand(MI->getOperand(i));
MI->getOperand(i).setReg(DstReg);
}
}
+
+ // Legalize INSERT_SUBREG
+ // src0 must have the same register class as dst
+ if (MI->getOpcode() == AMDGPU::INSERT_SUBREG) {
+ unsigned Dst = MI->getOperand(0).getReg();
+ unsigned Src0 = MI->getOperand(1).getReg();
+ const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
+ const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
+ if (DstRC != Src0RC) {
+ MachineBasicBlock &MBB = *MI->getParent();
+ unsigned NewSrc0 = MRI.createVirtualRegister(DstRC);
+ BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::COPY), NewSrc0)
+ .addReg(Src0);
+ MI->getOperand(1).setReg(NewSrc0);
+ }
+ return;
+ }
+
+ // Legalize MUBUF* instructions
+ // FIXME: If we start using the non-addr64 instructions for compute, we
+ // may need to legalize them here.
+
+ int SRsrcIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+ AMDGPU::OpName::srsrc);
+ int VAddrIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+ AMDGPU::OpName::vaddr);
+ if (SRsrcIdx != -1 && VAddrIdx != -1) {
+ const TargetRegisterClass *VAddrRC =
+ RI.getRegClass(get(MI->getOpcode()).OpInfo[VAddrIdx].RegClass);
+
+ if(VAddrRC->getSize() == 8 &&
+ MRI.getRegClass(MI->getOperand(SRsrcIdx).getReg()) != VAddrRC) {
+ // We have a MUBUF instruction that uses a 64-bit vaddr register and
+ // srsrc has the incorrect register class. In order to fix this, we
+ // need to extract the pointer from the resource descriptor (srsrc),
+      // add it to the value of vaddr, then store the result in the vaddr
+ // operand. Then, we need to set the pointer field of the resource
+ // descriptor to zero.
+
+ MachineBasicBlock &MBB = *MI->getParent();
+ MachineOperand &SRsrcOp = MI->getOperand(SRsrcIdx);
+ MachineOperand &VAddrOp = MI->getOperand(VAddrIdx);
+ unsigned SRsrcPtrLo, SRsrcPtrHi, VAddrLo, VAddrHi;
+ unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
+ unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
+ unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
+ unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
+ unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
+ unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
+
+ // SRsrcPtrLo = srsrc:sub0
+ SRsrcPtrLo = buildExtractSubReg(MI, MRI, SRsrcOp,
+ &AMDGPU::VReg_128RegClass, AMDGPU::sub0, &AMDGPU::VReg_32RegClass);
+
+ // SRsrcPtrHi = srsrc:sub1
+ SRsrcPtrHi = buildExtractSubReg(MI, MRI, SRsrcOp,
+ &AMDGPU::VReg_128RegClass, AMDGPU::sub1, &AMDGPU::VReg_32RegClass);
+
+ // VAddrLo = vaddr:sub0
+ VAddrLo = buildExtractSubReg(MI, MRI, VAddrOp,
+ &AMDGPU::VReg_64RegClass, AMDGPU::sub0, &AMDGPU::VReg_32RegClass);
+
+ // VAddrHi = vaddr:sub1
+ VAddrHi = buildExtractSubReg(MI, MRI, VAddrOp,
+ &AMDGPU::VReg_64RegClass, AMDGPU::sub1, &AMDGPU::VReg_32RegClass);
+
+ // NewVaddrLo = SRsrcPtrLo + VAddrLo
+ BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_ADD_I32_e32),
+ NewVAddrLo)
+ .addReg(SRsrcPtrLo)
+ .addReg(VAddrLo)
+ .addReg(AMDGPU::VCC, RegState::Define | RegState::Implicit);
+
+ // NewVaddrHi = SRsrcPtrHi + VAddrHi
+ BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_ADDC_U32_e32),
+ NewVAddrHi)
+ .addReg(SRsrcPtrHi)
+ .addReg(VAddrHi)
+ .addReg(AMDGPU::VCC, RegState::ImplicitDefine)
+ .addReg(AMDGPU::VCC, RegState::Implicit);
+
+ // NewVaddr = {NewVaddrHi, NewVaddrLo}
+ BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
+ NewVAddr)
+ .addReg(NewVAddrLo)
+ .addImm(AMDGPU::sub0)
+ .addReg(NewVAddrHi)
+ .addImm(AMDGPU::sub1);
+
+ // Zero64 = 0
+ BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B64),
+ Zero64)
+ .addImm(0);
+
+ // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
+ BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
+ SRsrcFormatLo)
+ .addImm(AMDGPU::RSRC_DATA_FORMAT & 0xFFFFFFFF);
+
+ // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
+ BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
+ SRsrcFormatHi)
+ .addImm(AMDGPU::RSRC_DATA_FORMAT >> 32);
+
+ // NewSRsrc = {Zero64, SRsrcFormat}
+ BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
+ NewSRsrc)
+ .addReg(Zero64)
+ .addImm(AMDGPU::sub0_sub1)
+ .addReg(SRsrcFormatLo)
+ .addImm(AMDGPU::sub2)
+ .addReg(SRsrcFormatHi)
+ .addImm(AMDGPU::sub3);
+
+ // Update the instruction to use NewVaddr
+ MI->getOperand(VAddrIdx).setReg(NewVAddr);
+ // Update the instruction to use NewSRsrc
+ MI->getOperand(SRsrcIdx).setReg(NewSRsrc);
+ }
+ }
+}
+
+void SIInstrInfo::moveSMRDToVALU(MachineInstr *MI, MachineRegisterInfo &MRI) const {
+ MachineBasicBlock *MBB = MI->getParent();
+ switch (MI->getOpcode()) {
+ case AMDGPU::S_LOAD_DWORD_IMM:
+ case AMDGPU::S_LOAD_DWORD_SGPR:
+ case AMDGPU::S_LOAD_DWORDX2_IMM:
+ case AMDGPU::S_LOAD_DWORDX2_SGPR:
+ case AMDGPU::S_LOAD_DWORDX4_IMM:
+ case AMDGPU::S_LOAD_DWORDX4_SGPR:
+ unsigned NewOpcode = getVALUOp(*MI);
+ unsigned RegOffset;
+ unsigned ImmOffset;
+
+ if (MI->getOperand(2).isReg()) {
+ RegOffset = MI->getOperand(2).getReg();
+ ImmOffset = 0;
+ } else {
+ assert(MI->getOperand(2).isImm());
+      // SMRD instructions take a dword offset and MUBUF instructions
+ // take a byte offset.
+ ImmOffset = MI->getOperand(2).getImm() << 2;
+ RegOffset = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
+ if (isUInt<12>(ImmOffset)) {
+ BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
+ RegOffset)
+ .addImm(0);
+ } else {
+ BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
+ RegOffset)
+ .addImm(ImmOffset);
+ ImmOffset = 0;
+ }
+ }
+
+ unsigned SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
+ unsigned DWord0 = RegOffset;
+ unsigned DWord1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
+ unsigned DWord2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
+ unsigned DWord3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
+
+ BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord1)
+ .addImm(0);
+ BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord2)
+ .addImm(AMDGPU::RSRC_DATA_FORMAT & 0xFFFFFFFF);
+ BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord3)
+ .addImm(AMDGPU::RSRC_DATA_FORMAT >> 32);
+ BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), SRsrc)
+ .addReg(DWord0)
+ .addImm(AMDGPU::sub0)
+ .addReg(DWord1)
+ .addImm(AMDGPU::sub1)
+ .addReg(DWord2)
+ .addImm(AMDGPU::sub2)
+ .addReg(DWord3)
+ .addImm(AMDGPU::sub3);
+ MI->setDesc(get(NewOpcode));
+ if (MI->getOperand(2).isReg()) {
+ MI->getOperand(2).setReg(MI->getOperand(1).getReg());
+ } else {
+ MI->getOperand(2).ChangeToRegister(MI->getOperand(1).getReg(), false);
+ }
+ MI->getOperand(1).setReg(SRsrc);
+ MI->addOperand(*MBB->getParent(), MachineOperand::CreateImm(ImmOffset));
+ }
}
void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
@@ -557,11 +1219,80 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
while (!Worklist.empty()) {
MachineInstr *Inst = Worklist.pop_back_val();
+ MachineBasicBlock *MBB = Inst->getParent();
+ MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
+
+ unsigned Opcode = Inst->getOpcode();
unsigned NewOpcode = getVALUOp(*Inst);
- if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END)
+
+ // Handle some special cases
+ switch (Opcode) {
+ default:
+ if (isSMRD(Inst->getOpcode())) {
+ moveSMRDToVALU(Inst, MRI);
+ }
+ break;
+ case AMDGPU::S_MOV_B64: {
+ DebugLoc DL = Inst->getDebugLoc();
+
+ // If the source operand is a register we can replace this with a
+ // copy.
+ if (Inst->getOperand(1).isReg()) {
+ MachineInstr *Copy = BuildMI(*MBB, Inst, DL, get(TargetOpcode::COPY))
+ .addOperand(Inst->getOperand(0))
+ .addOperand(Inst->getOperand(1));
+ Worklist.push_back(Copy);
+ } else {
+ // Otherwise, we need to split this into two movs, because there is
+ // no 64-bit VALU move instruction.
+ unsigned Reg = Inst->getOperand(0).getReg();
+ unsigned Dst = split64BitImm(Worklist,
+ Inst,
+ MRI,
+ MRI.getRegClass(Reg),
+ Inst->getOperand(1));
+ MRI.replaceRegWith(Reg, Dst);
+ }
+ Inst->eraseFromParent();
+ continue;
+ }
+ case AMDGPU::S_AND_B64:
+ splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32);
+ Inst->eraseFromParent();
+ continue;
+
+ case AMDGPU::S_OR_B64:
+ splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32);
+ Inst->eraseFromParent();
+ continue;
+
+ case AMDGPU::S_XOR_B64:
+ splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32);
+ Inst->eraseFromParent();
continue;
- MachineRegisterInfo &MRI = Inst->getParent()->getParent()->getRegInfo();
+ case AMDGPU::S_NOT_B64:
+ splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32);
+ Inst->eraseFromParent();
+ continue;
+
+ case AMDGPU::S_BCNT1_I32_B64:
+ splitScalar64BitBCNT(Worklist, Inst);
+ Inst->eraseFromParent();
+ continue;
+
+ case AMDGPU::S_BFE_U64:
+ case AMDGPU::S_BFE_I64:
+ case AMDGPU::S_BFM_B64:
+ llvm_unreachable("Moving this op to VALU not implemented");
+ }
+
+ if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
+ // We cannot move this instruction to the VALU, so we should try to
+ // legalize its operands instead.
+ legalizeOperands(Inst);
+ continue;
+ }
// Use the new VALU Opcode.
const MCInstrDesc &NewDesc = get(NewOpcode);
@@ -576,27 +1307,56 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
Inst->RemoveOperand(i);
}
- // Add the implict and explicit register definitions.
- if (NewDesc.ImplicitUses) {
- for (unsigned i = 0; NewDesc.ImplicitUses[i]; ++i) {
- unsigned Reg = NewDesc.ImplicitUses[i];
- Inst->addOperand(MachineOperand::CreateReg(Reg, false, true));
- }
+ if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
+ // We are converting these to a BFE, so we need to add the missing
+ // operands for the size and offset.
+ unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
+ Inst->addOperand(Inst->getOperand(1));
+ Inst->getOperand(1).ChangeToImmediate(0);
+ Inst->addOperand(MachineOperand::CreateImm(0));
+ Inst->addOperand(MachineOperand::CreateImm(0));
+ Inst->addOperand(MachineOperand::CreateImm(0));
+ Inst->addOperand(MachineOperand::CreateImm(Size));
+
+ // XXX - Other pointless operands. There are 4, but it seems you only need
+ // 3 to not hit an assertion later in MCInstLower.
+ Inst->addOperand(MachineOperand::CreateImm(0));
+ Inst->addOperand(MachineOperand::CreateImm(0));
+ } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
+ // The VALU version adds the second operand to the result, so insert an
+ // extra 0 operand.
+ Inst->addOperand(MachineOperand::CreateImm(0));
}
- if (NewDesc.ImplicitDefs) {
- for (unsigned i = 0; NewDesc.ImplicitDefs[i]; ++i) {
- unsigned Reg = NewDesc.ImplicitDefs[i];
- Inst->addOperand(MachineOperand::CreateReg(Reg, true, true));
- }
+ addDescImplicitUseDef(NewDesc, Inst);
+
+ if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
+ const MachineOperand &OffsetWidthOp = Inst->getOperand(2);
+ // If we need to move this to VGPRs, we need to unpack the second operand
+ // back into the 2 separate ones for bit offset and width.
+ assert(OffsetWidthOp.isImm() &&
+ "Scalar BFE is only implemented for constant width and offset");
+ uint32_t Imm = OffsetWidthOp.getImm();
+
+ uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
+ uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
+
+ Inst->RemoveOperand(2); // Remove old immediate.
+ Inst->addOperand(Inst->getOperand(1));
+ Inst->getOperand(1).ChangeToImmediate(0);
+ Inst->addOperand(MachineOperand::CreateImm(0));
+ Inst->addOperand(MachineOperand::CreateImm(Offset));
+ Inst->addOperand(MachineOperand::CreateImm(0));
+ Inst->addOperand(MachineOperand::CreateImm(BitWidth));
+ Inst->addOperand(MachineOperand::CreateImm(0));
+ Inst->addOperand(MachineOperand::CreateImm(0));
}
- legalizeOperands(Inst);
-
// Update the destination register class.
+
const TargetRegisterClass *NewDstRC = getOpRegClass(*Inst, 0);
- switch (Inst->getOpcode()) {
+ switch (Opcode) {
// For target instructions, getOpRegClass just returns the virtual
// register class associated with the operand, so we need to find an
// equivalent VGPR register class in order to move the instruction to the
@@ -604,6 +1364,7 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
case AMDGPU::COPY:
case AMDGPU::PHI:
case AMDGPU::REG_SEQUENCE:
+ case AMDGPU::INSERT_SUBREG:
if (RI.hasVGPRs(NewDstRC))
continue;
NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
@@ -618,9 +1379,12 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
MRI.replaceRegWith(DstReg, NewDstReg);
+ // Legalize the operands
+ legalizeOperands(Inst);
+
for (MachineRegisterInfo::use_iterator I = MRI.use_begin(NewDstReg),
E = MRI.use_end(); I != E; ++I) {
- MachineInstr &UseMI = *I;
+ MachineInstr &UseMI = *I->getParent();
if (!canReadVGPR(UseMI, I.getOperandNo())) {
Worklist.push_back(&UseMI);
}
@@ -642,6 +1406,180 @@ const TargetRegisterClass *SIInstrInfo::getIndirectAddrRegClass() const {
return &AMDGPU::VReg_32RegClass;
}
+void SIInstrInfo::splitScalar64BitUnaryOp(
+ SmallVectorImpl<MachineInstr *> &Worklist,
+ MachineInstr *Inst,
+ unsigned Opcode) const {
+ MachineBasicBlock &MBB = *Inst->getParent();
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+
+ MachineOperand &Dest = Inst->getOperand(0);
+ MachineOperand &Src0 = Inst->getOperand(1);
+ DebugLoc DL = Inst->getDebugLoc();
+
+ MachineBasicBlock::iterator MII = Inst;
+
+ const MCInstrDesc &InstDesc = get(Opcode);
+ const TargetRegisterClass *Src0RC = Src0.isReg() ?
+ MRI.getRegClass(Src0.getReg()) :
+ &AMDGPU::SGPR_32RegClass;
+
+ const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
+
+ MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
+ AMDGPU::sub0, Src0SubRC);
+
+ const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
+ const TargetRegisterClass *DestSubRC = RI.getSubRegClass(DestRC, AMDGPU::sub0);
+
+ unsigned DestSub0 = MRI.createVirtualRegister(DestRC);
+ MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
+ .addOperand(SrcReg0Sub0);
+
+ MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
+ AMDGPU::sub1, Src0SubRC);
+
+ unsigned DestSub1 = MRI.createVirtualRegister(DestSubRC);
+ MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
+ .addOperand(SrcReg0Sub1);
+
+ unsigned FullDestReg = MRI.createVirtualRegister(DestRC);
+ BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
+ .addReg(DestSub0)
+ .addImm(AMDGPU::sub0)
+ .addReg(DestSub1)
+ .addImm(AMDGPU::sub1);
+
+ MRI.replaceRegWith(Dest.getReg(), FullDestReg);
+
+ // Try to legalize the operands in case we need to swap the order to keep it
+ // valid.
+ Worklist.push_back(LoHalf);
+ Worklist.push_back(HiHalf);
+}
+
+void SIInstrInfo::splitScalar64BitBinaryOp(
+ SmallVectorImpl<MachineInstr *> &Worklist,
+ MachineInstr *Inst,
+ unsigned Opcode) const {
+ MachineBasicBlock &MBB = *Inst->getParent();
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+
+ MachineOperand &Dest = Inst->getOperand(0);
+ MachineOperand &Src0 = Inst->getOperand(1);
+ MachineOperand &Src1 = Inst->getOperand(2);
+ DebugLoc DL = Inst->getDebugLoc();
+
+ MachineBasicBlock::iterator MII = Inst;
+
+ const MCInstrDesc &InstDesc = get(Opcode);
+ const TargetRegisterClass *Src0RC = Src0.isReg() ?
+ MRI.getRegClass(Src0.getReg()) :
+ &AMDGPU::SGPR_32RegClass;
+
+ const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
+ const TargetRegisterClass *Src1RC = Src1.isReg() ?
+ MRI.getRegClass(Src1.getReg()) :
+ &AMDGPU::SGPR_32RegClass;
+
+ const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
+
+ MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
+ AMDGPU::sub0, Src0SubRC);
+ MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
+ AMDGPU::sub0, Src1SubRC);
+
+ const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
+ const TargetRegisterClass *DestSubRC = RI.getSubRegClass(DestRC, AMDGPU::sub0);
+
+ unsigned DestSub0 = MRI.createVirtualRegister(DestRC);
+ MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
+ .addOperand(SrcReg0Sub0)
+ .addOperand(SrcReg1Sub0);
+
+ MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
+ AMDGPU::sub1, Src0SubRC);
+ MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
+ AMDGPU::sub1, Src1SubRC);
+
+ unsigned DestSub1 = MRI.createVirtualRegister(DestSubRC);
+ MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
+ .addOperand(SrcReg0Sub1)
+ .addOperand(SrcReg1Sub1);
+
+ unsigned FullDestReg = MRI.createVirtualRegister(DestRC);
+ BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
+ .addReg(DestSub0)
+ .addImm(AMDGPU::sub0)
+ .addReg(DestSub1)
+ .addImm(AMDGPU::sub1);
+
+ MRI.replaceRegWith(Dest.getReg(), FullDestReg);
+
+ // Try to legalize the operands in case we need to swap the order to keep it
+ // valid.
+ Worklist.push_back(LoHalf);
+ Worklist.push_back(HiHalf);
+}
+
+void SIInstrInfo::splitScalar64BitBCNT(SmallVectorImpl<MachineInstr *> &Worklist,
+ MachineInstr *Inst) const {
+ MachineBasicBlock &MBB = *Inst->getParent();
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+
+ MachineBasicBlock::iterator MII = Inst;
+ DebugLoc DL = Inst->getDebugLoc();
+
+ MachineOperand &Dest = Inst->getOperand(0);
+ MachineOperand &Src = Inst->getOperand(1);
+
+ const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e32);
+ const TargetRegisterClass *SrcRC = Src.isReg() ?
+ MRI.getRegClass(Src.getReg()) :
+ &AMDGPU::SGPR_32RegClass;
+
+ unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+
+ const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
+
+ MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
+ AMDGPU::sub0, SrcSubRC);
+ MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
+ AMDGPU::sub1, SrcSubRC);
+
+ MachineInstr *First = BuildMI(MBB, MII, DL, InstDesc, MidReg)
+ .addOperand(SrcRegSub0)
+ .addImm(0);
+
+ MachineInstr *Second = BuildMI(MBB, MII, DL, InstDesc, ResultReg)
+ .addOperand(SrcRegSub1)
+ .addReg(MidReg);
+
+ MRI.replaceRegWith(Dest.getReg(), ResultReg);
+
+ Worklist.push_back(First);
+ Worklist.push_back(Second);
+}
+
+void SIInstrInfo::addDescImplicitUseDef(const MCInstrDesc &NewDesc,
+ MachineInstr *Inst) const {
+  // Add the implicit register uses and definitions.
+ if (NewDesc.ImplicitUses) {
+ for (unsigned i = 0; NewDesc.ImplicitUses[i]; ++i) {
+ unsigned Reg = NewDesc.ImplicitUses[i];
+ Inst->addOperand(MachineOperand::CreateReg(Reg, false, true));
+ }
+ }
+
+ if (NewDesc.ImplicitDefs) {
+ for (unsigned i = 0; NewDesc.ImplicitDefs[i]; ++i) {
+ unsigned Reg = NewDesc.ImplicitDefs[i];
+ Inst->addOperand(MachineOperand::CreateReg(Reg, true, true));
+ }
+ }
+}
+
MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
MachineBasicBlock *MBB,
MachineBasicBlock::iterator I,
@@ -705,3 +1643,12 @@ void SIInstrInfo::reserveIndirectRegisters(BitVector &Reserved,
for (int Index = std::max(0, Begin - 15); Index <= End; ++Index)
Reserved.set(AMDGPU::VReg_512RegClass.getRegister(Index));
}
+
+const MachineOperand *SIInstrInfo::getNamedOperand(const MachineInstr& MI,
+ unsigned OperandName) const {
+ int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
+ if (Idx == -1)
+ return nullptr;
+
+ return &MI.getOperand(Idx);
+}
diff --git a/contrib/llvm/lib/Target/R600/SIInstrInfo.h b/contrib/llvm/lib/Target/R600/SIInstrInfo.h
index 4af6348..4687539 100644
--- a/contrib/llvm/lib/Target/R600/SIInstrInfo.h
+++ b/contrib/llvm/lib/Target/R600/SIInstrInfo.h
@@ -25,50 +25,100 @@ class SIInstrInfo : public AMDGPUInstrInfo {
private:
const SIRegisterInfo RI;
- MachineInstrBuilder buildIndirectIndexLoop(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned OffsetVGPR,
- unsigned MovRelOp,
- unsigned Dst,
- unsigned Src0) const;
- // If you add or remove instructions from this function, you will
+ unsigned buildExtractSubReg(MachineBasicBlock::iterator MI,
+ MachineRegisterInfo &MRI,
+ MachineOperand &SuperReg,
+ const TargetRegisterClass *SuperRC,
+ unsigned SubIdx,
+ const TargetRegisterClass *SubRC) const;
+ MachineOperand buildExtractSubRegOrImm(MachineBasicBlock::iterator MI,
+ MachineRegisterInfo &MRI,
+ MachineOperand &SuperReg,
+ const TargetRegisterClass *SuperRC,
+ unsigned SubIdx,
+ const TargetRegisterClass *SubRC) const;
+
+ unsigned split64BitImm(SmallVectorImpl<MachineInstr *> &Worklist,
+ MachineBasicBlock::iterator MI,
+ MachineRegisterInfo &MRI,
+ const TargetRegisterClass *RC,
+ const MachineOperand &Op) const;
+
+ void splitScalar64BitUnaryOp(SmallVectorImpl<MachineInstr *> &Worklist,
+ MachineInstr *Inst, unsigned Opcode) const;
+
+ void splitScalar64BitBinaryOp(SmallVectorImpl<MachineInstr *> &Worklist,
+ MachineInstr *Inst, unsigned Opcode) const;
+
+ void splitScalar64BitBCNT(SmallVectorImpl<MachineInstr *> &Worklist,
+ MachineInstr *Inst) const;
+
+ void addDescImplicitUseDef(const MCInstrDesc &Desc, MachineInstr *MI) const;
public:
- explicit SIInstrInfo(AMDGPUTargetMachine &tm);
+ explicit SIInstrInfo(const AMDGPUSubtarget &st);
- const SIRegisterInfo &getRegisterInfo() const;
+ const SIRegisterInfo &getRegisterInfo() const override {
+ return RI;
+ }
- virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const;
+ void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const override;
+
+ void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+
+ void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+
+ virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;
unsigned commuteOpcode(unsigned Opcode) const;
- virtual MachineInstr *commuteInstruction(MachineInstr *MI,
- bool NewMI=false) const;
+ MachineInstr *commuteInstruction(MachineInstr *MI,
+ bool NewMI=false) const override;
+
+ bool isTriviallyReMaterializable(const MachineInstr *MI,
+ AliasAnalysis *AA = nullptr) const;
- virtual unsigned getIEQOpcode() const { assert(!"Implement"); return 0;}
MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
MachineBasicBlock::iterator I,
- unsigned DstReg, unsigned SrcReg) const;
- virtual bool isMov(unsigned Opcode) const;
+ unsigned DstReg, unsigned SrcReg) const override;
+ bool isMov(unsigned Opcode) const override;
- virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const;
+ bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override;
+ bool isDS(uint16_t Opcode) const;
int isMIMG(uint16_t Opcode) const;
int isSMRD(uint16_t Opcode) const;
bool isVOP1(uint16_t Opcode) const;
bool isVOP2(uint16_t Opcode) const;
bool isVOP3(uint16_t Opcode) const;
bool isVOPC(uint16_t Opcode) const;
+ bool isInlineConstant(const APInt &Imm) const;
bool isInlineConstant(const MachineOperand &MO) const;
bool isLiteralConstant(const MachineOperand &MO) const;
- virtual bool verifyInstruction(const MachineInstr *MI,
- StringRef &ErrInfo) const;
+ bool isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
+ const MachineOperand &MO) const;
+
+ /// \brief Return true if this 64-bit VALU instruction has a 32-bit encoding.
+ /// This function will return false if you pass it a 32-bit instruction.
+ bool hasVALU32BitEncoding(unsigned Opcode) const;
+
+ bool verifyInstruction(const MachineInstr *MI,
+ StringRef &ErrInfo) const override;
bool isSALUInstr(const MachineInstr &MI) const;
static unsigned getVALUOp(const MachineInstr &MI);
+
bool isSALUOpSupportedOnVALU(const MachineInstr &MI) const;
/// \brief Return the correct register class for \p OpNo. For target-specific
@@ -98,39 +148,53 @@ public:
/// create new instruction and insert them before \p MI.
void legalizeOperands(MachineInstr *MI) const;
+ void moveSMRDToVALU(MachineInstr *MI, MachineRegisterInfo &MRI) const;
+
/// \brief Replace this instruction's opcode with the equivalent VALU
/// opcode. This function will also move the users of \p MI to the
/// VALU if necessary.
void moveToVALU(MachineInstr &MI) const;
- virtual unsigned calculateIndirectAddress(unsigned RegIndex,
- unsigned Channel) const;
+ unsigned calculateIndirectAddress(unsigned RegIndex,
+ unsigned Channel) const override;
- virtual const TargetRegisterClass *getIndirectAddrRegClass() const;
+ const TargetRegisterClass *getIndirectAddrRegClass() const override;
- virtual MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator I,
- unsigned ValueReg,
- unsigned Address,
- unsigned OffsetReg) const;
+ MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I,
+ unsigned ValueReg,
+ unsigned Address,
+ unsigned OffsetReg) const override;
- virtual MachineInstrBuilder buildIndirectRead(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator I,
- unsigned ValueReg,
- unsigned Address,
- unsigned OffsetReg) const;
+ MachineInstrBuilder buildIndirectRead(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I,
+ unsigned ValueReg,
+ unsigned Address,
+ unsigned OffsetReg) const override;
void reserveIndirectRegisters(BitVector &Reserved,
const MachineFunction &MF) const;
void LoadM0(MachineInstr *MoveRel, MachineBasicBlock::iterator I,
unsigned SavReg, unsigned IndexReg) const;
+
+ void insertNOPs(MachineBasicBlock::iterator MI, int Count) const;
+
+  /// \brief Returns the operand named \p OperandName. If \p MI does not have
+  /// an operand named \c OperandName, this function returns nullptr.
+ const MachineOperand *getNamedOperand(const MachineInstr& MI,
+ unsigned OperandName) const;
};
namespace AMDGPU {
int getVOPe64(uint16_t Opcode);
+ int getVOPe32(uint16_t Opcode);
int getCommuteRev(uint16_t Opcode);
int getCommuteOrig(uint16_t Opcode);
+ int getMCOpcode(uint16_t Opcode, unsigned Gen);
+
+ const uint64_t RSRC_DATA_FORMAT = 0xf00000000000LL;
+ const uint64_t RSRC_TID_ENABLE = 1LL << 55;
} // End namespace AMDGPU
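
The header hunk above adds several query helpers (isInlineConstant, hasVALU32BitEncoding, getNamedOperand) plus a getVOPe32 opcode mapping. Below is a minimal C++ sketch, not part of this patch, of how a later pass might combine them to pick the 32-bit encoding of a VALU instruction; the function name and the AMDGPU::OpName::src0 constant are illustrative assumptions.

#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineInstr.h"

using namespace llvm;

// Sketch only: shrink a 64-bit VALU encoding to its 32-bit form when the
// mapping exists and any literal src0 is representable as an inline constant.
static bool trySelect32BitEncoding(MachineInstr &MI, const SIInstrInfo &TII) {
  unsigned Opc = MI.getOpcode();

  // Only 64-bit VALU instructions with a 32-bit counterpart qualify.
  if (!TII.hasVALU32BitEncoding(Opc))
    return false;

  int Op32 = AMDGPU::getVOPe32(Opc); // -1 when no e32 counterpart exists
  if (Op32 == -1)
    return false;

  // A literal src0 only fits the e32 form if it is an inline constant.
  if (const MachineOperand *Src0 =
          TII.getNamedOperand(MI, AMDGPU::OpName::src0)) // OpName::src0 assumed
    if (Src0->isImm() && !TII.isInlineConstant(*Src0))
      return false;

  MI.setDesc(TII.get(Op32));
  return true;
}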
diff --git a/contrib/llvm/lib/Target/R600/SIInstrInfo.td b/contrib/llvm/lib/Target/R600/SIInstrInfo.td
index b7879c6..b0ac20f 100644
--- a/contrib/llvm/lib/Target/R600/SIInstrInfo.td
+++ b/contrib/llvm/lib/Target/R600/SIInstrInfo.td
@@ -7,23 +7,25 @@
//
//===----------------------------------------------------------------------===//
+// Except for the NONE field, this must be kept in sync with the SISubtarget enum
+// in AMDGPUMCInstLower.h
+def SISubtarget {
+ int NONE = -1;
+ int SI = 0;
+}
+
//===----------------------------------------------------------------------===//
// SI DAG Nodes
//===----------------------------------------------------------------------===//
-// SMRD takes a 64bit memory address and can only add an 32bit offset
-def SIadd64bit32bit : SDNode<"ISD::ADD",
- SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>, SDTCisVT<0, i64>, SDTCisVT<2, i32>]>
->;
-
def SIload_constant : SDNode<"AMDGPUISD::LOAD_CONSTANT",
- SDTypeProfile<1, 2, [SDTCisVT<0, f32>, SDTCisVT<1, i128>, SDTCisVT<2, i32>]>,
+ SDTypeProfile<1, 2, [SDTCisVT<0, f32>, SDTCisVT<1, v4i32>, SDTCisVT<2, i32>]>,
[SDNPMayLoad, SDNPMemOperand]
>;
def SItbuffer_store : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT",
SDTypeProfile<0, 13,
- [SDTCisVT<0, i128>, // rsrc(SGPR)
+ [SDTCisVT<0, v4i32>, // rsrc(SGPR)
SDTCisVT<1, iAny>, // vdata(VGPR)
SDTCisVT<2, i32>, // num_channels(imm)
SDTCisVT<3, i32>, // vaddr(VGPR)
@@ -41,13 +43,13 @@ def SItbuffer_store : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT",
>;
def SIload_input : SDNode<"AMDGPUISD::LOAD_INPUT",
- SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisVT<1, i128>, SDTCisVT<2, i16>,
+ SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisVT<1, v4i32>, SDTCisVT<2, i16>,
SDTCisVT<3, i32>]>
>;
class SDSample<string opcode> : SDNode <opcode,
SDTypeProfile<1, 4, [SDTCisVT<0, v4f32>, SDTCisVT<2, v32i8>,
- SDTCisVT<3, i128>, SDTCisVT<4, i32>]>
+ SDTCisVT<3, v4i32>, SDTCisVT<4, i32>]>
>;
def SIsample : SDSample<"AMDGPUISD::SAMPLE">;
@@ -55,6 +57,10 @@ def SIsampleb : SDSample<"AMDGPUISD::SAMPLEB">;
def SIsampled : SDSample<"AMDGPUISD::SAMPLED">;
def SIsamplel : SDSample<"AMDGPUISD::SAMPLEL">;
+def SIconstdata_ptr : SDNode<
+ "AMDGPUISD::CONST_DATA_PTR", SDTypeProfile <1, 0, [SDTCisVT<0, i64>]>
+>;
+
// Transformation function, extract the lower 32bit of a 64bit immediate
def LO32 : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(N->getZExtValue() & 0xffffffff, MVT::i32);
@@ -75,15 +81,14 @@ def HI32f : SDNodeXForm<fpimm, [{
return CurDAG->getTargetConstantFP(APFloat(APFloat::IEEEsingle, V), MVT::f32);
}]>;
-def IMM8bitDWORD : ImmLeaf <
- i32, [{
- return (Imm & ~0x3FC) == 0;
- }], SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(
- N->getZExtValue() >> 2, MVT::i32);
- }]>
+def IMM8bitDWORD : PatLeaf <(imm),
+ [{return (N->getZExtValue() & ~0x3FC) == 0;}]
>;
+def as_dword_i32imm : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(N->getZExtValue() >> 2, MVT::i32);
+}]>;
+
def as_i1imm : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(N->getZExtValue(), MVT::i1);
}]>;
@@ -96,13 +101,33 @@ def as_i16imm : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(N->getSExtValue(), MVT::i16);
}]>;
+def as_i32imm: SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(N->getSExtValue(), MVT::i32);
+}]>;
+
+def IMM8bit : PatLeaf <(imm),
+ [{return isUInt<8>(N->getZExtValue());}]
+>;
+
def IMM12bit : PatLeaf <(imm),
[{return isUInt<12>(N->getZExtValue());}]
>;
+def IMM16bit : PatLeaf <(imm),
+ [{return isUInt<16>(N->getZExtValue());}]
+>;
+
+def IMM32bit : PatLeaf <(imm),
+ [{return isUInt<32>(N->getZExtValue());}]
+>;
+
+def mubuf_vaddr_offset : PatFrag<
+ (ops node:$ptr, node:$offset, node:$imm_offset),
+ (add (add node:$ptr, node:$offset), node:$imm_offset)
+>;
+
class InlineImm <ValueType vt> : PatLeaf <(vt imm), [{
- return
- (*(const SITargetLowering *)getTargetLowering()).analyzeImmediate(N) == 0;
+ return isInlineImmediate(N);
}]>;
class SGPRImm <dag frag> : PatLeaf<frag, [{
@@ -121,10 +146,27 @@ class SGPRImm <dag frag> : PatLeaf<frag, [{
return false;
}]>;
-def FRAMEri64 : Operand<iPTR> {
- let MIOperandInfo = (ops SReg_32:$ptr, i32imm:$index);
+//===----------------------------------------------------------------------===//
+// Custom Operands
+//===----------------------------------------------------------------------===//
+
+def FRAMEri32 : Operand<iPTR> {
+ let MIOperandInfo = (ops i32:$ptr, i32imm:$index);
}
+def sopp_brtarget : Operand<OtherVT> {
+ let EncoderMethod = "getSOPPBrEncoding";
+ let OperandType = "OPERAND_PCREL";
+}
+
+//===----------------------------------------------------------------------===//
+// Complex patterns
+//===----------------------------------------------------------------------===//
+
+def MUBUFAddr32 : ComplexPattern<i64, 9, "SelectMUBUFAddr32">;
+def MUBUFAddr64 : ComplexPattern<i64, 3, "SelectMUBUFAddr64">;
+def MUBUFScratch : ComplexPattern<i64, 4, "SelectMUBUFScratch">;
+
//===----------------------------------------------------------------------===//
// SI assembler operands
//===----------------------------------------------------------------------===//
@@ -166,6 +208,12 @@ class SOP1_64 <bits<8> op, string opName, list<dag> pattern> : SOP1 <
opName#" $dst, $src0", pattern
>;
+// 64-bit input, 32-bit output.
+class SOP1_32_64 <bits<8> op, string opName, list<dag> pattern> : SOP1 <
+ op, (outs SReg_32:$dst), (ins SSrc_64:$src0),
+ opName#" $dst, $src0", pattern
+>;
+
class SOP2_32 <bits<7> op, string opName, list<dag> pattern> : SOP2 <
op, (outs SReg_32:$dst), (ins SSrc_32:$src0, SSrc_32:$src1),
opName#" $dst, $src0, $src1", pattern
@@ -181,15 +229,17 @@ class SOP2_SHIFT_64 <bits<7> op, string opName, list<dag> pattern> : SOP2 <
opName#" $dst, $src0, $src1", pattern
>;
-class SOPC_32 <bits<7> op, string opName, list<dag> pattern> : SOPC <
- op, (outs SCCReg:$dst), (ins SSrc_32:$src0, SSrc_32:$src1),
- opName#" $dst, $src0, $src1", pattern
->;
-class SOPC_64 <bits<7> op, string opName, list<dag> pattern> : SOPC <
- op, (outs SCCReg:$dst), (ins SSrc_64:$src0, SSrc_64:$src1),
- opName#" $dst, $src0, $src1", pattern
->;
+class SOPC_Helper <bits<7> op, RegisterClass rc, ValueType vt,
+ string opName, PatLeaf cond> : SOPC <
+ op, (outs SCCReg:$dst), (ins rc:$src0, rc:$src1),
+ opName#" $dst, $src0, $src1", []>;
+
+class SOPC_32<bits<7> op, string opName, PatLeaf cond = COND_NULL>
+ : SOPC_Helper<op, SSrc_32, i32, opName, cond>;
+
+class SOPC_64<bits<7> op, string opName, PatLeaf cond = COND_NULL>
+ : SOPC_Helper<op, SSrc_64, i64, opName, cond>;
class SOPK_32 <bits<5> op, string opName, list<dag> pattern> : SOPK <
op, (outs SReg_32:$dst), (ins i16imm:$src0),
@@ -205,7 +255,7 @@ multiclass SMRD_Helper <bits<5> op, string asm, RegisterClass baseClass,
RegisterClass dstClass> {
def _IMM : SMRD <
op, 1, (outs dstClass:$dst),
- (ins baseClass:$sbase, i32imm:$offset),
+ (ins baseClass:$sbase, u32imm:$offset),
asm#" $dst, $sbase, $offset", []
>;
@@ -229,6 +279,66 @@ class VOP2_REV <string revOp, bit isOrig> {
bit IsOrig = isOrig;
}
+class SIMCInstr <string pseudo, int subtarget> {
+ string PseudoInstr = pseudo;
+ int Subtarget = subtarget;
+}
+
+class VOP3_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
+ VOP3Common <outs, ins, "", pattern>,
+ VOP <opName>,
+ SIMCInstr<opName, SISubtarget.NONE> {
+ let isPseudo = 1;
+}
+
+class VOP3_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName> :
+ VOP3 <op, outs, ins, asm, []>,
+ SIMCInstr<opName, SISubtarget.SI>;
+
+multiclass VOP3_m <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern,
+ string opName> {
+
+ def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
+
+ def _si : VOP3_Real_si <op, outs, ins, asm, opName>;
+
+}
+
+multiclass VOP3_1_m <bits<8> op, dag outs, dag ins, string asm,
+ list<dag> pattern, string opName> {
+
+ def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
+
+ let src1 = 0, src1_modifiers = 0, src2 = 0, src2_modifiers = 0 in {
+
+ def _si : VOP3_Real_si <
+ {1, 1, op{6}, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
+ outs, ins, asm, opName
+ >;
+
+ } // src1 = 0, src1_modifiers = 0, src2 = 0, src2_modifiers = 0
+}
+
+multiclass VOP3_2_m <bits<6> op, dag outs, dag ins, string asm,
+ list<dag> pattern, string opName, string revOp> {
+
+ def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
+
+ let src2 = 0, src2_modifiers = 0 in {
+
+ def _si : VOP3_Real_si <
+ {1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
+ outs, ins, asm, opName>,
+ VOP2_REV<revOp#"_e64", !eq(revOp, opName)>;
+
+ } // src2 = 0, src2_modifiers = 0
+}
+
+// This must always appear immediately before the operand whose input it modifies.
+def InputMods : OperandWithDefaultOps <i32, (ops (i32 0))> {
+ let PrintMethod = "printOperandAndMods";
+}
+
multiclass VOP1_Helper <bits<8> op, RegisterClass drc, RegisterClass src,
string opName, list<dag> pattern> {
@@ -237,17 +347,11 @@ multiclass VOP1_Helper <bits<8> op, RegisterClass drc, RegisterClass src,
opName#"_e32 $dst, $src0", pattern
>, VOP <opName>;
- def _e64 : VOP3 <
- {1, 1, op{6}, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
+ defm _e64 : VOP3_1_m <
+ op,
(outs drc:$dst),
- (ins src:$src0,
- i32imm:$abs, i32imm:$clamp,
- i32imm:$omod, i32imm:$neg),
- opName#"_e64 $dst, $src0, $abs, $clamp, $omod, $neg", []
- >, VOP <opName> {
- let src1 = SIOperand.ZERO;
- let src2 = SIOperand.ZERO;
- }
+ (ins InputMods:$src0_modifiers, src:$src0, i32imm:$clamp, i32imm:$omod),
+ opName#"_e64 $dst, $src0_modifiers, $clamp, $omod", [], opName>;
}
multiclass VOP1_32 <bits<8> op, string opName, list<dag> pattern>
@@ -269,16 +373,14 @@ multiclass VOP2_Helper <bits<6> op, RegisterClass vrc, RegisterClass arc,
opName#"_e32 $dst, $src0, $src1", pattern
>, VOP <opName>, VOP2_REV<revOp#"_e32", !eq(revOp, opName)>;
- def _e64 : VOP3 <
- {1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
+ defm _e64 : VOP3_2_m <
+ op,
(outs vrc:$dst),
- (ins arc:$src0, arc:$src1,
- i32imm:$abs, i32imm:$clamp,
- i32imm:$omod, i32imm:$neg),
- opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg", []
- >, VOP <opName>, VOP2_REV<revOp#"_e64", !eq(revOp, opName)> {
- let src2 = SIOperand.ZERO;
- }
+ (ins InputMods:$src0_modifiers, arc:$src0,
+ InputMods:$src1_modifiers, arc:$src1,
+ i32imm:$clamp, i32imm:$omod),
+ opName#"_e64 $dst, $src0_modifiers, $src1_modifiers, $clamp, $omod", [],
+ opName, revOp>;
}
multiclass VOP2_32 <bits<6> op, string opName, list<dag> pattern,
@@ -300,12 +402,13 @@ multiclass VOP2b_32 <bits<6> op, string opName, list<dag> pattern,
def _e64 : VOP3b <
{1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
(outs VReg_32:$dst),
- (ins VSrc_32:$src0, VSrc_32:$src1,
- i32imm:$abs, i32imm:$clamp,
- i32imm:$omod, i32imm:$neg),
- opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg", []
+ (ins InputMods: $src0_modifiers, VSrc_32:$src0,
+ InputMods:$src1_modifiers, VSrc_32:$src1,
+ i32imm:$clamp, i32imm:$omod),
+ opName#"_e64 $dst, $src0_modifiers, $src1_modifiers, $clamp, $omod", []
>, VOP <opName>, VOP2_REV<revOp#"_e64", !eq(revOp, opName)> {
- let src2 = SIOperand.ZERO;
+ let src2 = 0;
+ let src2_modifiers = 0;
/* the VOP2 variant puts the carry out into VCC, the VOP3 variant
can write it into any SGPR. We currently don't use the carry out,
so for now hardcode it to VCC as well */
@@ -314,25 +417,28 @@ multiclass VOP2b_32 <bits<6> op, string opName, list<dag> pattern,
}
multiclass VOPC_Helper <bits<8> op, RegisterClass vrc, RegisterClass arc,
- string opName, ValueType vt, PatLeaf cond> {
-
+ string opName, ValueType vt, PatLeaf cond, bit defExec = 0> {
def _e32 : VOPC <
op, (ins arc:$src0, vrc:$src1),
opName#"_e32 $dst, $src0, $src1", []
- >, VOP <opName>;
+ >, VOP <opName> {
+ let Defs = !if(defExec, [EXEC], []);
+ }
def _e64 : VOP3 <
{0, op{7}, op{6}, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
(outs SReg_64:$dst),
- (ins arc:$src0, arc:$src1,
- InstFlag:$abs, InstFlag:$clamp,
- InstFlag:$omod, InstFlag:$neg),
- opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg",
+ (ins InputMods:$src0_modifiers, arc:$src0,
+ InputMods:$src1_modifiers, arc:$src1,
+ InstFlag:$clamp, InstFlag:$omod),
+ opName#"_e64 $dst, $src0_modifiers, $src1_modifiers, $clamp, $omod",
!if(!eq(!cast<string>(cond), "COND_NULL"), []<dag>,
[(set SReg_64:$dst, (i1 (setcc (vt arc:$src0), arc:$src1, cond)))]
)
>, VOP <opName> {
- let src2 = SIOperand.ZERO;
+ let Defs = !if(defExec, [EXEC], []);
+ let src2 = 0;
+ let src2_modifiers = 0;
}
}
@@ -344,76 +450,172 @@ multiclass VOPC_64 <bits<8> op, string opName,
ValueType vt = untyped, PatLeaf cond = COND_NULL>
: VOPC_Helper <op, VReg_64, VSrc_64, opName, vt, cond>;
-class VOP3_32 <bits<9> op, string opName, list<dag> pattern> : VOP3 <
+multiclass VOPCX_32 <bits<8> op, string opName,
+ ValueType vt = untyped, PatLeaf cond = COND_NULL>
+ : VOPC_Helper <op, VReg_32, VSrc_32, opName, vt, cond, 1>;
+
+multiclass VOPCX_64 <bits<8> op, string opName,
+ ValueType vt = untyped, PatLeaf cond = COND_NULL>
+ : VOPC_Helper <op, VReg_64, VSrc_64, opName, vt, cond, 1>;
+
+multiclass VOP3_32 <bits<9> op, string opName, list<dag> pattern> : VOP3_m <
op, (outs VReg_32:$dst),
- (ins VSrc_32:$src0, VSrc_32:$src1, VSrc_32:$src2,
- InstFlag:$abs, InstFlag:$clamp, InstFlag:$omod, InstFlag:$neg),
- opName#" $dst, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", pattern
->, VOP <opName>;
+ (ins InputMods: $src0_modifiers, VSrc_32:$src0, InputMods:$src1_modifiers,
+ VSrc_32:$src1, InputMods:$src2_modifiers, VSrc_32:$src2,
+ InstFlag:$clamp, InstFlag:$omod),
+ opName#" $dst, $src0_modifiers, $src1, $src2, $clamp, $omod", pattern, opName
+>;
-class VOP3_64_Shift <bits <9> op, string opName, list<dag> pattern> : VOP3 <
+class VOP3_64_32 <bits <9> op, string opName, list<dag> pattern> : VOP3 <
op, (outs VReg_64:$dst),
(ins VSrc_64:$src0, VSrc_32:$src1),
opName#" $dst, $src0, $src1", pattern
>, VOP <opName> {
- let src2 = SIOperand.ZERO;
- let abs = 0;
+ let src2 = 0;
+ let src2_modifiers = 0;
+ let src0_modifiers = 0;
let clamp = 0;
let omod = 0;
- let neg = 0;
}
class VOP3_64 <bits<9> op, string opName, list<dag> pattern> : VOP3 <
op, (outs VReg_64:$dst),
- (ins VSrc_64:$src0, VSrc_64:$src1, VSrc_64:$src2,
+ (ins InputMods:$src0_modifiers, VSrc_64:$src0,
+ InputMods:$src1_modifiers, VSrc_64:$src1,
+ InputMods:$src2_modifiers, VSrc_64:$src2,
+ InstFlag:$clamp, InstFlag:$omod),
+ opName#" $dst, $src0_modifiers, $src1_modifiers, $src2_modifiers, $clamp, $omod", pattern
+>, VOP <opName>;
+
+
+class VOP3b_Helper <bits<9> op, RegisterClass vrc, RegisterClass arc,
+ string opName, list<dag> pattern> : VOP3 <
+ op, (outs vrc:$dst0, SReg_64:$dst1),
+ (ins arc:$src0, arc:$src1, arc:$src2,
InstFlag:$abs, InstFlag:$clamp, InstFlag:$omod, InstFlag:$neg),
- opName#" $dst, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", pattern
+ opName#" $dst0, $dst1, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", pattern
>, VOP <opName>;
+
+class VOP3b_64 <bits<9> op, string opName, list<dag> pattern> :
+ VOP3b_Helper <op, VReg_64, VSrc_64, opName, pattern>;
+
+class VOP3b_32 <bits<9> op, string opName, list<dag> pattern> :
+ VOP3b_Helper <op, VReg_32, VSrc_32, opName, pattern>;
+
//===----------------------------------------------------------------------===//
// Vector I/O classes
//===----------------------------------------------------------------------===//
-class DS_Load_Helper <bits<8> op, string asm, RegisterClass regClass> : DS <
+class DS_1A <bits<8> op, dag outs, dag ins, string asm, list<dag> pat> :
+ DS <op, outs, ins, asm, pat> {
+ bits<16> offset;
+
+  // Single loads interpret the two i8imm operands as a single i16 offset.
+ let offset0 = offset{7-0};
+ let offset1 = offset{15-8};
+}
+
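
The DS_1A class just defined collapses the two i8 hardware offset fields into one 16-bit offset operand. A small C++ sketch of the equivalent bit split, with illustrative names only:

#include <cstdint>

// Mirrors the DS_1A field split: offset0 = offset{7-0}, offset1 = offset{15-8}.
static void splitDSOffset(uint16_t Offset, uint8_t &Offset0, uint8_t &Offset1) {
  Offset0 = Offset & 0xFF;
  Offset1 = (Offset >> 8) & 0xFF;
}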
+class DS_Load_Helper <bits<8> op, string asm, RegisterClass regClass> : DS_1A <
op,
(outs regClass:$vdst),
- (ins i1imm:$gds, VReg_32:$addr, VReg_32:$data0, VReg_32:$data1,
- i8imm:$offset0, i8imm:$offset1),
- asm#" $vdst, $gds, $addr, $data0, $data1, $offset0, $offset1, [M0]",
+ (ins i1imm:$gds, VReg_32:$addr, u16imm:$offset),
+ asm#" $vdst, $addr, $offset, [M0]",
[]> {
+ let data0 = 0;
+ let data1 = 0;
+ let mayLoad = 1;
+ let mayStore = 0;
+}
+
+class DS_Load2_Helper <bits<8> op, string asm, RegisterClass regClass> : DS <
+ op,
+ (outs regClass:$vdst),
+ (ins i1imm:$gds, VReg_32:$addr, u8imm:$offset0, u8imm:$offset1),
+ asm#" $gds, $vdst, $addr, $offset0, $offset1, [M0]",
+ []> {
+ let data0 = 0;
+ let data1 = 0;
let mayLoad = 1;
let mayStore = 0;
}
-class DS_Store_Helper <bits<8> op, string asm, RegisterClass regClass> : DS <
+class DS_Store_Helper <bits<8> op, string asm, RegisterClass regClass> : DS_1A <
op,
(outs),
- (ins i1imm:$gds, VReg_32:$addr, VReg_32:$data0, VReg_32:$data1,
- i8imm:$offset0, i8imm:$offset1),
- asm#" $gds, $addr, $data0, $data1, $offset0, $offset1, [M0]",
+ (ins i1imm:$gds, VReg_32:$addr, regClass:$data0, u16imm:$offset),
+ asm#" $addr, $data0, $offset [M0]",
+ []> {
+ let data1 = 0;
+ let mayStore = 1;
+ let mayLoad = 0;
+ let vdst = 0;
+}
+
+class DS_Store2_Helper <bits<8> op, string asm, RegisterClass regClass> : DS_1A <
+ op,
+ (outs),
+ (ins i1imm:$gds, VReg_32:$addr, regClass:$data0, u8imm:$offset0, u8imm:$offset1),
+ asm#" $addr, $data0, $data1, $offset0, $offset1 [M0]",
[]> {
let mayStore = 1;
let mayLoad = 0;
let vdst = 0;
}
-class DS_1A1D_RET <bits<8> op, string asm, RegisterClass rc> : DS <
+// 1 address, 1 data.
+class DS_1A1D_RET <bits<8> op, string asm, RegisterClass rc> : DS_1A <
+ op,
+ (outs rc:$vdst),
+ (ins i1imm:$gds, VReg_32:$addr, rc:$data0, u16imm:$offset),
+ asm#" $vdst, $addr, $data0, $offset, [M0]",
+ []> {
+
+ let data1 = 0;
+ let mayStore = 1;
+ let mayLoad = 1;
+}
+
+// 1 address, 2 data.
+class DS_1A2D_RET <bits<8> op, string asm, RegisterClass rc> : DS_1A <
op,
(outs rc:$vdst),
- (ins i1imm:$gds, VReg_32:$addr, VReg_32:$data0, i8imm:$offset0,
- i8imm:$offset1),
- asm#" $gds, $vdst, $addr, $data0, $offset0, $offset1, [M0]",
+ (ins i1imm:$gds, VReg_32:$addr, rc:$data0, rc:$data1, u16imm:$offset),
+ asm#" $vdst, $addr, $data0, $data1, $offset, [M0]",
[]> {
let mayStore = 1;
let mayLoad = 1;
+}
+
+// 1 address, 2 data.
+class DS_1A2D_NORET <bits<8> op, string asm, RegisterClass rc> : DS_1A <
+ op,
+ (outs),
+ (ins i1imm:$gds, VReg_32:$addr, rc:$data0, rc:$data1, u16imm:$offset),
+ asm#" $addr, $data0, $data1, $offset, [M0]",
+ []> {
+ let mayStore = 1;
+ let mayLoad = 1;
+}
+
+// 1 address, 1 data.
+class DS_1A1D_NORET <bits<8> op, string asm, RegisterClass rc> : DS_1A <
+ op,
+ (outs),
+ (ins i1imm:$gds, VReg_32:$addr, rc:$data0, u16imm:$offset),
+ asm#" $addr, $data0, $offset, [M0]",
+ []> {
+
let data1 = 0;
+ let mayStore = 1;
+ let mayLoad = 1;
}
class MTBUF_Store_Helper <bits<3> op, string asm, RegisterClass regClass> : MTBUF <
op,
(outs),
- (ins regClass:$vdata, i16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc,
+ (ins regClass:$vdata, u16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc,
i1imm:$addr64, i8imm:$dfmt, i8imm:$nfmt, VReg_32:$vaddr,
SReg_128:$srsrc, i1imm:$slc, i1imm:$tfe, SSrc_32:$soffset),
asm#" $vdata, $offset, $offen, $idxen, $glc, $addr64, $dfmt,"
@@ -423,32 +625,34 @@ class MTBUF_Store_Helper <bits<3> op, string asm, RegisterClass regClass> : MTBU
let mayLoad = 0;
}
-multiclass MUBUF_Load_Helper <bits<7> op, string asm, RegisterClass regClass> {
+multiclass MUBUF_Load_Helper <bits<7> op, string asm, RegisterClass regClass,
+ ValueType load_vt = i32,
+ SDPatternOperator ld = null_frag> {
let lds = 0, mayLoad = 1 in {
let addr64 = 0 in {
- let offen = 0, idxen = 0 in {
+ let offen = 0, idxen = 0, vaddr = 0 in {
def _OFFSET : MUBUF <op, (outs regClass:$vdata),
- (ins SReg_128:$srsrc, VReg_32:$vaddr,
- i16imm:$offset, SSrc_32:$soffset, i1imm:$glc,
+ (ins SReg_128:$srsrc,
+ u16imm:$offset, SSrc_32:$soffset, i1imm:$glc,
i1imm:$slc, i1imm:$tfe),
asm#" $vdata, $srsrc + $offset + $soffset, glc=$glc, slc=$slc, tfe=$tfe", []>;
}
- let offen = 1, idxen = 0, offset = 0 in {
+ let offen = 1, idxen = 0 in {
def _OFFEN : MUBUF <op, (outs regClass:$vdata),
(ins SReg_128:$srsrc, VReg_32:$vaddr,
- SSrc_32:$soffset, i1imm:$glc, i1imm:$slc,
+ SSrc_32:$soffset, u16imm:$offset, i1imm:$glc, i1imm:$slc,
i1imm:$tfe),
- asm#" $vdata, $srsrc + $vaddr + $soffset, glc=$glc, slc=$slc, tfe=$tfe", []>;
+ asm#" $vdata, $srsrc + $vaddr + $soffset + $offset, glc=$glc, slc=$slc, tfe=$tfe", []>;
}
let offen = 0, idxen = 1 in {
def _IDXEN : MUBUF <op, (outs regClass:$vdata),
(ins SReg_128:$srsrc, VReg_32:$vaddr,
- i16imm:$offset, SSrc_32:$soffset, i1imm:$glc,
+ u16imm:$offset, SSrc_32:$soffset, i1imm:$glc,
i1imm:$slc, i1imm:$tfe),
asm#" $vdata, $srsrc[$vaddr] + $offset + $soffset, glc=$glc, slc=$slc, tfe=$tfe", []>;
}
@@ -464,36 +668,54 @@ multiclass MUBUF_Load_Helper <bits<7> op, string asm, RegisterClass regClass> {
let offen = 0, idxen = 0, addr64 = 1, glc = 0, slc = 0, tfe = 0, soffset = 128 /* ZERO */ in {
def _ADDR64 : MUBUF <op, (outs regClass:$vdata),
- (ins SReg_128:$srsrc, VReg_64:$vaddr, i16imm:$offset),
- asm#" $vdata, $srsrc + $vaddr + $offset", []>;
+ (ins SReg_128:$srsrc, VReg_64:$vaddr, u16imm:$offset),
+ asm#" $vdata, $srsrc + $vaddr + $offset",
+ [(set load_vt:$vdata, (ld (MUBUFAddr64 v4i32:$srsrc,
+ i64:$vaddr, u16imm:$offset)))]>;
}
}
}
-class MUBUF_Store_Helper <bits<7> op, string name, RegisterClass vdataClass> :
- MUBUF <op, (outs), (ins vdataClass:$vdata, SReg_128:$srsrc, VReg_64:$vaddr,
- i16imm:$offset),
- name#" $vdata, $srsrc + $vaddr + $offset",
- []> {
-
- let mayLoad = 0;
- let mayStore = 1;
+multiclass MUBUF_Store_Helper <bits<7> op, string name, RegisterClass vdataClass,
+ ValueType store_vt, SDPatternOperator st> {
+
+ def "" : MUBUF <
+ op, (outs),
+ (ins vdataClass:$vdata, SReg_128:$srsrc, VReg_32:$vaddr, SSrc_32:$soffset,
+ u16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc, i1imm:$slc,
+ i1imm:$tfe),
+ name#" $vdata, $srsrc, $vaddr, $soffset, $offset $offen $idxen $glc $slc $tfe",
+ []
+ > {
+ let addr64 = 0;
+ }
- // Encoding
- let offen = 0;
- let idxen = 0;
- let glc = 0;
- let addr64 = 1;
- let lds = 0;
- let slc = 0;
- let tfe = 0;
- let soffset = 128; // ZERO
+ def _ADDR64 : MUBUF <
+ op, (outs),
+ (ins vdataClass:$vdata, SReg_128:$srsrc, VReg_64:$vaddr, u16imm:$offset),
+ name#" $vdata, $srsrc + $vaddr + $offset",
+ [(st store_vt:$vdata,
+ (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr, u16imm:$offset))]> {
+
+ let mayLoad = 0;
+ let mayStore = 1;
+
+ // Encoding
+ let offen = 0;
+ let idxen = 0;
+ let glc = 0;
+ let addr64 = 1;
+ let lds = 0;
+ let slc = 0;
+ let tfe = 0;
+ let soffset = 128; // ZERO
+ }
}
class MTBUF_Load_Helper <bits<3> op, string asm, RegisterClass regClass> : MTBUF <
op,
(outs regClass:$dst),
- (ins i16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc, i1imm:$addr64,
+ (ins u16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc, i1imm:$addr64,
i8imm:$dfmt, i8imm:$nfmt, VReg_32:$vaddr, SReg_128:$srsrc,
i1imm:$slc, i1imm:$tfe, SSrc_32:$soffset),
asm#" $dst, $offset, $offen, $idxen, $glc, $addr64, $dfmt,"
@@ -581,6 +803,53 @@ multiclass MIMG_Sampler <bits<7> op, string asm> {
defm _V4 : MIMG_Sampler_Src_Helper<op, asm, VReg_128, 4>;
}
+class MIMG_Gather_Helper <bits<7> op, string asm,
+ RegisterClass dst_rc,
+ RegisterClass src_rc> : MIMG <
+ op,
+ (outs dst_rc:$vdata),
+ (ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128,
+ i1imm:$tfe, i1imm:$lwe, i1imm:$slc, src_rc:$vaddr,
+ SReg_256:$srsrc, SReg_128:$ssamp),
+ asm#" $vdata, $dmask, $unorm, $glc, $da, $r128,"
+ #" $tfe, $lwe, $slc, $vaddr, $srsrc, $ssamp",
+ []> {
+ let mayLoad = 1;
+ let mayStore = 0;
+
+ // DMASK was repurposed for GATHER4. 4 components are always
+ // returned and DMASK works like a swizzle - it selects
+ // the component to fetch. The only useful DMASK values are
+ // 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
+ // (red,red,red,red) etc.) The ISA document doesn't mention
+ // this.
+ // Therefore, disable all code which updates DMASK by setting these two:
+ let MIMG = 0;
+ let hasPostISelHook = 0;
+}
+
+multiclass MIMG_Gather_Src_Helper <bits<7> op, string asm,
+ RegisterClass dst_rc,
+ int channels> {
+ def _V1 : MIMG_Gather_Helper <op, asm, dst_rc, VReg_32>,
+ MIMG_Mask<asm#"_V1", channels>;
+ def _V2 : MIMG_Gather_Helper <op, asm, dst_rc, VReg_64>,
+ MIMG_Mask<asm#"_V2", channels>;
+ def _V4 : MIMG_Gather_Helper <op, asm, dst_rc, VReg_128>,
+ MIMG_Mask<asm#"_V4", channels>;
+ def _V8 : MIMG_Gather_Helper <op, asm, dst_rc, VReg_256>,
+ MIMG_Mask<asm#"_V8", channels>;
+ def _V16 : MIMG_Gather_Helper <op, asm, dst_rc, VReg_512>,
+ MIMG_Mask<asm#"_V16", channels>;
+}
+
+multiclass MIMG_Gather <bits<7> op, string asm> {
+ defm _V1 : MIMG_Gather_Src_Helper<op, asm, VReg_32, 1>;
+ defm _V2 : MIMG_Gather_Src_Helper<op, asm, VReg_64, 2>;
+ defm _V3 : MIMG_Gather_Src_Helper<op, asm, VReg_96, 3>;
+ defm _V4 : MIMG_Gather_Src_Helper<op, asm, VReg_128, 4>;
+}
+
//===----------------------------------------------------------------------===//
// Vector instruction mappings
//===----------------------------------------------------------------------===//
@@ -594,6 +863,15 @@ def getVOPe64 : InstrMapping {
let ValueCols = [["8"]];
}
+// Maps an opcode in e64 form to its e32 equivalent
+def getVOPe32 : InstrMapping {
+ let FilterClass = "VOP";
+ let RowFields = ["OpName"];
+ let ColFields = ["Size"];
+ let KeyCol = ["8"];
+ let ValueCols = [["4"]];
+}
+
// Maps an original opcode to its commuted version
def getCommuteRev : InstrMapping {
let FilterClass = "VOP2_REV";
@@ -620,4 +898,20 @@ def getCommuteOrig : InstrMapping {
let ValueCols = [["1"]];
}
+def isDS : InstrMapping {
+ let FilterClass = "DS";
+ let RowFields = ["Inst"];
+ let ColFields = ["Size"];
+ let KeyCol = ["8"];
+ let ValueCols = [["8"]];
+}
+
+def getMCOpcode : InstrMapping {
+ let FilterClass = "SIMCInstr";
+ let RowFields = ["PseudoInstr"];
+ let ColFields = ["Subtarget"];
+ let KeyCol = [!cast<string>(SISubtarget.NONE)];
+ let ValueCols = [[!cast<string>(SISubtarget.SI)]];
+}
+
include "SIInstructions.td"
diff --git a/contrib/llvm/lib/Target/R600/SIInstructions.td b/contrib/llvm/lib/Target/R600/SIInstructions.td
index 2ca6a95..aecd847 100644
--- a/contrib/llvm/lib/Target/R600/SIInstructions.td
+++ b/contrib/llvm/lib/Target/R600/SIInstructions.td
@@ -22,14 +22,66 @@ def InterpSlot : Operand<i32> {
let PrintMethod = "printInterpSlot";
}
-def SendMsgImm : Operand<i32>;
+def SendMsgImm : Operand<i32> {
+ let PrintMethod = "printSendMsg";
+}
def isSI : Predicate<"Subtarget.getGeneration() "
">= AMDGPUSubtarget::SOUTHERN_ISLANDS">;
+def isCI : Predicate<"Subtarget.getGeneration() "
+ ">= AMDGPUSubtarget::SEA_ISLANDS">;
+
+def isCFDepth0 : Predicate<"isCFDepth0()">;
+
def WAIT_FLAG : InstFlag<"printWaitFlag">;
-let Predicates = [isSI] in {
+let SubtargetPredicate = isSI in {
+let OtherPredicates = [isCFDepth0] in {
+
+//===----------------------------------------------------------------------===//
+// SMRD Instructions
+//===----------------------------------------------------------------------===//
+
+let mayLoad = 1 in {
+
+// We are using the SGPR_32 and not the SReg_32 register class for 32-bit
+// SMRD instructions, because the SGPR_32 register class does not include M0
+// and writing to M0 from an SMRD instruction will hang the GPU.
+defm S_LOAD_DWORD : SMRD_Helper <0x00, "S_LOAD_DWORD", SReg_64, SGPR_32>;
+defm S_LOAD_DWORDX2 : SMRD_Helper <0x01, "S_LOAD_DWORDX2", SReg_64, SReg_64>;
+defm S_LOAD_DWORDX4 : SMRD_Helper <0x02, "S_LOAD_DWORDX4", SReg_64, SReg_128>;
+defm S_LOAD_DWORDX8 : SMRD_Helper <0x03, "S_LOAD_DWORDX8", SReg_64, SReg_256>;
+defm S_LOAD_DWORDX16 : SMRD_Helper <0x04, "S_LOAD_DWORDX16", SReg_64, SReg_512>;
+
+defm S_BUFFER_LOAD_DWORD : SMRD_Helper <
+ 0x08, "S_BUFFER_LOAD_DWORD", SReg_128, SGPR_32
+>;
+
+defm S_BUFFER_LOAD_DWORDX2 : SMRD_Helper <
+ 0x09, "S_BUFFER_LOAD_DWORDX2", SReg_128, SReg_64
+>;
+
+defm S_BUFFER_LOAD_DWORDX4 : SMRD_Helper <
+ 0x0a, "S_BUFFER_LOAD_DWORDX4", SReg_128, SReg_128
+>;
+
+defm S_BUFFER_LOAD_DWORDX8 : SMRD_Helper <
+ 0x0b, "S_BUFFER_LOAD_DWORDX8", SReg_128, SReg_256
+>;
+
+defm S_BUFFER_LOAD_DWORDX16 : SMRD_Helper <
+ 0x0c, "S_BUFFER_LOAD_DWORDX16", SReg_128, SReg_512
+>;
+
+} // mayLoad = 1
+
+//def S_MEMTIME : SMRD_ <0x0000001e, "S_MEMTIME", []>;
+//def S_DCACHE_INV : SMRD_ <0x0000001f, "S_DCACHE_INV", []>;
+
+//===----------------------------------------------------------------------===//
+// SOP1 Instructions
+//===----------------------------------------------------------------------===//
let neverHasSideEffects = 1 in {
@@ -40,33 +92,58 @@ def S_CMOV_B32 : SOP1_32 <0x00000005, "S_CMOV_B32", []>;
def S_CMOV_B64 : SOP1_64 <0x00000006, "S_CMOV_B64", []>;
} // End isMoveImm = 1
-def S_NOT_B32 : SOP1_32 <0x00000007, "S_NOT_B32", []>;
-def S_NOT_B64 : SOP1_64 <0x00000008, "S_NOT_B64", []>;
+def S_NOT_B32 : SOP1_32 <0x00000007, "S_NOT_B32",
+ [(set i32:$dst, (not i32:$src0))]
+>;
+
+def S_NOT_B64 : SOP1_64 <0x00000008, "S_NOT_B64",
+ [(set i64:$dst, (not i64:$src0))]
+>;
def S_WQM_B32 : SOP1_32 <0x00000009, "S_WQM_B32", []>;
def S_WQM_B64 : SOP1_64 <0x0000000a, "S_WQM_B64", []>;
-def S_BREV_B32 : SOP1_32 <0x0000000b, "S_BREV_B32", []>;
+def S_BREV_B32 : SOP1_32 <0x0000000b, "S_BREV_B32",
+ [(set i32:$dst, (AMDGPUbrev i32:$src0))]
+>;
def S_BREV_B64 : SOP1_64 <0x0000000c, "S_BREV_B64", []>;
} // End neverHasSideEffects = 1
////def S_BCNT0_I32_B32 : SOP1_BCNT0 <0x0000000d, "S_BCNT0_I32_B32", []>;
////def S_BCNT0_I32_B64 : SOP1_BCNT0 <0x0000000e, "S_BCNT0_I32_B64", []>;
-////def S_BCNT1_I32_B32 : SOP1_BCNT1 <0x0000000f, "S_BCNT1_I32_B32", []>;
-////def S_BCNT1_I32_B64 : SOP1_BCNT1 <0x00000010, "S_BCNT1_I32_B64", []>;
-////def S_FF0_I32_B32 : SOP1_FF0 <0x00000011, "S_FF0_I32_B32", []>;
+def S_BCNT1_I32_B32 : SOP1_32 <0x0000000f, "S_BCNT1_I32_B32",
+ [(set i32:$dst, (ctpop i32:$src0))]
+>;
+def S_BCNT1_I32_B64 : SOP1_32_64 <0x00000010, "S_BCNT1_I32_B64", []>;
+
+////def S_FF0_I32_B32 : SOP1_32 <0x00000011, "S_FF0_I32_B32", []>;
////def S_FF0_I32_B64 : SOP1_FF0 <0x00000012, "S_FF0_I32_B64", []>;
-////def S_FF1_I32_B32 : SOP1_FF1 <0x00000013, "S_FF1_I32_B32", []>;
+def S_FF1_I32_B32 : SOP1_32 <0x00000013, "S_FF1_I32_B32",
+ [(set i32:$dst, (cttz_zero_undef i32:$src0))]
+>;
////def S_FF1_I32_B64 : SOP1_FF1 <0x00000014, "S_FF1_I32_B64", []>;
-//def S_FLBIT_I32_B32 : SOP1_32 <0x00000015, "S_FLBIT_I32_B32", []>;
+
+def S_FLBIT_I32_B32 : SOP1_32 <0x00000015, "S_FLBIT_I32_B32",
+ [(set i32:$dst, (ctlz_zero_undef i32:$src0))]
+>;
+
//def S_FLBIT_I32_B64 : SOP1_32 <0x00000016, "S_FLBIT_I32_B64", []>;
def S_FLBIT_I32 : SOP1_32 <0x00000017, "S_FLBIT_I32", []>;
//def S_FLBIT_I32_I64 : SOP1_32 <0x00000018, "S_FLBIT_I32_I64", []>;
-//def S_SEXT_I32_I8 : SOP1_32 <0x00000019, "S_SEXT_I32_I8", []>;
-//def S_SEXT_I32_I16 : SOP1_32 <0x0000001a, "S_SEXT_I32_I16", []>;
+def S_SEXT_I32_I8 : SOP1_32 <0x00000019, "S_SEXT_I32_I8",
+ [(set i32:$dst, (sext_inreg i32:$src0, i8))]
+>;
+def S_SEXT_I32_I16 : SOP1_32 <0x0000001a, "S_SEXT_I32_I16",
+ [(set i32:$dst, (sext_inreg i32:$src0, i16))]
+>;
+
////def S_BITSET0_B32 : SOP1_BITSET0 <0x0000001b, "S_BITSET0_B32", []>;
////def S_BITSET0_B64 : SOP1_BITSET0 <0x0000001c, "S_BITSET0_B64", []>;
////def S_BITSET1_B32 : SOP1_BITSET1 <0x0000001d, "S_BITSET1_B32", []>;
////def S_BITSET1_B64 : SOP1_BITSET1 <0x0000001e, "S_BITSET1_B64", []>;
-def S_GETPC_B64 : SOP1_64 <0x0000001f, "S_GETPC_B64", []>;
+def S_GETPC_B64 : SOP1 <
+ 0x0000001f, (outs SReg_64:$dst), (ins), "S_GETPC_B64 $dst", []
+> {
+ let SSRC0 = 0;
+}
def S_SETPC_B64 : SOP1_64 <0x00000020, "S_SETPC_B64", []>;
def S_SWAPPC_B64 : SOP1_64 <0x00000021, "S_SWAPPC_B64", []>;
def S_RFE_B64 : SOP1_64 <0x00000022, "S_RFE_B64", []>;
@@ -94,6 +171,150 @@ def S_MOVRELD_B64 : SOP1_64 <0x00000031, "S_MOVRELD_B64", []>;
def S_MOV_REGRD_B32 : SOP1_32 <0x00000033, "S_MOV_REGRD_B32", []>;
def S_ABS_I32 : SOP1_32 <0x00000034, "S_ABS_I32", []>;
def S_MOV_FED_B32 : SOP1_32 <0x00000035, "S_MOV_FED_B32", []>;
+
+//===----------------------------------------------------------------------===//
+// SOP2 Instructions
+//===----------------------------------------------------------------------===//
+
+let Defs = [SCC] in { // Carry out goes to SCC
+let isCommutable = 1 in {
+def S_ADD_U32 : SOP2_32 <0x00000000, "S_ADD_U32", []>;
+def S_ADD_I32 : SOP2_32 <0x00000002, "S_ADD_I32",
+ [(set i32:$dst, (add SSrc_32:$src0, SSrc_32:$src1))]
+>;
+} // End isCommutable = 1
+
+def S_SUB_U32 : SOP2_32 <0x00000001, "S_SUB_U32", []>;
+def S_SUB_I32 : SOP2_32 <0x00000003, "S_SUB_I32",
+ [(set i32:$dst, (sub SSrc_32:$src0, SSrc_32:$src1))]
+>;
+
+let Uses = [SCC] in { // Carry in comes from SCC
+let isCommutable = 1 in {
+def S_ADDC_U32 : SOP2_32 <0x00000004, "S_ADDC_U32",
+ [(set i32:$dst, (adde (i32 SSrc_32:$src0), (i32 SSrc_32:$src1)))]>;
+} // End isCommutable = 1
+
+def S_SUBB_U32 : SOP2_32 <0x00000005, "S_SUBB_U32",
+ [(set i32:$dst, (sube (i32 SSrc_32:$src0), (i32 SSrc_32:$src1)))]>;
+} // End Uses = [SCC]
+} // End Defs = [SCC]
+
+def S_MIN_I32 : SOP2_32 <0x00000006, "S_MIN_I32",
+ [(set i32:$dst, (AMDGPUsmin i32:$src0, i32:$src1))]
+>;
+def S_MIN_U32 : SOP2_32 <0x00000007, "S_MIN_U32",
+ [(set i32:$dst, (AMDGPUumin i32:$src0, i32:$src1))]
+>;
+def S_MAX_I32 : SOP2_32 <0x00000008, "S_MAX_I32",
+ [(set i32:$dst, (AMDGPUsmax i32:$src0, i32:$src1))]
+>;
+def S_MAX_U32 : SOP2_32 <0x00000009, "S_MAX_U32",
+ [(set i32:$dst, (AMDGPUumax i32:$src0, i32:$src1))]
+>;
+
+def S_CSELECT_B32 : SOP2 <
+ 0x0000000a, (outs SReg_32:$dst),
+ (ins SReg_32:$src0, SReg_32:$src1, SCCReg:$scc), "S_CSELECT_B32",
+ []
+>;
+
+def S_CSELECT_B64 : SOP2_64 <0x0000000b, "S_CSELECT_B64", []>;
+
+def S_AND_B32 : SOP2_32 <0x0000000e, "S_AND_B32",
+ [(set i32:$dst, (and i32:$src0, i32:$src1))]
+>;
+
+def S_AND_B64 : SOP2_64 <0x0000000f, "S_AND_B64",
+ [(set i64:$dst, (and i64:$src0, i64:$src1))]
+>;
+
+def S_OR_B32 : SOP2_32 <0x00000010, "S_OR_B32",
+ [(set i32:$dst, (or i32:$src0, i32:$src1))]
+>;
+
+def S_OR_B64 : SOP2_64 <0x00000011, "S_OR_B64",
+ [(set i64:$dst, (or i64:$src0, i64:$src1))]
+>;
+
+def S_XOR_B32 : SOP2_32 <0x00000012, "S_XOR_B32",
+ [(set i32:$dst, (xor i32:$src0, i32:$src1))]
+>;
+
+def S_XOR_B64 : SOP2_64 <0x00000013, "S_XOR_B64",
+ [(set i64:$dst, (xor i64:$src0, i64:$src1))]
+>;
+def S_ANDN2_B32 : SOP2_32 <0x00000014, "S_ANDN2_B32", []>;
+def S_ANDN2_B64 : SOP2_64 <0x00000015, "S_ANDN2_B64", []>;
+def S_ORN2_B32 : SOP2_32 <0x00000016, "S_ORN2_B32", []>;
+def S_ORN2_B64 : SOP2_64 <0x00000017, "S_ORN2_B64", []>;
+def S_NAND_B32 : SOP2_32 <0x00000018, "S_NAND_B32", []>;
+def S_NAND_B64 : SOP2_64 <0x00000019, "S_NAND_B64", []>;
+def S_NOR_B32 : SOP2_32 <0x0000001a, "S_NOR_B32", []>;
+def S_NOR_B64 : SOP2_64 <0x0000001b, "S_NOR_B64", []>;
+def S_XNOR_B32 : SOP2_32 <0x0000001c, "S_XNOR_B32", []>;
+def S_XNOR_B64 : SOP2_64 <0x0000001d, "S_XNOR_B64", []>;
+
+// Use added complexity so these patterns are preferred to the VALU patterns.
+let AddedComplexity = 1 in {
+
+def S_LSHL_B32 : SOP2_32 <0x0000001e, "S_LSHL_B32",
+ [(set i32:$dst, (shl i32:$src0, i32:$src1))]
+>;
+def S_LSHL_B64 : SOP2_SHIFT_64 <0x0000001f, "S_LSHL_B64",
+ [(set i64:$dst, (shl i64:$src0, i32:$src1))]
+>;
+def S_LSHR_B32 : SOP2_32 <0x00000020, "S_LSHR_B32",
+ [(set i32:$dst, (srl i32:$src0, i32:$src1))]
+>;
+def S_LSHR_B64 : SOP2_SHIFT_64 <0x00000021, "S_LSHR_B64",
+ [(set i64:$dst, (srl i64:$src0, i32:$src1))]
+>;
+def S_ASHR_I32 : SOP2_32 <0x00000022, "S_ASHR_I32",
+ [(set i32:$dst, (sra i32:$src0, i32:$src1))]
+>;
+def S_ASHR_I64 : SOP2_SHIFT_64 <0x00000023, "S_ASHR_I64",
+ [(set i64:$dst, (sra i64:$src0, i32:$src1))]
+>;
+
+} // End AddedComplexity = 1
+
+def S_BFM_B32 : SOP2_32 <0x00000024, "S_BFM_B32", []>;
+def S_BFM_B64 : SOP2_64 <0x00000025, "S_BFM_B64", []>;
+def S_MUL_I32 : SOP2_32 <0x00000026, "S_MUL_I32", []>;
+def S_BFE_U32 : SOP2_32 <0x00000027, "S_BFE_U32", []>;
+def S_BFE_I32 : SOP2_32 <0x00000028, "S_BFE_I32", []>;
+def S_BFE_U64 : SOP2_64 <0x00000029, "S_BFE_U64", []>;
+def S_BFE_I64 : SOP2_64 <0x0000002a, "S_BFE_I64", []>;
+//def S_CBRANCH_G_FORK : SOP2_ <0x0000002b, "S_CBRANCH_G_FORK", []>;
+def S_ABSDIFF_I32 : SOP2_32 <0x0000002c, "S_ABSDIFF_I32", []>;
+
+//===----------------------------------------------------------------------===//
+// SOPC Instructions
+//===----------------------------------------------------------------------===//
+
+def S_CMP_EQ_I32 : SOPC_32 <0x00000000, "S_CMP_EQ_I32">;
+def S_CMP_LG_I32 : SOPC_32 <0x00000001, "S_CMP_LG_I32">;
+def S_CMP_GT_I32 : SOPC_32 <0x00000002, "S_CMP_GT_I32">;
+def S_CMP_GE_I32 : SOPC_32 <0x00000003, "S_CMP_GE_I32">;
+def S_CMP_LT_I32 : SOPC_32 <0x00000004, "S_CMP_LT_I32">;
+def S_CMP_LE_I32 : SOPC_32 <0x00000005, "S_CMP_LE_I32">;
+def S_CMP_EQ_U32 : SOPC_32 <0x00000006, "S_CMP_EQ_U32">;
+def S_CMP_LG_U32 : SOPC_32 <0x00000007, "S_CMP_LG_U32">;
+def S_CMP_GT_U32 : SOPC_32 <0x00000008, "S_CMP_GT_U32">;
+def S_CMP_GE_U32 : SOPC_32 <0x00000009, "S_CMP_GE_U32">;
+def S_CMP_LT_U32 : SOPC_32 <0x0000000a, "S_CMP_LT_U32">;
+def S_CMP_LE_U32 : SOPC_32 <0x0000000b, "S_CMP_LE_U32">;
+////def S_BITCMP0_B32 : SOPC_BITCMP0 <0x0000000c, "S_BITCMP0_B32", []>;
+////def S_BITCMP1_B32 : SOPC_BITCMP1 <0x0000000d, "S_BITCMP1_B32", []>;
+////def S_BITCMP0_B64 : SOPC_BITCMP0 <0x0000000e, "S_BITCMP0_B64", []>;
+////def S_BITCMP1_B64 : SOPC_BITCMP1 <0x0000000f, "S_BITCMP1_B64", []>;
+//def S_SETVSKIP : SOPC_ <0x00000010, "S_SETVSKIP", []>;
+
+//===----------------------------------------------------------------------===//
+// SOPK Instructions
+//===----------------------------------------------------------------------===//
+
def S_MOVK_I32 : SOPK_32 <0x00000000, "S_MOVK_I32", []>;
def S_CMOVK_I32 : SOPK_32 <0x00000002, "S_CMOVK_I32", []>;
@@ -116,7 +337,7 @@ def S_CMPK_EQ_I32 : SOPK <
>;
*/
-let isCompare = 1 in {
+let isCompare = 1, Defs = [SCC] in {
def S_CMPK_LG_I32 : SOPK_32 <0x00000004, "S_CMPK_LG_I32", []>;
def S_CMPK_GT_I32 : SOPK_32 <0x00000005, "S_CMPK_GT_I32", []>;
def S_CMPK_GE_I32 : SOPK_32 <0x00000006, "S_CMPK_GE_I32", []>;
@@ -128,7 +349,7 @@ def S_CMPK_GT_U32 : SOPK_32 <0x0000000b, "S_CMPK_GT_U32", []>;
def S_CMPK_GE_U32 : SOPK_32 <0x0000000c, "S_CMPK_GE_U32", []>;
def S_CMPK_LT_U32 : SOPK_32 <0x0000000d, "S_CMPK_LT_U32", []>;
def S_CMPK_LE_U32 : SOPK_32 <0x0000000e, "S_CMPK_LE_U32", []>;
-} // End isCompare = 1
+} // End isCompare = 1, Defs = [SCC]
let Defs = [SCC], isCommutable = 1 in {
def S_ADDK_I32 : SOPK_32 <0x0000000f, "S_ADDK_I32", []>;
@@ -142,6 +363,108 @@ def S_GETREG_REGRD_B32 : SOPK_32 <0x00000014, "S_GETREG_REGRD_B32", []>;
//def S_SETREG_IMM32_B32 : SOPK_32 <0x00000015, "S_SETREG_IMM32_B32", []>;
//def EXP : EXP_ <0x00000000, "EXP", []>;
+} // End let OtherPredicates = [isCFDepth0]
+
+//===----------------------------------------------------------------------===//
+// SOPP Instructions
+//===----------------------------------------------------------------------===//
+
+def S_NOP : SOPP <0x00000000, (ins i16imm:$simm16), "S_NOP $simm16", []>;
+
+let isTerminator = 1 in {
+
+def S_ENDPGM : SOPP <0x00000001, (ins), "S_ENDPGM",
+ [(IL_retflag)]> {
+ let simm16 = 0;
+ let isBarrier = 1;
+ let hasCtrlDep = 1;
+}
+
+let isBranch = 1 in {
+def S_BRANCH : SOPP <
+ 0x00000002, (ins sopp_brtarget:$simm16), "S_BRANCH $simm16",
+ [(br bb:$simm16)]> {
+ let isBarrier = 1;
+}
+
+let DisableEncoding = "$scc" in {
+def S_CBRANCH_SCC0 : SOPP <
+ 0x00000004, (ins sopp_brtarget:$simm16, SCCReg:$scc),
+ "S_CBRANCH_SCC0 $simm16", []
+>;
+def S_CBRANCH_SCC1 : SOPP <
+ 0x00000005, (ins sopp_brtarget:$simm16, SCCReg:$scc),
+ "S_CBRANCH_SCC1 $simm16",
+ []
+>;
+} // End DisableEncoding = "$scc"
+
+def S_CBRANCH_VCCZ : SOPP <
+ 0x00000006, (ins sopp_brtarget:$simm16, VCCReg:$vcc),
+ "S_CBRANCH_VCCZ $simm16",
+ []
+>;
+def S_CBRANCH_VCCNZ : SOPP <
+ 0x00000007, (ins sopp_brtarget:$simm16, VCCReg:$vcc),
+ "S_CBRANCH_VCCNZ $simm16",
+ []
+>;
+
+let DisableEncoding = "$exec" in {
+def S_CBRANCH_EXECZ : SOPP <
+ 0x00000008, (ins sopp_brtarget:$simm16, EXECReg:$exec),
+ "S_CBRANCH_EXECZ $simm16",
+ []
+>;
+def S_CBRANCH_EXECNZ : SOPP <
+ 0x00000009, (ins sopp_brtarget:$simm16, EXECReg:$exec),
+ "S_CBRANCH_EXECNZ $simm16",
+ []
+>;
+} // End DisableEncoding = "$exec"
+
+
+} // End isBranch = 1
+} // End isTerminator = 1
+
+let hasSideEffects = 1 in {
+def S_BARRIER : SOPP <0x0000000a, (ins), "S_BARRIER",
+ [(int_AMDGPU_barrier_local)]
+> {
+ let simm16 = 0;
+ let isBarrier = 1;
+ let hasCtrlDep = 1;
+ let mayLoad = 1;
+ let mayStore = 1;
+}
+
+def S_WAITCNT : SOPP <0x0000000c, (ins WAIT_FLAG:$simm16), "S_WAITCNT $simm16",
+ []
+>;
+//def S_SETHALT : SOPP_ <0x0000000d, "S_SETHALT", []>;
+//def S_SLEEP : SOPP_ <0x0000000e, "S_SLEEP", []>;
+//def S_SETPRIO : SOPP_ <0x0000000f, "S_SETPRIO", []>;
+
+let Uses = [EXEC] in {
+ def S_SENDMSG : SOPP <0x00000010, (ins SendMsgImm:$simm16, M0Reg:$m0), "S_SENDMSG $simm16",
+ [(int_SI_sendmsg imm:$simm16, M0Reg:$m0)]
+ > {
+ let DisableEncoding = "$m0";
+ }
+} // End Uses = [EXEC]
+
+//def S_SENDMSGHALT : SOPP_ <0x00000011, "S_SENDMSGHALT", []>;
+//def S_TRAP : SOPP_ <0x00000012, "S_TRAP", []>;
+//def S_ICACHE_INV : SOPP_ <0x00000013, "S_ICACHE_INV", []>;
+//def S_INCPERFLEVEL : SOPP_ <0x00000014, "S_INCPERFLEVEL", []>;
+//def S_DECPERFLEVEL : SOPP_ <0x00000015, "S_DECPERFLEVEL", []>;
+//def S_TTRACEDATA : SOPP_ <0x00000016, "S_TTRACEDATA", []>;
+} // End hasSideEffects
+
+//===----------------------------------------------------------------------===//
+// VOPC Instructions
+//===----------------------------------------------------------------------===//
+
let isCompare = 1 in {
defm V_CMP_F_F32 : VOPC_32 <0x00000000, "V_CMP_F_F32">;
@@ -161,26 +484,26 @@ defm V_CMP_NEQ_F32 : VOPC_32 <0x0000000d, "V_CMP_NEQ_F32", f32, COND_UNE>;
defm V_CMP_NLT_F32 : VOPC_32 <0x0000000e, "V_CMP_NLT_F32">;
defm V_CMP_TRU_F32 : VOPC_32 <0x0000000f, "V_CMP_TRU_F32">;
-let hasSideEffects = 1, Defs = [EXEC] in {
+let hasSideEffects = 1 in {
-defm V_CMPX_F_F32 : VOPC_32 <0x00000010, "V_CMPX_F_F32">;
-defm V_CMPX_LT_F32 : VOPC_32 <0x00000011, "V_CMPX_LT_F32">;
-defm V_CMPX_EQ_F32 : VOPC_32 <0x00000012, "V_CMPX_EQ_F32">;
-defm V_CMPX_LE_F32 : VOPC_32 <0x00000013, "V_CMPX_LE_F32">;
-defm V_CMPX_GT_F32 : VOPC_32 <0x00000014, "V_CMPX_GT_F32">;
-defm V_CMPX_LG_F32 : VOPC_32 <0x00000015, "V_CMPX_LG_F32">;
-defm V_CMPX_GE_F32 : VOPC_32 <0x00000016, "V_CMPX_GE_F32">;
-defm V_CMPX_O_F32 : VOPC_32 <0x00000017, "V_CMPX_O_F32">;
-defm V_CMPX_U_F32 : VOPC_32 <0x00000018, "V_CMPX_U_F32">;
-defm V_CMPX_NGE_F32 : VOPC_32 <0x00000019, "V_CMPX_NGE_F32">;
-defm V_CMPX_NLG_F32 : VOPC_32 <0x0000001a, "V_CMPX_NLG_F32">;
-defm V_CMPX_NGT_F32 : VOPC_32 <0x0000001b, "V_CMPX_NGT_F32">;
-defm V_CMPX_NLE_F32 : VOPC_32 <0x0000001c, "V_CMPX_NLE_F32">;
-defm V_CMPX_NEQ_F32 : VOPC_32 <0x0000001d, "V_CMPX_NEQ_F32">;
-defm V_CMPX_NLT_F32 : VOPC_32 <0x0000001e, "V_CMPX_NLT_F32">;
-defm V_CMPX_TRU_F32 : VOPC_32 <0x0000001f, "V_CMPX_TRU_F32">;
+defm V_CMPX_F_F32 : VOPCX_32 <0x00000010, "V_CMPX_F_F32">;
+defm V_CMPX_LT_F32 : VOPCX_32 <0x00000011, "V_CMPX_LT_F32">;
+defm V_CMPX_EQ_F32 : VOPCX_32 <0x00000012, "V_CMPX_EQ_F32">;
+defm V_CMPX_LE_F32 : VOPCX_32 <0x00000013, "V_CMPX_LE_F32">;
+defm V_CMPX_GT_F32 : VOPCX_32 <0x00000014, "V_CMPX_GT_F32">;
+defm V_CMPX_LG_F32 : VOPCX_32 <0x00000015, "V_CMPX_LG_F32">;
+defm V_CMPX_GE_F32 : VOPCX_32 <0x00000016, "V_CMPX_GE_F32">;
+defm V_CMPX_O_F32 : VOPCX_32 <0x00000017, "V_CMPX_O_F32">;
+defm V_CMPX_U_F32 : VOPCX_32 <0x00000018, "V_CMPX_U_F32">;
+defm V_CMPX_NGE_F32 : VOPCX_32 <0x00000019, "V_CMPX_NGE_F32">;
+defm V_CMPX_NLG_F32 : VOPCX_32 <0x0000001a, "V_CMPX_NLG_F32">;
+defm V_CMPX_NGT_F32 : VOPCX_32 <0x0000001b, "V_CMPX_NGT_F32">;
+defm V_CMPX_NLE_F32 : VOPCX_32 <0x0000001c, "V_CMPX_NLE_F32">;
+defm V_CMPX_NEQ_F32 : VOPCX_32 <0x0000001d, "V_CMPX_NEQ_F32">;
+defm V_CMPX_NLT_F32 : VOPCX_32 <0x0000001e, "V_CMPX_NLT_F32">;
+defm V_CMPX_TRU_F32 : VOPCX_32 <0x0000001f, "V_CMPX_TRU_F32">;
-} // End hasSideEffects = 1, Defs = [EXEC]
+} // End hasSideEffects = 1
defm V_CMP_F_F64 : VOPC_64 <0x00000020, "V_CMP_F_F64">;
defm V_CMP_LT_F64 : VOPC_64 <0x00000021, "V_CMP_LT_F64", f64, COND_OLT>;
@@ -199,26 +522,26 @@ defm V_CMP_NEQ_F64 : VOPC_64 <0x0000002d, "V_CMP_NEQ_F64", f64, COND_UNE>;
defm V_CMP_NLT_F64 : VOPC_64 <0x0000002e, "V_CMP_NLT_F64">;
defm V_CMP_TRU_F64 : VOPC_64 <0x0000002f, "V_CMP_TRU_F64">;
-let hasSideEffects = 1, Defs = [EXEC] in {
+let hasSideEffects = 1 in {
-defm V_CMPX_F_F64 : VOPC_64 <0x00000030, "V_CMPX_F_F64">;
-defm V_CMPX_LT_F64 : VOPC_64 <0x00000031, "V_CMPX_LT_F64">;
-defm V_CMPX_EQ_F64 : VOPC_64 <0x00000032, "V_CMPX_EQ_F64">;
-defm V_CMPX_LE_F64 : VOPC_64 <0x00000033, "V_CMPX_LE_F64">;
-defm V_CMPX_GT_F64 : VOPC_64 <0x00000034, "V_CMPX_GT_F64">;
-defm V_CMPX_LG_F64 : VOPC_64 <0x00000035, "V_CMPX_LG_F64">;
-defm V_CMPX_GE_F64 : VOPC_64 <0x00000036, "V_CMPX_GE_F64">;
-defm V_CMPX_O_F64 : VOPC_64 <0x00000037, "V_CMPX_O_F64">;
-defm V_CMPX_U_F64 : VOPC_64 <0x00000038, "V_CMPX_U_F64">;
-defm V_CMPX_NGE_F64 : VOPC_64 <0x00000039, "V_CMPX_NGE_F64">;
-defm V_CMPX_NLG_F64 : VOPC_64 <0x0000003a, "V_CMPX_NLG_F64">;
-defm V_CMPX_NGT_F64 : VOPC_64 <0x0000003b, "V_CMPX_NGT_F64">;
-defm V_CMPX_NLE_F64 : VOPC_64 <0x0000003c, "V_CMPX_NLE_F64">;
-defm V_CMPX_NEQ_F64 : VOPC_64 <0x0000003d, "V_CMPX_NEQ_F64">;
-defm V_CMPX_NLT_F64 : VOPC_64 <0x0000003e, "V_CMPX_NLT_F64">;
-defm V_CMPX_TRU_F64 : VOPC_64 <0x0000003f, "V_CMPX_TRU_F64">;
+defm V_CMPX_F_F64 : VOPCX_64 <0x00000030, "V_CMPX_F_F64">;
+defm V_CMPX_LT_F64 : VOPCX_64 <0x00000031, "V_CMPX_LT_F64">;
+defm V_CMPX_EQ_F64 : VOPCX_64 <0x00000032, "V_CMPX_EQ_F64">;
+defm V_CMPX_LE_F64 : VOPCX_64 <0x00000033, "V_CMPX_LE_F64">;
+defm V_CMPX_GT_F64 : VOPCX_64 <0x00000034, "V_CMPX_GT_F64">;
+defm V_CMPX_LG_F64 : VOPCX_64 <0x00000035, "V_CMPX_LG_F64">;
+defm V_CMPX_GE_F64 : VOPCX_64 <0x00000036, "V_CMPX_GE_F64">;
+defm V_CMPX_O_F64 : VOPCX_64 <0x00000037, "V_CMPX_O_F64">;
+defm V_CMPX_U_F64 : VOPCX_64 <0x00000038, "V_CMPX_U_F64">;
+defm V_CMPX_NGE_F64 : VOPCX_64 <0x00000039, "V_CMPX_NGE_F64">;
+defm V_CMPX_NLG_F64 : VOPCX_64 <0x0000003a, "V_CMPX_NLG_F64">;
+defm V_CMPX_NGT_F64 : VOPCX_64 <0x0000003b, "V_CMPX_NGT_F64">;
+defm V_CMPX_NLE_F64 : VOPCX_64 <0x0000003c, "V_CMPX_NLE_F64">;
+defm V_CMPX_NEQ_F64 : VOPCX_64 <0x0000003d, "V_CMPX_NEQ_F64">;
+defm V_CMPX_NLT_F64 : VOPCX_64 <0x0000003e, "V_CMPX_NLT_F64">;
+defm V_CMPX_TRU_F64 : VOPCX_64 <0x0000003f, "V_CMPX_TRU_F64">;
-} // End hasSideEffects = 1, Defs = [EXEC]
+} // End hasSideEffects = 1
defm V_CMPS_F_F32 : VOPC_32 <0x00000040, "V_CMPS_F_F32">;
defm V_CMPS_LT_F32 : VOPC_32 <0x00000041, "V_CMPS_LT_F32">;
@@ -237,26 +560,26 @@ defm V_CMPS_NEQ_F32 : VOPC_32 <0x0000004d, "V_CMPS_NEQ_F32">;
defm V_CMPS_NLT_F32 : VOPC_32 <0x0000004e, "V_CMPS_NLT_F32">;
defm V_CMPS_TRU_F32 : VOPC_32 <0x0000004f, "V_CMPS_TRU_F32">;
-let hasSideEffects = 1, Defs = [EXEC] in {
+let hasSideEffects = 1 in {
-defm V_CMPSX_F_F32 : VOPC_32 <0x00000050, "V_CMPSX_F_F32">;
-defm V_CMPSX_LT_F32 : VOPC_32 <0x00000051, "V_CMPSX_LT_F32">;
-defm V_CMPSX_EQ_F32 : VOPC_32 <0x00000052, "V_CMPSX_EQ_F32">;
-defm V_CMPSX_LE_F32 : VOPC_32 <0x00000053, "V_CMPSX_LE_F32">;
-defm V_CMPSX_GT_F32 : VOPC_32 <0x00000054, "V_CMPSX_GT_F32">;
-defm V_CMPSX_LG_F32 : VOPC_32 <0x00000055, "V_CMPSX_LG_F32">;
-defm V_CMPSX_GE_F32 : VOPC_32 <0x00000056, "V_CMPSX_GE_F32">;
-defm V_CMPSX_O_F32 : VOPC_32 <0x00000057, "V_CMPSX_O_F32">;
-defm V_CMPSX_U_F32 : VOPC_32 <0x00000058, "V_CMPSX_U_F32">;
-defm V_CMPSX_NGE_F32 : VOPC_32 <0x00000059, "V_CMPSX_NGE_F32">;
-defm V_CMPSX_NLG_F32 : VOPC_32 <0x0000005a, "V_CMPSX_NLG_F32">;
-defm V_CMPSX_NGT_F32 : VOPC_32 <0x0000005b, "V_CMPSX_NGT_F32">;
-defm V_CMPSX_NLE_F32 : VOPC_32 <0x0000005c, "V_CMPSX_NLE_F32">;
-defm V_CMPSX_NEQ_F32 : VOPC_32 <0x0000005d, "V_CMPSX_NEQ_F32">;
-defm V_CMPSX_NLT_F32 : VOPC_32 <0x0000005e, "V_CMPSX_NLT_F32">;
-defm V_CMPSX_TRU_F32 : VOPC_32 <0x0000005f, "V_CMPSX_TRU_F32">;
+defm V_CMPSX_F_F32 : VOPCX_32 <0x00000050, "V_CMPSX_F_F32">;
+defm V_CMPSX_LT_F32 : VOPCX_32 <0x00000051, "V_CMPSX_LT_F32">;
+defm V_CMPSX_EQ_F32 : VOPCX_32 <0x00000052, "V_CMPSX_EQ_F32">;
+defm V_CMPSX_LE_F32 : VOPCX_32 <0x00000053, "V_CMPSX_LE_F32">;
+defm V_CMPSX_GT_F32 : VOPCX_32 <0x00000054, "V_CMPSX_GT_F32">;
+defm V_CMPSX_LG_F32 : VOPCX_32 <0x00000055, "V_CMPSX_LG_F32">;
+defm V_CMPSX_GE_F32 : VOPCX_32 <0x00000056, "V_CMPSX_GE_F32">;
+defm V_CMPSX_O_F32 : VOPCX_32 <0x00000057, "V_CMPSX_O_F32">;
+defm V_CMPSX_U_F32 : VOPCX_32 <0x00000058, "V_CMPSX_U_F32">;
+defm V_CMPSX_NGE_F32 : VOPCX_32 <0x00000059, "V_CMPSX_NGE_F32">;
+defm V_CMPSX_NLG_F32 : VOPCX_32 <0x0000005a, "V_CMPSX_NLG_F32">;
+defm V_CMPSX_NGT_F32 : VOPCX_32 <0x0000005b, "V_CMPSX_NGT_F32">;
+defm V_CMPSX_NLE_F32 : VOPCX_32 <0x0000005c, "V_CMPSX_NLE_F32">;
+defm V_CMPSX_NEQ_F32 : VOPCX_32 <0x0000005d, "V_CMPSX_NEQ_F32">;
+defm V_CMPSX_NLT_F32 : VOPCX_32 <0x0000005e, "V_CMPSX_NLT_F32">;
+defm V_CMPSX_TRU_F32 : VOPCX_32 <0x0000005f, "V_CMPSX_TRU_F32">;
-} // End hasSideEffects = 1, Defs = [EXEC]
+} // End hasSideEffects = 1
defm V_CMPS_F_F64 : VOPC_64 <0x00000060, "V_CMPS_F_F64">;
defm V_CMPS_LT_F64 : VOPC_64 <0x00000061, "V_CMPS_LT_F64">;
@@ -305,18 +628,18 @@ defm V_CMP_NE_I32 : VOPC_32 <0x00000085, "V_CMP_NE_I32", i32, COND_NE>;
defm V_CMP_GE_I32 : VOPC_32 <0x00000086, "V_CMP_GE_I32", i32, COND_SGE>;
defm V_CMP_T_I32 : VOPC_32 <0x00000087, "V_CMP_T_I32">;
-let hasSideEffects = 1, Defs = [EXEC] in {
+let hasSideEffects = 1 in {
-defm V_CMPX_F_I32 : VOPC_32 <0x00000090, "V_CMPX_F_I32">;
-defm V_CMPX_LT_I32 : VOPC_32 <0x00000091, "V_CMPX_LT_I32">;
-defm V_CMPX_EQ_I32 : VOPC_32 <0x00000092, "V_CMPX_EQ_I32">;
-defm V_CMPX_LE_I32 : VOPC_32 <0x00000093, "V_CMPX_LE_I32">;
-defm V_CMPX_GT_I32 : VOPC_32 <0x00000094, "V_CMPX_GT_I32">;
-defm V_CMPX_NE_I32 : VOPC_32 <0x00000095, "V_CMPX_NE_I32">;
-defm V_CMPX_GE_I32 : VOPC_32 <0x00000096, "V_CMPX_GE_I32">;
-defm V_CMPX_T_I32 : VOPC_32 <0x00000097, "V_CMPX_T_I32">;
+defm V_CMPX_F_I32 : VOPCX_32 <0x00000090, "V_CMPX_F_I32">;
+defm V_CMPX_LT_I32 : VOPCX_32 <0x00000091, "V_CMPX_LT_I32">;
+defm V_CMPX_EQ_I32 : VOPCX_32 <0x00000092, "V_CMPX_EQ_I32">;
+defm V_CMPX_LE_I32 : VOPCX_32 <0x00000093, "V_CMPX_LE_I32">;
+defm V_CMPX_GT_I32 : VOPCX_32 <0x00000094, "V_CMPX_GT_I32">;
+defm V_CMPX_NE_I32 : VOPCX_32 <0x00000095, "V_CMPX_NE_I32">;
+defm V_CMPX_GE_I32 : VOPCX_32 <0x00000096, "V_CMPX_GE_I32">;
+defm V_CMPX_T_I32 : VOPCX_32 <0x00000097, "V_CMPX_T_I32">;
-} // End hasSideEffects = 1, Defs = [EXEC]
+} // End hasSideEffects = 1
defm V_CMP_F_I64 : VOPC_64 <0x000000a0, "V_CMP_F_I64">;
defm V_CMP_LT_I64 : VOPC_64 <0x000000a1, "V_CMP_LT_I64", i64, COND_SLT>;
@@ -327,18 +650,18 @@ defm V_CMP_NE_I64 : VOPC_64 <0x000000a5, "V_CMP_NE_I64", i64, COND_NE>;
defm V_CMP_GE_I64 : VOPC_64 <0x000000a6, "V_CMP_GE_I64", i64, COND_SGE>;
defm V_CMP_T_I64 : VOPC_64 <0x000000a7, "V_CMP_T_I64">;
-let hasSideEffects = 1, Defs = [EXEC] in {
+let hasSideEffects = 1 in {
-defm V_CMPX_F_I64 : VOPC_64 <0x000000b0, "V_CMPX_F_I64">;
-defm V_CMPX_LT_I64 : VOPC_64 <0x000000b1, "V_CMPX_LT_I64">;
-defm V_CMPX_EQ_I64 : VOPC_64 <0x000000b2, "V_CMPX_EQ_I64">;
-defm V_CMPX_LE_I64 : VOPC_64 <0x000000b3, "V_CMPX_LE_I64">;
-defm V_CMPX_GT_I64 : VOPC_64 <0x000000b4, "V_CMPX_GT_I64">;
-defm V_CMPX_NE_I64 : VOPC_64 <0x000000b5, "V_CMPX_NE_I64">;
-defm V_CMPX_GE_I64 : VOPC_64 <0x000000b6, "V_CMPX_GE_I64">;
-defm V_CMPX_T_I64 : VOPC_64 <0x000000b7, "V_CMPX_T_I64">;
+defm V_CMPX_F_I64 : VOPCX_64 <0x000000b0, "V_CMPX_F_I64">;
+defm V_CMPX_LT_I64 : VOPCX_64 <0x000000b1, "V_CMPX_LT_I64">;
+defm V_CMPX_EQ_I64 : VOPCX_64 <0x000000b2, "V_CMPX_EQ_I64">;
+defm V_CMPX_LE_I64 : VOPCX_64 <0x000000b3, "V_CMPX_LE_I64">;
+defm V_CMPX_GT_I64 : VOPCX_64 <0x000000b4, "V_CMPX_GT_I64">;
+defm V_CMPX_NE_I64 : VOPCX_64 <0x000000b5, "V_CMPX_NE_I64">;
+defm V_CMPX_GE_I64 : VOPCX_64 <0x000000b6, "V_CMPX_GE_I64">;
+defm V_CMPX_T_I64 : VOPCX_64 <0x000000b7, "V_CMPX_T_I64">;
-} // End hasSideEffects = 1, Defs = [EXEC]
+} // End hasSideEffects = 1
defm V_CMP_F_U32 : VOPC_32 <0x000000c0, "V_CMP_F_U32">;
defm V_CMP_LT_U32 : VOPC_32 <0x000000c1, "V_CMP_LT_U32", i32, COND_ULT>;
@@ -349,18 +672,18 @@ defm V_CMP_NE_U32 : VOPC_32 <0x000000c5, "V_CMP_NE_U32", i32, COND_NE>;
defm V_CMP_GE_U32 : VOPC_32 <0x000000c6, "V_CMP_GE_U32", i32, COND_UGE>;
defm V_CMP_T_U32 : VOPC_32 <0x000000c7, "V_CMP_T_U32">;
-let hasSideEffects = 1, Defs = [EXEC] in {
+let hasSideEffects = 1 in {
-defm V_CMPX_F_U32 : VOPC_32 <0x000000d0, "V_CMPX_F_U32">;
-defm V_CMPX_LT_U32 : VOPC_32 <0x000000d1, "V_CMPX_LT_U32">;
-defm V_CMPX_EQ_U32 : VOPC_32 <0x000000d2, "V_CMPX_EQ_U32">;
-defm V_CMPX_LE_U32 : VOPC_32 <0x000000d3, "V_CMPX_LE_U32">;
-defm V_CMPX_GT_U32 : VOPC_32 <0x000000d4, "V_CMPX_GT_U32">;
-defm V_CMPX_NE_U32 : VOPC_32 <0x000000d5, "V_CMPX_NE_U32">;
-defm V_CMPX_GE_U32 : VOPC_32 <0x000000d6, "V_CMPX_GE_U32">;
-defm V_CMPX_T_U32 : VOPC_32 <0x000000d7, "V_CMPX_T_U32">;
+defm V_CMPX_F_U32 : VOPCX_32 <0x000000d0, "V_CMPX_F_U32">;
+defm V_CMPX_LT_U32 : VOPCX_32 <0x000000d1, "V_CMPX_LT_U32">;
+defm V_CMPX_EQ_U32 : VOPCX_32 <0x000000d2, "V_CMPX_EQ_U32">;
+defm V_CMPX_LE_U32 : VOPCX_32 <0x000000d3, "V_CMPX_LE_U32">;
+defm V_CMPX_GT_U32 : VOPCX_32 <0x000000d4, "V_CMPX_GT_U32">;
+defm V_CMPX_NE_U32 : VOPCX_32 <0x000000d5, "V_CMPX_NE_U32">;
+defm V_CMPX_GE_U32 : VOPCX_32 <0x000000d6, "V_CMPX_GE_U32">;
+defm V_CMPX_T_U32 : VOPCX_32 <0x000000d7, "V_CMPX_T_U32">;
-} // End hasSideEffects = 1, Defs = [EXEC]
+} // End hasSideEffects = 1
defm V_CMP_F_U64 : VOPC_64 <0x000000e0, "V_CMP_F_U64">;
defm V_CMP_LT_U64 : VOPC_64 <0x000000e1, "V_CMP_LT_U64", i64, COND_ULT>;
@@ -371,43 +694,153 @@ defm V_CMP_NE_U64 : VOPC_64 <0x000000e5, "V_CMP_NE_U64", i64, COND_NE>;
defm V_CMP_GE_U64 : VOPC_64 <0x000000e6, "V_CMP_GE_U64", i64, COND_UGE>;
defm V_CMP_T_U64 : VOPC_64 <0x000000e7, "V_CMP_T_U64">;
-let hasSideEffects = 1, Defs = [EXEC] in {
+let hasSideEffects = 1 in {
-defm V_CMPX_F_U64 : VOPC_64 <0x000000f0, "V_CMPX_F_U64">;
-defm V_CMPX_LT_U64 : VOPC_64 <0x000000f1, "V_CMPX_LT_U64">;
-defm V_CMPX_EQ_U64 : VOPC_64 <0x000000f2, "V_CMPX_EQ_U64">;
-defm V_CMPX_LE_U64 : VOPC_64 <0x000000f3, "V_CMPX_LE_U64">;
-defm V_CMPX_GT_U64 : VOPC_64 <0x000000f4, "V_CMPX_GT_U64">;
-defm V_CMPX_NE_U64 : VOPC_64 <0x000000f5, "V_CMPX_NE_U64">;
-defm V_CMPX_GE_U64 : VOPC_64 <0x000000f6, "V_CMPX_GE_U64">;
-defm V_CMPX_T_U64 : VOPC_64 <0x000000f7, "V_CMPX_T_U64">;
+defm V_CMPX_F_U64 : VOPCX_64 <0x000000f0, "V_CMPX_F_U64">;
+defm V_CMPX_LT_U64 : VOPCX_64 <0x000000f1, "V_CMPX_LT_U64">;
+defm V_CMPX_EQ_U64 : VOPCX_64 <0x000000f2, "V_CMPX_EQ_U64">;
+defm V_CMPX_LE_U64 : VOPCX_64 <0x000000f3, "V_CMPX_LE_U64">;
+defm V_CMPX_GT_U64 : VOPCX_64 <0x000000f4, "V_CMPX_GT_U64">;
+defm V_CMPX_NE_U64 : VOPCX_64 <0x000000f5, "V_CMPX_NE_U64">;
+defm V_CMPX_GE_U64 : VOPCX_64 <0x000000f6, "V_CMPX_GE_U64">;
+defm V_CMPX_T_U64 : VOPCX_64 <0x000000f7, "V_CMPX_T_U64">;
-} // End hasSideEffects = 1, Defs = [EXEC]
+} // End hasSideEffects = 1
defm V_CMP_CLASS_F32 : VOPC_32 <0x00000088, "V_CMP_CLASS_F32">;
-let hasSideEffects = 1, Defs = [EXEC] in {
-defm V_CMPX_CLASS_F32 : VOPC_32 <0x00000098, "V_CMPX_CLASS_F32">;
-} // End hasSideEffects = 1, Defs = [EXEC]
+let hasSideEffects = 1 in {
+defm V_CMPX_CLASS_F32 : VOPCX_32 <0x00000098, "V_CMPX_CLASS_F32">;
+} // End hasSideEffects = 1
defm V_CMP_CLASS_F64 : VOPC_64 <0x000000a8, "V_CMP_CLASS_F64">;
-let hasSideEffects = 1, Defs = [EXEC] in {
-defm V_CMPX_CLASS_F64 : VOPC_64 <0x000000b8, "V_CMPX_CLASS_F64">;
-} // End hasSideEffects = 1, Defs = [EXEC]
+let hasSideEffects = 1 in {
+defm V_CMPX_CLASS_F64 : VOPCX_64 <0x000000b8, "V_CMPX_CLASS_F64">;
+} // End hasSideEffects = 1
} // End isCompare = 1
-def DS_ADD_U32_RTN : DS_1A1D_RET <0x20, "DS_ADD_U32_RTN", VReg_32>;
-def DS_SUB_U32_RTN : DS_1A1D_RET <0x21, "DS_SUB_U32_RTN", VReg_32>;
+//===----------------------------------------------------------------------===//
+// DS Instructions
+//===----------------------------------------------------------------------===//
+
+
+def DS_ADD_U32 : DS_1A1D_NORET <0x0, "DS_ADD_U32", VReg_32>;
+def DS_SUB_U32 : DS_1A1D_NORET <0x1, "DS_SUB_U32", VReg_32>;
+def DS_RSUB_U32 : DS_1A1D_NORET <0x2, "DS_RSUB_U32", VReg_32>;
+def DS_INC_U32 : DS_1A1D_NORET <0x3, "DS_INC_U32", VReg_32>;
+def DS_DEC_U32 : DS_1A1D_NORET <0x4, "DS_DEC_U32", VReg_32>;
+def DS_MIN_I32 : DS_1A1D_NORET <0x5, "DS_MIN_I32", VReg_32>;
+def DS_MAX_I32 : DS_1A1D_NORET <0x6, "DS_MAX_I32", VReg_32>;
+def DS_MIN_U32 : DS_1A1D_NORET <0x7, "DS_MIN_U32", VReg_32>;
+def DS_MAX_U32 : DS_1A1D_NORET <0x8, "DS_MAX_U32", VReg_32>;
+def DS_AND_B32 : DS_1A1D_NORET <0x9, "DS_AND_B32", VReg_32>;
+def DS_OR_B32 : DS_1A1D_NORET <0xa, "DS_OR_B32", VReg_32>;
+def DS_XOR_B32 : DS_1A1D_NORET <0xb, "DS_XOR_B32", VReg_32>;
+def DS_MSKOR_B32 : DS_1A1D_NORET <0xc, "DS_MSKOR_B32", VReg_32>;
+def DS_CMPST_B32 : DS_1A2D_NORET <0x10, "DS_CMPST_B32", VReg_32>;
+def DS_CMPST_F32 : DS_1A2D_NORET <0x11, "DS_CMPST_F32", VReg_32>;
+def DS_MIN_F32 : DS_1A1D_NORET <0x12, "DS_MIN_F32", VReg_32>;
+def DS_MAX_F32 : DS_1A1D_NORET <0x13, "DS_MAX_F32", VReg_32>;
+
+def DS_ADD_RTN_U32 : DS_1A1D_RET <0x20, "DS_ADD_RTN_U32", VReg_32>;
+def DS_SUB_RTN_U32 : DS_1A1D_RET <0x21, "DS_SUB_RTN_U32", VReg_32>;
+def DS_RSUB_RTN_U32 : DS_1A1D_RET <0x22, "DS_RSUB_RTN_U32", VReg_32>;
+def DS_INC_RTN_U32 : DS_1A1D_RET <0x23, "DS_INC_RTN_U32", VReg_32>;
+def DS_DEC_RTN_U32 : DS_1A1D_RET <0x24, "DS_DEC_RTN_U32", VReg_32>;
+def DS_MIN_RTN_I32 : DS_1A1D_RET <0x25, "DS_MIN_RTN_I32", VReg_32>;
+def DS_MAX_RTN_I32 : DS_1A1D_RET <0x26, "DS_MAX_RTN_I32", VReg_32>;
+def DS_MIN_RTN_U32 : DS_1A1D_RET <0x27, "DS_MIN_RTN_U32", VReg_32>;
+def DS_MAX_RTN_U32 : DS_1A1D_RET <0x28, "DS_MAX_RTN_U32", VReg_32>;
+def DS_AND_RTN_B32 : DS_1A1D_RET <0x29, "DS_AND_RTN_B32", VReg_32>;
+def DS_OR_RTN_B32 : DS_1A1D_RET <0x2a, "DS_OR_RTN_B32", VReg_32>;
+def DS_XOR_RTN_B32 : DS_1A1D_RET <0x2b, "DS_XOR_RTN_B32", VReg_32>;
+def DS_MSKOR_RTN_B32 : DS_1A1D_RET <0x2c, "DS_MSKOR_RTN_B32", VReg_32>;
+def DS_WRXCHG_RTN_B32 : DS_1A1D_RET <0x2d, "DS_WRXCHG_RTN_B32", VReg_32>;
+//def DS_WRXCHG2_RTN_B32 : DS_2A0D_RET <0x2e, "DS_WRXCHG2_RTN_B32", VReg_32>;
+//def DS_WRXCHG2ST64_RTN_B32 : DS_2A0D_RET <0x2f, "DS_WRXCHG2_RTN_B32", VReg_32>;
+def DS_CMPST_RTN_B32 : DS_1A2D_RET <0x30, "DS_CMPST_RTN_B32", VReg_32>;
+def DS_CMPST_RTN_F32 : DS_1A2D_RET <0x31, "DS_CMPST_RTN_F32", VReg_32>;
+def DS_MIN_RTN_F32 : DS_1A1D_RET <0x32, "DS_MIN_RTN_F32", VReg_32>;
+def DS_MAX_RTN_F32 : DS_1A1D_RET <0x33, "DS_MAX_RTN_F32", VReg_32>;
+
+let SubtargetPredicate = isCI in {
+def DS_WRAP_RTN_F32 : DS_1A1D_RET <0x34, "DS_WRAP_RTN_F32", VReg_32>;
+} // End isCI
+
+
+def DS_ADD_U64 : DS_1A1D_NORET <0x40, "DS_ADD_U64", VReg_32>;
+def DS_SUB_U64 : DS_1A1D_NORET <0x41, "DS_SUB_U64", VReg_32>;
+def DS_RSUB_U64 : DS_1A1D_NORET <0x42, "DS_RSUB_U64", VReg_32>;
+def DS_INC_U64 : DS_1A1D_NORET <0x43, "DS_INC_U64", VReg_32>;
+def DS_DEC_U64 : DS_1A1D_NORET <0x44, "DS_DEC_U64", VReg_32>;
+def DS_MIN_I64 : DS_1A1D_NORET <0x45, "DS_MIN_I64", VReg_64>;
+def DS_MAX_I64 : DS_1A1D_NORET <0x46, "DS_MAX_I64", VReg_64>;
+def DS_MIN_U64 : DS_1A1D_NORET <0x47, "DS_MIN_U64", VReg_64>;
+def DS_MAX_U64 : DS_1A1D_NORET <0x48, "DS_MAX_U64", VReg_64>;
+def DS_AND_B64 : DS_1A1D_NORET <0x49, "DS_AND_B64", VReg_64>;
+def DS_OR_B64 : DS_1A1D_NORET <0x4a, "DS_OR_B64", VReg_64>;
+def DS_XOR_B64 : DS_1A1D_NORET <0x4b, "DS_XOR_B64", VReg_64>;
+def DS_MSKOR_B64 : DS_1A1D_NORET <0x4c, "DS_MSKOR_B64", VReg_64>;
+def DS_CMPST_B64 : DS_1A2D_NORET <0x50, "DS_CMPST_B64", VReg_64>;
+def DS_CMPST_F64 : DS_1A2D_NORET <0x51, "DS_CMPST_F64", VReg_64>;
+def DS_MIN_F64 : DS_1A1D_NORET <0x52, "DS_MIN_F64", VReg_64>;
+def DS_MAX_F64 : DS_1A1D_NORET <0x53, "DS_MAX_F64", VReg_64>;
+
+def DS_ADD_RTN_U64 : DS_1A1D_RET <0x60, "DS_ADD_RTN_U64", VReg_64>;
+def DS_SUB_RTN_U64 : DS_1A1D_RET <0x61, "DS_SUB_RTN_U64", VReg_64>;
+def DS_RSUB_RTN_U64 : DS_1A1D_RET <0x62, "DS_RSUB_RTN_U64", VReg_64>;
+def DS_INC_RTN_U64 : DS_1A1D_RET <0x63, "DS_INC_RTN_U64", VReg_64>;
+def DS_DEC_RTN_U64 : DS_1A1D_RET <0x64, "DS_DEC_RTN_U64", VReg_64>;
+def DS_MIN_RTN_I64 : DS_1A1D_RET <0x65, "DS_MIN_RTN_I64", VReg_64>;
+def DS_MAX_RTN_I64 : DS_1A1D_RET <0x66, "DS_MAX_RTN_I64", VReg_64>;
+def DS_MIN_RTN_U64 : DS_1A1D_RET <0x67, "DS_MIN_RTN_U64", VReg_64>;
+def DS_MAX_RTN_U64 : DS_1A1D_RET <0x68, "DS_MAX_RTN_U64", VReg_64>;
+def DS_AND_RTN_B64 : DS_1A1D_RET <0x69, "DS_AND_RTN_B64", VReg_64>;
+def DS_OR_RTN_B64 : DS_1A1D_RET <0x6a, "DS_OR_RTN_B64", VReg_64>;
+def DS_XOR_RTN_B64 : DS_1A1D_RET <0x6b, "DS_XOR_RTN_B64", VReg_64>;
+def DS_MSKOR_RTN_B64 : DS_1A1D_RET <0x6c, "DS_MSKOR_RTN_B64", VReg_64>;
+def DS_WRXCHG_RTN_B64 : DS_1A1D_RET <0x6d, "DS_WRXCHG_RTN_B64", VReg_64>;
+//def DS_WRXCHG2_RTN_B64 : DS_2A0D_RET <0x6e, "DS_WRXCHG2_RTN_B64", VReg_64>;
+//def DS_WRXCHG2ST64_RTN_B64 : DS_2A0D_RET <0x6f, "DS_WRXCHG2_RTN_B64", VReg_64>;
+def DS_CMPST_RTN_B64 : DS_1A2D_RET <0x70, "DS_CMPST_RTN_B64", VReg_64>;
+def DS_CMPST_RTN_F64 : DS_1A2D_RET <0x71, "DS_CMPST_RTN_F64", VReg_64>;
+def DS_MIN_RTN_F64 : DS_1A1D_RET <0x72, "DS_MIN_RTN_F64", VReg_64>;
+def DS_MAX_RTN_F64 : DS_1A1D_RET <0x73, "DS_MAX_RTN_F64", VReg_64>;
+
+//let SubtargetPredicate = isCI in {
+// DS_CONDXCHG32_RTN_B64
+// DS_CONDXCHG32_RTN_B128
+//} // End isCI
+
+// TODO: _SRC2_* forms
+
def DS_WRITE_B32 : DS_Store_Helper <0x0000000d, "DS_WRITE_B32", VReg_32>;
def DS_WRITE_B8 : DS_Store_Helper <0x00000001e, "DS_WRITE_B8", VReg_32>;
def DS_WRITE_B16 : DS_Store_Helper <0x00000001f, "DS_WRITE_B16", VReg_32>;
+def DS_WRITE_B64 : DS_Store_Helper <0x00000004d, "DS_WRITE_B64", VReg_64>;
+
def DS_READ_B32 : DS_Load_Helper <0x00000036, "DS_READ_B32", VReg_32>;
def DS_READ_I8 : DS_Load_Helper <0x00000039, "DS_READ_I8", VReg_32>;
def DS_READ_U8 : DS_Load_Helper <0x0000003a, "DS_READ_U8", VReg_32>;
def DS_READ_I16 : DS_Load_Helper <0x0000003b, "DS_READ_I16", VReg_32>;
def DS_READ_U16 : DS_Load_Helper <0x0000003c, "DS_READ_U16", VReg_32>;
+def DS_READ_B64 : DS_Load_Helper <0x00000076, "DS_READ_B64", VReg_64>;
+
+// 2 forms.
+def DS_WRITE2_B32 : DS_Load2_Helper <0x0000000E, "DS_WRITE2_B32", VReg_64>;
+def DS_WRITE2_B64 : DS_Load2_Helper <0x0000004E, "DS_WRITE2_B64", VReg_128>;
+
+def DS_READ2_B32 : DS_Load2_Helper <0x00000037, "DS_READ2_B32", VReg_64>;
+def DS_READ2_B64 : DS_Load2_Helper <0x00000075, "DS_READ2_B64", VReg_128>;
+
+// TODO: DS_READ2ST64_B32, DS_READ2ST64_B64,
+// DS_WRITE2ST64_B32, DS_WRITE2ST64_B64
+
+//===----------------------------------------------------------------------===//
+// MUBUF Instructions
+//===----------------------------------------------------------------------===//
//def BUFFER_LOAD_FORMAT_X : MUBUF_ <0x00000000, "BUFFER_LOAD_FORMAT_X", []>;
//def BUFFER_LOAD_FORMAT_XY : MUBUF_ <0x00000001, "BUFFER_LOAD_FORMAT_XY", []>;
@@ -417,32 +850,46 @@ defm BUFFER_LOAD_FORMAT_XYZW : MUBUF_Load_Helper <0x00000003, "BUFFER_LOAD_FORMA
//def BUFFER_STORE_FORMAT_XY : MUBUF_ <0x00000005, "BUFFER_STORE_FORMAT_XY", []>;
//def BUFFER_STORE_FORMAT_XYZ : MUBUF_ <0x00000006, "BUFFER_STORE_FORMAT_XYZ", []>;
//def BUFFER_STORE_FORMAT_XYZW : MUBUF_ <0x00000007, "BUFFER_STORE_FORMAT_XYZW", []>;
-defm BUFFER_LOAD_UBYTE : MUBUF_Load_Helper <0x00000008, "BUFFER_LOAD_UBYTE", VReg_32>;
-defm BUFFER_LOAD_SBYTE : MUBUF_Load_Helper <0x00000009, "BUFFER_LOAD_SBYTE", VReg_32>;
-defm BUFFER_LOAD_USHORT : MUBUF_Load_Helper <0x0000000a, "BUFFER_LOAD_USHORT", VReg_32>;
-defm BUFFER_LOAD_SSHORT : MUBUF_Load_Helper <0x0000000b, "BUFFER_LOAD_SSHORT", VReg_32>;
-defm BUFFER_LOAD_DWORD : MUBUF_Load_Helper <0x0000000c, "BUFFER_LOAD_DWORD", VReg_32>;
-defm BUFFER_LOAD_DWORDX2 : MUBUF_Load_Helper <0x0000000d, "BUFFER_LOAD_DWORDX2", VReg_64>;
-defm BUFFER_LOAD_DWORDX4 : MUBUF_Load_Helper <0x0000000e, "BUFFER_LOAD_DWORDX4", VReg_128>;
+defm BUFFER_LOAD_UBYTE : MUBUF_Load_Helper <
+ 0x00000008, "BUFFER_LOAD_UBYTE", VReg_32, i32, az_extloadi8_global
+>;
+defm BUFFER_LOAD_SBYTE : MUBUF_Load_Helper <
+ 0x00000009, "BUFFER_LOAD_SBYTE", VReg_32, i32, sextloadi8_global
+>;
+defm BUFFER_LOAD_USHORT : MUBUF_Load_Helper <
+ 0x0000000a, "BUFFER_LOAD_USHORT", VReg_32, i32, az_extloadi16_global
+>;
+defm BUFFER_LOAD_SSHORT : MUBUF_Load_Helper <
+ 0x0000000b, "BUFFER_LOAD_SSHORT", VReg_32, i32, sextloadi16_global
+>;
+defm BUFFER_LOAD_DWORD : MUBUF_Load_Helper <
+ 0x0000000c, "BUFFER_LOAD_DWORD", VReg_32, i32, global_load
+>;
+defm BUFFER_LOAD_DWORDX2 : MUBUF_Load_Helper <
+ 0x0000000d, "BUFFER_LOAD_DWORDX2", VReg_64, v2i32, global_load
+>;
+defm BUFFER_LOAD_DWORDX4 : MUBUF_Load_Helper <
+ 0x0000000e, "BUFFER_LOAD_DWORDX4", VReg_128, v4i32, global_load
+>;
-def BUFFER_STORE_BYTE : MUBUF_Store_Helper <
- 0x00000018, "BUFFER_STORE_BYTE", VReg_32
+defm BUFFER_STORE_BYTE : MUBUF_Store_Helper <
+ 0x00000018, "BUFFER_STORE_BYTE", VReg_32, i32, truncstorei8_global
>;
-def BUFFER_STORE_SHORT : MUBUF_Store_Helper <
- 0x0000001a, "BUFFER_STORE_SHORT", VReg_32
+defm BUFFER_STORE_SHORT : MUBUF_Store_Helper <
+ 0x0000001a, "BUFFER_STORE_SHORT", VReg_32, i32, truncstorei16_global
>;
-def BUFFER_STORE_DWORD : MUBUF_Store_Helper <
- 0x0000001c, "BUFFER_STORE_DWORD", VReg_32
+defm BUFFER_STORE_DWORD : MUBUF_Store_Helper <
+ 0x0000001c, "BUFFER_STORE_DWORD", VReg_32, i32, global_store
>;
-def BUFFER_STORE_DWORDX2 : MUBUF_Store_Helper <
- 0x0000001d, "BUFFER_STORE_DWORDX2", VReg_64
+defm BUFFER_STORE_DWORDX2 : MUBUF_Store_Helper <
+ 0x0000001d, "BUFFER_STORE_DWORDX2", VReg_64, v2i32, global_store
>;
-def BUFFER_STORE_DWORDX4 : MUBUF_Store_Helper <
- 0x0000001e, "BUFFER_STORE_DWORDX4", VReg_128
+defm BUFFER_STORE_DWORDX4 : MUBUF_Store_Helper <
+ 0x0000001e, "BUFFER_STORE_DWORDX4", VReg_128, v4i32, global_store
>;
//def BUFFER_ATOMIC_SWAP : MUBUF_ <0x00000030, "BUFFER_ATOMIC_SWAP", []>;
//def BUFFER_ATOMIC_CMPSWAP : MUBUF_ <0x00000031, "BUFFER_ATOMIC_CMPSWAP", []>;
@@ -480,6 +927,11 @@ def BUFFER_STORE_DWORDX4 : MUBUF_Store_Helper <
//def BUFFER_ATOMIC_FMAX_X2 : MUBUF_X2 <0x00000060, "BUFFER_ATOMIC_FMAX_X2", []>;
//def BUFFER_WBINVL1_SC : MUBUF_WBINVL1 <0x00000070, "BUFFER_WBINVL1_SC", []>;
//def BUFFER_WBINVL1 : MUBUF_WBINVL1 <0x00000071, "BUFFER_WBINVL1", []>;
+
+//===----------------------------------------------------------------------===//
+// MTBUF Instructions
+//===----------------------------------------------------------------------===//
+
//def TBUFFER_LOAD_FORMAT_X : MTBUF_ <0x00000000, "TBUFFER_LOAD_FORMAT_X", []>;
//def TBUFFER_LOAD_FORMAT_XY : MTBUF_ <0x00000001, "TBUFFER_LOAD_FORMAT_XY", []>;
//def TBUFFER_LOAD_FORMAT_XYZ : MTBUF_ <0x00000002, "TBUFFER_LOAD_FORMAT_XYZ", []>;
@@ -489,41 +941,10 @@ def TBUFFER_STORE_FORMAT_XY : MTBUF_Store_Helper <0x00000005, "TBUFFER_STORE_FOR
def TBUFFER_STORE_FORMAT_XYZ : MTBUF_Store_Helper <0x00000006, "TBUFFER_STORE_FORMAT_XYZ", VReg_128>;
def TBUFFER_STORE_FORMAT_XYZW : MTBUF_Store_Helper <0x00000007, "TBUFFER_STORE_FORMAT_XYZW", VReg_128>;
-let mayLoad = 1 in {
-
-// We are using the SGPR_32 and not the SReg_32 register class for 32-bit
-// SMRD instructions, because the SGPR_32 register class does not include M0
-// and writing to M0 from an SMRD instruction will hang the GPU.
-defm S_LOAD_DWORD : SMRD_Helper <0x00, "S_LOAD_DWORD", SReg_64, SGPR_32>;
-defm S_LOAD_DWORDX2 : SMRD_Helper <0x01, "S_LOAD_DWORDX2", SReg_64, SReg_64>;
-defm S_LOAD_DWORDX4 : SMRD_Helper <0x02, "S_LOAD_DWORDX4", SReg_64, SReg_128>;
-defm S_LOAD_DWORDX8 : SMRD_Helper <0x03, "S_LOAD_DWORDX8", SReg_64, SReg_256>;
-defm S_LOAD_DWORDX16 : SMRD_Helper <0x04, "S_LOAD_DWORDX16", SReg_64, SReg_512>;
-
-defm S_BUFFER_LOAD_DWORD : SMRD_Helper <
- 0x08, "S_BUFFER_LOAD_DWORD", SReg_128, SGPR_32
->;
-
-defm S_BUFFER_LOAD_DWORDX2 : SMRD_Helper <
- 0x09, "S_BUFFER_LOAD_DWORDX2", SReg_128, SReg_64
->;
-
-defm S_BUFFER_LOAD_DWORDX4 : SMRD_Helper <
- 0x0a, "S_BUFFER_LOAD_DWORDX4", SReg_128, SReg_128
->;
-
-defm S_BUFFER_LOAD_DWORDX8 : SMRD_Helper <
- 0x0b, "S_BUFFER_LOAD_DWORDX8", SReg_128, SReg_256
->;
-
-defm S_BUFFER_LOAD_DWORDX16 : SMRD_Helper <
- 0x0c, "S_BUFFER_LOAD_DWORDX16", SReg_128, SReg_512
->;
-
-} // mayLoad = 1
+//===----------------------------------------------------------------------===//
+// MIMG Instructions
+//===----------------------------------------------------------------------===//
-//def S_MEMTIME : SMRD_ <0x0000001e, "S_MEMTIME", []>;
-//def S_DCACHE_INV : SMRD_ <0x0000001f, "S_DCACHE_INV", []>;
defm IMAGE_LOAD : MIMG_NoSampler <0x00000000, "IMAGE_LOAD">;
defm IMAGE_LOAD_MIP : MIMG_NoSampler <0x00000001, "IMAGE_LOAD_MIP">;
//def IMAGE_LOAD_PCK : MIMG_NoPattern_ <"IMAGE_LOAD_PCK", 0x00000002>;
@@ -552,81 +973,96 @@ defm IMAGE_GET_RESINFO : MIMG_NoSampler <0x0000000e, "IMAGE_GET_RESINFO">;
//def IMAGE_ATOMIC_FCMPSWAP : MIMG_NoPattern_ <"IMAGE_ATOMIC_FCMPSWAP", 0x0000001d>;
//def IMAGE_ATOMIC_FMIN : MIMG_NoPattern_ <"IMAGE_ATOMIC_FMIN", 0x0000001e>;
//def IMAGE_ATOMIC_FMAX : MIMG_NoPattern_ <"IMAGE_ATOMIC_FMAX", 0x0000001f>;
-defm IMAGE_SAMPLE : MIMG_Sampler <0x00000020, "IMAGE_SAMPLE">;
-//def IMAGE_SAMPLE_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_CL", 0x00000021>;
-defm IMAGE_SAMPLE_D : MIMG_Sampler <0x00000022, "IMAGE_SAMPLE_D">;
-//def IMAGE_SAMPLE_D_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_D_CL", 0x00000023>;
-defm IMAGE_SAMPLE_L : MIMG_Sampler <0x00000024, "IMAGE_SAMPLE_L">;
-defm IMAGE_SAMPLE_B : MIMG_Sampler <0x00000025, "IMAGE_SAMPLE_B">;
-//def IMAGE_SAMPLE_B_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_B_CL", 0x00000026>;
-//def IMAGE_SAMPLE_LZ : MIMG_NoPattern_ <"IMAGE_SAMPLE_LZ", 0x00000027>;
-defm IMAGE_SAMPLE_C : MIMG_Sampler <0x00000028, "IMAGE_SAMPLE_C">;
-//def IMAGE_SAMPLE_C_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CL", 0x00000029>;
-defm IMAGE_SAMPLE_C_D : MIMG_Sampler <0x0000002a, "IMAGE_SAMPLE_C_D">;
-//def IMAGE_SAMPLE_C_D_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_D_CL", 0x0000002b>;
-defm IMAGE_SAMPLE_C_L : MIMG_Sampler <0x0000002c, "IMAGE_SAMPLE_C_L">;
-defm IMAGE_SAMPLE_C_B : MIMG_Sampler <0x0000002d, "IMAGE_SAMPLE_C_B">;
-//def IMAGE_SAMPLE_C_B_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_B_CL", 0x0000002e>;
-//def IMAGE_SAMPLE_C_LZ : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_LZ", 0x0000002f>;
-//def IMAGE_SAMPLE_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_O", 0x00000030>;
-//def IMAGE_SAMPLE_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_CL_O", 0x00000031>;
-//def IMAGE_SAMPLE_D_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_D_O", 0x00000032>;
-//def IMAGE_SAMPLE_D_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_D_CL_O", 0x00000033>;
-//def IMAGE_SAMPLE_L_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_L_O", 0x00000034>;
-//def IMAGE_SAMPLE_B_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_B_O", 0x00000035>;
-//def IMAGE_SAMPLE_B_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_B_CL_O", 0x00000036>;
-//def IMAGE_SAMPLE_LZ_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_LZ_O", 0x00000037>;
-//def IMAGE_SAMPLE_C_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_O", 0x00000038>;
-//def IMAGE_SAMPLE_C_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CL_O", 0x00000039>;
-//def IMAGE_SAMPLE_C_D_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_D_O", 0x0000003a>;
-//def IMAGE_SAMPLE_C_D_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_D_CL_O", 0x0000003b>;
-//def IMAGE_SAMPLE_C_L_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_L_O", 0x0000003c>;
-//def IMAGE_SAMPLE_C_B_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_B_O", 0x0000003d>;
-//def IMAGE_SAMPLE_C_B_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_B_CL_O", 0x0000003e>;
-//def IMAGE_SAMPLE_C_LZ_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_LZ_O", 0x0000003f>;
-//def IMAGE_GATHER4 : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4", 0x00000040>;
-//def IMAGE_GATHER4_CL : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_CL", 0x00000041>;
-//def IMAGE_GATHER4_L : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_L", 0x00000044>;
-//def IMAGE_GATHER4_B : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_B", 0x00000045>;
-//def IMAGE_GATHER4_B_CL : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_B_CL", 0x00000046>;
-//def IMAGE_GATHER4_LZ : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_LZ", 0x00000047>;
-//def IMAGE_GATHER4_C : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C", 0x00000048>;
-//def IMAGE_GATHER4_C_CL : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_CL", 0x00000049>;
-//def IMAGE_GATHER4_C_L : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_L", 0x0000004c>;
-//def IMAGE_GATHER4_C_B : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_B", 0x0000004d>;
-//def IMAGE_GATHER4_C_B_CL : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_B_CL", 0x0000004e>;
-//def IMAGE_GATHER4_C_LZ : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_LZ", 0x0000004f>;
-//def IMAGE_GATHER4_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_O", 0x00000050>;
-//def IMAGE_GATHER4_CL_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_CL_O", 0x00000051>;
-//def IMAGE_GATHER4_L_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_L_O", 0x00000054>;
-//def IMAGE_GATHER4_B_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_B_O", 0x00000055>;
-//def IMAGE_GATHER4_B_CL_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_B_CL_O", 0x00000056>;
-//def IMAGE_GATHER4_LZ_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_LZ_O", 0x00000057>;
-//def IMAGE_GATHER4_C_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_O", 0x00000058>;
-//def IMAGE_GATHER4_C_CL_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_CL_O", 0x00000059>;
-//def IMAGE_GATHER4_C_L_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_L_O", 0x0000005c>;
-//def IMAGE_GATHER4_C_B_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_B_O", 0x0000005d>;
-//def IMAGE_GATHER4_C_B_CL_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_B_CL_O", 0x0000005e>;
-//def IMAGE_GATHER4_C_LZ_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_LZ_O", 0x0000005f>;
-//def IMAGE_GET_LOD : MIMG_NoPattern_ <"IMAGE_GET_LOD", 0x00000060>;
-//def IMAGE_SAMPLE_CD : MIMG_NoPattern_ <"IMAGE_SAMPLE_CD", 0x00000068>;
-//def IMAGE_SAMPLE_CD_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_CD_CL", 0x00000069>;
-//def IMAGE_SAMPLE_C_CD : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CD", 0x0000006a>;
-//def IMAGE_SAMPLE_C_CD_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CD_CL", 0x0000006b>;
-//def IMAGE_SAMPLE_CD_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_CD_O", 0x0000006c>;
-//def IMAGE_SAMPLE_CD_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_CD_CL_O", 0x0000006d>;
-//def IMAGE_SAMPLE_C_CD_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CD_O", 0x0000006e>;
-//def IMAGE_SAMPLE_C_CD_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CD_CL_O", 0x0000006f>;
+defm IMAGE_SAMPLE : MIMG_Sampler <0x00000020, "IMAGE_SAMPLE">;
+defm IMAGE_SAMPLE_CL : MIMG_Sampler <0x00000021, "IMAGE_SAMPLE_CL">;
+defm IMAGE_SAMPLE_D : MIMG_Sampler <0x00000022, "IMAGE_SAMPLE_D">;
+defm IMAGE_SAMPLE_D_CL : MIMG_Sampler <0x00000023, "IMAGE_SAMPLE_D_CL">;
+defm IMAGE_SAMPLE_L : MIMG_Sampler <0x00000024, "IMAGE_SAMPLE_L">;
+defm IMAGE_SAMPLE_B : MIMG_Sampler <0x00000025, "IMAGE_SAMPLE_B">;
+defm IMAGE_SAMPLE_B_CL : MIMG_Sampler <0x00000026, "IMAGE_SAMPLE_B_CL">;
+defm IMAGE_SAMPLE_LZ : MIMG_Sampler <0x00000027, "IMAGE_SAMPLE_LZ">;
+defm IMAGE_SAMPLE_C : MIMG_Sampler <0x00000028, "IMAGE_SAMPLE_C">;
+defm IMAGE_SAMPLE_C_CL : MIMG_Sampler <0x00000029, "IMAGE_SAMPLE_C_CL">;
+defm IMAGE_SAMPLE_C_D : MIMG_Sampler <0x0000002a, "IMAGE_SAMPLE_C_D">;
+defm IMAGE_SAMPLE_C_D_CL : MIMG_Sampler <0x0000002b, "IMAGE_SAMPLE_C_D_CL">;
+defm IMAGE_SAMPLE_C_L : MIMG_Sampler <0x0000002c, "IMAGE_SAMPLE_C_L">;
+defm IMAGE_SAMPLE_C_B : MIMG_Sampler <0x0000002d, "IMAGE_SAMPLE_C_B">;
+defm IMAGE_SAMPLE_C_B_CL : MIMG_Sampler <0x0000002e, "IMAGE_SAMPLE_C_B_CL">;
+defm IMAGE_SAMPLE_C_LZ : MIMG_Sampler <0x0000002f, "IMAGE_SAMPLE_C_LZ">;
+defm IMAGE_SAMPLE_O : MIMG_Sampler <0x00000030, "IMAGE_SAMPLE_O">;
+defm IMAGE_SAMPLE_CL_O : MIMG_Sampler <0x00000031, "IMAGE_SAMPLE_CL_O">;
+defm IMAGE_SAMPLE_D_O : MIMG_Sampler <0x00000032, "IMAGE_SAMPLE_D_O">;
+defm IMAGE_SAMPLE_D_CL_O : MIMG_Sampler <0x00000033, "IMAGE_SAMPLE_D_CL_O">;
+defm IMAGE_SAMPLE_L_O : MIMG_Sampler <0x00000034, "IMAGE_SAMPLE_L_O">;
+defm IMAGE_SAMPLE_B_O : MIMG_Sampler <0x00000035, "IMAGE_SAMPLE_B_O">;
+defm IMAGE_SAMPLE_B_CL_O : MIMG_Sampler <0x00000036, "IMAGE_SAMPLE_B_CL_O">;
+defm IMAGE_SAMPLE_LZ_O : MIMG_Sampler <0x00000037, "IMAGE_SAMPLE_LZ_O">;
+defm IMAGE_SAMPLE_C_O : MIMG_Sampler <0x00000038, "IMAGE_SAMPLE_C_O">;
+defm IMAGE_SAMPLE_C_CL_O : MIMG_Sampler <0x00000039, "IMAGE_SAMPLE_C_CL_O">;
+defm IMAGE_SAMPLE_C_D_O : MIMG_Sampler <0x0000003a, "IMAGE_SAMPLE_C_D_O">;
+defm IMAGE_SAMPLE_C_D_CL_O : MIMG_Sampler <0x0000003b, "IMAGE_SAMPLE_C_D_CL_O">;
+defm IMAGE_SAMPLE_C_L_O : MIMG_Sampler <0x0000003c, "IMAGE_SAMPLE_C_L_O">;
+defm IMAGE_SAMPLE_C_B_O : MIMG_Sampler <0x0000003d, "IMAGE_SAMPLE_C_B_O">;
+defm IMAGE_SAMPLE_C_B_CL_O : MIMG_Sampler <0x0000003e, "IMAGE_SAMPLE_C_B_CL_O">;
+defm IMAGE_SAMPLE_C_LZ_O : MIMG_Sampler <0x0000003f, "IMAGE_SAMPLE_C_LZ_O">;
+defm IMAGE_GATHER4 : MIMG_Gather <0x00000040, "IMAGE_GATHER4">;
+defm IMAGE_GATHER4_CL : MIMG_Gather <0x00000041, "IMAGE_GATHER4_CL">;
+defm IMAGE_GATHER4_L : MIMG_Gather <0x00000044, "IMAGE_GATHER4_L">;
+defm IMAGE_GATHER4_B : MIMG_Gather <0x00000045, "IMAGE_GATHER4_B">;
+defm IMAGE_GATHER4_B_CL : MIMG_Gather <0x00000046, "IMAGE_GATHER4_B_CL">;
+defm IMAGE_GATHER4_LZ : MIMG_Gather <0x00000047, "IMAGE_GATHER4_LZ">;
+defm IMAGE_GATHER4_C : MIMG_Gather <0x00000048, "IMAGE_GATHER4_C">;
+defm IMAGE_GATHER4_C_CL : MIMG_Gather <0x00000049, "IMAGE_GATHER4_C_CL">;
+defm IMAGE_GATHER4_C_L : MIMG_Gather <0x0000004c, "IMAGE_GATHER4_C_L">;
+defm IMAGE_GATHER4_C_B : MIMG_Gather <0x0000004d, "IMAGE_GATHER4_C_B">;
+defm IMAGE_GATHER4_C_B_CL : MIMG_Gather <0x0000004e, "IMAGE_GATHER4_C_B_CL">;
+defm IMAGE_GATHER4_C_LZ : MIMG_Gather <0x0000004f, "IMAGE_GATHER4_C_LZ">;
+defm IMAGE_GATHER4_O : MIMG_Gather <0x00000050, "IMAGE_GATHER4_O">;
+defm IMAGE_GATHER4_CL_O : MIMG_Gather <0x00000051, "IMAGE_GATHER4_CL_O">;
+defm IMAGE_GATHER4_L_O : MIMG_Gather <0x00000054, "IMAGE_GATHER4_L_O">;
+defm IMAGE_GATHER4_B_O : MIMG_Gather <0x00000055, "IMAGE_GATHER4_B_O">;
+defm IMAGE_GATHER4_B_CL_O : MIMG_Gather <0x00000056, "IMAGE_GATHER4_B_CL_O">;
+defm IMAGE_GATHER4_LZ_O : MIMG_Gather <0x00000057, "IMAGE_GATHER4_LZ_O">;
+defm IMAGE_GATHER4_C_O : MIMG_Gather <0x00000058, "IMAGE_GATHER4_C_O">;
+defm IMAGE_GATHER4_C_CL_O : MIMG_Gather <0x00000059, "IMAGE_GATHER4_C_CL_O">;
+defm IMAGE_GATHER4_C_L_O : MIMG_Gather <0x0000005c, "IMAGE_GATHER4_C_L_O">;
+defm IMAGE_GATHER4_C_B_O : MIMG_Gather <0x0000005d, "IMAGE_GATHER4_C_B_O">;
+defm IMAGE_GATHER4_C_B_CL_O : MIMG_Gather <0x0000005e, "IMAGE_GATHER4_C_B_CL_O">;
+defm IMAGE_GATHER4_C_LZ_O : MIMG_Gather <0x0000005f, "IMAGE_GATHER4_C_LZ_O">;
+defm IMAGE_GET_LOD : MIMG_Sampler <0x00000060, "IMAGE_GET_LOD">;
+defm IMAGE_SAMPLE_CD : MIMG_Sampler <0x00000068, "IMAGE_SAMPLE_CD">;
+defm IMAGE_SAMPLE_CD_CL : MIMG_Sampler <0x00000069, "IMAGE_SAMPLE_CD_CL">;
+defm IMAGE_SAMPLE_C_CD : MIMG_Sampler <0x0000006a, "IMAGE_SAMPLE_C_CD">;
+defm IMAGE_SAMPLE_C_CD_CL : MIMG_Sampler <0x0000006b, "IMAGE_SAMPLE_C_CD_CL">;
+defm IMAGE_SAMPLE_CD_O : MIMG_Sampler <0x0000006c, "IMAGE_SAMPLE_CD_O">;
+defm IMAGE_SAMPLE_CD_CL_O : MIMG_Sampler <0x0000006d, "IMAGE_SAMPLE_CD_CL_O">;
+defm IMAGE_SAMPLE_C_CD_O : MIMG_Sampler <0x0000006e, "IMAGE_SAMPLE_C_CD_O">;
+defm IMAGE_SAMPLE_C_CD_CL_O : MIMG_Sampler <0x0000006f, "IMAGE_SAMPLE_C_CD_CL_O">;
//def IMAGE_RSRC256 : MIMG_NoPattern_RSRC256 <"IMAGE_RSRC256", 0x0000007e>;
//def IMAGE_SAMPLER : MIMG_NoPattern_ <"IMAGE_SAMPLER", 0x0000007f>;
-//def V_NOP : VOP1_ <0x00000000, "V_NOP", []>;
+//===----------------------------------------------------------------------===//
+// VOP1 Instructions
+//===----------------------------------------------------------------------===//
+
+//def V_NOP : VOP1_ <0x00000000, "V_NOP", []>;
let neverHasSideEffects = 1, isMoveImm = 1 in {
defm V_MOV_B32 : VOP1_32 <0x00000001, "V_MOV_B32", []>;
} // End neverHasSideEffects = 1, isMoveImm = 1
-defm V_READFIRSTLANE_B32 : VOP1_32 <0x00000002, "V_READFIRSTLANE_B32", []>;
+let Uses = [EXEC] in {
+
+def V_READFIRSTLANE_B32 : VOP1 <
+ 0x00000002,
+ (outs SReg_32:$vdst),
+ (ins VReg_32:$src0),
+ "V_READFIRSTLANE_B32 $vdst, $src0",
+ []
+>;
+
+}
+
defm V_CVT_I32_F64 : VOP1_32_64 <0x00000003, "V_CVT_I32_F64",
[(set i32:$dst, (fp_to_sint f64:$src0))]
>;
@@ -646,8 +1082,12 @@ defm V_CVT_I32_F32 : VOP1_32 <0x00000008, "V_CVT_I32_F32",
[(set i32:$dst, (fp_to_sint f32:$src0))]
>;
defm V_MOV_FED_B32 : VOP1_32 <0x00000009, "V_MOV_FED_B32", []>;
-////def V_CVT_F16_F32 : VOP1_F16 <0x0000000a, "V_CVT_F16_F32", []>;
-//defm V_CVT_F32_F16 : VOP1_32 <0x0000000b, "V_CVT_F32_F16", []>;
+defm V_CVT_F16_F32 : VOP1_32 <0x0000000a, "V_CVT_F16_F32",
+ [(set i32:$dst, (fp_to_f16 f32:$src0))]
+>;
+defm V_CVT_F32_F16 : VOP1_32 <0x0000000b, "V_CVT_F32_F16",
+ [(set f32:$dst, (f16_to_fp i32:$src0))]
+>;
//defm V_CVT_RPI_I32_F32 : VOP1_32 <0x0000000c, "V_CVT_RPI_I32_F32", []>;
//defm V_CVT_FLR_I32_F32 : VOP1_32 <0x0000000d, "V_CVT_FLR_I32_F32", []>;
//defm V_CVT_OFF_F32_I4 : VOP1_32 <0x0000000e, "V_CVT_OFF_F32_I4", []>;
@@ -657,17 +1097,30 @@ defm V_CVT_F32_F64 : VOP1_32_64 <0x0000000f, "V_CVT_F32_F64",
defm V_CVT_F64_F32 : VOP1_64_32 <0x00000010, "V_CVT_F64_F32",
[(set f64:$dst, (fextend f32:$src0))]
>;
-//defm V_CVT_F32_UBYTE0 : VOP1_32 <0x00000011, "V_CVT_F32_UBYTE0", []>;
-//defm V_CVT_F32_UBYTE1 : VOP1_32 <0x00000012, "V_CVT_F32_UBYTE1", []>;
-//defm V_CVT_F32_UBYTE2 : VOP1_32 <0x00000013, "V_CVT_F32_UBYTE2", []>;
-//defm V_CVT_F32_UBYTE3 : VOP1_32 <0x00000014, "V_CVT_F32_UBYTE3", []>;
-//defm V_CVT_U32_F64 : VOP1_32 <0x00000015, "V_CVT_U32_F64", []>;
-//defm V_CVT_F64_U32 : VOP1_64 <0x00000016, "V_CVT_F64_U32", []>;
+defm V_CVT_F32_UBYTE0 : VOP1_32 <0x00000011, "V_CVT_F32_UBYTE0",
+ [(set f32:$dst, (AMDGPUcvt_f32_ubyte0 i32:$src0))]
+>;
+defm V_CVT_F32_UBYTE1 : VOP1_32 <0x00000012, "V_CVT_F32_UBYTE1",
+ [(set f32:$dst, (AMDGPUcvt_f32_ubyte1 i32:$src0))]
+>;
+defm V_CVT_F32_UBYTE2 : VOP1_32 <0x00000013, "V_CVT_F32_UBYTE2",
+ [(set f32:$dst, (AMDGPUcvt_f32_ubyte2 i32:$src0))]
+>;
+defm V_CVT_F32_UBYTE3 : VOP1_32 <0x00000014, "V_CVT_F32_UBYTE3",
+ [(set f32:$dst, (AMDGPUcvt_f32_ubyte3 i32:$src0))]
+>;
+defm V_CVT_U32_F64 : VOP1_32_64 <0x00000015, "V_CVT_U32_F64",
+ [(set i32:$dst, (fp_to_uint f64:$src0))]
+>;
+defm V_CVT_F64_U32 : VOP1_64_32 <0x00000016, "V_CVT_F64_U32",
+ [(set f64:$dst, (uint_to_fp i32:$src0))]
+>;
+
defm V_FRACT_F32 : VOP1_32 <0x00000020, "V_FRACT_F32",
[(set f32:$dst, (AMDGPUfract f32:$src0))]
>;
defm V_TRUNC_F32 : VOP1_32 <0x00000021, "V_TRUNC_F32",
- [(set f32:$dst, (int_AMDGPU_trunc f32:$src0))]
+ [(set f32:$dst, (ftrunc f32:$src0))]
>;
defm V_CEIL_F32 : VOP1_32 <0x00000022, "V_CEIL_F32",
[(set f32:$dst, (fceil f32:$src0))]
@@ -685,32 +1138,45 @@ defm V_LOG_CLAMP_F32 : VOP1_32 <0x00000026, "V_LOG_CLAMP_F32", []>;
defm V_LOG_F32 : VOP1_32 <0x00000027, "V_LOG_F32",
[(set f32:$dst, (flog2 f32:$src0))]
>;
+
defm V_RCP_CLAMP_F32 : VOP1_32 <0x00000028, "V_RCP_CLAMP_F32", []>;
defm V_RCP_LEGACY_F32 : VOP1_32 <0x00000029, "V_RCP_LEGACY_F32", []>;
defm V_RCP_F32 : VOP1_32 <0x0000002a, "V_RCP_F32",
- [(set f32:$dst, (fdiv FP_ONE, f32:$src0))]
+ [(set f32:$dst, (AMDGPUrcp f32:$src0))]
>;
defm V_RCP_IFLAG_F32 : VOP1_32 <0x0000002b, "V_RCP_IFLAG_F32", []>;
-defm V_RSQ_CLAMP_F32 : VOP1_32 <0x0000002c, "V_RSQ_CLAMP_F32", []>;
+defm V_RSQ_CLAMP_F32 : VOP1_32 <0x0000002c, "V_RSQ_CLAMP_F32",
+ [(set f32:$dst, (AMDGPUrsq_clamped f32:$src0))]
+>;
defm V_RSQ_LEGACY_F32 : VOP1_32 <
0x0000002d, "V_RSQ_LEGACY_F32",
- [(set f32:$dst, (int_AMDGPU_rsq f32:$src0))]
+ [(set f32:$dst, (AMDGPUrsq_legacy f32:$src0))]
+>;
+defm V_RSQ_F32 : VOP1_32 <0x0000002e, "V_RSQ_F32",
+ [(set f32:$dst, (AMDGPUrsq f32:$src0))]
>;
-defm V_RSQ_F32 : VOP1_32 <0x0000002e, "V_RSQ_F32", []>;
defm V_RCP_F64 : VOP1_64 <0x0000002f, "V_RCP_F64",
- [(set f64:$dst, (fdiv FP_ONE, f64:$src0))]
+ [(set f64:$dst, (AMDGPUrcp f64:$src0))]
>;
defm V_RCP_CLAMP_F64 : VOP1_64 <0x00000030, "V_RCP_CLAMP_F64", []>;
-defm V_RSQ_F64 : VOP1_64 <0x00000031, "V_RSQ_F64", []>;
-defm V_RSQ_CLAMP_F64 : VOP1_64 <0x00000032, "V_RSQ_CLAMP_F64", []>;
+defm V_RSQ_F64 : VOP1_64 <0x00000031, "V_RSQ_F64",
+ [(set f64:$dst, (AMDGPUrsq f64:$src0))]
+>;
+defm V_RSQ_CLAMP_F64 : VOP1_64 <0x00000032, "V_RSQ_CLAMP_F64",
+ [(set f64:$dst, (AMDGPUrsq_clamped f64:$src0))]
+>;
defm V_SQRT_F32 : VOP1_32 <0x00000033, "V_SQRT_F32",
[(set f32:$dst, (fsqrt f32:$src0))]
>;
defm V_SQRT_F64 : VOP1_64 <0x00000034, "V_SQRT_F64",
[(set f64:$dst, (fsqrt f64:$src0))]
>;
-defm V_SIN_F32 : VOP1_32 <0x00000035, "V_SIN_F32", []>;
-defm V_COS_F32 : VOP1_32 <0x00000036, "V_COS_F32", []>;
+defm V_SIN_F32 : VOP1_32 <0x00000035, "V_SIN_F32",
+ [(set f32:$dst, (AMDGPUsin f32:$src0))]
+>;
+defm V_COS_F32 : VOP1_32 <0x00000036, "V_COS_F32",
+ [(set f32:$dst, (AMDGPUcos f32:$src0))]
+>;
defm V_NOT_B32 : VOP1_32 <0x00000037, "V_NOT_B32", []>;
defm V_BFREV_B32 : VOP1_32 <0x00000038, "V_BFREV_B32", []>;
defm V_FFBH_U32 : VOP1_32 <0x00000039, "V_FFBH_U32", []>;
@@ -726,6 +1192,11 @@ defm V_MOVRELD_B32 : VOP1_32 <0x00000042, "V_MOVRELD_B32", []>;
defm V_MOVRELS_B32 : VOP1_32 <0x00000043, "V_MOVRELS_B32", []>;
defm V_MOVRELSD_B32 : VOP1_32 <0x00000044, "V_MOVRELSD_B32", []>;
+
+//===----------------------------------------------------------------------===//
+// VINTRP Instructions
+//===----------------------------------------------------------------------===//
+
def V_INTERP_P1_F32 : VINTRP <
0x00000000,
(outs VReg_32:$dst),
@@ -756,97 +1227,9 @@ def V_INTERP_MOV_F32 : VINTRP <
let DisableEncoding = "$m0";
}
-//def S_NOP : SOPP_ <0x00000000, "S_NOP", []>;
-
-let isTerminator = 1 in {
-
-def S_ENDPGM : SOPP <0x00000001, (ins), "S_ENDPGM",
- [(IL_retflag)]> {
- let SIMM16 = 0;
- let isBarrier = 1;
- let hasCtrlDep = 1;
-}
-
-let isBranch = 1 in {
-def S_BRANCH : SOPP <
- 0x00000002, (ins brtarget:$target), "S_BRANCH $target",
- [(br bb:$target)]> {
- let isBarrier = 1;
-}
-
-let DisableEncoding = "$scc" in {
-def S_CBRANCH_SCC0 : SOPP <
- 0x00000004, (ins brtarget:$target, SCCReg:$scc),
- "S_CBRANCH_SCC0 $target", []
->;
-def S_CBRANCH_SCC1 : SOPP <
- 0x00000005, (ins brtarget:$target, SCCReg:$scc),
- "S_CBRANCH_SCC1 $target",
- []
->;
-} // End DisableEncoding = "$scc"
-
-def S_CBRANCH_VCCZ : SOPP <
- 0x00000006, (ins brtarget:$target, VCCReg:$vcc),
- "S_CBRANCH_VCCZ $target",
- []
->;
-def S_CBRANCH_VCCNZ : SOPP <
- 0x00000007, (ins brtarget:$target, VCCReg:$vcc),
- "S_CBRANCH_VCCNZ $target",
- []
->;
-
-let DisableEncoding = "$exec" in {
-def S_CBRANCH_EXECZ : SOPP <
- 0x00000008, (ins brtarget:$target, EXECReg:$exec),
- "S_CBRANCH_EXECZ $target",
- []
->;
-def S_CBRANCH_EXECNZ : SOPP <
- 0x00000009, (ins brtarget:$target, EXECReg:$exec),
- "S_CBRANCH_EXECNZ $target",
- []
->;
-} // End DisableEncoding = "$exec"
-
-
-} // End isBranch = 1
-} // End isTerminator = 1
-
-let hasSideEffects = 1 in {
-def S_BARRIER : SOPP <0x0000000a, (ins), "S_BARRIER",
- [(int_AMDGPU_barrier_local)]
-> {
- let SIMM16 = 0;
- let isBarrier = 1;
- let hasCtrlDep = 1;
- let mayLoad = 1;
- let mayStore = 1;
-}
-
-def S_WAITCNT : SOPP <0x0000000c, (ins WAIT_FLAG:$simm16), "S_WAITCNT $simm16",
- []
->;
-//def S_SETHALT : SOPP_ <0x0000000d, "S_SETHALT", []>;
-//def S_SLEEP : SOPP_ <0x0000000e, "S_SLEEP", []>;
-//def S_SETPRIO : SOPP_ <0x0000000f, "S_SETPRIO", []>;
-
-let Uses = [EXEC] in {
- def S_SENDMSG : SOPP <0x00000010, (ins SendMsgImm:$simm16, M0Reg:$m0), "S_SENDMSG $simm16",
- [(int_SI_sendmsg imm:$simm16, M0Reg:$m0)]
- > {
- let DisableEncoding = "$m0";
- }
-} // End Uses = [EXEC]
-
-//def S_SENDMSGHALT : SOPP_ <0x00000011, "S_SENDMSGHALT", []>;
-//def S_TRAP : SOPP_ <0x00000012, "S_TRAP", []>;
-//def S_ICACHE_INV : SOPP_ <0x00000013, "S_ICACHE_INV", []>;
-//def S_INCPERFLEVEL : SOPP_ <0x00000014, "S_INCPERFLEVEL", []>;
-//def S_DECPERFLEVEL : SOPP_ <0x00000015, "S_DECPERFLEVEL", []>;
-//def S_TTRACEDATA : SOPP_ <0x00000016, "S_TTRACEDATA", []>;
-} // End hasSideEffects
+//===----------------------------------------------------------------------===//
+// VOP2 Instructions
+//===----------------------------------------------------------------------===//
def V_CNDMASK_B32_e32 : VOP2 <0x00000000, (outs VReg_32:$dst),
(ins VSrc_32:$src0, VReg_32:$src1, VCCReg:$vcc),
@@ -861,34 +1244,28 @@ def V_CNDMASK_B32_e64 : VOP3 <0x00000100, (outs VReg_32:$dst),
InstFlag:$abs, InstFlag:$clamp, InstFlag:$omod, InstFlag:$neg),
"V_CNDMASK_B32_e64 $dst, $src0, $src1, $src2, $abs, $clamp, $omod, $neg",
[(set i32:$dst, (select i1:$src2, i32:$src1, i32:$src0))]
->;
-
-//f32 pattern for V_CNDMASK_B32_e64
-def : Pat <
- (f32 (select i1:$src2, f32:$src1, f32:$src0)),
- (V_CNDMASK_B32_e64 $src0, $src1, $src2)
->;
+> {
+ let src0_modifiers = 0;
+ let src1_modifiers = 0;
+ let src2_modifiers = 0;
+}
-def : Pat <
- (i32 (trunc i64:$val)),
- (EXTRACT_SUBREG $val, sub0)
+def V_READLANE_B32 : VOP2 <
+ 0x00000001,
+ (outs SReg_32:$vdst),
+ (ins VReg_32:$src0, SSrc_32:$vsrc1),
+ "V_READLANE_B32 $vdst, $src0, $vsrc1",
+ []
>;
-//use two V_CNDMASK_B32_e64 instructions for f64
-def : Pat <
- (f64 (select i1:$src2, f64:$src1, f64:$src0)),
- (INSERT_SUBREG (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
- (V_CNDMASK_B32_e64 (EXTRACT_SUBREG $src0, sub0),
- (EXTRACT_SUBREG $src1, sub0),
- $src2), sub0),
- (V_CNDMASK_B32_e64 (EXTRACT_SUBREG $src0, sub1),
- (EXTRACT_SUBREG $src1, sub1),
- $src2), sub1)
+def V_WRITELANE_B32 : VOP2 <
+ 0x00000002,
+ (outs VReg_32:$vdst),
+ (ins SReg_32:$src0, SSrc_32:$vsrc1),
+ "V_WRITELANE_B32 $vdst, $src0, $vsrc1",
+ []
>;
-defm V_READLANE_B32 : VOP2_32 <0x00000001, "V_READLANE_B32", []>;
-defm V_WRITELANE_B32 : VOP2_32 <0x00000002, "V_WRITELANE_B32", []>;
-
let isCommutable = 1 in {
defm V_ADD_F32 : VOP2_32 <0x00000003, "V_ADD_F32",
[(set f32:$dst, (fadd f32:$src0, f32:$src1))]
@@ -915,11 +1292,11 @@ defm V_MUL_F32 : VOP2_32 <0x00000008, "V_MUL_F32",
defm V_MUL_I32_I24 : VOP2_32 <0x00000009, "V_MUL_I32_I24",
- [(set i32:$dst, (mul I24:$src0, I24:$src1))]
+ [(set i32:$dst, (AMDGPUmul_i24 i32:$src0, i32:$src1))]
>;
//defm V_MUL_HI_I32_I24 : VOP2_32 <0x0000000a, "V_MUL_HI_I32_I24", []>;
defm V_MUL_U32_U24 : VOP2_32 <0x0000000b, "V_MUL_U32_U24",
- [(set i32:$dst, (mul U24:$src0, U24:$src1))]
+ [(set i32:$dst, (AMDGPUmul_u24 i32:$src0, i32:$src1))]
>;
//defm V_MUL_HI_U32_U24 : VOP2_32 <0x0000000c, "V_MUL_HI_U32_U24", []>;
@@ -935,21 +1312,18 @@ defm V_MAX_LEGACY_F32 : VOP2_32 <0x0000000e, "V_MAX_LEGACY_F32",
defm V_MIN_F32 : VOP2_32 <0x0000000f, "V_MIN_F32", []>;
defm V_MAX_F32 : VOP2_32 <0x00000010, "V_MAX_F32", []>;
defm V_MIN_I32 : VOP2_32 <0x00000011, "V_MIN_I32",
- [(set i32:$dst, (AMDGPUsmin i32:$src0, i32:$src1))]
->;
+ [(set i32:$dst, (AMDGPUsmin i32:$src0, i32:$src1))]>;
defm V_MAX_I32 : VOP2_32 <0x00000012, "V_MAX_I32",
- [(set i32:$dst, (AMDGPUsmax i32:$src0, i32:$src1))]
->;
+ [(set i32:$dst, (AMDGPUsmax i32:$src0, i32:$src1))]>;
defm V_MIN_U32 : VOP2_32 <0x00000013, "V_MIN_U32",
- [(set i32:$dst, (AMDGPUumin i32:$src0, i32:$src1))]
->;
+ [(set i32:$dst, (AMDGPUumin i32:$src0, i32:$src1))]>;
defm V_MAX_U32 : VOP2_32 <0x00000014, "V_MAX_U32",
- [(set i32:$dst, (AMDGPUumax i32:$src0, i32:$src1))]
->;
+ [(set i32:$dst, (AMDGPUumax i32:$src0, i32:$src1))]>;
defm V_LSHR_B32 : VOP2_32 <0x00000015, "V_LSHR_B32",
[(set i32:$dst, (srl i32:$src0, i32:$src1))]
>;
+
defm V_LSHRREV_B32 : VOP2_32 <0x00000016, "V_LSHRREV_B32", [], "V_LSHR_B32">;
defm V_ASHR_I32 : VOP2_32 <0x00000017, "V_ASHR_I32",
@@ -967,8 +1341,7 @@ defm V_LSHL_B32 : VOP2_32 <0x00000019, "V_LSHL_B32",
defm V_LSHLREV_B32 : VOP2_32 <0x0000001a, "V_LSHLREV_B32", [], "V_LSHL_B32">;
defm V_AND_B32 : VOP2_32 <0x0000001b, "V_AND_B32",
- [(set i32:$dst, (and i32:$src0, i32:$src1))]
->;
+ [(set i32:$dst, (and i32:$src0, i32:$src1))]>;
defm V_OR_B32 : VOP2_32 <0x0000001c, "V_OR_B32",
[(set i32:$dst, (or i32:$src0, i32:$src1))]
>;
@@ -978,25 +1351,30 @@ defm V_XOR_B32 : VOP2_32 <0x0000001d, "V_XOR_B32",
} // End isCommutable = 1
-defm V_BFM_B32 : VOP2_32 <0x0000001e, "V_BFM_B32", []>;
+defm V_BFM_B32 : VOP2_32 <0x0000001e, "V_BFM_B32",
+ [(set i32:$dst, (AMDGPUbfm i32:$src0, i32:$src1))]>;
defm V_MAC_F32 : VOP2_32 <0x0000001f, "V_MAC_F32", []>;
defm V_MADMK_F32 : VOP2_32 <0x00000020, "V_MADMK_F32", []>;
defm V_MADAK_F32 : VOP2_32 <0x00000021, "V_MADAK_F32", []>;
-//defm V_BCNT_U32_B32 : VOP2_32 <0x00000022, "V_BCNT_U32_B32", []>;
+defm V_BCNT_U32_B32 : VOP2_32 <0x00000022, "V_BCNT_U32_B32", []>;
defm V_MBCNT_LO_U32_B32 : VOP2_32 <0x00000023, "V_MBCNT_LO_U32_B32", []>;
defm V_MBCNT_HI_U32_B32 : VOP2_32 <0x00000024, "V_MBCNT_HI_U32_B32", []>;
let isCommutable = 1, Defs = [VCC] in { // Carry-out goes to VCC
// No patterns so that the scalar instructions are always selected.
// The scalar versions will be replaced with vector when needed later.
-defm V_ADD_I32 : VOP2b_32 <0x00000025, "V_ADD_I32", [], VSrc_32>;
-defm V_SUB_I32 : VOP2b_32 <0x00000026, "V_SUB_I32", [], VSrc_32>;
+defm V_ADD_I32 : VOP2b_32 <0x00000025, "V_ADD_I32",
+ [(set i32:$dst, (add i32:$src0, i32:$src1))], VSrc_32>;
+defm V_SUB_I32 : VOP2b_32 <0x00000026, "V_SUB_I32",
+ [(set i32:$dst, (sub i32:$src0, i32:$src1))], VSrc_32>;
defm V_SUBREV_I32 : VOP2b_32 <0x00000027, "V_SUBREV_I32", [], VSrc_32,
"V_SUB_I32">;
let Uses = [VCC] in { // Carry-in comes from VCC
-defm V_ADDC_U32 : VOP2b_32 <0x00000028, "V_ADDC_U32", [], VReg_32>;
-defm V_SUBB_U32 : VOP2b_32 <0x00000029, "V_SUBB_U32", [], VReg_32>;
+defm V_ADDC_U32 : VOP2b_32 <0x00000028, "V_ADDC_U32",
+ [(set i32:$dst, (adde i32:$src0, i32:$src1))], VReg_32>;
+defm V_SUBB_U32 : VOP2b_32 <0x00000029, "V_SUBB_U32",
+ [(set i32:$dst, (sube i32:$src0, i32:$src1))], VReg_32>;
defm V_SUBBREV_U32 : VOP2b_32 <0x0000002a, "V_SUBBREV_U32", [], VReg_32,
"V_SUBB_U32">;
} // End Uses = [VCC]
@@ -1011,56 +1389,51 @@ defm V_CVT_PKRTZ_F16_F32 : VOP2_32 <0x0000002f, "V_CVT_PKRTZ_F16_F32",
>;
////def V_CVT_PK_U16_U32 : VOP2_U16 <0x00000030, "V_CVT_PK_U16_U32", []>;
////def V_CVT_PK_I16_I32 : VOP2_I16 <0x00000031, "V_CVT_PK_I16_I32", []>;
-def S_CMP_EQ_I32 : SOPC_32 <0x00000000, "S_CMP_EQ_I32", []>;
-def S_CMP_LG_I32 : SOPC_32 <0x00000001, "S_CMP_LG_I32", []>;
-def S_CMP_GT_I32 : SOPC_32 <0x00000002, "S_CMP_GT_I32", []>;
-def S_CMP_GE_I32 : SOPC_32 <0x00000003, "S_CMP_GE_I32", []>;
-def S_CMP_LT_I32 : SOPC_32 <0x00000004, "S_CMP_LT_I32", []>;
-def S_CMP_LE_I32 : SOPC_32 <0x00000005, "S_CMP_LE_I32", []>;
-def S_CMP_EQ_U32 : SOPC_32 <0x00000006, "S_CMP_EQ_U32", []>;
-def S_CMP_LG_U32 : SOPC_32 <0x00000007, "S_CMP_LG_U32", []>;
-def S_CMP_GT_U32 : SOPC_32 <0x00000008, "S_CMP_GT_U32", []>;
-def S_CMP_GE_U32 : SOPC_32 <0x00000009, "S_CMP_GE_U32", []>;
-def S_CMP_LT_U32 : SOPC_32 <0x0000000a, "S_CMP_LT_U32", []>;
-def S_CMP_LE_U32 : SOPC_32 <0x0000000b, "S_CMP_LE_U32", []>;
-////def S_BITCMP0_B32 : SOPC_BITCMP0 <0x0000000c, "S_BITCMP0_B32", []>;
-////def S_BITCMP1_B32 : SOPC_BITCMP1 <0x0000000d, "S_BITCMP1_B32", []>;
-////def S_BITCMP0_B64 : SOPC_BITCMP0 <0x0000000e, "S_BITCMP0_B64", []>;
-////def S_BITCMP1_B64 : SOPC_BITCMP1 <0x0000000f, "S_BITCMP1_B64", []>;
-//def S_SETVSKIP : SOPC_ <0x00000010, "S_SETVSKIP", []>;
+
+//===----------------------------------------------------------------------===//
+// VOP3 Instructions
+//===----------------------------------------------------------------------===//
let neverHasSideEffects = 1 in {
-def V_MAD_LEGACY_F32 : VOP3_32 <0x00000140, "V_MAD_LEGACY_F32", []>;
-def V_MAD_F32 : VOP3_32 <0x00000141, "V_MAD_F32", []>;
-def V_MAD_I32_I24 : VOP3_32 <0x00000142, "V_MAD_I32_I24",
- [(set i32:$dst, (add (mul I24:$src0, I24:$src1), i32:$src2))]
+defm V_MAD_LEGACY_F32 : VOP3_32 <0x00000140, "V_MAD_LEGACY_F32", []>;
+defm V_MAD_F32 : VOP3_32 <0x00000141, "V_MAD_F32",
+ [(set f32:$dst, (fadd (fmul f32:$src0, f32:$src1), f32:$src2))]
>;
-def V_MAD_U32_U24 : VOP3_32 <0x00000143, "V_MAD_U32_U24",
- [(set i32:$dst, (add (mul U24:$src0, U24:$src1), i32:$src2))]
+defm V_MAD_I32_I24 : VOP3_32 <0x00000142, "V_MAD_I32_I24",
+ [(set i32:$dst, (AMDGPUmad_i24 i32:$src0, i32:$src1, i32:$src2))]
+>;
+defm V_MAD_U32_U24 : VOP3_32 <0x00000143, "V_MAD_U32_U24",
+ [(set i32:$dst, (AMDGPUmad_u24 i32:$src0, i32:$src1, i32:$src2))]
>;
} // End neverHasSideEffects
-def V_CUBEID_F32 : VOP3_32 <0x00000144, "V_CUBEID_F32", []>;
-def V_CUBESC_F32 : VOP3_32 <0x00000145, "V_CUBESC_F32", []>;
-def V_CUBETC_F32 : VOP3_32 <0x00000146, "V_CUBETC_F32", []>;
-def V_CUBEMA_F32 : VOP3_32 <0x00000147, "V_CUBEMA_F32", []>;
-def V_BFE_U32 : VOP3_32 <0x00000148, "V_BFE_U32", []>;
-def V_BFE_I32 : VOP3_32 <0x00000149, "V_BFE_I32", []>;
-def V_BFI_B32 : VOP3_32 <0x0000014a, "V_BFI_B32", []>;
-defm : BFIPatterns <V_BFI_B32>;
-def V_FMA_F32 : VOP3_32 <0x0000014b, "V_FMA_F32",
+
+defm V_CUBEID_F32 : VOP3_32 <0x00000144, "V_CUBEID_F32", []>;
+defm V_CUBESC_F32 : VOP3_32 <0x00000145, "V_CUBESC_F32", []>;
+defm V_CUBETC_F32 : VOP3_32 <0x00000146, "V_CUBETC_F32", []>;
+defm V_CUBEMA_F32 : VOP3_32 <0x00000147, "V_CUBEMA_F32", []>;
+
+let neverHasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
+defm V_BFE_U32 : VOP3_32 <0x00000148, "V_BFE_U32",
+ [(set i32:$dst, (AMDGPUbfe_u32 i32:$src0, i32:$src1, i32:$src2))]>;
+defm V_BFE_I32 : VOP3_32 <0x00000149, "V_BFE_I32",
+ [(set i32:$dst, (AMDGPUbfe_i32 i32:$src0, i32:$src1, i32:$src2))]>;
+}
+
+defm V_BFI_B32 : VOP3_32 <0x0000014a, "V_BFI_B32",
+ [(set i32:$dst, (AMDGPUbfi i32:$src0, i32:$src1, i32:$src2))]>;
+defm V_FMA_F32 : VOP3_32 <0x0000014b, "V_FMA_F32",
[(set f32:$dst, (fma f32:$src0, f32:$src1, f32:$src2))]
>;
def V_FMA_F64 : VOP3_64 <0x0000014c, "V_FMA_F64",
[(set f64:$dst, (fma f64:$src0, f64:$src1, f64:$src2))]
>;
//def V_LERP_U8 : VOP3_U8 <0x0000014d, "V_LERP_U8", []>;
-def V_ALIGNBIT_B32 : VOP3_32 <0x0000014e, "V_ALIGNBIT_B32", []>;
-def : ROTRPattern <V_ALIGNBIT_B32>;
+defm V_ALIGNBIT_B32 : VOP3_32 <0x0000014e, "V_ALIGNBIT_B32", []>;
-def V_ALIGNBYTE_B32 : VOP3_32 <0x0000014f, "V_ALIGNBYTE_B32", []>;
-def V_MULLIT_F32 : VOP3_32 <0x00000150, "V_MULLIT_F32", []>;
+defm V_ALIGNBYTE_B32 : VOP3_32 <0x0000014f, "V_ALIGNBYTE_B32", []>;
+defm V_MULLIT_F32 : VOP3_32 <0x00000150, "V_MULLIT_F32", []>;
////def V_MIN3_F32 : VOP3_MIN3 <0x00000151, "V_MIN3_F32", []>;
////def V_MIN3_I32 : VOP3_MIN3 <0x00000152, "V_MIN3_I32", []>;
////def V_MIN3_U32 : VOP3_MIN3 <0x00000153, "V_MIN3_U32", []>;
@@ -1073,18 +1446,22 @@ def V_MULLIT_F32 : VOP3_32 <0x00000150, "V_MULLIT_F32", []>;
//def V_SAD_U8 : VOP3_U8 <0x0000015a, "V_SAD_U8", []>;
//def V_SAD_HI_U8 : VOP3_U8 <0x0000015b, "V_SAD_HI_U8", []>;
//def V_SAD_U16 : VOP3_U16 <0x0000015c, "V_SAD_U16", []>;
-def V_SAD_U32 : VOP3_32 <0x0000015d, "V_SAD_U32", []>;
+defm V_SAD_U32 : VOP3_32 <0x0000015d, "V_SAD_U32", []>;
////def V_CVT_PK_U8_F32 : VOP3_U8 <0x0000015e, "V_CVT_PK_U8_F32", []>;
-def V_DIV_FIXUP_F32 : VOP3_32 <0x0000015f, "V_DIV_FIXUP_F32", []>;
-def V_DIV_FIXUP_F64 : VOP3_64 <0x00000160, "V_DIV_FIXUP_F64", []>;
+defm V_DIV_FIXUP_F32 : VOP3_32 <0x0000015f, "V_DIV_FIXUP_F32",
+ [(set f32:$dst, (AMDGPUdiv_fixup f32:$src0, f32:$src1, f32:$src2))]
+>;
+def V_DIV_FIXUP_F64 : VOP3_64 <0x00000160, "V_DIV_FIXUP_F64",
+ [(set f64:$dst, (AMDGPUdiv_fixup f64:$src0, f64:$src1, f64:$src2))]
+>;
-def V_LSHL_B64 : VOP3_64_Shift <0x00000161, "V_LSHL_B64",
+def V_LSHL_B64 : VOP3_64_32 <0x00000161, "V_LSHL_B64",
[(set i64:$dst, (shl i64:$src0, i32:$src1))]
>;
-def V_LSHR_B64 : VOP3_64_Shift <0x00000162, "V_LSHR_B64",
+def V_LSHR_B64 : VOP3_64_32 <0x00000162, "V_LSHR_B64",
[(set i64:$dst, (srl i64:$src0, i32:$src1))]
>;
-def V_ASHR_I64 : VOP3_64_Shift <0x00000163, "V_ASHR_I64",
+def V_ASHR_I64 : VOP3_64_32 <0x00000163, "V_ASHR_I64",
[(set i64:$dst, (sra i64:$src0, i32:$src1))]
>;
@@ -1097,162 +1474,61 @@ def V_MAX_F64 : VOP3_64 <0x00000167, "V_MAX_F64", []>;
} // isCommutable = 1
-def : Pat <
- (fadd f64:$src0, f64:$src1),
- (V_ADD_F64 $src0, $src1, (i64 0))
->;
-
-def : Pat <
- (fmul f64:$src0, f64:$src1),
- (V_MUL_F64 $src0, $src1, (i64 0))
->;
-
def V_LDEXP_F64 : VOP3_64 <0x00000168, "V_LDEXP_F64", []>;
let isCommutable = 1 in {
-def V_MUL_LO_U32 : VOP3_32 <0x00000169, "V_MUL_LO_U32", []>;
-def V_MUL_HI_U32 : VOP3_32 <0x0000016a, "V_MUL_HI_U32", []>;
-def V_MUL_LO_I32 : VOP3_32 <0x0000016b, "V_MUL_LO_I32", []>;
-def V_MUL_HI_I32 : VOP3_32 <0x0000016c, "V_MUL_HI_I32", []>;
+defm V_MUL_LO_U32 : VOP3_32 <0x00000169, "V_MUL_LO_U32", []>;
+defm V_MUL_HI_U32 : VOP3_32 <0x0000016a, "V_MUL_HI_U32", []>;
+defm V_MUL_LO_I32 : VOP3_32 <0x0000016b, "V_MUL_LO_I32", []>;
+defm V_MUL_HI_I32 : VOP3_32 <0x0000016c, "V_MUL_HI_I32", []>;
} // isCommutable = 1
-def : Pat <
- (mul i32:$src0, i32:$src1),
- (V_MUL_LO_I32 $src0, $src1, (i32 0))
->;
+def V_DIV_SCALE_F32 : VOP3b_32 <0x0000016d, "V_DIV_SCALE_F32", []>;
-def : Pat <
- (mulhu i32:$src0, i32:$src1),
- (V_MUL_HI_U32 $src0, $src1, (i32 0))
->;
+// Double precision division pre-scale.
+def V_DIV_SCALE_F64 : VOP3b_64 <0x0000016e, "V_DIV_SCALE_F64", []>;
-def : Pat <
- (mulhs i32:$src0, i32:$src1),
- (V_MUL_HI_I32 $src0, $src1, (i32 0))
+defm V_DIV_FMAS_F32 : VOP3_32 <0x0000016f, "V_DIV_FMAS_F32",
+ [(set f32:$dst, (AMDGPUdiv_fmas f32:$src0, f32:$src1, f32:$src2))]
+>;
+def V_DIV_FMAS_F64 : VOP3_64 <0x00000170, "V_DIV_FMAS_F64",
+ [(set f64:$dst, (AMDGPUdiv_fmas f64:$src0, f64:$src1, f64:$src2))]
>;
-
-def V_DIV_SCALE_F32 : VOP3_32 <0x0000016d, "V_DIV_SCALE_F32", []>;
-def V_DIV_SCALE_F64 : VOP3_64 <0x0000016e, "V_DIV_SCALE_F64", []>;
-def V_DIV_FMAS_F32 : VOP3_32 <0x0000016f, "V_DIV_FMAS_F32", []>;
-def V_DIV_FMAS_F64 : VOP3_64 <0x00000170, "V_DIV_FMAS_F64", []>;
//def V_MSAD_U8 : VOP3_U8 <0x00000171, "V_MSAD_U8", []>;
//def V_QSAD_U8 : VOP3_U8 <0x00000172, "V_QSAD_U8", []>;
//def V_MQSAD_U8 : VOP3_U8 <0x00000173, "V_MQSAD_U8", []>;
-def V_TRIG_PREOP_F64 : VOP3_64 <0x00000174, "V_TRIG_PREOP_F64", []>;
-
-let Defs = [SCC] in { // Carry out goes to SCC
-let isCommutable = 1 in {
-def S_ADD_U32 : SOP2_32 <0x00000000, "S_ADD_U32", []>;
-def S_ADD_I32 : SOP2_32 <0x00000002, "S_ADD_I32",
- [(set i32:$dst, (add SSrc_32:$src0, SSrc_32:$src1))]
+def V_TRIG_PREOP_F64 : VOP3_64_32 <0x00000174, "V_TRIG_PREOP_F64",
+ [(set f64:$dst, (AMDGPUtrig_preop f64:$src0, i32:$src1))]
>;
-} // End isCommutable = 1
-def S_SUB_U32 : SOP2_32 <0x00000001, "S_SUB_U32", []>;
-def S_SUB_I32 : SOP2_32 <0x00000003, "S_SUB_I32",
- [(set i32:$dst, (sub SSrc_32:$src0, SSrc_32:$src1))]
->;
-
-let Uses = [SCC] in { // Carry in comes from SCC
-let isCommutable = 1 in {
-def S_ADDC_U32 : SOP2_32 <0x00000004, "S_ADDC_U32",
- [(set i32:$dst, (adde (i32 SSrc_32:$src0), (i32 SSrc_32:$src1)))]>;
-} // End isCommutable = 1
-
-def S_SUBB_U32 : SOP2_32 <0x00000005, "S_SUBB_U32",
- [(set i32:$dst, (sube (i32 SSrc_32:$src0), (i32 SSrc_32:$src1)))]>;
-} // End Uses = [SCC]
-} // End Defs = [SCC]
+//===----------------------------------------------------------------------===//
+// Pseudo Instructions
+//===----------------------------------------------------------------------===//
-def S_MIN_I32 : SOP2_32 <0x00000006, "S_MIN_I32", []>;
-def S_MIN_U32 : SOP2_32 <0x00000007, "S_MIN_U32", []>;
-def S_MAX_I32 : SOP2_32 <0x00000008, "S_MAX_I32", []>;
-def S_MAX_U32 : SOP2_32 <0x00000009, "S_MAX_U32", []>;
+let isCodeGenOnly = 1, isPseudo = 1 in {
-def S_CSELECT_B32 : SOP2 <
- 0x0000000a, (outs SReg_32:$dst),
- (ins SReg_32:$src0, SReg_32:$src1, SCCReg:$scc), "S_CSELECT_B32",
- []
+def V_MOV_I1 : InstSI <
+ (outs VReg_1:$dst),
+ (ins i1imm:$src),
+ "", [(set i1:$dst, (imm:$src))]
>;
-def S_CSELECT_B64 : SOP2_64 <0x0000000b, "S_CSELECT_B64", []>;
-
-def S_AND_B32 : SOP2_32 <0x0000000e, "S_AND_B32", []>;
-
-def S_AND_B64 : SOP2_64 <0x0000000f, "S_AND_B64",
- [(set i64:$dst, (and i64:$src0, i64:$src1))]
+def V_AND_I1 : InstSI <
+ (outs VReg_1:$dst), (ins VReg_1:$src0, VReg_1:$src1), "",
+ [(set i1:$dst, (and i1:$src0, i1:$src1))]
>;
-def : Pat <
- (i1 (and i1:$src0, i1:$src1)),
- (S_AND_B64 $src0, $src1)
+def V_OR_I1 : InstSI <
+ (outs VReg_1:$dst), (ins VReg_1:$src0, VReg_1:$src1), "",
+ [(set i1:$dst, (or i1:$src0, i1:$src1))]
>;
-def S_OR_B32 : SOP2_32 <0x00000010, "S_OR_B32", []>;
-def S_OR_B64 : SOP2_64 <0x00000011, "S_OR_B64", []>;
-def : Pat <
- (i1 (or i1:$src0, i1:$src1)),
- (S_OR_B64 $src0, $src1)
->;
-def S_XOR_B32 : SOP2_32 <0x00000012, "S_XOR_B32", []>;
-def S_XOR_B64 : SOP2_64 <0x00000013, "S_XOR_B64",
+def V_XOR_I1 : InstSI <
+ (outs VReg_1:$dst), (ins VReg_1:$src0, VReg_1:$src1), "",
[(set i1:$dst, (xor i1:$src0, i1:$src1))]
>;
-def S_ANDN2_B32 : SOP2_32 <0x00000014, "S_ANDN2_B32", []>;
-def S_ANDN2_B64 : SOP2_64 <0x00000015, "S_ANDN2_B64", []>;
-def S_ORN2_B32 : SOP2_32 <0x00000016, "S_ORN2_B32", []>;
-def S_ORN2_B64 : SOP2_64 <0x00000017, "S_ORN2_B64", []>;
-def S_NAND_B32 : SOP2_32 <0x00000018, "S_NAND_B32", []>;
-def S_NAND_B64 : SOP2_64 <0x00000019, "S_NAND_B64", []>;
-def S_NOR_B32 : SOP2_32 <0x0000001a, "S_NOR_B32", []>;
-def S_NOR_B64 : SOP2_64 <0x0000001b, "S_NOR_B64", []>;
-def S_XNOR_B32 : SOP2_32 <0x0000001c, "S_XNOR_B32", []>;
-def S_XNOR_B64 : SOP2_64 <0x0000001d, "S_XNOR_B64", []>;
-
-// Use added complexity so these patterns are preferred to the VALU patterns.
-let AddedComplexity = 1 in {
-
-def S_LSHL_B32 : SOP2_32 <0x0000001e, "S_LSHL_B32",
- [(set i32:$dst, (shl i32:$src0, i32:$src1))]
->;
-def S_LSHL_B64 : SOP2_SHIFT_64 <0x0000001f, "S_LSHL_B64",
- [(set i64:$dst, (shl i64:$src0, i32:$src1))]
->;
-def S_LSHR_B32 : SOP2_32 <0x00000020, "S_LSHR_B32",
- [(set i32:$dst, (srl i32:$src0, i32:$src1))]
->;
-def S_LSHR_B64 : SOP2_SHIFT_64 <0x00000021, "S_LSHR_B64",
- [(set i64:$dst, (srl i64:$src0, i32:$src1))]
->;
-def S_ASHR_I32 : SOP2_32 <0x00000022, "S_ASHR_I32",
- [(set i32:$dst, (sra i32:$src0, i32:$src1))]
->;
-def S_ASHR_I64 : SOP2_SHIFT_64 <0x00000023, "S_ASHR_I64",
- [(set i64:$dst, (sra i64:$src0, i32:$src1))]
->;
-
-} // End AddedComplexity = 1
-
-def S_BFM_B32 : SOP2_32 <0x00000024, "S_BFM_B32", []>;
-def S_BFM_B64 : SOP2_64 <0x00000025, "S_BFM_B64", []>;
-def S_MUL_I32 : SOP2_32 <0x00000026, "S_MUL_I32", []>;
-def S_BFE_U32 : SOP2_32 <0x00000027, "S_BFE_U32", []>;
-def S_BFE_I32 : SOP2_32 <0x00000028, "S_BFE_I32", []>;
-def S_BFE_U64 : SOP2_64 <0x00000029, "S_BFE_U64", []>;
-def S_BFE_I64 : SOP2_64 <0x0000002a, "S_BFE_I64", []>;
-//def S_CBRANCH_G_FORK : SOP2_ <0x0000002b, "S_CBRANCH_G_FORK", []>;
-def S_ABSDIFF_I32 : SOP2_32 <0x0000002c, "S_ABSDIFF_I32", []>;
-
-let isCodeGenOnly = 1, isPseudo = 1 in {
-
-def LOAD_CONST : AMDGPUShaderInst <
- (outs GPRF32:$dst),
- (ins i32imm:$src),
- "LOAD_CONST $dst, $src",
- [(set GPRF32:$dst, (int_AMDGPU_load_const imm:$src))]
->;
// SI pseudo instructions. These are used by the CFG structurizer pass
// and should be lowered to ISA instructions prior to codegen.
@@ -1262,19 +1538,19 @@ let mayLoad = 1, mayStore = 1, hasSideEffects = 1,
let isBranch = 1, isTerminator = 1 in {
-def SI_IF : InstSI <
+def SI_IF: InstSI <
(outs SReg_64:$dst),
(ins SReg_64:$vcc, brtarget:$target),
- "SI_IF $dst, $vcc, $target",
+ "",
[(set i64:$dst, (int_SI_if i1:$vcc, bb:$target))]
>;
def SI_ELSE : InstSI <
(outs SReg_64:$dst),
(ins SReg_64:$src, brtarget:$target),
- "SI_ELSE $dst, $src, $target",
- [(set i64:$dst, (int_SI_else i64:$src, bb:$target))]> {
-
+ "",
+ [(set i64:$dst, (int_SI_else i64:$src, bb:$target))]
+> {
let Constraints = "$src = $dst";
}
@@ -1317,8 +1593,8 @@ def SI_END_CF : InstSI <
def SI_KILL : InstSI <
(outs),
- (ins VReg_32:$src),
- "SI_KIL $src",
+ (ins VSrc_32:$src),
+ "SI_KILL $src",
[(int_AMDGPU_kill f32:$src)]
>;
@@ -1327,22 +1603,22 @@ def SI_KILL : InstSI <
let Uses = [EXEC], Defs = [EXEC,VCC,M0] in {
-//defm SI_ : RegisterLoadStore <VReg_32, FRAMEri64, ADDRIndirect>;
+//defm SI_ : RegisterLoadStore <VReg_32, FRAMEri, ADDRIndirect>;
let UseNamedOperandTable = 1 in {
-def SI_RegisterLoad : AMDGPUShaderInst <
+def SI_RegisterLoad : InstSI <
(outs VReg_32:$dst, SReg_64:$temp),
- (ins FRAMEri64:$addr, i32imm:$chan),
+ (ins FRAMEri32:$addr, i32imm:$chan),
"", []
> {
let isRegisterLoad = 1;
let mayLoad = 1;
}
-class SIRegStore<dag outs> : AMDGPUShaderInst <
+class SIRegStore<dag outs> : InstSI <
outs,
- (ins VReg_32:$val, FRAMEri64:$addr, i32imm:$chan),
+ (ins VReg_32:$val, FRAMEri32:$addr, i32imm:$chan),
"", []
> {
let isRegisterStore = 1;
@@ -1387,7 +1663,13 @@ let usesCustomInserter = 1 in {
// constant that can be used with the ADDR64 MUBUF instructions.
def SI_ADDR64_RSRC : InstSI <
(outs SReg_128:$srsrc),
- (ins SReg_64:$ptr),
+ (ins SSrc_64:$ptr),
+ "", []
+>;
+
+def SI_BUFFER_RSRC : InstSI <
+ (outs SReg_128:$srsrc),
+ (ins SReg_32:$ptr_lo, SReg_32:$ptr_hi, SSrc_32:$data_lo, SSrc_32:$data_hi),
"", []
>;
@@ -1395,13 +1677,49 @@ def V_SUB_F64 : InstSI <
(outs VReg_64:$dst),
(ins VReg_64:$src0, VReg_64:$src1),
"V_SUB_F64 $dst, $src0, $src1",
- []
+ [(set f64:$dst, (fsub f64:$src0, f64:$src1))]
>;
} // end usesCustomInserter
+multiclass SI_SPILL_SGPR <RegisterClass sgpr_class> {
+
+ def _SAVE : InstSI <
+ (outs VReg_32:$dst),
+ (ins sgpr_class:$src, i32imm:$frame_idx),
+ "", []
+ >;
+
+ def _RESTORE : InstSI <
+ (outs sgpr_class:$dst),
+ (ins VReg_32:$src, i32imm:$frame_idx),
+ "", []
+ >;
+
+}
+
+defm SI_SPILL_S32 : SI_SPILL_SGPR <SReg_32>;
+defm SI_SPILL_S64 : SI_SPILL_SGPR <SReg_64>;
+defm SI_SPILL_S128 : SI_SPILL_SGPR <SReg_128>;
+defm SI_SPILL_S256 : SI_SPILL_SGPR <SReg_256>;
+defm SI_SPILL_S512 : SI_SPILL_SGPR <SReg_512>;
+
+let Defs = [SCC] in {
+
+def SI_CONSTDATA_PTR : InstSI <
+ (outs SReg_64:$dst),
+ (ins),
+ "", [(set SReg_64:$dst, (i64 SIconstdata_ptr))]
+>;
+
+} // End Defs = [SCC]
+
} // end IsCodeGenOnly, isPseudo
+} // end SubtargetPredicate = SI
+
+let Predicates = [isSI] in {
+
def : Pat<
(int_AMDGPU_cndlt f32:$src0, f32:$src1, f32:$src2),
(V_CNDMASK_B32_e64 $src2, $src1, (V_CMP_GT_F32_e64 0, $src0))
@@ -1409,12 +1727,12 @@ def : Pat<
def : Pat <
(int_AMDGPU_kilp),
- (SI_KILL (V_MOV_B32_e32 0xbf800000))
+ (SI_KILL 0xbf800000)
>;
/* int_SI_vs_load_input */
def : Pat<
- (SIload_input i128:$tlst, IMM12bit:$attr_offset, i32:$buf_idx_vgpr),
+ (SIload_input v4i32:$tlst, imm:$attr_offset, i32:$buf_idx_vgpr),
(BUFFER_LOAD_FORMAT_XYZW_IDXEN $tlst, $buf_idx_vgpr, imm:$attr_offset, 0, 0, 0, 0)
>;
@@ -1426,45 +1744,312 @@ def : Pat <
$src0, $src1, $src2, $src3)
>;
+//===----------------------------------------------------------------------===//
+// SMRD Patterns
+//===----------------------------------------------------------------------===//
+
+multiclass SMRD_Pattern <SMRD Instr_IMM, SMRD Instr_SGPR, ValueType vt> {
+
+ // 1. Offset as 8bit DWORD immediate
+ def : Pat <
+ (constant_load (add i64:$sbase, (i64 IMM8bitDWORD:$offset))),
+ (vt (Instr_IMM $sbase, (as_dword_i32imm $offset)))
+ >;
+
+  // 2. Offset loaded in a 32bit SGPR
+ def : Pat <
+ (constant_load (add i64:$sbase, (i64 IMM32bit:$offset))),
+ (vt (Instr_SGPR $sbase, (S_MOV_B32 (i32 (as_i32imm $offset)))))
+ >;
+
+ // 3. No offset at all
+ def : Pat <
+ (constant_load i64:$sbase),
+ (vt (Instr_IMM $sbase, 0))
+ >;
+}
+
+defm : SMRD_Pattern <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, f32>;
+defm : SMRD_Pattern <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, i32>;
+defm : SMRD_Pattern <S_LOAD_DWORDX2_IMM, S_LOAD_DWORDX2_SGPR, v2i32>;
+defm : SMRD_Pattern <S_LOAD_DWORDX4_IMM, S_LOAD_DWORDX4_SGPR, v4i32>;
+defm : SMRD_Pattern <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v32i8>;
+defm : SMRD_Pattern <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v8i32>;
+defm : SMRD_Pattern <S_LOAD_DWORDX16_IMM, S_LOAD_DWORDX16_SGPR, v16i32>;
+
+// 1. Offset as 8bit DWORD immediate
def : Pat <
- (f64 (fsub f64:$src0, f64:$src1)),
- (V_SUB_F64 $src0, $src1)
+ (SIload_constant v4i32:$sbase, IMM8bitDWORD:$offset),
+ (S_BUFFER_LOAD_DWORD_IMM $sbase, (as_dword_i32imm $offset))
+>;
+
+// 2. Offset loaded in a 32bit SGPR
+def : Pat <
+ (SIload_constant v4i32:$sbase, imm:$offset),
+ (S_BUFFER_LOAD_DWORD_SGPR $sbase, (S_MOV_B32 imm:$offset))
+>;
+
+} // End Predicates = [isSI]
+
+//===----------------------------------------------------------------------===//
+// SOP1 Patterns
+//===----------------------------------------------------------------------===//
+
+let Predicates = [isSI, isCFDepth0] in {
+
+def : Pat <
+ (i64 (ctpop i64:$src)),
+ (INSERT_SUBREG (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
+ (S_BCNT1_I32_B64 $src), sub0),
+ (S_MOV_B32 0), sub1)
+>;
+
+//===----------------------------------------------------------------------===//
+// SOP2 Patterns
+//===----------------------------------------------------------------------===//
+
+// V_ADD_I32_e32/S_ADD_I32 produces carry in VCC/SCC. For the vector
+// case, the sgpr-copies pass will fix this to use the vector version.
+def : Pat <
+ (i32 (addc i32:$src0, i32:$src1)),
+ (S_ADD_I32 $src0, $src1)
+>;
+
+} // Predicates = [isSI, isCFDepth0]
+
+let Predicates = [isSI] in {
+
+//===----------------------------------------------------------------------===//
+// SOPP Patterns
+//===----------------------------------------------------------------------===//
+
+def : Pat <
+ (int_AMDGPU_barrier_global),
+ (S_BARRIER)
+>;
+
+//===----------------------------------------------------------------------===//
+// VOP1 Patterns
+//===----------------------------------------------------------------------===//
+
+let Predicates = [UnsafeFPMath] in {
+def : RcpPat<V_RCP_F64_e32, f64>;
+defm : RsqPat<V_RSQ_F64_e32, f64>;
+defm : RsqPat<V_RSQ_F32_e32, f32>;
+}
+
+//===----------------------------------------------------------------------===//
+// VOP2 Patterns
+//===----------------------------------------------------------------------===//
+
+class BinOp64Pat <SDNode node, Instruction inst> : Pat <
+ (node i64:$src0, i64:$src1),
+ (INSERT_SUBREG (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
+ (inst (EXTRACT_SUBREG i64:$src0, sub0),
+ (EXTRACT_SUBREG i64:$src1, sub0)), sub0),
+ (inst (EXTRACT_SUBREG i64:$src0, sub1),
+ (EXTRACT_SUBREG i64:$src1, sub1)), sub1)
+>;
+
+def : BinOp64Pat <or, V_OR_B32_e32>;
+def : BinOp64Pat <xor, V_XOR_B32_e32>;
+
+class SextInReg <ValueType vt, int ShiftAmt> : Pat <
+ (sext_inreg i32:$src0, vt),
+ (V_ASHRREV_I32_e32 ShiftAmt, (V_LSHLREV_B32_e32 ShiftAmt, $src0))
+>;
+
+def : SextInReg <i8, 24>;
+def : SextInReg <i16, 16>;
+
+def : Pat <
+ (i32 (add (i32 (ctpop i32:$popcnt)), i32:$val)),
+ (V_BCNT_U32_B32_e32 $popcnt, $val)
+>;
+
+def : Pat <
+ (i32 (ctpop i32:$popcnt)),
+ (V_BCNT_U32_B32_e64 $popcnt, 0, 0, 0)
+>;
+
+def : Pat <
+ (i64 (ctpop i64:$src)),
+ (INSERT_SUBREG
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
+ (V_BCNT_U32_B32_e32 (EXTRACT_SUBREG $src, sub1),
+ (V_BCNT_U32_B32_e64 (EXTRACT_SUBREG $src, sub0), 0, 0, 0)),
+ sub0),
+ (V_MOV_B32_e32 0), sub1)
+>;
+
+def : Pat <
+ (addc i32:$src0, i32:$src1),
+ (V_ADD_I32_e32 $src0, $src1)
>;
/********** ======================= **********/
/********** Image sampling patterns **********/
/********** ======================= **********/
+// Image + sampler
+class SampleRawPattern<SDPatternOperator name, MIMG opcode, ValueType vt> : Pat <
+ (name vt:$addr, v8i32:$rsrc, v4i32:$sampler, i32:$dmask, i32:$unorm,
+ i32:$r128, i32:$da, i32:$glc, i32:$slc, i32:$tfe, i32:$lwe),
+ (opcode (as_i32imm $dmask), (as_i1imm $unorm), (as_i1imm $glc), (as_i1imm $da),
+ (as_i1imm $r128), (as_i1imm $tfe), (as_i1imm $lwe), (as_i1imm $slc),
+ $addr, $rsrc, $sampler)
+>;
+
+multiclass SampleRawPatterns<SDPatternOperator name, string opcode> {
+ def : SampleRawPattern<name, !cast<MIMG>(opcode # _V4_V1), i32>;
+ def : SampleRawPattern<name, !cast<MIMG>(opcode # _V4_V2), v2i32>;
+ def : SampleRawPattern<name, !cast<MIMG>(opcode # _V4_V4), v4i32>;
+ def : SampleRawPattern<name, !cast<MIMG>(opcode # _V4_V8), v8i32>;
+ def : SampleRawPattern<name, !cast<MIMG>(opcode # _V4_V16), v16i32>;
+}
+
+// Image only
+class ImagePattern<SDPatternOperator name, MIMG opcode, ValueType vt> : Pat <
+ (name vt:$addr, v8i32:$rsrc, i32:$dmask, i32:$unorm,
+ i32:$r128, i32:$da, i32:$glc, i32:$slc, i32:$tfe, i32:$lwe),
+ (opcode (as_i32imm $dmask), (as_i1imm $unorm), (as_i1imm $glc), (as_i1imm $da),
+ (as_i1imm $r128), (as_i1imm $tfe), (as_i1imm $lwe), (as_i1imm $slc),
+ $addr, $rsrc)
+>;
+
+multiclass ImagePatterns<SDPatternOperator name, string opcode> {
+ def : ImagePattern<name, !cast<MIMG>(opcode # _V4_V1), i32>;
+ def : ImagePattern<name, !cast<MIMG>(opcode # _V4_V2), v2i32>;
+ def : ImagePattern<name, !cast<MIMG>(opcode # _V4_V4), v4i32>;
+}
+
+// Basic sample
+defm : SampleRawPatterns<int_SI_image_sample, "IMAGE_SAMPLE">;
+defm : SampleRawPatterns<int_SI_image_sample_cl, "IMAGE_SAMPLE_CL">;
+defm : SampleRawPatterns<int_SI_image_sample_d, "IMAGE_SAMPLE_D">;
+defm : SampleRawPatterns<int_SI_image_sample_d_cl, "IMAGE_SAMPLE_D_CL">;
+defm : SampleRawPatterns<int_SI_image_sample_l, "IMAGE_SAMPLE_L">;
+defm : SampleRawPatterns<int_SI_image_sample_b, "IMAGE_SAMPLE_B">;
+defm : SampleRawPatterns<int_SI_image_sample_b_cl, "IMAGE_SAMPLE_B_CL">;
+defm : SampleRawPatterns<int_SI_image_sample_lz, "IMAGE_SAMPLE_LZ">;
+defm : SampleRawPatterns<int_SI_image_sample_cd, "IMAGE_SAMPLE_CD">;
+defm : SampleRawPatterns<int_SI_image_sample_cd_cl, "IMAGE_SAMPLE_CD_CL">;
+
+// Sample with comparison
+defm : SampleRawPatterns<int_SI_image_sample_c, "IMAGE_SAMPLE_C">;
+defm : SampleRawPatterns<int_SI_image_sample_c_cl, "IMAGE_SAMPLE_C_CL">;
+defm : SampleRawPatterns<int_SI_image_sample_c_d, "IMAGE_SAMPLE_C_D">;
+defm : SampleRawPatterns<int_SI_image_sample_c_d_cl, "IMAGE_SAMPLE_C_D_CL">;
+defm : SampleRawPatterns<int_SI_image_sample_c_l, "IMAGE_SAMPLE_C_L">;
+defm : SampleRawPatterns<int_SI_image_sample_c_b, "IMAGE_SAMPLE_C_B">;
+defm : SampleRawPatterns<int_SI_image_sample_c_b_cl, "IMAGE_SAMPLE_C_B_CL">;
+defm : SampleRawPatterns<int_SI_image_sample_c_lz, "IMAGE_SAMPLE_C_LZ">;
+defm : SampleRawPatterns<int_SI_image_sample_c_cd, "IMAGE_SAMPLE_C_CD">;
+defm : SampleRawPatterns<int_SI_image_sample_c_cd_cl, "IMAGE_SAMPLE_C_CD_CL">;
+
+// Sample with offsets
+defm : SampleRawPatterns<int_SI_image_sample_o, "IMAGE_SAMPLE_O">;
+defm : SampleRawPatterns<int_SI_image_sample_cl_o, "IMAGE_SAMPLE_CL_O">;
+defm : SampleRawPatterns<int_SI_image_sample_d_o, "IMAGE_SAMPLE_D_O">;
+defm : SampleRawPatterns<int_SI_image_sample_d_cl_o, "IMAGE_SAMPLE_D_CL_O">;
+defm : SampleRawPatterns<int_SI_image_sample_l_o, "IMAGE_SAMPLE_L_O">;
+defm : SampleRawPatterns<int_SI_image_sample_b_o, "IMAGE_SAMPLE_B_O">;
+defm : SampleRawPatterns<int_SI_image_sample_b_cl_o, "IMAGE_SAMPLE_B_CL_O">;
+defm : SampleRawPatterns<int_SI_image_sample_lz_o, "IMAGE_SAMPLE_LZ_O">;
+defm : SampleRawPatterns<int_SI_image_sample_cd_o, "IMAGE_SAMPLE_CD_O">;
+defm : SampleRawPatterns<int_SI_image_sample_cd_cl_o, "IMAGE_SAMPLE_CD_CL_O">;
+
+// Sample with comparison and offsets
+defm : SampleRawPatterns<int_SI_image_sample_c_o, "IMAGE_SAMPLE_C_O">;
+defm : SampleRawPatterns<int_SI_image_sample_c_cl_o, "IMAGE_SAMPLE_C_CL_O">;
+defm : SampleRawPatterns<int_SI_image_sample_c_d_o, "IMAGE_SAMPLE_C_D_O">;
+defm : SampleRawPatterns<int_SI_image_sample_c_d_cl_o, "IMAGE_SAMPLE_C_D_CL_O">;
+defm : SampleRawPatterns<int_SI_image_sample_c_l_o, "IMAGE_SAMPLE_C_L_O">;
+defm : SampleRawPatterns<int_SI_image_sample_c_b_o, "IMAGE_SAMPLE_C_B_O">;
+defm : SampleRawPatterns<int_SI_image_sample_c_b_cl_o, "IMAGE_SAMPLE_C_B_CL_O">;
+defm : SampleRawPatterns<int_SI_image_sample_c_lz_o, "IMAGE_SAMPLE_C_LZ_O">;
+defm : SampleRawPatterns<int_SI_image_sample_c_cd_o, "IMAGE_SAMPLE_C_CD_O">;
+defm : SampleRawPatterns<int_SI_image_sample_c_cd_cl_o, "IMAGE_SAMPLE_C_CD_CL_O">;
+
+// Gather opcodes
+// Only the variants which make sense are defined.
+def : SampleRawPattern<int_SI_gather4, IMAGE_GATHER4_V4_V2, v2i32>;
+def : SampleRawPattern<int_SI_gather4, IMAGE_GATHER4_V4_V4, v4i32>;
+def : SampleRawPattern<int_SI_gather4_cl, IMAGE_GATHER4_CL_V4_V4, v4i32>;
+def : SampleRawPattern<int_SI_gather4_l, IMAGE_GATHER4_L_V4_V4, v4i32>;
+def : SampleRawPattern<int_SI_gather4_b, IMAGE_GATHER4_B_V4_V4, v4i32>;
+def : SampleRawPattern<int_SI_gather4_b_cl, IMAGE_GATHER4_B_CL_V4_V4, v4i32>;
+def : SampleRawPattern<int_SI_gather4_b_cl, IMAGE_GATHER4_B_CL_V4_V8, v8i32>;
+def : SampleRawPattern<int_SI_gather4_lz, IMAGE_GATHER4_LZ_V4_V2, v2i32>;
+def : SampleRawPattern<int_SI_gather4_lz, IMAGE_GATHER4_LZ_V4_V4, v4i32>;
+
+def : SampleRawPattern<int_SI_gather4_c, IMAGE_GATHER4_C_V4_V4, v4i32>;
+def : SampleRawPattern<int_SI_gather4_c_cl, IMAGE_GATHER4_C_CL_V4_V4, v4i32>;
+def : SampleRawPattern<int_SI_gather4_c_cl, IMAGE_GATHER4_C_CL_V4_V8, v8i32>;
+def : SampleRawPattern<int_SI_gather4_c_l, IMAGE_GATHER4_C_L_V4_V4, v4i32>;
+def : SampleRawPattern<int_SI_gather4_c_l, IMAGE_GATHER4_C_L_V4_V8, v8i32>;
+def : SampleRawPattern<int_SI_gather4_c_b, IMAGE_GATHER4_C_B_V4_V4, v4i32>;
+def : SampleRawPattern<int_SI_gather4_c_b, IMAGE_GATHER4_C_B_V4_V8, v8i32>;
+def : SampleRawPattern<int_SI_gather4_c_b_cl, IMAGE_GATHER4_C_B_CL_V4_V8, v8i32>;
+def : SampleRawPattern<int_SI_gather4_c_lz, IMAGE_GATHER4_C_LZ_V4_V4, v4i32>;
+
+def : SampleRawPattern<int_SI_gather4_o, IMAGE_GATHER4_O_V4_V4, v4i32>;
+def : SampleRawPattern<int_SI_gather4_cl_o, IMAGE_GATHER4_CL_O_V4_V4, v4i32>;
+def : SampleRawPattern<int_SI_gather4_cl_o, IMAGE_GATHER4_CL_O_V4_V8, v8i32>;
+def : SampleRawPattern<int_SI_gather4_l_o, IMAGE_GATHER4_L_O_V4_V4, v4i32>;
+def : SampleRawPattern<int_SI_gather4_l_o, IMAGE_GATHER4_L_O_V4_V8, v8i32>;
+def : SampleRawPattern<int_SI_gather4_b_o, IMAGE_GATHER4_B_O_V4_V4, v4i32>;
+def : SampleRawPattern<int_SI_gather4_b_o, IMAGE_GATHER4_B_O_V4_V8, v8i32>;
+def : SampleRawPattern<int_SI_gather4_b_cl_o, IMAGE_GATHER4_B_CL_O_V4_V8, v8i32>;
+def : SampleRawPattern<int_SI_gather4_lz_o, IMAGE_GATHER4_LZ_O_V4_V4, v4i32>;
+
+def : SampleRawPattern<int_SI_gather4_c_o, IMAGE_GATHER4_C_O_V4_V4, v4i32>;
+def : SampleRawPattern<int_SI_gather4_c_o, IMAGE_GATHER4_C_O_V4_V8, v8i32>;
+def : SampleRawPattern<int_SI_gather4_c_cl_o, IMAGE_GATHER4_C_CL_O_V4_V8, v8i32>;
+def : SampleRawPattern<int_SI_gather4_c_l_o, IMAGE_GATHER4_C_L_O_V4_V8, v8i32>;
+def : SampleRawPattern<int_SI_gather4_c_b_o, IMAGE_GATHER4_C_B_O_V4_V8, v8i32>;
+def : SampleRawPattern<int_SI_gather4_c_b_cl_o, IMAGE_GATHER4_C_B_CL_O_V4_V8, v8i32>;
+def : SampleRawPattern<int_SI_gather4_c_lz_o, IMAGE_GATHER4_C_LZ_O_V4_V4, v4i32>;
+def : SampleRawPattern<int_SI_gather4_c_lz_o, IMAGE_GATHER4_C_LZ_O_V4_V8, v8i32>;
+
+def : SampleRawPattern<int_SI_getlod, IMAGE_GET_LOD_V4_V1, i32>;
+def : SampleRawPattern<int_SI_getlod, IMAGE_GET_LOD_V4_V2, v2i32>;
+def : SampleRawPattern<int_SI_getlod, IMAGE_GET_LOD_V4_V4, v4i32>;
+
+def : ImagePattern<int_SI_getresinfo, IMAGE_GET_RESINFO_V4_V1, i32>;
+defm : ImagePatterns<int_SI_image_load, "IMAGE_LOAD">;
+defm : ImagePatterns<int_SI_image_load_mip, "IMAGE_LOAD_MIP">;
+
/* SIsample for simple 1D texture lookup */
def : Pat <
- (SIsample i32:$addr, v32i8:$rsrc, i128:$sampler, imm),
+ (SIsample i32:$addr, v32i8:$rsrc, v4i32:$sampler, imm),
(IMAGE_SAMPLE_V4_V1 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
>;
class SamplePattern<SDNode name, MIMG opcode, ValueType vt> : Pat <
- (name vt:$addr, v32i8:$rsrc, i128:$sampler, imm),
+ (name vt:$addr, v32i8:$rsrc, v4i32:$sampler, imm),
(opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
>;
class SampleRectPattern<SDNode name, MIMG opcode, ValueType vt> : Pat <
- (name vt:$addr, v32i8:$rsrc, i128:$sampler, TEX_RECT),
+ (name vt:$addr, v32i8:$rsrc, v4i32:$sampler, TEX_RECT),
(opcode 0xf, 1, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
>;
class SampleArrayPattern<SDNode name, MIMG opcode, ValueType vt> : Pat <
- (name vt:$addr, v32i8:$rsrc, i128:$sampler, TEX_ARRAY),
+ (name vt:$addr, v32i8:$rsrc, v4i32:$sampler, TEX_ARRAY),
(opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc, $sampler)
>;
class SampleShadowPattern<SDNode name, MIMG opcode,
ValueType vt> : Pat <
- (name vt:$addr, v32i8:$rsrc, i128:$sampler, TEX_SHADOW),
+ (name vt:$addr, v32i8:$rsrc, v4i32:$sampler, TEX_SHADOW),
(opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
>;
class SampleShadowArrayPattern<SDNode name, MIMG opcode,
ValueType vt> : Pat <
- (name vt:$addr, v32i8:$rsrc, i128:$sampler, TEX_SHADOW_ARRAY),
+ (name vt:$addr, v32i8:$rsrc, v4i32:$sampler, TEX_SHADOW_ARRAY),
(opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc, $sampler)
>;
@@ -1649,25 +2234,42 @@ def : BitConvert <f64, i64, VReg_64>;
def : BitConvert <v2f32, v2i32, VReg_64>;
def : BitConvert <v2i32, v2f32, VReg_64>;
def : BitConvert <v2i32, i64, VReg_64>;
-
+def : BitConvert <i64, v2i32, VReg_64>;
+def : BitConvert <v2f32, i64, VReg_64>;
+def : BitConvert <i64, v2f32, VReg_64>;
+def : BitConvert <v2i32, f64, VReg_64>;
+def : BitConvert <f64, v2i32, VReg_64>;
def : BitConvert <v4f32, v4i32, VReg_128>;
def : BitConvert <v4i32, v4f32, VReg_128>;
-def : BitConvert <v4i32, i128, VReg_128>;
-def : BitConvert <i128, v4i32, VReg_128>;
+def : BitConvert <v8f32, v8i32, SReg_256>;
+def : BitConvert <v8i32, v8f32, SReg_256>;
def : BitConvert <v8i32, v32i8, SReg_256>;
def : BitConvert <v32i8, v8i32, SReg_256>;
def : BitConvert <v8i32, v32i8, VReg_256>;
+def : BitConvert <v8i32, v8f32, VReg_256>;
+def : BitConvert <v8f32, v8i32, VReg_256>;
def : BitConvert <v32i8, v8i32, VReg_256>;
+def : BitConvert <v16i32, v16f32, VReg_512>;
+def : BitConvert <v16f32, v16i32, VReg_512>;
+
/********** =================== **********/
/********** Src & Dst modifiers **********/
/********** =================== **********/
+def FCLAMP_SI : AMDGPUShaderInst <
+ (outs VReg_32:$dst),
+ (ins VSrc_32:$src0),
+ "FCLAMP_SI $dst, $src0",
+ []
+> {
+ let usesCustomInserter = 1;
+}
+
def : Pat <
- (int_AMDIL_clamp f32:$src, (f32 FP_ZERO), (f32 FP_ONE)),
- (V_ADD_F32_e64 $src, (i32 0 /* SRC1 */),
- 0 /* ABS */, 1 /* CLAMP */, 0 /* OMOD */, 0 /* NEG */)
+ (AMDGPUclamp f32:$src, (f32 FP_ZERO), (f32 FP_ONE)),
+ (FCLAMP_SI f32:$src)
>;
/********** ================================ **********/
@@ -1686,14 +2288,32 @@ def : Pat <
(V_OR_B32_e32 $src, (V_MOV_B32_e32 0x80000000)) /* Set sign bit */
>;
+def FABS_SI : AMDGPUShaderInst <
+ (outs VReg_32:$dst),
+ (ins VSrc_32:$src0),
+ "FABS_SI $dst, $src0",
+ []
+> {
+ let usesCustomInserter = 1;
+}
+
def : Pat <
(fabs f32:$src),
- (V_AND_B32_e32 $src, (V_MOV_B32_e32 0x7fffffff)) /* Clear sign bit */
+ (FABS_SI f32:$src)
>;
+def FNEG_SI : AMDGPUShaderInst <
+ (outs VReg_32:$dst),
+ (ins VSrc_32:$src0),
+ "FNEG_SI $dst, $src0",
+ []
+> {
+ let usesCustomInserter = 1;
+}
+
def : Pat <
(fneg f32:$src),
- (V_XOR_B32_e32 $src, (V_MOV_B32_e32 0x80000000)) /* Toggle sign bit */
+ (FNEG_SI f32:$src)
>;
/********** ================== **********/
@@ -1721,30 +2341,10 @@ def : Pat <
>;
def : Pat <
- (i1 imm:$imm),
- (S_MOV_B64 imm:$imm)
->;
-
-def : Pat <
(i64 InlineImm<i64>:$imm),
(S_MOV_B64 InlineImm<i64>:$imm)
>;
-// i64 immediates aren't supported in hardware, split it into two 32bit values
-def : Pat <
- (i64 imm:$imm),
- (INSERT_SUBREG (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
- (S_MOV_B32 (i32 (LO32 imm:$imm))), sub0),
- (S_MOV_B32 (i32 (HI32 imm:$imm))), sub1)
->;
-
-def : Pat <
- (f64 fpimm:$imm),
- (INSERT_SUBREG (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
- (V_MOV_B32_e32 (f32 (LO32f fpimm:$imm))), sub0),
- (V_MOV_B32_e32 (f32 (HI32f fpimm:$imm))), sub1)
->;
-
/********** ===================== **********/
/********** Interpolation Patterns **********/
/********** ===================== **********/
@@ -1775,26 +2375,11 @@ def : Pat <
>;
def : Pat<
- (fdiv f32:$src0, f32:$src1),
- (V_MUL_F32_e32 $src0, (V_RCP_F32_e32 $src1))
->;
-
-def : Pat<
(fdiv f64:$src0, f64:$src1),
(V_MUL_F64 $src0, (V_RCP_F64_e32 $src1), (i64 0))
>;
def : Pat <
- (fcos f32:$src0),
- (V_COS_F32_e32 (V_MUL_F32_e32 $src0, (V_MOV_B32_e32 CONST.TWO_PI_INV)))
->;
-
-def : Pat <
- (fsin f32:$src0),
- (V_SIN_F32_e32 (V_MUL_F32_e32 $src0, (V_MOV_B32_e32 CONST.TWO_PI_INV)))
->;
-
-def : Pat <
(int_AMDGPU_cube v4f32:$src),
(INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
(V_CUBETC_F32 (EXTRACT_SUBREG $src, sub0),
@@ -1820,27 +2405,18 @@ def : Pat <
(V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src0)
>;
-def : Pat <
- (i32 (zext i1:$src0)),
+class Ext32Pat <SDNode ext> : Pat <
+ (i32 (ext i1:$src0)),
(V_CNDMASK_B32_e64 (i32 0), (i32 1), $src0)
>;
-// 1. Offset as 8bit DWORD immediate
-def : Pat <
- (SIload_constant i128:$sbase, IMM8bitDWORD:$offset),
- (S_BUFFER_LOAD_DWORD_IMM $sbase, IMM8bitDWORD:$offset)
->;
-
-// 2. Offset loaded in an 32bit SGPR
-def : Pat <
- (SIload_constant i128:$sbase, imm:$offset),
- (S_BUFFER_LOAD_DWORD_SGPR $sbase, (S_MOV_B32 imm:$offset))
->;
+def : Ext32Pat <zext>;
+def : Ext32Pat <anyext>;
-// 3. Offset in an 32Bit VGPR
+// Offset in a 32-bit VGPR
def : Pat <
- (SIload_constant i128:$sbase, i32:$voff),
- (BUFFER_LOAD_DWORD_OFFEN $sbase, $voff, 0, 0, 0, 0)
+ (SIload_constant v4i32:$sbase, i32:$voff),
+ (BUFFER_LOAD_DWORD_OFFEN $sbase, $voff, 0, 0, 0, 0, 0)
>;
// The multiplication scales from [0,1] to the unsigned integer range
@@ -1854,175 +2430,228 @@ def : Pat <
def : Pat <
(int_SI_tid),
(V_MBCNT_HI_U32_B32_e32 0xffffffff,
- (V_MBCNT_LO_U32_B32_e64 0xffffffff, 0, 0, 0, 0, 0))
+ (V_MBCNT_LO_U32_B32_e64 0xffffffff, 0, 0, 0))
>;
-/********** ================== **********/
-/********** VOP3 Patterns **********/
-/********** ================== **********/
+//===----------------------------------------------------------------------===//
+// VOP3 Patterns
+//===----------------------------------------------------------------------===//
+
+def : IMad24Pat<V_MAD_I32_I24>;
+def : UMad24Pat<V_MAD_U32_U24>;
def : Pat <
- (f32 (fadd (fmul f32:$src0, f32:$src1), f32:$src2)),
- (V_MAD_F32 $src0, $src1, $src2)
+ (fadd f64:$src0, f64:$src1),
+ (V_ADD_F64 $src0, $src1, (i64 0))
>;
-/********** ======================= **********/
-/********** Load/Store Patterns **********/
-/********** ======================= **********/
-
-class DSReadPat <DS inst, ValueType vt, PatFrag frag> : Pat <
- (frag i32:$src0),
- (vt (inst 0, $src0, $src0, $src0, 0, 0))
+def : Pat <
+ (fmul f64:$src0, f64:$src1),
+ (V_MUL_F64 $src0, $src1, (i64 0))
>;
-def : DSReadPat <DS_READ_I8, i32, sextloadi8_local>;
-def : DSReadPat <DS_READ_U8, i32, az_extloadi8_local>;
-def : DSReadPat <DS_READ_I16, i32, sextloadi16_local>;
-def : DSReadPat <DS_READ_U16, i32, az_extloadi16_local>;
-def : DSReadPat <DS_READ_B32, i32, local_load>;
def : Pat <
- (local_load i32:$src0),
- (i32 (DS_READ_B32 0, $src0, $src0, $src0, 0, 0))
+ (mul i32:$src0, i32:$src1),
+ (V_MUL_LO_I32 $src0, $src1, (i32 0))
>;
-class DSWritePat <DS inst, ValueType vt, PatFrag frag> : Pat <
- (frag i32:$src1, i32:$src0),
- (inst 0, $src0, $src1, $src1, 0, 0)
+def : Pat <
+ (mulhu i32:$src0, i32:$src1),
+ (V_MUL_HI_U32 $src0, $src1, (i32 0))
>;
-def : DSWritePat <DS_WRITE_B8, i32, truncstorei8_local>;
-def : DSWritePat <DS_WRITE_B16, i32, truncstorei16_local>;
-def : DSWritePat <DS_WRITE_B32, i32, local_store>;
-
-def : Pat <(atomic_load_add_local i32:$ptr, i32:$val),
- (DS_ADD_U32_RTN 0, $ptr, $val, 0, 0)>;
+def : Pat <
+ (mulhs i32:$src0, i32:$src1),
+ (V_MUL_HI_I32 $src0, $src1, (i32 0))
+>;
-def : Pat <(atomic_load_sub_local i32:$ptr, i32:$val),
- (DS_SUB_U32_RTN 0, $ptr, $val, 0, 0)>;
+defm : BFIPatterns <V_BFI_B32, S_MOV_B32>;
+def : ROTRPattern <V_ALIGNBIT_B32>;
-/********** ================== **********/
-/********** SMRD Patterns **********/
-/********** ================== **********/
+/********** ======================= **********/
+/********** Load/Store Patterns **********/
+/********** ======================= **********/
-multiclass SMRD_Pattern <SMRD Instr_IMM, SMRD Instr_SGPR, ValueType vt> {
+multiclass DSReadPat <DS inst, ValueType vt, PatFrag frag> {
+ def : Pat <
+ (vt (frag (add i32:$ptr, (i32 IMM16bit:$offset)))),
+ (inst (i1 0), $ptr, (as_i16imm $offset))
+ >;
- // 1. Offset as 8bit DWORD immediate
def : Pat <
- (constant_load (SIadd64bit32bit i64:$sbase, IMM8bitDWORD:$offset)),
- (vt (Instr_IMM $sbase, IMM8bitDWORD:$offset))
+ (frag i32:$src0),
+ (vt (inst 0, $src0, 0))
>;
+}
- // 2. Offset loaded in an 32bit SGPR
+defm : DSReadPat <DS_READ_I8, i32, sextloadi8_local>;
+defm : DSReadPat <DS_READ_U8, i32, az_extloadi8_local>;
+defm : DSReadPat <DS_READ_I16, i32, sextloadi16_local>;
+defm : DSReadPat <DS_READ_U16, i32, az_extloadi16_local>;
+defm : DSReadPat <DS_READ_B32, i32, local_load>;
+defm : DSReadPat <DS_READ_B64, v2i32, local_load>;
+
+multiclass DSWritePat <DS inst, ValueType vt, PatFrag frag> {
def : Pat <
- (constant_load (SIadd64bit32bit i64:$sbase, imm:$offset)),
- (vt (Instr_SGPR $sbase, (S_MOV_B32 imm:$offset)))
+ (frag vt:$value, (add i32:$ptr, (i32 IMM16bit:$offset))),
+ (inst (i1 0), $ptr, $value, (as_i16imm $offset))
>;
- // 3. No offset at all
def : Pat <
- (constant_load i64:$sbase),
- (vt (Instr_IMM $sbase, 0))
+ (frag vt:$val, i32:$ptr),
+ (inst 0, $ptr, $val, 0)
>;
}
-defm : SMRD_Pattern <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, f32>;
-defm : SMRD_Pattern <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, i32>;
-defm : SMRD_Pattern <S_LOAD_DWORDX2_IMM, S_LOAD_DWORDX2_SGPR, i64>;
-defm : SMRD_Pattern <S_LOAD_DWORDX2_IMM, S_LOAD_DWORDX2_SGPR, v2i32>;
-defm : SMRD_Pattern <S_LOAD_DWORDX4_IMM, S_LOAD_DWORDX4_SGPR, i128>;
-defm : SMRD_Pattern <S_LOAD_DWORDX4_IMM, S_LOAD_DWORDX4_SGPR, v4i32>;
-defm : SMRD_Pattern <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v32i8>;
-defm : SMRD_Pattern <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v8i32>;
-defm : SMRD_Pattern <S_LOAD_DWORDX16_IMM, S_LOAD_DWORDX16_SGPR, v16i32>;
+defm : DSWritePat <DS_WRITE_B8, i32, truncstorei8_local>;
+defm : DSWritePat <DS_WRITE_B16, i32, truncstorei16_local>;
+defm : DSWritePat <DS_WRITE_B32, i32, local_store>;
+defm : DSWritePat <DS_WRITE_B64, v2i32, local_store>;
-//===----------------------------------------------------------------------===//
-// MUBUF Patterns
-//===----------------------------------------------------------------------===//
-
-multiclass MUBUFLoad_Pattern <MUBUF Instr_ADDR64, ValueType vt,
- PatFrag global_ld, PatFrag constant_ld> {
+multiclass DSAtomicRetPat<DS inst, ValueType vt, PatFrag frag> {
def : Pat <
- (vt (global_ld (add i64:$ptr, (i64 IMM12bit:$offset)))),
- (Instr_ADDR64 (SI_ADDR64_RSRC (i64 0)), $ptr, (as_i16imm $offset))
+ (frag (add i32:$ptr, (i32 IMM16bit:$offset)), vt:$value),
+ (inst (i1 0), $ptr, $value, (as_i16imm $offset))
>;
def : Pat <
- (vt (global_ld i64:$ptr)),
- (Instr_ADDR64 (SI_ADDR64_RSRC (i64 0)), $ptr, 0)
+ (frag i32:$ptr, vt:$val),
+ (inst 0, $ptr, $val, 0)
>;
+}
+// Special case of DSAtomicRetPat for add / sub 1 -> inc / dec
+//
+// We need to use something for the data0, so we set a register to
+// -1. For the non-rtn variants, the manual says it does
+// DS[A] = (DS[A] >= D0) ? 0 : DS[A] + 1, and setting D0 to uint_max
+// will always do the increment, so I'm assuming the RTN variants behave the same.
+//
+// We also load this -1 with s_mov_b32 / s_mov_b64 even though this
+// needs to be a VGPR. The SGPR copy pass will fix this, and it's
+// easier since there is no v_mov_b64.
+multiclass DSAtomicIncRetPat<DS inst, ValueType vt,
+ Instruction LoadImm, PatFrag frag> {
def : Pat <
- (vt (global_ld (add i64:$ptr, i64:$offset))),
- (Instr_ADDR64 (SI_ADDR64_RSRC $ptr), $offset, 0)
+ (frag (add i32:$ptr, (i32 IMM16bit:$offset)), (vt 1)),
+ (inst (i1 0), $ptr, (LoadImm (vt -1)), (as_i16imm $offset))
>;
def : Pat <
- (vt (constant_ld (add i64:$ptr, i64:$offset))),
- (Instr_ADDR64 (SI_ADDR64_RSRC $ptr), $offset, 0)
+ (frag i32:$ptr, (vt 1)),
+ (inst 0, $ptr, (LoadImm (vt -1)), 0)
>;
}
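To see why data0 = -1 works, here is a C++ model of the DS_INC formula quoted in the comment above; with D0 at uint_max the wrap-to-zero case only triggers when the value is already uint_max, which is exactly what an ordinary unsigned +1 would do. This is only a restatement of the documented formula, not backend code:

#include <cstdint>

// DS_INC per the comment above: wrap to 0 once DS[A] reaches D0,
// otherwise increment.
static uint32_t dsInc(uint32_t mem, uint32_t d0) {
  return (mem >= d0) ? 0u : mem + 1u;
}

// dsInc(5, UINT32_MAX)          == 6
// dsInc(UINT32_MAX, UINT32_MAX) == 0, matching UINT32_MAX + 1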
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_SBYTE_ADDR64, i32,
- sextloadi8_global, sextloadi8_constant>;
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_UBYTE_ADDR64, i32,
- az_extloadi8_global, az_extloadi8_constant>;
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_SSHORT_ADDR64, i32,
- sextloadi16_global, sextloadi16_constant>;
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_USHORT_ADDR64, i32,
- az_extloadi16_global, az_extloadi16_constant>;
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORD_ADDR64, i32,
- global_load, constant_load>;
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, i64,
- global_load, constant_load>;
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, i64,
- az_extloadi32_global, az_extloadi32_constant>;
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, v2i32,
- global_load, constant_load>;
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX4_ADDR64, v4i32,
- global_load, constant_load>;
-
-multiclass MUBUFStore_Pattern <MUBUF Instr, ValueType vt, PatFrag st> {
+multiclass DSAtomicCmpXChg <DS inst, ValueType vt, PatFrag frag> {
+ def : Pat <
+ (frag (add i32:$ptr, (i32 IMM16bit:$offset)), vt:$cmp, vt:$swap),
+ (inst (i1 0), $ptr, $cmp, $swap, (as_i16imm $offset))
+ >;
def : Pat <
- (st vt:$value, i64:$ptr),
- (Instr $value, (SI_ADDR64_RSRC (i64 0)), $ptr, 0)
+ (frag i32:$ptr, vt:$cmp, vt:$swap),
+ (inst 0, $ptr, $cmp, $swap, 0)
>;
+}
+
+// 32-bit atomics.
+defm : DSAtomicIncRetPat<DS_INC_RTN_U32, i32,
+ S_MOV_B32, atomic_load_add_local>;
+defm : DSAtomicIncRetPat<DS_DEC_RTN_U32, i32,
+ S_MOV_B32, atomic_load_sub_local>;
+
+defm : DSAtomicRetPat<DS_WRXCHG_RTN_B32, i32, atomic_swap_local>;
+defm : DSAtomicRetPat<DS_ADD_RTN_U32, i32, atomic_load_add_local>;
+defm : DSAtomicRetPat<DS_SUB_RTN_U32, i32, atomic_load_sub_local>;
+defm : DSAtomicRetPat<DS_AND_RTN_B32, i32, atomic_load_and_local>;
+defm : DSAtomicRetPat<DS_OR_RTN_B32, i32, atomic_load_or_local>;
+defm : DSAtomicRetPat<DS_XOR_RTN_B32, i32, atomic_load_xor_local>;
+defm : DSAtomicRetPat<DS_MIN_RTN_I32, i32, atomic_load_min_local>;
+defm : DSAtomicRetPat<DS_MAX_RTN_I32, i32, atomic_load_max_local>;
+defm : DSAtomicRetPat<DS_MIN_RTN_U32, i32, atomic_load_umin_local>;
+defm : DSAtomicRetPat<DS_MAX_RTN_U32, i32, atomic_load_umax_local>;
+
+defm : DSAtomicCmpXChg<DS_CMPST_RTN_B32, i32, atomic_cmp_swap_32_local>;
+
+// 64-bit atomics.
+defm : DSAtomicIncRetPat<DS_INC_RTN_U64, i64,
+ S_MOV_B64, atomic_load_add_local>;
+defm : DSAtomicIncRetPat<DS_DEC_RTN_U64, i64,
+ S_MOV_B64, atomic_load_sub_local>;
+
+defm : DSAtomicRetPat<DS_WRXCHG_RTN_B64, i64, atomic_swap_local>;
+defm : DSAtomicRetPat<DS_ADD_RTN_U64, i64, atomic_load_add_local>;
+defm : DSAtomicRetPat<DS_SUB_RTN_U64, i64, atomic_load_sub_local>;
+defm : DSAtomicRetPat<DS_AND_RTN_B64, i64, atomic_load_and_local>;
+defm : DSAtomicRetPat<DS_OR_RTN_B64, i64, atomic_load_or_local>;
+defm : DSAtomicRetPat<DS_XOR_RTN_B64, i64, atomic_load_xor_local>;
+defm : DSAtomicRetPat<DS_MIN_RTN_I64, i64, atomic_load_min_local>;
+defm : DSAtomicRetPat<DS_MAX_RTN_I64, i64, atomic_load_max_local>;
+defm : DSAtomicRetPat<DS_MIN_RTN_U64, i64, atomic_load_umin_local>;
+defm : DSAtomicRetPat<DS_MAX_RTN_U64, i64, atomic_load_umax_local>;
+
+defm : DSAtomicCmpXChg<DS_CMPST_RTN_B64, i64, atomic_cmp_swap_64_local>;
+
+
+//===----------------------------------------------------------------------===//
+// MUBUF Patterns
+//===----------------------------------------------------------------------===//
+
+multiclass MUBUFLoad_Pattern <MUBUF Instr_ADDR64, ValueType vt,
+ PatFrag constant_ld> {
def : Pat <
- (st vt:$value, (add i64:$ptr, i64:$offset)),
- (Instr $value, (SI_ADDR64_RSRC $ptr), $offset, 0)
- >;
+ (vt (constant_ld (add i64:$ptr, i64:$offset))),
+ (Instr_ADDR64 (SI_ADDR64_RSRC $ptr), $offset, 0)
+ >;
+
}
-defm : MUBUFStore_Pattern <BUFFER_STORE_BYTE, i32, truncstorei8_global>;
-defm : MUBUFStore_Pattern <BUFFER_STORE_SHORT, i32, truncstorei16_global>;
-defm : MUBUFStore_Pattern <BUFFER_STORE_DWORD, i32, global_store>;
-defm : MUBUFStore_Pattern <BUFFER_STORE_DWORDX2, i64, global_store>;
-defm : MUBUFStore_Pattern <BUFFER_STORE_DWORDX2, v2i32, global_store>;
-defm : MUBUFStore_Pattern <BUFFER_STORE_DWORDX4, v4i32, global_store>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_SBYTE_ADDR64, i32, sextloadi8_constant>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_UBYTE_ADDR64, i32, az_extloadi8_constant>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_SSHORT_ADDR64, i32, sextloadi16_constant>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_USHORT_ADDR64, i32, az_extloadi16_constant>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORD_ADDR64, i32, constant_load>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, v2i32, constant_load>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX4_ADDR64, v4i32, constant_load>;
+
+class MUBUFScratchLoadPat <MUBUF Instr, ValueType vt, PatFrag ld> : Pat <
+ (vt (ld (MUBUFScratch v4i32:$srsrc, i32:$vaddr,
+ i32:$soffset, u16imm:$offset))),
+ (Instr $srsrc, $vaddr, $soffset, $offset, 0, 0, 0)
+>;
+
+def : MUBUFScratchLoadPat <BUFFER_LOAD_SBYTE_OFFEN, i32, sextloadi8_private>;
+def : MUBUFScratchLoadPat <BUFFER_LOAD_UBYTE_OFFEN, i32, extloadi8_private>;
+def : MUBUFScratchLoadPat <BUFFER_LOAD_SSHORT_OFFEN, i32, sextloadi16_private>;
+def : MUBUFScratchLoadPat <BUFFER_LOAD_USHORT_OFFEN, i32, extloadi16_private>;
+def : MUBUFScratchLoadPat <BUFFER_LOAD_DWORD_OFFEN, i32, load_private>;
+def : MUBUFScratchLoadPat <BUFFER_LOAD_DWORDX2_OFFEN, v2i32, load_private>;
+def : MUBUFScratchLoadPat <BUFFER_LOAD_DWORDX4_OFFEN, v4i32, load_private>;
// BUFFER_LOAD_DWORD*, addr64=0
multiclass MUBUF_Load_Dword <ValueType vt, MUBUF offset, MUBUF offen, MUBUF idxen,
MUBUF bothen> {
def : Pat <
- (vt (int_SI_buffer_load_dword i128:$rsrc, i32:$vaddr, i32:$soffset,
+ (vt (int_SI_buffer_load_dword v4i32:$rsrc, (i32 imm), i32:$soffset,
imm:$offset, 0, 0, imm:$glc, imm:$slc,
imm:$tfe)),
- (offset $rsrc, $vaddr, (as_i16imm $offset), $soffset, (as_i1imm $glc),
+ (offset $rsrc, (as_i16imm $offset), $soffset, (as_i1imm $glc),
(as_i1imm $slc), (as_i1imm $tfe))
>;
def : Pat <
- (vt (int_SI_buffer_load_dword i128:$rsrc, i32:$vaddr, i32:$soffset,
- imm, 1, 0, imm:$glc, imm:$slc,
+ (vt (int_SI_buffer_load_dword v4i32:$rsrc, i32:$vaddr, i32:$soffset,
+ imm:$offset, 1, 0, imm:$glc, imm:$slc,
imm:$tfe)),
- (offen $rsrc, $vaddr, $soffset, (as_i1imm $glc), (as_i1imm $slc),
+ (offen $rsrc, $vaddr, $soffset, (as_i16imm $offset), (as_i1imm $glc), (as_i1imm $slc),
(as_i1imm $tfe))
>;
def : Pat <
- (vt (int_SI_buffer_load_dword i128:$rsrc, i32:$vaddr, i32:$soffset,
+ (vt (int_SI_buffer_load_dword v4i32:$rsrc, i32:$vaddr, i32:$soffset,
imm:$offset, 0, 1, imm:$glc, imm:$slc,
imm:$tfe)),
(idxen $rsrc, $vaddr, (as_i16imm $offset), $soffset, (as_i1imm $glc),
@@ -2030,7 +2659,7 @@ multiclass MUBUF_Load_Dword <ValueType vt, MUBUF offset, MUBUF offen, MUBUF idxe
>;
def : Pat <
- (vt (int_SI_buffer_load_dword i128:$rsrc, v2i32:$vaddr, i32:$soffset,
+ (vt (int_SI_buffer_load_dword v4i32:$rsrc, v2i32:$vaddr, i32:$soffset,
imm, 1, 1, imm:$glc, imm:$slc,
imm:$tfe)),
(bothen $rsrc, $vaddr, $soffset, (as_i1imm $glc), (as_i1imm $slc),
@@ -2045,13 +2674,41 @@ defm : MUBUF_Load_Dword <v2i32, BUFFER_LOAD_DWORDX2_OFFSET, BUFFER_LOAD_DWORDX2_
defm : MUBUF_Load_Dword <v4i32, BUFFER_LOAD_DWORDX4_OFFSET, BUFFER_LOAD_DWORDX4_OFFEN,
BUFFER_LOAD_DWORDX4_IDXEN, BUFFER_LOAD_DWORDX4_BOTHEN>;
+class MUBUFScratchStorePat <MUBUF Instr, ValueType vt, PatFrag st> : Pat <
+ (st vt:$value, (MUBUFAddr32 v4i32:$srsrc, i32:$vaddr, i32:$soffset,
+ u16imm:$offset, i1imm:$offen, i1imm:$idxen,
+ i1imm:$glc, i1imm:$slc, i1imm:$tfe)),
+ (Instr $value, $srsrc, $vaddr, $soffset, $offset, $offen, $idxen,
+ $glc, $slc, $tfe)
+>;
+
+def : MUBUFScratchStorePat <BUFFER_STORE_BYTE, i32, truncstorei8_private>;
+def : MUBUFScratchStorePat <BUFFER_STORE_SHORT, i32, truncstorei16_private>;
+def : MUBUFScratchStorePat <BUFFER_STORE_DWORD, i32, store_private>;
+def : MUBUFScratchStorePat <BUFFER_STORE_DWORDX2, v2i32, store_private>;
+def : MUBUFScratchStorePat <BUFFER_STORE_DWORDX4, v4i32, store_private>;
+
+/*
+class MUBUFStore_Pattern <MUBUF Instr, ValueType vt, PatFrag st> : Pat <
+ (st vt:$value, (MUBUFScratch v4i32:$srsrc, i64:$vaddr, u16imm:$offset)),
+ (Instr $value, $srsrc, $vaddr, $offset)
+>;
+
+def : MUBUFStore_Pattern <BUFFER_STORE_BYTE_ADDR64, i32, truncstorei8_private>;
+def : MUBUFStore_Pattern <BUFFER_STORE_SHORT_ADDR64, i32, truncstorei16_private>;
+def : MUBUFStore_Pattern <BUFFER_STORE_DWORD_ADDR64, i32, store_private>;
+def : MUBUFStore_Pattern <BUFFER_STORE_DWORDX2_ADDR64, v2i32, store_private>;
+def : MUBUFStore_Pattern <BUFFER_STORE_DWORDX4_ADDR64, v4i32, store_private>;
+
+*/
+
//===----------------------------------------------------------------------===//
// MTBUF Patterns
//===----------------------------------------------------------------------===//
// TBUFFER_STORE_FORMAT_*, addr64=0
class MTBUF_StoreResource <ValueType vt, int num_channels, MTBUF opcode> : Pat<
- (SItbuffer_store i128:$rsrc, vt:$vdata, num_channels, i32:$vaddr,
+ (SItbuffer_store v4i32:$rsrc, vt:$vdata, num_channels, i32:$vaddr,
i32:$soffset, imm:$inst_offset, imm:$dfmt,
imm:$nfmt, imm:$offen, imm:$idxen,
imm:$glc, imm:$slc, imm:$tfe),
@@ -2066,90 +2723,198 @@ def : MTBUF_StoreResource <v2i32, 2, TBUFFER_STORE_FORMAT_XY>;
def : MTBUF_StoreResource <v4i32, 3, TBUFFER_STORE_FORMAT_XYZ>;
def : MTBUF_StoreResource <v4i32, 4, TBUFFER_STORE_FORMAT_XYZW>;
+let SubtargetPredicate = isCI in {
+
+// Sea Islands new arithmetic instructions
+let neverHasSideEffects = 1 in {
+defm V_TRUNC_F64 : VOP1_64 <0x00000017, "V_TRUNC_F64",
+ [(set f64:$dst, (ftrunc f64:$src0))]
+>;
+defm V_CEIL_F64 : VOP1_64 <0x00000018, "V_CEIL_F64",
+ [(set f64:$dst, (fceil f64:$src0))]
+>;
+defm V_FLOOR_F64 : VOP1_64 <0x0000001A, "V_FLOOR_F64",
+ [(set f64:$dst, (ffloor f64:$src0))]
+>;
+defm V_RNDNE_F64 : VOP1_64 <0x00000019, "V_RNDNE_F64",
+ [(set f64:$dst, (frint f64:$src0))]
+>;
+
+defm V_QSAD_PK_U16_U8 : VOP3_32 <0x00000173, "V_QSAD_PK_U16_U8", []>;
+defm V_MQSAD_U16_U8 : VOP3_32 <0x000000172, "V_MQSAD_U16_U8", []>;
+defm V_MQSAD_U32_U8 : VOP3_32 <0x00000175, "V_MQSAD_U32_U8", []>;
+def V_MAD_U64_U32 : VOP3_64 <0x00000176, "V_MAD_U64_U32", []>;
+
+// XXX - Does this set VCC?
+def V_MAD_I64_I32 : VOP3_64 <0x00000177, "V_MAD_I64_I32", []>;
+} // End neverHasSideEffects = 1
+
+// Remaining instructions:
+// FLAT_*
+// S_CBRANCH_CDBGUSER
+// S_CBRANCH_CDBGSYS
+// S_CBRANCH_CDBGSYS_OR_USER
+// S_CBRANCH_CDBGSYS_AND_USER
+// S_DCACHE_INV_VOL
+// V_EXP_LEGACY_F32
+// V_LOG_LEGACY_F32
+// DS_NOP
+// DS_GWS_SEMA_RELEASE_ALL
+// DS_WRAP_RTN_B32
+// DS_CNDXCHG32_RTN_B64
+// DS_WRITE_B96
+// DS_WRITE_B128
+// DS_CONDXCHG32_RTN_B128
+// DS_READ_B96
+// DS_READ_B128
+// BUFFER_LOAD_DWORDX3
+// BUFFER_STORE_DWORDX3
+
+} // End isCI
+
+
/********** ====================== **********/
/********** Indirect addressing **********/
/********** ====================== **********/
-multiclass SI_INDIRECT_Pattern <ValueType vt, SI_INDIRECT_DST IndDst> {
+multiclass SI_INDIRECT_Pattern <ValueType vt, ValueType eltvt, SI_INDIRECT_DST IndDst> {
// 1. Extract with offset
def : Pat<
(vector_extract vt:$vec, (add i32:$idx, imm:$off)),
- (f32 (SI_INDIRECT_SRC (IMPLICIT_DEF), $vec, $idx, imm:$off))
+ (eltvt (SI_INDIRECT_SRC (IMPLICIT_DEF), $vec, $idx, imm:$off))
>;
// 2. Extract without offset
def : Pat<
(vector_extract vt:$vec, i32:$idx),
- (f32 (SI_INDIRECT_SRC (IMPLICIT_DEF), $vec, $idx, 0))
+ (eltvt (SI_INDIRECT_SRC (IMPLICIT_DEF), $vec, $idx, 0))
>;
// 3. Insert with offset
def : Pat<
- (vector_insert vt:$vec, f32:$val, (add i32:$idx, imm:$off)),
+ (vector_insert vt:$vec, eltvt:$val, (add i32:$idx, imm:$off)),
(IndDst (IMPLICIT_DEF), $vec, $idx, imm:$off, $val)
>;
// 4. Insert without offset
def : Pat<
- (vector_insert vt:$vec, f32:$val, i32:$idx),
+ (vector_insert vt:$vec, eltvt:$val, i32:$idx),
(IndDst (IMPLICIT_DEF), $vec, $idx, 0, $val)
>;
}
-defm : SI_INDIRECT_Pattern <v2f32, SI_INDIRECT_DST_V2>;
-defm : SI_INDIRECT_Pattern <v4f32, SI_INDIRECT_DST_V4>;
-defm : SI_INDIRECT_Pattern <v8f32, SI_INDIRECT_DST_V8>;
-defm : SI_INDIRECT_Pattern <v16f32, SI_INDIRECT_DST_V16>;
+defm : SI_INDIRECT_Pattern <v2f32, f32, SI_INDIRECT_DST_V2>;
+defm : SI_INDIRECT_Pattern <v4f32, f32, SI_INDIRECT_DST_V4>;
+defm : SI_INDIRECT_Pattern <v8f32, f32, SI_INDIRECT_DST_V8>;
+defm : SI_INDIRECT_Pattern <v16f32, f32, SI_INDIRECT_DST_V16>;
-/********** =============== **********/
-/********** Conditions **********/
-/********** =============== **********/
-
-def : Pat<
- (i1 (setcc f32:$src0, f32:$src1, SETO)),
- (V_CMP_O_F32_e64 $src0, $src1)
->;
-
-def : Pat<
- (i1 (setcc f32:$src0, f32:$src1, SETUO)),
- (V_CMP_U_F32_e64 $src0, $src1)
->;
+defm : SI_INDIRECT_Pattern <v2i32, i32, SI_INDIRECT_DST_V2>;
+defm : SI_INDIRECT_Pattern <v4i32, i32, SI_INDIRECT_DST_V4>;
+defm : SI_INDIRECT_Pattern <v8i32, i32, SI_INDIRECT_DST_V8>;
+defm : SI_INDIRECT_Pattern <v16i32, i32, SI_INDIRECT_DST_V16>;
//===----------------------------------------------------------------------===//
-// Miscellaneous Patterns
+// Conversion Patterns
//===----------------------------------------------------------------------===//
+def : Pat<(i32 (sext_inreg i32:$src, i1)),
+ (S_BFE_I32 i32:$src, 65536)>; // 0 | 1 << 16
+
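The 65536 immediate packs the S_BFE offset/width pair; going by the inline comment, the offset sits in the low bits and the width is shifted left by 16, so offset 0 with width 1 gives 1 << 16. A quick C++ check of that packing (assumed layout, for illustration):

#include <cstdint>

// Pack an S_BFE offset/width immediate as used by the sext_inreg patterns,
// assuming offset in the low bits and width << 16.
static constexpr uint32_t bfeImm(uint32_t offset, uint32_t width) {
  return offset | (width << 16);
}

static_assert(bfeImm(0, 1) == 65536, "i1 sext_inreg immediate");
// bfeImm(0, 8) and bfeImm(0, 16) would be the analogous constants if BFE
// were used for the wider fields.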
+// TODO: Match 64-bit BFE. SI has a 64-bit BFE, but it's scalar only so it
+// might not be worth the effort, and will need to expand to shifts when
+// fixing SGPR copies.
+
+// Handle sext_inreg in i64
def : Pat <
- (i64 (trunc i128:$x)),
+ (i64 (sext_inreg i64:$src, i1)),
(INSERT_SUBREG (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
- (i32 (EXTRACT_SUBREG $x, sub0)), sub0),
- (i32 (EXTRACT_SUBREG $x, sub1)), sub1)
+ (S_BFE_I32 (EXTRACT_SUBREG i64:$src, sub0), 65536), sub0), // 0 | 1 << 16
+ (S_MOV_B32 -1), sub1)
>;
def : Pat <
- (i32 (trunc i64:$a)),
- (EXTRACT_SUBREG $a, sub0)
+ (i64 (sext_inreg i64:$src, i8)),
+ (INSERT_SUBREG (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
+ (S_SEXT_I32_I8 (EXTRACT_SUBREG i64:$src, sub0)), sub0),
+ (S_MOV_B32 -1), sub1)
>;
def : Pat <
- (i1 (trunc i32:$a)),
- (V_CMP_EQ_I32_e64 (V_AND_B32_e32 (i32 1), $a), 1)
+ (i64 (sext_inreg i64:$src, i16)),
+ (INSERT_SUBREG (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
+ (S_SEXT_I32_I16 (EXTRACT_SUBREG i64:$src, sub0)), sub0),
+ (S_MOV_B32 -1), sub1)
>;
-// V_ADD_I32_e32/S_ADD_I32 produces carry in VCC/SCC. For the vector
-// case, the sgpr-copies pass will fix this to use the vector version.
+class ZExt_i64_i32_Pat <SDNode ext> : Pat <
+ (i64 (ext i32:$src)),
+ (INSERT_SUBREG (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $src, sub0),
+ (S_MOV_B32 0), sub1)
+>;
+
+class ZExt_i64_i1_Pat <SDNode ext> : Pat <
+ (i64 (ext i1:$src)),
+ (INSERT_SUBREG
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
+ (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src), sub0),
+ (S_MOV_B32 0), sub1)
+>;
+
+
+def : ZExt_i64_i32_Pat<zext>;
+def : ZExt_i64_i32_Pat<anyext>;
+def : ZExt_i64_i1_Pat<zext>;
+def : ZExt_i64_i1_Pat<anyext>;
+
def : Pat <
- (i32 (addc i32:$src0, i32:$src1)),
- (S_ADD_I32 $src0, $src1)
+ (i64 (sext i32:$src)),
+ (INSERT_SUBREG
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $src, sub0),
+ (S_ASHR_I32 $src, 31), sub1)
>;
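The i32-to-i64 extension patterns build the high dword explicitly: zero for zext/anyext, and a copy of the sign bit (arithmetic shift right by 31) for sext. A compact C++ restatement (sketch only):

#include <cstdint>

// High-dword construction used by the 64-bit extension patterns above.
static uint64_t zextTo64(uint32_t lo) {
  uint32_t hi = 0;                           // S_MOV_B32 0
  return ((uint64_t)hi << 32) | lo;
}

static uint64_t sextTo64(int32_t lo) {
  uint32_t hi = (uint32_t)(lo >> 31);        // S_ASHR_I32 $src, 31
  return ((uint64_t)hi << 32) | (uint32_t)lo;
}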
def : Pat <
- (or i64:$a, i64:$b),
+ (i64 (sext i1:$src)),
(INSERT_SUBREG
- (INSERT_SUBREG (IMPLICIT_DEF),
- (V_OR_B32_e32 (EXTRACT_SUBREG $a, sub0), (EXTRACT_SUBREG $b, sub0)), sub0),
- (V_OR_B32_e32 (EXTRACT_SUBREG $a, sub1), (EXTRACT_SUBREG $b, sub1)), sub1)
+ (INSERT_SUBREG
+ (i64 (IMPLICIT_DEF)),
+ (V_CNDMASK_B32_e64 0, -1, $src), sub0),
+ (V_CNDMASK_B32_e64 0, -1, $src), sub1)
+>;
+
+def : Pat <
+ (f32 (sint_to_fp i1:$src)),
+ (V_CNDMASK_B32_e64 (i32 0), CONST.FP32_NEG_ONE, $src)
+>;
+
+def : Pat <
+ (f32 (uint_to_fp i1:$src)),
+ (V_CNDMASK_B32_e64 (i32 0), CONST.FP32_ONE, $src)
+>;
+
+def : Pat <
+ (f64 (sint_to_fp i1:$src)),
+ (V_CVT_F64_I32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src))
+>;
+
+def : Pat <
+ (f64 (uint_to_fp i1:$src)),
+ (V_CVT_F64_U32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src))
+>;
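The i1-to-float patterns above reduce to selecting between two constants: 0.0 or -1.0 for the signed case (an i1 of 1 is treated as the value -1) and 0.0 or 1.0 for the unsigned case. The same thing in C++, purely as a sketch:

// i1 -> FP conversion as selected by the V_CNDMASK-based patterns above.
static float  i1ToF32Signed(bool b)   { return b ? -1.0f : 0.0f; }
static float  i1ToF32Unsigned(bool b) { return b ?  1.0f : 0.0f; }
static double i1ToF64Signed(bool b)   { return b ? -1.0  : 0.0;  }
static double i1ToF64Unsigned(bool b) { return b ?  1.0  : 0.0;  }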
+
+//===----------------------------------------------------------------------===//
+// Miscellaneous Patterns
+//===----------------------------------------------------------------------===//
+
+def : Pat <
+ (i32 (trunc i64:$a)),
+ (EXTRACT_SUBREG $a, sub0)
+>;
+
+def : Pat <
+ (i1 (trunc i32:$a)),
+ (V_CMP_EQ_I32_e64 (V_AND_B32_e32 (i32 1), $a), 1)
>;
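Truncating i32 to i1 keeps only bit 0 and rematerialises it as a condition value by masking and comparing with 1; in scalar terms this is just a low-bit test. A one-line C++ equivalent (illustrative):

#include <cstdint>

// i32 -> i1 truncation as in the pattern above: mask bit 0, compare to 1.
static bool truncToI1(uint32_t a) { return (a & 1u) == 1u; }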
//============================================================================//
diff --git a/contrib/llvm/lib/Target/R600/SIIntrinsics.td b/contrib/llvm/lib/Target/R600/SIIntrinsics.td
index 00e32c0..027a0a2 100644
--- a/contrib/llvm/lib/Target/R600/SIIntrinsics.td
+++ b/contrib/llvm/lib/Target/R600/SIIntrinsics.td
@@ -54,15 +54,132 @@ let TargetPrefix = "SI", isTarget = 1 in {
def int_SI_sendmsg : Intrinsic <[], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ // Fully-flexible SAMPLE instruction.
+ class SampleRaw : Intrinsic <
+ [llvm_v4f32_ty], // vdata(VGPR)
+ [llvm_anyint_ty, // vaddr(VGPR)
+ llvm_v8i32_ty, // rsrc(SGPR)
+ llvm_v4i32_ty, // sampler(SGPR)
+ llvm_i32_ty, // dmask(imm)
+ llvm_i32_ty, // unorm(imm)
+ llvm_i32_ty, // r128(imm)
+ llvm_i32_ty, // da(imm)
+ llvm_i32_ty, // glc(imm)
+ llvm_i32_ty, // slc(imm)
+ llvm_i32_ty, // tfe(imm)
+ llvm_i32_ty], // lwe(imm)
+ [IntrNoMem]>;
+
+ // Image instruction without a sampler.
+ class Image : Intrinsic <
+ [llvm_v4f32_ty], // vdata(VGPR)
+ [llvm_anyint_ty, // vaddr(VGPR)
+ llvm_v8i32_ty, // rsrc(SGPR)
+ llvm_i32_ty, // dmask(imm)
+ llvm_i32_ty, // unorm(imm)
+ llvm_i32_ty, // r128(imm)
+ llvm_i32_ty, // da(imm)
+ llvm_i32_ty, // glc(imm)
+ llvm_i32_ty, // slc(imm)
+ llvm_i32_ty, // tfe(imm)
+ llvm_i32_ty], // lwe(imm)
+ [IntrNoMem]>;
+
+ // Basic sample
+ def int_SI_image_sample : SampleRaw;
+ def int_SI_image_sample_cl : SampleRaw;
+ def int_SI_image_sample_d : SampleRaw;
+ def int_SI_image_sample_d_cl : SampleRaw;
+ def int_SI_image_sample_l : SampleRaw;
+ def int_SI_image_sample_b : SampleRaw;
+ def int_SI_image_sample_b_cl : SampleRaw;
+ def int_SI_image_sample_lz : SampleRaw;
+ def int_SI_image_sample_cd : SampleRaw;
+ def int_SI_image_sample_cd_cl : SampleRaw;
+
+ // Sample with comparison
+ def int_SI_image_sample_c : SampleRaw;
+ def int_SI_image_sample_c_cl : SampleRaw;
+ def int_SI_image_sample_c_d : SampleRaw;
+ def int_SI_image_sample_c_d_cl : SampleRaw;
+ def int_SI_image_sample_c_l : SampleRaw;
+ def int_SI_image_sample_c_b : SampleRaw;
+ def int_SI_image_sample_c_b_cl : SampleRaw;
+ def int_SI_image_sample_c_lz : SampleRaw;
+ def int_SI_image_sample_c_cd : SampleRaw;
+ def int_SI_image_sample_c_cd_cl : SampleRaw;
+
+ // Sample with offsets
+ def int_SI_image_sample_o : SampleRaw;
+ def int_SI_image_sample_cl_o : SampleRaw;
+ def int_SI_image_sample_d_o : SampleRaw;
+ def int_SI_image_sample_d_cl_o : SampleRaw;
+ def int_SI_image_sample_l_o : SampleRaw;
+ def int_SI_image_sample_b_o : SampleRaw;
+ def int_SI_image_sample_b_cl_o : SampleRaw;
+ def int_SI_image_sample_lz_o : SampleRaw;
+ def int_SI_image_sample_cd_o : SampleRaw;
+ def int_SI_image_sample_cd_cl_o : SampleRaw;
+
+ // Sample with comparison and offsets
+ def int_SI_image_sample_c_o : SampleRaw;
+ def int_SI_image_sample_c_cl_o : SampleRaw;
+ def int_SI_image_sample_c_d_o : SampleRaw;
+ def int_SI_image_sample_c_d_cl_o : SampleRaw;
+ def int_SI_image_sample_c_l_o : SampleRaw;
+ def int_SI_image_sample_c_b_o : SampleRaw;
+ def int_SI_image_sample_c_b_cl_o : SampleRaw;
+ def int_SI_image_sample_c_lz_o : SampleRaw;
+ def int_SI_image_sample_c_cd_o : SampleRaw;
+ def int_SI_image_sample_c_cd_cl_o : SampleRaw;
+
+ // Basic gather4
+ def int_SI_gather4 : SampleRaw;
+ def int_SI_gather4_cl : SampleRaw;
+ def int_SI_gather4_l : SampleRaw;
+ def int_SI_gather4_b : SampleRaw;
+ def int_SI_gather4_b_cl : SampleRaw;
+ def int_SI_gather4_lz : SampleRaw;
+
+ // Gather4 with comparison
+ def int_SI_gather4_c : SampleRaw;
+ def int_SI_gather4_c_cl : SampleRaw;
+ def int_SI_gather4_c_l : SampleRaw;
+ def int_SI_gather4_c_b : SampleRaw;
+ def int_SI_gather4_c_b_cl : SampleRaw;
+ def int_SI_gather4_c_lz : SampleRaw;
+
+ // Gather4 with offsets
+ def int_SI_gather4_o : SampleRaw;
+ def int_SI_gather4_cl_o : SampleRaw;
+ def int_SI_gather4_l_o : SampleRaw;
+ def int_SI_gather4_b_o : SampleRaw;
+ def int_SI_gather4_b_cl_o : SampleRaw;
+ def int_SI_gather4_lz_o : SampleRaw;
+
+ // Gather4 with comparison and offsets
+ def int_SI_gather4_c_o : SampleRaw;
+ def int_SI_gather4_c_cl_o : SampleRaw;
+ def int_SI_gather4_c_l_o : SampleRaw;
+ def int_SI_gather4_c_b_o : SampleRaw;
+ def int_SI_gather4_c_b_cl_o : SampleRaw;
+ def int_SI_gather4_c_lz_o : SampleRaw;
+
+ def int_SI_getlod : SampleRaw;
+
+ // Image intrinsics.
+ def int_SI_image_load : Image;
+ def int_SI_image_load_mip : Image;
+ def int_SI_getresinfo : Image;
+
+ // Deprecated image and sample intrinsics.
class Sample : Intrinsic <[llvm_v4f32_ty], [llvm_anyvector_ty, llvm_v32i8_ty, llvm_anyint_ty, llvm_i32_ty], [IntrNoMem]>;
def int_SI_sample : Sample;
def int_SI_sampleb : Sample;
def int_SI_sampled : Sample;
def int_SI_samplel : Sample;
-
def int_SI_imageload : Intrinsic <[llvm_v4i32_ty], [llvm_anyvector_ty, llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
-
def int_SI_resinfo : Intrinsic <[llvm_v4i32_ty], [llvm_i32_ty, llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
/* Interpolation Intrinsics */
diff --git a/contrib/llvm/lib/Target/R600/SILowerControlFlow.cpp b/contrib/llvm/lib/Target/R600/SILowerControlFlow.cpp
index ef867d3..75b5a5e 100644
--- a/contrib/llvm/lib/Target/R600/SILowerControlFlow.cpp
+++ b/contrib/llvm/lib/Target/R600/SILowerControlFlow.cpp
@@ -55,6 +55,7 @@
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/Constants.h"
using namespace llvm;
@@ -66,8 +67,8 @@ private:
static const unsigned SkipThreshold = 12;
static char ID;
- const TargetRegisterInfo *TRI;
- const TargetInstrInfo *TII;
+ const SIRegisterInfo *TRI;
+ const SIInstrInfo *TII;
bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);
@@ -85,17 +86,18 @@ private:
void Kill(MachineInstr &MI);
void Branch(MachineInstr &MI);
+ void InitM0ForLDS(MachineBasicBlock::iterator MI);
void LoadM0(MachineInstr &MI, MachineInstr *MovRel);
void IndirectSrc(MachineInstr &MI);
void IndirectDst(MachineInstr &MI);
public:
SILowerControlFlowPass(TargetMachine &tm) :
- MachineFunctionPass(ID), TRI(0), TII(0) { }
+ MachineFunctionPass(ID), TRI(nullptr), TII(nullptr) { }
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- const char *getPassName() const {
+ const char *getPassName() const override {
return "SI Lower control flow instructions";
}
@@ -109,23 +111,6 @@ FunctionPass *llvm::createSILowerControlFlowPass(TargetMachine &tm) {
return new SILowerControlFlowPass(tm);
}
-static bool isDS(unsigned Opcode) {
- switch(Opcode) {
- default: return false;
- case AMDGPU::DS_ADD_U32_RTN:
- case AMDGPU::DS_SUB_U32_RTN:
- case AMDGPU::DS_WRITE_B32:
- case AMDGPU::DS_WRITE_B8:
- case AMDGPU::DS_WRITE_B16:
- case AMDGPU::DS_READ_B32:
- case AMDGPU::DS_READ_I8:
- case AMDGPU::DS_READ_U8:
- case AMDGPU::DS_READ_I16:
- case AMDGPU::DS_READ_U16:
- return true;
- }
-}
-
bool SILowerControlFlowPass::shouldSkip(MachineBasicBlock *From,
MachineBasicBlock *To) {
@@ -162,7 +147,7 @@ void SILowerControlFlowPass::SkipIfDead(MachineInstr &MI) {
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc DL = MI.getDebugLoc();
- if (MBB.getParent()->getInfo<SIMachineFunctionInfo>()->ShaderType !=
+ if (MBB.getParent()->getInfo<SIMachineFunctionInfo>()->getShaderType() !=
ShaderType::PIXEL ||
!shouldSkip(&MBB, &MBB.getParent()->back()))
return;
@@ -302,33 +287,50 @@ void SILowerControlFlowPass::EndCf(MachineInstr &MI) {
}
void SILowerControlFlowPass::Branch(MachineInstr &MI) {
- MachineBasicBlock *Next = MI.getParent()->getNextNode();
- MachineBasicBlock *Target = MI.getOperand(0).getMBB();
- if (Target == Next)
+ if (MI.getOperand(0).getMBB() == MI.getParent()->getNextNode())
MI.eraseFromParent();
- else
- assert(0);
+
+ // If these aren't equal, this is probably an infinite loop.
}
void SILowerControlFlowPass::Kill(MachineInstr &MI) {
-
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc DL = MI.getDebugLoc();
-
- // Kill is only allowed in pixel / geometry shaders
- assert(MBB.getParent()->getInfo<SIMachineFunctionInfo>()->ShaderType ==
- ShaderType::PIXEL ||
- MBB.getParent()->getInfo<SIMachineFunctionInfo>()->ShaderType ==
- ShaderType::GEOMETRY);
-
- // Clear this pixel from the exec mask if the operand is negative
- BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32), AMDGPU::VCC)
- .addImm(0)
- .addOperand(MI.getOperand(0));
+ const MachineOperand &Op = MI.getOperand(0);
+
+#ifndef NDEBUG
+ const SIMachineFunctionInfo *MFI
+ = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
+ // Kill is only allowed in pixel / geometry shaders.
+ assert(MFI->getShaderType() == ShaderType::PIXEL ||
+ MFI->getShaderType() == ShaderType::GEOMETRY);
+#endif
+
+ // Clear this thread from the exec mask if the operand is negative
+ if ((Op.isImm() || Op.isFPImm())) {
+ // Constant operand: Set exec mask to 0 or do nothing
+ if (Op.isImm() ? (Op.getImm() & 0x80000000) :
+ Op.getFPImm()->isNegative()) {
+ BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
+ .addImm(0);
+ }
+ } else {
+ BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32), AMDGPU::VCC)
+ .addImm(0)
+ .addOperand(Op);
+ }
MI.eraseFromParent();
}
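The constant fast path in Kill() checks bit 0x80000000 of the raw immediate because the integer-immediate case carries what is effectively a float bit pattern, and a negative float (including -0.0) has bit 31 set. A tiny C++ illustration of the same test (not part of the pass):

#include <cstdint>
#include <cstring>

// Sign-bit test applied to constant kill operands: true when bit 31 of the
// float's encoding is set, i.e. the constant is negative or -0.0.
static bool constantKillsExec(float imm) {
  uint32_t bits;
  std::memcpy(&bits, &imm, sizeof(bits));
  return (bits & 0x80000000u) != 0;
}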
+/// The m0 register stores the maximum allowable address for LDS reads and
+/// writes. Its value must be at least the size in bytes of LDS allocated by
+/// the shader. For simplicity, we set it to the maximum possible value.
+void SILowerControlFlowPass::InitM0ForLDS(MachineBasicBlock::iterator MI) {
+ BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B32),
+ AMDGPU::M0).addImm(0xffffffff);
+}
+
void SILowerControlFlowPass::LoadM0(MachineInstr &MI, MachineInstr *MovRel) {
MachineBasicBlock &MBB = *MI.getParent();
@@ -342,51 +344,57 @@ void SILowerControlFlowPass::LoadM0(MachineInstr &MI, MachineInstr *MovRel) {
BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
.addReg(Idx);
MBB.insert(I, MovRel);
- MI.eraseFromParent();
- return;
- }
+ } else {
- assert(AMDGPU::SReg_64RegClass.contains(Save));
- assert(AMDGPU::VReg_32RegClass.contains(Idx));
+ assert(AMDGPU::SReg_64RegClass.contains(Save));
+ assert(AMDGPU::VReg_32RegClass.contains(Idx));
- // Save the EXEC mask
- BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), Save)
- .addReg(AMDGPU::EXEC);
+ // Save the EXEC mask
+ BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), Save)
+ .addReg(AMDGPU::EXEC);
- // Read the next variant into VCC (lower 32 bits) <- also loop target
- BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32_e32), AMDGPU::VCC)
- .addReg(Idx);
+ // Read the next variant into VCC (lower 32 bits) <- also loop target
+ BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
+ AMDGPU::VCC_LO)
+ .addReg(Idx);
- // Move index from VCC into M0
- BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
- .addReg(AMDGPU::VCC);
+ // Move index from VCC into M0
+ BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
+ .addReg(AMDGPU::VCC_LO);
- // Compare the just read M0 value to all possible Idx values
- BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32), AMDGPU::VCC)
- .addReg(AMDGPU::M0)
- .addReg(Idx);
+ // Compare the just read M0 value to all possible Idx values
+ BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32), AMDGPU::VCC)
+ .addReg(AMDGPU::M0)
+ .addReg(Idx);
- // Update EXEC, save the original EXEC value to VCC
- BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
- .addReg(AMDGPU::VCC);
+ // Update EXEC, save the original EXEC value to VCC
+ BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
+ .addReg(AMDGPU::VCC);
- // Do the actual move
- MBB.insert(I, MovRel);
+ // Do the actual move
+ MBB.insert(I, MovRel);
- // Update EXEC, switch all done bits to 0 and all todo bits to 1
- BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
- .addReg(AMDGPU::EXEC)
- .addReg(AMDGPU::VCC);
+ // Update EXEC, switch all done bits to 0 and all todo bits to 1
+ BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
+ .addReg(AMDGPU::EXEC)
+ .addReg(AMDGPU::VCC);
- // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover
- BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
- .addImm(-7)
- .addReg(AMDGPU::EXEC);
+ // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover
+ BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
+ .addImm(-7)
+ .addReg(AMDGPU::EXEC);
- // Restore EXEC
- BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
- .addReg(Save);
+ // Restore EXEC
+ BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
+ .addReg(Save);
+ }
+ // FIXME: Are there any values other than the LDS address clamp that need to
+ // be stored in the m0 register and may be live for more than a few
+ // instructions? If so, we should save the m0 register at the beginning
+ // of this function and restore it here.
+ // FIXME: Add support for LDS direct loads.
+ InitM0ForLDS(&MI);
MI.eraseFromParent();
}
@@ -434,8 +442,8 @@ void SILowerControlFlowPass::IndirectDst(MachineInstr &MI) {
}
bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
- TII = MF.getTarget().getInstrInfo();
- TRI = MF.getTarget().getRegisterInfo();
+ TII = static_cast<const SIInstrInfo*>(MF.getTarget().getInstrInfo());
+ TRI = static_cast<const SIRegisterInfo*>(MF.getTarget().getRegisterInfo());
SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
bool HaveKill = false;
@@ -447,12 +455,12 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
BI != BE; ++BI) {
MachineBasicBlock &MBB = *BI;
- for (MachineBasicBlock::iterator I = MBB.begin(), Next = llvm::next(I);
- I != MBB.end(); I = Next) {
+ MachineBasicBlock::iterator I, Next;
+ for (I = MBB.begin(); I != MBB.end(); I = Next) {
+ Next = std::next(I);
- Next = llvm::next(I);
MachineInstr &MI = *I;
- if (isDS(MI.getOpcode())) {
+ if (TII->isDS(MI.getOpcode())) {
NeedM0 = true;
NeedWQM = true;
}
@@ -531,11 +539,10 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
MachineBasicBlock &MBB = MF.front();
// Initialize M0 to a value that won't cause LDS access to be discarded
// due to offset clamping
- BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_MOV_B32),
- AMDGPU::M0).addImm(0xffffffff);
+ InitM0ForLDS(MBB.getFirstNonPHI());
}
- if (NeedWQM && MFI->ShaderType == ShaderType::PIXEL) {
+ if (NeedWQM && MFI->getShaderType() == ShaderType::PIXEL) {
MachineBasicBlock &MBB = MF.front();
BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
AMDGPU::EXEC).addReg(AMDGPU::EXEC);
diff --git a/contrib/llvm/lib/Target/R600/SILowerI1Copies.cpp b/contrib/llvm/lib/Target/R600/SILowerI1Copies.cpp
new file mode 100644
index 0000000..db19235
--- /dev/null
+++ b/contrib/llvm/lib/Target/R600/SILowerI1Copies.cpp
@@ -0,0 +1,154 @@
+//===-- SILowerI1Copies.cpp - Lower I1 Copies -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+/// i1 values are usually inserted by the CFG Structurize pass and they are
+/// unique in that they can be copied from VALU to SALU registers.
+/// This is not possible for any other value type. Since there are no
+/// MOV instructions for i1, we need to use V_CMP_* and V_CNDMASK to move the i1.
+///
+//===----------------------------------------------------------------------===//
+//
+
+#define DEBUG_TYPE "si-i1-copies"
+#include "AMDGPU.h"
+#include "SIInstrInfo.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+namespace {
+
+class SILowerI1Copies : public MachineFunctionPass {
+public:
+ static char ID;
+
+public:
+ SILowerI1Copies() : MachineFunctionPass(ID) {
+ initializeSILowerI1CopiesPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual bool runOnMachineFunction(MachineFunction &MF) override;
+
+ virtual const char *getPassName() const override {
+ return "SI Lower il Copies";
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<MachineDominatorTree>();
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+};
+
+} // End anonymous namespace.
+
+INITIALIZE_PASS_BEGIN(SILowerI1Copies, DEBUG_TYPE,
+ "SI Lower il Copies", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_END(SILowerI1Copies, DEBUG_TYPE,
+ "SI Lower il Copies", false, false)
+
+char SILowerI1Copies::ID = 0;
+
+char &llvm::SILowerI1CopiesID = SILowerI1Copies::ID;
+
+FunctionPass *llvm::createSILowerI1CopiesPass() {
+ return new SILowerI1Copies();
+}
+
+bool SILowerI1Copies::runOnMachineFunction(MachineFunction &MF) {
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
+ MF.getTarget().getInstrInfo());
+ const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
+ std::vector<unsigned> I1Defs;
+
+ for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
+ BI != BE; ++BI) {
+
+ MachineBasicBlock &MBB = *BI;
+ MachineBasicBlock::iterator I, Next;
+ for (I = MBB.begin(); I != MBB.end(); I = Next) {
+ Next = std::next(I);
+ MachineInstr &MI = *I;
+
+ if (MI.getOpcode() == AMDGPU::V_MOV_I1) {
+ I1Defs.push_back(MI.getOperand(0).getReg());
+ MI.setDesc(TII->get(AMDGPU::V_MOV_B32_e32));
+ continue;
+ }
+
+ if (MI.getOpcode() == AMDGPU::V_AND_I1) {
+ I1Defs.push_back(MI.getOperand(0).getReg());
+ MI.setDesc(TII->get(AMDGPU::V_AND_B32_e32));
+ continue;
+ }
+
+ if (MI.getOpcode() == AMDGPU::V_OR_I1) {
+ I1Defs.push_back(MI.getOperand(0).getReg());
+ MI.setDesc(TII->get(AMDGPU::V_OR_B32_e32));
+ continue;
+ }
+
+ if (MI.getOpcode() == AMDGPU::V_XOR_I1) {
+ I1Defs.push_back(MI.getOperand(0).getReg());
+ MI.setDesc(TII->get(AMDGPU::V_XOR_B32_e32));
+ continue;
+ }
+
+ if (MI.getOpcode() != AMDGPU::COPY ||
+ !TargetRegisterInfo::isVirtualRegister(MI.getOperand(0).getReg()) ||
+ !TargetRegisterInfo::isVirtualRegister(MI.getOperand(1).getReg()))
+ continue;
+
+
+ const TargetRegisterClass *DstRC =
+ MRI.getRegClass(MI.getOperand(0).getReg());
+ const TargetRegisterClass *SrcRC =
+ MRI.getRegClass(MI.getOperand(1).getReg());
+
+ if (DstRC == &AMDGPU::VReg_1RegClass &&
+ TRI->getCommonSubClass(SrcRC, &AMDGPU::SGPR_64RegClass)) {
+ I1Defs.push_back(MI.getOperand(0).getReg());
+ BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(AMDGPU::V_CNDMASK_B32_e64))
+ .addOperand(MI.getOperand(0))
+ .addImm(0)
+ .addImm(-1)
+ .addOperand(MI.getOperand(1))
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0);
+ MI.eraseFromParent();
+ } else if (TRI->getCommonSubClass(DstRC, &AMDGPU::SGPR_64RegClass) &&
+ SrcRC == &AMDGPU::VReg_1RegClass) {
+ BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(AMDGPU::V_CMP_NE_I32_e64))
+ .addOperand(MI.getOperand(0))
+ .addImm(0)
+ .addOperand(MI.getOperand(1))
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0);
+ MI.eraseFromParent();
+ }
+ }
+ }
+
+ for (unsigned Reg : I1Defs)
+ MRI.setRegClass(Reg, &AMDGPU::VReg_32RegClass);
+
+ return false;
+}
diff --git a/contrib/llvm/lib/Target/R600/SIMachineFunctionInfo.cpp b/contrib/llvm/lib/Target/R600/SIMachineFunctionInfo.cpp
index 071f9fa..c53a7e1 100644
--- a/contrib/llvm/lib/Target/R600/SIMachineFunctionInfo.cpp
+++ b/contrib/llvm/lib/Target/R600/SIMachineFunctionInfo.cpp
@@ -10,6 +10,13 @@
#include "SIMachineFunctionInfo.h"
+#include "SIInstrInfo.h"
+#include "SIRegisterInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LLVMContext.h"
+
+#define MAX_LANES 64
using namespace llvm;
@@ -19,4 +26,72 @@ void SIMachineFunctionInfo::anchor() {}
SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
: AMDGPUMachineFunction(MF),
- PSInputAddr(0) { }
+ PSInputAddr(0),
+ SpillTracker(),
+ NumUserSGPRs(0) { }
+
+static unsigned createLaneVGPR(MachineRegisterInfo &MRI, MachineFunction *MF) {
+ unsigned VGPR = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
+
+ // We need to add this register as live out for the function, in order to
+ // have the live range calculated directly.
+ //
+ // When register spilling begins, we have already calculated the
+ // live intervals for all the registers. Since we are spilling SGPRs to
+ // VGPRs, we need to update the Lane VGPR's live interval every time we
+ // spill or restore a register.
+ //
+ // Unfortunately, there is no good way to update the live interval as
+ // the TargetInstrInfo callbacks for spilling and restoring don't give
+ // us access to the live interval information.
+ //
+ // We are lucky, though, because the InlineSpiller calls
+ // LiveRangeEdit::calculateRegClassAndHint() which iterates through
+ // all the new registers that have been created when restoring a register
+ // and calls LiveIntervals::getInterval(), which creates and computes
+ // the live interval for the newly created register. However, once this
+ // live interval is created, it doesn't change, and since we usually
+ // reuse the Lane VGPR multiple times, any uses after the first aren't
+ // added to the live interval.
+ //
+ // To work around this, we add Lane VGPRs to the function's live-out list,
+ // so that we can guarantee their live ranges will cover all of their uses.
+
+ for (MachineBasicBlock &MBB : *MF) {
+ if (MBB.back().getOpcode() == AMDGPU::S_ENDPGM) {
+ MBB.back().addOperand(*MF, MachineOperand::CreateReg(VGPR, false, true));
+ return VGPR;
+ }
+ }
+
+ LLVMContext &Ctx = MF->getFunction()->getContext();
+ Ctx.emitError("Could not find S_ENDPGM instruction.");
+
+ return VGPR;
+}
+
+unsigned SIMachineFunctionInfo::RegSpillTracker::reserveLanes(
+ MachineRegisterInfo &MRI, MachineFunction *MF, unsigned NumRegs) {
+ unsigned StartLane = CurrentLane;
+ CurrentLane += NumRegs;
+ if (!LaneVGPR) {
+ LaneVGPR = createLaneVGPR(MRI, MF);
+ } else {
+ if (CurrentLane >= MAX_LANES) {
+ StartLane = CurrentLane = 0;
+ LaneVGPR = createLaneVGPR(MRI, MF);
+ }
+ }
+ return StartLane;
+}
+
+void SIMachineFunctionInfo::RegSpillTracker::addSpilledReg(unsigned FrameIndex,
+ unsigned Reg,
+ int Lane) {
+ SpilledRegisters[FrameIndex] = SpilledReg(Reg, Lane);
+}
+
+const SIMachineFunctionInfo::SpilledReg&
+SIMachineFunctionInfo::RegSpillTracker::getSpilledReg(unsigned FrameIndex) {
+ return SpilledRegisters[FrameIndex];
+}
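
RegSpillTracker above packs spilled SGPRs into the 64 lanes of a single VGPR and switches to a fresh lane VGPR once the current one runs out of lanes. The allocation policy of reserveLanes() can be mirrored in a small standalone sketch (assuming MAX_LANES is 64 as in this patch; VGPR creation is stubbed out with a counter):

    // Standalone sketch of the lane reservation policy; illustrative only.
    struct LaneAllocator {
      static const unsigned MaxLanes = 64; // mirrors MAX_LANES in the patch
      unsigned CurrentLane = 0;
      unsigned LaneVGPR = 0;               // 0 == no lane VGPR allocated yet
      unsigned NextVGPR = 1;               // stand-in for createLaneVGPR()

      // Reserve NumRegs consecutive lanes and return the first one.
      unsigned reserveLanes(unsigned NumRegs = 1) {
        unsigned StartLane = CurrentLane;
        CurrentLane += NumRegs;
        if (!LaneVGPR) {
          LaneVGPR = NextVGPR++;           // first spill: allocate a VGPR
        } else if (CurrentLane >= MaxLanes) {
          StartLane = CurrentLane = 0;     // wrap, exactly as in the patch
          LaneVGPR = NextVGPR++;           // and move to a fresh VGPR
        }
        return StartLane;
      }
    };
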
diff --git a/contrib/llvm/lib/Target/R600/SIMachineFunctionInfo.h b/contrib/llvm/lib/Target/R600/SIMachineFunctionInfo.h
index 2f1961c..9684d28 100644
--- a/contrib/llvm/lib/Target/R600/SIMachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/R600/SIMachineFunctionInfo.h
@@ -16,16 +16,50 @@
#define SIMACHINEFUNCTIONINFO_H_
#include "AMDGPUMachineFunction.h"
+#include <map>
namespace llvm {
+class MachineRegisterInfo;
+
/// This class keeps track of the SPI_SP_INPUT_ADDR config register, which
/// tells the hardware which interpolation parameters to load.
class SIMachineFunctionInfo : public AMDGPUMachineFunction {
- virtual void anchor();
+ void anchor() override;
public:
+
+ struct SpilledReg {
+ unsigned VGPR;
+ int Lane;
+ SpilledReg(unsigned R, int L) : VGPR (R), Lane (L) { }
+ SpilledReg() : VGPR(0), Lane(-1) { }
+ bool hasLane() { return Lane != -1;}
+ };
+
+ struct RegSpillTracker {
+ private:
+ unsigned CurrentLane;
+ std::map<unsigned, SpilledReg> SpilledRegisters;
+ public:
+ unsigned LaneVGPR;
+ RegSpillTracker() : CurrentLane(0), SpilledRegisters(), LaneVGPR(0) { }
+ /// \p NumRegs The number of consecutive registers that need to be spilled.
+ /// This function will ensure that all registers are stored in
+ /// the same VGPR.
+ /// \returns The lane to be used for storing the first register.
+ unsigned reserveLanes(MachineRegisterInfo &MRI, MachineFunction *MF,
+ unsigned NumRegs = 1);
+ void addSpilledReg(unsigned FrameIndex, unsigned Reg, int Lane = -1);
+ const SpilledReg& getSpilledReg(unsigned FrameIndex);
+ bool programSpillsRegisters() { return !SpilledRegisters.empty(); }
+ };
+
+ // SIMachineFunctionInfo definition
+
SIMachineFunctionInfo(const MachineFunction &MF);
unsigned PSInputAddr;
+ struct RegSpillTracker SpillTracker;
+ unsigned NumUserSGPRs;
};
} // End namespace llvm
diff --git a/contrib/llvm/lib/Target/R600/SIRegisterInfo.cpp b/contrib/llvm/lib/Target/R600/SIRegisterInfo.cpp
index ed0bbaf..2a9a2ac 100644
--- a/contrib/llvm/lib/Target/R600/SIRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/R600/SIRegisterInfo.cpp
@@ -14,22 +14,23 @@
#include "SIRegisterInfo.h"
-#include "AMDGPUTargetMachine.h"
+#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
+#include "SIMachineFunctionInfo.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
using namespace llvm;
-SIRegisterInfo::SIRegisterInfo(AMDGPUTargetMachine &tm)
-: AMDGPURegisterInfo(tm),
- TM(tm)
+SIRegisterInfo::SIRegisterInfo(const AMDGPUSubtarget &st)
+: AMDGPURegisterInfo(st)
{ }
BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
Reserved.set(AMDGPU::EXEC);
Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);
- const SIInstrInfo *TII = static_cast<const SIInstrInfo*>(TM.getInstrInfo());
- TII->reserveIndirectRegisters(Reserved, MF);
return Reserved;
}
@@ -38,12 +39,27 @@ unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
return RC->getNumRegs();
}
-const TargetRegisterClass *
-SIRegisterInfo::getISARegClass(const TargetRegisterClass * rc) const {
- switch (rc->getID()) {
- case AMDGPU::GPRF32RegClassID:
- return &AMDGPU::VReg_32RegClass;
- default: return rc;
+bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
+ return Fn.getFrameInfo()->hasStackObjects();
+}
+
+void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
+ int SPAdj, unsigned FIOperandNum,
+ RegScavenger *RS) const {
+ MachineFunction *MF = MI->getParent()->getParent();
+ MachineFrameInfo *FrameInfo = MF->getFrameInfo();
+ const SIInstrInfo *TII = static_cast<const SIInstrInfo*>(ST.getInstrInfo());
+ MachineOperand &FIOp = MI->getOperand(FIOperandNum);
+ int Index = MI->getOperand(FIOperandNum).getIndex();
+ int64_t Offset = FrameInfo->getObjectOffset(Index);
+
+ FIOp.ChangeToImmediate(Offset);
+ if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
+ unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VReg_32RegClass, MI, SPAdj);
+ BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
+ TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
+ .addImm(Offset);
+ FIOp.ChangeToRegister(TmpReg, false);
}
}
@@ -56,7 +72,7 @@ const TargetRegisterClass * SIRegisterInfo::getCFGStructurizerRegClass(
}
unsigned SIRegisterInfo::getHWRegIndex(unsigned Reg) const {
- return getEncodingValue(Reg);
+ return getEncodingValue(Reg) & 0xff;
}
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
@@ -71,13 +87,12 @@ const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
&AMDGPU::SReg_256RegClass
};
- for (unsigned i = 0, e = sizeof(BaseClasses) /
- sizeof(const TargetRegisterClass*); i != e; ++i) {
- if (BaseClasses[i]->contains(Reg)) {
- return BaseClasses[i];
+ for (const TargetRegisterClass *BaseClass : BaseClasses) {
+ if (BaseClass->contains(Reg)) {
+ return BaseClass;
}
}
- return NULL;
+ return nullptr;
}
bool SIRegisterInfo::isSGPRClass(const TargetRegisterClass *RC) const {
@@ -113,7 +128,7 @@ const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
} else if (getCommonSubClass(SRC, &AMDGPU::SReg_512RegClass)) {
return &AMDGPU::VReg_512RegClass;
}
- return NULL;
+ return nullptr;
}
const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
@@ -122,10 +137,52 @@ const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
return RC;
// If this register has a sub-register, we can safely assume it is a 32-bit
- // register, becuase all of SI's sub-registers are 32-bit.
+ // register, because all of SI's sub-registers are 32-bit.
if (isSGPRClass(RC)) {
return &AMDGPU::SGPR_32RegClass;
} else {
return &AMDGPU::VGPR_32RegClass;
}
}
+
+unsigned SIRegisterInfo::getPhysRegSubReg(unsigned Reg,
+ const TargetRegisterClass *SubRC,
+ unsigned Channel) const {
+ unsigned Index = getHWRegIndex(Reg);
+ return SubRC->getRegister(Index + Channel);
+}
+
+bool SIRegisterInfo::regClassCanUseImmediate(int RCID) const {
+ switch (RCID) {
+ default: return false;
+ case AMDGPU::SSrc_32RegClassID:
+ case AMDGPU::SSrc_64RegClassID:
+ case AMDGPU::VSrc_32RegClassID:
+ case AMDGPU::VSrc_64RegClassID:
+ return true;
+ }
+}
+
+bool SIRegisterInfo::regClassCanUseImmediate(
+ const TargetRegisterClass *RC) const {
+ return regClassCanUseImmediate(RC->getID());
+}
+
+unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
+ enum PreloadedValue Value) const {
+
+ const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
+ switch (Value) {
+ case SIRegisterInfo::TGID_X:
+ return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 0);
+ case SIRegisterInfo::TGID_Y:
+ return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 1);
+ case SIRegisterInfo::TGID_Z:
+ return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 2);
+ case SIRegisterInfo::SCRATCH_WAVE_OFFSET:
+ return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 4);
+ case SIRegisterInfo::SCRATCH_PTR:
+ return AMDGPU::SGPR2_SGPR3;
+ }
+ llvm_unreachable("unexpected preloaded value type");
+}
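
getPreloadedValue() above resolves each preloaded value to a physical SGPR at a fixed offset past the user SGPRs (with SCRATCH_PTR pinned to SGPR2_SGPR3). Expressed purely as register indices, the offset scheme looks like this (a hedged sketch, not the AMDGPU register API):

    // Illustrative index computation for the preloaded-value SGPRs.
    enum PreloadedValue { TGID_X, TGID_Y, TGID_Z, SCRATCH_WAVE_OFFSET };

    static unsigned preloadedSGPRIndex(unsigned NumUserSGPRs,
                                       PreloadedValue Value) {
      switch (Value) {
      case TGID_X:              return NumUserSGPRs + 0;
      case TGID_Y:              return NumUserSGPRs + 1;
      case TGID_Z:              return NumUserSGPRs + 2;
      case SCRATCH_WAVE_OFFSET: return NumUserSGPRs + 4;
      }
      return ~0u; // not reached for the values above
    }

So with two user SGPRs, for example, the workgroup IDs land in SGPR2..SGPR4 and the scratch wave offset in SGPR6.
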
diff --git a/contrib/llvm/lib/Target/R600/SIRegisterInfo.h b/contrib/llvm/lib/Target/R600/SIRegisterInfo.h
index 8148f7f..5d0235c 100644
--- a/contrib/llvm/lib/Target/R600/SIRegisterInfo.h
+++ b/contrib/llvm/lib/Target/R600/SIRegisterInfo.h
@@ -20,29 +20,26 @@
namespace llvm {
-class AMDGPUTargetMachine;
-
struct SIRegisterInfo : public AMDGPURegisterInfo {
- AMDGPUTargetMachine &TM;
- SIRegisterInfo(AMDGPUTargetMachine &tm);
+ SIRegisterInfo(const AMDGPUSubtarget &st);
+
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
- virtual BitVector getReservedRegs(const MachineFunction &MF) const;
+ unsigned getRegPressureLimit(const TargetRegisterClass *RC,
+ MachineFunction &MF) const override;
- virtual unsigned getRegPressureLimit(const TargetRegisterClass *RC,
- MachineFunction &MF) const;
+ bool requiresRegisterScavenging(const MachineFunction &Fn) const override;
- /// \param RC is an AMDIL reg class.
- ///
- /// \returns the SI register class that is equivalent to \p RC.
- virtual const TargetRegisterClass *
- getISARegClass(const TargetRegisterClass *RC) const;
+ void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
+ unsigned FIOperandNum,
+ RegScavenger *RS) const override;
/// \brief get the register class of the specified type to use in the
/// CFGStructurizer
- virtual const TargetRegisterClass * getCFGStructurizerRegClass(MVT VT) const;
+ const TargetRegisterClass * getCFGStructurizerRegClass(MVT VT) const override;
- virtual unsigned getHWRegIndex(unsigned Reg) const;
+ unsigned getHWRegIndex(unsigned Reg) const override;
/// \brief Return the 'base' register class for this register.
/// e.g. SGPR0 => SReg_32, VGPR => VReg_32 SGPR0_SGPR1 -> SReg_32, etc.
@@ -63,6 +60,33 @@ struct SIRegisterInfo : public AMDGPURegisterInfo {
/// be returned.
const TargetRegisterClass *getSubRegClass(const TargetRegisterClass *RC,
unsigned SubIdx) const;
+
+ /// \p Channel This is the register channel (e.g. a value from 0-16), not the
+ /// SubReg index.
+ /// \returns The sub-register of Reg that is in Channel.
+ unsigned getPhysRegSubReg(unsigned Reg, const TargetRegisterClass *SubRC,
+ unsigned Channel) const;
+
+ /// \returns True if operands defined with this register class can accept
+ /// inline immediates.
+ bool regClassCanUseImmediate(int RCID) const;
+
+ /// \returns True if operands defined with this register class can accept
+ /// inline immediates.
+ bool regClassCanUseImmediate(const TargetRegisterClass *RC) const;
+
+ enum PreloadedValue {
+ TGID_X,
+ TGID_Y,
+ TGID_Z,
+ SCRATCH_WAVE_OFFSET,
+ SCRATCH_PTR
+ };
+
+ /// \brief Returns the physical register that \p Value is stored in.
+ unsigned getPreloadedValue(const MachineFunction &MF,
+ enum PreloadedValue Value) const;
+
};
} // End namespace llvm
diff --git a/contrib/llvm/lib/Target/R600/SIRegisterInfo.td b/contrib/llvm/lib/Target/R600/SIRegisterInfo.td
index 49bdbc9..8974b63 100644
--- a/contrib/llvm/lib/Target/R600/SIRegisterInfo.td
+++ b/contrib/llvm/lib/Target/R600/SIRegisterInfo.td
@@ -17,7 +17,16 @@ class SIReg <string n, bits<16> encoding = 0> : Register<n> {
}
// Special Registers
-def VCC : SIReg<"VCC", 106>;
+def VCC_LO : SIReg<"vcc_lo", 106>;
+def VCC_HI : SIReg<"vcc_hi", 107>;
+
+// VCC for 64-bit instructions
+def VCC : RegisterWithSubRegs<"VCC", [VCC_LO, VCC_HI]> {
+ let Namespace = "AMDGPU";
+ let SubRegIndices = [sub0, sub1];
+ let HWEncoding = 106;
+}
+
def EXEC : SIReg<"EXEC", 126>;
def SCC : SIReg<"SCC", 253>;
def M0 : SIReg <"M0", 124>;
@@ -150,7 +159,7 @@ def M0Reg : RegisterClass<"AMDGPU", [i32], 32, (add M0)>;
// Register class for all scalar registers (SGPRs + Special Registers)
def SReg_32 : RegisterClass<"AMDGPU", [f32, i32], 32,
- (add SGPR_32, M0Reg)
+ (add SGPR_32, M0Reg, VCC_LO)
>;
def SGPR_64 : RegisterClass<"AMDGPU", [v2i32, i64], 64, (add SGPR_64Regs)>;
@@ -159,7 +168,7 @@ def SReg_64 : RegisterClass<"AMDGPU", [v2i32, i64, i1], 64,
(add SGPR_64Regs, VCCReg, EXECReg)
>;
-def SReg_128 : RegisterClass<"AMDGPU", [i128, v4i32], 128, (add SGPR_128)>;
+def SReg_128 : RegisterClass<"AMDGPU", [v4i32, v16i8], 128, (add SGPR_128)>;
def SReg_256 : RegisterClass<"AMDGPU", [v32i8, v8i32, v8f32], 256, (add SGPR_256)>;
@@ -174,14 +183,16 @@ def VReg_96 : RegisterClass<"AMDGPU", [untyped], 96, (add VGPR_96)> {
let Size = 96;
}
-def VReg_128 : RegisterClass<"AMDGPU", [v4i32, v4f32, i128], 128, (add VGPR_128)>;
+def VReg_128 : RegisterClass<"AMDGPU", [v4i32, v4f32], 128, (add VGPR_128)>;
def VReg_256 : RegisterClass<"AMDGPU", [v32i8, v8i32, v8f32], 256, (add VGPR_256)>;
def VReg_512 : RegisterClass<"AMDGPU", [v16i32, v16f32], 512, (add VGPR_512)>;
+def VReg_1 : RegisterClass<"AMDGPU", [i1], 32, (add VGPR_32)>;
+
//===----------------------------------------------------------------------===//
-// [SV]Src_* register classes, can have either an immediate or an register
+// [SV]Src_(32|64) register classes, which can have either an immediate or a register
//===----------------------------------------------------------------------===//
def SSrc_32 : RegisterClass<"AMDGPU", [i32, f32], 32, (add SReg_32)>;
@@ -192,3 +203,9 @@ def VSrc_32 : RegisterClass<"AMDGPU", [i32, f32], 32, (add VReg_32, SReg_32)>;
def VSrc_64 : RegisterClass<"AMDGPU", [i64, f64], 64, (add VReg_64, SReg_64)>;
+//===----------------------------------------------------------------------===//
+// SGPR and VGPR register classes
+//===----------------------------------------------------------------------===//
+
+def VSrc_128 : RegisterClass<"AMDGPU", [v4i32, v4f32], 128,
+ (add VReg_128, SReg_128)>;
diff --git a/contrib/llvm/lib/Target/R600/SIShrinkInstructions.cpp b/contrib/llvm/lib/Target/R600/SIShrinkInstructions.cpp
new file mode 100644
index 0000000..745c4b6
--- /dev/null
+++ b/contrib/llvm/lib/Target/R600/SIShrinkInstructions.cpp
@@ -0,0 +1,194 @@
+//===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+/// The pass tries to use the 32-bit encoding for instructions when possible.
+//===----------------------------------------------------------------------===//
+//
+
+#include "AMDGPU.h"
+#include "SIInstrInfo.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetMachine.h"
+
+#define DEBUG_TYPE "si-shrink-instructions"
+
+STATISTIC(NumInstructionsShrunk,
+ "Number of 64-bit instruction reduced to 32-bit.");
+
+namespace llvm {
+ void initializeSIShrinkInstructionsPass(PassRegistry&);
+}
+
+using namespace llvm;
+
+namespace {
+
+class SIShrinkInstructions : public MachineFunctionPass {
+public:
+ static char ID;
+
+public:
+ SIShrinkInstructions() : MachineFunctionPass(ID) {
+ }
+
+ virtual bool runOnMachineFunction(MachineFunction &MF) override;
+
+ virtual const char *getPassName() const override {
+ return "SI Shrink Instructions";
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+};
+
+} // End anonymous namespace.
+
+INITIALIZE_PASS_BEGIN(SIShrinkInstructions, DEBUG_TYPE,
+ "SI Lower il Copies", false, false)
+INITIALIZE_PASS_END(SIShrinkInstructions, DEBUG_TYPE,
+ "SI Lower il Copies", false, false)
+
+char SIShrinkInstructions::ID = 0;
+
+FunctionPass *llvm::createSIShrinkInstructionsPass() {
+ return new SIShrinkInstructions();
+}
+
+static bool isVGPR(const MachineOperand *MO, const SIRegisterInfo &TRI,
+ const MachineRegisterInfo &MRI) {
+ if (!MO->isReg())
+ return false;
+
+ if (TargetRegisterInfo::isVirtualRegister(MO->getReg()))
+ return TRI.hasVGPRs(MRI.getRegClass(MO->getReg()));
+
+ return TRI.hasVGPRs(TRI.getPhysRegClass(MO->getReg()));
+}
+
+static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII,
+ const SIRegisterInfo &TRI,
+ const MachineRegisterInfo &MRI) {
+
+ const MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
+ // Can't shrink instructions with three operands.
+ if (Src2)
+ return false;
+
+ const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
+ const MachineOperand *Src1Mod =
+ TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
+
+ if (Src1 && (!isVGPR(Src1, TRI, MRI) || Src1Mod->getImm() != 0))
+ return false;
+
+ // We don't need to check src0: all input types are legal, so just make
+ // sure src0 isn't using any modifiers.
+ const MachineOperand *Src0Mod =
+ TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
+ if (Src0Mod && Src0Mod->getImm() != 0)
+ return false;
+
+ // Check output modifiers
+ const MachineOperand *Omod = TII->getNamedOperand(MI, AMDGPU::OpName::omod);
+ if (Omod && Omod->getImm() != 0)
+ return false;
+
+ const MachineOperand *Clamp = TII->getNamedOperand(MI, AMDGPU::OpName::clamp);
+ return !Clamp || Clamp->getImm() == 0;
+}
+
+bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
+ MF.getTarget().getInstrInfo());
+ const SIRegisterInfo &TRI = TII->getRegisterInfo();
+ std::vector<unsigned> I1Defs;
+
+ for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
+ BI != BE; ++BI) {
+
+ MachineBasicBlock &MBB = *BI;
+ MachineBasicBlock::iterator I, Next;
+ for (I = MBB.begin(); I != MBB.end(); I = Next) {
+ Next = std::next(I);
+ MachineInstr &MI = *I;
+
+ if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
+ continue;
+
+ if (!canShrink(MI, TII, TRI, MRI)) {
+ // Try commuting the instruction and see if that enables us to shrink
+ // it.
+ if (!MI.isCommutable() || !TII->commuteInstruction(&MI) ||
+ !canShrink(MI, TII, TRI, MRI))
+ continue;
+ }
+
+ int Op32 = AMDGPU::getVOPe32(MI.getOpcode());
+
+ // Op32 could be -1 here if we started with an instruction that had a
+ // 32-bit encoding and then commuted it to an instruction that did not.
+ if (Op32 == -1)
+ continue;
+
+ if (TII->isVOPC(Op32)) {
+ unsigned DstReg = MI.getOperand(0).getReg();
+ if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
+ // VOPC instructions can only write to the VCC register. We can't
+ // force them to use VCC here, because the register allocator
+ // has trouble with sequences like this, which cause the allocator
+ // to run out of registers if vreg0 and vreg1 belong to the VCCReg
+ // register class:
+ // vreg0 = VOPC;
+ // vreg1 = VOPC;
+ // S_AND_B64 vreg0, vreg1
+ //
+ // So, instead of forcing the instruction to write to VCC, we provide a
+ // hint to the register allocator to use VCC and then we will run this
+ // pass again after RA and shrink it if it outputs to VCC.
+ MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, AMDGPU::VCC);
+ continue;
+ }
+ if (DstReg != AMDGPU::VCC)
+ continue;
+ }
+
+ // We can shrink this instruction
+ DEBUG(dbgs() << "Shrinking "; MI.dump(); dbgs() << "\n";);
+
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, I, MI.getDebugLoc(), TII->get(Op32));
+
+ // dst
+ MIB.addOperand(MI.getOperand(0));
+
+ MIB.addOperand(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));
+
+ const MachineOperand *Src1 =
+ TII->getNamedOperand(MI, AMDGPU::OpName::src1);
+ if (Src1)
+ MIB.addOperand(*Src1);
+
+ for (const MachineOperand &MO : MI.implicit_operands())
+ MIB.addOperand(MO);
+
+ DEBUG(dbgs() << "e32 MI = "; MI.dump(); dbgs() << "\n";);
+ ++NumInstructionsShrunk;
+ MI.eraseFromParent();
+ }
+ }
+ return false;
+}
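
In outline, the pass only rewrites a VOP3 instruction into its 32-bit (e32) form when the smaller encoding can still represent it: no third source operand, no source or output modifiers, and src1 (if present) living in a VGPR; otherwise it tries commuting first. That eligibility test can be compressed into a small predicate over a hypothetical operand summary (the real pass queries MachineInstr operands through SIInstrInfo::getNamedOperand(), as shown above):

    // Hypothetical operand summary; the names here are illustrative only.
    struct VOPSummary {
      bool HasSrc2;
      bool HasSrc1, Src1IsVGPR;
      int Src0Mods, Src1Mods, OutputMods, Clamp;   // 0 means "no modifier"
    };

    static bool fitsE32Encoding(const VOPSummary &S) {
      if (S.HasSrc2)
        return false;                              // three sources need VOP3
      if (S.HasSrc1 && (!S.Src1IsVGPR || S.Src1Mods != 0))
        return false;                              // e32 src1 must be a plain VGPR
      if (S.Src0Mods != 0 || S.OutputMods != 0 || S.Clamp != 0)
        return false;                              // any modifier forces VOP3
      return true;
    }
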
diff --git a/contrib/llvm/lib/Target/R600/SITypeRewriter.cpp b/contrib/llvm/lib/Target/R600/SITypeRewriter.cpp
index f194d8b..367963a 100644
--- a/contrib/llvm/lib/Target/R600/SITypeRewriter.cpp
+++ b/contrib/llvm/lib/Target/R600/SITypeRewriter.cpp
@@ -22,9 +22,8 @@
//===----------------------------------------------------------------------===//
#include "AMDGPU.h"
-
#include "llvm/IR/IRBuilder.h"
-#include "llvm/InstVisitor.h"
+#include "llvm/IR/InstVisitor.h"
using namespace llvm;
@@ -36,13 +35,13 @@ class SITypeRewriter : public FunctionPass,
static char ID;
Module *Mod;
Type *v16i8;
- Type *i128;
+ Type *v4i32;
public:
SITypeRewriter() : FunctionPass(ID) { }
- virtual bool doInitialization(Module &M);
- virtual bool runOnFunction(Function &F);
- virtual const char *getPassName() const {
+ bool doInitialization(Module &M) override;
+ bool runOnFunction(Function &F) override;
+ const char *getPassName() const override {
return "SI Type Rewriter";
}
void visitLoadInst(LoadInst &I);
@@ -57,7 +56,7 @@ char SITypeRewriter::ID = 0;
bool SITypeRewriter::doInitialization(Module &M) {
Mod = &M;
v16i8 = VectorType::get(Type::getInt8Ty(M.getContext()), 16);
- i128 = Type::getIntNTy(M.getContext(), 128);
+ v4i32 = VectorType::get(Type::getInt32Ty(M.getContext()), 4);
return false;
}
@@ -70,11 +69,11 @@ bool SITypeRewriter::runOnFunction(Function &F) {
StringRef Str = A.getValueAsString();
Str.getAsInteger(0, ShaderType);
}
- if (ShaderType != ShaderType::COMPUTE) {
- visit(F);
- }
+ if (ShaderType == ShaderType::COMPUTE)
+ return false;
visit(F);
+ visit(F);
return false;
}
@@ -85,7 +84,8 @@ void SITypeRewriter::visitLoadInst(LoadInst &I) {
Type *ElemTy = PtrTy->getPointerElementType();
IRBuilder<> Builder(&I);
if (ElemTy == v16i8) {
- Value *BitCast = Builder.CreateBitCast(Ptr, Type::getIntNPtrTy(I.getContext(), 128, 2));
+ Value *BitCast = Builder.CreateBitCast(Ptr,
+ PointerType::get(v4i32, PtrTy->getPointerAddressSpace()));
LoadInst *Load = Builder.CreateLoad(BitCast);
SmallVector <std::pair<unsigned, MDNode*>, 8> MD;
I.getAllMetadataOtherThanDebugLoc(MD);
@@ -100,6 +100,7 @@ void SITypeRewriter::visitLoadInst(LoadInst &I) {
void SITypeRewriter::visitCallInst(CallInst &I) {
IRBuilder<> Builder(&I);
+
SmallVector <Value*, 8> Args;
SmallVector <Type*, 8> Types;
bool NeedToReplace = false;
@@ -108,18 +109,17 @@ void SITypeRewriter::visitCallInst(CallInst &I) {
for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
Value *Arg = I.getArgOperand(i);
if (Arg->getType() == v16i8) {
- Args.push_back(Builder.CreateBitCast(Arg, i128));
- Types.push_back(i128);
+ Args.push_back(Builder.CreateBitCast(Arg, v4i32));
+ Types.push_back(v4i32);
NeedToReplace = true;
- Name = Name + ".i128";
+ Name = Name + ".v4i32";
} else if (Arg->getType()->isVectorTy() &&
Arg->getType()->getVectorNumElements() == 1 &&
Arg->getType()->getVectorElementType() ==
Type::getInt32Ty(I.getContext())){
Type *ElementTy = Arg->getType()->getVectorElementType();
std::string TypeName = "i32";
- InsertElementInst *Def = dyn_cast<InsertElementInst>(Arg);
- assert(Def);
+ InsertElementInst *Def = cast<InsertElementInst>(Arg);
Args.push_back(Def->getOperand(1));
Types.push_back(ElementTy);
std::string VecTypeName = "v1" + TypeName;
@@ -145,12 +145,12 @@ void SITypeRewriter::visitCallInst(CallInst &I) {
void SITypeRewriter::visitBitCast(BitCastInst &I) {
IRBuilder<> Builder(&I);
- if (I.getDestTy() != i128) {
+ if (I.getDestTy() != v4i32) {
return;
}
if (BitCastInst *Op = dyn_cast<BitCastInst>(I.getOperand(0))) {
- if (Op->getSrcTy() == i128) {
+ if (Op->getSrcTy() == v4i32) {
I.replaceAllUsesWith(Op->getOperand(0));
I.eraseFromParent();
}
diff --git a/contrib/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp b/contrib/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
index e7addd7..9df0054 100644
--- a/contrib/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
+++ b/contrib/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
@@ -12,9 +12,11 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/TargetRegistry.h"
@@ -45,38 +47,40 @@ class SparcAsmParser : public MCTargetAsmParser {
// public interface of the MCTargetAsmParser.
bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm);
- bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
+ OperandVector &Operands, MCStreamer &Out,
+ unsigned &ErrorInfo,
+ bool MatchingInlineAsm) override;
+ bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
- SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands);
- bool ParseDirective(AsmToken DirectiveID);
+ SMLoc NameLoc, OperandVector &Operands) override;
+ bool ParseDirective(AsmToken DirectiveID) override;
- virtual unsigned validateTargetOperandClass(MCParsedAsmOperand *Op,
- unsigned Kind);
+ unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
+ unsigned Kind) override;
// Custom parse functions for Sparc specific operands.
- OperandMatchResultTy
- parseMEMOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ OperandMatchResultTy parseMEMOperand(OperandVector &Operands);
- OperandMatchResultTy
- parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- StringRef Name);
+ OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Name);
OperandMatchResultTy
- parseSparcAsmOperand(SparcOperand *&Operand);
+ parseSparcAsmOperand(std::unique_ptr<SparcOperand> &Operand,
+ bool isCall = false);
+
+ OperandMatchResultTy parseBranchModifiers(OperandVector &Operands);
// returns true if Tok is matched to a register and returns register in RegNo.
bool matchRegisterName(const AsmToken &Tok, unsigned &RegNo,
unsigned &RegKind);
bool matchSparcAsmModifiers(const MCExpr *&EVal, SMLoc &EndLoc);
+ bool parseDirectiveWord(unsigned Size, SMLoc L);
+ bool is64Bit() const { return STI.getTargetTriple().startswith("sparcv9"); }
public:
SparcAsmParser(MCSubtargetInfo &sti, MCAsmParser &parser,
- const MCInstrInfo &MII)
+ const MCInstrInfo &MII,
+ const MCTargetOptions &Options)
: MCTargetAsmParser(), STI(sti), Parser(parser) {
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
@@ -145,8 +149,6 @@ private:
SMLoc StartLoc, EndLoc;
- SparcOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
-
struct Token {
const char *Data;
unsigned Length;
@@ -174,10 +176,12 @@ private:
struct MemOp Mem;
};
public:
- bool isToken() const { return Kind == k_Token; }
- bool isReg() const { return Kind == k_Register; }
- bool isImm() const { return Kind == k_Immediate; }
- bool isMem() const { return isMEMrr() || isMEMri(); }
+ SparcOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
+
+ bool isToken() const override { return Kind == k_Token; }
+ bool isReg() const override { return Kind == k_Register; }
+ bool isImm() const override { return Kind == k_Immediate; }
+ bool isMem() const override { return isMEMrr() || isMEMri(); }
bool isMEMrr() const { return Kind == k_MemoryReg; }
bool isMEMri() const { return Kind == k_MemoryImm; }
@@ -196,7 +200,7 @@ public:
return StringRef(Tok.Data, Tok.Length);
}
- unsigned getReg() const {
+ unsigned getReg() const override {
assert((Kind == k_Register) && "Invalid access!");
return Reg.RegNum;
}
@@ -222,22 +226,22 @@ public:
}
/// getStartLoc - Get the location of the first token of this operand.
- SMLoc getStartLoc() const {
+ SMLoc getStartLoc() const override {
return StartLoc;
}
/// getEndLoc - Get the location of the last token of this operand.
- SMLoc getEndLoc() const {
+ SMLoc getEndLoc() const override {
return EndLoc;
}
- virtual void print(raw_ostream &OS) const {
+ void print(raw_ostream &OS) const override {
switch (Kind) {
case k_Token: OS << "Token: " << getToken() << "\n"; break;
case k_Register: OS << "Reg: #" << getReg() << "\n"; break;
case k_Immediate: OS << "Imm: " << getImm() << "\n"; break;
case k_MemoryReg: OS << "Mem: " << getMemBase() << "+"
<< getMemOffsetReg() << "\n"; break;
- case k_MemoryImm: assert(getMemOff() != 0);
+ case k_MemoryImm: assert(getMemOff() != nullptr);
OS << "Mem: " << getMemBase()
<< "+" << *getMemOff()
<< "\n"; break;
@@ -257,7 +261,7 @@ public:
void addExpr(MCInst &Inst, const MCExpr *Expr) const{
// Add as immediate when possible. Null MCExpr = 0.
- if (Expr == 0)
+ if (!Expr)
Inst.addOperand(MCOperand::CreateImm(0));
else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
@@ -283,8 +287,8 @@ public:
addExpr(Inst, Expr);
}
- static SparcOperand *CreateToken(StringRef Str, SMLoc S) {
- SparcOperand *Op = new SparcOperand(k_Token);
+ static std::unique_ptr<SparcOperand> CreateToken(StringRef Str, SMLoc S) {
+ auto Op = make_unique<SparcOperand>(k_Token);
Op->Tok.Data = Str.data();
Op->Tok.Length = Str.size();
Op->StartLoc = S;
@@ -292,10 +296,9 @@ public:
return Op;
}
- static SparcOperand *CreateReg(unsigned RegNum,
- unsigned Kind,
- SMLoc S, SMLoc E) {
- SparcOperand *Op = new SparcOperand(k_Register);
+ static std::unique_ptr<SparcOperand> CreateReg(unsigned RegNum, unsigned Kind,
+ SMLoc S, SMLoc E) {
+ auto Op = make_unique<SparcOperand>(k_Register);
Op->Reg.RegNum = RegNum;
Op->Reg.Kind = (SparcOperand::RegisterKind)Kind;
Op->StartLoc = S;
@@ -303,61 +306,62 @@ public:
return Op;
}
- static SparcOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
- SparcOperand *Op = new SparcOperand(k_Immediate);
+ static std::unique_ptr<SparcOperand> CreateImm(const MCExpr *Val, SMLoc S,
+ SMLoc E) {
+ auto Op = make_unique<SparcOperand>(k_Immediate);
Op->Imm.Val = Val;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
- static SparcOperand *MorphToDoubleReg(SparcOperand *Op) {
- unsigned Reg = Op->getReg();
- assert(Op->Reg.Kind == rk_FloatReg);
+ static bool MorphToDoubleReg(SparcOperand &Op) {
+ unsigned Reg = Op.getReg();
+ assert(Op.Reg.Kind == rk_FloatReg);
unsigned regIdx = Reg - Sparc::F0;
if (regIdx % 2 || regIdx > 31)
- return 0;
- Op->Reg.RegNum = DoubleRegs[regIdx / 2];
- Op->Reg.Kind = rk_DoubleReg;
- return Op;
+ return false;
+ Op.Reg.RegNum = DoubleRegs[regIdx / 2];
+ Op.Reg.Kind = rk_DoubleReg;
+ return true;
}
- static SparcOperand *MorphToQuadReg(SparcOperand *Op) {
- unsigned Reg = Op->getReg();
+ static bool MorphToQuadReg(SparcOperand &Op) {
+ unsigned Reg = Op.getReg();
unsigned regIdx = 0;
- switch (Op->Reg.Kind) {
- default: assert(0 && "Unexpected register kind!");
+ switch (Op.Reg.Kind) {
+ default: llvm_unreachable("Unexpected register kind!");
case rk_FloatReg:
regIdx = Reg - Sparc::F0;
if (regIdx % 4 || regIdx > 31)
- return 0;
+ return false;
Reg = QuadFPRegs[regIdx / 4];
break;
case rk_DoubleReg:
regIdx = Reg - Sparc::D0;
if (regIdx % 2 || regIdx > 31)
- return 0;
+ return false;
Reg = QuadFPRegs[regIdx / 2];
break;
}
- Op->Reg.RegNum = Reg;
- Op->Reg.Kind = rk_QuadReg;
- return Op;
+ Op.Reg.RegNum = Reg;
+ Op.Reg.Kind = rk_QuadReg;
+ return true;
}
- static SparcOperand *MorphToMEMrr(unsigned Base, SparcOperand *Op) {
+ static std::unique_ptr<SparcOperand>
+ MorphToMEMrr(unsigned Base, std::unique_ptr<SparcOperand> Op) {
unsigned offsetReg = Op->getReg();
Op->Kind = k_MemoryReg;
Op->Mem.Base = Base;
Op->Mem.OffsetReg = offsetReg;
- Op->Mem.Off = 0;
+ Op->Mem.Off = nullptr;
return Op;
}
- static SparcOperand *CreateMEMri(unsigned Base,
- const MCExpr *Off,
- SMLoc S, SMLoc E) {
- SparcOperand *Op = new SparcOperand(k_MemoryImm);
+ static std::unique_ptr<SparcOperand>
+ CreateMEMri(unsigned Base, const MCExpr *Off, SMLoc S, SMLoc E) {
+ auto Op = make_unique<SparcOperand>(k_MemoryImm);
Op->Mem.Base = Base;
Op->Mem.OffsetReg = 0;
Op->Mem.Off = Off;
@@ -366,7 +370,8 @@ public:
return Op;
}
- static SparcOperand *MorphToMEMri(unsigned Base, SparcOperand *Op) {
+ static std::unique_ptr<SparcOperand>
+ MorphToMEMri(unsigned Base, std::unique_ptr<SparcOperand> Op) {
const MCExpr *Imm = Op->getImm();
Op->Kind = k_MemoryImm;
Op->Mem.Base = Base;
@@ -378,11 +383,11 @@ public:
} // end namespace
-bool SparcAsmParser::
-MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm) {
+bool SparcAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ OperandVector &Operands,
+ MCStreamer &Out,
+ unsigned &ErrorInfo,
+ bool MatchingInlineAsm) {
MCInst Inst;
SmallVector<MCInst, 8> Instructions;
unsigned MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
@@ -393,7 +398,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
case Match_Success: {
Inst.setLoc(IDLoc);
- Out.EmitInstruction(Inst);
+ Out.EmitInstruction(Inst, STI);
return false;
}
@@ -407,7 +412,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
if (ErrorInfo >= Operands.size())
return Error(IDLoc, "too few operands for instruction");
- ErrorLoc = ((SparcOperand*) Operands[ErrorInfo])->getStartLoc();
+ ErrorLoc = ((SparcOperand &)*Operands[ErrorInfo]).getStartLoc();
if (ErrorLoc == SMLoc())
ErrorLoc = IDLoc;
}
@@ -415,7 +420,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
return Error(ErrorLoc, "invalid operand for instruction");
}
case Match_MnemonicFail:
- return Error(IDLoc, "invalid instruction");
+ return Error(IDLoc, "invalid instruction mnemonic");
}
return true;
}
@@ -439,21 +444,28 @@ ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc)
return Error(StartLoc, "invalid register name");
}
-bool SparcAsmParser::
-ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
- SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands)
-{
- // Check if we have valid mnemonic.
- if (!mnemonicIsValid(Name, 0)) {
- Parser.eatToEndOfStatement();
- return Error(NameLoc, "Unknown instruction");
- }
+static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features,
+ unsigned VariantID);
+
+bool SparcAsmParser::ParseInstruction(ParseInstructionInfo &Info,
+ StringRef Name, SMLoc NameLoc,
+ OperandVector &Operands) {
+
// First operand in MCInst is instruction mnemonic.
Operands.push_back(SparcOperand::CreateToken(Name, NameLoc));
+ // apply mnemonic aliases, if any, so that we can parse operands correctly.
+ applyMnemonicAliases(Name, getAvailableFeatures(), 0);
+
if (getLexer().isNot(AsmToken::EndOfStatement)) {
// Read the first operand.
+ if (getLexer().is(AsmToken::Comma)) {
+ if (parseBranchModifiers(Operands) != MatchOperand_Success) {
+ SMLoc Loc = getLexer().getLoc();
+ Parser.eatToEndOfStatement();
+ return Error(Loc, "unexpected token");
+ }
+ }
if (parseOperand(Operands, Name) != MatchOperand_Success) {
SMLoc Loc = getLexer().getLoc();
Parser.eatToEndOfStatement();
@@ -482,14 +494,57 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
bool SparcAsmParser::
ParseDirective(AsmToken DirectiveID)
{
- // Ignore all directives for now.
- Parser.eatToEndOfStatement();
+ StringRef IDVal = DirectiveID.getString();
+
+ if (IDVal == ".byte")
+ return parseDirectiveWord(1, DirectiveID.getLoc());
+
+ if (IDVal == ".half")
+ return parseDirectiveWord(2, DirectiveID.getLoc());
+
+ if (IDVal == ".word")
+ return parseDirectiveWord(4, DirectiveID.getLoc());
+
+ if (IDVal == ".nword")
+ return parseDirectiveWord(is64Bit() ? 8 : 4, DirectiveID.getLoc());
+
+ if (is64Bit() && IDVal == ".xword")
+ return parseDirectiveWord(8, DirectiveID.getLoc());
+
+ if (IDVal == ".register") {
+ // For now, ignore .register directive.
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+ // Let the MC layer handle other directives.
+ return true;
+}
+
+bool SparcAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ for (;;) {
+ const MCExpr *Value;
+ if (getParser().parseExpression(Value))
+ return true;
+
+ getParser().getStreamer().EmitValue(Value, Size);
+
+ if (getLexer().is(AsmToken::EndOfStatement))
+ break;
+
+ // FIXME: Improve diagnostic.
+ if (getLexer().isNot(AsmToken::Comma))
+ return Error(L, "unexpected token in directive");
+ Parser.Lex();
+ }
+ }
+ Parser.Lex();
return false;
}
-SparcAsmParser::OperandMatchResultTy SparcAsmParser::
-parseMEMOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands)
-{
+SparcAsmParser::OperandMatchResultTy
+SparcAsmParser::parseMEMOperand(OperandVector &Operands) {
SMLoc S, E;
unsigned BaseReg = 0;
@@ -504,7 +559,7 @@ parseMEMOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands)
case AsmToken::Comma:
case AsmToken::RBrac:
case AsmToken::EndOfStatement:
- Operands.push_back(SparcOperand::CreateMEMri(BaseReg, 0, S, E));
+ Operands.push_back(SparcOperand::CreateMEMri(BaseReg, nullptr, S, E));
return MatchOperand_Success;
case AsmToken:: Plus:
@@ -514,23 +569,20 @@ parseMEMOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands)
break;
}
- SparcOperand *Offset = 0;
+ std::unique_ptr<SparcOperand> Offset;
OperandMatchResultTy ResTy = parseSparcAsmOperand(Offset);
if (ResTy != MatchOperand_Success || !Offset)
return MatchOperand_NoMatch;
- Offset = (Offset->isImm()
- ? SparcOperand::MorphToMEMri(BaseReg, Offset)
- : SparcOperand::MorphToMEMrr(BaseReg, Offset));
+ Operands.push_back(
+ Offset->isImm() ? SparcOperand::MorphToMEMri(BaseReg, std::move(Offset))
+ : SparcOperand::MorphToMEMrr(BaseReg, std::move(Offset)));
- Operands.push_back(Offset);
return MatchOperand_Success;
}
-SparcAsmParser::OperandMatchResultTy SparcAsmParser::
-parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- StringRef Mnemonic)
-{
+SparcAsmParser::OperandMatchResultTy
+SparcAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
@@ -576,26 +628,27 @@ parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
return MatchOperand_Success;
}
- SparcOperand *Op = 0;
- ResTy = parseSparcAsmOperand(Op);
+ std::unique_ptr<SparcOperand> Op;
+
+ ResTy = parseSparcAsmOperand(Op, (Mnemonic == "call"));
if (ResTy != MatchOperand_Success || !Op)
return MatchOperand_ParseFail;
// Push the parsed operand into the list of operands
- Operands.push_back(Op);
+ Operands.push_back(std::move(Op));
return MatchOperand_Success;
}
SparcAsmParser::OperandMatchResultTy
-SparcAsmParser::parseSparcAsmOperand(SparcOperand *&Op)
-{
+SparcAsmParser::parseSparcAsmOperand(std::unique_ptr<SparcOperand> &Op,
+ bool isCall) {
SMLoc S = Parser.getTok().getLoc();
SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
const MCExpr *EVal;
- Op = 0;
+ Op = nullptr;
switch (getLexer().getKind()) {
default: break;
@@ -621,11 +674,6 @@ SparcAsmParser::parseSparcAsmOperand(SparcOperand *&Op)
else
Op = SparcOperand::CreateToken("%icc", S);
break;
-
- case Sparc::FCC:
- assert(name == "fcc0" && "Cannot handle %fcc other than %fcc0 yet");
- Op = SparcOperand::CreateToken("%fcc0", S);
- break;
}
break;
}
@@ -649,6 +697,10 @@ SparcAsmParser::parseSparcAsmOperand(SparcOperand *&Op)
const MCExpr *Res = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None,
getContext());
+ if (isCall &&
+ getContext().getObjectFileInfo()->getRelocM() == Reloc::PIC_)
+ Res = SparcMCExpr::Create(SparcMCExpr::VK_Sparc_WPLT30, Res,
+ getContext());
Op = SparcOperand::CreateImm(Res, S, E);
}
break;
@@ -657,6 +709,27 @@ SparcAsmParser::parseSparcAsmOperand(SparcOperand *&Op)
return (Op) ? MatchOperand_Success : MatchOperand_ParseFail;
}
+SparcAsmParser::OperandMatchResultTy
+SparcAsmParser::parseBranchModifiers(OperandVector &Operands) {
+
+ // parse (,a|,pn|,pt)+
+
+ while (getLexer().is(AsmToken::Comma)) {
+
+ Parser.Lex(); // Eat the comma
+
+ if (!getLexer().is(AsmToken::Identifier))
+ return MatchOperand_ParseFail;
+ StringRef modName = Parser.getTok().getString();
+ if (modName == "a" || modName == "pn" || modName == "pt") {
+ Operands.push_back(SparcOperand::CreateToken(modName,
+ Parser.getTok().getLoc()));
+ Parser.Lex(); // eat the identifier.
+ }
+ }
+ return MatchOperand_Success;
+}
+
bool SparcAsmParser::matchRegisterName(const AsmToken &Tok,
unsigned &RegNo,
unsigned &RegKind)
@@ -704,7 +777,7 @@ bool SparcAsmParser::matchRegisterName(const AsmToken &Tok,
&& !name.substr(3).getAsInteger(10, intVal)
&& intVal < 4) {
// FIXME: check 64bit and handle %fcc1 - %fcc3
- RegNo = Sparc::FCC;
+ RegNo = Sparc::FCC0 + intVal;
RegKind = SparcOperand::rk_CCReg;
return true;
}
@@ -767,6 +840,31 @@ bool SparcAsmParser::matchRegisterName(const AsmToken &Tok,
return false;
}
+static bool hasGOTReference(const MCExpr *Expr) {
+ switch (Expr->getKind()) {
+ case MCExpr::Target:
+ if (const SparcMCExpr *SE = dyn_cast<SparcMCExpr>(Expr))
+ return hasGOTReference(SE->getSubExpr());
+ break;
+
+ case MCExpr::Constant:
+ break;
+
+ case MCExpr::Binary: {
+ const MCBinaryExpr *BE = cast<MCBinaryExpr>(Expr);
+ return hasGOTReference(BE->getLHS()) || hasGOTReference(BE->getRHS());
+ }
+
+ case MCExpr::SymbolRef: {
+ const MCSymbolRefExpr &SymRef = *cast<MCSymbolRefExpr>(Expr);
+ return (SymRef.getSymbol().getName() == "_GLOBAL_OFFSET_TABLE_");
+ }
+
+ case MCExpr::Unary:
+ return hasGOTReference(cast<MCUnaryExpr>(Expr)->getSubExpr());
+ }
+ return false;
+}
bool SparcAsmParser::matchSparcAsmModifiers(const MCExpr *&EVal,
SMLoc &EndLoc)
@@ -790,6 +888,23 @@ bool SparcAsmParser::matchSparcAsmModifiers(const MCExpr *&EVal,
const MCExpr *subExpr;
if (Parser.parseParenExpression(subExpr, EndLoc))
return false;
+
+ bool isPIC = getContext().getObjectFileInfo()->getRelocM() == Reloc::PIC_;
+
+ switch(VK) {
+ default: break;
+ case SparcMCExpr::VK_Sparc_LO:
+ VK = (hasGOTReference(subExpr)
+ ? SparcMCExpr::VK_Sparc_PC10
+ : (isPIC ? SparcMCExpr::VK_Sparc_GOT10 : VK));
+ break;
+ case SparcMCExpr::VK_Sparc_HI:
+ VK = (hasGOTReference(subExpr)
+ ? SparcMCExpr::VK_Sparc_PC22
+ : (isPIC ? SparcMCExpr::VK_Sparc_GOT22 : VK));
+ break;
+ }
+
EVal = SparcMCExpr::Create(VK, subExpr, getContext());
return true;
}
@@ -804,18 +919,14 @@ extern "C" void LLVMInitializeSparcAsmParser() {
#define GET_MATCHER_IMPLEMENTATION
#include "SparcGenAsmMatcher.inc"
-
-
-unsigned SparcAsmParser::
-validateTargetOperandClass(MCParsedAsmOperand *GOp,
- unsigned Kind)
-{
- SparcOperand *Op = (SparcOperand*)GOp;
- if (Op->isFloatOrDoubleReg()) {
+unsigned SparcAsmParser::validateTargetOperandClass(MCParsedAsmOperand &GOp,
+ unsigned Kind) {
+ SparcOperand &Op = (SparcOperand &)GOp;
+ if (Op.isFloatOrDoubleReg()) {
switch (Kind) {
default: break;
case MCK_DFPRegs:
- if (!Op->isFloatReg() || SparcOperand::MorphToDoubleReg(Op))
+ if (!Op.isFloatReg() || SparcOperand::MorphToDoubleReg(Op))
return MCTargetAsmParser::Match_Success;
break;
case MCK_QFPRegs:
diff --git a/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp b/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp
index f23ddc2..f3441ff 100644
--- a/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp
+++ b/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp
@@ -12,7 +12,6 @@
// NOP is placed.
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "delay-slot-filler"
#include "Sparc.h"
#include "SparcSubtarget.h"
#include "llvm/ADT/SmallSet.h"
@@ -27,6 +26,8 @@
using namespace llvm;
+#define DEBUG_TYPE "delay-slot-filler"
+
STATISTIC(FilledSlots, "Number of delay slots filled");
static cl::opt<bool> DisableDelaySlotFiller(
@@ -49,12 +50,12 @@ namespace {
Subtarget(&TM.getSubtarget<SparcSubtarget>()) {
}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "SPARC Delay Slot Filler";
}
bool runOnMachineBasicBlock(MachineBasicBlock &MBB);
- bool runOnMachineFunction(MachineFunction &F) {
+ bool runOnMachineFunction(MachineFunction &F) override {
bool Changed = false;
// This pass invalidates liveness information when it reorders
@@ -211,12 +212,8 @@ Filler::findDelayInstr(MachineBasicBlock &MBB,
if (I->isDebugValue())
continue;
-
- if (I->hasUnmodeledSideEffects()
- || I->isInlineAsm()
- || I->isLabel()
- || I->hasDelaySlot()
- || I->isBundledWithSucc())
+ if (I->hasUnmodeledSideEffects() || I->isInlineAsm() || I->isPosition() ||
+ I->hasDelaySlot() || I->isBundledWithSucc())
break;
if (delayHasHazard(I, sawLoad, sawStore, RegDefs, RegUses)) {
@@ -479,7 +476,7 @@ bool Filler::tryCombineRestoreWithPrevInst(MachineBasicBlock &MBB,
&& MBBI->getOperand(1).getReg() == SP::G0
&& MBBI->getOperand(2).getReg() == SP::G0);
- MachineBasicBlock::iterator PrevInst = llvm::prior(MBBI);
+ MachineBasicBlock::iterator PrevInst = std::prev(MBBI);
// It cannot be combined with a bundled instruction.
if (PrevInst->isBundledWithSucc())
diff --git a/contrib/llvm/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp b/contrib/llvm/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp
index 6233805..4df0990 100644
--- a/contrib/llvm/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp
+++ b/contrib/llvm/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp
@@ -11,8 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "sparc-disassembler"
-
#include "Sparc.h"
#include "SparcRegisterInfo.h"
#include "SparcSubtarget.h"
@@ -23,6 +21,8 @@
using namespace llvm;
+#define DEBUG_TYPE "sparc-disassembler"
+
typedef MCDisassembler::DecodeStatus DecodeStatus;
namespace {
@@ -32,22 +32,18 @@ class SparcDisassembler : public MCDisassembler {
public:
/// Constructor - Initializes the disassembler.
///
- SparcDisassembler(const MCSubtargetInfo &STI, const MCRegisterInfo *Info) :
- MCDisassembler(STI), RegInfo(Info)
+ SparcDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx) :
+ MCDisassembler(STI, Ctx)
{}
virtual ~SparcDisassembler() {}
- const MCRegisterInfo *getRegInfo() const { return RegInfo.get(); }
-
/// getInstruction - See MCDisassembler.
- virtual DecodeStatus getInstruction(MCInst &instr,
- uint64_t &size,
- const MemoryObject &region,
- uint64_t address,
- raw_ostream &vStream,
- raw_ostream &cStream) const;
-private:
- OwningPtr<const MCRegisterInfo> RegInfo;
+ DecodeStatus getInstruction(MCInst &instr,
+ uint64_t &size,
+ const MemoryObject &region,
+ uint64_t address,
+ raw_ostream &vStream,
+ raw_ostream &cStream) const override;
};
}
@@ -58,8 +54,9 @@ namespace llvm {
static MCDisassembler *createSparcDisassembler(
const Target &T,
- const MCSubtargetInfo &STI) {
- return new SparcDisassembler(STI, T.createMCRegInfo(""));
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new SparcDisassembler(STI, Ctx);
}
@@ -113,6 +110,9 @@ static const unsigned QFPRegDecoderTable[] = {
SP::Q6, SP::Q14, ~0U, ~0U,
SP::Q7, SP::Q15, ~0U, ~0U } ;
+static const unsigned FCCRegDecoderTable[] = {
+ SP::FCC0, SP::FCC1, SP::FCC2, SP::FCC3 };
+
static DecodeStatus DecodeIntRegsRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
@@ -174,6 +174,42 @@ static DecodeStatus DecodeQFPRegsRegisterClass(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeFCCRegsRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 3)
+ return MCDisassembler::Fail;
+ Inst.addOperand(MCOperand::CreateReg(FCCRegDecoderTable[RegNo]));
+ return MCDisassembler::Success;
+}
+
+
+static DecodeStatus DecodeLoadInt(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeLoadFP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeLoadDFP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeLoadQFP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeStoreInt(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeStoreFP(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeStoreDFP(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeStoreQFP(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeCall(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeSIMM13(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeJMPL(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeReturn(MCInst &MI, unsigned insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeSWAP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder);
#include "SparcGenDisassemblerTables.inc"
@@ -226,3 +262,219 @@ SparcDisassembler::getInstruction(MCInst &instr,
return MCDisassembler::Fail;
}
+
+
+typedef DecodeStatus (*DecodeFunc)(MCInst &MI, unsigned insn, uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeMem(MCInst &MI, unsigned insn, uint64_t Address,
+ const void *Decoder,
+ bool isLoad, DecodeFunc DecodeRD) {
+ unsigned rd = fieldFromInstruction(insn, 25, 5);
+ unsigned rs1 = fieldFromInstruction(insn, 14, 5);
+ bool isImm = fieldFromInstruction(insn, 13, 1);
+ unsigned rs2 = 0;
+ unsigned simm13 = 0;
+ if (isImm)
+ simm13 = SignExtend32<13>(fieldFromInstruction(insn, 0, 13));
+ else
+ rs2 = fieldFromInstruction(insn, 0, 5);
+
+ DecodeStatus status;
+ if (isLoad) {
+ status = DecodeRD(MI, rd, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+ }
+
+ // Decode rs1.
+ status = DecodeIntRegsRegisterClass(MI, rs1, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+
+ // Decode imm|rs2.
+ if (isImm)
+ MI.addOperand(MCOperand::CreateImm(simm13));
+ else {
+ status = DecodeIntRegsRegisterClass(MI, rs2, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+ }
+
+ if (!isLoad) {
+ status = DecodeRD(MI, rd, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+ }
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeLoadInt(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, true,
+ DecodeIntRegsRegisterClass);
+}
+
+static DecodeStatus DecodeLoadFP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, true,
+ DecodeFPRegsRegisterClass);
+}
+
+static DecodeStatus DecodeLoadDFP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, true,
+ DecodeDFPRegsRegisterClass);
+}
+
+static DecodeStatus DecodeLoadQFP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, true,
+ DecodeQFPRegsRegisterClass);
+}
+
+static DecodeStatus DecodeStoreInt(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, false,
+ DecodeIntRegsRegisterClass);
+}
+
+static DecodeStatus DecodeStoreFP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, false,
+ DecodeFPRegsRegisterClass);
+}
+
+static DecodeStatus DecodeStoreDFP(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, false,
+ DecodeDFPRegsRegisterClass);
+}
+
+static DecodeStatus DecodeStoreQFP(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, false,
+ DecodeQFPRegsRegisterClass);
+}
+
+static bool tryAddingSymbolicOperand(int64_t Value, bool isBranch,
+ uint64_t Address, uint64_t Offset,
+ uint64_t Width, MCInst &MI,
+ const void *Decoder) {
+ const MCDisassembler *Dis = static_cast<const MCDisassembler*>(Decoder);
+ return Dis->tryAddingSymbolicOperand(MI, Value, Address, isBranch,
+ Offset, Width);
+}
+
+static DecodeStatus DecodeCall(MCInst &MI, unsigned insn,
+ uint64_t Address, const void *Decoder) {
+ unsigned tgt = fieldFromInstruction(insn, 0, 30);
+ tgt <<= 2;
+ if (!tryAddingSymbolicOperand(tgt+Address, false, Address,
+ 0, 30, MI, Decoder))
+ MI.addOperand(MCOperand::CreateImm(tgt));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeSIMM13(MCInst &MI, unsigned insn,
+ uint64_t Address, const void *Decoder) {
+ unsigned tgt = SignExtend32<13>(fieldFromInstruction(insn, 0, 13));
+ MI.addOperand(MCOperand::CreateImm(tgt));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeJMPL(MCInst &MI, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+
+ unsigned rd = fieldFromInstruction(insn, 25, 5);
+ unsigned rs1 = fieldFromInstruction(insn, 14, 5);
+ unsigned isImm = fieldFromInstruction(insn, 13, 1);
+ unsigned rs2 = 0;
+ unsigned simm13 = 0;
+ if (isImm)
+ simm13 = SignExtend32<13>(fieldFromInstruction(insn, 0, 13));
+ else
+ rs2 = fieldFromInstruction(insn, 0, 5);
+
+ // Decode RD.
+ DecodeStatus status = DecodeIntRegsRegisterClass(MI, rd, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+
+ // Decode RS1.
+ status = DecodeIntRegsRegisterClass(MI, rs1, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+
+ // Decode RS2 | SIMM13.
+ if (isImm)
+ MI.addOperand(MCOperand::CreateImm(simm13));
+ else {
+ status = DecodeIntRegsRegisterClass(MI, rs2, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+ }
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeReturn(MCInst &MI, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+
+ unsigned rs1 = fieldFromInstruction(insn, 14, 5);
+ unsigned isImm = fieldFromInstruction(insn, 13, 1);
+ unsigned rs2 = 0;
+ unsigned simm13 = 0;
+ if (isImm)
+ simm13 = SignExtend32<13>(fieldFromInstruction(insn, 0, 13));
+ else
+ rs2 = fieldFromInstruction(insn, 0, 5);
+
+ // Decode RS1.
+ DecodeStatus status = DecodeIntRegsRegisterClass(MI, rs1, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+
+ // Decode RS2 | SIMM13.
+ if (isImm)
+ MI.addOperand(MCOperand::CreateImm(simm13));
+ else {
+ status = DecodeIntRegsRegisterClass(MI, rs2, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+ }
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeSWAP(MCInst &MI, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+
+ unsigned rd = fieldFromInstruction(insn, 25, 5);
+ unsigned rs1 = fieldFromInstruction(insn, 14, 5);
+ unsigned isImm = fieldFromInstruction(insn, 13, 1);
+ unsigned rs2 = 0;
+ unsigned simm13 = 0;
+ if (isImm)
+ simm13 = SignExtend32<13>(fieldFromInstruction(insn, 0, 13));
+ else
+ rs2 = fieldFromInstruction(insn, 0, 5);
+
+ // Decode RD.
+ DecodeStatus status = DecodeIntRegsRegisterClass(MI, rd, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+
+ // Decode RS1.
+ status = DecodeIntRegsRegisterClass(MI, rs1, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+
+ // Decode RS2 | SIMM13.
+ if (isImm)
+ MI.addOperand(MCOperand::CreateImm(simm13));
+ else {
+ status = DecodeIntRegsRegisterClass(MI, rs2, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+ }
+ return MCDisassembler::Success;
+}
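
All of the memory-form decoders added above slice the same SPARC instruction fields: rd in bits [29:25], rs1 in [18:14], the immediate flag at bit 13, and then either simm13 in [12:0] or rs2 in [4:0]. A minimal field-extraction sketch, independent of the MCDisassembler machinery:

    #include <cstdint>

    // Extract Width bits starting at bit Start (bit 0 = LSB), like LLVM's
    // fieldFromInstruction().
    static uint32_t field(uint32_t Insn, unsigned Start, unsigned Width) {
      return (Insn >> Start) & ((1u << Width) - 1);
    }

    // Sign-extend a 13-bit immediate, mirroring SignExtend32<13>().
    static int32_t signExtend13(uint32_t Imm) {
      return static_cast<int32_t>(Imm << 19) >> 19;
    }

    struct MemFields {
      unsigned Rd, Rs1, Rs2;
      bool IsImm;
      int32_t Simm13;
    };

    static MemFields decodeMemFields(uint32_t Insn) {
      MemFields F = {};
      F.Rd    = field(Insn, 25, 5);
      F.Rs1   = field(Insn, 14, 5);
      F.IsImm = field(Insn, 13, 1) != 0;
      if (F.IsImm)
        F.Simm13 = signExtend13(field(Insn, 0, 13));
      else
        F.Rs2 = field(Insn, 0, 5);
      return F;
    }
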
diff --git a/contrib/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.cpp b/contrib/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.cpp
index 6d7457a..5975a51 100644
--- a/contrib/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.cpp
@@ -11,20 +11,33 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
#include "SparcInstPrinter.h"
-
#include "Sparc.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "asm-printer"
+
+// The generated AsmWriter SparcGenAsmWriter uses "Sparc" as the target
+// namespace, but the SPARC backend uses "SP" as its namespace.
+namespace llvm {
+namespace Sparc {
+ using namespace SP;
+}
+}
+
#define GET_INSTRUCTION_NAME
#define PRINT_ALIAS_INSTR
#include "SparcGenAsmWriter.inc"
+bool SparcInstPrinter::isV9() const {
+ return (STI.getFeatureBits() & Sparc::FeatureV9) != 0;
+}
+
void SparcInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const
{
OS << '%' << StringRef(getRegisterName(RegNo)).lower();
@@ -50,7 +63,15 @@ bool SparcInstPrinter::printSparcAliasInstr(const MCInst *MI, raw_ostream &O)
return false;
switch (MI->getOperand(0).getReg()) {
default: return false;
- case SP::G0: // jmp $addr
+ case SP::G0: // jmp $addr | ret | retl
+ if (MI->getOperand(2).isImm() &&
+ MI->getOperand(2).getImm() == 8) {
+ switch(MI->getOperand(1).getReg()) {
+ default: break;
+ case SP::I7: O << "\tret"; return true;
+ case SP::O7: O << "\tretl"; return true;
+ }
+ }
O << "\tjmp "; printMemOperand(MI, 1, O);
return true;
case SP::O7: // call $addr
@@ -58,6 +79,28 @@ bool SparcInstPrinter::printSparcAliasInstr(const MCInst *MI, raw_ostream &O)
return true;
}
}
+ case SP::V9FCMPS: case SP::V9FCMPD: case SP::V9FCMPQ:
+ case SP::V9FCMPES: case SP::V9FCMPED: case SP::V9FCMPEQ: {
+ if (isV9()
+ || (MI->getNumOperands() != 3)
+ || (!MI->getOperand(0).isReg())
+ || (MI->getOperand(0).getReg() != SP::FCC0))
+ return false;
+ // if V8, skip printing %fcc0.
+ switch(MI->getOpcode()) {
+ default:
+ case SP::V9FCMPS: O << "\tfcmps "; break;
+ case SP::V9FCMPD: O << "\tfcmpd "; break;
+ case SP::V9FCMPQ: O << "\tfcmpq "; break;
+ case SP::V9FCMPES: O << "\tfcmpes "; break;
+ case SP::V9FCMPED: O << "\tfcmped "; break;
+ case SP::V9FCMPEQ: O << "\tfcmpeq "; break;
+ }
+ printOperand(MI, 1, O);
+ O << ", ";
+ printOperand(MI, 2, O);
+ return true;
+ }
}
}
@@ -110,11 +153,17 @@ void SparcInstPrinter::printCCOperand(const MCInst *MI, int opNum,
switch (MI->getOpcode()) {
default: break;
case SP::FBCOND:
- case SP::MOVFCCrr:
- case SP::MOVFCCri:
- case SP::FMOVS_FCC:
- case SP::FMOVD_FCC:
- case SP::FMOVQ_FCC: // Make sure CC is a fp conditional flag.
+ case SP::FBCONDA:
+ case SP::BPFCC:
+ case SP::BPFCCA:
+ case SP::BPFCCNT:
+ case SP::BPFCCANT:
+ case SP::MOVFCCrr: case SP::V9MOVFCCrr:
+ case SP::MOVFCCri: case SP::V9MOVFCCri:
+ case SP::FMOVS_FCC: case SP::V9FMOVS_FCC:
+ case SP::FMOVD_FCC: case SP::V9FMOVD_FCC:
+ case SP::FMOVQ_FCC: case SP::V9FMOVQ_FCC:
+ // Make sure CC is a fp conditional flag.
CC = (CC < 16) ? (CC + 16) : CC;
break;
}
@@ -124,6 +173,6 @@ void SparcInstPrinter::printCCOperand(const MCInst *MI, int opNum,
bool SparcInstPrinter::printGetPCX(const MCInst *MI, unsigned opNum,
raw_ostream &O)
{
- assert(0 && "FIXME: Implement SparcInstPrinter::printGetPCX.");
+ llvm_unreachable("FIXME: Implement SparcInstPrinter::printGetPCX.");
return true;
}
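
The new %g0 case in printSparcAliasInstr above recognizes the two conventional return idioms: jmpl %i7+8, %g0 prints as ret and jmpl %o7+8, %g0 as retl, while anything else keeps the plain jmp form. A minimal sketch of that decision, with a hypothetical plain struct standing in for the MCInst operands (register numbers follow the SPARC convention %g0 = 0, %o7 = 15, %i7 = 31):

    #include <cstdio>

    // Hypothetical stand-in for the (rd, rs1, imm) operands of a JMPLri MCInst.
    struct JmplOperands { unsigned rd, rs1; long imm; };

    static const char *jmplAlias(const JmplOperands &op) {
      if (op.rd == 0 && op.imm == 8) {    // jmpl %rs1+8, %g0
        if (op.rs1 == 31) return "ret";   // %i7: return from a routine with a register window
        if (op.rs1 == 15) return "retl";  // %o7: return from a leaf routine
      }
      return "jmp";                       // otherwise the generic jmp alias
    }

    int main() {
      std::printf("%s %s\n", jmplAlias({0, 31, 8}), jmplAlias({0, 15, 8}));  // ret retl
      return 0;
    }
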
diff --git a/contrib/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.h b/contrib/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.h
index 63ed41a..8fe4075 100644
--- a/contrib/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.h
+++ b/contrib/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.h
@@ -15,30 +15,36 @@
#define SparcINSTPRINTER_H
#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/MC/MCSubtargetInfo.h"
namespace llvm {
class MCOperand;
class SparcInstPrinter : public MCInstPrinter {
+ const MCSubtargetInfo &STI;
public:
SparcInstPrinter(const MCAsmInfo &MAI,
const MCInstrInfo &MII,
- const MCRegisterInfo &MRI)
- : MCInstPrinter(MAI, MII, MRI) {}
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &sti)
+ : MCInstPrinter(MAI, MII, MRI), STI(sti) {}
- virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
- virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
+ void printRegName(raw_ostream &OS, unsigned RegNo) const override;
+ void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot) override;
bool printSparcAliasInstr(const MCInst *MI, raw_ostream &OS);
+ bool isV9() const;
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &O);
bool printAliasInstr(const MCInst *MI, raw_ostream &O);
+ void printCustomAliasOperand(const MCInst *MI, unsigned OpIdx,
+ unsigned PrintMethodIdx, raw_ostream &O);
static const char *getRegisterName(unsigned RegNo);
void printOperand(const MCInst *MI, int opNum, raw_ostream &OS);
void printMemOperand(const MCInst *MI, int opNum, raw_ostream &OS,
- const char *Modifier = 0);
+ const char *Modifier = nullptr);
void printCCOperand(const MCInst *MI, int opNum, raw_ostream &OS);
bool printGetPCX(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
index 6d2dd83..dcd81e3 100644
--- a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
@@ -8,11 +8,13 @@
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCAsmBackend.h"
-#include "MCTargetDesc/SparcMCTargetDesc.h"
#include "MCTargetDesc/SparcFixupKinds.h"
+#include "MCTargetDesc/SparcMCTargetDesc.h"
#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCValue.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
@@ -37,6 +39,12 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
case Sparc::fixup_sparc_br19:
return (Value >> 2) & 0x7ffff;
+ case Sparc::fixup_sparc_br16_2:
+ return (Value >> 2) & 0xc000;
+
+ case Sparc::fixup_sparc_br16_14:
+ return (Value >> 2) & 0x3fff;
+
case Sparc::fixup_sparc_pc22:
case Sparc::fixup_sparc_got22:
case Sparc::fixup_sparc_tls_gd_hi22:
@@ -94,16 +102,18 @@ namespace {
public:
SparcAsmBackend(const Target &T) : MCAsmBackend(), TheTarget(T) {}
- unsigned getNumFixupKinds() const {
+ unsigned getNumFixupKinds() const override {
return Sparc::NumTargetFixupKinds;
}
- const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
const static MCFixupKindInfo Infos[Sparc::NumTargetFixupKinds] = {
// name offset bits flags
{ "fixup_sparc_call30", 2, 30, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_sparc_br22", 10, 22, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_sparc_br19", 13, 19, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_sparc_br16_2", 10, 2, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_sparc_br16_14", 18, 14, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_sparc_hi22", 10, 22, 0 },
{ "fixup_sparc_lo10", 22, 10, 0 },
{ "fixup_sparc_h44", 10, 22, 0 },
@@ -144,16 +154,15 @@ namespace {
return Infos[Kind - FirstTargetFixupKind];
}
- void processFixupValue(const MCAssembler &Asm,
- const MCAsmLayout &Layout,
- const MCFixup &Fixup,
- const MCFragment *DF,
- MCValue & Target,
- uint64_t &Value,
- bool &IsResolved) {
+ void processFixupValue(const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFixup &Fixup, const MCFragment *DF,
+ const MCValue &Target, uint64_t &Value,
+ bool &IsResolved) override {
switch ((Sparc::Fixups)Fixup.getKind()) {
default: break;
case Sparc::fixup_sparc_wplt30:
+ if (Target.getSymA()->getSymbol().isTemporary())
+ return;
case Sparc::fixup_sparc_tls_gd_hi22:
case Sparc::fixup_sparc_tls_gd_lo10:
case Sparc::fixup_sparc_tls_gd_add:
@@ -175,7 +184,7 @@ namespace {
}
}
- bool mayNeedRelaxation(const MCInst &Inst) const {
+ bool mayNeedRelaxation(const MCInst &Inst) const override {
// FIXME.
return false;
}
@@ -185,20 +194,25 @@ namespace {
bool fixupNeedsRelaxation(const MCFixup &Fixup,
uint64_t Value,
const MCRelaxableFragment *DF,
- const MCAsmLayout &Layout) const {
+ const MCAsmLayout &Layout) const override {
// FIXME.
- assert(0 && "fixupNeedsRelaxation() unimplemented");
+ llvm_unreachable("fixupNeedsRelaxation() unimplemented");
return false;
}
- void relaxInstruction(const MCInst &Inst, MCInst &Res) const {
+ void relaxInstruction(const MCInst &Inst, MCInst &Res) const override {
// FIXME.
- assert(0 && "relaxInstruction() unimplemented");
+ llvm_unreachable("relaxInstruction() unimplemented");
}
- bool writeNopData(uint64_t Count, MCObjectWriter *OW) const {
- // FIXME: Zero fill for now.
- for (uint64_t i = 0; i != Count; ++i)
- OW->Write8(0);
+ bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override {
+ // Cannot emit NOP with size not multiple of 32 bits.
+ if (Count % 4 != 0)
+ return false;
+
+ uint64_t NumNops = Count / 4;
+ for (uint64_t i = 0; i != NumNops; ++i)
+ OW->Write32(0x01000000);
+
return true;
}
@@ -215,7 +229,7 @@ namespace {
SparcAsmBackend(T), OSType(OSType) { }
void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
- uint64_t Value) const {
+ uint64_t Value, bool IsPCRel) const override {
Value = adjustFixupValue(Fixup.getKind(), Value);
if (!Value) return; // Doesn't change encoding.
@@ -230,14 +244,10 @@ namespace {
}
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(OSType);
return createSparcELFObjectWriter(OS, is64Bit(), OSABI);
}
-
- virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
- return false;
- }
};
} // end anonymous namespace
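
The writeNopData change above pads gaps with whole SPARC nop instructions instead of zero bytes: 0x01000000 encodes sethi 0, %g0, the architectural nop, and the requested size has to be a multiple of the 4-byte instruction width or the backend declines. A minimal standalone sketch of the same fill loop (the function name and buffer type are illustrative, not LLVM API):

    #include <cstdint>
    #include <vector>

    // Only the 4-byte-multiple check and the 0x01000000 pattern mirror the patch.
    static bool fillWithNops(std::vector<uint8_t> &out, uint64_t count) {
      if (count % 4 != 0)                   // SPARC instructions are 32 bits wide
        return false;
      for (uint64_t i = 0; i != count / 4; ++i) {
        const uint32_t nop = 0x01000000;    // "sethi 0, %g0", the architectural nop
        out.push_back(uint8_t(nop >> 24));  // big-endian, matching SPARC's instruction byte order
        out.push_back(uint8_t(nop >> 16));
        out.push_back(uint8_t(nop >> 8));
        out.push_back(uint8_t(nop));
      }
      return true;
    }
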
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp
index 3a9929b..5ba82f1 100644
--- a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp
@@ -28,24 +28,14 @@ namespace {
virtual ~SparcELFObjectWriter() {}
protected:
- virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel, bool IsRelocWithSymbol,
- int64_t Addend) const;
-
- virtual const MCSymbol *ExplicitRelSym(const MCAssembler &Asm,
- const MCValue &Target,
- const MCFragment &F,
- const MCFixup &Fixup,
- bool IsPCRel) const;
+ unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel) const override;
};
}
-
unsigned SparcELFObjectWriter::GetRelocType(const MCValue &Target,
const MCFixup &Fixup,
- bool IsPCRel,
- bool IsRelocWithSymbol,
- int64_t Addend) const {
+ bool IsPCRel) const {
if (const SparcMCExpr *SExpr = dyn_cast<SparcMCExpr>(Fixup.getValue())) {
if (SExpr->getKind() == SparcMCExpr::VK_Sparc_R_DISP32)
@@ -114,23 +104,6 @@ unsigned SparcELFObjectWriter::GetRelocType(const MCValue &Target,
return ELF::R_SPARC_NONE;
}
-const MCSymbol *SparcELFObjectWriter::ExplicitRelSym(const MCAssembler &Asm,
- const MCValue &Target,
- const MCFragment &F,
- const MCFixup &Fixup,
- bool IsPCRel) const {
-
- if (!Target.getSymA())
- return NULL;
- switch((unsigned)Fixup.getKind()) {
- default: break;
- case Sparc::fixup_sparc_got22:
- case Sparc::fixup_sparc_got10:
- return &Target.getSymA()->getSymbol().AliasedSymbol();
- }
- return NULL;
-}
-
MCObjectWriter *llvm::createSparcELFObjectWriter(raw_ostream &OS,
bool Is64Bit,
uint8_t OSABI) {
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcFixupKinds.h b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcFixupKinds.h
index 005a024..d42bcee 100644
--- a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcFixupKinds.h
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcFixupKinds.h
@@ -26,6 +26,10 @@ namespace llvm {
/// branches on icc/xcc
fixup_sparc_br19,
+ /// fixup_sparc_br16_2/br16_14 - 16-bit fixup for bpr, split into two fields
+ fixup_sparc_br16_2,
+ fixup_sparc_br16_14,
+
/// fixup_sparc_hi22 - 22-bit fixup corresponding to %hi(foo)
/// for sethi
fixup_sparc_hi22,
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
index 8d0dfec..df66ca9 100644
--- a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
@@ -32,7 +32,7 @@ SparcELFMCAsmInfo::SparcELFMCAsmInfo(StringRef TT) {
Data16bitsDirective = "\t.half\t";
Data32bitsDirective = "\t.word\t";
// .xword is only supported by V9.
- Data64bitsDirective = (isV9) ? "\t.xword\t" : 0;
+ Data64bitsDirective = (isV9) ? "\t.xword\t" : nullptr;
ZeroDirective = "\t.skip\t";
CommentString = "!";
HasLEB128 = true;
@@ -43,7 +43,9 @@ SparcELFMCAsmInfo::SparcELFMCAsmInfo(StringRef TT) {
SunStyleELFSectionSwitchSyntax = true;
UsesELFSectionDirectiveForBSS = true;
- PrivateGlobalPrefix = ".L";
+ if (TheTriple.getOS() == llvm::Triple::Solaris ||
+ TheTriple.getOS() == llvm::Triple::OpenBSD)
+ UseIntegratedAssembler = true;
}
const MCExpr*
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h
index d53d09d..e126b68 100644
--- a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h
@@ -20,15 +20,15 @@ namespace llvm {
class StringRef;
class SparcELFMCAsmInfo : public MCAsmInfoELF {
- virtual void anchor();
+ void anchor() override;
public:
explicit SparcELFMCAsmInfo(StringRef TT);
- virtual const MCExpr* getExprForPersonalitySymbol(const MCSymbol *Sym,
- unsigned Encoding,
- MCStreamer &Streamer) const;
- virtual const MCExpr* getExprForFDESymbol(const MCSymbol *Sym,
- unsigned Encoding,
- MCStreamer &Streamer) const;
+ const MCExpr*
+ getExprForPersonalitySymbol(const MCSymbol *Sym, unsigned Encoding,
+ MCStreamer &Streamer) const override;
+ const MCExpr* getExprForFDESymbol(const MCSymbol *Sym,
+ unsigned Encoding,
+ MCStreamer &Streamer) const override;
};
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp
index ed756d9..eea9626 100644
--- a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp
@@ -11,21 +11,22 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mccodeemitter"
#include "SparcMCExpr.h"
-#include "SparcMCTargetDesc.h"
#include "MCTargetDesc/SparcFixupKinds.h"
+#include "SparcMCTargetDesc.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "mccodeemitter"
+
STATISTIC(MCNumEmitted, "Number of MC instructions emitted");
namespace {
@@ -40,22 +41,33 @@ public:
~SparcMCCodeEmitter() {}
void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const override;
// getBinaryCodeForInstr - TableGen'erated function for getting the
// binary encoding for an instruction.
uint64_t getBinaryCodeForInstr(const MCInst &MI,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getCallTargetOpValue(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ unsigned getBranchPredTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ unsigned getBranchOnRegTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
};
} // end anonymous namespace
@@ -69,8 +81,9 @@ MCCodeEmitter *llvm::createSparcMCCodeEmitter(const MCInstrInfo &MCII,
void SparcMCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const {
- unsigned Bits = getBinaryCodeForInstr(MI, Fixups);
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ unsigned Bits = getBinaryCodeForInstr(MI, Fixups, STI);
// Output the constant in big endian byte order.
for (unsigned i = 0; i != 4; ++i) {
@@ -88,7 +101,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
}
if (tlsOpNo != 0) {
const MCOperand &MO = MI.getOperand(tlsOpNo);
- uint64_t op = getMachineOpValue(MI, MO, Fixups);
+ uint64_t op = getMachineOpValue(MI, MO, Fixups, STI);
assert(op == 0 && "Unexpected operand value!");
(void)op; // suppress warning.
}
@@ -99,7 +112,8 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
unsigned SparcMCCodeEmitter::
getMachineOpValue(const MCInst &MI, const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
if (MO.isReg())
return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
@@ -119,16 +133,17 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO,
if (Expr->EvaluateAsAbsolute(Res))
return Res;
- assert(0 && "Unhandled expression!");
+ llvm_unreachable("Unhandled expression!");
return 0;
}
unsigned SparcMCCodeEmitter::
getCallTargetOpValue(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpNo);
if (MO.isReg() || MO.isImm())
- return getMachineOpValue(MI, MO, Fixups);
+ return getMachineOpValue(MI, MO, Fixups, STI);
if (MI.getOpcode() == SP::TLS_CALL) {
// No fixups for __tls_get_addr. Will emit for fixups for tls_symbol in
@@ -159,18 +174,45 @@ getCallTargetOpValue(const MCInst &MI, unsigned OpNo,
unsigned SparcMCCodeEmitter::
getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpNo);
if (MO.isReg() || MO.isImm())
- return getMachineOpValue(MI, MO, Fixups);
+ return getMachineOpValue(MI, MO, Fixups, STI);
- Sparc::Fixups fixup = Sparc::fixup_sparc_br22;
- if (MI.getOpcode() == SP::BPXCC)
- fixup = Sparc::fixup_sparc_br19;
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
+ (MCFixupKind)Sparc::fixup_sparc_br22));
+ return 0;
+}
+unsigned SparcMCCodeEmitter::
+getBranchPredTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpNo);
+ if (MO.isReg() || MO.isImm())
+ return getMachineOpValue(MI, MO, Fixups, STI);
+
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
+ (MCFixupKind)Sparc::fixup_sparc_br19));
+ return 0;
+}
+unsigned SparcMCCodeEmitter::
+getBranchOnRegTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpNo);
+ if (MO.isReg() || MO.isImm())
+ return getMachineOpValue(MI, MO, Fixups, STI);
+
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
+ (MCFixupKind)Sparc::fixup_sparc_br16_2));
Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
- (MCFixupKind)fixup));
+ (MCFixupKind)Sparc::fixup_sparc_br16_14));
+
return 0;
}
+
+
#include "SparcGenMCCodeEmitter.inc"
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.cpp b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.cpp
index 0337c09..7f01ab0 100644
--- a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.cpp
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.cpp
@@ -12,17 +12,19 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "sparcmcexpr"
#include "SparcMCExpr.h"
-#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELF.h"
+#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Object/ELF.h"
using namespace llvm;
+#define DEBUG_TYPE "sparcmcexpr"
+
const SparcMCExpr*
SparcMCExpr::Create(VariantKind Kind, const MCExpr *Expr,
MCContext &Ctx) {
@@ -123,7 +125,7 @@ SparcMCExpr::VariantKind SparcMCExpr::parseVariantKind(StringRef name)
Sparc::Fixups SparcMCExpr::getFixupKind(SparcMCExpr::VariantKind Kind) {
switch (Kind) {
- default: assert(0 && "Unhandled SparcMCExpr::VariantKind");
+ default: llvm_unreachable("Unhandled SparcMCExpr::VariantKind");
case VK_Sparc_LO: return Sparc::fixup_sparc_lo10;
case VK_Sparc_HI: return Sparc::fixup_sparc_hi22;
case VK_Sparc_H44: return Sparc::fixup_sparc_h44;
@@ -160,9 +162,7 @@ Sparc::Fixups SparcMCExpr::getFixupKind(SparcMCExpr::VariantKind Kind) {
bool
SparcMCExpr::EvaluateAsRelocatableImpl(MCValue &Res,
const MCAsmLayout *Layout) const {
- if (!Layout)
- return false;
- return getSubExpr()->EvaluateAsRelocatable(Res, *Layout);
+ return getSubExpr()->EvaluateAsRelocatable(Res, Layout);
}
static void fixELFSymbolsInTLSFixupsImpl(const MCExpr *Expr, MCAssembler &Asm) {
@@ -220,35 +220,6 @@ void SparcMCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {
fixELFSymbolsInTLSFixupsImpl(getSubExpr(), Asm);
}
-// FIXME: This basically copies MCObjectStreamer::AddValueSymbols. Perhaps
-// that method should be made public?
-// FIXME: really do above: now that at least three other backends are using it.
-static void AddValueSymbolsImpl(const MCExpr *Value, MCAssembler *Asm) {
- switch (Value->getKind()) {
- case MCExpr::Target:
- llvm_unreachable("Can't handle nested target expr!");
- break;
-
- case MCExpr::Constant:
- break;
-
- case MCExpr::Binary: {
- const MCBinaryExpr *BE = cast<MCBinaryExpr>(Value);
- AddValueSymbolsImpl(BE->getLHS(), Asm);
- AddValueSymbolsImpl(BE->getRHS(), Asm);
- break;
- }
-
- case MCExpr::SymbolRef:
- Asm->getOrCreateSymbolData(cast<MCSymbolRefExpr>(Value)->getSymbol());
- break;
-
- case MCExpr::Unary:
- AddValueSymbolsImpl(cast<MCUnaryExpr>(Value)->getSubExpr(), Asm);
- break;
- }
-}
-
-void SparcMCExpr::AddValueSymbols(MCAssembler *Asm) const {
- AddValueSymbolsImpl(getSubExpr(), Asm);
+void SparcMCExpr::visitUsedExpr(MCStreamer &Streamer) const {
+ Streamer.visitUsedExpr(*getSubExpr());
}
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.h b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.h
index be6526e..f0d0ef3 100644
--- a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.h
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.h
@@ -85,15 +85,15 @@ public:
Sparc::Fixups getFixupKind() const { return getFixupKind(Kind); }
/// @}
- void PrintImpl(raw_ostream &OS) const;
+ void PrintImpl(raw_ostream &OS) const override;
bool EvaluateAsRelocatableImpl(MCValue &Res,
- const MCAsmLayout *Layout) const;
- void AddValueSymbols(MCAssembler *) const;
- const MCSection *FindAssociatedSection() const {
+ const MCAsmLayout *Layout) const override;
+ void visitUsedExpr(MCStreamer &Streamer) const override;
+ const MCSection *FindAssociatedSection() const override {
return getSubExpr()->FindAssociatedSection();
}
- void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const;
+ void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const override;
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::Target;
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp
index 2832a71..571017d 100644
--- a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp
@@ -12,9 +12,9 @@
//===----------------------------------------------------------------------===//
#include "SparcMCTargetDesc.h"
+#include "InstPrinter/SparcInstPrinter.h"
#include "SparcMCAsmInfo.h"
#include "SparcTargetStreamer.h"
-#include "InstPrinter/SparcInstPrinter.h"
#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
@@ -22,6 +22,8 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
#define GET_INSTRINFO_MC_DESC
#include "SparcGenInstrInfo.inc"
@@ -31,14 +33,11 @@
#define GET_REGINFO_MC_DESC
#include "SparcGenRegisterInfo.inc"
-using namespace llvm;
-
-
static MCAsmInfo *createSparcMCAsmInfo(const MCRegisterInfo &MRI,
StringRef TT) {
MCAsmInfo *MAI = new SparcELFMCAsmInfo(TT);
unsigned Reg = MRI.getDwarfRegNum(SP::O6, true);
- MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(0, Reg, 0);
+ MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, Reg, 0);
MAI->addInitialFrameState(Inst);
return MAI;
}
@@ -47,7 +46,7 @@ static MCAsmInfo *createSparcV9MCAsmInfo(const MCRegisterInfo &MRI,
StringRef TT) {
MCAsmInfo *MAI = new SparcELFMCAsmInfo(TT);
unsigned Reg = MRI.getDwarfRegNum(SP::O6, true);
- MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(0, Reg, 2047);
+ MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, Reg, 2047);
MAI->addInitialFrameState(Inst);
return MAI;
}
@@ -67,6 +66,9 @@ static MCRegisterInfo *createSparcMCRegisterInfo(StringRef TT) {
static MCSubtargetInfo *createSparcMCSubtargetInfo(StringRef TT, StringRef CPU,
StringRef FS) {
MCSubtargetInfo *X = new MCSubtargetInfo();
+ Triple TheTriple(TT);
+ if (CPU.empty())
+ CPU = (TheTriple.getArch() == Triple::sparcv9) ? "v9" : "v8";
InitSparcMCSubtargetInfo(X, TT, CPU, FS);
return X;
}
@@ -123,21 +125,24 @@ static MCCodeGenInfo *createSparcV9MCCodeGenInfo(StringRef TT, Reloc::Model RM,
static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
MCContext &Context, MCAsmBackend &MAB,
raw_ostream &OS, MCCodeEmitter *Emitter,
- bool RelaxAll, bool NoExecStack) {
- SparcTargetELFStreamer *S = new SparcTargetELFStreamer();
- return createELFStreamer(Context, S, MAB, OS, Emitter, RelaxAll, NoExecStack);
+ const MCSubtargetInfo &STI, bool RelaxAll,
+ bool NoExecStack) {
+ MCStreamer *S =
+ createELFStreamer(Context, MAB, OS, Emitter, RelaxAll, NoExecStack);
+ new SparcTargetELFStreamer(*S);
+ return S;
}
static MCStreamer *
createMCAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
- bool isVerboseAsm, bool useLoc, bool useCFI,
- bool useDwarfDirectory, MCInstPrinter *InstPrint,
- MCCodeEmitter *CE, MCAsmBackend *TAB, bool ShowInst) {
- SparcTargetAsmStreamer *S = new SparcTargetAsmStreamer(OS);
-
- return llvm::createAsmStreamer(Ctx, S, OS, isVerboseAsm, useLoc, useCFI,
- useDwarfDirectory, InstPrint, CE, TAB,
- ShowInst);
+ bool isVerboseAsm, bool useDwarfDirectory,
+ MCInstPrinter *InstPrint, MCCodeEmitter *CE,
+ MCAsmBackend *TAB, bool ShowInst) {
+
+ MCStreamer *S = llvm::createAsmStreamer(
+ Ctx, OS, isVerboseAsm, useDwarfDirectory, InstPrint, CE, TAB, ShowInst);
+ new SparcTargetAsmStreamer(*S, OS);
+ return S;
}
static MCInstPrinter *createSparcMCInstPrinter(const Target &T,
@@ -146,7 +151,7 @@ static MCInstPrinter *createSparcMCInstPrinter(const Target &T,
const MCInstrInfo &MII,
const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI) {
- return new SparcInstPrinter(MAI, MII, MRI);
+ return new SparcInstPrinter(MAI, MII, MRI, STI);
}
extern "C" void LLVMInitializeSparcTargetMC() {
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcTargetStreamer.cpp b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcTargetStreamer.cpp
index 01043ae..94af791 100644
--- a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcTargetStreamer.cpp
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcTargetStreamer.cpp
@@ -18,10 +18,13 @@
using namespace llvm;
// pin vtable to this file
+SparcTargetStreamer::SparcTargetStreamer(MCStreamer &S) : MCTargetStreamer(S) {}
+
void SparcTargetStreamer::anchor() {}
-SparcTargetAsmStreamer::SparcTargetAsmStreamer(formatted_raw_ostream &OS)
- : OS(OS) {}
+SparcTargetAsmStreamer::SparcTargetAsmStreamer(MCStreamer &S,
+ formatted_raw_ostream &OS)
+ : SparcTargetStreamer(S), OS(OS) {}
void SparcTargetAsmStreamer::emitSparcRegisterIgnore(unsigned reg) {
OS << "\t.register "
@@ -35,6 +38,9 @@ void SparcTargetAsmStreamer::emitSparcRegisterScratch(unsigned reg) {
<< ", #scratch\n";
}
+SparcTargetELFStreamer::SparcTargetELFStreamer(MCStreamer &S)
+ : SparcTargetStreamer(S) {}
+
MCELFStreamer &SparcTargetELFStreamer::getStreamer() {
- return static_cast<MCELFStreamer &>(*Streamer);
+ return static_cast<MCELFStreamer &>(Streamer);
}
diff --git a/contrib/llvm/lib/Target/Sparc/Sparc.h b/contrib/llvm/lib/Target/Sparc/Sparc.h
index 8d46c60..de20aaa 100644
--- a/contrib/llvm/lib/Target/Sparc/Sparc.h
+++ b/contrib/llvm/lib/Target/Sparc/Sparc.h
@@ -42,8 +42,8 @@ namespace llvm {
// values must be kept in sync with the ones in the .td file.
namespace SPCC {
enum CondCodes {
- //ICC_A = 8 , // Always
- //ICC_N = 0 , // Never
+ ICC_A = 8 , // Always
+ ICC_N = 0 , // Never
ICC_NE = 9 , // Not Equal
ICC_E = 1 , // Equal
ICC_G = 10 , // Greater
@@ -59,8 +59,8 @@ namespace llvm {
ICC_VC = 15 , // Overflow Clear
ICC_VS = 7 , // Overflow Set
- //FCC_A = 8+16, // Always
- //FCC_N = 0+16, // Never
+ FCC_A = 8+16, // Always
+ FCC_N = 0+16, // Never
FCC_U = 7+16, // Unordered
FCC_G = 6+16, // Greater
FCC_UG = 5+16, // Unordered or Greater
@@ -80,6 +80,8 @@ namespace llvm {
inline static const char *SPARCCondCodeToString(SPCC::CondCodes CC) {
switch (CC) {
+ case SPCC::ICC_A: return "a";
+ case SPCC::ICC_N: return "n";
case SPCC::ICC_NE: return "ne";
case SPCC::ICC_E: return "e";
case SPCC::ICC_G: return "g";
@@ -94,6 +96,8 @@ namespace llvm {
case SPCC::ICC_NEG: return "neg";
case SPCC::ICC_VC: return "vc";
case SPCC::ICC_VS: return "vs";
+ case SPCC::FCC_A: return "a";
+ case SPCC::FCC_N: return "n";
case SPCC::FCC_U: return "u";
case SPCC::FCC_G: return "g";
case SPCC::FCC_UG: return "ug";
diff --git a/contrib/llvm/lib/Target/Sparc/Sparc.td b/contrib/llvm/lib/Target/Sparc/Sparc.td
index 05ff996..3159a46 100644
--- a/contrib/llvm/lib/Target/Sparc/Sparc.td
+++ b/contrib/llvm/lib/Target/Sparc/Sparc.td
@@ -29,6 +29,12 @@ def FeatureV8Deprecated
def FeatureVIS
: SubtargetFeature<"vis", "IsVIS", "true",
"Enable UltraSPARC Visual Instruction Set extensions">;
+def FeatureVIS2
+ : SubtargetFeature<"vis2", "IsVIS2", "true",
+ "Enable Visual Instruction Set extensions II">;
+def FeatureVIS3
+ : SubtargetFeature<"vis3", "IsVIS3", "true",
+ "Enable Visual Instruction Set extensions III">;
def FeatureHardQuad
: SubtargetFeature<"hard-quad-float", "HasHardQuad", "true",
@@ -69,17 +75,18 @@ def : Proc<"sparclite86x", []>;
def : Proc<"sparclet", []>;
def : Proc<"tsc701", []>;
def : Proc<"v9", [FeatureV9]>;
-def : Proc<"ultrasparc", [FeatureV9, FeatureV8Deprecated]>;
-def : Proc<"ultrasparc3", [FeatureV9, FeatureV8Deprecated]>;
-def : Proc<"niagara", [FeatureV9, FeatureV8Deprecated]>;
-def : Proc<"niagara2", [FeatureV9, FeatureV8Deprecated, UsePopc]>;
-def : Proc<"niagara3", [FeatureV9, FeatureV8Deprecated, UsePopc]>;
-def : Proc<"niagara4", [FeatureV9, FeatureV8Deprecated, UsePopc]>;
-
-def SparcAsmWriter : AsmWriter {
- string AsmWriterClassName = "InstPrinter";
- bit isMCAsmWriter = 1;
-}
+def : Proc<"ultrasparc", [FeatureV9, FeatureV8Deprecated, FeatureVIS]>;
+def : Proc<"ultrasparc3", [FeatureV9, FeatureV8Deprecated, FeatureVIS,
+ FeatureVIS2]>;
+def : Proc<"niagara", [FeatureV9, FeatureV8Deprecated, FeatureVIS,
+ FeatureVIS2]>;
+def : Proc<"niagara2", [FeatureV9, FeatureV8Deprecated, UsePopc,
+ FeatureVIS, FeatureVIS2]>;
+def : Proc<"niagara3", [FeatureV9, FeatureV8Deprecated, UsePopc,
+ FeatureVIS, FeatureVIS2]>;
+def : Proc<"niagara4", [FeatureV9, FeatureV8Deprecated, UsePopc,
+ FeatureVIS, FeatureVIS2, FeatureVIS3]>;
+
//===----------------------------------------------------------------------===//
// Declare the target which we are implementing
@@ -89,6 +96,4 @@ def Sparc : Target {
// Pull in Instruction Info:
let InstructionSet = SparcInstrInfo;
let AssemblyParsers = [SparcAsmParser];
-
- let AssemblyWriters = [SparcAsmWriter];
}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp b/contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp
index b2c536d..1b7330e 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp
@@ -12,19 +12,19 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
#include "Sparc.h"
+#include "InstPrinter/SparcInstPrinter.h"
+#include "MCTargetDesc/SparcMCExpr.h"
#include "SparcInstrInfo.h"
#include "SparcTargetMachine.h"
#include "SparcTargetStreamer.h"
-#include "InstPrinter/SparcInstPrinter.h"
-#include "MCTargetDesc/SparcMCExpr.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
@@ -32,30 +32,32 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/Mangler.h"
using namespace llvm;
+#define DEBUG_TYPE "asm-printer"
+
namespace {
class SparcAsmPrinter : public AsmPrinter {
SparcTargetStreamer &getTargetStreamer() {
- return static_cast<SparcTargetStreamer&>(OutStreamer.getTargetStreamer());
+ return static_cast<SparcTargetStreamer &>(
+ *OutStreamer.getTargetStreamer());
}
public:
explicit SparcAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
: AsmPrinter(TM, Streamer) {}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Sparc Assembly Printer";
}
void printOperand(const MachineInstr *MI, int opNum, raw_ostream &OS);
void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &OS,
- const char *Modifier = 0);
+ const char *Modifier = nullptr);
void printCCOperand(const MachineInstr *MI, int opNum, raw_ostream &OS);
- virtual void EmitFunctionBodyStart();
- virtual void EmitInstruction(const MachineInstr *MI);
- virtual void EmitEndOfAsmFile(Module &M);
+ void EmitFunctionBodyStart() override;
+ void EmitInstruction(const MachineInstr *MI) override;
+ void EmitEndOfAsmFile(Module &M) override;
static const char *getRegisterName(unsigned RegNo) {
return SparcInstPrinter::getRegisterName(RegNo);
@@ -63,12 +65,13 @@ namespace {
bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O);
+ raw_ostream &O) override;
bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O);
+ raw_ostream &O) override;
- void LowerGETPCXAndEmitMCInsts(const MachineInstr *MI);
+ void LowerGETPCXAndEmitMCInsts(const MachineInstr *MI,
+ const MCSubtargetInfo &STI);
};
} // end of anonymous namespace
@@ -105,48 +108,54 @@ static MCOperand createPCXRelExprOp(SparcMCExpr::VariantKind Kind,
}
static void EmitCall(MCStreamer &OutStreamer,
- MCOperand &Callee)
+ MCOperand &Callee,
+ const MCSubtargetInfo &STI)
{
MCInst CallInst;
CallInst.setOpcode(SP::CALL);
CallInst.addOperand(Callee);
- OutStreamer.EmitInstruction(CallInst);
+ OutStreamer.EmitInstruction(CallInst, STI);
}
static void EmitSETHI(MCStreamer &OutStreamer,
- MCOperand &Imm, MCOperand &RD)
+ MCOperand &Imm, MCOperand &RD,
+ const MCSubtargetInfo &STI)
{
MCInst SETHIInst;
SETHIInst.setOpcode(SP::SETHIi);
SETHIInst.addOperand(RD);
SETHIInst.addOperand(Imm);
- OutStreamer.EmitInstruction(SETHIInst);
+ OutStreamer.EmitInstruction(SETHIInst, STI);
}
static void EmitBinary(MCStreamer &OutStreamer, unsigned Opcode,
- MCOperand &RS1, MCOperand &Src2, MCOperand &RD)
+ MCOperand &RS1, MCOperand &Src2, MCOperand &RD,
+ const MCSubtargetInfo &STI)
{
MCInst Inst;
Inst.setOpcode(Opcode);
Inst.addOperand(RD);
Inst.addOperand(RS1);
Inst.addOperand(Src2);
- OutStreamer.EmitInstruction(Inst);
+ OutStreamer.EmitInstruction(Inst, STI);
}
static void EmitOR(MCStreamer &OutStreamer,
- MCOperand &RS1, MCOperand &Imm, MCOperand &RD) {
- EmitBinary(OutStreamer, SP::ORri, RS1, Imm, RD);
+ MCOperand &RS1, MCOperand &Imm, MCOperand &RD,
+ const MCSubtargetInfo &STI) {
+ EmitBinary(OutStreamer, SP::ORri, RS1, Imm, RD, STI);
}
static void EmitADD(MCStreamer &OutStreamer,
- MCOperand &RS1, MCOperand &RS2, MCOperand &RD) {
- EmitBinary(OutStreamer, SP::ADDrr, RS1, RS2, RD);
+ MCOperand &RS1, MCOperand &RS2, MCOperand &RD,
+ const MCSubtargetInfo &STI) {
+ EmitBinary(OutStreamer, SP::ADDrr, RS1, RS2, RD, STI);
}
static void EmitSHL(MCStreamer &OutStreamer,
- MCOperand &RS1, MCOperand &Imm, MCOperand &RD) {
- EmitBinary(OutStreamer, SP::SLLri, RS1, Imm, RD);
+ MCOperand &RS1, MCOperand &Imm, MCOperand &RD,
+ const MCSubtargetInfo &STI) {
+ EmitBinary(OutStreamer, SP::SLLri, RS1, Imm, RD, STI);
}
@@ -154,15 +163,17 @@ static void EmitHiLo(MCStreamer &OutStreamer, MCSymbol *GOTSym,
SparcMCExpr::VariantKind HiKind,
SparcMCExpr::VariantKind LoKind,
MCOperand &RD,
- MCContext &OutContext) {
+ MCContext &OutContext,
+ const MCSubtargetInfo &STI) {
MCOperand hi = createSparcMCOperand(HiKind, GOTSym, OutContext);
MCOperand lo = createSparcMCOperand(LoKind, GOTSym, OutContext);
- EmitSETHI(OutStreamer, hi, RD);
- EmitOR(OutStreamer, RD, lo, RD);
+ EmitSETHI(OutStreamer, hi, RD, STI);
+ EmitOR(OutStreamer, RD, lo, RD, STI);
}
-void SparcAsmPrinter::LowerGETPCXAndEmitMCInsts(const MachineInstr *MI)
+void SparcAsmPrinter::LowerGETPCXAndEmitMCInsts(const MachineInstr *MI,
+ const MCSubtargetInfo &STI)
{
MCSymbol *GOTLabel =
OutContext.GetOrCreateSymbol(Twine("_GLOBAL_OFFSET_TABLE_"));
@@ -182,33 +193,33 @@ void SparcAsmPrinter::LowerGETPCXAndEmitMCInsts(const MachineInstr *MI)
case CodeModel::Small:
EmitHiLo(OutStreamer, GOTLabel,
SparcMCExpr::VK_Sparc_HI, SparcMCExpr::VK_Sparc_LO,
- MCRegOP, OutContext);
+ MCRegOP, OutContext, STI);
break;
case CodeModel::Medium: {
EmitHiLo(OutStreamer, GOTLabel,
SparcMCExpr::VK_Sparc_H44, SparcMCExpr::VK_Sparc_M44,
- MCRegOP, OutContext);
+ MCRegOP, OutContext, STI);
MCOperand imm = MCOperand::CreateExpr(MCConstantExpr::Create(12,
OutContext));
- EmitSHL(OutStreamer, MCRegOP, imm, MCRegOP);
+ EmitSHL(OutStreamer, MCRegOP, imm, MCRegOP, STI);
MCOperand lo = createSparcMCOperand(SparcMCExpr::VK_Sparc_L44,
GOTLabel, OutContext);
- EmitOR(OutStreamer, MCRegOP, lo, MCRegOP);
+ EmitOR(OutStreamer, MCRegOP, lo, MCRegOP, STI);
break;
}
case CodeModel::Large: {
EmitHiLo(OutStreamer, GOTLabel,
SparcMCExpr::VK_Sparc_HH, SparcMCExpr::VK_Sparc_HM,
- MCRegOP, OutContext);
+ MCRegOP, OutContext, STI);
MCOperand imm = MCOperand::CreateExpr(MCConstantExpr::Create(32,
OutContext));
- EmitSHL(OutStreamer, MCRegOP, imm, MCRegOP);
+ EmitSHL(OutStreamer, MCRegOP, imm, MCRegOP, STI);
// Use register %o7 to load the lower 32 bits.
MCOperand RegO7 = MCOperand::CreateReg(SP::O7);
EmitHiLo(OutStreamer, GOTLabel,
SparcMCExpr::VK_Sparc_HI, SparcMCExpr::VK_Sparc_LO,
- RegO7, OutContext);
- EmitADD(OutStreamer, MCRegOP, RegO7, MCRegOP);
+ RegO7, OutContext, STI);
+ EmitADD(OutStreamer, MCRegOP, RegO7, MCRegOP, STI);
}
}
return;
@@ -230,18 +241,18 @@ void SparcAsmPrinter::LowerGETPCXAndEmitMCInsts(const MachineInstr *MI)
OutStreamer.EmitLabel(StartLabel);
MCOperand Callee = createPCXCallOP(EndLabel, OutContext);
- EmitCall(OutStreamer, Callee);
+ EmitCall(OutStreamer, Callee, STI);
OutStreamer.EmitLabel(SethiLabel);
MCOperand hiImm = createPCXRelExprOp(SparcMCExpr::VK_Sparc_PC22,
GOTLabel, StartLabel, SethiLabel,
OutContext);
- EmitSETHI(OutStreamer, hiImm, MCRegOP);
+ EmitSETHI(OutStreamer, hiImm, MCRegOP, STI);
OutStreamer.EmitLabel(EndLabel);
MCOperand loImm = createPCXRelExprOp(SparcMCExpr::VK_Sparc_PC10,
GOTLabel, StartLabel, EndLabel,
OutContext);
- EmitOR(OutStreamer, MCRegOP, loImm, MCRegOP);
- EmitADD(OutStreamer, MCRegOP, RegO7, MCRegOP);
+ EmitOR(OutStreamer, MCRegOP, loImm, MCRegOP, STI);
+ EmitADD(OutStreamer, MCRegOP, RegO7, MCRegOP, STI);
}
void SparcAsmPrinter::EmitInstruction(const MachineInstr *MI)
@@ -253,7 +264,7 @@ void SparcAsmPrinter::EmitInstruction(const MachineInstr *MI)
// FIXME: Debug Value.
return;
case SP::GETPCX:
- LowerGETPCXAndEmitMCInsts(MI);
+ LowerGETPCXAndEmitMCInsts(MI, getSubtargetInfo());
return;
}
MachineBasicBlock::const_instr_iterator I = MI;
@@ -261,7 +272,7 @@ void SparcAsmPrinter::EmitInstruction(const MachineInstr *MI)
do {
MCInst TmpInst;
LowerSparcMachineInstrToMCInst(I, TmpInst, *this);
- OutStreamer.EmitInstruction(TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
} while ((++I != E) && I->isInsideBundle()); // Delay slot check.
}
@@ -285,6 +296,7 @@ void SparcAsmPrinter::EmitFunctionBodyStart() {
void SparcAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
raw_ostream &O) {
+ const DataLayout *DL = TM.getDataLayout();
const MachineOperand &MO = MI->getOperand (opNum);
SparcMCExpr::VariantKind TF = (SparcMCExpr::VariantKind) MO.getTargetFlags();
@@ -361,7 +373,7 @@ void SparcAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
O << MO.getSymbolName();
break;
case MachineOperand::MO_ConstantPoolIndex:
- O << MAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber() << "_"
+ O << DL->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber() << "_"
<< MO.getIndex();
break;
default:
diff --git a/contrib/llvm/lib/Target/Sparc/SparcCodeEmitter.cpp b/contrib/llvm/lib/Target/Sparc/SparcCodeEmitter.cpp
index b7b2182..247da2a 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcCodeEmitter.cpp
@@ -12,7 +12,6 @@
//
//===---------------------------------------------------------------------===//
-#define DEBUG_TYPE "jit"
#include "Sparc.h"
#include "MCTargetDesc/SparcMCExpr.h"
#include "SparcRelocations.h"
@@ -25,6 +24,8 @@
using namespace llvm;
+#define DEBUG_TYPE "jit"
+
STATISTIC(NumEmitted, "Number of machine instructions emitted");
namespace {
@@ -39,7 +40,7 @@ class SparcCodeEmitter : public MachineFunctionPass {
const std::vector<MachineConstantPoolEntry> *MCPEs;
bool IsPIC;
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MachineModuleInfo> ();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -48,13 +49,13 @@ class SparcCodeEmitter : public MachineFunctionPass {
public:
SparcCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce)
- : MachineFunctionPass(ID), JTI(0), II(0), TD(0),
- TM(tm), MCE(mce), MCPEs(0),
+ : MachineFunctionPass(ID), JTI(nullptr), II(nullptr), TD(nullptr),
+ TM(tm), MCE(mce), MCPEs(nullptr),
IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
- bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Sparc Machine Code Emitter";
}
@@ -76,6 +77,10 @@ private:
unsigned) const;
unsigned getBranchTargetOpValue(const MachineInstr &MI,
unsigned) const;
+ unsigned getBranchPredTargetOpValue(const MachineInstr &MI,
+ unsigned) const;
+ unsigned getBranchOnRegTargetOpValue(const MachineInstr &MI,
+ unsigned) const;
void emitWord(unsigned Word);
@@ -141,7 +146,8 @@ void SparcCodeEmitter::emitInstruction(MachineBasicBlock::instr_iterator MI,
}
break;
}
- case TargetOpcode::PROLOG_LABEL:
+ case TargetOpcode::CFI_INSTRUCTION:
+ break;
case TargetOpcode::EH_LABEL: {
MCE.emitLabel(MI->getOperand(0).getMCSymbol());
break;
@@ -198,6 +204,18 @@ unsigned SparcCodeEmitter::getBranchTargetOpValue(const MachineInstr &MI,
return getMachineOpValue(MI, MO);
}
+unsigned SparcCodeEmitter::getBranchPredTargetOpValue(const MachineInstr &MI,
+ unsigned opIdx) const {
+ const MachineOperand MO = MI.getOperand(opIdx);
+ return getMachineOpValue(MI, MO);
+}
+
+unsigned SparcCodeEmitter::getBranchOnRegTargetOpValue(const MachineInstr &MI,
+ unsigned opIdx) const {
+ const MachineOperand MO = MI.getOperand(opIdx);
+ return getMachineOpValue(MI, MO);
+}
+
unsigned SparcCodeEmitter::getRelocation(const MachineInstr &MI,
const MachineOperand &MO) const {
diff --git a/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp b/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
index c75998a..3cdfda3 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
@@ -14,6 +14,7 @@
#include "SparcFrameLowering.h"
#include "SparcInstrInfo.h"
#include "SparcMachineFunctionInfo.h"
+#include "SparcSubtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -32,6 +33,9 @@ DisableLeafProc("disable-sparc-leaf-proc",
cl::desc("Disable Sparc leaf procedure optimization."),
cl::Hidden);
+SparcFrameLowering::SparcFrameLowering(const SparcSubtarget &ST)
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown,
+ ST.is64Bit() ? 16 : 8, 0, ST.is64Bit() ? 16 : 8) {}
void SparcFrameLowering::emitSPAdjustment(MachineFunction &MF,
MachineBasicBlock &MBB,
@@ -99,28 +103,33 @@ void SparcFrameLowering::emitPrologue(MachineFunction &MF) const {
SAVEri = SP::ADDri;
SAVErr = SP::ADDrr;
}
- NumBytes = - SubTarget.getAdjustedFrameSize(NumBytes);
+ NumBytes =
+ -MF.getTarget().getSubtarget<SparcSubtarget>().getAdjustedFrameSize(
+ NumBytes);
emitSPAdjustment(MF, MBB, MBBI, NumBytes, SAVErr, SAVEri);
MachineModuleInfo &MMI = MF.getMMI();
const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
- MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl, TII.get(SP::PROLOG_LABEL)).addSym(FrameLabel);
-
unsigned regFP = MRI->getDwarfRegNum(SP::I6, true);
// Emit ".cfi_def_cfa_register 30".
- MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(FrameLabel,
- regFP));
+ unsigned CFIIndex =
+ MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(nullptr, regFP));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
// Emit ".cfi_window_save".
- MMI.addFrameInst(MCCFIInstruction::createWindowSave(FrameLabel));
+ CFIIndex = MMI.addFrameInst(MCCFIInstruction::createWindowSave(nullptr));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
unsigned regInRA = MRI->getDwarfRegNum(SP::I7, true);
unsigned regOutRA = MRI->getDwarfRegNum(SP::O7, true);
// Emit ".cfi_register 15, 31".
- MMI.addFrameInst(MCCFIInstruction::createRegister(FrameLabel,
- regOutRA,
- regInRA));
+ CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createRegister(nullptr, regOutRA, regInRA));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
}
void SparcFrameLowering::
@@ -159,7 +168,8 @@ void SparcFrameLowering::emitEpilogue(MachineFunction &MF,
if (NumBytes == 0)
return;
- NumBytes = SubTarget.getAdjustedFrameSize(NumBytes);
+ NumBytes = MF.getTarget().getSubtarget<SparcSubtarget>().getAdjustedFrameSize(
+ NumBytes);
emitSPAdjustment(MF, MBB, MBBI, NumBytes, SP::ADDrr, SP::ADDri);
}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h b/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h
index 072fde3..a7d1b89 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h
@@ -15,33 +15,29 @@
#define SPARC_FRAMEINFO_H
#include "Sparc.h"
-#include "SparcSubtarget.h"
#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
- class SparcSubtarget;
+class SparcSubtarget;
class SparcFrameLowering : public TargetFrameLowering {
- const SparcSubtarget &SubTarget;
public:
- explicit SparcFrameLowering(const SparcSubtarget &ST)
- : TargetFrameLowering(TargetFrameLowering::StackGrowsDown,
- ST.is64Bit() ? 16 : 8, 0, ST.is64Bit() ? 16 : 8),
- SubTarget(ST) {}
+ explicit SparcFrameLowering(const SparcSubtarget &ST);
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+ void emitPrologue(MachineFunction &MF) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
- void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
+ void
+ eliminateCallFramePseudoInstr(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const override;
- bool hasReservedCallFrame(const MachineFunction &MF) const;
- bool hasFP(const MachineFunction &MF) const;
+ bool hasReservedCallFrame(const MachineFunction &MF) const override;
+ bool hasFP(const MachineFunction &MF) const override;
void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS = NULL) const;
+ RegScavenger *RS = nullptr) const override;
private:
// Remap input registers to output registers for leaf procedure.
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
index b012bfd..2fade27 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
@@ -41,7 +41,7 @@ public:
TM(tm) {
}
- SDNode *Select(SDNode *N);
+ SDNode *Select(SDNode *N) override;
// Complex Pattern Selectors.
bool SelectADDRrr(SDValue N, SDValue &R1, SDValue &R2);
@@ -49,11 +49,11 @@ public:
/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
/// inline asm expressions.
- virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
- char ConstraintCode,
- std::vector<SDValue> &OutOps);
+ bool SelectInlineAsmMemoryOperand(const SDValue &Op,
+ char ConstraintCode,
+ std::vector<SDValue> &OutOps) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "SPARC DAG->DAG Pattern Instruction Selection";
}
@@ -143,7 +143,7 @@ SDNode *SparcDAGToDAGISel::Select(SDNode *N) {
SDLoc dl(N);
if (N->isMachineOpcode()) {
N->setNodeId(-1);
- return NULL; // Already selected.
+ return nullptr; // Already selected.
}
switch (N->getOpcode()) {
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index abe2de6..990f52a 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -53,7 +53,7 @@ static bool CC_Sparc_Assign_f64(unsigned &ValNo, MVT &ValVT,
MVT &LocVT, CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
- static const uint16_t RegList[] = {
+ static const MCPhysReg RegList[] = {
SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
};
// Try to get first reg.
@@ -235,8 +235,7 @@ SparcTargetLowering::LowerReturn_32(SDValue Chain,
if (Flag.getNode())
RetOps.push_back(Flag);
- return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other,
- &RetOps[0], RetOps.size());
+ return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
}
// Lower return values for the 64-bit ABI.
@@ -272,6 +271,7 @@ SparcTargetLowering::LowerReturn_64(SDValue Chain,
// Integer return values must be sign or zero extended by the callee.
switch (VA.getLocInfo()) {
+ case CCValAssign::Full: break;
case CCValAssign::SExt:
OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
break;
@@ -280,8 +280,9 @@ SparcTargetLowering::LowerReturn_64(SDValue Chain,
break;
case CCValAssign::AExt:
OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
- default:
break;
+ default:
+ llvm_unreachable("Unknown loc info!");
}
// The custom bit on an i32 return value indicates that it should be passed
@@ -313,8 +314,7 @@ SparcTargetLowering::LowerReturn_64(SDValue Chain,
if (Flag.getNode())
RetOps.push_back(Flag);
- return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other,
- &RetOps[0], RetOps.size());
+ return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
}
SDValue SparcTargetLowering::
@@ -355,10 +355,13 @@ LowerFormalArguments_32(SDValue Chain,
const unsigned StackOffset = 92;
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ unsigned InIdx = 0;
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
CCValAssign &VA = ArgLocs[i];
- if (i == 0 && Ins[i].Flags.isSRet()) {
+ if (Ins[InIdx].Flags.isSRet()) {
+ if (InIdx != 0)
+ report_fatal_error("sparc only supports sret on the first parameter");
// Get SRet from [%fp+64].
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, 64, true);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
@@ -491,11 +494,11 @@ LowerFormalArguments_32(SDValue Chain,
// Store remaining ArgRegs to the stack if this is a varargs function.
if (isVarArg) {
- static const uint16_t ArgRegs[] = {
+ static const MCPhysReg ArgRegs[] = {
SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
};
unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs, 6);
- const uint16_t *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
+ const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
unsigned ArgOffset = CCInfo.getNextStackOffset();
if (NumAllocated == 6)
ArgOffset += StackOffset;
@@ -526,8 +529,7 @@ LowerFormalArguments_32(SDValue Chain,
if (!OutChains.empty()) {
OutChains.push_back(Chain);
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &OutChains[0], OutChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
}
@@ -642,8 +644,7 @@ LowerFormalArguments_64(SDValue Chain,
}
if (!OutChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
- &OutChains[0], OutChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
return Chain;
}
@@ -661,7 +662,7 @@ static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
if (CS)
return CS->hasFnAttr(Attribute::ReturnsTwice);
- const Function *CalleeFn = 0;
+ const Function *CalleeFn = nullptr;
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
CalleeFn = dyn_cast<Function>(G->getGlobal());
} else if (ExternalSymbolSDNode *E =
@@ -875,8 +876,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
// Emit all stores, make sure the occur before any copies into physregs.
if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
// Build a sequence of copy-to-reg nodes chained together with token
// chain and flag operands which copy the outgoing args into registers.
@@ -925,7 +925,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
if (InFlag.getNode())
Ops.push_back(InFlag);
- Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
+ Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
InFlag = Chain.getValue(1);
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true),
@@ -959,9 +959,9 @@ static bool isFP128ABICall(const char *CalleeName)
"_Q_sqrt", "_Q_neg",
"_Q_itoq", "_Q_stoq", "_Q_dtoq", "_Q_utoq",
"_Q_lltoq", "_Q_ulltoq",
- 0
+ nullptr
};
- for (const char * const *I = ABICalls; *I != 0; ++I)
+ for (const char * const *I = ABICalls; *I != nullptr; ++I)
if (strcmp(CalleeName, *I) == 0)
return true;
return false;
@@ -970,7 +970,7 @@ static bool isFP128ABICall(const char *CalleeName)
unsigned
SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
{
- const Function *CalleeFn = 0;
+ const Function *CalleeFn = nullptr;
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
CalleeFn = dyn_cast<Function>(G->getGlobal());
} else if (ExternalSymbolSDNode *E =
@@ -1192,8 +1192,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
// Emit all stores, make sure they occur before the call.
if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
// Build a sequence of CopyToReg nodes glued together with token chain and
// glue operands which copy the outgoing args into registers. The InGlue is
@@ -1243,7 +1242,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
// Now the call itself.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
- Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
+ Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
InGlue = Chain.getValue(1);
// Revert the stack pointer immediately after the call.
@@ -1261,7 +1260,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
// Set inreg flag manually for codegen generated library calls that
// return float.
- if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && CLI.CS == 0)
+ if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && CLI.CS == nullptr)
CLI.Ins[0].Flags.setInReg();
RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
@@ -1675,7 +1674,7 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
- default: return 0;
+ default: return nullptr;
case SPISD::CMPICC: return "SPISD::CMPICC";
case SPISD::CMPFCC: return "SPISD::CMPFCC";
case SPISD::BRICC: return "SPISD::BRICC";
@@ -1709,7 +1708,7 @@ EVT SparcTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
/// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
/// be zero. Op is expected to be a target specific node. Used by DAG
/// combiner.
-void SparcTargetLowering::computeMaskedBitsForTargetNode
+void SparcTargetLowering::computeKnownBitsForTargetNode
(const SDValue Op,
APInt &KnownZero,
APInt &KnownOne,
@@ -1723,10 +1722,8 @@ void SparcTargetLowering::computeMaskedBitsForTargetNode
case SPISD::SELECT_ICC:
case SPISD::SELECT_XCC:
case SPISD::SELECT_FCC:
- DAG.ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
- DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+ DAG.computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
+ DAG.computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
// Only known if known in both the LHS and RHS.
KnownOne &= KnownOne2;
@@ -1912,7 +1909,7 @@ SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
assert(Mask && "Missing call preserved mask for calling convention");
Ops.push_back(DAG.getRegisterMask(Mask));
Ops.push_back(InFlag);
- Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, &Ops[0], Ops.size());
+ Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
InFlag = Chain.getValue(1);
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, true),
DAG.getIntPtrConstant(0, true), InFlag, DL);
@@ -2031,13 +2028,10 @@ SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
for (unsigned i = 0, e = numArgs; i != e; ++i) {
Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
}
- TargetLowering::
- CallLoweringInfo CLI(Chain,
- RetTyABI,
- false, false, false, false,
- 0, CallingConv::C,
- false, false, true,
- Callee, Args, DAG, SDLoc(Op));
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
+ .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args), 0);
+
std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
// chain is in second result.
@@ -2063,7 +2057,7 @@ SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
SDLoc DL,
SelectionDAG &DAG) const {
- const char *LibCall = 0;
+ const char *LibCall = nullptr;
bool is64Bit = Subtarget->is64Bit();
switch(SPCC) {
default: llvm_unreachable("Unhandled conditional code!");
@@ -2090,13 +2084,9 @@ SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
- TargetLowering::
- CallLoweringInfo CLI(Chain,
- RetTy,
- false, false, false, false,
- 0, CallingConv::C,
- false, false, true,
- Callee, Args, DAG, DL);
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(DL).setChain(Chain)
+ .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
@@ -2172,7 +2162,7 @@ LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG,
TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
llvm_unreachable("fpextend with non-float operand!");
- return SDValue(0, 0);
+ return SDValue();
}
static SDValue
@@ -2190,7 +2180,7 @@ LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG,
TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
llvm_unreachable("fpround to non-float!");
- return SDValue(0, 0);
+ return SDValue();
}
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
@@ -2211,7 +2201,7 @@ static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
// Expand if the resulting type is illegal.
if (!TLI.isTypeLegal(VT))
- return SDValue(0, 0);
+ return SDValue();
// Otherwise, Convert the fp value to integer in an FP register.
if (VT == MVT::i32)
@@ -2242,7 +2232,7 @@ static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
// Expand if the operand type is illegal.
if (!TLI.isTypeLegal(OpVT))
- return SDValue(0, 0);
+ return SDValue();
// Otherwise, Convert the int value to FP in an FP register.
SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
@@ -2260,7 +2250,7 @@ static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
// quad floating point instructions and the resulting type is legal.
if (Op.getOperand(0).getValueType() != MVT::f128 ||
(hasHardQuad && TLI.isTypeLegal(VT)))
- return SDValue(0, 0);
+ return SDValue();
assert(VT == MVT::i32 || VT == MVT::i64);
@@ -2281,7 +2271,7 @@ static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
// Expand if it does not involve f128 or the target has support for
// quad floating point instructions and the operand type is legal.
if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
- return SDValue(0, 0);
+ return SDValue();
return TLI.LowerF128Op(Op, DAG,
TLI.getLibcallName(OpVT == MVT::i32
@@ -2426,7 +2416,7 @@ static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
DAG.getConstant(regSpillArea, VT));
SDValue Ops[2] = { NewVal, Chain };
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, dl);
}
@@ -2492,6 +2482,9 @@ static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
MachineFrameInfo *MFI = MF.getFrameInfo();
MFI->setReturnAddressIsTaken(true);
+ if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG))
+ return SDValue();
+
EVT VT = Op.getValueType();
SDLoc dl(Op);
uint64_t depth = Op.getConstantOperandVal(0);
@@ -2592,10 +2585,9 @@ static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
SubRegOdd);
SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
SDValue(Lo64.getNode(), 1) };
- SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &OutChains[0], 2);
+ SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, dl);
}
// Lower a f128 store into two f64 stores.
@@ -2639,12 +2631,12 @@ static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
LoPtr,
MachinePointerInfo(),
false, false, alignment);
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &OutChains[0], 2);
+ return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
- assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS) && "invalid");
+ assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
+ && "invalid opcode");
if (Op.getValueType() == MVT::f64)
return LowerF64Op(Op, DAG, Op.getOpcode());
@@ -2720,7 +2712,7 @@ static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
SDValue Ops[2] = { Dst, Carry };
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, dl);
}
// Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
@@ -2767,7 +2759,7 @@ static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
DAG.DeleteNode(MulResult.getNode());
SDValue Ops[2] = { BottomHalf, TopHalf } ;
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, dl);
}
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
@@ -2934,7 +2926,7 @@ SparcTargetLowering::expandSelectCC(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
+ std::next(MachineBasicBlock::iterator(MI)),
BB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
@@ -3086,7 +3078,7 @@ getSingleConstraintMatchWeight(AsmOperandInfo &info,
Value *CallOperandVal = info.CallOperandVal;
// If we don't have a value, we can't do a match,
// but allow it at the lowest weight.
- if (CallOperandVal == NULL)
+ if (!CallOperandVal)
return CW_Default;
// Look at the constraint type.
@@ -3111,7 +3103,7 @@ LowerAsmOperandForConstraint(SDValue Op,
std::string &Constraint,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const {
- SDValue Result(0, 0);
+ SDValue Result(nullptr, 0);
// Only support length 1 constraints for now.
if (Constraint.length() > 1)
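The SparcISelLowering.cpp hunks above repeatedly swap the old pointer-plus-count SelectionDAG calls (getNode(..., &Ops[0], Ops.size()), getMergeValues(Ops, 2, dl)) for the ArrayRef-taking overloads available in LLVM 3.5, and replace literal 0/NULL with nullptr. The standalone sketch below is only an analogue of that pointer+size to container migration; buildNode and Operand are hypothetical names, not LLVM APIs.

#include <cstddef>
#include <iostream>
#include <vector>

struct Operand { int Id; };

// Old-style interface: raw pointer plus an explicit element count, which the
// caller must keep in sync by hand (the pre-3.5 "&Ops[0], Ops.size()" pattern).
void buildNode(const Operand *Ops, std::size_t NumOps) {
  for (std::size_t I = 0; I != NumOps; ++I)
    std::cout << Ops[I].Id << ' ';
  std::cout << '\n';
}

// New-style interface: take the whole container (ArrayRef-like), so the call
// site simply passes Ops, as the updated lowering code above now does.
void buildNode(const std::vector<Operand> &Ops) {
  buildNode(Ops.data(), Ops.size());
}

int main() {
  std::vector<Operand> Ops = {{1}, {2}, {3}};
  buildNode(&Ops[0], Ops.size()); // old call style being removed in this diff
  buildNode(Ops);                 // new call style being introduced
  return 0;
}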
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h
index f7b45d0..a24cc82 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h
@@ -55,47 +55,47 @@ namespace llvm {
const SparcSubtarget *Subtarget;
public:
SparcTargetLowering(TargetMachine &TM);
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
- /// computeMaskedBitsForTargetNode - Determine which of the bits specified
+ /// computeKnownBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
- virtual void computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth = 0) const;
+ void computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth = 0) const override;
- virtual MachineBasicBlock *
+ MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB) const;
+ MachineBasicBlock *MBB) const override;
- virtual const char *getTargetNodeName(unsigned Opcode) const;
+ const char *getTargetNodeName(unsigned Opcode) const override;
- ConstraintType getConstraintType(const std::string &Constraint) const;
+ ConstraintType getConstraintType(const std::string &Constraint) const override;
ConstraintWeight
getSingleConstraintMatchWeight(AsmOperandInfo &info,
- const char *constraint) const;
+ const char *constraint) const override;
void LowerAsmOperandForConstraint(SDValue Op,
std::string &Constraint,
std::vector<SDValue> &Ops,
- SelectionDAG &DAG) const;
+ SelectionDAG &DAG) const override;
std::pair<unsigned, const TargetRegisterClass*>
- getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const;
+ getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const override;
- virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
- virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i32; }
+ bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
+ MVT getScalarShiftAmountTy(EVT LHSTy) const override { return MVT::i32; }
/// getSetCCResultType - Return the ISD::SETCC ValueType
- virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
+ EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;
- virtual SDValue
+ SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
+ SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerFormalArguments_32(SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
@@ -109,20 +109,20 @@ namespace llvm {
SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
- virtual SDValue
+ SDValue
LowerCall(TargetLowering::CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const;
+ SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
- virtual SDValue
+ SDValue
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const;
+ SDLoc dl, SelectionDAG &DAG) const override;
SDValue LowerReturn_32(SDValue Chain,
CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
@@ -156,15 +156,15 @@ namespace llvm {
SDLoc DL,
SelectionDAG &DAG) const;
- bool ShouldShrinkFPConstant(EVT VT) const {
+ bool ShouldShrinkFPConstant(EVT VT) const override {
// Do not shrink FP constpool if VT == MVT::f128.
// (ldd, call _Q_fdtoq) is more expensive than two ldds.
return VT != MVT::f128;
}
- virtual void ReplaceNodeResults(SDNode *N,
+ void ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue>& Results,
- SelectionDAG &DAG) const;
+ SelectionDAG &DAG) const override;
MachineBasicBlock *expandSelectCC(MachineInstr *MI, MachineBasicBlock *BB,
unsigned BROpcode) const;
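The SparcISelLowering.h hunks above drop the leading virtual from overriding declarations and mark them override instead. A minimal standalone illustration of what that buys (Base and Derived are assumed names, not Sparc classes): a declaration that no longer matches the base-class virtual becomes a compile error instead of a silently unused overload.

#include <iostream>

struct Base {
  virtual const char *getTargetNodeName(unsigned) const { return "base"; }
  virtual ~Base() = default;
};

struct Derived : Base {
  // 'override' asks the compiler to verify this really overrides the base
  // virtual; e.g. changing the parameter type to 'int' here would be rejected
  // at compile time rather than creating a new, never-called function.
  const char *getTargetNodeName(unsigned) const override { return "derived"; }
};

int main() {
  Derived D;
  const Base &B = D;
  std::cout << B.getTargetNodeName(0) << '\n'; // prints "derived"
  return 0;
}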
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstr64Bit.td b/contrib/llvm/lib/Target/Sparc/SparcInstr64Bit.td
index a5b48f9..54d8240 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcInstr64Bit.td
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstr64Bit.td
@@ -235,7 +235,8 @@ def UDIVXri : F3_2<2, 0b001101,
let Predicates = [Is64Bit] in {
// 64-bit loads.
-defm LDX : Load<"ldx", 0b001011, load, I64Regs, i64>;
+let DecoderMethod = "DecodeLoadInt" in
+ defm LDX : Load<"ldx", 0b001011, load, I64Regs, i64>;
let mayLoad = 1, isCodeGenOnly = 1, isAsmParserOnly = 1 in
def TLS_LDXrr : F3_1<3, 0b001011,
@@ -270,10 +271,12 @@ def : Pat<(i64 (extloadi32 ADDRrr:$addr)), (LDrr ADDRrr:$addr)>;
def : Pat<(i64 (extloadi32 ADDRri:$addr)), (LDri ADDRri:$addr)>;
// Sign-extending load of i32 into i64 is a new SPARC v9 instruction.
-defm LDSW : Load<"ldsw", 0b001000, sextloadi32, I64Regs, i64>;
+let DecoderMethod = "DecodeLoadInt" in
+ defm LDSW : Load<"ldsw", 0b001000, sextloadi32, I64Regs, i64>;
// 64-bit stores.
-defm STX : Store<"stx", 0b001110, store, I64Regs, i64>;
+let DecoderMethod = "DecodeStoreInt" in
+ defm STX : Store<"stx", 0b001110, store, I64Regs, i64>;
// Truncating stores from i64 are identical to the i32 stores.
def : Pat<(truncstorei8 i64:$src, ADDRrr:$addr), (STBrr ADDRrr:$addr, $src)>;
@@ -294,14 +297,6 @@ def : Pat<(store (i64 0), ADDRri:$dst), (STXri ADDRri:$dst, (i64 G0))>;
// 64-bit Conditionals.
//===----------------------------------------------------------------------===//
-// Conditional branch class on %xcc:
-class XBranchSP<dag ins, string asmstr, list<dag> pattern>
- : F2_3<0b001, 0b10, (outs), ins, asmstr, pattern> {
- let isBranch = 1;
- let isTerminator = 1;
- let hasDelaySlot = 1;
-}
-
//
// Flag-setting instructions like subcc and addcc set both icc and xcc flags.
// The icc flags correspond to the 32-bit result, and the xcc are for the
@@ -312,14 +307,12 @@ class XBranchSP<dag ins, string asmstr, list<dag> pattern>
let Predicates = [Is64Bit] in {
-let Uses = [ICC] in
-def BPXCC : XBranchSP<(ins brtarget:$imm19, CCOp:$cond),
- "b$cond %xcc, $imm19",
- [(SPbrxcc bb:$imm19, imm:$cond)]>;
+let Uses = [ICC], cc = 0b10 in
+ defm BPX : IPredBranch<"%xcc", [(SPbrxcc bb:$imm19, imm:$cond)]>;
// Conditional moves on %xcc.
let Uses = [ICC], Constraints = "$f = $rd" in {
-let cc = 0b110 in {
+let intcc = 1, cc = 0b10 in {
def MOVXCCrr : F4_1<0b101100, (outs IntRegs:$rd),
(ins IntRegs:$rs2, IntRegs:$f, CCOp:$cond),
"mov$cond %xcc, $rs2, $rd",
@@ -332,7 +325,7 @@ def MOVXCCri : F4_2<0b101100, (outs IntRegs:$rd),
(SPselectxcc simm11:$simm11, i32:$f, imm:$cond))]>;
} // cc
-let opf_cc = 0b110 in {
+let intcc = 1, opf_cc = 0b10 in {
def FMOVS_XCC : F4_3<0b110101, 0b000001, (outs FPRegs:$rd),
(ins FPRegs:$rs2, FPRegs:$f, CCOp:$cond),
"fmovs$cond %xcc, $rs2, $rd",
@@ -351,6 +344,84 @@ def FMOVQ_XCC : F4_3<0b110101, 0b000011, (outs QFPRegs:$rd),
} // opf_cc
} // Uses, Constraints
+// Branch On integer register with Prediction (BPr).
+let isBranch = 1, isTerminator = 1, hasDelaySlot = 1 in
+multiclass BranchOnReg<bits<3> cond, string OpcStr> {
+ def napt : F2_4<cond, 0, 1, (outs), (ins I64Regs:$rs1, bprtarget16:$imm16),
+ !strconcat(OpcStr, " $rs1, $imm16"), []>;
+ def apt : F2_4<cond, 1, 1, (outs), (ins I64Regs:$rs1, bprtarget16:$imm16),
+ !strconcat(OpcStr, ",a $rs1, $imm16"), []>;
+ def napn : F2_4<cond, 0, 0, (outs), (ins I64Regs:$rs1, bprtarget16:$imm16),
+ !strconcat(OpcStr, ",pn $rs1, $imm16"), []>;
+ def apn : F2_4<cond, 1, 0, (outs), (ins I64Regs:$rs1, bprtarget16:$imm16),
+ !strconcat(OpcStr, ",a,pn $rs1, $imm16"), []>;
+}
+
+multiclass bpr_alias<string OpcStr, Instruction NAPT, Instruction APT> {
+ def : InstAlias<!strconcat(OpcStr, ",pt $rs1, $imm16"),
+ (NAPT I64Regs:$rs1, bprtarget16:$imm16), 0>;
+ def : InstAlias<!strconcat(OpcStr, ",a,pt $rs1, $imm16"),
+ (APT I64Regs:$rs1, bprtarget16:$imm16), 0>;
+}
+
+defm BPZ : BranchOnReg<0b001, "brz">;
+defm BPLEZ : BranchOnReg<0b010, "brlez">;
+defm BPLZ : BranchOnReg<0b011, "brlz">;
+defm BPNZ : BranchOnReg<0b101, "brnz">;
+defm BPGZ : BranchOnReg<0b110, "brgz">;
+defm BPGEZ : BranchOnReg<0b111, "brgez">;
+
+defm : bpr_alias<"brz", BPZnapt, BPZapt >;
+defm : bpr_alias<"brlez", BPLEZnapt, BPLEZapt>;
+defm : bpr_alias<"brlz", BPLZnapt, BPLZapt >;
+defm : bpr_alias<"brnz", BPNZnapt, BPNZapt >;
+defm : bpr_alias<"brgz", BPGZnapt, BPGZapt >;
+defm : bpr_alias<"brgez", BPGEZnapt, BPGEZapt>;
+
+// Move integer register on register condition (MOVr).
+multiclass MOVR< bits<3> rcond, string OpcStr> {
+ def rr : F4_4r<0b101111, 0b00000, rcond, (outs I64Regs:$rd),
+ (ins I64Regs:$rs1, IntRegs:$rs2),
+ !strconcat(OpcStr, " $rs1, $rs2, $rd"), []>;
+
+ def ri : F4_4i<0b101111, rcond, (outs I64Regs:$rd),
+ (ins I64Regs:$rs1, i64imm:$simm10),
+ !strconcat(OpcStr, " $rs1, $simm10, $rd"), []>;
+}
+
+defm MOVRRZ : MOVR<0b001, "movrz">;
+defm MOVRLEZ : MOVR<0b010, "movrlez">;
+defm MOVRLZ : MOVR<0b011, "movrlz">;
+defm MOVRNZ : MOVR<0b101, "movrnz">;
+defm MOVRGZ : MOVR<0b110, "movrgz">;
+defm MOVRGEZ : MOVR<0b111, "movrgez">;
+
+// Move FP register on integer register condition (FMOVr).
+multiclass FMOVR<bits<3> rcond, string OpcStr> {
+
+ def S : F4_4r<0b110101, 0b00101, rcond,
+ (outs FPRegs:$rd), (ins I64Regs:$rs1, FPRegs:$rs2),
+ !strconcat(!strconcat("fmovrs", OpcStr)," $rs1, $rs2, $rd"),
+ []>;
+ def D : F4_4r<0b110101, 0b00110, rcond,
+ (outs FPRegs:$rd), (ins I64Regs:$rs1, FPRegs:$rs2),
+ !strconcat(!strconcat("fmovrd", OpcStr)," $rs1, $rs2, $rd"),
+ []>;
+ def Q : F4_4r<0b110101, 0b00111, rcond,
+ (outs FPRegs:$rd), (ins I64Regs:$rs1, FPRegs:$rs2),
+ !strconcat(!strconcat("fmovrq", OpcStr)," $rs1, $rs2, $rd"),
+ []>, Requires<[HasHardQuad]>;
+}
+
+let Predicates = [HasV9] in {
+ defm FMOVRZ : FMOVR<0b001, "z">;
+ defm FMOVRLEZ : FMOVR<0b010, "lez">;
+ defm FMOVRLZ : FMOVR<0b011, "lz">;
+ defm FMOVRNZ : FMOVR<0b101, "nz">;
+ defm FMOVRGZ : FMOVR<0b110, "gz">;
+ defm FMOVRGEZ : FMOVR<0b111, "gez">;
+}
+
//===----------------------------------------------------------------------===//
// 64-bit Floating Point Conversions.
//===----------------------------------------------------------------------===//
@@ -471,6 +542,9 @@ def ATOMIC_SWAP_64 : Pseudo<(outs I64Regs:$rd),
[(set i64:$rd,
(atomic_swap_64 iPTR:$addr, i64:$rs2))]>;
+let Predicates = [Is64Bit], hasSideEffects = 1, Uses = [ICC], cc = 0b10 in
+ defm TXCC : TRAP<"%xcc">;
+
// Global addresses, constant pool entries
let Predicates = [Is64Bit] in {
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrAliases.td b/contrib/llvm/lib/Target/Sparc/SparcInstrAliases.td
index 7242c59..d36f67b 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcInstrAliases.td
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrAliases.td
@@ -13,31 +13,52 @@
// Instruction aliases for conditional moves.
// mov<cond> <ccreg> rs2, rd
-multiclass cond_mov_alias<string cond, int condVal, string ccreg,
+multiclass intcond_mov_alias<string cond, int condVal, string ccreg,
Instruction movrr, Instruction movri,
Instruction fmovs, Instruction fmovd> {
- // mov<cond> (%icc|%xcc|%fcc0), rs2, rd
+ // mov<cond> (%icc|%xcc), rs2, rd
def : InstAlias<!strconcat(!strconcat(!strconcat("mov", cond), ccreg),
", $rs2, $rd"),
(movrr IntRegs:$rd, IntRegs:$rs2, condVal)>;
- // mov<cond> (%icc|%xcc|%fcc0), simm11, rd
+ // mov<cond> (%icc|%xcc), simm11, rd
def : InstAlias<!strconcat(!strconcat(!strconcat("mov", cond), ccreg),
", $simm11, $rd"),
(movri IntRegs:$rd, i32imm:$simm11, condVal)>;
- // fmovs<cond> (%icc|%xcc|%fcc0), $rs2, $rd
+ // fmovs<cond> (%icc|%xcc), $rs2, $rd
def : InstAlias<!strconcat(!strconcat(!strconcat("fmovs", cond), ccreg),
", $rs2, $rd"),
(fmovs FPRegs:$rd, FPRegs:$rs2, condVal)>;
- // fmovd<cond> (%icc|%xcc|%fcc0), $rs2, $rd
+ // fmovd<cond> (%icc|%xcc), $rs2, $rd
def : InstAlias<!strconcat(!strconcat(!strconcat("fmovd", cond), ccreg),
", $rs2, $rd"),
(fmovd DFPRegs:$rd, DFPRegs:$rs2, condVal)>;
}
+// mov<cond> <ccreg> rs2, rd
+multiclass fpcond_mov_alias<string cond, int condVal,
+ Instruction movrr, Instruction movri,
+ Instruction fmovs, Instruction fmovd> {
+
+ // mov<cond> %fcc[0-3], rs2, rd
+ def : InstAlias<!strconcat(!strconcat("mov", cond), " $cc, $rs2, $rd"),
+ (movrr IntRegs:$rd, FCCRegs:$cc, IntRegs:$rs2, condVal)>;
+
+ // mov<cond> %fcc[0-3], simm11, rd
+ def : InstAlias<!strconcat(!strconcat("mov", cond), " $cc, $simm11, $rd"),
+ (movri IntRegs:$rd, FCCRegs:$cc, i32imm:$simm11, condVal)>;
+
+ // fmovs<cond> %fcc[0-3], $rs2, $rd
+ def : InstAlias<!strconcat(!strconcat("fmovs", cond), " $cc, $rs2, $rd"),
+ (fmovs FPRegs:$rd, FCCRegs:$cc, FPRegs:$rs2, condVal)>;
+
+ // fmovd<cond> %fcc[0-3], $rs2, $rd
+ def : InstAlias<!strconcat(!strconcat("fmovd", cond), " $cc, $rs2, $rd"),
+ (fmovd DFPRegs:$rd, FCCRegs:$cc, DFPRegs:$rs2, condVal)>;
+}
// Instruction aliases for integer conditional branches and moves.
multiclass int_cond_alias<string cond, int condVal> {
@@ -46,15 +67,64 @@ multiclass int_cond_alias<string cond, int condVal> {
def : InstAlias<!strconcat(!strconcat("b", cond), " $imm"),
(BCOND brtarget:$imm, condVal)>;
+ // b<cond>,a $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",a $imm"),
+ (BCONDA brtarget:$imm, condVal)>;
+
+ // b<cond> %icc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), " %icc, $imm"),
+ (BPICC brtarget:$imm, condVal)>, Requires<[HasV9]>;
+
+ // b<cond>,pt %icc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",pt %icc, $imm"),
+ (BPICC brtarget:$imm, condVal)>, Requires<[HasV9]>;
+
+ // b<cond>,a %icc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",a %icc, $imm"),
+ (BPICCA brtarget:$imm, condVal)>, Requires<[HasV9]>;
+
+ // b<cond>,a,pt %icc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",a,pt %icc, $imm"),
+ (BPICCA brtarget:$imm, condVal)>, Requires<[HasV9]>;
+
+ // b<cond>,pn %icc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",pn %icc, $imm"),
+ (BPICCNT brtarget:$imm, condVal)>, Requires<[HasV9]>;
+
+ // b<cond>,a,pn %icc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",a,pn %icc, $imm"),
+ (BPICCANT brtarget:$imm, condVal)>, Requires<[HasV9]>;
+
// b<cond> %xcc, $imm
def : InstAlias<!strconcat(!strconcat("b", cond), " %xcc, $imm"),
(BPXCC brtarget:$imm, condVal)>, Requires<[Is64Bit]>;
- defm : cond_mov_alias<cond, condVal, " %icc",
+ // b<cond>,pt %xcc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",pt %xcc, $imm"),
+ (BPXCC brtarget:$imm, condVal)>, Requires<[Is64Bit]>;
+
+ // b<cond>,a %xcc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",a %xcc, $imm"),
+ (BPXCCA brtarget:$imm, condVal)>, Requires<[Is64Bit]>;
+
+ // b<cond>,a,pt %xcc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",a,pt %xcc, $imm"),
+ (BPXCCA brtarget:$imm, condVal)>, Requires<[Is64Bit]>;
+
+ // b<cond>,pn %xcc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",pn %xcc, $imm"),
+ (BPXCCNT brtarget:$imm, condVal)>, Requires<[Is64Bit]>;
+
+ // b<cond>,a,pn %xcc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",a,pn %xcc, $imm"),
+ (BPXCCANT brtarget:$imm, condVal)>, Requires<[Is64Bit]>;
+
+
+ defm : intcond_mov_alias<cond, condVal, " %icc",
MOVICCrr, MOVICCri,
FMOVS_ICC, FMOVD_ICC>, Requires<[HasV9]>;
- defm : cond_mov_alias<cond, condVal, " %xcc",
+ defm : intcond_mov_alias<cond, condVal, " %xcc",
MOVXCCrr, MOVXCCri,
FMOVS_XCC, FMOVD_XCC>, Requires<[Is64Bit]>;
@@ -66,6 +136,59 @@ multiclass int_cond_alias<string cond, int condVal> {
(FMOVQ_XCC QFPRegs:$rd, QFPRegs:$rs2, condVal)>,
Requires<[Is64Bit, HasHardQuad]>;
+ // t<cond> %icc, rs1 + rs2
+ def : InstAlias<!strconcat(!strconcat("t", cond), " %icc, $rs1 + $rs2"),
+ (TICCrr IntRegs:$rs1, IntRegs:$rs2, condVal)>,
+ Requires<[HasV9]>;
+
+ // t<cond> %icc, rs => t<cond> %icc, G0 + rs
+ def : InstAlias<!strconcat(!strconcat("t", cond), " %icc, $rs2"),
+ (TICCrr G0, IntRegs:$rs2, condVal)>,
+ Requires<[HasV9]>;
+
+ // t<cond> %xcc, rs1 + rs2
+ def : InstAlias<!strconcat(!strconcat("t", cond), " %xcc, $rs1 + $rs2"),
+ (TXCCrr IntRegs:$rs1, IntRegs:$rs2, condVal)>,
+ Requires<[HasV9]>;
+
+ // t<cond> %xcc, rs => t<cond> %xcc, G0 + rs
+ def : InstAlias<!strconcat(!strconcat("t", cond), " %xcc, $rs2"),
+ (TXCCrr G0, IntRegs:$rs2, condVal)>,
+ Requires<[HasV9]>;
+
+ // t<cond> rs1 + rs2 => t<cond> %icc, rs1 + rs2
+ def : InstAlias<!strconcat(!strconcat("t", cond), " $rs1 + $rs2"),
+ (TICCrr IntRegs:$rs1, IntRegs:$rs2, condVal)>;
+
+  // t<cond> rs => t<cond> %icc, G0 + rs2
+ def : InstAlias<!strconcat(!strconcat("t", cond), " $rs2"),
+ (TICCrr G0, IntRegs:$rs2, condVal)>;
+
+ // t<cond> %icc, rs1 + imm
+ def : InstAlias<!strconcat(!strconcat("t", cond), " %icc, $rs1 + $imm"),
+ (TICCri IntRegs:$rs1, i32imm:$imm, condVal)>,
+ Requires<[HasV9]>;
+ // t<cond> %icc, imm => t<cond> %icc, G0 + imm
+ def : InstAlias<!strconcat(!strconcat("t", cond), " %icc, $imm"),
+ (TICCri G0, i32imm:$imm, condVal)>,
+ Requires<[HasV9]>;
+ // t<cond> %xcc, rs1 + imm
+ def : InstAlias<!strconcat(!strconcat("t", cond), " %xcc, $rs1 + $imm"),
+ (TXCCri IntRegs:$rs1, i32imm:$imm, condVal)>,
+ Requires<[HasV9]>;
+ // t<cond> %xcc, imm => t<cond> %xcc, G0 + imm
+ def : InstAlias<!strconcat(!strconcat("t", cond), " %xcc, $imm"),
+ (TXCCri G0, i32imm:$imm, condVal)>,
+ Requires<[HasV9]>;
+
+ // t<cond> rs1 + imm => t<cond> %icc, rs1 + imm
+ def : InstAlias<!strconcat(!strconcat("t", cond), " $rs1 + $imm"),
+ (TICCri IntRegs:$rs1, i32imm:$imm, condVal)>;
+
+ // t<cond> imm => t<cond> %icc, G0 + imm
+ def : InstAlias<!strconcat(!strconcat("t", cond), " $imm"),
+ (TICCri G0, i32imm:$imm, condVal)>;
+
}
@@ -76,13 +199,48 @@ multiclass fp_cond_alias<string cond, int condVal> {
def : InstAlias<!strconcat(!strconcat("fb", cond), " $imm"),
(FBCOND brtarget:$imm, condVal), 0>;
- defm : cond_mov_alias<cond, condVal, " %fcc0",
- MOVFCCrr, MOVFCCri,
- FMOVS_FCC, FMOVD_FCC>, Requires<[HasV9]>;
+ // fb<cond>,a $imm
+ def : InstAlias<!strconcat(!strconcat("fb", cond), ",a $imm"),
+ (FBCONDA brtarget:$imm, condVal), 0>;
+
+ // fb<cond> %fcc0, $imm
+ def : InstAlias<!strconcat(!strconcat("fb", cond), " $cc, $imm"),
+ (BPFCC brtarget:$imm, condVal, FCCRegs:$cc)>,
+ Requires<[HasV9]>;
+
+ // fb<cond>,pt %fcc0, $imm
+ def : InstAlias<!strconcat(!strconcat("fb", cond), ",pt $cc, $imm"),
+ (BPFCC brtarget:$imm, condVal, FCCRegs:$cc)>,
+ Requires<[HasV9]>;
+
+ // fb<cond>,a %fcc0, $imm
+ def : InstAlias<!strconcat(!strconcat("fb", cond), ",a $cc, $imm"),
+ (BPFCCA brtarget:$imm, condVal, FCCRegs:$cc)>,
+ Requires<[HasV9]>;
+
+ // fb<cond>,a,pt %fcc0, $imm
+ def : InstAlias<!strconcat(!strconcat("fb", cond), ",a,pt $cc, $imm"),
+ (BPFCCA brtarget:$imm, condVal, FCCRegs:$cc)>,
+ Requires<[HasV9]>;
+
+ // fb<cond>,pn %fcc0, $imm
+ def : InstAlias<!strconcat(!strconcat("fb", cond), ",pn $cc, $imm"),
+ (BPFCCNT brtarget:$imm, condVal, FCCRegs:$cc)>,
+ Requires<[HasV9]>;
+
+ // fb<cond>,a,pn %fcc0, $imm
+ def : InstAlias<!strconcat(!strconcat("fb", cond), ",a,pn $cc, $imm"),
+ (BPFCCANT brtarget:$imm, condVal, FCCRegs:$cc)>,
+ Requires<[HasV9]>;
+
+ defm : fpcond_mov_alias<cond, condVal,
+ V9MOVFCCrr, V9MOVFCCri,
+ V9FMOVS_FCC, V9FMOVD_FCC>, Requires<[HasV9]>;
// fmovq<cond> %fcc0, $rs2, $rd
- def : InstAlias<!strconcat(!strconcat("fmovq", cond), " %fcc0, $rs2, $rd"),
- (FMOVQ_ICC QFPRegs:$rd, QFPRegs:$rs2, condVal)>,
+ def : InstAlias<!strconcat(!strconcat("fmovq", cond), " $cc, $rs2, $rd"),
+ (V9FMOVQ_FCC QFPRegs:$rd, FCCRegs:$cc, QFPRegs:$rs2,
+ condVal)>,
Requires<[HasV9, HasHardQuad]>;
}
@@ -103,6 +261,8 @@ defm : int_cond_alias<"neg", 0b0110>;
defm : int_cond_alias<"vc", 0b1111>;
defm : int_cond_alias<"vs", 0b0111>;
+defm : fp_cond_alias<"a", 0b0000>;
+defm : fp_cond_alias<"n", 0b1000>;
defm : fp_cond_alias<"u", 0b0111>;
defm : fp_cond_alias<"g", 0b0110>;
defm : fp_cond_alias<"ug", 0b0101>;
@@ -118,16 +278,15 @@ defm : fp_cond_alias<"le", 0b1101>;
defm : fp_cond_alias<"ule", 0b1110>;
defm : fp_cond_alias<"o", 0b1111>;
-
// Instruction aliases for JMPL.
// jmp addr -> jmpl addr, %g0
-def : InstAlias<"jmp $addr", (JMPLrr G0, MEMrr:$addr)>;
-def : InstAlias<"jmp $addr", (JMPLri G0, MEMri:$addr)>;
+def : InstAlias<"jmp $addr", (JMPLrr G0, MEMrr:$addr), 0>;
+def : InstAlias<"jmp $addr", (JMPLri G0, MEMri:$addr), 0>;
// call addr -> jmpl addr, %o7
-def : InstAlias<"call $addr", (JMPLrr O7, MEMrr:$addr)>;
-def : InstAlias<"call $addr", (JMPLri O7, MEMri:$addr)>;
+def : InstAlias<"call $addr", (JMPLrr O7, MEMrr:$addr), 0>;
+def : InstAlias<"call $addr", (JMPLri O7, MEMri:$addr), 0>;
// retl -> RETL 8
def : InstAlias<"retl", (RETL 8)>;
@@ -140,3 +299,27 @@ def : InstAlias<"mov $rs2, $rd", (ORrr IntRegs:$rd, G0, IntRegs:$rs2)>;
// mov simm13, rd -> or %g0, simm13, rd
def : InstAlias<"mov $simm13, $rd", (ORri IntRegs:$rd, G0, i32imm:$simm13)>;
+
+// restore -> restore %g0, %g0, %g0
+def : InstAlias<"restore", (RESTORErr G0, G0, G0)>;
+
+def : MnemonicAlias<"return", "rett">, Requires<[HasV9]>;
+
+def : MnemonicAlias<"addc", "addx">, Requires<[HasV9]>;
+def : MnemonicAlias<"addccc", "addxcc">, Requires<[HasV9]>;
+
+def : MnemonicAlias<"subc", "subx">, Requires<[HasV9]>;
+def : MnemonicAlias<"subccc", "subxcc">, Requires<[HasV9]>;
+
+
+def : InstAlias<"fcmps $rs1, $rs2", (V9FCMPS FCC0, FPRegs:$rs1, FPRegs:$rs2)>;
+def : InstAlias<"fcmpd $rs1, $rs2", (V9FCMPD FCC0, DFPRegs:$rs1, DFPRegs:$rs2)>;
+def : InstAlias<"fcmpq $rs1, $rs2", (V9FCMPQ FCC0, QFPRegs:$rs1, QFPRegs:$rs2)>,
+ Requires<[HasHardQuad]>;
+
+def : InstAlias<"fcmpes $rs1, $rs2", (V9FCMPES FCC0, FPRegs:$rs1, FPRegs:$rs2)>;
+def : InstAlias<"fcmped $rs1, $rs2", (V9FCMPED FCC0, DFPRegs:$rs1,
+ DFPRegs:$rs2)>;
+def : InstAlias<"fcmpeq $rs1, $rs2", (V9FCMPEQ FCC0, QFPRegs:$rs1,
+ QFPRegs:$rs2)>,
+ Requires<[HasHardQuad]>;
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrFormats.td b/contrib/llvm/lib/Target/Sparc/SparcInstrFormats.td
index b38a663..3b5e238 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcInstrFormats.td
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrFormats.td
@@ -51,38 +51,51 @@ class F2_1<bits<3> op2Val, dag outs, dag ins, string asmstr, list<dag> pattern>
let Inst{29-25} = rd;
}
-class F2_2<bits<3> op2Val, dag outs, dag ins, string asmstr,
+class F2_2<bits<3> op2Val, bit annul, dag outs, dag ins, string asmstr,
list<dag> pattern> : F2<outs, ins, asmstr, pattern> {
bits<4> cond;
- bit annul = 0; // currently unused
-
let op2 = op2Val;
let Inst{29} = annul;
let Inst{28-25} = cond;
}
-class F2_3<bits<3> op2Val, bits<2> ccVal, dag outs, dag ins, string asmstr,
- list<dag> pattern>
- : InstSP<outs, ins, asmstr, pattern> {
- bit annul;
+class F2_3<bits<3> op2Val, bit annul, bit pred,
+ dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSP<outs, ins, asmstr, pattern> {
+ bits<2> cc;
bits<4> cond;
- bit pred;
bits<19> imm19;
let op = 0; // op = 0
- bit annul = 0; // currently unused
- let pred = 1; // default is predict taken
-
let Inst{29} = annul;
let Inst{28-25} = cond;
let Inst{24-22} = op2Val;
- let Inst{21-20} = ccVal;
+ let Inst{21-20} = cc;
let Inst{19} = pred;
let Inst{18-0} = imm19;
}
+class F2_4<bits<3> cond, bit annul, bit pred,
+ dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSP<outs, ins, asmstr, pattern> {
+ bits<16> imm16;
+ bits<5> rs1;
+
+ let op = 0; // op = 0
+
+ let Inst{29} = annul;
+ let Inst{28} = 0;
+ let Inst{27-25} = cond;
+ let Inst{24-22} = 0b011;
+ let Inst{21-20} = imm16{15-14};
+ let Inst{19} = pred;
+ let Inst{18-14} = rs1;
+ let Inst{13-0} = imm16{13-0};
+}
+
+
//===----------------------------------------------------------------------===//
// Format #3 instruction classes in the Sparc
//===----------------------------------------------------------------------===//
@@ -159,7 +172,6 @@ class F3_3c<bits<2> opVal, bits<6> op3val, bits<9> opfval, dag outs, dag ins,
let op = opVal;
let op3 = op3val;
- let rd = 0;
let Inst{13-5} = opfval; // fp opcode
let Inst{4-0} = rs2;
@@ -218,44 +230,101 @@ class F4_1<bits<6> op3, dag outs, dag ins,
string asmstr, list<dag> pattern>
: F4<op3, outs, ins, asmstr, pattern> {
- bits<3> cc;
+ bit intcc;
+ bits<2> cc;
bits<4> cond;
bits<5> rs2;
let Inst{4-0} = rs2;
- let Inst{11} = cc{0};
- let Inst{12} = cc{1};
+ let Inst{12-11} = cc;
let Inst{13} = 0;
let Inst{17-14} = cond;
- let Inst{18} = cc{2};
+ let Inst{18} = intcc;
}
class F4_2<bits<6> op3, dag outs, dag ins,
string asmstr, list<dag> pattern>
: F4<op3, outs, ins, asmstr, pattern> {
- bits<3> cc;
+ bit intcc;
+ bits<2> cc;
bits<4> cond;
bits<11> simm11;
let Inst{10-0} = simm11;
- let Inst{11} = cc{0};
- let Inst{12} = cc{1};
+ let Inst{12-11} = cc;
let Inst{13} = 1;
let Inst{17-14} = cond;
- let Inst{18} = cc{2};
+ let Inst{18} = intcc;
}
class F4_3<bits<6> op3, bits<6> opf_low, dag outs, dag ins,
string asmstr, list<dag> pattern>
: F4<op3, outs, ins, asmstr, pattern> {
bits<4> cond;
- bits<3> opf_cc;
+ bit intcc;
+ bits<2> opf_cc;
bits<5> rs2;
let Inst{18} = 0;
let Inst{17-14} = cond;
- let Inst{13-11} = opf_cc;
+ let Inst{13} = intcc;
+ let Inst{12-11} = opf_cc;
let Inst{10-5} = opf_low;
let Inst{4-0} = rs2;
}
+
+class F4_4r<bits<6> op3, bits<5> opf_low, bits<3> rcond, dag outs, dag ins,
+ string asmstr, list<dag> pattern>
+ : F4<op3, outs, ins, asmstr, pattern> {
+ bits <5> rs1;
+ bits <5> rs2;
+ let Inst{18-14} = rs1;
+ let Inst{13} = 0; // IsImm
+ let Inst{12-10} = rcond;
+ let Inst{9-5} = opf_low;
+ let Inst{4-0} = rs2;
+}
+
+
+class F4_4i<bits<6> op3, bits<3> rcond, dag outs, dag ins,
+ string asmstr, list<dag> pattern>
+ : F4<op3, outs, ins, asmstr, pattern> {
+ bits<5> rs1;
+ bits<10> simm10;
+ let Inst{18-14} = rs1;
+ let Inst{13} = 1; // IsImm
+ let Inst{12-10} = rcond;
+ let Inst{9-0} = simm10;
+}
+
+
+class TRAPSP<bits<6> op3Val, bit isimm, dag outs, dag ins, string asmstr,
+ list<dag> pattern>: F3<outs, ins, asmstr, pattern> {
+
+ bits<4> cond;
+ bits<2> cc;
+
+ let op = 0b10;
+ let rd{4} = 0;
+ let rd{3-0} = cond;
+ let op3 = op3Val;
+ let Inst{13} = isimm;
+ let Inst{12-11} = cc;
+
+}
+
+class TRAPSPrr<bits<6> op3Val, dag outs, dag ins, string asmstr,
+ list<dag> pattern>: TRAPSP<op3Val, 0, outs, ins, asmstr, pattern> {
+ bits<5> rs2;
+
+ let Inst{10-5} = 0;
+ let Inst{4-0} = rs2;
+}
+class TRAPSPri<bits<6> op3Val, dag outs, dag ins, string asmstr,
+ list<dag> pattern>: TRAPSP<op3Val, 1, outs, ins, asmstr, pattern> {
+ bits<8> imm;
+
+ let Inst{10-8} = 0;
+ let Inst{7-0} = imm;
+}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
index 6ecf81d..8b2e6bc 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -24,11 +24,10 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
-#define GET_INSTRINFO_CTOR_DTOR
-#include "SparcGenInstrInfo.inc"
-
using namespace llvm;
+#define GET_INSTRINFO_CTOR_DTOR
+#include "SparcGenInstrInfo.inc"
// Pin the vtable to this file.
void SparcInstrInfo::anchor() {}
@@ -89,6 +88,8 @@ static bool IsIntegerCC(unsigned CC)
static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
{
switch(CC) {
+ case SPCC::ICC_A: return SPCC::ICC_N;
+ case SPCC::ICC_N: return SPCC::ICC_A;
case SPCC::ICC_NE: return SPCC::ICC_E;
case SPCC::ICC_E: return SPCC::ICC_NE;
case SPCC::ICC_G: return SPCC::ICC_LE;
@@ -104,6 +105,8 @@ static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
case SPCC::ICC_VC: return SPCC::ICC_VS;
case SPCC::ICC_VS: return SPCC::ICC_VC;
+ case SPCC::FCC_A: return SPCC::FCC_N;
+ case SPCC::FCC_N: return SPCC::FCC_A;
case SPCC::FCC_U: return SPCC::FCC_O;
case SPCC::FCC_O: return SPCC::FCC_U;
case SPCC::FCC_G: return SPCC::FCC_ULE;
@@ -154,14 +157,14 @@ bool SparcInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
continue;
}
- while (llvm::next(I) != MBB.end())
- llvm::next(I)->eraseFromParent();
+ while (std::next(I) != MBB.end())
+ std::next(I)->eraseFromParent();
Cond.clear();
- FBB = 0;
+ FBB = nullptr;
if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
- TBB = 0;
+ TBB = nullptr;
I->eraseFromParent();
I = MBB.end();
UnCondBrIter = MBB.end();
@@ -281,7 +284,7 @@ void SparcInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
bool KillSrc) const {
unsigned numSubRegs = 0;
unsigned movOpc = 0;
- const unsigned *subRegIdx = 0;
+ const unsigned *subRegIdx = nullptr;
const unsigned DFP_FP_SubRegsIdx[] = { SP::sub_even, SP::sub_odd };
const unsigned QFP_DFP_SubRegsIdx[] = { SP::sub_even64, SP::sub_odd64 };
@@ -325,11 +328,11 @@ void SparcInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
} else
llvm_unreachable("Impossible reg-to-reg copy");
- if (numSubRegs == 0 || subRegIdx == 0 || movOpc == 0)
+ if (numSubRegs == 0 || subRegIdx == nullptr || movOpc == 0)
return;
const TargetRegisterInfo *TRI = &getRegisterInfo();
- MachineInstr *MovMI = 0;
+ MachineInstr *MovMI = nullptr;
for (unsigned i = 0; i != numSubRegs; ++i) {
unsigned Dst = TRI->getSubReg(DestReg, subRegIdx[i]);
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h
index a86cbcb..3a1472e 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h
@@ -45,52 +45,52 @@ public:
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
- virtual const SparcRegisterInfo &getRegisterInfo() const { return RI; }
+ const SparcRegisterInfo &getRegisterInfo() const { return RI; }
/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
- virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
+ unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
- virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
-
- virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify = false) const ;
-
- virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
-
- virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const;
-
- virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const;
-
- virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
-
- virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
+ unsigned isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
+
+ bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify = false) const override ;
+
+ unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
+
+ unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const override;
+
+ void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const override;
+
+ void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+
+ void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
unsigned getGlobalBaseReg(MachineFunction *MF) const;
};
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td
index ae10ca0..960261c 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td
@@ -29,7 +29,8 @@ def Is64Bit : Predicate<"Subtarget.is64Bit()">;
// HasV9 - This predicate is true when the target processor supports V9
// instructions. Note that the machine may be running in 32-bit mode.
-def HasV9 : Predicate<"Subtarget.isV9()">;
+def HasV9 : Predicate<"Subtarget.isV9()">,
+ AssemblerPredicate<"FeatureV9">;
// HasNoV9 - This predicate is true when the target doesn't have V9
// instructions. Use of this is just a hack for the isel not having proper
@@ -37,7 +38,12 @@ def HasV9 : Predicate<"Subtarget.isV9()">;
def HasNoV9 : Predicate<"!Subtarget.isV9()">;
// HasVIS - This is true when the target processor has VIS extensions.
-def HasVIS : Predicate<"Subtarget.isVIS()">;
+def HasVIS : Predicate<"Subtarget.isVIS()">,
+ AssemblerPredicate<"FeatureVIS">;
+def HasVIS2 : Predicate<"Subtarget.isVIS2()">,
+ AssemblerPredicate<"FeatureVIS2">;
+def HasVIS3 : Predicate<"Subtarget.isVIS3()">,
+ AssemblerPredicate<"FeatureVIS3">;
// HasHardQuad - This is true when the target processor supports quad floating
// point instructions.
@@ -104,8 +110,21 @@ def brtarget : Operand<OtherVT> {
let EncoderMethod = "getBranchTargetOpValue";
}
+def bprtarget : Operand<OtherVT> {
+ let EncoderMethod = "getBranchPredTargetOpValue";
+}
+
+def bprtarget16 : Operand<OtherVT> {
+ let EncoderMethod = "getBranchOnRegTargetOpValue";
+}
+
def calltarget : Operand<i32> {
let EncoderMethod = "getCallTargetOpValue";
+ let DecoderMethod = "DecodeCall";
+}
+
+def simm13Op : Operand<i32> {
+ let DecoderMethod = "DecodeSIMM13";
}
// Operand for printing out a condition code.
@@ -246,7 +265,7 @@ multiclass F3_12np<string OpcStr, bits<6> Op3Val> {
(outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2),
!strconcat(OpcStr, " $rs1, $rs2, $rd"), []>;
def ri : F3_2<2, Op3Val,
- (outs IntRegs:$rd), (ins IntRegs:$rs1, i32imm:$simm13),
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, simm13Op:$simm13),
!strconcat(OpcStr, " $rs1, $simm13, $rd"), []>;
}
@@ -316,8 +335,8 @@ let isBarrier = 1, isTerminator = 1, rd = 0b1000, rs1 = 0, simm13 = 5 in
def TA5 : F3_2<0b10, 0b111010, (outs), (ins), "ta 5", [(trap)]>;
let rd = 0 in
- def UNIMP : F2_1<0b000, (outs), (ins i32imm:$val),
- "unimp $val", []>;
+ def UNIMP : F2_1<0b000, (outs), (ins i32imm:$imm22),
+ "unimp $imm22", []>;
// SELECT_CC_* - Used to implement the SELECT_CC DAG operation. Expanded after
// instruction selection into a branch sequence. This has to handle all
@@ -344,7 +363,7 @@ let Uses = [ICC], usesCustomInserter = 1 in {
[(set f128:$dst, (SPselecticc f128:$T, f128:$F, imm:$Cond))]>;
}
-let usesCustomInserter = 1, Uses = [FCC] in {
+let usesCustomInserter = 1, Uses = [FCC0] in {
def SELECT_CC_Int_FCC
: Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, i32imm:$Cond),
@@ -366,7 +385,8 @@ let usesCustomInserter = 1, Uses = [FCC] in {
}
// JMPL Instruction.
-let isTerminator = 1, hasDelaySlot = 1, isBarrier = 1 in {
+let isTerminator = 1, hasDelaySlot = 1, isBarrier = 1,
+ DecoderMethod = "DecodeJMPL" in {
def JMPLrr: F3_1<2, 0b111000, (outs IntRegs:$dst), (ins MEMrr:$addr),
"jmpl $addr, $dst", []>;
def JMPLri: F3_2<2, 0b111000, (outs IntRegs:$dst), (ins MEMri:$addr),
@@ -386,29 +406,47 @@ let isReturn = 1, isTerminator = 1, hasDelaySlot = 1, isBarrier = 1,
"jmp %i7+$val", []>;
}
+let isReturn = 1, isTerminator = 1, hasDelaySlot = 1,
+ isBarrier = 1, rd = 0, DecoderMethod = "DecodeReturn" in {
+ def RETTrr : F3_1<2, 0b111001, (outs), (ins MEMrr:$addr),
+ "rett $addr", []>;
+ def RETTri : F3_2<2, 0b111001, (outs), (ins MEMri:$addr),
+ "rett $addr", []>;
+}
+
// Section B.1 - Load Integer Instructions, p. 90
-defm LDSB : Load<"ldsb", 0b001001, sextloadi8, IntRegs, i32>;
-defm LDSH : Load<"ldsh", 0b001010, sextloadi16, IntRegs, i32>;
-defm LDUB : Load<"ldub", 0b000001, zextloadi8, IntRegs, i32>;
-defm LDUH : Load<"lduh", 0b000010, zextloadi16, IntRegs, i32>;
-defm LD : Load<"ld", 0b000000, load, IntRegs, i32>;
+let DecoderMethod = "DecodeLoadInt" in {
+ defm LDSB : Load<"ldsb", 0b001001, sextloadi8, IntRegs, i32>;
+ defm LDSH : Load<"ldsh", 0b001010, sextloadi16, IntRegs, i32>;
+ defm LDUB : Load<"ldub", 0b000001, zextloadi8, IntRegs, i32>;
+ defm LDUH : Load<"lduh", 0b000010, zextloadi16, IntRegs, i32>;
+ defm LD : Load<"ld", 0b000000, load, IntRegs, i32>;
+}
// Section B.2 - Load Floating-point Instructions, p. 92
-defm LDF : Load<"ld", 0b100000, load, FPRegs, f32>;
-defm LDDF : Load<"ldd", 0b100011, load, DFPRegs, f64>;
-defm LDQF : Load<"ldq", 0b100010, load, QFPRegs, f128>,
- Requires<[HasV9, HasHardQuad]>;
+let DecoderMethod = "DecodeLoadFP" in
+ defm LDF : Load<"ld", 0b100000, load, FPRegs, f32>;
+let DecoderMethod = "DecodeLoadDFP" in
+ defm LDDF : Load<"ldd", 0b100011, load, DFPRegs, f64>;
+let DecoderMethod = "DecodeLoadQFP" in
+ defm LDQF : Load<"ldq", 0b100010, load, QFPRegs, f128>,
+ Requires<[HasV9, HasHardQuad]>;
// Section B.4 - Store Integer Instructions, p. 95
-defm STB : Store<"stb", 0b000101, truncstorei8, IntRegs, i32>;
-defm STH : Store<"sth", 0b000110, truncstorei16, IntRegs, i32>;
-defm ST : Store<"st", 0b000100, store, IntRegs, i32>;
+let DecoderMethod = "DecodeStoreInt" in {
+ defm STB : Store<"stb", 0b000101, truncstorei8, IntRegs, i32>;
+ defm STH : Store<"sth", 0b000110, truncstorei16, IntRegs, i32>;
+ defm ST : Store<"st", 0b000100, store, IntRegs, i32>;
+}
// Section B.5 - Store Floating-point Instructions, p. 97
-defm STF : Store<"st", 0b100100, store, FPRegs, f32>;
-defm STDF : Store<"std", 0b100111, store, DFPRegs, f64>;
-defm STQF : Store<"stq", 0b100110, store, QFPRegs, f128>,
- Requires<[HasV9, HasHardQuad]>;
+let DecoderMethod = "DecodeStoreFP" in
+ defm STF : Store<"st", 0b100100, store, FPRegs, f32>;
+let DecoderMethod = "DecodeStoreDFP" in
+ defm STDF : Store<"std", 0b100111, store, DFPRegs, f64>;
+let DecoderMethod = "DecodeStoreQFP" in
+ defm STQF : Store<"stq", 0b100110, store, QFPRegs, f128>,
+ Requires<[HasV9, HasHardQuad]>;
// Section B.9 - SETHI Instruction, p. 104
def SETHIi: F2_1<0b100,
@@ -422,42 +460,51 @@ let rd = 0, imm22 = 0 in
def NOP : F2_1<0b100, (outs), (ins), "nop", []>;
// Section B.11 - Logical Instructions, p. 106
-defm AND : F3_12<"and", 0b000001, and, IntRegs, i32, i32imm>;
+defm AND : F3_12<"and", 0b000001, and, IntRegs, i32, simm13Op>;
def ANDNrr : F3_1<2, 0b000101,
(outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2),
"andn $rs1, $rs2, $rd",
[(set i32:$rd, (and i32:$rs1, (not i32:$rs2)))]>;
def ANDNri : F3_2<2, 0b000101,
- (outs IntRegs:$rd), (ins IntRegs:$rs1, i32imm:$simm13),
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, simm13Op:$simm13),
"andn $rs1, $simm13, $rd", []>;
-defm OR : F3_12<"or", 0b000010, or, IntRegs, i32, i32imm>;
+defm OR : F3_12<"or", 0b000010, or, IntRegs, i32, simm13Op>;
def ORNrr : F3_1<2, 0b000110,
(outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2),
"orn $rs1, $rs2, $rd",
[(set i32:$rd, (or i32:$rs1, (not i32:$rs2)))]>;
def ORNri : F3_2<2, 0b000110,
- (outs IntRegs:$rd), (ins IntRegs:$rs1, i32imm:$simm13),
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, simm13Op:$simm13),
"orn $rs1, $simm13, $rd", []>;
-defm XOR : F3_12<"xor", 0b000011, xor, IntRegs, i32, i32imm>;
+defm XOR : F3_12<"xor", 0b000011, xor, IntRegs, i32, simm13Op>;
def XNORrr : F3_1<2, 0b000111,
(outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2),
"xnor $rs1, $rs2, $rd",
[(set i32:$rd, (not (xor i32:$rs1, i32:$rs2)))]>;
def XNORri : F3_2<2, 0b000111,
- (outs IntRegs:$rd), (ins IntRegs:$rs1, i32imm:$simm13),
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, simm13Op:$simm13),
"xnor $rs1, $simm13, $rd", []>;
+let Defs = [ICC] in {
+ defm ANDCC : F3_12np<"andcc", 0b010001>;
+ defm ANDNCC : F3_12np<"andncc", 0b010101>;
+ defm ORCC : F3_12np<"orcc", 0b010010>;
+ defm ORNCC : F3_12np<"orncc", 0b010110>;
+ defm XORCC : F3_12np<"xorcc", 0b010011>;
+ defm XNORCC : F3_12np<"xnorcc", 0b010111>;
+}
+
// Section B.12 - Shift Instructions, p. 107
-defm SLL : F3_12<"sll", 0b100101, shl, IntRegs, i32, i32imm>;
-defm SRL : F3_12<"srl", 0b100110, srl, IntRegs, i32, i32imm>;
-defm SRA : F3_12<"sra", 0b100111, sra, IntRegs, i32, i32imm>;
+defm SLL : F3_12<"sll", 0b100101, shl, IntRegs, i32, simm13Op>;
+defm SRL : F3_12<"srl", 0b100110, srl, IntRegs, i32, simm13Op>;
+defm SRA : F3_12<"sra", 0b100111, sra, IntRegs, i32, simm13Op>;
// Section B.13 - Add Instructions, p. 108
-defm ADD : F3_12<"add", 0b000000, add, IntRegs, i32, i32imm>;
+defm ADD : F3_12<"add", 0b000000, add, IntRegs, i32, simm13Op>;
// "LEA" forms of add (patterns to make tblgen happy)
let Predicates = [Is32Bit], isCodeGenOnly = 1 in
@@ -467,18 +514,24 @@ let Predicates = [Is32Bit], isCodeGenOnly = 1 in
[(set iPTR:$dst, ADDRri:$addr)]>;
let Defs = [ICC] in
- defm ADDCC : F3_12<"addcc", 0b010000, addc, IntRegs, i32, i32imm>;
+ defm ADDCC : F3_12<"addcc", 0b010000, addc, IntRegs, i32, simm13Op>;
+
+let Uses = [ICC] in
+ defm ADDC : F3_12np<"addx", 0b001000>;
let Uses = [ICC], Defs = [ICC] in
- defm ADDE : F3_12<"addxcc", 0b011000, adde, IntRegs, i32, i32imm>;
+ defm ADDE : F3_12<"addxcc", 0b011000, adde, IntRegs, i32, simm13Op>;
// Section B.15 - Subtract Instructions, p. 110
-defm SUB : F3_12 <"sub" , 0b000100, sub, IntRegs, i32, i32imm>;
+defm SUB : F3_12 <"sub" , 0b000100, sub, IntRegs, i32, simm13Op>;
let Uses = [ICC], Defs = [ICC] in
- defm SUBE : F3_12 <"subxcc" , 0b011100, sube, IntRegs, i32, i32imm>;
+ defm SUBE : F3_12 <"subxcc" , 0b011100, sube, IntRegs, i32, simm13Op>;
let Defs = [ICC] in
- defm SUBCC : F3_12 <"subcc", 0b010100, subc, IntRegs, i32, i32imm>;
+ defm SUBCC : F3_12 <"subcc", 0b010100, subc, IntRegs, i32, simm13Op>;
+
+let Uses = [ICC] in
+ defm SUBC : F3_12np <"subx", 0b001100>;
let Defs = [ICC], rd = 0 in {
def CMPrr : F3_1<2, 0b010100,
@@ -486,7 +539,7 @@ let Defs = [ICC], rd = 0 in {
"cmp $rs1, $rs2",
[(SPcmpicc i32:$rs1, i32:$rs2)]>;
def CMPri : F3_2<2, 0b010100,
- (outs), (ins IntRegs:$rs1, i32imm:$simm13),
+ (outs), (ins IntRegs:$rs1, simm13Op:$simm13),
"cmp $rs1, $simm13",
[(SPcmpicc i32:$rs1, (i32 simm13:$simm13))]>;
}
@@ -494,7 +547,12 @@ let Defs = [ICC], rd = 0 in {
// Section B.18 - Multiply Instructions, p. 113
let Defs = [Y] in {
defm UMUL : F3_12np<"umul", 0b001010>;
- defm SMUL : F3_12 <"smul", 0b001011, mul, IntRegs, i32, i32imm>;
+ defm SMUL : F3_12 <"smul", 0b001011, mul, IntRegs, i32, simm13Op>;
+}
+
+let Defs = [Y, ICC] in {
+ defm UMULCC : F3_12np<"umulcc", 0b011010>;
+ defm SMULCC : F3_12np<"smulcc", 0b011011>;
}
// Section B.19 - Divide Instructions, p. 115
@@ -503,6 +561,11 @@ let Defs = [Y] in {
defm SDIV : F3_12np<"sdiv", 0b001111>;
}
+let Defs = [Y, ICC] in {
+ defm UDIVCC : F3_12np<"udivcc", 0b011110>;
+ defm SDIVCC : F3_12np<"sdivcc", 0b011111>;
+}
+
// Section B.20 - SAVE and RESTORE, p. 117
defm SAVE : F3_12np<"save" , 0b111100>;
defm RESTORE : F3_12np<"restore", 0b111101>;
@@ -511,7 +574,7 @@ defm RESTORE : F3_12np<"restore", 0b111101>;
// unconditional branch class.
class BranchAlways<dag ins, string asmstr, list<dag> pattern>
- : F2_2<0b010, (outs), ins, asmstr, pattern> {
+ : F2_2<0b010, 0, (outs), ins, asmstr, pattern> {
let isBranch = 1;
let isTerminator = 1;
let hasDelaySlot = 1;
@@ -521,14 +584,36 @@ class BranchAlways<dag ins, string asmstr, list<dag> pattern>
let cond = 8 in
def BA : BranchAlways<(ins brtarget:$imm22), "ba $imm22", [(br bb:$imm22)]>;
+
+let isBranch = 1, isTerminator = 1, hasDelaySlot = 1 in {
+
// conditional branch class:
class BranchSP<dag ins, string asmstr, list<dag> pattern>
- : F2_2<0b010, (outs), ins, asmstr, pattern> {
- let isBranch = 1;
- let isTerminator = 1;
- let hasDelaySlot = 1;
+ : F2_2<0b010, 0, (outs), ins, asmstr, pattern>;
+
+// conditional branch with annul class:
+class BranchSPA<dag ins, string asmstr, list<dag> pattern>
+ : F2_2<0b010, 1, (outs), ins, asmstr, pattern>;
+
+// Conditional branch class on %icc|%xcc with predication:
+multiclass IPredBranch<string regstr, list<dag> CCPattern> {
+ def CC : F2_3<0b001, 0, 1, (outs), (ins bprtarget:$imm19, CCOp:$cond),
+ !strconcat("b$cond ", !strconcat(regstr, ", $imm19")),
+ CCPattern>;
+ def CCA : F2_3<0b001, 1, 1, (outs), (ins bprtarget:$imm19, CCOp:$cond),
+ !strconcat("b$cond,a ", !strconcat(regstr, ", $imm19")),
+ []>;
+ def CCNT : F2_3<0b001, 0, 0, (outs), (ins bprtarget:$imm19, CCOp:$cond),
+ !strconcat("b$cond,pn ", !strconcat(regstr, ", $imm19")),
+ []>;
+ def CCANT : F2_3<0b001, 1, 0, (outs), (ins bprtarget:$imm19, CCOp:$cond),
+ !strconcat("b$cond,a,pn ", !strconcat(regstr, ", $imm19")),
+ []>;
}
+} // let isBranch = 1, isTerminator = 1, hasDelaySlot = 1
+
+
// Indirect branch instructions.
let isTerminator = 1, isBarrier = 1, hasDelaySlot = 1, isBranch =1,
isIndirectBranch = 1, rd = 0, isCodeGenOnly = 1 in {
@@ -542,33 +627,64 @@ let isTerminator = 1, isBarrier = 1, hasDelaySlot = 1, isBranch =1,
[(brind ADDRri:$ptr)]>;
}
-let Uses = [ICC] in
+let Uses = [ICC] in {
def BCOND : BranchSP<(ins brtarget:$imm22, CCOp:$cond),
"b$cond $imm22",
[(SPbricc bb:$imm22, imm:$cond)]>;
+ def BCONDA : BranchSPA<(ins brtarget:$imm22, CCOp:$cond),
+ "b$cond,a $imm22", []>;
+
+ let Predicates = [HasV9], cc = 0b00 in
+ defm BPI : IPredBranch<"%icc", []>;
+}
// Section B.22 - Branch on Floating-point Condition Codes Instructions, p. 121
+let isBranch = 1, isTerminator = 1, hasDelaySlot = 1 in {
+
// floating-point conditional branch class:
class FPBranchSP<dag ins, string asmstr, list<dag> pattern>
- : F2_2<0b110, (outs), ins, asmstr, pattern> {
- let isBranch = 1;
- let isTerminator = 1;
- let hasDelaySlot = 1;
+ : F2_2<0b110, 0, (outs), ins, asmstr, pattern>;
+
+// floating-point conditional branch with annul class:
+class FPBranchSPA<dag ins, string asmstr, list<dag> pattern>
+ : F2_2<0b110, 1, (outs), ins, asmstr, pattern>;
+
+// Conditional branch class on %fcc0-%fcc3 with predication:
+multiclass FPredBranch {
+ def CC : F2_3<0b101, 0, 1, (outs), (ins bprtarget:$imm19, CCOp:$cond,
+ FCCRegs:$cc),
+ "fb$cond $cc, $imm19", []>;
+ def CCA : F2_3<0b101, 1, 1, (outs), (ins bprtarget:$imm19, CCOp:$cond,
+ FCCRegs:$cc),
+ "fb$cond,a $cc, $imm19", []>;
+ def CCNT : F2_3<0b101, 0, 0, (outs), (ins bprtarget:$imm19, CCOp:$cond,
+ FCCRegs:$cc),
+ "fb$cond,pn $cc, $imm19", []>;
+ def CCANT : F2_3<0b101, 1, 0, (outs), (ins bprtarget:$imm19, CCOp:$cond,
+ FCCRegs:$cc),
+ "fb$cond,a,pn $cc, $imm19", []>;
}
+} // let isBranch = 1, isTerminator = 1, hasDelaySlot = 1
-let Uses = [FCC] in
+let Uses = [FCC0] in {
def FBCOND : FPBranchSP<(ins brtarget:$imm22, CCOp:$cond),
"fb$cond $imm22",
[(SPbrfcc bb:$imm22, imm:$cond)]>;
+ def FBCONDA : FPBranchSPA<(ins brtarget:$imm22, CCOp:$cond),
+ "fb$cond,a $imm22", []>;
+}
+
+let Predicates = [HasV9] in
+ defm BPF : FPredBranch;
// Section B.24 - Call and Link Instruction, p. 125
// This is the only Format 1 instruction
let Uses = [O6],
hasDelaySlot = 1, isCall = 1 in {
- def CALL : InstSP<(outs), (ins calltarget:$dst, variable_ops),
- "call $dst", []> {
+ def CALL : InstSP<(outs), (ins calltarget:$disp, variable_ops),
+ "call $disp", []> {
bits<30> disp;
let op = 1;
let Inst{29-0} = disp;
@@ -596,11 +712,11 @@ let Uses = [Y], rs1 = 0, rs2 = 0 in
// Section B.29 - Write State Register Instructions
let Defs = [Y], rd = 0 in {
def WRYrr : F3_1<2, 0b110000,
- (outs), (ins IntRegs:$b, IntRegs:$c),
- "wr $b, $c, %y", []>;
+ (outs), (ins IntRegs:$rs1, IntRegs:$rs2),
+ "wr $rs1, $rs2, %y", []>;
def WRYri : F3_2<2, 0b110000,
- (outs), (ins IntRegs:$b, i32imm:$c),
- "wr $b, $c, %y", []>;
+ (outs), (ins IntRegs:$rs1, simm13Op:$simm13),
+ "wr $rs1, $simm13, %y", []>;
}
// Convert Integer to Floating-point Instructions, p. 141
def FITOS : F3_3u<2, 0b110100, 0b011000100,
@@ -771,7 +887,7 @@ def FDIVQ : F3_3<2, 0b110100, 0b001001111,
// This behavior is modeled with a forced noop after the instruction in
// DelaySlotFiller.
-let Defs = [FCC] in {
+let Defs = [FCC0], rd = 0, isCodeGenOnly = 1 in {
def FCMPS : F3_3c<2, 0b110101, 0b001010001,
(outs), (ins FPRegs:$rs1, FPRegs:$rs2),
"fcmps $rs1, $rs2",
@@ -823,7 +939,7 @@ let Uses = [O6], isCall = 1, hasDelaySlot = 1 in
// V9 Conditional Moves.
let Predicates = [HasV9], Constraints = "$f = $rd" in {
// Move Integer Register on Condition (MOVcc) p. 194 of the V9 manual.
- let Uses = [ICC], cc = 0b100 in {
+ let Uses = [ICC], intcc = 1, cc = 0b00 in {
def MOVICCrr
: F4_1<0b101100, (outs IntRegs:$rd),
(ins IntRegs:$rs2, IntRegs:$f, CCOp:$cond),
@@ -838,7 +954,7 @@ let Predicates = [HasV9], Constraints = "$f = $rd" in {
(SPselecticc simm11:$simm11, i32:$f, imm:$cond))]>;
}
- let Uses = [FCC], cc = 0b000 in {
+ let Uses = [FCC0], intcc = 0, cc = 0b00 in {
def MOVFCCrr
: F4_1<0b101100, (outs IntRegs:$rd),
(ins IntRegs:$rs2, IntRegs:$f, CCOp:$cond),
@@ -852,7 +968,7 @@ let Predicates = [HasV9], Constraints = "$f = $rd" in {
(SPselectfcc simm11:$simm11, i32:$f, imm:$cond))]>;
}
- let Uses = [ICC], opf_cc = 0b100 in {
+ let Uses = [ICC], intcc = 1, opf_cc = 0b00 in {
def FMOVS_ICC
: F4_3<0b110101, 0b000001, (outs FPRegs:$rd),
(ins FPRegs:$rs2, FPRegs:$f, CCOp:$cond),
@@ -871,7 +987,7 @@ let Predicates = [HasV9], Constraints = "$f = $rd" in {
Requires<[HasHardQuad]>;
}
- let Uses = [FCC], opf_cc = 0b000 in {
+ let Uses = [FCC0], intcc = 0, opf_cc = 0b00 in {
def FMOVS_FCC
: F4_3<0b110101, 0b000001, (outs FPRegs:$rd),
(ins FPRegs:$rs2, FPRegs:$f, CCOp:$cond),
@@ -921,6 +1037,59 @@ let Predicates = [HasV9] in {
Requires<[HasHardQuad]>;
}
+// Floating-point compare instructions with %fcc0-%fcc3.
+def V9FCMPS : F3_3c<2, 0b110101, 0b001010001,
+ (outs FCCRegs:$rd), (ins FPRegs:$rs1, FPRegs:$rs2),
+ "fcmps $rd, $rs1, $rs2", []>;
+def V9FCMPD : F3_3c<2, 0b110101, 0b001010010,
+ (outs FCCRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fcmpd $rd, $rs1, $rs2", []>;
+def V9FCMPQ : F3_3c<2, 0b110101, 0b001010011,
+ (outs FCCRegs:$rd), (ins QFPRegs:$rs1, QFPRegs:$rs2),
+ "fcmpq $rd, $rs1, $rs2", []>,
+ Requires<[HasHardQuad]>;
+
+let hasSideEffects = 1 in {
+ def V9FCMPES : F3_3c<2, 0b110101, 0b001010101,
+ (outs FCCRegs:$rd), (ins FPRegs:$rs1, FPRegs:$rs2),
+ "fcmpes $rd, $rs1, $rs2", []>;
+ def V9FCMPED : F3_3c<2, 0b110101, 0b001010110,
+ (outs FCCRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fcmped $rd, $rs1, $rs2", []>;
+ def V9FCMPEQ : F3_3c<2, 0b110101, 0b001010111,
+ (outs FCCRegs:$rd), (ins QFPRegs:$rs1, QFPRegs:$rs2),
+ "fcmpeq $rd, $rs1, $rs2", []>,
+ Requires<[HasHardQuad]>;
+}
+
+// Floating-point conditional move instructions with %fcc0-%fcc3.
+let Predicates = [HasV9] in {
+ let Constraints = "$f = $rd", intcc = 0 in {
+ def V9MOVFCCrr
+ : F4_1<0b101100, (outs IntRegs:$rd),
+ (ins FCCRegs:$cc, IntRegs:$rs2, IntRegs:$f, CCOp:$cond),
+ "mov$cond $cc, $rs2, $rd", []>;
+ def V9MOVFCCri
+ : F4_2<0b101100, (outs IntRegs:$rd),
+ (ins FCCRegs:$cc, i32imm:$simm11, IntRegs:$f, CCOp:$cond),
+ "mov$cond $cc, $simm11, $rd", []>;
+ def V9FMOVS_FCC
+ : F4_3<0b110101, 0b000001, (outs FPRegs:$rd),
+ (ins FCCRegs:$opf_cc, FPRegs:$rs2, FPRegs:$f, CCOp:$cond),
+ "fmovs$cond $opf_cc, $rs2, $rd", []>;
+ def V9FMOVD_FCC
+ : F4_3<0b110101, 0b000010, (outs DFPRegs:$rd),
+ (ins FCCRegs:$opf_cc, DFPRegs:$rs2, DFPRegs:$f, CCOp:$cond),
+ "fmovd$cond $opf_cc, $rs2, $rd", []>;
+ def V9FMOVQ_FCC
+ : F4_3<0b110101, 0b000011, (outs QFPRegs:$rd),
+ (ins FCCRegs:$opf_cc, QFPRegs:$rs2, QFPRegs:$f, CCOp:$cond),
+ "fmovq$cond $opf_cc, $rs2, $rd", []>,
+ Requires<[HasHardQuad]>;
+ } // Constraints = "$f = $rd", ...
+} // let Predicates = [hasV9]
+
+
// POPCrr - This does a ctpop of a 64-bit register. As such, we have to clear
// the top 32-bits before using it. To do this clearing, we use a SRLri X,0.
let rs1 = 0 in
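// For illustration only (a sketch, assuming the usual ctpop DAG node and the
// SRLri shift definition from this file): the clearing described in the
// comment above corresponds to a selection pattern of roughly this shape:
//
//   def : Pat<(ctpop i32:$src), (POPCrr (SRLri $src, 0))>;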
@@ -935,10 +1104,10 @@ let hasSideEffects =1, rd = 0, rs1 = 0b01111, rs2 = 0 in
def STBAR : F3_1<2, 0b101000, (outs), (ins), "stbar", []>;
let Predicates = [HasV9], hasSideEffects = 1, rd = 0, rs1 = 0b01111 in
- def MEMBARi : F3_2<2, 0b101000, (outs), (ins i32imm:$simm13),
+ def MEMBARi : F3_2<2, 0b101000, (outs), (ins simm13Op:$simm13),
"membar $simm13", []>;
-let Constraints = "$val = $dst" in {
+let Constraints = "$val = $dst", DecoderMethod = "DecodeSWAP" in {
def SWAPrr : F3_1<3, 0b001111,
(outs IntRegs:$dst), (ins MEMrr:$addr, IntRegs:$val),
"swap [$addr], $dst",
@@ -957,6 +1126,28 @@ let Predicates = [HasV9], Constraints = "$swap = $rd" in
[(set i32:$rd,
(atomic_cmp_swap iPTR:$rs1, i32:$rs2, i32:$swap))]>;
+let Defs = [ICC] in {
+defm TADDCC : F3_12np<"taddcc", 0b100000>;
+defm TSUBCC : F3_12np<"tsubcc", 0b100001>;
+
+let hasSideEffects = 1 in {
+ defm TADDCCTV : F3_12np<"taddcctv", 0b100010>;
+ defm TSUBCCTV : F3_12np<"tsubcctv", 0b100011>;
+}
+}
+
+multiclass TRAP<string regStr> {
+ def rr : TRAPSPrr<0b111010, (outs), (ins IntRegs:$rs1, IntRegs:$rs2,
+ CCOp:$cond),
+ !strconcat(!strconcat("t$cond ", regStr), ", $rs1 + $rs2"), []>;
+ def ri : TRAPSPri<0b111010, (outs), (ins IntRegs:$rs1, i32imm:$imm,
+ CCOp:$cond),
+ !strconcat(!strconcat("t$cond ", regStr), ", $rs1 + $imm"), []>;
+}
+
+let hasSideEffects = 1, Uses = [ICC], cc = 0b00 in
+ defm TICC : TRAP<"%icc">;
+
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//
@@ -1032,4 +1223,5 @@ def : Pat<(atomic_store ADDRri:$dst, i32:$val), (STri ADDRri:$dst, $val)>;
include "SparcInstr64Bit.td"
+include "SparcInstrVIS.td"
include "SparcInstrAliases.td"
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrVIS.td b/contrib/llvm/lib/Target/Sparc/SparcInstrVIS.td
new file mode 100644
index 0000000..3e2b49d
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrVIS.td
@@ -0,0 +1,263 @@
+//===---- SparcInstrVIS.td - Visual Instruction Set extensions (VIS) -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains instruction formats, definitions and patterns needed for
+// VIS, VIS II, and VIS III instructions on SPARC.
+//===----------------------------------------------------------------------===//
+
+// VIS Instruction Format.
+class VISInstFormat<bits<9> opfval, dag outs, dag ins, string asmstr,
+ list<dag> pattern>
+ : F3_3<0b10, 0b110110, opfval, outs, ins, asmstr, pattern>;
+
+class VISInst<bits<9> opfval, string OpcStr, RegisterClass RC = DFPRegs>
+ : VISInstFormat<opfval,
+ (outs RC:$rd), (ins RC:$rs1, RC:$rs2),
+ !strconcat(OpcStr, " $rs1, $rs2, $rd"), []>;
+
+// VIS Instruction with integer destination register.
+class VISInstID<bits<9> opfval, string OpcStr>
+ : VISInstFormat<opfval,
+ (outs I64Regs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ !strconcat(OpcStr, " $rs1, $rs2, $rd"), []>;
+
+// For VIS Instructions with no operand.
+let rd = 0, rs1 = 0, rs2 = 0 in
+class VISInst0<bits<9> opfval, string asmstr>
+ : VISInstFormat<opfval, (outs), (ins), asmstr, []>;
+
+// For VIS Instructions with only rs1, rd operands.
+let rs2 = 0 in
+class VISInst1<bits<9> opfval, string OpcStr, RegisterClass RC = DFPRegs>
+ : VISInstFormat<opfval,
+ (outs RC:$rd), (ins RC:$rs1),
+ !strconcat(OpcStr, " $rs1, $rd"), []>;
+
+// For VIS Instructions with only rs2, rd operands.
+let rs1 = 0 in
+class VISInst2<bits<9> opfval, string OpcStr, RegisterClass RC = DFPRegs>
+ : VISInstFormat<opfval,
+ (outs RC:$rd), (ins RC:$rs2),
+ !strconcat(OpcStr, " $rs2, $rd"), []>;
+
+// For VIS Instructions with only rd operand.
+let Constraints = "$rd = $f", rs1 = 0, rs2 = 0 in
+class VISInstD<bits<9> opfval, string OpcStr, RegisterClass RC = DFPRegs>
+ : VISInstFormat<opfval,
+ (outs RC:$rd), (ins RC:$f),
+ !strconcat(OpcStr, " $rd"), []>;
+
+// VIS 1 Instructions
+let Predicates = [HasVIS] in {
+
+def FPADD16 : VISInst<0b001010000, "fpadd16">;
+def FPADD16S : VISInst<0b001010001, "fpadd16s">;
+def FPADD32 : VISInst<0b001010010, "fpadd32">;
+def FPADD32S : VISInst<0b001010011, "fpadd32s">;
+def FPSUB16 : VISInst<0b001010100, "fpsub16">;
+def FPSUB16S  : VISInst<0b001010101, "fpsub16s">;
+def FPSUB32 : VISInst<0b001010110, "fpsub32">;
+def FPSUB32S  : VISInst<0b001010111, "fpsub32s">;
+
+def FPACK16 : VISInst2<0b000111011, "fpack16">;
+def FPACK32 : VISInst <0b000111010, "fpack32">;
+def FPACKFIX : VISInst2<0b000111101, "fpackfix">;
+def FEXPAND : VISInst2<0b001001101, "fexpand">;
+def FPMERGE : VISInst <0b001001011, "fpmerge">;
+
+def FMUL8X16 : VISInst<0b00110001, "fmul8x16">;
+def FMUL8X16AU : VISInst<0b00110011, "fmul8x16au">;
+def FMUL8X16AL : VISInst<0b00110101, "fmul8x16al">;
+def FMUL8SUX16 : VISInst<0b00110110, "fmul8sux16">;
+def FMUL8ULX16 : VISInst<0b00110111, "fmul8ulx16">;
+def FMULD8SUX16 : VISInst<0b00111000, "fmuld8sux16">;
+def FMULD8ULX16 : VISInst<0b00111001, "fmuld8ulx16">;
+
+def ALIGNADDR : VISInst<0b000011000, "alignaddr", I64Regs>;
+def ALIGNADDRL : VISInst<0b000011010, "alignaddrl", I64Regs>;
+def FALIGNADATA : VISInst<0b001001000, "faligndata">;
+
+def FZERO : VISInstD<0b001100000, "fzero">;
+def FZEROS : VISInstD<0b001100001, "fzeros", FPRegs>;
+def FONE : VISInstD<0b001111110, "fone">;
+def FONES : VISInstD<0b001111111, "fones", FPRegs>;
+def FSRC1 : VISInst1<0b001110100, "fsrc1">;
+def FSRC1S : VISInst1<0b001110101, "fsrc1s", FPRegs>;
+def FSRC2 : VISInst2<0b001111000, "fsrc2">;
+def FSRC2S : VISInst2<0b001111001, "fsrc2s", FPRegs>;
+def FNOT1 : VISInst1<0b001101010, "fnot1">;
+def FNOT1S : VISInst1<0b001101011, "fnot1s", FPRegs>;
+def FNOT2 : VISInst2<0b001100110, "fnot2">;
+def FNOT2S : VISInst2<0b001100111, "fnot2s", FPRegs>;
+def FOR : VISInst<0b001111100, "for">;
+def FORS : VISInst<0b001111101, "fors", FPRegs>;
+def FNOR : VISInst<0b001100010, "fnor">;
+def FNORS : VISInst<0b001100011, "fnors", FPRegs>;
+def FAND : VISInst<0b001110000, "fand">;
+def FANDS : VISInst<0b001110001, "fands", FPRegs>;
+def FNAND : VISInst<0b001101110, "fnand">;
+def FNANDS : VISInst<0b001101111, "fnands", FPRegs>;
+def FXOR : VISInst<0b001101100, "fxor">;
+def FXORS : VISInst<0b001101101, "fxors", FPRegs>;
+def FXNOR : VISInst<0b001110010, "fxnor">;
+def FXNORS : VISInst<0b001110011, "fxnors", FPRegs>;
+
+def FORNOT1 : VISInst<0b001111010, "fornot1">;
+def FORNOT1S : VISInst<0b001111011, "fornot1s", FPRegs>;
+def FORNOT2 : VISInst<0b001110110, "fornot2">;
+def FORNOT2S : VISInst<0b001110111, "fornot2s", FPRegs>;
+def FANDNOT1 : VISInst<0b001101000, "fandnot1">;
+def FANDNOT1S : VISInst<0b001101001, "fandnot1s", FPRegs>;
+def FANDNOT2 : VISInst<0b001100100, "fandnot2">;
+def FANDNOT2S : VISInst<0b001100101, "fandnot2s", FPRegs>;
+
+def FCMPGT16 : VISInstID<0b000101000, "fcmpgt16">;
+def FCMPGT32 : VISInstID<0b000101100, "fcmpgt32">;
+def FCMPLE16 : VISInstID<0b000100000, "fcmple16">;
+def FCMPLE32 : VISInstID<0b000100100, "fcmple32">;
+def FCMPNE16 : VISInstID<0b000100010, "fcmpne16">;
+def FCMPNE32 : VISInstID<0b000100110, "fcmpne32">;
+def FCMPEQ16 : VISInstID<0b000101010, "fcmpeq16">;
+def FCMPEQ32 : VISInstID<0b000101110, "fcmpeq32">;
+
+
+def EDGE8 : VISInst<0b000000000, "edge8", I64Regs>;
+def EDGE8L : VISInst<0b000000010, "edge8l", I64Regs>;
+def EDGE16 : VISInst<0b000000100, "edge16", I64Regs>;
+def EDGE16L : VISInst<0b000000110, "edge16l", I64Regs>;
+def EDGE32 : VISInst<0b000001000, "edge32", I64Regs>;
+def EDGE32L : VISInst<0b000001010, "edge32l", I64Regs>;
+
+def PDIST : VISInst<0b00111110, "pdist">;
+
+def ARRAY8 : VISInst<0b000010000, "array8", I64Regs>;
+def ARRAY16 : VISInst<0b000010010, "array16", I64Regs>;
+def ARRAY32 : VISInst<0b000010100, "array32", I64Regs>;
+
+def SHUTDOWN : VISInst0<0b010000000, "shutdown">;
+
+} // Predicates = [HasVIS]
+
+
+// VIS 2 Instructions.
+let Predicates = [HasVIS2] in {
+
+def BMASK : VISInst<0b000011001, "bmask", I64Regs>;
+def BSHUFFLE : VISInst<0b000011100, "bshuffle">;
+
+def SIAM : VISInst0<0b010000001, "siam">;
+
+def EDGE8N : VISInst<0b000000001, "edge8n", I64Regs>;
+def EDGE8LN : VISInst<0b000000011, "edge8ln", I64Regs>;
+def EDGE16N : VISInst<0b000000101, "edge16n", I64Regs>;
+def EDGE16LN : VISInst<0b000000111, "edge16ln", I64Regs>;
+def EDGE32N : VISInst<0b000001001, "edge32n", I64Regs>;
+def EDGE32LN : VISInst<0b000001011, "edge32ln", I64Regs>;
+} // Predicates = [HasVIS2]
+
+
+// VIS 3 Instructions.
+let Predicates = [HasVIS3] in {
+
+let Uses = [ICC] in
+def ADDXC : VISInst<0b000010001, "addxc", I64Regs>;
+
+let Defs = [ICC], Uses = [ICC] in
+def ADDXCCC : VISInst<0b000010011, "addxccc", I64Regs>;
+
+let rd = 0, rs1 = 0 in {
+def CMASK8 : VISInstFormat<0b000011011, (outs), (ins I64Regs:$rs2),
+ "cmask8 $rs2", []>;
+def CMASK16 : VISInstFormat<0b000011101, (outs), (ins I64Regs:$rs2),
+ "cmask16 $rs2", []>;
+def CMASK32 : VISInstFormat<0b000011111, (outs), (ins I64Regs:$rs2),
+ "cmask32 $rs2", []>;
+
+}
+
+def FCHKSM16 : VISInst<0b01000100, "fchksm16">;
+
+def FHADDS : F3_3<0b10, 0b110100, 0b001100001,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fhadds $rs1, $rs2, $rd", []>;
+def FHADDD : F3_3<0b10, 0b110100, 0b001100010,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fhaddd $rs1, $rs2, $rd", []>;
+def FHSUBS : F3_3<0b10, 0b110100, 0b001100101,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fhsubs $rs1, $rs2, $rd", []>;
+def FHSUBD : F3_3<0b10, 0b110100, 0b001100110,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fhsubd $rs1, $rs2, $rd", []>;
+def FLCMPS : VISInstFormat<0b101010001, (outs FCCRegs:$rd),
+ (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "flcmps $rd, $rs1, $rs2", []>;
+def FLCMPD : VISInstFormat<0b101010010, (outs FCCRegs:$rd),
+ (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "flcmpd $rd, $rs1, $rs2", []>;
+
+def FMEAN16 : VISInst<0b001000000, "fmean16">;
+
+def FNADDS : F3_3<0b10, 0b110100, 0b001010001,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fnadds $rs1, $rs2, $rd", []>;
+def FNADDD : F3_3<0b10, 0b110100, 0b001010010,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fnaddd $rs1, $rs2, $rd", []>;
+def FNHADDS : F3_3<0b10, 0b110100, 0b001110001,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fnhadds $rs1, $rs2, $rd", []>;
+def FNHADDD : F3_3<0b10, 0b110100, 0b001110010,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fnhaddd $rs1, $rs2, $rd", []>;
+
+def FNMULS : F3_3<0b10, 0b110100, 0b001011001,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fnhadds $rs1, $rs2, $rd", []>;
+def FNMULD : F3_3<0b10, 0b110100, 0b001011010,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fnhaddd $rs1, $rs2, $rd", []>;
+def FNSMULD : F3_3<0b10, 0b110100, 0b001111001,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fnhadds $rs1, $rs2, $rd", []>;
+
+def FPADD64 : VISInst<0b001000010, "fpadd64">;
+
+def FSLL16 : VISInst<0b00100001, "fsll16">;
+def FSRL16 : VISInst<0b00100011, "fsrl16">;
+def FSLL32 : VISInst<0b00100101, "fsll32">;
+def FSRL32 : VISInst<0b00100111, "fsrl32">;
+def FSLAS16 : VISInst<0b00101001, "fslas16">;
+def FSRA16 : VISInst<0b00101011, "fsra16">;
+def FSLAS32 : VISInst<0b00101101, "fslas32">;
+def FSRA32 : VISInst<0b00101111, "fsra32">;
+
+let rs1 = 0 in
+def LZCNT : VISInstFormat<0b000010111, (outs I64Regs:$rd),
+ (ins I64Regs:$rs2), "lzcnt $rs2, $rd", []>;
+
+let rs1 = 0 in {
+def MOVSTOSW : VISInstFormat<0b100010011, (outs I64Regs:$rd),
+ (ins DFPRegs:$rs2), "movstosw $rs2, $rd", []>;
+def MOVSTOUW : VISInstFormat<0b100010001, (outs I64Regs:$rd),
+ (ins DFPRegs:$rs2), "movstouw $rs2, $rd", []>;
+def MOVDTOX : VISInstFormat<0b100010000, (outs I64Regs:$rd),
+ (ins DFPRegs:$rs2), "movdtox $rs2, $rd", []>;
+def MOVWTOS : VISInstFormat<0b100011001, (outs DFPRegs:$rd),
+ (ins I64Regs:$rs2), "movdtox $rs2, $rd", []>;
+def MOVXTOD : VISInstFormat<0b100011000, (outs DFPRegs:$rd),
+ (ins I64Regs:$rs2), "movdtox $rs2, $rd", []>;
+}
+
+def PDISTN : VISInst<0b000111111, "pdistn">;
+
+def UMULXHI : VISInst<0b000010110, "umulxhi", I64Regs>;
+def XMULX : VISInst<0b100010101, "xmulx", I64Regs>;
+def XMULXHI : VISInst<0b100010111, "xmulxhi", I64Regs>;
+} // Predicates = [HasVIS3]
diff --git a/contrib/llvm/lib/Target/Sparc/SparcJITInfo.cpp b/contrib/llvm/lib/Target/Sparc/SparcJITInfo.cpp
index 959d12f..d0eec98 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcJITInfo.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcJITInfo.cpp
@@ -10,7 +10,6 @@
// This file implements the JIT interfaces for the Sparc target.
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "jit"
#include "SparcJITInfo.h"
#include "Sparc.h"
#include "SparcRelocations.h"
@@ -20,6 +19,8 @@
using namespace llvm;
+#define DEBUG_TYPE "jit"
+
/// JITCompilerFunction - This contains the address of the JIT function used to
/// compile a function lazily.
static TargetJITInfo::JITCompilerFn JITCompilerFunction;
@@ -212,7 +213,8 @@ extern "C" void *SparcCompilationCallbackC(intptr_t StubAddr) {
void SparcJITInfo::replaceMachineCodeForFunction(void *Old, void *New) {
- assert(0 && "FIXME: Implement SparcJITInfo::replaceMachineCodeForFunction");
+ llvm_unreachable("FIXME: Implement SparcJITInfo::"
+ "replaceMachineCodeForFunction");
}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcJITInfo.h b/contrib/llvm/lib/Target/Sparc/SparcJITInfo.h
index 9c6e488..ff1b43a 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcJITInfo.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcJITInfo.h
@@ -34,27 +34,27 @@ class SparcJITInfo : public TargetJITInfo {
/// overwriting OLD with a branch to NEW. This is used for self-modifying
/// code.
///
- virtual void replaceMachineCodeForFunction(void *Old, void *New);
+ void replaceMachineCodeForFunction(void *Old, void *New) override;
// getStubLayout - Returns the size and alignment of the largest call stub
// on Sparc.
- virtual StubLayout getStubLayout();
+ StubLayout getStubLayout() override;
/// emitFunctionStub - Use the specified JITCodeEmitter object to emit a
/// small native function that simply calls the function at the specified
/// address.
- virtual void *emitFunctionStub(const Function *F, void *Fn,
- JITCodeEmitter &JCE);
+ void *emitFunctionStub(const Function *F, void *Fn,
+ JITCodeEmitter &JCE) override;
/// getLazyResolverFunction - Expose the lazy resolver to the JIT.
- virtual LazyResolverFn getLazyResolverFunction(JITCompilerFn);
+ LazyResolverFn getLazyResolverFunction(JITCompilerFn) override;
/// relocate - Before the JIT can run a block of code that has been emitted,
/// it must rewrite the code to contain the actual addresses of any
/// referenced global symbols.
- virtual void relocate(void *Function, MachineRelocation *MR,
- unsigned NumRelocs, unsigned char *GOTBase);
+ void relocate(void *Function, MachineRelocation *MR,
+ unsigned NumRelocs, unsigned char *GOTBase) override;
/// Initialize - Initialize internal stage for the function being JITted.
void Initialize(const MachineFunction &MF, bool isPIC) {
diff --git a/contrib/llvm/lib/Target/Sparc/SparcMCInstLower.cpp b/contrib/llvm/lib/Target/Sparc/SparcMCInstLower.cpp
index fc3ba90..9e94d2c 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcMCInstLower.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcMCInstLower.cpp
@@ -14,16 +14,16 @@
#include "Sparc.h"
#include "MCTargetDesc/SparcMCExpr.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
-#include "llvm/MC/MCContext.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
-#include "llvm/Target/Mangler.h"
-#include "llvm/ADT/SmallString.h"
using namespace llvm;
@@ -34,7 +34,7 @@ static MCOperand LowerSymbolOperand(const MachineInstr *MI,
SparcMCExpr::VariantKind Kind =
(SparcMCExpr::VariantKind)MO.getTargetFlags();
- const MCSymbol *Symbol = 0;
+ const MCSymbol *Symbol = nullptr;
switch(MO.getType()) {
default: llvm_unreachable("Unknown type in LowerSymbolOperand");
diff --git a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp
index f222382..dc1ec7c 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp
@@ -25,11 +25,11 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetInstrInfo.h"
+using namespace llvm;
+
#define GET_REGINFO_TARGET_DESC
#include "SparcGenRegisterInfo.inc"
-using namespace llvm;
-
static cl::opt<bool>
ReserveAppRegisters("sparc-reserve-app-registers", cl::Hidden, cl::init(false),
cl::desc("Reserve application registers (%g2-%g4)"));
@@ -38,8 +38,8 @@ SparcRegisterInfo::SparcRegisterInfo(SparcSubtarget &st)
: SparcGenRegisterInfo(SP::O7), Subtarget(st) {
}
-const uint16_t* SparcRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF)
- const {
+const MCPhysReg*
+SparcRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
return CSR_SaveList;
}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h
index 00b5a98..77f879a 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h
@@ -31,25 +31,26 @@ struct SparcRegisterInfo : public SparcGenRegisterInfo {
SparcRegisterInfo(SparcSubtarget &st);
/// Code Generation virtual methods...
- const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
- const uint32_t* getCallPreservedMask(CallingConv::ID CC) const;
+ const MCPhysReg *
+ getCalleeSavedRegs(const MachineFunction *MF = nullptr) const override;
+ const uint32_t* getCallPreservedMask(CallingConv::ID CC) const override;
const uint32_t* getRTCallPreservedMask(CallingConv::ID CC) const;
- BitVector getReservedRegs(const MachineFunction &MF) const;
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
const TargetRegisterClass *getPointerRegClass(const MachineFunction &MF,
- unsigned Kind) const;
+ unsigned Kind) const override;
void eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, unsigned FIOperandNum,
- RegScavenger *RS = NULL) const;
+ RegScavenger *RS = nullptr) const override;
void processFunctionBeforeFrameFinalized(MachineFunction &MF,
- RegScavenger *RS = NULL) const;
+ RegScavenger *RS = nullptr) const;
// Debug information queries.
- unsigned getFrameRegister(const MachineFunction &MF) const;
+ unsigned getFrameRegister(const MachineFunction &MF) const override;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.td b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.td
index 2a575c0..2cadff1 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.td
+++ b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.td
@@ -16,7 +16,8 @@ class SparcReg<bits<16> Enc, string n> : Register<n> {
let Namespace = "SP";
}
-class SparcCtrlReg<string n>: Register<n> {
+class SparcCtrlReg<bits<16> Enc, string n>: Register<n> {
+ let HWEncoding = Enc;
let Namespace = "SP";
}
@@ -49,11 +50,12 @@ class Rq<bits<16> Enc, string n, list<Register> subregs> : SparcReg<Enc, n> {
}
// Control Registers
-def ICC : SparcCtrlReg<"ICC">; // This represents icc and xcc in 64-bit code.
-def FCC : SparcCtrlReg<"FCC">;
+def ICC : SparcCtrlReg<0, "ICC">; // This represents icc and xcc in 64-bit code.
+foreach I = 0-3 in
+ def FCC#I : SparcCtrlReg<I, "FCC"#I>;
// Y register
-def Y : SparcCtrlReg<"Y">;
+def Y : SparcCtrlReg<0, "Y">, DwarfRegNum<[64]>;
// Integer registers
def G0 : Ri< 0, "G0">, DwarfRegNum<[0]>;
@@ -204,3 +206,6 @@ def FPRegs : RegisterClass<"SP", [f32], 32, (sequence "F%u", 0, 31)>;
def DFPRegs : RegisterClass<"SP", [f64], 64, (sequence "D%u", 0, 31)>;
def QFPRegs : RegisterClass<"SP", [f128], 128, (sequence "Q%u", 0, 15)>;
+
+// Floating point control register classes.
+def FCCRegs : RegisterClass<"SP", [i1], 1, (sequence "FCC%u", 0, 3)>;
diff --git a/contrib/llvm/lib/Target/Sparc/SparcSelectionDAGInfo.cpp b/contrib/llvm/lib/Target/Sparc/SparcSelectionDAGInfo.cpp
index 190c575..a308fc5 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcSelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcSelectionDAGInfo.cpp
@@ -11,12 +11,13 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "sparc-selectiondag-info"
-#include "SparcTargetMachine.h"
+#include "SparcSelectionDAGInfo.h"
using namespace llvm;
-SparcSelectionDAGInfo::SparcSelectionDAGInfo(const SparcTargetMachine &TM)
- : TargetSelectionDAGInfo(TM) {
+#define DEBUG_TYPE "sparc-selectiondag-info"
+
+SparcSelectionDAGInfo::SparcSelectionDAGInfo(const DataLayout &DL)
+ : TargetSelectionDAGInfo(&DL) {
}
SparcSelectionDAGInfo::~SparcSelectionDAGInfo() {
diff --git a/contrib/llvm/lib/Target/Sparc/SparcSelectionDAGInfo.h b/contrib/llvm/lib/Target/Sparc/SparcSelectionDAGInfo.h
index dcd4203..2346f41 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcSelectionDAGInfo.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcSelectionDAGInfo.h
@@ -22,7 +22,7 @@ class SparcTargetMachine;
class SparcSelectionDAGInfo : public TargetSelectionDAGInfo {
public:
- explicit SparcSelectionDAGInfo(const SparcTargetMachine &TM);
+ explicit SparcSelectionDAGInfo(const DataLayout &DL);
~SparcSelectionDAGInfo();
};
diff --git a/contrib/llvm/lib/Target/Sparc/SparcSubtarget.cpp b/contrib/llvm/lib/Target/Sparc/SparcSubtarget.cpp
index 6fc9d56..eea0c8c 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcSubtarget.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcSubtarget.cpp
@@ -16,28 +16,54 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "sparc-subtarget"
+
#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "SparcGenSubtargetInfo.inc"
-using namespace llvm;
-
void SparcSubtarget::anchor() { }
-SparcSubtarget::SparcSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, bool is64Bit) :
- SparcGenSubtargetInfo(TT, CPU, FS),
- IsV9(false),
- V8DeprecatedInsts(false),
- IsVIS(false),
- Is64Bit(is64Bit),
- HasHardQuad(false),
- UsePopc(false) {
+static std::string computeDataLayout(const SparcSubtarget &ST) {
+ // Sparc is big endian.
+ std::string Ret = "E-m:e";
+
+ // Some ABIs have 32bit pointers.
+ if (!ST.is64Bit())
+ Ret += "-p:32:32";
+
+ // Alignments for 64 bit integers.
+ Ret += "-i64:64";
+
+  // On SparcV9, 128-bit floats are aligned to 128 bits; on others, only to 64.
+  // On SparcV9, registers can hold 64 or 32 bits; on others, only 32.
+ if (ST.is64Bit())
+ Ret += "-n32:64";
+ else
+ Ret += "-f128:64-n32";
+
+ if (ST.is64Bit())
+ Ret += "-S128";
+ else
+ Ret += "-S64";
+
+ return Ret;
+}
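+
+// For illustration, the string this helper builds works out to
+// "E-m:e-p:32:32-i64:64-f128:64-n32-S64" for 32-bit subtargets and
+// "E-m:e-i64:64-n32:64-S128" for 64-bit subtargets.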
+
+SparcSubtarget &SparcSubtarget::initializeSubtargetDependencies(StringRef CPU,
+ StringRef FS) {
+ IsV9 = false;
+ V8DeprecatedInsts = false;
+ IsVIS = false;
+ HasHardQuad = false;
+ UsePopc = false;
// Determine default and user specified characteristics
std::string CPUName = CPU;
if (CPUName.empty())
- CPUName = (is64Bit) ? "v9" : "v8";
+ CPUName = (Is64Bit) ? "v9" : "v8";
// Parse features string.
ParseSubtargetFeatures(CPUName, FS);
@@ -45,8 +71,16 @@ SparcSubtarget::SparcSubtarget(const std::string &TT, const std::string &CPU,
// Popc is a v9-only instruction.
if (!IsV9)
UsePopc = false;
+
+ return *this;
}
+SparcSubtarget::SparcSubtarget(const std::string &TT, const std::string &CPU,
+ const std::string &FS, TargetMachine &TM,
+ bool is64Bit)
+ : SparcGenSubtargetInfo(TT, CPU, FS), Is64Bit(is64Bit),
+ DL(computeDataLayout(initializeSubtargetDependencies(CPU, FS))),
+ InstrInfo(*this), TLInfo(TM), TSInfo(DL), FrameLowering(*this) {}
int SparcSubtarget::getAdjustedFrameSize(int frameSize) const {
diff --git a/contrib/llvm/lib/Target/Sparc/SparcSubtarget.h b/contrib/llvm/lib/Target/Sparc/SparcSubtarget.h
index e4239e2..a335778 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcSubtarget.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcSubtarget.h
@@ -14,6 +14,13 @@
#ifndef SPARC_SUBTARGET_H
#define SPARC_SUBTARGET_H
+#include "SparcFrameLowering.h"
+#include "SparcInstrInfo.h"
+#include "SparcISelLowering.h"
+#include "SparcJITInfo.h"
+#include "SparcSelectionDAGInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <string>
@@ -27,17 +34,35 @@ class SparcSubtarget : public SparcGenSubtargetInfo {
virtual void anchor();
bool IsV9;
bool V8DeprecatedInsts;
- bool IsVIS;
+ bool IsVIS, IsVIS2, IsVIS3;
bool Is64Bit;
bool HasHardQuad;
bool UsePopc;
+ const DataLayout DL; // Calculates type size & alignment
+ SparcInstrInfo InstrInfo;
+ SparcTargetLowering TLInfo;
+ SparcSelectionDAGInfo TSInfo;
+ SparcFrameLowering FrameLowering;
+ SparcJITInfo JITInfo;
public:
SparcSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, bool is64bit);
+ const std::string &FS, TargetMachine &TM, bool is64bit);
+
+ const SparcInstrInfo *getInstrInfo() const { return &InstrInfo; }
+ const TargetFrameLowering *getFrameLowering() const { return &FrameLowering; }
+ const SparcRegisterInfo *getRegisterInfo() const {
+ return &InstrInfo.getRegisterInfo();
+ }
+ const SparcTargetLowering *getTargetLowering() const { return &TLInfo; }
+ const SparcSelectionDAGInfo *getSelectionDAGInfo() const { return &TSInfo; }
+ SparcJITInfo *getJITInfo() { return &JITInfo; }
+ const DataLayout *getDataLayout() const { return &DL; }
bool isV9() const { return IsV9; }
bool isVIS() const { return IsVIS; }
+ bool isVIS2() const { return IsVIS2; }
+ bool isVIS3() const { return IsVIS3; }
bool useDeprecatedV8Instructions() const { return V8DeprecatedInsts; }
bool hasHardQuad() const { return HasHardQuad; }
bool usePopc() const { return UsePopc; }
@@ -45,17 +70,9 @@ public:
/// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
+ SparcSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
bool is64Bit() const { return Is64Bit; }
- std::string getDataLayout() const {
- const char *p;
- if (is64Bit()) {
- p = "E-p:64:64:64-i64:64:64-f64:64:64-f128:128:128-n32:64";
- } else {
- p = "E-p:32:32:32-i64:64:64-f64:64:64-f128:64:64-n32";
- }
- return std::string(p);
- }
/// The 64-bit ABI uses biased stack and frame pointers, so the stack frame
/// of the current function is the area from [%sp+BIAS] to [%fp+BIAS].
diff --git a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
index 0f93674..0130fac 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
@@ -32,11 +32,7 @@ SparcTargetMachine::SparcTargetMachine(const Target &T, StringRef TT,
CodeGenOpt::Level OL,
bool is64bit)
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- Subtarget(TT, CPU, FS, is64bit),
- DL(Subtarget.getDataLayout()),
- InstrInfo(Subtarget),
- TLInfo(*this), TSInfo(*this),
- FrameLowering(Subtarget) {
+ Subtarget(TT, CPU, FS, *this, is64bit) {
initAsmInfo();
}
@@ -51,8 +47,8 @@ public:
return getTM<SparcTargetMachine>();
}
- virtual bool addInstSelector();
- virtual bool addPreEmitPass();
+ bool addInstSelector() override;
+ bool addPreEmitPass() override;
};
} // namespace
diff --git a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h
index 8c9bcd3..03b5137 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h
@@ -14,54 +14,44 @@
#ifndef SPARCTARGETMACHINE_H
#define SPARCTARGETMACHINE_H
-#include "SparcFrameLowering.h"
-#include "SparcISelLowering.h"
#include "SparcInstrInfo.h"
-#include "SparcJITInfo.h"
-#include "SparcSelectionDAGInfo.h"
#include "SparcSubtarget.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
namespace llvm {
class SparcTargetMachine : public LLVMTargetMachine {
SparcSubtarget Subtarget;
- const DataLayout DL; // Calculates type size & alignment
- SparcInstrInfo InstrInfo;
- SparcTargetLowering TLInfo;
- SparcSelectionDAGInfo TSInfo;
- SparcFrameLowering FrameLowering;
- SparcJITInfo JITInfo;
public:
SparcTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL, bool is64bit);
- virtual const SparcInstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const TargetFrameLowering *getFrameLowering() const {
- return &FrameLowering;
+ const SparcInstrInfo *getInstrInfo() const override {
+ return getSubtargetImpl()->getInstrInfo();
}
- virtual const SparcSubtarget *getSubtargetImpl() const{ return &Subtarget; }
- virtual const SparcRegisterInfo *getRegisterInfo() const {
- return &InstrInfo.getRegisterInfo();
+ const TargetFrameLowering *getFrameLowering() const override {
+ return getSubtargetImpl()->getFrameLowering();
}
- virtual const SparcTargetLowering* getTargetLowering() const {
- return &TLInfo;
+ const SparcSubtarget *getSubtargetImpl() const override { return &Subtarget; }
+ const SparcRegisterInfo *getRegisterInfo() const override {
+ return getSubtargetImpl()->getRegisterInfo();
}
- virtual const SparcSelectionDAGInfo* getSelectionDAGInfo() const {
- return &TSInfo;
+ const SparcTargetLowering *getTargetLowering() const override {
+ return getSubtargetImpl()->getTargetLowering();
}
- virtual SparcJITInfo *getJITInfo() {
- return &JITInfo;
+ const SparcSelectionDAGInfo *getSelectionDAGInfo() const override {
+ return getSubtargetImpl()->getSelectionDAGInfo();
+ }
+ SparcJITInfo *getJITInfo() override { return Subtarget.getJITInfo(); }
+ const DataLayout *getDataLayout() const override {
+ return getSubtargetImpl()->getDataLayout();
}
- virtual const DataLayout *getDataLayout() const { return &DL; }
// Pass Pipeline Configuration
- virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
- virtual bool addCodeEmitter(PassManagerBase &PM, JITCodeEmitter &JCE);
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+ bool addCodeEmitter(PassManagerBase &PM, JITCodeEmitter &JCE) override;
};
/// SparcV8TargetMachine - Sparc 32-bit target machine
diff --git a/contrib/llvm/lib/Target/Sparc/SparcTargetObjectFile.cpp b/contrib/llvm/lib/Target/Sparc/SparcTargetObjectFile.cpp
index 18612bd..32b2240 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcTargetObjectFile.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcTargetObjectFile.cpp
@@ -11,30 +11,25 @@
#include "MCTargetDesc/SparcMCExpr.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/Support/Dwarf.h"
-#include "llvm/Target/Mangler.h"
+#include "llvm/Target/TargetLowering.h"
using namespace llvm;
-
-const MCExpr *SparcELFTargetObjectFile::
-getTTypeGlobalReference(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI, unsigned Encoding,
- MCStreamer &Streamer) const {
+const MCExpr *SparcELFTargetObjectFile::getTTypeGlobalReference(
+ const GlobalValue *GV, unsigned Encoding, Mangler &Mang,
+ const TargetMachine &TM, MachineModuleInfo *MMI,
+ MCStreamer &Streamer) const {
if (Encoding & dwarf::DW_EH_PE_pcrel) {
MachineModuleInfoELF &ELFMMI = MMI->getObjFileInfo<MachineModuleInfoELF>();
- //MCSymbol *SSym = getSymbolWithGlobalValueBase(*Mang, GV, ".DW.stub");
- SmallString<60> NameStr;
- Mang->getNameWithPrefix(NameStr, GV, true);
- NameStr.append(".DW.stub");
- MCSymbol *SSym = getContext().GetOrCreateSymbol(NameStr.str());
+ MCSymbol *SSym = getSymbolWithGlobalValueBase(GV, ".DW.stub", Mang, TM);
// Add information about the stub reference to ELFMMI so that the stub
// gets emitted by the asmprinter.
MachineModuleInfoImpl::StubValueTy &StubSym = ELFMMI.getGVStubEntry(SSym);
- if (StubSym.getPointer() == 0) {
- MCSymbol *Sym = getSymbol(*Mang, GV);
+ if (!StubSym.getPointer()) {
+ MCSymbol *Sym = TM.getSymbol(GV, Mang);
StubSym = MachineModuleInfoImpl::StubValueTy(Sym, !GV->hasLocalLinkage());
}
@@ -43,6 +38,6 @@ getTTypeGlobalReference(const GlobalValue *GV, Mangler *Mang,
MCSymbolRefExpr::Create(SSym, Ctx), Ctx);
}
- return TargetLoweringObjectFileELF::
- getTTypeGlobalReference(GV, Mang, MMI, Encoding, Streamer);
+ return TargetLoweringObjectFileELF::getTTypeGlobalReference(
+ GV, Encoding, Mang, TM, MMI, Streamer);
}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcTargetObjectFile.h b/contrib/llvm/lib/Target/Sparc/SparcTargetObjectFile.h
index 7cf850d..c60675b 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcTargetObjectFile.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcTargetObjectFile.h
@@ -24,9 +24,10 @@ public:
{}
const MCExpr *
- getTTypeGlobalReference(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI, unsigned Encoding,
- MCStreamer &Streamer) const;
+ getTTypeGlobalReference(const GlobalValue *GV, unsigned Encoding,
+ Mangler &Mang, const TargetMachine &TM,
+ MachineModuleInfo *MMI,
+ MCStreamer &Streamer) const override;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/Sparc/SparcTargetStreamer.h b/contrib/llvm/lib/Target/Sparc/SparcTargetStreamer.h
index 73339ac..3767d8e 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcTargetStreamer.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcTargetStreamer.h
@@ -18,6 +18,7 @@ class SparcTargetStreamer : public MCTargetStreamer {
virtual void anchor();
public:
+ SparcTargetStreamer(MCStreamer &S);
/// Emit ".register <reg>, #ignore".
virtual void emitSparcRegisterIgnore(unsigned reg) = 0;
/// Emit ".register <reg>, #scratch".
@@ -29,18 +30,19 @@ class SparcTargetAsmStreamer : public SparcTargetStreamer {
formatted_raw_ostream &OS;
public:
- SparcTargetAsmStreamer(formatted_raw_ostream &OS);
- virtual void emitSparcRegisterIgnore(unsigned reg);
- virtual void emitSparcRegisterScratch(unsigned reg);
+ SparcTargetAsmStreamer(MCStreamer &S, formatted_raw_ostream &OS);
+ void emitSparcRegisterIgnore(unsigned reg) override;
+ void emitSparcRegisterScratch(unsigned reg) override;
};
// This part is for ELF object output
class SparcTargetELFStreamer : public SparcTargetStreamer {
public:
+ SparcTargetELFStreamer(MCStreamer &S);
MCELFStreamer &getStreamer();
- virtual void emitSparcRegisterIgnore(unsigned reg) {}
- virtual void emitSparcRegisterScratch(unsigned reg) {}
+ void emitSparcRegisterIgnore(unsigned reg) override {}
+ void emitSparcRegisterScratch(unsigned reg) override {}
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp b/contrib/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
index 763f40c..758be41 100644
--- a/contrib/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
@@ -22,7 +22,7 @@ using namespace llvm;
// Return true if Expr is in the range [MinValue, MaxValue].
static bool inRange(const MCExpr *Expr, int64_t MinValue, int64_t MaxValue) {
- if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) {
+ if (auto *CE = dyn_cast<MCConstantExpr>(Expr)) {
int64_t Value = CE->getValue();
return Value >= MinValue && Value <= MaxValue;
}
@@ -104,55 +104,55 @@ private:
MemOp Mem;
};
- SystemZOperand(OperandKind kind, SMLoc startLoc, SMLoc endLoc)
- : Kind(kind), StartLoc(startLoc), EndLoc(endLoc)
- {}
-
void addExpr(MCInst &Inst, const MCExpr *Expr) const {
// Add as immediates when possible. Null MCExpr = 0.
- if (Expr == 0)
+ if (!Expr)
Inst.addOperand(MCOperand::CreateImm(0));
- else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
+ else if (auto *CE = dyn_cast<MCConstantExpr>(Expr))
Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
else
Inst.addOperand(MCOperand::CreateExpr(Expr));
}
public:
+ SystemZOperand(OperandKind kind, SMLoc startLoc, SMLoc endLoc)
+ : Kind(kind), StartLoc(startLoc), EndLoc(endLoc) {}
+
// Create particular kinds of operand.
- static SystemZOperand *createInvalid(SMLoc StartLoc, SMLoc EndLoc) {
- return new SystemZOperand(KindInvalid, StartLoc, EndLoc);
+ static std::unique_ptr<SystemZOperand> createInvalid(SMLoc StartLoc,
+ SMLoc EndLoc) {
+ return make_unique<SystemZOperand>(KindInvalid, StartLoc, EndLoc);
}
- static SystemZOperand *createToken(StringRef Str, SMLoc Loc) {
- SystemZOperand *Op = new SystemZOperand(KindToken, Loc, Loc);
+ static std::unique_ptr<SystemZOperand> createToken(StringRef Str, SMLoc Loc) {
+ auto Op = make_unique<SystemZOperand>(KindToken, Loc, Loc);
Op->Token.Data = Str.data();
Op->Token.Length = Str.size();
return Op;
}
- static SystemZOperand *createReg(RegisterKind Kind, unsigned Num,
- SMLoc StartLoc, SMLoc EndLoc) {
- SystemZOperand *Op = new SystemZOperand(KindReg, StartLoc, EndLoc);
+ static std::unique_ptr<SystemZOperand>
+ createReg(RegisterKind Kind, unsigned Num, SMLoc StartLoc, SMLoc EndLoc) {
+ auto Op = make_unique<SystemZOperand>(KindReg, StartLoc, EndLoc);
Op->Reg.Kind = Kind;
Op->Reg.Num = Num;
return Op;
}
- static SystemZOperand *createAccessReg(unsigned Num, SMLoc StartLoc,
- SMLoc EndLoc) {
- SystemZOperand *Op = new SystemZOperand(KindAccessReg, StartLoc, EndLoc);
+ static std::unique_ptr<SystemZOperand>
+ createAccessReg(unsigned Num, SMLoc StartLoc, SMLoc EndLoc) {
+ auto Op = make_unique<SystemZOperand>(KindAccessReg, StartLoc, EndLoc);
Op->AccessReg = Num;
return Op;
}
- static SystemZOperand *createImm(const MCExpr *Expr, SMLoc StartLoc,
- SMLoc EndLoc) {
- SystemZOperand *Op = new SystemZOperand(KindImm, StartLoc, EndLoc);
+ static std::unique_ptr<SystemZOperand>
+ createImm(const MCExpr *Expr, SMLoc StartLoc, SMLoc EndLoc) {
+ auto Op = make_unique<SystemZOperand>(KindImm, StartLoc, EndLoc);
Op->Imm = Expr;
return Op;
}
- static SystemZOperand *createMem(RegisterKind RegKind, unsigned Base,
- const MCExpr *Disp, unsigned Index,
- const MCExpr *Length, SMLoc StartLoc,
- SMLoc EndLoc) {
- SystemZOperand *Op = new SystemZOperand(KindMem, StartLoc, EndLoc);
+ static std::unique_ptr<SystemZOperand>
+ createMem(RegisterKind RegKind, unsigned Base, const MCExpr *Disp,
+ unsigned Index, const MCExpr *Length, SMLoc StartLoc,
+ SMLoc EndLoc) {
+ auto Op = make_unique<SystemZOperand>(KindMem, StartLoc, EndLoc);
Op->Mem.RegKind = RegKind;
Op->Mem.Base = Base;
Op->Mem.Index = Index;
@@ -162,7 +162,7 @@ public:
}
// Token operands
- virtual bool isToken() const LLVM_OVERRIDE {
+ bool isToken() const override {
return Kind == KindToken;
}
StringRef getToken() const {
@@ -171,13 +171,13 @@ public:
}
// Register operands.
- virtual bool isReg() const LLVM_OVERRIDE {
+ bool isReg() const override {
return Kind == KindReg;
}
bool isReg(RegisterKind RegKind) const {
return Kind == KindReg && Reg.Kind == RegKind;
}
- virtual unsigned getReg() const LLVM_OVERRIDE {
+ unsigned getReg() const override {
assert(Kind == KindReg && "Not a register");
return Reg.Num;
}
@@ -189,7 +189,7 @@ public:
}
// Immediate operands.
- virtual bool isImm() const LLVM_OVERRIDE {
+ bool isImm() const override {
return Kind == KindImm;
}
bool isImm(int64_t MinValue, int64_t MaxValue) const {
@@ -201,14 +201,14 @@ public:
}
// Memory operands.
- virtual bool isMem() const LLVM_OVERRIDE {
+ bool isMem() const override {
return Kind == KindMem;
}
bool isMem(RegisterKind RegKind, MemoryKind MemKind) const {
return (Kind == KindMem &&
Mem.RegKind == RegKind &&
(MemKind == BDXMem || !Mem.Index) &&
- (MemKind == BDLMem) == (Mem.Length != 0));
+ (MemKind == BDLMem) == (Mem.Length != nullptr));
}
bool isMemDisp12(RegisterKind RegKind, MemoryKind MemKind) const {
return isMem(RegKind, MemKind) && inRange(Mem.Disp, 0, 0xfff);
@@ -221,9 +221,9 @@ public:
}
// Override MCParsedAsmOperand.
- virtual SMLoc getStartLoc() const LLVM_OVERRIDE { return StartLoc; }
- virtual SMLoc getEndLoc() const LLVM_OVERRIDE { return EndLoc; }
- virtual void print(raw_ostream &OS) const LLVM_OVERRIDE;
+ SMLoc getStartLoc() const override { return StartLoc; }
+ SMLoc getEndLoc() const override { return EndLoc; }
+ void print(raw_ostream &OS) const override;
// Used by the TableGen code to add particular types of operand
// to an instruction.
@@ -313,25 +313,24 @@ private:
bool parseRegister(Register &Reg, RegisterGroup Group, const unsigned *Regs,
bool IsAddress = false);
- OperandMatchResultTy
- parseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- RegisterGroup Group, const unsigned *Regs, RegisterKind Kind);
+ OperandMatchResultTy parseRegister(OperandVector &Operands,
+ RegisterGroup Group, const unsigned *Regs,
+ RegisterKind Kind);
bool parseAddress(unsigned &Base, const MCExpr *&Disp,
unsigned &Index, const MCExpr *&Length,
const unsigned *Regs, RegisterKind RegKind);
- OperandMatchResultTy
- parseAddress(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- const unsigned *Regs, RegisterKind RegKind,
- MemoryKind MemKind);
+ OperandMatchResultTy parseAddress(OperandVector &Operands,
+ const unsigned *Regs, RegisterKind RegKind,
+ MemoryKind MemKind);
- bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- StringRef Mnemonic);
+ bool parseOperand(OperandVector &Operands, StringRef Mnemonic);
public:
SystemZAsmParser(MCSubtargetInfo &sti, MCAsmParser &parser,
- const MCInstrInfo &MII)
+ const MCInstrInfo &MII,
+ const MCTargetOptions &Options)
: MCTargetAsmParser(), STI(sti), Parser(parser) {
MCAsmParserExtension::Initialize(Parser);
@@ -340,95 +339,72 @@ public:
}
// Override MCTargetAsmParser.
- virtual bool ParseDirective(AsmToken DirectiveID) LLVM_OVERRIDE;
- virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
- SMLoc &EndLoc) LLVM_OVERRIDE;
- virtual bool ParseInstruction(ParseInstructionInfo &Info,
- StringRef Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands)
- LLVM_OVERRIDE;
- virtual bool
- MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm) LLVM_OVERRIDE;
+ bool ParseDirective(AsmToken DirectiveID) override;
+ bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
+ bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+ SMLoc NameLoc, OperandVector &Operands) override;
+ bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ OperandVector &Operands, MCStreamer &Out,
+ unsigned &ErrorInfo,
+ bool MatchingInlineAsm) override;
// Used by the TableGen code to parse particular operand types.
- OperandMatchResultTy
- parseGR32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseGR32(OperandVector &Operands) {
return parseRegister(Operands, RegGR, SystemZMC::GR32Regs, GR32Reg);
}
- OperandMatchResultTy
- parseGRH32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseGRH32(OperandVector &Operands) {
return parseRegister(Operands, RegGR, SystemZMC::GRH32Regs, GRH32Reg);
}
- OperandMatchResultTy
- parseGRX32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseGRX32(OperandVector &Operands) {
llvm_unreachable("GRX32 should only be used for pseudo instructions");
}
- OperandMatchResultTy
- parseGR64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseGR64(OperandVector &Operands) {
return parseRegister(Operands, RegGR, SystemZMC::GR64Regs, GR64Reg);
}
- OperandMatchResultTy
- parseGR128(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseGR128(OperandVector &Operands) {
return parseRegister(Operands, RegGR, SystemZMC::GR128Regs, GR128Reg);
}
- OperandMatchResultTy
- parseADDR32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseADDR32(OperandVector &Operands) {
return parseRegister(Operands, RegGR, SystemZMC::GR32Regs, ADDR32Reg);
}
- OperandMatchResultTy
- parseADDR64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseADDR64(OperandVector &Operands) {
return parseRegister(Operands, RegGR, SystemZMC::GR64Regs, ADDR64Reg);
}
- OperandMatchResultTy
- parseADDR128(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseADDR128(OperandVector &Operands) {
llvm_unreachable("Shouldn't be used as an operand");
}
- OperandMatchResultTy
- parseFP32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseFP32(OperandVector &Operands) {
return parseRegister(Operands, RegFP, SystemZMC::FP32Regs, FP32Reg);
}
- OperandMatchResultTy
- parseFP64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseFP64(OperandVector &Operands) {
return parseRegister(Operands, RegFP, SystemZMC::FP64Regs, FP64Reg);
}
- OperandMatchResultTy
- parseFP128(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseFP128(OperandVector &Operands) {
return parseRegister(Operands, RegFP, SystemZMC::FP128Regs, FP128Reg);
}
- OperandMatchResultTy
- parseBDAddr32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseBDAddr32(OperandVector &Operands) {
return parseAddress(Operands, SystemZMC::GR32Regs, ADDR32Reg, BDMem);
}
- OperandMatchResultTy
- parseBDAddr64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseBDAddr64(OperandVector &Operands) {
return parseAddress(Operands, SystemZMC::GR64Regs, ADDR64Reg, BDMem);
}
- OperandMatchResultTy
- parseBDXAddr64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseBDXAddr64(OperandVector &Operands) {
return parseAddress(Operands, SystemZMC::GR64Regs, ADDR64Reg, BDXMem);
}
- OperandMatchResultTy
- parseBDLAddr64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseBDLAddr64(OperandVector &Operands) {
return parseAddress(Operands, SystemZMC::GR64Regs, ADDR64Reg, BDLMem);
}
- OperandMatchResultTy
- parseAccessReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
- OperandMatchResultTy
- parsePCRel(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- int64_t MinVal, int64_t MaxVal);
- OperandMatchResultTy
- parsePCRel16(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseAccessReg(OperandVector &Operands);
+ OperandMatchResultTy parsePCRel(OperandVector &Operands, int64_t MinVal,
+ int64_t MaxVal);
+ OperandMatchResultTy parsePCRel16(OperandVector &Operands) {
return parsePCRel(Operands, -(1LL << 16), (1LL << 16) - 1);
}
- OperandMatchResultTy
- parsePCRel32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parsePCRel32(OperandVector &Operands) {
return parsePCRel(Operands, -(1LL << 32), (1LL << 32) - 1);
}
};
-}
+} // end anonymous namespace
#define GET_REGISTER_MATCHER
#define GET_SUBTARGET_FEATURE_NAME
@@ -498,9 +474,8 @@ bool SystemZAsmParser::parseRegister(Register &Reg, RegisterGroup Group,
// Parse a register and add it to Operands. The other arguments are as above.
SystemZAsmParser::OperandMatchResultTy
-SystemZAsmParser::parseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- RegisterGroup Group, const unsigned *Regs,
- RegisterKind Kind) {
+SystemZAsmParser::parseRegister(OperandVector &Operands, RegisterGroup Group,
+ const unsigned *Regs, RegisterKind Kind) {
if (Parser.getTok().isNot(AsmToken::Percent))
return MatchOperand_NoMatch;
@@ -528,7 +503,7 @@ bool SystemZAsmParser::parseAddress(unsigned &Base, const MCExpr *&Disp,
// Parse the optional base and index.
Index = 0;
Base = 0;
- Length = 0;
+ Length = nullptr;
if (getLexer().is(AsmToken::LParen)) {
Parser.Lex();
@@ -567,9 +542,8 @@ bool SystemZAsmParser::parseAddress(unsigned &Base, const MCExpr *&Disp,
// Parse a memory operand and add it to Operands. The other arguments
// are as above.
SystemZAsmParser::OperandMatchResultTy
-SystemZAsmParser::parseAddress(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- const unsigned *Regs, RegisterKind RegKind,
- MemoryKind MemKind) {
+SystemZAsmParser::parseAddress(OperandVector &Operands, const unsigned *Regs,
+ RegisterKind RegKind, MemoryKind MemKind) {
SMLoc StartLoc = Parser.getTok().getLoc();
unsigned Base, Index;
const MCExpr *Disp;
@@ -623,9 +597,9 @@ bool SystemZAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
return false;
}
-bool SystemZAsmParser::
-ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+bool SystemZAsmParser::ParseInstruction(ParseInstructionInfo &Info,
+ StringRef Name, SMLoc NameLoc,
+ OperandVector &Operands) {
Operands.push_back(SystemZOperand::createToken(Name, NameLoc));
// Read the remaining operands.
@@ -656,9 +630,8 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
return false;
}
-bool SystemZAsmParser::
-parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- StringRef Mnemonic) {
+bool SystemZAsmParser::parseOperand(OperandVector &Operands,
+ StringRef Mnemonic) {
// Check if the current operand has a custom associated parser, if so, try to
// custom parse the operand, or fallback to the general approach.
OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
@@ -701,11 +674,11 @@ parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
return false;
}
-bool SystemZAsmParser::
-MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm) {
+bool SystemZAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ OperandVector &Operands,
+ MCStreamer &Out,
+ unsigned &ErrorInfo,
+ bool MatchingInlineAsm) {
MCInst Inst;
unsigned MatchResult;
@@ -715,7 +688,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
default: break;
case Match_Success:
Inst.setLoc(IDLoc);
- Out.EmitInstruction(Inst);
+ Out.EmitInstruction(Inst, STI);
return false;
case Match_MissingFeature: {
@@ -740,7 +713,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
if (ErrorInfo >= Operands.size())
return Error(IDLoc, "too few operands for instruction");
- ErrorLoc = ((SystemZOperand*)Operands[ErrorInfo])->getStartLoc();
+ ErrorLoc = ((SystemZOperand &)*Operands[ErrorInfo]).getStartLoc();
if (ErrorLoc == SMLoc())
ErrorLoc = IDLoc;
}
@@ -754,13 +727,13 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
llvm_unreachable("Unexpected match type");
}
-SystemZAsmParser::OperandMatchResultTy SystemZAsmParser::
-parseAccessReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+SystemZAsmParser::OperandMatchResultTy
+SystemZAsmParser::parseAccessReg(OperandVector &Operands) {
if (Parser.getTok().isNot(AsmToken::Percent))
return MatchOperand_NoMatch;
Register Reg;
- if (parseRegister(Reg, RegAccess, 0))
+ if (parseRegister(Reg, RegAccess, nullptr))
return MatchOperand_ParseFail;
Operands.push_back(SystemZOperand::createAccessReg(Reg.Num,
@@ -769,9 +742,9 @@ parseAccessReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
return MatchOperand_Success;
}
-SystemZAsmParser::OperandMatchResultTy SystemZAsmParser::
-parsePCRel(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- int64_t MinVal, int64_t MaxVal) {
+SystemZAsmParser::OperandMatchResultTy
+SystemZAsmParser::parsePCRel(OperandVector &Operands, int64_t MinVal,
+ int64_t MaxVal) {
MCContext &Ctx = getContext();
MCStreamer &Out = getStreamer();
const MCExpr *Expr;
@@ -781,7 +754,7 @@ parsePCRel(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
// For consistency with the GNU assembler, treat immediates as offsets
// from ".".
- if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) {
+ if (auto *CE = dyn_cast<MCConstantExpr>(Expr)) {
int64_t Value = CE->getValue();
if ((Value & 1) || Value < MinVal || Value > MaxVal) {
Error(StartLoc, "offset out of range");
diff --git a/contrib/llvm/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp b/contrib/llvm/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp
index fc3c38d..2350776 100644
--- a/contrib/llvm/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp
@@ -17,28 +17,29 @@
using namespace llvm;
+#define DEBUG_TYPE "systemz-disassembler"
+
typedef MCDisassembler::DecodeStatus DecodeStatus;
namespace {
class SystemZDisassembler : public MCDisassembler {
public:
- SystemZDisassembler(const MCSubtargetInfo &STI)
- : MCDisassembler(STI) {}
+ SystemZDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx)
+ : MCDisassembler(STI, Ctx) {}
virtual ~SystemZDisassembler() {}
// Override MCDisassembler.
- virtual DecodeStatus getInstruction(MCInst &instr,
- uint64_t &size,
- const MemoryObject &region,
- uint64_t address,
- raw_ostream &vStream,
- raw_ostream &cStream) const LLVM_OVERRIDE;
+ DecodeStatus getInstruction(MCInst &instr, uint64_t &size,
+ const MemoryObject &region, uint64_t address,
+ raw_ostream &vStream,
+ raw_ostream &cStream) const override;
};
} // end anonymous namespace
static MCDisassembler *createSystemZDisassembler(const Target &T,
- const MCSubtargetInfo &STI) {
- return new SystemZDisassembler(STI);
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new SystemZDisassembler(STI, Ctx);
}
extern "C" void LLVMInitializeSystemZDisassembler() {
diff --git a/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp b/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp
index e1e64d3..d2ba9b6 100644
--- a/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp
@@ -7,8 +7,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
-
#include "SystemZInstPrinter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstrInfo.h"
@@ -16,6 +14,8 @@
using namespace llvm;
+#define DEBUG_TYPE "asm-printer"
+
#include "SystemZGenAsmWriter.inc"
void SystemZInstPrinter::printAddress(unsigned Base, int64_t Disp,
diff --git a/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h b/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h
index 734ecf0..dce482b 100644
--- a/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h
+++ b/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h
@@ -38,10 +38,8 @@ public:
static void printOperand(const MCOperand &MO, raw_ostream &O);
// Override MCInstPrinter.
- virtual void printRegName(raw_ostream &O, unsigned RegNo) const
- LLVM_OVERRIDE;
- virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot)
- LLVM_OVERRIDE;
+ void printRegName(raw_ostream &O, unsigned RegNo) const override;
+ void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot) override;
private:
// Print various types of operand.
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
index 26a8fae..6e7268d 100644
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
@@ -43,37 +43,27 @@ public:
: OSABI(osABI) {}
// Override MCAsmBackend
- virtual unsigned getNumFixupKinds() const LLVM_OVERRIDE {
+ unsigned getNumFixupKinds() const override {
return SystemZ::NumTargetFixupKinds;
}
- virtual const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const
- LLVM_OVERRIDE;
- virtual void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
- uint64_t Value) const LLVM_OVERRIDE;
- virtual bool mayNeedRelaxation(const MCInst &Inst) const LLVM_OVERRIDE {
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
+ void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ uint64_t Value, bool IsPCRel) const override;
+ bool mayNeedRelaxation(const MCInst &Inst) const override {
return false;
}
- virtual bool fixupNeedsRelaxation(const MCFixup &Fixup,
- uint64_t Value,
- const MCRelaxableFragment *Fragment,
- const MCAsmLayout &Layout) const
- LLVM_OVERRIDE {
+ bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
+ const MCRelaxableFragment *Fragment,
+ const MCAsmLayout &Layout) const override {
return false;
}
- virtual void relaxInstruction(const MCInst &Inst,
- MCInst &Res) const LLVM_OVERRIDE {
+ void relaxInstruction(const MCInst &Inst, MCInst &Res) const override {
llvm_unreachable("SystemZ does do not have assembler relaxation");
}
- virtual bool writeNopData(uint64_t Count,
- MCObjectWriter *OW) const LLVM_OVERRIDE;
- virtual MCObjectWriter *createObjectWriter(raw_ostream &OS) const
- LLVM_OVERRIDE {
+ bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
return createSystemZObjectWriter(OS, OSABI);
}
- virtual bool doesSectionRequireSymbols(const MCSection &Section) const
- LLVM_OVERRIDE {
- return false;
- }
};
} // end anonymous namespace
@@ -95,7 +85,8 @@ SystemZMCAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
}
void SystemZMCAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
- unsigned DataSize, uint64_t Value) const {
+ unsigned DataSize, uint64_t Value,
+ bool IsPCRel) const {
MCFixupKind Kind = Fixup.getKind();
unsigned Offset = Fixup.getOffset();
unsigned Size = (getFixupKindInfo(Kind).TargetSize + 7) / 8;
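
A small aside on the Size computation above: fixup widths are recorded in bits, and applyFixup rounds them up to whole bytes before patching the data. A self-contained sketch of that rounding, with sample widths chosen purely for illustration:

#include <cassert>

// Fixup widths (TargetSize) are given in bits; applyFixup patches whole
// bytes, so the byte count is rounded up with (TargetSize + 7) / 8.
static unsigned fixupSizeInBytes(unsigned TargetSizeInBits) {
  return (TargetSizeInBits + 7) / 8;
}

int main() {
  assert(fixupSizeInBytes(16) == 2);  // a 16-bit PC-relative fixup
  assert(fixupSizeInBytes(32) == 4);  // a 32-bit PC-relative fixup
  assert(fixupSizeInBytes(12) == 2);  // a 12-bit field still spans two bytes
  return 0;
}
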
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
index 965c41e..c46a36b 100644
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
@@ -19,8 +19,6 @@ SystemZMCAsmInfo::SystemZMCAsmInfo(StringRef TT) {
IsLittleEndian = false;
CommentString = "#";
- GlobalPrefix = "";
- PrivateGlobalPrefix = ".L";
ZeroDirective = "\t.space\t";
Data64bitsDirective = "\t.quad\t";
UsesELFSectionDirectiveForBSS = true;
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h
index b9ac92a..1de97af 100644
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h
@@ -21,10 +21,9 @@ public:
explicit SystemZMCAsmInfo(StringRef TT);
// Override MCAsmInfo;
- virtual const MCSection *getNonexecutableStackSection(MCContext &Ctx) const
- LLVM_OVERRIDE;
+ const MCSection *getNonexecutableStackSection(MCContext &Ctx) const override;
};
-} // namespace llvm
+} // end namespace llvm
#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp
index f07ea7b..27b4bd8 100644
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mccodeemitter"
#include "MCTargetDesc/SystemZMCTargetDesc.h"
#include "MCTargetDesc/SystemZMCFixups.h"
#include "llvm/MC/MCCodeEmitter.h"
@@ -21,6 +20,8 @@
using namespace llvm;
+#define DEBUG_TYPE "mccodeemitter"
+
namespace {
class SystemZMCCodeEmitter : public MCCodeEmitter {
const MCInstrInfo &MCII;
@@ -34,34 +35,41 @@ public:
~SystemZMCCodeEmitter() {}
  // Override MCCodeEmitter.
- virtual void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const
- LLVM_OVERRIDE;
+ void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const override;
private:
// Automatically generated by TableGen.
uint64_t getBinaryCodeForInstr(const MCInst &MI,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
// Called by the TableGen code to get the binary encoding of operand
// MO in MI. Fixups is the list of fixups against MI.
uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
// Called by the TableGen code to get the binary encoding of an address.
// The index or length, if any, is encoded first, followed by the base,
// followed by the displacement. In a 20-bit displacement,
// the low 12 bits are encoded before the high 8 bits.
uint64_t getBDAddr12Encoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
uint64_t getBDAddr20Encoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
uint64_t getBDXAddr12Encoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
uint64_t getBDXAddr20Encoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
uint64_t getBDLAddr12Len8Encoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
// Operand OpNum of MI needs a PC-relative fixup of kind Kind at
// Offset bytes from the start of MI. Add the fixup to Fixups
@@ -72,15 +80,17 @@ private:
unsigned Kind, int64_t Offset) const;
uint64_t getPC16DBLEncoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
return getPCRelEncoding(MI, OpNum, Fixups, SystemZ::FK_390_PC16DBL, 2);
}
uint64_t getPC32DBLEncoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
return getPCRelEncoding(MI, OpNum, Fixups, SystemZ::FK_390_PC32DBL, 2);
}
};
-}
+} // end anonymous namespace
MCCodeEmitter *llvm::createSystemZMCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI,
@@ -91,8 +101,9 @@ MCCodeEmitter *llvm::createSystemZMCCodeEmitter(const MCInstrInfo &MCII,
void SystemZMCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const {
- uint64_t Bits = getBinaryCodeForInstr(MI, Fixups);
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ uint64_t Bits = getBinaryCodeForInstr(MI, Fixups, STI);
unsigned Size = MCII.get(MI.getOpcode()).getSize();
// Big-endian insertion of Size bytes.
unsigned ShiftValue = (Size * 8) - 8;
@@ -104,7 +115,8 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
uint64_t SystemZMCCodeEmitter::
getMachineOpValue(const MCInst &MI, const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
if (MO.isReg())
return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
if (MO.isImm())
@@ -114,38 +126,42 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO,
uint64_t SystemZMCCodeEmitter::
getBDAddr12Encoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const {
- uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups);
- uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups);
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups, STI);
+ uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups, STI);
assert(isUInt<4>(Base) && isUInt<12>(Disp));
return (Base << 12) | Disp;
}
uint64_t SystemZMCCodeEmitter::
getBDAddr20Encoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const {
- uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups);
- uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups);
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups, STI);
+ uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups, STI);
assert(isUInt<4>(Base) && isInt<20>(Disp));
return (Base << 20) | ((Disp & 0xfff) << 8) | ((Disp & 0xff000) >> 12);
}
uint64_t SystemZMCCodeEmitter::
getBDXAddr12Encoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const {
- uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups);
- uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups);
- uint64_t Index = getMachineOpValue(MI, MI.getOperand(OpNum + 2), Fixups);
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups, STI);
+ uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups, STI);
+ uint64_t Index = getMachineOpValue(MI, MI.getOperand(OpNum + 2), Fixups, STI);
assert(isUInt<4>(Base) && isUInt<12>(Disp) && isUInt<4>(Index));
return (Index << 16) | (Base << 12) | Disp;
}
uint64_t SystemZMCCodeEmitter::
getBDXAddr20Encoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const {
- uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups);
- uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups);
- uint64_t Index = getMachineOpValue(MI, MI.getOperand(OpNum + 2), Fixups);
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups, STI);
+ uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups, STI);
+ uint64_t Index = getMachineOpValue(MI, MI.getOperand(OpNum + 2), Fixups, STI);
assert(isUInt<4>(Base) && isInt<20>(Disp) && isUInt<4>(Index));
return (Index << 24) | (Base << 20) | ((Disp & 0xfff) << 8)
| ((Disp & 0xff000) >> 12);
@@ -153,10 +169,11 @@ getBDXAddr20Encoding(const MCInst &MI, unsigned OpNum,
uint64_t SystemZMCCodeEmitter::
getBDLAddr12Len8Encoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const {
- uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups);
- uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups);
- uint64_t Len = getMachineOpValue(MI, MI.getOperand(OpNum + 2), Fixups) - 1;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups, STI);
+ uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups, STI);
+ uint64_t Len = getMachineOpValue(MI, MI.getOperand(OpNum + 2), Fixups, STI) - 1;
assert(isUInt<4>(Base) && isUInt<12>(Disp) && isUInt<8>(Len));
return (Len << 16) | (Base << 12) | Disp;
}
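
The BD/BDX encodings above pack the index, the base and the displacement into a single value, splitting a 20-bit displacement into its low 12 and high 8 bits as the comment in the header block describes. A standalone sketch of that packing with made-up operand values, assuming the same layout as getBDXAddr20Encoding:

#include <cassert>
#include <cstdint>

// Mirrors the packing in getBDXAddr20Encoding: 4-bit index, 4-bit base and a
// signed 20-bit displacement whose low 12 bits are encoded before its high 8.
static uint64_t encodeBDXAddr20(uint64_t Index, uint64_t Base, int64_t Disp) {
  uint64_t D = static_cast<uint64_t>(Disp) & 0xfffff;  // 20-bit field
  return (Index << 24) | (Base << 20) | ((D & 0xfff) << 8) | ((D & 0xff000) >> 12);
}

int main() {
  // Made-up operands: index %r3, base %r15, displacement 0x12345.
  uint64_t Enc = encodeBDXAddr20(3, 15, 0x12345);
  assert(((Enc >> 24) & 0xf) == 3);       // index
  assert(((Enc >> 20) & 0xf) == 15);      // base
  assert(((Enc >> 8) & 0xfff) == 0x345);  // low 12 bits of the displacement
  assert((Enc & 0xff) == 0x12);           // high 8 bits of the displacement
  return 0;
}
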
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCFixups.h b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCFixups.h
index 9c94ebb..a3aab71 100644
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCFixups.h
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCFixups.h
@@ -14,18 +14,18 @@
namespace llvm {
namespace SystemZ {
- enum FixupKind {
- // These correspond directly to R_390_* relocations.
- FK_390_PC16DBL = FirstTargetFixupKind,
- FK_390_PC32DBL,
- FK_390_PLT16DBL,
- FK_390_PLT32DBL,
+enum FixupKind {
+ // These correspond directly to R_390_* relocations.
+ FK_390_PC16DBL = FirstTargetFixupKind,
+ FK_390_PC32DBL,
+ FK_390_PLT16DBL,
+ FK_390_PLT32DBL,
- // Marker
- LastTargetFixupKind,
- NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
- };
-}
+ // Marker
+ LastTargetFixupKind,
+ NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
+};
+} // end namespace SystemZ
} // end namespace llvm
#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp
index 36e3d83..c6a1816 100644
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp
@@ -24,16 +24,10 @@ public:
protected:
// Override MCELFObjectTargetWriter.
- virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel, bool IsRelocWithSymbol,
- int64_t Addend) const LLVM_OVERRIDE;
- virtual const MCSymbol *ExplicitRelSym(const MCAssembler &Asm,
- const MCValue &Target,
- const MCFragment &F,
- const MCFixup &Fixup,
- bool IsPCRel) const LLVM_OVERRIDE;
+ unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel) const override;
};
-} // end anonymouse namespace
+} // end anonymous namespace
SystemZObjectWriter::SystemZObjectWriter(uint8_t OSABI)
: MCELFObjectTargetWriter(/*Is64Bit=*/true, OSABI, ELF::EM_S390,
@@ -87,12 +81,8 @@ static unsigned getPLTReloc(unsigned Kind) {
unsigned SystemZObjectWriter::GetRelocType(const MCValue &Target,
const MCFixup &Fixup,
- bool IsPCRel,
- bool IsRelocWithSymbol,
- int64_t Addend) const {
- MCSymbolRefExpr::VariantKind Modifier = (Target.isAbsolute() ?
- MCSymbolRefExpr::VK_None :
- Target.getSymA()->getKind());
+ bool IsPCRel) const {
+ MCSymbolRefExpr::VariantKind Modifier = Target.getAccessVariant();
unsigned Kind = Fixup.getKind();
switch (Modifier) {
case MCSymbolRefExpr::VK_None:
@@ -118,21 +108,6 @@ unsigned SystemZObjectWriter::GetRelocType(const MCValue &Target,
}
}
-const MCSymbol *SystemZObjectWriter::ExplicitRelSym(const MCAssembler &Asm,
- const MCValue &Target,
- const MCFragment &F,
- const MCFixup &Fixup,
- bool IsPCRel) const {
- // The addend in a PC-relative R_390_* relocation is always applied to
- // the PC-relative part of the address. If some kind of indirection
- // is applied to the symbol first, we can't use an addend there too.
- if (!Target.isAbsolute() &&
- Target.getSymA()->getKind() != MCSymbolRefExpr::VK_None &&
- IsPCRel)
- return &Target.getSymA()->getSymbol().AliasedSymbol();
- return NULL;
-}
-
MCObjectWriter *llvm::createSystemZObjectWriter(raw_ostream &OS,
uint8_t OSABI) {
MCELFObjectTargetWriter *MOTW = new SystemZObjectWriter(OSABI);
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
index 9e1296b..cc94869 100644
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
@@ -16,6 +16,8 @@
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
#define GET_INSTRINFO_MC_DESC
#include "SystemZGenInstrInfo.inc"
@@ -25,8 +27,6 @@
#define GET_REGINFO_MC_DESC
#include "SystemZGenRegisterInfo.inc"
-using namespace llvm;
-
const unsigned SystemZMC::GR32Regs[16] = {
SystemZ::R0L, SystemZ::R1L, SystemZ::R2L, SystemZ::R3L,
SystemZ::R4L, SystemZ::R5L, SystemZ::R6L, SystemZ::R7L,
@@ -98,7 +98,8 @@ static MCAsmInfo *createSystemZMCAsmInfo(const MCRegisterInfo &MRI,
StringRef TT) {
MCAsmInfo *MAI = new SystemZMCAsmInfo(TT);
MCCFIInstruction Inst =
- MCCFIInstruction::createDefCfa(0, MRI.getDwarfRegNum(SystemZ::R15D, true),
+ MCCFIInstruction::createDefCfa(nullptr,
+ MRI.getDwarfRegNum(SystemZ::R15D, true),
SystemZMC::CFAOffsetFromInitialSP);
MAI->addInitialFrameState(Inst);
return MAI;
@@ -185,9 +186,10 @@ static MCStreamer *createSystemZMCObjectStreamer(const Target &T, StringRef TT,
MCAsmBackend &MAB,
raw_ostream &OS,
MCCodeEmitter *Emitter,
+ const MCSubtargetInfo &STI,
bool RelaxAll,
bool NoExecStack) {
- return createELFStreamer(Ctx, 0, MAB, OS, Emitter, RelaxAll, NoExecStack);
+ return createELFStreamer(Ctx, MAB, OS, Emitter, RelaxAll, NoExecStack);
}
extern "C" void LLVMInitializeSystemZTargetMC() {
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
index 97e325b..cbaf9a8 100644
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
@@ -28,47 +28,47 @@ class raw_ostream;
extern Target TheSystemZTarget;
namespace SystemZMC {
- // How many bytes are in the ABI-defined, caller-allocated part of
- // a stack frame.
- const int64_t CallFrameSize = 160;
-
- // The offset of the DWARF CFA from the incoming stack pointer.
- const int64_t CFAOffsetFromInitialSP = CallFrameSize;
-
- // Maps of asm register numbers to LLVM register numbers, with 0 indicating
- // an invalid register. In principle we could use 32-bit and 64-bit register
- // classes directly, provided that we relegated the GPR allocation order
- // in SystemZRegisterInfo.td to an AltOrder and left the default order
- // as %r0-%r15. It seems better to provide the same interface for
- // all classes though.
- extern const unsigned GR32Regs[16];
- extern const unsigned GRH32Regs[16];
- extern const unsigned GR64Regs[16];
- extern const unsigned GR128Regs[16];
- extern const unsigned FP32Regs[16];
- extern const unsigned FP64Regs[16];
- extern const unsigned FP128Regs[16];
-
- // Return the 0-based number of the first architectural register that
- // contains the given LLVM register. E.g. R1D -> 1.
- unsigned getFirstReg(unsigned Reg);
-
- // Return the given register as a GR64.
- inline unsigned getRegAsGR64(unsigned Reg) {
- return GR64Regs[getFirstReg(Reg)];
- }
-
- // Return the given register as a low GR32.
- inline unsigned getRegAsGR32(unsigned Reg) {
- return GR32Regs[getFirstReg(Reg)];
- }
-
- // Return the given register as a high GR32.
- inline unsigned getRegAsGRH32(unsigned Reg) {
- return GRH32Regs[getFirstReg(Reg)];
- }
+// How many bytes are in the ABI-defined, caller-allocated part of
+// a stack frame.
+const int64_t CallFrameSize = 160;
+
+// The offset of the DWARF CFA from the incoming stack pointer.
+const int64_t CFAOffsetFromInitialSP = CallFrameSize;
+
+// Maps of asm register numbers to LLVM register numbers, with 0 indicating
+// an invalid register. In principle we could use 32-bit and 64-bit register
+// classes directly, provided that we relegated the GPR allocation order
+// in SystemZRegisterInfo.td to an AltOrder and left the default order
+// as %r0-%r15. It seems better to provide the same interface for
+// all classes though.
+extern const unsigned GR32Regs[16];
+extern const unsigned GRH32Regs[16];
+extern const unsigned GR64Regs[16];
+extern const unsigned GR128Regs[16];
+extern const unsigned FP32Regs[16];
+extern const unsigned FP64Regs[16];
+extern const unsigned FP128Regs[16];
+
+// Return the 0-based number of the first architectural register that
+// contains the given LLVM register. E.g. R1D -> 1.
+unsigned getFirstReg(unsigned Reg);
+
+// Return the given register as a GR64.
+inline unsigned getRegAsGR64(unsigned Reg) {
+ return GR64Regs[getFirstReg(Reg)];
}
+// Return the given register as a low GR32.
+inline unsigned getRegAsGR32(unsigned Reg) {
+ return GR32Regs[getFirstReg(Reg)];
+}
+
+// Return the given register as a high GR32.
+inline unsigned getRegAsGRH32(unsigned Reg) {
+ return GRH32Regs[getFirstReg(Reg)];
+}
+} // end namespace SystemZMC
+
MCCodeEmitter *createSystemZMCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
diff --git a/contrib/llvm/lib/Target/SystemZ/README.txt b/contrib/llvm/lib/Target/SystemZ/README.txt
index afa6cf0..e089047 100644
--- a/contrib/llvm/lib/Target/SystemZ/README.txt
+++ b/contrib/llvm/lib/Target/SystemZ/README.txt
@@ -166,13 +166,6 @@ See CodeGen/SystemZ/alloca-01.ll for an example.
--
-Atomic loads and stores use the default compare-and-swap based implementation.
-This is much too conservative in practice, since the architecture guarantees
-that 1-, 2-, 4- and 8-byte loads and stores to aligned addresses are
-inherently atomic.
-
---
-
If needed, we can support 16-byte atomics using LPQ, STPQ and CSDG.
--
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZ.h b/contrib/llvm/lib/Target/SystemZ/SystemZ.h
index dcebbad..15792494 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZ.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZ.h
@@ -19,97 +19,98 @@
#include "llvm/Support/CodeGen.h"
namespace llvm {
- class SystemZTargetMachine;
- class FunctionPass;
-
- namespace SystemZ {
- // Condition-code mask values.
- const unsigned CCMASK_0 = 1 << 3;
- const unsigned CCMASK_1 = 1 << 2;
- const unsigned CCMASK_2 = 1 << 1;
- const unsigned CCMASK_3 = 1 << 0;
- const unsigned CCMASK_ANY = CCMASK_0 | CCMASK_1 | CCMASK_2 | CCMASK_3;
-
- // Condition-code mask assignments for integer and floating-point
- // comparisons.
- const unsigned CCMASK_CMP_EQ = CCMASK_0;
- const unsigned CCMASK_CMP_LT = CCMASK_1;
- const unsigned CCMASK_CMP_GT = CCMASK_2;
- const unsigned CCMASK_CMP_NE = CCMASK_CMP_LT | CCMASK_CMP_GT;
- const unsigned CCMASK_CMP_LE = CCMASK_CMP_EQ | CCMASK_CMP_LT;
- const unsigned CCMASK_CMP_GE = CCMASK_CMP_EQ | CCMASK_CMP_GT;
-
- // Condition-code mask assignments for floating-point comparisons only.
- const unsigned CCMASK_CMP_UO = CCMASK_3;
- const unsigned CCMASK_CMP_O = CCMASK_ANY ^ CCMASK_CMP_UO;
-
- // All condition-code values produced by comparisons.
- const unsigned CCMASK_ICMP = CCMASK_0 | CCMASK_1 | CCMASK_2;
- const unsigned CCMASK_FCMP = CCMASK_0 | CCMASK_1 | CCMASK_2 | CCMASK_3;
-
- // Condition-code mask assignments for CS.
- const unsigned CCMASK_CS_EQ = CCMASK_0;
- const unsigned CCMASK_CS_NE = CCMASK_1;
- const unsigned CCMASK_CS = CCMASK_0 | CCMASK_1;
-
- // Condition-code mask assignments for a completed SRST loop.
- const unsigned CCMASK_SRST_FOUND = CCMASK_1;
- const unsigned CCMASK_SRST_NOTFOUND = CCMASK_2;
- const unsigned CCMASK_SRST = CCMASK_1 | CCMASK_2;
-
- // Condition-code mask assignments for TEST UNDER MASK.
- const unsigned CCMASK_TM_ALL_0 = CCMASK_0;
- const unsigned CCMASK_TM_MIXED_MSB_0 = CCMASK_1;
- const unsigned CCMASK_TM_MIXED_MSB_1 = CCMASK_2;
- const unsigned CCMASK_TM_ALL_1 = CCMASK_3;
- const unsigned CCMASK_TM_SOME_0 = CCMASK_TM_ALL_1 ^ CCMASK_ANY;
- const unsigned CCMASK_TM_SOME_1 = CCMASK_TM_ALL_0 ^ CCMASK_ANY;
- const unsigned CCMASK_TM_MSB_0 = CCMASK_0 | CCMASK_1;
- const unsigned CCMASK_TM_MSB_1 = CCMASK_2 | CCMASK_3;
- const unsigned CCMASK_TM = CCMASK_ANY;
-
- // The position of the low CC bit in an IPM result.
- const unsigned IPM_CC = 28;
-
- // Mask assignments for PFD.
- const unsigned PFD_READ = 1;
- const unsigned PFD_WRITE = 2;
-
- // Return true if Val fits an LLILL operand.
- static inline bool isImmLL(uint64_t Val) {
- return (Val & ~0x000000000000ffffULL) == 0;
- }
-
- // Return true if Val fits an LLILH operand.
- static inline bool isImmLH(uint64_t Val) {
- return (Val & ~0x00000000ffff0000ULL) == 0;
- }
-
- // Return true if Val fits an LLIHL operand.
- static inline bool isImmHL(uint64_t Val) {
- return (Val & ~0x00000ffff00000000ULL) == 0;
- }
-
- // Return true if Val fits an LLIHH operand.
- static inline bool isImmHH(uint64_t Val) {
- return (Val & ~0xffff000000000000ULL) == 0;
- }
-
- // Return true if Val fits an LLILF operand.
- static inline bool isImmLF(uint64_t Val) {
- return (Val & ~0x00000000ffffffffULL) == 0;
- }
-
- // Return true if Val fits an LLIHF operand.
- static inline bool isImmHF(uint64_t Val) {
- return (Val & ~0xffffffff00000000ULL) == 0;
- }
- }
-
- FunctionPass *createSystemZISelDag(SystemZTargetMachine &TM,
- CodeGenOpt::Level OptLevel);
- FunctionPass *createSystemZElimComparePass(SystemZTargetMachine &TM);
- FunctionPass *createSystemZShortenInstPass(SystemZTargetMachine &TM);
- FunctionPass *createSystemZLongBranchPass(SystemZTargetMachine &TM);
-} // end namespace llvm;
+class SystemZTargetMachine;
+class FunctionPass;
+
+namespace SystemZ {
+// Condition-code mask values.
+const unsigned CCMASK_0 = 1 << 3;
+const unsigned CCMASK_1 = 1 << 2;
+const unsigned CCMASK_2 = 1 << 1;
+const unsigned CCMASK_3 = 1 << 0;
+const unsigned CCMASK_ANY = CCMASK_0 | CCMASK_1 | CCMASK_2 | CCMASK_3;
+
+// Condition-code mask assignments for integer and floating-point
+// comparisons.
+const unsigned CCMASK_CMP_EQ = CCMASK_0;
+const unsigned CCMASK_CMP_LT = CCMASK_1;
+const unsigned CCMASK_CMP_GT = CCMASK_2;
+const unsigned CCMASK_CMP_NE = CCMASK_CMP_LT | CCMASK_CMP_GT;
+const unsigned CCMASK_CMP_LE = CCMASK_CMP_EQ | CCMASK_CMP_LT;
+const unsigned CCMASK_CMP_GE = CCMASK_CMP_EQ | CCMASK_CMP_GT;
+
+// Condition-code mask assignments for floating-point comparisons only.
+const unsigned CCMASK_CMP_UO = CCMASK_3;
+const unsigned CCMASK_CMP_O = CCMASK_ANY ^ CCMASK_CMP_UO;
+
+// All condition-code values produced by comparisons.
+const unsigned CCMASK_ICMP = CCMASK_0 | CCMASK_1 | CCMASK_2;
+const unsigned CCMASK_FCMP = CCMASK_0 | CCMASK_1 | CCMASK_2 | CCMASK_3;
+
+// Condition-code mask assignments for CS.
+const unsigned CCMASK_CS_EQ = CCMASK_0;
+const unsigned CCMASK_CS_NE = CCMASK_1;
+const unsigned CCMASK_CS = CCMASK_0 | CCMASK_1;
+
+// Condition-code mask assignments for a completed SRST loop.
+const unsigned CCMASK_SRST_FOUND = CCMASK_1;
+const unsigned CCMASK_SRST_NOTFOUND = CCMASK_2;
+const unsigned CCMASK_SRST = CCMASK_1 | CCMASK_2;
+
+// Condition-code mask assignments for TEST UNDER MASK.
+const unsigned CCMASK_TM_ALL_0 = CCMASK_0;
+const unsigned CCMASK_TM_MIXED_MSB_0 = CCMASK_1;
+const unsigned CCMASK_TM_MIXED_MSB_1 = CCMASK_2;
+const unsigned CCMASK_TM_ALL_1 = CCMASK_3;
+const unsigned CCMASK_TM_SOME_0 = CCMASK_TM_ALL_1 ^ CCMASK_ANY;
+const unsigned CCMASK_TM_SOME_1 = CCMASK_TM_ALL_0 ^ CCMASK_ANY;
+const unsigned CCMASK_TM_MSB_0 = CCMASK_0 | CCMASK_1;
+const unsigned CCMASK_TM_MSB_1 = CCMASK_2 | CCMASK_3;
+const unsigned CCMASK_TM = CCMASK_ANY;
+
+// The position of the low CC bit in an IPM result.
+const unsigned IPM_CC = 28;
+
+// Mask assignments for PFD.
+const unsigned PFD_READ = 1;
+const unsigned PFD_WRITE = 2;
+
+// Return true if Val fits an LLILL operand.
+static inline bool isImmLL(uint64_t Val) {
+ return (Val & ~0x000000000000ffffULL) == 0;
+}
+
+// Return true if Val fits an LLILH operand.
+static inline bool isImmLH(uint64_t Val) {
+ return (Val & ~0x00000000ffff0000ULL) == 0;
+}
+
+// Return true if Val fits an LLIHL operand.
+static inline bool isImmHL(uint64_t Val) {
+ return (Val & ~0x00000ffff00000000ULL) == 0;
+}
+
+// Return true if Val fits an LLIHH operand.
+static inline bool isImmHH(uint64_t Val) {
+ return (Val & ~0xffff000000000000ULL) == 0;
+}
+
+// Return true if Val fits an LLILF operand.
+static inline bool isImmLF(uint64_t Val) {
+ return (Val & ~0x00000000ffffffffULL) == 0;
+}
+
+// Return true if Val fits an LLIHF operand.
+static inline bool isImmHF(uint64_t Val) {
+ return (Val & ~0xffffffff00000000ULL) == 0;
+}
+} // end namespace SystemZ
+
+FunctionPass *createSystemZISelDag(SystemZTargetMachine &TM,
+ CodeGenOpt::Level OptLevel);
+FunctionPass *createSystemZElimComparePass(SystemZTargetMachine &TM);
+FunctionPass *createSystemZShortenInstPass(SystemZTargetMachine &TM);
+FunctionPass *createSystemZLongBranchPass(SystemZTargetMachine &TM);
+} // end namespace llvm
+
#endif
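
The isImm* predicates in the new SystemZ.h test whether a 64-bit value occupies a single 16-bit slice. As a hedged sketch of how such predicates could drive instruction selection, the chooser below maps a value to one of the LLILL/LLILH/LLIHL/LLIHH load-immediate forms; the chooser function itself is hypothetical, only the masks mirror the header:

#include <cstdint>
#include <cstdio>

// Copies of the predicates from SystemZ.h (bits 0-15, 16-31, 32-47, 48-63).
static bool isImmLL(uint64_t V) { return (V & ~0x000000000000ffffULL) == 0; }
static bool isImmLH(uint64_t V) { return (V & ~0x00000000ffff0000ULL) == 0; }
static bool isImmHL(uint64_t V) { return (V & ~0x0000ffff00000000ULL) == 0; }
static bool isImmHH(uint64_t V) { return (V & ~0xffff000000000000ULL) == 0; }

// Hypothetical chooser: returns the mnemonic of the single load-immediate
// instruction that can materialize Val, or nullptr if none fits.
static const char *pickLoadImmediate(uint64_t Val) {
  if (isImmLL(Val)) return "llill";
  if (isImmLH(Val)) return "llilh";
  if (isImmHL(Val)) return "llihl";
  if (isImmHH(Val)) return "llihh";
  return nullptr;
}

int main() {
  std::printf("%s\n", pickLoadImmediate(0x0000000000001234ULL));  // llill
  std::printf("%s\n", pickLoadImmediate(0x0000123400000000ULL));  // llihl
  return 0;
}
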
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZ.td b/contrib/llvm/lib/Target/SystemZ/SystemZ.td
index abf5c8e..5f82903 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZ.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZ.td
@@ -53,20 +53,10 @@ def SystemZAsmParser : AsmParser {
}
//===----------------------------------------------------------------------===//
-// Assembly writer
-//===----------------------------------------------------------------------===//
-
-def SystemZAsmWriter : AsmWriter {
- string AsmWriterClassName = "InstPrinter";
- bit isMCAsmWriter = 1;
-}
-
-//===----------------------------------------------------------------------===//
// Top-level target declaration
//===----------------------------------------------------------------------===//
def SystemZ : Target {
let InstructionSet = SystemZInstrInfo;
let AssemblyParsers = [SystemZAsmParser];
- let AssemblyWriters = [SystemZAsmWriter];
}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
index 75cbda4..8b18bc1 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
@@ -18,11 +18,11 @@
#include "SystemZMCInstLower.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Target/Mangler.h"
using namespace llvm;
@@ -151,11 +151,20 @@ void SystemZAsmPrinter::EmitInstruction(const MachineInstr *MI) {
#undef LOWER_HIGH
+ case SystemZ::Serialize:
+ if (Subtarget->hasFastSerialization())
+ LoweredMI = MCInstBuilder(SystemZ::AsmBCR)
+ .addImm(14).addReg(SystemZ::R0D);
+ else
+ LoweredMI = MCInstBuilder(SystemZ::AsmBCR)
+ .addImm(15).addReg(SystemZ::R0D);
+ break;
+
default:
Lower.lower(MI, LoweredMI);
break;
}
- OutStreamer.EmitInstruction(LoweredMI);
+ EmitToStreamer(OutStreamer, LoweredMI);
}
// Convert a SystemZ-specific constant pool modifier into the associated
@@ -170,8 +179,7 @@ getModifierVariantKind(SystemZCP::SystemZCPModifier Modifier) {
void SystemZAsmPrinter::
EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) {
- SystemZConstantPoolValue *ZCPV =
- static_cast<SystemZConstantPoolValue*>(MCPV);
+ auto *ZCPV = static_cast<SystemZConstantPoolValue*>(MCPV);
const MCExpr *Expr =
MCSymbolRefExpr::Create(getSymbol(ZCPV->getGlobalValue()),
@@ -212,7 +220,7 @@ bool SystemZAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
void SystemZAsmPrinter::EmitEndOfAsmFile(Module &M) {
if (Subtarget->isTargetELF()) {
- const TargetLoweringObjectFileELF &TLOFELF =
+ auto &TLOFELF =
static_cast<const TargetLoweringObjectFileELF &>(getObjFileLowering());
MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.h b/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.h
index 4b6c51b..20093bc 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.h
@@ -32,20 +32,18 @@ public:
}
// Override AsmPrinter.
- virtual const char *getPassName() const LLVM_OVERRIDE {
+ const char *getPassName() const override {
return "SystemZ Assembly Printer";
}
- virtual void EmitInstruction(const MachineInstr *MI) LLVM_OVERRIDE;
- virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV)
- LLVM_OVERRIDE;
- virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &OS) LLVM_OVERRIDE;
- virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant,
- const char *ExtraCode,
- raw_ostream &OS) LLVM_OVERRIDE;
- virtual void EmitEndOfAsmFile(Module &M) LLVM_OVERRIDE;
+ void EmitInstruction(const MachineInstr *MI) override;
+ void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) override;
+ bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS) override;
+ bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS) override;
+ void EmitEndOfAsmFile(Module &M) override;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.h b/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.h
index 298985e..4b1569d 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.h
@@ -11,13 +11,13 @@
#define SYSTEMZCALLINGCONV_H
namespace llvm {
- namespace SystemZ {
- const unsigned NumArgGPRs = 5;
- extern const unsigned ArgGPRs[NumArgGPRs];
+namespace SystemZ {
+ const unsigned NumArgGPRs = 5;
+ extern const unsigned ArgGPRs[NumArgGPRs];
- const unsigned NumArgFPRs = 4;
- extern const unsigned ArgFPRs[NumArgFPRs];
- }
-}
+ const unsigned NumArgFPRs = 4;
+ extern const unsigned ArgFPRs[NumArgFPRs];
+} // end namespace SystemZ
+} // end namespace llvm
#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.td b/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.td
index c4f641e..fb0d1d8 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.td
@@ -13,7 +13,7 @@ class CCIfExtend<CCAction A>
: CCIf<"ArgFlags.isSExt() || ArgFlags.isZExt()", A>;
//===----------------------------------------------------------------------===//
-// SVR4 return value calling convention
+// z/Linux return value calling convention
//===----------------------------------------------------------------------===//
def RetCC_SystemZ : CallingConv<[
// Promote i32 to i64 if it has an explicit extension type.
@@ -39,7 +39,7 @@ def RetCC_SystemZ : CallingConv<[
]>;
//===----------------------------------------------------------------------===//
-// SVR4 argument calling conventions
+// z/Linux argument calling conventions
//===----------------------------------------------------------------------===//
def CC_SystemZ : CallingConv<[
// Promote i32 to i64 if it has an explicit extension type.
@@ -63,3 +63,9 @@ def CC_SystemZ : CallingConv<[
// Other arguments are passed in 8-byte-aligned 8-byte stack slots.
CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>
]>;
+
+//===----------------------------------------------------------------------===//
+// z/Linux callee-saved registers
+//===----------------------------------------------------------------------===//
+def CSR_SystemZ : CalleeSavedRegs<(add (sequence "R%dD", 6, 15),
+ (sequence "F%dD", 8, 15))>;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.cpp
index 6c70811..19cec21 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.cpp
@@ -43,7 +43,7 @@ getExistingMachineCPValue(MachineConstantPool *CP, unsigned Alignment) {
for (unsigned I = 0, E = Constants.size(); I != E; ++I) {
if (Constants[I].isMachineConstantPoolEntry() &&
(Constants[I].getAlignment() & AlignMask) == 0) {
- SystemZConstantPoolValue *ZCPV =
+ auto *ZCPV =
static_cast<SystemZConstantPoolValue *>(Constants[I].Val.MachineCPVal);
if (ZCPV->GV == GV && ZCPV->Modifier == Modifier)
return I;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.h b/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.h
index 9927bdb..699718f 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.h
@@ -18,10 +18,10 @@ namespace llvm {
class GlobalValue;
namespace SystemZCP {
- enum SystemZCPModifier {
- NTPOFF
- };
-}
+enum SystemZCPModifier {
+ NTPOFF
+};
+} // end namespace SystemZCP
/// A SystemZ-specific constant pool value. At present, the only
/// defined constant pool values are offsets of thread-local variables
@@ -39,17 +39,17 @@ public:
Create(const GlobalValue *GV, SystemZCP::SystemZCPModifier Modifier);
// Override MachineConstantPoolValue.
- virtual unsigned getRelocationInfo() const LLVM_OVERRIDE;
- virtual int getExistingMachineCPValue(MachineConstantPool *CP,
- unsigned Alignment) LLVM_OVERRIDE;
- virtual void addSelectionDAGCSEId(FoldingSetNodeID &ID) LLVM_OVERRIDE;
- virtual void print(raw_ostream &O) const LLVM_OVERRIDE;
+ unsigned getRelocationInfo() const override;
+ int getExistingMachineCPValue(MachineConstantPool *CP,
+ unsigned Alignment) override;
+ void addSelectionDAGCSEId(FoldingSetNodeID &ID) override;
+ void print(raw_ostream &O) const override;
// Access SystemZ-specific fields.
const GlobalValue *getGlobalValue() const { return GV; }
SystemZCP::SystemZCPModifier getModifier() const { return Modifier; }
};
-} // End llvm namespace
+} // end namespace llvm
#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
index b8a77db..dc210d60 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
@@ -13,8 +13,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "systemz-elim-compare"
-
#include "SystemZTargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -28,78 +26,79 @@
using namespace llvm;
+#define DEBUG_TYPE "systemz-elim-compare"
+
STATISTIC(BranchOnCounts, "Number of branch-on-count instructions");
STATISTIC(EliminatedComparisons, "Number of eliminated comparisons");
STATISTIC(FusedComparisons, "Number of fused compare-and-branch instructions");
namespace {
- // Represents the references to a particular register in one or more
- // instructions.
- struct Reference {
- Reference()
- : Def(false), Use(false), IndirectDef(false), IndirectUse(false) {}
-
- Reference &operator|=(const Reference &Other) {
- Def |= Other.Def;
- IndirectDef |= Other.IndirectDef;
- Use |= Other.Use;
- IndirectUse |= Other.IndirectUse;
- return *this;
- }
+// Represents the references to a particular register in one or more
+// instructions.
+struct Reference {
+ Reference()
+ : Def(false), Use(false), IndirectDef(false), IndirectUse(false) {}
+
+ Reference &operator|=(const Reference &Other) {
+ Def |= Other.Def;
+ IndirectDef |= Other.IndirectDef;
+ Use |= Other.Use;
+ IndirectUse |= Other.IndirectUse;
+ return *this;
+ }
- operator bool() const { return Def || Use; }
+ operator bool() const { return Def || Use; }
- // True if the register is defined or used in some form, either directly or
- // via a sub- or super-register.
- bool Def;
- bool Use;
+ // True if the register is defined or used in some form, either directly or
+ // via a sub- or super-register.
+ bool Def;
+ bool Use;
- // True if the register is defined or used indirectly, by a sub- or
- // super-register.
- bool IndirectDef;
- bool IndirectUse;
- };
+ // True if the register is defined or used indirectly, by a sub- or
+ // super-register.
+ bool IndirectDef;
+ bool IndirectUse;
+};
- class SystemZElimCompare : public MachineFunctionPass {
- public:
- static char ID;
- SystemZElimCompare(const SystemZTargetMachine &tm)
- : MachineFunctionPass(ID), TII(0), TRI(0) {}
+class SystemZElimCompare : public MachineFunctionPass {
+public:
+ static char ID;
+ SystemZElimCompare(const SystemZTargetMachine &tm)
+ : MachineFunctionPass(ID), TII(nullptr), TRI(nullptr) {}
- virtual const char *getPassName() const {
- return "SystemZ Comparison Elimination";
- }
+ const char *getPassName() const override {
+ return "SystemZ Comparison Elimination";
+ }
+
+ bool processBlock(MachineBasicBlock &MBB);
+ bool runOnMachineFunction(MachineFunction &F) override;
- bool processBlock(MachineBasicBlock *MBB);
- bool runOnMachineFunction(MachineFunction &F);
-
- private:
- Reference getRegReferences(MachineInstr *MI, unsigned Reg);
- bool convertToBRCT(MachineInstr *MI, MachineInstr *Compare,
- SmallVectorImpl<MachineInstr *> &CCUsers);
- bool convertToLoadAndTest(MachineInstr *MI);
- bool adjustCCMasksForInstr(MachineInstr *MI, MachineInstr *Compare,
- SmallVectorImpl<MachineInstr *> &CCUsers);
- bool optimizeCompareZero(MachineInstr *Compare,
+private:
+ Reference getRegReferences(MachineInstr *MI, unsigned Reg);
+ bool convertToBRCT(MachineInstr *MI, MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers);
+ bool convertToLoadAndTest(MachineInstr *MI);
+ bool adjustCCMasksForInstr(MachineInstr *MI, MachineInstr *Compare,
SmallVectorImpl<MachineInstr *> &CCUsers);
- bool fuseCompareAndBranch(MachineInstr *Compare,
- SmallVectorImpl<MachineInstr *> &CCUsers);
+ bool optimizeCompareZero(MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers);
+ bool fuseCompareAndBranch(MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers);
- const SystemZInstrInfo *TII;
- const TargetRegisterInfo *TRI;
- };
+ const SystemZInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+};
- char SystemZElimCompare::ID = 0;
-} // end of anonymous namespace
+char SystemZElimCompare::ID = 0;
+} // end anonymous namespace
FunctionPass *llvm::createSystemZElimComparePass(SystemZTargetMachine &TM) {
return new SystemZElimCompare(TM);
}
// Return true if CC is live out of MBB.
-static bool isCCLiveOut(MachineBasicBlock *MBB) {
- for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
- SE = MBB->succ_end(); SI != SE; ++SI)
+static bool isCCLiveOut(MachineBasicBlock &MBB) {
+ for (auto SI = MBB.succ_begin(), SE = MBB.succ_end(); SI != SE; ++SI)
if ((*SI)->isLiveIn(SystemZ::CC))
return true;
return false;
@@ -328,8 +327,8 @@ optimizeCompareZero(MachineInstr *Compare,
// Search back for CC results that are based on the first operand.
unsigned SrcReg = Compare->getOperand(0).getReg();
unsigned SrcSubReg = Compare->getOperand(0).getSubReg();
- MachineBasicBlock *MBB = Compare->getParent();
- MachineBasicBlock::iterator MBBI = Compare, MBBE = MBB->begin();
+ MachineBasicBlock &MBB = *Compare->getParent();
+ MachineBasicBlock::iterator MBBI = Compare, MBBE = MBB.begin();
Reference CCRefs;
Reference SrcRefs;
while (MBBI != MBBE) {
@@ -424,7 +423,7 @@ fuseCompareAndBranch(MachineInstr *Compare,
// Process all comparison instructions in MBB. Return true if something
// changed.
-bool SystemZElimCompare::processBlock(MachineBasicBlock *MBB) {
+bool SystemZElimCompare::processBlock(MachineBasicBlock &MBB) {
bool Changed = false;
// Walk backwards through the block looking for comparisons, recording
@@ -432,8 +431,8 @@ bool SystemZElimCompare::processBlock(MachineBasicBlock *MBB) {
// instructions before it.
bool CompleteCCUsers = !isCCLiveOut(MBB);
SmallVector<MachineInstr *, 4> CCUsers;
- MachineBasicBlock::iterator MBBI = MBB->end();
- while (MBBI != MBB->begin()) {
+ MachineBasicBlock::iterator MBBI = MBB.end();
+ while (MBBI != MBB.begin()) {
MachineInstr *MI = --MBBI;
if (CompleteCCUsers &&
MI->isCompare() &&
@@ -463,9 +462,8 @@ bool SystemZElimCompare::runOnMachineFunction(MachineFunction &F) {
TRI = &TII->getRegisterInfo();
bool Changed = false;
- for (MachineFunction::iterator MFI = F.begin(), MFE = F.end();
- MFI != MFE; ++MFI)
- Changed |= processBlock(MFI);
+ for (auto &MBB : F)
+ Changed |= processBlock(MBB);
return Changed;
}
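
The Reference struct at the top of SystemZElimCompare.cpp accumulates per-register def/use flags with operator|= while the pass scans a block. A tiny standalone analogue of that accumulation pattern; the Flags type and the sample data are invented for illustration and are not the pass's actual types:

#include <cstdio>

// Simplified analogue of the pass's Reference struct: per-instruction flags
// OR together as more references to the same register are visited.
struct Flags {
  bool Def, Use;
  Flags() : Def(false), Use(false) {}
  Flags(bool D, bool U) : Def(D), Use(U) {}
  Flags &operator|=(const Flags &Other) {
    Def |= Other.Def;
    Use |= Other.Use;
    return *this;
  }
  explicit operator bool() const { return Def || Use; }
};

int main() {
  const Flags PerInst[] = { Flags(false, true),    // a use
                            Flags(true, false),    // a def
                            Flags(false, false) }; // no reference
  Flags Total;
  for (const Flags &F : PerInst)  // walk the (hypothetical) instructions
    Total |= F;
  std::printf("def=%d use=%d any=%d\n", Total.Def, Total.Use, (int)bool(Total));
  return 0;
}
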
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
index acfb491..055dbe9 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
@@ -10,8 +10,9 @@
#include "SystemZFrameLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZInstrBuilder.h"
+#include "SystemZInstrInfo.h"
#include "SystemZMachineFunctionInfo.h"
-#include "SystemZTargetMachine.h"
+#include "SystemZRegisterInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
@@ -20,35 +21,33 @@
using namespace llvm;
namespace {
- // The ABI-defined register save slots, relative to the incoming stack
- // pointer.
- static const TargetFrameLowering::SpillSlot SpillOffsetTable[] = {
- { SystemZ::R2D, 0x10 },
- { SystemZ::R3D, 0x18 },
- { SystemZ::R4D, 0x20 },
- { SystemZ::R5D, 0x28 },
- { SystemZ::R6D, 0x30 },
- { SystemZ::R7D, 0x38 },
- { SystemZ::R8D, 0x40 },
- { SystemZ::R9D, 0x48 },
- { SystemZ::R10D, 0x50 },
- { SystemZ::R11D, 0x58 },
- { SystemZ::R12D, 0x60 },
- { SystemZ::R13D, 0x68 },
- { SystemZ::R14D, 0x70 },
- { SystemZ::R15D, 0x78 },
- { SystemZ::F0D, 0x80 },
- { SystemZ::F2D, 0x88 },
- { SystemZ::F4D, 0x90 },
- { SystemZ::F6D, 0x98 }
- };
-}
-
-SystemZFrameLowering::SystemZFrameLowering(const SystemZTargetMachine &tm,
- const SystemZSubtarget &sti)
- : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 8,
- -SystemZMC::CallFrameSize, 8),
- TM(tm), STI(sti) {
+// The ABI-defined register save slots, relative to the incoming stack
+// pointer.
+static const TargetFrameLowering::SpillSlot SpillOffsetTable[] = {
+ { SystemZ::R2D, 0x10 },
+ { SystemZ::R3D, 0x18 },
+ { SystemZ::R4D, 0x20 },
+ { SystemZ::R5D, 0x28 },
+ { SystemZ::R6D, 0x30 },
+ { SystemZ::R7D, 0x38 },
+ { SystemZ::R8D, 0x40 },
+ { SystemZ::R9D, 0x48 },
+ { SystemZ::R10D, 0x50 },
+ { SystemZ::R11D, 0x58 },
+ { SystemZ::R12D, 0x60 },
+ { SystemZ::R13D, 0x68 },
+ { SystemZ::R14D, 0x70 },
+ { SystemZ::R15D, 0x78 },
+ { SystemZ::F0D, 0x80 },
+ { SystemZ::F2D, 0x88 },
+ { SystemZ::F4D, 0x90 },
+ { SystemZ::F6D, 0x98 }
+};
+} // end anonymous namespace
+
+SystemZFrameLowering::SystemZFrameLowering()
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 8,
+ -SystemZMC::CallFrameSize, 8) {
// Create a mapping from register number to save slot offset.
RegSpillOffsets.grow(SystemZ::NUM_TARGET_REGS);
for (unsigned I = 0, E = array_lengthof(SpillOffsetTable); I != E; ++I)
@@ -93,7 +92,7 @@ processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// save and restore the stack pointer at the same time, via STMG and LMG.
// This allows the deallocation to be done by the LMG, rather than needing
// a separate %r15 addition.
- const uint16_t *CSRegs = TRI->getCalleeSavedRegs(&MF);
+ const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);
for (unsigned I = 0; CSRegs[I]; ++I) {
unsigned Reg = CSRegs[I];
if (SystemZ::GR64BitRegClass.contains(Reg) && MRI.isPhysRegUsed(Reg)) {
@@ -108,9 +107,8 @@ processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// instruction, or an implicit one that comes between the explicit start
// and end registers.
static void addSavedGPR(MachineBasicBlock &MBB, MachineInstrBuilder &MIB,
- const SystemZTargetMachine &TM,
unsigned GPR64, bool IsImplicit) {
- const SystemZRegisterInfo *RI = TM.getRegisterInfo();
+ const TargetRegisterInfo *RI = MBB.getParent()->getTarget().getRegisterInfo();
unsigned GPR32 = RI->getSubReg(GPR64, SystemZ::subreg_l32);
bool IsLive = MBB.isLiveIn(GPR64) || MBB.isLiveIn(GPR32);
if (!IsLive || !IsImplicit) {
@@ -176,8 +174,8 @@ spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(SystemZ::STMG));
// Add the explicit register operands.
- addSavedGPR(MBB, MIB, TM, LowGPR, false);
- addSavedGPR(MBB, MIB, TM, HighGPR, false);
+ addSavedGPR(MBB, MIB, LowGPR, false);
+ addSavedGPR(MBB, MIB, HighGPR, false);
// Add the address.
MIB.addReg(SystemZ::R15D).addImm(StartOffset);
@@ -187,13 +185,13 @@ spillCalleeSavedRegisters(MachineBasicBlock &MBB,
for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
unsigned Reg = CSI[I].getReg();
if (SystemZ::GR64BitRegClass.contains(Reg))
- addSavedGPR(MBB, MIB, TM, Reg, true);
+ addSavedGPR(MBB, MIB, Reg, true);
}
// ...likewise GPR varargs.
if (IsVarArg)
for (unsigned I = ZFI->getVarArgsFirstGPR(); I < SystemZ::NumArgGPRs; ++I)
- addSavedGPR(MBB, MIB, TM, SystemZ::ArgGPRs[I], true);
+ addSavedGPR(MBB, MIB, SystemZ::ArgGPRs[I], true);
}
// Save FPRs in the normal TargetInstrInfo way.
@@ -312,7 +310,7 @@ static void emitIncrement(MachineBasicBlock &MBB,
void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock &MBB = MF.front();
MachineFrameInfo *MFFrame = MF.getFrameInfo();
- const SystemZInstrInfo *ZII =
+ auto *ZII =
static_cast<const SystemZInstrInfo*>(MF.getTarget().getInstrInfo());
SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
MachineBasicBlock::iterator MBBI = MBB.begin();
@@ -333,16 +331,14 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
llvm_unreachable("Couldn't skip over GPR saves");
// Add CFI for the GPR saves.
- MCSymbol *GPRSaveLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, DL,
- ZII->get(TargetOpcode::PROLOG_LABEL)).addSym(GPRSaveLabel);
- for (std::vector<CalleeSavedInfo>::const_iterator
- I = CSI.begin(), E = CSI.end(); I != E; ++I) {
- unsigned Reg = I->getReg();
+ for (auto &Save : CSI) {
+ unsigned Reg = Save.getReg();
if (SystemZ::GR64BitRegClass.contains(Reg)) {
int64_t Offset = SPOffsetFromCFA + RegSpillOffsets[Reg];
- MMI.addFrameInst(MCCFIInstruction::createOffset(
- GPRSaveLabel, MRI->getDwarfRegNum(Reg, true), Offset));
+ unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
+ nullptr, MRI->getDwarfRegNum(Reg, true), Offset));
+ BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
}
}
}
@@ -354,11 +350,10 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
emitIncrement(MBB, MBBI, DL, SystemZ::R15D, Delta, ZII);
// Add CFI for the allocation.
- MCSymbol *AdjustSPLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::PROLOG_LABEL))
- .addSym(AdjustSPLabel);
- MMI.addFrameInst(MCCFIInstruction::createDefCfaOffset(
- AdjustSPLabel, SPOffsetFromCFA + Delta));
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, SPOffsetFromCFA + Delta));
+ BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
SPOffsetFromCFA += Delta;
}
@@ -368,26 +363,23 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
.addReg(SystemZ::R15D);
// Add CFI for the new frame location.
- MCSymbol *SetFPLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::PROLOG_LABEL))
- .addSym(SetFPLabel);
unsigned HardFP = MRI->getDwarfRegNum(SystemZ::R11D, true);
- MMI.addFrameInst(
- MCCFIInstruction::createDefCfaRegister(SetFPLabel, HardFP));
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaRegister(nullptr, HardFP));
+ BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
// Mark the FramePtr as live at the beginning of every block except
// the entry block. (We'll have marked R11 as live on entry when
// saving the GPRs.)
- for (MachineFunction::iterator
- I = llvm::next(MF.begin()), E = MF.end(); I != E; ++I)
+ for (auto I = std::next(MF.begin()), E = MF.end(); I != E; ++I)
I->addLiveIn(SystemZ::R11D);
}
// Skip over the FPR saves.
- MCSymbol *FPRSaveLabel = 0;
- for (std::vector<CalleeSavedInfo>::const_iterator
- I = CSI.begin(), E = CSI.end(); I != E; ++I) {
- unsigned Reg = I->getReg();
+ SmallVector<unsigned, 8> CFIIndexes;
+ for (auto &Save : CSI) {
+ unsigned Reg = Save.getReg();
if (SystemZ::FP64BitRegClass.contains(Reg)) {
if (MBBI != MBB.end() &&
(MBBI->getOpcode() == SystemZ::STD ||
@@ -397,25 +389,25 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
llvm_unreachable("Couldn't skip over FPR save");
// Add CFI for the this save.
- if (!FPRSaveLabel)
- FPRSaveLabel = MMI.getContext().CreateTempSymbol();
- unsigned Reg = MRI->getDwarfRegNum(I->getReg(), true);
- int64_t Offset = getFrameIndexOffset(MF, I->getFrameIdx());
- MMI.addFrameInst(MCCFIInstruction::createOffset(
- FPRSaveLabel, Reg, SPOffsetFromCFA + Offset));
+ unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
+ int64_t Offset = getFrameIndexOffset(MF, Save.getFrameIdx());
+ unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
+ nullptr, DwarfReg, SPOffsetFromCFA + Offset));
+ CFIIndexes.push_back(CFIIndex);
}
}
// Complete the CFI for the FPR saves, modelling them as taking effect
// after the last save.
- if (FPRSaveLabel)
- BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::PROLOG_LABEL))
- .addSym(FPRSaveLabel);
+ for (auto CFIIndex : CFIIndexes) {
+ BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
}
void SystemZFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
- const SystemZInstrInfo *ZII =
+ auto *ZII =
static_cast<const SystemZInstrInfo*>(MF.getTarget().getInstrInfo());
SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
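
SpillOffsetTable above lists the ABI-defined save slot for each register, and the frame-lowering constructor folds it into a register-indexed map (RegSpillOffsets). A plain-container sketch of that step using a subset of the offsets; the types and names here are simplified stand-ins, not those of the pass:

#include <cstdint>
#include <cstdio>
#include <map>

// Illustrative subset of the ABI-defined register save slots, keyed by the
// architectural GPR number (offsets are from the incoming stack pointer).
struct SpillSlot { unsigned Reg; int64_t Offset; };
static const SpillSlot SpillOffsetTable[] = {
  { 2, 0x10 }, { 3, 0x18 }, { 4, 0x20 }, { 5, 0x28 },  // %r2-%r5 ...
  { 14, 0x70 }, { 15, 0x78 },                          // ... %r14, %r15
};

int main() {
  // Analogue of RegSpillOffsets: map a register number to its save slot.
  std::map<unsigned, int64_t> RegSpillOffsets;
  for (const SpillSlot &S : SpillOffsetTable)
    RegSpillOffsets[S.Reg] = S.Offset;
  std::printf("%%r15 save slot offset: %#llx\n",
              (unsigned long long)RegSpillOffsets[15]);  // 0x78
  return 0;
}
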
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h b/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
index 9b0a1d5..4d5fe6d 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
@@ -10,7 +10,6 @@
#ifndef SYSTEMZFRAMELOWERING_H
#define SYSTEMZFRAMELOWERING_H
-#include "SystemZSubtarget.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/Target/TargetFrameLowering.h"
@@ -21,48 +20,35 @@ class SystemZSubtarget;
class SystemZFrameLowering : public TargetFrameLowering {
IndexedMap<unsigned> RegSpillOffsets;
-protected:
- const SystemZTargetMachine &TM;
- const SystemZSubtarget &STI;
-
public:
- SystemZFrameLowering(const SystemZTargetMachine &tm,
- const SystemZSubtarget &sti);
+ SystemZFrameLowering();
// Override TargetFrameLowering.
- virtual bool isFPCloseToIncomingSP() const LLVM_OVERRIDE { return false; }
- virtual const SpillSlot *getCalleeSavedSpillSlots(unsigned &NumEntries) const
- LLVM_OVERRIDE;
- virtual void
- processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const LLVM_OVERRIDE;
- virtual bool
- spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const
- LLVM_OVERRIDE;
- virtual bool
- restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBII,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const
- LLVM_OVERRIDE;
- virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF,
- RegScavenger *RS) const;
- virtual void emitPrologue(MachineFunction &MF) const LLVM_OVERRIDE;
- virtual void emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const LLVM_OVERRIDE;
- virtual bool hasFP(const MachineFunction &MF) const LLVM_OVERRIDE;
- virtual int getFrameIndexOffset(const MachineFunction &MF,
- int FI) const LLVM_OVERRIDE;
- virtual bool hasReservedCallFrame(const MachineFunction &MF) const
- LLVM_OVERRIDE;
- virtual void
- eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const
- LLVM_OVERRIDE;
+ bool isFPCloseToIncomingSP() const override { return false; }
+ const SpillSlot *getCalleeSavedSpillSlots(unsigned &NumEntries) const
+ override;
+ void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const override;
+ bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const override;
+ bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBII,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const
+ override;
+ void processFunctionBeforeFrameFinalized(MachineFunction &MF,
+ RegScavenger *RS) const override;
+ void emitPrologue(MachineFunction &MF) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+ bool hasFP(const MachineFunction &MF) const override;
+ int getFrameIndexOffset(const MachineFunction &MF, int FI) const override;
+ bool hasReservedCallFrame(const MachineFunction &MF) const override;
+ void eliminateCallFramePseudoInstr(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const
+ override;
// Return the number of bytes in the callee-allocated part of the frame.
uint64_t getAllocatedStackSize(const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index f4a2773..24f7584 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -19,6 +19,8 @@
using namespace llvm;
+#define DEBUG_TYPE "systemz-isel"
+
namespace {
// Used to build addressing modes.
struct SystemZAddressingMode {
@@ -72,14 +74,14 @@ struct SystemZAddressingMode {
errs() << "SystemZAddressingMode " << this << '\n';
errs() << " Base ";
- if (Base.getNode() != 0)
+ if (Base.getNode())
Base.getNode()->dump();
else
errs() << "null\n";
if (hasIndexField()) {
errs() << " Index ";
- if (Index.getNode() != 0)
+ if (Index.getNode())
Index.getNode()->dump();
else
errs() << "null\n";
@@ -318,16 +320,14 @@ public:
Subtarget(*TM.getSubtargetImpl()) { }
// Override MachineFunctionPass.
- virtual const char *getPassName() const LLVM_OVERRIDE {
+ const char *getPassName() const override {
return "SystemZ DAG->DAG Pattern Instruction Selection";
}
// Override SelectionDAGISel.
- virtual SDNode *Select(SDNode *Node) LLVM_OVERRIDE;
- virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
- char ConstraintCode,
- std::vector<SDValue> &OutOps)
- LLVM_OVERRIDE;
+ SDNode *Select(SDNode *Node) override;
+ bool SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
+ std::vector<SDValue> &OutOps) override;
// Include the pieces autogenerated from the target description.
#include "SystemZGenDAGISel.inc"
@@ -651,8 +651,7 @@ bool SystemZDAGToDAGISel::detectOrAndInsertion(SDValue &Op,
return false;
// We need a constant mask.
- ConstantSDNode *MaskNode =
- dyn_cast<ConstantSDNode>(Op.getOperand(1).getNode());
+ auto *MaskNode = dyn_cast<ConstantSDNode>(Op.getOperand(1).getNode());
if (!MaskNode)
return false;
@@ -666,7 +665,7 @@ bool SystemZDAGToDAGISel::detectOrAndInsertion(SDValue &Op,
uint64_t Used = allOnes(Op.getValueType().getSizeInBits());
if (Used != (AndMask | InsertMask)) {
APInt KnownZero, KnownOne;
- CurDAG->ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne);
+ CurDAG->computeKnownBits(Op.getOperand(0), KnownZero, KnownOne);
if (Used != (AndMask | InsertMask | KnownZero.getZExtValue()))
return false;
}
@@ -704,8 +703,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
if (RxSBG.Opcode == SystemZ::RNSBG)
return false;
- ConstantSDNode *MaskNode =
- dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
if (!MaskNode)
return false;
@@ -716,7 +714,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
// been removed from the mask. See if adding them back in makes the
// mask suitable.
APInt KnownZero, KnownOne;
- CurDAG->ComputeMaskedBits(Input, KnownZero, KnownOne);
+ CurDAG->computeKnownBits(Input, KnownZero, KnownOne);
Mask |= KnownZero.getZExtValue();
if (!refineRxSBGMask(RxSBG, Mask))
return false;
@@ -729,8 +727,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
if (RxSBG.Opcode != SystemZ::RNSBG)
return false;
- ConstantSDNode *MaskNode =
- dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
if (!MaskNode)
return false;
@@ -741,7 +738,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
// been removed from the mask. See if adding them back in makes the
// mask suitable.
APInt KnownZero, KnownOne;
- CurDAG->ComputeMaskedBits(Input, KnownZero, KnownOne);
+ CurDAG->computeKnownBits(Input, KnownZero, KnownOne);
Mask &= ~KnownOne.getZExtValue();
if (!refineRxSBGMask(RxSBG, Mask))
return false;
@@ -754,8 +751,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
// Any 64-bit rotate left can be merged into the RxSBG.
if (RxSBG.BitSize != 64 || N.getValueType() != MVT::i64)
return false;
- ConstantSDNode *CountNode
- = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
if (!CountNode)
return false;
@@ -764,9 +760,24 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
return true;
}
- case ISD::SIGN_EXTEND:
+ case ISD::ANY_EXTEND:
+ // Bits above the extended operand are don't-care.
+ RxSBG.Input = N.getOperand(0);
+ return true;
+
case ISD::ZERO_EXTEND:
- case ISD::ANY_EXTEND: {
+ if (RxSBG.Opcode != SystemZ::RNSBG) {
+ // Restrict the mask to the extended operand.
+ unsigned InnerBitSize = N.getOperand(0).getValueType().getSizeInBits();
+ if (!refineRxSBGMask(RxSBG, allOnes(InnerBitSize)))
+ return false;
+
+ RxSBG.Input = N.getOperand(0);
+ return true;
+ }
+ // Fall through.
+
+ case ISD::SIGN_EXTEND: {
// Check that the extension bits are don't-care (i.e. are masked out
// by the final mask).
unsigned InnerBitSize = N.getOperand(0).getValueType().getSizeInBits();
@@ -778,8 +789,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
}
case ISD::SHL: {
- ConstantSDNode *CountNode =
- dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
if (!CountNode)
return false;
@@ -806,8 +816,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
case ISD::SRL:
case ISD::SRA: {
- ConstantSDNode *CountNode =
- dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
if (!CountNode)
return false;
@@ -860,12 +869,12 @@ SDNode *SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
if (RISBG.Input.getOpcode() != ISD::ANY_EXTEND)
Count += 1;
if (Count == 0)
- return 0;
+ return nullptr;
if (Count == 1) {
// Prefer to use normal shift instructions over RISBG, since they can handle
// all cases and are sometimes shorter.
if (N->getOpcode() != ISD::AND)
- return 0;
+ return nullptr;
// Prefer register extensions like LLC over RISBG. Also prefer to start
// out with normal ANDs if one instruction would be enough. We can convert
@@ -876,13 +885,13 @@ SDNode *SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
SystemZ::isImmLF(~RISBG.Mask) ||
SystemZ::isImmHF(~RISBG.Mask)) {
// Force the new mask into the DAG, since it may include known-one bits.
- ConstantSDNode *MaskN = cast<ConstantSDNode>(N->getOperand(1).getNode());
+ auto *MaskN = cast<ConstantSDNode>(N->getOperand(1).getNode());
if (MaskN->getZExtValue() != RISBG.Mask) {
SDValue NewMask = CurDAG->getConstant(RISBG.Mask, VT);
N = CurDAG->UpdateNodeOperands(N, N->getOperand(0), NewMask);
return SelectCode(N);
}
- return 0;
+ return nullptr;
}
}
@@ -920,7 +929,7 @@ SDNode *SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
// Do nothing if neither operand is suitable.
if (Count[0] == 0 && Count[1] == 0)
- return 0;
+ return nullptr;
// Pick the deepest second operand.
unsigned I = Count[0] > Count[1] ? 0 : 1;
@@ -928,9 +937,9 @@ SDNode *SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
// Prefer IC for character insertions from memory.
if (Opcode == SystemZ::ROSBG && (RxSBG[I].Mask & 0xff) == 0)
- if (LoadSDNode *Load = dyn_cast<LoadSDNode>(Op0.getNode()))
+ if (auto *Load = dyn_cast<LoadSDNode>(Op0.getNode()))
if (Load->getMemoryVT() == MVT::i8)
- return 0;
+ return nullptr;
// See whether we can avoid an AND in the first operand by converting
// ROSBG to RISBG.
@@ -979,8 +988,8 @@ bool SystemZDAGToDAGISel::canUseBlockOperation(StoreSDNode *Store,
return true;
// Otherwise we need to check whether there's an alias.
- const Value *V1 = Load->getSrcValue();
- const Value *V2 = Store->getSrcValue();
+ const Value *V1 = Load->getMemOperand()->getValue();
+ const Value *V2 = Store->getMemOperand()->getValue();
if (!V1 || !V2)
return false;
@@ -996,8 +1005,8 @@ bool SystemZDAGToDAGISel::canUseBlockOperation(StoreSDNode *Store,
}
bool SystemZDAGToDAGISel::storeLoadCanUseMVC(SDNode *N) const {
- StoreSDNode *Store = cast<StoreSDNode>(N);
- LoadSDNode *Load = cast<LoadSDNode>(Store->getValue());
+ auto *Store = cast<StoreSDNode>(N);
+ auto *Load = cast<LoadSDNode>(Store->getValue());
// Prefer not to use MVC if either address can use ... RELATIVE LONG
// instructions.
@@ -1016,9 +1025,9 @@ bool SystemZDAGToDAGISel::storeLoadCanUseMVC(SDNode *N) const {
bool SystemZDAGToDAGISel::storeLoadCanUseBlockBinary(SDNode *N,
unsigned I) const {
- StoreSDNode *StoreA = cast<StoreSDNode>(N);
- LoadSDNode *LoadA = cast<LoadSDNode>(StoreA->getValue().getOperand(1 - I));
- LoadSDNode *LoadB = cast<LoadSDNode>(StoreA->getValue().getOperand(I));
+ auto *StoreA = cast<StoreSDNode>(N);
+ auto *LoadA = cast<LoadSDNode>(StoreA->getValue().getOperand(1 - I));
+ auto *LoadB = cast<LoadSDNode>(StoreA->getValue().getOperand(I));
return !LoadA->isVolatile() && canUseBlockOperation(StoreA, LoadB);
}
@@ -1030,11 +1039,11 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
if (Node->isMachineOpcode()) {
DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
Node->setNodeId(-1);
- return 0;
+ return nullptr;
}
unsigned Opcode = Node->getOpcode();
- SDNode *ResNode = 0;
+ SDNode *ResNode = nullptr;
switch (Opcode) {
case ISD::OR:
if (Node->getOperand(1).getOpcode() != ISD::Constant)
@@ -1049,7 +1058,7 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
// If this is a 64-bit operation in which both 32-bit halves are nonzero,
// split the operation into two.
if (!ResNode && Node->getValueType(0) == MVT::i64)
- if (ConstantSDNode *Op1 = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
+ if (auto *Op1 = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
uint64_t Val = Op1->getZExtValue();
if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val))
Node = splitLargeImmediate(Opcode, Node, Node->getOperand(0),
@@ -1064,6 +1073,7 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
case ISD::ROTL:
case ISD::SHL:
case ISD::SRL:
+ case ISD::ZERO_EXTEND:
if (!ResNode)
ResNode = tryRISBGZero(Node);
break;
@@ -1079,20 +1089,6 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
}
break;
- case ISD::ATOMIC_LOAD_SUB:
- // Try to convert subtractions of constants to additions.
- if (ConstantSDNode *Op2 = dyn_cast<ConstantSDNode>(Node->getOperand(2))) {
- uint64_t Value = -Op2->getZExtValue();
- EVT VT = Node->getValueType(0);
- if (VT == MVT::i32 || isInt<32>(Value)) {
- SDValue Ops[] = { Node->getOperand(0), Node->getOperand(1),
- CurDAG->getConstant(int32_t(Value), VT) };
- Node = CurDAG->MorphNodeTo(Node, ISD::ATOMIC_LOAD_ADD,
- Node->getVTList(), Ops, array_lengthof(Ops));
- }
- }
- break;
-
case SystemZISD::SELECT_CCMASK: {
SDValue Op0 = Node->getOperand(0);
SDValue Op1 = Node->getOperand(1);
@@ -1120,7 +1116,7 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
ResNode = SelectCode(Node);
DEBUG(errs() << "=> ";
- if (ResNode == NULL || ResNode == Node)
+ if (ResNode == nullptr || ResNode == Node)
Node->dump(CurDAG);
else
ResNode->dump(CurDAG);
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index f6e1853..00c65f5 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -11,8 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "systemz-lower"
-
#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
@@ -22,11 +20,12 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-
#include <cctype>
using namespace llvm;
+#define DEBUG_TYPE "systemz-lower"
+
namespace {
// Represents a sequence for extracting a 0/1 value from an IPM result:
// (((X ^ XORValue) + AddValue) >> Bit)
@@ -38,7 +37,28 @@ struct IPMConversion {
int64_t AddValue;
unsigned Bit;
};
-}
+
+// Represents information about a comparison.
+struct Comparison {
+ Comparison(SDValue Op0In, SDValue Op1In)
+ : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
+
+ // The operands to the comparison.
+ SDValue Op0, Op1;
+
+ // The opcode that should be used to compare Op0 and Op1.
+ unsigned Opcode;
+
+ // A SystemZICMP value. Only used for integer comparisons.
+ unsigned ICmpType;
+
+ // The mask of CC values that Opcode can produce.
+ unsigned CCValid;
+
+ // The mask of CC values for which the original condition is true.
+ unsigned CCMask;
+};
+} // end anonymous namespace
// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
@@ -60,9 +80,9 @@ static MachineOperand earlyUseOperand(MachineOperand Op) {
return Op;
}
-SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
- : TargetLowering(tm, new TargetLoweringObjectFileELF()),
- Subtarget(*tm.getSubtargetImpl()), TM(tm) {
+SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &tm)
+ : TargetLowering(tm, new TargetLoweringObjectFileELF()),
+ Subtarget(tm.getSubtarget<SystemZSubtarget>()) {
MVT PtrVT = getPointerTy();
// Set up the register classes.
@@ -134,10 +154,14 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
setOperationAction(ISD::SDIVREM, VT, Custom);
setOperationAction(ISD::UDIVREM, VT, Custom);
- // Expand ATOMIC_LOAD and ATOMIC_STORE using ATOMIC_CMP_SWAP.
- // FIXME: probably much too conservative.
- setOperationAction(ISD::ATOMIC_LOAD, VT, Expand);
- setOperationAction(ISD::ATOMIC_STORE, VT, Expand);
+ // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
+ // stores, putting a serialization instruction after the stores.
+ setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
+ setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
+
+ // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
+ // available, or if the operand is constant.
+ setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
// No special instructions for these.
setOperationAction(ISD::CTPOP, VT, Expand);
@@ -152,8 +176,9 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
setOperationAction(ISD::SMUL_LOHI, VT, Custom);
setOperationAction(ISD::UMUL_LOHI, VT, Custom);
- // We have instructions for signed but not unsigned FP conversion.
- setOperationAction(ISD::FP_TO_UINT, VT, Expand);
+ // Only z196 and above have native support for conversions to unsigned.
+ if (!Subtarget.hasFPExtension())
+ setOperationAction(ISD::FP_TO_UINT, VT, Expand);
}
}
@@ -173,10 +198,12 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
- // We have instructions for signed but not unsigned FP conversion.
+ // z10 has instructions for signed but not unsigned FP conversion.
// Handle unsigned 32-bit types as signed 64-bit types.
- setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
- setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
+ if (!Subtarget.hasFPExtension()) {
+ setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
+ }
// We have native support for a 64-bit CTLZ, via FLOGR.
setOperationAction(ISD::CTLZ, MVT::i32, Promote);
@@ -266,6 +293,9 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
setOperationAction(ISD::VACOPY, MVT::Other, Custom);
setOperationAction(ISD::VAEND, MVT::Other, Expand);
+ // Codes for which we want to perform some z-specific combinations.
+ setTargetDAGCombine(ISD::SIGN_EXTEND);
+
// We want to use MVC in preference to even a single load/store pair.
MaxStoresPerMemcpy = 0;
MaxStoresPerMemcpyOptSize = 0;
@@ -310,6 +340,7 @@ bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
}
bool SystemZTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
+ unsigned,
bool *Fast) const {
// Unaligned accesses should never be slower than the expanded version.
// We check specifically for aligned accesses in the few cases where
@@ -393,7 +424,7 @@ getSingleConstraintMatchWeight(AsmOperandInfo &info,
Value *CallOperandVal = info.CallOperandVal;
// If we don't have a value, we can't do a match,
// but allow it at the lowest weight.
- if (CallOperandVal == NULL)
+ if (!CallOperandVal)
return CW_Default;
Type *type = CallOperandVal->getType();
// Look at the constraint type.
@@ -416,31 +447,31 @@ getSingleConstraintMatchWeight(AsmOperandInfo &info,
break;
case 'I': // Unsigned 8-bit constant
- if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
+ if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
if (isUInt<8>(C->getZExtValue()))
weight = CW_Constant;
break;
case 'J': // Unsigned 12-bit constant
- if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
+ if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
if (isUInt<12>(C->getZExtValue()))
weight = CW_Constant;
break;
case 'K': // Signed 16-bit constant
- if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
+ if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
if (isInt<16>(C->getSExtValue()))
weight = CW_Constant;
break;
case 'L': // Signed 20-bit displacement (on all targets we support)
- if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
+ if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
if (isInt<20>(C->getSExtValue()))
weight = CW_Constant;
break;
case 'M': // 0x7fffffff
- if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
+ if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
if (C->getZExtValue() == 0x7fffffff)
weight = CW_Constant;
break;
@@ -461,7 +492,7 @@ parseRegisterNumber(const std::string &Constraint,
if (Index < 16 && Map[Index])
return std::make_pair(Map[Index], RC);
}
- return std::make_pair(0u, static_cast<TargetRegisterClass*>(0));
+ return std::make_pair(0U, nullptr);
}
std::pair<unsigned, const TargetRegisterClass *> SystemZTargetLowering::
@@ -533,35 +564,35 @@ LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
if (Constraint.length() == 1) {
switch (Constraint[0]) {
case 'I': // Unsigned 8-bit constant
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+ if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (isUInt<8>(C->getZExtValue()))
Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
Op.getValueType()));
return;
case 'J': // Unsigned 12-bit constant
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+ if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (isUInt<12>(C->getZExtValue()))
Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
Op.getValueType()));
return;
case 'K': // Signed 16-bit constant
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+ if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (isInt<16>(C->getSExtValue()))
Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
Op.getValueType()));
return;
case 'L': // Signed 20-bit displacement (on all targets we support)
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+ if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (isInt<20>(C->getSExtValue()))
Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
Op.getValueType()));
return;
case 'M': // 0x7fffffff
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+ if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (C->getZExtValue() == 0x7fffffff)
Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
Op.getValueType()));
@@ -642,12 +673,13 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
MachineRegisterInfo &MRI = MF.getRegInfo();
SystemZMachineFunctionInfo *FuncInfo =
MF.getInfo<SystemZMachineFunctionInfo>();
- const SystemZFrameLowering *TFL =
- static_cast<const SystemZFrameLowering *>(TM.getFrameLowering());
+ auto *TFL = static_cast<const SystemZFrameLowering *>(
+ DAG.getTarget().getFrameLowering());
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
+ CCState CCInfo(CallConv, IsVarArg, MF, DAG.getTarget(), ArgLocs,
+ *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);
unsigned NumFixedGPRs = 0;
@@ -742,8 +774,8 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
}
// Join the stores, which are independent of one another.
Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
- &MemOps[NumFixedFPRs],
- SystemZ::NumArgFPRs - NumFixedFPRs);
+ makeArrayRef(&MemOps[NumFixedFPRs],
+ SystemZ::NumArgFPRs-NumFixedFPRs));
}
}
@@ -785,7 +817,8 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
// Analyze the operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState ArgCCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
+ CCState ArgCCInfo(CallConv, IsVarArg, MF, DAG.getTarget(), ArgLocs,
+ *DAG.getContext());
ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);
// We don't support GuaranteedTailCallOpt, only automatically-detected
@@ -845,17 +878,16 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
// Join the stores, which are independent of one another.
if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
// Accept direct calls by converting symbolic call addresses to the
// associated Target* opcodes. Force %r1 to be used for indirect
// tail calls.
SDValue Glue;
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
- } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
} else if (IsTailCall) {
@@ -882,6 +914,12 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
Ops.push_back(DAG.getRegister(RegsToPass[I].first,
RegsToPass[I].second.getValueType()));
+ // Add a register mask operand representing the call-preserved registers.
+ const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
+ assert(Mask && "Missing call preserved mask for calling convention");
+ Ops.push_back(DAG.getRegisterMask(Mask));
+
// Glue the call to the argument copies, if any.
if (Glue.getNode())
Ops.push_back(Glue);
@@ -889,8 +927,8 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
// Emit the call.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
if (IsTailCall)
- return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, &Ops[0], Ops.size());
- Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
+ return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
+ Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
Glue = Chain.getValue(1);
// Mark the end of the call, which is glued to the call itself.
@@ -902,7 +940,8 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RetLocs;
- CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
+ CCState RetCCInfo(CallConv, IsVarArg, MF, DAG.getTarget(), RetLocs,
+ *DAG.getContext());
RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);
// Copy all of the result registers out of their specified physreg.
@@ -933,7 +972,8 @@ SystemZTargetLowering::LowerReturn(SDValue Chain,
// Assign locations to each returned value.
SmallVector<CCValAssign, 16> RetLocs;
- CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
+ CCState RetCCInfo(CallConv, IsVarArg, MF, DAG.getTarget(), RetLocs,
+ *DAG.getContext());
RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);
// Quick exit for void returns
@@ -966,8 +1006,12 @@ SystemZTargetLowering::LowerReturn(SDValue Chain,
if (Glue.getNode())
RetOps.push_back(Glue);
- return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other,
- RetOps.data(), RetOps.size());
+ return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
+}
+
+SDValue SystemZTargetLowering::
+prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL, SelectionDAG &DAG) const {
+ return DAG.getNode(SystemZISD::SERIALIZE, DL, MVT::Other, Chain);
}
// CC is a comparison that will be implemented using an integer or
@@ -1044,7 +1088,7 @@ static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);
- // The remaing cases are 1, 2, 0/1/3 and 0/2/3. All these are
+ // The remaining cases are 1, 2, 0/1/3 and 0/2/3. All these
// can be done by inverting the low CC bit and applying one of the
// sign-based extractions above.
if (CCMask == (CCValid & SystemZ::CCMASK_1))
@@ -1065,109 +1109,100 @@ static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
llvm_unreachable("Unexpected CC combination");
}
-// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
-// can be converted to a comparison against zero, adjust the operands
+// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
-static void adjustZeroCmp(SelectionDAG &DAG, bool &IsUnsigned,
- SDValue &CmpOp0, SDValue &CmpOp1,
- unsigned &CCMask) {
- if (IsUnsigned)
+static void adjustZeroCmp(SelectionDAG &DAG, Comparison &C) {
+ if (C.ICmpType == SystemZICMP::UnsignedOnly)
return;
- ConstantSDNode *ConstOp1 = dyn_cast<ConstantSDNode>(CmpOp1.getNode());
+ auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
if (!ConstOp1)
return;
int64_t Value = ConstOp1->getSExtValue();
- if ((Value == -1 && CCMask == SystemZ::CCMASK_CMP_GT) ||
- (Value == -1 && CCMask == SystemZ::CCMASK_CMP_LE) ||
- (Value == 1 && CCMask == SystemZ::CCMASK_CMP_LT) ||
- (Value == 1 && CCMask == SystemZ::CCMASK_CMP_GE)) {
- CCMask ^= SystemZ::CCMASK_CMP_EQ;
- CmpOp1 = DAG.getConstant(0, CmpOp1.getValueType());
+ if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
+ (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
+ (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
+ (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
+ C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
+ C.Op1 = DAG.getConstant(0, C.Op1.getValueType());
}
}
-// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
-// is suitable for CLI(Y), CHHSI or CLHHSI, adjust the operands as necessary.
-static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned,
- SDValue &CmpOp0, SDValue &CmpOp1,
- unsigned &CCMask) {
+// If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
+// adjust the operands as necessary.
+static void adjustSubwordCmp(SelectionDAG &DAG, Comparison &C) {
// For us to make any changes, it must be a comparison between a single-use
// load and a constant.
- if (!CmpOp0.hasOneUse() ||
- CmpOp0.getOpcode() != ISD::LOAD ||
- CmpOp1.getOpcode() != ISD::Constant)
+ if (!C.Op0.hasOneUse() ||
+ C.Op0.getOpcode() != ISD::LOAD ||
+ C.Op1.getOpcode() != ISD::Constant)
return;
// We must have an 8- or 16-bit load.
- LoadSDNode *Load = cast<LoadSDNode>(CmpOp0);
+ auto *Load = cast<LoadSDNode>(C.Op0);
unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
if (NumBits != 8 && NumBits != 16)
return;
// The load must be an extending one and the constant must be within the
// range of the unextended value.
- ConstantSDNode *Constant = cast<ConstantSDNode>(CmpOp1);
- uint64_t Value = Constant->getZExtValue();
+ auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
+ uint64_t Value = ConstOp1->getZExtValue();
uint64_t Mask = (1 << NumBits) - 1;
if (Load->getExtensionType() == ISD::SEXTLOAD) {
- int64_t SignedValue = Constant->getSExtValue();
- if (uint64_t(SignedValue) + (1ULL << (NumBits - 1)) > Mask)
+ // Make sure that ConstOp1 is in range of C.Op0.
+ int64_t SignedValue = ConstOp1->getSExtValue();
+ if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
return;
- // Unsigned comparison between two sign-extended values is equivalent
- // to unsigned comparison between two zero-extended values.
- if (IsUnsigned)
+ if (C.ICmpType != SystemZICMP::SignedOnly) {
+ // Unsigned comparison between two sign-extended values is equivalent
+ // to unsigned comparison between two zero-extended values.
Value &= Mask;
- else if (CCMask == SystemZ::CCMASK_CMP_EQ ||
- CCMask == SystemZ::CCMASK_CMP_NE)
- // Any choice of IsUnsigned is OK for equality comparisons.
- // We could use either CHHSI or CLHHSI for 16-bit comparisons,
- // but since we use CLHHSI for zero extensions, it seems better
- // to be consistent and do the same here.
- Value &= Mask, IsUnsigned = true;
- else if (NumBits == 8) {
+ } else if (NumBits == 8) {
// Try to treat the comparison as unsigned, so that we can use CLI.
// Adjust CCMask and Value as necessary.
- if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_LT)
+ if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
// Test whether the high bit of the byte is set.
- Value = 127, CCMask = SystemZ::CCMASK_CMP_GT, IsUnsigned = true;
- else if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_GE)
+ Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
+ else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
// Test whether the high bit of the byte is clear.
- Value = 128, CCMask = SystemZ::CCMASK_CMP_LT, IsUnsigned = true;
+ Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
else
// No instruction exists for this combination.
return;
+ C.ICmpType = SystemZICMP::UnsignedOnly;
}
} else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
if (Value > Mask)
return;
- // Signed comparison between two zero-extended values is equivalent
- // to unsigned comparison.
- IsUnsigned = true;
+ assert(C.ICmpType == SystemZICMP::Any &&
+ "Signedness shouldn't matter here.");
} else
return;
// Make sure that the first operand is an i32 of the right extension type.
- ISD::LoadExtType ExtType = IsUnsigned ? ISD::ZEXTLOAD : ISD::SEXTLOAD;
- if (CmpOp0.getValueType() != MVT::i32 ||
+ ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
+ ISD::SEXTLOAD :
+ ISD::ZEXTLOAD);
+ if (C.Op0.getValueType() != MVT::i32 ||
Load->getExtensionType() != ExtType)
- CmpOp0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
- Load->getChain(), Load->getBasePtr(),
- Load->getPointerInfo(), Load->getMemoryVT(),
- Load->isVolatile(), Load->isNonTemporal(),
- Load->getAlignment());
+ C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
+ Load->getChain(), Load->getBasePtr(),
+ Load->getPointerInfo(), Load->getMemoryVT(),
+ Load->isVolatile(), Load->isNonTemporal(),
+ Load->getAlignment());
// Make sure that the second operand is an i32 with the right value.
- if (CmpOp1.getValueType() != MVT::i32 ||
- Value != Constant->getZExtValue())
- CmpOp1 = DAG.getConstant(Value, MVT::i32);
+ if (C.Op1.getValueType() != MVT::i32 ||
+ Value != ConstOp1->getZExtValue())
+ C.Op1 = DAG.getConstant(Value, MVT::i32);
}
// Return true if Op is either an unextended load, or a load suitable
// for integer register-memory comparisons of type ICmpType.
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
- LoadSDNode *Load = dyn_cast<LoadSDNode>(Op.getNode());
+ auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
if (Load) {
// There are no instructions to compare a register with a memory byte.
if (Load->getMemoryVT() == MVT::i8)
@@ -1187,53 +1222,163 @@ static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
return false;
}
-// Return true if it is better to swap comparison operands Op0 and Op1.
-// ICmpType is the type of an integer comparison.
-static bool shouldSwapCmpOperands(SDValue Op0, SDValue Op1,
- unsigned ICmpType) {
+// Return true if it is better to swap the operands of C.
+static bool shouldSwapCmpOperands(const Comparison &C) {
// Leave f128 comparisons alone, since they have no memory forms.
- if (Op0.getValueType() == MVT::f128)
+ if (C.Op0.getValueType() == MVT::f128)
return false;
// Always keep a floating-point constant second, since comparisons with
// zero can use LOAD TEST and comparisons with other constants make a
// natural memory operand.
- if (isa<ConstantFPSDNode>(Op1))
+ if (isa<ConstantFPSDNode>(C.Op1))
return false;
// Never swap comparisons with zero since there are many ways to optimize
// those later.
- ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
- if (COp1 && COp1->getZExtValue() == 0)
+ auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
+ if (ConstOp1 && ConstOp1->getZExtValue() == 0)
+ return false;
+
+ // Also keep natural memory operands second if the loaded value is
+ // only used here. Several comparisons have memory forms.
+ if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse())
return false;
// Look for cases where Cmp0 is a single-use load and Cmp1 isn't.
// In that case we generally prefer the memory to be second.
- if ((isNaturalMemoryOperand(Op0, ICmpType) && Op0.hasOneUse()) &&
- !(isNaturalMemoryOperand(Op1, ICmpType) && Op1.hasOneUse())) {
+ if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) {
// The only exceptions are when the second operand is a constant and
// we can use things like CHHSI.
- if (!COp1)
+ if (!ConstOp1)
return true;
// The unsigned memory-immediate instructions can handle 16-bit
// unsigned integers.
- if (ICmpType != SystemZICMP::SignedOnly &&
- isUInt<16>(COp1->getZExtValue()))
+ if (C.ICmpType != SystemZICMP::SignedOnly &&
+ isUInt<16>(ConstOp1->getZExtValue()))
return false;
// The signed memory-immediate instructions can handle 16-bit
// signed integers.
- if (ICmpType != SystemZICMP::UnsignedOnly &&
- isInt<16>(COp1->getSExtValue()))
+ if (C.ICmpType != SystemZICMP::UnsignedOnly &&
+ isInt<16>(ConstOp1->getSExtValue()))
return false;
return true;
}
+
+ // Try to promote the use of CGFR and CLGFR.
+ unsigned Opcode0 = C.Op0.getOpcode();
+ if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND)
+ return true;
+ if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
+ return true;
+ if (C.ICmpType != SystemZICMP::SignedOnly &&
+ Opcode0 == ISD::AND &&
+ C.Op0.getOperand(1).getOpcode() == ISD::Constant &&
+ cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
+ return true;
+
return false;
}
+// Return a version of comparison CC mask CCMask in which the LT and GT
+// actions are swapped.
+static unsigned reverseCCMask(unsigned CCMask) {
+ return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
+ (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
+ (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
+ (CCMask & SystemZ::CCMASK_CMP_UO));
+}
+
+// Check whether C tests for equality between X and Y and whether X - Y
+// or Y - X is also computed. In that case it's better to compare the
+// result of the subtraction against zero.
+static void adjustForSubtraction(SelectionDAG &DAG, Comparison &C) {
+ if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
+ C.CCMask == SystemZ::CCMASK_CMP_NE) {
+ for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
+ SDNode *N = *I;
+ if (N->getOpcode() == ISD::SUB &&
+ ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
+ (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
+ C.Op0 = SDValue(N, 0);
+ C.Op1 = DAG.getConstant(0, N->getValueType(0));
+ return;
+ }
+ }
+ }
+}
+
+// Check whether C compares a floating-point value with zero and if that
+// floating-point value is also negated. In this case we can use the
+// negation to set CC, so avoiding separate LOAD AND TEST and
+// LOAD (NEGATIVE/COMPLEMENT) instructions.
+static void adjustForFNeg(Comparison &C) {
+ auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
+ if (C1 && C1->isZero()) {
+ for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
+ SDNode *N = *I;
+ if (N->getOpcode() == ISD::FNEG) {
+ C.Op0 = SDValue(N, 0);
+ C.CCMask = reverseCCMask(C.CCMask);
+ return;
+ }
+ }
+ }
+}
+
+// Check whether C compares (shl X, 32) with 0 and whether X is
+// also sign-extended. In that case it is better to test the result
+// of the sign extension using LTGFR.
+//
+// This case is important because InstCombine transforms a comparison
+// with (sext (trunc X)) into a comparison with (shl X, 32).
+static void adjustForLTGFR(Comparison &C) {
+ // Check for a comparison between (shl X, 32) and 0.
+ if (C.Op0.getOpcode() == ISD::SHL &&
+ C.Op0.getValueType() == MVT::i64 &&
+ C.Op1.getOpcode() == ISD::Constant &&
+ cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
+ auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
+ if (C1 && C1->getZExtValue() == 32) {
+ SDValue ShlOp0 = C.Op0.getOperand(0);
+ // See whether X has any SIGN_EXTEND_INREG uses.
+ for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
+ SDNode *N = *I;
+ if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
+ cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
+ C.Op0 = SDValue(N, 0);
+ return;
+ }
+ }
+ }
+ }
+}
+
+// If C compares the truncation of an extending load, try to compare
+// the untruncated value instead. This exposes more opportunities to
+// reuse CC.
+static void adjustICmpTruncate(SelectionDAG &DAG, Comparison &C) {
+ if (C.Op0.getOpcode() == ISD::TRUNCATE &&
+ C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
+ C.Op1.getOpcode() == ISD::Constant &&
+ cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
+ auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
+ if (L->getMemoryVT().getStoreSizeInBits()
+ <= C.Op0.getValueType().getSizeInBits()) {
+ unsigned Type = L->getExtensionType();
+ if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
+ (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
+ C.Op0 = C.Op0.getOperand(0);
+ C.Op1 = DAG.getConstant(0, C.Op0.getValueType());
+ }
+ }
+ }
+}
+
// Return true if shift operation N has an in-range constant shift value.
// Store it in ShiftVal if so.
static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
- ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
+ auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
if (!Shift)
return false;
@@ -1341,118 +1486,143 @@ static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
return 0;
}
-// See whether the comparison (Opcode CmpOp0, CmpOp1, ICmpType) can be
-// implemented as a TEST UNDER MASK instruction when the condition being
-// tested is as described by CCValid and CCMask. Update the arguments
-// with the TM version if so.
-static void adjustForTestUnderMask(SelectionDAG &DAG, unsigned &Opcode,
- SDValue &CmpOp0, SDValue &CmpOp1,
- unsigned &CCValid, unsigned &CCMask,
- unsigned &ICmpType) {
+// See whether C can be implemented as a TEST UNDER MASK instruction.
+// Update the arguments with the TM version if so.
+static void adjustForTestUnderMask(SelectionDAG &DAG, Comparison &C) {
// Check that we have a comparison with a constant.
- ConstantSDNode *ConstCmpOp1 = dyn_cast<ConstantSDNode>(CmpOp1);
- if (!ConstCmpOp1)
+ auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
+ if (!ConstOp1)
return;
- uint64_t CmpVal = ConstCmpOp1->getZExtValue();
+ uint64_t CmpVal = ConstOp1->getZExtValue();
// Check whether the nonconstant input is an AND with a constant mask.
- if (CmpOp0.getOpcode() != ISD::AND)
- return;
- SDValue AndOp0 = CmpOp0.getOperand(0);
- SDValue AndOp1 = CmpOp0.getOperand(1);
- ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(AndOp1.getNode());
- if (!Mask)
- return;
- uint64_t MaskVal = Mask->getZExtValue();
+ Comparison NewC(C);
+ uint64_t MaskVal;
+ ConstantSDNode *Mask = nullptr;
+ if (C.Op0.getOpcode() == ISD::AND) {
+ NewC.Op0 = C.Op0.getOperand(0);
+ NewC.Op1 = C.Op0.getOperand(1);
+ Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
+ if (!Mask)
+ return;
+ MaskVal = Mask->getZExtValue();
+ } else {
+ // There is no instruction to compare with a 64-bit immediate
+ // so use TMHH instead if possible. We need an unsigned ordered
+ // comparison with an i64 immediate.
+ if (NewC.Op0.getValueType() != MVT::i64 ||
+ NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
+ NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
+ NewC.ICmpType == SystemZICMP::SignedOnly)
+ return;
+ // Convert LE and GT comparisons into LT and GE.
+ if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
+ NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
+ if (CmpVal == uint64_t(-1))
+ return;
+ CmpVal += 1;
+ NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
+ }
+ // If the low N bits of Op1 are zero then the low N bits of Op0 can
+ // be masked off without changing the result.
+ MaskVal = -(CmpVal & -CmpVal);
+ NewC.ICmpType = SystemZICMP::UnsignedOnly;
+ }
// Check whether the combination of mask, comparison value and comparison
// type are suitable.
- unsigned BitSize = CmpOp0.getValueType().getSizeInBits();
+ unsigned BitSize = NewC.Op0.getValueType().getSizeInBits();
unsigned NewCCMask, ShiftVal;
- if (ICmpType != SystemZICMP::SignedOnly &&
- AndOp0.getOpcode() == ISD::SHL &&
- isSimpleShift(AndOp0, ShiftVal) &&
- (NewCCMask = getTestUnderMaskCond(BitSize, CCMask, MaskVal >> ShiftVal,
+ if (NewC.ICmpType != SystemZICMP::SignedOnly &&
+ NewC.Op0.getOpcode() == ISD::SHL &&
+ isSimpleShift(NewC.Op0, ShiftVal) &&
+ (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
+ MaskVal >> ShiftVal,
CmpVal >> ShiftVal,
SystemZICMP::Any))) {
- AndOp0 = AndOp0.getOperand(0);
- AndOp1 = DAG.getConstant(MaskVal >> ShiftVal, AndOp0.getValueType());
- } else if (ICmpType != SystemZICMP::SignedOnly &&
- AndOp0.getOpcode() == ISD::SRL &&
- isSimpleShift(AndOp0, ShiftVal) &&
- (NewCCMask = getTestUnderMaskCond(BitSize, CCMask,
+ NewC.Op0 = NewC.Op0.getOperand(0);
+ MaskVal >>= ShiftVal;
+ } else if (NewC.ICmpType != SystemZICMP::SignedOnly &&
+ NewC.Op0.getOpcode() == ISD::SRL &&
+ isSimpleShift(NewC.Op0, ShiftVal) &&
+ (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
MaskVal << ShiftVal,
CmpVal << ShiftVal,
SystemZICMP::UnsignedOnly))) {
- AndOp0 = AndOp0.getOperand(0);
- AndOp1 = DAG.getConstant(MaskVal << ShiftVal, AndOp0.getValueType());
+ NewC.Op0 = NewC.Op0.getOperand(0);
+ MaskVal <<= ShiftVal;
} else {
- NewCCMask = getTestUnderMaskCond(BitSize, CCMask, MaskVal, CmpVal,
- ICmpType);
+ NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal,
+ NewC.ICmpType);
if (!NewCCMask)
return;
}
// Go ahead and make the change.
- Opcode = SystemZISD::TM;
- CmpOp0 = AndOp0;
- CmpOp1 = AndOp1;
- ICmpType = (bool(NewCCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
- bool(NewCCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
- CCValid = SystemZ::CCMASK_TM;
- CCMask = NewCCMask;
-}
-
-// Return a target node that compares CmpOp0 with CmpOp1 and stores a
-// 2-bit result in CC. Set CCValid to the CCMASK_* of all possible
-// 2-bit results and CCMask to the subset of those results that are
-// associated with Cond.
-static SDValue emitCmp(const SystemZTargetMachine &TM, SelectionDAG &DAG,
- SDLoc DL, SDValue CmpOp0, SDValue CmpOp1,
- ISD::CondCode Cond, unsigned &CCValid,
- unsigned &CCMask) {
- bool IsUnsigned = false;
- CCMask = CCMaskForCondCode(Cond);
- unsigned Opcode, ICmpType = 0;
- if (CmpOp0.getValueType().isFloatingPoint()) {
- CCValid = SystemZ::CCMASK_FCMP;
- Opcode = SystemZISD::FCMP;
+ C.Opcode = SystemZISD::TM;
+ C.Op0 = NewC.Op0;
+ if (Mask && Mask->getZExtValue() == MaskVal)
+ C.Op1 = SDValue(Mask, 0);
+ else
+ C.Op1 = DAG.getConstant(MaskVal, C.Op0.getValueType());
+ C.CCValid = SystemZ::CCMASK_TM;
+ C.CCMask = NewCCMask;
+}
+
+// Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
+static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
+ ISD::CondCode Cond) {
+ Comparison C(CmpOp0, CmpOp1);
+ C.CCMask = CCMaskForCondCode(Cond);
+ if (C.Op0.getValueType().isFloatingPoint()) {
+ C.CCValid = SystemZ::CCMASK_FCMP;
+ C.Opcode = SystemZISD::FCMP;
+ adjustForFNeg(C);
} else {
- IsUnsigned = CCMask & SystemZ::CCMASK_CMP_UO;
- CCValid = SystemZ::CCMASK_ICMP;
- CCMask &= CCValid;
- adjustZeroCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
- adjustSubwordCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
- Opcode = SystemZISD::ICMP;
+ C.CCValid = SystemZ::CCMASK_ICMP;
+ C.Opcode = SystemZISD::ICMP;
// Choose the type of comparison. Equality and inequality tests can
// use either signed or unsigned comparisons. The choice also doesn't
// matter if both sign bits are known to be clear. In those cases we
// want to give the main isel code the freedom to choose whichever
// form fits best.
- if (CCMask == SystemZ::CCMASK_CMP_EQ ||
- CCMask == SystemZ::CCMASK_CMP_NE ||
- (DAG.SignBitIsZero(CmpOp0) && DAG.SignBitIsZero(CmpOp1)))
- ICmpType = SystemZICMP::Any;
- else if (IsUnsigned)
- ICmpType = SystemZICMP::UnsignedOnly;
+ if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
+ C.CCMask == SystemZ::CCMASK_CMP_NE ||
+ (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1)))
+ C.ICmpType = SystemZICMP::Any;
+ else if (C.CCMask & SystemZ::CCMASK_CMP_UO)
+ C.ICmpType = SystemZICMP::UnsignedOnly;
else
- ICmpType = SystemZICMP::SignedOnly;
+ C.ICmpType = SystemZICMP::SignedOnly;
+ C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
+ adjustZeroCmp(DAG, C);
+ adjustSubwordCmp(DAG, C);
+ adjustForSubtraction(DAG, C);
+ adjustForLTGFR(C);
+ adjustICmpTruncate(DAG, C);
}
- if (shouldSwapCmpOperands(CmpOp0, CmpOp1, ICmpType)) {
- std::swap(CmpOp0, CmpOp1);
- CCMask = ((CCMask & SystemZ::CCMASK_CMP_EQ) |
- (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
- (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
- (CCMask & SystemZ::CCMASK_CMP_UO));
+ if (shouldSwapCmpOperands(C)) {
+ std::swap(C.Op0, C.Op1);
+ C.CCMask = reverseCCMask(C.CCMask);
}
- adjustForTestUnderMask(DAG, Opcode, CmpOp0, CmpOp1, CCValid, CCMask,
- ICmpType);
- if (Opcode == SystemZISD::ICMP || Opcode == SystemZISD::TM)
- return DAG.getNode(Opcode, DL, MVT::Glue, CmpOp0, CmpOp1,
- DAG.getConstant(ICmpType, MVT::i32));
- return DAG.getNode(Opcode, DL, MVT::Glue, CmpOp0, CmpOp1);
+ adjustForTestUnderMask(DAG, C);
+ return C;
+}
+
+// Emit the comparison instruction described by C.
+static SDValue emitCmp(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
+ if (C.Opcode == SystemZISD::ICMP)
+ return DAG.getNode(SystemZISD::ICMP, DL, MVT::Glue, C.Op0, C.Op1,
+ DAG.getConstant(C.ICmpType, MVT::i32));
+ if (C.Opcode == SystemZISD::TM) {
+ bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
+ bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
+ return DAG.getNode(SystemZISD::TM, DL, MVT::Glue, C.Op0, C.Op1,
+ DAG.getConstant(RegisterOnly, MVT::i32));
+ }
+ return DAG.getNode(C.Opcode, DL, MVT::Glue, C.Op0, C.Op1);
}
// Implement a 32-bit *MUL_LOHI operation by extending both operands to
@@ -1486,16 +1656,11 @@ static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT,
Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
}
-SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
- SelectionDAG &DAG) const {
- SDValue CmpOp0 = Op.getOperand(0);
- SDValue CmpOp1 = Op.getOperand(1);
- ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
- SDLoc DL(Op);
-
- unsigned CCValid, CCMask;
- SDValue Glue = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);
-
+// Return an i32 value that is 1 if the CC value produced by Glue is
+// in the mask CCMask and 0 otherwise. CC is known to have a value
+// in CCValid, so other values can be ignored.
+static SDValue emitSETCC(SelectionDAG &DAG, SDLoc DL, SDValue Glue,
+ unsigned CCValid, unsigned CCMask) {
IPMConversion Conversion = getIPMConversion(CCValid, CCMask);
SDValue Result = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
@@ -1516,6 +1681,18 @@ SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
return Result;
}
+SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue CmpOp0 = Op.getOperand(0);
+ SDValue CmpOp1 = Op.getOperand(1);
+ ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
+ SDLoc DL(Op);
+
+ Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC));
+ SDValue Glue = emitCmp(DAG, DL, C);
+ return emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask);
+}
+
SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue Chain = Op.getOperand(0);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
@@ -1524,11 +1701,33 @@ SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue Dest = Op.getOperand(4);
SDLoc DL(Op);
- unsigned CCValid, CCMask;
- SDValue Flags = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);
+ Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC));
+ SDValue Glue = emitCmp(DAG, DL, C);
return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
- Chain, DAG.getConstant(CCValid, MVT::i32),
- DAG.getConstant(CCMask, MVT::i32), Dest, Flags);
+ Chain, DAG.getConstant(C.CCValid, MVT::i32),
+ DAG.getConstant(C.CCMask, MVT::i32), Dest, Glue);
+}
+
+// Return true if Pos is CmpOp and Neg is the negative of CmpOp,
+// allowing Pos and Neg to be wider than CmpOp.
+static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) {
+ return (Neg.getOpcode() == ISD::SUB &&
+ Neg.getOperand(0).getOpcode() == ISD::Constant &&
+ cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&
+ Neg.getOperand(1) == Pos &&
+ (Pos == CmpOp ||
+ (Pos.getOpcode() == ISD::SIGN_EXTEND &&
+ Pos.getOperand(0) == CmpOp)));
+}
+
+// Return the absolute or negative absolute of Op; IsNegative decides which.
+static SDValue getAbsolute(SelectionDAG &DAG, SDLoc DL, SDValue Op,
+ bool IsNegative) {
+ Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op);
+ if (IsNegative)
+ Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(),
+ DAG.getConstant(0, Op.getValueType()), Op);
+ return Op;
}
SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
@@ -1540,18 +1739,56 @@ SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
SDLoc DL(Op);
- unsigned CCValid, CCMask;
- SDValue Flags = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);
+ Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC));
+
+ // Check for absolute and negative-absolute selections, including those
+ // where the comparison value is sign-extended (for LPGFR and LNGFR).
+ // This check supplements the one in DAGCombiner.
+ if (C.Opcode == SystemZISD::ICMP &&
+ C.CCMask != SystemZ::CCMASK_CMP_EQ &&
+ C.CCMask != SystemZ::CCMASK_CMP_NE &&
+ C.Op1.getOpcode() == ISD::Constant &&
+ cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
+ if (isAbsolute(C.Op0, TrueOp, FalseOp))
+ return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT);
+ if (isAbsolute(C.Op0, FalseOp, TrueOp))
+ return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT);
+ }
+
+ SDValue Glue = emitCmp(DAG, DL, C);
+
+ // Special case for handling -1/0 results. The shifts we use here
+ // should get optimized with the IPM conversion sequence.
+ auto *TrueC = dyn_cast<ConstantSDNode>(TrueOp);
+ auto *FalseC = dyn_cast<ConstantSDNode>(FalseOp);
+ if (TrueC && FalseC) {
+ int64_t TrueVal = TrueC->getSExtValue();
+ int64_t FalseVal = FalseC->getSExtValue();
+ if ((TrueVal == -1 && FalseVal == 0) || (TrueVal == 0 && FalseVal == -1)) {
+ // Invert the condition if we want -1 on false.
+ if (TrueVal == 0)
+ C.CCMask ^= C.CCValid;
+ SDValue Result = emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask);
+ EVT VT = Op.getValueType();
+ // Extend the result to VT. Upper bits are ignored.
+ if (!is32Bit(VT))
+ Result = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Result);
+ // Sign-extend from the low bit.
+ SDValue ShAmt = DAG.getConstant(VT.getSizeInBits() - 1, MVT::i32);
+ SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Result, ShAmt);
+ return DAG.getNode(ISD::SRA, DL, VT, Shl, ShAmt);
+ }
+ }
SmallVector<SDValue, 5> Ops;
Ops.push_back(TrueOp);
Ops.push_back(FalseOp);
- Ops.push_back(DAG.getConstant(CCValid, MVT::i32));
- Ops.push_back(DAG.getConstant(CCMask, MVT::i32));
- Ops.push_back(Flags);
+ Ops.push_back(DAG.getConstant(C.CCValid, MVT::i32));
+ Ops.push_back(DAG.getConstant(C.CCMask, MVT::i32));
+ Ops.push_back(Glue);
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
- return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, &Ops[0], Ops.size());
+ return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, Ops);
}
SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
@@ -1560,8 +1797,8 @@ SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
const GlobalValue *GV = Node->getGlobal();
int64_t Offset = Node->getOffset();
EVT PtrVT = getPointerTy();
- Reloc::Model RM = TM.getRelocationModel();
- CodeModel::Model CM = TM.getCodeModel();
+ Reloc::Model RM = DAG.getTarget().getRelocationModel();
+ CodeModel::Model CM = DAG.getTarget().getCodeModel();
SDValue Result;
if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) {
@@ -1598,7 +1835,7 @@ SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
SDLoc DL(Node);
const GlobalValue *GV = Node->getGlobal();
EVT PtrVT = getPointerTy();
- TLSModel::Model model = TM.getTLSModel(GV);
+ TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
if (model != TLSModel::LocalExec)
llvm_unreachable("only local-exec TLS mode supported");
@@ -1743,7 +1980,7 @@ SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
false, false, 0);
Offset += 8;
}
- return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps, NumFields);
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}
SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
@@ -1784,7 +2021,7 @@ lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);
SDValue Ops[2] = { Result, Chain };
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
@@ -1826,7 +2063,7 @@ SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
}
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
@@ -1845,7 +2082,7 @@ SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
// low half first, so the results are in reverse order.
lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
@@ -1872,7 +2109,7 @@ SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
SDValue Ops[2];
lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode,
Op0, Op1, Ops[1], Ops[0]);
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
@@ -1890,7 +2127,7 @@ SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
else
lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64,
Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
@@ -1899,8 +2136,8 @@ SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
// Get the known-zero masks for each operand.
SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
APInt KnownZero[2], KnownOne[2];
- DAG.ComputeMaskedBits(Ops[0], KnownZero[0], KnownOne[0]);
- DAG.ComputeMaskedBits(Ops[1], KnownZero[1], KnownOne[1]);
+ DAG.computeKnownBits(Ops[0], KnownZero[0], KnownOne[0]);
+ DAG.computeKnownBits(Ops[1], KnownZero[1], KnownOne[1]);
// See if the upper 32 bits of one operand and the lower 32 bits of the
// other are known zero. They are the low and high operands respectively.
@@ -1949,12 +2186,33 @@ SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
MVT::i64, HighOp, Low32);
}
+// Op is an atomic load. Lower it into a normal volatile load.
+SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
+ SelectionDAG &DAG) const {
+ auto *Node = cast<AtomicSDNode>(Op.getNode());
+ return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(),
+ Node->getChain(), Node->getBasePtr(),
+ Node->getMemoryVT(), Node->getMemOperand());
+}
+
+// Op is an atomic store. Lower it into a normal volatile store followed
+// by a serialization.
+SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op,
+ SelectionDAG &DAG) const {
+ auto *Node = cast<AtomicSDNode>(Op.getNode());
+ SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(),
+ Node->getBasePtr(), Node->getMemoryVT(),
+ Node->getMemOperand());
+ return SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op), MVT::Other,
+ Chain), 0);
+}
+
// Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first
// two into the fullword ATOMIC_LOADW_* operation given by Opcode.
-SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
- SelectionDAG &DAG,
- unsigned Opcode) const {
- AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
+SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
+ SelectionDAG &DAG,
+ unsigned Opcode) const {
+ auto *Node = cast<AtomicSDNode>(Op.getNode());
// 32-bit operations need no code outside the main loop.
EVT NarrowVT = Node->getMemoryVT();
@@ -1972,7 +2230,7 @@ SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
// Convert atomic subtracts of constants into additions.
if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
- if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Src2)) {
+ if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
Opcode = SystemZISD::ATOMIC_LOADW_ADD;
Src2 = DAG.getConstant(-Const->getSExtValue(), Src2.getValueType());
}
@@ -2010,7 +2268,6 @@ SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
DAG.getConstant(BitSize, WideVT) };
SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
- array_lengthof(Ops),
NarrowVT, MMO);
// Rotate the result of the final CS so that the field is in the lower
@@ -2020,14 +2277,52 @@ SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);
SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
- return DAG.getMergeValues(RetOps, 2, DL);
+ return DAG.getMergeValues(RetOps, DL);
+}
+
+// Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations
+// into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit
+// operations into additions.
+SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
+ SelectionDAG &DAG) const {
+ auto *Node = cast<AtomicSDNode>(Op.getNode());
+ EVT MemVT = Node->getMemoryVT();
+ if (MemVT == MVT::i32 || MemVT == MVT::i64) {
+ // A full-width operation.
+ assert(Op.getValueType() == MemVT && "Mismatched VTs");
+ SDValue Src2 = Node->getVal();
+ SDValue NegSrc2;
+ SDLoc DL(Src2);
+
+ if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
+ // Use an addition if the operand is constant and either LAA(G) is
+ // available or the negative value is in the range of A(G)FHI.
+ int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
+ if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1())
+ NegSrc2 = DAG.getConstant(Value, MemVT);
+ } else if (Subtarget.hasInterlockedAccess1())
+ // Use LAA(G) if available.
+ NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, MemVT),
+ Src2);
+
+ if (NegSrc2.getNode())
+ return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT,
+ Node->getChain(), Node->getBasePtr(), NegSrc2,
+ Node->getMemOperand(), Node->getOrdering(),
+ Node->getSynchScope());
+
+ // Use the node as-is.
+ return Op;
+ }
+
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
}
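For the full-width case above, the subtraction is rewritten as an addition of the negated operand when that stays cheap. A hedged sketch of the constant-operand decision (the helper name is invented for illustration, and the INT64_MIN corner case is ignored for brevity):

#include <cstdint>

// An atomic "sub r, C" can become an atomic "add r, -C" when -C still fits
// the 32-bit add-immediate range (A(G)FHI), or unconditionally when the
// interlocked-access facility provides LAA/LAAG.
bool shouldRewriteSubAsAdd(int64_t C, bool HasInterlockedAccess1) {
  int64_t Negated = -C;                                   // value the ADD would use
  bool FitsAddImmediate = Negated >= INT32_MIN && Negated <= INT32_MAX;
  return FitsAddImmediate || HasInterlockedAccess1;
}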
// Node is an 8- or 16-bit ATOMIC_CMP_SWAP operation. Lower the first two
// into a fullword ATOMIC_CMP_SWAPW operation.
SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
SelectionDAG &DAG) const {
- AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
+ auto *Node = cast<AtomicSDNode>(Op.getNode());
// We have native support for 32-bit compare and swap.
EVT NarrowVT = Node->getMemoryVT();
@@ -2064,8 +2359,7 @@ SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
NegBitShift, DAG.getConstant(BitSize, WideVT) };
SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
- VTList, Ops, array_lengthof(Ops),
- NarrowVT, MMO);
+ VTList, Ops, NarrowVT, MMO);
return AtomicOp;
}
@@ -2094,14 +2388,14 @@ SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
- MemIntrinsicSDNode *Node = cast<MemIntrinsicSDNode>(Op.getNode());
+ auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
SDValue Ops[] = {
Op.getOperand(0),
DAG.getConstant(Code, MVT::i32),
Op.getOperand(1)
};
return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, SDLoc(Op),
- Node->getVTList(), Ops, array_lengthof(Ops),
+ Node->getVTList(), Ops,
Node->getMemoryVT(), Node->getMemOperand());
}
@@ -2143,27 +2437,31 @@ SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
case ISD::OR:
return lowerOR(Op, DAG);
case ISD::ATOMIC_SWAP:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_SWAPW);
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW);
+ case ISD::ATOMIC_STORE:
+ return lowerATOMIC_STORE(Op, DAG);
+ case ISD::ATOMIC_LOAD:
+ return lowerATOMIC_LOAD(Op, DAG);
case ISD::ATOMIC_LOAD_ADD:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
case ISD::ATOMIC_LOAD_SUB:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
+ return lowerATOMIC_LOAD_SUB(Op, DAG);
case ISD::ATOMIC_LOAD_AND:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
case ISD::ATOMIC_LOAD_OR:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
case ISD::ATOMIC_LOAD_XOR:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
case ISD::ATOMIC_LOAD_NAND:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
case ISD::ATOMIC_LOAD_MIN:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
case ISD::ATOMIC_LOAD_MAX:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
case ISD::ATOMIC_LOAD_UMIN:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
case ISD::ATOMIC_LOAD_UMAX:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
case ISD::ATOMIC_CMP_SWAP:
return lowerATOMIC_CMP_SWAP(Op, DAG);
case ISD::STACKSAVE:
@@ -2185,6 +2483,7 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
OPCODE(SIBCALL);
OPCODE(PCREL_WRAPPER);
OPCODE(PCREL_OFFSET);
+ OPCODE(IABS);
OPCODE(ICMP);
OPCODE(FCMP);
OPCODE(TM);
@@ -2210,6 +2509,7 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
OPCODE(STPCPY);
OPCODE(SEARCH_STRING);
OPCODE(IPM);
+ OPCODE(SERIALIZE);
OPCODE(ATOMIC_SWAPW);
OPCODE(ATOMIC_LOADW_ADD);
OPCODE(ATOMIC_LOADW_SUB);
@@ -2224,10 +2524,43 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
OPCODE(ATOMIC_CMP_SWAPW);
OPCODE(PREFETCH);
}
- return NULL;
+ return nullptr;
#undef OPCODE
}
+SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ SelectionDAG &DAG = DCI.DAG;
+ unsigned Opcode = N->getOpcode();
+ if (Opcode == ISD::SIGN_EXTEND) {
+ // Convert (sext (ashr (shl X, C1), C2)) to
+ // (ashr (shl (anyext X), C1'), C2'), since wider shifts are as
+ // cheap as narrower ones.
+ SDValue N0 = N->getOperand(0);
+ EVT VT = N->getValueType(0);
+ if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) {
+ auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+ SDValue Inner = N0.getOperand(0);
+ if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
+ if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
+ unsigned Extra = (VT.getSizeInBits() -
+ N0.getValueType().getSizeInBits());
+ unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
+ unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
+ EVT ShiftVT = N0.getOperand(1).getValueType();
+ SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT,
+ Inner.getOperand(0));
+ SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext,
+ DAG.getConstant(NewShlAmt, ShiftVT));
+ return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl,
+ DAG.getConstant(NewSraAmt, ShiftVT));
+ }
+ }
+ }
+ }
+ return SDValue();
+}
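A concrete instance of this combine, assuming a 32-bit source and a 64-bit result so that Extra = 64 - 32 = 32: sign-extending the low byte of an i32 with a shl/sra pair and then sign-extending the i32 to i64 gives the same value as one widened shift pair performed directly in 64 bits. A standalone check (plain C++, not the DAG form):

#include <cassert>
#include <cstdint>

int64_t narrowThenExtend(uint32_t X) {
  int32_t Narrow = static_cast<int32_t>(X << 24) >> 24;   // (sra (shl X, 24), 24) in i32
  return static_cast<int64_t>(Narrow);                    // sext to i64
}

int64_t widenedShifts(uint32_t X) {
  uint64_t Ext = X;                                        // anyext; upper bits are don't-care
  return static_cast<int64_t>(Ext << 56) >> 56;            // shift amounts grown by Extra = 32
}

int main() {
  for (uint32_t X : {0x00u, 0x7Fu, 0x80u, 0xFFu, 0x12345680u})
    assert(narrowThenExtend(X) == widenedShifts(X));
}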
+
//===----------------------------------------------------------------------===//
// Custom insertion
//===----------------------------------------------------------------------===//
@@ -2236,7 +2569,7 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) {
MachineFunction &MF = *MBB->getParent();
MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
- MF.insert(llvm::next(MachineFunction::iterator(MBB)), NewMBB);
+ MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB);
return NewMBB;
}
@@ -2246,8 +2579,7 @@ static MachineBasicBlock *splitBlockAfter(MachineInstr *MI,
MachineBasicBlock *MBB) {
MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
NewMBB->splice(NewMBB->begin(), MBB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- MBB->end());
+ std::next(MachineBasicBlock::iterator(MI)), MBB->end());
NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
return NewMBB;
}
@@ -2281,7 +2613,8 @@ static unsigned forceReg(MachineInstr *MI, MachineOperand &Base,
MachineBasicBlock *
SystemZTargetLowering::emitSelect(MachineInstr *MI,
MachineBasicBlock *MBB) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
+ const SystemZInstrInfo *TII = static_cast<const SystemZInstrInfo *>(
+ MBB->getParent()->getTarget().getInstrInfo());
unsigned DestReg = MI->getOperand(0).getReg();
unsigned TrueReg = MI->getOperand(1).getReg();
@@ -2329,7 +2662,8 @@ SystemZTargetLowering::emitCondStore(MachineInstr *MI,
MachineBasicBlock *MBB,
unsigned StoreOpcode, unsigned STOCOpcode,
bool Invert) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
+ const SystemZInstrInfo *TII = static_cast<const SystemZInstrInfo *>(
+ MBB->getParent()->getTarget().getInstrInfo());
unsigned SrcReg = MI->getOperand(0).getReg();
MachineOperand Base = MI->getOperand(1);
@@ -2344,7 +2678,7 @@ SystemZTargetLowering::emitCondStore(MachineInstr *MI,
// Use STOCOpcode if possible. We could use different store patterns in
// order to avoid matching the index register, but the performance trade-offs
// might be more complicated in that case.
- if (STOCOpcode && !IndexReg && TM.getSubtargetImpl()->hasLoadStoreOnCond()) {
+ if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
if (Invert)
CCMask ^= CCValid;
BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
@@ -2396,8 +2730,9 @@ SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI,
unsigned BinOpcode,
unsigned BitSize,
bool Invert) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
+ const SystemZInstrInfo *TII =
+ static_cast<const SystemZInstrInfo *>(MF.getTarget().getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
bool IsSubWord = (BitSize < 32);
@@ -2519,8 +2854,9 @@ SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
unsigned CompareOpcode,
unsigned KeepOldMask,
unsigned BitSize) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
+ const SystemZInstrInfo *TII =
+ static_cast<const SystemZInstrInfo *>(MF.getTarget().getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
bool IsSubWord = (BitSize < 32);
@@ -2630,8 +2966,9 @@ SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
MachineBasicBlock *
SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
MachineBasicBlock *MBB) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
+ const SystemZInstrInfo *TII =
+ static_cast<const SystemZInstrInfo *>(MF.getTarget().getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
// Extract the operands. Base can be a register or a frame index.
@@ -2746,8 +3083,9 @@ MachineBasicBlock *
SystemZTargetLowering::emitExt128(MachineInstr *MI,
MachineBasicBlock *MBB,
bool ClearEven, unsigned SubReg) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
+ const SystemZInstrInfo *TII =
+ static_cast<const SystemZInstrInfo *>(MF.getTarget().getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
DebugLoc DL = MI->getDebugLoc();
@@ -2777,8 +3115,9 @@ MachineBasicBlock *
SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
MachineBasicBlock *MBB,
unsigned Opcode) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
+ const SystemZInstrInfo *TII =
+ static_cast<const SystemZInstrInfo *>(MF.getTarget().getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
DebugLoc DL = MI->getDebugLoc();
@@ -2791,7 +3130,7 @@ SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
// When generating more than one CLC, all but the last will need to
// branch to the end when a difference is found.
MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
- splitBlockAfter(MI, MBB) : 0);
+ splitBlockAfter(MI, MBB) : nullptr);
// Check for the loop form, in which operand 5 is the trip count.
if (MI->getNumExplicitOperands() > 5) {
@@ -2946,8 +3285,9 @@ MachineBasicBlock *
SystemZTargetLowering::emitStringWrapper(MachineInstr *MI,
MachineBasicBlock *MBB,
unsigned Opcode) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
+ const SystemZInstrInfo *TII =
+ static_cast<const SystemZInstrInfo *>(MF.getTarget().getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
DebugLoc DL = MI->getDebugLoc();
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h
index c6dcca6..e21b050 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -22,232 +22,233 @@
namespace llvm {
namespace SystemZISD {
- enum {
- FIRST_NUMBER = ISD::BUILTIN_OP_END,
-
- // Return with a flag operand. Operand 0 is the chain operand.
- RET_FLAG,
-
- // Calls a function. Operand 0 is the chain operand and operand 1
- // is the target address. The arguments start at operand 2.
- // There is an optional glue operand at the end.
- CALL,
- SIBCALL,
-
- // Wraps a TargetGlobalAddress that should be loaded using PC-relative
- // accesses (LARL). Operand 0 is the address.
- PCREL_WRAPPER,
-
- // Used in cases where an offset is applied to a TargetGlobalAddress.
- // Operand 0 is the full TargetGlobalAddress and operand 1 is a
- // PCREL_WRAPPER for an anchor point. This is used so that we can
- // cheaply refer to either the full address or the anchor point
- // as a register base.
- PCREL_OFFSET,
-
- // Integer comparisons. There are three operands: the two values
- // to compare, and an integer of type SystemZICMP.
- ICMP,
-
- // Floating-point comparisons. The two operands are the values to compare.
- FCMP,
-
- // Test under mask. The first operand is ANDed with the second operand
- // and the condition codes are set on the result. The third operand is
- // a boolean that is true if the condition codes need to distinguish
- // between CCMASK_TM_MIXED_MSB_0 and CCMASK_TM_MIXED_MSB_1 (which the
- // register forms do but the memory forms don't).
- TM,
-
- // Branches if a condition is true. Operand 0 is the chain operand;
- // operand 1 is the 4-bit condition-code mask, with bit N in
- // big-endian order meaning "branch if CC=N"; operand 2 is the
- // target block and operand 3 is the flag operand.
- BR_CCMASK,
-
- // Selects between operand 0 and operand 1. Operand 2 is the
- // mask of condition-code values for which operand 0 should be
- // chosen over operand 1; it has the same form as BR_CCMASK.
- // Operand 3 is the flag operand.
- SELECT_CCMASK,
-
- // Evaluates to the gap between the stack pointer and the
- // base of the dynamically-allocatable area.
- ADJDYNALLOC,
-
- // Extracts the value of a 32-bit access register. Operand 0 is
- // the number of the register.
- EXTRACT_ACCESS,
-
- // Wrappers around the ISD opcodes of the same name. The output and
- // first input operands are GR128s. The trailing numbers are the
- // widths of the second operand in bits.
- UMUL_LOHI64,
- SDIVREM32,
- SDIVREM64,
- UDIVREM32,
- UDIVREM64,
-
- // Use a series of MVCs to copy bytes from one memory location to another.
- // The operands are:
- // - the target address
- // - the source address
- // - the constant length
- //
- // This isn't a memory opcode because we'd need to attach two
- // MachineMemOperands rather than one.
- MVC,
-
- // Like MVC, but implemented as a loop that handles X*256 bytes
- // followed by straight-line code to handle the rest (if any).
- // The value of X is passed as an additional operand.
- MVC_LOOP,
-
- // Similar to MVC and MVC_LOOP, but for logic operations (AND, OR, XOR).
- NC,
- NC_LOOP,
- OC,
- OC_LOOP,
- XC,
- XC_LOOP,
-
- // Use CLC to compare two blocks of memory, with the same comments
- // as for MVC and MVC_LOOP.
- CLC,
- CLC_LOOP,
-
- // Use an MVST-based sequence to implement stpcpy().
- STPCPY,
-
- // Use a CLST-based sequence to implement strcmp(). The two input operands
- // are the addresses of the strings to compare.
- STRCMP,
-
- // Use an SRST-based sequence to search a block of memory. The first
- // operand is the end address, the second is the start, and the third
- // is the character to search for. CC is set to 1 on success and 2
- // on failure.
- SEARCH_STRING,
-
- // Store the CC value in bits 29 and 28 of an integer.
- IPM,
-
- // Wrappers around the inner loop of an 8- or 16-bit ATOMIC_SWAP or
- // ATOMIC_LOAD_<op>.
- //
- // Operand 0: the address of the containing 32-bit-aligned field
- // Operand 1: the second operand of <op>, in the high bits of an i32
- // for everything except ATOMIC_SWAPW
- // Operand 2: how many bits to rotate the i32 left to bring the first
- // operand into the high bits
- // Operand 3: the negative of operand 2, for rotating the other way
- // Operand 4: the width of the field in bits (8 or 16)
- ATOMIC_SWAPW = ISD::FIRST_TARGET_MEMORY_OPCODE,
- ATOMIC_LOADW_ADD,
- ATOMIC_LOADW_SUB,
- ATOMIC_LOADW_AND,
- ATOMIC_LOADW_OR,
- ATOMIC_LOADW_XOR,
- ATOMIC_LOADW_NAND,
- ATOMIC_LOADW_MIN,
- ATOMIC_LOADW_MAX,
- ATOMIC_LOADW_UMIN,
- ATOMIC_LOADW_UMAX,
-
- // A wrapper around the inner loop of an ATOMIC_CMP_SWAP.
- //
- // Operand 0: the address of the containing 32-bit-aligned field
- // Operand 1: the compare value, in the low bits of an i32
- // Operand 2: the swap value, in the low bits of an i32
- // Operand 3: how many bits to rotate the i32 left to bring the first
- // operand into the high bits
- // Operand 4: the negative of operand 2, for rotating the other way
- // Operand 5: the width of the field in bits (8 or 16)
- ATOMIC_CMP_SWAPW,
-
- // Prefetch from the second operand using the 4-bit control code in
- // the first operand. The code is 1 for a load prefetch and 2 for
- // a store prefetch.
- PREFETCH
- };
-
- // Return true if OPCODE is some kind of PC-relative address.
- inline bool isPCREL(unsigned Opcode) {
- return Opcode == PCREL_WRAPPER || Opcode == PCREL_OFFSET;
- }
+enum {
+ FIRST_NUMBER = ISD::BUILTIN_OP_END,
+
+ // Return with a flag operand. Operand 0 is the chain operand.
+ RET_FLAG,
+
+ // Calls a function. Operand 0 is the chain operand and operand 1
+ // is the target address. The arguments start at operand 2.
+ // There is an optional glue operand at the end.
+ CALL,
+ SIBCALL,
+
+ // Wraps a TargetGlobalAddress that should be loaded using PC-relative
+ // accesses (LARL). Operand 0 is the address.
+ PCREL_WRAPPER,
+
+ // Used in cases where an offset is applied to a TargetGlobalAddress.
+ // Operand 0 is the full TargetGlobalAddress and operand 1 is a
+ // PCREL_WRAPPER for an anchor point. This is used so that we can
+ // cheaply refer to either the full address or the anchor point
+ // as a register base.
+ PCREL_OFFSET,
+
+ // Integer absolute.
+ IABS,
+
+ // Integer comparisons. There are three operands: the two values
+ // to compare, and an integer of type SystemZICMP.
+ ICMP,
+
+ // Floating-point comparisons. The two operands are the values to compare.
+ FCMP,
+
+ // Test under mask. The first operand is ANDed with the second operand
+ // and the condition codes are set on the result. The third operand is
+ // a boolean that is true if the condition codes need to distinguish
+ // between CCMASK_TM_MIXED_MSB_0 and CCMASK_TM_MIXED_MSB_1 (which the
+ // register forms do but the memory forms don't).
+ TM,
+
+ // Branches if a condition is true. Operand 0 is the chain operand;
+ // operand 1 is the 4-bit condition-code mask, with bit N in
+ // big-endian order meaning "branch if CC=N"; operand 2 is the
+ // target block and operand 3 is the flag operand.
+ BR_CCMASK,
+
+ // Selects between operand 0 and operand 1. Operand 2 is the
+ // mask of condition-code values for which operand 0 should be
+ // chosen over operand 1; it has the same form as BR_CCMASK.
+ // Operand 3 is the flag operand.
+ SELECT_CCMASK,
+
+ // Evaluates to the gap between the stack pointer and the
+ // base of the dynamically-allocatable area.
+ ADJDYNALLOC,
+
+ // Extracts the value of a 32-bit access register. Operand 0 is
+ // the number of the register.
+ EXTRACT_ACCESS,
+
+ // Wrappers around the ISD opcodes of the same name. The output and
+ // first input operands are GR128s. The trailing numbers are the
+ // widths of the second operand in bits.
+ UMUL_LOHI64,
+ SDIVREM32,
+ SDIVREM64,
+ UDIVREM32,
+ UDIVREM64,
+
+ // Use a series of MVCs to copy bytes from one memory location to another.
+ // The operands are:
+ // - the target address
+ // - the source address
+ // - the constant length
+ //
+ // This isn't a memory opcode because we'd need to attach two
+ // MachineMemOperands rather than one.
+ MVC,
+
+ // Like MVC, but implemented as a loop that handles X*256 bytes
+ // followed by straight-line code to handle the rest (if any).
+ // The value of X is passed as an additional operand.
+ MVC_LOOP,
+
+ // Similar to MVC and MVC_LOOP, but for logic operations (AND, OR, XOR).
+ NC,
+ NC_LOOP,
+ OC,
+ OC_LOOP,
+ XC,
+ XC_LOOP,
+
+ // Use CLC to compare two blocks of memory, with the same comments
+ // as for MVC and MVC_LOOP.
+ CLC,
+ CLC_LOOP,
+
+ // Use an MVST-based sequence to implement stpcpy().
+ STPCPY,
+
+ // Use a CLST-based sequence to implement strcmp(). The two input operands
+ // are the addresses of the strings to compare.
+ STRCMP,
+
+ // Use an SRST-based sequence to search a block of memory. The first
+ // operand is the end address, the second is the start, and the third
+ // is the character to search for. CC is set to 1 on success and 2
+ // on failure.
+ SEARCH_STRING,
+
+ // Store the CC value in bits 29 and 28 of an integer.
+ IPM,
+
+ // Perform a serialization operation. (BCR 15,0 or BCR 14,0.)
+ SERIALIZE,
+
+ // Wrappers around the inner loop of an 8- or 16-bit ATOMIC_SWAP or
+ // ATOMIC_LOAD_<op>.
+ //
+ // Operand 0: the address of the containing 32-bit-aligned field
+ // Operand 1: the second operand of <op>, in the high bits of an i32
+ // for everything except ATOMIC_SWAPW
+ // Operand 2: how many bits to rotate the i32 left to bring the first
+ // operand into the high bits
+ // Operand 3: the negative of operand 2, for rotating the other way
+ // Operand 4: the width of the field in bits (8 or 16)
+ ATOMIC_SWAPW = ISD::FIRST_TARGET_MEMORY_OPCODE,
+ ATOMIC_LOADW_ADD,
+ ATOMIC_LOADW_SUB,
+ ATOMIC_LOADW_AND,
+ ATOMIC_LOADW_OR,
+ ATOMIC_LOADW_XOR,
+ ATOMIC_LOADW_NAND,
+ ATOMIC_LOADW_MIN,
+ ATOMIC_LOADW_MAX,
+ ATOMIC_LOADW_UMIN,
+ ATOMIC_LOADW_UMAX,
+
+ // A wrapper around the inner loop of an ATOMIC_CMP_SWAP.
+ //
+ // Operand 0: the address of the containing 32-bit-aligned field
+ // Operand 1: the compare value, in the low bits of an i32
+ // Operand 2: the swap value, in the low bits of an i32
+ // Operand 3: how many bits to rotate the i32 left to bring the first
+ // operand into the high bits
+ // Operand 4: the negative of operand 2, for rotating the other way
+ // Operand 5: the width of the field in bits (8 or 16)
+ ATOMIC_CMP_SWAPW,
+
+ // Prefetch from the second operand using the 4-bit control code in
+ // the first operand. The code is 1 for a load prefetch and 2 for
+ // a store prefetch.
+ PREFETCH
+};
+
+// Return true if OPCODE is some kind of PC-relative address.
+inline bool isPCREL(unsigned Opcode) {
+ return Opcode == PCREL_WRAPPER || Opcode == PCREL_OFFSET;
}
+} // end namespace SystemZISD
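The BR_CCMASK and SELECT_CCMASK comments above use a 4-bit condition-code mask in which bit N, counted in big-endian order, means "act if CC equals N". A small illustration of that convention (the constants mirror the in-tree SystemZ::CCMASK_* values but are restated here as an assumption for the example):

// After an integer compare, CC 0 means equal, CC 1 means the first operand
// is low and CC 2 means it is high, so "branch if equal" uses CCMASK_0 and
// "branch if not equal" uses CCMASK_1 | CCMASK_2.  Inverting a condition,
// as lowerSelectCC does, is a XOR of the mask with the valid-CC mask.
constexpr unsigned CCMASK_0 = 1 << 3;   // act if CC == 0
constexpr unsigned CCMASK_1 = 1 << 2;   // act if CC == 1
constexpr unsigned CCMASK_2 = 1 << 1;   // act if CC == 2
constexpr unsigned CCMASK_3 = 1 << 0;   // act if CC == 3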
namespace SystemZICMP {
- // Describes whether an integer comparison needs to be signed or unsigned,
- // or whether either type is OK.
- enum {
- Any,
- UnsignedOnly,
- SignedOnly
- };
-}
+// Describes whether an integer comparison needs to be signed or unsigned,
+// or whether either type is OK.
+enum {
+ Any,
+ UnsignedOnly,
+ SignedOnly
+};
+} // end namespace SystemZICMP
class SystemZSubtarget;
class SystemZTargetMachine;
class SystemZTargetLowering : public TargetLowering {
public:
- explicit SystemZTargetLowering(SystemZTargetMachine &TM);
+ explicit SystemZTargetLowering(const TargetMachine &TM);
// Override TargetLowering.
- virtual MVT getScalarShiftAmountTy(EVT LHSTy) const LLVM_OVERRIDE {
+ MVT getScalarShiftAmountTy(EVT LHSTy) const override {
return MVT::i32;
}
- virtual EVT getSetCCResultType(LLVMContext &, EVT) const LLVM_OVERRIDE;
- virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const LLVM_OVERRIDE;
- virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const LLVM_OVERRIDE;
- virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const
- LLVM_OVERRIDE;
- virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const
- LLVM_OVERRIDE;
- virtual bool isTruncateFree(Type *, Type *) const LLVM_OVERRIDE;
- virtual bool isTruncateFree(EVT, EVT) const LLVM_OVERRIDE;
- virtual const char *getTargetNodeName(unsigned Opcode) const LLVM_OVERRIDE;
- virtual std::pair<unsigned, const TargetRegisterClass *>
+ EVT getSetCCResultType(LLVMContext &, EVT) const override;
+ bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
+ bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
+ bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;
+ bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AS,
+ bool *Fast) const override;
+ bool isTruncateFree(Type *, Type *) const override;
+ bool isTruncateFree(EVT, EVT) const override;
+ const char *getTargetNodeName(unsigned Opcode) const override;
+ std::pair<unsigned, const TargetRegisterClass *>
getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT VT) const LLVM_OVERRIDE;
- virtual TargetLowering::ConstraintType
- getConstraintType(const std::string &Constraint) const LLVM_OVERRIDE;
- virtual TargetLowering::ConstraintWeight
+ MVT VT) const override;
+ TargetLowering::ConstraintType
+ getConstraintType(const std::string &Constraint) const override;
+ TargetLowering::ConstraintWeight
getSingleConstraintMatchWeight(AsmOperandInfo &info,
- const char *constraint) const LLVM_OVERRIDE;
- virtual void
- LowerAsmOperandForConstraint(SDValue Op,
- std::string &Constraint,
- std::vector<SDValue> &Ops,
- SelectionDAG &DAG) const LLVM_OVERRIDE;
- virtual MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *BB) const LLVM_OVERRIDE;
- virtual SDValue LowerOperation(SDValue Op,
- SelectionDAG &DAG) const LLVM_OVERRIDE;
- virtual bool allowTruncateForTailCall(Type *, Type *) const LLVM_OVERRIDE;
- virtual bool mayBeEmittedAsTailCall(CallInst *CI) const LLVM_OVERRIDE;
- virtual SDValue
- LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc DL, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const LLVM_OVERRIDE;
- virtual SDValue
- LowerCall(CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const LLVM_OVERRIDE;
-
- virtual SDValue
- LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool IsVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- SDLoc DL, SelectionDAG &DAG) const LLVM_OVERRIDE;
+ const char *constraint) const override;
+ void LowerAsmOperandForConstraint(SDValue Op,
+ std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const override;
+ MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *BB) const
+ override;
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+ bool allowTruncateForTailCall(Type *, Type *) const override;
+ bool mayBeEmittedAsTailCall(CallInst *CI) const override;
+ SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ SDLoc DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
+ SDValue LowerCall(CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const override;
+
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ SDLoc DL, SelectionDAG &DAG) const override;
+ SDValue prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL,
+ SelectionDAG &DAG) const override;
+ SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
private:
const SystemZSubtarget &Subtarget;
- const SystemZTargetMachine &TM;
// Implement LowerOperation for individual opcodes.
SDValue lowerSETCC(SDValue Op, SelectionDAG &DAG) const;
@@ -270,9 +271,13 @@ private:
SDValue lowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerOR(SDValue Op, SelectionDAG &DAG) const;
- SDValue lowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG,
- unsigned Opcode) const;
+ SDValue lowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerATOMIC_LOAD_OP(SDValue Op, SelectionDAG &DAG,
+ unsigned Opcode) const;
+ SDValue lowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerLOAD_SEQUENCE_POINT(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h b/contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h
index fb699b9..84196e9 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h
@@ -43,6 +43,6 @@ addFrameReference(const MachineInstrBuilder &MIB, int FI) {
return MIB.addFrameIndex(FI).addImm(Offset).addReg(0).addMemOperand(MMO);
}
-} // End llvm namespace
+} // end namespace llvm
#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td
index 6080046..e8841e1 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td
@@ -46,9 +46,9 @@ let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in {
defm LTDBR : LoadAndTestRRE<"ltdb", 0xB312, FP64>;
defm LTXBR : LoadAndTestRRE<"ltxb", 0xB342, FP128>;
}
-def : CompareZeroFP<LTEBRCompare, FP32>;
-def : CompareZeroFP<LTDBRCompare, FP64>;
-def : CompareZeroFP<LTXBRCompare, FP128>;
+defm : CompareZeroFP<LTEBRCompare, FP32>;
+defm : CompareZeroFP<LTDBRCompare, FP64>;
+defm : CompareZeroFP<LTXBRCompare, FP128>;
// Moves between 64-bit integer and floating-point registers.
def LGDR : UnaryRRE<"lgd", 0xB3CD, bitconvert, GR64, FP64>;
@@ -133,6 +133,13 @@ def LEDBR : UnaryRRE<"ledb", 0xB344, fround, FP32, FP64>;
def LEXBR : UnaryRRE<"lexb", 0xB346, null_frag, FP128, FP128>;
def LDXBR : UnaryRRE<"ldxb", 0xB345, null_frag, FP128, FP128>;
+def LEDBRA : UnaryRRF4<"ledbra", 0xB344, FP32, FP64>,
+ Requires<[FeatureFPExtension]>;
+def LEXBRA : UnaryRRF4<"lexbra", 0xB346, FP128, FP128>,
+ Requires<[FeatureFPExtension]>;
+def LDXBRA : UnaryRRF4<"ldxbra", 0xB345, FP128, FP128>,
+ Requires<[FeatureFPExtension]>;
+
def : Pat<(f32 (fround FP128:$src)),
(EXTRACT_SUBREG (LEXBR FP128:$src), subreg_hh32)>;
def : Pat<(f64 (fround FP128:$src)),
@@ -157,6 +164,25 @@ def CEGBR : UnaryRRE<"cegb", 0xB3A4, sint_to_fp, FP32, GR64>;
def CDGBR : UnaryRRE<"cdgb", 0xB3A5, sint_to_fp, FP64, GR64>;
def CXGBR : UnaryRRE<"cxgb", 0xB3A6, sint_to_fp, FP128, GR64>;
+// Convert an unsigned integer register value to a floating-point one.
+let Predicates = [FeatureFPExtension] in {
+ def CELFBR : UnaryRRF4<"celfbr", 0xB390, FP32, GR32>;
+ def CDLFBR : UnaryRRF4<"cdlfbr", 0xB391, FP64, GR32>;
+ def CXLFBR : UnaryRRF4<"cxlfbr", 0xB392, FP128, GR32>;
+
+ def CELGBR : UnaryRRF4<"celgbr", 0xB3A0, FP32, GR64>;
+ def CDLGBR : UnaryRRF4<"cdlgbr", 0xB3A1, FP64, GR64>;
+ def CXLGBR : UnaryRRF4<"cxlgbr", 0xB3A2, FP128, GR64>;
+
+ def : Pat<(f32 (uint_to_fp GR32:$src)), (CELFBR 0, GR32:$src, 0)>;
+ def : Pat<(f64 (uint_to_fp GR32:$src)), (CDLFBR 0, GR32:$src, 0)>;
+ def : Pat<(f128 (uint_to_fp GR32:$src)), (CXLFBR 0, GR32:$src, 0)>;
+
+ def : Pat<(f32 (uint_to_fp GR64:$src)), (CELGBR 0, GR64:$src, 0)>;
+ def : Pat<(f64 (uint_to_fp GR64:$src)), (CDLGBR 0, GR64:$src, 0)>;
+ def : Pat<(f128 (uint_to_fp GR64:$src)), (CXLGBR 0, GR64:$src, 0)>;
+}
+
// Convert a floating-point register value to a signed integer value,
// with the second operand (modifier M3) specifying the rounding mode.
let Defs = [CC] in {
@@ -178,6 +204,28 @@ def : Pat<(i64 (fp_to_sint FP32:$src)), (CGEBR 5, FP32:$src)>;
def : Pat<(i64 (fp_to_sint FP64:$src)), (CGDBR 5, FP64:$src)>;
def : Pat<(i64 (fp_to_sint FP128:$src)), (CGXBR 5, FP128:$src)>;
+// Convert a floating-point register value to an unsigned integer value.
+let Predicates = [FeatureFPExtension] in {
+ let Defs = [CC] in {
+ def CLFEBR : UnaryRRF4<"clfebr", 0xB39C, GR32, FP32>;
+ def CLFDBR : UnaryRRF4<"clfdbr", 0xB39D, GR32, FP64>;
+ def CLFXBR : UnaryRRF4<"clfxbr", 0xB39E, GR32, FP128>;
+
+ def CLGEBR : UnaryRRF4<"clgebr", 0xB3AC, GR64, FP32>;
+ def CLGDBR : UnaryRRF4<"clgdbr", 0xB3AD, GR64, FP64>;
+ def CLGXBR : UnaryRRF4<"clgxbr", 0xB3AE, GR64, FP128>;
+ }
+
+ def : Pat<(i32 (fp_to_uint FP32:$src)), (CLFEBR 5, FP32:$src, 0)>;
+ def : Pat<(i32 (fp_to_uint FP64:$src)), (CLFDBR 5, FP64:$src, 0)>;
+ def : Pat<(i32 (fp_to_uint FP128:$src)), (CLFXBR 5, FP128:$src, 0)>;
+
+ def : Pat<(i64 (fp_to_uint FP32:$src)), (CLGEBR 5, FP32:$src, 0)>;
+ def : Pat<(i64 (fp_to_uint FP64:$src)), (CLGDBR 5, FP64:$src, 0)>;
+ def : Pat<(i64 (fp_to_uint FP128:$src)), (CLGXBR 5, FP128:$src, 0)>;
+}
+
+
//===----------------------------------------------------------------------===//
// Unary arithmetic
//===----------------------------------------------------------------------===//
@@ -217,15 +265,6 @@ def FIEBR : UnaryRRF<"fieb", 0xB357, FP32, FP32>;
def FIDBR : UnaryRRF<"fidb", 0xB35F, FP64, FP64>;
def FIXBR : UnaryRRF<"fixb", 0xB347, FP128, FP128>;
-// Extended forms of the previous three instructions. M4 can be set to 4
-// to suppress detection of inexact conditions.
-def FIEBRA : UnaryRRF4<"fiebra", 0xB357, FP32, FP32>,
- Requires<[FeatureFPExtension]>;
-def FIDBRA : UnaryRRF4<"fidbra", 0xB35F, FP64, FP64>,
- Requires<[FeatureFPExtension]>;
-def FIXBRA : UnaryRRF4<"fixbra", 0xB347, FP128, FP128>,
- Requires<[FeatureFPExtension]>;
-
// frint rounds according to the current mode (modifier 0) and detects
// inexact conditions.
def : Pat<(frint FP32:$src), (FIEBR 0, FP32:$src)>;
@@ -233,6 +272,12 @@ def : Pat<(frint FP64:$src), (FIDBR 0, FP64:$src)>;
def : Pat<(frint FP128:$src), (FIXBR 0, FP128:$src)>;
let Predicates = [FeatureFPExtension] in {
+ // Extended forms of the FIxBR instructions. M4 can be set to 4
+ // to suppress detection of inexact conditions.
+ def FIEBRA : UnaryRRF4<"fiebra", 0xB357, FP32, FP32>;
+ def FIDBRA : UnaryRRF4<"fidbra", 0xB35F, FP64, FP64>;
+ def FIXBRA : UnaryRRF4<"fixbra", 0xB347, FP128, FP128>;
+
// fnearbyint is like frint but does not detect inexact conditions.
def : Pat<(fnearbyint FP32:$src), (FIEBRA 0, FP32:$src, 4)>;
def : Pat<(fnearbyint FP64:$src), (FIDBRA 0, FP64:$src, 4)>;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td
index a8efe16..9f59a1c 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td
@@ -511,30 +511,24 @@ class InstSS<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
// to store. Other stored registers are added as implicit uses.
//
// Unary:
-// One register output operand and one input operand. The input
-// operand may be a register, immediate or memory.
+// One register output operand and one input operand.
//
// Binary:
-// One register output operand and two input operands. The first
-// input operand is always a register and he second may be a register,
-// immediate or memory.
-//
-// Shift:
-// One register output operand and two input operands. The first
-// input operand is a register and the second has the same form as
-// an address (although it isn't actually used to address memory).
+// One register output operand and two input operands.
//
// Compare:
-// Two input operands. The first operand is always a register,
-// the second may be a register, immediate or memory.
+// Two input operands and an implicit CC output operand.
//
// Ternary:
-// One register output operand and three register input operands.
+// One register output operand and three input operands.
+//
+// LoadAndOp:
+// One output operand and two input operands, one of which is an address.
+// The instruction both reads from and writes to the address.
//
// CmpSwap:
-// One output operand and three input operands. The first two
-// operands are registers and the third is an address. The instruction
-// both reads from and writes to the address.
+// One output operand and three input operands, one of which is an address.
+// The instruction both reads from and writes to the address.
//
// RotateSelect:
// One output operand and five input operands. The first two operands
@@ -687,7 +681,7 @@ class CondStoreRSY<string mnemonic, bits<16> opcode,
class AsmCondStoreRSY<string mnemonic, bits<16> opcode,
RegisterOperand cls, bits<5> bytes,
AddressingMode mode = bdaddr20only>
- : InstRSY<opcode, (outs), (ins cls:$R1, mode:$BD2, uimm8zx4:$R3),
+ : InstRSY<opcode, (outs), (ins cls:$R1, mode:$BD2, imm32zx4:$R3),
mnemonic#"\t$R1, $BD2, $R3", []>,
Requires<[FeatureLoadStoreOnCond]> {
let mayStore = 1;
@@ -726,7 +720,7 @@ class UnaryRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
class UnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
RegisterOperand cls2>
- : InstRRF<opcode, (outs cls1:$R1), (ins uimm8zx4:$R3, cls2:$R2),
+ : InstRRF<opcode, (outs cls1:$R1), (ins imm32zx4:$R3, cls2:$R2),
mnemonic#"r\t$R1, $R3, $R2", []> {
let OpKey = mnemonic ## cls1;
let OpType = "reg";
@@ -735,7 +729,7 @@ class UnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
class UnaryRRF4<string mnemonic, bits<16> opcode, RegisterOperand cls1,
RegisterOperand cls2>
- : InstRRF<opcode, (outs cls1:$R1), (ins uimm8zx4:$R3, cls2:$R2, uimm8zx4:$R4),
+ : InstRRF<opcode, (outs cls1:$R1), (ins imm32zx4:$R3, cls2:$R2, imm32zx4:$R4),
mnemonic#"\t$R1, $R3, $R2, $R4", []>;
// These instructions are generated by if conversion. The old value of R1
@@ -753,7 +747,7 @@ class CondUnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
// mask is the third operand rather than being part of the mnemonic.
class AsmCondUnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
RegisterOperand cls2>
- : InstRRF<opcode, (outs cls1:$R1), (ins cls1:$R1src, cls2:$R2, uimm8zx4:$R3),
+ : InstRRF<opcode, (outs cls1:$R1), (ins cls1:$R1src, cls2:$R2, imm32zx4:$R3),
mnemonic#"r\t$R1, $R2, $R3", []>,
Requires<[FeatureLoadStoreOnCond]> {
let Constraints = "$R1 = $R1src";
@@ -819,7 +813,7 @@ class CondUnaryRSY<string mnemonic, bits<16> opcode,
class AsmCondUnaryRSY<string mnemonic, bits<16> opcode,
RegisterOperand cls, bits<5> bytes,
AddressingMode mode = bdaddr20only>
- : InstRSY<opcode, (outs cls:$R1), (ins cls:$R1src, mode:$BD2, uimm8zx4:$R3),
+ : InstRSY<opcode, (outs cls:$R1), (ins cls:$R1src, mode:$BD2, imm32zx4:$R3),
mnemonic#"\t$R1, $BD2, $R3", []>,
Requires<[FeatureLoadStoreOnCond]> {
let mayLoad = 1;
@@ -989,6 +983,33 @@ class BinaryRIL<string mnemonic, bits<12> opcode, SDPatternOperator operator,
let DisableEncoding = "$R1src";
}
+class BinaryRS<string mnemonic, bits<8> opcode, SDPatternOperator operator,
+ RegisterOperand cls>
+ : InstRS<opcode, (outs cls:$R1), (ins cls:$R1src, shift12only:$BD2),
+ mnemonic#"\t$R1, $BD2",
+ [(set cls:$R1, (operator cls:$R1src, shift12only:$BD2))]> {
+ let R3 = 0;
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+}
+
+class BinaryRSY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls>
+ : InstRSY<opcode, (outs cls:$R1), (ins cls:$R3, shift20only:$BD2),
+ mnemonic#"\t$R1, $R3, $BD2",
+ [(set cls:$R1, (operator cls:$R3, shift20only:$BD2))]>;
+
+multiclass BinaryRSAndK<string mnemonic, bits<8> opcode1, bits<16> opcode2,
+ SDPatternOperator operator, RegisterOperand cls> {
+ let NumOpsKey = mnemonic in {
+ let NumOpsValue = "3" in
+ def K : BinaryRSY<mnemonic##"k", opcode2, null_frag, cls>,
+ Requires<[FeatureDistinctOps]>;
+ let NumOpsValue = "2", isConvertibleToThreeAddress = 1 in
+ def "" : BinaryRS<mnemonic, opcode1, operator, cls>;
+ }
+}
+
class BinaryRX<string mnemonic, bits<8> opcode, SDPatternOperator operator,
RegisterOperand cls, SDPatternOperator load, bits<5> bytes,
AddressingMode mode = bdxaddr12only>
@@ -1073,33 +1094,6 @@ multiclass BinarySIPair<string mnemonic, bits<8> siOpcode,
}
}
-class ShiftRS<string mnemonic, bits<8> opcode, SDPatternOperator operator,
- RegisterOperand cls>
- : InstRS<opcode, (outs cls:$R1), (ins cls:$R1src, shift12only:$BD2),
- mnemonic#"\t$R1, $BD2",
- [(set cls:$R1, (operator cls:$R1src, shift12only:$BD2))]> {
- let R3 = 0;
- let Constraints = "$R1 = $R1src";
- let DisableEncoding = "$R1src";
-}
-
-class ShiftRSY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
- RegisterOperand cls>
- : InstRSY<opcode, (outs cls:$R1), (ins cls:$R3, shift20only:$BD2),
- mnemonic#"\t$R1, $R3, $BD2",
- [(set cls:$R1, (operator cls:$R3, shift20only:$BD2))]>;
-
-multiclass ShiftRSAndK<string mnemonic, bits<8> opcode1, bits<16> opcode2,
- SDPatternOperator operator, RegisterOperand cls> {
- let NumOpsKey = mnemonic in {
- let NumOpsValue = "3" in
- def K : ShiftRSY<mnemonic##"k", opcode2, null_frag, cls>,
- Requires<[FeatureDistinctOps]>;
- let NumOpsValue = "2", isConvertibleToThreeAddress = 1 in
- def "" : ShiftRS<mnemonic, opcode1, operator, cls>;
- }
-}
-
class CompareRR<string mnemonic, bits<8> opcode, SDPatternOperator operator,
RegisterOperand cls1, RegisterOperand cls2>
: InstRR<opcode, (outs), (ins cls1:$R1, cls2:$R2),
@@ -1267,6 +1261,15 @@ class TernaryRXF<string mnemonic, bits<16> opcode, SDPatternOperator operator,
let AccessBytes = bytes;
}
+class LoadAndOpRSY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls, AddressingMode mode = bdaddr20only>
+ : InstRSY<opcode, (outs cls:$R1), (ins cls:$R3, mode:$BD2),
+ mnemonic#"\t$R1, $R3, $BD2",
+ [(set cls:$R1, (operator mode:$BD2, cls:$R3))]> {
+ let mayLoad = 1;
+ let mayStore = 1;
+}
+
class CmpSwapRS<string mnemonic, bits<8> opcode, SDPatternOperator operator,
RegisterOperand cls, AddressingMode mode = bdaddr12only>
: InstRS<opcode, (outs cls:$R1), (ins cls:$R1src, cls:$R3, mode:$BD2),
@@ -1302,22 +1305,23 @@ multiclass CmpSwapRSPair<string mnemonic, bits<8> rsOpcode, bits<16> rsyOpcode,
class RotateSelectRIEf<string mnemonic, bits<16> opcode, RegisterOperand cls1,
RegisterOperand cls2>
: InstRIEf<opcode, (outs cls1:$R1),
- (ins cls1:$R1src, cls2:$R2, uimm8:$I3, uimm8:$I4, uimm8zx6:$I5),
+ (ins cls1:$R1src, cls2:$R2, imm32zx8:$I3, imm32zx8:$I4,
+ imm32zx6:$I5),
mnemonic#"\t$R1, $R2, $I3, $I4, $I5", []> {
let Constraints = "$R1 = $R1src";
let DisableEncoding = "$R1src";
}
class PrefetchRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator>
- : InstRXY<opcode, (outs), (ins uimm8zx4:$R1, bdxaddr20only:$XBD2),
+ : InstRXY<opcode, (outs), (ins imm32zx4:$R1, bdxaddr20only:$XBD2),
mnemonic##"\t$R1, $XBD2",
- [(operator uimm8zx4:$R1, bdxaddr20only:$XBD2)]>;
+ [(operator imm32zx4:$R1, bdxaddr20only:$XBD2)]>;
class PrefetchRILPC<string mnemonic, bits<12> opcode,
SDPatternOperator operator>
- : InstRIL<opcode, (outs), (ins uimm8zx4:$R1, pcrel32:$I2),
+ : InstRIL<opcode, (outs), (ins imm32zx4:$R1, pcrel32:$I2),
mnemonic##"\t$R1, $I2",
- [(operator uimm8zx4:$R1, pcrel32:$I2)]> {
+ [(operator imm32zx4:$R1, pcrel32:$I2)]> {
// We want PC-relative addresses to be tried ahead of BD and BDX addresses.
// However, BDXs have two extra operands and are therefore 6 units more
// complex.
@@ -1437,7 +1441,8 @@ class StoreRXYPseudo<SDPatternOperator operator, RegisterOperand cls,
// of registers.
class RotateSelectRIEfPseudo<RegisterOperand cls1, RegisterOperand cls2>
: Pseudo<(outs cls1:$R1),
- (ins cls1:$R1src, cls2:$R2, uimm8:$I3, uimm8:$I4, uimm8zx6:$I5),
+ (ins cls1:$R1src, cls2:$R2, imm32zx8:$I3, imm32zx8:$I4,
+ imm32zx6:$I5),
[]> {
let Constraints = "$R1 = $R1src";
let DisableEncoding = "$R1src";
@@ -1447,9 +1452,9 @@ class RotateSelectRIEfPseudo<RegisterOperand cls1, RegisterOperand cls2>
// the value of the PSW's 2-bit condition code field.
class SelectWrapper<RegisterOperand cls>
: Pseudo<(outs cls:$dst),
- (ins cls:$src1, cls:$src2, uimm8zx4:$valid, uimm8zx4:$cc),
+ (ins cls:$src1, cls:$src2, imm32zx4:$valid, imm32zx4:$cc),
[(set cls:$dst, (z_select_ccmask cls:$src1, cls:$src2,
- uimm8zx4:$valid, uimm8zx4:$cc))]> {
+ imm32zx4:$valid, imm32zx4:$cc))]> {
let usesCustomInserter = 1;
// Although the instructions used by these nodes do not in themselves
// change CC, the insertion requires new blocks, and CC cannot be live
@@ -1463,14 +1468,14 @@ multiclass CondStores<RegisterOperand cls, SDPatternOperator store,
SDPatternOperator load, AddressingMode mode> {
let Defs = [CC], Uses = [CC], usesCustomInserter = 1 in {
def "" : Pseudo<(outs),
- (ins cls:$new, mode:$addr, uimm8zx4:$valid, uimm8zx4:$cc),
+ (ins cls:$new, mode:$addr, imm32zx4:$valid, imm32zx4:$cc),
[(store (z_select_ccmask cls:$new, (load mode:$addr),
- uimm8zx4:$valid, uimm8zx4:$cc),
+ imm32zx4:$valid, imm32zx4:$cc),
mode:$addr)]>;
def Inv : Pseudo<(outs),
- (ins cls:$new, mode:$addr, uimm8zx4:$valid, uimm8zx4:$cc),
+ (ins cls:$new, mode:$addr, imm32zx4:$valid, imm32zx4:$cc),
[(store (z_select_ccmask (load mode:$addr), cls:$new,
- uimm8zx4:$valid, uimm8zx4:$cc),
+ imm32zx4:$valid, imm32zx4:$cc),
mode:$addr)]>;
}
}
@@ -1598,6 +1603,7 @@ class CompareAliasRI<SDPatternOperator operator, RegisterOperand cls,
// An alias of a RotateSelectRIEf, but with different register sizes.
class RotateSelectAliasRIEf<RegisterOperand cls1, RegisterOperand cls2>
: Alias<6, (outs cls1:$R1),
- (ins cls1:$R1src, cls2:$R2, uimm8:$I3, uimm8:$I4, uimm8zx6:$I5), []> {
+ (ins cls1:$R1src, cls2:$R2, imm32zx8:$I3, imm32zx8:$I4,
+ imm32zx6:$I5), []> {
let Constraints = "$R1 = $R1src";
}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index acfeed8..f58ab47 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -12,17 +12,17 @@
//===----------------------------------------------------------------------===//
#include "SystemZInstrInfo.h"
-#include "SystemZTargetMachine.h"
#include "SystemZInstrBuilder.h"
+#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+using namespace llvm;
+
#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRMAP_INFO
#include "SystemZGenInstrInfo.inc"
-using namespace llvm;
-
// Return a mask with Count low bits set.
static uint64_t allOnes(unsigned int Count) {
return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
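The double shift in allOnes() is deliberate: shifting a 64-bit value by 64 is undefined, so the Count == 64 case is handled by shifting by Count - 1 and then by one more bit. A quick standalone check of the boundary cases (duplicating the expression above for illustration):

#include <cassert>
#include <cstdint>

static uint64_t allOnesSketch(unsigned Count) {
  return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
}

int main() {
  assert(allOnesSketch(0) == 0);
  assert(allOnesSketch(1) == 1);
  assert(allOnesSketch(32) == 0xFFFFFFFFULL);
  assert(allOnesSketch(64) == ~uint64_t(0));   // a single "1 << 64" would be undefined
}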
@@ -40,9 +40,9 @@ static bool isHighReg(unsigned int Reg) {
// Pin the vtable to this file.
void SystemZInstrInfo::anchor() {}
-SystemZInstrInfo::SystemZInstrInfo(SystemZTargetMachine &tm)
+SystemZInstrInfo::SystemZInstrInfo(SystemZSubtarget &sti)
: SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
- RI(tm), TM(tm) {
+ RI(), STI(sti) {
}
// MI is a 128-bit load or store. Split it into two 64-bit loads or stores,
@@ -53,7 +53,7 @@ void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
MachineFunction &MF = *MBB->getParent();
// Get two load or store instructions. Use the original instruction for one
- // of them (arbitarily the second here) and create a clone for the other.
+ // of them (arbitrarily the second here) and create a clone for the other.
MachineInstr *EarlierMI = MF.CloneMachineInstr(MI);
MBB->insert(MI, EarlierMI);
@@ -280,15 +280,15 @@ bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
}
// If the block has any instructions after a JMP, delete them.
- while (llvm::next(I) != MBB.end())
- llvm::next(I)->eraseFromParent();
+ while (std::next(I) != MBB.end())
+ std::next(I)->eraseFromParent();
Cond.clear();
- FBB = 0;
+ FBB = nullptr;
// Delete the JMP if it's equivalent to a fall-through.
if (MBB.isLayoutSuccessor(Branch.Target->getMBB())) {
- TBB = 0;
+ TBB = nullptr;
I->eraseFromParent();
I = MBB.end();
continue;
@@ -418,7 +418,7 @@ bool SystemZInstrInfo::analyzeCompare(const MachineInstr *MI,
static MachineInstr *getDef(unsigned Reg,
const MachineRegisterInfo *MRI) {
if (TargetRegisterInfo::isPhysicalRegister(Reg))
- return 0;
+ return nullptr;
return MRI->getUniqueVRegDef(Reg);
}
@@ -442,7 +442,7 @@ static void eraseIfDead(MachineInstr *MI, const MachineRegisterInfo *MRI) {
static bool removeIPMBasedCompare(MachineInstr *Compare, unsigned SrcReg,
const MachineRegisterInfo *MRI,
const TargetRegisterInfo *TRI) {
- MachineInstr *LGFR = 0;
+ MachineInstr *LGFR = nullptr;
MachineInstr *RLL = getDef(SrcReg, MRI);
if (RLL && RLL->getOpcode() == SystemZ::LGFR) {
LGFR = RLL;
@@ -488,7 +488,7 @@ SystemZInstrInfo::optimizeCompareInstr(MachineInstr *Compare,
bool IsLogical = (Compare->getDesc().TSFlags & SystemZII::IsLogical) != 0;
if (Value == 0 &&
!IsLogical &&
- removeIPMBasedCompare(Compare, SrcReg, MRI, TM.getRegisterInfo()))
+ removeIPMBasedCompare(Compare, SrcReg, MRI, &RI))
return true;
return false;
}
@@ -505,7 +505,7 @@ static unsigned getConditionalMove(unsigned Opcode) {
bool SystemZInstrInfo::isPredicable(MachineInstr *MI) const {
unsigned Opcode = MI->getOpcode();
- if (TM.getSubtargetImpl()->hasLoadStoreOnCond() &&
+ if (STI.hasLoadStoreOnCond() &&
getConditionalMove(Opcode))
return true;
return false;
@@ -537,12 +537,12 @@ PredicateInstruction(MachineInstr *MI,
unsigned CCMask = Pred[1].getImm();
assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
unsigned Opcode = MI->getOpcode();
- if (TM.getSubtargetImpl()->hasLoadStoreOnCond()) {
+ if (STI.hasLoadStoreOnCond()) {
if (unsigned CondOpcode = getConditionalMove(Opcode)) {
MI->setDesc(get(CondOpcode));
MachineInstrBuilder(*MI->getParent()->getParent(), MI)
.addImm(CCValid).addImm(CCMask)
- .addReg(SystemZ::CC, RegState::Implicit);;
+ .addReg(SystemZ::CC, RegState::Implicit);
return true;
}
}
@@ -628,16 +628,16 @@ static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
}
namespace {
- struct LogicOp {
- LogicOp() : RegSize(0), ImmLSB(0), ImmSize(0) {}
- LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
- : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}
+struct LogicOp {
+ LogicOp() : RegSize(0), ImmLSB(0), ImmSize(0) {}
+ LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
+ : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}
- operator bool() const { return RegSize; }
+ operator bool() const { return RegSize; }
- unsigned RegSize, ImmLSB, ImmSize;
- };
-}
+ unsigned RegSize, ImmLSB, ImmSize;
+};
+} // end anonymous namespace
static LogicOp interpretAndImmediate(unsigned Opcode) {
switch (Opcode) {
@@ -685,7 +685,7 @@ SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
// We prefer to keep the two-operand form where possible both
// because it tends to be shorter and because some instructions
// have memory forms that can be used during spilling.
- if (TM.getSubtargetImpl()->hasDistinctOps()) {
+ if (STI.hasDistinctOps()) {
MachineOperand &Dest = MI->getOperand(0);
MachineOperand &Src = MI->getOperand(1);
unsigned DestReg = Dest.getReg();
@@ -740,7 +740,7 @@ SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
return finishConvertToThreeAddress(MI, MIB, LV);
}
}
- return 0;
+ return nullptr;
}
MachineInstr *
@@ -761,12 +761,12 @@ SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
.addFrameIndex(FrameIndex).addImm(0)
.addImm(MI->getOperand(2).getImm());
}
- return 0;
+ return nullptr;
}
// All other cases require a single operand.
if (Ops.size() != 1)
- return 0;
+ return nullptr;
unsigned OpNum = Ops[0];
assert(Size == MF.getRegInfo()
@@ -858,14 +858,14 @@ SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
}
}
- return 0;
+ return nullptr;
}
MachineInstr *
SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
- return 0;
+ return nullptr;
}
bool
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
index be4c8fe..83009cb 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -26,93 +26,94 @@ namespace llvm {
class SystemZTargetMachine;
namespace SystemZII {
- enum {
- // See comments in SystemZInstrFormats.td.
- SimpleBDXLoad = (1 << 0),
- SimpleBDXStore = (1 << 1),
- Has20BitOffset = (1 << 2),
- HasIndex = (1 << 3),
- Is128Bit = (1 << 4),
- AccessSizeMask = (31 << 5),
- AccessSizeShift = 5,
- CCValuesMask = (15 << 10),
- CCValuesShift = 10,
- CompareZeroCCMaskMask = (15 << 14),
- CompareZeroCCMaskShift = 14,
- CCMaskFirst = (1 << 18),
- CCMaskLast = (1 << 19),
- IsLogical = (1 << 20)
- };
- static inline unsigned getAccessSize(unsigned int Flags) {
- return (Flags & AccessSizeMask) >> AccessSizeShift;
- }
- static inline unsigned getCCValues(unsigned int Flags) {
- return (Flags & CCValuesMask) >> CCValuesShift;
- }
- static inline unsigned getCompareZeroCCMask(unsigned int Flags) {
- return (Flags & CompareZeroCCMaskMask) >> CompareZeroCCMaskShift;
- }
-
- // SystemZ MachineOperand target flags.
- enum {
- // Masks out the bits for the access model.
- MO_SYMBOL_MODIFIER = (1 << 0),
-
- // @GOT (aka @GOTENT)
- MO_GOT = (1 << 0)
- };
- // Classifies a branch.
- enum BranchType {
- // An instruction that branches on the current value of CC.
- BranchNormal,
-
- // An instruction that peforms a 32-bit signed comparison and branches
- // on the result.
- BranchC,
-
- // An instruction that peforms a 32-bit unsigned comparison and branches
- // on the result.
- BranchCL,
-
- // An instruction that peforms a 64-bit signed comparison and branches
- // on the result.
- BranchCG,
-
- // An instruction that peforms a 64-bit unsigned comparison and branches
- // on the result.
- BranchCLG,
+enum {
+ // See comments in SystemZInstrFormats.td.
+ SimpleBDXLoad = (1 << 0),
+ SimpleBDXStore = (1 << 1),
+ Has20BitOffset = (1 << 2),
+ HasIndex = (1 << 3),
+ Is128Bit = (1 << 4),
+ AccessSizeMask = (31 << 5),
+ AccessSizeShift = 5,
+ CCValuesMask = (15 << 10),
+ CCValuesShift = 10,
+ CompareZeroCCMaskMask = (15 << 14),
+ CompareZeroCCMaskShift = 14,
+ CCMaskFirst = (1 << 18),
+ CCMaskLast = (1 << 19),
+ IsLogical = (1 << 20)
+};
+static inline unsigned getAccessSize(unsigned int Flags) {
+ return (Flags & AccessSizeMask) >> AccessSizeShift;
+}
+static inline unsigned getCCValues(unsigned int Flags) {
+ return (Flags & CCValuesMask) >> CCValuesShift;
+}
+static inline unsigned getCompareZeroCCMask(unsigned int Flags) {
+ return (Flags & CompareZeroCCMaskMask) >> CompareZeroCCMaskShift;
+}
- // An instruction that decrements a 32-bit register and branches if
- // the result is nonzero.
- BranchCT,
+// SystemZ MachineOperand target flags.
+enum {
+ // Masks out the bits for the access model.
+ MO_SYMBOL_MODIFIER = (1 << 0),
- // An instruction that decrements a 64-bit register and branches if
- // the result is nonzero.
- BranchCTG
- };
- // Information about a branch instruction.
- struct Branch {
- // The type of the branch.
- BranchType Type;
+ // @GOT (aka @GOTENT)
+ MO_GOT = (1 << 0)
+};
+// Classifies a branch.
+enum BranchType {
+ // An instruction that branches on the current value of CC.
+ BranchNormal,
+
+ // An instruction that performs a 32-bit signed comparison and branches
+ // on the result.
+ BranchC,
+
+ // An instruction that performs a 32-bit unsigned comparison and branches
+ // on the result.
+ BranchCL,
+
+ // An instruction that performs a 64-bit signed comparison and branches
+ // on the result.
+ BranchCG,
+
+ // An instruction that performs a 64-bit unsigned comparison and branches
+ // on the result.
+ BranchCLG,
+
+ // An instruction that decrements a 32-bit register and branches if
+ // the result is nonzero.
+ BranchCT,
+
+ // An instruction that decrements a 64-bit register and branches if
+ // the result is nonzero.
+ BranchCTG
+};
+// Information about a branch instruction.
+struct Branch {
+ // The type of the branch.
+ BranchType Type;
- // CCMASK_<N> is set if CC might be equal to N.
- unsigned CCValid;
+ // CCMASK_<N> is set if CC might be equal to N.
+ unsigned CCValid;
- // CCMASK_<N> is set if the branch should be taken when CC == N.
- unsigned CCMask;
+ // CCMASK_<N> is set if the branch should be taken when CC == N.
+ unsigned CCMask;
- // The target of the branch.
- const MachineOperand *Target;
+ // The target of the branch.
+ const MachineOperand *Target;
- Branch(BranchType type, unsigned ccValid, unsigned ccMask,
- const MachineOperand *target)
- : Type(type), CCValid(ccValid), CCMask(ccMask), Target(target) {}
- };
-}
+ Branch(BranchType type, unsigned ccValid, unsigned ccMask,
+ const MachineOperand *target)
+ : Type(type), CCValid(ccValid), CCMask(ccMask), Target(target) {}
+};
+} // end namespace SystemZII
+class SystemZSubtarget;
class SystemZInstrInfo : public SystemZGenInstrInfo {
const SystemZRegisterInfo RI;
- SystemZTargetMachine &TM;
+ SystemZSubtarget &STI;
void splitMove(MachineBasicBlock::iterator MI, unsigned NewOpcode) const;
void splitAdjDynAlloc(MachineBasicBlock::iterator MI) const;
@@ -130,81 +131,66 @@ class SystemZInstrInfo : public SystemZGenInstrInfo {
virtual void anchor();
public:
- explicit SystemZInstrInfo(SystemZTargetMachine &TM);
+ explicit SystemZInstrInfo(SystemZSubtarget &STI);
// Override TargetInstrInfo.
- virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const LLVM_OVERRIDE;
- virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const LLVM_OVERRIDE;
- virtual bool isStackSlotCopy(const MachineInstr *MI, int &DestFrameIndex,
- int &SrcFrameIndex) const LLVM_OVERRIDE;
- virtual bool AnalyzeBranch(MachineBasicBlock &MBB,
- MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const LLVM_OVERRIDE;
- virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const LLVM_OVERRIDE;
- virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const LLVM_OVERRIDE;
+ unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
+ unsigned isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
+ bool isStackSlotCopy(const MachineInstr *MI, int &DestFrameIndex,
+ int &SrcFrameIndex) const override;
+ bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const override;
+ unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
+ unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const override;
bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
- unsigned &SrcReg2, int &Mask, int &Value) const
- LLVM_OVERRIDE;
+ unsigned &SrcReg2, int &Mask, int &Value) const override;
bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
unsigned SrcReg2, int Mask, int Value,
- const MachineRegisterInfo *MRI) const LLVM_OVERRIDE;
- virtual bool isPredicable(MachineInstr *MI) const LLVM_OVERRIDE;
- virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
- unsigned ExtraPredCycles,
- const BranchProbability &Probability) const
- LLVM_OVERRIDE;
- virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
- unsigned NumCyclesT,
- unsigned ExtraPredCyclesT,
- MachineBasicBlock &FMBB,
- unsigned NumCyclesF,
- unsigned ExtraPredCyclesF,
- const BranchProbability &Probability) const
- LLVM_OVERRIDE;
- virtual bool
- PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Pred) const
- LLVM_OVERRIDE;
- virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const LLVM_OVERRIDE;
- virtual void
- storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const LLVM_OVERRIDE;
- virtual void
- loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, int FrameIdx,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const LLVM_OVERRIDE;
- virtual MachineInstr *
- convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
- LiveVariables *LV) const;
- virtual MachineInstr *
- foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const;
- virtual MachineInstr *
- foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const;
- virtual bool
- expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const LLVM_OVERRIDE;
- virtual bool
- ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const
- LLVM_OVERRIDE;
+ const MachineRegisterInfo *MRI) const override;
+ bool isPredicable(MachineInstr *MI) const override;
+ bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
+ unsigned ExtraPredCycles,
+ const BranchProbability &Probability) const override;
+ bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
+ unsigned NumCyclesT, unsigned ExtraPredCyclesT,
+ MachineBasicBlock &FMBB,
+ unsigned NumCyclesF, unsigned ExtraPredCyclesF,
+ const BranchProbability &Probability) const override;
+ bool PredicateInstruction(MachineInstr *MI,
+ const SmallVectorImpl<MachineOperand> &Pred) const
+ override;
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ DebugLoc DL, unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const override;
+ void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+ void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned DestReg, int FrameIdx,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+ MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
+ MachineBasicBlock::iterator &MBBI,
+ LiveVariables *LV) const override;
+ MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const override;
+ MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ MachineInstr* LoadMI) const override;
+ bool expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const override;
+ bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const
+ override;
// Return the SystemZRegisterInfo, which this class owns.
const SystemZRegisterInfo &getRegisterInfo() const { return RI; }
@@ -244,7 +230,7 @@ public:
// BRANCH exists, return the opcode for the latter, otherwise return 0.
// MI, if nonnull, is the compare instruction.
unsigned getCompareAndBranch(unsigned Opcode,
- const MachineInstr *MI = 0) const;
+ const MachineInstr *MI = nullptr) const;
// Emit code before MBBI in MI to move immediate value Value into
// physical register Reg.
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
index 6524e44..f4951ad 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -63,11 +63,11 @@ let isBranch = 1, isTerminator = 1, Uses = [CC] in {
def BRCL : InstRIL<0xC04, (outs), (ins cond4:$valid, cond4:$R1,
brtarget32:$I2), "jg$R1\t$I2", []>;
}
- def AsmBRC : InstRI<0xA74, (outs), (ins uimm8zx4:$R1, brtarget16:$I2),
+ def AsmBRC : InstRI<0xA74, (outs), (ins imm32zx4:$R1, brtarget16:$I2),
"brc\t$R1, $I2", []>;
- def AsmBRCL : InstRIL<0xC04, (outs), (ins uimm8zx4:$R1, brtarget32:$I2),
+ def AsmBRCL : InstRIL<0xC04, (outs), (ins imm32zx4:$R1, brtarget32:$I2),
"brcl\t$R1, $I2", []>;
- def AsmBCR : InstRR<0x07, (outs), (ins uimm8zx4:$R1, GR64:$R2),
+ def AsmBCR : InstRR<0x07, (outs), (ins imm32zx4:$R1, GR64:$R2),
"bcr\t$R1, $R2", []>;
}
@@ -109,7 +109,7 @@ multiclass CompareBranches<Operand ccmask, string pos1, string pos2> {
}
let isCodeGenOnly = 1 in
defm C : CompareBranches<cond4, "$M3", "">;
-defm AsmC : CompareBranches<uimm8zx4, "", "$M3, ">;
+defm AsmC : CompareBranches<imm32zx4, "", "$M3, ">;
// Define AsmParser mnemonics for each general condition-code mask
// (integer or floating-point)
@@ -233,9 +233,7 @@ defm CondStore64 : CondStores<GR64, nonvolatile_store,
// Call instructions
//===----------------------------------------------------------------------===//
-// The definitions here are for the call-clobbered registers.
-let isCall = 1, Defs = [R0D, R1D, R2D, R3D, R4D, R5D, R14D,
- F0D, F1D, F2D, F3D, F4D, F5D, F6D, F7D, CC] in {
+let isCall = 1, Defs = [R14D, CC] in {
def CallBRASL : Alias<6, (outs), (ins pcrel32:$I2, variable_ops),
[(z_call pcrel32:$I2)]>;
def CallBASR : Alias<2, (outs), (ins ADDR64:$R2, variable_ops),
@@ -595,22 +593,28 @@ let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isMoveImm = 1,
let Defs = [CC] in {
let CCValues = 0xF, CompareZeroCCMask = 0x8 in {
- def LPR : UnaryRR <"lp", 0x10, z_iabs32, GR32, GR32>;
- def LPGR : UnaryRRE<"lpg", 0xB900, z_iabs64, GR64, GR64>;
+ def LPR : UnaryRR <"lp", 0x10, z_iabs, GR32, GR32>;
+ def LPGR : UnaryRRE<"lpg", 0xB900, z_iabs, GR64, GR64>;
}
let CCValues = 0xE, CompareZeroCCMask = 0xE in
def LPGFR : UnaryRRE<"lpgf", 0xB910, null_frag, GR64, GR32>;
}
+def : Pat<(z_iabs32 GR32:$src), (LPR GR32:$src)>;
+def : Pat<(z_iabs64 GR64:$src), (LPGR GR64:$src)>;
+defm : SXU<z_iabs, LPGFR>;
defm : SXU<z_iabs64, LPGFR>;
let Defs = [CC] in {
let CCValues = 0xF, CompareZeroCCMask = 0x8 in {
- def LNR : UnaryRR <"ln", 0x11, z_inegabs32, GR32, GR32>;
- def LNGR : UnaryRRE<"lng", 0xB901, z_inegabs64, GR64, GR64>;
+ def LNR : UnaryRR <"ln", 0x11, z_inegabs, GR32, GR32>;
+ def LNGR : UnaryRRE<"lng", 0xB901, z_inegabs, GR64, GR64>;
}
let CCValues = 0xE, CompareZeroCCMask = 0xE in
def LNGFR : UnaryRRE<"lngf", 0xB911, null_frag, GR64, GR32>;
}
+def : Pat<(z_inegabs32 GR32:$src), (LNR GR32:$src)>;
+def : Pat<(z_inegabs64 GR64:$src), (LNGR GR64:$src)>;
+defm : SXU<z_inegabs, LNGFR>;
defm : SXU<z_inegabs64, LNGFR>;
let Defs = [CC] in {
@@ -753,7 +757,7 @@ let Defs = [CC], Uses = [CC] in {
// Subtraction
//===----------------------------------------------------------------------===//
-// Plain substraction. Although immediate forms exist, we use the
+// Plain subtraction. Although immediate forms exist, we use the
// add-immediate instruction instead.
let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0x8 in {
// Subtraction of a register.
@@ -849,7 +853,7 @@ let Defs = [CC] in {
}
// AND to memory
- defm NI : BinarySIPair<"ni", 0x94, 0xEB54, null_frag, uimm8>;
+ defm NI : BinarySIPair<"ni", 0x94, 0xEB54, null_frag, imm32zx8>;
// Block AND.
let mayLoad = 1, mayStore = 1 in
@@ -906,7 +910,7 @@ let Defs = [CC] in {
}
// OR to memory
- defm OI : BinarySIPair<"oi", 0x96, 0xEB56, null_frag, uimm8>;
+ defm OI : BinarySIPair<"oi", 0x96, 0xEB56, null_frag, imm32zx8>;
// Block OR.
let mayLoad = 1, mayStore = 1 in
@@ -946,7 +950,7 @@ let Defs = [CC] in {
}
// XOR to memory
- defm XI : BinarySIPair<"xi", 0x97, 0xEB57, null_frag, uimm8>;
+ defm XI : BinarySIPair<"xi", 0x97, 0xEB57, null_frag, imm32zx8>;
// Block XOR.
let mayLoad = 1, mayStore = 1 in
@@ -1009,26 +1013,26 @@ def DLG : BinaryRXY<"dlg", 0xE387, z_udivrem64, GR128, load, 8>;
// Shift left.
let neverHasSideEffects = 1 in {
- defm SLL : ShiftRSAndK<"sll", 0x89, 0xEBDF, shl, GR32>;
- def SLLG : ShiftRSY<"sllg", 0xEB0D, shl, GR64>;
+ defm SLL : BinaryRSAndK<"sll", 0x89, 0xEBDF, shl, GR32>;
+ def SLLG : BinaryRSY<"sllg", 0xEB0D, shl, GR64>;
}
// Logical shift right.
let neverHasSideEffects = 1 in {
- defm SRL : ShiftRSAndK<"srl", 0x88, 0xEBDE, srl, GR32>;
- def SRLG : ShiftRSY<"srlg", 0xEB0C, srl, GR64>;
+ defm SRL : BinaryRSAndK<"srl", 0x88, 0xEBDE, srl, GR32>;
+ def SRLG : BinaryRSY<"srlg", 0xEB0C, srl, GR64>;
}
// Arithmetic shift right.
let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in {
- defm SRA : ShiftRSAndK<"sra", 0x8A, 0xEBDC, sra, GR32>;
- def SRAG : ShiftRSY<"srag", 0xEB0A, sra, GR64>;
+ defm SRA : BinaryRSAndK<"sra", 0x8A, 0xEBDC, sra, GR32>;
+ def SRAG : BinaryRSY<"srag", 0xEB0A, sra, GR64>;
}
// Rotate left.
let neverHasSideEffects = 1 in {
- def RLL : ShiftRSY<"rll", 0xEB1D, rotl, GR32>;
- def RLLG : ShiftRSY<"rllg", 0xEB1C, rotl, GR64>;
+ def RLL : BinaryRSY<"rll", 0xEB1D, rotl, GR32>;
+ def RLLG : BinaryRSY<"rllg", 0xEB1C, rotl, GR64>;
}
// Rotate second operand left and insert selected bits into first operand.
@@ -1043,15 +1047,15 @@ let Defs = [CC] in {
// Forms of RISBG that only affect one word of the destination register.
// They do not set CC.
-def RISBMux : RotateSelectRIEfPseudo<GRX32, GRX32>, Requires<[FeatureHighWord]>;
-def RISBLL : RotateSelectAliasRIEf<GR32, GR32>, Requires<[FeatureHighWord]>;
-def RISBLH : RotateSelectAliasRIEf<GR32, GRH32>, Requires<[FeatureHighWord]>;
-def RISBHL : RotateSelectAliasRIEf<GRH32, GR32>, Requires<[FeatureHighWord]>;
-def RISBHH : RotateSelectAliasRIEf<GRH32, GRH32>, Requires<[FeatureHighWord]>;
-def RISBLG : RotateSelectRIEf<"risblg", 0xEC51, GR32, GR64>,
- Requires<[FeatureHighWord]>;
-def RISBHG : RotateSelectRIEf<"risbhg", 0xEC5D, GRH32, GR64>,
- Requires<[FeatureHighWord]>;
+let Predicates = [FeatureHighWord] in {
+ def RISBMux : RotateSelectRIEfPseudo<GRX32, GRX32>;
+ def RISBLL : RotateSelectAliasRIEf<GR32, GR32>;
+ def RISBLH : RotateSelectAliasRIEf<GR32, GRH32>;
+ def RISBHL : RotateSelectAliasRIEf<GRH32, GR32>;
+ def RISBHH : RotateSelectAliasRIEf<GRH32, GRH32>;
+ def RISBLG : RotateSelectRIEf<"risblg", 0xEC51, GR32, GR64>;
+ def RISBHG : RotateSelectRIEf<"risbhg", 0xEC5D, GRH32, GR64>;
+}
// Rotate second operand left and perform a logical operation with selected
// bits of the first operand. The CC result only describes the selected bits,
@@ -1195,58 +1199,89 @@ def PFDRL : PrefetchRILPC<"pfdrl", 0xC62, z_prefetch>;
// Atomic operations
//===----------------------------------------------------------------------===//
-def ATOMIC_SWAPW : AtomicLoadWBinaryReg<z_atomic_swapw>;
-def ATOMIC_SWAP_32 : AtomicLoadBinaryReg32<atomic_swap_32>;
-def ATOMIC_SWAP_64 : AtomicLoadBinaryReg64<atomic_swap_64>;
-
-def ATOMIC_LOADW_AR : AtomicLoadWBinaryReg<z_atomic_loadw_add>;
-def ATOMIC_LOADW_AFI : AtomicLoadWBinaryImm<z_atomic_loadw_add, simm32>;
-def ATOMIC_LOAD_AR : AtomicLoadBinaryReg32<atomic_load_add_32>;
-def ATOMIC_LOAD_AHI : AtomicLoadBinaryImm32<atomic_load_add_32, imm32sx16>;
-def ATOMIC_LOAD_AFI : AtomicLoadBinaryImm32<atomic_load_add_32, simm32>;
-def ATOMIC_LOAD_AGR : AtomicLoadBinaryReg64<atomic_load_add_64>;
-def ATOMIC_LOAD_AGHI : AtomicLoadBinaryImm64<atomic_load_add_64, imm64sx16>;
-def ATOMIC_LOAD_AGFI : AtomicLoadBinaryImm64<atomic_load_add_64, imm64sx32>;
-
-def ATOMIC_LOADW_SR : AtomicLoadWBinaryReg<z_atomic_loadw_sub>;
-def ATOMIC_LOAD_SR : AtomicLoadBinaryReg32<atomic_load_sub_32>;
-def ATOMIC_LOAD_SGR : AtomicLoadBinaryReg64<atomic_load_sub_64>;
-
-def ATOMIC_LOADW_NR : AtomicLoadWBinaryReg<z_atomic_loadw_and>;
-def ATOMIC_LOADW_NILH : AtomicLoadWBinaryImm<z_atomic_loadw_and, imm32lh16c>;
-def ATOMIC_LOAD_NR : AtomicLoadBinaryReg32<atomic_load_and_32>;
-def ATOMIC_LOAD_NILL : AtomicLoadBinaryImm32<atomic_load_and_32, imm32ll16c>;
-def ATOMIC_LOAD_NILH : AtomicLoadBinaryImm32<atomic_load_and_32, imm32lh16c>;
-def ATOMIC_LOAD_NILF : AtomicLoadBinaryImm32<atomic_load_and_32, uimm32>;
-def ATOMIC_LOAD_NGR : AtomicLoadBinaryReg64<atomic_load_and_64>;
-def ATOMIC_LOAD_NILL64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64ll16c>;
-def ATOMIC_LOAD_NILH64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64lh16c>;
-def ATOMIC_LOAD_NIHL64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hl16c>;
-def ATOMIC_LOAD_NIHH64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hh16c>;
-def ATOMIC_LOAD_NILF64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64lf32c>;
-def ATOMIC_LOAD_NIHF64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hf32c>;
+def Serialize : Alias<2, (outs), (ins), [(z_serialize)]>;
+
+let Predicates = [FeatureInterlockedAccess1], Defs = [CC] in {
+ def LAA : LoadAndOpRSY<"laa", 0xEBF8, atomic_load_add_32, GR32>;
+ def LAAG : LoadAndOpRSY<"laag", 0xEBE8, atomic_load_add_64, GR64>;
+ def LAAL : LoadAndOpRSY<"laal", 0xEBFA, null_frag, GR32>;
+ def LAALG : LoadAndOpRSY<"laalg", 0xEBEA, null_frag, GR64>;
+ def LAN : LoadAndOpRSY<"lan", 0xEBF4, atomic_load_and_32, GR32>;
+ def LANG : LoadAndOpRSY<"lang", 0xEBE4, atomic_load_and_64, GR64>;
+ def LAO : LoadAndOpRSY<"lao", 0xEBF6, atomic_load_or_32, GR32>;
+ def LAOG : LoadAndOpRSY<"laog", 0xEBE6, atomic_load_or_64, GR64>;
+ def LAX : LoadAndOpRSY<"lax", 0xEBF7, atomic_load_xor_32, GR32>;
+ def LAXG : LoadAndOpRSY<"laxg", 0xEBE7, atomic_load_xor_64, GR64>;
+}
+
+def ATOMIC_SWAPW : AtomicLoadWBinaryReg<z_atomic_swapw>;
+def ATOMIC_SWAP_32 : AtomicLoadBinaryReg32<atomic_swap_32>;
+def ATOMIC_SWAP_64 : AtomicLoadBinaryReg64<atomic_swap_64>;
+
+def ATOMIC_LOADW_AR : AtomicLoadWBinaryReg<z_atomic_loadw_add>;
+def ATOMIC_LOADW_AFI : AtomicLoadWBinaryImm<z_atomic_loadw_add, simm32>;
+let Predicates = [FeatureNoInterlockedAccess1] in {
+ def ATOMIC_LOAD_AR : AtomicLoadBinaryReg32<atomic_load_add_32>;
+ def ATOMIC_LOAD_AHI : AtomicLoadBinaryImm32<atomic_load_add_32, imm32sx16>;
+ def ATOMIC_LOAD_AFI : AtomicLoadBinaryImm32<atomic_load_add_32, simm32>;
+ def ATOMIC_LOAD_AGR : AtomicLoadBinaryReg64<atomic_load_add_64>;
+ def ATOMIC_LOAD_AGHI : AtomicLoadBinaryImm64<atomic_load_add_64, imm64sx16>;
+ def ATOMIC_LOAD_AGFI : AtomicLoadBinaryImm64<atomic_load_add_64, imm64sx32>;
+}
+
+def ATOMIC_LOADW_SR : AtomicLoadWBinaryReg<z_atomic_loadw_sub>;
+def ATOMIC_LOAD_SR : AtomicLoadBinaryReg32<atomic_load_sub_32>;
+def ATOMIC_LOAD_SGR : AtomicLoadBinaryReg64<atomic_load_sub_64>;
+
+def ATOMIC_LOADW_NR : AtomicLoadWBinaryReg<z_atomic_loadw_and>;
+def ATOMIC_LOADW_NILH : AtomicLoadWBinaryImm<z_atomic_loadw_and, imm32lh16c>;
+let Predicates = [FeatureNoInterlockedAccess1] in {
+ def ATOMIC_LOAD_NR : AtomicLoadBinaryReg32<atomic_load_and_32>;
+ def ATOMIC_LOAD_NILL : AtomicLoadBinaryImm32<atomic_load_and_32,
+ imm32ll16c>;
+ def ATOMIC_LOAD_NILH : AtomicLoadBinaryImm32<atomic_load_and_32,
+ imm32lh16c>;
+ def ATOMIC_LOAD_NILF : AtomicLoadBinaryImm32<atomic_load_and_32, uimm32>;
+ def ATOMIC_LOAD_NGR : AtomicLoadBinaryReg64<atomic_load_and_64>;
+ def ATOMIC_LOAD_NILL64 : AtomicLoadBinaryImm64<atomic_load_and_64,
+ imm64ll16c>;
+ def ATOMIC_LOAD_NILH64 : AtomicLoadBinaryImm64<atomic_load_and_64,
+ imm64lh16c>;
+ def ATOMIC_LOAD_NIHL64 : AtomicLoadBinaryImm64<atomic_load_and_64,
+ imm64hl16c>;
+ def ATOMIC_LOAD_NIHH64 : AtomicLoadBinaryImm64<atomic_load_and_64,
+ imm64hh16c>;
+ def ATOMIC_LOAD_NILF64 : AtomicLoadBinaryImm64<atomic_load_and_64,
+ imm64lf32c>;
+ def ATOMIC_LOAD_NIHF64 : AtomicLoadBinaryImm64<atomic_load_and_64,
+ imm64hf32c>;
+}
def ATOMIC_LOADW_OR : AtomicLoadWBinaryReg<z_atomic_loadw_or>;
def ATOMIC_LOADW_OILH : AtomicLoadWBinaryImm<z_atomic_loadw_or, imm32lh16>;
-def ATOMIC_LOAD_OR : AtomicLoadBinaryReg32<atomic_load_or_32>;
-def ATOMIC_LOAD_OILL : AtomicLoadBinaryImm32<atomic_load_or_32, imm32ll16>;
-def ATOMIC_LOAD_OILH : AtomicLoadBinaryImm32<atomic_load_or_32, imm32lh16>;
-def ATOMIC_LOAD_OILF : AtomicLoadBinaryImm32<atomic_load_or_32, uimm32>;
-def ATOMIC_LOAD_OGR : AtomicLoadBinaryReg64<atomic_load_or_64>;
-def ATOMIC_LOAD_OILL64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64ll16>;
-def ATOMIC_LOAD_OILH64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lh16>;
-def ATOMIC_LOAD_OIHL64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hl16>;
-def ATOMIC_LOAD_OIHH64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hh16>;
-def ATOMIC_LOAD_OILF64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lf32>;
-def ATOMIC_LOAD_OIHF64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hf32>;
+let Predicates = [FeatureNoInterlockedAccess1] in {
+ def ATOMIC_LOAD_OR : AtomicLoadBinaryReg32<atomic_load_or_32>;
+ def ATOMIC_LOAD_OILL : AtomicLoadBinaryImm32<atomic_load_or_32, imm32ll16>;
+ def ATOMIC_LOAD_OILH : AtomicLoadBinaryImm32<atomic_load_or_32, imm32lh16>;
+ def ATOMIC_LOAD_OILF : AtomicLoadBinaryImm32<atomic_load_or_32, uimm32>;
+ def ATOMIC_LOAD_OGR : AtomicLoadBinaryReg64<atomic_load_or_64>;
+ def ATOMIC_LOAD_OILL64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64ll16>;
+ def ATOMIC_LOAD_OILH64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lh16>;
+ def ATOMIC_LOAD_OIHL64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hl16>;
+ def ATOMIC_LOAD_OIHH64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hh16>;
+ def ATOMIC_LOAD_OILF64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lf32>;
+ def ATOMIC_LOAD_OIHF64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hf32>;
+}
def ATOMIC_LOADW_XR : AtomicLoadWBinaryReg<z_atomic_loadw_xor>;
def ATOMIC_LOADW_XILF : AtomicLoadWBinaryImm<z_atomic_loadw_xor, uimm32>;
-def ATOMIC_LOAD_XR : AtomicLoadBinaryReg32<atomic_load_xor_32>;
-def ATOMIC_LOAD_XILF : AtomicLoadBinaryImm32<atomic_load_xor_32, uimm32>;
-def ATOMIC_LOAD_XGR : AtomicLoadBinaryReg64<atomic_load_xor_64>;
-def ATOMIC_LOAD_XILF64 : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64lf32>;
-def ATOMIC_LOAD_XIHF64 : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64hf32>;
+let Predicates = [FeatureNoInterlockedAccess1] in {
+ def ATOMIC_LOAD_XR : AtomicLoadBinaryReg32<atomic_load_xor_32>;
+ def ATOMIC_LOAD_XILF : AtomicLoadBinaryImm32<atomic_load_xor_32, uimm32>;
+ def ATOMIC_LOAD_XGR : AtomicLoadBinaryReg64<atomic_load_xor_64>;
+ def ATOMIC_LOAD_XILF64 : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64lf32>;
+ def ATOMIC_LOAD_XIHF64 : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64hf32>;
+}
def ATOMIC_LOADW_NRi : AtomicLoadWBinaryReg<z_atomic_loadw_nand>;
def ATOMIC_LOADW_NILHi : AtomicLoadWBinaryImm<z_atomic_loadw_nand,
@@ -1366,15 +1401,15 @@ def : Pat<(sub GR64:$src1, (azextloadi32 bdxaddr20only:$addr)),
// Optimize sign-extended 1/0 selects to -1/0 selects. This is important
// for vector legalization.
-def : Pat<(sra (shl (i32 (z_select_ccmask 1, 0, uimm8zx4:$valid, uimm8zx4:$cc)),
+def : Pat<(sra (shl (i32 (z_select_ccmask 1, 0, imm32zx4:$valid, imm32zx4:$cc)),
(i32 31)),
(i32 31)),
- (Select32 (LHI -1), (LHI 0), uimm8zx4:$valid, uimm8zx4:$cc)>;
-def : Pat<(sra (shl (i64 (anyext (i32 (z_select_ccmask 1, 0, uimm8zx4:$valid,
- uimm8zx4:$cc)))),
+ (Select32 (LHI -1), (LHI 0), imm32zx4:$valid, imm32zx4:$cc)>;
+def : Pat<(sra (shl (i64 (anyext (i32 (z_select_ccmask 1, 0, imm32zx4:$valid,
+ imm32zx4:$cc)))),
(i32 63)),
(i32 63)),
- (Select64 (LGHI -1), (LGHI 0), uimm8zx4:$valid, uimm8zx4:$cc)>;
+ (Select64 (LGHI -1), (LGHI 0), imm32zx4:$valid, imm32zx4:$cc)>;
// Peepholes for turning scalar operations into block operations.
defm : BlockLoadStore<anyextloadi8, i32, MVCSequence, NCSequence, OCSequence,
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
index ba027d4..8081334 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
@@ -53,8 +53,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "systemz-long-branch"
-
#include "SystemZTargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -68,102 +66,105 @@
using namespace llvm;
+#define DEBUG_TYPE "systemz-long-branch"
+
STATISTIC(LongBranches, "Number of long branches.");
namespace {
- // Represents positional information about a basic block.
- struct MBBInfo {
- // The address that we currently assume the block has.
- uint64_t Address;
-
- // The size of the block in bytes, excluding terminators.
- // This value never changes.
- uint64_t Size;
-
- // The minimum alignment of the block, as a log2 value.
- // This value never changes.
- unsigned Alignment;
-
- // The number of terminators in this block. This value never changes.
- unsigned NumTerminators;
-
- MBBInfo()
- : Address(0), Size(0), Alignment(0), NumTerminators(0) {}
- };
-
- // Represents the state of a block terminator.
- struct TerminatorInfo {
- // If this terminator is a relaxable branch, this points to the branch
- // instruction, otherwise it is null.
- MachineInstr *Branch;
-
- // The address that we currently assume the terminator has.
- uint64_t Address;
-
- // The current size of the terminator in bytes.
- uint64_t Size;
-
- // If Branch is nonnull, this is the number of the target block,
- // otherwise it is unused.
- unsigned TargetBlock;
-
- // If Branch is nonnull, this is the length of the longest relaxed form,
- // otherwise it is zero.
- unsigned ExtraRelaxSize;
-
- TerminatorInfo() : Branch(0), Size(0), TargetBlock(0), ExtraRelaxSize(0) {}
- };
-
- // Used to keep track of the current position while iterating over the blocks.
- struct BlockPosition {
- // The address that we assume this position has.
- uint64_t Address;
-
- // The number of low bits in Address that are known to be the same
- // as the runtime address.
- unsigned KnownBits;
-
- BlockPosition(unsigned InitialAlignment)
- : Address(0), KnownBits(InitialAlignment) {}
- };
-
- class SystemZLongBranch : public MachineFunctionPass {
- public:
- static char ID;
- SystemZLongBranch(const SystemZTargetMachine &tm)
- : MachineFunctionPass(ID), TII(0) {}
-
- virtual const char *getPassName() const {
- return "SystemZ Long Branch";
- }
+// Represents positional information about a basic block.
+struct MBBInfo {
+ // The address that we currently assume the block has.
+ uint64_t Address;
+
+ // The size of the block in bytes, excluding terminators.
+ // This value never changes.
+ uint64_t Size;
+
+ // The minimum alignment of the block, as a log2 value.
+ // This value never changes.
+ unsigned Alignment;
+
+ // The number of terminators in this block. This value never changes.
+ unsigned NumTerminators;
+
+ MBBInfo()
+ : Address(0), Size(0), Alignment(0), NumTerminators(0) {}
+};
+
+// Represents the state of a block terminator.
+struct TerminatorInfo {
+ // If this terminator is a relaxable branch, this points to the branch
+ // instruction, otherwise it is null.
+ MachineInstr *Branch;
+
+ // The address that we currently assume the terminator has.
+ uint64_t Address;
+
+ // The current size of the terminator in bytes.
+ uint64_t Size;
+
+ // If Branch is nonnull, this is the number of the target block,
+ // otherwise it is unused.
+ unsigned TargetBlock;
+
+ // If Branch is nonnull, this is the length of the longest relaxed form,
+ // otherwise it is zero.
+ unsigned ExtraRelaxSize;
+
+ TerminatorInfo() : Branch(nullptr), Size(0), TargetBlock(0),
+ ExtraRelaxSize(0) {}
+};
+
+// Used to keep track of the current position while iterating over the blocks.
+struct BlockPosition {
+ // The address that we assume this position has.
+ uint64_t Address;
+
+ // The number of low bits in Address that are known to be the same
+ // as the runtime address.
+ unsigned KnownBits;
+
+ BlockPosition(unsigned InitialAlignment)
+ : Address(0), KnownBits(InitialAlignment) {}
+};
+
+class SystemZLongBranch : public MachineFunctionPass {
+public:
+ static char ID;
+ SystemZLongBranch(const SystemZTargetMachine &tm)
+ : MachineFunctionPass(ID), TII(nullptr) {}
+
+ const char *getPassName() const override {
+ return "SystemZ Long Branch";
+ }
- bool runOnMachineFunction(MachineFunction &F);
-
- private:
- void skipNonTerminators(BlockPosition &Position, MBBInfo &Block);
- void skipTerminator(BlockPosition &Position, TerminatorInfo &Terminator,
- bool AssumeRelaxed);
- TerminatorInfo describeTerminator(MachineInstr *MI);
- uint64_t initMBBInfo();
- bool mustRelaxBranch(const TerminatorInfo &Terminator, uint64_t Address);
- bool mustRelaxABranch();
- void setWorstCaseAddresses();
- void splitBranchOnCount(MachineInstr *MI, unsigned AddOpcode);
- void splitCompareBranch(MachineInstr *MI, unsigned CompareOpcode);
- void relaxBranch(TerminatorInfo &Terminator);
- void relaxBranches();
-
- const SystemZInstrInfo *TII;
- MachineFunction *MF;
- SmallVector<MBBInfo, 16> MBBs;
- SmallVector<TerminatorInfo, 16> Terminators;
- };
-
- char SystemZLongBranch::ID = 0;
-
- const uint64_t MaxBackwardRange = 0x10000;
- const uint64_t MaxForwardRange = 0xfffe;
-} // end of anonymous namespace
+ bool runOnMachineFunction(MachineFunction &F) override;
+
+private:
+ void skipNonTerminators(BlockPosition &Position, MBBInfo &Block);
+ void skipTerminator(BlockPosition &Position, TerminatorInfo &Terminator,
+ bool AssumeRelaxed);
+ TerminatorInfo describeTerminator(MachineInstr *MI);
+ uint64_t initMBBInfo();
+ bool mustRelaxBranch(const TerminatorInfo &Terminator, uint64_t Address);
+ bool mustRelaxABranch();
+ void setWorstCaseAddresses();
+ void splitBranchOnCount(MachineInstr *MI, unsigned AddOpcode);
+ void splitCompareBranch(MachineInstr *MI, unsigned CompareOpcode);
+ void relaxBranch(TerminatorInfo &Terminator);
+ void relaxBranches();
+
+ const SystemZInstrInfo *TII;
+ MachineFunction *MF;
+ SmallVector<MBBInfo, 16> MBBs;
+ SmallVector<TerminatorInfo, 16> Terminators;
+};
+
+char SystemZLongBranch::ID = 0;
+
+const uint64_t MaxBackwardRange = 0x10000;
+const uint64_t MaxForwardRange = 0xfffe;
+} // end anonymous namespace
FunctionPass *llvm::createSystemZLongBranchPass(SystemZTargetMachine &TM) {
return new SystemZLongBranch(TM);
@@ -321,9 +322,8 @@ bool SystemZLongBranch::mustRelaxBranch(const TerminatorInfo &Terminator,
// Return true if, under current assumptions, any terminator needs
// to be relaxed.
bool SystemZLongBranch::mustRelaxABranch() {
- for (SmallVectorImpl<TerminatorInfo>::iterator TI = Terminators.begin(),
- TE = Terminators.end(); TI != TE; ++TI)
- if (mustRelaxBranch(*TI, TI->Address))
+ for (auto &Terminator : Terminators)
+ if (mustRelaxBranch(Terminator, Terminator.Address))
return true;
return false;
}
@@ -333,10 +333,9 @@ bool SystemZLongBranch::mustRelaxABranch() {
void SystemZLongBranch::setWorstCaseAddresses() {
SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin();
BlockPosition Position(MF->getAlignment());
- for (SmallVectorImpl<MBBInfo>::iterator BI = MBBs.begin(), BE = MBBs.end();
- BI != BE; ++BI) {
- skipNonTerminators(Position, *BI);
- for (unsigned BTI = 0, BTE = BI->NumTerminators; BTI != BTE; ++BTI) {
+ for (auto &Block : MBBs) {
+ skipNonTerminators(Position, Block);
+ for (unsigned BTI = 0, BTE = Block.NumTerminators; BTI != BTE; ++BTI) {
skipTerminator(Position, *TI, true);
++TI;
}
@@ -426,7 +425,7 @@ void SystemZLongBranch::relaxBranch(TerminatorInfo &Terminator) {
Terminator.Size += Terminator.ExtraRelaxSize;
Terminator.ExtraRelaxSize = 0;
- Terminator.Branch = 0;
+ Terminator.Branch = nullptr;
++LongBranches;
}
@@ -435,10 +434,9 @@ void SystemZLongBranch::relaxBranch(TerminatorInfo &Terminator) {
void SystemZLongBranch::relaxBranches() {
SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin();
BlockPosition Position(MF->getAlignment());
- for (SmallVectorImpl<MBBInfo>::iterator BI = MBBs.begin(), BE = MBBs.end();
- BI != BE; ++BI) {
- skipNonTerminators(Position, *BI);
- for (unsigned BTI = 0, BTE = BI->NumTerminators; BTI != BTE; ++BTI) {
+ for (auto &Block : MBBs) {
+ skipNonTerminators(Position, Block);
+ for (unsigned BTI = 0, BTE = Block.NumTerminators; BTI != BTE; ++BTI) {
assert(Position.Address <= TI->Address &&
"Addresses shouldn't go forwards");
if (mustRelaxBranch(*TI, Position.Address))
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.cpp
index ff9a6c0..df561e2 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.cpp
@@ -9,9 +9,9 @@
#include "SystemZMCInstLower.h"
#include "SystemZAsmPrinter.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCStreamer.h"
-#include "llvm/Target/Mangler.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.h b/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.h
index f6d5ac8..90447ff 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.h
@@ -11,8 +11,8 @@
#define LLVM_SYSTEMZMCINSTLOWER_H
#include "llvm/MC/MCExpr.h"
-#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
class MCInst;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
index 845291f..50865f13 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
@@ -63,6 +63,6 @@ public:
void setManipulatesSP(bool MSP) { ManipulatesSP = MSP; }
};
-} // end llvm namespace
+} // end namespace llvm
#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td b/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td
index 3ad146c..7be81dc 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td
@@ -202,21 +202,6 @@ def S32Imm : ImmediateAsmOperand<"S32Imm">;
def U32Imm : ImmediateAsmOperand<"U32Imm">;
//===----------------------------------------------------------------------===//
-// 8-bit immediates
-//===----------------------------------------------------------------------===//
-
-def uimm8zx4 : Immediate<i8, [{
- return isUInt<4>(N->getZExtValue());
-}], NOOP_SDNodeXForm, "U4Imm">;
-
-def uimm8zx6 : Immediate<i8, [{
- return isUInt<6>(N->getZExtValue());
-}], NOOP_SDNodeXForm, "U6Imm">;
-
-def simm8 : Immediate<i8, [{}], SIMM8, "S8Imm">;
-def uimm8 : Immediate<i8, [{}], UIMM8, "U8Imm">;
-
-//===----------------------------------------------------------------------===//
// i32 immediates
//===----------------------------------------------------------------------===//
@@ -241,6 +226,14 @@ def imm32lh16c : Immediate<i32, [{
}], LH16, "U16Imm">;
// Short immediates
+def imm32zx4 : Immediate<i32, [{
+ return isUInt<4>(N->getZExtValue());
+}], NOOP_SDNodeXForm, "U4Imm">;
+
+def imm32zx6 : Immediate<i32, [{
+ return isUInt<6>(N->getZExtValue());
+}], NOOP_SDNodeXForm, "U6Imm">;
+
def imm32sx8 : Immediate<i32, [{
return isInt<8>(N->getSExtValue());
}], SIMM8, "S8Imm">;
@@ -470,13 +463,13 @@ def AccessReg : AsmOperandClass {
let Name = "AccessReg";
let ParserMethod = "parseAccessReg";
}
-def access_reg : Immediate<i8, [{ return N->getZExtValue() < 16; }],
+def access_reg : Immediate<i32, [{ return N->getZExtValue() < 16; }],
NOOP_SDNodeXForm, "AccessReg"> {
let ParserMatchClass = AccessReg;
}
// A 4-bit condition-code mask.
-def cond4 : PatLeaf<(i8 imm), [{ return (N->getZExtValue() < 16); }]>,
- Operand<i8> {
+def cond4 : PatLeaf<(i32 imm), [{ return (N->getZExtValue() < 16); }]>,
+ Operand<i32> {
let PrintMethod = "printCond4Operand";
}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZOperators.td b/contrib/llvm/lib/Target/SystemZ/SystemZOperators.td
index 31cabaa..c70e662 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZOperators.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZOperators.td
@@ -19,14 +19,14 @@ def SDT_ZICmp : SDTypeProfile<0, 3,
[SDTCisSameAs<0, 1>,
SDTCisVT<2, i32>]>;
def SDT_ZBRCCMask : SDTypeProfile<0, 3,
- [SDTCisVT<0, i8>,
- SDTCisVT<1, i8>,
+ [SDTCisVT<0, i32>,
+ SDTCisVT<1, i32>,
SDTCisVT<2, OtherVT>]>;
def SDT_ZSelectCCMask : SDTypeProfile<1, 4,
[SDTCisSameAs<0, 1>,
SDTCisSameAs<1, 2>,
- SDTCisVT<3, i8>,
- SDTCisVT<4, i8>]>;
+ SDTCisVT<3, i32>,
+ SDTCisVT<4, i32>]>;
def SDT_ZWrapPtr : SDTypeProfile<1, 1,
[SDTCisSameAs<0, 1>,
SDTCisPtrTy<0>]>;
@@ -37,7 +37,7 @@ def SDT_ZWrapOffset : SDTypeProfile<1, 2,
def SDT_ZAdjDynAlloc : SDTypeProfile<1, 0, [SDTCisVT<0, i64>]>;
def SDT_ZExtractAccess : SDTypeProfile<1, 1,
[SDTCisVT<0, i32>,
- SDTCisVT<1, i8>]>;
+ SDTCisVT<1, i32>]>;
def SDT_ZGR128Binary32 : SDTypeProfile<1, 2,
[SDTCisVT<0, untyped>,
SDTCisVT<1, untyped>,
@@ -77,7 +77,7 @@ def SDT_ZString : SDTypeProfile<1, 3,
SDTCisVT<3, i32>]>;
def SDT_ZI32Intrinsic : SDTypeProfile<1, 0, [SDTCisVT<0, i32>]>;
def SDT_ZPrefetch : SDTypeProfile<0, 2,
- [SDTCisVT<0, i8>,
+ [SDTCisVT<0, i32>,
SDTCisPtrTy<1>]>;
//===----------------------------------------------------------------------===//
@@ -103,6 +103,7 @@ def z_sibcall : SDNode<"SystemZISD::SIBCALL", SDT_ZCall,
def z_pcrel_wrapper : SDNode<"SystemZISD::PCREL_WRAPPER", SDT_ZWrapPtr, []>;
def z_pcrel_offset : SDNode<"SystemZISD::PCREL_OFFSET",
SDT_ZWrapOffset, []>;
+def z_iabs : SDNode<"SystemZISD::IABS", SDTIntUnaryOp, []>;
def z_icmp : SDNode<"SystemZISD::ICMP", SDT_ZICmp, [SDNPOutGlue]>;
def z_fcmp : SDNode<"SystemZISD::FCMP", SDT_ZCmp, [SDNPOutGlue]>;
def z_tm : SDNode<"SystemZISD::TM", SDT_ZICmp, [SDNPOutGlue]>;
@@ -119,6 +120,9 @@ def z_sdivrem64 : SDNode<"SystemZISD::SDIVREM64", SDT_ZGR128Binary64>;
def z_udivrem32 : SDNode<"SystemZISD::UDIVREM32", SDT_ZGR128Binary32>;
def z_udivrem64 : SDNode<"SystemZISD::UDIVREM64", SDT_ZGR128Binary64>;
+def z_serialize : SDNode<"SystemZISD::SERIALIZE", SDTNone,
+ [SDNPHasChain, SDNPMayStore]>;
+
class AtomicWOp<string name, SDTypeProfile profile = SDT_ZAtomicLoadBinaryW>
: SDNode<"SystemZISD::"##name, profile,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
@@ -247,7 +251,7 @@ def anyextloadi32 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
// Aligned loads.
class AlignedLoad<SDPatternOperator load>
: PatFrag<(ops node:$addr), (load node:$addr), [{
- LoadSDNode *Load = cast<LoadSDNode>(N);
+ auto *Load = cast<LoadSDNode>(N);
return Load->getAlignment() >= Load->getMemoryVT().getStoreSize();
}]>;
def aligned_load : AlignedLoad<load>;
@@ -259,7 +263,7 @@ def aligned_azextloadi32 : AlignedLoad<azextloadi32>;
// Aligned stores.
class AlignedStore<SDPatternOperator store>
: PatFrag<(ops node:$src, node:$addr), (store node:$src, node:$addr), [{
- StoreSDNode *Store = cast<StoreSDNode>(N);
+ auto *Store = cast<StoreSDNode>(N);
return Store->getAlignment() >= Store->getMemoryVT().getStoreSize();
}]>;
def aligned_store : AlignedStore<store>;
@@ -270,7 +274,7 @@ def aligned_truncstorei32 : AlignedStore<truncstorei32>;
// location multiple times.
class NonvolatileLoad<SDPatternOperator load>
: PatFrag<(ops node:$addr), (load node:$addr), [{
- LoadSDNode *Load = cast<LoadSDNode>(N);
+ auto *Load = cast<LoadSDNode>(N);
return !Load->isVolatile();
}]>;
def nonvolatile_load : NonvolatileLoad<load>;
@@ -281,7 +285,7 @@ def nonvolatile_anyextloadi32 : NonvolatileLoad<anyextloadi32>;
// Non-volatile stores.
class NonvolatileStore<SDPatternOperator store>
: PatFrag<(ops node:$src, node:$addr), (store node:$src, node:$addr), [{
- StoreSDNode *Store = cast<StoreSDNode>(N);
+ auto *Store = cast<StoreSDNode>(N);
return !Store->isVolatile();
}]>;
def nonvolatile_store : NonvolatileStore<store>;
@@ -346,6 +350,9 @@ def or_as_revinserti8 : PatFrag<(ops node:$src1, node:$src2),
APInt::getLowBitsSet(BitWidth, 8));
}]>;
+// Negative integer absolute.
+def z_inegabs : PatFrag<(ops node:$src), (ineg (z_iabs node:$src))>;
+
// Integer absolute, matching the canonical form generated by DAGCombiner.
def z_iabs32 : PatFrag<(ops node:$src),
(xor (add node:$src, (sra node:$src, (i32 31))),
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZPatterns.td b/contrib/llvm/lib/Target/SystemZ/SystemZPatterns.td
index 7706351..e307f8a 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZPatterns.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZPatterns.td
@@ -101,15 +101,15 @@ multiclass CondStores64<Instruction insn, Instruction insninv,
SDPatternOperator store, SDPatternOperator load,
AddressingMode mode> {
def : Pat<(store (z_select_ccmask GR64:$new, (load mode:$addr),
- uimm8zx4:$valid, uimm8zx4:$cc),
+ imm32zx4:$valid, imm32zx4:$cc),
mode:$addr),
(insn (EXTRACT_SUBREG GR64:$new, subreg_l32), mode:$addr,
- uimm8zx4:$valid, uimm8zx4:$cc)>;
+ imm32zx4:$valid, imm32zx4:$cc)>;
def : Pat<(store (z_select_ccmask (load mode:$addr), GR64:$new,
- uimm8zx4:$valid, uimm8zx4:$cc),
+ imm32zx4:$valid, imm32zx4:$cc),
mode:$addr),
(insninv (EXTRACT_SUBREG GR64:$new, subreg_l32), mode:$addr,
- uimm8zx4:$valid, uimm8zx4:$cc)>;
+ imm32zx4:$valid, imm32zx4:$cc)>;
}
// Try to use MVC instruction INSN for a load of type LOAD followed by a store
@@ -148,5 +148,8 @@ multiclass BlockLoadStore<SDPatternOperator load, ValueType vt,
// Record that INSN is a LOAD AND TEST that can be used to compare
// registers in CLS against zero. The instruction has separate R1 and R2
// operands, but they must be the same when the instruction is used like this.
-class CompareZeroFP<Instruction insn, RegisterOperand cls>
- : Pat<(z_fcmp cls:$reg, (fpimm0)), (insn cls:$reg, cls:$reg)>;
+multiclass CompareZeroFP<Instruction insn, RegisterOperand cls> {
+ def : Pat<(z_fcmp cls:$reg, (fpimm0)), (insn cls:$reg, cls:$reg)>;
+ // The sign of the zero makes no difference.
+ def : Pat<(z_fcmp cls:$reg, (fpimmneg0)), (insn cls:$reg, cls:$reg)>;
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZProcessors.td b/contrib/llvm/lib/Target/SystemZ/SystemZProcessors.td
index f241fb0..e6b58f1 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZProcessors.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZProcessors.td
@@ -16,6 +16,9 @@ class SystemZFeature<string extname, string intname, string desc>
AssemblerPredicate<"Feature"##intname, extname>,
SubtargetFeature<extname, "Has"##intname, "true", desc>;
+class SystemZMissingFeature<string intname>
+ : Predicate<"!Subtarget.has"##intname##"()">;
+
def FeatureDistinctOps : SystemZFeature<
"distinct-ops", "DistinctOps",
"Assume that the distinct-operands facility is installed"
@@ -36,11 +39,24 @@ def FeatureFPExtension : SystemZFeature<
"Assume that the floating-point extension facility is installed"
>;
+def FeatureFastSerialization : SystemZFeature<
+ "fast-serialization", "FastSerialization",
+ "Assume that the fast-serialization facility is installed"
+>;
+
+def FeatureInterlockedAccess1 : SystemZFeature<
+ "interlocked-access1", "InterlockedAccess1",
+ "Assume that interlocked-access facility 1 is installed"
+>;
+def FeatureNoInterlockedAccess1 : SystemZMissingFeature<"InterlockedAccess1">;
+
def : Processor<"generic", NoItineraries, []>;
def : Processor<"z10", NoItineraries, []>;
def : Processor<"z196", NoItineraries,
[FeatureDistinctOps, FeatureLoadStoreOnCond, FeatureHighWord,
- FeatureFPExtension]>;
+ FeatureFPExtension, FeatureFastSerialization,
+ FeatureInterlockedAccess1]>;
def : Processor<"zEC12", NoItineraries,
[FeatureDistinctOps, FeatureLoadStoreOnCond, FeatureHighWord,
- FeatureFPExtension]>;
+ FeatureFPExtension, FeatureFastSerialization,
+ FeatureInterlockedAccess1]>;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
index b61ae88..f03bcc4 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
@@ -7,31 +7,29 @@
//
//===----------------------------------------------------------------------===//
+#include "SystemZInstrInfo.h"
#include "SystemZRegisterInfo.h"
-#include "SystemZTargetMachine.h"
+#include "SystemZSubtarget.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
+
+using namespace llvm;
#define GET_REGINFO_TARGET_DESC
#include "SystemZGenRegisterInfo.inc"
-using namespace llvm;
-
-SystemZRegisterInfo::SystemZRegisterInfo(SystemZTargetMachine &tm)
- : SystemZGenRegisterInfo(SystemZ::R14D), TM(tm) {}
+SystemZRegisterInfo::SystemZRegisterInfo()
+ : SystemZGenRegisterInfo(SystemZ::R14D) {}
-const uint16_t*
+const MCPhysReg *
SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
- static const uint16_t CalleeSavedRegs[] = {
- SystemZ::R6D, SystemZ::R7D, SystemZ::R8D, SystemZ::R9D,
- SystemZ::R10D, SystemZ::R11D, SystemZ::R12D, SystemZ::R13D,
- SystemZ::R14D, SystemZ::R15D,
- SystemZ::F8D, SystemZ::F9D, SystemZ::F10D, SystemZ::F11D,
- SystemZ::F12D, SystemZ::F13D, SystemZ::F14D, SystemZ::F15D,
- 0
- };
-
- return CalleeSavedRegs;
+ return CSR_SystemZ_SaveList;
+}
+
+const uint32_t *
+SystemZRegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
+ return CSR_SystemZ_RegMask;
}
BitVector
@@ -63,8 +61,8 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
MachineBasicBlock &MBB = *MI->getParent();
MachineFunction &MF = *MBB.getParent();
- const SystemZInstrInfo &TII =
- *static_cast<const SystemZInstrInfo*>(TM.getInstrInfo());
+ auto *TII =
+ static_cast<const SystemZInstrInfo *>(MF.getTarget().getInstrInfo());
const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
DebugLoc DL = MI->getDebugLoc();
@@ -84,7 +82,7 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
// See if the offset is in range, or if an equivalent instruction that
// accepts the offset exists.
unsigned Opcode = MI->getOpcode();
- unsigned OpcodeForOffset = TII.getOpcodeForOffset(Opcode, Offset);
+ unsigned OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
if (OpcodeForOffset)
MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
else {
@@ -94,7 +92,7 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
int64_t Mask = 0xffff;
do {
Offset = OldOffset & Mask;
- OpcodeForOffset = TII.getOpcodeForOffset(Opcode, Offset);
+ OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
Mask >>= 1;
assert(Mask && "One offset must be OK");
} while (!OpcodeForOffset);
@@ -107,21 +105,21 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
&& MI->getOperand(FIOperandNum + 2).getReg() == 0) {
// Load the offset into the scratch register and use it as an index.
// The scratch register then dies here.
- TII.loadImmediate(MBB, MI, ScratchReg, HighOffset);
+ TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
MI->getOperand(FIOperandNum + 2).ChangeToRegister(ScratchReg,
false, false, true);
} else {
// Load the anchor address into a scratch register.
- unsigned LAOpcode = TII.getOpcodeForOffset(SystemZ::LA, HighOffset);
+ unsigned LAOpcode = TII->getOpcodeForOffset(SystemZ::LA, HighOffset);
if (LAOpcode)
- BuildMI(MBB, MI, DL, TII.get(LAOpcode),ScratchReg)
+ BuildMI(MBB, MI, DL, TII->get(LAOpcode),ScratchReg)
.addReg(BasePtr).addImm(HighOffset).addReg(0);
else {
// Load the high offset into the scratch register and use it as
// an index.
- TII.loadImmediate(MBB, MI, ScratchReg, HighOffset);
- BuildMI(MBB, MI, DL, TII.get(SystemZ::AGR),ScratchReg)
+ TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
+ BuildMI(MBB, MI, DL, TII->get(SystemZ::AGR),ScratchReg)
.addReg(ScratchReg, RegState::Kill).addReg(BasePtr);
}
@@ -130,7 +128,7 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
false, false, true);
}
}
- MI->setDesc(TII.get(OpcodeForOffset));
+ MI->setDesc(TII->get(OpcodeForOffset));
MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h
index 13f45fa..9bffa46 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h
@@ -19,48 +19,38 @@
namespace llvm {
namespace SystemZ {
- // Return the subreg to use for referring to the even and odd registers
- // in a GR128 pair. Is32Bit says whether we want a GR32 or GR64.
- inline unsigned even128(bool Is32bit) {
- return Is32bit ? subreg_hl32 : subreg_h64;
- }
- inline unsigned odd128(bool Is32bit) {
- return Is32bit ? subreg_l32 : subreg_l64;
- }
+// Return the subreg to use for referring to the even and odd registers
+// in a GR128 pair. Is32Bit says whether we want a GR32 or GR64.
+inline unsigned even128(bool Is32bit) {
+ return Is32bit ? subreg_hl32 : subreg_h64;
}
-
-class SystemZSubtarget;
-class SystemZInstrInfo;
+inline unsigned odd128(bool Is32bit) {
+ return Is32bit ? subreg_l32 : subreg_l64;
+}
+} // end namespace SystemZ
struct SystemZRegisterInfo : public SystemZGenRegisterInfo {
-private:
- SystemZTargetMachine &TM;
-
public:
- SystemZRegisterInfo(SystemZTargetMachine &tm);
+ SystemZRegisterInfo();
// Override TargetRegisterInfo.h.
- virtual bool requiresRegisterScavenging(const MachineFunction &MF) const
- LLVM_OVERRIDE {
+ bool requiresRegisterScavenging(const MachineFunction &MF) const override {
return true;
}
- virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const
- LLVM_OVERRIDE {
+ bool requiresFrameIndexScavenging(const MachineFunction &MF) const override {
return true;
}
- virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const
- LLVM_OVERRIDE {
+ bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const override {
return true;
}
- virtual const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0)
- const LLVM_OVERRIDE;
- virtual BitVector getReservedRegs(const MachineFunction &MF)
- const LLVM_OVERRIDE;
- virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
- int SPAdj, unsigned FIOperandNum,
- RegScavenger *RS) const LLVM_OVERRIDE;
- virtual unsigned getFrameRegister(const MachineFunction &MF) const
- LLVM_OVERRIDE;
+ const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF = nullptr) const
+ override;
+ const uint32_t *getCallPreservedMask(CallingConv::ID CC) const override;
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
+ void eliminateFrameIndex(MachineBasicBlock::iterator MI,
+ int SPAdj, unsigned FIOperandNum,
+ RegScavenger *RS) const override;
+ unsigned getFrameRegister(const MachineFunction &MF) const override;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td
index 93d7c83..47ac20d 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td
@@ -119,6 +119,29 @@ defm ADDR128 : SystemZRegClass<"ADDR128", untyped, 128, (sub GR128Bit, R0Q)>;
// Floating-point registers
//===----------------------------------------------------------------------===//
+// Maps FPR register numbers to their DWARF encoding.
+class DwarfMapping<int id> { int Id = id; }
+
+def F0Dwarf : DwarfMapping<16>;
+def F2Dwarf : DwarfMapping<17>;
+def F4Dwarf : DwarfMapping<18>;
+def F6Dwarf : DwarfMapping<19>;
+
+def F1Dwarf : DwarfMapping<20>;
+def F3Dwarf : DwarfMapping<21>;
+def F5Dwarf : DwarfMapping<22>;
+def F7Dwarf : DwarfMapping<23>;
+
+def F8Dwarf : DwarfMapping<24>;
+def F10Dwarf : DwarfMapping<25>;
+def F12Dwarf : DwarfMapping<26>;
+def F14Dwarf : DwarfMapping<27>;
+
+def F9Dwarf : DwarfMapping<28>;
+def F11Dwarf : DwarfMapping<29>;
+def F13Dwarf : DwarfMapping<30>;
+def F15Dwarf : DwarfMapping<31>;
+
// Lower 32 bits of one of the 16 64-bit floating-point registers
class FPR32<bits<16> num, string n> : SystemZReg<n> {
let HWEncoding = num;
@@ -142,7 +165,7 @@ class FPR128<bits<16> num, string n, FPR64 low, FPR64 high>
foreach I = 0-15 in {
def F#I#S : FPR32<I, "f"#I>;
def F#I#D : FPR64<I, "f"#I, !cast<FPR32>("F"#I#"S")>,
- DwarfRegNum<[!add(I, 16)]>;
+ DwarfRegNum<[!cast<DwarfMapping>("F"#I#"Dwarf").Id]>;
}
foreach I = [0, 1, 4, 5, 8, 9, 12, 13] in {
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
index c7ebb5d..a3cba64 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
@@ -11,16 +11,15 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "systemz-selectiondag-info"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;
-SystemZSelectionDAGInfo::
-SystemZSelectionDAGInfo(const SystemZTargetMachine &TM)
- : TargetSelectionDAGInfo(TM) {
-}
+#define DEBUG_TYPE "systemz-selectiondag-info"
+
+SystemZSelectionDAGInfo::SystemZSelectionDAGInfo(const DataLayout &DL)
+ : TargetSelectionDAGInfo(&DL) {}
SystemZSelectionDAGInfo::~SystemZSelectionDAGInfo() {
}
@@ -62,7 +61,7 @@ EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
if (IsVolatile)
return SDValue();
- if (ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(Size))
+ if (auto *CSize = dyn_cast<ConstantSDNode>(Size))
return emitMemMem(DAG, DL, SystemZISD::MVC, SystemZISD::MVC_LOOP,
Chain, Dst, Src, CSize->getZExtValue());
return SDValue();
@@ -93,11 +92,11 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
if (IsVolatile)
return SDValue();
- if (ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(Size)) {
+ if (auto *CSize = dyn_cast<ConstantSDNode>(Size)) {
uint64_t Bytes = CSize->getZExtValue();
if (Bytes == 0)
return SDValue();
- if (ConstantSDNode *CByte = dyn_cast<ConstantSDNode>(Byte)) {
+ if (auto *CByte = dyn_cast<ConstantSDNode>(Byte)) {
// Handle cases that can be done using at most two of
// MVI, MVHI, MVHHI and MVGHI. The latter two can only be
       // used if ByteVal is all zeros or all ones; in other cases,
@@ -137,7 +136,7 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
assert(Bytes >= 2 && "Should have dealt with 0- and 1-byte cases already");
// Handle the special case of a memset of 0, which can use XC.
- ConstantSDNode *CByte = dyn_cast<ConstantSDNode>(Byte);
+ auto *CByte = dyn_cast<ConstantSDNode>(Byte);
if (CByte && CByte->getZExtValue() == 0)
return emitMemMem(DAG, DL, SystemZISD::XC, SystemZISD::XC_LOOP,
Chain, Dst, Dst, Bytes);
@@ -194,7 +193,7 @@ EmitTargetCodeForMemcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
SDValue Src1, SDValue Src2, SDValue Size,
MachinePointerInfo Op1PtrInfo,
MachinePointerInfo Op2PtrInfo) const {
- if (ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(Size)) {
+ if (auto *CSize = dyn_cast<ConstantSDNode>(Size)) {
uint64_t Bytes = CSize->getZExtValue();
assert(Bytes > 0 && "Caller should have handled 0-size case");
Chain = emitCLC(DAG, DL, Chain, Src1, Src2, Bytes);
@@ -230,7 +229,7 @@ EmitTargetCodeForMemchr(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
Ops.push_back(DAG.getConstant(SystemZ::CCMASK_SRST_FOUND, MVT::i32));
Ops.push_back(Glue);
VTs = DAG.getVTList(PtrVT, MVT::Glue);
- End = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, &Ops[0], Ops.size());
+ End = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, Ops);
return std::make_pair(End, Chain);
}
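The last hunk above switches from a pointer/length pair to llvm::ArrayRef. A minimal, self-contained sketch of that idiom follows; the sum() helper is hypothetical and only stands in for APIs such as SelectionDAG::getNode that now take ArrayRef parameters.

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/ADT/SmallVector.h"

  // A function taking ArrayRef accepts any contiguous container of ints.
  static int sum(llvm::ArrayRef<int> Vals) {
    int Total = 0;
    for (int V : Vals)
      Total += V;
    return Total;
  }

  void example() {
    llvm::SmallVector<int, 4> Ops;
    Ops.push_back(1);
    Ops.push_back(2);
    // Old style: sum(&Ops[0], Ops.size()); new style: pass the container,
    // which converts implicitly to ArrayRef<int>.
    int Total = sum(Ops);
    (void)Total;
  }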
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
index 281d1e2..e9de146 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
@@ -22,59 +22,56 @@ class SystemZTargetMachine;
class SystemZSelectionDAGInfo : public TargetSelectionDAGInfo {
public:
- explicit SystemZSelectionDAGInfo(const SystemZTargetMachine &TM);
+ explicit SystemZSelectionDAGInfo(const DataLayout &DL);
~SystemZSelectionDAGInfo();
- virtual
SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
bool IsVolatile, bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
- MachinePointerInfo SrcPtrInfo) const
- LLVM_OVERRIDE;
+ MachinePointerInfo SrcPtrInfo) const override;
- virtual SDValue
- EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL,
- SDValue Chain, SDValue Dst, SDValue Byte,
- SDValue Size, unsigned Align, bool IsVolatile,
- MachinePointerInfo DstPtrInfo) const LLVM_OVERRIDE;
+ SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL,
+ SDValue Chain, SDValue Dst, SDValue Byte,
+ SDValue Size, unsigned Align, bool IsVolatile,
+ MachinePointerInfo DstPtrInfo) const override;
- virtual std::pair<SDValue, SDValue>
+ std::pair<SDValue, SDValue>
EmitTargetCodeForMemcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
SDValue Src1, SDValue Src2, SDValue Size,
MachinePointerInfo Op1PtrInfo,
- MachinePointerInfo Op2PtrInfo) const LLVM_OVERRIDE;
+ MachinePointerInfo Op2PtrInfo) const override;
- virtual std::pair<SDValue, SDValue>
+ std::pair<SDValue, SDValue>
EmitTargetCodeForMemchr(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
SDValue Src, SDValue Char, SDValue Length,
- MachinePointerInfo SrcPtrInfo) const LLVM_OVERRIDE;
+ MachinePointerInfo SrcPtrInfo) const override;
- virtual std::pair<SDValue, SDValue>
+ std::pair<SDValue, SDValue>
EmitTargetCodeForStrcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
SDValue Dest, SDValue Src,
MachinePointerInfo DestPtrInfo,
MachinePointerInfo SrcPtrInfo,
- bool isStpcpy) const LLVM_OVERRIDE;
+ bool isStpcpy) const override;
- virtual std::pair<SDValue, SDValue>
+ std::pair<SDValue, SDValue>
EmitTargetCodeForStrcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
SDValue Src1, SDValue Src2,
MachinePointerInfo Op1PtrInfo,
- MachinePointerInfo Op2PtrInfo) const LLVM_OVERRIDE;
+ MachinePointerInfo Op2PtrInfo) const override;
- virtual std::pair<SDValue, SDValue>
+ std::pair<SDValue, SDValue>
EmitTargetCodeForStrlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
- SDValue Src, MachinePointerInfo SrcPtrInfo) const
- LLVM_OVERRIDE;
+ SDValue Src,
+ MachinePointerInfo SrcPtrInfo) const override;
- virtual std::pair<SDValue, SDValue>
+ std::pair<SDValue, SDValue>
EmitTargetCodeForStrnlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
SDValue Src, SDValue MaxLength,
- MachinePointerInfo SrcPtrInfo) const LLVM_OVERRIDE;
+ MachinePointerInfo SrcPtrInfo) const override;
};
-}
+} // end namespace llvm
#endif
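A small sketch of the pattern applied throughout these headers: the leading 'virtual' and the LLVM_OVERRIDE macro are dropped in favour of the C++11 'override' keyword, which lets the compiler verify that the method really overrides a base-class virtual. The Base/Derived names are illustrative only.

  struct Base {
    virtual ~Base() {}
    virtual int size() const { return 0; }
  };
  struct Derived : Base {
    int size() const override { return 1; }  // compile error if the signature drifts
  };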
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp
index 537a545..aad899c 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp
@@ -13,47 +13,47 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "systemz-shorten-inst"
-
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
using namespace llvm;
+#define DEBUG_TYPE "systemz-shorten-inst"
+
namespace {
- class SystemZShortenInst : public MachineFunctionPass {
- public:
- static char ID;
- SystemZShortenInst(const SystemZTargetMachine &tm);
+class SystemZShortenInst : public MachineFunctionPass {
+public:
+ static char ID;
+ SystemZShortenInst(const SystemZTargetMachine &tm);
- virtual const char *getPassName() const {
- return "SystemZ Instruction Shortening";
- }
+ const char *getPassName() const override {
+ return "SystemZ Instruction Shortening";
+ }
- bool processBlock(MachineBasicBlock *MBB);
- bool runOnMachineFunction(MachineFunction &F);
+ bool processBlock(MachineBasicBlock &MBB);
+ bool runOnMachineFunction(MachineFunction &F) override;
- private:
- bool shortenIIF(MachineInstr &MI, unsigned *GPRMap, unsigned LiveOther,
- unsigned LLIxL, unsigned LLIxH);
+private:
+ bool shortenIIF(MachineInstr &MI, unsigned *GPRMap, unsigned LiveOther,
+ unsigned LLIxL, unsigned LLIxH);
- const SystemZInstrInfo *TII;
+ const SystemZInstrInfo *TII;
- // LowGPRs[I] has bit N set if LLVM register I includes the low
- // word of GPR N. HighGPRs is the same for the high word.
- unsigned LowGPRs[SystemZ::NUM_TARGET_REGS];
- unsigned HighGPRs[SystemZ::NUM_TARGET_REGS];
- };
+ // LowGPRs[I] has bit N set if LLVM register I includes the low
+ // word of GPR N. HighGPRs is the same for the high word.
+ unsigned LowGPRs[SystemZ::NUM_TARGET_REGS];
+ unsigned HighGPRs[SystemZ::NUM_TARGET_REGS];
+};
- char SystemZShortenInst::ID = 0;
-} // end of anonymous namespace
+char SystemZShortenInst::ID = 0;
+} // end anonymous namespace
FunctionPass *llvm::createSystemZShortenInstPass(SystemZTargetMachine &TM) {
return new SystemZShortenInst(TM);
}
SystemZShortenInst::SystemZShortenInst(const SystemZTargetMachine &tm)
- : MachineFunctionPass(ID), TII(0), LowGPRs(), HighGPRs() {
+ : MachineFunctionPass(ID), TII(nullptr), LowGPRs(), HighGPRs() {
// Set up LowGPRs and HighGPRs.
for (unsigned I = 0; I < 16; ++I) {
LowGPRs[SystemZMC::GR32Regs[I]] |= 1 << I;
@@ -98,16 +98,15 @@ bool SystemZShortenInst::shortenIIF(MachineInstr &MI, unsigned *GPRMap,
}
// Process all instructions in MBB. Return true if something changed.
-bool SystemZShortenInst::processBlock(MachineBasicBlock *MBB) {
+bool SystemZShortenInst::processBlock(MachineBasicBlock &MBB) {
bool Changed = false;
// Work out which words are live on exit from the block.
unsigned LiveLow = 0;
unsigned LiveHigh = 0;
- for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
- SE = MBB->succ_end(); SI != SE; ++SI) {
- for (MachineBasicBlock::livein_iterator LI = (*SI)->livein_begin(),
- LE = (*SI)->livein_end(); LI != LE; ++LI) {
+ for (auto SI = MBB.succ_begin(), SE = MBB.succ_end(); SI != SE; ++SI) {
+ for (auto LI = (*SI)->livein_begin(), LE = (*SI)->livein_end();
+ LI != LE; ++LI) {
unsigned Reg = *LI;
assert(Reg < SystemZ::NUM_TARGET_REGS && "Invalid register number");
LiveLow |= LowGPRs[Reg];
@@ -116,8 +115,7 @@ bool SystemZShortenInst::processBlock(MachineBasicBlock *MBB) {
}
// Iterate backwards through the block looking for instructions to change.
- for (MachineBasicBlock::reverse_iterator MBBI = MBB->rbegin(),
- MBBE = MBB->rend(); MBBI != MBBE; ++MBBI) {
+ for (auto MBBI = MBB.rbegin(), MBBE = MBB.rend(); MBBI != MBBE; ++MBBI) {
MachineInstr &MI = *MBBI;
unsigned Opcode = MI.getOpcode();
if (Opcode == SystemZ::IILF)
@@ -128,8 +126,8 @@ bool SystemZShortenInst::processBlock(MachineBasicBlock *MBB) {
SystemZ::LLIHH);
unsigned UsedLow = 0;
unsigned UsedHigh = 0;
- for (MachineInstr::mop_iterator MOI = MI.operands_begin(),
- MOE = MI.operands_end(); MOI != MOE; ++MOI) {
+ for (auto MOI = MI.operands_begin(), MOE = MI.operands_end();
+ MOI != MOE; ++MOI) {
MachineOperand &MO = *MOI;
if (MO.isReg()) {
if (unsigned Reg = MO.getReg()) {
@@ -155,9 +153,8 @@ bool SystemZShortenInst::runOnMachineFunction(MachineFunction &F) {
TII = static_cast<const SystemZInstrInfo *>(F.getTarget().getInstrInfo());
bool Changed = false;
- for (MachineFunction::iterator MFI = F.begin(), MFE = F.end();
- MFI != MFE; ++MFI)
- Changed |= processBlock(MFI);
+ for (auto &MBB : F)
+ Changed |= processBlock(MBB);
return Changed;
}
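The hunks above replace explicit iterator pairs with range-based for loops and pass basic blocks by reference. A minimal, self-contained sketch of that style follows; plain std containers stand in here for MachineFunction and MachineBasicBlock, and the Block/Func types are assumptions for illustration only.

  #include <vector>

  struct Block { std::vector<unsigned> LiveIns; };
  struct Func  { std::vector<Block> Blocks; };

  static bool processBlock(Block &B) { return !B.LiveIns.empty(); }

  bool runOnFunc(Func &F) {
    bool Changed = false;
    for (auto &B : F.Blocks)      // was: for (iterator I = F.begin(), E = F.end(); ...)
      Changed |= processBlock(B);
    return Changed;
  }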
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp
index 3971d5e..e160bc8 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp
@@ -8,25 +8,23 @@
//===----------------------------------------------------------------------===//
#include "SystemZSubtarget.h"
+#include "MCTargetDesc/SystemZMCTargetDesc.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/Host.h"
-#include "MCTargetDesc/SystemZMCTargetDesc.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "systemz-subtarget"
#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "SystemZGenSubtargetInfo.inc"
-using namespace llvm;
-
-// Pin the vtabel to this file.
+// Pin the vtable to this file.
void SystemZSubtarget::anchor() {}
-SystemZSubtarget::SystemZSubtarget(const std::string &TT,
- const std::string &CPU,
- const std::string &FS)
- : SystemZGenSubtargetInfo(TT, CPU, FS), HasDistinctOps(false),
- HasLoadStoreOnCond(false), HasHighWord(false), HasFPExtension(false),
- TargetTriple(TT) {
+SystemZSubtarget &
+SystemZSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) {
std::string CPUName = CPU;
if (CPUName.empty())
CPUName = "generic";
@@ -34,11 +32,26 @@ SystemZSubtarget::SystemZSubtarget(const std::string &TT,
if (CPUName == "generic")
CPUName = sys::getHostCPUName();
#endif
-
// Parse features string.
ParseSubtargetFeatures(CPUName, FS);
+ return *this;
}
+SystemZSubtarget::SystemZSubtarget(const std::string &TT,
+ const std::string &CPU,
+ const std::string &FS,
+ const TargetMachine &TM)
+ : SystemZGenSubtargetInfo(TT, CPU, FS), HasDistinctOps(false),
+ HasLoadStoreOnCond(false), HasHighWord(false), HasFPExtension(false),
+ HasFastSerialization(false), HasInterlockedAccess1(false),
+ TargetTriple(TT),
+ // Make sure that global data has at least 16 bits of alignment by
+ // default, so that we can refer to it using LARL. We don't have any
+ // special requirements for stack variables though.
+ DL("E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64"),
+ InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM),
+ TSInfo(DL), FrameLowering() {}
+
// Return true if GV binds locally under reloc model RM.
static bool bindsLocally(const GlobalValue *GV, Reloc::Model RM) {
// For non-PIC, all symbols bind locally.
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h b/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h
index 5817491..4e8c710 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h
@@ -14,6 +14,12 @@
#ifndef SYSTEMZSUBTARGET_H
#define SYSTEMZSUBTARGET_H
+#include "SystemZFrameLowering.h"
+#include "SystemZISelLowering.h"
+#include "SystemZInstrInfo.h"
+#include "SystemZRegisterInfo.h"
+#include "SystemZSelectionDAGInfo.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <string>
@@ -32,16 +38,34 @@ protected:
bool HasLoadStoreOnCond;
bool HasHighWord;
bool HasFPExtension;
+ bool HasFastSerialization;
+ bool HasInterlockedAccess1;
private:
Triple TargetTriple;
-
+ const DataLayout DL;
+ SystemZInstrInfo InstrInfo;
+ SystemZTargetLowering TLInfo;
+ SystemZSelectionDAGInfo TSInfo;
+ SystemZFrameLowering FrameLowering;
+
+ SystemZSubtarget &initializeSubtargetDependencies(StringRef CPU,
+ StringRef FS);
public:
SystemZSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS);
+ const std::string &FS, const TargetMachine &TM);
+
+ const TargetFrameLowering *getFrameLowering() const { return &FrameLowering; }
+ const SystemZInstrInfo *getInstrInfo() const { return &InstrInfo; }
+ const DataLayout *getDataLayout() const { return &DL; }
+ const SystemZRegisterInfo *getRegisterInfo() const {
+ return &InstrInfo.getRegisterInfo();
+ }
+ const SystemZTargetLowering *getTargetLowering() const { return &TLInfo; }
+ const TargetSelectionDAGInfo *getSelectionDAGInfo() const { return &TSInfo; }
// This is important for reducing register pressure in vector code.
- virtual bool useAA() const LLVM_OVERRIDE { return true; }
+ bool useAA() const override { return true; }
// Automatically generated by tblgen.
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
@@ -58,6 +82,12 @@ public:
// Return true if the target has the floating-point extension facility.
bool hasFPExtension() const { return HasFPExtension; }
+ // Return true if the target has the fast-serialization facility.
+ bool hasFastSerialization() const { return HasFastSerialization; }
+
+ // Return true if the target has interlocked-access facility 1.
+ bool hasInterlockedAccess1() const { return HasInterlockedAccess1; }
+
// Return true if GV can be accessed using LARL for reloc model RM
// and code model CM.
bool isPC32DBLSymbol(const GlobalValue *GV, Reloc::Model RM,
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
index dee92e9..0122e99 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
@@ -22,18 +22,10 @@ extern "C" void LLVMInitializeSystemZTarget() {
SystemZTargetMachine::SystemZTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM,
- CodeModel::Model CM,
+ Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
- : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- Subtarget(TT, CPU, FS),
- // Make sure that global data has at least 16 bits of alignment by default,
- // so that we can refer to it using LARL. We don't have any special
- // requirements for stack variables though.
- DL("E-p:64:64:64-i1:8:16-i8:8:16-i16:16-i32:32-i64:64"
- "-f32:32-f64:64-f128:64-a0:8:16-n32:64"),
- InstrInfo(*this), TLInfo(*this), TSInfo(*this),
- FrameLowering(*this, Subtarget) {
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ Subtarget(TT, CPU, FS, *this) {
initAsmInfo();
}
@@ -48,10 +40,10 @@ public:
return getTM<SystemZTargetMachine>();
}
- virtual void addIRPasses() LLVM_OVERRIDE;
- virtual bool addInstSelector() LLVM_OVERRIDE;
- virtual bool addPreSched2() LLVM_OVERRIDE;
- virtual bool addPreEmitPass() LLVM_OVERRIDE;
+ void addIRPasses() override;
+ bool addInstSelector() override;
+ bool addPreSched2() override;
+ bool addPreEmitPass() override;
};
} // end anonymous namespace
@@ -66,7 +58,8 @@ bool SystemZPassConfig::addInstSelector() {
}
bool SystemZPassConfig::addPreSched2() {
- if (getSystemZTargetMachine().getSubtargetImpl()->hasLoadStoreOnCond())
+ if (getOptLevel() != CodeGenOpt::None &&
+ getSystemZTargetMachine().getSubtargetImpl()->hasLoadStoreOnCond())
addPass(&IfConverterID);
return true;
}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h
index a99a98e..ded07e9 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h
@@ -15,25 +15,15 @@
#ifndef SYSTEMZTARGETMACHINE_H
#define SYSTEMZTARGETMACHINE_H
-#include "SystemZFrameLowering.h"
-#include "SystemZISelLowering.h"
-#include "SystemZInstrInfo.h"
-#include "SystemZRegisterInfo.h"
#include "SystemZSubtarget.h"
-#include "SystemZSelectionDAGInfo.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
namespace llvm {
+class TargetFrameLowering;
+
class SystemZTargetMachine : public LLVMTargetMachine {
SystemZSubtarget Subtarget;
- const DataLayout DL;
- SystemZInstrInfo InstrInfo;
- SystemZTargetLowering TLInfo;
- SystemZSelectionDAGInfo TSInfo;
- SystemZFrameLowering FrameLowering;
public:
SystemZTargetMachine(const Target &T, StringRef TT, StringRef CPU,
@@ -42,31 +32,30 @@ public:
CodeGenOpt::Level OL);
// Override TargetMachine.
- virtual const TargetFrameLowering *getFrameLowering() const LLVM_OVERRIDE {
- return &FrameLowering;
+ const TargetFrameLowering *getFrameLowering() const override {
+ return getSubtargetImpl()->getFrameLowering();
}
- virtual const SystemZInstrInfo *getInstrInfo() const LLVM_OVERRIDE {
- return &InstrInfo;
+ const SystemZInstrInfo *getInstrInfo() const override {
+ return getSubtargetImpl()->getInstrInfo();
}
- virtual const SystemZSubtarget *getSubtargetImpl() const LLVM_OVERRIDE {
+ const SystemZSubtarget *getSubtargetImpl() const override {
return &Subtarget;
}
- virtual const DataLayout *getDataLayout() const LLVM_OVERRIDE {
- return &DL;
+ const DataLayout *getDataLayout() const override {
+ return getSubtargetImpl()->getDataLayout();
}
- virtual const SystemZRegisterInfo *getRegisterInfo() const LLVM_OVERRIDE {
- return &InstrInfo.getRegisterInfo();
+ const SystemZRegisterInfo *getRegisterInfo() const override {
+ return getSubtargetImpl()->getRegisterInfo();
}
- virtual const SystemZTargetLowering *getTargetLowering() const LLVM_OVERRIDE {
- return &TLInfo;
+ const SystemZTargetLowering *getTargetLowering() const override {
+ return getSubtargetImpl()->getTargetLowering();
}
- virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const
- LLVM_OVERRIDE {
- return &TSInfo;
+ const TargetSelectionDAGInfo *getSelectionDAGInfo() const override {
+ return getSubtargetImpl()->getSelectionDAGInfo();
}
// Override LLVMTargetMachine
- virtual TargetPassConfig *createPassConfig(PassManagerBase &PM) LLVM_OVERRIDE;
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
};
} // end namespace llvm
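The SystemZTargetMachine changes above move the per-target objects (frame lowering, instruction info, DataLayout, etc.) into the subtarget and turn the TargetMachine accessors into thin forwarders. A hedged sketch of that delegation pattern, with made-up names, follows.

  // Illustrative only: the machine no longer owns the objects, it forwards
  // to the subtarget, which constructs and holds them.
  struct Subtarget {
    int frameAlignment() const { return 8; }
  };
  struct Machine {
    Subtarget ST;
    const Subtarget *getSubtargetImpl() const { return &ST; }
    int frameAlignment() const { return getSubtargetImpl()->frameAlignment(); }
  };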
diff --git a/contrib/llvm/lib/Target/Target.cpp b/contrib/llvm/lib/Target/Target.cpp
index 2190198..d277f82 100644
--- a/contrib/llvm/lib/Target/Target.cpp
+++ b/contrib/llvm/lib/Target/Target.cpp
@@ -24,14 +24,6 @@
using namespace llvm;
-inline DataLayout *unwrap(LLVMTargetDataRef P) {
- return reinterpret_cast<DataLayout*>(P);
-}
-
-inline LLVMTargetDataRef wrap(const DataLayout *P) {
- return reinterpret_cast<LLVMTargetDataRef>(const_cast<DataLayout*>(P));
-}
-
inline TargetLibraryInfo *unwrap(LLVMTargetLibraryInfoRef P) {
return reinterpret_cast<TargetLibraryInfo*>(P);
}
@@ -42,7 +34,7 @@ inline LLVMTargetLibraryInfoRef wrap(const TargetLibraryInfo *P) {
}
void llvm::initializeTarget(PassRegistry &Registry) {
- initializeDataLayoutPass(Registry);
+ initializeDataLayoutPassPass(Registry);
initializeTargetLibraryInfoPass(Registry);
}
@@ -55,7 +47,9 @@ LLVMTargetDataRef LLVMCreateTargetData(const char *StringRep) {
}
void LLVMAddTargetData(LLVMTargetDataRef TD, LLVMPassManagerRef PM) {
- unwrap(PM)->add(new DataLayout(*unwrap(TD)));
+  // The DataLayoutPass must now be in sync with the module. Unfortunately we
+  // cannot enforce that from the C API.
+ unwrap(PM)->add(new DataLayoutPass(*unwrap(TD)));
}
void LLVMAddTargetLibraryInfo(LLVMTargetLibraryInfoRef TLI,
@@ -113,7 +107,7 @@ unsigned LLVMABIAlignmentOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty) {
}
unsigned LLVMCallFrameAlignmentOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty) {
- return unwrap(TD)->getCallFrameTypeAlignment(unwrap(Ty));
+ return unwrap(TD)->getABITypeAlignment(unwrap(Ty));
}
unsigned LLVMPreferredAlignmentOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty) {
diff --git a/contrib/llvm/lib/Target/TargetLibraryInfo.cpp b/contrib/llvm/lib/Target/TargetLibraryInfo.cpp
index 3e68fe1..616ff90 100644
--- a/contrib/llvm/lib/Target/TargetLibraryInfo.cpp
+++ b/contrib/llvm/lib/Target/TargetLibraryInfo.cpp
@@ -48,7 +48,7 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"__isoc99_sscanf",
"__memcpy_chk",
"__sincospi_stret",
- "__sincospi_stretf",
+ "__sincospif_stret",
"__sinpi",
"__sinpif",
"__sqrt_finite",
@@ -140,6 +140,12 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"floor",
"floorf",
"floorl",
+ "fmax",
+ "fmaxf",
+ "fmaxl",
+ "fmin",
+ "fminf",
+ "fminl",
"fmod",
"fmodf",
"fmodl",
@@ -184,6 +190,9 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"isdigit",
"labs",
"lchown",
+ "ldexp",
+ "ldexpf",
+ "ldexpl",
"llabs",
"log",
"log10",
@@ -369,8 +378,17 @@ static void initialize(TargetLibraryInfo &TLI, const Triple &T,
llvm_unreachable("TargetLibraryInfo function names must be sorted");
}
#endif // !NDEBUG
-
- // memset_pattern16 is only available on iOS 3.0 and Mac OS/X 10.5 and later.
+
+  // There are no library implementations of memcpy and memset for r600 and
+ // these can be difficult to lower in the backend.
+ if (T.getArch() == Triple::r600) {
+ TLI.setUnavailable(LibFunc::memcpy);
+ TLI.setUnavailable(LibFunc::memset);
+ TLI.setUnavailable(LibFunc::memset_pattern16);
+ return;
+ }
+
+ // memset_pattern16 is only available on iOS 3.0 and Mac OS X 10.5 and later.
if (T.isMacOSX()) {
if (T.isMacOSXVersionLT(10, 5))
TLI.setUnavailable(LibFunc::memset_pattern16);
@@ -387,7 +405,7 @@ static void initialize(TargetLibraryInfo &TLI, const Triple &T,
TLI.setUnavailable(LibFunc::cospi);
TLI.setUnavailable(LibFunc::cospif);
TLI.setUnavailable(LibFunc::sincospi_stret);
- TLI.setUnavailable(LibFunc::sincospi_stretf);
+ TLI.setUnavailable(LibFunc::sincospif_stret);
}
if (T.isMacOSX() && T.getArch() == Triple::x86 &&
@@ -408,7 +426,7 @@ static void initialize(TargetLibraryInfo &TLI, const Triple &T,
TLI.setUnavailable(LibFunc::fiprintf);
}
- if (T.getOS() == Triple::Win32) {
+ if (T.isOSWindows() && !T.isOSCygMing()) {
// Win32 does not support long double
TLI.setUnavailable(LibFunc::acosl);
TLI.setUnavailable(LibFunc::asinl);
@@ -422,8 +440,12 @@ static void initialize(TargetLibraryInfo &TLI, const Triple &T,
TLI.setUnavailable(LibFunc::fabsf); // Win32 and Win64 both lack fabsf
TLI.setUnavailable(LibFunc::fabsl);
TLI.setUnavailable(LibFunc::floorl);
+ TLI.setUnavailable(LibFunc::fmaxl);
+ TLI.setUnavailable(LibFunc::fminl);
TLI.setUnavailable(LibFunc::fmodl);
TLI.setUnavailable(LibFunc::frexpl);
+ TLI.setUnavailable(LibFunc::ldexpf);
+ TLI.setUnavailable(LibFunc::ldexpl);
TLI.setUnavailable(LibFunc::logl);
TLI.setUnavailable(LibFunc::modfl);
TLI.setUnavailable(LibFunc::powl);
@@ -446,9 +468,6 @@ static void initialize(TargetLibraryInfo &TLI, const Triple &T,
TLI.setUnavailable(LibFunc::cbrt);
TLI.setUnavailable(LibFunc::cbrtf);
TLI.setUnavailable(LibFunc::cbrtl);
- TLI.setUnavailable(LibFunc::exp10);
- TLI.setUnavailable(LibFunc::exp10f);
- TLI.setUnavailable(LibFunc::exp10l);
TLI.setUnavailable(LibFunc::exp2);
TLI.setUnavailable(LibFunc::exp2f);
TLI.setUnavailable(LibFunc::exp2l);
@@ -492,6 +511,8 @@ static void initialize(TargetLibraryInfo &TLI, const Triple &T,
TLI.setUnavailable(LibFunc::coshf);
TLI.setUnavailable(LibFunc::expf);
TLI.setUnavailable(LibFunc::floorf);
+ TLI.setUnavailable(LibFunc::fminf);
+ TLI.setUnavailable(LibFunc::fmaxf);
TLI.setUnavailable(LibFunc::fmodf);
TLI.setUnavailable(LibFunc::logf);
TLI.setUnavailable(LibFunc::powf);
@@ -567,6 +588,43 @@ static void initialize(TargetLibraryInfo &TLI, const Triple &T,
TLI.setUnavailable(LibFunc::llabs);
}
+ switch (T.getOS()) {
+ case Triple::MacOSX:
+ // exp10 and exp10f are not available on OS X until 10.9 and iOS until 7.0
+ // and their names are __exp10 and __exp10f. exp10l is not available on
+ // OS X or iOS.
+ TLI.setUnavailable(LibFunc::exp10l);
+ if (T.isMacOSXVersionLT(10, 9)) {
+ TLI.setUnavailable(LibFunc::exp10);
+ TLI.setUnavailable(LibFunc::exp10f);
+ } else {
+ TLI.setAvailableWithName(LibFunc::exp10, "__exp10");
+ TLI.setAvailableWithName(LibFunc::exp10f, "__exp10f");
+ }
+ break;
+ case Triple::IOS:
+ TLI.setUnavailable(LibFunc::exp10l);
+ if (T.isOSVersionLT(7, 0)) {
+ TLI.setUnavailable(LibFunc::exp10);
+ TLI.setUnavailable(LibFunc::exp10f);
+ } else {
+ TLI.setAvailableWithName(LibFunc::exp10, "__exp10");
+ TLI.setAvailableWithName(LibFunc::exp10f, "__exp10f");
+ }
+ break;
+ case Triple::Linux:
+    // exp10, exp10f, exp10l are available on Linux (GLIBC) but are extremely
+ // buggy prior to glibc version 2.18. Until this version is widely deployed
+ // or we have a reasonable detection strategy, we cannot use exp10 reliably
+ // on Linux.
+ //
+ // Fall through to disable all of them.
+ default:
+ TLI.setUnavailable(LibFunc::exp10);
+ TLI.setUnavailable(LibFunc::exp10f);
+ TLI.setUnavailable(LibFunc::exp10l);
+ }
+
// ffsl is available on at least Darwin, Mac OS X, iOS, FreeBSD, and
// Linux (GLIBC):
// http://developer.apple.com/library/mac/#documentation/Darwin/Reference/ManPages/man3/ffsl.3.html
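A client-side view of the exp10 handling added in the switch above, as a hedged sketch assuming the LLVM 3.5-era TargetLibraryInfo pass API (has() and getName(); the triple string is an arbitrary example):

  #include "llvm/ADT/Triple.h"
  #include "llvm/Target/TargetLibraryInfo.h"
  using namespace llvm;

  void queryExp10() {
    TargetLibraryInfo TLI(Triple("x86_64-apple-macosx10.9"));
    if (TLI.has(LibFunc::exp10)) {
      // On OS X 10.9+ the call is remapped, so this yields "__exp10".
      StringRef Name = TLI.getName(LibFunc::exp10);
      (void)Name;
    }
  }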
diff --git a/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp b/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp
index 7b8d110..2569e92 100644
--- a/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp
+++ b/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp
@@ -18,6 +18,8 @@
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCStreamer.h"
@@ -25,7 +27,7 @@
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/Mangler.h"
+#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
@@ -40,10 +42,11 @@ using namespace llvm;
void TargetLoweringObjectFile::Initialize(MCContext &ctx,
const TargetMachine &TM) {
Ctx = &ctx;
+ DL = TM.getDataLayout();
InitMCObjectFileInfo(TM.getTargetTriple(),
TM.getRelocationModel(), TM.getCodeModel(), *Ctx);
}
-
+
TargetLoweringObjectFile::~TargetLoweringObjectFile() {
}
@@ -59,7 +62,7 @@ static bool isSuitableForBSS(const GlobalVariable *GV, bool NoZerosInBSS) {
return false;
// If the global has an explicit section specified, don't put it in BSS.
- if (!GV->getSection().empty())
+ if (GV->hasSection())
return false;
// If -nozero-initialized-in-bss is specified, don't ever use BSS.
@@ -97,20 +100,22 @@ static bool IsNullTerminatedString(const Constant *C) {
return false;
}
-/// Return the MCSymbol for the specified global value. This
-/// symbol is the main label that is the address of the global.
-MCSymbol *TargetLoweringObjectFile::getSymbol(Mangler &M,
- const GlobalValue *GV) const {
+MCSymbol *TargetLoweringObjectFile::getSymbolWithGlobalValueBase(
+ const GlobalValue *GV, StringRef Suffix, Mangler &Mang,
+ const TargetMachine &TM) const {
+ assert(!Suffix.empty());
+
SmallString<60> NameStr;
- M.getNameWithPrefix(NameStr, GV, false);
+ NameStr += DL->getPrivateGlobalPrefix();
+ TM.getNameWithPrefix(NameStr, GV, Mang);
+ NameStr.append(Suffix.begin(), Suffix.end());
return Ctx->GetOrCreateSymbol(NameStr.str());
}
-
-MCSymbol *TargetLoweringObjectFile::
-getCFIPersonalitySymbol(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI) const {
- return getSymbol(*Mang, GV);
+MCSymbol *TargetLoweringObjectFile::getCFIPersonalitySymbol(
+ const GlobalValue *GV, Mangler &Mang, const TargetMachine &TM,
+ MachineModuleInfo *MMI) const {
+ return TM.getSymbol(GV, Mang);
}
void TargetLoweringObjectFile::emitPersonalityValue(MCStreamer &Streamer,
@@ -133,7 +138,7 @@ SectionKind TargetLoweringObjectFile::getKindForGlobal(const GlobalValue *GV,
// Early exit - functions should be always in text sections.
const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
- if (GVar == 0)
+ if (!GVar)
return SectionKind::getText();
// Handle thread-local data first.
@@ -252,7 +257,7 @@ SectionKind TargetLoweringObjectFile::getKindForGlobal(const GlobalValue *GV,
/// the specified global variable or function definition. This should not
/// be passed external (or available externally) globals.
const MCSection *TargetLoweringObjectFile::
-SectionForGlobal(const GlobalValue *GV, SectionKind Kind, Mangler *Mang,
+SectionForGlobal(const GlobalValue *GV, SectionKind Kind, Mangler &Mang,
const TargetMachine &TM) const {
// Select section name.
if (GV->hasSection())
@@ -263,22 +268,26 @@ SectionForGlobal(const GlobalValue *GV, SectionKind Kind, Mangler *Mang,
return SelectSectionForGlobal(GV, Kind, Mang, TM);
}
+bool TargetLoweringObjectFile::isSectionAtomizableBySymbols(
+ const MCSection &Section) const {
+ return false;
+}
// Lame default implementation. Calculate the section name for global.
const MCSection *
TargetLoweringObjectFile::SelectSectionForGlobal(const GlobalValue *GV,
SectionKind Kind,
- Mangler *Mang,
+ Mangler &Mang,
const TargetMachine &TM) const{
assert(!Kind.isThreadLocal() && "Doesn't support TLS");
if (Kind.isText())
return getTextSection();
- if (Kind.isBSS() && BSSSection != 0)
+ if (Kind.isBSS() && BSSSection != nullptr)
return BSSSection;
- if (Kind.isReadOnly() && ReadOnlySection != 0)
+ if (Kind.isReadOnly() && ReadOnlySection != nullptr)
return ReadOnlySection;
return getDataSection();
@@ -288,8 +297,9 @@ TargetLoweringObjectFile::SelectSectionForGlobal(const GlobalValue *GV,
/// specified size and relocation information, return a section that it
/// should be placed in.
const MCSection *
-TargetLoweringObjectFile::getSectionForConstant(SectionKind Kind) const {
- if (Kind.isReadOnly() && ReadOnlySection != 0)
+TargetLoweringObjectFile::getSectionForConstant(SectionKind Kind,
+ const Constant *C) const {
+ if (Kind.isReadOnly() && ReadOnlySection != nullptr)
return ReadOnlySection;
return DataSection;
@@ -298,12 +308,12 @@ TargetLoweringObjectFile::getSectionForConstant(SectionKind Kind) const {
/// getTTypeGlobalReference - Return an MCExpr to use for a
/// reference to the specified global variable from exception
/// handling information.
-const MCExpr *TargetLoweringObjectFile::
-getTTypeGlobalReference(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI, unsigned Encoding,
- MCStreamer &Streamer) const {
+const MCExpr *TargetLoweringObjectFile::getTTypeGlobalReference(
+ const GlobalValue *GV, unsigned Encoding, Mangler &Mang,
+ const TargetMachine &TM, MachineModuleInfo *MMI,
+ MCStreamer &Streamer) const {
const MCSymbolRefExpr *Ref =
- MCSymbolRefExpr::Create(getSymbol(*Mang, GV), getContext());
+ MCSymbolRefExpr::Create(TM.getSymbol(GV, Mang), getContext());
return getTTypeReference(Ref, Encoding, Streamer);
}
diff --git a/contrib/llvm/lib/Target/TargetMachine.cpp b/contrib/llvm/lib/Target/TargetMachine.cpp
index cb42e83..95c8cb6 100644
--- a/contrib/llvm/lib/Target/TargetMachine.cpp
+++ b/contrib/llvm/lib/Target/TargetMachine.cpp
@@ -17,30 +17,18 @@
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCCodeGenInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCTargetOptions.h"
+#include "llvm/MC/SectionKind.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
using namespace llvm;
//---------------------------------------------------------------------------
-// Command-line options that tend to be useful on more than one back-end.
-//
-
-namespace llvm {
- bool HasDivModLibcall;
- bool AsmVerbosityDefault(false);
-}
-
-static cl::opt<bool>
-DataSections("fdata-sections",
- cl::desc("Emit data into separate sections"),
- cl::init(false));
-static cl::opt<bool>
-FunctionSections("ffunction-sections",
- cl::desc("Emit functions into separate sections"),
- cl::init(false));
-
-//---------------------------------------------------------------------------
// TargetMachine Class
//
@@ -48,13 +36,8 @@ TargetMachine::TargetMachine(const Target &T,
StringRef TT, StringRef CPU, StringRef FS,
const TargetOptions &Options)
: TheTarget(T), TargetTriple(TT), TargetCPU(CPU), TargetFS(FS),
- CodeGenInfo(0), AsmInfo(0),
- MCRelaxAll(false),
- MCNoExecStack(false),
- MCSaveTempLabels(false),
- MCUseLoc(true),
- MCUseCFI(true),
- MCUseDwarfDirectory(false),
+ CodeGenInfo(nullptr), AsmInfo(nullptr),
+ RequireStructuredCFG(false),
Options(Options) {
}
@@ -67,7 +50,7 @@ TargetMachine::~TargetMachine() {
void TargetMachine::resetTargetOptions(const MachineFunction *MF) const {
const Function *F = MF->getFunction();
TargetOptions &TO = MF->getTarget().Options;
-
+
#define RESET_OPTION(X, Y) \
do { \
if (F->hasFnAttribute(Y)) \
@@ -84,6 +67,8 @@ void TargetMachine::resetTargetOptions(const MachineFunction *MF) const {
RESET_OPTION(NoNaNsFPMath, "no-nans-fp-math");
RESET_OPTION(UseSoftFloat, "use-soft-float");
RESET_OPTION(DisableTailCalls, "disable-tail-calls");
+
+ TO.MCOptions.SanitizeAddress = F->hasFnAttribute(Attribute::SanitizeAddress);
}
/// getRelocationModel - Returns the code generation relocation model. The
@@ -103,8 +88,8 @@ CodeModel::Model TargetMachine::getCodeModel() const {
}
/// Get the IR-specified TLS model for Var.
-static TLSModel::Model getSelectedTLSModel(const GlobalVariable *Var) {
- switch (Var->getThreadLocalMode()) {
+static TLSModel::Model getSelectedTLSModel(const GlobalValue *GV) {
+ switch (GV->getThreadLocalMode()) {
case GlobalVariable::NotThreadLocal:
llvm_unreachable("getSelectedTLSModel for non-TLS variable");
break;
@@ -121,19 +106,13 @@ static TLSModel::Model getSelectedTLSModel(const GlobalVariable *Var) {
}
TLSModel::Model TargetMachine::getTLSModel(const GlobalValue *GV) const {
- // If GV is an alias then use the aliasee for determining
- // thread-localness.
- if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
- GV = GA->resolveAliasedGlobal(false);
- const GlobalVariable *Var = cast<GlobalVariable>(GV);
-
- bool isLocal = Var->hasLocalLinkage();
- bool isDeclaration = Var->isDeclaration();
+ bool isLocal = GV->hasLocalLinkage();
+ bool isDeclaration = GV->isDeclaration();
bool isPIC = getRelocationModel() == Reloc::PIC_;
bool isPIE = Options.PositionIndependentExecutable;
// FIXME: what should we do for protected and internal visibility?
// For variables, is internal different from hidden?
- bool isHidden = Var->hasHiddenVisibility();
+ bool isHidden = GV->hasHiddenVisibility();
TLSModel::Model Model;
if (isPIC && !isPIE) {
@@ -149,7 +128,7 @@ TLSModel::Model TargetMachine::getTLSModel(const GlobalValue *GV) const {
}
// If the user specified a more specific model, use that.
- TLSModel::Model SelectedModel = getSelectedTLSModel(Var);
+ TLSModel::Model SelectedModel = getSelectedTLSModel(GV);
if (SelectedModel > Model)
return SelectedModel;
@@ -169,26 +148,51 @@ void TargetMachine::setOptLevel(CodeGenOpt::Level Level) const {
CodeGenInfo->setOptLevel(Level);
}
-bool TargetMachine::getAsmVerbosityDefault() {
- return AsmVerbosityDefault;
+bool TargetMachine::getAsmVerbosityDefault() const {
+ return Options.MCOptions.AsmVerbose;
}
void TargetMachine::setAsmVerbosityDefault(bool V) {
- AsmVerbosityDefault = V;
+ Options.MCOptions.AsmVerbose = V;
}
-bool TargetMachine::getFunctionSections() {
- return FunctionSections;
+bool TargetMachine::getFunctionSections() const {
+ return Options.FunctionSections;
}
-bool TargetMachine::getDataSections() {
- return DataSections;
+bool TargetMachine::getDataSections() const {
+ return Options.DataSections;
}
void TargetMachine::setFunctionSections(bool V) {
- FunctionSections = V;
+ Options.FunctionSections = V;
}
void TargetMachine::setDataSections(bool V) {
- DataSections = V;
+ Options.DataSections = V;
+}
+
+void TargetMachine::getNameWithPrefix(SmallVectorImpl<char> &Name,
+ const GlobalValue *GV, Mangler &Mang,
+ bool MayAlwaysUsePrivate) const {
+ if (MayAlwaysUsePrivate || !GV->hasPrivateLinkage()) {
+ // Simple case: If GV is not private, it is not important to find out if
+ // private labels are legal in this case or not.
+ Mang.getNameWithPrefix(Name, GV, false);
+ return;
+ }
+ SectionKind GVKind = TargetLoweringObjectFile::getKindForGlobal(GV, *this);
+ const TargetLoweringObjectFile &TLOF =
+ getTargetLowering()->getObjFileLowering();
+ const MCSection *TheSection = TLOF.SectionForGlobal(GV, GVKind, Mang, *this);
+ bool CannotUsePrivateLabel = TLOF.isSectionAtomizableBySymbols(*TheSection);
+ Mang.getNameWithPrefix(Name, GV, CannotUsePrivateLabel);
+}
+
+MCSymbol *TargetMachine::getSymbol(const GlobalValue *GV, Mangler &Mang) const {
+ SmallString<60> NameStr;
+ getNameWithPrefix(NameStr, GV, Mang);
+ const TargetLoweringObjectFile &TLOF =
+ getTargetLowering()->getObjFileLowering();
+ return TLOF.getContext().GetOrCreateSymbol(NameStr.str());
}
diff --git a/contrib/llvm/lib/Target/TargetMachineC.cpp b/contrib/llvm/lib/Target/TargetMachineC.cpp
index 3d5f827..20923c9 100644
--- a/contrib/llvm/lib/Target/TargetMachineC.cpp
+++ b/contrib/llvm/lib/Target/TargetMachineC.cpp
@@ -18,10 +18,11 @@
#include "llvm/IR/Module.h"
#include "llvm/PassManager.h"
#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/Host.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/Host.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdlib>
@@ -29,23 +30,6 @@
using namespace llvm;
-inline DataLayout *unwrap(LLVMTargetDataRef P) {
- return reinterpret_cast<DataLayout*>(P);
-}
-
-inline LLVMTargetDataRef wrap(const DataLayout *P) {
- return reinterpret_cast<LLVMTargetDataRef>(const_cast<DataLayout*>(P));
-}
-
-inline TargetLibraryInfo *unwrap(LLVMTargetLibraryInfoRef P) {
- return reinterpret_cast<TargetLibraryInfo*>(P);
-}
-
-inline LLVMTargetLibraryInfoRef wrap(const TargetLibraryInfo *P) {
- TargetLibraryInfo *X = const_cast<TargetLibraryInfo*>(P);
- return reinterpret_cast<LLVMTargetLibraryInfoRef>(X);
-}
-
inline TargetMachine *unwrap(LLVMTargetMachineRef P) {
return reinterpret_cast<TargetMachine*>(P);
}
@@ -62,7 +46,7 @@ inline LLVMTargetRef wrap(const Target * P) {
LLVMTargetRef LLVMGetFirstTarget() {
if(TargetRegistry::begin() == TargetRegistry::end()) {
- return NULL;
+ return nullptr;
}
const Target* target = &*TargetRegistry::begin();
@@ -80,7 +64,7 @@ LLVMTargetRef LLVMGetTargetFromName(const char *Name) {
return wrap(&*IT);
}
- return NULL;
+ return nullptr;
}
LLVMBool LLVMGetTargetFromTriple(const char* TripleStr, LLVMTargetRef *T,
@@ -212,7 +196,8 @@ static LLVMBool LLVMTargetMachineEmit(LLVMTargetMachineRef T, LLVMModuleRef M,
*ErrorMessage = strdup(error.c_str());
return true;
}
- pass.add(new DataLayout(*td));
+ Mod->setDataLayout(td);
+ pass.add(new DataLayoutPass(Mod));
TargetMachine::CodeGenFileType ft;
switch (codegen) {
@@ -238,7 +223,7 @@ static LLVMBool LLVMTargetMachineEmit(LLVMTargetMachineRef T, LLVMModuleRef M,
LLVMBool LLVMTargetMachineEmitToFile(LLVMTargetMachineRef T, LLVMModuleRef M,
char* Filename, LLVMCodeGenFileType codegen, char** ErrorMessage) {
std::string error;
- raw_fd_ostream dest(Filename, error, sys::fs::F_Binary);
+ raw_fd_ostream dest(Filename, error, sys::fs::F_None);
if (!error.empty()) {
*ErrorMessage = strdup(error.c_str());
return true;
@@ -267,3 +252,7 @@ LLVMBool LLVMTargetMachineEmitToMemoryBuffer(LLVMTargetMachineRef T,
char *LLVMGetDefaultTargetTriple(void) {
return strdup(sys::getDefaultTargetTriple().c_str());
}
+
+void LLVMAddAnalysisPasses(LLVMTargetMachineRef T, LLVMPassManagerRef PM) {
+ unwrap(T)->addAnalysisPasses(*unwrap(PM));
+}
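A minimal sketch of driving the new C API entry point added above from C++, assuming a target machine has already been created through the llvm-c interface; the helper name is hypothetical.

  #include "llvm-c/Core.h"
  #include "llvm-c/TargetMachine.h"

  void addAnalyses(LLVMTargetMachineRef TM) {
    LLVMPassManagerRef PM = LLVMCreatePassManager();
    // Registers target-specific analyses (e.g. target transform info) with PM.
    LLVMAddAnalysisPasses(TM, PM);
    LLVMDisposePassManager(PM);
  }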
diff --git a/contrib/llvm/lib/Target/TargetSubtargetInfo.cpp b/contrib/llvm/lib/Target/TargetSubtargetInfo.cpp
index 10e8db5..386a813 100644
--- a/contrib/llvm/lib/Target/TargetSubtargetInfo.cpp
+++ b/contrib/llvm/lib/Target/TargetSubtargetInfo.cpp
@@ -12,8 +12,8 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/CommandLine.h"
-#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
//---------------------------------------------------------------------------
@@ -24,11 +24,12 @@ TargetSubtargetInfo::TargetSubtargetInfo() {}
TargetSubtargetInfo::~TargetSubtargetInfo() {}
// Temporary option to compare overall performance change when moving from the
-// SD scheduler to the MachineScheduler pass pipeline. It should be removed
-// before 3.4. The normal way to enable/disable the MachineScheduling pass
-// itself is by using -enable-misched. For targets that already use MI sched
-// (via MySubTarget::enableMachineScheduler()) -misched-bench=false negates the
-// subtarget hook.
+// SD scheduler to the MachineScheduler pass pipeline. This is convenient for
+// benchmarking during the transition from SD to MI scheduling. Once armv7 makes
+// the switch, it should go away. The normal way to enable/disable the
+// MachineScheduling pass itself is by using -enable-misched. For targets that
+// already use MI sched (via MySubTarget::enableMachineScheduler())
+// -misched-bench=false negates the subtarget hook.
static cl::opt<bool> BenchMachineSched("misched-bench", cl::Hidden,
cl::desc("Migrate from the target's default SD scheduler to MI scheduler"));
@@ -38,17 +39,21 @@ bool TargetSubtargetInfo::useMachineScheduler() const {
return enableMachineScheduler();
}
+bool TargetSubtargetInfo::enableAtomicExpandLoadLinked() const {
+ return true;
+}
+
bool TargetSubtargetInfo::enableMachineScheduler() const {
return false;
}
-bool TargetSubtargetInfo::enablePostRAScheduler(
- CodeGenOpt::Level OptLevel,
- AntiDepBreakMode& Mode,
- RegClassVector& CriticalPathRCs) const {
- Mode = ANTIDEP_NONE;
- CriticalPathRCs.clear();
- return false;
+bool TargetSubtargetInfo::enableRALocalReassignment(
+ CodeGenOpt::Level OptLevel) const {
+ return true;
+}
+
+bool TargetSubtargetInfo::enablePostMachineScheduler() const {
+ return getSchedModel()->PostRAScheduler;
}
bool TargetSubtargetInfo::useAA() const {
diff --git a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
new file mode 100644
index 0000000..a365f62
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
@@ -0,0 +1,507 @@
+//===-- X86AsmInstrumentation.cpp - Instrument X86 inline assembly C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/X86BaseInfo.h"
+#include "X86AsmInstrumentation.h"
+#include "X86Operand.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/IR/Function.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstBuilder.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCTargetAsmParser.h"
+#include "llvm/MC/MCTargetOptions.h"
+#include "llvm/Support/CommandLine.h"
+
+namespace llvm {
+namespace {
+
+static cl::opt<bool> ClAsanInstrumentAssembly(
+ "asan-instrument-assembly",
+ cl::desc("instrument assembly with AddressSanitizer checks"), cl::Hidden,
+ cl::init(false));
+
+bool IsStackReg(unsigned Reg) {
+ return Reg == X86::RSP || Reg == X86::ESP || Reg == X86::SP;
+}
+
+std::string FuncName(unsigned AccessSize, bool IsWrite) {
+ return std::string("__asan_report_") + (IsWrite ? "store" : "load") +
+ utostr(AccessSize);
+}
+
+class X86AddressSanitizer : public X86AsmInstrumentation {
+public:
+ X86AddressSanitizer(const MCSubtargetInfo &STI) : STI(STI) {}
+ virtual ~X86AddressSanitizer() {}
+
+ // X86AsmInstrumentation implementation:
+ virtual void InstrumentInstruction(
+ const MCInst &Inst, OperandVector &Operands, MCContext &Ctx,
+ const MCInstrInfo &MII, MCStreamer &Out) override {
+ InstrumentMOV(Inst, Operands, Ctx, MII, Out);
+ }
+
+ // Should be implemented differently in x86_32 and x86_64 subclasses.
+ virtual void InstrumentMemOperandSmallImpl(
+ X86Operand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx,
+ MCStreamer &Out) = 0;
+ virtual void InstrumentMemOperandLargeImpl(
+ X86Operand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx,
+ MCStreamer &Out) = 0;
+
+ void InstrumentMemOperand(MCParsedAsmOperand &Op, unsigned AccessSize,
+ bool IsWrite, MCContext &Ctx, MCStreamer &Out);
+ void InstrumentMOV(const MCInst &Inst, OperandVector &Operands,
+ MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out);
+ void EmitInstruction(MCStreamer &Out, const MCInst &Inst) {
+ Out.EmitInstruction(Inst, STI);
+ }
+
+ void EmitLabel(MCStreamer &Out, MCSymbol *Label) { Out.EmitLabel(Label); }
+
+protected:
+ const MCSubtargetInfo &STI;
+};
+
+void X86AddressSanitizer::InstrumentMemOperand(
+ MCParsedAsmOperand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx,
+ MCStreamer &Out) {
+ assert(Op.isMem() && "Op should be a memory operand.");
+ assert((AccessSize & (AccessSize - 1)) == 0 && AccessSize <= 16 &&
+ "AccessSize should be a power of two, less or equal than 16.");
+
+ X86Operand &MemOp = static_cast<X86Operand &>(Op);
+ // FIXME: get rid of this limitation.
+ if (IsStackReg(MemOp.getMemBaseReg()) || IsStackReg(MemOp.getMemIndexReg()))
+ return;
+
+ // FIXME: take into account load/store alignment.
+ if (AccessSize < 8)
+ InstrumentMemOperandSmallImpl(MemOp, AccessSize, IsWrite, Ctx, Out);
+ else
+ InstrumentMemOperandLargeImpl(MemOp, AccessSize, IsWrite, Ctx, Out);
+}
+
+void X86AddressSanitizer::InstrumentMOV(
+ const MCInst &Inst, OperandVector &Operands, MCContext &Ctx,
+ const MCInstrInfo &MII, MCStreamer &Out) {
+ // Access size in bytes.
+ unsigned AccessSize = 0;
+
+ switch (Inst.getOpcode()) {
+ case X86::MOV8mi:
+ case X86::MOV8mr:
+ case X86::MOV8rm:
+ AccessSize = 1;
+ break;
+ case X86::MOV16mi:
+ case X86::MOV16mr:
+ case X86::MOV16rm:
+ AccessSize = 2;
+ break;
+ case X86::MOV32mi:
+ case X86::MOV32mr:
+ case X86::MOV32rm:
+ AccessSize = 4;
+ break;
+ case X86::MOV64mi32:
+ case X86::MOV64mr:
+ case X86::MOV64rm:
+ AccessSize = 8;
+ break;
+ case X86::MOVAPDmr:
+ case X86::MOVAPSmr:
+ case X86::MOVAPDrm:
+ case X86::MOVAPSrm:
+ AccessSize = 16;
+ break;
+ default:
+ return;
+ }
+
+ const bool IsWrite = MII.get(Inst.getOpcode()).mayStore();
+ for (unsigned Ix = 0; Ix < Operands.size(); ++Ix) {
+ assert(Operands[Ix]);
+ MCParsedAsmOperand &Op = *Operands[Ix];
+ if (Op.isMem())
+ InstrumentMemOperand(Op, AccessSize, IsWrite, Ctx, Out);
+ }
+}
+
+class X86AddressSanitizer32 : public X86AddressSanitizer {
+public:
+ static const long kShadowOffset = 0x20000000;
+
+ X86AddressSanitizer32(const MCSubtargetInfo &STI)
+ : X86AddressSanitizer(STI) {}
+ virtual ~X86AddressSanitizer32() {}
+
+ virtual void InstrumentMemOperandSmallImpl(
+ X86Operand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx,
+ MCStreamer &Out) override;
+ virtual void InstrumentMemOperandLargeImpl(
+ X86Operand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx,
+ MCStreamer &Out) override;
+
+ private:
+ void EmitCallAsanReport(MCContext &Ctx, MCStreamer &Out, unsigned AccessSize,
+ bool IsWrite, unsigned AddressReg) {
+ EmitInstruction(Out, MCInstBuilder(X86::CLD));
+ EmitInstruction(Out, MCInstBuilder(X86::MMX_EMMS));
+
+ EmitInstruction(Out, MCInstBuilder(X86::AND64ri8).addReg(X86::ESP)
+ .addReg(X86::ESP).addImm(-16));
+ EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(AddressReg));
+
+
+ const std::string& Fn = FuncName(AccessSize, IsWrite);
+ MCSymbol *FnSym = Ctx.GetOrCreateSymbol(StringRef(Fn));
+ const MCSymbolRefExpr *FnExpr =
+ MCSymbolRefExpr::Create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx);
+ EmitInstruction(Out, MCInstBuilder(X86::CALLpcrel32).addExpr(FnExpr));
+ }
+};
+
+void X86AddressSanitizer32::InstrumentMemOperandSmallImpl(
+ X86Operand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx,
+ MCStreamer &Out) {
+ EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(X86::EAX));
+ EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(X86::ECX));
+ EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(X86::EDX));
+ EmitInstruction(Out, MCInstBuilder(X86::PUSHF32));
+
+ {
+ MCInst Inst;
+ Inst.setOpcode(X86::LEA32r);
+ Inst.addOperand(MCOperand::CreateReg(X86::EAX));
+ Op.addMemOperands(Inst, 5);
+ EmitInstruction(Out, Inst);
+ }
+
+ EmitInstruction(
+ Out, MCInstBuilder(X86::MOV32rr).addReg(X86::ECX).addReg(X86::EAX));
+ EmitInstruction(Out, MCInstBuilder(X86::SHR32ri).addReg(X86::ECX)
+ .addReg(X86::ECX).addImm(3));
+
+ {
+ MCInst Inst;
+ Inst.setOpcode(X86::MOV8rm);
+ Inst.addOperand(MCOperand::CreateReg(X86::CL));
+ const MCExpr *Disp = MCConstantExpr::Create(kShadowOffset, Ctx);
+ std::unique_ptr<X86Operand> Op(
+ X86Operand::CreateMem(0, Disp, X86::ECX, 0, 1, SMLoc(), SMLoc()));
+ Op->addMemOperands(Inst, 5);
+ EmitInstruction(Out, Inst);
+ }
+
+ EmitInstruction(Out,
+ MCInstBuilder(X86::TEST8rr).addReg(X86::CL).addReg(X86::CL));
+ MCSymbol *DoneSym = Ctx.CreateTempSymbol();
+ const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
+ EmitInstruction(Out, MCInstBuilder(X86::JE_4).addExpr(DoneExpr));
+
+ EmitInstruction(
+ Out, MCInstBuilder(X86::MOV32rr).addReg(X86::EDX).addReg(X86::EAX));
+ EmitInstruction(Out, MCInstBuilder(X86::AND32ri).addReg(X86::EDX)
+ .addReg(X86::EDX).addImm(7));
+
+ switch (AccessSize) {
+ case 1:
+ break;
+ case 2: {
+ MCInst Inst;
+ Inst.setOpcode(X86::LEA32r);
+ Inst.addOperand(MCOperand::CreateReg(X86::EDX));
+
+ const MCExpr *Disp = MCConstantExpr::Create(1, Ctx);
+ std::unique_ptr<X86Operand> Op(
+ X86Operand::CreateMem(0, Disp, X86::EDX, 0, 1, SMLoc(), SMLoc()));
+ Op->addMemOperands(Inst, 5);
+ EmitInstruction(Out, Inst);
+ break;
+ }
+ case 4:
+ EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8).addReg(X86::EDX)
+ .addReg(X86::EDX).addImm(3));
+ break;
+ default:
+ assert(false && "Incorrect access size");
+ break;
+ }
+
+ EmitInstruction(
+ Out, MCInstBuilder(X86::MOVSX32rr8).addReg(X86::ECX).addReg(X86::CL));
+ EmitInstruction(
+ Out, MCInstBuilder(X86::CMP32rr).addReg(X86::EDX).addReg(X86::ECX));
+ EmitInstruction(Out, MCInstBuilder(X86::JL_4).addExpr(DoneExpr));
+
+ EmitCallAsanReport(Ctx, Out, AccessSize, IsWrite, X86::EAX);
+ EmitLabel(Out, DoneSym);
+
+ EmitInstruction(Out, MCInstBuilder(X86::POPF32));
+ EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(X86::EDX));
+ EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(X86::ECX));
+ EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(X86::EAX));
+}
+
+void X86AddressSanitizer32::InstrumentMemOperandLargeImpl(
+ X86Operand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx,
+ MCStreamer &Out) {
+ EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(X86::EAX));
+ EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(X86::ECX));
+ EmitInstruction(Out, MCInstBuilder(X86::PUSHF32));
+
+ {
+ MCInst Inst;
+ Inst.setOpcode(X86::LEA32r);
+ Inst.addOperand(MCOperand::CreateReg(X86::EAX));
+ Op.addMemOperands(Inst, 5);
+ EmitInstruction(Out, Inst);
+ }
+ EmitInstruction(
+ Out, MCInstBuilder(X86::MOV32rr).addReg(X86::ECX).addReg(X86::EAX));
+ EmitInstruction(Out, MCInstBuilder(X86::SHR32ri).addReg(X86::ECX)
+ .addReg(X86::ECX).addImm(3));
+ {
+ MCInst Inst;
+ switch (AccessSize) {
+ case 8:
+ Inst.setOpcode(X86::CMP8mi);
+ break;
+ case 16:
+ Inst.setOpcode(X86::CMP16mi);
+ break;
+ default:
+ assert(false && "Incorrect access size");
+ break;
+ }
+ const MCExpr *Disp = MCConstantExpr::Create(kShadowOffset, Ctx);
+ std::unique_ptr<X86Operand> Op(
+ X86Operand::CreateMem(0, Disp, X86::ECX, 0, 1, SMLoc(), SMLoc()));
+ Op->addMemOperands(Inst, 5);
+ Inst.addOperand(MCOperand::CreateImm(0));
+ EmitInstruction(Out, Inst);
+ }
+ MCSymbol *DoneSym = Ctx.CreateTempSymbol();
+ const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
+ EmitInstruction(Out, MCInstBuilder(X86::JE_4).addExpr(DoneExpr));
+
+ EmitCallAsanReport(Ctx, Out, AccessSize, IsWrite, X86::EAX);
+ EmitLabel(Out, DoneSym);
+
+ EmitInstruction(Out, MCInstBuilder(X86::POPF32));
+ EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(X86::ECX));
+ EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(X86::EAX));
+}
+
+class X86AddressSanitizer64 : public X86AddressSanitizer {
+public:
+ static const long kShadowOffset = 0x7fff8000;
+
+ X86AddressSanitizer64(const MCSubtargetInfo &STI)
+ : X86AddressSanitizer(STI) {}
+ virtual ~X86AddressSanitizer64() {}
+
+ virtual void InstrumentMemOperandSmallImpl(
+ X86Operand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx,
+ MCStreamer &Out) override;
+ virtual void InstrumentMemOperandLargeImpl(
+ X86Operand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx,
+ MCStreamer &Out) override;
+
+private:
+ void EmitAdjustRSP(MCContext &Ctx, MCStreamer &Out, long Offset) {
+ MCInst Inst;
+ Inst.setOpcode(X86::LEA64r);
+ Inst.addOperand(MCOperand::CreateReg(X86::RSP));
+
+ const MCExpr *Disp = MCConstantExpr::Create(Offset, Ctx);
+ std::unique_ptr<X86Operand> Op(
+ X86Operand::CreateMem(0, Disp, X86::RSP, 0, 1, SMLoc(), SMLoc()));
+ Op->addMemOperands(Inst, 5);
+ EmitInstruction(Out, Inst);
+ }
+
+ void EmitCallAsanReport(MCContext &Ctx, MCStreamer &Out, unsigned AccessSize,
+ bool IsWrite) {
+ EmitInstruction(Out, MCInstBuilder(X86::CLD));
+ EmitInstruction(Out, MCInstBuilder(X86::MMX_EMMS));
+
+ EmitInstruction(Out, MCInstBuilder(X86::AND64ri8).addReg(X86::RSP)
+ .addReg(X86::RSP).addImm(-16));
+
+ const std::string& Fn = FuncName(AccessSize, IsWrite);
+ MCSymbol *FnSym = Ctx.GetOrCreateSymbol(StringRef(Fn));
+ const MCSymbolRefExpr *FnExpr =
+ MCSymbolRefExpr::Create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx);
+ EmitInstruction(Out, MCInstBuilder(X86::CALL64pcrel32).addExpr(FnExpr));
+ }
+};
+
+void X86AddressSanitizer64::InstrumentMemOperandSmallImpl(
+ X86Operand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx,
+ MCStreamer &Out) {
+ EmitAdjustRSP(Ctx, Out, -128);
+ EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(X86::RAX));
+ EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(X86::RCX));
+ EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(X86::RDI));
+ EmitInstruction(Out, MCInstBuilder(X86::PUSHF64));
+ {
+ MCInst Inst;
+ Inst.setOpcode(X86::LEA64r);
+ Inst.addOperand(MCOperand::CreateReg(X86::RDI));
+ Op.addMemOperands(Inst, 5);
+ EmitInstruction(Out, Inst);
+ }
+ EmitInstruction(
+ Out, MCInstBuilder(X86::MOV64rr).addReg(X86::RAX).addReg(X86::RDI));
+ EmitInstruction(Out, MCInstBuilder(X86::SHR64ri).addReg(X86::RAX)
+ .addReg(X86::RAX).addImm(3));
+ {
+ MCInst Inst;
+ Inst.setOpcode(X86::MOV8rm);
+ Inst.addOperand(MCOperand::CreateReg(X86::AL));
+ const MCExpr *Disp = MCConstantExpr::Create(kShadowOffset, Ctx);
+ std::unique_ptr<X86Operand> Op(
+ X86Operand::CreateMem(0, Disp, X86::RAX, 0, 1, SMLoc(), SMLoc()));
+ Op->addMemOperands(Inst, 5);
+ EmitInstruction(Out, Inst);
+ }
+
+ EmitInstruction(Out,
+ MCInstBuilder(X86::TEST8rr).addReg(X86::AL).addReg(X86::AL));
+ MCSymbol *DoneSym = Ctx.CreateTempSymbol();
+ const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
+ EmitInstruction(Out, MCInstBuilder(X86::JE_4).addExpr(DoneExpr));
+
+ EmitInstruction(
+ Out, MCInstBuilder(X86::MOV32rr).addReg(X86::ECX).addReg(X86::EDI));
+ EmitInstruction(Out, MCInstBuilder(X86::AND32ri).addReg(X86::ECX)
+ .addReg(X86::ECX).addImm(7));
+
+ switch (AccessSize) {
+ case 1:
+ break;
+ case 2: {
+ MCInst Inst;
+ Inst.setOpcode(X86::LEA32r);
+ Inst.addOperand(MCOperand::CreateReg(X86::ECX));
+
+ const MCExpr *Disp = MCConstantExpr::Create(1, Ctx);
+ std::unique_ptr<X86Operand> Op(
+ X86Operand::CreateMem(0, Disp, X86::ECX, 0, 1, SMLoc(), SMLoc()));
+ Op->addMemOperands(Inst, 5);
+ EmitInstruction(Out, Inst);
+ break;
+ }
+ case 4:
+ EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8).addReg(X86::ECX)
+ .addReg(X86::ECX).addImm(3));
+ break;
+ default:
+ assert(false && "Incorrect access size");
+ break;
+ }
+
+ EmitInstruction(
+ Out, MCInstBuilder(X86::MOVSX32rr8).addReg(X86::EAX).addReg(X86::AL));
+ EmitInstruction(
+ Out, MCInstBuilder(X86::CMP32rr).addReg(X86::ECX).addReg(X86::EAX));
+ EmitInstruction(Out, MCInstBuilder(X86::JL_4).addExpr(DoneExpr));
+
+ EmitCallAsanReport(Ctx, Out, AccessSize, IsWrite);
+ EmitLabel(Out, DoneSym);
+
+ EmitInstruction(Out, MCInstBuilder(X86::POPF64));
+ EmitInstruction(Out, MCInstBuilder(X86::POP64r).addReg(X86::RDI));
+ EmitInstruction(Out, MCInstBuilder(X86::POP64r).addReg(X86::RCX));
+ EmitInstruction(Out, MCInstBuilder(X86::POP64r).addReg(X86::RAX));
+ EmitAdjustRSP(Ctx, Out, 128);
+}
+
+void X86AddressSanitizer64::InstrumentMemOperandLargeImpl(
+ X86Operand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx,
+ MCStreamer &Out) {
+ EmitAdjustRSP(Ctx, Out, -128);
+ EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(X86::RAX));
+ EmitInstruction(Out, MCInstBuilder(X86::PUSHF64));
+
+ {
+ MCInst Inst;
+ Inst.setOpcode(X86::LEA64r);
+ Inst.addOperand(MCOperand::CreateReg(X86::RAX));
+ Op.addMemOperands(Inst, 5);
+ EmitInstruction(Out, Inst);
+ }
+ EmitInstruction(Out, MCInstBuilder(X86::SHR64ri).addReg(X86::RAX)
+ .addReg(X86::RAX).addImm(3));
+ {
+ MCInst Inst;
+ switch (AccessSize) {
+ case 8:
+ Inst.setOpcode(X86::CMP8mi);
+ break;
+ case 16:
+ Inst.setOpcode(X86::CMP16mi);
+ break;
+ default:
+ assert(false && "Incorrect access size");
+ break;
+ }
+ const MCExpr *Disp = MCConstantExpr::Create(kShadowOffset, Ctx);
+ std::unique_ptr<X86Operand> Op(
+ X86Operand::CreateMem(0, Disp, X86::RAX, 0, 1, SMLoc(), SMLoc()));
+ Op->addMemOperands(Inst, 5);
+ Inst.addOperand(MCOperand::CreateImm(0));
+ EmitInstruction(Out, Inst);
+ }
+
+ MCSymbol *DoneSym = Ctx.CreateTempSymbol();
+ const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
+ EmitInstruction(Out, MCInstBuilder(X86::JE_4).addExpr(DoneExpr));
+
+ EmitCallAsanReport(Ctx, Out, AccessSize, IsWrite);
+ EmitLabel(Out, DoneSym);
+
+ EmitInstruction(Out, MCInstBuilder(X86::POPF64));
+ EmitInstruction(Out, MCInstBuilder(X86::POP64r).addReg(X86::RAX));
+ EmitAdjustRSP(Ctx, Out, 128);
+}
+
+} // End anonymous namespace
+
+X86AsmInstrumentation::X86AsmInstrumentation() {}
+X86AsmInstrumentation::~X86AsmInstrumentation() {}
+
+void X86AsmInstrumentation::InstrumentInstruction(
+ const MCInst &Inst, OperandVector &Operands, MCContext &Ctx,
+ const MCInstrInfo &MII, MCStreamer &Out) {}
+
+X86AsmInstrumentation *
+CreateX86AsmInstrumentation(const MCTargetOptions &MCOptions,
+ const MCContext &Ctx, const MCSubtargetInfo &STI) {
+ Triple T(STI.getTargetTriple());
+ const bool hasCompilerRTSupport = T.isOSLinux();
+ if (ClAsanInstrumentAssembly && hasCompilerRTSupport &&
+ MCOptions.SanitizeAddress) {
+ if ((STI.getFeatureBits() & X86::Mode32Bit) != 0)
+ return new X86AddressSanitizer32(STI);
+ if ((STI.getFeatureBits() & X86::Mode64Bit) != 0)
+ return new X86AddressSanitizer64(STI);
+ }
+ return new X86AsmInstrumentation();
+}
+
+} // End llvm namespace
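For reference, the instrumentation emitted by the 32- and 64-bit classes above performs the usual AddressSanitizer shadow-memory check. A minimal C++ sketch of the equivalent logic for the "small" (1/2/4-byte) case follows; the kShadowOffset values come from the classes above, while the exact reporting routine produced by FuncName() is not shown here and is left out of the sketch:

    #include <cstdint>

    // Sketch only: mirrors the shr/and/movsx/cmp sequence emitted above.
    bool AsanSmallAccessIsBad(uintptr_t Addr, unsigned AccessSize,
                              uintptr_t ShadowOffset /* e.g. 0x7fff8000 */) {
      int8_t Shadow = *reinterpret_cast<int8_t *>((Addr >> 3) + ShadowOffset);
      if (Shadow == 0)
        return false;                         // whole 8-byte granule is addressable
      int Last = (Addr & 7) + AccessSize - 1; // last byte touched within the granule
      return Last >= Shadow;                  // partially poisoned granule
    }

For 8- and 16-byte accesses, InstrumentMemOperandLargeImpl above simply compares the shadow byte (or 16-bit word) against zero and calls the reporting helper via EmitCallAsanReport when it is non-zero.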
diff --git a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.h b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.h
new file mode 100644
index 0000000..1bc3c09
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.h
@@ -0,0 +1,54 @@
+//===- X86AsmInstrumentation.h - Instrument X86 inline assembly *- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef X86_ASM_INSTRUMENTATION_H
+#define X86_ASM_INSTRUMENTATION_H
+
+#include "llvm/ADT/SmallVector.h"
+
+#include <memory>
+
+namespace llvm {
+
+class MCContext;
+class MCInst;
+class MCInstrInfo;
+class MCParsedAsmOperand;
+class MCStreamer;
+class MCSubtargetInfo;
+class MCTargetOptions;
+
+class X86AsmInstrumentation;
+
+X86AsmInstrumentation *
+CreateX86AsmInstrumentation(const MCTargetOptions &MCOptions,
+ const MCContext &Ctx, const MCSubtargetInfo &STI);
+
+class X86AsmInstrumentation {
+public:
+ virtual ~X86AsmInstrumentation();
+
+ // Instruments Inst. Should be called just before the original
+ // instruction is sent to Out.
+ virtual void InstrumentInstruction(
+ const MCInst &Inst,
+ SmallVectorImpl<std::unique_ptr<MCParsedAsmOperand>> &Operands,
+ MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out);
+
+protected:
+ friend X86AsmInstrumentation *
+ CreateX86AsmInstrumentation(const MCTargetOptions &MCOptions,
+ const MCContext &Ctx, const MCSubtargetInfo &STI);
+
+ X86AsmInstrumentation();
+};
+
+} // End llvm namespace
+
+#endif // X86_ASM_INSTRUMENTATION_H
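As the comment above says, a target assembly parser hands each instruction to the instrumentation object just before emitting it. A rough illustration of the intended hookup (variable names here are illustrative; the actual wiring appears in the X86AsmParser changes below, which route emission through an EmitInstruction() wrapper):

    // Constructed once, e.g. in the parser's constructor:
    std::unique_ptr<X86AsmInstrumentation> Instrumentation(
        CreateX86AsmInstrumentation(MCOptions, Parser.getContext(), STI));

    // Invoked for every parsed instruction, right before it reaches Out:
    Instrumentation->InstrumentInstruction(Inst, Operands, Ctx, MII, Out);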
diff --git a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 22b79b3..a11a238 100644
--- a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -8,6 +8,9 @@
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/X86BaseInfo.h"
+#include "X86AsmInstrumentation.h"
+#include "X86AsmParserCommon.h"
+#include "X86Operand.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
@@ -17,6 +20,7 @@
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
@@ -28,19 +32,23 @@
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
+#include <memory>
using namespace llvm;
namespace {
-struct X86Operand;
static const char OpPrecedence[] = {
- 0, // IC_PLUS
- 0, // IC_MINUS
- 1, // IC_MULTIPLY
- 1, // IC_DIVIDE
- 2, // IC_RPAREN
- 3, // IC_LPAREN
+ 0, // IC_OR
+ 1, // IC_AND
+ 2, // IC_LSHIFT
+ 2, // IC_RSHIFT
+ 3, // IC_PLUS
+ 3, // IC_MINUS
+ 4, // IC_MULTIPLY
+ 4, // IC_DIVIDE
+ 5, // IC_RPAREN
+ 6, // IC_LPAREN
0, // IC_IMM
0 // IC_REGISTER
};
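The added IC_OR/IC_AND/IC_LSHIFT/IC_RSHIFT entries make '|', '&', '<<' and '>>' bind more loosely than '+'/'-' and '*'/'/', matching C's relative precedence among these operators. A tiny stand-alone check of the grouping the table encodes (illustrative only, not part of the patch):

    #include <cassert>

    int main() {
      // Shifts bind tighter than '|', and '+'/'-' tighter than shifts, so an
      // Intel-syntax operand like "1 << 2 | 3" groups as (1 << 2) | 3 and
      // "2 + 1 << 3" as (2 + 1) << 3.
      assert(((1 << 2) | 3) == 7);
      assert(((2 + 1) << 3) == 24);
      return 0;
    }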
@@ -48,10 +56,22 @@ static const char OpPrecedence[] = {
class X86AsmParser : public MCTargetAsmParser {
MCSubtargetInfo &STI;
MCAsmParser &Parser;
+ const MCInstrInfo &MII;
ParseInstructionInfo *InstInfo;
+ std::unique_ptr<X86AsmInstrumentation> Instrumentation;
private:
+ SMLoc consumeToken() {
+ SMLoc Result = Parser.getTok().getLoc();
+ Parser.Lex();
+ return Result;
+ }
+
enum InfixCalculatorTok {
- IC_PLUS = 0,
+ IC_OR = 0,
+ IC_AND,
+ IC_LSHIFT,
+ IC_RSHIFT,
+ IC_PLUS,
IC_MINUS,
IC_MULTIPLY,
IC_DIVIDE,
@@ -176,6 +196,30 @@ private:
Val = Op1.second / Op2.second;
OperandStack.push_back(std::make_pair(IC_IMM, Val));
break;
+ case IC_OR:
+ assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
+ "Or operation with an immediate and a register!");
+ Val = Op1.second | Op2.second;
+ OperandStack.push_back(std::make_pair(IC_IMM, Val));
+ break;
+ case IC_AND:
+ assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
+ "And operation with an immediate and a register!");
+ Val = Op1.second & Op2.second;
+ OperandStack.push_back(std::make_pair(IC_IMM, Val));
+ break;
+ case IC_LSHIFT:
+ assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
+ "Left shift operation with an immediate and a register!");
+ Val = Op1.second << Op2.second;
+ OperandStack.push_back(std::make_pair(IC_IMM, Val));
+ break;
+ case IC_RSHIFT:
+ assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
+ "Right shift operation with an immediate and a register!");
+ Val = Op1.second >> Op2.second;
+ OperandStack.push_back(std::make_pair(IC_IMM, Val));
+ break;
}
}
}
@@ -185,8 +229,13 @@ private:
};
enum IntelExprState {
+ IES_OR,
+ IES_AND,
+ IES_LSHIFT,
+ IES_RSHIFT,
IES_PLUS,
IES_MINUS,
+ IES_NOT,
IES_MULTIPLY,
IES_DIVIDE,
IES_LBRAC,
@@ -211,7 +260,7 @@ private:
public:
IntelExprStateMachine(int64_t imm, bool stoponlbrac, bool addimmprefix) :
State(IES_PLUS), PrevState(IES_ERROR), BaseReg(0), IndexReg(0), TmpReg(0),
- Scale(1), Imm(imm), Sym(0), StopOnLBrac(stoponlbrac),
+ Scale(1), Imm(imm), Sym(nullptr), StopOnLBrac(stoponlbrac),
AddImmPrefix(addimmprefix) { Info.clear(); }
unsigned getBaseReg() { return BaseReg; }
@@ -231,6 +280,66 @@ private:
return Info;
}
+ void onOr() {
+ IntelExprState CurrState = State;
+ switch (State) {
+ default:
+ State = IES_ERROR;
+ break;
+ case IES_INTEGER:
+ case IES_RPAREN:
+ case IES_REGISTER:
+ State = IES_OR;
+ IC.pushOperator(IC_OR);
+ break;
+ }
+ PrevState = CurrState;
+ }
+ void onAnd() {
+ IntelExprState CurrState = State;
+ switch (State) {
+ default:
+ State = IES_ERROR;
+ break;
+ case IES_INTEGER:
+ case IES_RPAREN:
+ case IES_REGISTER:
+ State = IES_AND;
+ IC.pushOperator(IC_AND);
+ break;
+ }
+ PrevState = CurrState;
+ }
+ void onLShift() {
+ IntelExprState CurrState = State;
+ switch (State) {
+ default:
+ State = IES_ERROR;
+ break;
+ case IES_INTEGER:
+ case IES_RPAREN:
+ case IES_REGISTER:
+ State = IES_LSHIFT;
+ IC.pushOperator(IC_LSHIFT);
+ break;
+ }
+ PrevState = CurrState;
+ }
+ void onRShift() {
+ IntelExprState CurrState = State;
+ switch (State) {
+ default:
+ State = IES_ERROR;
+ break;
+ case IES_INTEGER:
+ case IES_RPAREN:
+ case IES_REGISTER:
+ State = IES_RSHIFT;
+ IC.pushOperator(IC_RSHIFT);
+ break;
+ }
+ PrevState = CurrState;
+ }
void onPlus() {
IntelExprState CurrState = State;
switch (State) {
@@ -264,6 +373,7 @@ private:
State = IES_ERROR;
break;
case IES_PLUS:
+ case IES_NOT:
case IES_MULTIPLY:
case IES_DIVIDE:
case IES_LPAREN:
@@ -293,6 +403,19 @@ private:
}
PrevState = CurrState;
}
+ void onNot() {
+ IntelExprState CurrState = State;
+ switch (State) {
+ default:
+ State = IES_ERROR;
+ break;
+ case IES_PLUS:
+ case IES_NOT:
+ State = IES_NOT;
+ break;
+ }
+ PrevState = CurrState;
+ }
void onRegister(unsigned Reg) {
IntelExprState CurrState = State;
switch (State) {
@@ -330,6 +453,7 @@ private:
break;
case IES_PLUS:
case IES_MINUS:
+ case IES_NOT:
State = IES_INTEGER;
Sym = SymRef;
SymName = SymRefName;
@@ -337,7 +461,7 @@ private:
break;
}
}
- void onInteger(int64_t TmpInt) {
+ bool onInteger(int64_t TmpInt, StringRef &ErrMsg) {
IntelExprState CurrState = State;
switch (State) {
default:
@@ -345,6 +469,11 @@ private:
break;
case IES_PLUS:
case IES_MINUS:
+ case IES_NOT:
+ case IES_OR:
+ case IES_AND:
+ case IES_LSHIFT:
+ case IES_RSHIFT:
case IES_DIVIDE:
case IES_MULTIPLY:
case IES_LPAREN:
@@ -354,21 +483,39 @@ private:
assert (!IndexReg && "IndexReg already set!");
IndexReg = TmpReg;
Scale = TmpInt;
+ if(Scale != 1 && Scale != 2 && Scale != 4 && Scale != 8) {
+ ErrMsg = "scale factor in address must be 1, 2, 4 or 8";
+ return true;
+ }
// Get the scale and replace the 'Register * Scale' with '0'.
IC.popOperator();
} else if ((PrevState == IES_PLUS || PrevState == IES_MINUS ||
+ PrevState == IES_OR || PrevState == IES_AND ||
+ PrevState == IES_LSHIFT || PrevState == IES_RSHIFT ||
PrevState == IES_MULTIPLY || PrevState == IES_DIVIDE ||
- PrevState == IES_LPAREN || PrevState == IES_LBRAC) &&
+ PrevState == IES_LPAREN || PrevState == IES_LBRAC ||
+ PrevState == IES_NOT) &&
CurrState == IES_MINUS) {
// Unary minus. No need to pop the minus operand because it was never
// pushed.
IC.pushOperand(IC_IMM, -TmpInt); // Push -Imm.
+ } else if ((PrevState == IES_PLUS || PrevState == IES_MINUS ||
+ PrevState == IES_OR || PrevState == IES_AND ||
+ PrevState == IES_LSHIFT || PrevState == IES_RSHIFT ||
+ PrevState == IES_MULTIPLY || PrevState == IES_DIVIDE ||
+ PrevState == IES_LPAREN || PrevState == IES_LBRAC ||
+ PrevState == IES_NOT) &&
+ CurrState == IES_NOT) {
+ // Unary not. No need to pop the not operand because it was never
+ // pushed.
+ IC.pushOperand(IC_IMM, ~TmpInt); // Push ~Imm.
} else {
IC.pushOperand(IC_IMM, TmpInt);
}
break;
}
PrevState = CurrState;
+ return false;
}
void onStar() {
PrevState = State;
@@ -442,14 +589,22 @@ private:
break;
case IES_PLUS:
case IES_MINUS:
+ case IES_NOT:
+ case IES_OR:
+ case IES_AND:
+ case IES_LSHIFT:
+ case IES_RSHIFT:
case IES_MULTIPLY:
case IES_DIVIDE:
case IES_LPAREN:
- // FIXME: We don't handle this type of unary minus, yet.
+ // FIXME: We don't handle this type of unary minus or not, yet.
if ((PrevState == IES_PLUS || PrevState == IES_MINUS ||
+ PrevState == IES_OR || PrevState == IES_AND ||
+ PrevState == IES_LSHIFT || PrevState == IES_RSHIFT ||
PrevState == IES_MULTIPLY || PrevState == IES_DIVIDE ||
- PrevState == IES_LPAREN || PrevState == IES_LBRAC) &&
- CurrState == IES_MINUS) {
+ PrevState == IES_LPAREN || PrevState == IES_LBRAC ||
+ PrevState == IES_NOT) &&
+ (CurrState == IES_MINUS || CurrState == IES_NOT)) {
State = IES_ERROR;
break;
}
@@ -486,61 +641,93 @@ private:
return Parser.Error(L, Msg, Ranges);
}
- X86Operand *ErrorOperand(SMLoc Loc, StringRef Msg) {
+ bool ErrorAndEatStatement(SMLoc L, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = None,
+ bool MatchingInlineAsm = false) {
+ Parser.eatToEndOfStatement();
+ return Error(L, Msg, Ranges, MatchingInlineAsm);
+ }
+
+ std::nullptr_t ErrorOperand(SMLoc Loc, StringRef Msg) {
Error(Loc, Msg);
- return 0;
+ return nullptr;
}
- X86Operand *ParseOperand();
- X86Operand *ParseATTOperand();
- X86Operand *ParseIntelOperand();
- X86Operand *ParseIntelOffsetOfOperator();
+ std::unique_ptr<X86Operand> DefaultMemSIOperand(SMLoc Loc);
+ std::unique_ptr<X86Operand> DefaultMemDIOperand(SMLoc Loc);
+ std::unique_ptr<X86Operand> ParseOperand();
+ std::unique_ptr<X86Operand> ParseATTOperand();
+ std::unique_ptr<X86Operand> ParseIntelOperand();
+ std::unique_ptr<X86Operand> ParseIntelOffsetOfOperator();
bool ParseIntelDotOperator(const MCExpr *Disp, const MCExpr *&NewDisp);
- X86Operand *ParseIntelOperator(unsigned OpKind);
- X86Operand *ParseIntelSegmentOverride(unsigned SegReg, SMLoc Start, unsigned Size);
- X86Operand *ParseIntelMemOperand(int64_t ImmDisp, SMLoc StartLoc,
- unsigned Size);
+ std::unique_ptr<X86Operand> ParseIntelOperator(unsigned OpKind);
+ std::unique_ptr<X86Operand>
+ ParseIntelSegmentOverride(unsigned SegReg, SMLoc Start, unsigned Size);
+ std::unique_ptr<X86Operand>
+ ParseIntelMemOperand(int64_t ImmDisp, SMLoc StartLoc, unsigned Size);
bool ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End);
- X86Operand *ParseIntelBracExpression(unsigned SegReg, SMLoc Start,
- int64_t ImmDisp, unsigned Size);
+ std::unique_ptr<X86Operand> ParseIntelBracExpression(unsigned SegReg,
+ SMLoc Start,
+ int64_t ImmDisp,
+ unsigned Size);
bool ParseIntelIdentifier(const MCExpr *&Val, StringRef &Identifier,
InlineAsmIdentifierInfo &Info,
bool IsUnevaluatedOperand, SMLoc &End);
- X86Operand *ParseMemOperand(unsigned SegReg, SMLoc StartLoc);
+ std::unique_ptr<X86Operand> ParseMemOperand(unsigned SegReg, SMLoc StartLoc);
- X86Operand *CreateMemForInlineAsm(unsigned SegReg, const MCExpr *Disp,
- unsigned BaseReg, unsigned IndexReg,
- unsigned Scale, SMLoc Start, SMLoc End,
- unsigned Size, StringRef Identifier,
- InlineAsmIdentifierInfo &Info);
+ std::unique_ptr<X86Operand>
+ CreateMemForInlineAsm(unsigned SegReg, const MCExpr *Disp, unsigned BaseReg,
+ unsigned IndexReg, unsigned Scale, SMLoc Start,
+ SMLoc End, unsigned Size, StringRef Identifier,
+ InlineAsmIdentifierInfo &Info);
bool ParseDirectiveWord(unsigned Size, SMLoc L);
bool ParseDirectiveCode(StringRef IDVal, SMLoc L);
- bool processInstruction(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
+ bool processInstruction(MCInst &Inst, const OperandVector &Ops);
+
+ /// Wrapper around MCStreamer::EmitInstruction(). Possibly adds
+ /// instrumentation around Inst.
+ void EmitInstruction(MCInst &Inst, OperandVector &Operands, MCStreamer &Out);
bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm);
+ OperandVector &Operands, MCStreamer &Out,
+ unsigned &ErrorInfo,
+ bool MatchingInlineAsm) override;
+
+ virtual bool OmitRegisterFromClobberLists(unsigned RegNo) override;
- /// isSrcOp - Returns true if operand is either (%rsi) or %ds:%(rsi)
- /// in 64bit mode or (%esi) or %es:(%esi) in 32bit mode.
- bool isSrcOp(X86Operand &Op);
+ /// doSrcDstMatch - Returns true if operands are matching in their
+ /// word size (%si and %di, %esi and %edi, etc.). Order depends on
+ /// the parsing mode (Intel vs. AT&T).
+ bool doSrcDstMatch(X86Operand &Op1, X86Operand &Op2);
- /// isDstOp - Returns true if operand is either (%rdi) or %es:(%rdi)
- /// in 64bit mode or (%edi) or %es:(%edi) in 32bit mode.
- bool isDstOp(X86Operand &Op);
+ /// Parses AVX512-specific operand primitives: masked registers ({%k<NUM>}, {z})
+ /// and memory broadcasting ({1to<NUM>}), updating the Operands vector if required.
+ /// \return \c true if no parsing errors occurred, \c false otherwise.
+ bool HandleAVX512Operand(OperandVector &Operands,
+ const MCParsedAsmOperand &Op);
bool is64BitMode() const {
// FIXME: Can tablegen auto-generate this?
return (STI.getFeatureBits() & X86::Mode64Bit) != 0;
}
- void SwitchMode() {
- unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(X86::Mode64Bit));
+ bool is32BitMode() const {
+ // FIXME: Can tablegen auto-generate this?
+ return (STI.getFeatureBits() & X86::Mode32Bit) != 0;
+ }
+ bool is16BitMode() const {
+ // FIXME: Can tablegen auto-generate this?
+ return (STI.getFeatureBits() & X86::Mode16Bit) != 0;
+ }
+ void SwitchMode(uint64_t mode) {
+ uint64_t oldMode = STI.getFeatureBits() &
+ (X86::Mode64Bit | X86::Mode32Bit | X86::Mode16Bit);
+ unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(oldMode | mode));
setAvailableFeatures(FB);
+ assert(mode == (STI.getFeatureBits() &
+ (X86::Mode64Bit | X86::Mode32Bit | X86::Mode16Bit)));
}
bool isParsingIntelSyntax() {
@@ -557,19 +744,23 @@ private:
public:
X86AsmParser(MCSubtargetInfo &sti, MCAsmParser &parser,
- const MCInstrInfo &MII)
- : MCTargetAsmParser(), STI(sti), Parser(parser), InstInfo(0) {
+ const MCInstrInfo &mii,
+ const MCTargetOptions &Options)
+ : MCTargetAsmParser(), STI(sti), Parser(parser), MII(mii),
+ InstInfo(nullptr) {
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
+ Instrumentation.reset(
+ CreateX86AsmInstrumentation(Options, Parser.getContext(), STI));
}
- virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
- virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
- SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
+
+ bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+ SMLoc NameLoc, OperandVector &Operands) override;
- virtual bool ParseDirective(AsmToken DirectiveID);
+ bool ParseDirective(AsmToken DirectiveID) override;
};
} // end anonymous namespace
@@ -580,470 +771,63 @@ static unsigned MatchRegisterName(StringRef Name);
/// }
-static bool isImmSExti16i8Value(uint64_t Value) {
- return (( Value <= 0x000000000000007FULL)||
- (0x000000000000FF80ULL <= Value && Value <= 0x000000000000FFFFULL)||
- (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
-}
-
-static bool isImmSExti32i8Value(uint64_t Value) {
- return (( Value <= 0x000000000000007FULL)||
- (0x00000000FFFFFF80ULL <= Value && Value <= 0x00000000FFFFFFFFULL)||
- (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
-}
-
-static bool isImmZExtu32u8Value(uint64_t Value) {
- return (Value <= 0x00000000000000FFULL);
-}
-
-static bool isImmSExti64i8Value(uint64_t Value) {
- return (( Value <= 0x000000000000007FULL)||
- (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
-}
-
-static bool isImmSExti64i32Value(uint64_t Value) {
- return (( Value <= 0x000000007FFFFFFFULL)||
- (0xFFFFFFFF80000000ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
-}
-namespace {
-
-/// X86Operand - Instances of this class represent a parsed X86 machine
-/// instruction.
-struct X86Operand : public MCParsedAsmOperand {
- enum KindTy {
- Token,
- Register,
- Immediate,
- Memory
- } Kind;
-
- SMLoc StartLoc, EndLoc;
- SMLoc OffsetOfLoc;
- StringRef SymName;
- void *OpDecl;
- bool AddressOf;
-
- struct TokOp {
- const char *Data;
- unsigned Length;
- };
-
- struct RegOp {
- unsigned RegNo;
- };
-
- struct ImmOp {
- const MCExpr *Val;
- };
-
- struct MemOp {
- unsigned SegReg;
- const MCExpr *Disp;
- unsigned BaseReg;
- unsigned IndexReg;
- unsigned Scale;
- unsigned Size;
- };
-
- union {
- struct TokOp Tok;
- struct RegOp Reg;
- struct ImmOp Imm;
- struct MemOp Mem;
- };
-
- X86Operand(KindTy K, SMLoc Start, SMLoc End)
- : Kind(K), StartLoc(Start), EndLoc(End) {}
-
- StringRef getSymName() { return SymName; }
- void *getOpDecl() { return OpDecl; }
-
- /// getStartLoc - Get the location of the first token of this operand.
- SMLoc getStartLoc() const { return StartLoc; }
- /// getEndLoc - Get the location of the last token of this operand.
- SMLoc getEndLoc() const { return EndLoc; }
- /// getLocRange - Get the range between the first and last token of this
- /// operand.
- SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
- /// getOffsetOfLoc - Get the location of the offset operator.
- SMLoc getOffsetOfLoc() const { return OffsetOfLoc; }
-
- virtual void print(raw_ostream &OS) const {}
-
- StringRef getToken() const {
- assert(Kind == Token && "Invalid access!");
- return StringRef(Tok.Data, Tok.Length);
- }
- void setTokenValue(StringRef Value) {
- assert(Kind == Token && "Invalid access!");
- Tok.Data = Value.data();
- Tok.Length = Value.size();
- }
-
- unsigned getReg() const {
- assert(Kind == Register && "Invalid access!");
- return Reg.RegNo;
- }
-
- const MCExpr *getImm() const {
- assert(Kind == Immediate && "Invalid access!");
- return Imm.Val;
- }
-
- const MCExpr *getMemDisp() const {
- assert(Kind == Memory && "Invalid access!");
- return Mem.Disp;
- }
- unsigned getMemSegReg() const {
- assert(Kind == Memory && "Invalid access!");
- return Mem.SegReg;
- }
- unsigned getMemBaseReg() const {
- assert(Kind == Memory && "Invalid access!");
- return Mem.BaseReg;
- }
- unsigned getMemIndexReg() const {
- assert(Kind == Memory && "Invalid access!");
- return Mem.IndexReg;
- }
- unsigned getMemScale() const {
- assert(Kind == Memory && "Invalid access!");
- return Mem.Scale;
- }
-
- bool isToken() const {return Kind == Token; }
-
- bool isImm() const { return Kind == Immediate; }
-
- bool isImmSExti16i8() const {
- if (!isImm())
- return false;
-
- // If this isn't a constant expr, just assume it fits and let relaxation
- // handle it.
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE)
- return true;
-
- // Otherwise, check the value is in a range that makes sense for this
- // extension.
- return isImmSExti16i8Value(CE->getValue());
- }
- bool isImmSExti32i8() const {
- if (!isImm())
- return false;
-
- // If this isn't a constant expr, just assume it fits and let relaxation
- // handle it.
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE)
- return true;
-
- // Otherwise, check the value is in a range that makes sense for this
- // extension.
- return isImmSExti32i8Value(CE->getValue());
- }
- bool isImmZExtu32u8() const {
- if (!isImm())
- return false;
-
- // If this isn't a constant expr, just assume it fits and let relaxation
- // handle it.
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE)
- return true;
-
- // Otherwise, check the value is in a range that makes sense for this
- // extension.
- return isImmZExtu32u8Value(CE->getValue());
- }
- bool isImmSExti64i8() const {
- if (!isImm())
- return false;
-
- // If this isn't a constant expr, just assume it fits and let relaxation
- // handle it.
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE)
+static bool CheckBaseRegAndIndexReg(unsigned BaseReg, unsigned IndexReg,
+ StringRef &ErrMsg) {
+ // If we have both a base register and an index register make sure they are
+ // both 64-bit or 32-bit registers.
+ // To support VSIB, IndexReg can be 128-bit or 256-bit registers.
+ if (BaseReg != 0 && IndexReg != 0) {
+ if (X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) &&
+ (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
+ X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg)) &&
+ IndexReg != X86::RIZ) {
+ ErrMsg = "base register is 64-bit, but index register is not";
return true;
-
- // Otherwise, check the value is in a range that makes sense for this
- // extension.
- return isImmSExti64i8Value(CE->getValue());
- }
- bool isImmSExti64i32() const {
- if (!isImm())
- return false;
-
- // If this isn't a constant expr, just assume it fits and let relaxation
- // handle it.
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- if (!CE)
+ }
+ if (X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) &&
+ (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
+ X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) &&
+ IndexReg != X86::EIZ){
+ ErrMsg = "base register is 32-bit, but index register is not";
return true;
-
- // Otherwise, check the value is in a range that makes sense for this
- // extension.
- return isImmSExti64i32Value(CE->getValue());
- }
-
- bool isOffsetOf() const {
- return OffsetOfLoc.getPointer();
- }
-
- bool needAddressOf() const {
- return AddressOf;
- }
-
- bool isMem() const { return Kind == Memory; }
- bool isMem8() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 8);
- }
- bool isMem16() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 16);
- }
- bool isMem32() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 32);
- }
- bool isMem64() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 64);
- }
- bool isMem80() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 80);
- }
- bool isMem128() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 128);
- }
- bool isMem256() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 256);
- }
- bool isMem512() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 512);
- }
-
- bool isMemVX32() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
- getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15;
- }
- bool isMemVY32() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
- getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15;
- }
- bool isMemVX64() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
- getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15;
- }
- bool isMemVY64() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
- getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15;
- }
- bool isMemVZ32() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
- getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31;
- }
- bool isMemVZ64() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
- getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31;
- }
-
- bool isAbsMem() const {
- return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
- !getMemIndexReg() && getMemScale() == 1;
- }
-
- bool isMemOffs8() const {
- return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
- !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 8);
- }
- bool isMemOffs16() const {
- return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
- !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 16);
- }
- bool isMemOffs32() const {
- return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
- !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 32);
- }
- bool isMemOffs64() const {
- return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
- !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 64);
- }
-
- bool isReg() const { return Kind == Register; }
-
- bool isGR32orGR64() const {
- return Kind == Register &&
- (X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
- X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
- }
-
- void addExpr(MCInst &Inst, const MCExpr *Expr) const {
- // Add as immediates when possible.
- if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
- Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
- else
- Inst.addOperand(MCOperand::CreateExpr(Expr));
- }
-
- void addRegOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateReg(getReg()));
- }
-
- static unsigned getGR32FromGR64(unsigned RegNo) {
- switch (RegNo) {
- default: llvm_unreachable("Unexpected register");
- case X86::RAX: return X86::EAX;
- case X86::RCX: return X86::ECX;
- case X86::RDX: return X86::EDX;
- case X86::RBX: return X86::EBX;
- case X86::RBP: return X86::EBP;
- case X86::RSP: return X86::ESP;
- case X86::RSI: return X86::ESI;
- case X86::RDI: return X86::EDI;
- case X86::R8: return X86::R8D;
- case X86::R9: return X86::R9D;
- case X86::R10: return X86::R10D;
- case X86::R11: return X86::R11D;
- case X86::R12: return X86::R12D;
- case X86::R13: return X86::R13D;
- case X86::R14: return X86::R14D;
- case X86::R15: return X86::R15D;
- case X86::RIP: return X86::EIP;
- }
- }
-
- void addGR32orGR64Operands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- unsigned RegNo = getReg();
- if (X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
- RegNo = getGR32FromGR64(RegNo);
- Inst.addOperand(MCOperand::CreateReg(RegNo));
- }
-
- void addImmOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addMemOperands(MCInst &Inst, unsigned N) const {
- assert((N == 5) && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateReg(getMemBaseReg()));
- Inst.addOperand(MCOperand::CreateImm(getMemScale()));
- Inst.addOperand(MCOperand::CreateReg(getMemIndexReg()));
- addExpr(Inst, getMemDisp());
- Inst.addOperand(MCOperand::CreateReg(getMemSegReg()));
- }
-
- void addAbsMemOperands(MCInst &Inst, unsigned N) const {
- assert((N == 1) && "Invalid number of operands!");
- // Add as immediates when possible.
- if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
- Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
- else
- Inst.addOperand(MCOperand::CreateExpr(getMemDisp()));
- }
-
- void addMemOffsOperands(MCInst &Inst, unsigned N) const {
- assert((N == 1) && "Invalid number of operands!");
- // Add as immediates when possible.
- if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
- Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
- else
- Inst.addOperand(MCOperand::CreateExpr(getMemDisp()));
- }
-
- static X86Operand *CreateToken(StringRef Str, SMLoc Loc) {
- SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size());
- X86Operand *Res = new X86Operand(Token, Loc, EndLoc);
- Res->Tok.Data = Str.data();
- Res->Tok.Length = Str.size();
- return Res;
- }
-
- static X86Operand *CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc,
- bool AddressOf = false,
- SMLoc OffsetOfLoc = SMLoc(),
- StringRef SymName = StringRef(),
- void *OpDecl = 0) {
- X86Operand *Res = new X86Operand(Register, StartLoc, EndLoc);
- Res->Reg.RegNo = RegNo;
- Res->AddressOf = AddressOf;
- Res->OffsetOfLoc = OffsetOfLoc;
- Res->SymName = SymName;
- Res->OpDecl = OpDecl;
- return Res;
- }
-
- static X86Operand *CreateImm(const MCExpr *Val, SMLoc StartLoc, SMLoc EndLoc){
- X86Operand *Res = new X86Operand(Immediate, StartLoc, EndLoc);
- Res->Imm.Val = Val;
- return Res;
- }
-
- /// Create an absolute memory operand.
- static X86Operand *CreateMem(const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
- unsigned Size = 0, StringRef SymName = StringRef(),
- void *OpDecl = 0) {
- X86Operand *Res = new X86Operand(Memory, StartLoc, EndLoc);
- Res->Mem.SegReg = 0;
- Res->Mem.Disp = Disp;
- Res->Mem.BaseReg = 0;
- Res->Mem.IndexReg = 0;
- Res->Mem.Scale = 1;
- Res->Mem.Size = Size;
- Res->SymName = SymName;
- Res->OpDecl = OpDecl;
- Res->AddressOf = false;
- return Res;
- }
-
- /// Create a generalized memory operand.
- static X86Operand *CreateMem(unsigned SegReg, const MCExpr *Disp,
- unsigned BaseReg, unsigned IndexReg,
- unsigned Scale, SMLoc StartLoc, SMLoc EndLoc,
- unsigned Size = 0,
- StringRef SymName = StringRef(),
- void *OpDecl = 0) {
- // We should never just have a displacement, that should be parsed as an
- // absolute memory operand.
- assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!");
-
- // The scale should always be one of {1,2,4,8}.
- assert(((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8)) &&
- "Invalid scale!");
- X86Operand *Res = new X86Operand(Memory, StartLoc, EndLoc);
- Res->Mem.SegReg = SegReg;
- Res->Mem.Disp = Disp;
- Res->Mem.BaseReg = BaseReg;
- Res->Mem.IndexReg = IndexReg;
- Res->Mem.Scale = Scale;
- Res->Mem.Size = Size;
- Res->SymName = SymName;
- Res->OpDecl = OpDecl;
- Res->AddressOf = false;
- return Res;
+ }
+ if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg)) {
+ if (X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
+ X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) {
+ ErrMsg = "base register is 16-bit, but index register is not";
+ return true;
+ }
+ if (((BaseReg == X86::BX || BaseReg == X86::BP) &&
+ IndexReg != X86::SI && IndexReg != X86::DI) ||
+ ((BaseReg == X86::SI || BaseReg == X86::DI) &&
+ IndexReg != X86::BX && IndexReg != X86::BP)) {
+ ErrMsg = "invalid 16-bit base/index register combination";
+ return true;
+ }
+ }
}
-};
-
-} // end anonymous namespace.
-
-bool X86AsmParser::isSrcOp(X86Operand &Op) {
- unsigned basereg = is64BitMode() ? X86::RSI : X86::ESI;
-
- return (Op.isMem() &&
- (Op.Mem.SegReg == 0 || Op.Mem.SegReg == X86::DS) &&
- isa<MCConstantExpr>(Op.Mem.Disp) &&
- cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
- Op.Mem.BaseReg == basereg && Op.Mem.IndexReg == 0);
+ return false;
}
-bool X86AsmParser::isDstOp(X86Operand &Op) {
- unsigned basereg = is64BitMode() ? X86::RDI : X86::EDI;
+bool X86AsmParser::doSrcDstMatch(X86Operand &Op1, X86Operand &Op2)
+{
+ // Return true and let a normal complaint about bogus operands happen.
+ if (!Op1.isMem() || !Op2.isMem())
+ return true;
- return Op.isMem() &&
- (Op.Mem.SegReg == 0 || Op.Mem.SegReg == X86::ES) &&
- isa<MCConstantExpr>(Op.Mem.Disp) &&
- cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
- Op.Mem.BaseReg == basereg && Op.Mem.IndexReg == 0;
+ // Actually these might be the other way round if Intel syntax is
+ // being used. It doesn't matter.
+ unsigned diReg = Op1.Mem.BaseReg;
+ unsigned siReg = Op2.Mem.BaseReg;
+
+ if (X86MCRegisterClasses[X86::GR16RegClassID].contains(siReg))
+ return X86MCRegisterClasses[X86::GR16RegClassID].contains(diReg);
+ if (X86MCRegisterClasses[X86::GR32RegClassID].contains(siReg))
+ return X86MCRegisterClasses[X86::GR32RegClassID].contains(diReg);
+ if (X86MCRegisterClasses[X86::GR64RegClassID].contains(siReg))
+ return X86MCRegisterClasses[X86::GR64RegClassID].contains(diReg);
+ // Again, return true and let another error happen.
+ return true;
}
bool X86AsmParser::ParseRegister(unsigned &RegNo,
@@ -1073,7 +857,7 @@ bool X86AsmParser::ParseRegister(unsigned &RegNo,
RegNo = MatchRegisterName(Tok.getString().lower());
if (!is64BitMode()) {
- // FIXME: This should be done using Requires<In32BitMode> and
+ // FIXME: This should be done using Requires<Not64BitMode> and
// Requires<In64BitMode> so "eiz" usage in 64-bit instructions can also be
// checked.
// FIXME: Check AH, CH, DH, BH cannot be used in an instruction requiring a
@@ -1155,7 +939,23 @@ bool X86AsmParser::ParseRegister(unsigned &RegNo,
return false;
}
-X86Operand *X86AsmParser::ParseOperand() {
+std::unique_ptr<X86Operand> X86AsmParser::DefaultMemSIOperand(SMLoc Loc) {
+ unsigned basereg =
+ is64BitMode() ? X86::RSI : (is32BitMode() ? X86::ESI : X86::SI);
+ const MCExpr *Disp = MCConstantExpr::Create(0, getContext());
+ return X86Operand::CreateMem(/*SegReg=*/0, Disp, /*BaseReg=*/basereg,
+ /*IndexReg=*/0, /*Scale=*/1, Loc, Loc, 0);
+}
+
+std::unique_ptr<X86Operand> X86AsmParser::DefaultMemDIOperand(SMLoc Loc) {
+ unsigned basereg =
+ is64BitMode() ? X86::RDI : (is32BitMode() ? X86::EDI : X86::DI);
+ const MCExpr *Disp = MCConstantExpr::Create(0, getContext());
+ return X86Operand::CreateMem(/*SegReg=*/0, Disp, /*BaseReg=*/basereg,
+ /*IndexReg=*/0, /*Scale=*/1, Loc, Loc, 0);
+}
+
+std::unique_ptr<X86Operand> X86AsmParser::ParseOperand() {
if (isParsingIntelSyntax())
return ParseIntelOperand();
return ParseATTOperand();
@@ -1171,22 +971,23 @@ static unsigned getIntelMemOperandSize(StringRef OpStr) {
.Cases("XWORD", "xword", 80)
.Cases("XMMWORD", "xmmword", 128)
.Cases("YMMWORD", "ymmword", 256)
+ .Cases("ZMMWORD", "zmmword", 512)
+ .Cases("OPAQUE", "opaque", -1U) // needs to be non-zero, but doesn't matter
.Default(0);
return Size;
}
-X86Operand *
-X86AsmParser::CreateMemForInlineAsm(unsigned SegReg, const MCExpr *Disp,
- unsigned BaseReg, unsigned IndexReg,
- unsigned Scale, SMLoc Start, SMLoc End,
- unsigned Size, StringRef Identifier,
- InlineAsmIdentifierInfo &Info){
+std::unique_ptr<X86Operand> X86AsmParser::CreateMemForInlineAsm(
+ unsigned SegReg, const MCExpr *Disp, unsigned BaseReg, unsigned IndexReg,
+ unsigned Scale, SMLoc Start, SMLoc End, unsigned Size, StringRef Identifier,
+ InlineAsmIdentifierInfo &Info) {
// If this is not a VarDecl then assume it is a FuncDecl or some other label
// reference. We need an 'r' constraint here, so we need to create register
// operand to ensure proper matching. Just pick a GPR based on the size of
// a pointer.
if (isa<MCSymbolRefExpr>(Disp) && !Info.IsVarDecl) {
- unsigned RegNo = is64BitMode() ? X86::RBX : X86::EBX;
+ unsigned RegNo =
+ is64BitMode() ? X86::RBX : (is32BitMode() ? X86::EBX : X86::BX);
return X86Operand::CreateReg(RegNo, Start, End, /*AddressOf=*/true,
SMLoc(), Identifier, Info.OpDecl);
}
@@ -1292,7 +1093,8 @@ bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
if (SM.getStopOnLBrac() && getLexer().getKind() == AsmToken::LBrac)
break;
- switch (getLexer().getKind()) {
+ AsmToken::TokenKind TK = getLexer().getKind();
+ switch (TK) {
default: {
if (SM.isValidEndState()) {
Done = true;
@@ -1304,13 +1106,14 @@ bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
Done = true;
break;
}
+ case AsmToken::String:
case AsmToken::Identifier: {
// This could be a register or a symbolic displacement.
unsigned TmpReg;
const MCExpr *Val;
SMLoc IdentLoc = Tok.getLoc();
StringRef Identifier = Tok.getString();
- if(!ParseRegister(TmpReg, IdentLoc, End)) {
+ if (TK != AsmToken::String && !ParseRegister(TmpReg, IdentLoc, End)) {
SM.onRegister(TmpReg);
UpdateLocLex = false;
break;
@@ -1335,16 +1138,50 @@ bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
}
return Error(Tok.getLoc(), "Unexpected identifier!");
}
- case AsmToken::Integer:
+ case AsmToken::Integer: {
+ StringRef ErrMsg;
if (isParsingInlineAsm() && SM.getAddImmPrefix())
InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_ImmPrefix,
Tok.getLoc()));
- SM.onInteger(Tok.getIntVal());
+ // Look for 'b' or 'f' following an Integer as a directional label
+ SMLoc Loc = getTok().getLoc();
+ int64_t IntVal = getTok().getIntVal();
+ End = consumeToken();
+ UpdateLocLex = false;
+ if (getLexer().getKind() == AsmToken::Identifier) {
+ StringRef IDVal = getTok().getString();
+ if (IDVal == "f" || IDVal == "b") {
+ MCSymbol *Sym =
+ getContext().GetDirectionalLocalSymbol(IntVal, IDVal == "b");
+ MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
+ const MCExpr *Val =
+ MCSymbolRefExpr::Create(Sym, Variant, getContext());
+ if (IDVal == "b" && Sym->isUndefined())
+ return Error(Loc, "invalid reference to undefined symbol");
+ StringRef Identifier = Sym->getName();
+ SM.onIdentifierExpr(Val, Identifier);
+ End = consumeToken();
+ } else {
+ if (SM.onInteger(IntVal, ErrMsg))
+ return Error(Loc, ErrMsg);
+ }
+ } else {
+ if (SM.onInteger(IntVal, ErrMsg))
+ return Error(Loc, ErrMsg);
+ }
break;
+ }
case AsmToken::Plus: SM.onPlus(); break;
case AsmToken::Minus: SM.onMinus(); break;
+ case AsmToken::Tilde: SM.onNot(); break;
case AsmToken::Star: SM.onStar(); break;
case AsmToken::Slash: SM.onDivide(); break;
+ case AsmToken::Pipe: SM.onOr(); break;
+ case AsmToken::Amp: SM.onAnd(); break;
+ case AsmToken::LessLess:
+ SM.onLShift(); break;
+ case AsmToken::GreaterGreater:
+ SM.onRShift(); break;
case AsmToken::LBrac: SM.onLBrac(); break;
case AsmToken::RBrac: SM.onRBrac(); break;
case AsmToken::LParen: SM.onLParen(); break;
@@ -1353,17 +1190,15 @@ bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
if (SM.hadError())
return Error(Tok.getLoc(), "unknown token in expression");
- if (!Done && UpdateLocLex) {
- End = Tok.getLoc();
- Parser.Lex(); // Consume the token.
- }
+ if (!Done && UpdateLocLex)
+ End = consumeToken();
}
return false;
}
-X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start,
- int64_t ImmDisp,
- unsigned Size) {
+std::unique_ptr<X86Operand>
+X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start,
+ int64_t ImmDisp, unsigned Size) {
const AsmToken &Tok = Parser.getTok();
SMLoc BracLoc = Tok.getLoc(), End = Tok.getEndLoc();
if (getLexer().isNot(AsmToken::LBrac))
@@ -1376,9 +1211,9 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start,
// expression.
IntelExprStateMachine SM(ImmDisp, /*StopOnLBrac=*/false, /*AddImmPrefix=*/true);
if (ParseIntelExpression(SM, End))
- return 0;
+ return nullptr;
- const MCExpr *Disp = 0;
+ const MCExpr *Disp = nullptr;
if (const MCExpr *Sym = SM.getSym()) {
// A symbolic displacement.
Disp = Sym;
@@ -1402,7 +1237,7 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start,
if (Tok.getString().find('.') != StringRef::npos) {
const MCExpr *NewDisp;
if (ParseIntelDotOperator(Disp, NewDisp))
- return 0;
+ return nullptr;
End = Tok.getEndLoc();
Parser.Lex(); // Eat the field.
@@ -1420,6 +1255,11 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start,
else
return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, Start, End, Size);
}
+ StringRef ErrMsg;
+ if (CheckBaseRegAndIndexReg(BaseReg, IndexReg, ErrMsg)) {
+ Error(StartInBrac, ErrMsg);
+ return nullptr;
+ }
return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale, Start,
End, Size);
}
@@ -1435,7 +1275,7 @@ bool X86AsmParser::ParseIntelIdentifier(const MCExpr *&Val,
InlineAsmIdentifierInfo &Info,
bool IsUnevaluatedOperand, SMLoc &End) {
assert (isParsingInlineAsm() && "Expected to be parsing inline assembly.");
- Val = 0;
+ Val = nullptr;
StringRef LineBuf(Identifier.data());
SemaCallback->LookupInlineAsmIdentifier(LineBuf, Info, IsUnevaluatedOperand);
@@ -1462,9 +1302,9 @@ bool X86AsmParser::ParseIntelIdentifier(const MCExpr *&Val,
}
/// \brief Parse intel style segment override.
-X86Operand *X86AsmParser::ParseIntelSegmentOverride(unsigned SegReg,
- SMLoc Start,
- unsigned Size) {
+std::unique_ptr<X86Operand>
+X86AsmParser::ParseIntelSegmentOverride(unsigned SegReg, SMLoc Start,
+ unsigned Size) {
assert(SegReg != 0 && "Tried to parse a segment override without a segment!");
const AsmToken &Tok = Parser.getTok(); // Eat colon.
if (Tok.isNot(AsmToken::Colon))
@@ -1507,20 +1347,22 @@ X86Operand *X86AsmParser::ParseIntelSegmentOverride(unsigned SegReg,
StringRef Identifier = Tok.getString();
if (ParseIntelIdentifier(Val, Identifier, Info,
/*Unevaluated=*/false, End))
- return 0;
+ return nullptr;
return CreateMemForInlineAsm(/*SegReg=*/0, Val, /*BaseReg=*/0,/*IndexReg=*/0,
/*Scale=*/1, Start, End, Size, Identifier, Info);
}
/// ParseIntelMemOperand - Parse intel style memory operand.
-X86Operand *X86AsmParser::ParseIntelMemOperand(int64_t ImmDisp, SMLoc Start,
- unsigned Size) {
+std::unique_ptr<X86Operand> X86AsmParser::ParseIntelMemOperand(int64_t ImmDisp,
+ SMLoc Start,
+ unsigned Size) {
const AsmToken &Tok = Parser.getTok();
SMLoc End;
// Parse ImmDisp [ BaseReg + Scale*IndexReg + Disp ].
if (getLexer().is(AsmToken::LBrac))
return ParseIntelBracExpression(/*SegReg=*/0, Start, ImmDisp, Size);
+ assert(ImmDisp == 0);
const MCExpr *Val;
if (!isParsingInlineAsm()) {
@@ -1534,9 +1376,40 @@ X86Operand *X86AsmParser::ParseIntelMemOperand(int64_t ImmDisp, SMLoc Start,
StringRef Identifier = Tok.getString();
if (ParseIntelIdentifier(Val, Identifier, Info,
/*Unevaluated=*/false, End))
- return 0;
- return CreateMemForInlineAsm(/*SegReg=*/0, Val, /*BaseReg=*/0, /*IndexReg=*/0,
- /*Scale=*/1, Start, End, Size, Identifier, Info);
+ return nullptr;
+
+ if (!getLexer().is(AsmToken::LBrac))
+ return CreateMemForInlineAsm(/*SegReg=*/0, Val, /*BaseReg=*/0, /*IndexReg=*/0,
+ /*Scale=*/1, Start, End, Size, Identifier, Info);
+
+ Parser.Lex(); // Eat '['
+
+ // Parse Identifier [ ImmDisp ]
+ IntelExprStateMachine SM(/*ImmDisp=*/0, /*StopOnLBrac=*/true,
+ /*AddImmPrefix=*/false);
+ if (ParseIntelExpression(SM, End))
+ return nullptr;
+
+ if (SM.getSym()) {
+ Error(Start, "cannot use more than one symbol in memory operand");
+ return nullptr;
+ }
+ if (SM.getBaseReg()) {
+ Error(Start, "cannot use base register with variable reference");
+ return nullptr;
+ }
+ if (SM.getIndexReg()) {
+ Error(Start, "cannot use index register with variable reference");
+ return nullptr;
+ }
+
+ const MCExpr *Disp = MCConstantExpr::Create(SM.getImm(), getContext());
+ // BaseReg is non-zero to avoid assertions. In the context of inline asm,
+ // we're pointing to a local variable in memory, so the base register is
+ // really the frame or stack pointer.
+ return X86Operand::CreateMem(/*SegReg=*/0, Disp, /*BaseReg=*/1, /*IndexReg=*/0,
+ /*Scale=*/1, Start, End, Size, Identifier,
+ Info.OpDecl);
}
/// Parse the '.' operator.
@@ -1585,7 +1458,7 @@ bool X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp,
/// Parse the 'offset' operator. This operator is used to specify the
/// location rather than the content of a variable.
-X86Operand *X86AsmParser::ParseIntelOffsetOfOperator() {
+std::unique_ptr<X86Operand> X86AsmParser::ParseIntelOffsetOfOperator() {
const AsmToken &Tok = Parser.getTok();
SMLoc OffsetOfLoc = Tok.getLoc();
Parser.Lex(); // Eat offset.
@@ -1596,7 +1469,7 @@ X86Operand *X86AsmParser::ParseIntelOffsetOfOperator() {
StringRef Identifier = Tok.getString();
if (ParseIntelIdentifier(Val, Identifier, Info,
/*Unevaluated=*/false, End))
- return 0;
+ return nullptr;
// Don't emit the offset operator.
InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Skip, OffsetOfLoc, 7));
@@ -1604,7 +1477,8 @@ X86Operand *X86AsmParser::ParseIntelOffsetOfOperator() {
// The offset operator will have an 'r' constraint, thus we need to create
// register operand to ensure proper matching. Just pick a GPR based on
// the size of a pointer.
- unsigned RegNo = is64BitMode() ? X86::RBX : X86::EBX;
+ unsigned RegNo =
+ is64BitMode() ? X86::RBX : (is32BitMode() ? X86::EBX : X86::BX);
return X86Operand::CreateReg(RegNo, Start, End, /*GetAddress=*/true,
OffsetOfLoc, Identifier, Info.OpDecl);
}
@@ -1621,18 +1495,18 @@ enum IntelOperatorKind {
/// variable. A variable's size is the product of its LENGTH and TYPE. The
/// TYPE operator returns the size of a C or C++ type or variable. If the
/// variable is an array, TYPE returns the size of a single element.
-X86Operand *X86AsmParser::ParseIntelOperator(unsigned OpKind) {
+std::unique_ptr<X86Operand> X86AsmParser::ParseIntelOperator(unsigned OpKind) {
const AsmToken &Tok = Parser.getTok();
SMLoc TypeLoc = Tok.getLoc();
Parser.Lex(); // Eat operator.
- const MCExpr *Val = 0;
+ const MCExpr *Val = nullptr;
InlineAsmIdentifierInfo Info;
SMLoc Start = Tok.getLoc(), End;
StringRef Identifier = Tok.getString();
if (ParseIntelIdentifier(Val, Identifier, Info,
/*Unevaluated=*/true, End))
- return 0;
+ return nullptr;
if (!Info.OpDecl)
return ErrorOperand(Start, "unable to lookup expression");
@@ -1654,7 +1528,7 @@ X86Operand *X86AsmParser::ParseIntelOperator(unsigned OpKind) {
return X86Operand::CreateImm(Imm, Start, End);
}
-X86Operand *X86AsmParser::ParseIntelOperand() {
+std::unique_ptr<X86Operand> X86AsmParser::ParseIntelOperand() {
const AsmToken &Tok = Parser.getTok();
SMLoc Start, End;
@@ -1682,12 +1556,12 @@ X86Operand *X86AsmParser::ParseIntelOperand() {
// Immediate.
if (getLexer().is(AsmToken::Integer) || getLexer().is(AsmToken::Minus) ||
- getLexer().is(AsmToken::LParen)) {
+ getLexer().is(AsmToken::Tilde) || getLexer().is(AsmToken::LParen)) {
AsmToken StartTok = Tok;
IntelExprStateMachine SM(/*Imm=*/0, /*StopOnLBrac=*/true,
/*AddImmPrefix=*/false);
if (ParseIntelExpression(SM, End))
- return 0;
+ return nullptr;
int64_t Imm = SM.getImm();
if (isParsingInlineAsm()) {
@@ -1701,6 +1575,13 @@ X86Operand *X86AsmParser::ParseIntelOperand() {
}
if (getLexer().isNot(AsmToken::LBrac)) {
+ // If a directional label (i.e. 1f or 2b) was parsed above from
+ // ParseIntelExpression() then SM.getSym() was set to a pointer to
+ // the MCExpr with the directional local symbol and this is a
+ // memory operand, not an immediate operand.
+ if (SM.getSym())
+ return X86Operand::CreateMem(SM.getSym(), Start, End, Size);
+
const MCExpr *ImmExpr = MCConstantExpr::Create(Imm, getContext());
return X86Operand::CreateImm(ImmExpr, Start, End);
}
@@ -1729,7 +1610,7 @@ X86Operand *X86AsmParser::ParseIntelOperand() {
return ParseIntelMemOperand(/*Disp=*/0, Start, Size);
}
-X86Operand *X86AsmParser::ParseATTOperand() {
+std::unique_ptr<X86Operand> X86AsmParser::ParseATTOperand() {
switch (getLexer().getKind()) {
default:
// Parse a memory operand with no segment register.
@@ -1738,11 +1619,11 @@ X86Operand *X86AsmParser::ParseATTOperand() {
// Read the register.
unsigned RegNo;
SMLoc Start, End;
- if (ParseRegister(RegNo, Start, End)) return 0;
+ if (ParseRegister(RegNo, Start, End)) return nullptr;
if (RegNo == X86::EIZ || RegNo == X86::RIZ) {
Error(Start, "%eiz and %riz can only be used as index registers",
SMRange(Start, End));
- return 0;
+ return nullptr;
}
// If this is a segment register followed by a ':', then this is the start
@@ -1759,15 +1640,84 @@ X86Operand *X86AsmParser::ParseATTOperand() {
Parser.Lex();
const MCExpr *Val;
if (getParser().parseExpression(Val, End))
- return 0;
+ return nullptr;
return X86Operand::CreateImm(Val, Start, End);
}
}
}
+bool X86AsmParser::HandleAVX512Operand(OperandVector &Operands,
+ const MCParsedAsmOperand &Op) {
+ if(STI.getFeatureBits() & X86::FeatureAVX512) {
+ if (getLexer().is(AsmToken::LCurly)) {
+ // Eat "{" and mark the current place.
+ const SMLoc consumedToken = consumeToken();
+ // Distinguish {1to<NUM>} from {%k<NUM>}.
+ if(getLexer().is(AsmToken::Integer)) {
+ // Parse memory broadcasting ({1to<NUM>}).
+ if (getLexer().getTok().getIntVal() != 1)
+ return !ErrorAndEatStatement(getLexer().getLoc(),
+ "Expected 1to<NUM> at this point");
+ Parser.Lex(); // Eat "1" of 1to8
+ if (!getLexer().is(AsmToken::Identifier) ||
+ !getLexer().getTok().getIdentifier().startswith("to"))
+ return !ErrorAndEatStatement(getLexer().getLoc(),
+ "Expected 1to<NUM> at this point");
+ // Recognize only reasonable suffixes.
+ const char *BroadcastPrimitive =
+ StringSwitch<const char*>(getLexer().getTok().getIdentifier())
+ .Case("to2", "{1to2}")
+ .Case("to4", "{1to4}")
+ .Case("to8", "{1to8}")
+ .Case("to16", "{1to16}")
+ .Default(nullptr);
+ if (!BroadcastPrimitive)
+ return !ErrorAndEatStatement(getLexer().getLoc(),
+ "Invalid memory broadcast primitive.");
+ Parser.Lex(); // Eat "toN" of 1toN
+ if (!getLexer().is(AsmToken::RCurly))
+ return !ErrorAndEatStatement(getLexer().getLoc(),
+ "Expected } at this point");
+ Parser.Lex(); // Eat "}"
+ Operands.push_back(X86Operand::CreateToken(BroadcastPrimitive,
+ consumedToken));
+ // No AVX512 specific primitives can pass
+ // after memory broadcasting, so return.
+ return true;
+ } else {
+ // Parse mask register {%k1}
+ Operands.push_back(X86Operand::CreateToken("{", consumedToken));
+ if (std::unique_ptr<X86Operand> Op = ParseOperand()) {
+ Operands.push_back(std::move(Op));
+ if (!getLexer().is(AsmToken::RCurly))
+ return !ErrorAndEatStatement(getLexer().getLoc(),
+ "Expected } at this point");
+ Operands.push_back(X86Operand::CreateToken("}", consumeToken()));
+
+ // Parse "zeroing non-masked" semantic {z}
+ if (getLexer().is(AsmToken::LCurly)) {
+ Operands.push_back(X86Operand::CreateToken("{z}", consumeToken()));
+ if (!getLexer().is(AsmToken::Identifier) ||
+ getLexer().getTok().getIdentifier() != "z")
+ return !ErrorAndEatStatement(getLexer().getLoc(),
+ "Expected z at this point");
+ Parser.Lex(); // Eat the z
+ if (!getLexer().is(AsmToken::RCurly))
+ return !ErrorAndEatStatement(getLexer().getLoc(),
+ "Expected } at this point");
+ Parser.Lex(); // Eat the }
+ }
+ }
+ }
+ }
+ }
+ return true;
+}
+
/// ParseMemOperand: segment: disp(basereg, indexreg, scale). The '%ds:' prefix
/// has already been parsed if present.
-X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
+std::unique_ptr<X86Operand> X86AsmParser::ParseMemOperand(unsigned SegReg,
+ SMLoc MemStart) {
// We have to disambiguate a parenthesized expression "(4+5)" from the start
// of a memory operand with a missing displacement "(%ebx)" or "(,%eax)". The
@@ -1776,7 +1726,7 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
const MCExpr *Disp = MCConstantExpr::Create(0, getParser().getContext());
if (getLexer().isNot(AsmToken::LParen)) {
SMLoc ExprEnd;
- if (getParser().parseExpression(Disp, ExprEnd)) return 0;
+ if (getParser().parseExpression(Disp, ExprEnd)) return nullptr;
// After parsing the base expression we could either have a parenthesized
// memory address or not. If not, return now. If so, eat the (.
@@ -1803,7 +1753,7 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
// It must be an parenthesized expression, parse it now.
if (getParser().parseParenExpression(Disp, ExprEnd))
- return 0;
+ return nullptr;
// After parsing the base expression we could either have a parenthesized
// memory address or not. If not, return now. If so, eat the (.
@@ -1822,15 +1772,16 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
// If we reached here, then we just ate the ( of the memory operand. Process
// the rest of the memory operand.
unsigned BaseReg = 0, IndexReg = 0, Scale = 1;
- SMLoc IndexLoc;
+ SMLoc IndexLoc, BaseLoc;
if (getLexer().is(AsmToken::Percent)) {
SMLoc StartLoc, EndLoc;
- if (ParseRegister(BaseReg, StartLoc, EndLoc)) return 0;
+ BaseLoc = Parser.getTok().getLoc();
+ if (ParseRegister(BaseReg, StartLoc, EndLoc)) return nullptr;
if (BaseReg == X86::EIZ || BaseReg == X86::RIZ) {
Error(StartLoc, "eiz and riz can only be used as index registers",
SMRange(StartLoc, EndLoc));
- return 0;
+ return nullptr;
}
}
@@ -1846,7 +1797,7 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
// like "1(%eax,,1)", the assembler doesn't. Use "eiz" or "riz" for this.
if (getLexer().is(AsmToken::Percent)) {
SMLoc L;
- if (ParseRegister(IndexReg, L, L)) return 0;
+ if (ParseRegister(IndexReg, L, L)) return nullptr;
if (getLexer().isNot(AsmToken::RParen)) {
// Parse the scale amount:
@@ -1854,7 +1805,7 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
if (getLexer().isNot(AsmToken::Comma)) {
Error(Parser.getTok().getLoc(),
"expected comma in scale expression");
- return 0;
+ return nullptr;
}
Parser.Lex(); // Eat the comma.
@@ -1864,13 +1815,18 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
int64_t ScaleVal;
if (getParser().parseAbsoluteExpression(ScaleVal)){
Error(Loc, "expected scale expression");
- return 0;
+ return nullptr;
}
// Validate the scale amount.
+ if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
+ ScaleVal != 1) {
+ Error(Loc, "scale factor in 16-bit address must be 1");
+ return nullptr;
+ }
if (ScaleVal != 1 && ScaleVal != 2 && ScaleVal != 4 && ScaleVal != 8){
Error(Loc, "scale factor in address must be 1, 2, 4 or 8");
- return 0;
+ return nullptr;
}
Scale = (unsigned)ScaleVal;
}
@@ -1882,7 +1838,7 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
int64_t Value;
if (getParser().parseAbsoluteExpression(Value))
- return 0;
+ return nullptr;
if (Value != 1)
Warning(Loc, "scale factor without index register is ignored");
@@ -1893,38 +1849,39 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
// Ok, we've eaten the memory operand, verify we have a ')' and eat it too.
if (getLexer().isNot(AsmToken::RParen)) {
Error(Parser.getTok().getLoc(), "unexpected token in memory operand");
- return 0;
+ return nullptr;
}
SMLoc MemEnd = Parser.getTok().getEndLoc();
Parser.Lex(); // Eat the ')'.
- // If we have both a base register and an index register make sure they are
- // both 64-bit or 32-bit registers.
- // To support VSIB, IndexReg can be 128-bit or 256-bit registers.
- if (BaseReg != 0 && IndexReg != 0) {
- if (X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) &&
- (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
- X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg)) &&
- IndexReg != X86::RIZ) {
- Error(IndexLoc, "index register is 32-bit, but base register is 64-bit");
- return 0;
- }
- if (X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) &&
- (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
- X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) &&
- IndexReg != X86::EIZ){
- Error(IndexLoc, "index register is 64-bit, but base register is 32-bit");
- return 0;
- }
+ // Check for use of invalid 16-bit registers. Only BX/BP/SI/DI are allowed,
+ // and then only in non-64-bit modes. Except for DX, which is a special case
+ // because an unofficial form of in/out instructions uses it.
+ if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
+ (is64BitMode() || (BaseReg != X86::BX && BaseReg != X86::BP &&
+ BaseReg != X86::SI && BaseReg != X86::DI)) &&
+ BaseReg != X86::DX) {
+ Error(BaseLoc, "invalid 16-bit base register");
+ return nullptr;
+ }
+ if (BaseReg == 0 &&
+ X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg)) {
+ Error(IndexLoc, "16-bit memory operand may not include only index register");
+ return nullptr;
+ }
+
+ StringRef ErrMsg;
+ if (CheckBaseRegAndIndexReg(BaseReg, IndexReg, ErrMsg)) {
+ Error(BaseLoc, ErrMsg);
+ return nullptr;
}
return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale,
MemStart, MemEnd);
}
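With the added 16-bit checks, the intent is roughly the following (illustrative AT&T forms, assuming .code16 or .code32):
    movw    (%bx,%si), %ax      # accepted: BX base with SI index
    movw    (%cx), %ax          # rejected: invalid 16-bit base register
    movw    (,%si), %ax         # rejected: 16-bit operand with only an index register
    movw    (%bx,%si,2), %ax    # rejected: scale factor in 16-bit address must be 1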
-bool X86AsmParser::
-ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+ SMLoc NameLoc, OperandVector &Operands) {
InstInfo = &Info;
StringRef PatchedName = Name;
@@ -1934,7 +1891,7 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
PatchedName = PatchedName.substr(0, Name.size()-1);
// FIXME: Hack to recognize cmp<comparison code>{ss,sd,ps,pd}.
- const MCExpr *ExtraImmOp = 0;
+ const MCExpr *ExtraImmOp = nullptr;
if ((PatchedName.startswith("cmp") || PatchedName.startswith("vcmp")) &&
(PatchedName.endswith("ss") || PatchedName.endswith("sd") ||
PatchedName.endswith("ps") || PatchedName.endswith("pd"))) {
@@ -2012,84 +1969,35 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
if (getLexer().isNot(AsmToken::EndOfStatement) && !isPrefix) {
// Parse '*' modifier.
- if (getLexer().is(AsmToken::Star)) {
- SMLoc Loc = Parser.getTok().getLoc();
- Operands.push_back(X86Operand::CreateToken("*", Loc));
- Parser.Lex(); // Eat the star.
- }
-
- // Read the first operand.
- if (X86Operand *Op = ParseOperand())
- Operands.push_back(Op);
- else {
- Parser.eatToEndOfStatement();
- return true;
- }
-
- while (getLexer().is(AsmToken::Comma)) {
- Parser.Lex(); // Eat the comma.
-
- // Parse and remember the operand.
- if (X86Operand *Op = ParseOperand())
- Operands.push_back(Op);
- else {
- Parser.eatToEndOfStatement();
- return true;
- }
- }
-
- if (STI.getFeatureBits() & X86::FeatureAVX512) {
- // Parse mask register {%k1}
- if (getLexer().is(AsmToken::LCurly)) {
- SMLoc Loc = Parser.getTok().getLoc();
- Operands.push_back(X86Operand::CreateToken("{", Loc));
- Parser.Lex(); // Eat the {
- if (X86Operand *Op = ParseOperand()) {
- Operands.push_back(Op);
- if (!getLexer().is(AsmToken::RCurly)) {
- SMLoc Loc = getLexer().getLoc();
- Parser.eatToEndOfStatement();
- return Error(Loc, "Expected } at this point");
- }
- Loc = Parser.getTok().getLoc();
- Operands.push_back(X86Operand::CreateToken("}", Loc));
- Parser.Lex(); // Eat the }
- } else {
- Parser.eatToEndOfStatement();
+ if (getLexer().is(AsmToken::Star))
+ Operands.push_back(X86Operand::CreateToken("*", consumeToken()));
+
+ // Read the operands.
+ while(1) {
+ if (std::unique_ptr<X86Operand> Op = ParseOperand()) {
+ Operands.push_back(std::move(Op));
+ if (!HandleAVX512Operand(Operands, *Operands.back()))
return true;
- }
- }
- // Parse "zeroing non-masked" semantic {z}
- if (getLexer().is(AsmToken::LCurly)) {
- SMLoc Loc = Parser.getTok().getLoc();
- Operands.push_back(X86Operand::CreateToken("{z}", Loc));
- Parser.Lex(); // Eat the {
- if (!getLexer().is(AsmToken::Identifier) || getLexer().getTok().getIdentifier() != "z") {
- SMLoc Loc = getLexer().getLoc();
- Parser.eatToEndOfStatement();
- return Error(Loc, "Expected z at this point");
- }
- Parser.Lex(); // Eat the z
- if (!getLexer().is(AsmToken::RCurly)) {
- SMLoc Loc = getLexer().getLoc();
- Parser.eatToEndOfStatement();
- return Error(Loc, "Expected } at this point");
- }
- Parser.Lex(); // Eat the }
+ } else {
+ Parser.eatToEndOfStatement();
+ return true;
}
- }
+ // check for comma and eat it
+ if (getLexer().is(AsmToken::Comma))
+ Parser.Lex();
+ else
+ break;
+ }
- if (getLexer().isNot(AsmToken::EndOfStatement)) {
- SMLoc Loc = getLexer().getLoc();
- Parser.eatToEndOfStatement();
- return Error(Loc, "unexpected token in argument list");
- }
- }
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return ErrorAndEatStatement(getLexer().getLoc(),
+ "unexpected token in argument list");
+ }
- if (getLexer().is(AsmToken::EndOfStatement))
- Parser.Lex(); // Consume the EndOfStatement
- else if (isPrefix && getLexer().is(AsmToken::Slash))
- Parser.Lex(); // Consume the prefix separator Slash
+ // Consume the EndOfStatement or the prefix separator Slash
+ if (getLexer().is(AsmToken::EndOfStatement) ||
+ (isPrefix && getLexer().is(AsmToken::Slash)))
+ Parser.Lex();
if (ExtraImmOp && isParsingIntelSyntax())
Operands.push_back(X86Operand::CreateImm(ExtraImmOp, NameLoc, NameLoc));
@@ -2099,126 +2007,122 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
// documented form in various unofficial manuals, so a lot of code uses it.
if ((Name == "outb" || Name == "outw" || Name == "outl" || Name == "out") &&
Operands.size() == 3) {
- X86Operand &Op = *(X86Operand*)Operands.back();
+ X86Operand &Op = (X86Operand &)*Operands.back();
if (Op.isMem() && Op.Mem.SegReg == 0 &&
isa<MCConstantExpr>(Op.Mem.Disp) &&
cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
Op.Mem.BaseReg == MatchRegisterName("dx") && Op.Mem.IndexReg == 0) {
SMLoc Loc = Op.getEndLoc();
Operands.back() = X86Operand::CreateReg(Op.Mem.BaseReg, Loc, Loc);
- delete &Op;
}
}
// Same hack for "in[bwl]? (%dx), %al" -> "inb %dx, %al".
if ((Name == "inb" || Name == "inw" || Name == "inl" || Name == "in") &&
Operands.size() == 3) {
- X86Operand &Op = *(X86Operand*)Operands.begin()[1];
+ X86Operand &Op = (X86Operand &)*Operands[1];
if (Op.isMem() && Op.Mem.SegReg == 0 &&
isa<MCConstantExpr>(Op.Mem.Disp) &&
cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
Op.Mem.BaseReg == MatchRegisterName("dx") && Op.Mem.IndexReg == 0) {
SMLoc Loc = Op.getEndLoc();
- Operands.begin()[1] = X86Operand::CreateReg(Op.Mem.BaseReg, Loc, Loc);
- delete &Op;
- }
- }
- // Transform "ins[bwl] %dx, %es:(%edi)" into "ins[bwl]"
- if (Name.startswith("ins") && Operands.size() == 3 &&
- (Name == "insb" || Name == "insw" || Name == "insl")) {
- X86Operand &Op = *(X86Operand*)Operands.begin()[1];
- X86Operand &Op2 = *(X86Operand*)Operands.begin()[2];
- if (Op.isReg() && Op.getReg() == X86::DX && isDstOp(Op2)) {
- Operands.pop_back();
- Operands.pop_back();
- delete &Op;
- delete &Op2;
- }
- }
-
- // Transform "outs[bwl] %ds:(%esi), %dx" into "out[bwl]"
- if (Name.startswith("outs") && Operands.size() == 3 &&
- (Name == "outsb" || Name == "outsw" || Name == "outsl")) {
- X86Operand &Op = *(X86Operand*)Operands.begin()[1];
- X86Operand &Op2 = *(X86Operand*)Operands.begin()[2];
- if (isSrcOp(Op) && Op2.isReg() && Op2.getReg() == X86::DX) {
- Operands.pop_back();
- Operands.pop_back();
- delete &Op;
- delete &Op2;
- }
- }
-
- // Transform "movs[bwl] %ds:(%esi), %es:(%edi)" into "movs[bwl]"
- if (Name.startswith("movs") && Operands.size() == 3 &&
- (Name == "movsb" || Name == "movsw" || Name == "movsl" ||
- (is64BitMode() && Name == "movsq"))) {
- X86Operand &Op = *(X86Operand*)Operands.begin()[1];
- X86Operand &Op2 = *(X86Operand*)Operands.begin()[2];
- if (isSrcOp(Op) && isDstOp(Op2)) {
- Operands.pop_back();
- Operands.pop_back();
- delete &Op;
- delete &Op2;
- }
- }
- // Transform "lods[bwl] %ds:(%esi),{%al,%ax,%eax,%rax}" into "lods[bwl]"
- if (Name.startswith("lods") && Operands.size() == 3 &&
- (Name == "lods" || Name == "lodsb" || Name == "lodsw" ||
- Name == "lodsl" || (is64BitMode() && Name == "lodsq"))) {
- X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
- X86Operand *Op2 = static_cast<X86Operand*>(Operands[2]);
- if (isSrcOp(*Op1) && Op2->isReg()) {
- const char *ins;
- unsigned reg = Op2->getReg();
- bool isLods = Name == "lods";
- if (reg == X86::AL && (isLods || Name == "lodsb"))
- ins = "lodsb";
- else if (reg == X86::AX && (isLods || Name == "lodsw"))
- ins = "lodsw";
- else if (reg == X86::EAX && (isLods || Name == "lodsl"))
- ins = "lodsl";
- else if (reg == X86::RAX && (isLods || Name == "lodsq"))
- ins = "lodsq";
- else
- ins = NULL;
- if (ins != NULL) {
- Operands.pop_back();
- Operands.pop_back();
- delete Op1;
- delete Op2;
- if (Name != ins)
- static_cast<X86Operand*>(Operands[0])->setTokenValue(ins);
- }
+ Operands[1] = X86Operand::CreateReg(Op.Mem.BaseReg, Loc, Loc);
}
}
- // Transform "stos[bwl] {%al,%ax,%eax,%rax},%es:(%edi)" into "stos[bwl]"
- if (Name.startswith("stos") && Operands.size() == 3 &&
+
+ // Append default arguments to "ins[bwld]"
+ if (Name.startswith("ins") && Operands.size() == 1 &&
+ (Name == "insb" || Name == "insw" || Name == "insl" ||
+ Name == "insd" )) {
+ if (isParsingIntelSyntax()) {
+ Operands.push_back(X86Operand::CreateReg(X86::DX, NameLoc, NameLoc));
+ Operands.push_back(DefaultMemDIOperand(NameLoc));
+ } else {
+ Operands.push_back(X86Operand::CreateReg(X86::DX, NameLoc, NameLoc));
+ Operands.push_back(DefaultMemDIOperand(NameLoc));
+ }
+ }
+
+ // Append default arguments to "outs[bwld]"
+ if (Name.startswith("outs") && Operands.size() == 1 &&
+ (Name == "outsb" || Name == "outsw" || Name == "outsl" ||
+ Name == "outsd" )) {
+ if (isParsingIntelSyntax()) {
+ Operands.push_back(DefaultMemSIOperand(NameLoc));
+ Operands.push_back(X86Operand::CreateReg(X86::DX, NameLoc, NameLoc));
+ } else {
+ Operands.push_back(DefaultMemSIOperand(NameLoc));
+ Operands.push_back(X86Operand::CreateReg(X86::DX, NameLoc, NameLoc));
+ }
+ }
+
+ // Transform "lods[bwlq]" into "lods[bwlq] ($SIREG)" for appropriate
+ // values of $SIREG according to the mode. It would be nice if this
+ // could be achieved with InstAlias in the tables.
+ if (Name.startswith("lods") && Operands.size() == 1 &&
+ (Name == "lods" || Name == "lodsb" || Name == "lodsw" ||
+ Name == "lodsl" || Name == "lodsd" || Name == "lodsq"))
+ Operands.push_back(DefaultMemSIOperand(NameLoc));
+
+ // Transform "stos[bwlq]" into "stos[bwlq] ($DIREG)" for appropriate
+ // values of $DIREG according to the mode. It would be nice if this
+ // could be achieved with InstAlias in the tables.
+ if (Name.startswith("stos") && Operands.size() == 1 &&
(Name == "stos" || Name == "stosb" || Name == "stosw" ||
- Name == "stosl" || (is64BitMode() && Name == "stosq"))) {
- X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
- X86Operand *Op2 = static_cast<X86Operand*>(Operands[2]);
- if (isDstOp(*Op2) && Op1->isReg()) {
- const char *ins;
- unsigned reg = Op1->getReg();
- bool isStos = Name == "stos";
- if (reg == X86::AL && (isStos || Name == "stosb"))
- ins = "stosb";
- else if (reg == X86::AX && (isStos || Name == "stosw"))
- ins = "stosw";
- else if (reg == X86::EAX && (isStos || Name == "stosl"))
- ins = "stosl";
- else if (reg == X86::RAX && (isStos || Name == "stosq"))
- ins = "stosq";
- else
- ins = NULL;
- if (ins != NULL) {
- Operands.pop_back();
- Operands.pop_back();
- delete Op1;
- delete Op2;
- if (Name != ins)
- static_cast<X86Operand*>(Operands[0])->setTokenValue(ins);
+ Name == "stosl" || Name == "stosd" || Name == "stosq"))
+ Operands.push_back(DefaultMemDIOperand(NameLoc));
+
+ // Transform "scas[bwlq]" into "scas[bwlq] ($DIREG)" for appropriate
+ // values of $DIREG according to the mode. It would be nice if this
+ // could be achieved with InstAlias in the tables.
+ if (Name.startswith("scas") && Operands.size() == 1 &&
+ (Name == "scas" || Name == "scasb" || Name == "scasw" ||
+ Name == "scasl" || Name == "scasd" || Name == "scasq"))
+ Operands.push_back(DefaultMemDIOperand(NameLoc));
+
+ // Add default SI and DI operands to "cmps[bwlq]".
+ if (Name.startswith("cmps") &&
+ (Name == "cmps" || Name == "cmpsb" || Name == "cmpsw" ||
+ Name == "cmpsl" || Name == "cmpsd" || Name == "cmpsq")) {
+ if (Operands.size() == 1) {
+ if (isParsingIntelSyntax()) {
+ Operands.push_back(DefaultMemSIOperand(NameLoc));
+ Operands.push_back(DefaultMemDIOperand(NameLoc));
+ } else {
+ Operands.push_back(DefaultMemDIOperand(NameLoc));
+ Operands.push_back(DefaultMemSIOperand(NameLoc));
+ }
+ } else if (Operands.size() == 3) {
+ X86Operand &Op = (X86Operand &)*Operands[1];
+ X86Operand &Op2 = (X86Operand &)*Operands[2];
+ if (!doSrcDstMatch(Op, Op2))
+ return Error(Op.getStartLoc(),
+ "mismatching source and destination index registers");
+ }
+ }
+
+ // Add default SI and DI operands to "movs[bwlq]".
+ if ((Name.startswith("movs") &&
+ (Name == "movs" || Name == "movsb" || Name == "movsw" ||
+ Name == "movsl" || Name == "movsd" || Name == "movsq")) ||
+ (Name.startswith("smov") &&
+ (Name == "smov" || Name == "smovb" || Name == "smovw" ||
+ Name == "smovl" || Name == "smovd" || Name == "smovq"))) {
+ if (Operands.size() == 1) {
+ if (Name == "movsd")
+ Operands.back() = X86Operand::CreateToken("movsl", NameLoc);
+ if (isParsingIntelSyntax()) {
+ Operands.push_back(DefaultMemDIOperand(NameLoc));
+ Operands.push_back(DefaultMemSIOperand(NameLoc));
+ } else {
+ Operands.push_back(DefaultMemSIOperand(NameLoc));
+ Operands.push_back(DefaultMemDIOperand(NameLoc));
}
+ } else if (Operands.size() == 3) {
+ X86Operand &Op = (X86Operand &)*Operands[1];
+ X86Operand &Op2 = (X86Operand &)*Operands[2];
+ if (!doSrcDstMatch(Op, Op2))
+ return Error(Op.getStartLoc(),
+ "mismatching source and destination index registers");
}
}
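The default-operand rewrites above expand bare string mnemonics as if the memory operands had been written out; illustrative AT&T expansions (register width follows the CPU mode):
    lodsb   ->  lodsb  (%rsi)
    stosl   ->  stosl  (%rdi)
    movsw   ->  movsw  (%rsi), (%rdi)
    movsd   ->  movsl  (%rsi), (%rdi)    # bare movsd is the string op, not SSE movsd
In Intel syntax the SI/DI operands are appended in the opposite order for cmps/movs, matching the branches above.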
@@ -2231,31 +2135,26 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
Operands.size() == 3) {
if (isParsingIntelSyntax()) {
// Intel syntax
- X86Operand *Op1 = static_cast<X86Operand*>(Operands[2]);
- if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
- cast<MCConstantExpr>(Op1->getImm())->getValue() == 1) {
- delete Operands[2];
+ X86Operand &Op1 = static_cast<X86Operand &>(*Operands[2]);
+ if (Op1.isImm() && isa<MCConstantExpr>(Op1.getImm()) &&
+ cast<MCConstantExpr>(Op1.getImm())->getValue() == 1)
Operands.pop_back();
- }
} else {
- X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
- if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
- cast<MCConstantExpr>(Op1->getImm())->getValue() == 1) {
- delete Operands[1];
+ X86Operand &Op1 = static_cast<X86Operand &>(*Operands[1]);
+ if (Op1.isImm() && isa<MCConstantExpr>(Op1.getImm()) &&
+ cast<MCConstantExpr>(Op1.getImm())->getValue() == 1)
Operands.erase(Operands.begin() + 1);
- }
}
}
// Transforms "int $3" into "int3" as a size optimization. We can't write an
// instalias with an immediate operand yet.
if (Name == "int" && Operands.size() == 2) {
- X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
- if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
- cast<MCConstantExpr>(Op1->getImm())->getValue() == 3) {
- delete Operands[1];
+ X86Operand &Op1 = static_cast<X86Operand &>(*Operands[1]);
+ if (Op1.isImm() && isa<MCConstantExpr>(Op1.getImm()) &&
+ cast<MCConstantExpr>(Op1.getImm())->getValue() == 3) {
Operands.erase(Operands.begin() + 1);
- static_cast<X86Operand*>(Operands[0])->setTokenValue("int3");
+ static_cast<X86Operand &>(*Operands[0]).setTokenValue("int3");
}
}
@@ -2301,9 +2200,7 @@ static bool convert64i32to64ri8(MCInst &Inst, unsigned Opcode,
return convertToSExti8(Inst, Opcode, X86::RAX, isCmp);
}
-bool X86AsmParser::
-processInstruction(MCInst &Inst,
- const SmallVectorImpl<MCParsedAsmOperand*> &Ops) {
+bool X86AsmParser::processInstruction(MCInst &Inst, const OperandVector &Ops) {
switch (Inst.getOpcode()) {
default: return false;
case X86::AND16i16: return convert16i16to16ri8(Inst, X86::AND16ri8);
@@ -2383,43 +2280,48 @@ processInstruction(MCInst &Inst,
}
static const char *getSubtargetFeatureName(unsigned Val);
-bool X86AsmParser::
-MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm) {
+
+void X86AsmParser::EmitInstruction(MCInst &Inst, OperandVector &Operands,
+ MCStreamer &Out) {
+ Instrumentation->InstrumentInstruction(Inst, Operands, getContext(), MII,
+ Out);
+ Out.EmitInstruction(Inst, STI);
+}
+
+bool X86AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ OperandVector &Operands,
+ MCStreamer &Out, unsigned &ErrorInfo,
+ bool MatchingInlineAsm) {
assert(!Operands.empty() && "Unexpect empty operand list!");
- X86Operand *Op = static_cast<X86Operand*>(Operands[0]);
- assert(Op->isToken() && "Leading operand should always be a mnemonic!");
+ X86Operand &Op = static_cast<X86Operand &>(*Operands[0]);
+ assert(Op.isToken() && "Leading operand should always be a mnemonic!");
ArrayRef<SMRange> EmptyRanges = None;
// First, handle aliases that expand to multiple instructions.
// FIXME: This should be replaced with a real .td file alias mechanism.
// Also, MatchInstructionImpl should actually *do* the EmitInstruction
// call.
- if (Op->getToken() == "fstsw" || Op->getToken() == "fstcw" ||
- Op->getToken() == "fstsww" || Op->getToken() == "fstcww" ||
- Op->getToken() == "finit" || Op->getToken() == "fsave" ||
- Op->getToken() == "fstenv" || Op->getToken() == "fclex") {
+ if (Op.getToken() == "fstsw" || Op.getToken() == "fstcw" ||
+ Op.getToken() == "fstsww" || Op.getToken() == "fstcww" ||
+ Op.getToken() == "finit" || Op.getToken() == "fsave" ||
+ Op.getToken() == "fstenv" || Op.getToken() == "fclex") {
MCInst Inst;
Inst.setOpcode(X86::WAIT);
Inst.setLoc(IDLoc);
if (!MatchingInlineAsm)
- Out.EmitInstruction(Inst);
-
- const char *Repl =
- StringSwitch<const char*>(Op->getToken())
- .Case("finit", "fninit")
- .Case("fsave", "fnsave")
- .Case("fstcw", "fnstcw")
- .Case("fstcww", "fnstcw")
- .Case("fstenv", "fnstenv")
- .Case("fstsw", "fnstsw")
- .Case("fstsww", "fnstsw")
- .Case("fclex", "fnclex")
- .Default(0);
+ EmitInstruction(Inst, Operands, Out);
+
+ const char *Repl = StringSwitch<const char *>(Op.getToken())
+ .Case("finit", "fninit")
+ .Case("fsave", "fnsave")
+ .Case("fstcw", "fnstcw")
+ .Case("fstcww", "fnstcw")
+ .Case("fstenv", "fnstenv")
+ .Case("fstsw", "fnstsw")
+ .Case("fstsww", "fnstsw")
+ .Case("fclex", "fnclex")
+ .Default(nullptr);
assert(Repl && "Unknown wait-prefixed instruction");
- delete Operands[0];
Operands[0] = X86Operand::CreateToken(Repl, IDLoc);
}
@@ -2441,7 +2343,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
Inst.setLoc(IDLoc);
if (!MatchingInlineAsm)
- Out.EmitInstruction(Inst);
+ EmitInstruction(Inst, Operands, Out);
Opcode = Inst.getOpcode();
return false;
case Match_MissingFeature: {
@@ -2472,11 +2374,11 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
// following hack.
// Change the operand to point to a temporary token.
- StringRef Base = Op->getToken();
+ StringRef Base = Op.getToken();
SmallString<16> Tmp;
Tmp += Base;
Tmp += ' ';
- Op->setTokenValue(Tmp.str());
+ Op.setTokenValue(Tmp.str());
// If this instruction starts with an 'f', then it is a floating point stack
// instruction. These come in up to three forms for 32-bit, 64-bit, and
@@ -2517,7 +2419,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
ErrorInfoMissingFeature = ErrorInfoIgnore;
// Restore the old token.
- Op->setTokenValue(Base);
+ Op.setTokenValue(Base);
// If exactly one matched, then we treat that as a successful match (and the
// instruction will already have been filled in correctly, since the failing
@@ -2528,7 +2430,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
if (NumSuccessfulMatches == 1) {
Inst.setLoc(IDLoc);
if (!MatchingInlineAsm)
- Out.EmitInstruction(Inst);
+ EmitInstruction(Inst, Operands, Out);
Opcode = Inst.getOpcode();
return false;
}
@@ -2567,8 +2469,8 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
if ((Match1 == Match_MnemonicFail) && (Match2 == Match_MnemonicFail) &&
(Match3 == Match_MnemonicFail) && (Match4 == Match_MnemonicFail)) {
if (!WasOriginallyInvalidOperand) {
- ArrayRef<SMRange> Ranges = MatchingInlineAsm ? EmptyRanges :
- Op->getLocRange();
+ ArrayRef<SMRange> Ranges =
+ MatchingInlineAsm ? EmptyRanges : Op.getLocRange();
return Error(IDLoc, "invalid instruction mnemonic '" + Base + "'",
Ranges, MatchingInlineAsm);
}
@@ -2579,10 +2481,10 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
return Error(IDLoc, "too few operands for instruction",
EmptyRanges, MatchingInlineAsm);
- X86Operand *Operand = (X86Operand*)Operands[ErrorInfo];
- if (Operand->getStartLoc().isValid()) {
- SMRange OperandRange = Operand->getLocRange();
- return Error(Operand->getStartLoc(), "invalid operand for instruction",
+ X86Operand &Operand = (X86Operand &)*Operands[ErrorInfo];
+ if (Operand.getStartLoc().isValid()) {
+ SMRange OperandRange = Operand.getLocRange();
+ return Error(Operand.getStartLoc(), "invalid operand for instruction",
OperandRange, MatchingInlineAsm);
}
}
@@ -2622,6 +2524,9 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
return true;
}
+bool X86AsmParser::OmitRegisterFromClobberLists(unsigned RegNo) {
+ return X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(RegNo);
+}
bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
StringRef IDVal = DirectiveID.getIdentifier();
@@ -2635,11 +2540,9 @@ bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
} else if (IDVal.startswith(".intel_syntax")) {
getParser().setAssemblerDialect(1);
if (getLexer().isNot(AsmToken::EndOfStatement)) {
- if(Parser.getTok().getString() == "noprefix") {
- // FIXME : Handle noprefix
+ // FIXME: Handle noprefix
+ if (Parser.getTok().getString() == "noprefix")
Parser.Lex();
- } else
- return true;
}
return false;
}
@@ -2653,7 +2556,7 @@ bool X86AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
for (;;) {
const MCExpr *Value;
if (getParser().parseExpression(Value))
- return true;
+ return false;
getParser().getStreamer().EmitValue(Value, Size);
@@ -2661,8 +2564,10 @@ bool X86AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
break;
// FIXME: Improve diagnostic.
- if (getLexer().isNot(AsmToken::Comma))
- return Error(L, "unexpected token in directive");
+ if (getLexer().isNot(AsmToken::Comma)) {
+ Error(L, "unexpected token in directive");
+ return false;
+ }
Parser.Lex();
}
}
@@ -2672,22 +2577,29 @@ bool X86AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
}
/// ParseDirectiveCode
-/// ::= .code32 | .code64
+/// ::= .code16 | .code32 | .code64
bool X86AsmParser::ParseDirectiveCode(StringRef IDVal, SMLoc L) {
- if (IDVal == ".code32") {
+ if (IDVal == ".code16") {
Parser.Lex();
- if (is64BitMode()) {
- SwitchMode();
+ if (!is16BitMode()) {
+ SwitchMode(X86::Mode16Bit);
+ getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
+ }
+ } else if (IDVal == ".code32") {
+ Parser.Lex();
+ if (!is32BitMode()) {
+ SwitchMode(X86::Mode32Bit);
getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
}
} else if (IDVal == ".code64") {
Parser.Lex();
if (!is64BitMode()) {
- SwitchMode();
+ SwitchMode(X86::Mode64Bit);
getParser().getStreamer().EmitAssemblerFlag(MCAF_Code64);
}
} else {
- return Error(L, "unexpected directive " + IDVal);
+ Error(L, "unknown directive " + IDVal);
+ return false;
}
return false;
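A minimal use of the newly recognized directive (illustrative):
    .code16
    movw    $0x1234, %ax        # assembled with 16-bit operand size, no 0x66 prefix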
diff --git a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParserCommon.h b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParserCommon.h
new file mode 100644
index 0000000..ef1565f
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParserCommon.h
@@ -0,0 +1,43 @@
+//===-- X86AsmParserCommon.h - Common functions for X86AsmParser ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef X86_ASM_PARSER_COMMON_H
+#define X86_ASM_PARSER_COMMON_H
+
+namespace llvm {
+
+inline bool isImmSExti16i8Value(uint64_t Value) {
+ return (( Value <= 0x000000000000007FULL)||
+ (0x000000000000FF80ULL <= Value && Value <= 0x000000000000FFFFULL)||
+ (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
+}
+
+inline bool isImmSExti32i8Value(uint64_t Value) {
+ return (( Value <= 0x000000000000007FULL)||
+ (0x00000000FFFFFF80ULL <= Value && Value <= 0x00000000FFFFFFFFULL)||
+ (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
+}
+
+inline bool isImmZExtu32u8Value(uint64_t Value) {
+ return (Value <= 0x00000000000000FFULL);
+}
+
+inline bool isImmSExti64i8Value(uint64_t Value) {
+ return (( Value <= 0x000000000000007FULL)||
+ (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
+}
+
+inline bool isImmSExti64i32Value(uint64_t Value) {
+ return (( Value <= 0x000000007FFFFFFFULL)||
+ (0xFFFFFFFF80000000ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
+}
+
+} // End of namespace llvm
+
+#endif // X86_ASM_PARSER_COMMON_H
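A quick, hypothetical sanity check of the new range helpers (the arguments are the 64-bit bit patterns of the immediates):
    assert(isImmSExti16i8Value(0xFFFFFFFFFFFFFFFFULL));  // -1 fits an imm8 sign-extended to 16 bits
    assert(!isImmSExti16i8Value(0x0000000000000080ULL)); // +128 does not; it needs a full imm16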
diff --git a/contrib/llvm/lib/Target/X86/AsmParser/X86Operand.h b/contrib/llvm/lib/Target/X86/AsmParser/X86Operand.h
new file mode 100644
index 0000000..1bbfc11
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/AsmParser/X86Operand.h
@@ -0,0 +1,488 @@
+//===-- X86Operand.h - Parsed X86 machine instruction --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef X86_OPERAND_H
+#define X86_OPERAND_H
+
+#include "X86AsmParserCommon.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/ADT/STLExtras.h"
+
+namespace llvm {
+
+/// X86Operand - Instances of this class represent a parsed X86 machine
+/// instruction.
+struct X86Operand : public MCParsedAsmOperand {
+ enum KindTy {
+ Token,
+ Register,
+ Immediate,
+ Memory
+ } Kind;
+
+ SMLoc StartLoc, EndLoc;
+ SMLoc OffsetOfLoc;
+ StringRef SymName;
+ void *OpDecl;
+ bool AddressOf;
+
+ struct TokOp {
+ const char *Data;
+ unsigned Length;
+ };
+
+ struct RegOp {
+ unsigned RegNo;
+ };
+
+ struct ImmOp {
+ const MCExpr *Val;
+ };
+
+ struct MemOp {
+ unsigned SegReg;
+ const MCExpr *Disp;
+ unsigned BaseReg;
+ unsigned IndexReg;
+ unsigned Scale;
+ unsigned Size;
+ };
+
+ union {
+ struct TokOp Tok;
+ struct RegOp Reg;
+ struct ImmOp Imm;
+ struct MemOp Mem;
+ };
+
+ X86Operand(KindTy K, SMLoc Start, SMLoc End)
+ : Kind(K), StartLoc(Start), EndLoc(End) {}
+
+ StringRef getSymName() override { return SymName; }
+ void *getOpDecl() override { return OpDecl; }
+
+ /// getStartLoc - Get the location of the first token of this operand.
+ SMLoc getStartLoc() const override { return StartLoc; }
+ /// getEndLoc - Get the location of the last token of this operand.
+ SMLoc getEndLoc() const override { return EndLoc; }
+ /// getLocRange - Get the range between the first and last token of this
+ /// operand.
+ SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
+ /// getOffsetOfLoc - Get the location of the offset operator.
+ SMLoc getOffsetOfLoc() const override { return OffsetOfLoc; }
+
+ void print(raw_ostream &OS) const override {}
+
+ StringRef getToken() const {
+ assert(Kind == Token && "Invalid access!");
+ return StringRef(Tok.Data, Tok.Length);
+ }
+ void setTokenValue(StringRef Value) {
+ assert(Kind == Token && "Invalid access!");
+ Tok.Data = Value.data();
+ Tok.Length = Value.size();
+ }
+
+ unsigned getReg() const override {
+ assert(Kind == Register && "Invalid access!");
+ return Reg.RegNo;
+ }
+
+ const MCExpr *getImm() const {
+ assert(Kind == Immediate && "Invalid access!");
+ return Imm.Val;
+ }
+
+ const MCExpr *getMemDisp() const {
+ assert(Kind == Memory && "Invalid access!");
+ return Mem.Disp;
+ }
+ unsigned getMemSegReg() const {
+ assert(Kind == Memory && "Invalid access!");
+ return Mem.SegReg;
+ }
+ unsigned getMemBaseReg() const {
+ assert(Kind == Memory && "Invalid access!");
+ return Mem.BaseReg;
+ }
+ unsigned getMemIndexReg() const {
+ assert(Kind == Memory && "Invalid access!");
+ return Mem.IndexReg;
+ }
+ unsigned getMemScale() const {
+ assert(Kind == Memory && "Invalid access!");
+ return Mem.Scale;
+ }
+
+ bool isToken() const override {return Kind == Token; }
+
+ bool isImm() const override { return Kind == Immediate; }
+
+ bool isImmSExti16i8() const {
+ if (!isImm())
+ return false;
+
+ // If this isn't a constant expr, just assume it fits and let relaxation
+ // handle it.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE)
+ return true;
+
+ // Otherwise, check the value is in a range that makes sense for this
+ // extension.
+ return isImmSExti16i8Value(CE->getValue());
+ }
+ bool isImmSExti32i8() const {
+ if (!isImm())
+ return false;
+
+ // If this isn't a constant expr, just assume it fits and let relaxation
+ // handle it.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE)
+ return true;
+
+ // Otherwise, check the value is in a range that makes sense for this
+ // extension.
+ return isImmSExti32i8Value(CE->getValue());
+ }
+ bool isImmZExtu32u8() const {
+ if (!isImm())
+ return false;
+
+ // If this isn't a constant expr, just assume it fits and let relaxation
+ // handle it.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE)
+ return true;
+
+ // Otherwise, check the value is in a range that makes sense for this
+ // extension.
+ return isImmZExtu32u8Value(CE->getValue());
+ }
+ bool isImmSExti64i8() const {
+ if (!isImm())
+ return false;
+
+ // If this isn't a constant expr, just assume it fits and let relaxation
+ // handle it.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE)
+ return true;
+
+ // Otherwise, check the value is in a range that makes sense for this
+ // extension.
+ return isImmSExti64i8Value(CE->getValue());
+ }
+ bool isImmSExti64i32() const {
+ if (!isImm())
+ return false;
+
+ // If this isn't a constant expr, just assume it fits and let relaxation
+ // handle it.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE)
+ return true;
+
+ // Otherwise, check the value is in a range that makes sense for this
+ // extension.
+ return isImmSExti64i32Value(CE->getValue());
+ }
+
+ bool isOffsetOf() const override {
+ return OffsetOfLoc.getPointer();
+ }
+
+ bool needAddressOf() const override {
+ return AddressOf;
+ }
+
+ bool isMem() const override { return Kind == Memory; }
+ bool isMem8() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 8);
+ }
+ bool isMem16() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 16);
+ }
+ bool isMem32() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 32);
+ }
+ bool isMem64() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 64);
+ }
+ bool isMem80() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 80);
+ }
+ bool isMem128() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 128);
+ }
+ bool isMem256() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 256);
+ }
+ bool isMem512() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 512);
+ }
+
+ bool isMemVX32() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
+ getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15;
+ }
+ bool isMemVY32() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
+ getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15;
+ }
+ bool isMemVX64() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
+ getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15;
+ }
+ bool isMemVY64() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
+ getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15;
+ }
+ bool isMemVZ32() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
+ getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31;
+ }
+ bool isMemVZ64() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
+ getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31;
+ }
+
+ bool isAbsMem() const {
+ return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
+ !getMemIndexReg() && getMemScale() == 1;
+ }
+
+ bool isSrcIdx() const {
+ return !getMemIndexReg() && getMemScale() == 1 &&
+ (getMemBaseReg() == X86::RSI || getMemBaseReg() == X86::ESI ||
+ getMemBaseReg() == X86::SI) && isa<MCConstantExpr>(getMemDisp()) &&
+ cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
+ }
+ bool isSrcIdx8() const {
+ return isMem8() && isSrcIdx();
+ }
+ bool isSrcIdx16() const {
+ return isMem16() && isSrcIdx();
+ }
+ bool isSrcIdx32() const {
+ return isMem32() && isSrcIdx();
+ }
+ bool isSrcIdx64() const {
+ return isMem64() && isSrcIdx();
+ }
+
+ bool isDstIdx() const {
+ return !getMemIndexReg() && getMemScale() == 1 &&
+ (getMemSegReg() == 0 || getMemSegReg() == X86::ES) &&
+ (getMemBaseReg() == X86::RDI || getMemBaseReg() == X86::EDI ||
+ getMemBaseReg() == X86::DI) && isa<MCConstantExpr>(getMemDisp()) &&
+ cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
+ }
+ bool isDstIdx8() const {
+ return isMem8() && isDstIdx();
+ }
+ bool isDstIdx16() const {
+ return isMem16() && isDstIdx();
+ }
+ bool isDstIdx32() const {
+ return isMem32() && isDstIdx();
+ }
+ bool isDstIdx64() const {
+ return isMem64() && isDstIdx();
+ }
+
+ bool isMemOffs8() const {
+ return Kind == Memory && !getMemBaseReg() &&
+ !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 8);
+ }
+ bool isMemOffs16() const {
+ return Kind == Memory && !getMemBaseReg() &&
+ !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 16);
+ }
+ bool isMemOffs32() const {
+ return Kind == Memory && !getMemBaseReg() &&
+ !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 32);
+ }
+ bool isMemOffs64() const {
+ return Kind == Memory && !getMemBaseReg() &&
+ !getMemIndexReg() && getMemScale() == 1 && (!Mem.Size || Mem.Size == 64);
+ }
+
+ bool isReg() const override { return Kind == Register; }
+
+ bool isGR32orGR64() const {
+ return Kind == Register &&
+ (X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
+ X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
+ }
+
+ void addExpr(MCInst &Inst, const MCExpr *Expr) const {
+ // Add as immediates when possible.
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
+ Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
+ else
+ Inst.addOperand(MCOperand::CreateExpr(Expr));
+ }
+
+ void addRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getReg()));
+ }
+
+ static unsigned getGR32FromGR64(unsigned RegNo) {
+ switch (RegNo) {
+ default: llvm_unreachable("Unexpected register");
+ case X86::RAX: return X86::EAX;
+ case X86::RCX: return X86::ECX;
+ case X86::RDX: return X86::EDX;
+ case X86::RBX: return X86::EBX;
+ case X86::RBP: return X86::EBP;
+ case X86::RSP: return X86::ESP;
+ case X86::RSI: return X86::ESI;
+ case X86::RDI: return X86::EDI;
+ case X86::R8: return X86::R8D;
+ case X86::R9: return X86::R9D;
+ case X86::R10: return X86::R10D;
+ case X86::R11: return X86::R11D;
+ case X86::R12: return X86::R12D;
+ case X86::R13: return X86::R13D;
+ case X86::R14: return X86::R14D;
+ case X86::R15: return X86::R15D;
+ case X86::RIP: return X86::EIP;
+ }
+ }
+
+ void addGR32orGR64Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ unsigned RegNo = getReg();
+ if (X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
+ RegNo = getGR32FromGR64(RegNo);
+ Inst.addOperand(MCOperand::CreateReg(RegNo));
+ }
+
+ void addImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ addExpr(Inst, getImm());
+ }
+
+ void addMemOperands(MCInst &Inst, unsigned N) const {
+ assert((N == 5) && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getMemBaseReg()));
+ Inst.addOperand(MCOperand::CreateImm(getMemScale()));
+ Inst.addOperand(MCOperand::CreateReg(getMemIndexReg()));
+ addExpr(Inst, getMemDisp());
+ Inst.addOperand(MCOperand::CreateReg(getMemSegReg()));
+ }
+
+ void addAbsMemOperands(MCInst &Inst, unsigned N) const {
+ assert((N == 1) && "Invalid number of operands!");
+ // Add as immediates when possible.
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
+ Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
+ else
+ Inst.addOperand(MCOperand::CreateExpr(getMemDisp()));
+ }
+
+ void addSrcIdxOperands(MCInst &Inst, unsigned N) const {
+ assert((N == 2) && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getMemBaseReg()));
+ Inst.addOperand(MCOperand::CreateReg(getMemSegReg()));
+ }
+ void addDstIdxOperands(MCInst &Inst, unsigned N) const {
+ assert((N == 1) && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getMemBaseReg()));
+ }
+
+ void addMemOffsOperands(MCInst &Inst, unsigned N) const {
+ assert((N == 2) && "Invalid number of operands!");
+ // Add as immediates when possible.
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
+ Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
+ else
+ Inst.addOperand(MCOperand::CreateExpr(getMemDisp()));
+ Inst.addOperand(MCOperand::CreateReg(getMemSegReg()));
+ }
+
+ static std::unique_ptr<X86Operand> CreateToken(StringRef Str, SMLoc Loc) {
+ SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size());
+ auto Res = llvm::make_unique<X86Operand>(Token, Loc, EndLoc);
+ Res->Tok.Data = Str.data();
+ Res->Tok.Length = Str.size();
+ return Res;
+ }
+
+ static std::unique_ptr<X86Operand>
+ CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc,
+ bool AddressOf = false, SMLoc OffsetOfLoc = SMLoc(),
+ StringRef SymName = StringRef(), void *OpDecl = nullptr) {
+ auto Res = llvm::make_unique<X86Operand>(Register, StartLoc, EndLoc);
+ Res->Reg.RegNo = RegNo;
+ Res->AddressOf = AddressOf;
+ Res->OffsetOfLoc = OffsetOfLoc;
+ Res->SymName = SymName;
+ Res->OpDecl = OpDecl;
+ return Res;
+ }
+
+ static std::unique_ptr<X86Operand> CreateImm(const MCExpr *Val,
+ SMLoc StartLoc, SMLoc EndLoc) {
+ auto Res = llvm::make_unique<X86Operand>(Immediate, StartLoc, EndLoc);
+ Res->Imm.Val = Val;
+ return Res;
+ }
+
+ /// Create an absolute memory operand.
+ static std::unique_ptr<X86Operand>
+ CreateMem(const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc, unsigned Size = 0,
+ StringRef SymName = StringRef(), void *OpDecl = nullptr) {
+ auto Res = llvm::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
+ Res->Mem.SegReg = 0;
+ Res->Mem.Disp = Disp;
+ Res->Mem.BaseReg = 0;
+ Res->Mem.IndexReg = 0;
+ Res->Mem.Scale = 1;
+ Res->Mem.Size = Size;
+ Res->SymName = SymName;
+ Res->OpDecl = OpDecl;
+ Res->AddressOf = false;
+ return Res;
+ }
+
+ /// Create a generalized memory operand.
+ static std::unique_ptr<X86Operand>
+ CreateMem(unsigned SegReg, const MCExpr *Disp, unsigned BaseReg,
+ unsigned IndexReg, unsigned Scale, SMLoc StartLoc, SMLoc EndLoc,
+ unsigned Size = 0, StringRef SymName = StringRef(),
+ void *OpDecl = nullptr) {
+ // We should never just have a displacement, that should be parsed as an
+ // absolute memory operand.
+ assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!");
+
+ // The scale should always be one of {1,2,4,8}.
+ assert(((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8)) &&
+ "Invalid scale!");
+ auto Res = llvm::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
+ Res->Mem.SegReg = SegReg;
+ Res->Mem.Disp = Disp;
+ Res->Mem.BaseReg = BaseReg;
+ Res->Mem.IndexReg = IndexReg;
+ Res->Mem.Scale = Scale;
+ Res->Mem.Size = Size;
+ Res->SymName = SymName;
+ Res->OpDecl = OpDecl;
+ Res->AddressOf = false;
+ return Res;
+ }
+};
+
+} // End of namespace llvm
+
+#endif // X86_OPERAND
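The factories now hand back std::unique_ptr, so callers own operands directly; an illustrative caller, assuming an SMLoc Loc and an OperandVector Operands as in the parser above:
    Operands.push_back(X86Operand::CreateToken("int3", Loc));      // ownership moves into the vector
    Operands.push_back(X86Operand::CreateReg(X86::DX, Loc, Loc));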
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
index 903e36c..521bd21 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
@@ -27,26 +27,30 @@
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+using namespace llvm::X86Disassembler;
+
+#define DEBUG_TYPE "x86-disassembler"
+
#define GET_REGINFO_ENUM
#include "X86GenRegisterInfo.inc"
#define GET_INSTRINFO_ENUM
#include "X86GenInstrInfo.inc"
+#define GET_SUBTARGETINFO_ENUM
+#include "X86GenSubtargetInfo.inc"
-using namespace llvm;
-using namespace llvm::X86Disassembler;
-
-void x86DisassemblerDebug(const char *file,
- unsigned line,
- const char *s) {
+void llvm::X86Disassembler::Debug(const char *file, unsigned line,
+ const char *s) {
dbgs() << file << ":" << line << ": " << s;
}
-const char *x86DisassemblerGetInstrName(unsigned Opcode, const void *mii) {
+const char *llvm::X86Disassembler::GetInstrName(unsigned Opcode,
+ const void *mii) {
const MCInstrInfo *MII = static_cast<const MCInstrInfo *>(mii);
return MII->getName(Opcode);
}
-#define debug(s) DEBUG(x86DisassemblerDebug(__FILE__, __LINE__, s));
+#define debug(s) DEBUG(Debug(__FILE__, __LINE__, s));
namespace llvm {
@@ -72,13 +76,25 @@ static bool translateInstruction(MCInst &target,
InternalInstruction &source,
const MCDisassembler *Dis);
-X86GenericDisassembler::X86GenericDisassembler(const MCSubtargetInfo &STI,
- DisassemblerMode mode,
- const MCInstrInfo *MII)
- : MCDisassembler(STI), MII(MII), fMode(mode) {}
-
-X86GenericDisassembler::~X86GenericDisassembler() {
- delete MII;
+X86GenericDisassembler::X86GenericDisassembler(
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx,
+ std::unique_ptr<const MCInstrInfo> MII)
+ : MCDisassembler(STI, Ctx), MII(std::move(MII)) {
+ switch (STI.getFeatureBits() &
+ (X86::Mode16Bit | X86::Mode32Bit | X86::Mode64Bit)) {
+ case X86::Mode16Bit:
+ fMode = MODE_16BIT;
+ break;
+ case X86::Mode32Bit:
+ fMode = MODE_32BIT;
+ break;
+ case X86::Mode64Bit:
+ fMode = MODE_64BIT;
+ break;
+ default:
+ llvm_unreachable("Invalid CPU mode");
+ }
}
/// regionReader - a callback function that wraps the readByte method from
@@ -124,14 +140,14 @@ X86GenericDisassembler::getInstruction(MCInst &instr,
dlog_t loggerFn = logger;
if (&vStream == &nulls())
- loggerFn = 0; // Disable logging completely if it's going to nulls().
+ loggerFn = nullptr; // Disable logging completely if it's going to nulls().
int ret = decodeInstruction(&internalInstr,
regionReader,
(const void*)&region,
loggerFn,
(void*)&vStream,
- (const void*)MII,
+ (const void*)MII.get(),
address,
fMode);
@@ -207,6 +223,61 @@ static void tryAddingPcLoadReferenceComment(uint64_t Address, uint64_t Value,
Dis->tryAddingPcLoadReferenceComment(Value, Address);
}
+static const uint8_t segmentRegnums[SEG_OVERRIDE_max] = {
+ 0, // SEG_OVERRIDE_NONE
+ X86::CS,
+ X86::SS,
+ X86::DS,
+ X86::ES,
+ X86::FS,
+ X86::GS
+};
+
+/// translateSrcIndex - Appends a source index operand to an MCInst.
+///
+/// @param mcInst - The MCInst to append to.
+/// @param insn - The internal instruction.
+static bool translateSrcIndex(MCInst &mcInst, InternalInstruction &insn) {
+ unsigned baseRegNo;
+
+ if (insn.mode == MODE_64BIT)
+ baseRegNo = insn.prefixPresent[0x67] ? X86::ESI : X86::RSI;
+ else if (insn.mode == MODE_32BIT)
+ baseRegNo = insn.prefixPresent[0x67] ? X86::SI : X86::ESI;
+ else {
+ assert(insn.mode == MODE_16BIT);
+ baseRegNo = insn.prefixPresent[0x67] ? X86::ESI : X86::SI;
+ }
+ MCOperand baseReg = MCOperand::CreateReg(baseRegNo);
+ mcInst.addOperand(baseReg);
+
+ MCOperand segmentReg;
+ segmentReg = MCOperand::CreateReg(segmentRegnums[insn.segmentOverride]);
+ mcInst.addOperand(segmentReg);
+ return false;
+}
+
+/// translateDstIndex - Appends a destination index operand to an MCInst.
+///
+/// @param mcInst - The MCInst to append to.
+/// @param insn - The internal instruction.
+
+static bool translateDstIndex(MCInst &mcInst, InternalInstruction &insn) {
+ unsigned baseRegNo;
+
+ if (insn.mode == MODE_64BIT)
+ baseRegNo = insn.prefixPresent[0x67] ? X86::EDI : X86::RDI;
+ else if (insn.mode == MODE_32BIT)
+ baseRegNo = insn.prefixPresent[0x67] ? X86::DI : X86::EDI;
+ else {
+ assert(insn.mode == MODE_16BIT);
+ baseRegNo = insn.prefixPresent[0x67] ? X86::EDI : X86::DI;
+ }
+ MCOperand baseReg = MCOperand::CreateReg(baseRegNo);
+ mcInst.addOperand(baseReg);
+ return false;
+}
+
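The two index helpers above choose the register from the CPU mode and the 0x67 address-size prefix; for movsb the decoded forms are, illustratively:
    64-bit mode:            movsb   (%rsi), (%rdi)
    64-bit mode with 0x67:  movsb   (%esi), (%edi)
    16-bit mode:            movsb   (%si),  (%di)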
/// translateImmediate - Appends an immediate operand to an MCInst.
///
/// @param mcInst - The MCInst to append to.
@@ -248,7 +319,7 @@ static void translateImmediate(MCInst &mcInst, uint64_t immediate,
}
// By default sign-extend all X86 immediates based on their encoding.
else if (type == TYPE_IMM8 || type == TYPE_IMM16 || type == TYPE_IMM32 ||
- type == TYPE_IMM64) {
+ type == TYPE_IMM64 || type == TYPE_IMMv) {
uint32_t Opcode = mcInst.getOpcode();
switch (operand.encoding) {
default:
@@ -315,6 +386,13 @@ static void translateImmediate(MCInst &mcInst, uint64_t immediate,
insn.immediateOffset, insn.immediateSize,
mcInst, Dis))
mcInst.addOperand(MCOperand::CreateImm(immediate));
+
+ if (type == TYPE_MOFFS8 || type == TYPE_MOFFS16 ||
+ type == TYPE_MOFFS32 || type == TYPE_MOFFS64) {
+ MCOperand segmentReg;
+ segmentReg = MCOperand::CreateReg(segmentRegnums[insn.segmentOverride]);
+ mcInst.addOperand(segmentReg);
+ }
}
/// translateRMRegister - Translates a register stored in the R/M field of the
@@ -418,13 +496,22 @@ static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn,
bool IndexIs256 = (Opcode == X86::VGATHERQPDYrm ||
Opcode == X86::VGATHERDPSYrm ||
Opcode == X86::VGATHERQPSYrm ||
+ Opcode == X86::VGATHERDPDZrm ||
+ Opcode == X86::VPGATHERDQZrm ||
Opcode == X86::VPGATHERQQYrm ||
Opcode == X86::VPGATHERDDYrm ||
Opcode == X86::VPGATHERQDYrm);
- if (IndexIs128 || IndexIs256) {
+ bool IndexIs512 = (Opcode == X86::VGATHERQPDZrm ||
+ Opcode == X86::VGATHERDPSZrm ||
+ Opcode == X86::VGATHERQPSZrm ||
+ Opcode == X86::VPGATHERQQZrm ||
+ Opcode == X86::VPGATHERDDZrm ||
+ Opcode == X86::VPGATHERQDZrm);
+ if (IndexIs128 || IndexIs256 || IndexIs512) {
unsigned IndexOffset = insn.sibIndex -
(insn.addressSize == 8 ? SIB_INDEX_RAX:SIB_INDEX_EAX);
- SIBIndex IndexBase = IndexIs256 ? SIB_INDEX_YMM0 : SIB_INDEX_XMM0;
+ SIBIndex IndexBase = IndexIs512 ? SIB_INDEX_ZMM0 :
+ IndexIs256 ? SIB_INDEX_YMM0 : SIB_INDEX_XMM0;
insn.sibIndex = (SIBIndex)(IndexBase +
(insn.sibIndex == SIB_INDEX_NONE ? 4 : IndexOffset));
}
@@ -513,17 +600,7 @@ static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn,
}
displacement = MCOperand::CreateImm(insn.displacement);
-
- static const uint8_t segmentRegnums[SEG_OVERRIDE_max] = {
- 0, // SEG_OVERRIDE_NONE
- X86::CS,
- X86::SS,
- X86::DS,
- X86::ES,
- X86::FS,
- X86::GS
- };
-
+
segmentReg = MCOperand::CreateReg(segmentRegnums[insn.segmentOverride]);
mcInst.addOperand(baseReg);
@@ -565,6 +642,9 @@ static bool translateRM(MCInst &mcInst, const OperandSpecifier &operand,
case TYPE_XMM128:
case TYPE_XMM256:
case TYPE_XMM512:
+ case TYPE_VK1:
+ case TYPE_VK8:
+ case TYPE_VK16:
case TYPE_DEBUGREG:
case TYPE_CONTROLREG:
return translateRMRegister(mcInst, insn);
@@ -596,16 +676,25 @@ static bool translateRM(MCInst &mcInst, const OperandSpecifier &operand,
///
/// @param mcInst - The MCInst to append to.
/// @param stackPos - The stack position to translate.
-/// @return - 0 on success; nonzero otherwise.
-static bool translateFPRegister(MCInst &mcInst,
- uint8_t stackPos) {
- if (stackPos >= 8) {
- debug("Invalid FP stack position");
+static void translateFPRegister(MCInst &mcInst,
+ uint8_t stackPos) {
+ mcInst.addOperand(MCOperand::CreateReg(X86::ST0 + stackPos));
+}
+
+/// translateMaskRegister - Translates a 3-bit mask register number to
+/// LLVM form, and appends it to an MCInst.
+///
+/// @param mcInst - The MCInst to append to.
+/// @param maskRegNum - Number of mask register from 0 to 7.
+/// @return - false on success; true otherwise.
+static bool translateMaskRegister(MCInst &mcInst,
+ uint8_t maskRegNum) {
+ if (maskRegNum >= 8) {
+ debug("Invalid mask register number");
return true;
}
-
- mcInst.addOperand(MCOperand::CreateReg(X86::ST0 + stackPos));
+ mcInst.addOperand(MCOperand::CreateReg(X86::K0 + maskRegNum));
return false;
}
@@ -626,7 +715,9 @@ static bool translateOperand(MCInst &mcInst, const OperandSpecifier &operand,
case ENCODING_REG:
translateRegister(mcInst, insn.reg);
return false;
- case ENCODING_RM:
+ case ENCODING_WRITEMASK:
+ return translateMaskRegister(mcInst, insn.writemask);
+ CASE_ENCODING_RM:
return translateRM(mcInst, operand, insn, Dis);
case ENCODING_CB:
case ENCODING_CW:
@@ -648,17 +739,20 @@ static bool translateOperand(MCInst &mcInst, const OperandSpecifier &operand,
insn,
Dis);
return false;
+ case ENCODING_SI:
+ return translateSrcIndex(mcInst, insn);
+ case ENCODING_DI:
+ return translateDstIndex(mcInst, insn);
case ENCODING_RB:
case ENCODING_RW:
case ENCODING_RD:
case ENCODING_RO:
- translateRegister(mcInst, insn.opcodeRegister);
- return false;
- case ENCODING_I:
- return translateFPRegister(mcInst, insn.opcodeModifier);
case ENCODING_Rv:
translateRegister(mcInst, insn.opcodeRegister);
return false;
+ case ENCODING_FP:
+ translateFPRegister(mcInst, insn.modRM & 7);
+ return false;
case ENCODING_VVVV:
translateRegister(mcInst, insn.vvvv);
return false;
@@ -693,13 +787,11 @@ static bool translateInstruction(MCInst &mcInst,
mcInst.setOpcode(X86::XACQUIRE_PREFIX);
}
- int index;
-
insn.numImmediatesTranslated = 0;
- for (index = 0; index < X86_MAX_OPERANDS; ++index) {
- if (insn.operands[index].encoding != ENCODING_NONE) {
- if (translateOperand(mcInst, insn.operands[index], insn, Dis)) {
+ for (const auto &Op : insn.operands) {
+ if (Op.encoding != ENCODING_NONE) {
+ if (translateOperand(mcInst, Op, insn, Dis)) {
return true;
}
}
@@ -708,22 +800,17 @@ static bool translateInstruction(MCInst &mcInst,
return false;
}
-static MCDisassembler *createX86_32Disassembler(const Target &T,
- const MCSubtargetInfo &STI) {
- return new X86Disassembler::X86GenericDisassembler(STI, MODE_32BIT,
- T.createMCInstrInfo());
-}
-
-static MCDisassembler *createX86_64Disassembler(const Target &T,
- const MCSubtargetInfo &STI) {
- return new X86Disassembler::X86GenericDisassembler(STI, MODE_64BIT,
- T.createMCInstrInfo());
+static MCDisassembler *createX86Disassembler(const Target &T,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ std::unique_ptr<const MCInstrInfo> MII(T.createMCInstrInfo());
+ return new X86Disassembler::X86GenericDisassembler(STI, Ctx, std::move(MII));
}
extern "C" void LLVMInitializeX86Disassembler() {
// Register the disassembler.
TargetRegistry::RegisterMCDisassembler(TheX86_32Target,
- createX86_32Disassembler);
+ createX86Disassembler);
TargetRegistry::RegisterMCDisassembler(TheX86_64Target,
- createX86_64Disassembler);
+ createX86Disassembler);
}
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h
index b92427a..4dc7c29 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h
@@ -74,17 +74,7 @@
#ifndef X86DISASSEMBLER_H
#define X86DISASSEMBLER_H
-#define INSTRUCTION_SPECIFIER_FIELDS \
- uint16_t operands;
-
-#define INSTRUCTION_IDS \
- uint16_t instructionIDs;
-
#include "X86DisassemblerDecoderCommon.h"
-
-#undef INSTRUCTION_SPECIFIER_FIELDS
-#undef INSTRUCTION_IDS
-
#include "llvm/MC/MCDisassembler.h"
namespace llvm {
@@ -101,24 +91,19 @@ namespace X86Disassembler {
/// All each platform class should have to do is subclass the constructor, and
/// provide a different disassemblerMode value.
class X86GenericDisassembler : public MCDisassembler {
- const MCInstrInfo *MII;
+ std::unique_ptr<const MCInstrInfo> MII;
public:
/// Constructor - Initializes the disassembler.
///
- /// @param mode - The X86 architecture mode to decode for.
- X86GenericDisassembler(const MCSubtargetInfo &STI, DisassemblerMode mode,
- const MCInstrInfo *MII);
-private:
- ~X86GenericDisassembler();
+ X86GenericDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx,
+ std::unique_ptr<const MCInstrInfo> MII);
public:
/// getInstruction - See MCDisassembler.
- DecodeStatus getInstruction(MCInst &instr,
- uint64_t &size,
- const MemoryObject &region,
- uint64_t address,
+ DecodeStatus getInstruction(MCInst &instr, uint64_t &size,
+ const MemoryObject &region, uint64_t address,
raw_ostream &vStream,
- raw_ostream &cStream) const;
+ raw_ostream &cStream) const override;
private:
DisassemblerMode fMode;
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.cpp
index 16ee0d3..ab3d1f7 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.cpp
@@ -1,17 +1,17 @@
-/*===-- X86DisassemblerDecoder.c - Disassembler decoder ------------*- C -*-===*
- *
- * The LLVM Compiler Infrastructure
- *
- * This file is distributed under the University of Illinois Open Source
- * License. See LICENSE.TXT for details.
- *
- *===----------------------------------------------------------------------===*
- *
- * This file is part of the X86 Disassembler.
- * It contains the implementation of the instruction decoder.
- * Documentation for the disassembler can be found in X86Disassembler.h.
- *
- *===----------------------------------------------------------------------===*/
+//===-- X86DisassemblerDecoder.c - Disassembler decoder -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is part of the X86 Disassembler.
+// It contains the implementation of the instruction decoder.
+// Documentation for the disassembler can be found in X86Disassembler.h.
+//
+//===----------------------------------------------------------------------===//
#include <stdarg.h> /* for va_*() */
#include <stdio.h> /* for vsnprintf() */
@@ -20,13 +20,35 @@
#include "X86DisassemblerDecoder.h"
-#include "X86GenDisassemblerTables.inc"
+using namespace llvm::X86Disassembler;
+
+/// Specifies whether a ModR/M byte is needed and (if so) which
+/// instruction each possible value of the ModR/M byte corresponds to. Once
+/// this information is known, we have narrowed down to a single instruction.
+struct ModRMDecision {
+ uint8_t modrm_type;
+ uint16_t instructionIDs;
+};
+
+/// Specifies which set of ModR/M->instruction tables to look at
+/// given a particular opcode.
+struct OpcodeDecision {
+ ModRMDecision modRMDecisions[256];
+};
+
+/// Specifies which opcode->instruction tables to look at given
+/// a particular context (set of attributes). Since there are many possible
+/// contexts, the decoder first uses CONTEXTS_SYM to determine which context
+/// applies given a specific set of attributes. Hence there are only IC_max
+/// entries in this table, rather than 2^(ATTR_max).
+struct ContextDecision {
+ OpcodeDecision opcodeDecisions[IC_max];
+};
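+
+// The decode tables emitted into X86GenDisassemblerTables.inc (included
+// below) are built out of these structs, so their layout has to match what
+// the table generator produces.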
-#define TRUE 1
-#define FALSE 0
+#include "X86GenDisassemblerTables.inc"
#ifndef NDEBUG
-#define debug(s) do { x86DisassemblerDebug(__FILE__, __LINE__, s); } while (0)
+#define debug(s) do { Debug(__FILE__, __LINE__, s); } while (0)
#else
#define debug(s) do { } while (0)
#endif
@@ -40,8 +62,8 @@
 * @return - The InstructionContext to use when looking up an
 *           instruction with these attributes.
*/
-static InstructionContext contextForAttrs(uint8_t attrMask) {
- return CONTEXTS_SYM[attrMask];
+static InstructionContext contextForAttrs(uint16_t attrMask) {
+ return static_cast<InstructionContext>(CONTEXTS_SYM[attrMask]);
}
/*
@@ -53,12 +75,12 @@ static InstructionContext contextForAttrs(uint8_t attrMask) {
* contextForAttrs.
* @param opcode - The last byte of the instruction's opcode, not counting
* ModR/M extensions and escapes.
- * @return - TRUE if the ModR/M byte is required, FALSE otherwise.
+ * @return - true if the ModR/M byte is required, false otherwise.
*/
static int modRMRequired(OpcodeType type,
InstructionContext insnContext,
- uint8_t opcode) {
- const struct ContextDecision* decision = 0;
+ uint16_t opcode) {
+ const struct ContextDecision* decision = nullptr;
switch (type) {
case ONEBYTE:
@@ -73,12 +95,6 @@ static int modRMRequired(OpcodeType type,
case THREEBYTE_3A:
decision = &THREEBYTE3A_SYM;
break;
- case THREEBYTE_A6:
- decision = &THREEBYTEA6_SYM;
- break;
- case THREEBYTE_A7:
- decision = &THREEBYTEA7_SYM;
- break;
case XOP8_MAP:
decision = &XOP8_MAP_SYM;
break;
@@ -108,7 +124,7 @@ static InstrUID decode(OpcodeType type,
InstructionContext insnContext,
uint8_t opcode,
uint8_t modRM) {
- const struct ModRMDecision* dec = 0;
+ const struct ModRMDecision* dec = nullptr;
switch (type) {
case ONEBYTE:
@@ -123,12 +139,6 @@ static InstrUID decode(OpcodeType type,
case THREEBYTE_3A:
dec = &THREEBYTE3A_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode];
break;
- case THREEBYTE_A6:
- dec = &THREEBYTEA6_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode];
- break;
- case THREEBYTE_A7:
- dec = &THREEBYTEA7_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode];
- break;
case XOP8_MAP:
dec = &XOP8_MAP_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode];
break;
@@ -296,15 +306,15 @@ static void setPrefixPresent(struct InternalInstruction* insn,
* @param location - The location to query.
* @return - Whether the prefix is at that location.
*/
-static BOOL isPrefixAtLocation(struct InternalInstruction* insn,
+static bool isPrefixAtLocation(struct InternalInstruction* insn,
uint8_t prefix,
uint64_t location)
{
if (insn->prefixPresent[prefix] == 1 &&
insn->prefixLocations[prefix] == location)
- return TRUE;
+ return true;
else
- return FALSE;
+ return false;
}
/*
@@ -317,14 +327,14 @@ static BOOL isPrefixAtLocation(struct InternalInstruction* insn,
* bytes, and no prefixes conflicted; nonzero otherwise.
*/
static int readPrefixes(struct InternalInstruction* insn) {
- BOOL isPrefix = TRUE;
- BOOL prefixGroups[4] = { FALSE };
+ bool isPrefix = true;
+ bool prefixGroups[4] = { false };
uint64_t prefixLocation;
uint8_t byte = 0;
uint8_t nextByte;
- BOOL hasAdSize = FALSE;
- BOOL hasOpSize = FALSE;
+ bool hasAdSize = false;
+ bool hasOpSize = false;
dbgprintf(insn, "readPrefixes()");
@@ -356,7 +366,7 @@ static int readPrefixes(struct InternalInstruction* insn) {
if ((byte == 0xf2 || byte == 0xf3) &&
((nextByte == 0xf0) |
((nextByte & 0xfe) == 0x86 || (nextByte & 0xf8) == 0x90)))
- insn->xAcquireRelease = TRUE;
+ insn->xAcquireRelease = true;
/*
* Also if the byte is 0xf3, and the following condition is met:
* - it is followed by a "mov mem, reg" (opcode 0x88/0x89) or
@@ -366,7 +376,7 @@ static int readPrefixes(struct InternalInstruction* insn) {
if (byte == 0xf3 &&
(nextByte == 0x88 || nextByte == 0x89 ||
nextByte == 0xc6 || nextByte == 0xc7))
- insn->xAcquireRelease = TRUE;
+ insn->xAcquireRelease = true;
if (insn->mode == MODE_64BIT && (nextByte & 0xf0) == 0x40) {
if (consumeByte(insn, &nextByte))
return -1;
@@ -384,7 +394,7 @@ static int readPrefixes(struct InternalInstruction* insn) {
case 0xf3: /* REP or REPE/REPZ */
if (prefixGroups[0])
dbgprintf(insn, "Redundant Group 1 prefix");
- prefixGroups[0] = TRUE;
+ prefixGroups[0] = true;
setPrefixPresent(insn, byte, prefixLocation);
break;
case 0x2e: /* CS segment override -OR- Branch not taken */
@@ -418,25 +428,25 @@ static int readPrefixes(struct InternalInstruction* insn) {
}
if (prefixGroups[1])
dbgprintf(insn, "Redundant Group 2 prefix");
- prefixGroups[1] = TRUE;
+ prefixGroups[1] = true;
setPrefixPresent(insn, byte, prefixLocation);
break;
case 0x66: /* Operand-size override */
if (prefixGroups[2])
dbgprintf(insn, "Redundant Group 3 prefix");
- prefixGroups[2] = TRUE;
- hasOpSize = TRUE;
+ prefixGroups[2] = true;
+ hasOpSize = true;
setPrefixPresent(insn, byte, prefixLocation);
break;
case 0x67: /* Address-size override */
if (prefixGroups[3])
dbgprintf(insn, "Redundant Group 4 prefix");
- prefixGroups[3] = TRUE;
- hasAdSize = TRUE;
+ prefixGroups[3] = true;
+ hasAdSize = true;
setPrefixPresent(insn, byte, prefixLocation);
break;
default: /* Not a prefix byte */
- isPrefix = FALSE;
+ isPrefix = false;
break;
}
@@ -444,9 +454,58 @@ static int readPrefixes(struct InternalInstruction* insn) {
dbgprintf(insn, "Found prefix 0x%hhx", byte);
}
- insn->vexXopType = TYPE_NO_VEX_XOP;
+ insn->vectorExtensionType = TYPE_NO_VEX_XOP;
+
+ if (byte == 0x62) {
+ uint8_t byte1, byte2;
+
+ if (consumeByte(insn, &byte1)) {
+ dbgprintf(insn, "Couldn't read second byte of EVEX prefix");
+ return -1;
+ }
- if (byte == 0xc4) {
+ if (lookAtByte(insn, &byte2)) {
+ dbgprintf(insn, "Couldn't read third byte of EVEX prefix");
+ return -1;
+ }
+
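+    /*
+     * 0x62 only starts an EVEX prefix if the reserved bits carry their
+     * required values (bits 3:2 of the second byte clear, bit 2 of the third
+     * byte set) and, outside 64-bit mode, the top two bits of the second
+     * byte are set; otherwise 0x62 is the BOUND instruction, whose ModR/M
+     * byte cannot have mod == 0b11.
+     */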
+ if ((insn->mode == MODE_64BIT || (byte1 & 0xc0) == 0xc0) &&
+ ((~byte1 & 0xc) == 0xc) && ((byte2 & 0x4) == 0x4)) {
+ insn->vectorExtensionType = TYPE_EVEX;
+ }
+ else {
+ unconsumeByte(insn); /* unconsume byte1 */
+ unconsumeByte(insn); /* unconsume byte */
+ insn->necessaryPrefixLocation = insn->readerCursor - 2;
+ }
+
+ if (insn->vectorExtensionType == TYPE_EVEX) {
+ insn->vectorExtensionPrefix[0] = byte;
+ insn->vectorExtensionPrefix[1] = byte1;
+ if (consumeByte(insn, &insn->vectorExtensionPrefix[2])) {
+ dbgprintf(insn, "Couldn't read third byte of EVEX prefix");
+ return -1;
+ }
+ if (consumeByte(insn, &insn->vectorExtensionPrefix[3])) {
+ dbgprintf(insn, "Couldn't read fourth byte of EVEX prefix");
+ return -1;
+ }
+
+ /* We simulate the REX prefix for simplicity's sake */
+ if (insn->mode == MODE_64BIT) {
+ insn->rexPrefix = 0x40
+ | (wFromEVEX3of4(insn->vectorExtensionPrefix[2]) << 3)
+ | (rFromEVEX2of4(insn->vectorExtensionPrefix[1]) << 2)
+ | (xFromEVEX2of4(insn->vectorExtensionPrefix[1]) << 1)
+ | (bFromEVEX2of4(insn->vectorExtensionPrefix[1]) << 0);
+ }
+
+ dbgprintf(insn, "Found EVEX prefix 0x%hhx 0x%hhx 0x%hhx 0x%hhx",
+ insn->vectorExtensionPrefix[0], insn->vectorExtensionPrefix[1],
+ insn->vectorExtensionPrefix[2], insn->vectorExtensionPrefix[3]);
+ }
+ }
+ else if (byte == 0xc4) {
uint8_t byte1;
if (lookAtByte(insn, &byte1)) {
@@ -455,7 +514,7 @@ static int readPrefixes(struct InternalInstruction* insn) {
}
if (insn->mode == MODE_64BIT || (byte1 & 0xc0) == 0xc0) {
- insn->vexXopType = TYPE_VEX_3B;
+ insn->vectorExtensionType = TYPE_VEX_3B;
insn->necessaryPrefixLocation = insn->readerCursor - 1;
}
else {
@@ -463,33 +522,24 @@ static int readPrefixes(struct InternalInstruction* insn) {
insn->necessaryPrefixLocation = insn->readerCursor - 1;
}
- if (insn->vexXopType == TYPE_VEX_3B) {
- insn->vexXopPrefix[0] = byte;
- consumeByte(insn, &insn->vexXopPrefix[1]);
- consumeByte(insn, &insn->vexXopPrefix[2]);
+ if (insn->vectorExtensionType == TYPE_VEX_3B) {
+ insn->vectorExtensionPrefix[0] = byte;
+ consumeByte(insn, &insn->vectorExtensionPrefix[1]);
+ consumeByte(insn, &insn->vectorExtensionPrefix[2]);
/* We simulate the REX prefix for simplicity's sake */
if (insn->mode == MODE_64BIT) {
insn->rexPrefix = 0x40
- | (wFromVEX3of3(insn->vexXopPrefix[2]) << 3)
- | (rFromVEX2of3(insn->vexXopPrefix[1]) << 2)
- | (xFromVEX2of3(insn->vexXopPrefix[1]) << 1)
- | (bFromVEX2of3(insn->vexXopPrefix[1]) << 0);
- }
-
- switch (ppFromVEX3of3(insn->vexXopPrefix[2]))
- {
- default:
- break;
- case VEX_PREFIX_66:
- hasOpSize = TRUE;
- break;
+ | (wFromVEX3of3(insn->vectorExtensionPrefix[2]) << 3)
+ | (rFromVEX2of3(insn->vectorExtensionPrefix[1]) << 2)
+ | (xFromVEX2of3(insn->vectorExtensionPrefix[1]) << 1)
+ | (bFromVEX2of3(insn->vectorExtensionPrefix[1]) << 0);
}
dbgprintf(insn, "Found VEX prefix 0x%hhx 0x%hhx 0x%hhx",
- insn->vexXopPrefix[0], insn->vexXopPrefix[1],
- insn->vexXopPrefix[2]);
+ insn->vectorExtensionPrefix[0], insn->vectorExtensionPrefix[1],
+ insn->vectorExtensionPrefix[2]);
}
}
else if (byte == 0xc5) {
@@ -501,31 +551,33 @@ static int readPrefixes(struct InternalInstruction* insn) {
}
if (insn->mode == MODE_64BIT || (byte1 & 0xc0) == 0xc0) {
- insn->vexXopType = TYPE_VEX_2B;
+ insn->vectorExtensionType = TYPE_VEX_2B;
}
else {
unconsumeByte(insn);
}
- if (insn->vexXopType == TYPE_VEX_2B) {
- insn->vexXopPrefix[0] = byte;
- consumeByte(insn, &insn->vexXopPrefix[1]);
+ if (insn->vectorExtensionType == TYPE_VEX_2B) {
+ insn->vectorExtensionPrefix[0] = byte;
+ consumeByte(insn, &insn->vectorExtensionPrefix[1]);
if (insn->mode == MODE_64BIT) {
insn->rexPrefix = 0x40
- | (rFromVEX2of2(insn->vexXopPrefix[1]) << 2);
+ | (rFromVEX2of2(insn->vectorExtensionPrefix[1]) << 2);
}
- switch (ppFromVEX2of2(insn->vexXopPrefix[1]))
+ switch (ppFromVEX2of2(insn->vectorExtensionPrefix[1]))
{
default:
break;
case VEX_PREFIX_66:
- hasOpSize = TRUE;
+ hasOpSize = true;
break;
}
- dbgprintf(insn, "Found VEX prefix 0x%hhx 0x%hhx", insn->vexXopPrefix[0], insn->vexXopPrefix[1]);
+ dbgprintf(insn, "Found VEX prefix 0x%hhx 0x%hhx",
+ insn->vectorExtensionPrefix[0],
+ insn->vectorExtensionPrefix[1]);
}
}
else if (byte == 0x8f) {
@@ -537,7 +589,7 @@ static int readPrefixes(struct InternalInstruction* insn) {
}
if ((byte1 & 0x38) != 0x0) { /* 0 in these 3 bits is a POP instruction. */
- insn->vexXopType = TYPE_XOP;
+ insn->vectorExtensionType = TYPE_XOP;
insn->necessaryPrefixLocation = insn->readerCursor - 1;
}
else {
@@ -545,33 +597,33 @@ static int readPrefixes(struct InternalInstruction* insn) {
insn->necessaryPrefixLocation = insn->readerCursor - 1;
}
- if (insn->vexXopType == TYPE_XOP) {
- insn->vexXopPrefix[0] = byte;
- consumeByte(insn, &insn->vexXopPrefix[1]);
- consumeByte(insn, &insn->vexXopPrefix[2]);
+ if (insn->vectorExtensionType == TYPE_XOP) {
+ insn->vectorExtensionPrefix[0] = byte;
+ consumeByte(insn, &insn->vectorExtensionPrefix[1]);
+ consumeByte(insn, &insn->vectorExtensionPrefix[2]);
/* We simulate the REX prefix for simplicity's sake */
if (insn->mode == MODE_64BIT) {
insn->rexPrefix = 0x40
- | (wFromXOP3of3(insn->vexXopPrefix[2]) << 3)
- | (rFromXOP2of3(insn->vexXopPrefix[1]) << 2)
- | (xFromXOP2of3(insn->vexXopPrefix[1]) << 1)
- | (bFromXOP2of3(insn->vexXopPrefix[1]) << 0);
+ | (wFromXOP3of3(insn->vectorExtensionPrefix[2]) << 3)
+ | (rFromXOP2of3(insn->vectorExtensionPrefix[1]) << 2)
+ | (xFromXOP2of3(insn->vectorExtensionPrefix[1]) << 1)
+ | (bFromXOP2of3(insn->vectorExtensionPrefix[1]) << 0);
}
- switch (ppFromXOP3of3(insn->vexXopPrefix[2]))
+ switch (ppFromXOP3of3(insn->vectorExtensionPrefix[2]))
{
default:
break;
case VEX_PREFIX_66:
- hasOpSize = TRUE;
+ hasOpSize = true;
break;
}
dbgprintf(insn, "Found XOP prefix 0x%hhx 0x%hhx 0x%hhx",
- insn->vexXopPrefix[0], insn->vexXopPrefix[1],
- insn->vexXopPrefix[2]);
+ insn->vectorExtensionPrefix[0], insn->vectorExtensionPrefix[1],
+ insn->vectorExtensionPrefix[2]);
}
}
else {
@@ -646,13 +698,29 @@ static int readOpcode(struct InternalInstruction* insn) {
insn->opcodeType = ONEBYTE;
- if (insn->vexXopType == TYPE_VEX_3B)
+ if (insn->vectorExtensionType == TYPE_EVEX)
{
- switch (mmmmmFromVEX2of3(insn->vexXopPrefix[1]))
- {
+ switch (mmFromEVEX2of4(insn->vectorExtensionPrefix[1])) {
+ default:
+ dbgprintf(insn, "Unhandled mm field for instruction (0x%hhx)",
+ mmFromEVEX2of4(insn->vectorExtensionPrefix[1]));
+ return -1;
+ case VEX_LOB_0F:
+ insn->opcodeType = TWOBYTE;
+ return consumeByte(insn, &insn->opcode);
+ case VEX_LOB_0F38:
+ insn->opcodeType = THREEBYTE_38;
+ return consumeByte(insn, &insn->opcode);
+ case VEX_LOB_0F3A:
+ insn->opcodeType = THREEBYTE_3A;
+ return consumeByte(insn, &insn->opcode);
+ }
+ }
+ else if (insn->vectorExtensionType == TYPE_VEX_3B) {
+ switch (mmmmmFromVEX2of3(insn->vectorExtensionPrefix[1])) {
default:
dbgprintf(insn, "Unhandled m-mmmm field for instruction (0x%hhx)",
- mmmmmFromVEX2of3(insn->vexXopPrefix[1]));
+ mmmmmFromVEX2of3(insn->vectorExtensionPrefix[1]));
return -1;
case VEX_LOB_0F:
insn->opcodeType = TWOBYTE;
@@ -665,18 +733,15 @@ static int readOpcode(struct InternalInstruction* insn) {
return consumeByte(insn, &insn->opcode);
}
}
- else if (insn->vexXopType == TYPE_VEX_2B)
- {
+ else if (insn->vectorExtensionType == TYPE_VEX_2B) {
insn->opcodeType = TWOBYTE;
return consumeByte(insn, &insn->opcode);
}
- else if (insn->vexXopType == TYPE_XOP)
- {
- switch (mmmmmFromXOP2of3(insn->vexXopPrefix[1]))
- {
+ else if (insn->vectorExtensionType == TYPE_XOP) {
+ switch (mmmmmFromXOP2of3(insn->vectorExtensionPrefix[1])) {
default:
dbgprintf(insn, "Unhandled m-mmmm field for instruction (0x%hhx)",
- mmmmmFromVEX2of3(insn->vexXopPrefix[1]));
+ mmmmmFromVEX2of3(insn->vectorExtensionPrefix[1]));
return -1;
case XOP_MAP_SELECT_8:
insn->opcodeType = XOP8_MAP;
@@ -713,20 +778,6 @@ static int readOpcode(struct InternalInstruction* insn) {
return -1;
insn->opcodeType = THREEBYTE_3A;
- } else if (current == 0xa6) {
- dbgprintf(insn, "Found a three-byte escape prefix (0x%hhx)", current);
-
- if (consumeByte(insn, &current))
- return -1;
-
- insn->opcodeType = THREEBYTE_A6;
- } else if (current == 0xa7) {
- dbgprintf(insn, "Found a three-byte escape prefix (0x%hhx)", current);
-
- if (consumeByte(insn, &current))
- return -1;
-
- insn->opcodeType = THREEBYTE_A7;
} else {
dbgprintf(insn, "Didn't find a three-byte escape prefix");
@@ -760,12 +811,10 @@ static int readModRM(struct InternalInstruction* insn);
*/
static int getIDWithAttrMask(uint16_t* instructionID,
struct InternalInstruction* insn,
- uint8_t attrMask) {
- BOOL hasModRMExtension;
+ uint16_t attrMask) {
+ bool hasModRMExtension;
- uint8_t instructionClass;
-
- instructionClass = contextForAttrs(attrMask);
+ InstructionContext instructionClass = contextForAttrs(attrMask);
hasModRMExtension = modRMRequired(insn->opcodeType,
instructionClass,
@@ -796,14 +845,14 @@ static int getIDWithAttrMask(uint16_t* instructionID,
* @param orig - The instruction that is not 16-bit
* @param equiv - The instruction that is 16-bit
*/
-static BOOL is16BitEquivalent(const char* orig, const char* equiv) {
+static bool is16BitEquivalent(const char* orig, const char* equiv) {
off_t i;
for (i = 0;; i++) {
if (orig[i] == '\0' && equiv[i] == '\0')
- return TRUE;
+ return true;
if (orig[i] == '\0' || equiv[i] == '\0')
- return FALSE;
+ return false;
if (orig[i] != equiv[i]) {
if ((orig[i] == 'Q' || orig[i] == 'L') && equiv[i] == 'W')
continue;
@@ -811,7 +860,7 @@ static BOOL is16BitEquivalent(const char* orig, const char* equiv) {
continue;
if ((orig[i] == '4' || orig[i] == '2') && equiv[i] == '6')
continue;
- return FALSE;
+ return false;
}
}
}
@@ -826,7 +875,7 @@ static BOOL is16BitEquivalent(const char* orig, const char* equiv) {
* nonzero otherwise.
*/
static int getID(struct InternalInstruction* insn, const void *miiArg) {
- uint8_t attrMask;
+ uint16_t attrMask;
uint16_t instructionID;
dbgprintf(insn, "getID()");
@@ -836,11 +885,11 @@ static int getID(struct InternalInstruction* insn, const void *miiArg) {
if (insn->mode == MODE_64BIT)
attrMask |= ATTR_64BIT;
- if (insn->vexXopType != TYPE_NO_VEX_XOP) {
- attrMask |= ATTR_VEX;
+ if (insn->vectorExtensionType != TYPE_NO_VEX_XOP) {
+ attrMask |= (insn->vectorExtensionType == TYPE_EVEX) ? ATTR_EVEX : ATTR_VEX;
- if (insn->vexXopType == TYPE_VEX_3B) {
- switch (ppFromVEX3of3(insn->vexXopPrefix[2])) {
+ if (insn->vectorExtensionType == TYPE_EVEX) {
+ switch (ppFromEVEX3of4(insn->vectorExtensionPrefix[2])) {
case VEX_PREFIX_66:
attrMask |= ATTR_OPSIZE;
break;
@@ -852,11 +901,35 @@ static int getID(struct InternalInstruction* insn, const void *miiArg) {
break;
}
- if (lFromVEX3of3(insn->vexXopPrefix[2]))
+ if (zFromEVEX4of4(insn->vectorExtensionPrefix[3]))
+ attrMask |= ATTR_EVEXKZ;
+ if (bFromEVEX4of4(insn->vectorExtensionPrefix[3]))
+ attrMask |= ATTR_EVEXB;
+ if (aaaFromEVEX4of4(insn->vectorExtensionPrefix[3]))
+ attrMask |= ATTR_EVEXK;
+ if (lFromEVEX4of4(insn->vectorExtensionPrefix[3]))
+ attrMask |= ATTR_EVEXL;
+ if (l2FromEVEX4of4(insn->vectorExtensionPrefix[3]))
+ attrMask |= ATTR_EVEXL2;
+ }
+ else if (insn->vectorExtensionType == TYPE_VEX_3B) {
+ switch (ppFromVEX3of3(insn->vectorExtensionPrefix[2])) {
+ case VEX_PREFIX_66:
+ attrMask |= ATTR_OPSIZE;
+ break;
+ case VEX_PREFIX_F3:
+ attrMask |= ATTR_XS;
+ break;
+ case VEX_PREFIX_F2:
+ attrMask |= ATTR_XD;
+ break;
+ }
+
+ if (lFromVEX3of3(insn->vectorExtensionPrefix[2]))
attrMask |= ATTR_VEXL;
}
- else if (insn->vexXopType == TYPE_VEX_2B) {
- switch (ppFromVEX2of2(insn->vexXopPrefix[1])) {
+ else if (insn->vectorExtensionType == TYPE_VEX_2B) {
+ switch (ppFromVEX2of2(insn->vectorExtensionPrefix[1])) {
case VEX_PREFIX_66:
attrMask |= ATTR_OPSIZE;
break;
@@ -868,11 +941,11 @@ static int getID(struct InternalInstruction* insn, const void *miiArg) {
break;
}
- if (lFromVEX2of2(insn->vexXopPrefix[1]))
+ if (lFromVEX2of2(insn->vectorExtensionPrefix[1]))
attrMask |= ATTR_VEXL;
}
- else if (insn->vexXopType == TYPE_XOP) {
- switch (ppFromXOP3of3(insn->vexXopPrefix[2])) {
+ else if (insn->vectorExtensionType == TYPE_XOP) {
+ switch (ppFromXOP3of3(insn->vectorExtensionPrefix[2])) {
case VEX_PREFIX_66:
attrMask |= ATTR_OPSIZE;
break;
@@ -884,7 +957,7 @@ static int getID(struct InternalInstruction* insn, const void *miiArg) {
break;
}
- if (lFromXOP3of3(insn->vexXopPrefix[2]))
+ if (lFromXOP3of3(insn->vectorExtensionPrefix[2]))
attrMask |= ATTR_VEXL;
}
else {
@@ -892,7 +965,7 @@ static int getID(struct InternalInstruction* insn, const void *miiArg) {
}
}
else {
- if (isPrefixAtLocation(insn, 0x66, insn->necessaryPrefixLocation))
+ if (insn->mode != MODE_16BIT && isPrefixAtLocation(insn, 0x66, insn->necessaryPrefixLocation))
attrMask |= ATTR_OPSIZE;
else if (isPrefixAtLocation(insn, 0x67, insn->necessaryPrefixLocation))
attrMask |= ATTR_ADSIZE;
@@ -908,9 +981,29 @@ static int getID(struct InternalInstruction* insn, const void *miiArg) {
if (getIDWithAttrMask(&instructionID, insn, attrMask))
return -1;
+ /*
+ * JCXZ/JECXZ need special handling for 16-bit mode because the meaning
+ * of the AdSize prefix is inverted w.r.t. 32-bit mode.
+ */
+ if (insn->mode == MODE_16BIT && insn->opcode == 0xE3) {
+ const struct InstructionSpecifier *spec;
+ spec = specifierForUID(instructionID);
+
+ /*
+ * Check for Ii8PCRel instructions. We could alternatively do a
+ * string-compare on the names, but this is probably cheaper.
+ */
+ if (x86OperandSets[spec->operands][0].type == TYPE_REL8) {
+ attrMask ^= ATTR_ADSIZE;
+ if (getIDWithAttrMask(&instructionID, insn, attrMask))
+ return -1;
+ }
+ }
+
/* The following clauses compensate for limitations of the tables. */
- if (insn->prefixPresent[0x66] && !(attrMask & ATTR_OPSIZE)) {
+ if ((insn->mode == MODE_16BIT || insn->prefixPresent[0x66]) &&
+ !(attrMask & ATTR_OPSIZE)) {
/*
* The instruction tables make no distinction between instructions that
* allow OpSize anywhere (i.e., 16-bit operations) and that need it in a
@@ -938,11 +1031,11 @@ static int getID(struct InternalInstruction* insn, const void *miiArg) {
return 0;
}
- specName = x86DisassemblerGetInstrName(instructionID, miiArg);
- specWithOpSizeName =
- x86DisassemblerGetInstrName(instructionIDWithOpsize, miiArg);
+ specName = GetInstrName(instructionID, miiArg);
+ specWithOpSizeName = GetInstrName(instructionIDWithOpsize, miiArg);
- if (is16BitEquivalent(specName, specWithOpSizeName)) {
+ if (is16BitEquivalent(specName, specWithOpSizeName) &&
+ (insn->mode == MODE_16BIT) ^ insn->prefixPresent[0x66]) {
insn->instructionID = instructionIDWithOpsize;
insn->spec = specifierForUID(instructionIDWithOpsize);
} else {
@@ -1003,8 +1096,8 @@ static int getID(struct InternalInstruction* insn, const void *miiArg) {
* @return - 0 if the SIB byte was successfully read; nonzero otherwise.
*/
static int readSIB(struct InternalInstruction* insn) {
- SIBIndex sibIndexBase = 0;
- SIBBase sibBaseBase = 0;
+ SIBIndex sibIndexBase = SIB_INDEX_NONE;
+ SIBBase sibBaseBase = SIB_BASE_NONE;
uint8_t index, base;
dbgprintf(insn, "readSIB()");
@@ -1012,13 +1105,12 @@ static int readSIB(struct InternalInstruction* insn) {
if (insn->consumedSIB)
return 0;
- insn->consumedSIB = TRUE;
+ insn->consumedSIB = true;
switch (insn->addressSize) {
case 2:
dbgprintf(insn, "SIB-based addressing doesn't work in 16-bit mode");
return -1;
- break;
case 4:
sibIndexBase = SIB_INDEX_EAX;
sibBaseBase = SIB_BASE_EAX;
@@ -1033,6 +1125,8 @@ static int readSIB(struct InternalInstruction* insn) {
return -1;
index = indexFromSIB(insn->sib) | (xFromREX(insn->rexPrefix) << 3);
+ if (insn->vectorExtensionType == TYPE_EVEX)
+ index |= v2FromEVEX4of4(insn->vectorExtensionPrefix[3]) << 4;
switch (index) {
case 0x4:
@@ -1109,12 +1203,12 @@ static int readDisplacement(struct InternalInstruction* insn) {
if (insn->consumedDisplacement)
return 0;
- insn->consumedDisplacement = TRUE;
+ insn->consumedDisplacement = true;
insn->displacementOffset = insn->readerCursor - insn->startLocation;
switch (insn->eaDisplacement) {
case EA_DISP_NONE:
- insn->consumedDisplacement = FALSE;
+ insn->consumedDisplacement = false;
break;
case EA_DISP_8:
if (consumeInt8(insn, &d8))
@@ -1133,7 +1227,7 @@ static int readDisplacement(struct InternalInstruction* insn) {
break;
}
- insn->consumedDisplacement = TRUE;
+ insn->consumedDisplacement = true;
return 0;
}
@@ -1154,7 +1248,7 @@ static int readModRM(struct InternalInstruction* insn) {
if (consumeByte(insn, &insn->modRM))
return -1;
- insn->consumedModRM = TRUE;
+ insn->consumedModRM = true;
mod = modFromModRM(insn->modRM);
rm = rmFromModRM(insn->modRM);
@@ -1182,6 +1276,10 @@ static int readModRM(struct InternalInstruction* insn) {
reg |= rFromREX(insn->rexPrefix) << 3;
rm |= bFromREX(insn->rexPrefix) << 3;
+ if (insn->vectorExtensionType == TYPE_EVEX) {
+ reg |= r2FromEVEX2of4(insn->vectorExtensionPrefix[1]) << 4;
+ rm |= xFromEVEX2of4(insn->vectorExtensionPrefix[1]) << 4;
+ }
insn->reg = (Reg)(insn->regBase + reg);
@@ -1204,6 +1302,7 @@ static int readModRM(struct InternalInstruction* insn) {
case 0x1:
insn->eaBase = (EABase)(insn->eaBaseBase + rm);
insn->eaDisplacement = EA_DISP_8;
+ insn->displacementSize = 1;
if (readDisplacement(insn))
return -1;
break;
@@ -1228,12 +1327,12 @@ static int readModRM(struct InternalInstruction* insn) {
case 0x0:
insn->eaDisplacement = EA_DISP_NONE; /* readSIB may override this */
switch (rm) {
+ case 0x14:
case 0x4:
case 0xc: /* in case REXW.b is set */
insn->eaBase = (insn->addressSize == 4 ?
EA_BASE_sib : EA_BASE_sib64);
- readSIB(insn);
- if (readDisplacement(insn))
+ if (readSIB(insn) || readDisplacement(insn))
return -1;
break;
case 0x5:
@@ -1248,14 +1347,16 @@ static int readModRM(struct InternalInstruction* insn) {
}
break;
case 0x1:
+ insn->displacementSize = 1;
+ /* FALLTHROUGH */
case 0x2:
insn->eaDisplacement = (mod == 0x1 ? EA_DISP_8 : EA_DISP_32);
switch (rm) {
+ case 0x14:
case 0x4:
case 0xc: /* in case REXW.b is set */
insn->eaBase = EA_BASE_sib;
- readSIB(insn);
- if (readDisplacement(insn))
+ if (readSIB(insn) || readDisplacement(insn))
return -1;
break;
default:
@@ -1311,6 +1412,10 @@ static int readModRM(struct InternalInstruction* insn) {
case TYPE_XMM32: \
case TYPE_XMM: \
return prefix##_XMM0 + index; \
+ case TYPE_VK1: \
+ case TYPE_VK8: \
+ case TYPE_VK16: \
+ return prefix##_K0 + index; \
case TYPE_MM64: \
case TYPE_MM32: \
case TYPE_MM: \
@@ -1383,7 +1488,7 @@ static int fixupReg(struct InternalInstruction *insn,
if (!valid)
return -1;
break;
- case ENCODING_RM:
+ CASE_ENCODING_RM:
if (insn->eaBase >= insn->eaRegBase) {
insn->eaBase = (EABase)fixupRMValue(insn,
(OperandType)op->type,
@@ -1399,44 +1504,11 @@ static int fixupReg(struct InternalInstruction *insn,
}
/*
- * readOpcodeModifier - Reads an operand from the opcode field of an
- * instruction. Handles AddRegFrm instructions.
- *
- * @param insn - The instruction whose opcode field is to be read.
- * @param inModRM - Indicates that the opcode field is to be read from the
- * ModR/M extension; useful for escape opcodes
- * @return - 0 on success; nonzero otherwise.
- */
-static int readOpcodeModifier(struct InternalInstruction* insn) {
- dbgprintf(insn, "readOpcodeModifier()");
-
- if (insn->consumedOpcodeModifier)
- return 0;
-
- insn->consumedOpcodeModifier = TRUE;
-
- switch (insn->spec->modifierType) {
- default:
- debug("Unknown modifier type.");
- return -1;
- case MODIFIER_NONE:
- debug("No modifier but an operand expects one.");
- return -1;
- case MODIFIER_OPCODE:
- insn->opcodeModifier = insn->opcode - insn->spec->modifierBase;
- return 0;
- case MODIFIER_MODRM:
- insn->opcodeModifier = insn->modRM - insn->spec->modifierBase;
- return 0;
- }
-}
-
-/*
* readOpcodeRegister - Reads an operand from the opcode field of an
* instruction and interprets it appropriately given the operand width.
* Handles AddRegFrm instructions.
*
- * @param insn - See readOpcodeModifier().
+ * @param insn - The instruction whose opcode field is to be read.
* @param size - The width (in bytes) of the register being specified.
* 1 means AL and friends, 2 means AX, 4 means EAX, and 8 means
* RAX.
@@ -1445,16 +1517,13 @@ static int readOpcodeModifier(struct InternalInstruction* insn) {
static int readOpcodeRegister(struct InternalInstruction* insn, uint8_t size) {
dbgprintf(insn, "readOpcodeRegister()");
- if (readOpcodeModifier(insn))
- return -1;
-
if (size == 0)
size = insn->registerSize;
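+  /* For AddRegFrm instructions the register is encoded in the low three bits
+     of the opcode, with REX.B supplying bit 3. */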
switch (size) {
case 1:
insn->opcodeRegister = (Reg)(MODRM_REG_AL + ((bFromREX(insn->rexPrefix) << 3)
- | insn->opcodeModifier));
+ | (insn->opcode & 7)));
if (insn->rexPrefix &&
insn->opcodeRegister >= MODRM_REG_AL + 0x4 &&
insn->opcodeRegister < MODRM_REG_AL + 0x8) {
@@ -1466,17 +1535,17 @@ static int readOpcodeRegister(struct InternalInstruction* insn, uint8_t size) {
case 2:
insn->opcodeRegister = (Reg)(MODRM_REG_AX
+ ((bFromREX(insn->rexPrefix) << 3)
- | insn->opcodeModifier));
+ | (insn->opcode & 7)));
break;
case 4:
insn->opcodeRegister = (Reg)(MODRM_REG_EAX
+ ((bFromREX(insn->rexPrefix) << 3)
- | insn->opcodeModifier));
+ | (insn->opcode & 7)));
break;
case 8:
insn->opcodeRegister = (Reg)(MODRM_REG_RAX
+ ((bFromREX(insn->rexPrefix) << 3)
- | insn->opcodeModifier));
+ | (insn->opcode & 7)));
break;
}
@@ -1549,18 +1618,41 @@ static int readImmediate(struct InternalInstruction* insn, uint8_t size) {
static int readVVVV(struct InternalInstruction* insn) {
dbgprintf(insn, "readVVVV()");
- if (insn->vexXopType == TYPE_VEX_3B)
- insn->vvvv = vvvvFromVEX3of3(insn->vexXopPrefix[2]);
- else if (insn->vexXopType == TYPE_VEX_2B)
- insn->vvvv = vvvvFromVEX2of2(insn->vexXopPrefix[1]);
- else if (insn->vexXopType == TYPE_XOP)
- insn->vvvv = vvvvFromXOP3of3(insn->vexXopPrefix[2]);
+ int vvvv;
+ if (insn->vectorExtensionType == TYPE_EVEX)
+ vvvv = (v2FromEVEX4of4(insn->vectorExtensionPrefix[3]) << 4 |
+ vvvvFromEVEX3of4(insn->vectorExtensionPrefix[2]));
+ else if (insn->vectorExtensionType == TYPE_VEX_3B)
+ vvvv = vvvvFromVEX3of3(insn->vectorExtensionPrefix[2]);
+ else if (insn->vectorExtensionType == TYPE_VEX_2B)
+ vvvv = vvvvFromVEX2of2(insn->vectorExtensionPrefix[1]);
+ else if (insn->vectorExtensionType == TYPE_XOP)
+ vvvv = vvvvFromXOP3of3(insn->vectorExtensionPrefix[2]);
else
return -1;
if (insn->mode != MODE_64BIT)
- insn->vvvv &= 0x7;
+ vvvv &= 0x7;
+
+ insn->vvvv = static_cast<Reg>(vvvv);
+ return 0;
+}
+
+/*
+ * readMaskRegister - Reads the writemask register (EVEX.aaa) from the EVEX
+ *   prefix of an instruction.
+ *
+ * @param insn - The instruction whose EVEX prefix is to be read.
+ * @return - 0 on success; nonzero otherwise.
+ */
+static int readMaskRegister(struct InternalInstruction* insn) {
+ dbgprintf(insn, "readMaskRegister()");
+
+ if (insn->vectorExtensionType != TYPE_EVEX)
+ return -1;
+ insn->writemask =
+ static_cast<Reg>(aaaFromEVEX4of4(insn->vectorExtensionPrefix[3]));
return 0;
}
@@ -1572,7 +1664,6 @@ static int readVVVV(struct InternalInstruction* insn) {
* @return - 0 if all operands could be read; nonzero otherwise.
*/
static int readOperands(struct InternalInstruction* insn) {
- int index;
int hasVVVV, needVVVV;
int sawRegImm = 0;
@@ -1583,16 +1674,21 @@ static int readOperands(struct InternalInstruction* insn) {
hasVVVV = !readVVVV(insn);
needVVVV = hasVVVV && (insn->vvvv != 0);
- for (index = 0; index < X86_MAX_OPERANDS; ++index) {
- switch (x86OperandSets[insn->spec->operands][index].encoding) {
+ for (const auto &Op : x86OperandSets[insn->spec->operands]) {
+ switch (Op.encoding) {
case ENCODING_NONE:
+ case ENCODING_SI:
+ case ENCODING_DI:
break;
case ENCODING_REG:
- case ENCODING_RM:
+ CASE_ENCODING_RM:
if (readModRM(insn))
return -1;
- if (fixupReg(insn, &x86OperandSets[insn->spec->operands][index]))
+ if (fixupReg(insn, &Op))
return -1;
+ // Apply the AVX512 compressed displacement scaling factor.
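+      // This relies on the ENCODING_RM_CDn values being laid out
+      // consecutively after ENCODING_RM, so the enum distance is the log2 of
+      // the scale (plain ENCODING_RM leaves the displacement unchanged).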
+ if (Op.encoding != ENCODING_REG && insn->eaDisplacement == EA_DISP_8)
+ insn->displacement *= 1 << (Op.encoding - ENCODING_RM);
break;
case ENCODING_CB:
case ENCODING_CW:
@@ -1613,14 +1709,14 @@ static int readOperands(struct InternalInstruction* insn) {
}
if (readImmediate(insn, 1))
return -1;
- if (x86OperandSets[insn->spec->operands][index].type == TYPE_IMM3 &&
+ if (Op.type == TYPE_IMM3 &&
insn->immediates[insn->numImmediatesConsumed - 1] > 7)
return -1;
- if (x86OperandSets[insn->spec->operands][index].type == TYPE_IMM5 &&
+ if (Op.type == TYPE_IMM5 &&
insn->immediates[insn->numImmediatesConsumed - 1] > 31)
return -1;
- if (x86OperandSets[insn->spec->operands][index].type == TYPE_XMM128 ||
- x86OperandSets[insn->spec->operands][index].type == TYPE_XMM256)
+ if (Op.type == TYPE_XMM128 ||
+ Op.type == TYPE_XMM256)
sawRegImm = 1;
break;
case ENCODING_IW:
@@ -1663,15 +1759,17 @@ static int readOperands(struct InternalInstruction* insn) {
if (readOpcodeRegister(insn, 0))
return -1;
break;
- case ENCODING_I:
- if (readOpcodeModifier(insn))
- return -1;
+ case ENCODING_FP:
break;
case ENCODING_VVVV:
needVVVV = 0; /* Mark that we have found a VVVV operand. */
if (!hasVVVV)
return -1;
- if (fixupReg(insn, &x86OperandSets[insn->spec->operands][index]))
+ if (fixupReg(insn, &Op))
+ return -1;
+ break;
+ case ENCODING_WRITEMASK:
+ if (readMaskRegister(insn))
return -1;
break;
case ENCODING_DUP:
@@ -1708,14 +1806,10 @@ static int readOperands(struct InternalInstruction* insn) {
* @return - 0 if the instruction's memory could be read; nonzero if
* not.
*/
-int decodeInstruction(struct InternalInstruction* insn,
- byteReader_t reader,
- const void* readerArg,
- dlog_t logger,
- void* loggerArg,
- const void* miiArg,
- uint64_t startLoc,
- DisassemblerMode mode) {
+int llvm::X86Disassembler::decodeInstruction(
+ struct InternalInstruction *insn, byteReader_t reader,
+ const void *readerArg, dlog_t logger, void *loggerArg, const void *miiArg,
+ uint64_t startLoc, DisassemblerMode mode) {
memset(insn, 0, sizeof(struct InternalInstruction));
insn->reader = reader;
@@ -1734,7 +1828,7 @@ int decodeInstruction(struct InternalInstruction* insn,
readOperands(insn))
return -1;
- insn->operands = &x86OperandSets[insn->spec->operands][0];
+ insn->operands = x86OperandSets[insn->spec->operands];
insn->length = insn->readerCursor - insn->startLocation;
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
index 6d03d5c..8c45402 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
@@ -1,39 +1,28 @@
-/*===-- X86DisassemblerDecoderInternal.h - Disassembler decoder ---*- C -*-===*
- *
- * The LLVM Compiler Infrastructure
- *
- * This file is distributed under the University of Illinois Open Source
- * License. See LICENSE.TXT for details.
- *
- *===----------------------------------------------------------------------===*
- *
- * This file is part of the X86 Disassembler.
- * It contains the public interface of the instruction decoder.
- * Documentation for the disassembler can be found in X86Disassembler.h.
- *
- *===----------------------------------------------------------------------===*/
+//===-- X86DisassemblerDecoderInternal.h - Disassembler decoder -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is part of the X86 Disassembler.
+// It contains the public interface of the instruction decoder.
+// Documentation for the disassembler can be found in X86Disassembler.h.
+//
+//===----------------------------------------------------------------------===//
#ifndef X86DISASSEMBLERDECODER_H
#define X86DISASSEMBLERDECODER_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define INSTRUCTION_SPECIFIER_FIELDS \
- uint16_t operands;
-
-#define INSTRUCTION_IDS \
- uint16_t instructionIDs;
-
#include "X86DisassemblerDecoderCommon.h"
+#include "llvm/ADT/ArrayRef.h"
-#undef INSTRUCTION_SPECIFIER_FIELDS
-#undef INSTRUCTION_IDS
+namespace llvm {
+namespace X86Disassembler {
-/*
- * Accessor functions for various fields of an Intel instruction
- */
+// Accessor functions for various fields of an Intel instruction
#define modFromModRM(modRM) (((modRM) & 0xc0) >> 6)
#define regFromModRM(modRM) (((modRM) & 0x38) >> 3)
#define rmFromModRM(modRM) ((modRM) & 0x7)
@@ -45,6 +34,21 @@ extern "C" {
#define xFromREX(rex) (((rex) & 0x2) >> 1)
#define bFromREX(rex) ((rex) & 0x1)
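+
+// EVEX is a four-byte prefix: 0x62, then P0 (R X B R' 0 0 m m),
+// P1 (W v v v v 1 p p) and P2 (z L' L b V' a a a).  R, X, B, R', vvvv and V'
+// are stored inverted, which is why the accessors below complement the byte
+// first.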
+#define rFromEVEX2of4(evex) (((~(evex)) & 0x80) >> 7)
+#define xFromEVEX2of4(evex) (((~(evex)) & 0x40) >> 6)
+#define bFromEVEX2of4(evex) (((~(evex)) & 0x20) >> 5)
+#define r2FromEVEX2of4(evex) (((~(evex)) & 0x10) >> 4)
+#define mmFromEVEX2of4(evex) ((evex) & 0x3)
+#define wFromEVEX3of4(evex) (((evex) & 0x80) >> 7)
+#define vvvvFromEVEX3of4(evex) (((~(evex)) & 0x78) >> 3)
+#define ppFromEVEX3of4(evex) ((evex) & 0x3)
+#define zFromEVEX4of4(evex) (((evex) & 0x80) >> 7)
+#define l2FromEVEX4of4(evex) (((evex) & 0x40) >> 6)
+#define lFromEVEX4of4(evex) (((evex) & 0x20) >> 5)
+#define bFromEVEX4of4(evex) (((evex) & 0x10) >> 4)
+#define v2FromEVEX4of4(evex) (((~evex) & 0x8) >> 3)
+#define aaaFromEVEX4of4(evex) ((evex) & 0x7)
+
#define rFromVEX2of3(vex) (((~(vex)) & 0x80) >> 7)
#define xFromVEX2of3(vex) (((~(vex)) & 0x40) >> 6)
#define bFromVEX2of3(vex) (((~(vex)) & 0x20) >> 5)
@@ -68,10 +72,7 @@ extern "C" {
#define lFromXOP3of3(xop) (((xop) & 0x4) >> 2)
#define ppFromXOP3of3(xop) ((xop) & 0x3)
-/*
- * These enums represent Intel registers for use by the decoder.
- */
-
+// These enums represent Intel registers for use by the decoder.
#define REGS_8BIT \
ENTRY(AL) \
ENTRY(CL) \
@@ -314,6 +315,16 @@ extern "C" {
ENTRY(ZMM30) \
ENTRY(ZMM31)
+#define REGS_MASKS \
+ ENTRY(K0) \
+ ENTRY(K1) \
+ ENTRY(K2) \
+ ENTRY(K3) \
+ ENTRY(K4) \
+ ENTRY(K5) \
+ ENTRY(K6) \
+ ENTRY(K7)
+
#define REGS_SEGMENT \
ENTRY(ES) \
ENTRY(CS) \
@@ -361,18 +372,17 @@ extern "C" {
REGS_XMM \
REGS_YMM \
REGS_ZMM \
+ REGS_MASKS \
REGS_SEGMENT \
REGS_DEBUG \
REGS_CONTROL \
ENTRY(RIP)
-/*
- * EABase - All possible values of the base field for effective-address
- * computations, a.k.a. the Mod and R/M fields of the ModR/M byte. We
- * distinguish between bases (EA_BASE_*) and registers that just happen to be
- * referred to when Mod == 0b11 (EA_REG_*).
- */
-typedef enum {
+/// \brief All possible values of the base field for effective-address
+/// computations, a.k.a. the Mod and R/M fields of the ModR/M byte.
+/// We distinguish between bases (EA_BASE_*) and registers that just happen
+/// to be referred to when Mod == 0b11 (EA_REG_*).
+enum EABase {
EA_BASE_NONE,
#define ENTRY(x) EA_BASE_##x,
ALL_EA_BASES
@@ -381,15 +391,13 @@ typedef enum {
ALL_REGS
#undef ENTRY
EA_max
-} EABase;
-
-/*
- * SIBIndex - All possible values of the SIB index field.
- * Borrows entries from ALL_EA_BASES with the special case that
- * sib is synonymous with NONE.
- * Vector SIB: index can be XMM or YMM.
- */
-typedef enum {
+};
+
+/// \brief All possible values of the SIB index field.
+/// Borrows entries from ALL_EA_BASES with the special case that
+/// sib is synonymous with NONE.
+/// Vector SIB: index can be XMM or YMM.
+enum SIBIndex {
SIB_INDEX_NONE,
#define ENTRY(x) SIB_INDEX_##x,
ALL_EA_BASES
@@ -398,23 +406,18 @@ typedef enum {
REGS_ZMM
#undef ENTRY
SIB_INDEX_max
-} SIBIndex;
+};
-/*
- * SIBBase - All possible values of the SIB base field.
- */
-typedef enum {
+/// \brief All possible values of the SIB base field.
+enum SIBBase {
SIB_BASE_NONE,
#define ENTRY(x) SIB_BASE_##x,
ALL_SIB_BASES
#undef ENTRY
SIB_BASE_max
-} SIBBase;
+};
-/*
- * EADisplacement - Possible displacement types for effective-address
- * computations.
- */
+/// \brief Possible displacement types for effective-address computations.
typedef enum {
EA_DISP_NONE,
EA_DISP_8,
@@ -422,20 +425,16 @@ typedef enum {
EA_DISP_32
} EADisplacement;
-/*
- * Reg - All possible values of the reg field in the ModR/M byte.
- */
-typedef enum {
+/// \brief All possible values of the reg field in the ModR/M byte.
+enum Reg {
#define ENTRY(x) MODRM_REG_##x,
ALL_REGS
#undef ENTRY
MODRM_REG_max
-} Reg;
+};
-/*
- * SegmentOverride - All possible segment overrides.
- */
-typedef enum {
+/// \brief All possible segment overrides.
+enum SegmentOverride {
SEG_OVERRIDE_NONE,
SEG_OVERRIDE_CS,
SEG_OVERRIDE_SS,
@@ -444,233 +443,220 @@ typedef enum {
SEG_OVERRIDE_FS,
SEG_OVERRIDE_GS,
SEG_OVERRIDE_max
-} SegmentOverride;
-
-/*
- * VEXLeadingOpcodeByte - Possible values for the VEX.m-mmmm field
- */
+};
-typedef enum {
+/// \brief Possible values for the VEX.m-mmmm field
+enum VEXLeadingOpcodeByte {
VEX_LOB_0F = 0x1,
VEX_LOB_0F38 = 0x2,
VEX_LOB_0F3A = 0x3
-} VEXLeadingOpcodeByte;
+};
-typedef enum {
+enum XOPMapSelect {
XOP_MAP_SELECT_8 = 0x8,
XOP_MAP_SELECT_9 = 0x9,
XOP_MAP_SELECT_A = 0xA
-} XOPMapSelect;
-
-/*
- * VEXPrefixCode - Possible values for the VEX.pp field
- */
+};
-typedef enum {
+/// \brief Possible values for the VEX.pp/EVEX.pp field
+enum VEXPrefixCode {
VEX_PREFIX_NONE = 0x0,
VEX_PREFIX_66 = 0x1,
VEX_PREFIX_F3 = 0x2,
VEX_PREFIX_F2 = 0x3
-} VEXPrefixCode;
+};
-typedef enum {
- TYPE_NO_VEX_XOP = 0x0,
- TYPE_VEX_2B = 0x1,
- TYPE_VEX_3B = 0x2,
- TYPE_XOP = 0x3
-} VEXXOPType;
-
-typedef uint8_t BOOL;
-
-/*
- * byteReader_t - Type for the byte reader that the consumer must provide to
- * the decoder. Reads a single byte from the instruction's address space.
- * @param arg - A baton that the consumer can associate with any internal
- * state that it needs.
- * @param byte - A pointer to a single byte in memory that should be set to
- * contain the value at address.
- * @param address - The address in the instruction's address space that should
- * be read from.
- * @return - -1 if the byte cannot be read for any reason; 0 otherwise.
- */
-typedef int (*byteReader_t)(const void* arg, uint8_t* byte, uint64_t address);
-
-/*
- * dlog_t - Type for the logging function that the consumer can provide to
- * get debugging output from the decoder.
- * @param arg - A baton that the consumer can associate with any internal
- * state that it needs.
- * @param log - A string that contains the message. Will be reused after
- * the logger returns.
- */
-typedef void (*dlog_t)(void* arg, const char *log);
-
-/*
- * The x86 internal instruction, which is produced by the decoder.
- */
+enum VectorExtensionType {
+ TYPE_NO_VEX_XOP = 0x0,
+ TYPE_VEX_2B = 0x1,
+ TYPE_VEX_3B = 0x2,
+ TYPE_EVEX = 0x3,
+ TYPE_XOP = 0x4
+};
+
+/// \brief Type for the byte reader that the consumer must provide to
+/// the decoder. Reads a single byte from the instruction's address space.
+/// \param arg A baton that the consumer can associate with any internal
+/// state that it needs.
+/// \param byte A pointer to a single byte in memory that should be set to
+/// contain the value at address.
+/// \param address The address in the instruction's address space that should
+/// be read from.
+/// \return -1 if the byte cannot be read for any reason; 0 otherwise.
+typedef int (*byteReader_t)(const void *arg, uint8_t *byte, uint64_t address);
+
+/// \brief Type for the logging function that the consumer can provide to
+/// get debugging output from the decoder.
+/// \param arg A baton that the consumer can associate with any internal
+/// state that it needs.
+/// \param log A string that contains the message. Will be reused after
+/// the logger returns.
+typedef void (*dlog_t)(void *arg, const char *log);
+
+/// The specification for how to extract and interpret a full instruction and
+/// its operands.
+struct InstructionSpecifier {
+ uint16_t operands;
+};
+
+/// The x86 internal instruction, which is produced by the decoder.
struct InternalInstruction {
- /* Reader interface (C) */
+ // Reader interface (C)
byteReader_t reader;
- /* Opaque value passed to the reader */
+ // Opaque value passed to the reader
const void* readerArg;
- /* The address of the next byte to read via the reader */
+ // The address of the next byte to read via the reader
uint64_t readerCursor;
- /* Logger interface (C) */
+ // Logger interface (C)
dlog_t dlog;
- /* Opaque value passed to the logger */
+ // Opaque value passed to the logger
void* dlogArg;
- /* General instruction information */
+ // General instruction information
- /* The mode to disassemble for (64-bit, protected, real) */
+ // The mode to disassemble for (64-bit, protected, real)
DisassemblerMode mode;
- /* The start of the instruction, usable with the reader */
+ // The start of the instruction, usable with the reader
uint64_t startLocation;
- /* The length of the instruction, in bytes */
+ // The length of the instruction, in bytes
size_t length;
- /* Prefix state */
+ // Prefix state
- /* 1 if the prefix byte corresponding to the entry is present; 0 if not */
+ // 1 if the prefix byte corresponding to the entry is present; 0 if not
uint8_t prefixPresent[0x100];
- /* contains the location (for use with the reader) of the prefix byte */
+ // contains the location (for use with the reader) of the prefix byte
uint64_t prefixLocations[0x100];
- /* The value of the VEX/XOP prefix, if present */
- uint8_t vexXopPrefix[3];
- /* The length of the VEX prefix (0 if not present) */
- VEXXOPType vexXopType;
- /* The value of the REX prefix, if present */
+ // The value of the vector extension prefix(EVEX/VEX/XOP), if present
+ uint8_t vectorExtensionPrefix[4];
+ // The type of the vector extension prefix
+ VectorExtensionType vectorExtensionType;
+ // The value of the REX prefix, if present
uint8_t rexPrefix;
- /* The location where a mandatory prefix would have to be (i.e., right before
- the opcode, or right before the REX prefix if one is present) */
+ // The location where a mandatory prefix would have to be (i.e., right before
+ // the opcode, or right before the REX prefix if one is present).
uint64_t necessaryPrefixLocation;
- /* The segment override type */
+ // The segment override type
SegmentOverride segmentOverride;
- /* 1 if the prefix byte, 0xf2 or 0xf3 is xacquire or xrelease */
- BOOL xAcquireRelease;
+ // 1 if the prefix byte, 0xf2 or 0xf3 is xacquire or xrelease
+ bool xAcquireRelease;
- /* Sizes of various critical pieces of data, in bytes */
+ // Sizes of various critical pieces of data, in bytes
uint8_t registerSize;
uint8_t addressSize;
uint8_t displacementSize;
uint8_t immediateSize;
- /* Offsets from the start of the instruction to the pieces of data, which is
- needed to find relocation entries for adding symbolic operands */
+  // Offsets from the start of the instruction to the pieces of data, which are
+ // needed to find relocation entries for adding symbolic operands.
uint8_t displacementOffset;
uint8_t immediateOffset;
- /* opcode state */
+ // opcode state
- /* The last byte of the opcode, not counting any ModR/M extension */
+ // The last byte of the opcode, not counting any ModR/M extension
uint8_t opcode;
- /* The ModR/M byte of the instruction, if it is an opcode extension */
+ // The ModR/M byte of the instruction, if it is an opcode extension
uint8_t modRMExtension;
- /* decode state */
+ // decode state
- /* The type of opcode, used for indexing into the array of decode tables */
+ // The type of opcode, used for indexing into the array of decode tables
OpcodeType opcodeType;
- /* The instruction ID, extracted from the decode table */
+ // The instruction ID, extracted from the decode table
uint16_t instructionID;
- /* The specifier for the instruction, from the instruction info table */
- const struct InstructionSpecifier *spec;
+ // The specifier for the instruction, from the instruction info table
+ const InstructionSpecifier *spec;
- /* state for additional bytes, consumed during operand decode. Pattern:
- consumed___ indicates that the byte was already consumed and does not
- need to be consumed again */
+ // state for additional bytes, consumed during operand decode. Pattern:
+ // consumed___ indicates that the byte was already consumed and does not
+ // need to be consumed again.
- /* The VEX.vvvv field, which contains a third register operand for some AVX
- instructions */
+ // The VEX.vvvv field, which contains a third register operand for some AVX
+ // instructions.
Reg vvvv;
- /* The ModR/M byte, which contains most register operands and some portion of
- all memory operands */
- BOOL consumedModRM;
+ // The writemask for AVX-512 instructions which is contained in EVEX.aaa
+ Reg writemask;
+
+ // The ModR/M byte, which contains most register operands and some portion of
+ // all memory operands.
+ bool consumedModRM;
uint8_t modRM;
- /* The SIB byte, used for more complex 32- or 64-bit memory operands */
- BOOL consumedSIB;
+ // The SIB byte, used for more complex 32- or 64-bit memory operands
+ bool consumedSIB;
uint8_t sib;
- /* The displacement, used for memory operands */
- BOOL consumedDisplacement;
+ // The displacement, used for memory operands
+ bool consumedDisplacement;
int32_t displacement;
- /* Immediates. There can be two in some cases */
+ // Immediates. There can be two in some cases
uint8_t numImmediatesConsumed;
uint8_t numImmediatesTranslated;
uint64_t immediates[2];
- /* A register or immediate operand encoded into the opcode */
- BOOL consumedOpcodeModifier;
- uint8_t opcodeModifier;
+ // A register or immediate operand encoded into the opcode
Reg opcodeRegister;
- /* Portions of the ModR/M byte */
+ // Portions of the ModR/M byte
- /* These fields determine the allowable values for the ModR/M fields, which
- depend on operand and address widths */
+ // These fields determine the allowable values for the ModR/M fields, which
+ // depend on operand and address widths.
EABase eaBaseBase;
EABase eaRegBase;
Reg regBase;
- /* The Mod and R/M fields can encode a base for an effective address, or a
- register. These are separated into two fields here */
+ // The Mod and R/M fields can encode a base for an effective address, or a
+ // register. These are separated into two fields here.
EABase eaBase;
EADisplacement eaDisplacement;
- /* The reg field always encodes a register */
+ // The reg field always encodes a register
Reg reg;
- /* SIB state */
+ // SIB state
SIBIndex sibIndex;
uint8_t sibScale;
SIBBase sibBase;
- const struct OperandSpecifier *operands;
+ ArrayRef<OperandSpecifier> operands;
};
-/* decodeInstruction - Decode one instruction and store the decoding results in
- * a buffer provided by the consumer.
- * @param insn - The buffer to store the instruction in. Allocated by the
- * consumer.
- * @param reader - The byteReader_t for the bytes to be read.
- * @param readerArg - An argument to pass to the reader for storing context
- * specific to the consumer. May be NULL.
- * @param logger - The dlog_t to be used in printing status messages from the
- * disassembler. May be NULL.
- * @param loggerArg - An argument to pass to the logger for storing context
- * specific to the logger. May be NULL.
- * @param startLoc - The address (in the reader's address space) of the first
- * byte in the instruction.
- * @param mode - The mode (16-bit, 32-bit, 64-bit) to decode in.
- * @return - Nonzero if there was an error during decode, 0 otherwise.
- */
-int decodeInstruction(struct InternalInstruction* insn,
+/// \brief Decode one instruction and store the decoding results in
+/// a buffer provided by the consumer.
+/// \param insn The buffer to store the instruction in. Allocated by the
+/// consumer.
+/// \param reader The byteReader_t for the bytes to be read.
+/// \param readerArg An argument to pass to the reader for storing context
+/// specific to the consumer. May be NULL.
+/// \param logger The dlog_t to be used in printing status messages from the
+/// disassembler. May be NULL.
+/// \param loggerArg An argument to pass to the logger for storing context
+/// specific to the logger. May be NULL.
+/// \param startLoc The address (in the reader's address space) of the first
+/// byte in the instruction.
+/// \param mode The mode (16-bit, 32-bit, 64-bit) to decode in.
+/// \return Nonzero if there was an error during decode, 0 otherwise.
+int decodeInstruction(InternalInstruction *insn,
byteReader_t reader,
- const void* readerArg,
+ const void *readerArg,
dlog_t logger,
- void* loggerArg,
- const void* miiArg,
+ void *loggerArg,
+ const void *miiArg,
uint64_t startLoc,
DisassemblerMode mode);
-/* x86DisassemblerDebug - C-accessible function for printing a message to
- * debugs()
- * @param file - The name of the file printing the debug message.
- * @param line - The line number that printed the debug message.
- * @param s - The message to print.
- */
+/// \brief Print a message to dbgs()
+/// \param file The name of the file printing the debug message.
+/// \param line The line number that printed the debug message.
+/// \param s The message to print.
+void Debug(const char *file, unsigned line, const char *s);
-void x86DisassemblerDebug(const char *file,
- unsigned line,
- const char *s);
+const char *GetInstrName(unsigned Opcode, const void *mii);
-const char *x86DisassemblerGetInstrName(unsigned Opcode, const void *mii);
-
-#ifdef __cplusplus
-}
-#endif
+} // namespace X86Disassembler
+} // namespace llvm
#endif
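A minimal consumer-side sketch of the decodeInstruction() entry point declared
above, assuming the instruction bytes sit in a flat in-memory buffer;
ByteBuffer, bufferReader and decodeOne are illustrative names, not part of
this change:

#include "X86DisassemblerDecoder.h"
using namespace llvm::X86Disassembler;

// Hypothetical byte source: a buffer whose first byte is the instruction.
struct ByteBuffer { const uint8_t *base; uint64_t size; };

// byteReader_t callback: copy out the byte at 'address', or fail past the end.
static int bufferReader(const void *arg, uint8_t *byte, uint64_t address) {
  const ByteBuffer *b = static_cast<const ByteBuffer *>(arg);
  if (address >= b->size)
    return -1;
  *byte = b->base[address];
  return 0;
}

// Decode one instruction; 'mii' is forwarded to GetInstrName() (typically the
// target's MCInstrInfo).  decodeInstruction() zeroes 'insn' itself, and the
// logger/loggerArg pair may be null.
static bool decodeOne(const uint8_t *bytes, uint64_t size, const void *mii,
                      InternalInstruction &insn) {
  ByteBuffer buf = { bytes, size };
  return decodeInstruction(&insn, bufferReader, &buf, /*logger=*/nullptr,
                           /*loggerArg=*/nullptr, mii, /*startLoc=*/0,
                           MODE_64BIT) == 0;
}

On success, insn.length gives the number of bytes consumed and insn.operands
describes the decoded operands.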
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
index dd1719c..13a7b55 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
@@ -1,37 +1,33 @@
-/*===-- X86DisassemblerDecoderCommon.h - Disassembler decoder -----*- C -*-===*
- *
- * The LLVM Compiler Infrastructure
- *
- * This file is distributed under the University of Illinois Open Source
- * License. See LICENSE.TXT for details.
- *
- *===----------------------------------------------------------------------===*
- *
- * This file is part of the X86 Disassembler.
- * It contains common definitions used by both the disassembler and the table
- * generator.
- * Documentation for the disassembler can be found in X86Disassembler.h.
- *
- *===----------------------------------------------------------------------===*/
-
-/*
- * This header file provides those definitions that need to be shared between
- * the decoder and the table generator in a C-friendly manner.
- */
+//===-- X86DisassemblerDecoderCommon.h - Disassembler decoder ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is part of the X86 Disassembler.
+// It contains common definitions used by both the disassembler and the table
+// generator.
+// Documentation for the disassembler can be found in X86Disassembler.h.
+//
+//===----------------------------------------------------------------------===//
#ifndef X86DISASSEMBLERDECODERCOMMON_H
#define X86DISASSEMBLERDECODERCOMMON_H
#include "llvm/Support/DataTypes.h"
+namespace llvm {
+namespace X86Disassembler {
+
#define INSTRUCTIONS_SYM x86DisassemblerInstrSpecifiers
#define CONTEXTS_SYM x86DisassemblerContexts
#define ONEBYTE_SYM x86DisassemblerOneByteOpcodes
#define TWOBYTE_SYM x86DisassemblerTwoByteOpcodes
#define THREEBYTE38_SYM x86DisassemblerThreeByte38Opcodes
#define THREEBYTE3A_SYM x86DisassemblerThreeByte3AOpcodes
-#define THREEBYTEA6_SYM x86DisassemblerThreeByteA6Opcodes
-#define THREEBYTEA7_SYM x86DisassemblerThreeByteA7Opcodes
#define XOP8_MAP_SYM x86DisassemblerXOP8Opcodes
#define XOP9_MAP_SYM x86DisassemblerXOP9Opcodes
#define XOPA_MAP_SYM x86DisassemblerXOPAOpcodes
@@ -42,27 +38,29 @@
#define TWOBYTE_STR "x86DisassemblerTwoByteOpcodes"
#define THREEBYTE38_STR "x86DisassemblerThreeByte38Opcodes"
#define THREEBYTE3A_STR "x86DisassemblerThreeByte3AOpcodes"
-#define THREEBYTEA6_STR "x86DisassemblerThreeByteA6Opcodes"
-#define THREEBYTEA7_STR "x86DisassemblerThreeByteA7Opcodes"
#define XOP8_MAP_STR "x86DisassemblerXOP8Opcodes"
#define XOP9_MAP_STR "x86DisassemblerXOP9Opcodes"
#define XOPA_MAP_STR "x86DisassemblerXOPAOpcodes"
-/*
- * Attributes of an instruction that must be known before the opcode can be
- * processed correctly. Most of these indicate the presence of particular
- * prefixes, but ATTR_64BIT is simply an attribute of the decoding context.
- */
-#define ATTRIBUTE_BITS \
- ENUM_ENTRY(ATTR_NONE, 0x00) \
- ENUM_ENTRY(ATTR_64BIT, 0x01) \
- ENUM_ENTRY(ATTR_XS, 0x02) \
- ENUM_ENTRY(ATTR_XD, 0x04) \
- ENUM_ENTRY(ATTR_REXW, 0x08) \
- ENUM_ENTRY(ATTR_OPSIZE, 0x10) \
- ENUM_ENTRY(ATTR_ADSIZE, 0x20) \
- ENUM_ENTRY(ATTR_VEX, 0x40) \
- ENUM_ENTRY(ATTR_VEXL, 0x80)
+// Attributes of an instruction that must be known before the opcode can be
+// processed correctly. Most of these indicate the presence of particular
+// prefixes, but ATTR_64BIT is simply an attribute of the decoding context.
+#define ATTRIBUTE_BITS \
+ ENUM_ENTRY(ATTR_NONE, 0x00) \
+ ENUM_ENTRY(ATTR_64BIT, (0x1 << 0)) \
+ ENUM_ENTRY(ATTR_XS, (0x1 << 1)) \
+ ENUM_ENTRY(ATTR_XD, (0x1 << 2)) \
+ ENUM_ENTRY(ATTR_REXW, (0x1 << 3)) \
+ ENUM_ENTRY(ATTR_OPSIZE, (0x1 << 4)) \
+ ENUM_ENTRY(ATTR_ADSIZE, (0x1 << 5)) \
+ ENUM_ENTRY(ATTR_VEX, (0x1 << 6)) \
+ ENUM_ENTRY(ATTR_VEXL, (0x1 << 7)) \
+ ENUM_ENTRY(ATTR_EVEX, (0x1 << 8)) \
+ ENUM_ENTRY(ATTR_EVEXL, (0x1 << 9)) \
+ ENUM_ENTRY(ATTR_EVEXL2, (0x1 << 10)) \
+ ENUM_ENTRY(ATTR_EVEXK, (0x1 << 11)) \
+ ENUM_ENTRY(ATTR_EVEXKZ, (0x1 << 12)) \
+ ENUM_ENTRY(ATTR_EVEXB, (0x1 << 13))
#define ENUM_ENTRY(n, v) n = v,
enum attributeBits {
@@ -71,13 +69,11 @@ enum attributeBits {
};
#undef ENUM_ENTRY
-/*
- * Combinations of the above attributes that are relevant to instruction
- * decode. Although other combinations are possible, they can be reduced to
- * these without affecting the ultimately decoded instruction.
- */
+// Combinations of the above attributes that are relevant to instruction
+// decode. Although other combinations are possible, they can be reduced to
+// these without affecting the ultimately decoded instruction.
-/* Class name Rank Rationale for rank assignment */
+// Class name Rank Rationale for rank assignment
#define INSTRUCTION_CONTEXTS \
ENUM_ENTRY(IC, 0, "says nothing about the instruction") \
ENUM_ENTRY(IC_64BIT, 1, "says the instruction applies in " \
@@ -198,38 +194,38 @@ enum attributeBits {
ENUM_ENTRY(IC_EVEX_L2_W_XS_B, 4, "requires EVEX_B, L2, W and XS prefix") \
ENUM_ENTRY(IC_EVEX_L2_W_XD_B, 4, "requires EVEX_B, L2, W and XD prefix") \
ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_B, 4, "requires EVEX_B, L2, W and OpSize") \
- ENUM_ENTRY(IC_EVEX_K_B, 1, "requires EVEX_B and EVEX_K prefix") \
- ENUM_ENTRY(IC_EVEX_XS_K_B, 2, "requires EVEX_B, EVEX_K and the XS prefix") \
- ENUM_ENTRY(IC_EVEX_XD_K_B, 2, "requires EVEX_B, EVEX_K and the XD prefix") \
- ENUM_ENTRY(IC_EVEX_OPSIZE_K_B, 2, "requires EVEX_B, EVEX_K and the OpSize prefix") \
- ENUM_ENTRY(IC_EVEX_W_K_B, 3, "requires EVEX_B, EVEX_K and the W prefix") \
- ENUM_ENTRY(IC_EVEX_W_XS_K_B, 4, "requires EVEX_B, EVEX_K, W, and XS prefix") \
- ENUM_ENTRY(IC_EVEX_W_XD_K_B, 4, "requires EVEX_B, EVEX_K, W, and XD prefix") \
- ENUM_ENTRY(IC_EVEX_W_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, W, and OpSize") \
- ENUM_ENTRY(IC_EVEX_L_K_B, 3, "requires EVEX_B, EVEX_K and the L prefix") \
- ENUM_ENTRY(IC_EVEX_L_XS_K_B, 4, "requires EVEX_B, EVEX_K and the L and XS prefix")\
- ENUM_ENTRY(IC_EVEX_L_XD_K_B, 4, "requires EVEX_B, EVEX_K and the L and XD prefix")\
- ENUM_ENTRY(IC_EVEX_L_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, L, and OpSize") \
- ENUM_ENTRY(IC_EVEX_L_W_K_B, 3, "requires EVEX_B, EVEX_K, L and W") \
- ENUM_ENTRY(IC_EVEX_L_W_XS_K_B, 4, "requires EVEX_B, EVEX_K, L, W and XS prefix") \
- ENUM_ENTRY(IC_EVEX_L_W_XD_K_B, 4, "requires EVEX_B, EVEX_K, L, W and XD prefix") \
- ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, L, W and OpSize") \
- ENUM_ENTRY(IC_EVEX_L2_K_B, 3, "requires EVEX_B, EVEX_K and the L2 prefix") \
- ENUM_ENTRY(IC_EVEX_L2_XS_K_B, 4, "requires EVEX_B, EVEX_K and the L2 and XS prefix")\
- ENUM_ENTRY(IC_EVEX_L2_XD_K_B, 4, "requires EVEX_B, EVEX_K and the L2 and XD prefix")\
- ENUM_ENTRY(IC_EVEX_L2_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, L2, and OpSize") \
- ENUM_ENTRY(IC_EVEX_L2_W_K_B, 3, "requires EVEX_B, EVEX_K, L2 and W") \
- ENUM_ENTRY(IC_EVEX_L2_W_XS_K_B, 4, "requires EVEX_B, EVEX_K, L2, W and XS prefix") \
- ENUM_ENTRY(IC_EVEX_L2_W_XD_K_B, 4, "requires EVEX_B, EVEX_K, L2, W and XD prefix") \
- ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, L2, W and OpSize") \
- ENUM_ENTRY(IC_EVEX_KZ_B, 1, "requires EVEX_B and EVEX_KZ prefix") \
- ENUM_ENTRY(IC_EVEX_XS_KZ_B, 2, "requires EVEX_B, EVEX_KZ and the XS prefix") \
- ENUM_ENTRY(IC_EVEX_XD_KZ_B, 2, "requires EVEX_B, EVEX_KZ and the XD prefix") \
- ENUM_ENTRY(IC_EVEX_OPSIZE_KZ_B, 2, "requires EVEX_B, EVEX_KZ and the OpSize prefix") \
- ENUM_ENTRY(IC_EVEX_W_KZ_B, 3, "requires EVEX_B, EVEX_KZ and the W prefix") \
- ENUM_ENTRY(IC_EVEX_W_XS_KZ_B, 4, "requires EVEX_B, EVEX_KZ, W, and XS prefix") \
- ENUM_ENTRY(IC_EVEX_W_XD_KZ_B, 4, "requires EVEX_B, EVEX_KZ, W, and XD prefix") \
- ENUM_ENTRY(IC_EVEX_W_OPSIZE_KZ_B, 4, "requires EVEX_B, EVEX_KZ, W, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_K_B, 1, "requires EVEX_B and EVEX_K prefix") \
+ ENUM_ENTRY(IC_EVEX_XS_K_B, 2, "requires EVEX_B, EVEX_K and the XS prefix") \
+ ENUM_ENTRY(IC_EVEX_XD_K_B, 2, "requires EVEX_B, EVEX_K and the XD prefix") \
+ ENUM_ENTRY(IC_EVEX_OPSIZE_K_B, 2, "requires EVEX_B, EVEX_K and the OpSize prefix") \
+ ENUM_ENTRY(IC_EVEX_W_K_B, 3, "requires EVEX_B, EVEX_K and the W prefix") \
+ ENUM_ENTRY(IC_EVEX_W_XS_K_B, 4, "requires EVEX_B, EVEX_K, W, and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_W_XD_K_B, 4, "requires EVEX_B, EVEX_K, W, and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_W_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, W, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L_K_B, 3, "requires EVEX_B, EVEX_K and the L prefix") \
+ ENUM_ENTRY(IC_EVEX_L_XS_K_B, 4, "requires EVEX_B, EVEX_K and the L and XS prefix")\
+ ENUM_ENTRY(IC_EVEX_L_XD_K_B, 4, "requires EVEX_B, EVEX_K and the L and XD prefix")\
+ ENUM_ENTRY(IC_EVEX_L_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, L, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L_W_K_B, 3, "requires EVEX_B, EVEX_K, L and W") \
+ ENUM_ENTRY(IC_EVEX_L_W_XS_K_B, 4, "requires EVEX_B, EVEX_K, L, W and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_L_W_XD_K_B, 4, "requires EVEX_B, EVEX_K, L, W and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_K_B,4, "requires EVEX_B, EVEX_K, L, W and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L2_K_B, 3, "requires EVEX_B, EVEX_K and the L2 prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_XS_K_B, 4, "requires EVEX_B, EVEX_K and the L2 and XS prefix")\
+ ENUM_ENTRY(IC_EVEX_L2_XD_K_B, 4, "requires EVEX_B, EVEX_K and the L2 and XD prefix")\
+ ENUM_ENTRY(IC_EVEX_L2_OPSIZE_K_B, 4, "requires EVEX_B, EVEX_K, L2, and OpSize") \
+ ENUM_ENTRY(IC_EVEX_L2_W_K_B, 3, "requires EVEX_B, EVEX_K, L2 and W") \
+ ENUM_ENTRY(IC_EVEX_L2_W_XS_K_B, 4, "requires EVEX_B, EVEX_K, L2, W and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_W_XD_K_B, 4, "requires EVEX_B, EVEX_K, L2, W and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_K_B,4, "requires EVEX_B, EVEX_K, L2, W and OpSize") \
+ ENUM_ENTRY(IC_EVEX_KZ_B, 1, "requires EVEX_B and EVEX_KZ prefix") \
+ ENUM_ENTRY(IC_EVEX_XS_KZ_B, 2, "requires EVEX_B, EVEX_KZ and the XS prefix") \
+ ENUM_ENTRY(IC_EVEX_XD_KZ_B, 2, "requires EVEX_B, EVEX_KZ and the XD prefix") \
+ ENUM_ENTRY(IC_EVEX_OPSIZE_KZ_B, 2, "requires EVEX_B, EVEX_KZ and the OpSize prefix") \
+ ENUM_ENTRY(IC_EVEX_W_KZ_B, 3, "requires EVEX_B, EVEX_KZ and the W prefix") \
+ ENUM_ENTRY(IC_EVEX_W_XS_KZ_B, 4, "requires EVEX_B, EVEX_KZ, W, and XS prefix") \
+ ENUM_ENTRY(IC_EVEX_W_XD_KZ_B, 4, "requires EVEX_B, EVEX_KZ, W, and XD prefix") \
+ ENUM_ENTRY(IC_EVEX_W_OPSIZE_KZ_B, 4, "requires EVEX_B, EVEX_KZ, W, and OpSize") \
ENUM_ENTRY(IC_EVEX_L_KZ_B, 3, "requires EVEX_B, EVEX_KZ and the L prefix") \
ENUM_ENTRY(IC_EVEX_L_XS_KZ_B, 4, "requires EVEX_B, EVEX_KZ and the L and XS prefix")\
ENUM_ENTRY(IC_EVEX_L_XD_KZ_B, 4, "requires EVEX_B, EVEX_KZ and the L and XD prefix")\
@@ -269,62 +265,52 @@ enum attributeBits {
ENUM_ENTRY(IC_EVEX_L2_W_KZ, 3, "requires EVEX_KZ, L2 and W") \
ENUM_ENTRY(IC_EVEX_L2_W_XS_KZ, 4, "requires EVEX_KZ, L2, W and XS prefix") \
ENUM_ENTRY(IC_EVEX_L2_W_XD_KZ, 4, "requires EVEX_KZ, L2, W and XD prefix") \
- ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_KZ, 4, "requires EVEX_KZ, L2, W and OpSize")
+ ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_KZ, 4, "requires EVEX_KZ, L2, W and OpSize")
#define ENUM_ENTRY(n, r, d) n,
-typedef enum {
+enum InstructionContext {
INSTRUCTION_CONTEXTS
IC_max
-} InstructionContext;
+};
#undef ENUM_ENTRY
-/*
- * Opcode types, which determine which decode table to use, both in the Intel
- * manual and also for the decoder.
- */
-typedef enum {
+// Opcode types, which determine which decode table to use, both in the Intel
+// manual and also for the decoder.
+enum OpcodeType {
ONEBYTE = 0,
TWOBYTE = 1,
THREEBYTE_38 = 2,
THREEBYTE_3A = 3,
- THREEBYTE_A6 = 4,
- THREEBYTE_A7 = 5,
- XOP8_MAP = 6,
- XOP9_MAP = 7,
- XOPA_MAP = 8
-} OpcodeType;
-
-/*
- * The following structs are used for the hierarchical decode table. After
- * determining the instruction's class (i.e., which IC_* constant applies to
- * it), the decoder reads the opcode. Some instructions require specific
- * values of the ModR/M byte, so the ModR/M byte indexes into the final table.
- *
- * If a ModR/M byte is not required, "required" is left unset, and the values
- * for each instructionID are identical.
- */
+ XOP8_MAP = 4,
+ XOP9_MAP = 5,
+ XOPA_MAP = 6
+};
+// The following structs are used for the hierarchical decode table. After
+// determining the instruction's class (i.e., which IC_* constant applies to
+// it), the decoder reads the opcode. Some instructions require specific
+// values of the ModR/M byte, so the ModR/M byte indexes into the final table.
+//
+// If a ModR/M byte is not required, "required" is left unset, and the values
+// for each instructionID are identical.
typedef uint16_t InstrUID;
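
The hierarchy described above can be pictured with the struct shapes this change deletes from the header (ModRMDecision, OpcodeDecision, ContextDecision, visible as removed lines below). The following standalone sketch only illustrates the lookup path and is not the decoder's real layout: INSTRUCTION_IDS is replaced by a plain array, and all *Sketch names are assumptions.

#include <cstdint>

typedef uint16_t InstrUIDSketch;
static const unsigned kNumContextsSketch = 4; // stands in for IC_max

struct ModRMDecisionSketch {
  uint8_t        modrm_type;           // one of the MODRM_* values
  InstrUIDSketch instructionIDs[256];  // assumed flat layout, for illustration
};
struct OpcodeDecisionSketch  { ModRMDecisionSketch modRMDecisions[256]; };
struct ContextDecisionSketch {
  OpcodeDecisionSketch opcodeDecisions[kNumContextsSketch];
};

// Lookup path: attribute-derived context -> opcode -> ModR/M -> instruction.
InstrUIDSketch lookupSketch(const ContextDecisionSketch &CD, unsigned ic,
                            uint8_t opcode, uint8_t modRM) {
  const ModRMDecisionSketch &D =
      CD.opcodeDecisions[ic % kNumContextsSketch].modRMDecisions[opcode];
  return D.instructionIDs[modRM]; // per-MODRM_* indexing is sketched further below
}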
-/*
- * ModRMDecisionType - describes the type of ModR/M decision, allowing the
- * consumer to determine the number of entries in it.
- *
- * MODRM_ONEENTRY - No matter what the value of the ModR/M byte is, the decoded
- * instruction is the same.
- * MODRM_SPLITRM - If the ModR/M byte is between 0x00 and 0xbf, the opcode
- * corresponds to one instruction; otherwise, it corresponds to
- * a different instruction.
- * MODRM_SPLITMISC- If the ModR/M byte is between 0x00 and 0xbf, ModR/M byte
- * divided by 8 is used to select instruction; otherwise, each
- * value of the ModR/M byte could correspond to a different
- * instruction.
- * MODRM_SPLITREG - ModR/M byte divided by 8 is used to select instruction. This
- corresponds to instructions that use reg field as opcode
- * MODRM_FULL - Potentially, each value of the ModR/M byte could correspond
- * to a different instruction.
- */
-
+// ModRMDecisionType - describes the type of ModR/M decision, allowing the
+// consumer to determine the number of entries in it.
+//
+// MODRM_ONEENTRY - No matter what the value of the ModR/M byte is, the decoded
+// instruction is the same.
+// MODRM_SPLITRM - If the ModR/M byte is between 0x00 and 0xbf, the opcode
+// corresponds to one instruction; otherwise, it corresponds to
+// a different instruction.
+// MODRM_SPLITMISC- If the ModR/M byte is between 0x00 and 0xbf, ModR/M byte
+// divided by 8 is used to select instruction; otherwise, each
+// value of the ModR/M byte could correspond to a different
+// instruction.
+// MODRM_SPLITREG - ModR/M byte divided by 8 is used to select the instruction.
+//                  This corresponds to instructions that use the reg field as an
+//                  opcode extension.
+// MODRM_FULL - Potentially, each value of the ModR/M byte could correspond
+// to a different instruction.
#define MODRMTYPES \
ENUM_ENTRY(MODRM_ONEENTRY) \
ENUM_ENTRY(MODRM_SPLITRM) \
@@ -333,51 +319,32 @@ typedef uint16_t InstrUID;
ENUM_ENTRY(MODRM_FULL)
#define ENUM_ENTRY(n) n,
-typedef enum {
+enum ModRMDecisionType {
MODRMTYPES
MODRM_max
-} ModRMDecisionType;
-#undef ENUM_ENTRY
-
-/*
- * ModRMDecision - Specifies whether a ModR/M byte is needed and (if so) which
- * instruction each possible value of the ModR/M byte corresponds to. Once
- * this information is known, we have narrowed down to a single instruction.
- */
-struct ModRMDecision {
- uint8_t modrm_type;
-
- /* The macro below must be defined wherever this file is included. */
- INSTRUCTION_IDS
-};
-
-/*
- * OpcodeDecision - Specifies which set of ModR/M->instruction tables to look at
- * given a particular opcode.
- */
-struct OpcodeDecision {
- struct ModRMDecision modRMDecisions[256];
-};
-
-/*
- * ContextDecision - Specifies which opcode->instruction tables to look at given
- * a particular context (set of attributes). Since there are many possible
- * contexts, the decoder first uses CONTEXTS_SYM to determine which context
- * applies given a specific set of attributes. Hence there are only IC_max
- * entries in this table, rather than 2^(ATTR_max).
- */
-struct ContextDecision {
- struct OpcodeDecision opcodeDecisions[IC_max];
};
+#undef ENUM_ENTRY
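
Following the MODRM_* descriptions above, a consumer can derive how many table entries a decision needs and which entry a given ModR/M byte selects. A hedged sketch is below; it assumes the ModRMDecisionType enum just defined is in scope, and the exact packing (especially for MODRM_SPLITMISC) is an assumption rather than the decoder's real arithmetic.

#include <cstdint>

static unsigned modRMIndexSketch(ModRMDecisionType Type, uint8_t modRM) {
  switch (Type) {
  case MODRM_ONEENTRY:  return 0;                    // single entry
  case MODRM_SPLITRM:   return modRM < 0xc0 ? 0 : 1; // memory form vs. register form
  case MODRM_SPLITREG:  return (modRM >> 3) & 0x7;   // reg field selects the entry
  case MODRM_SPLITMISC: return modRM < 0xc0 ? ((modRM >> 3) & 0x7)
                                            : 8 + (modRM - 0xc0); // assumed packing
  case MODRM_FULL:      return modRM;                // one entry per byte value
  default:              return 0;
  }
}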
-/*
- * Physical encodings of instruction operands.
- */
+#define CASE_ENCODING_RM \
+ case ENCODING_RM: \
+ case ENCODING_RM_CD2: \
+ case ENCODING_RM_CD4: \
+ case ENCODING_RM_CD8: \
+ case ENCODING_RM_CD16: \
+ case ENCODING_RM_CD32: \
+ case ENCODING_RM_CD64
+// Physical encodings of instruction operands.
#define ENCODINGS \
ENUM_ENTRY(ENCODING_NONE, "") \
ENUM_ENTRY(ENCODING_REG, "Register operand in ModR/M byte.") \
ENUM_ENTRY(ENCODING_RM, "R/M operand in ModR/M byte.") \
+ ENUM_ENTRY(ENCODING_RM_CD2, "R/M operand with CDisp scaling of 2") \
+ ENUM_ENTRY(ENCODING_RM_CD4, "R/M operand with CDisp scaling of 4") \
+ ENUM_ENTRY(ENCODING_RM_CD8, "R/M operand with CDisp scaling of 8") \
+ ENUM_ENTRY(ENCODING_RM_CD16,"R/M operand with CDisp scaling of 16") \
+ ENUM_ENTRY(ENCODING_RM_CD32,"R/M operand with CDisp scaling of 32") \
+ ENUM_ENTRY(ENCODING_RM_CD64,"R/M operand with CDisp scaling of 64") \
ENUM_ENTRY(ENCODING_VVVV, "Register operand in VEX.vvvv byte.") \
ENUM_ENTRY(ENCODING_WRITEMASK, "Register operand in EVEX.aaa byte.") \
ENUM_ENTRY(ENCODING_CB, "1-byte code offset (possible new CS value)") \
@@ -395,27 +362,26 @@ struct ContextDecision {
ENUM_ENTRY(ENCODING_RW, "(AX..DI, R8W..R15W)") \
ENUM_ENTRY(ENCODING_RD, "(EAX..EDI, R8D..R15D)") \
ENUM_ENTRY(ENCODING_RO, "(RAX..RDI, R8..R15)") \
- ENUM_ENTRY(ENCODING_I, "Position on floating-point stack added to the " \
- "opcode byte") \
+ ENUM_ENTRY(ENCODING_FP, "Position on floating-point stack in ModR/M " \
+ "byte.") \
\
ENUM_ENTRY(ENCODING_Iv, "Immediate of operand size") \
ENUM_ENTRY(ENCODING_Ia, "Immediate of address size") \
ENUM_ENTRY(ENCODING_Rv, "Register code of operand size added to the " \
"opcode byte") \
ENUM_ENTRY(ENCODING_DUP, "Duplicate of another operand; ID is encoded " \
- "in type")
+ "in type") \
+  ENUM_ENTRY(ENCODING_SI,   "Source index; encoded in OpSize/AdSize prefix") \
+ ENUM_ENTRY(ENCODING_DI, "Destination index; encoded in prefixes")
#define ENUM_ENTRY(n, d) n,
- typedef enum {
- ENCODINGS
- ENCODING_max
- } OperandEncoding;
+enum OperandEncoding {
+ ENCODINGS
+ ENCODING_max
+};
#undef ENUM_ENTRY
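
The new ENCODING_RM_CD* entries (grouped for switch statements by CASE_ENCODING_RM above) carry the EVEX compressed-displacement scale: an 8-bit displacement in such an operand is interpreted as disp8 * N, where N is the factor named in the encoding. A minimal sketch of that scaling follows; the helper name is an assumption, not the decoder's API.

#include <cstdint>

// Scale an EVEX compressed 8-bit displacement by its CDisp factor
// (2, 4, 8, 16, 32 or 64 for ENCODING_RM_CD2 .. ENCODING_RM_CD64).
static int64_t scaleCompressedDisp8Sketch(int8_t disp8, unsigned cdispFactor) {
  return static_cast<int64_t>(disp8) * static_cast<int64_t>(cdispFactor);
}

// e.g. a stored disp8 of 0x10 under ENCODING_RM_CD8 denotes a displacement of
// 0x80, and a stored -1 under ENCODING_RM_CD64 denotes -64.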
-/*
- * Semantic interpretations of instruction operands.
- */
-
+// Semantic interpretations of instruction operands.
#define TYPES \
ENUM_ENTRY(TYPE_NONE, "") \
ENUM_ENTRY(TYPE_REL8, "1-byte immediate address") \
@@ -454,6 +420,14 @@ struct ContextDecision {
ENUM_ENTRY(TYPE_M16_16, "2+2-byte (BOUND)") \
ENUM_ENTRY(TYPE_M32_32, "4+4-byte (BOUND)") \
ENUM_ENTRY(TYPE_M16_64, "2+8-byte (LIDT, LGDT)") \
+ ENUM_ENTRY(TYPE_SRCIDX8, "1-byte memory at source index") \
+ ENUM_ENTRY(TYPE_SRCIDX16, "2-byte memory at source index") \
+ ENUM_ENTRY(TYPE_SRCIDX32, "4-byte memory at source index") \
+ ENUM_ENTRY(TYPE_SRCIDX64, "8-byte memory at source index") \
+ ENUM_ENTRY(TYPE_DSTIDX8, "1-byte memory at destination index") \
+ ENUM_ENTRY(TYPE_DSTIDX16, "2-byte memory at destination index") \
+ ENUM_ENTRY(TYPE_DSTIDX32, "4-byte memory at destination index") \
+ ENUM_ENTRY(TYPE_DSTIDX64, "8-byte memory at destination index") \
ENUM_ENTRY(TYPE_MOFFS8, "1-byte memory offset (relative to segment " \
"base)") \
ENUM_ENTRY(TYPE_MOFFS16, "2-byte") \
@@ -478,8 +452,13 @@ struct ContextDecision {
ENUM_ENTRY(TYPE_XMM128, "16-byte") \
ENUM_ENTRY(TYPE_XMM256, "32-byte") \
ENUM_ENTRY(TYPE_XMM512, "64-byte") \
+ ENUM_ENTRY(TYPE_VK1, "1-bit") \
+ ENUM_ENTRY(TYPE_VK2, "2-bit") \
+ ENUM_ENTRY(TYPE_VK4, "4-bit") \
ENUM_ENTRY(TYPE_VK8, "8-bit") \
ENUM_ENTRY(TYPE_VK16, "16-bit") \
+ ENUM_ENTRY(TYPE_VK32, "32-bit") \
+ ENUM_ENTRY(TYPE_VK64, "64-bit") \
ENUM_ENTRY(TYPE_XMM0, "Implicit use of XMM0") \
ENUM_ENTRY(TYPE_SEGMENTREG, "Segment register operand") \
ENUM_ENTRY(TYPE_DEBUGREG, "Debug register operand") \
@@ -497,61 +476,42 @@ struct ContextDecision {
ENUM_ENTRY(TYPE_M512, "512-bit FPU/MMX/XMM/MXCSR state")
#define ENUM_ENTRY(n, d) n,
-typedef enum {
+enum OperandType {
TYPES
TYPE_max
-} OperandType;
+};
#undef ENUM_ENTRY
-/*
- * OperandSpecifier - The specification for how to extract and interpret one
- * operand.
- */
+/// \brief The specification for how to extract and interpret one operand.
struct OperandSpecifier {
uint8_t encoding;
uint8_t type;
};
-/*
- * Indicates where the opcode modifier (if any) is to be found. Extended
- * opcodes with AddRegFrm have the opcode modifier in the ModR/M byte.
- */
-
+// Indicates where the opcode modifier (if any) is to be found. Extended
+// opcodes with AddRegFrm have the opcode modifier in the ModR/M byte.
#define MODIFIER_TYPES \
- ENUM_ENTRY(MODIFIER_NONE) \
- ENUM_ENTRY(MODIFIER_OPCODE) \
- ENUM_ENTRY(MODIFIER_MODRM)
+ ENUM_ENTRY(MODIFIER_NONE)
#define ENUM_ENTRY(n) n,
-typedef enum {
+enum ModifierType {
MODIFIER_TYPES
MODIFIER_max
-} ModifierType;
+};
#undef ENUM_ENTRY
-#define X86_MAX_OPERANDS 5
-
-/*
- * The specification for how to extract and interpret a full instruction and
- * its operands.
- */
-struct InstructionSpecifier {
- uint8_t modifierType;
- uint8_t modifierBase;
-
- /* The macro below must be defined wherever this file is included. */
- INSTRUCTION_SPECIFIER_FIELDS
-};
+static const unsigned X86_MAX_OPERANDS = 5;
-/*
- * Decoding mode for the Intel disassembler. 16-bit, 32-bit, and 64-bit mode
- * are supported, and represent real mode, IA-32e, and IA-32e in 64-bit mode,
- * respectively.
- */
-typedef enum {
+/// Decoding mode for the Intel disassembler. 16-bit, 32-bit, and 64-bit mode
+/// are supported, and represent real mode, IA-32e, and IA-32e in 64-bit mode,
+/// respectively.
+enum DisassemblerMode {
MODE_16BIT,
MODE_32BIT,
MODE_64BIT
-} DisassemblerMode;
+};
+
+} // namespace X86Disassembler
+} // namespace llvm
#endif
diff --git a/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp b/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
index 4439311..b45b118 100644
--- a/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
#include "X86ATTInstPrinter.h"
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
@@ -28,6 +27,8 @@
#include <map>
using namespace llvm;
+#define DEBUG_TYPE "asm-printer"
+
// Include the auto-generated portion of the assembly writer.
#define PRINT_ALIAS_INSTR
#include "X86GenAsmWriter.inc"
@@ -123,6 +124,16 @@ void X86ATTInstPrinter::printAVXCC(const MCInst *MI, unsigned Op,
}
}
+void X86ATTInstPrinter::printRoundingControl(const MCInst *MI, unsigned Op,
+ raw_ostream &O) {
+ int64_t Imm = MI->getOperand(Op).getImm() & 0x3;
+ switch (Imm) {
+ case 0: O << "{rn-sae}"; break;
+ case 1: O << "{rd-sae}"; break;
+ case 2: O << "{ru-sae}"; break;
+ case 3: O << "{rz-sae}"; break;
+ }
+}
/// printPCRelImm - This is used to print an immediate value that ends up
/// being encoded as a pc-relative value (e.g. for jumps and calls). These
/// print slightly differently than normal immediates. For example, a $ is not
@@ -172,16 +183,16 @@ void X86ATTInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
raw_ostream &O) {
- const MCOperand &BaseReg = MI->getOperand(Op);
- const MCOperand &IndexReg = MI->getOperand(Op+2);
- const MCOperand &DispSpec = MI->getOperand(Op+3);
- const MCOperand &SegReg = MI->getOperand(Op+4);
+ const MCOperand &BaseReg = MI->getOperand(Op+X86::AddrBaseReg);
+ const MCOperand &IndexReg = MI->getOperand(Op+X86::AddrIndexReg);
+ const MCOperand &DispSpec = MI->getOperand(Op+X86::AddrDisp);
+ const MCOperand &SegReg = MI->getOperand(Op+X86::AddrSegmentReg);
O << markup("<mem:");
// If this has a segment register, print it.
if (SegReg.getReg()) {
- printOperand(MI, Op+4, O);
+ printOperand(MI, Op+X86::AddrSegmentReg, O);
O << ':';
}
@@ -197,12 +208,12 @@ void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
if (IndexReg.getReg() || BaseReg.getReg()) {
O << '(';
if (BaseReg.getReg())
- printOperand(MI, Op, O);
+ printOperand(MI, Op+X86::AddrBaseReg, O);
if (IndexReg.getReg()) {
O << ',';
- printOperand(MI, Op+2, O);
- unsigned ScaleVal = MI->getOperand(Op+1).getImm();
+ printOperand(MI, Op+X86::AddrIndexReg, O);
+ unsigned ScaleVal = MI->getOperand(Op+X86::AddrScaleAmt).getImm();
if (ScaleVal != 1) {
O << ','
<< markup("<imm:")
@@ -216,12 +227,49 @@ void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
O << markup(">");
}
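
The printer hunks above replace the old magic offsets (Op, Op+1, ...) with the named X86::Addr* indices, which describe the standard five-operand memory reference layout from X86BaseInfo.h. The enum below mirrors that layout for reference only; the real constants live in namespace X86.

enum AddrOperandLayoutSketch {      // mirrors X86::Addr* in X86BaseInfo.h
  AddrBaseRegSketch     = 0,        // base register     (Op + AddrBaseReg)
  AddrScaleAmtSketch    = 1,        // scale amount      (Op + AddrScaleAmt)
  AddrIndexRegSketch    = 2,        // index register    (Op + AddrIndexReg)
  AddrDispSketch        = 3,        // displacement      (Op + AddrDisp)
  AddrSegmentRegSketch  = 4,        // segment register  (Op + AddrSegmentReg)
  AddrNumOperandsSketch = 5         // total operands in a memory reference
};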
+void X86ATTInstPrinter::printSrcIdx(const MCInst *MI, unsigned Op,
+ raw_ostream &O) {
+ const MCOperand &SegReg = MI->getOperand(Op+1);
+
+ O << markup("<mem:");
+
+ // If this has a segment register, print it.
+ if (SegReg.getReg()) {
+ printOperand(MI, Op+1, O);
+ O << ':';
+ }
+
+ O << "(";
+ printOperand(MI, Op, O);
+ O << ")";
+
+ O << markup(">");
+}
+
+void X86ATTInstPrinter::printDstIdx(const MCInst *MI, unsigned Op,
+ raw_ostream &O) {
+ O << markup("<mem:");
+
+ O << "%es:(";
+ printOperand(MI, Op, O);
+ O << ")";
+
+ O << markup(">");
+}
+
void X86ATTInstPrinter::printMemOffset(const MCInst *MI, unsigned Op,
raw_ostream &O) {
const MCOperand &DispSpec = MI->getOperand(Op);
+ const MCOperand &SegReg = MI->getOperand(Op+1);
O << markup("<mem:");
+ // If this has a segment register, print it.
+ if (SegReg.getReg()) {
+ printOperand(MI, Op+1, O);
+ O << ':';
+ }
+
if (DispSpec.isImm()) {
O << formatImm(DispSpec.getImm());
} else {
diff --git a/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h b/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
index a8fab72..531183b 100644
--- a/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
@@ -19,19 +19,21 @@
namespace llvm {
class MCOperand;
-
-class X86ATTInstPrinter : public MCInstPrinter {
+
+class X86ATTInstPrinter final : public MCInstPrinter {
public:
X86ATTInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
const MCRegisterInfo &MRI)
: MCInstPrinter(MAI, MII, MRI) {}
- virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
- virtual void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot);
+ void printRegName(raw_ostream &OS, unsigned RegNo) const override;
+ void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot) override;
// Autogenerated by tblgen, returns true if we successfully printed an
// alias.
bool printAliasInstr(const MCInst *MI, raw_ostream &OS);
+ void printCustomAliasOperand(const MCInst *MI, unsigned OpIdx,
+ unsigned PrintMethodIdx, raw_ostream &O);
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &OS);
@@ -42,7 +44,10 @@ public:
void printSSECC(const MCInst *MI, unsigned Op, raw_ostream &OS);
void printAVXCC(const MCInst *MI, unsigned Op, raw_ostream &OS);
void printPCRelImm(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
+ void printSrcIdx(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
+ void printDstIdx(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
void printMemOffset(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
+ void printRoundingControl(const MCInst *MI, unsigned Op, raw_ostream &OS);
void printopaquemem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
@@ -88,6 +93,30 @@ public:
printMemReference(MI, OpNo, O);
}
+ void printSrcIdx8(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ printSrcIdx(MI, OpNo, O);
+ }
+ void printSrcIdx16(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ printSrcIdx(MI, OpNo, O);
+ }
+ void printSrcIdx32(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ printSrcIdx(MI, OpNo, O);
+ }
+ void printSrcIdx64(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ printSrcIdx(MI, OpNo, O);
+ }
+ void printDstIdx8(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ printDstIdx(MI, OpNo, O);
+ }
+ void printDstIdx16(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ printDstIdx(MI, OpNo, O);
+ }
+ void printDstIdx32(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ printDstIdx(MI, OpNo, O);
+ }
+ void printDstIdx64(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ printDstIdx(MI, OpNo, O);
+ }
void printMemOffs8(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemOffset(MI, OpNo, O);
}
diff --git a/contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.cpp b/contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.cpp
index 0f6eeb1..baf6507 100644
--- a/contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.cpp
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.cpp
@@ -16,7 +16,9 @@
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "Utils/X86ShuffleDecode.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/Support/raw_ostream.h"
+
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -30,7 +32,7 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
const char *(*getRegName)(unsigned)) {
// If this is a shuffle operation, the switch should fill in this state.
SmallVector<int, 8> ShuffleMask;
- const char *DestName = 0, *Src1Name = 0, *Src2Name = 0;
+ const char *DestName = nullptr, *Src1Name = nullptr, *Src2Name = nullptr;
switch (MI->getOpcode()) {
case X86::INSERTPSrr:
@@ -38,7 +40,8 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
DestName = getRegName(MI->getOperand(0).getReg());
Src1Name = getRegName(MI->getOperand(1).getReg());
Src2Name = getRegName(MI->getOperand(2).getReg());
- DecodeINSERTPSMask(MI->getOperand(3).getImm(), ShuffleMask);
+ if(MI->getOperand(3).isImm())
+ DecodeINSERTPSMask(MI->getOperand(3).getImm(), ShuffleMask);
break;
case X86::MOVLHPSrr:
@@ -65,9 +68,10 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::VPALIGNR128rm:
Src2Name = getRegName(MI->getOperand(1).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
- DecodePALIGNRMask(MVT::v16i8,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePALIGNRMask(MVT::v16i8,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
break;
case X86::VPALIGNR256rr:
Src1Name = getRegName(MI->getOperand(2).getReg());
@@ -75,9 +79,10 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::VPALIGNR256rm:
Src2Name = getRegName(MI->getOperand(1).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
- DecodePALIGNRMask(MVT::v32i8,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePALIGNRMask(MVT::v32i8,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
break;
case X86::PSHUFDri:
@@ -87,16 +92,20 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::PSHUFDmi:
case X86::VPSHUFDmi:
DestName = getRegName(MI->getOperand(0).getReg());
- DecodePSHUFMask(MVT::v4i32, MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSHUFMask(MVT::v4i32,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
break;
case X86::VPSHUFDYri:
Src1Name = getRegName(MI->getOperand(1).getReg());
// FALL THROUGH.
case X86::VPSHUFDYmi:
DestName = getRegName(MI->getOperand(0).getReg());
- DecodePSHUFMask(MVT::v8i32, MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSHUFMask(MVT::v8i32,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
break;
@@ -107,18 +116,20 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::PSHUFHWmi:
case X86::VPSHUFHWmi:
DestName = getRegName(MI->getOperand(0).getReg());
- DecodePSHUFHWMask(MVT::v8i16,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSHUFHWMask(MVT::v8i16,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
break;
case X86::VPSHUFHWYri:
Src1Name = getRegName(MI->getOperand(1).getReg());
// FALL THROUGH.
case X86::VPSHUFHWYmi:
DestName = getRegName(MI->getOperand(0).getReg());
- DecodePSHUFHWMask(MVT::v16i16,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSHUFHWMask(MVT::v16i16,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
break;
case X86::PSHUFLWri:
case X86::VPSHUFLWri:
@@ -127,18 +138,20 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::PSHUFLWmi:
case X86::VPSHUFLWmi:
DestName = getRegName(MI->getOperand(0).getReg());
- DecodePSHUFLWMask(MVT::v8i16,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSHUFLWMask(MVT::v8i16,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
break;
case X86::VPSHUFLWYri:
Src1Name = getRegName(MI->getOperand(1).getReg());
// FALL THROUGH.
case X86::VPSHUFLWYmi:
DestName = getRegName(MI->getOperand(0).getReg());
- DecodePSHUFLWMask(MVT::v16i16,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSHUFLWMask(MVT::v16i16,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
break;
case X86::PUNPCKHBWrr:
@@ -293,8 +306,10 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
// FALL THROUGH.
case X86::SHUFPDrmi:
case X86::VSHUFPDrmi:
- DecodeSHUFPMask(MVT::v2f64, MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeSHUFPMask(MVT::v2f64,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
Src1Name = getRegName(MI->getOperand(1).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
break;
@@ -302,8 +317,10 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
Src2Name = getRegName(MI->getOperand(2).getReg());
// FALL THROUGH.
case X86::VSHUFPDYrmi:
- DecodeSHUFPMask(MVT::v4f64, MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeSHUFPMask(MVT::v4f64,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
Src1Name = getRegName(MI->getOperand(1).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
break;
@@ -314,8 +331,10 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
// FALL THROUGH.
case X86::SHUFPSrmi:
case X86::VSHUFPSrmi:
- DecodeSHUFPMask(MVT::v4f32, MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeSHUFPMask(MVT::v4f32,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
Src1Name = getRegName(MI->getOperand(1).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
break;
@@ -323,8 +342,10 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
Src2Name = getRegName(MI->getOperand(2).getReg());
// FALL THROUGH.
case X86::VSHUFPSYrmi:
- DecodeSHUFPMask(MVT::v8f32, MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeSHUFPMask(MVT::v8f32,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
Src1Name = getRegName(MI->getOperand(1).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
break;
@@ -405,32 +426,40 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
Src1Name = getRegName(MI->getOperand(1).getReg());
// FALL THROUGH.
case X86::VPERMILPSmi:
- DecodePSHUFMask(MVT::v4f32, MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSHUFMask(MVT::v4f32,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
case X86::VPERMILPSYri:
Src1Name = getRegName(MI->getOperand(1).getReg());
// FALL THROUGH.
case X86::VPERMILPSYmi:
- DecodePSHUFMask(MVT::v8f32, MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSHUFMask(MVT::v8f32,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
case X86::VPERMILPDri:
Src1Name = getRegName(MI->getOperand(1).getReg());
// FALL THROUGH.
case X86::VPERMILPDmi:
- DecodePSHUFMask(MVT::v2f64, MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSHUFMask(MVT::v2f64,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
case X86::VPERMILPDYri:
Src1Name = getRegName(MI->getOperand(1).getReg());
// FALL THROUGH.
case X86::VPERMILPDYmi:
- DecodePSHUFMask(MVT::v4f64, MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodePSHUFMask(MVT::v4f64,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
case X86::VPERM2F128rr:
@@ -440,9 +469,10 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::VPERM2F128rm:
case X86::VPERM2I128rm:
// For instruction comments purpose, assume the 256-bit vector is v4i64.
- DecodeVPERM2X128Mask(MVT::v4i64,
- MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeVPERM2X128Mask(MVT::v4i64,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
Src1Name = getRegName(MI->getOperand(1).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
break;
@@ -452,8 +482,9 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
// FALL THROUGH.
case X86::VPERMQYmi:
case X86::VPERMPDYmi:
- DecodeVPERMMask(MI->getOperand(MI->getNumOperands()-1).getImm(),
- ShuffleMask);
+ if(MI->getOperand(MI->getNumOperands()-1).isImm())
+ DecodeVPERMMask(MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
}
@@ -461,7 +492,7 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
// If this was a shuffle operation, print the shuffle mask.
if (!ShuffleMask.empty()) {
- if (DestName == 0) DestName = Src1Name;
+ if (!DestName) DestName = Src1Name;
OS << (DestName ? DestName : "mem") << " = ";
// If the two sources are the same, canonicalize the input elements to be
diff --git a/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp b/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
index e7e7b15..1c8466b 100644
--- a/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
#include "X86IntelInstPrinter.h"
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
@@ -25,6 +24,8 @@
#include <cctype>
using namespace llvm;
+#define DEBUG_TYPE "asm-printer"
+
#include "X86GenAsmWriter1.inc"
void X86IntelInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
@@ -113,6 +114,17 @@ void X86IntelInstPrinter::printAVXCC(const MCInst *MI, unsigned Op,
}
}
+void X86IntelInstPrinter::printRoundingControl(const MCInst *MI, unsigned Op,
+ raw_ostream &O) {
+ int64_t Imm = MI->getOperand(Op).getImm() & 0x3;
+ switch (Imm) {
+ case 0: O << "{rn-sae}"; break;
+ case 1: O << "{rd-sae}"; break;
+ case 2: O << "{ru-sae}"; break;
+ case 3: O << "{rz-sae}"; break;
+ }
+}
+
/// printPCRelImm - This is used to print an immediate value that ends up
/// being encoded as a pc-relative value.
void X86IntelInstPrinter::printPCRelImm(const MCInst *MI, unsigned OpNo,
@@ -151,15 +163,15 @@ void X86IntelInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
void X86IntelInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
raw_ostream &O) {
- const MCOperand &BaseReg = MI->getOperand(Op);
- unsigned ScaleVal = MI->getOperand(Op+1).getImm();
- const MCOperand &IndexReg = MI->getOperand(Op+2);
- const MCOperand &DispSpec = MI->getOperand(Op+3);
- const MCOperand &SegReg = MI->getOperand(Op+4);
+ const MCOperand &BaseReg = MI->getOperand(Op+X86::AddrBaseReg);
+ unsigned ScaleVal = MI->getOperand(Op+X86::AddrScaleAmt).getImm();
+ const MCOperand &IndexReg = MI->getOperand(Op+X86::AddrIndexReg);
+ const MCOperand &DispSpec = MI->getOperand(Op+X86::AddrDisp);
+ const MCOperand &SegReg = MI->getOperand(Op+X86::AddrSegmentReg);
// If this has a segment register, print it.
if (SegReg.getReg()) {
- printOperand(MI, Op+4, O);
+ printOperand(MI, Op+X86::AddrSegmentReg, O);
O << ':';
}
@@ -167,7 +179,7 @@ void X86IntelInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
bool NeedPlus = false;
if (BaseReg.getReg()) {
- printOperand(MI, Op, O);
+ printOperand(MI, Op+X86::AddrBaseReg, O);
NeedPlus = true;
}
@@ -175,7 +187,7 @@ void X86IntelInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
if (NeedPlus) O << " + ";
if (ScaleVal != 1)
O << ScaleVal << '*';
- printOperand(MI, Op+2, O);
+ printOperand(MI, Op+X86::AddrIndexReg, O);
NeedPlus = true;
}
@@ -201,9 +213,38 @@ void X86IntelInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
O << ']';
}
+void X86IntelInstPrinter::printSrcIdx(const MCInst *MI, unsigned Op,
+ raw_ostream &O) {
+ const MCOperand &SegReg = MI->getOperand(Op+1);
+
+ // If this has a segment register, print it.
+ if (SegReg.getReg()) {
+ printOperand(MI, Op+1, O);
+ O << ':';
+ }
+ O << '[';
+ printOperand(MI, Op, O);
+ O << ']';
+}
+
+void X86IntelInstPrinter::printDstIdx(const MCInst *MI, unsigned Op,
+ raw_ostream &O) {
+ // DI accesses are always ES-based.
+ O << "es:[";
+ printOperand(MI, Op, O);
+ O << ']';
+}
+
void X86IntelInstPrinter::printMemOffset(const MCInst *MI, unsigned Op,
raw_ostream &O) {
const MCOperand &DispSpec = MI->getOperand(Op);
+ const MCOperand &SegReg = MI->getOperand(Op+1);
+
+ // If this has a segment register, print it.
+ if (SegReg.getReg()) {
+ printOperand(MI, Op+1, O);
+ O << ':';
+ }
O << '[';
diff --git a/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h b/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
index 590bf68..4d9b481 100644
--- a/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
@@ -20,16 +20,16 @@
namespace llvm {
class MCOperand;
-
-class X86IntelInstPrinter : public MCInstPrinter {
+
+class X86IntelInstPrinter final : public MCInstPrinter {
public:
X86IntelInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
const MCRegisterInfo &MRI)
: MCInstPrinter(MAI, MII, MRI) {}
- virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
- virtual void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot);
-
+ void printRegName(raw_ostream &OS, unsigned RegNo) const override;
+ void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot) override;
+
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &O);
static const char *getRegisterName(unsigned RegNo);
@@ -40,6 +40,9 @@ public:
void printAVXCC(const MCInst *MI, unsigned Op, raw_ostream &O);
void printPCRelImm(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printMemOffset(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printSrcIdx(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printDstIdx(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printRoundingControl(const MCInst *MI, unsigned Op, raw_ostream &OS);
void printopaquemem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "opaque ptr ";
@@ -99,6 +102,39 @@ public:
printMemReference(MI, OpNo, O);
}
+
+ void printSrcIdx8(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ O << "byte ptr ";
+ printSrcIdx(MI, OpNo, O);
+ }
+ void printSrcIdx16(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ O << "word ptr ";
+ printSrcIdx(MI, OpNo, O);
+ }
+ void printSrcIdx32(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ O << "dword ptr ";
+ printSrcIdx(MI, OpNo, O);
+ }
+ void printSrcIdx64(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ O << "qword ptr ";
+ printSrcIdx(MI, OpNo, O);
+ }
+ void printDstIdx8(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ O << "byte ptr ";
+ printDstIdx(MI, OpNo, O);
+ }
+ void printDstIdx16(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ O << "word ptr ";
+ printDstIdx(MI, OpNo, O);
+ }
+ void printDstIdx32(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ O << "dword ptr ";
+ printDstIdx(MI, OpNo, O);
+ }
+ void printDstIdx64(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ O << "qword ptr ";
+ printDstIdx(MI, OpNo, O);
+ }
void printMemOffs8(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "byte ptr ";
printMemOffset(MI, OpNo, O);
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index f8e359b..23bca0d 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -37,23 +37,29 @@ MCDisableArithRelaxation("mc-x86-disable-arith-relaxation",
static unsigned getFixupKindLog2Size(unsigned Kind) {
switch (Kind) {
- default: llvm_unreachable("invalid fixup kind!");
+ default:
+ llvm_unreachable("invalid fixup kind!");
case FK_PCRel_1:
case FK_SecRel_1:
- case FK_Data_1: return 0;
+ case FK_Data_1:
+ return 0;
case FK_PCRel_2:
case FK_SecRel_2:
- case FK_Data_2: return 1;
+ case FK_Data_2:
+ return 1;
case FK_PCRel_4:
case X86::reloc_riprel_4byte:
case X86::reloc_riprel_4byte_movq_load:
case X86::reloc_signed_4byte:
case X86::reloc_global_offset_table:
case FK_SecRel_4:
- case FK_Data_4: return 2;
+ case FK_Data_4:
+ return 2;
case FK_PCRel_8:
case FK_SecRel_8:
- case FK_Data_8: return 3;
+ case FK_Data_8:
+ case X86::reloc_global_offset_table8:
+ return 3;
}
}
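
The log2 returned here is turned back into a byte count in applyFixup below (Size = 1 << log2), and that many little-endian bytes are patched in place. A standalone sketch of just that arithmetic, not the backend's code:

#include <cstddef>
#include <cstdint>

// Patch `1 << log2Size` little-endian bytes of `value` into `data` at
// `offset`; e.g. FK_Data_4 -> log2 of 2 -> 4 bytes written.
static void patchFixupSketch(uint8_t *data, size_t offset,
                             unsigned log2Size, uint64_t value) {
  const unsigned size = 1u << log2Size;
  for (unsigned i = 0; i != size; ++i)
    data[offset + i] = static_cast<uint8_t>(value >> (i * 8));
}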
@@ -67,11 +73,12 @@ public:
};
class X86AsmBackend : public MCAsmBackend {
- StringRef CPU;
+ const StringRef CPU;
bool HasNopl;
+ const uint64_t MaxNopLength;
public:
X86AsmBackend(const Target &T, StringRef _CPU)
- : MCAsmBackend(), CPU(_CPU) {
+ : MCAsmBackend(), CPU(_CPU), MaxNopLength(_CPU == "slm" ? 7 : 15) {
HasNopl = CPU != "generic" && CPU != "i386" && CPU != "i486" &&
CPU != "i586" && CPU != "pentium" && CPU != "pentium-mmx" &&
CPU != "i686" && CPU != "k6" && CPU != "k6-2" && CPU != "k6-3" &&
@@ -79,11 +86,11 @@ public:
CPU != "c3" && CPU != "c3-2";
}
- unsigned getNumFixupKinds() const {
+ unsigned getNumFixupKinds() const override {
return X86::NumTargetFixupKinds;
}
- const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
{ "reloc_riprel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
{ "reloc_riprel_4byte_movq_load", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel},
@@ -100,7 +107,7 @@ public:
}
void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
- uint64_t Value) const {
+ uint64_t Value, bool IsPCRel) const override {
unsigned Size = 1 << getFixupKindLog2Size(Fixup.getKind());
assert(Fixup.getOffset() + Size <= DataSize &&
@@ -117,16 +124,15 @@ public:
Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
}
- bool mayNeedRelaxation(const MCInst &Inst) const;
+ bool mayNeedRelaxation(const MCInst &Inst) const override;
- bool fixupNeedsRelaxation(const MCFixup &Fixup,
- uint64_t Value,
+ bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
const MCRelaxableFragment *DF,
- const MCAsmLayout &Layout) const;
+ const MCAsmLayout &Layout) const override;
- void relaxInstruction(const MCInst &Inst, MCInst &Res) const;
+ void relaxInstruction(const MCInst &Inst, MCInst &Res) const override;
- bool writeNopData(uint64_t Count, MCObjectWriter *OW) const;
+ bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
};
} // end anonymous namespace
@@ -217,9 +223,9 @@ static unsigned getRelaxedOpcodeArith(unsigned Op) {
case X86::CMP64mi8: return X86::CMP64mi32;
// PUSH
- case X86::PUSHi8: return X86::PUSHi32;
- case X86::PUSHi16: return X86::PUSHi32;
- case X86::PUSH64i8: return X86::PUSH64i32;
+ case X86::PUSH32i8: return X86::PUSHi32;
+ case X86::PUSH16i8: return X86::PUSHi16;
+ case X86::PUSH64i8: return X86::PUSH64i32;
case X86::PUSH64i16: return X86::PUSH64i32;
}
}
@@ -314,7 +320,7 @@ bool X86AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
{0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
};
- // This CPU doesnt support long nops. If needed add more.
+ // This CPU doesn't support long nops. If needed add more.
// FIXME: Can we get this from the subtarget somehow?
  // FIXME: We could generate something better than plain 0x90.
if (!HasNopl) {
@@ -326,7 +332,7 @@ bool X86AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
// 15 is the longest single nop instruction. Emit as many 15-byte nops as
// needed, then emit a nop of the remaining length.
do {
- const uint8_t ThisNopLength = (uint8_t) std::min(Count, (uint64_t) 15);
+ const uint8_t ThisNopLength = (uint8_t) std::min(Count, MaxNopLength);
const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
for (uint8_t i = 0; i < Prefixes; i++)
OW->Write8(0x66);
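
The new MaxNopLength member (7 on "slm", 15 otherwise) caps the chunk size used by the loop above, and chunks longer than 10 bytes are padded with leading 0x66 prefixes. A small sketch of the chunking, assuming Count > 0 as in the caller; the helper is illustrative only.

#include <algorithm>
#include <cstdint>
#include <vector>

// Split a nop request into chunk lengths no larger than maxNopLength, the
// same way the emission loop above walks Count down to zero.
static std::vector<uint8_t> nopChunksSketch(uint64_t count,
                                            uint64_t maxNopLength) {
  std::vector<uint8_t> chunks;
  do {
    const uint8_t thisLen = static_cast<uint8_t>(std::min(count, maxNopLength));
    chunks.push_back(thisLen); // a chunk of N > 10 gets (N - 10) 0x66 prefixes
    count -= thisLen;
  } while (count != 0);
  return chunks;
}

// e.g. nopChunksSketch(20, 15) yields {15, 5}; nopChunksSketch(20, 7) yields
// {7, 7, 6} for the Silvermont ("slm") limit.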
@@ -347,14 +353,7 @@ class ELFX86AsmBackend : public X86AsmBackend {
public:
uint8_t OSABI;
ELFX86AsmBackend(const Target &T, uint8_t _OSABI, StringRef CPU)
- : X86AsmBackend(T, CPU), OSABI(_OSABI) {
- HasReliableSymbolDifference = true;
- }
-
- virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
- const MCSectionELF &ES = static_cast<const MCSectionELF&>(Section);
- return ES.getFlags() & ELF::SHF_MERGE;
- }
+ : X86AsmBackend(T, CPU), OSABI(_OSABI) {}
};
class ELFX86_32AsmBackend : public ELFX86AsmBackend {
@@ -362,17 +361,28 @@ public:
ELFX86_32AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU)
: ELFX86AsmBackend(T, OSABI, CPU) {}
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
return createX86ELFObjectWriter(OS, /*IsELF64*/ false, OSABI, ELF::EM_386);
}
};
+class ELFX86_X32AsmBackend : public ELFX86AsmBackend {
+public:
+ ELFX86_X32AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU)
+ : ELFX86AsmBackend(T, OSABI, CPU) {}
+
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
+ return createX86ELFObjectWriter(OS, /*IsELF64*/ false, OSABI,
+ ELF::EM_X86_64);
+ }
+};
+
class ELFX86_64AsmBackend : public ELFX86AsmBackend {
public:
ELFX86_64AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU)
: ELFX86AsmBackend(T, OSABI, CPU) {}
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
return createX86ELFObjectWriter(OS, /*IsELF64*/ true, OSABI, ELF::EM_X86_64);
}
};
@@ -386,7 +396,7 @@ public:
, Is64Bit(is64Bit) {
}
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
return createX86WinCOFFObjectWriter(OS, Is64Bit);
}
};
@@ -719,43 +729,37 @@ public:
};
class DarwinX86_32AsmBackend : public DarwinX86AsmBackend {
- bool SupportsCU;
public:
DarwinX86_32AsmBackend(const Target &T, const MCRegisterInfo &MRI,
- StringRef CPU, bool SupportsCU)
- : DarwinX86AsmBackend(T, MRI, CPU, false), SupportsCU(SupportsCU) {}
+ StringRef CPU)
+ : DarwinX86AsmBackend(T, MRI, CPU, false) {}
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
return createX86MachObjectWriter(OS, /*Is64Bit=*/false,
MachO::CPU_TYPE_I386,
MachO::CPU_SUBTYPE_I386_ALL);
}
/// \brief Generate the compact unwind encoding for the CFI instructions.
- virtual uint32_t
- generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction> Instrs) const {
- return SupportsCU ? generateCompactUnwindEncodingImpl(Instrs) : 0;
+ uint32_t generateCompactUnwindEncoding(
+ ArrayRef<MCCFIInstruction> Instrs) const override {
+ return generateCompactUnwindEncodingImpl(Instrs);
}
};
class DarwinX86_64AsmBackend : public DarwinX86AsmBackend {
- bool SupportsCU;
const MachO::CPUSubTypeX86 Subtype;
public:
DarwinX86_64AsmBackend(const Target &T, const MCRegisterInfo &MRI,
- StringRef CPU, bool SupportsCU,
- MachO::CPUSubTypeX86 st)
- : DarwinX86AsmBackend(T, MRI, CPU, true), SupportsCU(SupportsCU),
- Subtype(st) {
- HasReliableSymbolDifference = true;
- }
+ StringRef CPU, MachO::CPUSubTypeX86 st)
+ : DarwinX86AsmBackend(T, MRI, CPU, true), Subtype(st) {}
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
return createX86MachObjectWriter(OS, /*Is64Bit=*/true,
MachO::CPU_TYPE_X86_64, Subtype);
}
- virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
+ bool doesSectionRequireSymbols(const MCSection &Section) const override {
// Temporary labels in the string literals sections require symbols. The
// issue is that the x86_64 relocation format does not allow symbol +
// offset, and so the linker does not have enough information to resolve the
@@ -765,33 +769,33 @@ public:
//
// See <rdar://problem/4765733>.
const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
- return SMO.getType() == MCSectionMachO::S_CSTRING_LITERALS;
+ return SMO.getType() == MachO::S_CSTRING_LITERALS;
}
- virtual bool isSectionAtomizable(const MCSection &Section) const {
+ bool isSectionAtomizable(const MCSection &Section) const override {
const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
// Fixed sized data sections are uniqued, they cannot be diced into atoms.
switch (SMO.getType()) {
default:
return true;
- case MCSectionMachO::S_4BYTE_LITERALS:
- case MCSectionMachO::S_8BYTE_LITERALS:
- case MCSectionMachO::S_16BYTE_LITERALS:
- case MCSectionMachO::S_LITERAL_POINTERS:
- case MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS:
- case MCSectionMachO::S_LAZY_SYMBOL_POINTERS:
- case MCSectionMachO::S_MOD_INIT_FUNC_POINTERS:
- case MCSectionMachO::S_MOD_TERM_FUNC_POINTERS:
- case MCSectionMachO::S_INTERPOSING:
+ case MachO::S_4BYTE_LITERALS:
+ case MachO::S_8BYTE_LITERALS:
+ case MachO::S_16BYTE_LITERALS:
+ case MachO::S_LITERAL_POINTERS:
+ case MachO::S_NON_LAZY_SYMBOL_POINTERS:
+ case MachO::S_LAZY_SYMBOL_POINTERS:
+ case MachO::S_MOD_INIT_FUNC_POINTERS:
+ case MachO::S_MOD_TERM_FUNC_POINTERS:
+ case MachO::S_INTERPOSING:
return false;
}
}
/// \brief Generate the compact unwind encoding for the CFI instructions.
- virtual uint32_t
- generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction> Instrs) const {
- return SupportsCU ? generateCompactUnwindEncodingImpl(Instrs) : 0;
+ uint32_t generateCompactUnwindEncoding(
+ ArrayRef<MCCFIInstruction> Instrs) const override {
+ return generateCompactUnwindEncodingImpl(Instrs);
}
};
@@ -803,12 +807,10 @@ MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
StringRef CPU) {
Triple TheTriple(TT);
- if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO)
- return new DarwinX86_32AsmBackend(T, MRI, CPU,
- TheTriple.isMacOSX() &&
- !TheTriple.isMacOSXVersionLT(10, 7));
+ if (TheTriple.isOSBinFormatMachO())
+ return new DarwinX86_32AsmBackend(T, MRI, CPU);
- if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF)
+ if (TheTriple.isOSWindows() && !TheTriple.isOSBinFormatELF())
return new WindowsX86AsmBackend(T, false, CPU);
uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
@@ -821,19 +823,20 @@ MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
StringRef CPU) {
Triple TheTriple(TT);
- if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO) {
+ if (TheTriple.isOSBinFormatMachO()) {
MachO::CPUSubTypeX86 CS =
StringSwitch<MachO::CPUSubTypeX86>(TheTriple.getArchName())
.Case("x86_64h", MachO::CPU_SUBTYPE_X86_64_H)
.Default(MachO::CPU_SUBTYPE_X86_64_ALL);
- return new DarwinX86_64AsmBackend(T, MRI, CPU,
- TheTriple.isMacOSX() &&
- !TheTriple.isMacOSXVersionLT(10, 7), CS);
+ return new DarwinX86_64AsmBackend(T, MRI, CPU, CS);
}
- if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF)
+ if (TheTriple.isOSWindows() && !TheTriple.isOSBinFormatELF())
return new WindowsX86AsmBackend(T, true, CPU);
uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
+
+ if (TheTriple.getEnvironment() == Triple::GNUX32)
+ return new ELFX86_X32AsmBackend(T, OSABI, CPU);
return new ELFX86_64AsmBackend(T, OSABI, CPU);
}
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index 1ef9814..026e4c4 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -18,9 +18,9 @@
#define X86BASEINFO_H
#include "X86MCTargetDesc.h"
+#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/MC/MCInstrInfo.h"
namespace llvm {
@@ -255,6 +255,38 @@ namespace X86II {
///
MRMSrcMem = 6,
+ /// RawFrmMemOffs - This form is for instructions that store an absolute
+ /// memory offset as an immediate with a possible segment override.
+ RawFrmMemOffs = 7,
+
+ /// RawFrmSrc - This form is for instructions that use the source index
+ /// register SI/ESI/RSI with a possible segment override.
+ RawFrmSrc = 8,
+
+ /// RawFrmDst - This form is for instructions that use the destination index
+  /// register DI/EDI/RDI.
+ RawFrmDst = 9,
+
+  /// RawFrmDstSrc - This form is for instructions that use the source index
+  /// register SI/ESI/RSI with a possible segment override, and also the
+  /// destination index register DI/EDI/RDI.
+ RawFrmDstSrc = 10,
+
+ /// RawFrmImm8 - This is used for the ENTER instruction, which has two
+ /// immediates, the first of which is a 16-bit immediate (specified by
+ /// the imm encoding) and the second is a 8-bit fixed value.
+ RawFrmImm8 = 11,
+
+ /// RawFrmImm16 - This is used for CALL FAR instructions, which have two
+ /// immediates, the first of which is a 16 or 32-bit immediate (specified by
+ /// the imm encoding) and the second is a 16-bit fixed value. In the AMD
+ /// manual, this operand is described as pntr16:32 and pntr16:16
+ RawFrmImm16 = 12,
+
+ /// MRMX[rm] - The forms are used to represent instructions that use a
+ /// Mod/RM byte, and don't use the middle field for anything.
+ MRMXr = 14, MRMXm = 15,
+
/// MRM[0-7][rm] - These forms are used to represent instructions that use
/// a Mod/RM byte, and use the middle field to hold extended opcode
/// information. In the intel manual these are represented as /0, /1, ...
@@ -268,94 +300,83 @@ namespace X86II {
MRM0m = 24, MRM1m = 25, MRM2m = 26, MRM3m = 27, // Format /0 /1 /2 /3
MRM4m = 28, MRM5m = 29, MRM6m = 30, MRM7m = 31, // Format /4 /5 /6 /7
- // MRMInitReg - This form is used for instructions whose source and
- // destinations are the same register.
- MRMInitReg = 32,
-
//// MRM_XX - A mod/rm byte of exactly 0xXX.
- MRM_C1 = 33, MRM_C2 = 34, MRM_C3 = 35, MRM_C4 = 36,
- MRM_C8 = 37, MRM_C9 = 38, MRM_CA = 39, MRM_CB = 40,
- MRM_E8 = 41, MRM_F0 = 42, MRM_F8 = 45, MRM_F9 = 46,
- MRM_D0 = 47, MRM_D1 = 48, MRM_D4 = 49, MRM_D5 = 50,
- MRM_D6 = 51, MRM_D8 = 52, MRM_D9 = 53, MRM_DA = 54,
- MRM_DB = 55, MRM_DC = 56, MRM_DD = 57, MRM_DE = 58,
- MRM_DF = 59,
-
- /// RawFrmImm8 - This is used for the ENTER instruction, which has two
- /// immediates, the first of which is a 16-bit immediate (specified by
- /// the imm encoding) and the second is a 8-bit fixed value.
- RawFrmImm8 = 43,
-
- /// RawFrmImm16 - This is used for CALL FAR instructions, which have two
- /// immediates, the first of which is a 16 or 32-bit immediate (specified by
- /// the imm encoding) and the second is a 16-bit fixed value. In the AMD
- /// manual, this operand is described as pntr16:32 and pntr16:16
- RawFrmImm16 = 44,
-
- FormMask = 63,
+ MRM_C0 = 32, MRM_C1 = 33, MRM_C2 = 34, MRM_C3 = 35,
+ MRM_C4 = 36, MRM_C8 = 37, MRM_C9 = 38, MRM_CA = 39,
+ MRM_CB = 40, MRM_D0 = 41, MRM_D1 = 42, MRM_D4 = 43,
+ MRM_D5 = 44, MRM_D6 = 45, MRM_D8 = 46, MRM_D9 = 47,
+ MRM_DA = 48, MRM_DB = 49, MRM_DC = 50, MRM_DD = 51,
+ MRM_DE = 52, MRM_DF = 53, MRM_E0 = 54, MRM_E1 = 55,
+ MRM_E2 = 56, MRM_E3 = 57, MRM_E4 = 58, MRM_E5 = 59,
+ MRM_E8 = 60, MRM_E9 = 61, MRM_EA = 62, MRM_EB = 63,
+ MRM_EC = 64, MRM_ED = 65, MRM_EE = 66, MRM_F0 = 67,
+ MRM_F1 = 68, MRM_F2 = 69, MRM_F3 = 70, MRM_F4 = 71,
+ MRM_F5 = 72, MRM_F6 = 73, MRM_F7 = 74, MRM_F8 = 75,
+ MRM_F9 = 76, MRM_FA = 77, MRM_FB = 78, MRM_FC = 79,
+ MRM_FD = 80, MRM_FE = 81, MRM_FF = 82,
+
+ FormMask = 127,
//===------------------------------------------------------------------===//
// Actual flags...
- // OpSize - Set if this instruction requires an operand size prefix (0x66),
- // which most often indicates that the instruction operates on 16 bit data
- // instead of 32 bit data.
- OpSize = 1 << 6,
+ // OpSize - OpSizeFixed implies instruction never needs a 0x66 prefix.
+ // OpSize16 means this is a 16-bit instruction and needs 0x66 prefix in
+  // 32-bit mode. OpSize32 means this is a 32-bit instruction and needs a 0x66
+ // prefix in 16-bit mode.
+ OpSizeShift = 7,
+ OpSizeMask = 0x3 << OpSizeShift,
+
+ OpSize16 = 1,
+ OpSize32 = 2,
  // AdSize - Set if this instruction requires an address size prefix (0x67),
  // which most often indicates that the instruction addresses a 16-bit address
  // instead of a 32-bit address (or a 32-bit address in 64-bit mode).
- AdSize = 1 << 7,
+ AdSizeShift = OpSizeShift + 2,
+ AdSize = 1 << AdSizeShift,
//===------------------------------------------------------------------===//
- // Op0Mask - There are several prefix bytes that are used to form two byte
- // opcodes. These are currently 0x0F, 0xF3, and 0xD8-0xDF. This mask is
- // used to obtain the setting of this field. If no bits in this field is
- // set, there is no prefix byte for obtaining a multibyte opcode.
+ // OpPrefix - There are several prefix bytes that are used as opcode
+ // extensions. These are 0x66, 0xF3, and 0xF2. If this field is 0 there is
+ // no prefix.
//
- Op0Shift = 8,
- Op0Mask = 0x1F << Op0Shift,
+ OpPrefixShift = AdSizeShift + 1,
+ OpPrefixMask = 0x7 << OpPrefixShift,
- // TB - TwoByte - Set if this instruction has a two byte opcode, which
- // starts with a 0x0F byte before the real opcode.
- TB = 1 << Op0Shift,
-
- // REP - The 0xF3 prefix byte indicating repetition of the following
- // instruction.
- REP = 2 << Op0Shift,
-
- // D8-DF - These escape opcodes are used by the floating point unit. These
- // values must remain sequential.
- D8 = 3 << Op0Shift, D9 = 4 << Op0Shift,
- DA = 5 << Op0Shift, DB = 6 << Op0Shift,
- DC = 7 << Op0Shift, DD = 8 << Op0Shift,
- DE = 9 << Op0Shift, DF = 10 << Op0Shift,
+ // PS, PD - Prefix code for packed single and double precision vector
+ // floating point operations performed in the SSE registers.
+ PS = 1 << OpPrefixShift, PD = 2 << OpPrefixShift,
// XS, XD - These prefix codes are for single and double precision scalar
// floating point operations performed in the SSE registers.
- XD = 11 << Op0Shift, XS = 12 << Op0Shift,
+ XS = 3 << OpPrefixShift, XD = 4 << OpPrefixShift,
- // T8, TA, A6, A7 - Prefix after the 0x0F prefix.
- T8 = 13 << Op0Shift, TA = 14 << Op0Shift,
- A6 = 15 << Op0Shift, A7 = 16 << Op0Shift,
+ //===------------------------------------------------------------------===//
+ // OpMap - This field determines which opcode map this instruction
+ // belongs to, i.e. one-byte, two-byte, 0x0f 0x38, 0x0f 0x3a, etc.
+ //
+ OpMapShift = OpPrefixShift + 3,
+ OpMapMask = 0x7 << OpMapShift,
- // T8XD - Prefix before and after 0x0F. Combination of T8 and XD.
- T8XD = 17 << Op0Shift,
+ // OB - OneByte - Set if this instruction has a one byte opcode.
+ OB = 0 << OpMapShift,
- // T8XS - Prefix before and after 0x0F. Combination of T8 and XS.
- T8XS = 18 << Op0Shift,
+ // TB - TwoByte - Set if this instruction has a two byte opcode, which
+ // starts with a 0x0F byte before the real opcode.
+ TB = 1 << OpMapShift,
- // TAXD - Prefix before and after 0x0F. Combination of TA and XD.
- TAXD = 19 << Op0Shift,
+ // T8, TA - Prefix after the 0x0F prefix.
+ T8 = 2 << OpMapShift, TA = 3 << OpMapShift,
// XOP8 - Prefix to include use of imm byte.
- XOP8 = 20 << Op0Shift,
+ XOP8 = 4 << OpMapShift,
// XOP9 - Prefix to exclude use of imm byte.
- XOP9 = 21 << Op0Shift,
+ XOP9 = 5 << OpMapShift,
// XOPA - Prefix to encode 0xA in VEX.MMMM of XOP instructions.
- XOPA = 22 << Op0Shift,
+ XOPA = 6 << OpMapShift,
//===------------------------------------------------------------------===//
// REX_W - REX prefixes are instruction prefixes used in 64-bit mode.
@@ -363,27 +384,28 @@ namespace X86II {
// etc. We only care about the REX.W and REX.R bits and only the former is
// statically determined.
//
- REXShift = Op0Shift + 5,
+ REXShift = OpMapShift + 3,
REX_W = 1 << REXShift,
//===------------------------------------------------------------------===//
// This four-bit field describes the size of an immediate operand. Zero is
// unused so that we can tell if we forgot to set a value.
ImmShift = REXShift + 1,
- ImmMask = 7 << ImmShift,
+ ImmMask = 15 << ImmShift,
Imm8 = 1 << ImmShift,
Imm8PCRel = 2 << ImmShift,
Imm16 = 3 << ImmShift,
Imm16PCRel = 4 << ImmShift,
Imm32 = 5 << ImmShift,
Imm32PCRel = 6 << ImmShift,
- Imm64 = 7 << ImmShift,
+ Imm32S = 7 << ImmShift,
+ Imm64 = 8 << ImmShift,
//===------------------------------------------------------------------===//
// FP Instruction Classification... Zero is non-fp instruction.
// FPTypeMask - Mask for all of the FP types...
- FPTypeShift = ImmShift + 3,
+ FPTypeShift = ImmShift + 4,
FPTypeMask = 7 << FPTypeShift,
// NotFP - The default, set for instructions that do not use FP registers.
@@ -419,51 +441,64 @@ namespace X86II {
LOCKShift = FPTypeShift + 3,
LOCK = 1 << LOCKShift,
- // Segment override prefixes. Currently we just need ability to address
- // stuff in gs and fs segments.
- SegOvrShift = LOCKShift + 1,
- SegOvrMask = 3 << SegOvrShift,
- FS = 1 << SegOvrShift,
- GS = 2 << SegOvrShift,
+ // REP prefix
+ REPShift = LOCKShift + 1,
+ REP = 1 << REPShift,
+
+ // Execution domain for SSE instructions.
+ // 0 means normal, non-SSE instruction.
+ SSEDomainShift = REPShift + 1,
+
+ // Encoding
+ EncodingShift = SSEDomainShift + 2,
+ EncodingMask = 0x3 << EncodingShift,
- // Execution domain for SSE instructions in bits 23, 24.
- // 0 in bits 23-24 means normal, non-SSE instruction.
- SSEDomainShift = SegOvrShift + 2,
+ // VEX - encoding using 0xC4/0xC5
+ VEX = 1,
+
+ /// XOP - Opcode prefix used by XOP instructions.
+ XOP = 2,
- OpcodeShift = SSEDomainShift + 2,
+ // EVEX - Specifies that this instruction uses the EVEX form, which
+ // provides syntax support for up to 32 512-bit register operands and up
+ // to 7 16-bit mask operands, as well as source operand data
+ // swizzling/memory operand conversion, eviction hint, and rounding mode.
+ EVEX = 3,
+
+ // Opcode
+ OpcodeShift = EncodingShift + 2,
//===------------------------------------------------------------------===//
/// VEX - The opcode prefix used by AVX instructions
VEXShift = OpcodeShift + 8,
- VEX = 1U << 0,
/// VEX_W - Has an opcode-specific functionality, but is used in the same
/// way as REX_W is for regular SSE instructions.
- VEX_W = 1U << 1,
+ VEX_W = 1U << 0,
/// VEX_4V - Used to specify an additional AVX/SSE register. Several 2
/// address instructions in SSE are represented as 3 address ones in AVX
/// and the additional register is encoded in VEX_VVVV prefix.
- VEX_4V = 1U << 2,
+ VEX_4V = 1U << 1,
/// VEX_4VOp3 - Similar to VEX_4V, but used on instructions that encode
/// operand 3 with VEX.vvvv.
- VEX_4VOp3 = 1U << 3,
+ VEX_4VOp3 = 1U << 2,
/// VEX_I8IMM - Specifies that the last register used in an AVX instruction
/// must be encoded in the i8 immediate field. This usually happens in
/// instructions with 4 operands.
- VEX_I8IMM = 1U << 4,
+ VEX_I8IMM = 1U << 3,
/// VEX_L - Stands for a bit in the VEX opcode prefix meaning the current
/// instruction uses 256-bit wide registers. This is usually auto detected
/// if a VR256 register is used, but some AVX instructions also have this
/// field marked when using an f256 memory reference.
- VEX_L = 1U << 5,
+ VEX_L = 1U << 4,
// VEX_LIG - Specifies that this instruction ignores the L-bit in the VEX
// prefix. Usually used for scalar instructions. Needed by disassembler.
- VEX_LIG = 1U << 6,
+ VEX_LIG = 1U << 5,
// TODO: we should combine VEX_L and VEX_LIG together to form a 2-bit field
// with following encoding:
@@ -473,31 +508,21 @@ namespace X86II {
// - 11 LIG (but, in insn encoding, leave VEX.L and EVEX.L as zeros;
// this will save 1 tsflag bit)
- // VEX_EVEX - Specifies that this instruction use EVEX form which provides
- // syntax support up to 32 512-bit register operands and up to 7 16-bit
- // mask operands as well as source operand data swizzling/memory operand
- // conversion, eviction hint, and rounding mode.
- EVEX = 1U << 7,
-
// EVEX_K - Set if this instruction requires masking
- EVEX_K = 1U << 8,
+ EVEX_K = 1U << 6,
// EVEX_Z - Set if this instruction has EVEX.Z field set.
- EVEX_Z = 1U << 9,
+ EVEX_Z = 1U << 7,
// EVEX_L2 - Set if this instruction has EVEX.L' field set.
- EVEX_L2 = 1U << 10,
+ EVEX_L2 = 1U << 8,
// EVEX_B - Set if this instruction has EVEX.B field set.
- EVEX_B = 1U << 11,
+ EVEX_B = 1U << 9,
- // EVEX_CD8E - compressed disp8 form, element-size
- EVEX_CD8EShift = VEXShift + 12,
- EVEX_CD8EMask = 3,
-
- // EVEX_CD8V - compressed disp8 form, vector-width
- EVEX_CD8VShift = EVEX_CD8EShift + 2,
- EVEX_CD8VMask = 7,
+ // The scaling factor for the AVX512's 8-bit compressed displacement.
+ CD8_Scale_Shift = VEXShift + 10,
+ CD8_Scale_Mask = 127,
/// Has3DNow0F0FOpcode - This flag indicates that the instruction uses the
/// wacky 0x0F 0x0F prefix for 3DNow! instructions. The manual documents
@@ -505,15 +530,17 @@ namespace X86II {
/// storing a classifier in the imm8 field. To simplify our implementation,
/// we handle this by storing the classifier in the opcode field and using
/// this flag to indicate that the encoder should do the wacky 3DNow! thing.
- Has3DNow0F0FOpcode = 1U << 17,
+ Has3DNow0F0FOpcodeShift = CD8_Scale_Shift + 7,
+ Has3DNow0F0FOpcode = 1U << (Has3DNow0F0FOpcodeShift - VEXShift),
/// MemOp4 - Used to indicate swapping of operand 3 and 4 to be encoded in
/// ModRM or I8IMM. This is used for FMA4 and XOP instructions.
- MemOp4 = 1U << 18,
-
- /// XOP - Opcode prefix used by XOP instructions.
- XOP = 1U << 19
+ MemOp4Shift = Has3DNow0F0FOpcodeShift + 1,
+ MemOp4 = 1U << (MemOp4Shift - VEXShift),
+ /// Explicitly specified rounding control
+ EVEX_RCShift = MemOp4Shift + 1,
+ EVEX_RC = 1U << (EVEX_RCShift - VEXShift)
};
// getBaseOpcodeFor - This function returns the "base" X86 opcode for the
@@ -537,6 +564,7 @@ namespace X86II {
case X86II::Imm16:
case X86II::Imm16PCRel: return 2;
case X86II::Imm32:
+ case X86II::Imm32S:
case X86II::Imm32PCRel: return 4;
case X86II::Imm64: return 8;
}
@@ -554,6 +582,25 @@ namespace X86II {
case X86II::Imm8:
case X86II::Imm16:
case X86II::Imm32:
+ case X86II::Imm32S:
+ case X86II::Imm64:
+ return false;
+ }
+ }
+
+ /// isImmSigned - Return true if the immediate of the specified instruction's
+ /// TSFlags indicates that it is signed.
+ inline unsigned isImmSigned(uint64_t TSFlags) {
+ switch (TSFlags & X86II::ImmMask) {
+ default: llvm_unreachable("Unknown immediate signedness");
+ case X86II::Imm32S:
+ return true;
+ case X86II::Imm8:
+ case X86II::Imm8PCRel:
+ case X86II::Imm16:
+ case X86II::Imm16PCRel:
+ case X86II::Imm32:
+ case X86II::Imm32PCRel:
case X86II::Imm64:
return false;
}
@@ -595,10 +642,11 @@ namespace X86II {
/// counted as one operand.
///
inline int getMemoryOperandNo(uint64_t TSFlags, unsigned Opcode) {
+ bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
+ bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
+ bool HasEVEX_K = ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
+
switch (TSFlags & X86II::FormMask) {
- case X86II::MRMInitReg:
- // FIXME: Remove this form.
- return -1;
default: llvm_unreachable("Unknown FormMask value in getMemoryOperandNo!");
case X86II::Pseudo:
case X86II::RawFrm:
@@ -607,14 +655,14 @@ namespace X86II {
case X86II::MRMSrcReg:
case X86II::RawFrmImm8:
case X86II::RawFrmImm16:
+ case X86II::RawFrmMemOffs:
+ case X86II::RawFrmSrc:
+ case X86II::RawFrmDst:
+ case X86II::RawFrmDstSrc:
return -1;
case X86II::MRMDestMem:
return 0;
case X86II::MRMSrcMem: {
- bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
- bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
- bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX;
- bool HasEVEX_K = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
unsigned FirstMemOp = 1;
if (HasVEX_4V)
++FirstMemOp;// Skip the register source (which is encoded in VEX_VVVV).
@@ -627,11 +675,13 @@ namespace X86II {
// Opcode == X86::LEA16r || Opcode == X86::LEA32r)
return FirstMemOp;
}
+ case X86II::MRMXr:
case X86II::MRM0r: case X86II::MRM1r:
case X86II::MRM2r: case X86II::MRM3r:
case X86II::MRM4r: case X86II::MRM5r:
case X86II::MRM6r: case X86II::MRM7r:
return -1;
+ case X86II::MRMXm:
case X86II::MRM0m: case X86II::MRM1m:
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
@@ -640,17 +690,27 @@ namespace X86II {
unsigned FirstMemOp = 0;
if (HasVEX_4V)
++FirstMemOp;// Skip the register dest (which is encoded in VEX_VVVV).
+ if (HasEVEX_K)
+ ++FirstMemOp;// Skip the mask register
return FirstMemOp;
}
- case X86II::MRM_C1: case X86II::MRM_C2: case X86II::MRM_C3:
- case X86II::MRM_C4: case X86II::MRM_C8: case X86II::MRM_C9:
- case X86II::MRM_CA: case X86II::MRM_CB: case X86II::MRM_E8:
- case X86II::MRM_F0: case X86II::MRM_F8: case X86II::MRM_F9:
+ case X86II::MRM_C0: case X86II::MRM_C1: case X86II::MRM_C2:
+ case X86II::MRM_C3: case X86II::MRM_C4: case X86II::MRM_C8:
+ case X86II::MRM_C9: case X86II::MRM_CA: case X86II::MRM_CB:
case X86II::MRM_D0: case X86II::MRM_D1: case X86II::MRM_D4:
case X86II::MRM_D5: case X86II::MRM_D6: case X86II::MRM_D8:
case X86II::MRM_D9: case X86II::MRM_DA: case X86II::MRM_DB:
case X86II::MRM_DC: case X86II::MRM_DD: case X86II::MRM_DE:
- case X86II::MRM_DF:
+ case X86II::MRM_DF: case X86II::MRM_E0: case X86II::MRM_E1:
+ case X86II::MRM_E2: case X86II::MRM_E3: case X86II::MRM_E4:
+ case X86II::MRM_E5: case X86II::MRM_E8: case X86II::MRM_E9:
+ case X86II::MRM_EA: case X86II::MRM_EB: case X86II::MRM_EC:
+ case X86II::MRM_ED: case X86II::MRM_EE: case X86II::MRM_F0:
+ case X86II::MRM_F1: case X86II::MRM_F2: case X86II::MRM_F3:
+ case X86II::MRM_F4: case X86II::MRM_F5: case X86II::MRM_F6:
+ case X86II::MRM_F7: case X86II::MRM_F8: case X86II::MRM_F9:
+ case X86II::MRM_FA: case X86II::MRM_FB: case X86II::MRM_FC:
+ case X86II::MRM_FD: case X86II::MRM_FE: case X86II::MRM_FF:
return -1;
}
}
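// Illustrative sketch (not part of the diff): how the repacked TSFlags
// bit-fields above are meant to be decoded. The constants mirror the new
// shift/mask layout (FormMask = 127, OpSize in bits 7-8, OpPrefix in bits
// 10-12, OpMap in bits 13-15, and the widened 4-bit immediate kind starting
// at ImmShift = 17); the TSFlags value assembled below is made up for the
// example only.
#include <cstdint>
#include <cstdio>

namespace sketch {
  const uint64_t FormMask      = 127;
  const unsigned OpSizeShift   = 7;
  const uint64_t OpSizeMask    = 0x3ULL << OpSizeShift;
  const unsigned OpPrefixShift = 10;
  const uint64_t OpPrefixMask  = 0x7ULL << OpPrefixShift;
  const unsigned OpMapShift    = 13;
  const uint64_t OpMapMask     = 0x7ULL << OpMapShift;
  const unsigned ImmShift      = 17;
  const uint64_t ImmMask       = 0xFULL << ImmShift;
}

int main() {
  using namespace sketch;
  // A made-up TSFlags word: form value 9, OpSize16 (1), prefix PD (1),
  // map TB (1), immediate kind Imm8 (1).
  uint64_t TSFlags = 9 | (1ULL << OpSizeShift) | (1ULL << OpPrefixShift) |
                     (1ULL << OpMapShift) | (1ULL << ImmShift);
  printf("form=%llu opsize=%llu prefix=%llu map=%llu immkind=%llu\n",
         (unsigned long long)(TSFlags & FormMask),
         (unsigned long long)((TSFlags & OpSizeMask) >> OpSizeShift),
         (unsigned long long)((TSFlags & OpPrefixMask) >> OpPrefixShift),
         (unsigned long long)((TSFlags & OpMapMask) >> OpMapShift),
         (unsigned long long)((TSFlags & ImmMask) >> ImmShift));
  return 0;
}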
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
index 3ddd865..3fdec87 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
@@ -24,9 +24,8 @@ namespace {
virtual ~X86ELFObjectWriter();
protected:
- virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel, bool IsRelocWithSymbol,
- int64_t Addend) const;
+ unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel) const override;
};
}
@@ -41,13 +40,10 @@ X86ELFObjectWriter::~X86ELFObjectWriter()
unsigned X86ELFObjectWriter::GetRelocType(const MCValue &Target,
const MCFixup &Fixup,
- bool IsPCRel,
- bool IsRelocWithSymbol,
- int64_t Addend) const {
+ bool IsPCRel) const {
// determine the type of the relocation
- MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ?
- MCSymbolRefExpr::VK_None : Target.getSymA()->getKind();
+ MCSymbolRefExpr::VariantKind Modifier = Target.getAccessVariant();
unsigned Type;
if (getEMachine() == ELF::EM_X86_64) {
if (IsPCRel) {
@@ -57,6 +53,7 @@ unsigned X86ELFObjectWriter::GetRelocType(const MCValue &Target,
case FK_Data_8: Type = ELF::R_X86_64_PC64; break;
case FK_Data_4: Type = ELF::R_X86_64_PC32; break;
case FK_Data_2: Type = ELF::R_X86_64_PC16; break;
+ case FK_Data_1: Type = ELF::R_X86_64_PC8; break;
case FK_PCRel_8:
assert(Modifier == MCSymbolRefExpr::VK_None);
@@ -101,6 +98,12 @@ unsigned X86ELFObjectWriter::GetRelocType(const MCValue &Target,
} else {
switch ((unsigned)Fixup.getKind()) {
default: llvm_unreachable("invalid fixup kind!");
+ case X86::reloc_global_offset_table8:
+ Type = ELF::R_X86_64_GOTPC64;
+ break;
+ case X86::reloc_global_offset_table:
+ Type = ELF::R_X86_64_GOTPC32;
+ break;
case FK_Data_8:
switch (Modifier) {
default:
@@ -160,6 +163,28 @@ unsigned X86ELFObjectWriter::GetRelocType(const MCValue &Target,
Type = ELF::R_386_GOTPC;
break;
+ case FK_PCRel_1:
+ case FK_Data_1:
+ switch (Modifier) {
+ default:
+ llvm_unreachable("Unimplemented");
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_386_PC8;
+ break;
+ }
+ break;
+
+ case FK_PCRel_2:
+ case FK_Data_2:
+ switch (Modifier) {
+ default:
+ llvm_unreachable("Unimplemented");
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_386_PC16;
+ break;
+ }
+ break;
+
case X86::reloc_signed_4byte:
case FK_PCRel_4:
case FK_Data_4:
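// Illustrative sketch (not part of the diff): the new i386 VK_None cases
// above, reduced to a plain size-to-relocation lookup. The relocation
// numbers are the standard ELF i386 psABI values; the helper itself is
// hypothetical and ignores the modifier handling of the real GetRelocType.
#include <cassert>

// Size of a PC-relative data fixup in bytes -> i386 relocation number.
static unsigned i386PCRelReloc(unsigned SizeInBytes) {
  switch (SizeInBytes) {
  case 1:  return 23; // R_386_PC8
  case 2:  return 21; // R_386_PC16
  case 4:  return 2;  // R_386_PC32
  default: return ~0u; // unsupported size for this sketch
  }
}

int main() {
  assert(i386PCRelReloc(1) == 23 && i386PCRelReloc(2) == 21);
  return 0;
}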
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp
index a3eb4fb..b679316 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp
@@ -11,8 +11,8 @@
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCRelocationInfo.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Support/ELF.h"
@@ -25,7 +25,7 @@ class X86_64ELFRelocationInfo : public MCRelocationInfo {
public:
X86_64ELFRelocationInfo(MCContext &Ctx) : MCRelocationInfo(Ctx) {}
- const MCExpr *createExprForRelocation(RelocationRef Rel) {
+ const MCExpr *createExprForRelocation(RelocationRef Rel) override {
uint64_t RelType; Rel.getType(RelType);
symbol_iterator SymI = Rel.getSymbol();
@@ -39,7 +39,7 @@ public:
if (Sym->isVariable() == false)
Sym->setVariableValue(MCConstantExpr::Create(SymAddr, Ctx));
- const MCExpr *Expr = 0;
+ const MCExpr *Expr = nullptr;
// If hasAddend is true, then we need to add Addend (r_addend) to Expr.
bool hasAddend = false;
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86FixupKinds.h b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
index f2e34cb..09396b7 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
@@ -23,6 +23,7 @@ enum Fixups {
reloc_global_offset_table, // 32-bit, relative to the start
// of the instruction. Used only
// for _GLOBAL_OFFSET_TABLE_.
+ reloc_global_offset_table8, // 64-bit variant.
// Marker
LastTargetFixupKind,
NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
index 8d2b595..b1411bc 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
@@ -51,7 +51,7 @@ X86MCAsmInfoDarwin::X86MCAsmInfoDarwin(const Triple &T) {
TextAlignFillValue = 0x90;
if (!is64Bit)
- Data64bitsDirective = 0; // we can't emit a 64-bit unit
+ Data64bitsDirective = nullptr; // we can't emit a 64-bit unit
// Use ## as a comment string so that .s files generated by llvm can go
// through the GCC preprocessor without causing an error. This is needed
@@ -66,16 +66,18 @@ X86MCAsmInfoDarwin::X86MCAsmInfoDarwin(const Triple &T) {
// Exceptions handling
ExceptionsType = ExceptionHandling::DwarfCFI;
- // FIXME: this should not depend on the target OS version, but on the ld64
- // version in use. From at least >= ld64-97.17 (Xcode 3.2.6) the abs-ified
- // FDE relocs may be used.
- DwarfFDESymbolsUseAbsDiff = T.isMacOSX() && !T.isMacOSXVersionLT(10, 6);
-
// old assembler lacks some directives
// FIXME: this should really be a check on the assembler characteristics
// rather than OS version
if (T.isMacOSX() && T.isMacOSXVersionLT(10, 6))
HasWeakDefCanBeHiddenDirective = false;
+
+ // Assume ld64 is new enough that the abs-ified FDE relocs may be used
+ // (actually, must, since otherwise the non-extern relocations we produce
+ // overwhelm ld64's tiny little mind and it fails).
+ DwarfFDESymbolsUseAbsDiff = true;
+
+ UseIntegratedAssembler = true;
}
X86_64MCAsmInfoDarwin::X86_64MCAsmInfoDarwin(const Triple &Triple)
@@ -100,8 +102,6 @@ X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) {
TextAlignFillValue = 0x90;
- PrivateGlobalPrefix = ".L";
-
// Set up DWARF directives
HasLEB128 = true; // Target asm supports leb128 directives (little-endian)
@@ -115,7 +115,11 @@ X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) {
// into two .words.
if ((T.getOS() == Triple::OpenBSD || T.getOS() == Triple::Bitrig) &&
T.getArch() == Triple::x86)
- Data64bitsDirective = 0;
+ Data64bitsDirective = nullptr;
+
+ // Always enable the integrated assembler by default.
+ // Clang also enabled it when the OS is Solaris but that is redundant here.
+ UseIntegratedAssembler = true;
}
const MCExpr *
@@ -139,8 +143,9 @@ void X86MCAsmInfoMicrosoft::anchor() { }
X86MCAsmInfoMicrosoft::X86MCAsmInfoMicrosoft(const Triple &Triple) {
if (Triple.getArch() == Triple::x86_64) {
- GlobalPrefix = "";
PrivateGlobalPrefix = ".L";
+ PointerSize = 8;
+ ExceptionsType = ExceptionHandling::WinEH;
}
AssemblerDialect = AsmWriterFlavor;
@@ -148,20 +153,25 @@ X86MCAsmInfoMicrosoft::X86MCAsmInfoMicrosoft(const Triple &Triple) {
TextAlignFillValue = 0x90;
AllowAtInName = true;
+
+ UseIntegratedAssembler = true;
}
void X86MCAsmInfoGNUCOFF::anchor() { }
X86MCAsmInfoGNUCOFF::X86MCAsmInfoGNUCOFF(const Triple &Triple) {
+ assert(Triple.isOSWindows() && "Windows is the only supported COFF target");
if (Triple.getArch() == Triple::x86_64) {
- GlobalPrefix = "";
PrivateGlobalPrefix = ".L";
+ PointerSize = 8;
+ ExceptionsType = ExceptionHandling::WinEH;
+ } else {
+ ExceptionsType = ExceptionHandling::DwarfCFI;
}
AssemblerDialect = AsmWriterFlavor;
TextAlignFillValue = 0x90;
- // Exceptions handling
- ExceptionsType = ExceptionHandling::DwarfCFI;
+ UseIntegratedAssembler = true;
}
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h
index 80979dd..a7509b0 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h
@@ -23,34 +23,34 @@ namespace llvm {
class Triple;
class X86MCAsmInfoDarwin : public MCAsmInfoDarwin {
- virtual void anchor();
+ void anchor() override;
public:
explicit X86MCAsmInfoDarwin(const Triple &Triple);
};
struct X86_64MCAsmInfoDarwin : public X86MCAsmInfoDarwin {
explicit X86_64MCAsmInfoDarwin(const Triple &Triple);
- virtual const MCExpr *
- getExprForPersonalitySymbol(const MCSymbol *Sym,
- unsigned Encoding,
- MCStreamer &Streamer) const;
+ const MCExpr *
+ getExprForPersonalitySymbol(const MCSymbol *Sym, unsigned Encoding,
+ MCStreamer &Streamer) const override;
};
class X86ELFMCAsmInfo : public MCAsmInfoELF {
- virtual void anchor();
+ void anchor() override;
public:
explicit X86ELFMCAsmInfo(const Triple &Triple);
- virtual const MCSection *getNonexecutableStackSection(MCContext &Ctx) const;
+ const MCSection *
+ getNonexecutableStackSection(MCContext &Ctx) const override;
};
class X86MCAsmInfoMicrosoft : public MCAsmInfoMicrosoft {
- virtual void anchor();
+ void anchor() override;
public:
explicit X86MCAsmInfoMicrosoft(const Triple &Triple);
};
class X86MCAsmInfoGNUCOFF : public MCAsmInfoGNUCOFF {
- virtual void anchor();
+ void anchor() override;
public:
explicit X86MCAsmInfoGNUCOFF(const Triple &Triple);
};
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index 7952607..075db11 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mccodeemitter"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
@@ -27,29 +26,50 @@
using namespace llvm;
+#define DEBUG_TYPE "mccodeemitter"
+
namespace {
class X86MCCodeEmitter : public MCCodeEmitter {
X86MCCodeEmitter(const X86MCCodeEmitter &) LLVM_DELETED_FUNCTION;
void operator=(const X86MCCodeEmitter &) LLVM_DELETED_FUNCTION;
const MCInstrInfo &MCII;
- const MCSubtargetInfo &STI;
MCContext &Ctx;
public:
- X86MCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti,
- MCContext &ctx)
- : MCII(mcii), STI(sti), Ctx(ctx) {
+ X86MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
+ : MCII(mcii), Ctx(ctx) {
}
~X86MCCodeEmitter() {}
- bool is64BitMode() const {
- // FIXME: Can tablegen auto-generate this?
+ bool is64BitMode(const MCSubtargetInfo &STI) const {
return (STI.getFeatureBits() & X86::Mode64Bit) != 0;
}
- bool is32BitMode() const {
- // FIXME: Can tablegen auto-generate this?
- return (STI.getFeatureBits() & X86::Mode64Bit) == 0;
+ bool is32BitMode(const MCSubtargetInfo &STI) const {
+ return (STI.getFeatureBits() & X86::Mode32Bit) != 0;
+ }
+
+ bool is16BitMode(const MCSubtargetInfo &STI) const {
+ return (STI.getFeatureBits() & X86::Mode16Bit) != 0;
+ }
+
+ /// Is16BitMemOperand - Return true if the specified instruction has
+ /// a 16-bit memory operand. Op specifies the operand # of the memoperand.
+ bool Is16BitMemOperand(const MCInst &MI, unsigned Op,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
+ const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
+ const MCOperand &Disp = MI.getOperand(Op+X86::AddrDisp);
+
+ if (is16BitMode(STI) && BaseReg.getReg() == 0 &&
+ Disp.isImm() && Disp.getImm() < 0x10000)
+ return true;
+ if ((BaseReg.getReg() != 0 &&
+ X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg.getReg())) ||
+ (IndexReg.getReg() != 0 &&
+ X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg.getReg())))
+ return true;
+ return false;
}
unsigned GetX86RegNum(const MCOperand &MO) const {
@@ -126,21 +146,23 @@ public:
void EmitMemModRMByte(const MCInst &MI, unsigned Op,
unsigned RegOpcodeField,
uint64_t TSFlags, unsigned &CurByte, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const override;
void EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
const MCInst &MI, const MCInstrDesc &Desc,
raw_ostream &OS) const;
- void EmitSegmentOverridePrefix(uint64_t TSFlags, unsigned &CurByte,
- int MemOperand, const MCInst &MI,
- raw_ostream &OS) const;
+ void EmitSegmentOverridePrefix(unsigned &CurByte, unsigned SegOperand,
+ const MCInst &MI, raw_ostream &OS) const;
void EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
const MCInst &MI, const MCInstrDesc &Desc,
+ const MCSubtargetInfo &STI,
raw_ostream &OS) const;
};
@@ -151,7 +173,7 @@ MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
MCContext &Ctx) {
- return new X86MCCodeEmitter(MCII, STI, Ctx);
+ return new X86MCCodeEmitter(MCII, Ctx);
}
/// isDisp8 - Return true if this signed displacement fits in an 8-bit
@@ -163,42 +185,22 @@ static bool isDisp8(int Value) {
/// isCDisp8 - Return true if this signed displacement fits in an 8-bit
/// compressed displacement field.
static bool isCDisp8(uint64_t TSFlags, int Value, int& CValue) {
- assert(((TSFlags >> X86II::VEXShift) & X86II::EVEX) &&
+ assert(((TSFlags & X86II::EncodingMask) >>
+ X86II::EncodingShift == X86II::EVEX) &&
"Compressed 8-bit displacement is only valid for EVEX inst.");
- unsigned CD8E = (TSFlags >> X86II::EVEX_CD8EShift) & X86II::EVEX_CD8EMask;
- unsigned CD8V = (TSFlags >> X86II::EVEX_CD8VShift) & X86II::EVEX_CD8VMask;
-
- if (CD8V == 0 && CD8E == 0) {
+ unsigned CD8_Scale =
+ (TSFlags >> X86II::CD8_Scale_Shift) & X86II::CD8_Scale_Mask;
+ if (CD8_Scale == 0) {
CValue = Value;
return isDisp8(Value);
}
-
- unsigned MemObjSize = 1U << CD8E;
- if (CD8V & 4) {
- // Fixed vector length
- MemObjSize *= 1U << (CD8V & 0x3);
- } else {
- // Modified vector length
- bool EVEX_b = (TSFlags >> X86II::VEXShift) & X86II::EVEX_B;
- if (!EVEX_b) {
- unsigned EVEX_LL = ((TSFlags >> X86II::VEXShift) & X86II::VEX_L) ? 1 : 0;
- EVEX_LL += ((TSFlags >> X86II::VEXShift) & X86II::EVEX_L2) ? 2 : 0;
- assert(EVEX_LL < 3 && "");
-
- unsigned NumElems = (1U << (EVEX_LL + 4)) / MemObjSize;
- NumElems /= 1U << (CD8V & 0x3);
-
- MemObjSize *= NumElems;
- }
- }
- unsigned MemObjMask = MemObjSize - 1;
- assert((MemObjSize & MemObjMask) == 0 && "Invalid memory object size.");
-
- if (Value & MemObjMask) // Unaligned offset
+ unsigned Mask = CD8_Scale - 1;
+ assert((CD8_Scale & Mask) == 0 && "Invalid memory object size.");
+ if (Value & Mask) // Unaligned offset
return false;
- Value /= MemObjSize;
+ Value /= (int)CD8_Scale;
bool Ret = (Value == (signed char)Value);
if (Ret)
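// Illustrative sketch (not part of the diff): the compressed-disp8 rule the
// new isCDisp8 body implements, restated as a standalone function with a
// couple of worked values. CD8_Scale is a plain parameter here instead of a
// field extracted from TSFlags; the function name is hypothetical.
#include <cassert>

static bool fitsCompressedDisp8(int Value, unsigned Scale, int &CValue) {
  if (Scale == 0) {                 // no compression: plain disp8 test
    CValue = Value;
    return Value == (signed char)Value;
  }
  if (Value % (int)Scale != 0)      // unaligned offsets cannot be compressed
    return false;
  int Compressed = Value / (int)Scale;
  if (Compressed != (signed char)Compressed)
    return false;
  CValue = Compressed;
  return true;
}

int main() {
  int C;
  assert(fitsCompressedDisp8(4096, 64, C) && C == 64); // 4096 = 64 * 64
  assert(!fitsCompressedDisp8(100, 64, C));            // not a multiple of 64
  assert(!fitsCompressedDisp8(16384, 64, C));          // 256 overflows disp8
  return 0;
}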
@@ -212,6 +214,12 @@ static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
unsigned Size = X86II::getSizeOfImm(TSFlags);
bool isPCRel = X86II::isImmPCRel(TSFlags);
+ if (X86II::isImmSigned(TSFlags)) {
+ switch (Size) {
+ default: llvm_unreachable("Unsupported signed fixup size!");
+ case 4: return MCFixupKind(X86::reloc_signed_4byte);
+ }
+ }
return MCFixup::getKindForSize(Size, isPCRel);
}
@@ -245,20 +253,6 @@ static bool Is64BitMemOperand(const MCInst &MI, unsigned Op) {
}
#endif
-/// Is16BitMemOperand - Return true if the specified instruction has
-/// a 16-bit memory operand. Op specifies the operand # of the memoperand.
-static bool Is16BitMemOperand(const MCInst &MI, unsigned Op) {
- const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
- const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
-
- if ((BaseReg.getReg() != 0 &&
- X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg.getReg())) ||
- (IndexReg.getReg() != 0 &&
- X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg.getReg())))
- return true;
- return false;
-}
-
/// StartsWithGlobalOffsetTable - Check if this expression starts with
/// _GLOBAL_OFFSET_TABLE_ and if it is of the form
/// _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on ELF
@@ -272,7 +266,7 @@ enum GlobalOffsetTableExprKind {
};
static GlobalOffsetTableExprKind
StartsWithGlobalOffsetTable(const MCExpr *Expr) {
- const MCExpr *RHS = 0;
+ const MCExpr *RHS = nullptr;
if (Expr->getKind() == MCExpr::Binary) {
const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
Expr = BE->getLHS();
@@ -303,7 +297,7 @@ void X86MCCodeEmitter::
EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
MCFixupKind FixupKind, unsigned &CurByte, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups, int ImmOffset) const {
- const MCExpr *Expr = NULL;
+ const MCExpr *Expr = nullptr;
if (DispOp.isImm()) {
// If this is a simple integer displacement that doesn't require a
// relocation, emit it now.
@@ -326,7 +320,13 @@ EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
if (Kind != GOT_None) {
assert(ImmOffset == 0);
- FixupKind = MCFixupKind(X86::reloc_global_offset_table);
+ if (Size == 8) {
+ FixupKind = MCFixupKind(X86::reloc_global_offset_table8);
+ } else {
+ assert(Size == 4);
+ FixupKind = MCFixupKind(X86::reloc_global_offset_table);
+ }
+
if (Kind == GOT_Normal)
ImmOffset = CurByte;
} else if (Expr->getKind() == MCExpr::SymbolRef) {
@@ -366,17 +366,20 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
unsigned RegOpcodeField,
uint64_t TSFlags, unsigned &CurByte,
raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const{
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const{
const MCOperand &Disp = MI.getOperand(Op+X86::AddrDisp);
const MCOperand &Base = MI.getOperand(Op+X86::AddrBaseReg);
const MCOperand &Scale = MI.getOperand(Op+X86::AddrScaleAmt);
const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
unsigned BaseReg = Base.getReg();
- bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX;
+ unsigned char Encoding = (TSFlags & X86II::EncodingMask) >>
+ X86II::EncodingShift;
+ bool HasEVEX = (Encoding == X86II::EVEX);
// Handle %rip relative addressing.
if (BaseReg == X86::RIP) { // [disp32+RIP] in X86-64 mode
- assert(is64BitMode() && "Rip-relative addressing requires 64-bit mode");
+ assert(is64BitMode(STI) && "Rip-relative addressing requires 64-bit mode");
assert(IndexReg.getReg() == 0 && "Invalid rip-relative address");
EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
@@ -402,6 +405,66 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
unsigned BaseRegNo = BaseReg ? GetX86RegNum(Base) : -1U;
+ // 16-bit addressing forms of the ModR/M byte have a different encoding for
+ // the R/M field and are far more limited in which registers can be used.
+ if (Is16BitMemOperand(MI, Op, STI)) {
+ if (BaseReg) {
+ // For 32-bit addressing, the row and column values in Table 2-2 are
+ // basically the same. It's AX/CX/DX/BX/SP/BP/SI/DI in that order, with
+ // some special cases. And GetX86RegNum reflects that numbering.
+ // For 16-bit addressing it's more fun, as shown in the SDM Vol 2A,
+ // Table 2-1 "16-Bit Addressing Forms with the ModR/M byte". We can only
+ // use SI/DI/BP/BX, which have "row" values 4-7 in no particular order,
+ // while values 0-3 indicate the allowed combinations (base+index) of
+ // those: 0 for BX+SI, 1 for BX+DI, 2 for BP+SI, 3 for BP+DI.
+ //
+ // R16Table[] is a lookup from the normal RegNo, to the row values from
+ // Table 2-1 for 16-bit addressing modes. Where zero means disallowed.
+ static const unsigned R16Table[] = { 0, 0, 0, 7, 0, 6, 4, 5 };
+ unsigned RMfield = R16Table[BaseRegNo];
+
+ assert(RMfield && "invalid 16-bit base register");
+
+ if (IndexReg.getReg()) {
+ unsigned IndexReg16 = R16Table[GetX86RegNum(IndexReg)];
+
+ assert(IndexReg16 && "invalid 16-bit index register");
+ // We must have one of SI/DI (4,5), and one of BP/BX (6,7).
+ assert(((IndexReg16 ^ RMfield) & 2) &&
+ "invalid 16-bit base/index register combination");
+ assert(Scale.getImm() == 1 &&
+ "invalid scale for 16-bit memory reference");
+
+ // Allow base/index to appear in either order (although GAS doesn't).
+ if (IndexReg16 & 2)
+ RMfield = (RMfield & 1) | ((7 - IndexReg16) << 1);
+ else
+ RMfield = (IndexReg16 & 1) | ((7 - RMfield) << 1);
+ }
+
+ if (Disp.isImm() && isDisp8(Disp.getImm())) {
+ if (Disp.getImm() == 0 && BaseRegNo != N86::EBP) {
+ // There is no displacement; just the register.
+ EmitByte(ModRMByte(0, RegOpcodeField, RMfield), CurByte, OS);
+ return;
+ }
+ // Use the [REG]+disp8 form, including for [BP] which cannot be encoded.
+ EmitByte(ModRMByte(1, RegOpcodeField, RMfield), CurByte, OS);
+ EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
+ return;
+ }
+ // This is the [REG]+disp16 case.
+ EmitByte(ModRMByte(2, RegOpcodeField, RMfield), CurByte, OS);
+ } else {
+ // There is no BaseReg; this is the plain [disp16] case.
+ EmitByte(ModRMByte(0, RegOpcodeField, 6), CurByte, OS);
+ }
+
+ // Emit 16-bit displacement for plain disp16 or [REG]+disp16 cases.
+ EmitImmediate(Disp, MI.getLoc(), 2, FK_Data_2, CurByte, OS, Fixups);
+ return;
+ }
+
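+ // Illustrative sketch (not part of the diff): the 16-bit R/M folding the
+ // block above performs, with two worked register pairs. The row values
+ // (SI=4, DI=5, BP=6, BX=7) come straight from the R16Table comment; the
+ // helper name is hypothetical.
+ //
+ //   #include <cassert>
+ //
+ //   // Fold a base row (6=BP, 7=BX) and index row (4=SI, 5=DI) into the
+ //   // R/M values 0..3 of SDM Table 2-1 (0=[BX+SI], 1=[BX+DI],
+ //   // 2=[BP+SI], 3=[BP+DI]).
+ //   static unsigned fold16BitRM(unsigned BaseRow, unsigned IndexRow) {
+ //     if (IndexRow & 2)  // index is BP/BX, base is SI/DI
+ //       return (BaseRow & 1) | ((7 - IndexRow) << 1);
+ //     return (IndexRow & 1) | ((7 - BaseRow) << 1);
+ //   }
+ //
+ //   int main() {
+ //     assert(fold16BitRM(7, 4) == 0); // [BX+SI]
+ //     assert(fold16BitRM(6, 5) == 3); // [BP+DI]
+ //     // With mod=01 and disp8=0x12, "mov ax, [bx+si+0x12]" then gets
+ //     // ModRM = 0x40 (mod=01, reg=AX=0, rm=0) followed by the byte 0x12.
+ //     return 0;
+ //   }
+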
// Determine whether a SIB byte is needed.
// If no BaseReg, issue a RIP relative instruction only if the MCE can
// resolve addresses on-the-fly, otherwise use SIB (Intel Manual 2A, table
@@ -415,7 +478,7 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
BaseRegNo != N86::ESP &&
// If there is no base register and we're in 64-bit mode, we need a SIB
// byte to emit an addr that is just 'disp32' (the non-RIP relative form).
- (!is64BitMode() || BaseReg != 0)) {
+ (!is64BitMode(STI) || BaseReg != 0)) {
if (BaseReg == 0) { // [disp32] in X86-32 mode
EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
@@ -530,11 +593,13 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
int MemOperand, const MCInst &MI,
const MCInstrDesc &Desc,
raw_ostream &OS) const {
- bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX;
- bool HasEVEX_K = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
+ unsigned char Encoding = (TSFlags & X86II::EncodingMask) >>
+ X86II::EncodingShift;
+ bool HasEVEX_K = ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3;
bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
+ bool HasEVEX_RC = (TSFlags >> X86II::VEXShift) & X86II::EVEX_RC;
// VEX_R: opcode extension equivalent to REX.R in
// 1's complement (inverted) form
@@ -563,9 +628,6 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// opcode extension, or ignored, depending on the opcode byte)
unsigned char VEX_W = 0;
- // XOP: Use XOP prefix byte 0x8f instead of VEX.
- bool XOP = false;
-
// VEX_5M (VEX m-mmmmm field):
//
// 0b00000: Reserved for future use
@@ -576,7 +638,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// 0b01000: XOP map select - 08h instructions with imm byte
// 0b01001: XOP map select - 09h instructions with no imm byte
// 0b01010: XOP map select - 0Ah instructions with imm dword
- unsigned char VEX_5M = 0x1;
+ unsigned char VEX_5M = 0;
// VEX_4V (VEX vvvv field): a register specifier
// (in 1's complement form) or 1111 if unused.
@@ -610,91 +672,53 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// EVEX_b
unsigned char EVEX_b = 0;
+ // EVEX_rc
+ unsigned char EVEX_rc = 0;
+
// EVEX_aaa
unsigned char EVEX_aaa = 0;
- // Encode the operand size opcode prefix as needed.
- if (TSFlags & X86II::OpSize)
- VEX_PP = 0x01;
+ bool EncodeRC = false;
if ((TSFlags >> X86II::VEXShift) & X86II::VEX_W)
VEX_W = 1;
- if ((TSFlags >> X86II::VEXShift) & X86II::XOP)
- XOP = true;
-
if ((TSFlags >> X86II::VEXShift) & X86II::VEX_L)
VEX_L = 1;
- if (HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_L2))
+ if (((TSFlags >> X86II::VEXShift) & X86II::EVEX_L2))
EVEX_L2 = 1;
if (HasEVEX_K && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_Z))
EVEX_z = 1;
- if (HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_B))
+ if (((TSFlags >> X86II::VEXShift) & X86II::EVEX_B))
EVEX_b = 1;
- switch (TSFlags & X86II::Op0Mask) {
- default: llvm_unreachable("Invalid prefix!");
- case X86II::T8: // 0F 38
- VEX_5M = 0x2;
- break;
- case X86II::TA: // 0F 3A
- VEX_5M = 0x3;
- break;
- case X86II::T8XS: // F3 0F 38
- VEX_PP = 0x2;
- VEX_5M = 0x2;
- break;
- case X86II::T8XD: // F2 0F 38
- VEX_PP = 0x3;
- VEX_5M = 0x2;
- break;
- case X86II::TAXD: // F2 0F 3A
- VEX_PP = 0x3;
- VEX_5M = 0x3;
- break;
- case X86II::XS: // F3 0F
- VEX_PP = 0x2;
- break;
- case X86II::XD: // F2 0F
- VEX_PP = 0x3;
- break;
- case X86II::XOP8:
- VEX_5M = 0x8;
- break;
- case X86II::XOP9:
- VEX_5M = 0x9;
- break;
- case X86II::XOPA:
- VEX_5M = 0xA;
- break;
- case X86II::TB: // VEX_5M/VEX_PP already correct
- break;
+ switch (TSFlags & X86II::OpPrefixMask) {
+ default: break; // VEX_PP already correct
+ case X86II::PD: VEX_PP = 0x1; break; // 66
+ case X86II::XS: VEX_PP = 0x2; break; // F3
+ case X86II::XD: VEX_PP = 0x3; break; // F2
}
+ switch (TSFlags & X86II::OpMapMask) {
+ default: llvm_unreachable("Invalid prefix!");
+ case X86II::TB: VEX_5M = 0x1; break; // 0F
+ case X86II::T8: VEX_5M = 0x2; break; // 0F 38
+ case X86II::TA: VEX_5M = 0x3; break; // 0F 3A
+ case X86II::XOP8: VEX_5M = 0x8; break;
+ case X86II::XOP9: VEX_5M = 0x9; break;
+ case X86II::XOPA: VEX_5M = 0xA; break;
+ }
// Classify VEX_B, VEX_4V, VEX_R, VEX_X
unsigned NumOps = Desc.getNumOperands();
- unsigned CurOp = 0;
- if (NumOps > 1 && Desc.getOperandConstraint(1, MCOI::TIED_TO) == 0)
- ++CurOp;
- else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0 &&
- Desc.getOperandConstraint(3, MCOI::TIED_TO) == 1)
- // Special case for AVX-512 GATHER with 2 TIED_TO operands
- // Skip the first 2 operands: dst, mask_wb
- CurOp += 2;
- else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0 &&
- Desc.getOperandConstraint(NumOps - 1, MCOI::TIED_TO) == 1)
- // Special case for GATHER with 2 TIED_TO operands
- // Skip the first 2 operands: dst, mask_wb
- CurOp += 2;
- else if (NumOps > 2 && Desc.getOperandConstraint(NumOps - 2, MCOI::TIED_TO) == 0)
- // SCATTER
- ++CurOp;
+ unsigned CurOp = X86II::getOperandBias(Desc);
switch (TSFlags & X86II::FormMask) {
- case X86II::MRMInitReg: llvm_unreachable("FIXME: Remove this!");
+ default: llvm_unreachable("Unexpected form in EmitVEXOpcodePrefix!");
+ case X86II::RawFrm:
+ break;
case X86II::MRMDestMem: {
// MRMDestMem instructions forms:
// MemAddr, src1(ModR/M)
@@ -707,7 +731,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand +
X86::AddrIndexReg).getReg()))
VEX_X = 0x0;
- if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(MemOperand +
+ if (X86II::is32ExtendedReg(MI.getOperand(MemOperand +
X86::AddrIndexReg).getReg()))
EVEX_V2 = 0x0;
@@ -718,7 +742,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
if (HasVEX_4V) {
VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
EVEX_V2 = 0x0;
CurOp++;
}
@@ -727,7 +751,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
if (MO.isReg()) {
if (X86II::isX86_64ExtendedReg(MO.getReg()))
VEX_R = 0x0;
- if (HasEVEX && X86II::is32ExtendedReg(MO.getReg()))
+ if (X86II::is32ExtendedReg(MO.getReg()))
EVEX_R2 = 0x0;
}
break;
@@ -744,7 +768,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
VEX_R = 0x0;
- if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
EVEX_R2 = 0x0;
CurOp++;
@@ -753,7 +777,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
if (HasVEX_4V) {
VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
EVEX_V2 = 0x0;
CurOp++;
}
@@ -764,8 +788,8 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
if (X86II::isX86_64ExtendedReg(
MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
VEX_X = 0x0;
- if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(MemOperand +
- X86::AddrIndexReg).getReg()))
+ if (X86II::is32ExtendedReg(MI.getOperand(MemOperand +
+ X86::AddrIndexReg).getReg()))
EVEX_V2 = 0x0;
if (HasVEX_4VOp3)
@@ -785,7 +809,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// src1(VEX_4V), MemAddr
if (HasVEX_4V) {
VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
EVEX_V2 = 0x0;
CurOp++;
}
@@ -812,7 +836,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
VEX_R = 0x0;
- if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
EVEX_R2 = 0x0;
CurOp++;
@@ -821,7 +845,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
if (HasVEX_4V) {
VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
EVEX_V2 = 0x0;
CurOp++;
}
@@ -831,11 +855,19 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
VEX_B = 0x0;
- if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
VEX_X = 0x0;
CurOp++;
if (HasVEX_4VOp3)
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
+ if (EVEX_b) {
+ if (HasEVEX_RC) {
+ unsigned RcOperand = NumOps-1;
+ assert(RcOperand >= CurOp);
+ EVEX_rc = MI.getOperand(RcOperand).getImm() & 0x3;
+ }
+ EncodeRC = true;
+ }
break;
case X86II::MRMDestReg:
// MRMDestReg instructions forms:
@@ -844,7 +876,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// dst(ModR/M), src1(VEX_4V), src2(ModR/M)
if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
VEX_B = 0x0;
- if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
VEX_X = 0x0;
CurOp++;
@@ -853,15 +885,17 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
if (HasVEX_4V) {
VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
EVEX_V2 = 0x0;
CurOp++;
}
if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
VEX_R = 0x0;
- if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
EVEX_R2 = 0x0;
+ if (EVEX_b)
+ EncodeRC = true;
break;
case X86II::MRM0r: case X86II::MRM1r:
case X86II::MRM2r: case X86II::MRM3r:
@@ -871,26 +905,21 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// dst(VEX_4V), src(ModR/M), imm8
if (HasVEX_4V) {
VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
EVEX_V2 = 0x0;
CurOp++;
- }
+ }
if (HasEVEX_K)
EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
VEX_B = 0x0;
- if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
+ if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
VEX_X = 0x0;
break;
- default: // RawFrm
- break;
}
- // Emit segment override opcode prefix as needed.
- EmitSegmentOverridePrefix(TSFlags, CurByte, MemOperand, MI, OS);
-
- if (!HasEVEX) {
+ if (Encoding == X86II::VEX || Encoding == X86II::XOP) {
// VEX opcode prefix can have 2 or 3 bytes
//
// 3 bytes:
@@ -902,19 +931,25 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// | C5h | | R | vvvv | L | pp |
// +-----+ +-------------------+
//
+ // XOP uses a similar prefix:
+ // +-----+ +--------------+ +-------------------+
+ // | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
+ // +-----+ +--------------+ +-------------------+
unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
- if (VEX_B && VEX_X && !VEX_W && !XOP && (VEX_5M == 1)) { // 2 byte VEX prefix
+ // Can we use the 2 byte VEX prefix?
+ if (Encoding == X86II::VEX && VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) {
EmitByte(0xC5, CurByte, OS);
EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
return;
}
// 3 byte VEX prefix
- EmitByte(XOP ? 0x8F : 0xC4, CurByte, OS);
+ EmitByte(Encoding == X86II::XOP ? 0x8F : 0xC4, CurByte, OS);
EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS);
EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
} else {
+ assert(Encoding == X86II::EVEX && "unknown encoding!");
// EVEX opcode prefix can have 4 bytes
//
// +-----+ +--------------+ +-------------------+ +------------------------+
@@ -935,12 +970,19 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
(VEX_4V << 3) |
(EVEX_U << 2) |
VEX_PP, CurByte, OS);
- EmitByte((EVEX_z << 7) |
- (EVEX_L2 << 6) |
- (VEX_L << 5) |
- (EVEX_b << 4) |
- (EVEX_V2 << 3) |
- EVEX_aaa, CurByte, OS);
+ if (EncodeRC)
+ EmitByte((EVEX_z << 7) |
+ (EVEX_rc << 5) |
+ (EVEX_b << 4) |
+ (EVEX_V2 << 3) |
+ EVEX_aaa, CurByte, OS);
+ else
+ EmitByte((EVEX_z << 7) |
+ (EVEX_L2 << 6) |
+ (VEX_L << 5) |
+ (EVEX_b << 4) |
+ (EVEX_V2 << 3) |
+ EVEX_aaa, CurByte, OS);
}
}
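// Illustrative sketch (not part of the diff): assembling the two-byte VEX
// prefix exactly as the hunk above lays it out, for one concrete case.
// "vaddps xmm0, xmm1, xmm2" is the worked example; the field values below
// are derived by hand, not taken from tablegen, and the helper is
// hypothetical.
#include <cassert>
#include <cstdint>

static uint8_t twoByteVEX(unsigned R, unsigned VVVV, unsigned L, unsigned PP) {
  // Second prefix byte: R | vvvv | L | pp (R and vvvv are stored inverted).
  return (uint8_t)((R << 7) | (VVVV << 3) | (L << 2) | PP);
}

int main() {
  // vaddps xmm0, xmm1, xmm2: dest xmm0 -> VEX.R = 1 (inverted, reg < 8),
  // first source xmm1 -> vvvv = ~1 & 0xF = 0xE, 128-bit -> L = 0, PS -> pp = 0.
  assert(twoByteVEX(1, 0xE, 0, 0) == 0xF0);
  // The full instruction would then be C5 F0 58 C2
  // (0xC5 escape, the byte above, opcode 0x58, ModRM 0xC2).
  return 0;
}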
@@ -974,7 +1016,6 @@ static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
}
switch (TSFlags & X86II::FormMask) {
- case X86II::MRMInitReg: llvm_unreachable("FIXME: Remove this!");
case X86II::MRMSrcReg:
if (MI.getOperand(0).isReg() &&
X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
@@ -1002,6 +1043,7 @@ static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
}
break;
}
+ case X86II::MRMXm:
case X86II::MRM0m: case X86II::MRM1m:
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
@@ -1039,33 +1081,20 @@ static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
}
/// EmitSegmentOverridePrefix - Emit segment override opcode prefix as needed
-void X86MCCodeEmitter::EmitSegmentOverridePrefix(uint64_t TSFlags,
- unsigned &CurByte, int MemOperand,
- const MCInst &MI,
- raw_ostream &OS) const {
- switch (TSFlags & X86II::SegOvrMask) {
- default: llvm_unreachable("Invalid segment!");
- case 0:
- // No segment override, check for explicit one on memory operand.
- if (MemOperand != -1) { // If the instruction has a memory operand.
- switch (MI.getOperand(MemOperand+X86::AddrSegmentReg).getReg()) {
- default: llvm_unreachable("Unknown segment register!");
- case 0: break;
- case X86::CS: EmitByte(0x2E, CurByte, OS); break;
- case X86::SS: EmitByte(0x36, CurByte, OS); break;
- case X86::DS: EmitByte(0x3E, CurByte, OS); break;
- case X86::ES: EmitByte(0x26, CurByte, OS); break;
- case X86::FS: EmitByte(0x64, CurByte, OS); break;
- case X86::GS: EmitByte(0x65, CurByte, OS); break;
- }
- }
- break;
- case X86II::FS:
- EmitByte(0x64, CurByte, OS);
- break;
- case X86II::GS:
- EmitByte(0x65, CurByte, OS);
- break;
+void X86MCCodeEmitter::EmitSegmentOverridePrefix(unsigned &CurByte,
+ unsigned SegOperand,
+ const MCInst &MI,
+ raw_ostream &OS) const {
+ // Check for explicit segment override on memory operand.
+ switch (MI.getOperand(SegOperand).getReg()) {
+ default: llvm_unreachable("Unknown segment register!");
+ case 0: break;
+ case X86::CS: EmitByte(0x2E, CurByte, OS); break;
+ case X86::SS: EmitByte(0x36, CurByte, OS); break;
+ case X86::DS: EmitByte(0x3E, CurByte, OS); break;
+ case X86::ES: EmitByte(0x26, CurByte, OS); break;
+ case X86::FS: EmitByte(0x64, CurByte, OS); break;
+ case X86::GS: EmitByte(0x65, CurByte, OS); break;
}
}
@@ -1076,118 +1105,56 @@ void X86MCCodeEmitter::EmitSegmentOverridePrefix(uint64_t TSFlags,
void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
int MemOperand, const MCInst &MI,
const MCInstrDesc &Desc,
+ const MCSubtargetInfo &STI,
raw_ostream &OS) const {
- // Emit the lock opcode prefix as needed.
- if (TSFlags & X86II::LOCK)
- EmitByte(0xF0, CurByte, OS);
-
- // Emit segment override opcode prefix as needed.
- EmitSegmentOverridePrefix(TSFlags, CurByte, MemOperand, MI, OS);
-
- // Emit the repeat opcode prefix as needed.
- if ((TSFlags & X86II::Op0Mask) == X86II::REP)
- EmitByte(0xF3, CurByte, OS);
-
- // Emit the address size opcode prefix as needed.
- bool need_address_override;
- if (TSFlags & X86II::AdSize) {
- need_address_override = true;
- } else if (MemOperand == -1) {
- need_address_override = false;
- } else if (is64BitMode()) {
- assert(!Is16BitMemOperand(MI, MemOperand));
- need_address_override = Is32BitMemOperand(MI, MemOperand);
- } else if (is32BitMode()) {
- assert(!Is64BitMemOperand(MI, MemOperand));
- need_address_override = Is16BitMemOperand(MI, MemOperand);
- } else {
- need_address_override = false;
- }
-
- if (need_address_override)
- EmitByte(0x67, CurByte, OS);
-
// Emit the operand size opcode prefix as needed.
- if (TSFlags & X86II::OpSize)
+ unsigned char OpSize = (TSFlags & X86II::OpSizeMask) >> X86II::OpSizeShift;
+ if (OpSize == (is16BitMode(STI) ? X86II::OpSize32 : X86II::OpSize16))
EmitByte(0x66, CurByte, OS);
- bool Need0FPrefix = false;
- switch (TSFlags & X86II::Op0Mask) {
- default: llvm_unreachable("Invalid prefix!");
- case 0: break; // No prefix!
- case X86II::REP: break; // already handled.
- case X86II::TB: // Two-byte opcode prefix
- case X86II::T8: // 0F 38
- case X86II::TA: // 0F 3A
- case X86II::A6: // 0F A6
- case X86II::A7: // 0F A7
- Need0FPrefix = true;
- break;
- case X86II::T8XS: // F3 0F 38
- EmitByte(0xF3, CurByte, OS);
- Need0FPrefix = true;
- break;
- case X86II::T8XD: // F2 0F 38
- EmitByte(0xF2, CurByte, OS);
- Need0FPrefix = true;
- break;
- case X86II::TAXD: // F2 0F 3A
- EmitByte(0xF2, CurByte, OS);
- Need0FPrefix = true;
+ switch (TSFlags & X86II::OpPrefixMask) {
+ case X86II::PD: // 66
+ EmitByte(0x66, CurByte, OS);
break;
- case X86II::XS: // F3 0F
+ case X86II::XS: // F3
EmitByte(0xF3, CurByte, OS);
- Need0FPrefix = true;
break;
- case X86II::XD: // F2 0F
+ case X86II::XD: // F2
EmitByte(0xF2, CurByte, OS);
- Need0FPrefix = true;
break;
- case X86II::D8: EmitByte(0xD8, CurByte, OS); break;
- case X86II::D9: EmitByte(0xD9, CurByte, OS); break;
- case X86II::DA: EmitByte(0xDA, CurByte, OS); break;
- case X86II::DB: EmitByte(0xDB, CurByte, OS); break;
- case X86II::DC: EmitByte(0xDC, CurByte, OS); break;
- case X86II::DD: EmitByte(0xDD, CurByte, OS); break;
- case X86II::DE: EmitByte(0xDE, CurByte, OS); break;
- case X86II::DF: EmitByte(0xDF, CurByte, OS); break;
}
// Handle REX prefix.
// FIXME: Can this come before F2 etc to simplify emission?
- if (is64BitMode()) {
+ if (is64BitMode(STI)) {
if (unsigned REX = DetermineREXPrefix(MI, TSFlags, Desc))
EmitByte(0x40 | REX, CurByte, OS);
}
// 0x0F escape code must be emitted just before the opcode.
- if (Need0FPrefix)
+ switch (TSFlags & X86II::OpMapMask) {
+ case X86II::TB: // Two-byte opcode map
+ case X86II::T8: // 0F 38
+ case X86II::TA: // 0F 3A
EmitByte(0x0F, CurByte, OS);
+ break;
+ }
- // FIXME: Pull this up into previous switch if REX can be moved earlier.
- switch (TSFlags & X86II::Op0Mask) {
- case X86II::T8XS: // F3 0F 38
- case X86II::T8XD: // F2 0F 38
+ switch (TSFlags & X86II::OpMapMask) {
case X86II::T8: // 0F 38
EmitByte(0x38, CurByte, OS);
break;
- case X86II::TAXD: // F2 0F 3A
case X86II::TA: // 0F 3A
EmitByte(0x3A, CurByte, OS);
break;
- case X86II::A6: // 0F A6
- EmitByte(0xA6, CurByte, OS);
- break;
- case X86II::A7: // 0F A7
- EmitByte(0xA7, CurByte, OS);
- break;
}
}
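// Illustrative sketch (not part of the diff): the operand-size prefix rule
// used in EmitOpcodePrefix above, restated as a tiny predicate. The three
// states mirror OpSizeFixed/OpSize16/OpSize32; the enum and function names
// are made up for the example.
#include <cassert>

enum OpSizeKind { Fixed = 0, Bits16 = 1, Bits32 = 2 };

// 0x66 is needed whenever the instruction's operand-size class is the one
// that is *not* the default for the current mode.
static bool needs66Prefix(OpSizeKind K, bool In16BitMode) {
  return K == (In16BitMode ? Bits32 : Bits16);
}

int main() {
  assert(needs66Prefix(Bits16, /*In16BitMode=*/false)); // 16-bit op in 32/64-bit code
  assert(needs66Prefix(Bits32, /*In16BitMode=*/true));  // 32-bit op in 16-bit code
  assert(!needs66Prefix(Bits16, /*In16BitMode=*/true)); // already the mode default
  assert(!needs66Prefix(Fixed,  /*In16BitMode=*/false)); // never needs the prefix
  return 0;
}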
void X86MCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
unsigned Opcode = MI.getOpcode();
const MCInstrDesc &Desc = MCII.get(Opcode);
uint64_t TSFlags = Desc.TSFlags;
@@ -1202,8 +1169,9 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
// Keep track of the current byte being emitted.
unsigned CurByte = 0;
- // Is this instruction encoded using the AVX VEX prefix?
- bool HasVEXPrefix = (TSFlags >> X86II::VEXShift) & X86II::VEX;
+ // Encoding type for this instruction.
+ unsigned char Encoding = (TSFlags & X86II::EncodingMask) >>
+ X86II::EncodingShift;
// It uses the VEX.VVVV field?
bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
@@ -1212,15 +1180,58 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
const unsigned MemOp4_I8IMMOperand = 2;
// It uses the EVEX.aaa field?
- bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX;
- bool HasEVEX_K = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
-
+ bool HasEVEX_K = ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
+ bool HasEVEX_RC = ((TSFlags >> X86II::VEXShift) & X86II::EVEX_RC);
+
// Determine where the memory operand starts, if present.
int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode);
if (MemoryOperand != -1) MemoryOperand += CurOp;
- if (!HasVEXPrefix)
- EmitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
+ // Emit the lock opcode prefix as needed.
+ if (TSFlags & X86II::LOCK)
+ EmitByte(0xF0, CurByte, OS);
+
+ // Emit segment override opcode prefix as needed.
+ if (MemoryOperand >= 0)
+ EmitSegmentOverridePrefix(CurByte, MemoryOperand+X86::AddrSegmentReg,
+ MI, OS);
+
+ // Emit the repeat opcode prefix as needed.
+ if (TSFlags & X86II::REP)
+ EmitByte(0xF3, CurByte, OS);
+
+ // Emit the address size opcode prefix as needed.
+ bool need_address_override;
+ // The AdSize prefix is only for 32-bit and 64-bit modes. Hm, perhaps we
+ // should introduce an AdSize16 bit instead of having seven special cases?
+ if ((!is16BitMode(STI) && TSFlags & X86II::AdSize) ||
+ (is16BitMode(STI) && (MI.getOpcode() == X86::JECXZ_32 ||
+ MI.getOpcode() == X86::MOV8o8a ||
+ MI.getOpcode() == X86::MOV16o16a ||
+ MI.getOpcode() == X86::MOV32o32a ||
+ MI.getOpcode() == X86::MOV8ao8 ||
+ MI.getOpcode() == X86::MOV16ao16 ||
+ MI.getOpcode() == X86::MOV32ao32))) {
+ need_address_override = true;
+ } else if (MemoryOperand < 0) {
+ need_address_override = false;
+ } else if (is64BitMode(STI)) {
+ assert(!Is16BitMemOperand(MI, MemoryOperand, STI));
+ need_address_override = Is32BitMemOperand(MI, MemoryOperand);
+ } else if (is32BitMode(STI)) {
+ assert(!Is64BitMemOperand(MI, MemoryOperand));
+ need_address_override = Is16BitMemOperand(MI, MemoryOperand, STI);
+ } else {
+ assert(is16BitMode(STI));
+ assert(!Is64BitMemOperand(MI, MemoryOperand));
+ need_address_override = !Is16BitMemOperand(MI, MemoryOperand, STI);
+ }
+
+ if (need_address_override)
+ EmitByte(0x67, CurByte, OS);
+
+ if (Encoding == 0)
+ EmitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, STI, OS);
else
EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
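// Illustrative sketch (not part of the diff): the mode-versus-operand check
// that drives the 0x67 address-size override in the reorganized prologue
// above. The memory-operand width is a plain parameter here instead of being
// derived from the MCInst, and the AdSize/special-opcode cases are omitted.
#include <cassert>

enum Mode { M16, M32, M64 };

static bool needsAddrOverride(Mode M, unsigned MemOpBits) {
  switch (M) {
  case M64: return MemOpBits == 32; // 64-bit code using a 32-bit address
  case M32: return MemOpBits == 16; // 32-bit code using a 16-bit address
  case M16: return MemOpBits != 16; // 16-bit code using anything wider
  }
  return false;
}

int main() {
  assert(needsAddrOverride(M32, 16));
  assert(needsAddrOverride(M16, 32));
  assert(!needsAddrOverride(M64, 64));
  return 0;
}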
@@ -1231,15 +1242,62 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
unsigned SrcRegNum = 0;
switch (TSFlags & X86II::FormMask) {
- case X86II::MRMInitReg:
- llvm_unreachable("FIXME: Remove this form when the JIT moves to MCCodeEmitter!");
default: errs() << "FORM: " << (TSFlags & X86II::FormMask) << "\n";
llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
case X86II::Pseudo:
llvm_unreachable("Pseudo instruction shouldn't be emitted");
+ case X86II::RawFrmDstSrc: {
+ unsigned siReg = MI.getOperand(1).getReg();
+ assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||
+ (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||
+ (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&
+ "SI and DI register sizes do not match");
+ // Emit segment override opcode prefix as needed (not for %ds).
+ if (MI.getOperand(2).getReg() != X86::DS)
+ EmitSegmentOverridePrefix(CurByte, 2, MI, OS);
+ // Emit AdSize prefix as needed.
+ if ((!is32BitMode(STI) && siReg == X86::ESI) ||
+ (is32BitMode(STI) && siReg == X86::SI))
+ EmitByte(0x67, CurByte, OS);
+ CurOp += 3; // Consume operands.
+ EmitByte(BaseOpcode, CurByte, OS);
+ break;
+ }
+ case X86II::RawFrmSrc: {
+ unsigned siReg = MI.getOperand(0).getReg();
+ // Emit segment override opcode prefix as needed (not for %ds).
+ if (MI.getOperand(1).getReg() != X86::DS)
+ EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
+ // Emit AdSize prefix as needed.
+ if ((!is32BitMode(STI) && siReg == X86::ESI) ||
+ (is32BitMode(STI) && siReg == X86::SI))
+ EmitByte(0x67, CurByte, OS);
+ CurOp += 2; // Consume operands.
+ EmitByte(BaseOpcode, CurByte, OS);
+ break;
+ }
+ case X86II::RawFrmDst: {
+ unsigned siReg = MI.getOperand(0).getReg();
+ // Emit AdSize prefix as needed.
+ if ((!is32BitMode(STI) && siReg == X86::EDI) ||
+ (is32BitMode(STI) && siReg == X86::DI))
+ EmitByte(0x67, CurByte, OS);
+ ++CurOp; // Consume operand.
+ EmitByte(BaseOpcode, CurByte, OS);
+ break;
+ }
case X86II::RawFrm:
EmitByte(BaseOpcode, CurByte, OS);
break;
+ case X86II::RawFrmMemOffs:
+ // Emit segment override opcode prefix as needed.
+ EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
+ EmitByte(BaseOpcode, CurByte, OS);
+ EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
+ X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
+ CurByte, OS, Fixups);
+    ++CurOp; // Skip the segment operand.
+ break;
case X86II::RawFrmImm8:
EmitByte(BaseOpcode, CurByte, OS);
EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
@@ -1288,7 +1346,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
EmitMemModRMByte(MI, CurOp,
GetX86RegNum(MI.getOperand(SrcRegNum)),
- TSFlags, CurByte, OS, Fixups);
+ TSFlags, CurByte, OS, Fixups, STI);
CurOp = SrcRegNum + 1;
break;
@@ -1312,6 +1370,9 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
CurOp = HasMemOp4 ? SrcRegNum : SrcRegNum + 1;
if (HasVEX_4VOp3)
++CurOp;
+    // Do not count the rounding control operand.
+ if (HasEVEX_RC)
+ NumOps--;
break;
case X86II::MRMSrcMem: {
@@ -1333,49 +1394,69 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
EmitByte(BaseOpcode, CurByte, OS);
EmitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
- TSFlags, CurByte, OS, Fixups);
+ TSFlags, CurByte, OS, Fixups, STI);
CurOp += AddrOperands + 1;
if (HasVEX_4VOp3)
++CurOp;
break;
}
+ case X86II::MRMXr:
case X86II::MRM0r: case X86II::MRM1r:
case X86II::MRM2r: case X86II::MRM3r:
case X86II::MRM4r: case X86II::MRM5r:
- case X86II::MRM6r: case X86II::MRM7r:
+ case X86II::MRM6r: case X86II::MRM7r: {
if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
++CurOp;
+ if (HasEVEX_K) // Skip writemask
+ ++CurOp;
EmitByte(BaseOpcode, CurByte, OS);
+ uint64_t Form = TSFlags & X86II::FormMask;
EmitRegModRMByte(MI.getOperand(CurOp++),
- (TSFlags & X86II::FormMask)-X86II::MRM0r,
+ (Form == X86II::MRMXr) ? 0 : Form-X86II::MRM0r,
CurByte, OS);
break;
+ }
+
+ case X86II::MRMXm:
case X86II::MRM0m: case X86II::MRM1m:
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
- case X86II::MRM6m: case X86II::MRM7m:
+ case X86II::MRM6m: case X86II::MRM7m: {
if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
++CurOp;
+ if (HasEVEX_K) // Skip writemask
+ ++CurOp;
EmitByte(BaseOpcode, CurByte, OS);
- EmitMemModRMByte(MI, CurOp, (TSFlags & X86II::FormMask)-X86II::MRM0m,
- TSFlags, CurByte, OS, Fixups);
+ uint64_t Form = TSFlags & X86II::FormMask;
+ EmitMemModRMByte(MI, CurOp, (Form == X86II::MRMXm) ? 0 : Form-X86II::MRM0m,
+ TSFlags, CurByte, OS, Fixups, STI);
CurOp += X86::AddrNumOperands;
break;
- case X86II::MRM_C1: case X86II::MRM_C2: case X86II::MRM_C3:
- case X86II::MRM_C4: case X86II::MRM_C8: case X86II::MRM_C9:
- case X86II::MRM_CA: case X86II::MRM_CB: case X86II::MRM_D0:
- case X86II::MRM_D1: case X86II::MRM_D4: case X86II::MRM_D5:
- case X86II::MRM_D6: case X86II::MRM_D8: case X86II::MRM_D9:
- case X86II::MRM_DA: case X86II::MRM_DB: case X86II::MRM_DC:
- case X86II::MRM_DD: case X86II::MRM_DE: case X86II::MRM_DF:
- case X86II::MRM_E8: case X86II::MRM_F0: case X86II::MRM_F8:
- case X86II::MRM_F9:
+ }
+ case X86II::MRM_C0: case X86II::MRM_C1: case X86II::MRM_C2:
+ case X86II::MRM_C3: case X86II::MRM_C4: case X86II::MRM_C8:
+ case X86II::MRM_C9: case X86II::MRM_CA: case X86II::MRM_CB:
+ case X86II::MRM_D0: case X86II::MRM_D1: case X86II::MRM_D4:
+ case X86II::MRM_D5: case X86II::MRM_D6: case X86II::MRM_D8:
+ case X86II::MRM_D9: case X86II::MRM_DA: case X86II::MRM_DB:
+ case X86II::MRM_DC: case X86II::MRM_DD: case X86II::MRM_DE:
+ case X86II::MRM_DF: case X86II::MRM_E0: case X86II::MRM_E1:
+ case X86II::MRM_E2: case X86II::MRM_E3: case X86II::MRM_E4:
+ case X86II::MRM_E5: case X86II::MRM_E8: case X86II::MRM_E9:
+ case X86II::MRM_EA: case X86II::MRM_EB: case X86II::MRM_EC:
+ case X86II::MRM_ED: case X86II::MRM_EE: case X86II::MRM_F0:
+ case X86II::MRM_F1: case X86II::MRM_F2: case X86II::MRM_F3:
+ case X86II::MRM_F4: case X86II::MRM_F5: case X86II::MRM_F6:
+ case X86II::MRM_F7: case X86II::MRM_F8: case X86II::MRM_F9:
+ case X86II::MRM_FA: case X86II::MRM_FB: case X86II::MRM_FC:
+ case X86II::MRM_FD: case X86II::MRM_FE: case X86II::MRM_FF:
EmitByte(BaseOpcode, CurByte, OS);
unsigned char MRM;
switch (TSFlags & X86II::FormMask) {
default: llvm_unreachable("Invalid Form");
+ case X86II::MRM_C0: MRM = 0xC0; break;
case X86II::MRM_C1: MRM = 0xC1; break;
case X86II::MRM_C2: MRM = 0xC2; break;
case X86II::MRM_C3: MRM = 0xC3; break;
@@ -1397,10 +1478,35 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
case X86II::MRM_DD: MRM = 0xDD; break;
case X86II::MRM_DE: MRM = 0xDE; break;
case X86II::MRM_DF: MRM = 0xDF; break;
+ case X86II::MRM_E0: MRM = 0xE0; break;
+ case X86II::MRM_E1: MRM = 0xE1; break;
+ case X86II::MRM_E2: MRM = 0xE2; break;
+ case X86II::MRM_E3: MRM = 0xE3; break;
+ case X86II::MRM_E4: MRM = 0xE4; break;
+ case X86II::MRM_E5: MRM = 0xE5; break;
case X86II::MRM_E8: MRM = 0xE8; break;
+ case X86II::MRM_E9: MRM = 0xE9; break;
+ case X86II::MRM_EA: MRM = 0xEA; break;
+ case X86II::MRM_EB: MRM = 0xEB; break;
+ case X86II::MRM_EC: MRM = 0xEC; break;
+ case X86II::MRM_ED: MRM = 0xED; break;
+ case X86II::MRM_EE: MRM = 0xEE; break;
case X86II::MRM_F0: MRM = 0xF0; break;
+ case X86II::MRM_F1: MRM = 0xF1; break;
+ case X86II::MRM_F2: MRM = 0xF2; break;
+ case X86II::MRM_F3: MRM = 0xF3; break;
+ case X86II::MRM_F4: MRM = 0xF4; break;
+ case X86II::MRM_F5: MRM = 0xF5; break;
+ case X86II::MRM_F6: MRM = 0xF6; break;
+ case X86II::MRM_F7: MRM = 0xF7; break;
case X86II::MRM_F8: MRM = 0xF8; break;
case X86II::MRM_F9: MRM = 0xF9; break;
+ case X86II::MRM_FA: MRM = 0xFA; break;
+ case X86II::MRM_FB: MRM = 0xFB; break;
+ case X86II::MRM_FC: MRM = 0xFC; break;
+ case X86II::MRM_FD: MRM = 0xFD; break;
+ case X86II::MRM_FE: MRM = 0xFE; break;
+ case X86II::MRM_FF: MRM = 0xFF; break;
}
EmitByte(MRM, CurByte, OS);
break;
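
[Editor's note, not part of the patch: every MRM_XY case above simply emits the literal ModRM byte 0xXY after the base opcode. All of those bytes have mod = 11b, so each form fixes only the reg and r/m fields; a tiny standalone sketch of that byte layout:]

    #include <cassert>

    // ModRM layout: mod (bits 7-6) | reg/opcode (bits 5-3) | r/m (bits 2-0).
    static unsigned char modRM(unsigned Mod, unsigned Reg, unsigned RM) {
      return static_cast<unsigned char>(((Mod & 3) << 6) | ((Reg & 7) << 3) | (RM & 7));
    }

    int main() {
      assert(modRM(3, 0, 1) == 0xC1);  // MRM_C1
      assert(modRM(3, 7, 0) == 0xF8);  // MRM_F8
      assert(modRM(3, 7, 7) == 0xFF);  // MRM_FF, the new upper bound above
    }
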
@@ -1432,17 +1538,8 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
EmitImmediate(MCOperand::CreateImm(RegNum), MI.getLoc(), 1, FK_Data_1,
CurByte, OS, Fixups);
} else {
- unsigned FixupKind;
- // FIXME: Is there a better way to know that we need a signed relocation?
- if (MI.getOpcode() == X86::ADD64ri32 ||
- MI.getOpcode() == X86::MOV64ri32 ||
- MI.getOpcode() == X86::MOV64mi32 ||
- MI.getOpcode() == X86::PUSH64i32)
- FixupKind = X86::reloc_signed_4byte;
- else
- FixupKind = getImmFixupKind(TSFlags);
EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
- X86II::getSizeOfImm(TSFlags), MCFixupKind(FixupKind),
+ X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
CurByte, OS, Fixups);
}
}
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
index 1cbdafd..3bfad6c 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -27,6 +27,12 @@
#include "llvm/Support/Host.h"
#include "llvm/Support/TargetRegistry.h"
+#if _MSC_VER
+#include <intrin.h>
+#endif
+
+using namespace llvm;
+
#define GET_REGINFO_MC_DESC
#include "X86GenRegisterInfo.inc"
@@ -36,20 +42,16 @@
#define GET_SUBTARGETINFO_MC_DESC
#include "X86GenSubtargetInfo.inc"
-#if _MSC_VER
-#include <intrin.h>
-#endif
-
-using namespace llvm;
-
-
std::string X86_MC::ParseX86Triple(StringRef TT) {
Triple TheTriple(TT);
std::string FS;
if (TheTriple.getArch() == Triple::x86_64)
- FS = "+64bit-mode";
+ FS = "+64bit-mode,-32bit-mode,-16bit-mode";
+ else if (TheTriple.getEnvironment() != Triple::CODE16)
+ FS = "-64bit-mode,+32bit-mode,-16bit-mode";
else
- FS = "-64bit-mode";
+ FS = "-64bit-mode,-32bit-mode,+16bit-mode";
+
return FS;
}
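
[Editor's note, not part of the patch: ParseX86Triple now always enables exactly one of the three mode features, matching the Mode32Bit/Mode16Bit subtarget features added to X86.td further down. A standalone sketch of the mapping, with the Triple checks reduced to an enum:]

    #include <cassert>
    #include <string>

    enum class Kind { X86_64, X86_32, Code16 };

    static std::string modeFeatures(Kind K) {
      switch (K) {
      case Kind::X86_64: return "+64bit-mode,-32bit-mode,-16bit-mode";
      case Kind::X86_32: return "-64bit-mode,+32bit-mode,-16bit-mode";
      case Kind::Code16: return "-64bit-mode,-32bit-mode,+16bit-mode";
      }
      return "";
    }

    int main() {
      assert(modeFeatures(Kind::X86_64) == "+64bit-mode,-32bit-mode,-16bit-mode");
      assert(modeFeatures(Kind::Code16) == "-64bit-mode,-32bit-mode,+16bit-mode");
    }
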
@@ -195,15 +197,13 @@ void X86_MC::DetectFamilyModel(unsigned EAX, unsigned &Family,
}
}
-unsigned X86_MC::getDwarfRegFlavour(StringRef TT, bool isEH) {
- Triple TheTriple(TT);
- if (TheTriple.getArch() == Triple::x86_64)
+unsigned X86_MC::getDwarfRegFlavour(Triple TT, bool isEH) {
+ if (TT.getArch() == Triple::x86_64)
return DWARFFlavour::X86_64;
- if (TheTriple.isOSDarwin())
+ if (TT.isOSDarwin())
return isEH ? DWARFFlavour::X86_32_DarwinEH : DWARFFlavour::X86_32_Generic;
- if (TheTriple.getOS() == Triple::MinGW32 ||
- TheTriple.getOS() == Triple::Cygwin)
+ if (TT.isOSCygMing())
// Unsupported by now, just quick fallback
return DWARFFlavour::X86_32_Generic;
return DWARFFlavour::X86_32_Generic;
@@ -228,14 +228,8 @@ MCSubtargetInfo *X86_MC::createX86MCSubtargetInfo(StringRef TT, StringRef CPU,
}
std::string CPUName = CPU;
- if (CPUName.empty()) {
-#if defined(i386) || defined(__i386__) || defined(__x86__) || defined(_M_IX86)\
- || defined(__x86_64__) || defined(_M_AMD64) || defined (_M_X64)
- CPUName = sys::getHostCPUName();
-#else
+ if (CPUName.empty())
CPUName = "generic";
-#endif
- }
MCSubtargetInfo *X = new MCSubtargetInfo();
InitX86MCSubtargetInfo(X, TT, CPUName, ArchFS);
@@ -256,8 +250,8 @@ static MCRegisterInfo *createX86MCRegisterInfo(StringRef TT) {
MCRegisterInfo *X = new MCRegisterInfo();
InitX86MCRegisterInfo(X, RA,
- X86_MC::getDwarfRegFlavour(TT, false),
- X86_MC::getDwarfRegFlavour(TT, true),
+ X86_MC::getDwarfRegFlavour(TheTriple, false),
+ X86_MC::getDwarfRegFlavour(TheTriple, true),
RA);
X86_MC::InitLLVM2SEHRegisterMapping(X);
return X;
@@ -268,17 +262,18 @@ static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI, StringRef TT) {
bool is64Bit = TheTriple.getArch() == Triple::x86_64;
MCAsmInfo *MAI;
- if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO) {
+ if (TheTriple.isOSBinFormatMachO()) {
if (is64Bit)
MAI = new X86_64MCAsmInfoDarwin(TheTriple);
else
MAI = new X86MCAsmInfoDarwin(TheTriple);
- } else if (TheTriple.getEnvironment() == Triple::ELF) {
+ } else if (TheTriple.isOSBinFormatELF()) {
// Force the use of an ELF container.
MAI = new X86ELFMCAsmInfo(TheTriple);
- } else if (TheTriple.getOS() == Triple::Win32) {
+ } else if (TheTriple.isWindowsMSVCEnvironment()) {
MAI = new X86MCAsmInfoMicrosoft(TheTriple);
- } else if (TheTriple.getOS() == Triple::MinGW32 || TheTriple.getOS() == Triple::Cygwin) {
+ } else if (TheTriple.isOSCygMing() ||
+ TheTriple.isWindowsItaniumEnvironment()) {
MAI = new X86MCAsmInfoGNUCOFF(TheTriple);
} else {
// The default is ELF.
@@ -292,13 +287,13 @@ static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI, StringRef TT) {
// Initial state of the frame pointer is esp+stackGrowth.
unsigned StackPtr = is64Bit ? X86::RSP : X86::ESP;
MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(
- 0, MRI.getDwarfRegNum(StackPtr, true), -stackGrowth);
+ nullptr, MRI.getDwarfRegNum(StackPtr, true), -stackGrowth);
MAI->addInitialFrameState(Inst);
// Add return address to move list
unsigned InstPtr = is64Bit ? X86::RIP : X86::EIP;
MCCFIInstruction Inst2 = MCCFIInstruction::createOffset(
- 0, MRI.getDwarfRegNum(InstPtr, true), stackGrowth);
+ nullptr, MRI.getDwarfRegNum(InstPtr, true), stackGrowth);
MAI->addInitialFrameState(Inst2);
return MAI;
@@ -358,17 +353,21 @@ static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
MCContext &Ctx, MCAsmBackend &MAB,
raw_ostream &_OS,
MCCodeEmitter *_Emitter,
+ const MCSubtargetInfo &STI,
bool RelaxAll,
bool NoExecStack) {
Triple TheTriple(TT);
- if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO)
+ switch (TheTriple.getObjectFormat()) {
+ default: llvm_unreachable("unsupported object format");
+ case Triple::MachO:
return createMachOStreamer(Ctx, MAB, _OS, _Emitter, RelaxAll);
-
- if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF)
- return createWinCOFFStreamer(Ctx, MAB, *_Emitter, _OS, RelaxAll);
-
- return createELFStreamer(Ctx, 0, MAB, _OS, _Emitter, RelaxAll, NoExecStack);
+ case Triple::COFF:
+ assert(TheTriple.isOSWindows() && "only Windows COFF is supported");
+ return createX86WinCOFFStreamer(Ctx, MAB, _Emitter, _OS, RelaxAll);
+ case Triple::ELF:
+ return createELFStreamer(Ctx, MAB, _OS, _Emitter, RelaxAll, NoExecStack);
+ }
}
static MCInstPrinter *createX86MCInstPrinter(const Target &T,
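
[Editor's note, not part of the patch: createMCStreamer now dispatches on Triple::getObjectFormat() rather than on ad-hoc OS/environment checks, so the MachO/COFF/ELF choice is explicit and anything else is unreachable. A standalone sketch of the resulting mapping, with strings standing in for the ObjectFormatType enumerators and the streamer factories:]

    #include <cassert>
    #include <string>

    static std::string streamerFor(const std::string &ObjectFormat) {
      if (ObjectFormat == "MachO") return "MachOStreamer";
      if (ObjectFormat == "COFF")  return "X86WinCOFFStreamer";  // Windows-only, per the assert above.
      if (ObjectFormat == "ELF")   return "ELFStreamer";
      return "<unreachable>";       // Any other format would hit llvm_unreachable.
    }

    int main() {
      assert(streamerFor("COFF") == "X86WinCOFFStreamer");
      assert(streamerFor("ELF") == "ELFStreamer");
    }
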
@@ -381,13 +380,13 @@ static MCInstPrinter *createX86MCInstPrinter(const Target &T,
return new X86ATTInstPrinter(MAI, MII, MRI);
if (SyntaxVariant == 1)
return new X86IntelInstPrinter(MAI, MII, MRI);
- return 0;
+ return nullptr;
}
static MCRelocationInfo *createX86MCRelocationInfo(StringRef TT,
MCContext &Ctx) {
Triple TheTriple(TT);
- if (TheTriple.isEnvironmentMachO() && TheTriple.getArch() == Triple::x86_64)
+ if (TheTriple.isOSBinFormatMachO() && TheTriple.getArch() == Triple::x86_64)
return createX86_64MachORelocationInfo(Ctx);
else if (TheTriple.isOSBinFormatELF())
return createX86_64ELFRelocationInfo(Ctx);
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
index 41ae435..ebe74cf 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
@@ -26,7 +26,9 @@ class MCObjectWriter;
class MCRegisterInfo;
class MCSubtargetInfo;
class MCRelocationInfo;
+class MCStreamer;
class Target;
+class Triple;
class StringRef;
class raw_ostream;
@@ -63,7 +65,7 @@ namespace X86_MC {
void DetectFamilyModel(unsigned EAX, unsigned &Family, unsigned &Model);
- unsigned getDwarfRegFlavour(StringRef TT, bool isEH);
+ unsigned getDwarfRegFlavour(Triple TT, bool isEH);
void InitLLVM2SEHRegisterMapping(MCRegisterInfo *MRI);
@@ -84,6 +86,14 @@ MCAsmBackend *createX86_32AsmBackend(const Target &T, const MCRegisterInfo &MRI,
MCAsmBackend *createX86_64AsmBackend(const Target &T, const MCRegisterInfo &MRI,
StringRef TT, StringRef CPU);
+/// createX86WinCOFFStreamer - Construct an X86 Windows COFF machine code
+/// streamer which will generate PE/COFF format object files.
+///
+/// Takes ownership of \p AB and \p CE.
+MCStreamer *createX86WinCOFFStreamer(MCContext &C, MCAsmBackend &AB,
+ MCCodeEmitter *CE, raw_ostream &OS,
+ bool RelaxAll);
+
/// createX86MachObjectWriter - Construct an X86 Mach-O object writer.
MCObjectWriter *createX86MachObjectWriter(raw_ostream &OS,
bool Is64Bit,
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp
index 209b1d0e..3b81d53 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp
@@ -11,8 +11,8 @@
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCRelocationInfo.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/Object/MachO.h"
using namespace llvm;
@@ -24,7 +24,7 @@ class X86_64MachORelocationInfo : public MCRelocationInfo {
public:
X86_64MachORelocationInfo(MCContext &Ctx) : MCRelocationInfo(Ctx) {}
- const MCExpr *createExprForRelocation(RelocationRef Rel) {
+ const MCExpr *createExprForRelocation(RelocationRef Rel) override {
const MachOObjectFile *Obj = cast<MachOObjectFile>(Rel.getObjectFile());
uint64_t RelType; Rel.getType(RelType);
@@ -40,7 +40,7 @@ public:
// FIXME: check that the value is actually the same.
if (Sym->isVariable() == false)
Sym->setVariableValue(MCConstantExpr::Create(SymAddr, Ctx));
- const MCExpr *Expr = 0;
+ const MCExpr *Expr = nullptr;
switch(RelType) {
case X86_64_RELOC_TLV:
@@ -72,9 +72,9 @@ public:
break;
case X86_64_RELOC_SUBTRACTOR:
{
- RelocationRef RelNext;
- Obj->getRelocationNext(Rel.getRawDataRefImpl(), RelNext);
- any_relocation_info RENext = Obj->getRelocation(RelNext.getRawDataRefImpl());
+ Rel.moveNext();
+ any_relocation_info RENext =
+ Obj->getRelocation(Rel.getRawDataRefImpl());
// X86_64_SUBTRACTOR must be followed by a relocation of type
// X86_64_RELOC_UNSIGNED.
@@ -86,7 +86,7 @@ public:
const MCExpr *LHS = MCSymbolRefExpr::Create(Sym, Ctx);
- symbol_iterator RSymI = RelNext.getSymbol();
+ symbol_iterator RSymI = Rel.getSymbol();
uint64_t RSymAddr;
RSymI->getAddress(RSymAddr);
StringRef RSymName;
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
index eb7c0b1..ead3338 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
@@ -63,7 +63,7 @@ public:
void RecordRelocation(MachObjectWriter *Writer,
const MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment *Fragment, const MCFixup &Fixup,
- MCValue Target, uint64_t &FixedValue) {
+ MCValue Target, uint64_t &FixedValue) override {
if (Writer->is64Bit())
RecordX86_64Relocation(Writer, Asm, Layout, Fragment, Fixup, Target,
FixedValue);
@@ -146,13 +146,13 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
const MCSymbol *A = &Target.getSymA()->getSymbol();
if (A->isTemporary())
A = &A->AliasedSymbol();
- MCSymbolData &A_SD = Asm.getSymbolData(*A);
+ const MCSymbolData &A_SD = Asm.getSymbolData(*A);
const MCSymbolData *A_Base = Asm.getAtom(&A_SD);
const MCSymbol *B = &Target.getSymB()->getSymbol();
if (B->isTemporary())
B = &B->AliasedSymbol();
- MCSymbolData &B_SD = Asm.getSymbolData(*B);
+ const MCSymbolData &B_SD = Asm.getSymbolData(*B);
const MCSymbolData *B_Base = Asm.getAtom(&B_SD);
// Neither symbol can be modified.
@@ -186,9 +186,9 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
false);
Value += Writer->getSymbolAddress(&A_SD, Layout) -
- (A_Base == NULL ? 0 : Writer->getSymbolAddress(A_Base, Layout));
+ (!A_Base ? 0 : Writer->getSymbolAddress(A_Base, Layout));
Value -= Writer->getSymbolAddress(&B_SD, Layout) -
- (B_Base == NULL ? 0 : Writer->getSymbolAddress(B_Base, Layout));
+ (!B_Base ? 0 : Writer->getSymbolAddress(B_Base, Layout));
if (A_Base) {
Index = A_Base->getIndex();
@@ -220,7 +220,7 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
Type = MachO::X86_64_RELOC_SUBTRACTOR;
} else {
const MCSymbol *Symbol = &Target.getSymA()->getSymbol();
- MCSymbolData &SD = Asm.getSymbolData(*Symbol);
+ const MCSymbolData &SD = Asm.getSymbolData(*Symbol);
const MCSymbolData *Base = Asm.getAtom(&SD);
// Relocations inside debug sections always use local relocations when
@@ -230,8 +230,8 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer,
if (Symbol->isInSection()) {
const MCSectionMachO &Section = static_cast<const MCSectionMachO&>(
Fragment->getParent()->getSection());
- if (Section.hasAttribute(MCSectionMachO::S_ATTR_DEBUG))
- Base = 0;
+ if (Section.hasAttribute(MachO::S_ATTR_DEBUG))
+ Base = nullptr;
}
// x86_64 almost always uses external relocations, except when there is no
@@ -362,13 +362,14 @@ bool X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer,
MCValue Target,
unsigned Log2Size,
uint64_t &FixedValue) {
+ uint64_t OriginalFixedValue = FixedValue;
uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind());
unsigned Type = MachO::GENERIC_RELOC_VANILLA;
// See <reloc.h>.
const MCSymbol *A = &Target.getSymA()->getSymbol();
- MCSymbolData *A_SD = &Asm.getSymbolData(*A);
+ const MCSymbolData *A_SD = &Asm.getSymbolData(*A);
if (!A_SD->getFragment())
report_fatal_error("symbol '" + A->getName() +
@@ -381,7 +382,7 @@ bool X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer,
uint32_t Value2 = 0;
if (const MCSymbolRefExpr *B = Target.getSymB()) {
- MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol());
+ const MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol());
if (!B_SD->getFragment())
report_fatal_error("symbol '" + B->getSymbol().getName() +
@@ -431,8 +432,10 @@ bool X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer,
// symbol, things can go badly.
//
// Required for 'as' compatibility.
- if (FixupOffset > 0xffffff)
+ if (FixupOffset > 0xffffff) {
+ FixedValue = OriginalFixedValue;
return false;
+ }
}
MachO::any_relocation_info MRE;
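
[Editor's note, not part of the patch: the OriginalFixedValue change above makes the large-offset bail-out side-effect free, so the caller sees FixedValue unmodified when RecordScatteredRelocation returns false. The pattern in isolation, as a standalone sketch:]

    #include <cassert>

    // Snapshot an in/out parameter before speculative updates so a late
    // bail-out can hand the caller back the untouched value.
    static bool tryAdjust(unsigned &Value, bool Bail) {
      unsigned Original = Value;  // Like OriginalFixedValue above.
      Value += 16;                // Speculative update made before the final check.
      if (Bail) {                 // Like FixupOffset > 0xffffff above.
        Value = Original;
        return false;
      }
      return true;
    }

    int main() {
      unsigned V = 4;
      assert(!tryAdjust(V, /*Bail=*/true) && V == 4);
      assert(tryAdjust(V, /*Bail=*/false) && V == 20);
    }
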
@@ -462,7 +465,7 @@ void X86MachObjectWriter::RecordTLVPRelocation(MachObjectWriter *Writer,
unsigned IsPCRel = 0;
// Get the symbol data.
- MCSymbolData *SD_A = &Asm.getSymbolData(Target.getSymA()->getSymbol());
+ const MCSymbolData *SD_A = &Asm.getSymbolData(Target.getSymA()->getSymbol());
unsigned Index = SD_A->getIndex();
// We're only going to have a second symbol in pic mode and it'll be a
@@ -473,7 +476,8 @@ void X86MachObjectWriter::RecordTLVPRelocation(MachObjectWriter *Writer,
// If this is a subtraction then we're pcrel.
uint32_t FixupAddress =
Writer->getFragmentAddress(Fragment, Layout) + Fixup.getOffset();
- MCSymbolData *SD_B = &Asm.getSymbolData(Target.getSymB()->getSymbol());
+ const MCSymbolData *SD_B =
+ &Asm.getSymbolData(Target.getSymB()->getSymbol());
IsPCRel = 1;
FixedValue = (FixupAddress - Writer->getSymbolAddress(SD_B, Layout) +
Target.getConstant());
@@ -521,7 +525,7 @@ void X86MachObjectWriter::RecordX86Relocation(MachObjectWriter *Writer,
}
// Get the symbol data, if any.
- MCSymbolData *SD = 0;
+ const MCSymbolData *SD = nullptr;
if (Target.getSymA())
SD = &Asm.getSymbolData(Target.getSymA()->getSymbol());
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
index 6da4142..40af822 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
@@ -23,22 +23,18 @@ namespace llvm {
namespace {
class X86WinCOFFObjectWriter : public MCWinCOFFObjectTargetWriter {
- const bool Is64Bit;
-
public:
- X86WinCOFFObjectWriter(bool Is64Bit_);
+ X86WinCOFFObjectWriter(bool Is64Bit);
virtual ~X86WinCOFFObjectWriter();
- virtual unsigned getRelocType(const MCValue &Target,
- const MCFixup &Fixup,
- bool IsCrossSection) const LLVM_OVERRIDE;
+ unsigned getRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsCrossSection) const override;
};
}
-X86WinCOFFObjectWriter::X86WinCOFFObjectWriter(bool Is64Bit_)
- : MCWinCOFFObjectTargetWriter(Is64Bit_ ? COFF::IMAGE_FILE_MACHINE_AMD64 :
- COFF::IMAGE_FILE_MACHINE_I386),
- Is64Bit(Is64Bit_) {}
+X86WinCOFFObjectWriter::X86WinCOFFObjectWriter(bool Is64Bit)
+ : MCWinCOFFObjectTargetWriter(Is64Bit ? COFF::IMAGE_FILE_MACHINE_AMD64
+ : COFF::IMAGE_FILE_MACHINE_I386) {}
X86WinCOFFObjectWriter::~X86WinCOFFObjectWriter() {}
@@ -50,26 +46,46 @@ unsigned X86WinCOFFObjectWriter::getRelocType(const MCValue &Target,
MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ?
MCSymbolRefExpr::VK_None : Target.getSymA()->getKind();
- switch (FixupKind) {
- case FK_PCRel_4:
- case X86::reloc_riprel_4byte:
- case X86::reloc_riprel_4byte_movq_load:
- return Is64Bit ? COFF::IMAGE_REL_AMD64_REL32 : COFF::IMAGE_REL_I386_REL32;
- case FK_Data_4:
- case X86::reloc_signed_4byte:
- if (Modifier == MCSymbolRefExpr::VK_COFF_IMGREL32)
- return Is64Bit ? COFF::IMAGE_REL_AMD64_ADDR32NB :
- COFF::IMAGE_REL_I386_DIR32NB;
- return Is64Bit ? COFF::IMAGE_REL_AMD64_ADDR32 : COFF::IMAGE_REL_I386_DIR32;
- case FK_Data_8:
- if (Is64Bit)
+ if (getMachine() == COFF::IMAGE_FILE_MACHINE_AMD64) {
+ switch (FixupKind) {
+ case FK_PCRel_4:
+ case X86::reloc_riprel_4byte:
+ case X86::reloc_riprel_4byte_movq_load:
+ return COFF::IMAGE_REL_AMD64_REL32;
+ case FK_Data_4:
+ case X86::reloc_signed_4byte:
+ if (Modifier == MCSymbolRefExpr::VK_COFF_IMGREL32)
+ return COFF::IMAGE_REL_AMD64_ADDR32NB;
+ return COFF::IMAGE_REL_AMD64_ADDR32;
+ case FK_Data_8:
return COFF::IMAGE_REL_AMD64_ADDR64;
- llvm_unreachable("unsupported relocation type");
- case FK_SecRel_4:
- return Is64Bit ? COFF::IMAGE_REL_AMD64_SECREL : COFF::IMAGE_REL_I386_SECREL;
- default:
- llvm_unreachable("unsupported relocation type");
- }
+ case FK_SecRel_2:
+ return COFF::IMAGE_REL_AMD64_SECTION;
+ case FK_SecRel_4:
+ return COFF::IMAGE_REL_AMD64_SECREL;
+ default:
+ llvm_unreachable("unsupported relocation type");
+ }
+ } else if (getMachine() == COFF::IMAGE_FILE_MACHINE_I386) {
+ switch (FixupKind) {
+ case FK_PCRel_4:
+ case X86::reloc_riprel_4byte:
+ case X86::reloc_riprel_4byte_movq_load:
+ return COFF::IMAGE_REL_I386_REL32;
+ case FK_Data_4:
+ case X86::reloc_signed_4byte:
+ if (Modifier == MCSymbolRefExpr::VK_COFF_IMGREL32)
+ return COFF::IMAGE_REL_I386_DIR32NB;
+ return COFF::IMAGE_REL_I386_DIR32;
+ case FK_SecRel_2:
+ return COFF::IMAGE_REL_I386_SECTION;
+ case FK_SecRel_4:
+ return COFF::IMAGE_REL_I386_SECREL;
+ default:
+ llvm_unreachable("unsupported relocation type");
+ }
+ } else
+ llvm_unreachable("Unsupported COFF machine type.");
}
MCObjectWriter *llvm::createX86WinCOFFObjectWriter(raw_ostream &OS,
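
[Editor's note, not part of the patch: getRelocType above now selects relocations per COFF machine type first, which also lets FK_SecRel_2 map to the IMAGE_REL_*_SECTION relocations. A standalone sketch of the AMD64 branch only, with strings standing in for the fixup-kind and COFF enumerators used in the hunk:]

    #include <cassert>
    #include <string>

    static std::string amd64RelocFor(const std::string &Fixup, bool IsImgRel32) {
      if (Fixup == "FK_PCRel_4" || Fixup == "reloc_riprel_4byte" ||
          Fixup == "reloc_riprel_4byte_movq_load")
        return "IMAGE_REL_AMD64_REL32";
      if (Fixup == "FK_Data_4" || Fixup == "reloc_signed_4byte")
        return IsImgRel32 ? "IMAGE_REL_AMD64_ADDR32NB" : "IMAGE_REL_AMD64_ADDR32";
      if (Fixup == "FK_Data_8")  return "IMAGE_REL_AMD64_ADDR64";
      if (Fixup == "FK_SecRel_2") return "IMAGE_REL_AMD64_SECTION";
      if (Fixup == "FK_SecRel_4") return "IMAGE_REL_AMD64_SECREL";
      return "<unsupported>";
    }

    int main() {
      assert(amd64RelocFor("FK_Data_4", /*IsImgRel32=*/true) == "IMAGE_REL_AMD64_ADDR32NB");
      assert(amd64RelocFor("FK_SecRel_2", false) == "IMAGE_REL_AMD64_SECTION");
    }
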
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp
new file mode 100644
index 0000000..6727f5e
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp
@@ -0,0 +1,51 @@
+//===-- X86WinCOFFStreamer.cpp - X86 Target WinCOFF Streamer ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86MCTargetDesc.h"
+#include "llvm/MC/MCWinCOFFStreamer.h"
+
+using namespace llvm;
+
+namespace {
+class X86WinCOFFStreamer : public MCWinCOFFStreamer {
+public:
+ X86WinCOFFStreamer(MCContext &C, MCAsmBackend &AB, MCCodeEmitter *CE,
+ raw_ostream &OS)
+ : MCWinCOFFStreamer(C, AB, *CE, OS) { }
+
+ void EmitWinEHHandlerData() override;
+ void FinishImpl() override;
+};
+
+void X86WinCOFFStreamer::EmitWinEHHandlerData() {
+ MCStreamer::EmitWinEHHandlerData();
+
+ // We have to emit the unwind info now, because this directive
+ // actually switches to the .xdata section!
+ MCWin64EHUnwindEmitter::EmitUnwindInfo(*this, getCurrentWinFrameInfo());
+}
+
+void X86WinCOFFStreamer::FinishImpl() {
+ EmitFrames(nullptr);
+ EmitWindowsUnwindTables();
+
+ MCWinCOFFStreamer::FinishImpl();
+}
+}
+
+namespace llvm {
+MCStreamer *createX86WinCOFFStreamer(MCContext &C, MCAsmBackend &AB,
+ MCCodeEmitter *CE, raw_ostream &OS,
+ bool RelaxAll) {
+ X86WinCOFFStreamer *S = new X86WinCOFFStreamer(C, AB, CE, OS);
+ S->getAssembler().setRelaxAll(RelaxAll);
+ return S;
+}
+}
+
diff --git a/contrib/llvm/lib/Target/X86/TargetInfo/X86TargetInfo.cpp b/contrib/llvm/lib/Target/X86/TargetInfo/X86TargetInfo.cpp
index 815d235..1ea8798 100644
--- a/contrib/llvm/lib/Target/X86/TargetInfo/X86TargetInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/TargetInfo/X86TargetInfo.cpp
@@ -7,8 +7,7 @@
//
//===----------------------------------------------------------------------===//
-#include "X86.h"
-#include "llvm/IR/Module.h"
+#include "MCTargetDesc/X86MCTargetDesc.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp b/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp
index bbd4904..5f2441c 100644
--- a/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp
+++ b/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "X86ShuffleDecode.h"
+#include "llvm/CodeGen/MachineValueType.h"
//===----------------------------------------------------------------------===//
// Vector Mask Decoding
diff --git a/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.h b/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.h
index 017ab32..9e75b6b 100644
--- a/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.h
+++ b/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.h
@@ -16,13 +16,14 @@
#define X86_SHUFFLE_DECODE_H
#include "llvm/ADT/SmallVector.h"
-#include "llvm/CodeGen/ValueTypes.h"
//===----------------------------------------------------------------------===//
// Vector Mask Decoding
//===----------------------------------------------------------------------===//
namespace llvm {
+class MVT;
+
enum {
SM_SentinelZero = -1
};
diff --git a/contrib/llvm/lib/Target/X86/X86.h b/contrib/llvm/lib/Target/X86/X86.h
index 947002f..d5522ed 100644
--- a/contrib/llvm/lib/Target/X86/X86.h
+++ b/contrib/llvm/lib/Target/X86/X86.h
@@ -15,26 +15,28 @@
#ifndef TARGET_X86_H
#define TARGET_X86_H
-#include "MCTargetDesc/X86BaseInfo.h"
-#include "MCTargetDesc/X86MCTargetDesc.h"
-#include "llvm/Support/DataTypes.h"
-#include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/CodeGen.h"
namespace llvm {
class FunctionPass;
+class ImmutablePass;
class JITCodeEmitter;
class X86TargetMachine;
+/// createX86AtomicExpandPass - This pass expands atomic operations that cannot
+/// be handled natively in terms of a loop using cmpxchg.
+FunctionPass *createX86AtomicExpandPass(const X86TargetMachine *TM);
+
/// createX86ISelDag - This pass converts a legalized DAG into a
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *createX86ISelDag(X86TargetMachine &TM,
CodeGenOpt::Level OptLevel);
-/// createGlobalBaseRegPass - This pass initializes a global base
+/// createX86GlobalBaseRegPass - This pass initializes a global base
/// register for PIC on x86-32.
-FunctionPass* createGlobalBaseRegPass();
+FunctionPass* createX86GlobalBaseRegPass();
/// createCleanupLocalDynamicTLSPass() - This pass combines multiple accesses
/// to local-dynamic TLS variables so that the TLS base address for the module
diff --git a/contrib/llvm/lib/Target/X86/X86.td b/contrib/llvm/lib/Target/X86/X86.td
index 65c5552..cd32a0f 100644
--- a/contrib/llvm/lib/Target/X86/X86.td
+++ b/contrib/llvm/lib/Target/X86/X86.td
@@ -22,6 +22,10 @@ include "llvm/Target/Target.td"
def Mode64Bit : SubtargetFeature<"64bit-mode", "In64BitMode", "true",
"64-bit mode (x86_64)">;
+def Mode32Bit : SubtargetFeature<"32bit-mode", "In32BitMode", "true",
+ "32-bit mode (80386)">;
+def Mode16Bit : SubtargetFeature<"16bit-mode", "In16BitMode", "true",
+ "16-bit mode (i8086)">;
//===----------------------------------------------------------------------===//
// X86 Subtarget features
@@ -73,6 +77,8 @@ def FeatureCMPXCHG16B : SubtargetFeature<"cx16", "HasCmpxchg16b", "true",
[Feature64Bit]>;
def FeatureSlowBTMem : SubtargetFeature<"slow-bt-mem", "IsBTMemSlow", "true",
"Bit testing of memory is slow">;
+def FeatureSlowSHLD : SubtargetFeature<"slow-shld", "IsSHLDSlow", "true",
+ "SHLD instruction is slow">;
def FeatureFastUAMem : SubtargetFeature<"fast-unaligned-mem",
"IsUAMemFast", "true",
"Fast unaligned memory access">;
@@ -98,7 +104,15 @@ def FeatureCDI : SubtargetFeature<"avx512cd", "HasCDI", "true",
def FeaturePFI : SubtargetFeature<"avx512pf", "HasPFI", "true",
"Enable AVX-512 PreFetch Instructions",
[FeatureAVX512]>;
-
+def FeatureDQI : SubtargetFeature<"avx512dq", "HasDQI", "true",
+ "Enable AVX-512 Doubleword and Quadword Instructions",
+ [FeatureAVX512]>;
+def FeatureBWI : SubtargetFeature<"avx512bw", "HasBWI", "true",
+ "Enable AVX-512 Byte and Word Instructions",
+ [FeatureAVX512]>;
+def FeatureVLX : SubtargetFeature<"avx512vl", "HasVLX", "true",
+ "Enable AVX-512 Vector Length eXtensions",
+ [FeatureAVX512]>;
def FeaturePCLMUL : SubtargetFeature<"pclmul", "HasPCLMUL", "true",
"Enable packed carry-less multiplication instructions",
[FeatureSSE2]>;
@@ -160,6 +174,10 @@ def FeatureCallRegIndirect : SubtargetFeature<"call-reg-indirect",
"Call register indirect">;
def FeatureLEAUsesAG : SubtargetFeature<"lea-uses-ag", "LEAUsesAG", "true",
"LEA instruction needs inputs at AG stage">;
+def FeatureSlowLEA : SubtargetFeature<"slow-lea", "SlowLEA", "true",
+ "LEA instruction with certain arguments is slow">;
+def FeatureSlowIncDec : SubtargetFeature<"slow-incdec", "SlowIncDec", "true",
+ "INC and DEC instructions are slower than ADD and SUB">;
//===----------------------------------------------------------------------===//
// X86 processors supported.
@@ -189,8 +207,7 @@ def : Proc<"pentium3m", [FeatureSSE1, FeatureSlowBTMem]>;
def : Proc<"pentium-m", [FeatureSSE2, FeatureSlowBTMem]>;
def : Proc<"pentium4", [FeatureSSE2]>;
def : Proc<"pentium4m", [FeatureSSE2, FeatureSlowBTMem]>;
-def : Proc<"x86-64", [FeatureSSE2, Feature64Bit, FeatureSlowBTMem,
- FeatureFastUAMem]>;
+
// Intel Core Duo.
def : ProcessorModel<"yonah", SandyBridgeModel,
[FeatureSSE3, FeatureSlowBTMem]>;
@@ -221,7 +238,8 @@ def : ProcessorModel<"slm", SLMModel, [ProcIntelSLM,
FeaturePCLMUL, FeatureAES,
FeatureCallRegIndirect,
FeaturePRFCHW,
- FeatureSlowBTMem]>;
+ FeatureSlowLEA, FeatureSlowIncDec,
+ FeatureSlowBTMem, FeatureFastUAMem]>;
// "Arrandale" along with corei3 and corei5
def : ProcessorModel<"corei7", SandyBridgeModel,
[FeatureSSE42, FeatureCMPXCHG16B, FeatureSlowBTMem,
@@ -263,51 +281,70 @@ def : ProcessorModel<"knl", HaswellModel,
FeatureCMPXCHG16B, FeatureFastUAMem, FeaturePOPCNT,
FeatureAES, FeaturePCLMUL, FeatureRDRAND, FeatureF16C,
FeatureFSGSBase, FeatureMOVBE, FeatureLZCNT, FeatureBMI,
- FeatureBMI2, FeatureFMA, FeatureRTM, FeatureHLE]>;
+ FeatureBMI2, FeatureFMA, FeatureRTM, FeatureHLE,
+ FeatureSlowIncDec]>;
+
+// SKX
+// FIXME: define SKX model
+def : ProcessorModel<"skx", HaswellModel,
+ [FeatureAVX512, FeatureCDI,
+ FeatureDQI, FeatureBWI, FeatureVLX,
+ FeatureCMPXCHG16B, FeatureFastUAMem, FeaturePOPCNT,
+ FeatureAES, FeaturePCLMUL, FeatureRDRAND, FeatureF16C,
+ FeatureFSGSBase, FeatureMOVBE, FeatureLZCNT, FeatureBMI,
+ FeatureBMI2, FeatureFMA, FeatureRTM, FeatureHLE,
+ FeatureSlowIncDec]>;
def : Proc<"k6", [FeatureMMX]>;
def : Proc<"k6-2", [Feature3DNow]>;
def : Proc<"k6-3", [Feature3DNow]>;
-def : Proc<"athlon", [Feature3DNowA, FeatureSlowBTMem]>;
-def : Proc<"athlon-tbird", [Feature3DNowA, FeatureSlowBTMem]>;
-def : Proc<"athlon-4", [FeatureSSE1, Feature3DNowA, FeatureSlowBTMem]>;
-def : Proc<"athlon-xp", [FeatureSSE1, Feature3DNowA, FeatureSlowBTMem]>;
-def : Proc<"athlon-mp", [FeatureSSE1, Feature3DNowA, FeatureSlowBTMem]>;
+def : Proc<"athlon", [Feature3DNowA, FeatureSlowBTMem,
+ FeatureSlowSHLD]>;
+def : Proc<"athlon-tbird", [Feature3DNowA, FeatureSlowBTMem,
+ FeatureSlowSHLD]>;
+def : Proc<"athlon-4", [FeatureSSE1, Feature3DNowA, FeatureSlowBTMem,
+ FeatureSlowSHLD]>;
+def : Proc<"athlon-xp", [FeatureSSE1, Feature3DNowA, FeatureSlowBTMem,
+ FeatureSlowSHLD]>;
+def : Proc<"athlon-mp", [FeatureSSE1, Feature3DNowA, FeatureSlowBTMem,
+ FeatureSlowSHLD]>;
def : Proc<"k8", [FeatureSSE2, Feature3DNowA, Feature64Bit,
- FeatureSlowBTMem]>;
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
def : Proc<"opteron", [FeatureSSE2, Feature3DNowA, Feature64Bit,
- FeatureSlowBTMem]>;
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
def : Proc<"athlon64", [FeatureSSE2, Feature3DNowA, Feature64Bit,
- FeatureSlowBTMem]>;
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
def : Proc<"athlon-fx", [FeatureSSE2, Feature3DNowA, Feature64Bit,
- FeatureSlowBTMem]>;
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
def : Proc<"k8-sse3", [FeatureSSE3, Feature3DNowA, FeatureCMPXCHG16B,
- FeatureSlowBTMem]>;
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
def : Proc<"opteron-sse3", [FeatureSSE3, Feature3DNowA, FeatureCMPXCHG16B,
- FeatureSlowBTMem]>;
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
def : Proc<"athlon64-sse3", [FeatureSSE3, Feature3DNowA, FeatureCMPXCHG16B,
- FeatureSlowBTMem]>;
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
def : Proc<"amdfam10", [FeatureSSE4A,
Feature3DNowA, FeatureCMPXCHG16B, FeatureLZCNT,
- FeaturePOPCNT, FeatureSlowBTMem]>;
+ FeaturePOPCNT, FeatureSlowBTMem,
+ FeatureSlowSHLD]>;
// Bobcat
def : Proc<"btver1", [FeatureSSSE3, FeatureSSE4A, FeatureCMPXCHG16B,
- FeaturePRFCHW, FeatureLZCNT, FeaturePOPCNT]>;
+ FeaturePRFCHW, FeatureLZCNT, FeaturePOPCNT,
+ FeatureSlowSHLD]>;
// Jaguar
def : Proc<"btver2", [FeatureAVX, FeatureSSE4A, FeatureCMPXCHG16B,
FeaturePRFCHW, FeatureAES, FeaturePCLMUL,
FeatureBMI, FeatureF16C, FeatureMOVBE,
- FeatureLZCNT, FeaturePOPCNT]>;
+ FeatureLZCNT, FeaturePOPCNT, FeatureSlowSHLD]>;
// Bulldozer
def : Proc<"bdver1", [FeatureXOP, FeatureFMA4, FeatureCMPXCHG16B,
FeatureAES, FeaturePRFCHW, FeaturePCLMUL,
- FeatureLZCNT, FeaturePOPCNT]>;
+ FeatureLZCNT, FeaturePOPCNT, FeatureSlowSHLD]>;
// Piledriver
def : Proc<"bdver2", [FeatureXOP, FeatureFMA4, FeatureCMPXCHG16B,
FeatureAES, FeaturePRFCHW, FeaturePCLMUL,
FeatureF16C, FeatureLZCNT,
- FeaturePOPCNT, FeatureBMI, FeatureTBM,
- FeatureFMA]>;
+ FeaturePOPCNT, FeatureBMI, FeatureTBM,
+ FeatureFMA, FeatureSlowSHLD]>;
// Steamroller
def : Proc<"bdver3", [FeatureXOP, FeatureFMA4, FeatureCMPXCHG16B,
@@ -316,6 +353,13 @@ def : Proc<"bdver3", [FeatureXOP, FeatureFMA4, FeatureCMPXCHG16B,
FeaturePOPCNT, FeatureBMI, FeatureTBM,
FeatureFMA, FeatureFSGSBase]>;
+// Excavator
+def : Proc<"bdver4", [FeatureAVX2, FeatureXOP, FeatureFMA4,
+ FeatureCMPXCHG16B, FeatureAES, FeaturePRFCHW,
+ FeaturePCLMUL, FeatureF16C, FeatureLZCNT,
+ FeaturePOPCNT, FeatureBMI, FeatureBMI2,
+ FeatureTBM, FeatureFMA, FeatureFSGSBase]>;
+
def : Proc<"geode", [Feature3DNowA]>;
def : Proc<"winchip-c6", [FeatureMMX]>;
@@ -323,6 +367,20 @@ def : Proc<"winchip2", [Feature3DNow]>;
def : Proc<"c3", [Feature3DNow]>;
def : Proc<"c3-2", [FeatureSSE1]>;
+// We also provide a generic 64-bit specific x86 processor model which tries to
+// be good for modern chips without enabling instruction set encodings past the
+// basic SSE2 and 64-bit ones. It disables slow things from any mainstream and
+// modern 64-bit x86 chip, and enables features that are generally beneficial.
+//
+// We currently use the Sandy Bridge model as the default scheduling model as
+// we use it across Nehalem, Westmere, Sandy Bridge, and Ivy Bridge which
+// covers a huge swath of x86 processors. If there are specific scheduling
+// knobs which need to be tuned differently for AMD chips, we might consider
+// forming a common base for them.
+def : ProcessorModel<"x86-64", SandyBridgeModel,
+ [FeatureSSE2, Feature64Bit, FeatureSlowBTMem,
+ FeatureFastUAMem]>;
+
//===----------------------------------------------------------------------===//
// Register File Description
//===----------------------------------------------------------------------===//
@@ -387,12 +445,10 @@ def IntelAsmParserVariant : AsmParserVariant {
def ATTAsmWriter : AsmWriter {
string AsmWriterClassName = "ATTInstPrinter";
int Variant = 0;
- bit isMCAsmWriter = 1;
}
def IntelAsmWriter : AsmWriter {
string AsmWriterClassName = "IntelInstPrinter";
int Variant = 1;
- bit isMCAsmWriter = 1;
}
def X86 : Target {
diff --git a/contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp b/contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp
index 1f5f918..57c7a62 100644
--- a/contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -14,23 +14,23 @@
#include "X86AsmPrinter.h"
#include "InstPrinter/X86ATTInstPrinter.h"
-#include "X86.h"
-#include "X86COFFMachineModuleInfo.h"
+#include "MCTargetDesc/X86BaseInfo.h"
+#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
-#include "X86TargetMachine.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/Assembly/Writer.h"
-#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-#include "llvm/DebugInfo.h"
-#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
@@ -38,8 +38,6 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetOptions.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -51,7 +49,7 @@ using namespace llvm;
bool X86AsmPrinter::runOnMachineFunction(MachineFunction &MF) {
SetupMachineFunction(MF);
- if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho()) {
+ if (Subtarget->isTargetCOFF()) {
bool Intrn = MF.getFunction()->hasInternalLinkage();
OutStreamer.BeginCOFFSymbolDef(CurrentFnSym);
OutStreamer.EmitCOFFSymbolStorageClass(Intrn ? COFF::IMAGE_SYM_CLASS_STATIC
@@ -74,56 +72,55 @@ bool X86AsmPrinter::runOnMachineFunction(MachineFunction &MF) {
/// printSymbolOperand - Print a raw symbol reference operand. This handles
/// jump tables, constant pools, global address and external symbols, all of
/// which print to a label with various suffixes for relocation types etc.
-void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO,
- raw_ostream &O) {
+static void printSymbolOperand(X86AsmPrinter &P, const MachineOperand &MO,
+ raw_ostream &O) {
switch (MO.getType()) {
default: llvm_unreachable("unknown symbol type!");
- case MachineOperand::MO_JumpTableIndex:
- O << *GetJTISymbol(MO.getIndex());
- break;
case MachineOperand::MO_ConstantPoolIndex:
- O << *GetCPISymbol(MO.getIndex());
- printOffset(MO.getOffset(), O);
+ O << *P.GetCPISymbol(MO.getIndex());
+ P.printOffset(MO.getOffset(), O);
break;
case MachineOperand::MO_GlobalAddress: {
const GlobalValue *GV = MO.getGlobal();
MCSymbol *GVSym;
if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB)
- GVSym = GetSymbolWithGlobalValueBase(GV, "$stub");
+ GVSym = P.getSymbolWithGlobalValueBase(GV, "$stub");
else if (MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY ||
MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY_PIC_BASE ||
MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE)
- GVSym = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
+ GVSym = P.getSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
else
- GVSym = getSymbol(GV);
+ GVSym = P.getSymbol(GV);
// Handle dllimport linkage.
if (MO.getTargetFlags() == X86II::MO_DLLIMPORT)
- GVSym = OutContext.GetOrCreateSymbol(Twine("__imp_") + GVSym->getName());
+ GVSym =
+ P.OutContext.GetOrCreateSymbol(Twine("__imp_") + GVSym->getName());
if (MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY ||
MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY_PIC_BASE) {
- MCSymbol *Sym = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
+ MCSymbol *Sym = P.getSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
MachineModuleInfoImpl::StubValueTy &StubSym =
- MMI->getObjFileInfo<MachineModuleInfoMachO>().getGVStubEntry(Sym);
- if (StubSym.getPointer() == 0)
+ P.MMI->getObjFileInfo<MachineModuleInfoMachO>().getGVStubEntry(Sym);
+ if (!StubSym.getPointer())
StubSym = MachineModuleInfoImpl::
- StubValueTy(getSymbol(GV), !GV->hasInternalLinkage());
+ StubValueTy(P.getSymbol(GV), !GV->hasInternalLinkage());
} else if (MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE){
- MCSymbol *Sym = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
+ MCSymbol *Sym = P.getSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
MachineModuleInfoImpl::StubValueTy &StubSym =
- MMI->getObjFileInfo<MachineModuleInfoMachO>().getHiddenGVStubEntry(Sym);
- if (StubSym.getPointer() == 0)
+ P.MMI->getObjFileInfo<MachineModuleInfoMachO>().getHiddenGVStubEntry(
+ Sym);
+ if (!StubSym.getPointer())
StubSym = MachineModuleInfoImpl::
- StubValueTy(getSymbol(GV), !GV->hasInternalLinkage());
+ StubValueTy(P.getSymbol(GV), !GV->hasInternalLinkage());
} else if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB) {
- MCSymbol *Sym = GetSymbolWithGlobalValueBase(GV, "$stub");
+ MCSymbol *Sym = P.getSymbolWithGlobalValueBase(GV, "$stub");
MachineModuleInfoImpl::StubValueTy &StubSym =
- MMI->getObjFileInfo<MachineModuleInfoMachO>().getFnStubEntry(Sym);
- if (StubSym.getPointer() == 0)
+ P.MMI->getObjFileInfo<MachineModuleInfoMachO>().getFnStubEntry(Sym);
+ if (!StubSym.getPointer())
StubSym = MachineModuleInfoImpl::
- StubValueTy(getSymbol(GV), !GV->hasInternalLinkage());
+ StubValueTy(P.getSymbol(GV), !GV->hasInternalLinkage());
}
// If the name begins with a dollar-sign, enclose it in parens. We do this
@@ -132,36 +129,7 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO,
O << *GVSym;
else
O << '(' << *GVSym << ')';
- printOffset(MO.getOffset(), O);
- break;
- }
- case MachineOperand::MO_ExternalSymbol: {
- const MCSymbol *SymToPrint;
- if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB) {
- SmallString<128> TempNameStr;
- TempNameStr += StringRef(MO.getSymbolName());
- TempNameStr += StringRef("$stub");
-
- MCSymbol *Sym = GetExternalSymbolSymbol(TempNameStr.str());
- MachineModuleInfoImpl::StubValueTy &StubSym =
- MMI->getObjFileInfo<MachineModuleInfoMachO>().getFnStubEntry(Sym);
- if (StubSym.getPointer() == 0) {
- TempNameStr.erase(TempNameStr.end()-5, TempNameStr.end());
- StubSym = MachineModuleInfoImpl::
- StubValueTy(OutContext.GetOrCreateSymbol(TempNameStr.str()),
- true);
- }
- SymToPrint = StubSym.getPointer();
- } else {
- SymToPrint = GetExternalSymbolSymbol(MO.getSymbolName());
- }
-
- // If the name begins with a dollar-sign, enclose it in parens. We do this
- // to avoid having it look like an integer immediate to the assembler.
- if (SymToPrint->getName()[0] != '$')
- O << *SymToPrint;
- else
- O << '(' << *SymToPrint << '(';
+ P.printOffset(MO.getOffset(), O);
break;
}
}
@@ -177,12 +145,12 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO,
// These affect the name of the symbol, not any suffix.
break;
case X86II::MO_GOT_ABSOLUTE_ADDRESS:
- O << " + [.-" << *MF->getPICBaseSymbol() << ']';
+ O << " + [.-" << *P.MF->getPICBaseSymbol() << ']';
break;
case X86II::MO_PIC_BASE_OFFSET:
case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
- O << '-' << *MF->getPICBaseSymbol();
+ O << '-' << *P.MF->getPICBaseSymbol();
break;
case X86II::MO_TLSGD: O << "@TLSGD"; break;
case X86II::MO_TLSLD: O << "@TLSLD"; break;
@@ -199,41 +167,40 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO,
case X86II::MO_PLT: O << "@PLT"; break;
case X86II::MO_TLVP: O << "@TLVP"; break;
case X86II::MO_TLVP_PIC_BASE:
- O << "@TLVP" << '-' << *MF->getPICBaseSymbol();
+ O << "@TLVP" << '-' << *P.MF->getPICBaseSymbol();
break;
case X86II::MO_SECREL: O << "@SECREL32"; break;
}
}
+static void printOperand(X86AsmPrinter &P, const MachineInstr *MI,
+ unsigned OpNo, raw_ostream &O,
+ const char *Modifier = nullptr, unsigned AsmVariant = 0);
+
/// printPCRelImm - This is used to print an immediate value that ends up
/// being encoded as a pc-relative value. These print slightly differently, for
/// example, a $ is not emitted.
-void X86AsmPrinter::printPCRelImm(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O) {
+static void printPCRelImm(X86AsmPrinter &P, const MachineInstr *MI,
+ unsigned OpNo, raw_ostream &O) {
const MachineOperand &MO = MI->getOperand(OpNo);
switch (MO.getType()) {
default: llvm_unreachable("Unknown pcrel immediate operand");
case MachineOperand::MO_Register:
// pc-relativeness was handled when computing the value in the reg.
- printOperand(MI, OpNo, O);
+ printOperand(P, MI, OpNo, O);
return;
case MachineOperand::MO_Immediate:
O << MO.getImm();
return;
- case MachineOperand::MO_MachineBasicBlock:
- O << *MO.getMBB()->getSymbol();
- return;
case MachineOperand::MO_GlobalAddress:
- case MachineOperand::MO_ExternalSymbol:
- printSymbolOperand(MO, O);
+ printSymbolOperand(P, MO, O);
return;
}
}
-
-void X86AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O, const char *Modifier,
- unsigned AsmVariant) {
+static void printOperand(X86AsmPrinter &P, const MachineInstr *MI,
+ unsigned OpNo, raw_ostream &O, const char *Modifier,
+ unsigned AsmVariant) {
const MachineOperand &MO = MI->getOperand(OpNo);
switch (MO.getType()) {
default: llvm_unreachable("unknown operand type!");
@@ -256,22 +223,20 @@ void X86AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
O << MO.getImm();
return;
- case MachineOperand::MO_JumpTableIndex:
- case MachineOperand::MO_ConstantPoolIndex:
- case MachineOperand::MO_GlobalAddress:
- case MachineOperand::MO_ExternalSymbol: {
+ case MachineOperand::MO_GlobalAddress: {
if (AsmVariant == 0) O << '$';
- printSymbolOperand(MO, O);
+ printSymbolOperand(P, MO, O);
break;
}
}
}
-void X86AsmPrinter::printLeaMemReference(const MachineInstr *MI, unsigned Op,
- raw_ostream &O, const char *Modifier) {
- const MachineOperand &BaseReg = MI->getOperand(Op);
- const MachineOperand &IndexReg = MI->getOperand(Op+2);
- const MachineOperand &DispSpec = MI->getOperand(Op+3);
+static void printLeaMemReference(X86AsmPrinter &P, const MachineInstr *MI,
+ unsigned Op, raw_ostream &O,
+ const char *Modifier = nullptr) {
+ const MachineOperand &BaseReg = MI->getOperand(Op+X86::AddrBaseReg);
+ const MachineOperand &IndexReg = MI->getOperand(Op+X86::AddrIndexReg);
+ const MachineOperand &DispSpec = MI->getOperand(Op+X86::AddrDisp);
// If we really don't want to print out (rip), don't.
bool HasBaseReg = BaseReg.getReg() != 0;
@@ -282,14 +247,18 @@ void X86AsmPrinter::printLeaMemReference(const MachineInstr *MI, unsigned Op,
// HasParenPart - True if we will print out the () part of the mem ref.
bool HasParenPart = IndexReg.getReg() || HasBaseReg;
- if (DispSpec.isImm()) {
+ switch (DispSpec.getType()) {
+ default:
+ llvm_unreachable("unknown operand type!");
+ case MachineOperand::MO_Immediate: {
int DispVal = DispSpec.getImm();
if (DispVal || !HasParenPart)
O << DispVal;
- } else {
- assert(DispSpec.isGlobal() || DispSpec.isCPI() ||
- DispSpec.isJTI() || DispSpec.isSymbol());
- printSymbolOperand(MI->getOperand(Op+3), O);
+ break;
+ }
+ case MachineOperand::MO_GlobalAddress:
+ case MachineOperand::MO_ConstantPoolIndex:
+ printSymbolOperand(P, DispSpec, O);
}
if (Modifier && strcmp(Modifier, "H") == 0)
@@ -301,12 +270,12 @@ void X86AsmPrinter::printLeaMemReference(const MachineInstr *MI, unsigned Op,
O << '(';
if (HasBaseReg)
- printOperand(MI, Op, O, Modifier);
+ printOperand(P, MI, Op+X86::AddrBaseReg, O, Modifier);
if (IndexReg.getReg()) {
O << ',';
- printOperand(MI, Op+2, O, Modifier);
- unsigned ScaleVal = MI->getOperand(Op+1).getImm();
+ printOperand(P, MI, Op+X86::AddrIndexReg, O, Modifier);
+ unsigned ScaleVal = MI->getOperand(Op+X86::AddrScaleAmt).getImm();
if (ScaleVal != 1)
O << ',' << ScaleVal;
}
@@ -314,29 +283,31 @@ void X86AsmPrinter::printLeaMemReference(const MachineInstr *MI, unsigned Op,
}
}
-void X86AsmPrinter::printMemReference(const MachineInstr *MI, unsigned Op,
- raw_ostream &O, const char *Modifier) {
+static void printMemReference(X86AsmPrinter &P, const MachineInstr *MI,
+ unsigned Op, raw_ostream &O,
+ const char *Modifier = nullptr) {
assert(isMem(MI, Op) && "Invalid memory reference!");
- const MachineOperand &Segment = MI->getOperand(Op+4);
+ const MachineOperand &Segment = MI->getOperand(Op+X86::AddrSegmentReg);
if (Segment.getReg()) {
- printOperand(MI, Op+4, O, Modifier);
+ printOperand(P, MI, Op+X86::AddrSegmentReg, O, Modifier);
O << ':';
}
- printLeaMemReference(MI, Op, O, Modifier);
+ printLeaMemReference(P, MI, Op, O, Modifier);
}
-void X86AsmPrinter::printIntelMemReference(const MachineInstr *MI, unsigned Op,
- raw_ostream &O, const char *Modifier,
- unsigned AsmVariant){
- const MachineOperand &BaseReg = MI->getOperand(Op);
- unsigned ScaleVal = MI->getOperand(Op+1).getImm();
- const MachineOperand &IndexReg = MI->getOperand(Op+2);
- const MachineOperand &DispSpec = MI->getOperand(Op+3);
- const MachineOperand &SegReg = MI->getOperand(Op+4);
+static void printIntelMemReference(X86AsmPrinter &P, const MachineInstr *MI,
+ unsigned Op, raw_ostream &O,
+ const char *Modifier = nullptr,
+ unsigned AsmVariant = 1) {
+ const MachineOperand &BaseReg = MI->getOperand(Op+X86::AddrBaseReg);
+ unsigned ScaleVal = MI->getOperand(Op+X86::AddrScaleAmt).getImm();
+ const MachineOperand &IndexReg = MI->getOperand(Op+X86::AddrIndexReg);
+ const MachineOperand &DispSpec = MI->getOperand(Op+X86::AddrDisp);
+ const MachineOperand &SegReg = MI->getOperand(Op+X86::AddrSegmentReg);
// If this has a segment register, print it.
if (SegReg.getReg()) {
- printOperand(MI, Op+4, O, Modifier, AsmVariant);
+ printOperand(P, MI, Op+X86::AddrSegmentReg, O, Modifier, AsmVariant);
O << ':';
}
@@ -344,7 +315,7 @@ void X86AsmPrinter::printIntelMemReference(const MachineInstr *MI, unsigned Op,
bool NeedPlus = false;
if (BaseReg.getReg()) {
- printOperand(MI, Op, O, Modifier, AsmVariant);
+ printOperand(P, MI, Op+X86::AddrBaseReg, O, Modifier, AsmVariant);
NeedPlus = true;
}
@@ -352,13 +323,13 @@ void X86AsmPrinter::printIntelMemReference(const MachineInstr *MI, unsigned Op,
if (NeedPlus) O << " + ";
if (ScaleVal != 1)
O << ScaleVal << '*';
- printOperand(MI, Op+2, O, Modifier, AsmVariant);
+ printOperand(P, MI, Op+X86::AddrIndexReg, O, Modifier, AsmVariant);
NeedPlus = true;
}
if (!DispSpec.isImm()) {
if (NeedPlus) O << " + ";
- printOperand(MI, Op+3, O, Modifier, AsmVariant);
+ printOperand(P, MI, Op+X86::AddrDisp, O, Modifier, AsmVariant);
} else {
int64_t DispVal = DispSpec.getImm();
if (DispVal || (!IndexReg.getReg() && !BaseReg.getReg())) {
@@ -376,8 +347,8 @@ void X86AsmPrinter::printIntelMemReference(const MachineInstr *MI, unsigned Op,
O << ']';
}
-bool X86AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
- raw_ostream &O) {
+static bool printAsmMRegister(X86AsmPrinter &P, const MachineOperand &MO,
+ char Mode, raw_ostream &O) {
unsigned Reg = MO.getReg();
switch (Mode) {
default: return true; // Unknown mode.
@@ -396,7 +367,7 @@ bool X86AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
case 'q':
// Print 64-bit register names if 64-bit integer registers are available.
// Otherwise, print 32-bit register names.
- MVT::SimpleValueType Ty = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
+ MVT::SimpleValueType Ty = P.getSubtarget().is64Bit() ? MVT::i64 : MVT::i32;
Reg = getX86SubSuperRegister(Reg, Ty);
break;
}
@@ -421,37 +392,50 @@ bool X86AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
// See if this is a generic print operand
return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
case 'a': // This is an address. Currently only 'i' and 'r' are expected.
- if (MO.isImm()) {
+ switch (MO.getType()) {
+ default:
+ return true;
+ case MachineOperand::MO_Immediate:
O << MO.getImm();
return false;
- }
- if (MO.isGlobal() || MO.isCPI() || MO.isJTI() || MO.isSymbol()) {
- printSymbolOperand(MO, O);
+ case MachineOperand::MO_ConstantPoolIndex:
+ case MachineOperand::MO_JumpTableIndex:
+ case MachineOperand::MO_ExternalSymbol:
+ llvm_unreachable("unexpected operand type!");
+ case MachineOperand::MO_GlobalAddress:
+ printSymbolOperand(*this, MO, O);
if (Subtarget->isPICStyleRIPRel())
O << "(%rip)";
return false;
- }
- if (MO.isReg()) {
+ case MachineOperand::MO_Register:
O << '(';
- printOperand(MI, OpNo, O);
+ printOperand(*this, MI, OpNo, O);
O << ')';
return false;
}
- return true;
case 'c': // Don't print "$" before a global var name or constant.
- if (MO.isImm())
+ switch (MO.getType()) {
+ default:
+ printOperand(*this, MI, OpNo, O);
+ break;
+ case MachineOperand::MO_Immediate:
O << MO.getImm();
- else if (MO.isGlobal() || MO.isCPI() || MO.isJTI() || MO.isSymbol())
- printSymbolOperand(MO, O);
- else
- printOperand(MI, OpNo, O);
+ break;
+ case MachineOperand::MO_ConstantPoolIndex:
+ case MachineOperand::MO_JumpTableIndex:
+ case MachineOperand::MO_ExternalSymbol:
+ llvm_unreachable("unexpected operand type!");
+ case MachineOperand::MO_GlobalAddress:
+ printSymbolOperand(*this, MO, O);
+ break;
+ }
return false;
case 'A': // Print '*' before a register (it must be a register)
if (MO.isReg()) {
O << '*';
- printOperand(MI, OpNo, O);
+ printOperand(*this, MI, OpNo, O);
return false;
}
return true;
@@ -462,12 +446,12 @@ bool X86AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
case 'k': // Print SImode register
case 'q': // Print DImode register
if (MO.isReg())
- return printAsmMRegister(MO, ExtraCode[0], O);
- printOperand(MI, OpNo, O);
+ return printAsmMRegister(*this, MO, ExtraCode[0], O);
+ printOperand(*this, MI, OpNo, O);
return false;
case 'P': // This is the operand of a call, treat specially.
- printPCRelImm(MI, OpNo, O);
+ printPCRelImm(*this, MI, OpNo, O);
return false;
case 'n': // Negate the immediate or print a '-' before the operand.
@@ -481,7 +465,7 @@ bool X86AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
}
}
- printOperand(MI, OpNo, O, /*Modifier*/ 0, AsmVariant);
+ printOperand(*this, MI, OpNo, O, /*Modifier*/ nullptr, AsmVariant);
return false;
}
@@ -490,7 +474,7 @@ bool X86AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
const char *ExtraCode,
raw_ostream &O) {
if (AsmVariant) {
- printIntelMemReference(MI, OpNo, O);
+ printIntelMemReference(*this, MI, OpNo, O);
return false;
}
@@ -507,19 +491,19 @@ bool X86AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
// These only apply to registers, ignore on mem.
break;
case 'H':
- printMemReference(MI, OpNo, O, "H");
+ printMemReference(*this, MI, OpNo, O, "H");
return false;
case 'P': // Don't print @PLT, but do print as memory.
- printMemReference(MI, OpNo, O, "no-rip");
+ printMemReference(*this, MI, OpNo, O, "no-rip");
return false;
}
}
- printMemReference(MI, OpNo, O);
+ printMemReference(*this, MI, OpNo, O);
return false;
}
void X86AsmPrinter::EmitStartOfAsmFile(Module &M) {
- if (Subtarget->isTargetEnvMacho())
+ if (Subtarget->isTargetMacho())
OutStreamer.SwitchSection(getObjFileLowering().getTextSection());
if (Subtarget->isTargetCOFF()) {
@@ -544,9 +528,78 @@ void X86AsmPrinter::EmitStartOfAsmFile(Module &M) {
}
}
+static void
+emitNonLazySymbolPointer(MCStreamer &OutStreamer, MCSymbol *StubLabel,
+ MachineModuleInfoImpl::StubValueTy &MCSym) {
+ // L_foo$stub:
+ OutStreamer.EmitLabel(StubLabel);
+ // .indirect_symbol _foo
+ OutStreamer.EmitSymbolAttribute(MCSym.getPointer(), MCSA_IndirectSymbol);
+
+ if (MCSym.getInt())
+ // External to current translation unit.
+ OutStreamer.EmitIntValue(0, 4/*size*/);
+ else
+ // Internal to current translation unit.
+ //
+ // When we place the LSDA into the TEXT section, the type info
+ // pointers need to be indirect and pc-rel. We accomplish this by
+ // using NLPs; however, sometimes the types are local to the file.
+ // We need to fill in the value for the NLP in those cases.
+ OutStreamer.EmitValue(
+ MCSymbolRefExpr::Create(MCSym.getPointer(), OutStreamer.getContext()),
+ 4 /*size*/);
+}
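
A minimal sketch of the decision the helper above encodes (illustrative only; the real emission goes through MCStreamer rather than a return value):

    #include <cstdint>
    // An external symbol's non-lazy pointer slot starts out as zero
    // (".long 0", filled in by dyld); an internal symbol's slot is
    // initialized to the symbol's own address (".long _foo").
    uint32_t nonLazyPointerInitialValue(bool IsExternal, uint32_t LocalAddr) {
      return IsExternal ? 0 : LocalAddr;
    }
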
+
+MCSymbol *X86AsmPrinter::GetCPISymbol(unsigned CPID) const {
+ if (Subtarget->isTargetKnownWindowsMSVC()) {
+ const MachineConstantPoolEntry &CPE =
+ MF->getConstantPool()->getConstants()[CPID];
+ if (!CPE.isMachineConstantPoolEntry()) {
+ SectionKind Kind = CPE.getSectionKind(TM.getDataLayout());
+ const Constant *C = CPE.Val.ConstVal;
+ const MCSectionCOFF *S = cast<MCSectionCOFF>(
+ getObjFileLowering().getSectionForConstant(Kind, C));
+ if (MCSymbol *Sym = S->getCOMDATSymbol()) {
+ if (Sym->isUndefined())
+ OutStreamer.EmitSymbolAttribute(Sym, MCSA_Global);
+ return Sym;
+ }
+ }
+ }
+
+ return AsmPrinter::GetCPISymbol(CPID);
+}
+
+void X86AsmPrinter::GenerateExportDirective(const MCSymbol *Sym, bool IsData) {
+ SmallString<128> Directive;
+ raw_svector_ostream OS(Directive);
+ StringRef Name = Sym->getName();
+
+ if (Subtarget->isTargetKnownWindowsMSVC())
+ OS << " /EXPORT:";
+ else
+ OS << " -export:";
+
+ if ((Subtarget->isTargetWindowsGNU() || Subtarget->isTargetWindowsCygwin()) &&
+ (Name[0] == getDataLayout().getGlobalPrefix()))
+ Name = Name.drop_front();
+
+ OS << Name;
+
+ if (IsData) {
+ if (Subtarget->isTargetKnownWindowsMSVC())
+ OS << ",DATA";
+ else
+ OS << ",data";
+ }
+
+ OS.flush();
+ OutStreamer.EmitBytes(Directive);
+}
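
A rough model of the string GenerateExportDirective builds for the .drectve section (the symbol name is made up; only the MSVC versus MinGW/Cygwin spelling difference matters here):

    #include <string>
    std::string exportDirective(bool IsMSVCStyle, std::string Name, bool IsData) {
      std::string S = IsMSVCStyle ? " /EXPORT:" : " -export:";
      S += Name;                              // e.g. "my_global"
      if (IsData)
        S += IsMSVCStyle ? ",DATA" : ",data";
      return S;                               // " /EXPORT:my_global,DATA"
    }
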
void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
- if (Subtarget->isTargetEnvMacho()) {
+ if (Subtarget->isTargetMacho()) {
// All darwin targets use mach-o.
MachineModuleInfoMachO &MMIMacho =
MMI->getObjFileInfo<MachineModuleInfoMachO>();
@@ -558,17 +611,17 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
if (!Stubs.empty()) {
const MCSection *TheSection =
OutContext.getMachOSection("__IMPORT", "__jump_table",
- MCSectionMachO::S_SYMBOL_STUBS |
- MCSectionMachO::S_ATTR_SELF_MODIFYING_CODE |
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
+ MachO::S_SYMBOL_STUBS |
+ MachO::S_ATTR_SELF_MODIFYING_CODE |
+ MachO::S_ATTR_PURE_INSTRUCTIONS,
5, SectionKind::getMetadata());
OutStreamer.SwitchSection(TheSection);
- for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
+ for (const auto &Stub : Stubs) {
// L_foo$stub:
- OutStreamer.EmitLabel(Stubs[i].first);
+ OutStreamer.EmitLabel(Stub.first);
// .indirect_symbol _foo
- OutStreamer.EmitSymbolAttribute(Stubs[i].second.getPointer(),
+ OutStreamer.EmitSymbolAttribute(Stub.second.getPointer(),
MCSA_IndirectSymbol);
// hlt; hlt; hlt; hlt; hlt    (hlt = 0xf4).
const char HltInsts[] = "\xf4\xf4\xf4\xf4\xf4";
@@ -584,48 +637,28 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
if (!Stubs.empty()) {
const MCSection *TheSection =
OutContext.getMachOSection("__IMPORT", "__pointers",
- MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS,
+ MachO::S_NON_LAZY_SYMBOL_POINTERS,
SectionKind::getMetadata());
OutStreamer.SwitchSection(TheSection);
- for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
- // L_foo$non_lazy_ptr:
- OutStreamer.EmitLabel(Stubs[i].first);
- // .indirect_symbol _foo
- MachineModuleInfoImpl::StubValueTy &MCSym = Stubs[i].second;
- OutStreamer.EmitSymbolAttribute(MCSym.getPointer(),
- MCSA_IndirectSymbol);
- // .long 0
- if (MCSym.getInt())
- // External to current translation unit.
- OutStreamer.EmitIntValue(0, 4/*size*/);
- else
- // Internal to current translation unit.
- //
- // When we place the LSDA into the TEXT section, the type info
- // pointers need to be indirect and pc-rel. We accomplish this by
- // using NLPs. However, sometimes the types are local to the file. So
- // we need to fill in the value for the NLP in those cases.
- OutStreamer.EmitValue(MCSymbolRefExpr::Create(MCSym.getPointer(),
- OutContext), 4/*size*/);
- }
+ for (auto &Stub : Stubs)
+ emitNonLazySymbolPointer(OutStreamer, Stub.first, Stub.second);
+
Stubs.clear();
OutStreamer.AddBlankLine();
}
Stubs = MMIMacho.GetHiddenGVStubList();
if (!Stubs.empty()) {
- OutStreamer.SwitchSection(getObjFileLowering().getDataSection());
- EmitAlignment(2);
-
- for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
- // L_foo$non_lazy_ptr:
- OutStreamer.EmitLabel(Stubs[i].first);
- // .long _foo
- OutStreamer.EmitValue(MCSymbolRefExpr::
- Create(Stubs[i].second.getPointer(),
- OutContext), 4/*size*/);
- }
+ const MCSection *TheSection =
+ OutContext.getMachOSection("__IMPORT", "__pointers",
+ MachO::S_NON_LAZY_SYMBOL_POINTERS,
+ SectionKind::getMetadata());
+ OutStreamer.SwitchSection(TheSection);
+
+ for (auto &Stub : Stubs)
+ emitNonLazySymbolPointer(OutStreamer, Stub.first, Stub.second);
+
Stubs.clear();
OutStreamer.AddBlankLine();
}
@@ -640,69 +673,45 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
OutStreamer.EmitAssemblerFlag(MCAF_SubsectionsViaSymbols);
}
- if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing() &&
- MMI->usesVAFloatArgument()) {
+ if (Subtarget->isTargetKnownWindowsMSVC() && MMI->usesVAFloatArgument()) {
StringRef SymbolName = Subtarget->is64Bit() ? "_fltused" : "__fltused";
MCSymbol *S = MMI->getContext().GetOrCreateSymbol(SymbolName);
OutStreamer.EmitSymbolAttribute(S, MCSA_Global);
}
- if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho()) {
- X86COFFMachineModuleInfo &COFFMMI =
- MMI->getObjFileInfo<X86COFFMachineModuleInfo>();
-
- // Emit type information for external functions
- typedef X86COFFMachineModuleInfo::externals_iterator externals_iterator;
- for (externals_iterator I = COFFMMI.externals_begin(),
- E = COFFMMI.externals_end();
- I != E; ++I) {
- OutStreamer.BeginCOFFSymbolDef(CurrentFnSym);
- OutStreamer.EmitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
- OutStreamer.EmitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
- << COFF::SCT_COMPLEX_TYPE_SHIFT);
- OutStreamer.EndCOFFSymbolDef();
- }
-
+ if (Subtarget->isTargetCOFF()) {
// Necessary for dllexport support
std::vector<const MCSymbol*> DLLExportedFns, DLLExportedGlobals;
- const TargetLoweringObjectFileCOFF &TLOFCOFF =
- static_cast<const TargetLoweringObjectFileCOFF&>(getObjFileLowering());
+ for (const auto &Function : M)
+ if (Function.hasDLLExportStorageClass())
+ DLLExportedFns.push_back(getSymbol(&Function));
- for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I)
- if (I->hasDLLExportLinkage())
- DLLExportedFns.push_back(getSymbol(I));
+ for (const auto &Global : M.globals())
+ if (Global.hasDLLExportStorageClass())
+ DLLExportedGlobals.push_back(getSymbol(&Global));
- for (Module::const_global_iterator I = M.global_begin(),
- E = M.global_end(); I != E; ++I)
- if (I->hasDLLExportLinkage())
- DLLExportedGlobals.push_back(getSymbol(I));
+ for (const auto &Alias : M.aliases()) {
+ if (!Alias.hasDLLExportStorageClass())
+ continue;
+
+ if (Alias.getType()->getElementType()->isFunctionTy())
+ DLLExportedFns.push_back(getSymbol(&Alias));
+ else
+ DLLExportedGlobals.push_back(getSymbol(&Alias));
+ }
// Output linker support code for dllexported globals on windows.
if (!DLLExportedGlobals.empty() || !DLLExportedFns.empty()) {
+ const TargetLoweringObjectFileCOFF &TLOFCOFF =
+ static_cast<const TargetLoweringObjectFileCOFF&>(getObjFileLowering());
+
OutStreamer.SwitchSection(TLOFCOFF.getDrectveSection());
- SmallString<128> name;
- for (unsigned i = 0, e = DLLExportedGlobals.size(); i != e; ++i) {
- if (Subtarget->isTargetWindows())
- name = " /EXPORT:";
- else
- name = " -export:";
- name += DLLExportedGlobals[i]->getName();
- if (Subtarget->isTargetWindows())
- name += ",DATA";
- else
- name += ",data";
- OutStreamer.EmitBytes(name);
- }
- for (unsigned i = 0, e = DLLExportedFns.size(); i != e; ++i) {
- if (Subtarget->isTargetWindows())
- name = " /EXPORT:";
- else
- name = " -export:";
- name += DLLExportedFns[i]->getName();
- OutStreamer.EmitBytes(name);
- }
+ for (auto & Symbol : DLLExportedGlobals)
+ GenerateExportDirective(Symbol, /*IsData=*/true);
+ for (auto & Symbol : DLLExportedFns)
+ GenerateExportDirective(Symbol, /*IsData=*/false);
}
}
@@ -718,9 +727,9 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
OutStreamer.SwitchSection(TLOFELF.getDataRelSection());
const DataLayout *TD = TM.getDataLayout();
- for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
- OutStreamer.EmitLabel(Stubs[i].first);
- OutStreamer.EmitSymbolValue(Stubs[i].second.getPointer(),
+ for (const auto &Stub : Stubs) {
+ OutStreamer.EmitLabel(Stub.first);
+ OutStreamer.EmitSymbolValue(Stub.second.getPointer(),
TD->getPointerSize());
}
Stubs.clear();
diff --git a/contrib/llvm/lib/Target/X86/X86AsmPrinter.h b/contrib/llvm/lib/Target/X86/X86AsmPrinter.h
index 24a768b..b1bbe8e 100644
--- a/contrib/llvm/lib/Target/X86/X86AsmPrinter.h
+++ b/contrib/llvm/lib/Target/X86/X86AsmPrinter.h
@@ -10,76 +10,50 @@
#ifndef X86ASMPRINTER_H
#define X86ASMPRINTER_H
-#include "X86.h"
-#include "X86MachineFunctionInfo.h"
-#include "X86TargetMachine.h"
+#include "X86Subtarget.h"
#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/StackMaps.h"
-#include "llvm/Support/Compiler.h"
+#include "llvm/Target/TargetMachine.h"
namespace llvm {
-
class MCStreamer;
+class MCSymbol;
class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
const X86Subtarget *Subtarget;
StackMaps SM;
- // Parses operands of PATCHPOINT and STACKMAP to produce stack map Location
- // structures. Returns a result location and an iterator to the operand
- // immediately following the operands consumed.
- //
- // This method is implemented in X86MCInstLower.cpp.
- static std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
- stackmapOperandParser(MachineInstr::const_mop_iterator MOI,
- MachineInstr::const_mop_iterator MOE,
- const TargetMachine &TM);
+ void GenerateExportDirective(const MCSymbol *Sym, bool IsData);
public:
explicit X86AsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer), SM(*this, stackmapOperandParser) {
+ : AsmPrinter(TM, Streamer), SM(*this) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
}
- virtual const char *getPassName() const LLVM_OVERRIDE {
+ const char *getPassName() const override {
return "X86 Assembly / Object Emitter";
}
const X86Subtarget &getSubtarget() const { return *Subtarget; }
- virtual void EmitStartOfAsmFile(Module &M) LLVM_OVERRIDE;
-
- virtual void EmitEndOfAsmFile(Module &M) LLVM_OVERRIDE;
-
- virtual void EmitInstruction(const MachineInstr *MI) LLVM_OVERRIDE;
-
- void printSymbolOperand(const MachineOperand &MO, raw_ostream &O);
+ void EmitStartOfAsmFile(Module &M) override;
- // These methods are used by the tablegen'erated instruction printer.
- void printOperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &O,
- const char *Modifier = 0, unsigned AsmVariant = 0);
- void printPCRelImm(const MachineInstr *MI, unsigned OpNo, raw_ostream &O);
+ void EmitEndOfAsmFile(Module &M) override;
- bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
- virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &OS) LLVM_OVERRIDE;
- virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &OS) LLVM_OVERRIDE;
+ void EmitInstruction(const MachineInstr *MI) override;
- void printMemReference(const MachineInstr *MI, unsigned Op, raw_ostream &O,
- const char *Modifier=NULL);
- void printLeaMemReference(const MachineInstr *MI, unsigned Op, raw_ostream &O,
- const char *Modifier=NULL);
+ bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS) override;
+ bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS) override;
- void printIntelMemReference(const MachineInstr *MI, unsigned Op,
- raw_ostream &O, const char *Modifier=NULL,
- unsigned AsmVariant = 1);
+ /// \brief Return the symbol for the specified constant pool entry.
+ MCSymbol *GetCPISymbol(unsigned CPID) const override;
- virtual bool runOnMachineFunction(MachineFunction &F) LLVM_OVERRIDE;
+ bool runOnMachineFunction(MachineFunction &F) override;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/X86/X86AtomicExpandPass.cpp b/contrib/llvm/lib/Target/X86/X86AtomicExpandPass.cpp
new file mode 100644
index 0000000..3dcadb1
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/X86AtomicExpandPass.cpp
@@ -0,0 +1,283 @@
+//===-- X86AtomicExpandPass.cpp - Expand illegal atomic instructions -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a pass (at IR level) to replace atomic instructions which
+// cannot be implemented as a single instruction with cmpxchg-based loops.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86.h"
+#include "X86TargetMachine.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetMachine.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "x86-atomic-expand"
+
+namespace {
+ class X86AtomicExpandPass : public FunctionPass {
+ const X86TargetMachine *TM;
+ public:
+ static char ID; // Pass identification, replacement for typeid
+ explicit X86AtomicExpandPass(const X86TargetMachine *TM)
+ : FunctionPass(ID), TM(TM) {}
+
+ bool runOnFunction(Function &F) override;
+ bool expandAtomicInsts(Function &F);
+
+ bool needsCmpXchgNb(Type *MemType);
+
+ /// There are four kinds of atomic operations. Two never need expanding:
+ /// cmpxchg is what we expand the others *to*, and loads are easily handled
+ /// by ISelLowering. Atomicrmw and store can need expanding in some
+ /// circumstances.
+ bool shouldExpand(Instruction *Inst);
+
+ /// 128-bit atomic stores (64-bit on i686) need to be implemented in terms
+ /// of trivial cmpxchg16b loops. A simple store isn't necessarily atomic.
+ bool shouldExpandStore(StoreInst *SI);
+
+ /// Only some atomicrmw instructions need expanding -- some operations
+ /// (e.g. max) have absolutely no architectural support; some (e.g. or) have
+ /// limited support but can't return the previous value; some (e.g. add)
+ /// have complete support in the instruction set.
+ ///
+ /// Also, naturally, 128-bit operations always need to be expanded.
+ bool shouldExpandAtomicRMW(AtomicRMWInst *AI);
+
+ bool expandAtomicRMW(AtomicRMWInst *AI);
+ bool expandAtomicStore(StoreInst *SI);
+ };
+}
+
+char X86AtomicExpandPass::ID = 0;
+
+FunctionPass *llvm::createX86AtomicExpandPass(const X86TargetMachine *TM) {
+ return new X86AtomicExpandPass(TM);
+}
+
+bool X86AtomicExpandPass::runOnFunction(Function &F) {
+ SmallVector<Instruction *, 1> AtomicInsts;
+
+ // Changing control-flow while iterating through it is a bad idea, so gather a
+ // list of all atomic instructions before we start.
+ for (BasicBlock &BB : F)
+ for (Instruction &Inst : BB) {
+ if (isa<AtomicRMWInst>(&Inst) ||
+ (isa<StoreInst>(&Inst) && cast<StoreInst>(&Inst)->isAtomic()))
+ AtomicInsts.push_back(&Inst);
+ }
+
+ bool MadeChange = false;
+ for (Instruction *Inst : AtomicInsts) {
+ if (!shouldExpand(Inst))
+ continue;
+
+ if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(Inst))
+ MadeChange |= expandAtomicRMW(AI);
+ if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
+ MadeChange |= expandAtomicStore(SI);
+
+ assert(MadeChange && "Atomic inst not expanded when it should be?");
+ Inst->eraseFromParent();
+ }
+
+ return MadeChange;
+}
+
+/// Returns true if the operand type is 1 step up from the native width, and
+/// the corresponding cmpxchg8b or cmpxchg16b instruction is available
+/// (otherwise we leave them alone to become __sync_fetch_and_... calls).
+bool X86AtomicExpandPass::needsCmpXchgNb(llvm::Type *MemType) {
+ const X86Subtarget &Subtarget = TM->getSubtarget<X86Subtarget>();
+ unsigned OpWidth = MemType->getPrimitiveSizeInBits();
+
+ if (OpWidth == 64)
+ return !Subtarget.is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
+ if (OpWidth == 128)
+ return Subtarget.hasCmpxchg16b();
+
+ return false;
+}
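
Spelled out, the width test above amounts to the following (a sketch under the same assumptions as the code; cmpxchg8b availability is approximated by !is64Bit, as the FIXME notes):

    bool needsWideCmpXchg(unsigned Bits, bool Is64BitTarget, bool HasCmpXchg16b) {
      if (Bits == 64)  return !Is64BitTarget;  // i64 on a 32-bit target: cmpxchg8b
      if (Bits == 128) return HasCmpXchg16b;   // i128 on x86-64: cmpxchg16b
      return false;                            // everything else fits natively
    }
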
+
+bool X86AtomicExpandPass::shouldExpandAtomicRMW(AtomicRMWInst *AI) {
+ const X86Subtarget &Subtarget = TM->getSubtarget<X86Subtarget>();
+ unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
+
+ if (needsCmpXchgNb(AI->getType()))
+ return true;
+
+ if (AI->getType()->getPrimitiveSizeInBits() > NativeWidth)
+ return false;
+
+ AtomicRMWInst::BinOp Op = AI->getOperation();
+ switch (Op) {
+ default:
+ llvm_unreachable("Unknown atomic operation");
+ case AtomicRMWInst::Xchg:
+ case AtomicRMWInst::Add:
+ case AtomicRMWInst::Sub:
+ // It's better to use xadd, xsub or xchg for these in all cases.
+ return false;
+ case AtomicRMWInst::Or:
+ case AtomicRMWInst::And:
+ case AtomicRMWInst::Xor:
+ // If the atomicrmw's result isn't actually used, we can just add a "lock"
+ // prefix to a normal instruction for these operations.
+ return !AI->use_empty();
+ case AtomicRMWInst::Nand:
+ case AtomicRMWInst::Max:
+ case AtomicRMWInst::Min:
+ case AtomicRMWInst::UMax:
+ case AtomicRMWInst::UMin:
+ // These always require a non-trivial set of data operations on x86. We must
+ // use a cmpxchg loop.
+ return true;
+ }
+}
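
In user terms the three buckets above look roughly like this (illustrative std::atomic code; the exact lowering still depends on the target and options):

    #include <atomic>
    void buckets(std::atomic<int> &A, int V) {
      A.fetch_add(V);             // xadd: never expanded
      A.fetch_or(V);              // result unused -> becomes "lock or", no loop
      int Old = A.fetch_and(V);   // result used   -> needs a cmpxchg loop
      (void)Old;
    }
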
+
+bool X86AtomicExpandPass::shouldExpandStore(StoreInst *SI) {
+ if (needsCmpXchgNb(SI->getValueOperand()->getType()))
+ return true;
+
+ return false;
+}
+
+bool X86AtomicExpandPass::shouldExpand(Instruction *Inst) {
+ if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(Inst))
+ return shouldExpandAtomicRMW(AI);
+ if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
+ return shouldExpandStore(SI);
+ return false;
+}
+
+/// Emit IR to implement the given atomicrmw operation on values in registers,
+/// returning the new value.
+static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
+ Value *Loaded, Value *Inc) {
+ Value *NewVal;
+ switch (Op) {
+ case AtomicRMWInst::Xchg:
+ return Inc;
+ case AtomicRMWInst::Add:
+ return Builder.CreateAdd(Loaded, Inc, "new");
+ case AtomicRMWInst::Sub:
+ return Builder.CreateSub(Loaded, Inc, "new");
+ case AtomicRMWInst::And:
+ return Builder.CreateAnd(Loaded, Inc, "new");
+ case AtomicRMWInst::Nand:
+ return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
+ case AtomicRMWInst::Or:
+ return Builder.CreateOr(Loaded, Inc, "new");
+ case AtomicRMWInst::Xor:
+ return Builder.CreateXor(Loaded, Inc, "new");
+ case AtomicRMWInst::Max:
+ NewVal = Builder.CreateICmpSGT(Loaded, Inc);
+ return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
+ case AtomicRMWInst::Min:
+ NewVal = Builder.CreateICmpSLE(Loaded, Inc);
+ return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
+ case AtomicRMWInst::UMax:
+ NewVal = Builder.CreateICmpUGT(Loaded, Inc);
+ return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
+ case AtomicRMWInst::UMin:
+ NewVal = Builder.CreateICmpULE(Loaded, Inc);
+ return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
+ default:
+ break;
+ }
+ llvm_unreachable("Unknown atomic op");
+}
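
For reference, the plain (non-atomic) semantics of the less obvious cases handled above:

    int      nand(int a, int b)               { return ~(a & b); }       // Nand
    int      smax(int a, int b)               { return a > b ? a : b; }  // Max
    unsigned umin(unsigned a, unsigned b)     { return a <= b ? a : b; } // UMin
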
+
+bool X86AtomicExpandPass::expandAtomicRMW(AtomicRMWInst *AI) {
+ AtomicOrdering Order =
+ AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering();
+ Value *Addr = AI->getPointerOperand();
+ BasicBlock *BB = AI->getParent();
+ Function *F = BB->getParent();
+ LLVMContext &Ctx = F->getContext();
+
+ // Given: atomicrmw some_op iN* %addr, iN %incr ordering
+ //
+ // The standard expansion we produce is:
+ // [...]
+ // %init_loaded = load atomic iN* %addr
+ // br label %loop
+ // loop:
+ // %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
+ // %new = some_op iN %loaded, %incr
+ // %pair = cmpxchg iN* %addr, iN %loaded, iN %new
+ // %new_loaded = extractvalue { iN, i1 } %pair, 0
+ // %success = extractvalue { iN, i1 } %pair, 1
+ // br i1 %success, label %atomicrmw.end, label %loop
+ // atomicrmw.end:
+ // [...]
+ BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
+ BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);
+
+ // This grabs the DebugLoc from AI.
+ IRBuilder<> Builder(AI);
+
+ // The split call above "helpfully" added a branch at the end of BB (to the
+ // wrong place), but we want a load. It's easiest to just remove
+ // the branch entirely.
+ std::prev(BB->end())->eraseFromParent();
+ Builder.SetInsertPoint(BB);
+ LoadInst *InitLoaded = Builder.CreateLoad(Addr);
+ InitLoaded->setAlignment(AI->getType()->getPrimitiveSizeInBits());
+ Builder.CreateBr(LoopBB);
+
+ // Start the main loop block now that we've taken care of the preliminaries.
+ Builder.SetInsertPoint(LoopBB);
+ PHINode *Loaded = Builder.CreatePHI(AI->getType(), 2, "loaded");
+ Loaded->addIncoming(InitLoaded, BB);
+
+ Value *NewVal =
+ performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());
+
+ Value *Pair = Builder.CreateAtomicCmpXchg(
+ Addr, Loaded, NewVal, Order,
+ AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
+ Value *NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
+ Loaded->addIncoming(NewLoaded, LoopBB);
+
+ Value *Success = Builder.CreateExtractValue(Pair, 1, "success");
+ Builder.CreateCondBr(Success, ExitBB, LoopBB);
+
+ AI->replaceAllUsesWith(NewLoaded);
+
+ return true;
+}
+
+bool X86AtomicExpandPass::expandAtomicStore(StoreInst *SI) {
+ // An atomic store might need cmpxchg16b (or 8b on x86) to execute. Express
+ // this in terms of the usual expansion to "atomicrmw xchg".
+ IRBuilder<> Builder(SI);
+ AtomicOrdering Order =
+ SI->getOrdering() == Unordered ? Monotonic : SI->getOrdering();
+ AtomicRMWInst *AI =
+ Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
+ SI->getValueOperand(), Order);
+
+ // Now we have an appropriate swap instruction, lower it as usual.
+ if (shouldExpandAtomicRMW(AI)) {
+ expandAtomicRMW(AI);
+ AI->eraseFromParent();
+ return true;
+ }
+
+ return AI;
+}
diff --git a/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp b/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp
deleted file mode 100644
index 6a6125b..0000000
--- a/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-//===-- X86COFFMachineModuleInfo.cpp - X86 COFF MMI Impl ------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This is an MMI implementation for X86 COFF (windows) targets.
-//
-//===----------------------------------------------------------------------===//
-
-#include "X86COFFMachineModuleInfo.h"
-using namespace llvm;
-
-
-X86COFFMachineModuleInfo::~X86COFFMachineModuleInfo() {
-}
diff --git a/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h b/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h
deleted file mode 100644
index 0dfeb42..0000000
--- a/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h
+++ /dev/null
@@ -1,46 +0,0 @@
-//===-- X86coffmachinemoduleinfo.h - X86 COFF MMI Impl ----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This is an MMI implementation for X86 COFF (windows) targets.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef X86COFF_MACHINEMODULEINFO_H
-#define X86COFF_MACHINEMODULEINFO_H
-
-#include "X86MachineFunctionInfo.h"
-#include "llvm/ADT/DenseSet.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-
-namespace llvm {
- class X86MachineFunctionInfo;
- class DataLayout;
-
-/// X86COFFMachineModuleInfo - This is a MachineModuleInfoImpl implementation
-/// for X86 COFF targets.
-class X86COFFMachineModuleInfo : public MachineModuleInfoImpl {
- DenseSet<MCSymbol const *> Externals;
-public:
- X86COFFMachineModuleInfo(const MachineModuleInfo &) {}
- virtual ~X86COFFMachineModuleInfo();
-
- void addExternalFunction(MCSymbol* Symbol) {
- Externals.insert(Symbol);
- }
-
- typedef DenseSet<MCSymbol const *>::const_iterator externals_iterator;
- externals_iterator externals_begin() const { return Externals.begin(); }
- externals_iterator externals_end() const { return Externals.end(); }
-};
-
-
-
-} // end namespace llvm
-
-#endif
diff --git a/contrib/llvm/lib/Target/X86/X86CallingConv.td b/contrib/llvm/lib/Target/X86/X86CallingConv.td
index a78b5c0..86c01bd 100644
--- a/contrib/llvm/lib/Target/X86/X86CallingConv.td
+++ b/contrib/llvm/lib/Target/X86/X86CallingConv.td
@@ -52,7 +52,7 @@ def RetCC_X86Common : CallingConv<[
// 512-bit vectors are returned in ZMM0 and ZMM1, when they fit. ZMM2 and ZMM3
// can only be used by ABI non-compliant code. This vector type is only
// supported while using the AVX-512 target feature.
- CCIfType<[v16i32, v8i64, v16f32, v8f64],
+ CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,
// MMX vector types are always returned in MM0. If the target doesn't have
@@ -252,7 +252,7 @@ def CC_X86_64_C : CallingConv<[
YMM4, YMM5, YMM6, YMM7]>>>>,
// The first 8 512-bit vector arguments are passed in ZMM registers.
- CCIfNotVarArg<CCIfType<[v16i32, v8i64, v16f32, v8f64],
+ CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
CCIfSubtarget<"hasAVX512()",
CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7]>>>>,
@@ -357,9 +357,16 @@ def CC_X86_64_WebKit_JS : CallingConv<[
// Promote i8/i16 arguments to i32.
CCIfType<[i8, i16], CCPromoteToType<i32>>,
- // Integer/FP values are always stored in stack slots that are 8 bytes in size
- // and 8-byte aligned.
- CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>
+ // Only the first integer argument is passed in register.
+ CCIfType<[i32], CCAssignToReg<[EAX]>>,
+ CCIfType<[i64], CCAssignToReg<[RAX]>>,
+
+ // The remaining integer arguments are passed on the stack. 32-bit integer and
+ // floating-point arguments are aligned to 4 bytes and stored in 4-byte slots.
+ // 64-bit integer and floating-point arguments are aligned to 8 bytes and
+ // stored in 8-byte stack slots.
+ CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
+ CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;
// No explicit register is specified for the AnyReg calling convention. The
@@ -453,18 +460,34 @@ def CC_X86_32_FastCall : CallingConv<[
CCDelegateTo<CC_X86_32_Common>
]>;
-def CC_X86_32_ThisCall : CallingConv<[
+def CC_X86_32_ThisCall_Common : CallingConv<[
+ // The first integer argument is passed in ECX
+ CCIfType<[i32], CCAssignToReg<[ECX]>>,
+
+ // Otherwise, same as everything else.
+ CCDelegateTo<CC_X86_32_Common>
+]>;
+
+def CC_X86_32_ThisCall_Mingw : CallingConv<[
+ // Promote i8/i16 arguments to i32.
+ CCIfType<[i8, i16], CCPromoteToType<i32>>,
+
+ CCDelegateTo<CC_X86_32_ThisCall_Common>
+]>;
+
+def CC_X86_32_ThisCall_Win : CallingConv<[
// Promote i8/i16 arguments to i32.
CCIfType<[i8, i16], CCPromoteToType<i32>>,
// Pass sret arguments indirectly through stack.
CCIfSRet<CCAssignToStack<4, 4>>,
- // The first integer argument is passed in ECX
- CCIfType<[i32], CCAssignToReg<[ECX]>>,
+ CCDelegateTo<CC_X86_32_ThisCall_Common>
+]>;
- // Otherwise, same as everything else.
- CCDelegateTo<CC_X86_32_Common>
+def CC_X86_32_ThisCall : CallingConv<[
+ CCIfSubtarget<"isTargetCygMing()", CCDelegateTo<CC_X86_32_ThisCall_Mingw>>,
+ CCDelegateTo<CC_X86_32_ThisCall_Win>
]>;
def CC_X86_32_FastCC : CallingConv<[
@@ -597,10 +620,26 @@ def CSR_64EHRet : CalleeSavedRegs<(add RAX, RDX, CSR_64)>;
def CSR_Win64 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12, R13, R14, R15,
(sequence "XMM%u", 6, 15))>;
-def CSR_MostRegs_64 : CalleeSavedRegs<(add RBX, RCX, RDX, RSI, RDI, R8, R9, R10,
+// All GPRs - except r11
+def CSR_64_RT_MostRegs : CalleeSavedRegs<(add CSR_64, RAX, RCX, RDX, RSI, RDI,
+ R8, R9, R10, RSP)>;
+
+// All registers - except r11
+def CSR_64_RT_AllRegs : CalleeSavedRegs<(add CSR_64_RT_MostRegs,
+ (sequence "XMM%u", 0, 15))>;
+def CSR_64_RT_AllRegs_AVX : CalleeSavedRegs<(add CSR_64_RT_MostRegs,
+ (sequence "YMM%u", 0, 15))>;
+
+def CSR_64_MostRegs : CalleeSavedRegs<(add RBX, RCX, RDX, RSI, RDI, R8, R9, R10,
R11, R12, R13, R14, R15, RBP,
(sequence "XMM%u", 0, 15))>;
+def CSR_64_AllRegs : CalleeSavedRegs<(add CSR_64_MostRegs, RAX, RSP,
+ (sequence "XMM%u", 16, 31))>;
+def CSR_64_AllRegs_AVX : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX, RSP,
+ (sequence "YMM%u", 0, 31)),
+ (sequence "XMM%u", 0, 15))>;
+
// Standard C + YMM6-15
def CSR_Win64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12,
R13, R14, R15,
@@ -618,6 +657,6 @@ def CSR_64_Intel_OCL_BI : CalleeSavedRegs<(add CSR_64,
def CSR_64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add CSR_64,
(sequence "YMM%u", 8, 15))>;
-def CSR_64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add CSR_64,
+def CSR_64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add RBX, RDI, RSI, R14, R15,
(sequence "ZMM%u", 16, 31),
K4, K5, K6, K7)>;
diff --git a/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp b/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp
index 14385ed..a3ae7ee 100644
--- a/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "x86-emitter"
#include "X86.h"
#include "X86InstrInfo.h"
#include "X86JITInfo.h"
@@ -36,6 +35,8 @@
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
+#define DEBUG_TYPE "x86-emitter"
+
STATISTIC(NumEmitted, "Number of machine instructions emitted");
namespace {
@@ -52,13 +53,13 @@ namespace {
public:
static char ID;
explicit Emitter(X86TargetMachine &tm, CodeEmitter &mce)
- : MachineFunctionPass(ID), II(0), TD(0), TM(tm),
+ : MachineFunctionPass(ID), II(nullptr), TD(nullptr), TM(tm),
MCE(mce), PICBaseOffset(0), Is64BitMode(false),
IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
- bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "X86 Machine Code Emitter";
}
@@ -76,7 +77,7 @@ namespace {
void emitInstruction(MachineInstr &MI, const MCInstrDesc *Desc);
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
AU.addRequired<MachineModuleInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
@@ -186,10 +187,6 @@ static unsigned determineREX(const MachineInstr &MI) {
}
switch (Desc.TSFlags & X86II::FormMask) {
- case X86II::MRMInitReg:
- if (X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0)))
- REX |= (1 << 0) | (1 << 2);
- break;
case X86II::MRMSrcReg: {
if (X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0)))
REX |= 1 << 2;
@@ -216,6 +213,7 @@ static unsigned determineREX(const MachineInstr &MI) {
}
break;
}
+ case X86II::MRMXm:
case X86II::MRM0m: case X86II::MRM1m:
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
@@ -453,7 +451,7 @@ void Emitter<CodeEmitter>::emitMemModRMByte(const MachineInstr &MI,
intptr_t PCAdj) {
const MachineOperand &Op3 = MI.getOperand(Op+3);
int DispVal = 0;
- const MachineOperand *DispForReloc = 0;
+ const MachineOperand *DispForReloc = nullptr;
// Figure out what sort of displacement we have to handle here.
if (Op3.isGlobal()) {
@@ -658,67 +656,20 @@ void Emitter<CodeEmitter>::emitOpcodePrefix(uint64_t TSFlags,
int MemOperand,
const MachineInstr &MI,
const MCInstrDesc *Desc) const {
- // Emit the lock opcode prefix as needed.
- if (Desc->TSFlags & X86II::LOCK)
- MCE.emitByte(0xF0);
-
- // Emit segment override opcode prefix as needed.
- emitSegmentOverridePrefix(TSFlags, MemOperand, MI);
-
- // Emit the repeat opcode prefix as needed.
- if ((Desc->TSFlags & X86II::Op0Mask) == X86II::REP)
- MCE.emitByte(0xF3);
-
- // Emit the address size opcode prefix as needed.
- bool need_address_override;
- if (TSFlags & X86II::AdSize) {
- need_address_override = true;
- } else if (MemOperand == -1) {
- need_address_override = false;
- } else if (Is64BitMode) {
- assert(!Is16BitMemOperand(MI, MemOperand));
- need_address_override = Is32BitMemOperand(MI, MemOperand);
- } else {
- assert(!Is64BitMemOperand(MI, MemOperand));
- need_address_override = Is16BitMemOperand(MI, MemOperand);
- }
-
- if (need_address_override)
- MCE.emitByte(0x67);
-
// Emit the operand size opcode prefix as needed.
- if (TSFlags & X86II::OpSize)
+ if (((TSFlags & X86II::OpSizeMask) >> X86II::OpSizeShift) == X86II::OpSize16)
MCE.emitByte(0x66);
- bool Need0FPrefix = false;
- switch (Desc->TSFlags & X86II::Op0Mask) {
- case X86II::TB: // Two-byte opcode prefix
- case X86II::T8: // 0F 38
- case X86II::TA: // 0F 3A
- case X86II::A6: // 0F A6
- case X86II::A7: // 0F A7
- Need0FPrefix = true;
- break;
- case X86II::REP: break; // already handled.
- case X86II::T8XS: // F3 0F 38
- case X86II::XS: // F3 0F
- MCE.emitByte(0xF3);
- Need0FPrefix = true;
- break;
- case X86II::T8XD: // F2 0F 38
- case X86II::TAXD: // F2 0F 3A
- case X86II::XD: // F2 0F
- MCE.emitByte(0xF2);
- Need0FPrefix = true;
- break;
- case X86II::D8: case X86II::D9: case X86II::DA: case X86II::DB:
- case X86II::DC: case X86II::DD: case X86II::DE: case X86II::DF:
- MCE.emitByte(0xD8+
- (((Desc->TSFlags & X86II::Op0Mask)-X86II::D8)
- >> X86II::Op0Shift));
- break; // Two-byte opcode prefix
- default: llvm_unreachable("Invalid prefix!");
- case 0: break; // No prefix!
+ switch (Desc->TSFlags & X86II::OpPrefixMask) {
+ case X86II::PD: // 66
+ MCE.emitByte(0x66);
+ break;
+ case X86II::XS: // F3
+ MCE.emitByte(0xF3);
+ break;
+ case X86II::XD: // F2
+ MCE.emitByte(0xF2);
+ break;
}
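
For a concrete instance of the simplified prefix logic: an SSE2 instruction such as paddd xmm0, xmm1 carries the mandatory 0x66 prefix (OpPrefix PD) and lives in the two-byte (0F) opcode map, so the emitter produces roughly these bytes (hand-assembled, for illustration):

    // 66 = mandatory/operand-size prefix, 0F = opcode-map escape,
    // FE = PADDD opcode, C1 = ModRM (xmm0 <- xmm1).
    static const unsigned char PADDD_XMM0_XMM1[] = { 0x66, 0x0F, 0xFE, 0xC1 };
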
// Handle REX prefix.
@@ -728,25 +679,21 @@ void Emitter<CodeEmitter>::emitOpcodePrefix(uint64_t TSFlags,
}
// 0x0F escape code must be emitted just before the opcode.
- if (Need0FPrefix)
+ switch (Desc->TSFlags & X86II::OpMapMask) {
+ case X86II::TB: // Two-byte opcode map
+ case X86II::T8: // 0F 38
+ case X86II::TA: // 0F 3A
MCE.emitByte(0x0F);
+ break;
+ }
- switch (Desc->TSFlags & X86II::Op0Mask) {
- case X86II::T8XD: // F2 0F 38
- case X86II::T8XS: // F3 0F 38
- case X86II::T8: // 0F 38
- MCE.emitByte(0x38);
- break;
- case X86II::TAXD: // F2 0F 38
- case X86II::TA: // 0F 3A
- MCE.emitByte(0x3A);
- break;
- case X86II::A6: // 0F A6
- MCE.emitByte(0xA6);
- break;
- case X86II::A7: // 0F A7
- MCE.emitByte(0xA7);
- break;
+ switch (Desc->TSFlags & X86II::OpMapMask) {
+ case X86II::T8: // 0F 38
+ MCE.emitByte(0x38);
+ break;
+ case X86II::TA: // 0F 3A
+ MCE.emitByte(0x3A);
+ break;
}
}
@@ -778,29 +725,19 @@ template<class CodeEmitter>
void Emitter<CodeEmitter>::emitSegmentOverridePrefix(uint64_t TSFlags,
int MemOperand,
const MachineInstr &MI) const {
- switch (TSFlags & X86II::SegOvrMask) {
- default: llvm_unreachable("Invalid segment!");
- case 0:
- // No segment override, check for explicit one on memory operand.
- if (MemOperand != -1) { // If the instruction has a memory operand.
- switch (MI.getOperand(MemOperand+X86::AddrSegmentReg).getReg()) {
- default: llvm_unreachable("Unknown segment register!");
- case 0: break;
- case X86::CS: MCE.emitByte(0x2E); break;
- case X86::SS: MCE.emitByte(0x36); break;
- case X86::DS: MCE.emitByte(0x3E); break;
- case X86::ES: MCE.emitByte(0x26); break;
- case X86::FS: MCE.emitByte(0x64); break;
- case X86::GS: MCE.emitByte(0x65); break;
- }
- }
- break;
- case X86II::FS:
- MCE.emitByte(0x64);
- break;
- case X86II::GS:
- MCE.emitByte(0x65);
- break;
+ if (MemOperand < 0)
+ return; // No memory operand
+
+ // Check for explicit segment override on memory operand.
+ switch (MI.getOperand(MemOperand+X86::AddrSegmentReg).getReg()) {
+ default: llvm_unreachable("Unknown segment register!");
+ case 0: break;
+ case X86::CS: MCE.emitByte(0x2E); break;
+ case X86::SS: MCE.emitByte(0x36); break;
+ case X86::DS: MCE.emitByte(0x3E); break;
+ case X86::ES: MCE.emitByte(0x26); break;
+ case X86::FS: MCE.emitByte(0x64); break;
+ case X86::GS: MCE.emitByte(0x65); break;
}
}
@@ -809,6 +746,8 @@ void Emitter<CodeEmitter>::emitVEXOpcodePrefix(uint64_t TSFlags,
int MemOperand,
const MachineInstr &MI,
const MCInstrDesc *Desc) const {
+ unsigned char Encoding = (TSFlags & X86II::EncodingMask) >>
+ X86II::EncodingShift;
bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3;
bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
@@ -839,9 +778,6 @@ void Emitter<CodeEmitter>::emitVEXOpcodePrefix(uint64_t TSFlags,
// opcode extension, or ignored, depending on the opcode byte)
unsigned char VEX_W = 0;
- // XOP: Use XOP prefix byte 0x8f instead of VEX.
- bool XOP = false;
-
// VEX_5M (VEX m-mmmmm field):
//
// 0b00000: Reserved for future use
@@ -852,7 +788,7 @@ void Emitter<CodeEmitter>::emitVEXOpcodePrefix(uint64_t TSFlags,
// 0b01000: XOP map select - 08h instructions with imm byte
// 0b01001: XOP map select - 09h instructions with no imm byte
// 0b01010: XOP map select - 0Ah instructions with imm dword
- unsigned char VEX_5M = 0x1;
+ unsigned char VEX_5M = 0;
// VEX_4V (VEX vvvv field): a register specifier
// (in 1's complement form) or 1111 if unused.
@@ -875,58 +811,28 @@ void Emitter<CodeEmitter>::emitVEXOpcodePrefix(uint64_t TSFlags,
//
unsigned char VEX_PP = 0;
- // Encode the operand size opcode prefix as needed.
- if (TSFlags & X86II::OpSize)
- VEX_PP = 0x01;
-
if ((TSFlags >> X86II::VEXShift) & X86II::VEX_W)
VEX_W = 1;
- if ((TSFlags >> X86II::VEXShift) & X86II::XOP)
- XOP = true;
-
if ((TSFlags >> X86II::VEXShift) & X86II::VEX_L)
VEX_L = 1;
- switch (TSFlags & X86II::Op0Mask) {
- default: llvm_unreachable("Invalid prefix!");
- case X86II::T8: // 0F 38
- VEX_5M = 0x2;
- break;
- case X86II::TA: // 0F 3A
- VEX_5M = 0x3;
- break;
- case X86II::T8XS: // F3 0F 38
- VEX_PP = 0x2;
- VEX_5M = 0x2;
- break;
- case X86II::T8XD: // F2 0F 38
- VEX_PP = 0x3;
- VEX_5M = 0x2;
- break;
- case X86II::TAXD: // F2 0F 3A
- VEX_PP = 0x3;
- VEX_5M = 0x3;
- break;
- case X86II::XS: // F3 0F
- VEX_PP = 0x2;
- break;
- case X86II::XD: // F2 0F
- VEX_PP = 0x3;
- break;
- case X86II::XOP8:
- VEX_5M = 0x8;
- break;
- case X86II::XOP9:
- VEX_5M = 0x9;
- break;
- case X86II::XOPA:
- VEX_5M = 0xA;
- break;
- case X86II::TB: // VEX_5M/VEX_PP already correct
- break;
+ switch (TSFlags & X86II::OpPrefixMask) {
+ default: break; // VEX_PP already correct
+ case X86II::PD: VEX_PP = 0x1; break; // 66
+ case X86II::XS: VEX_PP = 0x2; break; // F3
+ case X86II::XD: VEX_PP = 0x3; break; // F2
}
+ switch (TSFlags & X86II::OpMapMask) {
+ default: llvm_unreachable("Invalid prefix!");
+ case X86II::TB: VEX_5M = 0x1; break; // 0F
+ case X86II::T8: VEX_5M = 0x2; break; // 0F 38
+ case X86II::TA: VEX_5M = 0x3; break; // 0F 3A
+ case X86II::XOP8: VEX_5M = 0x8; break;
+ case X86II::XOP9: VEX_5M = 0x9; break;
+ case X86II::XOPA: VEX_5M = 0xA; break;
+ }
// Classify VEX_B, VEX_4V, VEX_R, VEX_X
unsigned NumOps = Desc->getNumOperands();
@@ -941,17 +847,8 @@ void Emitter<CodeEmitter>::emitVEXOpcodePrefix(uint64_t TSFlags,
}
switch (TSFlags & X86II::FormMask) {
- case X86II::MRMInitReg:
- // Duplicate register.
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_R = 0x0;
-
- if (HasVEX_4V)
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_B = 0x0;
- if (HasVEX_4VOp3)
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
+ default: llvm_unreachable("Unexpected form in emitVEXOpcodePrefix!");
+ case X86II::RawFrm:
break;
case X86II::MRMDestMem: {
// MRMDestMem instructions forms:
@@ -1069,8 +966,6 @@ void Emitter<CodeEmitter>::emitVEXOpcodePrefix(uint64_t TSFlags,
if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
VEX_B = 0x0;
break;
- default: // RawFrm
- break;
}
// Emit segment override opcode prefix as needed.
@@ -1087,16 +982,21 @@ void Emitter<CodeEmitter>::emitVEXOpcodePrefix(uint64_t TSFlags,
// | C5h | | R | vvvv | L | pp |
// +-----+ +-------------------+
//
+ // XOP uses a similar prefix:
+ // +-----+ +--------------+ +-------------------+
+ // | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
+ // +-----+ +--------------+ +-------------------+
unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
- if (VEX_B && VEX_X && !VEX_W && !XOP && (VEX_5M == 1)) { // 2 byte VEX prefix
+ // Can this use the 2 byte VEX prefix?
+ if (Encoding == X86II::VEX && VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) {
MCE.emitByte(0xC5);
MCE.emitByte(LastByte | (VEX_R << 7));
return;
}
// 3 byte VEX prefix
- MCE.emitByte(XOP ? 0x8F : 0xC4);
+ MCE.emitByte(Encoding == X86II::XOP ? 0x8F : 0xC4);
MCE.emitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M);
MCE.emitByte(LastByte | (VEX_W << 7));
}
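
Concretely, for the short path above: AT&T vaddps %xmm2, %xmm1, %xmm0 uses only low registers, W=0 and the 0F map, so it qualifies for the two-byte form; the three-byte C4 form (or 8F for XOP) is chosen otherwise. A sketch of the resulting bytes (hand-assembled, for illustration):

    // C5 = 2-byte VEX escape, F0 = R=1 | vvvv=~xmm1 | L=0 | pp=00,
    // 58 = VADDPS opcode, C2 = ModRM (xmm0 <- xmm2).
    static const unsigned char VADDPS_XMM0_XMM1_XMM2[] = { 0xC5, 0xF0, 0x58, 0xC2 };
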
@@ -1146,8 +1046,10 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
uint64_t TSFlags = Desc->TSFlags;
- // Is this instruction encoded using the AVX VEX prefix?
- bool HasVEXPrefix = (TSFlags >> X86II::VEXShift) & X86II::VEX;
+ // Encoding type for this instruction.
+ unsigned char Encoding = (TSFlags & X86II::EncodingMask) >>
+ X86II::EncodingShift;
+
// It uses the VEX.VVVV field?
bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3;
@@ -1158,7 +1060,35 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode);
if (MemoryOperand != -1) MemoryOperand += CurOp;
- if (!HasVEXPrefix)
+ // Emit the lock opcode prefix as needed.
+ if (Desc->TSFlags & X86II::LOCK)
+ MCE.emitByte(0xF0);
+
+ // Emit segment override opcode prefix as needed.
+ emitSegmentOverridePrefix(TSFlags, MemoryOperand, MI);
+
+ // Emit the repeat opcode prefix as needed.
+ if (Desc->TSFlags & X86II::REP)
+ MCE.emitByte(0xF3);
+
+ // Emit the address size opcode prefix as needed.
+ bool need_address_override;
+ if (TSFlags & X86II::AdSize) {
+ need_address_override = true;
+ } else if (MemoryOperand < 0) {
+ need_address_override = false;
+ } else if (Is64BitMode) {
+ assert(!Is16BitMemOperand(MI, MemoryOperand));
+ need_address_override = Is32BitMemOperand(MI, MemoryOperand);
+ } else {
+ assert(!Is64BitMemOperand(MI, MemoryOperand));
+ need_address_override = Is16BitMemOperand(MI, MemoryOperand);
+ }
+
+ if (need_address_override)
+ MCE.emitByte(0x67);
+
+ if (Encoding == 0)
emitOpcodePrefix(TSFlags, MemoryOperand, MI, Desc);
else
emitVEXOpcodePrefix(TSFlags, MemoryOperand, MI, Desc);
@@ -1183,10 +1113,16 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
case TargetOpcode::INLINEASM:
// We allow inline assembler nodes with empty bodies - they can
// implicitly define registers, which is ok for JIT.
- if (MI.getOperand(0).getSymbolName()[0])
+ if (MI.getOperand(0).getSymbolName()[0]) {
+ DebugLoc DL = MI.getDebugLoc();
+ DL.print(MI.getParent()->getParent()->getFunction()->getContext(),
+ llvm::errs());
report_fatal_error("JIT does not support inline asm!");
+ }
+ break;
+ case TargetOpcode::DBG_VALUE:
+ case TargetOpcode::CFI_INSTRUCTION:
break;
- case TargetOpcode::PROLOG_LABEL:
case TargetOpcode::GC_LABEL:
case TargetOpcode::EH_LABEL:
MCE.emitLabel(MI.getOperand(0).getMCSymbol());
@@ -1195,6 +1131,16 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
case TargetOpcode::IMPLICIT_DEF:
case TargetOpcode::KILL:
break;
+
+ case X86::SEH_PushReg:
+ case X86::SEH_SaveReg:
+ case X86::SEH_SaveXMM:
+ case X86::SEH_StackAlloc:
+ case X86::SEH_SetFrame:
+ case X86::SEH_PushFrame:
+ case X86::SEH_EndPrologue:
+ break;
+
case X86::MOVPC32r: {
// This emits the "call" portion of this pseudo instruction.
MCE.emitByte(BaseOpcode);
@@ -1353,6 +1299,7 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
break;
}
+ case X86II::MRMXr:
case X86II::MRM0r: case X86II::MRM1r:
case X86II::MRM2r: case X86II::MRM3r:
case X86II::MRM4r: case X86II::MRM5r:
@@ -1360,8 +1307,9 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
++CurOp;
MCE.emitByte(BaseOpcode);
+ uint64_t Form = (Desc->TSFlags & X86II::FormMask);
emitRegModRMByte(MI.getOperand(CurOp++).getReg(),
- (Desc->TSFlags & X86II::FormMask)-X86II::MRM0r);
+ (Form == X86II::MRMXr) ? 0 : Form-X86II::MRM0r);
if (CurOp == NumOps)
break;
@@ -1390,6 +1338,7 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
break;
}
+ case X86II::MRMXm:
case X86II::MRM0m: case X86II::MRM1m:
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
@@ -1401,7 +1350,8 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
X86II::getSizeOfImm(Desc->TSFlags) : 4) : 0;
MCE.emitByte(BaseOpcode);
- emitMemModRMByte(MI, CurOp, (Desc->TSFlags & X86II::FormMask)-X86II::MRM0m,
+ uint64_t Form = (Desc->TSFlags & X86II::FormMask);
+ emitMemModRMByte(MI, CurOp, (Form==X86II::MRMXm) ? 0 : Form - X86II::MRM0m,
PCAdj);
CurOp += X86::AddrNumOperands;
@@ -1432,41 +1382,81 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
break;
}
- case X86II::MRMInitReg:
+ case X86II::MRM_C0: case X86II::MRM_C1: case X86II::MRM_C2:
+ case X86II::MRM_C3: case X86II::MRM_C4: case X86II::MRM_C8:
+ case X86II::MRM_C9: case X86II::MRM_CA: case X86II::MRM_CB:
+ case X86II::MRM_D0: case X86II::MRM_D1: case X86II::MRM_D4:
+ case X86II::MRM_D5: case X86II::MRM_D6: case X86II::MRM_D8:
+ case X86II::MRM_D9: case X86II::MRM_DA: case X86II::MRM_DB:
+ case X86II::MRM_DC: case X86II::MRM_DD: case X86II::MRM_DE:
+ case X86II::MRM_DF: case X86II::MRM_E0: case X86II::MRM_E1:
+ case X86II::MRM_E2: case X86II::MRM_E3: case X86II::MRM_E4:
+ case X86II::MRM_E5: case X86II::MRM_E8: case X86II::MRM_E9:
+ case X86II::MRM_EA: case X86II::MRM_EB: case X86II::MRM_EC:
+ case X86II::MRM_ED: case X86II::MRM_EE: case X86II::MRM_F0:
+ case X86II::MRM_F1: case X86II::MRM_F2: case X86II::MRM_F3:
+ case X86II::MRM_F4: case X86II::MRM_F5: case X86II::MRM_F6:
+ case X86II::MRM_F7: case X86II::MRM_F8: case X86II::MRM_F9:
+ case X86II::MRM_FA: case X86II::MRM_FB: case X86II::MRM_FC:
+ case X86II::MRM_FD: case X86II::MRM_FE: case X86II::MRM_FF:
MCE.emitByte(BaseOpcode);
- // Duplicate register, used by things like MOV8r0 (aka xor reg,reg).
- emitRegModRMByte(MI.getOperand(CurOp).getReg(),
- getX86RegNum(MI.getOperand(CurOp).getReg()));
- ++CurOp;
- break;
- case X86II::MRM_C1:
- MCE.emitByte(BaseOpcode);
- MCE.emitByte(0xC1);
- break;
- case X86II::MRM_C8:
- MCE.emitByte(BaseOpcode);
- MCE.emitByte(0xC8);
- break;
- case X86II::MRM_C9:
- MCE.emitByte(BaseOpcode);
- MCE.emitByte(0xC9);
- break;
- case X86II::MRM_CA:
- MCE.emitByte(BaseOpcode);
- MCE.emitByte(0xCA);
- break;
- case X86II::MRM_CB:
- MCE.emitByte(BaseOpcode);
- MCE.emitByte(0xCB);
- break;
- case X86II::MRM_E8:
- MCE.emitByte(BaseOpcode);
- MCE.emitByte(0xE8);
- break;
- case X86II::MRM_F0:
- MCE.emitByte(BaseOpcode);
- MCE.emitByte(0xF0);
+ unsigned char MRM;
+ switch (TSFlags & X86II::FormMask) {
+ default: llvm_unreachable("Invalid Form");
+ case X86II::MRM_C0: MRM = 0xC0; break;
+ case X86II::MRM_C1: MRM = 0xC1; break;
+ case X86II::MRM_C2: MRM = 0xC2; break;
+ case X86II::MRM_C3: MRM = 0xC3; break;
+ case X86II::MRM_C4: MRM = 0xC4; break;
+ case X86II::MRM_C8: MRM = 0xC8; break;
+ case X86II::MRM_C9: MRM = 0xC9; break;
+ case X86II::MRM_CA: MRM = 0xCA; break;
+ case X86II::MRM_CB: MRM = 0xCB; break;
+ case X86II::MRM_D0: MRM = 0xD0; break;
+ case X86II::MRM_D1: MRM = 0xD1; break;
+ case X86II::MRM_D4: MRM = 0xD4; break;
+ case X86II::MRM_D5: MRM = 0xD5; break;
+ case X86II::MRM_D6: MRM = 0xD6; break;
+ case X86II::MRM_D8: MRM = 0xD8; break;
+ case X86II::MRM_D9: MRM = 0xD9; break;
+ case X86II::MRM_DA: MRM = 0xDA; break;
+ case X86II::MRM_DB: MRM = 0xDB; break;
+ case X86II::MRM_DC: MRM = 0xDC; break;
+ case X86II::MRM_DD: MRM = 0xDD; break;
+ case X86II::MRM_DE: MRM = 0xDE; break;
+ case X86II::MRM_DF: MRM = 0xDF; break;
+ case X86II::MRM_E0: MRM = 0xE0; break;
+ case X86II::MRM_E1: MRM = 0xE1; break;
+ case X86II::MRM_E2: MRM = 0xE2; break;
+ case X86II::MRM_E3: MRM = 0xE3; break;
+ case X86II::MRM_E4: MRM = 0xE4; break;
+ case X86II::MRM_E5: MRM = 0xE5; break;
+ case X86II::MRM_E8: MRM = 0xE8; break;
+ case X86II::MRM_E9: MRM = 0xE9; break;
+ case X86II::MRM_EA: MRM = 0xEA; break;
+ case X86II::MRM_EB: MRM = 0xEB; break;
+ case X86II::MRM_EC: MRM = 0xEC; break;
+ case X86II::MRM_ED: MRM = 0xED; break;
+ case X86II::MRM_EE: MRM = 0xEE; break;
+ case X86II::MRM_F0: MRM = 0xF0; break;
+ case X86II::MRM_F1: MRM = 0xF1; break;
+ case X86II::MRM_F2: MRM = 0xF2; break;
+ case X86II::MRM_F3: MRM = 0xF3; break;
+ case X86II::MRM_F4: MRM = 0xF4; break;
+ case X86II::MRM_F5: MRM = 0xF5; break;
+ case X86II::MRM_F6: MRM = 0xF6; break;
+ case X86II::MRM_F7: MRM = 0xF7; break;
+ case X86II::MRM_F8: MRM = 0xF8; break;
+ case X86II::MRM_F9: MRM = 0xF9; break;
+ case X86II::MRM_FA: MRM = 0xFA; break;
+ case X86II::MRM_FB: MRM = 0xFB; break;
+ case X86II::MRM_FC: MRM = 0xFC; break;
+ case X86II::MRM_FD: MRM = 0xFD; break;
+ case X86II::MRM_FE: MRM = 0xFE; break;
+ case X86II::MRM_FF: MRM = 0xFF; break;
+ }
+ MCE.emitByte(MRM);
break;
}
@@ -1501,7 +1491,7 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
#ifndef NDEBUG
dbgs() << "Cannot encode all operands of: " << MI << "\n";
#endif
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
MCE.processDebugLoc(MI.getDebugLoc(), false);
diff --git a/contrib/llvm/lib/Target/X86/X86FastISel.cpp b/contrib/llvm/lib/Target/X86/X86FastISel.cpp
index 97f96ab..2d494b4 100644
--- a/contrib/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FastISel.cpp
@@ -15,33 +15,35 @@
#include "X86.h"
#include "X86CallingConv.h"
-#include "X86ISelLowering.h"
#include "X86InstrBuilder.h"
+#include "X86InstrInfo.h"
+#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
+#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
namespace {
-class X86FastISel : public FastISel {
+class X86FastISel final : public FastISel {
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
const X86Subtarget *Subtarget;
@@ -62,28 +64,32 @@ public:
X86ScalarSSEf32 = Subtarget->hasSSE1();
}
- virtual bool TargetSelectInstruction(const Instruction *I);
+ bool TargetSelectInstruction(const Instruction *I) override;
/// \brief The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction. If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// possible.
- virtual bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
- const LoadInst *LI);
+ bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
+ const LoadInst *LI) override;
- virtual bool FastLowerArguments();
+ bool FastLowerArguments() override;
+ bool FastLowerCall(CallLoweringInfo &CLI) override;
+ bool FastLowerIntrinsicCall(const IntrinsicInst *II) override;
#include "X86GenFastISel.inc"
private:
bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT);
- bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, unsigned &RR);
+ bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, MachineMemOperand *MMO,
+ unsigned &ResultReg);
bool X86FastEmitStore(EVT VT, const Value *Val, const X86AddressMode &AM,
- bool Aligned = false);
- bool X86FastEmitStore(EVT VT, unsigned ValReg, const X86AddressMode &AM,
- bool Aligned = false);
+ MachineMemOperand *MMO = nullptr, bool Aligned = false);
+ bool X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
+ const X86AddressMode &AM,
+ MachineMemOperand *MMO = nullptr, bool Aligned = false);
bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
unsigned &ResultReg);
@@ -107,6 +113,12 @@ private:
bool X86SelectDivRem(const Instruction *I);
+ bool X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I);
+
+ bool X86FastEmitSSESelect(MVT RetVT, const Instruction *I);
+
+ bool X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I);
+
bool X86SelectSelect(const Instruction *I);
bool X86SelectTrunc(const Instruction *I);
@@ -114,11 +126,6 @@ private:
bool X86SelectFPExt(const Instruction *I);
bool X86SelectFPTrunc(const Instruction *I);
- bool X86VisitIntrinsicCall(const IntrinsicInst &I);
- bool X86SelectCall(const Instruction *I);
-
- bool DoSelectCall(const Instruction *I, const char *MemIntName);
-
const X86InstrInfo *getInstrInfo() const {
return getTargetMachine()->getInstrInfo();
}
@@ -128,11 +135,11 @@ private:
bool handleConstantAddresses(const Value *V, X86AddressMode &AM);
- unsigned TargetMaterializeConstant(const Constant *C);
+ unsigned TargetMaterializeConstant(const Constant *C) override;
- unsigned TargetMaterializeAlloca(const AllocaInst *C);
+ unsigned TargetMaterializeAlloca(const AllocaInst *C) override;
- unsigned TargetMaterializeFloatZero(const ConstantFP *CF);
+ unsigned TargetMaterializeFloatZero(const ConstantFP *CF) override;
/// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
/// computed in an SSE register, not on the X87 floating point stack.
@@ -147,10 +154,182 @@ private:
bool TryEmitSmallMemcpy(X86AddressMode DestAM,
X86AddressMode SrcAM, uint64_t Len);
+
+ bool foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,
+ const Value *Cond);
};
} // end anonymous namespace.
+static CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) {
+ // If both operands are the same, then try to optimize or fold the cmp.
+ CmpInst::Predicate Predicate = CI->getPredicate();
+ if (CI->getOperand(0) != CI->getOperand(1))
+ return Predicate;
+
+ switch (Predicate) {
+ default: llvm_unreachable("Invalid predicate!");
+ case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
+ case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
+ case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
+ case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
+ case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
+ case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
+ case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
+ case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
+ case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
+ case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
+ case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
+ case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
+ case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
+ case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
+ case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
+ case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;
+
+ case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
+ case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
+ case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
+ case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
+ case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
+ case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
+ case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
+ case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
+ case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
+ case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
+ }
+
+ return Predicate;
+}
+
+static std::pair<X86::CondCode, bool>
+getX86ConditionCode(CmpInst::Predicate Predicate) {
+ X86::CondCode CC = X86::COND_INVALID;
+ bool NeedSwap = false;
+ switch (Predicate) {
+ default: break;
+ // Floating-point Predicates
+ case CmpInst::FCMP_UEQ: CC = X86::COND_E; break;
+ case CmpInst::FCMP_OLT: NeedSwap = true; // fall-through
+ case CmpInst::FCMP_OGT: CC = X86::COND_A; break;
+ case CmpInst::FCMP_OLE: NeedSwap = true; // fall-through
+ case CmpInst::FCMP_OGE: CC = X86::COND_AE; break;
+ case CmpInst::FCMP_UGT: NeedSwap = true; // fall-through
+ case CmpInst::FCMP_ULT: CC = X86::COND_B; break;
+ case CmpInst::FCMP_UGE: NeedSwap = true; // fall-through
+ case CmpInst::FCMP_ULE: CC = X86::COND_BE; break;
+ case CmpInst::FCMP_ONE: CC = X86::COND_NE; break;
+ case CmpInst::FCMP_UNO: CC = X86::COND_P; break;
+ case CmpInst::FCMP_ORD: CC = X86::COND_NP; break;
+ case CmpInst::FCMP_OEQ: // fall-through
+ case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break;
+
+ // Integer Predicates
+ case CmpInst::ICMP_EQ: CC = X86::COND_E; break;
+ case CmpInst::ICMP_NE: CC = X86::COND_NE; break;
+ case CmpInst::ICMP_UGT: CC = X86::COND_A; break;
+ case CmpInst::ICMP_UGE: CC = X86::COND_AE; break;
+ case CmpInst::ICMP_ULT: CC = X86::COND_B; break;
+ case CmpInst::ICMP_ULE: CC = X86::COND_BE; break;
+ case CmpInst::ICMP_SGT: CC = X86::COND_G; break;
+ case CmpInst::ICMP_SGE: CC = X86::COND_GE; break;
+ case CmpInst::ICMP_SLT: CC = X86::COND_L; break;
+ case CmpInst::ICMP_SLE: CC = X86::COND_LE; break;
+ }
+
+ return std::make_pair(CC, NeedSwap);
+}
+
+static std::pair<unsigned, bool>
+getX86SSEConditionCode(CmpInst::Predicate Predicate) {
+ unsigned CC;
+ bool NeedSwap = false;
+
+ // SSE Condition code mapping:
+ // 0 - EQ
+ // 1 - LT
+ // 2 - LE
+ // 3 - UNORD
+ // 4 - NEQ
+ // 5 - NLT
+ // 6 - NLE
+ // 7 - ORD
+ switch (Predicate) {
+ default: llvm_unreachable("Unexpected predicate");
+ case CmpInst::FCMP_OEQ: CC = 0; break;
+ case CmpInst::FCMP_OGT: NeedSwap = true; // fall-through
+ case CmpInst::FCMP_OLT: CC = 1; break;
+ case CmpInst::FCMP_OGE: NeedSwap = true; // fall-through
+ case CmpInst::FCMP_OLE: CC = 2; break;
+ case CmpInst::FCMP_UNO: CC = 3; break;
+ case CmpInst::FCMP_UNE: CC = 4; break;
+ case CmpInst::FCMP_ULE: NeedSwap = true; // fall-through
+ case CmpInst::FCMP_UGE: CC = 5; break;
+ case CmpInst::FCMP_ULT: NeedSwap = true; // fall-through
+ case CmpInst::FCMP_UGT: CC = 6; break;
+ case CmpInst::FCMP_ORD: CC = 7; break;
+ case CmpInst::FCMP_UEQ:
+ case CmpInst::FCMP_ONE: CC = 8; break;
+ }
+
+ return std::make_pair(CC, NeedSwap);
+}
+
+/// \brief Check if it is possible to fold the condition from the XALU intrinsic
+/// into the user. The condition code will only be updated on success.
+bool X86FastISel::foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,
+ const Value *Cond) {
+ if (!isa<ExtractValueInst>(Cond))
+ return false;
+
+ const auto *EV = cast<ExtractValueInst>(Cond);
+ if (!isa<IntrinsicInst>(EV->getAggregateOperand()))
+ return false;
+
+ const auto *II = cast<IntrinsicInst>(EV->getAggregateOperand());
+ MVT RetVT;
+ const Function *Callee = II->getCalledFunction();
+ Type *RetTy =
+ cast<StructType>(Callee->getReturnType())->getTypeAtIndex(0U);
+ if (!isTypeLegal(RetTy, RetVT))
+ return false;
+
+ if (RetVT != MVT::i32 && RetVT != MVT::i64)
+ return false;
+
+ X86::CondCode TmpCC;
+ switch (II->getIntrinsicID()) {
+ default: return false;
+ case Intrinsic::sadd_with_overflow:
+ case Intrinsic::ssub_with_overflow:
+ case Intrinsic::smul_with_overflow:
+ case Intrinsic::umul_with_overflow: TmpCC = X86::COND_O; break;
+ case Intrinsic::uadd_with_overflow:
+ case Intrinsic::usub_with_overflow: TmpCC = X86::COND_B; break;
+ }
+
+ // Check if both instructions are in the same basic block.
+ if (II->getParent() != I->getParent())
+ return false;
+
+ // Make sure nothing is in the way
+ BasicBlock::const_iterator Start = I;
+ BasicBlock::const_iterator End = II;
+ for (auto Itr = std::prev(Start); Itr != End; --Itr) {
+ // We only expect extractvalue instructions between the intrinsic and the
+ // instruction to be selected.
+ if (!isa<ExtractValueInst>(Itr))
+ return false;
+
+ // Check that the extractvalue operand comes from the intrinsic.
+ const auto *EVI = cast<ExtractValueInst>(Itr);
+ if (EVI->getAggregateOperand() != II)
+ return false;
+ }
+
+ CC = TmpCC;
+ return true;
+}
+
bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
EVT evt = TLI.getValueType(Ty, /*HandleUnknown=*/true);
if (evt == MVT::Other || !evt.isSimple())
@@ -180,10 +359,10 @@ bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
/// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
/// Return true and the result register by reference if it is possible.
bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
- unsigned &ResultReg) {
+ MachineMemOperand *MMO, unsigned &ResultReg) {
// Get opcode and regclass of the output for the given load instruction.
unsigned Opc = 0;
- const TargetRegisterClass *RC = NULL;
+ const TargetRegisterClass *RC = nullptr;
switch (VT.getSimpleVT().SimpleTy) {
default: return false;
case MVT::i1:
@@ -228,8 +407,11 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
}
ResultReg = createResultReg(RC);
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(Opc), ResultReg), AM);
+ MachineInstrBuilder MIB =
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
+ addFullAddress(MIB, AM);
+ if (MMO)
+ MIB->addMemOperand(*FuncInfo.MF, MMO);
return true;
}
@@ -237,9 +419,9 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
/// type VT. The address is either pre-computed, consisting of a base ptr, Ptr
/// and a displacement offset, or a GlobalAddress,
/// i.e. V. Return true if it is possible.
-bool
-X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg,
- const X86AddressMode &AM, bool Aligned) {
+bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
+ const X86AddressMode &AM,
+ MachineMemOperand *MMO, bool Aligned) {
// Get opcode and regclass of the output for the given store instruction.
unsigned Opc = 0;
switch (VT.getSimpleVT().SimpleTy) {
@@ -248,8 +430,9 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg,
case MVT::i1: {
// Mask out all but lowest bit.
unsigned AndResult = createResultReg(&X86::GR8RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(X86::AND8ri), AndResult).addReg(ValReg).addImm(1);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(X86::AND8ri), AndResult)
+ .addReg(ValReg, getKillRegState(ValIsKill)).addImm(1);
ValReg = AndResult;
}
// FALLTHROUGH, handling i1 as i8.
@@ -288,16 +471,21 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg,
break;
}
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(Opc)), AM).addReg(ValReg);
+ MachineInstrBuilder MIB =
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
+ addFullAddress(MIB, AM).addReg(ValReg, getKillRegState(ValIsKill));
+ if (MMO)
+ MIB->addMemOperand(*FuncInfo.MF, MMO);
+
return true;
}
bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
- const X86AddressMode &AM, bool Aligned) {
+ const X86AddressMode &AM,
+ MachineMemOperand *MMO, bool Aligned) {
// Handle 'null' like i32/i64 0.
if (isa<ConstantPointerNull>(Val))
- Val = Constant::getNullValue(TD.getIntPtrType(Val->getContext()));
+ Val = Constant::getNullValue(DL.getIntPtrType(Val->getContext()));
// If this is a store of a simple constant, fold the constant into the store.
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
@@ -317,10 +505,12 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
}
if (Opc) {
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(Opc)), AM)
- .addImm(Signed ? (uint64_t) CI->getSExtValue() :
- CI->getZExtValue());
+ MachineInstrBuilder MIB =
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
+ addFullAddress(MIB, AM).addImm(Signed ? (uint64_t) CI->getSExtValue()
+ : CI->getZExtValue());
+ if (MMO)
+ MIB->addMemOperand(*FuncInfo.MF, MMO);
return true;
}
}
@@ -329,7 +519,8 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
if (ValReg == 0)
return false;
- return X86FastEmitStore(VT, ValReg, AM, Aligned);
+ bool ValKill = hasTrivialKill(Val);
+ return X86FastEmitStore(VT, ValReg, ValKill, AM, MMO, Aligned);
}
/// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
@@ -355,17 +546,8 @@ bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
return false;
// Can't handle TLS yet.
- if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
- if (GVar->isThreadLocal())
- return false;
-
- // Can't handle TLS yet, part 2 (this is slightly crazy, but this is how
- // it works...).
- if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
- if (const GlobalVariable *GVar =
- dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false)))
- if (GVar->isThreadLocal())
- return false;
+ if (GV->isThreadLocal())
+ return false;
// RIP-relative addresses can't have additional register operands, so if
// we've already folded stuff into the addressing mode, just force the
@@ -406,7 +588,7 @@ bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
} else {
// Issue load from stub.
unsigned Opc = 0;
- const TargetRegisterClass *RC = NULL;
+ const TargetRegisterClass *RC = nullptr;
X86AddressMode StubAM;
StubAM.Base.Reg = AM.Base.Reg;
StubAM.GV = GV;
@@ -428,7 +610,7 @@ bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
LoadReg = createResultReg(RC);
MachineInstrBuilder LoadMI =
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), LoadReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), LoadReg);
addFullAddress(LoadMI, StubAM);
// Ok, back to normal mode.
@@ -441,7 +623,7 @@ bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
// Now construct the final address. Note that the Disp, Scale,
// and Index values may already be set here.
AM.Base.Reg = LoadReg;
- AM.GV = 0;
+ AM.GV = nullptr;
return true;
}
}
@@ -467,7 +649,7 @@ bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
SmallVector<const Value *, 32> GEPs;
redo_gep:
- const User *U = NULL;
+ const User *U = nullptr;
unsigned Opcode = Instruction::UserOp1;
if (const Instruction *I = dyn_cast<Instruction>(V)) {
// Don't walk into other basic blocks; it's possible we haven't
@@ -547,14 +729,14 @@ redo_gep:
i != e; ++i, ++GTI) {
const Value *Op = *i;
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
- const StructLayout *SL = TD.getStructLayout(STy);
+ const StructLayout *SL = DL.getStructLayout(STy);
Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
continue;
}
// An array/variable index is always of the form i*S where S is the
// constant scale size. See if we can push the scale into immediates.
- uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
+ uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
for (;;) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
// Constant-offset addressing.
@@ -626,7 +808,7 @@ redo_gep:
/// X86SelectCallAddress - Attempt to fill in an address from the given value.
///
bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
- const User *U = NULL;
+ const User *U = nullptr;
unsigned Opcode = Instruction::UserOp1;
const Instruction *I = dyn_cast<Instruction>(V);
// Record if the value is defined in the same basic block.
@@ -696,8 +878,8 @@ bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
(AM.Base.Reg != 0 || AM.IndexReg != 0))
return false;
- // Can't handle DLLImport.
- if (GV->hasDLLImportLinkage())
+ // Can't handle DLL Import.
+ if (GV->hasDLLImportStorageClass())
return false;
// Can't handle TLS.
@@ -749,19 +931,24 @@ bool X86FastISel::X86SelectStore(const Instruction *I) {
if (S->isAtomic())
return false;
- unsigned SABIAlignment =
- TD.getABITypeAlignment(S->getValueOperand()->getType());
- bool Aligned = S->getAlignment() == 0 || S->getAlignment() >= SABIAlignment;
+ const Value *Val = S->getValueOperand();
+ const Value *Ptr = S->getPointerOperand();
MVT VT;
- if (!isTypeLegal(I->getOperand(0)->getType(), VT, /*AllowI1=*/true))
+ if (!isTypeLegal(Val->getType(), VT, /*AllowI1=*/true))
return false;
+ unsigned Alignment = S->getAlignment();
+ unsigned ABIAlignment = DL.getABITypeAlignment(Val->getType());
+ if (Alignment == 0) // Ensure that codegen never sees alignment 0
+ Alignment = ABIAlignment;
+ bool Aligned = Alignment >= ABIAlignment;
+
X86AddressMode AM;
- if (!X86SelectAddress(I->getOperand(1), AM))
+ if (!X86SelectAddress(Ptr, AM))
return false;
- return X86FastEmitStore(VT, I->getOperand(0), AM, Aligned);
+ return X86FastEmitStore(VT, Val, AM, createMachineMemOperandFor(I), Aligned);
}
/// X86SelectRet - Select and emit code to implement ret instructions.
@@ -864,7 +1051,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
// Avoid a cross-class copy. This is very unlikely.
if (!SrcRC->contains(DstReg))
return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
DstReg).addReg(SrcReg);
// Add register to return instruction.
@@ -876,19 +1063,19 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
// a virtual register in the entry block, so now we copy the value out
// and into %rax. We also do the same with %eax for Win32.
if (F.hasStructRetAttr() &&
- (Subtarget->is64Bit() || Subtarget->isTargetWindows())) {
+ (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC())) {
unsigned Reg = X86MFInfo->getSRetReturnReg();
assert(Reg &&
"SRetReturnReg should have been set in LowerFormalArguments()!");
unsigned RetReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
RetReg).addReg(Reg);
RetRegs.push_back(RetReg);
}
// Now emit the RET.
MachineInstrBuilder MIB =
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::RET));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL));
for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
MIB.addReg(RetRegs[i], RegState::Implicit);
return true;
@@ -896,25 +1083,29 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
/// X86SelectLoad - Select and emit code to implement load instructions.
///
-bool X86FastISel::X86SelectLoad(const Instruction *I) {
+bool X86FastISel::X86SelectLoad(const Instruction *I) {
+ const LoadInst *LI = cast<LoadInst>(I);
+
// Atomic loads need special handling.
- if (cast<LoadInst>(I)->isAtomic())
+ if (LI->isAtomic())
return false;
MVT VT;
- if (!isTypeLegal(I->getType(), VT, /*AllowI1=*/true))
+ if (!isTypeLegal(LI->getType(), VT, /*AllowI1=*/true))
return false;
+ const Value *Ptr = LI->getPointerOperand();
+
X86AddressMode AM;
- if (!X86SelectAddress(I->getOperand(0), AM))
+ if (!X86SelectAddress(Ptr, AM))
return false;
unsigned ResultReg = 0;
- if (X86FastEmitLoad(VT, AM, ResultReg)) {
- UpdateValueMap(I, ResultReg);
- return true;
- }
- return false;
+ if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg))
+ return false;
+
+ UpdateValueMap(I, ResultReg);
+ return true;
}
static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) {
@@ -961,14 +1152,14 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
// Handle 'null' like i32/i64 0.
if (isa<ConstantPointerNull>(Op1))
- Op1 = Constant::getNullValue(TD.getIntPtrType(Op0->getContext()));
+ Op1 = Constant::getNullValue(DL.getIntPtrType(Op0->getContext()));
// We have two options: compare with register or immediate. If the RHS of
// the compare is an immediate that we can fold into this compare, use
// CMPri, otherwise use CMPrr.
if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareImmOpc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CompareImmOpc))
.addReg(Op0Reg)
.addImm(Op1C->getSExtValue());
return true;
@@ -980,7 +1171,7 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
unsigned Op1Reg = getRegForValue(Op1);
if (Op1Reg == 0) return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareOpc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CompareOpc))
.addReg(Op0Reg)
.addReg(Op1Reg);
@@ -994,73 +1185,89 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
if (!isTypeLegal(I->getOperand(0)->getType(), VT))
return false;
- unsigned ResultReg = createResultReg(&X86::GR8RegClass);
- unsigned SetCCOpc;
- bool SwapArgs; // false -> compare Op0, Op1. true -> compare Op1, Op0.
- switch (CI->getPredicate()) {
- case CmpInst::FCMP_OEQ: {
- if (!X86FastEmitCompare(CI->getOperand(0), CI->getOperand(1), VT))
+ // Try to optimize or fold the cmp.
+ CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
+ unsigned ResultReg = 0;
+ switch (Predicate) {
+ default: break;
+ case CmpInst::FCMP_FALSE: {
+ ResultReg = createResultReg(&X86::GR32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0),
+ ResultReg);
+ ResultReg = FastEmitInst_extractsubreg(MVT::i8, ResultReg, /*Kill=*/true,
+ X86::sub_8bit);
+ if (!ResultReg)
return false;
+ break;
+ }
+ case CmpInst::FCMP_TRUE: {
+ ResultReg = createResultReg(&X86::GR8RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri),
+ ResultReg).addImm(1);
+ break;
+ }
+ }
- unsigned EReg = createResultReg(&X86::GR8RegClass);
- unsigned NPReg = createResultReg(&X86::GR8RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETEr), EReg);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(X86::SETNPr), NPReg);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
+ if (ResultReg) {
UpdateValueMap(I, ResultReg);
return true;
}
- case CmpInst::FCMP_UNE: {
- if (!X86FastEmitCompare(CI->getOperand(0), CI->getOperand(1), VT))
+
+ const Value *LHS = CI->getOperand(0);
+ const Value *RHS = CI->getOperand(1);
+
+ // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.
+ // We don't have to materialize a zero constant for this case and can just use
+ // %x again on the RHS.
+ if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
+ const auto *RHSC = dyn_cast<ConstantFP>(RHS);
+ if (RHSC && RHSC->isNullValue())
+ RHS = LHS;
+ }
+
+ // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
+ static unsigned SETFOpcTable[2][3] = {
+ { X86::SETEr, X86::SETNPr, X86::AND8rr },
+ { X86::SETNEr, X86::SETPr, X86::OR8rr }
+ };
+ unsigned *SETFOpc = nullptr;
+ switch (Predicate) {
+ default: break;
+ case CmpInst::FCMP_OEQ: SETFOpc = &SETFOpcTable[0][0]; break;
+ case CmpInst::FCMP_UNE: SETFOpc = &SETFOpcTable[1][0]; break;
+ }
+
+ ResultReg = createResultReg(&X86::GR8RegClass);
+ if (SETFOpc) {
+ if (!X86FastEmitCompare(LHS, RHS, VT))
return false;
- unsigned NEReg = createResultReg(&X86::GR8RegClass);
- unsigned PReg = createResultReg(&X86::GR8RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETNEr), NEReg);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETPr), PReg);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::OR8rr),ResultReg)
- .addReg(PReg).addReg(NEReg);
+ unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);
+ unsigned FlagReg2 = createResultReg(&X86::GR8RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[0]),
+ FlagReg1);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[1]),
+ FlagReg2);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[2]),
+ ResultReg).addReg(FlagReg1).addReg(FlagReg2);
UpdateValueMap(I, ResultReg);
return true;
}
- case CmpInst::FCMP_OGT: SwapArgs = false; SetCCOpc = X86::SETAr; break;
- case CmpInst::FCMP_OGE: SwapArgs = false; SetCCOpc = X86::SETAEr; break;
- case CmpInst::FCMP_OLT: SwapArgs = true; SetCCOpc = X86::SETAr; break;
- case CmpInst::FCMP_OLE: SwapArgs = true; SetCCOpc = X86::SETAEr; break;
- case CmpInst::FCMP_ONE: SwapArgs = false; SetCCOpc = X86::SETNEr; break;
- case CmpInst::FCMP_ORD: SwapArgs = false; SetCCOpc = X86::SETNPr; break;
- case CmpInst::FCMP_UNO: SwapArgs = false; SetCCOpc = X86::SETPr; break;
- case CmpInst::FCMP_UEQ: SwapArgs = false; SetCCOpc = X86::SETEr; break;
- case CmpInst::FCMP_UGT: SwapArgs = true; SetCCOpc = X86::SETBr; break;
- case CmpInst::FCMP_UGE: SwapArgs = true; SetCCOpc = X86::SETBEr; break;
- case CmpInst::FCMP_ULT: SwapArgs = false; SetCCOpc = X86::SETBr; break;
- case CmpInst::FCMP_ULE: SwapArgs = false; SetCCOpc = X86::SETBEr; break;
-
- case CmpInst::ICMP_EQ: SwapArgs = false; SetCCOpc = X86::SETEr; break;
- case CmpInst::ICMP_NE: SwapArgs = false; SetCCOpc = X86::SETNEr; break;
- case CmpInst::ICMP_UGT: SwapArgs = false; SetCCOpc = X86::SETAr; break;
- case CmpInst::ICMP_UGE: SwapArgs = false; SetCCOpc = X86::SETAEr; break;
- case CmpInst::ICMP_ULT: SwapArgs = false; SetCCOpc = X86::SETBr; break;
- case CmpInst::ICMP_ULE: SwapArgs = false; SetCCOpc = X86::SETBEr; break;
- case CmpInst::ICMP_SGT: SwapArgs = false; SetCCOpc = X86::SETGr; break;
- case CmpInst::ICMP_SGE: SwapArgs = false; SetCCOpc = X86::SETGEr; break;
- case CmpInst::ICMP_SLT: SwapArgs = false; SetCCOpc = X86::SETLr; break;
- case CmpInst::ICMP_SLE: SwapArgs = false; SetCCOpc = X86::SETLEr; break;
- default:
- return false;
- }
- const Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
+ X86::CondCode CC;
+ bool SwapArgs;
+ std::tie(CC, SwapArgs) = getX86ConditionCode(Predicate);
+ assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
+ unsigned Opc = X86::getSETFromCond(CC);
+
if (SwapArgs)
- std::swap(Op0, Op1);
+ std::swap(LHS, RHS);
- // Emit a compare of Op0/Op1.
- if (!X86FastEmitCompare(Op0, Op1, VT))
+ // Emit a compare of LHS/RHS.
+ if (!X86FastEmitCompare(LHS, RHS, VT))
return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(SetCCOpc), ResultReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -1097,11 +1304,11 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
}
unsigned Result32 = createResultReg(&X86::GR32RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovInst), Result32)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovInst), Result32)
.addReg(ResultReg);
ResultReg = createResultReg(&X86::GR64RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::SUBREG_TO_REG),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::SUBREG_TO_REG),
ResultReg)
.addImm(0).addReg(Result32).addImm(X86::sub_32bit);
} else if (DstVT != MVT::i8) {
@@ -1126,73 +1333,88 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
// Fold the common case of a conditional branch with a comparison
// in the same block (values defined on other blocks may not have
// initialized registers).
+ X86::CondCode CC;
if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
if (CI->hasOneUse() && CI->getParent() == I->getParent()) {
EVT VT = TLI.getValueType(CI->getOperand(0)->getType());
+ // Try to optimize or fold the cmp.
+ CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
+ switch (Predicate) {
+ default: break;
+ case CmpInst::FCMP_FALSE: FastEmitBranch(FalseMBB, DbgLoc); return true;
+ case CmpInst::FCMP_TRUE: FastEmitBranch(TrueMBB, DbgLoc); return true;
+ }
+
+ const Value *CmpLHS = CI->getOperand(0);
+ const Value *CmpRHS = CI->getOperand(1);
+
+ // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x,
+ // 0.0.
+ // We don't have to materialize a zero constant for this case and can just
+ // use %x again on the RHS.
+ if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
+ const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);
+ if (CmpRHSC && CmpRHSC->isNullValue())
+ CmpRHS = CmpLHS;
+ }
+
// Try to take advantage of fallthrough opportunities.
- CmpInst::Predicate Predicate = CI->getPredicate();
if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
std::swap(TrueMBB, FalseMBB);
Predicate = CmpInst::getInversePredicate(Predicate);
}
- bool SwapArgs; // false -> compare Op0, Op1. true -> compare Op1, Op0.
- unsigned BranchOpc; // Opcode to jump on, e.g. "X86::JA"
-
+ // FCMP_OEQ and FCMP_UNE cannot be expressed with a single flag/condition
+ // code check. Instead two branch instructions are required to check all
+ // the flags. First we change the predicate to a supported condition code,
+ // which will be the first branch. Later on we will emit the second
+ // branch.
+ bool NeedExtraBranch = false;
switch (Predicate) {
+ default: break;
case CmpInst::FCMP_OEQ:
- std::swap(TrueMBB, FalseMBB);
- Predicate = CmpInst::FCMP_UNE;
- // FALL THROUGH
- case CmpInst::FCMP_UNE: SwapArgs = false; BranchOpc = X86::JNE_4; break;
- case CmpInst::FCMP_OGT: SwapArgs = false; BranchOpc = X86::JA_4; break;
- case CmpInst::FCMP_OGE: SwapArgs = false; BranchOpc = X86::JAE_4; break;
- case CmpInst::FCMP_OLT: SwapArgs = true; BranchOpc = X86::JA_4; break;
- case CmpInst::FCMP_OLE: SwapArgs = true; BranchOpc = X86::JAE_4; break;
- case CmpInst::FCMP_ONE: SwapArgs = false; BranchOpc = X86::JNE_4; break;
- case CmpInst::FCMP_ORD: SwapArgs = false; BranchOpc = X86::JNP_4; break;
- case CmpInst::FCMP_UNO: SwapArgs = false; BranchOpc = X86::JP_4; break;
- case CmpInst::FCMP_UEQ: SwapArgs = false; BranchOpc = X86::JE_4; break;
- case CmpInst::FCMP_UGT: SwapArgs = true; BranchOpc = X86::JB_4; break;
- case CmpInst::FCMP_UGE: SwapArgs = true; BranchOpc = X86::JBE_4; break;
- case CmpInst::FCMP_ULT: SwapArgs = false; BranchOpc = X86::JB_4; break;
- case CmpInst::FCMP_ULE: SwapArgs = false; BranchOpc = X86::JBE_4; break;
-
- case CmpInst::ICMP_EQ: SwapArgs = false; BranchOpc = X86::JE_4; break;
- case CmpInst::ICMP_NE: SwapArgs = false; BranchOpc = X86::JNE_4; break;
- case CmpInst::ICMP_UGT: SwapArgs = false; BranchOpc = X86::JA_4; break;
- case CmpInst::ICMP_UGE: SwapArgs = false; BranchOpc = X86::JAE_4; break;
- case CmpInst::ICMP_ULT: SwapArgs = false; BranchOpc = X86::JB_4; break;
- case CmpInst::ICMP_ULE: SwapArgs = false; BranchOpc = X86::JBE_4; break;
- case CmpInst::ICMP_SGT: SwapArgs = false; BranchOpc = X86::JG_4; break;
- case CmpInst::ICMP_SGE: SwapArgs = false; BranchOpc = X86::JGE_4; break;
- case CmpInst::ICMP_SLT: SwapArgs = false; BranchOpc = X86::JL_4; break;
- case CmpInst::ICMP_SLE: SwapArgs = false; BranchOpc = X86::JLE_4; break;
- default:
- return false;
+ std::swap(TrueMBB, FalseMBB); // fall-through
+ case CmpInst::FCMP_UNE:
+ NeedExtraBranch = true;
+ Predicate = CmpInst::FCMP_ONE;
+ break;
}
- const Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
+ bool SwapArgs;
+ unsigned BranchOpc;
+ std::tie(CC, SwapArgs) = getX86ConditionCode(Predicate);
+ assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
+
+ BranchOpc = X86::GetCondBranchFromCond(CC);
if (SwapArgs)
- std::swap(Op0, Op1);
+ std::swap(CmpLHS, CmpRHS);
// Emit a compare of the LHS and RHS, setting the flags.
- if (!X86FastEmitCompare(Op0, Op1, VT))
+ if (!X86FastEmitCompare(CmpLHS, CmpRHS, VT))
return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BranchOpc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BranchOpc))
.addMBB(TrueMBB);
- if (Predicate == CmpInst::FCMP_UNE) {
- // X86 requires a second branch to handle UNE (and OEQ,
- // which is mapped to UNE above).
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JP_4))
+ // X86 requires a second branch to handle UNE (and OEQ, which is mapped
+ // to UNE above).
+ if (NeedExtraBranch) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JP_4))
.addMBB(TrueMBB);
}
- FastEmitBranch(FalseMBB, DL);
- FuncInfo.MBB->addSuccessor(TrueMBB);
+ // Obtain the branch weight and add the TrueBB to the successor list.
+ uint32_t BranchWeight = 0;
+ if (FuncInfo.BPI)
+ BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),
+ TrueMBB->getBasicBlock());
+ FuncInfo.MBB->addSuccessor(TrueMBB, BranchWeight);
+
+ // Emits an unconditional branch to the FalseBB, obtains the branch
+ // weight, and adds it to the successor list.
+ FastEmitBranch(FalseMBB, DbgLoc);
+
return true;
}
} else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
@@ -1212,7 +1434,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
if (TestOpc) {
unsigned OpReg = getRegForValue(TI->getOperand(0));
if (OpReg == 0) return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TestOpc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TestOpc))
.addReg(OpReg).addImm(1);
unsigned JmpOpc = X86::JNE_4;
@@ -1221,13 +1443,35 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
JmpOpc = X86::JE_4;
}
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(JmpOpc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(JmpOpc))
.addMBB(TrueMBB);
- FastEmitBranch(FalseMBB, DL);
- FuncInfo.MBB->addSuccessor(TrueMBB);
+ FastEmitBranch(FalseMBB, DbgLoc);
+ uint32_t BranchWeight = 0;
+ if (FuncInfo.BPI)
+ BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),
+ TrueMBB->getBasicBlock());
+ FuncInfo.MBB->addSuccessor(TrueMBB, BranchWeight);
return true;
}
}
+ } else if (foldX86XALUIntrinsic(CC, BI, BI->getCondition())) {
+ // Fake request the condition; otherwise the intrinsic might be completely
+ // optimized away.
+ unsigned TmpReg = getRegForValue(BI->getCondition());
+ if (TmpReg == 0)
+ return false;
+
+ unsigned BranchOpc = X86::GetCondBranchFromCond(CC);
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BranchOpc))
+ .addMBB(TrueMBB);
+ FastEmitBranch(FalseMBB, DbgLoc);
+ uint32_t BranchWeight = 0;
+ if (FuncInfo.BPI)
+ BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),
+ TrueMBB->getBasicBlock());
+ FuncInfo.MBB->addSuccessor(TrueMBB, BranchWeight);
+ return true;
}
// Otherwise do a clumsy setcc and re-test it.
@@ -1236,18 +1480,22 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
unsigned OpReg = getRegForValue(BI->getCondition());
if (OpReg == 0) return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8ri))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
.addReg(OpReg).addImm(1);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JNE_4))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JNE_4))
.addMBB(TrueMBB);
- FastEmitBranch(FalseMBB, DL);
- FuncInfo.MBB->addSuccessor(TrueMBB);
+ FastEmitBranch(FalseMBB, DbgLoc);
+ uint32_t BranchWeight = 0;
+ if (FuncInfo.BPI)
+ BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),
+ TrueMBB->getBasicBlock());
+ FuncInfo.MBB->addSuccessor(TrueMBB, BranchWeight);
return true;
}
bool X86FastISel::X86SelectShift(const Instruction *I) {
unsigned CReg = 0, OpReg = 0;
- const TargetRegisterClass *RC = NULL;
+ const TargetRegisterClass *RC = nullptr;
if (I->getType()->isIntegerTy(8)) {
CReg = X86::CL;
RC = &X86::GR8RegClass;
@@ -1297,18 +1545,18 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
unsigned Op1Reg = getRegForValue(I->getOperand(1));
if (Op1Reg == 0) return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
CReg).addReg(Op1Reg);
// The shift instruction uses X86::CL. If we defined a super-register
// of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
if (CReg != X86::CL)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::KILL), X86::CL)
.addReg(CReg, RegState::Kill);
unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpReg), ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(OpReg), ResultReg)
.addReg(Op0Reg);
UpdateValueMap(I, ResultReg);
return true;
@@ -1409,38 +1657,38 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
return false;
// Move op0 into low-order input register.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(OpEntry.OpCopy), TypeEntry.LowInReg).addReg(Op0Reg);
// Zero-extend or sign-extend into high-order input register.
if (OpEntry.OpSignExtend) {
if (OpEntry.IsOpSigned)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(OpEntry.OpSignExtend));
else {
unsigned Zero32 = createResultReg(&X86::GR32RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::MOV32r0), Zero32);
// Copy the zero into the appropriate sub/super/identical physical
// register. Unfortunately the operations needed are not uniform enough to
// fit neatly into the table above.
if (VT.SimpleTy == MVT::i16) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Copy), TypeEntry.HighInReg)
.addReg(Zero32, 0, X86::sub_16bit);
} else if (VT.SimpleTy == MVT::i32) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Copy), TypeEntry.HighInReg)
.addReg(Zero32);
} else if (VT.SimpleTy == MVT::i64) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
.addImm(0).addReg(Zero32).addImm(X86::sub_32bit);
}
}
}
// Generate the DIV/IDIV instruction.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(OpEntry.OpDivRem)).addReg(Op1Reg);
// For i8 remainder, we can't reference AH directly, as we'll end
// up with bogus copies like %R9B = COPY %AH. Reference AX
@@ -1456,11 +1704,11 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) {
unsigned SourceSuperReg = createResultReg(&X86::GR16RegClass);
unsigned ResultSuperReg = createResultReg(&X86::GR16RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Copy), SourceSuperReg).addReg(X86::AX);
// Shift AX right by 8 bits instead of using AH.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SHR16ri),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SHR16ri),
ResultSuperReg).addReg(SourceSuperReg).addImm(8);
// Now reference the 8-bit subreg of the result.
@@ -1470,7 +1718,7 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
// Copy the result out of the physreg if we haven't already.
if (!ResultReg) {
ResultReg = createResultReg(TypeEntry.RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Copy), ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Copy), ResultReg)
.addReg(OpEntry.DivRemResultReg);
}
UpdateValueMap(I, ResultReg);
@@ -1478,45 +1726,319 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
return true;
}
-bool X86FastISel::X86SelectSelect(const Instruction *I) {
- MVT VT;
- if (!isTypeLegal(I->getType(), VT))
+/// \brief Emit a conditional move instruction (if they are supported) to lower
+/// the select.
+bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
+ // Check if the subtarget supports these instructions.
+ if (!Subtarget->hasCMov())
return false;
- // We only use cmov here, if we don't have a cmov instruction bail.
- if (!Subtarget->hasCMov()) return false;
+ // FIXME: Add support for i8.
+ if (RetVT < MVT::i16 || RetVT > MVT::i64)
+ return false;
- unsigned Opc = 0;
- const TargetRegisterClass *RC = NULL;
- if (VT == MVT::i16) {
- Opc = X86::CMOVE16rr;
- RC = &X86::GR16RegClass;
- } else if (VT == MVT::i32) {
- Opc = X86::CMOVE32rr;
- RC = &X86::GR32RegClass;
- } else if (VT == MVT::i64) {
- Opc = X86::CMOVE64rr;
- RC = &X86::GR64RegClass;
- } else {
+ const Value *Cond = I->getOperand(0);
+ const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
+ bool NeedTest = true;
+ X86::CondCode CC = X86::COND_NE;
+
+ // Optimize conditions coming from a compare if both instructions are in the
+ // same basic block (values defined in other basic blocks may not have
+ // initialized registers).
+ const auto *CI = dyn_cast<CmpInst>(Cond);
+ if (CI && (CI->getParent() == I->getParent())) {
+ CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
+
+ // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
+ static unsigned SETFOpcTable[2][3] = {
+ { X86::SETNPr, X86::SETEr , X86::TEST8rr },
+ { X86::SETPr, X86::SETNEr, X86::OR8rr }
+ };
+ unsigned *SETFOpc = nullptr;
+ switch (Predicate) {
+ default: break;
+ case CmpInst::FCMP_OEQ:
+ SETFOpc = &SETFOpcTable[0][0];
+ Predicate = CmpInst::ICMP_NE;
+ break;
+ case CmpInst::FCMP_UNE:
+ SETFOpc = &SETFOpcTable[1][0];
+ Predicate = CmpInst::ICMP_NE;
+ break;
+ }
+
+ bool NeedSwap;
+ std::tie(CC, NeedSwap) = getX86ConditionCode(Predicate);
+ assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
+
+ const Value *CmpLHS = CI->getOperand(0);
+ const Value *CmpRHS = CI->getOperand(1);
+ if (NeedSwap)
+ std::swap(CmpLHS, CmpRHS);
+
+ EVT CmpVT = TLI.getValueType(CmpLHS->getType());
+ // Emit a compare of the LHS and RHS, setting the flags.
+ if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT))
+ return false;
+
+ if (SETFOpc) {
+ unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);
+ unsigned FlagReg2 = createResultReg(&X86::GR8RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[0]),
+ FlagReg1);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[1]),
+ FlagReg2);
+ auto const &II = TII.get(SETFOpc[2]);
+ if (II.getNumDefs()) {
+ unsigned TmpReg = createResultReg(&X86::GR8RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, TmpReg)
+ .addReg(FlagReg2).addReg(FlagReg1);
+ } else {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+ .addReg(FlagReg2).addReg(FlagReg1);
+ }
+ }
+ NeedTest = false;
+ } else if (foldX86XALUIntrinsic(CC, I, Cond)) {
+ // Fake request the condition; otherwise the intrinsic might be completely
+ // optimized away.
+ unsigned TmpReg = getRegForValue(Cond);
+ if (TmpReg == 0)
+ return false;
+
+ NeedTest = false;
+ }
+
+ if (NeedTest) {
+ // Selects operate on i1, however, CondReg is 8 bits wide and may contain
+ // garbage. Indeed, only the least significant bit is supposed to be
+ // accurate. If we read more than the lsb, we may see non-zero values
+ // whereas the lsb is zero. Therefore, we have to truncate CondReg to i1 for
+ // the select. This is achieved by performing TEST against 1.
+ unsigned CondReg = getRegForValue(Cond);
+ if (CondReg == 0)
+ return false;
+ bool CondIsKill = hasTrivialKill(Cond);
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
+ .addReg(CondReg, getKillRegState(CondIsKill)).addImm(1);
+ }
+
+ const Value *LHS = I->getOperand(1);
+ const Value *RHS = I->getOperand(2);
+
+ unsigned RHSReg = getRegForValue(RHS);
+ bool RHSIsKill = hasTrivialKill(RHS);
+
+ unsigned LHSReg = getRegForValue(LHS);
+ bool LHSIsKill = hasTrivialKill(LHS);
+
+ if (!LHSReg || !RHSReg)
+ return false;
+
+ unsigned Opc = X86::getCMovFromCond(CC, RC->getSize());
+ unsigned ResultReg = FastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,
+ LHSReg, LHSIsKill);
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
+/// \brief Emit SSE instructions to lower the select.
+///
+/// Try to use SSE1/SSE2 instructions to simulate a select without branches.
+/// This lowers fp selects into a CMP/AND/ANDN/OR sequence when the necessary
+/// SSE instructions are available.
+bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
+ // Optimize conditions coming from a compare if both instructions are in the
+ // same basic block (values defined in other basic blocks may not have
+ // initialized registers).
+ const auto *CI = dyn_cast<FCmpInst>(I->getOperand(0));
+ if (!CI || (CI->getParent() != I->getParent()))
return false;
+
+ if (I->getType() != CI->getOperand(0)->getType() ||
+ !((Subtarget->hasSSE1() && RetVT == MVT::f32) ||
+ (Subtarget->hasSSE2() && RetVT == MVT::f64) ))
+ return false;
+
+ const Value *CmpLHS = CI->getOperand(0);
+ const Value *CmpRHS = CI->getOperand(1);
+ CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
+
+ // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.
+ // We don't have to materialize a zero constant for this case and can just use
+ // %x again on the RHS.
+ if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
+ const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);
+ if (CmpRHSC && CmpRHSC->isNullValue())
+ CmpRHS = CmpLHS;
}
- unsigned Op0Reg = getRegForValue(I->getOperand(0));
- if (Op0Reg == 0) return false;
- unsigned Op1Reg = getRegForValue(I->getOperand(1));
- if (Op1Reg == 0) return false;
- unsigned Op2Reg = getRegForValue(I->getOperand(2));
- if (Op2Reg == 0) return false;
+ unsigned CC;
+ bool NeedSwap;
+ std::tie(CC, NeedSwap) = getX86SSEConditionCode(Predicate);
+ if (CC > 7)
+ return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr))
- .addReg(Op0Reg).addReg(Op0Reg);
- unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
- .addReg(Op1Reg).addReg(Op2Reg);
+ if (NeedSwap)
+ std::swap(CmpLHS, CmpRHS);
+
+ static unsigned OpcTable[2][2][4] = {
+ { { X86::CMPSSrr, X86::FsANDPSrr, X86::FsANDNPSrr, X86::FsORPSrr },
+ { X86::VCMPSSrr, X86::VFsANDPSrr, X86::VFsANDNPSrr, X86::VFsORPSrr } },
+ { { X86::CMPSDrr, X86::FsANDPDrr, X86::FsANDNPDrr, X86::FsORPDrr },
+ { X86::VCMPSDrr, X86::VFsANDPDrr, X86::VFsANDNPDrr, X86::VFsORPDrr } }
+ };
+
+ bool HasAVX = Subtarget->hasAVX();
+ unsigned *Opc = nullptr;
+ switch (RetVT.SimpleTy) {
+ default: return false;
+ case MVT::f32: Opc = &OpcTable[0][HasAVX][0]; break;
+ case MVT::f64: Opc = &OpcTable[1][HasAVX][0]; break;
+ }
+
+ const Value *LHS = I->getOperand(1);
+ const Value *RHS = I->getOperand(2);
+
+ unsigned LHSReg = getRegForValue(LHS);
+ bool LHSIsKill = hasTrivialKill(LHS);
+
+ unsigned RHSReg = getRegForValue(RHS);
+ bool RHSIsKill = hasTrivialKill(RHS);
+
+ unsigned CmpLHSReg = getRegForValue(CmpLHS);
+ bool CmpLHSIsKill = hasTrivialKill(CmpLHS);
+
+ unsigned CmpRHSReg = getRegForValue(CmpRHS);
+ bool CmpRHSIsKill = hasTrivialKill(CmpRHS);
+
+ if (!LHSReg || !RHSReg || !CmpLHSReg || !CmpRHSReg)
+ return false;
+
+ const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
+ unsigned CmpReg = FastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
+ CmpRHSReg, CmpRHSIsKill, CC);
+ unsigned AndReg = FastEmitInst_rr(Opc[1], RC, CmpReg, /*IsKill=*/false,
+ LHSReg, LHSIsKill);
+ unsigned AndNReg = FastEmitInst_rr(Opc[2], RC, CmpReg, /*IsKill=*/true,
+ RHSReg, RHSIsKill);
+ unsigned ResultReg = FastEmitInst_rr(Opc[3], RC, AndNReg, /*IsKill=*/true,
+ AndReg, /*IsKill=*/true);
UpdateValueMap(I, ResultReg);
return true;
}
+bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
+ // These are pseudo CMOV instructions and will be later expanded into control-
+ // flow.
+ unsigned Opc;
+ switch (RetVT.SimpleTy) {
+ default: return false;
+ case MVT::i8: Opc = X86::CMOV_GR8; break;
+ case MVT::i16: Opc = X86::CMOV_GR16; break;
+ case MVT::i32: Opc = X86::CMOV_GR32; break;
+ case MVT::f32: Opc = X86::CMOV_FR32; break;
+ case MVT::f64: Opc = X86::CMOV_FR64; break;
+ }
+
+ const Value *Cond = I->getOperand(0);
+ X86::CondCode CC = X86::COND_NE;
+
+ // Optimize conditions coming from a compare if both instructions are in the
+ // same basic block (values defined in other basic blocks may not have
+ // initialized registers).
+ const auto *CI = dyn_cast<CmpInst>(Cond);
+ if (CI && (CI->getParent() == I->getParent())) {
+ bool NeedSwap;
+ std::tie(CC, NeedSwap) = getX86ConditionCode(CI->getPredicate());
+ if (CC > X86::LAST_VALID_COND)
+ return false;
+
+ const Value *CmpLHS = CI->getOperand(0);
+ const Value *CmpRHS = CI->getOperand(1);
+
+ if (NeedSwap)
+ std::swap(CmpLHS, CmpRHS);
+
+ EVT CmpVT = TLI.getValueType(CmpLHS->getType());
+ if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT))
+ return false;
+ } else {
+ unsigned CondReg = getRegForValue(Cond);
+ if (CondReg == 0)
+ return false;
+ bool CondIsKill = hasTrivialKill(Cond);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
+ .addReg(CondReg, getKillRegState(CondIsKill)).addImm(1);
+ }
+
+ const Value *LHS = I->getOperand(1);
+ const Value *RHS = I->getOperand(2);
+
+ unsigned LHSReg = getRegForValue(LHS);
+ bool LHSIsKill = hasTrivialKill(LHS);
+
+ unsigned RHSReg = getRegForValue(RHS);
+ bool RHSIsKill = hasTrivialKill(RHS);
+
+ if (!LHSReg || !RHSReg)
+ return false;
+
+ const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
+
+ unsigned ResultReg =
+ FastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
+bool X86FastISel::X86SelectSelect(const Instruction *I) {
+ MVT RetVT;
+ if (!isTypeLegal(I->getType(), RetVT))
+ return false;
+
+ // Check if we can fold the select.
+ if (const auto *CI = dyn_cast<CmpInst>(I->getOperand(0))) {
+ CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
+ const Value *Opnd = nullptr;
+ switch (Predicate) {
+ default: break;
+ case CmpInst::FCMP_FALSE: Opnd = I->getOperand(2); break;
+ case CmpInst::FCMP_TRUE: Opnd = I->getOperand(1); break;
+ }
+ // No need for a select anymore - this is an unconditional move.
+ if (Opnd) {
+ unsigned OpReg = getRegForValue(Opnd);
+ if (OpReg == 0)
+ return false;
+ bool OpIsKill = hasTrivialKill(Opnd);
+ const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
+ unsigned ResultReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg)
+ .addReg(OpReg, getKillRegState(OpIsKill));
+ UpdateValueMap(I, ResultReg);
+ return true;
+ }
+ }
+
+ // First try to use real conditional move instructions.
+ if (X86FastEmitCMoveSelect(RetVT, I))
+ return true;
+
+ // Try to use a sequence of SSE instructions to simulate a conditional move.
+ if (X86FastEmitSSESelect(RetVT, I))
+ return true;
+
+ // Fall-back to pseudo conditional move instructions, which will be later
+ // converted to control-flow.
+ if (X86FastEmitPseudoSelect(RetVT, I))
+ return true;
+
+ return false;
+}
+
bool X86FastISel::X86SelectFPExt(const Instruction *I) {
// fpext from float to double.
if (X86ScalarSSEf64 &&
@@ -1526,7 +2048,7 @@ bool X86FastISel::X86SelectFPExt(const Instruction *I) {
unsigned OpReg = getRegForValue(V);
if (OpReg == 0) return false;
unsigned ResultReg = createResultReg(&X86::FR64RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::CVTSS2SDrr), ResultReg)
.addReg(OpReg);
UpdateValueMap(I, ResultReg);
@@ -1545,7 +2067,7 @@ bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
unsigned OpReg = getRegForValue(V);
if (OpReg == 0) return false;
unsigned ResultReg = createResultReg(&X86::FR32RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::CVTSD2SSrr), ResultReg)
.addReg(OpReg);
UpdateValueMap(I, ResultReg);
@@ -1585,7 +2107,7 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
(const TargetRegisterClass*)&X86::GR16_ABCDRegClass :
(const TargetRegisterClass*)&X86::GR32_ABCDRegClass;
unsigned CopyReg = createResultReg(CopyRC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
CopyReg).addReg(InputReg);
InputReg = CopyReg;
}
@@ -1628,8 +2150,8 @@ bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,
}
unsigned Reg;
- bool RV = X86FastEmitLoad(VT, SrcAM, Reg);
- RV &= X86FastEmitStore(VT, Reg, DestAM);
+ bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg);
+ RV &= X86FastEmitStore(VT, Reg, /*Kill=*/true, DestAM);
assert(RV && "Failed to emit load or store??");
unsigned Size = VT.getSizeInBits()/8;
@@ -1641,24 +2163,88 @@ bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,
return true;
}
-bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
+static bool isCommutativeIntrinsic(IntrinsicInst const *II) {
+ switch (II->getIntrinsicID()) {
+ case Intrinsic::sadd_with_overflow:
+ case Intrinsic::uadd_with_overflow:
+ case Intrinsic::smul_with_overflow:
+ case Intrinsic::umul_with_overflow:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
// FIXME: Handle more intrinsics.
- switch (I.getIntrinsicID()) {
+ switch (II->getIntrinsicID()) {
default: return false;
+ case Intrinsic::frameaddress: {
+ Type *RetTy = II->getCalledFunction()->getReturnType();
+
+ MVT VT;
+ if (!isTypeLegal(RetTy, VT))
+ return false;
+
+ unsigned Opc;
+ const TargetRegisterClass *RC = nullptr;
+
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Invalid result type for frameaddress.");
+ case MVT::i32: Opc = X86::MOV32rm; RC = &X86::GR32RegClass; break;
+ case MVT::i64: Opc = X86::MOV64rm; RC = &X86::GR64RegClass; break;
+ }
+
+ // This needs to be set before we call getFrameRegister, otherwise we get
+ // the wrong frame register.
+ MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
+ MFI->setFrameAddressIsTaken(true);
+
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
+ unsigned FrameReg = RegInfo->getFrameRegister(*(FuncInfo.MF));
+ assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
+ (FrameReg == X86::EBP && VT == MVT::i32)) &&
+ "Invalid Frame Register!");
+
+ // Always make a copy of the frame register to a vreg first, so that we
+ // never directly reference the frame register (the TwoAddressInstruction-
+ // Pass doesn't like that).
+ unsigned SrcReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), SrcReg).addReg(FrameReg);
+
+ // Now recursively load from the frame address.
+ // movq (%rbp), %rax
+ // movq (%rax), %rax
+ // movq (%rax), %rax
+ // ...
+ unsigned DestReg;
+ unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
+ while (Depth--) {
+ DestReg = createResultReg(RC);
+ addDirectMem(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc), DestReg), SrcReg);
+ SrcReg = DestReg;
+ }
+
+ UpdateValueMap(II, SrcReg);
+ return true;
+ }
case Intrinsic::memcpy: {
- const MemCpyInst &MCI = cast<MemCpyInst>(I);
+ const MemCpyInst *MCI = cast<MemCpyInst>(II);
// Don't handle volatile or variable length memcpys.
- if (MCI.isVolatile())
+ if (MCI->isVolatile())
return false;
- if (isa<ConstantInt>(MCI.getLength())) {
+ if (isa<ConstantInt>(MCI->getLength())) {
// Small memcpy's are common enough that we want to do them
// without a call if possible.
- uint64_t Len = cast<ConstantInt>(MCI.getLength())->getZExtValue();
+ uint64_t Len = cast<ConstantInt>(MCI->getLength())->getZExtValue();
if (IsMemcpySmall(Len)) {
X86AddressMode DestAM, SrcAM;
- if (!X86SelectAddress(MCI.getRawDest(), DestAM) ||
- !X86SelectAddress(MCI.getRawSource(), SrcAM))
+ if (!X86SelectAddress(MCI->getRawDest(), DestAM) ||
+ !X86SelectAddress(MCI->getRawSource(), SrcAM))
return false;
TryEmitSmallMemcpy(DestAM, SrcAM, Len);
return true;
@@ -1666,35 +2252,37 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
}
unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
- if (!MCI.getLength()->getType()->isIntegerTy(SizeWidth))
+ if (!MCI->getLength()->getType()->isIntegerTy(SizeWidth))
return false;
- if (MCI.getSourceAddressSpace() > 255 || MCI.getDestAddressSpace() > 255)
+ if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255)
return false;
- return DoSelectCall(&I, "memcpy");
+ return LowerCallTo(II, "memcpy", II->getNumArgOperands() - 2);
}
case Intrinsic::memset: {
- const MemSetInst &MSI = cast<MemSetInst>(I);
+ const MemSetInst *MSI = cast<MemSetInst>(II);
- if (MSI.isVolatile())
+ if (MSI->isVolatile())
return false;
unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
- if (!MSI.getLength()->getType()->isIntegerTy(SizeWidth))
+ if (!MSI->getLength()->getType()->isIntegerTy(SizeWidth))
return false;
- if (MSI.getDestAddressSpace() > 255)
+ if (MSI->getDestAddressSpace() > 255)
return false;
- return DoSelectCall(&I, "memset");
+ return LowerCallTo(II, "memset", II->getNumArgOperands() - 2);
}
case Intrinsic::stackprotector: {
// Emit code to store the stack guard onto the stack.
EVT PtrTy = TLI.getPointerTy();
- const Value *Op1 = I.getArgOperand(0); // The guard's value.
- const AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
+ const Value *Op1 = II->getArgOperand(0); // The guard's value.
+ const AllocaInst *Slot = cast<AllocaInst>(II->getArgOperand(1));
+
+ MFI.setStackProtectorIndex(FuncInfo.StaticAllocaMap[Slot]);
// Grab the frame index.
X86AddressMode AM;
@@ -1703,7 +2291,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
return true;
}
case Intrinsic::dbg_declare: {
- const DbgDeclareInst *DI = cast<DbgDeclareInst>(&I);
+ const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
X86AddressMode AM;
assert(DI->getAddress() && "Null address should be checked earlier!");
if (!X86SelectAddress(DI->getAddress(), AM))
@@ -1711,57 +2299,239 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
// FIXME may need to add RegState::Debug to any registers produced,
// although ESP/EBP should be the only ones at the moment.
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II), AM).
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II), AM).
addImm(0).addMetadata(DI->getVariable());
return true;
}
case Intrinsic::trap: {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TRAP));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TRAP));
return true;
}
- case Intrinsic::sadd_with_overflow:
- case Intrinsic::uadd_with_overflow: {
- // FIXME: Should fold immediates.
+ case Intrinsic::sqrt: {
+ if (!Subtarget->hasSSE1())
+ return false;
- // Replace "add with overflow" intrinsics with an "add" instruction followed
- // by a seto/setc instruction.
- const Function *Callee = I.getCalledFunction();
- Type *RetTy =
- cast<StructType>(Callee->getReturnType())->getTypeAtIndex(unsigned(0));
+ Type *RetTy = II->getCalledFunction()->getReturnType();
MVT VT;
if (!isTypeLegal(RetTy, VT))
return false;
- const Value *Op1 = I.getArgOperand(0);
- const Value *Op2 = I.getArgOperand(1);
- unsigned Reg1 = getRegForValue(Op1);
- unsigned Reg2 = getRegForValue(Op2);
+ // Unfortunately we can't use FastEmit_r, because the AVX version of FSQRT
+ // is not generated by FastISel yet.
+ // FIXME: Update this code once tablegen can handle it.
+ static const unsigned SqrtOpc[2][2] = {
+ {X86::SQRTSSr, X86::VSQRTSSr},
+ {X86::SQRTSDr, X86::VSQRTSDr}
+ };
+ bool HasAVX = Subtarget->hasAVX();
+ unsigned Opc;
+ const TargetRegisterClass *RC;
+ switch (VT.SimpleTy) {
+ default: return false;
+ case MVT::f32: Opc = SqrtOpc[0][HasAVX]; RC = &X86::FR32RegClass; break;
+ case MVT::f64: Opc = SqrtOpc[1][HasAVX]; RC = &X86::FR64RegClass; break;
+ }
- if (Reg1 == 0 || Reg2 == 0)
- // FIXME: Handle values *not* in registers.
+ const Value *SrcVal = II->getArgOperand(0);
+ unsigned SrcReg = getRegForValue(SrcVal);
+
+ if (SrcReg == 0)
return false;
- unsigned OpC = 0;
- if (VT == MVT::i32)
- OpC = X86::ADD32rr;
- else if (VT == MVT::i64)
- OpC = X86::ADD64rr;
- else
+ unsigned ImplicitDefReg = 0;
+ if (HasAVX) {
+ ImplicitDefReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
+ }
+
+ unsigned ResultReg = createResultReg(RC);
+ MachineInstrBuilder MIB;
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
+ ResultReg);
+
+ if (ImplicitDefReg)
+ MIB.addReg(ImplicitDefReg);
+
+ MIB.addReg(SrcReg);
+
+ UpdateValueMap(II, ResultReg);
+ return true;
+ }
+ case Intrinsic::sadd_with_overflow:
+ case Intrinsic::uadd_with_overflow:
+ case Intrinsic::ssub_with_overflow:
+ case Intrinsic::usub_with_overflow:
+ case Intrinsic::smul_with_overflow:
+ case Intrinsic::umul_with_overflow: {
+ // This implements the basic lowering of the xalu with overflow intrinsics
+ // into add/sub/mul followed by either seto or setb.
+ const Function *Callee = II->getCalledFunction();
+ auto *Ty = cast<StructType>(Callee->getReturnType());
+ Type *RetTy = Ty->getTypeAtIndex(0U);
+ Type *CondTy = Ty->getTypeAtIndex(1);
+
+ MVT VT;
+ if (!isTypeLegal(RetTy, VT))
+ return false;
+
+ if (VT < MVT::i8 || VT > MVT::i64)
+ return false;
+
+ const Value *LHS = II->getArgOperand(0);
+ const Value *RHS = II->getArgOperand(1);
+
+ // Canonicalize immediate to the RHS.
+ if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) &&
+ isCommutativeIntrinsic(II))
+ std::swap(LHS, RHS);
+
+ unsigned BaseOpc, CondOpc;
+ switch (II->getIntrinsicID()) {
+ default: llvm_unreachable("Unexpected intrinsic!");
+ case Intrinsic::sadd_with_overflow:
+ BaseOpc = ISD::ADD; CondOpc = X86::SETOr; break;
+ case Intrinsic::uadd_with_overflow:
+ BaseOpc = ISD::ADD; CondOpc = X86::SETBr; break;
+ case Intrinsic::ssub_with_overflow:
+ BaseOpc = ISD::SUB; CondOpc = X86::SETOr; break;
+ case Intrinsic::usub_with_overflow:
+ BaseOpc = ISD::SUB; CondOpc = X86::SETBr; break;
+ case Intrinsic::smul_with_overflow:
+ BaseOpc = X86ISD::SMUL; CondOpc = X86::SETOr; break;
+ case Intrinsic::umul_with_overflow:
+ BaseOpc = X86ISD::UMUL; CondOpc = X86::SETOr; break;
+ }
+
+ unsigned LHSReg = getRegForValue(LHS);
+ if (LHSReg == 0)
return false;
+ bool LHSIsKill = hasTrivialKill(LHS);
+
+ unsigned ResultReg = 0;
+ // Check if we have an immediate version.
+ if (auto const *C = dyn_cast<ConstantInt>(RHS)) {
+ ResultReg = FastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
+ C->getZExtValue());
+ }
+
+ unsigned RHSReg;
+ bool RHSIsKill;
+ if (!ResultReg) {
+ RHSReg = getRegForValue(RHS);
+ if (RHSReg == 0)
+ return false;
+ RHSIsKill = hasTrivialKill(RHS);
+ ResultReg = FastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg,
+ RHSIsKill);
+ }
+
+ // FastISel doesn't have a pattern for all X86::MUL*r and X86::IMUL*r. Emit
+ // it manually.
+ if (BaseOpc == X86ISD::UMUL && !ResultReg) {
+ static const unsigned MULOpc[] =
+ { X86::MUL8r, X86::MUL16r, X86::MUL32r, X86::MUL64r };
+ static const unsigned Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX };
+ // First copy the first operand into RAX, which is an implicit input to
+ // the X86::MUL*r instruction.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8])
+ .addReg(LHSReg, getKillRegState(LHSIsKill));
+ ResultReg = FastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
+ TLI.getRegClassFor(VT), RHSReg, RHSIsKill);
+ } else if (BaseOpc == X86ISD::SMUL && !ResultReg) {
+ static const unsigned MULOpc[] =
+ { X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr };
+ if (VT == MVT::i8) {
+ // Copy the first operand into AL, which is an implicit input to the
+ // X86::IMUL8r instruction.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), X86::AL)
+ .addReg(LHSReg, getKillRegState(LHSIsKill));
+ ResultReg = FastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg,
+ RHSIsKill);
+ } else
+ ResultReg = FastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
+ TLI.getRegClassFor(VT), LHSReg, LHSIsKill,
+ RHSReg, RHSIsKill);
+ }
- // The call to CreateRegs builds two sequential registers, to store the
- // both the returned values.
- unsigned ResultReg = FuncInfo.CreateRegs(I.getType());
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg)
- .addReg(Reg1).addReg(Reg2);
+ if (!ResultReg)
+ return false;
- unsigned Opc = X86::SETBr;
- if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow)
- Opc = X86::SETOr;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg+1);
+ unsigned ResultReg2 = FuncInfo.CreateRegs(CondTy);
+ assert((ResultReg+1) == ResultReg2 && "Nonconsecutive result registers.");
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CondOpc),
+ ResultReg2);
- UpdateValueMap(&I, ResultReg, 2);
+ UpdateValueMap(II, ResultReg, 2);
+ return true;
+ }
+ case Intrinsic::x86_sse_cvttss2si:
+ case Intrinsic::x86_sse_cvttss2si64:
+ case Intrinsic::x86_sse2_cvttsd2si:
+ case Intrinsic::x86_sse2_cvttsd2si64: {
+ bool IsInputDouble;
+ switch (II->getIntrinsicID()) {
+ default: llvm_unreachable("Unexpected intrinsic.");
+ case Intrinsic::x86_sse_cvttss2si:
+ case Intrinsic::x86_sse_cvttss2si64:
+ if (!Subtarget->hasSSE1())
+ return false;
+ IsInputDouble = false;
+ break;
+ case Intrinsic::x86_sse2_cvttsd2si:
+ case Intrinsic::x86_sse2_cvttsd2si64:
+ if (!Subtarget->hasSSE2())
+ return false;
+ IsInputDouble = true;
+ break;
+ }
+
+ Type *RetTy = II->getCalledFunction()->getReturnType();
+ MVT VT;
+ if (!isTypeLegal(RetTy, VT))
+ return false;
+
+ static const unsigned CvtOpc[2][2][2] = {
+ { { X86::CVTTSS2SIrr, X86::VCVTTSS2SIrr },
+ { X86::CVTTSS2SI64rr, X86::VCVTTSS2SI64rr } },
+ { { X86::CVTTSD2SIrr, X86::VCVTTSD2SIrr },
+ { X86::CVTTSD2SI64rr, X86::VCVTTSD2SI64rr } }
+ };
+ bool HasAVX = Subtarget->hasAVX();
+ unsigned Opc;
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected result type.");
+ case MVT::i32: Opc = CvtOpc[IsInputDouble][0][HasAVX]; break;
+ case MVT::i64: Opc = CvtOpc[IsInputDouble][1][HasAVX]; break;
+ }
+
+ // Check if we can fold insertelement instructions into the convert.
+ const Value *Op = II->getArgOperand(0);
+ while (auto *IE = dyn_cast<InsertElementInst>(Op)) {
+ const Value *Index = IE->getOperand(2);
+ if (!isa<ConstantInt>(Index))
+ break;
+ unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
+
+ if (Idx == 0) {
+ Op = IE->getOperand(1);
+ break;
+ }
+ Op = IE->getOperand(0);
+ }
+
+ unsigned Reg = getRegForValue(Op);
+ if (Reg == 0)
+ return false;
+
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addReg(Reg);
+
+ UpdateValueMap(II, ResultReg);
return true;
}
}
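
The overflow cases above lower the llvm.*.with.overflow intrinsics into an ADD/SUB/MUL whose second result is produced by a SETO or SETB on the flags (SETB for the unsigned add/sub carry, SETO otherwise). The following is a minimal standalone sketch of the add case's two flag conditions in plain C++; the helper names are invented and it is only meant to illustrate what the emitted seto/setb observe.

#include <cstdint>

// uadd.with.overflow lowers to ADD + SETB: the carry flag fires when the
// unsigned sum wraps around.
static bool UAddOverflows(uint32_t LHS, uint32_t RHS, uint32_t &Sum) {
  Sum = LHS + RHS;
  return Sum < LHS;
}

// sadd.with.overflow lowers to ADD + SETO: the overflow flag fires when both
// inputs share a sign and the result's sign differs from it.
static bool SAddOverflows(int32_t LHS, int32_t RHS, int32_t &Sum) {
  uint32_t U = (uint32_t)LHS + (uint32_t)RHS;
  Sum = (int32_t)U;
  return ((LHS ^ Sum) & (RHS ^ Sum)) < 0;
}

// For example, UAddOverflows(0xFFFFFFFFu, 1, S) leaves S == 0 and returns
// true, matching ADD32rr followed by SETBr reading the carry flag.
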
@@ -1786,107 +2556,128 @@ bool X86FastISel::FastLowerArguments() {
return false;
// Only handle simple cases, i.e. up to 6 i32/i64 scalar arguments.
- unsigned Idx = 1;
- for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
- I != E; ++I, ++Idx) {
- if (Idx > 6)
- return false;
-
+ unsigned GPRCnt = 0;
+ unsigned FPRCnt = 0;
+ unsigned Idx = 0;
+ for (auto const &Arg : F->args()) {
+ // The first argument is at index 1.
+ ++Idx;
if (F->getAttributes().hasAttribute(Idx, Attribute::ByVal) ||
F->getAttributes().hasAttribute(Idx, Attribute::InReg) ||
F->getAttributes().hasAttribute(Idx, Attribute::StructRet) ||
F->getAttributes().hasAttribute(Idx, Attribute::Nest))
return false;
- Type *ArgTy = I->getType();
+ Type *ArgTy = Arg.getType();
if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
return false;
EVT ArgVT = TLI.getValueType(ArgTy);
if (!ArgVT.isSimple()) return false;
switch (ArgVT.getSimpleVT().SimpleTy) {
+ default: return false;
case MVT::i32:
case MVT::i64:
+ ++GPRCnt;
+ break;
+ case MVT::f32:
+ case MVT::f64:
+ if (!Subtarget->hasSSE1())
+ return false;
+ ++FPRCnt;
break;
- default:
- return false;
}
+
+ if (GPRCnt > 6)
+ return false;
+
+ if (FPRCnt > 8)
+ return false;
}
- static const uint16_t GPR32ArgRegs[] = {
+ static const MCPhysReg GPR32ArgRegs[] = {
X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
};
- static const uint16_t GPR64ArgRegs[] = {
+ static const MCPhysReg GPR64ArgRegs[] = {
X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8 , X86::R9
};
+ static const MCPhysReg XMMArgRegs[] = {
+ X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
+ X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
+ };
- Idx = 0;
- const TargetRegisterClass *RC32 = TLI.getRegClassFor(MVT::i32);
- const TargetRegisterClass *RC64 = TLI.getRegClassFor(MVT::i64);
- for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
- I != E; ++I, ++Idx) {
- bool is32Bit = TLI.getValueType(I->getType()) == MVT::i32;
- const TargetRegisterClass *RC = is32Bit ? RC32 : RC64;
- unsigned SrcReg = is32Bit ? GPR32ArgRegs[Idx] : GPR64ArgRegs[Idx];
+ unsigned GPRIdx = 0;
+ unsigned FPRIdx = 0;
+ for (auto const &Arg : F->args()) {
+ MVT VT = TLI.getSimpleValueType(Arg.getType());
+ const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
+ unsigned SrcReg;
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type.");
+ case MVT::i32: SrcReg = GPR32ArgRegs[GPRIdx++]; break;
+ case MVT::i64: SrcReg = GPR64ArgRegs[GPRIdx++]; break;
+ case MVT::f32: // fall-through
+ case MVT::f64: SrcReg = XMMArgRegs[FPRIdx++]; break;
+ }
unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
// FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
// Without this, EmitLiveInCopies may eliminate the livein if its only
// use is a bitcast (which isn't turned into an instruction).
unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(DstReg, getKillRegState(true));
- UpdateValueMap(I, ResultReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg)
+ .addReg(DstReg, getKillRegState(true));
+ UpdateValueMap(&Arg, ResultReg);
}
return true;
}
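
FastLowerArguments above first counts the integer and SSE scalar arguments, rejecting anything beyond 6 GPRs or 8 XMM registers, and then hands each argument the next free register in order. A rough standalone model of that assignment, using invented names and the register lists from the tables above:

#include <string>
#include <vector>

enum class ArgKind { I32, I64, F32, F64 };

// Mirrors the GPRIdx/FPRIdx walk: integer scalars take the next SysV GPR and
// f32/f64 scalars take the next XMM register. Assumes the caller has already
// rejected more than 6 integer or 8 FP arguments, as the pass does.
static std::vector<std::string> assignArgRegs(const std::vector<ArgKind> &Args) {
  static const char *GPR32[] = {"EDI", "ESI", "EDX", "ECX", "R8D", "R9D"};
  static const char *GPR64[] = {"RDI", "RSI", "RDX", "RCX", "R8", "R9"};
  static const char *XMM[] = {"XMM0", "XMM1", "XMM2", "XMM3",
                              "XMM4", "XMM5", "XMM6", "XMM7"};
  std::vector<std::string> Regs;
  unsigned GPRIdx = 0, FPRIdx = 0;
  for (ArgKind K : Args) {
    switch (K) {
    case ArgKind::I32: Regs.push_back(GPR32[GPRIdx++]); break;
    case ArgKind::I64: Regs.push_back(GPR64[GPRIdx++]); break;
    case ArgKind::F32:
    case ArgKind::F64: Regs.push_back(XMM[FPRIdx++]); break;
    }
  }
  return Regs;
}

// E.g. assignArgRegs({ArgKind::I32, ArgKind::F64, ArgKind::I64, ArgKind::F32})
// yields EDI, XMM0, RSI, XMM1, because integer and FP arguments consume
// separate register sequences.
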
-bool X86FastISel::X86SelectCall(const Instruction *I) {
- const CallInst *CI = cast<CallInst>(I);
- const Value *Callee = CI->getCalledValue();
-
- // Can't handle inline asm yet.
- if (isa<InlineAsm>(Callee))
- return false;
-
- // Handle intrinsic calls.
- if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI))
- return X86VisitIntrinsicCall(*II);
-
- // Allow SelectionDAG isel to handle tail calls.
- if (cast<CallInst>(I)->isTailCall())
- return false;
-
- return DoSelectCall(I, 0);
-}
-
-static unsigned computeBytesPoppedByCallee(const X86Subtarget &Subtarget,
- const ImmutableCallSite &CS) {
- if (Subtarget.is64Bit())
+static unsigned computeBytesPoppedByCallee(const X86Subtarget *Subtarget,
+ CallingConv::ID CC,
+ ImmutableCallSite *CS) {
+ if (Subtarget->is64Bit())
return 0;
- if (Subtarget.isTargetWindows())
+ if (Subtarget->getTargetTriple().isOSMSVCRT())
return 0;
- CallingConv::ID CC = CS.getCallingConv();
- if (CC == CallingConv::Fast || CC == CallingConv::GHC)
+ if (CC == CallingConv::Fast || CC == CallingConv::GHC ||
+ CC == CallingConv::HiPE)
return 0;
- if (!CS.paramHasAttr(1, Attribute::StructRet))
+ if (CS && !CS->paramHasAttr(1, Attribute::StructRet))
return 0;
- if (CS.paramHasAttr(1, Attribute::InReg))
+ if (CS && CS->paramHasAttr(1, Attribute::InReg))
return 0;
return 4;
}
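
computeBytesPoppedByCallee above models the one 32-bit quirk the call lowering still has to account for: with the ordinary C-family conventions on non-MSVCRT x86-32, a callee that takes a struct-return pointer as its first (non-inreg) argument pops that hidden 4-byte pointer itself when it returns, and the result feeds the CALLSEQ_END emitted later so the stack adjustment matches. A schematic call site, with invented symbol names:

//   calll  callee_with_sret      ; callee ends in "retl $4", popping the
//                                ; hidden sret pointer, so 4 callee-popped
//                                ; bytes are recorded on CALLSEQ_END
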
-// Select either a call, or an llvm.memcpy/memmove/memset intrinsic
-bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
- const CallInst *CI = cast<CallInst>(I);
- const Value *Callee = CI->getCalledValue();
-
- // Handle only C and fastcc calling conventions for now.
- ImmutableCallSite CS(CI);
- CallingConv::ID CC = CS.getCallingConv();
- bool isWin64 = Subtarget->isCallingConvWin64(CC);
- if (CC != CallingConv::C && CC != CallingConv::Fast &&
- CC != CallingConv::X86_FastCall && CC != CallingConv::X86_64_Win64 &&
- CC != CallingConv::X86_64_SysV)
+bool X86FastISel::FastLowerCall(CallLoweringInfo &CLI) {
+ auto &OutVals = CLI.OutVals;
+ auto &OutFlags = CLI.OutFlags;
+ auto &OutRegs = CLI.OutRegs;
+ auto &Ins = CLI.Ins;
+ auto &InRegs = CLI.InRegs;
+ CallingConv::ID CC = CLI.CallConv;
+ bool &IsTailCall = CLI.IsTailCall;
+ bool IsVarArg = CLI.IsVarArg;
+ const Value *Callee = CLI.Callee;
+ const char *SymName = CLI.SymName;
+
+ bool Is64Bit = Subtarget->is64Bit();
+ bool IsWin64 = Subtarget->isCallingConvWin64(CC);
+
+ // Handle only C, fastcc, and webkit_js calling conventions for now.
+ switch (CC) {
+ default: return false;
+ case CallingConv::C:
+ case CallingConv::Fast:
+ case CallingConv::WebKit_JS:
+ case CallingConv::X86_FastCall:
+ case CallingConv::X86_64_Win64:
+ case CallingConv::X86_64_SysV:
+ break;
+ }
+
+ // Allow SelectionDAG isel to handle tail calls.
+ if (IsTailCall)
return false;
// fastcc with -tailcallopt is intended to provide a guaranteed
@@ -1894,162 +2685,101 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt)
return false;
- PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
- FunctionType *FTy = cast<FunctionType>(PT->getElementType());
- bool isVarArg = FTy->isVarArg();
-
// Don't know how to handle Win64 varargs yet. Nothing special needed for
- // x86-32. Special handling for x86-64 is implemented.
- if (isVarArg && isWin64)
+ // x86-32. Special handling for x86-64 is implemented.
+ if (IsVarArg && IsWin64)
return false;
- // Fast-isel doesn't know about callee-pop yet.
- if (X86::isCalleePop(CC, Subtarget->is64Bit(), isVarArg,
- TM.Options.GuaranteedTailCallOpt))
+ // Don't know about inalloca yet.
+ if (CLI.CS && CLI.CS->hasInAllocaArgument())
return false;
- // Check whether the function can return without sret-demotion.
- SmallVector<ISD::OutputArg, 4> Outs;
- GetReturnInfo(I->getType(), CS.getAttributes(), Outs, TLI);
- bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
- *FuncInfo.MF, FTy->isVarArg(),
- Outs, FTy->getContext());
- if (!CanLowerReturn)
- return false;
-
- // Materialize callee address in a register. FIXME: GV address can be
- // handled with a CALLpcrel32 instead.
- X86AddressMode CalleeAM;
- if (!X86SelectCallAddress(Callee, CalleeAM))
- return false;
- unsigned CalleeOp = 0;
- const GlobalValue *GV = 0;
- if (CalleeAM.GV != 0) {
- GV = CalleeAM.GV;
- } else if (CalleeAM.Base.Reg != 0) {
- CalleeOp = CalleeAM.Base.Reg;
- } else
+ // Fast-isel doesn't know about callee-pop yet.
+ if (X86::isCalleePop(CC, Subtarget->is64Bit(), IsVarArg,
+ TM.Options.GuaranteedTailCallOpt))
return false;
- // Deal with call operands first.
- SmallVector<const Value *, 8> ArgVals;
- SmallVector<unsigned, 8> Args;
- SmallVector<MVT, 8> ArgVTs;
- SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
- unsigned arg_size = CS.arg_size();
- Args.reserve(arg_size);
- ArgVals.reserve(arg_size);
- ArgVTs.reserve(arg_size);
- ArgFlags.reserve(arg_size);
- for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
- i != e; ++i) {
- // If we're lowering a mem intrinsic instead of a regular call, skip the
- // last two arguments, which should not passed to the underlying functions.
- if (MemIntName && e-i <= 2)
- break;
- Value *ArgVal = *i;
- ISD::ArgFlagsTy Flags;
- unsigned AttrInd = i - CS.arg_begin() + 1;
- if (CS.paramHasAttr(AttrInd, Attribute::SExt))
- Flags.setSExt();
- if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
- Flags.setZExt();
-
- if (CS.paramHasAttr(AttrInd, Attribute::ByVal)) {
- PointerType *Ty = cast<PointerType>(ArgVal->getType());
- Type *ElementTy = Ty->getElementType();
- unsigned FrameSize = TD.getTypeAllocSize(ElementTy);
- unsigned FrameAlign = CS.getParamAlignment(AttrInd);
- if (!FrameAlign)
- FrameAlign = TLI.getByValTypeAlignment(ElementTy);
- Flags.setByVal();
- Flags.setByValSize(FrameSize);
- Flags.setByValAlign(FrameAlign);
- if (!IsMemcpySmall(FrameSize))
- return false;
- }
-
- if (CS.paramHasAttr(AttrInd, Attribute::InReg))
- Flags.setInReg();
- if (CS.paramHasAttr(AttrInd, Attribute::Nest))
- Flags.setNest();
-
- // If this is an i1/i8/i16 argument, promote to i32 to avoid an extra
- // instruction. This is safe because it is common to all fastisel supported
- // calling conventions on x86.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(ArgVal)) {
- if (CI->getBitWidth() == 1 || CI->getBitWidth() == 8 ||
- CI->getBitWidth() == 16) {
+ // If this is a constant i1/i8/i16 argument, promote to i32 to avoid an extra
+ // instruction. This is safe because it is common to all FastISel supported
+ // calling conventions on x86.
+ for (int i = 0, e = OutVals.size(); i != e; ++i) {
+ Value *&Val = OutVals[i];
+ ISD::ArgFlagsTy Flags = OutFlags[i];
+ if (auto *CI = dyn_cast<ConstantInt>(Val)) {
+ if (CI->getBitWidth() < 32) {
if (Flags.isSExt())
- ArgVal = ConstantExpr::getSExt(CI,Type::getInt32Ty(CI->getContext()));
+ Val = ConstantExpr::getSExt(CI, Type::getInt32Ty(CI->getContext()));
else
- ArgVal = ConstantExpr::getZExt(CI,Type::getInt32Ty(CI->getContext()));
+ Val = ConstantExpr::getZExt(CI, Type::getInt32Ty(CI->getContext()));
}
}
- unsigned ArgReg;
-
// Passing bools around ends up doing a trunc to i1 and passing it.
// Codegen this as an argument + "and 1".
- if (ArgVal->getType()->isIntegerTy(1) && isa<TruncInst>(ArgVal) &&
- cast<TruncInst>(ArgVal)->getParent() == I->getParent() &&
- ArgVal->hasOneUse()) {
- ArgVal = cast<TruncInst>(ArgVal)->getOperand(0);
- ArgReg = getRegForValue(ArgVal);
- if (ArgReg == 0) return false;
-
- MVT ArgVT;
- if (!isTypeLegal(ArgVal->getType(), ArgVT)) return false;
-
- ArgReg = FastEmit_ri(ArgVT, ArgVT, ISD::AND, ArgReg,
- ArgVal->hasOneUse(), 1);
- } else {
- ArgReg = getRegForValue(ArgVal);
- }
+ if (auto *TI = dyn_cast<TruncInst>(Val)) {
+ if (TI->getType()->isIntegerTy(1) && CLI.CS &&
+ (TI->getParent() == CLI.CS->getInstruction()->getParent()) &&
+ TI->hasOneUse()) {
+ Val = cast<TruncInst>(Val)->getOperand(0);
+ unsigned ResultReg = getRegForValue(Val);
+
+ if (!ResultReg)
+ return false;
- if (ArgReg == 0) return false;
+ MVT ArgVT;
+ if (!isTypeLegal(Val->getType(), ArgVT))
+ return false;
- Type *ArgTy = ArgVal->getType();
- MVT ArgVT;
- if (!isTypeLegal(ArgTy, ArgVT))
- return false;
- if (ArgVT == MVT::x86mmx)
- return false;
- unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
- Flags.setOrigAlign(OriginalAlignment);
+ ResultReg =
+ FastEmit_ri(ArgVT, ArgVT, ISD::AND, ResultReg, Val->hasOneUse(), 1);
- Args.push_back(ArgReg);
- ArgVals.push_back(ArgVal);
- ArgVTs.push_back(ArgVT);
- ArgFlags.push_back(Flags);
+ if (!ResultReg)
+ return false;
+ UpdateValueMap(Val, ResultReg);
+ }
+ }
}
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs,
- I->getParent()->getContext());
+ CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, TM, ArgLocs,
+ CLI.RetTy->getContext());
// Allocate shadow area for Win64
- if (isWin64)
+ if (IsWin64)
CCInfo.AllocateStack(32, 8);
- CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_X86);
+ SmallVector<MVT, 16> OutVTs;
+ for (auto *Val : OutVals) {
+ MVT VT;
+ if (!isTypeLegal(Val->getType(), VT))
+ return false;
+ OutVTs.push_back(VT);
+ }
+ CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, CC_X86);
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
// Issue CALLSEQ_START
unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackDown))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
.addImm(NumBytes);
- // Process argument: walk the register/memloc assignments, inserting
- // copies / loads.
- SmallVector<unsigned, 4> RegArgs;
+ // Walk the register/memloc assignments, inserting copies/loads.
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo *>(TM.getRegisterInfo());
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- CCValAssign &VA = ArgLocs[i];
- unsigned Arg = Args[VA.getValNo()];
- EVT ArgVT = ArgVTs[VA.getValNo()];
+ CCValAssign const &VA = ArgLocs[i];
+ const Value *ArgVal = OutVals[VA.getValNo()];
+ MVT ArgVT = OutVTs[VA.getValNo()];
+
+ if (ArgVT == MVT::x86mmx)
+ return false;
+
+ unsigned ArgReg = getRegForValue(ArgVal);
+ if (!ArgReg)
+ return false;
// Promote the value if needed.
switch (VA.getLocInfo()) {
@@ -2057,8 +2787,8 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
case CCValAssign::SExt: {
assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
"Unexpected extend");
- bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
- Arg, ArgVT, Arg);
+ bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,
+ ArgVT, ArgReg);
assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
ArgVT = VA.getLocVT();
break;
@@ -2066,8 +2796,8 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
case CCValAssign::ZExt: {
assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
"Unexpected extend");
- bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
- Arg, ArgVT, Arg);
+ bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,
+ ArgVT, ArgReg);
assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
ArgVT = VA.getLocVT();
break;
@@ -2075,31 +2805,32 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
case CCValAssign::AExt: {
assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
"Unexpected extend");
- bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
- Arg, ArgVT, Arg);
+ bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(), ArgReg,
+ ArgVT, ArgReg);
if (!Emitted)
- Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
- Arg, ArgVT, Arg);
+ Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,
+ ArgVT, ArgReg);
if (!Emitted)
- Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
- Arg, ArgVT, Arg);
+ Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,
+ ArgVT, ArgReg);
assert(Emitted && "Failed to emit a aext!"); (void)Emitted;
ArgVT = VA.getLocVT();
break;
}
case CCValAssign::BCvt: {
- unsigned BC = FastEmit_r(ArgVT.getSimpleVT(), VA.getLocVT(),
- ISD::BITCAST, Arg, /*TODO: Kill=*/false);
- assert(BC != 0 && "Failed to emit a bitcast!");
- Arg = BC;
+ ArgReg = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg,
+ /*TODO: Kill=*/false);
+ assert(ArgReg && "Failed to emit a bitcast!");
ArgVT = VA.getLocVT();
break;
}
- case CCValAssign::VExt:
+ case CCValAssign::VExt:
// VExt has not been implemented, so this should be impossible to reach
// for now. However, fall back to SelectionDAG isel once implemented.
return false;
+ case CCValAssign::FPExt:
+ llvm_unreachable("Unexpected loc info!");
case CCValAssign::Indirect:
// FIXME: Indirect doesn't need extending, but fast-isel doesn't fully
// support this.
@@ -2107,32 +2838,34 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
}
if (VA.isRegLoc()) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- VA.getLocReg()).addReg(Arg);
- RegArgs.push_back(VA.getLocReg());
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
+ OutRegs.push_back(VA.getLocReg());
} else {
+ assert(VA.isMemLoc());
unsigned LocMemOffset = VA.getLocMemOffset();
X86AddressMode AM;
- const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo*>(
- getTargetMachine()->getRegisterInfo());
AM.Base.Reg = RegInfo->getStackRegister();
AM.Disp = LocMemOffset;
- const Value *ArgVal = ArgVals[VA.getValNo()];
- ISD::ArgFlagsTy Flags = ArgFlags[VA.getValNo()];
-
+ ISD::ArgFlagsTy Flags = OutFlags[VA.getValNo()];
+ unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType());
+ MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
+ MachinePointerInfo::getStack(LocMemOffset), MachineMemOperand::MOStore,
+ ArgVT.getStoreSize(), Alignment);
if (Flags.isByVal()) {
X86AddressMode SrcAM;
- SrcAM.Base.Reg = Arg;
- bool Res = TryEmitSmallMemcpy(AM, SrcAM, Flags.getByValSize());
- assert(Res && "memcpy length already checked!"); (void)Res;
+ SrcAM.Base.Reg = ArgReg;
+ if (!TryEmitSmallMemcpy(AM, SrcAM, Flags.getByValSize()))
+ return false;
} else if (isa<ConstantInt>(ArgVal) || isa<ConstantPointerNull>(ArgVal)) {
// If this is a really simple value, emit this with the Value* version
// of X86FastEmitStore. If it isn't simple, we don't want to do this,
// as it can cause us to reevaluate the argument.
- if (!X86FastEmitStore(ArgVT, ArgVal, AM))
+ if (!X86FastEmitStore(ArgVT, ArgVal, AM, MMO))
return false;
} else {
- if (!X86FastEmitStore(ArgVT, Arg, AM))
+ bool ValIsKill = hasTrivialKill(ArgVal);
+ if (!X86FastEmitStore(ArgVT, ArgReg, ValIsKill, AM, MMO))
return false;
}
}
@@ -2142,41 +2875,57 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
// GOT pointer.
if (Subtarget->isPICStyleGOT()) {
unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- X86::EBX).addReg(Base);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), X86::EBX).addReg(Base);
}
- if (Subtarget->is64Bit() && isVarArg && !isWin64) {
+ if (Is64Bit && IsVarArg && !IsWin64) {
+ // From AMD64 ABI document:
+ // For calls that may call functions that use varargs or stdargs
+ // (prototype-less calls or calls to functions containing ellipsis (...) in
+ // the declaration), %al is used as a hidden argument to specify the number
+ // of SSE registers used. The contents of %al do not need to match exactly
+ // the number of registers, but must be an upper bound on the number of SSE
+ // registers used and must be in the range 0 - 8, inclusive.
+
// Count the number of XMM registers allocated.
- static const uint16_t XMMArgRegs[] = {
+ static const MCPhysReg XMMArgRegs[] = {
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::MOV8ri),
+ assert((Subtarget->hasSSE1() || !NumXMMRegs)
+ && "SSE registers cannot be used when SSE is disabled");
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri),
X86::AL).addImm(NumXMMRegs);
}
+ // Materialize callee address in a register. FIXME: GV address can be
+ // handled with a CALLpcrel32 instead.
+ X86AddressMode CalleeAM;
+ if (!X86SelectCallAddress(Callee, CalleeAM))
+ return false;
+
+ unsigned CalleeOp = 0;
+ const GlobalValue *GV = nullptr;
+ if (CalleeAM.GV != nullptr) {
+ GV = CalleeAM.GV;
+ } else if (CalleeAM.Base.Reg != 0) {
+ CalleeOp = CalleeAM.Base.Reg;
+ } else
+ return false;
+
// Issue the call.
MachineInstrBuilder MIB;
if (CalleeOp) {
// Register-indirect call.
- unsigned CallOpc;
- if (Subtarget->is64Bit())
- CallOpc = X86::CALL64r;
- else
- CallOpc = X86::CALL32r;
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
+ unsigned CallOpc = Is64Bit ? X86::CALL64r : X86::CALL32r;
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc))
.addReg(CalleeOp);
-
} else {
// Direct call.
assert(GV && "Not a direct call");
- unsigned CallOpc;
- if (Subtarget->is64Bit())
- CallOpc = X86::CALL64pcrel32;
- else
- CallOpc = X86::CALLpcrel32;
+ unsigned CallOpc = Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32;
// See if we need any target-specific flags on the GV operand.
unsigned char OpFlags = 0;
@@ -2199,113 +2948,97 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
OpFlags = X86II::MO_DARWIN_STUB;
}
-
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc));
- if (MemIntName)
- MIB.addExternalSymbol(MemIntName, OpFlags);
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc));
+ if (SymName)
+ MIB.addExternalSymbol(SymName, OpFlags);
else
MIB.addGlobalAddress(GV, 0, OpFlags);
}
- // Add a register mask with the call-preserved registers.
+ // Add a register mask operand representing the call-preserved registers.
// Proper defs for return values will be added by setPhysRegsDeadExcept().
- MIB.addRegMask(TRI.getCallPreservedMask(CS.getCallingConv()));
+ MIB.addRegMask(TRI.getCallPreservedMask(CC));
// Add an implicit use GOT pointer in EBX.
if (Subtarget->isPICStyleGOT())
MIB.addReg(X86::EBX, RegState::Implicit);
- if (Subtarget->is64Bit() && isVarArg && !isWin64)
+ if (Is64Bit && IsVarArg && !IsWin64)
MIB.addReg(X86::AL, RegState::Implicit);
// Add implicit physical register uses to the call.
- for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
- MIB.addReg(RegArgs[i], RegState::Implicit);
+ for (auto Reg : OutRegs)
+ MIB.addReg(Reg, RegState::Implicit);
// Issue CALLSEQ_END
+ unsigned NumBytesForCalleeToPop =
+ computeBytesPoppedByCallee(Subtarget, CC, CLI.CS);
unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
- const unsigned NumBytesCallee = computeBytesPoppedByCallee(*Subtarget, CS);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackUp))
- .addImm(NumBytes).addImm(NumBytesCallee);
-
- // Build info for return calling conv lowering code.
- // FIXME: This is practically a copy-paste from TargetLowering::LowerCallTo.
- SmallVector<ISD::InputArg, 32> Ins;
- SmallVector<EVT, 4> RetTys;
- ComputeValueVTs(TLI, I->getType(), RetTys);
- for (unsigned i = 0, e = RetTys.size(); i != e; ++i) {
- EVT VT = RetTys[i];
- MVT RegisterVT = TLI.getRegisterType(I->getParent()->getContext(), VT);
- unsigned NumRegs = TLI.getNumRegisters(I->getParent()->getContext(), VT);
- for (unsigned j = 0; j != NumRegs; ++j) {
- ISD::InputArg MyFlags;
- MyFlags.VT = RegisterVT;
- MyFlags.Used = !CS.getInstruction()->use_empty();
- if (CS.paramHasAttr(0, Attribute::SExt))
- MyFlags.Flags.setSExt();
- if (CS.paramHasAttr(0, Attribute::ZExt))
- MyFlags.Flags.setZExt();
- if (CS.paramHasAttr(0, Attribute::InReg))
- MyFlags.Flags.setInReg();
- Ins.push_back(MyFlags);
- }
- }
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
+ .addImm(NumBytes).addImm(NumBytesForCalleeToPop);
// Now handle call return values.
- SmallVector<unsigned, 4> UsedRegs;
SmallVector<CCValAssign, 16> RVLocs;
- CCState CCRetInfo(CC, false, *FuncInfo.MF, TM, RVLocs,
- I->getParent()->getContext());
- unsigned ResultReg = FuncInfo.CreateRegs(I->getType());
+ CCState CCRetInfo(CC, IsVarArg, *FuncInfo.MF, TM, RVLocs,
+ CLI.RetTy->getContext());
CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);
+
+ // Copy all of the result registers out of their specified physreg.
+ unsigned ResultReg = FuncInfo.CreateRegs(CLI.RetTy);
for (unsigned i = 0; i != RVLocs.size(); ++i) {
- EVT CopyVT = RVLocs[i].getValVT();
+ CCValAssign &VA = RVLocs[i];
+ EVT CopyVT = VA.getValVT();
unsigned CopyReg = ResultReg + i;
- // If this is a call to a function that returns an fp value on the x87 fp
- // stack, but where we prefer to use the value in xmm registers, copy it
- // out as F80 and use a truncate to move it from fp stack reg to xmm reg.
- if ((RVLocs[i].getLocReg() == X86::ST0 ||
- RVLocs[i].getLocReg() == X86::ST1)) {
- if (isScalarFPTypeInSSEReg(RVLocs[i].getValVT())) {
+ // If this is x86-64, and we disabled SSE, we can't return FP values
+ if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
+ ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
+ report_fatal_error("SSE register return with SSE disabled");
+ }
+
+ // If this is a call to a function that returns an fp value on the floating
+ // point stack, we must guarantee the value is popped from the stack, so
+ // a COPY is not good enough - the copy instruction may be eliminated if the
+ // return value is not used. We use the FpPOP_RETVAL instruction instead.
+ if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) {
+ // If we prefer to use the value in xmm registers, copy it out as f80 and
+ // use a truncate to move it from fp stack reg to xmm reg.
+ if (isScalarFPTypeInSSEReg(VA.getValVT())) {
CopyVT = MVT::f80;
CopyReg = createResultReg(&X86::RFP80RegClass);
}
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::FpPOP_RETVAL),
- CopyReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(X86::FpPOP_RETVAL), CopyReg);
+
+ // Round the f80 to the right size, which also moves it to the appropriate
+ // xmm register. This is accomplished by storing the f80 value in memory
+ // and then loading it back.
+ if (CopyVT != VA.getValVT()) {
+ EVT ResVT = VA.getValVT();
+ unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
+ unsigned MemSize = ResVT.getSizeInBits()/8;
+ int FI = MFI.CreateStackObject(MemSize, MemSize, false);
+ addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc)), FI)
+ .addReg(CopyReg);
+ Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
+ addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc), ResultReg + i), FI);
+ }
} else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- CopyReg).addReg(RVLocs[i].getLocReg());
- UsedRegs.push_back(RVLocs[i].getLocReg());
- }
-
- if (CopyVT != RVLocs[i].getValVT()) {
- // Round the F80 the right size, which also moves to the appropriate xmm
- // register. This is accomplished by storing the F80 value in memory and
- // then loading it back. Ewww...
- EVT ResVT = RVLocs[i].getValVT();
- unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
- unsigned MemSize = ResVT.getSizeInBits()/8;
- int FI = MFI.CreateStackObject(MemSize, MemSize, false);
- addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(Opc)), FI)
- .addReg(CopyReg);
- Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
- addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(Opc), ResultReg + i), FI);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), CopyReg).addReg(VA.getLocReg());
+ InRegs.push_back(VA.getLocReg());
}
}
- if (RVLocs.size())
- UpdateValueMap(I, ResultReg, RVLocs.size());
-
- // Set all unused physreg defs as dead.
- static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
+ CLI.ResultReg = ResultReg;
+ CLI.NumResultRegs = RVLocs.size();
+ CLI.Call = MIB;
return true;
}
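
One detail of the return handling above: a float or double coming back in ST(0)/ST(1) is popped with FpPOP_RETVAL rather than copied, since a dead COPY could be deleted and leave the x87 stack unbalanced; if the value is wanted in an SSE register, it is then rounded by spilling the f80 to a fresh stack slot and reloading it. Schematically, for an f64 result (pseudo machine-instruction syntax, operands illustrative):

//   %tmp:RFP80 = FpPOP_RETVAL          ; pop the return value off ST(0)
//   ST_Fp80m64 <fi#N>, %tmp            ; the store rounds f80 down to f64
//   %res:FR64  = MOVSDrm <fi#N>        ; reload the rounded value for SSE use
// The store/reload round trip through the stack slot is what performs the
// f80-to-f64 (or f32) rounding.
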
-
bool
X86FastISel::TargetSelectInstruction(const Instruction *I) {
switch (I->getOpcode()) {
@@ -2323,8 +3056,6 @@ X86FastISel::TargetSelectInstruction(const Instruction *I) {
return X86SelectZExt(I);
case Instruction::Br:
return X86SelectBranch(I);
- case Instruction::Call:
- return X86SelectCall(I);
case Instruction::LShr:
case Instruction::AShr:
case Instruction::Shl:
@@ -2371,7 +3102,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
// Get opcode and regclass of the output for the given load instruction.
unsigned Opc = 0;
- const TargetRegisterClass *RC = NULL;
+ const TargetRegisterClass *RC = nullptr;
switch (VT.SimpleTy) {
default: return 0;
case MVT::i8:
@@ -2414,30 +3145,39 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
return 0;
}
- // Materialize addresses with LEA instructions.
+ // Materialize addresses with LEA/MOV instructions.
if (isa<GlobalValue>(C)) {
X86AddressMode AM;
if (X86SelectAddress(C, AM)) {
// If the expression is just a basereg, then we're done, otherwise we need
// to emit an LEA.
if (AM.BaseType == X86AddressMode::RegBase &&
- AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == 0)
+ AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == nullptr)
return AM.Base.Reg;
- Opc = TLI.getPointerTy() == MVT::i32 ? X86::LEA32r : X86::LEA64r;
unsigned ResultReg = createResultReg(RC);
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ if (TM.getRelocationModel() == Reloc::Static &&
+ TLI.getPointerTy() == MVT::i64) {
+ // The displacement could be more than 32 bits away, so we need to use
+ // an instruction with a 64-bit immediate.
+ Opc = X86::MOV64ri;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc), ResultReg).addGlobalAddress(cast<GlobalValue>(C));
+ } else {
+ Opc = TLI.getPointerTy() == MVT::i32 ? X86::LEA32r : X86::LEA64r;
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg), AM);
+ }
return ResultReg;
}
return 0;
}
// MachineConstantPool wants an explicit alignment.
- unsigned Align = TD.getPrefTypeAlignment(C->getType());
+ unsigned Align = DL.getPrefTypeAlignment(C->getType());
if (Align == 0) {
// Alignment of vector types. FIXME!
- Align = TD.getTypeAllocSize(C->getType());
+ Align = DL.getTypeAllocSize(C->getType());
}
// x86-32 PIC requires a PIC base register for constant pools.
@@ -2457,7 +3197,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
// Create the load from the constant pool.
unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
unsigned ResultReg = createResultReg(RC);
- addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg),
MCPOffset, PICBase, OpFlag);
@@ -2474,6 +3214,7 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
// X86SelectAddress, and TargetMaterializeAlloca.
if (!FuncInfo.StaticAllocaMap.count(C))
return 0;
+ assert(C->isStaticAlloca() && "dynamic alloca in the static alloca map?");
X86AddressMode AM;
if (!X86SelectAddress(C, AM))
@@ -2481,7 +3222,7 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
const TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
unsigned ResultReg = createResultReg(RC);
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg), AM);
return ResultReg;
}
@@ -2493,7 +3234,7 @@ unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) {
// Get opcode and regclass for the given zero.
unsigned Opc = 0;
- const TargetRegisterClass *RC = NULL;
+ const TargetRegisterClass *RC = nullptr;
switch (VT.SimpleTy) {
default: return 0;
case MVT::f32:
@@ -2520,29 +3261,35 @@ unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) {
}
unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
return ResultReg;
}
bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
const LoadInst *LI) {
+ const Value *Ptr = LI->getPointerOperand();
X86AddressMode AM;
- if (!X86SelectAddress(LI->getOperand(0), AM))
+ if (!X86SelectAddress(Ptr, AM))
return false;
const X86InstrInfo &XII = (const X86InstrInfo&)TII;
- unsigned Size = TD.getTypeAllocSize(LI->getType());
+ unsigned Size = DL.getTypeAllocSize(LI->getType());
unsigned Alignment = LI->getAlignment();
+ if (Alignment == 0) // Ensure that codegen never sees alignment 0
+ Alignment = DL.getABITypeAlignment(LI->getType());
+
SmallVector<MachineOperand, 8> AddrOps;
AM.getFullAddress(AddrOps);
MachineInstr *Result =
XII.foldMemoryOperandImpl(*FuncInfo.MF, MI, OpNo, AddrOps, Size, Alignment);
- if (Result == 0) return false;
+ if (!Result)
+ return false;
+ Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI));
FuncInfo.MBB->insert(FuncInfo.InsertPt, Result);
MI->eraseFromParent();
return true;
diff --git a/contrib/llvm/lib/Target/X86/X86FixupLEAs.cpp b/contrib/llvm/lib/Target/X86/X86FixupLEAs.cpp
index 38a8351..eb9f743 100644
--- a/contrib/llvm/lib/Target/X86/X86FixupLEAs.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FixupLEAs.cpp
@@ -7,13 +7,11 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the pass which will find instructions which
-// can be re-written as LEA instructions in order to reduce pipeline
-// delays for some models of the Intel Atom family.
+// This file defines the pass that finds instructions that can be
+// re-written as LEA instructions in order to reduce pipeline delays.
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "x86-fixup-LEAs"
#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
@@ -28,84 +26,94 @@
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "x86-fixup-LEAs"
+
STATISTIC(NumLEAs, "Number of LEA instructions created");
namespace {
- class FixupLEAPass : public MachineFunctionPass {
- enum RegUsageState { RU_NotUsed, RU_Write, RU_Read };
- static char ID;
- /// \brief Loop over all of the instructions in the basic block
- /// replacing applicable instructions with LEA instructions,
- /// where appropriate.
- bool processBasicBlock(MachineFunction &MF, MachineFunction::iterator MFI);
+class FixupLEAPass : public MachineFunctionPass {
+ enum RegUsageState { RU_NotUsed, RU_Write, RU_Read };
+ static char ID;
+ /// \brief Loop over all of the instructions in the basic block
+ /// replacing applicable instructions with LEA instructions,
+ /// where appropriate.
+ bool processBasicBlock(MachineFunction &MF, MachineFunction::iterator MFI);
- virtual const char *getPassName() const { return "X86 Atom LEA Fixup";}
+ const char *getPassName() const override { return "X86 LEA Fixup"; }
- /// \brief Given a machine register, look for the instruction
- /// which writes it in the current basic block. If found,
- /// try to replace it with an equivalent LEA instruction.
- /// If replacement succeeds, then also process the the newly created
- /// instruction.
- void seekLEAFixup(MachineOperand& p, MachineBasicBlock::iterator& I,
- MachineFunction::iterator MFI);
+ /// \brief Given a machine register, look for the instruction
+ /// which writes it in the current basic block. If found,
+ /// try to replace it with an equivalent LEA instruction.
+ /// If replacement succeeds, then also process the the newly created
+ /// instruction.
+ void seekLEAFixup(MachineOperand &p, MachineBasicBlock::iterator &I,
+ MachineFunction::iterator MFI);
- /// \brief Given a memory access or LEA instruction
- /// whose address mode uses a base and/or index register, look for
- /// an opportunity to replace the instruction which sets the base or index
- /// register with an equivalent LEA instruction.
- void processInstruction(MachineBasicBlock::iterator& I,
- MachineFunction::iterator MFI);
+ /// \brief Given a memory access or LEA instruction
+ /// whose address mode uses a base and/or index register, look for
+ /// an opportunity to replace the instruction which sets the base or index
+ /// register with an equivalent LEA instruction.
+ void processInstruction(MachineBasicBlock::iterator &I,
+ MachineFunction::iterator MFI);
- /// \brief Determine if an instruction references a machine register
- /// and, if so, whether it reads or writes the register.
- RegUsageState usesRegister(MachineOperand& p,
- MachineBasicBlock::iterator I);
+ /// \brief Given a LEA instruction which is unprofitable
+ /// on Silvermont try to replace it with an equivalent ADD instruction
+ void processInstructionForSLM(MachineBasicBlock::iterator &I,
+ MachineFunction::iterator MFI);
- /// \brief Step backwards through a basic block, looking
- /// for an instruction which writes a register within
- /// a maximum of INSTR_DISTANCE_THRESHOLD instruction latency cycles.
- MachineBasicBlock::iterator searchBackwards(MachineOperand& p,
- MachineBasicBlock::iterator& I,
- MachineFunction::iterator MFI);
+ /// \brief Determine if an instruction references a machine register
+ /// and, if so, whether it reads or writes the register.
+ RegUsageState usesRegister(MachineOperand &p, MachineBasicBlock::iterator I);
- /// \brief if an instruction can be converted to an
- /// equivalent LEA, insert the new instruction into the basic block
- /// and return a pointer to it. Otherwise, return zero.
- MachineInstr* postRAConvertToLEA(MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI) const;
+ /// \brief Step backwards through a basic block, looking
+ /// for an instruction which writes a register within
+ /// a maximum of INSTR_DISTANCE_THRESHOLD instruction latency cycles.
+ MachineBasicBlock::iterator searchBackwards(MachineOperand &p,
+ MachineBasicBlock::iterator &I,
+ MachineFunction::iterator MFI);
- public:
- FixupLEAPass() : MachineFunctionPass(ID) {}
+ /// \brief if an instruction can be converted to an
+ /// equivalent LEA, insert the new instruction into the basic block
+ /// and return a pointer to it. Otherwise, return zero.
+ MachineInstr *postRAConvertToLEA(MachineFunction::iterator &MFI,
+ MachineBasicBlock::iterator &MBBI) const;
- /// \brief Loop over all of the basic blocks,
- /// replacing instructions by equivalent LEA instructions
- /// if needed and when possible.
- virtual bool runOnMachineFunction(MachineFunction &MF);
+public:
+ FixupLEAPass() : MachineFunctionPass(ID) {}
- private:
- MachineFunction *MF;
- const TargetMachine *TM;
- const TargetInstrInfo *TII; // Machine instruction info.
+ /// \brief Loop over all of the basic blocks,
+ /// replacing instructions by equivalent LEA instructions
+ /// if needed and when possible.
+ bool runOnMachineFunction(MachineFunction &MF) override;
- };
- char FixupLEAPass::ID = 0;
+private:
+ MachineFunction *MF;
+ const TargetMachine *TM;
+ const X86InstrInfo *TII; // Machine instruction info.
+};
+char FixupLEAPass::ID = 0;
}
MachineInstr *
FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI) const {
- MachineInstr* MI = MBBI;
- MachineInstr* NewMI;
+ MachineInstr *MI = MBBI;
+ MachineInstr *NewMI;
switch (MI->getOpcode()) {
- case X86::MOV32rr:
+ case X86::MOV32rr:
case X86::MOV64rr: {
- const MachineOperand& Src = MI->getOperand(1);
- const MachineOperand& Dest = MI->getOperand(0);
+ const MachineOperand &Src = MI->getOperand(1);
+ const MachineOperand &Dest = MI->getOperand(0);
NewMI = BuildMI(*MF, MI->getDebugLoc(),
- TII->get( MI->getOpcode() == X86::MOV32rr ? X86::LEA32r : X86::LEA64r))
- .addOperand(Dest)
- .addOperand(Src).addImm(1).addReg(0).addImm(0).addReg(0);
- MFI->insert(MBBI, NewMI); // Insert the new inst
+ TII->get(MI->getOpcode() == X86::MOV32rr ? X86::LEA32r
+ : X86::LEA64r))
+ .addOperand(Dest)
+ .addOperand(Src)
+ .addImm(1)
+ .addReg(0)
+ .addImm(0)
+ .addReg(0);
+ MFI->insert(MBBI, NewMI); // Insert the new inst
return NewMI;
}
case X86::ADD64ri32:
@@ -123,7 +131,7 @@ FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI,
if (!MI->getOperand(2).isImm()) {
// convertToThreeAddress will call getImm()
// which requires isImm() to be true
- return 0;
+ return nullptr;
}
break;
case X86::ADD16rr:
@@ -132,20 +140,22 @@ FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI,
// if src1 != src2, then convertToThreeAddress will
// need to create a Virtual register, which we cannot do
// after register allocation.
- return 0;
+ return nullptr;
}
}
- return TII->convertToThreeAddress(MFI, MBBI, 0);
+ return TII->convertToThreeAddress(MFI, MBBI, nullptr);
}
-FunctionPass *llvm::createX86FixupLEAs() {
- return new FixupLEAPass();
-}
+FunctionPass *llvm::createX86FixupLEAs() { return new FixupLEAPass(); }
bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) {
MF = &Func;
- TM = &MF->getTarget();
- TII = TM->getInstrInfo();
+ TM = &Func.getTarget();
+ const X86Subtarget &ST = TM->getSubtarget<X86Subtarget>();
+ if (!ST.LEAusesAG() && !ST.slowLEA())
+ return false;
+
+ TII = static_cast<const X86InstrInfo *>(TM->getInstrInfo());
DEBUG(dbgs() << "Start X86FixupLEAs\n";);
// Process all basic blocks.
@@ -156,14 +166,14 @@ bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) {
return true;
}
-FixupLEAPass::RegUsageState FixupLEAPass::usesRegister(MachineOperand& p,
- MachineBasicBlock::iterator I) {
+FixupLEAPass::RegUsageState
+FixupLEAPass::usesRegister(MachineOperand &p, MachineBasicBlock::iterator I) {
RegUsageState RegUsage = RU_NotUsed;
- MachineInstr* MI = I;
+ MachineInstr *MI = I;
for (unsigned int i = 0; i < MI->getNumOperands(); ++i) {
- MachineOperand& opnd = MI->getOperand(i);
- if (opnd.isReg() && opnd.getReg() == p.getReg()){
+ MachineOperand &opnd = MI->getOperand(i);
+ if (opnd.isReg() && opnd.getReg() == p.getReg()) {
if (opnd.isDef())
return RU_Write;
RegUsage = RU_Read;
@@ -176,23 +186,22 @@ FixupLEAPass::RegUsageState FixupLEAPass::usesRegister(MachineOperand& p,
/// block, return a reference to the previous instruction in the block,
/// wrapping around to the last instruction of the block if the block
/// branches to itself.
-static inline bool getPreviousInstr(MachineBasicBlock::iterator& I,
+static inline bool getPreviousInstr(MachineBasicBlock::iterator &I,
MachineFunction::iterator MFI) {
if (I == MFI->begin()) {
if (MFI->isPredecessor(MFI)) {
I = --MFI->end();
return true;
- }
- else
+ } else
return false;
}
--I;
return true;
}
-MachineBasicBlock::iterator FixupLEAPass::searchBackwards(MachineOperand& p,
- MachineBasicBlock::iterator& I,
- MachineFunction::iterator MFI) {
+MachineBasicBlock::iterator
+FixupLEAPass::searchBackwards(MachineOperand &p, MachineBasicBlock::iterator &I,
+ MachineFunction::iterator MFI) {
int InstrDistance = 1;
MachineBasicBlock::iterator CurInst;
static const int INSTR_DISTANCE_THRESHOLD = 5;
@@ -200,63 +209,133 @@ MachineBasicBlock::iterator FixupLEAPass::searchBackwards(MachineOperand& p,
CurInst = I;
bool Found;
Found = getPreviousInstr(CurInst, MFI);
- while( Found && I != CurInst) {
+ while (Found && I != CurInst) {
if (CurInst->isCall() || CurInst->isInlineAsm())
break;
if (InstrDistance > INSTR_DISTANCE_THRESHOLD)
break; // too far back to make a difference
- if (usesRegister(p, CurInst) == RU_Write){
+ if (usesRegister(p, CurInst) == RU_Write) {
return CurInst;
}
InstrDistance += TII->getInstrLatency(TM->getInstrItineraryData(), CurInst);
Found = getPreviousInstr(CurInst, MFI);
}
- return 0;
+ return nullptr;
}
-void FixupLEAPass::processInstruction(MachineBasicBlock::iterator& I,
+void FixupLEAPass::processInstruction(MachineBasicBlock::iterator &I,
MachineFunction::iterator MFI) {
// Process a load, store, or LEA instruction.
MachineInstr *MI = I;
int opcode = MI->getOpcode();
- const MCInstrDesc& Desc = MI->getDesc();
+ const MCInstrDesc &Desc = MI->getDesc();
int AddrOffset = X86II::getMemoryOperandNo(Desc.TSFlags, opcode);
if (AddrOffset >= 0) {
AddrOffset += X86II::getOperandBias(Desc);
- MachineOperand& p = MI->getOperand(AddrOffset + X86::AddrBaseReg);
+ MachineOperand &p = MI->getOperand(AddrOffset + X86::AddrBaseReg);
if (p.isReg() && p.getReg() != X86::ESP) {
seekLEAFixup(p, I, MFI);
}
- MachineOperand& q = MI->getOperand(AddrOffset + X86::AddrIndexReg);
+ MachineOperand &q = MI->getOperand(AddrOffset + X86::AddrIndexReg);
if (q.isReg() && q.getReg() != X86::ESP) {
seekLEAFixup(q, I, MFI);
}
}
}
-void FixupLEAPass::seekLEAFixup(MachineOperand& p,
- MachineBasicBlock::iterator& I,
+void FixupLEAPass::seekLEAFixup(MachineOperand &p,
+ MachineBasicBlock::iterator &I,
MachineFunction::iterator MFI) {
MachineBasicBlock::iterator MBI = searchBackwards(p, I, MFI);
if (MBI) {
- MachineInstr* NewMI = postRAConvertToLEA(MFI, MBI);
+ MachineInstr *NewMI = postRAConvertToLEA(MFI, MBI);
if (NewMI) {
++NumLEAs;
- DEBUG(dbgs() << "Candidate to replace:"; MBI->dump(););
+ DEBUG(dbgs() << "FixLEA: Candidate to replace:"; MBI->dump(););
// now to replace with an equivalent LEA...
- DEBUG(dbgs() << "Replaced by: "; NewMI->dump(););
+ DEBUG(dbgs() << "FixLEA: Replaced by: "; NewMI->dump(););
MFI->erase(MBI);
MachineBasicBlock::iterator J =
- static_cast<MachineBasicBlock::iterator> (NewMI);
+ static_cast<MachineBasicBlock::iterator>(NewMI);
processInstruction(J, MFI);
}
}
}
+void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
+ MachineFunction::iterator MFI) {
+ MachineInstr *MI = I;
+ const int opcode = MI->getOpcode();
+ if (opcode != X86::LEA16r && opcode != X86::LEA32r && opcode != X86::LEA64r &&
+ opcode != X86::LEA64_32r)
+ return;
+ if (MI->getOperand(5).getReg() != 0 || !MI->getOperand(4).isImm() ||
+ !TII->isSafeToClobberEFLAGS(*MFI, I))
+ return;
+ const unsigned DstR = MI->getOperand(0).getReg();
+ const unsigned SrcR1 = MI->getOperand(1).getReg();
+ const unsigned SrcR2 = MI->getOperand(3).getReg();
+ if ((SrcR1 == 0 || SrcR1 != DstR) && (SrcR2 == 0 || SrcR2 != DstR))
+ return;
+ if (MI->getOperand(2).getImm() > 1)
+ return;
+ int addrr_opcode, addri_opcode;
+ switch (opcode) {
+ case X86::LEA16r:
+ addrr_opcode = X86::ADD16rr;
+ addri_opcode = X86::ADD16ri;
+ break;
+ case X86::LEA32r:
+ addrr_opcode = X86::ADD32rr;
+ addri_opcode = X86::ADD32ri;
+ break;
+ case X86::LEA64_32r:
+ case X86::LEA64r:
+ addrr_opcode = X86::ADD64rr;
+ addri_opcode = X86::ADD64ri32;
+ break;
+ default:
+ assert(false && "Unexpected LEA instruction");
+ }
+ DEBUG(dbgs() << "FixLEA: Candidate to replace:"; I->dump(););
+ DEBUG(dbgs() << "FixLEA: Replaced by: ";);
+ MachineInstr *NewMI = nullptr;
+ const MachineOperand &Dst = MI->getOperand(0);
+ // Make ADD instruction for two registers writing to LEA's destination
+ if (SrcR1 != 0 && SrcR2 != 0) {
+ const MachineOperand &Src1 = MI->getOperand(SrcR1 == DstR ? 1 : 3);
+ const MachineOperand &Src2 = MI->getOperand(SrcR1 == DstR ? 3 : 1);
+ NewMI = BuildMI(*MF, MI->getDebugLoc(), TII->get(addrr_opcode))
+ .addOperand(Dst)
+ .addOperand(Src1)
+ .addOperand(Src2);
+ MFI->insert(I, NewMI);
+ DEBUG(NewMI->dump(););
+ }
+ // Make ADD instruction for immediate
+ if (MI->getOperand(4).getImm() != 0) {
+ const MachineOperand &SrcR = MI->getOperand(SrcR1 == DstR ? 1 : 3);
+ NewMI = BuildMI(*MF, MI->getDebugLoc(), TII->get(addri_opcode))
+ .addOperand(Dst)
+ .addOperand(SrcR)
+ .addImm(MI->getOperand(4).getImm());
+ MFI->insert(I, NewMI);
+ DEBUG(NewMI->dump(););
+ }
+ if (NewMI) {
+ MFI->erase(I);
+ I = static_cast<MachineBasicBlock::iterator>(NewMI);
+ }
+}
+
bool FixupLEAPass::processBasicBlock(MachineFunction &MF,
MachineFunction::iterator MFI) {
- for (MachineBasicBlock::iterator I = MFI->begin(); I != MFI->end(); ++I)
- processInstruction(I, MFI);
+ for (MachineBasicBlock::iterator I = MFI->begin(); I != MFI->end(); ++I) {
+ if (TM->getSubtarget<X86Subtarget>().isSLM())
+ processInstructionForSLM(I, MFI);
+ else
+ processInstruction(I, MFI);
+ }
return false;
}
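
On Silvermont (the isSLM() path above), every instruction goes through processInstructionForSLM instead, which splits a slow LEA into plain ADDs whenever the LEA's destination is also one of its address registers, the scale is at most 1, no segment register is used, and EFLAGS are free to clobber. Under those conditions, for example:

//   leal 8(%edx,%eax), %eax    ; destination == index register, scale 1
// becomes
//   addl %edx, %eax            ; ADD32rr for the remaining register source
//   addl $8, %eax              ; ADD32ri for the non-zero displacement
// Unlike LEA, the ADDs write EFLAGS, which is why the pass first checks
// isSafeToClobberEFLAGS.
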
diff --git a/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp b/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp
index 48470da..c8a3ab3 100644
--- a/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp
@@ -23,7 +23,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "x86-codegen"
#include "X86.h"
#include "X86InstrInfo.h"
#include "llvm/ADT/DepthFirstIterator.h"
@@ -45,6 +44,8 @@
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "x86-codegen"
+
STATISTIC(NumFXCH, "Number of fxch instructions inserted");
STATISTIC(NumFP , "Number of floating point instructions");
@@ -59,7 +60,7 @@ namespace {
memset(RegMap, 0, sizeof(RegMap));
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<EdgeBundles>();
AU.addPreservedID(MachineLoopInfoID);
@@ -67,9 +68,9 @@ namespace {
MachineFunctionPass::getAnalysisUsage(AU);
}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- virtual const char *getPassName() const { return "X86 FP Stackifier"; }
+ const char *getPassName() const override { return "X86 FP Stackifier"; }
private:
const TargetInstrInfo *TII; // Machine instruction info.
@@ -430,9 +431,9 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
if (FPInstClass == X86II::NotFP)
continue; // Efficiently ignore non-fp insts!
- MachineInstr *PrevMI = 0;
+ MachineInstr *PrevMI = nullptr;
if (I != BB.begin())
- PrevMI = prior(I);
+ PrevMI = std::prev(I);
++NumFP; // Keep track of # of pseudo instrs
DEBUG(dbgs() << "\nFPInst:\t" << *MI);
@@ -475,10 +476,10 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
} else {
MachineBasicBlock::iterator Start = I;
// Rewind to first instruction newly inserted.
- while (Start != BB.begin() && prior(Start) != PrevI) --Start;
+ while (Start != BB.begin() && std::prev(Start) != PrevI) --Start;
dbgs() << "Inserted instructions:\n\t";
Start->print(dbgs(), &MF.getTarget());
- while (++Start != llvm::next(I)) {}
+ while (++Start != std::next(I)) {}
}
dumpStack();
);
@@ -905,7 +906,7 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
// Kill registers by popping.
if (Kills && I != MBB->begin()) {
- MachineBasicBlock::iterator I2 = llvm::prior(I);
+ MachineBasicBlock::iterator I2 = std::prev(I);
while (StackTop) {
unsigned KReg = getStackEntry(0);
if (!(Kills & (1 << KReg)))
@@ -1671,8 +1672,10 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
break;
}
- case X86::RET:
- case X86::RETI:
+ case X86::RETQ:
+ case X86::RETL:
+ case X86::RETIL:
+ case X86::RETIQ:
// If RET has an FP register use operand, pass the first one in ST(0) and
// the second one in ST(1).
diff --git a/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp b/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
index 0d76534..8c029a8 100644
--- a/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -29,6 +29,7 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/Debug.h"
using namespace llvm;
@@ -45,7 +46,7 @@ bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
const MachineModuleInfo &MMI = MF.getMMI();
- const TargetRegisterInfo *RegInfo = TM.getRegisterInfo();
+ const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo();
return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
RegInfo->needsStackRealignment(MF) ||
@@ -107,8 +108,10 @@ static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
unsigned Opc = MBBI->getOpcode();
switch (Opc) {
default: return 0;
- case X86::RET:
- case X86::RETI:
+ case X86::RETL:
+ case X86::RETQ:
+ case X86::RETIL:
+ case X86::RETIQ:
case X86::TCRETURNdi:
case X86::TCRETURNri:
case X86::TCRETURNmi:
@@ -180,7 +183,7 @@ void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
}
}
- MachineInstr *MI = NULL;
+ MachineInstr *MI = nullptr;
if (UseLEA) {
MI = addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
@@ -202,10 +205,10 @@ void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
/// mergeSPUpdatesUp - Merge two stack-manipulating instructions upper iterator.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
- unsigned StackPtr, uint64_t *NumBytes = NULL) {
+ unsigned StackPtr, uint64_t *NumBytes = nullptr) {
if (MBBI == MBB.begin()) return;
- MachineBasicBlock::iterator PI = prior(MBBI);
+ MachineBasicBlock::iterator PI = std::prev(MBBI);
unsigned Opc = PI->getOpcode();
if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
@@ -223,17 +226,18 @@ void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
}
}
-/// mergeSPUpdatesDown - Merge two stack-manipulating instructions lower iterator.
+/// mergeSPUpdatesDown - Merge two stack-manipulating instructions lower
+/// iterator.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
- unsigned StackPtr, uint64_t *NumBytes = NULL) {
+ unsigned StackPtr, uint64_t *NumBytes = nullptr) {
// FIXME: THIS ISN'T RUN!!!
return;
if (MBBI == MBB.end()) return;
- MachineBasicBlock::iterator NI = llvm::next(MBBI);
+ MachineBasicBlock::iterator NI = std::next(MBBI);
if (NI == MBB.end()) return;
unsigned Opc = NI->getOpcode();
@@ -255,19 +259,19 @@ void mergeSPUpdatesDown(MachineBasicBlock &MBB,
}
/// mergeSPUpdates - Checks the instruction before/after the passed
-/// instruction. If it is an ADD/SUB/LEA instruction it is deleted argument and the
-/// stack adjustment is returned as a positive value for ADD/LEA and a negative for
-/// SUB.
+/// instruction. If it is an ADD/SUB/LEA instruction it is deleted and the
+/// stack adjustment is returned as a positive value for ADD/LEA and a
+/// negative value for SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- unsigned StackPtr,
- bool doMergeWithPrevious) {
+ MachineBasicBlock::iterator &MBBI, unsigned StackPtr,
+ bool doMergeWithPrevious) {
if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
(!doMergeWithPrevious && MBBI == MBB.end()))
return 0;
- MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
- MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : llvm::next(MBBI);
+ MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;
+ MachineBasicBlock::iterator NI = doMergeWithPrevious ? nullptr
+ : std::next(MBBI);
unsigned Opc = PI->getOpcode();
int Offset = 0;
@@ -302,66 +306,32 @@ static bool isEAXLiveIn(MachineFunction &MF) {
return false;
}
-void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF,
- MCSymbol *Label,
- unsigned FramePtr) const {
+void
+X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc DL) const {
+ MachineFunction &MF = *MBB.getParent();
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineModuleInfo &MMI = MF.getMMI();
const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
// Add callee saved registers to move list.
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
if (CSI.empty()) return;
- const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
- bool HasFP = hasFP(MF);
-
- // Calculate amount of bytes used for return address storing.
- int stackGrowth = -RegInfo->getSlotSize();
-
- // FIXME: This is dirty hack. The code itself is pretty mess right now.
- // It should be rewritten from scratch and generalized sometimes.
-
- // Determine maximum offset (minimum due to stack growth).
- int64_t MaxOffset = 0;
- for (std::vector<CalleeSavedInfo>::const_iterator
- I = CSI.begin(), E = CSI.end(); I != E; ++I)
- MaxOffset = std::min(MaxOffset,
- MFI->getObjectOffset(I->getFrameIdx()));
-
// Calculate offsets.
- int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
for (std::vector<CalleeSavedInfo>::const_iterator
I = CSI.begin(), E = CSI.end(); I != E; ++I) {
int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
unsigned Reg = I->getReg();
- Offset = MaxOffset - Offset + saveAreaOffset;
-
- // Don't output a new machine move if we're re-saving the frame
- // pointer. This happens when the PrologEpilogInserter has inserted an extra
- // "PUSH" of the frame pointer -- the "emitPrologue" method automatically
- // generates one when frame pointers are used. If we generate a "machine
- // move" for this extra "PUSH", the linker will lose track of the fact that
- // the frame pointer should have the value of the first "PUSH" when it's
- // trying to unwind.
- //
- // FIXME: This looks inelegant. It's possibly correct, but it's covering up
- // another bug. I.e., one where we generate a prolog like this:
- //
- // pushl %ebp
- // movl %esp, %ebp
- // pushl %ebp
- // pushl %esi
- // ...
- //
- // The immediate re-push of EBP is unnecessary. At the least, it's an
- // optimization bug. EBP can be used as a scratch register in certain
- // cases, but probably not when we have a frame pointer.
- if (HasFP && FramePtr == Reg)
- continue;
unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
- MMI.addFrameInst(MCCFIInstruction::createOffset(Label, DwarfReg, Offset));
+ unsigned CFIIndex =
+ MMI.addFrameInst(MCCFIInstruction::createOffset(nullptr, DwarfReg,
+ Offset));
+ BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
}
}
@@ -373,8 +343,9 @@ void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF,
static bool usesTheStack(const MachineFunction &MF) {
const MachineRegisterInfo &MRI = MF.getRegInfo();
- for (MachineRegisterInfo::reg_iterator ri = MRI.reg_begin(X86::EFLAGS),
- re = MRI.reg_end(); ri != re; ++ri)
+ for (MachineRegisterInfo::reg_instr_iterator
+ ri = MRI.reg_instr_begin(X86::EFLAGS), re = MRI.reg_instr_end();
+ ri != re; ++ri)
if (ri->isCopy())
return true;
@@ -385,23 +356,107 @@ static bool usesTheStack(const MachineFunction &MF) {
/// automatically adjust the stack pointer. Adjust the stack pointer to allocate
/// space for local variables. Also emit labels used by the exception handler to
/// generate the exception handling frames.
+
+/*
+ Here's a gist of what gets emitted:
+
+ ; Establish frame pointer, if needed
+ [if needs FP]
+ push %rbp
+ .cfi_def_cfa_offset 16
+ .cfi_offset %rbp, -16
+ .seh_pushreg %rbp
+ mov %rsp, %rbp
+ .cfi_def_cfa_register %rbp
+
+ ; Spill general-purpose registers
+ [for all callee-saved GPRs]
+ pushq %<reg>
+ [if not needs FP]
+ .cfi_def_cfa_offset (offset from RETADDR)
+ .seh_pushreg %<reg>
+
+ ; If the required stack alignment > default stack alignment
+ ; rsp needs to be re-aligned. This creates a "re-alignment gap"
+ ; of unknown size in the stack frame.
+ [if stack needs re-alignment]
+ and $MASK, %rsp
+
+ ; Allocate space for locals
+ [if target is Windows and allocated space > 4096 bytes]
+ ; Windows needs special care for allocations larger
+ ; than one page.
+ mov $NNN, %rax
+ call ___chkstk_ms/___chkstk
+ sub %rax, %rsp
+ [else]
+ sub $NNN, %rsp
+
+ [if needs FP]
+ .seh_stackalloc (size of XMM spill slots)
+ .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
+ [else]
+ .seh_stackalloc NNN
+
+ ; Spill XMMs
+ ; Note that while only the Windows 64 ABI specifies XMMs as callee-preserved,
+ ; they may get spilled on any platform, if the current function
+ ; calls @llvm.eh.unwind.init
+ [if needs FP]
+ [for all callee-saved XMM registers]
+ movaps %<xmm reg>, -MMM(%rbp)
+ [for all callee-saved XMM registers]
+ .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
+ ; i.e. the offset relative to (%rbp - SEHFrameOffset)
+ [else]
+ [for all callee-saved XMM registers]
+ movaps %<xmm reg>, KKK(%rsp)
+ [for all callee-saved XMM registers]
+ .seh_savexmm %<xmm reg>, KKK
+
+ .seh_endprologue
+
+ [if needs base pointer]
+ mov %rsp, %rbx
+
+ ; Emit CFI info
+ [if needs FP]
+ [for all callee-saved registers]
+ .cfi_offset %<reg>, (offset from %rbp)
+ [else]
+ .cfi_def_cfa_offset (offset from RETADDR)
+ [for all callee-saved registers]
+ .cfi_offset %<reg>, (offset from %rsp)
+
+ Notes:
+ - .seh directives are emitted only for Windows 64 ABI
+ - .cfi directives are emitted for all other ABIs
+ - for 32-bit code, substitute %e?? registers for %r??
+*/
+
void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
MachineBasicBlock::iterator MBBI = MBB.begin();
MachineFrameInfo *MFI = MF.getFrameInfo();
const Function *Fn = MF.getFunction();
- const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
- const X86InstrInfo &TII = *TM.getInstrInfo();
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo *>(MF.getTarget().getRegisterInfo());
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
MachineModuleInfo &MMI = MF.getMMI();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
- bool needsFrameMoves = MMI.hasDebugInfo() ||
- Fn->needsUnwindTableEntry();
uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
uint64_t StackSize = MFI->getStackSize(); // Number of bytes to allocate.
bool HasFP = hasFP(MF);
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
bool Is64Bit = STI.is64Bit();
bool IsLP64 = STI.isTarget64BitLP64();
bool IsWin64 = STI.isTargetWin64();
+ bool IsWinEH =
+ MF.getTarget().getMCAsmInfo()->getExceptionHandlingType() ==
+ ExceptionHandling::WinEH; // Not necessarily synonymous with IsWin64.
+ bool NeedsWinEH = IsWinEH && Fn->needsUnwindTableEntry();
+ bool NeedsDwarfCFI =
+ !IsWinEH && (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
bool UseLEA = STI.useLeaForSP();
unsigned StackAlign = getStackAlignment();
unsigned SlotSize = RegInfo->getSlotSize();
@@ -439,7 +494,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
!MFI->adjustsStack() && // No calls.
!IsWin64 && // Win64 has no Red Zone
!usesTheStack(MF) && // Don't push and pop.
- !MF.getTarget().Options.EnableSegmentedStacks) { // Regular stack
+ !MF.shouldSplitStack()) { // Regular stack
uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
if (HasFP) MinSize += SlotSize;
StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
@@ -499,21 +554,28 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
.addReg(FramePtr, RegState::Kill)
.setMIFlag(MachineInstr::FrameSetup);
- if (needsFrameMoves) {
+ if (NeedsDwarfCFI) {
// Mark the place where EBP/RBP was saved.
- MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL))
- .addSym(FrameLabel);
-
// Define the current CFA rule to use the provided offset.
assert(StackSize);
- MMI.addFrameInst(
- MCCFIInstruction::createDefCfaOffset(FrameLabel, 2 * stackGrowth));
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, 2 * stackGrowth));
+ BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
// Change the rule for the FramePtr to be an "offset" rule.
unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(FramePtr, true);
- MMI.addFrameInst(MCCFIInstruction::createOffset(FrameLabel, DwarfFramePtr,
- 2 * stackGrowth));
+ CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createOffset(nullptr,
+ DwarfFramePtr, 2 * stackGrowth));
+ BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+
+ if (NeedsWinEH) {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
+ .addImm(FramePtr)
+ .setMIFlag(MachineInstr::FrameSetup);
}
// Update EBP with the new base value.
@@ -522,21 +584,18 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
.addReg(StackPtr)
.setMIFlag(MachineInstr::FrameSetup);
- if (needsFrameMoves) {
+ if (NeedsDwarfCFI) {
// Mark effective beginning of when frame pointer becomes valid.
- MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL))
- .addSym(FrameLabel);
-
// Define the current CFA to use the EBP/RBP register.
unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(FramePtr, true);
- MMI.addFrameInst(
- MCCFIInstruction::createDefCfaRegister(FrameLabel, DwarfFramePtr));
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr));
+ BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
}
- // Mark the FramePtr as live-in in every block except the entry.
- for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
- I != E; ++I)
+ // Mark the FramePtr as live-in in every block.
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
I->addLiveIn(FramePtr);
} else {
NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
@@ -550,30 +609,28 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
(MBBI->getOpcode() == X86::PUSH32r ||
MBBI->getOpcode() == X86::PUSH64r)) {
PushedRegs = true;
- MBBI->setFlag(MachineInstr::FrameSetup);
+ unsigned Reg = MBBI->getOperand(0).getReg();
++MBBI;
- if (!HasFP && needsFrameMoves) {
+ if (!HasFP && NeedsDwarfCFI) {
// Mark callee-saved push instruction.
- MCSymbol *Label = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label);
-
// Define the current CFA rule to use the provided offset.
assert(StackSize);
- MMI.addFrameInst(
- MCCFIInstruction::createDefCfaOffset(Label, StackOffset));
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, StackOffset));
+ BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
StackOffset += stackGrowth;
}
+
+ if (NeedsWinEH) {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)).addImm(Reg).setMIFlag(
+ MachineInstr::FrameSetup);
+ }
}
// Realign stack after we pushed callee-saved registers (so that we'll be
// able to calculate their offsets from the frame pointer).
-
- // NOTE: We push the registers before realigning the stack, so
- // vector callee-saved (xmm) registers may be saved w/o proper
- // alignment in this way. However, currently these regs are saved in
- // stack slots (see X86FrameLowering::spillCalleeSavedRegisters()), so
- // this shouldn't be a problem.
if (RegInfo->needsStackRealignment(MF)) {
assert(HasFP && "There should be a frame pointer if stack is realigned.");
MachineInstr *MI =
@@ -606,16 +663,14 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
// responsible for adjusting the stack pointer. Touching the stack at 4K
// increments is necessary to ensure that the guard pages used by the OS
// virtual memory manager are allocated in correct sequence.
- if (NumBytes >= 4096 && STI.isOSWindows() && !STI.isTargetEnvMacho()) {
+ if (NumBytes >= 4096 && STI.isOSWindows() && !STI.isTargetMacho()) {
const char *StackProbeSymbol;
- bool isSPUpdateNeeded = false;
if (Is64Bit) {
- if (STI.isTargetCygMing())
- StackProbeSymbol = "___chkstk";
- else {
+ if (STI.isTargetCygMing()) {
+ StackProbeSymbol = "___chkstk_ms";
+ } else {
StackProbeSymbol = "__chkstk";
- isSPUpdateNeeded = true;
}
} else if (STI.isTargetCygMing())
StackProbeSymbol = "_alloca";
@@ -657,15 +712,15 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
.addReg(X86::EFLAGS, RegState::Define | RegState::Implicit)
.setMIFlag(MachineInstr::FrameSetup);
- // MSVC x64's __chkstk does not adjust %rsp itself.
- // It also does not clobber %rax so we can reuse it when adjusting %rsp.
- if (isSPUpdateNeeded) {
+ if (Is64Bit) {
+ // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
+ // themselves. They also do not clobber %rax, so we can reuse it when
+ // adjusting %rsp.
BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), StackPtr)
.addReg(StackPtr)
.addReg(X86::RAX)
.setMIFlag(MachineInstr::FrameSetup);
}
-
if (isEAXAlive) {
// Restore EAX
MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
@@ -674,38 +729,103 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
MI->setFlag(MachineInstr::FrameSetup);
MBB.insert(MBBI, MI);
}
- } else if (NumBytes)
+ } else if (NumBytes) {
emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, IsLP64,
UseLEA, TII, *RegInfo);
+ }
+
+ int SEHFrameOffset = 0;
+ if (NeedsWinEH) {
+ if (HasFP) {
+ // We need to set the frame base offset low enough that all saved
+ // register offsets are positive relative to it, but we can't just
+ // use NumBytes, because the .seh_setframe offset must be <= 240.
+ // So we pretend to have allocated only enough space to spill the
+ // non-volatile registers.
+ // We don't care about the rest of the stack allocation, because the
+ // unwinder will restore SP to (BP - SEHFrameOffset).
+ for (const CalleeSavedInfo &Info : MFI->getCalleeSavedInfo()) {
+ int offset = MFI->getObjectOffset(Info.getFrameIdx());
+ SEHFrameOffset = std::max(SEHFrameOffset, abs(offset));
+ }
+ SEHFrameOffset += SEHFrameOffset % 16; // ensure alignment
+
+ // This only needs to account for XMM spill slots; GPR slots
+ // are covered by the .seh_pushreg's emitted above.
+ unsigned Size = SEHFrameOffset - X86FI->getCalleeSavedFrameSize();
+ if (Size) {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
+ .addImm(Size)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
+ .addImm(FramePtr)
+ .addImm(SEHFrameOffset)
+ .setMIFlag(MachineInstr::FrameSetup);
+ } else {
+ // SP will be the base register for restoring XMMs
+ if (NumBytes) {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
+ .addImm(NumBytes)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+ }
+ }
+
+ // Skip the rest of the register spilling code.
+ while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
+ ++MBBI;
+
+ // Emit SEH info for non-GPRs
+ if (NeedsWinEH) {
+ for (const CalleeSavedInfo &Info : MFI->getCalleeSavedInfo()) {
+ unsigned Reg = Info.getReg();
+ if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
+ continue;
+ assert(X86::FR64RegClass.contains(Reg) && "Unexpected register class");
+
+ int Offset = getFrameIndexOffset(MF, Info.getFrameIdx());
+ Offset += SEHFrameOffset;
+
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
+ .addImm(Reg)
+ .addImm(Offset)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
// If we need a base pointer, set it up here. It's whatever the value
// of the stack pointer is at this point. Any variable size objects
// will be allocated after this, so we can still use the base pointer
// to reference locals.
if (RegInfo->hasBasePointer(MF)) {
- // Update the frame pointer with the current stack pointer.
+ // Update the base pointer with the current stack pointer.
unsigned Opc = Is64Bit ? X86::MOV64rr : X86::MOV32rr;
BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
.addReg(StackPtr)
.setMIFlag(MachineInstr::FrameSetup);
}
- if (( (!HasFP && NumBytes) || PushedRegs) && needsFrameMoves) {
+ if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
// Mark end of stack pointer adjustment.
- MCSymbol *Label = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL))
- .addSym(Label);
-
if (!HasFP && NumBytes) {
// Define the current CFA rule to use the provided offset.
assert(StackSize);
- MMI.addFrameInst(MCCFIInstruction::createDefCfaOffset(
- Label, -StackSize + stackGrowth));
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr,
+ -StackSize + stackGrowth));
+
+ BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
}
// Emit DWARF info specifying the offsets of the callee-saved registers.
if (PushedRegs)
- emitCalleeSavedFrameMoves(MF, Label, HasFP ? FramePtr : StackPtr);
+ emitCalleeSavedFrameMoves(MBB, MBBI, DL);
}
}
@@ -713,12 +833,14 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
- const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
- const X86InstrInfo &TII = *TM.getInstrInfo();
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo *>(MF.getTarget().getRegisterInfo());
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
assert(MBBI != MBB.end() && "Returning block has no instructions");
unsigned RetOpcode = MBBI->getOpcode();
DebugLoc DL = MBBI->getDebugLoc();
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
bool Is64Bit = STI.is64Bit();
bool IsLP64 = STI.isTarget64BitLP64();
bool UseLEA = STI.useLeaForSP();
@@ -730,8 +852,10 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
switch (RetOpcode) {
default:
llvm_unreachable("Can only insert epilog into returning blocks");
- case X86::RET:
- case X86::RETI:
+ case X86::RETQ:
+ case X86::RETL:
+ case X86::RETIL:
+ case X86::RETIQ:
case X86::TCRETURNdi:
case X86::TCRETURNri:
case X86::TCRETURNmi:
@@ -781,7 +905,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
// Skip the callee-saved pop instructions.
while (MBBI != MBB.begin()) {
- MachineBasicBlock::iterator PI = prior(MBBI);
+ MachineBasicBlock::iterator PI = std::prev(MBBI);
unsigned Opc = PI->getOpcode();
if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE &&
@@ -883,12 +1007,13 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
addReg(JumpTarget.getReg(), RegState::Kill);
}
- MachineInstr *NewMI = prior(MBBI);
+ MachineInstr *NewMI = std::prev(MBBI);
NewMI->copyImplicitOps(MF, MBBI);
// Delete the pseudo instruction TCRETURN.
MBB.erase(MBBI);
- } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
+ } else if ((RetOpcode == X86::RETQ || RetOpcode == X86::RETL ||
+ RetOpcode == X86::RETIQ || RetOpcode == X86::RETIL) &&
(X86FI->getTCReturnAddrDelta() < 0)) {
// Add the return addr area delta back since we are not tail calling.
int delta = -1*X86FI->getTCReturnAddrDelta();
@@ -901,7 +1026,8 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
}
}
-int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF, int FI) const {
+int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
+ int FI) const {
const X86RegisterInfo *RegInfo =
static_cast<const X86RegisterInfo*>(MF.getTarget().getRegisterInfo());
const MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -959,46 +1085,97 @@ int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
return getFrameIndexOffset(MF, FI);
}
-bool X86FrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
- if (CSI.empty())
- return false;
+bool X86FrameLowering::assignCalleeSavedSpillSlots(
+ MachineFunction &MF, const TargetRegisterInfo *TRI,
+ std::vector<CalleeSavedInfo> &CSI) const {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo *>(MF.getTarget().getRegisterInfo());
+ unsigned SlotSize = RegInfo->getSlotSize();
+ X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
- DebugLoc DL = MBB.findDebugLoc(MI);
+ unsigned CalleeSavedFrameSize = 0;
+ int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();
- MachineFunction &MF = *MBB.getParent();
+ if (hasFP(MF)) {
+ // emitPrologue always spills the frame register first.
+ SpillSlotOffset -= SlotSize;
+ MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
+
+ // Since emitPrologue and emitEpilogue will handle spilling and restoring of
+ // the frame register, we can delete it from the CSI list and not have to worry
+ // about avoiding it later.
+ unsigned FPReg = RegInfo->getFrameRegister(MF);
+ for (unsigned i = 0; i < CSI.size(); ++i) {
+ if (CSI[i].getReg() == FPReg) {
+ CSI.erase(CSI.begin() + i);
+ break;
+ }
+ }
+ }
- unsigned SlotSize = STI.is64Bit() ? 8 : 4;
- unsigned FPReg = TRI->getFrameRegister(MF);
- unsigned CalleeFrameSize = 0;
+ // Assign slots for GPRs. It increases frame size.
+ for (unsigned i = CSI.size(); i != 0; --i) {
+ unsigned Reg = CSI[i - 1].getReg();
+ if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
+ continue;
+
+ SpillSlotOffset -= SlotSize;
+ CalleeSavedFrameSize += SlotSize;
+
+ int SlotIndex = MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
+ CSI[i - 1].setFrameIdx(SlotIndex);
+ }
+
+ X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);
+
+ // Assign slots for XMMs.
+ for (unsigned i = CSI.size(); i != 0; --i) {
+ unsigned Reg = CSI[i - 1].getReg();
+ if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
+ continue;
+
+ const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
+ // ensure alignment
+ SpillSlotOffset -= abs(SpillSlotOffset) % RC->getAlignment();
+ // spill into slot
+ SpillSlotOffset -= RC->getSize();
+ int SlotIndex =
+ MFI->CreateFixedSpillStackObject(RC->getSize(), SpillSlotOffset);
+ CSI[i - 1].setFrameIdx(SlotIndex);
+ MFI->ensureMaxAlignment(RC->getAlignment());
+ }
+
+ return true;
+}
+
+bool X86FrameLowering::spillCalleeSavedRegisters(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ DebugLoc DL = MBB.findDebugLoc(MI);
+
+ MachineFunction &MF = *MBB.getParent();
const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
- X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
// Push GPRs. It increases frame size.
unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
for (unsigned i = CSI.size(); i != 0; --i) {
- unsigned Reg = CSI[i-1].getReg();
- if (!X86::GR64RegClass.contains(Reg) &&
- !X86::GR32RegClass.contains(Reg))
+ unsigned Reg = CSI[i - 1].getReg();
+
+ if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
continue;
// Add the callee-saved register as live-in. It's killed at the spill.
MBB.addLiveIn(Reg);
- if (Reg == FPReg)
- // X86RegisterInfo::emitPrologue will handle spilling of frame register.
- continue;
- CalleeFrameSize += SlotSize;
+
BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
.setMIFlag(MachineInstr::FrameSetup);
}
- X86FI->setCalleeSavedFrameSize(CalleeFrameSize);
-
// Make XMM regs spilled. X86 does not have ability of push/pop XMM.
// It can be done by spilling XMMs to stack frame.
- // Note that only Win64 ABI might spill XMMs.
for (unsigned i = CSI.size(); i != 0; --i) {
unsigned Reg = CSI[i-1].getReg();
if (X86::GR64RegClass.contains(Reg) ||
@@ -1007,8 +1184,12 @@ bool X86FrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
// Add the callee-saved register as live-in. It's killed at the spill.
MBB.addLiveIn(Reg);
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i-1].getFrameIdx(),
- RC, TRI);
+
+ TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,
+ TRI);
+ --MI;
+ MI->setFlag(MachineInstr::FrameSetup);
+ ++MI;
}
return true;
@@ -1025,6 +1206,7 @@ bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineFunction &MF = *MBB.getParent();
const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
// Reload XMMs from stack frame.
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
@@ -1032,22 +1214,19 @@ bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
if (X86::GR64RegClass.contains(Reg) ||
X86::GR32RegClass.contains(Reg))
continue;
+
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(),
- RC, TRI);
+ TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI);
}
// POP GPRs.
- unsigned FPReg = TRI->getFrameRegister(MF);
unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
if (!X86::GR64RegClass.contains(Reg) &&
!X86::GR32RegClass.contains(Reg))
continue;
- if (Reg == FPReg)
- // X86RegisterInfo::emitEpilogue will handle restoring of frame register.
- continue;
+
BuildMI(MBB, MI, DL, TII.get(Opc), Reg);
}
return true;
@@ -1055,9 +1234,10 @@ bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
void
X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const {
+ RegScavenger *RS) const {
MachineFrameInfo *MFI = MF.getFrameInfo();
- const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo *>(MF.getTarget().getRegisterInfo());
unsigned SlotSize = RegInfo->getSlotSize();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
@@ -1077,22 +1257,6 @@ X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
TailCallReturnAddrDelta - SlotSize, true);
}
- if (hasFP(MF)) {
- assert((TailCallReturnAddrDelta <= 0) &&
- "The Delta should always be zero or negative");
- const TargetFrameLowering &TFI = *MF.getTarget().getFrameLowering();
-
- // Create a frame entry for the EBP register that must be saved.
- int FrameIdx = MFI->CreateFixedObject(SlotSize,
- -(int)SlotSize +
- TFI.getOffsetOfLocalArea() +
- TailCallReturnAddrDelta,
- true);
- assert(FrameIdx == MFI->getObjectIndexBegin() &&
- "Slot for EBP register must be last in order to be found!");
- (void)FrameIdx;
- }
-
// Spill the BasePtr if it's used.
if (RegInfo->hasBasePointer(MF))
MF.getRegInfo().setPhysRegUsed(RegInfo->getBaseRegister());
@@ -1150,8 +1314,9 @@ void
X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
MachineBasicBlock &prologueMBB = MF.front();
MachineFrameInfo *MFI = MF.getFrameInfo();
- const X86InstrInfo &TII = *TM.getInstrInfo();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
uint64_t StackSize;
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
bool Is64Bit = STI.is64Bit();
unsigned TlsReg, TlsOffset;
DebugLoc DL;
@@ -1163,9 +1328,18 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
if (MF.getFunction()->isVarArg())
report_fatal_error("Segmented stacks do not support vararg functions.");
if (!STI.isTargetLinux() && !STI.isTargetDarwin() &&
- !STI.isTargetWin32() && !STI.isTargetFreeBSD())
+ !STI.isTargetWin32() && !STI.isTargetWin64() && !STI.isTargetFreeBSD())
report_fatal_error("Segmented stacks not supported on this platform.");
+ // Eventually StackSize will be calculated by a link-time pass, which will
+ // also decide whether checking code needs to be injected into this particular
+ // prologue.
+ StackSize = MFI->getStackSize();
+
+ // Do not generate a prologue for functions with a stack of size zero
+ if (StackSize == 0)
+ return;
+
MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
@@ -1190,11 +1364,6 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
MF.push_front(allocMBB);
MF.push_front(checkMBB);
- // Eventually StackSize will be calculated by a link-time pass; which will
- // also decide whether checking code needs to be injected into this particular
- // prologue.
- StackSize = MFI->getStackSize();
-
// When the frame size is less than 256 we just compare the stack
// boundary directly to the value of the stack pointer, per gcc.
bool CompareStackPointer = StackSize < kSplitStackAvailable;
@@ -1207,6 +1376,9 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
} else if (STI.isTargetDarwin()) {
TlsReg = X86::GS;
TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
+ } else if (STI.isTargetWin64()) {
+ TlsReg = X86::GS;
+ TlsOffset = 0x28; // pvArbitrary, reserved for application use
} else if (STI.isTargetFreeBSD()) {
TlsReg = X86::FS;
TlsOffset = 0x18;
@@ -1244,27 +1416,28 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
.addImm(1).addReg(0).addImm(-StackSize).addReg(0);
- if (STI.isTargetLinux() || STI.isTargetWin32()) {
+ if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64()) {
BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
.addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
} else if (STI.isTargetDarwin()) {
- // TlsOffset doesn't fit into a mod r/m byte so we need an extra register
+ // TlsOffset doesn't fit into a mod r/m byte so we need an extra register.
unsigned ScratchReg2;
bool SaveScratch2;
if (CompareStackPointer) {
- // The primary scratch register is available for holding the TLS offset
+ // The primary scratch register is available for holding the TLS offset.
ScratchReg2 = GetScratchRegister(Is64Bit, MF, true);
SaveScratch2 = false;
} else {
// Need to use a second register to hold the TLS offset
ScratchReg2 = GetScratchRegister(Is64Bit, MF, false);
- // Unfortunately, with fastcc the second scratch register may hold an arg
+ // Unfortunately, with fastcc the second scratch register may hold an
+ // argument.
SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
}
- // If Scratch2 is live-in then it needs to be saved
+ // If Scratch2 is live-in then it needs to be saved.
assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
"Scratch register is live-in and not saved");
@@ -1341,18 +1514,21 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
/// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
///
/// CheckStack:
-/// temp0 = sp - MaxStack
-/// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
+/// temp0 = sp - MaxStack
+/// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
/// OldStart:
-/// ...
+/// ...
/// IncStack:
-/// call inc_stack # doubles the stack space
-/// temp0 = sp - MaxStack
-/// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
+/// call inc_stack # doubles the stack space
+/// temp0 = sp - MaxStack
+/// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
void X86FrameLowering::adjustForHiPEPrologue(MachineFunction &MF) const {
- const X86InstrInfo &TII = *TM.getInstrInfo();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
MachineFrameInfo *MFI = MF.getFrameInfo();
- const unsigned SlotSize = TM.getRegisterInfo()->getSlotSize();
+ const unsigned SlotSize =
+ static_cast<const X86RegisterInfo *>(MF.getTarget().getRegisterInfo())
+ ->getSlotSize();
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
const bool Is64Bit = STI.is64Bit();
DebugLoc DL;
// HiPE-specific values
@@ -1481,12 +1657,14 @@ void X86FrameLowering::adjustForHiPEPrologue(MachineFunction &MF) const {
void X86FrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- const X86InstrInfo &TII = *TM.getInstrInfo();
- const X86RegisterInfo &RegInfo = *TM.getRegisterInfo();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ const X86RegisterInfo &RegInfo =
+ *static_cast<const X86RegisterInfo *>(MF.getTarget().getRegisterInfo());
unsigned StackPtr = RegInfo.getStackRegister();
bool reseveCallFrame = hasReservedCallFrame(MF);
int Opcode = I->getOpcode();
bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
bool IsLP64 = STI.isTarget64BitLP64();
DebugLoc DL = I->getDebugLoc();
uint64_t Amount = !reseveCallFrame ? I->getOperand(0).getImm() : 0;
@@ -1504,10 +1682,11 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// We need to keep the stack aligned properly. To do this, we round the
// amount of space needed for the outgoing arguments up to the next
// alignment boundary.
- unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
+ unsigned StackAlign =
+ MF.getTarget().getFrameLowering()->getStackAlignment();
Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;
- MachineInstr *New = 0;
+ MachineInstr *New = nullptr;
if (Opcode == TII.getCallFrameSetupOpcode()) {
New = BuildMI(MF, DL, TII.get(getSUBriOpcode(IsLP64, Amount)),
StackPtr)
@@ -1552,7 +1731,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// sure we restore the stack pointer immediately after the call, there may
// be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
MachineBasicBlock::iterator B = MBB.begin();
- while (I != B && !llvm::prior(I)->isCall())
+ while (I != B && !std::prev(I)->isCall())
--I;
MBB.insert(I, New);
}
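
In the new Win64 prologue path above, .seh_setframe cannot encode an arbitrary offset
(it must stay <= 240), so emitPrologue derives SEHFrameOffset from the largest
callee-saved spill offset and then applies the padding step from the patch. Below is a
minimal standalone sketch of that arithmetic only; the spill offsets and the GPR push
size are made-up example values, not taken from any real frame layout.

// Standalone sketch, assuming hypothetical frame-index offsets.
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <vector>

int main() {
  // Frame-relative offsets of the callee-saved spill slots (negative values).
  std::vector<int> csOffsets = {-16, -24, -40, -56};

  int SEHFrameOffset = 0;
  for (int off : csOffsets)
    SEHFrameOffset = std::max(SEHFrameOffset, std::abs(off));
  // Padding step exactly as written in the patch ("ensure alignment").
  SEHFrameOffset += SEHFrameOffset % 16;

  unsigned CalleeSavedGPRSize = 32; // already covered by .seh_pushreg entries
  unsigned XMMSpillAlloc = SEHFrameOffset - CalleeSavedGPRSize;

  std::cout << ".seh_stackalloc " << XMMSpillAlloc << "\n";
  std::cout << ".seh_setframe %rbp, " << SEHFrameOffset << "\n";
}

With these inputs the sketch prints ".seh_stackalloc 32" and ".seh_setframe %rbp, 64":
only the XMM spill area is charged to .seh_stackalloc, because the GPR pushes were
already described by the .seh_pushreg directives emitted earlier in the prologue.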
diff --git a/contrib/llvm/lib/Target/X86/X86FrameLowering.h b/contrib/llvm/lib/Target/X86/X86FrameLowering.h
index 3d3b011..5ad3d4d 100644
--- a/contrib/llvm/lib/Target/X86/X86FrameLowering.h
+++ b/contrib/llvm/lib/Target/X86/X86FrameLowering.h
@@ -14,8 +14,6 @@
#ifndef X86_FRAMELOWERING_H
#define X86_FRAMELOWERING_H
-#include "X86Subtarget.h"
-#include "llvm/MC/MCDwarf.h"
#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
@@ -24,51 +22,51 @@ class MCSymbol;
class X86TargetMachine;
class X86FrameLowering : public TargetFrameLowering {
- const X86TargetMachine &TM;
- const X86Subtarget &STI;
public:
- explicit X86FrameLowering(const X86TargetMachine &tm, const X86Subtarget &sti)
- : TargetFrameLowering(StackGrowsDown,
- sti.getStackAlignment(),
- (sti.is64Bit() ? -8 : -4)),
- TM(tm), STI(sti) {
- }
+ explicit X86FrameLowering(StackDirection D, unsigned StackAl, int LAO)
+ : TargetFrameLowering(StackGrowsDown, StackAl, LAO) {}
- void emitCalleeSavedFrameMoves(MachineFunction &MF, MCSymbol *Label,
- unsigned FramePtr) const;
+ void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc DL) const;
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+ void emitPrologue(MachineFunction &MF) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
- void adjustForSegmentedStacks(MachineFunction &MF) const;
+ void adjustForSegmentedStacks(MachineFunction &MF) const override;
- void adjustForHiPEPrologue(MachineFunction &MF) const;
+ void adjustForHiPEPrologue(MachineFunction &MF) const override;
void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS = NULL) const;
+ RegScavenger *RS = nullptr) const override;
+
+ bool
+ assignCalleeSavedSpillSlots(MachineFunction &MF,
+ const TargetRegisterInfo *TRI,
+ std::vector<CalleeSavedInfo> &CSI) const override;
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo *TRI) const override;
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const override;
- bool hasFP(const MachineFunction &MF) const;
- bool hasReservedCallFrame(const MachineFunction &MF) const;
+ bool hasFP(const MachineFunction &MF) const override;
+ bool hasReservedCallFrame(const MachineFunction &MF) const override;
- int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
+ int getFrameIndexOffset(const MachineFunction &MF, int FI) const override;
int getFrameIndexReference(const MachineFunction &MF, int FI,
- unsigned &FrameReg) const;
+ unsigned &FrameReg) const override;
void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const;
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const override;
};
} // End llvm namespace
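
The assignCalleeSavedSpillSlots override declared above (and implemented in
X86FrameLowering.cpp earlier in this patch) lays out the fixed spill slots itself instead
of leaving it to generic code: the frame pointer slot comes first, GPRs then get one
slot-size slot each growing downward, and XMM registers get slots aligned to their
register class. The sketch below models only that offset arithmetic as plain C++ with a
hypothetical register list; it is not the LLVM implementation.

// Standalone sketch, assuming a made-up callee-saved register list.
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

struct Reg { std::string name; bool isGPR; int size; int align; };

int main() {
  const int SlotSize = 8;          // 64-bit target
  int SpillSlotOffset = -SlotSize; // offset of the local area (below the return address)
  SpillSlotOffset -= SlotSize;     // the saved frame pointer takes the first slot

  std::vector<Reg> csi = {
    {"rbx", true, 8, 8}, {"r12", true, 8, 8},
    {"xmm6", false, 16, 16}, {"xmm7", false, 16, 16},
  };

  int CalleeSavedFrameSize = 0;
  // GPRs first: one SlotSize slot each; the callee-saved frame size grows with them.
  for (auto it = csi.rbegin(); it != csi.rend(); ++it) {
    if (!it->isGPR) continue;
    SpillSlotOffset -= SlotSize;
    CalleeSavedFrameSize += SlotSize;
    std::cout << it->name << " at offset " << SpillSlotOffset << "\n";
  }
  // Then XMMs: push the running offset down to the class alignment, then
  // carve out a slot of the class size.
  for (auto it = csi.rbegin(); it != csi.rend(); ++it) {
    if (it->isGPR) continue;
    SpillSlotOffset -= std::abs(SpillSlotOffset) % it->align;
    SpillSlotOffset -= it->size;
    std::cout << it->name << " at offset " << SpillSlotOffset << "\n";
  }
  std::cout << "CalleeSavedFrameSize = " << CalleeSavedFrameSize << "\n";
}

Because emitPrologue and emitEpilogue push and pop the frame pointer themselves, the
patch also drops it from the CSI list before these loops run, which is why the sketch
simply reserves its slot up front.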
diff --git a/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 36d1690..ba2f5f6 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
@@ -36,6 +35,8 @@
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
+#define DEBUG_TYPE "x86-isel"
+
STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
//===----------------------------------------------------------------------===//
@@ -70,17 +71,18 @@ namespace {
X86ISelAddressMode()
: BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
- Segment(), GV(0), CP(0), BlockAddr(0), ES(0), JT(-1), Align(0),
- SymbolFlags(X86II::MO_NO_FLAG) {
+ Segment(), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr),
+ JT(-1), Align(0), SymbolFlags(X86II::MO_NO_FLAG) {
}
bool hasSymbolicDisplacement() const {
- return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0;
+ return GV != nullptr || CP != nullptr || ES != nullptr ||
+ JT != -1 || BlockAddr != nullptr;
}
bool hasBaseOrIndexReg() const {
return BaseType == FrameIndexBase ||
- IndexReg.getNode() != 0 || Base_Reg.getNode() != 0;
+ IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
}
/// isRIPRelative - Return true if this addressing mode is already RIP
@@ -102,14 +104,14 @@ namespace {
void dump() {
dbgs() << "X86ISelAddressMode " << this << '\n';
dbgs() << "Base_Reg ";
- if (Base_Reg.getNode() != 0)
+ if (Base_Reg.getNode())
Base_Reg.getNode()->dump();
else
dbgs() << "nul";
dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
<< " Scale" << Scale << '\n'
<< "IndexReg ";
- if (IndexReg.getNode() != 0)
+ if (IndexReg.getNode())
IndexReg.getNode()->dump();
else
dbgs() << "nul";
@@ -141,7 +143,7 @@ namespace {
/// ISel - X86 specific code to select X86 machine instructions for
/// SelectionDAG operations.
///
- class X86DAGToDAGISel : public SelectionDAGISel {
+ class X86DAGToDAGISel final : public SelectionDAGISel {
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
const X86Subtarget *Subtarget;
@@ -156,15 +158,22 @@ namespace {
Subtarget(&tm.getSubtarget<X86Subtarget>()),
OptForSize(false) {}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "X86 DAG->DAG Instruction Selection";
}
- virtual void EmitFunctionEntryCode();
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ // Reset the subtarget each time through.
+ Subtarget = &TM.getSubtarget<X86Subtarget>();
+ SelectionDAGISel::runOnMachineFunction(MF);
+ return true;
+ }
+
+ void EmitFunctionEntryCode() override;
- virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;
+ bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;
- virtual void PreprocessISelDAG();
+ void PreprocessISelDAG() override;
inline bool immSext8(SDNode *N) const {
return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
@@ -181,7 +190,7 @@ namespace {
#include "X86GenDAGISel.inc"
private:
- SDNode *Select(SDNode *N);
+ SDNode *Select(SDNode *N) override;
SDNode *SelectGather(SDNode *N, unsigned Opc);
SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
SDNode *SelectAtomicLoadArith(SDNode *Node, MVT NVT);
@@ -219,9 +228,9 @@ namespace {
/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
/// inline asm expressions.
- virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
- char ConstraintCode,
- std::vector<SDValue> &OutOps);
+ bool SelectInlineAsmMemoryOperand(const SDValue &Op,
+ char ConstraintCode,
+ std::vector<SDValue> &OutOps) override;
void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);
@@ -344,7 +353,7 @@ X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
// addl %gs:0, %eax
// if the block also has an access to a second TLS address this will save
// a load.
- // FIXME: This is probably also true for non TLS addresses.
+ // FIXME: This is probably also true for non-TLS addresses.
if (Op1.getOpcode() == X86ISD::Wrapper) {
SDValue Val = Op1.getOperand(0);
if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
@@ -374,14 +383,13 @@ static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
else
Ops.push_back(Chain.getOperand(i));
SDValue NewChain =
- CurDAG->getNode(ISD::TokenFactor, SDLoc(Load),
- MVT::Other, &Ops[0], Ops.size());
+ CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
Ops.clear();
Ops.push_back(NewChain);
}
for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
Ops.push_back(OrigChain.getOperand(i));
- CurDAG->UpdateNodeOperands(OrigChain.getNode(), &Ops[0], Ops.size());
+ CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
Load.getOperand(1), Load.getOperand(2));
@@ -390,7 +398,7 @@ static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
Ops.push_back(SDValue(Load.getNode(), 1));
for (unsigned i = 1, e = NumOps; i != e; ++i)
Ops.push_back(Call.getOperand(i));
- CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], NumOps);
+ CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
}
/// isCalleeLoad - Return true if call address is a load and it can be
@@ -612,7 +620,7 @@ bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
// gs:0 (or fs:0 on X86-64) contains its own address.
// For more information see http://people.redhat.com/drepper/tls.pdf
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
- if (C->getSExtValue() == 0 && AM.Segment.getNode() == 0 &&
+ if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
Subtarget->isTargetLinux())
switch (N->getPointerInfo().getAddrSpace()) {
case 256:
@@ -733,7 +741,7 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
// a smaller encoding and avoids a scaled-index.
if (AM.Scale == 2 &&
AM.BaseType == X86ISelAddressMode::RegBase &&
- AM.Base_Reg.getNode() == 0) {
+ AM.Base_Reg.getNode() == nullptr) {
AM.Base_Reg = AM.IndexReg;
AM.Scale = 1;
}
@@ -745,8 +753,8 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
Subtarget->is64Bit() &&
AM.Scale == 1 &&
AM.BaseType == X86ISelAddressMode::RegBase &&
- AM.Base_Reg.getNode() == 0 &&
- AM.IndexReg.getNode() == 0 &&
+ AM.Base_Reg.getNode() == nullptr &&
+ AM.IndexReg.getNode() == nullptr &&
AM.SymbolFlags == X86II::MO_NO_FLAG &&
AM.hasSymbolicDisplacement())
AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);
@@ -926,7 +934,7 @@ static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
APInt MaskedHighBits =
APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
APInt KnownZero, KnownOne;
- DAG.ComputeMaskedBits(X, KnownZero, KnownOne);
+ DAG.computeKnownBits(X, KnownZero, KnownOne);
if (MaskedHighBits != KnownZero) return true;
// We've identified a pattern that can be transformed into a single shift
@@ -1009,7 +1017,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
case ISD::FrameIndex:
if (AM.BaseType == X86ISelAddressMode::RegBase &&
- AM.Base_Reg.getNode() == 0 &&
+ AM.Base_Reg.getNode() == nullptr &&
(!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
AM.BaseType = X86ISelAddressMode::FrameIndexBase;
AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
@@ -1018,7 +1026,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
break;
case ISD::SHL:
- if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
+ if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
break;
if (ConstantSDNode
@@ -1052,7 +1060,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
case ISD::SRL: {
// Scale must not be used already.
- if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;
+ if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
SDValue And = N.getOperand(0);
if (And.getOpcode() != ISD::AND) break;
@@ -1086,8 +1094,8 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
case X86ISD::MUL_IMM:
// X*[3,5,9] -> X+X*[2,4,8]
if (AM.BaseType == X86ISelAddressMode::RegBase &&
- AM.Base_Reg.getNode() == 0 &&
- AM.IndexReg.getNode() == 0) {
+ AM.Base_Reg.getNode() == nullptr &&
+ AM.IndexReg.getNode() == nullptr) {
if (ConstantSDNode
*CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
@@ -1237,7 +1245,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
// with a constant to enable use of the scaled offset field.
// Scale must not be used already.
- if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;
+ if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
SDValue Shift = N.getOperand(0);
if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
@@ -1276,7 +1284,7 @@ bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
// Is the base register already occupied?
if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
// If so, check to see if the scale index register is set.
- if (AM.IndexReg.getNode() == 0) {
+ if (!AM.IndexReg.getNode()) {
AM.IndexReg = N;
AM.Scale = 1;
return false;
@@ -1567,7 +1575,7 @@ SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
- return NULL;
+ return nullptr;
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain};
@@ -1756,7 +1764,7 @@ static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG,
SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, MVT NVT) {
if (Node->hasAnyUseOfValue(0))
- return 0;
+ return nullptr;
SDLoc dl(Node);
@@ -1768,13 +1776,13 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, MVT NVT) {
SDValue Val = Node->getOperand(2);
SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
- return 0;
+ return nullptr;
// Which index into the table.
enum AtomicOpc Op;
switch (Node->getOpcode()) {
default:
- return 0;
+ return nullptr;
case ISD::ATOMIC_LOAD_OR:
Op = OR;
break;
@@ -1795,7 +1803,7 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, MVT NVT) {
unsigned Opc = 0;
switch (NVT.SimpleTy) {
- default: return 0;
+ default: return nullptr;
case MVT::i8:
if (isCN)
Opc = AtomicOpcTbl[Op][ConstantI8];
@@ -1847,7 +1855,7 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, MVT NVT) {
}
cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
SDValue RetVals[] = { Undef, Ret };
- return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
+ return CurDAG->getMergeValues(RetVals, dl).getNode();
}
/// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has
@@ -1990,7 +1998,7 @@ static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
// Make a new TokenFactor with all the other input chains except
// for the load.
InputChain = CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain),
- MVT::Other, &ChainOps[0], ChainOps.size());
+ MVT::Other, ChainOps);
}
if (!ChainCheck)
return false;
@@ -2027,7 +2035,7 @@ SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) {
SDValue VMask = Node->getOperand(5);
ConstantSDNode *Scale = dyn_cast<ConstantSDNode>(Node->getOperand(6));
if (!Scale)
- return 0;
+ return nullptr;
SDVTList VTs = CurDAG->getVTList(VSrc.getValueType(), VSrc.getValueType(),
MVT::Other);
@@ -2058,7 +2066,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
if (Node->isMachineOpcode()) {
DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
Node->setNodeId(-1);
- return NULL; // Already selected.
+ return nullptr; // Already selected.
}
switch (Opcode) {
@@ -2108,7 +2116,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDNode *RetVal = SelectGather(Node, Opc);
if (RetVal)
// We already called ReplaceUses inside SelectGather.
- return NULL;
+ return nullptr;
break;
}
}
@@ -2118,38 +2126,6 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
return getGlobalBaseReg();
- case X86ISD::ATOMOR64_DAG:
- case X86ISD::ATOMXOR64_DAG:
- case X86ISD::ATOMADD64_DAG:
- case X86ISD::ATOMSUB64_DAG:
- case X86ISD::ATOMNAND64_DAG:
- case X86ISD::ATOMAND64_DAG:
- case X86ISD::ATOMMAX64_DAG:
- case X86ISD::ATOMMIN64_DAG:
- case X86ISD::ATOMUMAX64_DAG:
- case X86ISD::ATOMUMIN64_DAG:
- case X86ISD::ATOMSWAP64_DAG: {
- unsigned Opc;
- switch (Opcode) {
- default: llvm_unreachable("Impossible opcode");
- case X86ISD::ATOMOR64_DAG: Opc = X86::ATOMOR6432; break;
- case X86ISD::ATOMXOR64_DAG: Opc = X86::ATOMXOR6432; break;
- case X86ISD::ATOMADD64_DAG: Opc = X86::ATOMADD6432; break;
- case X86ISD::ATOMSUB64_DAG: Opc = X86::ATOMSUB6432; break;
- case X86ISD::ATOMNAND64_DAG: Opc = X86::ATOMNAND6432; break;
- case X86ISD::ATOMAND64_DAG: Opc = X86::ATOMAND6432; break;
- case X86ISD::ATOMMAX64_DAG: Opc = X86::ATOMMAX6432; break;
- case X86ISD::ATOMMIN64_DAG: Opc = X86::ATOMMIN6432; break;
- case X86ISD::ATOMUMAX64_DAG: Opc = X86::ATOMUMAX6432; break;
- case X86ISD::ATOMUMIN64_DAG: Opc = X86::ATOMUMIN6432; break;
- case X86ISD::ATOMSWAP64_DAG: Opc = X86::ATOMSWAP6432; break;
- }
- SDNode *RetVal = SelectAtomic64(Node, Opc);
- if (RetVal)
- return RetVal;
- break;
- }
-
case ISD::ATOMIC_LOAD_XOR:
case ISD::ATOMIC_LOAD_AND:
case ISD::ATOMIC_LOAD_OR:
@@ -2259,7 +2235,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2));
- return NULL;
+ return nullptr;
}
case ISD::SMUL_LOHI:
@@ -2386,7 +2362,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
}
// Copy the low half of the result, if it is needed.
if (!SDValue(Node, 0).use_empty()) {
- if (ResLo.getNode() == 0) {
+ if (!ResLo.getNode()) {
assert(LoReg && "Register for low half is not defined!");
ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg, NVT,
InFlag);
@@ -2397,7 +2373,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
}
// Copy the high half of the result, if it is needed.
if (!SDValue(Node, 1).use_empty()) {
- if (ResHi.getNode() == 0) {
+ if (!ResHi.getNode()) {
assert(HiReg && "Register for high half is not defined!");
ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg, NVT,
InFlag);
@@ -2407,7 +2383,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n');
}
- return NULL;
+ return nullptr;
}
case ISD::SDIVREM:
@@ -2575,7 +2551,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
ReplaceUses(SDValue(Node, 1), Result);
DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
}
- return NULL;
+ return nullptr;
}
case X86ISD::CMP:
@@ -2632,7 +2608,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// one, do not call ReplaceAllUsesWith.
ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
SDValue(NewNode, 0));
- return NULL;
+ return nullptr;
}
// For example, "testl %eax, $2048" to "testb %ah, $8".
@@ -2669,7 +2645,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// one, do not call ReplaceAllUsesWith.
ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
SDValue(NewNode, 0));
- return NULL;
+ return nullptr;
}
// For example, "testl %eax, $32776" to "testw %ax, $32776".
@@ -2691,7 +2667,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// one, do not call ReplaceAllUsesWith.
ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
SDValue(NewNode, 0));
- return NULL;
+ return nullptr;
}
// For example, "testq %rax, $268468232" to "testl %eax, $268468232".
@@ -2713,7 +2689,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// one, do not call ReplaceAllUsesWith.
ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
SDValue(NewNode, 0));
- return NULL;
+ return nullptr;
}
}
break;
@@ -2740,7 +2716,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue StoredVal = StoreNode->getOperand(1);
unsigned Opc = StoredVal->getOpcode();
- LoadSDNode *LoadNode = 0;
+ LoadSDNode *LoadNode = nullptr;
SDValue InputChain;
if (!isLoadIncOrDecStore(StoreNode, Opc, StoredVal, CurDAG,
LoadNode, InputChain))
@@ -2772,7 +2748,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDNode *ResNode = SelectCode(Node);
DEBUG(dbgs() << "=> ";
- if (ResNode == NULL || ResNode == Node)
+ if (ResNode == nullptr || ResNode == Node)
Node->dump(CurDAG);
else
ResNode->dump(CurDAG);
@@ -2790,7 +2766,7 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
case 'v': // not offsetable ??
default: return true;
case 'm': // memory
- if (!SelectAddr(0, Op, Op0, Op1, Op2, Op3, Op4))
+ if (!SelectAddr(nullptr, Op, Op0, Op1, Op2, Op3, Op4))
return true;
break;
}
diff --git a/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp b/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
index 1f20e29..6fc4c84 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -12,17 +12,17 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "x86-isel"
#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
-#include "X86.h"
#include "X86CallingConv.h"
#include "X86InstrBuilder.h"
+#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -31,6 +31,7 @@
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
@@ -39,22 +40,35 @@
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
-#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/Support/CallSite.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <bitset>
+#include <numeric>
#include <cctype>
using namespace llvm;
+#define DEBUG_TYPE "x86-isel"
+
STATISTIC(NumTailCalls, "Number of tail calls");
+static cl::opt<bool> ExperimentalVectorWideningLegalization(
+ "x86-experimental-vector-widening-legalization", cl::init(false),
+ cl::desc("Enable an experimental vector type legalization through widening "
+ "rather than promotion."),
+ cl::Hidden);
+
+static cl::opt<bool> ExperimentalVectorShuffleLowering(
+ "x86-experimental-vector-shuffle-lowering", cl::init(false),
+ cl::desc("Enable an experimental vector shuffle lowering code path."),
+ cl::Hidden);
+
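Both experimental code paths added here are off by default and only reachable through these hidden flags; cl::Hidden keeps them out of -help output but they remain settable. As a usage sketch (file names are placeholders), they can be enabled on the llc command line or passed through clang with -mllvm:

    llc -x86-experimental-vector-shuffle-lowering test.ll
    clang -O2 -mllvm -x86-experimental-vector-widening-legalization test.c
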
// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
SDValue V2);
@@ -85,7 +99,8 @@ static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
// If the input is a buildvector just emit a smaller one.
if (Vec.getOpcode() == ISD::BUILD_VECTOR)
return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
- Vec->op_begin()+NormalizedIdxVal, ElemsPerChunk);
+ makeArrayRef(Vec->op_begin()+NormalizedIdxVal,
+ ElemsPerChunk));
SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec,
@@ -176,27 +191,28 @@ static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
}
-static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
- const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
- bool is64Bit = Subtarget->is64Bit();
-
- if (Subtarget->isTargetEnvMacho()) {
- if (is64Bit)
+static TargetLoweringObjectFile *createTLOF(const Triple &TT) {
+ if (TT.isOSBinFormatMachO()) {
+ if (TT.getArch() == Triple::x86_64)
return new X86_64MachoTargetObjectFile();
return new TargetLoweringObjectFileMachO();
}
- if (Subtarget->isTargetLinux())
+ if (TT.isOSLinux())
return new X86LinuxTargetObjectFile();
- if (Subtarget->isTargetELF())
+ if (TT.isOSBinFormatELF())
return new TargetLoweringObjectFileELF();
- if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho())
+ if (TT.isKnownWindowsMSVCEnvironment())
+ return new X86WindowsTargetObjectFile();
+ if (TT.isOSBinFormatCOFF())
return new TargetLoweringObjectFileCOFF();
llvm_unreachable("unknown subtarget type");
}
+// FIXME: This should stop caching the target machine as soon as
+// we can remove resetOperationActions et al.
X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
- : TargetLowering(TM, createTLOF(TM)) {
+ : TargetLowering(TM, createTLOF(Triple(TM.getTargetTriple()))) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
X86ScalarSSEf64 = Subtarget->hasSSE2();
X86ScalarSSEf32 = Subtarget->hasSSE1();
@@ -249,7 +265,7 @@ void X86TargetLowering::resetOperationActions() {
addBypassSlowDiv(64, 16);
}
- if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing()) {
+ if (Subtarget->isTargetKnownWindowsMSVC()) {
// Setup Windows compiler runtime calls.
setLibcallName(RTLIB::SDIV_I64, "_alldiv");
setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
@@ -264,17 +280,17 @@ void X86TargetLowering::resetOperationActions() {
// The _ftol2 runtime function has an unusual calling conv, which
// is modeled by a special pseudo-instruction.
- setLibcallName(RTLIB::FPTOUINT_F64_I64, 0);
- setLibcallName(RTLIB::FPTOUINT_F32_I64, 0);
- setLibcallName(RTLIB::FPTOUINT_F64_I32, 0);
- setLibcallName(RTLIB::FPTOUINT_F32_I32, 0);
+ setLibcallName(RTLIB::FPTOUINT_F64_I64, nullptr);
+ setLibcallName(RTLIB::FPTOUINT_F32_I64, nullptr);
+ setLibcallName(RTLIB::FPTOUINT_F64_I32, nullptr);
+ setLibcallName(RTLIB::FPTOUINT_F32_I32, nullptr);
}
if (Subtarget->isTargetDarwin()) {
// Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
setUseUnderscoreSetJmp(false);
setUseUnderscoreLongJmp(false);
- } else if (Subtarget->isTargetMingw()) {
+ } else if (Subtarget->isTargetWindowsGNU()) {
// MS runtime is weird: it exports _setjmp, but longjmp!
setUseUnderscoreSetJmp(true);
setUseUnderscoreLongJmp(false);
@@ -441,7 +457,13 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::BR_CC , MVT::i16, Expand);
setOperationAction(ISD::BR_CC , MVT::i32, Expand);
setOperationAction(ISD::BR_CC , MVT::i64, Expand);
- setOperationAction(ISD::SELECT_CC , MVT::Other, Expand);
+ setOperationAction(ISD::SELECT_CC , MVT::f32, Expand);
+ setOperationAction(ISD::SELECT_CC , MVT::f64, Expand);
+ setOperationAction(ISD::SELECT_CC , MVT::f80, Expand);
+ setOperationAction(ISD::SELECT_CC , MVT::i8, Expand);
+ setOperationAction(ISD::SELECT_CC , MVT::i16, Expand);
+ setOperationAction(ISD::SELECT_CC , MVT::i32, Expand);
+ setOperationAction(ISD::SELECT_CC , MVT::i64, Expand);
if (Subtarget->is64Bit())
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
@@ -495,6 +517,25 @@ void X86TargetLowering::resetOperationActions() {
}
}
+ // Special handling for half-precision floating point conversions.
+ // If we don't have F16C support, then lower half float conversions
+ // into library calls.
+ if (TM.Options.UseSoftFloat || !Subtarget->hasF16C()) {
+ setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
+ setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
+ }
+
+ // There's never any support for operations beyond MVT::f32.
+ setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
+ setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
+ setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
+ setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);
+
+ setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f32, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f80, MVT::f16, Expand);
+
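For the half-precision block above: marking ISD::FP16_TO_FP and ISD::FP_TO_FP16 as Expand when F16C is unavailable makes the legalizer turn those conversions into runtime library calls, while an F16C-capable subtarget can select them to the VCVTPH2PS/VCVTPS2PH instructions instead. A rough sketch of the visible effect (illustrative, not from the patch):

    // float  f  = (float)h;   // half -> float: libcall without F16C, vcvtph2ps with it
    // __fp16 h2 = (__fp16)f;  // float -> half: libcall without F16C, vcvtps2ph with it
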
if (Subtarget->hasPOPCNT()) {
setOperationAction(ISD::CTPOP , MVT::i8 , Promote);
} else {
@@ -506,7 +547,9 @@ void X86TargetLowering::resetOperationActions() {
}
setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
- setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
+
+ if (!Subtarget->hasMOVBE())
+ setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
// These should be promoted to a larger select which is supported.
setOperationAction(ISD::SELECT , MVT::i1 , Promote);
@@ -571,34 +614,18 @@ void X86TargetLowering::resetOperationActions() {
// Expand certain atomics
for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
MVT VT = IntVTs[i];
- setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
}
- if (!Subtarget->is64Bit()) {
- setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i64, Custom);
- }
-
if (Subtarget->hasCmpxchg16b()) {
- setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
}
// FIXME - use subtarget debug flags
- if (!Subtarget->isTargetDarwin() &&
- !Subtarget->isTargetELF() &&
- !Subtarget->isTargetCygMing()) {
+ if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
+ !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
}
@@ -634,15 +661,8 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
- if (Subtarget->isOSWindows() && !Subtarget->isTargetEnvMacho())
- setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ?
- MVT::i64 : MVT::i32, Custom);
- else if (TM.Options.EnableSegmentedStacks)
- setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ?
- MVT::i64 : MVT::i32, Custom);
- else
- setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ?
- MVT::i64 : MVT::i32, Expand);
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ?
+ MVT::i64 : MVT::i32, Custom);
if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
// f32 and f64 use SSE.
@@ -831,7 +851,9 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::FRINT, VT, Expand);
setOperationAction(ISD::FNEARBYINT, VT, Expand);
setOperationAction(ISD::SMUL_LOHI, VT, Expand);
+ setOperationAction(ISD::MULHS, VT, Expand);
setOperationAction(ISD::UMUL_LOHI, VT, Expand);
+ setOperationAction(ISD::MULHU, VT, Expand);
setOperationAction(ISD::SDIVREM, VT, Expand);
setOperationAction(ISD::UDIVREM, VT, Expand);
setOperationAction(ISD::FPOW, VT, Expand);
@@ -862,13 +884,19 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
setOperationAction(ISD::ANY_EXTEND, VT, Expand);
setOperationAction(ISD::VSELECT, VT, Expand);
+ setOperationAction(ISD::SELECT_CC, VT, Expand);
for (int InnerVT = MVT::FIRST_VECTOR_VALUETYPE;
InnerVT <= MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
setTruncStoreAction(VT,
(MVT::SimpleValueType)InnerVT, Expand);
setLoadExtAction(ISD::SEXTLOAD, VT, Expand);
setLoadExtAction(ISD::ZEXTLOAD, VT, Expand);
- setLoadExtAction(ISD::EXTLOAD, VT, Expand);
+
+ // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like types,
+ // we have to deal with them whether we ask for Expansion or not. Setting
+ // Expand causes its own optimisation problems though, so leave them legal.
+ if (VT.getVectorElementType() == MVT::i1)
+ setLoadExtAction(ISD::EXTLOAD, VT, Expand);
}
// FIXME: In order to prevent SSE instructions being expanded to MMX ones
@@ -943,6 +971,10 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::ADD, MVT::v2i64, Legal);
setOperationAction(ISD::MUL, MVT::v4i32, Custom);
setOperationAction(ISD::MUL, MVT::v2i64, Custom);
+ setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
+ setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
+ setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
+ setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
setOperationAction(ISD::SUB, MVT::v16i8, Legal);
setOperationAction(ISD::SUB, MVT::v8i16, Legal);
setOperationAction(ISD::SUB, MVT::v4i32, Legal);
@@ -1033,6 +1065,10 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, Legal);
+
+ setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
+ setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
+ setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
}
if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
@@ -1061,11 +1097,14 @@ void X86TargetLowering::resetOperationActions() {
// FIXME: Do we need to handle scalar-to-vector here?
setOperationAction(ISD::MUL, MVT::v4i32, Legal);
- setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);
- setOperationAction(ISD::VSELECT, MVT::v2i64, Legal);
+ setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
+ setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
+ setOperationAction(ISD::VSELECT, MVT::v4i32, Custom);
+ setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
+ setOperationAction(ISD::VSELECT, MVT::v8i16, Custom);
+ // There is no BLENDI for byte vectors. We don't need to custom lower
+ // some vselects for now.
setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
- setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
- setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
// i8 and i16 vectors are custom , because the source register and source
// source memory operand types are not the same width. f32 vectors are
@@ -1108,9 +1147,6 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::SHL, MVT::v4i32, Custom);
setOperationAction(ISD::SRA, MVT::v4i32, Custom);
-
- setOperationAction(ISD::SDIV, MVT::v8i16, Custom);
- setOperationAction(ISD::SDIV, MVT::v4i32, Custom);
}
if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
@@ -1151,9 +1187,12 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
setOperationAction(ISD::FABS, MVT::v4f64, Custom);
- setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);
-
+ // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
+ // even though v8i16 is a legal type.
+ setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Promote);
+ setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Promote);
setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
+
setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
@@ -1172,8 +1211,6 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::SRA, MVT::v16i16, Custom);
setOperationAction(ISD::SRA, MVT::v32i8, Custom);
- setOperationAction(ISD::SDIV, MVT::v16i16, Custom);
-
setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
@@ -1183,10 +1220,10 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
- setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);
- setOperationAction(ISD::VSELECT, MVT::v4i64, Legal);
- setOperationAction(ISD::VSELECT, MVT::v8i32, Legal);
- setOperationAction(ISD::VSELECT, MVT::v8f32, Legal);
+ setOperationAction(ISD::VSELECT, MVT::v4f64, Custom);
+ setOperationAction(ISD::VSELECT, MVT::v4i64, Custom);
+ setOperationAction(ISD::VSELECT, MVT::v8i32, Custom);
+ setOperationAction(ISD::VSELECT, MVT::v8f32, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
@@ -1226,9 +1263,13 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::MUL, MVT::v16i16, Legal);
// Don't lower v32i8 because there is no 128-bit byte mul
- setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
+ setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
+ setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
+ setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
+ setOperationAction(ISD::MULHS, MVT::v16i16, Legal);
- setOperationAction(ISD::SDIV, MVT::v8i32, Custom);
+ setOperationAction(ISD::VSELECT, MVT::v16i16, Custom);
+ setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
} else {
setOperationAction(ISD::ADD, MVT::v4i64, Custom);
setOperationAction(ISD::ADD, MVT::v8i32, Custom);
@@ -1306,9 +1347,15 @@ void X86TargetLowering::resetOperationActions() {
addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
+ addRegisterClass(MVT::i1, &X86::VK1RegClass);
addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
+ setOperationAction(ISD::BR_CC, MVT::i1, Expand);
+ setOperationAction(ISD::SETCC, MVT::i1, Custom);
+ setOperationAction(ISD::XOR, MVT::i1, Legal);
+ setOperationAction(ISD::OR, MVT::i1, Legal);
+ setOperationAction(ISD::AND, MVT::i1, Legal);
setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, Legal);
setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
@@ -1331,7 +1378,6 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
setOperationAction(ISD::FMA, MVT::v8f64, Legal);
setOperationAction(ISD::FMA, MVT::v16f32, Legal);
- setOperationAction(ISD::SDIV, MVT::v16i32, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
@@ -1346,17 +1392,20 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
+ setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
+ setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
- setOperationAction(ISD::TRUNCATE, MVT::i1, Legal);
+ setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
+ setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
@@ -1370,12 +1419,17 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Legal);
setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
setOperationAction(ISD::MUL, MVT::v8i64, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
@@ -1406,6 +1460,11 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::OR, MVT::v16i32, Legal);
setOperationAction(ISD::XOR, MVT::v16i32, Legal);
+ if (Subtarget->hasCDI()) {
+ setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
+ setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
+ }
+
// Custom lower several nodes.
for (int i = MVT::FIRST_VECTOR_VALUETYPE;
i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
@@ -1458,6 +1517,8 @@ void X86TargetLowering::resetOperationActions() {
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
+ if (!Subtarget->is64Bit())
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
// Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
// handle type legalization for these operations here.
@@ -1482,9 +1543,9 @@ void X86TargetLowering::resetOperationActions() {
if (!Subtarget->is64Bit()) {
// These libcalls are not available in 32-bit.
- setLibcallName(RTLIB::SHL_I128, 0);
- setLibcallName(RTLIB::SRL_I128, 0);
- setLibcallName(RTLIB::SRA_I128, 0);
+ setLibcallName(RTLIB::SHL_I128, nullptr);
+ setLibcallName(RTLIB::SRL_I128, nullptr);
+ setLibcallName(RTLIB::SRA_I128, nullptr);
}
// Combine sin / cos into one node or libcall if possible.
@@ -1500,6 +1561,15 @@ void X86TargetLowering::resetOperationActions() {
}
}
+ if (Subtarget->isTargetWin64()) {
+ setOperationAction(ISD::SDIV, MVT::i128, Custom);
+ setOperationAction(ISD::UDIV, MVT::i128, Custom);
+ setOperationAction(ISD::SREM, MVT::i128, Custom);
+ setOperationAction(ISD::UREM, MVT::i128, Custom);
+ setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
+ setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
+ }
+
// We have target-specific dag combine patterns for the following nodes:
setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
@@ -1524,6 +1594,8 @@ void X86TargetLowering::resetOperationActions() {
setTargetDAGCombine(ISD::TRUNCATE);
setTargetDAGCombine(ISD::SINT_TO_FP);
setTargetDAGCombine(ISD::SETCC);
+ setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
+ setTargetDAGCombine(ISD::BUILD_VECTOR);
if (Subtarget->is64Bit())
setTargetDAGCombine(ISD::MUL);
setTargetDAGCombine(ISD::XOR);
@@ -1546,16 +1618,25 @@ void X86TargetLowering::resetOperationActions() {
setPrefFunctionAlignment(4); // 2^4 bytes.
}
+TargetLoweringBase::LegalizeTypeAction
+X86TargetLowering::getPreferredVectorAction(EVT VT) const {
+ if (ExperimentalVectorWideningLegalization &&
+ VT.getVectorNumElements() != 1 &&
+ VT.getVectorElementType().getSimpleVT() != MVT::i1)
+ return TypeWidenVector;
+
+ return TargetLoweringBase::getPreferredVectorAction(VT);
+}
+
EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
if (!VT.isVector())
- return MVT::i8;
+ return Subtarget->hasAVX512() ? MVT::i1: MVT::i8;
- const TargetMachine &TM = getTargetMachine();
- if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512())
+ if (Subtarget->hasAVX512())
switch(VT.getVectorNumElements()) {
case 8: return MVT::v8i1;
case 16: return MVT::v16i1;
- }
+ }
return VT.changeVectorElementTypeToInteger();
}
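A minimal sketch of what the two hooks above change (illustrative, not part of the diff). With an AVX-512 subtarget, compare results now use the mask types backed by k-registers, and the hidden widening flag makes the type legalizer widen odd-sized vectors instead of promoting their elements:

    // Assuming Subtarget->hasAVX512():
    //   getSetCCResultType(Ctx, MVT::i32)    -> MVT::i1    (previously MVT::i8)
    //   getSetCCResultType(Ctx, MVT::v16i32) -> MVT::v16i1 (k-register mask)
    //   getSetCCResultType(Ctx, MVT::v4i32)  -> MVT::v4i32 (fallback path, unchanged)
    // With -x86-experimental-vector-widening-legalization, an illegal v2i32 is
    // widened (typically to v4i32) rather than promoted to v2i64.
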
@@ -1661,7 +1742,9 @@ bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
}
bool
-X86TargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
+X86TargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
+ unsigned,
+ bool *Fast) const {
if (Fast)
*Fast = Subtarget->isUnalignedMemAccessFast();
return true;
@@ -1685,7 +1768,7 @@ const MCExpr *
X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB,
unsigned uid,MCContext &Ctx) const{
- assert(getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
+ assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
Subtarget->isPICStyleGOT());
// In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
// entries.
@@ -1721,7 +1804,7 @@ getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
// FIXME: Why this routine is here? Move to RegInfo!
std::pair<const TargetRegisterClass*, uint8_t>
X86TargetLowering::findRepresentativeClass(MVT VT) const{
- const TargetRegisterClass *RRC = 0;
+ const TargetRegisterClass *RRC = nullptr;
uint8_t Cost = 1;
switch (VT.SimpleTy) {
default:
@@ -1784,13 +1867,13 @@ X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const {
SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
+ CCState CCInfo(CallConv, isVarArg, MF, MF.getTarget(),
RVLocs, Context);
return CCInfo.CheckReturn(Outs, RetCC_X86);
}
-const uint16_t *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
- static const uint16_t ScratchRegs[] = { X86::R11, 0 };
+const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
+ static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
return ScratchRegs;
}
@@ -1804,7 +1887,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
+ CCState CCInfo(CallConv, isVarArg, MF, DAG.getTarget(),
RVLocs, *DAG.getContext());
CCInfo.AnalyzeReturn(Outs, RetCC_X86);
@@ -1832,6 +1915,9 @@ X86TargetLowering::LowerReturn(SDValue Chain,
else if (VA.getLocInfo() == CCValAssign::BCvt)
ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);
+ assert(VA.getLocInfo() != CCValAssign::FPExt &&
+ "Unexpected FP-extend for return value.");
+
// If this is x86-64, and we disabled SSE, we can't return FP values,
// or SSE or MMX vectors.
if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
@@ -1886,7 +1972,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
// We saved the argument into a virtual register in the entry block,
// so now we copy the value out and into %rax/%eax.
if (DAG.getMachineFunction().getFunction()->hasStructRetAttr() &&
- (Subtarget->is64Bit() || Subtarget->isTargetWindows())) {
+ (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC())) {
MachineFunction &MF = DAG.getMachineFunction();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
unsigned Reg = FuncInfo->getSRetReturnReg();
@@ -1910,8 +1996,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
if (Flag.getNode())
RetOps.push_back(Flag);
- return DAG.getNode(X86ISD::RET_FLAG, dl,
- MVT::Other, &RetOps[0], RetOps.size());
+ return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
}
bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
@@ -1974,7 +2059,7 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
SmallVector<CCValAssign, 16> RVLocs;
bool Is64Bit = Subtarget->is64Bit();
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ DAG.getTarget(), RVLocs, *DAG.getContext());
CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
// Copy all of the result registers out of their specified physreg.
@@ -2124,8 +2209,8 @@ X86TargetLowering::LowerMemArgument(SDValue Chain,
unsigned i) const {
// Create the nodes corresponding to a load from this parameter slot.
ISD::ArgFlagsTy Flags = Ins[i].Flags;
- bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv,
- getTargetMachine().Options.GuaranteedTailCallOpt);
+ bool AlwaysUseMutable = FuncIsMadeTailCallSafe(
+ CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
EVT ValVT;
@@ -2175,7 +2260,6 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
MachineFrameInfo *MFI = MF.getFrameInfo();
bool Is64Bit = Subtarget->is64Bit();
- bool IsWindows = Subtarget->isTargetWindows();
bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
@@ -2183,7 +2267,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
+ CCState CCInfo(CallConv, isVarArg, MF, DAG.getTarget(),
ArgLocs, *DAG.getContext());
// Allocate shadow area for Win64
@@ -2222,6 +2306,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
RC = &X86::VR128RegClass;
else if (RegVT == MVT::x86mmx)
RC = &X86::VR64RegClass;
+ else if (RegVT == MVT::i1)
+ RC = &X86::VK1RegClass;
else if (RegVT == MVT::v8i1)
RC = &X86::VK8RegClass;
else if (RegVT == MVT::v16i1)
@@ -2264,22 +2350,25 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
InVals.push_back(ArgValue);
}
- // The x86-64 ABIs require that for returning structs by value we copy
- // the sret argument into %rax/%eax (depending on ABI) for the return.
- // Win32 requires us to put the sret argument to %eax as well.
- // Save the argument into a virtual register so that we can access it
- // from the return points.
- if (MF.getFunction()->hasStructRetAttr() &&
- (Subtarget->is64Bit() || Subtarget->isTargetWindows())) {
- X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
- unsigned Reg = FuncInfo->getSRetReturnReg();
- if (!Reg) {
- MVT PtrTy = getPointerTy();
- Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
- FuncInfo->setSRetReturnReg(Reg);
+ if (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) {
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ // The x86-64 ABIs require that for returning structs by value we copy
+ // the sret argument into %rax/%eax (depending on ABI) for the return.
+ // Win32 requires us to put the sret argument to %eax as well.
+ // Save the argument into a virtual register so that we can access it
+ // from the return points.
+ if (Ins[i].Flags.isSRet()) {
+ unsigned Reg = FuncInfo->getSRetReturnReg();
+ if (!Reg) {
+ MVT PtrTy = getPointerTy();
+ Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
+ FuncInfo->setSRetReturnReg(Reg);
+ }
+ SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
+ break;
+ }
}
- SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
}
unsigned StackSize = CCInfo.getNextStackOffset();
@@ -2299,17 +2388,17 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0;
// FIXME: We should really autogenerate these arrays
- static const uint16_t GPR64ArgRegsWin64[] = {
+ static const MCPhysReg GPR64ArgRegsWin64[] = {
X86::RCX, X86::RDX, X86::R8, X86::R9
};
- static const uint16_t GPR64ArgRegs64Bit[] = {
+ static const MCPhysReg GPR64ArgRegs64Bit[] = {
X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
};
- static const uint16_t XMMArgRegs64Bit[] = {
+ static const MCPhysReg XMMArgRegs64Bit[] = {
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
- const uint16_t *GPR64ArgRegs;
+ const MCPhysReg *GPR64ArgRegs;
unsigned NumXMMRegs = 0;
if (IsWin64) {
@@ -2342,7 +2431,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
TotalNumXMMRegs = 0;
if (IsWin64) {
- const TargetFrameLowering &TFI = *getTargetMachine().getFrameLowering();
+ const TargetFrameLowering &TFI = *MF.getTarget().getFrameLowering();
// Get to the caller-allocated home save location. Add 8 to account
// for the return address.
int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
@@ -2403,13 +2492,11 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
SaveXMMOps.push_back(Val);
}
MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
- MVT::Other,
- &SaveXMMOps[0], SaveXMMOps.size()));
+ MVT::Other, SaveXMMOps));
}
if (!MemOps.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOps[0], MemOps.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
}
}
@@ -2420,7 +2507,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
} else {
FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
// If this is an sret function, the return should pop the hidden pointer.
- if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows &&
+ if (!Is64Bit && !IsTailCallConvention(CallConv) &&
+ !Subtarget->getTargetTriple().isOSMSVCRT() &&
argsAreStructReturn(Ins) == StackStructReturn)
FuncInfo->setBytesToPopOnReturn(4);
}
@@ -2475,10 +2563,10 @@ X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
/// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call
/// optimization is performed and it is required (FPDiff!=0).
-static SDValue
-EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF,
- SDValue Chain, SDValue RetAddrFrIdx, EVT PtrVT,
- unsigned SlotSize, int FPDiff, SDLoc dl) {
+static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
+ SDValue Chain, SDValue RetAddrFrIdx,
+ EVT PtrVT, unsigned SlotSize,
+ int FPDiff, SDLoc dl) {
// Store the return address to the appropriate stack slot.
if (!FPDiff) return Chain;
// Calculate the new stack slot for the return address.
@@ -2509,14 +2597,19 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
MachineFunction &MF = DAG.getMachineFunction();
bool Is64Bit = Subtarget->is64Bit();
bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
- bool IsWindows = Subtarget->isTargetWindows();
StructReturnType SR = callIsStructReturn(Outs);
bool IsSibcall = false;
if (MF.getTarget().Options.DisableTailCalls)
isTailCall = false;
- if (isTailCall) {
+ bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
+ if (IsMustTail) {
+ // Force this to be a tail call. The verifier rules are enough to ensure
+ // that we can lower this successfully without moving the return address
+ // around.
+ isTailCall = true;
+ } else if (isTailCall) {
// Check if it's really possible to do a tail call.
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
isVarArg, SR != NotStructReturn,
@@ -2537,7 +2630,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
+ CCState CCInfo(CallConv, isVarArg, MF, MF.getTarget(),
ArgLocs, *DAG.getContext());
// Allocate shadow area for Win64
@@ -2552,12 +2645,12 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// This is a sibcall. The memory operands are available in caller's
// own caller's stack.
NumBytes = 0;
- else if (getTargetMachine().Options.GuaranteedTailCallOpt &&
+ else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
IsTailCallConvention(CallConv))
NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
int FPDiff = 0;
- if (isTailCall && !IsSibcall) {
+ if (isTailCall && !IsSibcall && !IsMustTail) {
// Lower arguments at fp - stackoffset + fpdiff.
X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
@@ -2570,9 +2663,21 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
X86Info->setTCReturnAddrDelta(FPDiff);
}
+ unsigned NumBytesToPush = NumBytes;
+ unsigned NumBytesToPop = NumBytes;
+
+ // If we have an inalloca argument, all stack space has already been allocated
+ // for us and be right at the top of the stack. We don't support multiple
+ // arguments passed in memory when using inalloca.
+ if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
+ NumBytesToPush = 0;
+ assert(ArgLocs.back().getLocMemOffset() == 0 &&
+ "an inalloca argument must be the only memory argument");
+ }
+
if (!IsSibcall)
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
- dl);
+ Chain = DAG.getCALLSEQ_START(
+ Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl);
SDValue RetAddrFrIdx;
// Load return address for tail calls.
@@ -2587,12 +2692,16 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Walk the register/memloc assignments, inserting copies/loads. In the case
// of tail call optimization arguments are handle later.
const X86RegisterInfo *RegInfo =
- static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
+ static_cast<const X86RegisterInfo*>(DAG.getTarget().getRegisterInfo());
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ // Skip inalloca arguments, they have already been written.
+ ISD::ArgFlagsTy Flags = Outs[i].Flags;
+ if (Flags.isInAlloca())
+ continue;
+
CCValAssign &VA = ArgLocs[i];
EVT RegVT = VA.getLocVT();
SDValue Arg = OutVals[i];
- ISD::ArgFlagsTy Flags = Outs[i].Flags;
bool isByVal = Flags.isByVal();
// Promote the value if needed.
@@ -2646,7 +2755,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
} else if (!IsSibcall && (!isTailCall || isByVal)) {
assert(VA.isMemLoc());
- if (StackPtr.getNode() == 0)
+ if (!StackPtr.getNode())
StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
getPointerTy());
MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
@@ -2655,8 +2764,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
if (Subtarget->isPICStyleGOT()) {
// ELF / PIC requires GOT in the EBX register before function calls via PLT
@@ -2693,7 +2801,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// registers used and is in the range 0 - 8 inclusive.
// Count the number of XMM registers allocated.
- static const uint16_t XMMArgRegs[] = {
+ static const MCPhysReg XMMArgRegs[] = {
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
@@ -2705,8 +2813,10 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
DAG.getConstant(NumXMMRegs, MVT::i8)));
}
- // For tail calls lower the arguments to the 'real' stack slot.
- if (isTailCall) {
+ // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
+ // don't need this because the eligibility check rejects calls that require
+ // shuffling arguments passed in memory.
+ if (!IsSibcall && isTailCall) {
// Force all the incoming stack arguments to be loaded from the stack
// before any new outgoing arguments are stored to the stack, because the
// outgoing stack slots may alias the incoming argument stack slots, and
@@ -2718,45 +2828,45 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVector<SDValue, 8> MemOpChains2;
SDValue FIN;
int FI = 0;
- if (getTargetMachine().Options.GuaranteedTailCallOpt) {
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- CCValAssign &VA = ArgLocs[i];
- if (VA.isRegLoc())
- continue;
- assert(VA.isMemLoc());
- SDValue Arg = OutVals[i];
- ISD::ArgFlagsTy Flags = Outs[i].Flags;
- // Create frame index.
- int32_t Offset = VA.getLocMemOffset()+FPDiff;
- uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
- FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
- FIN = DAG.getFrameIndex(FI, getPointerTy());
-
- if (Flags.isByVal()) {
- // Copy relative to framepointer.
- SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
- if (StackPtr.getNode() == 0)
- StackPtr = DAG.getCopyFromReg(Chain, dl,
- RegInfo->getStackRegister(),
- getPointerTy());
- Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
-
- MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
- ArgChain,
- Flags, DAG, dl));
- } else {
- // Store relative to framepointer.
- MemOpChains2.push_back(
- DAG.getStore(ArgChain, dl, Arg, FIN,
- MachinePointerInfo::getFixedStack(FI),
- false, false, 0));
- }
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ if (VA.isRegLoc())
+ continue;
+ assert(VA.isMemLoc());
+ SDValue Arg = OutVals[i];
+ ISD::ArgFlagsTy Flags = Outs[i].Flags;
+ // Skip inalloca arguments. They don't require any work.
+ if (Flags.isInAlloca())
+ continue;
+ // Create frame index.
+ int32_t Offset = VA.getLocMemOffset()+FPDiff;
+ uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
+ FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
+ FIN = DAG.getFrameIndex(FI, getPointerTy());
+
+ if (Flags.isByVal()) {
+ // Copy relative to framepointer.
+ SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
+ if (!StackPtr.getNode())
+ StackPtr = DAG.getCopyFromReg(Chain, dl,
+ RegInfo->getStackRegister(),
+ getPointerTy());
+ Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
+
+ MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
+ ArgChain,
+ Flags, DAG, dl));
+ } else {
+ // Store relative to framepointer.
+ MemOpChains2.push_back(
+ DAG.getStore(ArgChain, dl, Arg, FIN,
+ MachinePointerInfo::getFixedStack(FI),
+ false, false, 0));
}
}
if (!MemOpChains2.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOpChains2[0], MemOpChains2.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
// Store the return address to the appropriate stack slot.
Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
@@ -2773,7 +2883,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
InFlag = Chain.getValue(1);
}
- if (getTargetMachine().getCodeModel() == CodeModel::Large) {
+ if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
// In the 64-bit large code model, we have to make all calls
// through a register, since the call instruction's 32-bit
@@ -2787,7 +2897,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// We should use extra load for direct calls to dllimported functions in
// non-JIT mode.
const GlobalValue *GV = G->getGlobal();
- if (!GV->hasDLLImportLinkage()) {
+ if (!GV->hasDLLImportStorageClass()) {
unsigned char OpFlags = 0;
bool ExtraLoad = false;
unsigned WrapperKind = ISD::DELETED_NODE;
@@ -2797,7 +2907,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// has hidden or protected visibility, or if it is static or local, then
// we don't need to use the PLT - we can directly call it.
if (Subtarget->isTargetELF() &&
- getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
+ DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
OpFlags = X86II::MO_PLT;
} else if (Subtarget->isPICStyleStubAny() &&
@@ -2839,7 +2949,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// On ELF targets, in either X86-64 or X86-32 mode, direct calls to
// external symbols should go through the PLT.
if (Subtarget->isTargetELF() &&
- getTargetMachine().getRelocationModel() == Reloc::PIC_) {
+ DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
OpFlags = X86II::MO_PLT;
} else if (Subtarget->isPICStyleStubAny() &&
(!Subtarget->getTargetTriple().isMacOSX() ||
@@ -2859,8 +2969,9 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVector<SDValue, 8> Ops;
if (!IsSibcall && isTailCall) {
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
- DAG.getIntPtrConstant(0, true), InFlag, dl);
+ Chain = DAG.getCALLSEQ_END(Chain,
+ DAG.getIntPtrConstant(NumBytesToPop, true),
+ DAG.getIntPtrConstant(0, true), InFlag, dl);
InFlag = Chain.getValue(1);
}
@@ -2877,7 +2988,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
RegsToPass[i].second.getValueType()));
// Add a register mask operand representing the call-preserved registers.
- const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
Ops.push_back(DAG.getRegisterMask(Mask));
@@ -2892,32 +3003,33 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// This isn't right, although it's probably harmless on x86; liveouts
// should be computed from returns not tail calls. Consider a void
// function making a tail call to a function returning int.
- return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());
+ return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
}
- Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
+ Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
InFlag = Chain.getValue(1);
// Create the CALLSEQ_END node.
- unsigned NumBytesForCalleeToPush;
+ unsigned NumBytesForCalleeToPop;
if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
- getTargetMachine().Options.GuaranteedTailCallOpt))
- NumBytesForCalleeToPush = NumBytes; // Callee pops everything
- else if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows &&
+ DAG.getTarget().Options.GuaranteedTailCallOpt))
+ NumBytesForCalleeToPop = NumBytes; // Callee pops everything
+ else if (!Is64Bit && !IsTailCallConvention(CallConv) &&
+ !Subtarget->getTargetTriple().isOSMSVCRT() &&
SR == StackStructReturn)
// If this is a call to a struct-return function, the callee
// pops the hidden struct pointer, so we have to push it back.
// This is common for Darwin/X86, Linux & Mingw32 targets.
// For MSVC Win32 targets, the caller pops the hidden struct pointer.
- NumBytesForCalleeToPush = 4;
+ NumBytesForCalleeToPop = 4;
else
- NumBytesForCalleeToPush = 0; // Callee pops nothing.
+ NumBytesForCalleeToPop = 0; // Callee pops nothing.
// Returns a flag for retval copy to use.
if (!IsSibcall) {
Chain = DAG.getCALLSEQ_END(Chain,
- DAG.getIntPtrConstant(NumBytes, true),
- DAG.getIntPtrConstant(NumBytesForCalleeToPush,
+ DAG.getIntPtrConstant(NumBytesToPop, true),
+ DAG.getIntPtrConstant(NumBytesForCalleeToPop,
true),
InFlag, dl);
InFlag = Chain.getValue(1);
@@ -2947,7 +3059,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// If a tail called function callee has more arguments than the caller the
// caller needs to make sure that there is room to move the RETADDR to. This is
// achieved by reserving an area the size of the argument delta right after the
-// original REtADDR, but before the saved framepointer or the spilled registers
+// original RETADDR, but before the saved framepointer or the spilled registers
// e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
// stack layout:
// arg1
@@ -3071,7 +3183,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
- if (getTargetMachine().Options.GuaranteedTailCallOpt) {
+ if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
if (IsTailCallConvention(CalleeCC) && CCMatch)
return true;
return false;
@@ -3083,7 +3195,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
// emit a special epilogue.
const X86RegisterInfo *RegInfo =
- static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
+ static_cast<const X86RegisterInfo*>(DAG.getTarget().getRegisterInfo());
if (RegInfo->needsStackRealignment(MF))
return false;
@@ -3092,9 +3204,13 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
if (isCalleeStructRet || isCallerStructRet)
return false;
- // An stdcall caller is expected to clean up its arguments; the callee
- // isn't going to do that.
- if (!CCMatch && CallerCC == CallingConv::X86_StdCall)
+ // An stdcall/thiscall caller is expected to clean up its arguments; the
+ // callee isn't going to do that.
+ // FIXME: this is more restrictive than needed. We could produce a tailcall
+ // when the stack adjustment matches. For example, with a thiscall that takes
+ // only one argument.
+ if (!CCMatch && (CallerCC == CallingConv::X86_StdCall ||
+ CallerCC == CallingConv::X86_ThisCall))
return false;
// Do not sibcall optimize vararg calls unless all arguments are passed via
@@ -3108,7 +3224,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ DAG.getTarget(), ArgLocs, *DAG.getContext());
CCInfo.AnalyzeCallOperands(Outs, CC_X86);
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
@@ -3129,7 +3245,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
if (Unused) {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ DAG.getTarget(), RVLocs, *DAG.getContext());
CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
CCValAssign &VA = RVLocs[i];
@@ -3143,12 +3259,12 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
if (!CCMatch) {
SmallVector<CCValAssign, 16> RVLocs1;
CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs1, *DAG.getContext());
+ DAG.getTarget(), RVLocs1, *DAG.getContext());
CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
SmallVector<CCValAssign, 16> RVLocs2;
CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs2, *DAG.getContext());
+ DAG.getTarget(), RVLocs2, *DAG.getContext());
CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
if (RVLocs1.size() != RVLocs2.size())
@@ -3175,7 +3291,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// argument is passed on the stack.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ DAG.getTarget(), ArgLocs, *DAG.getContext());
// Allocate shadow area for Win64
if (IsCalleeWin64)
@@ -3192,7 +3308,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
MachineFrameInfo *MFI = MF.getFrameInfo();
const MachineRegisterInfo *MRI = &MF.getRegInfo();
const X86InstrInfo *TII =
- ((const X86TargetMachine&)getTargetMachine()).getInstrInfo();
+ static_cast<const X86InstrInfo *>(DAG.getTarget().getInstrInfo());
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
SDValue Arg = OutVals[i];
@@ -3215,12 +3331,12 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
if (!Subtarget->is64Bit() &&
((!isa<GlobalAddressSDNode>(Callee) &&
!isa<ExternalSymbolSDNode>(Callee)) ||
- getTargetMachine().getRelocationModel() == Reloc::PIC_)) {
+ DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
unsigned NumInRegs = 0;
// In PIC we need an extra register to formulate the address computation
// for the callee.
unsigned MaxInRegs =
- (getTargetMachine().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
+ (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
@@ -3344,7 +3460,7 @@ static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
const X86RegisterInfo *RegInfo =
- static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
+ static_cast<const X86RegisterInfo*>(DAG.getTarget().getRegisterInfo());
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
int ReturnAddrIndex = FuncInfo->getRAIndex();
@@ -3415,6 +3531,24 @@ bool X86::isCalleePop(CallingConv::ID CallingConv,
}
}
+/// \brief Return true if the condition is an unsigned comparison operation.
+static bool isX86CCUnsigned(unsigned X86CC) {
+ switch (X86CC) {
+ default: llvm_unreachable("Invalid integer condition!");
+ case X86::COND_E: return true;
+ case X86::COND_G: return false;
+ case X86::COND_GE: return false;
+ case X86::COND_L: return false;
+ case X86::COND_LE: return false;
+ case X86::COND_NE: return true;
+ case X86::COND_B: return true;
+ case X86::COND_A: return true;
+ case X86::COND_BE: return true;
+ case X86::COND_AE: return true;
+ }
+ llvm_unreachable("covered switch fell through?!");
+}
+
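The isX86CCUnsigned helper mirrors how the TranslateX86CC routine below maps ISD condition codes onto x86 flag conditions: unsigned integer compares use the carry-flag conditions, signed compares use the sign/overflow ones. Illustrative mapping (standard x86, not introduced by this diff):

    //   ISD::SETULT -> X86::COND_B   (unsigned, below)
    //   ISD::SETLT  -> X86::COND_L   (signed, less)
    //   ISD::SETUGE -> X86::COND_AE  (unsigned, above or equal)
    //   ISD::SETGE  -> X86::COND_GE  (signed, greater or equal)
    // Equality (COND_E/COND_NE) is sign-agnostic and is reported as unsigned here.
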
/// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86
/// specific condition code, returning the condition code and the LHS/RHS of the
/// comparison to make.
@@ -3533,6 +3667,18 @@ bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
return false;
}
+/// \brief Returns true if it is beneficial to convert a load of a constant
+/// to just the constant itself.
+bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
+ Type *Ty) const {
+ assert(Ty->isIntegerTy());
+
+ unsigned BitSize = Ty->getPrimitiveSizeInBits();
+ if (BitSize == 0 || BitSize > 64)
+ return false;
+ return true;
+}
+
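The new shouldConvertConstantLoadToIntImm hook says that any integer constant of 64 bits or fewer is cheaper to materialize as an immediate than to load from memory, which fits x86: even the widest case fits a single instruction (movabsq with a 64-bit immediate). Illustrative answers:

    //   i32 0x12345678         -> true   (fits a 32-bit immediate)
    //   i64 0x1234567890ABCDEF -> true   (single movabsq)
    //   i128 constants          -> false  (BitSize > 64)
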
/// isUndefOrInRange - Return true if Val is undef or if its value falls within
/// the specified range [Low, Hi).
static bool isUndefOrInRange(int Val, int Low, int Hi) {
@@ -3854,6 +4000,37 @@ static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) {
return true;
}
+/// isINSERTPSMask - Return true if the specified VECTOR_SHUFFLE operand
+/// specifies a shuffle of elements that is suitable for input to INSERTPS.
+/// i.e., if all but one element comes from the same vector.
+static bool isINSERTPSMask(ArrayRef<int> Mask, MVT VT) {
+ // TODO: Deal with AVX's VINSERTPS
+ if (!VT.is128BitVector() || (VT != MVT::v4f32 && VT != MVT::v4i32))
+ return false;
+
+ unsigned CorrectPosV1 = 0;
+ unsigned CorrectPosV2 = 0;
+ for (int i = 0, e = (int)VT.getVectorNumElements(); i != e; ++i) {
+ if (Mask[i] == -1) {
+ ++CorrectPosV1;
+ ++CorrectPosV2;
+ continue;
+ }
+
+ if (Mask[i] == i)
+ ++CorrectPosV1;
+ else if (Mask[i] == i + 4)
+ ++CorrectPosV2;
+ }
+
+ if (CorrectPosV1 == 3 || CorrectPosV2 == 3)
+ // We have 3 elements (undefs count as elements from any vector) from one
+ // vector, and one from another.
+ return true;
+
+ return false;
+}
+
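// Illustrative sketch (hypothetical standalone helper, not LLVM API): the
// counting check performed by isINSERTPSMask above, for a 4-element shuffle
// mask where 0-3 index the first vector, 4-7 the second, and -1 means undef.
#include <array>
static bool looksLikeInsertPS(const std::array<int, 4> &Mask) {
  unsigned FromV1 = 0, FromV2 = 0;
  for (int i = 0; i != 4; ++i) {
    if (Mask[i] == -1) { ++FromV1; ++FromV2; continue; } // undef counts for both
    if (Mask[i] == i)          ++FromV1;                 // element kept from V1
    else if (Mask[i] == i + 4) ++FromV2;                 // element kept from V2
  }
  // Three elements stay put in one vector; the remaining one may come from
  // the other vector, which is what a single INSERTPS can do.
  return FromV1 == 3 || FromV2 == 3;
}
// e.g. looksLikeInsertPS({4, 1, 2, 3}) is true; looksLikeInsertPS({4, 5, 2, 3}) is false.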
//
// Some special combinations that can be optimized.
//
@@ -4073,6 +4250,29 @@ static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
return true;
}
+// Match INSERTI64x4/INSERTF64x4 instructions, i.e. shuffles that combine the
+// 256-bit sub-vectors (src0[0], src1[0]) or (src1[0], src0[1]).
+static bool isINSERT64x4Mask(ArrayRef<int> Mask, MVT VT, unsigned int *Imm) {
+ if (!VT.is512BitVector())
+ return false;
+
+ unsigned NumElts = VT.getVectorNumElements();
+ unsigned HalfSize = NumElts/2;
+ if (isSequentialOrUndefInRange(Mask, 0, HalfSize, 0)) {
+ if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, NumElts)) {
+ *Imm = 1;
+ return true;
+ }
+ }
+ if (isSequentialOrUndefInRange(Mask, 0, HalfSize, NumElts)) {
+ if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, HalfSize)) {
+ *Imm = 0;
+ return true;
+ }
+ }
+ return false;
+}
+
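// Illustrative sketch (hypothetical standalone helper, not LLVM API): the two
// mask shapes accepted by isINSERT64x4Mask above, shown for a v8i64 shuffle
// where 0-7 index src0, 8-15 index src1, and -1 means undef.
#include <array>
static bool matchesInsert64x4(const std::array<int, 8> &Mask, unsigned &Imm) {
  auto SeqOrUndef = [&](unsigned Pos, unsigned Start) {
    for (unsigned i = 0; i != 4; ++i)
      if (Mask[Pos + i] != -1 && Mask[Pos + i] != int(Start + i))
        return false;
    return true;
  };
  if (SeqOrUndef(0, 0) && SeqOrUndef(4, 8)) { Imm = 1; return true; } // src0 low | src1 low
  if (SeqOrUndef(0, 8) && SeqOrUndef(4, 4)) { Imm = 0; return true; } // src1 low | src0 high
  return false;
}
// e.g. {0,1,2,3,8,9,10,11} matches with Imm == 1; {8,9,10,11,4,5,6,7} with Imm == 0.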
/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSS,
/// MOVSD, and MOVD, i.e. setting the lowest element.
@@ -4208,7 +4408,7 @@ static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
unsigned NumLanes = VT.getSizeInBits()/128;
unsigned LaneSize = NumElts/NumLanes;
// 2 or 4 elements in one lane
-
+
SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
for (unsigned l = 0; l != NumElts; l += LaneSize) {
for (unsigned i = 0; i != LaneSize; ++i) {
@@ -4551,38 +4751,22 @@ unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
return getInsertVINSERTImmediate(N, 256);
}
+/// isZero - Returns true if V is a constant integer zero
+static bool isZero(SDValue V) {
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
+ return C && C->isNullValue();
+}
+
/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant +0.0.
bool X86::isZeroNode(SDValue Elt) {
- if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Elt))
- return CN->isNullValue();
+ if (isZero(Elt))
+ return true;
if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt))
return CFP->getValueAPF().isPosZero();
return false;
}
-/// CommuteVectorShuffle - Swap vector_shuffle operands as well as values in
-/// their permute mask.
-static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp,
- SelectionDAG &DAG) {
- MVT VT = SVOp->getSimpleValueType(0);
- unsigned NumElems = VT.getVectorNumElements();
- SmallVector<int, 8> MaskVec;
-
- for (unsigned i = 0; i != NumElems; ++i) {
- int Idx = SVOp->getMaskElt(i);
- if (Idx >= 0) {
- if (Idx < (int)NumElems)
- Idx += NumElems;
- else
- Idx -= NumElems;
- }
- MaskVec.push_back(Idx);
- }
- return DAG.getVectorShuffle(VT, SDLoc(SVOp), SVOp->getOperand(1),
- SVOp->getOperand(0), &MaskVec[0]);
-}
-
/// ShouldXformToMOVHLPS - Return true if the node should be transformed to
/// match movhlps. The lower half elements should come from upper half of
/// V1 (and in order), and the upper half elements should come from the upper
@@ -4604,7 +4788,7 @@ static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) {
/// isScalarLoadToVector - Returns true if the node is a scalar load that
/// is promoted to a vector. It also returns the LoadSDNode by reference if
/// required.
-static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) {
+static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = nullptr) {
if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
return false;
N = N->getOperand(0).getNode();
@@ -4668,19 +4852,6 @@ static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
return true;
}
-/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
-/// all the same.
-static bool isSplatVector(SDNode *N) {
- if (N->getOpcode() != ISD::BUILD_VECTOR)
- return false;
-
- SDValue SplatValue = N->getOperand(0);
- for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
- if (N->getOperand(i) != SplatValue)
- return false;
- return true;
-}
-
/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
/// to a zero vector.
/// FIXME: move to dag combiner / method on ShuffleVectorSDNode
@@ -4730,21 +4901,24 @@ static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
if (Subtarget->hasInt256()) { // AVX2
SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops,
- array_lengthof(Ops));
+ Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
} else {
// 256-bit logic and arithmetic instructions in AVX are all
// floating-point, no support for integer ops. Emit fp zeroed vectors.
SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops,
- array_lengthof(Ops));
+ Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
}
} else if (VT.is512BitVector()) { // AVX-512
SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops, 16);
+ Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
+ } else if (VT.getScalarType() == MVT::i1) {
+ assert(VT.getVectorNumElements() <= 16 && "Unexpected vector type");
+ SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
+ SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
} else
llvm_unreachable("Unexpected vector type");
@@ -4764,8 +4938,7 @@ static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
if (VT.is256BitVector()) {
if (HasInt256) { // AVX2
SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops,
- array_lengthof(Ops));
+ Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
} else { // AVX
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
@@ -5227,7 +5400,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
return SDValue();
SDLoc dl(Op);
- SDValue V(0, 0);
+ SDValue V;
bool First = true;
for (unsigned i = 0; i < 16; ++i) {
bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
@@ -5240,7 +5413,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
}
if ((i & 1) != 0) {
- SDValue ThisElt(0, 0), LastElt(0, 0);
+ SDValue ThisElt, LastElt;
bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
if (LastIsNonZero) {
LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
@@ -5275,7 +5448,7 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
return SDValue();
SDLoc dl(Op);
- SDValue V(0, 0);
+ SDValue V;
bool First = true;
for (unsigned i = 0; i < 8; ++i) {
bool isNonZero = (NonZeros & (1 << i)) != 0;
@@ -5296,6 +5469,79 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
return V;
}
+/// LowerBuildVectorv4x32 - Custom lower build_vector of v4i32 or v4f32.
+static SDValue LowerBuildVectorv4x32(SDValue Op, unsigned NumElems,
+ unsigned NonZeros, unsigned NumNonZero,
+ unsigned NumZero, SelectionDAG &DAG,
+ const X86Subtarget *Subtarget,
+ const TargetLowering &TLI) {
+ // We know there's at least one non-zero element
+ unsigned FirstNonZeroIdx = 0;
+ SDValue FirstNonZero = Op->getOperand(FirstNonZeroIdx);
+ while (FirstNonZero.getOpcode() == ISD::UNDEF ||
+ X86::isZeroNode(FirstNonZero)) {
+ ++FirstNonZeroIdx;
+ FirstNonZero = Op->getOperand(FirstNonZeroIdx);
+ }
+
+ if (FirstNonZero.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+ !isa<ConstantSDNode>(FirstNonZero.getOperand(1)))
+ return SDValue();
+
+ SDValue V = FirstNonZero.getOperand(0);
+ MVT VVT = V.getSimpleValueType();
+ if (!Subtarget->hasSSE41() || (VVT != MVT::v4f32 && VVT != MVT::v4i32))
+ return SDValue();
+
+ unsigned FirstNonZeroDst =
+ cast<ConstantSDNode>(FirstNonZero.getOperand(1))->getZExtValue();
+ unsigned CorrectIdx = FirstNonZeroDst == FirstNonZeroIdx;
+ unsigned IncorrectIdx = CorrectIdx ? -1U : FirstNonZeroIdx;
+ unsigned IncorrectDst = CorrectIdx ? -1U : FirstNonZeroDst;
+
+ for (unsigned Idx = FirstNonZeroIdx + 1; Idx < NumElems; ++Idx) {
+ SDValue Elem = Op.getOperand(Idx);
+ if (Elem.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elem))
+ continue;
+
+ // TODO: What else can be here? Deal with it.
+ if (Elem.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
+ return SDValue();
+
+ // TODO: Some optimizations are still possible here
+ // ex: Getting one element from a vector, and the rest from another.
+ if (Elem.getOperand(0) != V)
+ return SDValue();
+
+ unsigned Dst = cast<ConstantSDNode>(Elem.getOperand(1))->getZExtValue();
+ if (Dst == Idx)
+ ++CorrectIdx;
+ else if (IncorrectIdx == -1U) {
+ IncorrectIdx = Idx;
+ IncorrectDst = Dst;
+ } else
+ // There was already one element with an incorrect index.
+ // We can't optimize this case to an insertps.
+ return SDValue();
+ }
+
+ if (NumNonZero == CorrectIdx || NumNonZero == CorrectIdx + 1) {
+ SDLoc dl(Op);
+ EVT VT = Op.getSimpleValueType();
+ unsigned ElementMoveMask = 0;
+ if (IncorrectIdx == -1U)
+ ElementMoveMask = FirstNonZeroIdx << 6 | FirstNonZeroIdx << 4;
+ else
+ ElementMoveMask = IncorrectDst << 6 | IncorrectIdx << 4;
+
+ SDValue InsertpsMask =
+ DAG.getIntPtrConstant(ElementMoveMask | (~NonZeros & 0xf));
+ return DAG.getNode(X86ISD::INSERTPS, dl, VT, V, V, InsertpsMask);
+ }
+
+ return SDValue();
+}
+
/// getVShift - Return a vector logical shift node.
///
static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
@@ -5400,7 +5646,7 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
EVT EltVT = VT.getVectorElementType();
unsigned NumElems = Elts.size();
- LoadSDNode *LDBase = NULL;
+ LoadSDNode *LDBase = nullptr;
unsigned LastLoadedElt = -1U;
// For each element in the initializer, see if we've found a load or an undef.
@@ -5465,8 +5711,7 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
SDValue ResNode =
- DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops,
- array_lengthof(Ops), MVT::i64,
+ DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
LDBase->getPointerInfo(),
LDBase->getAlignment(),
false/*isVolatile*/, true/*ReadMem*/,
@@ -5515,18 +5760,22 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
return SDValue();
case ISD::BUILD_VECTOR: {
- // The BUILD_VECTOR node must be a splat.
- if (!isSplatVector(Op.getNode()))
+ auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
+ BitVector UndefElements;
+ SDValue Splat = BVOp->getSplatValue(&UndefElements);
+
+ // We need a splat of a single value to use broadcast, and it doesn't
+ // make any sense if the value is only in one element of the vector.
+ if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
return SDValue();
- Ld = Op.getOperand(0);
+ Ld = Splat;
ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
- Ld.getOpcode() == ISD::ConstantFP);
+ Ld.getOpcode() == ISD::ConstantFP);
- // The suspected load node has several users. Make sure that all
- // of its users are from the BUILD_VECTOR node.
- // Constants may have multiple users.
- if (!ConstSplatVal && !Ld->hasNUsesOfValue(VT.getVectorNumElements(), 0))
+ // Make sure that all of the users of a non-constant load are from the
+ // BUILD_VECTOR node.
+ if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
return SDValue();
break;
}
@@ -5581,7 +5830,7 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
unsigned ScalarSize = CVT.getSizeInBits();
if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)) {
- const Constant *C = 0;
+ const Constant *C = nullptr;
if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
C = CI->getConstantIntValue();
else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
@@ -5626,6 +5875,41 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
return SDValue();
}
+/// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
+/// underlying vector and index.
+///
+/// Modifies \p ExtractedFromVec to the real vector and returns the real
+/// index.
+static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
+ SDValue ExtIdx) {
+ int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
+ if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
+ return Idx;
+
+ // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
+ // lowered this:
+ // (extract_vector_elt (v8f32 %vreg1), Constant<6>)
+ // to:
+ // (extract_vector_elt (vector_shuffle<2,u,u,u>
+ // (extract_subvector (v8f32 %vreg0), Constant<4>),
+ // undef)
+ // Constant<0>)
+ // In this case the vector is the extract_subvector expression and the index
+ // is 2, as specified by the shuffle.
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
+ SDValue ShuffleVec = SVOp->getOperand(0);
+ MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
+ assert(ShuffleVecVT.getVectorElementType() ==
+ ExtractedFromVec.getSimpleValueType().getVectorElementType());
+
+ int ShuffleIdx = SVOp->getMaskElt(Idx);
+ if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
+ ExtractedFromVec = ShuffleVec;
+ return ShuffleIdx;
+ }
+ return Idx;
+}
+
static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
@@ -5659,34 +5943,32 @@ static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
SDValue ExtIdx = Op.getOperand(i).getOperand(1);
+ // Quit if non-constant index.
+ if (!isa<ConstantSDNode>(ExtIdx))
+ return SDValue();
+ int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
// Quit if extracted from vector of different type.
if (ExtractedFromVec.getValueType() != VT)
return SDValue();
- // Quit if non-constant index.
- if (!isa<ConstantSDNode>(ExtIdx))
- return SDValue();
-
- if (VecIn1.getNode() == 0)
+ if (!VecIn1.getNode())
VecIn1 = ExtractedFromVec;
else if (VecIn1 != ExtractedFromVec) {
- if (VecIn2.getNode() == 0)
+ if (!VecIn2.getNode())
VecIn2 = ExtractedFromVec;
else if (VecIn2 != ExtractedFromVec)
// Quit if more than 2 vectors to shuffle
return SDValue();
}
- unsigned Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
-
if (ExtractedFromVec == VecIn1)
Mask[i] = Idx;
else if (ExtractedFromVec == VecIn2)
Mask[i] = Idx + NumElems;
}
- if (VecIn1.getNode() == 0)
+ if (!VecIn1.getNode())
return SDValue();
VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
@@ -5711,32 +5993,38 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl(Op);
if (ISD::isBuildVectorAllZeros(Op.getNode())) {
SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
- SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
- Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
- return DAG.getNode(ISD::BUILD_VECTOR, dl, VT,
- Ops, VT.getVectorNumElements());
+ SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
if (ISD::isBuildVectorAllOnes(Op.getNode())) {
SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
- SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
- Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
- return DAG.getNode(ISD::BUILD_VECTOR, dl, VT,
- Ops, VT.getVectorNumElements());
+ SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
bool AllContants = true;
uint64_t Immediate = 0;
+ int NonConstIdx = -1;
+ bool IsSplat = true;
+ unsigned NumNonConsts = 0;
+ unsigned NumConsts = 0;
for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
SDValue In = Op.getOperand(idx);
if (In.getOpcode() == ISD::UNDEF)
continue;
if (!isa<ConstantSDNode>(In)) {
AllContants = false;
- break;
+ NonConstIdx = idx;
+ NumNonConsts++;
}
- if (cast<ConstantSDNode>(In)->getZExtValue())
+ else {
+ NumConsts++;
+ if (cast<ConstantSDNode>(In)->getZExtValue())
Immediate |= (1ULL << idx);
+ }
+ if (In != Op.getOperand(0))
+ IsSplat = false;
}
if (AllContants) {
@@ -5746,63 +6034,459 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
DAG.getIntPtrConstant(0));
}
- // Splat vector (with undefs)
- SDValue In = Op.getOperand(0);
- for (unsigned i = 1, e = Op.getNumOperands(); i != e; ++i) {
- if (Op.getOperand(i) != In && Op.getOperand(i).getOpcode() != ISD::UNDEF)
- llvm_unreachable("Unsupported predicate operation");
- }
-
- SDValue EFLAGS, X86CC;
- if (In.getOpcode() == ISD::SETCC) {
- SDValue Op0 = In.getOperand(0);
- SDValue Op1 = In.getOperand(1);
- ISD::CondCode CC = cast<CondCodeSDNode>(In.getOperand(2))->get();
- bool isFP = Op1.getValueType().isFloatingPoint();
- unsigned X86CCVal = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
-
- assert(X86CCVal != X86::COND_INVALID && "Unsupported predicate operation");
-
- X86CC = DAG.getConstant(X86CCVal, MVT::i8);
- EFLAGS = EmitCmp(Op0, Op1, X86CCVal, DAG);
- EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
- } else if (In.getOpcode() == X86ISD::SETCC) {
- X86CC = In.getOperand(0);
- EFLAGS = In.getOperand(1);
+ if (NumNonConsts == 1 && NonConstIdx != 0) {
+ SDValue DstVec;
+ if (NumConsts) {
+ SDValue VecAsImm = DAG.getConstant(Immediate,
+ MVT::getIntegerVT(VT.getSizeInBits()));
+ DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
+ }
+ else
+ DstVec = DAG.getUNDEF(VT);
+ return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
+ Op.getOperand(NonConstIdx),
+ DAG.getIntPtrConstant(NonConstIdx));
+ }
+ if (!IsSplat && (NonConstIdx != 0))
+ llvm_unreachable("Unsupported BUILD_VECTOR operation");
+ MVT SelectVT = (VT == MVT::v16i1)? MVT::i16 : MVT::i8;
+ SDValue Select;
+ if (IsSplat)
+ Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
+ DAG.getConstant(-1, SelectVT),
+ DAG.getConstant(0, SelectVT));
+ else
+ Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
+ DAG.getConstant((Immediate | 1), SelectVT),
+ DAG.getConstant(Immediate, SelectVT));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Select);
+}
+
+/// \brief Return true if \p N implements a horizontal binop and return the
+/// operands of the horizontal binop in V0 and V1.
+///
+/// This is a helper function of PerformBUILD_VECTORCombine.
+/// This function checks that the build_vector \p N in input implements a
+/// horizontal operation. Parameter \p Opcode defines the kind of horizontal
+/// operation to match.
+/// For example, if \p Opcode is equal to ISD::ADD, then this function
+/// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
+/// is equal to ISD::SUB, then this function checks if this is a horizontal
+/// arithmetic sub.
+///
+/// This function only analyzes elements of \p N whose indices are
+/// in range [BaseIdx, LastIdx).
+static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
+ SelectionDAG &DAG,
+ unsigned BaseIdx, unsigned LastIdx,
+ SDValue &V0, SDValue &V1) {
+ EVT VT = N->getValueType(0);
+
+ assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
+ assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
+ "Invalid Vector in input!");
+
+ bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
+ bool CanFold = true;
+ unsigned ExpectedVExtractIdx = BaseIdx;
+ unsigned NumElts = LastIdx - BaseIdx;
+ V0 = DAG.getUNDEF(VT);
+ V1 = DAG.getUNDEF(VT);
+
+ // Check if N implements a horizontal binop.
+ for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
+ SDValue Op = N->getOperand(i + BaseIdx);
+
+ // Skip UNDEFs.
+ if (Op->getOpcode() == ISD::UNDEF) {
+ // Update the expected vector extract index.
+ if (i * 2 == NumElts)
+ ExpectedVExtractIdx = BaseIdx;
+ ExpectedVExtractIdx += 2;
+ continue;
+ }
+
+ CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
+
+ if (!CanFold)
+ break;
+
+ SDValue Op0 = Op.getOperand(0);
+ SDValue Op1 = Op.getOperand(1);
+
+ // Try to match the following pattern:
+ // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
+ CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+ Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+ Op0.getOperand(0) == Op1.getOperand(0) &&
+ isa<ConstantSDNode>(Op0.getOperand(1)) &&
+ isa<ConstantSDNode>(Op1.getOperand(1)));
+ if (!CanFold)
+ break;
+
+ unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
+ unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
+
+ if (i * 2 < NumElts) {
+ if (V0.getOpcode() == ISD::UNDEF)
+ V0 = Op0.getOperand(0);
+ } else {
+ if (V1.getOpcode() == ISD::UNDEF)
+ V1 = Op0.getOperand(0);
+ if (i * 2 == NumElts)
+ ExpectedVExtractIdx = BaseIdx;
+ }
+
+ SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
+ if (I0 == ExpectedVExtractIdx)
+ CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
+ else if (IsCommutable && I1 == ExpectedVExtractIdx) {
+ // Try to match the following dag sequence:
+ // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
+ CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
+ } else
+ CanFold = false;
+
+ ExpectedVExtractIdx += 2;
+ }
+
+ return CanFold;
+}
+
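// Illustrative sketch (hypothetical helper, ignoring the undef and commuted
// cases that the code above also handles): the extract-index pattern accepted
// for a full-width horizontal match. Element i of the build_vector must be
// BINOP(extract(Src, 2*i'), extract(Src, 2*i'+1)), where Src is V0 for the
// low half, V1 for the high half, and i' restarts at the half boundary,
// which is exactly the lane pairing performed by HADD/HSUB.
#include <cstddef>
#include <utility>
#include <vector>
static bool followsHorizontalPattern(const std::vector<std::pair<int, int>> &ExtractIdx) {
  const std::size_t Half = ExtractIdx.size() / 2;
  for (std::size_t i = 0; i != ExtractIdx.size(); ++i) {
    int Base = 2 * int(i < Half ? i : i - Half);
    if (ExtractIdx[i] != std::make_pair(Base, Base + 1))
      return false;
  }
  return true;
}
// e.g. for v4f32, {{0,1},{2,3},{0,1},{2,3}} (low half from V0, high half from
// V1) matches; {{0,2},{1,3},{0,2},{1,3}} does not.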
+/// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
+/// a concat_vector.
+///
+/// This is a helper function of PerformBUILD_VECTORCombine.
+/// This function expects two 256-bit vectors called V0 and V1.
+/// At first, each vector is split into two separate 128-bit vectors.
+/// Then, the resulting 128-bit vectors are used to implement two
+/// horizontal binary operations.
+///
+/// The kind of horizontal binary operation is defined by \p X86Opcode.
+///
+/// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as input to
+/// the two new horizontal binops.
+/// When Mode is set, the first horizontal binop dag node would take as input
+/// the lower 128-bit of V0 and the upper 128-bit of V0. The second
+/// horizontal binop dag node would take as input the lower 128-bit of V1
+/// and the upper 128-bit of V1.
+/// Example:
+/// HADD V0_LO, V0_HI
+/// HADD V1_LO, V1_HI
+///
+/// Otherwise, the first horizontal binop dag node takes as input the lower
+/// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
+/// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
+/// Example:
+/// HADD V0_LO, V1_LO
+/// HADD V0_HI, V1_HI
+///
+/// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
+/// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
+/// the upper 128-bits of the result.
+static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
+ SDLoc DL, SelectionDAG &DAG,
+ unsigned X86Opcode, bool Mode,
+ bool isUndefLO, bool isUndefHI) {
+ EVT VT = V0.getValueType();
+ assert(VT.is256BitVector() && VT == V1.getValueType() &&
+ "Invalid nodes in input!");
+
+ unsigned NumElts = VT.getVectorNumElements();
+ SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
+ SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
+ SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
+ SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
+ EVT NewVT = V0_LO.getValueType();
+
+ SDValue LO = DAG.getUNDEF(NewVT);
+ SDValue HI = DAG.getUNDEF(NewVT);
+
+ if (Mode) {
+ // Don't emit a horizontal binop if the result is expected to be UNDEF.
+ if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
+ LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
+ if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
+ HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
} else {
- // The algorithm:
- // Bit1 = In & 0x1
- // if (Bit1 != 0)
- // ZF = 0
- // else
- // ZF = 1
- // if (ZF == 0)
- // res = allOnes ### CMOVNE -1, %res
- // else
- // res = allZero
- MVT InVT = In.getSimpleValueType();
- SDValue Bit1 = DAG.getNode(ISD::AND, dl, InVT, In, DAG.getConstant(1, InVT));
- EFLAGS = EmitTest(Bit1, X86::COND_NE, DAG);
- X86CC = DAG.getConstant(X86::COND_NE, MVT::i8);
- }
-
- if (VT == MVT::v16i1) {
- SDValue Cst1 = DAG.getConstant(-1, MVT::i16);
- SDValue Cst0 = DAG.getConstant(0, MVT::i16);
- SDValue CmovOp = DAG.getNode(X86ISD::CMOV, dl, MVT::i16,
- Cst0, Cst1, X86CC, EFLAGS);
- return DAG.getNode(ISD::BITCAST, dl, VT, CmovOp);
- }
-
- if (VT == MVT::v8i1) {
- SDValue Cst1 = DAG.getConstant(-1, MVT::i32);
- SDValue Cst0 = DAG.getConstant(0, MVT::i32);
- SDValue CmovOp = DAG.getNode(X86ISD::CMOV, dl, MVT::i32,
- Cst0, Cst1, X86CC, EFLAGS);
- CmovOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, CmovOp);
- return DAG.getNode(ISD::BITCAST, dl, VT, CmovOp);
- }
- llvm_unreachable("Unsupported predicate operation");
+ // Don't emit a horizontal binop if the result is expected to be UNDEF.
+ if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
+ V1_LO->getOpcode() != ISD::UNDEF))
+ LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
+
+ if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
+ V1_HI->getOpcode() != ISD::UNDEF))
+ HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
+ }
+
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
+}
+
+/// \brief Try to fold a build_vector that performs an 'addsub' into the
+/// sequence of 'vadd + vsub + blendi'.
+static SDValue matchAddSub(const BuildVectorSDNode *BV, SelectionDAG &DAG,
+ const X86Subtarget *Subtarget) {
+ SDLoc DL(BV);
+ EVT VT = BV->getValueType(0);
+ unsigned NumElts = VT.getVectorNumElements();
+ SDValue InVec0 = DAG.getUNDEF(VT);
+ SDValue InVec1 = DAG.getUNDEF(VT);
+
+ assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
+ VT == MVT::v2f64) && "build_vector with an invalid type found!");
+
+ // Don't try to emit a VSELECT that cannot be lowered into a blend.
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
+ return SDValue();
+
+ // Odd-numbered elements in the input build vector are obtained from
+ // adding two integer/float elements.
+ // Even-numbered elements in the input build vector are obtained from
+ // subtracting two integer/float elements.
+ unsigned ExpectedOpcode = ISD::FSUB;
+ unsigned NextExpectedOpcode = ISD::FADD;
+ bool AddFound = false;
+ bool SubFound = false;
+
+ for (unsigned i = 0, e = NumElts; i != e; i++) {
+ SDValue Op = BV->getOperand(i);
+
+ // Skip 'undef' values.
+ unsigned Opcode = Op.getOpcode();
+ if (Opcode == ISD::UNDEF) {
+ std::swap(ExpectedOpcode, NextExpectedOpcode);
+ continue;
+ }
+
+ // Early exit if we found an unexpected opcode.
+ if (Opcode != ExpectedOpcode)
+ return SDValue();
+
+ SDValue Op0 = Op.getOperand(0);
+ SDValue Op1 = Op.getOperand(1);
+
+ // Try to match the following pattern:
+ // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
+ // Early exit if we cannot match that sequence.
+ if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+ Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+ !isa<ConstantSDNode>(Op0.getOperand(1)) ||
+ !isa<ConstantSDNode>(Op1.getOperand(1)) ||
+ Op0.getOperand(1) != Op1.getOperand(1))
+ return SDValue();
+
+ unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
+ if (I0 != i)
+ return SDValue();
+
+ // We found a valid add/sub node. Update the information accordingly.
+ if (i & 1)
+ AddFound = true;
+ else
+ SubFound = true;
+
+ // Update InVec0 and InVec1.
+ if (InVec0.getOpcode() == ISD::UNDEF)
+ InVec0 = Op0.getOperand(0);
+ if (InVec1.getOpcode() == ISD::UNDEF)
+ InVec1 = Op1.getOperand(0);
+
+ // Make sure that the operands of each add/sub node always
+ // come from the same pair of vectors.
+ if (InVec0 != Op0.getOperand(0)) {
+ if (ExpectedOpcode == ISD::FSUB)
+ return SDValue();
+
+ // FADD is commutable. Try to commute the operands
+ // and then test again.
+ std::swap(Op0, Op1);
+ if (InVec0 != Op0.getOperand(0))
+ return SDValue();
+ }
+
+ if (InVec1 != Op1.getOperand(0))
+ return SDValue();
+
+ // Update the pair of expected opcodes.
+ std::swap(ExpectedOpcode, NextExpectedOpcode);
+ }
+
+ // Don't try to fold this build_vector into a VSELECT if it has
+ // too many UNDEF operands.
+ if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
+ InVec1.getOpcode() != ISD::UNDEF) {
+ // Emit a sequence of vector add and sub followed by a VSELECT.
+ // The new VSELECT will be lowered into a BLENDI.
+ // At ISel stage, we pattern-match the sequence 'add + sub + BLENDI'
+ // and emit a single ADDSUB instruction.
+ SDValue Sub = DAG.getNode(ExpectedOpcode, DL, VT, InVec0, InVec1);
+ SDValue Add = DAG.getNode(NextExpectedOpcode, DL, VT, InVec0, InVec1);
+
+ // Construct the VSELECT mask.
+ EVT MaskVT = VT.changeVectorElementTypeToInteger();
+ EVT SVT = MaskVT.getVectorElementType();
+ unsigned SVTBits = SVT.getSizeInBits();
+ SmallVector<SDValue, 8> Ops;
+
+ for (unsigned i = 0, e = NumElts; i != e; ++i) {
+ APInt Value = i & 1 ? APInt::getNullValue(SVTBits) :
+ APInt::getAllOnesValue(SVTBits);
+ SDValue Constant = DAG.getConstant(Value, SVT);
+ Ops.push_back(Constant);
+ }
+
+ SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskVT, Ops);
+ return DAG.getSelect(DL, VT, Mask, Sub, Add);
+ }
+
+ return SDValue();
+}
+
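// Illustrative sketch (hypothetical helper): the per-lane value pattern that
// matchAddSub above recognizes for v4f32. Even lanes subtract and odd lanes
// add, which is exactly what a single ADDSUBPS computes.
#include <array>
static std::array<float, 4> addsubScalar(const std::array<float, 4> &A,
                                         const std::array<float, 4> &B) {
  std::array<float, 4> R;
  for (int i = 0; i != 4; ++i)
    R[i] = (i & 1) ? A[i] + B[i] : A[i] - B[i]; // lane 0 sub, lane 1 add, ...
  return R;
}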
+static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget *Subtarget) {
+ SDLoc DL(N);
+ EVT VT = N->getValueType(0);
+ unsigned NumElts = VT.getVectorNumElements();
+ BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
+ SDValue InVec0, InVec1;
+
+ // Try to match an ADDSUB.
+ if ((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
+ (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
+ SDValue Value = matchAddSub(BV, DAG, Subtarget);
+ if (Value.getNode())
+ return Value;
+ }
+
+ // Try to match horizontal ADD/SUB.
+ unsigned NumUndefsLO = 0;
+ unsigned NumUndefsHI = 0;
+ unsigned Half = NumElts/2;
+
+ // Count the number of UNDEF operands in the build_vector in input.
+ for (unsigned i = 0, e = Half; i != e; ++i)
+ if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
+ NumUndefsLO++;
+
+ for (unsigned i = Half, e = NumElts; i != e; ++i)
+ if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
+ NumUndefsHI++;
+
+ // Early exit if this is either a build_vector of all UNDEFs or all the
+ // operands but one are UNDEF.
+ if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
+ return SDValue();
+
+ if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
+ // Try to match an SSE3 float HADD/HSUB.
+ if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
+ return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
+
+ if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
+ return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
+ } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
+ // Try to match an SSSE3 integer HADD/HSUB.
+ if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
+ return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
+
+ if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
+ return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
+ }
+
+ if (!Subtarget->hasAVX())
+ return SDValue();
+
+ if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
+ // Try to match an AVX horizontal add/sub of packed single/double
+ // precision floating point values from 256-bit vectors.
+ SDValue InVec2, InVec3;
+ if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
+ isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
+ ((InVec0.getOpcode() == ISD::UNDEF ||
+ InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
+ ((InVec1.getOpcode() == ISD::UNDEF ||
+ InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
+ return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
+
+ if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
+ isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
+ ((InVec0.getOpcode() == ISD::UNDEF ||
+ InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
+ ((InVec1.getOpcode() == ISD::UNDEF ||
+ InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
+ return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
+ } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
+ // Try to match an AVX2 horizontal add/sub of signed integers.
+ SDValue InVec2, InVec3;
+ unsigned X86Opcode;
+ bool CanFold = true;
+
+ if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
+ isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
+ ((InVec0.getOpcode() == ISD::UNDEF ||
+ InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
+ ((InVec1.getOpcode() == ISD::UNDEF ||
+ InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
+ X86Opcode = X86ISD::HADD;
+ else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
+ isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
+ ((InVec0.getOpcode() == ISD::UNDEF ||
+ InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
+ ((InVec1.getOpcode() == ISD::UNDEF ||
+ InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
+ X86Opcode = X86ISD::HSUB;
+ else
+ CanFold = false;
+
+ if (CanFold) {
+ // Fold this build_vector into a single horizontal add/sub.
+ // Do this only if the target has AVX2.
+ if (Subtarget->hasAVX2())
+ return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
+
+ // Do not try to expand this build_vector into a pair of horizontal
+ // add/sub if we can emit a pair of scalar add/sub.
+ if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
+ return SDValue();
+
+ // Convert this build_vector into a pair of horizontal binop followed by
+ // a concat vector.
+ bool isUndefLO = NumUndefsLO == Half;
+ bool isUndefHI = NumUndefsHI == Half;
+ return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
+ isUndefLO, isUndefHI);
+ }
+ }
+
+ if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
+ VT == MVT::v16i16) && Subtarget->hasAVX()) {
+ unsigned X86Opcode;
+ if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
+ X86Opcode = X86ISD::HADD;
+ else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
+ X86Opcode = X86ISD::HSUB;
+ else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
+ X86Opcode = X86ISD::FHADD;
+ else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
+ X86Opcode = X86ISD::FHSUB;
+ else
+ return SDValue();
+
+ // Don't try to expand this build_vector into a pair of horizontal add/sub
+ // if we can simply emit a pair of scalar add/sub.
+ if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
+ return SDValue();
+
+ // Convert this build_vector into two horizontal add/sub followed by
+ // a concat vector.
+ bool isUndefLO = NumUndefsLO == Half;
+ bool isUndefHI = NumUndefsHI == Half;
+ return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
+ isUndefLO, isUndefHI);
+ }
+
+ return SDValue();
}
SDValue
@@ -5995,20 +6679,23 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// For AVX-length vectors, build the individual 128-bit pieces and use
// shuffles to put them in place.
- if (VT.is256BitVector()) {
- SmallVector<SDValue, 32> V;
+ if (VT.is256BitVector() || VT.is512BitVector()) {
+ SmallVector<SDValue, 64> V;
for (unsigned i = 0; i != NumElems; ++i)
V.push_back(Op.getOperand(i));
EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
// Build both the lower and upper subvector.
- SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[0], NumElems/2);
- SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[NumElems / 2],
- NumElems/2);
+ SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
+ makeArrayRef(&V[0], NumElems/2));
+ SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
+ makeArrayRef(&V[NumElems / 2], NumElems/2));
// Recreate the wider vector with the lower and upper part.
- return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
+ if (VT.is256BitVector())
+ return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
+ return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
}
// Let legalizer expand 2-wide build_vectors.
@@ -6036,6 +6723,14 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (V.getNode()) return V;
}
+ // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
+ if (EVTBits == 32 && NumElems == 4) {
+ SDValue V = LowerBuildVectorv4x32(Op, NumElems, NonZeros, NumNonZero,
+ NumZero, DAG, Subtarget, *this);
+ if (V.getNode())
+ return V;
+ }
+
// If element VT is == 32 bits, turn it into a number of shuffles.
SmallVector<SDValue, 8> V(NumElems);
if (NumElems == 4 && NumZero > 0) {
@@ -6157,49 +6852,1184 @@ static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
if(ResVT.is256BitVector())
return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
+ if (Op.getNumOperands() == 4) {
+ MVT HalfVT = MVT::getVectorVT(ResVT.getScalarType(),
+ ResVT.getVectorNumElements()/2);
+ SDValue V3 = Op.getOperand(2);
+ SDValue V4 = Op.getOperand(3);
+ return Concat256BitVectors(Concat128BitVectors(V1, V2, HalfVT, NumElems/2, DAG, dl),
+ Concat128BitVectors(V3, V4, HalfVT, NumElems/2, DAG, dl), ResVT, NumElems, DAG, dl);
+ }
return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
}
static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
- assert(Op.getNumOperands() == 2);
+ MVT LLVM_ATTRIBUTE_UNUSED VT = Op.getSimpleValueType();
+ assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
+ (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
+ Op.getNumOperands() == 4)));
- // AVX/AVX-512 can use the vinsertf128 instruction to create 256-bit vectors
+ // AVX can use the vinsertf128 instruction to create 256-bit vectors
// from two other 128-bit ones.
+
+ // A 512-bit vector may contain two 256-bit vectors or four 128-bit vectors.
return LowerAVXCONCAT_VECTORS(Op, DAG);
}
-// Try to lower a shuffle node into a simple blend instruction.
-static SDValue
-LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
- const X86Subtarget *Subtarget, SelectionDAG &DAG) {
- SDValue V1 = SVOp->getOperand(0);
- SDValue V2 = SVOp->getOperand(1);
- SDLoc dl(SVOp);
- MVT VT = SVOp->getSimpleValueType(0);
+
+//===----------------------------------------------------------------------===//
+// Vector shuffle lowering
+//
+// This is an experimental code path for lowering vector shuffles on x86. It is
+// designed to handle arbitrary vector shuffles and blends, gracefully
+// degrading performance as necessary. It works hard to recognize idiomatic
+// shuffles and lower them to optimal instruction patterns without leaving
+// a framework that allows reasonably efficient handling of all vector shuffle
+// patterns.
+//===----------------------------------------------------------------------===//
+
+/// \brief Tiny helper function to identify a no-op mask.
+///
+/// This is a somewhat boring predicate function. It checks whether the mask
+/// array input, which is assumed to be a single-input shuffle mask of the kind
+/// used by the X86 shuffle instructions (not a fully general
+/// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
+/// in-place shuffle are 'no-op's.
+static bool isNoopShuffleMask(ArrayRef<int> Mask) {
+ for (int i = 0, Size = Mask.size(); i < Size; ++i)
+ if (Mask[i] != -1 && Mask[i] != i)
+ return false;
+ return true;
+}
+
+/// \brief Helper function to classify a mask as a single-input mask.
+///
+/// This isn't a generic single-input test because in the vector shuffle
+/// lowering we canonicalize single inputs to be the first input operand. This
+/// means we can more quickly test for a single input by only checking whether
+/// an input from the second operand exists. We also assume that the size of
+/// mask corresponds to the size of the input vectors which isn't true in the
+/// fully general case.
+static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
+ for (int M : Mask)
+ if (M >= (int)Mask.size())
+ return false;
+ return true;
+}
+
+/// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
+///
+/// This helper function produces an 8-bit shuffle immediate corresponding to
+/// the ubiquitous shuffle encoding scheme used in x86 instructions for
+/// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
+/// example.
+///
+/// NB: We rely heavily on "undef" masks preserving the input lane.
+static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
+ SelectionDAG &DAG) {
+ assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
+ assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
+ assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
+ assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
+ assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
+
+ unsigned Imm = 0;
+ Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
+ Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
+ Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
+ Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
+ return DAG.getConstant(Imm, MVT::i8);
+}
+
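// Illustrative sketch (hypothetical helper): the 2-bits-per-lane immediate
// encoding produced by getV4X86ShuffleImm8ForMask above, with undef (-1)
// lanes kept in place.
#include <array>
#include <cassert>
static unsigned shuffleImm8(const std::array<int, 4> &Mask) {
  unsigned Imm = 0;
  for (int i = 0; i != 4; ++i) {
    int M = Mask[i] == -1 ? i : Mask[i]; // undef keeps the identity index
    assert(M >= 0 && M < 4 && "out of bound mask element");
    Imm |= unsigned(M) << (2 * i);       // 2 bits per destination lane
  }
  return Imm;
}
// e.g. shuffleImm8({2, 3, 0, 1}) == 0x4E, the PSHUFD immediate that swaps the
// two 64-bit halves of a 128-bit register.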
+/// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
+///
+/// This is the basis function for the 2-lane 64-bit shuffles as we have full
+/// support for floating point shuffles but not integer shuffles. These
+/// instructions will incur a domain crossing penalty on some chips though so
+/// it is better to avoid lowering through this for integer vectors where
+/// possible.
+static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
+ const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ SDLoc DL(Op);
+ assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
+ assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
+ assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+ ArrayRef<int> Mask = SVOp->getMask();
+ assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
+
+ if (isSingleInputShuffleMask(Mask)) {
+ // Straight shuffle of a single input vector. Simulate this by using the
+ // single input as both of the "inputs" to this instruction.
+ unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
+ return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
+ DAG.getConstant(SHUFPDMask, MVT::i8));
+ }
+ assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
+ assert(Mask[1] >= 2 && "Non-canonicalized blend!");
+
+ unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
+ return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
+ DAG.getConstant(SHUFPDMask, MVT::i8));
+}
+
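// Illustrative sketch (hypothetical helper): the SHUFPD immediate built above
// for the two-input blend case. Bit 0 selects which element of the first
// source lands in lane 0; bit 1 selects which element of the second source
// lands in lane 1.
static unsigned shufpdBlendImm(unsigned Lane0FromV1, unsigned Lane1FromV2) {
  return (Lane0FromV1 & 1) | ((Lane1FromV2 & 1) << 1);
}
// e.g. the canonicalized mask <0, 3> (lane 0 = V1[0], lane 1 = V2[1]) encodes as 2.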
+/// \brief Handle lowering of 2-lane 64-bit integer shuffles.
+///
+/// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
+/// the integer unit to minimize domain crossing penalties. However, for blends
+/// it falls back to the floating point shuffle operation with appropriate bit
+/// casting.
+static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
+ const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ SDLoc DL(Op);
+ assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
+ assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
+ assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+ ArrayRef<int> Mask = SVOp->getMask();
+ assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
+
+ if (isSingleInputShuffleMask(Mask)) {
+ // Straight shuffle of a single input vector. For everything from SSE2
+ // onward this has a single fast instruction with no scary immediates.
+ // We have to map the mask as it is actually a v4i32 shuffle instruction.
+ V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V1);
+ int WidenedMask[4] = {
+ std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
+ std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
+ return DAG.getNode(
+ ISD::BITCAST, DL, MVT::v2i64,
+ DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
+ getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
+ }
+
+ // We implement this with SHUFPD which is pretty lame because it will likely
+ // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
+ // However, all the alternatives are still more cycles and newer chips don't
+ // have this problem. It would be really nice if x86 had better shuffles here.
+ V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V1);
+ V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V2);
+ return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
+ DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
+}
+
+/// \brief Lower 4-lane 32-bit floating point shuffles.
+///
+/// Uses instructions exclusively from the floating point unit to minimize
+/// domain crossing penalties, as these are sufficient to implement all v4f32
+/// shuffles.
+static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
+ const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ SDLoc DL(Op);
+ assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
+ assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
+ assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+ ArrayRef<int> Mask = SVOp->getMask();
+ assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
+
+ SDValue LowV = V1, HighV = V2;
+ int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
+
+ int NumV2Elements =
+ std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
+
+ if (NumV2Elements == 0)
+ // Straight shuffle of a single input vector. We pass the input vector to
+ // both operands to simulate this with a SHUFPS.
+ return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
+ getV4X86ShuffleImm8ForMask(Mask, DAG));
+
+ if (NumV2Elements == 1) {
+ int V2Index =
+ std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
+ Mask.begin();
+ // Compute the index adjacent to V2Index and in the same half by toggling
+ // the low bit.
+ int V2AdjIndex = V2Index ^ 1;
+
+ if (Mask[V2AdjIndex] == -1) {
+ // Handles all the cases where we have a single V2 element and an undef.
+ // This will only ever happen in the high lanes because we commute the
+ // vector otherwise.
+ if (V2Index < 2)
+ std::swap(LowV, HighV);
+ NewMask[V2Index] -= 4;
+ } else {
+ // Handle the case where the V2 element ends up adjacent to a V1 element.
+ // To make this work, blend them together as the first step.
+ int V1Index = V2AdjIndex;
+ int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
+ V2 = DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V2, V1,
+ getV4X86ShuffleImm8ForMask(BlendMask, DAG));
+
+ // Now proceed to reconstruct the final blend as we have the necessary
+ // high or low half formed.
+ if (V2Index < 2) {
+ LowV = V2;
+ HighV = V1;
+ } else {
+ HighV = V2;
+ }
+ NewMask[V1Index] = 2; // We put the V1 element in V2[2].
+ NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
+ }
+ } else if (NumV2Elements == 2) {
+ if (Mask[0] < 4 && Mask[1] < 4) {
+ // Handle the easy case where we have V1 in the low lanes and V2 in the
+ // high lanes. We never see this reversed because we sort the shuffle.
+ NewMask[2] -= 4;
+ NewMask[3] -= 4;
+ } else {
+ // We have a mixture of V1 and V2 in both low and high lanes. Rather than
+ // trying to place elements directly, just blend them and set up the final
+ // shuffle to place them.
+
+ // The first two blend mask elements are for V1, the second two are for
+ // V2.
+ int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
+ Mask[2] < 4 ? Mask[2] : Mask[3],
+ (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
+ (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
+ V1 = DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V2,
+ getV4X86ShuffleImm8ForMask(BlendMask, DAG));
+
+ // Now we do a normal shuffle of V1 by giving V1 as both operands to
+ // a blend.
+ LowV = HighV = V1;
+ NewMask[0] = Mask[0] < 4 ? 0 : 2;
+ NewMask[1] = Mask[0] < 4 ? 2 : 0;
+ NewMask[2] = Mask[2] < 4 ? 1 : 3;
+ NewMask[3] = Mask[2] < 4 ? 3 : 1;
+ }
+ }
+ return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, LowV, HighV,
+ getV4X86ShuffleImm8ForMask(NewMask, DAG));
+}
+
+/// \brief Lower 4-lane i32 vector shuffles.
+///
+/// We try to handle these with integer-domain shuffles where we can, but for
+/// blends we use the floating point domain blend instructions.
+static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
+ const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ SDLoc DL(Op);
+ assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
+ assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
+ assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+ ArrayRef<int> Mask = SVOp->getMask();
+ assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
+
+ if (isSingleInputShuffleMask(Mask))
+ // Straight shuffle of a single input vector. For everything from SSE2
+ // onward this has a single fast instruction with no scary immediates.
+ return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
+ getV4X86ShuffleImm8ForMask(Mask, DAG));
+
+ // We implement this with SHUFPS because it can blend from two vectors.
+ // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
+ // up the inputs, bypassing domain shift penalties that we would incur if we
+ // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
+ // relevant.
+ return DAG.getNode(ISD::BITCAST, DL, MVT::v4i32,
+ DAG.getVectorShuffle(
+ MVT::v4f32, DL,
+ DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V1),
+ DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V2), Mask));
+}
+
+/// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
+/// shuffle lowering, and the most complex part.
+///
+/// The lowering strategy is to try to form pairs of input lanes which are
+/// targeted at the same half of the final vector, and then use a dword shuffle
+/// to place them onto the right half, and finally unpack the paired lanes into
+/// their final position.
+///
+/// The exact breakdown of how to form these dword pairs and align them on the
+/// correct sides is really tricky. See the comments within the function for
+/// more of the details.
+static SDValue lowerV8I16SingleInputVectorShuffle(
+ SDLoc DL, SDValue V, MutableArrayRef<int> Mask,
+ const X86Subtarget *Subtarget, SelectionDAG &DAG) {
+ assert(V.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
+ MutableArrayRef<int> LoMask = Mask.slice(0, 4);
+ MutableArrayRef<int> HiMask = Mask.slice(4, 4);
+
+ SmallVector<int, 4> LoInputs;
+ std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
+ [](int M) { return M >= 0; });
+ std::sort(LoInputs.begin(), LoInputs.end());
+ LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
+ SmallVector<int, 4> HiInputs;
+ std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
+ [](int M) { return M >= 0; });
+ std::sort(HiInputs.begin(), HiInputs.end());
+ HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
+ int NumLToL =
+ std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
+ int NumHToL = LoInputs.size() - NumLToL;
+ int NumLToH =
+ std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
+ int NumHToH = HiInputs.size() - NumLToH;
+ MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
+ MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
+ MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
+ MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
+
+ // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
+ // such inputs we can swap two of the dwords across the half mark and end up
+ // with <=2 inputs from each half to each half. Once there, we can fall through
+ // to the generic code below. For example:
+ //
+ // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
+ // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
+ //
+ // Before we had 3-1 in the low half and 3-1 in the high half. Afterward, 2-2
+ // and 2-2.
+ auto balanceSides = [&](ArrayRef<int> ThreeInputs, int OneInput,
+ int ThreeInputHalfSum, int OneInputHalfOffset) {
+ // Compute the index of dword with only one word among the three inputs in
+ // a half by taking the sum of the half with three inputs and subtracting
+ // the sum of the actual three inputs. The difference is the remaining
+ // slot.
+ int DWordA = (ThreeInputHalfSum -
+ std::accumulate(ThreeInputs.begin(), ThreeInputs.end(), 0)) /
+ 2;
+ int DWordB = OneInputHalfOffset / 2 + (OneInput / 2 + 1) % 2;
+
+ int PSHUFDMask[] = {0, 1, 2, 3};
+ PSHUFDMask[DWordA] = DWordB;
+ PSHUFDMask[DWordB] = DWordA;
+ V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
+ DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
+ DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
+ getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
+
+ // Adjust the mask to match the new locations of A and B.
+ for (int &M : Mask)
+ if (M != -1 && M/2 == DWordA)
+ M = 2 * DWordB + M % 2;
+ else if (M != -1 && M/2 == DWordB)
+ M = 2 * DWordA + M % 2;
+
+ // Recurse back into this routine to re-compute state now that this isn't
+ // a 3 and 1 problem.
+ return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
+ Mask);
+ };
+ if (NumLToL == 3 && NumHToL == 1)
+ return balanceSides(LToLInputs, HToLInputs[0], 0 + 1 + 2 + 3, 4);
+ else if (NumLToL == 1 && NumHToL == 3)
+ return balanceSides(HToLInputs, LToLInputs[0], 4 + 5 + 6 + 7, 0);
+ else if (NumLToH == 1 && NumHToH == 3)
+ return balanceSides(HToHInputs, LToHInputs[0], 4 + 5 + 6 + 7, 0);
+ else if (NumLToH == 3 && NumHToH == 1)
+ return balanceSides(LToHInputs, HToHInputs[0], 0 + 1 + 2 + 3, 4);
+
+ // At this point there are at most two inputs to the low and high halves from
+ // each half. That means the inputs can always be grouped into dwords and
+ // those dwords can then be moved to the correct half with a dword shuffle.
+ // We use at most one low and one high word shuffle to collect these paired
+ // inputs into dwords, and finally a dword shuffle to place them.
+ int PSHUFLMask[4] = {-1, -1, -1, -1};
+ int PSHUFHMask[4] = {-1, -1, -1, -1};
+ int PSHUFDMask[4] = {-1, -1, -1, -1};
+
+ // First fix the masks for all the inputs that are staying in their
+ // original halves. This will then dictate the targets of the cross-half
+ // shuffles.
+ auto fixInPlaceInputs = [&PSHUFDMask](
+ ArrayRef<int> InPlaceInputs, MutableArrayRef<int> SourceHalfMask,
+ MutableArrayRef<int> HalfMask, int HalfOffset) {
+ if (InPlaceInputs.empty())
+ return;
+ if (InPlaceInputs.size() == 1) {
+ SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
+ InPlaceInputs[0] - HalfOffset;
+ PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
+ return;
+ }
+
+ assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
+ SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
+ InPlaceInputs[0] - HalfOffset;
+ // Put the second input next to the first so that they are packed into
+ // a dword. We find the adjacent index by toggling the low bit.
+ int AdjIndex = InPlaceInputs[0] ^ 1;
+ SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
+ std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
+ PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
+ };
+ if (!HToLInputs.empty())
+ fixInPlaceInputs(LToLInputs, PSHUFLMask, LoMask, 0);
+ if (!LToHInputs.empty())
+ fixInPlaceInputs(HToHInputs, PSHUFHMask, HiMask, 4);
+
+ // Now gather the cross-half inputs and place them into a free dword of
+ // their target half.
+ // FIXME: This operation could almost certainly be simplified dramatically to
+ // look more like the 3-1 fixing operation.
+ auto moveInputsToRightHalf = [&PSHUFDMask](
+ MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
+ MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
+ int SourceOffset, int DestOffset) {
+ auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
+ return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
+ };
+ auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
+ int Word) {
+ int LowWord = Word & ~1;
+ int HighWord = Word | 1;
+ return isWordClobbered(SourceHalfMask, LowWord) ||
+ isWordClobbered(SourceHalfMask, HighWord);
+ };
+
+ if (IncomingInputs.empty())
+ return;
+
+ if (ExistingInputs.empty()) {
+ // Map any dwords with inputs from them into the right half.
+ for (int Input : IncomingInputs) {
+ // If the source half mask maps over the inputs, turn those into
+ // swaps and use the swapped lane.
+ if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
+ if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
+ SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
+ Input - SourceOffset;
+ // We have to swap the uses in our half mask in one sweep.
+ for (int &M : HalfMask)
+ if (M == SourceHalfMask[Input - SourceOffset])
+ M = Input;
+ else if (M == Input)
+ M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
+ } else {
+ assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
+ Input - SourceOffset &&
+ "Previous placement doesn't match!");
+ }
+ // Note that this correctly re-maps both when we do a swap and when
+ // we observe the other side of the swap above. We rely on that to
+ // avoid swapping the members of the input list directly.
+ Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
+ }
+
+ // Map the input's dword into the correct half.
+ if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
+ PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
+ else
+ assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
+ Input / 2 &&
+ "Previous placement doesn't match!");
+ }
+
+ // And just directly shift any other-half mask elements to be same-half
+ // as we will have mirrored the dword containing the element into the
+ // same position within that half.
+ for (int &M : HalfMask)
+ if (M >= SourceOffset && M < SourceOffset + 4) {
+ M = M - SourceOffset + DestOffset;
+ assert(M >= 0 && "This should never wrap below zero!");
+ }
+ return;
+ }
+
+ // Ensure we have the input in a viable dword of its current half. This
+ // is particularly tricky because the original position may be clobbered
+ // by inputs being moved and *staying* in that half.
+ if (IncomingInputs.size() == 1) {
+ if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
+ int InputFixed = std::find(std::begin(SourceHalfMask),
+ std::end(SourceHalfMask), -1) -
+ std::begin(SourceHalfMask) + SourceOffset;
+ SourceHalfMask[InputFixed - SourceOffset] =
+ IncomingInputs[0] - SourceOffset;
+ std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
+ InputFixed);
+ IncomingInputs[0] = InputFixed;
+ }
+ } else if (IncomingInputs.size() == 2) {
+ if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
+ isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
+ int SourceDWordBase = !isDWordClobbered(SourceHalfMask, 0) ? 0 : 2;
+ assert(!isDWordClobbered(SourceHalfMask, SourceDWordBase) &&
+ "Not all dwords can be clobbered!");
+ SourceHalfMask[SourceDWordBase] = IncomingInputs[0] - SourceOffset;
+ SourceHalfMask[SourceDWordBase + 1] = IncomingInputs[1] - SourceOffset;
+ for (int &M : HalfMask)
+ if (M == IncomingInputs[0])
+ M = SourceDWordBase + SourceOffset;
+ else if (M == IncomingInputs[1])
+ M = SourceDWordBase + 1 + SourceOffset;
+ IncomingInputs[0] = SourceDWordBase + SourceOffset;
+ IncomingInputs[1] = SourceDWordBase + 1 + SourceOffset;
+ }
+ } else {
+ llvm_unreachable("Unhandled input size!");
+ }
+
+ // Now hoist the DWord down to the right half.
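+ // The destination half owns dwords DestOffset/2 and DestOffset/2 + 1; pick
+ // whichever of those two slots is still unassigned in PSHUFDMask.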
+ int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
+ assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
+ PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
+ for (int Input : IncomingInputs)
+ std::replace(HalfMask.begin(), HalfMask.end(), Input,
+ FreeDWord * 2 + Input % 2);
+ };
+ moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask,
+ /*SourceOffset*/ 4, /*DestOffset*/ 0);
+ moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask,
+ /*SourceOffset*/ 0, /*DestOffset*/ 4);
+
+ // Now enact all the shuffles we've computed to move the inputs into their
+ // target half.
+ if (!isNoopShuffleMask(PSHUFLMask))
+ V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
+ getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG));
+ if (!isNoopShuffleMask(PSHUFHMask))
+ V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
+ getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG));
+ if (!isNoopShuffleMask(PSHUFDMask))
+ V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
+ DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
+ DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
+ getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
+
+ // At this point, each half should contain all its inputs, and we can then
+ // just shuffle them into their final position.
+ assert(std::count_if(LoMask.begin(), LoMask.end(),
+ [](int M) { return M >= 4; }) == 0 &&
+ "Failed to lift all the high half inputs to the low mask!");
+ assert(std::count_if(HiMask.begin(), HiMask.end(),
+ [](int M) { return M >= 0 && M < 4; }) == 0 &&
+ "Failed to lift all the low half inputs to the high mask!");
+
+ // Do a half shuffle for the low mask.
+ if (!isNoopShuffleMask(LoMask))
+ V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
+ getV4X86ShuffleImm8ForMask(LoMask, DAG));
+
+ // Do a half shuffle with the high mask after shifting its values down.
+ for (int &M : HiMask)
+ if (M >= 0)
+ M -= 4;
+ if (!isNoopShuffleMask(HiMask))
+ V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
+ getV4X86ShuffleImm8ForMask(HiMask, DAG));
+
+ return V;
+}
+
+/// \brief Detect whether the mask pattern should be lowered through
+/// interleaving.
+///
+/// This essentially tests whether viewing the mask as an interleaving of two
+/// sub-sequences reduces the cross-input traffic of a blend operation. If so,
+/// lowering it through interleaving is a significantly better strategy.
+static bool shouldLowerAsInterleaving(ArrayRef<int> Mask) {
+ int NumEvenInputs[2] = {0, 0};
+ int NumOddInputs[2] = {0, 0};
+ int NumLoInputs[2] = {0, 0};
+ int NumHiInputs[2] = {0, 0};
+ for (int i = 0, Size = Mask.size(); i < Size; ++i) {
+ if (Mask[i] < 0)
+ continue;
+
+ int InputIdx = Mask[i] >= Size;
+
+ if (i < Size / 2)
+ ++NumLoInputs[InputIdx];
+ else
+ ++NumHiInputs[InputIdx];
+
+ if ((i % 2) == 0)
+ ++NumEvenInputs[InputIdx];
+ else
+ ++NumOddInputs[InputIdx];
+ }
+
+ // The minimum number of cross-input results for both the interleaved and
+ // split cases. If interleaving results in fewer cross-input results, return
+ // true.
+ int InterleavedCrosses = std::min(NumEvenInputs[1] + NumOddInputs[0],
+ NumEvenInputs[0] + NumOddInputs[1]);
+ int SplitCrosses = std::min(NumLoInputs[1] + NumHiInputs[0],
+ NumLoInputs[0] + NumHiInputs[1]);
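+ // For example, the mask <0, 8, 1, 9, 2, 10, 3, 11> reads V1 in every even
+ // slot and V2 in every odd slot, so InterleavedCrosses is 0 while
+ // SplitCrosses is 4, and interleaving is clearly preferable.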
+ return InterleavedCrosses < SplitCrosses;
+}
+
+/// \brief Blend two v8i16 vectors using a naive unpack strategy.
+///
+/// This strategy only works when the inputs from each vector fit into a single
+/// half of that vector, and generally there are not so many inputs as to leave
+/// the in-place shuffles required highly constrained (and thus expensive). It
+/// shifts all the inputs into a single side of both input vectors and then
+/// uses an unpack to interleave these inputs in a single vector. At that
+/// point, we will fall back on the generic single input shuffle lowering.
+static SDValue lowerV8I16BasicBlendVectorShuffle(SDLoc DL, SDValue V1,
+ SDValue V2,
+ MutableArrayRef<int> Mask,
+ const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
+ assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
+ SmallVector<int, 3> LoV1Inputs, HiV1Inputs, LoV2Inputs, HiV2Inputs;
+ for (int i = 0; i < 8; ++i)
+ if (Mask[i] >= 0 && Mask[i] < 4)
+ LoV1Inputs.push_back(i);
+ else if (Mask[i] >= 4 && Mask[i] < 8)
+ HiV1Inputs.push_back(i);
+ else if (Mask[i] >= 8 && Mask[i] < 12)
+ LoV2Inputs.push_back(i);
+ else if (Mask[i] >= 12)
+ HiV2Inputs.push_back(i);
+
+ int NumV1Inputs = LoV1Inputs.size() + HiV1Inputs.size();
+ int NumV2Inputs = LoV2Inputs.size() + HiV2Inputs.size();
+ (void)NumV1Inputs;
+ (void)NumV2Inputs;
+ assert(NumV1Inputs > 0 && NumV1Inputs <= 3 && "At most 3 inputs supported");
+ assert(NumV2Inputs > 0 && NumV2Inputs <= 3 && "At most 3 inputs supported");
+ assert(NumV1Inputs + NumV2Inputs <= 4 && "At most 4 combined inputs");
+
+ bool MergeFromLo = LoV1Inputs.size() + LoV2Inputs.size() >=
+ HiV1Inputs.size() + HiV2Inputs.size();
+
+ auto moveInputsToHalf = [&](SDValue V, ArrayRef<int> LoInputs,
+ ArrayRef<int> HiInputs, bool MoveToLo,
+ int MaskOffset) {
+ ArrayRef<int> GoodInputs = MoveToLo ? LoInputs : HiInputs;
+ ArrayRef<int> BadInputs = MoveToLo ? HiInputs : LoInputs;
+ if (BadInputs.empty())
+ return V;
+
+ int MoveMask[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+ int MoveOffset = MoveToLo ? 0 : 4;
+
+ if (GoodInputs.empty()) {
+ for (int BadInput : BadInputs) {
+ MoveMask[Mask[BadInput] % 4 + MoveOffset] = Mask[BadInput] - MaskOffset;
+ Mask[BadInput] = Mask[BadInput] % 4 + MoveOffset + MaskOffset;
+ }
+ } else {
+ if (GoodInputs.size() == 2) {
+ // If the low inputs are spread across two dwords, pack them into
+ // a single dword.
+ MoveMask[Mask[GoodInputs[0]] % 2 + MoveOffset] =
+ Mask[GoodInputs[0]] - MaskOffset;
+ MoveMask[Mask[GoodInputs[1]] % 2 + MoveOffset] =
+ Mask[GoodInputs[1]] - MaskOffset;
+ Mask[GoodInputs[0]] = Mask[GoodInputs[0]] % 2 + MoveOffset + MaskOffset;
+ Mask[GoodInputs[1]] = Mask[GoodInputs[1]] % 2 + MoveOffset + MaskOffset;
+ } else {
+ // Otherwise pin the low inputs.
+ for (int GoodInput : GoodInputs)
+ MoveMask[Mask[GoodInput] - MaskOffset] = Mask[GoodInput] - MaskOffset;
+ }
+
+ int MoveMaskIdx =
+ std::find(std::begin(MoveMask) + MoveOffset, std::end(MoveMask), -1) -
+ std::begin(MoveMask);
+ assert(MoveMaskIdx >= MoveOffset && "Established above");
+
+ if (BadInputs.size() == 2) {
+ assert(MoveMask[MoveMaskIdx] == -1 && "Expected empty slot");
+ assert(MoveMask[MoveMaskIdx + 1] == -1 && "Expected empty slot");
+ MoveMask[MoveMaskIdx + Mask[BadInputs[0]] % 2] =
+ Mask[BadInputs[0]] - MaskOffset;
+ MoveMask[MoveMaskIdx + Mask[BadInputs[1]] % 2] =
+ Mask[BadInputs[1]] - MaskOffset;
+ Mask[BadInputs[0]] = MoveMaskIdx + Mask[BadInputs[0]] % 2 + MaskOffset;
+ Mask[BadInputs[1]] = MoveMaskIdx + Mask[BadInputs[1]] % 2 + MaskOffset;
+ } else {
+ assert(BadInputs.size() == 1 && "All sizes handled");
+ MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
+ Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
+ }
+ }
+
+ return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
+ MoveMask);
+ };
+ V1 = moveInputsToHalf(V1, LoV1Inputs, HiV1Inputs, MergeFromLo,
+ /*MaskOffset*/ 0);
+ V2 = moveInputsToHalf(V2, LoV2Inputs, HiV2Inputs, MergeFromLo,
+ /*MaskOffset*/ 8);
+
+ // FIXME: Select an interleaving of the merge of V1 and V2 that minimizes
+ // cross-half traffic in the final shuffle.
+
+ // Munge the mask to be a single-input mask after the unpack merges the
+ // results.
+ for (int &M : Mask)
+ if (M != -1)
+ M = 2 * (M % 4) + (M / 8);
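+ // After the unpack, the j-th word of V1's merged half lands in lane 2 * j
+ // and the j-th word of V2's merged half in lane 2 * j + 1, which is what the
+ // remapping above encodes.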
+
+ return DAG.getVectorShuffle(
+ MVT::v8i16, DL, DAG.getNode(MergeFromLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
+ DL, MVT::v8i16, V1, V2),
+ DAG.getUNDEF(MVT::v8i16), Mask);
+}
+
+/// \brief Generic lowering of 8-lane i16 shuffles.
+///
+/// This handles both single-input shuffles and combined shuffle/blends with
+/// two inputs. The single input shuffles are immediately delegated to
+/// a dedicated lowering routine.
+///
+/// The blends are lowered in one of three fundamental ways. If there are few
+/// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
+/// of the input is significantly cheaper when lowered as an interleaving of
+/// the two inputs, try to interleave them. Otherwise, blend the low and high
+/// halves of the inputs separately (making them have relatively few inputs)
+/// and then concatenate them.
+static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
+ const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ SDLoc DL(Op);
+ assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
+ assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
+ assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+ ArrayRef<int> OrigMask = SVOp->getMask();
+ int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
+ OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
+ MutableArrayRef<int> Mask(MaskStorage);
+
+ assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
+
+ auto isV1 = [](int M) { return M >= 0 && M < 8; };
+ auto isV2 = [](int M) { return M >= 8; };
+
+ int NumV1Inputs = std::count_if(Mask.begin(), Mask.end(), isV1);
+ int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);
+
+ if (NumV2Inputs == 0)
+ return lowerV8I16SingleInputVectorShuffle(DL, V1, Mask, Subtarget, DAG);
+
+ assert(NumV1Inputs > 0 && "All single-input shuffles should be canonicalized "
+ "to be V1-input shuffles.");
+
+ if (NumV1Inputs + NumV2Inputs <= 4)
+ return lowerV8I16BasicBlendVectorShuffle(DL, V1, V2, Mask, Subtarget, DAG);
+
+ // Check whether an interleaving lowering is likely to be more efficient.
+ // This isn't perfect but it is a strong heuristic that tends to work well on
+ // the kinds of shuffles that show up in practice.
+ //
+ // FIXME: Handle 1x, 2x, and 4x interleaving.
+ if (shouldLowerAsInterleaving(Mask)) {
+ // FIXME: Figure out whether we should pack these into the low or high
+ // halves.
+
+ int EMask[8], OMask[8];
+ for (int i = 0; i < 4; ++i) {
+ EMask[i] = Mask[2*i];
+ OMask[i] = Mask[2*i + 1];
+ EMask[i + 4] = -1;
+ OMask[i + 4] = -1;
+ }
+
+ SDValue Evens = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, EMask);
+ SDValue Odds = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, OMask);
+
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, Evens, Odds);
+ }
+
+ int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
+ int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+ for (int i = 0; i < 4; ++i) {
+ LoBlendMask[i] = Mask[i];
+ HiBlendMask[i] = Mask[i + 4];
+ }
+
+ SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
+ SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
+ LoV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, LoV);
+ HiV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, HiV);
+
+ return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
+ DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, LoV, HiV));
+}
+
+/// \brief Generic lowering of v16i8 shuffles.
+///
+/// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
+/// detect any complexity reducing interleaving. If that doesn't help, it uses
+/// UNPCK to spread the i8 elements across two i16-element vectors, and uses
+/// the existing lowering for v8i16 blends on each half, finally PACK-ing them
+/// back together.
+static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
+ const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ SDLoc DL(Op);
+ assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
+ assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
+ assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+ ArrayRef<int> OrigMask = SVOp->getMask();
+ assert(OrigMask.size() == 16 && "Unexpected mask size for v16 shuffle!");
+ int MaskStorage[16] = {
+ OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
+ OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7],
+ OrigMask[8], OrigMask[9], OrigMask[10], OrigMask[11],
+ OrigMask[12], OrigMask[13], OrigMask[14], OrigMask[15]};
+ MutableArrayRef<int> Mask(MaskStorage);
+ MutableArrayRef<int> LoMask = Mask.slice(0, 8);
+ MutableArrayRef<int> HiMask = Mask.slice(8, 8);
+
+ // For single-input shuffles, there are some nicer lowering tricks we can use.
+ if (isSingleInputShuffleMask(Mask)) {
+ // Check whether we can widen this to an i16 shuffle by duplicating bytes.
+ // Notably, this handles splat and partial-splat shuffles more efficiently.
+ // However, it only makes sense if the pre-duplication shuffle simplifies
+ // things significantly. Currently, this means we need to be able to
+ // express the pre-duplication shuffle as an i16 shuffle.
+ //
+ // FIXME: We should check for other patterns which can be widened into an
+ // i16 shuffle as well.
+ auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
+ for (int i = 0; i < 16; i += 2) {
+ if (Mask[i] != Mask[i + 1])
+ return false;
+ }
+ return true;
+ };
+ auto tryToWidenViaDuplication = [&]() -> SDValue {
+ if (!canWidenViaDuplication(Mask))
+ return SDValue();
+ SmallVector<int, 4> LoInputs;
+ std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
+ [](int M) { return M >= 0 && M < 8; });
+ std::sort(LoInputs.begin(), LoInputs.end());
+ LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
+ LoInputs.end());
+ SmallVector<int, 4> HiInputs;
+ std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
+ [](int M) { return M >= 8; });
+ std::sort(HiInputs.begin(), HiInputs.end());
+ HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
+ HiInputs.end());
+
+ bool TargetLo = LoInputs.size() >= HiInputs.size();
+ ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
+ ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
+
+ int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+ SmallDenseMap<int, int, 8> LaneMap;
+ for (int I : InPlaceInputs) {
+ PreDupI16Shuffle[I/2] = I/2;
+ LaneMap[I] = I;
+ }
+ int j = TargetLo ? 0 : 4, je = j + 4;
+ for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
+ // Check if j is already a shuffle of this input. This happens when
+ // there are two adjacent bytes after we move the low one.
+ if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
+ // If we haven't yet mapped the input, search for a slot into which
+ // we can map it.
+ while (j < je && PreDupI16Shuffle[j] != -1)
+ ++j;
+
+ if (j == je)
+ // We can't place the inputs into a single half with a simple i16
+ // shuffle, so bail.
+ return SDValue();
+
+ // Map this input with the i16 shuffle.
+ PreDupI16Shuffle[j] = MovingInputs[i] / 2;
+ }
+
+ // Update the lane map based on the mapping we ended up with.
+ LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
+ }
+ V1 = DAG.getNode(
+ ISD::BITCAST, DL, MVT::v16i8,
+ DAG.getVectorShuffle(MVT::v8i16, DL,
+ DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
+ DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
+
+ // Unpack the bytes to form the i16s that will be shuffled into place.
+ V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
+ MVT::v16i8, V1, V1);
+
+ int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
+ for (int i = 0; i < 16; i += 2) {
+ if (Mask[i] != -1)
+ PostDupI16Shuffle[i / 2] = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
+ assert(PostDupI16Shuffle[i / 2] < 8 && "Invalid v8 shuffle mask!");
+ }
+ return DAG.getNode(
+ ISD::BITCAST, DL, MVT::v16i8,
+ DAG.getVectorShuffle(MVT::v8i16, DL,
+ DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
+ DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
+ };
+ if (SDValue V = tryToWidenViaDuplication())
+ return V;
+ }
+
+ // Check whether an interleaving lowering is likely to be more efficient.
+ // This isn't perfect but it is a strong heuristic that tends to work well on
+ // the kinds of shuffles that show up in practice.
+ //
+ // FIXME: We need to handle other interleaving widths (i16, i32, ...).
+ if (shouldLowerAsInterleaving(Mask)) {
+ // FIXME: Figure out whether we should pack these into the low or high
+ // halves.
+
+ int EMask[16], OMask[16];
+ for (int i = 0; i < 8; ++i) {
+ EMask[i] = Mask[2*i];
+ OMask[i] = Mask[2*i + 1];
+ EMask[i + 8] = -1;
+ OMask[i + 8] = -1;
+ }
+
+ SDValue Evens = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, EMask);
+ SDValue Odds = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, OMask);
+
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, Evens, Odds);
+ }
+
+ int V1LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
+ int V1HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
+ int V2LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
+ int V2HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+ auto buildBlendMasks = [](MutableArrayRef<int> HalfMask,
+ MutableArrayRef<int> V1HalfBlendMask,
+ MutableArrayRef<int> V2HalfBlendMask) {
+ for (int i = 0; i < 8; ++i)
+ if (HalfMask[i] >= 0 && HalfMask[i] < 16) {
+ V1HalfBlendMask[i] = HalfMask[i];
+ HalfMask[i] = i;
+ } else if (HalfMask[i] >= 16) {
+ V2HalfBlendMask[i] = HalfMask[i] - 16;
+ HalfMask[i] = i + 8;
+ }
+ };
+ buildBlendMasks(LoMask, V1LoBlendMask, V2LoBlendMask);
+ buildBlendMasks(HiMask, V1HiBlendMask, V2HiBlendMask);
+
+ SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
+
+ auto buildLoAndHiV8s = [&](SDValue V, MutableArrayRef<int> LoBlendMask,
+ MutableArrayRef<int> HiBlendMask) {
+ SDValue V1, V2;
+ // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
+ // them out and avoid using UNPCK{L,H} to extract the elements of V as
+ // i16s.
+ if (std::none_of(LoBlendMask.begin(), LoBlendMask.end(),
+ [](int M) { return M >= 0 && M % 2 == 1; }) &&
+ std::none_of(HiBlendMask.begin(), HiBlendMask.end(),
+ [](int M) { return M >= 0 && M % 2 == 1; })) {
+ // Use a mask to drop the high bytes.
+ V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
+ V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, V1,
+ DAG.getConstant(0x00FF, MVT::v8i16));
+
+ // This will be a single vector shuffle instead of a blend so nuke V2.
+ V2 = DAG.getUNDEF(MVT::v8i16);
+
+ // Squash the masks to point directly into V1.
+ for (int &M : LoBlendMask)
+ if (M >= 0)
+ M /= 2;
+ for (int &M : HiBlendMask)
+ if (M >= 0)
+ M /= 2;
+ } else {
+ // Otherwise just unpack the low half of V into V1 and the high half into
+ // V2 so that we can blend them as i16s.
+ V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
+ DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
+ V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
+ DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
+ }
+
+ SDValue BlendedLo = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
+ SDValue BlendedHi = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
+ return std::make_pair(BlendedLo, BlendedHi);
+ };
+ SDValue V1Lo, V1Hi, V2Lo, V2Hi;
+ std::tie(V1Lo, V1Hi) = buildLoAndHiV8s(V1, V1LoBlendMask, V1HiBlendMask);
+ std::tie(V2Lo, V2Hi) = buildLoAndHiV8s(V2, V2LoBlendMask, V2HiBlendMask);
+
+ SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Lo, V2Lo, LoMask);
+ SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Hi, V2Hi, HiMask);
+
+ return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
+}
+
+/// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
+///
+/// This routine breaks down the specific type of 128-bit shuffle and
+/// dispatches to the lowering routines accordingly.
+static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
+ MVT VT, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ switch (VT.SimpleTy) {
+ case MVT::v2i64:
+ return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ case MVT::v2f64:
+ return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ case MVT::v4i32:
+ return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ case MVT::v4f32:
+ return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ case MVT::v8i16:
+ return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ case MVT::v16i8:
+ return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
+
+ default:
+ llvm_unreachable("Unimplemented!");
+ }
+}
+
+/// \brief Tiny helper function to test whether adjacent masks are sequential.
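+/// For example, <0, 1, 6, 7> passes (each even/odd pair is consecutive) while
+/// <0, 2, 4, 6> does not.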
+static bool areAdjacentMasksSequential(ArrayRef<int> Mask) {
+ for (int i = 0, Size = Mask.size(); i < Size; i += 2)
+ if (Mask[i] + 1 != Mask[i+1])
+ return false;
+
+ return true;
+}
+
+/// \brief Top-level lowering for x86 vector shuffles.
+///
+/// This handles decomposition, canonicalization, and lowering of all x86
+/// vector shuffles. Most of the specific lowering strategies are encapsulated
+/// above in helper routines. The canonicalization attempts to widen shuffles
+/// to involve fewer lanes of wider elements, consolidate symmetric patterns
+/// s.t. only one of the two inputs needs to be tested, etc.
+static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+ ArrayRef<int> Mask = SVOp->getMask();
+ SDValue V1 = Op.getOperand(0);
+ SDValue V2 = Op.getOperand(1);
+ MVT VT = Op.getSimpleValueType();
+ int NumElements = VT.getVectorNumElements();
+ SDLoc dl(Op);
+
+ assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
+
+ bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
+ bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
+ if (V1IsUndef && V2IsUndef)
+ return DAG.getUNDEF(VT);
+
+ // When we create a shuffle node we put the UNDEF node to second operand,
+ // but in some cases the first operand may be transformed to UNDEF.
+ // In this case we should just commute the node.
+ if (V1IsUndef)
+ return DAG.getCommutedVectorShuffle(*SVOp);
+
+ // Check for non-undef masks pointing at an undef vector and make the masks
+ // undef as well. This makes it easier to match the shuffle based solely on
+ // the mask.
+ if (V2IsUndef)
+ for (int M : Mask)
+ if (M >= NumElements) {
+ SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
+ for (int &M : NewMask)
+ if (M >= NumElements)
+ M = -1;
+ return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
+ }
+
+ // For integer vector shuffles, try to collapse them into a shuffle of fewer
+ // lanes but wider integers. We cap this to not form integers larger than i64
+ // but it might be interesting to form i128 integers to handle flipping the
+ // low and high halves of AVX 256-bit vectors.
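+ // For example, a v8i16 shuffle with mask <0, 1, 4, 5, 6, 7, 2, 3> collapses
+ // to a v4i32 shuffle with mask <0, 2, 3, 1>.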
+ if (VT.isInteger() && VT.getScalarSizeInBits() < 64 &&
+ areAdjacentMasksSequential(Mask)) {
+ SmallVector<int, 8> NewMask;
+ for (int i = 0, Size = Mask.size(); i < Size; i += 2)
+ NewMask.push_back(Mask[i] / 2);
+ MVT NewVT =
+ MVT::getVectorVT(MVT::getIntegerVT(VT.getScalarSizeInBits() * 2),
+ VT.getVectorNumElements() / 2);
+ V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
+ V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
+ return DAG.getNode(ISD::BITCAST, dl, VT,
+ DAG.getVectorShuffle(NewVT, dl, V1, V2, NewMask));
+ }
+
+ int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
+ for (int M : SVOp->getMask())
+ if (M < 0)
+ ++NumUndefElements;
+ else if (M < NumElements)
+ ++NumV1Elements;
+ else
+ ++NumV2Elements;
+
+ // Commute the shuffle as needed such that more elements come from V1 than
+ // V2. This allows us to match the shuffle pattern strictly on how many
+ // elements come from V1 without handling the symmetric cases.
+ if (NumV2Elements > NumV1Elements)
+ return DAG.getCommutedVectorShuffle(*SVOp);
+
+ // When the number of V1 and V2 elements are the same, try to minimize the
+ // number of uses of V2 in the low half of the vector.
+ if (NumV1Elements == NumV2Elements) {
+ int LowV1Elements = 0, LowV2Elements = 0;
+ for (int M : SVOp->getMask().slice(0, NumElements / 2))
+ if (M >= NumElements)
+ ++LowV2Elements;
+ else if (M >= 0)
+ ++LowV1Elements;
+ if (LowV2Elements > LowV1Elements)
+ return DAG.getCommutedVectorShuffle(*SVOp);
+ }
+
+ // For each vector width, delegate to a specialized lowering routine.
+ if (VT.getSizeInBits() == 128)
+ return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
+
+ llvm_unreachable("Unimplemented!");
+}
+
+
+//===----------------------------------------------------------------------===//
+// Legacy vector shuffle lowering
+//
+// This code is the legacy code handling vector shuffles until the above
+// replaces its functionality and performance.
+//===----------------------------------------------------------------------===//
+
+static bool isBlendMask(ArrayRef<int> MaskVals, MVT VT, bool hasSSE41,
+ bool hasInt256, unsigned *MaskOut = nullptr) {
MVT EltVT = VT.getVectorElementType();
- unsigned NumElems = VT.getVectorNumElements();
// There is no blend with immediate in AVX-512.
if (VT.is512BitVector())
- return SDValue();
+ return false;
- if (!Subtarget->hasSSE41() || EltVT == MVT::i8)
- return SDValue();
- if (!Subtarget->hasInt256() && VT == MVT::v16i16)
- return SDValue();
+ if (!hasSSE41 || EltVT == MVT::i8)
+ return false;
+ if (!hasInt256 && VT == MVT::v16i16)
+ return false;
- // Check the mask for BLEND and build the value.
unsigned MaskValue = 0;
+ unsigned NumElems = VT.getVectorNumElements();
// There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
- unsigned NumLanes = (NumElems-1)/8 + 1;
+ unsigned NumLanes = (NumElems - 1) / 8 + 1;
unsigned NumElemsInLane = NumElems / NumLanes;
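+ // For example, the v4i32 mask <0, 5, 2, 7> takes elements 1 and 3 from the
+ // second input, so MaskValue becomes 0b1010.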
// Blend for v16i16 should be symmetric for both lanes.
for (unsigned i = 0; i < NumElemsInLane; ++i) {
- int SndLaneEltIdx = (NumLanes == 2) ?
- SVOp->getMaskElt(i + NumElemsInLane) : -1;
- int EltIdx = SVOp->getMaskElt(i);
+ int SndLaneEltIdx = (NumLanes == 2) ? MaskVals[i + NumElemsInLane] : -1;
+ int EltIdx = MaskVals[i];
if ((EltIdx < 0 || EltIdx == (int)i) &&
(SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane)))
@@ -6208,11 +8038,34 @@ LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
if (((unsigned)EltIdx == (i + NumElems)) &&
(SndLaneEltIdx < 0 ||
(unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane))
- MaskValue |= (1<<i);
+ MaskValue |= (1 << i);
else
- return SDValue();
+ return false;
}
+ if (MaskOut)
+ *MaskOut = MaskValue;
+ return true;
+}
+
+// Try to lower a shuffle node into a simple blend instruction.
+// This function assumes isBlendMask returns true for this
+// ShuffleVectorSDNode.
+static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
+ unsigned MaskValue,
+ const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ MVT VT = SVOp->getSimpleValueType(0);
+ MVT EltVT = VT.getVectorElementType();
+ assert(isBlendMask(SVOp->getMask(), VT, Subtarget->hasSSE41(),
+ Subtarget->hasInt256()) &&
+ "Trying to lower a VECTOR_SHUFFLE to a Blend but with the wrong mask");
+ SDValue V1 = SVOp->getOperand(0);
+ SDValue V2 = SVOp->getOperand(1);
+ SDLoc dl(SVOp);
+ unsigned NumElems = VT.getVectorNumElements();
+
// Convert i32 vectors to floating point if it is not AVX2.
// AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
MVT BlendVT = VT;
@@ -6228,6 +8081,58 @@ LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
}
+/// In vector type \p VT, return true if the element at index \p InputIdx
+/// falls on a different 128-bit lane than \p OutputIdx.
+static bool ShuffleCrosses128bitLane(MVT VT, unsigned InputIdx,
+ unsigned OutputIdx) {
+ unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ return InputIdx * EltSize / 128 != OutputIdx * EltSize / 128;
+}
+
+/// Generate a PSHUFB if possible. Selects elements from \p V1 according to
+/// \p MaskVals. MaskVals[OutputIdx] = InputIdx specifies that we want to
+/// shuffle the element at InputIdx in V1 to OutputIdx in the result. If \p
+/// MaskVals refers to elements outside of \p V1 or is undef (-1), insert a
+/// zero.
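+/// For a v4i32 shuffle that places element 2 into slot 0, for instance, the
+/// first four PSHUFB control bytes are 8, 9, 10, 11.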
+static SDValue getPSHUFB(ArrayRef<int> MaskVals, SDValue V1, SDLoc &dl,
+ SelectionDAG &DAG) {
+ MVT VT = V1.getSimpleValueType();
+ assert(VT.is128BitVector() || VT.is256BitVector());
+
+ MVT EltVT = VT.getVectorElementType();
+ unsigned EltSizeInBytes = EltVT.getSizeInBits() / 8;
+ unsigned NumElts = VT.getVectorNumElements();
+
+ SmallVector<SDValue, 32> PshufbMask;
+ for (unsigned OutputIdx = 0; OutputIdx < NumElts; ++OutputIdx) {
+ int InputIdx = MaskVals[OutputIdx];
+ unsigned InputByteIdx;
+
+ if (InputIdx < 0 || NumElts <= (unsigned)InputIdx)
+ InputByteIdx = 0x80;
+ else {
+ // Cross lane is not allowed.
+ if (ShuffleCrosses128bitLane(VT, InputIdx, OutputIdx))
+ return SDValue();
+ InputByteIdx = InputIdx * EltSizeInBytes;
+ // Index is a byte offset within the 128-bit lane.
+ InputByteIdx &= 0xf;
+ }
+
+ for (unsigned j = 0; j < EltSizeInBytes; ++j) {
+ PshufbMask.push_back(DAG.getConstant(InputByteIdx, MVT::i8));
+ if (InputByteIdx != 0x80)
+ ++InputByteIdx;
+ }
+ }
+
+ MVT ShufVT = MVT::getVectorVT(MVT::i8, PshufbMask.size());
+ if (ShufVT != VT)
+ V1 = DAG.getNode(ISD::BITCAST, dl, ShufVT, V1);
+ return DAG.getNode(X86ISD::PSHUFB, dl, ShufVT, V1,
+ DAG.getNode(ISD::BUILD_VECTOR, dl, ShufVT, PshufbMask));
+}
+
// v8i16 shuffles - Prefer shuffles in the following order:
// 1. [all] pshuflw, pshufhw, optional move
// 2. [ssse3] 1 x pshufb
@@ -6245,8 +8150,12 @@ LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
// Determine if more than 1 of the words in each of the low and high quadwords
// of the result come from the same quadword of one of the two inputs. Undef
// mask values count as coming from any quadword, for better codegen.
+ //
+ // LoQuad[i] / HiQuad[i] count how many words of the result's low / high
+ // quadword come from the i-th quadword of the inputs, where quads 0 and 1
+ // refer to V1 and quads 2 and 3 refer to V2.
unsigned LoQuad[] = { 0, 0, 0, 0 };
unsigned HiQuad[] = { 0, 0, 0, 0 };
+ // Indices of quads used.
std::bitset<4> InputQuads;
for (unsigned i = 0; i < 8; ++i) {
unsigned *Quad = i < 4 ? LoQuad : HiQuad;
@@ -6283,7 +8192,7 @@ LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
// For SSSE3, If all 8 words of the result come from only 1 quadword of each
// of the two input vectors, shuffle them into one input vector so only a
- // single pshufb instruction is necessary. If There are more than 2 input
+ // single pshufb instruction is necessary. If there are more than 2 input
// quads, disable the next transformation since it does not help SSSE3.
bool V1Used = InputQuads[0] || InputQuads[1];
bool V2Used = InputQuads[2] || InputQuads[3];
@@ -6375,34 +8284,14 @@ LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
// mask, and elements that come from V1 in the V2 mask, so that the two
// results can be OR'd together.
bool TwoInputs = V1Used && V2Used;
- for (unsigned i = 0; i != 8; ++i) {
- int EltIdx = MaskVals[i] * 2;
- int Idx0 = (TwoInputs && (EltIdx >= 16)) ? 0x80 : EltIdx;
- int Idx1 = (TwoInputs && (EltIdx >= 16)) ? 0x80 : EltIdx+1;
- pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8));
- pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8));
- }
- V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V1);
- V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
- DAG.getNode(ISD::BUILD_VECTOR, dl,
- MVT::v16i8, &pshufbMask[0], 16));
+ V1 = getPSHUFB(MaskVals, V1, dl, DAG);
if (!TwoInputs)
return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
// Calculate the shuffle mask for the second input, shuffle it, and
// OR it with the first shuffled input.
- pshufbMask.clear();
- for (unsigned i = 0; i != 8; ++i) {
- int EltIdx = MaskVals[i] * 2;
- int Idx0 = (EltIdx < 16) ? 0x80 : EltIdx - 16;
- int Idx1 = (EltIdx < 16) ? 0x80 : EltIdx - 15;
- pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8));
- pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8));
- }
- V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V2);
- V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
- DAG.getNode(ISD::BUILD_VECTOR, dl,
- MVT::v16i8, &pshufbMask[0], 16));
+ CommuteVectorShuffleMask(MaskVals, 8);
+ V2 = getPSHUFB(MaskVals, V2, dl, DAG);
V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
}
@@ -6424,7 +8313,7 @@ LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
&MaskV[0]);
- if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) {
+ if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
NewV.getOperand(0),
@@ -6448,7 +8337,7 @@ LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
&MaskV[0]);
- if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) {
+ if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
NewV.getOperand(0),
@@ -6484,6 +8373,25 @@ LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
return NewV;
}
+/// \brief v16i16 shuffles
+///
+/// FIXME: We only support generation of a single pshufb currently. We can
+/// generalize the other applicable cases from LowerVECTOR_SHUFFLEv8i16 as
+/// well (e.g 2 x pshufb + 1 x por).
+static SDValue
+LowerVECTOR_SHUFFLEv16i16(SDValue Op, SelectionDAG &DAG) {
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+ SDValue V1 = SVOp->getOperand(0);
+ SDValue V2 = SVOp->getOperand(1);
+ SDLoc dl(SVOp);
+
+ if (V2.getOpcode() != ISD::UNDEF)
+ return SDValue();
+
+ SmallVector<int, 16> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
+ return getPSHUFB(MaskVals, V1, dl, DAG);
+}
+
// v16i8 shuffles - Prefer shuffles in the following order:
// 1. [ssse3] 1 x pshufb
// 2. [ssse3] 2 x pshufb + 1 x por
@@ -6524,7 +8432,7 @@ static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
}
V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
DAG.getNode(ISD::BUILD_VECTOR, dl,
- MVT::v16i8, &pshufbMask[0], 16));
+ MVT::v16i8, pshufbMask));
// As PSHUFB will zero elements with negative indices, it's safe to ignore
// the 2nd operand if it's undefined or zero.
@@ -6542,7 +8450,7 @@ static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
}
V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
DAG.getNode(ISD::BUILD_VECTOR, dl,
- MVT::v16i8, &pshufbMask[0], 16));
+ MVT::v16i8, pshufbMask));
return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
}
@@ -6642,22 +8550,7 @@ SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
CommuteVectorShuffleMask(MaskVals, 32);
V1 = V2;
}
- SmallVector<SDValue, 32> pshufbMask;
- for (unsigned i = 0; i != 32; i++) {
- int EltIdx = MaskVals[i];
- if (EltIdx < 0 || EltIdx >= 32)
- EltIdx = 0x80;
- else {
- if ((EltIdx >= 16 && i < 16) || (EltIdx < 16 && i >= 16))
- // Cross lane is not allowed.
- return SDValue();
- EltIdx &= 0xf;
- }
- pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
- }
- return DAG.getNode(X86ISD::PSHUFB, dl, MVT::v32i8, V1,
- DAG.getNode(ISD::BUILD_VECTOR, dl,
- MVT::v32i8, &pshufbMask[0], 32));
+ return getPSHUFB(MaskVals, V1, dl, DAG);
}
/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
@@ -6675,6 +8568,9 @@ SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
unsigned Scale;
switch (VT.SimpleTy) {
default: llvm_unreachable("Unexpected!");
+ case MVT::v2i64:
+ case MVT::v2f64:
+ return SDValue(SVOp, 0);
case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break;
case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break;
case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break;
@@ -6709,7 +8605,7 @@ static SDValue getVZextMovL(MVT VT, MVT OpVT,
SDValue SrcOp, SelectionDAG &DAG,
const X86Subtarget *Subtarget, SDLoc dl) {
if (VT == MVT::v2f64 || VT == MVT::v4f32) {
- LoadSDNode *LD = NULL;
+ LoadSDNode *LD = nullptr;
if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
LD = dyn_cast<LoadSDNode>(SrcOp);
if (!LD) {
@@ -6828,8 +8724,7 @@ LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
}
// Construct the output using a BUILD_VECTOR.
- Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, &SVOps[0],
- SVOps.size());
+ Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, SVOps);
} else if (InputUsed[0] < 0) {
// No input vectors were used! The result is undefined.
Output[l] = DAG.getUNDEF(NVT);
@@ -7111,6 +9006,110 @@ SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
getShuffleSHUFImmediate(SVOp), DAG);
}
+static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
+ SelectionDAG &DAG) {
+ SDLoc dl(Load);
+ MVT VT = Load->getSimpleValueType(0);
+ MVT EVT = VT.getVectorElementType();
+ SDValue Addr = Load->getOperand(1);
+ SDValue NewAddr = DAG.getNode(
+ ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
+ DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));
+
+ SDValue NewLoad =
+ DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
+ DAG.getMachineFunction().getMachineMemOperand(
+ Load->getMemOperand(), 0, EVT.getStoreSize()));
+ return NewLoad;
+}
+
+// It is only safe to call this function if isINSERTPSMask is true for
+// this shufflevector mask.
+static SDValue getINSERTPS(ShuffleVectorSDNode *SVOp, SDLoc &dl,
+ SelectionDAG &DAG) {
+ // Generate an insertps instruction when inserting an f32 from memory onto a
+ // v4f32 or when copying a member from one v4f32 to another.
+ // We also use it for transferring i32 from one register to another,
+ // since it simply copies the same bits.
+ // If we're transferring an i32 from memory to a specific element in a
+ // register, we output a generic DAG that will match the PINSRD
+ // instruction.
+ MVT VT = SVOp->getSimpleValueType(0);
+ MVT EVT = VT.getVectorElementType();
+ SDValue V1 = SVOp->getOperand(0);
+ SDValue V2 = SVOp->getOperand(1);
+ auto Mask = SVOp->getMask();
+ assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
+ "unsupported vector type for insertps/pinsrd");
+
+ auto FromV1Predicate = [](const int &i) { return i < 4 && i > -1; };
+ auto FromV2Predicate = [](const int &i) { return i >= 4; };
+ int FromV1 = std::count_if(Mask.begin(), Mask.end(), FromV1Predicate);
+
+ SDValue From;
+ SDValue To;
+ unsigned DestIndex;
+ if (FromV1 == 1) {
+ From = V1;
+ To = V2;
+ DestIndex = std::find_if(Mask.begin(), Mask.end(), FromV1Predicate) -
+ Mask.begin();
+
+ // If we have 1 element from each vector, we have to check if we're
+ // changing V1's element's place. If so, we're done. Otherwise, we
+ // should assume we're changing V2's element's place and behave
+ // accordingly.
+ int FromV2 = std::count_if(Mask.begin(), Mask.end(), FromV2Predicate);
+ if (FromV1 == FromV2 && DestIndex == Mask[DestIndex] % 4) {
+ From = V2;
+ To = V1;
+ DestIndex =
+ std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
+ }
+ } else {
+ assert(std::count_if(Mask.begin(), Mask.end(), FromV2Predicate) == 1 &&
+ "More than one element from V1 and from V2, or no elements from one "
+ "of the vectors. This case should not have returned true from "
+ "isINSERTPSMask");
+ From = V2;
+ To = V1;
+ DestIndex =
+ std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
+ }
+
+ // Get an index into the source vector in the range [0,4) (the mask is
+ // in the range [0,8) because it can address V1 and V2)
+ unsigned SrcIndex = Mask[DestIndex] % 4;
+ if (MayFoldLoad(From)) {
+ // Trivial case, when From comes from a load and is only used by the
+ // shuffle. Make it use insertps from the vector that we need from that
+ // load.
+ SDValue NewLoad =
+ NarrowVectorLoadToElement(cast<LoadSDNode>(From), SrcIndex, DAG);
+ if (!NewLoad.getNode())
+ return SDValue();
+
+ if (EVT == MVT::f32) {
+ // Create this as a scalar to vector to match the instruction pattern.
+ SDValue LoadScalarToVector =
+ DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, NewLoad);
+ SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4);
+ return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, LoadScalarToVector,
+ InsertpsMask);
+ } else { // EVT == MVT::i32
+ // If we're getting an i32 from memory, use an INSERT_VECTOR_ELT
+ // instruction, to match the PINSRD instruction, which loads an i32 to a
+ // certain vector element.
+ return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, To, NewLoad,
+ DAG.getConstant(DestIndex, MVT::i32));
+ }
+ }
+
+ // Vector-element-to-vector
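+ // The insertps immediate encodes the source element in bits [7:6] and the
+ // destination slot in bits [5:4] (bits [3:0] are the zero mask).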
+ SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4 | SrcIndex << 6);
+ return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, From, InsertpsMask);
+}
+
// Reduce a vector shuffle to zext.
static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
SelectionDAG &DAG) {
@@ -7199,9 +9198,8 @@ static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
}
-static SDValue
-NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
- SelectionDAG &DAG) {
+static SDValue NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
@@ -7226,31 +9224,29 @@ NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
// If the shuffle can be profitably rewritten as a narrower shuffle, then
// do it!
- if (VT == MVT::v8i16 || VT == MVT::v16i8 ||
- VT == MVT::v16i16 || VT == MVT::v32i8) {
+ if (VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v16i16 ||
+ VT == MVT::v32i8) {
SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
if (NewOp.getNode())
return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
- } else if ((VT == MVT::v4i32 ||
- (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
+ } else if (VT.is128BitVector() && Subtarget->hasSSE2()) {
// FIXME: Figure out a cleaner way to do this.
- // Try to make use of movq to zero out the top part.
if (ISD::isBuildVectorAllZeros(V2.getNode())) {
SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
if (NewOp.getNode()) {
MVT NewVT = NewOp.getSimpleValueType();
if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
NewVT, true, false))
- return getVZextMovL(VT, NewVT, NewOp.getOperand(0),
- DAG, Subtarget, dl);
+ return getVZextMovL(VT, NewVT, NewOp.getOperand(0), DAG, Subtarget,
+ dl);
}
} else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
if (NewOp.getNode()) {
MVT NewVT = NewOp.getSimpleValueType();
if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
- return getVZextMovL(VT, NewVT, NewOp.getOperand(1),
- DAG, Subtarget, dl);
+ return getVZextMovL(VT, NewVT, NewOp.getOperand(1), DAG, Subtarget,
+ dl);
}
}
}
@@ -7276,12 +9272,21 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
bool OptForSize = MF.getFunction()->getAttributes().
hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
+ // Check if we should use the experimental vector shuffle lowering. If so,
+ // delegate completely to that code path.
+ if (ExperimentalVectorShuffleLowering)
+ return lowerVectorShuffle(Op, Subtarget, DAG);
+
assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
if (V1IsUndef && V2IsUndef)
return DAG.getUNDEF(VT);
- assert(!V1IsUndef && "Op 1 of shuffle should not be undef");
+ // When we create a shuffle node we put the UNDEF node to second operand,
+ // but in some cases the first operand may be transformed to UNDEF.
+ // In this case we should just commute the node.
+ if (V1IsUndef)
+ return DAG.getCommutedVectorShuffle(*SVOp);
// Vector shuffle lowering takes 3 steps:
//
@@ -7393,7 +9398,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
if (ShouldXformToMOVHLPS(M, VT) ||
ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
- return CommuteVectorShuffle(SVOp, DAG);
+ return DAG.getCommutedVectorShuffle(*SVOp);
if (isShift) {
// No better options. Use a vshldq / vsrldq.
@@ -7405,8 +9410,13 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
bool Commuted = false;
// FIXME: This should also accept a bitcast of a splat? Be careful, not
// 1,1,1,1 -> v8i16 though.
- V1IsSplat = isSplatVector(V1.getNode());
- V2IsSplat = isSplatVector(V2.getNode());
+ BitVector UndefElements;
+ if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V1.getNode()))
+ if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
+ V1IsSplat = true;
+ if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V2.getNode()))
+ if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
+ V2IsSplat = true;
// Canonicalize the splat or undef, if present, to be on the RHS.
if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
@@ -7450,7 +9460,6 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
CommuteVectorShuffleMask(M, NumElems);
std::swap(V1, V2);
std::swap(V1IsSplat, V2IsSplat);
- Commuted = false;
if (isUNPCKLMask(M, VT, HasInt256))
return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
@@ -7461,7 +9470,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
// Normalize the node to match x86 shuffle ops if needed
if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true)))
- return CommuteVectorShuffle(SVOp, DAG);
+ return DAG.getCommutedVectorShuffle(*SVOp);
// The checks below are all present in isShuffleMaskLegal, but they are
// inlined here right now to enable us to directly emit target specific
@@ -7483,6 +9492,11 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
getShufflePSHUFLWImmediate(SVOp),
DAG);
+ unsigned MaskValue;
+ if (isBlendMask(M, VT, Subtarget->hasSSE41(), Subtarget->hasInt256(),
+ &MaskValue))
+ return LowerVECTOR_SHUFFLEtoBlend(SVOp, MaskValue, Subtarget, DAG);
+
if (isSHUFPMask(M, VT))
return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
getShuffleSHUFImmediate(SVOp), DAG);
@@ -7510,14 +9524,18 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
getShuffleSHUFImmediate(SVOp), DAG);
}
+ unsigned Idx;
+ if (VT.is512BitVector() && isINSERT64x4Mask(M, VT, &Idx))
+ return Insert256BitVector(V1, Extract256BitVector(V2, 0, DAG, dl),
+ Idx*(NumElems/2), DAG, dl);
+
// Handle VPERM2F128/VPERM2I128 permutations
if (isVPERM2X128Mask(M, VT, HasFp256))
return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
V2, getShuffleVPERM2X128Immediate(SVOp), DAG);
- SDValue BlendOp = LowerVECTOR_SHUFFLEtoBlend(SVOp, Subtarget, DAG);
- if (BlendOp.getNode())
- return BlendOp;
+ if (Subtarget->hasSSE41() && isINSERTPSMask(M, VT))
+ return getINSERTPS(SVOp, dl, DAG);
unsigned Imm8;
if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8))
@@ -7532,14 +9550,13 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT));
}
- SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT,
- &permclMask[0], NumElems);
+ SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT, permclMask);
if (V2IsUndef)
// Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32
return DAG.getNode(X86ISD::VPERMV, dl, VT,
DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
- return DAG.getNode(X86ISD::VPERMV3, dl, VT,
- DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1, V2);
+ return DAG.getNode(X86ISD::VPERMV3, dl, VT, V1,
+ DAG.getNode(ISD::BITCAST, dl, VT, Mask), V2);
}
//===--------------------------------------------------------------------===//
@@ -7555,6 +9572,12 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
return NewOp;
}
+ if (VT == MVT::v16i16 && Subtarget->hasInt256()) {
+ SDValue NewOp = LowerVECTOR_SHUFFLEv16i16(Op, DAG);
+ if (NewOp.getNode())
+ return NewOp;
+ }
+
if (VT == MVT::v16i8) {
SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG);
if (NewOp.getNode())
@@ -7579,6 +9602,109 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
return SDValue();
}
+// This function assumes its argument is a BUILD_VECTOR of constants or
+// undef SDNodes, i.e. ISD::isBuildVectorOfConstantSDNodes(BuildVector) is
+// true.
+static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
+ unsigned &MaskValue) {
+ MaskValue = 0;
+ unsigned NumElems = BuildVector->getNumOperands();
+ // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
+ unsigned NumLanes = (NumElems - 1) / 8 + 1;
+ unsigned NumElemsInLane = NumElems / NumLanes;
+
+ // Blend for v16i16 should be symmetric for both lanes.
+ for (unsigned i = 0; i < NumElemsInLane; ++i) {
+ SDValue EltCond = BuildVector->getOperand(i);
+ SDValue SndLaneEltCond =
+ (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
+
+ int Lane1Cond = -1, Lane2Cond = -1;
+ if (isa<ConstantSDNode>(EltCond))
+ Lane1Cond = !isZero(EltCond);
+ if (isa<ConstantSDNode>(SndLaneEltCond))
+ Lane2Cond = !isZero(SndLaneEltCond);
+
+ if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
+ // Lane1Cond != 0, means we want the first argument.
+ // Lane1Cond == 0, means we want the second argument.
+ // The encoding of this argument is 0 for the first argument, 1
+ // for the second. Therefore, invert the condition.
+ MaskValue |= !Lane1Cond << i;
+ else if (Lane1Cond < 0)
+ MaskValue |= !Lane2Cond << i;
+ else
+ return false;
+ }
+ return true;
+}
+
+// Try to lower a vselect node into a simple blend instruction.
+static SDValue LowerVSELECTtoBlend(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ SDValue Cond = Op.getOperand(0);
+ SDValue LHS = Op.getOperand(1);
+ SDValue RHS = Op.getOperand(2);
+ SDLoc dl(Op);
+ MVT VT = Op.getSimpleValueType();
+ MVT EltVT = VT.getVectorElementType();
+ unsigned NumElems = VT.getVectorNumElements();
+
+ // There is no blend with immediate in AVX-512.
+ if (VT.is512BitVector())
+ return SDValue();
+
+ if (!Subtarget->hasSSE41() || EltVT == MVT::i8)
+ return SDValue();
+ if (!Subtarget->hasInt256() && VT == MVT::v16i16)
+ return SDValue();
+
+ if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
+ return SDValue();
+
+ // Check the mask for BLEND and build the value.
+ unsigned MaskValue = 0;
+ if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
+ return SDValue();
+
+ // Convert i32 vectors to floating point if it is not AVX2.
+ // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
+ MVT BlendVT = VT;
+ if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
+ BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
+ NumElems);
+ LHS = DAG.getNode(ISD::BITCAST, dl, BlendVT, LHS);
+ RHS = DAG.getNode(ISD::BITCAST, dl, BlendVT, RHS);
+ }
+
+ SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, LHS, RHS,
+ DAG.getConstant(MaskValue, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
+}
+
+SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
+ SDValue BlendOp = LowerVSELECTtoBlend(Op, Subtarget, DAG);
+ if (BlendOp.getNode())
+ return BlendOp;
+
+ // Some types for vselect were previously set to Expand, not Legal or
+ // Custom. Return an empty SDValue so we fall-through to Expand, after
+ // the Custom lowering phase.
+ MVT VT = Op.getSimpleValueType();
+ switch (VT.SimpleTy) {
+ default:
+ break;
+ case MVT::v8i16:
+ case MVT::v16i16:
+ return SDValue();
+ }
+
+ // We couldn't create a "Blend with immediate" node.
+ // This node should still be legal, but we'll have to emit a blendv*
+ // instruction.
+ return Op;
+}
+
static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
@@ -7641,6 +9767,39 @@ static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
return SDValue();
}
+/// Extract one bit from mask vector, like v16i1 or v8i1.
+/// AVX-512 feature.
+SDValue
+X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
+ SDValue Vec = Op.getOperand(0);
+ SDLoc dl(Vec);
+ MVT VecVT = Vec.getSimpleValueType();
+ SDValue Idx = Op.getOperand(1);
+ MVT EltVT = Op.getSimpleValueType();
+
+ assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
+
+ // variable index can't be handled in mask registers,
+ // extend vector to VR512
+ if (!isa<ConstantSDNode>(Idx)) {
+ MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
+ SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
+ SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
+ ExtVT.getVectorElementType(), Ext, Idx);
+ return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
+ }
+
+ unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ const TargetRegisterClass* rc = getRegClassFor(VecVT);
+ unsigned MaxSift = rc->getSize()*8 - 1;
+ Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
+ DAG.getConstant(MaxSift - IdxVal, MVT::i8));
+ Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
+ DAG.getConstant(MaxSift, MVT::i8));
+ return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
+ DAG.getIntPtrConstant(0));
+}
+
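The constant-index path above isolates bit IdxVal of the mask register by shifting it up to the most significant bit and then logically shifting it back down to bit 0. A scalar sketch of the same trick, assuming a 16-bit mask (so MaxSift, i.e. rc->getSize()*8 - 1, is 15) and a hypothetical helper name:

#include <cstdint>

// Returns bit Idx (Idx < 16) of Mask: shift the chosen bit into the MSB,
// then shift it down to bit 0, as the VSHLI/VSRLI pair above does.
static bool extractMaskBit(uint16_t Mask, unsigned Idx) {
  const unsigned MaxShift = 16 - 1;
  uint16_t Hi = uint16_t(Mask << (MaxShift - Idx)); // selected bit -> MSB
  return (Hi >> MaxShift) & 1u;                     // MSB -> bit 0
}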
SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
SelectionDAG &DAG) const {
@@ -7648,6 +9807,10 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
SDValue Vec = Op.getOperand(0);
MVT VecVT = Vec.getSimpleValueType();
SDValue Idx = Op.getOperand(1);
+
+ if (Op.getSimpleValueType() == MVT::i1)
+ return ExtractBitFromMaskVector(Op, DAG);
+
if (!isa<ConstantSDNode>(Idx)) {
if (VecVT.is512BitVector() ||
(VecVT.is256BitVector() && Subtarget->hasInt256() &&
@@ -7657,7 +9820,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
MaskEltVT.getSizeInBits());
-
+
Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
getZeroVector(MaskVT, Subtarget, DAG, dl),
@@ -7804,10 +9967,47 @@ static SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
return SDValue();
}
+/// Insert one bit to mask vector, like v16i1 or v8i1.
+/// AVX-512 feature.
+SDValue
+X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc dl(Op);
+ SDValue Vec = Op.getOperand(0);
+ SDValue Elt = Op.getOperand(1);
+ SDValue Idx = Op.getOperand(2);
+ MVT VecVT = Vec.getSimpleValueType();
+
+ if (!isa<ConstantSDNode>(Idx)) {
+    // Non-constant index. Extend the source and destination,
+    // insert the element, and then truncate the result.
+ MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
+ MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
+ SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
+ DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
+ DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
+ return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
+ }
+
+ unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
+ if (Vec.getOpcode() == ISD::UNDEF)
+ return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
+ DAG.getConstant(IdxVal, MVT::i8));
+ const TargetRegisterClass* rc = getRegClassFor(VecVT);
+ unsigned MaxSift = rc->getSize()*8 - 1;
+ EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
+ DAG.getConstant(MaxSift, MVT::i8));
+ EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec,
+ DAG.getConstant(MaxSift - IdxVal, MVT::i8));
+ return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
+}
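For a constant index, the code above materializes the new bit, moves it into position IdxVal with the same shift-up/shift-down pair, and ORs it into the existing mask. A scalar sketch under the same 16-bit assumption (hypothetical helper name); note that, like the OR above, it can only set the bit, not clear it:

#include <cstdint>

static uint16_t insertMaskBit(uint16_t Mask, bool Bit, unsigned Idx) {
  const unsigned MaxShift = 16 - 1;
  uint16_t EltInVec = Bit ? 1 : 0;                   // SCALAR_TO_VECTOR
  EltInVec = uint16_t(EltInVec << MaxShift);         // bit 0 -> MSB
  EltInVec = uint16_t(EltInVec >> (MaxShift - Idx)); // MSB -> bit Idx
  return uint16_t(Mask | EltInVec);                  // OR into the mask
}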
SDValue
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
+
+ if (EltVT == MVT::i1)
+ return InsertBitToMaskVector(Op, DAG);
SDLoc dl(Op);
SDValue N0 = Op.getOperand(0);
@@ -7950,7 +10150,7 @@ X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
// global base reg.
unsigned char OpFlag = 0;
unsigned WrapperKind = X86ISD::Wrapper;
- CodeModel::Model M = getTargetMachine().getCodeModel();
+ CodeModel::Model M = DAG.getTarget().getCodeModel();
if (Subtarget->isPICStyleRIPRel() &&
(M == CodeModel::Small || M == CodeModel::Kernel))
@@ -7983,7 +10183,7 @@ SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
// global base reg.
unsigned char OpFlag = 0;
unsigned WrapperKind = X86ISD::Wrapper;
- CodeModel::Model M = getTargetMachine().getCodeModel();
+ CodeModel::Model M = DAG.getTarget().getCodeModel();
if (Subtarget->isPICStyleRIPRel() &&
(M == CodeModel::Small || M == CodeModel::Kernel))
@@ -8016,7 +10216,7 @@ X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
// global base reg.
unsigned char OpFlag = 0;
unsigned WrapperKind = X86ISD::Wrapper;
- CodeModel::Model M = getTargetMachine().getCodeModel();
+ CodeModel::Model M = DAG.getTarget().getCodeModel();
if (Subtarget->isPICStyleRIPRel() &&
(M == CodeModel::Small || M == CodeModel::Kernel)) {
@@ -8037,7 +10237,7 @@ X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
// With PIC, the address is actually $g + Offset.
- if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
+ if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
!Subtarget->is64Bit()) {
Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg,
@@ -8059,7 +10259,7 @@ X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
// Create the TargetBlockAddressAddress node.
unsigned char OpFlags =
Subtarget->ClassifyBlockAddressReference();
- CodeModel::Model M = getTargetMachine().getCodeModel();
+ CodeModel::Model M = DAG.getTarget().getCodeModel();
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
SDLoc dl(Op);
@@ -8088,8 +10288,8 @@ X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
// Create the TargetGlobalAddress node, folding in the constant
// offset if it is legal.
unsigned char OpFlags =
- Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
- CodeModel::Model M = getTargetMachine().getCodeModel();
+ Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
+ CodeModel::Model M = DAG.getTarget().getCodeModel();
SDValue Result;
if (OpFlags == X86II::MO_NO_FLAG &&
X86::isOffsetSuitableForCodeModel(Offset, M)) {
@@ -8152,10 +10352,10 @@ GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
if (InFlag) {
SDValue Ops[] = { Chain, TGA, *InFlag };
- Chain = DAG.getNode(CallType, dl, NodeTys, Ops, array_lengthof(Ops));
+ Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
} else {
SDValue Ops[] = { Chain, TGA };
- Chain = DAG.getNode(CallType, dl, NodeTys, Ops, array_lengthof(Ops));
+ Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
}
// TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
@@ -8183,7 +10383,7 @@ LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
static SDValue
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
const EVT PtrVT) {
- return GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT,
+ return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
X86::RAX, X86II::MO_TLSGD);
}
@@ -8200,7 +10400,7 @@ static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
SDValue Base;
if (is64Bit) {
- Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, X86::RAX,
+ Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
X86II::MO_TLSLD, /*LocalDynamic=*/true);
} else {
SDValue InFlag;
@@ -8288,7 +10488,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
const GlobalValue *GV = GA->getGlobal();
if (Subtarget->isTargetELF()) {
- TLSModel::Model model = getTargetMachine().getTLSModel(GV);
+ TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
switch (model) {
case TLSModel::GeneralDynamic:
@@ -8300,9 +10500,9 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
Subtarget->is64Bit());
case TLSModel::InitialExec:
case TLSModel::LocalExec:
- return LowerToTLSExecModel(GA, DAG, getPointerTy(), model,
- Subtarget->is64Bit(),
- getTargetMachine().getRelocationModel() == Reloc::PIC_);
+ return LowerToTLSExecModel(
+ GA, DAG, getPointerTy(), model, Subtarget->is64Bit(),
+ DAG.getTarget().getRelocationModel() == Reloc::PIC_);
}
llvm_unreachable("Unknown TLS model.");
}
@@ -8315,8 +10515,8 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
- bool PIC32 = (getTargetMachine().getRelocationModel() == Reloc::PIC_) &&
- !Subtarget->is64Bit();
+ bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
+ !Subtarget->is64Bit();
if (PIC32)
OpFlag = X86II::MO_TLVP_PIC_BASE;
else
@@ -8339,7 +10539,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
SDValue Chain = DAG.getEntryNode();
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue Args[] = { Chain, Offset };
- Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args, 2);
+ Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
// TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
@@ -8352,7 +10552,8 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
Chain.getValue(1));
}
- if (Subtarget->isTargetWindows() || Subtarget->isTargetMingw()) {
+ if (Subtarget->isTargetKnownWindowsMSVC() ||
+ Subtarget->isTargetWindowsGNU()) {
// Just use the implicit TLS architecture
// Need to generate something similar to:
// mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
@@ -8364,10 +10565,6 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
// Windows 64bit: gs:0x58
// Windows 32bit: fs:__tls_array
- // If GV is an alias then use the aliasee for determining
- // thread-localness.
- if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
- GV = GA->resolveAliasedGlobal(false);
SDLoc dl(GA);
SDValue Chain = DAG.getEntryNode();
@@ -8380,13 +10577,16 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
: Type::getInt32PtrTy(*DAG.getContext(),
257));
- SDValue TlsArray = Subtarget->is64Bit() ? DAG.getIntPtrConstant(0x58) :
- (Subtarget->isTargetMingw() ? DAG.getIntPtrConstant(0x2C) :
- DAG.getExternalSymbol("_tls_array", getPointerTy()));
+ SDValue TlsArray =
+ Subtarget->is64Bit()
+ ? DAG.getIntPtrConstant(0x58)
+ : (Subtarget->isTargetWindowsGNU()
+ ? DAG.getIntPtrConstant(0x2C)
+ : DAG.getExternalSymbol("_tls_array", getPointerTy()));
- SDValue ThreadPointer = DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
- MachinePointerInfo(Ptr),
- false, false, false, 0);
+ SDValue ThreadPointer =
+ DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
+ MachinePointerInfo(Ptr), false, false, false, 0);
// Load the _tls_index variable
SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
@@ -8422,9 +10622,9 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
/// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
/// and take a 2 x i32 value to shift plus a shift amount.
-SDValue X86TargetLowering::LowerShiftParts(SDValue Op, SelectionDAG &DAG) const{
+static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
assert(Op.getNumOperands() == 3 && "Not a double-shift!");
- EVT VT = Op.getValueType();
+ MVT VT = Op.getSimpleValueType();
unsigned VTBits = VT.getSizeInBits();
SDLoc dl(Op);
bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
@@ -8463,25 +10663,25 @@ SDValue X86TargetLowering::LowerShiftParts(SDValue Op, SelectionDAG &DAG) const{
SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
if (Op.getOpcode() == ISD::SHL_PARTS) {
- Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4);
- Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4);
+ Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
+ Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
} else {
- Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4);
- Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4);
+ Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
+ Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
}
SDValue Ops[2] = { Lo, Hi };
- return DAG.getMergeValues(Ops, array_lengthof(Ops), dl);
+ return DAG.getMergeValues(Ops, dl);
}
SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
SelectionDAG &DAG) const {
- EVT SrcVT = Op.getOperand(0).getValueType();
+ MVT SrcVT = Op.getOperand(0).getSimpleValueType();
if (SrcVT.isVector())
return SDValue();
- assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 &&
+ assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
"Unknown SINT_TO_FP to lower!");
// These are really Legal; return the operand so the caller accepts it as
@@ -8534,8 +10734,7 @@ SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
X86ISD::FILD, DL,
- Tys, Ops, array_lengthof(Ops),
- SrcVT, MMO);
+ Tys, Ops, SrcVT, MMO);
if (useSSE) {
Chain = Result.getValue(1);
@@ -8558,8 +10757,7 @@ SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
MachineMemOperand::MOStore, SSFISize, SSFISize);
Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
- Ops, array_lengthof(Ops),
- Op.getValueType(), MMO);
+ Ops, Op.getValueType(), MMO);
Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
MachinePointerInfo::getFixedStack(SSFI),
false, false, false, 0);
@@ -8685,15 +10883,14 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
SelectionDAG &DAG) const {
SDValue N0 = Op.getOperand(0);
- EVT SVT = N0.getValueType();
+ MVT SVT = N0.getSimpleValueType();
SDLoc dl(Op);
assert((SVT == MVT::v4i8 || SVT == MVT::v4i16 ||
SVT == MVT::v8i8 || SVT == MVT::v8i16) &&
"Custom UINT_TO_FP is not supported!");
- EVT NVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
- SVT.getVectorNumElements());
+ MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
}
@@ -8712,8 +10909,8 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
if (DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
- EVT SrcVT = N0.getValueType();
- EVT DstVT = Op.getValueType();
+ MVT SrcVT = N0.getSimpleValueType();
+ MVT DstVT = Op.getSimpleValueType();
if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
return LowerUINT_TO_FP_i64(Op, DAG);
if (SrcVT == MVT::i32 && X86ScalarSSEf64)
@@ -8755,7 +10952,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
- array_lengthof(Ops), MVT::i64, MMO);
+ MVT::i64, MMO);
APInt FF(32, 0x5F800000ULL);
@@ -8848,8 +11045,7 @@ X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
MachineMemOperand::MOLoad, MemSize, MemSize);
- Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops,
- array_lengthof(Ops), DstTy, MMO);
+ Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
Chain = Value.getValue(1);
SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
@@ -8863,8 +11059,7 @@ X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
// Build the FP_TO_INT*_IN_MEM
SDValue Ops[] = { Chain, Value, StackSlot };
SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
- Ops, array_lengthof(Ops), DstTy,
- MMO);
+ Ops, DstTy, MMO);
return std::make_pair(FIST, StackSlot);
} else {
SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
@@ -8876,8 +11071,8 @@ X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
MVT::i32, eax.getValue(2));
SDValue Ops[] = { eax, edx };
SDValue pair = IsReplace
- ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops, array_lengthof(Ops))
- : DAG.getMergeValues(Ops, array_lengthof(Ops), DL);
+ ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops)
+ : DAG.getMergeValues(Ops, DL);
return std::make_pair(pair, SDValue());
}
}
@@ -8908,7 +11103,7 @@ static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
return SDValue();
if (Subtarget->hasInt256())
- return DAG.getNode(X86ISD::VZEXT_MOVL, dl, VT, In);
+ return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
SDValue Undef = DAG.getUNDEF(InVT);
@@ -8927,9 +11122,9 @@ static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
SelectionDAG &DAG) {
- MVT VT = Op->getValueType(0).getSimpleVT();
+ MVT VT = Op->getSimpleValueType(0);
SDValue In = Op->getOperand(0);
- MVT InVT = In.getValueType().getSimpleVT();
+ MVT InVT = In.getSimpleValueType();
SDLoc DL(Op);
unsigned int NumElts = VT.getVectorNumElements();
if (NumElts != 8 && NumElts != 16)
@@ -8990,9 +11185,21 @@ static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
SDLoc DL(Op);
- MVT VT = Op.getSimpleValueType();
+ MVT VT = Op.getSimpleValueType();
SDValue In = Op.getOperand(0);
MVT InVT = In.getSimpleValueType();
+
+ if (VT == MVT::i1) {
+ assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
+ "Invalid scalar TRUNCATE operation");
+ if (InVT == MVT::i32)
+ return SDValue();
+ if (InVT.getSizeInBits() == 64)
+ In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::i32, In);
+ else if (InVT.getSizeInBits() < 32)
+ In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
+ return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
+ }
assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
"Invalid TRUNCATE operation");
@@ -9008,6 +11215,7 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
InVT = ExtVT;
}
+
SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
SDValue CP = DAG.getConstantPool(C, getPointerTy());
@@ -9031,24 +11239,14 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
DAG.getIntPtrConstant(0));
}
- // On AVX, v4i64 -> v4i32 becomes a sequence that uses PSHUFD and MOVLHPS.
SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
DAG.getIntPtrConstant(0));
SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
DAG.getIntPtrConstant(2));
-
OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
-
- // The PSHUFD mask:
- static const int ShufMask1[] = {0, 2, 0, 0};
- SDValue Undef = DAG.getUNDEF(VT);
- OpLo = DAG.getVectorShuffle(VT, DL, OpLo, Undef, ShufMask1);
- OpHi = DAG.getVectorShuffle(VT, DL, OpHi, Undef, ShufMask1);
-
- // The MOVLHPS mask:
- static const int ShufMask2[] = {0, 1, 4, 5};
- return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask2);
+ static const int ShufMask[] = {0, 2, 4, 6};
+ return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
}
if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
@@ -9069,8 +11267,7 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
for (unsigned j = 0; j < 8; ++j)
pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
}
- SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8,
- &pshufbMask[0], 32);
+ SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);
@@ -9115,8 +11312,7 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
unsigned NumElems = VT.getVectorNumElements();
- EVT NVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
- NumElems * 2);
+ MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
SmallVector<int, 16> MaskVec(NumElems * 2, -1);
// Prepare truncation shuffle mask
@@ -9131,20 +11327,13 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
SelectionDAG &DAG) const {
- MVT VT = Op.getSimpleValueType();
- if (VT.isVector()) {
- if (VT == MVT::v8i16)
- return DAG.getNode(ISD::TRUNCATE, SDLoc(Op), VT,
- DAG.getNode(ISD::FP_TO_SINT, SDLoc(Op),
- MVT::v8i32, Op.getOperand(0)));
- return SDValue();
- }
+ assert(!Op.getSimpleValueType().isVector());
std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
/*IsSigned=*/ true, /*IsReplace=*/ false);
SDValue FIST = Vals.first, StackSlot = Vals.second;
// If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
- if (FIST.getNode() == 0) return Op;
+ if (!FIST.getNode()) return Op;
if (StackSlot.getNode())
// Load the result.
@@ -9186,7 +11375,7 @@ static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
In, DAG.getUNDEF(SVT)));
}
-SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) {
LLVMContext *Context = DAG.getContext();
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
@@ -9204,7 +11393,8 @@ SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) const {
C = ConstantFP::get(*Context, APFloat(APFloat::IEEEsingle,
APInt(32, ~(1U << 31))));
C = ConstantVector::getSplat(NumElts, C);
- SDValue CPIdx = DAG.getConstantPool(C, getPointerTy());
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy());
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(),
@@ -9220,7 +11410,7 @@ SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask);
}
-SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) {
LLVMContext *Context = DAG.getContext();
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
@@ -9238,7 +11428,8 @@ SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const {
C = ConstantFP::get(*Context, APFloat(APFloat::IEEEsingle,
APInt(32, 1U << 31)));
C = ConstantVector::getSplat(NumElts, C);
- SDValue CPIdx = DAG.getConstantPool(C, getPointerTy());
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy());
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(),
@@ -9255,7 +11446,8 @@ SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask);
}
-SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
+static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
LLVMContext *Context = DAG.getContext();
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
@@ -9291,7 +11483,7 @@ SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0))));
}
Constant *C = ConstantVector::get(CV);
- SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
+ SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(),
false, false, false, 16);
@@ -9324,7 +11516,7 @@ SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0))));
}
C = ConstantVector::get(CV);
- CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
+ CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
SDValue Mask2 = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(),
false, false, false, 16);
@@ -9362,6 +11554,7 @@ static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
SmallVector<SDValue, 8> Opnds;
DenseMap<SDValue, unsigned> VecInMap;
+ SmallVector<SDValue, 8> VecIns;
EVT VT = MVT::Other;
// Recognize a special case where a vector is casted into wide integer to
@@ -9401,6 +11594,7 @@ static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
VT != VecInMap.begin()->first.getValueType())
return SDValue();
M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
+ VecIns.push_back(ExtractedFromVec);
}
M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
}
@@ -9409,14 +11603,12 @@ static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
"Not extracted from 128-/256-bit vector.");
unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
- SmallVector<SDValue, 8> VecIns;
for (DenseMap<SDValue, unsigned>::const_iterator
I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
// Quit if not all elements are used.
if (I->second != FullMask)
return SDValue();
- VecIns.push_back(I->first);
}
EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
@@ -9438,11 +11630,33 @@ static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
VecIns.back(), VecIns.back());
}
+/// \brief Return true if \c Op has a use that doesn't just read flags.
+static bool hasNonFlagsUse(SDValue Op) {
+ for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
+ ++UI) {
+ SDNode *User = *UI;
+ unsigned UOpNo = UI.getOperandNo();
+ if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
+      // Look past the truncate.
+ UOpNo = User->use_begin().getOperandNo();
+ User = *User->use_begin();
+ }
+
+ if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
+ !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
+ return true;
+ }
+ return false;
+}
+
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
-SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
+SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
SelectionDAG &DAG) const {
- SDLoc dl(Op);
+ if (Op.getValueType() == MVT::i1)
+ // KORTEST instruction should be selected
+ return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
+ DAG.getConstant(0, Op.getValueType()));
// CF and OF aren't always set the way we want. Determine which
// of these we need.
@@ -9456,19 +11670,38 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
break;
case X86::COND_G: case X86::COND_GE:
case X86::COND_L: case X86::COND_LE:
- case X86::COND_O: case X86::COND_NO:
- NeedOF = true;
+ case X86::COND_O: case X86::COND_NO: {
+    // Check if we really need to set the Overflow flag.
+    // If NoSignedWrap is present, it is not actually needed.
+ switch (Op->getOpcode()) {
+ case ISD::ADD:
+ case ISD::SUB:
+ case ISD::MUL:
+ case ISD::SHL: {
+ const BinaryWithFlagsSDNode *BinNode =
+ cast<BinaryWithFlagsSDNode>(Op.getNode());
+ if (BinNode->hasNoSignedWrap())
+ break;
+ }
+ default:
+ NeedOF = true;
+ break;
+ }
break;
}
-
+ }
// See if we can use the EFLAGS value from the operand instead of
// doing a separate TEST. TEST always sets OF and CF to 0, so unless
// we prove that the arithmetic won't overflow, we can't use OF or CF.
- if (Op.getResNo() != 0 || NeedOF || NeedCF)
+ if (Op.getResNo() != 0 || NeedOF || NeedCF) {
// Emit a CMP with 0, which is the TEST pattern.
+ //if (Op.getValueType() == MVT::i1)
+ // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
+ // DAG.getConstant(0, MVT::i1));
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
DAG.getConstant(0, Op.getValueType()));
-
+ }
unsigned Opcode = 0;
unsigned NumOperands = 0;
@@ -9519,14 +11752,14 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
if (ConstantSDNode *C =
dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
// An add of one will be selected as an INC.
- if (C->getAPIntValue() == 1) {
+ if (C->getAPIntValue() == 1 && !Subtarget->slowIncDec()) {
Opcode = X86ISD::INC;
NumOperands = 1;
break;
}
// An add of negative one (subtract of one) will be selected as a DEC.
- if (C->getAPIntValue().isAllOnesValue()) {
+ if (C->getAPIntValue().isAllOnesValue() && !Subtarget->slowIncDec()) {
Opcode = X86ISD::DEC;
NumOperands = 1;
break;
@@ -9537,31 +11770,35 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
Opcode = X86ISD::ADD;
NumOperands = 2;
break;
- case ISD::AND: {
- // If the primary and result isn't used, don't bother using X86ISD::AND,
- // because a TEST instruction will be better.
- bool NonFlagUse = false;
- for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
- UE = Op.getNode()->use_end(); UI != UE; ++UI) {
- SDNode *User = *UI;
- unsigned UOpNo = UI.getOperandNo();
- if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
- // Look pass truncate.
- UOpNo = User->use_begin().getOperandNo();
- User = *User->use_begin();
- }
-
- if (User->getOpcode() != ISD::BRCOND &&
- User->getOpcode() != ISD::SETCC &&
- !(User->getOpcode() == ISD::SELECT && UOpNo == 0)) {
- NonFlagUse = true;
+ case ISD::SHL:
+ case ISD::SRL:
+ // If we have a constant logical shift that's only used in a comparison
+    // against zero, turn it into an equivalent AND. This allows turning it into
+ // a TEST instruction later.
+ if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
+ isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
+ EVT VT = Op.getValueType();
+ unsigned BitWidth = VT.getSizeInBits();
+ unsigned ShAmt = Op->getConstantOperandVal(1);
+ if (ShAmt >= BitWidth) // Avoid undefined shifts.
break;
- }
+ APInt Mask = ArithOp.getOpcode() == ISD::SRL
+ ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
+ : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
+ if (!Mask.isSignedIntN(32)) // Avoid large immediates.
+ break;
+ SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
+ DAG.getConstant(Mask, VT));
+ DAG.ReplaceAllUsesWith(Op, New);
+ Op = New;
}
+ break;
- if (!NonFlagUse)
+ case ISD::AND:
+ // If the primary and result isn't used, don't bother using X86ISD::AND,
+ // because a TEST instruction will be better.
+ if (!hasNonFlagsUse(Op))
break;
- }
// FALL THROUGH
case ISD::SUB:
case ISD::OR:
@@ -9644,7 +11881,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
for (unsigned i = 0; i != NumOperands; ++i)
Ops.push_back(Op.getOperand(i));
- SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands);
+ SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
DAG.ReplaceAllUsesWith(Op, New);
return SDValue(New.getNode(), 1);
}
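The ISD::SHL/ISD::SRL case added to EmitTest above relies on a simple identity: a constant logical shift that is only compared against zero can be replaced by masking the unshifted value and comparing that against zero, which later selects to a TEST instruction. A standalone sketch of the identity for 32-bit values (hypothetical helper names; ShAmt is assumed in range, as the code guards):

#include <cassert>
#include <cstdint>

// (X >> ShAmt) == 0  <=>  (X & HighBits) == 0, with bits ShAmt..31 set.
static bool srlIsZero(uint32_t X, unsigned ShAmt) {
  assert(ShAmt < 32 && "shift amount out of range");
  uint32_t HighBits = ~uint32_t(0) << ShAmt;
  return (X & HighBits) == 0;
}

// (X << ShAmt) == 0  <=>  (X & LowBits) == 0, with bits 0..31-ShAmt set.
static bool shlIsZero(uint32_t X, unsigned ShAmt) {
  assert(ShAmt < 32 && "shift amount out of range");
  uint32_t LowBits = ~uint32_t(0) >> ShAmt;
  return (X & LowBits) == 0;
}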
@@ -9652,14 +11889,30 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent.
SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
- SelectionDAG &DAG) const {
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1))
+ SDLoc dl, SelectionDAG &DAG) const {
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) {
if (C->getAPIntValue() == 0)
- return EmitTest(Op0, X86CC, DAG);
+ return EmitTest(Op0, X86CC, dl, DAG);
- SDLoc dl(Op0);
+ if (Op0.getValueType() == MVT::i1)
+ llvm_unreachable("Unexpected comparison operation for MVT::i1 operands");
+ }
+
if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
+    // Do the comparison at i32 if it's smaller, except on Atom.
+ // This avoids subregister aliasing issues. Keep the smaller reference
+ // if we're optimizing for size, however, as that'll allow better folding
+ // of memory operations.
+ if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
+ !DAG.getMachineFunction().getFunction()->getAttributes().hasAttribute(
+ AttributeSet::FunctionIndex, Attribute::MinSize) &&
+ !Subtarget->isAtom()) {
+ unsigned ExtendOp =
+ isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
+ Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
+ Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
+ }
// Use SUB instead of CMP to enable CSE between SUB and CMP.
SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
@@ -9721,7 +11974,7 @@ SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
unsigned AndBitWidth = And.getValueSizeInBits();
if (BitWidth > AndBitWidth) {
APInt Zeros, Ones;
- DAG.ComputeMaskedBits(Op0, Zeros, Ones);
+ DAG.computeKnownBits(Op0, Zeros, Ones);
if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
return SDValue();
}
@@ -9844,38 +12097,74 @@ static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
}
-static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
+static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
+ const X86Subtarget *Subtarget) {
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDValue CC = Op.getOperand(2);
MVT VT = Op.getSimpleValueType();
+ SDLoc dl(Op);
assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 32 &&
Op.getValueType().getScalarType() == MVT::i1 &&
"Cannot set masked compare for this operation");
ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
- SDLoc dl(Op);
-
+ unsigned Opc = 0;
bool Unsigned = false;
+ bool Swap = false;
unsigned SSECC;
switch (SetCCOpcode) {
default: llvm_unreachable("Unexpected SETCC condition");
case ISD::SETNE: SSECC = 4; break;
- case ISD::SETEQ: SSECC = 0; break;
- case ISD::SETUGT: Unsigned = true;
- case ISD::SETGT: SSECC = 6; break; // NLE
- case ISD::SETULT: Unsigned = true;
- case ISD::SETLT: SSECC = 1; break;
- case ISD::SETUGE: Unsigned = true;
- case ISD::SETGE: SSECC = 5; break; // NLT
- case ISD::SETULE: Unsigned = true;
+ case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
+ case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
+ case ISD::SETLT: Swap = true; //fall-through
+ case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
+ case ISD::SETULT: SSECC = 1; Unsigned = true; break;
+ case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
+ case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
+ case ISD::SETULE: Unsigned = true; //fall-through
case ISD::SETLE: SSECC = 2; break;
}
- unsigned Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM;
+
+ if (Swap)
+ std::swap(Op0, Op1);
+ if (Opc)
+ return DAG.getNode(Opc, dl, VT, Op0, Op1);
+ Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM;
return DAG.getNode(Opc, dl, VT, Op0, Op1,
DAG.getConstant(SSECC, MVT::i8));
+}
+
+/// \brief Try to turn a VSETULT into a VSETULE by modifying its second
+/// operand \p Op1. If non-trivial (for example because it's not constant)
+/// return an empty value.
+static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
+{
+ BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
+ if (!BV)
+ return SDValue();
+
+ MVT VT = Op1.getSimpleValueType();
+ MVT EVT = VT.getVectorElementType();
+ unsigned n = VT.getVectorNumElements();
+ SmallVector<SDValue, 8> ULTOp1;
+
+ for (unsigned i = 0; i < n; ++i) {
+ ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
+ if (!Elt || Elt->isOpaque() || Elt->getValueType(0) != EVT)
+ return SDValue();
+
+ // Avoid underflow.
+ APInt Val = Elt->getAPIntValue();
+ if (Val == 0)
+ return SDValue();
+ ULTOp1.push_back(DAG.getConstant(Val - 1, EVT));
+ }
+
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
}
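Two scalar identities drive the unsigned-compare rewrites here: for a non-zero constant C, x <u C is the same as x <=u C - 1 (what ChangeVSETULTtoVSETULE does per element), and with saturating subtraction a <=u b holds exactly when psubus(a, b) is zero (the pattern LowerVSETCC emits below). A minimal sketch with hypothetical helper names:

#include <cstdint>

// x <u C  <=>  x <=u C - 1, provided C != 0 (the DAG code bails out on 0).
static bool ultViaUle(uint32_t X, uint32_t C) {
  if (C == 0)
    return false;
  return X <= C - 1;
}

// a <=u b  <=>  saturating a - b == 0, i.e. pcmpeq(psubus(a, b), 0).
static bool uleViaPsubus(uint16_t A, uint16_t B) {
  uint16_t Sat = A > B ? uint16_t(A - B) : uint16_t(0);
  return Sat == 0;
}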
static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
@@ -9931,7 +12220,7 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
if (Subtarget->hasAVX512()) {
if (Op1.getValueType().is512BitVector() ||
(MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
- return LowerIntVSETCC_AVX512(Op, DAG);
+ return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
// In AVX-512 architecture setcc returns mask with i1 elements,
// But there is no compare instruction for i8 and i16 elements.
@@ -9949,40 +12238,75 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
// operations may be required for some comparisons.
unsigned Opc;
bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
-
+ bool Subus = false;
+
switch (SetCCOpcode) {
default: llvm_unreachable("Unexpected SETCC condition");
case ISD::SETNE: Invert = true;
- case ISD::SETEQ: Opc = MaskResult? X86ISD::PCMPEQM: X86ISD::PCMPEQ; break;
+ case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
case ISD::SETLT: Swap = true;
- case ISD::SETGT: Opc = MaskResult? X86ISD::PCMPGTM: X86ISD::PCMPGT; break;
+ case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
case ISD::SETGE: Swap = true;
- case ISD::SETLE: Opc = MaskResult? X86ISD::PCMPGTM: X86ISD::PCMPGT;
+ case ISD::SETLE: Opc = X86ISD::PCMPGT;
Invert = true; break;
case ISD::SETULT: Swap = true;
- case ISD::SETUGT: Opc = MaskResult? X86ISD::PCMPGTM: X86ISD::PCMPGT;
+ case ISD::SETUGT: Opc = X86ISD::PCMPGT;
FlipSigns = true; break;
case ISD::SETUGE: Swap = true;
- case ISD::SETULE: Opc = MaskResult? X86ISD::PCMPGTM: X86ISD::PCMPGT;
+ case ISD::SETULE: Opc = X86ISD::PCMPGT;
FlipSigns = true; Invert = true; break;
}
-
+
// Special case: Use min/max operations for SETULE/SETUGE
MVT VET = VT.getVectorElementType();
bool hasMinMax =
(Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
|| (Subtarget->hasSSE2() && (VET == MVT::i8));
-
+
if (hasMinMax) {
switch (SetCCOpcode) {
default: break;
case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break;
case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break;
}
-
+
if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
}
-
+
+ bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
+ if (!MinMax && hasSubus) {
+ // As another special case, use PSUBUS[BW] when it's profitable. E.g. for
+ // Op0 u<= Op1:
+ // t = psubus Op0, Op1
+ // pcmpeq t, <0..0>
+ switch (SetCCOpcode) {
+ default: break;
+ case ISD::SETULT: {
+ // If the comparison is against a constant we can turn this into a
+ // setule. With psubus, setule does not require a swap. This is
+      // beneficial because the constant in the register is no longer
+      // clobbered as the destination, so it can be hoisted out of a loop.
+      // Only do this pre-AVX, since the AVX vpcmp* forms are not destructive.
+ if (Subtarget->hasAVX())
+ break;
+ SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
+ if (ULEOp1.getNode()) {
+ Op1 = ULEOp1;
+ Subus = true; Invert = false; Swap = false;
+ }
+ break;
+ }
+ // Psubus is better than flip-sign because it requires no inversion.
+ case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
+ case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
+ }
+
+ if (Subus) {
+ Opc = X86ISD::SUBUS;
+ FlipSigns = false;
+ }
+ }
+
if (Swap)
std::swap(Op0, Op1);
@@ -10069,10 +12393,14 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
// If the logical-not of the result is required, perform that now.
if (Invert)
Result = DAG.getNOT(dl, Result, VT);
-
+
if (MinMax)
Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
+ if (Subus)
+ Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
+ getZeroVector(VT, Subtarget, DAG, dl));
+
return Result;
}
@@ -10082,7 +12410,8 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
- assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
+ assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
+ && "SetCC type must be 8-bit or 1-bit integer");
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDLoc dl(Op);
@@ -10114,23 +12443,38 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
bool Invert = (CC == ISD::SETNE) ^
cast<ConstantSDNode>(Op1)->isNullValue();
- if (!Invert) return Op0;
+ if (!Invert)
+ return Op0;
CCode = X86::GetOppositeBranchCondition(CCode);
- return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
- DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1));
+ SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(CCode, MVT::i8),
+ Op0.getOperand(1));
+ if (VT == MVT::i1)
+ return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
+ return SetCC;
}
}
+ if ((Op0.getValueType() == MVT::i1) && (Op1.getOpcode() == ISD::Constant) &&
+ (cast<ConstantSDNode>(Op1)->getZExtValue() == 1) &&
+ (CC == ISD::SETEQ || CC == ISD::SETNE)) {
+
+ ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
+ return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC);
+ }
bool isFP = Op1.getSimpleValueType().isFloatingPoint();
unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
if (X86CC == X86::COND_INVALID)
return SDValue();
- SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, DAG);
+ SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
- return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
- DAG.getConstant(X86CC, MVT::i8), EFLAGS);
+ SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(X86CC, MVT::i8), EFLAGS);
+ if (VT == MVT::i1)
+ return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
+ return SetCC;
}
// isX86LogicalCmp - Return true if opcode is a X86 logical comparison.
@@ -10159,11 +12503,6 @@ static bool isX86LogicalCmp(SDValue Op) {
return false;
}
-static bool isZero(SDValue V) {
- ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
- return C && C->isNullValue();
-}
-
static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
if (V.getOpcode() != ISD::TRUNCATE)
return false;
@@ -10195,8 +12534,12 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
if (SSECC != 8) {
- unsigned Opcode = VT == MVT::f32 ? X86ISD::FSETCCss : X86ISD::FSETCCsd;
- SDValue Cmp = DAG.getNode(Opcode, DL, VT, CondOp0, CondOp1,
+ if (Subtarget->hasAVX512()) {
+ SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
+ DAG.getConstant(SSECC, MVT::i8));
+ return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
+ }
+ SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
DAG.getConstant(SSECC, MVT::i8));
SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
@@ -10254,7 +12597,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
Res = DAG.getNOT(DL, Res, Res.getValueType());
ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
- if (N2C == 0 || !N2C->isNullValue())
+ if (!N2C || !N2C->isNullValue())
Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
return Res;
}
@@ -10343,7 +12686,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
if (addTest) {
CC = DAG.getConstant(X86::COND_NE, MVT::i8);
- Cond = EmitTest(Cond, X86::COND_NE, DAG);
+ Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
}
// a < b ? -1 : 0 -> RES = ~setcc_carry
@@ -10383,7 +12726,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
// condition is true.
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
SDValue Ops[] = { Op2, Op1, CC, Cond };
- return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops, array_lengthof(Ops));
+ return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
}
static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, SelectionDAG &DAG) {
@@ -10433,7 +12776,7 @@ static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
return SDValue();
if (Subtarget->hasInt256())
- return DAG.getNode(X86ISD::VSEXT_MOVL, dl, VT, In);
+ return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
// Optimize vectors in AVX mode
// Sign extend v8i16 to v8i32 and
@@ -10462,8 +12805,8 @@ static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
MVT HalfVT = MVT::getVectorVT(VT.getScalarType(),
VT.getVectorNumElements()/2);
- OpLo = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpLo);
- OpHi = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpHi);
+ OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
+ OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}
@@ -10576,11 +12919,26 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
unsigned X86Opcode;
unsigned X86Cond;
SDVTList VTs;
+ // Keep this in sync with LowerXALUO, otherwise we might create redundant
+ // instructions that can't be removed afterwards (i.e. X86ISD::ADD and
+ // X86ISD::INC).
switch (CondOpcode) {
case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
- case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
+ case ISD::SADDO:
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
+ if (C->isOne()) {
+ X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
+ break;
+ }
+ X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
- case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
+ case ISD::SSUBO:
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
+ if (C->isOne()) {
+ X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
+ break;
+ }
+ X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
default: llvm_unreachable("unexpected overflowing operator");
@@ -10748,8 +13106,9 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
}
if (addTest) {
- CC = DAG.getConstant(X86::COND_NE, MVT::i8);
- Cond = EmitTest(Cond, X86::COND_NE, DAG);
+ X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
+ CC = DAG.getConstant(X86Cond, MVT::i8);
+ Cond = EmitTest(Cond, X86Cond, dl, DAG);
}
Cond = ConvertCmpIfNecessary(Cond, DAG);
return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
@@ -10764,13 +13123,50 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
SDValue
X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const {
- assert((Subtarget->isTargetCygMing() || Subtarget->isTargetWindows() ||
- getTargetMachine().Options.EnableSegmentedStacks) &&
- "This should be used only on Windows targets or when segmented stacks "
- "are being used");
- assert(!Subtarget->isTargetEnvMacho() && "Not implemented");
+ MachineFunction &MF = DAG.getMachineFunction();
+ bool SplitStack = MF.shouldSplitStack();
+ bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMacho()) ||
+ SplitStack;
SDLoc dl(Op);
+ if (!Lower) {
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ SDNode* Node = Op.getNode();
+
+ unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
+ assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
+ " not tell us which reg is the stack pointer!");
+ EVT VT = Node->getValueType(0);
+ SDValue Tmp1 = SDValue(Node, 0);
+ SDValue Tmp2 = SDValue(Node, 1);
+ SDValue Tmp3 = Node->getOperand(2);
+ SDValue Chain = Tmp1.getOperand(0);
+
+ // Chain the dynamic stack allocation so that it doesn't modify the stack
+ // pointer when other instructions are using the stack.
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
+ SDLoc(Node));
+
+ SDValue Size = Tmp2.getOperand(1);
+ SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
+ Chain = SP.getValue(1);
+ unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
+ const TargetFrameLowering &TFI = *DAG.getTarget().getFrameLowering();
+ unsigned StackAlign = TFI.getStackAlignment();
+ Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
+ if (Align > StackAlign)
+ Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
+ DAG.getConstant(-(uint64_t)Align, VT));
+ Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
+
+ Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
+ DAG.getIntPtrConstant(0, true), SDValue(),
+ SDLoc(Node));
+
+ SDValue Ops[2] = { Tmp1, Tmp2 };
+ return DAG.getMergeValues(Ops, dl);
+ }
+
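In the generic expansion above, the freshly decremented stack pointer is realigned by ANDing with -(uint64_t)Align, which rounds the address down to the requested power-of-two alignment. The arithmetic in isolation (hypothetical helper name):

#include <cassert>
#include <cstdint>

static uint64_t alignDown(uint64_t SP, uint64_t Align) {
  assert(Align != 0 && (Align & (Align - 1)) == 0 && "power of two expected");
  return SP & (0 - Align); // same as SP & ~(Align - 1)
}
// e.g. alignDown(0x7fffabcd, 16) == 0x7fffabc0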
// Get the inputs.
SDValue Chain = Op.getOperand(0);
SDValue Size = Op.getOperand(1);
@@ -10780,8 +13176,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
bool Is64Bit = Subtarget->is64Bit();
EVT SPTy = Is64Bit ? MVT::i64 : MVT::i32;
- if (getTargetMachine().Options.EnableSegmentedStacks) {
- MachineFunction &MF = DAG.getMachineFunction();
+ if (SplitStack) {
MachineRegisterInfo &MRI = MF.getRegInfo();
if (Is64Bit) {
@@ -10803,7 +13198,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
DAG.getRegister(Vreg, SPTy));
SDValue Ops1[2] = { Value, Chain };
- return DAG.getMergeValues(Ops1, 2, dl);
+ return DAG.getMergeValues(Ops1, dl);
} else {
SDValue Flag;
unsigned Reg = (Subtarget->is64Bit() ? X86::RAX : X86::EAX);
@@ -10815,7 +13210,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
const X86RegisterInfo *RegInfo =
- static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
+ static_cast<const X86RegisterInfo*>(DAG.getTarget().getRegisterInfo());
unsigned SPReg = RegInfo->getStackRegister();
SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
Chain = SP.getValue(1);
@@ -10827,7 +13222,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
}
SDValue Ops1[2] = { SP, Chain };
- return DAG.getMergeValues(Ops1, 2, dl);
+ return DAG.getMergeValues(Ops1, dl);
}
}
@@ -10888,8 +13283,7 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
MachinePointerInfo(SV, 16), false, false, 0);
MemOps.push_back(Store);
- return DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
- &MemOps[0], MemOps.size());
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}
SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
@@ -10925,7 +13319,7 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
if (ArgMode == 2) {
// Sanity Check: Make sure using fp_offset makes sense.
- assert(!getTargetMachine().Options.UseSoftFloat &&
+ assert(!DAG.getTarget().Options.UseSoftFloat &&
!(DAG.getMachineFunction()
.getFunction()->getAttributes()
.hasAttribute(AttributeSet::FunctionIndex,
@@ -10943,8 +13337,7 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
InstOps.push_back(DAG.getConstant(Align, MVT::i32));
SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
- VTs, &InstOps[0], InstOps.size(),
- MVT::i64,
+ VTs, InstOps, MVT::i64,
MachinePointerInfo(SV),
/*Align=*/0,
/*Volatile=*/false,
@@ -10979,14 +13372,19 @@ static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
// getTargetVShiftByConstNode - Handle vector element shifts where the shift
// amount is a constant. Takes immediate version of shift as input.
-static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, EVT VT,
+static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
SDValue SrcOp, uint64_t ShiftAmt,
SelectionDAG &DAG) {
+ MVT ElementType = VT.getVectorElementType();
+
+ // Fold this packed shift into its first operand if ShiftAmt is 0.
+ if (ShiftAmt == 0)
+ return SrcOp;
// Check for ShiftAmt >= element width
- if (ShiftAmt >= VT.getVectorElementType().getSizeInBits()) {
+ if (ShiftAmt >= ElementType.getSizeInBits()) {
if (Opc == X86ISD::VSRAI)
- ShiftAmt = VT.getVectorElementType().getSizeInBits() - 1;
+ ShiftAmt = ElementType.getSizeInBits() - 1;
else
return DAG.getConstant(0, VT);
}
@@ -10994,12 +13392,63 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, EVT VT,
assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
&& "Unknown target vector shift-by-constant node");
+ // Fold this packed vector shift into a build vector if SrcOp is a
+ // vector of Constants or UNDEFs, and SrcOp valuetype is the same as VT.
+ if (VT == SrcOp.getSimpleValueType() &&
+ ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
+ SmallVector<SDValue, 8> Elts;
+ unsigned NumElts = SrcOp->getNumOperands();
+ ConstantSDNode *ND;
+
+ switch(Opc) {
+ default: llvm_unreachable(nullptr);
+ case X86ISD::VSHLI:
+ for (unsigned i=0; i!=NumElts; ++i) {
+ SDValue CurrentOp = SrcOp->getOperand(i);
+ if (CurrentOp->getOpcode() == ISD::UNDEF) {
+ Elts.push_back(CurrentOp);
+ continue;
+ }
+ ND = cast<ConstantSDNode>(CurrentOp);
+ const APInt &C = ND->getAPIntValue();
+ Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType));
+ }
+ break;
+ case X86ISD::VSRLI:
+ for (unsigned i=0; i!=NumElts; ++i) {
+ SDValue CurrentOp = SrcOp->getOperand(i);
+ if (CurrentOp->getOpcode() == ISD::UNDEF) {
+ Elts.push_back(CurrentOp);
+ continue;
+ }
+ ND = cast<ConstantSDNode>(CurrentOp);
+ const APInt &C = ND->getAPIntValue();
+ Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType));
+ }
+ break;
+ case X86ISD::VSRAI:
+ for (unsigned i=0; i!=NumElts; ++i) {
+ SDValue CurrentOp = SrcOp->getOperand(i);
+ if (CurrentOp->getOpcode() == ISD::UNDEF) {
+ Elts.push_back(CurrentOp);
+ continue;
+ }
+ ND = cast<ConstantSDNode>(CurrentOp);
+ const APInt &C = ND->getAPIntValue();
+ Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType));
+ }
+ break;
+ }
+
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
+ }
+
return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
}
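The folding added above turns a VSHLI/VSRLI/VSRAI of a constant BUILD_VECTOR into a new BUILD_VECTOR of pre-shifted constants. A standalone sketch over plain 32-bit lanes (hypothetical names; UNDEF lanes, which the real code passes through unchanged, are omitted, and ShAmt is assumed below the element width as the caller guarantees):

#include <cstdint>
#include <vector>

enum class VShift { Shl, LShr, AShr }; // VSHLI, VSRLI, VSRAI

static std::vector<int32_t> foldVectorShift(const std::vector<int32_t> &Lanes,
                                            unsigned ShAmt, VShift Kind) {
  std::vector<int32_t> Out;
  Out.reserve(Lanes.size());
  for (int32_t L : Lanes) {
    switch (Kind) {
    case VShift::Shl:  Out.push_back(int32_t(uint32_t(L) << ShAmt)); break;
    case VShift::LShr: Out.push_back(int32_t(uint32_t(L) >> ShAmt)); break;
    case VShift::AShr: Out.push_back(int32_t(int64_t(L) >> ShAmt));  break;
    }
  }
  return Out; // the result is just another constant vector
}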
// getTargetVShiftNode - Handle vector element shifts where the shift amount
// may or may not be a constant. Takes immediate version of shift as input.
-static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, EVT VT,
+static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
SDValue SrcOp, SDValue ShAmt,
SelectionDAG &DAG) {
assert(ShAmt.getValueType() == MVT::i32 && "ShAmt is not i32");
@@ -11023,11 +13472,11 @@ static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, EVT VT,
ShOps[0] = ShAmt;
ShOps[1] = DAG.getConstant(0, MVT::i32);
ShOps[2] = ShOps[3] = DAG.getUNDEF(MVT::i32);
- ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, &ShOps[0], 4);
+ ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, ShOps);
// The return type has to be a 128-bit type with the same element
// type as the input type.
- MVT EltVT = VT.getVectorElementType().getSimpleVT();
+ MVT EltVT = VT.getVectorElementType();
EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
@@ -11146,6 +13595,21 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
return DAG.getNode(X86ISD::PMULUDQ, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2));
+ case Intrinsic::x86_sse41_pmuldq:
+ case Intrinsic::x86_avx2_pmul_dq:
+ return DAG.getNode(X86ISD::PMULDQ, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+
+ case Intrinsic::x86_sse2_pmulhu_w:
+ case Intrinsic::x86_avx2_pmulhu_w:
+ return DAG.getNode(ISD::MULHU, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+
+ case Intrinsic::x86_sse2_pmulh_w:
+ case Intrinsic::x86_avx2_pmulh_w:
+ return DAG.getNode(ISD::MULHS, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+
// SSE2/AVX2 sub with unsigned saturation intrinsics
case Intrinsic::x86_sse2_psubus_b:
case Intrinsic::x86_sse2_psubus_w:
@@ -11210,32 +13674,24 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
case Intrinsic::x86_avx2_pmaxu_b:
case Intrinsic::x86_avx2_pmaxu_w:
case Intrinsic::x86_avx2_pmaxu_d:
- case Intrinsic::x86_avx512_pmaxu_d:
- case Intrinsic::x86_avx512_pmaxu_q:
case Intrinsic::x86_sse2_pminu_b:
case Intrinsic::x86_sse41_pminuw:
case Intrinsic::x86_sse41_pminud:
case Intrinsic::x86_avx2_pminu_b:
case Intrinsic::x86_avx2_pminu_w:
case Intrinsic::x86_avx2_pminu_d:
- case Intrinsic::x86_avx512_pminu_d:
- case Intrinsic::x86_avx512_pminu_q:
case Intrinsic::x86_sse41_pmaxsb:
case Intrinsic::x86_sse2_pmaxs_w:
case Intrinsic::x86_sse41_pmaxsd:
case Intrinsic::x86_avx2_pmaxs_b:
case Intrinsic::x86_avx2_pmaxs_w:
case Intrinsic::x86_avx2_pmaxs_d:
- case Intrinsic::x86_avx512_pmaxs_d:
- case Intrinsic::x86_avx512_pmaxs_q:
case Intrinsic::x86_sse41_pminsb:
case Intrinsic::x86_sse2_pmins_w:
case Intrinsic::x86_sse41_pminsd:
case Intrinsic::x86_avx2_pmins_b:
case Intrinsic::x86_avx2_pmins_w:
- case Intrinsic::x86_avx2_pmins_d:
- case Intrinsic::x86_avx512_pmins_d:
- case Intrinsic::x86_avx512_pmins_q: {
+ case Intrinsic::x86_avx2_pmins_d: {
unsigned Opcode;
switch (IntNo) {
default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
@@ -11245,8 +13701,6 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
case Intrinsic::x86_avx2_pmaxu_b:
case Intrinsic::x86_avx2_pmaxu_w:
case Intrinsic::x86_avx2_pmaxu_d:
- case Intrinsic::x86_avx512_pmaxu_d:
- case Intrinsic::x86_avx512_pmaxu_q:
Opcode = X86ISD::UMAX;
break;
case Intrinsic::x86_sse2_pminu_b:
@@ -11255,8 +13709,6 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
case Intrinsic::x86_avx2_pminu_b:
case Intrinsic::x86_avx2_pminu_w:
case Intrinsic::x86_avx2_pminu_d:
- case Intrinsic::x86_avx512_pminu_d:
- case Intrinsic::x86_avx512_pminu_q:
Opcode = X86ISD::UMIN;
break;
case Intrinsic::x86_sse41_pmaxsb:
@@ -11265,8 +13717,6 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
case Intrinsic::x86_avx2_pmaxs_b:
case Intrinsic::x86_avx2_pmaxs_w:
case Intrinsic::x86_avx2_pmaxs_d:
- case Intrinsic::x86_avx512_pmaxs_d:
- case Intrinsic::x86_avx512_pmaxs_q:
Opcode = X86ISD::SMAX;
break;
case Intrinsic::x86_sse41_pminsb:
@@ -11275,8 +13725,6 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
case Intrinsic::x86_avx2_pmins_b:
case Intrinsic::x86_avx2_pmins_w:
case Intrinsic::x86_avx2_pmins_d:
- case Intrinsic::x86_avx512_pmins_d:
- case Intrinsic::x86_avx512_pmins_q:
Opcode = X86ISD::SMIN;
break;
}
@@ -11289,14 +13737,10 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
case Intrinsic::x86_sse2_max_pd:
case Intrinsic::x86_avx_max_ps_256:
case Intrinsic::x86_avx_max_pd_256:
- case Intrinsic::x86_avx512_max_ps_512:
- case Intrinsic::x86_avx512_max_pd_512:
case Intrinsic::x86_sse_min_ps:
case Intrinsic::x86_sse2_min_pd:
case Intrinsic::x86_avx_min_ps_256:
- case Intrinsic::x86_avx_min_pd_256:
- case Intrinsic::x86_avx512_min_ps_512:
- case Intrinsic::x86_avx512_min_pd_512: {
+ case Intrinsic::x86_avx_min_pd_256: {
unsigned Opcode;
switch (IntNo) {
default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
@@ -11304,16 +13748,12 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
case Intrinsic::x86_sse2_max_pd:
case Intrinsic::x86_avx_max_ps_256:
case Intrinsic::x86_avx_max_pd_256:
- case Intrinsic::x86_avx512_max_ps_512:
- case Intrinsic::x86_avx512_max_pd_512:
Opcode = X86ISD::FMAX;
break;
case Intrinsic::x86_sse_min_ps:
case Intrinsic::x86_sse2_min_pd:
case Intrinsic::x86_avx_min_ps_256:
case Intrinsic::x86_avx_min_pd_256:
- case Intrinsic::x86_avx512_min_ps_512:
- case Intrinsic::x86_avx512_min_pd_512:
Opcode = X86ISD::FMIN;
break;
}
@@ -11356,11 +13796,37 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
Op.getOperand(1), Op.getOperand(2));
}
+ case Intrinsic::x86_sse2_packssdw_128:
+ case Intrinsic::x86_sse2_packsswb_128:
+ case Intrinsic::x86_avx2_packssdw:
+ case Intrinsic::x86_avx2_packsswb:
+ return DAG.getNode(X86ISD::PACKSS, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+
+ case Intrinsic::x86_sse2_packuswb_128:
+ case Intrinsic::x86_sse41_packusdw:
+ case Intrinsic::x86_avx2_packuswb:
+ case Intrinsic::x86_avx2_packusdw:
+ return DAG.getNode(X86ISD::PACKUS, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+
case Intrinsic::x86_ssse3_pshuf_b_128:
case Intrinsic::x86_avx2_pshuf_b:
return DAG.getNode(X86ISD::PSHUFB, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2));
+ case Intrinsic::x86_sse2_pshuf_d:
+ return DAG.getNode(X86ISD::PSHUFD, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+
+ case Intrinsic::x86_sse2_pshufl_w:
+ return DAG.getNode(X86ISD::PSHUFLW, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+
+ case Intrinsic::x86_sse2_pshufh_w:
+ return DAG.getNode(X86ISD::PSHUFHW, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+
case Intrinsic::x86_ssse3_psign_b_128:
case Intrinsic::x86_ssse3_psign_w_128:
case Intrinsic::x86_ssse3_psign_d_128:
@@ -11459,14 +13925,14 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
- case Intrinsic::x86_avx512_kortestz:
- case Intrinsic::x86_avx512_kortestc: {
- unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz)? X86::COND_E: X86::COND_B;
+ case Intrinsic::x86_avx512_kortestz_w:
+ case Intrinsic::x86_avx512_kortestc_w: {
+ unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w)? X86::COND_E: X86::COND_B;
SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
SDValue CC = DAG.getConstant(X86CC, MVT::i8);
SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
- SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
+ SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
@@ -11560,7 +14026,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
Opcode = X86ISD::VSRAI;
break;
}
- return getTargetVShiftNode(Opcode, dl, Op.getValueType(),
+ return getTargetVShiftNode(Opcode, dl, Op.getSimpleValueType(),
Op.getOperand(1), Op.getOperand(2), DAG);
}
@@ -11621,7 +14087,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
}
SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
- SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size());
+ SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
DAG.getConstant(X86CC, MVT::i8),
SDValue(PCMP.getNode(), 1));
@@ -11638,7 +14104,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
- return DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size());
+ return DAG.getNode(Opcode, dl, VTs, NewOps);
}
case Intrinsic::x86_fma_vfmadd_ps:
case Intrinsic::x86_fma_vfmadd_pd:
@@ -11675,7 +14141,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
case Intrinsic::x86_fma_vfmaddsub_ps_512:
case Intrinsic::x86_fma_vfmaddsub_pd_512:
case Intrinsic::x86_fma_vfmsubadd_ps_512:
- case Intrinsic::x86_fma_vfmsubadd_pd_512: {
+ case Intrinsic::x86_fma_vfmsubadd_pd_512: {
unsigned Opc;
switch (IntNo) {
default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
@@ -11736,27 +14202,6 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
}
static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
- SDValue Base, SDValue Index,
- SDValue ScaleOp, SDValue Chain,
- const X86Subtarget * Subtarget) {
- SDLoc dl(Op);
- ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
- assert(C && "Invalid scale type");
- SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
- SDValue Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
- EVT MaskVT = MVT::getVectorVT(MVT::i1,
- Index.getValueType().getVectorNumElements());
- SDValue MaskInReg = DAG.getConstant(~0, MaskVT);
- SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
- SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
- SDValue Segment = DAG.getRegister(0, MVT::i32);
- SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
- SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
- SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
- return DAG.getMergeValues(RetOps, array_lengthof(RetOps), dl);
-}
-
-static SDValue getMGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDValue Src, SDValue Mask, SDValue Base,
SDValue Index, SDValue ScaleOp, SDValue Chain,
const X86Subtarget * Subtarget) {
@@ -11765,22 +14210,27 @@ static SDValue getMGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
assert(C && "Invalid scale type");
SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
EVT MaskVT = MVT::getVectorVT(MVT::i1,
- Index.getValueType().getVectorNumElements());
- SDValue MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
+ Index.getSimpleValueType().getVectorNumElements());
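+ // If the mask is a compile-time constant it can be encoded directly as an
+ // immediate; otherwise bitcast the incoming mask vector to the i1 mask type.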
+ SDValue MaskInReg;
+ ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
+ if (MaskC)
+ MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
+ else
+ MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
SDValue Segment = DAG.getRegister(0, MVT::i32);
if (Src.getOpcode() == ISD::UNDEF)
- Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
+ Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
- return DAG.getMergeValues(RetOps, array_lengthof(RetOps), dl);
+ return DAG.getMergeValues(RetOps, dl);
}
static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
- SDValue Src, SDValue Base, SDValue Index,
- SDValue ScaleOp, SDValue Chain) {
+ SDValue Src, SDValue Mask, SDValue Base,
+ SDValue Index, SDValue ScaleOp, SDValue Chain) {
SDLoc dl(Op);
ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
assert(C && "Invalid scale type");
@@ -11788,53 +14238,266 @@ static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
SDValue Segment = DAG.getRegister(0, MVT::i32);
EVT MaskVT = MVT::getVectorVT(MVT::i1,
- Index.getValueType().getVectorNumElements());
- SDValue MaskInReg = DAG.getConstant(~0, MaskVT);
+ Index.getSimpleValueType().getVectorNumElements());
+ SDValue MaskInReg;
+ ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
+ if (MaskC)
+ MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
+ else
+ MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
return SDValue(Res, 1);
}
-static SDValue getMScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
- SDValue Src, SDValue Mask, SDValue Base,
- SDValue Index, SDValue ScaleOp, SDValue Chain) {
+static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
+ SDValue Mask, SDValue Base, SDValue Index,
+ SDValue ScaleOp, SDValue Chain) {
SDLoc dl(Op);
ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
assert(C && "Invalid scale type");
SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
SDValue Segment = DAG.getRegister(0, MVT::i32);
- EVT MaskVT = MVT::getVectorVT(MVT::i1,
- Index.getValueType().getVectorNumElements());
- SDValue MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
- SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
- SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
- SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
- return SDValue(Res, 1);
+ EVT MaskVT =
+ MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
+ SDValue MaskInReg;
+ ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
+ if (MaskC)
+ MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
+ else
+ MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
+ //SDVTList VTs = DAG.getVTList(MVT::Other);
+ SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
+ SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
+ return SDValue(Res, 0);
+}
+
+// getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
+// read performance monitor counters (x86_rdpmc).
+static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
+ SelectionDAG &DAG, const X86Subtarget *Subtarget,
+ SmallVectorImpl<SDValue> &Results) {
+ assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
+ SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue LO, HI;
+
+ // The ECX register is used to select the index of the performance counter
+ // to read.
+ SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
+ N->getOperand(2));
+ SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
+
+ // Reads the content of a 64-bit performance counter and returns it in the
+ // registers EDX:EAX.
+ if (Subtarget->is64Bit()) {
+ LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
+ HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
+ LO.getValue(2));
+ } else {
+ LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
+ HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
+ LO.getValue(2));
+ }
+ Chain = HI.getValue(1);
+
+ if (Subtarget->is64Bit()) {
+ // The EAX register is loaded with the low-order 32 bits. The EDX register
+ // is loaded with the supported high-order bits of the counter.
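+ // Illustrative values: with EAX = 0x89ABCDEF and EDX = 0xAB, the nodes below
+ // compute (0xAB << 32) | 0x89ABCDEF = 0xAB89ABCDEF.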
+ SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
+ DAG.getConstant(32, MVT::i8));
+ Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
+ Results.push_back(Chain);
+ return;
+ }
+
+ // Use a buildpair to merge the two 32-bit values into a 64-bit one.
+ SDValue Ops[] = { LO, HI };
+ SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
+ Results.push_back(Pair);
+ Results.push_back(Chain);
+}
+
+// getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
+// read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
+// also used to custom lower READCYCLECOUNTER nodes.
+static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
+ SelectionDAG &DAG, const X86Subtarget *Subtarget,
+ SmallVectorImpl<SDValue> &Results) {
+ SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
+ SDValue LO, HI;
+
+ // The processor's time-stamp counter (a 64-bit MSR) is stored into the
+ // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
+ // and the EAX register is loaded with the low-order 32 bits.
+ if (Subtarget->is64Bit()) {
+ LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
+ HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
+ LO.getValue(2));
+ } else {
+ LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
+ HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
+ LO.getValue(2));
+ }
+ SDValue Chain = HI.getValue(1);
+
+ if (Opcode == X86ISD::RDTSCP_DAG) {
+ assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
+
+ // Instruction RDTSCP loads the IA32:TSC_AUX_MSR (address C000_0103H) into
+ // the ECX register. Add 'ecx' explicitly to the chain.
+ SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
+ HI.getValue(2));
+ // Explicitly store the content of ECX at the location passed as input
+ // to the 'rdtscp' intrinsic.
+ Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
+ MachinePointerInfo(), false, false, 0);
+ }
+
+ if (Subtarget->is64Bit()) {
+ // The EDX register is loaded with the high-order 32 bits of the MSR, and
+ // the EAX register is loaded with the low-order 32 bits.
+ SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
+ DAG.getConstant(32, MVT::i8));
+ Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
+ Results.push_back(Chain);
+ return;
+ }
+
+ // Use a buildpair to merge the two 32-bit values into a 64-bit one.
+ SDValue Ops[] = { LO, HI };
+ SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
+ Results.push_back(Pair);
+ Results.push_back(Chain);
+}
+
+static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ SmallVector<SDValue, 2> Results;
+ SDLoc DL(Op);
+ getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
+ Results);
+ return DAG.getMergeValues(Results, DL);
+}
+
+enum IntrinsicType {
+ GATHER, SCATTER, PREFETCH, RDSEED, RDRAND, RDPMC, RDTSC, XTEST
+};
+
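+// IntrinsicData describes how an intrinsic with side effects is lowered:
+// Opc0 is the primary opcode and Opc1 an optional alternate; only the
+// gather/scatter prefetch entries use Opc1, to select the PF1 hint variant.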
+struct IntrinsicData {
+ IntrinsicData(IntrinsicType IType, unsigned IOpc0, unsigned IOpc1)
+ :Type(IType), Opc0(IOpc0), Opc1(IOpc1) {}
+ IntrinsicType Type;
+ unsigned Opc0;
+ unsigned Opc1;
+};
+
+std::map < unsigned, IntrinsicData> IntrMap;
+static void InitIntrinsicsMap() {
+ static bool Initialized = false;
+ if (Initialized)
+ return;
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_gather_qps_512,
+ IntrinsicData(GATHER, X86::VGATHERQPSZrm, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_gather_qps_512,
+ IntrinsicData(GATHER, X86::VGATHERQPSZrm, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_gather_qpd_512,
+ IntrinsicData(GATHER, X86::VGATHERQPDZrm, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_gather_dpd_512,
+ IntrinsicData(GATHER, X86::VGATHERDPDZrm, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_gather_dps_512,
+ IntrinsicData(GATHER, X86::VGATHERDPSZrm, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_gather_qpi_512,
+ IntrinsicData(GATHER, X86::VPGATHERQDZrm, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_gather_qpq_512,
+ IntrinsicData(GATHER, X86::VPGATHERQQZrm, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_gather_dpi_512,
+ IntrinsicData(GATHER, X86::VPGATHERDDZrm, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_gather_dpq_512,
+ IntrinsicData(GATHER, X86::VPGATHERDQZrm, 0)));
+
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_scatter_qps_512,
+ IntrinsicData(SCATTER, X86::VSCATTERQPSZmr, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_scatter_qpd_512,
+ IntrinsicData(SCATTER, X86::VSCATTERQPDZmr, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_scatter_dpd_512,
+ IntrinsicData(SCATTER, X86::VSCATTERDPDZmr, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_scatter_dps_512,
+ IntrinsicData(SCATTER, X86::VSCATTERDPSZmr, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_scatter_qpi_512,
+ IntrinsicData(SCATTER, X86::VPSCATTERQDZmr, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_scatter_qpq_512,
+ IntrinsicData(SCATTER, X86::VPSCATTERQQZmr, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_scatter_dpi_512,
+ IntrinsicData(SCATTER, X86::VPSCATTERDDZmr, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_scatter_dpq_512,
+ IntrinsicData(SCATTER, X86::VPSCATTERDQZmr, 0)));
+
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_gatherpf_qps_512,
+ IntrinsicData(PREFETCH, X86::VGATHERPF0QPSm,
+ X86::VGATHERPF1QPSm)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_gatherpf_qpd_512,
+ IntrinsicData(PREFETCH, X86::VGATHERPF0QPDm,
+ X86::VGATHERPF1QPDm)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_gatherpf_dpd_512,
+ IntrinsicData(PREFETCH, X86::VGATHERPF0DPDm,
+ X86::VGATHERPF1DPDm)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_gatherpf_dps_512,
+ IntrinsicData(PREFETCH, X86::VGATHERPF0DPSm,
+ X86::VGATHERPF1DPSm)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_scatterpf_qps_512,
+ IntrinsicData(PREFETCH, X86::VSCATTERPF0QPSm,
+ X86::VSCATTERPF1QPSm)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_scatterpf_qpd_512,
+ IntrinsicData(PREFETCH, X86::VSCATTERPF0QPDm,
+ X86::VSCATTERPF1QPDm)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_scatterpf_dpd_512,
+ IntrinsicData(PREFETCH, X86::VSCATTERPF0DPDm,
+ X86::VSCATTERPF1DPDm)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_avx512_scatterpf_dps_512,
+ IntrinsicData(PREFETCH, X86::VSCATTERPF0DPSm,
+ X86::VSCATTERPF1DPSm)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_rdrand_16,
+ IntrinsicData(RDRAND, X86ISD::RDRAND, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_rdrand_32,
+ IntrinsicData(RDRAND, X86ISD::RDRAND, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_rdrand_64,
+ IntrinsicData(RDRAND, X86ISD::RDRAND, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_rdseed_16,
+ IntrinsicData(RDSEED, X86ISD::RDSEED, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_rdseed_32,
+ IntrinsicData(RDSEED, X86ISD::RDSEED, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_rdseed_64,
+ IntrinsicData(RDSEED, X86ISD::RDSEED, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_xtest,
+ IntrinsicData(XTEST, X86ISD::XTEST, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_rdtsc,
+ IntrinsicData(RDTSC, X86ISD::RDTSC_DAG, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_rdtscp,
+ IntrinsicData(RDTSC, X86ISD::RDTSCP_DAG, 0)));
+ IntrMap.insert(std::make_pair(Intrinsic::x86_rdpmc,
+ IntrinsicData(RDPMC, X86ISD::RDPMC_DAG, 0)));
+ Initialized = true;
}
static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
SelectionDAG &DAG) {
- SDLoc dl(Op);
+ InitIntrinsicsMap();
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
- switch (IntNo) {
- default: return SDValue(); // Don't custom lower most intrinsics.
+ std::map < unsigned, IntrinsicData>::const_iterator itr = IntrMap.find(IntNo);
+ if (itr == IntrMap.end())
+ return SDValue();
- // RDRAND/RDSEED intrinsics.
- case Intrinsic::x86_rdrand_16:
- case Intrinsic::x86_rdrand_32:
- case Intrinsic::x86_rdrand_64:
- case Intrinsic::x86_rdseed_16:
- case Intrinsic::x86_rdseed_32:
- case Intrinsic::x86_rdseed_64: {
- unsigned Opcode = (IntNo == Intrinsic::x86_rdseed_16 ||
- IntNo == Intrinsic::x86_rdseed_32 ||
- IntNo == Intrinsic::x86_rdseed_64) ? X86ISD::RDSEED :
- X86ISD::RDRAND;
+ SDLoc dl(Op);
+ IntrinsicData Intr = itr->second;
+ switch(Intr.Type) {
+ case RDSEED:
+ case RDRAND: {
// Emit the node with the right value type.
SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
- SDValue Result = DAG.getNode(Opcode, dl, VTs, Op.getOperand(0));
+ SDValue Result = DAG.getNode(Intr.Opc0, dl, VTs, Op.getOperand(0));
// If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
// Otherwise return the value from Rand, which is always 0, casted to i32.
@@ -11844,152 +14507,61 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
SDValue(Result.getNode(), 1) };
SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
DAG.getVTList(Op->getValueType(1), MVT::Glue),
- Ops, array_lengthof(Ops));
+ Ops);
// Return { result, isValid, chain }.
return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
SDValue(Result.getNode(), 2));
}
- //int_gather(index, base, scale);
- case Intrinsic::x86_avx512_gather_qpd_512:
- case Intrinsic::x86_avx512_gather_qps_512:
- case Intrinsic::x86_avx512_gather_dpd_512:
- case Intrinsic::x86_avx512_gather_qpi_512:
- case Intrinsic::x86_avx512_gather_qpq_512:
- case Intrinsic::x86_avx512_gather_dpq_512:
- case Intrinsic::x86_avx512_gather_dps_512:
- case Intrinsic::x86_avx512_gather_dpi_512: {
- unsigned Opc;
- switch (IntNo) {
- default: llvm_unreachable("Unexpected intrinsic!");
- case Intrinsic::x86_avx512_gather_qps_512: Opc = X86::VGATHERQPSZrm; break;
- case Intrinsic::x86_avx512_gather_qpd_512: Opc = X86::VGATHERQPDZrm; break;
- case Intrinsic::x86_avx512_gather_dpd_512: Opc = X86::VGATHERDPDZrm; break;
- case Intrinsic::x86_avx512_gather_dps_512: Opc = X86::VGATHERDPSZrm; break;
- case Intrinsic::x86_avx512_gather_qpi_512: Opc = X86::VPGATHERQDZrm; break;
- case Intrinsic::x86_avx512_gather_qpq_512: Opc = X86::VPGATHERQQZrm; break;
- case Intrinsic::x86_avx512_gather_dpi_512: Opc = X86::VPGATHERDDZrm; break;
- case Intrinsic::x86_avx512_gather_dpq_512: Opc = X86::VPGATHERDQZrm; break;
- }
- SDValue Chain = Op.getOperand(0);
- SDValue Index = Op.getOperand(2);
- SDValue Base = Op.getOperand(3);
- SDValue Scale = Op.getOperand(4);
- return getGatherNode(Opc, Op, DAG, Base, Index, Scale, Chain, Subtarget);
- }
- //int_gather_mask(v1, mask, index, base, scale);
- case Intrinsic::x86_avx512_gather_qps_mask_512:
- case Intrinsic::x86_avx512_gather_qpd_mask_512:
- case Intrinsic::x86_avx512_gather_dpd_mask_512:
- case Intrinsic::x86_avx512_gather_dps_mask_512:
- case Intrinsic::x86_avx512_gather_qpi_mask_512:
- case Intrinsic::x86_avx512_gather_qpq_mask_512:
- case Intrinsic::x86_avx512_gather_dpi_mask_512:
- case Intrinsic::x86_avx512_gather_dpq_mask_512: {
- unsigned Opc;
- switch (IntNo) {
- default: llvm_unreachable("Unexpected intrinsic!");
- case Intrinsic::x86_avx512_gather_qps_mask_512:
- Opc = X86::VGATHERQPSZrm; break;
- case Intrinsic::x86_avx512_gather_qpd_mask_512:
- Opc = X86::VGATHERQPDZrm; break;
- case Intrinsic::x86_avx512_gather_dpd_mask_512:
- Opc = X86::VGATHERDPDZrm; break;
- case Intrinsic::x86_avx512_gather_dps_mask_512:
- Opc = X86::VGATHERDPSZrm; break;
- case Intrinsic::x86_avx512_gather_qpi_mask_512:
- Opc = X86::VPGATHERQDZrm; break;
- case Intrinsic::x86_avx512_gather_qpq_mask_512:
- Opc = X86::VPGATHERQQZrm; break;
- case Intrinsic::x86_avx512_gather_dpi_mask_512:
- Opc = X86::VPGATHERDDZrm; break;
- case Intrinsic::x86_avx512_gather_dpq_mask_512:
- Opc = X86::VPGATHERDQZrm; break;
- }
+ case GATHER: {
+ //gather(v1, mask, index, base, scale);
SDValue Chain = Op.getOperand(0);
SDValue Src = Op.getOperand(2);
- SDValue Mask = Op.getOperand(3);
+ SDValue Base = Op.getOperand(3);
SDValue Index = Op.getOperand(4);
- SDValue Base = Op.getOperand(5);
+ SDValue Mask = Op.getOperand(5);
SDValue Scale = Op.getOperand(6);
- return getMGatherNode(Opc, Op, DAG, Src, Mask, Base, Index, Scale, Chain,
+ return getGatherNode(Intr.Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain,
Subtarget);
}
- //int_scatter(base, index, v1, scale);
- case Intrinsic::x86_avx512_scatter_qpd_512:
- case Intrinsic::x86_avx512_scatter_qps_512:
- case Intrinsic::x86_avx512_scatter_dpd_512:
- case Intrinsic::x86_avx512_scatter_qpi_512:
- case Intrinsic::x86_avx512_scatter_qpq_512:
- case Intrinsic::x86_avx512_scatter_dpq_512:
- case Intrinsic::x86_avx512_scatter_dps_512:
- case Intrinsic::x86_avx512_scatter_dpi_512: {
- unsigned Opc;
- switch (IntNo) {
- default: llvm_unreachable("Unexpected intrinsic!");
- case Intrinsic::x86_avx512_scatter_qpd_512:
- Opc = X86::VSCATTERQPDZmr; break;
- case Intrinsic::x86_avx512_scatter_qps_512:
- Opc = X86::VSCATTERQPSZmr; break;
- case Intrinsic::x86_avx512_scatter_dpd_512:
- Opc = X86::VSCATTERDPDZmr; break;
- case Intrinsic::x86_avx512_scatter_dps_512:
- Opc = X86::VSCATTERDPSZmr; break;
- case Intrinsic::x86_avx512_scatter_qpi_512:
- Opc = X86::VPSCATTERQDZmr; break;
- case Intrinsic::x86_avx512_scatter_qpq_512:
- Opc = X86::VPSCATTERQQZmr; break;
- case Intrinsic::x86_avx512_scatter_dpq_512:
- Opc = X86::VPSCATTERDQZmr; break;
- case Intrinsic::x86_avx512_scatter_dpi_512:
- Opc = X86::VPSCATTERDDZmr; break;
- }
- SDValue Chain = Op.getOperand(0);
- SDValue Base = Op.getOperand(2);
- SDValue Index = Op.getOperand(3);
- SDValue Src = Op.getOperand(4);
- SDValue Scale = Op.getOperand(5);
- return getScatterNode(Opc, Op, DAG, Src, Base, Index, Scale, Chain);
- }
- //int_scatter_mask(base, mask, index, v1, scale);
- case Intrinsic::x86_avx512_scatter_qps_mask_512:
- case Intrinsic::x86_avx512_scatter_qpd_mask_512:
- case Intrinsic::x86_avx512_scatter_dpd_mask_512:
- case Intrinsic::x86_avx512_scatter_dps_mask_512:
- case Intrinsic::x86_avx512_scatter_qpi_mask_512:
- case Intrinsic::x86_avx512_scatter_qpq_mask_512:
- case Intrinsic::x86_avx512_scatter_dpi_mask_512:
- case Intrinsic::x86_avx512_scatter_dpq_mask_512: {
- unsigned Opc;
- switch (IntNo) {
- default: llvm_unreachable("Unexpected intrinsic!");
- case Intrinsic::x86_avx512_scatter_qpd_mask_512:
- Opc = X86::VSCATTERQPDZmr; break;
- case Intrinsic::x86_avx512_scatter_qps_mask_512:
- Opc = X86::VSCATTERQPSZmr; break;
- case Intrinsic::x86_avx512_scatter_dpd_mask_512:
- Opc = X86::VSCATTERDPDZmr; break;
- case Intrinsic::x86_avx512_scatter_dps_mask_512:
- Opc = X86::VSCATTERDPSZmr; break;
- case Intrinsic::x86_avx512_scatter_qpi_mask_512:
- Opc = X86::VPSCATTERQDZmr; break;
- case Intrinsic::x86_avx512_scatter_qpq_mask_512:
- Opc = X86::VPSCATTERQQZmr; break;
- case Intrinsic::x86_avx512_scatter_dpq_mask_512:
- Opc = X86::VPSCATTERDQZmr; break;
- case Intrinsic::x86_avx512_scatter_dpi_mask_512:
- Opc = X86::VPSCATTERDDZmr; break;
- }
+ case SCATTER: {
+ //scatter(base, mask, index, v1, scale);
SDValue Chain = Op.getOperand(0);
SDValue Base = Op.getOperand(2);
SDValue Mask = Op.getOperand(3);
SDValue Index = Op.getOperand(4);
SDValue Src = Op.getOperand(5);
SDValue Scale = Op.getOperand(6);
- return getMScatterNode(Opc, Op, DAG, Src, Mask, Base, Index, Scale, Chain);
+ return getScatterNode(Intr.Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain);
+ }
+ case PREFETCH: {
+ SDValue Hint = Op.getOperand(6);
+ unsigned HintVal;
+ if (dyn_cast<ConstantSDNode> (Hint) == nullptr ||
+ (HintVal = dyn_cast<ConstantSDNode> (Hint)->getZExtValue()) > 1)
+ llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
+ unsigned Opcode = (HintVal ? Intr.Opc1 : Intr.Opc0);
+ SDValue Chain = Op.getOperand(0);
+ SDValue Mask = Op.getOperand(2);
+ SDValue Index = Op.getOperand(3);
+ SDValue Base = Op.getOperand(4);
+ SDValue Scale = Op.getOperand(5);
+ return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
+ }
+ // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
+ case RDTSC: {
+ SmallVector<SDValue, 2> Results;
+ getReadTimeStampCounter(Op.getNode(), dl, Intr.Opc0, DAG, Subtarget, Results);
+ return DAG.getMergeValues(Results, dl);
+ }
+ // Read Performance Monitoring Counters.
+ case RDPMC: {
+ SmallVector<SDValue, 2> Results;
+ getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
+ return DAG.getMergeValues(Results, dl);
}
// XTEST intrinsics.
- case Intrinsic::x86_xtest: {
+ case XTEST: {
SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
SDValue InTrans = DAG.getNode(X86ISD::XTEST, dl, VTs, Op.getOperand(0));
SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
@@ -12000,6 +14572,7 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
Ret, SDValue(InTrans.getNode(), 1));
}
}
+ llvm_unreachable("Unknown Intrinsic Type");
}
SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
@@ -12007,6 +14580,9 @@ SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
MFI->setReturnAddressIsTaken(true);
+ if (verifyReturnAddressArgumentIsConstant(Op, DAG))
+ return SDValue();
+
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
SDLoc dl(Op);
EVT PtrVT = getPointerTy();
@@ -12014,7 +14590,7 @@ SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
if (Depth > 0) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
const X86RegisterInfo *RegInfo =
- static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
+ static_cast<const X86RegisterInfo*>(DAG.getTarget().getRegisterInfo());
SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, PtrVT,
@@ -12036,7 +14612,7 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl(Op); // FIXME probably not meaningful
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
const X86RegisterInfo *RegInfo =
- static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
+ static_cast<const X86RegisterInfo*>(DAG.getTarget().getRegisterInfo());
unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
(FrameReg == X86::EBP && VT == MVT::i32)) &&
@@ -12049,10 +14625,23 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
return FrameAddr;
}
+// FIXME? Maybe this could be a TableGen attribute on some registers and
+// this table could be generated automatically from RegInfo.
+unsigned X86TargetLowering::getRegisterByName(const char* RegName,
+ EVT VT) const {
+ unsigned Reg = StringSwitch<unsigned>(RegName)
+ .Case("esp", X86::ESP)
+ .Case("rsp", X86::RSP)
+ .Default(0);
+ if (Reg)
+ return Reg;
+ report_fatal_error("Invalid register name global variable");
+}
+
SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
SelectionDAG &DAG) const {
const X86RegisterInfo *RegInfo =
- static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
+ static_cast<const X86RegisterInfo*>(DAG.getTarget().getRegisterInfo());
return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
}
@@ -12064,7 +14653,7 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
EVT PtrVT = getPointerTy();
const X86RegisterInfo *RegInfo =
- static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
+ static_cast<const X86RegisterInfo*>(DAG.getTarget().getRegisterInfo());
unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
(FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
@@ -12111,7 +14700,7 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
SDLoc dl (Op);
const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
- const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo();
+ const TargetRegisterInfo* TRI = DAG.getTarget().getRegisterInfo();
if (Subtarget->is64Bit()) {
SDValue OutChains[6];
@@ -12168,7 +14757,7 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
MachinePointerInfo(TrmpAddr, 22),
false, false, 0);
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 6);
+ return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
} else {
const Function *Func =
cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
@@ -12248,7 +14837,7 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
MachinePointerInfo(TrmpAddr, 6),
false, false, 1);
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 4);
+ return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
}
@@ -12277,7 +14866,7 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
const TargetMachine &TM = MF.getTarget();
const TargetFrameLowering &TFI = *TM.getFrameLowering();
unsigned StackAlignment = TFI.getStackAlignment();
- EVT VT = Op.getValueType();
+ MVT VT = Op.getSimpleValueType();
SDLoc DL(Op);
// Save FP Control Word to stack slot
@@ -12291,8 +14880,7 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
DAG.getVTList(MVT::Other),
- Ops, array_lengthof(Ops), MVT::i16,
- MMO);
+ Ops, MVT::i16, MMO);
// Load FP Control Word from stack slot
SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
@@ -12322,7 +14910,7 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
}
static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
- EVT VT = Op.getValueType();
+ MVT VT = Op.getSimpleValueType();
EVT OpVT = VT;
unsigned NumBits = VT.getSizeInBits();
SDLoc dl(Op);
@@ -12345,7 +14933,7 @@ static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
DAG.getConstant(X86::COND_E, MVT::i8),
Op.getValue(1)
};
- Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops));
+ Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
// Finally xor with NumBits-1.
Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
@@ -12356,7 +14944,7 @@ static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
}
static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
- EVT VT = Op.getValueType();
+ MVT VT = Op.getSimpleValueType();
EVT OpVT = VT;
unsigned NumBits = VT.getSizeInBits();
SDLoc dl(Op);
@@ -12381,7 +14969,7 @@ static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
}
static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
- EVT VT = Op.getValueType();
+ MVT VT = Op.getSimpleValueType();
unsigned NumBits = VT.getSizeInBits();
SDLoc dl(Op);
Op = Op.getOperand(0);
@@ -12397,13 +14985,13 @@ static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
DAG.getConstant(X86::COND_E, MVT::i8),
Op.getValue(1)
};
- return DAG.getNode(X86ISD::CMOV, dl, VT, Ops, array_lengthof(Ops));
+ return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
}
// Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
// ones, and then concatenate the result back.
static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
- EVT VT = Op.getValueType();
+ MVT VT = Op.getSimpleValueType();
assert(VT.is256BitVector() && VT.isInteger() &&
"Unsupported value type for operation");
@@ -12421,8 +15009,8 @@ static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
- MVT EltVT = VT.getVectorElementType().getSimpleVT();
- EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
+ MVT EltVT = VT.getVectorElementType();
+ MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
@@ -12430,15 +15018,15 @@ static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
}
static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
- assert(Op.getValueType().is256BitVector() &&
- Op.getValueType().isInteger() &&
+ assert(Op.getSimpleValueType().is256BitVector() &&
+ Op.getSimpleValueType().isInteger() &&
"Only handle AVX 256-bit vector integer operation");
return Lower256IntArith(Op, DAG);
}
static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
- assert(Op.getValueType().is256BitVector() &&
- Op.getValueType().isInteger() &&
+ assert(Op.getSimpleValueType().is256BitVector() &&
+ Op.getSimpleValueType().isInteger() &&
"Only handle AVX 256-bit vector integer operation");
return Lower256IntArith(Op, DAG);
}
@@ -12446,7 +15034,7 @@ static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
SelectionDAG &DAG) {
SDLoc dl(Op);
- EVT VT = Op.getValueType();
+ MVT VT = Op.getSimpleValueType();
// Decompose 256-bit ops into smaller 128-bit ops.
if (VT.is256BitVector() && !Subtarget->hasInt256())
@@ -12515,73 +15103,157 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
}
-static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
+ assert(Subtarget->isTargetWin64() && "Unexpected target");
EVT VT = Op.getValueType();
- EVT EltTy = VT.getVectorElementType();
- unsigned NumElts = VT.getVectorNumElements();
- SDValue N0 = Op.getOperand(0);
+ assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
+ "Unexpected return type for lowering");
+
+ RTLIB::Libcall LC;
+ bool isSigned;
+ switch (Op->getOpcode()) {
+ default: llvm_unreachable("Unexpected request for libcall!");
+ case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
+ case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
+ case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
+ case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
+ case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
+ case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
+ }
+
SDLoc dl(Op);
+ SDValue InChain = DAG.getEntryNode();
- // Lower sdiv X, pow2-const.
- BuildVectorSDNode *C = dyn_cast<BuildVectorSDNode>(Op.getOperand(1));
- if (!C)
- return SDValue();
+ TargetLowering::ArgListTy Args;
+ TargetLowering::ArgListEntry Entry;
+ for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
+ EVT ArgVT = Op->getOperand(i).getValueType();
+ assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
+ "Unexpected argument type for lowering");
+ SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
+ Entry.Node = StackPtr;
+ InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MachinePointerInfo(),
+ false, false, 16);
+ Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+ Entry.Ty = PointerType::get(ArgTy,0);
+ Entry.isSExt = false;
+ Entry.isZExt = false;
+ Args.push_back(Entry);
+ }
+
+ SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
+ getPointerTy());
- APInt SplatValue, SplatUndef;
- unsigned SplatBitSize;
- bool HasAnyUndefs;
- if (!C->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
- HasAnyUndefs) ||
- EltTy.getSizeInBits() < SplatBitSize)
- return SDValue();
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl).setChain(InChain)
+ .setCallee(getLibcallCallingConv(LC),
+ static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
+ Callee, std::move(Args), 0)
+ .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
- if ((SplatValue != 0) &&
- (SplatValue.isPowerOf2() || (-SplatValue).isPowerOf2())) {
- unsigned Lg2 = SplatValue.countTrailingZeros();
- // Splat the sign bit.
- SmallVector<SDValue, 16> Sz(NumElts,
- DAG.getConstant(EltTy.getSizeInBits() - 1,
- EltTy));
- SDValue SGN = DAG.getNode(ISD::SRA, dl, VT, N0,
- DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Sz[0],
- NumElts));
- // Add (N0 < 0) ? abs2 - 1 : 0;
- SmallVector<SDValue, 16> Amt(NumElts,
- DAG.getConstant(EltTy.getSizeInBits() - Lg2,
- EltTy));
- SDValue SRL = DAG.getNode(ISD::SRL, dl, VT, SGN,
- DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Amt[0],
- NumElts));
- SDValue ADD = DAG.getNode(ISD::ADD, dl, VT, N0, SRL);
- SmallVector<SDValue, 16> Lg2Amt(NumElts, DAG.getConstant(Lg2, EltTy));
- SDValue SRA = DAG.getNode(ISD::SRA, dl, VT, ADD,
- DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Lg2Amt[0],
- NumElts));
-
- // If we're dividing by a positive value, we're done. Otherwise, we must
- // negate the result.
- if (SplatValue.isNonNegative())
- return SRA;
-
- SmallVector<SDValue, 16> V(NumElts, DAG.getConstant(0, EltTy));
- SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], NumElts);
- return DAG.getNode(ISD::SUB, dl, VT, Zero, SRA);
+ std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
+ return DAG.getNode(ISD::BITCAST, dl, VT, CallInfo.first);
+}
+
+static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
+ EVT VT = Op0.getValueType();
+ SDLoc dl(Op);
+
+ assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
+ (VT == MVT::v8i32 && Subtarget->hasInt256()));
+
+ // PMULxD operations multiply each even value (starting at 0) of LHS with
+ // the related value of RHS and produce a widened result.
+ // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
+ // => <2 x i64> <ae|cg>
+ //
+ // In other words, to have all the results, we need to perform two PMULxD:
+ // 1. one with the even values.
+ // 2. one with the odd values.
+ // To achieve #2, we need to place the odd values at an even position.
+ //
+ // Place the odd value at an even position (basically, shift all values 1
+ // step to the left):
+ const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
+ // <a|b|c|d> => <b|undef|d|undef>
+ SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
+ // <e|f|g|h> => <f|undef|h|undef>
+ SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);
+
+ // Emit two multiplies, one for the lower 2 ints and one for the higher 2
+ // ints.
+ MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
+ bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
+ unsigned Opcode =
+ (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
+ // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
+ // => <2 x i64> <ae|cg>
+ SDValue Mul1 = DAG.getNode(ISD::BITCAST, dl, VT,
+ DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
+ // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
+ // => <2 x i64> <bf|dh>
+ SDValue Mul2 = DAG.getNode(ISD::BITCAST, dl, VT,
+ DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));
+
+ // Shuffle it back into the right order.
+ // The internal representation is big endian.
+ // In other words, an i64 bitcasted to 2 x i32 has its high part at index 0
+ // and its low part at index 1.
+ // Moreover, we have: Mul1 = <ae|cg> ; Mul2 = <bf|dh>
+ // Vector index 0 1 ; 2 3
+ // We want <ae|bf|cg|dh>
+ // Vector index 0 2 1 3
+ // Since each element is seen as 2 x i32, we get:
+ // high_mask[i] = 2 x vector_index[i]
+ // low_mask[i] = 2 x vector_index[i] + 1
+ // where vector_index = {0, Size/2, 1, Size/2 + 1, ...,
+ // Size/2 - 1, Size/2 + Size/2 - 1}
+ // where Size is the number of elements of the final vector.
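+ // For example, with Size = 4: vector_index = {0, 2, 1, 3}, giving the
+ // high mask {0, 4, 2, 6} and the low mask {1, 5, 3, 7} used below.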
+ SDValue Highs, Lows;
+ if (VT == MVT::v8i32) {
+ const int HighMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
+ Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
+ const int LowMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
+ Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
+ } else {
+ const int HighMask[] = {0, 4, 2, 6};
+ Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
+ const int LowMask[] = {1, 5, 3, 7};
+ Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
}
- return SDValue();
+
+ // If we have a signed multiply but no PMULDQ, fix up the high parts of an
+ // unsigned multiply.
+ if (IsSigned && !Subtarget->hasSSE41()) {
+ SDValue ShAmt =
+ DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
+ SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
+ DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
+ SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
+ DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);
+
+ SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
+ Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
+ }
+
+ // The low part of a MUL_LOHI is supposed to be the first value and the
+ // high part the second value.
+ return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getValueType(), Lows, Highs);
}
static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
const X86Subtarget *Subtarget) {
- EVT VT = Op.getValueType();
+ MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
SDValue R = Op.getOperand(0);
SDValue Amt = Op.getOperand(1);
// Optimize shl/srl/sra with constant shift amount.
- if (isSplatVector(Amt.getNode())) {
- SDValue SclrAmt = Amt->getOperand(0);
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) {
- uint64_t ShiftAmt = C->getZExtValue();
+ if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
+ if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
+ uint64_t ShiftAmt = ShiftConst->getZExtValue();
if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
(Subtarget->hasInt256() &&
@@ -12604,14 +15276,14 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
// Make a large shift.
SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
MVT::v8i16, R, ShiftAmt,
- DAG);
+ DAG);
SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
// Zero out the rightmost bits.
SmallVector<SDValue, 16> V(16,
DAG.getConstant(uint8_t(-1U << ShiftAmt),
MVT::i8));
return DAG.getNode(ISD::AND, dl, VT, SHL,
- DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16));
+ DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
}
if (Op.getOpcode() == ISD::SRL) {
// Make a large shift.
@@ -12624,7 +15296,7 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
MVT::i8));
return DAG.getNode(ISD::AND, dl, VT, SRL,
- DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16));
+ DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
}
if (Op.getOpcode() == ISD::SRA) {
if (ShiftAmt == 7) {
@@ -12637,7 +15309,7 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt,
MVT::i8));
- SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16);
+ SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
return Res;
@@ -12657,7 +15329,7 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
DAG.getConstant(uint8_t(-1U << ShiftAmt),
MVT::i8));
return DAG.getNode(ISD::AND, dl, VT, SHL,
- DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32));
+ DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
}
if (Op.getOpcode() == ISD::SRL) {
// Make a large shift.
@@ -12670,7 +15342,7 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
MVT::i8));
return DAG.getNode(ISD::AND, dl, VT, SRL,
- DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32));
+ DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
}
if (Op.getOpcode() == ISD::SRA) {
if (ShiftAmt == 7) {
@@ -12683,7 +15355,7 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt,
MVT::i8));
- SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32);
+ SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
return Res;
@@ -12699,13 +15371,13 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
Amt.getOpcode() == ISD::BITCAST &&
Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
Amt = Amt.getOperand(0);
- unsigned Ratio = Amt.getValueType().getVectorNumElements() /
+ unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
VT.getVectorNumElements();
unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
uint64_t ShiftAmt = 0;
for (unsigned i = 0; i != Ratio; ++i) {
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i));
- if (C == 0)
+ if (!C)
return SDValue();
// 6 == Log2(64)
ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
@@ -12716,7 +15388,7 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
for (unsigned j = 0; j != Ratio; ++j) {
ConstantSDNode *C =
dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
- if (C == 0)
+ if (!C)
return SDValue();
// 6 == Log2(64)
ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
@@ -12744,7 +15416,7 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
const X86Subtarget* Subtarget) {
- EVT VT = Op.getValueType();
+ MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
SDValue R = Op.getOperand(0);
SDValue Amt = Op.getOperand(1);
@@ -12798,7 +15470,7 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
BaseShAmt = InVec.getOperand(1);
}
}
- if (BaseShAmt.getNode() == 0)
+ if (!BaseShAmt.getNode())
BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Amt,
DAG.getIntPtrConstant(0));
}
@@ -12814,7 +15486,7 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
default:
llvm_unreachable("Unknown shift opcode!");
case ISD::SHL:
- switch (VT.getSimpleVT().SimpleTy) {
+ switch (VT.SimpleTy) {
default: return SDValue();
case MVT::v2i64:
case MVT::v4i32:
@@ -12827,7 +15499,7 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG);
}
case ISD::SRA:
- switch (VT.getSimpleVT().SimpleTy) {
+ switch (VT.SimpleTy) {
default: return SDValue();
case MVT::v4i32:
case MVT::v8i16:
@@ -12838,7 +15510,7 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG);
}
case ISD::SRL:
- switch (VT.getSimpleVT().SimpleTy) {
+ switch (VT.SimpleTy) {
default: return SDValue();
case MVT::v2i64:
case MVT::v4i32:
@@ -12861,7 +15533,7 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
Amt.getOpcode() == ISD::BITCAST &&
Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
Amt = Amt.getOperand(0);
- unsigned Ratio = Amt.getValueType().getVectorNumElements() /
+ unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
VT.getVectorNumElements();
std::vector<SDValue> Vals(Ratio);
for (unsigned i = 0; i != Ratio; ++i)
@@ -12888,15 +15560,14 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
SelectionDAG &DAG) {
-
- EVT VT = Op.getValueType();
+ MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
SDValue R = Op.getOperand(0);
SDValue Amt = Op.getOperand(1);
SDValue V;
- if (!Subtarget->hasSSE2())
- return SDValue();
+ assert(VT.isVector() && "Custom lowering only for vector shifts!");
+ assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");
V = LowerScalarImmediateShift(Op, DAG, Subtarget);
if (V.getNode())
@@ -12922,6 +15593,39 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
return Op;
}
+ // If possible, lower this packed shift into a vector multiply instead of
+ // expanding it into a sequence of scalar shifts.
+ // Do this only if the vector shift count is a constant build_vector.
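+ // For example, (shl <4 x i32> %x, <1, 2, 3, 4>) becomes
+ // (mul %x, <2, 4, 8, 16>).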
+ if (Op.getOpcode() == ISD::SHL &&
+ (VT == MVT::v8i16 || VT == MVT::v4i32 ||
+ (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
+ ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
+ SmallVector<SDValue, 8> Elts;
+ EVT SVT = VT.getScalarType();
+ unsigned SVTBits = SVT.getSizeInBits();
+ const APInt &One = APInt(SVTBits, 1);
+ unsigned NumElems = VT.getVectorNumElements();
+
+ for (unsigned i=0; i !=NumElems; ++i) {
+ SDValue Op = Amt->getOperand(i);
+ if (Op->getOpcode() == ISD::UNDEF) {
+ Elts.push_back(Op);
+ continue;
+ }
+
+ ConstantSDNode *ND = cast<ConstantSDNode>(Op);
+ const APInt &C = APInt(SVTBits, ND->getAPIntValue().getZExtValue());
+ uint64_t ShAmt = C.getZExtValue();
+ if (ShAmt >= SVTBits) {
+ Elts.push_back(DAG.getUNDEF(SVT));
+ continue;
+ }
+ Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT));
+ }
+ SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
+ return DAG.getNode(ISD::MUL, dl, VT, R, BV);
+ }
+
// Lower SHL with variable shift amount.
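// (The shift amount is moved into the exponent field of a single-precision
// float - hence the shift by 23 - so that FP_TO_SINT below recovers 2^Amt in
// each lane and the variable shift becomes a vector multiply.)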
if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT));
@@ -12931,6 +15635,80 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
return DAG.getNode(ISD::MUL, dl, VT, Op, R);
}
+
+ // If possible, lower this shift as a sequence of two shifts by
+ // constant plus a MOVSS/MOVSD instead of scalarizing it.
+ // Example:
+ // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
+ //
+ // Could be rewritten as:
+ // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
+ //
+ // The advantage is that the two shifts from the example would be
+ // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
+ // the vector shift into four scalar shifts plus four pairs of vector
+ // insert/extract.
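+ // (Like the movss instruction, the MOVSS node takes lane 0 from its second
+ // operand and keeps lanes 1-3 of its first; that is why the shift by 'X' is
+ // passed second to getTargetShuffleNode below. MOVSD behaves the same way at
+ // i64 granularity.)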
+ if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
+ ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
+ unsigned TargetOpcode = X86ISD::MOVSS;
+ bool CanBeSimplified;
+ // The splat value for the first packed shift (the 'X' from the example).
+ SDValue Amt1 = Amt->getOperand(0);
+ // The splat value for the second packed shift (the 'Y' from the example).
+ SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
+ Amt->getOperand(2);
+
+ // See if it is possible to replace this node with a sequence of
+ // two shifts followed by a MOVSS/MOVSD
+ if (VT == MVT::v4i32) {
+ // Check if it is legal to use a MOVSS.
+ CanBeSimplified = Amt2 == Amt->getOperand(2) &&
+ Amt2 == Amt->getOperand(3);
+ if (!CanBeSimplified) {
+ // Otherwise, check if we can still simplify this node using a MOVSD.
+ CanBeSimplified = Amt1 == Amt->getOperand(1) &&
+ Amt->getOperand(2) == Amt->getOperand(3);
+ TargetOpcode = X86ISD::MOVSD;
+ Amt2 = Amt->getOperand(2);
+ }
+ } else {
+ // Do similar checks for the case where the machine value type
+ // is MVT::v8i16.
+ CanBeSimplified = Amt1 == Amt->getOperand(1);
+ for (unsigned i=3; i != 8 && CanBeSimplified; ++i)
+ CanBeSimplified = Amt2 == Amt->getOperand(i);
+
+ if (!CanBeSimplified) {
+ TargetOpcode = X86ISD::MOVSD;
+ CanBeSimplified = true;
+ Amt2 = Amt->getOperand(4);
+ for (unsigned i=0; i != 4 && CanBeSimplified; ++i)
+ CanBeSimplified = Amt1 == Amt->getOperand(i);
+ for (unsigned j=4; j != 8 && CanBeSimplified; ++j)
+ CanBeSimplified = Amt2 == Amt->getOperand(j);
+ }
+ }
+
+ if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
+ isa<ConstantSDNode>(Amt2)) {
+ // Replace this node with two shifts followed by a MOVSS/MOVSD.
+ EVT CastVT = MVT::v4i32;
+ SDValue Splat1 =
+ DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
+ SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
+ SDValue Splat2 =
+ DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
+ SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
+ if (TargetOpcode == X86ISD::MOVSD)
+ CastVT = MVT::v2i64;
+ SDValue BitCast1 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift1);
+ SDValue BitCast2 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift2);
+ SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2,
+ BitCast1, DAG);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Result);
+ }
+ }
+
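
The MOVSS/MOVSD trick added above relies on the observation that a shift by <X,Y,Y,Y> equals a blend of two splat shifts: every lane shifted by Y, with lane 0 taken from the shift by X. A minimal scalar sketch of that equivalence (illustrative only, not part of the patch; the lane values and shift amounts are made up):

    #include <array>
    #include <cassert>
    #include <cstdint>

    int main() {
      // Shift amounts <X, Y, Y, Y> as in the comment in the hunk above.
      const uint32_t X = 3, Y = 7;
      const std::array<uint32_t, 4> A = {0x80000000u, 0x12345678u, 0xdeadbeefu, 0xffffffffu};

      // Reference: per-lane logical right shift by <X, Y, Y, Y>.
      const std::array<uint32_t, 4> Amt = {X, Y, Y, Y};
      std::array<uint32_t, 4> Ref;
      for (int i = 0; i < 4; ++i)
        Ref[i] = A[i] >> Amt[i];

      // Lowered form: two splat shifts (what the X86ISD::VSRLI nodes compute)...
      std::array<uint32_t, 4> ByX, ByY;
      for (int i = 0; i < 4; ++i) { ByX[i] = A[i] >> X; ByY[i] = A[i] >> Y; }

      // ...followed by a MOVSS-style blend: lane 0 from the X shift,
      // the remaining lanes from the Y shift.
      std::array<uint32_t, 4> Blend = ByY;
      Blend[0] = ByX[0];

      assert(Blend == Ref);
      return 0;
    }
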
if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");
@@ -12974,10 +15752,23 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
return R;
}
+ // It's worth extending once and using the v8i32 shifts for 16-bit types, but
+ // the extra overheads to get from v16i8 to v8i32 make the existing SSE
+ // solution better.
+ if (Subtarget->hasInt256() && VT == MVT::v8i16) {
+ MVT NewVT = VT == MVT::v8i16 ? MVT::v8i32 : MVT::v16i16;
+ unsigned ExtOpc =
+ Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
+ R = DAG.getNode(ExtOpc, dl, NewVT, R);
+ Amt = DAG.getNode(ISD::ANY_EXTEND, dl, NewVT, Amt);
+ return DAG.getNode(ISD::TRUNCATE, dl, VT,
+ DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt));
+ }
+
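
The widen-shift-truncate path added above is exact because a 16-bit shift can be reproduced at 32 bits and truncated back, provided SRA sign-extends its input while SRL/SHL may use any extension. A small sketch of the scalar analogue (illustrative only; it assumes the usual arithmetic right shift for negative signed values, as produced by x86 compilers):

    #include <cassert>
    #include <cstdint>

    // 16-bit arithmetic shift right, computed via SIGN_EXTEND -> i32 shift -> TRUNCATE,
    // mirroring the v8i16 -> v8i32 widening in the hunk above.
    int16_t sra16_via_i32(int16_t x, unsigned amt) {
      int32_t wide = static_cast<int32_t>(x);      // SIGN_EXTEND
      return static_cast<int16_t>(wide >> amt);    // shift at i32, then TRUNCATE
    }

    int main() {
      assert(sra16_via_i32(int16_t(-32768), 3) == int16_t(-4096));
      assert(sra16_via_i32(int16_t(12345), 5) == int16_t(12345 >> 5));
      return 0;
    }
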
// Decompose 256-bit shifts into smaller 128-bit shifts.
if (VT.is256BitVector()) {
unsigned NumElems = VT.getVectorNumElements();
- MVT EltVT = VT.getVectorElementType().getSimpleVT();
+ MVT EltVT = VT.getVectorElementType();
EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
// Extract the two vectors
@@ -12995,10 +15786,8 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
for (unsigned i = NumElems/2; i != NumElems; ++i)
Amt2Csts.push_back(Amt->getOperand(i));
- Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT,
- &Amt1Csts[0], NumElems/2);
- Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT,
- &Amt2Csts[0], NumElems/2);
+ Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
+ Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
} else {
// Variable shift amount
Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
@@ -13095,7 +15884,7 @@ SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
- EVT VT = Op.getValueType();
+ MVT VT = Op.getSimpleValueType();
if (!Subtarget->hasSSE2() || !VT.isVector())
return SDValue();
@@ -13103,7 +15892,7 @@ SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
ExtraVT.getScalarType().getSizeInBits();
- switch (VT.getSimpleVT().SimpleTy) {
+ switch (VT.SimpleTy) {
default: return SDValue();
case MVT::v8i32:
case MVT::v16i16:
@@ -13118,7 +15907,7 @@ SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
- MVT EltVT = VT.getVectorElementType().getSimpleVT();
+ MVT EltVT = VT.getVectorElementType();
EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
EVT ExtraEltVT = ExtraVT.getVectorElementType();
@@ -13205,11 +15994,11 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
SelectionDAG &DAG) {
- EVT T = Op.getValueType();
+ MVT T = Op.getSimpleValueType();
SDLoc DL(Op);
unsigned Reg = 0;
unsigned size = 0;
- switch(T.getSimpleVT().SimpleTy) {
+ switch(T.SimpleTy) {
default: llvm_unreachable("Invalid value type!");
case MVT::i8: Reg = X86::AL; size = 1; break;
case MVT::i16: Reg = X86::AX; size = 2; break;
@@ -13220,7 +16009,7 @@ static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
break;
}
SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
- Op.getOperand(2), SDValue());
+ Op.getOperand(2), SDValue());
SDValue Ops[] = { cpIn.getValue(0),
Op.getOperand(1),
Op.getOperand(3),
@@ -13229,35 +16018,56 @@ static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
- Ops, array_lengthof(Ops), T, MMO);
+ Ops, T, MMO);
+
SDValue cpOut =
DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
- return cpOut;
-}
-
-static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
- SelectionDAG &DAG) {
- assert(Subtarget->is64Bit() && "Result not type legalized?");
- SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue TheChain = Op.getOperand(0);
- SDLoc dl(Op);
- SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1);
- SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, MVT::i64, rd.getValue(1));
- SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, MVT::i64,
- rax.getValue(2));
- SDValue Tmp = DAG.getNode(ISD::SHL, dl, MVT::i64, rdx,
- DAG.getConstant(32, MVT::i8));
- SDValue Ops[] = {
- DAG.getNode(ISD::OR, dl, MVT::i64, rax, Tmp),
- rdx.getValue(1)
- };
- return DAG.getMergeValues(Ops, array_lengthof(Ops), dl);
+ SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
+ MVT::i32, cpOut.getValue(2));
+ SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
+ DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
+
+ DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
+ DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
+ DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
+ return SDValue();
}
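
For reference, the value/success pair that LowerCMP_SWAP now produces (cpOut plus a SETCC on the ZF copied out of EFLAGS) corresponds to what a C++ caller of compare_exchange_strong observes: a boolean success flag, with the previous memory value written back into the expected argument on failure. A small sketch, illustrative only and not part of the patch:

    #include <atomic>
    #include <cstdio>

    int main() {
      std::atomic<int> v{42};

      int expected = 0;                                   // does not match, so the CAS fails
      bool ok = v.compare_exchange_strong(expected, 100); // ZF clear -> ok == false
      std::printf("ok=%d observed=%d\n", ok, expected);   // observed == 42

      ok = v.compare_exchange_strong(expected, 100);      // now matches, ZF set
      std::printf("ok=%d v=%d\n", ok, v.load());          // ok == true, v == 100
      return 0;
    }
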
static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
SelectionDAG &DAG) {
MVT SrcVT = Op.getOperand(0).getSimpleValueType();
MVT DstVT = Op.getSimpleValueType();
+
+ if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
+ assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
+ if (DstVT != MVT::f64)
+ // This conversion needs to be expanded.
+ return SDValue();
+
+ SDValue InVec = Op->getOperand(0);
+ SDLoc dl(Op);
+ unsigned NumElts = SrcVT.getVectorNumElements();
+ EVT SVT = SrcVT.getVectorElementType();
+
+ // Widen the vector in input in the case of MVT::v2i32.
+ // Example: from MVT::v2i32 to MVT::v4i32.
+ SmallVector<SDValue, 16> Elts;
+ for (unsigned i = 0, e = NumElts; i != e; ++i)
+ Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
+ DAG.getIntPtrConstant(i)));
+
+ // Explicitly mark the extra elements as Undef.
+ SDValue Undef = DAG.getUNDEF(SVT);
+ for (unsigned i = NumElts, e = NumElts * 2; i != e; ++i)
+ Elts.push_back(Undef);
+
+ EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
+ SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
+ SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
+ DAG.getIntPtrConstant(0));
+ }
+
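
The widening added above never changes the bits that matter: bitcasting a v2i32/v4i16/v8i8 to f64 is just a reinterpretation of the same 8 bytes, and the extra lanes appended to reach a legal vector type are undef and discarded by the final extract of element 0. A tiny sketch of the byte-level view (illustrative only):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      // A "v2i32" value and its f64 reinterpretation share the same 8 bytes.
      uint32_t v2i32[2] = {0x11223344u, 0x55667788u};
      double f64;
      static_assert(sizeof(v2i32) == sizeof(f64), "bitcast requires equal size");
      std::memcpy(&f64, v2i32, sizeof(f64));

      // Round-trip the bits to show nothing was added or lost by the widening.
      uint64_t bits;
      std::memcpy(&bits, &f64, sizeof(bits));
      std::printf("0x%016llx\n", (unsigned long long)bits);
      return 0;
    }
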
assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
Subtarget->hasMMX() && "Unexpected custom BITCAST");
assert((DstVT == MVT::i64 ||
@@ -13285,8 +16095,7 @@ static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
cast<AtomicSDNode>(Node)->getMemoryVT(),
Node->getOperand(0),
Node->getOperand(1), negOp,
- cast<AtomicSDNode>(Node)->getSrcValue(),
- cast<AtomicSDNode>(Node)->getAlignment(),
+ cast<AtomicSDNode>(Node)->getMemOperand(),
cast<AtomicSDNode>(Node)->getOrdering(),
cast<AtomicSDNode>(Node)->getSynchScope());
}
@@ -13317,7 +16126,7 @@ static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
}
static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
- EVT VT = Op.getNode()->getValueType(0);
+ EVT VT = Op.getNode()->getSimpleValueType(0);
// Let legalize expand this if it isn't a legal type yet.
if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
@@ -13374,12 +16183,11 @@ static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
Type *RetTy = isF64
? (Type*)StructType::get(ArgTy, ArgTy, NULL)
: (Type*)VectorType::get(ArgTy, 4);
- TargetLowering::
- CallLoweringInfo CLI(DAG.getEntryNode(), RetTy,
- false, false, false, false, 0,
- CallingConv::C, /*isTaillCall=*/false,
- /*doesNotRet=*/false, /*isReturnValueUsed*/true,
- Callee, Args, DAG, dl);
+
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
+ .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
+
std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
if (isF64)
@@ -13402,12 +16210,14 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
default: llvm_unreachable("Should not custom lower this!");
case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
- case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op, Subtarget, DAG);
+ case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
+ return LowerCMP_SWAP(Op, Subtarget, DAG);
case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
+ case ISD::VSELECT: return LowerVSELECT(Op, DAG);
case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
@@ -13459,6 +16269,8 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
case ISD::CTTZ: return LowerCTTZ(Op, DAG);
case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
+ case ISD::UMUL_LOHI:
+ case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
case ISD::SRA:
case ISD::SRL:
case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
@@ -13476,14 +16288,13 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
case ISD::ADD: return LowerADD(Op, DAG);
case ISD::SUB: return LowerSUB(Op, DAG);
- case ISD::SDIV: return LowerSDIV(Op, DAG);
case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
}
}
static void ReplaceATOMIC_LOAD(SDNode *Node,
- SmallVectorImpl<SDValue> &Results,
- SelectionDAG &DAG) {
+ SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) {
SDLoc dl(Node);
EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
@@ -13492,37 +16303,16 @@ static void ReplaceATOMIC_LOAD(SDNode *Node,
// (The only way to get a 16-byte load is cmpxchg16b)
// FIXME: 16-byte ATOMIC_CMP_SWAP isn't actually hooked up at the moment.
SDValue Zero = DAG.getConstant(0, VT);
- SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, VT,
- Node->getOperand(0),
- Node->getOperand(1), Zero, Zero,
- cast<AtomicSDNode>(Node)->getMemOperand(),
- cast<AtomicSDNode>(Node)->getOrdering(),
- cast<AtomicSDNode>(Node)->getSynchScope());
+ SDVTList VTs = DAG.getVTList(VT, MVT::i1, MVT::Other);
+ SDValue Swap =
+ DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl, VT, VTs,
+ Node->getOperand(0), Node->getOperand(1), Zero, Zero,
+ cast<AtomicSDNode>(Node)->getMemOperand(),
+ cast<AtomicSDNode>(Node)->getOrdering(),
+ cast<AtomicSDNode>(Node)->getOrdering(),
+ cast<AtomicSDNode>(Node)->getSynchScope());
Results.push_back(Swap.getValue(0));
- Results.push_back(Swap.getValue(1));
-}
-
-static void
-ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results,
- SelectionDAG &DAG, unsigned NewOp) {
- SDLoc dl(Node);
- assert (Node->getValueType(0) == MVT::i64 &&
- "Only know how to expand i64 atomics");
-
- SDValue Chain = Node->getOperand(0);
- SDValue In1 = Node->getOperand(1);
- SDValue In2L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- Node->getOperand(2), DAG.getIntPtrConstant(0));
- SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- Node->getOperand(2), DAG.getIntPtrConstant(1));
- SDValue Ops[] = { Chain, In1, In2L, In2H };
- SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
- SDValue Result =
- DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops, array_lengthof(Ops), MVT::i64,
- cast<MemSDNode>(Node)->getMemOperand());
- SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)};
- Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2));
- Results.push_back(Result.getValue(2));
+ Results.push_back(Swap.getValue(2));
}
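
The cmpxchg-with-zero idiom used above has a direct C++ analogue: a compare-exchange whose expected and desired values are both zero never modifies memory, yet always returns the current contents, which is exactly an atomic load. A sketch of the idea (illustrative; the patch applies it at the SelectionDAG level to synthesize wide loads from cmpxchg16b):

    #include <atomic>
    #include <cassert>

    // Read 'v' atomically using only compare-and-swap.
    long atomic_load_via_cas(std::atomic<long> &v) {
      long expected = 0;
      // If v == 0 this "stores" 0 (a no-op); otherwise it fails and writes
      // the current value into 'expected'. Either way we observe v.
      v.compare_exchange_strong(expected, 0);
      return expected;
    }

    int main() {
      std::atomic<long> v{1234};
      assert(atomic_load_via_cas(v) == 1234);
      v.store(0);
      assert(atomic_load_via_cas(v) == 0);
      return 0;
    }
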
/// ReplaceNodeResults - Replace a node with an illegal result type
@@ -13542,6 +16332,16 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
case ISD::SUBE:
// We don't want to expand or promote these.
return;
+ case ISD::SDIV:
+ case ISD::UDIV:
+ case ISD::SREM:
+ case ISD::UREM:
+ case ISD::SDIVREM:
+ case ISD::UDIVREM: {
+ SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
+ Results.push_back(V);
+ return;
+ }
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: {
bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
@@ -13552,10 +16352,10 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
std::pair<SDValue,SDValue> Vals =
FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
SDValue FIST = Vals.first, StackSlot = Vals.second;
- if (FIST.getNode() != 0) {
+ if (FIST.getNode()) {
EVT VT = N->getValueType(0);
// Return a load from the stack slot.
- if (StackSlot.getNode() != 0)
+ if (StackSlot.getNode())
Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
MachinePointerInfo(),
false, false, false, 0));
@@ -13588,22 +16388,26 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
Results.push_back(V);
return;
}
+ case ISD::INTRINSIC_W_CHAIN: {
+ unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ switch (IntNo) {
+ default : llvm_unreachable("Do not know how to custom type "
+ "legalize this intrinsic operation!");
+ case Intrinsic::x86_rdtsc:
+ return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
+ Results);
+ case Intrinsic::x86_rdtscp:
+ return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
+ Results);
+ case Intrinsic::x86_rdpmc:
+ return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
+ }
+ }
case ISD::READCYCLECOUNTER: {
- SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue TheChain = N->getOperand(0);
- SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1);
- SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, MVT::i32,
- rd.getValue(1));
- SDValue edx = DAG.getCopyFromReg(eax.getValue(1), dl, X86::EDX, MVT::i32,
- eax.getValue(2));
- // Use a buildpair to merge the two 32-bit values into a 64-bit one.
- SDValue Ops[] = { eax, edx };
- Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops,
- array_lengthof(Ops)));
- Results.push_back(edx.getValue(1));
- return;
+ return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
+ Results);
}
- case ISD::ATOMIC_CMP_SWAP: {
+ case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
EVT T = N->getValueType(0);
assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
bool Regs64bit = T == MVT::i128;
@@ -13637,8 +16441,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
X86ISD::LCMPXCHG8_DAG;
- SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys,
- Ops, array_lengthof(Ops), T, MMO);
+ SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
Regs64bit ? X86::RAX : X86::EAX,
HalfT, Result.getValue(1));
@@ -13646,69 +16449,73 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
Regs64bit ? X86::RDX : X86::EDX,
HalfT, cpOutL.getValue(2));
SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
- Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF, 2));
- Results.push_back(cpOutH.getValue(1));
+
+ SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
+ MVT::i32, cpOutH.getValue(2));
+ SDValue Success =
+ DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
+ Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
+
+ Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
+ Results.push_back(Success);
+ Results.push_back(EFLAGS.getValue(1));
return;
}
+ case ISD::ATOMIC_SWAP:
case ISD::ATOMIC_LOAD_ADD:
+ case ISD::ATOMIC_LOAD_SUB:
case ISD::ATOMIC_LOAD_AND:
- case ISD::ATOMIC_LOAD_NAND:
case ISD::ATOMIC_LOAD_OR:
- case ISD::ATOMIC_LOAD_SUB:
case ISD::ATOMIC_LOAD_XOR:
- case ISD::ATOMIC_LOAD_MAX:
+ case ISD::ATOMIC_LOAD_NAND:
case ISD::ATOMIC_LOAD_MIN:
- case ISD::ATOMIC_LOAD_UMAX:
+ case ISD::ATOMIC_LOAD_MAX:
case ISD::ATOMIC_LOAD_UMIN:
- case ISD::ATOMIC_SWAP: {
- unsigned Opc;
- switch (N->getOpcode()) {
- default: llvm_unreachable("Unexpected opcode");
- case ISD::ATOMIC_LOAD_ADD:
- Opc = X86ISD::ATOMADD64_DAG;
- break;
- case ISD::ATOMIC_LOAD_AND:
- Opc = X86ISD::ATOMAND64_DAG;
- break;
- case ISD::ATOMIC_LOAD_NAND:
- Opc = X86ISD::ATOMNAND64_DAG;
- break;
- case ISD::ATOMIC_LOAD_OR:
- Opc = X86ISD::ATOMOR64_DAG;
- break;
- case ISD::ATOMIC_LOAD_SUB:
- Opc = X86ISD::ATOMSUB64_DAG;
- break;
- case ISD::ATOMIC_LOAD_XOR:
- Opc = X86ISD::ATOMXOR64_DAG;
- break;
- case ISD::ATOMIC_LOAD_MAX:
- Opc = X86ISD::ATOMMAX64_DAG;
- break;
- case ISD::ATOMIC_LOAD_MIN:
- Opc = X86ISD::ATOMMIN64_DAG;
- break;
- case ISD::ATOMIC_LOAD_UMAX:
- Opc = X86ISD::ATOMUMAX64_DAG;
- break;
- case ISD::ATOMIC_LOAD_UMIN:
- Opc = X86ISD::ATOMUMIN64_DAG;
- break;
- case ISD::ATOMIC_SWAP:
- Opc = X86ISD::ATOMSWAP64_DAG;
- break;
- }
- ReplaceATOMIC_BINARY_64(N, Results, DAG, Opc);
+ case ISD::ATOMIC_LOAD_UMAX:
+ // Delegate to generic TypeLegalization. Situations we can really handle
+ // should have already been dealt with by X86AtomicExpand.cpp.
+ break;
+ case ISD::ATOMIC_LOAD: {
+ ReplaceATOMIC_LOAD(N, Results, DAG);
return;
}
- case ISD::ATOMIC_LOAD:
- ReplaceATOMIC_LOAD(N, Results, DAG);
+ case ISD::BITCAST: {
+ assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
+ EVT DstVT = N->getValueType(0);
+ EVT SrcVT = N->getOperand(0)->getValueType(0);
+
+ if (SrcVT != MVT::f64 ||
+ (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
+ return;
+
+ unsigned NumElts = DstVT.getVectorNumElements();
+ EVT SVT = DstVT.getVectorElementType();
+ EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
+ SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
+ MVT::v2f64, N->getOperand(0));
+ SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);
+
+ if (ExperimentalVectorWideningLegalization) {
+ // If we are legalizing vectors by widening, we already have the desired
+ // legal vector type, just return it.
+ Results.push_back(ToVecInt);
+ return;
+ }
+
+ SmallVector<SDValue, 8> Elts;
+ for (unsigned i = 0, e = NumElts; i != e; ++i)
+ Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
+ ToVecInt, DAG.getIntPtrConstant(i)));
+
+ Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
+ }
}
}
const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
- default: return NULL;
+ default: return nullptr;
case X86ISD::BSF: return "X86ISD::BSF";
case X86ISD::BSR: return "X86ISD::BSR";
case X86ISD::SHLD: return "X86ISD::SHLD";
@@ -13727,6 +16534,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::FST: return "X86ISD::FST";
case X86ISD::CALL: return "X86ISD::CALL";
case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
+ case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
+ case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
case X86ISD::BT: return "X86ISD::BT";
case X86ISD::CMP: return "X86ISD::CMP";
case X86ISD::COMI: return "X86ISD::COMI";
@@ -13735,8 +16544,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::CMPMU: return "X86ISD::CMPMU";
case X86ISD::SETCC: return "X86ISD::SETCC";
case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
- case X86ISD::FSETCCsd: return "X86ISD::FSETCCsd";
- case X86ISD::FSETCCss: return "X86ISD::FSETCCss";
+ case X86ISD::FSETCC: return "X86ISD::FSETCC";
case X86ISD::CMOV: return "X86ISD::CMOV";
case X86ISD::BRCOND: return "X86ISD::BRCOND";
case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
@@ -13781,14 +16589,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
- case X86ISD::ATOMADD64_DAG: return "X86ISD::ATOMADD64_DAG";
- case X86ISD::ATOMSUB64_DAG: return "X86ISD::ATOMSUB64_DAG";
- case X86ISD::ATOMOR64_DAG: return "X86ISD::ATOMOR64_DAG";
- case X86ISD::ATOMXOR64_DAG: return "X86ISD::ATOMXOR64_DAG";
- case X86ISD::ATOMAND64_DAG: return "X86ISD::ATOMAND64_DAG";
- case X86ISD::ATOMNAND64_DAG: return "X86ISD::ATOMNAND64_DAG";
+ case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
- case X86ISD::VSEXT_MOVL: return "X86ISD::VSEXT_MOVL";
case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
case X86ISD::VZEXT: return "X86ISD::VZEXT";
case X86ISD::VSEXT: return "X86ISD::VSEXT";
@@ -13821,17 +16623,15 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::OR: return "X86ISD::OR";
case X86ISD::XOR: return "X86ISD::XOR";
case X86ISD::AND: return "X86ISD::AND";
- case X86ISD::BLSI: return "X86ISD::BLSI";
- case X86ISD::BLSMSK: return "X86ISD::BLSMSK";
- case X86ISD::BLSR: return "X86ISD::BLSR";
- case X86ISD::BZHI: return "X86ISD::BZHI";
case X86ISD::BEXTR: return "X86ISD::BEXTR";
case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
case X86ISD::PTEST: return "X86ISD::PTEST";
case X86ISD::TESTP: return "X86ISD::TESTP";
case X86ISD::TESTM: return "X86ISD::TESTM";
+ case X86ISD::TESTNM: return "X86ISD::TESTNM";
case X86ISD::KORTEST: return "X86ISD::KORTEST";
- case X86ISD::KTEST: return "X86ISD::KTEST";
+ case X86ISD::PACKSS: return "X86ISD::PACKSS";
+ case X86ISD::PACKUS: return "X86ISD::PACKUS";
case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
@@ -13851,12 +16651,15 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
+ case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
case X86ISD::VPERMILP: return "X86ISD::VPERMILP";
case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
case X86ISD::VPERMV: return "X86ISD::VPERMV";
case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
+ case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
case X86ISD::VPERMI: return "X86ISD::VPERMI";
case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
+ case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
@@ -13887,7 +16690,7 @@ bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
Reloc::Model R = getTargetMachine().getRelocationModel();
// X86 allows a sign-extended 32-bit immediate field as a displacement.
- if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL))
+ if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
return false;
if (AM.BaseGV) {
@@ -13932,6 +16735,24 @@ bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
return true;
}
+bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
+ unsigned Bits = Ty->getScalarSizeInBits();
+
+ // 8-bit shifts are always expensive, but versions with a scalar amount aren't
+ // particularly cheaper than those without.
+ if (Bits == 8)
+ return false;
+
+ // On AVX2 there are new vpsllv[dq] instructions (and other shifts), that make
+ // variable shifts just as cheap as scalar ones.
+ if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
+ return false;
+
+ // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
+ // fully general vector.
+ return true;
+}
+
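
The hook added above distinguishes shifts where every lane uses the same run-time amount from fully general per-lane shifts; before AVX2 only the former map to a single instruction. A hedged intrinsics sketch of the two forms (illustrative only, function names are mine):

    #include <immintrin.h>

    // All lanes shifted by the same run-time amount: a single PSLLD on any SSE2 target.
    __m128i shift_by_scalar(__m128i v, int amt) {
      return _mm_sll_epi32(v, _mm_cvtsi32_si128(amt));
    }

    #ifdef __AVX2__
    // Per-lane amounts: VPSLLVD, only available (and only cheap) with AVX2.
    __m128i shift_by_vector(__m128i v, __m128i amts) {
      return _mm_sllv_epi32(v, amts);
    }
    #endif
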
bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
return false;
@@ -14047,10 +16868,27 @@ X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
if (VT.getSizeInBits() == 64)
return false;
- // FIXME: pshufb, blends, shifts.
+ // If this is a single-input shuffle with no 128 bit lane crossings we can
+ // lower it into pshufb.
+ if ((SVT.is128BitVector() && Subtarget->hasSSSE3()) ||
+ (SVT.is256BitVector() && Subtarget->hasInt256())) {
+ bool isLegal = true;
+ for (unsigned I = 0, E = M.size(); I != E; ++I) {
+ if (M[I] >= (int)SVT.getVectorNumElements() ||
+ ShuffleCrosses128bitLane(SVT, I, M[I])) {
+ isLegal = false;
+ break;
+ }
+ }
+ if (isLegal)
+ return true;
+ }
+
+ // FIXME: blends, shifts.
return (SVT.getVectorNumElements() == 2 ||
ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
isMOVLMask(M, SVT) ||
+ isMOVHLPSMask(M, SVT) ||
isSHUFPMask(M, SVT) ||
isPSHUFDMask(M, SVT) ||
isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) ||
@@ -14059,7 +16897,8 @@ X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
isUNPCKLMask(M, SVT, Subtarget->hasInt256()) ||
isUNPCKHMask(M, SVT, Subtarget->hasInt256()) ||
isUNPCKL_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
- isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256()));
+ isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
+ isBlendMask(M, SVT, Subtarget->hasSSE41(), Subtarget->hasInt256()));
}
bool
@@ -14115,7 +16954,7 @@ static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), MBB,
- llvm::next(MachineBasicBlock::iterator(MI)), MBB->end());
+ std::next(MachineBasicBlock::iterator(MI)), MBB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
// thisMBB:
@@ -14142,685 +16981,6 @@ static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
return sinkMBB;
}
-// Get CMPXCHG opcode for the specified data type.
-static unsigned getCmpXChgOpcode(EVT VT) {
- switch (VT.getSimpleVT().SimpleTy) {
- case MVT::i8: return X86::LCMPXCHG8;
- case MVT::i16: return X86::LCMPXCHG16;
- case MVT::i32: return X86::LCMPXCHG32;
- case MVT::i64: return X86::LCMPXCHG64;
- default:
- break;
- }
- llvm_unreachable("Invalid operand size!");
-}
-
-// Get LOAD opcode for the specified data type.
-static unsigned getLoadOpcode(EVT VT) {
- switch (VT.getSimpleVT().SimpleTy) {
- case MVT::i8: return X86::MOV8rm;
- case MVT::i16: return X86::MOV16rm;
- case MVT::i32: return X86::MOV32rm;
- case MVT::i64: return X86::MOV64rm;
- default:
- break;
- }
- llvm_unreachable("Invalid operand size!");
-}
-
-// Get opcode of the non-atomic one from the specified atomic instruction.
-static unsigned getNonAtomicOpcode(unsigned Opc) {
- switch (Opc) {
- case X86::ATOMAND8: return X86::AND8rr;
- case X86::ATOMAND16: return X86::AND16rr;
- case X86::ATOMAND32: return X86::AND32rr;
- case X86::ATOMAND64: return X86::AND64rr;
- case X86::ATOMOR8: return X86::OR8rr;
- case X86::ATOMOR16: return X86::OR16rr;
- case X86::ATOMOR32: return X86::OR32rr;
- case X86::ATOMOR64: return X86::OR64rr;
- case X86::ATOMXOR8: return X86::XOR8rr;
- case X86::ATOMXOR16: return X86::XOR16rr;
- case X86::ATOMXOR32: return X86::XOR32rr;
- case X86::ATOMXOR64: return X86::XOR64rr;
- }
- llvm_unreachable("Unhandled atomic-load-op opcode!");
-}
-
-// Get opcode of the non-atomic one from the specified atomic instruction with
-// extra opcode.
-static unsigned getNonAtomicOpcodeWithExtraOpc(unsigned Opc,
- unsigned &ExtraOpc) {
- switch (Opc) {
- case X86::ATOMNAND8: ExtraOpc = X86::NOT8r; return X86::AND8rr;
- case X86::ATOMNAND16: ExtraOpc = X86::NOT16r; return X86::AND16rr;
- case X86::ATOMNAND32: ExtraOpc = X86::NOT32r; return X86::AND32rr;
- case X86::ATOMNAND64: ExtraOpc = X86::NOT64r; return X86::AND64rr;
- case X86::ATOMMAX8: ExtraOpc = X86::CMP8rr; return X86::CMOVL32rr;
- case X86::ATOMMAX16: ExtraOpc = X86::CMP16rr; return X86::CMOVL16rr;
- case X86::ATOMMAX32: ExtraOpc = X86::CMP32rr; return X86::CMOVL32rr;
- case X86::ATOMMAX64: ExtraOpc = X86::CMP64rr; return X86::CMOVL64rr;
- case X86::ATOMMIN8: ExtraOpc = X86::CMP8rr; return X86::CMOVG32rr;
- case X86::ATOMMIN16: ExtraOpc = X86::CMP16rr; return X86::CMOVG16rr;
- case X86::ATOMMIN32: ExtraOpc = X86::CMP32rr; return X86::CMOVG32rr;
- case X86::ATOMMIN64: ExtraOpc = X86::CMP64rr; return X86::CMOVG64rr;
- case X86::ATOMUMAX8: ExtraOpc = X86::CMP8rr; return X86::CMOVB32rr;
- case X86::ATOMUMAX16: ExtraOpc = X86::CMP16rr; return X86::CMOVB16rr;
- case X86::ATOMUMAX32: ExtraOpc = X86::CMP32rr; return X86::CMOVB32rr;
- case X86::ATOMUMAX64: ExtraOpc = X86::CMP64rr; return X86::CMOVB64rr;
- case X86::ATOMUMIN8: ExtraOpc = X86::CMP8rr; return X86::CMOVA32rr;
- case X86::ATOMUMIN16: ExtraOpc = X86::CMP16rr; return X86::CMOVA16rr;
- case X86::ATOMUMIN32: ExtraOpc = X86::CMP32rr; return X86::CMOVA32rr;
- case X86::ATOMUMIN64: ExtraOpc = X86::CMP64rr; return X86::CMOVA64rr;
- }
- llvm_unreachable("Unhandled atomic-load-op opcode!");
-}
-
-// Get opcode of the non-atomic one from the specified atomic instruction for
-// 64-bit data type on 32-bit target.
-static unsigned getNonAtomic6432Opcode(unsigned Opc, unsigned &HiOpc) {
- switch (Opc) {
- case X86::ATOMAND6432: HiOpc = X86::AND32rr; return X86::AND32rr;
- case X86::ATOMOR6432: HiOpc = X86::OR32rr; return X86::OR32rr;
- case X86::ATOMXOR6432: HiOpc = X86::XOR32rr; return X86::XOR32rr;
- case X86::ATOMADD6432: HiOpc = X86::ADC32rr; return X86::ADD32rr;
- case X86::ATOMSUB6432: HiOpc = X86::SBB32rr; return X86::SUB32rr;
- case X86::ATOMSWAP6432: HiOpc = X86::MOV32rr; return X86::MOV32rr;
- case X86::ATOMMAX6432: HiOpc = X86::SETLr; return X86::SETLr;
- case X86::ATOMMIN6432: HiOpc = X86::SETGr; return X86::SETGr;
- case X86::ATOMUMAX6432: HiOpc = X86::SETBr; return X86::SETBr;
- case X86::ATOMUMIN6432: HiOpc = X86::SETAr; return X86::SETAr;
- }
- llvm_unreachable("Unhandled atomic-load-op opcode!");
-}
-
-// Get opcode of the non-atomic one from the specified atomic instruction for
-// 64-bit data type on 32-bit target with extra opcode.
-static unsigned getNonAtomic6432OpcodeWithExtraOpc(unsigned Opc,
- unsigned &HiOpc,
- unsigned &ExtraOpc) {
- switch (Opc) {
- case X86::ATOMNAND6432:
- ExtraOpc = X86::NOT32r;
- HiOpc = X86::AND32rr;
- return X86::AND32rr;
- }
- llvm_unreachable("Unhandled atomic-load-op opcode!");
-}
-
-// Get pseudo CMOV opcode from the specified data type.
-static unsigned getPseudoCMOVOpc(EVT VT) {
- switch (VT.getSimpleVT().SimpleTy) {
- case MVT::i8: return X86::CMOV_GR8;
- case MVT::i16: return X86::CMOV_GR16;
- case MVT::i32: return X86::CMOV_GR32;
- default:
- break;
- }
- llvm_unreachable("Unknown CMOV opcode!");
-}
-
-// EmitAtomicLoadArith - emit the code sequence for pseudo atomic instructions.
-// They will be translated into a spin-loop or compare-exchange loop from
-//
-// ...
-// dst = atomic-fetch-op MI.addr, MI.val
-// ...
-//
-// to
-//
-// ...
-// t1 = LOAD MI.addr
-// loop:
-// t4 = phi(t1, t3 / loop)
-// t2 = OP MI.val, t4
-// EAX = t4
-// LCMPXCHG [MI.addr], t2, [EAX is implicitly used & defined]
-// t3 = EAX
-// JNE loop
-// sink:
-// dst = t3
-// ...
-MachineBasicBlock *
-X86TargetLowering::EmitAtomicLoadArith(MachineInstr *MI,
- MachineBasicBlock *MBB) const {
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
-
- MachineFunction *MF = MBB->getParent();
- MachineRegisterInfo &MRI = MF->getRegInfo();
-
- const BasicBlock *BB = MBB->getBasicBlock();
- MachineFunction::iterator I = MBB;
- ++I;
-
- assert(MI->getNumOperands() <= X86::AddrNumOperands + 4 &&
- "Unexpected number of operands");
-
- assert(MI->hasOneMemOperand() &&
- "Expected atomic-load-op to have one memoperand");
-
- // Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
-
- unsigned DstReg, SrcReg;
- unsigned MemOpndSlot;
-
- unsigned CurOp = 0;
-
- DstReg = MI->getOperand(CurOp++).getReg();
- MemOpndSlot = CurOp;
- CurOp += X86::AddrNumOperands;
- SrcReg = MI->getOperand(CurOp++).getReg();
-
- const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
- MVT::SimpleValueType VT = *RC->vt_begin();
- unsigned t1 = MRI.createVirtualRegister(RC);
- unsigned t2 = MRI.createVirtualRegister(RC);
- unsigned t3 = MRI.createVirtualRegister(RC);
- unsigned t4 = MRI.createVirtualRegister(RC);
- unsigned PhyReg = getX86SubSuperRegister(X86::EAX, VT);
-
- unsigned LCMPXCHGOpc = getCmpXChgOpcode(VT);
- unsigned LOADOpc = getLoadOpcode(VT);
-
- // For the atomic load-arith operator, we generate
- //
- // thisMBB:
- // t1 = LOAD [MI.addr]
- // mainMBB:
- // t4 = phi(t1 / thisMBB, t3 / mainMBB)
- // t1 = OP MI.val, EAX
- // EAX = t4
- // LCMPXCHG [MI.addr], t1, [EAX is implicitly used & defined]
- // t3 = EAX
- // JNE mainMBB
- // sinkMBB:
- // dst = t3
-
- MachineBasicBlock *thisMBB = MBB;
- MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
- MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
- MF->insert(I, mainMBB);
- MF->insert(I, sinkMBB);
-
- MachineInstrBuilder MIB;
-
- // Transfer the remainder of BB and its successor edges to sinkMBB.
- sinkMBB->splice(sinkMBB->begin(), MBB,
- llvm::next(MachineBasicBlock::iterator(MI)), MBB->end());
- sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
-
- // thisMBB:
- MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), t1);
- for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
- MachineOperand NewMO = MI->getOperand(MemOpndSlot + i);
- if (NewMO.isReg())
- NewMO.setIsKill(false);
- MIB.addOperand(NewMO);
- }
- for (MachineInstr::mmo_iterator MMOI = MMOBegin; MMOI != MMOEnd; ++MMOI) {
- unsigned flags = (*MMOI)->getFlags();
- flags = (flags & ~MachineMemOperand::MOStore) | MachineMemOperand::MOLoad;
- MachineMemOperand *MMO =
- MF->getMachineMemOperand((*MMOI)->getPointerInfo(), flags,
- (*MMOI)->getSize(),
- (*MMOI)->getBaseAlignment(),
- (*MMOI)->getTBAAInfo(),
- (*MMOI)->getRanges());
- MIB.addMemOperand(MMO);
- }
-
- thisMBB->addSuccessor(mainMBB);
-
- // mainMBB:
- MachineBasicBlock *origMainMBB = mainMBB;
-
- // Add a PHI.
- MachineInstr *Phi = BuildMI(mainMBB, DL, TII->get(X86::PHI), t4)
- .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(mainMBB);
-
- unsigned Opc = MI->getOpcode();
- switch (Opc) {
- default:
- llvm_unreachable("Unhandled atomic-load-op opcode!");
- case X86::ATOMAND8:
- case X86::ATOMAND16:
- case X86::ATOMAND32:
- case X86::ATOMAND64:
- case X86::ATOMOR8:
- case X86::ATOMOR16:
- case X86::ATOMOR32:
- case X86::ATOMOR64:
- case X86::ATOMXOR8:
- case X86::ATOMXOR16:
- case X86::ATOMXOR32:
- case X86::ATOMXOR64: {
- unsigned ARITHOpc = getNonAtomicOpcode(Opc);
- BuildMI(mainMBB, DL, TII->get(ARITHOpc), t2).addReg(SrcReg)
- .addReg(t4);
- break;
- }
- case X86::ATOMNAND8:
- case X86::ATOMNAND16:
- case X86::ATOMNAND32:
- case X86::ATOMNAND64: {
- unsigned Tmp = MRI.createVirtualRegister(RC);
- unsigned NOTOpc;
- unsigned ANDOpc = getNonAtomicOpcodeWithExtraOpc(Opc, NOTOpc);
- BuildMI(mainMBB, DL, TII->get(ANDOpc), Tmp).addReg(SrcReg)
- .addReg(t4);
- BuildMI(mainMBB, DL, TII->get(NOTOpc), t2).addReg(Tmp);
- break;
- }
- case X86::ATOMMAX8:
- case X86::ATOMMAX16:
- case X86::ATOMMAX32:
- case X86::ATOMMAX64:
- case X86::ATOMMIN8:
- case X86::ATOMMIN16:
- case X86::ATOMMIN32:
- case X86::ATOMMIN64:
- case X86::ATOMUMAX8:
- case X86::ATOMUMAX16:
- case X86::ATOMUMAX32:
- case X86::ATOMUMAX64:
- case X86::ATOMUMIN8:
- case X86::ATOMUMIN16:
- case X86::ATOMUMIN32:
- case X86::ATOMUMIN64: {
- unsigned CMPOpc;
- unsigned CMOVOpc = getNonAtomicOpcodeWithExtraOpc(Opc, CMPOpc);
-
- BuildMI(mainMBB, DL, TII->get(CMPOpc))
- .addReg(SrcReg)
- .addReg(t4);
-
- if (Subtarget->hasCMov()) {
- if (VT != MVT::i8) {
- // Native support
- BuildMI(mainMBB, DL, TII->get(CMOVOpc), t2)
- .addReg(SrcReg)
- .addReg(t4);
- } else {
- // Promote i8 to i32 to use CMOV32
- const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo();
- const TargetRegisterClass *RC32 =
- TRI->getSubClassWithSubReg(getRegClassFor(MVT::i32), X86::sub_8bit);
- unsigned SrcReg32 = MRI.createVirtualRegister(RC32);
- unsigned AccReg32 = MRI.createVirtualRegister(RC32);
- unsigned Tmp = MRI.createVirtualRegister(RC32);
-
- unsigned Undef = MRI.createVirtualRegister(RC32);
- BuildMI(mainMBB, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Undef);
-
- BuildMI(mainMBB, DL, TII->get(TargetOpcode::INSERT_SUBREG), SrcReg32)
- .addReg(Undef)
- .addReg(SrcReg)
- .addImm(X86::sub_8bit);
- BuildMI(mainMBB, DL, TII->get(TargetOpcode::INSERT_SUBREG), AccReg32)
- .addReg(Undef)
- .addReg(t4)
- .addImm(X86::sub_8bit);
-
- BuildMI(mainMBB, DL, TII->get(CMOVOpc), Tmp)
- .addReg(SrcReg32)
- .addReg(AccReg32);
-
- BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t2)
- .addReg(Tmp, 0, X86::sub_8bit);
- }
- } else {
- // Use pseudo select and lower them.
- assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
- "Invalid atomic-load-op transformation!");
- unsigned SelOpc = getPseudoCMOVOpc(VT);
- X86::CondCode CC = X86::getCondFromCMovOpc(CMOVOpc);
- assert(CC != X86::COND_INVALID && "Invalid atomic-load-op transformation!");
- MIB = BuildMI(mainMBB, DL, TII->get(SelOpc), t2)
- .addReg(SrcReg).addReg(t4)
- .addImm(CC);
- mainMBB = EmitLoweredSelect(MIB, mainMBB);
- // Replace the original PHI node as mainMBB is changed after CMOV
- // lowering.
- BuildMI(*origMainMBB, Phi, DL, TII->get(X86::PHI), t4)
- .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(mainMBB);
- Phi->eraseFromParent();
- }
- break;
- }
- }
-
- // Copy PhyReg back from virtual register.
- BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), PhyReg)
- .addReg(t4);
-
- MIB = BuildMI(mainMBB, DL, TII->get(LCMPXCHGOpc));
- for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
- MachineOperand NewMO = MI->getOperand(MemOpndSlot + i);
- if (NewMO.isReg())
- NewMO.setIsKill(false);
- MIB.addOperand(NewMO);
- }
- MIB.addReg(t2);
- MIB.setMemRefs(MMOBegin, MMOEnd);
-
- // Copy PhyReg back to virtual register.
- BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t3)
- .addReg(PhyReg);
-
- BuildMI(mainMBB, DL, TII->get(X86::JNE_4)).addMBB(origMainMBB);
-
- mainMBB->addSuccessor(origMainMBB);
- mainMBB->addSuccessor(sinkMBB);
-
- // sinkMBB:
- BuildMI(*sinkMBB, sinkMBB->begin(), DL,
- TII->get(TargetOpcode::COPY), DstReg)
- .addReg(t3);
-
- MI->eraseFromParent();
- return sinkMBB;
-}
-
-// EmitAtomicLoadArith6432 - emit the code sequence for pseudo atomic
-// instructions. They will be translated into a spin-loop or compare-exchange
-// loop from
-//
-// ...
-// dst = atomic-fetch-op MI.addr, MI.val
-// ...
-//
-// to
-//
-// ...
-// t1L = LOAD [MI.addr + 0]
-// t1H = LOAD [MI.addr + 4]
-// loop:
-// t4L = phi(t1L, t3L / loop)
-// t4H = phi(t1H, t3H / loop)
-// t2L = OP MI.val.lo, t4L
-// t2H = OP MI.val.hi, t4H
-// EAX = t4L
-// EDX = t4H
-// EBX = t2L
-// ECX = t2H
-// LCMPXCHG8B [MI.addr], [ECX:EBX & EDX:EAX are implicitly used and EDX:EAX is implicitly defined]
-// t3L = EAX
-// t3H = EDX
-// JNE loop
-// sink:
-// dstL = t3L
-// dstH = t3H
-// ...
-MachineBasicBlock *
-X86TargetLowering::EmitAtomicLoadArith6432(MachineInstr *MI,
- MachineBasicBlock *MBB) const {
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
-
- MachineFunction *MF = MBB->getParent();
- MachineRegisterInfo &MRI = MF->getRegInfo();
-
- const BasicBlock *BB = MBB->getBasicBlock();
- MachineFunction::iterator I = MBB;
- ++I;
-
- assert(MI->getNumOperands() <= X86::AddrNumOperands + 7 &&
- "Unexpected number of operands");
-
- assert(MI->hasOneMemOperand() &&
- "Expected atomic-load-op32 to have one memoperand");
-
- // Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
-
- unsigned DstLoReg, DstHiReg;
- unsigned SrcLoReg, SrcHiReg;
- unsigned MemOpndSlot;
-
- unsigned CurOp = 0;
-
- DstLoReg = MI->getOperand(CurOp++).getReg();
- DstHiReg = MI->getOperand(CurOp++).getReg();
- MemOpndSlot = CurOp;
- CurOp += X86::AddrNumOperands;
- SrcLoReg = MI->getOperand(CurOp++).getReg();
- SrcHiReg = MI->getOperand(CurOp++).getReg();
-
- const TargetRegisterClass *RC = &X86::GR32RegClass;
- const TargetRegisterClass *RC8 = &X86::GR8RegClass;
-
- unsigned t1L = MRI.createVirtualRegister(RC);
- unsigned t1H = MRI.createVirtualRegister(RC);
- unsigned t2L = MRI.createVirtualRegister(RC);
- unsigned t2H = MRI.createVirtualRegister(RC);
- unsigned t3L = MRI.createVirtualRegister(RC);
- unsigned t3H = MRI.createVirtualRegister(RC);
- unsigned t4L = MRI.createVirtualRegister(RC);
- unsigned t4H = MRI.createVirtualRegister(RC);
-
- unsigned LCMPXCHGOpc = X86::LCMPXCHG8B;
- unsigned LOADOpc = X86::MOV32rm;
-
- // For the atomic load-arith operator, we generate
- //
- // thisMBB:
- // t1L = LOAD [MI.addr + 0]
- // t1H = LOAD [MI.addr + 4]
- // mainMBB:
- // t4L = phi(t1L / thisMBB, t3L / mainMBB)
- // t4H = phi(t1H / thisMBB, t3H / mainMBB)
- // t2L = OP MI.val.lo, t4L
- // t2H = OP MI.val.hi, t4H
- // EBX = t2L
- // ECX = t2H
- // LCMPXCHG8B [MI.addr], [ECX:EBX & EDX:EAX are implicitly used and EDX:EAX is implicitly defined]
- // t3L = EAX
- // t3H = EDX
- // JNE loop
- // sinkMBB:
- // dstL = t3L
- // dstH = t3H
-
- MachineBasicBlock *thisMBB = MBB;
- MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
- MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
- MF->insert(I, mainMBB);
- MF->insert(I, sinkMBB);
-
- MachineInstrBuilder MIB;
-
- // Transfer the remainder of BB and its successor edges to sinkMBB.
- sinkMBB->splice(sinkMBB->begin(), MBB,
- llvm::next(MachineBasicBlock::iterator(MI)), MBB->end());
- sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
-
- // thisMBB:
- // Lo
- MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), t1L);
- for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
- MachineOperand NewMO = MI->getOperand(MemOpndSlot + i);
- if (NewMO.isReg())
- NewMO.setIsKill(false);
- MIB.addOperand(NewMO);
- }
- for (MachineInstr::mmo_iterator MMOI = MMOBegin; MMOI != MMOEnd; ++MMOI) {
- unsigned flags = (*MMOI)->getFlags();
- flags = (flags & ~MachineMemOperand::MOStore) | MachineMemOperand::MOLoad;
- MachineMemOperand *MMO =
- MF->getMachineMemOperand((*MMOI)->getPointerInfo(), flags,
- (*MMOI)->getSize(),
- (*MMOI)->getBaseAlignment(),
- (*MMOI)->getTBAAInfo(),
- (*MMOI)->getRanges());
- MIB.addMemOperand(MMO);
- };
- MachineInstr *LowMI = MIB;
-
- // Hi
- MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), t1H);
- for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
- if (i == X86::AddrDisp) {
- MIB.addDisp(MI->getOperand(MemOpndSlot + i), 4); // 4 == sizeof(i32)
- } else {
- MachineOperand NewMO = MI->getOperand(MemOpndSlot + i);
- if (NewMO.isReg())
- NewMO.setIsKill(false);
- MIB.addOperand(NewMO);
- }
- }
- MIB.setMemRefs(LowMI->memoperands_begin(), LowMI->memoperands_end());
-
- thisMBB->addSuccessor(mainMBB);
-
- // mainMBB:
- MachineBasicBlock *origMainMBB = mainMBB;
-
- // Add PHIs.
- MachineInstr *PhiL = BuildMI(mainMBB, DL, TII->get(X86::PHI), t4L)
- .addReg(t1L).addMBB(thisMBB).addReg(t3L).addMBB(mainMBB);
- MachineInstr *PhiH = BuildMI(mainMBB, DL, TII->get(X86::PHI), t4H)
- .addReg(t1H).addMBB(thisMBB).addReg(t3H).addMBB(mainMBB);
-
- unsigned Opc = MI->getOpcode();
- switch (Opc) {
- default:
- llvm_unreachable("Unhandled atomic-load-op6432 opcode!");
- case X86::ATOMAND6432:
- case X86::ATOMOR6432:
- case X86::ATOMXOR6432:
- case X86::ATOMADD6432:
- case X86::ATOMSUB6432: {
- unsigned HiOpc;
- unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc);
- BuildMI(mainMBB, DL, TII->get(LoOpc), t2L).addReg(t4L)
- .addReg(SrcLoReg);
- BuildMI(mainMBB, DL, TII->get(HiOpc), t2H).addReg(t4H)
- .addReg(SrcHiReg);
- break;
- }
- case X86::ATOMNAND6432: {
- unsigned HiOpc, NOTOpc;
- unsigned LoOpc = getNonAtomic6432OpcodeWithExtraOpc(Opc, HiOpc, NOTOpc);
- unsigned TmpL = MRI.createVirtualRegister(RC);
- unsigned TmpH = MRI.createVirtualRegister(RC);
- BuildMI(mainMBB, DL, TII->get(LoOpc), TmpL).addReg(SrcLoReg)
- .addReg(t4L);
- BuildMI(mainMBB, DL, TII->get(HiOpc), TmpH).addReg(SrcHiReg)
- .addReg(t4H);
- BuildMI(mainMBB, DL, TII->get(NOTOpc), t2L).addReg(TmpL);
- BuildMI(mainMBB, DL, TII->get(NOTOpc), t2H).addReg(TmpH);
- break;
- }
- case X86::ATOMMAX6432:
- case X86::ATOMMIN6432:
- case X86::ATOMUMAX6432:
- case X86::ATOMUMIN6432: {
- unsigned HiOpc;
- unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc);
- unsigned cL = MRI.createVirtualRegister(RC8);
- unsigned cH = MRI.createVirtualRegister(RC8);
- unsigned cL32 = MRI.createVirtualRegister(RC);
- unsigned cH32 = MRI.createVirtualRegister(RC);
- unsigned cc = MRI.createVirtualRegister(RC);
- // cl := cmp src_lo, lo
- BuildMI(mainMBB, DL, TII->get(X86::CMP32rr))
- .addReg(SrcLoReg).addReg(t4L);
- BuildMI(mainMBB, DL, TII->get(LoOpc), cL);
- BuildMI(mainMBB, DL, TII->get(X86::MOVZX32rr8), cL32).addReg(cL);
- // ch := cmp src_hi, hi
- BuildMI(mainMBB, DL, TII->get(X86::CMP32rr))
- .addReg(SrcHiReg).addReg(t4H);
- BuildMI(mainMBB, DL, TII->get(HiOpc), cH);
- BuildMI(mainMBB, DL, TII->get(X86::MOVZX32rr8), cH32).addReg(cH);
- // cc := if (src_hi == hi) ? cl : ch;
- if (Subtarget->hasCMov()) {
- BuildMI(mainMBB, DL, TII->get(X86::CMOVE32rr), cc)
- .addReg(cH32).addReg(cL32);
- } else {
- MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), cc)
- .addReg(cH32).addReg(cL32)
- .addImm(X86::COND_E);
- mainMBB = EmitLoweredSelect(MIB, mainMBB);
- }
- BuildMI(mainMBB, DL, TII->get(X86::TEST32rr)).addReg(cc).addReg(cc);
- if (Subtarget->hasCMov()) {
- BuildMI(mainMBB, DL, TII->get(X86::CMOVNE32rr), t2L)
- .addReg(SrcLoReg).addReg(t4L);
- BuildMI(mainMBB, DL, TII->get(X86::CMOVNE32rr), t2H)
- .addReg(SrcHiReg).addReg(t4H);
- } else {
- MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), t2L)
- .addReg(SrcLoReg).addReg(t4L)
- .addImm(X86::COND_NE);
- mainMBB = EmitLoweredSelect(MIB, mainMBB);
- // As the lowered CMOV won't clobber EFLAGS, we could reuse it for the
- // 2nd CMOV lowering.
- mainMBB->addLiveIn(X86::EFLAGS);
- MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), t2H)
- .addReg(SrcHiReg).addReg(t4H)
- .addImm(X86::COND_NE);
- mainMBB = EmitLoweredSelect(MIB, mainMBB);
- // Replace the original PHI node as mainMBB is changed after CMOV
- // lowering.
- BuildMI(*origMainMBB, PhiL, DL, TII->get(X86::PHI), t4L)
- .addReg(t1L).addMBB(thisMBB).addReg(t3L).addMBB(mainMBB);
- BuildMI(*origMainMBB, PhiH, DL, TII->get(X86::PHI), t4H)
- .addReg(t1H).addMBB(thisMBB).addReg(t3H).addMBB(mainMBB);
- PhiL->eraseFromParent();
- PhiH->eraseFromParent();
- }
- break;
- }
- case X86::ATOMSWAP6432: {
- unsigned HiOpc;
- unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc);
- BuildMI(mainMBB, DL, TII->get(LoOpc), t2L).addReg(SrcLoReg);
- BuildMI(mainMBB, DL, TII->get(HiOpc), t2H).addReg(SrcHiReg);
- break;
- }
- }
-
- // Copy EDX:EAX back from HiReg:LoReg
- BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EAX).addReg(t4L);
- BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EDX).addReg(t4H);
- // Copy ECX:EBX from t1H:t1L
- BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EBX).addReg(t2L);
- BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::ECX).addReg(t2H);
-
- MIB = BuildMI(mainMBB, DL, TII->get(LCMPXCHGOpc));
- for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
- MachineOperand NewMO = MI->getOperand(MemOpndSlot + i);
- if (NewMO.isReg())
- NewMO.setIsKill(false);
- MIB.addOperand(NewMO);
- }
- MIB.setMemRefs(MMOBegin, MMOEnd);
-
- // Copy EDX:EAX back to t3H:t3L
- BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t3L).addReg(X86::EAX);
- BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t3H).addReg(X86::EDX);
-
- BuildMI(mainMBB, DL, TII->get(X86::JNE_4)).addMBB(origMainMBB);
-
- mainMBB->addSuccessor(origMainMBB);
- mainMBB->addSuccessor(sinkMBB);
-
- // sinkMBB:
- BuildMI(*sinkMBB, sinkMBB->begin(), DL,
- TII->get(TargetOpcode::COPY), DstLoReg)
- .addReg(t3L);
- BuildMI(*sinkMBB, sinkMBB->begin(), DL,
- TII->get(TargetOpcode::COPY), DstHiReg)
- .addReg(t3H);
-
- MI->eraseFromParent();
- return sinkMBB;
-}
-
// FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
// or XMM0_V32I8 in AVX all of this code can be replaced with that
// in the .td file.
@@ -14954,7 +17114,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(
MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
// Machine Information
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ const TargetInstrInfo *TII = MBB->getParent()->getTarget().getInstrInfo();
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
@@ -14995,7 +17155,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(
OffsetDestReg = 0; // unused
OverflowDestReg = DestReg;
- offsetMBB = NULL;
+ offsetMBB = nullptr;
overflowMBB = thisMBB;
endMBB = thisMBB;
} else {
@@ -15031,8 +17191,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(
// Transfer the remainder of MBB and its successor edges to endMBB.
endMBB->splice(endMBB->begin(), thisMBB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- thisMBB->end());
+ std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
// Make offsetMBB and overflowMBB successors of thisMBB
@@ -15202,8 +17361,7 @@ X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
// Transfer the remainder of MBB and its successor edges to EndMBB.
EndMBB->splice(EndMBB->begin(), MBB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- MBB->end());
+ std::next(MachineBasicBlock::iterator(MI)), MBB->end());
EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
// The original block will now fall through to the XMM save block.
@@ -15212,7 +17370,7 @@ X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
XMMSaveMBB->addSuccessor(EndMBB);
// Now add the instructions.
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ const TargetInstrInfo *TII = MBB->getParent()->getTarget().getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned CountReg = MI->getOperand(0).getReg();
@@ -15265,7 +17423,7 @@ static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
MachineBasicBlock* BB,
const TargetRegisterInfo* TRI) {
// Scan forward through BB for a use/def of EFLAGS.
- MachineBasicBlock::iterator miI(llvm::next(SelectItr));
+ MachineBasicBlock::iterator miI(std::next(SelectItr));
for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
const MachineInstr& mi = *miI;
if (mi.readsRegister(X86::EFLAGS))
@@ -15295,7 +17453,7 @@ static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
MachineBasicBlock *
X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
MachineBasicBlock *BB) const {
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ const TargetInstrInfo *TII = BB->getParent()->getTarget().getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
// To "insert" a SELECT_CC instruction, we actually have to insert the
@@ -15321,7 +17479,7 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
// If the EFLAGS register isn't dead in the terminator, then claim that it's
// live into the sink and copy blocks.
- const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo();
+ const TargetRegisterInfo* TRI = BB->getParent()->getTarget().getRegisterInfo();
if (!MI->killsRegister(X86::EFLAGS) &&
!checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
copy0MBB->addLiveIn(X86::EFLAGS);
@@ -15330,8 +17488,7 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
// Add the true and fallthrough blocks as its successors.
@@ -15363,12 +17520,12 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
MachineBasicBlock *
X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB,
bool Is64Bit) const {
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
MachineFunction *MF = BB->getParent();
+ const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
+ DebugLoc DL = MI->getDebugLoc();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
- assert(getTargetMachine().Options.EnableSegmentedStacks);
+ assert(MF->shouldSplitStack());
unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
unsigned TlsOffset = Is64Bit ? 0x70 : 0x30;
@@ -15411,8 +17568,8 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB,
MF->insert(MBBIter, mallocMBB);
MF->insert(MBBIter, continueMBB);
- continueMBB->splice(continueMBB->begin(), BB, llvm::next
- (MachineBasicBlock::iterator(MI)), BB->end());
+ continueMBB->splice(continueMBB->begin(), BB,
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
continueMBB->transferSuccessorsAndUpdatePHIs(BB);
// Add code to the main basic block to check if the stack limit has been hit,
@@ -15435,7 +17592,7 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB,
// Calls into a routine in libgcc to allocate more space from the heap.
const uint32_t *RegMask =
- getTargetMachine().getRegisterInfo()->getCallPreservedMask(CallingConv::C);
+ MF->getTarget().getRegisterInfo()->getCallPreservedMask(CallingConv::C);
if (Is64Bit) {
BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
.addReg(sizeVReg);
@@ -15483,11 +17640,11 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB,
MachineBasicBlock *
X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
- MachineBasicBlock *BB) const {
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineBasicBlock *BB) const {
+ const TargetInstrInfo *TII = BB->getParent()->getTarget().getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
- assert(!Subtarget->isTargetEnvMacho());
+ assert(!Subtarget->isTargetMacho());
// The lowering is pretty easy: we're just emitting the call to _alloca. The
// non-trivial part is impdef of ESP.
@@ -15518,7 +17675,7 @@ X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
}
} else {
const char *StackProbeSymbol =
- Subtarget->isTargetWindows() ? "_chkstk" : "_alloca";
+ Subtarget->isTargetKnownWindowsMSVC() ? "_chkstk" : "_alloca";
BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32))
.addExternalSymbol(StackProbeSymbol)
@@ -15540,10 +17697,10 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
// our load from the relocation, sticking it in either RDI (x86-64)
// or EAX and doing an indirect call. The return value will then
// be in the normal return register.
+ MachineFunction *F = BB->getParent();
const X86InstrInfo *TII
- = static_cast<const X86InstrInfo*>(getTargetMachine().getInstrInfo());
+ = static_cast<const X86InstrInfo*>(F->getTarget().getInstrInfo());
DebugLoc DL = MI->getDebugLoc();
- MachineFunction *F = BB->getParent();
assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
assert(MI->getOperand(3).isGlobal() && "This should be a global");
@@ -15552,7 +17709,7 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
// FIXME: The 32-bit calls have non-standard calling conventions. Use a
// proper register mask.
const uint32_t *RegMask =
- getTargetMachine().getRegisterInfo()->getCallPreservedMask(CallingConv::C);
+ F->getTarget().getRegisterInfo()->getCallPreservedMask(CallingConv::C);
if (Subtarget->is64Bit()) {
MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
TII->get(X86::MOV64rm), X86::RDI)
@@ -15564,7 +17721,7 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
addDirectMem(MIB, X86::RDI);
MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
- } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
+ } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
TII->get(X86::MOV32rm), X86::EAX)
.addReg(0)
@@ -15596,9 +17753,8 @@ MachineBasicBlock *
X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
MachineBasicBlock *MBB) const {
DebugLoc DL = MI->getDebugLoc();
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
-
MachineFunction *MF = MBB->getParent();
+ const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
MachineRegisterInfo &MRI = MF->getRegInfo();
const BasicBlock *BB = MBB->getBasicBlock();
@@ -15653,15 +17809,15 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), MBB,
- llvm::next(MachineBasicBlock::iterator(MI)), MBB->end());
+ std::next(MachineBasicBlock::iterator(MI)), MBB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
// thisMBB:
unsigned PtrStoreOpc = 0;
unsigned LabelReg = 0;
const int64_t LabelOffset = 1 * PVT.getStoreSize();
- Reloc::Model RM = getTargetMachine().getRelocationModel();
- bool UseImmLabel = (getTargetMachine().getCodeModel() == CodeModel::Small) &&
+ Reloc::Model RM = MF->getTarget().getRelocationModel();
+ bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
(RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
// Prepare IP either in reg or imm.
@@ -15705,7 +17861,7 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
.addMBB(restoreMBB);
const X86RegisterInfo *RegInfo =
- static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
+ static_cast<const X86RegisterInfo*>(MF->getTarget().getRegisterInfo());
MIB.addRegMask(RegInfo->getNoPreservedMask());
thisMBB->addSuccessor(mainMBB);
thisMBB->addSuccessor(restoreMBB);
@@ -15734,9 +17890,8 @@ MachineBasicBlock *
X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
MachineBasicBlock *MBB) const {
DebugLoc DL = MI->getDebugLoc();
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
-
MachineFunction *MF = MBB->getParent();
+ const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
MachineRegisterInfo &MRI = MF->getRegInfo();
// Memory Reference
@@ -15752,7 +17907,7 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
unsigned Tmp = MRI.createVirtualRegister(RC);
// Since FP is only updated here but NOT referenced, it's treated as GPR.
const X86RegisterInfo *RegInfo =
- static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
+ static_cast<const X86RegisterInfo*>(MF->getTarget().getRegisterInfo());
unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
unsigned SP = RegInfo->getStackRegister();
@@ -15794,6 +17949,89 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
return MBB;
}
+// Replace 213-type (isel default) FMA3 instructions with 231-type for
+// accumulator loops. Writing back to the accumulator allows the coalescer
+// to remove extra copies in the loop.
+MachineBasicBlock *
+X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
+ MachineBasicBlock *MBB) const {
+ MachineOperand &AddendOp = MI->getOperand(3);
+
+ // Bail out early if the addend isn't a register - we can't switch these.
+ if (!AddendOp.isReg())
+ return MBB;
+
+ MachineFunction &MF = *MBB->getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ // Check whether the addend is defined by a PHI:
+ assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
+ MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
+ if (!AddendDef.isPHI())
+ return MBB;
+
+ // Look for the following pattern:
+ // loop:
+ // %addend = phi [%entry, 0], [%loop, %result]
+ // ...
+ // %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
+
+ // Replace with:
+ // loop:
+ // %addend = phi [%entry, 0], [%loop, %result]
+ // ...
+ // %result<tied1> = FMA231 %addend<tied0>, %m1, %m2
+
+ for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
+ assert(AddendDef.getOperand(i).isReg());
+ MachineOperand PHISrcOp = AddendDef.getOperand(i);
+ MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
+ if (&PHISrcInst == MI) {
+ // Found a matching instruction.
+ unsigned NewFMAOpc = 0;
+ switch (MI->getOpcode()) {
+ case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
+ case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
+ case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
+ case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
+ case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
+ case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
+ case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
+ case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
+ case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
+ case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
+ case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
+ case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
+ case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
+ case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
+ case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
+ case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
+ case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
+ case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
+ case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
+ case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
+ case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
+ case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
+ case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
+ case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
+ default: llvm_unreachable("Unrecognized FMA variant.");
+ }
+
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ MachineInstrBuilder MIB =
+ BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
+ .addOperand(MI->getOperand(0))
+ .addOperand(MI->getOperand(3))
+ .addOperand(MI->getOperand(2))
+ .addOperand(MI->getOperand(1));
+ MBB->insert(MachineBasicBlock::iterator(MI), MIB);
+ MI->eraseFromParent();
+ }
+ }
+
+ return MBB;
+}
+
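A minimal scalar sketch of the accumulator-loop shape this rewrite targets (plain C++, standard <cmath> only; the operand orders cited follow the FMA3 encoding, where 213 means dst = src2*dst + src3 and 231 means dst = src2*src3 + dst):

    #include <cmath>

    // acc = a[i]*b[i] + acc on every iteration. With the 213 form the
    // destination register must hold a multiplicand, so the accumulator has
    // to be copied back into place each time around the loop; with the 231
    // form the destination *is* the accumulator, which is the shape
    // emitFMA3Instr detects via the PHI feeding the addend operand.
    float dot(const float *a, const float *b, int n) {
      float acc = 0.0f;
      for (int i = 0; i < n; ++i)
        acc = std::fma(a[i], b[i], acc);
      return acc;
    }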
MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB) const {
@@ -15844,12 +18082,12 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::FP80_TO_INT16_IN_MEM:
case X86::FP80_TO_INT32_IN_MEM:
case X86::FP80_TO_INT64_IN_MEM: {
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineFunction *F = BB->getParent();
+ const TargetInstrInfo *TII = F->getTarget().getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
// Change the floating point control register to use "round towards zero"
// mode when truncating to an integer value.
- MachineFunction *F = BB->getParent();
int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
addFrameReference(BuildMI(*BB, MI, DL,
TII->get(X86::FNSTCW16m)), CWFrameIdx);
@@ -15929,7 +18167,7 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::VPCMPESTRM128MEM:
assert(Subtarget->hasSSE42() &&
"Target must have SSE4.2 or AVX features enabled");
- return EmitPCMPSTRM(MI, BB, getTargetMachine().getInstrInfo());
+ return EmitPCMPSTRM(MI, BB, BB->getParent()->getTarget().getInstrInfo());
// String/text processing lowering.
case X86::PCMPISTRIREG:
@@ -15942,71 +18180,15 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::VPCMPESTRIMEM:
assert(Subtarget->hasSSE42() &&
"Target must have SSE4.2 or AVX features enabled");
- return EmitPCMPSTRI(MI, BB, getTargetMachine().getInstrInfo());
+ return EmitPCMPSTRI(MI, BB, BB->getParent()->getTarget().getInstrInfo());
// Thread synchronization.
case X86::MONITOR:
- return EmitMonitor(MI, BB, getTargetMachine().getInstrInfo(), Subtarget);
+ return EmitMonitor(MI, BB, BB->getParent()->getTarget().getInstrInfo(), Subtarget);
// xbegin
case X86::XBEGIN:
- return EmitXBegin(MI, BB, getTargetMachine().getInstrInfo());
-
- // Atomic Lowering.
- case X86::ATOMAND8:
- case X86::ATOMAND16:
- case X86::ATOMAND32:
- case X86::ATOMAND64:
- // Fall through
- case X86::ATOMOR8:
- case X86::ATOMOR16:
- case X86::ATOMOR32:
- case X86::ATOMOR64:
- // Fall through
- case X86::ATOMXOR16:
- case X86::ATOMXOR8:
- case X86::ATOMXOR32:
- case X86::ATOMXOR64:
- // Fall through
- case X86::ATOMNAND8:
- case X86::ATOMNAND16:
- case X86::ATOMNAND32:
- case X86::ATOMNAND64:
- // Fall through
- case X86::ATOMMAX8:
- case X86::ATOMMAX16:
- case X86::ATOMMAX32:
- case X86::ATOMMAX64:
- // Fall through
- case X86::ATOMMIN8:
- case X86::ATOMMIN16:
- case X86::ATOMMIN32:
- case X86::ATOMMIN64:
- // Fall through
- case X86::ATOMUMAX8:
- case X86::ATOMUMAX16:
- case X86::ATOMUMAX32:
- case X86::ATOMUMAX64:
- // Fall through
- case X86::ATOMUMIN8:
- case X86::ATOMUMIN16:
- case X86::ATOMUMIN32:
- case X86::ATOMUMIN64:
- return EmitAtomicLoadArith(MI, BB);
-
- // This group does 64-bit operations on a 32-bit host.
- case X86::ATOMAND6432:
- case X86::ATOMOR6432:
- case X86::ATOMXOR6432:
- case X86::ATOMNAND6432:
- case X86::ATOMADD6432:
- case X86::ATOMSUB6432:
- case X86::ATOMMAX6432:
- case X86::ATOMMIN6432:
- case X86::ATOMUMAX6432:
- case X86::ATOMUMIN6432:
- case X86::ATOMSWAP6432:
- return EmitAtomicLoadArith6432(MI, BB);
+ return EmitXBegin(MI, BB, BB->getParent()->getTarget().getInstrInfo());
case X86::VASTART_SAVE_XMM_REGS:
return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
@@ -16021,6 +18203,36 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::EH_SjLj_LongJmp32:
case X86::EH_SjLj_LongJmp64:
return emitEHSjLjLongJmp(MI, BB);
+
+ case TargetOpcode::STACKMAP:
+ case TargetOpcode::PATCHPOINT:
+ return emitPatchPoint(MI, BB);
+
+ case X86::VFMADDPDr213r:
+ case X86::VFMADDPSr213r:
+ case X86::VFMADDSDr213r:
+ case X86::VFMADDSSr213r:
+ case X86::VFMSUBPDr213r:
+ case X86::VFMSUBPSr213r:
+ case X86::VFMSUBSDr213r:
+ case X86::VFMSUBSSr213r:
+ case X86::VFNMADDPDr213r:
+ case X86::VFNMADDPSr213r:
+ case X86::VFNMADDSDr213r:
+ case X86::VFNMADDSSr213r:
+ case X86::VFNMSUBPDr213r:
+ case X86::VFNMSUBPSr213r:
+ case X86::VFNMSUBSDr213r:
+ case X86::VFNMSUBSSr213r:
+ case X86::VFMADDPDr213rY:
+ case X86::VFMADDPSr213rY:
+ case X86::VFMSUBPDr213rY:
+ case X86::VFMSUBPSr213rY:
+ case X86::VFNMADDPDr213rY:
+ case X86::VFNMADDPSr213rY:
+ case X86::VFNMSUBPDr213rY:
+ case X86::VFNMSUBPSr213rY:
+ return emitFMA3Instr(MI, BB);
}
}
@@ -16028,11 +18240,11 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// X86 Optimization Hooks
//===----------------------------------------------------------------------===//
-void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) const {
+void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth) const {
unsigned BitWidth = KnownZero.getBitWidth();
unsigned Opc = Op.getOpcode();
assert((Opc >= ISD::BUILTIN_OP_END ||
@@ -16095,8 +18307,10 @@ void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
}
}
-unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
- unsigned Depth) const {
+unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
+ SDValue Op,
+ const SelectionDAG &,
+ unsigned Depth) const {
// SETCC_CARRY sets the dest to ~0 for true or 0 for false.
if (Op.getOpcode() == X86ISD::SETCC_CARRY)
return Op.getValueType().getScalarType().getSizeInBits();
@@ -16198,7 +18412,6 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
SDValue ResNode =
DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
- array_lengthof(Ops),
Ld->getMemoryVT(),
Ld->getPointerInfo(),
Ld->getAlignment(),
@@ -16248,13 +18461,385 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+/// \brief Get the PSHUF-style mask from PSHUF node.
+///
+/// This is a very minor wrapper around getTargetShuffleMask to ease forming
+/// v4 PSHUF-style masks that can be reused with such instructions.
+static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
+ SmallVector<int, 4> Mask;
+ bool IsUnary;
+ bool HaveMask = getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), Mask, IsUnary);
+ (void)HaveMask;
+ assert(HaveMask);
+
+ switch (N.getOpcode()) {
+ case X86ISD::PSHUFD:
+ return Mask;
+ case X86ISD::PSHUFLW:
+ Mask.resize(4);
+ return Mask;
+ case X86ISD::PSHUFHW:
+ Mask.erase(Mask.begin(), Mask.begin() + 4);
+ for (int &M : Mask)
+ M -= 4;
+ return Mask;
+ default:
+ llvm_unreachable("No valid shuffle instruction found!");
+ }
+}
+
+/// \brief Search for a combinable shuffle across a chain ending in pshufd.
+///
+/// We walk up the chain and look for a combinable shuffle, skipping over
+/// shuffles that we could hoist this shuffle's transformation past without
+/// altering anything.
+static bool combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
+ SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ assert(N.getOpcode() == X86ISD::PSHUFD &&
+ "Called with something other than an x86 128-bit half shuffle!");
+ SDLoc DL(N);
+
+ // Walk up a single-use chain looking for a combinable shuffle.
+ SDValue V = N.getOperand(0);
+ for (; V.hasOneUse(); V = V.getOperand(0)) {
+ switch (V.getOpcode()) {
+ default:
+ return false; // Nothing combined!
+
+ case ISD::BITCAST:
+ // Skip bitcasts as we always know the type for the target specific
+ // instructions.
+ continue;
+
+ case X86ISD::PSHUFD:
+ // Found another dword shuffle.
+ break;
+
+ case X86ISD::PSHUFLW:
+ // Check that the low words (being shuffled) are the identity in the
+ // dword shuffle, and the high words are self-contained.
+ if (Mask[0] != 0 || Mask[1] != 1 ||
+ !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
+ return false;
+
+ continue;
+
+ case X86ISD::PSHUFHW:
+ // Check that the high words (being shuffled) are the identity in the
+ // dword shuffle, and the low words are self-contained.
+ if (Mask[2] != 2 || Mask[3] != 3 ||
+ !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
+ return false;
+
+ continue;
+
+ case X86ISD::UNPCKL:
+ case X86ISD::UNPCKH:
+ // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
+ // shuffle into a preceding word shuffle.
+ if (V.getValueType() != MVT::v16i8 && V.getValueType() != MVT::v8i16)
+ return false;
+
+ // Search for a half-shuffle which we can combine with.
+ unsigned CombineOp =
+ V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
+ if (V.getOperand(0) != V.getOperand(1) ||
+ !V->isOnlyUserOf(V.getOperand(0).getNode()))
+ return false;
+ V = V.getOperand(0);
+ do {
+ switch (V.getOpcode()) {
+ default:
+ return false; // Nothing to combine.
+
+ case X86ISD::PSHUFLW:
+ case X86ISD::PSHUFHW:
+ if (V.getOpcode() == CombineOp)
+ break;
+
+ // Fallthrough!
+ case ISD::BITCAST:
+ V = V.getOperand(0);
+ continue;
+ }
+ break;
+ } while (V.hasOneUse());
+ break;
+ }
+ // Break out of the loop if we break out of the switch.
+ break;
+ }
+
+ if (!V.hasOneUse())
+ // We fell out of the loop without finding a viable combining instruction.
+ return false;
+
+ // Record the old value to use in RAUW-ing.
+ SDValue Old = V;
+
+ // Merge this node's mask and our incoming mask.
+ SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
+ for (int &M : Mask)
+ M = VMask[M];
+ V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
+ getV4X86ShuffleImm8ForMask(Mask, DAG));
+
+ // It is possible that one of the combinable shuffles was completely absorbed
+ // by the other; in that case just replace it and revisit all its users.
+ if (Old.getNode() == V.getNode()) {
+ DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo=*/true);
+ return true;
+ }
+
+ // Replace N with its operand as we're going to combine that shuffle away.
+ DAG.ReplaceAllUsesWith(N, N.getOperand(0));
+
+ // Replace the combinable shuffle with the combined one, updating all users
+ // so that we re-evaluate the chain here.
+ DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);
+ return true;
+}
+
+/// \brief Search for a combinable shuffle across a chain ending in pshuflw
+/// or pshufhw.
+///
+/// We walk up the chain, skipping shuffles of the other half and looking
+/// through shuffles which switch halves trying to find a shuffle of the same
+/// pair of dwords.
+static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
+ SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ assert(
+ (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
+ "Called with something other than an x86 128-bit half shuffle!");
+ SDLoc DL(N);
+ unsigned CombineOpcode = N.getOpcode();
+
+ // Walk up a single-use chain looking for a combinable shuffle.
+ SDValue V = N.getOperand(0);
+ for (; V.hasOneUse(); V = V.getOperand(0)) {
+ switch (V.getOpcode()) {
+ default:
+ return false; // Nothing combined!
+
+ case ISD::BITCAST:
+ // Skip bitcasts as we always know the type for the target specific
+ // instructions.
+ continue;
+
+ case X86ISD::PSHUFLW:
+ case X86ISD::PSHUFHW:
+ if (V.getOpcode() == CombineOpcode)
+ break;
+
+ // Other-half shuffles are no-ops.
+ continue;
+
+ case X86ISD::PSHUFD: {
+ // We can only handle pshufd if the half we are combining either stays in
+ // its half, or switches to the other half. Bail if one of these isn't
+ // true.
+ SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
+ int DOffset = CombineOpcode == X86ISD::PSHUFLW ? 0 : 2;
+ if (!((VMask[DOffset + 0] < 2 && VMask[DOffset + 1] < 2) ||
+ (VMask[DOffset + 0] >= 2 && VMask[DOffset + 1] >= 2)))
+ return false;
+
+ // Map the mask through the pshufd and keep walking up the chain.
+ for (int i = 0; i < 4; ++i)
+ Mask[i] = 2 * (VMask[DOffset + Mask[i] / 2] % 2) + Mask[i] % 2;
+
+ // Switch halves if the pshufd does.
+ CombineOpcode =
+ VMask[DOffset + Mask[0] / 2] < 2 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
+ continue;
+ }
+ }
+ // Break out of the loop if we break out of the switch.
+ break;
+ }
+
+ if (!V.hasOneUse())
+ // We fell out of the loop without finding a viable combining instruction.
+ return false;
+
+ // Record the old value to use in RAUW-ing.
+ SDValue Old = V;
+
+ // Merge this node's mask and our incoming mask (adjusted to account for all
+ // the pshufd instructions encountered).
+ SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
+ for (int &M : Mask)
+ M = VMask[M];
+ V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
+ getV4X86ShuffleImm8ForMask(Mask, DAG));
+
+ // Replace N with its operand as we're going to combine that shuffle away.
+ DAG.ReplaceAllUsesWith(N, N.getOperand(0));
+
+ // Replace the combinable shuffle with the combined one, updating all users
+ // so that we re-evaluate the chain here.
+ DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);
+ return true;
+}
+
+/// \brief Try to combine x86 target specific shuffles.
+static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget *Subtarget) {
+ SDLoc DL(N);
+ MVT VT = N.getSimpleValueType();
+ SmallVector<int, 4> Mask;
+
+ switch (N.getOpcode()) {
+ case X86ISD::PSHUFD:
+ case X86ISD::PSHUFLW:
+ case X86ISD::PSHUFHW:
+ Mask = getPSHUFShuffleMask(N);
+ assert(Mask.size() == 4);
+ break;
+ default:
+ return SDValue();
+ }
+
+ // Nuke no-op shuffles that show up after combining.
+ if (isNoopShuffleMask(Mask))
+ return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
+
+ // Look for simplifications involving one or two shuffle instructions.
+ SDValue V = N.getOperand(0);
+ switch (N.getOpcode()) {
+ default:
+ break;
+ case X86ISD::PSHUFLW:
+ case X86ISD::PSHUFHW:
+ assert(VT == MVT::v8i16);
+ (void)VT;
+
+ if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
+ return SDValue(); // We combined away this shuffle, so we're done.
+
+ // See if this reduces to a PSHUFD which is no more expensive and can
+ // combine with more operations.
+ if (Mask[0] % 2 == 0 && Mask[2] % 2 == 0 &&
+ areAdjacentMasksSequential(Mask)) {
+ int DMask[] = {-1, -1, -1, -1};
+ int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
+ DMask[DOffset + 0] = DOffset + Mask[0] / 2;
+ DMask[DOffset + 1] = DOffset + Mask[2] / 2;
+ V = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V);
+ DCI.AddToWorklist(V.getNode());
+ V = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V,
+ getV4X86ShuffleImm8ForMask(DMask, DAG));
+ DCI.AddToWorklist(V.getNode());
+ return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
+ }
+
+ // Look for shuffle patterns which can be implemented as a single unpack.
+ // FIXME: This doesn't handle the location of the PSHUFD generically, and
+ // only works when we have a PSHUFD followed by two half-shuffles.
+ if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
+ (V.getOpcode() == X86ISD::PSHUFLW ||
+ V.getOpcode() == X86ISD::PSHUFHW) &&
+ V.getOpcode() != N.getOpcode() &&
+ V.hasOneUse()) {
+ SDValue D = V.getOperand(0);
+ while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
+ D = D.getOperand(0);
+ if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
+ SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
+ SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
+ int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
+ int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
+ int WordMask[8];
+ for (int i = 0; i < 4; ++i) {
+ WordMask[i + NOffset] = Mask[i] + NOffset;
+ WordMask[i + VOffset] = VMask[i] + VOffset;
+ }
+ // Map the word mask through the DWord mask.
+ int MappedMask[8];
+ for (int i = 0; i < 8; ++i)
+ MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
+ const int UnpackLoMask[] = {0, 0, 1, 1, 2, 2, 3, 3};
+ const int UnpackHiMask[] = {4, 4, 5, 5, 6, 6, 7, 7};
+ if (std::equal(std::begin(MappedMask), std::end(MappedMask),
+ std::begin(UnpackLoMask)) ||
+ std::equal(std::begin(MappedMask), std::end(MappedMask),
+ std::begin(UnpackHiMask))) {
+ // We can replace all three shuffles with an unpack.
+ V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, D.getOperand(0));
+ DCI.AddToWorklist(V.getNode());
+ return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
+ : X86ISD::UNPCKH,
+ DL, MVT::v8i16, V, V);
+ }
+ }
+ }
+
+ break;
+
+ case X86ISD::PSHUFD:
+ if (combineRedundantDWordShuffle(N, Mask, DAG, DCI))
+ return SDValue(); // We combined away this shuffle.
+
+ break;
+ }
+
+ return SDValue();
+}
+
/// PerformShuffleCombine - Performs several different shuffle combines.
static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget *Subtarget) {
SDLoc dl(N);
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
+ // Canonicalize shuffles that perform 'addsub' on packed float vectors
+ // according to the rule:
+ // (shuffle (FADD A, B), (FSUB A, B), Mask) ->
+ // (shuffle (FSUB A, -B), (FADD A, -B), Mask)
+ //
+ // Where 'Mask' is:
+ // <0,5,2,7> -- for v4f32 and v4f64 shuffles;
+ // <0,3> -- for v2f64 shuffles;
+ // <0,9,2,11,4,13,6,15> -- for v8f32 shuffles.
+ //
+ // This helps pattern-matching more SSE3/AVX ADDSUB instructions
+ // during the ISel stage.
+ if (N->getOpcode() == ISD::VECTOR_SHUFFLE &&
+ ((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
+ (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
+ N0->getOpcode() == ISD::FADD && N1->getOpcode() == ISD::FSUB &&
+ // Operands to the FADD and FSUB must be the same.
+ ((N0->getOperand(0) == N1->getOperand(0) &&
+ N0->getOperand(1) == N1->getOperand(1)) ||
+ // FADD is commutable. See if by commuting the operands of the FADD
+ // we would still be able to match the operands of the FSUB dag node.
+ (N0->getOperand(1) == N1->getOperand(0) &&
+ N0->getOperand(0) == N1->getOperand(1))) &&
+ N0->getOperand(0)->getOpcode() != ISD::UNDEF &&
+ N0->getOperand(1)->getOpcode() != ISD::UNDEF) {
+
+ ShuffleVectorSDNode *SV = cast<ShuffleVectorSDNode>(N);
+ unsigned NumElts = VT.getVectorNumElements();
+ ArrayRef<int> Mask = SV->getMask();
+ bool CanFold = true;
+
+ for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i)
+ CanFold = Mask[i] == (int)((i & 1) ? i + NumElts : i);
+
+ if (CanFold) {
+ SDValue Op0 = N1->getOperand(0);
+ SDValue Op1 = DAG.getNode(ISD::FNEG, dl, VT, N1->getOperand(1));
+ SDValue Sub = DAG.getNode(ISD::FSUB, dl, VT, Op0, Op1);
+ SDValue Add = DAG.getNode(ISD::FADD, dl, VT, Op0, Op1);
+ return DAG.getVectorShuffle(VT, dl, Sub, Add, Mask);
+ }
+ }
+
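A minimal check of the rule above (assumes an x86 host with SSE3 and a build with -msse3; the shuffle result is computed in scalar form for clarity):

    #include <pmmintrin.h>
    #include <cassert>
    #include <cstring>

    int main() {
      alignas(16) float a[4] = {1.0f, 2.0f, 3.0f, 4.0f};
      alignas(16) float b[4] = {0.5f, 1.5f, 2.5f, 3.5f};
      __m128 A = _mm_load_ps(a), B = _mm_load_ps(b);

      // shuffle (FADD A, B), (FSUB A, B), <0,5,2,7> picks the sums in the
      // even lanes and the differences in the odd lanes.
      float shuf[4] = {a[0] + b[0], a[1] - b[1], a[2] + b[2], a[3] - b[3]};

      // ADDSUBPS on A and -B produces exactly the same lanes, which is why
      // rewriting the operands as FSUB/FADD of -B exposes the ADDSUB pattern.
      __m128 NegB = _mm_sub_ps(_mm_setzero_ps(), B);
      alignas(16) float addsub[4];
      _mm_store_ps(addsub, _mm_addsub_ps(A, NegB));

      assert(std::memcmp(shuf, addsub, sizeof shuf) == 0);
      return 0;
    }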
// Don't create instructions with illegal types after legalize types has run.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
@@ -16265,6 +18850,57 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
N->getOpcode() == ISD::VECTOR_SHUFFLE)
return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
+ // During Type Legalization, when promoting illegal vector types,
+ // the backend might introduce new shuffle dag nodes and bitcasts.
+ //
+ // This code performs the following transformation:
+ // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
+ // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
+ //
+ // We do this only if both the bitcast and the BINOP dag nodes have
+ // one use. Also, perform this transformation only if the new binary
+ // operation is legal. This is to avoid introducing dag nodes that
+ // potentially need to be further expanded (or custom lowered) into a
+ // less optimal sequence of dag nodes.
+ if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
+ N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
+ N0.getOpcode() == ISD::BITCAST) {
+ SDValue BC0 = N0.getOperand(0);
+ EVT SVT = BC0.getValueType();
+ unsigned Opcode = BC0.getOpcode();
+ unsigned NumElts = VT.getVectorNumElements();
+
+ if (BC0.hasOneUse() && SVT.isVector() &&
+ SVT.getVectorNumElements() * 2 == NumElts &&
+ TLI.isOperationLegal(Opcode, VT)) {
+ bool CanFold = false;
+ switch (Opcode) {
+ default : break;
+ case ISD::ADD :
+ case ISD::FADD :
+ case ISD::SUB :
+ case ISD::FSUB :
+ case ISD::MUL :
+ case ISD::FMUL :
+ CanFold = true;
+ }
+
+ unsigned SVTNumElts = SVT.getVectorNumElements();
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
+ for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
+ CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
+ for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
+ CanFold = SVOp->getMaskElt(i) < 0;
+
+ if (CanFold) {
+ SDValue BC00 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(0));
+ SDValue BC01 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(1));
+ SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
+ return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
+ }
+ }
+ }
+
// Only handle 128 wide vector from here on.
if (!VT.is128BitVector())
return SDValue();
@@ -16276,7 +18912,18 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
- return EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
+ SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
+ if (LD.getNode())
+ return LD;
+
+ if (isTargetShuffle(N->getOpcode())) {
+ SDValue Shuffle =
+ PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
+ if (Shuffle.getNode())
+ return Shuffle;
+ }
+
+ return SDValue();
}
/// PerformTruncateCombine - Converts truncate operation to
@@ -16383,44 +19030,6 @@ static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
EltNo);
}
-/// Extract one bit from mask vector, like v16i1 or v8i1.
-/// AVX-512 feature.
-static SDValue ExtractBitFromMaskVector(SDNode *N, SelectionDAG &DAG) {
- SDValue Vec = N->getOperand(0);
- SDLoc dl(Vec);
- MVT VecVT = Vec.getSimpleValueType();
- SDValue Idx = N->getOperand(1);
- MVT EltVT = N->getSimpleValueType(0);
-
- assert((VecVT.getVectorElementType() == MVT::i1 && EltVT == MVT::i8) ||
- "Unexpected operands in ExtractBitFromMaskVector");
-
- // variable index
- if (!isa<ConstantSDNode>(Idx)) {
- MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
- SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
- SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
- ExtVT.getVectorElementType(), Ext);
- return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
- }
-
- unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
-
- MVT ScalarVT = MVT::getIntegerVT(VecVT.getSizeInBits());
- unsigned MaxShift = VecVT.getSizeInBits() - 1;
- Vec = DAG.getNode(ISD::BITCAST, dl, ScalarVT, Vec);
- Vec = DAG.getNode(ISD::SHL, dl, ScalarVT, Vec,
- DAG.getConstant(MaxShift - IdxVal, ScalarVT));
- Vec = DAG.getNode(ISD::SRL, dl, ScalarVT, Vec,
- DAG.getConstant(MaxShift, ScalarVT));
-
- if (VecVT == MVT::v16i1) {
- Vec = DAG.getNode(ISD::BITCAST, dl, MVT::i16, Vec);
- return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Vec);
- }
- return DAG.getNode(ISD::BITCAST, dl, MVT::i8, Vec);
-}
-
/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
/// generation and convert it from being a bunch of shuffles and extracts
/// to a simple store and scalar loads to extract the elements.
@@ -16432,10 +19041,6 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
SDValue InputVector = N->getOperand(0);
- if (InputVector.getValueType().getVectorElementType() == MVT::i1 &&
- !DCI.isBeforeLegalize())
- return ExtractBitFromMaskVector(N, DAG);
-
// Detect whether we are trying to convert from mmx to i32 and the bitcast
// from mmx to v2i32 has a single usage.
if (InputVector.getNode()->getOpcode() == llvm::ISD::BITCAST &&
@@ -16597,6 +19202,51 @@ matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
return std::make_pair(Opc, NeedSplit);
}
+static SDValue
+TransformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget *Subtarget) {
+ SDLoc dl(N);
+ SDValue Cond = N->getOperand(0);
+ SDValue LHS = N->getOperand(1);
+ SDValue RHS = N->getOperand(2);
+
+ if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
+ SDValue CondSrc = Cond->getOperand(0);
+ if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
+ Cond = CondSrc->getOperand(0);
+ }
+
+ MVT VT = N->getSimpleValueType(0);
+ MVT EltVT = VT.getVectorElementType();
+ unsigned NumElems = VT.getVectorNumElements();
+ // There is no blend with immediate in AVX-512.
+ if (VT.is512BitVector())
+ return SDValue();
+
+ if (!Subtarget->hasSSE41() || EltVT == MVT::i8)
+ return SDValue();
+ if (!Subtarget->hasInt256() && VT == MVT::v16i16)
+ return SDValue();
+
+ if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
+ return SDValue();
+
+ unsigned MaskValue = 0;
+ if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
+ return SDValue();
+
+ SmallVector<int, 8> ShuffleMask(NumElems, -1);
+ for (unsigned i = 0; i < NumElems; ++i) {
+ // Be sure we emit undef where we can.
+ if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
+ ShuffleMask[i] = -1;
+ else
+ ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
+ }
+
+ return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
+}
+
/// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT
/// nodes.
static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
@@ -16927,28 +19577,34 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
- // If the RHS is a constant we have to reverse the const canonicalization.
- // x > C-1 ? x+-C : 0 --> subus x, C
- if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
- isSplatVector(CondRHS.getNode()) && isSplatVector(OpRHS.getNode())) {
- APInt A = cast<ConstantSDNode>(OpRHS.getOperand(0))->getAPIntValue();
- if (CondRHS.getConstantOperandVal(0) == -A-1)
- return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS,
- DAG.getConstant(-A, VT));
- }
-
- // Another special case: If C was a sign bit, the sub has been
- // canonicalized into a xor.
- // FIXME: Would it be better to use ComputeMaskedBits to determine whether
- // it's safe to decanonicalize the xor?
- // x s< 0 ? x^C : 0 --> subus x, C
- if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
- ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
- isSplatVector(OpRHS.getNode())) {
- APInt A = cast<ConstantSDNode>(OpRHS.getOperand(0))->getAPIntValue();
- if (A.isSignBit())
- return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
- }
+ if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
+ if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
+ if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
+ if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
+ // If the RHS is a constant we have to reverse the const
+ // canonicalization.
+ // x > C-1 ? x+-C : 0 --> subus x, C
+ if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
+ CondRHSConst->getAPIntValue() ==
+ (-OpRHSConst->getAPIntValue() - 1))
+ return DAG.getNode(
+ X86ISD::SUBUS, DL, VT, OpLHS,
+ DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));
+
+ // Another special case: If C was a sign bit, the sub has been
+ // canonicalized into a xor.
+ // FIXME: Would it be better to use computeKnownBits to determine
+ // whether it's safe to decanonicalize the xor?
+ // x s< 0 ? x^C : 0 --> subus x, C
+ if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
+ ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
+ OpRHSConst->getAPIntValue().isSignBit())
+ // Note that we have to rebuild the RHS constant here to ensure we
+ // don't rely on particular values of undef lanes.
+ return DAG.getNode(
+ X86ISD::SUBUS, DL, VT, OpLHS,
+ DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
+ }
}
}
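A scalar stand-in for the PSUBUS pattern being matched here (plain C++; C is an arbitrary nonzero constant, mirroring the splat the combine requires):

    #include <cstdint>
    #include <cassert>

    // "x > C-1 ? x+-C : 0" is unsigned saturating subtraction: the result is
    // x - C clamped at zero, which is exactly what PSUBUS computes per lane.
    uint16_t subus(uint16_t x, uint16_t c) {
      return x > static_cast<uint16_t>(c - 1) ? static_cast<uint16_t>(x - c)
                                              : 0;
    }

    int main() {
      assert(subus(10, 3) == 7);
      assert(subus(3, 10) == 0);  // would wrap around, saturates to 0 instead
      assert(subus(10, 10) == 0);
      return 0;
    }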
@@ -16981,12 +19637,13 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
// Simplify vector selection if the selector will be produced by CMPP*/PCMP*.
if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
// Check if SETCC has already been promoted
- TLI.getSetCCResultType(*DAG.getContext(), VT) == Cond.getValueType()) {
+ TLI.getSetCCResultType(*DAG.getContext(), VT) == CondVT &&
+ // Check that condition value type matches vselect operand type
+ CondVT == VT) {
assert(Cond.getValueType().isVector() &&
"vector select expects a vector selector!");
- EVT IntVT = Cond.getValueType();
bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
@@ -17001,7 +19658,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
ISD::CondCode NewCC =
ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
Cond.getOperand(0).getValueType().isInteger());
- Cond = DAG.getSetCC(DL, IntVT, Cond.getOperand(0), Cond.getOperand(1), NewCC);
+ Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1), NewCC);
std::swap(LHS, RHS);
TValIsAllOnes = FValIsAllOnes;
FValIsAllZeros = TValIsAllZeros;
@@ -17014,22 +19671,103 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
if (TValIsAllOnes && FValIsAllZeros)
Ret = Cond;
else if (TValIsAllOnes)
- Ret = DAG.getNode(ISD::OR, DL, IntVT, Cond,
- DAG.getNode(ISD::BITCAST, DL, IntVT, RHS));
+ Ret = DAG.getNode(ISD::OR, DL, CondVT, Cond,
+ DAG.getNode(ISD::BITCAST, DL, CondVT, RHS));
else if (FValIsAllZeros)
- Ret = DAG.getNode(ISD::AND, DL, IntVT, Cond,
- DAG.getNode(ISD::BITCAST, DL, IntVT, LHS));
+ Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
+ DAG.getNode(ISD::BITCAST, DL, CondVT, LHS));
return DAG.getNode(ISD::BITCAST, DL, VT, Ret);
}
}
+ // Try to fold this VSELECT into a MOVSS/MOVSD
+ if (N->getOpcode() == ISD::VSELECT &&
+ Cond.getOpcode() == ISD::BUILD_VECTOR && !DCI.isBeforeLegalize()) {
+ if (VT == MVT::v4i32 || VT == MVT::v4f32 ||
+ (Subtarget->hasSSE2() && (VT == MVT::v2i64 || VT == MVT::v2f64))) {
+ bool CanFold = false;
+ unsigned NumElems = Cond.getNumOperands();
+ SDValue A = LHS;
+ SDValue B = RHS;
+
+ if (isZero(Cond.getOperand(0))) {
+ CanFold = true;
+
+ // fold (vselect <0,-1,-1,-1>, A, B) -> (movss A, B)
+ // fold (vselect <0,-1>, A, B) -> (movsd A, B)
+ for (unsigned i = 1, e = NumElems; i != e && CanFold; ++i)
+ CanFold = isAllOnes(Cond.getOperand(i));
+ } else if (isAllOnes(Cond.getOperand(0))) {
+ CanFold = true;
+ std::swap(A, B);
+
+ // fold (vselect <-1,0,0,0>, A, B) -> (movss B, A)
+ // fold (vselect <-1,0>, A, B) -> (movsd B, A)
+ for (unsigned i = 1, e = NumElems; i != e && CanFold; ++i)
+ CanFold = isZero(Cond.getOperand(i));
+ }
+
+ if (CanFold) {
+ if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return getTargetShuffleNode(X86ISD::MOVSS, DL, VT, A, B, DAG);
+ return getTargetShuffleNode(X86ISD::MOVSD, DL, VT, A, B, DAG);
+ }
+
+ if (Subtarget->hasSSE2() && (VT == MVT::v4i32 || VT == MVT::v4f32)) {
+ // fold (v4i32: vselect <0,0,-1,-1>, A, B) ->
+ // (v4i32 (bitcast (movsd (v2i64 (bitcast A)),
+ // (v2i64 (bitcast B)))))
+ //
+ // fold (v4f32: vselect <0,0,-1,-1>, A, B) ->
+ // (v4f32 (bitcast (movsd (v2f64 (bitcast A)),
+ // (v2f64 (bitcast B)))))
+ //
+ // fold (v4i32: vselect <-1,-1,0,0>, A, B) ->
+ // (v4i32 (bitcast (movsd (v2i64 (bitcast B)),
+ // (v2i64 (bitcast A)))))
+ //
+ // fold (v4f32: vselect <-1,-1,0,0>, A, B) ->
+ // (v4f32 (bitcast (movsd (v2f64 (bitcast B)),
+ // (v2f64 (bitcast A)))))
+
+ CanFold = (isZero(Cond.getOperand(0)) &&
+ isZero(Cond.getOperand(1)) &&
+ isAllOnes(Cond.getOperand(2)) &&
+ isAllOnes(Cond.getOperand(3)));
+
+ if (!CanFold && isAllOnes(Cond.getOperand(0)) &&
+ isAllOnes(Cond.getOperand(1)) &&
+ isZero(Cond.getOperand(2)) &&
+ isZero(Cond.getOperand(3))) {
+ CanFold = true;
+ std::swap(LHS, RHS);
+ }
+
+ if (CanFold) {
+ EVT NVT = (VT == MVT::v4i32) ? MVT::v2i64 : MVT::v2f64;
+ SDValue NewA = DAG.getNode(ISD::BITCAST, DL, NVT, LHS);
+ SDValue NewB = DAG.getNode(ISD::BITCAST, DL, NVT, RHS);
+ SDValue Select = getTargetShuffleNode(X86ISD::MOVSD, DL, NVT, NewA,
+ NewB, DAG);
+ return DAG.getNode(ISD::BITCAST, DL, VT, Select);
+ }
+ }
+ }
+ }
+
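A small SSE sketch of the first fold in the block above (assumes an x86 host, built with -msse2): selecting lane 0 from B and the remaining lanes from A is exactly what MOVSS does.

    #include <xmmintrin.h>
    #include <cassert>

    int main() {
      __m128 A = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
      __m128 B = _mm_setr_ps(9.0f, 8.0f, 7.0f, 6.0f);
      alignas(16) float r[4];
      _mm_store_ps(r, _mm_move_ss(A, B)); // == vselect <0,-1,-1,-1>, A, B
      assert(r[0] == 9.0f && r[1] == 2.0f && r[2] == 3.0f && r[3] == 4.0f);
      return 0;
    }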
// If we know that this node is legal then we know that it is going to be
// matched by one of the SSE/AVX BLEND instructions. These instructions only
// depend on the highest bit in each word. Try to use SimplifyDemandedBits
// to simplify previous instructions.
if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
- !DCI.isBeforeLegalize() && TLI.isOperationLegal(ISD::VSELECT, VT)) {
+ !DCI.isBeforeLegalize() &&
+ // We explicitly check against v8i16 and v16i16 because, although
+ // they're marked as Custom, they might only be legal when Cond is a
+ // build_vector of constants. This will be taken care of in a later
+ // condition.
+ (TLI.isOperationLegalOrCustom(ISD::VSELECT, VT) && VT != MVT::v16i16 &&
+ VT != MVT::v8i16)) {
unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();
// Don't optimize vector selects that map to mask-registers.
@@ -17056,6 +19794,23 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
DCI.CommitTargetLoweringOpt(TLO);
}
+ // We should generate an X86ISD::BLENDI from a vselect if its argument
+ // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
+ // constants. This specific pattern gets generated when we split a
+ // selector for a 512 bit vector in a machine without AVX512 (but with
+ // 256-bit vectors), during legalization:
+ //
+ // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
+ //
+ // Iff we find this pattern and the build_vectors are built from
+ // constants, we translate the vselect into a shuffle_vector that we
+ // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
+ if (N->getOpcode() == ISD::VSELECT && !DCI.isBeforeLegalize()) {
+ SDValue Shuffle = TransformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
+ if (Shuffle.getNode())
+ return Shuffle;
+ }
+
return SDValue();
}
@@ -17090,7 +19845,7 @@ static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
SDValue Op2 = Cmp.getOperand(1);
SDValue SetCC;
- const ConstantSDNode* C = 0;
+ const ConstantSDNode* C = nullptr;
bool needOppositeCond = (CC == X86::COND_E);
bool checkAgainstTrue = false; // Is it a comparison against 1?
@@ -17225,8 +19980,7 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
(FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
SDValue Ops[] = { FalseOp, TrueOp,
DAG.getConstant(CC, MVT::i8), Flags };
- return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(),
- Ops, array_lengthof(Ops));
+ return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
}
// If this is a select between two integer constants, try to do some
@@ -17341,7 +20095,7 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
// the DCI.xxxx conditions are provided to postpone the optimization as
// late as possible.
- ConstantSDNode *CmpAgainst = 0;
+ ConstantSDNode *CmpAgainst = nullptr;
if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
(CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
!isa<ConstantSDNode>(Cond.getOperand(0))) {
@@ -17356,8 +20110,7 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
SDValue Ops[] = { FalseOp, Cond.getOperand(0),
DAG.getConstant(CC, MVT::i8), Cond };
- return DAG.getNode(X86ISD::CMOV, DL, N->getVTList (), Ops,
- array_lengthof(Ops));
+ return DAG.getNode(X86ISD::CMOV, DL, N->getVTList (), Ops);
}
}
}
@@ -17365,6 +20118,108 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget *Subtarget) {
+ unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ switch (IntNo) {
+ default: return SDValue();
+ // SSE/AVX/AVX2 blend intrinsics.
+ case Intrinsic::x86_avx2_pblendvb:
+ case Intrinsic::x86_avx2_pblendw:
+ case Intrinsic::x86_avx2_pblendd_128:
+ case Intrinsic::x86_avx2_pblendd_256:
+ // Don't try to simplify this intrinsic if we don't have AVX2.
+ if (!Subtarget->hasAVX2())
+ return SDValue();
+ // FALL-THROUGH
+ case Intrinsic::x86_avx_blend_pd_256:
+ case Intrinsic::x86_avx_blend_ps_256:
+ case Intrinsic::x86_avx_blendv_pd_256:
+ case Intrinsic::x86_avx_blendv_ps_256:
+ // Don't try to simplify this intrinsic if we don't have AVX.
+ if (!Subtarget->hasAVX())
+ return SDValue();
+ // FALL-THROUGH
+ case Intrinsic::x86_sse41_pblendw:
+ case Intrinsic::x86_sse41_blendpd:
+ case Intrinsic::x86_sse41_blendps:
+ case Intrinsic::x86_sse41_blendvps:
+ case Intrinsic::x86_sse41_blendvpd:
+ case Intrinsic::x86_sse41_pblendvb: {
+ SDValue Op0 = N->getOperand(1);
+ SDValue Op1 = N->getOperand(2);
+ SDValue Mask = N->getOperand(3);
+
+ // Don't try to simplify this intrinsic if we don't have SSE4.1.
+ if (!Subtarget->hasSSE41())
+ return SDValue();
+
+ // fold (blend A, A, Mask) -> A
+ if (Op0 == Op1)
+ return Op0;
+ // fold (blend A, B, allZeros) -> A
+ if (ISD::isBuildVectorAllZeros(Mask.getNode()))
+ return Op0;
+ // fold (blend A, B, allOnes) -> B
+ if (ISD::isBuildVectorAllOnes(Mask.getNode()))
+ return Op1;
+
+ // Simplify the case where the mask is a constant i32 value.
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
+ if (C->isNullValue())
+ return Op0;
+ if (C->isAllOnesValue())
+ return Op1;
+ }
+
+ return SDValue();
+ }
+
+ // Packed SSE2/AVX2 arithmetic shift immediate intrinsics.
+ case Intrinsic::x86_sse2_psrai_w:
+ case Intrinsic::x86_sse2_psrai_d:
+ case Intrinsic::x86_avx2_psrai_w:
+ case Intrinsic::x86_avx2_psrai_d:
+ case Intrinsic::x86_sse2_psra_w:
+ case Intrinsic::x86_sse2_psra_d:
+ case Intrinsic::x86_avx2_psra_w:
+ case Intrinsic::x86_avx2_psra_d: {
+ SDValue Op0 = N->getOperand(1);
+ SDValue Op1 = N->getOperand(2);
+ EVT VT = Op0.getValueType();
+ assert(VT.isVector() && "Expected a vector type!");
+
+ if (isa<BuildVectorSDNode>(Op1))
+ Op1 = Op1.getOperand(0);
+
+ if (!isa<ConstantSDNode>(Op1))
+ return SDValue();
+
+ EVT SVT = VT.getVectorElementType();
+ unsigned SVTBits = SVT.getSizeInBits();
+
+ ConstantSDNode *CND = cast<ConstantSDNode>(Op1);
+ const APInt &C = APInt(SVTBits, CND->getAPIntValue().getZExtValue());
+ uint64_t ShAmt = C.getZExtValue();
+
+ // Don't try to convert this shift into a ISD::SRA if the shift
+ // count is bigger than or equal to the element size.
+ if (ShAmt >= SVTBits)
+ return SDValue();
+
+ // Trivial case: if the shift count is zero, then fold this
+ // into the first operand.
+ if (ShAmt == 0)
+ return Op0;
+
+ // Replace this packed shift intrinsic with a target independent
+ // shift dag node.
+ SDValue Splat = DAG.getConstant(C, VT);
+ return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
+ }
+ }
+}
+
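A minimal sketch of the shift-intrinsic rewrite above (assumes an x86 host with SSE2, built with -msse2; signed right shift on int is arithmetic on the mainstream compilers, and guaranteed to be since C++20):

    #include <emmintrin.h>
    #include <cassert>

    int main() {
      alignas(16) int v[4] = {-64, -7, 8, 1024};
      __m128i V = _mm_load_si128(reinterpret_cast<const __m128i *>(v));
      alignas(16) int out[4];
      // psrai with a constant count is just a per-element arithmetic shift,
      // so the combine can replace the intrinsic with a generic ISD::SRA.
      _mm_store_si128(reinterpret_cast<__m128i *>(out), _mm_srai_epi32(V, 3));
      for (int i = 0; i != 4; ++i)
        assert(out[i] == (v[i] >> 3));
      return 0;
    }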
/// PerformMulCombine - Optimize a single multiply with constant into two
/// in order to implement it with two cheaper instructions, e.g.
/// LEA + SHL, LEA + LEA.
@@ -17457,22 +20312,21 @@ static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
// vector operations in many cases. Also, on sandybridge ADD is faster than
// shl.
// (shl V, 1) -> add V,V
- if (isSplatVector(N1.getNode())) {
- assert(N0.getValueType().isVector() && "Invalid vector shift type");
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(0));
- // We shift all of the values by one. In many cases we do not have
- // hardware support for this operation. This is better expressed as an ADD
- // of two values.
- if (N1C && (1 == N1C->getZExtValue())) {
- return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
+ if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
+ if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
+ assert(N0.getValueType().isVector() && "Invalid vector shift type");
+ // We shift all of the values by one. In many cases we do not have
+ // hardware support for this operation. This is better expressed as an ADD
+ // of two values.
+ if (N1SplatC->getZExtValue() == 1)
+ return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
}
- }
return SDValue();
}
/// \brief Returns a vector of 0s if the node in input is a vector logical
-/// shift by a constant amount which is known to be bigger than or equal
+/// shift by a constant amount which is known to be bigger than or equal
/// to the vector element size in bits.
static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
const X86Subtarget *Subtarget) {
@@ -17485,20 +20339,18 @@ static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
SDValue Amt = N->getOperand(1);
SDLoc DL(N);
- if (isSplatVector(Amt.getNode())) {
- SDValue SclrAmt = Amt->getOperand(0);
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) {
- APInt ShiftAmt = C->getAPIntValue();
+ if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
+ if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
+ APInt ShiftAmt = AmtSplat->getAPIntValue();
unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();
// SSE2/AVX2 logical shifts always return a vector of 0s
- // if the shift amount is bigger than or equal to
+ // if the shift amount is bigger than or equal to
// the element size. The constant shift amount will be
// encoded as a 8-bit immediate.
if (ShiftAmt.trunc(8).uge(MaxAmount))
return getZeroVector(VT, Subtarget, DAG, DL);
}
- }
return SDValue();
}
@@ -17577,16 +20429,22 @@ static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
(cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
- bool is64BitFP = (CMP00.getValueType() == MVT::f64);
- X86ISD::NodeType NTOperator = is64BitFP ?
- X86ISD::FSETCCsd : X86ISD::FSETCCss;
// FIXME: need symbolic constants for these magic numbers.
// See X86ATTInstPrinter.cpp:printSSECC().
unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
- SDValue OnesOrZeroesF = DAG.getNode(NTOperator, DL, CMP00.getValueType(),
- CMP00, CMP01,
+ if (Subtarget->hasAVX512()) {
+ SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
+ CMP01, DAG.getConstant(x86cc, MVT::i8));
+ if (N->getValueType(0) != MVT::i1)
+ return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
+ FSetCC);
+ return FSetCC;
+ }
+ SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
+ CMP00.getValueType(), CMP00, CMP01,
DAG.getConstant(x86cc, MVT::i8));
+ bool is64BitFP = (CMP00.getValueType() == MVT::f64);
MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
if (is64BitFP && !Subtarget->is64Bit()) {
@@ -17686,9 +20544,10 @@ static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
// The right side has to be a 'trunc' or a constant vector.
bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
- bool RHSConst = (isSplatVector(N1.getNode()) &&
- isa<ConstantSDNode>(N1->getOperand(0)));
- if (!RHSTrunc && !RHSConst)
+ ConstantSDNode *RHSConstSplat = nullptr;
+ if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
+ RHSConstSplat = RHSBV->getConstantSplatNode();
+ if (!RHSTrunc && !RHSConstSplat)
return SDValue();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -17698,11 +20557,11 @@ static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
// Set N0 and N1 to hold the inputs to the new wide operation.
N0 = N0->getOperand(0);
- if (RHSConst) {
+ if (RHSConstSplat) {
N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(),
- N1->getOperand(0));
+ SDValue(RHSConstSplat, 0));
SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
- N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, &C[0], C.size());
+ N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
} else if (RHSTrunc) {
N1 = N1->getOperand(0);
}
@@ -17739,64 +20598,13 @@ static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
if (R.getNode())
return R;
- // Create BLSI, BLSR, and BZHI instructions
- // BLSI is X & (-X)
- // BLSR is X & (X-1)
- // BZHI is X & ((1 << Y) - 1)
+ // Create BEXTR instructions
// BEXTR is ((X >> imm) & (2**size-1))
if (VT == MVT::i32 || VT == MVT::i64) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDLoc DL(N);
- if (Subtarget->hasBMI()) {
- // Check LHS for neg
- if (N0.getOpcode() == ISD::SUB && N0.getOperand(1) == N1 &&
- isZero(N0.getOperand(0)))
- return DAG.getNode(X86ISD::BLSI, DL, VT, N1);
-
- // Check RHS for neg
- if (N1.getOpcode() == ISD::SUB && N1.getOperand(1) == N0 &&
- isZero(N1.getOperand(0)))
- return DAG.getNode(X86ISD::BLSI, DL, VT, N0);
-
- // Check LHS for X-1
- if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 &&
- isAllOnes(N0.getOperand(1)))
- return DAG.getNode(X86ISD::BLSR, DL, VT, N1);
-
- // Check RHS for X-1
- if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 &&
- isAllOnes(N1.getOperand(1)))
- return DAG.getNode(X86ISD::BLSR, DL, VT, N0);
- }
-
- if (Subtarget->hasBMI2()) {
- // Check for (and (add (shl 1, Y), -1), X)
- if (N0.getOpcode() == ISD::ADD && isAllOnes(N0.getOperand(1))) {
- SDValue N00 = N0.getOperand(0);
- if (N00.getOpcode() == ISD::SHL) {
- SDValue N001 = N00.getOperand(1);
- assert(N001.getValueType() == MVT::i8 && "unexpected type");
- ConstantSDNode *C = dyn_cast<ConstantSDNode>(N00.getOperand(0));
- if (C && C->getZExtValue() == 1)
- return DAG.getNode(X86ISD::BZHI, DL, VT, N1, N001);
- }
- }
-
- // Check for (and X, (add (shl 1, Y), -1))
- if (N1.getOpcode() == ISD::ADD && isAllOnes(N1.getOperand(1))) {
- SDValue N10 = N1.getOperand(0);
- if (N10.getOpcode() == ISD::SHL) {
- SDValue N101 = N10.getOperand(1);
- assert(N101.getValueType() == MVT::i8 && "unexpected type");
- ConstantSDNode *C = dyn_cast<ConstantSDNode>(N10.getOperand(0));
- if (C && C->getZExtValue() == 1)
- return DAG.getNode(X86ISD::BZHI, DL, VT, N0, N101);
- }
- }
- }
-
// Check for BEXTR.
if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
(N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
@@ -17846,7 +20654,6 @@ static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget *Subtarget) {
- EVT VT = N->getValueType(0);
if (DCI.isBeforeLegalizeOps())
return SDValue();
@@ -17856,6 +20663,7 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
+ EVT VT = N->getValueType(0);
// look for psign/blend
if (VT == MVT::v2i64 || VT == MVT::v4i64) {
@@ -17897,12 +20705,9 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
unsigned SraAmt = ~0;
if (Mask.getOpcode() == ISD::SRA) {
- SDValue Amt = Mask.getOperand(1);
- if (isSplatVector(Amt.getNode())) {
- SDValue SclrAmt = Amt->getOperand(0);
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt))
- SraAmt = C->getZExtValue();
- }
+ if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
+ if (auto *AmtConst = AmtBV->getConstantSplatNode())
+ SraAmt = AmtConst->getZExtValue();
} else if (Mask.getOpcode() == X86ISD::VSRAI) {
SDValue SraC = Mask.getOperand(1);
SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
@@ -17941,6 +20746,18 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
// fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
+ MachineFunction &MF = DAG.getMachineFunction();
+ bool OptForSize = MF.getFunction()->getAttributes().
+ hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
+
+ // SHLD/SHRD instructions have lower register pressure, but on some
+ // platforms they have higher latency than the equivalent
+ // series of shifts/or that would otherwise be generated.
+ // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
+ // have higher latencies and we are not optimizing for size.
+ if (!OptForSize && Subtarget->isSHLDSlow())
+ return SDValue();
+
if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
std::swap(N0, N1);
if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
@@ -18024,8 +20841,7 @@ static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
SDValue Ops[] = { N0.getOperand(0), Neg,
DAG.getConstant(X86::COND_GE, MVT::i8),
SDValue(Neg.getNode(), 1) };
- return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue),
- Ops, array_lengthof(Ops));
+ return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
}
return SDValue();
}
@@ -18034,7 +20850,6 @@ static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget *Subtarget) {
- EVT VT = N->getValueType(0);
if (DCI.isBeforeLegalizeOps())
return SDValue();
@@ -18044,28 +20859,6 @@ static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
return RV;
}
- // Try forming BMI if it is available.
- if (!Subtarget->hasBMI())
- return SDValue();
-
- if (VT != MVT::i32 && VT != MVT::i64)
- return SDValue();
-
- assert(Subtarget->hasBMI() && "Creating BLSMSK requires BMI instructions");
-
- // Create BLSMSK instructions by finding X ^ (X-1)
- SDValue N0 = N->getOperand(0);
- SDValue N1 = N->getOperand(1);
- SDLoc DL(N);
-
- if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 &&
- isAllOnes(N0.getOperand(1)))
- return DAG.getNode(X86ISD::BLSMSK, DL, VT, N1);
-
- if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 &&
- isAllOnes(N1.getOperand(1)))
- return DAG.getNode(X86ISD::BLSMSK, DL, VT, N0);
-
return SDValue();
}
@@ -18205,8 +20998,7 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
}
- SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0],
- Chains.size());
+ SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
// Bitcast the loaded value to a vector of the original element type, in
// the size of the target vector type.
@@ -18381,8 +21173,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
Chains.push_back(Ch);
}
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0],
- Chains.size());
+ return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}
// Turn load->store of MMX types into GPR load/stores. This avoids clobbering
@@ -18405,7 +21196,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
!cast<LoadSDNode>(St->getValue())->isVolatile() &&
St->getChain().hasOneUse() && !St->isVolatile()) {
SDNode* LdVal = St->getValue().getNode();
- LoadSDNode *Ld = 0;
+ LoadSDNode *Ld = nullptr;
int TokenFactorIndex = -1;
SmallVector<SDValue, 8> Ops;
SDNode* ChainVal = St->getChain().getNode();
@@ -18448,8 +21239,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
SDValue NewChain = NewLd.getValue(1);
if (TokenFactorIndex != -1) {
Ops.push_back(NewChain);
- NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0],
- Ops.size());
+ NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
}
return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
St->getPointerInfo(),
@@ -18476,8 +21266,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
if (TokenFactorIndex != -1) {
Ops.push_back(LoLd);
Ops.push_back(HiLd);
- NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0],
- Ops.size());
+ NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
}
LoAddr = St->getBasePtr();
@@ -18869,6 +21658,17 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
}
}
+ if (N0.getOpcode() == ISD::TRUNCATE &&
+ N0.hasOneUse() &&
+ N0.getOperand(0).hasOneUse()) {
+ SDValue N00 = N0.getOperand(0);
+ if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
+ return DAG.getNode(ISD::AND, dl, VT,
+ DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
+ N00.getOperand(0), N00.getOperand(1)),
+ DAG.getConstant(1, VT));
+ }
+ }
if (VT.is256BitVector()) {
SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
if (R.getNode())
@@ -18880,10 +21680,13 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
// Optimize x == -y --> x+y == 0
// x != -y --> x+y != 0
-static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG) {
+static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget* Subtarget) {
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
+ EVT VT = N->getValueType(0);
+ SDLoc DL(N);
if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
@@ -18901,17 +21704,78 @@ static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG) {
return DAG.getSetCC(SDLoc(N), N->getValueType(0),
addV, DAG.getConstant(0, addV.getValueType()), CC);
}
+
+ if (VT.getScalarType() == MVT::i1) {
+ bool IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
+ (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
+ bool IsVZero0 = ISD::isBuildVectorAllZeros(LHS.getNode());
+ if (!IsSEXT0 && !IsVZero0)
+ return SDValue();
+ bool IsSEXT1 = (RHS.getOpcode() == ISD::SIGN_EXTEND) &&
+ (RHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
+ bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
+
+ if (!IsSEXT1 && !IsVZero1)
+ return SDValue();
+
+ if (IsSEXT0 && IsVZero1) {
+      assert(VT == LHS.getOperand(0).getValueType() && "Unexpected operand type");
+ if (CC == ISD::SETEQ)
+ return DAG.getNOT(DL, LHS.getOperand(0), VT);
+ return LHS.getOperand(0);
+ }
+ if (IsSEXT1 && IsVZero0) {
+      assert(VT == RHS.getOperand(0).getValueType() && "Unexpected operand type");
+ if (CC == ISD::SETEQ)
+ return DAG.getNOT(DL, RHS.getOperand(0), VT);
+ return RHS.getOperand(0);
+ }
+ }
+
return SDValue();
}
+static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget *Subtarget) {
+ SDLoc dl(N);
+ MVT VT = N->getOperand(1)->getSimpleValueType(0);
+ assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
+ "X86insertps is only defined for v4x32");
+
+ SDValue Ld = N->getOperand(1);
+ if (MayFoldLoad(Ld)) {
+ // Extract the countS bits from the immediate so we can get the proper
+ // address when narrowing the vector load to a specific element.
+    // When the second source op is a memory address, insertps doesn't use
+ // countS and just gets an f32 from that address.
+ unsigned DestIndex =
+ cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() >> 6;
+ Ld = NarrowVectorLoadToElement(cast<LoadSDNode>(Ld), DestIndex, DAG);
+ } else
+ return SDValue();
+
+ // Create this as a scalar to vector to match the instruction pattern.
+ SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
+ // countS bits are ignored when loading from memory on insertps, which
+ // means we don't need to explicitly set them to 0.
+ return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
+ LoadScalarToVector, N->getOperand(2));
+}
+
// Helper function of PerformSETCCCombine. It is to materialize "setb reg"
// as "sbb reg,reg", since it can be extended without zext and produces
// an all-ones bit which is more useful than 0/1 in some cases.
-static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG) {
- return DAG.getNode(ISD::AND, DL, MVT::i8,
+static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
+ MVT VT) {
+ if (VT == MVT::i8)
+ return DAG.getNode(ISD::AND, DL, VT,
+ DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
+ DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
+ DAG.getConstant(1, VT));
+  assert (VT == MVT::i1 && "Unexpected type for SETCC node");
+ return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
- DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
- DAG.getConstant(1, MVT::i8));
+ DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS));
}
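
A minimal standalone sketch of the identity the MaterializeSETB helper above relies on, assuming only what its comment states (plain C++, not taken from the tree): the sbb-produced value is all-ones exactly when the carry flag is set, so masking it with 1 recovers the ordinary 0/1 setb result, while the all-ones form stays usable without a zero extension.

  #include <cassert>
  #include <cstdint>

  int main() {
    // carry models the EFLAGS carry bit consumed by SETCC_CARRY / "sbb reg,reg".
    for (int carry = 0; carry <= 1; ++carry) {
      uint8_t sbb  = carry ? 0xFF : 0x00; // sbb reg,reg: ~0 when CF=1, 0 otherwise
      uint8_t setb = sbb & 1;             // AND with 1 recovers the 0/1 "setb" value
      assert(setb == static_cast<uint8_t>(carry));
    }
    return 0;
  }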
// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
@@ -18936,7 +21800,7 @@ static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
EFLAGS.getNode()->getVTList(),
EFLAGS.getOperand(1), EFLAGS.getOperand(0));
SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
- return MaterializeSETB(DL, NewEFLAGS, DAG);
+ return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
}
}
@@ -18944,7 +21808,7 @@ static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
// a zext and produces an all-ones bit which is more useful than 0/1 in some
// cases.
if (CC == X86::COND_B)
- return MaterializeSETB(DL, EFLAGS, DAG);
+ return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));
SDValue Flags;
@@ -18980,8 +21844,61 @@ static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
+ SelectionDAG &DAG) {
+  // Take advantage of vector comparisons producing 0 or -1 in each lane to
+  // optimize away the operation when its operand comes from a constant.
+ //
+ // The general transformation is:
+ // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
+ // AND(VECTOR_CMP(x,y), constant2)
+ // constant2 = UNARYOP(constant)
+
+ // Early exit if this isn't a vector operation, the operand of the
+ // unary operation isn't a bitwise AND, or if the sizes of the operations
+ // aren't the same.
+ EVT VT = N->getValueType(0);
+ if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
+ N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
+ VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
+ return SDValue();
+
+ // Now check that the other operand of the AND is a constant splat. We could
+ // make the transformation for non-constant splats as well, but it's unclear
+ // that would be a benefit as it would not eliminate any operations, just
+ // perform one more step in scalar code before moving to the vector unit.
+ if (BuildVectorSDNode *BV =
+ dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
+ // Bail out if the vector isn't a constant splat.
+ if (!BV->getConstantSplatNode())
+ return SDValue();
+
+ // Everything checks out. Build up the new and improved node.
+ SDLoc DL(N);
+ EVT IntVT = BV->getValueType(0);
+ // Create a new constant of the appropriate type for the transformed
+ // DAG.
+ SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
+ // The AND node needs bitcasts to/from an integer vector type around it.
+ SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
+ SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
+ N->getOperand(0)->getOperand(0), MaskConst);
+ SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
+ return Res;
+ }
+
+ return SDValue();
+}
+
static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
const X86TargetLowering *XTLI) {
+ // First try to optimize away the conversion entirely when it's
+ // conditionally from a constant. Vectors only.
+ SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
+ if (Res != SDValue())
+ return Res;
+
+ // Now move on to more general possibilities.
SDValue Op0 = N->getOperand(0);
EVT InVT = Op0->getValueType(0);
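
As a lane-by-lane sanity check of the performVectorCompareAndMaskUnaryOpCombine transform added above, a hedged sketch in plain C++ (sitofp as the unary op; the constant 42 is an arbitrary example lane): each compare lane is all-zeros or all-ones, and converting 0 yields the all-zeros bit pattern, so masking before or after the conversion produces the same bits.

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  static uint32_t bitsOf(float F) { uint32_t B; std::memcpy(&B, &F, sizeof(B)); return B; }

  int main() {
    const int32_t C = 42;                                    // one lane of the constant splat
    const uint32_t CmpLane[2] = {0x00000000u, 0xFFFFFFFFu};  // possible VECTOR_CMP lane values
    for (uint32_t Mask : CmpLane) {
      // Original form: UNARYOP(AND(cmp, C)) -- convert after masking.
      float Before = static_cast<float>(static_cast<int32_t>(Mask & static_cast<uint32_t>(C)));
      // Transformed form: AND(cmp, UNARYOP(C)) -- mask the converted constant's bits.
      uint32_t After = Mask & bitsOf(static_cast<float>(C));
      assert(bitsOf(Before) == After);
    }
    return 0;
  }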
@@ -19177,9 +22094,10 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case ISD::ANY_EXTEND:
case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
- case ISD::SIGN_EXTEND_INREG: return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
+ case ISD::SIGN_EXTEND_INREG:
+ return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG,DCI,Subtarget);
- case ISD::SETCC: return PerformISDSETCCCombine(N, DAG);
+ case ISD::SETCC: return PerformISDSETCCCombine(N, DAG, Subtarget);
case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
@@ -19198,6 +22116,11 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case X86ISD::VPERM2X128:
case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
+ case ISD::INTRINSIC_WO_CHAIN:
+ return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
+ case X86ISD::INSERTPS:
+ return PerformINSERTPSCombine(N, DAG, Subtarget);
+ case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
}
return SDValue();
@@ -19472,7 +22395,7 @@ TargetLowering::ConstraintWeight
Value *CallOperandVal = info.CallOperandVal;
// If we don't have a value, we can't do a match,
// but allow it at the lowest weight.
- if (CallOperandVal == NULL)
+ if (!CallOperandVal)
return CW_Default;
Type *type = CallOperandVal->getType();
// Look at the constraint type.
@@ -19590,7 +22513,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
std::string &Constraint,
std::vector<SDValue>&Ops,
SelectionDAG &DAG) const {
- SDValue Result(0, 0);
+ SDValue Result;
// Only support length 1 constraints for now.
if (Constraint.length() > 1) return;
@@ -19673,7 +22596,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
// If we are in non-pic codegen mode, we allow the address of a global (with
// an optional displacement) to be used with 'i'.
- GlobalAddressSDNode *GA = 0;
+ GlobalAddressSDNode *GA = nullptr;
int64_t Offset = 0;
// Match either (GA), (GA+C), (GA+C1+C2), etc.
@@ -19702,8 +22625,8 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
const GlobalValue *GV = GA->getGlobal();
// If we require an extra load to get this address, as in PIC mode, we
// can't accept it.
- if (isGlobalStubReference(Subtarget->ClassifyGlobalReference(GV,
- getTargetMachine())))
+ if (isGlobalStubReference(
+ Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
return;
Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
@@ -19829,7 +22752,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
// Not found as a standard register?
- if (Res.second == 0) {
+ if (!Res.second) {
// Map st(0) -> st(7) -> ST0
if (Constraint.size() == 7 && Constraint[0] == '{' &&
tolower(Constraint[1]) == 's' &&
@@ -19954,3 +22877,34 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
return Res;
}
+
+int X86TargetLowering::getScalingFactorCost(const AddrMode &AM,
+ Type *Ty) const {
+ // Scaling factors are not free at all.
+ // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
+ // will take 2 allocations in the out of order engine instead of 1
+ // for plain addressing mode, i.e. inst (reg1).
+ // E.g.,
+  // vaddps (%rsi,%rdx), %ymm0, %ymm1
+ // Requires two allocations (one for the load, one for the computation)
+ // whereas:
+ // vaddps (%rsi), %ymm0, %ymm1
+  // Requires just 1 allocation, i.e., freeing an allocation for other
+  // operations and leaving fewer micro-operations to execute.
+ //
+  // For some X86 architectures, this is even worse because, for stores for
+  // example, the complex addressing mode forces the instruction to use the
+ // "load" ports instead of the dedicated "store" port.
+ // E.g., on Haswell:
+ // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
+ // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
+ if (isLegalAddressingMode(AM, Ty))
+ // Scale represents reg2 * scale, thus account for 1
+ // as soon as we use a second register.
+ return AM.Scale != 0;
+ return -1;
+}
+
+bool X86TargetLowering::isTargetFTOL() const {
+ return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();
+}
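
For reference, a minimal sketch of the cost rule the new getScalingFactorCost hook above encodes (a hypothetical standalone struct, not LLVM's TargetLowering::AddrMode): a legal mode using a scaled index register costs one extra allocation, a plain base-register mode costs nothing extra, and an unsupported mode reports a negative cost.

  #include <cassert>

  struct AddrModeSketch { long Scale; }; // hypothetical stand-in for the real AddrMode

  static int scalingFactorCostSketch(const AddrModeSketch &AM, bool IsLegalMode) {
    if (IsLegalMode)
      return AM.Scale != 0; // account for the index register as soon as one is used
    return -1;              // unsupported addressing mode
  }

  int main() {
    assert(scalingFactorCostSketch({0}, true)  == 0);  // e.g. vaddps (%rsi), %ymm0, %ymm1
    assert(scalingFactorCostSketch({8}, true)  == 1);  // e.g. vaddps (%rsi,%rdx,8), %ymm0, %ymm1
    assert(scalingFactorCostSketch({8}, false) == -1); // mode rejected by isLegalAddressingMode
    return 0;
  }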
diff --git a/contrib/llvm/lib/Target/X86/X86ISelLowering.h b/contrib/llvm/lib/Target/X86/X86ISelLowering.h
index bc3dd60..c8cdce7 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/contrib/llvm/lib/Target/X86/X86ISelLowering.h
@@ -15,16 +15,15 @@
#ifndef X86ISELLOWERING_H
#define X86ISELLOWERING_H
-#include "X86MachineFunctionInfo.h"
-#include "X86RegisterInfo.h"
-#include "X86Subtarget.h"
#include "llvm/CodeGen/CallingConvLower.h"
-#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
namespace llvm {
+ class X86Subtarget;
+ class X86TargetMachine;
+
namespace X86ISD {
// X86 Specific DAG Nodes
enum NodeType {
@@ -84,6 +83,12 @@ namespace llvm {
/// readcyclecounter
RDTSC_DAG,
+ /// X86 Read Time-Stamp Counter and Processor ID.
+ RDTSCP_DAG,
+
+ /// X86 Read Performance Monitoring Counters.
+ RDPMC_DAG,
+
/// X86 compare and logical compare instructions.
CMP, COMI, UCOMI,
@@ -94,6 +99,9 @@ namespace llvm {
/// operand, usually produced by a CMP instruction.
SETCC,
+ /// X86 Select
+ SELECT,
+
// Same as SETCC except it's materialized with a sbb and the value is all
// one's or all zero's.
SETCC_CARRY, // R = carry_bit ? ~0 : 0
@@ -101,7 +109,7 @@ namespace llvm {
/// X86 FP SETCC, implemented with CMP{cc}SS/CMP{cc}SD.
/// Operands are two FP values to compare; result is a mask of
/// 0s or 1s. Generally DTRT for C/C++ with NaNs.
- FSETCCss, FSETCCsd,
+ FSETCC,
/// X86 MOVMSK{pd|ps}, extracts sign bits of two or four FP values,
/// result in an integer GPR. Needs masking for scalar result.
@@ -242,12 +250,9 @@ namespace llvm {
/// the list of operands.
TC_RETURN,
- // VZEXT_MOVL - Vector move low and zero extend.
+ // VZEXT_MOVL - Vector move to low scalar and zero higher vector elements.
VZEXT_MOVL,
- // VSEXT_MOVL - Vector move low and sign extend.
- VSEXT_MOVL,
-
// VZEXT - Vector integer zero-extend.
VZEXT,
@@ -292,10 +297,6 @@ namespace llvm {
ADD, SUB, ADC, SBB, SMUL,
INC, DEC, OR, XOR, AND,
- BLSI, // BLSI - Extract lowest set isolated bit
- BLSMSK, // BLSMSK - Get mask up to lowest set bit
- BLSR, // BLSR - Reset lowest set bit
- BZHI, // BZHI - Zero high bits
BEXTR, // BEXTR - Bit field extract
UMUL, // LOW, HI, FLAGS = umul LHS, RHS
@@ -309,14 +310,16 @@ namespace llvm {
// TESTP - Vector packed fp sign bitwise comparisons.
TESTP,
- // TESTM - Vector "test" in AVX-512, the result is in a mask vector.
+ // TESTM, TESTNM - Vector "test" in AVX-512, the result is in a mask vector.
TESTM,
+ TESTNM,
// OR/AND test for masks
KORTEST,
- KTEST,
// Several flavors of instructions with vector shuffle behaviors.
+ PACKSS,
+ PACKUS,
PALIGNR,
PSHUFD,
PSHUFHW,
@@ -337,15 +340,20 @@ namespace llvm {
VPERMILP,
VPERMV,
VPERMV3,
+ VPERMIV3,
VPERMI,
VPERM2X128,
VBROADCAST,
// masked broadcast
VBROADCASTM,
+ // Insert/Extract vector element
VINSERT,
+ VEXTRACT,
// PMULUDQ - Vector multiply packed unsigned doubleword integers
PMULUDQ,
+      // PMULDQ - Vector multiply packed signed doubleword integers
+ PMULDQ,
// FMA nodes
FMADD,
@@ -397,23 +405,8 @@ namespace llvm {
// XTEST - Test if in transactional execution.
XTEST,
- // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
- // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
- // Atomic 64-bit binary operations.
- ATOMADD64_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
- ATOMSUB64_DAG,
- ATOMOR64_DAG,
- ATOMXOR64_DAG,
- ATOMAND64_DAG,
- ATOMNAND64_DAG,
- ATOMMAX64_DAG,
- ATOMMIN64_DAG,
- ATOMUMAX64_DAG,
- ATOMUMIN64_DAG,
- ATOMSWAP64_DAG,
-
// LCMPXCHG_DAG, LCMPXCHG8_DAG, LCMPXCHG16_DAG - Compare and swap.
- LCMPXCHG_DAG,
+ LCMPXCHG_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
LCMPXCHG8_DAG,
LCMPXCHG16_DAG,
@@ -522,32 +515,32 @@ namespace llvm {
//===--------------------------------------------------------------------===//
// X86TargetLowering - X86 Implementation of the TargetLowering interface
- class X86TargetLowering : public TargetLowering {
+ class X86TargetLowering final : public TargetLowering {
public:
explicit X86TargetLowering(X86TargetMachine &TM);
- virtual unsigned getJumpTableEncoding() const;
+ unsigned getJumpTableEncoding() const override;
- virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i8; }
+ MVT getScalarShiftAmountTy(EVT LHSTy) const override { return MVT::i8; }
- virtual const MCExpr *
+ const MCExpr *
LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB, unsigned uid,
- MCContext &Ctx) const;
+ MCContext &Ctx) const override;
/// getPICJumpTableRelocaBase - Returns relocation base for the given PIC
/// jumptable.
- virtual SDValue getPICJumpTableRelocBase(SDValue Table,
- SelectionDAG &DAG) const;
- virtual const MCExpr *
+ SDValue getPICJumpTableRelocBase(SDValue Table,
+ SelectionDAG &DAG) const override;
+ const MCExpr *
getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
- unsigned JTI, MCContext &Ctx) const;
+ unsigned JTI, MCContext &Ctx) const override;
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contains are placed at 16-byte boundaries while the rest are at
/// 4-byte boundaries.
- virtual unsigned getByValTypeAlignment(Type *Ty) const;
+ unsigned getByValTypeAlignment(Type *Ty) const override;
/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
@@ -560,10 +553,9 @@ namespace llvm {
/// source is constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
- virtual EVT
- getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
- bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
- MachineFunction &MF) const;
+ EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
+ bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
+ MachineFunction &MF) const override;
/// isSafeMemOpType - Returns true if it's safe to use load / store of the
/// specified type to expand memcpy / memset inline. This is mostly true
@@ -571,88 +563,92 @@ namespace llvm {
/// targets without SSE2 f64 load / store are done with fldl / fstpl which
/// also does type conversion. Note the specified type doesn't have to be
/// legal as the hook is used before type legalization.
- virtual bool isSafeMemOpType(MVT VT) const;
+ bool isSafeMemOpType(MVT VT) const override;
/// allowsUnalignedMemoryAccesses - Returns true if the target allows
/// unaligned memory accesses. of the specified type. Returns whether it
/// is "fast" by reference in the second argument.
- virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const;
+ bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AS,
+ bool *Fast) const override;
/// LowerOperation - Provide custom lowering hooks for some operations.
///
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
///
- virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
- SelectionDAG &DAG) const;
+ void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
+ SelectionDAG &DAG) const override;
- virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
/// isTypeDesirableForOp - Return true if the target has native support for
/// the specified value type and it is 'desirable' to use the type for the
/// given node type. e.g. On x86 i16 is legal, but undesirable since i16
/// instruction encodings are longer and some i16 instructions are slow.
- virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const;
+ bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override;
/// isTypeDesirable - Return true if the target has native support for the
/// specified value type and it is 'desirable' to use the type. e.g. On x86
/// i16 is legal, but undesirable since i16 instruction encodings are longer
/// and some i16 instructions are slow.
- virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const;
+ bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const override;
- virtual MachineBasicBlock *
+ MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB) const;
+ MachineBasicBlock *MBB) const override;
/// getTargetNodeName - This method returns the name of a target specific
/// DAG node.
- virtual const char *getTargetNodeName(unsigned Opcode) const;
+ const char *getTargetNodeName(unsigned Opcode) const override;
/// getSetCCResultType - Return the value type to use for ISD::SETCC.
- virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
+ EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;
- /// computeMaskedBitsForTargetNode - Determine which of the bits specified
+ /// computeKnownBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
- virtual void computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth = 0) const;
+ void computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth = 0) const override;
// ComputeNumSignBitsForTargetNode - Determine the number of bits in the
// operation that are sign bits.
- virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
- unsigned Depth) const;
+ unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
+ const SelectionDAG &DAG,
+ unsigned Depth) const override;
- virtual bool
- isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
+ bool isGAPlusOffset(SDNode *N, const GlobalValue* &GA,
+ int64_t &Offset) const override;
SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;
- virtual bool ExpandInlineAsm(CallInst *CI) const;
+ bool ExpandInlineAsm(CallInst *CI) const override;
- ConstraintType getConstraintType(const std::string &Constraint) const;
+ ConstraintType
+ getConstraintType(const std::string &Constraint) const override;
/// Examine constraint string and operand type and determine a weight value.
/// The operand object must already have been set up with the operand type.
- virtual ConstraintWeight getSingleConstraintMatchWeight(
- AsmOperandInfo &info, const char *constraint) const;
+ ConstraintWeight
+ getSingleConstraintMatchWeight(AsmOperandInfo &info,
+ const char *constraint) const override;
- virtual const char *LowerXConstraint(EVT ConstraintVT) const;
+ const char *LowerXConstraint(EVT ConstraintVT) const override;
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops. If hasMemory is
/// true it means one of the asm constraint of the inline asm instruction
/// being processed is 'm'.
- virtual void LowerAsmOperandForConstraint(SDValue Op,
- std::string &Constraint,
- std::vector<SDValue> &Ops,
- SelectionDAG &DAG) const;
+ void LowerAsmOperandForConstraint(SDValue Op,
+ std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const override;
/// getRegForInlineAsmConstraint - Given a physical register constraint
/// (e.g. {edx}), return the register number and the register class for the
@@ -660,31 +656,40 @@ namespace llvm {
/// error, this returns a register number of 0.
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT VT) const;
+ MVT VT) const override;
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
- virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const;
+ bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;
/// isLegalICmpImmediate - Return true if the specified immediate is legal
/// icmp immediate, that is the target has icmp instructions which can
/// compare a register against the immediate without having to materialize
/// the immediate into a register.
- virtual bool isLegalICmpImmediate(int64_t Imm) const;
+ bool isLegalICmpImmediate(int64_t Imm) const override;
/// isLegalAddImmediate - Return true if the specified immediate is legal
/// add immediate, that is the target has add instructions which can
/// add a register and the immediate without having to materialize
/// the immediate into a register.
- virtual bool isLegalAddImmediate(int64_t Imm) const;
+ bool isLegalAddImmediate(int64_t Imm) const override;
+
+ /// \brief Return the cost of the scaling factor used in the addressing
+ /// mode represented by AM for this target, for a load/store
+ /// of the specified type.
+ /// If the AM is supported, the return value must be >= 0.
+ /// If the AM is not supported, it returns a negative value.
+ int getScalingFactorCost(const AddrMode &AM, Type *Ty) const override;
+
+ bool isVectorShiftByScalarCheap(Type *Ty) const override;
/// isTruncateFree - Return true if it's free to truncate a value of
/// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
/// register EAX to i16 by referencing its sub-register AX.
- virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
- virtual bool isTruncateFree(EVT VT1, EVT VT2) const;
+ bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
+ bool isTruncateFree(EVT VT1, EVT VT2) const override;
- virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const;
+ bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
/// isZExtFree - Return true if any actual instruction that defines a
/// value of type Ty1 implicit zero-extends the value to Ty2 in the result
@@ -694,44 +699,44 @@ namespace llvm {
/// does not necessarily apply to truncate instructions. e.g. on x86-64,
/// all instructions that define 32-bit values implicit zero-extend the
/// result out to 64 bits.
- virtual bool isZExtFree(Type *Ty1, Type *Ty2) const;
- virtual bool isZExtFree(EVT VT1, EVT VT2) const;
- virtual bool isZExtFree(SDValue Val, EVT VT2) const;
+ bool isZExtFree(Type *Ty1, Type *Ty2) const override;
+ bool isZExtFree(EVT VT1, EVT VT2) const override;
+ bool isZExtFree(SDValue Val, EVT VT2) const override;
/// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
/// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
/// expanded to FMAs when this method returns true, otherwise fmuladd is
/// expanded to fmul + fadd.
- virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
+ bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
/// isNarrowingProfitable - Return true if it's profitable to narrow
/// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
/// from i32 to i8 but not from i32 to i16.
- virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const;
+ bool isNarrowingProfitable(EVT VT1, EVT VT2) const override;
/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
- virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
+ bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask
/// values are assumed to be legal.
- virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
- EVT VT) const;
+ bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
+ EVT VT) const override;
    /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can
    /// use this to indicate if there is a suitable
/// VECTOR_SHUFFLE that can be used to replace a VAND with a constant
/// pool entry.
- virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
- EVT VT) const;
+ bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
+ EVT VT) const override;
/// ShouldShrinkFPConstant - If true, then instruction selection should
/// seek to shrink the FP constant of the specified type to a smaller type
/// in order to save space and / or reduce runtime.
- virtual bool ShouldShrinkFPConstant(EVT VT) const {
+ bool ShouldShrinkFPConstant(EVT VT) const override {
// Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
// expensive than a straight movsd. On the other hand, it's important to
// shrink long double fp constant since fldt is very slow.
@@ -751,9 +756,7 @@ namespace llvm {
/// isTargetFTOL - Return true if the target uses the MSVC _ftol2 routine
/// for fptoui.
- bool isTargetFTOL() const {
- return Subtarget->isTargetWindows() && !Subtarget->is64Bit();
- }
+ bool isTargetFTOL() const;
/// isIntegerTypeFTOL - Return true if the MSVC _ftol2 routine should be
/// used for fptoui to the given type.
@@ -761,28 +764,44 @@ namespace llvm {
return isTargetFTOL() && VT == MVT::i64;
}
+ /// \brief Returns true if it is beneficial to convert a load of a constant
+ /// to just the constant itself.
+ bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
+ Type *Ty) const override;
+
+ /// Intel processors have a unified instruction and data cache
+ const char * getClearCacheBuiltinName() const override {
+ return nullptr; // nothing to do, move along.
+ }
+
+ unsigned getRegisterByName(const char* RegName, EVT VT) const override;
+
/// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
- virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
- const TargetLibraryInfo *libInfo) const;
+ FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo) const override;
/// getStackCookieLocation - Return true if the target stores stack
/// protector cookies at a fixed offset in some non-standard address
/// space, and populates the address space and offset as
/// appropriate.
- virtual bool getStackCookieLocation(unsigned &AddressSpace, unsigned &Offset) const;
+ bool getStackCookieLocation(unsigned &AddressSpace,
+ unsigned &Offset) const override;
SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
SelectionDAG &DAG) const;
- virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const LLVM_OVERRIDE;
+ bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
/// \brief Reset the operation actions based on target options.
- virtual void resetOperationActions();
+ void resetOperationActions() override;
+
+ /// \brief Customize the preferred legalization strategy for certain types.
+ LegalizeTypeAction getPreferredVectorAction(EVT VT) const override;
protected:
std::pair<const TargetRegisterClass*, uint8_t>
- findRepresentativeClass(MVT VT) const;
+ findRepresentativeClass(MVT VT) const override;
private:
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
@@ -856,7 +875,11 @@ namespace llvm {
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const;
+ SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const;
+
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
@@ -865,7 +888,6 @@ namespace llvm {
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
@@ -874,9 +896,6 @@ namespace llvm {
SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerToBT(SDValue And, ISD::CondCode CC,
SDLoc dl, SelectionDAG &DAG) const;
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
@@ -896,38 +915,36 @@ namespace llvm {
SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const;
- virtual SDValue
+ SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
- virtual SDValue
- LowerCall(CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const;
+ SmallVectorImpl<SDValue> &InVals) const override;
+ SDValue LowerCall(CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const override;
- virtual SDValue
- LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const;
+ SDValue LowerReturn(SDValue Chain,
+ CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ SDLoc dl, SelectionDAG &DAG) const override;
- virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;
+ bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
- virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;
+ bool mayBeEmittedAsTailCall(CallInst *CI) const override;
- virtual MVT
- getTypeForExtArgOrReturn(MVT VT, ISD::NodeType ExtendKind) const;
+ MVT getTypeForExtArgOrReturn(MVT VT,
+ ISD::NodeType ExtendKind) const override;
- virtual bool
- CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
- bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- LLVMContext &Context) const;
+ bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext &Context) const override;
- virtual const uint16_t *getScratchRegisters(CallingConv::ID CC) const;
+ const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
/// Utility function to emit atomic-load-arith operations (and, or, xor,
/// nand, max, min, umax, umin). It takes the corresponding instruction to
@@ -973,13 +990,17 @@ namespace llvm {
MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI,
MachineBasicBlock *MBB) const;
+ MachineBasicBlock *emitFMA3Instr(MachineInstr *MI,
+ MachineBasicBlock *MBB) const;
+
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent, for use with the given x86 condition code.
- SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;
+ SDValue EmitTest(SDValue Op0, unsigned X86CC, SDLoc dl,
+ SelectionDAG &DAG) const;
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent, for use with the given x86 condition code.
- SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
+ SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, SDLoc dl,
SelectionDAG &DAG) const;
/// Convert a comparison if required by the subtarget.
diff --git a/contrib/llvm/lib/Target/X86/X86InstrAVX512.td b/contrib/llvm/lib/Target/X86/X86InstrAVX512.td
index cb19fbd..d289408 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -1,19 +1,36 @@
// Bitcasts between 512-bit vector types. Return the original type since
// no instruction is needed for the conversion
let Predicates = [HasAVX512] in {
- def : Pat<(v8f64 (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
- def : Pat<(v8f64 (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>;
- def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
+ def : Pat<(v8f64 (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
+ def : Pat<(v8f64 (bitconvert (v32i16 VR512:$src))), (v8f64 VR512:$src)>;
+ def : Pat<(v8f64 (bitconvert (v64i8 VR512:$src))), (v8f64 VR512:$src)>;
+ def : Pat<(v8f64 (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))), (v16f32 VR512:$src)>;
+ def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
+ def : Pat<(v16f32 (bitconvert (v32i16 VR512:$src))), (v16f32 VR512:$src)>;
+ def : Pat<(v16f32 (bitconvert (v64i8 VR512:$src))), (v16f32 VR512:$src)>;
def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))), (v16f32 VR512:$src)>;
- def : Pat<(v8i64 (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
def : Pat<(v8i64 (bitconvert (v16i32 VR512:$src))), (v8i64 VR512:$src)>;
+ def : Pat<(v8i64 (bitconvert (v32i16 VR512:$src))), (v8i64 VR512:$src)>;
+ def : Pat<(v8i64 (bitconvert (v64i8 VR512:$src))), (v8i64 VR512:$src)>;
def : Pat<(v8i64 (bitconvert (v8f64 VR512:$src))), (v8i64 VR512:$src)>;
- def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
- def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))), (v16i32 VR512:$src)>;
+ def : Pat<(v8i64 (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
+ def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))), (v16i32 VR512:$src)>;
+ def : Pat<(v16i32 (bitconvert (v32i16 VR512:$src))), (v16i32 VR512:$src)>;
+ def : Pat<(v16i32 (bitconvert (v64i8 VR512:$src))), (v16i32 VR512:$src)>;
def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))), (v16i32 VR512:$src)>;
- def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>;
+ def : Pat<(v32i16 (bitconvert (v8i64 VR512:$src))), (v32i16 VR512:$src)>;
+ def : Pat<(v32i16 (bitconvert (v16i32 VR512:$src))), (v32i16 VR512:$src)>;
+ def : Pat<(v32i16 (bitconvert (v64i8 VR512:$src))), (v32i16 VR512:$src)>;
+ def : Pat<(v32i16 (bitconvert (v8f64 VR512:$src))), (v32i16 VR512:$src)>;
+ def : Pat<(v32i16 (bitconvert (v16f32 VR512:$src))), (v32i16 VR512:$src)>;
+ def : Pat<(v32i16 (bitconvert (v16f32 VR512:$src))), (v32i16 VR512:$src)>;
+ def : Pat<(v64i8 (bitconvert (v8i64 VR512:$src))), (v64i8 VR512:$src)>;
+ def : Pat<(v64i8 (bitconvert (v16i32 VR512:$src))), (v64i8 VR512:$src)>;
+ def : Pat<(v64i8 (bitconvert (v32i16 VR512:$src))), (v64i8 VR512:$src)>;
+ def : Pat<(v64i8 (bitconvert (v8f64 VR512:$src))), (v64i8 VR512:$src)>;
+ def : Pat<(v64i8 (bitconvert (v16f32 VR512:$src))), (v64i8 VR512:$src)>;
def : Pat<(v2i64 (bitconvert (v4i32 VR128X:$src))), (v2i64 VR128X:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16 VR128X:$src))), (v2i64 VR128X:$src)>;
@@ -90,16 +107,17 @@ def AVX512_512_SET0 : I<0, Pseudo, (outs VR512:$dst), (ins), "",
[(set VR512:$dst, (v16f32 immAllZerosV))]>;
}
+let Predicates = [HasAVX512] in {
def : Pat<(v8i64 immAllZerosV), (AVX512_512_SET0)>;
def : Pat<(v16i32 immAllZerosV), (AVX512_512_SET0)>;
def : Pat<(v8f64 immAllZerosV), (AVX512_512_SET0)>;
-def : Pat<(v16f32 immAllZerosV), (AVX512_512_SET0)>;
+}
//===----------------------------------------------------------------------===//
// AVX-512 - VECTOR INSERT
//
// -- 32x8 form --
-let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
+let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
def VINSERTF32x4rr : AVX512AIi8<0x18, MRMSrcReg, (outs VR512:$dst),
(ins VR512:$src1, VR128X:$src2, i8imm:$src3),
"vinsertf32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
@@ -112,7 +130,7 @@ def VINSERTF32x4rm : AVX512AIi8<0x18, MRMSrcMem, (outs VR512:$dst),
}
// -- 64x4 fp form --
-let neverHasSideEffects = 1, ExeDomain = SSEPackedDouble in {
+let hasSideEffects = 0, ExeDomain = SSEPackedDouble in {
def VINSERTF64x4rr : AVX512AIi8<0x1a, MRMSrcReg, (outs VR512:$dst),
(ins VR512:$src1, VR256X:$src2, i8imm:$src3),
"vinsertf64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
@@ -124,7 +142,7 @@ def VINSERTF64x4rm : AVX512AIi8<0x1a, MRMSrcMem, (outs VR512:$dst),
[]>, EVEX_4V, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
}
// -- 32x4 integer form --
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def VINSERTI32x4rr : AVX512AIi8<0x38, MRMSrcReg, (outs VR512:$dst),
(ins VR512:$src1, VR128X:$src2, i8imm:$src3),
"vinserti32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
@@ -134,10 +152,9 @@ def VINSERTI32x4rm : AVX512AIi8<0x38, MRMSrcMem, (outs VR512:$dst),
(ins VR512:$src1, i128mem:$src2, i8imm:$src3),
"vinserti32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[]>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VT4>;
-
}
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
// -- 64x4 form --
def VINSERTI64x4rr : AVX512AIi8<0x3a, MRMSrcReg, (outs VR512:$dst),
(ins VR512:$src1, VR256X:$src2, i8imm:$src3),
@@ -162,12 +179,12 @@ def : Pat<(vinsert128_insert:$ins (v8i64 VR512:$src1), (v2i64 VR128X:$src2),
def : Pat<(vinsert128_insert:$ins (v16i32 VR512:$src1), (v4i32 VR128X:$src2),
(iPTR imm)), (VINSERTI32x4rr VR512:$src1, VR128X:$src2,
(INSERT_get_vinsert128_imm VR512:$ins))>;
-
+
def : Pat<(vinsert128_insert:$ins (v16f32 VR512:$src1), (loadv4f32 addr:$src2),
(iPTR imm)), (VINSERTF32x4rm VR512:$src1, addr:$src2,
(INSERT_get_vinsert128_imm VR512:$ins))>;
def : Pat<(vinsert128_insert:$ins (v16i32 VR512:$src1),
- (bc_v4i32 (loadv2i64 addr:$src2)),
+ (bc_v4i32 (loadv2i64 addr:$src2)),
(iPTR imm)), (VINSERTI32x4rm VR512:$src1, addr:$src2,
(INSERT_get_vinsert128_imm VR512:$ins))>;
def : Pat<(vinsert128_insert:$ins (v8f64 VR512:$src1), (loadv2f64 addr:$src2),
@@ -207,20 +224,20 @@ def : Pat<(vinsert256_insert:$ins (v16i32 VR512:$src1),
// vinsertps - insert f32 to XMM
def VINSERTPSzrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
(ins VR128X:$src1, VR128X:$src2, u32u8imm:$src3),
- "vinsertps{z}\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- [(set VR128X:$dst, (X86insrtps VR128X:$src1, VR128X:$src2, imm:$src3))]>,
+ "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ [(set VR128X:$dst, (X86insertps VR128X:$src1, VR128X:$src2, imm:$src3))]>,
EVEX_4V;
def VINSERTPSzrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
(ins VR128X:$src1, f32mem:$src2, u32u8imm:$src3),
- "vinsertps{z}\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- [(set VR128X:$dst, (X86insrtps VR128X:$src1,
+ "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ [(set VR128X:$dst, (X86insertps VR128X:$src1,
(v4f32 (scalar_to_vector (loadf32 addr:$src2))),
imm:$src3))]>, EVEX_4V, EVEX_CD8<32, CD8VT1>;
//===----------------------------------------------------------------------===//
// AVX-512 VECTOR EXTRACT
//---
-let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
+let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
// -- 32x4 form --
def VEXTRACTF32x4rr : AVX512AIi8<0x19, MRMDestReg, (outs VR128X:$dst),
(ins VR512:$src1, i8imm:$src2),
@@ -243,7 +260,7 @@ def VEXTRACTF64x4mr : AVX512AIi8<0x1b, MRMDestMem, (outs),
[]>, EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
}
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
// -- 32x4 form --
def VEXTRACTI32x4rr : AVX512AIi8<0x39, MRMDestReg, (outs VR128X:$dst),
(ins VR512:$src1, i8imm:$src2),
@@ -352,15 +369,15 @@ def : Pat<(insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0)),
// vextractps - extract 32 bits from XMM
def VEXTRACTPSzrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
(ins VR128X:$src1, u32u8imm:$src2),
- "vextractps{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>,
EVEX;
def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs),
(ins f32mem:$dst, VR128X:$src1, u32u8imm:$src2),
- "vextractps{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
- addr:$dst)]>, EVEX;
+ addr:$dst)]>, EVEX, EVEX_CD8<32, CD8VT1>;
//===---------------------------------------------------------------------===//
// AVX-512 BROADCAST
@@ -369,19 +386,19 @@ multiclass avx512_fp_broadcast<bits<8> opc, string OpcodeStr,
RegisterClass DestRC,
RegisterClass SrcRC, X86MemOperand x86memop> {
def rr : AVX5128I<opc, MRMSrcReg, (outs DestRC:$dst), (ins SrcRC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
[]>, EVEX;
def rm : AVX5128I<opc, MRMSrcMem, (outs DestRC:$dst), (ins x86memop:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),[]>, EVEX;
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),[]>, EVEX;
}
let ExeDomain = SSEPackedSingle in {
- defm VBROADCASTSSZ : avx512_fp_broadcast<0x18, "vbroadcastss{z}", VR512,
+ defm VBROADCASTSSZ : avx512_fp_broadcast<0x18, "vbroadcastss", VR512,
VR128X, f32mem>,
EVEX_V512, EVEX_CD8<32, CD8VT1>;
}
let ExeDomain = SSEPackedDouble in {
- defm VBROADCASTSDZ : avx512_fp_broadcast<0x19, "vbroadcastsd{z}", VR512,
+ defm VBROADCASTSDZ : avx512_fp_broadcast<0x19, "vbroadcastsd", VR512,
VR128X, f64mem>,
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
}
@@ -399,12 +416,12 @@ def : Pat<(int_x86_avx512_vbroadcast_sd_512 addr:$src),
multiclass avx512_int_broadcast_reg<bits<8> opc, string OpcodeStr,
RegisterClass SrcRC, RegisterClass KRC> {
def Zrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins SrcRC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
[]>, EVEX, EVEX_V512;
def Zkrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst),
(ins KRC:$mask, SrcRC:$src),
!strconcat(OpcodeStr,
- "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
+ " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
[]>, EVEX, EVEX_V512, EVEX_KZ;
}
@@ -420,6 +437,8 @@ def : Pat <(v8i64 (X86vzext VK8WM:$mask)),
def : Pat<(v16i32 (X86VBroadcast (i32 GR32:$src))),
(VPBROADCASTDrZrr GR32:$src)>;
+def : Pat<(v16i32 (X86VBroadcastm VK16WM:$mask, (i32 GR32:$src))),
+ (VPBROADCASTDrZkrr VK16WM:$mask, GR32:$src)>;
def : Pat<(v8i64 (X86VBroadcast (i64 GR64:$src))),
(VPBROADCASTQrZrr GR64:$src)>;
def : Pat<(v8i64 (X86VBroadcastm VK8WM:$mask, (i64 GR64:$src))),
@@ -430,30 +449,37 @@ def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_i32_512 (i32 GR32:$src))),
def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_i64_512 (i64 GR64:$src))),
(VPBROADCASTQrZrr GR64:$src)>;
+def : Pat<(v16i32 (int_x86_avx512_mask_pbroadcast_d_gpr_512 (i32 GR32:$src),
+ (v16i32 immAllZerosV), (i16 GR16:$mask))),
+ (VPBROADCASTDrZkrr (COPY_TO_REGCLASS GR16:$mask, VK16WM), GR32:$src)>;
+def : Pat<(v8i64 (int_x86_avx512_mask_pbroadcast_q_gpr_512 (i64 GR64:$src),
+ (bc_v8i64 (v16i32 immAllZerosV)), (i8 GR8:$mask))),
+ (VPBROADCASTQrZkrr (COPY_TO_REGCLASS GR8:$mask, VK8WM), GR64:$src)>;
+
multiclass avx512_int_broadcast_rm<bits<8> opc, string OpcodeStr,
X86MemOperand x86memop, PatFrag ld_frag,
RegisterClass DstRC, ValueType OpVT, ValueType SrcVT,
RegisterClass KRC> {
def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins VR128X:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
[(set DstRC:$dst,
(OpVT (X86VBroadcast (SrcVT VR128X:$src))))]>, EVEX;
def krr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins KRC:$mask,
VR128X:$src),
!strconcat(OpcodeStr,
- "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
+ " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
[(set DstRC:$dst,
(OpVT (X86VBroadcastm KRC:$mask, (SrcVT VR128X:$src))))]>,
EVEX, EVEX_KZ;
let mayLoad = 1 in {
def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
[(set DstRC:$dst,
(OpVT (X86VBroadcast (ld_frag addr:$src))))]>, EVEX;
def krm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins KRC:$mask,
x86memop:$src),
!strconcat(OpcodeStr,
- "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
+ " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
[(set DstRC:$dst, (OpVT (X86VBroadcastm KRC:$mask,
(ld_frag addr:$src))))]>, EVEX, EVEX_KZ;
}
@@ -466,6 +492,28 @@ defm VPBROADCASTQZ : avx512_int_broadcast_rm<0x59, "vpbroadcastq", i64mem,
loadi64, VR512, v8i64, v2i64, VK8WM>, EVEX_V512, VEX_W,
EVEX_CD8<64, CD8VT1>;
+multiclass avx512_int_subvec_broadcast_rm<bits<8> opc, string OpcodeStr,
+ X86MemOperand x86memop, PatFrag ld_frag,
+ RegisterClass KRC> {
+ let mayLoad = 1 in {
+ def rm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins x86memop:$src),
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
+ []>, EVEX;
+ def krm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins KRC:$mask,
+ x86memop:$src),
+ !strconcat(OpcodeStr,
+ " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
+ []>, EVEX, EVEX_KZ;
+ }
+}
+
+defm VBROADCASTI32X4 : avx512_int_subvec_broadcast_rm<0x5a, "vbroadcasti32x4",
+ i128mem, loadv2i64, VK16WM>,
+ EVEX_V512, EVEX_CD8<32, CD8VT4>;
+defm VBROADCASTI64X4 : avx512_int_subvec_broadcast_rm<0x5b, "vbroadcasti64x4",
+ i256mem, loadv4i64, VK16WM>, VEX_W,
+ EVEX_V512, EVEX_CD8<64, CD8VT4>;
+
def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_512 (v4i32 VR128X:$src))),
(VPBROADCASTDZrr VR128X:$src)>;
def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_512 (v2i64 VR128X:$src))),
@@ -503,14 +551,16 @@ multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
RegisterClass DstRC, RegisterClass KRC,
ValueType OpVT, ValueType SrcVT> {
def rr : AVX512XS8I<opc, MRMDestReg, (outs DstRC:$dst), (ins KRC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
[]>, EVEX;
}
+let Predicates = [HasCDI] in {
defm VPBROADCASTMW2D : avx512_mask_broadcast<0x3A, "vpbroadcastmw2d", VR512,
VK16, v16i32, v16i1>, EVEX_V512;
defm VPBROADCASTMB2Q : avx512_mask_broadcast<0x2A, "vpbroadcastmb2q", VR512,
VK8, v8i64, v8i1>, EVEX_V512, VEX_W;
+}
//===----------------------------------------------------------------------===//
// AVX-512 - VPERM
@@ -522,14 +572,14 @@ multiclass avx512_perm_imm<bits<8> opc, string OpcodeStr, RegisterClass RC,
def ri : AVX512AIi8<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, i8imm:$src2),
!strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
(OpVT (OpNode RC:$src1, (i8 imm:$src2))))]>,
EVEX;
def mi : AVX512AIi8<opc, MRMSrcMem, (outs RC:$dst),
(ins x86memop:$src1, i8imm:$src2),
!strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
(OpVT (OpNode (mem_frag addr:$src1),
(i8 imm:$src2))))]>, EVEX;
@@ -548,14 +598,14 @@ multiclass avx512_perm<bits<8> opc, string OpcodeStr, RegisterClass RC,
def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2),
!strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
(OpVT (X86VPermv RC:$src1, RC:$src2)))]>, EVEX_4V;
def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop:$src2),
!strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
(OpVT (X86VPermv RC:$src1, (mem_frag addr:$src2))))]>,
EVEX_4V;
@@ -575,97 +625,179 @@ defm VPERMPDZ : avx512_perm<0x16, "vpermpd", VR512, memopv8f64, f512mem,
// -- VPERM2I - 3 source operands form --
multiclass avx512_perm_3src<bits<8> opc, string OpcodeStr, RegisterClass RC,
PatFrag mem_frag, X86MemOperand x86memop,
- ValueType OpVT> {
+ SDNode OpNode, ValueType OpVT, RegisterClass KRC> {
let Constraints = "$src1 = $dst" in {
def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2, RC:$src3),
!strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set RC:$dst,
- (OpVT (X86VPermv3 RC:$src1, RC:$src2, RC:$src3)))]>,
+ (OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>,
EVEX_4V;
+ def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, KRC:$mask, RC:$src2, RC:$src3),
+ !strconcat(OpcodeStr,
+ " \t{$src3, $src2, $dst {${mask}}|"
+ "$dst {${mask}}, $src2, $src3}"),
+ [(set RC:$dst, (OpVT (vselect KRC:$mask,
+ (OpNode RC:$src1, RC:$src2,
+ RC:$src3),
+ RC:$src1)))]>,
+ EVEX_4V, EVEX_K;
+
+ let AddedComplexity = 30 in // Prefer over VMOV*rrkz Pat<>
+ def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, KRC:$mask, RC:$src2, RC:$src3),
+ !strconcat(OpcodeStr,
+ " \t{$src3, $src2, $dst {${mask}} {z} |",
+ "$dst {${mask}} {z}, $src2, $src3}"),
+ [(set RC:$dst, (OpVT (vselect KRC:$mask,
+ (OpNode RC:$src1, RC:$src2,
+ RC:$src3),
+ (OpVT (bitconvert
+ (v16i32 immAllZerosV))))))]>,
+ EVEX_4V, EVEX_KZ;
+
def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, RC:$src2, x86memop:$src3),
!strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set RC:$dst,
- (OpVT (X86VPermv3 RC:$src1, RC:$src2,
+ (OpVT (OpNode RC:$src1, RC:$src2,
(mem_frag addr:$src3))))]>, EVEX_4V;
+
+ def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, KRC:$mask, RC:$src2, x86memop:$src3),
+ !strconcat(OpcodeStr,
+ " \t{$src3, $src2, $dst {${mask}}|"
+ "$dst {${mask}}, $src2, $src3}"),
+ [(set RC:$dst,
+ (OpVT (vselect KRC:$mask,
+ (OpNode RC:$src1, RC:$src2,
+ (mem_frag addr:$src3)),
+ RC:$src1)))]>,
+ EVEX_4V, EVEX_K;
+
+ let AddedComplexity = 10 in // Prefer over the rrkz variant
+ def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, KRC:$mask, RC:$src2, x86memop:$src3),
+ !strconcat(OpcodeStr,
+ " \t{$src3, $src2, $dst {${mask}} {z}|"
+ "$dst {${mask}} {z}, $src2, $src3}"),
+ [(set RC:$dst,
+ (OpVT (vselect KRC:$mask,
+ (OpNode RC:$src1, RC:$src2,
+ (mem_frag addr:$src3)),
+ (OpVT (bitconvert
+ (v16i32 immAllZerosV))))))]>,
+ EVEX_4V, EVEX_KZ;
}
}
-defm VPERMI2D : avx512_perm_3src<0x76, "vpermi2d", VR512, memopv16i32, i512mem,
- v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPERMI2Q : avx512_perm_3src<0x76, "vpermi2q", VR512, memopv8i64, i512mem,
- v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VPERMI2PS : avx512_perm_3src<0x77, "vpermi2ps", VR512, memopv16f32, i512mem,
- v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPERMI2PD : avx512_perm_3src<0x77, "vpermi2pd", VR512, memopv8f64, i512mem,
- v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+defm VPERMI2D : avx512_perm_3src<0x76, "vpermi2d", VR512, memopv16i32,
+ i512mem, X86VPermiv3, v16i32, VK16WM>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPERMI2Q : avx512_perm_3src<0x76, "vpermi2q", VR512, memopv8i64,
+ i512mem, X86VPermiv3, v8i64, VK8WM>,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+defm VPERMI2PS : avx512_perm_3src<0x77, "vpermi2ps", VR512, memopv16f32,
+ i512mem, X86VPermiv3, v16f32, VK16WM>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPERMI2PD : avx512_perm_3src<0x77, "vpermi2pd", VR512, memopv8f64,
+ i512mem, X86VPermiv3, v8f64, VK8WM>,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+
+multiclass avx512_perm_table_3src<bits<8> opc, string Suffix, RegisterClass RC,
+ PatFrag mem_frag, X86MemOperand x86memop,
+ SDNode OpNode, ValueType OpVT, RegisterClass KRC,
+ ValueType MaskVT, RegisterClass MRC> :
+ avx512_perm_3src<opc, "vpermt2"##Suffix, RC, mem_frag, x86memop, OpNode,
+ OpVT, KRC> {
+ def : Pat<(OpVT (!cast<Intrinsic>("int_x86_avx512_mask_vpermt_"##Suffix##"_512")
+ VR512:$idx, VR512:$src1, VR512:$src2, -1)),
+ (!cast<Instruction>(NAME#rr) VR512:$src1, VR512:$idx, VR512:$src2)>;
+
+ def : Pat<(OpVT (!cast<Intrinsic>("int_x86_avx512_mask_vpermt_"##Suffix##"_512")
+ VR512:$idx, VR512:$src1, VR512:$src2, MRC:$mask)),
+ (!cast<Instruction>(NAME#rrk) VR512:$src1,
+ (MaskVT (COPY_TO_REGCLASS MRC:$mask, KRC)), VR512:$idx, VR512:$src2)>;
+}
+
+defm VPERMT2D : avx512_perm_table_3src<0x7E, "d", VR512, memopv16i32, i512mem,
+ X86VPermv3, v16i32, VK16WM, v16i1, GR16>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPERMT2Q : avx512_perm_table_3src<0x7E, "q", VR512, memopv8i64, i512mem,
+ X86VPermv3, v8i64, VK8WM, v8i1, GR8>,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+defm VPERMT2PS : avx512_perm_table_3src<0x7F, "ps", VR512, memopv16f32, i512mem,
+ X86VPermv3, v16f32, VK16WM, v16i1, GR16>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPERMT2PD : avx512_perm_table_3src<0x7F, "pd", VR512, memopv8f64, i512mem,
+ X86VPermv3, v8f64, VK8WM, v8i1, GR8>,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
//===----------------------------------------------------------------------===//
// AVX-512 - BLEND using mask
//
-multiclass avx512_blendmask<bits<8> opc, string OpcodeStr, Intrinsic Int,
+multiclass avx512_blendmask<bits<8> opc, string OpcodeStr,
RegisterClass KRC, RegisterClass RC,
X86MemOperand x86memop, PatFrag mem_frag,
SDNode OpNode, ValueType vt> {
def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
- (ins KRC:$mask, RC:$src1, RC:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
- [(set RC:$dst, (OpNode KRC:$mask, (vt RC:$src2),
- (vt RC:$src1)))]>, EVEX_4V, EVEX_K;
- def rr_Int : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
- (ins KRC:$mask, RC:$src1, RC:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
- [(set RC:$dst, (Int KRC:$mask, (vt RC:$src2),
+ (ins KRC:$mask, RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr,
+ " \t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
+ [(set RC:$dst, (OpNode KRC:$mask, (vt RC:$src2),
(vt RC:$src1)))]>, EVEX_4V, EVEX_K;
-
- let mayLoad = 1 in {
- def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
- (ins KRC:$mask, RC:$src1, x86memop:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, $mask, $dst|$dst, $mask, $src1, $src2}"),
- []>,
- EVEX_4V, EVEX_K;
-
- def rm_Int : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
- (ins KRC:$mask, RC:$src1, x86memop:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, $mask, $dst|$dst, $mask, $src1, $src2}"),
- [(set RC:$dst, (Int KRC:$mask, (vt RC:$src1),
- (mem_frag addr:$src2)))]>,
- EVEX_4V, EVEX_K;
- }
+ let mayLoad = 1 in
+ def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr,
+ " \t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
+ []>, EVEX_4V, EVEX_K;
}
let ExeDomain = SSEPackedSingle in
defm VBLENDMPSZ : avx512_blendmask<0x65, "vblendmps",
- int_x86_avx512_mskblend_ps_512,
VK16WM, VR512, f512mem,
memopv16f32, vselect, v16f32>,
EVEX_CD8<32, CD8VF>, EVEX_V512;
let ExeDomain = SSEPackedDouble in
defm VBLENDMPDZ : avx512_blendmask<0x65, "vblendmpd",
- int_x86_avx512_mskblend_pd_512,
VK8WM, VR512, f512mem,
memopv8f64, vselect, v8f64>,
VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;
+def : Pat<(v16f32 (int_x86_avx512_mask_blend_ps_512 (v16f32 VR512:$src1),
+ (v16f32 VR512:$src2), (i16 GR16:$mask))),
+ (VBLENDMPSZrr (COPY_TO_REGCLASS GR16:$mask, VK16WM),
+ VR512:$src1, VR512:$src2)>;
+
+def : Pat<(v8f64 (int_x86_avx512_mask_blend_pd_512 (v8f64 VR512:$src1),
+ (v8f64 VR512:$src2), (i8 GR8:$mask))),
+ (VBLENDMPDZrr (COPY_TO_REGCLASS GR8:$mask, VK8WM),
+ VR512:$src1, VR512:$src2)>;
+
defm VPBLENDMDZ : avx512_blendmask<0x64, "vpblendmd",
- int_x86_avx512_mskblend_d_512,
VK16WM, VR512, f512mem,
memopv16i32, vselect, v16i32>,
EVEX_CD8<32, CD8VF>, EVEX_V512;
defm VPBLENDMQZ : avx512_blendmask<0x64, "vpblendmq",
- int_x86_avx512_mskblend_q_512,
VK8WM, VR512, f512mem,
memopv8i64, vselect, v8i64>,
VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;
+def : Pat<(v16i32 (int_x86_avx512_mask_blend_d_512 (v16i32 VR512:$src1),
+ (v16i32 VR512:$src2), (i16 GR16:$mask))),
+ (VPBLENDMDZrr (COPY_TO_REGCLASS GR16:$mask, VK16),
+ VR512:$src1, VR512:$src2)>;
+
+def : Pat<(v8i64 (int_x86_avx512_mask_blend_q_512 (v8i64 VR512:$src1),
+ (v8i64 VR512:$src2), (i8 GR8:$mask))),
+ (VPBLENDMQZrr (COPY_TO_REGCLASS GR8:$mask, VK8),
+ VR512:$src1, VR512:$src2)>;
+
let Predicates = [HasAVX512] in {
def : Pat<(v8f32 (vselect (v8i1 VK8WM:$mask), (v8f32 VR256X:$src1),
(v8f32 VR256X:$src2))),
@@ -681,31 +813,71 @@ def : Pat<(v8i32 (vselect (v8i1 VK8WM:$mask), (v8i32 VR256X:$src1),
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
}
+//===----------------------------------------------------------------------===//
+// Compare Instructions
+//===----------------------------------------------------------------------===//
+
+// avx512_cmp_scalar - AVX512 CMPSS and CMPSD
+multiclass avx512_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
+ Operand CC, SDNode OpNode, ValueType VT,
+ PatFrag ld_frag, string asm, string asm_alt> {
+ def rr : AVX512Ii8<0xC2, MRMSrcReg,
+ (outs VK1:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
+ [(set VK1:$dst, (OpNode (VT RC:$src1), RC:$src2, imm:$cc))],
+ IIC_SSE_ALU_F32S_RR>, EVEX_4V;
+ def rm : AVX512Ii8<0xC2, MRMSrcMem,
+ (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
+ [(set VK1:$dst, (OpNode (VT RC:$src1),
+ (ld_frag addr:$src2), imm:$cc))], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
+ let isAsmParserOnly = 1, hasSideEffects = 0 in {
+ def rri_alt : AVX512Ii8<0xC2, MRMSrcReg,
+ (outs VK1:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
+ asm_alt, [], IIC_SSE_ALU_F32S_RR>, EVEX_4V;
+ def rmi_alt : AVX512Ii8<0xC2, MRMSrcMem,
+ (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
+ asm_alt, [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
+ }
+}
+
+let Predicates = [HasAVX512] in {
+defm VCMPSSZ : avx512_cmp_scalar<FR32X, f32mem, AVXCC, X86cmpms, f32, loadf32,
+ "vcmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ "vcmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
+ XS;
+defm VCMPSDZ : avx512_cmp_scalar<FR64X, f64mem, AVXCC, X86cmpms, f64, loadf64,
+ "vcmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ "vcmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
+ XD, VEX_W;
+}
multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, RegisterClass KRC,
RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
SDNode OpNode, ValueType vt> {
def rr : AVX512BI<opc, MRMSrcReg,
(outs KRC:$dst), (ins RC:$src1, RC:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2)))],
IIC_SSE_ALU_F32P_RR>, EVEX_4V;
def rm : AVX512BI<opc, MRMSrcMem,
(outs KRC:$dst), (ins RC:$src1, x86memop:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set KRC:$dst, (OpNode (vt RC:$src1), (memop_frag addr:$src2)))],
IIC_SSE_ALU_F32P_RM>, EVEX_4V;
}
defm VPCMPEQDZ : avx512_icmp_packed<0x76, "vpcmpeqd", VK16, VR512, i512mem,
- memopv16i32, X86pcmpeqm, v16i32>, EVEX_V512;
+ memopv16i32, X86pcmpeqm, v16i32>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
defm VPCMPEQQZ : avx512_icmp_packed<0x29, "vpcmpeqq", VK8, VR512, i512mem,
- memopv8i64, X86pcmpeqm, v8i64>, T8, EVEX_V512, VEX_W;
+ memopv8i64, X86pcmpeqm, v8i64>, T8PD, EVEX_V512,
+ VEX_W, EVEX_CD8<64, CD8VF>;
defm VPCMPGTDZ : avx512_icmp_packed<0x66, "vpcmpgtd", VK16, VR512, i512mem,
- memopv16i32, X86pcmpgtm, v16i32>, EVEX_V512;
+ memopv16i32, X86pcmpgtm, v16i32>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
defm VPCMPGTQZ : avx512_icmp_packed<0x37, "vpcmpgtq", VK8, VR512, i512mem,
- memopv8i64, X86pcmpgtm, v8i64>, T8, EVEX_V512, VEX_W;
+ memopv8i64, X86pcmpgtm, v8i64>, T8PD, EVEX_V512,
+ VEX_W, EVEX_CD8<64, CD8VF>;
def : Pat<(v8i1 (X86pcmpgtm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
(COPY_TO_REGCLASS (VPCMPGTDZrr
@@ -717,83 +889,99 @@ def : Pat<(v8i1 (X86pcmpeqm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;
-multiclass avx512_icmp_cc<bits<8> opc, RegisterClass KRC,
+multiclass avx512_icmp_cc<bits<8> opc, RegisterClass WMRC, RegisterClass KRC,
RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
- SDNode OpNode, ValueType vt, Operand CC, string asm,
- string asm_alt> {
+ SDNode OpNode, ValueType vt, Operand CC, string Suffix> {
def rri : AVX512AIi8<opc, MRMSrcReg,
- (outs KRC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
+ (outs KRC:$dst), (ins RC:$src1, RC:$src2, CC:$cc),
+ !strconcat("vpcmp${cc}", Suffix,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2), imm:$cc))],
IIC_SSE_ALU_F32P_RR>, EVEX_4V;
def rmi : AVX512AIi8<opc, MRMSrcMem,
- (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
+ (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc),
+ !strconcat("vpcmp${cc}", Suffix,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set KRC:$dst, (OpNode (vt RC:$src1), (memop_frag addr:$src2),
imm:$cc))], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
// Accept explicit immediate argument form instead of comparison code.
- let neverHasSideEffects = 1 in {
+ let isAsmParserOnly = 1, hasSideEffects = 0 in {
def rri_alt : AVX512AIi8<opc, MRMSrcReg,
- (outs RC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
- asm_alt, [], IIC_SSE_ALU_F32P_RR>, EVEX_4V;
+ (outs KRC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
+ !strconcat("vpcmp", Suffix,
+ "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
+ [], IIC_SSE_ALU_F32P_RR>, EVEX_4V;
+ def rrik_alt : AVX512AIi8<opc, MRMSrcReg,
+ (outs KRC:$dst), (ins WMRC:$mask, RC:$src1, RC:$src2, i8imm:$cc),
+ !strconcat("vpcmp", Suffix,
+ "\t{$cc, $src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2, $cc}"),
+ [], IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
def rmi_alt : AVX512AIi8<opc, MRMSrcMem,
- (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
- asm_alt, [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
+ (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
+ !strconcat("vpcmp", Suffix,
+ "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
+ [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
+ def rmik_alt : AVX512AIi8<opc, MRMSrcMem,
+ (outs KRC:$dst), (ins WMRC:$mask, RC:$src1, x86memop:$src2, i8imm:$cc),
+ !strconcat("vpcmp", Suffix,
+ "\t{$cc, $src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2, $cc}"),
+ [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
}
}
-defm VPCMPDZ : avx512_icmp_cc<0x1F, VK16, VR512, i512mem, memopv16i32,
- X86cmpm, v16i32, AVXCC,
- "vpcmp${cc}d\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- "vpcmpd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPCMPUDZ : avx512_icmp_cc<0x1E, VK16, VR512, i512mem, memopv16i32,
- X86cmpmu, v16i32, AVXCC,
- "vpcmp${cc}ud\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- "vpcmpud\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
-
-defm VPCMPQZ : avx512_icmp_cc<0x1F, VK8, VR512, i512mem, memopv8i64,
- X86cmpm, v8i64, AVXCC,
- "vpcmp${cc}q\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- "vpcmpq\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
- VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
-defm VPCMPUQZ : avx512_icmp_cc<0x1E, VK8, VR512, i512mem, memopv8i64,
- X86cmpmu, v8i64, AVXCC,
- "vpcmp${cc}uq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- "vpcmpuq\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
- VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
-
-// avx512_cmp_packed - sse 1 & 2 compare packed instructions
+defm VPCMPDZ : avx512_icmp_cc<0x1F, VK16WM, VK16, VR512, i512mem, memopv16i32,
+ X86cmpm, v16i32, AVXCC, "d">,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPCMPUDZ : avx512_icmp_cc<0x1E, VK16WM, VK16, VR512, i512mem, memopv16i32,
+ X86cmpmu, v16i32, AVXCC, "ud">,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+
+defm VPCMPQZ : avx512_icmp_cc<0x1F, VK8WM, VK8, VR512, i512mem, memopv8i64,
+ X86cmpm, v8i64, AVXCC, "q">,
+ VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
+defm VPCMPUQZ : avx512_icmp_cc<0x1E, VK8WM, VK8, VR512, i512mem, memopv8i64,
+ X86cmpmu, v8i64, AVXCC, "uq">,
+ VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
+
+// avx512_cmp_packed - compare packed instructions
multiclass avx512_cmp_packed<RegisterClass KRC, RegisterClass RC,
- X86MemOperand x86memop, Operand CC,
- SDNode OpNode, ValueType vt, string asm,
- string asm_alt, Domain d> {
+ X86MemOperand x86memop, ValueType vt,
+ string suffix, Domain d> {
def rri : AVX512PIi8<0xC2, MRMSrcReg,
- (outs KRC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
- [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2), imm:$cc))], d>;
+ (outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
+ !strconcat("vcmp${cc}", suffix,
+ " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set KRC:$dst, (X86cmpm (vt RC:$src1), (vt RC:$src2), imm:$cc))], d>;
+ def rrib: AVX512PIi8<0xC2, MRMSrcReg,
+ (outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
+ !strconcat("vcmp${cc}", suffix,
+ " \t{{sae}, $src2, $src1, $dst|$dst, $src1, $src2, {sae}}"),
+ [], d>, EVEX_B;
def rmi : AVX512PIi8<0xC2, MRMSrcMem,
- (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
+ (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, AVXCC:$cc),
+ !strconcat("vcmp${cc}", suffix,
+ " \t{$src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
[(set KRC:$dst,
- (OpNode (vt RC:$src1), (memop addr:$src2), imm:$cc))], d>;
+ (X86cmpm (vt RC:$src1), (memop addr:$src2), imm:$cc))], d>;
// Accept explicit immediate argument form instead of comparison code.
- let neverHasSideEffects = 1 in {
+ let isAsmParserOnly = 1, hasSideEffects = 0 in {
def rri_alt : AVX512PIi8<0xC2, MRMSrcReg,
- (outs RC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
- asm_alt, [], d>;
+ (outs KRC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
+ !strconcat("vcmp", suffix,
+ " \t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>;
def rmi_alt : AVX512PIi8<0xC2, MRMSrcMem,
- (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
- asm_alt, [], d>;
+ (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
+ !strconcat("vcmp", suffix,
+ " \t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>;
}
}
-defm VCMPPSZ : avx512_cmp_packed<VK16, VR512, f512mem, AVXCC, X86cmpm, v16f32,
- "vcmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- "vcmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SSEPackedSingle>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VCMPPDZ : avx512_cmp_packed<VK8, VR512, f512mem, AVXCC, X86cmpm, v8f64,
- "vcmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- "vcmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SSEPackedDouble>, OpSize, EVEX_4V, VEX_W, EVEX_V512,
+defm VCMPPSZ : avx512_cmp_packed<VK16, VR512, f512mem, v16f32,
+ "ps", SSEPackedSingle>, PS, EVEX_4V, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+defm VCMPPDZ : avx512_cmp_packed<VK8, VR512, f512mem, v8f64,
+ "pd", SSEPackedDouble>, PD, EVEX_4V, VEX_W, EVEX_V512,
EVEX_CD8<64, CD8VF>;
def : Pat<(v8i1 (X86cmpm (v8f32 VR256X:$src1), (v8f32 VR256X:$src2), imm:$cc)),
@@ -811,7 +999,31 @@ def : Pat<(v8i1 (X86cmpmu (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
imm:$cc), VK8)>;
-
+
+def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1),
+ (v16f32 VR512:$src2), imm:$cc, (i16 -1),
+ FROUND_NO_EXC)),
+ (COPY_TO_REGCLASS (VCMPPSZrrib VR512:$src1, VR512:$src2,
+ (I8Imm imm:$cc)), GR16)>;
+
+def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1),
+ (v8f64 VR512:$src2), imm:$cc, (i8 -1),
+ FROUND_NO_EXC)),
+ (COPY_TO_REGCLASS (VCMPPDZrrib VR512:$src1, VR512:$src2,
+ (I8Imm imm:$cc)), GR8)>;
+
+def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1),
+ (v16f32 VR512:$src2), imm:$cc, (i16 -1),
+ FROUND_CURRENT)),
+ (COPY_TO_REGCLASS (VCMPPSZrri VR512:$src1, VR512:$src2,
+ (I8Imm imm:$cc)), GR16)>;
+
+def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1),
+ (v8f64 VR512:$src2), imm:$cc, (i8 -1),
+ FROUND_CURRENT)),
+ (COPY_TO_REGCLASS (VCMPPDZrri VR512:$src1, VR512:$src2,
+ (I8Imm imm:$cc)), GR8)>;
+
// Mask register copy, including
// - copy between mask registers
// - load/store mask registers
@@ -820,35 +1032,35 @@ def : Pat<(v8i1 (X86cmpmu (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
multiclass avx512_mask_mov<bits<8> opc_kk, bits<8> opc_km, bits<8> opc_mk,
string OpcodeStr, RegisterClass KRC,
ValueType vt, X86MemOperand x86memop> {
- let neverHasSideEffects = 1 in {
+ let hasSideEffects = 0 in {
def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
let mayLoad = 1 in
def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
[(set KRC:$dst, (vt (load addr:$src)))]>;
let mayStore = 1 in
def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
}
}
multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk,
string OpcodeStr,
RegisterClass KRC, RegisterClass GRC> {
- let neverHasSideEffects = 1 in {
+ let hasSideEffects = 0 in {
def kr : I<opc_kr, MRMSrcReg, (outs KRC:$dst), (ins GRC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
def rk : I<opc_rk, MRMSrcReg, (outs GRC:$dst), (ins KRC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
}
}
let Predicates = [HasAVX512] in {
defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16mem>,
- VEX, TB;
+ VEX, PS;
defm KMOVW : avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32>,
- VEX, TB;
+ VEX, PS;
}
let Predicates = [HasAVX512] in {
@@ -862,8 +1074,44 @@ let Predicates = [HasAVX512] in {
def : Pat<(store (v16i1 VK16:$src), addr:$dst),
(KMOVWmk addr:$dst, VK16:$src)>;
- def : Pat<(store (v8i1 VK8:$src), addr:$dst),
- (KMOVWmk addr:$dst, (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16)))>;
+ def : Pat<(store VK8:$src, addr:$dst),
+ (KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK8:$src, VK16))>;
+
+ def : Pat<(i1 (load addr:$src)),
+ (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK1)>;
+
+ def : Pat<(v8i1 (load addr:$src)),
+ (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK8)>;
+
+ def : Pat<(i1 (trunc (i32 GR32:$src))),
+ (COPY_TO_REGCLASS (KMOVWkr (AND32ri $src, (i32 1))), VK1)>;
+
+ def : Pat<(i1 (trunc (i8 GR8:$src))),
+ (COPY_TO_REGCLASS
+ (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit), (i32 1))),
+ VK1)>;
+ def : Pat<(i1 (trunc (i16 GR16:$src))),
+ (COPY_TO_REGCLASS
+ (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), $src, sub_16bit), (i32 1))),
+ VK1)>;
+
+ def : Pat<(i32 (zext VK1:$src)),
+ (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1))>;
+ def : Pat<(i8 (zext VK1:$src)),
+ (EXTRACT_SUBREG
+ (AND32ri (KMOVWrk
+ (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)), sub_8bit)>;
+ def : Pat<(i64 (zext VK1:$src)),
+ (AND64ri8 (SUBREG_TO_REG (i64 0),
+ (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), sub_32bit), (i64 1))>;
+ def : Pat<(i16 (zext VK1:$src)),
+ (EXTRACT_SUBREG
+ (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)),
+ sub_16bit)>;
+ def : Pat<(v16i1 (scalar_to_vector VK1:$src)),
+ (COPY_TO_REGCLASS VK1:$src, VK16)>;
+ def : Pat<(v8i1 (scalar_to_vector VK1:$src)),
+ (COPY_TO_REGCLASS VK1:$src, VK8)>;
}
// With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
let Predicates = [HasAVX512] in {
@@ -876,6 +1124,12 @@ let Predicates = [HasAVX512] in {
(EXTRACT_SUBREG
(KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
sub_8bit)>;
+
+ def : Pat<(i1 (X86Vextract VK16:$src, (iPTR 0))),
+ (COPY_TO_REGCLASS VK16:$src, VK1)>;
+ def : Pat<(i1 (X86Vextract VK8:$src, (iPTR 0))),
+ (COPY_TO_REGCLASS VK8:$src, VK1)>;
+
}
// Mask unary operation
@@ -884,18 +1138,27 @@ multiclass avx512_mask_unop<bits<8> opc, string OpcodeStr,
RegisterClass KRC, SDPatternOperator OpNode> {
let Predicates = [HasAVX512] in
def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
[(set KRC:$dst, (OpNode KRC:$src))]>;
}
multiclass avx512_mask_unop_w<bits<8> opc, string OpcodeStr,
SDPatternOperator OpNode> {
defm W : avx512_mask_unop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
- VEX, TB;
+ VEX, PS;
}
defm KNOT : avx512_mask_unop_w<0x44, "knot", not>;
+multiclass avx512_mask_unop_int<string IntName, string InstName> {
+ let Predicates = [HasAVX512] in
+ def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
+ (i16 GR16:$src)),
+ (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
+ (v16i1 (COPY_TO_REGCLASS GR16:$src, VK16))), GR16)>;
+}
+defm : avx512_mask_unop_int<"knot", "KNOT">;
+
def : Pat<(xor VK16:$src1, (v16i1 immAllOnesV)), (KNOTWrr VK16:$src1)>;
def : Pat<(xor VK8:$src1, (v8i1 immAllOnesV)),
(COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$src1, VK16)), VK8)>;
@@ -906,27 +1169,26 @@ def : Pat<(not VK8:$src),
(KNOTWrr (COPY_TO_REGCLASS VK8:$src, VK16)), VK8)>;
// Mask binary operation
-// - KADD, KAND, KANDN, KOR, KXNOR, KXOR
+// - KAND, KANDN, KOR, KXNOR, KXOR
multiclass avx512_mask_binop<bits<8> opc, string OpcodeStr,
RegisterClass KRC, SDPatternOperator OpNode> {
let Predicates = [HasAVX512] in
def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
!strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set KRC:$dst, (OpNode KRC:$src1, KRC:$src2))]>;
}
multiclass avx512_mask_binop_w<bits<8> opc, string OpcodeStr,
SDPatternOperator OpNode> {
defm W : avx512_mask_binop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
- VEX_4V, VEX_L, TB;
+ VEX_4V, VEX_L, PS;
}
def andn : PatFrag<(ops node:$i0, node:$i1), (and (not node:$i0), node:$i1)>;
def xnor : PatFrag<(ops node:$i0, node:$i1), (not (xor node:$i0, node:$i1))>;
let isCommutable = 1 in {
- defm KADD : avx512_mask_binop_w<0x4a, "kadd", add>;
defm KAND : avx512_mask_binop_w<0x41, "kand", and>;
let isCommutable = 0 in
defm KANDN : avx512_mask_binop_w<0x42, "kandn", andn>;
@@ -935,19 +1197,33 @@ let isCommutable = 1 in {
defm KXOR : avx512_mask_binop_w<0x47, "kxor", xor>;
}
+def : Pat<(xor VK1:$src1, VK1:$src2),
+ (COPY_TO_REGCLASS (KXORWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
+ (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
+
+def : Pat<(or VK1:$src1, VK1:$src2),
+ (COPY_TO_REGCLASS (KORWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
+ (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
+
+def : Pat<(and VK1:$src1, VK1:$src2),
+ (COPY_TO_REGCLASS (KANDWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
+ (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
+
multiclass avx512_mask_binop_int<string IntName, string InstName> {
let Predicates = [HasAVX512] in
- def : Pat<(!cast<Intrinsic>("int_x86_"##IntName##"_v16i1")
- VK16:$src1, VK16:$src2),
- (!cast<Instruction>(InstName##"Wrr") VK16:$src1, VK16:$src2)>;
+ def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
+ (i16 GR16:$src1), (i16 GR16:$src2)),
+ (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
+ (v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)),
+ (v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>;
}
-defm : avx512_mask_binop_int<"kadd", "KADD">;
defm : avx512_mask_binop_int<"kand", "KAND">;
defm : avx512_mask_binop_int<"kandn", "KANDN">;
defm : avx512_mask_binop_int<"kor", "KOR">;
defm : avx512_mask_binop_int<"kxnor", "KXNOR">;
defm : avx512_mask_binop_int<"kxor", "KXOR">;
+
// With AVX-512, 8-bit mask is promoted to 16-bit mask.
multiclass avx512_binop_pat<SDPatternOperator OpNode, Instruction Inst> {
let Predicates = [HasAVX512] in
@@ -965,44 +1241,53 @@ defm : avx512_binop_pat<xor, KXORWrr>;
// Mask unpacking
multiclass avx512_mask_unpck<bits<8> opc, string OpcodeStr,
- RegisterClass KRC1, RegisterClass KRC2> {
+ RegisterClass KRC> {
let Predicates = [HasAVX512] in
- def rr : I<opc, MRMSrcReg, (outs KRC1:$dst), (ins KRC2:$src1, KRC2:$src2),
+ def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
!strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
+ " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
}
multiclass avx512_mask_unpck_bw<bits<8> opc, string OpcodeStr> {
- defm BW : avx512_mask_unpck<opc, !strconcat(OpcodeStr, "bw"), VK16, VK8>,
- VEX_4V, VEX_L, OpSize, TB;
+ defm BW : avx512_mask_unpck<opc, !strconcat(OpcodeStr, "bw"), VK16>,
+ VEX_4V, VEX_L, PD;
}
defm KUNPCK : avx512_mask_unpck_bw<0x4b, "kunpck">;
+def : Pat<(v16i1 (concat_vectors (v8i1 VK8:$src1), (v8i1 VK8:$src2))),
+ (KUNPCKBWrr (COPY_TO_REGCLASS VK8:$src2, VK16),
+ (COPY_TO_REGCLASS VK8:$src1, VK16))>;
+
multiclass avx512_mask_unpck_int<string IntName, string InstName> {
let Predicates = [HasAVX512] in
- def : Pat<(!cast<Intrinsic>("int_x86_"##IntName##"_v16i1")
- VK8:$src1, VK8:$src2),
- (!cast<Instruction>(InstName##"BWrr") VK8:$src1, VK8:$src2)>;
+ def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_bw")
+ (i16 GR16:$src1), (i16 GR16:$src2)),
+ (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"BWrr")
+ (v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)),
+ (v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>;
}
+defm : avx512_mask_unpck_int<"kunpck", "KUNPCK">;
-defm : avx512_mask_unpck_int<"kunpck", "KUNPCK">;
// Mask bit testing
multiclass avx512_mask_testop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
SDNode OpNode> {
let Predicates = [HasAVX512], Defs = [EFLAGS] in
def rr : I<opc, MRMSrcReg, (outs), (ins KRC:$src1, KRC:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
+ !strconcat(OpcodeStr, " \t{$src2, $src1|$src1, $src2}"),
[(set EFLAGS, (OpNode KRC:$src1, KRC:$src2))]>;
}
multiclass avx512_mask_testop_w<bits<8> opc, string OpcodeStr, SDNode OpNode> {
defm W : avx512_mask_testop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
- VEX, TB;
+ VEX, PS;
}
defm KORTEST : avx512_mask_testop_w<0x98, "kortest", X86kortest>;
-defm KTEST : avx512_mask_testop_w<0x99, "ktest", X86ktest>;
+
+def : Pat<(X86cmp VK1:$src1, (i1 0)),
+ (KORTESTWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
+ (COPY_TO_REGCLASS VK1:$src1, VK16))>;
// Mask shift
multiclass avx512_mask_shiftop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
@@ -1010,18 +1295,18 @@ multiclass avx512_mask_shiftop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
let Predicates = [HasAVX512] in
def ri : Ii8<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src, i8imm:$imm),
!strconcat(OpcodeStr,
- "\t{$imm, $src, $dst|$dst, $src, $imm}"),
+ " \t{$imm, $src, $dst|$dst, $src, $imm}"),
[(set KRC:$dst, (OpNode KRC:$src, (i8 imm:$imm)))]>;
}
multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr,
SDNode OpNode> {
defm W : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
- VEX, OpSize, TA, VEX_W;
+ VEX, TAPD, VEX_W;
}
-defm KSHIFTL : avx512_mask_shiftop_w<0x32, 0x33, "kshiftl", shl>;
-defm KSHIFTR : avx512_mask_shiftop_w<0x30, 0x31, "kshiftr", srl>;
+defm KSHIFTL : avx512_mask_shiftop_w<0x32, 0x33, "kshiftl", X86vshli>;
+defm KSHIFTR : avx512_mask_shiftop_w<0x30, 0x31, "kshiftr", X86vsrli>;
// Mask setting all 0s or 1s
multiclass avx512_mask_setop<RegisterClass KRC, ValueType VT, PatFrag Val> {
@@ -1032,7 +1317,7 @@ multiclass avx512_mask_setop<RegisterClass KRC, ValueType VT, PatFrag Val> {
}
multiclass avx512_mask_setop_w<PatFrag Val> {
- defm B : avx512_mask_setop<VK8, v8i1, Val>;
+ defm B : avx512_mask_setop<VK8, v8i1, Val>;
defm W : avx512_mask_setop<VK16, v16i1, Val>;
}
@@ -1043,6 +1328,9 @@ defm KSET1 : avx512_mask_setop_w<immAllOnesV>;
let Predicates = [HasAVX512] in {
def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>;
def : Pat<(v8i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK8)>;
+ def : Pat<(i1 0), (COPY_TO_REGCLASS (KSET0W), VK1)>;
+ def : Pat<(i1 1), (COPY_TO_REGCLASS (KSET1W), VK1)>;
+ def : Pat<(i1 -1), (COPY_TO_REGCLASS (KSET1W), VK1)>;
}
def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 0))),
(v8i1 (COPY_TO_REGCLASS VK16:$src, VK8))>;
@@ -1053,153 +1341,184 @@ def : Pat<(v16i1 (insert_subvector undef, (v8i1 VK8:$src), (iPTR 0))),
def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
(v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
+def : Pat<(v8i1 (X86vshli VK8:$src, (i8 imm:$imm))),
+ (v8i1 (COPY_TO_REGCLASS (KSHIFTLWri (COPY_TO_REGCLASS VK8:$src, VK16), (I8Imm $imm)), VK8))>;
+
+def : Pat<(v8i1 (X86vsrli VK8:$src, (i8 imm:$imm))),
+ (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16), (I8Imm $imm)), VK8))>;
//===----------------------------------------------------------------------===//
// AVX-512 - Aligned and unaligned load and store
//
-multiclass avx512_mov_packed<bits<8> opc, RegisterClass RC, RegisterClass KRC,
+multiclass avx512_load<bits<8> opc, RegisterClass RC, RegisterClass KRC,
X86MemOperand x86memop, PatFrag ld_frag,
- string asm, Domain d> {
-let neverHasSideEffects = 1 in
+ string asm, Domain d,
+ ValueType vt, bit IsReMaterializable = 1> {
+let hasSideEffects = 0 in {
def rr : AVX512PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
- !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>,
+ !strconcat(asm, " \t{$src, $dst|$dst, $src}"), [], d>,
EVEX;
-let canFoldAsLoad = 1 in
+ def rrkz : AVX512PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
+ !strconcat(asm,
+ " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
+ [], d>, EVEX, EVEX_KZ;
+ }
+ let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
def rm : AVX512PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
- !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
- [(set RC:$dst, (ld_frag addr:$src))], d>, EVEX;
-let Constraints = "$src1 = $dst" in {
+ !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
+ [(set (vt RC:$dst), (ld_frag addr:$src))], d>, EVEX;
+ let Constraints = "$src1 = $dst", hasSideEffects = 0 in {
def rrk : AVX512PI<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, KRC:$mask, RC:$src2),
!strconcat(asm,
- "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"), [], d>,
+ " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"), [], d>,
EVEX, EVEX_K;
+ let mayLoad = 1 in
def rmk : AVX512PI<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, KRC:$mask, x86memop:$src2),
!strconcat(asm,
- "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
+ " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
[], d>, EVEX, EVEX_K;
-}
+ }
+ let mayLoad = 1 in
+ def rmkz : AVX512PI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins KRC:$mask, x86memop:$src2),
+ !strconcat(asm,
+ " \t{$src2, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src2}"),
+ [], d>, EVEX, EVEX_KZ;
}
-defm VMOVAPSZ : avx512_mov_packed<0x28, VR512, VK16WM, f512mem, alignedloadv16f32,
- "vmovaps", SSEPackedSingle>,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VMOVAPDZ : avx512_mov_packed<0x28, VR512, VK8WM, f512mem, alignedloadv8f64,
- "vmovapd", SSEPackedDouble>,
- OpSize, EVEX_V512, VEX_W,
- EVEX_CD8<64, CD8VF>;
-defm VMOVUPSZ : avx512_mov_packed<0x10, VR512, VK16WM, f512mem, loadv16f32,
- "vmovups", SSEPackedSingle>,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VMOVUPDZ : avx512_mov_packed<0x10, VR512, VK8WM, f512mem, loadv8f64,
- "vmovupd", SSEPackedDouble>,
- OpSize, EVEX_V512, VEX_W,
- EVEX_CD8<64, CD8VF>;
-def VMOVAPSZmr : AVX512PI<0x29, MRMDestMem, (outs), (ins f512mem:$dst, VR512:$src),
- "vmovaps\t{$src, $dst|$dst, $src}",
- [(alignedstore512 (v16f32 VR512:$src), addr:$dst)],
- SSEPackedSingle>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
-def VMOVAPDZmr : AVX512PI<0x29, MRMDestMem, (outs), (ins f512mem:$dst, VR512:$src),
- "vmovapd\t{$src, $dst|$dst, $src}",
- [(alignedstore512 (v8f64 VR512:$src), addr:$dst)],
- SSEPackedDouble>, EVEX, EVEX_V512,
- OpSize, VEX_W, EVEX_CD8<64, CD8VF>;
-def VMOVUPSZmr : AVX512PI<0x11, MRMDestMem, (outs), (ins f512mem:$dst, VR512:$src),
- "vmovups\t{$src, $dst|$dst, $src}",
- [(store (v16f32 VR512:$src), addr:$dst)],
- SSEPackedSingle>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
-def VMOVUPDZmr : AVX512PI<0x11, MRMDestMem, (outs), (ins f512mem:$dst, VR512:$src),
- "vmovupd\t{$src, $dst|$dst, $src}",
- [(store (v8f64 VR512:$src), addr:$dst)],
- SSEPackedDouble>, EVEX, EVEX_V512,
- OpSize, VEX_W, EVEX_CD8<64, CD8VF>;
-
-let neverHasSideEffects = 1 in {
- def VMOVDQA32rr : AVX512BI<0x6F, MRMSrcReg, (outs VR512:$dst),
- (ins VR512:$src),
- "vmovdqa32\t{$src, $dst|$dst, $src}", []>,
- EVEX, EVEX_V512;
- def VMOVDQA64rr : AVX512BI<0x6F, MRMSrcReg, (outs VR512:$dst),
- (ins VR512:$src),
- "vmovdqa64\t{$src, $dst|$dst, $src}", []>,
- EVEX, EVEX_V512, VEX_W;
-let mayStore = 1 in {
- def VMOVDQA32mr : AVX512BI<0x7F, MRMDestMem, (outs),
- (ins i512mem:$dst, VR512:$src),
- "vmovdqa32\t{$src, $dst|$dst, $src}", []>,
- EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
- def VMOVDQA64mr : AVX512BI<0x7F, MRMDestMem, (outs),
- (ins i512mem:$dst, VR512:$src),
- "vmovdqa64\t{$src, $dst|$dst, $src}", []>,
- EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
-}
-let mayLoad = 1 in {
-def VMOVDQA32rm : AVX512BI<0x6F, MRMSrcMem, (outs VR512:$dst),
- (ins i512mem:$src),
- "vmovdqa32\t{$src, $dst|$dst, $src}", []>,
- EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
-def VMOVDQA64rm : AVX512BI<0x6F, MRMSrcMem, (outs VR512:$dst),
- (ins i512mem:$src),
- "vmovdqa64\t{$src, $dst|$dst, $src}", []>,
- EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
-}
-}
-
-// 512-bit aligned load/store
-def : Pat<(alignedloadv8i64 addr:$src), (VMOVDQA64rm addr:$src)>;
-def : Pat<(alignedloadv16i32 addr:$src), (VMOVDQA32rm addr:$src)>;
-
-def : Pat<(alignedstore512 (v8i64 VR512:$src), addr:$dst),
- (VMOVDQA64mr addr:$dst, VR512:$src)>;
-def : Pat<(alignedstore512 (v16i32 VR512:$src), addr:$dst),
- (VMOVDQA32mr addr:$dst, VR512:$src)>;
-
-multiclass avx512_mov_int<bits<8> load_opc, bits<8> store_opc, string asm,
- RegisterClass RC, RegisterClass KRC,
- PatFrag ld_frag, X86MemOperand x86memop> {
-let neverHasSideEffects = 1 in
- def rr : AVX512XSI<load_opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
- !strconcat(asm, "\t{$src, $dst|$dst, $src}"), []>, EVEX;
-let canFoldAsLoad = 1 in
- def rm : AVX512XSI<load_opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
- !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
- [(set RC:$dst, (ld_frag addr:$src))]>, EVEX;
-let mayStore = 1 in
- def mr : AVX512XSI<store_opc, MRMDestMem, (outs),
- (ins x86memop:$dst, VR512:$src),
- !strconcat(asm, "\t{$src, $dst|$dst, $src}"), []>, EVEX;
-let Constraints = "$src1 = $dst" in {
- def rrk : AVX512XSI<load_opc, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, KRC:$mask, RC:$src2),
- !strconcat(asm,
- "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"), []>,
+multiclass avx512_store<bits<8> opc, RegisterClass RC, RegisterClass KRC,
+ X86MemOperand x86memop, PatFrag store_frag,
+ string asm, Domain d, ValueType vt> {
+ let isAsmParserOnly = 1, hasSideEffects = 0 in {
+ def rr_alt : AVX512PI<opc, MRMDestReg, (outs RC:$dst), (ins RC:$src),
+ !strconcat(asm, " \t{$src, $dst|$dst, $src}"), [], d>,
+ EVEX;
+ let Constraints = "$src1 = $dst" in
+ def alt_rrk : AVX512PI<opc, MRMDestReg, (outs RC:$dst),
+ (ins RC:$src1, KRC:$mask, RC:$src2),
+ !strconcat(asm,
+ " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"), [], d>,
EVEX, EVEX_K;
- def rmk : AVX512XSI<load_opc, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, KRC:$mask, x86memop:$src2),
- !strconcat(asm,
- "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
- []>, EVEX, EVEX_K;
-}
+ def alt_rrkz : AVX512PI<opc, MRMDestReg, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src),
+ !strconcat(asm,
+ " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
+ [], d>, EVEX, EVEX_KZ;
+ }
+ let mayStore = 1 in {
+ def mr : AVX512PI<opc, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
+ !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
+ [(store_frag (vt RC:$src), addr:$dst)], d>, EVEX;
+ def mrk : AVX512PI<opc, MRMDestMem, (outs),
+ (ins x86memop:$dst, KRC:$mask, RC:$src),
+ !strconcat(asm,
+ " \t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
+ [], d>, EVEX, EVEX_K;
+ def mrkz : AVX512PI<opc, MRMDestMem, (outs),
+ (ins x86memop:$dst, KRC:$mask, RC:$src),
+ !strconcat(asm,
+ " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
+ [], d>, EVEX, EVEX_KZ;
+ }
}
-defm VMOVDQU32 : avx512_mov_int<0x6F, 0x7F, "vmovdqu32", VR512, VK16WM,
- memopv16i32, i512mem>,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VMOVDQU64 : avx512_mov_int<0x6F, 0x7F, "vmovdqu64", VR512, VK8WM,
- memopv8i64, i512mem>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
-
-// 512-bit unaligned load/store
-def : Pat<(loadv8i64 addr:$src), (VMOVDQU64rm addr:$src)>;
-def : Pat<(loadv16i32 addr:$src), (VMOVDQU32rm addr:$src)>;
-
-def : Pat<(store (v8i64 VR512:$src), addr:$dst),
- (VMOVDQU64mr addr:$dst, VR512:$src)>;
-def : Pat<(store (v16i32 VR512:$src), addr:$dst),
- (VMOVDQU32mr addr:$dst, VR512:$src)>;
+defm VMOVAPSZ : avx512_load<0x28, VR512, VK16WM, f512mem, alignedloadv16f32,
+ "vmovaps", SSEPackedSingle, v16f32>,
+ avx512_store<0x29, VR512, VK16WM, f512mem, alignedstore512,
+ "vmovaps", SSEPackedSingle, v16f32>,
+ PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VMOVAPDZ : avx512_load<0x28, VR512, VK8WM, f512mem, alignedloadv8f64,
+ "vmovapd", SSEPackedDouble, v8f64>,
+ avx512_store<0x29, VR512, VK8WM, f512mem, alignedstore512,
+ "vmovapd", SSEPackedDouble, v8f64>,
+ PD, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+defm VMOVUPSZ : avx512_load<0x10, VR512, VK16WM, f512mem, loadv16f32,
+ "vmovups", SSEPackedSingle, v16f32>,
+ avx512_store<0x11, VR512, VK16WM, f512mem, store,
+ "vmovups", SSEPackedSingle, v16f32>,
+ PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VMOVUPDZ : avx512_load<0x10, VR512, VK8WM, f512mem, loadv8f64,
+ "vmovupd", SSEPackedDouble, v8f64, 0>,
+ avx512_store<0x11, VR512, VK8WM, f512mem, store,
+ "vmovupd", SSEPackedDouble, v8f64>,
+ PD, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+def: Pat<(v8f64 (int_x86_avx512_mask_loadu_pd_512 addr:$ptr,
+ (bc_v8f64 (v16i32 immAllZerosV)), GR8:$mask)),
+ (VMOVUPDZrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;
+
+def: Pat<(v16f32 (int_x86_avx512_mask_loadu_ps_512 addr:$ptr,
+ (bc_v16f32 (v16i32 immAllZerosV)), GR16:$mask)),
+ (VMOVUPSZrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;
+
+def: Pat<(int_x86_avx512_mask_storeu_ps_512 addr:$ptr, (v16f32 VR512:$src),
+ GR16:$mask),
+ (VMOVUPSZmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
+ VR512:$src)>;
+def: Pat<(int_x86_avx512_mask_storeu_pd_512 addr:$ptr, (v8f64 VR512:$src),
+ GR8:$mask),
+ (VMOVUPDZmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
+ VR512:$src)>;
+
+defm VMOVDQA32: avx512_load<0x6F, VR512, VK16WM, i512mem, alignedloadv16i32,
+ "vmovdqa32", SSEPackedInt, v16i32>,
+ avx512_store<0x7F, VR512, VK16WM, i512mem, alignedstore512,
+ "vmovdqa32", SSEPackedInt, v16i32>,
+ PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VMOVDQA64: avx512_load<0x6F, VR512, VK8WM, i512mem, alignedloadv8i64,
+ "vmovdqa64", SSEPackedInt, v8i64>,
+ avx512_store<0x7F, VR512, VK8WM, i512mem, alignedstore512,
+ "vmovdqa64", SSEPackedInt, v8i64>,
+ PD, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
+defm VMOVDQU32: avx512_load<0x6F, VR512, VK16WM, i512mem, load,
+ "vmovdqu32", SSEPackedInt, v16i32>,
+ avx512_store<0x7F, VR512, VK16WM, i512mem, store,
+ "vmovdqu32", SSEPackedInt, v16i32>,
+ XS, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VMOVDQU64: avx512_load<0x6F, VR512, VK8WM, i512mem, load,
+ "vmovdqu64", SSEPackedInt, v8i64>,
+ avx512_store<0x7F, VR512, VK8WM, i512mem, store,
+ "vmovdqu64", SSEPackedInt, v8i64>,
+ XS, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
+
+def: Pat<(v16i32 (int_x86_avx512_mask_loadu_d_512 addr:$ptr,
+ (v16i32 immAllZerosV), GR16:$mask)),
+ (VMOVDQU32rmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;
+
+def: Pat<(v8i64 (int_x86_avx512_mask_loadu_q_512 addr:$ptr,
+ (bc_v8i64 (v16i32 immAllZerosV)), GR8:$mask)),
+ (VMOVDQU64rmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;
+
+def: Pat<(int_x86_avx512_mask_storeu_d_512 addr:$ptr, (v16i32 VR512:$src),
+ GR16:$mask),
+ (VMOVDQU32mrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
+ VR512:$src)>;
+def: Pat<(int_x86_avx512_mask_storeu_q_512 addr:$ptr, (v8i64 VR512:$src),
+ GR8:$mask),
+ (VMOVDQU64mrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
+ VR512:$src)>;
let AddedComplexity = 20 in {
+def : Pat<(v8i64 (vselect VK8WM:$mask, (v8i64 VR512:$src),
+ (bc_v8i64 (v16i32 immAllZerosV)))),
+ (VMOVDQU64rrkz VK8WM:$mask, VR512:$src)>;
+
+def : Pat<(v8i64 (vselect VK8WM:$mask, (bc_v8i64 (v16i32 immAllZerosV)),
+ (v8i64 VR512:$src))),
+ (VMOVDQU64rrkz (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$mask, VK16)),
+ VK8), VR512:$src)>;
+
+def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 VR512:$src),
+ (v16i32 immAllZerosV))),
+ (VMOVDQU32rrkz VK16WM:$mask, VR512:$src)>;
+
+def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 immAllZerosV),
+ (v16i32 VR512:$src))),
+ (VMOVDQU32rrkz (KNOTWrr VK16WM:$mask), VR512:$src)>;
+
def : Pat<(v16f32 (vselect VK16WM:$mask, (v16f32 VR512:$src1),
(v16f32 VR512:$src2))),
(VMOVUPSZrrk VR512:$src2, VK16WM:$mask, VR512:$src1)>;
@@ -1215,33 +1534,33 @@ def : Pat<(v8i64 (vselect VK8WM:$mask, (v8i64 VR512:$src1),
}
// Move Int Doubleword to Packed Double Int
//
-def VMOVDI2PDIZrr : AVX512SI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src),
- "vmovd{z}\t{$src, $dst|$dst, $src}",
+def VMOVDI2PDIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src),
+ "vmovd\t{$src, $dst|$dst, $src}",
[(set VR128X:$dst,
(v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
EVEX, VEX_LIG;
-def VMOVDI2PDIZrm : AVX512SI<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i32mem:$src),
- "vmovd{z}\t{$src, $dst|$dst, $src}",
+def VMOVDI2PDIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i32mem:$src),
+ "vmovd\t{$src, $dst|$dst, $src}",
[(set VR128X:$dst,
(v4i32 (scalar_to_vector (loadi32 addr:$src))))],
IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
-def VMOV64toPQIZrr : AVX512SI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR64:$src),
- "vmovq{z}\t{$src, $dst|$dst, $src}",
+def VMOV64toPQIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR64:$src),
+ "vmovq\t{$src, $dst|$dst, $src}",
[(set VR128X:$dst,
(v2i64 (scalar_to_vector GR64:$src)))],
IIC_SSE_MOVDQ>, EVEX, VEX_W, VEX_LIG;
let isCodeGenOnly = 1 in {
-def VMOV64toSDZrr : AVX512SI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
- "vmovq{z}\t{$src, $dst|$dst, $src}",
+def VMOV64toSDZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
+ "vmovq\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (bitconvert GR64:$src))],
IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
-def VMOVSDto64Zrr : AVX512SI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
- "vmovq{z}\t{$src, $dst|$dst, $src}",
+def VMOVSDto64Zrr : AVX512BI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
+ "vmovq\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (bitconvert FR64:$src))],
IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
}
-def VMOVSDto64Zmr : AVX512SI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
- "vmovq{z}\t{$src, $dst|$dst, $src}",
+def VMOVSDto64Zmr : AVX512BI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
+ "vmovq\t{$src, $dst|$dst, $src}",
[(store (i64 (bitconvert FR64:$src)), addr:$dst)],
IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteStore]>,
EVEX_CD8<64, CD8VT1>;
@@ -1249,68 +1568,68 @@ def VMOVSDto64Zmr : AVX512SI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$s
// Move Int Doubleword to Single Scalar
//
let isCodeGenOnly = 1 in {
-def VMOVDI2SSZrr : AVX512SI<0x6E, MRMSrcReg, (outs FR32X:$dst), (ins GR32:$src),
- "vmovd{z}\t{$src, $dst|$dst, $src}",
+def VMOVDI2SSZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR32X:$dst), (ins GR32:$src),
+ "vmovd\t{$src, $dst|$dst, $src}",
[(set FR32X:$dst, (bitconvert GR32:$src))],
IIC_SSE_MOVDQ>, EVEX, VEX_LIG;
-def VMOVDI2SSZrm : AVX512SI<0x6E, MRMSrcMem, (outs FR32X:$dst), (ins i32mem:$src),
- "vmovd{z}\t{$src, $dst|$dst, $src}",
+def VMOVDI2SSZrm : AVX512BI<0x6E, MRMSrcMem, (outs FR32X:$dst), (ins i32mem:$src),
+ "vmovd\t{$src, $dst|$dst, $src}",
[(set FR32X:$dst, (bitconvert (loadi32 addr:$src)))],
IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
}
-// Move Packed Doubleword Int to Packed Double Int
+// Move doubleword from xmm register to r/m32
//
-def VMOVPDI2DIZrr : AVX512SI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128X:$src),
- "vmovd{z}\t{$src, $dst|$dst, $src}",
+def VMOVPDI2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128X:$src),
+ "vmovd\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (vector_extract (v4i32 VR128X:$src),
(iPTR 0)))], IIC_SSE_MOVD_ToGP>,
EVEX, VEX_LIG;
-def VMOVPDI2DIZmr : AVX512SI<0x7E, MRMDestMem, (outs),
+def VMOVPDI2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
(ins i32mem:$dst, VR128X:$src),
- "vmovd{z}\t{$src, $dst|$dst, $src}",
+ "vmovd\t{$src, $dst|$dst, $src}",
[(store (i32 (vector_extract (v4i32 VR128X:$src),
(iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
-// Move Packed Doubleword Int first element to Doubleword Int
+// Move quadword from xmm1 register to r/m64
//
def VMOVPQIto64Zrr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128X:$src),
- "vmovq{z}\t{$src, $dst|$dst, $src}",
+ "vmovq\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (extractelt (v2i64 VR128X:$src),
(iPTR 0)))],
- IIC_SSE_MOVD_ToGP>, TB, OpSize, EVEX, VEX_LIG, VEX_W,
+ IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_LIG, VEX_W,
Requires<[HasAVX512, In64BitMode]>;
def VMOVPQIto64Zmr : I<0xD6, MRMDestMem, (outs),
(ins i64mem:$dst, VR128X:$src),
- "vmovq{z}\t{$src, $dst|$dst, $src}",
+ "vmovq\t{$src, $dst|$dst, $src}",
[(store (extractelt (v2i64 VR128X:$src), (iPTR 0)),
addr:$dst)], IIC_SSE_MOVDQ>,
- EVEX, OpSize, VEX_LIG, VEX_W, TB, EVEX_CD8<64, CD8VT1>,
+ EVEX, PD, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>,
Sched<[WriteStore]>, Requires<[HasAVX512, In64BitMode]>;
// Move Scalar Single to Double Int
//
let isCodeGenOnly = 1 in {
-def VMOVSS2DIZrr : AVX512SI<0x7E, MRMDestReg, (outs GR32:$dst),
+def VMOVSS2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst),
(ins FR32X:$src),
- "vmovd{z}\t{$src, $dst|$dst, $src}",
+ "vmovd\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (bitconvert FR32X:$src))],
IIC_SSE_MOVD_ToGP>, EVEX, VEX_LIG;
-def VMOVSS2DIZmr : AVX512SI<0x7E, MRMDestMem, (outs),
+def VMOVSS2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
(ins i32mem:$dst, FR32X:$src),
- "vmovd{z}\t{$src, $dst|$dst, $src}",
+ "vmovd\t{$src, $dst|$dst, $src}",
[(store (i32 (bitconvert FR32X:$src)), addr:$dst)],
IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
}
// Move Quadword Int to Packed Quadword Int
//
-def VMOVQI2PQIZrm : AVX512SI<0x6E, MRMSrcMem, (outs VR128X:$dst),
+def VMOVQI2PQIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst),
(ins i64mem:$src),
- "vmovq{z}\t{$src, $dst|$dst, $src}",
+ "vmovq\t{$src, $dst|$dst, $src}",
[(set VR128X:$dst,
(v2i64 (scalar_to_vector (loadi64 addr:$src))))]>,
EVEX, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
@@ -1322,40 +1641,55 @@ def VMOVQI2PQIZrm : AVX512SI<0x6E, MRMSrcMem, (outs VR128X:$dst),
multiclass avx512_move_scalar <string asm, RegisterClass RC,
SDNode OpNode, ValueType vt,
X86MemOperand x86memop, PatFrag mem_pat> {
+ let hasSideEffects = 0 in {
def rr : SI<0x10, MRMSrcReg, (outs VR128X:$dst), (ins VR128X:$src1, RC:$src2),
- !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ !strconcat(asm, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128X:$dst, (vt (OpNode VR128X:$src1,
(scalar_to_vector RC:$src2))))],
IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG;
+ let Constraints = "$src1 = $dst" in
+ def rrk : SI<0x10, MRMSrcReg, (outs VR128X:$dst),
+ (ins VR128X:$src1, VK1WM:$mask, RC:$src2, RC:$src3),
+ !strconcat(asm,
+ " \t{$src3, $src2, $dst {${mask}}|$dst {${mask}}, $src2, $src3}"),
+ [], IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG, EVEX_K;
def rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
- !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
+ !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
[(set RC:$dst, (mem_pat addr:$src))], IIC_SSE_MOV_S_RM>,
EVEX, VEX_LIG;
def mr: SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
- !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
+ !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
[(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
EVEX, VEX_LIG;
+ } //hasSideEffects = 0
}
let ExeDomain = SSEPackedSingle in
-defm VMOVSSZ : avx512_move_scalar<"movss{z}", FR32X, X86Movss, v4f32, f32mem,
+defm VMOVSSZ : avx512_move_scalar<"movss", FR32X, X86Movss, v4f32, f32mem,
loadf32>, XS, EVEX_CD8<32, CD8VT1>;
let ExeDomain = SSEPackedDouble in
-defm VMOVSDZ : avx512_move_scalar<"movsd{z}", FR64X, X86Movsd, v2f64, f64mem,
+defm VMOVSDZ : avx512_move_scalar<"movsd", FR64X, X86Movsd, v2f64, f64mem,
loadf64>, XD, VEX_W, EVEX_CD8<64, CD8VT1>;
+def : Pat<(f32 (X86select VK1WM:$mask, (f32 FR32X:$src1), (f32 FR32X:$src2))),
+ (COPY_TO_REGCLASS (VMOVSSZrrk (COPY_TO_REGCLASS FR32X:$src2, VR128X),
+ VK1WM:$mask, (f32 (IMPLICIT_DEF)), FR32X:$src1), FR32X)>;
+
+def : Pat<(f64 (X86select VK1WM:$mask, (f64 FR64X:$src1), (f64 FR64X:$src2))),
+ (COPY_TO_REGCLASS (VMOVSDZrrk (COPY_TO_REGCLASS FR64X:$src2, VR128X),
+ VK1WM:$mask, (f64 (IMPLICIT_DEF)), FR64X:$src1), FR64X)>;
// For the disassembler
-let isCodeGenOnly = 1 in {
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
def VMOVSSZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst),
(ins VR128X:$src1, FR32X:$src2),
- "movss{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
+ "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
IIC_SSE_MOV_S_RR>,
XS, EVEX_4V, VEX_LIG;
def VMOVSDZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst),
(ins VR128X:$src1, FR64X:$src2),
- "movsd{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
+ "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
IIC_SSE_MOV_S_RR>,
XD, EVEX_4V, VEX_LIG, VEX_W;
}
@@ -1504,7 +1838,7 @@ let Predicates = [HasAVX512] in {
let AddedComplexity = 15 in
def VMOVZPQILo2PQIZrr : AVX512XSI<0x7E, MRMSrcReg, (outs VR128X:$dst),
(ins VR128X:$src),
- "vmovq{z}\t{$src, $dst|$dst, $src}",
+ "vmovq\t{$src, $dst|$dst, $src}",
[(set VR128X:$dst, (v2i64 (X86vzmovl
(v2i64 VR128X:$src))))],
IIC_SSE_MOVQ_RR>, EVEX, VEX_W;
@@ -1512,7 +1846,7 @@ def VMOVZPQILo2PQIZrr : AVX512XSI<0x7E, MRMSrcReg, (outs VR128X:$dst),
let AddedComplexity = 20 in
def VMOVZPQILo2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst),
(ins i128mem:$src),
- "vmovq{z}\t{$src, $dst|$dst, $src}",
+ "vmovq\t{$src, $dst|$dst, $src}",
[(set VR128X:$dst, (v2i64 (X86vzmovl
(loadv2i64 addr:$src))))],
IIC_SSE_MOVDQ>, EVEX, VEX_W,
@@ -1536,6 +1870,8 @@ let Predicates = [HasAVX512] in {
(VMOVZPQILo2PQIZrm addr:$src)>;
def : Pat<(v2f64 (X86vzmovl (v2f64 VR128X:$src))),
(VMOVZPQILo2PQIZrr VR128X:$src)>;
+ def : Pat<(v2i64 (X86vzload addr:$src)),
+ (VMOVZPQILo2PQIZrm addr:$src)>;
}
// Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
@@ -1560,107 +1896,294 @@ def : Pat<(v8i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))),
(SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;
//===----------------------------------------------------------------------===//
+// AVX-512 - Non-temporals
+//===----------------------------------------------------------------------===//
+
+def VMOVNTDQAZrm : AVX5128I<0x2A, MRMSrcMem, (outs VR512:$dst),
+ (ins i512mem:$src),
+ "vmovntdqa\t{$src, $dst|$dst, $src}",
+ [(set VR512:$dst,
+ (int_x86_avx512_movntdqa addr:$src))]>,
+ EVEX, EVEX_V512, EVEX_CD8<64, CD8VF>;
+
+// Prefer non-temporal over temporal versions
+let AddedComplexity = 400, SchedRW = [WriteStore] in {
+
+def VMOVNTPSZmr : AVX512PSI<0x2B, MRMDestMem, (outs),
+ (ins f512mem:$dst, VR512:$src),
+ "vmovntps\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v16f32 VR512:$src),
+ addr:$dst)],
+ IIC_SSE_MOVNT>,
+ EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
+
+def VMOVNTPDZmr : AVX512PDI<0x2B, MRMDestMem, (outs),
+ (ins f512mem:$dst, VR512:$src),
+ "vmovntpd\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v8f64 VR512:$src),
+ addr:$dst)],
+ IIC_SSE_MOVNT>,
+ EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+
+
+def VMOVNTDQZmr : AVX512BI<0xE7, MRMDestMem, (outs),
+ (ins i512mem:$dst, VR512:$src),
+ "vmovntdq\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v8i64 VR512:$src),
+ addr:$dst)],
+ IIC_SSE_MOVNT>,
+ EVEX, EVEX_V512, EVEX_CD8<64, CD8VF>;
+}
+
+//===----------------------------------------------------------------------===//
// AVX-512 - Integer arithmetic
//
multiclass avx512_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
- ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
+ ValueType OpVT, RegisterClass KRC,
+ RegisterClass RC, PatFrag memop_frag,
X86MemOperand x86memop, PatFrag scalar_mfrag,
X86MemOperand x86scalar_mop, string BrdcstStr,
OpndItins itins, bit IsCommutable = 0> {
let isCommutable = IsCommutable in
- def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, RC:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (OpVT RC:$src2))))],
- itins.rr>, EVEX_4V;
- def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, x86memop:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (memop_frag addr:$src2))))],
- itins.rm>, EVEX_4V;
- def rmb : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, x86scalar_mop:$src2),
- !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
- ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
- [(set RC:$dst, (OpNode RC:$src1,
- (OpVT (X86VBroadcast (scalar_mfrag addr:$src2)))))],
- itins.rm>, EVEX_4V, EVEX_B;
-}
-multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr,
- ValueType DstVT, ValueType SrcVT, RegisterClass RC,
- PatFrag memop_frag, X86MemOperand x86memop,
- OpndItins itins,
- bit IsCommutable = 0> {
+ def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (OpVT RC:$src2))))],
+ itins.rr>, EVEX_4V;
+ let AddedComplexity = 30 in {
+ let Constraints = "$src0 = $dst" in
+ def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src0, KRC:$mask, RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr,
+ " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
+ [(set RC:$dst, (OpVT (vselect KRC:$mask,
+ (OpNode (OpVT RC:$src1), (OpVT RC:$src2)),
+ RC:$src0)))],
+ itins.rr>, EVEX_4V, EVEX_K;
+ def rrkz : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr, " \t{$src2, $src1, $dst {${mask}} {z}" ,
+ "|$dst {${mask}} {z}, $src1, $src2}"),
+ [(set RC:$dst, (OpVT (vselect KRC:$mask,
+ (OpNode (OpVT RC:$src1), (OpVT RC:$src2)),
+ (OpVT immAllZerosV))))],
+ itins.rr>, EVEX_4V, EVEX_KZ;
+ }
+
+ let mayLoad = 1 in {
+ def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (memop_frag addr:$src2))))],
+ itins.rm>, EVEX_4V;
+ let AddedComplexity = 30 in {
+ let Constraints = "$src0 = $dst" in
+ def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src0, KRC:$mask, RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr,
+ " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
+ [(set RC:$dst, (OpVT (vselect KRC:$mask,
+ (OpNode (OpVT RC:$src1), (memop_frag addr:$src2)),
+ RC:$src0)))],
+ itins.rm>, EVEX_4V, EVEX_K;
+ def rmkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr,
+ " \t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
+ [(set RC:$dst, (OpVT (vselect KRC:$mask,
+ (OpNode (OpVT RC:$src1), (memop_frag addr:$src2)),
+ (OpVT immAllZerosV))))],
+ itins.rm>, EVEX_4V, EVEX_KZ;
+ }
+ def rmb : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86scalar_mop:$src2),
+ !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
+ ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
+ [(set RC:$dst, (OpNode RC:$src1,
+ (OpVT (X86VBroadcast (scalar_mfrag addr:$src2)))))],
+ itins.rm>, EVEX_4V, EVEX_B;
+ let AddedComplexity = 30 in {
+ let Constraints = "$src0 = $dst" in
+ def rmbk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src0, KRC:$mask, RC:$src1, x86scalar_mop:$src2),
+ !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
+ ", $src1, $dst {${mask}}|$dst {${mask}}, $src1, ${src2}",
+ BrdcstStr, "}"),
+ [(set RC:$dst, (OpVT (vselect KRC:$mask,
+ (OpNode (OpVT RC:$src1),
+ (OpVT (X86VBroadcast (scalar_mfrag addr:$src2)))),
+ RC:$src0)))],
+ itins.rm>, EVEX_4V, EVEX_B, EVEX_K;
+ def rmbkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
+ !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
+ ", $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, ${src2}",
+ BrdcstStr, "}"),
+ [(set RC:$dst, (OpVT (vselect KRC:$mask,
+ (OpNode (OpVT RC:$src1),
+ (OpVT (X86VBroadcast (scalar_mfrag addr:$src2)))),
+ (OpVT immAllZerosV))))],
+ itins.rm>, EVEX_4V, EVEX_B, EVEX_KZ;
+ }
+ }
+}
+
+multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, ValueType DstVT,
+ ValueType SrcVT, RegisterClass KRC, RegisterClass RC,
+ PatFrag memop_frag, X86MemOperand x86memop,
+ PatFrag scalar_mfrag, X86MemOperand x86scalar_mop,
+ string BrdcstStr, OpndItins itins, bit IsCommutable = 0> {
let isCommutable = IsCommutable in
- def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
+ {
+ def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, EVEX_4V, VEX_W;
- def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, x86memop:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, EVEX_4V, VEX_W;
+ !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, EVEX_4V;
+ def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr,
+ " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
+ [], itins.rr>, EVEX_4V, EVEX_K;
+ def rrkz : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr, " \t{$src2, $src1, $dst {${mask}} {z}" ,
+ "|$dst {${mask}} {z}, $src1, $src2}"),
+ [], itins.rr>, EVEX_4V, EVEX_KZ;
+ }
+ let mayLoad = 1 in {
+ def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, EVEX_4V;
+ def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr,
+ " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
+ [], itins.rm>, EVEX_4V, EVEX_K;
+ def rmkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr,
+ " \t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
+ [], itins.rm>, EVEX_4V, EVEX_KZ;
+ def rmb : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86scalar_mop:$src2),
+ !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
+ ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
+ [], itins.rm>, EVEX_4V, EVEX_B;
+ def rmbk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
+ !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
+ ", $src1, $dst {${mask}}|$dst {${mask}}, $src1, ${src2}",
+ BrdcstStr, "}"),
+ [], itins.rm>, EVEX_4V, EVEX_B, EVEX_K;
+ def rmbkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
+ !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
+ ", $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, ${src2}",
+ BrdcstStr, "}"),
+ [], itins.rm>, EVEX_4V, EVEX_B, EVEX_KZ;
+ }
}
-defm VPADDDZ : avx512_binop_rm<0xFE, "vpaddd", add, v16i32, VR512, memopv16i32,
- i512mem, loadi32, i32mem, "{1to16}", SSE_INTALU_ITINS_P, 1>,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPADDDZ : avx512_binop_rm<0xFE, "vpaddd", add, v16i32, VK16WM, VR512,
+ memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
+ SSE_INTALU_ITINS_P, 1>, EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPSUBDZ : avx512_binop_rm<0xFA, "vpsubd", sub, v16i32, VR512, memopv16i32,
- i512mem, loadi32, i32mem, "{1to16}", SSE_INTALU_ITINS_P, 0>,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPSUBDZ : avx512_binop_rm<0xFA, "vpsubd", sub, v16i32, VK16WM, VR512,
+ memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
+ SSE_INTALU_ITINS_P, 0>, EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPMULLDZ : avx512_binop_rm<0x40, "vpmulld", mul, v16i32, VR512, memopv16i32,
- i512mem, loadi32, i32mem, "{1to16}", SSE_INTALU_ITINS_P, 1>,
- T8, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPMULLDZ : avx512_binop_rm<0x40, "vpmulld", mul, v16i32, VK16WM, VR512,
+ memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
+ SSE_INTALU_ITINS_P, 1>, T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPADDQZ : avx512_binop_rm<0xD4, "vpaddq", add, v8i64, VR512, memopv8i64,
- i512mem, loadi64, i64mem, "{1to8}", SSE_INTALU_ITINS_P, 1>,
- EVEX_CD8<64, CD8VF>, EVEX_V512, VEX_W;
+defm VPADDQZ : avx512_binop_rm<0xD4, "vpaddq", add, v8i64, VK8WM, VR512,
+ memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
+ SSE_INTALU_ITINS_P, 1>, EVEX_CD8<64, CD8VF>, EVEX_V512, VEX_W;
-defm VPSUBQZ : avx512_binop_rm<0xFB, "vpsubq", sub, v8i64, VR512, memopv8i64,
- i512mem, loadi64, i64mem, "{1to8}", SSE_INTALU_ITINS_P, 0>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+defm VPSUBQZ : avx512_binop_rm<0xFB, "vpsubq", sub, v8i64, VK8WM, VR512,
+ memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
+ SSE_INTALU_ITINS_P, 0>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VPMULDQZ : avx512_binop_rm2<0x28, "vpmuldq", v8i64, v16i32,
- VR512, memopv8i64, i512mem, SSE_INTALU_ITINS_P, 1>, T8,
- EVEX_V512, EVEX_CD8<64, CD8VF>;
+defm VPMULDQZ : avx512_binop_rm2<0x28, "vpmuldq", v8i64, v16i32, VK8WM, VR512,
+ memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
+ SSE_INTALU_ITINS_P, 1>, T8PD, EVEX_V512,
+ EVEX_CD8<64, CD8VF>, VEX_W;
-defm VPMULUDQZ : avx512_binop_rm2<0xF4, "vpmuludq", v8i64, v16i32,
- VR512, memopv8i64, i512mem, SSE_INTMUL_ITINS_P, 1>, EVEX_V512,
- EVEX_CD8<64, CD8VF>;
+defm VPMULUDQZ : avx512_binop_rm2<0xF4, "vpmuludq", v8i64, v16i32, VK8WM, VR512,
+ memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
+ SSE_INTMUL_ITINS_P, 1>, EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;
def : Pat<(v8i64 (X86pmuludq (v16i32 VR512:$src1), (v16i32 VR512:$src2))),
(VPMULUDQZrr VR512:$src1, VR512:$src2)>;
-defm VPMAXUDZ : avx512_binop_rm<0x3F, "vpmaxud", X86umax, v16i32, VR512, memopv16i32,
- i512mem, loadi32, i32mem, "{1to16}", SSE_INTALU_ITINS_P, 1>,
- T8, EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPMAXUQZ : avx512_binop_rm<0x3F, "vpmaxuq", X86umax, v8i64, VR512, memopv8i64,
- i512mem, loadi64, i64mem, "{1to8}", SSE_INTALU_ITINS_P, 0>,
- T8, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
-
-defm VPMAXSDZ : avx512_binop_rm<0x3D, "vpmaxsd", X86smax, v16i32, VR512, memopv16i32,
- i512mem, loadi32, i32mem, "{1to16}", SSE_INTALU_ITINS_P, 1>,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPMAXSQZ : avx512_binop_rm<0x3D, "vpmaxsq", X86smax, v8i64, VR512, memopv8i64,
- i512mem, loadi64, i64mem, "{1to8}", SSE_INTALU_ITINS_P, 0>,
- T8, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
-
-defm VPMINUDZ : avx512_binop_rm<0x3B, "vpminud", X86umin, v16i32, VR512, memopv16i32,
- i512mem, loadi32, i32mem, "{1to16}", SSE_INTALU_ITINS_P, 1>,
- T8, EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPMINUQZ : avx512_binop_rm<0x3B, "vpminuq", X86umin, v8i64, VR512, memopv8i64,
- i512mem, loadi64, i64mem, "{1to8}", SSE_INTALU_ITINS_P, 0>,
- T8, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
-
-defm VPMINSDZ : avx512_binop_rm<0x39, "vpminsd", X86smin, v16i32, VR512, memopv16i32,
- i512mem, loadi32, i32mem, "{1to16}", SSE_INTALU_ITINS_P, 1>,
- T8, EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPMINSQZ : avx512_binop_rm<0x39, "vpminsq", X86smin, v8i64, VR512, memopv8i64,
- i512mem, loadi64, i64mem, "{1to8}", SSE_INTALU_ITINS_P, 0>,
- T8, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
-
+def : Pat<(v8i64 (int_x86_avx512_mask_pmulu_dq_512 (v16i32 VR512:$src1),
+ (v16i32 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
+ (VPMULUDQZrr VR512:$src1, VR512:$src2)>;
+def : Pat<(v8i64 (int_x86_avx512_mask_pmul_dq_512 (v16i32 VR512:$src1),
+ (v16i32 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
+ (VPMULDQZrr VR512:$src1, VR512:$src2)>;
+
+defm VPMAXUDZ : avx512_binop_rm<0x3F, "vpmaxud", X86umax, v16i32, VK16WM, VR512,
+ memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
+ SSE_INTALU_ITINS_P, 1>,
+ T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPMAXUQZ : avx512_binop_rm<0x3F, "vpmaxuq", X86umax, v8i64, VK8WM, VR512,
+ memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
+ SSE_INTALU_ITINS_P, 0>,
+ T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+
+defm VPMAXSDZ : avx512_binop_rm<0x3D, "vpmaxsd", X86smax, v16i32, VK16WM, VR512,
+ memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
+ SSE_INTALU_ITINS_P, 1>,
+ T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPMAXSQZ : avx512_binop_rm<0x3D, "vpmaxsq", X86smax, v8i64, VK8WM, VR512,
+ memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
+ SSE_INTALU_ITINS_P, 0>,
+ T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+
+defm VPMINUDZ : avx512_binop_rm<0x3B, "vpminud", X86umin, v16i32, VK16WM, VR512,
+ memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
+ SSE_INTALU_ITINS_P, 1>,
+ T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPMINUQZ : avx512_binop_rm<0x3B, "vpminuq", X86umin, v8i64, VK8WM, VR512,
+ memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
+ SSE_INTALU_ITINS_P, 0>,
+ T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+
+defm VPMINSDZ : avx512_binop_rm<0x39, "vpminsd", X86smin, v16i32, VK16WM, VR512,
+ memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
+ SSE_INTALU_ITINS_P, 1>,
+ T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VPMINSQZ : avx512_binop_rm<0x39, "vpminsq", X86smin, v8i64, VK8WM, VR512,
+ memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
+ SSE_INTALU_ITINS_P, 0>,
+ T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+
+def : Pat <(v16i32 (int_x86_avx512_mask_pmaxs_d_512 (v16i32 VR512:$src1),
+ (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
+ (VPMAXSDZrr VR512:$src1, VR512:$src2)>;
+def : Pat <(v16i32 (int_x86_avx512_mask_pmaxu_d_512 (v16i32 VR512:$src1),
+ (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
+ (VPMAXUDZrr VR512:$src1, VR512:$src2)>;
+def : Pat <(v8i64 (int_x86_avx512_mask_pmaxs_q_512 (v8i64 VR512:$src1),
+ (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
+ (VPMAXSQZrr VR512:$src1, VR512:$src2)>;
+def : Pat <(v8i64 (int_x86_avx512_mask_pmaxu_q_512 (v8i64 VR512:$src1),
+ (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
+ (VPMAXUQZrr VR512:$src1, VR512:$src2)>;
+def : Pat <(v16i32 (int_x86_avx512_mask_pmins_d_512 (v16i32 VR512:$src1),
+ (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
+ (VPMINSDZrr VR512:$src1, VR512:$src2)>;
+def : Pat <(v16i32 (int_x86_avx512_mask_pminu_d_512 (v16i32 VR512:$src1),
+ (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
+ (VPMINUDZrr VR512:$src1, VR512:$src2)>;
+def : Pat <(v8i64 (int_x86_avx512_mask_pmins_q_512 (v8i64 VR512:$src1),
+ (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
+ (VPMINSQZrr VR512:$src1, VR512:$src2)>;
+def : Pat <(v8i64 (int_x86_avx512_mask_pminu_q_512 (v8i64 VR512:$src1),
+ (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
+ (VPMINUQZrr VR512:$src1, VR512:$src2)>;
//===----------------------------------------------------------------------===//
// AVX-512 - Unpack Instructions
//===----------------------------------------------------------------------===//
@@ -1684,28 +2207,28 @@ multiclass avx512_unpack_fp<bits<8> opc, SDNode OpNode, ValueType vt,
defm VUNPCKHPSZ: avx512_unpack_fp<0x15, X86Unpckh, v16f32, memopv8f64,
VR512, f512mem, "vunpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedSingle>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+ SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VUNPCKHPDZ: avx512_unpack_fp<0x15, X86Unpckh, v8f64, memopv8f64,
VR512, f512mem, "vunpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedDouble>, OpSize, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+ SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
defm VUNPCKLPSZ: avx512_unpack_fp<0x14, X86Unpckl, v16f32, memopv8f64,
VR512, f512mem, "vunpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedSingle>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+ SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VUNPCKLPDZ: avx512_unpack_fp<0x14, X86Unpckl, v8f64, memopv8f64,
VR512, f512mem, "vunpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedDouble>, OpSize, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+ SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
multiclass avx512_unpack_int<bits<8> opc, string OpcodeStr, SDNode OpNode,
ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
X86MemOperand x86memop> {
def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (OpVT RC:$src2))))],
IIC_SSE_UNPCK>, EVEX_4V;
def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1),
(bitconvert (memop_frag addr:$src2)))))],
IIC_SSE_UNPCK>, EVEX_4V;
@@ -1732,29 +2255,29 @@ multiclass avx512_pshuf_imm<bits<8> opc, string OpcodeStr, RegisterClass RC,
def ri : AVX512Ii8<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, i8imm:$src2),
!strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
(OpVT (OpNode RC:$src1, (i8 imm:$src2))))]>,
EVEX;
def mi : AVX512Ii8<opc, MRMSrcMem, (outs RC:$dst),
(ins x86memop:$src1, i8imm:$src2),
!strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
(OpVT (OpNode (mem_frag addr:$src1),
(i8 imm:$src2))))]>, EVEX;
}
defm VPSHUFDZ : avx512_pshuf_imm<0x70, "vpshufd", VR512, X86PShufd, memopv16i32,
- i512mem, v16i32>, OpSize, EVEX_V512, EVEX_CD8<32, CD8VF>;
+ i512mem, v16i32>, PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
let ExeDomain = SSEPackedSingle in
defm VPERMILPSZ : avx512_pshuf_imm<0x04, "vpermilps", VR512, X86VPermilp,
- memopv16f32, i512mem, v16f32>, OpSize, TA, EVEX_V512,
+ memopv16f32, i512mem, v16f32>, TAPD, EVEX_V512,
EVEX_CD8<32, CD8VF>;
let ExeDomain = SSEPackedDouble in
defm VPERMILPDZ : avx512_pshuf_imm<0x05, "vpermilpd", VR512, X86VPermilp,
- memopv8f64, i512mem, v8f64>, OpSize, TA, EVEX_V512,
+ memopv8f64, i512mem, v8f64>, TAPD, EVEX_V512,
VEX_W, EVEX_CD8<32, CD8VF>;
def : Pat<(v16i32 (X86VPermilp VR512:$src1, (i8 imm:$imm))),
@@ -1766,30 +2289,30 @@ def : Pat<(v8i64 (X86VPermilp VR512:$src1, (i8 imm:$imm))),
// AVX-512 Logical Instructions
//===----------------------------------------------------------------------===//
-defm VPANDDZ : avx512_binop_rm<0xDB, "vpandd", and, v16i32, VR512, memopv16i32,
+defm VPANDDZ : avx512_binop_rm<0xDB, "vpandd", and, v16i32, VK16WM, VR512, memopv16i32,
i512mem, loadi32, i32mem, "{1to16}", SSE_BIT_ITINS_P, 1>,
EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPANDQZ : avx512_binop_rm<0xDB, "vpandq", and, v8i64, VR512, memopv8i64,
+defm VPANDQZ : avx512_binop_rm<0xDB, "vpandq", and, v8i64, VK8WM, VR512, memopv8i64,
i512mem, loadi64, i64mem, "{1to8}", SSE_BIT_ITINS_P, 1>,
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VPORDZ : avx512_binop_rm<0xEB, "vpord", or, v16i32, VR512, memopv16i32,
+defm VPORDZ : avx512_binop_rm<0xEB, "vpord", or, v16i32, VK16WM, VR512, memopv16i32,
i512mem, loadi32, i32mem, "{1to16}", SSE_BIT_ITINS_P, 1>,
EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPORQZ : avx512_binop_rm<0xEB, "vporq", or, v8i64, VR512, memopv8i64,
+defm VPORQZ : avx512_binop_rm<0xEB, "vporq", or, v8i64, VK8WM, VR512, memopv8i64,
i512mem, loadi64, i64mem, "{1to8}", SSE_BIT_ITINS_P, 1>,
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VPXORDZ : avx512_binop_rm<0xEF, "vpxord", xor, v16i32, VR512, memopv16i32,
+defm VPXORDZ : avx512_binop_rm<0xEF, "vpxord", xor, v16i32, VK16WM, VR512, memopv16i32,
i512mem, loadi32, i32mem, "{1to16}", SSE_BIT_ITINS_P, 1>,
EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPXORQZ : avx512_binop_rm<0xEF, "vpxorq", xor, v8i64, VR512, memopv8i64,
+defm VPXORQZ : avx512_binop_rm<0xEF, "vpxorq", xor, v8i64, VK8WM, VR512, memopv8i64,
i512mem, loadi64, i64mem, "{1to8}", SSE_BIT_ITINS_P, 1>,
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VPANDNDZ : avx512_binop_rm<0xDF, "vpandnd", X86andnp, v16i32, VR512,
+defm VPANDNDZ : avx512_binop_rm<0xDF, "vpandnd", X86andnp, v16i32, VK16WM, VR512,
memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
SSE_BIT_ITINS_P, 0>, EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPANDNQZ : avx512_binop_rm<0xDF, "vpandnq", X86andnp, v8i64, VR512, memopv8i64,
- i512mem, loadi64, i64mem, "{1to8}", SSE_BIT_ITINS_P, 0>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+defm VPANDNQZ : avx512_binop_rm<0xDF, "vpandnq", X86andnp, v8i64, VK8WM, VR512,
+ memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
+ SSE_BIT_ITINS_P, 0>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
//===----------------------------------------------------------------------===//
// AVX-512 FP arithmetic
@@ -1797,10 +2320,10 @@ defm VPANDNQZ : avx512_binop_rm<0xDF, "vpandnq", X86andnp, v8i64, VR512, memopv8
multiclass avx512_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
SizeItins itins> {
- defm SSZ : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss{z}"), OpNode, FR32X,
+ defm SSZ : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"), OpNode, FR32X,
f32mem, itins.s, 0>, XS, EVEX_4V, VEX_LIG,
EVEX_CD8<32, CD8VT1>;
- defm SDZ : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd{z}"), OpNode, FR64X,
+ defm SDZ : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"), OpNode, FR64X,
f64mem, itins.d, 0>, XD, VEX_W, EVEX_4V, VEX_LIG,
EVEX_CD8<64, CD8VT1>;
}
@@ -1817,82 +2340,138 @@ defm VDIV : avx512_binop_s<0x5E, "div", fdiv, SSE_ALU_ITINS_S>;
}
multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ RegisterClass KRC,
RegisterClass RC, ValueType vt,
X86MemOperand x86memop, PatFrag mem_frag,
X86MemOperand x86scalar_mop, PatFrag scalar_mfrag,
string BrdcstStr,
Domain d, OpndItins itins, bit commutable> {
- let isCommutable = commutable in
+ let isCommutable = commutable in {
def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], itins.rr, d>,
- EVEX_4V, TB;
+ EVEX_4V;
+
+ def rrk: PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr,
+ " \t{$src2, $src1, $dst {${mask}} |$dst {${mask}}, $src1, $src2}"),
+ [], itins.rr, d>, EVEX_4V, EVEX_K;
+
+ def rrkz: PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr,
+ " \t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
+ [], itins.rr, d>, EVEX_4V, EVEX_KZ;
+ }
+
let mayLoad = 1 in {
def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))],
- itins.rm, d>, EVEX_4V, TB;
+ itins.rm, d>, EVEX_4V;
+
def rmb : PI<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86scalar_mop:$src2),
- !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
- ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
+ !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
+ ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
[(set RC:$dst, (OpNode RC:$src1,
(vt (X86VBroadcast (scalar_mfrag addr:$src2)))))],
- itins.rm, d>, EVEX_4V, EVEX_B, TB;
- }
+ itins.rm, d>, EVEX_4V, EVEX_B;
+
+ def rmk : PI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, x86memop:$src2), !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
+ [], itins.rm, d>, EVEX_4V, EVEX_K;
+
+ def rmkz : PI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, x86memop:$src2), !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
+ [], itins.rm, d>, EVEX_4V, EVEX_KZ;
+
+ def rmbk : PI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2), !strconcat(OpcodeStr,
+ " \t{${src2}", BrdcstStr,
+ ", $src1, $dst {${mask}}|$dst {${mask}}, $src1, ${src2}", BrdcstStr, "}"),
+ [], itins.rm, d>, EVEX_4V, EVEX_B, EVEX_K;
+
+ def rmbkz : PI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2), !strconcat(OpcodeStr,
+ " \t{${src2}", BrdcstStr,
+ ", $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, ${src2}",
+ BrdcstStr, "}"),
+ [], itins.rm, d>, EVEX_4V, EVEX_B, EVEX_KZ;
+ }
}
-defm VADDPSZ : avx512_fp_packed<0x58, "addps", fadd, VR512, v16f32, f512mem,
+defm VADDPSZ : avx512_fp_packed<0x58, "addps", fadd, VK16WM, VR512, v16f32, f512mem,
memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
- SSE_ALU_ITINS_P.s, 1>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+ SSE_ALU_ITINS_P.s, 1>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
-defm VADDPDZ : avx512_fp_packed<0x58, "addpd", fadd, VR512, v8f64, f512mem,
+defm VADDPDZ : avx512_fp_packed<0x58, "addpd", fadd, VK8WM, VR512, v8f64, f512mem,
memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
SSE_ALU_ITINS_P.d, 1>,
- EVEX_V512, OpSize, VEX_W, EVEX_CD8<64, CD8VF>;
+ EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VMULPSZ : avx512_fp_packed<0x59, "mulps", fmul, VR512, v16f32, f512mem,
+defm VMULPSZ : avx512_fp_packed<0x59, "mulps", fmul, VK16WM, VR512, v16f32, f512mem,
memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
- SSE_ALU_ITINS_P.s, 1>, EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VMULPDZ : avx512_fp_packed<0x59, "mulpd", fmul, VR512, v8f64, f512mem,
+ SSE_ALU_ITINS_P.s, 1>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
+defm VMULPDZ : avx512_fp_packed<0x59, "mulpd", fmul, VK8WM, VR512, v8f64, f512mem,
memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
SSE_ALU_ITINS_P.d, 1>,
- EVEX_V512, OpSize, VEX_W, EVEX_CD8<64, CD8VF>;
+ EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VMINPSZ : avx512_fp_packed<0x5D, "minps", X86fmin, VR512, v16f32, f512mem,
+defm VMINPSZ : avx512_fp_packed<0x5D, "minps", X86fmin, VK16WM, VR512, v16f32, f512mem,
memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
SSE_ALU_ITINS_P.s, 1>,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VMAXPSZ : avx512_fp_packed<0x5F, "maxps", X86fmax, VR512, v16f32, f512mem,
+ EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
+defm VMAXPSZ : avx512_fp_packed<0x5F, "maxps", X86fmax, VK16WM, VR512, v16f32, f512mem,
memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
SSE_ALU_ITINS_P.s, 1>,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
+ EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
-defm VMINPDZ : avx512_fp_packed<0x5D, "minpd", X86fmin, VR512, v8f64, f512mem,
+defm VMINPDZ : avx512_fp_packed<0x5D, "minpd", X86fmin, VK8WM, VR512, v8f64, f512mem,
memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
SSE_ALU_ITINS_P.d, 1>,
- EVEX_V512, OpSize, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VMAXPDZ : avx512_fp_packed<0x5F, "maxpd", X86fmax, VR512, v8f64, f512mem,
+ EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
+defm VMAXPDZ : avx512_fp_packed<0x5F, "maxpd", X86fmax, VK8WM, VR512, v8f64, f512mem,
memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
SSE_ALU_ITINS_P.d, 1>,
- EVEX_V512, OpSize, VEX_W, EVEX_CD8<64, CD8VF>;
+ EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VSUBPSZ : avx512_fp_packed<0x5C, "subps", fsub, VR512, v16f32, f512mem,
+defm VSUBPSZ : avx512_fp_packed<0x5C, "subps", fsub, VK16WM, VR512, v16f32, f512mem,
memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
- SSE_ALU_ITINS_P.s, 0>, EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VDIVPSZ : avx512_fp_packed<0x5E, "divps", fdiv, VR512, v16f32, f512mem,
+ SSE_ALU_ITINS_P.s, 0>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
+defm VDIVPSZ : avx512_fp_packed<0x5E, "divps", fdiv, VK16WM, VR512, v16f32, f512mem,
memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
- SSE_ALU_ITINS_P.s, 0>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+ SSE_ALU_ITINS_P.s, 0>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
-defm VSUBPDZ : avx512_fp_packed<0x5C, "subpd", fsub, VR512, v8f64, f512mem,
+defm VSUBPDZ : avx512_fp_packed<0x5C, "subpd", fsub, VK8WM, VR512, v8f64, f512mem,
memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
SSE_ALU_ITINS_P.d, 0>,
- EVEX_V512, OpSize, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VDIVPDZ : avx512_fp_packed<0x5E, "divpd", fdiv, VR512, v8f64, f512mem,
+ EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
+defm VDIVPDZ : avx512_fp_packed<0x5E, "divpd", fdiv, VK8WM, VR512, v8f64, f512mem,
memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
SSE_ALU_ITINS_P.d, 0>,
- EVEX_V512, OpSize, VEX_W, EVEX_CD8<64, CD8VF>;
-
+ EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
+
+def : Pat<(v16f32 (int_x86_avx512_mask_max_ps_512 (v16f32 VR512:$src1),
+ (v16f32 VR512:$src2), (bc_v16f32 (v16i32 immAllZerosV)),
+ (i16 -1), FROUND_CURRENT)),
+ (VMAXPSZrr VR512:$src1, VR512:$src2)>;
+
+def : Pat<(v8f64 (int_x86_avx512_mask_max_pd_512 (v8f64 VR512:$src1),
+ (v8f64 VR512:$src2), (bc_v8f64 (v16i32 immAllZerosV)),
+ (i8 -1), FROUND_CURRENT)),
+ (VMAXPDZrr VR512:$src1, VR512:$src2)>;
+
+def : Pat<(v16f32 (int_x86_avx512_mask_min_ps_512 (v16f32 VR512:$src1),
+ (v16f32 VR512:$src2), (bc_v16f32 (v16i32 immAllZerosV)),
+ (i16 -1), FROUND_CURRENT)),
+ (VMINPSZrr VR512:$src1, VR512:$src2)>;
+
+def : Pat<(v8f64 (int_x86_avx512_mask_min_pd_512 (v8f64 VR512:$src1),
+ (v8f64 VR512:$src2), (bc_v8f64 (v16i32 immAllZerosV)),
+ (i8 -1), FROUND_CURRENT)),
+ (VMINPDZrr VR512:$src1, VR512:$src2)>;
//===----------------------------------------------------------------------===//
// AVX-512 VPTESTM instructions
//===----------------------------------------------------------------------===//
@@ -1900,24 +2479,41 @@ defm VDIVPDZ : avx512_fp_packed<0x5E, "divpd", fdiv, VR512, v8f64, f512mem,
multiclass avx512_vptest<bits<8> opc, string OpcodeStr, RegisterClass KRC,
RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
SDNode OpNode, ValueType vt> {
- def rr : AVX5128I<opc, MRMSrcReg,
+ def rr : AVX512PI<opc, MRMSrcReg,
(outs KRC:$dst), (ins RC:$src1, RC:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2)))]>, EVEX_4V;
- def rm : AVX5128I<opc, MRMSrcMem,
+ !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2)))],
+ SSEPackedInt>, EVEX_4V;
+ def rm : AVX512PI<opc, MRMSrcMem,
(outs KRC:$dst), (ins RC:$src1, x86memop:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set KRC:$dst, (OpNode (vt RC:$src1),
- (bitconvert (memop_frag addr:$src2))))]>, EVEX_4V;
+ (bitconvert (memop_frag addr:$src2))))], SSEPackedInt>, EVEX_4V;
}
defm VPTESTMDZ : avx512_vptest<0x27, "vptestmd", VK16, VR512, f512mem,
- memopv16i32, X86testm, v16i32>, EVEX_V512,
+ memopv16i32, X86testm, v16i32>, T8PD, EVEX_V512,
EVEX_CD8<32, CD8VF>;
defm VPTESTMQZ : avx512_vptest<0x27, "vptestmq", VK8, VR512, f512mem,
- memopv8i64, X86testm, v8i64>, EVEX_V512, VEX_W,
+ memopv8i64, X86testm, v8i64>, T8PD, EVEX_V512, VEX_W,
EVEX_CD8<64, CD8VF>;
+let Predicates = [HasCDI] in {
+defm VPTESTNMDZ : avx512_vptest<0x27, "vptestnmd", VK16, VR512, f512mem,
+ memopv16i32, X86testnm, v16i32>, T8XS, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+defm VPTESTNMQZ : avx512_vptest<0x27, "vptestnmq", VK8, VR512, f512mem,
+ memopv8i64, X86testnm, v8i64>, T8XS, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+}
+
+def : Pat <(i16 (int_x86_avx512_mask_ptestm_d_512 (v16i32 VR512:$src1),
+ (v16i32 VR512:$src2), (i16 -1))),
+ (COPY_TO_REGCLASS (VPTESTMDZrr VR512:$src1, VR512:$src2), GR16)>;
+
+def : Pat <(i8 (int_x86_avx512_mask_ptestm_q_512 (v8i64 VR512:$src1),
+ (v8i64 VR512:$src2), (i8 -1))),
+ (COPY_TO_REGCLASS (VPTESTMQZrr VR512:$src1, VR512:$src2), GR8)>;
//===----------------------------------------------------------------------===//
// AVX-512 Shift instructions
//===----------------------------------------------------------------------===//
@@ -1927,23 +2523,23 @@ multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM,
RegisterClass KRC> {
def ri : AVX512BIi8<opc, ImmFormR, (outs RC:$dst),
(ins RC:$src1, i8imm:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (vt (OpNode RC:$src1, (i8 imm:$src2))))],
SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V;
def rik : AVX512BIi8<opc, ImmFormR, (outs RC:$dst),
(ins KRC:$mask, RC:$src1, i8imm:$src2),
!strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
+ " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
[], SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V, EVEX_K;
def mi: AVX512BIi8<opc, ImmFormM, (outs RC:$dst),
(ins x86memop:$src1, i8imm:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (OpNode (mem_frag addr:$src1),
(i8 imm:$src2)))], SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V;
def mik: AVX512BIi8<opc, ImmFormM, (outs RC:$dst),
(ins KRC:$mask, x86memop:$src1, i8imm:$src2),
!strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
+ " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
[], SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V, EVEX_K;
}
@@ -1953,24 +2549,24 @@ multiclass avx512_shift_rrm<bits<8> opc, string OpcodeStr, SDNode OpNode,
// src2 is always 128-bit
def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, VR128X:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (vt (OpNode RC:$src1, (SrcVT VR128X:$src2))))],
SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V;
def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
(ins KRC:$mask, RC:$src1, VR128X:$src2),
!strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
+ " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
[], SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V, EVEX_K;
def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (vt (OpNode RC:$src1,
(bc_frag (memopv2i64 addr:$src2)))))],
SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V;
def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
(ins KRC:$mask, RC:$src1, i128mem:$src2),
!strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
+ " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
[], SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V, EVEX_K;
}
@@ -2024,13 +2620,13 @@ multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86MemOperand x86memop, PatFrag mem_frag> {
def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
(vt (OpNode RC:$src1, (vt RC:$src2))))]>,
EVEX_4V;
def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
(vt (OpNode RC:$src1, (mem_frag addr:$src2))))]>,
EVEX_4V;
@@ -2062,10 +2658,10 @@ defm VPSRAVQZ : avx512_var_shift<0x46, "vpsravq", sra, VR512, v8i64,
multiclass avx512_movddup<string OpcodeStr, RegisterClass RC, ValueType VT,
X86MemOperand x86memop, PatFrag memop_frag> {
def rr : AVX512PDI<0x12, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
[(set RC:$dst, (VT (X86Movddup RC:$src)))]>, EVEX;
def rm : AVX512PDI<0x12, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
[(set RC:$dst,
(VT (X86Movddup (memop_frag addr:$src))))]>, EVEX;
}
@@ -2082,11 +2678,11 @@ multiclass avx512_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
ValueType vt, RegisterClass RC, PatFrag mem_frag,
X86MemOperand x86memop> {
def rr : AVX512XSI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
[(set RC:$dst, (vt (OpNode RC:$src)))]>, EVEX;
let mayLoad = 1 in
def rm : AVX512XSI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
[(set RC:$dst, (OpNode (mem_frag addr:$src)))]>, EVEX;
}
@@ -2109,12 +2705,12 @@ def : Pat<(v16i32 (X86Movsldup (memopv16i32 addr:$src))),
//===----------------------------------------------------------------------===//
def VMOVLHPSZrr : AVX512PSI<0x16, MRMSrcReg, (outs VR128X:$dst),
(ins VR128X:$src1, VR128X:$src2),
- "vmovlhps{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ "vmovlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128X:$dst, (v4f32 (X86Movlhps VR128X:$src1, VR128X:$src2)))],
IIC_SSE_MOV_LH>, EVEX_4V;
def VMOVHLPSZrr : AVX512PSI<0x12, MRMSrcReg, (outs VR128X:$dst),
(ins VR128X:$src1, VR128X:$src2),
- "vmovhlps{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ "vmovhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128X:$dst, (v4f32 (X86Movhlps VR128X:$src1, VR128X:$src2)))],
IIC_SSE_MOV_LH>, EVEX_4V;
@@ -2140,18 +2736,18 @@ multiclass avx512_fma3p_rm<bits<8> opc, string OpcodeStr,
string BrdcstStr, SDNode OpNode, ValueType OpVT> {
def r: AVX512FMA3<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2, RC:$src3),
- !strconcat(OpcodeStr,"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(OpcodeStr," \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set RC:$dst, (OpVT(OpNode RC:$src1, RC:$src2, RC:$src3)))]>;
let mayLoad = 1 in
def m: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, RC:$src2, x86memop:$src3),
- !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(OpcodeStr, " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2,
(mem_frag addr:$src3))))]>;
def mb: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, RC:$src2, x86scalar_mop:$src3),
- !strconcat(OpcodeStr, "\t{${src3}", BrdcstStr,
+ !strconcat(OpcodeStr, " \t{${src3}", BrdcstStr,
", $src2, $dst|$dst, $src2, ${src3}", BrdcstStr, "}"),
[(set RC:$dst, (OpNode RC:$src1, RC:$src2,
(OpVT (X86VBroadcast (scalar_mfrag addr:$src3)))))]>, EVEX_B;
@@ -2219,11 +2815,11 @@ multiclass avx512_fma3p_m132<bits<8> opc, string OpcodeStr,
let mayLoad = 1 in
def m: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, RC:$src3, x86memop:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src3, $dst|$dst, $src3, $src2}"),
+ !strconcat(OpcodeStr, " \t{$src2, $src3, $dst|$dst, $src3, $src2}"),
[(set RC:$dst, (OpVT (OpNode RC:$src1, (mem_frag addr:$src2), RC:$src3)))]>;
def mb: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, RC:$src3, x86scalar_mop:$src2),
- !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
+ !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
", $src3, $dst|$dst, $src3, ${src2}", BrdcstStr, "}"),
[(set RC:$dst, (OpNode RC:$src1,
(OpVT (X86VBroadcast (scalar_mfrag addr:$src2))), RC:$src3))]>, EVEX_B;
@@ -2294,14 +2890,14 @@ multiclass avx512_fma3s_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
def r : AVX512FMA3<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2, RC:$src3),
!strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set RC:$dst,
(OpVT (OpNode RC:$src2, RC:$src1, RC:$src3)))]>;
let mayLoad = 1 in
def m : AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, RC:$src2, f128mem:$src3),
!strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set RC:$dst,
(OpVT (OpNode RC:$src2, RC:$src1,
(mem_frag addr:$src3))))]>;
@@ -2309,21 +2905,21 @@ multiclass avx512_fma3s_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
} // Constraints = "$src1 = $dst"
-defm VFMADDSSZ : avx512_fma3s_rm<0xA9, "vfmadd213ss{z}", X86Fmadd, FR32X,
+defm VFMADDSSZ : avx512_fma3s_rm<0xA9, "vfmadd213ss", X86Fmadd, FR32X,
f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
-defm VFMADDSDZ : avx512_fma3s_rm<0xA9, "vfmadd213sd{z}", X86Fmadd, FR64X,
+defm VFMADDSDZ : avx512_fma3s_rm<0xA9, "vfmadd213sd", X86Fmadd, FR64X,
f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
-defm VFMSUBSSZ : avx512_fma3s_rm<0xAB, "vfmsub213ss{z}", X86Fmsub, FR32X,
+defm VFMSUBSSZ : avx512_fma3s_rm<0xAB, "vfmsub213ss", X86Fmsub, FR32X,
f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
-defm VFMSUBSDZ : avx512_fma3s_rm<0xAB, "vfmsub213sd{z}", X86Fmsub, FR64X,
+defm VFMSUBSDZ : avx512_fma3s_rm<0xAB, "vfmsub213sd", X86Fmsub, FR64X,
f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
-defm VFNMADDSSZ : avx512_fma3s_rm<0xAD, "vfnmadd213ss{z}", X86Fnmadd, FR32X,
+defm VFNMADDSSZ : avx512_fma3s_rm<0xAD, "vfnmadd213ss", X86Fnmadd, FR32X,
f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
-defm VFNMADDSDZ : avx512_fma3s_rm<0xAD, "vfnmadd213sd{z}", X86Fnmadd, FR64X,
+defm VFNMADDSDZ : avx512_fma3s_rm<0xAD, "vfnmadd213sd", X86Fnmadd, FR64X,
f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
-defm VFNMSUBSSZ : avx512_fma3s_rm<0xAF, "vfnmsub213ss{z}", X86Fnmsub, FR32X,
+defm VFNMSUBSSZ : avx512_fma3s_rm<0xAF, "vfnmsub213ss", X86Fnmsub, FR32X,
f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
-defm VFNMSUBSDZ : avx512_fma3s_rm<0xAF, "vfnmsub213sd{z}", X86Fnmsub, FR64X,
+defm VFNMSUBSDZ : avx512_fma3s_rm<0xAF, "vfnmsub213sd", X86Fnmsub, FR64X,
f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
//===----------------------------------------------------------------------===//
@@ -2332,25 +2928,25 @@ defm VFNMSUBSDZ : avx512_fma3s_rm<0xAF, "vfnmsub213sd{z}", X86Fnmsub, FR64X,
multiclass avx512_vcvtsi<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
X86MemOperand x86memop, string asm> {
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
- !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
+ !strconcat(asm," \t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
EVEX_4V;
let mayLoad = 1 in
def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
(ins DstRC:$src1, x86memop:$src),
- !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
+ !strconcat(asm," \t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
EVEX_4V;
-} // neverHasSideEffects = 1
+} // hasSideEffects = 0
}
let Predicates = [HasAVX512] in {
-defm VCVTSI2SSZ : avx512_vcvtsi<0x2A, GR32, FR32X, i32mem, "cvtsi2ss{l}{z}">,
+defm VCVTSI2SSZ : avx512_vcvtsi<0x2A, GR32, FR32X, i32mem, "cvtsi2ss{l}">,
XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
-defm VCVTSI642SSZ : avx512_vcvtsi<0x2A, GR64, FR32X, i64mem, "cvtsi2ss{q}{z}">,
+defm VCVTSI642SSZ : avx512_vcvtsi<0x2A, GR64, FR32X, i64mem, "cvtsi2ss{q}">,
XS, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
-defm VCVTSI2SDZ : avx512_vcvtsi<0x2A, GR32, FR64X, i32mem, "cvtsi2sd{l}{z}">,
+defm VCVTSI2SDZ : avx512_vcvtsi<0x2A, GR32, FR64X, i32mem, "cvtsi2sd{l}">,
XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
-defm VCVTSI642SDZ : avx512_vcvtsi<0x2A, GR64, FR64X, i64mem, "cvtsi2sd{q}{z}">,
+defm VCVTSI642SDZ : avx512_vcvtsi<0x2A, GR64, FR64X, i64mem, "cvtsi2sd{q}">,
XD, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
@@ -2371,13 +2967,13 @@ def : Pat<(f64 (sint_to_fp GR32:$src)),
def : Pat<(f64 (sint_to_fp GR64:$src)),
(VCVTSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
-defm VCVTUSI2SSZ : avx512_vcvtsi<0x7B, GR32, FR32X, i32mem, "cvtusi2ss{l}{z}">,
+defm VCVTUSI2SSZ : avx512_vcvtsi<0x7B, GR32, FR32X, i32mem, "cvtusi2ss{l}">,
XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
-defm VCVTUSI642SSZ : avx512_vcvtsi<0x7B, GR64, FR32X, i64mem, "cvtusi2ss{q}{z}">,
+defm VCVTUSI642SSZ : avx512_vcvtsi<0x7B, GR64, FR32X, i64mem, "cvtusi2ss{q}">,
XS, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
-defm VCVTUSI2SDZ : avx512_vcvtsi<0x7B, GR32, FR64X, i32mem, "cvtusi2sd{l}{z}">,
+defm VCVTUSI2SDZ : avx512_vcvtsi<0x7B, GR32, FR64X, i32mem, "cvtusi2sd{l}">,
XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
-defm VCVTUSI642SDZ : avx512_vcvtsi<0x7B, GR64, FR64X, i64mem, "cvtusi2sd{q}{z}">,
+defm VCVTUSI642SDZ : avx512_vcvtsi<0x7B, GR64, FR64X, i64mem, "cvtusi2sd{q}">,
XD, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
def : Pat<(f32 (uint_to_fp (loadi32 addr:$src))),
@@ -2405,161 +3001,167 @@ def : Pat<(f64 (uint_to_fp GR64:$src)),
multiclass avx512_cvt_s_int<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
string asm> {
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
- !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
- [(set DstRC:$dst, (Int SrcRC:$src))]>, EVEX, VEX_LIG;
+ !strconcat(asm," \t{$src, $dst|$dst, $src}"),
+ [(set DstRC:$dst, (Int SrcRC:$src))]>, EVEX, VEX_LIG,
+ Requires<[HasAVX512]>;
let mayLoad = 1 in
def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
- !strconcat(asm,"\t{$src, $dst|$dst, $src}"), []>, EVEX, VEX_LIG;
-} // neverHasSideEffects = 1
+ !strconcat(asm," \t{$src, $dst|$dst, $src}"), []>, EVEX, VEX_LIG,
+ Requires<[HasAVX512]>;
+} // hasSideEffects = 0
}
let Predicates = [HasAVX512] in {
// Convert float/double to signed/unsigned int 32/64
defm VCVTSS2SIZ: avx512_cvt_s_int<0x2D, VR128X, GR32, int_x86_sse_cvtss2si,
- ssmem, sse_load_f32, "cvtss2si{z}">,
+ ssmem, sse_load_f32, "cvtss2si">,
XS, EVEX_CD8<32, CD8VT1>;
defm VCVTSS2SI64Z: avx512_cvt_s_int<0x2D, VR128X, GR64, int_x86_sse_cvtss2si64,
- ssmem, sse_load_f32, "cvtss2si{z}">,
+ ssmem, sse_load_f32, "cvtss2si">,
XS, VEX_W, EVEX_CD8<32, CD8VT1>;
defm VCVTSS2USIZ: avx512_cvt_s_int<0x79, VR128X, GR32, int_x86_avx512_cvtss2usi,
- ssmem, sse_load_f32, "cvtss2usi{z}">,
+ ssmem, sse_load_f32, "cvtss2usi">,
XS, EVEX_CD8<32, CD8VT1>;
defm VCVTSS2USI64Z: avx512_cvt_s_int<0x79, VR128X, GR64,
int_x86_avx512_cvtss2usi64, ssmem,
- sse_load_f32, "cvtss2usi{z}">, XS, VEX_W,
+ sse_load_f32, "cvtss2usi">, XS, VEX_W,
EVEX_CD8<32, CD8VT1>;
defm VCVTSD2SIZ: avx512_cvt_s_int<0x2D, VR128X, GR32, int_x86_sse2_cvtsd2si,
- sdmem, sse_load_f64, "cvtsd2si{z}">,
+ sdmem, sse_load_f64, "cvtsd2si">,
XD, EVEX_CD8<64, CD8VT1>;
defm VCVTSD2SI64Z: avx512_cvt_s_int<0x2D, VR128X, GR64, int_x86_sse2_cvtsd2si64,
- sdmem, sse_load_f64, "cvtsd2si{z}">,
+ sdmem, sse_load_f64, "cvtsd2si">,
XD, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VCVTSD2USIZ: avx512_cvt_s_int<0x79, VR128X, GR32, int_x86_avx512_cvtsd2usi,
- sdmem, sse_load_f64, "cvtsd2usi{z}">,
+ sdmem, sse_load_f64, "cvtsd2usi">,
XD, EVEX_CD8<64, CD8VT1>;
defm VCVTSD2USI64Z: avx512_cvt_s_int<0x79, VR128X, GR64,
int_x86_avx512_cvtsd2usi64, sdmem,
- sse_load_f64, "cvtsd2usi{z}">, XD, VEX_W,
+ sse_load_f64, "cvtsd2usi">, XD, VEX_W,
EVEX_CD8<64, CD8VT1>;
-defm Int_VCVTSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
- int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}{z}",
- SSE_CVT_Scalar, 0>, XS, EVEX_4V;
-defm Int_VCVTSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
- int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}{z}",
- SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
-defm Int_VCVTSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
- int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}{z}",
- SSE_CVT_Scalar, 0>, XD, EVEX_4V;
-defm Int_VCVTSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
- int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}{z}",
- SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
-
-defm Int_VCVTUSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
- int_x86_avx512_cvtusi2ss, i32mem, loadi32, "cvtusi2ss{l}{z}",
- SSE_CVT_Scalar, 0>, XS, EVEX_4V;
-defm Int_VCVTUSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
- int_x86_avx512_cvtusi642ss, i64mem, loadi64, "cvtusi2ss{q}{z}",
- SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
-defm Int_VCVTUSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
- int_x86_avx512_cvtusi2sd, i32mem, loadi32, "cvtusi2sd{l}{z}",
- SSE_CVT_Scalar, 0>, XD, EVEX_4V;
-defm Int_VCVTUSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
- int_x86_avx512_cvtusi642sd, i64mem, loadi64, "cvtusi2sd{q}{z}",
- SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
+let isCodeGenOnly = 1 in {
+ defm Int_VCVTSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
+ int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
+ SSE_CVT_Scalar, 0>, XS, EVEX_4V;
+ defm Int_VCVTSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
+ int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
+ SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
+ defm Int_VCVTSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
+ int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
+ SSE_CVT_Scalar, 0>, XD, EVEX_4V;
+ defm Int_VCVTSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
+ int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
+ SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
+
+ defm Int_VCVTUSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
+ int_x86_avx512_cvtusi2ss, i32mem, loadi32, "cvtusi2ss{l}",
+ SSE_CVT_Scalar, 0>, XS, EVEX_4V;
+ defm Int_VCVTUSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
+ int_x86_avx512_cvtusi642ss, i64mem, loadi64, "cvtusi2ss{q}",
+ SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
+ defm Int_VCVTUSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
+ int_x86_avx512_cvtusi2sd, i32mem, loadi32, "cvtusi2sd{l}",
+ SSE_CVT_Scalar, 0>, XD, EVEX_4V;
+ defm Int_VCVTUSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
+ int_x86_avx512_cvtusi642sd, i64mem, loadi64, "cvtusi2sd{q}",
+ SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
+} // isCodeGenOnly = 1
// Convert float/double to signed/unsigned int 32/64 with truncation
-defm Int_VCVTTSS2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse_cvttss2si,
- ssmem, sse_load_f32, "cvttss2si{z}">,
- XS, EVEX_CD8<32, CD8VT1>;
-defm Int_VCVTTSS2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
- int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
- "cvttss2si{z}">, XS, VEX_W,
- EVEX_CD8<32, CD8VT1>;
-defm Int_VCVTTSD2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse2_cvttsd2si,
- sdmem, sse_load_f64, "cvttsd2si{z}">, XD,
- EVEX_CD8<64, CD8VT1>;
-defm Int_VCVTTSD2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
- int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
- "cvttsd2si{z}">, XD, VEX_W,
- EVEX_CD8<64, CD8VT1>;
-defm Int_VCVTTSS2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
- int_x86_avx512_cvttss2usi, ssmem, sse_load_f32,
- "cvttss2si{z}">, XS, EVEX_CD8<32, CD8VT1>;
-defm Int_VCVTTSS2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
- int_x86_avx512_cvttss2usi64, ssmem,
- sse_load_f32, "cvttss2usi{z}">, XS, VEX_W,
- EVEX_CD8<32, CD8VT1>;
-defm Int_VCVTTSD2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
- int_x86_avx512_cvttsd2usi,
- sdmem, sse_load_f64, "cvttsd2usi{z}">, XD,
- EVEX_CD8<64, CD8VT1>;
-defm Int_VCVTTSD2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
- int_x86_avx512_cvttsd2usi64, sdmem,
- sse_load_f64, "cvttsd2usi{z}">, XD, VEX_W,
- EVEX_CD8<64, CD8VT1>;
-}
+let isCodeGenOnly = 1 in {
+ defm Int_VCVTTSS2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse_cvttss2si,
+ ssmem, sse_load_f32, "cvttss2si">,
+ XS, EVEX_CD8<32, CD8VT1>;
+ defm Int_VCVTTSS2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
+ int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
+ "cvttss2si">, XS, VEX_W,
+ EVEX_CD8<32, CD8VT1>;
+ defm Int_VCVTTSD2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse2_cvttsd2si,
+ sdmem, sse_load_f64, "cvttsd2si">, XD,
+ EVEX_CD8<64, CD8VT1>;
+ defm Int_VCVTTSD2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
+ int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
+ "cvttsd2si">, XD, VEX_W,
+ EVEX_CD8<64, CD8VT1>;
+ defm Int_VCVTTSS2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
+ int_x86_avx512_cvttss2usi, ssmem, sse_load_f32,
+ "cvttss2usi">, XS, EVEX_CD8<32, CD8VT1>;
+ defm Int_VCVTTSS2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
+ int_x86_avx512_cvttss2usi64, ssmem,
+ sse_load_f32, "cvttss2usi">, XS, VEX_W,
+ EVEX_CD8<32, CD8VT1>;
+ defm Int_VCVTTSD2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
+ int_x86_avx512_cvttsd2usi,
+ sdmem, sse_load_f64, "cvttsd2usi">, XD,
+ EVEX_CD8<64, CD8VT1>;
+ defm Int_VCVTTSD2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
+ int_x86_avx512_cvttsd2usi64, sdmem,
+ sse_load_f64, "cvttsd2usi">, XD, VEX_W,
+ EVEX_CD8<64, CD8VT1>;
+} // isCodeGenOnly = 1
multiclass avx512_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
string asm> {
def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
- !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
+ !strconcat(asm," \t{$src, $dst|$dst, $src}"),
[(set DstRC:$dst, (OpNode SrcRC:$src))]>, EVEX;
def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
- !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
+ !strconcat(asm," \t{$src, $dst|$dst, $src}"),
[(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>, EVEX;
}
defm VCVTTSS2SIZ : avx512_cvt_s<0x2C, FR32X, GR32, fp_to_sint, f32mem,
- loadf32, "cvttss2si{z}">, XS,
+ loadf32, "cvttss2si">, XS,
EVEX_CD8<32, CD8VT1>;
defm VCVTTSS2USIZ : avx512_cvt_s<0x78, FR32X, GR32, fp_to_uint, f32mem,
- loadf32, "cvttss2usi{z}">, XS,
+ loadf32, "cvttss2usi">, XS,
EVEX_CD8<32, CD8VT1>;
defm VCVTTSS2SI64Z : avx512_cvt_s<0x2C, FR32X, GR64, fp_to_sint, f32mem,
- loadf32, "cvttss2si{z}">, XS, VEX_W,
+ loadf32, "cvttss2si">, XS, VEX_W,
EVEX_CD8<32, CD8VT1>;
defm VCVTTSS2USI64Z : avx512_cvt_s<0x78, FR32X, GR64, fp_to_uint, f32mem,
- loadf32, "cvttss2usi{z}">, XS, VEX_W,
+ loadf32, "cvttss2usi">, XS, VEX_W,
EVEX_CD8<32, CD8VT1>;
defm VCVTTSD2SIZ : avx512_cvt_s<0x2C, FR64X, GR32, fp_to_sint, f64mem,
- loadf64, "cvttsd2si{z}">, XD,
+ loadf64, "cvttsd2si">, XD,
EVEX_CD8<64, CD8VT1>;
defm VCVTTSD2USIZ : avx512_cvt_s<0x78, FR64X, GR32, fp_to_uint, f64mem,
- loadf64, "cvttsd2usi{z}">, XD,
+ loadf64, "cvttsd2usi">, XD,
EVEX_CD8<64, CD8VT1>;
defm VCVTTSD2SI64Z : avx512_cvt_s<0x2C, FR64X, GR64, fp_to_sint, f64mem,
- loadf64, "cvttsd2si{z}">, XD, VEX_W,
+ loadf64, "cvttsd2si">, XD, VEX_W,
EVEX_CD8<64, CD8VT1>;
defm VCVTTSD2USI64Z : avx512_cvt_s<0x78, FR64X, GR64, fp_to_uint, f64mem,
- loadf64, "cvttsd2usi{z}">, XD, VEX_W,
+ loadf64, "cvttsd2usi">, XD, VEX_W,
EVEX_CD8<64, CD8VT1>;
+} // HasAVX512
//===----------------------------------------------------------------------===//
// AVX-512 Convert from float to double and back
//===----------------------------------------------------------------------===//
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
def VCVTSS2SDZrr : AVX512XSI<0x5A, MRMSrcReg, (outs FR64X:$dst),
(ins FR32X:$src1, FR32X:$src2),
- "vcvtss2sd{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[]>, EVEX_4V, VEX_LIG, Sched<[WriteCvtF2F]>;
let mayLoad = 1 in
def VCVTSS2SDZrm : AVX512XSI<0x5A, MRMSrcMem, (outs FR64X:$dst),
(ins FR32X:$src1, f32mem:$src2),
- "vcvtss2sd{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[]>, EVEX_4V, VEX_LIG, Sched<[WriteCvtF2FLd, ReadAfterLd]>,
EVEX_CD8<32, CD8VT1>;
// Convert scalar double to scalar single
def VCVTSD2SSZrr : AVX512XDI<0x5A, MRMSrcReg, (outs FR32X:$dst),
(ins FR64X:$src1, FR64X:$src2),
- "vcvtsd2ss{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[]>, EVEX_4V, VEX_LIG, VEX_W, Sched<[WriteCvtF2F]>;
let mayLoad = 1 in
def VCVTSD2SSZrm : AVX512XDI<0x5A, MRMSrcMem, (outs FR32X:$dst),
(ins FR64X:$src1, f64mem:$src2),
- "vcvtsd2ss{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[]>, EVEX_4V, VEX_LIG, VEX_W,
Sched<[WriteCvtF2FLd, ReadAfterLd]>, EVEX_CD8<64, CD8VT1>;
}
@@ -2580,41 +3182,71 @@ def : Pat<(extloadf32 addr:$src),
def : Pat<(f32 (fround FR64X:$src)), (VCVTSD2SSZrr FR64X:$src, FR64X:$src)>,
Requires<[HasAVX512]>;
-multiclass avx512_vcvt_fp<bits<8> opc, string asm, RegisterClass SrcRC,
+multiclass avx512_vcvt_fp_with_rc<bits<8> opc, string asm, RegisterClass SrcRC,
RegisterClass DstRC, SDNode OpNode, PatFrag mem_frag,
X86MemOperand x86memop, ValueType OpVT, ValueType InVT,
Domain d> {
-let neverHasSideEffects = 1 in {
+let hasSideEffects = 0 in {
+ def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
+ !strconcat(asm," \t{$src, $dst|$dst, $src}"),
+ [(set DstRC:$dst,
+ (OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX;
+ def rrb : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc),
+ !strconcat(asm," \t{$rc, $src, $dst|$dst, $src, $rc}"),
+ [], d>, EVEX, EVEX_B, EVEX_RC;
+ let mayLoad = 1 in
+ def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
+ !strconcat(asm," \t{$src, $dst|$dst, $src}"),
+ [(set DstRC:$dst,
+ (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))], d>, EVEX;
+} // hasSideEffects = 0
+}
+
+multiclass avx512_vcvt_fp<bits<8> opc, string asm, RegisterClass SrcRC,
+ RegisterClass DstRC, SDNode OpNode, PatFrag mem_frag,
+ X86MemOperand x86memop, ValueType OpVT, ValueType InVT,
+ Domain d> {
+let hasSideEffects = 0 in {
def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
- !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
+ !strconcat(asm," \t{$src, $dst|$dst, $src}"),
[(set DstRC:$dst,
(OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX;
let mayLoad = 1 in
def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
- !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
+ !strconcat(asm," \t{$src, $dst|$dst, $src}"),
[(set DstRC:$dst,
(OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))], d>, EVEX;
-} // neverHasSideEffects = 1
+} // hasSideEffects = 0
}
-defm VCVTPD2PSZ : avx512_vcvt_fp<0x5A, "vcvtpd2ps", VR512, VR256X, fround,
+defm VCVTPD2PSZ : avx512_vcvt_fp_with_rc<0x5A, "vcvtpd2ps", VR512, VR256X, fround,
memopv8f64, f512mem, v8f32, v8f64,
- SSEPackedSingle>, EVEX_V512, VEX_W, OpSize,
+ SSEPackedSingle>, EVEX_V512, VEX_W, PD,
EVEX_CD8<64, CD8VF>;
defm VCVTPS2PDZ : avx512_vcvt_fp<0x5A, "vcvtps2pd", VR256X, VR512, fextend,
memopv4f64, f256mem, v8f64, v8f32,
- SSEPackedDouble>, EVEX_V512, EVEX_CD8<32, CD8VH>;
+ SSEPackedDouble>, EVEX_V512, PS,
+ EVEX_CD8<32, CD8VH>;
def : Pat<(v8f64 (extloadv8f32 addr:$src)),
(VCVTPS2PDZrm addr:$src)>;
+
+def : Pat<(v8f32 (int_x86_avx512_mask_cvtpd2ps_512 (v8f64 VR512:$src),
+ (bc_v8f32(v8i32 immAllZerosV)), (i8 -1), (i32 FROUND_CURRENT))),
+ (VCVTPD2PSZrr VR512:$src)>;
+
+def : Pat<(v8f32 (int_x86_avx512_mask_cvtpd2ps_512 (v8f64 VR512:$src),
+ (bc_v8f32(v8i32 immAllZerosV)), (i8 -1), imm:$rc)),
+ (VCVTPD2PSZrrb VR512:$src, imm:$rc)>;
//===----------------------------------------------------------------------===//
// AVX-512 Vector convert from signed integer to float/double
//===----------------------------------------------------------------------===//
-defm VCVTDQ2PSZ : avx512_vcvt_fp<0x5B, "vcvtdq2ps", VR512, VR512, sint_to_fp,
+defm VCVTDQ2PSZ : avx512_vcvt_fp_with_rc<0x5B, "vcvtdq2ps", VR512, VR512, sint_to_fp,
memopv8i64, i512mem, v16f32, v16i32,
- SSEPackedSingle>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+ SSEPackedSingle>, EVEX_V512, PS,
+ EVEX_CD8<32, CD8VF>;
defm VCVTDQ2PDZ : avx512_vcvt_fp<0xE6, "vcvtdq2pd", VR256X, VR512, sint_to_fp,
memopv4i64, i256mem, v8f64, v8i32,
@@ -2628,25 +3260,35 @@ defm VCVTTPS2DQZ : avx512_vcvt_fp<0x5B, "vcvttps2dq", VR512, VR512, fp_to_sint,
defm VCVTTPD2DQZ : avx512_vcvt_fp<0xE6, "vcvttpd2dq", VR512, VR256X, fp_to_sint,
memopv8f64, f512mem, v8i32, v8f64,
- SSEPackedDouble>, EVEX_V512, OpSize, VEX_W,
+ SSEPackedDouble>, EVEX_V512, PD, VEX_W,
EVEX_CD8<64, CD8VF>;
defm VCVTTPS2UDQZ : avx512_vcvt_fp<0x78, "vcvttps2udq", VR512, VR512, fp_to_uint,
memopv16f32, f512mem, v16i32, v16f32,
- SSEPackedSingle>, EVEX_V512,
+ SSEPackedSingle>, EVEX_V512, PS,
EVEX_CD8<32, CD8VF>;
+// cvttps2udq (src, 0, mask-all-ones, sae-current)
+def : Pat<(v16i32 (int_x86_avx512_mask_cvttps2udq_512 (v16f32 VR512:$src),
+ (v16i32 immAllZerosV), (i16 -1), FROUND_CURRENT)),
+ (VCVTTPS2UDQZrr VR512:$src)>;
+
defm VCVTTPD2UDQZ : avx512_vcvt_fp<0x78, "vcvttpd2udq", VR512, VR256X, fp_to_uint,
memopv8f64, f512mem, v8i32, v8f64,
- SSEPackedDouble>, EVEX_V512, VEX_W,
+ SSEPackedDouble>, EVEX_V512, PS, VEX_W,
EVEX_CD8<64, CD8VF>;
+// cvttpd2udq (src, 0, mask-all-ones, sae-current)
+def : Pat<(v8i32 (int_x86_avx512_mask_cvttpd2udq_512 (v8f64 VR512:$src),
+ (v8i32 immAllZerosV), (i8 -1), FROUND_CURRENT)),
+ (VCVTTPD2UDQZrr VR512:$src)>;
+
defm VCVTUDQ2PDZ : avx512_vcvt_fp<0x7A, "vcvtudq2pd", VR256X, VR512, uint_to_fp,
memopv4i64, f256mem, v8f64, v8i32,
SSEPackedDouble>, EVEX_V512, XS,
EVEX_CD8<32, CD8VH>;
-defm VCVTUDQ2PSZ : avx512_vcvt_fp<0x7A, "vcvtudq2ps", VR512, VR512, uint_to_fp,
+defm VCVTUDQ2PSZ : avx512_vcvt_fp_with_rc<0x7A, "vcvtudq2ps", VR512, VR512, uint_to_fp,
memopv16i32, f512mem, v16f32, v16i32,
SSEPackedSingle>, EVEX_V512, XD,
EVEX_CD8<32, CD8VF>;
@@ -2655,23 +3297,81 @@ def : Pat<(v8i32 (fp_to_uint (v8f32 VR256X:$src1))),
(EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
(v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
+def : Pat<(v4i32 (fp_to_uint (v4f32 VR128X:$src1))),
+ (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
+ (v16f32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_xmm)>;
+
+def : Pat<(v8f32 (uint_to_fp (v8i32 VR256X:$src1))),
+ (EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
+ (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
+
+def : Pat<(v4f32 (uint_to_fp (v4i32 VR128X:$src1))),
+ (EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
+ (v16i32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_xmm)>;
+
+def : Pat<(v4f64 (uint_to_fp (v4i32 VR128X:$src1))),
+ (EXTRACT_SUBREG (v8f64 (VCVTUDQ2PDZrr
+ (v8i32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_ymm)>;
+
+def : Pat<(v16f32 (int_x86_avx512_mask_cvtdq2ps_512 (v16i32 VR512:$src),
+ (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), imm:$rc)),
+ (VCVTDQ2PSZrrb VR512:$src, imm:$rc)>;
+def : Pat<(v8f64 (int_x86_avx512_mask_cvtdq2pd_512 (v8i32 VR256X:$src),
+ (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
+ (VCVTDQ2PDZrr VR256X:$src)>;
+def : Pat<(v16f32 (int_x86_avx512_mask_cvtudq2ps_512 (v16i32 VR512:$src),
+ (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), imm:$rc)),
+ (VCVTUDQ2PSZrrb VR512:$src, imm:$rc)>;
+def : Pat<(v8f64 (int_x86_avx512_mask_cvtudq2pd_512 (v8i32 VR256X:$src),
+ (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
+ (VCVTUDQ2PDZrr VR256X:$src)>;
+
+multiclass avx512_vcvt_fp2int<bits<8> opc, string asm, RegisterClass SrcRC,
+ RegisterClass DstRC, PatFrag mem_frag,
+ X86MemOperand x86memop, Domain d> {
+let hasSideEffects = 0 in {
+ def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
+ !strconcat(asm," \t{$src, $dst|$dst, $src}"),
+ [], d>, EVEX;
+ def rrb : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc),
+ !strconcat(asm," \t{$rc, $src, $dst|$dst, $src, $rc}"),
+ [], d>, EVEX, EVEX_B, EVEX_RC;
+ let mayLoad = 1 in
+ def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
+ !strconcat(asm," \t{$src, $dst|$dst, $src}"),
+ [], d>, EVEX;
+} // hasSideEffects = 0
+}
+
+defm VCVTPS2DQZ : avx512_vcvt_fp2int<0x5B, "vcvtps2dq", VR512, VR512,
+ memopv16f32, f512mem, SSEPackedSingle>, PD,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VCVTPD2DQZ : avx512_vcvt_fp2int<0xE6, "vcvtpd2dq", VR512, VR256X,
+ memopv8f64, f512mem, SSEPackedDouble>, XD, VEX_W,
+ EVEX_V512, EVEX_CD8<64, CD8VF>;
+
+def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2dq_512 (v16f32 VR512:$src),
+ (v16i32 immAllZerosV), (i16 -1), imm:$rc)),
+ (VCVTPS2DQZrrb VR512:$src, imm:$rc)>;
-def : Pat<(int_x86_avx512_cvtdq2_ps_512 VR512:$src),
- (VCVTDQ2PSZrr VR512:$src)>;
-def : Pat<(int_x86_avx512_cvtdq2_ps_512 (bitconvert (memopv8i64 addr:$src))),
- (VCVTDQ2PSZrm addr:$src)>;
+def : Pat <(v8i32 (int_x86_avx512_mask_cvtpd2dq_512 (v8f64 VR512:$src),
+ (v8i32 immAllZerosV), (i8 -1), imm:$rc)),
+ (VCVTPD2DQZrrb VR512:$src, imm:$rc)>;
-def VCVTPS2DQZrr : AVX512BI<0x5B, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
- "vcvtps2dq\t{$src, $dst|$dst, $src}",
- [(set VR512:$dst,
- (int_x86_avx512_cvt_ps2dq_512 VR512:$src))],
- IIC_SSE_CVT_PS_RR>, EVEX, EVEX_V512;
-def VCVTPS2DQZrm : AVX512BI<0x5B, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
- "vcvtps2dq\t{$src, $dst|$dst, $src}",
- [(set VR512:$dst,
- (int_x86_avx512_cvt_ps2dq_512 (memopv16f32 addr:$src)))],
- IIC_SSE_CVT_PS_RM>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VCVTPS2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtps2udq", VR512, VR512,
+ memopv16f32, f512mem, SSEPackedSingle>,
+ PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VCVTPD2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtpd2udq", VR512, VR256X,
+ memopv8f64, f512mem, SSEPackedDouble>, VEX_W,
+ PS, EVEX_V512, EVEX_CD8<64, CD8VF>;
+def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2udq_512 (v16f32 VR512:$src),
+ (v16i32 immAllZerosV), (i16 -1), imm:$rc)),
+ (VCVTPS2UDQZrrb VR512:$src, imm:$rc)>;
+
+def : Pat <(v8i32 (int_x86_avx512_mask_cvtpd2udq_512 (v8f64 VR512:$src),
+ (v8i32 immAllZerosV), (i8 -1), imm:$rc)),
+ (VCVTPD2UDQZrrb VR512:$src, imm:$rc)>;
let Predicates = [HasAVX512] in {
def : Pat<(v8f32 (fround (loadv8f64 addr:$src))),
@@ -2683,234 +3383,279 @@ let Predicates = [HasAVX512] in {
//===----------------------------------------------------------------------===//
// Half precision conversion instructions
//===----------------------------------------------------------------------===//
-multiclass avx512_f16c_ph2ps<RegisterClass destRC, RegisterClass srcRC,
- X86MemOperand x86memop, Intrinsic Int> {
+multiclass avx512_cvtph2ps<RegisterClass destRC, RegisterClass srcRC,
+ X86MemOperand x86memop> {
def rr : AVX5128I<0x13, MRMSrcReg, (outs destRC:$dst), (ins srcRC:$src),
"vcvtph2ps\t{$src, $dst|$dst, $src}",
- [(set destRC:$dst, (Int srcRC:$src))]>, EVEX;
- let neverHasSideEffects = 1, mayLoad = 1 in
+ []>, EVEX;
+ let hasSideEffects = 0, mayLoad = 1 in
def rm : AVX5128I<0x13, MRMSrcMem, (outs destRC:$dst), (ins x86memop:$src),
"vcvtph2ps\t{$src, $dst|$dst, $src}", []>, EVEX;
}
-multiclass avx512_f16c_ps2ph<RegisterClass destRC, RegisterClass srcRC,
- X86MemOperand x86memop, Intrinsic Int> {
+multiclass avx512_cvtps2ph<RegisterClass destRC, RegisterClass srcRC,
+ X86MemOperand x86memop> {
def rr : AVX512AIi8<0x1D, MRMDestReg, (outs destRC:$dst),
(ins srcRC:$src1, i32i8imm:$src2),
- "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set destRC:$dst, (Int srcRC:$src1, imm:$src2))]>, EVEX;
- let neverHasSideEffects = 1, mayStore = 1 in
+ "vcvtps2ph \t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, EVEX;
+ let hasSideEffects = 0, mayStore = 1 in
def mr : AVX512AIi8<0x1D, MRMDestMem, (outs),
(ins x86memop:$dst, srcRC:$src1, i32i8imm:$src2),
- "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, EVEX;
+ "vcvtps2ph \t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, EVEX;
}
-defm VCVTPH2PSZ : avx512_f16c_ph2ps<VR512, VR256X, f256mem,
- int_x86_avx512_vcvtph2ps_512>, EVEX_V512,
+defm VCVTPH2PSZ : avx512_cvtph2ps<VR512, VR256X, f256mem>, EVEX_V512,
EVEX_CD8<32, CD8VH>;
-defm VCVTPS2PHZ : avx512_f16c_ps2ph<VR256X, VR512, f256mem,
- int_x86_avx512_vcvtps2ph_512>, EVEX_V512,
+defm VCVTPS2PHZ : avx512_cvtps2ph<VR256X, VR512, f256mem>, EVEX_V512,
EVEX_CD8<32, CD8VH>;
+def : Pat<(v16i16 (int_x86_avx512_mask_vcvtps2ph_512 (v16f32 VR512:$src),
+ imm:$rc, (bc_v16i16(v8i32 immAllZerosV)), (i16 -1))),
+ (VCVTPS2PHZrr VR512:$src, imm:$rc)>;
+
+def : Pat<(v16f32 (int_x86_avx512_mask_vcvtph2ps_512 (v16i16 VR256X:$src),
+ (bc_v16f32(v16i32 immAllZerosV)), (i16 -1), (i32 FROUND_CURRENT))),
+ (VCVTPH2PSZrr VR256X:$src)>;
+
let Defs = [EFLAGS], Predicates = [HasAVX512] in {
defm VUCOMISSZ : sse12_ord_cmp<0x2E, FR32X, X86cmp, f32, f32mem, loadf32,
- "ucomiss{z}">, TB, EVEX, VEX_LIG,
+ "ucomiss">, PS, EVEX, VEX_LIG,
EVEX_CD8<32, CD8VT1>;
defm VUCOMISDZ : sse12_ord_cmp<0x2E, FR64X, X86cmp, f64, f64mem, loadf64,
- "ucomisd{z}">, TB, OpSize, EVEX,
+ "ucomisd">, PD, EVEX,
VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
let Pattern = []<dag> in {
defm VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, undef, v4f32, f128mem, load,
- "comiss{z}">, TB, EVEX, VEX_LIG,
+ "comiss">, PS, EVEX, VEX_LIG,
EVEX_CD8<32, CD8VT1>;
defm VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, undef, v2f64, f128mem, load,
- "comisd{z}">, TB, OpSize, EVEX,
+ "comisd">, PD, EVEX,
VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
}
- defm Int_VUCOMISSZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v4f32, f128mem,
- load, "ucomiss">, TB, EVEX, VEX_LIG,
- EVEX_CD8<32, CD8VT1>;
- defm Int_VUCOMISDZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v2f64, f128mem,
- load, "ucomisd">, TB, OpSize, EVEX,
- VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
-
- defm Int_VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v4f32, f128mem,
- load, "comiss">, TB, EVEX, VEX_LIG,
- EVEX_CD8<32, CD8VT1>;
- defm Int_VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v2f64, f128mem,
- load, "comisd">, TB, OpSize, EVEX,
- VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
+ let isCodeGenOnly = 1 in {
+ defm Int_VUCOMISSZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v4f32, f128mem,
+ load, "ucomiss">, PS, EVEX, VEX_LIG,
+ EVEX_CD8<32, CD8VT1>;
+ defm Int_VUCOMISDZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v2f64, f128mem,
+ load, "ucomisd">, PD, EVEX,
+ VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
+
+ defm Int_VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v4f32, f128mem,
+ load, "comiss">, PS, EVEX, VEX_LIG,
+ EVEX_CD8<32, CD8VT1>;
+ defm Int_VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v2f64, f128mem,
+ load, "comisd">, PD, EVEX,
+ VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
+ }
}
-/// avx512_unop_p - AVX-512 unops in packed form.
-multiclass avx512_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
- def PSZr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
- !strconcat(OpcodeStr,
- "ps\t{$src, $dst|$dst, $src}"),
- [(set VR512:$dst, (v16f32 (OpNode VR512:$src)))]>,
- EVEX, EVEX_V512;
- def PSZm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins f256mem:$src),
- !strconcat(OpcodeStr,
- "ps\t{$src, $dst|$dst, $src}"),
- [(set VR512:$dst, (OpNode (memopv16f32 addr:$src)))]>,
- EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
- def PDZr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
- !strconcat(OpcodeStr,
- "pd\t{$src, $dst|$dst, $src}"),
- [(set VR512:$dst, (v8f64 (OpNode VR512:$src)))]>,
- EVEX, EVEX_V512, VEX_W;
- def PDZm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
- !strconcat(OpcodeStr,
- "pd\t{$src, $dst|$dst, $src}"),
- [(set VR512:$dst, (OpNode (memopv16f32 addr:$src)))]>,
- EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
-}
-
-/// avx512_fp_unop_p_int - AVX-512 intrinsics unops in packed forms.
-multiclass avx512_fp_unop_p_int<bits<8> opc, string OpcodeStr,
- Intrinsic V16F32Int, Intrinsic V8F64Int> {
- def PSZr_Int : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
- !strconcat(OpcodeStr,
- "ps\t{$src, $dst|$dst, $src}"),
- [(set VR512:$dst, (V16F32Int VR512:$src))]>,
- EVEX, EVEX_V512;
- def PSZm_Int : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
- !strconcat(OpcodeStr,
- "ps\t{$src, $dst|$dst, $src}"),
- [(set VR512:$dst,
- (V16F32Int (memopv16f32 addr:$src)))]>, EVEX,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
- def PDZr_Int : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
- !strconcat(OpcodeStr,
- "pd\t{$src, $dst|$dst, $src}"),
- [(set VR512:$dst, (V8F64Int VR512:$src))]>,
- EVEX, EVEX_V512, VEX_W;
- def PDZm_Int : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
- !strconcat(OpcodeStr,
- "pd\t{$src, $dst|$dst, $src}"),
- [(set VR512:$dst,
- (V8F64Int (memopv8f64 addr:$src)))]>,
- EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
-}
-
-/// avx512_fp_unop_s - AVX-512 unops in scalar form.
-multiclass avx512_fp_unop_s<bits<8> opc, string OpcodeStr> {
+/// avx512_fp14_s rcp14ss, rcp14sd, rsqrt14ss, rsqrt14sd
+multiclass avx512_fp14_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
+ X86MemOperand x86memop> {
let hasSideEffects = 0 in {
- def SSZr : AVX5128I<opc, MRMSrcReg, (outs FR32X:$dst),
- (ins FR32X:$src1, FR32X:$src2),
+ def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2),
!strconcat(OpcodeStr,
- "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, EVEX_4V;
+ " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
let mayLoad = 1 in {
- def SSZm : AVX5128I<opc, MRMSrcMem, (outs FR32X:$dst),
- (ins FR32X:$src1, f32mem:$src2),
+ def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2),
!strconcat(OpcodeStr,
- "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, EVEX_4V, EVEX_CD8<32, CD8VT1>;
- def SSZm_Int : AVX5128I<opc, MRMSrcMem, (outs VR128X:$dst),
- (ins VR128X:$src1, ssmem:$src2),
- !strconcat(OpcodeStr,
- "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, EVEX_4V, EVEX_CD8<32, CD8VT1>;
+ " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
}
- def SDZr : AVX5128I<opc, MRMSrcReg, (outs FR64X:$dst),
- (ins FR64X:$src1, FR64X:$src2),
+}
+}
+
+defm VRCP14SS : avx512_fp14_s<0x4D, "vrcp14ss", FR32X, f32mem>,
+ EVEX_CD8<32, CD8VT1>;
+defm VRCP14SD : avx512_fp14_s<0x4D, "vrcp14sd", FR64X, f64mem>,
+ VEX_W, EVEX_CD8<64, CD8VT1>;
+defm VRSQRT14SS : avx512_fp14_s<0x4F, "vrsqrt14ss", FR32X, f32mem>,
+ EVEX_CD8<32, CD8VT1>;
+defm VRSQRT14SD : avx512_fp14_s<0x4F, "vrsqrt14sd", FR64X, f64mem>,
+ VEX_W, EVEX_CD8<64, CD8VT1>;
+
+def : Pat <(v4f32 (int_x86_avx512_rcp14_ss (v4f32 VR128X:$src1),
+ (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1))),
+ (COPY_TO_REGCLASS (VRCP14SSrr (COPY_TO_REGCLASS VR128X:$src1, FR32X),
+ (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
+
+def : Pat <(v2f64 (int_x86_avx512_rcp14_sd (v2f64 VR128X:$src1),
+ (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1))),
+ (COPY_TO_REGCLASS (VRCP14SDrr (COPY_TO_REGCLASS VR128X:$src1, FR64X),
+ (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
+
+def : Pat <(v4f32 (int_x86_avx512_rsqrt14_ss (v4f32 VR128X:$src1),
+ (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1))),
+ (COPY_TO_REGCLASS (VRSQRT14SSrr (COPY_TO_REGCLASS VR128X:$src1, FR32X),
+ (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
+
+def : Pat <(v2f64 (int_x86_avx512_rsqrt14_sd (v2f64 VR128X:$src1),
+ (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1))),
+ (COPY_TO_REGCLASS (VRSQRT14SDrr (COPY_TO_REGCLASS VR128X:$src1, FR64X),
+ (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
+
+/// avx512_fp14_p rcp14ps, rcp14pd, rsqrt14ps, rsqrt14pd
+multiclass avx512_fp14_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ RegisterClass RC, X86MemOperand x86memop,
+ PatFrag mem_frag, ValueType OpVt> {
+ def r : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
+ !strconcat(OpcodeStr,
+ " \t{$src, $dst|$dst, $src}"),
+ [(set RC:$dst, (OpVt (OpNode RC:$src)))]>,
+ EVEX;
+ def m : AVX5128I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
+ [(set RC:$dst, (OpVt (OpNode (mem_frag addr:$src))))]>,
+ EVEX;
+}
+defm VRSQRT14PSZ : avx512_fp14_p<0x4E, "vrsqrt14ps", X86frsqrt, VR512, f512mem,
+ memopv16f32, v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VRSQRT14PDZ : avx512_fp14_p<0x4E, "vrsqrt14pd", X86frsqrt, VR512, f512mem,
+ memopv8f64, v8f64>, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
+defm VRCP14PSZ : avx512_fp14_p<0x4C, "vrcp14ps", X86frcp, VR512, f512mem,
+ memopv16f32, v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VRCP14PDZ : avx512_fp14_p<0x4C, "vrcp14pd", X86frcp, VR512, f512mem,
+ memopv8f64, v8f64>, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
+
+def : Pat <(v16f32 (int_x86_avx512_rsqrt14_ps_512 (v16f32 VR512:$src),
+ (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1))),
+ (VRSQRT14PSZr VR512:$src)>;
+def : Pat <(v8f64 (int_x86_avx512_rsqrt14_pd_512 (v8f64 VR512:$src),
+ (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
+ (VRSQRT14PDZr VR512:$src)>;
+
+def : Pat <(v16f32 (int_x86_avx512_rcp14_ps_512 (v16f32 VR512:$src),
+ (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1))),
+ (VRCP14PSZr VR512:$src)>;
+def : Pat <(v8f64 (int_x86_avx512_rcp14_pd_512 (v8f64 VR512:$src),
+ (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
+ (VRCP14PDZr VR512:$src)>;
+
+/// avx512_fp28_s rcp28ss, rcp28sd, rsqrt28ss, rsqrt28sd
+multiclass avx512_fp28_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
+ X86MemOperand x86memop> {
+ let hasSideEffects = 0, Predicates = [HasERI] in {
+ def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr,
+ " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
+ def rrb : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2),
!strconcat(OpcodeStr,
- "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
- EVEX_4V, VEX_W;
+ " \t{{sae}, $src2, $src1, $dst|$dst, $src1, $src2, {sae}}"),
+ []>, EVEX_4V, EVEX_B;
let mayLoad = 1 in {
- def SDZm : AVX5128I<opc, MRMSrcMem, (outs FR64X:$dst),
- (ins FR64X:$src1, f64mem:$src2),
+ def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2),
!strconcat(OpcodeStr,
- "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
- EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
- def SDZm_Int : AVX5128I<opc, MRMSrcMem, (outs VR128X:$dst),
- (ins VR128X:$src1, sdmem:$src2),
- !strconcat(OpcodeStr,
- "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
+ " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
}
}
}
-defm VRCP14 : avx512_fp_unop_s<0x4D, "vrcp14">,
- avx512_fp_unop_p<0x4C, "vrcp14", X86frcp>,
- avx512_fp_unop_p_int<0x4C, "vrcp14",
- int_x86_avx512_rcp14_ps_512, int_x86_avx512_rcp14_pd_512>;
-
-defm VRSQRT14 : avx512_fp_unop_s<0x4F, "vrsqrt14">,
- avx512_fp_unop_p<0x4E, "vrsqrt14", X86frsqrt>,
- avx512_fp_unop_p_int<0x4E, "vrsqrt14",
- int_x86_avx512_rsqrt14_ps_512, int_x86_avx512_rsqrt14_pd_512>;
-
-def : Pat<(int_x86_avx512_rsqrt14_ss VR128X:$src),
- (COPY_TO_REGCLASS (VRSQRT14SSZr (f32 (IMPLICIT_DEF)),
- (COPY_TO_REGCLASS VR128X:$src, FR32)),
- VR128X)>;
-def : Pat<(int_x86_avx512_rsqrt14_ss sse_load_f32:$src),
- (VRSQRT14SSZm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
-
-def : Pat<(int_x86_avx512_rcp14_ss VR128X:$src),
- (COPY_TO_REGCLASS (VRCP14SSZr (f32 (IMPLICIT_DEF)),
- (COPY_TO_REGCLASS VR128X:$src, FR32)),
- VR128X)>;
-def : Pat<(int_x86_avx512_rcp14_ss sse_load_f32:$src),
- (VRCP14SSZm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
-
-let AddedComplexity = 20, Predicates = [HasERI] in {
-defm VRCP28 : avx512_fp_unop_s<0xCB, "vrcp28">,
- avx512_fp_unop_p<0xCA, "vrcp28", X86frcp>,
- avx512_fp_unop_p_int<0xCA, "vrcp28",
- int_x86_avx512_rcp28_ps_512, int_x86_avx512_rcp28_pd_512>;
-
-defm VRSQRT28 : avx512_fp_unop_s<0xCD, "vrsqrt28">,
- avx512_fp_unop_p<0xCC, "vrsqrt28", X86frsqrt>,
- avx512_fp_unop_p_int<0xCC, "vrsqrt28",
- int_x86_avx512_rsqrt28_ps_512, int_x86_avx512_rsqrt28_pd_512>;
-}
-
-let Predicates = [HasERI] in {
- def : Pat<(int_x86_avx512_rsqrt28_ss VR128X:$src),
- (COPY_TO_REGCLASS (VRSQRT28SSZr (f32 (IMPLICIT_DEF)),
- (COPY_TO_REGCLASS VR128X:$src, FR32)),
- VR128X)>;
- def : Pat<(int_x86_avx512_rsqrt28_ss sse_load_f32:$src),
- (VRSQRT28SSZm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
-
- def : Pat<(int_x86_avx512_rcp28_ss VR128X:$src),
- (COPY_TO_REGCLASS (VRCP28SSZr (f32 (IMPLICIT_DEF)),
- (COPY_TO_REGCLASS VR128X:$src, FR32)),
- VR128X)>;
- def : Pat<(int_x86_avx512_rcp28_ss sse_load_f32:$src),
- (VRCP28SSZm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
+defm VRCP28SS : avx512_fp28_s<0xCB, "vrcp28ss", FR32X, f32mem>,
+ EVEX_CD8<32, CD8VT1>;
+defm VRCP28SD : avx512_fp28_s<0xCB, "vrcp28sd", FR64X, f64mem>,
+ VEX_W, EVEX_CD8<64, CD8VT1>;
+defm VRSQRT28SS : avx512_fp28_s<0xCD, "vrsqrt28ss", FR32X, f32mem>,
+ EVEX_CD8<32, CD8VT1>;
+defm VRSQRT28SD : avx512_fp28_s<0xCD, "vrsqrt28sd", FR64X, f64mem>,
+ VEX_W, EVEX_CD8<64, CD8VT1>;
+
+def : Pat <(v4f32 (int_x86_avx512_rcp28_ss (v4f32 VR128X:$src1),
+ (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1),
+ FROUND_NO_EXC)),
+ (COPY_TO_REGCLASS (VRCP28SSrrb (COPY_TO_REGCLASS VR128X:$src1, FR32X),
+ (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
+
+def : Pat <(v2f64 (int_x86_avx512_rcp28_sd (v2f64 VR128X:$src1),
+ (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1),
+ FROUND_NO_EXC)),
+ (COPY_TO_REGCLASS (VRCP28SDrrb (COPY_TO_REGCLASS VR128X:$src1, FR64X),
+ (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
+
+def : Pat <(v4f32 (int_x86_avx512_rsqrt28_ss (v4f32 VR128X:$src1),
+ (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1),
+ FROUND_NO_EXC)),
+ (COPY_TO_REGCLASS (VRSQRT28SSrrb (COPY_TO_REGCLASS VR128X:$src1, FR32X),
+ (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
+
+def : Pat <(v2f64 (int_x86_avx512_rsqrt28_sd (v2f64 VR128X:$src1),
+ (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1),
+ FROUND_NO_EXC)),
+ (COPY_TO_REGCLASS (VRSQRT28SDrrb (COPY_TO_REGCLASS VR128X:$src1, FR64X),
+ (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
+
+/// avx512_fp28_p rcp28ps, rcp28pd, rsqrt28ps, rsqrt28pd
+multiclass avx512_fp28_p<bits<8> opc, string OpcodeStr,
+ RegisterClass RC, X86MemOperand x86memop> {
+ let hasSideEffects = 0, Predicates = [HasERI] in {
+ def r : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
+ !strconcat(OpcodeStr,
+ " \t{$src, $dst|$dst, $src}"),
+ []>, EVEX;
+ def rb : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
+ !strconcat(OpcodeStr,
+ " \t{{sae}, $src, $dst|$dst, $src, {sae}}"),
+ []>, EVEX, EVEX_B;
+ def m : AVX5128I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
+ []>, EVEX;
+ }
}
+defm VRSQRT28PSZ : avx512_fp28_p<0xCC, "vrsqrt28ps", VR512, f512mem>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VRSQRT28PDZ : avx512_fp28_p<0xCC, "vrsqrt28pd", VR512, f512mem>,
+ VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
+defm VRCP28PSZ : avx512_fp28_p<0xCA, "vrcp28ps", VR512, f512mem>,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+defm VRCP28PDZ : avx512_fp28_p<0xCA, "vrcp28pd", VR512, f512mem>,
+ VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
+
+def : Pat <(v16f32 (int_x86_avx512_rsqrt28_ps (v16f32 VR512:$src),
+ (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), FROUND_NO_EXC)),
+ (VRSQRT28PSZrb VR512:$src)>;
+def : Pat <(v8f64 (int_x86_avx512_rsqrt28_pd (v8f64 VR512:$src),
+ (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1), FROUND_NO_EXC)),
+ (VRSQRT28PDZrb VR512:$src)>;
+
+def : Pat <(v16f32 (int_x86_avx512_rcp28_ps (v16f32 VR512:$src),
+ (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), FROUND_NO_EXC)),
+ (VRCP28PSZrb VR512:$src)>;
+def : Pat <(v8f64 (int_x86_avx512_rcp28_pd (v8f64 VR512:$src),
+ (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1), FROUND_NO_EXC)),
+ (VRCP28PDZrb VR512:$src)>;
+
multiclass avx512_sqrt_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
Intrinsic V16F32Int, Intrinsic V8F64Int,
OpndItins itins_s, OpndItins itins_d> {
def PSZrr :AVX512PSI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
[(set VR512:$dst, (v16f32 (OpNode VR512:$src)))], itins_s.rr>,
EVEX, EVEX_V512;
let mayLoad = 1 in
def PSZrm : AVX512PSI<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
[(set VR512:$dst,
(OpNode (v16f32 (bitconvert (memopv16f32 addr:$src)))))],
itins_s.rm>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;
def PDZrr : AVX512PDI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
[(set VR512:$dst, (v8f64 (OpNode VR512:$src)))], itins_d.rr>,
EVEX, EVEX_V512;
let mayLoad = 1 in
def PDZrm : AVX512PDI<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
[(set VR512:$dst, (OpNode
(v8f64 (bitconvert (memopv16f32 addr:$src)))))],
itins_d.rm>, EVEX, EVEX_V512, EVEX_CD8<64, CD8VF>;
+let isCodeGenOnly = 1 in {
def PSZr_Int : AVX512PSI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
!strconcat(OpcodeStr,
"ps\t{$src, $dst|$dst, $src}"),
@@ -2929,7 +3674,8 @@ multiclass avx512_sqrt_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
!strconcat(OpcodeStr,
"pd\t{$src, $dst|$dst, $src}"),
[(set VR512:$dst, (V8F64Int (memopv8f64 addr:$src)))]>,
- EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+ EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+} // isCodeGenOnly = 1
}
multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
@@ -2938,12 +3684,13 @@ multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
def SSZr : SI<opc, MRMSrcReg, (outs FR32X:$dst),
(ins FR32X:$src1, FR32X:$src2),
!strconcat(OpcodeStr,
- "ss{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[], itins_s.rr>, XS, EVEX_4V;
+ let isCodeGenOnly = 1 in
def SSZr_Int : SIi8<opc, MRMSrcReg, (outs VR128X:$dst),
(ins VR128X:$src1, VR128X:$src2),
!strconcat(OpcodeStr,
- "ss{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128X:$dst,
(F32Int VR128X:$src1, VR128X:$src2))],
itins_s.rr>, XS, EVEX_4V;
@@ -2951,12 +3698,13 @@ multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
def SSZm : SI<opc, MRMSrcMem, (outs FR32X:$dst),
(ins FR32X:$src1, f32mem:$src2),
!strconcat(OpcodeStr,
- "ss{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[], itins_s.rm>, XS, EVEX_4V, EVEX_CD8<32, CD8VT1>;
+ let isCodeGenOnly = 1 in
def SSZm_Int : SIi8<opc, MRMSrcMem, (outs VR128X:$dst),
(ins VR128X:$src1, ssmem:$src2),
!strconcat(OpcodeStr,
- "ss{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128X:$dst,
(F32Int VR128X:$src1, sse_load_f32:$src2))],
itins_s.rm>, XS, EVEX_4V, EVEX_CD8<32, CD8VT1>;
@@ -2964,12 +3712,13 @@ multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
def SDZr : SI<opc, MRMSrcReg, (outs FR64X:$dst),
(ins FR64X:$src1, FR64X:$src2),
!strconcat(OpcodeStr,
- "sd{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
+ "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
XD, EVEX_4V, VEX_W;
+ let isCodeGenOnly = 1 in
def SDZr_Int : SIi8<opc, MRMSrcReg, (outs VR128X:$dst),
(ins VR128X:$src1, VR128X:$src2),
!strconcat(OpcodeStr,
- "sd{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128X:$dst,
(F64Int VR128X:$src1, VR128X:$src2))],
itins_s.rr>, XD, EVEX_4V, VEX_W;
@@ -2977,12 +3726,13 @@ multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
def SDZm : SI<opc, MRMSrcMem, (outs FR64X:$dst),
(ins FR64X:$src1, f64mem:$src2),
!strconcat(OpcodeStr,
- "sd{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
+ "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
XD, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
+ let isCodeGenOnly = 1 in
def SDZm_Int : SIi8<opc, MRMSrcMem, (outs VR128X:$dst),
(ins VR128X:$src1, sdmem:$src2),
!strconcat(OpcodeStr,
- "sd{z}\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128X:$dst,
(F64Int VR128X:$src1, sse_load_f64:$src2))]>,
XD, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
@@ -3010,15 +3760,15 @@ let Predicates = [HasAVX512] in {
Requires<[OptForSize]>;
def : Pat<(f32 (X86frsqrt FR32X:$src)),
- (VRSQRT14SSZr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
+ (VRSQRT14SSrr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
def : Pat<(f32 (X86frsqrt (load addr:$src))),
- (VRSQRT14SSZm (f32 (IMPLICIT_DEF)), addr:$src)>,
+ (VRSQRT14SSrm (f32 (IMPLICIT_DEF)), addr:$src)>,
Requires<[OptForSize]>;
def : Pat<(f32 (X86frcp FR32X:$src)),
- (VRCP14SSZr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
+ (VRCP14SSrr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
def : Pat<(f32 (X86frcp (load addr:$src))),
- (VRCP14SSZm (f32 (IMPLICIT_DEF)), addr:$src)>,
+ (VRCP14SSrm (f32 (IMPLICIT_DEF)), addr:$src)>,
Requires<[OptForSize]>;
def : Pat<(int_x86_sse_sqrt_ss VR128X:$src),
@@ -3094,6 +3844,7 @@ let ExeDomain = GenericDomain in {
[]>;
// Intrinsic operation, reg.
+ let isCodeGenOnly = 1 in
def SSr_Int : AVX512AIi8<opcss, MRMSrcReg,
(outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2, i32i8imm:$src3),
!strconcat(OpcodeStr,
@@ -3118,6 +3869,7 @@ let ExeDomain = GenericDomain in {
[]>, VEX_W;
// Intrinsic operation, reg.
+ let isCodeGenOnly = 1 in
def SDr_Int : AVX512AIi8<opcsd, MRMSrcReg,
(outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2, i32i8imm:$src3),
!strconcat(OpcodeStr,
@@ -3136,18 +3888,70 @@ let ExeDomain = GenericDomain in {
} // ExeDomain = GenericDomain
}
-let Predicates = [HasAVX512] in {
- defm VRNDSCALE : avx512_fp_binop_rm<0x0A, 0x0B, "vrndscale",
- int_x86_avx512_rndscale_ss,
- int_x86_avx512_rndscale_sd>, EVEX_4V;
+multiclass avx512_rndscale<bits<8> opc, string OpcodeStr,
+ X86MemOperand x86memop, RegisterClass RC,
+ PatFrag mem_frag, Domain d> {
+let ExeDomain = d in {
+ // Intrinsic operation, reg.
+ // Vector intrinsic operation, reg
+ def r : AVX512AIi8<opc, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, EVEX;
- defm VRNDSCALEZ : avx512_fp_unop_rm<0x08, 0x09, "vrndscale", f256mem, VR512,
- memopv16f32, memopv8f64,
- int_x86_avx512_rndscale_ps_512,
- int_x86_avx512_rndscale_pd_512, CD8VF>,
- EVEX, EVEX_V512;
+ // Vector intrinsic operation, mem
+ def m : AVX512AIi8<opc, MRMSrcMem,
+ (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, EVEX;
+} // ExeDomain
}
+
+defm VRNDSCALEPSZ : avx512_rndscale<0x08, "vrndscaleps", f512mem, VR512,
+ memopv16f32, SSEPackedSingle>, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+
+def : Pat<(v16f32 (int_x86_avx512_mask_rndscale_ps_512 (v16f32 VR512:$src1),
+ imm:$src2, (v16f32 VR512:$src1), (i16 -1),
+ FROUND_CURRENT)),
+ (VRNDSCALEPSZr VR512:$src1, imm:$src2)>;
+
+
+defm VRNDSCALEPDZ : avx512_rndscale<0x09, "vrndscalepd", f512mem, VR512,
+ memopv8f64, SSEPackedDouble>, EVEX_V512,
+ VEX_W, EVEX_CD8<64, CD8VF>;
+
+def : Pat<(v8f64 (int_x86_avx512_mask_rndscale_pd_512 (v8f64 VR512:$src1),
+ imm:$src2, (v8f64 VR512:$src1), (i8 -1),
+ FROUND_CURRENT)),
+ (VRNDSCALEPDZr VR512:$src1, imm:$src2)>;
+
+multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr,
+ Operand x86memop, RegisterClass RC, Domain d> {
+let ExeDomain = d in {
+ def r : AVX512AIi8<opc, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, RC:$src2, i32i8imm:$src3),
+ !strconcat(OpcodeStr,
+ " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, EVEX_4V;
+
+ def m : AVX512AIi8<opc, MRMSrcMem,
+ (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i32i8imm:$src3),
+ !strconcat(OpcodeStr,
+ " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, EVEX_4V;
+} // ExeDomain
+}
+
+defm VRNDSCALESS : avx512_rndscale_scalar<0x0A, "vrndscaless", ssmem, FR32X,
+ SSEPackedSingle>, EVEX_CD8<32, CD8VT1>;
+
+defm VRNDSCALESD : avx512_rndscale_scalar<0x0B, "vrndscalesd", sdmem, FR64X,
+ SSEPackedDouble>, EVEX_CD8<64, CD8VT1>;
+
def : Pat<(ffloor FR32X:$src),
(VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x1))>;
def : Pat<(f64 (ffloor FR64X:$src)),
@@ -3170,26 +3974,26 @@ def : Pat<(f64 (ftrunc FR64X:$src)),
(VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x3))>;
def : Pat<(v16f32 (ffloor VR512:$src)),
- (VRNDSCALEZPSr VR512:$src, (i32 0x1))>;
+ (VRNDSCALEPSZr VR512:$src, (i32 0x1))>;
def : Pat<(v16f32 (fnearbyint VR512:$src)),
- (VRNDSCALEZPSr VR512:$src, (i32 0xC))>;
+ (VRNDSCALEPSZr VR512:$src, (i32 0xC))>;
def : Pat<(v16f32 (fceil VR512:$src)),
- (VRNDSCALEZPSr VR512:$src, (i32 0x2))>;
+ (VRNDSCALEPSZr VR512:$src, (i32 0x2))>;
def : Pat<(v16f32 (frint VR512:$src)),
- (VRNDSCALEZPSr VR512:$src, (i32 0x4))>;
+ (VRNDSCALEPSZr VR512:$src, (i32 0x4))>;
def : Pat<(v16f32 (ftrunc VR512:$src)),
- (VRNDSCALEZPSr VR512:$src, (i32 0x3))>;
+ (VRNDSCALEPSZr VR512:$src, (i32 0x3))>;
def : Pat<(v8f64 (ffloor VR512:$src)),
- (VRNDSCALEZPDr VR512:$src, (i32 0x1))>;
+ (VRNDSCALEPDZr VR512:$src, (i32 0x1))>;
def : Pat<(v8f64 (fnearbyint VR512:$src)),
- (VRNDSCALEZPDr VR512:$src, (i32 0xC))>;
+ (VRNDSCALEPDZr VR512:$src, (i32 0xC))>;
def : Pat<(v8f64 (fceil VR512:$src)),
- (VRNDSCALEZPDr VR512:$src, (i32 0x2))>;
+ (VRNDSCALEPDZr VR512:$src, (i32 0x2))>;
def : Pat<(v8f64 (frint VR512:$src)),
- (VRNDSCALEZPDr VR512:$src, (i32 0x4))>;
+ (VRNDSCALEPDZr VR512:$src, (i32 0x4))>;
def : Pat<(v8f64 (ftrunc VR512:$src)),
- (VRNDSCALEZPDr VR512:$src, (i32 0x3))>;
+ (VRNDSCALEPDZr VR512:$src, (i32 0x3))>;
//-------------------------------------------------
// Integer truncate and extend operations
@@ -3200,18 +4004,30 @@ multiclass avx512_trunc_sat<bits<8> opc, string OpcodeStr,
RegisterClass KRC, X86MemOperand x86memop> {
def rr : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
(ins srcRC:$src),
- !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr," \t{$src, $dst|$dst, $src}"),
[]>, EVEX;
- def krr : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
+ def rrk : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
(ins KRC:$mask, srcRC:$src),
!strconcat(OpcodeStr,
- "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
+ " \t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
+ []>, EVEX, EVEX_K;
+
+ def rrkz : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
+ (ins KRC:$mask, srcRC:$src),
+ !strconcat(OpcodeStr,
+ " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
[]>, EVEX, EVEX_KZ;
def mr : AVX512XS8I<opc, MRMDestMem, (outs), (ins x86memop:$dst, srcRC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
[]>, EVEX;
+
+ def mrk : AVX512XS8I<opc, MRMDestMem, (outs),
+ (ins x86memop:$dst, KRC:$mask, srcRC:$src),
+ !strconcat(OpcodeStr, " \t{$src, $dst {${mask}}|${dst} {${mask}}, $src}"),
+ []>, EVEX, EVEX_K;
+
}
defm VPMOVQB : avx512_trunc_sat<0x32, "vpmovqb", VR128X, VR512, VK8WM,
i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
@@ -3251,60 +4067,86 @@ def : Pat<(v16i8 (X86vtrunc (v16i32 VR512:$src))), (VPMOVDBrr VR512:$src)>;
def : Pat<(v8i32 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQDrr VR512:$src)>;
def : Pat<(v16i8 (X86vtruncm VK16WM:$mask, (v16i32 VR512:$src))),
- (VPMOVDBkrr VK16WM:$mask, VR512:$src)>;
+ (VPMOVDBrrkz VK16WM:$mask, VR512:$src)>;
def : Pat<(v16i16 (X86vtruncm VK16WM:$mask, (v16i32 VR512:$src))),
- (VPMOVDWkrr VK16WM:$mask, VR512:$src)>;
+ (VPMOVDWrrkz VK16WM:$mask, VR512:$src)>;
def : Pat<(v8i16 (X86vtruncm VK8WM:$mask, (v8i64 VR512:$src))),
- (VPMOVQWkrr VK8WM:$mask, VR512:$src)>;
+ (VPMOVQWrrkz VK8WM:$mask, VR512:$src)>;
def : Pat<(v8i32 (X86vtruncm VK8WM:$mask, (v8i64 VR512:$src))),
- (VPMOVQDkrr VK8WM:$mask, VR512:$src)>;
+ (VPMOVQDrrkz VK8WM:$mask, VR512:$src)>;
-multiclass avx512_extend<bits<8> opc, string OpcodeStr, RegisterClass DstRC,
- RegisterClass SrcRC, SDNode OpNode, PatFrag mem_frag,
- X86MemOperand x86memop, ValueType OpVT, ValueType InVT> {
+multiclass avx512_extend<bits<8> opc, string OpcodeStr, RegisterClass KRC,
+ RegisterClass DstRC, RegisterClass SrcRC, SDNode OpNode,
+ PatFrag mem_frag, X86MemOperand x86memop,
+ ValueType OpVT, ValueType InVT> {
def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
(ins SrcRC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
[(set DstRC:$dst, (OpVT (OpNode (InVT SrcRC:$src))))]>, EVEX;
- def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
+
+ def rrk : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
+ (ins KRC:$mask, SrcRC:$src),
+ !strconcat(OpcodeStr, " \t{$src, $dst {${mask}} |$dst {${mask}}, $src}"),
+ []>, EVEX, EVEX_K;
+
+ def rrkz : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
+ (ins KRC:$mask, SrcRC:$src),
+ !strconcat(OpcodeStr, " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
+ []>, EVEX, EVEX_KZ;
+
+ let mayLoad = 1 in {
+ def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
(ins x86memop:$src),
- !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"),
+ !strconcat(OpcodeStr," \t{$src, $dst|$dst, $src}"),
[(set DstRC:$dst,
(OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))]>,
EVEX;
+
+ def rmk : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
+ (ins KRC:$mask, x86memop:$src),
+ !strconcat(OpcodeStr," \t{$src, $dst {${mask}} |$dst {${mask}}, $src}"),
+ []>,
+ EVEX, EVEX_K;
+
+ def rmkz : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
+ (ins KRC:$mask, x86memop:$src),
+ !strconcat(OpcodeStr," \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
+ []>,
+ EVEX, EVEX_KZ;
+ }
}
-defm VPMOVZXBDZ: avx512_extend<0x31, "vpmovzxbd", VR512, VR128X, X86vzext,
+defm VPMOVZXBDZ: avx512_extend<0x31, "vpmovzxbd", VK16WM, VR512, VR128X, X86vzext,
memopv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
EVEX_CD8<8, CD8VQ>;
-defm VPMOVZXBQZ: avx512_extend<0x32, "vpmovzxbq", VR512, VR128X, X86vzext,
+defm VPMOVZXBQZ: avx512_extend<0x32, "vpmovzxbq", VK8WM, VR512, VR128X, X86vzext,
memopv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
EVEX_CD8<8, CD8VO>;
-defm VPMOVZXWDZ: avx512_extend<0x33, "vpmovzxwd", VR512, VR256X, X86vzext,
+defm VPMOVZXWDZ: avx512_extend<0x33, "vpmovzxwd", VK16WM, VR512, VR256X, X86vzext,
memopv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
EVEX_CD8<16, CD8VH>;
-defm VPMOVZXWQZ: avx512_extend<0x34, "vpmovzxwq", VR512, VR128X, X86vzext,
+defm VPMOVZXWQZ: avx512_extend<0x34, "vpmovzxwq", VK8WM, VR512, VR128X, X86vzext,
memopv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
EVEX_CD8<16, CD8VQ>;
-defm VPMOVZXDQZ: avx512_extend<0x35, "vpmovzxdq", VR512, VR256X, X86vzext,
+defm VPMOVZXDQZ: avx512_extend<0x35, "vpmovzxdq", VK8WM, VR512, VR256X, X86vzext,
memopv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
EVEX_CD8<32, CD8VH>;
-
-defm VPMOVSXBDZ: avx512_extend<0x21, "vpmovsxbd", VR512, VR128X, X86vsext,
+
+defm VPMOVSXBDZ: avx512_extend<0x21, "vpmovsxbd", VK16WM, VR512, VR128X, X86vsext,
memopv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
EVEX_CD8<8, CD8VQ>;
-defm VPMOVSXBQZ: avx512_extend<0x22, "vpmovsxbq", VR512, VR128X, X86vsext,
+defm VPMOVSXBQZ: avx512_extend<0x22, "vpmovsxbq", VK8WM, VR512, VR128X, X86vsext,
memopv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
EVEX_CD8<8, CD8VO>;
-defm VPMOVSXWDZ: avx512_extend<0x23, "vpmovsxwd", VR512, VR256X, X86vsext,
+defm VPMOVSXWDZ: avx512_extend<0x23, "vpmovsxwd", VK16WM, VR512, VR256X, X86vsext,
memopv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
EVEX_CD8<16, CD8VH>;
-defm VPMOVSXWQZ: avx512_extend<0x24, "vpmovsxwq", VR512, VR128X, X86vsext,
+defm VPMOVSXWQZ: avx512_extend<0x24, "vpmovsxwq", VK8WM, VR512, VR128X, X86vsext,
memopv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
EVEX_CD8<16, CD8VQ>;
-defm VPMOVSXDQZ: avx512_extend<0x25, "vpmovsxdq", VR512, VR256X, X86vsext,
+defm VPMOVSXDQZ: avx512_extend<0x25, "vpmovsxdq", VK8WM, VR512, VR256X, X86vsext,
memopv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
EVEX_CD8<32, CD8VH>;
@@ -3318,18 +4160,23 @@ let mayLoad = 1,
def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst, KRC:$mask_wb),
(ins RC:$src1, KRC:$mask, memop:$src2),
!strconcat(OpcodeStr,
- "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
+ " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
[]>, EVEX, EVEX_K;
}
+
+let ExeDomain = SSEPackedDouble in {
defm VGATHERDPDZ : avx512_gather<0x92, "vgatherdpd", VK8WM, VR512, vy64xmem>,
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
-defm VGATHERDPSZ : avx512_gather<0x92, "vgatherdps", VK16WM, VR512, vz32mem>,
- EVEX_V512, EVEX_CD8<32, CD8VT1>;
-
defm VGATHERQPDZ : avx512_gather<0x93, "vgatherqpd", VK8WM, VR512, vz64mem>,
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+}
+
+let ExeDomain = SSEPackedSingle in {
+defm VGATHERDPSZ : avx512_gather<0x92, "vgatherdps", VK16WM, VR512, vz32mem>,
+ EVEX_V512, EVEX_CD8<32, CD8VT1>;
defm VGATHERQPSZ : avx512_gather<0x93, "vgatherqps", VK8WM, VR256X, vz64mem>,
EVEX_V512, EVEX_CD8<32, CD8VT1>;
+}
defm VPGATHERDQZ : avx512_gather<0x90, "vpgatherdq", VK8WM, VR512, vy64xmem>,
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
@@ -3347,20 +4194,24 @@ let mayStore = 1, Constraints = "$mask = $mask_wb" in
def mr : AVX5128I<opc, MRMDestMem, (outs KRC:$mask_wb),
(ins memop:$dst, KRC:$mask, RC:$src2),
!strconcat(OpcodeStr,
- "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
+ " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
[]>, EVEX, EVEX_K;
}
+let ExeDomain = SSEPackedDouble in {
defm VSCATTERDPDZ : avx512_scatter<0xA2, "vscatterdpd", VK8WM, VR512, vy64xmem>,
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
-defm VSCATTERDPSZ : avx512_scatter<0xA2, "vscatterdps", VK16WM, VR512, vz32mem>,
- EVEX_V512, EVEX_CD8<32, CD8VT1>;
-
defm VSCATTERQPDZ : avx512_scatter<0xA3, "vscatterqpd", VK8WM, VR512, vz64mem>,
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+}
+
+let ExeDomain = SSEPackedSingle in {
+defm VSCATTERDPSZ : avx512_scatter<0xA2, "vscatterdps", VK16WM, VR512, vz32mem>,
+ EVEX_V512, EVEX_CD8<32, CD8VT1>;
defm VSCATTERQPSZ : avx512_scatter<0xA3, "vscatterqps", VK8WM, VR256X, vz64mem>,
EVEX_V512, EVEX_CD8<32, CD8VT1>;
-
+}
+
defm VPSCATTERDQZ : avx512_scatter<0xA0, "vpscatterdq", VK8WM, VR512, vy64xmem>,
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VPSCATTERDDZ : avx512_scatter<0xA0, "vpscatterdd", VK16WM, VR512, vz32mem>,
@@ -3371,6 +4222,62 @@ defm VPSCATTERQQZ : avx512_scatter<0xA1, "vpscatterqq", VK8WM, VR512, vz64mem>,
defm VPSCATTERQDZ : avx512_scatter<0xA1, "vpscatterqd", VK8WM, VR256X, vz64mem>,
EVEX_V512, EVEX_CD8<32, CD8VT1>;
+// prefetch
+multiclass avx512_gather_scatter_prefetch<bits<8> opc, Format F, string OpcodeStr,
+ RegisterClass KRC, X86MemOperand memop> {
+ let Predicates = [HasPFI], hasSideEffects = 1 in
+ def m : AVX5128I<opc, F, (outs), (ins KRC:$mask, memop:$src),
+ !strconcat(OpcodeStr, " \t{$src {${mask}}|{${mask}}, $src}"),
+ []>, EVEX, EVEX_K;
+}
+
+defm VGATHERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dps",
+ VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
+
+defm VGATHERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qps",
+ VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
+
+defm VGATHERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dpd",
+ VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
+
+defm VGATHERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qpd",
+ VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+
+defm VGATHERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dps",
+ VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
+
+defm VGATHERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qps",
+ VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
+
+defm VGATHERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dpd",
+ VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
+
+defm VGATHERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qpd",
+ VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+
+defm VSCATTERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dps",
+ VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
+
+defm VSCATTERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qps",
+ VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
+
+defm VSCATTERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dpd",
+ VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
+
+defm VSCATTERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qpd",
+ VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+
+defm VSCATTERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dps",
+ VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
+
+defm VSCATTERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qps",
+ VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
+
+defm VSCATTERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dpd",
+ VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
+
+defm VSCATTERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qpd",
+ VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
//===----------------------------------------------------------------------===//
// VSHUFPS - VSHUFPD Operations
@@ -3380,23 +4287,23 @@ multiclass avx512_shufp<RegisterClass RC, X86MemOperand x86memop,
def rmi : AVX512PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop:$src2, i8imm:$src3),
!strconcat(OpcodeStr,
- "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
(i8 imm:$src3))))], d, IIC_SSE_SHUFP>,
EVEX_4V, Sched<[WriteShuffleLd, ReadAfterLd]>;
def rri : AVX512PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2, i8imm:$src3),
!strconcat(OpcodeStr,
- "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
(i8 imm:$src3))))], d, IIC_SSE_SHUFP>,
EVEX_4V, Sched<[WriteShuffle]>;
}
defm VSHUFPSZ : avx512_shufp<VR512, f512mem, v16f32, "vshufps", memopv16f32,
- SSEPackedSingle>, EVEX_V512, EVEX_CD8<32, CD8VF>;
+ SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VSHUFPDZ : avx512_shufp<VR512, f512mem, v8f64, "vshufpd", memopv8f64,
- SSEPackedDouble>, OpSize, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
+ SSEPackedDouble>, PD, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
def : Pat<(v16i32 (X86Shufp VR512:$src1, VR512:$src2, (i8 imm:$imm))),
(VSHUFPSZrri VR512:$src1, VR512:$src2, imm:$imm)>;
@@ -3415,13 +4322,13 @@ multiclass avx512_alignr<string OpcodeStr, RegisterClass RC,
def rri : AVX512AIi8<0x03, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2, i8imm:$src3),
!strconcat(OpcodeStr,
- "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[]>, EVEX_4V;
let mayLoad = 1 in
def rmi : AVX512AIi8<0x03, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop:$src2, i8imm:$src3),
!strconcat(OpcodeStr,
- "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[]>, EVEX_4V;
}
defm VALIGND : avx512_alignr<"valignd", VR512, i512mem>,
@@ -3438,54 +4345,111 @@ def : Pat<(v16i32 (X86PAlignr VR512:$src1, VR512:$src2, (i8 imm:$imm))),
def : Pat<(v8i64 (X86PAlignr VR512:$src1, VR512:$src2, (i8 imm:$imm))),
(VALIGNQrri VR512:$src2, VR512:$src1, imm:$imm)>;
-multiclass avx512_vpabs<bits<8> opc, string OpcodeStr, RegisterClass RC,
- X86MemOperand x86memop> {
- def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>,
- EVEX;
- def rm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
- (ins x86memop:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>,
- EVEX;
+// Helper fragments to match sext vXi1 to vXiY.
+def v16i1sextv16i32 : PatLeaf<(v16i32 (X86vsrai VR512:$src, (i8 31)))>;
+def v8i1sextv8i64 : PatLeaf<(v8i64 (X86vsrai VR512:$src, (i8 63)))>;
+
+multiclass avx512_vpabs<bits<8> opc, string OpcodeStr, ValueType OpVT,
+ RegisterClass KRC, RegisterClass RC,
+ X86MemOperand x86memop, X86MemOperand x86scalar_mop,
+ string BrdcstStr> {
+ def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
+ []>, EVEX;
+ def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
+ !strconcat(OpcodeStr, " \t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
+ []>, EVEX, EVEX_K;
+ def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
+ !strconcat(OpcodeStr,
+ " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
+ []>, EVEX, EVEX_KZ;
+ let mayLoad = 1 in {
+ def rm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
+ (ins x86memop:$src),
+ !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
+ []>, EVEX;
+ def rmk : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
+ (ins KRC:$mask, x86memop:$src),
+ !strconcat(OpcodeStr,
+ " \t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
+ []>, EVEX, EVEX_K;
+ def rmkz : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
+ (ins KRC:$mask, x86memop:$src),
+ !strconcat(OpcodeStr,
+ " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
+ []>, EVEX, EVEX_KZ;
+ def rmb : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
+ (ins x86scalar_mop:$src),
+ !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
+ ", $dst|$dst, ${src}", BrdcstStr, "}"),
+ []>, EVEX, EVEX_B;
+ def rmbk : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
+ (ins KRC:$mask, x86scalar_mop:$src),
+ !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
+ ", $dst {${mask}}|$dst {${mask}}, ${src}", BrdcstStr, "}"),
+ []>, EVEX, EVEX_B, EVEX_K;
+ def rmbkz : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
+ (ins KRC:$mask, x86scalar_mop:$src),
+ !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
+ ", $dst {${mask}} {z}|$dst {${mask}} {z}, ${src}",
+ BrdcstStr, "}"),
+ []>, EVEX, EVEX_B, EVEX_KZ;
+ }
}
-defm VPABSD : avx512_vpabs<0x1E, "vpabsd", VR512, i512mem>, EVEX_V512,
- EVEX_CD8<32, CD8VF>;
-defm VPABSQ : avx512_vpabs<0x1F, "vpabsq", VR512, i512mem>, EVEX_V512, VEX_W,
- EVEX_CD8<64, CD8VF>;
+defm VPABSDZ : avx512_vpabs<0x1E, "vpabsd", v16i32, VK16WM, VR512,
+ i512mem, i32mem, "{1to16}">, EVEX_V512,
+ EVEX_CD8<32, CD8VF>;
+defm VPABSQZ : avx512_vpabs<0x1F, "vpabsq", v8i64, VK8WM, VR512,
+ i512mem, i64mem, "{1to8}">, EVEX_V512, VEX_W,
+ EVEX_CD8<64, CD8VF>;
+
+def : Pat<(xor
+ (bc_v16i32 (v16i1sextv16i32)),
+ (bc_v16i32 (add (v16i32 VR512:$src), (v16i1sextv16i32)))),
+ (VPABSDZrr VR512:$src)>;
+def : Pat<(xor
+ (bc_v8i64 (v8i1sextv8i64)),
+ (bc_v8i64 (add (v8i64 VR512:$src), (v8i1sextv8i64)))),
+ (VPABSQZrr VR512:$src)>;
+
+def : Pat<(v16i32 (int_x86_avx512_mask_pabs_d_512 (v16i32 VR512:$src),
+ (v16i32 immAllZerosV), (i16 -1))),
+ (VPABSDZrr VR512:$src)>;
+def : Pat<(v8i64 (int_x86_avx512_mask_pabs_q_512 (v8i64 VR512:$src),
+ (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
+ (VPABSQZrr VR512:$src)>;
multiclass avx512_conflict<bits<8> opc, string OpcodeStr,
- RegisterClass RC, RegisterClass KRC, PatFrag memop_frag,
- X86MemOperand x86memop, PatFrag scalar_mfrag,
- X86MemOperand x86scalar_mop, string BrdcstStr,
- Intrinsic Int, Intrinsic maskInt, Intrinsic maskzInt> {
+ RegisterClass RC, RegisterClass KRC,
+ X86MemOperand x86memop,
+ X86MemOperand x86scalar_mop, string BrdcstStr> {
def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src),
- !strconcat(OpcodeStr, "\t{$src, ${dst} |${dst}, $src}"),
- [(set RC:$dst, (Int RC:$src))]>, EVEX;
+ !strconcat(OpcodeStr, " \t{$src, ${dst} |${dst}, $src}"),
+ []>, EVEX;
def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins x86memop:$src),
- !strconcat(OpcodeStr, "\t{$src, ${dst}|${dst}, $src}"),
- [(set RC:$dst, (Int (memop_frag addr:$src)))]>, EVEX;
+ !strconcat(OpcodeStr, " \t{$src, ${dst}|${dst}, $src}"),
+ []>, EVEX;
def rmb : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins x86scalar_mop:$src),
- !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
+ !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
", ${dst}|${dst}, ${src}", BrdcstStr, "}"),
[]>, EVEX, EVEX_B;
def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
(ins KRC:$mask, RC:$src),
!strconcat(OpcodeStr,
- "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
- [(set RC:$dst, (maskzInt KRC:$mask, RC:$src))]>, EVEX, EVEX_KZ;
+ " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
+ []>, EVEX, EVEX_KZ;
def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins KRC:$mask, x86memop:$src),
!strconcat(OpcodeStr,
- "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
- [(set RC:$dst, (maskzInt KRC:$mask, (memop_frag addr:$src)))]>,
- EVEX, EVEX_KZ;
+ " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
+ []>, EVEX, EVEX_KZ;
def rmbkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins KRC:$mask, x86scalar_mop:$src),
- !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
+ !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
", ${dst} {${mask}} {z}|${dst} {${mask}} {z}, ${src}",
BrdcstStr, "}"),
[]>, EVEX, EVEX_KZ, EVEX_B;
@@ -3494,16 +4458,16 @@ multiclass avx512_conflict<bits<8> opc, string OpcodeStr,
def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, KRC:$mask, RC:$src2),
!strconcat(OpcodeStr,
- "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
- [(set RC:$dst, (maskInt RC:$src1, KRC:$mask, RC:$src2))]>, EVEX, EVEX_K;
+ " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
+ []>, EVEX, EVEX_K;
def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, KRC:$mask, x86memop:$src2),
!strconcat(OpcodeStr,
- "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
- [(set RC:$dst, (maskInt RC:$src1, KRC:$mask, (memop_frag addr:$src2)))]>, EVEX, EVEX_K;
+ " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
+ []>, EVEX, EVEX_K;
def rmbk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, KRC:$mask, x86scalar_mop:$src2),
- !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
+ !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
", ${dst} {${mask}}|${dst} {${mask}}, ${src2}", BrdcstStr, "}"),
[]>, EVEX, EVEX_K, EVEX_B;
}
@@ -3511,16 +4475,69 @@ multiclass avx512_conflict<bits<8> opc, string OpcodeStr,
let Predicates = [HasCDI] in {
defm VPCONFLICTD : avx512_conflict<0xC4, "vpconflictd", VR512, VK16WM,
- memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
- int_x86_avx512_conflict_d_512,
- int_x86_avx512_conflict_d_mask_512,
- int_x86_avx512_conflict_d_maskz_512>,
+ i512mem, i32mem, "{1to16}">,
EVEX_V512, EVEX_CD8<32, CD8VF>;
+
defm VPCONFLICTQ : avx512_conflict<0xC4, "vpconflictq", VR512, VK8WM,
- memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
- int_x86_avx512_conflict_q_512,
- int_x86_avx512_conflict_q_mask_512,
- int_x86_avx512_conflict_q_maskz_512>,
+ i512mem, i64mem, "{1to8}">,
+ EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+
+}
+
+def : Pat<(int_x86_avx512_mask_conflict_d_512 VR512:$src2, VR512:$src1,
+ GR16:$mask),
+ (VPCONFLICTDrrk VR512:$src1,
+ (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), VR512:$src2)>;
+
+def : Pat<(int_x86_avx512_mask_conflict_q_512 VR512:$src2, VR512:$src1,
+ GR8:$mask),
+ (VPCONFLICTQrrk VR512:$src1,
+ (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), VR512:$src2)>;
+
+let Predicates = [HasCDI] in {
+defm VPLZCNTD : avx512_conflict<0x44, "vplzcntd", VR512, VK16WM,
+ i512mem, i32mem, "{1to16}">,
+ EVEX_V512, EVEX_CD8<32, CD8VF>;
+
+
+defm VPLZCNTQ : avx512_conflict<0x44, "vplzcntq", VR512, VK8WM,
+ i512mem, i64mem, "{1to8}">,
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+
}
+
+def : Pat<(int_x86_avx512_mask_lzcnt_d_512 VR512:$src2, VR512:$src1,
+ GR16:$mask),
+ (VPLZCNTDrrk VR512:$src1,
+ (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), VR512:$src2)>;
+
+def : Pat<(int_x86_avx512_mask_lzcnt_q_512 VR512:$src2, VR512:$src1,
+ GR8:$mask),
+ (VPLZCNTQrrk VR512:$src1,
+ (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), VR512:$src2)>;
+
+def : Pat<(v16i32 (ctlz (memopv16i32 addr:$src))),
+ (VPLZCNTDrm addr:$src)>;
+def : Pat<(v16i32 (ctlz (v16i32 VR512:$src))),
+ (VPLZCNTDrr VR512:$src)>;
+def : Pat<(v8i64 (ctlz (memopv8i64 addr:$src))),
+ (VPLZCNTQrm addr:$src)>;
+def : Pat<(v8i64 (ctlz (v8i64 VR512:$src))),
+ (VPLZCNTQrr VR512:$src)>;
+
+def : Pat<(store (i1 -1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
+def : Pat<(store (i1 1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
+def : Pat<(store (i1 0), addr:$dst), (MOV8mi addr:$dst, (i8 0))>;
+
+def : Pat<(store VK1:$src, addr:$dst),
+ (KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK1:$src, VK16))>;
+
+def truncstorei1 : PatFrag<(ops node:$val, node:$ptr),
+ (truncstore node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
+}]>;
+
+def : Pat<(truncstorei1 GR8:$src, addr:$dst),
+ (MOV8mr addr:$dst, GR8:$src)>;
+
diff --git a/contrib/llvm/lib/Target/X86/X86InstrArithmetic.td b/contrib/llvm/lib/Target/X86/X86InstrArithmetic.td
index 7fc9c443..f2574cc 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrArithmetic.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrArithmetic.td
@@ -18,19 +18,19 @@ let SchedRW = [WriteLEA] in {
let neverHasSideEffects = 1 in
def LEA16r : I<0x8D, MRMSrcMem,
(outs GR16:$dst), (ins i32mem:$src),
- "lea{w}\t{$src|$dst}, {$dst|$src}", [], IIC_LEA_16>, OpSize;
+ "lea{w}\t{$src|$dst}, {$dst|$src}", [], IIC_LEA_16>, OpSize16;
let isReMaterializable = 1 in
def LEA32r : I<0x8D, MRMSrcMem,
(outs GR32:$dst), (ins i32mem:$src),
"lea{l}\t{$src|$dst}, {$dst|$src}",
[(set GR32:$dst, lea32addr:$src)], IIC_LEA>,
- Requires<[In32BitMode]>;
+ OpSize32, Requires<[Not64BitMode]>;
def LEA64_32r : I<0x8D, MRMSrcMem,
(outs GR32:$dst), (ins lea64_32mem:$src),
"lea{l}\t{$src|$dst}, {$dst|$src}",
[(set GR32:$dst, lea64_32addr:$src)], IIC_LEA>,
- Requires<[In64BitMode]>;
+ OpSize32, Requires<[In64BitMode]>;
let isReMaterializable = 1 in
def LEA64r : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins lea64mem:$src),
@@ -68,13 +68,13 @@ def MUL8r : I<0xF6, MRM4r, (outs), (ins GR8:$src), "mul{b}\t$src",
let Defs = [AX,DX,EFLAGS], Uses = [AX], neverHasSideEffects = 1 in
def MUL16r : I<0xF7, MRM4r, (outs), (ins GR16:$src),
"mul{w}\t$src",
- [], IIC_MUL16_REG>, OpSize, Sched<[WriteIMul]>;
+ [], IIC_MUL16_REG>, OpSize16, Sched<[WriteIMul]>;
// EAX,EDX = EAX*GR32
let Defs = [EAX,EDX,EFLAGS], Uses = [EAX], neverHasSideEffects = 1 in
def MUL32r : I<0xF7, MRM4r, (outs), (ins GR32:$src),
"mul{l}\t$src",
[/*(set EAX, EDX, EFLAGS, (X86umul_flag EAX, GR32:$src))*/],
- IIC_MUL32_REG>, Sched<[WriteIMul]>;
+ IIC_MUL32_REG>, OpSize32, Sched<[WriteIMul]>;
// RAX,RDX = RAX*GR64
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in
def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
@@ -95,12 +95,12 @@ let mayLoad = 1, neverHasSideEffects = 1 in {
let Defs = [AX,DX,EFLAGS], Uses = [AX] in
def MUL16m : I<0xF7, MRM4m, (outs), (ins i16mem:$src),
"mul{w}\t$src",
- [], IIC_MUL16_MEM>, OpSize, SchedLoadReg<WriteIMulLd>;
+ [], IIC_MUL16_MEM>, OpSize16, SchedLoadReg<WriteIMulLd>;
// EAX,EDX = EAX*[mem32]
let Defs = [EAX,EDX,EFLAGS], Uses = [EAX] in
def MUL32m : I<0xF7, MRM4m, (outs), (ins i32mem:$src),
"mul{l}\t$src",
- [], IIC_MUL32_MEM>, SchedLoadReg<WriteIMulLd>;
+ [], IIC_MUL32_MEM>, OpSize32, SchedLoadReg<WriteIMulLd>;
// RAX,RDX = RAX*[mem64]
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX] in
def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
@@ -115,11 +115,11 @@ def IMUL8r : I<0xF6, MRM5r, (outs), (ins GR8:$src), "imul{b}\t$src", [],
// AX,DX = AX*GR16
let Defs = [AX,DX,EFLAGS], Uses = [AX] in
def IMUL16r : I<0xF7, MRM5r, (outs), (ins GR16:$src), "imul{w}\t$src", [],
- IIC_IMUL16_RR>, OpSize, Sched<[WriteIMul]>;
+ IIC_IMUL16_RR>, OpSize16, Sched<[WriteIMul]>;
// EAX,EDX = EAX*GR32
let Defs = [EAX,EDX,EFLAGS], Uses = [EAX] in
def IMUL32r : I<0xF7, MRM5r, (outs), (ins GR32:$src), "imul{l}\t$src", [],
- IIC_IMUL32_RR>, Sched<[WriteIMul]>;
+ IIC_IMUL32_RR>, OpSize32, Sched<[WriteIMul]>;
// RAX,RDX = RAX*GR64
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX] in
def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src), "imul{q}\t$src", [],
@@ -133,12 +133,13 @@ def IMUL8m : I<0xF6, MRM5m, (outs), (ins i8mem :$src),
// AX,DX = AX*[mem16]
let Defs = [AX,DX,EFLAGS], Uses = [AX] in
def IMUL16m : I<0xF7, MRM5m, (outs), (ins i16mem:$src),
- "imul{w}\t$src", [], IIC_IMUL16_MEM>, OpSize,
+ "imul{w}\t$src", [], IIC_IMUL16_MEM>, OpSize16,
SchedLoadReg<WriteIMulLd>;
// EAX,EDX = EAX*[mem32]
let Defs = [EAX,EDX,EFLAGS], Uses = [EAX] in
def IMUL32m : I<0xF7, MRM5m, (outs), (ins i32mem:$src),
- "imul{l}\t$src", [], IIC_IMUL32_MEM>, SchedLoadReg<WriteIMulLd>;
+ "imul{l}\t$src", [], IIC_IMUL32_MEM>, OpSize32,
+ SchedLoadReg<WriteIMulLd>;
// RAX,RDX = RAX*[mem64]
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX] in
def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
@@ -157,12 +158,12 @@ def IMUL16rr : I<0xAF, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1,GR16:$src2),
"imul{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, EFLAGS,
(X86smul_flag GR16:$src1, GR16:$src2))], IIC_IMUL16_RR>,
- TB, OpSize;
+ TB, OpSize16;
def IMUL32rr : I<0xAF, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1,GR32:$src2),
"imul{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, EFLAGS,
(X86smul_flag GR32:$src1, GR32:$src2))], IIC_IMUL32_RR>,
- TB;
+ TB, OpSize32;
def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"imul{q}\t{$src2, $dst|$dst, $src2}",
@@ -179,14 +180,14 @@ def IMUL16rm : I<0xAF, MRMSrcMem, (outs GR16:$dst),
[(set GR16:$dst, EFLAGS,
(X86smul_flag GR16:$src1, (load addr:$src2)))],
IIC_IMUL16_RM>,
- TB, OpSize;
+ TB, OpSize16;
def IMUL32rm : I<0xAF, MRMSrcMem, (outs GR32:$dst),
(ins GR32:$src1, i32mem:$src2),
"imul{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, EFLAGS,
(X86smul_flag GR32:$src1, (load addr:$src2)))],
IIC_IMUL32_RM>,
- TB;
+ TB, OpSize32;
def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
(ins GR64:$src1, i64mem:$src2),
"imul{q}\t{$src2, $dst|$dst, $src2}",
@@ -208,30 +209,29 @@ def IMUL16rri : Ii16<0x69, MRMSrcReg, // GR16 = GR16*I16
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR16:$dst, EFLAGS,
(X86smul_flag GR16:$src1, imm:$src2))],
- IIC_IMUL16_RRI>, OpSize;
+ IIC_IMUL16_RRI>, OpSize16;
def IMUL16rri8 : Ii8<0x6B, MRMSrcReg, // GR16 = GR16*I8
(outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR16:$dst, EFLAGS,
(X86smul_flag GR16:$src1, i16immSExt8:$src2))],
- IIC_IMUL16_RRI>,
- OpSize;
+ IIC_IMUL16_RRI>, OpSize16;
def IMUL32rri : Ii32<0x69, MRMSrcReg, // GR32 = GR32*I32
(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, EFLAGS,
(X86smul_flag GR32:$src1, imm:$src2))],
- IIC_IMUL32_RRI>;
+ IIC_IMUL32_RRI>, OpSize32;
def IMUL32rri8 : Ii8<0x6B, MRMSrcReg, // GR32 = GR32*I8
(outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, EFLAGS,
(X86smul_flag GR32:$src1, i32immSExt8:$src2))],
- IIC_IMUL32_RRI>;
-def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
- (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
- "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, EFLAGS,
+ IIC_IMUL32_RRI>, OpSize32;
+def IMUL64rri32 : RIi32S<0x69, MRMSrcReg, // GR64 = GR64*I32
+ (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
+ "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR64:$dst, EFLAGS,
(X86smul_flag GR64:$src1, i64immSExt32:$src2))],
IIC_IMUL64_RRI>;
def IMUL64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8
@@ -250,31 +250,31 @@ def IMUL16rmi : Ii16<0x69, MRMSrcMem, // GR16 = [mem16]*I16
[(set GR16:$dst, EFLAGS,
(X86smul_flag (load addr:$src1), imm:$src2))],
IIC_IMUL16_RMI>,
- OpSize;
+ OpSize16;
def IMUL16rmi8 : Ii8<0x6B, MRMSrcMem, // GR16 = [mem16]*I8
(outs GR16:$dst), (ins i16mem:$src1, i16i8imm :$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR16:$dst, EFLAGS,
(X86smul_flag (load addr:$src1),
i16immSExt8:$src2))], IIC_IMUL16_RMI>,
- OpSize;
+ OpSize16;
def IMUL32rmi : Ii32<0x69, MRMSrcMem, // GR32 = [mem32]*I32
(outs GR32:$dst), (ins i32mem:$src1, i32imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, EFLAGS,
(X86smul_flag (load addr:$src1), imm:$src2))],
- IIC_IMUL32_RMI>;
+ IIC_IMUL32_RMI>, OpSize32;
def IMUL32rmi8 : Ii8<0x6B, MRMSrcMem, // GR32 = [mem32]*I8
(outs GR32:$dst), (ins i32mem:$src1, i32i8imm: $src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, EFLAGS,
(X86smul_flag (load addr:$src1),
i32immSExt8:$src2))],
- IIC_IMUL32_RMI>;
-def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
- (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
- "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, EFLAGS,
+ IIC_IMUL32_RMI>, OpSize32;
+def IMUL64rmi32 : RIi32S<0x69, MRMSrcMem, // GR64 = [mem64]*I32
+ (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
+ "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR64:$dst, EFLAGS,
(X86smul_flag (load addr:$src1),
i64immSExt32:$src2))],
IIC_IMUL64_RMI>;
@@ -299,10 +299,10 @@ def DIV8r : I<0xF6, MRM6r, (outs), (ins GR8:$src), // AX/r8 = AL,AH
"div{b}\t$src", [], IIC_DIV8_REG>;
let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
def DIV16r : I<0xF7, MRM6r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX
- "div{w}\t$src", [], IIC_DIV16>, OpSize;
+ "div{w}\t$src", [], IIC_DIV16>, OpSize16;
let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in
def DIV32r : I<0xF7, MRM6r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX
- "div{l}\t$src", [], IIC_DIV32>;
+ "div{l}\t$src", [], IIC_DIV32>, OpSize32;
// RDX:RAX/r64 = RAX,RDX
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in
def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src),
@@ -316,12 +316,12 @@ def DIV8m : I<0xF6, MRM6m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH
SchedLoadReg<WriteIDivLd>;
let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
def DIV16m : I<0xF7, MRM6m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX
- "div{w}\t$src", [], IIC_DIV16>, OpSize,
+ "div{w}\t$src", [], IIC_DIV16>, OpSize16,
SchedLoadReg<WriteIDivLd>;
let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in // EDX:EAX/[mem32] = EAX,EDX
def DIV32m : I<0xF7, MRM6m, (outs), (ins i32mem:$src),
"div{l}\t$src", [], IIC_DIV32>,
- SchedLoadReg<WriteIDivLd>;
+ SchedLoadReg<WriteIDivLd>, OpSize32;
// RDX:RAX/[mem64] = RAX,RDX
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in
def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src),
@@ -336,10 +336,10 @@ def IDIV8r : I<0xF6, MRM7r, (outs), (ins GR8:$src), // AX/r8 = AL,AH
"idiv{b}\t$src", [], IIC_IDIV8>;
let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
def IDIV16r: I<0xF7, MRM7r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX
- "idiv{w}\t$src", [], IIC_IDIV16>, OpSize;
+ "idiv{w}\t$src", [], IIC_IDIV16>, OpSize16;
let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in
def IDIV32r: I<0xF7, MRM7r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX
- "idiv{l}\t$src", [], IIC_IDIV32>;
+ "idiv{l}\t$src", [], IIC_IDIV32>, OpSize32;
// RDX:RAX/r64 = RAX,RDX
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in
def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src),
@@ -353,11 +353,11 @@ def IDIV8m : I<0xF6, MRM7m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH
SchedLoadReg<WriteIDivLd>;
let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
def IDIV16m: I<0xF7, MRM7m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX
- "idiv{w}\t$src", [], IIC_IDIV16>, OpSize,
+ "idiv{w}\t$src", [], IIC_IDIV16>, OpSize16,
SchedLoadReg<WriteIDivLd>;
let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in // EDX:EAX/[mem32] = EAX,EDX
def IDIV32m: I<0xF7, MRM7m, (outs), (ins i32mem:$src),
- "idiv{l}\t$src", [], IIC_IDIV32>,
+ "idiv{l}\t$src", [], IIC_IDIV32>, OpSize32,
SchedLoadReg<WriteIDivLd>;
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in // RDX:RAX/[mem64] = RAX,RDX
def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src),
@@ -381,11 +381,11 @@ def NEG8r : I<0xF6, MRM3r, (outs GR8 :$dst), (ins GR8 :$src1),
def NEG16r : I<0xF7, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
"neg{w}\t$dst",
[(set GR16:$dst, (ineg GR16:$src1)),
- (implicit EFLAGS)], IIC_UNARY_REG>, OpSize;
+ (implicit EFLAGS)], IIC_UNARY_REG>, OpSize16;
def NEG32r : I<0xF7, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
"neg{l}\t$dst",
[(set GR32:$dst, (ineg GR32:$src1)),
- (implicit EFLAGS)], IIC_UNARY_REG>;
+ (implicit EFLAGS)], IIC_UNARY_REG>, OpSize32;
def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src1), "neg{q}\t$dst",
[(set GR64:$dst, (ineg GR64:$src1)),
(implicit EFLAGS)], IIC_UNARY_REG>;
@@ -400,11 +400,11 @@ def NEG8m : I<0xF6, MRM3m, (outs), (ins i8mem :$dst),
def NEG16m : I<0xF7, MRM3m, (outs), (ins i16mem:$dst),
"neg{w}\t$dst",
[(store (ineg (loadi16 addr:$dst)), addr:$dst),
- (implicit EFLAGS)], IIC_UNARY_MEM>, OpSize;
+ (implicit EFLAGS)], IIC_UNARY_MEM>, OpSize16;
def NEG32m : I<0xF7, MRM3m, (outs), (ins i32mem:$dst),
"neg{l}\t$dst",
[(store (ineg (loadi32 addr:$dst)), addr:$dst),
- (implicit EFLAGS)], IIC_UNARY_MEM>;
+ (implicit EFLAGS)], IIC_UNARY_MEM>, OpSize32;
def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
[(store (ineg (loadi64 addr:$dst)), addr:$dst),
(implicit EFLAGS)], IIC_UNARY_MEM>;
@@ -422,10 +422,10 @@ def NOT8r : I<0xF6, MRM2r, (outs GR8 :$dst), (ins GR8 :$src1),
[(set GR8:$dst, (not GR8:$src1))], IIC_UNARY_REG>;
def NOT16r : I<0xF7, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
"not{w}\t$dst",
- [(set GR16:$dst, (not GR16:$src1))], IIC_UNARY_REG>, OpSize;
+ [(set GR16:$dst, (not GR16:$src1))], IIC_UNARY_REG>, OpSize16;
def NOT32r : I<0xF7, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
"not{l}\t$dst",
- [(set GR32:$dst, (not GR32:$src1))], IIC_UNARY_REG>;
+ [(set GR32:$dst, (not GR32:$src1))], IIC_UNARY_REG>, OpSize32;
def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src1), "not{q}\t$dst",
[(set GR64:$dst, (not GR64:$src1))], IIC_UNARY_REG>;
}
@@ -438,10 +438,11 @@ def NOT8m : I<0xF6, MRM2m, (outs), (ins i8mem :$dst),
def NOT16m : I<0xF7, MRM2m, (outs), (ins i16mem:$dst),
"not{w}\t$dst",
[(store (not (loadi16 addr:$dst)), addr:$dst)], IIC_UNARY_MEM>,
- OpSize;
+ OpSize16;
def NOT32m : I<0xF7, MRM2m, (outs), (ins i32mem:$dst),
"not{l}\t$dst",
- [(store (not (loadi32 addr:$dst)), addr:$dst)], IIC_UNARY_MEM>;
+ [(store (not (loadi32 addr:$dst)), addr:$dst)], IIC_UNARY_MEM>,
+ OpSize32;
def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
[(store (not (loadi64 addr:$dst)), addr:$dst)], IIC_UNARY_MEM>;
} // SchedRW
@@ -460,12 +461,12 @@ let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA.
def INC16r : I<0x40, AddRegFrm, (outs GR16:$dst), (ins GR16:$src1),
"inc{w}\t$dst",
[(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src1))], IIC_UNARY_REG>,
- OpSize, Requires<[In32BitMode]>;
+ OpSize16, Requires<[Not64BitMode]>;
def INC32r : I<0x40, AddRegFrm, (outs GR32:$dst), (ins GR32:$src1),
"inc{l}\t$dst",
[(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src1))],
IIC_UNARY_REG>,
- Requires<[In32BitMode]>;
+ OpSize32, Requires<[Not64BitMode]>;
def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src1), "inc{q}\t$dst",
[(set GR64:$dst, EFLAGS, (X86inc_flag GR64:$src1))],
IIC_UNARY_REG>;
@@ -479,38 +480,39 @@ def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
"inc{w}\t$dst",
[(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src1))],
IIC_UNARY_REG>,
- OpSize, Requires<[In64BitMode]>;
+ OpSize16, Requires<[In64BitMode]>;
def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
"inc{l}\t$dst",
[(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src1))],
IIC_UNARY_REG>,
- Requires<[In64BitMode]>;
+ OpSize32, Requires<[In64BitMode]>;
def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
"dec{w}\t$dst",
[(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src1))],
IIC_UNARY_REG>,
- OpSize, Requires<[In64BitMode]>;
+ OpSize16, Requires<[In64BitMode]>;
def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
"dec{l}\t$dst",
[(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src1))],
IIC_UNARY_REG>,
- Requires<[In64BitMode]>;
+ OpSize32, Requires<[In64BitMode]>;
} // isConvertibleToThreeAddress = 1, CodeSize = 2
-let isCodeGenOnly = 1, CodeSize = 2 in {
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
+ CodeSize = 2 in {
def INC32_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
"inc{w}\t$dst", [], IIC_UNARY_REG>,
- OpSize, Requires<[In32BitMode]>;
+ OpSize16, Requires<[Not64BitMode]>;
def INC32_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
"inc{l}\t$dst", [], IIC_UNARY_REG>,
- Requires<[In32BitMode]>;
+ OpSize32, Requires<[Not64BitMode]>;
def DEC32_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
"dec{w}\t$dst", [], IIC_UNARY_REG>,
- OpSize, Requires<[In32BitMode]>;
+ OpSize16, Requires<[Not64BitMode]>;
def DEC32_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
"dec{l}\t$dst", [], IIC_UNARY_REG>,
- Requires<[In32BitMode]>;
-} // isCodeGenOnly = 1, CodeSize = 2
+ OpSize32, Requires<[Not64BitMode]>;
+} // isCodeGenOnly = 1, ForceDisassemble = 1, HasSideEffects = 0, CodeSize = 2
} // Constraints = "$src1 = $dst", SchedRW
@@ -521,11 +523,11 @@ let CodeSize = 2, SchedRW = [WriteALULd, WriteRMW] in {
def INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
[(store (add (loadi16 addr:$dst), 1), addr:$dst),
(implicit EFLAGS)], IIC_UNARY_MEM>,
- OpSize, Requires<[In32BitMode]>;
+ OpSize16, Requires<[Not64BitMode]>;
def INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
[(store (add (loadi32 addr:$dst), 1), addr:$dst),
(implicit EFLAGS)], IIC_UNARY_MEM>,
- Requires<[In32BitMode]>;
+ OpSize32, Requires<[Not64BitMode]>;
def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
[(store (add (loadi64 addr:$dst), 1), addr:$dst),
(implicit EFLAGS)], IIC_UNARY_MEM>;
@@ -536,19 +538,19 @@ let CodeSize = 2, SchedRW = [WriteALULd, WriteRMW] in {
def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
[(store (add (loadi16 addr:$dst), 1), addr:$dst),
(implicit EFLAGS)], IIC_UNARY_MEM>,
- OpSize, Requires<[In64BitMode]>;
+ OpSize16, Requires<[In64BitMode]>;
def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
[(store (add (loadi32 addr:$dst), 1), addr:$dst),
(implicit EFLAGS)], IIC_UNARY_MEM>,
- Requires<[In64BitMode]>;
+ OpSize32, Requires<[In64BitMode]>;
def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
[(store (add (loadi16 addr:$dst), -1), addr:$dst),
(implicit EFLAGS)], IIC_UNARY_MEM>,
- OpSize, Requires<[In64BitMode]>;
+ OpSize16, Requires<[In64BitMode]>;
def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
[(store (add (loadi32 addr:$dst), -1), addr:$dst),
(implicit EFLAGS)], IIC_UNARY_MEM>,
- Requires<[In64BitMode]>;
+ OpSize32, Requires<[In64BitMode]>;
} // CodeSize = 2, SchedRW
let Constraints = "$src1 = $dst", SchedRW = [WriteALU] in {
@@ -562,12 +564,12 @@ def DEC16r : I<0x48, AddRegFrm, (outs GR16:$dst), (ins GR16:$src1),
"dec{w}\t$dst",
[(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src1))],
IIC_UNARY_REG>,
- OpSize, Requires<[In32BitMode]>;
+ OpSize16, Requires<[Not64BitMode]>;
def DEC32r : I<0x48, AddRegFrm, (outs GR32:$dst), (ins GR32:$src1),
"dec{l}\t$dst",
[(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src1))],
IIC_UNARY_REG>,
- Requires<[In32BitMode]>;
+ OpSize32, Requires<[Not64BitMode]>;
def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src1), "dec{q}\t$dst",
[(set GR64:$dst, EFLAGS, (X86dec_flag GR64:$src1))],
IIC_UNARY_REG>;
@@ -582,11 +584,11 @@ let CodeSize = 2, SchedRW = [WriteALULd, WriteRMW] in {
def DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
[(store (add (loadi16 addr:$dst), -1), addr:$dst),
(implicit EFLAGS)], IIC_UNARY_MEM>,
- OpSize, Requires<[In32BitMode]>;
+ OpSize16, Requires<[Not64BitMode]>;
def DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
[(store (add (loadi32 addr:$dst), -1), addr:$dst),
(implicit EFLAGS)], IIC_UNARY_MEM>,
- Requires<[In32BitMode]>;
+ OpSize32, Requires<[Not64BitMode]>;
def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
[(store (add (loadi64 addr:$dst), -1), addr:$dst),
(implicit EFLAGS)], IIC_UNARY_MEM>;
@@ -600,7 +602,8 @@ class X86TypeInfo<ValueType vt, string instrsuffix, RegisterClass regclass,
PatFrag loadnode, X86MemOperand memoperand, ImmType immkind,
Operand immoperand, SDPatternOperator immoperator,
Operand imm8operand, SDPatternOperator imm8operator,
- bit hasOddOpcode, bit hasOpSizePrefix, bit hasREX_WPrefix> {
+ bit hasOddOpcode, OperandSize opSize,
+ bit hasREX_WPrefix> {
/// VT - This is the value type itself.
ValueType VT = vt;
@@ -650,9 +653,10 @@ class X86TypeInfo<ValueType vt, string instrsuffix, RegisterClass regclass,
/// other datatypes are odd.
bit HasOddOpcode = hasOddOpcode;
- /// HasOpSizePrefix - This bit is set to true if the instruction should have
- /// the 0x66 operand size prefix. This is set for i16 types.
- bit HasOpSizePrefix = hasOpSizePrefix;
+ /// OpSize - Selects whether the instruction needs a 0x66 prefix based on
+ /// 16-bit vs 32-bit mode. i8/i64 set this to OpSizeFixed. i16 sets this
+ /// to Opsize16. i32 sets this to OpSize32.
+ OperandSize OpSize = opSize;
/// HasREX_WPrefix - This bit is set to true if the instruction should have
/// the 0x40 REX prefix. This is set for i64 types.
@@ -664,16 +668,16 @@ def invalid_node : SDNode<"<<invalid_node>>", SDTIntLeaf,[],"<<invalid_node>>">;
def Xi8 : X86TypeInfo<i8 , "b", GR8 , loadi8 , i8mem ,
Imm8 , i8imm , imm, i8imm , invalid_node,
- 0, 0, 0>;
+ 0, OpSizeFixed, 0>;
def Xi16 : X86TypeInfo<i16, "w", GR16, loadi16, i16mem,
Imm16, i16imm, imm, i16i8imm, i16immSExt8,
- 1, 1, 0>;
+ 1, OpSize16, 0>;
def Xi32 : X86TypeInfo<i32, "l", GR32, loadi32, i32mem,
Imm32, i32imm, imm, i32i8imm, i32immSExt8,
- 1, 0, 0>;
+ 1, OpSize32, 0>;
def Xi64 : X86TypeInfo<i64, "q", GR64, loadi64, i64mem,
- Imm32, i64i32imm, i64immSExt32, i64i8imm, i64immSExt8,
- 1, 0, 1>;
+ Imm32S, i64i32imm, i64immSExt32, i64i8imm, i64immSExt8,
+ 1, OpSizeFixed, 1>;
/// ITy - This instruction base class takes the type info for the instruction.
/// Using this, it:
@@ -693,7 +697,7 @@ class ITy<bits<8> opcode, Format f, X86TypeInfo typeinfo, dag outs, dag ins,
itin> {
// Infer instruction prefixes from type info.
- let hasOpSizePrefix = typeinfo.HasOpSizePrefix;
+ let OpSize = typeinfo.OpSize;
let hasREX_WPrefix = typeinfo.HasREX_WPrefix;
}
@@ -752,6 +756,7 @@ class BinOpRR_Rev<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
Sched<[WriteALU]> {
// The disassembler should know about this, but not the asmparser.
let isCodeGenOnly = 1;
+ let ForceDisassemble = 1;
let hasSideEffects = 0;
}
@@ -767,6 +772,7 @@ class BinOpRR_F_Rev<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo>
Sched<[WriteALU]> {
// The disassembler should know about this, but not the asmparser.
let isCodeGenOnly = 1;
+ let ForceDisassemble = 1;
let hasSideEffects = 0;
}
@@ -1272,8 +1278,10 @@ let isCompare = 1 in {
def TEST64mi32 : BinOpMI_F<"test", Xi64, X86testpat, MRM0m, 0xF6>;
// When testing the result of EXTRACT_SUBREG sub_8bit_hi, make sure the
- // register class is constrained to GR8_NOREX.
- let isPseudo = 1 in
+ // register class is constrained to GR8_NOREX. This pseudo is explicitly
+ // marked side-effect free, since it doesn't have an isel pattern like
+ // other test instructions.
+ let isPseudo = 1, hasSideEffects = 0 in
def TEST8ri_NOREX : I<0, Pseudo, (outs), (ins GR8_NOREX:$src, i8imm:$mask),
"", [], IIC_BIN_NONMEM>, Sched<[WriteALU]>;
} // Defs = [EFLAGS]
@@ -1305,8 +1313,8 @@ multiclass bmi_andn<string mnemonic, RegisterClass RC, X86MemOperand x86memop,
}
let Predicates = [HasBMI], Defs = [EFLAGS] in {
- defm ANDN32 : bmi_andn<"andn{l}", GR32, i32mem, loadi32>, T8, VEX_4V;
- defm ANDN64 : bmi_andn<"andn{q}", GR64, i64mem, loadi64>, T8, VEX_4V, VEX_W;
+ defm ANDN32 : bmi_andn<"andn{l}", GR32, i32mem, loadi32>, T8PS, VEX_4V;
+ defm ANDN64 : bmi_andn<"andn{q}", GR64, i64mem, loadi64>, T8PS, VEX_4V, VEX_W;
}
let Predicates = [HasBMI] in {
@@ -1351,21 +1359,21 @@ let hasSideEffects = 0, Predicates = [HasADX], Defs = [EFLAGS] in {
let SchedRW = [WriteALU] in {
def ADCX32rr : I<0xF6, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"adcx{l}\t{$src, $dst|$dst, $src}",
- [], IIC_BIN_NONMEM>, T8, OpSize;
+ [], IIC_BIN_NONMEM>, T8PD;
- def ADCX64rr : I<0xF6, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
+ def ADCX64rr : RI<0xF6, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"adcx{q}\t{$src, $dst|$dst, $src}",
- [], IIC_BIN_NONMEM>, T8, OpSize, REX_W, Requires<[In64BitMode]>;
+ [], IIC_BIN_NONMEM>, T8PD, Requires<[In64BitMode]>;
} // SchedRW
let mayLoad = 1, SchedRW = [WriteALULd] in {
def ADCX32rm : I<0xF6, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"adcx{l}\t{$src, $dst|$dst, $src}",
- [], IIC_BIN_MEM>, T8, OpSize;
+ [], IIC_BIN_MEM>, T8PD;
- def ADCX64rm : I<0xF6, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
+ def ADCX64rm : RI<0xF6, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"adcx{q}\t{$src, $dst|$dst, $src}",
- [], IIC_BIN_MEM>, T8, OpSize, REX_W, Requires<[In64BitMode]>;
+ [], IIC_BIN_MEM>, T8PD, Requires<[In64BitMode]>;
}
}
@@ -1378,9 +1386,9 @@ let hasSideEffects = 0, Predicates = [HasADX], Defs = [EFLAGS] in {
"adox{l}\t{$src, $dst|$dst, $src}",
[], IIC_BIN_NONMEM>, T8XS;
- def ADOX64rr : I<0xF6, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
+ def ADOX64rr : RI<0xF6, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"adox{q}\t{$src, $dst|$dst, $src}",
- [], IIC_BIN_NONMEM>, T8XS, REX_W, Requires<[In64BitMode]>;
+ [], IIC_BIN_NONMEM>, T8XS, Requires<[In64BitMode]>;
} // SchedRW
let mayLoad = 1, SchedRW = [WriteALULd] in {
@@ -1388,8 +1396,8 @@ let hasSideEffects = 0, Predicates = [HasADX], Defs = [EFLAGS] in {
"adox{l}\t{$src, $dst|$dst, $src}",
[], IIC_BIN_MEM>, T8XS;
- def ADOX64rm : I<0xF6, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
+ def ADOX64rm : RI<0xF6, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"adox{q}\t{$src, $dst|$dst, $src}",
- [], IIC_BIN_MEM>, T8XS, REX_W, Requires<[In64BitMode]>;
+ [], IIC_BIN_MEM>, T8XS, Requires<[In64BitMode]>;
}
}
diff --git a/contrib/llvm/lib/Target/X86/X86InstrBuilder.h b/contrib/llvm/lib/Target/X86/X86InstrBuilder.h
index aaef4a4..e421f8c 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrBuilder.h
+++ b/contrib/llvm/lib/Target/X86/X86InstrBuilder.h
@@ -52,7 +52,8 @@ struct X86AddressMode {
unsigned GVOpFlags;
X86AddressMode()
- : BaseType(RegBase), Scale(1), IndexReg(0), Disp(0), GV(0), GVOpFlags(0) {
+ : BaseType(RegBase), Scale(1), IndexReg(0), Disp(0), GV(nullptr),
+ GVOpFlags(0) {
Base.Reg = 0;
}
diff --git a/contrib/llvm/lib/Target/X86/X86InstrCMovSetCC.td b/contrib/llvm/lib/Target/X86/X86InstrCMovSetCC.td
index a967a4d..315f213 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrCMovSetCC.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrCMovSetCC.td
@@ -22,13 +22,13 @@ multiclass CMOV<bits<8> opc, string Mnemonic, PatLeaf CondNode> {
!strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
[(set GR16:$dst,
(X86cmov GR16:$src1, GR16:$src2, CondNode, EFLAGS))],
- IIC_CMOV16_RR>,TB,OpSize;
+ IIC_CMOV16_RR>, TB, OpSize16;
def NAME#32rr
: I<opc, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
!strconcat(Mnemonic, "{l}\t{$src2, $dst|$dst, $src2}"),
[(set GR32:$dst,
(X86cmov GR32:$src1, GR32:$src2, CondNode, EFLAGS))],
- IIC_CMOV32_RR>, TB;
+ IIC_CMOV32_RR>, TB, OpSize32;
def NAME#64rr
:RI<opc, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
!strconcat(Mnemonic, "{q}\t{$src2, $dst|$dst, $src2}"),
@@ -44,12 +44,13 @@ multiclass CMOV<bits<8> opc, string Mnemonic, PatLeaf CondNode> {
!strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
[(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
CondNode, EFLAGS))], IIC_CMOV16_RM>,
- TB, OpSize;
+ TB, OpSize16;
def NAME#32rm
: I<opc, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
!strconcat(Mnemonic, "{l}\t{$src2, $dst|$dst, $src2}"),
[(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
- CondNode, EFLAGS))], IIC_CMOV32_RM>, TB;
+ CondNode, EFLAGS))], IIC_CMOV32_RM>,
+ TB, OpSize32;
def NAME#64rm
:RI<opc, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
!strconcat(Mnemonic, "{q}\t{$src2, $dst|$dst, $src2}"),
@@ -81,11 +82,11 @@ defm CMOVG : CMOV<0x4F, "cmovg" , X86_COND_G>;
// SetCC instructions.
multiclass SETCC<bits<8> opc, string Mnemonic, PatLeaf OpNode> {
let Uses = [EFLAGS] in {
- def r : I<opc, MRM0r, (outs GR8:$dst), (ins),
+ def r : I<opc, MRMXr, (outs GR8:$dst), (ins),
!strconcat(Mnemonic, "\t$dst"),
[(set GR8:$dst, (X86setcc OpNode, EFLAGS))],
IIC_SET_R>, TB, Sched<[WriteALU]>;
- def m : I<opc, MRM0m, (outs), (ins i8mem:$dst),
+ def m : I<opc, MRMXm, (outs), (ins i8mem:$dst),
!strconcat(Mnemonic, "\t$dst"),
[(store (X86setcc OpNode, EFLAGS), addr:$dst)],
IIC_SET_M>, TB, Sched<[WriteALU, WriteStore]>;
diff --git a/contrib/llvm/lib/Target/X86/X86InstrCompiler.td b/contrib/llvm/lib/Target/X86/X86InstrCompiler.td
index 5c88408..ca4f608 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -46,11 +46,11 @@ let Defs = [ESP, EFLAGS], Uses = [ESP] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt),
"#ADJCALLSTACKDOWN",
[(X86callseq_start timm:$amt)]>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
def ADJCALLSTACKUP32 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
"#ADJCALLSTACKUP",
[(X86callseq_end timm:$amt1, timm:$amt2)]>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
}
// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
@@ -110,7 +110,7 @@ let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
-// allocated by bumping the stack pointer. Otherwise memory is allocated from
+// allocated by bumping the stack pointer. Otherwise memory is allocated from
// the heap.
let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
@@ -118,7 +118,7 @@ def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
"# variable sized alloca for segmented stacks",
[(set GR32:$dst,
(X86SegAlloca GR32:$size))]>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
@@ -140,12 +140,12 @@ let Defs = [EAX, EDX, ECX, EFLAGS], FPForm = SpecialFP in {
def WIN_FTOL_32 : I<0, Pseudo, (outs), (ins RFP32:$src),
"# win32 fptoui",
[(X86WinFTOL RFP32:$src)]>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
def WIN_FTOL_64 : I<0, Pseudo, (outs), (ins RFP64:$src),
"# win32 fptoui",
[(X86WinFTOL RFP64:$src)]>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
}
//===----------------------------------------------------------------------===//
@@ -173,7 +173,7 @@ let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
def EH_SjLj_SetJmp32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
"#EH_SJLJ_SETJMP32",
[(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
def EH_SjLj_SetJmp64 : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),
"#EH_SJLJ_SETJMP64",
[(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
@@ -182,7 +182,7 @@ let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),
"#EH_SJLJ_LONGJMP32",
[(X86eh_sjlj_longjmp addr:$buf)]>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),
"#EH_SJLJ_LONGJMP64",
[(X86eh_sjlj_longjmp addr:$buf)]>,
@@ -197,6 +197,26 @@ let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
}
//===----------------------------------------------------------------------===//
+// Pseudo instructions used by unwind info.
+//
+let isPseudo = 1 in {
+ def SEH_PushReg : I<0, Pseudo, (outs), (ins i32imm:$reg),
+ "#SEH_PushReg $reg", []>;
+ def SEH_SaveReg : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
+ "#SEH_SaveReg $reg, $dst", []>;
+ def SEH_SaveXMM : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
+ "#SEH_SaveXMM $reg, $dst", []>;
+ def SEH_StackAlloc : I<0, Pseudo, (outs), (ins i32imm:$size),
+ "#SEH_StackAlloc $size", []>;
+ def SEH_SetFrame : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$offset),
+ "#SEH_SetFrame $reg, $offset", []>;
+ def SEH_PushFrame : I<0, Pseudo, (outs), (ins i1imm:$mode),
+ "#SEH_PushFrame $mode", []>;
+ def SEH_EndPrologue : I<0, Pseudo, (outs), (ins),
+ "#SEH_EndPrologue", []>;
+}
+
+//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//
@@ -220,10 +240,9 @@ def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins),
// Alias instruction mapping movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
-// FIXME: Set encoding to pseudo.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
- isCodeGenOnly = 1 in
-def MOV32r0 : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), "",
+ isPseudo = 1 in
+def MOV32r0 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
[(set GR32:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>;
// Other widths can also make use of the 32-bit xor, which may have a smaller
@@ -319,13 +338,13 @@ let SchedRW = [WriteMicrocoded] in {
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
[(X86rep_movs i8)], IIC_REP_MOVS>, REP,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
- [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize,
- Requires<[In32BitMode]>;
+ [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize16,
+ Requires<[Not64BitMode]>;
def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
- [(X86rep_movs i32)], IIC_REP_MOVS>, REP,
- Requires<[In32BitMode]>;
+ [(X86rep_movs i32)], IIC_REP_MOVS>, REP, OpSize32,
+ Requires<[Not64BitMode]>;
}
let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
@@ -333,10 +352,10 @@ def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
[(X86rep_movs i8)], IIC_REP_MOVS>, REP,
Requires<[In64BitMode]>;
def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
- [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize,
+ [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize16,
Requires<[In64BitMode]>;
def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
- [(X86rep_movs i32)], IIC_REP_MOVS>, REP,
+ [(X86rep_movs i32)], IIC_REP_MOVS>, REP, OpSize32,
Requires<[In64BitMode]>;
def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
[(X86rep_movs i64)], IIC_REP_MOVS>, REP,
@@ -348,15 +367,15 @@ let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
let Uses = [AL,ECX,EDI] in
def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
[(X86rep_stos i8)], IIC_REP_STOS>, REP,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
let Uses = [AX,ECX,EDI] in
def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
- [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize,
- Requires<[In32BitMode]>;
+ [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize16,
+ Requires<[Not64BitMode]>;
let Uses = [EAX,ECX,EDI] in
def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
- [(X86rep_stos i32)], IIC_REP_STOS>, REP,
- Requires<[In32BitMode]>;
+ [(X86rep_stos i32)], IIC_REP_STOS>, REP, OpSize32,
+ Requires<[Not64BitMode]>;
}
let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
@@ -366,13 +385,13 @@ let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
Requires<[In64BitMode]>;
let Uses = [AX,RCX,RDI] in
def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
- [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize,
+ [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize16,
Requires<[In64BitMode]>;
let Uses = [RAX,RCX,RDI] in
def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
- [(X86rep_stos i32)], IIC_REP_STOS>, REP,
+ [(X86rep_stos i32)], IIC_REP_STOS>, REP, OpSize32,
Requires<[In64BitMode]>;
-
+
let Uses = [RAX,RCX,RDI] in
def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
[(X86rep_stos i64)], IIC_REP_STOS>, REP,
@@ -396,11 +415,11 @@ let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
"# TLS_addr32",
[(X86tlsaddr tls32addr:$sym)]>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
"# TLS_base_addr32",
[(X86tlsbaseaddr tls32baseaddr:$sym)]>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
}
// All calls clobber the non-callee saved registers. RSP is marked as
@@ -432,7 +451,7 @@ let Defs = [EAX, ECX, EFLAGS],
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
"# TLSCall_32",
[(X86TLSCall addr:$sym)]>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
// For x86_64, the address of the thunk is passed in %rdi, on return
// the address of the variable is in %rax. All other registers are preserved.
@@ -503,83 +522,6 @@ def CMOV_RFP80 : I<0, Pseudo,
//===----------------------------------------------------------------------===//
-// Atomic Instruction Pseudo Instructions
-//===----------------------------------------------------------------------===//
-
-// Pseudo atomic instructions
-
-multiclass PSEUDO_ATOMIC_LOAD_BINOP<string mnemonic> {
- let usesCustomInserter = 1, mayLoad = 1, mayStore = 1 in {
- let Defs = [EFLAGS, AL] in
- def NAME#8 : I<0, Pseudo, (outs GR8:$dst),
- (ins i8mem:$ptr, GR8:$val),
- !strconcat(mnemonic, "8 PSEUDO!"), []>;
- let Defs = [EFLAGS, AX] in
- def NAME#16 : I<0, Pseudo,(outs GR16:$dst),
- (ins i16mem:$ptr, GR16:$val),
- !strconcat(mnemonic, "16 PSEUDO!"), []>;
- let Defs = [EFLAGS, EAX] in
- def NAME#32 : I<0, Pseudo, (outs GR32:$dst),
- (ins i32mem:$ptr, GR32:$val),
- !strconcat(mnemonic, "32 PSEUDO!"), []>;
- let Defs = [EFLAGS, RAX] in
- def NAME#64 : I<0, Pseudo, (outs GR64:$dst),
- (ins i64mem:$ptr, GR64:$val),
- !strconcat(mnemonic, "64 PSEUDO!"), []>;
- }
-}
-
-multiclass PSEUDO_ATOMIC_LOAD_BINOP_PATS<string name, string frag> {
- def : Pat<(!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val),
- (!cast<Instruction>(name # "8") addr:$ptr, GR8:$val)>;
- def : Pat<(!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val),
- (!cast<Instruction>(name # "16") addr:$ptr, GR16:$val)>;
- def : Pat<(!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val),
- (!cast<Instruction>(name # "32") addr:$ptr, GR32:$val)>;
- def : Pat<(!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val),
- (!cast<Instruction>(name # "64") addr:$ptr, GR64:$val)>;
-}
-
-// Atomic exchange, and, or, xor
-defm ATOMAND : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMAND">;
-defm ATOMOR : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMOR">;
-defm ATOMXOR : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMXOR">;
-defm ATOMNAND : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMNAND">;
-defm ATOMMAX : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMMAX">;
-defm ATOMMIN : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMMIN">;
-defm ATOMUMAX : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMUMAX">;
-defm ATOMUMIN : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMUMIN">;
-
-defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMAND", "atomic_load_and">;
-defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMOR", "atomic_load_or">;
-defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMXOR", "atomic_load_xor">;
-defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMNAND", "atomic_load_nand">;
-defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMMAX", "atomic_load_max">;
-defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMMIN", "atomic_load_min">;
-defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMUMAX", "atomic_load_umax">;
-defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMUMIN", "atomic_load_umin">;
-
-multiclass PSEUDO_ATOMIC_LOAD_BINOP6432<string mnemonic> {
- let usesCustomInserter = 1, Defs = [EFLAGS, EAX, EDX],
- mayLoad = 1, mayStore = 1, hasSideEffects = 0 in
- def NAME#6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
- (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
- !strconcat(mnemonic, "6432 PSEUDO!"), []>;
-}
-
-defm ATOMAND : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMAND">;
-defm ATOMOR : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMOR">;
-defm ATOMXOR : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMXOR">;
-defm ATOMNAND : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMNAND">;
-defm ATOMADD : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMADD">;
-defm ATOMSUB : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMSUB">;
-defm ATOMMAX : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMMAX">;
-defm ATOMMIN : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMMIN">;
-defm ATOMUMAX : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMUMAX">;
-defm ATOMUMIN : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMUMIN">;
-defm ATOMSWAP : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMSWAP">;
-
-//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//
@@ -591,7 +533,7 @@ defm ATOMSWAP : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMSWAP">;
let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mrLocked : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
"or{l}\t{$zero, $dst|$dst, $zero}",
- [], IIC_ALU_MEM>, Requires<[In32BitMode]>, LOCK,
+ [], IIC_ALU_MEM>, Requires<[Not64BitMode]>, LOCK,
Sched<[WriteALULd, WriteRMW]>;
let hasSideEffects = 1 in
@@ -619,13 +561,13 @@ def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
!strconcat(mnemonic, "{w}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_NONMEM>, OpSize, LOCK;
+ [], IIC_ALU_NONMEM>, OpSize16, LOCK;
def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
!strconcat(mnemonic, "{l}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_NONMEM>, LOCK;
+ [], IIC_ALU_NONMEM>, OpSize32, LOCK;
def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
@@ -645,14 +587,14 @@ def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
!strconcat(mnemonic, "{w}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, OpSize, LOCK;
+ [], IIC_ALU_MEM>, OpSize16, LOCK;
def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
!strconcat(mnemonic, "{l}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, LOCK;
+ [], IIC_ALU_MEM>, OpSize32, LOCK;
def NAME#64mi32 : RIi32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
@@ -666,13 +608,13 @@ def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
!strconcat(mnemonic, "{w}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, OpSize, LOCK;
+ [], IIC_ALU_MEM>, OpSize16, LOCK;
def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
!strconcat(mnemonic, "{l}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, LOCK;
+ [], IIC_ALU_MEM>, OpSize32, LOCK;
def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
@@ -701,10 +643,10 @@ def NAME#8m : I<Opc8, Form, (outs), (ins i8mem :$dst),
[], IIC_UNARY_MEM>, LOCK;
def NAME#16m : I<Opc, Form, (outs), (ins i16mem:$dst),
!strconcat(mnemonic, "{w}\t$dst"),
- [], IIC_UNARY_MEM>, OpSize, LOCK;
+ [], IIC_UNARY_MEM>, OpSize16, LOCK;
def NAME#32m : I<Opc, Form, (outs), (ins i32mem:$dst),
!strconcat(mnemonic, "{l}\t$dst"),
- [], IIC_UNARY_MEM>, LOCK;
+ [], IIC_UNARY_MEM>, OpSize32, LOCK;
def NAME#64m : RI<Opc, Form, (outs), (ins i64mem:$dst),
!strconcat(mnemonic, "{q}\t$dst"),
[], IIC_UNARY_MEM>, LOCK;
@@ -736,11 +678,11 @@ let isCodeGenOnly = 1, SchedRW = [WriteALULd, WriteRMW] in {
let Defs = [AX, EFLAGS], Uses = [AX] in
def NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap),
!strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"),
- [(frag addr:$ptr, GR16:$swap, 2)], itin>, TB, OpSize, LOCK;
+ [(frag addr:$ptr, GR16:$swap, 2)], itin>, TB, OpSize16, LOCK;
let Defs = [EAX, EFLAGS], Uses = [EAX] in
def NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap),
!strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"),
- [(frag addr:$ptr, GR32:$swap, 4)], itin>, TB, LOCK;
+ [(frag addr:$ptr, GR32:$swap, 4)], itin>, TB, OpSize32, LOCK;
let Defs = [RAX, EFLAGS], Uses = [RAX] in
def NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap),
!strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"),
@@ -783,14 +725,14 @@ multiclass ATOMIC_LOAD_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
[(set
GR16:$dst,
(!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))],
- itin>, OpSize;
+ itin>, OpSize16;
def NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),
(ins GR32:$val, i32mem:$ptr),
!strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
[(set
GR32:$dst,
(!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))],
- itin>;
+ itin>, OpSize32;
def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
(ins GR64:$val, i64mem:$ptr),
!strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
@@ -1021,22 +963,22 @@ def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
(TCRETURNri ptr_rc_tailcall:$dst, imm:$off)>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
def : Pat<(X86tcret (load addr:$dst), imm:$off),
(TCRETURNmi addr:$dst, imm:$off)>,
- Requires<[In32BitMode, IsNotPIC]>;
+ Requires<[Not64BitMode, IsNotPIC]>;
def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
(TCRETURNdi texternalsym:$dst, imm:$off)>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
(TCRETURNdi texternalsym:$dst, imm:$off)>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
(TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
@@ -1188,9 +1130,9 @@ def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
APInt KnownZero0, KnownOne0;
- CurDAG->ComputeMaskedBits(N->getOperand(0), KnownZero0, KnownOne0, 0);
+ CurDAG->computeKnownBits(N->getOperand(0), KnownZero0, KnownOne0, 0);
APInt KnownZero1, KnownOne1;
- CurDAG->ComputeMaskedBits(N->getOperand(1), KnownZero1, KnownOne1, 0);
+ CurDAG->computeKnownBits(N->getOperand(1), KnownZero1, KnownOne1, 0);
return (~KnownZero0 & ~KnownZero1) == 0;
}]>;
@@ -1305,13 +1247,13 @@ def : Pat<(and GR32:$src1, 0xff),
(MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,
GR32_ABCD)),
sub_8bit))>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
(EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG
(i16 (COPY_TO_REGCLASS GR16:$src1, GR16_ABCD)), sub_8bit)),
sub_16bit)>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
@@ -1346,13 +1288,13 @@ def : Pat<(sext_inreg GR32:$src, i8),
(MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
GR32_ABCD)),
sub_8bit))>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
(EXTRACT_SUBREG (i32 (MOVSX32rr8 (EXTRACT_SUBREG
(i32 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), sub_8bit))),
sub_16bit)>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
def : Pat<(sext_inreg GR64:$src, i32),
(MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
@@ -1384,11 +1326,11 @@ def : Pat<(i16 (trunc GR32:$src)),
def : Pat<(i8 (trunc GR32:$src)),
(EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
sub_8bit)>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
(EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
sub_8bit)>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
(EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
@@ -1406,38 +1348,38 @@ def : Pat<(i8 (trunc GR16:$src)),
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
(EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
sub_8bit_hi)>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
(EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
sub_8bit_hi)>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
(EXTRACT_SUBREG
(MOVZX32rr8
(EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
sub_8bit_hi)),
sub_16bit)>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
(MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
GR16_ABCD)),
sub_8bit_hi))>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
(MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
GR16_ABCD)),
sub_8bit_hi))>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
(MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
GR32_ABCD)),
sub_8bit_hi))>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
(MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
GR32_ABCD)),
sub_8bit_hi))>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
@@ -1531,62 +1473,34 @@ def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
def immShift32 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 5; }]>;
def immShift64 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 6; }]>;
-// (shl x (and y, 31)) ==> (shl x, y)
-def : Pat<(shl GR8:$src1, (and CL, immShift32)),
- (SHL8rCL GR8:$src1)>;
-def : Pat<(shl GR16:$src1, (and CL, immShift32)),
- (SHL16rCL GR16:$src1)>;
-def : Pat<(shl GR32:$src1, (and CL, immShift32)),
- (SHL32rCL GR32:$src1)>;
-def : Pat<(store (shl (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
- (SHL8mCL addr:$dst)>;
-def : Pat<(store (shl (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
- (SHL16mCL addr:$dst)>;
-def : Pat<(store (shl (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
- (SHL32mCL addr:$dst)>;
-
-def : Pat<(srl GR8:$src1, (and CL, immShift32)),
- (SHR8rCL GR8:$src1)>;
-def : Pat<(srl GR16:$src1, (and CL, immShift32)),
- (SHR16rCL GR16:$src1)>;
-def : Pat<(srl GR32:$src1, (and CL, immShift32)),
- (SHR32rCL GR32:$src1)>;
-def : Pat<(store (srl (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
- (SHR8mCL addr:$dst)>;
-def : Pat<(store (srl (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
- (SHR16mCL addr:$dst)>;
-def : Pat<(store (srl (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
- (SHR32mCL addr:$dst)>;
-
-def : Pat<(sra GR8:$src1, (and CL, immShift32)),
- (SAR8rCL GR8:$src1)>;
-def : Pat<(sra GR16:$src1, (and CL, immShift32)),
- (SAR16rCL GR16:$src1)>;
-def : Pat<(sra GR32:$src1, (and CL, immShift32)),
- (SAR32rCL GR32:$src1)>;
-def : Pat<(store (sra (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
- (SAR8mCL addr:$dst)>;
-def : Pat<(store (sra (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
- (SAR16mCL addr:$dst)>;
-def : Pat<(store (sra (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
- (SAR32mCL addr:$dst)>;
-
-// (shl x (and y, 63)) ==> (shl x, y)
-def : Pat<(shl GR64:$src1, (and CL, immShift64)),
- (SHL64rCL GR64:$src1)>;
-def : Pat<(store (shl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
- (SHL64mCL addr:$dst)>;
-
-def : Pat<(srl GR64:$src1, (and CL, immShift64)),
- (SHR64rCL GR64:$src1)>;
-def : Pat<(store (srl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
- (SHR64mCL addr:$dst)>;
-
-def : Pat<(sra GR64:$src1, (and CL, immShift64)),
- (SAR64rCL GR64:$src1)>;
-def : Pat<(store (sra (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
- (SAR64mCL addr:$dst)>;
+// Shift amount is implicitly masked.
+multiclass MaskedShiftAmountPats<SDNode frag, string name> {
+ // (shift x (and y, 31)) ==> (shift x, y)
+ def : Pat<(frag GR8:$src1, (and CL, immShift32)),
+ (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
+ def : Pat<(frag GR16:$src1, (and CL, immShift32)),
+ (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
+ def : Pat<(frag GR32:$src1, (and CL, immShift32)),
+ (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
+ def : Pat<(store (frag (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
+ (!cast<Instruction>(name # "8mCL") addr:$dst)>;
+ def : Pat<(store (frag (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
+ (!cast<Instruction>(name # "16mCL") addr:$dst)>;
+ def : Pat<(store (frag (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
+ (!cast<Instruction>(name # "32mCL") addr:$dst)>;
+
+ // (shift x (and y, 63)) ==> (shift x, y)
+ def : Pat<(frag GR64:$src1, (and CL, immShift64)),
+ (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
+ def : Pat<(store (frag (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
+ (!cast<Instruction>(name # "64mCL") addr:$dst)>;
+}
+defm : MaskedShiftAmountPats<shl, "SHL">;
+defm : MaskedShiftAmountPats<srl, "SHR">;
+defm : MaskedShiftAmountPats<sra, "SAR">;
+defm : MaskedShiftAmountPats<rotl, "ROL">;
+defm : MaskedShiftAmountPats<rotr, "ROR">;
// (anyext (setcc_carry)) -> (setcc_carry)
def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
@@ -1725,20 +1639,34 @@ def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
(IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
// Increment reg.
-def : Pat<(add GR8 :$src, 1), (INC8r GR8 :$src)>;
-def : Pat<(add GR16:$src, 1), (INC16r GR16:$src)>, Requires<[In32BitMode]>;
-def : Pat<(add GR16:$src, 1), (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
-def : Pat<(add GR32:$src, 1), (INC32r GR32:$src)>, Requires<[In32BitMode]>;
-def : Pat<(add GR32:$src, 1), (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
-def : Pat<(add GR64:$src, 1), (INC64r GR64:$src)>;
+// Do not make INC if it is slow
+def : Pat<(add GR8:$src, 1),
+ (INC8r GR8:$src)>, Requires<[NotSlowIncDec]>;
+def : Pat<(add GR16:$src, 1),
+ (INC16r GR16:$src)>, Requires<[NotSlowIncDec, Not64BitMode]>;
+def : Pat<(add GR16:$src, 1),
+ (INC64_16r GR16:$src)>, Requires<[NotSlowIncDec, In64BitMode]>;
+def : Pat<(add GR32:$src, 1),
+ (INC32r GR32:$src)>, Requires<[NotSlowIncDec, Not64BitMode]>;
+def : Pat<(add GR32:$src, 1),
+ (INC64_32r GR32:$src)>, Requires<[NotSlowIncDec, In64BitMode]>;
+def : Pat<(add GR64:$src, 1),
+ (INC64r GR64:$src)>, Requires<[NotSlowIncDec]>;
// Decrement reg.
-def : Pat<(add GR8 :$src, -1), (DEC8r GR8 :$src)>;
-def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>, Requires<[In32BitMode]>;
-def : Pat<(add GR16:$src, -1), (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
-def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>, Requires<[In32BitMode]>;
-def : Pat<(add GR32:$src, -1), (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
-def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;
+// Do not make DEC if it is slow
+def : Pat<(add GR8:$src, -1),
+ (DEC8r GR8:$src)>, Requires<[NotSlowIncDec]>;
+def : Pat<(add GR16:$src, -1),
+ (DEC16r GR16:$src)>, Requires<[NotSlowIncDec, Not64BitMode]>;
+def : Pat<(add GR16:$src, -1),
+ (DEC64_16r GR16:$src)>, Requires<[NotSlowIncDec, In64BitMode]>;
+def : Pat<(add GR32:$src, -1),
+ (DEC32r GR32:$src)>, Requires<[NotSlowIncDec, Not64BitMode]>;
+def : Pat<(add GR32:$src, -1),
+ (DEC64_32r GR32:$src)>, Requires<[NotSlowIncDec, In64BitMode]>;
+def : Pat<(add GR64:$src, -1),
+ (DEC64r GR64:$src)>, Requires<[NotSlowIncDec]>;
// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr GR8 :$src1, GR8 :$src2)>;
@@ -1840,3 +1768,9 @@ def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;
+
+// When HasMOVBE is enabled it is possible to get a non-legalized
+// register-register 16 bit bswap. This maps it to a ROL instruction.
+let Predicates = [HasMOVBE] in {
+ def : Pat<(bswap GR16:$src), (ROL16ri GR16:$src, (i8 8))>;
+}
diff --git a/contrib/llvm/lib/Target/X86/X86InstrControl.td b/contrib/llvm/lib/Target/X86/X86InstrControl.td
index e4ccc06..39ad395 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrControl.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrControl.td
@@ -21,42 +21,50 @@
// ST1 arguments when returning values on the x87 stack.
let isTerminator = 1, isReturn = 1, isBarrier = 1,
hasCtrlDep = 1, FPForm = SpecialFP, SchedRW = [WriteJumpLd] in {
- def RET : I <0xC3, RawFrm, (outs), (ins variable_ops),
- "ret",
- [(X86retflag 0)], IIC_RET>;
+ def RETL : I <0xC3, RawFrm, (outs), (ins variable_ops),
+ "ret{l}", [(X86retflag 0)], IIC_RET>, OpSize32,
+ Requires<[Not64BitMode]>;
+ def RETQ : I <0xC3, RawFrm, (outs), (ins variable_ops),
+ "ret{q}", [(X86retflag 0)], IIC_RET>, OpSize32,
+ Requires<[In64BitMode]>;
def RETW : I <0xC3, RawFrm, (outs), (ins),
"ret{w}",
- [], IIC_RET>, OpSize;
- def RETI : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
- "ret\t$amt",
- [(X86retflag timm:$amt)], IIC_RET_IMM>;
+ [], IIC_RET>, OpSize16;
+ def RETIL : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
+ "ret{l}\t$amt",
+ [(X86retflag timm:$amt)], IIC_RET_IMM>, OpSize32,
+ Requires<[Not64BitMode]>;
+ def RETIQ : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
+ "ret{q}\t$amt",
+ [(X86retflag timm:$amt)], IIC_RET_IMM>, OpSize32,
+ Requires<[In64BitMode]>;
def RETIW : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt),
"ret{w}\t$amt",
- [], IIC_RET_IMM>, OpSize;
+ [], IIC_RET_IMM>, OpSize16;
def LRETL : I <0xCB, RawFrm, (outs), (ins),
- "{l}ret{l|f}", [], IIC_RET>;
- def LRETW : I <0xCB, RawFrm, (outs), (ins),
- "{l}ret{w|f}", [], IIC_RET>, OpSize;
+ "{l}ret{l|f}", [], IIC_RET>, OpSize32;
def LRETQ : RI <0xCB, RawFrm, (outs), (ins),
- "{l}ret{q|f}", [], IIC_RET>;
- def LRETI : Ii16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
- "{l}ret{l|f}\t$amt", [], IIC_RET>;
+ "{l}ret{|f}q", [], IIC_RET>, Requires<[In64BitMode]>;
+ def LRETW : I <0xCB, RawFrm, (outs), (ins),
+ "{l}ret{w|f}", [], IIC_RET>, OpSize16;
+ def LRETIL : Ii16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
+ "{l}ret{l|f}\t$amt", [], IIC_RET>, OpSize32;
+ def LRETIQ : RIi16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
+ "{l}ret{|f}q\t$amt", [], IIC_RET>, Requires<[In64BitMode]>;
def LRETIW : Ii16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
- "{l}ret{w|f}\t$amt", [], IIC_RET>, OpSize;
+ "{l}ret{w|f}\t$amt", [], IIC_RET>, OpSize16;
}
// Unconditional branches.
let isBarrier = 1, isBranch = 1, isTerminator = 1, SchedRW = [WriteJump] in {
def JMP_4 : Ii32PCRel<0xE9, RawFrm, (outs), (ins brtarget:$dst),
- "jmp\t$dst", [(br bb:$dst)], IIC_JMP_REL>;
+ "jmp\t$dst", [(br bb:$dst)], IIC_JMP_REL>, OpSize32;
+ def JMP_2 : Ii16PCRel<0xE9, RawFrm, (outs), (ins brtarget:$dst),
+ "jmp\t$dst", [(br bb:$dst)], IIC_JMP_REL>, OpSize16,
+ Requires<[In16BitMode]>;
let hasSideEffects = 0 in
def JMP_1 : Ii8PCRel<0xEB, RawFrm, (outs), (ins brtarget8:$dst),
"jmp\t$dst", [], IIC_JMP_REL>;
- // FIXME : Intel syntax for JMP64pcrel32 such that it is not ambiguious
- // with JMP_1.
- let hasSideEffects = 0 in
- def JMP64pcrel32 : I<0xE9, RawFrm, (outs), (ins brtarget:$dst),
- "jmpq\t$dst", [], IIC_JMP_REL>;
}
// Conditional Branches.
@@ -65,8 +73,12 @@ let isBranch = 1, isTerminator = 1, Uses = [EFLAGS], SchedRW = [WriteJump] in {
let hasSideEffects = 0 in
def _1 : Ii8PCRel <opc1, RawFrm, (outs), (ins brtarget8:$dst), asm, [],
IIC_Jcc>;
+ def _2 : Ii16PCRel<opc4, RawFrm, (outs), (ins brtarget:$dst), asm,
+ [(X86brcond bb:$dst, Cond, EFLAGS)], IIC_Jcc>, OpSize16,
+ TB, Requires<[In16BitMode]>;
def _4 : Ii32PCRel<opc4, RawFrm, (outs), (ins brtarget:$dst), asm,
- [(X86brcond bb:$dst, Cond, EFLAGS)], IIC_Jcc>, TB;
+ [(X86brcond bb:$dst, Cond, EFLAGS)], IIC_Jcc>, TB,
+ OpSize32;
}
}
@@ -94,10 +106,10 @@ let isBranch = 1, isTerminator = 1, hasSideEffects = 0, SchedRW = [WriteJump] in
// jecxz.
let Uses = [CX] in
def JCXZ : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
- "jcxz\t$dst", [], IIC_JCXZ>, AdSize, Requires<[In32BitMode]>;
+ "jcxz\t$dst", [], IIC_JCXZ>, AdSize, Requires<[Not64BitMode]>;
let Uses = [ECX] in
def JECXZ_32 : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
- "jecxz\t$dst", [], IIC_JCXZ>, Requires<[In32BitMode]>;
+ "jecxz\t$dst", [], IIC_JCXZ>, Requires<[Not64BitMode]>;
// J*CXZ instruction: 64-bit versions of this instruction for the asmparser.
// In 64-bit mode, the address size prefix is jecxz and the unprefixed version
@@ -112,12 +124,19 @@ let isBranch = 1, isTerminator = 1, hasSideEffects = 0, SchedRW = [WriteJump] in
// Indirect branches
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
+ def JMP16r : I<0xFF, MRM4r, (outs), (ins GR16:$dst), "jmp{w}\t{*}$dst",
+ [(brind GR16:$dst)], IIC_JMP_REG>, Requires<[Not64BitMode]>,
+ OpSize16, Sched<[WriteJump]>;
+ def JMP16m : I<0xFF, MRM4m, (outs), (ins i16mem:$dst), "jmp{w}\t{*}$dst",
+ [(brind (loadi16 addr:$dst))], IIC_JMP_MEM>,
+ Requires<[Not64BitMode]>, OpSize16, Sched<[WriteJumpLd]>;
+
def JMP32r : I<0xFF, MRM4r, (outs), (ins GR32:$dst), "jmp{l}\t{*}$dst",
- [(brind GR32:$dst)], IIC_JMP_REG>, Requires<[In32BitMode]>,
- Sched<[WriteJump]>;
+ [(brind GR32:$dst)], IIC_JMP_REG>, Requires<[Not64BitMode]>,
+ OpSize32, Sched<[WriteJump]>;
def JMP32m : I<0xFF, MRM4m, (outs), (ins i32mem:$dst), "jmp{l}\t{*}$dst",
[(brind (loadi32 addr:$dst))], IIC_JMP_MEM>,
- Requires<[In32BitMode]>, Sched<[WriteJumpLd]>;
+ Requires<[Not64BitMode]>, OpSize32, Sched<[WriteJumpLd]>;
def JMP64r : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
[(brind GR64:$dst)], IIC_JMP_REG>, Requires<[In64BitMode]>,
@@ -129,20 +148,20 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
def FARJMP16i : Iseg16<0xEA, RawFrmImm16, (outs),
(ins i16imm:$off, i16imm:$seg),
"ljmp{w}\t{$seg, $off|$off, $seg}", [],
- IIC_JMP_FAR_PTR>, OpSize, Sched<[WriteJump]>;
+ IIC_JMP_FAR_PTR>, OpSize16, Sched<[WriteJump]>;
def FARJMP32i : Iseg32<0xEA, RawFrmImm16, (outs),
(ins i32imm:$off, i16imm:$seg),
"ljmp{l}\t{$seg, $off|$off, $seg}", [],
- IIC_JMP_FAR_PTR>, Sched<[WriteJump]>;
+ IIC_JMP_FAR_PTR>, OpSize32, Sched<[WriteJump]>;
def FARJMP64 : RI<0xFF, MRM5m, (outs), (ins opaque80mem:$dst),
"ljmp{q}\t{*}$dst", [], IIC_JMP_FAR_MEM>,
Sched<[WriteJump]>;
def FARJMP16m : I<0xFF, MRM5m, (outs), (ins opaque32mem:$dst),
- "ljmp{w}\t{*}$dst", [], IIC_JMP_FAR_MEM>, OpSize,
+ "ljmp{w}\t{*}$dst", [], IIC_JMP_FAR_MEM>, OpSize16,
Sched<[WriteJumpLd]>;
def FARJMP32m : I<0xFF, MRM5m, (outs), (ins opaque48mem:$dst),
- "ljmp{l}\t{*}$dst", [], IIC_JMP_FAR_MEM>,
+ "ljmp{l}\t{*}$dst", [], IIC_JMP_FAR_MEM>, OpSize32,
Sched<[WriteJumpLd]>;
}
@@ -165,38 +184,44 @@ let isCall = 1 in
let Uses = [ESP] in {
def CALLpcrel32 : Ii32PCRel<0xE8, RawFrm,
(outs), (ins i32imm_pcrel:$dst),
- "call{l}\t$dst", [], IIC_CALL_RI>,
- Requires<[In32BitMode]>, Sched<[WriteJump]>;
+ "call{l}\t$dst", [], IIC_CALL_RI>, OpSize32,
+ Requires<[Not64BitMode]>, Sched<[WriteJump]>;
+ def CALLpcrel16 : Ii16PCRel<0xE8, RawFrm,
+ (outs), (ins i16imm_pcrel:$dst),
+ "call{w}\t$dst", [], IIC_CALL_RI>, OpSize16,
+ Sched<[WriteJump]>;
+ def CALL16r : I<0xFF, MRM2r, (outs), (ins GR16:$dst),
+ "call{w}\t{*}$dst", [(X86call GR16:$dst)], IIC_CALL_RI>,
+ OpSize16, Requires<[Not64BitMode]>, Sched<[WriteJump]>;
+ def CALL16m : I<0xFF, MRM2m, (outs), (ins i16mem:$dst),
+ "call{w}\t{*}$dst", [(X86call (loadi16 addr:$dst))],
+ IIC_CALL_MEM>, OpSize16,
+ Requires<[Not64BitMode,FavorMemIndirectCall]>,
+ Sched<[WriteJumpLd]>;
def CALL32r : I<0xFF, MRM2r, (outs), (ins GR32:$dst),
"call{l}\t{*}$dst", [(X86call GR32:$dst)], IIC_CALL_RI>,
- Requires<[In32BitMode]>, Sched<[WriteJump]>;
+ OpSize32, Requires<[Not64BitMode]>, Sched<[WriteJump]>;
def CALL32m : I<0xFF, MRM2m, (outs), (ins i32mem:$dst),
"call{l}\t{*}$dst", [(X86call (loadi32 addr:$dst))],
- IIC_CALL_MEM>,
- Requires<[In32BitMode,FavorMemIndirectCall]>,
+ IIC_CALL_MEM>, OpSize32,
+ Requires<[Not64BitMode,FavorMemIndirectCall]>,
Sched<[WriteJumpLd]>;
def FARCALL16i : Iseg16<0x9A, RawFrmImm16, (outs),
(ins i16imm:$off, i16imm:$seg),
"lcall{w}\t{$seg, $off|$off, $seg}", [],
- IIC_CALL_FAR_PTR>, OpSize, Sched<[WriteJump]>;
+ IIC_CALL_FAR_PTR>, OpSize16, Sched<[WriteJump]>;
def FARCALL32i : Iseg32<0x9A, RawFrmImm16, (outs),
(ins i32imm:$off, i16imm:$seg),
"lcall{l}\t{$seg, $off|$off, $seg}", [],
- IIC_CALL_FAR_PTR>, Sched<[WriteJump]>;
+ IIC_CALL_FAR_PTR>, OpSize32, Sched<[WriteJump]>;
def FARCALL16m : I<0xFF, MRM3m, (outs), (ins opaque32mem:$dst),
- "lcall{w}\t{*}$dst", [], IIC_CALL_FAR_MEM>, OpSize,
+ "lcall{w}\t{*}$dst", [], IIC_CALL_FAR_MEM>, OpSize16,
Sched<[WriteJumpLd]>;
def FARCALL32m : I<0xFF, MRM3m, (outs), (ins opaque48mem:$dst),
- "lcall{l}\t{*}$dst", [], IIC_CALL_FAR_MEM>,
+ "lcall{l}\t{*}$dst", [], IIC_CALL_FAR_MEM>, OpSize32,
Sched<[WriteJumpLd]>;
-
- // callw for 16 bit code for the assembler.
- let isAsmParserOnly = 1 in
- def CALLpcrel16 : Ii16PCRel<0xE8, RawFrm,
- (outs), (ins i16imm_pcrel:$dst),
- "callw\t$dst", []>, OpSize;
}
@@ -240,7 +265,7 @@ let isCall = 1, Uses = [RSP], SchedRW = [WriteJump] in {
// the 32-bit pcrel field that we have.
def CALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
(outs), (ins i64i32imm_pcrel:$dst),
- "call{q}\t$dst", [], IIC_CALL_RI>,
+ "call{q}\t$dst", [], IIC_CALL_RI>, OpSize32,
Requires<[In64BitMode]>;
def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst),
"call{q}\t{*}$dst", [(X86call GR64:$dst)],
diff --git a/contrib/llvm/lib/Target/X86/X86InstrExtension.td b/contrib/llvm/lib/Target/X86/X86InstrExtension.td
index 4090550..6be6a1f 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrExtension.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrExtension.td
@@ -14,17 +14,17 @@
let neverHasSideEffects = 1 in {
let Defs = [AX], Uses = [AL] in
def CBW : I<0x98, RawFrm, (outs), (ins),
- "{cbtw|cbw}", [], IIC_CBW>, OpSize; // AX = signext(AL)
+ "{cbtw|cbw}", [], IIC_CBW>, OpSize16; // AX = signext(AL)
let Defs = [EAX], Uses = [AX] in
def CWDE : I<0x98, RawFrm, (outs), (ins),
- "{cwtl|cwde}", [], IIC_CBW>; // EAX = signext(AX)
+ "{cwtl|cwde}", [], IIC_CBW>, OpSize32; // EAX = signext(AX)
let Defs = [AX,DX], Uses = [AX] in
def CWD : I<0x99, RawFrm, (outs), (ins),
- "{cwtd|cwd}", [], IIC_CBW>, OpSize; // DX:AX = signext(AX)
+ "{cwtd|cwd}", [], IIC_CBW>, OpSize16; // DX:AX = signext(AX)
let Defs = [EAX,EDX], Uses = [EAX] in
def CDQ : I<0x99, RawFrm, (outs), (ins),
- "{cltd|cdq}", [], IIC_CBW>; // EDX:EAX = signext(EAX)
+ "{cltd|cdq}", [], IIC_CBW>, OpSize32; // EDX:EAX = signext(EAX)
let Defs = [RAX], Uses = [EAX] in
@@ -42,54 +42,54 @@ let neverHasSideEffects = 1 in {
let neverHasSideEffects = 1 in {
def MOVSX16rr8 : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src),
"movs{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVSX_R16_R8>,
- TB, OpSize, Sched<[WriteALU]>;
+ TB, OpSize16, Sched<[WriteALU]>;
let mayLoad = 1 in
def MOVSX16rm8 : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
"movs{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVSX_R16_M8>,
- TB, OpSize, Sched<[WriteALULd]>;
+ TB, OpSize16, Sched<[WriteALULd]>;
} // neverHasSideEffects = 1
def MOVSX32rr8 : I<0xBE, MRMSrcReg, (outs GR32:$dst), (ins GR8:$src),
"movs{bl|x}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (sext GR8:$src))], IIC_MOVSX>, TB,
- Sched<[WriteALU]>;
+ OpSize32, Sched<[WriteALU]>;
def MOVSX32rm8 : I<0xBE, MRMSrcMem, (outs GR32:$dst), (ins i8mem :$src),
"movs{bl|x}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (sextloadi32i8 addr:$src))], IIC_MOVSX>, TB,
- Sched<[WriteALULd]>;
+ OpSize32, Sched<[WriteALULd]>;
def MOVSX32rr16: I<0xBF, MRMSrcReg, (outs GR32:$dst), (ins GR16:$src),
"movs{wl|x}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (sext GR16:$src))], IIC_MOVSX>, TB,
- Sched<[WriteALU]>;
+ OpSize32, Sched<[WriteALU]>;
def MOVSX32rm16: I<0xBF, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
"movs{wl|x}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (sextloadi32i16 addr:$src))], IIC_MOVSX>,
- TB, Sched<[WriteALULd]>;
+ OpSize32, TB, Sched<[WriteALULd]>;
let neverHasSideEffects = 1 in {
def MOVZX16rr8 : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src),
"movz{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX_R16_R8>,
- TB, OpSize, Sched<[WriteALU]>;
+ TB, OpSize16, Sched<[WriteALU]>;
let mayLoad = 1 in
def MOVZX16rm8 : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
"movz{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX_R16_M8>,
- TB, OpSize, Sched<[WriteALULd]>;
+ TB, OpSize16, Sched<[WriteALULd]>;
} // neverHasSideEffects = 1
def MOVZX32rr8 : I<0xB6, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
"movz{bl|x}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (zext GR8:$src))], IIC_MOVZX>, TB,
- Sched<[WriteALU]>;
+ OpSize32, Sched<[WriteALU]>;
def MOVZX32rm8 : I<0xB6, MRMSrcMem, (outs GR32:$dst), (ins i8mem :$src),
"movz{bl|x}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (zextloadi32i8 addr:$src))], IIC_MOVZX>, TB,
- Sched<[WriteALULd]>;
+ OpSize32, Sched<[WriteALULd]>;
def MOVZX32rr16: I<0xB7, MRMSrcReg, (outs GR32:$dst), (ins GR16:$src),
"movz{wl|x}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (zext GR16:$src))], IIC_MOVZX>, TB,
- Sched<[WriteALU]>;
+ OpSize32, Sched<[WriteALU]>;
def MOVZX32rm16: I<0xB7, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
"movz{wl|x}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (zextloadi32i16 addr:$src))], IIC_MOVZX>,
- TB, Sched<[WriteALULd]>;
+ TB, OpSize32, Sched<[WriteALULd]>;
// These are the same as the regular MOVZX32rr8 and MOVZX32rm8
// except that they use GR32_NOREX for the output operand register class
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFMA.td b/contrib/llvm/lib/Target/X86/X86InstrFMA.td
index 69cd5a5..c0a6864 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFMA.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFMA.td
@@ -19,8 +19,9 @@ let Constraints = "$src1 = $dst" in {
multiclass fma3p_rm<bits<8> opc, string OpcodeStr,
PatFrag MemFrag128, PatFrag MemFrag256,
ValueType OpVT128, ValueType OpVT256,
+ bit IsRVariantCommutable = 0, bit IsMVariantCommutable = 0,
SDPatternOperator Op = null_frag> {
- let isCommutable = 1 in
+ let usesCustomInserter = 1, isCommutable = IsRVariantCommutable in
def r : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, VR128:$src3),
!strconcat(OpcodeStr,
@@ -28,7 +29,7 @@ multiclass fma3p_rm<bits<8> opc, string OpcodeStr,
[(set VR128:$dst, (OpVT128 (Op VR128:$src2,
VR128:$src1, VR128:$src3)))]>;
- let mayLoad = 1 in
+ let mayLoad = 1, isCommutable = IsMVariantCommutable in
def m : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, f128mem:$src3),
!strconcat(OpcodeStr,
@@ -36,7 +37,7 @@ multiclass fma3p_rm<bits<8> opc, string OpcodeStr,
[(set VR128:$dst, (OpVT128 (Op VR128:$src2, VR128:$src1,
(MemFrag128 addr:$src3))))]>;
- let isCommutable = 1 in
+ let usesCustomInserter = 1, isCommutable = IsRVariantCommutable in
def rY : FMA3<opc, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, VR256:$src3),
!strconcat(OpcodeStr,
@@ -44,7 +45,7 @@ multiclass fma3p_rm<bits<8> opc, string OpcodeStr,
[(set VR256:$dst, (OpVT256 (Op VR256:$src2, VR256:$src1,
VR256:$src3)))]>, VEX_L;
- let mayLoad = 1 in
+ let mayLoad = 1, isCommutable = IsMVariantCommutable in
def mY : FMA3<opc, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, f256mem:$src3),
!strconcat(OpcodeStr,
@@ -59,16 +60,27 @@ multiclass fma3p_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
string OpcodeStr, string PackTy,
PatFrag MemFrag128, PatFrag MemFrag256,
SDNode Op, ValueType OpTy128, ValueType OpTy256> {
+ // For 213, both the register and memory variant are commutable.
+ // Indeed, the commutable operands are 1 and 2 and both live in registers
+ // for both variants.
defm r213 : fma3p_rm<opc213,
!strconcat(OpcodeStr, "213", PackTy),
- MemFrag128, MemFrag256, OpTy128, OpTy256, Op>;
+ MemFrag128, MemFrag256, OpTy128, OpTy256,
+ /* IsRVariantCommutable */ 1,
+ /* IsMVariantCommutable */ 1,
+ Op>;
let neverHasSideEffects = 1 in {
defm r132 : fma3p_rm<opc132,
!strconcat(OpcodeStr, "132", PackTy),
MemFrag128, MemFrag256, OpTy128, OpTy256>;
+ // For 231, only the register variant is commutable.
+  // For the memory variant, the folded operand must be operand 3 and thus,
+  // in that case, it cannot be swapped with operand 2.
defm r231 : fma3p_rm<opc231,
!strconcat(OpcodeStr, "231", PackTy),
- MemFrag128, MemFrag256, OpTy128, OpTy256>;
+ MemFrag128, MemFrag256, OpTy128, OpTy256,
+ /* IsRVariantCommutable */ 1,
+ /* IsMVariantCommutable */ 0>;
} // neverHasSideEffects = 1
}
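The commutability notes above come down to which two of the three operands each FMA3 form multiplies. Below is a minimal standalone sketch (plain C++, not LLVM code; the scalar helpers are invented for illustration) of the 132/213/231 semantics, showing why the 213 form stays commutable even when src3 is folded to memory while 231 does not:

// Hedged sketch (illustration only, not LLVM code): scalar semantics of the
// three FMA3 operand orders.  src1 is tied to dst and src3 is the operand
// that may come from memory.
#include <cassert>

double fma132(double s1, double s2, double s3) { return s1 * s3 + s2; }
double fma213(double s1, double s2, double s3) { return s2 * s1 + s3; }
double fma231(double s1, double s2, double s3) { return s2 * s3 + s1; }

int main() {
  // For 213 the multiply reads s1 and s2, which are always registers, so
  // both the register and the memory variants can be marked commutable.
  assert(fma213(2, 3, 7) == fma213(3, 2, 7));
  // For 231 the multiply reads s2 and s3; swapping them is only possible
  // when s3 is a register, hence only the register variant is commutable.
  assert(fma231(7, 2, 3) == fma231(7, 3, 2));
  return 0;
}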
@@ -117,15 +129,17 @@ let ExeDomain = SSEPackedDouble in {
let Constraints = "$src1 = $dst" in {
multiclass fma3s_rm<bits<8> opc, string OpcodeStr, X86MemOperand x86memop,
RegisterClass RC, ValueType OpVT, PatFrag mem_frag,
+ bit IsRVariantCommutable = 0, bit IsMVariantCommutable = 0,
SDPatternOperator OpNode = null_frag> {
- let isCommutable = 1 in
+ let usesCustomInserter = 1, isCommutable = IsRVariantCommutable in
def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2, RC:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set RC:$dst,
(OpVT (OpNode RC:$src2, RC:$src1, RC:$src3)))]>;
- let mayLoad = 1 in
+
+ let mayLoad = 1, isCommutable = IsMVariantCommutable in
def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, RC:$src2, x86memop:$src3),
!strconcat(OpcodeStr,
@@ -134,51 +148,56 @@ multiclass fma3s_rm<bits<8> opc, string OpcodeStr, X86MemOperand x86memop,
(OpVT (OpNode RC:$src2, RC:$src1,
(mem_frag addr:$src3))))]>;
}
-
-multiclass fma3s_rm_int<bits<8> opc, string OpcodeStr, Operand memop,
- ComplexPattern mem_cpat, Intrinsic IntId,
- RegisterClass RC> {
- let isCommutable = 1 in
- def r_Int : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, VR128:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst, (IntId VR128:$src2, VR128:$src1,
- VR128:$src3))]>;
- def m_Int : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, memop:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (IntId VR128:$src2, VR128:$src1, mem_cpat:$src3))]>;
-}
} // Constraints = "$src1 = $dst"
multiclass fma3s_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
- string OpStr, string PackTy, Intrinsic Int,
+ string OpStr, string PackTy, string PT2, Intrinsic Int,
SDNode OpNode, RegisterClass RC, ValueType OpVT,
X86MemOperand x86memop, Operand memop, PatFrag mem_frag,
ComplexPattern mem_cpat> {
let neverHasSideEffects = 1 in {
defm r132 : fma3s_rm<opc132, !strconcat(OpStr, "132", PackTy),
x86memop, RC, OpVT, mem_frag>;
+ // See the other defm of r231 for the explanation regarding the
+ // commutable flags.
defm r231 : fma3s_rm<opc231, !strconcat(OpStr, "231", PackTy),
- x86memop, RC, OpVT, mem_frag>;
+ x86memop, RC, OpVT, mem_frag,
+ /* IsRVariantCommutable */ 1,
+ /* IsMVariantCommutable */ 0>;
}
+// See the other defm of r213 for the explanation regarding the
+// commutable flags.
defm r213 : fma3s_rm<opc213, !strconcat(OpStr, "213", PackTy),
- x86memop, RC, OpVT, mem_frag, OpNode>,
- fma3s_rm_int<opc213, !strconcat(OpStr, "213", PackTy),
- memop, mem_cpat, Int, RC>;
+ x86memop, RC, OpVT, mem_frag,
+ /* IsRVariantCommutable */ 1,
+ /* IsMVariantCommutable */ 1,
+ OpNode>;
}
multiclass fma3s<bits<8> opc132, bits<8> opc213, bits<8> opc231,
string OpStr, Intrinsic IntF32, Intrinsic IntF64,
SDNode OpNode> {
- defm SS : fma3s_forms<opc132, opc213, opc231, OpStr, "ss", IntF32, OpNode,
+ defm SS : fma3s_forms<opc132, opc213, opc231, OpStr, "ss", "SS", IntF32, OpNode,
FR32, f32, f32mem, ssmem, loadf32, sse_load_f32>;
- defm SD : fma3s_forms<opc132, opc213, opc231, OpStr, "sd", IntF64, OpNode,
+ defm SD : fma3s_forms<opc132, opc213, opc231, OpStr, "sd", "PD", IntF64, OpNode,
FR64, f64, f64mem, sdmem, loadf64, sse_load_f64>, VEX_W;
+
+ def : Pat<(IntF32 VR128:$src1, VR128:$src2, VR128:$src3),
+ (COPY_TO_REGCLASS
+ (!cast<Instruction>(NAME#"SSr213r")
+ (COPY_TO_REGCLASS $src2, FR32),
+ (COPY_TO_REGCLASS $src1, FR32),
+ (COPY_TO_REGCLASS $src3, FR32)),
+ VR128)>;
+
+ def : Pat<(IntF64 VR128:$src1, VR128:$src2, VR128:$src3),
+ (COPY_TO_REGCLASS
+ (!cast<Instruction>(NAME#"SDr213r")
+ (COPY_TO_REGCLASS $src2, FR64),
+ (COPY_TO_REGCLASS $src1, FR64),
+ (COPY_TO_REGCLASS $src3, FR64)),
+ VR128)>;
}
defm VFMADD : fma3s<0x99, 0xA9, 0xB9, "vfmadd", int_x86_fma_vfmadd_ss,
@@ -220,7 +239,7 @@ multiclass fma4s<bits<8> opc, string OpcodeStr, RegisterClass RC,
[(set RC:$dst,
(OpNode RC:$src1, (mem_frag addr:$src2), RC:$src3))]>, VEX_LIG;
// For disassembler
-let isCodeGenOnly = 1, hasSideEffects = 0 in
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
def rr_REV : FMA4<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2, RC:$src3),
!strconcat(OpcodeStr,
@@ -230,6 +249,7 @@ let isCodeGenOnly = 1, hasSideEffects = 0 in
multiclass fma4s_int<bits<8> opc, string OpcodeStr, Operand memop,
ComplexPattern mem_cpat, Intrinsic Int> {
+let isCodeGenOnly = 1 in {
let isCommutable = 1 in
def rr_Int : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, VR128:$src3),
@@ -249,6 +269,7 @@ multiclass fma4s_int<bits<8> opc, string OpcodeStr, Operand memop,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
(Int VR128:$src1, mem_cpat:$src2, VR128:$src3))]>, VEX_LIG;
+} // isCodeGenOnly = 1
}
multiclass fma4p<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -295,7 +316,7 @@ multiclass fma4p<bits<8> opc, string OpcodeStr, SDNode OpNode,
[(set VR256:$dst, (OpNode VR256:$src1,
(ld_frag256 addr:$src2), VR256:$src3))]>, VEX_L;
// For disassembler
-let isCodeGenOnly = 1, hasSideEffects = 0 in {
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
def rr_REV : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, VR128:$src3),
!strconcat(OpcodeStr,
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFPStack.td b/contrib/llvm/lib/Target/X86/X86InstrFPStack.td
index 7c37888..4ad7b7e 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFPStack.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFPStack.td
@@ -206,74 +206,91 @@ def _FI32m : FPI<0xDA, fp, (outs), (ins i32mem:$src),
}
let Defs = [FPSW] in {
+// FPBinary_rr just defines pseudo-instructions, so there is no need to set
+// scheduling resources.
defm ADD : FPBinary_rr<fadd>;
defm SUB : FPBinary_rr<fsub>;
defm MUL : FPBinary_rr<fmul>;
defm DIV : FPBinary_rr<fdiv>;
+// Sets the scheduling resources for the actual NAME#_F<size>m definitions.
+let SchedRW = [WriteFAddLd] in {
defm ADD : FPBinary<fadd, MRM0m, "add">;
defm SUB : FPBinary<fsub, MRM4m, "sub">;
defm SUBR: FPBinary<fsub ,MRM5m, "subr">;
+}
+let SchedRW = [WriteFMulLd] in {
defm MUL : FPBinary<fmul, MRM1m, "mul">;
+}
+let SchedRW = [WriteFDivLd] in {
defm DIV : FPBinary<fdiv, MRM6m, "div">;
defm DIVR: FPBinary<fdiv, MRM7m, "divr">;
}
+}
-class FPST0rInst<bits<8> o, string asm>
- : FPI<o, AddRegFrm, (outs), (ins RST:$op), asm>, D8;
-class FPrST0Inst<bits<8> o, string asm>
- : FPI<o, AddRegFrm, (outs), (ins RST:$op), asm>, DC;
-class FPrST0PInst<bits<8> o, string asm>
- : FPI<o, AddRegFrm, (outs), (ins RST:$op), asm>, DE;
+class FPST0rInst<Format fp, string asm>
+ : FPI<0xD8, fp, (outs), (ins RST:$op), asm>;
+class FPrST0Inst<Format fp, string asm>
+ : FPI<0xDC, fp, (outs), (ins RST:$op), asm>;
+class FPrST0PInst<Format fp, string asm>
+ : FPI<0xDE, fp, (outs), (ins RST:$op), asm>;
// NOTE: GAS and apparently all other AT&T style assemblers have a broken notion
// of some of the 'reverse' forms of the fsub and fdiv instructions. As such,
// we have to put some 'r's in and take them out of weird places.
-def ADD_FST0r : FPST0rInst <0xC0, "fadd\t$op">;
-def ADD_FrST0 : FPrST0Inst <0xC0, "fadd\t{%st(0), $op|$op, st(0)}">;
-def ADD_FPrST0 : FPrST0PInst<0xC0, "faddp\t$op">;
-def SUBR_FST0r : FPST0rInst <0xE8, "fsubr\t$op">;
-def SUB_FrST0 : FPrST0Inst <0xE8, "fsub{r}\t{%st(0), $op|$op, st(0)}">;
-def SUB_FPrST0 : FPrST0PInst<0xE8, "fsub{r}p\t$op">;
-def SUB_FST0r : FPST0rInst <0xE0, "fsub\t$op">;
-def SUBR_FrST0 : FPrST0Inst <0xE0, "fsub{|r}\t{%st(0), $op|$op, st(0)}">;
-def SUBR_FPrST0 : FPrST0PInst<0xE0, "fsub{|r}p\t$op">;
-def MUL_FST0r : FPST0rInst <0xC8, "fmul\t$op">;
-def MUL_FrST0 : FPrST0Inst <0xC8, "fmul\t{%st(0), $op|$op, st(0)}">;
-def MUL_FPrST0 : FPrST0PInst<0xC8, "fmulp\t$op">;
-def DIVR_FST0r : FPST0rInst <0xF8, "fdivr\t$op">;
-def DIV_FrST0 : FPrST0Inst <0xF8, "fdiv{r}\t{%st(0), $op|$op, st(0)}">;
-def DIV_FPrST0 : FPrST0PInst<0xF8, "fdiv{r}p\t$op">;
-def DIV_FST0r : FPST0rInst <0xF0, "fdiv\t$op">;
-def DIVR_FrST0 : FPrST0Inst <0xF0, "fdiv{|r}\t{%st(0), $op|$op, st(0)}">;
-def DIVR_FPrST0 : FPrST0PInst<0xF0, "fdiv{|r}p\t$op">;
-
-def COM_FST0r : FPST0rInst <0xD0, "fcom\t$op">;
-def COMP_FST0r : FPST0rInst <0xD8, "fcomp\t$op">;
+let SchedRW = [WriteFAdd] in {
+def ADD_FST0r : FPST0rInst <MRM0r, "fadd\t$op">;
+def ADD_FrST0 : FPrST0Inst <MRM0r, "fadd\t{%st(0), $op|$op, st(0)}">;
+def ADD_FPrST0 : FPrST0PInst<MRM0r, "faddp\t$op">;
+def SUBR_FST0r : FPST0rInst <MRM5r, "fsubr\t$op">;
+def SUB_FrST0 : FPrST0Inst <MRM5r, "fsub{r}\t{%st(0), $op|$op, st(0)}">;
+def SUB_FPrST0 : FPrST0PInst<MRM5r, "fsub{r}p\t$op">;
+def SUB_FST0r : FPST0rInst <MRM4r, "fsub\t$op">;
+def SUBR_FrST0 : FPrST0Inst <MRM4r, "fsub{|r}\t{%st(0), $op|$op, st(0)}">;
+def SUBR_FPrST0 : FPrST0PInst<MRM4r, "fsub{|r}p\t$op">;
+} // SchedRW
+let SchedRW = [WriteFMul] in {
+def MUL_FST0r : FPST0rInst <MRM1r, "fmul\t$op">;
+def MUL_FrST0 : FPrST0Inst <MRM1r, "fmul\t{%st(0), $op|$op, st(0)}">;
+def MUL_FPrST0 : FPrST0PInst<MRM1r, "fmulp\t$op">;
+} // SchedRW
+let SchedRW = [WriteFDiv] in {
+def DIVR_FST0r : FPST0rInst <MRM7r, "fdivr\t$op">;
+def DIV_FrST0 : FPrST0Inst <MRM7r, "fdiv{r}\t{%st(0), $op|$op, st(0)}">;
+def DIV_FPrST0 : FPrST0PInst<MRM7r, "fdiv{r}p\t$op">;
+def DIV_FST0r : FPST0rInst <MRM6r, "fdiv\t$op">;
+def DIVR_FrST0 : FPrST0Inst <MRM6r, "fdiv{|r}\t{%st(0), $op|$op, st(0)}">;
+def DIVR_FPrST0 : FPrST0PInst<MRM6r, "fdiv{|r}p\t$op">;
+} // SchedRW
+
+def COM_FST0r : FPST0rInst <MRM2r, "fcom\t$op">;
+def COMP_FST0r : FPST0rInst <MRM3r, "fcomp\t$op">;
// Unary operations.
-multiclass FPUnary<SDNode OpNode, bits<8> opcode, string asmstring> {
+multiclass FPUnary<SDNode OpNode, Format fp, string asmstring> {
def _Fp32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src), OneArgFPRW,
[(set RFP32:$dst, (OpNode RFP32:$src))]>;
def _Fp64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src), OneArgFPRW,
[(set RFP64:$dst, (OpNode RFP64:$src))]>;
def _Fp80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src), OneArgFPRW,
[(set RFP80:$dst, (OpNode RFP80:$src))]>;
-def _F : FPI<opcode, RawFrm, (outs), (ins), asmstring>, D9;
+def _F : FPI<0xD9, fp, (outs), (ins), asmstring>;
}
let Defs = [FPSW] in {
-defm CHS : FPUnary<fneg, 0xE0, "fchs">;
-defm ABS : FPUnary<fabs, 0xE1, "fabs">;
-defm SQRT: FPUnary<fsqrt,0xFA, "fsqrt">;
-defm SIN : FPUnary<fsin, 0xFE, "fsin">;
-defm COS : FPUnary<fcos, 0xFF, "fcos">;
+defm CHS : FPUnary<fneg, MRM_E0, "fchs">;
+defm ABS : FPUnary<fabs, MRM_E1, "fabs">;
+let SchedRW = [WriteFSqrt] in {
+defm SQRT: FPUnary<fsqrt,MRM_FA, "fsqrt">;
+}
+defm SIN : FPUnary<fsin, MRM_FE, "fsin">;
+defm COS : FPUnary<fcos, MRM_FF, "fcos">;
let neverHasSideEffects = 1 in {
def TST_Fp32 : FpIf32<(outs), (ins RFP32:$src), OneArgFP, []>;
def TST_Fp64 : FpIf64<(outs), (ins RFP64:$src), OneArgFP, []>;
def TST_Fp80 : FpI_<(outs), (ins RFP80:$src), OneArgFP, []>;
}
-def TST_F : FPI<0xE4, RawFrm, (outs), (ins), "ftst">, D9;
+def TST_F : FPI<0xD9, MRM_E4, (outs), (ins), "ftst">;
} // Defs = [FPSW]
// Versions of FP instructions that take a single memory operand. Added for the
@@ -336,22 +353,22 @@ defm CMOVNP : FPCMov<X86_COND_NP>;
let Predicates = [HasCMov] in {
// These are not factored because there's no clean way to pass DA/DB.
-def CMOVB_F : FPI<0xC0, AddRegFrm, (outs RST:$op), (ins),
- "fcmovb\t{$op, %st(0)|st(0), $op}">, DA;
-def CMOVBE_F : FPI<0xD0, AddRegFrm, (outs RST:$op), (ins),
- "fcmovbe\t{$op, %st(0)|st(0), $op}">, DA;
-def CMOVE_F : FPI<0xC8, AddRegFrm, (outs RST:$op), (ins),
- "fcmove\t{$op, %st(0)|st(0), $op}">, DA;
-def CMOVP_F : FPI<0xD8, AddRegFrm, (outs RST:$op), (ins),
- "fcmovu\t{$op, %st(0)|st(0), $op}">, DA;
-def CMOVNB_F : FPI<0xC0, AddRegFrm, (outs RST:$op), (ins),
- "fcmovnb\t{$op, %st(0)|st(0), $op}">, DB;
-def CMOVNBE_F: FPI<0xD0, AddRegFrm, (outs RST:$op), (ins),
- "fcmovnbe\t{$op, %st(0)|st(0), $op}">, DB;
-def CMOVNE_F : FPI<0xC8, AddRegFrm, (outs RST:$op), (ins),
- "fcmovne\t{$op, %st(0)|st(0), $op}">, DB;
-def CMOVNP_F : FPI<0xD8, AddRegFrm, (outs RST:$op), (ins),
- "fcmovnu\t{$op, %st(0)|st(0), $op}">, DB;
+def CMOVB_F : FPI<0xDA, MRM0r, (outs RST:$op), (ins),
+ "fcmovb\t{$op, %st(0)|st(0), $op}">;
+def CMOVBE_F : FPI<0xDA, MRM2r, (outs RST:$op), (ins),
+ "fcmovbe\t{$op, %st(0)|st(0), $op}">;
+def CMOVE_F : FPI<0xDA, MRM1r, (outs RST:$op), (ins),
+ "fcmove\t{$op, %st(0)|st(0), $op}">;
+def CMOVP_F : FPI<0xDA, MRM3r, (outs RST:$op), (ins),
+ "fcmovu\t{$op, %st(0)|st(0), $op}">;
+def CMOVNB_F : FPI<0xDB, MRM0r, (outs RST:$op), (ins),
+ "fcmovnb\t{$op, %st(0)|st(0), $op}">;
+def CMOVNBE_F: FPI<0xDB, MRM2r, (outs RST:$op), (ins),
+ "fcmovnbe\t{$op, %st(0)|st(0), $op}">;
+def CMOVNE_F : FPI<0xDB, MRM1r, (outs RST:$op), (ins),
+ "fcmovne\t{$op, %st(0)|st(0), $op}">;
+def CMOVNP_F : FPI<0xDB, MRM3r, (outs RST:$op), (ins),
+ "fcmovnu\t{$op, %st(0)|st(0), $op}">;
} // Predicates = [HasCMov]
// Floating point loads & stores.
@@ -492,14 +509,10 @@ def ISTT_FP64m : FPI<0xDD, MRM1m, (outs), (ins i64mem:$dst),
// FP Stack manipulation instructions.
let SchedRW = [WriteMove] in {
-def LD_Frr : FPI<0xC0, AddRegFrm, (outs), (ins RST:$op), "fld\t$op",
- IIC_FLD>, D9;
-def ST_Frr : FPI<0xD0, AddRegFrm, (outs), (ins RST:$op), "fst\t$op",
- IIC_FST>, DD;
-def ST_FPrr : FPI<0xD8, AddRegFrm, (outs), (ins RST:$op), "fstp\t$op",
- IIC_FST>, DD;
-def XCH_F : FPI<0xC8, AddRegFrm, (outs), (ins RST:$op), "fxch\t$op",
- IIC_FXCH>, D9;
+def LD_Frr : FPI<0xD9, MRM0r, (outs), (ins RST:$op), "fld\t$op", IIC_FLD>;
+def ST_Frr : FPI<0xDD, MRM2r, (outs), (ins RST:$op), "fst\t$op", IIC_FST>;
+def ST_FPrr : FPI<0xDD, MRM3r, (outs), (ins RST:$op), "fstp\t$op", IIC_FST>;
+def XCH_F : FPI<0xD9, MRM1r, (outs), (ins RST:$op), "fxch\t$op", IIC_FXCH>;
}
// Floating point constant loads.
@@ -519,8 +532,8 @@ def LD_Fp180 : FpI_<(outs RFP80:$dst), (ins), ZeroArgFP,
}
let SchedRW = [WriteZero] in {
-def LD_F0 : FPI<0xEE, RawFrm, (outs), (ins), "fldz", IIC_FLDZ>, D9;
-def LD_F1 : FPI<0xE8, RawFrm, (outs), (ins), "fld1", IIC_FIST>, D9;
+def LD_F0 : FPI<0xD9, MRM_EE, (outs), (ins), "fldz", IIC_FLDZ>;
+def LD_F1 : FPI<0xD9, MRM_E8, (outs), (ins), "fld1", IIC_FIST>;
}
// Floating point compares.
@@ -546,40 +559,35 @@ def UCOM_FpIr80: FpI_<(outs), (ins RFP80:$lhs, RFP80:$rhs), CompareFP,
}
let Defs = [FPSW], Uses = [ST0] in {
-def UCOM_Fr : FPI<0xE0, AddRegFrm, // FPSW = cmp ST(0) with ST(i)
- (outs), (ins RST:$reg),
- "fucom\t$reg", IIC_FUCOM>, DD;
-def UCOM_FPr : FPI<0xE8, AddRegFrm, // FPSW = cmp ST(0) with ST(i), pop
- (outs), (ins RST:$reg),
- "fucomp\t$reg", IIC_FUCOM>, DD;
-def UCOM_FPPr : FPI<0xE9, RawFrm, // cmp ST(0) with ST(1), pop, pop
- (outs), (ins),
- "fucompp", IIC_FUCOM>, DA;
+def UCOM_Fr : FPI<0xDD, MRM4r, // FPSW = cmp ST(0) with ST(i)
+ (outs), (ins RST:$reg), "fucom\t$reg", IIC_FUCOM>;
+def UCOM_FPr : FPI<0xDD, MRM5r, // FPSW = cmp ST(0) with ST(i), pop
+ (outs), (ins RST:$reg), "fucomp\t$reg", IIC_FUCOM>;
+def UCOM_FPPr : FPI<0xDA, MRM_E9, // cmp ST(0) with ST(1), pop, pop
+ (outs), (ins), "fucompp", IIC_FUCOM>;
}
let Defs = [EFLAGS, FPSW], Uses = [ST0] in {
-def UCOM_FIr : FPI<0xE8, AddRegFrm, // CC = cmp ST(0) with ST(i)
- (outs), (ins RST:$reg),
- "fucomi\t$reg", IIC_FUCOMI>, DB;
-def UCOM_FIPr : FPI<0xE8, AddRegFrm, // CC = cmp ST(0) with ST(i), pop
- (outs), (ins RST:$reg),
- "fucompi\t$reg", IIC_FUCOMI>, DF;
+def UCOM_FIr : FPI<0xDB, MRM5r, // CC = cmp ST(0) with ST(i)
+ (outs), (ins RST:$reg), "fucomi\t$reg", IIC_FUCOMI>;
+def UCOM_FIPr : FPI<0xDF, MRM5r, // CC = cmp ST(0) with ST(i), pop
+ (outs), (ins RST:$reg), "fucompi\t$reg", IIC_FUCOMI>;
}
let Defs = [EFLAGS, FPSW] in {
-def COM_FIr : FPI<0xF0, AddRegFrm, (outs), (ins RST:$reg),
- "fcomi\t$reg", IIC_FCOMI>, DB;
-def COM_FIPr : FPI<0xF0, AddRegFrm, (outs), (ins RST:$reg),
- "fcompi\t$reg", IIC_FCOMI>, DF;
+def COM_FIr : FPI<0xDB, MRM6r, (outs), (ins RST:$reg),
+ "fcomi\t$reg", IIC_FCOMI>;
+def COM_FIPr : FPI<0xDF, MRM6r, (outs), (ins RST:$reg),
+ "fcompi\t$reg", IIC_FCOMI>;
}
} // SchedRW
// Floating point flag ops.
let SchedRW = [WriteALU] in {
let Defs = [AX], Uses = [FPSW] in
-def FNSTSW16r : I<0xE0, RawFrm, // AX = fp flags
+def FNSTSW16r : I<0xDF, MRM_E0, // AX = fp flags
(outs), (ins), "fnstsw\t{%ax|ax}",
- [(set AX, (X86fp_stsw FPSW))], IIC_FNSTSW>, DF;
+ [(set AX, (X86fp_stsw FPSW))], IIC_FNSTSW>;
def FNSTCW16m  : I<0xD9, MRM7m,                   // [mem16] = X87 control word
(outs), (ins i16mem:$dst), "fnstcw\t$dst",
@@ -593,50 +601,50 @@ def FLDCW16m : I<0xD9, MRM5m, // X87 control world = [mem16]
// FPU control instructions
let SchedRW = [WriteMicrocoded] in {
let Defs = [FPSW] in
-def FNINIT : I<0xE3, RawFrm, (outs), (ins), "fninit", [], IIC_FNINIT>, DB;
-def FFREE : FPI<0xC0, AddRegFrm, (outs), (ins RST:$reg),
- "ffree\t$reg", IIC_FFREE>, DD;
+def FNINIT : I<0xDB, MRM_E3, (outs), (ins), "fninit", [], IIC_FNINIT>;
+def FFREE : FPI<0xDD, MRM0r, (outs), (ins RST:$reg),
+ "ffree\t$reg", IIC_FFREE>;
// Clear exceptions
let Defs = [FPSW] in
-def FNCLEX : I<0xE2, RawFrm, (outs), (ins), "fnclex", [], IIC_FNCLEX>, DB;
+def FNCLEX : I<0xDB, MRM_E2, (outs), (ins), "fnclex", [], IIC_FNCLEX>;
} // SchedRW
// Operandless floating-point instructions for the disassembler.
let SchedRW = [WriteMicrocoded] in {
def WAIT : I<0x9B, RawFrm, (outs), (ins), "wait", [], IIC_WAIT>;
-def FNOP : I<0xD0, RawFrm, (outs), (ins), "fnop", [], IIC_FNOP>, D9;
-def FXAM : I<0xE5, RawFrm, (outs), (ins), "fxam", [], IIC_FXAM>, D9;
-def FLDL2T : I<0xE9, RawFrm, (outs), (ins), "fldl2t", [], IIC_FLDL>, D9;
-def FLDL2E : I<0xEA, RawFrm, (outs), (ins), "fldl2e", [], IIC_FLDL>, D9;
-def FLDPI : I<0xEB, RawFrm, (outs), (ins), "fldpi", [], IIC_FLDL>, D9;
-def FLDLG2 : I<0xEC, RawFrm, (outs), (ins), "fldlg2", [], IIC_FLDL>, D9;
-def FLDLN2 : I<0xED, RawFrm, (outs), (ins), "fldln2", [], IIC_FLDL>, D9;
-def F2XM1 : I<0xF0, RawFrm, (outs), (ins), "f2xm1", [], IIC_F2XM1>, D9;
-def FYL2X : I<0xF1, RawFrm, (outs), (ins), "fyl2x", [], IIC_FYL2X>, D9;
-def FPTAN : I<0xF2, RawFrm, (outs), (ins), "fptan", [], IIC_FPTAN>, D9;
-def FPATAN : I<0xF3, RawFrm, (outs), (ins), "fpatan", [], IIC_FPATAN>, D9;
-def FXTRACT : I<0xF4, RawFrm, (outs), (ins), "fxtract", [], IIC_FXTRACT>, D9;
-def FPREM1 : I<0xF5, RawFrm, (outs), (ins), "fprem1", [], IIC_FPREM1>, D9;
-def FDECSTP : I<0xF6, RawFrm, (outs), (ins), "fdecstp", [], IIC_FPSTP>, D9;
-def FINCSTP : I<0xF7, RawFrm, (outs), (ins), "fincstp", [], IIC_FPSTP>, D9;
-def FPREM : I<0xF8, RawFrm, (outs), (ins), "fprem", [], IIC_FPREM>, D9;
-def FYL2XP1 : I<0xF9, RawFrm, (outs), (ins), "fyl2xp1", [], IIC_FYL2XP1>, D9;
-def FSINCOS : I<0xFB, RawFrm, (outs), (ins), "fsincos", [], IIC_FSINCOS>, D9;
-def FRNDINT : I<0xFC, RawFrm, (outs), (ins), "frndint", [], IIC_FRNDINT>, D9;
-def FSCALE : I<0xFD, RawFrm, (outs), (ins), "fscale", [], IIC_FSCALE>, D9;
-def FCOMPP : I<0xD9, RawFrm, (outs), (ins), "fcompp", [], IIC_FCOMPP>, DE;
+def FNOP : I<0xD9, MRM_D0, (outs), (ins), "fnop", [], IIC_FNOP>;
+def FXAM : I<0xD9, MRM_E5, (outs), (ins), "fxam", [], IIC_FXAM>;
+def FLDL2T : I<0xD9, MRM_E9, (outs), (ins), "fldl2t", [], IIC_FLDL>;
+def FLDL2E : I<0xD9, MRM_EA, (outs), (ins), "fldl2e", [], IIC_FLDL>;
+def FLDPI : I<0xD9, MRM_EB, (outs), (ins), "fldpi", [], IIC_FLDL>;
+def FLDLG2 : I<0xD9, MRM_EC, (outs), (ins), "fldlg2", [], IIC_FLDL>;
+def FLDLN2 : I<0xD9, MRM_ED, (outs), (ins), "fldln2", [], IIC_FLDL>;
+def F2XM1 : I<0xD9, MRM_F0, (outs), (ins), "f2xm1", [], IIC_F2XM1>;
+def FYL2X : I<0xD9, MRM_F1, (outs), (ins), "fyl2x", [], IIC_FYL2X>;
+def FPTAN : I<0xD9, MRM_F2, (outs), (ins), "fptan", [], IIC_FPTAN>;
+def FPATAN : I<0xD9, MRM_F3, (outs), (ins), "fpatan", [], IIC_FPATAN>;
+def FXTRACT : I<0xD9, MRM_F4, (outs), (ins), "fxtract", [], IIC_FXTRACT>;
+def FPREM1 : I<0xD9, MRM_F5, (outs), (ins), "fprem1", [], IIC_FPREM1>;
+def FDECSTP : I<0xD9, MRM_F6, (outs), (ins), "fdecstp", [], IIC_FPSTP>;
+def FINCSTP : I<0xD9, MRM_F7, (outs), (ins), "fincstp", [], IIC_FPSTP>;
+def FPREM : I<0xD9, MRM_F8, (outs), (ins), "fprem", [], IIC_FPREM>;
+def FYL2XP1 : I<0xD9, MRM_F9, (outs), (ins), "fyl2xp1", [], IIC_FYL2XP1>;
+def FSINCOS : I<0xD9, MRM_FB, (outs), (ins), "fsincos", [], IIC_FSINCOS>;
+def FRNDINT : I<0xD9, MRM_FC, (outs), (ins), "frndint", [], IIC_FRNDINT>;
+def FSCALE : I<0xD9, MRM_FD, (outs), (ins), "fscale", [], IIC_FSCALE>;
+def FCOMPP : I<0xDE, MRM_D9, (outs), (ins), "fcompp", [], IIC_FCOMPP>;
def FXSAVE : I<0xAE, MRM0m, (outs opaque512mem:$dst), (ins),
"fxsave\t$dst", [], IIC_FXSAVE>, TB;
-def FXSAVE64 : I<0xAE, MRM0m, (outs opaque512mem:$dst), (ins),
- "fxsaveq\t$dst", [], IIC_FXSAVE>, TB, REX_W,
- Requires<[In64BitMode]>;
+def FXSAVE64 : RI<0xAE, MRM0m, (outs opaque512mem:$dst), (ins),
+ "fxsave{q|64}\t$dst", [], IIC_FXSAVE>, TB,
+ Requires<[In64BitMode]>;
def FXRSTOR : I<0xAE, MRM1m, (outs), (ins opaque512mem:$src),
"fxrstor\t$src", [], IIC_FXRSTOR>, TB;
-def FXRSTOR64 : I<0xAE, MRM1m, (outs), (ins opaque512mem:$src),
- "fxrstorq\t$src", [], IIC_FXRSTOR>, TB, REX_W,
+def FXRSTOR64 : RI<0xAE, MRM1m, (outs), (ins opaque512mem:$src),
+ "fxrstor{q|64}\t$src", [], IIC_FXRSTOR>, TB,
Requires<[In64BitMode]>;
} // SchedRW
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFormats.td b/contrib/llvm/lib/Target/X86/X86InstrFormats.td
index 0fd9011..8ef5f90 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFormats.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFormats.td
@@ -14,54 +14,48 @@
// Format specifies the encoding used by the instruction. This is part of the
// ad-hoc solution used to emit machine instruction encodings by our machine
// code emitter.
-class Format<bits<6> val> {
- bits<6> Value = val;
+class Format<bits<7> val> {
+ bits<7> Value = val;
}
def Pseudo : Format<0>; def RawFrm : Format<1>;
def AddRegFrm : Format<2>; def MRMDestReg : Format<3>;
def MRMDestMem : Format<4>; def MRMSrcReg : Format<5>;
-def MRMSrcMem : Format<6>;
+def MRMSrcMem : Format<6>; def RawFrmMemOffs : Format<7>;
+def RawFrmSrc : Format<8>; def RawFrmDst : Format<9>;
+def RawFrmDstSrc: Format<10>;
+def RawFrmImm8 : Format<11>;
+def RawFrmImm16 : Format<12>;
+def MRMXr : Format<14>; def MRMXm : Format<15>;
def MRM0r : Format<16>; def MRM1r : Format<17>; def MRM2r : Format<18>;
def MRM3r : Format<19>; def MRM4r : Format<20>; def MRM5r : Format<21>;
def MRM6r : Format<22>; def MRM7r : Format<23>;
def MRM0m : Format<24>; def MRM1m : Format<25>; def MRM2m : Format<26>;
def MRM3m : Format<27>; def MRM4m : Format<28>; def MRM5m : Format<29>;
def MRM6m : Format<30>; def MRM7m : Format<31>;
-def MRMInitReg : Format<32>;
-def MRM_C1 : Format<33>;
-def MRM_C2 : Format<34>;
-def MRM_C3 : Format<35>;
-def MRM_C4 : Format<36>;
-def MRM_C8 : Format<37>;
-def MRM_C9 : Format<38>;
-def MRM_CA : Format<39>;
-def MRM_CB : Format<40>;
-def MRM_E8 : Format<41>;
-def MRM_F0 : Format<42>;
-def RawFrmImm8 : Format<43>;
-def RawFrmImm16 : Format<44>;
-def MRM_F8 : Format<45>;
-def MRM_F9 : Format<46>;
-def MRM_D0 : Format<47>;
-def MRM_D1 : Format<48>;
-def MRM_D4 : Format<49>;
-def MRM_D5 : Format<50>;
-def MRM_D6 : Format<51>;
-def MRM_D8 : Format<52>;
-def MRM_D9 : Format<53>;
-def MRM_DA : Format<54>;
-def MRM_DB : Format<55>;
-def MRM_DC : Format<56>;
-def MRM_DD : Format<57>;
-def MRM_DE : Format<58>;
-def MRM_DF : Format<59>;
+def MRM_C0 : Format<32>; def MRM_C1 : Format<33>; def MRM_C2 : Format<34>;
+def MRM_C3 : Format<35>; def MRM_C4 : Format<36>; def MRM_C8 : Format<37>;
+def MRM_C9 : Format<38>; def MRM_CA : Format<39>; def MRM_CB : Format<40>;
+def MRM_D0 : Format<41>; def MRM_D1 : Format<42>; def MRM_D4 : Format<43>;
+def MRM_D5 : Format<44>; def MRM_D6 : Format<45>; def MRM_D8 : Format<46>;
+def MRM_D9 : Format<47>; def MRM_DA : Format<48>; def MRM_DB : Format<49>;
+def MRM_DC : Format<50>; def MRM_DD : Format<51>; def MRM_DE : Format<52>;
+def MRM_DF : Format<53>; def MRM_E0 : Format<54>; def MRM_E1 : Format<55>;
+def MRM_E2 : Format<56>; def MRM_E3 : Format<57>; def MRM_E4 : Format<58>;
+def MRM_E5 : Format<59>; def MRM_E8 : Format<60>; def MRM_E9 : Format<61>;
+def MRM_EA : Format<62>; def MRM_EB : Format<63>; def MRM_EC : Format<64>;
+def MRM_ED : Format<65>; def MRM_EE : Format<66>; def MRM_F0 : Format<67>;
+def MRM_F1 : Format<68>; def MRM_F2 : Format<69>; def MRM_F3 : Format<70>;
+def MRM_F4 : Format<71>; def MRM_F5 : Format<72>; def MRM_F6 : Format<73>;
+def MRM_F7 : Format<74>; def MRM_F8 : Format<75>; def MRM_F9 : Format<76>;
+def MRM_FA : Format<77>; def MRM_FB : Format<78>; def MRM_FC : Format<79>;
+def MRM_FD : Format<80>; def MRM_FE : Format<81>; def MRM_FF : Format<82>;
// ImmType - This specifies the immediate type used by an instruction. This is
// part of the ad-hoc solution used to emit machine instruction encodings by our
// machine code emitter.
-class ImmType<bits<3> val> {
- bits<3> Value = val;
+class ImmType<bits<4> val> {
+ bits<4> Value = val;
}
def NoImm : ImmType<0>;
def Imm8 : ImmType<1>;
@@ -70,7 +64,8 @@ def Imm16 : ImmType<3>;
def Imm16PCRel : ImmType<4>;
def Imm32 : ImmType<5>;
def Imm32PCRel : ImmType<6>;
-def Imm64 : ImmType<7>;
+def Imm32S : ImmType<7>;
+def Imm64 : ImmType<8>;
// FPFormat - This specifies what form this FP instruction has. This is used by
// the Floating-Point stackifier pass.
@@ -110,59 +105,101 @@ def CD8VT2 : CD8VForm<5>; // v := 2
def CD8VT4 : CD8VForm<6>; // v := 4
def CD8VT8 : CD8VForm<7>; // v := 8
+// Class specifying the prefix used as an opcode extension.
+class Prefix<bits<3> val> {
+ bits<3> Value = val;
+}
+def NoPrfx : Prefix<0>;
+def PS : Prefix<1>;
+def PD : Prefix<2>;
+def XS : Prefix<3>;
+def XD : Prefix<4>;
+
+// Class specifying the opcode map.
+class Map<bits<3> val> {
+ bits<3> Value = val;
+}
+def OB : Map<0>;
+def TB : Map<1>;
+def T8 : Map<2>;
+def TA : Map<3>;
+def XOP8 : Map<4>;
+def XOP9 : Map<5>;
+def XOPA : Map<6>;
+
+// Class specifying the encoding
+class Encoding<bits<2> val> {
+ bits<2> Value = val;
+}
+def EncNormal : Encoding<0>;
+def EncVEX : Encoding<1>;
+def EncXOP : Encoding<2>;
+def EncEVEX : Encoding<3>;
+
+// Operand size for encodings that change based on mode.
+class OperandSize<bits<2> val> {
+ bits<2> Value = val;
+}
+def OpSizeFixed : OperandSize<0>; // Never needs a 0x66 prefix.
+def OpSize16 : OperandSize<1>; // Needs 0x66 prefix in 32-bit mode.
+def OpSize32 : OperandSize<2>; // Needs 0x66 prefix in 16-bit mode.
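As a rough illustration of what these three values mean for the encoder, here is a hedged sketch (a hypothetical helper, not the actual LLVM emitter) of when the 0x66 operand-size override would be needed, assuming only the rule stated in the comments plus the usual x86 default operand sizes per mode:

// Hedged sketch, not the real LLVM encoder: whether the 0x66 operand-size
// override prefix is needed, given the instruction's OperandSize value and
// the bitness of the current mode (16, 32 or 64).
enum class OperandSize { Fixed, Size16, Size32 };

bool needsOpSizePrefix(OperandSize OpSize, unsigned ModeBits) {
  switch (OpSize) {
  case OperandSize::Fixed:  return false;           // encoding never changes
  case OperandSize::Size16: return ModeBits != 16;  // 16-bit data outside 16-bit mode
  case OperandSize::Size32: return ModeBits == 16;  // 32-bit data in 16-bit mode
  }
  return false;
}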
+
// Prefix byte classes which are used to indicate to the ad-hoc machine code
// emitter that various prefix bytes are required.
-class OpSize { bit hasOpSizePrefix = 1; }
+class OpSize16 { OperandSize OpSize = OpSize16; }
+class OpSize32 { OperandSize OpSize = OpSize32; }
class AdSize { bit hasAdSizePrefix = 1; }
class REX_W { bit hasREX_WPrefix = 1; }
class LOCK { bit hasLockPrefix = 1; }
-class SegFS { bits<2> SegOvrBits = 1; }
-class SegGS { bits<2> SegOvrBits = 2; }
-class TB { bits<5> Prefix = 1; }
-class REP { bits<5> Prefix = 2; }
-class D8 { bits<5> Prefix = 3; }
-class D9 { bits<5> Prefix = 4; }
-class DA { bits<5> Prefix = 5; }
-class DB { bits<5> Prefix = 6; }
-class DC { bits<5> Prefix = 7; }
-class DD { bits<5> Prefix = 8; }
-class DE { bits<5> Prefix = 9; }
-class DF { bits<5> Prefix = 10; }
-class XD { bits<5> Prefix = 11; }
-class XS { bits<5> Prefix = 12; }
-class T8 { bits<5> Prefix = 13; }
-class TA { bits<5> Prefix = 14; }
-class A6 { bits<5> Prefix = 15; }
-class A7 { bits<5> Prefix = 16; }
-class T8XD { bits<5> Prefix = 17; }
-class T8XS { bits<5> Prefix = 18; }
-class TAXD { bits<5> Prefix = 19; }
-class XOP8 { bits<5> Prefix = 20; }
-class XOP9 { bits<5> Prefix = 21; }
-class XOPA { bits<5> Prefix = 22; }
-class VEX { bit hasVEXPrefix = 1; }
+class REP { bit hasREPPrefix = 1; }
+class TB { Map OpMap = TB; }
+class T8 { Map OpMap = T8; }
+class TA { Map OpMap = TA; }
+class XOP8 { Map OpMap = XOP8; Prefix OpPrefix = PS; }
+class XOP9 { Map OpMap = XOP9; Prefix OpPrefix = PS; }
+class XOPA { Map OpMap = XOPA; Prefix OpPrefix = PS; }
+class OBXS { Prefix OpPrefix = XS; }
+class PS : TB { Prefix OpPrefix = PS; }
+class PD : TB { Prefix OpPrefix = PD; }
+class XD : TB { Prefix OpPrefix = XD; }
+class XS : TB { Prefix OpPrefix = XS; }
+class T8PS : T8 { Prefix OpPrefix = PS; }
+class T8PD : T8 { Prefix OpPrefix = PD; }
+class T8XD : T8 { Prefix OpPrefix = XD; }
+class T8XS : T8 { Prefix OpPrefix = XS; }
+class TAPS : TA { Prefix OpPrefix = PS; }
+class TAPD : TA { Prefix OpPrefix = PD; }
+class TAXD : TA { Prefix OpPrefix = XD; }
+class VEX { Encoding OpEnc = EncVEX; }
class VEX_W { bit hasVEX_WPrefix = 1; }
-class VEX_4V : VEX { bit hasVEX_4VPrefix = 1; }
-class VEX_4VOp3 : VEX { bit hasVEX_4VOp3Prefix = 1; }
+class VEX_4V : VEX { bit hasVEX_4V = 1; }
+class VEX_4VOp3 : VEX { bit hasVEX_4VOp3 = 1; }
class VEX_I8IMM { bit hasVEX_i8ImmReg = 1; }
class VEX_L { bit hasVEX_L = 1; }
class VEX_LIG { bit ignoresVEX_L = 1; }
-class EVEX : VEX { bit hasEVEXPrefix = 1; }
-class EVEX_4V : VEX_4V { bit hasEVEXPrefix = 1; }
+class EVEX : VEX { Encoding OpEnc = EncEVEX; }
+class EVEX_4V : VEX_4V { Encoding OpEnc = EncEVEX; }
class EVEX_K { bit hasEVEX_K = 1; }
class EVEX_KZ : EVEX_K { bit hasEVEX_Z = 1; }
class EVEX_B { bit hasEVEX_B = 1; }
+class EVEX_RC { bit hasEVEX_RC = 1; }
class EVEX_V512 { bit hasEVEX_L2 = 1; bit hasVEX_L = 0; }
+class EVEX_V256 { bit hasEVEX_L2 = 0; bit hasVEX_L = 1; }
+class EVEX_V128 { bit hasEVEX_L2 = 0; bit hasVEX_L = 0; }
+
+// Specify AVX512 8-bit compressed displacement encoding based on the vector
+// element size in bits (8, 16, 32, 64) and the CDisp8 form.
class EVEX_CD8<int esize, CD8VForm form> {
- bits<2> EVEX_CD8E = !if(!eq(esize, 8), 0b00,
- !if(!eq(esize, 16), 0b01,
- !if(!eq(esize, 32), 0b10,
- !if(!eq(esize, 64), 0b11, ?))));
- bits<3> EVEX_CD8V = form.Value;
+ int CD8_EltSize = !srl(esize, 3);
+ bits<3> CD8_Form = form.Value;
}
+
class Has3DNow0F0FOpcode { bit has3DNow0F0FOpcode = 1; }
class MemOp4 { bit hasMemOp4Prefix = 1; }
-class XOP { bit hasXOP_Prefix = 1; }
+class XOP { Encoding OpEnc = EncXOP; }
+class XOP_4V : XOP { bit hasVEX_4V = 1; }
+class XOP_4VOp3 : XOP { bit hasVEX_4VOp3 = 1; }
+
class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
string AsmStr,
InstrItinClass itin,
@@ -172,7 +209,7 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
bits<8> Opcode = opcod;
Format Form = f;
- bits<6> FormBits = Form.Value;
+ bits<7> FormBits = Form.Value;
ImmType ImmT = i;
dag OutOperandList = outs;
@@ -187,64 +224,93 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
//
// Attributes specific to X86 instructions...
//
- bit hasOpSizePrefix = 0; // Does this inst have a 0x66 prefix?
+  bit ForceDisassemble = 0; // Force instruction to disassemble even though it's
+                            // isCodeGenOnly. Needed to hide an ambiguous
+                            // AsmString from the parser, but still disassemble.
+
+  OperandSize OpSize = OpSizeFixed; // Does this instruction's encoding change
+                                    // based on the operand size of the mode?
+ bits<2> OpSizeBits = OpSize.Value;
bit hasAdSizePrefix = 0; // Does this inst have a 0x67 prefix?
- bits<5> Prefix = 0; // Which prefix byte does this inst have?
+ Prefix OpPrefix = NoPrfx; // Which prefix byte does this inst have?
+ bits<3> OpPrefixBits = OpPrefix.Value;
+ Map OpMap = OB; // Which opcode map does this inst have?
+ bits<3> OpMapBits = OpMap.Value;
bit hasREX_WPrefix = 0; // Does this inst require the REX.W prefix?
FPFormat FPForm = NotFP; // What flavor of FP instruction is this?
bit hasLockPrefix = 0; // Does this inst have a 0xF0 prefix?
- bits<2> SegOvrBits = 0; // Segment override prefix.
Domain ExeDomain = d;
- bit hasVEXPrefix = 0; // Does this inst require a VEX prefix?
+ bit hasREPPrefix = 0; // Does this inst have a REP prefix?
+ Encoding OpEnc = EncNormal; // Encoding used by this instruction
+ bits<2> OpEncBits = OpEnc.Value;
bit hasVEX_WPrefix = 0; // Does this inst set the VEX_W field?
- bit hasVEX_4VPrefix = 0; // Does this inst require the VEX.VVVV field?
- bit hasVEX_4VOp3Prefix = 0; // Does this inst require the VEX.VVVV field to
- // encode the third operand?
+ bit hasVEX_4V = 0; // Does this inst require the VEX.VVVV field?
+ bit hasVEX_4VOp3 = 0; // Does this inst require the VEX.VVVV field to
+ // encode the third operand?
bit hasVEX_i8ImmReg = 0; // Does this inst require the last source register
// to be encoded in a immediate field?
bit hasVEX_L = 0; // Does this inst use large (256-bit) registers?
bit ignoresVEX_L = 0; // Does this instruction ignore the L-bit
- bit hasEVEXPrefix = 0; // Does this inst require EVEX form?
bit hasEVEX_K = 0; // Does this inst require masking?
bit hasEVEX_Z = 0; // Does this inst set the EVEX_Z field?
bit hasEVEX_L2 = 0; // Does this inst set the EVEX_L2 field?
bit hasEVEX_B = 0; // Does this inst set the EVEX_B field?
- bits<2> EVEX_CD8E = 0; // Compressed disp8 form - element-size.
- bits<3> EVEX_CD8V = 0; // Compressed disp8 form - vector-width.
+ bits<3> CD8_Form = 0; // Compressed disp8 form - vector-width.
+ // Declare it int rather than bits<4> so that all bits are defined when
+ // assigning to bits<7>.
+ int CD8_EltSize = 0; // Compressed disp8 form - element-size in bytes.
bit has3DNow0F0FOpcode =0;// Wacky 3dNow! encoding?
bit hasMemOp4Prefix = 0; // Same bit as VEX_W, but used for swapping operands
- bit hasXOP_Prefix = 0; // Does this inst require an XOP prefix?
+ bit hasEVEX_RC = 0; // Explicitly specified rounding control in FP instruction.
+
+ bits<2> EVEX_LL;
+ let EVEX_LL{0} = hasVEX_L;
+ let EVEX_LL{1} = hasEVEX_L2;
+ // Vector size in bytes.
+ bits<7> VectSize = !shl(16, EVEX_LL);
+
+ // The scaling factor for AVX512's compressed displacement is either
+ // - the size of a power-of-two number of elements or
+ // - the size of a single element for broadcasts or
+ // - the total vector size divided by a power-of-two number.
+ // Possible values are: 0 (non-AVX512 inst), 1, 2, 4, 8, 16, 32 and 64.
+ bits<7> CD8_Scale = !if (!eq (OpEnc.Value, EncEVEX.Value),
+ !if (CD8_Form{2},
+ !shl(CD8_EltSize, CD8_Form{1-0}),
+ !if (hasEVEX_B,
+ CD8_EltSize,
+ !srl(VectSize, CD8_Form{1-0}))), 0);
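The !if chain above is easier to read as straight-line code. Below is a hedged C++ restatement (illustration only; the function and parameter names are invented) together with one worked case, assuming the standard EVEX disp8*N convention that the encoded 8-bit displacement is multiplied by this scale:

// Hedged sketch mirroring the CD8_Scale formula above (illustration only).
// form is the CD8VForm value, eltSizeBytes is CD8_EltSize, and vectSizeBytes
// is the vector size implied by the EVEX.L'L bits.
unsigned cd8Scale(bool isEVEX, unsigned form, bool broadcast,
                  unsigned eltSizeBytes, unsigned vectSizeBytes) {
  if (!isEVEX)
    return 0;                          // non-AVX512 instruction
  if (form & 4)                        // tuple forms: 1, 2, 4 or 8 elements
    return eltSizeBytes << (form & 3);
  if (broadcast)                       // broadcasts use a single element
    return eltSizeBytes;
  return vectSizeBytes >> (form & 3);  // full/half/quarter/eighth of the vector
}

// Example: a full 512-bit vector memory operand with 32-bit elements
// (EVEX_CD8<32, CD8VF>) gives cd8Scale(true, 0, false, 4, 64) == 64, so a
// one-byte displacement of 1 in the encoding stands for 64 bytes.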
// TSFlags layout should be kept in sync with X86InstrInfo.h.
- let TSFlags{5-0} = FormBits;
- let TSFlags{6} = hasOpSizePrefix;
- let TSFlags{7} = hasAdSizePrefix;
- let TSFlags{12-8} = Prefix;
- let TSFlags{13} = hasREX_WPrefix;
- let TSFlags{16-14} = ImmT.Value;
- let TSFlags{19-17} = FPForm.Value;
- let TSFlags{20} = hasLockPrefix;
- let TSFlags{22-21} = SegOvrBits;
- let TSFlags{24-23} = ExeDomain.Value;
- let TSFlags{32-25} = Opcode;
- let TSFlags{33} = hasVEXPrefix;
- let TSFlags{34} = hasVEX_WPrefix;
- let TSFlags{35} = hasVEX_4VPrefix;
- let TSFlags{36} = hasVEX_4VOp3Prefix;
- let TSFlags{37} = hasVEX_i8ImmReg;
- let TSFlags{38} = hasVEX_L;
- let TSFlags{39} = ignoresVEX_L;
- let TSFlags{40} = hasEVEXPrefix;
- let TSFlags{41} = hasEVEX_K;
- let TSFlags{42} = hasEVEX_Z;
- let TSFlags{43} = hasEVEX_L2;
- let TSFlags{44} = hasEVEX_B;
- let TSFlags{46-45} = EVEX_CD8E;
- let TSFlags{49-47} = EVEX_CD8V;
- let TSFlags{50} = has3DNow0F0FOpcode;
- let TSFlags{51} = hasMemOp4Prefix;
- let TSFlags{52} = hasXOP_Prefix;
+ let TSFlags{6-0} = FormBits;
+ let TSFlags{8-7} = OpSizeBits;
+ let TSFlags{9} = hasAdSizePrefix;
+ let TSFlags{12-10} = OpPrefixBits;
+ let TSFlags{15-13} = OpMapBits;
+ let TSFlags{16} = hasREX_WPrefix;
+ let TSFlags{20-17} = ImmT.Value;
+ let TSFlags{23-21} = FPForm.Value;
+ let TSFlags{24} = hasLockPrefix;
+ let TSFlags{25} = hasREPPrefix;
+ let TSFlags{27-26} = ExeDomain.Value;
+ let TSFlags{29-28} = OpEncBits;
+ let TSFlags{37-30} = Opcode;
+ let TSFlags{38} = hasVEX_WPrefix;
+ let TSFlags{39} = hasVEX_4V;
+ let TSFlags{40} = hasVEX_4VOp3;
+ let TSFlags{41} = hasVEX_i8ImmReg;
+ let TSFlags{42} = hasVEX_L;
+ let TSFlags{43} = ignoresVEX_L;
+ let TSFlags{44} = hasEVEX_K;
+ let TSFlags{45} = hasEVEX_Z;
+ let TSFlags{46} = hasEVEX_L2;
+ let TSFlags{47} = hasEVEX_B;
+ // If we run out of TSFlags bits, it's possible to encode this in 3 bits.
+ let TSFlags{54-48} = CD8_Scale;
+ let TSFlags{55} = has3DNow0F0FOpcode;
+ let TSFlags{56} = hasMemOp4Prefix;
+ let TSFlags{57} = hasEVEX_RC;
}
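On the C++ side these fields are recovered by shift-and-mask; the authoritative masks live alongside X86InstrInfo.h, as the comment above notes. A hedged sketch with invented helper names, following the bit positions listed in the new layout:

// Hedged sketch, not the real LLVM definitions: extracting a few of the
// relocated TSFlags fields by shift/mask.
#include <cstdint>

unsigned getForm(uint64_t TSFlags)     { return  TSFlags        & 0x7f; }  // bits 6-0
unsigned getOpSize(uint64_t TSFlags)   { return (TSFlags >> 7)  & 0x3;  }  // bits 8-7
unsigned getOpPrefix(uint64_t TSFlags) { return (TSFlags >> 10) & 0x7;  }  // bits 12-10
unsigned getOpMap(uint64_t TSFlags)    { return (TSFlags >> 13) & 0x7;  }  // bits 15-13
unsigned getOpcode(uint64_t TSFlags)   { return (TSFlags >> 30) & 0xff; }  // bits 37-30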
class PseudoI<dag oops, dag iops, list<dag> pattern>
@@ -284,6 +350,12 @@ class Ii32<bits<8> o, Format f, dag outs, dag ins, string asm,
let Pattern = pattern;
let CodeSize = 3;
}
+class Ii32S<bits<8> o, Format f, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : X86Inst<o, f, Imm32S, outs, ins, asm, itin> {
+ let Pattern = pattern;
+ let CodeSize = 3;
+}
class Ii16PCRel<bits<8> o, Format f, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
@@ -333,73 +405,83 @@ class Iseg32 <bits<8> o, Format f, dag outs, dag ins, string asm,
let CodeSize = 3;
}
-def __xs : XS;
-def __xd : XD;
-
// SI - SSE 1 & 2 scalar instructions
class SI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin> {
- let Predicates = !if(hasEVEXPrefix /* EVEX */, [HasAVX512],
- !if(hasVEXPrefix /* VEX */, [UseAVX],
- !if(!eq(Prefix, __xs.Prefix), [UseSSE1],
- !if(!eq(Prefix, __xd.Prefix), [UseSSE2],
- !if(hasOpSizePrefix, [UseSSE2], [UseSSE1])))));
+ let Predicates = !if(!eq(OpEnc.Value, EncEVEX.Value), [HasAVX512],
+ !if(!eq(OpEnc.Value, EncVEX.Value), [UseAVX],
+ !if(!eq(OpPrefix.Value, XS.Value), [UseSSE1],
+ !if(!eq(OpPrefix.Value, XD.Value), [UseSSE2],
+ !if(!eq(OpPrefix.Value, PD.Value), [UseSSE2],
+ [UseSSE1])))));
// AVX instructions have a 'v' prefix in the mnemonic
- let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm);
+ let AsmString = !if(!eq(OpEnc.Value, EncEVEX.Value), !strconcat("v", asm),
+ !if(!eq(OpEnc.Value, EncVEX.Value), !strconcat("v", asm),
+ asm));
}
// SIi8 - SSE 1 & 2 scalar instructions
class SIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: Ii8<o, F, outs, ins, asm, pattern, itin> {
- let Predicates = !if(hasEVEXPrefix /* EVEX */, [HasAVX512],
- !if(hasVEXPrefix /* VEX */, [UseAVX],
- !if(!eq(Prefix, __xs.Prefix), [UseSSE1], [UseSSE2])));
+ let Predicates = !if(!eq(OpEnc.Value, EncEVEX.Value), [HasAVX512],
+ !if(!eq(OpEnc.Value, EncVEX.Value), [UseAVX],
+ !if(!eq(OpPrefix.Value, XS.Value), [UseSSE1],
+ [UseSSE2])));
// AVX instructions have a 'v' prefix in the mnemonic
- let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm);
+ let AsmString = !if(!eq(OpEnc.Value, EncEVEX.Value), !strconcat("v", asm),
+ !if(!eq(OpEnc.Value, EncVEX.Value), !strconcat("v", asm),
+ asm));
}
// PI - SSE 1 & 2 packed instructions
class PI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern,
InstrItinClass itin, Domain d>
: I<o, F, outs, ins, asm, pattern, itin, d> {
- let Predicates = !if(hasEVEXPrefix /* EVEX */, [HasAVX512],
- !if(hasVEXPrefix /* VEX */, [HasAVX],
- !if(hasOpSizePrefix /* OpSize */, [UseSSE2], [UseSSE1])));
+ let Predicates = !if(!eq(OpEnc.Value, EncEVEX.Value), [HasAVX512],
+ !if(!eq(OpEnc.Value, EncVEX.Value), [HasAVX],
+ !if(!eq(OpPrefix.Value, PD.Value), [UseSSE2],
+ [UseSSE1])));
// AVX instructions have a 'v' prefix in the mnemonic
- let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm);
+ let AsmString = !if(!eq(OpEnc.Value, EncEVEX.Value), !strconcat("v", asm),
+ !if(!eq(OpEnc.Value, EncVEX.Value), !strconcat("v", asm),
+ asm));
}
// MMXPI - SSE 1 & 2 packed instructions with MMX operands
class MMXPI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern,
InstrItinClass itin, Domain d>
: I<o, F, outs, ins, asm, pattern, itin, d> {
- let Predicates = !if(hasOpSizePrefix /* OpSize */, [HasSSE2], [HasSSE1]);
+ let Predicates = !if(!eq(OpPrefix.Value, PD.Value), [HasSSE2],
+ [HasSSE1]);
}
// PIi8 - SSE 1 & 2 packed instructions with immediate
class PIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin, Domain d>
: Ii8<o, F, outs, ins, asm, pattern, itin, d> {
- let Predicates = !if(hasEVEXPrefix /* EVEX */, [HasAVX512],
- !if(hasVEXPrefix /* VEX */, [HasAVX],
- !if(hasOpSizePrefix /* OpSize */, [UseSSE2], [UseSSE1])));
+ let Predicates = !if(!eq(OpEnc.Value, EncEVEX.Value), [HasAVX512],
+ !if(!eq(OpEnc.Value, EncVEX.Value), [HasAVX],
+ !if(!eq(OpPrefix.Value, PD.Value), [UseSSE2],
+ [UseSSE1])));
// AVX instructions have a 'v' prefix in the mnemonic
- let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm);
+ let AsmString = !if(!eq(OpEnc.Value, EncEVEX.Value), !strconcat("v", asm),
+ !if(!eq(OpEnc.Value, EncVEX.Value), !strconcat("v", asm),
+ asm));
}
// SSE1 Instruction Templates:
//
// SSI - SSE1 instructions with XS prefix.
-// PSI - SSE1 instructions with TB prefix.
-// PSIi8 - SSE1 instructions with ImmT == Imm8 and TB prefix.
+// PSI - SSE1 instructions with PS prefix.
+// PSIi8 - SSE1 instructions with ImmT == Imm8 and PS prefix.
// VSSI - SSE1 instructions with XS prefix in AVX form.
-// VPSI - SSE1 instructions with TB prefix in AVX form, packed single.
+// VPSI - SSE1 instructions with PS prefix in AVX form, packed single.
class SSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
@@ -409,11 +491,11 @@ class SSIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
: Ii8<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[UseSSE1]>;
class PSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, TB,
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, PS,
Requires<[UseSSE1]>;
class PSIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, TB,
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, PS,
Requires<[UseSSE1]>;
class VSSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
@@ -421,7 +503,7 @@ class VSSI<bits<8> o, Format F, dag outs, dag ins, string asm,
Requires<[HasAVX]>;
class VPSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin, SSEPackedSingle>, TB,
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin, SSEPackedSingle>, PS,
Requires<[HasAVX]>;
// SSE2 Instruction Templates:
@@ -430,13 +512,13 @@ class VPSI<bits<8> o, Format F, dag outs, dag ins, string asm,
// SDIi8 - SSE2 instructions with ImmT == Imm8 and XD prefix.
// S2SI - SSE2 instructions with XS prefix.
// SSDIi8 - SSE2 instructions with ImmT == Imm8 and XS prefix.
-// PDI - SSE2 instructions with TB and OpSize prefixes, packed double domain.
-// PDIi8 - SSE2 instructions with ImmT == Imm8 and TB and OpSize prefixes.
+// PDI - SSE2 instructions with PD prefix, packed double domain.
+// PDIi8 - SSE2 instructions with ImmT == Imm8 and PD prefix.
// VSDI - SSE2 scalar instructions with XD prefix in AVX form.
-// VPDI - SSE2 vector instructions with TB and OpSize prefixes in AVX form,
+// VPDI - SSE2 vector instructions with PD prefix in AVX form,
// packed double domain.
-// VS2I - SSE2 scalar instructions with TB and OpSize prefixes in AVX form.
-// S2I - SSE2 scalar instructions with TB and OpSize prefixes.
+// VS2I - SSE2 scalar instructions with PD prefix in AVX form.
+// S2I - SSE2 scalar instructions with PD prefix.
// MMXSDIi8 - SSE2 instructions with ImmT == Imm8 and XD prefix as well as
// MMX operands.
// MMXSSDIi8 - SSE2 instructions with ImmT == Imm8 and XS prefix as well as
@@ -456,11 +538,11 @@ class S2SIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
: Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[UseSSE2]>;
class PDI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, TB, OpSize,
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, PD,
Requires<[UseSSE2]>;
class PDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, TB, OpSize,
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, PD,
Requires<[UseSSE2]>;
class VSDI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
@@ -472,16 +554,15 @@ class VS2SI<bits<8> o, Format F, dag outs, dag ins, string asm,
Requires<[HasAVX]>;
class VPDI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin, SSEPackedDouble>, TB,
- OpSize, Requires<[HasAVX]>;
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin, SSEPackedDouble>,
+ PD, Requires<[HasAVX]>;
class VS2I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin>, TB,
- OpSize, Requires<[UseAVX]>;
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin>, PD,
+ Requires<[UseAVX]>;
class S2I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin>, TB,
- OpSize, Requires<[UseSSE2]>;
+ : I<o, F, outs, ins, asm, pattern, itin>, PD, Requires<[UseSSE2]>;
class MMXSDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: Ii8<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[HasSSE2]>;
@@ -491,7 +572,7 @@ class MMXS2SIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
// SSE3 Instruction Templates:
//
-// S3I - SSE3 instructions with TB and OpSize prefixes.
+// S3I - SSE3 instructions with PD prefixes.
// S3SI - SSE3 instructions with XS prefix.
// S3DI - SSE3 instructions with XD prefix.
@@ -505,7 +586,7 @@ class S3DI<bits<8> o, Format F, dag outs, dag ins, string asm,
Requires<[UseSSE3]>;
class S3I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, TB, OpSize,
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, PD,
Requires<[UseSSE3]>;
@@ -522,19 +603,19 @@ class S3I<bits<8> o, Format F, dag outs, dag ins, string asm,
class SS38I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8,
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8PD,
Requires<[UseSSSE3]>;
class SS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TAPD,
Requires<[UseSSSE3]>;
class MMXSS38I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8,
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8PS,
Requires<[HasSSSE3]>;
class MMXSS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TAPS,
Requires<[HasSSSE3]>;
// SSE4.1 Instruction Templates:
@@ -544,11 +625,11 @@ class MMXSS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
//
class SS48I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8,
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8PD,
Requires<[UseSSE41]>;
class SS4AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TAPD,
Requires<[UseSSE41]>;
// SSE4.2 Instruction Templates:
@@ -556,7 +637,7 @@ class SS4AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
// SS428I - SSE 4.2 instructions with T8 prefix.
class SS428I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8,
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8PD,
Requires<[UseSSE42]>;
// SS42FI - SSE 4.2 instructions with T8XD prefix.
@@ -568,53 +649,53 @@ class SS42FI<bits<8> o, Format F, dag outs, dag ins, string asm,
// SS42AI = SSE 4.2 instructions with TA prefix
class SS42AI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TAPD,
Requires<[UseSSE42]>;
// AVX Instruction Templates:
// Instructions introduced in AVX (no SSE equivalent forms)
//
-// AVX8I - AVX instructions with T8 and OpSize prefix.
-// AVXAIi8 - AVX instructions with TA, OpSize prefix and ImmT = Imm8.
+// AVX8I - AVX instructions with T8PD prefix.
+// AVXAIi8 - AVX instructions with TAPD prefix and ImmT = Imm8.
class AVX8I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8, OpSize,
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8PD,
Requires<[HasAVX]>;
class AVXAIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, OpSize,
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TAPD,
Requires<[HasAVX]>;
// AVX2 Instruction Templates:
// Instructions introduced in AVX2 (no SSE equivalent forms)
//
-// AVX28I - AVX2 instructions with T8 and OpSize prefix.
-// AVX2AIi8 - AVX2 instructions with TA, OpSize prefix and ImmT = Imm8.
+// AVX28I - AVX2 instructions with T8PD prefix.
+// AVX2AIi8 - AVX2 instructions with TAPD prefix and ImmT = Imm8.
class AVX28I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8, OpSize,
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8PD,
Requires<[HasAVX2]>;
class AVX2AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, OpSize,
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TAPD,
Requires<[HasAVX2]>;
// AVX-512 Instruction Templates:
// Instructions introduced in AVX-512 (no SSE equivalent forms)
//
-// AVX5128I - AVX-512 instructions with T8 and OpSize prefix.
-// AVX512AIi8 - AVX-512 instructions with TA, OpSize prefix and ImmT = Imm8.
-// AVX512PDI - AVX-512 instructions with TB, OpSize, double packed.
-// AVX512PSI - AVX-512 instructions with TB, single packed.
+// AVX5128I - AVX-512 instructions with T8PD prefix.
+// AVX512AIi8 - AVX-512 instructions with TAPD prefix and ImmT = Imm8.
+// AVX512PDI - AVX-512 instructions with PD, double packed.
+// AVX512PSI - AVX-512 instructions with PS, single packed.
// AVX512XS8I - AVX-512 instructions with T8 and XS prefixes.
// AVX512XSI - AVX-512 instructions with XS prefix, generic domain.
-// AVX512BI - AVX-512 instructions with TB, OpSize, int packed domain.
-// AVX512SI - AVX-512 scalar instructions with TB and OpSize prefixes.
+// AVX512BI - AVX-512 instructions with PD, int packed domain.
+// AVX512SI - AVX-512 scalar instructions with PD prefix.
class AVX5128I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8, OpSize,
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8PD,
Requires<[HasAVX512]>;
class AVX512XS8I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
@@ -630,42 +711,38 @@ class AVX512XDI<bits<8> o, Format F, dag outs, dag ins, string asm,
Requires<[HasAVX512]>;
class AVX512BI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TB, OpSize,
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, PD,
Requires<[HasAVX512]>;
class AVX512BIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TB, OpSize,
- Requires<[HasAVX512]>;
-class AVX512SI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TB, OpSize,
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, PD,
Requires<[HasAVX512]>;
class AVX512AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, OpSize,
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TAPD,
Requires<[HasAVX512]>;
class AVX512Ii8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TB,
- Requires<[HasAVX512]>;
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>,
+ Requires<[HasAVX512]>;
class AVX512PDI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, TB,
- OpSize, Requires<[HasAVX512]>;
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, PD,
+ Requires<[HasAVX512]>;
class AVX512PSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, TB,
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, PS,
Requires<[HasAVX512]>;
class AVX512PIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, Domain d, InstrItinClass itin = NoItinerary>
- : Ii8<o, F, outs, ins, asm, pattern, itin, d>, TB, Requires<[HasAVX512]>;
+ : Ii8<o, F, outs, ins, asm, pattern, itin, d>, Requires<[HasAVX512]>;
class AVX512PI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, Domain d, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin, d>, TB, Requires<[HasAVX512]>;
+ : I<o, F, outs, ins, asm, pattern, itin, d>, Requires<[HasAVX512]>;
class AVX512FMA3<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin>, T8,
- OpSize, EVEX_4V, Requires<[HasAVX512]>;
+ : I<o, F, outs, ins, asm, pattern, itin>, T8PD,
+ EVEX_4V, Requires<[HasAVX512]>;
// AES Instruction Templates:
//
@@ -673,54 +750,54 @@ class AVX512FMA3<bits<8> o, Format F, dag outs, dag ins, string asm,
// These use the same encoding as the SSE4.2 T8 and TA encodings.
class AES8I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern, InstrItinClass itin = IIC_AES>
- : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8,
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8PD,
Requires<[HasAES]>;
class AESAI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TAPD,
Requires<[HasAES]>;
// PCLMUL Instruction Templates
class PCLMULIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern, InstrItinClass itin = NoItinerary>
- : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
- OpSize, Requires<[HasPCLMUL]>;
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TAPD,
+ Requires<[HasPCLMUL]>;
class AVXPCLMULIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern, InstrItinClass itin = NoItinerary>
- : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
- OpSize, VEX_4V, Requires<[HasAVX, HasPCLMUL]>;
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TAPD,
+ VEX_4V, Requires<[HasAVX, HasPCLMUL]>;
// FMA3 Instruction Templates
class FMA3<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin>, T8,
- OpSize, VEX_4V, FMASC, Requires<[HasFMA]>;
+ : I<o, F, outs, ins, asm, pattern, itin>, T8PD,
+ VEX_4V, FMASC, Requires<[HasFMA]>;
// FMA4 Instruction Templates
class FMA4<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern, InstrItinClass itin = NoItinerary>
- : Ii8<o, F, outs, ins, asm, pattern, itin>, TA,
- OpSize, VEX_4V, VEX_I8IMM, FMASC, Requires<[HasFMA4]>;
+ : Ii8<o, F, outs, ins, asm, pattern, itin>, TAPD,
+ VEX_4V, VEX_I8IMM, FMASC, Requires<[HasFMA4]>;
// XOP 2, 3 and 4 Operand Instruction Template
class IXOP<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>,
- XOP, XOP9, Requires<[HasXOP]>;
+ XOP9, Requires<[HasXOP]>;
// XOP 2, 3 and 4 Operand Instruction Templates with imm byte
class IXOPi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>,
- XOP, XOP8, Requires<[HasXOP]>;
+ XOP8, Requires<[HasXOP]>;
// XOP 5 operand instruction (VEX encoding!)
class IXOP5<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern, InstrItinClass itin = NoItinerary>
- : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
- OpSize, VEX_4V, VEX_I8IMM, Requires<[HasXOP]>;
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TAPD,
+ VEX_4V, VEX_I8IMM, Requires<[HasXOP]>;
// X86-64 Instruction templates...
//
@@ -731,9 +808,15 @@ class RI<bits<8> o, Format F, dag outs, dag ins, string asm,
class RIi8 <bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: Ii8<o, F, outs, ins, asm, pattern, itin>, REX_W;
+class RIi16 <bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : Ii16<o, F, outs, ins, asm, pattern, itin>, REX_W;
class RIi32 <bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: Ii32<o, F, outs, ins, asm, pattern, itin>, REX_W;
+class RIi32S <bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : Ii32S<o, F, outs, ins, asm, pattern, itin>, REX_W;
class RIi64<bits<8> o, Format f, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
@@ -749,18 +832,6 @@ class RIi64_NOREX<bits<8> o, Format f, dag outs, dag ins, string asm,
let CodeSize = 3;
}
-class RSSI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern, InstrItinClass itin = NoItinerary>
- : SSI<o, F, outs, ins, asm, pattern, itin>, REX_W;
-class RSDI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern, InstrItinClass itin = NoItinerary>
- : SDI<o, F, outs, ins, asm, pattern, itin>, REX_W;
-class RPDI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern, InstrItinClass itin = NoItinerary>
- : PDI<o, F, outs, ins, asm, pattern, itin>, REX_W;
-class VRPDI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern, InstrItinClass itin = NoItinerary>
- : VPDI<o, F, outs, ins, asm, pattern, itin>, VEX_W;
class RS2I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: S2I<o, F, outs, ins, asm, pattern, itin>, REX_W;
@@ -774,29 +845,29 @@ class VRS2I<bits<8> o, Format F, dag outs, dag ins, string asm,
// MMXI - MMX instructions with TB prefix.
// MMXI32 - MMX instructions with TB prefix valid only in 32 bit mode.
// MMXI64 - MMX instructions with TB prefix valid only in 64 bit mode.
-// MMX2I - MMX / SSE2 instructions with TB and OpSize prefixes.
-// MMXIi8 - MMX instructions with ImmT == Imm8 and TB prefix.
-// MMXIi8 - MMX instructions with ImmT == Imm8 and TB prefix.
+// MMX2I - MMX / SSE2 instructions with PD prefix.
+// MMXIi8 - MMX instructions with ImmT == Imm8 and PS prefix.
+// MMXIi8 - MMX instructions with ImmT == Imm8 and PS prefix.
// MMXID - MMX instructions with XD prefix.
// MMXIS - MMX instructions with XS prefix.
class MMXI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin>, TB, Requires<[HasMMX]>;
+ : I<o, F, outs, ins, asm, pattern, itin>, PS, Requires<[HasMMX]>;
class MMXI32<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin>, TB, Requires<[HasMMX,In32BitMode]>;
+ : I<o, F, outs, ins, asm, pattern, itin>, PS, Requires<[HasMMX,Not64BitMode]>;
class MMXI64<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin>, TB, Requires<[HasMMX,In64BitMode]>;
+ : I<o, F, outs, ins, asm, pattern, itin>, PS, Requires<[HasMMX,In64BitMode]>;
class MMXRI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin>, TB, REX_W, Requires<[HasMMX]>;
+ : I<o, F, outs, ins, asm, pattern, itin>, PS, REX_W, Requires<[HasMMX]>;
class MMX2I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : I<o, F, outs, ins, asm, pattern, itin>, TB, OpSize, Requires<[HasMMX]>;
+ : I<o, F, outs, ins, asm, pattern, itin>, PD, Requires<[HasMMX]>;
class MMXIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
- : Ii8<o, F, outs, ins, asm, pattern, itin>, TB, Requires<[HasMMX]>;
+ : Ii8<o, F, outs, ins, asm, pattern, itin>, PS, Requires<[HasMMX]>;
class MMXID<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: Ii8<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[HasMMX]>;
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
index 1fed424..6f0fa94 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -25,7 +25,8 @@ def bc_mmx : PatFrag<(ops node:$in), (x86mmx (bitconvert node:$in))>;
def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
SDTCisFP<0>, SDTCisInt<2> ]>;
def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
- SDTCisFP<1>, SDTCisVT<3, i8>]>;
+ SDTCisFP<1>, SDTCisVT<3, i8>,
+ SDTCisVec<1>]>;
def X86umin : SDNode<"X86ISD::UMIN", SDTIntBinOp>;
def X86umax : SDNode<"X86ISD::UMAX", SDTIntBinOp>;
@@ -59,8 +60,8 @@ def X86hadd : SDNode<"X86ISD::HADD", SDTIntBinOp>;
def X86hsub : SDNode<"X86ISD::HSUB", SDTIntBinOp>;
def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest>;
def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
-def X86cmpss : SDNode<"X86ISD::FSETCCss", SDTX86Cmpss>;
-def X86cmpsd : SDNode<"X86ISD::FSETCCsd", SDTX86Cmpsd>;
+def X86cmps : SDNode<"X86ISD::FSETCC", SDTX86Cmps>;
+//def X86cmpsd : SDNode<"X86ISD::FSETCCsd", SDTX86Cmpsd>;
def X86pshufb : SDNode<"X86ISD::PSHUFB",
SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisSameAs<0,2>]>>;
@@ -80,44 +81,46 @@ def X86pinsrb : SDNode<"X86ISD::PINSRB",
def X86pinsrw : SDNode<"X86ISD::PINSRW",
SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
-def X86insrtps : SDNode<"X86ISD::INSERTPS",
+def X86insertps : SDNode<"X86ISD::INSERTPS",
SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
-def X86vzmovly : SDNode<"X86ISD::VZEXT_MOVL",
- SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisOpSmallerThanOp<1, 0> ]>>;
-
-def X86vsmovl : SDNode<"X86ISD::VSEXT_MOVL",
- SDTypeProfile<1, 1,
- [SDTCisVec<0>, SDTCisInt<1>, SDTCisInt<0>]>>;
-
def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
[SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def X86vzext : SDNode<"X86ISD::VZEXT",
SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisInt<0>, SDTCisInt<1>]>>;
+ SDTCisInt<0>, SDTCisInt<1>,
+ SDTCisOpSmallerThanOp<1, 0>]>>;
def X86vsext : SDNode<"X86ISD::VSEXT",
SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisInt<0>, SDTCisInt<1>]>>;
+ SDTCisInt<0>, SDTCisInt<1>,
+ SDTCisOpSmallerThanOp<1, 0>]>>;
def X86vtrunc : SDNode<"X86ISD::VTRUNC",
SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisInt<0>, SDTCisInt<1>]>>;
+ SDTCisInt<0>, SDTCisInt<1>,
+ SDTCisOpSmallerThanOp<0, 1>]>>;
+def X86trunc : SDNode<"X86ISD::TRUNC",
+ SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisInt<1>,
+ SDTCisOpSmallerThanOp<0, 1>]>>;
+
def X86vtruncm : SDNode<"X86ISD::VTRUNCM",
SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
SDTCisInt<0>, SDTCisInt<1>,
- SDTCisVec<2>, SDTCisInt<2>]>>;
+ SDTCisVec<2>, SDTCisInt<2>,
+ SDTCisOpSmallerThanOp<0, 2>]>>;
def X86vfpext : SDNode<"X86ISD::VFPEXT",
SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisFP<0>, SDTCisFP<1>]>>;
+ SDTCisFP<0>, SDTCisFP<1>,
+ SDTCisOpSmallerThanOp<1, 0>]>>;
def X86vfpround: SDNode<"X86ISD::VFPROUND",
SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisFP<0>, SDTCisFP<1>]>>;
+ SDTCisFP<0>, SDTCisFP<1>,
+ SDTCisOpSmallerThanOp<0, 1>]>>;
def X86vshldq : SDNode<"X86ISD::VSHLDQ", SDTIntShiftOp>;
def X86vshrdq : SDNode<"X86ISD::VSRLDQ", SDTIntShiftOp>;
@@ -130,9 +133,15 @@ def X86IntCmpMask : SDTypeProfile<1, 2,
def X86pcmpeqm : SDNode<"X86ISD::PCMPEQM", X86IntCmpMask, [SDNPCommutative]>;
def X86pcmpgtm : SDNode<"X86ISD::PCMPGTM", X86IntCmpMask>;
-def X86CmpMaskCC : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;
+def X86CmpMaskCC :
+ SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<0>, SDTCisVec<1>,
+ SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;
+def X86CmpMaskCCScalar :
+ SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;
+
def X86cmpm : SDNode<"X86ISD::CMPM", X86CmpMaskCC>;
def X86cmpmu : SDNode<"X86ISD::CMPMU", X86CmpMaskCC>;
+def X86cmpms : SDNode<"X86ISD::FSETCC", X86CmpMaskCCScalar>;
def X86vshl : SDNode<"X86ISD::VSHL",
SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
@@ -155,14 +164,20 @@ def X86subus : SDNode<"X86ISD::SUBUS", SDTIntBinOp>;
def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
def X86testp : SDNode<"X86ISD::TESTP", SDTX86CmpPTest>;
def X86kortest : SDNode<"X86ISD::KORTEST", SDTX86CmpPTest>;
-def X86ktest : SDNode<"X86ISD::KTEST", SDTX86CmpPTest>;
-def X86testm : SDNode<"X86ISD::TESTM", SDTypeProfile<1, 2, [SDTCisVec<0>,
+def X86testm : SDNode<"X86ISD::TESTM", SDTypeProfile<1, 2, [SDTCisVec<0>,
SDTCisVec<1>,
SDTCisSameAs<2, 1>]>>;
+def X86testnm : SDNode<"X86ISD::TESTNM", SDTypeProfile<1, 2, [SDTCisVec<0>,
+ SDTCisVec<1>,
+ SDTCisSameAs<2, 1>]>>;
+def X86select : SDNode<"X86ISD::SELECT" , SDTSelect>;
def X86pmuludq : SDNode<"X86ISD::PMULUDQ",
SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
SDTCisSameAs<1,2>]>>;
+def X86pmuldq : SDNode<"X86ISD::PMULDQ",
+ SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
+ SDTCisSameAs<1,2>]>>;
// Specific shuffle nodes - At some point ISD::VECTOR_SHUFFLE will always get
// translated into one of the target nodes below during lowering.
@@ -209,6 +224,10 @@ def X86Movhlps : SDNode<"X86ISD::MOVHLPS", SDTShuff2Op>;
def X86Movlps : SDNode<"X86ISD::MOVLPS", SDTShuff2Op>;
def X86Movlpd : SDNode<"X86ISD::MOVLPD", SDTShuff2Op>;
+def SDTPack : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>, SDTCisSameAs<2, 1>]>;
+def X86Packss : SDNode<"X86ISD::PACKSS", SDTPack>;
+def X86Packus : SDNode<"X86ISD::PACKUS", SDTPack>;
+
def X86Unpckl : SDNode<"X86ISD::UNPCKL", SDTShuff2Op>;
def X86Unpckh : SDNode<"X86ISD::UNPCKH", SDTShuff2Op>;
@@ -216,6 +235,7 @@ def X86VPermilp : SDNode<"X86ISD::VPERMILP", SDTShuff2OpI>;
def X86VPermv : SDNode<"X86ISD::VPERMV", SDTShuff2Op>;
def X86VPermi : SDNode<"X86ISD::VPERMI", SDTShuff2OpI>;
def X86VPermv3 : SDNode<"X86ISD::VPERMV3", SDTShuff3Op>;
+def X86VPermiv3 : SDNode<"X86ISD::VPERMIV3", SDTShuff3Op>;
def X86VPerm2x128 : SDNode<"X86ISD::VPERM2X128", SDTShuff3OpI>;
@@ -223,6 +243,8 @@ def X86VBroadcast : SDNode<"X86ISD::VBROADCAST", SDTVBroadcast>;
def X86VBroadcastm : SDNode<"X86ISD::VBROADCASTM", SDTVBroadcastm>;
def X86Vinsert : SDNode<"X86ISD::VINSERT", SDTypeProfile<1, 3,
[SDTCisSameAs<0, 1>, SDTCisPtrTy<3>]>, []>;
+def X86Vextract : SDNode<"X86ISD::VEXTRACT", SDTypeProfile<1, 2,
+ [SDTCisVec<1>, SDTCisPtrTy<2>]>, []>;
def X86Blendi : SDNode<"X86ISD::BLENDI", SDTBlend>;
def X86Fmadd : SDNode<"X86ISD::FMADD", SDTFma>;
@@ -458,10 +480,13 @@ def bc_v32i8 : PatFrag<(ops node:$in), (v32i8 (bitconvert node:$in))>;
def bc_v16i16 : PatFrag<(ops node:$in), (v16i16 (bitconvert node:$in))>;
def bc_v8i32 : PatFrag<(ops node:$in), (v8i32 (bitconvert node:$in))>;
def bc_v4i64 : PatFrag<(ops node:$in), (v4i64 (bitconvert node:$in))>;
+def bc_v8f32 : PatFrag<(ops node:$in), (v8f32 (bitconvert node:$in))>;
// 512-bit bitconvert pattern fragments
def bc_v16i32 : PatFrag<(ops node:$in), (v16i32 (bitconvert node:$in))>;
def bc_v8i64 : PatFrag<(ops node:$in), (v8i64 (bitconvert node:$in))>;
+def bc_v8f64 : PatFrag<(ops node:$in), (v8f64 (bitconvert node:$in))>;
+def bc_v16f32 : PatFrag<(ops node:$in), (v16f32 (bitconvert node:$in))>;
def vzmovl_v2i64 : PatFrag<(ops node:$src),
(bitconvert (v2i64 (X86vzmovl
@@ -478,6 +503,14 @@ def fp32imm0 : PatLeaf<(f32 fpimm), [{
return N->isExactlyValue(+0.0);
}]>;
+def I8Imm : SDNodeXForm<imm, [{
+ // Transformation function: get the low 8 bits.
+ return getI8Imm((uint8_t)N->getZExtValue());
+}]>;
+
+def FROUND_NO_EXC : ImmLeaf<i32, [{ return Imm == 8; }]>;
+def FROUND_CURRENT : ImmLeaf<i32, [{ return Imm == 4; }]>;
+
// BYTE_imm - Transform bit immediates into byte immediates.
def BYTE_imm : SDNodeXForm<imm, [{
// Transformation function: imm >> 3
diff --git a/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp b/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp
index 2461773..0d3afc4 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -28,6 +28,7 @@
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -36,11 +37,13 @@
#include "llvm/Target/TargetOptions.h"
#include <limits>
+using namespace llvm;
+
+#define DEBUG_TYPE "x86-instr-info"
+
#define GET_INSTRINFO_CTOR_DTOR
#include "X86GenInstrInfo.inc"
-using namespace llvm;
-
static cl::opt<bool>
NoFusing("disable-spill-fusing",
cl::desc("Disable fusing of spill code into instructions"));
@@ -95,14 +98,11 @@ struct X86OpTblEntry {
// Pin the vtable to this file.
void X86InstrInfo::anchor() {}
-X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
- : X86GenInstrInfo((tm.getSubtarget<X86Subtarget>().is64Bit()
- ? X86::ADJCALLSTACKDOWN64
- : X86::ADJCALLSTACKDOWN32),
- (tm.getSubtarget<X86Subtarget>().is64Bit()
- ? X86::ADJCALLSTACKUP64
- : X86::ADJCALLSTACKUP32)),
- TM(tm), RI(tm) {
+X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
+ : X86GenInstrInfo(
+ (STI.is64Bit() ? X86::ADJCALLSTACKDOWN64 : X86::ADJCALLSTACKDOWN32),
+ (STI.is64Bit() ? X86::ADJCALLSTACKUP64 : X86::ADJCALLSTACKUP32)),
+ Subtarget(STI), RI(STI) {
static const X86OpTblEntry OpTbl2Addr[] = {
{ X86::ADC32ri, X86::ADC32mi, 0 },
@@ -605,6 +605,8 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VMOVDQA64rr, X86::VMOVDQA64rm, TB_ALIGN_64 },
{ X86::VMOVDQU32rr, X86::VMOVDQU32rm, 0 },
{ X86::VMOVDQU64rr, X86::VMOVDQU64rm, 0 },
+ { X86::VPABSDZrr, X86::VPABSDZrm, 0 },
+ { X86::VPABSQZrr, X86::VPABSQZrm, 0 },
// AES foldable instructions
{ X86::AESIMCrr, X86::AESIMCrm, TB_ALIGN_16 },
@@ -1210,8 +1212,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::PEXT64rr, X86::PEXT64rm, 0 },
// AVX-512 foldable instructions
- { X86::VPADDDZrr, X86::VPADDDZrm, 0 },
- { X86::VPADDQZrr, X86::VPADDQZrm, 0 },
{ X86::VADDPSZrr, X86::VADDPSZrm, 0 },
{ X86::VADDPDZrr, X86::VADDPDZrm, 0 },
{ X86::VSUBPSZrr, X86::VSUBPSZrm, 0 },
@@ -1224,17 +1224,31 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VMINPDZrr, X86::VMINPDZrm, 0 },
{ X86::VMAXPSZrr, X86::VMAXPSZrm, 0 },
{ X86::VMAXPDZrr, X86::VMAXPDZrm, 0 },
+ { X86::VPADDDZrr, X86::VPADDDZrm, 0 },
+ { X86::VPADDQZrr, X86::VPADDQZrm, 0 },
{ X86::VPERMPDZri, X86::VPERMPDZmi, 0 },
{ X86::VPERMPSZrr, X86::VPERMPSZrm, 0 },
+ { X86::VPMAXSDZrr, X86::VPMAXSDZrm, 0 },
+ { X86::VPMAXSQZrr, X86::VPMAXSQZrm, 0 },
+ { X86::VPMAXUDZrr, X86::VPMAXUDZrm, 0 },
+ { X86::VPMAXUQZrr, X86::VPMAXUQZrm, 0 },
+ { X86::VPMINSDZrr, X86::VPMINSDZrm, 0 },
+ { X86::VPMINSQZrr, X86::VPMINSQZrm, 0 },
+ { X86::VPMINUDZrr, X86::VPMINUDZrm, 0 },
+ { X86::VPMINUQZrr, X86::VPMINUQZrm, 0 },
+ { X86::VPMULDQZrr, X86::VPMULDQZrm, 0 },
{ X86::VPSLLVDZrr, X86::VPSLLVDZrm, 0 },
{ X86::VPSLLVQZrr, X86::VPSLLVQZrm, 0 },
{ X86::VPSRAVDZrr, X86::VPSRAVDZrm, 0 },
{ X86::VPSRLVDZrr, X86::VPSRLVDZrm, 0 },
{ X86::VPSRLVQZrr, X86::VPSRLVQZrm, 0 },
+ { X86::VPSUBDZrr, X86::VPSUBDZrm, 0 },
+ { X86::VPSUBQZrr, X86::VPSUBQZrm, 0 },
{ X86::VSHUFPDZrri, X86::VSHUFPDZrmi, 0 },
{ X86::VSHUFPSZrri, X86::VSHUFPSZrmi, 0 },
{ X86::VALIGNQrri, X86::VALIGNQrmi, 0 },
{ X86::VALIGNDrri, X86::VALIGNDrmi, 0 },
+ { X86::VPMULUDQZrr, X86::VPMULUDQZrm, 0 },
// AES foldable instructions
{ X86::AESDECLASTrr, X86::AESDECLASTrm, TB_ALIGN_16 },
@@ -1268,119 +1282,111 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
static const X86OpTblEntry OpTbl3[] = {
// FMA foldable instructions
- { X86::VFMADDSSr231r, X86::VFMADDSSr231m, 0 },
- { X86::VFMADDSDr231r, X86::VFMADDSDr231m, 0 },
- { X86::VFMADDSSr132r, X86::VFMADDSSr132m, 0 },
- { X86::VFMADDSDr132r, X86::VFMADDSDr132m, 0 },
- { X86::VFMADDSSr213r, X86::VFMADDSSr213m, 0 },
- { X86::VFMADDSDr213r, X86::VFMADDSDr213m, 0 },
- { X86::VFMADDSSr213r_Int, X86::VFMADDSSr213m_Int, 0 },
- { X86::VFMADDSDr213r_Int, X86::VFMADDSDr213m_Int, 0 },
-
- { X86::VFMADDPSr231r, X86::VFMADDPSr231m, TB_ALIGN_16 },
- { X86::VFMADDPDr231r, X86::VFMADDPDr231m, TB_ALIGN_16 },
- { X86::VFMADDPSr132r, X86::VFMADDPSr132m, TB_ALIGN_16 },
- { X86::VFMADDPDr132r, X86::VFMADDPDr132m, TB_ALIGN_16 },
- { X86::VFMADDPSr213r, X86::VFMADDPSr213m, TB_ALIGN_16 },
- { X86::VFMADDPDr213r, X86::VFMADDPDr213m, TB_ALIGN_16 },
- { X86::VFMADDPSr231rY, X86::VFMADDPSr231mY, TB_ALIGN_32 },
- { X86::VFMADDPDr231rY, X86::VFMADDPDr231mY, TB_ALIGN_32 },
- { X86::VFMADDPSr132rY, X86::VFMADDPSr132mY, TB_ALIGN_32 },
- { X86::VFMADDPDr132rY, X86::VFMADDPDr132mY, TB_ALIGN_32 },
- { X86::VFMADDPSr213rY, X86::VFMADDPSr213mY, TB_ALIGN_32 },
- { X86::VFMADDPDr213rY, X86::VFMADDPDr213mY, TB_ALIGN_32 },
-
- { X86::VFNMADDSSr231r, X86::VFNMADDSSr231m, 0 },
- { X86::VFNMADDSDr231r, X86::VFNMADDSDr231m, 0 },
- { X86::VFNMADDSSr132r, X86::VFNMADDSSr132m, 0 },
- { X86::VFNMADDSDr132r, X86::VFNMADDSDr132m, 0 },
- { X86::VFNMADDSSr213r, X86::VFNMADDSSr213m, 0 },
- { X86::VFNMADDSDr213r, X86::VFNMADDSDr213m, 0 },
- { X86::VFNMADDSSr213r_Int, X86::VFNMADDSSr213m_Int, 0 },
- { X86::VFNMADDSDr213r_Int, X86::VFNMADDSDr213m_Int, 0 },
-
- { X86::VFNMADDPSr231r, X86::VFNMADDPSr231m, TB_ALIGN_16 },
- { X86::VFNMADDPDr231r, X86::VFNMADDPDr231m, TB_ALIGN_16 },
- { X86::VFNMADDPSr132r, X86::VFNMADDPSr132m, TB_ALIGN_16 },
- { X86::VFNMADDPDr132r, X86::VFNMADDPDr132m, TB_ALIGN_16 },
- { X86::VFNMADDPSr213r, X86::VFNMADDPSr213m, TB_ALIGN_16 },
- { X86::VFNMADDPDr213r, X86::VFNMADDPDr213m, TB_ALIGN_16 },
- { X86::VFNMADDPSr231rY, X86::VFNMADDPSr231mY, TB_ALIGN_32 },
- { X86::VFNMADDPDr231rY, X86::VFNMADDPDr231mY, TB_ALIGN_32 },
- { X86::VFNMADDPSr132rY, X86::VFNMADDPSr132mY, TB_ALIGN_32 },
- { X86::VFNMADDPDr132rY, X86::VFNMADDPDr132mY, TB_ALIGN_32 },
- { X86::VFNMADDPSr213rY, X86::VFNMADDPSr213mY, TB_ALIGN_32 },
- { X86::VFNMADDPDr213rY, X86::VFNMADDPDr213mY, TB_ALIGN_32 },
-
- { X86::VFMSUBSSr231r, X86::VFMSUBSSr231m, 0 },
- { X86::VFMSUBSDr231r, X86::VFMSUBSDr231m, 0 },
- { X86::VFMSUBSSr132r, X86::VFMSUBSSr132m, 0 },
- { X86::VFMSUBSDr132r, X86::VFMSUBSDr132m, 0 },
- { X86::VFMSUBSSr213r, X86::VFMSUBSSr213m, 0 },
- { X86::VFMSUBSDr213r, X86::VFMSUBSDr213m, 0 },
- { X86::VFMSUBSSr213r_Int, X86::VFMSUBSSr213m_Int, 0 },
- { X86::VFMSUBSDr213r_Int, X86::VFMSUBSDr213m_Int, 0 },
-
- { X86::VFMSUBPSr231r, X86::VFMSUBPSr231m, TB_ALIGN_16 },
- { X86::VFMSUBPDr231r, X86::VFMSUBPDr231m, TB_ALIGN_16 },
- { X86::VFMSUBPSr132r, X86::VFMSUBPSr132m, TB_ALIGN_16 },
- { X86::VFMSUBPDr132r, X86::VFMSUBPDr132m, TB_ALIGN_16 },
- { X86::VFMSUBPSr213r, X86::VFMSUBPSr213m, TB_ALIGN_16 },
- { X86::VFMSUBPDr213r, X86::VFMSUBPDr213m, TB_ALIGN_16 },
- { X86::VFMSUBPSr231rY, X86::VFMSUBPSr231mY, TB_ALIGN_32 },
- { X86::VFMSUBPDr231rY, X86::VFMSUBPDr231mY, TB_ALIGN_32 },
- { X86::VFMSUBPSr132rY, X86::VFMSUBPSr132mY, TB_ALIGN_32 },
- { X86::VFMSUBPDr132rY, X86::VFMSUBPDr132mY, TB_ALIGN_32 },
- { X86::VFMSUBPSr213rY, X86::VFMSUBPSr213mY, TB_ALIGN_32 },
- { X86::VFMSUBPDr213rY, X86::VFMSUBPDr213mY, TB_ALIGN_32 },
-
- { X86::VFNMSUBSSr231r, X86::VFNMSUBSSr231m, 0 },
- { X86::VFNMSUBSDr231r, X86::VFNMSUBSDr231m, 0 },
- { X86::VFNMSUBSSr132r, X86::VFNMSUBSSr132m, 0 },
- { X86::VFNMSUBSDr132r, X86::VFNMSUBSDr132m, 0 },
- { X86::VFNMSUBSSr213r, X86::VFNMSUBSSr213m, 0 },
- { X86::VFNMSUBSDr213r, X86::VFNMSUBSDr213m, 0 },
- { X86::VFNMSUBSSr213r_Int, X86::VFNMSUBSSr213m_Int, 0 },
- { X86::VFNMSUBSDr213r_Int, X86::VFNMSUBSDr213m_Int, 0 },
-
- { X86::VFNMSUBPSr231r, X86::VFNMSUBPSr231m, TB_ALIGN_16 },
- { X86::VFNMSUBPDr231r, X86::VFNMSUBPDr231m, TB_ALIGN_16 },
- { X86::VFNMSUBPSr132r, X86::VFNMSUBPSr132m, TB_ALIGN_16 },
- { X86::VFNMSUBPDr132r, X86::VFNMSUBPDr132m, TB_ALIGN_16 },
- { X86::VFNMSUBPSr213r, X86::VFNMSUBPSr213m, TB_ALIGN_16 },
- { X86::VFNMSUBPDr213r, X86::VFNMSUBPDr213m, TB_ALIGN_16 },
- { X86::VFNMSUBPSr231rY, X86::VFNMSUBPSr231mY, TB_ALIGN_32 },
- { X86::VFNMSUBPDr231rY, X86::VFNMSUBPDr231mY, TB_ALIGN_32 },
- { X86::VFNMSUBPSr132rY, X86::VFNMSUBPSr132mY, TB_ALIGN_32 },
- { X86::VFNMSUBPDr132rY, X86::VFNMSUBPDr132mY, TB_ALIGN_32 },
- { X86::VFNMSUBPSr213rY, X86::VFNMSUBPSr213mY, TB_ALIGN_32 },
- { X86::VFNMSUBPDr213rY, X86::VFNMSUBPDr213mY, TB_ALIGN_32 },
-
- { X86::VFMADDSUBPSr231r, X86::VFMADDSUBPSr231m, TB_ALIGN_16 },
- { X86::VFMADDSUBPDr231r, X86::VFMADDSUBPDr231m, TB_ALIGN_16 },
- { X86::VFMADDSUBPSr132r, X86::VFMADDSUBPSr132m, TB_ALIGN_16 },
- { X86::VFMADDSUBPDr132r, X86::VFMADDSUBPDr132m, TB_ALIGN_16 },
- { X86::VFMADDSUBPSr213r, X86::VFMADDSUBPSr213m, TB_ALIGN_16 },
- { X86::VFMADDSUBPDr213r, X86::VFMADDSUBPDr213m, TB_ALIGN_16 },
- { X86::VFMADDSUBPSr231rY, X86::VFMADDSUBPSr231mY, TB_ALIGN_32 },
- { X86::VFMADDSUBPDr231rY, X86::VFMADDSUBPDr231mY, TB_ALIGN_32 },
- { X86::VFMADDSUBPSr132rY, X86::VFMADDSUBPSr132mY, TB_ALIGN_32 },
- { X86::VFMADDSUBPDr132rY, X86::VFMADDSUBPDr132mY, TB_ALIGN_32 },
- { X86::VFMADDSUBPSr213rY, X86::VFMADDSUBPSr213mY, TB_ALIGN_32 },
- { X86::VFMADDSUBPDr213rY, X86::VFMADDSUBPDr213mY, TB_ALIGN_32 },
-
- { X86::VFMSUBADDPSr231r, X86::VFMSUBADDPSr231m, TB_ALIGN_16 },
- { X86::VFMSUBADDPDr231r, X86::VFMSUBADDPDr231m, TB_ALIGN_16 },
- { X86::VFMSUBADDPSr132r, X86::VFMSUBADDPSr132m, TB_ALIGN_16 },
- { X86::VFMSUBADDPDr132r, X86::VFMSUBADDPDr132m, TB_ALIGN_16 },
- { X86::VFMSUBADDPSr213r, X86::VFMSUBADDPSr213m, TB_ALIGN_16 },
- { X86::VFMSUBADDPDr213r, X86::VFMSUBADDPDr213m, TB_ALIGN_16 },
- { X86::VFMSUBADDPSr231rY, X86::VFMSUBADDPSr231mY, TB_ALIGN_32 },
- { X86::VFMSUBADDPDr231rY, X86::VFMSUBADDPDr231mY, TB_ALIGN_32 },
- { X86::VFMSUBADDPSr132rY, X86::VFMSUBADDPSr132mY, TB_ALIGN_32 },
- { X86::VFMSUBADDPDr132rY, X86::VFMSUBADDPDr132mY, TB_ALIGN_32 },
- { X86::VFMSUBADDPSr213rY, X86::VFMSUBADDPSr213mY, TB_ALIGN_32 },
- { X86::VFMSUBADDPDr213rY, X86::VFMSUBADDPDr213mY, TB_ALIGN_32 },
+ { X86::VFMADDSSr231r, X86::VFMADDSSr231m, TB_ALIGN_NONE },
+ { X86::VFMADDSDr231r, X86::VFMADDSDr231m, TB_ALIGN_NONE },
+ { X86::VFMADDSSr132r, X86::VFMADDSSr132m, TB_ALIGN_NONE },
+ { X86::VFMADDSDr132r, X86::VFMADDSDr132m, TB_ALIGN_NONE },
+ { X86::VFMADDSSr213r, X86::VFMADDSSr213m, TB_ALIGN_NONE },
+ { X86::VFMADDSDr213r, X86::VFMADDSDr213m, TB_ALIGN_NONE },
+
+ { X86::VFMADDPSr231r, X86::VFMADDPSr231m, TB_ALIGN_NONE },
+ { X86::VFMADDPDr231r, X86::VFMADDPDr231m, TB_ALIGN_NONE },
+ { X86::VFMADDPSr132r, X86::VFMADDPSr132m, TB_ALIGN_NONE },
+ { X86::VFMADDPDr132r, X86::VFMADDPDr132m, TB_ALIGN_NONE },
+ { X86::VFMADDPSr213r, X86::VFMADDPSr213m, TB_ALIGN_NONE },
+ { X86::VFMADDPDr213r, X86::VFMADDPDr213m, TB_ALIGN_NONE },
+ { X86::VFMADDPSr231rY, X86::VFMADDPSr231mY, TB_ALIGN_NONE },
+ { X86::VFMADDPDr231rY, X86::VFMADDPDr231mY, TB_ALIGN_NONE },
+ { X86::VFMADDPSr132rY, X86::VFMADDPSr132mY, TB_ALIGN_NONE },
+ { X86::VFMADDPDr132rY, X86::VFMADDPDr132mY, TB_ALIGN_NONE },
+ { X86::VFMADDPSr213rY, X86::VFMADDPSr213mY, TB_ALIGN_NONE },
+ { X86::VFMADDPDr213rY, X86::VFMADDPDr213mY, TB_ALIGN_NONE },
+
+ { X86::VFNMADDSSr231r, X86::VFNMADDSSr231m, TB_ALIGN_NONE },
+ { X86::VFNMADDSDr231r, X86::VFNMADDSDr231m, TB_ALIGN_NONE },
+ { X86::VFNMADDSSr132r, X86::VFNMADDSSr132m, TB_ALIGN_NONE },
+ { X86::VFNMADDSDr132r, X86::VFNMADDSDr132m, TB_ALIGN_NONE },
+ { X86::VFNMADDSSr213r, X86::VFNMADDSSr213m, TB_ALIGN_NONE },
+ { X86::VFNMADDSDr213r, X86::VFNMADDSDr213m, TB_ALIGN_NONE },
+
+ { X86::VFNMADDPSr231r, X86::VFNMADDPSr231m, TB_ALIGN_NONE },
+ { X86::VFNMADDPDr231r, X86::VFNMADDPDr231m, TB_ALIGN_NONE },
+ { X86::VFNMADDPSr132r, X86::VFNMADDPSr132m, TB_ALIGN_NONE },
+ { X86::VFNMADDPDr132r, X86::VFNMADDPDr132m, TB_ALIGN_NONE },
+ { X86::VFNMADDPSr213r, X86::VFNMADDPSr213m, TB_ALIGN_NONE },
+ { X86::VFNMADDPDr213r, X86::VFNMADDPDr213m, TB_ALIGN_NONE },
+ { X86::VFNMADDPSr231rY, X86::VFNMADDPSr231mY, TB_ALIGN_NONE },
+ { X86::VFNMADDPDr231rY, X86::VFNMADDPDr231mY, TB_ALIGN_NONE },
+ { X86::VFNMADDPSr132rY, X86::VFNMADDPSr132mY, TB_ALIGN_NONE },
+ { X86::VFNMADDPDr132rY, X86::VFNMADDPDr132mY, TB_ALIGN_NONE },
+ { X86::VFNMADDPSr213rY, X86::VFNMADDPSr213mY, TB_ALIGN_NONE },
+ { X86::VFNMADDPDr213rY, X86::VFNMADDPDr213mY, TB_ALIGN_NONE },
+
+ { X86::VFMSUBSSr231r, X86::VFMSUBSSr231m, TB_ALIGN_NONE },
+ { X86::VFMSUBSDr231r, X86::VFMSUBSDr231m, TB_ALIGN_NONE },
+ { X86::VFMSUBSSr132r, X86::VFMSUBSSr132m, TB_ALIGN_NONE },
+ { X86::VFMSUBSDr132r, X86::VFMSUBSDr132m, TB_ALIGN_NONE },
+ { X86::VFMSUBSSr213r, X86::VFMSUBSSr213m, TB_ALIGN_NONE },
+ { X86::VFMSUBSDr213r, X86::VFMSUBSDr213m, TB_ALIGN_NONE },
+
+ { X86::VFMSUBPSr231r, X86::VFMSUBPSr231m, TB_ALIGN_NONE },
+ { X86::VFMSUBPDr231r, X86::VFMSUBPDr231m, TB_ALIGN_NONE },
+ { X86::VFMSUBPSr132r, X86::VFMSUBPSr132m, TB_ALIGN_NONE },
+ { X86::VFMSUBPDr132r, X86::VFMSUBPDr132m, TB_ALIGN_NONE },
+ { X86::VFMSUBPSr213r, X86::VFMSUBPSr213m, TB_ALIGN_NONE },
+ { X86::VFMSUBPDr213r, X86::VFMSUBPDr213m, TB_ALIGN_NONE },
+ { X86::VFMSUBPSr231rY, X86::VFMSUBPSr231mY, TB_ALIGN_NONE },
+ { X86::VFMSUBPDr231rY, X86::VFMSUBPDr231mY, TB_ALIGN_NONE },
+ { X86::VFMSUBPSr132rY, X86::VFMSUBPSr132mY, TB_ALIGN_NONE },
+ { X86::VFMSUBPDr132rY, X86::VFMSUBPDr132mY, TB_ALIGN_NONE },
+ { X86::VFMSUBPSr213rY, X86::VFMSUBPSr213mY, TB_ALIGN_NONE },
+ { X86::VFMSUBPDr213rY, X86::VFMSUBPDr213mY, TB_ALIGN_NONE },
+
+ { X86::VFNMSUBSSr231r, X86::VFNMSUBSSr231m, TB_ALIGN_NONE },
+ { X86::VFNMSUBSDr231r, X86::VFNMSUBSDr231m, TB_ALIGN_NONE },
+ { X86::VFNMSUBSSr132r, X86::VFNMSUBSSr132m, TB_ALIGN_NONE },
+ { X86::VFNMSUBSDr132r, X86::VFNMSUBSDr132m, TB_ALIGN_NONE },
+ { X86::VFNMSUBSSr213r, X86::VFNMSUBSSr213m, TB_ALIGN_NONE },
+ { X86::VFNMSUBSDr213r, X86::VFNMSUBSDr213m, TB_ALIGN_NONE },
+
+ { X86::VFNMSUBPSr231r, X86::VFNMSUBPSr231m, TB_ALIGN_NONE },
+ { X86::VFNMSUBPDr231r, X86::VFNMSUBPDr231m, TB_ALIGN_NONE },
+ { X86::VFNMSUBPSr132r, X86::VFNMSUBPSr132m, TB_ALIGN_NONE },
+ { X86::VFNMSUBPDr132r, X86::VFNMSUBPDr132m, TB_ALIGN_NONE },
+ { X86::VFNMSUBPSr213r, X86::VFNMSUBPSr213m, TB_ALIGN_NONE },
+ { X86::VFNMSUBPDr213r, X86::VFNMSUBPDr213m, TB_ALIGN_NONE },
+ { X86::VFNMSUBPSr231rY, X86::VFNMSUBPSr231mY, TB_ALIGN_NONE },
+ { X86::VFNMSUBPDr231rY, X86::VFNMSUBPDr231mY, TB_ALIGN_NONE },
+ { X86::VFNMSUBPSr132rY, X86::VFNMSUBPSr132mY, TB_ALIGN_NONE },
+ { X86::VFNMSUBPDr132rY, X86::VFNMSUBPDr132mY, TB_ALIGN_NONE },
+ { X86::VFNMSUBPSr213rY, X86::VFNMSUBPSr213mY, TB_ALIGN_NONE },
+ { X86::VFNMSUBPDr213rY, X86::VFNMSUBPDr213mY, TB_ALIGN_NONE },
+
+ { X86::VFMADDSUBPSr231r, X86::VFMADDSUBPSr231m, TB_ALIGN_NONE },
+ { X86::VFMADDSUBPDr231r, X86::VFMADDSUBPDr231m, TB_ALIGN_NONE },
+ { X86::VFMADDSUBPSr132r, X86::VFMADDSUBPSr132m, TB_ALIGN_NONE },
+ { X86::VFMADDSUBPDr132r, X86::VFMADDSUBPDr132m, TB_ALIGN_NONE },
+ { X86::VFMADDSUBPSr213r, X86::VFMADDSUBPSr213m, TB_ALIGN_NONE },
+ { X86::VFMADDSUBPDr213r, X86::VFMADDSUBPDr213m, TB_ALIGN_NONE },
+ { X86::VFMADDSUBPSr231rY, X86::VFMADDSUBPSr231mY, TB_ALIGN_NONE },
+ { X86::VFMADDSUBPDr231rY, X86::VFMADDSUBPDr231mY, TB_ALIGN_NONE },
+ { X86::VFMADDSUBPSr132rY, X86::VFMADDSUBPSr132mY, TB_ALIGN_NONE },
+ { X86::VFMADDSUBPDr132rY, X86::VFMADDSUBPDr132mY, TB_ALIGN_NONE },
+ { X86::VFMADDSUBPSr213rY, X86::VFMADDSUBPSr213mY, TB_ALIGN_NONE },
+ { X86::VFMADDSUBPDr213rY, X86::VFMADDSUBPDr213mY, TB_ALIGN_NONE },
+
+ { X86::VFMSUBADDPSr231r, X86::VFMSUBADDPSr231m, TB_ALIGN_NONE },
+ { X86::VFMSUBADDPDr231r, X86::VFMSUBADDPDr231m, TB_ALIGN_NONE },
+ { X86::VFMSUBADDPSr132r, X86::VFMSUBADDPSr132m, TB_ALIGN_NONE },
+ { X86::VFMSUBADDPDr132r, X86::VFMSUBADDPDr132m, TB_ALIGN_NONE },
+ { X86::VFMSUBADDPSr213r, X86::VFMSUBADDPSr213m, TB_ALIGN_NONE },
+ { X86::VFMSUBADDPDr213r, X86::VFMSUBADDPDr213m, TB_ALIGN_NONE },
+ { X86::VFMSUBADDPSr231rY, X86::VFMSUBADDPSr231mY, TB_ALIGN_NONE },
+ { X86::VFMSUBADDPDr231rY, X86::VFMSUBADDPDr231mY, TB_ALIGN_NONE },
+ { X86::VFMSUBADDPSr132rY, X86::VFMSUBADDPSr132mY, TB_ALIGN_NONE },
+ { X86::VFMSUBADDPDr132rY, X86::VFMSUBADDPDr132mY, TB_ALIGN_NONE },
+ { X86::VFMSUBADDPSr213rY, X86::VFMSUBADDPSr213mY, TB_ALIGN_NONE },
+ { X86::VFMSUBADDPDr213rY, X86::VFMSUBADDPDr213mY, TB_ALIGN_NONE },
// FMA4 foldable patterns
{ X86::VFMADDSS4rr, X86::VFMADDSS4rm, 0 },
@@ -1420,6 +1426,10 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VPERMI2Qrr, X86::VPERMI2Qrm, 0 },
{ X86::VPERMI2PSrr, X86::VPERMI2PSrm, 0 },
{ X86::VPERMI2PDrr, X86::VPERMI2PDrm, 0 },
+ { X86::VBLENDMPDZrr, X86::VBLENDMPDZrm, 0 },
+ { X86::VBLENDMPSZrr, X86::VBLENDMPSZrm, 0 },
+ { X86::VPBLENDMDZrr, X86::VPBLENDMDZrm, 0 },
+ { X86::VPBLENDMQZrr, X86::VPBLENDMQZrm, 0 }
};
for (unsigned i = 0, e = array_lengthof(OpTbl3); i != e; ++i) {
@@ -1460,7 +1470,7 @@ X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
case X86::MOVSX32rr8:
case X86::MOVZX32rr8:
case X86::MOVSX64rr8:
- if (!TM.getSubtarget<X86Subtarget>().is64Bit())
+ if (!Subtarget.is64Bit())
// It's not always legal to reference the low 8-bit of the larger
// register in 32-bit mode.
return false;
@@ -1501,12 +1511,14 @@ X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
/// operand and follow operands form a reference to the stack frame.
bool X86InstrInfo::isFrameOperand(const MachineInstr *MI, unsigned int Op,
int &FrameIndex) const {
- if (MI->getOperand(Op).isFI() && MI->getOperand(Op+1).isImm() &&
- MI->getOperand(Op+2).isReg() && MI->getOperand(Op+3).isImm() &&
- MI->getOperand(Op+1).getImm() == 1 &&
- MI->getOperand(Op+2).getReg() == 0 &&
- MI->getOperand(Op+3).getImm() == 0) {
- FrameIndex = MI->getOperand(Op).getIndex();
+ if (MI->getOperand(Op+X86::AddrBaseReg).isFI() &&
+ MI->getOperand(Op+X86::AddrScaleAmt).isImm() &&
+ MI->getOperand(Op+X86::AddrIndexReg).isReg() &&
+ MI->getOperand(Op+X86::AddrDisp).isImm() &&
+ MI->getOperand(Op+X86::AddrScaleAmt).getImm() == 1 &&
+ MI->getOperand(Op+X86::AddrIndexReg).getReg() == 0 &&
+ MI->getOperand(Op+X86::AddrDisp).getImm() == 0) {
+ FrameIndex = MI->getOperand(Op+X86::AddrBaseReg).getIndex();
return true;
}
return false;
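The isFrameOperand hunk above trades hard-coded operand offsets for the symbolic address-operand indices defined in the X86 backend's X86BaseInfo.h (X86::AddrBaseReg, X86::AddrScaleAmt, X86::AddrIndexReg, X86::AddrDisp, X86::AddrSegmentReg). As a minimal sketch of how those indices map onto the five components of an x86 memory reference — the helper name below is hypothetical and not part of this commit, and it assumes it is built alongside the X86 backend sources:

    #include "MCTargetDesc/X86BaseInfo.h"      // X86::Addr* operand indices
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    // Illustrative only: dump the base/scale/index/disp/segment operands of a
    // memory reference that starts at operand index MemOp of MI.  The X86::Addr*
    // enumerators are the fixed offsets of the five address components, which is
    // what the patched isFrameOperand now relies on instead of literal numbers.
    static void dumpX86MemRef(const MachineInstr &MI, unsigned MemOp) {
      const MachineOperand &Base  = MI.getOperand(MemOp + X86::AddrBaseReg);
      const MachineOperand &Scale = MI.getOperand(MemOp + X86::AddrScaleAmt);
      const MachineOperand &Index = MI.getOperand(MemOp + X86::AddrIndexReg);
      const MachineOperand &Disp  = MI.getOperand(MemOp + X86::AddrDisp);
      const MachineOperand &Seg   = MI.getOperand(MemOp + X86::AddrSegmentReg);
      errs() << "base=" << Base << " scale=" << Scale << " index=" << Index
             << " disp=" << Disp << " segment=" << Seg << "\n";
    }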
@@ -1536,8 +1548,8 @@ static bool isFrameLoadOpcode(int Opcode) {
case X86::VMOVDQAYrm:
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm:
- case X86::VMOVDQA32rm:
- case X86::VMOVDQA64rm:
+ case X86::VMOVAPSZrm:
+ case X86::VMOVUPSZrm:
return true;
}
}
@@ -1563,6 +1575,8 @@ static bool isFrameStoreOpcode(int Opcode) {
case X86::VMOVAPSYmr:
case X86::VMOVAPDYmr:
case X86::VMOVDQAYmr:
+ case X86::VMOVUPSZmr:
+ case X86::VMOVAPSZmr:
case X86::MMX_MOVD64mr:
case X86::MMX_MOVQ64mr:
case X86::MMX_MOVNTQmr:
@@ -1621,9 +1635,9 @@ static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
if (!TargetRegisterInfo::isVirtualRegister(BaseReg))
return false;
bool isPICBase = false;
- for (MachineRegisterInfo::def_iterator I = MRI.def_begin(BaseReg),
- E = MRI.def_end(); I != E; ++I) {
- MachineInstr *DefMI = I.getOperand().getParent();
+ for (MachineRegisterInfo::def_instr_iterator I = MRI.def_instr_begin(BaseReg),
+ E = MRI.def_instr_end(); I != E; ++I) {
+ MachineInstr *DefMI = &*I;
if (DefMI->getOpcode() != X86::MOVPC32r)
return false;
assert(!isPICBase && "More than one PIC base?");
@@ -1668,15 +1682,16 @@ X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
case X86::FsMOVAPSrm:
case X86::FsMOVAPDrm: {
// Loads from constant pools are trivially rematerializable.
- if (MI->getOperand(1).isReg() &&
- MI->getOperand(2).isImm() &&
- MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
+ if (MI->getOperand(1+X86::AddrBaseReg).isReg() &&
+ MI->getOperand(1+X86::AddrScaleAmt).isImm() &&
+ MI->getOperand(1+X86::AddrIndexReg).isReg() &&
+ MI->getOperand(1+X86::AddrIndexReg).getReg() == 0 &&
MI->isInvariantLoad(AA)) {
- unsigned BaseReg = MI->getOperand(1).getReg();
+ unsigned BaseReg = MI->getOperand(1+X86::AddrBaseReg).getReg();
if (BaseReg == 0 || BaseReg == X86::RIP)
return true;
// Allow re-materialization of PIC load.
- if (!ReMatPICStubLoad && MI->getOperand(4).isGlobal())
+ if (!ReMatPICStubLoad && MI->getOperand(1+X86::AddrDisp).isGlobal())
return false;
const MachineFunction &MF = *MI->getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -1687,13 +1702,14 @@ X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
case X86::LEA32r:
case X86::LEA64r: {
- if (MI->getOperand(2).isImm() &&
- MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
- !MI->getOperand(4).isReg()) {
+ if (MI->getOperand(1+X86::AddrScaleAmt).isImm() &&
+ MI->getOperand(1+X86::AddrIndexReg).isReg() &&
+ MI->getOperand(1+X86::AddrIndexReg).getReg() == 0 &&
+ !MI->getOperand(1+X86::AddrDisp).isReg()) {
// lea fi#, lea GV, etc. are all rematerializable.
- if (!MI->getOperand(1).isReg())
+ if (!MI->getOperand(1+X86::AddrBaseReg).isReg())
return true;
- unsigned BaseReg = MI->getOperand(1).getReg();
+ unsigned BaseReg = MI->getOperand(1+X86::AddrBaseReg).getReg();
if (BaseReg == 0)
return true;
// Allow re-materialization of lea PICBase + x.
@@ -1710,12 +1726,8 @@ X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
return true;
}
-/// isSafeToClobberEFLAGS - Return true if it's safe insert an instruction that
-/// would clobber the EFLAGS condition register. Note the result may be
-/// conservative. If it cannot definitely determine the safety after visiting
-/// a few instructions in each direction it assumes it's not safe.
-static bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) {
+bool X86InstrInfo::isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
MachineBasicBlock::iterator E = MBB.end();
// For compile time consideration, if we are not able to determine the
@@ -1809,7 +1821,7 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
MBB.insert(I, MI);
}
- MachineInstr *NewMI = prior(I);
+ MachineInstr *NewMI = std::prev(I);
NewMI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
}
@@ -1936,7 +1948,7 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
unsigned Opc, leaInReg;
- if (TM.getSubtarget<X86Subtarget>().is64Bit()) {
+ if (Subtarget.is64Bit()) {
Opc = X86::LEA64_32r;
leaInReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
} else {
@@ -1986,13 +1998,13 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
unsigned Src2 = MI->getOperand(2).getReg();
bool isKill2 = MI->getOperand(2).isKill();
unsigned leaInReg2 = 0;
- MachineInstr *InsMI2 = 0;
+ MachineInstr *InsMI2 = nullptr;
if (Src == Src2) {
// ADD16rr %reg1028<kill>, %reg1028
// just a single insert_subreg.
addRegReg(MIB, leaInReg, true, leaInReg, false);
} else {
- if (TM.getSubtarget<X86Subtarget>().is64Bit())
+ if (Subtarget.is64Bit())
leaInReg2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
else
leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
@@ -2050,29 +2062,29 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
// convert them to equivalent lea if the condition code register def's
// are dead!
if (hasLiveCondCodeDef(MI))
- return 0;
+ return nullptr;
MachineFunction &MF = *MI->getParent()->getParent();
// All instructions input are two-addr instructions. Get the known operands.
const MachineOperand &Dest = MI->getOperand(0);
const MachineOperand &Src = MI->getOperand(1);
- MachineInstr *NewMI = NULL;
+ MachineInstr *NewMI = nullptr;
// FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's. When
// we have better subtarget support, enable the 16-bit LEA generation here.
// 16-bit LEA is also slow on Core2.
bool DisableLEA16 = true;
- bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
+ bool is64Bit = Subtarget.is64Bit();
unsigned MIOpc = MI->getOpcode();
switch (MIOpc) {
case X86::SHUFPSrri: {
assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
- if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;
+ if (!Subtarget.hasSSE2()) return nullptr;
unsigned B = MI->getOperand(1).getReg();
unsigned C = MI->getOperand(2).getReg();
- if (B != C) return 0;
+ if (B != C) return nullptr;
unsigned M = MI->getOperand(3).getImm();
NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::PSHUFDri))
.addOperand(Dest).addOperand(Src).addImm(M);
@@ -2080,11 +2092,11 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
}
case X86::SHUFPDrri: {
assert(MI->getNumOperands() == 4 && "Unknown shufpd instruction!");
- if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;
+ if (!Subtarget.hasSSE2()) return nullptr;
unsigned B = MI->getOperand(1).getReg();
unsigned C = MI->getOperand(2).getReg();
- if (B != C) return 0;
+ if (B != C) return nullptr;
unsigned M = MI->getOperand(3).getImm();
// Convert to PSHUFD mask.
@@ -2097,13 +2109,13 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::SHL64ri: {
assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
- if (!isTruncatedShiftCountForLEA(ShAmt)) return 0;
+ if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
// LEA can't handle RSP.
if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) &&
!MF.getRegInfo().constrainRegClass(Src.getReg(),
&X86::GR64_NOSPRegClass))
- return 0;
+ return nullptr;
NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
.addOperand(Dest)
@@ -2113,7 +2125,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::SHL32ri: {
assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
- if (!isTruncatedShiftCountForLEA(ShAmt)) return 0;
+ if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
@@ -2123,7 +2135,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
SrcReg, isKill, isUndef, ImplicitOp))
- return 0;
+ return nullptr;
MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addOperand(Dest)
@@ -2139,10 +2151,10 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::SHL16ri: {
assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
- if (!isTruncatedShiftCountForLEA(ShAmt)) return 0;
+ if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : nullptr;
NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
.addOperand(Dest)
.addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0);
@@ -2151,7 +2163,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
default: {
switch (MIOpc) {
- default: return 0;
+ default: return nullptr;
case X86::INC64r:
case X86::INC32r:
case X86::INC64_32r: {
@@ -2163,7 +2175,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
SrcReg, isKill, isUndef, ImplicitOp))
- return 0;
+ return nullptr;
MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addOperand(Dest)
@@ -2177,7 +2189,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::INC16r:
case X86::INC64_16r:
if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
+ : nullptr;
assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
.addOperand(Dest).addOperand(Src), 1);
@@ -2194,7 +2207,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
SrcReg, isKill, isUndef, ImplicitOp))
- return 0;
+ return nullptr;
MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addOperand(Dest)
@@ -2209,7 +2222,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::DEC16r:
case X86::DEC64_16r:
if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
+ : nullptr;
assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
.addOperand(Dest).addOperand(Src), -1);
@@ -2230,7 +2244,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
SrcReg, isKill, isUndef, ImplicitOp))
- return 0;
+ return nullptr;
const MachineOperand &Src2 = MI->getOperand(2);
bool isKill2, isUndef2;
@@ -2238,7 +2252,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false);
if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/ false,
SrcReg2, isKill2, isUndef2, ImplicitOp2))
- return 0;
+ return nullptr;
MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addOperand(Dest);
@@ -2260,7 +2274,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::ADD16rr:
case X86::ADD16rr_DB: {
if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
+ : nullptr;
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Src2 = MI->getOperand(2).getReg();
bool isKill2 = MI->getOperand(2).isKill();
@@ -2299,7 +2314,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
SrcReg, isKill, isUndef, ImplicitOp))
- return 0;
+ return nullptr;
MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addOperand(Dest)
@@ -2315,7 +2330,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::ADD16ri_DB:
case X86::ADD16ri8_DB:
if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
+ : nullptr;
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
.addOperand(Dest).addOperand(Src),
@@ -2325,7 +2341,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
}
}
- if (!NewMI) return 0;
+ if (!NewMI) return nullptr;
if (LV) { // Update live variables
if (Src.isKill())
@@ -2452,6 +2468,41 @@ X86InstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
}
}
+bool X86InstrInfo::findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
+ unsigned &SrcOpIdx2) const {
+ switch (MI->getOpcode()) {
+ case X86::VFMADDPDr231r:
+ case X86::VFMADDPSr231r:
+ case X86::VFMADDSDr231r:
+ case X86::VFMADDSSr231r:
+ case X86::VFMSUBPDr231r:
+ case X86::VFMSUBPSr231r:
+ case X86::VFMSUBSDr231r:
+ case X86::VFMSUBSSr231r:
+ case X86::VFNMADDPDr231r:
+ case X86::VFNMADDPSr231r:
+ case X86::VFNMADDSDr231r:
+ case X86::VFNMADDSSr231r:
+ case X86::VFNMSUBPDr231r:
+ case X86::VFNMSUBPSr231r:
+ case X86::VFNMSUBSDr231r:
+ case X86::VFNMSUBSSr231r:
+ case X86::VFMADDPDr231rY:
+ case X86::VFMADDPSr231rY:
+ case X86::VFMSUBPDr231rY:
+ case X86::VFMSUBPSr231rY:
+ case X86::VFNMADDPDr231rY:
+ case X86::VFNMADDPSr231rY:
+ case X86::VFNMSUBPDr231rY:
+ case X86::VFNMSUBPSr231rY:
+ SrcOpIdx1 = 2;
+ SrcOpIdx2 = 3;
+ return true;
+ default:
+ return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
+ }
+}
+
static X86::CondCode getCondFromBranchOpc(unsigned BrOpc) {
switch (BrOpc) {
default: return X86::COND_INVALID;
@@ -2619,8 +2670,7 @@ static X86::CondCode getSwappedCondition(X86::CondCode CC) {
/// getSETFromCond - Return a set opcode for the given condition and
/// whether it has a memory operand.
-static unsigned getSETFromCond(X86::CondCode CC,
- bool HasMemoryOperand) {
+unsigned X86::getSETFromCond(CondCode CC, bool HasMemoryOperand) {
static const uint16_t Opc[16][2] = {
{ X86::SETAr, X86::SETAm },
{ X86::SETAEr, X86::SETAEm },
@@ -2640,14 +2690,14 @@ static unsigned getSETFromCond(X86::CondCode CC,
{ X86::SETSr, X86::SETSm }
};
- assert(CC < 16 && "Can only handle standard cond codes");
+ assert(CC <= LAST_VALID_COND && "Can only handle standard cond codes");
return Opc[CC][HasMemoryOperand ? 1 : 0];
}
/// getCMovFromCond - Return a cmov opcode for the given condition,
/// register size in bytes, and operand type.
-static unsigned getCMovFromCond(X86::CondCode CC, unsigned RegBytes,
- bool HasMemoryOperand) {
+unsigned X86::getCMovFromCond(CondCode CC, unsigned RegBytes,
+ bool HasMemoryOperand) {
static const uint16_t Opc[32][3] = {
{ X86::CMOVA16rr, X86::CMOVA32rr, X86::CMOVA64rr },
{ X86::CMOVAE16rr, X86::CMOVAE32rr, X86::CMOVAE64rr },
@@ -2738,15 +2788,15 @@ bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
}
// If the block has any instructions after a JMP, delete them.
- while (llvm::next(I) != MBB.end())
- llvm::next(I)->eraseFromParent();
+ while (std::next(I) != MBB.end())
+ std::next(I)->eraseFromParent();
Cond.clear();
- FBB = 0;
+ FBB = nullptr;
// Delete the JMP if it's equivalent to a fall-through.
if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
- TBB = 0;
+ TBB = nullptr;
I->eraseFromParent();
I = MBB.end();
UnCondBrIter = MBB.end();
@@ -2923,7 +2973,7 @@ canInsertSelect(const MachineBasicBlock &MBB,
unsigned TrueReg, unsigned FalseReg,
int &CondCycles, int &TrueCycles, int &FalseCycles) const {
// Not all subtargets have cmov instructions.
- if (!TM.getSubtarget<X86Subtarget>().hasCMov())
+ if (!Subtarget.hasCMov())
return false;
if (Cond.size() != 1)
return false;
@@ -2974,8 +3024,7 @@ static bool isHReg(unsigned Reg) {
// Try and copy between VR128/VR64 and GR64 registers.
static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
- const X86Subtarget& Subtarget) {
-
+ const X86Subtarget &Subtarget) {
// SrcReg(VR128) -> DestReg(GR64)
// SrcReg(VR64) -> DestReg(GR64)
@@ -3015,6 +3064,11 @@ static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
return 0;
}
+inline static bool MaskRegClassContains(unsigned Reg) {
+ return X86::VK8RegClass.contains(Reg) ||
+ X86::VK16RegClass.contains(Reg) ||
+ X86::VK1RegClass.contains(Reg);
+}
static
unsigned copyPhysRegOpcode_AVX512(unsigned& DestReg, unsigned& SrcReg) {
if (X86::VR128XRegClass.contains(DestReg, SrcReg) ||
@@ -3024,11 +3078,23 @@ unsigned copyPhysRegOpcode_AVX512(unsigned& DestReg, unsigned& SrcReg) {
SrcReg = get512BitSuperRegister(SrcReg);
return X86::VMOVAPSZrr;
}
- if ((X86::VK8RegClass.contains(DestReg) ||
- X86::VK16RegClass.contains(DestReg)) &&
- (X86::VK8RegClass.contains(SrcReg) ||
- X86::VK16RegClass.contains(SrcReg)))
+ if (MaskRegClassContains(DestReg) &&
+ MaskRegClassContains(SrcReg))
return X86::KMOVWkk;
+ if (MaskRegClassContains(DestReg) &&
+ (X86::GR32RegClass.contains(SrcReg) ||
+ X86::GR16RegClass.contains(SrcReg) ||
+ X86::GR8RegClass.contains(SrcReg))) {
+ SrcReg = getX86SubSuperRegister(SrcReg, MVT::i32);
+ return X86::KMOVWkr;
+ }
+ if ((X86::GR32RegClass.contains(DestReg) ||
+ X86::GR16RegClass.contains(DestReg) ||
+ X86::GR8RegClass.contains(DestReg)) &&
+ MaskRegClassContains(SrcReg)) {
+ DestReg = getX86SubSuperRegister(DestReg, MVT::i32);
+ return X86::KMOVWrk;
+ }
return 0;
}
@@ -3037,8 +3103,8 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
// First deal with the normal symmetric copies.
- bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
- bool HasAVX512 = TM.getSubtarget<X86Subtarget>().hasAVX512();
+ bool HasAVX = Subtarget.hasAVX();
+ bool HasAVX512 = Subtarget.hasAVX512();
unsigned Opc = 0;
if (X86::GR64RegClass.contains(DestReg, SrcReg))
Opc = X86::MOV64rr;
@@ -3050,7 +3116,7 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
// Copying to or from a physical H register on x86-64 requires a NOREX
// move. Otherwise use a normal move.
if ((isHReg(DestReg) || isHReg(SrcReg)) &&
- TM.getSubtarget<X86Subtarget>().is64Bit()) {
+ Subtarget.is64Bit()) {
Opc = X86::MOV8rr_NOREX;
// Both operands must be encodable without an REX prefix.
assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) &&
@@ -3067,7 +3133,7 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
else if (X86::VR256RegClass.contains(DestReg, SrcReg))
Opc = X86::VMOVAPSYrr;
if (!Opc)
- Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, TM.getSubtarget<X86Subtarget>());
+ Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, Subtarget);
if (Opc) {
BuildMI(MBB, MI, DL, get(Opc), DestReg)
@@ -3113,9 +3179,9 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
static unsigned getLoadStoreRegOpcode(unsigned Reg,
const TargetRegisterClass *RC,
bool isStackAligned,
- const TargetMachine &TM,
+ const X86Subtarget &STI,
bool load) {
- if (TM.getSubtarget<X86Subtarget>().hasAVX512()) {
+ if (STI.hasAVX512()) {
if (X86::VK8RegClass.hasSubClassEq(RC) ||
X86::VK16RegClass.hasSubClassEq(RC))
return load ? X86::KMOVWkm : X86::KMOVWmk;
@@ -3127,13 +3193,13 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
}
- bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
+ bool HasAVX = STI.hasAVX();
switch (RC->getSize()) {
default:
llvm_unreachable("Unknown spill size");
case 1:
assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass");
- if (TM.getSubtarget<X86Subtarget>().is64Bit())
+ if (STI.is64Bit())
// Copying to or from a physical H register on x86-64 requires a NOREX
// move. Otherwise use a normal move.
if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC))
@@ -3200,16 +3266,16 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
static unsigned getStoreRegOpcode(unsigned SrcReg,
const TargetRegisterClass *RC,
bool isStackAligned,
- TargetMachine &TM) {
- return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, TM, false);
+ const X86Subtarget &STI) {
+ return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, STI, false);
}
static unsigned getLoadRegOpcode(unsigned DestReg,
const TargetRegisterClass *RC,
bool isStackAligned,
- const TargetMachine &TM) {
- return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, TM, true);
+ const X86Subtarget &STI) {
+ return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, STI, true);
}
void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
@@ -3221,9 +3287,10 @@ void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
assert(MF.getFrameInfo()->getObjectSize(FrameIdx) >= RC->getSize() &&
"Stack slot too small for store");
unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
- bool isAligned = (TM.getFrameLowering()->getStackAlignment() >= Alignment) ||
- RI.canRealignStack(MF);
- unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, TM);
+ bool isAligned =
+ (MF.getTarget().getFrameLowering()->getStackAlignment() >= Alignment) ||
+ RI.canRealignStack(MF);
+ unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
DebugLoc DL = MBB.findDebugLoc(MI);
addFrameReference(BuildMI(MBB, MI, DL, get(Opc)), FrameIdx)
.addReg(SrcReg, getKillRegState(isKill));
@@ -3239,7 +3306,7 @@ void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
bool isAligned = MMOBegin != MMOEnd &&
(*MMOBegin)->getAlignment() >= Alignment;
- unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, TM);
+ unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
DebugLoc DL;
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
@@ -3257,9 +3324,10 @@ void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
const TargetRegisterInfo *TRI) const {
const MachineFunction &MF = *MBB.getParent();
unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
- bool isAligned = (TM.getFrameLowering()->getStackAlignment() >= Alignment) ||
- RI.canRealignStack(MF);
- unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, TM);
+ bool isAligned =
+ (MF.getTarget().getFrameLowering()->getStackAlignment() >= Alignment) ||
+ RI.canRealignStack(MF);
+ unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
DebugLoc DL = MBB.findDebugLoc(MI);
addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx);
}
@@ -3273,7 +3341,7 @@ void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
bool isAligned = MMOBegin != MMOEnd &&
(*MMOBegin)->getAlignment() >= Alignment;
- unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, TM);
+ unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
DebugLoc DL;
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
@@ -3485,6 +3553,26 @@ inline static bool isDefConvertible(MachineInstr *MI) {
}
}
+/// isUseDefConvertible - check whether the use can be converted
+/// to remove a comparison against zero.
+static X86::CondCode isUseDefConvertible(MachineInstr *MI) {
+ switch (MI->getOpcode()) {
+ default: return X86::COND_INVALID;
+ case X86::LZCNT16rr: case X86::LZCNT16rm:
+ case X86::LZCNT32rr: case X86::LZCNT32rm:
+ case X86::LZCNT64rr: case X86::LZCNT64rm:
+ return X86::COND_B;
+ case X86::POPCNT16rr:case X86::POPCNT16rm:
+ case X86::POPCNT32rr:case X86::POPCNT32rm:
+ case X86::POPCNT64rr:case X86::POPCNT64rm:
+ return X86::COND_E;
+ case X86::TZCNT16rr: case X86::TZCNT16rm:
+ case X86::TZCNT32rr: case X86::TZCNT32rm:
+ case X86::TZCNT64rr: case X86::TZCNT64rm:
+ return X86::COND_B;
+ }
+}
+
/// optimizeCompareInstr - Check if there exists an earlier instruction that
/// operates on the same source operands and sets flags in the same way as
/// Compare; remove Compare if possible.
@@ -3551,13 +3639,38 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// If we are comparing against zero, check whether we can use MI to update
// EFLAGS. If MI is not in the same BB as CmpInstr, do not optimize.
bool IsCmpZero = (SrcReg2 == 0 && CmpValue == 0);
- if (IsCmpZero && (MI->getParent() != CmpInstr->getParent() ||
- !isDefConvertible(MI)))
+ if (IsCmpZero && MI->getParent() != CmpInstr->getParent())
return false;
+ // If we have a use of the source register between the def and our compare
+ // instruction we can eliminate the compare iff the use sets EFLAGS in the
+ // right way.
+ bool ShouldUpdateCC = false;
+ X86::CondCode NewCC = X86::COND_INVALID;
+ if (IsCmpZero && !isDefConvertible(MI)) {
+ // Scan forward from the use until we hit the use we're looking for or the
+ // compare instruction.
+ for (MachineBasicBlock::iterator J = MI;; ++J) {
+ // Do we have a convertible instruction?
+ NewCC = isUseDefConvertible(J);
+ if (NewCC != X86::COND_INVALID && J->getOperand(1).isReg() &&
+ J->getOperand(1).getReg() == SrcReg) {
+ assert(J->definesRegister(X86::EFLAGS) && "Must be an EFLAGS def!");
+ ShouldUpdateCC = true; // Update CC later on.
+ // This is not a def of SrcReg, but still a def of EFLAGS. Keep going
+ // with the new def.
+ MI = Def = J;
+ break;
+ }
+
+ if (J == I)
+ return false;
+ }
+ }
+
// We are searching for an earlier instruction that can make CmpInstr
// redundant and that instruction will be saved in Sub.
- MachineInstr *Sub = NULL;
+ MachineInstr *Sub = nullptr;
const TargetRegisterInfo *TRI = &getRegisterInfo();
// We iterate backward, starting from the instruction before CmpInstr and
@@ -3570,7 +3683,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
RE = CmpInstr->getParent() == MI->getParent() ?
MachineBasicBlock::reverse_iterator(++Def) /* points to MI */ :
CmpInstr->getParent()->rend();
- MachineInstr *Movr0Inst = 0;
+ MachineInstr *Movr0Inst = nullptr;
for (; RI != RE; ++RI) {
MachineInstr *Instr = &*RI;
// Check whether CmpInstr can be made redundant by the current instruction.
@@ -3626,7 +3739,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
continue;
// EFLAGS is used by this instruction.
- X86::CondCode OldCC;
+ X86::CondCode OldCC = X86::COND_INVALID;
bool OpcIsSET = false;
if (IsCmpZero || IsSwapped) {
// We decode the condition code from opcode.
@@ -3652,13 +3765,28 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// CF and OF are used, we can't perform this optimization.
return false;
}
+
+ // If we're updating the condition code check if we have to reverse the
+ // condition.
+ if (ShouldUpdateCC)
+ switch (OldCC) {
+ default:
+ return false;
+ case X86::COND_E:
+ break;
+ case X86::COND_NE:
+ NewCC = GetOppositeBranchCondition(NewCC);
+ break;
+ }
} else if (IsSwapped) {
// If we have SUB(r1, r2) and CMP(r2, r1), the condition code needs
// to be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
// We swap the condition code and synthesize the new opcode.
- X86::CondCode NewCC = getSwappedCondition(OldCC);
+ NewCC = getSwappedCondition(OldCC);
if (NewCC == X86::COND_INVALID) return false;
+ }
+ if ((ShouldUpdateCC || IsSwapped) && NewCC != OldCC) {
// Synthesize the new opcode.
bool HasMemoryOperand = Instr.hasOneMemOperand();
unsigned NewOpc;
@@ -3745,19 +3873,19 @@ optimizeLoadInstr(MachineInstr *MI, const MachineRegisterInfo *MRI,
unsigned &FoldAsLoadDefReg,
MachineInstr *&DefMI) const {
if (FoldAsLoadDefReg == 0)
- return 0;
+ return nullptr;
// To be conservative, if there exists another load, clear the load candidate.
if (MI->mayLoad()) {
FoldAsLoadDefReg = 0;
- return 0;
+ return nullptr;
}
// Check whether we can move DefMI here.
DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
assert(DefMI);
bool SawStore = false;
- if (!DefMI->isSafeToMove(this, 0, SawStore))
- return 0;
+ if (!DefMI->isSafeToMove(this, nullptr, SawStore))
+ return nullptr;
// We try to commute MI if possible.
unsigned IdxEnd = (MI->isCommutable()) ? 2 : 1;
@@ -3774,12 +3902,12 @@ optimizeLoadInstr(MachineInstr *MI, const MachineRegisterInfo *MRI,
continue;
// Do not fold if we have a subreg use or a def or multiple uses.
if (MO.getSubReg() || MO.isDef() || FoundSrcOperand)
- return 0;
+ return nullptr;
SrcOperandId = i;
FoundSrcOperand = true;
}
- if (!FoundSrcOperand) return 0;
+ if (!FoundSrcOperand) return nullptr;
// Check whether we can fold the def into SrcOperandId.
SmallVector<unsigned, 8> Ops;
@@ -3793,22 +3921,22 @@ optimizeLoadInstr(MachineInstr *MI, const MachineRegisterInfo *MRI,
if (Idx == 1) {
// MI was changed but it didn't help, commute it back!
commuteInstruction(MI, false);
- return 0;
+ return nullptr;
}
// Check whether we can commute MI and enable folding.
if (MI->isCommutable()) {
MachineInstr *NewMI = commuteInstruction(MI, false);
// Unable to commute.
- if (!NewMI) return 0;
+ if (!NewMI) return nullptr;
if (NewMI != MI) {
// New instruction. It doesn't need to be kept.
NewMI->eraseFromParent();
- return 0;
+ return nullptr;
}
}
}
- return 0;
+ return nullptr;
}
/// Expand2AddrUndef - Expand a single-def pseudo instruction to a two-addr
@@ -3834,9 +3962,11 @@ static bool Expand2AddrUndef(MachineInstrBuilder &MIB,
}
bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
- bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
+ bool HasAVX = Subtarget.hasAVX();
MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
switch (MI->getOpcode()) {
+ case X86::MOV32r0:
+ return Expand2AddrUndef(MIB, get(X86::XOR32rr));
case X86::SETB_C8r:
return Expand2AddrUndef(MIB, get(X86::SBB8rr));
case X86::SETB_C16r:
@@ -3861,6 +3991,7 @@ bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
case X86::TEST8ri_NOREX:
MI->setDesc(get(X86::TEST8ri));
return true;
+ case X86::KSET0B:
case X86::KSET0W: return Expand2AddrUndef(MIB, get(X86::KXORWrr));
case X86::KSET1B:
case X86::KSET1W: return Expand2AddrUndef(MIB, get(X86::KXNORWrr));
@@ -3940,15 +4071,16 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr *MI, unsigned i,
const SmallVectorImpl<MachineOperand> &MOs,
unsigned Size, unsigned Align) const {
- const DenseMap<unsigned, std::pair<unsigned,unsigned> > *OpcodeTablePtr = 0;
- bool isCallRegIndirect = TM.getSubtarget<X86Subtarget>().callRegIndirect();
+ const DenseMap<unsigned,
+ std::pair<unsigned,unsigned> > *OpcodeTablePtr = nullptr;
+ bool isCallRegIndirect = Subtarget.callRegIndirect();
bool isTwoAddrFold = false;
// Atom favors register form of call. So, we do not fold loads into calls
// when X86Subtarget is Atom.
if (isCallRegIndirect &&
(MI->getOpcode() == X86::CALL32r || MI->getOpcode() == X86::CALL64r)) {
- return NULL;
+ return nullptr;
}
unsigned NumOps = MI->getDesc().getNumOperands();
@@ -3959,9 +4091,9 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
// X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
if (MI->getOpcode() == X86::ADD32ri &&
MI->getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
- return NULL;
+ return nullptr;
- MachineInstr *NewMI = NULL;
+ MachineInstr *NewMI = nullptr;
// Folding a memory location into the two-address part of a two-address
// instruction is different than folding it other places. It requires
// replacing the *two* registers with the memory location.
@@ -3996,7 +4128,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
unsigned Opcode = I->second.first;
unsigned MinAlign = (I->second.second & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT;
if (Align < MinAlign)
- return NULL;
+ return nullptr;
bool NarrowToMOV32rm = false;
if (Size) {
unsigned RCSize = getRegClass(MI->getDesc(), i, &RI, MF)->getSize();
@@ -4004,12 +4136,12 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
// Check if it's safe to fold the load. If the size of the object is
// narrower than the load width, then it's not.
if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4)
- return NULL;
+ return nullptr;
// If this is a 64-bit load, but the spill slot is 32, then we can do
// a 32-bit load which is implicitly zero-extended. This likely is due
// to liveintervalanalysis remat'ing a load from stack slot.
if (MI->getOperand(0).getSubReg() || MI->getOperand(1).getSubReg())
- return NULL;
+ return nullptr;
Opcode = X86::MOV32rm;
NarrowToMOV32rm = true;
}
@@ -4038,7 +4170,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
// No fusion
if (PrintFailedFusing && !MI->isCopy())
dbgs() << "We failed to fuse operand " << i << " in " << *MI;
- return NULL;
+ return nullptr;
}
/// hasPartialRegUpdate - Return true for all instructions that only update
@@ -4182,7 +4314,7 @@ breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
if (X86::VR128RegClass.contains(Reg)) {
// These instructions are all floating point domain, so xorps is the best
// choice.
- bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
+ bool HasAVX = Subtarget.hasAVX();
unsigned Opc = HasAVX ? X86::VXORPSrr : X86::XORPSrr;
BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(Opc), Reg)
.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
@@ -4198,84 +4330,19 @@ breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
MI->addRegisterKilled(Reg, TRI, true);
}
-static MachineInstr* foldPatchpoint(MachineFunction &MF,
- MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex,
- const TargetInstrInfo &TII) {
- unsigned StartIdx = 0;
- switch (MI->getOpcode()) {
- case TargetOpcode::STACKMAP:
- StartIdx = 2; // Skip ID, nShadowBytes.
- break;
- case TargetOpcode::PATCHPOINT: {
- // For PatchPoint, the call args are not foldable.
- PatchPointOpers opers(MI);
- StartIdx = opers.getVarIdx();
- break;
- }
- default:
- llvm_unreachable("unexpected stackmap opcode");
- }
-
- // Return false if any operands requested for folding are not foldable (not
- // part of the stackmap's live values).
- for (SmallVectorImpl<unsigned>::const_iterator I = Ops.begin(), E = Ops.end();
- I != E; ++I) {
- if (*I < StartIdx)
- return 0;
- }
-
- MachineInstr *NewMI =
- MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
- MachineInstrBuilder MIB(MF, NewMI);
-
- // No need to fold return, the meta data, and function arguments
- for (unsigned i = 0; i < StartIdx; ++i)
- MIB.addOperand(MI->getOperand(i));
-
- for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
- assert(MO.getReg() && "patchpoint can only fold a vreg operand");
- // Compute the spill slot size and offset.
- const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(MO.getReg());
- unsigned SpillSize;
- unsigned SpillOffset;
- bool Valid = TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize,
- SpillOffset, &MF.getTarget());
- if (!Valid)
- report_fatal_error("cannot spill patchpoint subregister operand");
-
- MIB.addOperand(MachineOperand::CreateImm(StackMaps::IndirectMemRefOp));
- MIB.addOperand(MachineOperand::CreateImm(SpillSize));
- MIB.addOperand(MachineOperand::CreateFI(FrameIndex));
- addOffset(MIB, SpillOffset);
- }
- else
- MIB.addOperand(MO);
- }
- return NewMI;
-}
-
MachineInstr*
X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
- // Special case stack map and patch point intrinsics.
- if (MI->getOpcode() == TargetOpcode::STACKMAP
- || MI->getOpcode() == TargetOpcode::PATCHPOINT) {
- return foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
- }
// Check switch flag
- if (NoFusing) return NULL;
+ if (NoFusing) return nullptr;
// Unless optimizing for size, don't fold to avoid partial
// register update stalls
if (!MF.getFunction()->getAttributes().
hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize) &&
hasPartialRegUpdate(MI->getOpcode()))
- return 0;
+ return nullptr;
const MachineFrameInfo *MFI = MF.getFrameInfo();
unsigned Size = MFI->getObjectSize(FrameIndex);
@@ -4283,12 +4350,13 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
// If the function stack isn't realigned we don't want to fold instructions
// that need increased alignment.
if (!RI.needsStackRealignment(MF))
- Alignment = std::min(Alignment, TM.getFrameLowering()->getStackAlignment());
+ Alignment = std::min(
+ Alignment, MF.getTarget().getFrameLowering()->getStackAlignment());
if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
unsigned NewOpc = 0;
unsigned RCSize = 0;
switch (MI->getOpcode()) {
- default: return NULL;
+ default: return nullptr;
case X86::TEST8rr: NewOpc = X86::CMP8ri; RCSize = 1; break;
case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break;
case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break;
@@ -4297,12 +4365,12 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
// Check if it's safe to fold the load. If the size of the object is
// narrower than the load width, then it's not.
if (Size < RCSize)
- return NULL;
+ return nullptr;
// Change to CMPXXri r, 0 first.
MI->setDesc(get(NewOpc));
MI->getOperand(1).ChangeToImmediate(0);
} else if (Ops.size() != 1)
- return NULL;
+ return nullptr;
SmallVector<MachineOperand,4> MOs;
MOs.push_back(MachineOperand::CreateFI(FrameIndex));
@@ -4320,14 +4388,14 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
return foldMemoryOperandImpl(MF, MI, Ops, FrameIndex);
// Check switch flag
- if (NoFusing) return NULL;
+ if (NoFusing) return nullptr;
// Unless optimizing for size, don't fold to avoid partial
// register update stalls
if (!MF.getFunction()->getAttributes().
hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize) &&
hasPartialRegUpdate(MI->getOpcode()))
- return 0;
+ return nullptr;
// Determine the alignment of the load.
unsigned Alignment = 0;
@@ -4350,12 +4418,12 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
Alignment = 4;
break;
default:
- return 0;
+ return nullptr;
}
if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
unsigned NewOpc = 0;
switch (MI->getOpcode()) {
- default: return NULL;
+ default: return nullptr;
case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
case X86::TEST16rr: NewOpc = X86::CMP16ri8; break;
case X86::TEST32rr: NewOpc = X86::CMP32ri8; break;
@@ -4365,12 +4433,12 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
MI->setDesc(get(NewOpc));
MI->getOperand(1).ChangeToImmediate(0);
} else if (Ops.size() != 1)
- return NULL;
+ return nullptr;
// Make sure the subregisters match.
// Otherwise we risk changing the size of the load.
if (LoadMI->getOperand(0).getSubReg() != MI->getOperand(Ops[0]).getSubReg())
- return NULL;
+ return nullptr;
SmallVector<MachineOperand,X86::AddrNumOperands> MOs;
switch (LoadMI->getOpcode()) {
@@ -4384,21 +4452,21 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
// Create a constant-pool entry and operands to load from it.
// Medium and large mode can't fold loads this way.
- if (TM.getCodeModel() != CodeModel::Small &&
- TM.getCodeModel() != CodeModel::Kernel)
- return NULL;
+ if (MF.getTarget().getCodeModel() != CodeModel::Small &&
+ MF.getTarget().getCodeModel() != CodeModel::Kernel)
+ return nullptr;
// x86-32 PIC requires a PIC base register for constant pools.
unsigned PICBase = 0;
- if (TM.getRelocationModel() == Reloc::PIC_) {
- if (TM.getSubtarget<X86Subtarget>().is64Bit())
+ if (MF.getTarget().getRelocationModel() == Reloc::PIC_) {
+ if (Subtarget.is64Bit())
PICBase = X86::RIP;
else
// FIXME: PICBase = getGlobalBaseReg(&MF);
// This doesn't work for several reasons.
// 1. GlobalBaseReg may have been spilled.
// 2. It may not be live at MI.
- return NULL;
+ return nullptr;
}
// Create a constant-pool entry.
@@ -4434,14 +4502,14 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
> 4)
// These instructions only load 32 bits, we can't fold them if the
// destination register is wider than 32 bits (4 bytes).
- return NULL;
+ return nullptr;
if ((LoadMI->getOpcode() == X86::MOVSDrm ||
LoadMI->getOpcode() == X86::VMOVSDrm) &&
MF.getRegInfo().getRegClass(LoadMI->getOperand(0).getReg())->getSize()
> 8)
// These instructions only load 64 bits, we can't fold them if the
// destination register is wider than 64 bits (8 bytes).
- return NULL;
+ return nullptr;
// Folding a normal load. Just copy the load's address operands.
for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
@@ -4487,7 +4555,8 @@ bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
// Folding a memory location into the two-address part of a two-address
// instruction is different than folding it other places. It requires
// replacing the *two* registers with the memory location.
- const DenseMap<unsigned, std::pair<unsigned,unsigned> > *OpcodeTablePtr = 0;
+ const DenseMap<unsigned,
+ std::pair<unsigned,unsigned> > *OpcodeTablePtr = nullptr;
if (isTwoAddr && NumOps >= 2 && OpNum < 2) {
OpcodeTablePtr = &RegOp2MemOpTable2Addr;
} else if (OpNum == 0) { // If operand 0
@@ -4530,7 +4599,7 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
if (!MI->hasOneMemOperand() &&
RC == &X86::VR128RegClass &&
- !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast())
+ !Subtarget.isUnalignedMemAccessFast())
// Without memoperands, loadRegFromAddr and storeRegToStackSlot will
// conservatively assume the address is unaligned. That's bad for
// performance.
@@ -4669,7 +4738,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
AddrOps.push_back(Chain);
// Emit the load instruction.
- SDNode *Load = 0;
+ SDNode *Load = nullptr;
if (FoldedLoad) {
EVT VT = *RC->vt_begin();
std::pair<MachineInstr::mmo_iterator,
@@ -4678,13 +4747,13 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
cast<MachineSDNode>(N)->memoperands_end());
if (!(*MMOs.first) &&
RC == &X86::VR128RegClass &&
- !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast())
+ !Subtarget.isUnalignedMemAccessFast())
// Do not introduce a slow unaligned load.
return false;
unsigned Alignment = RC->getSize() == 32 ? 32 : 16;
bool isAligned = (*MMOs.first) &&
(*MMOs.first)->getAlignment() >= Alignment;
- Load = DAG.getMachineNode(getLoadRegOpcode(0, RC, isAligned, TM), dl,
+ Load = DAG.getMachineNode(getLoadRegOpcode(0, RC, isAligned, Subtarget), dl,
VT, MVT::Other, AddrOps);
NewNodes.push_back(Load);
@@ -4694,7 +4763,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
// Emit the data processing instruction.
std::vector<EVT> VTs;
- const TargetRegisterClass *DstRC = 0;
+ const TargetRegisterClass *DstRC = nullptr;
if (MCID.getNumDefs() > 0) {
DstRC = getRegClass(MCID, 0, &RI, MF);
VTs.push_back(*DstRC->vt_begin());
@@ -4721,15 +4790,15 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
cast<MachineSDNode>(N)->memoperands_end());
if (!(*MMOs.first) &&
RC == &X86::VR128RegClass &&
- !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast())
+ !Subtarget.isUnalignedMemAccessFast())
// Do not introduce a slow unaligned store.
return false;
unsigned Alignment = RC->getSize() == 32 ? 32 : 16;
bool isAligned = (*MMOs.first) &&
(*MMOs.first)->getAlignment() >= Alignment;
- SDNode *Store = DAG.getMachineNode(getStoreRegOpcode(0, DstRC,
- isAligned, TM),
- dl, MVT::Other, AddrOps);
+ SDNode *Store =
+ DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, Subtarget),
+ dl, MVT::Other, AddrOps);
NewNodes.push_back(Store);
// Preserve memory reference information.
@@ -4890,7 +4959,7 @@ bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
default:
// XMM registers. In 64-bit mode we can be a bit more aggressive since we
// have 16 of them to play with.
- if (TM.getSubtargetImpl()->is64Bit()) {
+ if (Subtarget.is64Bit()) {
if (NumLoads >= 3)
return false;
} else if (NumLoads) {
@@ -4916,7 +4985,7 @@ bool X86InstrInfo::shouldScheduleAdjacent(MachineInstr* First,
// Check if this processor supports macro-fusion. Since this is a minor
// heuristic, we haven't specifically reserved a feature. hasAVX is a decent
// proxy for SandyBridge+.
- if (!TM.getSubtarget<X86Subtarget>().hasAVX())
+ if (!Subtarget.hasAVX())
return false;
enum {
@@ -4968,6 +5037,7 @@ bool X86InstrInfo::shouldScheduleAdjacent(MachineInstr* First,
case X86::TEST16rm:
case X86::TEST32rm:
case X86::TEST64rm:
+ case X86::TEST8ri_NOREX:
case X86::AND16i16:
case X86::AND16ri:
case X86::AND16ri8:
@@ -5098,7 +5168,7 @@ isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
/// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
///
unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
- assert(!TM.getSubtarget<X86Subtarget>().is64Bit() &&
+ assert(!Subtarget.is64Bit() &&
"X86-64 PIC uses RIP relative addressing");
X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
@@ -5172,7 +5242,13 @@ static const uint16_t ReplaceableInstrsAVX2[][3] = {
{ X86::VINSERTF128rm, X86::VINSERTF128rm, X86::VINSERTI128rm },
{ X86::VINSERTF128rr, X86::VINSERTF128rr, X86::VINSERTI128rr },
{ X86::VPERM2F128rm, X86::VPERM2F128rm, X86::VPERM2I128rm },
- { X86::VPERM2F128rr, X86::VPERM2F128rr, X86::VPERM2I128rr }
+ { X86::VPERM2F128rr, X86::VPERM2F128rr, X86::VPERM2I128rr },
+ { X86::VBROADCASTSSrm, X86::VBROADCASTSSrm, X86::VPBROADCASTDrm},
+ { X86::VBROADCASTSSrr, X86::VBROADCASTSSrr, X86::VPBROADCASTDrr},
+ { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrr, X86::VPBROADCASTDYrr},
+ { X86::VBROADCASTSSYrm, X86::VBROADCASTSSYrm, X86::VPBROADCASTDYrm},
+ { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrr, X86::VPBROADCASTQYrr},
+ { X86::VBROADCASTSDYrm, X86::VBROADCASTSDYrm, X86::VPBROADCASTQYrm}
};
// FIXME: Some shuffle and unpack instructions have equivalents in different
@@ -5182,20 +5258,20 @@ static const uint16_t *lookup(unsigned opcode, unsigned domain) {
for (unsigned i = 0, e = array_lengthof(ReplaceableInstrs); i != e; ++i)
if (ReplaceableInstrs[i][domain-1] == opcode)
return ReplaceableInstrs[i];
- return 0;
+ return nullptr;
}
static const uint16_t *lookupAVX2(unsigned opcode, unsigned domain) {
for (unsigned i = 0, e = array_lengthof(ReplaceableInstrsAVX2); i != e; ++i)
if (ReplaceableInstrsAVX2[i][domain-1] == opcode)
return ReplaceableInstrsAVX2[i];
- return 0;
+ return nullptr;
}
std::pair<uint16_t, uint16_t>
X86InstrInfo::getExecutionDomain(const MachineInstr *MI) const {
uint16_t domain = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
- bool hasAVX2 = TM.getSubtarget<X86Subtarget>().hasAVX2();
+ bool hasAVX2 = Subtarget.hasAVX2();
uint16_t validDomains = 0;
if (domain && lookup(MI->getOpcode(), domain))
validDomains = 0xe;
@@ -5210,7 +5286,7 @@ void X86InstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
assert(dom && "Not an SSE instruction");
const uint16_t *table = lookup(MI->getOpcode(), dom);
if (!table) { // try the other table
- assert((TM.getSubtarget<X86Subtarget>().hasAVX2() || Domain < 3) &&
+ assert((Subtarget.hasAVX2() || Domain < 3) &&
"256-bit vector operations only available in AVX2");
table = lookupAVX2(MI->getOpcode(), dom);
}
@@ -5223,6 +5299,16 @@ void X86InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
NopInst.setOpcode(X86::NOOP);
}
+void X86InstrInfo::getUnconditionalBranch(
+ MCInst &Branch, const MCSymbolRefExpr *BranchTarget) const {
+ Branch.setOpcode(X86::JMP_4);
+ Branch.addOperand(MCOperand::CreateExpr(BranchTarget));
+}
+
+void X86InstrInfo::getTrap(MCInst &MI) const {
+ MI.setOpcode(X86::TRAP);
+}
+
bool X86InstrInfo::isHighLatencyDef(int opc) const {
switch (opc) {
default: return false;
@@ -5315,12 +5401,14 @@ namespace {
static char ID;
CGBR() : MachineFunctionPass(ID) {}
- virtual bool runOnMachineFunction(MachineFunction &MF) {
+ bool runOnMachineFunction(MachineFunction &MF) override {
const X86TargetMachine *TM =
static_cast<const X86TargetMachine *>(&MF.getTarget());
- assert(!TM->getSubtarget<X86Subtarget>().is64Bit() &&
- "X86-64 PIC uses RIP relative addressing");
+ // Don't do anything if this is 64-bit as 64-bit PIC
+ // uses RIP relative addressing.
+ if (TM->getSubtarget<X86Subtarget>().is64Bit())
+ return false;
// Only emit a global base reg in PIC mode.
if (TM->getRelocationModel() != Reloc::PIC_)
@@ -5362,11 +5450,11 @@ namespace {
return true;
}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "X86 PIC Global Base Reg Initialization";
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -5375,14 +5463,14 @@ namespace {
char CGBR::ID = 0;
FunctionPass*
-llvm::createGlobalBaseRegPass() { return new CGBR(); }
+llvm::createX86GlobalBaseRegPass() { return new CGBR(); }
namespace {
struct LDTLSCleanup : public MachineFunctionPass {
static char ID;
LDTLSCleanup() : MachineFunctionPass(ID) {}
- virtual bool runOnMachineFunction(MachineFunction &MF) {
+ bool runOnMachineFunction(MachineFunction &MF) override {
X86MachineFunctionInfo* MFI = MF.getInfo<X86MachineFunctionInfo>();
if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
// No point folding accesses if there aren't at least two.
@@ -5475,11 +5563,11 @@ namespace {
return Copy;
}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Local Dynamic TLS Access Clean-up";
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<MachineDominatorTree>();
MachineFunctionPass::getAnalysisUsage(AU);
diff --git a/contrib/llvm/lib/Target/X86/X86InstrInfo.h b/contrib/llvm/lib/Target/X86/X86InstrInfo.h
index 600e392..c177e3a 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86InstrInfo.h
@@ -14,7 +14,7 @@
#ifndef X86INSTRUCTIONINFO_H
#define X86INSTRUCTIONINFO_H
-#include "X86.h"
+#include "MCTargetDesc/X86BaseInfo.h"
#include "X86RegisterInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -24,7 +24,7 @@
namespace llvm {
class X86RegisterInfo;
- class X86TargetMachine;
+ class X86Subtarget;
namespace X86 {
// X86 specific condition code. These correspond to X86_*_COND in
@@ -46,6 +46,7 @@ namespace X86 {
COND_O = 13,
COND_P = 14,
COND_S = 15,
+ LAST_VALID_COND = COND_S,
// Artificial condition codes. These are used by AnalyzeBranch
// to indicate a block terminated with two conditional branches to
@@ -61,12 +62,21 @@ namespace X86 {
// Turn condition code into conditional branch opcode.
unsigned GetCondBranchFromCond(CondCode CC);
+ /// \brief Return a set opcode for the given condition and whether it has
+ /// a memory operand.
+ unsigned getSETFromCond(CondCode CC, bool HasMemoryOperand = false);
+
+ /// \brief Return a cmov opcode for the given condition, register size in
+ /// bytes, and operand type.
+ unsigned getCMovFromCond(CondCode CC, unsigned RegBytes,
+ bool HasMemoryOperand = false);
+
// Turn CMov opcode into condition code.
CondCode getCondFromCMovOpc(unsigned Opc);
/// GetOppositeBranchCondition - Return the inverse of the specified cond,
/// e.g. turning COND_E to COND_NE.
- CondCode GetOppositeBranchCondition(X86::CondCode CC);
+ CondCode GetOppositeBranchCondition(CondCode CC);
} // end namespace X86;
@@ -111,24 +121,25 @@ inline static bool isScale(const MachineOperand &MO) {
inline static bool isLeaMem(const MachineInstr *MI, unsigned Op) {
if (MI->getOperand(Op).isFI()) return true;
- return Op+4 <= MI->getNumOperands() &&
- MI->getOperand(Op ).isReg() && isScale(MI->getOperand(Op+1)) &&
- MI->getOperand(Op+2).isReg() &&
- (MI->getOperand(Op+3).isImm() ||
- MI->getOperand(Op+3).isGlobal() ||
- MI->getOperand(Op+3).isCPI() ||
- MI->getOperand(Op+3).isJTI());
+ return Op+X86::AddrSegmentReg <= MI->getNumOperands() &&
+ MI->getOperand(Op+X86::AddrBaseReg).isReg() &&
+ isScale(MI->getOperand(Op+X86::AddrScaleAmt)) &&
+ MI->getOperand(Op+X86::AddrIndexReg).isReg() &&
+ (MI->getOperand(Op+X86::AddrDisp).isImm() ||
+ MI->getOperand(Op+X86::AddrDisp).isGlobal() ||
+ MI->getOperand(Op+X86::AddrDisp).isCPI() ||
+ MI->getOperand(Op+X86::AddrDisp).isJTI());
}
inline static bool isMem(const MachineInstr *MI, unsigned Op) {
if (MI->getOperand(Op).isFI()) return true;
- return Op+5 <= MI->getNumOperands() &&
- MI->getOperand(Op+4).isReg() &&
+ return Op+X86::AddrNumOperands <= MI->getNumOperands() &&
+ MI->getOperand(Op+X86::AddrSegmentReg).isReg() &&
isLeaMem(MI, Op);
}
-class X86InstrInfo : public X86GenInstrInfo {
- X86TargetMachine &TM;
+class X86InstrInfo final : public X86GenInstrInfo {
+ X86Subtarget &Subtarget;
const X86RegisterInfo RI;
/// RegOp2MemOpTable3Addr, RegOp2MemOpTable0, RegOp2MemOpTable1,
@@ -155,13 +166,13 @@ class X86InstrInfo : public X86GenInstrInfo {
virtual void anchor();
public:
- explicit X86InstrInfo(X86TargetMachine &tm);
+ explicit X86InstrInfo(X86Subtarget &STI);
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
- virtual const X86RegisterInfo &getRegisterInfo() const { return RI; }
+ const X86RegisterInfo &getRegisterInfo() const { return RI; }
/// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
/// extension instruction. That is, it's like a copy where it's legal for the
@@ -169,30 +180,32 @@ public:
/// true, then it's expected the pre-extension value is available as a subreg
/// of the result register. This also returns the sub-register index in
/// SubIdx.
- virtual bool isCoalescableExtInstr(const MachineInstr &MI,
- unsigned &SrcReg, unsigned &DstReg,
- unsigned &SubIdx) const;
+ bool isCoalescableExtInstr(const MachineInstr &MI,
+ unsigned &SrcReg, unsigned &DstReg,
+ unsigned &SubIdx) const override;
- unsigned isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
+ unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
/// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination
/// stack locations as well. This uses a heuristic so it isn't
/// reliable for correctness.
unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
- int &FrameIndex) const;
+ int &FrameIndex) const override;
- unsigned isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const;
+ unsigned isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
/// isStoreToStackSlotPostFE - Check for post-frame ptr elimination
/// stack locations as well. This uses a heuristic so it isn't
/// reliable for correctness.
unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
- int &FrameIndex) const;
+ int &FrameIndex) const override;
bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
- AliasAnalysis *AA) const;
+ AliasAnalysis *AA) const override;
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SubIdx,
const MachineInstr *Orig,
- const TargetRegisterInfo &TRI) const;
+ const TargetRegisterInfo &TRI) const override;
/// Given an operand within a MachineInstr, insert preceding code to put it
/// into the right format for a particular kind of LEA instruction. This may
@@ -217,65 +230,68 @@ public:
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
- virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
- LiveVariables *LV) const;
+ MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
+ MachineBasicBlock::iterator &MBBI,
+ LiveVariables *LV) const override;
/// commuteInstruction - We have a few instructions that must be hacked on to
/// commute them.
///
- virtual MachineInstr *commuteInstruction(MachineInstr *MI, bool NewMI) const;
+ MachineInstr *commuteInstruction(MachineInstr *MI, bool NewMI) const override;
+
+ bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
+ unsigned &SrcOpIdx2) const override;
// Branch analysis.
- virtual bool isUnpredicatedTerminator(const MachineInstr* MI) const;
- virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const;
- virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
- virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const;
- virtual bool canInsertSelect(const MachineBasicBlock&,
- const SmallVectorImpl<MachineOperand> &Cond,
- unsigned, unsigned, int&, int&, int&) const;
- virtual void insertSelect(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DstReg,
- const SmallVectorImpl<MachineOperand> &Cond,
- unsigned TrueReg, unsigned FalseReg) const;
- virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const;
- virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
-
- virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- MachineInstr::mmo_iterator MMOBegin,
- MachineInstr::mmo_iterator MMOEnd,
- SmallVectorImpl<MachineInstr*> &NewMIs) const;
-
- virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
-
- virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- MachineInstr::mmo_iterator MMOBegin,
- MachineInstr::mmo_iterator MMOEnd,
- SmallVectorImpl<MachineInstr*> &NewMIs) const;
-
- virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;
+ bool isUnpredicatedTerminator(const MachineInstr* MI) const override;
+ bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const override;
+ unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
+ unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const override;
+ bool canInsertSelect(const MachineBasicBlock&,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ unsigned, unsigned, int&, int&, int&) const override;
+ void insertSelect(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DstReg,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ unsigned TrueReg, unsigned FalseReg) const override;
+ void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const override;
+ void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+
+ void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
+ SmallVectorImpl<MachineOperand> &Addr,
+ const TargetRegisterClass *RC,
+ MachineInstr::mmo_iterator MMOBegin,
+ MachineInstr::mmo_iterator MMOEnd,
+ SmallVectorImpl<MachineInstr*> &NewMIs) const;
+
+ void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+
+ void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
+ SmallVectorImpl<MachineOperand> &Addr,
+ const TargetRegisterClass *RC,
+ MachineInstr::mmo_iterator MMOBegin,
+ MachineInstr::mmo_iterator MMOEnd,
+ SmallVectorImpl<MachineInstr*> &NewMIs) const;
+
+ bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
/// foldMemoryOperand - If this target supports it, fold a load or store of
/// the specified stack slot into the specified machine instruction for the
@@ -283,33 +299,33 @@ public:
/// folding and return true, otherwise it should return false. If it folds
/// the instruction, it is likely that the MachineInstruction the iterator
/// references has been changed.
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const;
+ MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr* MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const override;
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const;
+ MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr* MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ MachineInstr* LoadMI) const override;
/// canFoldMemoryOperand - Returns true if folding the specified load /
/// store is possible.
- virtual bool canFoldMemoryOperand(const MachineInstr*,
- const SmallVectorImpl<unsigned> &) const;
+ bool canFoldMemoryOperand(const MachineInstr*,
+ const SmallVectorImpl<unsigned> &) const override;
/// unfoldMemoryOperand - Separate a single instruction which folded a load or
/// a store or a load and a store into two or more instructions. If this is
/// possible, returns true as well as the new instructions by reference.
- virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
- unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
- SmallVectorImpl<MachineInstr*> &NewMIs) const;
+ bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
+ unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
+ SmallVectorImpl<MachineInstr*> &NewMIs) const override;
- virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
- SmallVectorImpl<SDNode*> &NewNodes) const;
+ bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
+ SmallVectorImpl<SDNode*> &NewNodes) const override;
/// getOpcodeAfterMemoryUnfold - Returns the opcode of the would be new
/// instruction after load / store are unfolded from an instruction of the
@@ -317,17 +333,17 @@ public:
/// possible. If LoadRegIndex is non-null, it is filled in with the operand
/// index of the operand which will hold the register holding the loaded
/// value.
- virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
- bool UnfoldLoad, bool UnfoldStore,
- unsigned *LoadRegIndex = 0) const;
+ unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
+ bool UnfoldLoad, bool UnfoldStore,
+ unsigned *LoadRegIndex = nullptr) const override;
/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler
/// to determine if two loads are loading from the same base address. It
/// should only return true if the base pointers are the same and the
/// only differences between the two addresses are the offset. It also returns
/// the offsets by reference.
- virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
- int64_t &Offset1, int64_t &Offset2) const;
+ bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1,
+ int64_t &Offset2) const override;
/// shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to
/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
@@ -337,21 +353,28 @@ public:
/// from the common base address. It returns true if it decides it's desirable
/// to schedule the two loads together. "NumLoads" is the number of loads that
/// have already been scheduled after Load1.
- virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
- int64_t Offset1, int64_t Offset2,
- unsigned NumLoads) const;
+ bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
+ int64_t Offset1, int64_t Offset2,
+ unsigned NumLoads) const override;
- virtual bool shouldScheduleAdjacent(MachineInstr* First,
- MachineInstr *Second) const LLVM_OVERRIDE;
+ bool shouldScheduleAdjacent(MachineInstr* First,
+ MachineInstr *Second) const override;
- virtual void getNoopForMachoTarget(MCInst &NopInst) const;
+ void getNoopForMachoTarget(MCInst &NopInst) const override;
- virtual
- bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
+ bool
+ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
/// isSafeToMoveRegClassDefs - Return true if it's safe to move a machine
/// instruction that defines the specified register class.
- bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const;
+ bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override;
+
+ /// isSafeToClobberEFLAGS - Return true if it's safe to insert an instruction that
+ /// would clobber the EFLAGS condition register. Note the result may be
+ /// conservative. If it cannot definitely determine the safety after visiting
+ /// a few instructions in each direction it assumes it's not safe.
+ bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const;
static bool isX86_64ExtendedReg(const MachineOperand &MO) {
if (!MO.isReg()) return false;
@@ -365,16 +388,17 @@ public:
unsigned getGlobalBaseReg(MachineFunction *MF) const;
std::pair<uint16_t, uint16_t>
- getExecutionDomain(const MachineInstr *MI) const;
+ getExecutionDomain(const MachineInstr *MI) const override;
- void setExecutionDomain(MachineInstr *MI, unsigned Domain) const;
+ void setExecutionDomain(MachineInstr *MI, unsigned Domain) const override;
- unsigned getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
- const TargetRegisterInfo *TRI) const;
+ unsigned
+ getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
+ const TargetRegisterInfo *TRI) const override;
unsigned getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo *TRI) const override;
void breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo *TRI) const override;
MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
@@ -382,27 +406,34 @@ public:
const SmallVectorImpl<MachineOperand> &MOs,
unsigned Size, unsigned Alignment) const;
- bool isHighLatencyDef(int opc) const;
+ void
+ getUnconditionalBranch(MCInst &Branch,
+ const MCSymbolRefExpr *BranchTarget) const override;
+
+ void getTrap(MCInst &MI) const override;
+
+ bool isHighLatencyDef(int opc) const override;
bool hasHighOperandLatency(const InstrItineraryData *ItinData,
const MachineRegisterInfo *MRI,
const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *UseMI, unsigned UseIdx) const;
+ const MachineInstr *UseMI,
+ unsigned UseIdx) const override;
/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2 if having two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
- virtual bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
- unsigned &SrcReg2,
- int &CmpMask, int &CmpValue) const;
+ bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
+ unsigned &SrcReg2, int &CmpMask,
+ int &CmpValue) const override;
/// optimizeCompareInstr - Check if there exists an earlier instruction that
/// operates on the same source operands and sets flags in the same way as
/// Compare; remove Compare if possible.
- virtual bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
- unsigned SrcReg2, int CmpMask, int CmpValue,
- const MachineRegisterInfo *MRI) const;
+ bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
+ unsigned SrcReg2, int CmpMask, int CmpValue,
+ const MachineRegisterInfo *MRI) const override;
/// optimizeLoadInstr - Try to remove the load by folding it to a register
/// operand at the use. We fold the load instructions if and only if the
@@ -411,10 +442,10 @@ public:
/// defined by the load we are trying to fold. DefMI returns the machine
/// instruction that defines FoldAsLoadDefReg, and the function returns
/// the machine instruction generated due to folding.
- virtual MachineInstr* optimizeLoadInstr(MachineInstr *MI,
- const MachineRegisterInfo *MRI,
- unsigned &FoldAsLoadDefReg,
- MachineInstr *&DefMI) const;
+ MachineInstr* optimizeLoadInstr(MachineInstr *MI,
+ const MachineRegisterInfo *MRI,
+ unsigned &FoldAsLoadDefReg,
+ MachineInstr *&DefMI) const override;
private:
MachineInstr * convertToThreeAddressWithLEA(unsigned MIOpc,
diff --git a/contrib/llvm/lib/Target/X86/X86InstrInfo.td b/contrib/llvm/lib/Target/X86/X86InstrInfo.td
index 6e5d543..0f872a6 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrInfo.td
@@ -23,8 +23,8 @@ def SDTIntShiftDOp: SDTypeProfile<1, 3,
def SDTX86CmpTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisSameAs<1, 2>]>;
-def SDTX86Cmpsd : SDTypeProfile<1, 3, [SDTCisVT<0, f64>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;
-def SDTX86Cmpss : SDTypeProfile<1, 3, [SDTCisVT<0, f32>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;
+def SDTX86Cmps : SDTypeProfile<1, 3, [SDTCisFP<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;
+//def SDTX86Cmpss : SDTypeProfile<1, 3, [SDTCisVT<0, f32>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;
def SDTX86Cmov : SDTypeProfile<1, 4,
[SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
@@ -155,27 +155,6 @@ def X86cas16 : SDNode<"X86ISD::LCMPXCHG16_DAG", SDTX86caspair,
[SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
SDNPMayLoad, SDNPMemOperand]>;
-def X86AtomAdd64 : SDNode<"X86ISD::ATOMADD64_DAG", SDTX86atomicBinary,
- [SDNPHasChain, SDNPMayStore,
- SDNPMayLoad, SDNPMemOperand]>;
-def X86AtomSub64 : SDNode<"X86ISD::ATOMSUB64_DAG", SDTX86atomicBinary,
- [SDNPHasChain, SDNPMayStore,
- SDNPMayLoad, SDNPMemOperand]>;
-def X86AtomOr64 : SDNode<"X86ISD::ATOMOR64_DAG", SDTX86atomicBinary,
- [SDNPHasChain, SDNPMayStore,
- SDNPMayLoad, SDNPMemOperand]>;
-def X86AtomXor64 : SDNode<"X86ISD::ATOMXOR64_DAG", SDTX86atomicBinary,
- [SDNPHasChain, SDNPMayStore,
- SDNPMayLoad, SDNPMemOperand]>;
-def X86AtomAnd64 : SDNode<"X86ISD::ATOMAND64_DAG", SDTX86atomicBinary,
- [SDNPHasChain, SDNPMayStore,
- SDNPMayLoad, SDNPMemOperand]>;
-def X86AtomNand64 : SDNode<"X86ISD::ATOMNAND64_DAG", SDTX86atomicBinary,
- [SDNPHasChain, SDNPMayStore,
- SDNPMayLoad, SDNPMemOperand]>;
-def X86AtomSwap64 : SDNode<"X86ISD::ATOMSWAP64_DAG", SDTX86atomicBinary,
- [SDNPHasChain, SDNPMayStore,
- SDNPMayLoad, SDNPMemOperand]>;
def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
@@ -206,6 +185,10 @@ def X86rep_movs: SDNode<"X86ISD::REP_MOVS", SDTX86RepStr,
def X86rdtsc : SDNode<"X86ISD::RDTSC_DAG", SDTX86Void,
[SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
+def X86rdtscp : SDNode<"X86ISD::RDTSCP_DAG", SDTX86Void,
+ [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
+def X86rdpmc : SDNode<"X86ISD::RDPMC_DAG", SDTX86Void,
+ [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
def X86Wrapper : SDNode<"X86ISD::Wrapper", SDTX86Wrapper>;
def X86WrapperRIP : SDNode<"X86ISD::WrapperRIP", SDTX86Wrapper>;
@@ -249,10 +232,6 @@ def X86xor_flag : SDNode<"X86ISD::XOR", SDTBinaryArithWithFlags,
def X86and_flag : SDNode<"X86ISD::AND", SDTBinaryArithWithFlags,
[SDNPCommutative]>;
-def X86blsi : SDNode<"X86ISD::BLSI", SDTIntUnaryOp>;
-def X86blsmsk : SDNode<"X86ISD::BLSMSK", SDTIntUnaryOp>;
-def X86blsr : SDNode<"X86ISD::BLSR", SDTIntUnaryOp>;
-def X86bzhi : SDNode<"X86ISD::BZHI", SDTIntShiftOp>;
def X86bextr : SDNode<"X86ISD::BEXTR", SDTIntBinOp>;
def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;
@@ -445,6 +424,46 @@ def brtarget8 : Operand<OtherVT>;
}
+def X86SrcIdx8Operand : AsmOperandClass {
+ let Name = "SrcIdx8";
+ let RenderMethod = "addSrcIdxOperands";
+ let SuperClasses = [X86Mem8AsmOperand];
+}
+def X86SrcIdx16Operand : AsmOperandClass {
+ let Name = "SrcIdx16";
+ let RenderMethod = "addSrcIdxOperands";
+ let SuperClasses = [X86Mem16AsmOperand];
+}
+def X86SrcIdx32Operand : AsmOperandClass {
+ let Name = "SrcIdx32";
+ let RenderMethod = "addSrcIdxOperands";
+ let SuperClasses = [X86Mem32AsmOperand];
+}
+def X86SrcIdx64Operand : AsmOperandClass {
+ let Name = "SrcIdx64";
+ let RenderMethod = "addSrcIdxOperands";
+ let SuperClasses = [X86Mem64AsmOperand];
+}
+def X86DstIdx8Operand : AsmOperandClass {
+ let Name = "DstIdx8";
+ let RenderMethod = "addDstIdxOperands";
+ let SuperClasses = [X86Mem8AsmOperand];
+}
+def X86DstIdx16Operand : AsmOperandClass {
+ let Name = "DstIdx16";
+ let RenderMethod = "addDstIdxOperands";
+ let SuperClasses = [X86Mem16AsmOperand];
+}
+def X86DstIdx32Operand : AsmOperandClass {
+ let Name = "DstIdx32";
+ let RenderMethod = "addDstIdxOperands";
+ let SuperClasses = [X86Mem32AsmOperand];
+}
+def X86DstIdx64Operand : AsmOperandClass {
+ let Name = "DstIdx64";
+ let RenderMethod = "addDstIdxOperands";
+ let SuperClasses = [X86Mem64AsmOperand];
+}
def X86MemOffs8AsmOperand : AsmOperandClass {
let Name = "MemOffs8";
let RenderMethod = "addMemOffsOperands";
@@ -465,19 +484,54 @@ def X86MemOffs64AsmOperand : AsmOperandClass {
let RenderMethod = "addMemOffsOperands";
let SuperClasses = [X86Mem64AsmOperand];
}
-
let OperandType = "OPERAND_MEMORY" in {
-def offset8 : Operand<i64> {
+def srcidx8 : Operand<iPTR> {
+ let ParserMatchClass = X86SrcIdx8Operand;
+ let MIOperandInfo = (ops ptr_rc, i8imm);
+ let PrintMethod = "printSrcIdx8"; }
+def srcidx16 : Operand<iPTR> {
+ let ParserMatchClass = X86SrcIdx16Operand;
+ let MIOperandInfo = (ops ptr_rc, i8imm);
+ let PrintMethod = "printSrcIdx16"; }
+def srcidx32 : Operand<iPTR> {
+ let ParserMatchClass = X86SrcIdx32Operand;
+ let MIOperandInfo = (ops ptr_rc, i8imm);
+ let PrintMethod = "printSrcIdx32"; }
+def srcidx64 : Operand<iPTR> {
+ let ParserMatchClass = X86SrcIdx64Operand;
+ let MIOperandInfo = (ops ptr_rc, i8imm);
+ let PrintMethod = "printSrcIdx64"; }
+def dstidx8 : Operand<iPTR> {
+ let ParserMatchClass = X86DstIdx8Operand;
+ let MIOperandInfo = (ops ptr_rc);
+ let PrintMethod = "printDstIdx8"; }
+def dstidx16 : Operand<iPTR> {
+ let ParserMatchClass = X86DstIdx16Operand;
+ let MIOperandInfo = (ops ptr_rc);
+ let PrintMethod = "printDstIdx16"; }
+def dstidx32 : Operand<iPTR> {
+ let ParserMatchClass = X86DstIdx32Operand;
+ let MIOperandInfo = (ops ptr_rc);
+ let PrintMethod = "printDstIdx32"; }
+def dstidx64 : Operand<iPTR> {
+ let ParserMatchClass = X86DstIdx64Operand;
+ let MIOperandInfo = (ops ptr_rc);
+ let PrintMethod = "printDstIdx64"; }
+def offset8 : Operand<iPTR> {
let ParserMatchClass = X86MemOffs8AsmOperand;
+ let MIOperandInfo = (ops i64imm, i8imm);
let PrintMethod = "printMemOffs8"; }
-def offset16 : Operand<i64> {
+def offset16 : Operand<iPTR> {
let ParserMatchClass = X86MemOffs16AsmOperand;
+ let MIOperandInfo = (ops i64imm, i8imm);
let PrintMethod = "printMemOffs16"; }
-def offset32 : Operand<i64> {
+def offset32 : Operand<iPTR> {
let ParserMatchClass = X86MemOffs32AsmOperand;
+ let MIOperandInfo = (ops i64imm, i8imm);
let PrintMethod = "printMemOffs32"; }
-def offset64 : Operand<i64> {
+def offset64 : Operand<iPTR> {
let ParserMatchClass = X86MemOffs64AsmOperand;
+ let MIOperandInfo = (ops i64imm, i8imm);
let PrintMethod = "printMemOffs64"; }
}
@@ -510,6 +564,10 @@ def GR32orGR64 : RegisterOperand<GR32> {
let ParserMatchClass = X86GR32orGR64AsmOperand;
}
+def AVX512RC : Operand<i32> {
+ let PrintMethod = "printRoundingControl";
+ let OperandType = "OPERAND_IMMEDIATE";
+}
// Sign-extended immediate classes. We don't need to define the full lattice
// here because there is no instruction with an ambiguity between ImmSExti64i32
// and ImmSExti32i8.
@@ -657,13 +715,18 @@ def HasSSE4A : Predicate<"Subtarget->hasSSE4A()">;
def HasAVX : Predicate<"Subtarget->hasAVX()">;
def HasAVX2 : Predicate<"Subtarget->hasAVX2()">;
def HasAVX1Only : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX2()">;
-def HasAVX512 : Predicate<"Subtarget->hasAVX512()">;
+def HasAVX512 : Predicate<"Subtarget->hasAVX512()">,
+ AssemblerPredicate<"FeatureAVX512", "AVX-512 ISA">;
def UseAVX : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX512()">;
def UseAVX2 : Predicate<"Subtarget->hasAVX2() && !Subtarget->hasAVX512()">;
-def NoAVX512 : Predicate<"!Subtarget->hasAVX512()">;
+def NoAVX512 : Predicate<"!Subtarget->hasAVX512()">;
def HasCDI : Predicate<"Subtarget->hasCDI()">;
def HasPFI : Predicate<"Subtarget->hasPFI()">;
def HasERI : Predicate<"Subtarget->hasERI()">;
+def HasDQI : Predicate<"Subtarget->hasDQI()">;
+def HasBWI : Predicate<"Subtarget->hasBWI()">;
+def HasVLX : Predicate<"Subtarget->hasVLX()">,
+ AssemblerPredicate<"FeatureVLX", "AVX-512 VLX ISA">;
def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">;
def HasAES : Predicate<"Subtarget->hasAES()">;
@@ -691,10 +754,16 @@ def HasPrefetchW : Predicate<"Subtarget->hasPRFCHW()">;
def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
def HasCmpxchg16b: Predicate<"Subtarget->hasCmpxchg16b()">;
-def In32BitMode : Predicate<"!Subtarget->is64Bit()">,
- AssemblerPredicate<"!Mode64Bit", "32-bit mode">;
+def Not64BitMode : Predicate<"!Subtarget->is64Bit()">,
+ AssemblerPredicate<"!Mode64Bit", "Not 64-bit mode">;
def In64BitMode : Predicate<"Subtarget->is64Bit()">,
AssemblerPredicate<"Mode64Bit", "64-bit mode">;
+def In16BitMode : Predicate<"Subtarget->is16Bit()">,
+ AssemblerPredicate<"Mode16Bit", "16-bit mode">;
+def Not16BitMode : Predicate<"!Subtarget->is16Bit()">,
+ AssemblerPredicate<"!Mode16Bit", "Not 16-bit mode">;
+def In32BitMode : Predicate<"Subtarget->is32Bit()">,
+ AssemblerPredicate<"Mode32Bit", "32-bit mode">;
def IsWin64 : Predicate<"Subtarget->isTargetWin64()">;
def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
def NotNaCl : Predicate<"!Subtarget->isTargetNaCl()">;
@@ -711,6 +780,7 @@ def OptForSpeed : Predicate<"!OptForSize">;
def FastBTMem : Predicate<"!Subtarget->isBTMemSlow()">;
def CallImmAddr : Predicate<"Subtarget->IsLegalToCallImmediateAddr(TM)">;
def FavorMemIndirectCall : Predicate<"!Subtarget->callRegIndirect()">;
+def NotSlowIncDec : Predicate<"!Subtarget->slowIncDec()">;
//===----------------------------------------------------------------------===//
// X86 Instruction Format Definitions.
@@ -845,10 +915,10 @@ def trunc_su : PatFrag<(ops node:$src), (trunc node:$src), [{
// Nop
let neverHasSideEffects = 1, SchedRW = [WriteZero] in {
def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", [], IIC_NOP>;
- def NOOPW : I<0x1f, MRM0m, (outs), (ins i16mem:$zero),
- "nop{w}\t$zero", [], IIC_NOP>, TB, OpSize;
- def NOOPL : I<0x1f, MRM0m, (outs), (ins i32mem:$zero),
- "nop{l}\t$zero", [], IIC_NOP>, TB;
+ def NOOPW : I<0x1f, MRMXm, (outs), (ins i16mem:$zero),
+ "nop{w}\t$zero", [], IIC_NOP>, TB, OpSize16;
+ def NOOPL : I<0x1f, MRMXm, (outs), (ins i32mem:$zero),
+ "nop{l}\t$zero", [], IIC_NOP>, TB, OpSize32;
}
@@ -860,7 +930,7 @@ let SchedRW = [WriteALU] in {
let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, neverHasSideEffects=1 in
def LEAVE : I<0xC9, RawFrm,
(outs), (ins), "leave", [], IIC_LEAVE>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
def LEAVE64 : I<0xC9, RawFrm,
@@ -875,98 +945,110 @@ def LEAVE64 : I<0xC9, RawFrm,
let Defs = [ESP], Uses = [ESP], neverHasSideEffects=1 in {
let mayLoad = 1, SchedRW = [WriteLoad] in {
def POP16r : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", [],
- IIC_POP_REG16>, OpSize;
+ IIC_POP_REG16>, OpSize16;
def POP32r : I<0x58, AddRegFrm, (outs GR32:$reg), (ins), "pop{l}\t$reg", [],
- IIC_POP_REG>;
+ IIC_POP_REG>, OpSize32, Requires<[Not64BitMode]>;
def POP16rmr: I<0x8F, MRM0r, (outs GR16:$reg), (ins), "pop{w}\t$reg", [],
- IIC_POP_REG>, OpSize;
+ IIC_POP_REG>, OpSize16;
def POP16rmm: I<0x8F, MRM0m, (outs), (ins i16mem:$dst), "pop{w}\t$dst", [],
- IIC_POP_MEM>, OpSize;
+ IIC_POP_MEM>, OpSize16;
def POP32rmr: I<0x8F, MRM0r, (outs GR32:$reg), (ins), "pop{l}\t$reg", [],
- IIC_POP_REG>;
+ IIC_POP_REG>, OpSize32, Requires<[Not64BitMode]>;
def POP32rmm: I<0x8F, MRM0m, (outs), (ins i32mem:$dst), "pop{l}\t$dst", [],
- IIC_POP_MEM>;
+ IIC_POP_MEM>, OpSize32, Requires<[Not64BitMode]>;
-def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", [], IIC_POP_F>, OpSize;
+def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", [], IIC_POP_F>,
+ OpSize16;
def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", [], IIC_POP_FD>,
- Requires<[In32BitMode]>;
+ OpSize32, Requires<[Not64BitMode]>;
} // mayLoad, SchedRW
let mayStore = 1, SchedRW = [WriteStore] in {
def PUSH16r : I<0x50, AddRegFrm, (outs), (ins GR16:$reg), "push{w}\t$reg",[],
- IIC_PUSH_REG>, OpSize;
+ IIC_PUSH_REG>, OpSize16;
def PUSH32r : I<0x50, AddRegFrm, (outs), (ins GR32:$reg), "push{l}\t$reg",[],
- IIC_PUSH_REG>;
+ IIC_PUSH_REG>, OpSize32, Requires<[Not64BitMode]>;
def PUSH16rmr: I<0xFF, MRM6r, (outs), (ins GR16:$reg), "push{w}\t$reg",[],
- IIC_PUSH_REG>, OpSize;
+ IIC_PUSH_REG>, OpSize16;
def PUSH16rmm: I<0xFF, MRM6m, (outs), (ins i16mem:$src), "push{w}\t$src",[],
- IIC_PUSH_MEM>,
- OpSize;
+ IIC_PUSH_MEM>, OpSize16;
def PUSH32rmr: I<0xFF, MRM6r, (outs), (ins GR32:$reg), "push{l}\t$reg",[],
- IIC_PUSH_REG>;
+ IIC_PUSH_REG>, OpSize32, Requires<[Not64BitMode]>;
def PUSH32rmm: I<0xFF, MRM6m, (outs), (ins i32mem:$src), "push{l}\t$src",[],
- IIC_PUSH_MEM>;
-
-def PUSHi8 : Ii8<0x6a, RawFrm, (outs), (ins i32i8imm:$imm),
- "push{l}\t$imm", [], IIC_PUSH_IMM>;
+ IIC_PUSH_MEM>, OpSize32, Requires<[Not64BitMode]>;
+
+def PUSH16i8 : Ii8<0x6a, RawFrm, (outs), (ins i16i8imm:$imm),
+ "push{w}\t$imm", [], IIC_PUSH_IMM>, OpSize16,
+ Requires<[Not64BitMode]>;
+def PUSH32i8 : Ii8<0x6a, RawFrm, (outs), (ins i32i8imm:$imm),
+ "push{l}\t$imm", [], IIC_PUSH_IMM>, OpSize32,
+ Requires<[Not64BitMode]>;
def PUSHi16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
- "push{w}\t$imm", [], IIC_PUSH_IMM>, OpSize;
+ "push{w}\t$imm", [], IIC_PUSH_IMM>, OpSize16,
+ Requires<[Not64BitMode]>;
def PUSHi32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
- "push{l}\t$imm", [], IIC_PUSH_IMM>;
+ "push{l}\t$imm", [], IIC_PUSH_IMM>, OpSize32,
+ Requires<[Not64BitMode]>;
def PUSHF16 : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", [], IIC_PUSH_F>,
- OpSize;
+ OpSize16;
def PUSHF32 : I<0x9C, RawFrm, (outs), (ins), "pushf{l|d}", [], IIC_PUSH_F>,
- Requires<[In32BitMode]>;
+ OpSize32, Requires<[Not64BitMode]>;
} // mayStore, SchedRW
}
let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in {
let mayLoad = 1, SchedRW = [WriteLoad] in {
-def POP64r : I<0x58, AddRegFrm,
- (outs GR64:$reg), (ins), "pop{q}\t$reg", [], IIC_POP_REG>;
+def POP64r : I<0x58, AddRegFrm, (outs GR64:$reg), (ins), "pop{q}\t$reg", [],
+ IIC_POP_REG>, OpSize32, Requires<[In64BitMode]>;
def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", [],
- IIC_POP_REG>;
+ IIC_POP_REG>, OpSize32, Requires<[In64BitMode]>;
def POP64rmm: I<0x8F, MRM0m, (outs), (ins i64mem:$dst), "pop{q}\t$dst", [],
- IIC_POP_MEM>;
+ IIC_POP_MEM>, OpSize32, Requires<[In64BitMode]>;
} // mayLoad, SchedRW
let mayStore = 1, SchedRW = [WriteStore] in {
-def PUSH64r : I<0x50, AddRegFrm,
- (outs), (ins GR64:$reg), "push{q}\t$reg", [], IIC_PUSH_REG>;
+def PUSH64r : I<0x50, AddRegFrm, (outs), (ins GR64:$reg), "push{q}\t$reg", [],
+ IIC_PUSH_REG>, OpSize32, Requires<[In64BitMode]>;
def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", [],
- IIC_PUSH_REG>;
+ IIC_PUSH_REG>, OpSize32, Requires<[In64BitMode]>;
def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", [],
- IIC_PUSH_MEM>;
+ IIC_PUSH_MEM>, OpSize32, Requires<[In64BitMode]>;
} // mayStore, SchedRW
}
let Defs = [RSP], Uses = [RSP], neverHasSideEffects = 1, mayStore = 1,
SchedRW = [WriteStore] in {
def PUSH64i8 : Ii8<0x6a, RawFrm, (outs), (ins i64i8imm:$imm),
- "push{q}\t$imm", [], IIC_PUSH_IMM>;
+ "push{q}\t$imm", [], IIC_PUSH_IMM>, Requires<[In64BitMode]>;
def PUSH64i16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
- "push{q}\t$imm", [], IIC_PUSH_IMM>;
-def PUSH64i32 : Ii32<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
- "push{q}\t$imm", [], IIC_PUSH_IMM>;
+ "push{w}\t$imm", [], IIC_PUSH_IMM>, OpSize16,
+ Requires<[In64BitMode]>;
+def PUSH64i32 : Ii32S<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
+ "push{q}\t$imm", [], IIC_PUSH_IMM>, OpSize32,
+ Requires<[In64BitMode]>;
}
let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1, neverHasSideEffects=1 in
def POPF64 : I<0x9D, RawFrm, (outs), (ins), "popfq", [], IIC_POP_FD>,
- Requires<[In64BitMode]>, Sched<[WriteLoad]>;
+ OpSize32, Requires<[In64BitMode]>, Sched<[WriteLoad]>;
let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in
def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", [], IIC_PUSH_F>,
- Requires<[In64BitMode]>, Sched<[WriteStore]>;
+ OpSize32, Requires<[In64BitMode]>, Sched<[WriteStore]>;
let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP],
mayLoad = 1, neverHasSideEffects = 1, SchedRW = [WriteLoad] in {
-def POPA32 : I<0x61, RawFrm, (outs), (ins), "popa{l}", [], IIC_POP_A>,
- Requires<[In32BitMode]>;
+def POPA32 : I<0x61, RawFrm, (outs), (ins), "popal", [], IIC_POP_A>,
+ OpSize32, Requires<[Not64BitMode]>;
+def POPA16 : I<0x61, RawFrm, (outs), (ins), "popaw", [], IIC_POP_A>,
+ OpSize16, Requires<[Not64BitMode]>;
}
let Defs = [ESP], Uses = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP],
mayStore = 1, neverHasSideEffects = 1, SchedRW = [WriteStore] in {
-def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pusha{l}", [], IIC_PUSH_A>,
- Requires<[In32BitMode]>;
+def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pushal", [], IIC_PUSH_A>,
+ OpSize32, Requires<[Not64BitMode]>;
+def PUSHA16 : I<0x60, RawFrm, (outs), (ins), "pushaw", [], IIC_PUSH_A>,
+ OpSize16, Requires<[Not64BitMode]>;
}
let Constraints = "$src = $dst", SchedRW = [WriteALU] in {
@@ -974,7 +1056,7 @@ let Constraints = "$src = $dst", SchedRW = [WriteALU] in {
def BSWAP32r : I<0xC8, AddRegFrm,
(outs GR32:$dst), (ins GR32:$src),
"bswap{l}\t$dst",
- [(set GR32:$dst, (bswap GR32:$src))], IIC_BSWAP>, TB;
+ [(set GR32:$dst, (bswap GR32:$src))], IIC_BSWAP>, OpSize32, TB;
def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
"bswap{q}\t$dst",
@@ -986,86 +1068,106 @@ let Defs = [EFLAGS] in {
def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"bsf{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))],
- IIC_BIT_SCAN_REG>, TB, OpSize, Sched<[WriteShift]>;
+ IIC_BIT_SCAN_REG>, PS, OpSize16, Sched<[WriteShift]>;
def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"bsf{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))],
- IIC_BIT_SCAN_MEM>, TB, OpSize, Sched<[WriteShiftLd]>;
+ IIC_BIT_SCAN_MEM>, PS, OpSize16, Sched<[WriteShiftLd]>;
def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"bsf{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))],
- IIC_BIT_SCAN_REG>, TB,
- Sched<[WriteShift]>;
+ IIC_BIT_SCAN_REG>, PS, OpSize32, Sched<[WriteShift]>;
def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"bsf{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))],
- IIC_BIT_SCAN_MEM>, TB, Sched<[WriteShiftLd]>;
+ IIC_BIT_SCAN_MEM>, PS, OpSize32, Sched<[WriteShiftLd]>;
def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"bsf{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))],
- IIC_BIT_SCAN_REG>, TB, Sched<[WriteShift]>;
+ IIC_BIT_SCAN_REG>, PS, Sched<[WriteShift]>;
def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"bsf{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))],
- IIC_BIT_SCAN_MEM>, TB, Sched<[WriteShiftLd]>;
+ IIC_BIT_SCAN_MEM>, PS, Sched<[WriteShiftLd]>;
def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"bsr{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))],
- IIC_BIT_SCAN_REG>,
- TB, OpSize, Sched<[WriteShift]>;
+ IIC_BIT_SCAN_REG>, PS, OpSize16, Sched<[WriteShift]>;
def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"bsr{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))],
- IIC_BIT_SCAN_MEM>, TB,
- OpSize, Sched<[WriteShiftLd]>;
+ IIC_BIT_SCAN_MEM>, PS, OpSize16, Sched<[WriteShiftLd]>;
def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"bsr{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))],
- IIC_BIT_SCAN_REG>, TB,
- Sched<[WriteShift]>;
+ IIC_BIT_SCAN_REG>, PS, OpSize32, Sched<[WriteShift]>;
def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"bsr{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))],
- IIC_BIT_SCAN_MEM>, TB, Sched<[WriteShiftLd]>;
+ IIC_BIT_SCAN_MEM>, PS, OpSize32, Sched<[WriteShiftLd]>;
def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"bsr{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))], IIC_BIT_SCAN_REG>, TB,
- Sched<[WriteShift]>;
+ [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))],
+ IIC_BIT_SCAN_REG>, PS, Sched<[WriteShift]>;
def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"bsr{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))],
- IIC_BIT_SCAN_MEM>, TB, Sched<[WriteShiftLd]>;
+ IIC_BIT_SCAN_MEM>, PS, Sched<[WriteShiftLd]>;
} // Defs = [EFLAGS]
let SchedRW = [WriteMicrocoded] in {
// These uses the DF flag in the EFLAGS register to inc or dec EDI and ESI
let Defs = [EDI,ESI], Uses = [EDI,ESI,EFLAGS] in {
-def MOVSB : I<0xA4, RawFrm, (outs), (ins), "movsb", [], IIC_MOVS>;
-def MOVSW : I<0xA5, RawFrm, (outs), (ins), "movsw", [], IIC_MOVS>, OpSize;
-def MOVSD : I<0xA5, RawFrm, (outs), (ins), "movs{l|d}", [], IIC_MOVS>;
-def MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "movsq", [], IIC_MOVS>;
+def MOVSB : I<0xA4, RawFrmDstSrc, (outs dstidx8:$dst), (ins srcidx8:$src),
+ "movsb\t{$src, $dst|$dst, $src}", [], IIC_MOVS>;
+def MOVSW : I<0xA5, RawFrmDstSrc, (outs dstidx16:$dst), (ins srcidx16:$src),
+ "movsw\t{$src, $dst|$dst, $src}", [], IIC_MOVS>, OpSize16;
+def MOVSL : I<0xA5, RawFrmDstSrc, (outs dstidx32:$dst), (ins srcidx32:$src),
+ "movs{l|d}\t{$src, $dst|$dst, $src}", [], IIC_MOVS>, OpSize32;
+def MOVSQ : RI<0xA5, RawFrmDstSrc, (outs dstidx64:$dst), (ins srcidx64:$src),
+ "movsq\t{$src, $dst|$dst, $src}", [], IIC_MOVS>;
}
// These uses the DF flag in the EFLAGS register to inc or dec EDI and ESI
let Defs = [EDI], Uses = [AL,EDI,EFLAGS] in
-def STOSB : I<0xAA, RawFrm, (outs), (ins), "stosb", [], IIC_STOS>;
+def STOSB : I<0xAA, RawFrmDst, (outs dstidx8:$dst), (ins),
+ "stosb\t{%al, $dst|$dst, al}", [], IIC_STOS>;
let Defs = [EDI], Uses = [AX,EDI,EFLAGS] in
-def STOSW : I<0xAB, RawFrm, (outs), (ins), "stosw", [], IIC_STOS>, OpSize;
+def STOSW : I<0xAB, RawFrmDst, (outs dstidx16:$dst), (ins),
+ "stosw\t{%ax, $dst|$dst, ax}", [], IIC_STOS>, OpSize16;
let Defs = [EDI], Uses = [EAX,EDI,EFLAGS] in
-def STOSD : I<0xAB, RawFrm, (outs), (ins), "stos{l|d}", [], IIC_STOS>;
+def STOSL : I<0xAB, RawFrmDst, (outs dstidx32:$dst), (ins),
+ "stos{l|d}\t{%eax, $dst|$dst, eax}", [], IIC_STOS>, OpSize32;
let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI,EFLAGS] in
-def STOSQ : RI<0xAB, RawFrm, (outs), (ins), "stosq", [], IIC_STOS>;
+def STOSQ : RI<0xAB, RawFrmDst, (outs dstidx64:$dst), (ins),
+ "stosq\t{%rax, $dst|$dst, rax}", [], IIC_STOS>;
-def SCAS8 : I<0xAE, RawFrm, (outs), (ins), "scasb", [], IIC_SCAS>;
-def SCAS16 : I<0xAF, RawFrm, (outs), (ins), "scasw", [], IIC_SCAS>, OpSize;
-def SCAS32 : I<0xAF, RawFrm, (outs), (ins), "scas{l|d}", [], IIC_SCAS>;
-def SCAS64 : RI<0xAF, RawFrm, (outs), (ins), "scasq", [], IIC_SCAS>;
+// These uses the DF flag in the EFLAGS register to inc or dec EDI and ESI
+let Defs = [EDI,EFLAGS], Uses = [AL,EDI,EFLAGS] in
+def SCASB : I<0xAE, RawFrmDst, (outs), (ins dstidx8:$dst),
+ "scasb\t{$dst, %al|al, $dst}", [], IIC_SCAS>;
+let Defs = [EDI,EFLAGS], Uses = [AX,EDI,EFLAGS] in
+def SCASW : I<0xAF, RawFrmDst, (outs), (ins dstidx16:$dst),
+ "scasw\t{$dst, %ax|ax, $dst}", [], IIC_SCAS>, OpSize16;
+let Defs = [EDI,EFLAGS], Uses = [EAX,EDI,EFLAGS] in
+def SCASL : I<0xAF, RawFrmDst, (outs), (ins dstidx32:$dst),
+ "scas{l|d}\t{$dst, %eax|eax, $dst}", [], IIC_SCAS>, OpSize32;
+let Defs = [EDI,EFLAGS], Uses = [RAX,EDI,EFLAGS] in
+def SCASQ : RI<0xAF, RawFrmDst, (outs), (ins dstidx64:$dst),
+ "scasq\t{$dst, %rax|rax, $dst}", [], IIC_SCAS>;
-def CMPS8 : I<0xA6, RawFrm, (outs), (ins), "cmpsb", [], IIC_CMPS>;
-def CMPS16 : I<0xA7, RawFrm, (outs), (ins), "cmpsw", [], IIC_CMPS>, OpSize;
-def CMPS32 : I<0xA7, RawFrm, (outs), (ins), "cmps{l|d}", [], IIC_CMPS>;
-def CMPS64 : RI<0xA7, RawFrm, (outs), (ins), "cmpsq", [], IIC_CMPS>;
+// These uses the DF flag in the EFLAGS register to inc or dec EDI and ESI
+let Defs = [EDI,ESI,EFLAGS], Uses = [EDI,ESI,EFLAGS] in {
+def CMPSB : I<0xA6, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
+ "cmpsb\t{$dst, $src|$src, $dst}", [], IIC_CMPS>;
+def CMPSW : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
+ "cmpsw\t{$dst, $src|$src, $dst}", [], IIC_CMPS>, OpSize16;
+def CMPSL : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
+ "cmps{l|d}\t{$dst, $src|$src, $dst}", [], IIC_CMPS>, OpSize32;
+def CMPSQ : RI<0xA7, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
+ "cmpsq\t{$dst, $src|$src, $dst}", [], IIC_CMPS>;
+}
} // SchedRW
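The MOVS/STOS/SCAS/CMPS definitions above gain explicit srcidx/dstidx operands and implicit EDI/ESI/EFLAGS uses because the hardware advances the index registers by the element size, in the direction chosen by DF. A minimal C++ model of the byte form (the name rep_movsb is ours, purely for illustration):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

// Copy ECX bytes from [ESI] to [EDI], advancing both pointers by +1 when
// DF=0 and by -1 when DF=1, as REP MOVSB does.
static void rep_movsb(uint8_t *&edi, const uint8_t *&esi, size_t &ecx, bool DF) {
  while (ecx) {
    *edi = *esi;
    edi += DF ? -1 : 1;
    esi += DF ? -1 : 1;
    --ecx;
  }
}

int main() {
  uint8_t src[4] = {1, 2, 3, 4}, dst[4] = {0};
  uint8_t *di = dst; const uint8_t *si = src; size_t cx = 4;
  rep_movsb(di, si, cx, /*DF=*/false);
  assert(std::memcmp(dst, src, 4) == 0 && cx == 0);
}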
//===----------------------------------------------------------------------===//
@@ -1076,9 +1178,9 @@ let neverHasSideEffects = 1 in {
def MOV8rr : I<0x88, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src),
"mov{b}\t{$src, $dst|$dst, $src}", [], IIC_MOV>;
def MOV16rr : I<0x89, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize;
+ "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize16;
def MOV32rr : I<0x89, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV>;
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize32;
def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
"mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV>;
}
@@ -1089,16 +1191,28 @@ def MOV8ri : Ii8 <0xB0, AddRegFrm, (outs GR8 :$dst), (ins i8imm :$src),
[(set GR8:$dst, imm:$src)], IIC_MOV>;
def MOV16ri : Ii16<0xB8, AddRegFrm, (outs GR16:$dst), (ins i16imm:$src),
"mov{w}\t{$src, $dst|$dst, $src}",
- [(set GR16:$dst, imm:$src)], IIC_MOV>, OpSize;
+ [(set GR16:$dst, imm:$src)], IIC_MOV>, OpSize16;
def MOV32ri : Ii32<0xB8, AddRegFrm, (outs GR32:$dst), (ins i32imm:$src),
"mov{l}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, imm:$src)], IIC_MOV>;
+ [(set GR32:$dst, imm:$src)], IIC_MOV>, OpSize32;
+def MOV64ri32 : RIi32S<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, i64immSExt32:$src)], IIC_MOV>;
+}
+let isReMaterializable = 1 in {
def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
"movabs{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, imm:$src)], IIC_MOV>;
-def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
- "mov{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, i64immSExt32:$src)], IIC_MOV>;
+}
+
+// Longer forms that use a ModR/M byte. Needed for disassembler
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
+def MOV8ri_alt : Ii8 <0xC6, MRM0r, (outs GR8 :$dst), (ins i8imm :$src),
+ "mov{b}\t{$src, $dst|$dst, $src}", [], IIC_MOV>;
+def MOV16ri_alt : Ii16<0xC7, MRM0r, (outs GR16:$dst), (ins i16imm:$src),
+ "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize16;
+def MOV32ri_alt : Ii32<0xC7, MRM0r, (outs GR32:$dst), (ins i32imm:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize32;
}
} // SchedRW
@@ -1108,84 +1222,125 @@ def MOV8mi : Ii8 <0xC6, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src),
[(store (i8 imm:$src), addr:$dst)], IIC_MOV_MEM>;
def MOV16mi : Ii16<0xC7, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src),
"mov{w}\t{$src, $dst|$dst, $src}",
- [(store (i16 imm:$src), addr:$dst)], IIC_MOV_MEM>, OpSize;
+ [(store (i16 imm:$src), addr:$dst)], IIC_MOV_MEM>, OpSize16;
def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src),
"mov{l}\t{$src, $dst|$dst, $src}",
- [(store (i32 imm:$src), addr:$dst)], IIC_MOV_MEM>;
-def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
- "mov{q}\t{$src, $dst|$dst, $src}",
- [(store i64immSExt32:$src, addr:$dst)], IIC_MOV_MEM>;
+ [(store (i32 imm:$src), addr:$dst)], IIC_MOV_MEM>, OpSize32;
+def MOV64mi32 : RIi32S<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}",
+ [(store i64immSExt32:$src, addr:$dst)], IIC_MOV_MEM>;
} // SchedRW
let hasSideEffects = 0 in {
/// moffs8, moffs16 and moffs32 versions of moves. The immediate is a
-/// 32-bit offset from the PC. These are only valid in x86-32 mode.
+/// 32-bit offset from the segment base. These are only valid in x86-32 mode.
let SchedRW = [WriteALU] in {
let mayLoad = 1 in {
-def MOV8o8a : Ii32 <0xA0, RawFrm, (outs), (ins offset8:$src),
+let Defs = [AL] in
+def MOV8o8a : Ii32 <0xA0, RawFrmMemOffs, (outs), (ins offset8:$src),
"mov{b}\t{$src, %al|al, $src}", [], IIC_MOV_MEM>,
Requires<[In32BitMode]>;
-def MOV16o16a : Ii32 <0xA1, RawFrm, (outs), (ins offset16:$src),
- "mov{w}\t{$src, %ax|ax, $src}", [], IIC_MOV_MEM>, OpSize,
- Requires<[In32BitMode]>;
-def MOV32o32a : Ii32 <0xA1, RawFrm, (outs), (ins offset32:$src),
+let Defs = [AX] in
+def MOV16o16a : Ii32 <0xA1, RawFrmMemOffs, (outs), (ins offset16:$src),
+ "mov{w}\t{$src, %ax|ax, $src}", [], IIC_MOV_MEM>,
+ OpSize16, Requires<[In32BitMode]>;
+let Defs = [EAX] in
+def MOV32o32a : Ii32 <0xA1, RawFrmMemOffs, (outs), (ins offset32:$src),
+ "mov{l}\t{$src, %eax|eax, $src}", [], IIC_MOV_MEM>,
+ OpSize32, Requires<[In32BitMode]>;
+
+let Defs = [AL] in
+def MOV8o8a_16 : Ii16 <0xA0, RawFrmMemOffs, (outs), (ins offset8:$src),
+ "mov{b}\t{$src, %al|al, $src}", [], IIC_MOV_MEM>,
+ AdSize, Requires<[In16BitMode]>;
+let Defs = [AX] in
+def MOV16o16a_16 : Ii16 <0xA1, RawFrmMemOffs, (outs), (ins offset16:$src),
+ "mov{w}\t{$src, %ax|ax, $src}", [], IIC_MOV_MEM>,
+ OpSize16, AdSize, Requires<[In16BitMode]>;
+let Defs = [EAX] in
+def MOV32o32a_16 : Ii16 <0xA1, RawFrmMemOffs, (outs), (ins offset32:$src),
"mov{l}\t{$src, %eax|eax, $src}", [], IIC_MOV_MEM>,
- Requires<[In32BitMode]>;
+ AdSize, OpSize32, Requires<[In16BitMode]>;
}
let mayStore = 1 in {
-def MOV8ao8 : Ii32 <0xA2, RawFrm, (outs offset8:$dst), (ins),
+let Uses = [AL] in
+def MOV8ao8 : Ii32 <0xA2, RawFrmMemOffs, (outs offset8:$dst), (ins),
"mov{b}\t{%al, $dst|$dst, al}", [], IIC_MOV_MEM>,
Requires<[In32BitMode]>;
-def MOV16ao16 : Ii32 <0xA3, RawFrm, (outs offset16:$dst), (ins),
- "mov{w}\t{%ax, $dst|$dst, ax}", [], IIC_MOV_MEM>, OpSize,
- Requires<[In32BitMode]>;
-def MOV32ao32 : Ii32 <0xA3, RawFrm, (outs offset32:$dst), (ins),
+let Uses = [AX] in
+def MOV16ao16 : Ii32 <0xA3, RawFrmMemOffs, (outs offset16:$dst), (ins),
+ "mov{w}\t{%ax, $dst|$dst, ax}", [], IIC_MOV_MEM>,
+ OpSize16, Requires<[In32BitMode]>;
+let Uses = [EAX] in
+def MOV32ao32 : Ii32 <0xA3, RawFrmMemOffs, (outs offset32:$dst), (ins),
"mov{l}\t{%eax, $dst|$dst, eax}", [], IIC_MOV_MEM>,
- Requires<[In32BitMode]>;
+ OpSize32, Requires<[In32BitMode]>;
+
+let Uses = [AL] in
+def MOV8ao8_16 : Ii16 <0xA2, RawFrmMemOffs, (outs offset8:$dst), (ins),
+ "mov{b}\t{%al, $dst|$dst, al}", [], IIC_MOV_MEM>,
+ AdSize, Requires<[In16BitMode]>;
+let Uses = [AX] in
+def MOV16ao16_16 : Ii16 <0xA3, RawFrmMemOffs, (outs offset16:$dst), (ins),
+ "mov{w}\t{%ax, $dst|$dst, ax}", [], IIC_MOV_MEM>,
+ OpSize16, AdSize, Requires<[In16BitMode]>;
+let Uses = [EAX] in
+def MOV32ao32_16 : Ii16 <0xA3, RawFrmMemOffs, (outs offset32:$dst), (ins),
+ "mov{l}\t{%eax, $dst|$dst, eax}", [], IIC_MOV_MEM>,
+ OpSize32, AdSize, Requires<[In16BitMode]>;
}
}
// These forms all have full 64-bit absolute addresses in their instructions
// and use the movabs mnemonic to indicate this specific form.
let mayLoad = 1 in {
-def MOV64o8a : RIi64_NOREX<0xA0, RawFrm, (outs), (ins offset8:$src),
+let Defs = [AL] in
+def MOV64o8a : RIi64_NOREX<0xA0, RawFrmMemOffs, (outs), (ins offset8:$src),
"movabs{b}\t{$src, %al|al, $src}", []>,
Requires<[In64BitMode]>;
-def MOV64o16a : RIi64_NOREX<0xA1, RawFrm, (outs), (ins offset16:$src),
- "movabs{w}\t{$src, %ax|ax, $src}", []>, OpSize,
+let Defs = [AX] in
+def MOV64o16a : RIi64_NOREX<0xA1, RawFrmMemOffs, (outs), (ins offset16:$src),
+ "movabs{w}\t{$src, %ax|ax, $src}", []>, OpSize16,
Requires<[In64BitMode]>;
-def MOV64o32a : RIi64_NOREX<0xA1, RawFrm, (outs), (ins offset32:$src),
- "movabs{l}\t{$src, %eax|eax, $src}", []>,
+let Defs = [EAX] in
+def MOV64o32a : RIi64_NOREX<0xA1, RawFrmMemOffs, (outs), (ins offset32:$src),
+ "movabs{l}\t{$src, %eax|eax, $src}", []>, OpSize32,
Requires<[In64BitMode]>;
-def MOV64o64a : RIi64<0xA1, RawFrm, (outs), (ins offset64:$src),
+let Defs = [RAX] in
+def MOV64o64a : RIi64<0xA1, RawFrmMemOffs, (outs), (ins offset64:$src),
"movabs{q}\t{$src, %rax|rax, $src}", []>,
Requires<[In64BitMode]>;
}
let mayStore = 1 in {
-def MOV64ao8 : RIi64_NOREX<0xA2, RawFrm, (outs offset8:$dst), (ins),
+let Uses = [AL] in
+def MOV64ao8 : RIi64_NOREX<0xA2, RawFrmMemOffs, (outs offset8:$dst), (ins),
"movabs{b}\t{%al, $dst|$dst, al}", []>,
Requires<[In64BitMode]>;
-def MOV64ao16 : RIi64_NOREX<0xA3, RawFrm, (outs offset16:$dst), (ins),
- "movabs{w}\t{%ax, $dst|$dst, ax}", []>, OpSize,
+let Uses = [AX] in
+def MOV64ao16 : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs offset16:$dst), (ins),
+ "movabs{w}\t{%ax, $dst|$dst, ax}", []>, OpSize16,
Requires<[In64BitMode]>;
-def MOV64ao32 : RIi64_NOREX<0xA3, RawFrm, (outs offset32:$dst), (ins),
- "movabs{l}\t{%eax, $dst|$dst, eax}", []>,
+let Uses = [EAX] in
+def MOV64ao32 : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs offset32:$dst), (ins),
+ "movabs{l}\t{%eax, $dst|$dst, eax}", []>, OpSize32,
Requires<[In64BitMode]>;
-def MOV64ao64 : RIi64<0xA3, RawFrm, (outs offset64:$dst), (ins),
+let Uses = [RAX] in
+def MOV64ao64 : RIi64<0xA3, RawFrmMemOffs, (outs offset64:$dst), (ins),
"movabs{q}\t{%rax, $dst|$dst, rax}", []>,
Requires<[In64BitMode]>;
}
} // hasSideEffects = 0
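The MOV*o*a and MOV64ao* forms above use RawFrmMemOffs because the memory operand is encoded as a bare absolute offset following the opcode, with no ModR/M byte; in 64-bit mode that offset is a full eight bytes, hence the movabs spelling. A small illustrative encoder for the 64-bit store form (the function name and layout sketch the architectural encoding, not an LLVM interface):

#include <cstdint>
#include <cstdio>
#include <vector>

// "movabs %rax, addr" (store form): REX.W prefix, opcode A3, then an
// 8-byte absolute address immediate -- no ModR/M byte at all, which is
// why the .td file needs dedicated offset8/16/32/64 operand kinds.
static std::vector<uint8_t> encodeMovAbsStoreRAX(uint64_t addr) {
  std::vector<uint8_t> bytes{0x48, 0xA3};          // REX.W, opcode
  for (int i = 0; i < 8; ++i)
    bytes.push_back((addr >> (8 * i)) & 0xFF);     // little-endian offset
  return bytes;
}

int main() {
  for (uint8_t b : encodeMovAbsStoreRAX(0x1122334455667788ULL))
    std::printf("%02x ", b);
  std::printf("\n");                               // 48 a3 88 77 66 55 44 33 22 11
}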
-let isCodeGenOnly = 1, hasSideEffects = 0, SchedRW = [WriteMove] in {
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
+ SchedRW = [WriteMove] in {
def MOV8rr_REV : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src),
"mov{b}\t{$src, $dst|$dst, $src}", [], IIC_MOV>;
def MOV16rr_REV : I<0x8B, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize;
+ "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize16;
def MOV32rr_REV : I<0x8B, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV>;
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize32;
def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV>;
}
@@ -1196,10 +1351,10 @@ def MOV8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src),
[(set GR8:$dst, (loadi8 addr:$src))], IIC_MOV_MEM>;
def MOV16rm : I<0x8B, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"mov{w}\t{$src, $dst|$dst, $src}",
- [(set GR16:$dst, (loadi16 addr:$src))], IIC_MOV_MEM>, OpSize;
+ [(set GR16:$dst, (loadi16 addr:$src))], IIC_MOV_MEM>, OpSize16;
def MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"mov{l}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (loadi32 addr:$src))], IIC_MOV_MEM>;
+ [(set GR32:$dst, (loadi32 addr:$src))], IIC_MOV_MEM>, OpSize32;
def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"mov{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (load addr:$src))], IIC_MOV_MEM>;
@@ -1211,10 +1366,10 @@ def MOV8mr : I<0x88, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src),
[(store GR8:$src, addr:$dst)], IIC_MOV_MEM>;
def MOV16mr : I<0x89, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
"mov{w}\t{$src, $dst|$dst, $src}",
- [(store GR16:$src, addr:$dst)], IIC_MOV_MEM>, OpSize;
+ [(store GR16:$src, addr:$dst)], IIC_MOV_MEM>, OpSize16;
def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"mov{l}\t{$src, $dst|$dst, $src}",
- [(store GR32:$src, addr:$dst)], IIC_MOV_MEM>;
+ [(store GR32:$src, addr:$dst)], IIC_MOV_MEM>, OpSize32;
def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"mov{q}\t{$src, $dst|$dst, $src}",
[(store GR64:$src, addr:$dst)], IIC_MOV_MEM>;
@@ -1261,10 +1416,11 @@ let SchedRW = [WriteALU] in {
def BT16rr : I<0xA3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
"bt{w}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt GR16:$src1, GR16:$src2))], IIC_BT_RR>,
- OpSize, TB;
+ OpSize16, TB;
def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86bt GR32:$src1, GR32:$src2))], IIC_BT_RR>, TB;
+ [(set EFLAGS, (X86bt GR32:$src1, GR32:$src2))], IIC_BT_RR>,
+ OpSize32, TB;
def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
"bt{q}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))], IIC_BT_RR>, TB;
@@ -1281,13 +1437,13 @@ let mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteALULd] in {
// [(X86bt (loadi16 addr:$src1), GR16:$src2),
// (implicit EFLAGS)]
[], IIC_BT_MR
- >, OpSize, TB, Requires<[FastBTMem]>;
+ >, OpSize16, TB, Requires<[FastBTMem]>;
def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
// [(X86bt (loadi32 addr:$src1), GR32:$src2),
// (implicit EFLAGS)]
[], IIC_BT_MR
- >, TB, Requires<[FastBTMem]>;
+ >, OpSize32, TB, Requires<[FastBTMem]>;
def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
"bt{q}\t{$src2, $src1|$src1, $src2}",
// [(X86bt (loadi64 addr:$src1), GR64:$src2),
@@ -1300,11 +1456,11 @@ let SchedRW = [WriteALU] in {
def BT16ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR16:$src1, i16i8imm:$src2),
"bt{w}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt GR16:$src1, i16immSExt8:$src2))],
- IIC_BT_RI>, OpSize, TB;
+ IIC_BT_RI>, OpSize16, TB;
def BT32ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR32:$src1, i32i8imm:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt GR32:$src1, i32immSExt8:$src2))],
- IIC_BT_RI>, TB;
+ IIC_BT_RI>, OpSize32, TB;
def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
"bt{q}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))],
@@ -1318,11 +1474,11 @@ let SchedRW = [WriteALU] in {
def BT16mi8 : Ii8<0xBA, MRM4m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
"bt{w}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt (loadi16 addr:$src1), i16immSExt8:$src2))
- ], IIC_BT_MI>, OpSize, TB;
+ ], IIC_BT_MI>, OpSize16, TB;
def BT32mi8 : Ii8<0xBA, MRM4m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt (loadi32 addr:$src1), i32immSExt8:$src2))
- ], IIC_BT_MI>, TB;
+ ], IIC_BT_MI>, OpSize32, TB;
def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
"bt{q}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt (loadi64 addr:$src1),
@@ -1333,9 +1489,10 @@ let hasSideEffects = 0 in {
let SchedRW = [WriteALU] in {
def BTC16rr : I<0xBB, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
"btc{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>,
- OpSize, TB;
+ OpSize16, TB;
def BTC32rr : I<0xBB, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
- "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB;
+ "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>,
+ OpSize32, TB;
def BTC64rr : RI<0xBB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
"btc{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB;
} // SchedRW
@@ -1343,9 +1500,10 @@ def BTC64rr : RI<0xBB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in {
def BTC16mr : I<0xBB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
"btc{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>,
- OpSize, TB;
+ OpSize16, TB;
def BTC32mr : I<0xBB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
- "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB;
+ "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>,
+ OpSize32, TB;
def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
"btc{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB;
}
@@ -1353,9 +1511,10 @@ def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
let SchedRW = [WriteALU] in {
def BTC16ri8 : Ii8<0xBA, MRM7r, (outs), (ins GR16:$src1, i16i8imm:$src2),
"btc{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>,
- OpSize, TB;
+ OpSize16, TB;
def BTC32ri8 : Ii8<0xBA, MRM7r, (outs), (ins GR32:$src1, i32i8imm:$src2),
- "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB;
+ "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>,
+ OpSize32, TB;
def BTC64ri8 : RIi8<0xBA, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
"btc{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB;
} // SchedRW
@@ -1363,9 +1522,10 @@ def BTC64ri8 : RIi8<0xBA, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in {
def BTC16mi8 : Ii8<0xBA, MRM7m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
"btc{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>,
- OpSize, TB;
+ OpSize16, TB;
def BTC32mi8 : Ii8<0xBA, MRM7m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
- "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, TB;
+ "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>,
+ OpSize32, TB;
def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
"btc{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, TB;
}
@@ -1373,9 +1533,10 @@ def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
let SchedRW = [WriteALU] in {
def BTR16rr : I<0xB3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
"btr{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>,
- OpSize, TB;
+ OpSize16, TB;
def BTR32rr : I<0xB3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
- "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB;
+ "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>,
+ OpSize32, TB;
def BTR64rr : RI<0xB3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
"btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
} // SchedRW
@@ -1383,9 +1544,10 @@ def BTR64rr : RI<0xB3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in {
def BTR16mr : I<0xB3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
"btr{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>,
- OpSize, TB;
+ OpSize16, TB;
def BTR32mr : I<0xB3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
- "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB;
+ "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>,
+ OpSize32, TB;
def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
"btr{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB;
}
@@ -1393,9 +1555,10 @@ def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
let SchedRW = [WriteALU] in {
def BTR16ri8 : Ii8<0xBA, MRM6r, (outs), (ins GR16:$src1, i16i8imm:$src2),
"btr{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>,
- OpSize, TB;
+ OpSize16, TB;
def BTR32ri8 : Ii8<0xBA, MRM6r, (outs), (ins GR32:$src1, i32i8imm:$src2),
- "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB;
+ "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>,
+ OpSize32, TB;
def BTR64ri8 : RIi8<0xBA, MRM6r, (outs), (ins GR64:$src1, i64i8imm:$src2),
"btr{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB;
} // SchedRW
@@ -1403,9 +1566,10 @@ def BTR64ri8 : RIi8<0xBA, MRM6r, (outs), (ins GR64:$src1, i64i8imm:$src2),
let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in {
def BTR16mi8 : Ii8<0xBA, MRM6m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
"btr{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>,
- OpSize, TB;
+ OpSize16, TB;
def BTR32mi8 : Ii8<0xBA, MRM6m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
- "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, TB;
+ "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>,
+ OpSize32, TB;
def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
"btr{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, TB;
}
@@ -1413,19 +1577,21 @@ def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
let SchedRW = [WriteALU] in {
def BTS16rr : I<0xAB, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
"bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>,
- OpSize, TB;
+ OpSize16, TB;
def BTS32rr : I<0xAB, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
- "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB;
+ "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>,
+ OpSize32, TB;
def BTS64rr : RI<0xAB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
- "bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB;
+ "bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB;
} // SchedRW
let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in {
def BTS16mr : I<0xAB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
- "bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>,
- OpSize, TB;
+ "bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>,
+ OpSize16, TB;
def BTS32mr : I<0xAB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
- "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB;
+ "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>,
+ OpSize32, TB;
def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
"bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB;
}
@@ -1433,9 +1599,10 @@ def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
let SchedRW = [WriteALU] in {
def BTS16ri8 : Ii8<0xBA, MRM5r, (outs), (ins GR16:$src1, i16i8imm:$src2),
"bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>,
- OpSize, TB;
+ OpSize16, TB;
def BTS32ri8 : Ii8<0xBA, MRM5r, (outs), (ins GR32:$src1, i32i8imm:$src2),
- "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB;
+ "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>,
+ OpSize32, TB;
def BTS64ri8 : RIi8<0xBA, MRM5r, (outs), (ins GR64:$src1, i64i8imm:$src2),
"bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB;
} // SchedRW
@@ -1443,9 +1610,10 @@ def BTS64ri8 : RIi8<0xBA, MRM5r, (outs), (ins GR64:$src1, i64i8imm:$src2),
let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in {
def BTS16mi8 : Ii8<0xBA, MRM5m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
"bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>,
- OpSize, TB;
+ OpSize16, TB;
def BTS32mi8 : Ii8<0xBA, MRM5m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
- "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, TB;
+ "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>,
+ OpSize32, TB;
def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
"bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, TB;
}
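The BT/BTS/BTR/BTC groups above differ only in what they do to the selected bit after copying it into CF (nothing, set, reset, complement); the register forms take the bit index modulo the operand width. A compact C++ rendering of those semantics for the 32-bit register form (the struct and helper names are illustrative only):

#include <cassert>
#include <cstdint>

struct BitTestResult { uint32_t value; bool CF; };

static BitTestResult bt (uint32_t v, unsigned i) { return {v, ((v >> (i & 31)) & 1) != 0}; }
static BitTestResult bts(uint32_t v, unsigned i) { bool c = bt(v, i).CF; return {v |  (1u << (i & 31)), c}; }
static BitTestResult btr(uint32_t v, unsigned i) { bool c = bt(v, i).CF; return {v & ~(1u << (i & 31)), c}; }
static BitTestResult btc(uint32_t v, unsigned i) { bool c = bt(v, i).CF; return {v ^  (1u << (i & 31)), c}; }

int main() {
  assert(bt (0b1010, 1).CF && !bt(0b1010, 0).CF);  // CF = selected bit
  assert(bts(0b1010, 0).value == 0b1011);          // set
  assert(btr(0b1010, 1).value == 0b1000);          // reset
  assert(btc(0b1010, 2).value == 0b1110);          // complement
}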
@@ -1475,14 +1643,14 @@ multiclass ATOMIC_SWAP<bits<8> opc8, bits<8> opc, string mnemonic, string frag,
[(set
GR16:$dst,
(!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))],
- itin>, OpSize;
+ itin>, OpSize16;
def NAME#32rm : I<opc, MRMSrcMem, (outs GR32:$dst),
(ins GR32:$val, i32mem:$ptr),
!strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
[(set
GR32:$dst,
(!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))],
- itin>;
+ itin>, OpSize32;
def NAME#64rm : RI<opc, MRMSrcMem, (outs GR64:$dst),
(ins GR64:$val, i64mem:$ptr),
!strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
@@ -1501,24 +1669,30 @@ let Constraints = "$val = $dst" in {
def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst), (ins GR8:$val, GR8:$src),
"xchg{b}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>;
def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst), (ins GR16:$val, GR16:$src),
- "xchg{w}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>, OpSize;
+ "xchg{w}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>,
+ OpSize16;
def XCHG32rr : I<0x87, MRMSrcReg, (outs GR32:$dst), (ins GR32:$val, GR32:$src),
- "xchg{l}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>;
+ "xchg{l}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>,
+ OpSize32;
def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst), (ins GR64:$val,GR64:$src),
"xchg{q}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>;
}
// Swap between EAX and other registers.
+let Uses = [AX], Defs = [AX] in
def XCHG16ar : I<0x90, AddRegFrm, (outs), (ins GR16:$src),
- "xchg{w}\t{$src, %ax|ax, $src}", [], IIC_XCHG_REG>, OpSize;
+ "xchg{w}\t{$src, %ax|ax, $src}", [], IIC_XCHG_REG>, OpSize16;
+let Uses = [EAX], Defs = [EAX] in
def XCHG32ar : I<0x90, AddRegFrm, (outs), (ins GR32:$src),
"xchg{l}\t{$src, %eax|eax, $src}", [], IIC_XCHG_REG>,
- Requires<[In32BitMode]>;
+ OpSize32, Requires<[Not64BitMode]>;
+let Uses = [EAX], Defs = [EAX] in
// Uses GR32_NOAX in 64-bit mode to prevent encoding using the 0x90 NOP encoding.
// xchg %eax, %eax needs to clear upper 32-bits of RAX so is not a NOP.
def XCHG32ar64 : I<0x90, AddRegFrm, (outs), (ins GR32_NOAX:$src),
"xchg{l}\t{$src, %eax|eax, $src}", [], IIC_XCHG_REG>,
- Requires<[In64BitMode]>;
+ OpSize32, Requires<[In64BitMode]>;
+let Uses = [RAX], Defs = [RAX] in
def XCHG64ar : RI<0x90, AddRegFrm, (outs), (ins GR64:$src),
"xchg{q}\t{$src, %rax|rax, $src}", [], IIC_XCHG_REG>;
} // SchedRW
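The GR32_NOAX restriction above exists because, in 64-bit mode, any write to a 32-bit register clears bits 63:32 of its 64-bit parent, so xchg %eax, %eax is not a NOP even though the bare 0x90 byte is. A tiny illustration of that zero-extension rule:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t rax = 0xDEADBEEF00000001ULL;
  uint32_t eax = static_cast<uint32_t>(rax); // read EAX
  rax = eax;                                 // a 32-bit write zero-extends into RAX
  assert(rax == 0x0000000000000001ULL);
}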
@@ -1528,9 +1702,10 @@ def XADD8rr : I<0xC0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
"xadd{b}\t{$src, $dst|$dst, $src}", [], IIC_XADD_REG>, TB;
def XADD16rr : I<0xC1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
"xadd{w}\t{$src, $dst|$dst, $src}", [], IIC_XADD_REG>, TB,
- OpSize;
+ OpSize16;
def XADD32rr : I<0xC1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
- "xadd{l}\t{$src, $dst|$dst, $src}", [], IIC_XADD_REG>, TB;
+ "xadd{l}\t{$src, $dst|$dst, $src}", [], IIC_XADD_REG>, TB,
+ OpSize32;
def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
"xadd{q}\t{$src, $dst|$dst, $src}", [], IIC_XADD_REG>, TB;
} // SchedRW
@@ -1540,9 +1715,10 @@ def XADD8rm : I<0xC0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
"xadd{b}\t{$src, $dst|$dst, $src}", [], IIC_XADD_MEM>, TB;
def XADD16rm : I<0xC1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
"xadd{w}\t{$src, $dst|$dst, $src}", [], IIC_XADD_MEM>, TB,
- OpSize;
+ OpSize16;
def XADD32rm : I<0xC1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
- "xadd{l}\t{$src, $dst|$dst, $src}", [], IIC_XADD_MEM>, TB;
+ "xadd{l}\t{$src, $dst|$dst, $src}", [], IIC_XADD_MEM>, TB,
+ OpSize32;
def XADD64rm : RI<0xC1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"xadd{q}\t{$src, $dst|$dst, $src}", [], IIC_XADD_MEM>, TB;
@@ -1554,10 +1730,10 @@ def CMPXCHG8rr : I<0xB0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
IIC_CMPXCHG_REG8>, TB;
def CMPXCHG16rr : I<0xB1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
"cmpxchg{w}\t{$src, $dst|$dst, $src}", [],
- IIC_CMPXCHG_REG>, TB, OpSize;
+ IIC_CMPXCHG_REG>, TB, OpSize16;
def CMPXCHG32rr : I<0xB1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
"cmpxchg{l}\t{$src, $dst|$dst, $src}", [],
- IIC_CMPXCHG_REG>, TB;
+ IIC_CMPXCHG_REG>, TB, OpSize32;
def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
"cmpxchg{q}\t{$src, $dst|$dst, $src}", [],
IIC_CMPXCHG_REG>, TB;
@@ -1570,10 +1746,10 @@ def CMPXCHG8rm : I<0xB0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
IIC_CMPXCHG_MEM8>, TB;
def CMPXCHG16rm : I<0xB1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
"cmpxchg{w}\t{$src, $dst|$dst, $src}", [],
- IIC_CMPXCHG_MEM>, TB, OpSize;
+ IIC_CMPXCHG_MEM>, TB, OpSize16;
def CMPXCHG32rm : I<0xB1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"cmpxchg{l}\t{$src, $dst|$dst, $src}", [],
- IIC_CMPXCHG_MEM>, TB;
+ IIC_CMPXCHG_MEM>, TB, OpSize32;
def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"cmpxchg{q}\t{$src, $dst|$dst, $src}", [],
IIC_CMPXCHG_MEM>, TB;
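CMPXCHG above compares the accumulator with the destination: on a match the source is stored and ZF is set, otherwise the current destination value is loaded back into the accumulator. That is the operation std::atomic's compare-exchange typically lowers to on x86; the sketch below assumes only the standard <atomic> API:

#include <atomic>
#include <cassert>
#include <cstdint>

int main() {
  std::atomic<uint32_t> mem{5};
  uint32_t expected = 5;
  bool zf = mem.compare_exchange_strong(expected, 9);   // lock cmpxchg: match, store 9
  assert(zf && mem.load() == 9);

  expected = 5;                                          // stale expectation
  zf = mem.compare_exchange_strong(expected, 7);         // mismatch: reload accumulator
  assert(!zf && expected == 9 && mem.load() == 9);
}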
@@ -1594,7 +1770,8 @@ def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst),
def LOCK_PREFIX : I<0xF0, RawFrm, (outs), (ins), "lock", []>;
// Rex64 instruction prefix
-def REX64_PREFIX : I<0x48, RawFrm, (outs), (ins), "rex64", []>;
+def REX64_PREFIX : I<0x48, RawFrm, (outs), (ins), "rex64", []>,
+ Requires<[In64BitMode]>;
// Data16 instruction prefix
def DATA16_PREFIX : I<0x66, RawFrm, (outs), (ins), "data16", []>;
@@ -1611,16 +1788,41 @@ def REPNE_PREFIX : I<0xF2, RawFrm, (outs), (ins), "repne", []>;
// String manipulation instructions
let SchedRW = [WriteMicrocoded] in {
-def LODSB : I<0xAC, RawFrm, (outs), (ins), "lodsb", [], IIC_LODS>;
-def LODSW : I<0xAD, RawFrm, (outs), (ins), "lodsw", [], IIC_LODS>, OpSize;
-def LODSD : I<0xAD, RawFrm, (outs), (ins), "lods{l|d}", [], IIC_LODS>;
-def LODSQ : RI<0xAD, RawFrm, (outs), (ins), "lodsq", [], IIC_LODS>;
+// These uses the DF flag in the EFLAGS register to inc or dec EDI and ESI
+let Defs = [AL,ESI], Uses = [ESI,EFLAGS] in
+def LODSB : I<0xAC, RawFrmSrc, (outs), (ins srcidx8:$src),
+ "lodsb\t{$src, %al|al, $src}", [], IIC_LODS>;
+let Defs = [AX,ESI], Uses = [ESI,EFLAGS] in
+def LODSW : I<0xAD, RawFrmSrc, (outs), (ins srcidx16:$src),
+ "lodsw\t{$src, %ax|ax, $src}", [], IIC_LODS>, OpSize16;
+let Defs = [EAX,ESI], Uses = [ESI,EFLAGS] in
+def LODSL : I<0xAD, RawFrmSrc, (outs), (ins srcidx32:$src),
+ "lods{l|d}\t{$src, %eax|eax, $src}", [], IIC_LODS>, OpSize32;
+let Defs = [RAX,ESI], Uses = [ESI,EFLAGS] in
+def LODSQ : RI<0xAD, RawFrmSrc, (outs), (ins srcidx64:$src),
+ "lodsq\t{$src, %rax|rax, $src}", [], IIC_LODS>;
}
let SchedRW = [WriteSystem] in {
-def OUTSB : I<0x6E, RawFrm, (outs), (ins), "outsb", [], IIC_OUTS>;
-def OUTSW : I<0x6F, RawFrm, (outs), (ins), "outsw", [], IIC_OUTS>, OpSize;
-def OUTSD : I<0x6F, RawFrm, (outs), (ins), "outs{l|d}", [], IIC_OUTS>;
+// These uses the DF flag in the EFLAGS register to inc or dec EDI and ESI
+let Defs = [ESI], Uses = [DX,ESI,EFLAGS] in {
+def OUTSB : I<0x6E, RawFrmSrc, (outs), (ins srcidx8:$src),
+ "outsb\t{$src, %dx|dx, $src}", [], IIC_OUTS>;
+def OUTSW : I<0x6F, RawFrmSrc, (outs), (ins srcidx16:$src),
+ "outsw\t{$src, %dx|dx, $src}", [], IIC_OUTS>, OpSize16;
+def OUTSL : I<0x6F, RawFrmSrc, (outs), (ins srcidx32:$src),
+ "outs{l|d}\t{$src, %dx|dx, $src}", [], IIC_OUTS>, OpSize32;
+}
+
+// These uses the DF flag in the EFLAGS register to inc or dec EDI and ESI
+let Defs = [EDI], Uses = [DX,EDI,EFLAGS] in {
+def INSB : I<0x6C, RawFrmDst, (outs dstidx8:$dst), (ins),
+ "insb\t{%dx, $dst|$dst, dx}", [], IIC_INS>;
+def INSW : I<0x6D, RawFrmDst, (outs dstidx16:$dst), (ins),
+ "insw\t{%dx, $dst|$dst, dx}", [], IIC_INS>, OpSize16;
+def INSL : I<0x6D, RawFrmDst, (outs dstidx32:$dst), (ins),
+ "ins{l|d}\t{%dx, $dst|$dst, dx}", [], IIC_INS>, OpSize32;
+}
}
// Flag instructions
@@ -1644,50 +1846,50 @@ let SchedRW = [WriteMicrocoded] in {
// ASCII Adjust After Addition
// sets AL, AH and CF and AF of EFLAGS and uses AL and AF of EFLAGS
def AAA : I<0x37, RawFrm, (outs), (ins), "aaa", [], IIC_AAA>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
// ASCII Adjust AX Before Division
// sets AL, AH and EFLAGS and uses AL and AH
def AAD8i8 : Ii8<0xD5, RawFrm, (outs), (ins i8imm:$src),
- "aad\t$src", [], IIC_AAD>, Requires<[In32BitMode]>;
+ "aad\t$src", [], IIC_AAD>, Requires<[Not64BitMode]>;
// ASCII Adjust AX After Multiply
// sets AL, AH and EFLAGS and uses AL
def AAM8i8 : Ii8<0xD4, RawFrm, (outs), (ins i8imm:$src),
- "aam\t$src", [], IIC_AAM>, Requires<[In32BitMode]>;
+ "aam\t$src", [], IIC_AAM>, Requires<[Not64BitMode]>;
// ASCII Adjust AL After Subtraction - sets
// sets AL, AH and CF and AF of EFLAGS and uses AL and AF of EFLAGS
def AAS : I<0x3F, RawFrm, (outs), (ins), "aas", [], IIC_AAS>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
// Decimal Adjust AL after Addition
// sets AL, CF and AF of EFLAGS and uses AL, CF and AF of EFLAGS
def DAA : I<0x27, RawFrm, (outs), (ins), "daa", [], IIC_DAA>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
// Decimal Adjust AL after Subtraction
// sets AL, CF and AF of EFLAGS and uses AL, CF and AF of EFLAGS
def DAS : I<0x2F, RawFrm, (outs), (ins), "das", [], IIC_DAS>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
} // SchedRW
let SchedRW = [WriteSystem] in {
// Check Array Index Against Bounds
def BOUNDS16rm : I<0x62, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
- "bound\t{$src, $dst|$dst, $src}", [], IIC_BOUND>, OpSize,
- Requires<[In32BitMode]>;
+ "bound\t{$src, $dst|$dst, $src}", [], IIC_BOUND>, OpSize16,
+ Requires<[Not64BitMode]>;
def BOUNDS32rm : I<0x62, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
- "bound\t{$src, $dst|$dst, $src}", [], IIC_BOUND>,
- Requires<[In32BitMode]>;
+ "bound\t{$src, $dst|$dst, $src}", [], IIC_BOUND>, OpSize32,
+ Requires<[Not64BitMode]>;
// Adjust RPL Field of Segment Selector
def ARPL16rr : I<0x63, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
"arpl\t{$src, $dst|$dst, $src}", [], IIC_ARPL_REG>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
def ARPL16mr : I<0x63, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
"arpl\t{$src, $dst|$dst, $src}", [], IIC_ARPL_MEM>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
} // SchedRW
//===----------------------------------------------------------------------===//
@@ -1698,29 +1900,29 @@ let Predicates = [HasMOVBE] in {
def MOVBE16rm : I<0xF0, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"movbe{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (bswap (loadi16 addr:$src)))], IIC_MOVBE>,
- OpSize, T8;
+ OpSize16, T8PS;
def MOVBE32rm : I<0xF0, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"movbe{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (bswap (loadi32 addr:$src)))], IIC_MOVBE>,
- T8;
+ OpSize32, T8PS;
def MOVBE64rm : RI<0xF0, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"movbe{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (bswap (loadi64 addr:$src)))], IIC_MOVBE>,
- T8;
+ T8PS;
}
let SchedRW = [WriteStore] in {
def MOVBE16mr : I<0xF1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
"movbe{w}\t{$src, $dst|$dst, $src}",
[(store (bswap GR16:$src), addr:$dst)], IIC_MOVBE>,
- OpSize, T8;
+ OpSize16, T8PS;
def MOVBE32mr : I<0xF1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"movbe{l}\t{$src, $dst|$dst, $src}",
[(store (bswap GR32:$src), addr:$dst)], IIC_MOVBE>,
- T8;
+ OpSize32, T8PS;
def MOVBE64mr : RI<0xF1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"movbe{q}\t{$src, $dst|$dst, $src}",
[(store (bswap GR64:$src), addr:$dst)], IIC_MOVBE>,
- T8;
+ T8PS;
}
}
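Illustration only, not from the patch: the MOVBE defs above are selected from a plain byte-swapped load or store, i.e. bswap wrapped around a memory access. A minimal C sketch of the source idiom those patterns correspond to, assuming a made-up helper name and a compiler/target with MOVBE enabled:

    #include <stdint.h>

    /* Load a 32-bit big-endian value; with MOVBE available the load and the
     * byte swap can fold into a single "movbe" instead of mov + bswap. */
    static inline uint32_t read_be32(const uint8_t *p) {
        uint32_t v;
        __builtin_memcpy(&v, p, sizeof v);   /* ordinary 32-bit load */
        return __builtin_bswap32(v);         /* swap folded into the load */
    }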
@@ -1730,10 +1932,10 @@ let Predicates = [HasMOVBE] in {
let Predicates = [HasRDRAND], Defs = [EFLAGS] in {
def RDRAND16r : I<0xC7, MRM6r, (outs GR16:$dst), (ins),
"rdrand{w}\t$dst",
- [(set GR16:$dst, EFLAGS, (X86rdrand))]>, OpSize, TB;
+ [(set GR16:$dst, EFLAGS, (X86rdrand))]>, OpSize16, TB;
def RDRAND32r : I<0xC7, MRM6r, (outs GR32:$dst), (ins),
"rdrand{l}\t$dst",
- [(set GR32:$dst, EFLAGS, (X86rdrand))]>, TB;
+ [(set GR32:$dst, EFLAGS, (X86rdrand))]>, OpSize32, TB;
def RDRAND64r : RI<0xC7, MRM6r, (outs GR64:$dst), (ins),
"rdrand{q}\t$dst",
[(set GR64:$dst, EFLAGS, (X86rdrand))]>, TB;
@@ -1745,10 +1947,10 @@ let Predicates = [HasRDRAND], Defs = [EFLAGS] in {
let Predicates = [HasRDSEED], Defs = [EFLAGS] in {
def RDSEED16r : I<0xC7, MRM7r, (outs GR16:$dst), (ins),
"rdseed{w}\t$dst",
- [(set GR16:$dst, EFLAGS, (X86rdseed))]>, OpSize, TB;
+ [(set GR16:$dst, EFLAGS, (X86rdseed))]>, OpSize16, TB;
def RDSEED32r : I<0xC7, MRM7r, (outs GR32:$dst), (ins),
"rdseed{l}\t$dst",
- [(set GR32:$dst, EFLAGS, (X86rdseed))]>, TB;
+ [(set GR32:$dst, EFLAGS, (X86rdseed))]>, OpSize32, TB;
def RDSEED64r : RI<0xC7, MRM7r, (outs GR64:$dst), (ins),
"rdseed{q}\t$dst",
[(set GR64:$dst, EFLAGS, (X86rdseed))]>, TB;
@@ -1761,19 +1963,20 @@ let Predicates = [HasLZCNT], Defs = [EFLAGS] in {
def LZCNT16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"lzcnt{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (ctlz GR16:$src)), (implicit EFLAGS)]>, XS,
- OpSize;
+ OpSize16;
def LZCNT16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"lzcnt{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (ctlz (loadi16 addr:$src))),
- (implicit EFLAGS)]>, XS, OpSize;
+ (implicit EFLAGS)]>, XS, OpSize16;
def LZCNT32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"lzcnt{l}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (ctlz GR32:$src)), (implicit EFLAGS)]>, XS;
+ [(set GR32:$dst, (ctlz GR32:$src)), (implicit EFLAGS)]>, XS,
+ OpSize32;
def LZCNT32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"lzcnt{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (ctlz (loadi32 addr:$src))),
- (implicit EFLAGS)]>, XS;
+ (implicit EFLAGS)]>, XS, OpSize32;
def LZCNT64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"lzcnt{q}\t{$src, $dst|$dst, $src}",
@@ -1785,6 +1988,46 @@ let Predicates = [HasLZCNT], Defs = [EFLAGS] in {
(implicit EFLAGS)]>, XS;
}
+let Predicates = [HasLZCNT] in {
+ def : Pat<(X86cmov (ctlz GR16:$src), (i16 16), (X86_COND_E),
+ (X86cmp GR16:$src, (i16 0))),
+ (LZCNT16rr GR16:$src)>;
+ def : Pat<(X86cmov (ctlz GR32:$src), (i32 32), (X86_COND_E),
+ (X86cmp GR32:$src, (i32 0))),
+ (LZCNT32rr GR32:$src)>;
+ def : Pat<(X86cmov (ctlz GR64:$src), (i64 64), (X86_COND_E),
+ (X86cmp GR64:$src, (i64 0))),
+ (LZCNT64rr GR64:$src)>;
+ def : Pat<(X86cmov (i16 16), (ctlz GR16:$src), (X86_COND_E),
+ (X86cmp GR16:$src, (i16 0))),
+ (LZCNT16rr GR16:$src)>;
+ def : Pat<(X86cmov (i32 32), (ctlz GR32:$src), (X86_COND_E),
+ (X86cmp GR32:$src, (i32 0))),
+ (LZCNT32rr GR32:$src)>;
+ def : Pat<(X86cmov (i64 64), (ctlz GR64:$src), (X86_COND_E),
+ (X86cmp GR64:$src, (i64 0))),
+ (LZCNT64rr GR64:$src)>;
+
+ def : Pat<(X86cmov (ctlz (loadi16 addr:$src)), (i16 16), (X86_COND_E),
+ (X86cmp (loadi16 addr:$src), (i16 0))),
+ (LZCNT16rm addr:$src)>;
+ def : Pat<(X86cmov (ctlz (loadi32 addr:$src)), (i32 32), (X86_COND_E),
+ (X86cmp (loadi32 addr:$src), (i32 0))),
+ (LZCNT32rm addr:$src)>;
+ def : Pat<(X86cmov (ctlz (loadi64 addr:$src)), (i64 64), (X86_COND_E),
+ (X86cmp (loadi64 addr:$src), (i64 0))),
+ (LZCNT64rm addr:$src)>;
+ def : Pat<(X86cmov (i16 16), (ctlz (loadi16 addr:$src)), (X86_COND_E),
+ (X86cmp (loadi16 addr:$src), (i16 0))),
+ (LZCNT16rm addr:$src)>;
+ def : Pat<(X86cmov (i32 32), (ctlz (loadi32 addr:$src)), (X86_COND_E),
+ (X86cmp (loadi32 addr:$src), (i32 0))),
+ (LZCNT32rm addr:$src)>;
+ def : Pat<(X86cmov (i64 64), (ctlz (loadi64 addr:$src)), (X86_COND_E),
+ (X86cmp (loadi64 addr:$src), (i64 0))),
+ (LZCNT64rm addr:$src)>;
+}
+
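Illustration only, not from the patch: the LZCNT patterns above fold the usual zero-guarded count-leading-zeros idiom, since the hardware instruction already returns the operand width (16/32/64) for a zero input. A minimal C sketch of the guarded form that collapses to a bare lzcnt:

    #include <stdint.h>

    /* __builtin_clz(0) is undefined, so portable code guards it; with LZCNT
     * the compare/select can lower to one lzcnt, because lzcnt(0) == 32. */
    static inline unsigned clz32_safe(uint32_t x) {
        return x == 0 ? 32u : (unsigned)__builtin_clz(x);
    }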
//===----------------------------------------------------------------------===//
// BMI Instructions
//
@@ -1792,19 +2035,20 @@ let Predicates = [HasBMI], Defs = [EFLAGS] in {
def TZCNT16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"tzcnt{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (cttz GR16:$src)), (implicit EFLAGS)]>, XS,
- OpSize;
+ OpSize16;
def TZCNT16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"tzcnt{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (cttz (loadi16 addr:$src))),
- (implicit EFLAGS)]>, XS, OpSize;
+ (implicit EFLAGS)]>, XS, OpSize16;
def TZCNT32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"tzcnt{l}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (cttz GR32:$src)), (implicit EFLAGS)]>, XS;
+ [(set GR32:$dst, (cttz GR32:$src)), (implicit EFLAGS)]>, XS,
+ OpSize32;
def TZCNT32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"tzcnt{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (cttz (loadi32 addr:$src))),
- (implicit EFLAGS)]>, XS;
+ (implicit EFLAGS)]>, XS, OpSize32;
def TZCNT64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"tzcnt{q}\t{$src, $dst|$dst, $src}",
@@ -1817,43 +2061,101 @@ let Predicates = [HasBMI], Defs = [EFLAGS] in {
}
multiclass bmi_bls<string mnemonic, Format RegMRM, Format MemMRM,
- RegisterClass RC, X86MemOperand x86memop, SDNode OpNode,
- PatFrag ld_frag> {
+ RegisterClass RC, X86MemOperand x86memop> {
+let hasSideEffects = 0 in {
def rr : I<0xF3, RegMRM, (outs RC:$dst), (ins RC:$src),
!strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"),
- [(set RC:$dst, (OpNode RC:$src)), (implicit EFLAGS)]>, T8, VEX_4V;
+ []>, T8PS, VEX_4V;
+ let mayLoad = 1 in
def rm : I<0xF3, MemMRM, (outs RC:$dst), (ins x86memop:$src),
!strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"),
- [(set RC:$dst, (OpNode (ld_frag addr:$src))), (implicit EFLAGS)]>,
- T8, VEX_4V;
+ []>, T8PS, VEX_4V;
+}
}
let Predicates = [HasBMI], Defs = [EFLAGS] in {
- defm BLSR32 : bmi_bls<"blsr{l}", MRM1r, MRM1m, GR32, i32mem,
- X86blsr, loadi32>;
- defm BLSR64 : bmi_bls<"blsr{q}", MRM1r, MRM1m, GR64, i64mem,
- X86blsr, loadi64>, VEX_W;
- defm BLSMSK32 : bmi_bls<"blsmsk{l}", MRM2r, MRM2m, GR32, i32mem,
- X86blsmsk, loadi32>;
- defm BLSMSK64 : bmi_bls<"blsmsk{q}", MRM2r, MRM2m, GR64, i64mem,
- X86blsmsk, loadi64>, VEX_W;
- defm BLSI32 : bmi_bls<"blsi{l}", MRM3r, MRM3m, GR32, i32mem,
- X86blsi, loadi32>;
- defm BLSI64 : bmi_bls<"blsi{q}", MRM3r, MRM3m, GR64, i64mem,
- X86blsi, loadi64>, VEX_W;
+ defm BLSR32 : bmi_bls<"blsr{l}", MRM1r, MRM1m, GR32, i32mem>;
+ defm BLSR64 : bmi_bls<"blsr{q}", MRM1r, MRM1m, GR64, i64mem>, VEX_W;
+ defm BLSMSK32 : bmi_bls<"blsmsk{l}", MRM2r, MRM2m, GR32, i32mem>;
+ defm BLSMSK64 : bmi_bls<"blsmsk{q}", MRM2r, MRM2m, GR64, i64mem>, VEX_W;
+ defm BLSI32 : bmi_bls<"blsi{l}", MRM3r, MRM3m, GR32, i32mem>;
+ defm BLSI64 : bmi_bls<"blsi{q}", MRM3r, MRM3m, GR64, i64mem>, VEX_W;
+}
+
+//===----------------------------------------------------------------------===//
+// Pattern fragments to auto generate BMI instructions.
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasBMI] in {
+ // FIXME: patterns for the load versions are not implemented
+ def : Pat<(and GR32:$src, (add GR32:$src, -1)),
+ (BLSR32rr GR32:$src)>;
+ def : Pat<(and GR64:$src, (add GR64:$src, -1)),
+ (BLSR64rr GR64:$src)>;
+
+ def : Pat<(xor GR32:$src, (add GR32:$src, -1)),
+ (BLSMSK32rr GR32:$src)>;
+ def : Pat<(xor GR64:$src, (add GR64:$src, -1)),
+ (BLSMSK64rr GR64:$src)>;
+
+ def : Pat<(and GR32:$src, (ineg GR32:$src)),
+ (BLSI32rr GR32:$src)>;
+ def : Pat<(and GR64:$src, (ineg GR64:$src)),
+ (BLSI64rr GR64:$src)>;
+}
+
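Illustration only, not from the patch: the new BLSR/BLSMSK/BLSI patterns map three classic bit tricks onto single BMI instructions. The helper names below are made up, but the identities are exactly the and/xor/ineg DAGs the patterns match:

    #include <stdint.h>

    static inline uint32_t blsr_u32(uint32_t x)   { return x & (x - 1); }   /* clear lowest set bit      */
    static inline uint32_t blsmsk_u32(uint32_t x) { return x ^ (x - 1); }   /* mask up to lowest set bit */
    static inline uint32_t blsi_u32(uint32_t x)   { return x & (0u - x); }  /* isolate lowest set bit    */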
+let Predicates = [HasBMI] in {
+ def : Pat<(X86cmov (cttz GR16:$src), (i16 16), (X86_COND_E),
+ (X86cmp GR16:$src, (i16 0))),
+ (TZCNT16rr GR16:$src)>;
+ def : Pat<(X86cmov (cttz GR32:$src), (i32 32), (X86_COND_E),
+ (X86cmp GR32:$src, (i32 0))),
+ (TZCNT32rr GR32:$src)>;
+ def : Pat<(X86cmov (cttz GR64:$src), (i64 64), (X86_COND_E),
+ (X86cmp GR64:$src, (i64 0))),
+ (TZCNT64rr GR64:$src)>;
+ def : Pat<(X86cmov (i16 16), (cttz GR16:$src), (X86_COND_E),
+ (X86cmp GR16:$src, (i16 0))),
+ (TZCNT16rr GR16:$src)>;
+ def : Pat<(X86cmov (i32 32), (cttz GR32:$src), (X86_COND_E),
+ (X86cmp GR32:$src, (i32 0))),
+ (TZCNT32rr GR32:$src)>;
+ def : Pat<(X86cmov (i64 64), (cttz GR64:$src), (X86_COND_E),
+ (X86cmp GR64:$src, (i64 0))),
+ (TZCNT64rr GR64:$src)>;
+
+ def : Pat<(X86cmov (cttz (loadi16 addr:$src)), (i16 16), (X86_COND_E),
+ (X86cmp (loadi16 addr:$src), (i16 0))),
+ (TZCNT16rm addr:$src)>;
+ def : Pat<(X86cmov (cttz (loadi32 addr:$src)), (i32 32), (X86_COND_E),
+ (X86cmp (loadi32 addr:$src), (i32 0))),
+ (TZCNT32rm addr:$src)>;
+ def : Pat<(X86cmov (cttz (loadi64 addr:$src)), (i64 64), (X86_COND_E),
+ (X86cmp (loadi64 addr:$src), (i64 0))),
+ (TZCNT64rm addr:$src)>;
+ def : Pat<(X86cmov (i16 16), (cttz (loadi16 addr:$src)), (X86_COND_E),
+ (X86cmp (loadi16 addr:$src), (i16 0))),
+ (TZCNT16rm addr:$src)>;
+ def : Pat<(X86cmov (i32 32), (cttz (loadi32 addr:$src)), (X86_COND_E),
+ (X86cmp (loadi32 addr:$src), (i32 0))),
+ (TZCNT32rm addr:$src)>;
+ def : Pat<(X86cmov (i64 64), (cttz (loadi64 addr:$src)), (X86_COND_E),
+ (X86cmp (loadi64 addr:$src), (i64 0))),
+ (TZCNT64rm addr:$src)>;
}
+
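Illustration only, not from the patch: these mirror the LZCNT patterns for trailing zeros. TZCNT likewise defines the zero-input result as the operand width, so the guard around __builtin_ctz folds away:

    #include <stdint.h>

    /* With BMI's tzcnt, tzcnt(0) == 32, so the select lowers to the bare instruction. */
    static inline unsigned ctz32_safe(uint32_t x) {
        return x == 0 ? 32u : (unsigned)__builtin_ctz(x);
    }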
multiclass bmi_bextr_bzhi<bits<8> opc, string mnemonic, RegisterClass RC,
X86MemOperand x86memop, Intrinsic Int,
PatFrag ld_frag> {
def rr : I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (Int RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
- T8, VEX_4VOp3;
+ T8PS, VEX_4VOp3;
def rm : I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
!strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (Int (ld_frag addr:$src1), RC:$src2)),
- (implicit EFLAGS)]>, T8, VEX_4VOp3;
+ (implicit EFLAGS)]>, T8PS, VEX_4VOp3;
}
let Predicates = [HasBMI], Defs = [EFLAGS] in {
@@ -1870,18 +2172,38 @@ let Predicates = [HasBMI2], Defs = [EFLAGS] in {
int_x86_bmi_bzhi_64, loadi64>, VEX_W;
}
-def : Pat<(X86bzhi GR32:$src1, GR8:$src2),
- (BZHI32rr GR32:$src1,
- (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
-def : Pat<(X86bzhi (loadi32 addr:$src1), GR8:$src2),
- (BZHI32rm addr:$src1,
- (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
-def : Pat<(X86bzhi GR64:$src1, GR8:$src2),
- (BZHI64rr GR64:$src1,
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
-def : Pat<(X86bzhi (loadi64 addr:$src1), GR8:$src2),
- (BZHI64rm addr:$src1,
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
+
+def CountTrailingOnes : SDNodeXForm<imm, [{
+ // Count the trailing ones in the immediate.
+ return getI8Imm(CountTrailingOnes_64(N->getZExtValue()));
+}]>;
+
+def BZHIMask : ImmLeaf<i64, [{
+ return isMask_64(Imm) && (CountTrailingOnes_64(Imm) > 32);
+}]>;
+
+let Predicates = [HasBMI2] in {
+ def : Pat<(and GR64:$src, BZHIMask:$mask),
+ (BZHI64rr GR64:$src,
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
+ (MOV8ri (CountTrailingOnes imm:$mask)), sub_8bit))>;
+
+ def : Pat<(and GR32:$src, (add (shl 1, GR8:$lz), -1)),
+ (BZHI32rr GR32:$src,
+ (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$lz, sub_8bit))>;
+
+ def : Pat<(and (loadi32 addr:$src), (add (shl 1, GR8:$lz), -1)),
+ (BZHI32rm addr:$src,
+ (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$lz, sub_8bit))>;
+
+ def : Pat<(and GR64:$src, (add (shl 1, GR8:$lz), -1)),
+ (BZHI64rr GR64:$src,
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR8:$lz, sub_8bit))>;
+
+ def : Pat<(and (loadi64 addr:$src), (add (shl 1, GR8:$lz), -1)),
+ (BZHI64rm addr:$src,
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR8:$lz, sub_8bit))>;
+} // HasBMI2
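Illustration only, not from the patch: BZHI zeroes all bits of the source from a given index upward, so the patterns above match both a variable low-bits mask and a wide all-ones immediate (the latter rewritten through CountTrailingOnes into a MOV8ri of the mask length). The variable form in C, assuming n stays below the operand width as the shl-by-1 pattern requires:

    #include <stdint.h>

    /* Keep only the low n bits; with BMI2 this is a single bzhi. */
    static inline uint32_t low_bits_u32(uint32_t x, unsigned n) {
        return x & ((1u << n) - 1);   /* assumes n < 32 */
    }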
let Predicates = [HasBMI] in {
def : Pat<(X86bextr GR32:$src1, GR32:$src2),
@@ -1930,17 +2252,18 @@ multiclass tbm_ternary_imm_intr<bits<8> opc, RegisterClass RC, string OpcodeStr,
!strconcat(OpcodeStr,
"\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"),
[(set RC:$dst, (Int RC:$src1, immoperator:$cntl))]>,
- XOP, XOPA, VEX;
+ XOP, XOPA;
def mi : Ii32<opc, MRMSrcMem, (outs RC:$dst),
(ins x86memop:$src1, immtype:$cntl),
!strconcat(OpcodeStr,
"\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"),
[(set RC:$dst, (Int (ld_frag addr:$src1), immoperator:$cntl))]>,
- XOP, XOPA, VEX;
+ XOP, XOPA;
}
defm BEXTRI32 : tbm_ternary_imm_intr<0x10, GR32, "bextr", i32mem, loadi32,
int_x86_tbm_bextri_u32, i32imm, imm>;
+let ImmT = Imm32S in
defm BEXTRI64 : tbm_ternary_imm_intr<0x10, GR64, "bextr", i64mem, loadi64,
int_x86_tbm_bextri_u64, i64i32imm,
i64immSExt32>, VEX_W;
@@ -1951,11 +2274,11 @@ multiclass tbm_binary_rm<bits<8> opc, Format FormReg, Format FormMem,
let hasSideEffects = 0 in {
def rr : I<opc, FormReg, (outs RC:$dst), (ins RC:$src),
!strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"),
- []>, XOP, XOP9, VEX_4V;
+ []>, XOP_4V, XOP9;
let mayLoad = 1 in
def rm : I<opc, FormMem, (outs RC:$dst), (ins x86memop:$src),
!strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"),
- []>, XOP, XOP9, VEX_4V;
+ []>, XOP_4V, XOP9;
}
}
@@ -2088,6 +2411,7 @@ include "X86InstrCompiler.td"
// Assembler Mnemonic Aliases
//===----------------------------------------------------------------------===//
+def : MnemonicAlias<"call", "callw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"call", "calll", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"call", "callq", "att">, Requires<[In64BitMode]>;
@@ -2098,17 +2422,20 @@ def : MnemonicAlias<"cdq", "cltd", "att">;
def : MnemonicAlias<"cdqe", "cltq", "att">;
def : MnemonicAlias<"cqo", "cqto", "att">;
-// lret maps to lretl, it is not ambiguous with lretq.
-def : MnemonicAlias<"lret", "lretl", "att">;
+// In 64-bit mode lret maps to lretl; it is not ambiguous with lretq.
+def : MnemonicAlias<"lret", "lretw", "att">, Requires<[In16BitMode]>;
+def : MnemonicAlias<"lret", "lretl", "att">, Requires<[Not16BitMode]>;
-def : MnemonicAlias<"leavel", "leave", "att">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"leavel", "leave", "att">, Requires<[Not64BitMode]>;
def : MnemonicAlias<"leaveq", "leave", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"loopz", "loope", "att">;
def : MnemonicAlias<"loopnz", "loopne", "att">;
+def : MnemonicAlias<"pop", "popw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"pop", "popl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"pop", "popq", "att">, Requires<[In64BitMode]>;
+def : MnemonicAlias<"popf", "popfw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"popf", "popfl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"popf", "popfq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"popfd", "popfl", "att">;
@@ -2116,21 +2443,33 @@ def : MnemonicAlias<"popfd", "popfl", "att">;
// FIXME: This is wrong for "push reg". "push %bx" should turn into pushw in
// all modes. However: "push (addr)" and "push $42" should default to
// pushl/pushq depending on the current mode. Similar for "pop %bx"
+def : MnemonicAlias<"push", "pushw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"push", "pushl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"push", "pushq", "att">, Requires<[In64BitMode]>;
+def : MnemonicAlias<"pushf", "pushfw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"pushf", "pushfl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"pushf", "pushfq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"pushfd", "pushfl", "att">;
-def : MnemonicAlias<"popad", "popa", "intel">, Requires<[In32BitMode]>;
-def : MnemonicAlias<"pushad", "pusha", "intel">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"popad", "popal", "intel">, Requires<[Not64BitMode]>;
+def : MnemonicAlias<"pushad", "pushal", "intel">, Requires<[Not64BitMode]>;
+def : MnemonicAlias<"popa", "popaw", "intel">, Requires<[In16BitMode]>;
+def : MnemonicAlias<"pusha", "pushaw", "intel">, Requires<[In16BitMode]>;
+def : MnemonicAlias<"popa", "popal", "intel">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"pusha", "pushal", "intel">, Requires<[In32BitMode]>;
+
+def : MnemonicAlias<"popa", "popaw", "att">, Requires<[In16BitMode]>;
+def : MnemonicAlias<"pusha", "pushaw", "att">, Requires<[In16BitMode]>;
+def : MnemonicAlias<"popa", "popal", "att">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"pusha", "pushal", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"repe", "rep", "att">;
def : MnemonicAlias<"repz", "rep", "att">;
def : MnemonicAlias<"repnz", "repne", "att">;
-def : MnemonicAlias<"retl", "ret", "att">, Requires<[In32BitMode]>;
-def : MnemonicAlias<"retq", "ret", "att">, Requires<[In64BitMode]>;
+def : MnemonicAlias<"ret", "retw", "att">, Requires<[In16BitMode]>;
+def : MnemonicAlias<"ret", "retl", "att">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"ret", "retq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"salb", "shlb", "att">;
def : MnemonicAlias<"salw", "shlw", "att">;
@@ -2146,18 +2485,23 @@ def : MnemonicAlias<"ud2a", "ud2", "att">;
def : MnemonicAlias<"verrw", "verr", "att">;
// System instruction aliases.
-def : MnemonicAlias<"iret", "iretl", "att">;
+def : MnemonicAlias<"iret", "iretw", "att">, Requires<[In16BitMode]>;
+def : MnemonicAlias<"iret", "iretl", "att">, Requires<[Not16BitMode]>;
def : MnemonicAlias<"sysret", "sysretl", "att">;
def : MnemonicAlias<"sysexit", "sysexitl", "att">;
-def : MnemonicAlias<"lgdtl", "lgdt", "att">, Requires<[In32BitMode]>;
-def : MnemonicAlias<"lgdtq", "lgdt", "att">, Requires<[In64BitMode]>;
-def : MnemonicAlias<"lidtl", "lidt", "att">, Requires<[In32BitMode]>;
-def : MnemonicAlias<"lidtq", "lidt", "att">, Requires<[In64BitMode]>;
-def : MnemonicAlias<"sgdtl", "sgdt", "att">, Requires<[In32BitMode]>;
-def : MnemonicAlias<"sgdtq", "sgdt", "att">, Requires<[In64BitMode]>;
-def : MnemonicAlias<"sidtl", "sidt", "att">, Requires<[In32BitMode]>;
-def : MnemonicAlias<"sidtq", "sidt", "att">, Requires<[In64BitMode]>;
+def : MnemonicAlias<"lgdt", "lgdtw", "att">, Requires<[In16BitMode]>;
+def : MnemonicAlias<"lgdt", "lgdtl", "att">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"lgdt", "lgdtq", "att">, Requires<[In64BitMode]>;
+def : MnemonicAlias<"lidt", "lidtw", "att">, Requires<[In16BitMode]>;
+def : MnemonicAlias<"lidt", "lidtl", "att">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"lidt", "lidtq", "att">, Requires<[In64BitMode]>;
+def : MnemonicAlias<"sgdt", "sgdtw", "att">, Requires<[In16BitMode]>;
+def : MnemonicAlias<"sgdt", "sgdtl", "att">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"sgdt", "sgdtq", "att">, Requires<[In64BitMode]>;
+def : MnemonicAlias<"sidt", "sidtw", "att">, Requires<[In16BitMode]>;
+def : MnemonicAlias<"sidt", "sidtl", "att">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"sidt", "sidtq", "att">, Requires<[In64BitMode]>;
// Floating point stack aliases.
@@ -2241,6 +2585,42 @@ def : InstAlias<"clrw $reg", (XOR16rr GR16:$reg, GR16:$reg), 0>;
def : InstAlias<"clrl $reg", (XOR32rr GR32:$reg, GR32:$reg), 0>;
def : InstAlias<"clrq $reg", (XOR64rr GR64:$reg, GR64:$reg), 0>;
+// lods aliases. Accept the destination being omitted because it's implicit
+// in the mnemonic, or the mnemonic suffix being omitted because it's implicit
+// in the destination.
+def : InstAlias<"lodsb $src", (LODSB srcidx8:$src), 0>;
+def : InstAlias<"lodsw $src", (LODSW srcidx16:$src), 0>;
+def : InstAlias<"lods{l|d} $src", (LODSL srcidx32:$src), 0>;
+def : InstAlias<"lodsq $src", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
+def : InstAlias<"lods {$src, %al|al, $src}", (LODSB srcidx8:$src), 0>;
+def : InstAlias<"lods {$src, %ax|ax, $src}", (LODSW srcidx16:$src), 0>;
+def : InstAlias<"lods {$src, %eax|eax, $src}", (LODSL srcidx32:$src), 0>;
+def : InstAlias<"lods {$src, %rax|rax, $src}", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
+
+// stos aliases. Accept the source being omitted because it's implicit in
+// the mnemonic, or the mnemonic suffix being omitted because it's implicit
+// in the source.
+def : InstAlias<"stosb $dst", (STOSB dstidx8:$dst), 0>;
+def : InstAlias<"stosw $dst", (STOSW dstidx16:$dst), 0>;
+def : InstAlias<"stos{l|d} $dst", (STOSL dstidx32:$dst), 0>;
+def : InstAlias<"stosq $dst", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
+def : InstAlias<"stos {%al, $dst|$dst, al}", (STOSB dstidx8:$dst), 0>;
+def : InstAlias<"stos {%ax, $dst|$dst, ax}", (STOSW dstidx16:$dst), 0>;
+def : InstAlias<"stos {%eax, $dst|$dst, eax}", (STOSL dstidx32:$dst), 0>;
+def : InstAlias<"stos {%rax, $dst|$dst, rax}", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
+
+// scas aliases. Accept the destination being omitted because it's implicit
+// in the mnemonic, or the mnemonic suffix being omitted because it's implicit
+// in the destination.
+def : InstAlias<"scasb $dst", (SCASB dstidx8:$dst), 0>;
+def : InstAlias<"scasw $dst", (SCASW dstidx16:$dst), 0>;
+def : InstAlias<"scas{l|d} $dst", (SCASL dstidx32:$dst), 0>;
+def : InstAlias<"scasq $dst", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
+def : InstAlias<"scas {$dst, %al|al, $dst}", (SCASB dstidx8:$dst), 0>;
+def : InstAlias<"scas {$dst, %ax|ax, $dst}", (SCASW dstidx16:$dst), 0>;
+def : InstAlias<"scas {$dst, %eax|eax, $dst}", (SCASL dstidx32:$dst), 0>;
+def : InstAlias<"scas {$dst, %rax|rax, $dst}", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
+
// div and idiv aliases for explicit A register.
def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8r GR8 :$src)>;
def : InstAlias<"div{w}\t{$src, %ax|ax, $src}", (DIV16r GR16:$src)>;
@@ -2325,10 +2705,22 @@ def : InstAlias<"fnstsw" , (FNSTSW16r)>;
// lcall and ljmp aliases. This seems to be an odd mapping in 64-bit mode, but
// this is compatible with what GAS does.
-def : InstAlias<"lcall $seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>;
-def : InstAlias<"ljmp $seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>;
-def : InstAlias<"lcall *$dst", (FARCALL32m opaque48mem:$dst)>;
-def : InstAlias<"ljmp *$dst", (FARJMP32m opaque48mem:$dst)>;
+def : InstAlias<"lcall $seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg), 0>, Requires<[Not16BitMode]>;
+def : InstAlias<"ljmp $seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg), 0>, Requires<[Not16BitMode]>;
+def : InstAlias<"lcall *$dst", (FARCALL32m opaque48mem:$dst), 0>, Requires<[Not16BitMode]>;
+def : InstAlias<"ljmp *$dst", (FARJMP32m opaque48mem:$dst), 0>, Requires<[Not16BitMode]>;
+def : InstAlias<"lcall $seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
+def : InstAlias<"ljmp $seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
+def : InstAlias<"lcall *$dst", (FARCALL16m opaque32mem:$dst), 0>, Requires<[In16BitMode]>;
+def : InstAlias<"ljmp *$dst", (FARJMP16m opaque32mem:$dst), 0>, Requires<[In16BitMode]>;
+
+def : InstAlias<"call *$dst", (CALL64m i16mem:$dst), 0>, Requires<[In64BitMode]>;
+def : InstAlias<"jmp *$dst", (JMP64m i16mem:$dst), 0>, Requires<[In64BitMode]>;
+def : InstAlias<"call *$dst", (CALL32m i16mem:$dst), 0>, Requires<[In32BitMode]>;
+def : InstAlias<"jmp *$dst", (JMP32m i16mem:$dst), 0>, Requires<[In32BitMode]>;
+def : InstAlias<"call *$dst", (CALL16m i16mem:$dst), 0>, Requires<[In16BitMode]>;
+def : InstAlias<"jmp *$dst", (JMP16m i16mem:$dst), 0>, Requires<[In16BitMode]>;
+
// "imul <imm>, B" is an alias for "imul <imm>, B, B".
def : InstAlias<"imulw $imm, $r", (IMUL16rri GR16:$r, GR16:$r, i16imm:$imm)>;
@@ -2348,8 +2740,10 @@ def : InstAlias<"inl\t$port", (IN32ri i8imm:$port), 0>;
// jmp and call aliases for lcall and ljmp. jmp $42,$5 -> ljmp
-def : InstAlias<"call $seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>;
-def : InstAlias<"jmp $seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>;
+def : InstAlias<"call $seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
+def : InstAlias<"jmp $seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
+def : InstAlias<"call $seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[Not16BitMode]>;
+def : InstAlias<"jmp $seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>, Requires<[Not16BitMode]>;
def : InstAlias<"callw $seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>;
def : InstAlias<"jmpw $seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>;
def : InstAlias<"calll $seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>;
@@ -2358,11 +2752,11 @@ def : InstAlias<"jmpl $seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>;
// Force mov without a suffix with a segment and mem to prefer the 'l' form of
// the move. All segment/mem forms are equivalent, this has the shortest
// encoding.
-def : InstAlias<"mov $mem, $seg", (MOV32sm SEGMENT_REG:$seg, i32mem:$mem)>;
-def : InstAlias<"mov $seg, $mem", (MOV32ms i32mem:$mem, SEGMENT_REG:$seg)>;
+def : InstAlias<"mov $mem, $seg", (MOV32sm SEGMENT_REG:$seg, i32mem:$mem), 0>;
+def : InstAlias<"mov $seg, $mem", (MOV32ms i32mem:$mem, SEGMENT_REG:$seg), 0>;
// Match 'movq <largeimm>, <reg>' as an alias for movabsq.
-def : InstAlias<"movq $imm, $reg", (MOV64ri GR64:$reg, i64imm:$imm)>;
+def : InstAlias<"movq $imm, $reg", (MOV64ri GR64:$reg, i64imm:$imm), 0>;
// Match 'movq GR64, MMX' as an alias for movd.
def : InstAlias<"movq $src, $dst",
@@ -2370,10 +2764,6 @@ def : InstAlias<"movq $src, $dst",
def : InstAlias<"movq $src, $dst",
(MMX_MOVD64from64rr GR64:$dst, VR64:$src), 0>;
-// movsd with no operands (as opposed to the SSE scalar move of a double) is an
-// alias for movsl. (as in rep; movsd)
-def : InstAlias<"movsd", (MOVSD), 0>;
-
// movsx aliases
def : InstAlias<"movsx $src, $dst", (MOVSX16rr8 GR16:$dst, GR8:$src), 0>;
def : InstAlias<"movsx $src, $dst", (MOVSX16rm8 GR16:$dst, i8mem:$src), 0>;
@@ -2403,7 +2793,7 @@ def : InstAlias<"outl\t$port", (OUT32ir i8imm:$port), 0>;
// 'sldt <mem>' can be encoded with either sldtw or sldtq with the same
// effect (both store to a 16-bit mem). Force to sldtw to avoid ambiguity
// errors, since its encoding is the most compact.
-def : InstAlias<"sldt $mem", (SLDT16m i16mem:$mem)>;
+def : InstAlias<"sldt $mem", (SLDT16m i16mem:$mem), 0>;
// shld/shrd op,op -> shld op, op, CL
def : InstAlias<"shld{w}\t{$r2, $r1|$r1, $r2}", (SHLD16rrCL GR16:$r1, GR16:$r2), 0>;
@@ -2449,19 +2839,29 @@ defm : ShiftRotateByOneAlias<"ror", "ROR">;
FIXME */
// test: We accept "testX <reg>, <mem>" and "testX <mem>, <reg>" as synonyms.
-def : InstAlias<"test{b}\t{$val, $mem|$mem, $val}", (TEST8rm GR8 :$val, i8mem :$mem)>;
-def : InstAlias<"test{w}\t{$val, $mem|$mem, $val}", (TEST16rm GR16:$val, i16mem:$mem)>;
-def : InstAlias<"test{l}\t{$val, $mem|$mem, $val}", (TEST32rm GR32:$val, i32mem:$mem)>;
-def : InstAlias<"test{q}\t{$val, $mem|$mem, $val}", (TEST64rm GR64:$val, i64mem:$mem)>;
+def : InstAlias<"test{b}\t{$val, $mem|$mem, $val}",
+ (TEST8rm GR8 :$val, i8mem :$mem), 0>;
+def : InstAlias<"test{w}\t{$val, $mem|$mem, $val}",
+ (TEST16rm GR16:$val, i16mem:$mem), 0>;
+def : InstAlias<"test{l}\t{$val, $mem|$mem, $val}",
+ (TEST32rm GR32:$val, i32mem:$mem), 0>;
+def : InstAlias<"test{q}\t{$val, $mem|$mem, $val}",
+ (TEST64rm GR64:$val, i64mem:$mem), 0>;
// xchg: We accept "xchgX <reg>, <mem>" and "xchgX <mem>, <reg>" as synonyms.
-def : InstAlias<"xchg{b}\t{$mem, $val|$val, $mem}", (XCHG8rm GR8 :$val, i8mem :$mem)>;
-def : InstAlias<"xchg{w}\t{$mem, $val|$val, $mem}", (XCHG16rm GR16:$val, i16mem:$mem)>;
-def : InstAlias<"xchg{l}\t{$mem, $val|$val, $mem}", (XCHG32rm GR32:$val, i32mem:$mem)>;
-def : InstAlias<"xchg{q}\t{$mem, $val|$val, $mem}", (XCHG64rm GR64:$val, i64mem:$mem)>;
+def : InstAlias<"xchg{b}\t{$mem, $val|$val, $mem}",
+ (XCHG8rm GR8 :$val, i8mem :$mem), 0>;
+def : InstAlias<"xchg{w}\t{$mem, $val|$val, $mem}",
+ (XCHG16rm GR16:$val, i16mem:$mem), 0>;
+def : InstAlias<"xchg{l}\t{$mem, $val|$val, $mem}",
+ (XCHG32rm GR32:$val, i32mem:$mem), 0>;
+def : InstAlias<"xchg{q}\t{$mem, $val|$val, $mem}",
+ (XCHG64rm GR64:$val, i64mem:$mem), 0>;
// xchg: We accept "xchgX <reg>, %eax" and "xchgX %eax, <reg>" as synonyms.
-def : InstAlias<"xchg{w}\t{%ax, $src|$src, ax}", (XCHG16ar GR16:$src)>;
-def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}", (XCHG32ar GR32:$src)>, Requires<[In32BitMode]>;
-def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}", (XCHG32ar64 GR32_NOAX:$src)>, Requires<[In64BitMode]>;
-def : InstAlias<"xchg{q}\t{%rax, $src|$src, rax}", (XCHG64ar GR64:$src)>;
+def : InstAlias<"xchg{w}\t{%ax, $src|$src, ax}", (XCHG16ar GR16:$src), 0>;
+def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}",
+ (XCHG32ar GR32:$src), 0>, Requires<[Not64BitMode]>;
+def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}",
+ (XCHG32ar64 GR32_NOAX:$src), 0>, Requires<[In64BitMode]>;
+def : InstAlias<"xchg{q}\t{%rax, $src|$src, rax}", (XCHG64ar GR64:$src), 0>;
diff --git a/contrib/llvm/lib/Target/X86/X86InstrMMX.td b/contrib/llvm/lib/Target/X86/X86InstrMMX.td
index ba58143..ecf80a1 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrMMX.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrMMX.td
@@ -254,6 +254,11 @@ let neverHasSideEffects = 1 in
def MMX_MOVQ64rr : MMXI<0x6F, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
"movq\t{$src, $dst|$dst, $src}", [],
IIC_MMX_MOVQ_RR>;
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
+def MMX_MOVQ64rr_REV : MMXI<0x7F, MRMDestReg, (outs VR64:$dst), (ins VR64:$src),
+ "movq\t{$src, $dst|$dst, $src}", [],
+ IIC_MMX_MOVQ_RR>;
+}
} // SchedRW
let SchedRW = [WriteLoad] in {
@@ -262,11 +267,12 @@ def MMX_MOVQ64rm : MMXI<0x6F, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set VR64:$dst, (load_mmx addr:$src))],
IIC_MMX_MOVQ_RM>;
+} // SchedRW
+let SchedRW = [WriteStore] in
def MMX_MOVQ64mr : MMXI<0x7F, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src),
"movq\t{$src, $dst|$dst, $src}",
[(store (x86mmx VR64:$src), addr:$dst)],
IIC_MMX_MOVQ_RM>;
-} // SchedRW
let SchedRW = [WriteMove] in {
def MMX_MOVDQ2Qrr : MMXSDIi8<0xD6, MRMSrcReg, (outs VR64:$dst),
@@ -524,24 +530,24 @@ def MMX_PSHUFWmi : MMXIi8<0x70, MRMSrcMem,
// -- Conversion Instructions
defm MMX_CVTPS2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtps2pi,
f64mem, load, "cvtps2pi\t{$src, $dst|$dst, $src}",
- MMX_CVT_PS_ITINS, SSEPackedSingle>, TB;
+ MMX_CVT_PS_ITINS, SSEPackedSingle>, PS;
defm MMX_CVTPD2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtpd2pi,
f128mem, memop, "cvtpd2pi\t{$src, $dst|$dst, $src}",
- MMX_CVT_PD_ITINS, SSEPackedDouble>, TB, OpSize;
+ MMX_CVT_PD_ITINS, SSEPackedDouble>, PD;
defm MMX_CVTTPS2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttps2pi,
f64mem, load, "cvttps2pi\t{$src, $dst|$dst, $src}",
- MMX_CVT_PS_ITINS, SSEPackedSingle>, TB;
+ MMX_CVT_PS_ITINS, SSEPackedSingle>, PS;
defm MMX_CVTTPD2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttpd2pi,
f128mem, memop, "cvttpd2pi\t{$src, $dst|$dst, $src}",
- MMX_CVT_PD_ITINS, SSEPackedDouble>, TB, OpSize;
+ MMX_CVT_PD_ITINS, SSEPackedDouble>, PD;
defm MMX_CVTPI2PD : sse12_cvt_pint<0x2A, VR64, VR128, int_x86_sse_cvtpi2pd,
i64mem, load, "cvtpi2pd\t{$src, $dst|$dst, $src}",
- MMX_CVT_PD_ITINS, SSEPackedDouble>, TB, OpSize;
+ MMX_CVT_PD_ITINS, SSEPackedDouble>, PD;
let Constraints = "$src1 = $dst" in {
defm MMX_CVTPI2PS : sse12_cvt_pint_3addr<0x2A, VR64, VR128,
int_x86_sse_cvtpi2ps,
i64mem, load, "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
- SSEPackedSingle>, TB;
+ SSEPackedSingle>, PS;
}
// Extract / Insert
diff --git a/contrib/llvm/lib/Target/X86/X86InstrSSE.td b/contrib/llvm/lib/Target/X86/X86InstrSSE.td
index a5debc0..2bb898e 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrSSE.td
@@ -120,6 +120,11 @@ def SSE_DIV_ITINS_P : SizeItins<
SSE_DIV_F32P, SSE_DIV_F64P
>;
+let Sched = WriteVecLogic in
+def SSE_VEC_BIT_ITINS_P : OpndItins<
+ IIC_SSE_BIT_P_RR, IIC_SSE_BIT_P_RM
+>;
+
def SSE_BIT_ITINS_P : OpndItins<
IIC_SSE_BIT_P_RR, IIC_SSE_BIT_P_RM
>;
@@ -171,6 +176,7 @@ def SSE_INSERT_ITINS : OpndItins<
IIC_SSE_INSERTPS_RR, IIC_SSE_INSERTPS_RM
>;
+let Sched = WriteMPSAD in
def SSE_MPSADBW_ITINS : OpndItins<
IIC_SSE_MPSADBW_RR, IIC_SSE_MPSADBW_RM
>;
@@ -179,6 +185,44 @@ def SSE_PMULLD_ITINS : OpndItins<
IIC_SSE_PMULLD_RR, IIC_SSE_PMULLD_RM
>;
+// Definitions for backward compatibility.
+// The instructions mapped on these definitions uses a different itinerary
+// than the actual scheduling model.
+let Sched = WriteShuffle in
+def DEFAULT_ITINS_SHUFFLESCHED : OpndItins<
+ IIC_ALU_NONMEM, IIC_ALU_MEM
+>;
+
+let Sched = WriteVecIMul in
+def DEFAULT_ITINS_VECIMULSCHED : OpndItins<
+ IIC_ALU_NONMEM, IIC_ALU_MEM
+>;
+
+let Sched = WriteShuffle in
+def SSE_INTALU_ITINS_SHUFF_P : OpndItins<
+ IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
+>;
+
+let Sched = WriteMPSAD in
+def DEFAULT_ITINS_MPSADSCHED : OpndItins<
+ IIC_ALU_NONMEM, IIC_ALU_MEM
+>;
+
+let Sched = WriteFBlend in
+def DEFAULT_ITINS_FBLENDSCHED : OpndItins<
+ IIC_ALU_NONMEM, IIC_ALU_MEM
+>;
+
+let Sched = WriteBlend in
+def DEFAULT_ITINS_BLENDSCHED : OpndItins<
+ IIC_ALU_NONMEM, IIC_ALU_MEM
+>;
+
+let Sched = WriteFBlend in
+def SSE_INTALU_ITINS_FBLEND_P : OpndItins<
+ IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
+>;
+
//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instructions Classes
//===----------------------------------------------------------------------===//
@@ -210,6 +254,7 @@ multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
Operand memopr, ComplexPattern mem_cpat,
OpndItins itins,
bit Is2Addr = 1> {
+let isCodeGenOnly = 1 in {
def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!if(Is2Addr,
!strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
@@ -227,6 +272,7 @@ multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
RC:$src1, mem_cpat:$src2))], itins.rm>,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
+}
/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -497,14 +543,14 @@ multiclass sse12_move_rr<RegisterClass RC, SDNode OpNode, ValueType vt,
!strconcat(base_opc, asm_opr),
[(set VR128:$dst, (vt (OpNode VR128:$src1,
(scalar_to_vector RC:$src2))))],
- IIC_SSE_MOV_S_RR>, Sched<[WriteMove]>;
+ IIC_SSE_MOV_S_RR>, Sched<[WriteFShuffle]>;
// For the disassembler
- let isCodeGenOnly = 1, hasSideEffects = 0 in
+ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
def rr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src1, RC:$src2),
!strconcat(base_opc, asm_opr),
- [], IIC_SSE_MOV_S_RR>, Sched<[WriteMove]>;
+ [], IIC_SSE_MOV_S_RR>, Sched<[WriteFShuffle]>;
}
multiclass sse12_move<RegisterClass RC, SDNode OpNode, ValueType vt,
@@ -800,7 +846,7 @@ multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
let neverHasSideEffects = 1 in
def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], itins.rr, d>,
- Sched<[WriteMove]>;
+ Sched<[WriteFShuffle]>;
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
@@ -810,41 +856,41 @@ let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
"movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
- TB, VEX;
+ PS, VEX;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
"movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
- TB, OpSize, VEX;
+ PD, VEX;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
"movups", SSEPackedSingle, SSE_MOVU_ITINS>,
- TB, VEX;
+ PS, VEX;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
"movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
- TB, OpSize, VEX;
+ PD, VEX;
defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
"movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
- TB, VEX, VEX_L;
+ PS, VEX, VEX_L;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
"movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
- TB, OpSize, VEX, VEX_L;
+ PD, VEX, VEX_L;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
"movups", SSEPackedSingle, SSE_MOVU_ITINS>,
- TB, VEX, VEX_L;
+ PS, VEX, VEX_L;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
"movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
- TB, OpSize, VEX, VEX_L;
+ PD, VEX, VEX_L;
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
"movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
- TB;
+ PS;
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
"movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
- TB, OpSize;
+ PD;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
"movups", SSEPackedSingle, SSE_MOVU_ITINS>,
- TB;
+ PS;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
"movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
- TB, OpSize;
+ PD;
let SchedRW = [WriteStore] in {
def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
@@ -882,7 +928,8 @@ def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
} // SchedRW
// For disassembler
-let isCodeGenOnly = 1, hasSideEffects = 0, SchedRW = [WriteMove] in {
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
+ SchedRW = [WriteFShuffle] in {
def VMOVAPSrr_REV : VPSI<0x29, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src),
"movaps\t{$src, $dst|$dst, $src}", [],
@@ -958,7 +1005,8 @@ def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
} // SchedRW
// For disassembler
-let isCodeGenOnly = 1, hasSideEffects = 0, SchedRW = [WriteMove] in {
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
+ SchedRW = [WriteMove] in {
def MOVAPSrr_REV : PSI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
"movaps\t{$src, $dst|$dst, $src}", [],
IIC_SSE_MOVA_P_RR>;
@@ -1138,16 +1186,16 @@ multiclass sse12_mov_hilo_packed_base<bits<8>opc, SDNode psnode, SDNode pdnode,
[(set VR128:$dst,
(psnode VR128:$src1,
(bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
- itin, SSEPackedSingle>, TB,
- Sched<[WriteShuffleLd, ReadAfterLd]>;
+ itin, SSEPackedSingle>, PS,
+ Sched<[WriteFShuffleLd, ReadAfterLd]>;
def PDrm : PI<opc, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
!strconcat(base_opc, "d", asm_opr),
[(set VR128:$dst, (v2f64 (pdnode VR128:$src1,
(scalar_to_vector (loadf64 addr:$src2)))))],
- itin, SSEPackedDouble>, TB, OpSize,
- Sched<[WriteShuffleLd, ReadAfterLd]>;
+ itin, SSEPackedDouble>, PD,
+ Sched<[WriteFShuffleLd, ReadAfterLd]>;
}
@@ -1345,14 +1393,14 @@ let AddedComplexity = 20, Predicates = [UseAVX] in {
[(set VR128:$dst,
(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
IIC_SSE_MOV_LH>,
- VEX_4V, Sched<[WriteShuffle]>;
+ VEX_4V, Sched<[WriteFShuffle]>;
def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
"movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
IIC_SSE_MOV_LH>,
- VEX_4V, Sched<[WriteShuffle]>;
+ VEX_4V, Sched<[WriteFShuffle]>;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
@@ -1360,13 +1408,13 @@ let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
"movlhps\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
- IIC_SSE_MOV_LH>, Sched<[WriteShuffle]>;
+ IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
"movhlps\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
- IIC_SSE_MOV_LH>, Sched<[WriteShuffle]>;
+ IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
}
let Predicates = [UseAVX] in {
@@ -1513,9 +1561,9 @@ defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">,
let Predicates = [UseAVX] in {
def : InstAlias<"vcvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",
- (VCVTSI2SSrm FR64:$dst, FR64:$src1, i32mem:$src)>;
+ (VCVTSI2SSrm FR64:$dst, FR64:$src1, i32mem:$src), 0>;
def : InstAlias<"vcvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}",
- (VCVTSI2SDrm FR64:$dst, FR64:$src1, i32mem:$src)>;
+ (VCVTSI2SDrm FR64:$dst, FR64:$src1, i32mem:$src), 0>;
def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
(VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
@@ -1579,9 +1627,9 @@ def : InstAlias<"cvttsd2si{q}\t{$src, $dst|$dst, $src}",
(CVTTSD2SI64rm GR64:$dst, f64mem:$src), 0>;
def : InstAlias<"cvtsi2ss\t{$src, $dst|$dst, $src}",
- (CVTSI2SSrm FR64:$dst, i32mem:$src)>;
+ (CVTSI2SSrm FR64:$dst, i32mem:$src), 0>;
def : InstAlias<"cvtsi2sd\t{$src, $dst|$dst, $src}",
- (CVTSI2SDrm FR64:$dst, i32mem:$src)>;
+ (CVTSI2SDrm FR64:$dst, i32mem:$src), 0>;
// Conversion Instructions Intrinsics - Match intrinsics which expect MM
// and/or XMM operand(s).
@@ -1632,40 +1680,43 @@ defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
sdmem, sse_load_f64, "cvtsd2si", SSE_CVT_SD2SI>, XD, REX_W;
-let Predicates = [UseAVX] in {
-defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
- int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
- SSE_CVT_Scalar, 0>, XS, VEX_4V;
-defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
- int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
- SSE_CVT_Scalar, 0>, XS, VEX_4V,
- VEX_W;
-defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
- int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
- SSE_CVT_Scalar, 0>, XD, VEX_4V;
-defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
- int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
- SSE_CVT_Scalar, 0>, XD,
- VEX_4V, VEX_W;
-}
-let Constraints = "$src1 = $dst" in {
- defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
- int_x86_sse_cvtsi2ss, i32mem, loadi32,
- "cvtsi2ss{l}", SSE_CVT_Scalar>, XS;
- defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
- int_x86_sse_cvtsi642ss, i64mem, loadi64,
- "cvtsi2ss{q}", SSE_CVT_Scalar>, XS, REX_W;
- defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
- int_x86_sse2_cvtsi2sd, i32mem, loadi32,
- "cvtsi2sd{l}", SSE_CVT_Scalar>, XD;
- defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
- int_x86_sse2_cvtsi642sd, i64mem, loadi64,
- "cvtsi2sd{q}", SSE_CVT_Scalar>, XD, REX_W;
-}
+let isCodeGenOnly = 1 in {
+ let Predicates = [UseAVX] in {
+ defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
+ int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
+ SSE_CVT_Scalar, 0>, XS, VEX_4V;
+ defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
+ int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
+ SSE_CVT_Scalar, 0>, XS, VEX_4V,
+ VEX_W;
+ defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
+ int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
+ SSE_CVT_Scalar, 0>, XD, VEX_4V;
+ defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
+ int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
+ SSE_CVT_Scalar, 0>, XD,
+ VEX_4V, VEX_W;
+ }
+ let Constraints = "$src1 = $dst" in {
+ defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
+ int_x86_sse_cvtsi2ss, i32mem, loadi32,
+ "cvtsi2ss{l}", SSE_CVT_Scalar>, XS;
+ defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
+ int_x86_sse_cvtsi642ss, i64mem, loadi64,
+ "cvtsi2ss{q}", SSE_CVT_Scalar>, XS, REX_W;
+ defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
+ int_x86_sse2_cvtsi2sd, i32mem, loadi32,
+ "cvtsi2sd{l}", SSE_CVT_Scalar>, XD;
+ defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
+ int_x86_sse2_cvtsi642sd, i64mem, loadi64,
+ "cvtsi2sd{q}", SSE_CVT_Scalar>, XD, REX_W;
+ }
+} // isCodeGenOnly = 1
/// SSE 1 Only
// Aliases for intrinsics
+let isCodeGenOnly = 1 in {
let Predicates = [UseAVX] in {
defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
ssmem, sse_load_f32, "cvttss2si",
@@ -1694,6 +1745,7 @@ defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
"cvttsd2si", SSE_CVT_SD2SI>, XD, REX_W;
+} // isCodeGenOnly = 1
let Predicates = [UseAVX] in {
defm VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
@@ -1713,16 +1765,16 @@ defm CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
"vcvtdq2ps\t{$src, $dst|$dst, $src}",
SSEPackedSingle, SSE_CVT_PS>,
- TB, VEX, Requires<[HasAVX]>;
+ PS, VEX, Requires<[HasAVX]>;
defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, i256mem,
"vcvtdq2ps\t{$src, $dst|$dst, $src}",
SSEPackedSingle, SSE_CVT_PS>,
- TB, VEX, VEX_L, Requires<[HasAVX]>;
+ PS, VEX, VEX_L, Requires<[HasAVX]>;
defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
"cvtdq2ps\t{$src, $dst|$dst, $src}",
SSEPackedSingle, SSE_CVT_PS>,
- TB, Requires<[UseSSE2]>;
+ PS, Requires<[UseSSE2]>;
let Predicates = [UseAVX] in {
def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
@@ -1792,6 +1844,7 @@ def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
XD,
Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;
+let isCodeGenOnly = 1 in {
def Int_VCVTSD2SSrr: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
"vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
@@ -1823,6 +1876,7 @@ def Int_CVTSD2SSrm: I<0x5A, MRMSrcReg,
IIC_SSE_CVT_Scalar_RM>, XD, Requires<[UseSSE2]>,
Sched<[WriteCvtF2FLd, ReadAfterLd]>;
}
+} // isCodeGenOnly = 1
// Convert scalar single to scalar double
// SSE2 instructions with XS prefix
@@ -1875,6 +1929,7 @@ def : Pat<(fextend (loadf32 addr:$src)),
def : Pat<(extloadf32 addr:$src),
(CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[UseSSE2, OptForSpeed]>;
+let isCodeGenOnly = 1 in {
def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
@@ -1905,6 +1960,7 @@ def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
IIC_SSE_CVT_Scalar_RM>, XS, Requires<[UseSSE2]>,
Sched<[WriteCvtF2FLd, ReadAfterLd]>;
}
+} // isCodeGenOnly = 1
// Convert packed single/double fp to doubleword
def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
@@ -1949,7 +2005,7 @@ def VCVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
// XMM only
def : InstAlias<"vcvtpd2dqx\t{$src, $dst|$dst, $src}",
- (VCVTPD2DQrr VR128:$dst, VR128:$src)>;
+ (VCVTPD2DQrr VR128:$dst, VR128:$src), 0>;
def VCVTPD2DQXrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"vcvtpd2dqx\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
@@ -1968,7 +2024,7 @@ def VCVTPD2DQYrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
(int_x86_avx_cvt_pd2dq_256 (loadv4f64 addr:$src)))]>,
VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
def : InstAlias<"vcvtpd2dq\t{$src, $dst|$dst, $src}",
- (VCVTPD2DQYrr VR128:$dst, VR256:$src)>;
+ (VCVTPD2DQYrr VR128:$dst, VR256:$src), 0>;
}
def CVTPD2DQrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
@@ -2071,7 +2127,7 @@ def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
// XMM only
def : InstAlias<"vcvttpd2dqx\t{$src, $dst|$dst, $src}",
- (VCVTTPD2DQrr VR128:$dst, VR128:$src)>;
+ (VCVTTPD2DQrr VR128:$dst, VR128:$src), 0>;
def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvttpd2dqx\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvttpd2dq
@@ -2090,7 +2146,7 @@ def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
(int_x86_avx_cvtt_pd2dq_256 (loadv4f64 addr:$src)))],
IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
def : InstAlias<"vcvttpd2dq\t{$src, $dst|$dst, $src}",
- (VCVTTPD2DQYrr VR128:$dst, VR256:$src)>;
+ (VCVTTPD2DQYrr VR128:$dst, VR256:$src), 0>;
let Predicates = [HasAVX] in {
def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
@@ -2116,32 +2172,32 @@ let Predicates = [HasAVX] in {
def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
- IIC_SSE_CVT_PD_RR>, TB, VEX, Sched<[WriteCvtF2F]>;
+ IIC_SSE_CVT_PD_RR>, PS, VEX, Sched<[WriteCvtF2F]>;
def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
- IIC_SSE_CVT_PD_RM>, TB, VEX, Sched<[WriteCvtF2FLd]>;
+ IIC_SSE_CVT_PD_RM>, PS, VEX, Sched<[WriteCvtF2FLd]>;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
(int_x86_avx_cvt_ps2_pd_256 VR128:$src))],
- IIC_SSE_CVT_PD_RR>, TB, VEX, VEX_L, Sched<[WriteCvtF2F]>;
+ IIC_SSE_CVT_PD_RR>, PS, VEX, VEX_L, Sched<[WriteCvtF2F]>;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
(int_x86_avx_cvt_ps2_pd_256 (loadv4f32 addr:$src)))],
- IIC_SSE_CVT_PD_RM>, TB, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
+ IIC_SSE_CVT_PD_RM>, PS, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
}
let Predicates = [UseSSE2] in {
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
- IIC_SSE_CVT_PD_RR>, TB, Sched<[WriteCvtF2F]>;
+ IIC_SSE_CVT_PD_RR>, PS, Sched<[WriteCvtF2F]>;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
- IIC_SSE_CVT_PD_RM>, TB, Sched<[WriteCvtF2FLd]>;
+ IIC_SSE_CVT_PD_RM>, PS, Sched<[WriteCvtF2FLd]>;
}
// Convert Packed DW Integers to Packed Double FP
@@ -2196,7 +2252,7 @@ def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
// XMM only
def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}",
- (VCVTPD2PSrr VR128:$dst, VR128:$src)>;
+ (VCVTPD2PSrr VR128:$dst, VR128:$src), 0>;
def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtpd2psx\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
@@ -2215,7 +2271,7 @@ def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
(int_x86_avx_cvt_pd2_ps_256 (loadv4f64 addr:$src)))],
IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
def : InstAlias<"vcvtpd2ps\t{$src, $dst|$dst, $src}",
- (VCVTPD2PSYrr VR128:$dst, VR256:$src)>;
+ (VCVTPD2PSYrr VR128:$dst, VR256:$src), 0>;
def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtpd2ps\t{$src, $dst|$dst, $src}",
@@ -2287,7 +2343,7 @@ multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
// Accept explicit immediate argument form instead of comparison code.
- let neverHasSideEffects = 1 in {
+ let isAsmParserOnly = 1, hasSideEffects = 0 in {
def rr_alt : SIi8<0xC2, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2, i8imm:$cc), asm_alt, [],
IIC_SSE_ALU_F32S_RR>, Sched<[itins.Sched]>;
@@ -2299,23 +2355,23 @@ multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
}
}
-defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, AVXCC, X86cmpss, f32, loadf32,
+defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, AVXCC, X86cmps, f32, loadf32,
"cmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
SSE_ALU_F32S>,
XS, VEX_4V, VEX_LIG;
-defm VCMPSD : sse12_cmp_scalar<FR64, f64mem, AVXCC, X86cmpsd, f64, loadf64,
+defm VCMPSD : sse12_cmp_scalar<FR64, f64mem, AVXCC, X86cmps, f64, loadf64,
"cmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
SSE_ALU_F32S>, // same latency as 32 bit compare
XD, VEX_4V, VEX_LIG;
let Constraints = "$src1 = $dst" in {
- defm CMPSS : sse12_cmp_scalar<FR32, f32mem, SSECC, X86cmpss, f32, loadf32,
+ defm CMPSS : sse12_cmp_scalar<FR32, f32mem, SSECC, X86cmps, f32, loadf32,
"cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
"cmpss\t{$cc, $src2, $dst|$dst, $src2, $cc}", SSE_ALU_F32S>,
XS;
- defm CMPSD : sse12_cmp_scalar<FR64, f64mem, SSECC, X86cmpsd, f64, loadf64,
+ defm CMPSD : sse12_cmp_scalar<FR64, f64mem, SSECC, X86cmps, f64, loadf64,
"cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
"cmpsd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
SSE_ALU_F64S>,
@@ -2338,23 +2394,25 @@ multiclass sse12_cmp_scalar_int<X86MemOperand x86memop, Operand CC,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
-// Aliases to match intrinsics which expect XMM operand(s).
-defm Int_VCMPSS : sse12_cmp_scalar_int<f32mem, AVXCC, int_x86_sse_cmp_ss,
- "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
- SSE_ALU_F32S>,
- XS, VEX_4V;
-defm Int_VCMPSD : sse12_cmp_scalar_int<f64mem, AVXCC, int_x86_sse2_cmp_sd,
- "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
- SSE_ALU_F32S>, // same latency as f32
- XD, VEX_4V;
-let Constraints = "$src1 = $dst" in {
- defm Int_CMPSS : sse12_cmp_scalar_int<f32mem, SSECC, int_x86_sse_cmp_ss,
- "cmp${cc}ss\t{$src, $dst|$dst, $src}",
- SSE_ALU_F32S>, XS;
- defm Int_CMPSD : sse12_cmp_scalar_int<f64mem, SSECC, int_x86_sse2_cmp_sd,
- "cmp${cc}sd\t{$src, $dst|$dst, $src}",
- SSE_ALU_F64S>,
- XD;
+let isCodeGenOnly = 1 in {
+ // Aliases to match intrinsics which expect XMM operand(s).
+ defm Int_VCMPSS : sse12_cmp_scalar_int<f32mem, AVXCC, int_x86_sse_cmp_ss,
+ "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
+ SSE_ALU_F32S>,
+ XS, VEX_4V;
+ defm Int_VCMPSD : sse12_cmp_scalar_int<f64mem, AVXCC, int_x86_sse2_cmp_sd,
+ "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
+ SSE_ALU_F32S>, // same latency as f32
+ XD, VEX_4V;
+ let Constraints = "$src1 = $dst" in {
+ defm Int_CMPSS : sse12_cmp_scalar_int<f32mem, SSECC, int_x86_sse_cmp_ss,
+ "cmp${cc}ss\t{$src, $dst|$dst, $src}",
+ SSE_ALU_F32S>, XS;
+ defm Int_CMPSD : sse12_cmp_scalar_int<f64mem, SSECC, int_x86_sse2_cmp_sd,
+ "cmp${cc}sd\t{$src, $dst|$dst, $src}",
+ SSE_ALU_F64S>,
+ XD;
+}
}
@@ -2377,46 +2435,50 @@ multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
let Defs = [EFLAGS] in {
defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
- "ucomiss">, TB, VEX, VEX_LIG;
+ "ucomiss">, PS, VEX, VEX_LIG;
defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
- "ucomisd">, TB, OpSize, VEX, VEX_LIG;
+ "ucomisd">, PD, VEX, VEX_LIG;
let Pattern = []<dag> in {
defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
- "comiss">, TB, VEX, VEX_LIG;
+ "comiss">, PS, VEX, VEX_LIG;
defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
- "comisd">, TB, OpSize, VEX, VEX_LIG;
+ "comisd">, PD, VEX, VEX_LIG;
}
- defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
- load, "ucomiss">, TB, VEX;
- defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
- load, "ucomisd">, TB, OpSize, VEX;
+ let isCodeGenOnly = 1 in {
+ defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
+ load, "ucomiss">, PS, VEX;
+ defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
+ load, "ucomisd">, PD, VEX;
- defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
- load, "comiss">, TB, VEX;
- defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
- load, "comisd">, TB, OpSize, VEX;
+ defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
+ load, "comiss">, PS, VEX;
+ defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
+ load, "comisd">, PD, VEX;
+ }
defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
- "ucomiss">, TB;
+ "ucomiss">, PS;
defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
- "ucomisd">, TB, OpSize;
+ "ucomisd">, PD;
let Pattern = []<dag> in {
defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
- "comiss">, TB;
+ "comiss">, PS;
defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
- "comisd">, TB, OpSize;
+ "comisd">, PD;
}
- defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
- load, "ucomiss">, TB;
- defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
- load, "ucomisd">, TB, OpSize;
+ let isCodeGenOnly = 1 in {
+ defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
+ load, "ucomiss">, PS;
+ defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
+ load, "ucomisd">, PD;
- defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
- "comiss">, TB;
- defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
- "comisd">, TB, OpSize;
+ defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
+ "comiss">, PS;
+ defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
+ "comisd">, PD;
+ }
} // Defs = [EFLAGS]
// sse12_cmp_packed - sse 1 & 2 compare packed instructions
@@ -2436,7 +2498,7 @@ multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
Sched<[WriteFAddLd, ReadAfterLd]>;
// Accept explicit immediate argument form instead of comparison code.
- let neverHasSideEffects = 1 in {
+ let isAsmParserOnly = 1, hasSideEffects = 0 in {
def rri_alt : PIi8<0xC2, MRMSrcReg,
(outs RC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
asm_alt, [], itins.rr, d>, Sched<[WriteFAdd]>;
@@ -2450,28 +2512,28 @@ multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
defm VCMPPS : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse_cmp_ps,
"cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SSEPackedSingle>, TB, VEX_4V;
+ SSEPackedSingle>, PS, VEX_4V;
defm VCMPPD : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse2_cmp_pd,
"cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SSEPackedDouble>, TB, OpSize, VEX_4V;
+ SSEPackedDouble>, PD, VEX_4V;
defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_ps_256,
"cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SSEPackedSingle>, TB, VEX_4V, VEX_L;
+ SSEPackedSingle>, PS, VEX_4V, VEX_L;
defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_pd_256,
"cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L;
+ SSEPackedDouble>, PD, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst" in {
defm CMPPS : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse_cmp_ps,
"cmp${cc}ps\t{$src2, $dst|$dst, $src2}",
"cmpps\t{$cc, $src2, $dst|$dst, $src2, $cc}",
- SSEPackedSingle, SSE_ALU_F32P>, TB;
+ SSEPackedSingle, SSE_ALU_F32P>, PS;
defm CMPPD : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse2_cmp_pd,
"cmp${cc}pd\t{$src2, $dst|$dst, $src2}",
"cmppd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
- SSEPackedDouble, SSE_ALU_F64P>, TB, OpSize;
+ SSEPackedDouble, SSE_ALU_F64P>, PD;
}
let Predicates = [HasAVX] in {
@@ -2512,7 +2574,7 @@ def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
// SSE 1 & 2 - Shuffle Instructions
//===----------------------------------------------------------------------===//
-/// sse12_shuffle - sse 1 & 2 shuffle instructions
+/// sse12_shuffle - sse 1 & 2 fp shuffle instructions
multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
ValueType vt, string asm, PatFrag mem_frag,
Domain d, bit IsConvertibleToThreeAddress = 0> {
@@ -2520,37 +2582,35 @@ multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
(ins RC:$src1, x86memop:$src2, i8imm:$src3), asm,
[(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
(i8 imm:$src3))))], IIC_SSE_SHUFP, d>,
- Sched<[WriteShuffleLd, ReadAfterLd]>;
+ Sched<[WriteFShuffleLd, ReadAfterLd]>;
let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2, i8imm:$src3), asm,
[(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
(i8 imm:$src3))))], IIC_SSE_SHUFP, d>,
- Sched<[WriteShuffle]>;
+ Sched<[WriteFShuffle]>;
}
defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
"shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- loadv4f32, SSEPackedSingle>, TB, VEX_4V;
+ loadv4f32, SSEPackedSingle>, PS, VEX_4V;
defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
"shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- loadv8f32, SSEPackedSingle>, TB, VEX_4V, VEX_L;
+ loadv8f32, SSEPackedSingle>, PS, VEX_4V, VEX_L;
defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
"shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- loadv2f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
+ loadv2f64, SSEPackedDouble>, PD, VEX_4V;
defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
"shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- loadv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L;
+ loadv4f64, SSEPackedDouble>, PD, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst" in {
defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
"shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
- TB;
+ memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>, PS;
defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
"shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- memopv2f64, SSEPackedDouble, 1 /* cvt to pshufd */>,
- TB, OpSize;
+ memopv2f64, SSEPackedDouble, 1 /* cvt to pshufd */>, PD;
}
let Predicates = [HasAVX] in {
@@ -2598,10 +2658,10 @@ let Predicates = [UseSSE2] in {
}
//===----------------------------------------------------------------------===//
-// SSE 1 & 2 - Unpack Instructions
+// SSE 1 & 2 - Unpack FP Instructions
//===----------------------------------------------------------------------===//
-/// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
+/// sse12_unpack_interleave - sse 1 & 2 fp unpack and interleave
multiclass sse12_unpack_interleave<bits<8> opc, SDNode OpNode, ValueType vt,
PatFrag mem_frag, RegisterClass RC,
X86MemOperand x86memop, string asm,
@@ -2610,55 +2670,55 @@ multiclass sse12_unpack_interleave<bits<8> opc, SDNode OpNode, ValueType vt,
(outs RC:$dst), (ins RC:$src1, RC:$src2),
asm, [(set RC:$dst,
(vt (OpNode RC:$src1, RC:$src2)))],
- IIC_SSE_UNPCK, d>, Sched<[WriteShuffle]>;
+ IIC_SSE_UNPCK, d>, Sched<[WriteFShuffle]>;
def rm : PI<opc, MRMSrcMem,
(outs RC:$dst), (ins RC:$src1, x86memop:$src2),
asm, [(set RC:$dst,
(vt (OpNode RC:$src1,
(mem_frag addr:$src2))))],
IIC_SSE_UNPCK, d>,
- Sched<[WriteShuffleLd, ReadAfterLd]>;
+ Sched<[WriteFShuffleLd, ReadAfterLd]>;
}
defm VUNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, loadv4f32,
VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedSingle>, TB, VEX_4V;
+ SSEPackedSingle>, PS, VEX_4V;
defm VUNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, loadv2f64,
VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedDouble>, TB, OpSize, VEX_4V;
+ SSEPackedDouble>, PD, VEX_4V;
defm VUNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, loadv4f32,
VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedSingle>, TB, VEX_4V;
+ SSEPackedSingle>, PS, VEX_4V;
defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, loadv2f64,
VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedDouble>, TB, OpSize, VEX_4V;
+ SSEPackedDouble>, PD, VEX_4V;
defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, loadv8f32,
VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedSingle>, TB, VEX_4V, VEX_L;
+ SSEPackedSingle>, PS, VEX_4V, VEX_L;
defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, loadv4f64,
VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L;
+ SSEPackedDouble>, PD, VEX_4V, VEX_L;
defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, loadv8f32,
VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedSingle>, TB, VEX_4V, VEX_L;
+ SSEPackedSingle>, PS, VEX_4V, VEX_L;
defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, loadv4f64,
VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L;
+ SSEPackedDouble>, PD, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst" in {
defm UNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memopv4f32,
VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
- SSEPackedSingle>, TB;
+ SSEPackedSingle>, PS;
defm UNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memopv2f64,
VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
- SSEPackedDouble>, TB, OpSize;
+ SSEPackedDouble>, PD;
defm UNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memopv4f32,
VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
- SSEPackedSingle>, TB;
+ SSEPackedSingle>, PS;
defm UNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memopv2f64,
VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
- SSEPackedDouble>, TB, OpSize;
+ SSEPackedDouble>, PD;
} // Constraints = "$src1 = $dst"
let Predicates = [HasAVX1Only] in {
@@ -2714,16 +2774,15 @@ multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
let Predicates = [HasAVX] in {
defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
- "movmskps", SSEPackedSingle>, TB, VEX;
+ "movmskps", SSEPackedSingle>, PS, VEX;
defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
- "movmskpd", SSEPackedDouble>, TB,
- OpSize, VEX;
+ "movmskpd", SSEPackedDouble>, PD, VEX;
defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
- "movmskps", SSEPackedSingle>, TB,
+ "movmskps", SSEPackedSingle>, PS,
VEX, VEX_L;
defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
- "movmskpd", SSEPackedDouble>, TB,
- OpSize, VEX, VEX_L;
+ "movmskpd", SSEPackedDouble>, PD,
+ VEX, VEX_L;
def : Pat<(i32 (X86fgetsign FR32:$src)),
(VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
@@ -2738,9 +2797,9 @@ let Predicates = [HasAVX] in {
}
defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
- SSEPackedSingle>, TB;
+ SSEPackedSingle>, PS;
defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
- SSEPackedDouble>, TB, OpSize;
+ SSEPackedDouble>, PD;
def : Pat<(i32 (X86fgetsign FR32:$src)),
(MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>,
@@ -2807,11 +2866,14 @@ let Predicates = [HasAVX2] in
// These are ordered here for pattern ordering requirements with the fp versions
-defm PAND : PDI_binop_all<0xDB, "pand", and, v2i64, v4i64, SSE_BIT_ITINS_P, 1>;
-defm POR : PDI_binop_all<0xEB, "por", or, v2i64, v4i64, SSE_BIT_ITINS_P, 1>;
-defm PXOR : PDI_binop_all<0xEF, "pxor", xor, v2i64, v4i64, SSE_BIT_ITINS_P, 1>;
+defm PAND : PDI_binop_all<0xDB, "pand", and, v2i64, v4i64,
+ SSE_VEC_BIT_ITINS_P, 1>;
+defm POR : PDI_binop_all<0xEB, "por", or, v2i64, v4i64,
+ SSE_VEC_BIT_ITINS_P, 1>;
+defm PXOR : PDI_binop_all<0xEF, "pxor", xor, v2i64, v4i64,
+ SSE_VEC_BIT_ITINS_P, 1>;
defm PANDN : PDI_binop_all<0xDF, "pandn", X86andnp, v2i64, v4i64,
- SSE_BIT_ITINS_P, 0>;
+ SSE_VEC_BIT_ITINS_P, 0>;
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Logical Instructions
@@ -2823,20 +2885,20 @@ multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
SDNode OpNode, OpndItins itins> {
defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
FR32, f32, f128mem, memopfsf32, SSEPackedSingle, itins, 0>,
- TB, VEX_4V;
+ PS, VEX_4V;
defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
FR64, f64, f128mem, memopfsf64, SSEPackedDouble, itins, 0>,
- TB, OpSize, VEX_4V;
+ PD, VEX_4V;
let Constraints = "$src1 = $dst" in {
defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
f32, f128mem, memopfsf32, SSEPackedSingle, itins>,
- TB;
+ PS;
defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
f64, f128mem, memopfsf64, SSEPackedDouble, itins>,
- TB, OpSize;
+ PD;
}
}
@@ -2862,7 +2924,7 @@ multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "ps"), f256mem,
[(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
[(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
- (loadv4i64 addr:$src2)))], 0>, TB, VEX_4V, VEX_L;
+ (loadv4i64 addr:$src2)))], 0>, PS, VEX_4V, VEX_L;
defm V#NAME#PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
!strconcat(OpcodeStr, "pd"), f256mem,
@@ -2870,7 +2932,7 @@ multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
(bc_v4i64 (v4f64 VR256:$src2))))],
[(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
(loadv4i64 addr:$src2)))], 0>,
- TB, OpSize, VEX_4V, VEX_L;
+ PD, VEX_4V, VEX_L;
// In AVX no need to add a pattern for 128-bit logical rr ps, because they
// are all promoted to v2i64, and the patterns are covered by the int
@@ -2879,7 +2941,7 @@ multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
!strconcat(OpcodeStr, "ps"), f128mem, [],
[(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
- (loadv2i64 addr:$src2)))], 0>, TB, VEX_4V;
+ (loadv2i64 addr:$src2)))], 0>, PS, VEX_4V;
defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
!strconcat(OpcodeStr, "pd"), f128mem,
@@ -2887,21 +2949,21 @@ multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
(bc_v2i64 (v2f64 VR128:$src2))))],
[(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
(loadv2i64 addr:$src2)))], 0>,
- TB, OpSize, VEX_4V;
+ PD, VEX_4V;
let Constraints = "$src1 = $dst" in {
defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
!strconcat(OpcodeStr, "ps"), f128mem,
[(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
[(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
- (memopv2i64 addr:$src2)))]>, TB;
+ (memopv2i64 addr:$src2)))]>, PS;
defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
!strconcat(OpcodeStr, "pd"), f128mem,
[(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
(bc_v2i64 (v2f64 VR128:$src2))))],
[(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
- (memopv2i64 addr:$src2)))]>, TB, OpSize;
+ (memopv2i64 addr:$src2)))]>, PD;
}
}
@@ -2911,6 +2973,19 @@ defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
let isCommutable = 0 in
defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;
+// AVX1 requires type coercions in order to fold loads directly into logical
+// operations.
+let Predicates = [HasAVX1Only] in {
+ def : Pat<(bc_v8f32 (and VR256:$src1, (loadv4i64 addr:$src2))),
+ (VANDPSYrm VR256:$src1, addr:$src2)>;
+ def : Pat<(bc_v8f32 (or VR256:$src1, (loadv4i64 addr:$src2))),
+ (VORPSYrm VR256:$src1, addr:$src2)>;
+ def : Pat<(bc_v8f32 (xor VR256:$src1, (loadv4i64 addr:$src2))),
+ (VXORPSYrm VR256:$src1, addr:$src2)>;
+ def : Pat<(bc_v8f32 (X86andnp VR256:$src1, (loadv4i64 addr:$src2))),
+ (VANDNPSYrm VR256:$src1, addr:$src2)>;
+}
+
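[Illustrative aside, not part of the patch: a minimal C sketch of the source shape the AVX1 type-coercion patterns above are aimed at. The function name is hypothetical, and whether a given compiler folds the load into a single vandps for this exact sequence is an assumption; the snippet only shows why 256-bit integer bitwise ops end up in the single-precision domain on AVX1-only targets.]

#include <immintrin.h>

/* 256-bit bitwise AND of integer data on an AVX1-only target: there is no
   256-bit integer AND until AVX2, so the operation goes through the
   single-precision domain. The bitcasts correspond to the bc_v8f32 patterns
   above, which allow the load to fold directly into vandps. */
static __m256i and256_avx1(__m256i x, const __m256i *p) {
    __m256 xf = _mm256_castsi256_ps(x);
    __m256 mf = _mm256_castsi256_ps(_mm256_loadu_si256(p));
    return _mm256_castps_si256(_mm256_and_ps(xf, mf));
}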
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Arithmetic Instructions
//===----------------------------------------------------------------------===//
@@ -2932,25 +3007,25 @@ multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr,
SDNode OpNode, SizeItins itins> {
defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
VR128, v4f32, f128mem, loadv4f32,
- SSEPackedSingle, itins.s, 0>, TB, VEX_4V;
+ SSEPackedSingle, itins.s, 0>, PS, VEX_4V;
defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
VR128, v2f64, f128mem, loadv2f64,
- SSEPackedDouble, itins.d, 0>, TB, OpSize, VEX_4V;
+ SSEPackedDouble, itins.d, 0>, PD, VEX_4V;
defm V#NAME#PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"),
OpNode, VR256, v8f32, f256mem, loadv8f32,
- SSEPackedSingle, itins.s, 0>, TB, VEX_4V, VEX_L;
+ SSEPackedSingle, itins.s, 0>, PS, VEX_4V, VEX_L;
defm V#NAME#PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"),
OpNode, VR256, v4f64, f256mem, loadv4f64,
- SSEPackedDouble, itins.d, 0>, TB, OpSize, VEX_4V, VEX_L;
+ SSEPackedDouble, itins.d, 0>, PD, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst" in {
defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
v4f32, f128mem, memopv4f32, SSEPackedSingle,
- itins.s>, TB;
+ itins.s>, PS;
defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
v2f64, f128mem, memopv2f64, SSEPackedDouble,
- itins.d>, TB, OpSize;
+ itins.d>, PD;
}
}
@@ -3017,6 +3092,214 @@ let isCodeGenOnly = 1 in {
basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SSE_ALU_ITINS_S>;
}
+// Patterns used to select SSE scalar fp arithmetic instructions from
+// a scalar fp operation followed by a blend.
+//
+// These patterns know, for example, how to select an ADDSS from a
+// float add plus vector insert.
+//
+// The effect is that the backend no longer emits unnecessary vector
+// insert instructions immediately after SSE scalar fp instructions
+// like addss or mulss.
+//
+// For example, given the following code:
+// __m128 foo(__m128 A, __m128 B) {
+// A[0] += B[0];
+// return A;
+// }
+//
+// previously we generated:
+// addss %xmm0, %xmm1
+// movss %xmm1, %xmm0
+//
+// we now generate:
+// addss %xmm1, %xmm0
+
+let Predicates = [UseSSE1] in {
+ def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fadd
+ (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
+ FR32:$src))))),
+ (ADDSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
+ def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fsub
+ (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
+ FR32:$src))))),
+ (SUBSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
+ def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fmul
+ (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
+ FR32:$src))))),
+ (MULSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
+ def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fdiv
+ (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
+ FR32:$src))))),
+ (DIVSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
+}
+
+let Predicates = [UseSSE2] in {
+ // SSE2 patterns to select scalar double-precision fp arithmetic instructions
+
+ def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fadd
+ (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
+ FR64:$src))))),
+ (ADDSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
+ def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fsub
+ (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
+ FR64:$src))))),
+ (SUBSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
+ def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fmul
+ (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
+ FR64:$src))))),
+ (MULSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
+ def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fdiv
+ (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
+ FR64:$src))))),
+ (DIVSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
+}
+
+let Predicates = [UseSSE41] in {
+ // If the subtarget has SSE4.1 but not AVX, the vector insert
+  // instruction is lowered into an X86insertps rather than an X86Movss.
+ // When selecting SSE scalar single-precision fp arithmetic instructions,
+ // make sure that we correctly match the X86insertps.
+
+ def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
+ (fadd (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
+ FR32:$src))), (iPTR 0))),
+ (ADDSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
+ def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
+ (fsub (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
+ FR32:$src))), (iPTR 0))),
+ (SUBSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
+ def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
+ (fmul (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
+ FR32:$src))), (iPTR 0))),
+ (MULSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
+ def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
+ (fdiv (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
+ FR32:$src))), (iPTR 0))),
+ (DIVSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
+}
+
+let Predicates = [HasAVX] in {
+ // The following patterns select AVX Scalar single/double precision fp
+ // arithmetic instructions.
+
+ def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fadd
+ (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
+ FR64:$src))))),
+ (VADDSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
+ def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fsub
+ (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
+ FR64:$src))))),
+ (VSUBSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
+ def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fmul
+ (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
+ FR64:$src))))),
+ (VMULSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
+ def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fdiv
+ (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
+ FR64:$src))))),
+ (VDIVSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
+ def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
+ (fadd (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
+ FR32:$src))), (iPTR 0))),
+ (VADDSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
+ def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
+ (fsub (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
+ FR32:$src))), (iPTR 0))),
+ (VSUBSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
+ def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
+ (fmul (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
+ FR32:$src))), (iPTR 0))),
+ (VMULSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
+ def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
+ (fdiv (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
+ FR32:$src))), (iPTR 0))),
+ (VDIVSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
+}
+
+// Patterns used to select SSE scalar fp arithmetic instructions from
+// a vector packed single/double fp operation followed by a vector insert.
+//
+// The effect is that the backend converts the packed fp instruction
+// followed by a vector insert into a single SSE scalar fp instruction.
+//
+// For example, given the following code:
+// __m128 foo(__m128 A, __m128 B) {
+// __m128 C = A + B;
+// return (__m128) {C[0], A[1], A[2], A[3]};
+// }
+//
+// previously we generated:
+// addps %xmm0, %xmm1
+// movss %xmm1, %xmm0
+//
+// we now generate:
+// addss %xmm1, %xmm0
+
+let Predicates = [UseSSE1] in {
+ def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
+ (fadd (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
+ (ADDSSrr_Int v4f32:$dst, v4f32:$src)>;
+ def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
+ (fsub (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
+ (SUBSSrr_Int v4f32:$dst, v4f32:$src)>;
+ def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
+ (fmul (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
+ (MULSSrr_Int v4f32:$dst, v4f32:$src)>;
+ def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
+ (fdiv (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
+ (DIVSSrr_Int v4f32:$dst, v4f32:$src)>;
+}
+
+let Predicates = [UseSSE2] in {
+ // SSE2 patterns to select scalar double-precision fp arithmetic instructions
+ // from a packed double-precision fp instruction plus movsd.
+
+ def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
+ (fadd (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
+ (ADDSDrr_Int v2f64:$dst, v2f64:$src)>;
+ def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
+ (fsub (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
+ (SUBSDrr_Int v2f64:$dst, v2f64:$src)>;
+ def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
+ (fmul (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
+ (MULSDrr_Int v2f64:$dst, v2f64:$src)>;
+ def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
+ (fdiv (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
+ (DIVSDrr_Int v2f64:$dst, v2f64:$src)>;
+}
+
+let Predicates = [HasAVX] in {
+  // The following patterns select AVX Scalar single/double precision fp
+  // arithmetic instructions from a packed single/double precision fp
+  // instruction plus movss/movsd.
+
+ def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
+ (fadd (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
+ (VADDSSrr_Int v4f32:$dst, v4f32:$src)>;
+ def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
+ (fsub (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
+ (VSUBSSrr_Int v4f32:$dst, v4f32:$src)>;
+ def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
+ (fmul (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
+ (VMULSSrr_Int v4f32:$dst, v4f32:$src)>;
+ def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
+ (fdiv (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
+ (VDIVSSrr_Int v4f32:$dst, v4f32:$src)>;
+ def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
+ (fadd (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
+ (VADDSDrr_Int v2f64:$dst, v2f64:$src)>;
+ def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
+ (fsub (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
+ (VSUBSDrr_Int v2f64:$dst, v2f64:$src)>;
+ def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
+ (fmul (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
+ (VMULSDrr_Int v2f64:$dst, v2f64:$src)>;
+ def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
+ (fdiv (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
+ (VDIVSDrr_Int v2f64:$dst, v2f64:$src)>;
+}
+
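[Illustrative aside, not part of the patch: the same "packed fp operation followed by a vector insert" shape described in the comments above, written with standard SSE intrinsics. The function name is hypothetical, and that a particular front end produces exactly this DAG is an assumption; the point is only to show the source-level pattern these selection rules target.]

#include <xmmintrin.h>

/* Packed add whose result is kept only in lane 0. With the patterns above
   the backend can select a single addss instead of addps + movss. */
static __m128 first_lane_sum(__m128 A, __m128 B) {
    __m128 C = _mm_add_ps(A, B);   /* packed fadd */
    return _mm_move_ss(A, C);      /* {C[0], A[1], A[2], A[3]} */
}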
/// Unop Arithmetic
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
@@ -3069,6 +3352,7 @@ let Predicates = [HasAVX], hasSideEffects = 0 in {
"ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>, VEX_4V, VEX_LIG,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
+ let isCodeGenOnly = 1 in
def V#NAME#SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, ssmem:$src2),
!strconcat("v", OpcodeStr,
@@ -3089,6 +3373,7 @@ let Predicates = [HasAVX], hasSideEffects = 0 in {
!strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
[(set FR32:$dst, (OpNode (load addr:$src)))], itins.rm>, XS,
Requires<[UseSSE1, OptForSize]>, Sched<[itins.Sched.Folded]>;
+let isCodeGenOnly = 1 in {
def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (F32Int VR128:$src))], itins.rr>,
@@ -3098,6 +3383,7 @@ let Predicates = [HasAVX], hasSideEffects = 0 in {
[(set VR128:$dst, (F32Int sse_load_f32:$src))], itins.rm>,
Sched<[itins.Sched.Folded]>;
}
+}
/// sse1_fp_unop_s_rw - SSE1 unops where vector form has a read-write operand.
multiclass sse1_fp_unop_rw<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -3115,6 +3401,7 @@ let Predicates = [HasAVX], hasSideEffects = 0 in {
"ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>, VEX_4V, VEX_LIG,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
+ let isCodeGenOnly = 1 in
def V#NAME#SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, ssmem:$src2),
!strconcat("v", OpcodeStr,
@@ -3135,7 +3422,7 @@ let Predicates = [HasAVX], hasSideEffects = 0 in {
!strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
[(set FR32:$dst, (OpNode (load addr:$src)))], itins.rm>, XS,
Requires<[UseSSE1, OptForSize]>, Sched<[itins.Sched.Folded]>;
- let Constraints = "$src1 = $dst" in {
+ let isCodeGenOnly = 1, Constraints = "$src1 = $dst" in {
def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
@@ -3188,6 +3475,7 @@ let Predicates = [HasAVX] in {
multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
Intrinsic V4F32Int, Intrinsic V8F32Int,
OpndItins itins> {
+let isCodeGenOnly = 1 in {
let Predicates = [HasAVX] in {
def V#NAME#PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat("v", OpcodeStr,
@@ -3220,6 +3508,7 @@ let Predicates = [HasAVX] in {
!strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))],
itins.rm>, Sched<[itins.Sched.Folded]>;
+} // isCodeGenOnly = 1
}
/// sse2_fp_unop_s - SSE2 unops in scalar form.
@@ -3238,6 +3527,7 @@ let Predicates = [HasAVX], hasSideEffects = 0 in {
"sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>, VEX_4V, VEX_LIG,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
+ let isCodeGenOnly = 1 in
def V#NAME#SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, sdmem:$src2),
!strconcat("v", OpcodeStr,
@@ -3256,6 +3546,7 @@ let Predicates = [HasAVX], hasSideEffects = 0 in {
!strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
[(set FR64:$dst, (OpNode (load addr:$src)))], itins.rm>, XD,
Requires<[UseSSE2, OptForSize]>, Sched<[itins.Sched.Folded]>;
+let isCodeGenOnly = 1 in {
def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (F64Int VR128:$src))], itins.rr>,
@@ -3265,6 +3556,7 @@ let Predicates = [HasAVX], hasSideEffects = 0 in {
[(set VR128:$dst, (F64Int sse_load_f64:$src))], itins.rm>,
Sched<[itins.Sched.Folded]>;
}
+}
/// sse2_fp_unop_p - SSE2 unops in vector forms.
multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
@@ -3455,19 +3747,14 @@ def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"movnti{l}\t{$src, $dst|$dst, $src}",
[(nontemporalstore (i32 GR32:$src), addr:$dst)],
IIC_SSE_MOVNT>,
- TB, Requires<[HasSSE2]>;
+ PS, Requires<[HasSSE2]>;
def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"movnti{q}\t{$src, $dst|$dst, $src}",
[(nontemporalstore (i64 GR64:$src), addr:$dst)],
IIC_SSE_MOVNT>,
- TB, Requires<[HasSSE2]>;
+ PS, Requires<[HasSSE2]>;
} // SchedRW = [WriteStore]
-def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
- (VMOVNTDQmr addr:$dst, VR128:$src)>, Requires<[HasAVX]>;
-
-def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
- (MOVNTDQmr addr:$dst, VR128:$src)>, Requires<[UseSSE2]>;
} // AddedComplexity
//===----------------------------------------------------------------------===//
@@ -3490,17 +3777,23 @@ def PREFETCHNTA : I<0x18, MRM0m, (outs), (ins i8mem:$src),
IIC_SSE_PREFETCH>, TB;
}
-// FIXME: How should these memory instructions be modeled?
+// FIXME: How should the flush instruction be modeled?
let SchedRW = [WriteLoad] in {
// Flush cache
def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
"clflush\t$src", [(int_x86_sse2_clflush addr:$src)],
IIC_SSE_PREFETCH>, TB, Requires<[HasSSE2]>;
+}
+let SchedRW = [WriteNop] in {
// Pause. This "instruction" is encoded as "rep; nop", so even though it
// was introduced with SSE2, it's backward compatible.
-def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", [], IIC_SSE_PAUSE>, REP;
+def PAUSE : I<0x90, RawFrm, (outs), (ins),
+ "pause", [(int_x86_sse2_pause)], IIC_SSE_PAUSE>,
+ OBXS, Requires<[HasSSE2]>;
+}
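[Illustrative aside, not part of the patch: a minimal usage sketch assuming the standard _mm_pause intrinsic, which lowers to the int_x86_sse2_pause intrinsic that the pattern above now selects as PAUSE. The function name is hypothetical.]

#include <immintrin.h>

/* Spin-wait hint: _mm_pause() emits "pause", encoded as F3 90 (rep; nop),
   so older CPUs simply execute it as a plain nop. */
static void spin_wait_hint(void) {
    _mm_pause();
}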
+let SchedRW = [WriteFence] in {
// Load, store, and memory fence
def SFENCE : I<0xAE, MRM_F8, (outs), (ins),
"sfence", [(int_x86_sse_sfence)], IIC_SSE_SFENCE>,
@@ -3557,7 +3850,8 @@ def VMOVDQUYrr : VSSI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
}
// For Disassembler
-let isCodeGenOnly = 1, hasSideEffects = 0, SchedRW = [WriteMove] in {
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
+ SchedRW = [WriteMove] in {
def VMOVDQArr_REV : VPDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
"movdqa\t{$src, $dst|$dst, $src}", [],
IIC_SSE_MOVA_P_RR>,
@@ -3621,7 +3915,7 @@ def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
[], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>;
// For Disassembler
-let isCodeGenOnly = 1, hasSideEffects = 0 in {
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
def MOVDQArr_REV : PDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
"movdqa\t{$src, $dst|$dst, $src}", [],
IIC_SSE_MOVA_P_RR>;
@@ -3787,6 +4081,10 @@ defm PADDQ : PDI_binop_all<0xD4, "paddq", add, v2i64, v4i64,
SSE_INTALUQ_ITINS_P, 1>;
defm PMULLW : PDI_binop_all<0xD5, "pmullw", mul, v8i16, v16i16,
SSE_INTMUL_ITINS_P, 1>;
+defm PMULHUW : PDI_binop_all<0xE4, "pmulhuw", mulhu, v8i16, v16i16,
+ SSE_INTMUL_ITINS_P, 1>;
+defm PMULHW : PDI_binop_all<0xE5, "pmulhw", mulhs, v8i16, v16i16,
+ SSE_INTMUL_ITINS_P, 1>;
defm PSUBB : PDI_binop_all<0xF8, "psubb", sub, v16i8, v32i8,
SSE_INTALU_ITINS_P, 0>;
defm PSUBW : PDI_binop_all<0xF9, "psubw", sub, v8i16, v16i16,
@@ -3821,10 +4119,6 @@ defm PADDUSB : PDI_binop_all_int<0xDC, "paddusb", int_x86_sse2_paddus_b,
int_x86_avx2_paddus_b, SSE_INTALU_ITINS_P, 1>;
defm PADDUSW : PDI_binop_all_int<0xDD, "paddusw", int_x86_sse2_paddus_w,
int_x86_avx2_paddus_w, SSE_INTALU_ITINS_P, 1>;
-defm PMULHUW : PDI_binop_all_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w,
- int_x86_avx2_pmulhu_w, SSE_INTMUL_ITINS_P, 1>;
-defm PMULHW : PDI_binop_all_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w,
- int_x86_avx2_pmulh_w, SSE_INTMUL_ITINS_P, 1>;
defm PMADDWD : PDI_binop_all_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd,
int_x86_avx2_pmadd_wd, SSE_PMADD, 1>;
defm PAVGB : PDI_binop_all_int<0xE0, "pavgb", int_x86_sse2_pavg_b,
@@ -4043,17 +4337,6 @@ defm PCMPGTD : PDI_binop_all<0x66, "pcmpgtd", X86pcmpgt, v4i32, v8i32,
SSE_INTALU_ITINS_P, 0>;
//===---------------------------------------------------------------------===//
-// SSE2 - Packed Integer Pack Instructions
-//===---------------------------------------------------------------------===//
-
-defm PACKSSWB : PDI_binop_all_int<0x63, "packsswb", int_x86_sse2_packsswb_128,
- int_x86_avx2_packsswb, SSE_INTALU_ITINS_P, 0>;
-defm PACKSSDW : PDI_binop_all_int<0x6B, "packssdw", int_x86_sse2_packssdw_128,
- int_x86_avx2_packssdw, SSE_INTALU_ITINS_P, 0>;
-defm PACKUSWB : PDI_binop_all_int<0x67, "packuswb", int_x86_sse2_packuswb_128,
- int_x86_avx2_packuswb, SSE_INTALU_ITINS_P, 0>;
-
-//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Shuffle Instructions
//===---------------------------------------------------------------------===//
@@ -4111,12 +4394,12 @@ let Predicates = [UseSSE2] in {
[(set VR128:$dst,
(vt128 (OpNode (bitconvert (memopv2i64 addr:$src1)),
(i8 imm:$src2))))], IIC_SSE_PSHUF_MI>,
- Sched<[WriteShuffleLd]>;
+ Sched<[WriteShuffleLd, ReadAfterLd]>;
}
}
} // ExeDomain = SSEPackedInt
-defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, v8i32, X86PShufd>, TB, OpSize;
+defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, v8i32, X86PShufd>, PD;
defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, v16i16, X86PShufhw>, XS;
defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, v16i16, X86PShuflw>, XD;
@@ -4135,6 +4418,136 @@ let Predicates = [UseSSE2] in {
}
//===---------------------------------------------------------------------===//
+// Packed Integer Pack Instructions (SSE & AVX)
+//===---------------------------------------------------------------------===//
+
+let ExeDomain = SSEPackedInt in {
+multiclass sse2_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
+ ValueType ArgVT, SDNode OpNode, PatFrag bc_frag,
+ bit Is2Addr = 1> {
+ def rr : PDI<opc, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst,
+ (OutVT (OpNode (ArgVT VR128:$src1), VR128:$src2)))]>,
+ Sched<[WriteShuffle]>;
+ def rm : PDI<opc, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst,
+ (OutVT (OpNode VR128:$src1,
+ (bc_frag (memopv2i64 addr:$src2)))))]>,
+ Sched<[WriteShuffleLd, ReadAfterLd]>;
+}
+
+multiclass sse2_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
+ ValueType ArgVT, SDNode OpNode, PatFrag bc_frag> {
+ def Yrr : PDI<opc, MRMSrcReg,
+ (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst,
+ (OutVT (OpNode (ArgVT VR256:$src1), VR256:$src2)))]>,
+ Sched<[WriteShuffle]>;
+ def Yrm : PDI<opc, MRMSrcMem,
+ (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst,
+ (OutVT (OpNode VR256:$src1,
+ (bc_frag (memopv4i64 addr:$src2)))))]>,
+ Sched<[WriteShuffleLd, ReadAfterLd]>;
+}
+
+multiclass sse4_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
+ ValueType ArgVT, SDNode OpNode, PatFrag bc_frag,
+ bit Is2Addr = 1> {
+ def rr : SS48I<opc, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst,
+ (OutVT (OpNode (ArgVT VR128:$src1), VR128:$src2)))]>,
+ Sched<[WriteShuffle]>;
+ def rm : SS48I<opc, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst,
+ (OutVT (OpNode VR128:$src1,
+ (bc_frag (memopv2i64 addr:$src2)))))]>,
+ Sched<[WriteShuffleLd, ReadAfterLd]>;
+}
+
+multiclass sse4_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
+ ValueType ArgVT, SDNode OpNode, PatFrag bc_frag> {
+ def Yrr : SS48I<opc, MRMSrcReg,
+ (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst,
+ (OutVT (OpNode (ArgVT VR256:$src1), VR256:$src2)))]>,
+ Sched<[WriteShuffle]>;
+ def Yrm : SS48I<opc, MRMSrcMem,
+ (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst,
+ (OutVT (OpNode VR256:$src1,
+ (bc_frag (memopv4i64 addr:$src2)))))]>,
+ Sched<[WriteShuffleLd, ReadAfterLd]>;
+}
+
+let Predicates = [HasAVX] in {
+ defm VPACKSSWB : sse2_pack<0x63, "vpacksswb", v16i8, v8i16, X86Packss,
+ bc_v8i16, 0>, VEX_4V;
+ defm VPACKSSDW : sse2_pack<0x6B, "vpackssdw", v8i16, v4i32, X86Packss,
+ bc_v4i32, 0>, VEX_4V;
+
+ defm VPACKUSWB : sse2_pack<0x67, "vpackuswb", v16i8, v8i16, X86Packus,
+ bc_v8i16, 0>, VEX_4V;
+ defm VPACKUSDW : sse4_pack<0x2B, "vpackusdw", v8i16, v4i32, X86Packus,
+ bc_v4i32, 0>, VEX_4V;
+}
+
+let Predicates = [HasAVX2] in {
+ defm VPACKSSWB : sse2_pack_y<0x63, "vpacksswb", v32i8, v16i16, X86Packss,
+ bc_v16i16>, VEX_4V, VEX_L;
+ defm VPACKSSDW : sse2_pack_y<0x6B, "vpackssdw", v16i16, v8i32, X86Packss,
+ bc_v8i32>, VEX_4V, VEX_L;
+
+ defm VPACKUSWB : sse2_pack_y<0x67, "vpackuswb", v32i8, v16i16, X86Packus,
+ bc_v16i16>, VEX_4V, VEX_L;
+ defm VPACKUSDW : sse4_pack_y<0x2B, "vpackusdw", v16i16, v8i32, X86Packus,
+ bc_v8i32>, VEX_4V, VEX_L;
+}
+
+let Constraints = "$src1 = $dst" in {
+ defm PACKSSWB : sse2_pack<0x63, "packsswb", v16i8, v8i16, X86Packss,
+ bc_v8i16>;
+ defm PACKSSDW : sse2_pack<0x6B, "packssdw", v8i16, v4i32, X86Packss,
+ bc_v4i32>;
+
+ defm PACKUSWB : sse2_pack<0x67, "packuswb", v16i8, v8i16, X86Packus,
+ bc_v8i16>;
+
+ let Predicates = [HasSSE41] in
+ defm PACKUSDW : sse4_pack<0x2B, "packusdw", v8i16, v4i32, X86Packus,
+ bc_v4i32>;
+}
+} // ExeDomain = SSEPackedInt
+
+//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Unpack Instructions
//===---------------------------------------------------------------------===//
@@ -4269,7 +4682,7 @@ def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
(outs GR32orGR64:$dst), (ins VR128:$src1, i32i8imm:$src2),
"vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
- imm:$src2))]>, TB, OpSize, VEX,
+ imm:$src2))]>, PD, VEX,
Sched<[WriteShuffle]>;
def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
(outs GR32orGR64:$dst), (ins VR128:$src1, i32i8imm:$src2),
@@ -4280,10 +4693,10 @@ def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
// Insert
let Predicates = [HasAVX] in
-defm VPINSRW : sse2_pinsrw<0>, TB, OpSize, VEX_4V;
+defm VPINSRW : sse2_pinsrw<0>, PD, VEX_4V;
let Predicates = [UseSSE2], Constraints = "$src1 = $dst" in
-defm PINSRW : sse2_pinsrw, TB, OpSize;
+defm PINSRW : sse2_pinsrw, PD;
} // ExeDomain = SSEPackedInt
@@ -4320,7 +4733,7 @@ def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst), (ins VR128:$src),
let ExeDomain = SSEPackedInt, SchedRW = [WriteStore] in {
-let Uses = [EDI], Predicates = [HasAVX,In32BitMode] in
+let Uses = [EDI], Predicates = [HasAVX,Not64BitMode] in
def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
(ins VR128:$src, VR128:$mask),
"maskmovdqu\t{$mask, $src|$src, $mask}",
@@ -4333,7 +4746,7 @@ def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
[(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
IIC_SSE_MASKMOV>, VEX;
-let Uses = [EDI], Predicates = [UseSSE2,In32BitMode] in
+let Uses = [EDI], Predicates = [UseSSE2,Not64BitMode] in
def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
"maskmovdqu\t{$mask, $src|$src, $mask}",
[(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
@@ -4434,7 +4847,7 @@ def VMOVPDI2DImr : VS2I<0x7E, MRMDestMem, (outs),
"movd\t{$src, $dst|$dst, $src}",
[(store (i32 (vector_extract (v4i32 VR128:$src),
(iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
- VEX, Sched<[WriteLoad]>;
+ VEX, Sched<[WriteStore]>;
def MOVPDI2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
@@ -4444,7 +4857,7 @@ def MOVPDI2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
"movd\t{$src, $dst|$dst, $src}",
[(store (i32 (vector_extract (v4i32 VR128:$src),
(iPTR 0))), addr:$dst)],
- IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
+ IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
def : Pat<(v8i32 (X86Vinsert (v8i32 immAllZerosV), GR32:$src2, (iPTR 0))),
(SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src2), sub_xmm)>;
@@ -4638,17 +5051,24 @@ def MOVPQI2QImr : S2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
IIC_SSE_MOVDQ>;
} // SchedRW
+// For disassembler only
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
+ SchedRW = [WriteVecLogic] in {
+def VMOVPQI2QIrr : VS2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
+ "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>, VEX;
+def MOVPQI2QIrr : S2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
+ "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>;
+}
+
//===---------------------------------------------------------------------===//
// Store / copy lower 64-bits of a XMM register.
//
-def VMOVLQ128mr : VS2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
- "movq\t{$src, $dst|$dst, $src}",
- [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX,
- Sched<[WriteStore]>;
-def MOVLQ128mr : S2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
- "movq\t{$src, $dst|$dst, $src}",
- [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)],
- IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
+let Predicates = [UseAVX] in
+def : Pat<(int_x86_sse2_storel_dq addr:$dst, VR128:$src),
+ (VMOVPQI2QImr addr:$dst, VR128:$src)>;
+let Predicates = [UseSSE2] in
+def : Pat<(int_x86_sse2_storel_dq addr:$dst, VR128:$src),
+ (MOVPQI2QImr addr:$dst, VR128:$src)>;
let isCodeGenOnly = 1, AddedComplexity = 20 in {
def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
@@ -4745,11 +5165,11 @@ multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set RC:$dst, (vt (OpNode RC:$src)))],
- IIC_SSE_MOV_LH>, Sched<[WriteShuffle]>;
+ IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set RC:$dst, (OpNode (mem_frag addr:$src)))],
- IIC_SSE_MOV_LH>, Sched<[WriteShuffleLd]>;
+ IIC_SSE_MOV_LH>, Sched<[WriteLoad]>;
}
let Predicates = [HasAVX] in {
@@ -4805,13 +5225,13 @@ multiclass sse3_replicate_dfp<string OpcodeStr> {
let neverHasSideEffects = 1 in
def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [], IIC_SSE_MOV_LH>, Sched<[WriteShuffle]>;
+ [], IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst,
(v2f64 (X86Movddup
(scalar_to_vector (loadf64 addr:$src)))))],
- IIC_SSE_MOV_LH>, Sched<[WriteShuffleLd]>;
+ IIC_SSE_MOV_LH>, Sched<[WriteLoad]>;
}
// FIXME: Merge with the above class when there are patterns for the ymm version
@@ -4819,13 +5239,13 @@ multiclass sse3_replicate_dfp_y<string OpcodeStr> {
def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (v4f64 (X86Movddup VR256:$src)))]>,
- Sched<[WriteShuffle]>;
+ Sched<[WriteFShuffle]>;
def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst,
(v4f64 (X86Movddup
(scalar_to_vector (loadf64 addr:$src)))))]>,
- Sched<[WriteShuffleLd]>;
+ Sched<[WriteLoad]>;
}
let Predicates = [HasAVX] in {
@@ -4915,24 +5335,78 @@ multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
let Predicates = [HasAVX] in {
let ExeDomain = SSEPackedSingle in {
defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
- f128mem, SSE_ALU_F32P, 0>, TB, XD, VEX_4V;
+ f128mem, SSE_ALU_F32P, 0>, XD, VEX_4V;
defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
- f256mem, SSE_ALU_F32P, 0>, TB, XD, VEX_4V, VEX_L;
+ f256mem, SSE_ALU_F32P, 0>, XD, VEX_4V, VEX_L;
}
let ExeDomain = SSEPackedDouble in {
defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
- f128mem, SSE_ALU_F64P, 0>, TB, OpSize, VEX_4V;
+ f128mem, SSE_ALU_F64P, 0>, PD, VEX_4V;
defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
- f256mem, SSE_ALU_F64P, 0>, TB, OpSize, VEX_4V, VEX_L;
+ f256mem, SSE_ALU_F64P, 0>, PD, VEX_4V, VEX_L;
}
}
let Constraints = "$src1 = $dst", Predicates = [UseSSE3] in {
let ExeDomain = SSEPackedSingle in
defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
- f128mem, SSE_ALU_F32P>, TB, XD;
+ f128mem, SSE_ALU_F32P>, XD;
let ExeDomain = SSEPackedDouble in
defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
- f128mem, SSE_ALU_F64P>, TB, OpSize;
+ f128mem, SSE_ALU_F64P>, PD;
+}
+
+// Patterns used to select 'addsub' instructions.
+let Predicates = [HasAVX] in {
+ // Constant 170 corresponds to the binary mask '10101010'.
+ // When used as a blend mask, it allows selecting eight elements from two
+  // input vectors as follows:
+ // - Even-numbered values in the destination are copied from
+ // the corresponding elements in the first input vector;
+ // - Odd-numbered values in the destination are copied from
+ // the corresponding elements in the second input vector.
+
+ def : Pat<(v8f32 (X86Blendi (v8f32 (fsub VR256:$lhs, VR256:$rhs)),
+ (v8f32 (fadd VR256:$lhs, VR256:$rhs)), (i32 170))),
+ (VADDSUBPSYrr VR256:$lhs, VR256:$rhs)>;
+
+ // Constant 10 corresponds to the binary mask '1010'.
+  // In the two patterns below, constant 10 is used as a blend mask to select
+  // - the 1st and 3rd elements from the first input vector (the 'fsub' node);
+  // - the 2nd and 4th elements from the second input vector (the 'fadd' node).
+
+ def : Pat<(v4f64 (X86Blendi (v4f64 (fsub VR256:$lhs, VR256:$rhs)),
+ (v4f64 (fadd VR256:$lhs, VR256:$rhs)), (i32 10))),
+ (VADDSUBPDYrr VR256:$lhs, VR256:$rhs)>;
+ def : Pat<(v4f64 (X86Blendi (v4f64 (fsub VR256:$lhs, VR256:$rhs)),
+ (v4f64 (fadd VR256:$lhs, VR256:$rhs)), (i32 10))),
+ (VADDSUBPDYrr VR256:$lhs, VR256:$rhs)>;
+ def : Pat<(v4f32 (X86Blendi (v4f32 (fsub VR128:$lhs, VR128:$rhs)),
+ (v4f32 (fadd VR128:$lhs, VR128:$rhs)), (i32 10))),
+ (VADDSUBPSrr VR128:$lhs, VR128:$rhs)>;
+ def : Pat<(v2f64 (X86Blendi (v2f64 (fsub VR128:$lhs, VR128:$rhs)),
+ (v2f64 (fadd VR128:$lhs, VR128:$rhs)), (i32 2))),
+ (VADDSUBPDrr VR128:$lhs, VR128:$rhs)>;
+ def : Pat<(v2f64 (X86Movsd (v2f64 (fadd VR128:$lhs, VR128:$rhs)),
+ (v2f64 (fsub VR128:$lhs, VR128:$rhs)))),
+ (VADDSUBPDrr VR128:$lhs, VR128:$rhs)>;
+}
+
+let Predicates = [UseSSE3] in {
+ // Constant 10 corresponds to the binary mask '1010'.
+ // In the pattern below, it is used as a blend mask to select:
+  // - the 1st and 3rd elements from the first input vector (the fsub node);
+  // - the 2nd and 4th elements from the second input vector (the fadd node).
+
+ def : Pat<(v4f32 (X86Blendi (v4f32 (fsub VR128:$lhs, VR128:$rhs)),
+ (v4f32 (fadd VR128:$lhs, VR128:$rhs)), (i32 10))),
+ (ADDSUBPSrr VR128:$lhs, VR128:$rhs)>;
+
+ def : Pat<(v2f64 (X86Blendi (v2f64 (fsub VR128:$lhs, VR128:$rhs)),
+ (v2f64 (fadd VR128:$lhs, VR128:$rhs)), (i32 2))),
+ (ADDSUBPDrr VR128:$lhs, VR128:$rhs)>;
+ def : Pat<(v2f64 (X86Movsd (v2f64 (fadd VR128:$lhs, VR128:$rhs)),
+ (v2f64 (fsub VR128:$lhs, VR128:$rhs)))),
+ (ADDSUBPDrr VR128:$lhs, VR128:$rhs)>;
}
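[Illustrative aside, not part of the patch: the intrinsic-level shape the blend-mask comments above describe, using the SSE4.1 _mm_blend_ps intrinsic. The function name is hypothetical, and whether a particular compiler folds this exact sequence into addsubps is an assumption; the snippet only illustrates the mask convention.]

#include <immintrin.h>

/* Blend mask 10 == 0b1010: even lanes come from the fsub result, odd lanes
   from the fadd result -- exactly the addsubps semantics matched by the
   patterns above. The 256-bit case uses mask 170 == 0b10101010 analogously.
   Requires SSE4.1 for _mm_blend_ps. */
static __m128 addsub_shape(__m128 a, __m128 b) {
    __m128 sum  = _mm_add_ps(a, b);
    __m128 diff = _mm_sub_ps(a, b);
    return _mm_blend_ps(diff, sum, 10);
}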
//===---------------------------------------------------------------------===//
@@ -5019,7 +5493,7 @@ multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
(ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (IntId128 VR128:$src))], IIC_SSE_PABS_RR>,
- OpSize, Sched<[WriteVecALU]>;
+ Sched<[WriteVecALU]>;
def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
(ins i128mem:$src),
@@ -5027,7 +5501,7 @@ multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
[(set VR128:$dst,
(IntId128
(bitconvert (memopv2i64 addr:$src))))], IIC_SSE_PABS_RM>,
- OpSize, Sched<[WriteVecALULd]>;
+ Sched<[WriteVecALULd]>;
}
/// SS3I_unop_rm_int_y - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
@@ -5037,14 +5511,14 @@ multiclass SS3I_unop_rm_int_y<bits<8> opc, string OpcodeStr,
(ins VR256:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (IntId256 VR256:$src))]>,
- OpSize, Sched<[WriteVecALU]>;
+ Sched<[WriteVecALU]>;
def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
(ins i256mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst,
(IntId256
- (bitconvert (memopv4i64 addr:$src))))]>, OpSize,
+ (bitconvert (memopv4i64 addr:$src))))]>,
Sched<[WriteVecALULd]>;
}
@@ -5164,7 +5638,7 @@ multiclass SS3I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>,
- OpSize, Sched<[itins.Sched]>;
+ Sched<[itins.Sched]>;
def rm : SS38I<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop:$src2),
!if(Is2Addr,
@@ -5172,7 +5646,7 @@ multiclass SS3I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set RC:$dst,
(OpVT (OpNode RC:$src1,
- (bitconvert (memop_frag addr:$src2)))))], itins.rm>, OpSize,
+ (bitconvert (memop_frag addr:$src2)))))], itins.rm>,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
@@ -5187,7 +5661,7 @@ multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
- OpSize, Sched<[itins.Sched]>;
+ Sched<[itins.Sched]>;
def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2),
!if(Is2Addr,
@@ -5195,24 +5669,25 @@ multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst,
(IntId128 VR128:$src1,
- (bitconvert (memopv2i64 addr:$src2))))]>, OpSize,
+ (bitconvert (memopv2i64 addr:$src2))))]>,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
multiclass SS3I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
- Intrinsic IntId256> {
+ Intrinsic IntId256,
+ X86FoldableSchedWrite Sched> {
let isCommutable = 1 in
def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>,
- OpSize;
+ Sched<[Sched]>;
def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, i256mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
- (IntId256 VR256:$src1,
- (bitconvert (loadv4i64 addr:$src2))))]>, OpSize;
+ (IntId256 VR256:$src1, (bitconvert (loadv4i64 addr:$src2))))]>,
+ Sched<[Sched.Folded, ReadAfterLd]>;
}
let ImmT = NoImm, Predicates = [HasAVX] in {
@@ -5281,16 +5756,20 @@ let isCommutable = 0 in {
SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
defm VPSHUFBY : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v32i8, VR256,
loadv4i64, i256mem,
- SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
+ SSE_PSHUFB, 0>, VEX_4V, VEX_L;
defm VPHADDSW : SS3I_binop_rm_int_y<0x03, "vphaddsw",
- int_x86_avx2_phadd_sw>, VEX_4V, VEX_L;
+ int_x86_avx2_phadd_sw,
+ WriteVecALU>, VEX_4V, VEX_L;
defm VPHSUBSW : SS3I_binop_rm_int_y<0x07, "vphsubsw",
- int_x86_avx2_phsub_sw>, VEX_4V, VEX_L;
+ int_x86_avx2_phsub_sw,
+ WriteVecALU>, VEX_4V, VEX_L;
defm VPMADDUBSW : SS3I_binop_rm_int_y<0x04, "vpmaddubsw",
- int_x86_avx2_pmadd_ub_sw>, VEX_4V, VEX_L;
+ int_x86_avx2_pmadd_ub_sw,
+ WriteVecIMul>, VEX_4V, VEX_L;
}
defm VPMULHRSW : SS3I_binop_rm_int_y<0x0B, "vpmulhrsw",
- int_x86_avx2_pmul_hr_sw>, VEX_4V, VEX_L;
+ int_x86_avx2_pmul_hr_sw,
+ WriteVecIMul>, VEX_4V, VEX_L;
}
// None of these have i8 immediate fields.
@@ -5338,7 +5817,7 @@ multiclass ssse3_palignr<string asm, bit Is2Addr = 1> {
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
- [], IIC_SSE_PALIGNRR>, OpSize, Sched<[WriteShuffle]>;
+ [], IIC_SSE_PALIGNRR>, Sched<[WriteShuffle]>;
let mayLoad = 1 in
def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
@@ -5346,7 +5825,7 @@ multiclass ssse3_palignr<string asm, bit Is2Addr = 1> {
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
- [], IIC_SSE_PALIGNRM>, OpSize, Sched<[WriteShuffleLd, ReadAfterLd]>;
+ [], IIC_SSE_PALIGNRM>, Sched<[WriteShuffleLd, ReadAfterLd]>;
}
}
@@ -5356,13 +5835,13 @@ multiclass ssse3_palignr_y<string asm, bit Is2Addr = 1> {
(ins VR256:$src1, VR256:$src2, i8imm:$src3),
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- []>, OpSize, Sched<[WriteShuffle]>;
+ []>, Sched<[WriteShuffle]>;
let mayLoad = 1 in
def R256rm : SS3AI<0x0F, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, i256mem:$src2, i8imm:$src3),
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- []>, OpSize, Sched<[WriteShuffleLd, ReadAfterLd]>;
+ []>, Sched<[WriteShuffleLd, ReadAfterLd]>;
}
}
@@ -5426,11 +5905,11 @@ def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait",
TB, Requires<[HasSSE3]>;
} // SchedRW
-def : InstAlias<"mwait\t{%eax, %ecx|ecx, eax}", (MWAITrr)>, Requires<[In32BitMode]>;
+def : InstAlias<"mwait\t{%eax, %ecx|ecx, eax}", (MWAITrr)>, Requires<[Not64BitMode]>;
def : InstAlias<"mwait\t{%rax, %rcx|rcx, rax}", (MWAITrr)>, Requires<[In64BitMode]>;
def : InstAlias<"monitor\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITORrrr)>,
- Requires<[In32BitMode]>;
+ Requires<[Not64BitMode]>;
def : InstAlias<"monitor\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORrrr)>,
Requires<[In64BitMode]>;
@@ -5442,63 +5921,82 @@ multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId,
OpndItins itins = DEFAULT_ITINS> {
def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (IntId VR128:$src))], itins.rr>, OpSize;
+ [(set VR128:$dst, (IntId VR128:$src))], itins.rr>,
+ Sched<[itins.Sched]>;
def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst,
(IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))],
- itins.rm>, OpSize;
+ itins.rm>, Sched<[itins.Sched.Folded]>;
}
multiclass SS41I_binop_rm_int16_y<bits<8> opc, string OpcodeStr,
- Intrinsic IntId> {
+ Intrinsic IntId, X86FoldableSchedWrite Sched> {
def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (IntId VR128:$src))]>, OpSize;
+ [(set VR256:$dst, (IntId VR128:$src))]>, Sched<[Sched]>;
def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (IntId (load addr:$src)))]>,
- OpSize;
+ Sched<[Sched.Folded]>;
}
let Predicates = [HasAVX] in {
defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw",
- int_x86_sse41_pmovsxbw>, VEX;
+ int_x86_sse41_pmovsxbw,
+ DEFAULT_ITINS_SHUFFLESCHED>, VEX;
defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd",
- int_x86_sse41_pmovsxwd>, VEX;
+ int_x86_sse41_pmovsxwd,
+ DEFAULT_ITINS_SHUFFLESCHED>, VEX;
defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq",
- int_x86_sse41_pmovsxdq>, VEX;
+ int_x86_sse41_pmovsxdq,
+ DEFAULT_ITINS_SHUFFLESCHED>, VEX;
defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw",
- int_x86_sse41_pmovzxbw>, VEX;
+ int_x86_sse41_pmovzxbw,
+ DEFAULT_ITINS_SHUFFLESCHED>, VEX;
defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd",
- int_x86_sse41_pmovzxwd>, VEX;
+ int_x86_sse41_pmovzxwd,
+ DEFAULT_ITINS_SHUFFLESCHED>, VEX;
defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq",
- int_x86_sse41_pmovzxdq>, VEX;
+ int_x86_sse41_pmovzxdq,
+ DEFAULT_ITINS_SHUFFLESCHED>, VEX;
}
let Predicates = [HasAVX2] in {
defm VPMOVSXBW : SS41I_binop_rm_int16_y<0x20, "vpmovsxbw",
- int_x86_avx2_pmovsxbw>, VEX, VEX_L;
+ int_x86_avx2_pmovsxbw,
+ WriteShuffle>, VEX, VEX_L;
defm VPMOVSXWD : SS41I_binop_rm_int16_y<0x23, "vpmovsxwd",
- int_x86_avx2_pmovsxwd>, VEX, VEX_L;
+ int_x86_avx2_pmovsxwd,
+ WriteShuffle>, VEX, VEX_L;
defm VPMOVSXDQ : SS41I_binop_rm_int16_y<0x25, "vpmovsxdq",
- int_x86_avx2_pmovsxdq>, VEX, VEX_L;
+ int_x86_avx2_pmovsxdq,
+ WriteShuffle>, VEX, VEX_L;
defm VPMOVZXBW : SS41I_binop_rm_int16_y<0x30, "vpmovzxbw",
- int_x86_avx2_pmovzxbw>, VEX, VEX_L;
+ int_x86_avx2_pmovzxbw,
+ WriteShuffle>, VEX, VEX_L;
defm VPMOVZXWD : SS41I_binop_rm_int16_y<0x33, "vpmovzxwd",
- int_x86_avx2_pmovzxwd>, VEX, VEX_L;
+ int_x86_avx2_pmovzxwd,
+ WriteShuffle>, VEX, VEX_L;
defm VPMOVZXDQ : SS41I_binop_rm_int16_y<0x35, "vpmovzxdq",
- int_x86_avx2_pmovzxdq>, VEX, VEX_L;
-}
-
-defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw, SSE_INTALU_ITINS_P>;
-defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd, SSE_INTALU_ITINS_P>;
-defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq, SSE_INTALU_ITINS_P>;
-defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw, SSE_INTALU_ITINS_P>;
-defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd, SSE_INTALU_ITINS_P>;
-defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq, SSE_INTALU_ITINS_P>;
+ int_x86_avx2_pmovzxdq,
+ WriteShuffle>, VEX, VEX_L;
+}
+
+defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw,
+ SSE_INTALU_ITINS_SHUFF_P>;
+defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd,
+ SSE_INTALU_ITINS_SHUFF_P>;
+defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq,
+ SSE_INTALU_ITINS_SHUFF_P>;
+defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw,
+ SSE_INTALU_ITINS_SHUFF_P>;
+defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd,
+ SSE_INTALU_ITINS_SHUFF_P>;
+defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq,
+ SSE_INTALU_ITINS_SHUFF_P>;
let Predicates = [HasAVX] in {
// Common patterns involving scalar load.
@@ -5590,91 +6088,67 @@ let Predicates = [UseSSE41] in {
(PMOVZXDQrm addr:$src)>;
}
-let Predicates = [HasAVX2] in {
- let AddedComplexity = 15 in {
- def : Pat<(v4i64 (X86vzmovly (v4i32 VR128:$src))),
- (VPMOVZXDQYrr VR128:$src)>;
- def : Pat<(v8i32 (X86vzmovly (v8i16 VR128:$src))),
- (VPMOVZXWDYrr VR128:$src)>;
- def : Pat<(v16i16 (X86vzmovly (v16i8 VR128:$src))),
- (VPMOVZXBWYrr VR128:$src)>;
- }
-
- def : Pat<(v4i64 (X86vsmovl (v4i32 VR128:$src))), (VPMOVSXDQYrr VR128:$src)>;
- def : Pat<(v8i32 (X86vsmovl (v8i16 VR128:$src))), (VPMOVSXWDYrr VR128:$src)>;
- def : Pat<(v16i16 (X86vsmovl (v16i8 VR128:$src))), (VPMOVSXBWYrr VR128:$src)>;
-}
-
-let Predicates = [HasAVX] in {
- def : Pat<(v2i64 (X86vsmovl (v4i32 VR128:$src))), (VPMOVSXDQrr VR128:$src)>;
- def : Pat<(v4i32 (X86vsmovl (v8i16 VR128:$src))), (VPMOVSXWDrr VR128:$src)>;
- def : Pat<(v8i16 (X86vsmovl (v16i8 VR128:$src))), (VPMOVSXBWrr VR128:$src)>;
-}
-
-let Predicates = [UseSSE41] in {
- def : Pat<(v2i64 (X86vsmovl (v4i32 VR128:$src))), (PMOVSXDQrr VR128:$src)>;
- def : Pat<(v4i32 (X86vsmovl (v8i16 VR128:$src))), (PMOVSXWDrr VR128:$src)>;
- def : Pat<(v8i16 (X86vsmovl (v16i8 VR128:$src))), (PMOVSXBWrr VR128:$src)>;
-}
-
-
multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId,
OpndItins itins = DEFAULT_ITINS> {
def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (IntId VR128:$src))], itins.rr>, OpSize;
+ [(set VR128:$dst, (IntId VR128:$src))], itins.rr>,
+ Sched<[itins.Sched]>;
def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst,
(IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))],
- itins.rm>,
- OpSize;
+ itins.rm>, Sched<[itins.Sched.Folded]>;
}
multiclass SS41I_binop_rm_int8_y<bits<8> opc, string OpcodeStr,
- Intrinsic IntId> {
+ Intrinsic IntId, X86FoldableSchedWrite Sched> {
def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (IntId VR128:$src))]>, OpSize;
+ [(set VR256:$dst, (IntId VR128:$src))]>, Sched<[Sched]>;
def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst), (ins i32mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst,
(IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
- OpSize;
+ Sched<[Sched.Folded]>;
}
let Predicates = [HasAVX] in {
-defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
- VEX;
-defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
- VEX;
-defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
- VEX;
-defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
- VEX;
+defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd,
+ DEFAULT_ITINS_SHUFFLESCHED>, VEX;
+defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq,
+ DEFAULT_ITINS_SHUFFLESCHED>, VEX;
+defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd,
+ DEFAULT_ITINS_SHUFFLESCHED>, VEX;
+defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq,
+ DEFAULT_ITINS_SHUFFLESCHED>, VEX;
}
let Predicates = [HasAVX2] in {
defm VPMOVSXBD : SS41I_binop_rm_int8_y<0x21, "vpmovsxbd",
- int_x86_avx2_pmovsxbd>, VEX, VEX_L;
+ int_x86_avx2_pmovsxbd, WriteShuffle>,
+ VEX, VEX_L;
defm VPMOVSXWQ : SS41I_binop_rm_int8_y<0x24, "vpmovsxwq",
- int_x86_avx2_pmovsxwq>, VEX, VEX_L;
+ int_x86_avx2_pmovsxwq, WriteShuffle>,
+ VEX, VEX_L;
defm VPMOVZXBD : SS41I_binop_rm_int8_y<0x31, "vpmovzxbd",
- int_x86_avx2_pmovzxbd>, VEX, VEX_L;
+ int_x86_avx2_pmovzxbd, WriteShuffle>,
+ VEX, VEX_L;
defm VPMOVZXWQ : SS41I_binop_rm_int8_y<0x34, "vpmovzxwq",
- int_x86_avx2_pmovzxwq>, VEX, VEX_L;
+ int_x86_avx2_pmovzxwq, WriteShuffle>,
+ VEX, VEX_L;
}
defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd,
- SSE_INTALU_ITINS_P>;
+ SSE_INTALU_ITINS_SHUFF_P>;
defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq,
- SSE_INTALU_ITINS_P>;
+ SSE_INTALU_ITINS_SHUFF_P>;
defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd,
- SSE_INTALU_ITINS_P>;
+ SSE_INTALU_ITINS_SHUFF_P>;
defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq,
- SSE_INTALU_ITINS_P>;
+ SSE_INTALU_ITINS_SHUFF_P>;
let Predicates = [HasAVX] in {
// Common patterns involving scalar load
@@ -5703,49 +6177,49 @@ let Predicates = [UseSSE41] in {
}
multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId,
- OpndItins itins = DEFAULT_ITINS> {
+ X86FoldableSchedWrite Sched> {
def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
+ [(set VR128:$dst, (IntId VR128:$src))]>, Sched<[Sched]>;
// Expecting an i16 load any-extended to an i32 value.
def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (IntId (bitconvert
(v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
- OpSize;
+ Sched<[Sched.Folded]>;
}
multiclass SS41I_binop_rm_int4_y<bits<8> opc, string OpcodeStr,
- Intrinsic IntId> {
+ Intrinsic IntId, X86FoldableSchedWrite Sched> {
def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (IntId VR128:$src))]>, OpSize;
+ [(set VR256:$dst, (IntId VR128:$src))]>, Sched<[Sched]>;
// Expecting an i16 load any-extended to an i32 value.
def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst), (ins i16mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (IntId (bitconvert
(v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
- OpSize;
+ Sched<[Sched.Folded]>;
}
let Predicates = [HasAVX] in {
-defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
- VEX;
-defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
- VEX;
+defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq,
+ WriteShuffle>, VEX;
+defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq,
+ WriteShuffle>, VEX;
}
let Predicates = [HasAVX2] in {
-defm VPMOVSXBQ : SS41I_binop_rm_int4_y<0x22, "vpmovsxbq",
- int_x86_avx2_pmovsxbq>, VEX, VEX_L;
-defm VPMOVZXBQ : SS41I_binop_rm_int4_y<0x32, "vpmovzxbq",
- int_x86_avx2_pmovzxbq>, VEX, VEX_L;
+defm VPMOVSXBQ : SS41I_binop_rm_int4_y<0x22, "vpmovsxbq", int_x86_avx2_pmovsxbq,
+ WriteShuffle>, VEX, VEX_L;
+defm VPMOVZXBQ : SS41I_binop_rm_int4_y<0x32, "vpmovzxbq", int_x86_avx2_pmovzxbq,
+ WriteShuffle>, VEX, VEX_L;
}
defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq,
- SSE_INTALU_ITINS_P>;
+ WriteShuffle>;
defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq,
- SSE_INTALU_ITINS_P>;
+ WriteShuffle>;
let Predicates = [HasAVX2] in {
def : Pat<(v16i16 (X86vsext (v16i8 VR128:$src))), (VPMOVSXBWYrr VR128:$src)>;
@@ -5772,9 +6246,9 @@ let Predicates = [HasAVX2] in {
def : Pat<(v4i64 (X86vsext (v8i32 VR256:$src))),
(VPMOVSXDQYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
- def : Pat<(v8i32 (X86vsmovl (v8i16 (bitconvert (v2i64 (load addr:$src)))))),
+ def : Pat<(v8i32 (X86vsext (v8i16 (bitconvert (v2i64 (load addr:$src)))))),
(VPMOVSXWDYrm addr:$src)>;
- def : Pat<(v4i64 (X86vsmovl (v4i32 (bitconvert (v2i64 (load addr:$src)))))),
+ def : Pat<(v4i64 (X86vsext (v4i32 (bitconvert (v2i64 (load addr:$src)))))),
(VPMOVSXDQYrm addr:$src)>;
def : Pat<(v8i32 (X86vsext (v16i8 (bitconvert (v2i64
@@ -6003,16 +6477,15 @@ multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set GR32orGR64:$dst, (X86pextrb (v16i8 VR128:$src1),
imm:$src2))]>,
- OpSize;
- let neverHasSideEffects = 1, mayStore = 1 in
+ Sched<[WriteShuffle]>;
+ let neverHasSideEffects = 1, mayStore = 1,
+ SchedRW = [WriteShuffleLd, WriteRMW] in
def mr : SS4AIi8<opc, MRMDestMem, (outs),
(ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, OpSize;
-// FIXME:
-// There's an AssertZext in the way of writing the store pattern
-// (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
+ [(store (i8 (trunc (assertzext (X86pextrb (v16i8 VR128:$src1),
+ imm:$src2)))), addr:$dst)]>;
}
let Predicates = [HasAVX] in
@@ -6023,22 +6496,21 @@ defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
- let isCodeGenOnly = 1, hasSideEffects = 0 in
+ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
def rr_REV : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
(ins VR128:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, OpSize;
+ []>, Sched<[WriteShuffle]>;
- let neverHasSideEffects = 1, mayStore = 1 in
+ let neverHasSideEffects = 1, mayStore = 1,
+ SchedRW = [WriteShuffleLd, WriteRMW] in
def mr : SS4AIi8<opc, MRMDestMem, (outs),
(ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, OpSize;
-// FIXME:
-// There's an AssertZext in the way of writing the store pattern
-// (store (i16 (trunc (X86pextrw (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
+ [(store (i16 (trunc (assertzext (X86pextrw (v8i16 VR128:$src1),
+ imm:$src2)))), addr:$dst)]>;
}
let Predicates = [HasAVX] in
@@ -6054,13 +6526,15 @@ multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set GR32:$dst,
- (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
+ (extractelt (v4i32 VR128:$src1), imm:$src2))]>,
+ Sched<[WriteShuffle]>;
+ let SchedRW = [WriteShuffleLd, WriteRMW] in
def mr : SS4AIi8<opc, MRMDestMem, (outs),
(ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(store (extractelt (v4i32 VR128:$src1), imm:$src2),
- addr:$dst)]>, OpSize;
+ addr:$dst)]>;
}
let Predicates = [HasAVX] in
@@ -6075,13 +6549,15 @@ multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set GR64:$dst,
- (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
+ (extractelt (v2i64 VR128:$src1), imm:$src2))]>,
+ Sched<[WriteShuffle]>, REX_W;
+ let SchedRW = [WriteShuffleLd, WriteRMW] in
def mr : SS4AIi8<opc, MRMDestMem, (outs),
(ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(store (extractelt (v2i64 VR128:$src1), imm:$src2),
- addr:$dst)]>, OpSize, REX_W;
+ addr:$dst)]>, REX_W;
}
let Predicates = [HasAVX] in
@@ -6099,14 +6575,14 @@ multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set GR32orGR64:$dst,
(extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))],
- itins.rr>,
- OpSize;
+ itins.rr>, Sched<[WriteFBlend]>;
+ let SchedRW = [WriteFBlendLd, WriteRMW] in
def mr : SS4AIi8<opc, MRMDestMem, (outs),
(ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
- addr:$dst)], itins.rm>, OpSize;
+ addr:$dst)], itins.rm>;
}
let ExeDomain = SSEPackedSingle in {
@@ -6139,7 +6615,8 @@ multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
- (X86pinsrb VR128:$src1, GR32orGR64:$src2, imm:$src3))]>, OpSize;
+ (X86pinsrb VR128:$src1, GR32orGR64:$src2, imm:$src3))]>,
+ Sched<[WriteShuffle]>;
def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
!if(Is2Addr,
@@ -6148,7 +6625,7 @@ multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
(X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
- imm:$src3))]>, OpSize;
+ imm:$src3))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
}
let Predicates = [HasAVX] in
@@ -6165,7 +6642,7 @@ multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
(v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
- OpSize;
+ Sched<[WriteShuffle]>;
def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
!if(Is2Addr,
@@ -6174,7 +6651,7 @@ multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
(v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
- imm:$src3)))]>, OpSize;
+ imm:$src3)))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
}
let Predicates = [HasAVX] in
@@ -6191,7 +6668,7 @@ multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
(v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
- OpSize;
+ Sched<[WriteShuffle]>;
def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
!if(Is2Addr,
@@ -6200,7 +6677,7 @@ multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
(v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
- imm:$src3)))]>, OpSize;
+ imm:$src3)))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
}
let Predicates = [HasAVX] in
@@ -6221,8 +6698,8 @@ multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1,
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
- (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))], itins.rr>,
- OpSize;
+ (X86insertps VR128:$src1, VR128:$src2, imm:$src3))], itins.rr>,
+ Sched<[WriteFShuffle]>;
def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, f32mem:$src2, u32u8imm:$src3),
!if(Is2Addr,
@@ -6230,9 +6707,10 @@ multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1,
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
- (X86insrtps VR128:$src1,
+ (X86insertps VR128:$src1,
(v4f32 (scalar_to_vector (loadf32 addr:$src2))),
- imm:$src3))], itins.rm>, OpSize;
+ imm:$src3))], itins.rm>,
+ Sched<[WriteFShuffleLd, ReadAfterLd]>;
}
let ExeDomain = SSEPackedSingle in {
@@ -6242,6 +6720,29 @@ let ExeDomain = SSEPackedSingle in {
defm INSERTPS : SS41I_insertf32<0x21, "insertps", 1, SSE_INSERT_ITINS>;
}
+let Predicates = [UseSSE41] in {
+ // If we're inserting an element from a load or a null pshuf of a load,
+ // fold the load into the insertps instruction.
+ def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1), (X86PShufd (v4f32
+ (scalar_to_vector (loadf32 addr:$src2))), (i8 0)),
+ imm:$src3)),
+ (INSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
+ def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1), (X86PShufd
+ (loadv4f32 addr:$src2), (i8 0)), imm:$src3)),
+ (INSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
+}
+
+let Predicates = [UseAVX] in {
+ // If we're inserting an element from a vbroadcast of a load, fold the
+ // load into the X86insertps instruction.
+ def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1),
+ (X86VBroadcast (loadf32 addr:$src2)), imm:$src3)),
+ (VINSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
+ def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1),
+ (X86VBroadcast (loadv4f32 addr:$src2)), imm:$src3)),
+ (VINSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
+}
+
//===----------------------------------------------------------------------===//
// SSE4.1 - Round Instructions
//===----------------------------------------------------------------------===//
@@ -6258,8 +6759,7 @@ let ExeDomain = SSEPackedSingle in {
!strconcat(OpcodeStr,
"ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))],
- IIC_SSE_ROUNDPS_REG>,
- OpSize;
+ IIC_SSE_ROUNDPS_REG>, Sched<[WriteFAdd]>;
// Vector intrinsic operation, mem
def PSm : SS4AIi8<opcps, MRMSrcMem,
@@ -6268,8 +6768,7 @@ let ExeDomain = SSEPackedSingle in {
"ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
(V4F32Int (mem_frag32 addr:$src1),imm:$src2))],
- IIC_SSE_ROUNDPS_MEM>,
- OpSize;
+ IIC_SSE_ROUNDPS_MEM>, Sched<[WriteFAddLd]>;
} // ExeDomain = SSEPackedSingle
let ExeDomain = SSEPackedDouble in {
@@ -6279,8 +6778,7 @@ let ExeDomain = SSEPackedDouble in {
!strconcat(OpcodeStr,
"pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))],
- IIC_SSE_ROUNDPS_REG>,
- OpSize;
+ IIC_SSE_ROUNDPS_REG>, Sched<[WriteFAdd]>;
// Vector intrinsic operation, mem
def PDm : SS4AIi8<opcpd, MRMSrcMem,
@@ -6289,8 +6787,7 @@ let ExeDomain = SSEPackedDouble in {
"pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
(V2F64Int (mem_frag64 addr:$src1),imm:$src2))],
- IIC_SSE_ROUNDPS_REG>,
- OpSize;
+ IIC_SSE_ROUNDPS_REG>, Sched<[WriteFAddLd]>;
} // ExeDomain = SSEPackedDouble
}
@@ -6308,9 +6805,10 @@ let ExeDomain = GenericDomain in {
"ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(OpcodeStr,
"ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
- []>, OpSize;
+ []>, Sched<[WriteFAdd]>;
// Intrinsic operation, reg.
+ let isCodeGenOnly = 1 in
def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
!if(Is2Addr,
@@ -6319,7 +6817,7 @@ let ExeDomain = GenericDomain in {
!strconcat(OpcodeStr,
"ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
- OpSize;
+ Sched<[WriteFAdd]>;
// Intrinsic operation, mem.
def SSm : SS4AIi8<opcss, MRMSrcMem,
@@ -6331,7 +6829,7 @@ let ExeDomain = GenericDomain in {
"ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
(F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
- OpSize;
+ Sched<[WriteFAddLd, ReadAfterLd]>;
// Operation, reg.
let hasSideEffects = 0 in
@@ -6342,9 +6840,10 @@ let ExeDomain = GenericDomain in {
"sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(OpcodeStr,
"sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
- []>, OpSize;
+ []>, Sched<[WriteFAdd]>;
// Intrinsic operation, reg.
+ let isCodeGenOnly = 1 in
def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
!if(Is2Addr,
@@ -6353,7 +6852,7 @@ let ExeDomain = GenericDomain in {
!strconcat(OpcodeStr,
"sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
- OpSize;
+ Sched<[WriteFAdd]>;
// Intrinsic operation, mem.
def SDm : SS4AIi8<opcsd, MRMSrcMem,
@@ -6365,7 +6864,7 @@ let ExeDomain = GenericDomain in {
"sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
(F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
- OpSize;
+ Sched<[WriteFAddLd, ReadAfterLd]>;
} // ExeDomain = GenericDomain
}
@@ -6512,31 +7011,31 @@ let Defs = [EFLAGS], Predicates = [HasAVX] in {
def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
"vptest\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
- OpSize, VEX;
+ Sched<[WriteVecLogic]>, VEX;
def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
"vptest\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS,(X86ptest VR128:$src1, (loadv2i64 addr:$src2)))]>,
- OpSize, VEX;
+ Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX;
def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
"vptest\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
- OpSize, VEX, VEX_L;
+ Sched<[WriteVecLogic]>, VEX, VEX_L;
def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
"vptest\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS,(X86ptest VR256:$src1, (loadv4i64 addr:$src2)))]>,
- OpSize, VEX, VEX_L;
+ Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX, VEX_L;
}
let Defs = [EFLAGS] in {
def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
"ptest\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
- OpSize;
+ Sched<[WriteVecLogic]>;
def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
"ptest\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86ptest VR128:$src1, (memopv2i64 addr:$src2)))]>,
- OpSize;
+ Sched<[WriteVecLogicLd, ReadAfterLd]>;
}
// The bit test instructions below are AVX only
@@ -6544,11 +7043,12 @@ multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
- [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
+ [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>,
+ Sched<[WriteVecLogic]>, VEX;
def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
[(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
- OpSize, VEX;
+ Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX;
}
let Defs = [EFLAGS], Predicates = [HasAVX] in {
@@ -6572,56 +7072,65 @@ let Defs = [EFLAGS], Predicates = [HasPOPCNT] in {
def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"popcnt{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (ctpop GR16:$src)), (implicit EFLAGS)],
- IIC_SSE_POPCNT_RR>,
- OpSize, XS;
+ IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>,
+ OpSize16, XS;
def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"popcnt{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (ctpop (loadi16 addr:$src))),
- (implicit EFLAGS)], IIC_SSE_POPCNT_RM>, OpSize, XS;
+ (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
+ Sched<[WriteFAddLd]>, OpSize16, XS;
def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"popcnt{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (ctpop GR32:$src)), (implicit EFLAGS)],
- IIC_SSE_POPCNT_RR>,
- XS;
+ IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>,
+ OpSize32, XS;
+
def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"popcnt{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (ctpop (loadi32 addr:$src))),
- (implicit EFLAGS)], IIC_SSE_POPCNT_RM>, XS;
+ (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
+ Sched<[WriteFAddLd]>, OpSize32, XS;
def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"popcnt{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (ctpop GR64:$src)), (implicit EFLAGS)],
- IIC_SSE_POPCNT_RR>,
- XS;
+ IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>, XS;
def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"popcnt{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (ctpop (loadi64 addr:$src))),
- (implicit EFLAGS)], IIC_SSE_POPCNT_RM>, XS;
+ (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
+ Sched<[WriteFAddLd]>, XS;
}
// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
- Intrinsic IntId128> {
+ Intrinsic IntId128,
+ X86FoldableSchedWrite Sched> {
def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
+ [(set VR128:$dst, (IntId128 VR128:$src))]>,
+ Sched<[Sched]>;
def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
(ins i128mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst,
- (IntId128
- (bitconvert (memopv2i64 addr:$src))))]>, OpSize;
+ (IntId128 (bitconvert (memopv2i64 addr:$src))))]>,
+ Sched<[Sched.Folded]>;
}
+// PHMIN has the same profile as PSAD, thus we use the same scheduling
+// model, although the naming is misleading.
let Predicates = [HasAVX] in
defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
- int_x86_sse41_phminposuw>, VEX;
+ int_x86_sse41_phminposuw,
+ WriteVecIMul>, VEX;
defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
- int_x86_sse41_phminposuw>;
+ int_x86_sse41_phminposuw,
+ WriteVecIMul>;
/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
@@ -6634,32 +7143,33 @@ multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))],
- itins.rr>, OpSize;
+ itins.rr>, Sched<[itins.Sched]>;
def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst,
- (IntId128 VR128:$src1,
- (bitconvert (memopv2i64 addr:$src2))))],
- itins.rm>, OpSize;
+ (IntId128 VR128:$src1, (bitconvert (memopv2i64 addr:$src2))))],
+ itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
/// SS41I_binop_rm_int_y - Simple SSE 4.1 binary operator
multiclass SS41I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
- Intrinsic IntId256> {
+ Intrinsic IntId256,
+ X86FoldableSchedWrite Sched> {
let isCommutable = 1 in
def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>, OpSize;
+ [(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>,
+ Sched<[Sched]>;
def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, i256mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
- (IntId256 VR256:$src1,
- (bitconvert (loadv4i64 addr:$src2))))]>, OpSize;
+ (IntId256 VR256:$src1, (bitconvert (loadv4i64 addr:$src2))))]>,
+ Sched<[Sched.Folded, ReadAfterLd]>;
}
@@ -6667,75 +7177,114 @@ multiclass SS41I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
X86MemOperand x86memop, bit Is2Addr = 1,
- OpndItins itins = DEFAULT_ITINS> {
+ OpndItins itins = SSE_INTALU_ITINS_P> {
let isCommutable = 1 in
def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>, OpSize;
+ [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>,
+ Sched<[itins.Sched]>;
def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set RC:$dst,
- (OpVT (OpNode RC:$src1,
- (bitconvert (memop_frag addr:$src2)))))]>, OpSize;
+ (OpVT (OpNode RC:$src1, (bitconvert (memop_frag addr:$src2)))))]>,
+ Sched<[itins.Sched.Folded, ReadAfterLd]>;
+}
+
+/// SS48I_binop_rm2 - Simple SSE41 binary operator with different src and dst
+/// types.
+multiclass SS48I_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ ValueType DstVT, ValueType SrcVT, RegisterClass RC,
+ PatFrag memop_frag, X86MemOperand x86memop,
+ OpndItins itins,
+ bit IsCommutable = 0, bit Is2Addr = 1> {
+ let isCommutable = IsCommutable in
+ def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1), RC:$src2)))]>,
+ Sched<[itins.Sched]>;
+ def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1),
+ (bitconvert (memop_frag addr:$src2)))))]>,
+ Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
let Predicates = [HasAVX] in {
let isCommutable = 0 in
- defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
- 0>, VEX_4V;
defm VPMINSB : SS48I_binop_rm<0x38, "vpminsb", X86smin, v16i8, VR128,
- loadv2i64, i128mem, 0>, VEX_4V;
+ loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V;
defm VPMINSD : SS48I_binop_rm<0x39, "vpminsd", X86smin, v4i32, VR128,
- loadv2i64, i128mem, 0>, VEX_4V;
+ loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V;
defm VPMINUD : SS48I_binop_rm<0x3B, "vpminud", X86umin, v4i32, VR128,
- loadv2i64, i128mem, 0>, VEX_4V;
+ loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V;
defm VPMINUW : SS48I_binop_rm<0x3A, "vpminuw", X86umin, v8i16, VR128,
- loadv2i64, i128mem, 0>, VEX_4V;
+ loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V;
defm VPMAXSB : SS48I_binop_rm<0x3C, "vpmaxsb", X86smax, v16i8, VR128,
- loadv2i64, i128mem, 0>, VEX_4V;
+ loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V;
defm VPMAXSD : SS48I_binop_rm<0x3D, "vpmaxsd", X86smax, v4i32, VR128,
- loadv2i64, i128mem, 0>, VEX_4V;
+ loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V;
defm VPMAXUD : SS48I_binop_rm<0x3F, "vpmaxud", X86umax, v4i32, VR128,
- loadv2i64, i128mem, 0>, VEX_4V;
+ loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V;
defm VPMAXUW : SS48I_binop_rm<0x3E, "vpmaxuw", X86umax, v8i16, VR128,
- loadv2i64, i128mem, 0>, VEX_4V;
- defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
- 0>, VEX_4V;
+ loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V;
+ defm VPMULDQ : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v2i64, v4i32,
+ VR128, loadv2i64, i128mem,
+ SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
}
let Predicates = [HasAVX2] in {
let isCommutable = 0 in
- defm VPACKUSDW : SS41I_binop_rm_int_y<0x2B, "vpackusdw",
- int_x86_avx2_packusdw>, VEX_4V, VEX_L;
defm VPMINSBY : SS48I_binop_rm<0x38, "vpminsb", X86smin, v32i8, VR256,
- loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V, VEX_L;
defm VPMINSDY : SS48I_binop_rm<0x39, "vpminsd", X86smin, v8i32, VR256,
- loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V, VEX_L;
defm VPMINUDY : SS48I_binop_rm<0x3B, "vpminud", X86umin, v8i32, VR256,
- loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V, VEX_L;
defm VPMINUWY : SS48I_binop_rm<0x3A, "vpminuw", X86umin, v16i16, VR256,
- loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V, VEX_L;
defm VPMAXSBY : SS48I_binop_rm<0x3C, "vpmaxsb", X86smax, v32i8, VR256,
- loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V, VEX_L;
defm VPMAXSDY : SS48I_binop_rm<0x3D, "vpmaxsd", X86smax, v8i32, VR256,
- loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V, VEX_L;
defm VPMAXUDY : SS48I_binop_rm<0x3F, "vpmaxud", X86umax, v8i32, VR256,
- loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V, VEX_L;
defm VPMAXUWY : SS48I_binop_rm<0x3E, "vpmaxuw", X86umax, v16i16, VR256,
- loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
- defm VPMULDQ : SS41I_binop_rm_int_y<0x28, "vpmuldq",
- int_x86_avx2_pmul_dq>, VEX_4V, VEX_L;
+ loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V, VEX_L;
+ defm VPMULDQY : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v4i64, v8i32,
+ VR256, loadv4i64, i256mem,
+ SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
}
let Constraints = "$src1 = $dst" in {
let isCommutable = 0 in
- defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
defm PMINSB : SS48I_binop_rm<0x38, "pminsb", X86smin, v16i8, VR128,
memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
defm PMINSD : SS48I_binop_rm<0x39, "pminsd", X86smin, v4i32, VR128,
@@ -6752,21 +7301,26 @@ let Constraints = "$src1 = $dst" in {
memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
defm PMAXUW : SS48I_binop_rm<0x3E, "pmaxuw", X86umax, v8i16, VR128,
memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
- defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq,
- 1, SSE_INTMUL_ITINS_P>;
+ defm PMULDQ : SS48I_binop_rm2<0x28, "pmuldq", X86pmuldq, v2i64, v4i32,
+ VR128, memopv2i64, i128mem,
+ SSE_INTMUL_ITINS_P, 1>;
}
let Predicates = [HasAVX] in {
defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, VR128,
- memopv2i64, i128mem, 0>, VEX_4V;
+ memopv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V;
defm VPCMPEQQ : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v2i64, VR128,
- memopv2i64, i128mem, 0>, VEX_4V;
+ memopv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V;
}
let Predicates = [HasAVX2] in {
defm VPMULLDY : SS48I_binop_rm<0x40, "vpmulld", mul, v8i32, VR256,
- memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ memopv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V, VEX_L;
defm VPCMPEQQY : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v4i64, VR256,
- memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ memopv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V, VEX_L;
}
let Constraints = "$src1 = $dst" in {
@@ -6790,7 +7344,7 @@ multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))], itins.rr>,
- OpSize;
+ Sched<[itins.Sched]>;
def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop:$src2, u32u8imm:$src3),
!if(Is2Addr,
@@ -6801,47 +7355,58 @@ multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
[(set RC:$dst,
(IntId RC:$src1,
(bitconvert (memop_frag addr:$src2)), imm:$src3))], itins.rm>,
- OpSize;
+ Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
let Predicates = [HasAVX] in {
let isCommutable = 0 in {
let ExeDomain = SSEPackedSingle in {
defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
- VR128, loadv4f32, f128mem, 0>, VEX_4V;
+ VR128, loadv4f32, f128mem, 0,
+ DEFAULT_ITINS_FBLENDSCHED>, VEX_4V;
defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
int_x86_avx_blend_ps_256, VR256, loadv8f32,
- f256mem, 0>, VEX_4V, VEX_L;
+ f256mem, 0, DEFAULT_ITINS_FBLENDSCHED>,
+ VEX_4V, VEX_L;
}
let ExeDomain = SSEPackedDouble in {
defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
- VR128, loadv2f64, f128mem, 0>, VEX_4V;
+ VR128, loadv2f64, f128mem, 0,
+ DEFAULT_ITINS_FBLENDSCHED>, VEX_4V;
defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
int_x86_avx_blend_pd_256,VR256, loadv4f64,
- f256mem, 0>, VEX_4V, VEX_L;
+ f256mem, 0, DEFAULT_ITINS_FBLENDSCHED>,
+ VEX_4V, VEX_L;
}
defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
- VR128, loadv2i64, i128mem, 0>, VEX_4V;
+ VR128, loadv2i64, i128mem, 0,
+ DEFAULT_ITINS_BLENDSCHED>, VEX_4V;
defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
- VR128, loadv2i64, i128mem, 0>, VEX_4V;
+ VR128, loadv2i64, i128mem, 0,
+ DEFAULT_ITINS_MPSADSCHED>, VEX_4V;
}
let ExeDomain = SSEPackedSingle in
defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
- VR128, loadv4f32, f128mem, 0>, VEX_4V;
+ VR128, loadv4f32, f128mem, 0,
+ SSE_DPPS_ITINS>, VEX_4V;
let ExeDomain = SSEPackedDouble in
defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
- VR128, loadv2f64, f128mem, 0>, VEX_4V;
+ VR128, loadv2f64, f128mem, 0,
+ SSE_DPPS_ITINS>, VEX_4V;
let ExeDomain = SSEPackedSingle in
defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
- VR256, loadv8f32, i256mem, 0>, VEX_4V, VEX_L;
+ VR256, loadv8f32, i256mem, 0,
+ SSE_DPPS_ITINS>, VEX_4V, VEX_L;
}
let Predicates = [HasAVX2] in {
let isCommutable = 0 in {
defm VPBLENDWY : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_avx2_pblendw,
- VR256, loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ VR256, loadv4i64, i256mem, 0,
+ DEFAULT_ITINS_BLENDSCHED>, VEX_4V, VEX_L;
defm VMPSADBWY : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_avx2_mpsadbw,
- VR256, loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
+ VR256, loadv4i64, i256mem, 0,
+ DEFAULT_ITINS_MPSADSCHED>, VEX_4V, VEX_L;
}
}
@@ -6850,17 +7415,17 @@ let Constraints = "$src1 = $dst" in {
let ExeDomain = SSEPackedSingle in
defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
VR128, memopv4f32, f128mem,
- 1, SSE_INTALU_ITINS_P>;
+ 1, SSE_INTALU_ITINS_FBLEND_P>;
let ExeDomain = SSEPackedDouble in
defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
VR128, memopv2f64, f128mem,
- 1, SSE_INTALU_ITINS_P>;
+ 1, SSE_INTALU_ITINS_FBLEND_P>;
defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
VR128, memopv2i64, i128mem,
- 1, SSE_INTALU_ITINS_P>;
+ 1, SSE_INTALU_ITINS_FBLEND_P>;
defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
VR128, memopv2i64, i128mem,
- 1, SSE_INTMUL_ITINS_P>;
+ 1, SSE_MPSADBW_ITINS>;
}
let ExeDomain = SSEPackedSingle in
defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
@@ -6875,13 +7440,15 @@ let Constraints = "$src1 = $dst" in {
/// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operators
multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
RegisterClass RC, X86MemOperand x86memop,
- PatFrag mem_frag, Intrinsic IntId> {
+ PatFrag mem_frag, Intrinsic IntId,
+ X86FoldableSchedWrite Sched> {
def rr : Ii8<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2, RC:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
- NoItinerary, SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
+ NoItinerary, SSEPackedInt>, TAPD, VEX_4V, VEX_I8IMM,
+ Sched<[Sched]>;
def rm : Ii8<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop:$src2, RC:$src3),
@@ -6890,29 +7457,36 @@ multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
[(set RC:$dst,
(IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
RC:$src3))],
- NoItinerary, SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
+ NoItinerary, SSEPackedInt>, TAPD, VEX_4V, VEX_I8IMM,
+ Sched<[Sched.Folded, ReadAfterLd]>;
}
let Predicates = [HasAVX] in {
let ExeDomain = SSEPackedDouble in {
defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, f128mem,
- loadv2f64, int_x86_sse41_blendvpd>;
+ loadv2f64, int_x86_sse41_blendvpd,
+ WriteFVarBlend>;
defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, f256mem,
- loadv4f64, int_x86_avx_blendv_pd_256>, VEX_L;
+ loadv4f64, int_x86_avx_blendv_pd_256,
+ WriteFVarBlend>, VEX_L;
} // ExeDomain = SSEPackedDouble
let ExeDomain = SSEPackedSingle in {
defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, f128mem,
- loadv4f32, int_x86_sse41_blendvps>;
+ loadv4f32, int_x86_sse41_blendvps,
+ WriteFVarBlend>;
defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, f256mem,
- loadv8f32, int_x86_avx_blendv_ps_256>, VEX_L;
+ loadv8f32, int_x86_avx_blendv_ps_256,
+ WriteFVarBlend>, VEX_L;
} // ExeDomain = SSEPackedSingle
defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
- loadv2i64, int_x86_sse41_pblendvb>;
+ loadv2i64, int_x86_sse41_pblendvb,
+ WriteVarBlend>;
}
let Predicates = [HasAVX2] in {
defm VPBLENDVBY : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR256, i256mem,
- loadv4i64, int_x86_avx2_pblendvb>, VEX_L;
+ loadv4i64, int_x86_avx2_pblendvb,
+ WriteVarBlend>, VEX_L;
}
let Predicates = [HasAVX] in {
@@ -6981,7 +7555,7 @@ let Uses = [XMM0], Constraints = "$src1 = $dst" in {
!strconcat(OpcodeStr,
"\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))],
- itins.rr>, OpSize;
+ itins.rr>;
def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, x86memop:$src2),
@@ -6990,7 +7564,7 @@ let Uses = [XMM0], Constraints = "$src1 = $dst" in {
[(set VR128:$dst,
(IntId VR128:$src1,
(bitconvert (mem_frag addr:$src2)), XMM0))],
- itins.rm>, OpSize;
+ itins.rm>;
}
}
@@ -7046,20 +7620,21 @@ let Predicates = [UseSSE41] in {
}
+let SchedRW = [WriteLoad] in {
let Predicates = [HasAVX] in
def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"vmovntdqa\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
- OpSize, VEX;
+ VEX;
let Predicates = [HasAVX2] in
def VMOVNTDQAYrm : SS48I<0x2A, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
"vmovntdqa\t{$src, $dst|$dst, $src}",
[(set VR256:$dst, (int_x86_avx2_movntdqa addr:$src))]>,
- OpSize, VEX, VEX_L;
+ VEX, VEX_L;
def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movntdqa\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
- OpSize;
+ [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>;
+} // SchedRW
//===----------------------------------------------------------------------===//
// SSE4.2 - Compare Instructions
@@ -7074,15 +7649,14 @@ multiclass SS42I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>,
- OpSize;
+ [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>;
def rm : SS428I<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set RC:$dst,
- (OpVT (OpNode RC:$src1, (memop_frag addr:$src2))))]>, OpSize;
+ (OpVT (OpNode RC:$src1, (memop_frag addr:$src2))))]>;
}
let Predicates = [HasAVX] in
@@ -7122,12 +7696,12 @@ multiclass pcmpistrm_SS42AI<string asm> {
def rr : SS42AI<0x62, MRMSrcReg, (outs),
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
!strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
- []>, OpSize;
+ []>, Sched<[WritePCmpIStrM]>;
let mayLoad = 1 in
def rm :SS42AI<0x62, MRMSrcMem, (outs),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
!strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
- []>, OpSize;
+ []>, Sched<[WritePCmpIStrMLd, ReadAfterLd]>;
}
let Defs = [XMM0, EFLAGS], neverHasSideEffects = 1 in {
@@ -7157,12 +7731,12 @@ multiclass SS42AI_pcmpestrm<string asm> {
def rr : SS42AI<0x60, MRMSrcReg, (outs),
(ins VR128:$src1, VR128:$src3, i8imm:$src5),
!strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
- []>, OpSize;
+ []>, Sched<[WritePCmpEStrM]>;
let mayLoad = 1 in
def rm : SS42AI<0x60, MRMSrcMem, (outs),
(ins VR128:$src1, i128mem:$src3, i8imm:$src5),
!strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
- []>, OpSize;
+ []>, Sched<[WritePCmpEStrMLd, ReadAfterLd]>;
}
let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
@@ -7192,12 +7766,12 @@ multiclass SS42AI_pcmpistri<string asm> {
def rr : SS42AI<0x63, MRMSrcReg, (outs),
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
!strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
- []>, OpSize;
+ []>, Sched<[WritePCmpIStrI]>;
let mayLoad = 1 in
def rm : SS42AI<0x63, MRMSrcMem, (outs),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
!strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
- []>, OpSize;
+ []>, Sched<[WritePCmpIStrILd, ReadAfterLd]>;
}
let Defs = [ECX, EFLAGS], neverHasSideEffects = 1 in {
@@ -7228,12 +7802,12 @@ multiclass SS42AI_pcmpestri<string asm> {
def rr : SS42AI<0x61, MRMSrcReg, (outs),
(ins VR128:$src1, VR128:$src3, i8imm:$src5),
!strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
- []>, OpSize;
+ []>, Sched<[WritePCmpEStrI]>;
let mayLoad = 1 in
def rm : SS42AI<0x61, MRMSrcMem, (outs),
(ins VR128:$src1, i128mem:$src3, i8imm:$src5),
!strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
- []>, OpSize;
+ []>, Sched<[WritePCmpEStrILd, ReadAfterLd]>;
}
let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
@@ -7255,14 +7829,15 @@ class SS42I_crc32r<bits<8> opc, string asm, RegisterClass RCOut,
RegisterClass RCIn, SDPatternOperator Int> :
SS42FI<opc, MRMSrcReg, (outs RCOut:$dst), (ins RCOut:$src1, RCIn:$src2),
!strconcat(asm, "\t{$src2, $src1|$src1, $src2}"),
- [(set RCOut:$dst, (Int RCOut:$src1, RCIn:$src2))], IIC_CRC32_REG>;
+ [(set RCOut:$dst, (Int RCOut:$src1, RCIn:$src2))], IIC_CRC32_REG>,
+ Sched<[WriteFAdd]>;
class SS42I_crc32m<bits<8> opc, string asm, RegisterClass RCOut,
X86MemOperand x86memop, SDPatternOperator Int> :
SS42FI<opc, MRMSrcMem, (outs RCOut:$dst), (ins RCOut:$src1, x86memop:$src2),
!strconcat(asm, "\t{$src2, $src1|$src1, $src2}"),
[(set RCOut:$dst, (Int RCOut:$src1, (load addr:$src2)))],
- IIC_CRC32_MEM>;
+ IIC_CRC32_MEM>, Sched<[WriteFAddLd, ReadAfterLd]>;
let Constraints = "$src1 = $dst" in {
def CRC32r32m8 : SS42I_crc32m<0xF0, "crc32{b}", GR32, i8mem,
@@ -7270,13 +7845,13 @@ let Constraints = "$src1 = $dst" in {
def CRC32r32r8 : SS42I_crc32r<0xF0, "crc32{b}", GR32, GR8,
int_x86_sse42_crc32_32_8>;
def CRC32r32m16 : SS42I_crc32m<0xF1, "crc32{w}", GR32, i16mem,
- int_x86_sse42_crc32_32_16>, OpSize;
+ int_x86_sse42_crc32_32_16>, OpSize16;
def CRC32r32r16 : SS42I_crc32r<0xF1, "crc32{w}", GR32, GR16,
- int_x86_sse42_crc32_32_16>, OpSize;
+ int_x86_sse42_crc32_32_16>, OpSize16;
def CRC32r32m32 : SS42I_crc32m<0xF1, "crc32{l}", GR32, i32mem,
- int_x86_sse42_crc32_32_32>;
+ int_x86_sse42_crc32_32_32>, OpSize32;
def CRC32r32r32 : SS42I_crc32r<0xF1, "crc32{l}", GR32, GR32,
- int_x86_sse42_crc32_32_32>;
+ int_x86_sse42_crc32_32_32>, OpSize32;
def CRC32r64m64 : SS42I_crc32m<0xF1, "crc32{q}", GR64, i64mem,
int_x86_sse42_crc32_64_64>, REX_W;
def CRC32r64r64 : SS42I_crc32r<0xF1, "crc32{q}", GR64, GR64,
@@ -7357,14 +7932,15 @@ multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
- OpSize;
+ Sched<[WriteAESDecEnc]>;
def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst,
- (IntId128 VR128:$src1, (memopv2i64 addr:$src2)))]>, OpSize;
+ (IntId128 VR128:$src1, (memopv2i64 addr:$src2)))]>,
+ Sched<[WriteAESDecEncLd, ReadAfterLd]>;
}
// Perform One Round of an AES Encryption/Decryption Flow
@@ -7396,25 +7972,24 @@ let Predicates = [HasAVX, HasAES] in {
(ins VR128:$src1),
"vaesimc\t{$src1, $dst|$dst, $src1}",
[(set VR128:$dst,
- (int_x86_aesni_aesimc VR128:$src1))]>,
- OpSize, VEX;
+ (int_x86_aesni_aesimc VR128:$src1))]>, Sched<[WriteAESIMC]>,
+ VEX;
def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
(ins i128mem:$src1),
"vaesimc\t{$src1, $dst|$dst, $src1}",
[(set VR128:$dst, (int_x86_aesni_aesimc (loadv2i64 addr:$src1)))]>,
- OpSize, VEX;
+ Sched<[WriteAESIMCLd]>, VEX;
}
def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1),
"aesimc\t{$src1, $dst|$dst, $src1}",
[(set VR128:$dst,
- (int_x86_aesni_aesimc VR128:$src1))]>,
- OpSize;
+ (int_x86_aesni_aesimc VR128:$src1))]>, Sched<[WriteAESIMC]>;
def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
(ins i128mem:$src1),
"aesimc\t{$src1, $dst|$dst, $src1}",
[(set VR128:$dst, (int_x86_aesni_aesimc (memopv2i64 addr:$src1)))]>,
- OpSize;
+ Sched<[WriteAESIMCLd]>;
// AES Round Key Generation Assist
let Predicates = [HasAVX, HasAES] in {
@@ -7423,26 +7998,26 @@ let Predicates = [HasAVX, HasAES] in {
"vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
- OpSize, VEX;
+ Sched<[WriteAESKeyGen]>, VEX;
def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
(ins i128mem:$src1, i8imm:$src2),
"vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(int_x86_aesni_aeskeygenassist (loadv2i64 addr:$src1), imm:$src2))]>,
- OpSize, VEX;
+ Sched<[WriteAESKeyGenLd]>, VEX;
}
def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, i8imm:$src2),
"aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
- OpSize;
+ Sched<[WriteAESKeyGen]>;
def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
(ins i128mem:$src1, i8imm:$src2),
"aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(int_x86_aesni_aeskeygenassist (memopv2i64 addr:$src1), imm:$src2))]>,
- OpSize;
+ Sched<[WriteAESKeyGenLd]>;
//===----------------------------------------------------------------------===//
// PCLMUL Instructions
@@ -7453,13 +8028,15 @@ def VPCLMULQDQrr : AVXPCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
"vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR128:$dst,
- (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))]>;
+ (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))]>,
+ Sched<[WriteCLMul]>;
def VPCLMULQDQrm : AVXPCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
"vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
- (loadv2i64 addr:$src2), imm:$src3))]>;
+ (loadv2i64 addr:$src2), imm:$src3))]>,
+ Sched<[WriteCLMulLd, ReadAfterLd]>;
// Carry-less Multiplication instructions
let Constraints = "$src1 = $dst" in {
@@ -7468,31 +8045,34 @@ def PCLMULQDQrr : PCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
"pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR128:$dst,
(int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))],
- IIC_SSE_PCLMULQDQ_RR>;
+ IIC_SSE_PCLMULQDQ_RR>, Sched<[WriteCLMul]>;
def PCLMULQDQrm : PCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
"pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
(memopv2i64 addr:$src2), imm:$src3))],
- IIC_SSE_PCLMULQDQ_RM>;
+ IIC_SSE_PCLMULQDQ_RM>,
+ Sched<[WriteCLMulLd, ReadAfterLd]>;
} // Constraints = "$src1 = $dst"
multiclass pclmul_alias<string asm, int immop> {
def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
- (PCLMULQDQrr VR128:$dst, VR128:$src, immop)>;
+ (PCLMULQDQrr VR128:$dst, VR128:$src, immop), 0>;
def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
- (PCLMULQDQrm VR128:$dst, i128mem:$src, immop)>;
+ (PCLMULQDQrm VR128:$dst, i128mem:$src, immop), 0>;
def : InstAlias<!strconcat("vpclmul", asm,
"dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
- (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop)>;
+ (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop),
+ 0>;
def : InstAlias<!strconcat("vpclmul", asm,
"dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
- (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop)>;
+ (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop),
+ 0>;
}
defm : pclmul_alias<"hqhq", 0x11>;
defm : pclmul_alias<"hqlq", 0x01>;
@@ -7506,16 +8086,16 @@ defm : pclmul_alias<"lqlq", 0x00>;
let Predicates = [HasSSE4A] in {
let Constraints = "$src = $dst" in {
-def EXTRQI : Ii8<0x78, MRM0r, (outs VR128:$dst),
+def EXTRQI : Ii8<0x78, MRMXr, (outs VR128:$dst),
(ins VR128:$src, i8imm:$len, i8imm:$idx),
"extrq\t{$idx, $len, $src|$src, $len, $idx}",
[(set VR128:$dst, (int_x86_sse4a_extrqi VR128:$src, imm:$len,
- imm:$idx))]>, TB, OpSize;
+ imm:$idx))]>, PD;
def EXTRQ : I<0x79, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src, VR128:$mask),
"extrq\t{$mask, $src|$src, $mask}",
[(set VR128:$dst, (int_x86_sse4a_extrq VR128:$src,
- VR128:$mask))]>, TB, OpSize;
+ VR128:$mask))]>, PD;
def INSERTQI : Ii8<0x78, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src, VR128:$src2, i8imm:$len, i8imm:$idx),
@@ -7547,43 +8127,59 @@ def MOVNTSD : I<0x2B, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
// destination operand
//
class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
- X86MemOperand x86memop, Intrinsic Int> :
+ X86MemOperand x86memop, Intrinsic Int, SchedWrite Sched> :
+ AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set RC:$dst, (Int addr:$src))]>, Sched<[Sched]>, VEX;
+
+class avx_broadcast_no_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
+ X86MemOperand x86memop, ValueType VT,
+ PatFrag ld_frag, SchedWrite Sched> :
AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set RC:$dst, (Int addr:$src))]>, VEX;
+ [(set RC:$dst, (VT (X86VBroadcast (ld_frag addr:$src))))]>,
+ Sched<[Sched]>, VEX {
+ let mayLoad = 1;
+}
// AVX2 adds register forms
class avx2_broadcast_reg<bits<8> opc, string OpcodeStr, RegisterClass RC,
- Intrinsic Int> :
+ Intrinsic Int, SchedWrite Sched> :
AVX28I<opc, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set RC:$dst, (Int VR128:$src))]>, VEX;
+ [(set RC:$dst, (Int VR128:$src))]>, Sched<[Sched]>, VEX;
let ExeDomain = SSEPackedSingle in {
- def VBROADCASTSSrm : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
- int_x86_avx_vbroadcast_ss>;
- def VBROADCASTSSYrm : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
- int_x86_avx_vbroadcast_ss_256>, VEX_L;
+ def VBROADCASTSSrm : avx_broadcast_no_int<0x18, "vbroadcastss", VR128,
+ f32mem, v4f32, loadf32, WriteLoad>;
+ def VBROADCASTSSYrm : avx_broadcast_no_int<0x18, "vbroadcastss", VR256,
+ f32mem, v8f32, loadf32,
+ WriteFShuffleLd>, VEX_L;
}
let ExeDomain = SSEPackedDouble in
-def VBROADCASTSDYrm : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
- int_x86_avx_vbroadcast_sd_256>, VEX_L;
+def VBROADCASTSDYrm : avx_broadcast_no_int<0x19, "vbroadcastsd", VR256, f64mem,
+ v4f64, loadf64, WriteFShuffleLd>, VEX_L;
def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
- int_x86_avx_vbroadcastf128_pd_256>, VEX_L;
+ int_x86_avx_vbroadcastf128_pd_256,
+ WriteFShuffleLd>, VEX_L;
let ExeDomain = SSEPackedSingle in {
def VBROADCASTSSrr : avx2_broadcast_reg<0x18, "vbroadcastss", VR128,
- int_x86_avx2_vbroadcast_ss_ps>;
+ int_x86_avx2_vbroadcast_ss_ps,
+ WriteFShuffle>;
def VBROADCASTSSYrr : avx2_broadcast_reg<0x18, "vbroadcastss", VR256,
- int_x86_avx2_vbroadcast_ss_ps_256>, VEX_L;
+ int_x86_avx2_vbroadcast_ss_ps_256,
+ WriteFShuffle256>, VEX_L;
}
let ExeDomain = SSEPackedDouble in
def VBROADCASTSDYrr : avx2_broadcast_reg<0x19, "vbroadcastsd", VR256,
- int_x86_avx2_vbroadcast_sd_pd_256>, VEX_L;
+ int_x86_avx2_vbroadcast_sd_pd_256,
+ WriteFShuffle256>, VEX_L;
let Predicates = [HasAVX2] in
def VBROADCASTI128 : avx_broadcast<0x5A, "vbroadcasti128", VR256, i128mem,
- int_x86_avx2_vbroadcasti128>, VEX_L;
+ int_x86_avx2_vbroadcasti128, WriteLoad>,
+ VEX_L;
let Predicates = [HasAVX] in
def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
@@ -7597,12 +8193,12 @@ let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR128:$src2, i8imm:$src3),
"vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- []>, VEX_4V, VEX_L;
+ []>, Sched<[WriteFShuffle]>, VEX_4V, VEX_L;
let mayLoad = 1 in
def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, f128mem:$src2, i8imm:$src3),
"vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- []>, VEX_4V, VEX_L;
+ []>, Sched<[WriteFShuffleLd, ReadAfterLd]>, VEX_4V, VEX_L;
}
let Predicates = [HasAVX] in {
@@ -7671,12 +8267,12 @@ let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
(ins VR256:$src1, i8imm:$src2),
"vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- []>, VEX, VEX_L;
+ []>, Sched<[WriteFShuffle]>, VEX, VEX_L;
let mayStore = 1 in
def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
(ins f128mem:$dst, VR256:$src1, i8imm:$src2),
"vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- []>, VEX, VEX_L;
+ []>, Sched<[WriteStore]>, VEX, VEX_L;
}
// AVX1 patterns
@@ -7785,22 +8381,26 @@ multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
+ [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V,
+ Sched<[WriteFShuffle]>;
def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop_i:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (IntVar RC:$src1,
- (bitconvert (i_frag addr:$src2))))]>, VEX_4V;
+ (bitconvert (i_frag addr:$src2))))]>, VEX_4V,
+ Sched<[WriteFShuffleLd, ReadAfterLd]>;
def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, i8imm:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, (vt (X86VPermilp RC:$src1, (i8 imm:$src2))))]>, VEX;
+ [(set RC:$dst, (vt (X86VPermilp RC:$src1, (i8 imm:$src2))))]>, VEX,
+ Sched<[WriteFShuffle]>;
def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
(ins x86memop_f:$src1, i8imm:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
- (vt (X86VPermilp (memop addr:$src1), (i8 imm:$src2))))]>, VEX;
+ (vt (X86VPermilp (memop addr:$src1), (i8 imm:$src2))))]>, VEX,
+ Sched<[WriteFShuffleLd]>;
}
let ExeDomain = SSEPackedSingle in {
@@ -7841,12 +8441,14 @@ def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, i8imm:$src3),
"vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR256:$dst, (v8f32 (X86VPerm2x128 VR256:$src1, VR256:$src2,
- (i8 imm:$src3))))]>, VEX_4V, VEX_L;
+ (i8 imm:$src3))))]>, VEX_4V, VEX_L,
+ Sched<[WriteFShuffle]>;
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, f256mem:$src2, i8imm:$src3),
"vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (loadv8f32 addr:$src2),
- (i8 imm:$src3)))]>, VEX_4V, VEX_L;
+ (i8 imm:$src3)))]>, VEX_4V, VEX_L,
+ Sched<[WriteFShuffleLd, ReadAfterLd]>;
}
let Predicates = [HasAVX] in {
@@ -7888,11 +8490,11 @@ let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
// Zero All YMM registers
def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
- [(int_x86_avx_vzeroall)]>, TB, VEX, VEX_L, Requires<[HasAVX]>;
+ [(int_x86_avx_vzeroall)]>, PS, VEX, VEX_L, Requires<[HasAVX]>;
// Zero Upper bits of YMM registers
def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
- [(int_x86_avx_vzeroupper)]>, TB, VEX, Requires<[HasAVX]>;
+ [(int_x86_avx_vzeroupper)]>, PS, VEX, Requires<[HasAVX]>;
}
//===----------------------------------------------------------------------===//
@@ -7902,10 +8504,11 @@ multiclass f16c_ph2ps<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
def rr : I<0x13, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
"vcvtph2ps\t{$src, $dst|$dst, $src}",
[(set RC:$dst, (Int VR128:$src))]>,
- T8, OpSize, VEX;
+ T8PD, VEX, Sched<[WriteCvtF2F]>;
let neverHasSideEffects = 1, mayLoad = 1 in
def rm : I<0x13, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
- "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, T8, OpSize, VEX;
+ "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, T8PD, VEX,
+ Sched<[WriteCvtF2FLd]>;
}
multiclass f16c_ps2ph<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
@@ -7913,12 +8516,13 @@ multiclass f16c_ps2ph<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
(ins RC:$src1, i32i8imm:$src2),
"vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst, (Int RC:$src1, imm:$src2))]>,
- TA, OpSize, VEX;
- let neverHasSideEffects = 1, mayStore = 1 in
+ TAPD, VEX, Sched<[WriteCvtF2F]>;
+ let neverHasSideEffects = 1, mayStore = 1,
+ SchedRW = [WriteCvtF2FLd, WriteRMW] in
def mr : Ii8<0x1D, MRMDestMem, (outs),
(ins x86memop:$dst, RC:$src1, i32i8imm:$src2),
"vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
- TA, OpSize, VEX;
+ TAPD, VEX;
}
let Predicates = [HasF16C] in {
@@ -7926,6 +8530,27 @@ let Predicates = [HasF16C] in {
defm VCVTPH2PSY : f16c_ph2ps<VR256, f128mem, int_x86_vcvtph2ps_256>, VEX_L;
defm VCVTPS2PH : f16c_ps2ph<VR128, f64mem, int_x86_vcvtps2ph_128>;
defm VCVTPS2PHY : f16c_ps2ph<VR256, f128mem, int_x86_vcvtps2ph_256>, VEX_L;
+
+ // Pattern match vcvtph2ps of a scalar i64 load.
+ def : Pat<(int_x86_vcvtph2ps_128 (vzmovl_v2i64 addr:$src)),
+ (VCVTPH2PSrm addr:$src)>;
+ def : Pat<(int_x86_vcvtph2ps_128 (vzload_v2i64 addr:$src)),
+ (VCVTPH2PSrm addr:$src)>;
+}
+
+// Patterns for matching conversions from float to half-float and vice versa.
+let Predicates = [HasF16C] in {
+ def : Pat<(fp_to_f16 FR32:$src),
+ (i16 (EXTRACT_SUBREG (VMOVPDI2DIrr (VCVTPS2PHrr
+ (COPY_TO_REGCLASS FR32:$src, VR128), 0)), sub_16bit))>;
+
+ def : Pat<(f16_to_fp GR16:$src),
+ (f32 (COPY_TO_REGCLASS (VCVTPH2PSrr
+ (COPY_TO_REGCLASS (MOVSX32rr16 GR16:$src), VR128)), FR32)) >;
+
+ def : Pat<(f16_to_fp (i16 (fp_to_f16 FR32:$src))),
+ (f32 (COPY_TO_REGCLASS (VCVTPH2PSrr
+ (VCVTPS2PHrr (COPY_TO_REGCLASS FR32:$src, VR128), 0)), FR32)) >;
}
//===----------------------------------------------------------------------===//
@@ -7942,7 +8567,7 @@ multiclass AVX2_binop_rmi_int<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
- VEX_4V;
+ Sched<[WriteBlend]>, VEX_4V;
def rmi : AVX2AIi8<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop:$src2, u32u8imm:$src3),
!strconcat(OpcodeStr,
@@ -7950,7 +8575,7 @@ multiclass AVX2_binop_rmi_int<bits<8> opc, string OpcodeStr,
[(set RC:$dst,
(IntId RC:$src1,
(bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
- VEX_4V;
+ Sched<[WriteBlendLd, ReadAfterLd]>, VEX_4V;
}
let isCommutable = 0 in {
@@ -7976,19 +8601,22 @@ multiclass avx2_broadcast<bits<8> opc, string OpcodeStr,
Intrinsic Int128, Intrinsic Int256> {
def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (Int128 VR128:$src))]>, VEX;
+ [(set VR128:$dst, (Int128 VR128:$src))]>,
+ Sched<[WriteShuffle]>, VEX;
def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst), (ins x86memop:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst,
- (Int128 (scalar_to_vector (ld_frag addr:$src))))]>, VEX;
+ (Int128 (scalar_to_vector (ld_frag addr:$src))))]>,
+ Sched<[WriteLoad]>, VEX;
def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (Int256 VR128:$src))]>, VEX, VEX_L;
+ [(set VR256:$dst, (Int256 VR128:$src))]>,
+ Sched<[WriteShuffle256]>, VEX, VEX_L;
def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst), (ins x86memop:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst,
(Int256 (scalar_to_vector (ld_frag addr:$src))))]>,
- VEX, VEX_L;
+ Sched<[WriteLoad]>, VEX, VEX_L;
}
defm VPBROADCASTB : avx2_broadcast<0x78, "vpbroadcastb", i8mem, loadi8,
@@ -8063,6 +8691,31 @@ let Predicates = [HasAVX2] in {
(VBROADCASTSSYrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
(VBROADCASTSDYrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
+
+ def : Pat<(v16i8 (X86VBroadcast GR8:$src)),
+ (VPBROADCASTBrr (COPY_TO_REGCLASS
+ (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
+ VR128))>;
+ def : Pat<(v32i8 (X86VBroadcast GR8:$src)),
+ (VPBROADCASTBYrr (COPY_TO_REGCLASS
+ (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
+ VR128))>;
+
+ def : Pat<(v8i16 (X86VBroadcast GR16:$src)),
+ (VPBROADCASTWrr (COPY_TO_REGCLASS
+ (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
+ VR128))>;
+ def : Pat<(v16i16 (X86VBroadcast GR16:$src)),
+ (VPBROADCASTWYrr (COPY_TO_REGCLASS
+ (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
+ VR128))>;
+
+ // The patterns for VPBROADCASTD are not needed because they would match
+ // the exact same thing as VBROADCASTSS patterns.
+
+ def : Pat<(v2i64 (X86VBroadcast GR64:$src)),
+ (VPBROADCASTQrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
+ // The v4i64 pattern is not needed because VBROADCASTSDYrr already match.
}
}
@@ -8077,13 +8730,6 @@ def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
}
let Predicates = [HasAVX] in {
-def : Pat<(v8f32 (X86VBroadcast (loadf32 addr:$src))),
- (VBROADCASTSSYrm addr:$src)>;
-def : Pat<(v4f64 (X86VBroadcast (loadf64 addr:$src))),
- (VBROADCASTSDYrm addr:$src)>;
-def : Pat<(v4f32 (X86VBroadcast (loadf32 addr:$src))),
- (VBROADCASTSSrm addr:$src)>;
-
// Provide fallback in case the load node that is used in the patterns above
// is used by additional users, which prevents the pattern selection.
let AddedComplexity = 20 in {
@@ -8124,7 +8770,7 @@ multiclass avx2_perm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
(OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>,
- VEX_4V, VEX_L;
+ Sched<[WriteFShuffle256]>, VEX_4V, VEX_L;
def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, i256mem:$src2),
!strconcat(OpcodeStr,
@@ -8132,7 +8778,7 @@ multiclass avx2_perm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
[(set VR256:$dst,
(OpVT (X86VPermv VR256:$src1,
(bitconvert (mem_frag addr:$src2)))))]>,
- VEX_4V, VEX_L;
+ Sched<[WriteFShuffle256Ld, ReadAfterLd]>, VEX_4V, VEX_L;
}
defm VPERMD : avx2_perm<0x36, "vpermd", loadv4i64, v8i32>;
@@ -8147,14 +8793,15 @@ multiclass avx2_perm_imm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
(OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>,
- VEX, VEX_L;
+ Sched<[WriteShuffle256]>, VEX, VEX_L;
def Ymi : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst),
(ins i256mem:$src1, i8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
(OpVT (X86VPermi (mem_frag addr:$src1),
- (i8 imm:$src2))))]>, VEX, VEX_L;
+ (i8 imm:$src2))))]>,
+ Sched<[WriteShuffle256Ld, ReadAfterLd]>, VEX, VEX_L;
}
defm VPERMQ : avx2_perm_imm<0x00, "vpermq", loadv4i64, v4i64>, VEX_W;
@@ -8168,12 +8815,14 @@ def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, i8imm:$src3),
"vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR256:$dst, (v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2,
- (i8 imm:$src3))))]>, VEX_4V, VEX_L;
+ (i8 imm:$src3))))]>, Sched<[WriteShuffle256]>,
+ VEX_4V, VEX_L;
def VPERM2I128rm : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, f256mem:$src2, i8imm:$src3),
"vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (loadv4i64 addr:$src2),
- (i8 imm:$src3)))]>, VEX_4V, VEX_L;
+ (i8 imm:$src3)))]>,
+ Sched<[WriteShuffle256Ld, ReadAfterLd]>, VEX_4V, VEX_L;
let Predicates = [HasAVX2] in {
def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
@@ -8202,12 +8851,12 @@ let neverHasSideEffects = 1 in {
def VINSERTI128rr : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR128:$src2, i8imm:$src3),
"vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- []>, VEX_4V, VEX_L;
+ []>, Sched<[WriteShuffle256]>, VEX_4V, VEX_L;
let mayLoad = 1 in
def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, i128mem:$src2, i8imm:$src3),
"vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- []>, VEX_4V, VEX_L;
+ []>, Sched<[WriteShuffle256Ld, ReadAfterLd]>, VEX_4V, VEX_L;
}
let Predicates = [HasAVX2] in {
@@ -8257,12 +8906,12 @@ def VEXTRACTI128rr : AVX2AIi8<0x39, MRMDestReg, (outs VR128:$dst),
"vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(int_x86_avx2_vextracti128 VR256:$src1, imm:$src2))]>,
- VEX, VEX_L;
+ Sched<[WriteShuffle256]>, VEX, VEX_L;
let neverHasSideEffects = 1, mayStore = 1 in
def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs),
(ins i128mem:$dst, VR256:$src1, i8imm:$src2),
"vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
- VEX, VEX_L;
+ Sched<[WriteStore]>, VEX, VEX_L;
let Predicates = [HasAVX2] in {
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
@@ -8347,27 +8996,27 @@ multiclass avx2_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
(vt128 (OpNode VR128:$src1, (vt128 VR128:$src2))))]>,
- VEX_4V;
+ VEX_4V, Sched<[WriteVarVecShift]>;
def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
(vt128 (OpNode VR128:$src1,
(vt128 (bitconvert (loadv2i64 addr:$src2))))))]>,
- VEX_4V;
+ VEX_4V, Sched<[WriteVarVecShiftLd, ReadAfterLd]>;
def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
(vt256 (OpNode VR256:$src1, (vt256 VR256:$src2))))]>,
- VEX_4V, VEX_L;
+ VEX_4V, VEX_L, Sched<[WriteVarVecShift]>;
def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, i256mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
(vt256 (OpNode VR256:$src1,
(vt256 (bitconvert (loadv4i64 addr:$src2))))))]>,
- VEX_4V, VEX_L;
+ VEX_4V, VEX_L, Sched<[WriteVarVecShiftLd, ReadAfterLd]>;
}
defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", shl, v4i32, v8i32>;
@@ -8395,12 +9044,18 @@ multiclass avx2_gather<bits<8> opc, string OpcodeStr, RegisterClass RC256,
let mayLoad = 1, Constraints
= "@earlyclobber $dst,@earlyclobber $mask_wb, $src1 = $dst, $mask = $mask_wb"
in {
- defm VGATHERDPD : avx2_gather<0x92, "vgatherdpd", VR256, vx64mem, vx64mem>, VEX_W;
- defm VGATHERQPD : avx2_gather<0x93, "vgatherqpd", VR256, vx64mem, vy64mem>, VEX_W;
- defm VGATHERDPS : avx2_gather<0x92, "vgatherdps", VR256, vx32mem, vy32mem>;
- defm VGATHERQPS : avx2_gather<0x93, "vgatherqps", VR128, vx32mem, vy32mem>;
defm VPGATHERDQ : avx2_gather<0x90, "vpgatherdq", VR256, vx64mem, vx64mem>, VEX_W;
defm VPGATHERQQ : avx2_gather<0x91, "vpgatherqq", VR256, vx64mem, vy64mem>, VEX_W;
defm VPGATHERDD : avx2_gather<0x90, "vpgatherdd", VR256, vx32mem, vy32mem>;
defm VPGATHERQD : avx2_gather<0x91, "vpgatherqd", VR128, vx32mem, vy32mem>;
+
+ let ExeDomain = SSEPackedDouble in {
+ defm VGATHERDPD : avx2_gather<0x92, "vgatherdpd", VR256, vx64mem, vx64mem>, VEX_W;
+ defm VGATHERQPD : avx2_gather<0x93, "vgatherqpd", VR256, vx64mem, vy64mem>, VEX_W;
+ }
+
+ let ExeDomain = SSEPackedSingle in {
+ defm VGATHERDPS : avx2_gather<0x92, "vgatherdps", VR256, vx32mem, vy32mem>;
+ defm VGATHERQPS : avx2_gather<0x93, "vgatherqps", VR128, vx32mem, vy32mem>;
+ }
}
diff --git a/contrib/llvm/lib/Target/X86/X86InstrSVM.td b/contrib/llvm/lib/Target/X86/X86InstrSVM.td
index 0191c01..c847be7e 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrSVM.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrSVM.td
@@ -31,7 +31,7 @@ def SKINIT : I<0x01, MRM_DE, (outs), (ins), "skinit\t{%eax|eax}", []>, TB;
// 0F 01 D8
let Uses = [EAX] in
def VMRUN32 : I<0x01, MRM_D8, (outs), (ins),
- "vmrun\t{%eax|eax}", []>, TB, Requires<[In32BitMode]>;
+ "vmrun\t{%eax|eax}", []>, TB, Requires<[Not64BitMode]>;
let Uses = [RAX] in
def VMRUN64 : I<0x01, MRM_D8, (outs), (ins),
"vmrun\t{%rax|rax}", []>, TB, Requires<[In64BitMode]>;
@@ -39,7 +39,7 @@ def VMRUN64 : I<0x01, MRM_D8, (outs), (ins),
// 0F 01 DA
let Uses = [EAX] in
def VMLOAD32 : I<0x01, MRM_DA, (outs), (ins),
- "vmload\t{%eax|eax}", []>, TB, Requires<[In32BitMode]>;
+ "vmload\t{%eax|eax}", []>, TB, Requires<[Not64BitMode]>;
let Uses = [RAX] in
def VMLOAD64 : I<0x01, MRM_DA, (outs), (ins),
"vmload\t{%rax|rax}", []>, TB, Requires<[In64BitMode]>;
@@ -47,7 +47,7 @@ def VMLOAD64 : I<0x01, MRM_DA, (outs), (ins),
// 0F 01 DB
let Uses = [EAX] in
def VMSAVE32 : I<0x01, MRM_DB, (outs), (ins),
- "vmsave\t{%eax|eax}", []>, TB, Requires<[In32BitMode]>;
+ "vmsave\t{%eax|eax}", []>, TB, Requires<[Not64BitMode]>;
let Uses = [RAX] in
def VMSAVE64 : I<0x01, MRM_DB, (outs), (ins),
"vmsave\t{%rax|rax}", []>, TB, Requires<[In64BitMode]>;
@@ -55,7 +55,7 @@ def VMSAVE64 : I<0x01, MRM_DB, (outs), (ins),
// 0F 01 DF
let Uses = [EAX, ECX] in
def INVLPGA32 : I<0x01, MRM_DF, (outs), (ins),
- "invlpga\t{%ecx, %eax|eax, ecx}", []>, TB, Requires<[In32BitMode]>;
+ "invlpga\t{%ecx, %eax|eax, ecx}", []>, TB, Requires<[Not64BitMode]>;
let Uses = [RAX, ECX] in
def INVLPGA64 : I<0x01, MRM_DF, (outs), (ins),
"invlpga\t{%ecx, %rax|rax, ecx}", []>, TB, Requires<[In64BitMode]>;
diff --git a/contrib/llvm/lib/Target/X86/X86InstrShiftRotate.td b/contrib/llvm/lib/Target/X86/X86InstrShiftRotate.td
index 1937770..d0bb523 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrShiftRotate.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrShiftRotate.td
@@ -22,10 +22,10 @@ def SHL8rCL : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1),
[(set GR8:$dst, (shl GR8:$src1, CL))], IIC_SR>;
def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
"shl{w}\t{%cl, $dst|$dst, cl}",
- [(set GR16:$dst, (shl GR16:$src1, CL))], IIC_SR>, OpSize;
+ [(set GR16:$dst, (shl GR16:$src1, CL))], IIC_SR>, OpSize16;
def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
"shl{l}\t{%cl, $dst|$dst, cl}",
- [(set GR32:$dst, (shl GR32:$src1, CL))], IIC_SR>;
+ [(set GR32:$dst, (shl GR32:$src1, CL))], IIC_SR>, OpSize32;
def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
"shl{q}\t{%cl, $dst|$dst, cl}",
[(set GR64:$dst, (shl GR64:$src1, CL))], IIC_SR>;
@@ -39,10 +39,11 @@ let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
def SHL16ri : Ii8<0xC1, MRM4r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
"shl{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (shl GR16:$src1, (i8 imm:$src2)))], IIC_SR>,
- OpSize;
+ OpSize16;
def SHL32ri : Ii8<0xC1, MRM4r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
"shl{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (shl GR32:$src1, (i8 imm:$src2)))], IIC_SR>;
+ [(set GR32:$dst, (shl GR32:$src1, (i8 imm:$src2)))], IIC_SR>,
+ OpSize32;
def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
"shl{q}\t{$src2, $dst|$dst, $src2}",
@@ -55,9 +56,9 @@ let hasSideEffects = 0 in {
def SHL8r1 : I<0xD0, MRM4r, (outs GR8:$dst), (ins GR8:$src1),
"shl{b}\t$dst", [], IIC_SR>;
def SHL16r1 : I<0xD1, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
- "shl{w}\t$dst", [], IIC_SR>, OpSize;
+ "shl{w}\t$dst", [], IIC_SR>, OpSize16;
def SHL32r1 : I<0xD1, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
- "shl{l}\t$dst", [], IIC_SR>;
+ "shl{l}\t$dst", [], IIC_SR>, OpSize32;
def SHL64r1 : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
"shl{q}\t$dst", [], IIC_SR>;
} // hasSideEffects = 0
@@ -75,10 +76,11 @@ def SHL8mCL : I<0xD2, MRM4m, (outs), (ins i8mem :$dst),
def SHL16mCL : I<0xD3, MRM4m, (outs), (ins i16mem:$dst),
"shl{w}\t{%cl, $dst|$dst, cl}",
[(store (shl (loadi16 addr:$dst), CL), addr:$dst)], IIC_SR>,
- OpSize;
+ OpSize16;
def SHL32mCL : I<0xD3, MRM4m, (outs), (ins i32mem:$dst),
"shl{l}\t{%cl, $dst|$dst, cl}",
- [(store (shl (loadi32 addr:$dst), CL), addr:$dst)], IIC_SR>;
+ [(store (shl (loadi32 addr:$dst), CL), addr:$dst)], IIC_SR>,
+ OpSize32;
def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
"shl{q}\t{%cl, $dst|$dst, cl}",
[(store (shl (loadi64 addr:$dst), CL), addr:$dst)], IIC_SR>;
@@ -90,12 +92,11 @@ def SHL8mi : Ii8<0xC0, MRM4m, (outs), (ins i8mem :$dst, i8imm:$src),
def SHL16mi : Ii8<0xC1, MRM4m, (outs), (ins i16mem:$dst, i8imm:$src),
"shl{w}\t{$src, $dst|$dst, $src}",
[(store (shl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)],
- IIC_SR>,
- OpSize;
+ IIC_SR>, OpSize16;
def SHL32mi : Ii8<0xC1, MRM4m, (outs), (ins i32mem:$dst, i8imm:$src),
"shl{l}\t{$src, $dst|$dst, $src}",
[(store (shl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src),
"shl{q}\t{$src, $dst|$dst, $src}",
[(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)],
@@ -109,12 +110,11 @@ def SHL8m1 : I<0xD0, MRM4m, (outs), (ins i8mem :$dst),
def SHL16m1 : I<0xD1, MRM4m, (outs), (ins i16mem:$dst),
"shl{w}\t$dst",
[(store (shl (loadi16 addr:$dst), (i8 1)), addr:$dst)],
- IIC_SR>,
- OpSize;
+ IIC_SR>, OpSize16;
def SHL32m1 : I<0xD1, MRM4m, (outs), (ins i32mem:$dst),
"shl{l}\t$dst",
[(store (shl (loadi32 addr:$dst), (i8 1)), addr:$dst)],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
"shl{q}\t$dst",
[(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)],
@@ -128,10 +128,10 @@ def SHR8rCL : I<0xD2, MRM5r, (outs GR8 :$dst), (ins GR8 :$src1),
[(set GR8:$dst, (srl GR8:$src1, CL))], IIC_SR>;
def SHR16rCL : I<0xD3, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
"shr{w}\t{%cl, $dst|$dst, cl}",
- [(set GR16:$dst, (srl GR16:$src1, CL))], IIC_SR>, OpSize;
+ [(set GR16:$dst, (srl GR16:$src1, CL))], IIC_SR>, OpSize16;
def SHR32rCL : I<0xD3, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
"shr{l}\t{%cl, $dst|$dst, cl}",
- [(set GR32:$dst, (srl GR32:$src1, CL))], IIC_SR>;
+ [(set GR32:$dst, (srl GR32:$src1, CL))], IIC_SR>, OpSize32;
def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
"shr{q}\t{%cl, $dst|$dst, cl}",
[(set GR64:$dst, (srl GR64:$src1, CL))], IIC_SR>;
@@ -143,11 +143,11 @@ def SHR8ri : Ii8<0xC0, MRM5r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
def SHR16ri : Ii8<0xC1, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
"shr{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (srl GR16:$src1, (i8 imm:$src2)))],
- IIC_SR>, OpSize;
+ IIC_SR>, OpSize16;
def SHR32ri : Ii8<0xC1, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
"shr{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (srl GR32:$src1, (i8 imm:$src2)))],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
"shr{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))], IIC_SR>;
@@ -158,10 +158,10 @@ def SHR8r1 : I<0xD0, MRM5r, (outs GR8:$dst), (ins GR8:$src1),
[(set GR8:$dst, (srl GR8:$src1, (i8 1)))], IIC_SR>;
def SHR16r1 : I<0xD1, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
"shr{w}\t$dst",
- [(set GR16:$dst, (srl GR16:$src1, (i8 1)))], IIC_SR>, OpSize;
+ [(set GR16:$dst, (srl GR16:$src1, (i8 1)))], IIC_SR>, OpSize16;
def SHR32r1 : I<0xD1, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
"shr{l}\t$dst",
- [(set GR32:$dst, (srl GR32:$src1, (i8 1)))], IIC_SR>;
+ [(set GR32:$dst, (srl GR32:$src1, (i8 1)))], IIC_SR>, OpSize32;
def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
"shr{q}\t$dst",
[(set GR64:$dst, (srl GR64:$src1, (i8 1)))], IIC_SR>;
@@ -176,10 +176,11 @@ def SHR8mCL : I<0xD2, MRM5m, (outs), (ins i8mem :$dst),
def SHR16mCL : I<0xD3, MRM5m, (outs), (ins i16mem:$dst),
"shr{w}\t{%cl, $dst|$dst, cl}",
[(store (srl (loadi16 addr:$dst), CL), addr:$dst)], IIC_SR>,
- OpSize;
+ OpSize16;
def SHR32mCL : I<0xD3, MRM5m, (outs), (ins i32mem:$dst),
"shr{l}\t{%cl, $dst|$dst, cl}",
- [(store (srl (loadi32 addr:$dst), CL), addr:$dst)], IIC_SR>;
+ [(store (srl (loadi32 addr:$dst), CL), addr:$dst)], IIC_SR>,
+ OpSize32;
def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
"shr{q}\t{%cl, $dst|$dst, cl}",
[(store (srl (loadi64 addr:$dst), CL), addr:$dst)], IIC_SR>;
@@ -191,12 +192,11 @@ def SHR8mi : Ii8<0xC0, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src),
def SHR16mi : Ii8<0xC1, MRM5m, (outs), (ins i16mem:$dst, i8imm:$src),
"shr{w}\t{$src, $dst|$dst, $src}",
[(store (srl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)],
- IIC_SR>,
- OpSize;
+ IIC_SR>, OpSize16;
def SHR32mi : Ii8<0xC1, MRM5m, (outs), (ins i32mem:$dst, i8imm:$src),
"shr{l}\t{$src, $dst|$dst, $src}",
[(store (srl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src),
"shr{q}\t{$src, $dst|$dst, $src}",
[(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)],
@@ -210,11 +210,11 @@ def SHR8m1 : I<0xD0, MRM5m, (outs), (ins i8mem :$dst),
def SHR16m1 : I<0xD1, MRM5m, (outs), (ins i16mem:$dst),
"shr{w}\t$dst",
[(store (srl (loadi16 addr:$dst), (i8 1)), addr:$dst)],
- IIC_SR>,OpSize;
+ IIC_SR>, OpSize16;
def SHR32m1 : I<0xD1, MRM5m, (outs), (ins i32mem:$dst),
"shr{l}\t$dst",
[(store (srl (loadi32 addr:$dst), (i8 1)), addr:$dst)],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
"shr{q}\t$dst",
[(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)],
@@ -230,11 +230,11 @@ def SAR8rCL : I<0xD2, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
def SAR16rCL : I<0xD3, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
"sar{w}\t{%cl, $dst|$dst, cl}",
[(set GR16:$dst, (sra GR16:$src1, CL))],
- IIC_SR>, OpSize;
+ IIC_SR>, OpSize16;
def SAR32rCL : I<0xD3, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
"sar{l}\t{%cl, $dst|$dst, cl}",
[(set GR32:$dst, (sra GR32:$src1, CL))],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
"sar{q}\t{%cl, $dst|$dst, cl}",
[(set GR64:$dst, (sra GR64:$src1, CL))],
@@ -248,12 +248,11 @@ def SAR8ri : Ii8<0xC0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
def SAR16ri : Ii8<0xC1, MRM7r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
"sar{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (sra GR16:$src1, (i8 imm:$src2)))],
- IIC_SR>,
- OpSize;
+ IIC_SR>, OpSize16;
def SAR32ri : Ii8<0xC1, MRM7r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
"sar{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (sra GR32:$src1, (i8 imm:$src2)))],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
"sar{q}\t{$src2, $dst|$dst, $src2}",
@@ -268,11 +267,11 @@ def SAR8r1 : I<0xD0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
def SAR16r1 : I<0xD1, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
"sar{w}\t$dst",
[(set GR16:$dst, (sra GR16:$src1, (i8 1)))],
- IIC_SR>, OpSize;
+ IIC_SR>, OpSize16;
def SAR32r1 : I<0xD1, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
"sar{l}\t$dst",
[(set GR32:$dst, (sra GR32:$src1, (i8 1)))],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
"sar{q}\t$dst",
[(set GR64:$dst, (sra GR64:$src1, (i8 1)))],
@@ -289,11 +288,11 @@ def SAR8mCL : I<0xD2, MRM7m, (outs), (ins i8mem :$dst),
def SAR16mCL : I<0xD3, MRM7m, (outs), (ins i16mem:$dst),
"sar{w}\t{%cl, $dst|$dst, cl}",
[(store (sra (loadi16 addr:$dst), CL), addr:$dst)],
- IIC_SR>, OpSize;
+ IIC_SR>, OpSize16;
def SAR32mCL : I<0xD3, MRM7m, (outs), (ins i32mem:$dst),
"sar{l}\t{%cl, $dst|$dst, cl}",
[(store (sra (loadi32 addr:$dst), CL), addr:$dst)],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
"sar{q}\t{%cl, $dst|$dst, cl}",
[(store (sra (loadi64 addr:$dst), CL), addr:$dst)],
@@ -306,12 +305,11 @@ def SAR8mi : Ii8<0xC0, MRM7m, (outs), (ins i8mem :$dst, i8imm:$src),
def SAR16mi : Ii8<0xC1, MRM7m, (outs), (ins i16mem:$dst, i8imm:$src),
"sar{w}\t{$src, $dst|$dst, $src}",
[(store (sra (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)],
- IIC_SR>,
- OpSize;
+ IIC_SR>, OpSize16;
def SAR32mi : Ii8<0xC1, MRM7m, (outs), (ins i32mem:$dst, i8imm:$src),
"sar{l}\t{$src, $dst|$dst, $src}",
[(store (sra (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def SAR64mi : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src),
"sar{q}\t{$src, $dst|$dst, $src}",
[(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)],
@@ -325,12 +323,11 @@ def SAR8m1 : I<0xD0, MRM7m, (outs), (ins i8mem :$dst),
def SAR16m1 : I<0xD1, MRM7m, (outs), (ins i16mem:$dst),
"sar{w}\t$dst",
[(store (sra (loadi16 addr:$dst), (i8 1)), addr:$dst)],
- IIC_SR>,
- OpSize;
+ IIC_SR>, OpSize16;
def SAR32m1 : I<0xD1, MRM7m, (outs), (ins i32mem:$dst),
"sar{l}\t$dst",
[(store (sra (loadi32 addr:$dst), (i8 1)), addr:$dst)],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
"sar{q}\t$dst",
[(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)],
@@ -352,20 +349,20 @@ def RCL8rCL : I<0xD2, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
"rcl{b}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
def RCL16r1 : I<0xD1, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
- "rcl{w}\t$dst", [], IIC_SR>, OpSize;
+ "rcl{w}\t$dst", [], IIC_SR>, OpSize16;
def RCL16ri : Ii8<0xC1, MRM2r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$cnt),
- "rcl{w}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize;
+ "rcl{w}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize16;
let Uses = [CL] in
def RCL16rCL : I<0xD3, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
- "rcl{w}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize;
+ "rcl{w}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize16;
def RCL32r1 : I<0xD1, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
- "rcl{l}\t$dst", [], IIC_SR>;
+ "rcl{l}\t$dst", [], IIC_SR>, OpSize32;
def RCL32ri : Ii8<0xC1, MRM2r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$cnt),
- "rcl{l}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
+ "rcl{l}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize32;
let Uses = [CL] in
def RCL32rCL : I<0xD3, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
- "rcl{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
+ "rcl{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize32;
def RCL64r1 : RI<0xD1, MRM2r, (outs GR64:$dst), (ins GR64:$src1),
@@ -386,20 +383,20 @@ def RCR8rCL : I<0xD2, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
"rcr{b}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
def RCR16r1 : I<0xD1, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
- "rcr{w}\t$dst", [], IIC_SR>, OpSize;
+ "rcr{w}\t$dst", [], IIC_SR>, OpSize16;
def RCR16ri : Ii8<0xC1, MRM3r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$cnt),
- "rcr{w}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize;
+ "rcr{w}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize16;
let Uses = [CL] in
def RCR16rCL : I<0xD3, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
- "rcr{w}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize;
+ "rcr{w}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize16;
def RCR32r1 : I<0xD1, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
- "rcr{l}\t$dst", [], IIC_SR>;
+ "rcr{l}\t$dst", [], IIC_SR>, OpSize32;
def RCR32ri : Ii8<0xC1, MRM3r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$cnt),
- "rcr{l}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
+ "rcr{l}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize32;
let Uses = [CL] in
def RCR32rCL : I<0xD3, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
- "rcr{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
+ "rcr{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize32;
def RCR64r1 : RI<0xD1, MRM3r, (outs GR64:$dst), (ins GR64:$src1),
"rcr{q}\t$dst", [], IIC_SR>;
@@ -417,13 +414,13 @@ def RCL8m1 : I<0xD0, MRM2m, (outs), (ins i8mem:$dst),
def RCL8mi : Ii8<0xC0, MRM2m, (outs), (ins i8mem:$dst, i8imm:$cnt),
"rcl{b}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
def RCL16m1 : I<0xD1, MRM2m, (outs), (ins i16mem:$dst),
- "rcl{w}\t$dst", [], IIC_SR>, OpSize;
+ "rcl{w}\t$dst", [], IIC_SR>, OpSize16;
def RCL16mi : Ii8<0xC1, MRM2m, (outs), (ins i16mem:$dst, i8imm:$cnt),
- "rcl{w}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize;
+ "rcl{w}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize16;
def RCL32m1 : I<0xD1, MRM2m, (outs), (ins i32mem:$dst),
- "rcl{l}\t$dst", [], IIC_SR>;
+ "rcl{l}\t$dst", [], IIC_SR>, OpSize32;
def RCL32mi : Ii8<0xC1, MRM2m, (outs), (ins i32mem:$dst, i8imm:$cnt),
- "rcl{l}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
+ "rcl{l}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize32;
def RCL64m1 : RI<0xD1, MRM2m, (outs), (ins i64mem:$dst),
"rcl{q}\t$dst", [], IIC_SR>;
def RCL64mi : RIi8<0xC1, MRM2m, (outs), (ins i64mem:$dst, i8imm:$cnt),
@@ -434,13 +431,13 @@ def RCR8m1 : I<0xD0, MRM3m, (outs), (ins i8mem:$dst),
def RCR8mi : Ii8<0xC0, MRM3m, (outs), (ins i8mem:$dst, i8imm:$cnt),
"rcr{b}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
def RCR16m1 : I<0xD1, MRM3m, (outs), (ins i16mem:$dst),
- "rcr{w}\t$dst", [], IIC_SR>, OpSize;
+ "rcr{w}\t$dst", [], IIC_SR>, OpSize16;
def RCR16mi : Ii8<0xC1, MRM3m, (outs), (ins i16mem:$dst, i8imm:$cnt),
- "rcr{w}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize;
+ "rcr{w}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize16;
def RCR32m1 : I<0xD1, MRM3m, (outs), (ins i32mem:$dst),
- "rcr{l}\t$dst", [], IIC_SR>;
+ "rcr{l}\t$dst", [], IIC_SR>, OpSize32;
def RCR32mi : Ii8<0xC1, MRM3m, (outs), (ins i32mem:$dst, i8imm:$cnt),
- "rcr{l}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
+ "rcr{l}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize32;
def RCR64m1 : RI<0xD1, MRM3m, (outs), (ins i64mem:$dst),
"rcr{q}\t$dst", [], IIC_SR>;
def RCR64mi : RIi8<0xC1, MRM3m, (outs), (ins i64mem:$dst, i8imm:$cnt),
@@ -450,18 +447,18 @@ let Uses = [CL] in {
def RCL8mCL : I<0xD2, MRM2m, (outs), (ins i8mem:$dst),
"rcl{b}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
def RCL16mCL : I<0xD3, MRM2m, (outs), (ins i16mem:$dst),
- "rcl{w}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize;
+ "rcl{w}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize16;
def RCL32mCL : I<0xD3, MRM2m, (outs), (ins i32mem:$dst),
- "rcl{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
+ "rcl{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize32;
def RCL64mCL : RI<0xD3, MRM2m, (outs), (ins i64mem:$dst),
"rcl{q}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
def RCR8mCL : I<0xD2, MRM3m, (outs), (ins i8mem:$dst),
"rcr{b}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
def RCR16mCL : I<0xD3, MRM3m, (outs), (ins i16mem:$dst),
- "rcr{w}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize;
+ "rcr{w}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize16;
def RCR32mCL : I<0xD3, MRM3m, (outs), (ins i32mem:$dst),
- "rcr{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
+ "rcr{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize32;
def RCR64mCL : RI<0xD3, MRM3m, (outs), (ins i64mem:$dst),
"rcr{q}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
}
@@ -476,10 +473,10 @@ def ROL8rCL : I<0xD2, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
[(set GR8:$dst, (rotl GR8:$src1, CL))], IIC_SR>;
def ROL16rCL : I<0xD3, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
"rol{w}\t{%cl, $dst|$dst, cl}",
- [(set GR16:$dst, (rotl GR16:$src1, CL))], IIC_SR>, OpSize;
+ [(set GR16:$dst, (rotl GR16:$src1, CL))], IIC_SR>, OpSize16;
def ROL32rCL : I<0xD3, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
"rol{l}\t{%cl, $dst|$dst, cl}",
- [(set GR32:$dst, (rotl GR32:$src1, CL))], IIC_SR>;
+ [(set GR32:$dst, (rotl GR32:$src1, CL))], IIC_SR>, OpSize32;
def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
"rol{q}\t{%cl, $dst|$dst, cl}",
[(set GR64:$dst, (rotl GR64:$src1, CL))], IIC_SR>;
@@ -491,12 +488,11 @@ def ROL8ri : Ii8<0xC0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
def ROL16ri : Ii8<0xC1, MRM0r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
"rol{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (rotl GR16:$src1, (i8 imm:$src2)))],
- IIC_SR>,
- OpSize;
+ IIC_SR>, OpSize16;
def ROL32ri : Ii8<0xC1, MRM0r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
"rol{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (rotl GR32:$src1, (i8 imm:$src2)))],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
"rol{q}\t{$src2, $dst|$dst, $src2}",
@@ -511,11 +507,11 @@ def ROL8r1 : I<0xD0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
def ROL16r1 : I<0xD1, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
"rol{w}\t$dst",
[(set GR16:$dst, (rotl GR16:$src1, (i8 1)))],
- IIC_SR>, OpSize;
+ IIC_SR>, OpSize16;
def ROL32r1 : I<0xD1, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
"rol{l}\t$dst",
[(set GR32:$dst, (rotl GR32:$src1, (i8 1)))],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
"rol{q}\t$dst",
[(set GR64:$dst, (rotl GR64:$src1, (i8 1)))],
@@ -531,11 +527,11 @@ def ROL8mCL : I<0xD2, MRM0m, (outs), (ins i8mem :$dst),
def ROL16mCL : I<0xD3, MRM0m, (outs), (ins i16mem:$dst),
"rol{w}\t{%cl, $dst|$dst, cl}",
[(store (rotl (loadi16 addr:$dst), CL), addr:$dst)],
- IIC_SR>, OpSize;
+ IIC_SR>, OpSize16;
def ROL32mCL : I<0xD3, MRM0m, (outs), (ins i32mem:$dst),
"rol{l}\t{%cl, $dst|$dst, cl}",
[(store (rotl (loadi32 addr:$dst), CL), addr:$dst)],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
"rol{q}\t{%cl, $dst|$dst, cl}",
[(store (rotl (loadi64 addr:$dst), CL), addr:$dst)],
@@ -548,12 +544,11 @@ def ROL8mi : Ii8<0xC0, MRM0m, (outs), (ins i8mem :$dst, i8imm:$src1),
def ROL16mi : Ii8<0xC1, MRM0m, (outs), (ins i16mem:$dst, i8imm:$src1),
"rol{w}\t{$src1, $dst|$dst, $src1}",
[(store (rotl (loadi16 addr:$dst), (i8 imm:$src1)), addr:$dst)],
- IIC_SR>,
- OpSize;
+ IIC_SR>, OpSize16;
def ROL32mi : Ii8<0xC1, MRM0m, (outs), (ins i32mem:$dst, i8imm:$src1),
"rol{l}\t{$src1, $dst|$dst, $src1}",
[(store (rotl (loadi32 addr:$dst), (i8 imm:$src1)), addr:$dst)],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def ROL64mi : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src1),
"rol{q}\t{$src1, $dst|$dst, $src1}",
[(store (rotl (loadi64 addr:$dst), (i8 imm:$src1)), addr:$dst)],
@@ -567,12 +562,11 @@ def ROL8m1 : I<0xD0, MRM0m, (outs), (ins i8mem :$dst),
def ROL16m1 : I<0xD1, MRM0m, (outs), (ins i16mem:$dst),
"rol{w}\t$dst",
[(store (rotl (loadi16 addr:$dst), (i8 1)), addr:$dst)],
- IIC_SR>,
- OpSize;
+ IIC_SR>, OpSize16;
def ROL32m1 : I<0xD1, MRM0m, (outs), (ins i32mem:$dst),
"rol{l}\t$dst",
[(store (rotl (loadi32 addr:$dst), (i8 1)), addr:$dst)],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
"rol{q}\t$dst",
[(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)],
@@ -586,10 +580,10 @@ def ROR8rCL : I<0xD2, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
[(set GR8:$dst, (rotr GR8:$src1, CL))], IIC_SR>;
def ROR16rCL : I<0xD3, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
"ror{w}\t{%cl, $dst|$dst, cl}",
- [(set GR16:$dst, (rotr GR16:$src1, CL))], IIC_SR>, OpSize;
+ [(set GR16:$dst, (rotr GR16:$src1, CL))], IIC_SR>, OpSize16;
def ROR32rCL : I<0xD3, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
"ror{l}\t{%cl, $dst|$dst, cl}",
- [(set GR32:$dst, (rotr GR32:$src1, CL))], IIC_SR>;
+ [(set GR32:$dst, (rotr GR32:$src1, CL))], IIC_SR>, OpSize32;
def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
"ror{q}\t{%cl, $dst|$dst, cl}",
[(set GR64:$dst, (rotr GR64:$src1, CL))], IIC_SR>;
@@ -601,12 +595,11 @@ def ROR8ri : Ii8<0xC0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
def ROR16ri : Ii8<0xC1, MRM1r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
"ror{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (rotr GR16:$src1, (i8 imm:$src2)))],
- IIC_SR>,
- OpSize;
+ IIC_SR>, OpSize16;
def ROR32ri : Ii8<0xC1, MRM1r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
"ror{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (rotr GR32:$src1, (i8 imm:$src2)))],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
"ror{q}\t{$src2, $dst|$dst, $src2}",
@@ -621,11 +614,11 @@ def ROR8r1 : I<0xD0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
def ROR16r1 : I<0xD1, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
"ror{w}\t$dst",
[(set GR16:$dst, (rotr GR16:$src1, (i8 1)))],
- IIC_SR>, OpSize;
+ IIC_SR>, OpSize16;
def ROR32r1 : I<0xD1, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
"ror{l}\t$dst",
[(set GR32:$dst, (rotr GR32:$src1, (i8 1)))],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
"ror{q}\t$dst",
[(set GR64:$dst, (rotr GR64:$src1, (i8 1)))],
@@ -641,11 +634,11 @@ def ROR8mCL : I<0xD2, MRM1m, (outs), (ins i8mem :$dst),
def ROR16mCL : I<0xD3, MRM1m, (outs), (ins i16mem:$dst),
"ror{w}\t{%cl, $dst|$dst, cl}",
[(store (rotr (loadi16 addr:$dst), CL), addr:$dst)],
- IIC_SR>, OpSize;
+ IIC_SR>, OpSize16;
def ROR32mCL : I<0xD3, MRM1m, (outs), (ins i32mem:$dst),
"ror{l}\t{%cl, $dst|$dst, cl}",
[(store (rotr (loadi32 addr:$dst), CL), addr:$dst)],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
"ror{q}\t{%cl, $dst|$dst, cl}",
[(store (rotr (loadi64 addr:$dst), CL), addr:$dst)],
@@ -658,12 +651,11 @@ def ROR8mi : Ii8<0xC0, MRM1m, (outs), (ins i8mem :$dst, i8imm:$src),
def ROR16mi : Ii8<0xC1, MRM1m, (outs), (ins i16mem:$dst, i8imm:$src),
"ror{w}\t{$src, $dst|$dst, $src}",
[(store (rotr (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)],
- IIC_SR>,
- OpSize;
+ IIC_SR>, OpSize16;
def ROR32mi : Ii8<0xC1, MRM1m, (outs), (ins i32mem:$dst, i8imm:$src),
"ror{l}\t{$src, $dst|$dst, $src}",
[(store (rotr (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def ROR64mi : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src),
"ror{q}\t{$src, $dst|$dst, $src}",
[(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)],
@@ -677,12 +669,11 @@ def ROR8m1 : I<0xD0, MRM1m, (outs), (ins i8mem :$dst),
def ROR16m1 : I<0xD1, MRM1m, (outs), (ins i16mem:$dst),
"ror{w}\t$dst",
[(store (rotr (loadi16 addr:$dst), (i8 1)), addr:$dst)],
- IIC_SR>,
- OpSize;
+ IIC_SR>, OpSize16;
def ROR32m1 : I<0xD1, MRM1m, (outs), (ins i32mem:$dst),
"ror{l}\t$dst",
[(store (rotr (loadi32 addr:$dst), (i8 1)), addr:$dst)],
- IIC_SR>;
+ IIC_SR>, OpSize32;
def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
"ror{q}\t$dst",
[(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)],
@@ -702,23 +693,23 @@ def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst),
"shld{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2, CL))],
IIC_SHD16_REG_CL>,
- TB, OpSize;
+ TB, OpSize16;
def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst),
(ins GR16:$src1, GR16:$src2),
"shrd{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2, CL))],
IIC_SHD16_REG_CL>,
- TB, OpSize;
+ TB, OpSize16;
def SHLD32rrCL : I<0xA5, MRMDestReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"shld{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2, CL))],
- IIC_SHD32_REG_CL>, TB;
+ IIC_SHD32_REG_CL>, TB, OpSize32;
def SHRD32rrCL : I<0xAD, MRMDestReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"shrd{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2, CL))],
- IIC_SHD32_REG_CL>, TB;
+ IIC_SHD32_REG_CL>, TB, OpSize32;
def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"shld{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
@@ -740,28 +731,28 @@ def SHLD16rri8 : Ii8<0xA4, MRMDestReg,
"shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2,
(i8 imm:$src3)))], IIC_SHD16_REG_IM>,
- TB, OpSize;
+ TB, OpSize16;
def SHRD16rri8 : Ii8<0xAC, MRMDestReg,
(outs GR16:$dst),
(ins GR16:$src1, GR16:$src2, i8imm:$src3),
"shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2,
(i8 imm:$src3)))], IIC_SHD16_REG_IM>,
- TB, OpSize;
+ TB, OpSize16;
def SHLD32rri8 : Ii8<0xA4, MRMDestReg,
(outs GR32:$dst),
(ins GR32:$src1, GR32:$src2, i8imm:$src3),
"shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2,
(i8 imm:$src3)))], IIC_SHD32_REG_IM>,
- TB;
+ TB, OpSize32;
def SHRD32rri8 : Ii8<0xAC, MRMDestReg,
(outs GR32:$dst),
(ins GR32:$src1, GR32:$src2, i8imm:$src3),
"shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2,
(i8 imm:$src3)))], IIC_SHD32_REG_IM>,
- TB;
+ TB, OpSize32;
def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
(outs GR64:$dst),
(ins GR64:$src1, GR64:$src2, i8imm:$src3),
@@ -784,20 +775,20 @@ let Uses = [CL] in {
def SHLD16mrCL : I<0xA5, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
"shld{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(store (X86shld (loadi16 addr:$dst), GR16:$src2, CL),
- addr:$dst)], IIC_SHD16_MEM_CL>, TB, OpSize;
+ addr:$dst)], IIC_SHD16_MEM_CL>, TB, OpSize16;
def SHRD16mrCL : I<0xAD, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
"shrd{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(store (X86shrd (loadi16 addr:$dst), GR16:$src2, CL),
- addr:$dst)], IIC_SHD16_MEM_CL>, TB, OpSize;
+ addr:$dst)], IIC_SHD16_MEM_CL>, TB, OpSize16;
def SHLD32mrCL : I<0xA5, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
"shld{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(store (X86shld (loadi32 addr:$dst), GR32:$src2, CL),
- addr:$dst)], IIC_SHD32_MEM_CL>, TB;
+ addr:$dst)], IIC_SHD32_MEM_CL>, TB, OpSize32;
def SHRD32mrCL : I<0xAD, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
"shrd{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
[(store (X86shrd (loadi32 addr:$dst), GR32:$src2, CL),
- addr:$dst)], IIC_SHD32_MEM_CL>, TB;
+ addr:$dst)], IIC_SHD32_MEM_CL>, TB, OpSize32;
def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
"shld{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
@@ -815,14 +806,14 @@ def SHLD16mri8 : Ii8<0xA4, MRMDestMem,
[(store (X86shld (loadi16 addr:$dst), GR16:$src2,
(i8 imm:$src3)), addr:$dst)],
IIC_SHD16_MEM_IM>,
- TB, OpSize;
+ TB, OpSize16;
def SHRD16mri8 : Ii8<0xAC, MRMDestMem,
(outs), (ins i16mem:$dst, GR16:$src2, i8imm:$src3),
"shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(store (X86shrd (loadi16 addr:$dst), GR16:$src2,
(i8 imm:$src3)), addr:$dst)],
IIC_SHD16_MEM_IM>,
- TB, OpSize;
+ TB, OpSize16;
def SHLD32mri8 : Ii8<0xA4, MRMDestMem,
(outs), (ins i32mem:$dst, GR32:$src2, i8imm:$src3),
@@ -830,14 +821,14 @@ def SHLD32mri8 : Ii8<0xA4, MRMDestMem,
[(store (X86shld (loadi32 addr:$dst), GR32:$src2,
(i8 imm:$src3)), addr:$dst)],
IIC_SHD32_MEM_IM>,
- TB;
+ TB, OpSize32;
def SHRD32mri8 : Ii8<0xAC, MRMDestMem,
(outs), (ins i32mem:$dst, GR32:$src2, i8imm:$src3),
"shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(store (X86shrd (loadi32 addr:$dst), GR32:$src2,
(i8 imm:$src3)), addr:$dst)],
IIC_SHD32_MEM_IM>,
- TB;
+ TB, OpSize32;
def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
(outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
@@ -905,8 +896,8 @@ let Predicates = [HasBMI2] in {
defm SARX64 : bmi_shift<"sarx{q}", GR64, i64mem>, T8XS, VEX_W;
defm SHRX32 : bmi_shift<"shrx{l}", GR32, i32mem>, T8XD;
defm SHRX64 : bmi_shift<"shrx{q}", GR64, i64mem>, T8XD, VEX_W;
- defm SHLX32 : bmi_shift<"shlx{l}", GR32, i32mem>, T8, OpSize;
- defm SHLX64 : bmi_shift<"shlx{q}", GR64, i64mem>, T8, OpSize, VEX_W;
+ defm SHLX32 : bmi_shift<"shlx{l}", GR32, i32mem>, T8PD;
+ defm SHLX64 : bmi_shift<"shlx{q}", GR64, i64mem>, T8PD, VEX_W;
// Prefer RORX which is non-destructive and doesn't update EFLAGS.
let AddedComplexity = 10 in {
diff --git a/contrib/llvm/lib/Target/X86/X86InstrSystem.td b/contrib/llvm/lib/Target/X86/X86InstrSystem.td
index 2196dc3..5402780 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrSystem.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrSystem.td
@@ -19,7 +19,7 @@ let Defs = [RAX, RDX] in
TB;
let Defs = [RAX, RCX, RDX] in
- def RDTSCP : I<0x01, MRM_F9, (outs), (ins), "rdtscp", []>, TB;
+ def RDTSCP : I<0x01, MRM_F9, (outs), (ins), "rdtscp", [(X86rdtscp)]>, TB;
// CPU flow control instructions
@@ -61,11 +61,12 @@ def SYSENTER : I<0x34, RawFrm, (outs), (ins), "sysenter", [],
def SYSEXIT : I<0x35, RawFrm, (outs), (ins), "sysexit{l}", [],
IIC_SYS_ENTER_EXIT>, TB;
-def SYSEXIT64 :RI<0x35, RawFrm, (outs), (ins), "sysexit{q}", []>, TB,
- Requires<[In64BitMode]>;
+def SYSEXIT64 :RI<0x35, RawFrm, (outs), (ins), "sysexit{q}", [],
+ IIC_SYS_ENTER_EXIT>, TB, Requires<[In64BitMode]>;
-def IRET16 : I<0xcf, RawFrm, (outs), (ins), "iret{w}", [], IIC_IRET>, OpSize;
-def IRET32 : I<0xcf, RawFrm, (outs), (ins), "iret{l|d}", [], IIC_IRET>;
+def IRET16 : I<0xcf, RawFrm, (outs), (ins), "iret{w}", [], IIC_IRET>, OpSize16;
+def IRET32 : I<0xcf, RawFrm, (outs), (ins), "iret{l|d}", [], IIC_IRET>,
+ OpSize32;
def IRET64 : RI<0xcf, RawFrm, (outs), (ins), "iretq", [], IIC_IRET>,
Requires<[In64BitMode]>;
} // SchedRW
@@ -80,44 +81,41 @@ def IN8rr : I<0xEC, RawFrm, (outs), (ins),
"in{b}\t{%dx, %al|al, dx}", [], IIC_IN_RR>;
let Defs = [AX], Uses = [DX] in
def IN16rr : I<0xED, RawFrm, (outs), (ins),
- "in{w}\t{%dx, %ax|ax, dx}", [], IIC_IN_RR>, OpSize;
+ "in{w}\t{%dx, %ax|ax, dx}", [], IIC_IN_RR>, OpSize16;
let Defs = [EAX], Uses = [DX] in
def IN32rr : I<0xED, RawFrm, (outs), (ins),
- "in{l}\t{%dx, %eax|eax, dx}", [], IIC_IN_RR>;
+ "in{l}\t{%dx, %eax|eax, dx}", [], IIC_IN_RR>, OpSize32;
let Defs = [AL] in
def IN8ri : Ii8<0xE4, RawFrm, (outs), (ins i8imm:$port),
"in{b}\t{$port, %al|al, $port}", [], IIC_IN_RI>;
let Defs = [AX] in
def IN16ri : Ii8<0xE5, RawFrm, (outs), (ins i8imm:$port),
- "in{w}\t{$port, %ax|ax, $port}", [], IIC_IN_RI>, OpSize;
+ "in{w}\t{$port, %ax|ax, $port}", [], IIC_IN_RI>, OpSize16;
let Defs = [EAX] in
def IN32ri : Ii8<0xE5, RawFrm, (outs), (ins i8imm:$port),
- "in{l}\t{$port, %eax|eax, $port}", [], IIC_IN_RI>;
+ "in{l}\t{$port, %eax|eax, $port}", [], IIC_IN_RI>, OpSize32;
let Uses = [DX, AL] in
def OUT8rr : I<0xEE, RawFrm, (outs), (ins),
"out{b}\t{%al, %dx|dx, al}", [], IIC_OUT_RR>;
let Uses = [DX, AX] in
def OUT16rr : I<0xEF, RawFrm, (outs), (ins),
- "out{w}\t{%ax, %dx|dx, ax}", [], IIC_OUT_RR>, OpSize;
+ "out{w}\t{%ax, %dx|dx, ax}", [], IIC_OUT_RR>, OpSize16;
let Uses = [DX, EAX] in
def OUT32rr : I<0xEF, RawFrm, (outs), (ins),
- "out{l}\t{%eax, %dx|dx, eax}", [], IIC_OUT_RR>;
+ "out{l}\t{%eax, %dx|dx, eax}", [], IIC_OUT_RR>, OpSize32;
let Uses = [AL] in
def OUT8ir : Ii8<0xE6, RawFrm, (outs), (ins i8imm:$port),
"out{b}\t{%al, $port|$port, al}", [], IIC_OUT_IR>;
let Uses = [AX] in
def OUT16ir : Ii8<0xE7, RawFrm, (outs), (ins i8imm:$port),
- "out{w}\t{%ax, $port|$port, ax}", [], IIC_OUT_IR>, OpSize;
+ "out{w}\t{%ax, $port|$port, ax}", [], IIC_OUT_IR>, OpSize16;
let Uses = [EAX] in
def OUT32ir : Ii8<0xE7, RawFrm, (outs), (ins i8imm:$port),
- "out{l}\t{%eax, $port|$port, eax}", [], IIC_OUT_IR>;
+ "out{l}\t{%eax, $port|$port, eax}", [], IIC_OUT_IR>, OpSize32;
-def IN8 : I<0x6C, RawFrm, (outs), (ins), "ins{b}", [], IIC_INS>;
-def IN16 : I<0x6D, RawFrm, (outs), (ins), "ins{w}", [], IIC_INS>, OpSize;
-def IN32 : I<0x6D, RawFrm, (outs), (ins), "ins{l}", [], IIC_INS>;
} // SchedRW
//===----------------------------------------------------------------------===//
@@ -125,14 +123,18 @@ def IN32 : I<0x6D, RawFrm, (outs), (ins), "ins{l}", [], IIC_INS>;
let SchedRW = [WriteSystem] in {
def MOV32rd : I<0x21, MRMDestReg, (outs GR32:$dst), (ins DEBUG_REG:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_DR>, TB;
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_DR>, TB,
+ Requires<[Not64BitMode]>;
def MOV64rd : I<0x21, MRMDestReg, (outs GR64:$dst), (ins DEBUG_REG:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_DR>, TB;
-
+ "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_DR>, TB,
+ Requires<[In64BitMode]>;
+
def MOV32dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR32:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_DR_REG>, TB;
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_DR_REG>, TB,
+ Requires<[Not64BitMode]>;
def MOV64dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR64:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_DR_REG>, TB;
+ "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_DR_REG>, TB,
+ Requires<[In64BitMode]>;
} // SchedRW
//===----------------------------------------------------------------------===//
@@ -140,14 +142,18 @@ def MOV64dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR64:$src),
let SchedRW = [WriteSystem] in {
def MOV32rc : I<0x20, MRMDestReg, (outs GR32:$dst), (ins CONTROL_REG:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_CR>, TB;
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_CR>, TB,
+ Requires<[Not64BitMode]>;
def MOV64rc : I<0x20, MRMDestReg, (outs GR64:$dst), (ins CONTROL_REG:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_CR>, TB;
-
+ "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_CR>, TB,
+ Requires<[In64BitMode]>;
+
def MOV32cr : I<0x22, MRMSrcReg, (outs CONTROL_REG:$dst), (ins GR32:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_CR_REG>, TB;
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_CR_REG>, TB,
+ Requires<[Not64BitMode]>;
def MOV64cr : I<0x22, MRMSrcReg, (outs CONTROL_REG:$dst), (ins GR64:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_CR_REG>, TB;
+ "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_CR_REG>, TB,
+ Requires<[In64BitMode]>;
} // SchedRW
//===----------------------------------------------------------------------===//
@@ -167,30 +173,30 @@ def GS_PREFIX : I<0x65, RawFrm, (outs), (ins), "gs", []>;
let SchedRW = [WriteMove] in {
def MOV16rs : I<0x8C, MRMDestReg, (outs GR16:$dst), (ins SEGMENT_REG:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_SR>, OpSize;
+ "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_SR>, OpSize16;
def MOV32rs : I<0x8C, MRMDestReg, (outs GR32:$dst), (ins SEGMENT_REG:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_SR>;
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_SR>, OpSize32;
def MOV64rs : RI<0x8C, MRMDestReg, (outs GR64:$dst), (ins SEGMENT_REG:$src),
"mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_SR>;
def MOV16ms : I<0x8C, MRMDestMem, (outs i16mem:$dst), (ins SEGMENT_REG:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV_MEM_SR>, OpSize;
+ "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV_MEM_SR>, OpSize16;
def MOV32ms : I<0x8C, MRMDestMem, (outs i32mem:$dst), (ins SEGMENT_REG:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_MEM_SR>;
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_MEM_SR>, OpSize32;
def MOV64ms : RI<0x8C, MRMDestMem, (outs i64mem:$dst), (ins SEGMENT_REG:$src),
"mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_MEM_SR>;
def MOV16sr : I<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR16:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV_SR_REG>, OpSize;
+ "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV_SR_REG>, OpSize16;
def MOV32sr : I<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR32:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_SR_REG>;
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_SR_REG>, OpSize32;
def MOV64sr : RI<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR64:$src),
"mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_SR_REG>;
def MOV16sm : I<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i16mem:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV_SR_MEM>, OpSize;
+ "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV_SR_MEM>, OpSize16;
def MOV32sm : I<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i32mem:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_SR_MEM>;
+ "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_SR_MEM>, OpSize32;
def MOV64sm : RI<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i64mem:$src),
"mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_SR_MEM>;
} // SchedRW
@@ -202,15 +208,19 @@ let SchedRW = [WriteSystem] in {
def SWAPGS : I<0x01, MRM_F8, (outs), (ins), "swapgs", [], IIC_SWAPGS>, TB;
def LAR16rm : I<0x02, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
- "lar{w}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RM>, TB, OpSize;
+ "lar{w}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RM>, TB,
+ OpSize16;
def LAR16rr : I<0x02, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
- "lar{w}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RR>, TB, OpSize;
+ "lar{w}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RR>, TB,
+ OpSize16;
// i16mem operand in LAR32rm and GR32 operand in LAR32rr is not a typo.
def LAR32rm : I<0x02, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
- "lar{l}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RM>, TB;
+ "lar{l}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RM>, TB,
+ OpSize32;
def LAR32rr : I<0x02, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
- "lar{l}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RR>, TB;
+ "lar{l}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RR>, TB,
+ OpSize32;
// i16mem operand in LAR64rm and GR32 operand in LAR64rr is not a typo.
def LAR64rm : RI<0x02, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
"lar{q}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RM>, TB;
@@ -218,13 +228,17 @@ def LAR64rr : RI<0x02, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
"lar{q}\t{$src, $dst|$dst, $src}", [], IIC_LAR_RR>, TB;
def LSL16rm : I<0x03, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
- "lsl{w}\t{$src, $dst|$dst, $src}", [], IIC_LSL_RM>, TB, OpSize;
+ "lsl{w}\t{$src, $dst|$dst, $src}", [], IIC_LSL_RM>, TB,
+ OpSize16;
def LSL16rr : I<0x03, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
- "lsl{w}\t{$src, $dst|$dst, $src}", [], IIC_LSL_RR>, TB, OpSize;
+ "lsl{w}\t{$src, $dst|$dst, $src}", [], IIC_LSL_RR>, TB,
+ OpSize16;
def LSL32rm : I<0x03, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
- "lsl{l}\t{$src, $dst|$dst, $src}", [], IIC_LSL_RM>, TB;
+ "lsl{l}\t{$src, $dst|$dst, $src}", [], IIC_LSL_RM>, TB,
+ OpSize32;
def LSL32rr : I<0x03, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
- "lsl{l}\t{$src, $dst|$dst, $src}", [], IIC_LSL_RR>, TB;
+ "lsl{l}\t{$src, $dst|$dst, $src}", [], IIC_LSL_RR>, TB,
+ OpSize32;
def LSL64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"lsl{q}\t{$src, $dst|$dst, $src}", [], IIC_LSL_RM>, TB;
def LSL64rr : RI<0x03, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
@@ -234,9 +248,9 @@ def INVLPG : I<0x01, MRM7m, (outs), (ins i8mem:$addr), "invlpg\t$addr",
[], IIC_INVLPG>, TB;
def STR16r : I<0x00, MRM1r, (outs GR16:$dst), (ins),
- "str{w}\t$dst", [], IIC_STR>, TB, OpSize;
+ "str{w}\t$dst", [], IIC_STR>, TB, OpSize16;
def STR32r : I<0x00, MRM1r, (outs GR32:$dst), (ins),
- "str{l}\t$dst", [], IIC_STR>, TB;
+ "str{l}\t$dst", [], IIC_STR>, TB, OpSize32;
def STR64r : RI<0x00, MRM1r, (outs GR64:$dst), (ins),
"str{q}\t$dst", [], IIC_STR>, TB;
def STRm : I<0x00, MRM1m, (outs i16mem:$dst), (ins),
@@ -248,105 +262,115 @@ def LTRm : I<0x00, MRM3m, (outs), (ins i16mem:$src),
"ltr{w}\t$src", [], IIC_LTR>, TB;
def PUSHCS16 : I<0x0E, RawFrm, (outs), (ins),
- "push{w}\t{%cs|cs}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>,
- OpSize;
+ "push{w}\t{%cs|cs}", [], IIC_PUSH_SR>,
+ OpSize16, Requires<[Not64BitMode]>;
def PUSHCS32 : I<0x0E, RawFrm, (outs), (ins),
- "push{l}\t{%cs|cs}", [], IIC_PUSH_CS>, Requires<[In32BitMode]>;
+ "push{l}\t{%cs|cs}", [], IIC_PUSH_CS>,
+ OpSize32, Requires<[Not64BitMode]>;
def PUSHSS16 : I<0x16, RawFrm, (outs), (ins),
- "push{w}\t{%ss|ss}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>,
- OpSize;
+ "push{w}\t{%ss|ss}", [], IIC_PUSH_SR>,
+ OpSize16, Requires<[Not64BitMode]>;
def PUSHSS32 : I<0x16, RawFrm, (outs), (ins),
- "push{l}\t{%ss|ss}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>;
+ "push{l}\t{%ss|ss}", [], IIC_PUSH_SR>,
+ OpSize32, Requires<[Not64BitMode]>;
def PUSHDS16 : I<0x1E, RawFrm, (outs), (ins),
- "push{w}\t{%ds|ds}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>,
- OpSize;
+ "push{w}\t{%ds|ds}", [], IIC_PUSH_SR>,
+ OpSize16, Requires<[Not64BitMode]>;
def PUSHDS32 : I<0x1E, RawFrm, (outs), (ins),
- "push{l}\t{%ds|ds}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>;
+ "push{l}\t{%ds|ds}", [], IIC_PUSH_SR>,
+ OpSize32, Requires<[Not64BitMode]>;
def PUSHES16 : I<0x06, RawFrm, (outs), (ins),
- "push{w}\t{%es|es}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>,
- OpSize;
+ "push{w}\t{%es|es}", [], IIC_PUSH_SR>,
+ OpSize16, Requires<[Not64BitMode]>;
def PUSHES32 : I<0x06, RawFrm, (outs), (ins),
- "push{l}\t{%es|es}", [], IIC_PUSH_SR>, Requires<[In32BitMode]>;
-
+ "push{l}\t{%es|es}", [], IIC_PUSH_SR>,
+ OpSize32, Requires<[Not64BitMode]>;
def PUSHFS16 : I<0xa0, RawFrm, (outs), (ins),
- "push{w}\t{%fs|fs}", [], IIC_PUSH_SR>, OpSize, TB;
+ "push{w}\t{%fs|fs}", [], IIC_PUSH_SR>, OpSize16, TB;
def PUSHFS32 : I<0xa0, RawFrm, (outs), (ins),
- "push{l}\t{%fs|fs}", [], IIC_PUSH_SR>, TB, Requires<[In32BitMode]>;
+ "push{l}\t{%fs|fs}", [], IIC_PUSH_SR>, TB,
+ OpSize32, Requires<[Not64BitMode]>;
def PUSHGS16 : I<0xa8, RawFrm, (outs), (ins),
- "push{w}\t{%gs|gs}", [], IIC_PUSH_SR>, OpSize, TB;
+ "push{w}\t{%gs|gs}", [], IIC_PUSH_SR>, OpSize16, TB;
def PUSHGS32 : I<0xa8, RawFrm, (outs), (ins),
- "push{l}\t{%gs|gs}", [], IIC_PUSH_SR>, TB, Requires<[In32BitMode]>;
-
+ "push{l}\t{%gs|gs}", [], IIC_PUSH_SR>, TB,
+ OpSize32, Requires<[Not64BitMode]>;
def PUSHFS64 : I<0xa0, RawFrm, (outs), (ins),
- "push{q}\t{%fs|fs}", [], IIC_PUSH_SR>, TB;
+ "push{q}\t{%fs|fs}", [], IIC_PUSH_SR>, TB,
+ OpSize32, Requires<[In64BitMode]>;
def PUSHGS64 : I<0xa8, RawFrm, (outs), (ins),
- "push{q}\t{%gs|gs}", [], IIC_PUSH_SR>, TB;
+ "push{q}\t{%gs|gs}", [], IIC_PUSH_SR>, TB,
+ OpSize32, Requires<[In64BitMode]>;
// No "pop cs" instruction.
def POPSS16 : I<0x17, RawFrm, (outs), (ins),
"pop{w}\t{%ss|ss}", [], IIC_POP_SR_SS>,
- OpSize, Requires<[In32BitMode]>;
+ OpSize16, Requires<[Not64BitMode]>;
def POPSS32 : I<0x17, RawFrm, (outs), (ins),
"pop{l}\t{%ss|ss}", [], IIC_POP_SR_SS>,
- Requires<[In32BitMode]>;
-
+ OpSize32, Requires<[Not64BitMode]>;
+
def POPDS16 : I<0x1F, RawFrm, (outs), (ins),
"pop{w}\t{%ds|ds}", [], IIC_POP_SR>,
- OpSize, Requires<[In32BitMode]>;
+ OpSize16, Requires<[Not64BitMode]>;
def POPDS32 : I<0x1F, RawFrm, (outs), (ins),
"pop{l}\t{%ds|ds}", [], IIC_POP_SR>,
- Requires<[In32BitMode]>;
-
+ OpSize32, Requires<[Not64BitMode]>;
+
def POPES16 : I<0x07, RawFrm, (outs), (ins),
"pop{w}\t{%es|es}", [], IIC_POP_SR>,
- OpSize, Requires<[In32BitMode]>;
+ OpSize16, Requires<[Not64BitMode]>;
def POPES32 : I<0x07, RawFrm, (outs), (ins),
"pop{l}\t{%es|es}", [], IIC_POP_SR>,
- Requires<[In32BitMode]>;
-
+ OpSize32, Requires<[Not64BitMode]>;
+
def POPFS16 : I<0xa1, RawFrm, (outs), (ins),
- "pop{w}\t{%fs|fs}", [], IIC_POP_SR>, OpSize, TB;
+ "pop{w}\t{%fs|fs}", [], IIC_POP_SR>, OpSize16, TB;
def POPFS32 : I<0xa1, RawFrm, (outs), (ins),
- "pop{l}\t{%fs|fs}", [], IIC_POP_SR>, TB, Requires<[In32BitMode]>;
+ "pop{l}\t{%fs|fs}", [], IIC_POP_SR>, TB,
+ OpSize32, Requires<[Not64BitMode]>;
def POPFS64 : I<0xa1, RawFrm, (outs), (ins),
- "pop{q}\t{%fs|fs}", [], IIC_POP_SR>, TB;
-
+ "pop{q}\t{%fs|fs}", [], IIC_POP_SR>, TB,
+ OpSize32, Requires<[In64BitMode]>;
+
def POPGS16 : I<0xa9, RawFrm, (outs), (ins),
- "pop{w}\t{%gs|gs}", [], IIC_POP_SR>, OpSize, TB;
+ "pop{w}\t{%gs|gs}", [], IIC_POP_SR>, OpSize16, TB;
def POPGS32 : I<0xa9, RawFrm, (outs), (ins),
- "pop{l}\t{%gs|gs}", [], IIC_POP_SR>, TB, Requires<[In32BitMode]>;
+ "pop{l}\t{%gs|gs}", [], IIC_POP_SR>, TB,
+ OpSize32, Requires<[Not64BitMode]>;
def POPGS64 : I<0xa9, RawFrm, (outs), (ins),
- "pop{q}\t{%gs|gs}", [], IIC_POP_SR>, TB;
-
+ "pop{q}\t{%gs|gs}", [], IIC_POP_SR>, TB,
+ OpSize32, Requires<[In64BitMode]>;
+
def LDS16rm : I<0xc5, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
- "lds{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize;
+ "lds{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize16;
def LDS32rm : I<0xc5, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
- "lds{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>;
+ "lds{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize32;
def LSS16rm : I<0xb2, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
- "lss{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize;
+ "lss{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize16;
def LSS32rm : I<0xb2, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
- "lss{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB;
+ "lss{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize32;
def LSS64rm : RI<0xb2, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
"lss{q}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB;
def LES16rm : I<0xc4, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
- "les{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize;
+ "les{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize16;
def LES32rm : I<0xc4, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
- "les{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>;
+ "les{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize32;
def LFS16rm : I<0xb4, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
- "lfs{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize;
+ "lfs{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize16;
def LFS32rm : I<0xb4, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
- "lfs{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB;
+ "lfs{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize32;
def LFS64rm : RI<0xb4, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
"lfs{q}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB;
def LGS16rm : I<0xb5, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
- "lgs{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize;
+ "lgs{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize16;
def LGS32rm : I<0xb5, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
- "lgs{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB;
+ "lgs{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize32;
def LGS64rm : RI<0xb5, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
"lgs{q}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB;
@@ -367,19 +391,23 @@ def VERWm : I<0x00, MRM5m, (outs), (ins i16mem:$seg),
let SchedRW = [WriteSystem] in {
def SGDT16m : I<0x01, MRM0m, (outs opaque48mem:$dst), (ins),
- "sgdt{w}\t$dst", [], IIC_SGDT>, TB, OpSize, Requires<[In32BitMode]>;
-def SGDTm : I<0x01, MRM0m, (outs opaque48mem:$dst), (ins),
- "sgdt\t$dst", [], IIC_SGDT>, TB;
+ "sgdt{w}\t$dst", [], IIC_SGDT>, TB, OpSize16, Requires<[Not64BitMode]>;
+def SGDT32m : I<0x01, MRM0m, (outs opaque48mem:$dst), (ins),
+ "sgdt{l}\t$dst", [], IIC_SGDT>, OpSize32, TB, Requires <[Not64BitMode]>;
+def SGDT64m : I<0x01, MRM0m, (outs opaque80mem:$dst), (ins),
+ "sgdt{q}\t$dst", [], IIC_SGDT>, TB, Requires <[In64BitMode]>;
def SIDT16m : I<0x01, MRM1m, (outs opaque48mem:$dst), (ins),
- "sidt{w}\t$dst", [], IIC_SIDT>, TB, OpSize, Requires<[In32BitMode]>;
-def SIDTm : I<0x01, MRM1m, (outs opaque48mem:$dst), (ins),
- "sidt\t$dst", []>, TB;
+ "sidt{w}\t$dst", [], IIC_SIDT>, TB, OpSize16, Requires<[Not64BitMode]>;
+def SIDT32m : I<0x01, MRM1m, (outs opaque48mem:$dst), (ins),
+ "sidt{l}\t$dst", []>, OpSize32, TB, Requires <[Not64BitMode]>;
+def SIDT64m : I<0x01, MRM1m, (outs opaque80mem:$dst), (ins),
+ "sidt{q}\t$dst", []>, TB, Requires <[In64BitMode]>;
def SLDT16r : I<0x00, MRM0r, (outs GR16:$dst), (ins),
- "sldt{w}\t$dst", [], IIC_SLDT>, TB, OpSize;
+ "sldt{w}\t$dst", [], IIC_SLDT>, TB, OpSize16;
def SLDT16m : I<0x00, MRM0m, (outs i16mem:$dst), (ins),
"sldt{w}\t$dst", [], IIC_SLDT>, TB;
def SLDT32r : I<0x00, MRM0r, (outs GR32:$dst), (ins),
- "sldt{l}\t$dst", [], IIC_SLDT>, TB;
+ "sldt{l}\t$dst", [], IIC_SLDT>, OpSize32, TB;
// LLDT is not interpreted specially in 64-bit mode because there is no sign
// extension.
@@ -389,13 +417,17 @@ def SLDT64m : RI<0x00, MRM0m, (outs i16mem:$dst), (ins),
"sldt{q}\t$dst", [], IIC_SLDT>, TB;
def LGDT16m : I<0x01, MRM2m, (outs), (ins opaque48mem:$src),
- "lgdt{w}\t$src", [], IIC_LGDT>, TB, OpSize, Requires<[In32BitMode]>;
-def LGDTm : I<0x01, MRM2m, (outs), (ins opaque48mem:$src),
- "lgdt\t$src", [], IIC_LGDT>, TB;
+ "lgdt{w}\t$src", [], IIC_LGDT>, TB, OpSize16, Requires<[Not64BitMode]>;
+def LGDT32m : I<0x01, MRM2m, (outs), (ins opaque48mem:$src),
+ "lgdt{l}\t$src", [], IIC_LGDT>, OpSize32, TB, Requires<[Not64BitMode]>;
+def LGDT64m : I<0x01, MRM2m, (outs), (ins opaque80mem:$src),
+ "lgdt{q}\t$src", [], IIC_LGDT>, TB, Requires<[In64BitMode]>;
def LIDT16m : I<0x01, MRM3m, (outs), (ins opaque48mem:$src),
- "lidt{w}\t$src", [], IIC_LIDT>, TB, OpSize, Requires<[In32BitMode]>;
-def LIDTm : I<0x01, MRM3m, (outs), (ins opaque48mem:$src),
- "lidt\t$src", [], IIC_LIDT>, TB;
+ "lidt{w}\t$src", [], IIC_LIDT>, TB, OpSize16, Requires<[Not64BitMode]>;
+def LIDT32m : I<0x01, MRM3m, (outs), (ins opaque48mem:$src),
+ "lidt{l}\t$src", [], IIC_LIDT>, OpSize32, TB, Requires<[Not64BitMode]>;
+def LIDT64m : I<0x01, MRM3m, (outs), (ins opaque80mem:$src),
+ "lidt{q}\t$src", [], IIC_LIDT>, TB, Requires<[In64BitMode]>;
def LLDT16r : I<0x00, MRM2r, (outs), (ins GR16:$src),
"lldt{w}\t$src", [], IIC_LLDT_REG>, TB;
def LLDT16m : I<0x00, MRM2m, (outs), (ins i16mem:$src),
@@ -407,12 +439,15 @@ def LLDT16m : I<0x00, MRM2m, (outs), (ins i16mem:$src),
let SchedRW = [WriteSystem] in {
def WRMSR : I<0x30, RawFrm, (outs), (ins), "wrmsr", [], IIC_WRMSR>, TB;
def RDMSR : I<0x32, RawFrm, (outs), (ins), "rdmsr", [], IIC_RDMSR>, TB;
-def RDPMC : I<0x33, RawFrm, (outs), (ins), "rdpmc", [], IIC_RDPMC>, TB;
+
+let Defs = [RAX, RDX], Uses = [ECX] in
+ def RDPMC : I<0x33, RawFrm, (outs), (ins), "rdpmc", [(X86rdpmc)], IIC_RDPMC>,
+ TB;
def SMSW16r : I<0x01, MRM4r, (outs GR16:$dst), (ins),
- "smsw{w}\t$dst", [], IIC_SMSW>, OpSize, TB;
+ "smsw{w}\t$dst", [], IIC_SMSW>, OpSize16, TB;
def SMSW32r : I<0x01, MRM4r, (outs GR32:$dst), (ins),
- "smsw{l}\t$dst", [], IIC_SMSW>, TB;
+ "smsw{l}\t$dst", [], IIC_SMSW>, OpSize32, TB;
// no m form encodable; use SMSW16m
def SMSW64r : RI<0x01, MRM4r, (outs GR64:$dst), (ins),
"smsw{q}\t$dst", [], IIC_SMSW>, TB;
@@ -425,8 +460,13 @@ def LMSW16r : I<0x01, MRM6r, (outs), (ins GR16:$src),
"lmsw{w}\t$src", [], IIC_LMSW_MEM>, TB;
def LMSW16m : I<0x01, MRM6m, (outs), (ins i16mem:$src),
"lmsw{w}\t$src", [], IIC_LMSW_REG>, TB;
-
-def CPUID : I<0xA2, RawFrm, (outs), (ins), "cpuid", [], IIC_CPUID>, TB;
+
+let Defs = [EAX, EBX, ECX, EDX], Uses = [EAX, ECX] in
+ def CPUID32 : I<0xA2, RawFrm, (outs), (ins), "cpuid", [], IIC_CPUID>, TB,
+ Requires<[Not64BitMode]>;
+let Defs = [RAX, RBX, RCX, RDX], Uses = [RAX, RCX] in
+ def CPUID64 : I<0xA2, RawFrm, (outs), (ins), "cpuid", [], IIC_CPUID>, TB,
+ Requires<[In64BitMode]>;
} // SchedRW
//===----------------------------------------------------------------------===//
@@ -448,77 +488,77 @@ let Uses = [RDX, RAX, RCX] in
let Uses = [RDX, RAX] in {
def XSAVE : I<0xAE, MRM4m, (outs opaque512mem:$dst), (ins),
"xsave\t$dst", []>, TB;
- def XSAVE64 : I<0xAE, MRM4m, (outs opaque512mem:$dst), (ins),
- "xsave{q|64}\t$dst", []>, TB, REX_W, Requires<[In64BitMode]>;
+ def XSAVE64 : RI<0xAE, MRM4m, (outs opaque512mem:$dst), (ins),
+ "xsave{q|64}\t$dst", []>, TB, Requires<[In64BitMode]>;
def XRSTOR : I<0xAE, MRM5m, (outs), (ins opaque512mem:$dst),
"xrstor\t$dst", []>, TB;
- def XRSTOR64 : I<0xAE, MRM5m, (outs), (ins opaque512mem:$dst),
- "xrstor{q|64}\t$dst", []>, TB, REX_W, Requires<[In64BitMode]>;
+ def XRSTOR64 : RI<0xAE, MRM5m, (outs), (ins opaque512mem:$dst),
+ "xrstor{q|64}\t$dst", []>, TB, Requires<[In64BitMode]>;
def XSAVEOPT : I<0xAE, MRM6m, (outs opaque512mem:$dst), (ins),
"xsaveopt\t$dst", []>, TB;
- def XSAVEOPT64 : I<0xAE, MRM6m, (outs opaque512mem:$dst), (ins),
- "xsaveopt{q|64}\t$dst", []>, TB, REX_W, Requires<[In64BitMode]>;
+ def XSAVEOPT64 : RI<0xAE, MRM6m, (outs opaque512mem:$dst), (ins),
+ "xsaveopt{q|64}\t$dst", []>, TB, Requires<[In64BitMode]>;
}
} // SchedRW
//===----------------------------------------------------------------------===//
// VIA PadLock crypto instructions
let Defs = [RAX, RDI], Uses = [RDX, RDI] in
- def XSTORE : I<0xc0, RawFrm, (outs), (ins), "xstore", []>, A7;
+ def XSTORE : I<0xa7, MRM_C0, (outs), (ins), "xstore", []>, TB;
def : InstAlias<"xstorerng", (XSTORE)>;
let Defs = [RSI, RDI], Uses = [RBX, RDX, RSI, RDI] in {
- def XCRYPTECB : I<0xc8, RawFrm, (outs), (ins), "xcryptecb", []>, A7;
- def XCRYPTCBC : I<0xd0, RawFrm, (outs), (ins), "xcryptcbc", []>, A7;
- def XCRYPTCTR : I<0xd8, RawFrm, (outs), (ins), "xcryptctr", []>, A7;
- def XCRYPTCFB : I<0xe0, RawFrm, (outs), (ins), "xcryptcfb", []>, A7;
- def XCRYPTOFB : I<0xe8, RawFrm, (outs), (ins), "xcryptofb", []>, A7;
+ def XCRYPTECB : I<0xa7, MRM_C8, (outs), (ins), "xcryptecb", []>, TB;
+ def XCRYPTCBC : I<0xa7, MRM_D0, (outs), (ins), "xcryptcbc", []>, TB;
+ def XCRYPTCTR : I<0xa7, MRM_D8, (outs), (ins), "xcryptctr", []>, TB;
+ def XCRYPTCFB : I<0xa7, MRM_E0, (outs), (ins), "xcryptcfb", []>, TB;
+ def XCRYPTOFB : I<0xa7, MRM_E8, (outs), (ins), "xcryptofb", []>, TB;
}
let Defs = [RAX, RSI, RDI], Uses = [RAX, RSI, RDI] in {
- def XSHA1 : I<0xc8, RawFrm, (outs), (ins), "xsha1", []>, A6;
- def XSHA256 : I<0xd0, RawFrm, (outs), (ins), "xsha256", []>, A6;
+ def XSHA1 : I<0xa6, MRM_C8, (outs), (ins), "xsha1", []>, TB;
+ def XSHA256 : I<0xa6, MRM_D0, (outs), (ins), "xsha256", []>, TB;
}
let Defs = [RAX, RDX, RSI], Uses = [RAX, RSI] in
- def MONTMUL : I<0xc0, RawFrm, (outs), (ins), "montmul", []>, A6;
+ def MONTMUL : I<0xa6, MRM_C0, (outs), (ins), "montmul", []>, TB;
//===----------------------------------------------------------------------===//
// FS/GS Base Instructions
let Predicates = [HasFSGSBase, In64BitMode] in {
def RDFSBASE : I<0xAE, MRM0r, (outs GR32:$dst), (ins),
"rdfsbase{l}\t$dst",
- [(set GR32:$dst, (int_x86_rdfsbase_32))]>, TB, XS;
+ [(set GR32:$dst, (int_x86_rdfsbase_32))]>, XS;
def RDFSBASE64 : RI<0xAE, MRM0r, (outs GR64:$dst), (ins),
"rdfsbase{q}\t$dst",
- [(set GR64:$dst, (int_x86_rdfsbase_64))]>, TB, XS;
+ [(set GR64:$dst, (int_x86_rdfsbase_64))]>, XS;
def RDGSBASE : I<0xAE, MRM1r, (outs GR32:$dst), (ins),
"rdgsbase{l}\t$dst",
- [(set GR32:$dst, (int_x86_rdgsbase_32))]>, TB, XS;
+ [(set GR32:$dst, (int_x86_rdgsbase_32))]>, XS;
def RDGSBASE64 : RI<0xAE, MRM1r, (outs GR64:$dst), (ins),
"rdgsbase{q}\t$dst",
- [(set GR64:$dst, (int_x86_rdgsbase_64))]>, TB, XS;
+ [(set GR64:$dst, (int_x86_rdgsbase_64))]>, XS;
def WRFSBASE : I<0xAE, MRM2r, (outs), (ins GR32:$src),
"wrfsbase{l}\t$src",
- [(int_x86_wrfsbase_32 GR32:$src)]>, TB, XS;
+ [(int_x86_wrfsbase_32 GR32:$src)]>, XS;
def WRFSBASE64 : RI<0xAE, MRM2r, (outs), (ins GR64:$src),
"wrfsbase{q}\t$src",
- [(int_x86_wrfsbase_64 GR64:$src)]>, TB, XS;
+ [(int_x86_wrfsbase_64 GR64:$src)]>, XS;
def WRGSBASE : I<0xAE, MRM3r, (outs), (ins GR32:$src),
"wrgsbase{l}\t$src",
- [(int_x86_wrgsbase_32 GR32:$src)]>, TB, XS;
+ [(int_x86_wrgsbase_32 GR32:$src)]>, XS;
def WRGSBASE64 : RI<0xAE, MRM3r, (outs), (ins GR64:$src),
"wrgsbase{q}\t$src",
- [(int_x86_wrgsbase_64 GR64:$src)]>, TB, XS;
+ [(int_x86_wrgsbase_64 GR64:$src)]>, XS;
}
//===----------------------------------------------------------------------===//
// INVPCID Instruction
def INVPCID32 : I<0x82, MRMSrcMem, (outs), (ins GR32:$src1, i128mem:$src2),
- "invpcid\t{$src2, $src1|$src1, $src2}", []>, OpSize, T8,
- Requires<[In32BitMode]>;
+ "invpcid\t{$src2, $src1|$src1, $src2}", []>, T8PD,
+ Requires<[Not64BitMode]>;
def INVPCID64 : I<0x82, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
- "invpcid\t{$src2, $src1|$src1, $src2}", []>, OpSize, T8,
+ "invpcid\t{$src2, $src1|$src1, $src2}", []>, T8PD,
Requires<[In64BitMode]>;
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/X86/X86InstrTSX.td b/contrib/llvm/lib/Target/X86/X86InstrTSX.td
index 59a6f1e..4940efc 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrTSX.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrTSX.td
@@ -40,7 +40,8 @@ def XABORT : Ii8<0xc6, MRM_F8, (outs), (ins i8imm:$imm),
// HLE prefixes
+let isAsmParserOnly = 1 in {
def XACQUIRE_PREFIX : I<0xF2, RawFrm, (outs), (ins), "xacquire", []>, Requires<[HasHLE]>;
-
def XRELEASE_PREFIX : I<0xF3, RawFrm, (outs), (ins), "xrelease", []>, Requires<[HasHLE]>;
+}
diff --git a/contrib/llvm/lib/Target/X86/X86InstrVMX.td b/contrib/llvm/lib/Target/X86/X86InstrVMX.td
index 6d3548f..79afe9a 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrVMX.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrVMX.td
@@ -17,22 +17,22 @@
// 66 0F 38 80
def INVEPT32 : I<0x80, MRMSrcMem, (outs), (ins GR32:$src1, i128mem:$src2),
- "invept\t{$src2, $src1|$src1, $src2}", []>, OpSize, T8,
- Requires<[In32BitMode]>;
+ "invept\t{$src2, $src1|$src1, $src2}", []>, T8PD,
+ Requires<[Not64BitMode]>;
def INVEPT64 : I<0x80, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
- "invept\t{$src2, $src1|$src1, $src2}", []>, OpSize, T8,
+ "invept\t{$src2, $src1|$src1, $src2}", []>, T8PD,
Requires<[In64BitMode]>;
// 66 0F 38 81
def INVVPID32 : I<0x81, MRMSrcMem, (outs), (ins GR32:$src1, i128mem:$src2),
- "invvpid\t{$src2, $src1|$src1, $src2}", []>, OpSize, T8,
- Requires<[In32BitMode]>;
+ "invvpid\t{$src2, $src1|$src1, $src2}", []>, T8PD,
+ Requires<[Not64BitMode]>;
def INVVPID64 : I<0x81, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
- "invvpid\t{$src2, $src1|$src1, $src2}", []>, OpSize, T8,
+ "invvpid\t{$src2, $src1|$src1, $src2}", []>, T8PD,
Requires<[In64BitMode]>;
// 0F 01 C1
def VMCALL : I<0x01, MRM_C1, (outs), (ins), "vmcall", []>, TB;
def VMCLEARm : I<0xC7, MRM6m, (outs), (ins i64mem:$vmcs),
- "vmclear\t$vmcs", []>, OpSize, TB;
+ "vmclear\t$vmcs", []>, PD;
// 0F 01 D4
def VMFUNC : I<0x01, MRM_D4, (outs), (ins), "vmfunc", []>, TB;
// 0F 01 C2
@@ -40,25 +40,25 @@ def VMLAUNCH : I<0x01, MRM_C2, (outs), (ins), "vmlaunch", []>, TB;
// 0F 01 C3
def VMRESUME : I<0x01, MRM_C3, (outs), (ins), "vmresume", []>, TB;
def VMPTRLDm : I<0xC7, MRM6m, (outs), (ins i64mem:$vmcs),
- "vmptrld\t$vmcs", []>, TB;
+ "vmptrld\t$vmcs", []>, PS;
def VMPTRSTm : I<0xC7, MRM7m, (outs i64mem:$vmcs), (ins),
"vmptrst\t$vmcs", []>, TB;
def VMREAD64rm : I<0x78, MRMDestMem, (outs i64mem:$dst), (ins GR64:$src),
- "vmread{q}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In64BitMode]>;
+ "vmread{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>;
def VMREAD64rr : I<0x78, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
- "vmread{q}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In64BitMode]>;
+ "vmread{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>;
def VMREAD32rm : I<0x78, MRMDestMem, (outs i32mem:$dst), (ins GR32:$src),
- "vmread{l}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In32BitMode]>;
+ "vmread{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>;
def VMREAD32rr : I<0x78, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
- "vmread{l}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In32BitMode]>;
+ "vmread{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>;
def VMWRITE64rm : I<0x79, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
- "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In64BitMode]>;
+ "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>;
def VMWRITE64rr : I<0x79, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
- "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In64BitMode]>;
+ "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>;
def VMWRITE32rm : I<0x79, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
- "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In32BitMode]>;
+ "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>;
def VMWRITE32rr : I<0x79, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
- "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In32BitMode]>;
+ "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>;
// 0F 01 C4
def VMXOFF : I<0x01, MRM_C4, (outs), (ins), "vmxoff", []>, TB;
def VMXON : I<0xC7, MRM6m, (outs), (ins i64mem:$vmxon),
diff --git a/contrib/llvm/lib/Target/X86/X86InstrXOP.td b/contrib/llvm/lib/Target/X86/X86InstrXOP.td
index 2b6ee5c..45e2ff0 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrXOP.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrXOP.td
@@ -14,10 +14,10 @@
multiclass xop2op<bits<8> opc, string OpcodeStr, Intrinsic Int, PatFrag memop> {
def rr : IXOP<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (Int VR128:$src))]>, VEX;
+ [(set VR128:$dst, (Int VR128:$src))]>, XOP;
def rm : IXOP<opc, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (Int (bitconvert (memop addr:$src))))]>, VEX;
+ [(set VR128:$dst, (Int (bitconvert (memop addr:$src))))]>, XOP;
}
defm VPHSUBWD : xop2op<0xE2, "vphsubwd", int_x86_xop_vphsubwd, memopv2i64>;
@@ -41,10 +41,10 @@ multiclass xop2opsld<bits<8> opc, string OpcodeStr, Intrinsic Int,
Operand memop, ComplexPattern mem_cpat> {
def rr : IXOP<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (Int VR128:$src))]>, VEX;
+ [(set VR128:$dst, (Int VR128:$src))]>, XOP;
def rm : IXOP<opc, MRMSrcMem, (outs VR128:$dst), (ins memop:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (Int (bitconvert mem_cpat:$src)))]>, VEX;
+ [(set VR128:$dst, (Int (bitconvert mem_cpat:$src)))]>, XOP;
}
defm VFRCZSS : xop2opsld<0x82, "vfrczss", int_x86_xop_vfrcz_ss,
@@ -56,10 +56,10 @@ multiclass xop2op128<bits<8> opc, string OpcodeStr, Intrinsic Int,
PatFrag memop> {
def rr : IXOP<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (Int VR128:$src))]>, VEX;
+ [(set VR128:$dst, (Int VR128:$src))]>, XOP;
def rm : IXOP<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (Int (bitconvert (memop addr:$src))))]>, VEX;
+ [(set VR128:$dst, (Int (bitconvert (memop addr:$src))))]>, XOP;
}
defm VFRCZPS : xop2op128<0x80, "vfrczps", int_x86_xop_vfrcz_ps, memopv4f32>;
@@ -69,10 +69,10 @@ multiclass xop2op256<bits<8> opc, string OpcodeStr, Intrinsic Int,
PatFrag memop> {
def rrY : IXOP<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (Int VR256:$src))]>, VEX, VEX_L;
+ [(set VR256:$dst, (Int VR256:$src))]>, XOP, VEX_L;
def rmY : IXOP<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (Int (bitconvert (memop addr:$src))))]>, VEX, VEX_L;
+ [(set VR256:$dst, (Int (bitconvert (memop addr:$src))))]>, XOP, VEX_L;
}
defm VFRCZPS : xop2op256<0x80, "vfrczps", int_x86_xop_vfrcz_ps_256, memopv8f32>;
@@ -82,19 +82,19 @@ multiclass xop3op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
def rr : IXOP<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR128:$dst, (Int VR128:$src1, VR128:$src2))]>, VEX_4VOp3;
+ [(set VR128:$dst, (Int VR128:$src1, VR128:$src2))]>, XOP_4VOp3;
def rm : IXOP<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
(Int VR128:$src1, (bitconvert (memopv2i64 addr:$src2))))]>,
- VEX_4V, VEX_W;
+ XOP_4V, VEX_W;
def mr : IXOP<opc, MRMSrcMem, (outs VR128:$dst),
(ins i128mem:$src1, VR128:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
(Int (bitconvert (memopv2i64 addr:$src1)), VR128:$src2))]>,
- VEX_4VOp3;
+ XOP_4VOp3;
}
defm VPSHLW : xop3op<0x95, "vpshlw", int_x86_xop_vpshlw>;
@@ -114,12 +114,12 @@ multiclass xop3opimm<bits<8> opc, string OpcodeStr, Intrinsic Int> {
def ri : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, i8imm:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR128:$dst, (Int VR128:$src1, imm:$src2))]>, VEX;
+ [(set VR128:$dst, (Int VR128:$src1, imm:$src2))]>, XOP;
def mi : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
(ins i128mem:$src1, i8imm:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
- (Int (bitconvert (memopv2i64 addr:$src1)), imm:$src2))]>, VEX;
+ (Int (bitconvert (memopv2i64 addr:$src1)), imm:$src2))]>, XOP;
}
defm VPROTW : xop3opimm<0xC1, "vprotw", int_x86_xop_vprotwi>;
@@ -134,14 +134,14 @@ multiclass xop4opm2<bits<8> opc, string OpcodeStr, Intrinsic Int> {
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
- (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, VEX_4V, VEX_I8IMM;
+ (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, XOP_4V, VEX_I8IMM;
def rm : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, VR128:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
(Int VR128:$src1, (bitconvert (memopv2i64 addr:$src2)),
- VR128:$src3))]>, VEX_4V, VEX_I8IMM;
+ VR128:$src3))]>, XOP_4V, VEX_I8IMM;
}
defm VPMADCSWD : xop4opm2<0xB6, "vpmadcswd", int_x86_xop_vpmadcswd>;
@@ -164,14 +164,14 @@ multiclass xop4opimm<bits<8> opc, string OpcodeStr, Intrinsic Int> {
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst, (Int VR128:$src1, VR128:$src2, imm:$src3))]>,
- VEX_4V;
+ XOP_4V;
def mi : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
(Int VR128:$src1, (bitconvert (memopv2i64 addr:$src2)),
- imm:$src3))]>, VEX_4V;
+ imm:$src3))]>, XOP_4V;
}
defm VPCOMB : xop4opimm<0xCC, "vpcomb", int_x86_xop_vpcomb>;
@@ -190,7 +190,7 @@ multiclass xop4op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst, (Int VR128:$src1, VR128:$src2, VR128:$src3))]>,
- VEX_4V, VEX_I8IMM;
+ XOP_4V, VEX_I8IMM;
def rm : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, i128mem:$src3),
!strconcat(OpcodeStr,
@@ -198,7 +198,7 @@ multiclass xop4op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
[(set VR128:$dst,
(Int VR128:$src1, VR128:$src2,
(bitconvert (memopv2i64 addr:$src3))))]>,
- VEX_4V, VEX_I8IMM, VEX_W, MemOp4;
+ XOP_4V, VEX_I8IMM, VEX_W, MemOp4;
def mr : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, VR128:$src3),
!strconcat(OpcodeStr,
@@ -206,7 +206,7 @@ multiclass xop4op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
[(set VR128:$dst,
(Int VR128:$src1, (bitconvert (memopv2i64 addr:$src2)),
VR128:$src3))]>,
- VEX_4V, VEX_I8IMM;
+ XOP_4V, VEX_I8IMM;
}
defm VPPERM : xop4op<0xA3, "vpperm", int_x86_xop_vpperm>;
@@ -218,7 +218,7 @@ multiclass xop4op256<bits<8> opc, string OpcodeStr, Intrinsic Int> {
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR256:$dst, (Int VR256:$src1, VR256:$src2, VR256:$src3))]>,
- VEX_4V, VEX_I8IMM, VEX_L;
+ XOP_4V, VEX_I8IMM, VEX_L;
def rmY : IXOPi8<opc, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, i256mem:$src3),
!strconcat(OpcodeStr,
@@ -226,7 +226,7 @@ multiclass xop4op256<bits<8> opc, string OpcodeStr, Intrinsic Int> {
[(set VR256:$dst,
(Int VR256:$src1, VR256:$src2,
(bitconvert (memopv4i64 addr:$src3))))]>,
- VEX_4V, VEX_I8IMM, VEX_W, MemOp4, VEX_L;
+ XOP_4V, VEX_I8IMM, VEX_W, MemOp4, VEX_L;
def mrY : IXOPi8<opc, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, f256mem:$src2, VR256:$src3),
!strconcat(OpcodeStr,
@@ -234,7 +234,7 @@ multiclass xop4op256<bits<8> opc, string OpcodeStr, Intrinsic Int> {
[(set VR256:$dst,
(Int VR256:$src1, (bitconvert (memopv4i64 addr:$src2)),
VR256:$src3))]>,
- VEX_4V, VEX_I8IMM, VEX_L;
+ XOP_4V, VEX_I8IMM, VEX_L;
}
defm VPCMOV : xop4op256<0xA2, "vpcmov", int_x86_xop_vpcmov_256>;
diff --git a/contrib/llvm/lib/Target/X86/X86JITInfo.cpp b/contrib/llvm/lib/Target/X86/X86JITInfo.cpp
index e99f2d9..a082c4f 100644
--- a/contrib/llvm/lib/Target/X86/X86JITInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86JITInfo.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "jit"
#include "X86JITInfo.h"
#include "X86Relocations.h"
#include "X86Subtarget.h"
@@ -24,6 +23,8 @@
#include <cstring>
using namespace llvm;
+#define DEBUG_TYPE "jit"
+
// Determine the platform we're running on
#if defined (__x86_64__) || defined (_M_AMD64) || defined (_M_X64)
# define X86_64_JIT
@@ -427,17 +428,22 @@ X86JITInfo::getLazyResolverFunction(JITCompilerFn F) {
TsanIgnoreWritesEnd();
#if defined (X86_32_JIT) && !defined (_MSC_VER)
- if (Subtarget->hasSSE1())
+#if defined(__SSE__)
+ // SSE Callback should be called for SSE-enabled LLVM.
+ return X86CompilationCallback_SSE;
+#else
+ if (useSSE)
return X86CompilationCallback_SSE;
#endif
+#endif
return X86CompilationCallback;
}
-X86JITInfo::X86JITInfo(X86TargetMachine &tm) : TM(tm) {
- Subtarget = &TM.getSubtarget<X86Subtarget>();
+X86JITInfo::X86JITInfo(bool UseSSE) {
+ useSSE = UseSSE;
useGOT = 0;
- TLSOffset = 0;
+ TLSOffset = nullptr;
}
void *X86JITInfo::emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr,
diff --git a/contrib/llvm/lib/Target/X86/X86JITInfo.h b/contrib/llvm/lib/Target/X86/X86JITInfo.h
index f916327..564343f 100644
--- a/contrib/llvm/lib/Target/X86/X86JITInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86JITInfo.h
@@ -19,57 +19,55 @@
#include "llvm/Target/TargetJITInfo.h"
namespace llvm {
- class X86TargetMachine;
class X86Subtarget;
class X86JITInfo : public TargetJITInfo {
- X86TargetMachine &TM;
- const X86Subtarget *Subtarget;
uintptr_t PICBase;
- char* TLSOffset;
+ char *TLSOffset;
+ bool useSSE;
public:
- explicit X86JITInfo(X86TargetMachine &tm);
+ explicit X86JITInfo(bool UseSSE);
/// replaceMachineCodeForFunction - Make it so that calling the function
/// whose machine code is at OLD turns into a call to NEW, perhaps by
/// overwriting OLD with a branch to NEW. This is used for self-modifying
/// code.
///
- virtual void replaceMachineCodeForFunction(void *Old, void *New);
+ void replaceMachineCodeForFunction(void *Old, void *New) override;
/// emitGlobalValueIndirectSym - Use the specified JITCodeEmitter object
/// to emit an indirect symbol which contains the address of the specified
/// ptr.
- virtual void *emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr,
- JITCodeEmitter &JCE);
+ void *emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr,
+ JITCodeEmitter &JCE) override;
// getStubLayout - Returns the size and alignment of the largest call stub
// on X86.
- virtual StubLayout getStubLayout();
+ StubLayout getStubLayout() override;
/// emitFunctionStub - Use the specified JITCodeEmitter object to emit a
/// small native function that simply calls the function at the specified
/// address.
- virtual void *emitFunctionStub(const Function* F, void *Target,
- JITCodeEmitter &JCE);
+ void *emitFunctionStub(const Function* F, void *Target,
+ JITCodeEmitter &JCE) override;
/// getPICJumpTableEntry - Returns the value of the jumptable entry for the
/// specific basic block.
- virtual uintptr_t getPICJumpTableEntry(uintptr_t BB, uintptr_t JTBase);
+ uintptr_t getPICJumpTableEntry(uintptr_t BB, uintptr_t JTBase) override;
/// getLazyResolverFunction - Expose the lazy resolver to the JIT.
- virtual LazyResolverFn getLazyResolverFunction(JITCompilerFn);
+ LazyResolverFn getLazyResolverFunction(JITCompilerFn) override;
/// relocate - Before the JIT can run a block of code that has been emitted,
/// it must rewrite the code to contain the actual addresses of any
/// referenced global symbols.
- virtual void relocate(void *Function, MachineRelocation *MR,
- unsigned NumRelocs, unsigned char* GOTBase);
+ void relocate(void *Function, MachineRelocation *MR,
+ unsigned NumRelocs, unsigned char* GOTBase) override;
/// allocateThreadLocalMemory - Each target has its own way of
/// handling thread local variables. This method returns a value only
/// meaningful to the target.
- virtual char* allocateThreadLocalMemory(size_t size);
+ char* allocateThreadLocalMemory(size_t size) override;
/// setPICBase / getPICBase - Getter / setter of PICBase, used to compute
/// PIC jumptable entry.
diff --git a/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp b/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp
index 6649c82..2bd70a9 100644
--- a/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -13,12 +13,16 @@
//===----------------------------------------------------------------------===//
#include "X86AsmPrinter.h"
+#include "X86RegisterInfo.h"
#include "InstPrinter/X86ATTInstPrinter.h"
-#include "X86COFFMachineModuleInfo.h"
+#include "MCTargetDesc/X86BaseInfo.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/StackMaps.h"
-#include "llvm/IR/Type.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
@@ -26,8 +30,6 @@
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/Support/FormattedStream.h"
-#include "llvm/Target/Mangler.h"
using namespace llvm;
namespace {
@@ -70,70 +72,77 @@ MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
/// operand to an MCSymbol.
MCSymbol *X86MCInstLower::
GetSymbolFromOperand(const MachineOperand &MO) const {
+ const DataLayout *DL = TM.getDataLayout();
assert((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) && "Isn't a symbol reference");
SmallString<128> Name;
+ StringRef Suffix;
+
+ switch (MO.getTargetFlags()) {
+ case X86II::MO_DLLIMPORT:
+ // Handle dllimport linkage.
+ Name += "__imp_";
+ break;
+ case X86II::MO_DARWIN_STUB:
+ Suffix = "$stub";
+ break;
+ case X86II::MO_DARWIN_NONLAZY:
+ case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
+ case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
+ Suffix = "$non_lazy_ptr";
+ break;
+ }
+
+ if (!Suffix.empty())
+ Name += DL->getPrivateGlobalPrefix();
+
+ unsigned PrefixLen = Name.size();
if (MO.isGlobal()) {
const GlobalValue *GV = MO.getGlobal();
- bool isImplicitlyPrivate = false;
- if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB ||
- MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY ||
- MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY_PIC_BASE ||
- MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE)
- isImplicitlyPrivate = true;
-
- getMang()->getNameWithPrefix(Name, GV, isImplicitlyPrivate);
+ AsmPrinter.getNameWithPrefix(Name, GV);
} else if (MO.isSymbol()) {
- Name += MAI.getGlobalPrefix();
- Name += MO.getSymbolName();
+ getMang()->getNameWithPrefix(Name, MO.getSymbolName());
} else if (MO.isMBB()) {
Name += MO.getMBB()->getSymbol()->getName();
}
+ unsigned OrigLen = Name.size() - PrefixLen;
+
+ Name += Suffix;
+ MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name);
+
+ StringRef OrigName = StringRef(Name).substr(PrefixLen, OrigLen);
// If the target flags on the operand changes the name of the symbol, do that
// before we return the symbol.
switch (MO.getTargetFlags()) {
default: break;
- case X86II::MO_DLLIMPORT: {
- // Handle dllimport linkage.
- const char *Prefix = "__imp_";
- Name.insert(Name.begin(), Prefix, Prefix+strlen(Prefix));
- break;
- }
case X86II::MO_DARWIN_NONLAZY:
case X86II::MO_DARWIN_NONLAZY_PIC_BASE: {
- Name += "$non_lazy_ptr";
- MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());
-
MachineModuleInfoImpl::StubValueTy &StubSym =
getMachOMMI().getGVStubEntry(Sym);
- if (StubSym.getPointer() == 0) {
+ if (!StubSym.getPointer()) {
assert(MO.isGlobal() && "Extern symbol not handled yet");
StubSym =
MachineModuleInfoImpl::
StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()),
!MO.getGlobal()->hasInternalLinkage());
}
- return Sym;
+ break;
}
case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: {
- Name += "$non_lazy_ptr";
- MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());
MachineModuleInfoImpl::StubValueTy &StubSym =
getMachOMMI().getHiddenGVStubEntry(Sym);
- if (StubSym.getPointer() == 0) {
+ if (!StubSym.getPointer()) {
assert(MO.isGlobal() && "Extern symbol not handled yet");
StubSym =
MachineModuleInfoImpl::
StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()),
!MO.getGlobal()->hasInternalLinkage());
}
- return Sym;
+ break;
}
case X86II::MO_DARWIN_STUB: {
- Name += "$stub";
- MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());
MachineModuleInfoImpl::StubValueTy &StubSym =
getMachOMMI().getFnStubEntry(Sym);
if (StubSym.getPointer())
@@ -145,23 +154,22 @@ GetSymbolFromOperand(const MachineOperand &MO) const {
StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()),
!MO.getGlobal()->hasInternalLinkage());
} else {
- Name.erase(Name.end()-5, Name.end());
StubSym =
MachineModuleInfoImpl::
- StubValueTy(Ctx.GetOrCreateSymbol(Name.str()), false);
+ StubValueTy(Ctx.GetOrCreateSymbol(OrigName), false);
}
- return Sym;
+ break;
}
}
- return Ctx.GetOrCreateSymbol(Name.str());
+ return Sym;
}
MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
MCSymbol *Sym) const {
// FIXME: We would like an efficient form for this, so we don't have to do a
// lot of extra uniquing.
- const MCExpr *Expr = 0;
+ const MCExpr *Expr = nullptr;
MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None;
switch (MO.getTargetFlags()) {
@@ -216,7 +224,7 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
break;
}
- if (Expr == 0)
+ if (!Expr)
Expr = MCSymbolRefExpr::Create(Sym, RefKind, Ctx);
if (!MO.isJTI() && !MO.isMBB() && MO.getOffset())
@@ -227,13 +235,6 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
}
-/// LowerUnaryToTwoAddr - R = setb -> R = sbb R, R
-static void LowerUnaryToTwoAddr(MCInst &OutMI, unsigned NewOpc) {
- OutMI.setOpcode(NewOpc);
- OutMI.addOperand(OutMI.getOperand(0));
- OutMI.addOperand(OutMI.getOperand(0));
-}
-
/// \brief Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instruction with
/// a short fixed-register form.
static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) {
@@ -297,12 +298,12 @@ static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst,
unsigned RegOp = IsStore ? 0 : 5;
unsigned AddrOp = AddrBase + 3;
assert(Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() &&
- Inst.getOperand(AddrBase + 0).isReg() && // base
- Inst.getOperand(AddrBase + 1).isImm() && // scale
- Inst.getOperand(AddrBase + 2).isReg() && // index register
- (Inst.getOperand(AddrOp).isExpr() || // address
- Inst.getOperand(AddrOp).isImm())&&
- Inst.getOperand(AddrBase + 4).isReg() && // segment
+ Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() &&
+ Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() &&
+ Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() &&
+ Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() &&
+ (Inst.getOperand(AddrOp).isExpr() ||
+ Inst.getOperand(AddrOp).isImm()) &&
"Unexpected instruction!");
// Check whether the destination register can be fixed.
@@ -322,17 +323,23 @@ static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst,
}
if (Absolute &&
- (Inst.getOperand(AddrBase + 0).getReg() != 0 ||
- Inst.getOperand(AddrBase + 2).getReg() != 0 ||
- Inst.getOperand(AddrBase + 4).getReg() != 0 ||
- Inst.getOperand(AddrBase + 1).getImm() != 1))
+ (Inst.getOperand(AddrBase + X86::AddrBaseReg).getReg() != 0 ||
+ Inst.getOperand(AddrBase + X86::AddrScaleAmt).getImm() != 1 ||
+ Inst.getOperand(AddrBase + X86::AddrIndexReg).getReg() != 0))
return;
// If so, rewrite the instruction.
MCOperand Saved = Inst.getOperand(AddrOp);
+ MCOperand Seg = Inst.getOperand(AddrBase + X86::AddrSegmentReg);
Inst = MCInst();
Inst.setOpcode(Opcode);
Inst.addOperand(Saved);
+ Inst.addOperand(Seg);
+}
+
+static unsigned getRetOpcode(const X86Subtarget &Subtarget)
+{
+ return Subtarget.is64Bit() ? X86::RETQ : X86::RETL;
}
void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
@@ -390,7 +397,6 @@ ReSimplify:
assert(OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 &&
"LEA has segment specified!");
break;
- case X86::MOV32r0: LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); break;
case X86::MOV32ri64:
OutMI.setOpcode(X86::MOV32ri);
@@ -464,7 +470,7 @@ ReSimplify:
case X86::EH_RETURN:
case X86::EH_RETURN64: {
OutMI = MCInst();
- OutMI.setOpcode(X86::RET);
+ OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
break;
}
@@ -598,7 +604,8 @@ ReSimplify:
static void LowerTlsAddr(MCStreamer &OutStreamer,
X86MCInstLower &MCInstLowering,
- const MachineInstr &MI) {
+ const MachineInstr &MI,
+ const MCSubtargetInfo& STI) {
bool is64Bits = MI.getOpcode() == X86::TLS_addr64 ||
MI.getOpcode() == X86::TLS_base_addr64;
@@ -608,7 +615,7 @@ static void LowerTlsAddr(MCStreamer &OutStreamer,
MCContext &context = OutStreamer.getContext();
if (needsPadding)
- OutStreamer.EmitInstruction(MCInstBuilder(X86::DATA16_PREFIX));
+ OutStreamer.EmitInstruction(MCInstBuilder(X86::DATA16_PREFIX), STI);
MCSymbolRefExpr::VariantKind SRVK;
switch (MI.getOpcode()) {
@@ -655,12 +662,12 @@ static void LowerTlsAddr(MCStreamer &OutStreamer,
LEA.addOperand(MCOperand::CreateExpr(symRef)); // disp
LEA.addOperand(MCOperand::CreateReg(0)); // seg
}
- OutStreamer.EmitInstruction(LEA);
+ OutStreamer.EmitInstruction(LEA, STI);
if (needsPadding) {
- OutStreamer.EmitInstruction(MCInstBuilder(X86::DATA16_PREFIX));
- OutStreamer.EmitInstruction(MCInstBuilder(X86::DATA16_PREFIX));
- OutStreamer.EmitInstruction(MCInstBuilder(X86::REX64_PREFIX));
+ OutStreamer.EmitInstruction(MCInstBuilder(X86::DATA16_PREFIX), STI);
+ OutStreamer.EmitInstruction(MCInstBuilder(X86::DATA16_PREFIX), STI);
+ OutStreamer.EmitInstruction(MCInstBuilder(X86::REX64_PREFIX), STI);
}
StringRef name = is64Bits ? "__tls_get_addr" : "___tls_get_addr";
@@ -672,114 +679,79 @@ static void LowerTlsAddr(MCStreamer &OutStreamer,
OutStreamer.EmitInstruction(MCInstBuilder(is64Bits ? X86::CALL64pcrel32
: X86::CALLpcrel32)
- .addExpr(tlsRef));
+ .addExpr(tlsRef), STI);
}
-static std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
-parseMemoryOperand(StackMaps::Location::LocationType LocTy, unsigned Size,
- MachineInstr::const_mop_iterator MOI,
- MachineInstr::const_mop_iterator MOE) {
-
- typedef StackMaps::Location Location;
-
- assert(std::distance(MOI, MOE) >= 5 && "Too few operands to encode mem op.");
-
- const MachineOperand &Base = *MOI;
- const MachineOperand &Scale = *(++MOI);
- const MachineOperand &Index = *(++MOI);
- const MachineOperand &Disp = *(++MOI);
- const MachineOperand &ZeroReg = *(++MOI);
-
- // Sanity check for supported operand format.
- assert(Base.isReg() &&
- Scale.isImm() && Scale.getImm() == 1 &&
- Index.isReg() && Index.getReg() == 0 &&
- Disp.isImm() && ZeroReg.isReg() && (ZeroReg.getReg() == 0) &&
- "Unsupported x86 memory operand sequence.");
- (void)Scale;
- (void)Index;
- (void)ZeroReg;
-
- return std::make_pair(
- Location(LocTy, Size, Base.getReg(), Disp.getImm()), ++MOI);
-}
-
-std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
-X86AsmPrinter::stackmapOperandParser(MachineInstr::const_mop_iterator MOI,
- MachineInstr::const_mop_iterator MOE,
- const TargetMachine &TM) {
-
- typedef StackMaps::Location Location;
-
- const MachineOperand &MOP = *MOI;
- assert(!MOP.isRegMask() && (!MOP.isReg() || !MOP.isImplicit()) &&
- "Register mask and implicit operands should not be processed.");
-
- if (MOP.isImm()) {
- // Verify anyregcc
- // [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
-
- switch (MOP.getImm()) {
- default: llvm_unreachable("Unrecognized operand type.");
- case StackMaps::DirectMemRefOp: {
- unsigned Size = TM.getDataLayout()->getPointerSizeInBits();
- assert((Size % 8) == 0 && "Need pointer size in bytes.");
- Size /= 8;
- return parseMemoryOperand(StackMaps::Location::Direct, Size,
- llvm::next(MOI), MOE);
- }
- case StackMaps::IndirectMemRefOp: {
- ++MOI;
- int64_t Size = MOI->getImm();
- assert(Size > 0 && "Need a valid size for indirect memory locations.");
- return parseMemoryOperand(StackMaps::Location::Indirect, Size,
- llvm::next(MOI), MOE);
- }
- case StackMaps::ConstantOp: {
- ++MOI;
- assert(MOI->isImm() && "Expected constant operand.");
- int64_t Imm = MOI->getImm();
- return std::make_pair(
- Location(Location::Constant, sizeof(int64_t), 0, Imm), ++MOI);
+/// \brief Emit the optimal amount of multi-byte nops on X86.
+static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit, const MCSubtargetInfo &STI) {
+ // This works only for 64bit. For 32bit we have to do additional checking if
+ // the CPU supports multi-byte nops.
+ assert(Is64Bit && "EmitNops only supports X86-64");
+ while (NumBytes) {
+ unsigned Opc, BaseReg, ScaleVal, IndexReg, Displacement, SegmentReg;
+ Opc = IndexReg = Displacement = SegmentReg = 0;
+ BaseReg = X86::RAX; ScaleVal = 1;
+ switch (NumBytes) {
+ case 0: llvm_unreachable("Zero nops?"); break;
+ case 1: NumBytes -= 1; Opc = X86::NOOP; break;
+ case 2: NumBytes -= 2; Opc = X86::XCHG16ar; break;
+ case 3: NumBytes -= 3; Opc = X86::NOOPL; break;
+ case 4: NumBytes -= 4; Opc = X86::NOOPL; Displacement = 8; break;
+ case 5: NumBytes -= 5; Opc = X86::NOOPL; Displacement = 8;
+ IndexReg = X86::RAX; break;
+ case 6: NumBytes -= 6; Opc = X86::NOOPW; Displacement = 8;
+ IndexReg = X86::RAX; break;
+ case 7: NumBytes -= 7; Opc = X86::NOOPL; Displacement = 512; break;
+ case 8: NumBytes -= 8; Opc = X86::NOOPL; Displacement = 512;
+ IndexReg = X86::RAX; break;
+ case 9: NumBytes -= 9; Opc = X86::NOOPW; Displacement = 512;
+ IndexReg = X86::RAX; break;
+ default: NumBytes -= 10; Opc = X86::NOOPW; Displacement = 512;
+ IndexReg = X86::RAX; SegmentReg = X86::CS; break;
}
- }
- }
- // Otherwise this is a reg operand. The physical register number will
- // ultimately be encoded as a DWARF regno. The stack map also records the size
- // of a spill slot that can hold the register content. (The runtime can
- // track the actual size of the data type if it needs to.)
- assert(MOP.isReg() && "Expected register operand here.");
- assert(TargetRegisterInfo::isPhysicalRegister(MOP.getReg()) &&
- "Virtreg operands should have been rewritten before now.");
- const TargetRegisterClass *RC =
- TM.getRegisterInfo()->getMinimalPhysRegClass(MOP.getReg());
- assert(!MOP.getSubReg() && "Physical subreg still around.");
- return std::make_pair(
- Location(Location::Register, RC->getSize(), MOP.getReg(), 0), ++MOI);
+ unsigned NumPrefixes = std::min(NumBytes, 5U);
+ NumBytes -= NumPrefixes;
+ for (unsigned i = 0; i != NumPrefixes; ++i)
+ OS.EmitBytes("\x66");
+
+ switch (Opc) {
+ default: llvm_unreachable("Unexpected opcode"); break;
+ case X86::NOOP:
+ OS.EmitInstruction(MCInstBuilder(Opc), STI);
+ break;
+ case X86::XCHG16ar:
+ OS.EmitInstruction(MCInstBuilder(Opc).addReg(X86::AX), STI);
+ break;
+ case X86::NOOPL:
+ case X86::NOOPW:
+ OS.EmitInstruction(MCInstBuilder(Opc).addReg(BaseReg).addImm(ScaleVal)
+ .addReg(IndexReg)
+ .addImm(Displacement)
+ .addReg(SegmentReg), STI);
+ break;
+ }
+ } // while (NumBytes)
}
// Lower a stackmap of the form:
// <id>, <shadowBytes>, ...
-static void LowerSTACKMAP(MCStreamer &OutStreamer,
- StackMaps &SM,
- const MachineInstr &MI)
-{
- unsigned NumNOPBytes = MI.getOperand(1).getImm();
+static void LowerSTACKMAP(MCStreamer &OS, StackMaps &SM,
+ const MachineInstr &MI, bool Is64Bit, const MCSubtargetInfo& STI) {
+ unsigned NumBytes = MI.getOperand(1).getImm();
SM.recordStackMap(MI);
// Emit padding.
// FIXME: These nops ensure that the stackmap's shadow is covered by
// instructions from the same basic block, but the nops should not be
// necessary if instructions from the same block follow the stackmap.
- for (unsigned i = 0; i < NumNOPBytes; ++i)
- OutStreamer.EmitInstruction(MCInstBuilder(X86::NOOP));
+ EmitNops(OS, NumBytes, Is64Bit, STI);
}
// Lower a patchpoint of the form:
// [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
-static void LowerPATCHPOINT(MCStreamer &OutStreamer,
- StackMaps &SM,
- const MachineInstr &MI) {
+static void LowerPATCHPOINT(MCStreamer &OS, StackMaps &SM,
+ const MachineInstr &MI, bool Is64Bit, const MCSubtargetInfo& STI) {
+ assert(Is64Bit && "Patchpoint currently only supports X86-64");
SM.recordPatchPoint(MI);
PatchPointOpers opers(&MI);
@@ -789,34 +761,35 @@ static void LowerPATCHPOINT(MCStreamer &OutStreamer,
if (CallTarget) {
// Emit MOV to materialize the target address and the CALL to target.
// This is encoded with 12-13 bytes, depending on which register is used.
- // We conservatively assume that it is 12 bytes and emit in worst case one
- // extra NOP byte.
- EncodedBytes = 12;
- OutStreamer.EmitInstruction(MCInstBuilder(X86::MOV64ri)
- .addReg(MI.getOperand(ScratchIdx).getReg())
- .addImm(CallTarget));
- OutStreamer.EmitInstruction(MCInstBuilder(X86::CALL64r)
- .addReg(MI.getOperand(ScratchIdx).getReg()));
+ unsigned ScratchReg = MI.getOperand(ScratchIdx).getReg();
+ if (X86II::isX86_64ExtendedReg(ScratchReg))
+ EncodedBytes = 13;
+ else
+ EncodedBytes = 12;
+ OS.EmitInstruction(MCInstBuilder(X86::MOV64ri).addReg(ScratchReg)
+ .addImm(CallTarget), STI);
+ OS.EmitInstruction(MCInstBuilder(X86::CALL64r).addReg(ScratchReg), STI);
}
// Emit padding.
unsigned NumBytes = opers.getMetaOper(PatchPointOpers::NBytesPos).getImm();
assert(NumBytes >= EncodedBytes &&
"Patchpoint can't request size less than the length of a call.");
- for (unsigned i = EncodedBytes; i < NumBytes; ++i)
- OutStreamer.EmitInstruction(MCInstBuilder(X86::NOOP));
+ EmitNops(OS, NumBytes - EncodedBytes, Is64Bit, STI);
}
void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
X86MCInstLower MCInstLowering(*MF, *this);
+ const X86RegisterInfo *RI =
+ static_cast<const X86RegisterInfo *>(TM.getRegisterInfo());
+
switch (MI->getOpcode()) {
case TargetOpcode::DBG_VALUE:
llvm_unreachable("Should be handled target independently");
// Emit nothing here but a comment if we can.
case X86::Int_MemBarrier:
- if (OutStreamer.hasRawTextSupport())
- OutStreamer.EmitRawText(StringRef("\t#MEMBARRIER"));
+ OutStreamer.emitRawComment("MEMBARRIER");
return;
@@ -839,7 +812,7 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
case X86::TLS_addr64:
case X86::TLS_base_addr32:
case X86::TLS_base_addr64:
- return LowerTlsAddr(OutStreamer, MCInstLowering, *MI);
+ return LowerTlsAddr(OutStreamer, MCInstLowering, *MI, getSubtargetInfo());
case X86::MOVPC32r: {
// This is a pseudo op for a two instruction sequence with a label, which
@@ -852,14 +825,14 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCSymbol *PICBase = MF->getPICBaseSymbol();
// FIXME: We would like an efficient form for this, so we don't have to do a
// lot of extra uniquing.
- OutStreamer.EmitInstruction(MCInstBuilder(X86::CALLpcrel32)
+ EmitToStreamer(OutStreamer, MCInstBuilder(X86::CALLpcrel32)
.addExpr(MCSymbolRefExpr::Create(PICBase, OutContext)));
// Emit the label.
OutStreamer.EmitLabel(PICBase);
// popl $reg
- OutStreamer.EmitInstruction(MCInstBuilder(X86::POP32r)
+ EmitToStreamer(OutStreamer, MCInstBuilder(X86::POP32r)
.addReg(MI->getOperand(0).getReg()));
return;
}
@@ -890,7 +863,7 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
DotExpr = MCBinaryExpr::CreateAdd(MCSymbolRefExpr::Create(OpSym,OutContext),
DotExpr, OutContext);
- OutStreamer.EmitInstruction(MCInstBuilder(X86::ADD32ri)
+ EmitToStreamer(OutStreamer, MCInstBuilder(X86::ADD32ri)
.addReg(MI->getOperand(0).getReg())
.addReg(MI->getOperand(1).getReg())
.addExpr(DotExpr));
@@ -898,25 +871,56 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
case TargetOpcode::STACKMAP:
- return LowerSTACKMAP(OutStreamer, SM, *MI);
+ return LowerSTACKMAP(OutStreamer, SM, *MI, Subtarget->is64Bit(), getSubtargetInfo());
case TargetOpcode::PATCHPOINT:
- return LowerPATCHPOINT(OutStreamer, SM, *MI);
+ return LowerPATCHPOINT(OutStreamer, SM, *MI, Subtarget->is64Bit(), getSubtargetInfo());
case X86::MORESTACK_RET:
- OutStreamer.EmitInstruction(MCInstBuilder(X86::RET));
+ EmitToStreamer(OutStreamer, MCInstBuilder(getRetOpcode(*Subtarget)));
return;
case X86::MORESTACK_RET_RESTORE_R10:
// Return, then restore R10.
- OutStreamer.EmitInstruction(MCInstBuilder(X86::RET));
- OutStreamer.EmitInstruction(MCInstBuilder(X86::MOV64rr)
+ EmitToStreamer(OutStreamer, MCInstBuilder(getRetOpcode(*Subtarget)));
+ EmitToStreamer(OutStreamer, MCInstBuilder(X86::MOV64rr)
.addReg(X86::R10)
.addReg(X86::RAX));
return;
+
+ case X86::SEH_PushReg:
+ OutStreamer.EmitWinCFIPushReg(RI->getSEHRegNum(MI->getOperand(0).getImm()));
+ return;
+
+ case X86::SEH_SaveReg:
+ OutStreamer.EmitWinCFISaveReg(RI->getSEHRegNum(MI->getOperand(0).getImm()),
+ MI->getOperand(1).getImm());
+ return;
+
+ case X86::SEH_SaveXMM:
+ OutStreamer.EmitWinCFISaveXMM(RI->getSEHRegNum(MI->getOperand(0).getImm()),
+ MI->getOperand(1).getImm());
+ return;
+
+ case X86::SEH_StackAlloc:
+ OutStreamer.EmitWinCFIAllocStack(MI->getOperand(0).getImm());
+ return;
+
+ case X86::SEH_SetFrame:
+ OutStreamer.EmitWinCFISetFrame(RI->getSEHRegNum(MI->getOperand(0).getImm()),
+ MI->getOperand(1).getImm());
+ return;
+
+ case X86::SEH_PushFrame:
+ OutStreamer.EmitWinCFIPushFrame(MI->getOperand(0).getImm());
+ return;
+
+ case X86::SEH_EndPrologue:
+ OutStreamer.EmitWinCFIEndProlog();
+ return;
}
MCInst TmpInst;
MCInstLowering.Lower(MI, TmpInst);
- OutStreamer.EmitInstruction(TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
}
diff --git a/contrib/llvm/lib/Target/X86/X86PadShortFunction.cpp b/contrib/llvm/lib/Target/X86/X86PadShortFunction.cpp
index 83e75ea..6639875 100644
--- a/contrib/llvm/lib/Target/X86/X86PadShortFunction.cpp
+++ b/contrib/llvm/lib/Target/X86/X86PadShortFunction.cpp
@@ -15,9 +15,9 @@
#include <algorithm>
-#define DEBUG_TYPE "x86-pad-short-functions"
#include "X86.h"
#include "X86InstrInfo.h"
+#include "X86Subtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -30,6 +30,8 @@
using namespace llvm;
+#define DEBUG_TYPE "x86-pad-short-functions"
+
STATISTIC(NumBBsPadded, "Number of basic blocks padded");
namespace {
@@ -49,11 +51,11 @@ namespace {
struct PadShortFunc : public MachineFunctionPass {
static char ID;
PadShortFunc() : MachineFunctionPass(ID)
- , Threshold(4), TM(0), TII(0) {}
+ , Threshold(4), TM(nullptr), TII(nullptr) {}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "X86 Atom pad short functions";
}
@@ -100,6 +102,9 @@ bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
}
TM = &MF.getTarget();
+ if (!TM->getSubtarget<X86Subtarget>().padShortFunctions())
+ return false;
+
TII = TM->getInstrInfo();
// Search through basic blocks and mark the ones that have early returns
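
Note on the hunk above: the pass now gates itself on a subtarget feature bit rather than running on every function. A hedged sketch of the resulting shape of runOnMachineFunction(), with everything not visible in this hunk elided:

  bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
    TM = &MF.getTarget();
    // Only subtargets that request padding (Atom-class cores) pay for the
    // analysis below; all others bail out immediately.
    if (!TM->getSubtarget<X86Subtarget>().padShortFunctions())
      return false;
    TII = TM->getInstrInfo();

    bool MadeChange = false;
    // ... find blocks with early returns and pad the short ones, setting
    // MadeChange whenever padding is inserted ...
    return MadeChange;
  }
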
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp b/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp
index e6cd593..e8a7e84 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -14,7 +14,6 @@
//===----------------------------------------------------------------------===//
#include "X86RegisterInfo.h"
-#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
@@ -27,7 +26,7 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
@@ -39,11 +38,11 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
+using namespace llvm;
+
#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"
-using namespace llvm;
-
cl::opt<bool>
ForceStackAlign("force-align-stack",
cl::desc("Force align the stack to the minimum alignment"
@@ -54,20 +53,18 @@ static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
cl::desc("Enable use of a base pointer for complex stack frames"));
-X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm)
- : X86GenRegisterInfo((tm.getSubtarget<X86Subtarget>().is64Bit()
- ? X86::RIP : X86::EIP),
- X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), false),
- X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), true),
- (tm.getSubtarget<X86Subtarget>().is64Bit()
- ? X86::RIP : X86::EIP)),
- TM(tm) {
+X86RegisterInfo::X86RegisterInfo(const X86Subtarget &STI)
+ : X86GenRegisterInfo(
+ (STI.is64Bit() ? X86::RIP : X86::EIP),
+ X86_MC::getDwarfRegFlavour(STI.getTargetTriple(), false),
+ X86_MC::getDwarfRegFlavour(STI.getTargetTriple(), true),
+ (STI.is64Bit() ? X86::RIP : X86::EIP)),
+ Subtarget(STI) {
X86_MC::InitLLVM2SEHRegisterMapping(this);
// Cache some information.
- const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
- Is64Bit = Subtarget->is64Bit();
- IsWin64 = Subtarget->isTargetWin64();
+ Is64Bit = Subtarget.is64Bit();
+ IsWin64 = Subtarget.isTargetWin64();
if (Is64Bit) {
SlotSize = 8;
@@ -84,21 +81,6 @@ X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm)
BasePtr = Is64Bit ? X86::RBX : X86::ESI;
}
-/// getCompactUnwindRegNum - This function maps the register to the number for
-/// compact unwind encoding. Return -1 if the register isn't valid.
-int X86RegisterInfo::getCompactUnwindRegNum(unsigned RegNum, bool isEH) const {
- switch (getLLVMRegNum(RegNum, isEH)) {
- case X86::EBX: case X86::RBX: return 1;
- case X86::ECX: case X86::R12: return 2;
- case X86::EDX: case X86::R13: return 3;
- case X86::EDI: case X86::R14: return 4;
- case X86::ESI: case X86::R15: return 5;
- case X86::EBP: case X86::RBP: return 6;
- }
-
- return -1;
-}
-
bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
// ExeDepsFixer and PostRAScheduler require liveness.
@@ -130,7 +112,7 @@ X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
if (!Is64Bit && SubIdx == X86::sub_8bit) {
A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
if (!A)
- return 0;
+ return nullptr;
}
return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}
@@ -174,9 +156,8 @@ X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const{
}
const TargetRegisterClass *
-X86RegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
- const {
- const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
+X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind) const {
switch (Kind) {
default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
case 0: // Normal GPRs.
@@ -226,27 +207,33 @@ X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
case X86::GR64RegClassID:
return 12 - FPDiff;
case X86::VR128RegClassID:
- return TM.getSubtarget<X86Subtarget>().is64Bit() ? 10 : 4;
+ return Subtarget.is64Bit() ? 10 : 4;
case X86::VR64RegClassID:
return 4;
}
}
-const uint16_t *
+const MCPhysReg *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
+ bool HasAVX = Subtarget.hasAVX();
+ bool HasAVX512 = Subtarget.hasAVX512();
+
+ assert(MF && "MachineFunction required");
switch (MF->getFunction()->getCallingConv()) {
case CallingConv::GHC:
case CallingConv::HiPE:
return CSR_NoRegs_SaveList;
-
- case CallingConv::WebKit_JS:
- return CSR_64_SaveList;
case CallingConv::AnyReg:
- return CSR_MostRegs_64_SaveList;
-
+ if (HasAVX)
+ return CSR_64_AllRegs_AVX_SaveList;
+ return CSR_64_AllRegs_SaveList;
+ case CallingConv::PreserveMost:
+ return CSR_64_RT_MostRegs_SaveList;
+ case CallingConv::PreserveAll:
+ if (HasAVX)
+ return CSR_64_RT_AllRegs_AVX_SaveList;
+ return CSR_64_RT_AllRegs_SaveList;
case CallingConv::Intel_OCL_BI: {
- bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
- bool HasAVX512 = TM.getSubtarget<X86Subtarget>().hasAVX512();
if (HasAVX512 && IsWin64)
return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
if (HasAVX512 && Is64Bit)
@@ -259,12 +246,10 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
return CSR_64_Intel_OCL_BI_SaveList;
break;
}
-
case CallingConv::Cold:
if (Is64Bit)
- return CSR_MostRegs_64_SaveList;
+ return CSR_64_MostRegs_SaveList;
break;
-
default:
break;
}
@@ -284,32 +269,52 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
const uint32_t*
X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
- bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
- bool HasAVX512 = TM.getSubtarget<X86Subtarget>().hasAVX512();
+ bool HasAVX = Subtarget.hasAVX();
+ bool HasAVX512 = Subtarget.hasAVX512();
- if (CC == CallingConv::Intel_OCL_BI) {
- if (IsWin64 && HasAVX512)
+ switch (CC) {
+ case CallingConv::GHC:
+ case CallingConv::HiPE:
+ return CSR_NoRegs_RegMask;
+ case CallingConv::AnyReg:
+ if (HasAVX)
+ return CSR_64_AllRegs_AVX_RegMask;
+ return CSR_64_AllRegs_RegMask;
+ case CallingConv::PreserveMost:
+ return CSR_64_RT_MostRegs_RegMask;
+ case CallingConv::PreserveAll:
+ if (HasAVX)
+ return CSR_64_RT_AllRegs_AVX_RegMask;
+ return CSR_64_RT_AllRegs_RegMask;
+ case CallingConv::Intel_OCL_BI: {
+ if (HasAVX512 && IsWin64)
return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
- if (Is64Bit && HasAVX512)
+ if (HasAVX512 && Is64Bit)
return CSR_64_Intel_OCL_BI_AVX512_RegMask;
- if (IsWin64 && HasAVX)
+ if (HasAVX && IsWin64)
return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
- if (Is64Bit && HasAVX)
+ if (HasAVX && Is64Bit)
return CSR_64_Intel_OCL_BI_AVX_RegMask;
if (!HasAVX && !IsWin64 && Is64Bit)
return CSR_64_Intel_OCL_BI_RegMask;
+ break;
}
- if (CC == CallingConv::GHC || CC == CallingConv::HiPE)
- return CSR_NoRegs_RegMask;
- if (CC == CallingConv::WebKit_JS || CC == CallingConv::AnyReg)
- return CSR_MostRegs_64_RegMask;
- if (!Is64Bit)
- return CSR_32_RegMask;
- if (CC == CallingConv::Cold)
- return CSR_MostRegs_64_RegMask;
- if (IsWin64)
- return CSR_Win64_RegMask;
- return CSR_64_RegMask;
+ case CallingConv::Cold:
+ if (Is64Bit)
+ return CSR_64_MostRegs_RegMask;
+ break;
+ default:
+ break;
+ }
+
+ // Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
+ // callsEHReturn().
+ if (Is64Bit) {
+ if (IsWin64)
+ return CSR_Win64_RegMask;
+ return CSR_64_RegMask;
+ }
+ return CSR_32_RegMask;
}
const uint32_t*
@@ -383,7 +388,7 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
Reserved.set(*AI);
}
}
- if (!Is64Bit || !TM.getSubtarget<X86Subtarget>().hasAVX512()) {
+ if (!Is64Bit || !Subtarget.hasAVX512()) {
for (unsigned n = 16; n != 32; ++n) {
for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
Reserved.set(*AI);
@@ -436,7 +441,7 @@ bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
const Function *F = MF.getFunction();
- unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
+ unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
bool requiresRealignment =
((MFI->getMaxAlignment() > StackAlign) ||
F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
@@ -496,6 +501,15 @@ X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
} else
FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);
+ // The frame index format for stackmaps and patchpoints is different from the
+ // X86 format. It only has a FI and an offset.
+ if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
+ assert(BasePtr == FramePtr && "Expected the FP as base register");
+ int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
+ MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
+ return;
+ }
+
if (MI.getOperand(FIOperandNum+3).isImm()) {
// Offset is a 32-bit integer.
int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
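
Note on the hunk above: an ordinary X86 frame access uses the five-part memory operand (base, scale, index, displacement, segment), with the displacement at FIOperandNum + 3, which is what the code right after this addition rewrites. STACKMAP and PATCHPOINT instead carry only a frame index followed by a single offset immediate, so the new early exit folds the resolved frame-index offset into that immediate. A sketch of just that rewrite, matching the added lines:

  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    // Operand layout here is <fi#N>, <offset>: the frame index is rewritten
    // to the base register elsewhere in this function (not shown in the
    // hunk), so only the offset immediate still needs FIOffset folded in.
    int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }
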
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterInfo.h b/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
index 22251b2..74efd1f 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
@@ -22,11 +22,11 @@
namespace llvm {
class Type;
class TargetInstrInfo;
- class X86TargetMachine;
+ class X86Subtarget;
-class X86RegisterInfo : public X86GenRegisterInfo {
+class X86RegisterInfo final : public X86GenRegisterInfo {
public:
- X86TargetMachine &TM;
+ const X86Subtarget &Subtarget;
private:
/// Is64Bit - Is the target 64-bits.
@@ -55,73 +55,73 @@ private:
unsigned BasePtr;
public:
- X86RegisterInfo(X86TargetMachine &tm);
+ X86RegisterInfo(const X86Subtarget &STI);
// FIXME: This should be tablegen'd like getDwarfRegNum is
int getSEHRegNum(unsigned i) const;
- /// getCompactUnwindRegNum - This function maps the register to the number for
- /// compact unwind encoding. Return -1 if the register isn't valid.
- int getCompactUnwindRegNum(unsigned RegNum, bool isEH) const;
-
/// Code Generation virtual methods...
///
- virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const;
+ bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const override;
/// getMatchingSuperRegClass - Return a subclass of the specified register
/// class A so that each register in it has a sub-register of the
/// specified sub-register index which is in the specified register class B.
- virtual const TargetRegisterClass *
+ const TargetRegisterClass *
getMatchingSuperRegClass(const TargetRegisterClass *A,
- const TargetRegisterClass *B, unsigned Idx) const;
+ const TargetRegisterClass *B,
+ unsigned Idx) const override;
- virtual const TargetRegisterClass *
- getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const;
+ const TargetRegisterClass *
+ getSubClassWithSubReg(const TargetRegisterClass *RC,
+ unsigned Idx) const override;
const TargetRegisterClass*
- getLargestLegalSuperClass(const TargetRegisterClass *RC) const;
+ getLargestLegalSuperClass(const TargetRegisterClass *RC) const override;
/// getPointerRegClass - Returns a TargetRegisterClass used for pointer
/// values.
const TargetRegisterClass *
- getPointerRegClass(const MachineFunction &MF, unsigned Kind = 0) const;
+ getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind = 0) const override;
/// getCrossCopyRegClass - Returns a legal register class to copy a register
/// in the specified class to or from. Returns NULL if it is possible to copy
/// between two registers of the specified class.
const TargetRegisterClass *
- getCrossCopyRegClass(const TargetRegisterClass *RC) const;
+ getCrossCopyRegClass(const TargetRegisterClass *RC) const override;
unsigned getRegPressureLimit(const TargetRegisterClass *RC,
- MachineFunction &MF) const;
+ MachineFunction &MF) const override;
/// getCalleeSavedRegs - Return a null-terminated list of all of the
/// callee-save registers on this target.
- const uint16_t *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
- const uint32_t *getCallPreservedMask(CallingConv::ID) const;
+ const MCPhysReg *
+ getCalleeSavedRegs(const MachineFunction* MF) const override;
+ const uint32_t *getCallPreservedMask(CallingConv::ID) const override;
const uint32_t *getNoPreservedMask() const;
/// getReservedRegs - Returns a bitset indexed by physical register number
/// indicating if a register is a special register that has particular uses and
/// should be considered unavailable at all times, e.g. SP, RA. This is used by
/// register scavenger to determine what registers are free.
- BitVector getReservedRegs(const MachineFunction &MF) const;
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
bool hasBasePointer(const MachineFunction &MF) const;
bool canRealignStack(const MachineFunction &MF) const;
- bool needsStackRealignment(const MachineFunction &MF) const;
+ bool needsStackRealignment(const MachineFunction &MF) const override;
bool hasReservedSpillSlot(const MachineFunction &MF, unsigned Reg,
- int &FrameIdx) const;
+ int &FrameIdx) const override;
void eliminateFrameIndex(MachineBasicBlock::iterator MI,
int SPAdj, unsigned FIOperandNum,
- RegScavenger *RS = NULL) const;
+ RegScavenger *RS = nullptr) const override;
// Debug information queries.
- unsigned getFrameRegister(const MachineFunction &MF) const;
+ unsigned getFrameRegister(const MachineFunction &MF) const override;
unsigned getStackRegister() const { return StackPtr; }
unsigned getBaseRegister() const { return BasePtr; }
// FIXME: Move to FrameInfo
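
Note on the header changes above: getCalleeSavedRegs() now returns a const MCPhysReg list, must be handed a real MachineFunction (the .cpp hunk adds an assert on it), and the overriding methods are marked override on a final class. A hedged usage sketch; MF and RI stand in for a valid MachineFunction and the target's X86RegisterInfo, and only accessors already shown in these hunks are used:

  // Null-terminated callee-saved register list for this function, plus the
  // call-preserved register mask for its calling convention.
  const MCPhysReg *CSRs = RI.getCalleeSavedRegs(&MF);
  const uint32_t *Mask =
      RI.getCallPreservedMask(MF.getFunction()->getCallingConv());
  for (unsigned i = 0; CSRs[i] != 0; ++i) {
    // ... inspect each callee-saved register ...
  }
  (void)Mask;
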
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterInfo.td b/contrib/llvm/lib/Target/X86/X86RegisterInfo.td
index b802728..0da9863 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterInfo.td
+++ b/contrib/llvm/lib/Target/X86/X86RegisterInfo.td
@@ -449,7 +449,7 @@ def FPCCR : RegisterClass<"X86", [i16], 16, (add FPSW)> {
}
// AVX-512 vector/mask registers.
-def VR512 : RegisterClass<"X86", [v16f32, v8f64, v16i32, v8i64], 512,
+def VR512 : RegisterClass<"X86", [v16f32, v8f64, v64i8, v32i16, v16i32, v8i64], 512,
(sequence "ZMM%u", 0, 31)>;
// Scalar AVX-512 floating point registers.
@@ -463,9 +463,19 @@ def VR128X : RegisterClass<"X86", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
def VR256X : RegisterClass<"X86", [v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
256, (sequence "YMM%u", 0, 31)>;
-def VK8 : RegisterClass<"X86", [v8i1], 8, (sequence "K%u", 0, 7)>;
-def VK16 : RegisterClass<"X86", [v16i1], 16, (add VK8)>;
-
-def VK8WM : RegisterClass<"X86", [v8i1], 8, (sub VK8, K0)>;
-def VK16WM : RegisterClass<"X86", [v16i1], 16, (add VK8WM)>;
-
+// Mask registers
+def VK1 : RegisterClass<"X86", [i1], 16, (sequence "K%u", 0, 7)> {let Size = 16;}
+def VK2 : RegisterClass<"X86", [v2i1], 16, (add VK1)> {let Size = 16;}
+def VK4 : RegisterClass<"X86", [v4i1], 16, (add VK2)> {let Size = 16;}
+def VK8 : RegisterClass<"X86", [v8i1], 16, (add VK4)> {let Size = 16;}
+def VK16 : RegisterClass<"X86", [v16i1], 16, (add VK8)> {let Size = 16;}
+def VK32 : RegisterClass<"X86", [v32i1], 32, (add VK16)> {let Size = 32;}
+def VK64 : RegisterClass<"X86", [v64i1], 64, (add VK32)> {let Size = 64;}
+
+def VK1WM : RegisterClass<"X86", [i1], 16, (sub VK1, K0)> {let Size = 16;}
+def VK2WM : RegisterClass<"X86", [v2i1], 16, (sub VK2, K0)> {let Size = 16;}
+def VK4WM : RegisterClass<"X86", [v4i1], 16, (sub VK4, K0)> {let Size = 16;}
+def VK8WM : RegisterClass<"X86", [v8i1], 16, (sub VK8, K0)> {let Size = 16;}
+def VK16WM : RegisterClass<"X86", [v16i1], 16, (add VK8WM)> {let Size = 16;}
+def VK32WM : RegisterClass<"X86", [v32i1], 32, (add VK16WM)> {let Size = 32;}
+def VK64WM : RegisterClass<"X86", [v64i1], 64, (add VK32WM)> {let Size = 64;}
diff --git a/contrib/llvm/lib/Target/X86/X86SchedHaswell.td b/contrib/llvm/lib/Target/X86/X86SchedHaswell.td
index 9748261..6966d61 100644
--- a/contrib/llvm/lib/Target/X86/X86SchedHaswell.td
+++ b/contrib/llvm/lib/Target/X86/X86SchedHaswell.td
@@ -20,6 +20,9 @@ def HaswellModel : SchedMachineModel {
let LoadLatency = 4;
let MispredictPenalty = 16;
+ // Based on the LSD (loop-stream detector) queue size and benchmarking data.
+ let LoopMicroOpBufferSize = 50;
+
// FIXME: SSE4 and AVX are unimplemented. This flag is set to allow
// the scheduler to assign a default model to unrecognized opcodes.
let CompleteModel = 0;
@@ -29,7 +32,7 @@ let SchedModel = HaswellModel in {
// Haswell can issue micro-ops to 8 different ports in one cycle.
-// Ports 0, 1, 5, 6 and 7 handle all computation.
+// Ports 0, 1, 5, and 6 handle all computation.
// Port 4 gets the data half of stores. Store data can be available later than
// the store address, but since we don't model the latency of stores, we can
// ignore that.
@@ -48,8 +51,9 @@ def HWPort7 : ProcResource<1>;
def HWPort23 : ProcResGroup<[HWPort2, HWPort3]>;
def HWPort237 : ProcResGroup<[HWPort2, HWPort3, HWPort7]>;
def HWPort05 : ProcResGroup<[HWPort0, HWPort5]>;
-def HWPort056 : ProcResGroup<[HWPort0, HWPort5, HWPort6]>;
+def HWPort06 : ProcResGroup<[HWPort0, HWPort6]>;
def HWPort15 : ProcResGroup<[HWPort1, HWPort5]>;
+def HWPort16 : ProcResGroup<[HWPort1, HWPort6]>;
def HWPort015 : ProcResGroup<[HWPort0, HWPort1, HWPort5]>;
def HWPort0156: ProcResGroup<[HWPort0, HWPort1, HWPort5, HWPort6]>;
@@ -88,6 +92,8 @@ multiclass HWWriteResPair<X86FoldableSchedWrite SchedRW,
// need an extra port 2/3 cycle to recompute the address.
def : WriteRes<WriteRMW, [HWPort4]>;
+// Store_addr on 237.
+// Store_data on 4.
def : WriteRes<WriteStore, [HWPort237, HWPort4]>;
def : WriteRes<WriteLoad, [HWPort23]> { let Latency = 4; }
def : WriteRes<WriteMove, [HWPort0156]>;
@@ -96,8 +102,8 @@ def : WriteRes<WriteZero, []>;
defm : HWWriteResPair<WriteALU, HWPort0156, 1>;
defm : HWWriteResPair<WriteIMul, HWPort1, 3>;
def : WriteRes<WriteIMulH, []> { let Latency = 3; }
-defm : HWWriteResPair<WriteShift, HWPort056, 1>;
-defm : HWWriteResPair<WriteJump, HWPort5, 1>;
+defm : HWWriteResPair<WriteShift, HWPort06, 1>;
+defm : HWWriteResPair<WriteJump, HWPort06, 1>;
// This is for simple LEAs with one or two input operands.
// The complex ones can only execute on port 1, and they require two cycles on
@@ -123,14 +129,136 @@ defm : HWWriteResPair<WriteFSqrt, HWPort0, 15>;
defm : HWWriteResPair<WriteCvtF2I, HWPort1, 3>;
defm : HWWriteResPair<WriteCvtI2F, HWPort1, 4>;
defm : HWWriteResPair<WriteCvtF2F, HWPort1, 3>;
+defm : HWWriteResPair<WriteFShuffle, HWPort5, 1>;
+defm : HWWriteResPair<WriteFBlend, HWPort015, 1>;
+defm : HWWriteResPair<WriteFShuffle256, HWPort5, 3>;
+
+def : WriteRes<WriteFVarBlend, [HWPort5]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+}
+def : WriteRes<WriteFVarBlendLd, [HWPort5, HWPort23]> {
+ let Latency = 6;
+ let ResourceCycles = [2, 1];
+}
// Vector integer operations.
-defm : HWWriteResPair<WriteVecShift, HWPort05, 1>;
+defm : HWWriteResPair<WriteVecShift, HWPort0, 1>;
defm : HWWriteResPair<WriteVecLogic, HWPort015, 1>;
defm : HWWriteResPair<WriteVecALU, HWPort15, 1>;
defm : HWWriteResPair<WriteVecIMul, HWPort0, 5>;
-defm : HWWriteResPair<WriteShuffle, HWPort15, 1>;
+defm : HWWriteResPair<WriteShuffle, HWPort5, 1>;
+defm : HWWriteResPair<WriteBlend, HWPort15, 1>;
+defm : HWWriteResPair<WriteShuffle256, HWPort5, 3>;
+
+def : WriteRes<WriteVarBlend, [HWPort5]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+}
+def : WriteRes<WriteVarBlendLd, [HWPort5, HWPort23]> {
+ let Latency = 6;
+ let ResourceCycles = [2, 1];
+}
+
+def : WriteRes<WriteVarVecShift, [HWPort0, HWPort5]> {
+ let Latency = 2;
+ let ResourceCycles = [2, 1];
+}
+def : WriteRes<WriteVarVecShiftLd, [HWPort0, HWPort5, HWPort23]> {
+ let Latency = 6;
+ let ResourceCycles = [2, 1, 1];
+}
+
+def : WriteRes<WriteMPSAD, [HWPort0, HWPort5]> {
+ let Latency = 6;
+ let ResourceCycles = [1, 2];
+}
+def : WriteRes<WriteMPSADLd, [HWPort23, HWPort0, HWPort5]> {
+ let Latency = 6;
+ let ResourceCycles = [1, 1, 2];
+}
+
+// String instructions.
+// Packed Compare Implicit Length Strings, Return Mask
+def : WriteRes<WritePCmpIStrM, [HWPort0]> {
+ let Latency = 10;
+ let ResourceCycles = [3];
+}
+def : WriteRes<WritePCmpIStrMLd, [HWPort0, HWPort23]> {
+ let Latency = 10;
+ let ResourceCycles = [3, 1];
+}
+
+// Packed Compare Explicit Length Strings, Return Mask
+def : WriteRes<WritePCmpEStrM, [HWPort0, HWPort16, HWPort5]> {
+ let Latency = 10;
+ let ResourceCycles = [3, 2, 4];
+}
+def : WriteRes<WritePCmpEStrMLd, [HWPort05, HWPort16, HWPort23]> {
+ let Latency = 10;
+ let ResourceCycles = [6, 2, 1];
+}
+
+// Packed Compare Implicit Length Strings, Return Index
+def : WriteRes<WritePCmpIStrI, [HWPort0]> {
+ let Latency = 11;
+ let ResourceCycles = [3];
+}
+def : WriteRes<WritePCmpIStrILd, [HWPort0, HWPort23]> {
+ let Latency = 11;
+ let ResourceCycles = [3, 1];
+}
+
+// Packed Compare Explicit Length Strings, Return Index
+def : WriteRes<WritePCmpEStrI, [HWPort05, HWPort16]> {
+ let Latency = 11;
+ let ResourceCycles = [6, 2];
+}
+def : WriteRes<WritePCmpEStrILd, [HWPort0, HWPort16, HWPort5, HWPort23]> {
+ let Latency = 11;
+ let ResourceCycles = [3, 2, 2, 1];
+}
+
+// AES Instructions.
+def : WriteRes<WriteAESDecEnc, [HWPort5]> {
+ let Latency = 7;
+ let ResourceCycles = [1];
+}
+def : WriteRes<WriteAESDecEncLd, [HWPort5, HWPort23]> {
+ let Latency = 7;
+ let ResourceCycles = [1, 1];
+}
+
+def : WriteRes<WriteAESIMC, [HWPort5]> {
+ let Latency = 14;
+ let ResourceCycles = [2];
+}
+def : WriteRes<WriteAESIMCLd, [HWPort5, HWPort23]> {
+ let Latency = 14;
+ let ResourceCycles = [2, 1];
+}
+
+def : WriteRes<WriteAESKeyGen, [HWPort0, HWPort5]> {
+ let Latency = 10;
+ let ResourceCycles = [2, 8];
+}
+def : WriteRes<WriteAESKeyGenLd, [HWPort0, HWPort5, HWPort23]> {
+ let Latency = 10;
+ let ResourceCycles = [2, 7, 1];
+}
+
+// Carry-less multiplication instructions.
+def : WriteRes<WriteCLMul, [HWPort0, HWPort5]> {
+ let Latency = 7;
+ let ResourceCycles = [2, 1];
+}
+def : WriteRes<WriteCLMulLd, [HWPort0, HWPort5, HWPort23]> {
+ let Latency = 7;
+ let ResourceCycles = [2, 1, 1];
+}
def : WriteRes<WriteSystem, [HWPort0156]> { let Latency = 100; }
def : WriteRes<WriteMicrocoded, [HWPort0156]> { let Latency = 100; }
+def : WriteRes<WriteFence, [HWPort23, HWPort4]>;
+def : WriteRes<WriteNop, []>;
} // SchedModel
diff --git a/contrib/llvm/lib/Target/X86/X86SchedSandyBridge.td b/contrib/llvm/lib/Target/X86/X86SchedSandyBridge.td
index 3011c6d..83f0534 100644
--- a/contrib/llvm/lib/Target/X86/X86SchedSandyBridge.td
+++ b/contrib/llvm/lib/Target/X86/X86SchedSandyBridge.td
@@ -21,6 +21,9 @@ def SandyBridgeModel : SchedMachineModel {
let LoadLatency = 4;
let MispredictPenalty = 16;
+ // Based on the LSD (loop-stream detector) queue size.
+ let LoopMicroOpBufferSize = 28;
+
// FIXME: SSE4 and AVX are unimplemented. This flag is set to allow
// the scheduler to assign a default model to unrecognized opcodes.
let CompleteModel = 0;
@@ -118,6 +121,16 @@ defm : SBWriteResPair<WriteFSqrt, SBPort0, 15>;
defm : SBWriteResPair<WriteCvtF2I, SBPort1, 3>;
defm : SBWriteResPair<WriteCvtI2F, SBPort1, 4>;
defm : SBWriteResPair<WriteCvtF2F, SBPort1, 3>;
+defm : SBWriteResPair<WriteFShuffle, SBPort5, 1>;
+defm : SBWriteResPair<WriteFBlend, SBPort05, 1>;
+def : WriteRes<WriteFVarBlend, [SBPort0, SBPort5]> {
+ let Latency = 2;
+ let ResourceCycles = [1, 1];
+}
+def : WriteRes<WriteFVarBlendLd, [SBPort0, SBPort5, SBPort23]> {
+ let Latency = 6;
+ let ResourceCycles = [1, 1, 1];
+}
// Vector integer operations.
defm : SBWriteResPair<WriteVecShift, SBPort05, 1>;
@@ -125,7 +138,112 @@ defm : SBWriteResPair<WriteVecLogic, SBPort015, 1>;
defm : SBWriteResPair<WriteVecALU, SBPort15, 1>;
defm : SBWriteResPair<WriteVecIMul, SBPort0, 5>;
defm : SBWriteResPair<WriteShuffle, SBPort15, 1>;
+defm : SBWriteResPair<WriteBlend, SBPort15, 1>;
+def : WriteRes<WriteVarBlend, [SBPort1, SBPort5]> {
+ let Latency = 2;
+ let ResourceCycles = [1, 1];
+}
+def : WriteRes<WriteVarBlendLd, [SBPort1, SBPort5, SBPort23]> {
+ let Latency = 6;
+ let ResourceCycles = [1, 1, 1];
+}
+def : WriteRes<WriteMPSAD, [SBPort0, SBPort1, SBPort5]> {
+ let Latency = 6;
+ let ResourceCycles = [1, 1, 1];
+}
+def : WriteRes<WriteMPSADLd, [SBPort0, SBPort1, SBPort5, SBPort23]> {
+ let Latency = 6;
+ let ResourceCycles = [1, 1, 1, 1];
+}
+
+// String instructions.
+// Packed Compare Implicit Length Strings, Return Mask
+def : WriteRes<WritePCmpIStrM, [SBPort015]> {
+ let Latency = 11;
+ let ResourceCycles = [3];
+}
+def : WriteRes<WritePCmpIStrMLd, [SBPort015, SBPort23]> {
+ let Latency = 11;
+ let ResourceCycles = [3, 1];
+}
+
+// Packed Compare Explicit Length Strings, Return Mask
+def : WriteRes<WritePCmpEStrM, [SBPort015]> {
+ let Latency = 11;
+ let ResourceCycles = [8];
+}
+def : WriteRes<WritePCmpEStrMLd, [SBPort015, SBPort23]> {
+ let Latency = 11;
+ let ResourceCycles = [7, 1];
+}
+
+// Packed Compare Implicit Length Strings, Return Index
+def : WriteRes<WritePCmpIStrI, [SBPort015]> {
+ let Latency = 3;
+ let ResourceCycles = [3];
+}
+def : WriteRes<WritePCmpIStrILd, [SBPort015, SBPort23]> {
+ let Latency = 3;
+ let ResourceCycles = [3, 1];
+}
+
+// Packed Compare Explicit Length Strings, Return Index
+def : WriteRes<WritePCmpEStrI, [SBPort015]> {
+ let Latency = 4;
+ let ResourceCycles = [8];
+}
+def : WriteRes<WritePCmpEStrILd, [SBPort015, SBPort23]> {
+ let Latency = 4;
+ let ResourceCycles = [7, 1];
+}
+
+// AES Instructions.
+def : WriteRes<WriteAESDecEnc, [SBPort015]> {
+ let Latency = 8;
+ let ResourceCycles = [2];
+}
+def : WriteRes<WriteAESDecEncLd, [SBPort015, SBPort23]> {
+ let Latency = 8;
+ let ResourceCycles = [2, 1];
+}
+
+def : WriteRes<WriteAESIMC, [SBPort015]> {
+ let Latency = 8;
+ let ResourceCycles = [2];
+}
+def : WriteRes<WriteAESIMCLd, [SBPort015, SBPort23]> {
+ let Latency = 8;
+ let ResourceCycles = [2, 1];
+}
+
+def : WriteRes<WriteAESKeyGen, [SBPort015]> {
+ let Latency = 8;
+ let ResourceCycles = [11];
+}
+def : WriteRes<WriteAESKeyGenLd, [SBPort015, SBPort23]> {
+ let Latency = 8;
+ let ResourceCycles = [10, 1];
+}
+
+// Carry-less multiplication instructions.
+def : WriteRes<WriteCLMul, [SBPort015]> {
+ let Latency = 14;
+ let ResourceCycles = [18];
+}
+def : WriteRes<WriteCLMulLd, [SBPort015, SBPort23]> {
+ let Latency = 14;
+ let ResourceCycles = [17, 1];
+}
+
def : WriteRes<WriteSystem, [SBPort015]> { let Latency = 100; }
def : WriteRes<WriteMicrocoded, [SBPort015]> { let Latency = 100; }
+def : WriteRes<WriteFence, [SBPort23, SBPort4]>;
+def : WriteRes<WriteNop, []>;
+
+// AVX2 is not supported on that architecture, but we should define the basic
+// scheduling resources anyway.
+defm : SBWriteResPair<WriteFShuffle256, SBPort0, 1>;
+defm : SBWriteResPair<WriteShuffle256, SBPort0, 1>;
+defm : SBWriteResPair<WriteVarVecShift, SBPort0, 1>;
} // SchedModel
diff --git a/contrib/llvm/lib/Target/X86/X86Schedule.td b/contrib/llvm/lib/Target/X86/X86Schedule.td
index 0556437..b76850a 100644
--- a/contrib/llvm/lib/Target/X86/X86Schedule.td
+++ b/contrib/llvm/lib/Target/X86/X86Schedule.td
@@ -69,6 +69,9 @@ defm WriteFDiv : X86SchedWritePair; // Floating point division.
defm WriteFSqrt : X86SchedWritePair; // Floating point square root.
defm WriteFRcp : X86SchedWritePair; // Floating point reciprocal.
defm WriteFMA : X86SchedWritePair; // Fused Multiply Add.
+defm WriteFShuffle : X86SchedWritePair; // Floating point vector shuffles.
+defm WriteFBlend : X86SchedWritePair; // Floating point vector blends.
+defm WriteFVarBlend : X86SchedWritePair; // Fp vector variable blends.
// FMA Scheduling helper class.
class FMASC { X86FoldableSchedWrite Sched = WriteFAdd; }
@@ -77,23 +80,55 @@ class FMASC { X86FoldableSchedWrite Sched = WriteFAdd; }
defm WriteVecALU : X86SchedWritePair; // Vector integer ALU op, no logicals.
defm WriteVecShift : X86SchedWritePair; // Vector integer shifts.
defm WriteVecIMul : X86SchedWritePair; // Vector integer multiply.
+defm WriteShuffle : X86SchedWritePair; // Vector shuffles.
+defm WriteBlend : X86SchedWritePair; // Vector blends.
+defm WriteVarBlend : X86SchedWritePair; // Vector variable blends.
+defm WriteMPSAD : X86SchedWritePair; // Vector MPSAD.
// Vector bitwise operations.
// These are often used on both floating point and integer vectors.
defm WriteVecLogic : X86SchedWritePair; // Vector and/or/xor.
-defm WriteShuffle : X86SchedWritePair; // Vector shuffles and blends.
// Conversion between integer and float.
defm WriteCvtF2I : X86SchedWritePair; // Float -> Integer.
defm WriteCvtI2F : X86SchedWritePair; // Integer -> Float.
defm WriteCvtF2F : X86SchedWritePair; // Float -> Float size conversion.
+// String instructions.
+// Packed Compare Implicit Length Strings, Return Mask
+defm WritePCmpIStrM : X86SchedWritePair;
+// Packed Compare Explicit Length Strings, Return Mask
+defm WritePCmpEStrM : X86SchedWritePair;
+// Packed Compare Implicit Length Strings, Return Index
+defm WritePCmpIStrI : X86SchedWritePair;
+// Packed Compare Explicit Length Strings, Return Index
+defm WritePCmpEStrI : X86SchedWritePair;
+
+// AES instructions.
+defm WriteAESDecEnc : X86SchedWritePair; // Decryption, encryption.
+defm WriteAESIMC : X86SchedWritePair; // InvMixColumn.
+defm WriteAESKeyGen : X86SchedWritePair; // Key Generation.
+
+// Carry-less multiplication instructions.
+defm WriteCLMul : X86SchedWritePair;
+
// Catch-all for expensive system instructions.
def WriteSystem : SchedWrite;
+// AVX2.
+defm WriteFShuffle256 : X86SchedWritePair; // Fp 256-bit width vector shuffles.
+defm WriteShuffle256 : X86SchedWritePair; // 256-bit width vector shuffles.
+defm WriteVarVecShift : X86SchedWritePair; // Variable vector shifts.
+
// Old microcoded instructions that nobody uses.
def WriteMicrocoded : SchedWrite;
+// Fence instructions.
+def WriteFence : SchedWrite;
+
+// Nop, not very useful except that it provides a model for nops!
+def WriteNop : SchedWrite;
+
//===----------------------------------------------------------------------===//
// Instruction Itinerary classes used for X86
def IIC_ALU_MEM : InstrItinClass;
@@ -577,7 +612,7 @@ def IIC_NOP : InstrItinClass;
//===----------------------------------------------------------------------===//
// Processor instruction itineraries.
-// IssueWidth is analagous to the number of decode units. Core and its
+// IssueWidth is analogous to the number of decode units. Core and its
// descendants, including Nehalem and SandyBridge, have 4 decoders.
// Resources beyond the decoder operate on micro-ops and are buffered
// so adjacent micro-ops don't directly compete.
@@ -598,6 +633,7 @@ def GenericModel : SchedMachineModel {
let MicroOpBufferSize = 32;
let LoadLatency = 4;
let HighLatency = 10;
+ let PostRAScheduler = 0;
}
include "X86ScheduleAtom.td"
diff --git a/contrib/llvm/lib/Target/X86/X86ScheduleAtom.td b/contrib/llvm/lib/Target/X86/X86ScheduleAtom.td
index ba72f29..c8820aa 100644
--- a/contrib/llvm/lib/Target/X86/X86ScheduleAtom.td
+++ b/contrib/llvm/lib/Target/X86/X86ScheduleAtom.td
@@ -535,5 +535,10 @@ def AtomModel : SchedMachineModel {
let LoadLatency = 3; // Expected cycles, may be overridden by OperandCycles.
let HighLatency = 30; // Expected, may be overridden by OperandCycles.
+ // On the Atom, the throughput for taken branches is 2 cycles. For small
+ // simple loops, expand by a small factor to hide the backedge cost.
+ let LoopMicroOpBufferSize = 10;
+ let PostRAScheduler = 1;
+
let Itineraries = AtomItineraries;
}
diff --git a/contrib/llvm/lib/Target/X86/X86ScheduleSLM.td b/contrib/llvm/lib/Target/X86/X86ScheduleSLM.td
index 6c2a304..90d8587 100644
--- a/contrib/llvm/lib/Target/X86/X86ScheduleSLM.td
+++ b/contrib/llvm/lib/Target/X86/X86ScheduleSLM.td
@@ -1,4 +1,4 @@
-//===- X86ScheduleSLM.td - X86 Atom Scheduling Definitions -*- tablegen -*-==//
+//=- X86ScheduleSLM.td - X86 Silvermont Scheduling -----------*- tablegen -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -7,662 +7,226 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the itinerary class data for the Intel Atom
-// (Silvermont) processor.
+// This file defines the machine model for Intel Silvermont to support
+// instruction scheduling and other instruction cost heuristics.
//
//===----------------------------------------------------------------------===//
-def IEC_RSV0 : FuncUnit;
-def IEC_RSV1 : FuncUnit;
-def FPC_RSV0 : FuncUnit;
-def FPC_RSV1 : FuncUnit;
-def MEC_RSV : FuncUnit;
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-def SLMItineraries : ProcessorItineraries<
- [ IEC_RSV0, IEC_RSV1, FPC_RSV0, FPC_RSV1, MEC_RSV ],
- [], [
- // [InstrStage<N, [FPC_RSV0, FPC_RSV1]>]
- // [InstrStage<N, [FPC_RSV0, FPC_RSV1], 0>, InstrStage<N, [MEC_RSV]>]
- // [InstrStage<N, [IEC_RSV0, IEC_RSV1]>]
- // [InstrStage<N, [IEC_RSV0, IEC_RSV1], 0>,InstrStage<N,[MEC_RSV]>]
- //
- // Default is 1 cycle, IEC_RSV0 or IEC_RSV1
- //InstrItinData<IIC_DEFAULT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_ALU_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_ALU_NONMEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_LEA, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_LEA_16, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- // mul
- InstrItinData<IIC_MUL8, [InstrStage<4, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MUL16_MEM, [InstrStage<4, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<4, [MEC_RSV]>] >,
- InstrItinData<IIC_MUL16_REG, [InstrStage<4, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MUL32_MEM, [InstrStage<3, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<3, [MEC_RSV]>] >,
- InstrItinData<IIC_MUL32_REG, [InstrStage<3, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MUL64, [InstrStage<4, [IEC_RSV0, IEC_RSV1]>] >,
- // imul by al, ax, eax, rax
- InstrItinData<IIC_IMUL8, [InstrStage<6, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_IMUL16_MEM, [InstrStage<6, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<6, [MEC_RSV]>] >,
- InstrItinData<IIC_IMUL16_REG, [InstrStage<6, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_IMUL32_MEM, [InstrStage<6, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<6, [MEC_RSV]>] >,
- InstrItinData<IIC_IMUL32_REG, [InstrStage<6, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_IMUL64, [InstrStage<6, [IEC_RSV0, IEC_RSV1]>] >,
- // imul reg by reg|mem
- InstrItinData<IIC_IMUL16_RM, [InstrStage<4, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<4, [MEC_RSV]>] >,
- InstrItinData<IIC_IMUL16_RR, [InstrStage<4, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_IMUL32_RM, [InstrStage<3, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<3, [MEC_RSV]>] >,
- InstrItinData<IIC_IMUL32_RR, [InstrStage<3, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_IMUL64_RM, [InstrStage<4, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<4, [MEC_RSV]>] >,
- InstrItinData<IIC_IMUL64_RR, [InstrStage<4, [IEC_RSV0, IEC_RSV1]>] >,
- // imul reg = reg/mem * imm
- InstrItinData<IIC_IMUL16_RRI, [InstrStage<4, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_IMUL32_RRI, [InstrStage<3, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_IMUL64_RRI, [InstrStage<4, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_IMUL16_RMI, [InstrStage<4, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<4, [MEC_RSV]>] >,
- InstrItinData<IIC_IMUL32_RMI, [InstrStage<3, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<3, [MEC_RSV]>] >,
- InstrItinData<IIC_IMUL64_RMI, [InstrStage<4, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<4, [MEC_RSV]>] >,
- // idiv - min latency
- InstrItinData<IIC_IDIV8, [InstrStage<34, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_IDIV16, [InstrStage<35, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_IDIV32, [InstrStage<35, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_IDIV64, [InstrStage<49, [IEC_RSV0, IEC_RSV1]>] >,
- // div - min latency
- InstrItinData<IIC_DIV8_REG, [InstrStage<25, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_DIV8_MEM, [InstrStage<25, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<25, [MEC_RSV]>] >,
- InstrItinData<IIC_DIV16, [InstrStage<26, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_DIV32, [InstrStage<26, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_DIV64, [InstrStage<38, [IEC_RSV0, IEC_RSV1]>] >,
- // neg/not/inc/dec
- InstrItinData<IIC_UNARY_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_UNARY_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- // add/sub/and/or/xor/adc/sbc/cmp/test
- InstrItinData<IIC_BIN_NONMEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_BIN_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- // adc/sbb
- InstrItinData<IIC_BIN_CARRY_NONMEM, [InstrStage<2, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_BIN_CARRY_MEM, [InstrStage<2, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<2, [MEC_RSV]>] >,
- // shift/rotate
- InstrItinData<IIC_SR, [InstrStage<1, [IEC_RSV0], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- // shift double
- InstrItinData<IIC_SHD16_REG_IM, [InstrStage<2, [IEC_RSV0]>] >,
- InstrItinData<IIC_SHD16_REG_CL, [InstrStage<4, [IEC_RSV0]>] >,
- InstrItinData<IIC_SHD16_MEM_IM, [InstrStage<2, [IEC_RSV0], 0>,
- InstrStage<2, [MEC_RSV]>] >,
- InstrItinData<IIC_SHD16_MEM_CL, [InstrStage<4, [IEC_RSV0], 0>,
- InstrStage<4, [MEC_RSV]>] >,
- InstrItinData<IIC_SHD32_REG_IM, [InstrStage<2, [IEC_RSV0]>] >,
- InstrItinData<IIC_SHD32_REG_CL, [InstrStage<4, [IEC_RSV0]>] >,
- InstrItinData<IIC_SHD32_MEM_IM, [InstrStage<2, [IEC_RSV0], 0>,
- InstrStage<2, [MEC_RSV]>] >,
- InstrItinData<IIC_SHD32_MEM_CL, [InstrStage<4, [IEC_RSV0], 0>,
- InstrStage<4, [MEC_RSV]>] >,
- InstrItinData<IIC_SHD64_REG_IM, [InstrStage<2, [IEC_RSV0]>] >,
- InstrItinData<IIC_SHD64_REG_CL, [InstrStage<4, [IEC_RSV0]>] >,
- InstrItinData<IIC_SHD64_MEM_IM, [InstrStage<2, [IEC_RSV0], 0>,
- InstrStage<2, [MEC_RSV]>] >,
- InstrItinData<IIC_SHD64_MEM_CL, [InstrStage<4, [IEC_RSV0], 0>,
- InstrStage<4, [MEC_RSV]>] >,
- // cmov
- InstrItinData<IIC_CMOV16_RM, [InstrStage<2, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<2, [MEC_RSV]>] >,
- InstrItinData<IIC_CMOV16_RR, [InstrStage<2, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_CMOV32_RM, [InstrStage<2, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<2, [MEC_RSV]>] >,
- InstrItinData<IIC_CMOV32_RR, [InstrStage<2, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_CMOV64_RM, [InstrStage<2, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<2, [MEC_RSV]>] >,
- InstrItinData<IIC_CMOV64_RR, [InstrStage<2, [IEC_RSV0, IEC_RSV1]>] >,
- // set
- InstrItinData<IIC_SET_M, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_SET_R, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- // jcc
- InstrItinData<IIC_Jcc, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- // jcxz/jecxz/jrcxz
- InstrItinData<IIC_JCXZ, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- // jmp rel
- InstrItinData<IIC_JMP_REL, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- // jmp indirect
- InstrItinData<IIC_JMP_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_JMP_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- // jmp far
- InstrItinData<IIC_JMP_FAR_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_JMP_FAR_PTR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- // loop/loope/loopne
- InstrItinData<IIC_LOOP, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_LOOPE, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_LOOPNE, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- // call - all but reg/imm
- InstrItinData<IIC_CALL_RI, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_CALL_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_CALL_FAR_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_CALL_FAR_PTR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- //ret
- InstrItinData<IIC_RET, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_RET_IMM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- //sign extension movs
- InstrItinData<IIC_MOVSX, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MOVSX_R16_R8, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MOVSX_R16_M8, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_MOVSX_R16_R16, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MOVSX_R32_R32, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- //zero extension movs
- InstrItinData<IIC_MOVZX, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MOVZX_R16_R8, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MOVZX_R16_M8, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
-
- InstrItinData<IIC_REP_MOVS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_REP_STOS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
-
- // SSE binary operations
- // arithmetic fp scalar
- InstrItinData<IIC_SSE_ALU_F32S_RR, [InstrStage<3, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_ALU_F32S_RM, [InstrStage<3, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<3, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_ALU_F64S_RR, [InstrStage<3, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_ALU_F64S_RM, [InstrStage<3, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<3, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_MUL_F32S_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_MUL_F32S_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_MUL_F64S_RR, [InstrStage<2, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_MUL_F64S_RM, [InstrStage<2, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<2, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_DIV_F32S_RR, [InstrStage<13, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_DIV_F32S_RM, [InstrStage<13, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<13, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_DIV_F64S_RR, [InstrStage<13, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_DIV_F64S_RM, [InstrStage<13, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<13, [MEC_RSV]>] >,
-
- InstrItinData<IIC_SSE_COMIS_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_COMIS_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
-
- InstrItinData<IIC_SSE_HADDSUB_RR, [InstrStage<6, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_HADDSUB_RM, [InstrStage<6, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<6, [MEC_RSV]>] >,
-
- // arithmetic fp parallel
- InstrItinData<IIC_SSE_ALU_F32P_RR, [InstrStage<3, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_ALU_F32P_RM, [InstrStage<3, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<3, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_ALU_F64P_RR, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_ALU_F64P_RM, [InstrStage<4, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<4, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_MUL_F32P_RR, [InstrStage<2, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_MUL_F32P_RM, [InstrStage<2, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<2, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_MUL_F64P_RR, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_MUL_F64P_RM, [InstrStage<4, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<4, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_DIV_F32P_RR, [InstrStage<27, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_DIV_F32P_RM, [InstrStage<27, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<27, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_DIV_F64P_RR, [InstrStage<27, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_DIV_F64P_RM, [InstrStage<27, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<27, [MEC_RSV]>] >,
-
- // bitwise parallel
- InstrItinData<IIC_SSE_BIT_P_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_BIT_P_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
-
- // arithmetic int parallel
- InstrItinData<IIC_SSE_INTALU_P_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_INTALU_P_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_INTALUQ_P_RR, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_INTALUQ_P_RM, [InstrStage<4, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<4, [MEC_RSV]>] >,
-
- // multiply int parallel
- InstrItinData<IIC_SSE_INTMUL_P_RR, [InstrStage<5, [FPC_RSV0]>] >,
- InstrItinData<IIC_SSE_INTMUL_P_RM, [InstrStage<5, [FPC_RSV0], 0>,
- InstrStage<5, [MEC_RSV]>] >,
-
- // shift parallel
- InstrItinData<IIC_SSE_INTSH_P_RR, [InstrStage<2, [FPC_RSV0]>] >,
- InstrItinData<IIC_SSE_INTSH_P_RM, [InstrStage<2, [FPC_RSV0], 0>,
- InstrStage<2, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_INTSH_P_RI, [InstrStage<1, [FPC_RSV0]>] >,
-
- InstrItinData<IIC_SSE_INTSHDQ_P_RI, [InstrStage<1, [FPC_RSV0]>] >,
-
- InstrItinData<IIC_SSE_SHUFP, [InstrStage<1, [FPC_RSV0]>] >,
- InstrItinData<IIC_SSE_PSHUF_RI, [InstrStage<1, [FPC_RSV0]>] >,
- InstrItinData<IIC_SSE_PSHUF_MI, [InstrStage<1, [FPC_RSV0], 0>,
- InstrStage<1, [MEC_RSV]>] >,
-
- InstrItinData<IIC_SSE_UNPCK, [InstrStage<1, [FPC_RSV0]>] >,
-
- InstrItinData<IIC_SSE_SQRTPS_RR, [InstrStage<26, [FPC_RSV0]>] >,
- InstrItinData<IIC_SSE_SQRTPS_RM, [InstrStage<26, [FPC_RSV0], 0>,
- InstrStage<26, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_SQRTSS_RR, [InstrStage<13, [FPC_RSV0]>] >,
- InstrItinData<IIC_SSE_SQRTSS_RM, [InstrStage<13, [FPC_RSV0], 0>,
- InstrStage<13, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_SQRTPD_RR, [InstrStage<26, [FPC_RSV0]>] >,
- InstrItinData<IIC_SSE_SQRTPD_RM, [InstrStage<26, [FPC_RSV0], 0>,
- InstrStage<26, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_SQRTSD_RR, [InstrStage<13, [FPC_RSV0]>] >,
- InstrItinData<IIC_SSE_SQRTSD_RM, [InstrStage<13, [FPC_RSV0], 0>,
- InstrStage<13, [MEC_RSV]>] >,
-
- InstrItinData<IIC_SSE_RCPP_RR, [InstrStage<9, [FPC_RSV0]>] >,
- InstrItinData<IIC_SSE_RCPP_RM, [InstrStage<9, [FPC_RSV0], 0>,
- InstrStage<9, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_RCPS_RR, [InstrStage<4, [FPC_RSV0]>] >,
- InstrItinData<IIC_SSE_RCPS_RM, [InstrStage<4, [FPC_RSV0], 0>,
- InstrStage<4, [MEC_RSV]>] >,
-
- InstrItinData<IIC_SSE_MOVMSK, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_MASKMOV, [InstrStage<5, [FPC_RSV0, FPC_RSV1]>] >,
-
- InstrItinData<IIC_SSE_PEXTRW, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_PINSRW, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
-
- InstrItinData<IIC_SSE_PABS_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_PABS_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
-
- InstrItinData<IIC_SSE_MOV_S_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_MOV_S_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_MOV_S_MR, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
-
- InstrItinData<IIC_SSE_MOVA_P_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_MOVA_P_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_MOVA_P_MR, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
-
- InstrItinData<IIC_SSE_MOVU_P_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_MOVU_P_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_MOVU_P_MR, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
-
- InstrItinData<IIC_SSE_MOV_LH, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
-
- InstrItinData<IIC_SSE_LDDQU, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
-
- InstrItinData<IIC_SSE_MOVDQ, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_MOVD_ToGP, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_MOVQ_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
-
- InstrItinData<IIC_SSE_MOVNT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
-
- InstrItinData<IIC_SSE_PREFETCH, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_SSE_PAUSE, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_SSE_LFENCE, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_SSE_MFENCE, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_SSE_SFENCE, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_SSE_LDMXCSR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_SSE_STMXCSR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
-
- InstrItinData<IIC_SSE_PHADDSUBD_RR, [InstrStage<6, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_PHADDSUBD_RM, [InstrStage<6, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<6, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_PHADDSUBSW_RR, [InstrStage<9, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_PHADDSUBSW_RM, [InstrStage<9, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<9, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_PHADDSUBW_RR, [InstrStage<9, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<9, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_PHADDSUBW_RM, [InstrStage<9, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<9, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_PSHUFB_RR, [InstrStage<5, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_PSHUFB_RM, [InstrStage<5, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<5, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_PSIGN_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_PSIGN_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
-
- InstrItinData<IIC_SSE_PMADD, [InstrStage<5, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_PMULHRSW, [InstrStage<5, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_PALIGNRR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_PALIGNRM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_MWAIT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_SSE_MONITOR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
-
- // conversions
- // to/from PD ...
- InstrItinData<IIC_SSE_CVT_PD_RR, [InstrStage<5, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_CVT_PD_RM, [InstrStage<5, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<5, [MEC_RSV]>] >,
- // to/from PS except to/from PD and PS2PI
- InstrItinData<IIC_SSE_CVT_PS_RR, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_CVT_PS_RM, [InstrStage<4, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<4, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_CVT_Scalar_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_CVT_Scalar_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_CVT_SS2SI32_RR, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_CVT_SS2SI32_RM, [InstrStage<4, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<4, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_CVT_SS2SI64_RR, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_CVT_SS2SI64_RM, [InstrStage<4, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<4, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_CVT_SD2SI_RR, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_CVT_SD2SI_RM, [InstrStage<4, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<4, [MEC_RSV]>] >,
-
- // MMX MOVs
- InstrItinData<IIC_MMX_MOV_MM_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_MOV_REG_MM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_MOVQ_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_MOVQ_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- // other MMX
- InstrItinData<IIC_MMX_ALU_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_ALU_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_ALUQ_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_ALUQ_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_PHADDSUBW_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_PHADDSUBW_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_PHADDSUBD_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_PHADDSUBD_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_PMUL, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_MISC_FUNC_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_MISC_FUNC_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_PSADBW, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_SHIFT_RI, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_SHIFT_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_SHIFT_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_UNPCK_H_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_UNPCK_H_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_UNPCK_L, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_PCK_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_PCK_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_PSHUF, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_PEXTR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_PINSRW, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_MASKMOV, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- // conversions
- // from/to PD
- InstrItinData<IIC_MMX_CVT_PD_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_CVT_PD_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- // from/to PI
- InstrItinData<IIC_MMX_CVT_PS_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MMX_CVT_PS_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
-
- InstrItinData<IIC_CMPX_LOCK, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_CMPX_LOCK_8, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_CMPX_LOCK_8B, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_CMPX_LOCK_16B, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
-
- InstrItinData<IIC_XADD_LOCK_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_XADD_LOCK_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
-
- InstrItinData<IIC_FILD, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FLD, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FLD80, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+def SLMModel : SchedMachineModel {
+ // All x86 instructions are modeled as a single micro-op, and SLM can decode 2
+ // instructions per cycle.
+ let IssueWidth = 2;
+ let MicroOpBufferSize = 32; // Based on the reorder buffer.
+ let LoadLatency = 3;
+ let MispredictPenalty = 10;
+ let PostRAScheduler = 1;
+
+ // For small loops, expand by a small factor to hide the backedge cost.
+ let LoopMicroOpBufferSize = 10;
+
+ // FIXME: SSE4 is unimplemented. This flag is set to allow
+ // the scheduler to assign a default model to unrecognized opcodes.
+ let CompleteModel = 0;
+}
- InstrItinData<IIC_FST, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FST80, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FIST, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+let SchedModel = SLMModel in {
+
+// Silvermont has 5 reservation stations for micro-ops
+
+def IEC_RSV0 : ProcResource<1>;
+def IEC_RSV1 : ProcResource<1>;
+def FPC_RSV0 : ProcResource<1> { let BufferSize = 1; }
+def FPC_RSV1 : ProcResource<1> { let BufferSize = 1; }
+def MEC_RSV : ProcResource<1>;
+
+// Many micro-ops are capable of issuing on multiple ports.
+def IEC_RSV01 : ProcResGroup<[IEC_RSV0, IEC_RSV1]>;
+def FPC_RSV01 : ProcResGroup<[FPC_RSV0, FPC_RSV1]>;
+
+def SMDivider : ProcResource<1>;
+def SMFPMultiplier : ProcResource<1>;
+def SMFPDivider : ProcResource<1>;
+
+// Loads are 3 cycles, so ReadAfterLd registers needn't be available until 3
+// cycles after the memory operand.
+def : ReadAdvance<ReadAfterLd, 3>;
+
+// Many SchedWrites are defined in pairs with and without a folded load.
+// Instructions with folded loads are usually micro-fused, so they only appear
+// as two micro-ops when queued in the reservation station.
+// This multiclass defines the resource usage for variants with and without
+// folded loads.
+multiclass SMWriteResPair<X86FoldableSchedWrite SchedRW,
+ ProcResourceKind ExePort,
+ int Lat> {
+ // The register variant uses a single cycle on ExePort.
+ def : WriteRes<SchedRW, [ExePort]> { let Latency = Lat; }
+
+ // Memory variant also uses a cycle on MEC_RSV and adds 3 cycles to the
+ // latency.
+ def : WriteRes<SchedRW.Folded, [MEC_RSV, ExePort]> {
+ let Latency = !add(Lat, 3);
+ }
+}
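// Illustrative sketch (assuming the WriteALU/WriteALULd pairing defined in
// X86Schedule.td): a use of the multiclass above such as
//   defm : SMWriteResPair<WriteALU, IEC_RSV01, 1>;
// should expand to roughly these two records -- a 1-cycle register form on
// IEC_RSV01, and a folded-load form that also occupies MEC_RSV and carries
// the extra 3-cycle load latency:
//   def : WriteRes<WriteALU,   [IEC_RSV01]>          { let Latency = 1; }
//   def : WriteRes<WriteALULd, [MEC_RSV, IEC_RSV01]> { let Latency = 4; }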
- InstrItinData<IIC_FLDZ, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FUCOM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FUCOMI, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FCOMI, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FNSTSW, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FNSTCW, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FLDCW, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FNINIT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FFREE, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FNCLEX, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_WAIT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FXAM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FNOP, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FLDL, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_F2XM1, [InstrStage<88, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_FYL2X, [InstrStage<296, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_FPTAN, [InstrStage<281, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_FPATAN, [InstrStage<296, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_FXTRACT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FPREM1, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FPSTP, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FPREM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FYL2XP1, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FSINCOS, [InstrStage<281, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_FRNDINT, [InstrStage<25, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_FSCALE, [InstrStage<74, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_FCOMPP, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FXSAVE, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FXRSTOR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_FXCH, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
+// A folded store needs a cycle on MEC_RSV for the store data, but it does not
+// need an extra port cycle to recompute the address.
+def : WriteRes<WriteRMW, [MEC_RSV]>;
+
+def : WriteRes<WriteStore, [IEC_RSV01, MEC_RSV]>;
+def : WriteRes<WriteLoad, [MEC_RSV]> { let Latency = 3; }
+def : WriteRes<WriteMove, [IEC_RSV01]>;
+def : WriteRes<WriteZero, []>;
+
+defm : SMWriteResPair<WriteALU, IEC_RSV01, 1>;
+defm : SMWriteResPair<WriteIMul, IEC_RSV1, 3>;
+defm : SMWriteResPair<WriteShift, IEC_RSV0, 1>;
+defm : SMWriteResPair<WriteJump, IEC_RSV1, 1>;
+
+// This is for simple LEAs with one or two input operands.
+// The complex ones can only execute on port 1, and they require two cycles on
+// the port to read all inputs. We don't model that.
+def : WriteRes<WriteLEA, [IEC_RSV1]>;
+
+// This is quite rough; the latency depends on the dividend.
+def : WriteRes<WriteIDiv, [IEC_RSV01, SMDivider]> {
+ let Latency = 25;
+ let ResourceCycles = [1, 25];
+}
+def : WriteRes<WriteIDivLd, [MEC_RSV, IEC_RSV01, SMDivider]> {
+ let Latency = 29;
+ let ResourceCycles = [1, 1, 25];
+}
- // System instructions
- InstrItinData<IIC_CPUID, [InstrStage<60, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_INT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_INT3, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_INVD, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_INVLPG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_IRET, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_HLT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_LXS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_LTR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_RDTSC, [InstrStage<30, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_RSM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_SIDT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_SGDT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_SLDT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_STR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_SWAPGS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_SYSCALL, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_SYS_ENTER_EXIT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+// Scalar and vector floating point.
+defm : SMWriteResPair<WriteFAdd, FPC_RSV1, 3>;
+defm : SMWriteResPair<WriteFRcp, FPC_RSV0, 5>;
+defm : SMWriteResPair<WriteFSqrt, FPC_RSV0, 15>;
+defm : SMWriteResPair<WriteCvtF2I, FPC_RSV01, 4>;
+defm : SMWriteResPair<WriteCvtI2F, FPC_RSV01, 4>;
+defm : SMWriteResPair<WriteCvtF2F, FPC_RSV01, 4>;
+defm : SMWriteResPair<WriteFShuffle, FPC_RSV0, 1>;
+defm : SMWriteResPair<WriteFBlend, FPC_RSV0, 1>;
+
+// This is quite rough; the latency depends on the precision.
+def : WriteRes<WriteFMul, [FPC_RSV0, SMFPMultiplier]> {
+ let Latency = 5;
+ let ResourceCycles = [1, 2];
+}
+def : WriteRes<WriteFMulLd, [MEC_RSV, FPC_RSV0, SMFPMultiplier]> {
+ let Latency = 8;
+ let ResourceCycles = [1, 1, 2];
+}
- InstrItinData<IIC_IN_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_IN_RI, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_OUT_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_OUT_IR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_INS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+def : WriteRes<WriteFDiv, [FPC_RSV0, SMFPDivider]> {
+ let Latency = 34;
+ let ResourceCycles = [1, 34];
+}
+def : WriteRes<WriteFDivLd, [MEC_RSV, FPC_RSV0, SMFPDivider]> {
+ let Latency = 37;
+ let ResourceCycles = [1, 1, 34];
+}
- InstrItinData<IIC_MOV_REG_DR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MOV_DR_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- // worst case for mov REG_CRx
- InstrItinData<IIC_MOV_REG_CR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MOV_CR_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+// Vector integer operations.
+defm : SMWriteResPair<WriteVecShift, FPC_RSV0, 1>;
+defm : SMWriteResPair<WriteVecLogic, FPC_RSV01, 1>;
+defm : SMWriteResPair<WriteVecALU, FPC_RSV01, 1>;
+defm : SMWriteResPair<WriteVecIMul, FPC_RSV0, 4>;
+defm : SMWriteResPair<WriteShuffle, FPC_RSV0, 1>;
+defm : SMWriteResPair<WriteBlend, FPC_RSV0, 1>;
+defm : SMWriteResPair<WriteMPSAD, FPC_RSV0, 7>;
+
+// String instructions.
+// Packed Compare Implicit Length Strings, Return Mask
+def : WriteRes<WritePCmpIStrM, [FPC_RSV0]> {
+ let Latency = 13;
+ let ResourceCycles = [13];
+}
+def : WriteRes<WritePCmpIStrMLd, [FPC_RSV0, MEC_RSV]> {
+ let Latency = 13;
+ let ResourceCycles = [13, 1];
+}
- InstrItinData<IIC_MOV_REG_SR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MOV_MEM_SR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MOV_SR_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MOV_SR_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- // LAR
- InstrItinData<IIC_LAR_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_LAR_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- // LSL
- InstrItinData<IIC_LSL_RM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_LSL_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+// Packed Compare Explicit Length Strings, Return Mask
+def : WriteRes<WritePCmpEStrM, [FPC_RSV0]> {
+ let Latency = 17;
+ let ResourceCycles = [17];
+}
+def : WriteRes<WritePCmpEStrMLd, [FPC_RSV0, MEC_RSV]> {
+ let Latency = 17;
+ let ResourceCycles = [17, 1];
+}
- InstrItinData<IIC_LGDT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_LIDT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_LLDT_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_LLDT_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- // push control register, segment registers
- InstrItinData<IIC_PUSH_CS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_PUSH_SR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- // pop control register, segment registers
- InstrItinData<IIC_POP_SR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_POP_SR_SS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- // VERR, VERW
- InstrItinData<IIC_VERR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_VERW_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_VERW_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- // WRMSR, RDMSR
- InstrItinData<IIC_WRMSR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_RDMSR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_RDPMC, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- // SMSW, LMSW
- InstrItinData<IIC_SMSW, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_LMSW_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_LMSW_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+// Packed Compare Implicit Length Strings, Return Index
+def : WriteRes<WritePCmpIStrI, [FPC_RSV0]> {
+ let Latency = 17;
+ let ResourceCycles = [17];
+}
+def : WriteRes<WritePCmpIStrILd, [FPC_RSV0, MEC_RSV]> {
+ let Latency = 17;
+ let ResourceCycles = [17, 1];
+}
- InstrItinData<IIC_ENTER, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_LEAVE, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+// Packed Compare Explicit Length Strings, Return Index
+def : WriteRes<WritePCmpEStrI, [FPC_RSV0]> {
+ let Latency = 21;
+ let ResourceCycles = [21];
+}
+def : WriteRes<WritePCmpEStrILd, [FPC_RSV0, MEC_RSV]> {
+ let Latency = 21;
+ let ResourceCycles = [21, 1];
+}
- InstrItinData<IIC_POP_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_POP_REG16, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_POP_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_POP_F, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_POP_FD, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_POP_A, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+// AES Instructions.
+def : WriteRes<WriteAESDecEnc, [FPC_RSV0]> {
+ let Latency = 8;
+ let ResourceCycles = [5];
+}
+def : WriteRes<WriteAESDecEncLd, [FPC_RSV0, MEC_RSV]> {
+ let Latency = 8;
+ let ResourceCycles = [5, 1];
+}
- InstrItinData<IIC_PUSH_IMM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_PUSH_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_PUSH_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_PUSH_F, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_PUSH_A, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
+def : WriteRes<WriteAESIMC, [FPC_RSV0]> {
+ let Latency = 8;
+ let ResourceCycles = [5];
+}
+def : WriteRes<WriteAESIMCLd, [FPC_RSV0, MEC_RSV]> {
+ let Latency = 8;
+ let ResourceCycles = [5, 1];
+}
- InstrItinData<IIC_BSWAP, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_BIT_SCAN_MEM, [InstrStage<10, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<10, [MEC_RSV]>] >,
- InstrItinData<IIC_BIT_SCAN_REG, [InstrStage<10, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MOVS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_STOS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_SCAS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_CMPS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MOV, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_MOV_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_AHF, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_BT_MI, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_BT_MR, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_BT_RI, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_BT_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_BTX_MI, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_BTX_MR, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_BTX_RI, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_BTX_RR, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_XCHG_REG, [InstrStage<5, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_XCHG_MEM, [InstrStage<5, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<5, [MEC_RSV]>] >,
- InstrItinData<IIC_XADD_REG, [InstrStage<5, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_XADD_MEM, [InstrStage<5, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<5, [MEC_RSV]>] >,
- InstrItinData<IIC_CMPXCHG_MEM, [InstrStage<6, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_CMPXCHG_REG, [InstrStage<6, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_CMPXCHG_MEM8, [InstrStage<6, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<6, [MEC_RSV]>] >,
- InstrItinData<IIC_CMPXCHG_REG8, [InstrStage<6, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<6, [MEC_RSV]>] >,
- InstrItinData<IIC_CMPXCHG_8B, [InstrStage<6, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_CMPXCHG_16B, [InstrStage<6, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_LODS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_OUTS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_CLC, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_CLD, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_CLI, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_CMC, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_CLTS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_STC, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_STI, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_STD, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_XLAT, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_AAA, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_AAD, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_AAM, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_AAS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_DAA, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_DAS, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_BOUND, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_ARPL_REG, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_ARPL_MEM, [InstrStage<1, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_MOVBE, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_AES, [InstrStage<8, [FPC_RSV0]>] >,
- InstrItinData<IIC_BLEND_NOMEM, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_BLEND_MEM, [InstrStage<4, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<4, [MEC_RSV]>] >,
- InstrItinData<IIC_BIT_SCAN_MEM, [InstrStage<10, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<10, [MEC_RSV]>] >,
- InstrItinData<IIC_BIT_SCAN_REG, [InstrStage<10, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_CBW, [InstrStage<4, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_CRC32_REG, [InstrStage<3, [IEC_RSV0, IEC_RSV1]>] >,
- InstrItinData<IIC_CRC32_MEM, [InstrStage<3, [IEC_RSV0, IEC_RSV1], 0>,
- InstrStage<3, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_DPPD_RR, [InstrStage<12, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_DPPD_RM, [InstrStage<12, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<12, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_DPPS_RR, [InstrStage<15, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_DPPS_RM, [InstrStage<15, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<15, [MEC_RSV]>] >,
- InstrItinData<IIC_MMX_EMMS, [InstrStage<10, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_EXTRACTPS_RR, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_EXTRACTPS_RM, [InstrStage<4, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<4, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_INSERTPS_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_INSERTPS_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_MPSADBW_RR, [InstrStage<1, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_MPSADBW_RM, [InstrStage<1, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<1, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_PMULLD_RR, [InstrStage<11, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_PMULLD_RM, [InstrStage<11, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<11, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_ROUNDPS_REG, [InstrStage<5, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_ROUNDPS_MEM, [InstrStage<5, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<5, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_ROUNDPD_REG, [InstrStage<4, [FPC_RSV0, FPC_RSV1]>] >,
- InstrItinData<IIC_SSE_ROUNDPD_MEM, [InstrStage<4, [FPC_RSV0, FPC_RSV1], 0>,
- InstrStage<4, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_POPCNT_RR, [InstrStage<4, [IEC_RSV1]>] >,
- InstrItinData<IIC_SSE_POPCNT_RM, [InstrStage<4, [IEC_RSV1], 0>,
- InstrStage<4, [MEC_RSV]>] >,
- InstrItinData<IIC_SSE_PCLMULQDQ_RR, [InstrStage<10, [IEC_RSV1]>] >,
- InstrItinData<IIC_SSE_PCLMULQDQ_RM, [InstrStage<10, [IEC_RSV1], 0>,
- InstrStage<10, [MEC_RSV]>] >,
+def : WriteRes<WriteAESKeyGen, [FPC_RSV0]> {
+ let Latency = 8;
+ let ResourceCycles = [5];
+}
+def : WriteRes<WriteAESKeyGenLd, [FPC_RSV0, MEC_RSV]> {
+ let Latency = 8;
+ let ResourceCycles = [5, 1];
+}
- InstrItinData<IIC_NOP, [InstrStage<1, [IEC_RSV0, IEC_RSV1]>] >
- ]>;
+// Carry-less multiplication instructions.
+def : WriteRes<WriteCLMul, [FPC_RSV0]> {
+ let Latency = 10;
+ let ResourceCycles = [10];
+}
+def : WriteRes<WriteCLMulLd, [FPC_RSV0, MEC_RSV]> {
+ let Latency = 10;
+ let ResourceCycles = [10, 1];
+}
-// Silvermont machine model.
-def SLMModel : SchedMachineModel {
- let IssueWidth = 2; // Allows 2 instructions per scheduling group.
- let MinLatency = 1; // InstrStage cycles overrides MinLatency.
- // OperandCycles may be used for expected latency.
- let LoadLatency = 3; // Expected cycles, may be overriden by OperandCycles.
- let HighLatency = 30;// Expected, may be overriden by OperandCycles.
- let Itineraries = SLMItineraries;
-}
+def : WriteRes<WriteSystem, [FPC_RSV0]> { let Latency = 100; }
+def : WriteRes<WriteMicrocoded, [FPC_RSV0]> { let Latency = 100; }
+def : WriteRes<WriteFence, [MEC_RSV]>;
+def : WriteRes<WriteNop, []>;
+
+// AVX is not supported on this architecture, but we should define the basic
+// scheduling resources anyway.
+def : WriteRes<WriteIMulH, [FPC_RSV0]>;
+defm : SMWriteResPair<WriteVarBlend, FPC_RSV0, 1>;
+defm : SMWriteResPair<WriteFVarBlend, FPC_RSV0, 1>;
+defm : SMWriteResPair<WriteFShuffle256, FPC_RSV0, 1>;
+defm : SMWriteResPair<WriteShuffle256, FPC_RSV0, 1>;
+defm : SMWriteResPair<WriteVarVecShift, FPC_RSV0, 1>;
+} // SchedModel
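// A SchedMachineModel only takes effect once a processor definition selects
// it; the Silvermont CPU in X86.td is expected to be wired up roughly like
// this (sketch -- the exact feature list is omitted here):
//   def : ProcessorModel<"slm", SLMModel, [ProcIntelSLM, FeatureSSE42, ...]>;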
diff --git a/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp b/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
index b9c620f..a83dd9b 100644
--- a/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -11,20 +11,23 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "x86-selectiondag-info"
-#include "X86TargetMachine.h"
+#include "X86InstrInfo.h"
+#include "X86ISelLowering.h"
+#include "X86RegisterInfo.h"
+#include "X86Subtarget.h"
+#include "X86SelectionDAGInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/Target/TargetLowering.h"
+
using namespace llvm;
-X86SelectionDAGInfo::X86SelectionDAGInfo(const X86TargetMachine &TM) :
- TargetSelectionDAGInfo(TM),
- Subtarget(&TM.getSubtarget<X86Subtarget>()),
- TLI(*TM.getTargetLowering()) {
-}
+#define DEBUG_TYPE "x86-selectiondag-info"
-X86SelectionDAGInfo::~X86SelectionDAGInfo() {
-}
+X86SelectionDAGInfo::X86SelectionDAGInfo(const DataLayout &DL)
+ : TargetSelectionDAGInfo(&DL) {}
+
+X86SelectionDAGInfo::~X86SelectionDAGInfo() {}
SDValue
X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
@@ -34,6 +37,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
bool isVolatile,
MachinePointerInfo DstPtrInfo) const {
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
+ const X86Subtarget &Subtarget = DAG.getTarget().getSubtarget<X86Subtarget>();
// If to a segment-relative address space, use the default lowering.
if (DstPtrInfo.getAddrSpace() >= 256)
@@ -42,16 +46,14 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
// If not DWORD aligned or size is more than the threshold, call the library.
// The libc version is likely to be faster for these cases. It can use the
// address value and run time information about the CPU.
- if ((Align & 3) != 0 ||
- !ConstantSize ||
- ConstantSize->getZExtValue() >
- Subtarget->getMaxInlineSizeThreshold()) {
+ if ((Align & 3) != 0 || !ConstantSize ||
+ ConstantSize->getZExtValue() > Subtarget.getMaxInlineSizeThreshold()) {
// Check to see if there is a specialized entry-point for memory zeroing.
ConstantSDNode *V = dyn_cast<ConstantSDNode>(Src);
if (const char *bzeroEntry = V &&
- V->isNullValue() ? Subtarget->getBZeroEntry() : 0) {
- EVT IntPtr = TLI.getPointerTy();
+ V->isNullValue() ? Subtarget.getBZeroEntry() : nullptr) {
+ EVT IntPtr = DAG.getTargetLoweringInfo().getPointerTy();
Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
@@ -60,15 +62,15 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
Args.push_back(Entry);
Entry.Node = Size;
Args.push_back(Entry);
- TargetLowering::
- CallLoweringInfo CLI(Chain, Type::getVoidTy(*DAG.getContext()),
- false, false, false, false,
- 0, CallingConv::C, /*isTailCall=*/false,
- /*doesNotRet=*/false, /*isReturnValueUsed=*/false,
- DAG.getExternalSymbol(bzeroEntry, IntPtr), Args,
- DAG, dl);
- std::pair<SDValue,SDValue> CallResult =
- TLI.LowerCallTo(CLI);
+
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl).setChain(Chain)
+ .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
+ DAG.getExternalSymbol(bzeroEntry, IntPtr), std::move(Args),
+ 0)
+ .setDiscardResult();
+
+ std::pair<SDValue,SDValue> CallResult = DAG.getTargetLoweringInfo().LowerCallTo(CLI);
return CallResult.second;
}
@@ -77,7 +79,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
}
uint64_t SizeVal = ConstantSize->getZExtValue();
- SDValue InFlag(0, 0);
+ SDValue InFlag;
EVT AVT;
SDValue Count;
ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Src);
@@ -99,7 +101,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
ValReg = X86::EAX;
Val = (Val << 8) | Val;
Val = (Val << 16) | Val;
- if (Subtarget->is64Bit() && ((Align & 0x7) == 0)) { // QWORD aligned
+ if (Subtarget.is64Bit() && ((Align & 0x7) == 0)) { // QWORD aligned
AVT = MVT::i64;
ValReg = X86::RAX;
Val = (Val << 32) | Val;
@@ -128,18 +130,16 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
InFlag = Chain.getValue(1);
}
- Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RCX :
- X86::ECX,
- Count, InFlag);
+ Chain = DAG.getCopyToReg(Chain, dl, Subtarget.is64Bit() ? X86::RCX : X86::ECX,
+ Count, InFlag);
InFlag = Chain.getValue(1);
- Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RDI :
- X86::EDI,
- Dst, InFlag);
+ Chain = DAG.getCopyToReg(Chain, dl, Subtarget.is64Bit() ? X86::RDI : X86::EDI,
+ Dst, InFlag);
InFlag = Chain.getValue(1);
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, DAG.getValueType(AVT), InFlag };
- Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops, array_lengthof(Ops));
+ Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops);
if (TwoRepStos) {
InFlag = Chain.getValue(1);
@@ -153,7 +153,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
InFlag = Chain.getValue(1);
Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, DAG.getValueType(MVT::i8), InFlag };
- Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops, array_lengthof(Ops));
+ Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops);
} else if (BytesLeft) {
// Handle the last 1 - 7 bytes.
unsigned Offset = SizeVal - BytesLeft;
@@ -182,10 +182,11 @@ X86SelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
// This requires the copy size to be a constant, preferably
// within a subtarget-specific limit.
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
+ const X86Subtarget &Subtarget = DAG.getTarget().getSubtarget<X86Subtarget>();
if (!ConstantSize)
return SDValue();
uint64_t SizeVal = ConstantSize->getZExtValue();
- if (!AlwaysInline && SizeVal > Subtarget->getMaxInlineSizeThreshold())
+ if (!AlwaysInline && SizeVal > Subtarget.getMaxInlineSizeThreshold())
return SDValue();
/// If not DWORD aligned, it is more efficient to call the library. However
@@ -218,31 +219,30 @@ X86SelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
AVT = MVT::i32;
else
// QWORD aligned
- AVT = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
+ AVT = Subtarget.is64Bit() ? MVT::i64 : MVT::i32;
unsigned UBytes = AVT.getSizeInBits() / 8;
unsigned CountVal = SizeVal / UBytes;
SDValue Count = DAG.getIntPtrConstant(CountVal);
unsigned BytesLeft = SizeVal % UBytes;
- SDValue InFlag(0, 0);
- Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RCX :
+ SDValue InFlag;
+ Chain = DAG.getCopyToReg(Chain, dl, Subtarget.is64Bit() ? X86::RCX :
X86::ECX,
Count, InFlag);
InFlag = Chain.getValue(1);
- Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RDI :
+ Chain = DAG.getCopyToReg(Chain, dl, Subtarget.is64Bit() ? X86::RDI :
X86::EDI,
Dst, InFlag);
InFlag = Chain.getValue(1);
- Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RSI :
+ Chain = DAG.getCopyToReg(Chain, dl, Subtarget.is64Bit() ? X86::RSI :
X86::ESI,
Src, InFlag);
InFlag = Chain.getValue(1);
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, DAG.getValueType(AVT), InFlag };
- SDValue RepMovs = DAG.getNode(X86ISD::REP_MOVS, dl, Tys, Ops,
- array_lengthof(Ops));
+ SDValue RepMovs = DAG.getNode(X86ISD::REP_MOVS, dl, Tys, Ops);
SmallVector<SDValue, 4> Results;
Results.push_back(RepMovs);
@@ -263,6 +263,5 @@ X86SelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
SrcPtrInfo.getWithOffset(Offset)));
}
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &Results[0], Results.size());
+ return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Results);
}
diff --git a/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.h b/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.h
index d728af5..c12555a 100644
--- a/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.h
@@ -23,32 +23,24 @@ class X86TargetMachine;
class X86Subtarget;
class X86SelectionDAGInfo : public TargetSelectionDAGInfo {
- /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
- /// make the right decision when generating code for different targets.
- const X86Subtarget *Subtarget;
-
- const X86TargetLowering &TLI;
-
public:
- explicit X86SelectionDAGInfo(const X86TargetMachine &TM);
+ explicit X86SelectionDAGInfo(const DataLayout &DL);
~X86SelectionDAGInfo();
- virtual
SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
bool isVolatile,
- MachinePointerInfo DstPtrInfo) const;
+ MachinePointerInfo DstPtrInfo) const override;
- virtual
SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
bool isVolatile, bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
- MachinePointerInfo SrcPtrInfo) const;
+ MachinePointerInfo SrcPtrInfo) const override;
};
}
diff --git a/contrib/llvm/lib/Target/X86/X86Subtarget.cpp b/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
index 01353b2..41551a1 100644
--- a/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
+++ b/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
@@ -11,12 +11,12 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "subtarget"
#include "X86Subtarget.h"
#include "X86InstrInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Host.h"
@@ -24,15 +24,24 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
+#if defined(_MSC_VER)
+#include <intrin.h>
+#endif
+
+using namespace llvm;
+
+#define DEBUG_TYPE "subtarget"
+
#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "X86GenSubtargetInfo.inc"
-using namespace llvm;
+// Temporary option to control early if-conversion for x86 while adding machine
+// models.
+static cl::opt<bool>
+X86EarlyIfConv("x86-early-ifcvt", cl::Hidden,
+ cl::desc("Enable early if-conversion on X86"));
-#if defined(_MSC_VER)
-#include <intrin.h>
-#endif
/// ClassifyBlockAddressReference - Classify a blockaddress reference for the
/// current subtarget according to how we should reference it in a non-pcrel
@@ -55,7 +64,7 @@ unsigned char X86Subtarget::
ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const {
// DLLImport only exists on windows, it is implemented as a load from a
// DLLIMPORT stub.
- if (GV->hasDLLImportLinkage())
+ if (GV->hasDLLImportStorageClass())
return X86II::MO_DLLIMPORT;
// Determine whether this is a reference to a definition or a declaration.
@@ -153,7 +162,7 @@ const char *X86Subtarget::getBZeroEntry() const {
!getTargetTriple().isMacOSXVersionLT(10, 6))
return "__bzero";
- return 0;
+ return nullptr;
}
bool X86Subtarget::hasSinCos() const {
@@ -165,247 +174,24 @@ bool X86Subtarget::hasSinCos() const {
/// IsLegalToCallImmediateAddr - Return true if the subtarget allows calls
/// to immediate address.
bool X86Subtarget::IsLegalToCallImmediateAddr(const TargetMachine &TM) const {
- if (In64BitMode)
+ // FIXME: I386 PE/COFF supports PC relative calls using IMAGE_REL_I386_REL32
+ // but WinCOFFObjectWriter::RecordRelocation cannot emit them. Once it does,
+ // the following check for Win32 should be removed.
+ if (In64BitMode || isTargetWin32())
return false;
return isTargetELF() || TM.getRelocationModel() == Reloc::Static;
}
-static bool OSHasAVXSupport() {
-#if defined(i386) || defined(__i386__) || defined(__x86__) || defined(_M_IX86)\
- || defined(__x86_64__) || defined(_M_AMD64) || defined (_M_X64)
-#if defined(__GNUC__)
- // Check xgetbv; this uses a .byte sequence instead of the instruction
- // directly because older assemblers do not include support for xgetbv and
- // there is no easy way to conditionally compile based on the assembler used.
- int rEAX, rEDX;
- __asm__ (".byte 0x0f, 0x01, 0xd0" : "=a" (rEAX), "=d" (rEDX) : "c" (0));
-#elif defined(_MSC_FULL_VER) && defined(_XCR_XFEATURE_ENABLED_MASK)
- unsigned long long rEAX = _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
-#else
- int rEAX = 0; // Ensures we return false
-#endif
- return (rEAX & 6) == 6;
-#else
- return false;
-#endif
-}
-
-void X86Subtarget::AutoDetectSubtargetFeatures() {
- unsigned EAX = 0, EBX = 0, ECX = 0, EDX = 0;
- unsigned MaxLevel;
- union {
- unsigned u[3];
- char c[12];
- } text;
-
- if (X86_MC::GetCpuIDAndInfo(0, &MaxLevel, text.u+0, text.u+2, text.u+1) ||
- MaxLevel < 1)
- return;
-
- X86_MC::GetCpuIDAndInfo(0x1, &EAX, &EBX, &ECX, &EDX);
-
- if ((EDX >> 15) & 1) { HasCMov = true; ToggleFeature(X86::FeatureCMOV); }
- if ((EDX >> 23) & 1) { X86SSELevel = MMX; ToggleFeature(X86::FeatureMMX); }
- if ((EDX >> 25) & 1) { X86SSELevel = SSE1; ToggleFeature(X86::FeatureSSE1); }
- if ((EDX >> 26) & 1) { X86SSELevel = SSE2; ToggleFeature(X86::FeatureSSE2); }
- if (ECX & 0x1) { X86SSELevel = SSE3; ToggleFeature(X86::FeatureSSE3); }
- if ((ECX >> 9) & 1) { X86SSELevel = SSSE3; ToggleFeature(X86::FeatureSSSE3);}
- if ((ECX >> 19) & 1) { X86SSELevel = SSE41; ToggleFeature(X86::FeatureSSE41);}
- if ((ECX >> 20) & 1) { X86SSELevel = SSE42; ToggleFeature(X86::FeatureSSE42);}
- if (((ECX >> 27) & 1) && ((ECX >> 28) & 1) && OSHasAVXSupport()) {
- X86SSELevel = AVX; ToggleFeature(X86::FeatureAVX);
- }
-
- bool IsIntel = memcmp(text.c, "GenuineIntel", 12) == 0;
- bool IsAMD = !IsIntel && memcmp(text.c, "AuthenticAMD", 12) == 0;
-
- if ((ECX >> 1) & 0x1) {
- HasPCLMUL = true;
- ToggleFeature(X86::FeaturePCLMUL);
- }
- if ((ECX >> 12) & 0x1) {
- HasFMA = true;
- ToggleFeature(X86::FeatureFMA);
- }
- if (IsIntel && ((ECX >> 22) & 0x1)) {
- HasMOVBE = true;
- ToggleFeature(X86::FeatureMOVBE);
- }
- if ((ECX >> 23) & 0x1) {
- HasPOPCNT = true;
- ToggleFeature(X86::FeaturePOPCNT);
- }
- if ((ECX >> 25) & 0x1) {
- HasAES = true;
- ToggleFeature(X86::FeatureAES);
- }
- if ((ECX >> 29) & 0x1) {
- HasF16C = true;
- ToggleFeature(X86::FeatureF16C);
- }
- if (IsIntel && ((ECX >> 30) & 0x1)) {
- HasRDRAND = true;
- ToggleFeature(X86::FeatureRDRAND);
- }
-
- if ((ECX >> 13) & 0x1) {
- HasCmpxchg16b = true;
- ToggleFeature(X86::FeatureCMPXCHG16B);
- }
-
- if (IsIntel || IsAMD) {
- // Determine if bit test memory instructions are slow.
- unsigned Family = 0;
- unsigned Model = 0;
- X86_MC::DetectFamilyModel(EAX, Family, Model);
- if (IsAMD || (Family == 6 && Model >= 13)) {
- IsBTMemSlow = true;
- ToggleFeature(X86::FeatureSlowBTMem);
- }
-
- // If it's an Intel chip since Nehalem and not an Atom chip, unaligned
- // memory access is fast. We hard code model numbers here because they
- // aren't strictly increasing for Intel chips it seems.
- if (IsIntel &&
- ((Family == 6 && Model == 0x1E) || // Nehalem: Clarksfield, Lynnfield,
- // Jasper Froest
- (Family == 6 && Model == 0x1A) || // Nehalem: Bloomfield, Nehalem-EP
- (Family == 6 && Model == 0x2E) || // Nehalem: Nehalem-EX
- (Family == 6 && Model == 0x25) || // Westmere: Arrandale, Clarksdale
- (Family == 6 && Model == 0x2C) || // Westmere: Gulftown, Westmere-EP
- (Family == 6 && Model == 0x2F) || // Westmere: Westmere-EX
- (Family == 6 && Model == 0x2A) || // SandyBridge
- (Family == 6 && Model == 0x2D) || // SandyBridge: SandyBridge-E*
- (Family == 6 && Model == 0x3A) || // IvyBridge
- (Family == 6 && Model == 0x3E) || // IvyBridge EP
- (Family == 6 && Model == 0x3C) || // Haswell
- (Family == 6 && Model == 0x3F) || // ...
- (Family == 6 && Model == 0x45) || // ...
- (Family == 6 && Model == 0x46))) { // ...
- IsUAMemFast = true;
- ToggleFeature(X86::FeatureFastUAMem);
- }
-
- // Set processor type. Currently only Atom or Silvermont (SLM) is detected.
- if (Family == 6 &&
- (Model == 28 || Model == 38 || Model == 39 ||
- Model == 53 || Model == 54)) {
- X86ProcFamily = IntelAtom;
-
- UseLeaForSP = true;
- ToggleFeature(X86::FeatureLeaForSP);
- }
- else if (Family == 6 &&
- (Model == 55 || Model == 74 || Model == 77)) {
- X86ProcFamily = IntelSLM;
- }
-
- unsigned MaxExtLevel;
- X86_MC::GetCpuIDAndInfo(0x80000000, &MaxExtLevel, &EBX, &ECX, &EDX);
-
- if (MaxExtLevel >= 0x80000001) {
- X86_MC::GetCpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX);
- if ((EDX >> 29) & 0x1) {
- HasX86_64 = true;
- ToggleFeature(X86::Feature64Bit);
- }
- if ((ECX >> 5) & 0x1) {
- HasLZCNT = true;
- ToggleFeature(X86::FeatureLZCNT);
- }
- if (IsIntel && ((ECX >> 8) & 0x1)) {
- HasPRFCHW = true;
- ToggleFeature(X86::FeaturePRFCHW);
- }
- if (IsAMD) {
- if ((ECX >> 6) & 0x1) {
- HasSSE4A = true;
- ToggleFeature(X86::FeatureSSE4A);
- }
- if ((ECX >> 11) & 0x1) {
- HasXOP = true;
- ToggleFeature(X86::FeatureXOP);
- }
- if ((ECX >> 16) & 0x1) {
- HasFMA4 = true;
- ToggleFeature(X86::FeatureFMA4);
- }
- }
- }
- }
-
- if (MaxLevel >= 7) {
- if (!X86_MC::GetCpuIDAndInfoEx(0x7, 0x0, &EAX, &EBX, &ECX, &EDX)) {
- if (IsIntel && (EBX & 0x1)) {
- HasFSGSBase = true;
- ToggleFeature(X86::FeatureFSGSBase);
- }
- if ((EBX >> 3) & 0x1) {
- HasBMI = true;
- ToggleFeature(X86::FeatureBMI);
- }
- if ((EBX >> 4) & 0x1) {
- HasHLE = true;
- ToggleFeature(X86::FeatureHLE);
- }
- if (IsIntel && ((EBX >> 5) & 0x1)) {
- X86SSELevel = AVX2;
- ToggleFeature(X86::FeatureAVX2);
- }
- if (IsIntel && ((EBX >> 8) & 0x1)) {
- HasBMI2 = true;
- ToggleFeature(X86::FeatureBMI2);
- }
- if (IsIntel && ((EBX >> 11) & 0x1)) {
- HasRTM = true;
- ToggleFeature(X86::FeatureRTM);
- }
- if (IsIntel && ((EBX >> 16) & 0x1)) {
- X86SSELevel = AVX512F;
- ToggleFeature(X86::FeatureAVX512);
- }
- if (IsIntel && ((EBX >> 18) & 0x1)) {
- HasRDSEED = true;
- ToggleFeature(X86::FeatureRDSEED);
- }
- if (IsIntel && ((EBX >> 19) & 0x1)) {
- HasADX = true;
- ToggleFeature(X86::FeatureADX);
- }
- if (IsIntel && ((EBX >> 26) & 0x1)) {
- HasPFI = true;
- ToggleFeature(X86::FeaturePFI);
- }
- if (IsIntel && ((EBX >> 27) & 0x1)) {
- HasERI = true;
- ToggleFeature(X86::FeatureERI);
- }
- if (IsIntel && ((EBX >> 28) & 0x1)) {
- HasCDI = true;
- ToggleFeature(X86::FeatureCDI);
- }
- if (IsIntel && ((EBX >> 29) & 0x1)) {
- HasSHA = true;
- ToggleFeature(X86::FeatureSHA);
- }
- }
- if (IsAMD && ((ECX >> 21) & 0x1)) {
- HasTBM = true;
- ToggleFeature(X86::FeatureTBM);
- }
- }
-}
-
void X86Subtarget::resetSubtargetFeatures(const MachineFunction *MF) {
AttributeSet FnAttrs = MF->getFunction()->getAttributes();
- Attribute CPUAttr = FnAttrs.getAttribute(AttributeSet::FunctionIndex,
- "target-cpu");
- Attribute FSAttr = FnAttrs.getAttribute(AttributeSet::FunctionIndex,
- "target-features");
+ Attribute CPUAttr =
+ FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-cpu");
+ Attribute FSAttr =
+ FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-features");
std::string CPU =
- !CPUAttr.hasAttribute(Attribute::None) ?CPUAttr.getValueAsString() : "";
+ !CPUAttr.hasAttribute(Attribute::None) ? CPUAttr.getValueAsString() : "";
std::string FS =
- !FSAttr.hasAttribute(Attribute::None) ? FSAttr.getValueAsString() : "";
+ !FSAttr.hasAttribute(Attribute::None) ? FSAttr.getValueAsString() : "";
if (!FS.empty()) {
initializeEnvironment();
resetSubtargetFeatures(CPU, FS);
@@ -414,58 +200,24 @@ void X86Subtarget::resetSubtargetFeatures(const MachineFunction *MF) {
void X86Subtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) {
std::string CPUName = CPU;
- if (!FS.empty() || !CPU.empty()) {
- if (CPUName.empty()) {
-#if defined(i386) || defined(__i386__) || defined(__x86__) || defined(_M_IX86)\
- || defined(__x86_64__) || defined(_M_AMD64) || defined (_M_X64)
- CPUName = sys::getHostCPUName();
-#else
- CPUName = "generic";
-#endif
- }
-
- // Make sure 64-bit features are available in 64-bit mode. (But make sure
- // SSE2 can be turned off explicitly.)
- std::string FullFS = FS;
- if (In64BitMode) {
- if (!FullFS.empty())
- FullFS = "+64bit,+sse2," + FullFS;
- else
- FullFS = "+64bit,+sse2";
- }
-
- // If feature string is not empty, parse features string.
- ParseSubtargetFeatures(CPUName, FullFS);
- } else {
- if (CPUName.empty()) {
-#if defined (__x86_64__) || defined(__i386__)
- CPUName = sys::getHostCPUName();
-#else
- CPUName = "generic";
-#endif
- }
- // Otherwise, use CPUID to auto-detect feature set.
- AutoDetectSubtargetFeatures();
-
- // Make sure 64-bit features are available in 64-bit mode.
- if (In64BitMode) {
- if (!HasX86_64) { HasX86_64 = true; ToggleFeature(X86::Feature64Bit); }
- if (!HasCMov) { HasCMov = true; ToggleFeature(X86::FeatureCMOV); }
-
- if (X86SSELevel < SSE2) {
- X86SSELevel = SSE2;
- ToggleFeature(X86::FeatureSSE1);
- ToggleFeature(X86::FeatureSSE2);
- }
- }
+ if (CPUName.empty())
+ CPUName = "generic";
+
+ // Make sure 64-bit features are available in 64-bit mode. (But make sure
+ // SSE2 can be turned off explicitly.)
+ std::string FullFS = FS;
+ if (In64BitMode) {
+ if (!FullFS.empty())
+ FullFS = "+64bit,+sse2," + FullFS;
+ else
+ FullFS = "+64bit,+sse2";
}
- // CPUName may have been set by the CPU detection code. Make sure the
- // new MCSchedModel is used.
- InitCPUSchedModel(CPUName);
+ // If feature string is not empty, parse features string.
+ ParseSubtargetFeatures(CPUName, FullFS);
- if (X86ProcFamily == IntelAtom || X86ProcFamily == IntelSLM)
- PostRAScheduler = true;
+ // Make sure the right MCSchedModel is used.
+ InitCPUSchedModel(CPUName);
InstrItins = getInstrItineraryForCPU(CPUName);
@@ -473,6 +225,12 @@ void X86Subtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) {
// target data structure which is shared with MC code emitter, etc.
if (In64BitMode)
ToggleFeature(X86::Mode64Bit);
+ else if (In32BitMode)
+ ToggleFeature(X86::Mode32Bit);
+ else if (In16BitMode)
+ ToggleFeature(X86::Mode16Bit);
+ else
+ llvm_unreachable("Not 16-bit, 32-bit or 64-bit mode!");
DEBUG(dbgs() << "Subtarget features: SSELevel " << X86SSELevel
<< ", 3DNowLevel " << X863DNowLevel
@@ -514,43 +272,93 @@ void X86Subtarget::initializeEnvironment() {
HasERI = false;
HasCDI = false;
HasPFI = false;
+ HasDQI = false;
+ HasBWI = false;
+ HasVLX = false;
HasADX = false;
HasSHA = false;
HasPRFCHW = false;
HasRDSEED = false;
IsBTMemSlow = false;
+ IsSHLDSlow = false;
IsUAMemFast = false;
HasVectorUAMem = false;
HasCmpxchg16b = false;
UseLeaForSP = false;
HasSlowDivide = false;
- PostRAScheduler = false;
PadShortFunctions = false;
CallRegIndirect = false;
LEAUsesAG = false;
+ SlowLEA = false;
+ SlowIncDec = false;
stackAlignment = 4;
// FIXME: this is a known good value for Yonah. How about others?
MaxInlineSizeThreshold = 128;
}
-X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS,
- unsigned StackAlignOverride, bool is64Bit)
- : X86GenSubtargetInfo(TT, CPU, FS)
- , X86ProcFamily(Others)
- , PICStyle(PICStyles::None)
- , TargetTriple(TT)
- , StackAlignOverride(StackAlignOverride)
- , In64BitMode(is64Bit) {
+static std::string computeDataLayout(const X86Subtarget &ST) {
+ // X86 is little endian
+ std::string Ret = "e";
+
+ Ret += DataLayout::getManglingComponent(ST.getTargetTriple());
+ // X86 and x32 have 32 bit pointers.
+ if (ST.isTarget64BitILP32() || !ST.is64Bit())
+ Ret += "-p:32:32";
+
+ // Some ABIs align 64 bit integers and doubles to 64 bits, others to 32.
+ if (ST.is64Bit() || ST.isOSWindows() || ST.isTargetNaCl())
+ Ret += "-i64:64";
+ else
+ Ret += "-f64:32:64";
+
+ // Some ABIs align long double to 128 bits, others to 32.
+ if (ST.isTargetNaCl())
+ ; // No f80
+ else if (ST.is64Bit() || ST.isTargetDarwin())
+ Ret += "-f80:128";
+ else
+ Ret += "-f80:32";
+
+ // The registers can hold 8, 16, 32 or, in x86-64, 64 bits.
+ if (ST.is64Bit())
+ Ret += "-n8:16:32:64";
+ else
+ Ret += "-n8:16:32";
+
+ // The stack is aligned to 32 bits on some ABIs and 128 bits on others.
+ if (!ST.is64Bit() && ST.isOSWindows())
+ Ret += "-S32";
+ else
+ Ret += "-S128";
+
+ return Ret;
+}
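// Worked illustration, derived from the branches above (hypothetical targets,
// not taken from this patch): an x86-64 Darwin subtarget picks the Mach-O
// mangling plus the 64-bit alignments, giving roughly
//   "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
// while a 32-bit Linux/ELF subtarget keeps 32-bit pointers and the 32-bit
// f64/f80 alignments:
//   "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"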
+
+X86Subtarget &X86Subtarget::initializeSubtargetDependencies(StringRef CPU,
+ StringRef FS) {
initializeEnvironment();
resetSubtargetFeatures(CPU, FS);
+ return *this;
}
-bool X86Subtarget::enablePostRAScheduler(
- CodeGenOpt::Level OptLevel,
- TargetSubtargetInfo::AntiDepBreakMode& Mode,
- RegClassVector& CriticalPathRCs) const {
- Mode = TargetSubtargetInfo::ANTIDEP_CRITICAL;
- CriticalPathRCs.clear();
- return PostRAScheduler && OptLevel >= CodeGenOpt::Default;
+X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
+ const std::string &FS, X86TargetMachine &TM,
+ unsigned StackAlignOverride)
+ : X86GenSubtargetInfo(TT, CPU, FS), X86ProcFamily(Others),
+ PICStyle(PICStyles::None), TargetTriple(TT),
+ StackAlignOverride(StackAlignOverride),
+ In64BitMode(TargetTriple.getArch() == Triple::x86_64),
+ In32BitMode(TargetTriple.getArch() == Triple::x86 &&
+ TargetTriple.getEnvironment() != Triple::CODE16),
+ In16BitMode(TargetTriple.getArch() == Triple::x86 &&
+ TargetTriple.getEnvironment() == Triple::CODE16),
+ DL(computeDataLayout(*this)), TSInfo(DL),
+ InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM),
+ FrameLowering(TargetFrameLowering::StackGrowsDown, getStackAlignment(),
+ is64Bit() ? -8 : -4),
+ JITInfo(hasSSE1()) {}
+
+bool X86Subtarget::enableEarlyIfConversion() const {
+ return hasCMov() && X86EarlyIfConv;
}
+
diff --git a/contrib/llvm/lib/Target/X86/X86Subtarget.h b/contrib/llvm/lib/Target/X86/X86Subtarget.h
index dd8c081..5f5df5e 100644
--- a/contrib/llvm/lib/Target/X86/X86Subtarget.h
+++ b/contrib/llvm/lib/Target/X86/X86Subtarget.h
@@ -14,6 +14,11 @@
#ifndef X86SUBTARGET_H
#define X86SUBTARGET_H
+#include "X86FrameLowering.h"
+#include "X86ISelLowering.h"
+#include "X86InstrInfo.h"
+#include "X86JITInfo.h"
+#include "X86SelectionDAGInfo.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/Target/TargetSubtargetInfo.h"
@@ -39,7 +44,8 @@ enum Style {
};
}
-class X86Subtarget : public X86GenSubtargetInfo {
+class X86Subtarget final : public X86GenSubtargetInfo {
+
protected:
enum X86SSEEnum {
NoMMXSSE, MMX, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, AVX, AVX2, AVX512F
@@ -142,6 +148,9 @@ protected:
/// IsBTMemSlow - True if BT (bit test) of memory instructions are slow.
bool IsBTMemSlow;
+ /// IsSHLDSlow - True if SHLD instructions are slow.
+ bool IsSHLDSlow;
+
/// IsUAMemFast - True if unaligned memory access is fast.
bool IsUAMemFast;
@@ -161,9 +170,6 @@ protected:
/// full divides and should be used when possible.
bool HasSlowDivide;
- /// PostRAScheduler - True if using post-register-allocation scheduler.
- bool PostRAScheduler;
-
/// PadShortFunctions - True if the short functions should be padded to prevent
/// a stall when returning too early.
bool PadShortFunctions;
@@ -175,15 +181,30 @@ protected:
/// address generation (AG) time.
bool LEAUsesAG;
+ /// SlowLEA - True if the LEA instruction with certain arguments is slow
+ bool SlowLEA;
+
+ /// SlowIncDec - True if INC and DEC instructions are slow when writing to flags
+ bool SlowIncDec;
+
/// Processor has AVX-512 PreFetch Instructions
bool HasPFI;
-
+
/// Processor has AVX-512 Exponential and Reciprocal Instructions
bool HasERI;
-
+
/// Processor has AVX-512 Conflict Detection Instructions
bool HasCDI;
-
+
+ /// Processor has AVX-512 Doubleword and Quadword instructions
+ bool HasDQI;
+
+ /// Processor has AVX-512 Byte and Word instructions
+ bool HasBWI;
+
+ /// Processor has AVX-512 Vector Length eXtensions
+ bool HasVLX;
+
/// stackAlignment - The minimum alignment known to hold of the stack frame on
/// entry to the function and which must be maintained by every function.
unsigned stackAlignment;
@@ -202,16 +223,39 @@ private:
/// StackAlignOverride - Override the stack alignment.
unsigned StackAlignOverride;
- /// In64BitMode - True if compiling for 64-bit, false for 32-bit.
+ /// In64BitMode - True if compiling for 64-bit, false for 16-bit or 32-bit.
bool In64BitMode;
+ /// In32BitMode - True if compiling for 32-bit, false for 16-bit or 64-bit.
+ bool In32BitMode;
+
+ /// In16BitMode - True if compiling for 16-bit, false for 32-bit or 64-bit.
+ bool In16BitMode;
+
+ // Calculates type size & alignment
+ const DataLayout DL;
+ X86SelectionDAGInfo TSInfo;
+ // Ordering here is important. X86InstrInfo initializes X86RegisterInfo which
+ // X86TargetLowering needs.
+ X86InstrInfo InstrInfo;
+ X86TargetLowering TLInfo;
+ X86FrameLowering FrameLowering;
+ X86JITInfo JITInfo;
+
public:
/// This constructor initializes the data members to match that
/// of the specified triple.
///
X86Subtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS,
- unsigned StackAlignOverride, bool is64Bit);
+ const std::string &FS, X86TargetMachine &TM,
+ unsigned StackAlignOverride);
+
+ const X86TargetLowering *getTargetLowering() const { return &TLInfo; }
+ const X86InstrInfo *getInstrInfo() const { return &InstrInfo; }
+ const DataLayout *getDataLayout() const { return &DL; }
+ const X86FrameLowering *getFrameLowering() const { return &FrameLowering; }
+ const X86SelectionDAGInfo *getSelectionDAGInfo() const { return &TSInfo; }
+ X86JITInfo *getJITInfo() { return &JITInfo; }
/// getStackAlignment - Returns the minimum alignment known to hold of the
/// stack frame on entry to the function and which must be maintained by every
@@ -226,13 +270,12 @@ public:
/// subtarget options. Definition of function is auto generated by tblgen.
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
- /// AutoDetectSubtargetFeatures - Auto-detect CPU features using CPUID
- /// instruction.
- void AutoDetectSubtargetFeatures();
-
/// \brief Reset the features for the X86 target.
- virtual void resetSubtargetFeatures(const MachineFunction *MF);
+ void resetSubtargetFeatures(const MachineFunction *MF) override;
private:
+ /// \brief Initialize the full set of dependencies so we can use an initializer
+ /// list for X86Subtarget.
+ X86Subtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
void initializeEnvironment();
void resetSubtargetFeatures(StringRef CPU, StringRef FS);
public:
@@ -241,9 +284,18 @@ public:
return In64BitMode;
}
+ bool is32Bit() const {
+ return In32BitMode;
+ }
+
+ bool is16Bit() const {
+ return In16BitMode;
+ }
+
/// Is this x86_64 with the ILP32 programming model (x32 ABI)?
bool isTarget64BitILP32() const {
- return In64BitMode && (TargetTriple.getEnvironment() == Triple::GNUX32);
+ return In64BitMode && (TargetTriple.getEnvironment() == Triple::GNUX32 ||
+ TargetTriple.getOS() == Triple::NaCl);
}
/// Is this x86_64 with the LP64 programming model (standard AMD64, no x32)?
@@ -292,6 +344,7 @@ public:
bool hasPRFCHW() const { return HasPRFCHW; }
bool hasRDSEED() const { return HasRDSEED; }
bool isBTMemSlow() const { return IsBTMemSlow; }
+ bool isSHLDSlow() const { return IsSHLDSlow; }
bool isUnalignedMemAccessFast() const { return IsUAMemFast; }
bool hasVectorUAMem() const { return HasVectorUAMem; }
bool hasCmpxchg16b() const { return HasCmpxchg16b; }
@@ -300,11 +353,17 @@ public:
bool padShortFunctions() const { return PadShortFunctions; }
bool callRegIndirect() const { return CallRegIndirect; }
bool LEAusesAG() const { return LEAUsesAG; }
+ bool slowLEA() const { return SlowLEA; }
+ bool slowIncDec() const { return SlowIncDec; }
bool hasCDI() const { return HasCDI; }
bool hasPFI() const { return HasPFI; }
bool hasERI() const { return HasERI; }
+ bool hasDQI() const { return HasDQI; }
+ bool hasBWI() const { return HasBWI; }
+ bool hasVLX() const { return HasVLX; }
bool isAtom() const { return X86ProcFamily == IntelAtom; }
+ bool isSLM() const { return X86ProcFamily == IntelSLM; }
const Triple &getTargetTriple() const { return TargetTriple; }
@@ -315,23 +374,33 @@ public:
bool isTargetSolaris() const {
return TargetTriple.getOS() == Triple::Solaris;
}
- bool isTargetELF() const {
- return (TargetTriple.getEnvironment() == Triple::ELF ||
- TargetTriple.isOSBinFormatELF());
- }
+
+ bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
+ bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
+ bool isTargetMacho() const { return TargetTriple.isOSBinFormatMachO(); }
+
bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
bool isTargetNaCl32() const { return isTargetNaCl() && !is64Bit(); }
bool isTargetNaCl64() const { return isTargetNaCl() && is64Bit(); }
- bool isTargetWindows() const { return TargetTriple.getOS() == Triple::Win32; }
- bool isTargetMingw() const { return TargetTriple.getOS() == Triple::MinGW32; }
- bool isTargetCygwin() const { return TargetTriple.getOS() == Triple::Cygwin; }
- bool isTargetCygMing() const { return TargetTriple.isOSCygMing(); }
- bool isTargetCOFF() const {
- return (TargetTriple.getEnvironment() != Triple::ELF &&
- TargetTriple.isOSBinFormatCOFF());
+
+ bool isTargetWindowsMSVC() const {
+ return TargetTriple.isWindowsMSVCEnvironment();
+ }
+
+ bool isTargetKnownWindowsMSVC() const {
+ return TargetTriple.isKnownWindowsMSVCEnvironment();
}
- bool isTargetEnvMacho() const { return TargetTriple.isEnvironmentMachO(); }
+
+ bool isTargetWindowsCygwin() const {
+ return TargetTriple.isWindowsCygwinEnvironment();
+ }
+
+ bool isTargetWindowsGNU() const {
+ return TargetTriple.isWindowsGNUEnvironment();
+ }
+
+ bool isTargetCygMing() const { return TargetTriple.isOSCygMing(); }
bool isOSWindows() const { return TargetTriple.isOSWindows(); }
@@ -340,7 +409,7 @@ public:
}
bool isTargetWin32() const {
- return !In64BitMode && (isTargetCygMing() || isTargetWindows());
+ return !In64BitMode && (isTargetCygMing() || isTargetKnownWindowsMSVC());
}
bool isPICStyleSet() const { return PICStyle != PICStyles::None; }
@@ -391,18 +460,17 @@ public:
bool hasSinCos() const;
/// Enable the MachineScheduler pass for all X86 subtargets.
- bool enableMachineScheduler() const LLVM_OVERRIDE { return true; }
-
- /// enablePostRAScheduler - run for Atom optimization.
- bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
- TargetSubtargetInfo::AntiDepBreakMode& Mode,
- RegClassVector& CriticalPathRCs) const;
+ bool enableMachineScheduler() const override { return true; }
- bool postRAScheduler() const { return PostRAScheduler; }
+ bool enableEarlyIfConversion() const override;
/// getInstrItins = Return the instruction itineraries based on the
/// subtarget selection.
const InstrItineraryData &getInstrItineraryData() const { return InstrItins; }
+
+ AntiDepBreakMode getAntiDepBreakMode() const override {
+ return TargetSubtargetInfo::ANTIDEP_CRITICAL;
+ }
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp b/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
index ddf580f..f12140f 100644
--- a/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -13,7 +13,6 @@
#include "X86TargetMachine.h"
#include "X86.h"
-#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/PassManager.h"
#include "llvm/Support/CommandLine.h"
@@ -24,67 +23,20 @@ using namespace llvm;
extern "C" void LLVMInitializeX86Target() {
// Register the target.
- RegisterTargetMachine<X86_32TargetMachine> X(TheX86_32Target);
- RegisterTargetMachine<X86_64TargetMachine> Y(TheX86_64Target);
+ RegisterTargetMachine<X86TargetMachine> X(TheX86_32Target);
+ RegisterTargetMachine<X86TargetMachine> Y(TheX86_64Target);
}
-void X86_32TargetMachine::anchor() { }
-
-X86_32TargetMachine::X86_32TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
- : X86TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false),
- DL(getSubtargetImpl()->isTargetDarwin() ?
- "e-p:32:32-f64:32:64-i64:32:64-f80:128:128-f128:128:128-"
- "n8:16:32-S128" :
- (getSubtargetImpl()->isTargetCygMing() ||
- getSubtargetImpl()->isTargetWindows()) ?
- "e-p:32:32-f64:64:64-i64:64:64-f80:32:32-f128:128:128-"
- "n8:16:32-S32" :
- "e-p:32:32-f64:32:64-i64:32:64-f80:32:32-f128:128:128-"
- "n8:16:32-S128"),
- InstrInfo(*this),
- TLInfo(*this),
- TSInfo(*this),
- JITInfo(*this) {
- initAsmInfo();
-}
-
-void X86_64TargetMachine::anchor() { }
-
-X86_64TargetMachine::X86_64TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
- : X86TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true),
- // The x32 ABI dictates the ILP32 programming model for x64.
- DL(getSubtargetImpl()->isTarget64BitILP32() ?
- "e-p:32:32-s:64-f64:64:64-i64:64:64-f80:128:128-f128:128:128-"
- "n8:16:32:64-S128" :
- "e-p:64:64-s:64-f64:64:64-i64:64:64-f80:128:128-f128:128:128-"
- "n8:16:32:64-S128"),
- InstrInfo(*this),
- TLInfo(*this),
- TSInfo(*this),
- JITInfo(*this) {
- initAsmInfo();
-}
+void X86TargetMachine::anchor() { }
/// X86TargetMachine ctor - Create an X86 target.
///
-X86TargetMachine::X86TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- const TargetOptions &Options,
+X86TargetMachine::X86TargetMachine(const Target &T, StringRef TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL,
- bool is64Bit)
- : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- Subtarget(TT, CPU, FS, Options.StackAlignmentOverride, is64Bit),
- FrameLowering(*this, Subtarget),
- InstrItins(Subtarget.getInstrItineraryData()){
+ CodeGenOpt::Level OL)
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ Subtarget(TT, CPU, FS, *this, Options.StackAlignmentOverride) {
// Determine the PICStyle based on the target selected.
if (getRelocationModel() == Reloc::Static) {
// Unless we're in PIC or DynamicNoPIC mode, set the PIC style to None.
@@ -108,6 +60,15 @@ X86TargetMachine::X86TargetMachine(const Target &T, StringRef TT,
// default to hard float ABI
if (Options.FloatABIType == FloatABI::Default)
this->Options.FloatABIType = FloatABI::Hard;
+
+ // The Windows stack unwinder gets confused when execution flow "falls through"
+ // after a call to a 'noreturn' function.
+ // To prevent that, we emit a trap for 'unreachable' IR instructions
+ // (which on X86 happens to be the 'ud2' instruction).
+ if (Subtarget.isTargetWin64())
+ this->Options.TrapUnreachable = true;
+
+ initAsmInfo();
}
//===----------------------------------------------------------------------===//
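
As a side note on the TrapUnreachable change above: the situation it guards against is ordinary user code in which a call to a 'noreturn' function is the last thing on a path. A minimal, self-contained C++ sketch (hypothetical user code, not part of this patch):

#include <cstdlib>

// Without a trailing trap, the call below can be the final instruction of
// foo() on the negative path, so the return address pushed for fatal() points
// one past foo()'s end and the Win64 unwinder attributes it to whatever
// function follows in the image. With TrapUnreachable the backend emits a
// 'ud2' after the call, keeping the return address inside foo().
[[noreturn]] static void fatal() { std::abort(); }

int foo(int X) {
  if (X < 0)
    fatal(); // tail position: nothing else follows on this path
  return X * 2;
}

int main() { return foo(21) == 42 ? 0 : 1; }
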
@@ -118,12 +79,6 @@ UseVZeroUpper("x86-use-vzeroupper", cl::Hidden,
cl::desc("Minimize AVX to SSE transition penalty"),
cl::init(true));
-// Temporary option to control early if-conversion for x86 while adding machine
-// models.
-static cl::opt<bool>
-X86EarlyIfConv("x86-early-ifcvt", cl::Hidden,
- cl::desc("Enable early if-conversion on X86"));
-
//===----------------------------------------------------------------------===//
// X86 Analysis Pass Setup
//===----------------------------------------------------------------------===//
@@ -156,11 +111,12 @@ public:
return *getX86TargetMachine().getSubtargetImpl();
}
- virtual bool addInstSelector();
- virtual bool addILPOpts();
- virtual bool addPreRegAlloc();
- virtual bool addPostRegAlloc();
- virtual bool addPreEmitPass();
+ void addIRPasses() override;
+ bool addInstSelector() override;
+ bool addILPOpts() override;
+ bool addPreRegAlloc() override;
+ bool addPostRegAlloc() override;
+ bool addPreEmitPass() override;
};
} // namespace
@@ -168,6 +124,12 @@ TargetPassConfig *X86TargetMachine::createPassConfig(PassManagerBase &PM) {
return new X86PassConfig(this, PM);
}
+void X86PassConfig::addIRPasses() {
+ addPass(createX86AtomicExpandPass(&getX86TargetMachine()));
+
+ TargetPassConfig::addIRPasses();
+}
+
bool X86PassConfig::addInstSelector() {
// Install an instruction selector.
addPass(createX86ISelDag(getX86TargetMachine(), getOptLevel()));
@@ -176,19 +138,14 @@ bool X86PassConfig::addInstSelector() {
if (getX86Subtarget().isTargetELF() && getOptLevel() != CodeGenOpt::None)
addPass(createCleanupLocalDynamicTLSPass());
- // For 32-bit, prepend instructions to set the "global base reg" for PIC.
- if (!getX86Subtarget().is64Bit())
- addPass(createGlobalBaseRegPass());
+ addPass(createX86GlobalBaseRegPass());
return false;
}
bool X86PassConfig::addILPOpts() {
- if (X86EarlyIfConv && getX86Subtarget().hasCMov()) {
- addPass(&EarlyIfConverterID);
- return true;
- }
- return false;
+ addPass(&EarlyIfConverterID);
+ return true;
}
bool X86PassConfig::addPreRegAlloc() {
@@ -207,18 +164,13 @@ bool X86PassConfig::addPreEmitPass() {
ShouldPrint = true;
}
- if (getX86Subtarget().hasAVX() && UseVZeroUpper) {
+ if (UseVZeroUpper) {
addPass(createX86IssueVZeroUpperPass());
ShouldPrint = true;
}
- if (getOptLevel() != CodeGenOpt::None &&
- getX86Subtarget().padShortFunctions()) {
+ if (getOptLevel() != CodeGenOpt::None) {
addPass(createX86PadShortFunctions());
- ShouldPrint = true;
- }
- if (getOptLevel() != CodeGenOpt::None &&
- getX86Subtarget().LEAusesAG()){
addPass(createX86FixupLEAs());
ShouldPrint = true;
}
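
A rough illustration of the addIRPasses() override introduced above, sketched with stand-in classes rather than the real TargetPassConfig API: the target hook registers its own IR pass first and then defers to the base implementation, so the generic IR passes still run afterwards.

#include <cstdio>
#include <string>
#include <vector>

// Toy model of the override pattern; class and pass names are illustrative.
struct PassConfig {
  std::vector<std::string> Pipeline;
  virtual ~PassConfig() = default;
  virtual void addIRPasses() { Pipeline.push_back("generic-ir-passes"); }
};

struct X86LikePassConfig : PassConfig {
  void addIRPasses() override {
    Pipeline.push_back("x86-atomic-expand"); // target-specific pass first
    PassConfig::addIRPasses();               // then the shared pipeline
  }
};

int main() {
  X86LikePassConfig PC;
  PC.addIRPasses();
  for (const std::string &P : PC.Pipeline)
    std::printf("%s\n", P.c_str()); // x86-atomic-expand, then generic-ir-passes
}
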
diff --git a/contrib/llvm/lib/Target/X86/X86TargetMachine.h b/contrib/llvm/lib/Target/X86/X86TargetMachine.h
index 174d391..41d5157 100644
--- a/contrib/llvm/lib/Target/X86/X86TargetMachine.h
+++ b/contrib/llvm/lib/Target/X86/X86TargetMachine.h
@@ -13,123 +13,56 @@
#ifndef X86TARGETMACHINE_H
#define X86TARGETMACHINE_H
-
-#include "X86.h"
-#include "X86FrameLowering.h"
-#include "X86ISelLowering.h"
#include "X86InstrInfo.h"
-#include "X86JITInfo.h"
-#include "X86SelectionDAGInfo.h"
#include "X86Subtarget.h"
#include "llvm/IR/DataLayout.h"
-#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
namespace llvm {
class StringRef;
-class X86TargetMachine : public LLVMTargetMachine {
+class X86TargetMachine final : public LLVMTargetMachine {
+ virtual void anchor();
X86Subtarget Subtarget;
- X86FrameLowering FrameLowering;
- InstrItineraryData InstrItins;
public:
X86TargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL,
- bool is64Bit);
+ CodeGenOpt::Level OL);
- virtual const X86InstrInfo *getInstrInfo() const {
- llvm_unreachable("getInstrInfo not implemented");
+ const DataLayout *getDataLayout() const override {
+ return getSubtargetImpl()->getDataLayout();
}
- virtual const TargetFrameLowering *getFrameLowering() const {
- return &FrameLowering;
+ const X86InstrInfo *getInstrInfo() const override {
+ return getSubtargetImpl()->getInstrInfo();
}
- virtual X86JITInfo *getJITInfo() {
- llvm_unreachable("getJITInfo not implemented");
+ const TargetFrameLowering *getFrameLowering() const override {
+ return getSubtargetImpl()->getFrameLowering();
}
- virtual const X86Subtarget *getSubtargetImpl() const{ return &Subtarget; }
- virtual const X86TargetLowering *getTargetLowering() const {
- llvm_unreachable("getTargetLowering not implemented");
+ X86JITInfo *getJITInfo() override { return Subtarget.getJITInfo(); }
+ const X86Subtarget *getSubtargetImpl() const override { return &Subtarget; }
+ const X86TargetLowering *getTargetLowering() const override {
+ return getSubtargetImpl()->getTargetLowering();
}
- virtual const X86SelectionDAGInfo *getSelectionDAGInfo() const {
- llvm_unreachable("getSelectionDAGInfo not implemented");
+ const X86SelectionDAGInfo *getSelectionDAGInfo() const override {
+ return getSubtargetImpl()->getSelectionDAGInfo();
}
- virtual const X86RegisterInfo *getRegisterInfo() const {
+ const X86RegisterInfo *getRegisterInfo() const override {
return &getInstrInfo()->getRegisterInfo();
}
- virtual const InstrItineraryData *getInstrItineraryData() const {
- return &InstrItins;
+ const InstrItineraryData *getInstrItineraryData() const override {
+ return &getSubtargetImpl()->getInstrItineraryData();
}
/// \brief Register X86 analysis passes with a pass manager.
- virtual void addAnalysisPasses(PassManagerBase &PM);
+ void addAnalysisPasses(PassManagerBase &PM) override;
// Set up the pass pipeline.
- virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
-
- virtual bool addCodeEmitter(PassManagerBase &PM,
- JITCodeEmitter &JCE);
-};
-
-/// X86_32TargetMachine - X86 32-bit target machine.
-///
-class X86_32TargetMachine : public X86TargetMachine {
- virtual void anchor();
- const DataLayout DL; // Calculates type size & alignment
- X86InstrInfo InstrInfo;
- X86TargetLowering TLInfo;
- X86SelectionDAGInfo TSInfo;
- X86JITInfo JITInfo;
-public:
- X86_32TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL);
- virtual const DataLayout *getDataLayout() const { return &DL; }
- virtual const X86TargetLowering *getTargetLowering() const {
- return &TLInfo;
- }
- virtual const X86SelectionDAGInfo *getSelectionDAGInfo() const {
- return &TSInfo;
- }
- virtual const X86InstrInfo *getInstrInfo() const {
- return &InstrInfo;
- }
- virtual X86JITInfo *getJITInfo() {
- return &JITInfo;
- }
-};
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
-/// X86_64TargetMachine - X86 64-bit target machine.
-///
-class X86_64TargetMachine : public X86TargetMachine {
- virtual void anchor();
- const DataLayout DL; // Calculates type size & alignment
- X86InstrInfo InstrInfo;
- X86TargetLowering TLInfo;
- X86SelectionDAGInfo TSInfo;
- X86JITInfo JITInfo;
-public:
- X86_64TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL);
- virtual const DataLayout *getDataLayout() const { return &DL; }
- virtual const X86TargetLowering *getTargetLowering() const {
- return &TLInfo;
- }
- virtual const X86SelectionDAGInfo *getSelectionDAGInfo() const {
- return &TSInfo;
- }
- virtual const X86InstrInfo *getInstrInfo() const {
- return &InstrInfo;
- }
- virtual X86JITInfo *getJITInfo() {
- return &JITInfo;
- }
+ bool addCodeEmitter(PassManagerBase &PM, JITCodeEmitter &JCE) override;
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/X86/X86TargetObjectFile.cpp b/contrib/llvm/lib/Target/X86/X86TargetObjectFile.cpp
index 086cd4d..f8bcd61 100644
--- a/contrib/llvm/lib/Target/X86/X86TargetObjectFile.cpp
+++ b/contrib/llvm/lib/Target/X86/X86TargetObjectFile.cpp
@@ -8,38 +8,42 @@
//===----------------------------------------------------------------------===//
#include "X86TargetObjectFile.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Operator.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/Dwarf.h"
-#include "llvm/Target/Mangler.h"
+#include "llvm/Target/TargetLowering.h"
using namespace llvm;
using namespace dwarf;
-const MCExpr *X86_64MachoTargetObjectFile::
-getTTypeGlobalReference(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI, unsigned Encoding,
- MCStreamer &Streamer) const {
+const MCExpr *X86_64MachoTargetObjectFile::getTTypeGlobalReference(
+ const GlobalValue *GV, unsigned Encoding, Mangler &Mang,
+ const TargetMachine &TM, MachineModuleInfo *MMI,
+ MCStreamer &Streamer) const {
// On Darwin/X86-64, we can reference dwarf symbols with foo@GOTPCREL+4, which
// is an indirect pc-relative reference.
- if (Encoding & (DW_EH_PE_indirect | DW_EH_PE_pcrel)) {
- const MCSymbol *Sym = getSymbol(*Mang, GV);
+ if ((Encoding & DW_EH_PE_indirect) && (Encoding & DW_EH_PE_pcrel)) {
+ const MCSymbol *Sym = TM.getSymbol(GV, Mang);
const MCExpr *Res =
MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_GOTPCREL, getContext());
const MCExpr *Four = MCConstantExpr::Create(4, getContext());
return MCBinaryExpr::CreateAdd(Res, Four, getContext());
}
- return TargetLoweringObjectFileMachO::
- getTTypeGlobalReference(GV, Mang, MMI, Encoding, Streamer);
+ return TargetLoweringObjectFileMachO::getTTypeGlobalReference(
+ GV, Encoding, Mang, TM, MMI, Streamer);
}
-MCSymbol *X86_64MachoTargetObjectFile::
-getCFIPersonalitySymbol(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI) const {
- return getSymbol(*Mang, GV);
+MCSymbol *X86_64MachoTargetObjectFile::getCFIPersonalitySymbol(
+ const GlobalValue *GV, Mangler &Mang, const TargetMachine &TM,
+ MachineModuleInfo *MMI) const {
+ return TM.getSymbol(GV, Mang);
}
void
@@ -53,3 +57,115 @@ X86LinuxTargetObjectFile::getDebugThreadLocalSymbol(
const MCSymbol *Sym) const {
return MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_DTPOFF, getContext());
}
+
+const MCExpr *X86WindowsTargetObjectFile::getExecutableRelativeSymbol(
+ const ConstantExpr *CE, Mangler &Mang, const TargetMachine &TM) const {
+ // We are looking for the difference of two symbols, so we need a subtraction
+ // operation.
+ const SubOperator *Sub = dyn_cast<SubOperator>(CE);
+ if (!Sub)
+ return nullptr;
+
+ // Symbols must first be numbers before we can subtract them; we need to see a
+ // ptrtoint on both subtraction operands.
+ const PtrToIntOperator *SubLHS =
+ dyn_cast<PtrToIntOperator>(Sub->getOperand(0));
+ const PtrToIntOperator *SubRHS =
+ dyn_cast<PtrToIntOperator>(Sub->getOperand(1));
+ if (!SubLHS || !SubRHS)
+ return nullptr;
+
+ // Our symbols should exist in address space zero; cowardly no-op
+ // otherwise.
+ if (SubLHS->getPointerAddressSpace() != 0 ||
+ SubRHS->getPointerAddressSpace() != 0)
+ return nullptr;
+
+ // Both ptrtoint instructions must wrap global variables:
+ // - Only global variables are eligible for image relative relocations.
+ // - The subtrahend refers to the special symbol __ImageBase, a global.
+ const GlobalVariable *GVLHS =
+ dyn_cast<GlobalVariable>(SubLHS->getPointerOperand());
+ const GlobalVariable *GVRHS =
+ dyn_cast<GlobalVariable>(SubRHS->getPointerOperand());
+ if (!GVLHS || !GVRHS)
+ return nullptr;
+
+ // We expect __ImageBase to be a global variable without a section, externally
+ // defined.
+ //
+ // It should look something like this: @__ImageBase = external constant i8
+ if (GVRHS->isThreadLocal() || GVRHS->getName() != "__ImageBase" ||
+ !GVRHS->hasExternalLinkage() || GVRHS->hasInitializer() ||
+ GVRHS->hasSection())
+ return nullptr;
+
+ // An image-relative, thread-local symbol makes no sense.
+ if (GVLHS->isThreadLocal())
+ return nullptr;
+
+ return MCSymbolRefExpr::Create(TM.getSymbol(GVLHS, Mang),
+ MCSymbolRefExpr::VK_COFF_IMGREL32,
+ getContext());
+}
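
The pattern matched above corresponds to a constant of the form sub(ptrtoint(@GV), ptrtoint(@__ImageBase)). At runtime the resulting VK_COFF_IMGREL32 reference is simply the 32-bit distance from the PE image base to the symbol; a small sketch with made-up addresses (not taken from the patch):

#include <cstdint>
#include <cstdio>

// Hedged sketch of what a VK_COFF_IMGREL32 value is at runtime: the 32-bit
// distance from the PE image base (__ImageBase) to the symbol. The addresses
// below are made up for illustration.
static uint32_t imageRelative(uint64_t ImageBase, uint64_t SymbolAddr) {
  return static_cast<uint32_t>(SymbolAddr - ImageBase);
}

int main() {
  uint64_t Base = 0x140000000ULL; // common default image base for a Win64 EXE
  uint64_t Sym  = 0x140002a10ULL; // hypothetical address of the global
  std::printf("IMGREL32 = 0x%x\n", imageRelative(Base, Sym)); // 0x2a10
}
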
+
+static std::string APIntToHexString(const APInt &AI) {
+ unsigned Width = (AI.getBitWidth() / 8) * 2;
+ std::string HexString = utohexstr(AI.getLimitedValue(), /*LowerCase=*/true);
+ unsigned Size = HexString.size();
+ assert(Width >= Size && "hex string is too large!");
+ HexString.insert(HexString.begin(), Width - Size, '0');
+
+ return HexString;
+}
+
+
+static std::string scalarConstantToHexString(const Constant *C) {
+ Type *Ty = C->getType();
+ APInt AI;
+ if (isa<UndefValue>(C)) {
+ AI = APInt(Ty->getPrimitiveSizeInBits(), /*val=*/0);
+ } else if (Ty->isFloatTy() || Ty->isDoubleTy()) {
+ const auto *CFP = cast<ConstantFP>(C);
+ AI = CFP->getValueAPF().bitcastToAPInt();
+ } else if (Ty->isIntegerTy()) {
+ const auto *CI = cast<ConstantInt>(C);
+ AI = CI->getValue();
+ } else {
+ llvm_unreachable("unexpected constant pool element type!");
+ }
+ return APIntToHexString(AI);
+}
+
+const MCSection *
+X86WindowsTargetObjectFile::getSectionForConstant(SectionKind Kind,
+ const Constant *C) const {
+ if (Kind.isReadOnly()) {
+ if (C) {
+ Type *Ty = C->getType();
+ SmallString<32> COMDATSymName;
+ if (Ty->isFloatTy() || Ty->isDoubleTy()) {
+ COMDATSymName = "__real@";
+ COMDATSymName += scalarConstantToHexString(C);
+ } else if (const auto *VTy = dyn_cast<VectorType>(Ty)) {
+ uint64_t NumBits = VTy->getBitWidth();
+ if (NumBits == 128 || NumBits == 256) {
+ COMDATSymName = NumBits == 128 ? "__xmm@" : "__ymm@";
+ for (int I = VTy->getNumElements() - 1, E = -1; I != E; --I)
+ COMDATSymName +=
+ scalarConstantToHexString(C->getAggregateElement(I));
+ }
+ }
+ if (!COMDATSymName.empty()) {
+ unsigned Characteristics = COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ |
+ COFF::IMAGE_SCN_LNK_COMDAT;
+ return getContext().getCOFFSection(".rdata", Characteristics, Kind,
+ COMDATSymName,
+ COFF::IMAGE_COMDAT_SELECT_ANY);
+ }
+ }
+ }
+
+ return TargetLoweringObjectFile::getSectionForConstant(Kind, C);
+}
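
To make the constant-pool naming scheme concrete, here is a standalone sketch that mirrors (but does not call) scalarConstantToHexString for a read-only double: the COMDAT symbol is "__real@" followed by the value's IEEE-754 bit pattern in lower-case hex, zero-padded to the type width.

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <string>

// Standalone sketch of the naming scheme; no LLVM types are used.
static std::string doubleToHex(double D) {
  uint64_t Bits;
  std::memcpy(&Bits, &D, sizeof(Bits)); // reinterpret the IEEE-754 bit pattern
  char Buf[17];
  std::snprintf(Buf, sizeof(Buf), "%016llx", (unsigned long long)Bits);
  return Buf;
}

int main() {
  // 1.0 has the bit pattern 0x3ff0000000000000, so the constant would land in
  // a COMDAT .rdata section keyed by the symbol printed below.
  std::printf("__real@%s\n", doubleToHex(1.0).c_str()); // __real@3ff0000000000000
}
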
diff --git a/contrib/llvm/lib/Target/X86/X86TargetObjectFile.h b/contrib/llvm/lib/Target/X86/X86TargetObjectFile.h
index 79c861d..4a10b7e 100644
--- a/contrib/llvm/lib/Target/X86/X86TargetObjectFile.h
+++ b/contrib/llvm/lib/Target/X86/X86TargetObjectFile.h
@@ -12,7 +12,6 @@
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
-#include "llvm/Target/TargetMachine.h"
namespace llvm {
@@ -20,25 +19,38 @@ namespace llvm {
/// x86-64.
class X86_64MachoTargetObjectFile : public TargetLoweringObjectFileMachO {
public:
- virtual const MCExpr *
- getTTypeGlobalReference(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI, unsigned Encoding,
- MCStreamer &Streamer) const;
+ const MCExpr *
+ getTTypeGlobalReference(const GlobalValue *GV, unsigned Encoding,
+ Mangler &Mang, const TargetMachine &TM,
+ MachineModuleInfo *MMI,
+ MCStreamer &Streamer) const override;
// getCFIPersonalitySymbol - The symbol that gets passed to
// .cfi_personality.
- virtual MCSymbol *
- getCFIPersonalitySymbol(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI) const;
+ MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV, Mangler &Mang,
+ const TargetMachine &TM,
+ MachineModuleInfo *MMI) const override;
};
/// X86LinuxTargetObjectFile - This implementation is used for linux x86
/// and x86-64.
class X86LinuxTargetObjectFile : public TargetLoweringObjectFileELF {
- virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
+ void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
/// \brief Describe a TLS variable address within debug info.
- virtual const MCExpr *getDebugThreadLocalSymbol(const MCSymbol *Sym) const;
+ const MCExpr *getDebugThreadLocalSymbol(const MCSymbol *Sym) const override;
+ };
+
+ /// \brief This implementation is used for Windows targets on x86 and x86-64.
+ class X86WindowsTargetObjectFile : public TargetLoweringObjectFileCOFF {
+ const MCExpr *
+ getExecutableRelativeSymbol(const ConstantExpr *CE, Mangler &Mang,
+ const TargetMachine &TM) const override;
+
+ /// \brief Given a mergeable constant with the specified size and relocation
+ /// information, return a section that it should be placed in.
+ const MCSection *getSectionForConstant(SectionKind Kind,
+ const Constant *C) const override;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/contrib/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index f88a666..c961e2f 100644
--- a/contrib/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -14,17 +14,19 @@
///
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "x86tti"
#include "X86.h"
#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/CostTable.h"
+#include "llvm/Target/TargetLowering.h"
using namespace llvm;
+#define DEBUG_TYPE "x86tti"
+
// Declare the pass initialization routine locally as target-specific passes
-// don't havve a target-wide initialization entry point, and so we rely on the
+// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializeX86TTIPass(PassRegistry &);
@@ -32,7 +34,7 @@ void initializeX86TTIPass(PassRegistry &);
namespace {
-class X86TTI : public ImmutablePass, public TargetTransformInfo {
+class X86TTI final : public ImmutablePass, public TargetTransformInfo {
const X86Subtarget *ST;
const X86TargetLowering *TLI;
@@ -41,25 +43,21 @@ class X86TTI : public ImmutablePass, public TargetTransformInfo {
unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;
public:
- X86TTI() : ImmutablePass(ID), ST(0), TLI(0) {
+ X86TTI() : ImmutablePass(ID), ST(nullptr), TLI(nullptr) {
llvm_unreachable("This pass cannot be directly constructed");
}
X86TTI(const X86TargetMachine *TM)
- : ImmutablePass(ID), ST(TM->getSubtargetImpl()),
- TLI(TM->getTargetLowering()) {
+ : ImmutablePass(ID), ST(TM->getSubtargetImpl()),
+ TLI(TM->getTargetLowering()) {
initializeX86TTIPass(*PassRegistry::getPassRegistry());
}
- virtual void initializePass() {
+ void initializePass() override {
pushTTIStack(this);
}
- virtual void finalizePass() {
- popTTIStack();
- }
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
TargetTransformInfo::getAnalysisUsage(AU);
}
@@ -67,7 +65,7 @@ public:
static char ID;
/// Provide necessary pointer adjustments for the two base classes.
- virtual void *getAdjustedAnalysisPointer(const void *ID) {
+ void *getAdjustedAnalysisPointer(const void *ID) override {
if (ID == &TargetTransformInfo::ID)
return (TargetTransformInfo*)this;
return this;
@@ -75,35 +73,43 @@ public:
/// \name Scalar TTI Implementations
/// @{
- virtual PopcntSupportKind getPopcntSupport(unsigned TyWidth) const;
+ PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override;
/// @}
/// \name Vector TTI Implementations
/// @{
- virtual unsigned getNumberOfRegisters(bool Vector) const;
- virtual unsigned getRegisterBitWidth(bool Vector) const;
- virtual unsigned getMaximumUnrollFactor() const;
- virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
- OperandValueKind,
- OperandValueKind) const;
- virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
- int Index, Type *SubTp) const;
- virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
- Type *Src) const;
- virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
- Type *CondTy) const;
- virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
- unsigned Index) const;
- virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
- unsigned Alignment,
- unsigned AddressSpace) const;
-
- virtual unsigned getAddressComputationCost(Type *PtrTy, bool IsComplex) const;
-
- virtual unsigned getReductionCost(unsigned Opcode, Type *Ty,
- bool IsPairwiseForm) const;
+ unsigned getNumberOfRegisters(bool Vector) const override;
+ unsigned getRegisterBitWidth(bool Vector) const override;
+ unsigned getMaximumUnrollFactor() const override;
+ unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind,
+ OperandValueKind) const override;
+ unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
+ int Index, Type *SubTp) const override;
+ unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
+ Type *Src) const override;
+ unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
+ Type *CondTy) const override;
+ unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
+ unsigned Index) const override;
+ unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace) const override;
+
+ unsigned getAddressComputationCost(Type *PtrTy,
+ bool IsComplex) const override;
+
+ unsigned getReductionCost(unsigned Opcode, Type *Ty,
+ bool IsPairwiseForm) const override;
+
+ unsigned getIntImmCost(int64_t) const;
+
+ unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override;
+
+ unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
+ Type *Ty) const override;
+ unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
+ Type *Ty) const override;
/// @}
};
@@ -138,13 +144,17 @@ unsigned X86TTI::getNumberOfRegisters(bool Vector) const {
if (Vector && !ST->hasSSE1())
return 0;
- if (ST->is64Bit())
+ if (ST->is64Bit()) {
+ if (Vector && ST->hasAVX512())
+ return 32;
return 16;
+ }
return 8;
}
unsigned X86TTI::getRegisterBitWidth(bool Vector) const {
if (Vector) {
+ if (ST->hasAVX512()) return 512;
if (ST->hasAVX()) return 256;
if (ST->hasSSE1()) return 128;
return 0;
@@ -177,6 +187,21 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
+ static const CostTblEntry<MVT::SimpleValueType>
+ AVX2UniformConstCostTable[] = {
+ { ISD::SDIV, MVT::v16i16, 6 }, // vpmulhw sequence
+ { ISD::UDIV, MVT::v16i16, 6 }, // vpmulhuw sequence
+ { ISD::SDIV, MVT::v8i32, 15 }, // vpmuldq sequence
+ { ISD::UDIV, MVT::v8i32, 15 }, // vpmuludq sequence
+ };
+
+ if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
+ ST->hasAVX2()) {
+ int Idx = CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second);
+ if (Idx != -1)
+ return LT.first * AVX2UniformConstCostTable[Idx].Cost;
+ }
+
static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
// Shifts on v4i64/v8i32 on AVX2 is legal even though we declare to
// customize them to detect the cases where shift amount is a scalar one.
@@ -214,6 +239,13 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
// Look for AVX2 lowering tricks.
if (ST->hasAVX2()) {
+ if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
+ (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
+ Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
+ // On AVX2, a packed v16i16 shift left by a constant build_vector
+ // is lowered into a vector multiply (vpmullw).
+ return LT.first;
+
int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
if (Idx != -1)
return LT.first * AVX2CostTable[Idx].Cost;
@@ -237,15 +269,38 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
{ ISD::SRA, MVT::v16i8, 4 }, // psrlw, pand, pxor, psubb.
{ ISD::SRA, MVT::v8i16, 1 }, // psraw.
{ ISD::SRA, MVT::v4i32, 1 }, // psrad.
+
+ { ISD::SDIV, MVT::v8i16, 6 }, // pmulhw sequence
+ { ISD::UDIV, MVT::v8i16, 6 }, // pmulhuw sequence
+ { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
+ { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
};
if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
ST->hasSSE2()) {
+ // pmuldq sequence.
+ if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
+ return LT.first * 15;
+
int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
if (Idx != -1)
return LT.first * SSE2UniformConstCostTable[Idx].Cost;
}
+ if (ISD == ISD::SHL &&
+ Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
+ EVT VT = LT.second;
+ if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
+ (VT == MVT::v4i32 && ST->hasSSE41()))
+ // A vector shift left by a non-uniform constant can be lowered
+ // into a vector multiply (pmullw/pmulld).
+ return LT.first;
+ if (VT == MVT::v4i32 && ST->hasSSE2())
+ // A vector shift left by a non-uniform constant is converted
+ // into a vector multiply; the new multiply is eventually
+ // lowered into a sequence of shuffles and 2 x pmuludq.
+ ISD = ISD::MUL;
+ }
static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
// We don't correctly identify costs of casts because they are marked as
@@ -260,6 +315,7 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
{ ISD::SHL, MVT::v8i16, 8*10 }, // Scalarized.
{ ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul.
{ ISD::SHL, MVT::v2i64, 2*10 }, // Scalarized.
+ { ISD::SHL, MVT::v4i64, 4*10 }, // Scalarized.
{ ISD::SRL, MVT::v16i8, 16*10 }, // Scalarized.
{ ISD::SRL, MVT::v8i16, 8*10 }, // Scalarized.
@@ -297,6 +353,7 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
// We don't have to scalarize unsupported ops. We can issue two half-sized
// operations and we only need to extract the upper YMM half.
// Two ops + 1 extract + 1 insert = 4.
+ { ISD::MUL, MVT::v16i16, 4 },
{ ISD::MUL, MVT::v8i32, 4 },
{ ISD::SUB, MVT::v8i32, 4 },
{ ISD::ADD, MVT::v8i32, 4 },
@@ -312,7 +369,15 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
// Look for AVX1 lowering tricks.
if (ST->hasAVX() && !ST->hasAVX2()) {
- int Idx = CostTableLookup(AVX1CostTable, ISD, LT.second);
+ EVT VT = LT.second;
+
+ // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
+ // sequence of extract + two vector multiply + insert.
+ if (ISD == ISD::SHL && (VT == MVT::v8i32 || VT == MVT::v16i16) &&
+ Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)
+ ISD = ISD::MUL;
+
+ int Idx = CostTableLookup(AVX1CostTable, ISD, VT);
if (Idx != -1)
return LT.first * AVX1CostTable[Idx].Cost;
}
@@ -332,7 +397,7 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
// 2x pmuludq, 2x shuffle.
if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
!ST->hasSSE41())
- return 6;
+ return LT.first * 6;
// Fallback to the default implementation.
return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info,
@@ -341,17 +406,117 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) const {
- // We only estimate the cost of reverse shuffles.
- if (Kind != SK_Reverse)
+ // We only estimate the cost of reverse and alternate shuffles.
+ if (Kind != SK_Reverse && Kind != SK_Alternate)
return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
- std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
- unsigned Cost = 1;
- if (LT.second.getSizeInBits() > 128)
- Cost = 3; // Extract + insert + copy.
+ if (Kind == SK_Reverse) {
+ std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
+ unsigned Cost = 1;
+ if (LT.second.getSizeInBits() > 128)
+ Cost = 3; // Extract + insert + copy.
+
+ // Multiply by the number of parts.
+ return Cost * LT.first;
+ }
+
+ if (Kind == SK_Alternate) {
+ // 64-bit packed float vectors (v2f32) are widened to type v4f32.
+ // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
+ std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
+
+ // The backend knows how to generate a single VEX.256 version of
+ // instruction VPBLENDW if the target supports AVX2.
+ if (ST->hasAVX2() && LT.second == MVT::v16i16)
+ return LT.first;
+
+ static const CostTblEntry<MVT::SimpleValueType> AVXAltShuffleTbl[] = {
+ {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1}, // vblendpd
+ {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1}, // vblendpd
+
+ {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1}, // vblendps
+ {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1}, // vblendps
+
+ // This shuffle is custom lowered into a sequence of:
+ // 2x vextractf128 , 2x vpblendw , 1x vinsertf128
+ {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5},
+
+ // This shuffle is custom lowered into a long sequence of:
+ // 2x vextractf128 , 4x vpshufb , 2x vpor , 1x vinsertf128
+ {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9}
+ };
+
+ if (ST->hasAVX()) {
+ int Idx = CostTableLookup(AVXAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
+ if (Idx != -1)
+ return LT.first * AVXAltShuffleTbl[Idx].Cost;
+ }
+
+ static const CostTblEntry<MVT::SimpleValueType> SSE41AltShuffleTbl[] = {
+ // These are lowered into movsd.
+ {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
+ {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
+
+ // packed float vectors with four elements are lowered into BLENDI dag
+ // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'.
+ {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
+ {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
+
+ // This shuffle generates a single pshufw.
+ {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
+
+ // There is no instruction that matches a v16i8 alternate shuffle.
+ // The backend will expand it into the sequence 'pshufb + pshufb + or'.
+ {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}
+ };
+
+ if (ST->hasSSE41()) {
+ int Idx = CostTableLookup(SSE41AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
+ if (Idx != -1)
+ return LT.first * SSE41AltShuffleTbl[Idx].Cost;
+ }
+
+ static const CostTblEntry<MVT::SimpleValueType> SSSE3AltShuffleTbl[] = {
+ {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1}, // movsd
+ {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1}, // movsd
- // Multiple by the number of parts.
- return Cost * LT.first;
+ // SSE3 doesn't have 'blendps'. The following shuffles are expanded into
+ // the sequence 'shufps + pshufd'
+ {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
+ {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
+
+ {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3}, // pshufb + pshufb + or
+ {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3} // pshufb + pshufb + or
+ };
+
+ if (ST->hasSSSE3()) {
+ int Idx = CostTableLookup(SSSE3AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
+ if (Idx != -1)
+ return LT.first * SSSE3AltShuffleTbl[Idx].Cost;
+ }
+
+ static const CostTblEntry<MVT::SimpleValueType> SSEAltShuffleTbl[] = {
+ {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1}, // movsd
+ {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1}, // movsd
+
+ {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2}, // shufps + pshufd
+ {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2}, // shufps + pshufd
+
+ // This is expanded into a long sequence of four extract + four insert.
+ {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8}, // 4 x pextrw + 4 pinsrw.
+
+ // 8 x (pinsrw + pextrw + and + movb + movzb + or)
+ {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
+ };
+
+ // Fall-back (SSE3 and SSE2).
+ int Idx = CostTableLookup(SSEAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
+ if (Idx != -1)
+ return LT.first * SSEAltShuffleTbl[Idx].Cost;
+ return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
+ }
+
+ return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
}
unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
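
The per-ISA tables above are consulted the same way throughout this file: find the row whose opcode and legalized type match, then scale the per-operation cost by the number of legalized parts (LT.first). A rough standalone model with illustrative names, not the real CostTableLookup interface:

#include <cstdio>

// Toy stand-ins for the cost-table machinery; enums and names are invented.
struct Entry { int Op; int Ty; unsigned Cost; };
enum { SHUFFLE = 0 };
enum { v8i16 = 0, v16i8 = 1 };

static int lookup(const Entry *Tbl, int N, int Op, int Ty) {
  for (int I = 0; I < N; ++I)
    if (Tbl[I].Op == Op && Tbl[I].Ty == Ty)
      return I;
  return -1;
}

int main() {
  const Entry SSSE3AltTbl[] = {
    {SHUFFLE, v8i16, 3}, // pshufb + pshufb + or
    {SHUFFLE, v16i8, 3}, // pshufb + pshufb + or
  };
  unsigned LTFirst = 1; // v8i16 is already legal, so a single part
  int Idx = lookup(SSSE3AltTbl, 2, SHUFFLE, v8i16);
  if (Idx != -1)
    std::printf("alternate shuffle cost = %u\n",
                LTFirst * SSSE3AltTbl[Idx].Cost); // 3
}
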
@@ -400,16 +565,58 @@ unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
static const TypeConversionCostTblEntry<MVT::SimpleValueType>
+ AVX2ConversionTbl[] = {
+ { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
+ { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 3 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 3 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
+
+ { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 },
+ { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 },
+ { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },
+ { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 },
+ { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 },
+ { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 4 },
+ };
+
+ static const TypeConversionCostTblEntry<MVT::SimpleValueType>
AVXConversionTbl[] = {
- { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
- { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
- { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
- { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
- { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 },
- { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 1 },
- { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 },
+ { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
+ { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 7 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 6 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 6 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
+
+ { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 4 },
+ { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 4 },
+ { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 4 },
+ { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
+ { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
+ { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 },
+ { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 9 },
{ ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
{ ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 },
@@ -436,17 +643,32 @@ unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
{ ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
{ ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
{ ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },
-
- { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 1 },
+ // The generic code to compute the scalar overhead is currently broken.
+ // Work around this limitation by estimating the scalarization overhead
+ // here. We have roughly 10 instructions per scalar element.
+ // Multiply that by the vector width.
+ // FIXME: remove that when PR19268 is fixed.
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
+ { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 4*10 },
+
+ { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 7 },
{ ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 1 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 6 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 9 },
- { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 8 },
- { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 6 },
- { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 6 },
- { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 3 },
+ // This node is expanded into scalarized operations but BasicTTI is overly
+ // optimistic in estimating its cost. It computes 3 per element (one
+ // vector-extract, one scalar conversion and one vector-insert). The
+ // problem is that the inserts form a read-modify-write chain so latency
+ // should be factored in too. Inflating the cost per element by 1.
+ { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 8*4 },
+ { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4*4 },
};
+ if (ST->hasAVX2()) {
+ int Idx = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
+ DstTy.getSimpleVT(), SrcTy.getSimpleVT());
+ if (Idx != -1)
+ return AVX2ConversionTbl[Idx].Cost;
+ }
+
if (ST->hasAVX()) {
int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
SrcTy.getSimpleVT());
@@ -555,7 +777,7 @@ unsigned X86TTI::getScalarizationOverhead(Type *Ty, bool Insert,
unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) const {
- // Handle non power of two vectors such as <3 x float>
+ // Handle non-power-of-two vectors such as <3 x float>
if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
unsigned NumElem = VTy->getVectorNumElements();
@@ -570,7 +792,7 @@ unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
// Cost = 128 bit store + unpack + 64 bit store.
return 3;
- // Assume that all other non power-of-two numbers are scalarized.
+ // Assume that all other non-power-of-two numbers are scalarized.
if (!isPowerOf2_32(NumElem)) {
unsigned Cost = TargetTransformInfo::getMemoryOpCost(Opcode,
VTy->getScalarType(),
@@ -692,3 +914,151 @@ unsigned X86TTI::getReductionCost(unsigned Opcode, Type *ValTy,
return TargetTransformInfo::getReductionCost(Opcode, ValTy, IsPairwise);
}
+/// \brief Calculate the cost of materializing a 64-bit value. This helper
+/// method might only calculate a fraction of a larger immediate. Therefore it
+/// is valid to return a cost of ZERO.
+unsigned X86TTI::getIntImmCost(int64_t Val) const {
+ if (Val == 0)
+ return TCC_Free;
+
+ if (isInt<32>(Val))
+ return TCC_Basic;
+
+ return 2 * TCC_Basic;
+}
+
+unsigned X86TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
+ assert(Ty->isIntegerTy());
+
+ unsigned BitSize = Ty->getPrimitiveSizeInBits();
+ if (BitSize == 0)
+ return ~0U;
+
+ // Never hoist constants larger than 128bit, because this might lead to
+ // incorrect code generation or assertions in codegen.
+ // Fixme: Create a cost model for types larger than i128 once the codegen
+ // issues have been fixed.
+ if (BitSize > 128)
+ return TCC_Free;
+
+ if (Imm == 0)
+ return TCC_Free;
+
+ // Sign-extend all constants to a multiple of 64-bit.
+ APInt ImmVal = Imm;
+ if (BitSize & 0x3f)
+ ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);
+
+ // Split the constant into 64-bit chunks and calculate the cost for each
+ // chunk.
+ unsigned Cost = 0;
+ for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
+ APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
+ int64_t Val = Tmp.getSExtValue();
+ Cost += getIntImmCost(Val);
+ }
+ // We need at least one instruction to materialize the constant.
+ return std::max(1U, Cost);
+}
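
A worked example of the chunking logic just added, using plain integers in place of APInt and literal 0/1/2 in place of the TCC_* enumerators (a sketch, not the real interface): a 128-bit immediate whose high half is zero costs one basic instruction, because the zero chunk is free and at least one instruction is always charged.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Sketch of the per-chunk costing: 0 stands for TCC_Free, 1 for TCC_Basic.
static unsigned pieceCost(int64_t V) {
  if (V == 0)
    return 0;                             // TCC_Free
  if (V >= INT32_MIN && V <= INT32_MAX)
    return 1;                             // TCC_Basic: fits in a simm32
  return 2;                               // needs a movabs-style 64-bit load
}

int main() {
  // A 128-bit immediate split into two 64-bit chunks: low = 0x1234, high = 0.
  int64_t Lo = 0x1234, Hi = 0;
  unsigned Cost = std::max(1u, pieceCost(Lo) + pieceCost(Hi));
  std::printf("cost = %u\n", Cost); // 1: only the low chunk must be materialized
}
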
+
+unsigned X86TTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
+ Type *Ty) const {
+ assert(Ty->isIntegerTy());
+
+ unsigned BitSize = Ty->getPrimitiveSizeInBits();
+ // There is no cost model for constants with a bit size of 0. Return TCC_Free
+ // here, so that constant hoisting will ignore this constant.
+ if (BitSize == 0)
+ return TCC_Free;
+
+ unsigned ImmIdx = ~0U;
+ switch (Opcode) {
+ default: return TCC_Free;
+ case Instruction::GetElementPtr:
+ // Always hoist the base address of a GetElementPtr. This prevents the
+ // creation of new constants for every base constant that gets constant
+ // folded with the offset.
+ if (Idx == 0)
+ return 2 * TCC_Basic;
+ return TCC_Free;
+ case Instruction::Store:
+ ImmIdx = 0;
+ break;
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ case Instruction::ICmp:
+ ImmIdx = 1;
+ break;
+ // Always return TCC_Free for the shift value of a shift instruction.
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ if (Idx == 1)
+ return TCC_Free;
+ break;
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::IntToPtr:
+ case Instruction::PtrToInt:
+ case Instruction::BitCast:
+ case Instruction::PHI:
+ case Instruction::Call:
+ case Instruction::Select:
+ case Instruction::Ret:
+ case Instruction::Load:
+ break;
+ }
+
+ if (Idx == ImmIdx) {
+ unsigned NumConstants = (BitSize + 63) / 64;
+ unsigned Cost = X86TTI::getIntImmCost(Imm, Ty);
+ return (Cost <= NumConstants * TCC_Basic)
+ ? static_cast<unsigned>(TCC_Free)
+ : Cost;
+ }
+
+ return X86TTI::getIntImmCost(Imm, Ty);
+}
+
+unsigned X86TTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
+ const APInt &Imm, Type *Ty) const {
+ assert(Ty->isIntegerTy());
+
+ unsigned BitSize = Ty->getPrimitiveSizeInBits();
+ // There is no cost model for constants with a bit size of 0. Return TCC_Free
+ // here, so that constant hoisting will ignore this constant.
+ if (BitSize == 0)
+ return TCC_Free;
+
+ switch (IID) {
+ default: return TCC_Free;
+ case Intrinsic::sadd_with_overflow:
+ case Intrinsic::uadd_with_overflow:
+ case Intrinsic::ssub_with_overflow:
+ case Intrinsic::usub_with_overflow:
+ case Intrinsic::smul_with_overflow:
+ case Intrinsic::umul_with_overflow:
+ if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
+ return TCC_Free;
+ break;
+ case Intrinsic::experimental_stackmap:
+ if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
+ return TCC_Free;
+ break;
+ case Intrinsic::experimental_patchpoint_void:
+ case Intrinsic::experimental_patchpoint_i64:
+ if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
+ return TCC_Free;
+ break;
+ }
+ return X86TTI::getIntImmCost(Imm, Ty);
+}
diff --git a/contrib/llvm/lib/Target/X86/X86VZeroUpper.cpp b/contrib/llvm/lib/Target/X86/X86VZeroUpper.cpp
index 66ae9c2..0bb5f99 100644
--- a/contrib/llvm/lib/Target/X86/X86VZeroUpper.cpp
+++ b/contrib/llvm/lib/Target/X86/X86VZeroUpper.cpp
@@ -14,9 +14,9 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "x86-vzeroupper"
#include "X86.h"
#include "X86InstrInfo.h"
+#include "X86Subtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -27,76 +27,64 @@
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "x86-vzeroupper"
+
STATISTIC(NumVZU, "Number of vzeroupper instructions inserted");
namespace {
- struct VZeroUpperInserter : public MachineFunctionPass {
- static char ID;
- VZeroUpperInserter() : MachineFunctionPass(ID) {}
-
- virtual bool runOnMachineFunction(MachineFunction &MF);
- bool processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB);
+ class VZeroUpperInserter : public MachineFunctionPass {
+ public:
- virtual const char *getPassName() const { return "X86 vzeroupper inserter";}
+ VZeroUpperInserter() : MachineFunctionPass(ID) {}
+ bool runOnMachineFunction(MachineFunction &MF) override;
+ const char *getPassName() const override {return "X86 vzeroupper inserter";}
private:
- const TargetInstrInfo *TII; // Machine instruction info.
-
- // Any YMM register live-in to this function?
- bool FnHasLiveInYmm;
-
- // BBState - Contains the state of each MBB: unknown, clean, dirty
- SmallVector<uint8_t, 8> BBState;
- // BBSolved - Keep track of all MBB which had been already analyzed
- // and there is no further processing required.
- BitVector BBSolved;
-
- // Machine Basic Blocks are classified according this pass:
- //
- // ST_UNKNOWN - The MBB state is unknown, meaning from the entry state
- // until the MBB exit there isn't a instruction using YMM to change
- // the state to dirty, or one of the incoming predecessors is unknown
- // and there's not a dirty predecessor between them.
+ void processBasicBlock(MachineBasicBlock &MBB);
+ void insertVZeroUpper(MachineBasicBlock::iterator I,
+ MachineBasicBlock &MBB);
+ void addDirtySuccessor(MachineBasicBlock &MBB);
+
+ typedef enum { PASS_THROUGH, EXITS_CLEAN, EXITS_DIRTY } BlockExitState;
+ static const char* getBlockExitStateName(BlockExitState ST);
+
+ // Core algorithm state:
+ // BlockState - Each block is either:
+ // - PASS_THROUGH: There are neither YMM dirtying instructions nor
+ // vzeroupper instructions in this block.
+ // - EXITS_CLEAN: There is (or will be) a vzeroupper instruction in this
+ // block that will ensure that YMM is clean on exit.
+ // - EXITS_DIRTY: An instruction in the block dirties YMM and no
+ // subsequent vzeroupper in the block clears it.
//
- // ST_CLEAN - No YMM usage in the end of the MBB. A MBB could have
- // instructions using YMM and be marked ST_CLEAN, as long as the state
- // is cleaned by a vzeroupper before any call.
+ // AddedToDirtySuccessors - This flag is raised when a block is added to the
+ // DirtySuccessors list to ensure that it's not
+ // added multiple times.
//
- // ST_DIRTY - Any MBB ending with a YMM usage not cleaned up by a
- // vzeroupper instruction.
- //
- // ST_INIT - Placeholder for an empty state set
- //
- enum {
- ST_UNKNOWN = 0,
- ST_CLEAN = 1,
- ST_DIRTY = 2,
- ST_INIT = 3
+ // FirstUnguardedCall - Records the location of the first unguarded call in
+ // each basic block that may need to be guarded by a
+ // vzeroupper. We won't know whether it actually needs
+ // to be guarded until we discover a predecessor that
+ // is DIRTY_OUT.
+ struct BlockState {
+ BlockState() : ExitState(PASS_THROUGH), AddedToDirtySuccessors(false) {}
+ BlockExitState ExitState;
+ bool AddedToDirtySuccessors;
+ MachineBasicBlock::iterator FirstUnguardedCall;
};
+ typedef SmallVector<BlockState, 8> BlockStateMap;
+ typedef SmallVector<MachineBasicBlock*, 8> DirtySuccessorsWorkList;
- // computeState - Given two states, compute the resulting state, in
- // the following way
- //
- // 1) One dirty state yields another dirty state
- // 2) All states must be clean for the result to be clean
- // 3) If none above and one unknown, the result state is also unknown
- //
- static unsigned computeState(unsigned PrevState, unsigned CurState) {
- if (PrevState == ST_INIT)
- return CurState;
-
- if (PrevState == ST_DIRTY || CurState == ST_DIRTY)
- return ST_DIRTY;
-
- if (PrevState == ST_CLEAN && CurState == ST_CLEAN)
- return ST_CLEAN;
-
- return ST_UNKNOWN;
- }
+ BlockStateMap BlockStates;
+ DirtySuccessorsWorkList DirtySuccessors;
+ bool EverMadeChange;
+ const TargetInstrInfo *TII;
+ static char ID;
};
+
char VZeroUpperInserter::ID = 0;
}
@@ -104,29 +92,30 @@ FunctionPass *llvm::createX86IssueVZeroUpperPass() {
return new VZeroUpperInserter();
}
-static bool isYmmReg(unsigned Reg) {
- return (Reg >= X86::YMM0 && Reg <= X86::YMM31);
+const char* VZeroUpperInserter::getBlockExitStateName(BlockExitState ST) {
+ switch (ST) {
+ case PASS_THROUGH: return "Pass-through";
+ case EXITS_DIRTY: return "Exits-dirty";
+ case EXITS_CLEAN: return "Exits-clean";
+ }
+ llvm_unreachable("Invalid block exit state.");
}
-static bool isZmmReg(unsigned Reg) {
- return (Reg >= X86::ZMM0 && Reg <= X86::ZMM31);
+static bool isYmmReg(unsigned Reg) {
+ return (Reg >= X86::YMM0 && Reg <= X86::YMM15);
}
static bool checkFnHasLiveInYmm(MachineRegisterInfo &MRI) {
for (MachineRegisterInfo::livein_iterator I = MRI.livein_begin(),
E = MRI.livein_end(); I != E; ++I)
- if (isYmmReg(I->first) || isZmmReg(I->first))
+ if (isYmmReg(I->first))
return true;
return false;
}
static bool clobbersAllYmmRegs(const MachineOperand &MO) {
- for (unsigned reg = X86::YMM0; reg <= X86::YMM31; ++reg) {
- if (!MO.clobbersPhysReg(reg))
- return false;
- }
- for (unsigned reg = X86::ZMM0; reg <= X86::ZMM31; ++reg) {
+ for (unsigned reg = X86::YMM0; reg <= X86::YMM15; ++reg) {
if (!MO.clobbersPhysReg(reg))
return false;
}
@@ -150,16 +139,13 @@ static bool hasYmmReg(MachineInstr *MI) {
/// clobbersAnyYmmReg() - Check if any YMM register will be clobbered by this
/// instruction.
-static bool clobbersAnyYmmReg(MachineInstr *MI) {
+static bool callClobbersAnyYmmReg(MachineInstr *MI) {
+ assert(MI->isCall() && "Can only be called on call instructions.");
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isRegMask())
continue;
- for (unsigned reg = X86::YMM0; reg <= X86::YMM31; ++reg) {
- if (MO.clobbersPhysReg(reg))
- return true;
- }
- for (unsigned reg = X86::ZMM0; reg <= X86::ZMM31; ++reg) {
+ for (unsigned reg = X86::YMM0; reg <= X86::YMM15; ++reg) {
if (MO.clobbersPhysReg(reg))
return true;
}
@@ -167,102 +153,44 @@ static bool clobbersAnyYmmReg(MachineInstr *MI) {
return false;
}
-/// runOnMachineFunction - Loop over all of the basic blocks, inserting
-/// vzero upper instructions before function calls.
-bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
- TII = MF.getTarget().getInstrInfo();
- MachineRegisterInfo &MRI = MF.getRegInfo();
- bool EverMadeChange = false;
-
- // Fast check: if the function doesn't use any ymm registers, we don't need
- // to insert any VZEROUPPER instructions. This is constant-time, so it is
- // cheap in the common case of no ymm use.
- bool YMMUsed = false;
- const TargetRegisterClass *RC = &X86::VR256RegClass;
- for (TargetRegisterClass::iterator i = RC->begin(), e = RC->end();
- i != e; i++) {
- if (!MRI.reg_nodbg_empty(*i)) {
- YMMUsed = true;
- break;
- }
- }
- if (!YMMUsed)
- return EverMadeChange;
-
- // Pre-compute the existence of any live-in YMM registers to this function
- FnHasLiveInYmm = checkFnHasLiveInYmm(MRI);
-
- assert(BBState.empty());
- BBState.resize(MF.getNumBlockIDs(), 0);
- BBSolved.resize(MF.getNumBlockIDs(), 0);
-
- // Each BB state depends on all predecessors, loop over until everything
- // converges. (Once we converge, we can implicitly mark everything that is
- // still ST_UNKNOWN as ST_CLEAN.)
- while (1) {
- bool MadeChange = false;
-
- // Process all basic blocks.
- for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
- MadeChange |= processBasicBlock(MF, *I);
+// Insert a vzeroupper instruction before I.
+void VZeroUpperInserter::insertVZeroUpper(MachineBasicBlock::iterator I,
+ MachineBasicBlock &MBB) {
+ DebugLoc dl = I->getDebugLoc();
+ BuildMI(MBB, I, dl, TII->get(X86::VZEROUPPER));
+ ++NumVZU;
+ EverMadeChange = true;
+}
- // If this iteration over the code changed anything, keep iterating.
- if (!MadeChange) break;
- EverMadeChange = true;
+// Add MBB to the DirtySuccessors list if it hasn't already been added.
+void VZeroUpperInserter::addDirtySuccessor(MachineBasicBlock &MBB) {
+ if (!BlockStates[MBB.getNumber()].AddedToDirtySuccessors) {
+ DirtySuccessors.push_back(&MBB);
+ BlockStates[MBB.getNumber()].AddedToDirtySuccessors = true;
}
-
- BBState.clear();
- BBSolved.clear();
- return EverMadeChange;
}
/// processBasicBlock - Loop over all of the instructions in the basic block,
/// inserting vzero upper instructions before function calls.
-bool VZeroUpperInserter::processBasicBlock(MachineFunction &MF,
- MachineBasicBlock &BB) {
- bool Changed = false;
- unsigned BBNum = BB.getNumber();
-
- // Don't process already solved BBs
- if (BBSolved[BBNum])
- return false; // No changes
-
- // Check the state of all predecessors
- unsigned EntryState = ST_INIT;
- for (MachineBasicBlock::const_pred_iterator PI = BB.pred_begin(),
- PE = BB.pred_end(); PI != PE; ++PI) {
- EntryState = computeState(EntryState, BBState[(*PI)->getNumber()]);
- if (EntryState == ST_DIRTY)
- break;
- }
-
+void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
- // The entry MBB for the function may set the initial state to dirty if
- // the function receives any YMM incoming arguments
- if (&BB == MF.begin()) {
- EntryState = ST_CLEAN;
- if (FnHasLiveInYmm)
- EntryState = ST_DIRTY;
- }
-
- // The current state is initialized according to the predecessors
- unsigned CurState = EntryState;
- bool BBHasCall = false;
+ // Start by assuming that the block is PASS_THROUGH, which implies no
+ // unguarded calls.
+ BlockExitState CurState = PASS_THROUGH;
+ BlockStates[MBB.getNumber()].FirstUnguardedCall = MBB.end();
- for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I) {
- DebugLoc dl = I->getDebugLoc();
+ for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I) {
MachineInstr *MI = I;
-
bool isControlFlow = MI->isCall() || MI->isReturn();
// Shortcut: don't need to check regular instructions in dirty state.
- if (!isControlFlow && CurState == ST_DIRTY)
+ if (!isControlFlow && CurState == EXITS_DIRTY)
continue;
if (hasYmmReg(MI)) {
// We found a ymm-using instruction; this could be an AVX instruction,
// or it could be control flow.
- CurState = ST_DIRTY;
+ CurState = EXITS_DIRTY;
continue;
}
@@ -276,11 +204,9 @@ bool VZeroUpperInserter::processBasicBlock(MachineFunction &MF,
// standard calling convention is not used (RegMask is not used to mark
// registers clobbered and register usage (def/imp-def/use) is well-defined
// and explicitly specified.
- if (MI->isCall() && !clobbersAnyYmmReg(MI))
+ if (MI->isCall() && !callClobbersAnyYmmReg(MI))
continue;
- BBHasCall = true;
-
// The VZEROUPPER instruction resets the upper 128 bits of all Intel AVX
// registers. This instruction has zero latency. In addition, the processor
// changes back to Clean state, after which execution of Intel SSE
@@ -289,38 +215,102 @@ bool VZeroUpperInserter::processBasicBlock(MachineFunction &MF,
// execute SSE code.
// FIXME: In some cases, we may want to move the VZEROUPPER into a
// predecessor block.
- if (CurState == ST_DIRTY) {
- // Only insert the VZEROUPPER in case the entry state isn't unknown.
- // When unknown, only compute the information within the block to have
- // it available in the exit if possible, but don't change the block.
- if (EntryState != ST_UNKNOWN) {
- BuildMI(BB, I, dl, TII->get(X86::VZEROUPPER));
- ++NumVZU;
- }
-
+ if (CurState == EXITS_DIRTY) {
// After the inserted VZEROUPPER the state becomes clean again, but
// other YMM may appear before other subsequent calls or even before
// the end of the BB.
- CurState = ST_CLEAN;
+ insertVZeroUpper(I, MBB);
+ CurState = EXITS_CLEAN;
+ } else if (CurState == PASS_THROUGH) {
+ // If this block is currently in pass-through state and we encounter a
+ // call then whether we need a vzeroupper or not depends on whether this
+ // block has successors that exit dirty. Record the location of the call,
+ // and set the state to EXITS_CLEAN, but do not insert the vzeroupper yet.
+ // It will be inserted later if necessary.
+ BlockStates[MBB.getNumber()].FirstUnguardedCall = I;
+ CurState = EXITS_CLEAN;
}
}
- DEBUG(dbgs() << "MBB #" << BBNum
- << ", current state: " << CurState << '\n');
+ DEBUG(dbgs() << "MBB #" << MBB.getNumber() << " exit state: "
+ << getBlockExitStateName(CurState) << '\n');
+
+ if (CurState == EXITS_DIRTY)
+ for (MachineBasicBlock::succ_iterator SI = MBB.succ_begin(),
+ SE = MBB.succ_end();
+ SI != SE; ++SI)
+ addDirtySuccessor(**SI);
+
+ BlockStates[MBB.getNumber()].ExitState = CurState;
+}
+
+/// runOnMachineFunction - Loop over all of the basic blocks, inserting
+/// vzero upper instructions before function calls.
+bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
+ const X86Subtarget &ST = MF.getTarget().getSubtarget<X86Subtarget>();
+ if (!ST.hasAVX() || ST.hasAVX512())
+ return false;
+ TII = MF.getTarget().getInstrInfo();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ EverMadeChange = false;
- // A BB can only be considered solved when we both have done all the
- // necessary transformations, and have computed the exit state. This happens
- // in two cases:
- // 1) We know the entry state: this immediately implies the exit state and
- // all the necessary transformations.
- // 2) There are no calls, and and a non-call instruction marks this block:
- // no transformations are necessary, and we know the exit state.
- if (EntryState != ST_UNKNOWN || (!BBHasCall && CurState != ST_UNKNOWN))
- BBSolved[BBNum] = true;
+ // Fast check: if the function doesn't use any ymm registers, we don't need
+ // to insert any VZEROUPPER instructions. This is constant-time, so it is
+ // cheap in the common case of no ymm use.
+ bool YMMUsed = false;
+ const TargetRegisterClass *RC = &X86::VR256RegClass;
+ for (TargetRegisterClass::iterator i = RC->begin(), e = RC->end();
+ i != e; i++) {
+ if (!MRI.reg_nodbg_empty(*i)) {
+ YMMUsed = true;
+ break;
+ }
+ }
+ if (!YMMUsed) {
+ return false;
+ }
- if (CurState != BBState[BBNum])
- Changed = true;
+ assert(BlockStates.empty() && DirtySuccessors.empty() &&
+ "X86VZeroUpper state should be clear");
+ BlockStates.resize(MF.getNumBlockIDs());
+
+ // Process all blocks. This will compute block exit states, record the first
+ // unguarded call in each block, and add successors of dirty blocks to the
+ // DirtySuccessors list.
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
+ processBasicBlock(*I);
+
+ // If any YMM regs are live in to this function, add the entry block to the
+ // DirtySuccessors list
+ if (checkFnHasLiveInYmm(MRI))
+ addDirtySuccessor(MF.front());
+
+ // Re-visit all blocks that are successors of EXITS_DIRTY blocks. Add
+ // vzeroupper instructions to unguarded calls, and propagate EXITS_DIRTY
+ // through PASS_THROUGH blocks.
+ while (!DirtySuccessors.empty()) {
+ MachineBasicBlock &MBB = *DirtySuccessors.back();
+ DirtySuccessors.pop_back();
+ BlockState &BBState = BlockStates[MBB.getNumber()];
+
+ // MBB is a successor of a dirty block, so its first call needs to be
+ // guarded.
+ if (BBState.FirstUnguardedCall != MBB.end())
+ insertVZeroUpper(BBState.FirstUnguardedCall, MBB);
+
+ // If this successor was a pass-through block then it is now dirty, and its
+ // successors need to be added to the worklist (if they haven't been
+ // already).
+ if (BBState.ExitState == PASS_THROUGH) {
+ DEBUG(dbgs() << "MBB #" << MBB.getNumber()
+ << " was Pass-through, is now Dirty-out.\n");
+ for (MachineBasicBlock::succ_iterator SI = MBB.succ_begin(),
+ SE = MBB.succ_end();
+ SI != SE; ++SI)
+ addDirtySuccessor(**SI);
+ }
+ }
- BBState[BBNum] = CurState;
- return Changed;
+ BlockStates.clear();
+ return EverMadeChange;
}
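The reworked X86VZeroUpper pass above is essentially a worklist propagation over the CFG: every block that exits dirty pushes its successors onto DirtySuccessors, each popped successor gets a vzeroupper in front of its first unguarded call, and pass-through successors become dirty in turn and propagate further. The following standalone sketch illustrates that propagation with toy data structures; the names (BlockInfo, propagateDirty and so on) are invented for the example and this is not the LLVM code itself.

// Standalone sketch of the dirty-successor propagation performed by the new
// X86VZeroUpper pass above. Toy data structures, invented names; not LLVM code.
#include <cstdio>
#include <vector>

enum ExitState { PASS_THROUGH, EXITS_CLEAN, EXITS_DIRTY };

struct BlockInfo {
  ExitState Exit = PASS_THROUGH;
  std::vector<int> Succs;        // successor block ids
  bool HasUnguardedCall = false; // block contains a call not yet guarded
  bool AddedToWorklist = false;
  bool NeedsVZeroUpper = false;  // result: guard the first call in this block
};

// Successors of dirty-exit blocks need their first call guarded; pass-through
// successors become dirty themselves and propagate further.
static void propagateDirty(std::vector<BlockInfo> &Blocks) {
  std::vector<int> Worklist;
  auto AddDirtySuccessor = [&](int B) {
    if (!Blocks[B].AddedToWorklist) {
      Blocks[B].AddedToWorklist = true;
      Worklist.push_back(B);
    }
  };
  for (unsigned B = 0; B != Blocks.size(); ++B)
    if (Blocks[B].Exit == EXITS_DIRTY)
      for (int S : Blocks[B].Succs)
        AddDirtySuccessor(S);
  while (!Worklist.empty()) {
    int B = Worklist.back();
    Worklist.pop_back();
    if (Blocks[B].HasUnguardedCall)
      Blocks[B].NeedsVZeroUpper = true; // vzeroupper goes before that call
    if (Blocks[B].Exit == PASS_THROUGH) // the block is now dirty-out as well
      for (int S : Blocks[B].Succs)
        AddDirtySuccessor(S);
  }
}

int main() {
  // 0 -> 1 -> 2: block 0 exits dirty, block 1 is pass-through and block 2
  // holds a call, so the call in block 2 must be preceded by a vzeroupper.
  std::vector<BlockInfo> Blocks(3);
  Blocks[0].Exit = EXITS_DIRTY;  Blocks[0].Succs = {1};
  Blocks[1].Exit = PASS_THROUGH; Blocks[1].Succs = {2};
  Blocks[2].Exit = EXITS_CLEAN;  Blocks[2].HasUnguardedCall = true;
  propagateDirty(Blocks);
  std::printf("block 2 needs vzeroupper: %d\n", (int)Blocks[2].NeedsVZeroUpper);
  return 0;
}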
diff --git a/contrib/llvm/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp b/contrib/llvm/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp
index 9c20abd..7fef796 100644
--- a/contrib/llvm/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp
+++ b/contrib/llvm/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp
@@ -14,6 +14,7 @@
#include "XCore.h"
#include "XCoreRegisterInfo.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
@@ -23,16 +24,17 @@
using namespace llvm;
+#define DEBUG_TYPE "xcore-disassembler"
+
typedef MCDisassembler::DecodeStatus DecodeStatus;
namespace {
/// \brief A disassembler class for XCore.
class XCoreDisassembler : public MCDisassembler {
- OwningPtr<const MCRegisterInfo> RegInfo;
public:
- XCoreDisassembler(const MCSubtargetInfo &STI, const MCRegisterInfo *Info) :
- MCDisassembler(STI), RegInfo(Info) {}
+ XCoreDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx) :
+ MCDisassembler(STI, Ctx) {}
/// \brief See MCDisassembler.
virtual DecodeStatus getInstruction(MCInst &instr,
@@ -40,9 +42,8 @@ public:
const MemoryObject &region,
uint64_t address,
raw_ostream &vStream,
- raw_ostream &cStream) const;
+ raw_ostream &cStream) const override;
- const MCRegisterInfo *getRegInfo() const { return RegInfo.get(); }
};
}
@@ -81,7 +82,8 @@ static bool readInstruction32(const MemoryObject &region,
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo) {
const XCoreDisassembler *Dis = static_cast<const XCoreDisassembler*>(D);
- return *(Dis->getRegInfo()->getRegClass(RC).begin() + RegNo);
+ const MCRegisterInfo *RegInfo = Dis->getContext().getRegisterInfo();
+ return *(RegInfo->getRegClass(RC).begin() + RegNo);
}
static DecodeStatus DecodeGRRegsRegisterClass(MCInst &Inst,
@@ -788,8 +790,9 @@ namespace llvm {
}
static MCDisassembler *createXCoreDisassembler(const Target &T,
- const MCSubtargetInfo &STI) {
- return new XCoreDisassembler(STI, T.createMCRegInfo(""));
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new XCoreDisassembler(STI, Ctx);
}
extern "C" void LLVMInitializeXCoreDisassembler() {
diff --git a/contrib/llvm/lib/Target/XCore/InstPrinter/XCoreInstPrinter.cpp b/contrib/llvm/lib/Target/XCore/InstPrinter/XCoreInstPrinter.cpp
index 9ae8c0d..215fe89 100644
--- a/contrib/llvm/lib/Target/XCore/InstPrinter/XCoreInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/XCore/InstPrinter/XCoreInstPrinter.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
#include "XCoreInstPrinter.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/MC/MCExpr.h"
@@ -22,6 +21,8 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "asm-printer"
+
#include "XCoreGenAsmWriter.inc"
void XCoreInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
diff --git a/contrib/llvm/lib/Target/XCore/InstPrinter/XCoreInstPrinter.h b/contrib/llvm/lib/Target/XCore/InstPrinter/XCoreInstPrinter.h
index 772c515..98e7c98 100644
--- a/contrib/llvm/lib/Target/XCore/InstPrinter/XCoreInstPrinter.h
+++ b/contrib/llvm/lib/Target/XCore/InstPrinter/XCoreInstPrinter.h
@@ -31,8 +31,8 @@ public:
void printInstruction(const MCInst *MI, raw_ostream &O);
static const char *getRegisterName(unsigned RegNo);
- virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
- virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
+ void printRegName(raw_ostream &OS, unsigned RegNo) const override;
+ void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot) override;
private:
void printInlineJT(const MCInst *MI, int opNum, raw_ostream &O);
void printInlineJT32(const MCInst *MI, int opNum, raw_ostream &O);
diff --git a/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp b/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp
index 3d1c474..5665911 100644
--- a/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp
@@ -17,11 +17,10 @@ XCoreMCAsmInfo::XCoreMCAsmInfo(StringRef TT) {
SupportsDebugInformation = true;
Data16bitsDirective = "\t.short\t";
Data32bitsDirective = "\t.long\t";
- Data64bitsDirective = 0;
+ Data64bitsDirective = nullptr;
ZeroDirective = "\t.space\t";
CommentString = "#";
-
- PrivateGlobalPrefix = ".L";
+
AscizDirective = ".asciiz";
HiddenVisibilityAttr = MCSA_Invalid;
diff --git a/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.h b/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.h
index e53c96b..da2689a 100644
--- a/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.h
@@ -21,7 +21,7 @@ namespace llvm {
class Target;
class XCoreMCAsmInfo : public MCAsmInfoELF {
- virtual void anchor();
+ void anchor() override;
public:
explicit XCoreMCAsmInfo(StringRef TT);
};
diff --git a/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp b/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp
index 10bb6df..d54e94f 100644
--- a/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp
@@ -14,13 +14,17 @@
#include "XCoreMCTargetDesc.h"
#include "InstPrinter/XCoreInstPrinter.h"
#include "XCoreMCAsmInfo.h"
+#include "XCoreTargetStreamer.h"
#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
#define GET_INSTRINFO_MC_DESC
#include "XCoreGenInstrInfo.inc"
@@ -30,8 +34,6 @@
#define GET_REGINFO_MC_DESC
#include "XCoreGenRegisterInfo.inc"
-using namespace llvm;
-
static MCInstrInfo *createXCoreMCInstrInfo() {
MCInstrInfo *X = new MCInstrInfo();
InitXCoreMCInstrInfo(X);
@@ -56,7 +58,7 @@ static MCAsmInfo *createXCoreMCAsmInfo(const MCRegisterInfo &MRI,
MCAsmInfo *MAI = new XCoreMCAsmInfo(TT);
// Initial state of the frame pointer is SP.
- MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(0, XCore::SP, 0);
+ MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, XCore::SP, 0);
MAI->addInitialFrameState(Inst);
return MAI;
@@ -69,6 +71,12 @@ static MCCodeGenInfo *createXCoreMCCodeGenInfo(StringRef TT, Reloc::Model RM,
if (RM == Reloc::Default) {
RM = Reloc::Static;
}
+ if (CM == CodeModel::Default) {
+ CM = CodeModel::Small;
+ }
+ if (CM != CodeModel::Small && CM != CodeModel::Large)
+ report_fatal_error("Target only supports CodeModel Small or Large");
+
X->InitMCCodeGenInfo(RM, CM, OL);
return X;
}
@@ -82,6 +90,53 @@ static MCInstPrinter *createXCoreMCInstPrinter(const Target &T,
return new XCoreInstPrinter(MAI, MII, MRI);
}
+XCoreTargetStreamer::XCoreTargetStreamer(MCStreamer &S) : MCTargetStreamer(S) {}
+XCoreTargetStreamer::~XCoreTargetStreamer() {}
+
+namespace {
+
+class XCoreTargetAsmStreamer : public XCoreTargetStreamer {
+ formatted_raw_ostream &OS;
+public:
+ XCoreTargetAsmStreamer(MCStreamer &S, formatted_raw_ostream &OS);
+ virtual void emitCCTopData(StringRef Name) override;
+ virtual void emitCCTopFunction(StringRef Name) override;
+ virtual void emitCCBottomData(StringRef Name) override;
+ virtual void emitCCBottomFunction(StringRef Name) override;
+};
+
+XCoreTargetAsmStreamer::XCoreTargetAsmStreamer(MCStreamer &S,
+ formatted_raw_ostream &OS)
+ : XCoreTargetStreamer(S), OS(OS) {}
+
+void XCoreTargetAsmStreamer::emitCCTopData(StringRef Name) {
+ OS << "\t.cc_top " << Name << ".data," << Name << '\n';
+}
+
+void XCoreTargetAsmStreamer::emitCCTopFunction(StringRef Name) {
+ OS << "\t.cc_top " << Name << ".function," << Name << '\n';
+}
+
+void XCoreTargetAsmStreamer::emitCCBottomData(StringRef Name) {
+ OS << "\t.cc_bottom " << Name << ".data\n";
+}
+
+void XCoreTargetAsmStreamer::emitCCBottomFunction(StringRef Name) {
+ OS << "\t.cc_bottom " << Name << ".function\n";
+}
+}
+
+static MCStreamer *
+createXCoreMCAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
+ bool isVerboseAsm, bool useDwarfDirectory,
+ MCInstPrinter *InstPrint, MCCodeEmitter *CE,
+ MCAsmBackend *TAB, bool ShowInst) {
+ MCStreamer *S = llvm::createAsmStreamer(
+ Ctx, OS, isVerboseAsm, useDwarfDirectory, InstPrint, CE, TAB, ShowInst);
+ new XCoreTargetAsmStreamer(*S, OS);
+ return S;
+}
+
// Force static initialization.
extern "C" void LLVMInitializeXCoreTargetMC() {
// Register the MC asm info.
@@ -104,4 +159,6 @@ extern "C" void LLVMInitializeXCoreTargetMC() {
// Register the MCInstPrinter
TargetRegistry::RegisterMCInstPrinter(TheXCoreTarget,
createXCoreMCInstPrinter);
+
+ TargetRegistry::RegisterAsmStreamer(TheXCoreTarget, createXCoreMCAsmStreamer);
}
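The XCoreTargetStreamer introduced above follows the usual target-streamer pattern: target-specific directives are routed through a small interface, and the assembly implementation simply prints them, so the asm printer no longer formats raw text itself. A minimal standalone sketch of that pattern, with invented class names rather than the real LLVM MC classes:

// Minimal sketch of the target-streamer pattern used above: an abstract
// interface for target-specific directives plus a text implementation.
// Invented names for illustration; this is not the LLVM MC API.
#include <iostream>
#include <ostream>
#include <string>

class TargetDirectiveStreamer {
public:
  virtual ~TargetDirectiveStreamer() = default;
  virtual void emitCCTopData(const std::string &Name) = 0;
  virtual void emitCCBottomData(const std::string &Name) = 0;
};

// Assembly flavour: just print the directives to the output stream.
class AsmDirectiveStreamer : public TargetDirectiveStreamer {
  std::ostream &OS;
public:
  explicit AsmDirectiveStreamer(std::ostream &OS) : OS(OS) {}
  void emitCCTopData(const std::string &Name) override {
    OS << "\t.cc_top " << Name << ".data," << Name << '\n';
  }
  void emitCCBottomData(const std::string &Name) override {
    OS << "\t.cc_bottom " << Name << ".data\n";
  }
};

int main() {
  AsmDirectiveStreamer Streamer(std::cout);
  Streamer.emitCCTopData("myglobal");    // the emitter asks for the directive
  Streamer.emitCCBottomData("myglobal"); // instead of building raw text
  return 0;
}

An object-file streamer could implement the same interface and encode the directives instead of printing them, which is what the indirection buys.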
diff --git a/contrib/llvm/lib/Target/XCore/XCore.h b/contrib/llvm/lib/Target/XCore/XCore.h
index 73c310b..d707edc 100644
--- a/contrib/llvm/lib/Target/XCore/XCore.h
+++ b/contrib/llvm/lib/Target/XCore/XCore.h
@@ -27,6 +27,7 @@ namespace llvm {
void initializeXCoreLowerThreadLocalPass(PassRegistry &p);
+ FunctionPass *createXCoreFrameToArgsOffsetEliminationPass();
FunctionPass *createXCoreISelDag(XCoreTargetMachine &TM,
CodeGenOpt::Level OptLevel);
ModulePass *createXCoreLowerThreadLocalPass();
diff --git a/contrib/llvm/lib/Target/XCore/XCore.td b/contrib/llvm/lib/Target/XCore/XCore.td
index e9a6d88..04a1dd5 100644
--- a/contrib/llvm/lib/Target/XCore/XCore.td
+++ b/contrib/llvm/lib/Target/XCore/XCore.td
@@ -41,13 +41,7 @@ def : Proc<"xs1b-generic", []>;
// Declare the target which we are implementing
//===----------------------------------------------------------------------===//
-def XCoreAsmWriter : AsmWriter {
- string AsmWriterClassName = "InstPrinter";
- bit isMCAsmWriter = 1;
-}
-
def XCore : Target {
// Pull in Instruction Info:
let InstructionSet = XCoreInstrInfo;
- let AssemblyWriters = [XCoreAsmWriter];
}
diff --git a/contrib/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp b/contrib/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
index c03dfe6..e98d4f9 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
@@ -12,13 +12,13 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
#include "XCore.h"
#include "InstPrinter/XCoreInstPrinter.h"
#include "XCoreInstrInfo.h"
#include "XCoreMCInstLower.h"
#include "XCoreSubtarget.h"
#include "XCoreTargetMachine.h"
+#include "XCoreTargetStreamer.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/AsmPrinter.h"
@@ -27,35 +27,39 @@
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/MC/MCExpr.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include <algorithm>
#include <cctype>
using namespace llvm;
+#define DEBUG_TYPE "asm-printer"
+
namespace {
class XCoreAsmPrinter : public AsmPrinter {
const XCoreSubtarget &Subtarget;
XCoreMCInstLower MCInstLowering;
+ XCoreTargetStreamer &getTargetStreamer();
+
public:
explicit XCoreAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
: AsmPrinter(TM, Streamer), Subtarget(TM.getSubtarget<XCoreSubtarget>()),
MCInstLowering(*this) {}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "XCore Assembly Printer";
}
@@ -67,22 +71,29 @@ namespace {
void printOperand(const MachineInstr *MI, int opNum, raw_ostream &O);
bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O);
+ raw_ostream &O) override;
+ bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &O) override;
void emitArrayBound(MCSymbol *Sym, const GlobalVariable *GV);
- virtual void EmitGlobalVariable(const GlobalVariable *GV);
+ void EmitGlobalVariable(const GlobalVariable *GV) override;
- void EmitFunctionEntryLabel();
- void EmitInstruction(const MachineInstr *MI);
- void EmitFunctionBodyStart();
- void EmitFunctionBodyEnd();
+ void EmitFunctionEntryLabel() override;
+ void EmitInstruction(const MachineInstr *MI) override;
+ void EmitFunctionBodyStart() override;
+ void EmitFunctionBodyEnd() override;
};
} // end of anonymous namespace
+XCoreTargetStreamer &XCoreAsmPrinter::getTargetStreamer() {
+ return static_cast<XCoreTargetStreamer&>(*OutStreamer.getTargetStreamer());
+}
+
void XCoreAsmPrinter::emitArrayBound(MCSymbol *Sym, const GlobalVariable *GV) {
- assert(((GV->hasExternalLinkage() ||
- GV->hasWeakLinkage()) ||
- GV->hasLinkOnceLinkage()) && "Unexpected linkage");
+ assert( ( GV->hasExternalLinkage() || GV->hasWeakLinkage() ||
+ GV->hasLinkOnceLinkage() || GV->hasCommonLinkage() ) &&
+ "Unexpected linkage");
if (ArrayType *ATy = dyn_cast<ArrayType>(
cast<PointerType>(GV->getType())->getElementType())) {
@@ -92,7 +103,8 @@ void XCoreAsmPrinter::emitArrayBound(MCSymbol *Sym, const GlobalVariable *GV) {
OutStreamer.EmitAssignment(SymGlob,
MCConstantExpr::Create(ATy->getNumElements(),
OutContext));
- if (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage()) {
+ if (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage() ||
+ GV->hasCommonLinkage()) {
// TODO Use COMDAT groups for LinkOnceLinkage
OutStreamer.EmitSymbolAttribute(SymGlob, MCSA_Weak);
}
@@ -106,16 +118,15 @@ void XCoreAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
return;
const DataLayout *TD = TM.getDataLayout();
- OutStreamer.SwitchSection(getObjFileLowering().SectionForGlobal(GV, Mang,TM));
+ OutStreamer.SwitchSection(
+ getObjFileLowering().SectionForGlobal(GV, *Mang, TM));
-
MCSymbol *GVSym = getSymbol(GV);
const Constant *C = GV->getInitializer();
unsigned Align = (unsigned)TD->getPreferredTypeAlignmentShift(C->getType());
// Mark the start of the global
- OutStreamer.EmitRawText("\t.cc_top " + Twine(GVSym->getName()) + ".data," +
- GVSym->getName());
+ getTargetStreamer().emitCCTopData(GVSym->getName());
switch (GV->getLinkage()) {
case GlobalValue::AppendingLinkage:
@@ -125,20 +136,18 @@ void XCoreAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
case GlobalValue::WeakAnyLinkage:
case GlobalValue::WeakODRLinkage:
case GlobalValue::ExternalLinkage:
+ case GlobalValue::CommonLinkage:
emitArrayBound(GVSym, GV);
OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Global);
// TODO Use COMDAT groups for LinkOnceLinkage
- if (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage())
+ if (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage() ||
+ GV->hasCommonLinkage())
OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Weak);
// FALL THROUGH
case GlobalValue::InternalLinkage:
case GlobalValue::PrivateLinkage:
break;
- case GlobalValue::DLLImportLinkage:
- llvm_unreachable("DLLImport linkage is not supported by this target!");
- case GlobalValue::DLLExportLinkage:
- llvm_unreachable("DLLExport linkage is not supported by this target!");
default:
llvm_unreachable("Unknown linkage type!");
}
@@ -151,8 +160,7 @@ void XCoreAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
unsigned Size = TD->getTypeAllocSize(C->getType());
if (MAI->hasDotTypeDotSizeDirective()) {
OutStreamer.EmitSymbolAttribute(GVSym, MCSA_ELF_TypeObject);
- OutStreamer.EmitRawText("\t.size " + Twine(GVSym->getName()) + "," +
- Twine(Size));
+ OutStreamer.EmitELFSize(GVSym, MCConstantExpr::Create(Size, OutContext));
}
OutStreamer.EmitLabel(GVSym);
@@ -163,7 +171,7 @@ void XCoreAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
OutStreamer.EmitZeros(4 - Size);
// Mark the end of the global
- OutStreamer.EmitRawText("\t.cc_bottom " + Twine(GVSym->getName()) + ".data");
+ getTargetStreamer().emitCCBottomData(GVSym->getName());
}
void XCoreAsmPrinter::EmitFunctionBodyStart() {
@@ -174,14 +182,12 @@ void XCoreAsmPrinter::EmitFunctionBodyStart() {
/// the last basic block in the function.
void XCoreAsmPrinter::EmitFunctionBodyEnd() {
// Emit function end directives
- OutStreamer.EmitRawText("\t.cc_bottom " + Twine(CurrentFnSym->getName()) +
- ".function");
+ getTargetStreamer().emitCCBottomFunction(CurrentFnSym->getName());
}
void XCoreAsmPrinter::EmitFunctionEntryLabel() {
// Mark the start of the function
- OutStreamer.EmitRawText("\t.cc_top " + Twine(CurrentFnSym->getName()) +
- ".function," + CurrentFnSym->getName());
+ getTargetStreamer().emitCCTopFunction(CurrentFnSym->getName());
OutStreamer.EmitLabel(CurrentFnSym);
}
@@ -204,6 +210,7 @@ printInlineJT(const MachineInstr *MI, int opNum, raw_ostream &O,
void XCoreAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
raw_ostream &O) {
+ const DataLayout *DL = TM.getDataLayout();
const MachineOperand &MO = MI->getOperand(opNum);
switch (MO.getType()) {
case MachineOperand::MO_Register:
@@ -218,15 +225,8 @@ void XCoreAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
case MachineOperand::MO_GlobalAddress:
O << *getSymbol(MO.getGlobal());
break;
- case MachineOperand::MO_ExternalSymbol:
- O << MO.getSymbolName();
- break;
case MachineOperand::MO_ConstantPoolIndex:
- O << MAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber()
- << '_' << MO.getIndex();
- break;
- case MachineOperand::MO_JumpTableIndex:
- O << MAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber()
+ O << DL->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber()
<< '_' << MO.getIndex();
break;
case MachineOperand::MO_BlockAddress:
@@ -252,6 +252,20 @@ bool XCoreAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
}
+bool XCoreAsmPrinter::
+PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &O) {
+ if (ExtraCode && ExtraCode[0]) {
+ return true; // Unknown modifier.
+ }
+ printOperand(MI, OpNum, O);
+ O << '[';
+ printOperand(MI, OpNum + 1, O);
+ O << ']';
+ return false;
+}
+
void XCoreAsmPrinter::EmitInstruction(const MachineInstr *MI) {
SmallString<128> Str;
raw_svector_ostream O(Str);
@@ -284,7 +298,7 @@ void XCoreAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCInst TmpInst;
MCInstLowering.Lower(MI, TmpInst);
- OutStreamer.EmitInstruction(TmpInst);
+ EmitToStreamer(OutStreamer, TmpInst);
}
// Force static initialization.
diff --git a/contrib/llvm/lib/Target/XCore/XCoreCallingConv.td b/contrib/llvm/lib/Target/XCore/XCoreCallingConv.td
index b20d71f..e149e6d 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreCallingConv.td
+++ b/contrib/llvm/lib/Target/XCore/XCoreCallingConv.td
@@ -14,7 +14,11 @@
//===----------------------------------------------------------------------===//
def RetCC_XCore : CallingConv<[
// i32 are returned in registers R0, R1, R2, R3
- CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3]>>
+ CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3]>>,
+
+ // Integer values get stored in stack slots that are 4 bytes in
+ // size and 4-byte aligned.
+ CCIfType<[i32], CCAssignToStack<4, 4>>
]>;
//===----------------------------------------------------------------------===//
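The RetCC_XCore change above adds a stack fallback, so i32 values beyond R0-R3 land in 4-byte, 4-byte-aligned stack slots rather than making the calling-convention lowering fail. A tiny standalone sketch of that register-then-stack assignment, using invented names rather than TableGen or LLVM's CCState machinery:

// Sketch of "registers first, then 4-byte aligned stack slots", as in the
// RetCC_XCore change above. Invented names; not the LLVM CC machinery.
#include <cstdio>
#include <string>
#include <vector>

struct Location {
  bool InReg;
  std::string Reg; // valid if InReg
  int StackOffset; // valid if !InReg
};

static std::vector<Location> assignI32Values(unsigned NumValues) {
  const char *Regs[] = {"R0", "R1", "R2", "R3"};
  std::vector<Location> Locs;
  int NextStackOffset = 0; // stack slots are 4 bytes wide and 4-byte aligned
  for (unsigned i = 0; i < NumValues; ++i) {
    if (i < 4)
      Locs.push_back({true, Regs[i], 0});
    else {
      Locs.push_back({false, "", NextStackOffset});
      NextStackOffset += 4;
    }
  }
  return Locs;
}

int main() {
  for (const Location &L : assignI32Values(6)) {
    if (L.InReg)
      std::printf("reg %s\n", L.Reg.c_str());
    else
      std::printf("stack +%d\n", L.StackOffset);
  }
  return 0;
}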
diff --git a/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp b/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
index c34b35c..e694736 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -25,10 +25,15 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
+#include <algorithm> // std::sort
using namespace llvm;
+static const unsigned FramePtr = XCore::R10;
+static const int MaxImmU16 = (1<<16) - 1;
+
// helper functions. FIXME: Eliminate.
static inline bool isImmU6(unsigned val) {
return val < (1 << 6);
@@ -38,37 +43,168 @@ static inline bool isImmU16(unsigned val) {
return val < (1 << 16);
}
-static void loadFromStack(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DstReg, int Offset, DebugLoc dl,
- const TargetInstrInfo &TII) {
- assert(Offset%4 == 0 && "Misaligned stack offset");
- Offset/=4;
- bool isU6 = isImmU6(Offset);
- if (!isU6 && !isImmU16(Offset))
- report_fatal_error("loadFromStack offset too big " + Twine(Offset));
- int Opcode = isU6 ? XCore::LDWSP_ru6 : XCore::LDWSP_lru6;
- BuildMI(MBB, I, dl, TII.get(Opcode), DstReg)
- .addImm(Offset);
+// Helper structure with compare function for handling stack slots.
+namespace {
+struct StackSlotInfo {
+ int FI;
+ int Offset;
+ unsigned Reg;
+ StackSlotInfo(int f, int o, int r) : FI(f), Offset(o), Reg(r){};
+};
+} // end anonymous namespace
+
+static bool CompareSSIOffset(const StackSlotInfo& a, const StackSlotInfo& b) {
+ return a.Offset < b.Offset;
+}
+
+
+static void EmitDefCfaRegister(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, DebugLoc dl,
+ const TargetInstrInfo &TII,
+ MachineModuleInfo *MMI, unsigned DRegNum) {
+ unsigned CFIIndex = MMI->addFrameInst(
+ MCCFIInstruction::createDefCfaRegister(nullptr, DRegNum));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+}
+
+static void EmitDefCfaOffset(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, DebugLoc dl,
+ const TargetInstrInfo &TII,
+ MachineModuleInfo *MMI, int Offset) {
+ unsigned CFIIndex =
+ MMI->addFrameInst(MCCFIInstruction::createDefCfaOffset(nullptr, -Offset));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+}
+
+static void EmitCfiOffset(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, DebugLoc dl,
+ const TargetInstrInfo &TII, MachineModuleInfo *MMI,
+ unsigned DRegNum, int Offset) {
+ unsigned CFIIndex = MMI->addFrameInst(
+ MCCFIInstruction::createOffset(nullptr, DRegNum, Offset));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+}
+
+/// The SP register is moved in steps of 'MaxImmU16' towards the bottom of the
+/// frame. During these steps, it may be necessary to spill registers.
+/// IfNeededExtSP emits the necessary EXTSP instructions to move the SP only
+/// as far as to make 'OffsetFromBottom' reachable using an STWSP_lru6.
+/// \param OffsetFromTop the spill offset from the top of the frame.
+/// \param [in,out] Adjusted the current SP offset from the top of the frame.
+static void IfNeededExtSP(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, DebugLoc dl,
+ const TargetInstrInfo &TII, MachineModuleInfo *MMI,
+ int OffsetFromTop, int &Adjusted, int FrameSize,
+ bool emitFrameMoves) {
+ while (OffsetFromTop > Adjusted) {
+ assert(Adjusted < FrameSize && "OffsetFromTop is beyond FrameSize");
+ int remaining = FrameSize - Adjusted;
+ int OpImm = (remaining > MaxImmU16) ? MaxImmU16 : remaining;
+ int Opcode = isImmU6(OpImm) ? XCore::EXTSP_u6 : XCore::EXTSP_lu6;
+ BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(OpImm);
+ Adjusted += OpImm;
+ if (emitFrameMoves)
+ EmitDefCfaOffset(MBB, MBBI, dl, TII, MMI, Adjusted*4);
+ }
+}
+
+/// The SP register is moved in steps of 'MaxImmU16' towards the top of the
+/// frame. During these steps, it may be necessary to re-load registers.
+/// IfNeededLDAWSP emits the necessary LDAWSP instructions to move the SP only
+/// as far as to make 'OffsetFromTop' reachable using an LDAWSP_lru6.
+/// \param OffsetFromTop the spill offset from the top of the frame.
+/// \param [in,out] RemainingAdj the current SP offset from the top of the
+/// frame.
+static void IfNeededLDAWSP(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, DebugLoc dl,
+ const TargetInstrInfo &TII, int OffsetFromTop,
+ int &RemainingAdj) {
+ while (OffsetFromTop < RemainingAdj - MaxImmU16) {
+ assert(RemainingAdj && "OffsetFromTop is beyond FrameSize");
+ int OpImm = (RemainingAdj > MaxImmU16) ? MaxImmU16 : RemainingAdj;
+ int Opcode = isImmU6(OpImm) ? XCore::LDAWSP_ru6 : XCore::LDAWSP_lru6;
+ BuildMI(MBB, MBBI, dl, TII.get(Opcode), XCore::SP).addImm(OpImm);
+ RemainingAdj -= OpImm;
+ }
+}
+
+/// Creates an ordered list of registers that are spilled
+/// during the emitPrologue/emitEpilogue.
+/// Registers are ordered according to their frame offset.
+/// As offsets are negative, the largest offsets will be first.
+static void GetSpillList(SmallVectorImpl<StackSlotInfo> &SpillList,
+ MachineFrameInfo *MFI, XCoreFunctionInfo *XFI,
+ bool fetchLR, bool fetchFP) {
+ if (fetchLR) {
+ int Offset = MFI->getObjectOffset(XFI->getLRSpillSlot());
+ SpillList.push_back(StackSlotInfo(XFI->getLRSpillSlot(),
+ Offset,
+ XCore::LR));
+ }
+ if (fetchFP) {
+ int Offset = MFI->getObjectOffset(XFI->getFPSpillSlot());
+ SpillList.push_back(StackSlotInfo(XFI->getFPSpillSlot(),
+ Offset,
+ FramePtr));
+ }
+ std::sort(SpillList.begin(), SpillList.end(), CompareSSIOffset);
+}
+
+/// Creates an ordered list of EH info register 'spills'.
+/// These slots are only used by the unwinder and calls to llvm.eh.return().
+/// Registers are ordered according to their frame offset.
+/// As offsets are negative, the largest offsets will be first.
+static void GetEHSpillList(SmallVectorImpl<StackSlotInfo> &SpillList,
+ MachineFrameInfo *MFI, XCoreFunctionInfo *XFI,
+ const TargetLowering *TL) {
+ assert(XFI->hasEHSpillSlot() && "There are no EH register spill slots");
+ const int* EHSlot = XFI->getEHSpillSlot();
+ SpillList.push_back(StackSlotInfo(EHSlot[0],
+ MFI->getObjectOffset(EHSlot[0]),
+ TL->getExceptionPointerRegister()));
+ SpillList.push_back(StackSlotInfo(EHSlot[1],
+ MFI->getObjectOffset(EHSlot[1]),
+ TL->getExceptionSelectorRegister()));
+ std::sort(SpillList.begin(), SpillList.end(), CompareSSIOffset);
}
-static void storeToStack(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned SrcReg, int Offset, DebugLoc dl,
- const TargetInstrInfo &TII) {
- assert(Offset%4 == 0 && "Misaligned stack offset");
- Offset/=4;
- bool isU6 = isImmU6(Offset);
- if (!isU6 && !isImmU16(Offset))
- report_fatal_error("storeToStack offset too big " + Twine(Offset));
- int Opcode = isU6 ? XCore::STWSP_ru6 : XCore::STWSP_lru6;
- BuildMI(MBB, I, dl, TII.get(Opcode))
- .addReg(SrcReg)
- .addImm(Offset);
+static MachineMemOperand *
+getFrameIndexMMO(MachineBasicBlock &MBB, int FrameIndex, unsigned flags) {
+ MachineFunction *MF = MBB.getParent();
+ const MachineFrameInfo &MFI = *MF->getFrameInfo();
+ MachineMemOperand *MMO =
+ MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIndex),
+ flags, MFI.getObjectSize(FrameIndex),
+ MFI.getObjectAlignment(FrameIndex));
+ return MMO;
}
+/// Restore clobbered registers with their spill slot value.
+/// The SP will be adjusted at the same time, thus the SpillList must be ordered
+/// with the largest (negative) offsets first.
+static void
+RestoreSpillList(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ DebugLoc dl, const TargetInstrInfo &TII, int &RemainingAdj,
+ SmallVectorImpl<StackSlotInfo> &SpillList) {
+ for (unsigned i = 0, e = SpillList.size(); i != e; ++i) {
+ assert(SpillList[i].Offset % 4 == 0 && "Misaligned stack offset");
+ assert(SpillList[i].Offset <= 0 && "Unexpected positive stack offset");
+ int OffsetFromTop = - SpillList[i].Offset/4;
+ IfNeededLDAWSP(MBB, MBBI, dl, TII, OffsetFromTop, RemainingAdj);
+ int Offset = RemainingAdj - OffsetFromTop;
+ int Opcode = isImmU6(Offset) ? XCore::LDWSP_ru6 : XCore::LDWSP_lru6;
+ BuildMI(MBB, MBBI, dl, TII.get(Opcode), SpillList[i].Reg)
+ .addImm(Offset)
+ .addMemOperand(getFrameIndexMMO(MBB, SpillList[i].FI,
+ MachineMemOperand::MOLoad));
+ }
+}
+
//===----------------------------------------------------------------------===//
// XCoreFrameLowering:
//===----------------------------------------------------------------------===//
@@ -80,7 +216,7 @@ XCoreFrameLowering::XCoreFrameLowering(const XCoreSubtarget &sti)
bool XCoreFrameLowering::hasFP(const MachineFunction &MF) const {
return MF.getTarget().Options.DisableFramePointerElim(MF) ||
- MF.getFrameInfo()->hasVarSizedObjects();
+ MF.getFrameInfo()->hasVarSizedObjects();
}
void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const {
@@ -92,219 +228,225 @@ void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const {
const XCoreInstrInfo &TII =
*static_cast<const XCoreInstrInfo*>(MF.getTarget().getInstrInfo());
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
- DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+ // Debug location must be unknown since the first debug location is used
+ // to determine the end of the prologue.
+ DebugLoc dl;
if (MFI->getMaxAlignment() > getStackAlignment())
report_fatal_error("emitPrologue unsupported alignment: "
+ Twine(MFI->getMaxAlignment()));
- bool FP = hasFP(MF);
const AttributeSet &PAL = MF.getFunction()->getAttributes();
-
if (PAL.hasAttrSomewhere(Attribute::Nest))
- loadFromStack(MBB, MBBI, XCore::R11, 0, dl, TII);
+ BuildMI(MBB, MBBI, dl, TII.get(XCore::LDWSP_ru6), XCore::R11).addImm(0);
+ // FIX: Needs addMemOperand() but can't use getFixedStack() or getStack().
// Work out frame sizes.
- int FrameSize = MFI->getStackSize();
- assert(FrameSize%4 == 0 && "Misaligned frame size");
- FrameSize/=4;
-
- bool isU6 = isImmU6(FrameSize);
-
- if (!isU6 && !isImmU16(FrameSize)) {
- // FIXME could emit multiple instructions.
- report_fatal_error("emitPrologue Frame size too big: " + Twine(FrameSize));
- }
+ // We will adjust the SP in stages towards the final FrameSize.
+ assert(MFI->getStackSize()%4 == 0 && "Misaligned frame size");
+ const int FrameSize = MFI->getStackSize() / 4;
+ int Adjusted = 0;
+
+ bool saveLR = XFI->hasLRSpillSlot();
+ bool UseENTSP = saveLR && FrameSize
+ && (MFI->getObjectOffset(XFI->getLRSpillSlot()) == 0);
+ if (UseENTSP)
+ saveLR = false;
+ bool FP = hasFP(MF);
bool emitFrameMoves = XCoreRegisterInfo::needsFrameMoves(MF);
- bool saveLR = XFI->getUsesLR();
- // Do we need to allocate space on the stack?
- if (FrameSize) {
- bool LRSavedOnEntry = false;
- int Opcode;
- if (saveLR && (MFI->getObjectOffset(XFI->getLRSpillSlot()) == 0)) {
- Opcode = (isU6) ? XCore::ENTSP_u6 : XCore::ENTSP_lu6;
- MBB.addLiveIn(XCore::LR);
- saveLR = false;
- LRSavedOnEntry = true;
- } else {
- Opcode = (isU6) ? XCore::EXTSP_u6 : XCore::EXTSP_lu6;
- }
- BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(FrameSize);
-
+ if (UseENTSP) {
+ // Allocate space on the stack at the same time as saving LR.
+ Adjusted = (FrameSize > MaxImmU16) ? MaxImmU16 : FrameSize;
+ int Opcode = isImmU6(Adjusted) ? XCore::ENTSP_u6 : XCore::ENTSP_lu6;
+ MBB.addLiveIn(XCore::LR);
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opcode));
+ MIB.addImm(Adjusted);
+ MIB->addRegisterKilled(XCore::LR, MF.getTarget().getRegisterInfo(), true);
if (emitFrameMoves) {
- // Show update of SP.
- MCSymbol *FrameLabel = MMI->getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl, TII.get(XCore::PROLOG_LABEL)).addSym(FrameLabel);
- MMI->addFrameInst(MCCFIInstruction::createDefCfaOffset(FrameLabel,
- -FrameSize*4));
- if (LRSavedOnEntry) {
- unsigned Reg = MRI->getDwarfRegNum(XCore::LR, true);
- MMI->addFrameInst(MCCFIInstruction::createOffset(FrameLabel, Reg, 0));
- }
+ EmitDefCfaOffset(MBB, MBBI, dl, TII, MMI, Adjusted*4);
+ unsigned DRegNum = MRI->getDwarfRegNum(XCore::LR, true);
+ EmitCfiOffset(MBB, MBBI, dl, TII, MMI, DRegNum, 0);
}
}
- if (saveLR) {
- int LRSpillOffset = MFI->getObjectOffset(XFI->getLRSpillSlot());
- storeToStack(MBB, MBBI, XCore::LR, LRSpillOffset + FrameSize*4, dl, TII);
- MBB.addLiveIn(XCore::LR);
+ // If necessary, save LR and FP to the stack, as we EXTSP.
+ SmallVector<StackSlotInfo,2> SpillList;
+ GetSpillList(SpillList, MFI, XFI, saveLR, FP);
+ // We want the nearest (negative) offsets first, so reverse list.
+ std::reverse(SpillList.begin(), SpillList.end());
+ for (unsigned i = 0, e = SpillList.size(); i != e; ++i) {
+ assert(SpillList[i].Offset % 4 == 0 && "Misaligned stack offset");
+ assert(SpillList[i].Offset <= 0 && "Unexpected positive stack offset");
+ int OffsetFromTop = - SpillList[i].Offset/4;
+ IfNeededExtSP(MBB, MBBI, dl, TII, MMI, OffsetFromTop, Adjusted, FrameSize,
+ emitFrameMoves);
+ int Offset = Adjusted - OffsetFromTop;
+ int Opcode = isImmU6(Offset) ? XCore::STWSP_ru6 : XCore::STWSP_lru6;
+ MBB.addLiveIn(SpillList[i].Reg);
+ BuildMI(MBB, MBBI, dl, TII.get(Opcode))
+ .addReg(SpillList[i].Reg, RegState::Kill)
+ .addImm(Offset)
+ .addMemOperand(getFrameIndexMMO(MBB, SpillList[i].FI,
+ MachineMemOperand::MOStore));
if (emitFrameMoves) {
- MCSymbol *SaveLRLabel = MMI->getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl, TII.get(XCore::PROLOG_LABEL)).addSym(SaveLRLabel);
- unsigned Reg = MRI->getDwarfRegNum(XCore::LR, true);
- MMI->addFrameInst(MCCFIInstruction::createOffset(SaveLRLabel, Reg,
- LRSpillOffset));
+ unsigned DRegNum = MRI->getDwarfRegNum(SpillList[i].Reg, true);
+ EmitCfiOffset(MBB, MBBI, dl, TII, MMI, DRegNum, SpillList[i].Offset);
}
}
+ // Complete any remaining Stack adjustment.
+ IfNeededExtSP(MBB, MBBI, dl, TII, MMI, FrameSize, Adjusted, FrameSize,
+ emitFrameMoves);
+ assert(Adjusted==FrameSize && "IfNeededExtSP has not completed adjustment");
+
if (FP) {
- // Save R10 to the stack.
- int FPSpillOffset = MFI->getObjectOffset(XFI->getFPSpillSlot());
- storeToStack(MBB, MBBI, XCore::R10, FPSpillOffset + FrameSize*4, dl, TII);
- // R10 is live-in. It is killed at the spill.
- MBB.addLiveIn(XCore::R10);
- if (emitFrameMoves) {
- MCSymbol *SaveR10Label = MMI->getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl, TII.get(XCore::PROLOG_LABEL)).addSym(SaveR10Label);
- unsigned Reg = MRI->getDwarfRegNum(XCore::R10, true);
- MMI->addFrameInst(MCCFIInstruction::createOffset(SaveR10Label, Reg,
- FPSpillOffset));
- }
// Set the FP from the SP.
- unsigned FramePtr = XCore::R10;
BuildMI(MBB, MBBI, dl, TII.get(XCore::LDAWSP_ru6), FramePtr).addImm(0);
- if (emitFrameMoves) {
- // Show FP is now valid.
- MCSymbol *FrameLabel = MMI->getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl, TII.get(XCore::PROLOG_LABEL)).addSym(FrameLabel);
- unsigned Reg = MRI->getDwarfRegNum(FramePtr, true);
- MMI->addFrameInst(MCCFIInstruction::createDefCfaRegister(FrameLabel,
- Reg));
- }
+ if (emitFrameMoves)
+ EmitDefCfaRegister(MBB, MBBI, dl, TII, MMI,
+ MRI->getDwarfRegNum(FramePtr, true));
}
if (emitFrameMoves) {
// Frame moves for callee saved.
- std::vector<std::pair<MCSymbol*, CalleeSavedInfo> >&SpillLabels =
- XFI->getSpillLabels();
+ auto SpillLabels = XFI->getSpillLabels();
for (unsigned I = 0, E = SpillLabels.size(); I != E; ++I) {
- MCSymbol *SpillLabel = SpillLabels[I].first;
+ MachineBasicBlock::iterator Pos = SpillLabels[I].first;
+ ++Pos;
CalleeSavedInfo &CSI = SpillLabels[I].second;
int Offset = MFI->getObjectOffset(CSI.getFrameIdx());
- unsigned Reg = MRI->getDwarfRegNum(CSI.getReg(), true);
- MMI->addFrameInst(MCCFIInstruction::createOffset(SpillLabel, Reg,
- Offset));
+ unsigned DRegNum = MRI->getDwarfRegNum(CSI.getReg(), true);
+ EmitCfiOffset(MBB, Pos, dl, TII, MMI, DRegNum, Offset);
+ }
+ if (XFI->hasEHSpillSlot()) {
+ // The unwinder requires stack slot & CFI offsets for the exception info.
+ // We do not save/spill these registers.
+ SmallVector<StackSlotInfo,2> SpillList;
+ GetEHSpillList(SpillList, MFI, XFI, MF.getTarget().getTargetLowering());
+ assert(SpillList.size()==2 && "Unexpected SpillList size");
+ EmitCfiOffset(MBB, MBBI, dl, TII, MMI,
+ MRI->getDwarfRegNum(SpillList[0].Reg, true),
+ SpillList[0].Offset);
+ EmitCfiOffset(MBB, MBBI, dl, TII, MMI,
+ MRI->getDwarfRegNum(SpillList[1].Reg, true),
+ SpillList[1].Offset);
}
}
}
void XCoreFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
- MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
const XCoreInstrInfo &TII =
*static_cast<const XCoreInstrInfo*>(MF.getTarget().getInstrInfo());
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
DebugLoc dl = MBBI->getDebugLoc();
-
- bool FP = hasFP(MF);
- if (FP) {
- // Restore the stack pointer.
- unsigned FramePtr = XCore::R10;
- BuildMI(MBB, MBBI, dl, TII.get(XCore::SETSP_1r))
- .addReg(FramePtr);
- }
+ unsigned RetOpcode = MBBI->getOpcode();
// Work out frame sizes.
- int FrameSize = MFI->getStackSize();
-
- assert(FrameSize%4 == 0 && "Misaligned frame size");
-
- FrameSize/=4;
-
- bool isU6 = isImmU6(FrameSize);
-
- if (!isU6 && !isImmU16(FrameSize)) {
- // FIXME could emit multiple instructions.
- report_fatal_error("emitEpilogue Frame size too big: " + Twine(FrameSize));
- }
-
- if (FP) {
- // Restore R10
- int FPSpillOffset = MFI->getObjectOffset(XFI->getFPSpillSlot());
- FPSpillOffset += FrameSize*4;
- loadFromStack(MBB, MBBI, XCore::R10, FPSpillOffset, dl, TII);
+ // We will adjust the SP in stages towards the final FrameSize.
+ int RemainingAdj = MFI->getStackSize();
+ assert(RemainingAdj%4 == 0 && "Misaligned frame size");
+ RemainingAdj /= 4;
+
+ if (RetOpcode == XCore::EH_RETURN) {
+ // 'Restore' the exception info the unwinder has placed into the stack
+ // slots.
+ SmallVector<StackSlotInfo,2> SpillList;
+ GetEHSpillList(SpillList, MFI, XFI, MF.getTarget().getTargetLowering());
+ RestoreSpillList(MBB, MBBI, dl, TII, RemainingAdj, SpillList);
+
+ // Return to the landing pad.
+ unsigned EhStackReg = MBBI->getOperand(0).getReg();
+ unsigned EhHandlerReg = MBBI->getOperand(1).getReg();
+ BuildMI(MBB, MBBI, dl, TII.get(XCore::SETSP_1r)).addReg(EhStackReg);
+ BuildMI(MBB, MBBI, dl, TII.get(XCore::BAU_1r)).addReg(EhHandlerReg);
+ MBB.erase(MBBI); // Erase the previous return instruction.
+ return;
}
- bool restoreLR = XFI->getUsesLR();
- if (restoreLR &&
- (FrameSize == 0 || MFI->getObjectOffset(XFI->getLRSpillSlot()) != 0)) {
- int LRSpillOffset = MFI->getObjectOffset(XFI->getLRSpillSlot());
- LRSpillOffset += FrameSize*4;
- loadFromStack(MBB, MBBI, XCore::LR, LRSpillOffset, dl, TII);
+ bool restoreLR = XFI->hasLRSpillSlot();
+ bool UseRETSP = restoreLR && RemainingAdj
+ && (MFI->getObjectOffset(XFI->getLRSpillSlot()) == 0);
+ if (UseRETSP)
restoreLR = false;
- }
+ bool FP = hasFP(MF);
+
+ if (FP) // Restore the stack pointer.
+ BuildMI(MBB, MBBI, dl, TII.get(XCore::SETSP_1r)).addReg(FramePtr);
- if (FrameSize) {
- if (restoreLR) {
+ // If necessary, restore LR and FP from the stack, as we EXTSP.
+ SmallVector<StackSlotInfo,2> SpillList;
+ GetSpillList(SpillList, MFI, XFI, restoreLR, FP);
+ RestoreSpillList(MBB, MBBI, dl, TII, RemainingAdj, SpillList);
+
+ if (RemainingAdj) {
+ // Complete all but one of the remaining Stack adjustments.
+ IfNeededLDAWSP(MBB, MBBI, dl, TII, 0, RemainingAdj);
+ if (UseRETSP) {
// Fold prologue into return instruction
- assert(MFI->getObjectOffset(XFI->getLRSpillSlot()) == 0);
- assert(MBBI->getOpcode() == XCore::RETSP_u6
- || MBBI->getOpcode() == XCore::RETSP_lu6);
- int Opcode = (isU6) ? XCore::RETSP_u6 : XCore::RETSP_lu6;
- MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(FrameSize);
+ assert(RetOpcode == XCore::RETSP_u6
+ || RetOpcode == XCore::RETSP_lu6);
+ int Opcode = isImmU6(RemainingAdj) ? XCore::RETSP_u6 : XCore::RETSP_lu6;
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opcode))
+ .addImm(RemainingAdj);
for (unsigned i = 3, e = MBBI->getNumOperands(); i < e; ++i)
MIB->addOperand(MBBI->getOperand(i)); // copy any variadic operands
- MBB.erase(MBBI);
+ MBB.erase(MBBI); // Erase the previous return instruction.
} else {
- int Opcode = (isU6) ? XCore::LDAWSP_ru6 : XCore::LDAWSP_lru6;
- BuildMI(MBB, MBBI, dl, TII.get(Opcode), XCore::SP).addImm(FrameSize);
+ int Opcode = isImmU6(RemainingAdj) ? XCore::LDAWSP_ru6 :
+ XCore::LDAWSP_lru6;
+ BuildMI(MBB, MBBI, dl, TII.get(Opcode), XCore::SP).addImm(RemainingAdj);
+ // Don't erase the return instruction.
}
- }
+ } // else Don't erase the return instruction.
}
-bool XCoreFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
+bool XCoreFrameLowering::
+spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
if (CSI.empty())
return true;
MachineFunction *MF = MBB.getParent();
const TargetInstrInfo &TII = *MF->getTarget().getInstrInfo();
-
XCoreFunctionInfo *XFI = MF->getInfo<XCoreFunctionInfo>();
bool emitFrameMoves = XCoreRegisterInfo::needsFrameMoves(*MF);
DebugLoc DL;
- if (MI != MBB.end()) DL = MI->getDebugLoc();
+ if (MI != MBB.end() && !MI->isDebugValue())
+ DL = MI->getDebugLoc();
for (std::vector<CalleeSavedInfo>::const_iterator it = CSI.begin();
it != CSI.end(); ++it) {
- // Add the callee-saved register as live-in. It's killed at the spill.
- MBB.addLiveIn(it->getReg());
-
unsigned Reg = it->getReg();
+ assert(Reg != XCore::LR && !(Reg == XCore::R10 && hasFP(*MF)) &&
+ "LR & FP are always handled in emitPrologue");
+
+ // Add the callee-saved register as live-in. It's killed at the spill.
+ MBB.addLiveIn(Reg);
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.storeRegToStackSlot(MBB, MI, Reg, true,
- it->getFrameIdx(), RC, TRI);
+ TII.storeRegToStackSlot(MBB, MI, Reg, true, it->getFrameIdx(), RC, TRI);
if (emitFrameMoves) {
- MCSymbol *SaveLabel = MF->getContext().CreateTempSymbol();
- BuildMI(MBB, MI, DL, TII.get(XCore::PROLOG_LABEL)).addSym(SaveLabel);
- XFI->getSpillLabels().push_back(std::make_pair(SaveLabel, *it));
+ auto Store = MI;
+ --Store;
+ XFI->getSpillLabels().push_back(std::make_pair(Store, *it));
}
}
return true;
}
-bool XCoreFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const{
+bool XCoreFrameLowering::
+restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const{
MachineFunction *MF = MBB.getParent();
const TargetInstrInfo &TII = *MF->getTarget().getInstrInfo();
-
bool AtStart = MI == MBB.begin();
MachineBasicBlock::iterator BeforeI = MI;
if (!AtStart)
@@ -312,9 +454,11 @@ bool XCoreFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
for (std::vector<CalleeSavedInfo>::const_iterator it = CSI.begin();
it != CSI.end(); ++it) {
unsigned Reg = it->getReg();
+ assert(Reg != XCore::LR && !(Reg == XCore::R10 && hasFP(*MF)) &&
+ "LR & FP are always handled in emitEpilogue");
+
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.loadRegFromStackSlot(MBB, MI, it->getReg(), it->getFrameIdx(),
- RC, TRI);
+ TII.loadRegFromStackSlot(MBB, MI, Reg, it->getFrameIdx(), RC, TRI);
assert(MI != MBB.begin() &&
"loadRegFromStackSlot didn't insert any code!");
// Insert in reverse order. loadRegFromStackSlot can insert multiple
@@ -358,7 +502,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
errs() << "eliminateCallFramePseudoInstr size too big: "
<< Amount << "\n";
#endif
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
MachineInstr *New;
@@ -377,44 +521,62 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MBB.insert(I, New);
}
}
-
+
MBB.erase(I);
}
-void
-XCoreFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const {
- MachineFrameInfo *MFI = MF.getFrameInfo();
- const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo();
- bool LRUsed = MF.getRegInfo().isPhysRegUsed(XCore::LR);
- const TargetRegisterClass *RC = &XCore::GRRegsRegClass;
+void XCoreFrameLowering::
+processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const {
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
+
+ bool LRUsed = MF.getRegInfo().isPhysRegUsed(XCore::LR);
+
+ if (!LRUsed && !MF.getFunction()->isVarArg() &&
+ MF.getFrameInfo()->estimateStackSize(MF))
+ // If we need to extend the stack it is more efficient to use entsp / retsp.
+ // We force the LR to be saved so these instructions are used.
+ LRUsed = true;
+
+ if (MF.getMMI().callsUnwindInit() || MF.getMMI().callsEHReturn()) {
+ // The unwinder expects to find spill slots for the exception info regs R0
+ // & R1. These are used during llvm.eh.return() to 'restore' the exception
+ // info. N.B. we do not spill or restore R0, R1 during normal operation.
+ XFI->createEHSpillSlot(MF);
+ // As we will have a stack, we force the LR to be saved.
+ LRUsed = true;
+ }
+
if (LRUsed) {
+ // We will handle the LR in the prologue/epilogue
+ // and allocate space on the stack ourselves.
MF.getRegInfo().setPhysRegUnused(XCore::LR);
-
- bool isVarArg = MF.getFunction()->isVarArg();
- int FrameIdx;
- if (! isVarArg) {
- // A fixed offset of 0 allows us to save / restore LR using entsp / retsp.
- FrameIdx = MFI->CreateFixedObject(RC->getSize(), 0, true);
- } else {
- FrameIdx = MFI->CreateStackObject(RC->getSize(), RC->getAlignment(),
- false);
- }
- XFI->setUsesLR(FrameIdx);
- XFI->setLRSpillSlot(FrameIdx);
+ XFI->createLRSpillSlot(MF);
}
- if (RegInfo->requiresRegisterScavenging(MF)) {
- // Reserve a slot close to SP or frame pointer.
+
+ if (hasFP(MF))
+ // A callee save register is used to hold the FP.
+ // This needs saving / restoring in the epilogue / prologue.
+ XFI->createFPSpillSlot(MF);
+}
+
+void XCoreFrameLowering::
+processFunctionBeforeFrameFinalized(MachineFunction &MF,
+ RegScavenger *RS) const {
+ assert(RS && "requiresRegisterScavenging failed");
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const TargetRegisterClass *RC = &XCore::GRRegsRegClass;
+ XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
+ // Reserve slots close to SP or frame pointer for Scavenging spills.
+ // When using SP for small frames, we don't need any scratch registers.
+ // When using SP for large frames, we may need 2 scratch registers.
+ // When using FP, for large or small frames, we may need 1 scratch register.
+ if (XFI->isLargeFrame(MF) || hasFP(MF))
+ RS->addScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
+ RC->getAlignment(),
+ false));
+ if (XFI->isLargeFrame(MF) && !hasFP(MF))
RS->addScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
RC->getAlignment(),
false));
- }
- if (hasFP(MF)) {
- // A callee save register is used to hold the FP.
- // This needs saving / restoring in the epilogue / prologue.
- XFI->setFPSpillSlot(MFI->CreateStackObject(RC->getSize(),
- RC->getAlignment(),
- false));
- }
}
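The rewritten XCore prologue/epilogue above no longer rejects large frames: EXTSP and LDAWSP immediates are capped at MaxImmU16 words, so the SP is moved in stages and each spill or restore is emitted once its offset from the top of the frame becomes reachable. The arithmetic of IfNeededExtSP can be sketched standalone as follows (word units throughout; invented function name, not the LLVM code):

// Standalone sketch of the staged SP adjustment done by IfNeededExtSP above:
// extend the stack in chunks of at most MaxImmU16 words until the requested
// offset from the top of the frame is reachable. Invented name; not LLVM code.
#include <cassert>
#include <cstdio>

static const int MaxImmU16 = (1 << 16) - 1;

// Adjusted is the number of words the SP has already been moved; emit (here:
// print) EXTSP steps until OffsetFromTop lies within the adjusted area.
static void ifNeededExtSP(int OffsetFromTop, int &Adjusted, int FrameSize) {
  while (OffsetFromTop > Adjusted) {
    assert(Adjusted < FrameSize && "OffsetFromTop is beyond FrameSize");
    int Remaining = FrameSize - Adjusted;
    int OpImm = (Remaining > MaxImmU16) ? MaxImmU16 : Remaining;
    std::printf("EXTSP %d\n", OpImm);
    Adjusted += OpImm;
  }
}

int main() {
  int Adjusted = 0;             // words already extended
  const int FrameSize = 100000; // frame size in words
  // A spill 70000 words from the top is out of range of the first 65535-word
  // EXTSP, so a second EXTSP (covering the rest of the frame) is emitted.
  ifNeededExtSP(/*OffsetFromTop=*/70000, Adjusted, FrameSize);
  std::printf("adjusted %d of %d words\n", Adjusted, FrameSize);
  return 0;
}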
diff --git a/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.h b/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.h
index ebad62f..e4f806a 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.h
@@ -27,26 +27,30 @@ namespace llvm {
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+ void emitPrologue(MachineFunction &MF) const override;
+ void emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const override;
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const override;
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const override;
void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const override;
- bool hasFP(const MachineFunction &MF) const;
+ bool hasFP(const MachineFunction &MF) const override;
void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS = NULL) const;
+ RegScavenger *RS = nullptr) const override;
+
+ void processFunctionBeforeFrameFinalized(MachineFunction &MF,
+ RegScavenger *RS = nullptr) const override;
//! Stack slot size (4 bytes)
static int stackSlotSize() {
diff --git a/contrib/llvm/lib/Target/XCore/XCoreFrameToArgsOffsetElim.cpp b/contrib/llvm/lib/Target/XCore/XCoreFrameToArgsOffsetElim.cpp
new file mode 100644
index 0000000..30c7b59
--- /dev/null
+++ b/contrib/llvm/lib/Target/XCore/XCoreFrameToArgsOffsetElim.cpp
@@ -0,0 +1,62 @@
+//===-- XCoreFrameToArgsOffsetElim.cpp ----------------------------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Replace Pseudo FRAME_TO_ARGS_OFFSET with the appropriate real offset.
+//
+//===----------------------------------------------------------------------===//
+
+#include "XCore.h"
+#include "XCoreInstrInfo.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+using namespace llvm;
+
+namespace {
+ struct XCoreFTAOElim : public MachineFunctionPass {
+ static char ID;
+ XCoreFTAOElim() : MachineFunctionPass(ID) {}
+
+ bool runOnMachineFunction(MachineFunction &Fn) override;
+
+ const char *getPassName() const override {
+ return "XCore FRAME_TO_ARGS_OFFSET Elimination";
+ }
+ };
+ char XCoreFTAOElim::ID = 0;
+}
+
+/// createXCoreFrameToArgsOffsetEliminationPass - returns an instance of the
+/// Frame to args offset elimination pass
+FunctionPass *llvm::createXCoreFrameToArgsOffsetEliminationPass() {
+ return new XCoreFTAOElim();
+}
+
+bool XCoreFTAOElim::runOnMachineFunction(MachineFunction &MF) {
+ const XCoreInstrInfo &TII =
+ *static_cast<const XCoreInstrInfo*>(MF.getTarget().getInstrInfo());
+ unsigned StackSize = MF.getFrameInfo()->getStackSize();
+ for (MachineFunction::iterator MFI = MF.begin(), E = MF.end(); MFI != E;
+ ++MFI) {
+ MachineBasicBlock &MBB = *MFI;
+ for (MachineBasicBlock::iterator MBBI = MBB.begin(), EE = MBB.end();
+ MBBI != EE; ++MBBI) {
+ if (MBBI->getOpcode() == XCore::FRAME_TO_ARGS_OFFSET) {
+ MachineInstr *OldInst = MBBI;
+ unsigned Reg = OldInst->getOperand(0).getReg();
+ MBBI = TII.loadImmediate(MBB, MBBI, Reg, StackSize);
+ OldInst->eraseFromParent();
+ }
+ }
+ }
+ return true;
+}
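The new XCoreFrameToArgsOffsetElim pass above is a plain rewrite pass: it walks every instruction and replaces the FRAME_TO_ARGS_OFFSET pseudo with a load of the now-final stack size. A toy standalone version of that walk, with a made-up instruction type and opcode strings rather than MachineInstr:

// Toy sketch of the rewrite performed by XCoreFrameToArgsOffsetElim above:
// replace each FRAME_TO_ARGS_OFFSET pseudo with a load of the frame size.
// Made-up instruction type and opcode strings; not the LLVM pass.
#include <cstdio>
#include <string>
#include <vector>

struct Inst {
  std::string Opcode;
  int Imm;
};

static void eliminateFrameToArgsOffset(std::vector<Inst> &Block, int StackSize) {
  for (Inst &I : Block)
    if (I.Opcode == "FRAME_TO_ARGS_OFFSET") {
      I.Opcode = "LOAD_IMM"; // the real pass calls TII.loadImmediate() here
      I.Imm = StackSize;
    }
}

int main() {
  std::vector<Inst> Block = {
      {"ENTSP", 3}, {"FRAME_TO_ARGS_OFFSET", 0}, {"RETSP", 3}};
  eliminateFrameToArgsOffset(Block, /*StackSize=*/12);
  for (const Inst &I : Block)
    std::printf("%s %d\n", I.Opcode.c_str(), I.Imm);
  return 0;
}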
diff --git a/contrib/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp b/contrib/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
index e28f84f..86bc6f2 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
@@ -44,7 +44,7 @@ namespace {
: SelectionDAGISel(TM, OptLevel),
Subtarget(*TM.getSubtargetImpl()) { }
- SDNode *Select(SDNode *N);
+ SDNode *Select(SDNode *N) override;
SDNode *SelectBRIND(SDNode *N);
/// getI32Imm - Return a target constant with the specified value, of type
@@ -66,8 +66,11 @@ namespace {
// Complex Pattern Selectors.
bool SelectADDRspii(SDValue Addr, SDValue &Base, SDValue &Offset);
-
- virtual const char *getPassName() const {
+
+ bool SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
+ std::vector<SDValue> &OutOps) override;
+
+ const char *getPassName() const override {
return "XCore DAG->DAG Pattern Instruction Selection";
}
@@ -86,14 +89,14 @@ FunctionPass *llvm::createXCoreISelDag(XCoreTargetMachine &TM,
bool XCoreDAGToDAGISel::SelectADDRspii(SDValue Addr, SDValue &Base,
SDValue &Offset) {
- FrameIndexSDNode *FIN = 0;
+ FrameIndexSDNode *FIN = nullptr;
if ((FIN = dyn_cast<FrameIndexSDNode>(Addr))) {
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
Offset = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
if (Addr.getOpcode() == ISD::ADD) {
- ConstantSDNode *CN = 0;
+ ConstantSDNode *CN = nullptr;
if ((FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0)))
&& (CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
&& (CN->getSExtValue() % 4 == 0 && CN->getSExtValue() >= 0)) {
@@ -106,6 +109,28 @@ bool XCoreDAGToDAGISel::SelectADDRspii(SDValue Addr, SDValue &Base,
return false;
}
+bool XCoreDAGToDAGISel::
+SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
+ std::vector<SDValue> &OutOps) {
+ SDValue Reg;
+ switch (ConstraintCode) {
+ default: return true;
+ case 'm': // Memory.
+ switch (Op.getOpcode()) {
+ default: return true;
+ case XCoreISD::CPRelativeWrapper:
+ Reg = CurDAG->getRegister(XCore::CP, MVT::i32);
+ break;
+ case XCoreISD::DPRelativeWrapper:
+ Reg = CurDAG->getRegister(XCore::DP, MVT::i32);
+ break;
+ }
+ }
+ OutOps.push_back(Reg);
+ OutOps.push_back(Op.getOperand(0));
+ return false;
+}
+
SDNode *XCoreDAGToDAGISel::Select(SDNode *N) {
SDLoc dl(N);
switch (N->getOpcode()) {
@@ -202,8 +227,7 @@ replaceInChain(SelectionDAG *CurDAG, SDValue Chain, SDValue Old, SDValue New)
}
if (!found)
return SDValue();
- return CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other,
- &Ops[0], Ops.size());
+ return CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, Ops);
}
SDNode *XCoreDAGToDAGISel::SelectBRIND(SDNode *N) {
@@ -212,10 +236,10 @@ SDNode *XCoreDAGToDAGISel::SelectBRIND(SDNode *N) {
SDValue Chain = N->getOperand(0);
SDValue Addr = N->getOperand(1);
if (Addr->getOpcode() != ISD::INTRINSIC_W_CHAIN)
- return 0;
+ return nullptr;
unsigned IntNo = cast<ConstantSDNode>(Addr->getOperand(1))->getZExtValue();
if (IntNo != Intrinsic::xcore_checkevent)
- return 0;
+ return nullptr;
SDValue nextAddr = Addr->getOperand(2);
SDValue CheckEventChainOut(Addr.getNode(), 1);
if (!CheckEventChainOut.use_empty()) {
@@ -227,7 +251,7 @@ SDNode *XCoreDAGToDAGISel::SelectBRIND(SDNode *N) {
SDValue NewChain = replaceInChain(CurDAG, Chain, CheckEventChainOut,
CheckEventChainIn);
if (!NewChain.getNode())
- return 0;
+ return nullptr;
Chain = NewChain;
}
// Enable events on the thread using setsr 1 and then disable them immediately
diff --git a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index 89ad27d..be7ef64 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -11,8 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "xcore-lower"
-
#include "XCoreISelLowering.h"
#include "XCore.h"
#include "XCoreMachineFunctionInfo.h"
@@ -28,6 +26,7 @@
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
@@ -40,6 +39,8 @@
using namespace llvm;
+#define DEBUG_TYPE "xcore-lower"
+
const char *XCoreTargetLowering::
getTargetNodeName(unsigned Opcode) const
{
@@ -49,6 +50,7 @@ getTargetNodeName(unsigned Opcode) const
case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
+ case XCoreISD::LDWSP : return "XCoreISD::LDWSP";
case XCoreISD::STWSP : return "XCoreISD::STWSP";
case XCoreISD::RETSP : return "XCoreISD::RETSP";
case XCoreISD::LADD : return "XCoreISD::LADD";
@@ -59,15 +61,16 @@ getTargetNodeName(unsigned Opcode) const
case XCoreISD::CRC8 : return "XCoreISD::CRC8";
case XCoreISD::BR_JT : return "XCoreISD::BR_JT";
case XCoreISD::BR_JT32 : return "XCoreISD::BR_JT32";
+ case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
+ case XCoreISD::EH_RETURN : return "XCoreISD::EH_RETURN";
case XCoreISD::MEMBARRIER : return "XCoreISD::MEMBARRIER";
- default : return NULL;
+ default : return nullptr;
}
}
-XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
- : TargetLowering(XTM, new XCoreTargetObjectFile()),
- TM(XTM),
- Subtarget(*XTM.getSubtargetImpl()) {
+XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM)
+ : TargetLowering(TM, new XCoreTargetObjectFile()), TM(TM),
+ Subtarget(TM.getSubtarget<XCoreSubtarget>()) {
// Set up the register classes.
addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);
@@ -88,15 +91,12 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
// XCore does not have the NodeTypes below.
setOperationAction(ISD::BR_CC, MVT::i32, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
+ setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
setOperationAction(ISD::ADDC, MVT::i32, Expand);
setOperationAction(ISD::ADDE, MVT::i32, Expand);
setOperationAction(ISD::SUBC, MVT::i32, Expand);
setOperationAction(ISD::SUBE, MVT::i32, Expand);
- // Stop the combiner recombining select and set_cc
- setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
-
// 64bit
setOperationAction(ISD::ADD, MVT::i64, Custom);
setOperationAction(ISD::SUB, MVT::i64, Custom);
@@ -150,11 +150,18 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
// Exception handling
+ setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
setExceptionPointerRegister(XCore::R0);
setExceptionSelectorRegister(XCore::R1);
+ setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
// Atomic operations
+ // We request a fence for ATOMIC_* instructions, reducing them to Monotonic.
+ // As we are always sequentially consistent, an ATOMIC_FENCE becomes a no-op.
+ setInsertFencesForAtomic(true);
setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
// TRAMPOLINE is custom lowered.
setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
@@ -170,8 +177,11 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
// We have target-specific dag combine patterns for the following nodes:
setTargetDAGCombine(ISD::STORE);
setTargetDAGCombine(ISD::ADD);
+ setTargetDAGCombine(ISD::INTRINSIC_VOID);
+ setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
setMinFunctionAlignment(1);
+ setPrefFunctionAlignment(2);
}
bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
@@ -196,13 +206,13 @@ SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode())
{
+ case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
case ISD::BR_JT: return LowerBR_JT(Op, DAG);
case ISD::LOAD: return LowerLOAD(Op, DAG);
case ISD::STORE: return LowerSTORE(Op, DAG);
- case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
case ISD::VAARG: return LowerVAARG(Op, DAG);
case ISD::VASTART: return LowerVASTART(Op, DAG);
case ISD::SMUL_LOHI: return LowerSMUL_LOHI(Op, DAG);
@@ -211,10 +221,14 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::ADD:
case ISD::SUB: return ExpandADDSUB(Op.getNode(), DAG);
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
+ case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
+ case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
+ case ISD::ATOMIC_LOAD: return LowerATOMIC_LOAD(Op, DAG);
+ case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
default:
llvm_unreachable("unimplemented operand");
}
@@ -239,51 +253,64 @@ void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
// Misc Lower Operation implementation
//===----------------------------------------------------------------------===//
-SDValue XCoreTargetLowering::
-LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
-{
- SDLoc dl(Op);
- SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i32, Op.getOperand(2),
- Op.getOperand(3), Op.getOperand(4));
- return DAG.getNode(ISD::SELECT, dl, MVT::i32, Cond, Op.getOperand(0),
- Op.getOperand(1));
-}
-
-SDValue XCoreTargetLowering::
-getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV,
- SelectionDAG &DAG) const
-{
+SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
+ const GlobalValue *GV,
+ SelectionDAG &DAG) const {
// FIXME there is no actual debug info here
SDLoc dl(GA);
- const GlobalValue *UnderlyingGV = GV;
- // If GV is an alias then use the aliasee to determine the wrapper type
- if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
- UnderlyingGV = GA->resolveAliasedGlobal();
- if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(UnderlyingGV)) {
- if (GVar->isConstant())
- return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
- return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
- }
- return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
+
+ if (GV->getType()->getElementType()->isFunctionTy())
+ return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
+
+ const auto *GVar = dyn_cast<GlobalVariable>(GV);
+ if ((GV->hasSection() && StringRef(GV->getSection()).startswith(".cp.")) ||
+ (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
+ return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
+
+ return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
+}
+
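+// Decide whether GV can be addressed with a single dp/cp-relative reference.
+// Unsized objects and objects of CodeModelLargeSize bytes or more are instead
+// accessed through a constant-pool entry (see LowerGlobalAddress below).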
+static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL) {
+ if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
+ return true;
+
+ Type *ObjType = GV->getType()->getPointerElementType();
+ if (!ObjType->isSized())
+ return false;
+
+ unsigned ObjSize = XTL.getDataLayout()->getTypeAllocSize(ObjType);
+ return ObjSize < CodeModelLargeSize && ObjSize != 0;
}
SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
- SDLoc DL(Op);
const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
const GlobalValue *GV = GN->getGlobal();
+ SDLoc DL(GN);
int64_t Offset = GN->getOffset();
- // We can only fold positive offsets that are a multiple of the word size.
- int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
- SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
- GA = getGlobalAddressWrapper(GA, GV, DAG);
- // Handle the rest of the offset.
- if (Offset != FoldedOffset) {
- SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, MVT::i32);
- GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
+ if (IsSmallObject(GV, *this)) {
+ // We can only fold positive offsets that are a multiple of the word size.
+ int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
+ SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
+ GA = getGlobalAddressWrapper(GA, GV, DAG);
+ // Handle the rest of the offset.
+ if (Offset != FoldedOffset) {
+ SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, MVT::i32);
+ GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
+ }
+ return GA;
+ } else {
+ // Ideally we would not fold in offset with an index <= 11.
+ Type *Ty = Type::getInt8PtrTy(*DAG.getContext());
+ Constant *GA = ConstantExpr::getBitCast(const_cast<GlobalValue*>(GV), Ty);
+ Ty = Type::getInt32Ty(*DAG.getContext());
+ Constant *Idx = ConstantInt::get(Ty, Offset);
+ Constant *GAI = ConstantExpr::getGetElementPtr(GA, Idx);
+ SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
+ return DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), CP,
+ MachinePointerInfo(), false, false, false, 0);
}
- return GA;
}
SDValue XCoreTargetLowering::
@@ -307,10 +334,10 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
SDValue Res;
if (CP->isMachineConstantPoolEntry()) {
Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
- CP->getAlignment());
+ CP->getAlignment(), CP->getOffset());
} else {
Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
- CP->getAlignment());
+ CP->getAlignment(), CP->getOffset());
}
return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}
@@ -382,13 +409,13 @@ lowerLoadWordFromAlignedBasePlusOffset(SDLoc DL, SDValue Chain, SDValue Base,
Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
High.getValue(1));
SDValue Ops[] = { Result, Chain };
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
{
APInt KnownZero, KnownOne;
- DAG.ComputeMaskedBits(Value, KnownZero, KnownOne);
+ DAG.computeKnownBits(Value, KnownZero, KnownOne);
return KnownZero.countTrailingOnes() >= 2;
}
@@ -448,7 +475,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
High.getValue(1));
SDValue Ops[] = { Result, Chain };
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
// Lower to a call to __misaligned_load(BasePtr).
@@ -460,17 +487,15 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
Entry.Node = BasePtr;
Args.push_back(Entry);
- TargetLowering::CallLoweringInfo CLI(Chain, IntPtrTy, false, false,
- false, false, 0, CallingConv::C, /*isTailCall=*/false,
- /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
- DAG.getExternalSymbol("__misaligned_load", getPointerTy()),
- Args, DAG, DL);
- std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
-
- SDValue Ops[] =
- { CallResult.first, CallResult.second };
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(DL).setChain(Chain)
+ .setCallee(CallingConv::C, IntPtrTy,
+ DAG.getExternalSymbol("__misaligned_load", getPointerTy()),
+ std::move(Args), 0);
- return DAG.getMergeValues(Ops, 2, DL);
+ std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
+ SDValue Ops[] = { CallResult.first, CallResult.second };
+ return DAG.getMergeValues(Ops, DL);
}
SDValue XCoreTargetLowering::
@@ -522,14 +547,13 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
Entry.Node = Value;
Args.push_back(Entry);
- TargetLowering::CallLoweringInfo CLI(Chain,
- Type::getVoidTy(*DAG.getContext()), false, false,
- false, false, 0, CallingConv::C, /*isTailCall=*/false,
- /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
- DAG.getExternalSymbol("__misaligned_store", getPointerTy()),
- Args, DAG, dl);
- std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl).setChain(Chain)
+ .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
+ DAG.getExternalSymbol("__misaligned_store", getPointerTy()),
+ std::move(Args), 0);
+ std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
return CallResult.second;
}
@@ -547,7 +571,7 @@ LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
LHS, RHS);
SDValue Lo(Hi.getNode(), 1);
SDValue Ops[] = { Lo, Hi };
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, dl);
}
SDValue XCoreTargetLowering::
@@ -564,7 +588,7 @@ LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
Zero, Zero);
SDValue Lo(Hi.getNode(), 1);
SDValue Ops[] = { Lo, Hi };
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, dl);
}
/// isADDADDMUL - Return whether Op is in a form that is equivalent to
@@ -695,7 +719,7 @@ ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
if (N->getOpcode() == ISD::ADD) {
SDValue Result = TryExpandADDWithMul(N, DAG);
- if (Result.getNode() != 0)
+ if (Result.getNode())
return Result;
}
@@ -767,18 +791,88 @@ LowerVASTART(SDValue Op, SelectionDAG &DAG) const
SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
SelectionDAG &DAG) const {
- SDLoc dl(Op);
+ // This node represents llvm.frameaddress on the DAG.
+ // It takes one operand, the index of the frame address to return.
+ // An index of zero corresponds to the current function's frame address.
+ // An index of one to the parent's frame address, and so on.
// Depths > 0 not supported yet!
if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
return SDValue();
MachineFunction &MF = DAG.getMachineFunction();
const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
- return DAG.getCopyFromReg(DAG.getEntryNode(), dl,
+ return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
RegInfo->getFrameRegister(MF), MVT::i32);
}
SDValue XCoreTargetLowering::
+LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
+ // This node represents llvm.returnaddress on the DAG.
+ // It takes one operand, the index of the return address to return.
+ // An index of zero corresponds to the current function's return address.
+ // An index of one to the parent's return address, and so on.
+ // Depths > 0 not supported yet!
+ if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
+ return SDValue();
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
+ int FI = XFI->createLRSpillSlot(MF);
+ SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
+ return DAG.getLoad(getPointerTy(), SDLoc(Op), DAG.getEntryNode(), FIN,
+ MachinePointerInfo::getFixedStack(FI), false, false,
+ false, 0);
+}
+
+SDValue XCoreTargetLowering::
+LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
+ // This node represents the offset from the frame pointer to the first
+ // on-stack argument. It is needed for correct stack adjustment during
+ // unwinding. However, we don't know the offset until the frame has been
+ // finalised; this is done during the XCoreFTAOElim pass.
+ return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);
+}
+
+SDValue XCoreTargetLowering::
+LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
+ // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
+ // This node represents the 'eh_return' GCC DWARF builtin, which is used to
+ // return from an exception. The general meaning is: adjust the stack by
+ // OFFSET and pass execution to HANDLER.
+ MachineFunction &MF = DAG.getMachineFunction();
+ SDValue Chain = Op.getOperand(0);
+ SDValue Offset = Op.getOperand(1);
+ SDValue Handler = Op.getOperand(2);
+ SDLoc dl(Op);
+
+ // Absolute SP = (FP + FrameToArgs) + Offset
+ const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
+ SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
+ RegInfo->getFrameRegister(MF), MVT::i32);
+ SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
+ MVT::i32);
+ Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
+ Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);
+
+ // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
+ // which leaves two caller-saved registers, R2 and R3, for us to use.
+ unsigned StackReg = XCore::R2;
+ unsigned HandlerReg = XCore::R3;
+
+ SDValue OutChains[] = {
+ DAG.getCopyToReg(Chain, dl, StackReg, Stack),
+ DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
+ };
+
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
+
+ return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
+ DAG.getRegister(StackReg, MVT::i32),
+ DAG.getRegister(HandlerReg, MVT::i32));
+
+}
+
+SDValue XCoreTargetLowering::
LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
return Op.getOperand(0);
}
@@ -836,7 +930,7 @@ LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
MachinePointerInfo(TrmpAddr, 16), false, false,
0);
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 5);
+ return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
SDValue XCoreTargetLowering::
@@ -851,7 +945,7 @@ LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
Op.getOperand(1), Op.getOperand(2) , Op.getOperand(3));
SDValue Crc(Data.getNode(), 1);
SDValue Results[] = { Crc, Data };
- return DAG.getMergeValues(Results, 2, DL);
+ return DAG.getMergeValues(Results, DL);
}
return SDValue();
}
@@ -862,6 +956,67 @@ LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}
+SDValue XCoreTargetLowering::
+LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
+ AtomicSDNode *N = cast<AtomicSDNode>(Op);
+ assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
+ assert(N->getOrdering() <= Monotonic &&
+ "setInsertFencesForAtomic(true) and yet greater than Monotonic");
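+ // With fences inserted around atomics, a monotonic load of a naturally
+ // aligned object can be expressed as an ordinary (ext)load; misaligned
+ // atomics are rejected below.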
+ if (N->getMemoryVT() == MVT::i32) {
+ if (N->getAlignment() < 4)
+ report_fatal_error("atomic load must be aligned");
+ return DAG.getLoad(getPointerTy(), SDLoc(Op), N->getChain(),
+ N->getBasePtr(), N->getPointerInfo(),
+ N->isVolatile(), N->isNonTemporal(),
+ N->isInvariant(), N->getAlignment(),
+ N->getTBAAInfo(), N->getRanges());
+ }
+ if (N->getMemoryVT() == MVT::i16) {
+ if (N->getAlignment() < 2)
+ report_fatal_error("atomic load must be aligned");
+ return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
+ N->getBasePtr(), N->getPointerInfo(), MVT::i16,
+ N->isVolatile(), N->isNonTemporal(),
+ N->getAlignment(), N->getTBAAInfo());
+ }
+ if (N->getMemoryVT() == MVT::i8)
+ return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
+ N->getBasePtr(), N->getPointerInfo(), MVT::i8,
+ N->isVolatile(), N->isNonTemporal(),
+ N->getAlignment(), N->getTBAAInfo());
+ return SDValue();
+}
+
+SDValue XCoreTargetLowering::
+LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
+ AtomicSDNode *N = cast<AtomicSDNode>(Op);
+ assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
+ assert(N->getOrdering() <= Monotonic &&
+ "setInsertFencesForAtomic(true) and yet greater than Monotonic");
+ if (N->getMemoryVT() == MVT::i32) {
+ if (N->getAlignment() < 4)
+ report_fatal_error("atomic store must be aligned");
+ return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(),
+ N->getBasePtr(), N->getPointerInfo(),
+ N->isVolatile(), N->isNonTemporal(),
+ N->getAlignment(), N->getTBAAInfo());
+ }
+ if (N->getMemoryVT() == MVT::i16) {
+ if (N->getAlignment() < 2)
+ report_fatal_error("atomic store must be aligned");
+ return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
+ N->getBasePtr(), N->getPointerInfo(), MVT::i16,
+ N->isVolatile(), N->isNonTemporal(),
+ N->getAlignment(), N->getTBAAInfo());
+ }
+ if (N->getMemoryVT() == MVT::i8)
+ return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
+ N->getBasePtr(), N->getPointerInfo(), MVT::i8,
+ N->isVolatile(), N->isNonTemporal(),
+ N->getAlignment(), N->getTBAAInfo());
+ return SDValue();
+}
+
//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//
@@ -902,6 +1057,51 @@ XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
}
+/// LowerCallResult - Lower the result values of a call into the
+/// appropriate copies out of physical registers / memory locations.
+static SDValue
+LowerCallResult(SDValue Chain, SDValue InFlag,
+ const SmallVectorImpl<CCValAssign> &RVLocs,
+ SDLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) {
+ SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
+ // Copy results out of physical registers.
+ for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
+ const CCValAssign &VA = RVLocs[i];
+ if (VA.isRegLoc()) {
+ Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
+ InFlag).getValue(1);
+ InFlag = Chain.getValue(2);
+ InVals.push_back(Chain.getValue(0));
+ } else {
+ assert(VA.isMemLoc());
+ ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
+ InVals.size()));
+ // Reserve space for this result.
+ InVals.push_back(SDValue());
+ }
+ }
+
+ // Copy results out of memory.
+ SmallVector<SDValue, 4> MemOpChains;
+ for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
+ int offset = ResultMemLocs[i].first;
+ unsigned index = ResultMemLocs[i].second;
+ SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
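+ // XCoreISD::LDWSP takes a word offset, so convert the byte offset to words.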
+ SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, MVT::i32) };
+ SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
+ InVals[index] = load;
+ MemOpChains.push_back(load.getValue(1));
+ }
+
+ // Transform all load nodes into one single node because
+ // all load nodes are independent of each other.
+ if (!MemOpChains.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
+
+ return Chain;
+}
+
/// LowerCCCCallTo - functions arguments are copied from virtual
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
@@ -927,8 +1127,15 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
CCInfo.AnalyzeCallOperands(Outs, CC_XCore);
+ SmallVector<CCValAssign, 16> RVLocs;
+ // Analyze return values to determine the number of bytes of stack required.
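+ // Return values passed in memory are laid out after the outgoing argument
+ // area, so the stack already allocated for arguments is reserved first.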
+ CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+ getTargetMachine(), RVLocs, *DAG.getContext());
+ RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
+ RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
+
// Get a count of how many bytes are to be pushed on the stack.
- unsigned NumBytes = CCInfo.getNextStackOffset();
+ unsigned NumBytes = RetCCInfo.getNextStackOffset();
Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes,
getPointerTy(), true), dl);
@@ -974,8 +1181,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// Transform all store nodes into one single node because
// all store nodes are independent of each other.
if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
// Build a sequence of copy-to-reg nodes chained together with token
// chain and flag operands which copy the outgoing args into registers.
@@ -1014,7 +1220,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
if (InFlag.getNode())
Ops.push_back(InFlag);
- Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, &Ops[0], Ops.size());
+ Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
InFlag = Chain.getValue(1);
// Create the CALLSEQ_END node.
@@ -1026,35 +1232,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// Handle result values, copying them out of physregs into vregs that we
// return.
- return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
- Ins, dl, DAG, InVals);
-}
-
-/// LowerCallResult - Lower the result values of a call into the
-/// appropriate copies out of appropriate physical registers.
-SDValue
-XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
-
- // Assign locations to each value returned by this call.
- SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
-
- CCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
-
- // Copy all of the result registers out of their specified physreg.
- for (unsigned i = 0; i != RVLocs.size(); ++i) {
- Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
- RVLocs[i].getValVT(), InFlag).getValue(1);
- InFlag = Chain.getValue(2);
- InVals.push_back(Chain.getValue(0));
- }
-
- return Chain;
+ return LowerCallResult(Chain, InFlag, RVLocs, dl, DAG, InVals);
}
//===----------------------------------------------------------------------===//
@@ -1102,6 +1280,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineRegisterInfo &RegInfo = MF.getRegInfo();
+ XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
@@ -1114,6 +1293,9 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
unsigned LRSaveSize = StackSlotSize;
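+ // Record where on-stack return values will be placed (after the incoming
+ // arguments and the LR save area) so that LowerReturn can find them.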
+ if (!isVarArg)
+ XFI->setReturnStackOffset(CCInfo.getNextStackOffset() + LRSaveSize);
+
// All getCopyFromReg ops must precede any getMemcpys to prevent the
// scheduler clobbering a register before it has been copied.
// The stages are:
@@ -1141,7 +1323,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
errs() << "LowerFormalArguments Unhandled argument type: "
<< RegVT.getSimpleVT().SimpleTy << "\n";
#endif
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
case MVT::i32:
unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
@@ -1178,7 +1360,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
// 1b. CopyFromReg vararg registers.
if (isVarArg) {
// Argument registers
- static const uint16_t ArgRegs[] = {
+ static const MCPhysReg ArgRegs[] = {
XCore::R0, XCore::R1, XCore::R2, XCore::R3
};
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
@@ -1216,8 +1398,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
// 2. chain CopyFromReg nodes into a TokenFactor.
if (!CFRegNode.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &CFRegNode[0],
- CFRegNode.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);
// 3. Memcpy 'byVal' args & push final InVals.
// Aggregates passed "byVal" need to be copied by the callee.
@@ -1230,7 +1411,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
unsigned Size = ArgDI->Flags.getByValSize();
unsigned Align = std::max(StackSlotSize, ArgDI->Flags.getByValAlign());
// Create a new object on the stack and copy the pointee into it.
- int FI = MFI->CreateStackObject(Size, Align, false, false);
+ int FI = MFI->CreateStackObject(Size, Align, false);
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
InVals.push_back(FIN);
MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV,
@@ -1246,8 +1427,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
// 4, chain mem ops nodes into a TokenFactor.
if (!MemOps.empty()) {
MemOps.push_back(Chain);
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOps[0],
- MemOps.size());
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
}
return Chain;
@@ -1264,7 +1444,11 @@ CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
LLVMContext &Context) const {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
- return CCInfo.CheckReturn(Outs, RetCC_XCore);
+ if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
+ return false;
+ if (CCInfo.getNextStackOffset() != 0 && isVarArg)
+ return false;
+ return true;
}
SDValue
@@ -1274,6 +1458,10 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
const SmallVectorImpl<SDValue> &OutVals,
SDLoc dl, SelectionDAG &DAG) const {
+ XCoreFunctionInfo *XFI =
+ DAG.getMachineFunction().getInfo<XCoreFunctionInfo>();
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+
// CCValAssign - represent the assignment of
// the return value to a location
SmallVector<CCValAssign, 16> RVLocs;
@@ -1283,6 +1471,9 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
getTargetMachine(), RVLocs, *DAG.getContext());
// Analyze return values.
+ if (!isVarArg)
+ CCInfo.AllocateStack(XFI->getReturnStackOffset(), 4);
+
CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
SDValue Flag;
@@ -1291,13 +1482,42 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
// Return on XCore is always a "retsp 0"
RetOps.push_back(DAG.getConstant(0, MVT::i32));
- // Copy the result values into the output registers.
- for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ SmallVector<SDValue, 4> MemOpChains;
+ // Handle return values that must be copied to memory.
+ for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
CCValAssign &VA = RVLocs[i];
- assert(VA.isRegLoc() && "Can only return in registers!");
+ if (VA.isRegLoc())
+ continue;
+ assert(VA.isMemLoc());
+ if (isVarArg) {
+ report_fatal_error("Can't return value from vararg function in memory");
+ }
+
+ int Offset = VA.getLocMemOffset();
+ unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
+ // Create the frame index object for the memory location.
+ int FI = MFI->CreateFixedObject(ObjSize, Offset, false);
+
+ // Create a SelectionDAG node corresponding to a store
+ // to this memory location.
+ SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
+ MemOpChains.push_back(DAG.getStore(Chain, dl, OutVals[i], FIN,
+ MachinePointerInfo::getFixedStack(FI), false, false,
+ 0));
+ }
- Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
- OutVals[i], Flag);
+ // Transform all store nodes into one single node because
+ // all stores are independent of each other.
+ if (!MemOpChains.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
+
+ // Now handle return values copied to registers.
+ for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
+ CCValAssign &VA = RVLocs[i];
+ if (!VA.isRegLoc())
+ continue;
+ // Copy the result values into the output registers.
+ Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
// guarantee that all emitted copies are
// stuck together, avoiding something bad
@@ -1311,8 +1531,7 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
if (Flag.getNode())
RetOps.push_back(Flag);
- return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
- &RetOps[0], RetOps.size());
+ return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);
}
//===----------------------------------------------------------------------===//
@@ -1350,8 +1569,7 @@ XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
+ std::next(MachineBasicBlock::iterator(MI)), BB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
// Next, add the true and fallthrough blocks as its successors.
@@ -1392,6 +1610,46 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
SDLoc dl(N);
switch (N->getOpcode()) {
default: break;
+ case ISD::INTRINSIC_VOID:
+ switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
+ case Intrinsic::xcore_outt:
+ case Intrinsic::xcore_outct:
+ case Intrinsic::xcore_chkct: {
+ SDValue OutVal = N->getOperand(3);
+ // These instructions ignore the high bits.
+ if (OutVal.hasOneUse()) {
+ unsigned BitWidth = OutVal.getValueSizeInBits();
+ APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
+ APInt KnownZero, KnownOne;
+ TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
+ !DCI.isBeforeLegalizeOps());
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (TLO.ShrinkDemandedConstant(OutVal, DemandedMask) ||
+ TLI.SimplifyDemandedBits(OutVal, DemandedMask, KnownZero, KnownOne,
+ TLO))
+ DCI.CommitTargetLoweringOpt(TLO);
+ }
+ break;
+ }
+ case Intrinsic::xcore_setpt: {
+ SDValue Time = N->getOperand(3);
+ // This instruction ignores the high bits.
+ if (Time.hasOneUse()) {
+ unsigned BitWidth = Time.getValueSizeInBits();
+ APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
+ APInt KnownZero, KnownOne;
+ TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
+ !DCI.isBeforeLegalizeOps());
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (TLO.ShrinkDemandedConstant(Time, DemandedMask) ||
+ TLI.SimplifyDemandedBits(Time, DemandedMask, KnownZero, KnownOne,
+ TLO))
+ DCI.CommitTargetLoweringOpt(TLO);
+ }
+ break;
+ }
+ }
+ break;
case XCoreISD::LADD: {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
@@ -1410,7 +1668,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
DAG.getConstant(1, VT));
SDValue Ops[] = { Result, Carry };
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, dl);
}
// fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
@@ -1419,12 +1677,12 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
APInt KnownZero, KnownOne;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
- DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
+ DAG.computeKnownBits(N2, KnownZero, KnownOne);
if ((KnownZero & Mask) == Mask) {
SDValue Carry = DAG.getConstant(0, VT);
SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
SDValue Ops[] = { Result, Carry };
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, dl);
}
}
}
@@ -1442,13 +1700,13 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
APInt KnownZero, KnownOne;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
- DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
+ DAG.computeKnownBits(N2, KnownZero, KnownOne);
if ((KnownZero & Mask) == Mask) {
SDValue Borrow = N2;
SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
DAG.getConstant(0, VT), N2);
SDValue Ops[] = { Result, Borrow };
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, dl);
}
}
@@ -1458,12 +1716,12 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
APInt KnownZero, KnownOne;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
- DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
+ DAG.computeKnownBits(N2, KnownZero, KnownOne);
if ((KnownZero & Mask) == Mask) {
SDValue Borrow = DAG.getConstant(0, VT);
SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
SDValue Ops[] = { Result, Borrow };
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, dl);
}
}
}
@@ -1489,14 +1747,14 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
if (N->hasNUsesOfValue(0, 0)) {
SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
SDValue Ops[] = { Lo, Lo };
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, dl);
}
// Otherwise fold to ladd(a, b, 0)
SDValue Result =
DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
SDValue Carry(Result.getNode(), 1);
SDValue Ops[] = { Carry, Result };
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, dl);
}
}
break;
@@ -1580,11 +1838,11 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
return SDValue();
}
-void XCoreTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) const {
+void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth) const {
KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
switch (Op.getOpcode()) {
default: break;
@@ -1596,6 +1854,34 @@ void XCoreTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
KnownZero.getBitWidth() - 1);
}
break;
+ case ISD::INTRINSIC_W_CHAIN:
+ {
+ unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ switch (IntNo) {
+ case Intrinsic::xcore_getts:
+ // High bits are known to be zero.
+ KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
+ KnownZero.getBitWidth() - 16);
+ break;
+ case Intrinsic::xcore_int:
+ case Intrinsic::xcore_inct:
+ // High bits are known to be zero.
+ KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
+ KnownZero.getBitWidth() - 8);
+ break;
+ case Intrinsic::xcore_testct:
+ // Result is either 0 or 1.
+ KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
+ KnownZero.getBitWidth() - 1);
+ break;
+ case Intrinsic::xcore_testwct:
+ // Result is in the range 0 - 4.
+ KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
+ KnownZero.getBitWidth() - 3);
+ break;
+ }
+ }
+ break;
}
}
diff --git a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.h b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.h
index bc08497..62b89c3 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.h
@@ -42,6 +42,9 @@ namespace llvm {
// cp relative address
CPRelativeWrapper,
+ // Load word from stack
+ LDWSP,
+
// Store word to stack
STWSP,
@@ -72,6 +75,13 @@ namespace llvm {
// Jumptable branch using long branches for each entry.
BR_JT32,
+ // Offset from frame pointer to the first (possible) on-stack argument
+ FRAME_TO_ARGS_OFFSET,
+
+ // Exception handler return. The stack is restored to the first argument,
+ // followed by a jump to the second argument.
+ EH_RETURN,
+
// Memory barrier.
MEMBARRIER
};
@@ -84,37 +94,36 @@ namespace llvm {
{
public:
- explicit XCoreTargetLowering(XCoreTargetMachine &TM);
+ explicit XCoreTargetLowering(const TargetMachine &TM);
using TargetLowering::isZExtFree;
- virtual bool isZExtFree(SDValue Val, EVT VT2) const;
+ bool isZExtFree(SDValue Val, EVT VT2) const override;
- virtual unsigned getJumpTableEncoding() const;
- virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i32; }
+ unsigned getJumpTableEncoding() const override;
+ MVT getScalarShiftAmountTy(EVT LHSTy) const override { return MVT::i32; }
/// LowerOperation - Provide custom lowering hooks for some operations.
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
///
- virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
- SelectionDAG &DAG) const;
+ void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
+ SelectionDAG &DAG) const override;
/// getTargetNodeName - This method returns the name of a target specific
// DAG node.
- virtual const char *getTargetNodeName(unsigned Opcode) const;
+ const char *getTargetNodeName(unsigned Opcode) const override;
- virtual MachineBasicBlock *
+ MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB) const;
+ MachineBasicBlock *MBB) const override;
- virtual bool isLegalAddressingMode(const AddrMode &AM,
- Type *Ty) const;
+ bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;
private:
- const XCoreTargetMachine &TM;
+ const TargetMachine &TM;
const XCoreSubtarget &Subtarget;
// Lower Operand helpers
@@ -132,11 +141,6 @@ namespace llvm {
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
- SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;
SDValue getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV,
SelectionDAG &DAG) const;
@@ -147,63 +151,67 @@ namespace llvm {
// Lower Operand specifics
SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const;
// Inline asm support
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT VT) const;
+ MVT VT) const override;
// Expand specifics
SDValue TryExpandADDWithMul(SDNode *Op, SelectionDAG &DAG) const;
SDValue ExpandADDSUB(SDNode *Op, SelectionDAG &DAG) const;
- virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
- virtual void computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth = 0) const;
+ void computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth = 0) const override;
- virtual SDValue
+ SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
+ SmallVectorImpl<SDValue> &InVals) const override;
- virtual SDValue
+ SDValue
LowerCall(TargetLowering::CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const;
+ SmallVectorImpl<SDValue> &InVals) const override;
- virtual SDValue
+ SDValue
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const;
+ SDLoc dl, SelectionDAG &DAG) const override;
- virtual bool
+ bool
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
- LLVMContext &Context) const;
+ LLVMContext &Context) const override;
};
}
diff --git a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.cpp b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
index 33c7f31..36ea9a0 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
@@ -15,13 +15,19 @@
#include "XCore.h"
#include "XCoreMachineFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
#define GET_INSTRINFO_CTOR_DTOR
#include "XCoreGenInstrInfo.inc"
@@ -37,9 +43,6 @@ namespace XCore {
}
}
-using namespace llvm;
-
-
// Pin the vtable to this file.
void XCoreInstrInfo::anchor() {}
@@ -285,7 +288,7 @@ XCoreInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
assert((Cond.size() == 2 || Cond.size() == 0) &&
"Unexpected number of components!");
- if (FBB == 0) { // One way branch.
+ if (!FBB) { // One way branch.
if (Cond.empty()) {
// Unconditional branch
BuildMI(&MBB, DL, get(XCore::BRFU_lu6)).addMBB(TBB);
@@ -370,11 +373,20 @@ void XCoreInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
const TargetRegisterInfo *TRI) const
{
DebugLoc DL;
- if (I != MBB.end()) DL = I->getDebugLoc();
+ if (I != MBB.end() && !I->isDebugValue())
+ DL = I->getDebugLoc();
+ MachineFunction *MF = MBB.getParent();
+ const MachineFrameInfo &MFI = *MF->getFrameInfo();
+ MachineMemOperand *MMO =
+ MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIndex),
+ MachineMemOperand::MOStore,
+ MFI.getObjectSize(FrameIndex),
+ MFI.getObjectAlignment(FrameIndex));
BuildMI(MBB, I, DL, get(XCore::STWFI))
.addReg(SrcReg, getKillRegState(isKill))
.addFrameIndex(FrameIndex)
- .addImm(0);
+ .addImm(0)
+ .addMemOperand(MMO);
}
void XCoreInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
@@ -384,10 +396,19 @@ void XCoreInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
const TargetRegisterInfo *TRI) const
{
DebugLoc DL;
- if (I != MBB.end()) DL = I->getDebugLoc();
+ if (I != MBB.end() && !I->isDebugValue())
+ DL = I->getDebugLoc();
+ MachineFunction *MF = MBB.getParent();
+ const MachineFrameInfo &MFI = *MF->getFrameInfo();
+ MachineMemOperand *MMO =
+ MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIndex),
+ MachineMemOperand::MOLoad,
+ MFI.getObjectSize(FrameIndex),
+ MFI.getObjectAlignment(FrameIndex));
BuildMI(MBB, I, DL, get(XCore::LDWFI), DestReg)
.addFrameIndex(FrameIndex)
- .addImm(0);
+ .addImm(0)
+ .addMemOperand(MMO);
}
/// ReverseBranchCondition - Return the inverse opcode of the
@@ -399,3 +420,42 @@ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
Cond[0].setImm(GetOppositeBranchCondition((XCore::CondCode)Cond[0].getImm()));
return false;
}
+
+static inline bool isImmU6(unsigned val) {
+ return val < (1 << 6);
+}
+
+static inline bool isImmU16(unsigned val) {
+ return val < (1 << 16);
+}
+
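+// Return true if the value is a mask whose width can be materialised with a
+// single MKMSK instruction (1-8, 16, 24 or 32 bits).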
+static bool isImmMskBitp(unsigned val) {
+ if (!isMask_32(val)) {
+ return false;
+ }
+ int N = Log2_32(val) + 1;
+ return (N >= 1 && N <= 8) || N == 16 || N == 24 || N == 32;
+}
+
+MachineBasicBlock::iterator XCoreInstrInfo::loadImmediate(
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned Reg, uint64_t Value) const {
+ DebugLoc dl;
+ if (MI != MBB.end() && !MI->isDebugValue())
+ dl = MI->getDebugLoc();
+ if (isImmMskBitp(Value)) {
+ int N = Log2_32(Value) + 1;
+ return BuildMI(MBB, MI, dl, get(XCore::MKMSK_rus), Reg).addImm(N);
+ }
+ if (isImmU16(Value)) {
+ int Opcode = isImmU6(Value) ? XCore::LDC_ru6 : XCore::LDC_lru6;
+ return BuildMI(MBB, MI, dl, get(Opcode), Reg).addImm(Value);
+ }
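+ // For anything else, materialise the value from a constant-pool entry.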
+ MachineConstantPool *ConstantPool = MBB.getParent()->getConstantPool();
+ const Constant *C = ConstantInt::get(
+ Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Value);
+ unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
+ return BuildMI(MBB, MI, dl, get(XCore::LDWCP_lru6), Reg)
+ .addConstantPoolIndex(Idx);
+}
diff --git a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.h b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.h
index 4429b07..e0be96b 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.h
@@ -32,55 +32,61 @@ public:
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
- virtual const TargetRegisterInfo &getRegisterInfo() const { return RI; }
+ const TargetRegisterInfo &getRegisterInfo() const { return RI; }
/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
- virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
-
+ unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
+
/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
- virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
-
- virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const;
-
- virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const;
-
- virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
-
- virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const;
-
- virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
-
- virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
-
- virtual bool ReverseBranchCondition(
- SmallVectorImpl<MachineOperand> &Cond) const;
+ unsigned isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
+
+ bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const override;
+
+ unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const override;
+
+ unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
+
+ void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const override;
+
+ void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+
+ void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+
+ bool ReverseBranchCondition(
+ SmallVectorImpl<MachineOperand> &Cond) const override;
+
+ // Emit code before MI to load an immediate value into physical register Reg.
+ // Returns an iterator to the new instruction.
+ MachineBasicBlock::iterator loadImmediate(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned Reg, uint64_t Value) const;
};
}
diff --git a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td
index 934a707..00cb705 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td
+++ b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td
@@ -35,6 +35,11 @@ def XCoreBranchLink : SDNode<"XCoreISD::BL",SDT_XCoreBranchLink,
def XCoreRetsp : SDNode<"XCoreISD::RETSP", SDTBrind,
[SDNPHasChain, SDNPOptInGlue, SDNPMayLoad, SDNPVariadic]>;
+def SDT_XCoreEhRet : SDTypeProfile<0, 2,
+ [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
+def XCoreEhRet : SDNode<"XCoreISD::EH_RETURN", SDT_XCoreEhRet,
+ [SDNPHasChain, SDNPOptInGlue]>;
+
def SDT_XCoreBR_JT : SDTypeProfile<0, 2,
[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
@@ -56,10 +61,17 @@ def dprelwrapper : SDNode<"XCoreISD::DPRelativeWrapper", SDT_XCoreAddress,
def cprelwrapper : SDNode<"XCoreISD::CPRelativeWrapper", SDT_XCoreAddress,
[]>;
+def frametoargsoffset : SDNode<"XCoreISD::FRAME_TO_ARGS_OFFSET", SDTIntLeaf,
+ []>;
+
def SDT_XCoreStwsp : SDTypeProfile<0, 2, [SDTCisInt<1>]>;
def XCoreStwsp : SDNode<"XCoreISD::STWSP", SDT_XCoreStwsp,
[SDNPHasChain, SDNPMayStore]>;
+def SDT_XCoreLdwsp : SDTypeProfile<1, 1, [SDTCisInt<1>]>;
+def XCoreLdwsp : SDNode<"XCoreISD::LDWSP", SDT_XCoreLdwsp,
+ [SDNPHasChain, SDNPMayLoad]>;
+
// These are target-independent nodes, but have target-specific formats.
def SDT_XCoreCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>;
def SDT_XCoreCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>,
@@ -326,6 +338,16 @@ def ADJCALLSTACKUP : PseudoInstXCore<(outs), (ins i32imm:$amt1, i32imm:$amt2),
[(callseq_end timm:$amt1, timm:$amt2)]>;
}
+let isReMaterializable = 1 in
+def FRAME_TO_ARGS_OFFSET : PseudoInstXCore<(outs GRRegs:$dst), (ins),
+ "# FRAME_TO_ARGS_OFFSET $dst",
+ [(set GRRegs:$dst, (frametoargsoffset))]>;
+
+let isReturn = 1, isTerminator = 1, isBarrier = 1 in
+def EH_RETURN : PseudoInstXCore<(outs), (ins GRRegs:$s, GRRegs:$handler),
+ "# EH_RETURN $s, $handler",
+ [(XCoreEhRet GRRegs:$s, GRRegs:$handler)]>;
+
def LDWFI : PseudoInstXCore<(outs GRRegs:$dst), (ins MEMii:$addr),
"# LDWFI $dst, $addr",
[(set GRRegs:$dst, (load ADDRspii:$addr))]>;
@@ -563,10 +585,12 @@ def STWSP_lru6 : _FLRU6<0b010101, (outs), (ins RRegs:$a, i32imm:$b),
let mayLoad=1 in {
def LDWSP_ru6 : _FRU6<0b010111, (outs RRegs:$a), (ins i32imm:$b),
- "ldw $a, sp[$b]", []>;
+ "ldw $a, sp[$b]",
+ [(set RRegs:$a, (XCoreLdwsp immU6:$b))]>;
def LDWSP_lru6 : _FLRU6<0b010111, (outs RRegs:$a), (ins i32imm:$b),
- "ldw $a, sp[$b]", []>;
+ "ldw $a, sp[$b]",
+ [(set RRegs:$a, (XCoreLdwsp immU16:$b))]>;
}
let neverHasSideEffects = 1 in {
@@ -694,10 +718,10 @@ def BLACP_u10 : _FU10<0b111000, (outs), (ins i32imm:$a), "bla cp[$a]", []>;
def BLACP_lu10 : _FLU10<0b111000, (outs), (ins i32imm:$a), "bla cp[$a]", []>;
def BLRF_u10 : _FU10<0b110100, (outs), (ins pcrel_imm:$a), "bl $a",
- [(XCoreBranchLink immU10:$a)]>;
+ []>;
def BLRF_lu10 : _FLU10<0b110100, (outs), (ins pcrel_imm:$a), "bl $a",
- [(XCoreBranchLink immU20:$a)]>;
+ [(XCoreBranchLink tglobaladdr:$a)]>;
def BLRB_u10 : _FU10<0b110101, (outs), (ins pcrel_imm_neg:$a), "bl $a", []>;
@@ -995,7 +1019,8 @@ def SETEV_1r : _F1R<0b001111, (outs), (ins GRRegs:$a),
def DGETREG_1r : _F1R<0b001110, (outs GRRegs:$a), (ins), "dgetreg $a", []>;
-def EDU_1r : _F1R<0b000000, (outs), (ins GRRegs:$a), "edu res[$a]", []>;
+def EDU_1r : _F1R<0b000000, (outs), (ins GRRegs:$a), "edu res[$a]",
+ [(int_xcore_edu GRRegs:$a)]>;
def EEU_1r : _F1R<0b000001, (outs), (ins GRRegs:$a),
"eeu res[$a]",
@@ -1009,7 +1034,8 @@ def WAITET_1R : _F1R<0b000010, (outs), (ins GRRegs:$a), "waitet $a", []>;
def TSTART_1R : _F1R<0b000110, (outs), (ins GRRegs:$a), "start t[$a]", []>;
-def CLRPT_1R : _F1R<0b100000, (outs), (ins GRRegs:$a), "clrpt res[$a]", []>;
+def CLRPT_1R : _F1R<0b100000, (outs), (ins GRRegs:$a), "clrpt res[$a]",
+ [(int_xcore_clrpt GRRegs:$a)]>;
// Zero operand short
@@ -1087,7 +1113,6 @@ def WAITEU_0R : _F0R<0b0000001100, (outs), (ins),
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//
-def : Pat<(XCoreBranchLink tglobaladdr:$addr), (BLRF_lu10 tglobaladdr:$addr)>;
def : Pat<(XCoreBranchLink texternalsym:$addr), (BLRF_lu10 texternalsym:$addr)>;
/// sext_inreg
@@ -1286,3 +1311,9 @@ def : Pat<(setgt GRRegs:$lhs, -1),
def : Pat<(sra (shl GRRegs:$src, immBpwSubBitp:$imm), immBpwSubBitp:$imm),
(SEXT_rus GRRegs:$src, (bpwsub_xform immBpwSubBitp:$imm))>;
+
+def : Pat<(load (cprelwrapper tconstpool:$b)),
+ (LDWCP_lru6 tconstpool:$b)>;
+
+def : Pat<(cprelwrapper tconstpool:$b),
+ (LDAWCP_lu6 tconstpool:$b)>;
diff --git a/contrib/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp b/contrib/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp
index afce753..ac3bae5 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp
@@ -17,13 +17,13 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
-#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/NoFolder.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/NoFolder.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#define DEBUG_TYPE "xcore-lower-thread-local"
@@ -48,7 +48,7 @@ namespace {
bool lowerGlobal(GlobalVariable *GV);
- bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
};
}
@@ -127,10 +127,7 @@ createReplacementInstr(ConstantExpr *CE, Instruction *Instr) {
static bool replaceConstantExprOp(ConstantExpr *CE, Pass *P) {
do {
- SmallVector<WeakVH,8> WUsers;
- for (Value::use_iterator I = CE->use_begin(), E = CE->use_end();
- I != E; ++I)
- WUsers.push_back(WeakVH(*I));
+ SmallVector<WeakVH,8> WUsers(CE->user_begin(), CE->user_end());
std::sort(WUsers.begin(), WUsers.end());
WUsers.erase(std::unique(WUsers.begin(), WUsers.end()), WUsers.end());
while (!WUsers.empty())
@@ -154,17 +151,17 @@ static bool replaceConstantExprOp(ConstantExpr *CE, Pass *P) {
return false;
}
}
- } while (CE->hasNUsesOrMore(1)); // We need to check becasue a recursive
- // sibbling may have used 'CE' when createReplacementInstr was called.
+ } while (CE->hasNUsesOrMore(1)); // We need to check because a recursive
+ // sibling may have used 'CE' when createReplacementInstr was called.
CE->destroyConstant();
return true;
}
static bool rewriteNonInstructionUses(GlobalVariable *GV, Pass *P) {
SmallVector<WeakVH,8> WUsers;
- for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I)
- if (!isa<Instruction>(*I))
- WUsers.push_back(WeakVH(*I));
+ for (User *U : GV->users())
+ if (!isa<Instruction>(U))
+ WUsers.push_back(WeakVH(U));
while (!WUsers.empty())
if (WeakVH WU = WUsers.pop_back_val()) {
ConstantExpr *CE = dyn_cast<ConstantExpr>(WU);
@@ -192,18 +189,19 @@ bool XCoreLowerThreadLocal::lowerGlobal(GlobalVariable *GV) {
// Create replacement global.
ArrayType *NewType = createLoweredType(GV->getType()->getElementType());
- Constant *NewInitializer = 0;
+ Constant *NewInitializer = nullptr;
if (GV->hasInitializer())
NewInitializer = createLoweredInitializer(NewType,
GV->getInitializer());
GlobalVariable *NewGV =
new GlobalVariable(*M, NewType, GV->isConstant(), GV->getLinkage(),
- NewInitializer, "", 0, GlobalVariable::NotThreadLocal,
+ NewInitializer, "", nullptr,
+ GlobalVariable::NotThreadLocal,
GV->getType()->getAddressSpace(),
GV->isExternallyInitialized());
// Update uses.
- SmallVector<User *, 16> Users(GV->use_begin(), GV->use_end());
+ SmallVector<User *, 16> Users(GV->user_begin(), GV->user_end());
for (unsigned I = 0, E = Users.size(); I != E; ++I) {
User *U = Users[I];
Instruction *Inst = cast<Instruction>(U);
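The loop rewrites above follow the LLVM 3.5 use-list split: use_begin()/uses() now iterate over Use objects while user_begin()/users() iterate over the owning User*, so the old push_back loop collapses into a range constructor. A minimal equivalence sketch (CE stands for any ConstantExpr* in scope):

    // The range constructor builds the same list as the removed loop;
    // WeakVH accepts a User* through its Value* constructor.
    llvm::SmallVector<llvm::WeakVH, 8> WUsers(CE->user_begin(), CE->user_end());
    // equivalent to:
    //   for (llvm::User *U : CE->users())
    //     WUsers.push_back(llvm::WeakVH(U));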
diff --git a/contrib/llvm/lib/Target/XCore/XCoreMCInstLower.cpp b/contrib/llvm/lib/Target/XCore/XCoreMCInstLower.cpp
index def2673..dfdadcf 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreMCInstLower.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreMCInstLower.cpp
@@ -17,10 +17,10 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
-#include "llvm/Target/Mangler.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.cpp b/contrib/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.cpp
index 7ca0672..9ef9752 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.cpp
@@ -8,7 +8,65 @@
//===----------------------------------------------------------------------===//
#include "XCoreMachineFunctionInfo.h"
+#include "XCoreInstrInfo.h"
+#include "llvm/IR/Function.h"
using namespace llvm;
void XCoreFunctionInfo::anchor() { }
+
+bool XCoreFunctionInfo::isLargeFrame(const MachineFunction &MF) const {
+ if (CachedEStackSize == -1) {
+ CachedEStackSize = MF.getFrameInfo()->estimateStackSize(MF);
+ }
+ // isLargeFrame() is used when deciding if spill slots should be added to
+ // allow eliminateFrameIndex() to scavenge registers.
+ // This is only required when there is no FP and offsets are greater than
+ // ~256KB (~64Kwords). Thus only for code run on the emulator!
+ //
+ // The arbitrary value of 0xf000 allows frames of up to ~240KB before spill
+ // slots are added for the use of eliminateFrameIndex() register scavenging.
+ // For frames less than 240KB, it is assumed that there will be less than
+ // 16KB of function arguments.
+ return CachedEStackSize > 0xf000;
+}
+
+int XCoreFunctionInfo::createLRSpillSlot(MachineFunction &MF) {
+ if (LRSpillSlotSet) {
+ return LRSpillSlot;
+ }
+ const TargetRegisterClass *RC = &XCore::GRRegsRegClass;
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ if (! MF.getFunction()->isVarArg()) {
+ // A fixed offset of 0 allows us to save / restore LR using entsp / retsp.
+ LRSpillSlot = MFI->CreateFixedObject(RC->getSize(), 0, true);
+ } else {
+ LRSpillSlot = MFI->CreateStackObject(RC->getSize(), RC->getAlignment(), true);
+ }
+ LRSpillSlotSet = true;
+ return LRSpillSlot;
+}
+
+int XCoreFunctionInfo::createFPSpillSlot(MachineFunction &MF) {
+ if (FPSpillSlotSet) {
+ return FPSpillSlot;
+ }
+ const TargetRegisterClass *RC = &XCore::GRRegsRegClass;
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ FPSpillSlot = MFI->CreateStackObject(RC->getSize(), RC->getAlignment(), true);
+ FPSpillSlotSet = true;
+ return FPSpillSlot;
+}
+
+const int* XCoreFunctionInfo::createEHSpillSlot(MachineFunction &MF) {
+ if (EHSpillSlotSet) {
+ return EHSpillSlot;
+ }
+ const TargetRegisterClass *RC = &XCore::GRRegsRegClass;
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ EHSpillSlot[0] = MFI->CreateStackObject(RC->getSize(), RC->getAlignment(), true);
+ EHSpillSlot[1] = MFI->CreateStackObject(RC->getSize(), RC->getAlignment(), true);
+ EHSpillSlotSet = true;
+ return EHSpillSlot;
+}
+
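isLargeFrame() memoizes MachineFrameInfo::estimateStackSize() in the mutable CachedEStackSize member, with -1 as the "not computed yet" sentinel, so the const query can be repeated cheaply. A standalone sketch of the same sentinel-caching idiom, with hypothetical names standing in for the MachineFunction plumbing:

    #include <functional>
    #include <utility>

    class FrameSizeCache {
      mutable int CachedSize = -1;        // -1 == "not computed yet", like CachedEStackSize
      std::function<int()> Estimate;      // stands in for estimateStackSize(MF)
    public:
      explicit FrameSizeCache(std::function<int()> E) : Estimate(std::move(E)) {}
      bool isLarge(int Threshold) const { // Threshold plays the role of 0xf000
        if (CachedSize == -1)
          CachedSize = Estimate();        // computed once, reused on later queries
        return CachedSize > Threshold;
      }
    };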
diff --git a/contrib/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.h b/contrib/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.h
index 69d5de3..212a5cf 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.h
@@ -27,40 +27,77 @@ class Function;
/// XCore target-specific information for each MachineFunction.
class XCoreFunctionInfo : public MachineFunctionInfo {
virtual void anchor();
- bool UsesLR;
+ bool LRSpillSlotSet;
int LRSpillSlot;
+ bool FPSpillSlotSet;
int FPSpillSlot;
+ bool EHSpillSlotSet;
+ int EHSpillSlot[2];
+ unsigned ReturnStackOffset;
+ bool ReturnStackOffsetSet;
int VarArgsFrameIndex;
- std::vector<std::pair<MCSymbol*, CalleeSavedInfo> > SpillLabels;
+ mutable int CachedEStackSize;
+ std::vector<std::pair<MachineBasicBlock::iterator, CalleeSavedInfo>>
+ SpillLabels;
public:
XCoreFunctionInfo() :
- UsesLR(false),
- LRSpillSlot(0),
- FPSpillSlot(0),
- VarArgsFrameIndex(0) {}
+ LRSpillSlotSet(false),
+ FPSpillSlotSet(false),
+ EHSpillSlotSet(false),
+ ReturnStackOffsetSet(false),
+ VarArgsFrameIndex(0),
+ CachedEStackSize(-1) {}
explicit XCoreFunctionInfo(MachineFunction &MF) :
- UsesLR(false),
- LRSpillSlot(0),
- FPSpillSlot(0),
- VarArgsFrameIndex(0) {}
+ LRSpillSlotSet(false),
+ FPSpillSlotSet(false),
+ EHSpillSlotSet(false),
+ ReturnStackOffsetSet(false),
+ VarArgsFrameIndex(0),
+ CachedEStackSize(-1) {}
~XCoreFunctionInfo() {}
void setVarArgsFrameIndex(int off) { VarArgsFrameIndex = off; }
int getVarArgsFrameIndex() const { return VarArgsFrameIndex; }
-
- void setUsesLR(bool val) { UsesLR = val; }
- bool getUsesLR() const { return UsesLR; }
-
- void setLRSpillSlot(int off) { LRSpillSlot = off; }
- int getLRSpillSlot() const { return LRSpillSlot; }
-
- void setFPSpillSlot(int off) { FPSpillSlot = off; }
- int getFPSpillSlot() const { return FPSpillSlot; }
-
- std::vector<std::pair<MCSymbol*, CalleeSavedInfo> > &getSpillLabels() {
+
+ int createLRSpillSlot(MachineFunction &MF);
+ bool hasLRSpillSlot() { return LRSpillSlotSet; }
+ int getLRSpillSlot() const {
+ assert(LRSpillSlotSet && "LR Spill slot not set");
+ return LRSpillSlot;
+ }
+
+ int createFPSpillSlot(MachineFunction &MF);
+ bool hasFPSpillSlot() { return FPSpillSlotSet; }
+ int getFPSpillSlot() const {
+ assert(FPSpillSlotSet && "FP Spill slot not set");
+ return FPSpillSlot;
+ }
+
+ const int* createEHSpillSlot(MachineFunction &MF);
+ bool hasEHSpillSlot() { return EHSpillSlotSet; }
+ const int* getEHSpillSlot() const {
+ assert(EHSpillSlotSet && "EH Spill slot not set");
+ return EHSpillSlot;
+ }
+
+ void setReturnStackOffset(unsigned value) {
+ assert(!ReturnStackOffsetSet && "Return stack offset set twice");
+ ReturnStackOffset = value;
+ ReturnStackOffsetSet = true;
+ }
+
+ unsigned getReturnStackOffset() const {
+ assert(ReturnStackOffsetSet && "Return stack offset not set");
+ return ReturnStackOffset;
+ }
+
+ bool isLargeFrame(const MachineFunction &MF) const;
+
+ std::vector<std::pair<MachineBasicBlock::iterator, CalleeSavedInfo>> &
+ getSpillLabels() {
return SpillLabels;
}
};
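The setter/getter pairs are replaced by a create/has/get protocol: createXSpillSlot() allocates the frame index once and is idempotent, hasXSpillSlot() reports whether it exists, and getXSpillSlot() asserts if it was never created. An illustrative caller, with an assumed helper name since the real callers (frame lowering) sit outside this hunk:

    #include <cassert>

    // Hypothetical caller showing the intended order of operations.
    void reserveLRSlot(llvm::MachineFunction &MF, llvm::XCoreFunctionInfo &XFI) {
      int FI = XFI.createLRSpillSlot(MF);              // creates, or returns the existing slot
      assert(XFI.hasLRSpillSlot() && "slot exists after create");
      assert(XFI.getLRSpillSlot() == FI && "get returns the cached index");
      (void)FI;
    }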
diff --git a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
index dbd2f52..316c82c 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
@@ -13,6 +13,7 @@
#include "XCoreRegisterInfo.h"
#include "XCore.h"
+#include "XCoreInstrInfo.h"
#include "XCoreMachineFunctionInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
@@ -26,17 +27,19 @@
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
-#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "xcore-reg-info"
+
#define GET_REGINFO_TARGET_DESC
#include "XCoreGenRegisterInfo.inc"
-using namespace llvm;
-
XCoreRegisterInfo::XCoreRegisterInfo()
: XCoreGenRegisterInfo(XCore::LR) {
}
@@ -54,18 +57,173 @@ static inline bool isImmU16(unsigned val) {
return val < (1 << 16);
}
+
+static void InsertFPImmInst(MachineBasicBlock::iterator II,
+ const XCoreInstrInfo &TII,
+ unsigned Reg, unsigned FrameReg, int Offset ) {
+ MachineInstr &MI = *II;
+ MachineBasicBlock &MBB = *MI.getParent();
+ DebugLoc dl = MI.getDebugLoc();
+
+ switch (MI.getOpcode()) {
+ case XCore::LDWFI:
+ BuildMI(MBB, II, dl, TII.get(XCore::LDW_2rus), Reg)
+ .addReg(FrameReg)
+ .addImm(Offset)
+ .addMemOperand(*MI.memoperands_begin());
+ break;
+ case XCore::STWFI:
+ BuildMI(MBB, II, dl, TII.get(XCore::STW_2rus))
+ .addReg(Reg, getKillRegState(MI.getOperand(0).isKill()))
+ .addReg(FrameReg)
+ .addImm(Offset)
+ .addMemOperand(*MI.memoperands_begin());
+ break;
+ case XCore::LDAWFI:
+ BuildMI(MBB, II, dl, TII.get(XCore::LDAWF_l2rus), Reg)
+ .addReg(FrameReg)
+ .addImm(Offset);
+ break;
+ default:
+ llvm_unreachable("Unexpected Opcode");
+ }
+}
+
+static void InsertFPConstInst(MachineBasicBlock::iterator II,
+ const XCoreInstrInfo &TII,
+ unsigned Reg, unsigned FrameReg,
+ int Offset, RegScavenger *RS ) {
+ assert(RS && "requiresRegisterScavenging failed");
+ MachineInstr &MI = *II;
+ MachineBasicBlock &MBB = *MI.getParent();
+ DebugLoc dl = MI.getDebugLoc();
+ unsigned ScratchOffset = RS->scavengeRegister(&XCore::GRRegsRegClass, II, 0);
+ RS->setUsed(ScratchOffset);
+ TII.loadImmediate(MBB, II, ScratchOffset, Offset);
+
+ switch (MI.getOpcode()) {
+ case XCore::LDWFI:
+ BuildMI(MBB, II, dl, TII.get(XCore::LDW_3r), Reg)
+ .addReg(FrameReg)
+ .addReg(ScratchOffset, RegState::Kill)
+ .addMemOperand(*MI.memoperands_begin());
+ break;
+ case XCore::STWFI:
+ BuildMI(MBB, II, dl, TII.get(XCore::STW_l3r))
+ .addReg(Reg, getKillRegState(MI.getOperand(0).isKill()))
+ .addReg(FrameReg)
+ .addReg(ScratchOffset, RegState::Kill)
+ .addMemOperand(*MI.memoperands_begin());
+ break;
+ case XCore::LDAWFI:
+ BuildMI(MBB, II, dl, TII.get(XCore::LDAWF_l3r), Reg)
+ .addReg(FrameReg)
+ .addReg(ScratchOffset, RegState::Kill);
+ break;
+ default:
+ llvm_unreachable("Unexpected Opcode");
+ }
+}
+
+static void InsertSPImmInst(MachineBasicBlock::iterator II,
+ const XCoreInstrInfo &TII,
+ unsigned Reg, int Offset) {
+ MachineInstr &MI = *II;
+ MachineBasicBlock &MBB = *MI.getParent();
+ DebugLoc dl = MI.getDebugLoc();
+ bool isU6 = isImmU6(Offset);
+
+ switch (MI.getOpcode()) {
+ int NewOpcode;
+ case XCore::LDWFI:
+ NewOpcode = (isU6) ? XCore::LDWSP_ru6 : XCore::LDWSP_lru6;
+ BuildMI(MBB, II, dl, TII.get(NewOpcode), Reg)
+ .addImm(Offset)
+ .addMemOperand(*MI.memoperands_begin());
+ break;
+ case XCore::STWFI:
+ NewOpcode = (isU6) ? XCore::STWSP_ru6 : XCore::STWSP_lru6;
+ BuildMI(MBB, II, dl, TII.get(NewOpcode))
+ .addReg(Reg, getKillRegState(MI.getOperand(0).isKill()))
+ .addImm(Offset)
+ .addMemOperand(*MI.memoperands_begin());
+ break;
+ case XCore::LDAWFI:
+ NewOpcode = (isU6) ? XCore::LDAWSP_ru6 : XCore::LDAWSP_lru6;
+ BuildMI(MBB, II, dl, TII.get(NewOpcode), Reg)
+ .addImm(Offset);
+ break;
+ default:
+ llvm_unreachable("Unexpected Opcode");
+ }
+}
+
+static void InsertSPConstInst(MachineBasicBlock::iterator II,
+ const XCoreInstrInfo &TII,
+ unsigned Reg, int Offset, RegScavenger *RS ) {
+ assert(RS && "requiresRegisterScavenging failed");
+ MachineInstr &MI = *II;
+ MachineBasicBlock &MBB = *MI.getParent();
+ DebugLoc dl = MI.getDebugLoc();
+ unsigned OpCode = MI.getOpcode();
+
+ unsigned ScratchBase;
+ if (OpCode==XCore::STWFI) {
+ ScratchBase = RS->scavengeRegister(&XCore::GRRegsRegClass, II, 0);
+ RS->setUsed(ScratchBase);
+ } else
+ ScratchBase = Reg;
+ BuildMI(MBB, II, dl, TII.get(XCore::LDAWSP_ru6), ScratchBase).addImm(0);
+ unsigned ScratchOffset = RS->scavengeRegister(&XCore::GRRegsRegClass, II, 0);
+ RS->setUsed(ScratchOffset);
+ TII.loadImmediate(MBB, II, ScratchOffset, Offset);
+
+ switch (OpCode) {
+ case XCore::LDWFI:
+ BuildMI(MBB, II, dl, TII.get(XCore::LDW_3r), Reg)
+ .addReg(ScratchBase, RegState::Kill)
+ .addReg(ScratchOffset, RegState::Kill)
+ .addMemOperand(*MI.memoperands_begin());
+ break;
+ case XCore::STWFI:
+ BuildMI(MBB, II, dl, TII.get(XCore::STW_l3r))
+ .addReg(Reg, getKillRegState(MI.getOperand(0).isKill()))
+ .addReg(ScratchBase, RegState::Kill)
+ .addReg(ScratchOffset, RegState::Kill)
+ .addMemOperand(*MI.memoperands_begin());
+ break;
+ case XCore::LDAWFI:
+ BuildMI(MBB, II, dl, TII.get(XCore::LDAWF_l3r), Reg)
+ .addReg(ScratchBase, RegState::Kill)
+ .addReg(ScratchOffset, RegState::Kill);
+ break;
+ default:
+ llvm_unreachable("Unexpected Opcode");
+ }
+}
+
bool XCoreRegisterInfo::needsFrameMoves(const MachineFunction &MF) {
return MF.getMMI().hasDebugInfo() ||
MF.getFunction()->needsUnwindTableEntry();
}
-const uint16_t* XCoreRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF)
+const MCPhysReg* XCoreRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF)
const {
- static const uint16_t CalleeSavedRegs[] = {
+ // The callee saved registers LR & FP are explicitly handled during
+ // emitPrologue & emitEpilogue and related functions.
+ static const MCPhysReg CalleeSavedRegs[] = {
+ XCore::R4, XCore::R5, XCore::R6, XCore::R7,
+ XCore::R8, XCore::R9, XCore::R10,
+ 0
+ };
+ static const MCPhysReg CalleeSavedRegsFP[] = {
XCore::R4, XCore::R5, XCore::R6, XCore::R7,
- XCore::R8, XCore::R9, XCore::R10, XCore::LR,
+ XCore::R8, XCore::R9,
0
};
+ const TargetFrameLowering *TFI = MF->getTarget().getFrameLowering();
+ if (TFI->hasFP(*MF))
+ return CalleeSavedRegsFP;
return CalleeSavedRegs;
}
@@ -85,15 +243,12 @@ BitVector XCoreRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
bool
XCoreRegisterInfo::requiresRegisterScavenging(const MachineFunction &MF) const {
- const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
-
- // TODO can we estimate stack size?
- return TFI->hasFP(MF);
+ return true;
}
bool
XCoreRegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
- return requiresRegisterScavenging(MF);
+ return true;
}
bool
@@ -107,12 +262,13 @@ XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
RegScavenger *RS) const {
assert(SPAdj == 0 && "Unexpected");
MachineInstr &MI = *II;
- DebugLoc dl = MI.getDebugLoc();
MachineOperand &FrameOp = MI.getOperand(FIOperandNum);
int FrameIndex = FrameOp.getIndex();
MachineFunction &MF = *MI.getParent()->getParent();
- const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ const XCoreInstrInfo &TII =
+ *static_cast<const XCoreInstrInfo*>(MF.getTarget().getInstrInfo());
+
const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex);
int StackSize = MF.getFrameInfo()->getStackSize();
@@ -143,116 +299,28 @@ XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
assert(Offset%4 == 0 && "Misaligned stack offset");
-
DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n");
-
Offset/=4;
- bool FP = TFI->hasFP(MF);
-
unsigned Reg = MI.getOperand(0).getReg();
- bool isKill = MI.getOpcode() == XCore::STWFI && MI.getOperand(0).isKill();
-
assert(XCore::GRRegsRegClass.contains(Reg) && "Unexpected register operand");
-
- MachineBasicBlock &MBB = *MI.getParent();
-
- if (FP) {
- bool isUs = isImmUs(Offset);
-
- if (!isUs) {
- if (!RS)
- report_fatal_error("eliminateFrameIndex Frame size too big: " +
- Twine(Offset));
- unsigned ScratchReg = RS->scavengeRegister(&XCore::GRRegsRegClass, II,
- SPAdj);
- loadConstant(MBB, II, ScratchReg, Offset, dl);
- switch (MI.getOpcode()) {
- case XCore::LDWFI:
- BuildMI(MBB, II, dl, TII.get(XCore::LDW_3r), Reg)
- .addReg(FrameReg)
- .addReg(ScratchReg, RegState::Kill);
- break;
- case XCore::STWFI:
- BuildMI(MBB, II, dl, TII.get(XCore::STW_l3r))
- .addReg(Reg, getKillRegState(isKill))
- .addReg(FrameReg)
- .addReg(ScratchReg, RegState::Kill);
- break;
- case XCore::LDAWFI:
- BuildMI(MBB, II, dl, TII.get(XCore::LDAWF_l3r), Reg)
- .addReg(FrameReg)
- .addReg(ScratchReg, RegState::Kill);
- break;
- default:
- llvm_unreachable("Unexpected Opcode");
- }
- } else {
- switch (MI.getOpcode()) {
- case XCore::LDWFI:
- BuildMI(MBB, II, dl, TII.get(XCore::LDW_2rus), Reg)
- .addReg(FrameReg)
- .addImm(Offset);
- break;
- case XCore::STWFI:
- BuildMI(MBB, II, dl, TII.get(XCore::STW_2rus))
- .addReg(Reg, getKillRegState(isKill))
- .addReg(FrameReg)
- .addImm(Offset);
- break;
- case XCore::LDAWFI:
- BuildMI(MBB, II, dl, TII.get(XCore::LDAWF_l2rus), Reg)
- .addReg(FrameReg)
- .addImm(Offset);
- break;
- default:
- llvm_unreachable("Unexpected Opcode");
- }
- }
+
+ if (TFI->hasFP(MF)) {
+ if (isImmUs(Offset))
+ InsertFPImmInst(II, TII, Reg, FrameReg, Offset);
+ else
+ InsertFPConstInst(II, TII, Reg, FrameReg, Offset, RS);
} else {
- bool isU6 = isImmU6(Offset);
- if (!isU6 && !isImmU16(Offset))
- report_fatal_error("eliminateFrameIndex Frame size too big: " +
- Twine(Offset));
-
- switch (MI.getOpcode()) {
- int NewOpcode;
- case XCore::LDWFI:
- NewOpcode = (isU6) ? XCore::LDWSP_ru6 : XCore::LDWSP_lru6;
- BuildMI(MBB, II, dl, TII.get(NewOpcode), Reg)
- .addImm(Offset);
- break;
- case XCore::STWFI:
- NewOpcode = (isU6) ? XCore::STWSP_ru6 : XCore::STWSP_lru6;
- BuildMI(MBB, II, dl, TII.get(NewOpcode))
- .addReg(Reg, getKillRegState(isKill))
- .addImm(Offset);
- break;
- case XCore::LDAWFI:
- NewOpcode = (isU6) ? XCore::LDAWSP_ru6 : XCore::LDAWSP_lru6;
- BuildMI(MBB, II, dl, TII.get(NewOpcode), Reg)
- .addImm(Offset);
- break;
- default:
- llvm_unreachable("Unexpected Opcode");
- }
+ if (isImmU16(Offset))
+ InsertSPImmInst(II, TII, Reg, Offset);
+ else
+ InsertSPConstInst(II, TII, Reg, Offset, RS);
}
// Erase old instruction.
+ MachineBasicBlock &MBB = *MI.getParent();
MBB.erase(II);
}
-void XCoreRegisterInfo::
-loadConstant(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned DstReg, int64_t Value, DebugLoc dl) const {
- // TODO use mkmsk if possible.
- if (!isImmU16(Value)) {
- // TODO use constant pool.
- report_fatal_error("loadConstant value too big " + Twine(Value));
- }
- int Opcode = isImmU6(Value) ? XCore::LDC_ru6 : XCore::LDC_lru6;
- const TargetInstrInfo &TII = *MBB.getParent()->getTarget().getInstrInfo();
- BuildMI(MBB, I, dl, TII.get(Opcode), DstReg).addImm(Value);
-}
unsigned XCoreRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
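The Offset handled by the new Insert* helpers is already in words (Offset /= 4 above), so with 4-byte words the short ru6 SP forms reach 256 bytes and the long lru6 forms reach 256 KiB; anything beyond that falls back to InsertSPConstInst() and register scavenging. The reach, as a compile-time check:

    // Word-scaled reach of the SP-relative encodings, assuming 4-byte words.
    static_assert((1 << 6)  * 4 == 256,        "LDWSP_ru6/STWSP_ru6 reach 256 bytes");
    static_assert((1 << 16) * 4 == 256 * 1024, "LDWSP_lru6/STWSP_lru6 reach 256 KiB");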
diff --git a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h
index 2370c62..aa617a0 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h
@@ -24,40 +24,28 @@ namespace llvm {
class TargetInstrInfo;
struct XCoreRegisterInfo : public XCoreGenRegisterInfo {
-private:
- void loadConstant(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DstReg, int64_t Value, DebugLoc dl) const;
-
- void storeToStack(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned SrcReg, int Offset, DebugLoc dl) const;
-
- void loadFromStack(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DstReg, int Offset, DebugLoc dl) const;
-
public:
XCoreRegisterInfo();
/// Code Generation virtual methods...
- const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
+ const MCPhysReg *
+ getCalleeSavedRegs(const MachineFunction *MF =nullptr) const override;
- BitVector getReservedRegs(const MachineFunction &MF) const;
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
- bool requiresRegisterScavenging(const MachineFunction &MF) const;
+ bool requiresRegisterScavenging(const MachineFunction &MF) const override;
- bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const;
+ bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const override;
- bool useFPForScavengingIndex(const MachineFunction &MF) const;
+ bool useFPForScavengingIndex(const MachineFunction &MF) const override;
void eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, unsigned FIOperandNum,
- RegScavenger *RS = NULL) const;
+ RegScavenger *RS = nullptr) const override;
// Debug information queries.
- unsigned getFrameRegister(const MachineFunction &MF) const;
+ unsigned getFrameRegister(const MachineFunction &MF) const override;
//! Return whether to emit frame moves
static bool needsFrameMoves(const MachineFunction &MF);
diff --git a/contrib/llvm/lib/Target/XCore/XCoreSelectionDAGInfo.cpp b/contrib/llvm/lib/Target/XCore/XCoreSelectionDAGInfo.cpp
index 44aeb60..91b33fd 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreSelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreSelectionDAGInfo.cpp
@@ -11,13 +11,48 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "xcore-selectiondag-info"
#include "XCoreTargetMachine.h"
using namespace llvm;
-XCoreSelectionDAGInfo::XCoreSelectionDAGInfo(const XCoreTargetMachine &TM)
- : TargetSelectionDAGInfo(TM) {
-}
+#define DEBUG_TYPE "xcore-selectiondag-info"
+
+XCoreSelectionDAGInfo::XCoreSelectionDAGInfo(const DataLayout &DL)
+ : TargetSelectionDAGInfo(&DL) {}
XCoreSelectionDAGInfo::~XCoreSelectionDAGInfo() {
}
+
+SDValue XCoreSelectionDAGInfo::
+EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
+ SDValue Dst, SDValue Src, SDValue Size, unsigned Align,
+ bool isVolatile, bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const
+{
+ unsigned SizeBitWidth = Size.getValueType().getSizeInBits();
+ // Call __memcpy_4 if the src, dst and size are all 4 byte aligned.
+ if (!AlwaysInline && (Align & 3) == 0 &&
+ DAG.MaskedValueIsZero(Size, APInt(SizeBitWidth, 3))) {
+ const TargetLowering &TLI = *DAG.getTarget().getTargetLowering();
+ TargetLowering::ArgListTy Args;
+ TargetLowering::ArgListEntry Entry;
+ Entry.Ty = TLI.getDataLayout()->getIntPtrType(*DAG.getContext());
+ Entry.Node = Dst; Args.push_back(Entry);
+ Entry.Node = Src; Args.push_back(Entry);
+ Entry.Node = Size; Args.push_back(Entry);
+
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl).setChain(Chain)
+ .setCallee(TLI.getLibcallCallingConv(RTLIB::MEMCPY),
+ Type::getVoidTy(*DAG.getContext()),
+ DAG.getExternalSymbol("__memcpy_4", TLI.getPointerTy()),
+ std::move(Args), 0)
+ .setDiscardResult();
+
+ std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);
+ return CallResult.second;
+ }
+
+ // Otherwise have the target-independent code call memcpy.
+ return SDValue();
+}
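The guard in EmitTargetCodeForMemcpy() only calls __memcpy_4 when both pointers are 4-byte aligned and the length is provably a multiple of 4: MaskedValueIsZero(Size, 3) asks the DAG whether the two low bits of Size are known to be zero. The same test written as a scalar predicate, where KnownLowZeroBits is a hypothetical stand-in for that query:

    // Sketch of the __memcpy_4 eligibility check.
    bool canUseMemcpy4(unsigned Align, unsigned KnownLowZeroBits) {
      bool alignedTo4      = (Align & 3) == 0;       // mirrors (Align & 3) == 0
      bool sizeMultipleOf4 = KnownLowZeroBits >= 2;  // mirrors MaskedValueIsZero(Size, 3)
      return alignedTo4 && sizeMultipleOf4;
    }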
diff --git a/contrib/llvm/lib/Target/XCore/XCoreSelectionDAGInfo.h b/contrib/llvm/lib/Target/XCore/XCoreSelectionDAGInfo.h
index 0386968..0079de1 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreSelectionDAGInfo.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreSelectionDAGInfo.h
@@ -22,8 +22,17 @@ class XCoreTargetMachine;
class XCoreSelectionDAGInfo : public TargetSelectionDAGInfo {
public:
- explicit XCoreSelectionDAGInfo(const XCoreTargetMachine &TM);
+ explicit XCoreSelectionDAGInfo(const DataLayout &DL);
~XCoreSelectionDAGInfo();
+
+ SDValue
+ EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
+ SDValue Chain,
+ SDValue Op1, SDValue Op2,
+ SDValue Op3, unsigned Align, bool isVolatile,
+ bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const override;
};
}
diff --git a/contrib/llvm/lib/Target/XCore/XCoreSubtarget.cpp b/contrib/llvm/lib/Target/XCore/XCoreSubtarget.cpp
index 8cfb770..7227411 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreSubtarget.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreSubtarget.cpp
@@ -15,16 +15,18 @@
#include "XCore.h"
#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "xcore-subtarget"
+
#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "XCoreGenSubtargetInfo.inc"
-using namespace llvm;
-
void XCoreSubtarget::anchor() { }
-XCoreSubtarget::XCoreSubtarget(const std::string &TT,
- const std::string &CPU, const std::string &FS)
- : XCoreGenSubtargetInfo(TT, CPU, FS)
-{
-}
+XCoreSubtarget::XCoreSubtarget(const std::string &TT, const std::string &CPU,
+ const std::string &FS, const TargetMachine &TM)
+ : XCoreGenSubtargetInfo(TT, CPU, FS),
+ DL("e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:32-f64:32-a:0:32-n32"),
+ InstrInfo(), FrameLowering(*this), TLInfo(TM), TSInfo(DL) {}
diff --git a/contrib/llvm/lib/Target/XCore/XCoreSubtarget.h b/contrib/llvm/lib/Target/XCore/XCoreSubtarget.h
index 5ac4dbc..1e9810b 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreSubtarget.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreSubtarget.h
@@ -14,6 +14,11 @@
#ifndef XCORESUBTARGET_H
#define XCORESUBTARGET_H
+#include "XCoreFrameLowering.h"
+#include "XCoreISelLowering.h"
+#include "XCoreInstrInfo.h"
+#include "XCoreSelectionDAGInfo.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <string>
@@ -26,17 +31,31 @@ class StringRef;
class XCoreSubtarget : public XCoreGenSubtargetInfo {
virtual void anchor();
+ const DataLayout DL; // Calculates type size & alignment
+ XCoreInstrInfo InstrInfo;
+ XCoreFrameLowering FrameLowering;
+ XCoreTargetLowering TLInfo;
+ XCoreSelectionDAGInfo TSInfo;
public:
/// This constructor initializes the data members to match that
/// of the specified triple.
///
XCoreSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS);
+ const std::string &FS, const TargetMachine &TM);
/// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
+
+ const XCoreInstrInfo *getInstrInfo() const { return &InstrInfo; }
+ const XCoreFrameLowering *getFrameLowering() const { return &FrameLowering; }
+ const XCoreTargetLowering *getTargetLowering() const { return &TLInfo; }
+ const XCoreSelectionDAGInfo *getSelectionDAGInfo() const { return &TSInfo; }
+ const TargetRegisterInfo *getRegisterInfo() const {
+ return &InstrInfo.getRegisterInfo();
+ }
+ const DataLayout *getDataLayout() const { return &DL; }
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp b/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
index 9ae0b86..8d8bb38 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
@@ -25,14 +25,8 @@ XCoreTargetMachine::XCoreTargetMachine(const Target &T, StringRef TT,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
- : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- Subtarget(TT, CPU, FS),
- DL("e-p:32:32:32-a0:0:32-f32:32:32-f64:32:32-i1:8:32-i8:8:32-"
- "i16:16:32-i32:32:32-i64:32:32-n32"),
- InstrInfo(),
- FrameLowering(Subtarget),
- TLInfo(*this),
- TSInfo(*this) {
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ Subtarget(TT, CPU, FS, *this) {
initAsmInfo();
}
@@ -47,8 +41,9 @@ public:
return getTM<XCoreTargetMachine>();
}
- virtual bool addPreISel();
- virtual bool addInstSelector();
+ bool addPreISel() override;
+ bool addInstSelector() override;
+ bool addPreEmitPass() override;
};
} // namespace
@@ -66,6 +61,11 @@ bool XCorePassConfig::addInstSelector() {
return false;
}
+bool XCorePassConfig::addPreEmitPass() {
+ addPass(createXCoreFrameToArgsOffsetEliminationPass());
+ return false;
+}
+
// Force static initialization.
extern "C" void LLVMInitializeXCoreTarget() {
RegisterTargetMachine<XCoreTargetMachine> X(TheXCoreTarget);
diff --git a/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.h b/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.h
index a19a677..14c43bf 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.h
@@ -14,51 +14,43 @@
#ifndef XCORETARGETMACHINE_H
#define XCORETARGETMACHINE_H
-#include "XCoreFrameLowering.h"
-#include "XCoreISelLowering.h"
-#include "XCoreInstrInfo.h"
-#include "XCoreSelectionDAGInfo.h"
#include "XCoreSubtarget.h"
-#include "llvm/IR/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
namespace llvm {
class XCoreTargetMachine : public LLVMTargetMachine {
XCoreSubtarget Subtarget;
- const DataLayout DL; // Calculates type size & alignment
- XCoreInstrInfo InstrInfo;
- XCoreFrameLowering FrameLowering;
- XCoreTargetLowering TLInfo;
- XCoreSelectionDAGInfo TSInfo;
public:
XCoreTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
- virtual const XCoreInstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const XCoreFrameLowering *getFrameLowering() const {
- return &FrameLowering;
+ const XCoreInstrInfo *getInstrInfo() const override {
+ return getSubtargetImpl()->getInstrInfo();
}
- virtual const XCoreSubtarget *getSubtargetImpl() const { return &Subtarget; }
- virtual const XCoreTargetLowering *getTargetLowering() const {
- return &TLInfo;
+ const XCoreFrameLowering *getFrameLowering() const override {
+ return getSubtargetImpl()->getFrameLowering();
}
-
- virtual const XCoreSelectionDAGInfo* getSelectionDAGInfo() const {
- return &TSInfo;
+ const XCoreSubtarget *getSubtargetImpl() const override { return &Subtarget; }
+ const XCoreTargetLowering *getTargetLowering() const override {
+ return getSubtargetImpl()->getTargetLowering();
}
-
- virtual const TargetRegisterInfo *getRegisterInfo() const {
- return &InstrInfo.getRegisterInfo();
+ const XCoreSelectionDAGInfo* getSelectionDAGInfo() const override {
+ return getSubtargetImpl()->getSelectionDAGInfo();
+ }
+ const TargetRegisterInfo *getRegisterInfo() const override {
+ return getSubtargetImpl()->getRegisterInfo();
+ }
+ const DataLayout *getDataLayout() const override {
+ return getSubtargetImpl()->getDataLayout();
}
- virtual const DataLayout *getDataLayout() const { return &DL; }
// Pass Pipeline Configuration
- virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
- virtual void addAnalysisPasses(PassManagerBase &PM);
+ void addAnalysisPasses(PassManagerBase &PM) override;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/XCore/XCoreTargetObjectFile.cpp b/contrib/llvm/lib/Target/XCore/XCoreTargetObjectFile.cpp
index 88e3bfd..cfd3302 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreTargetObjectFile.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreTargetObjectFile.cpp
@@ -9,27 +9,58 @@
#include "XCoreTargetObjectFile.h"
#include "XCoreSubtarget.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/ELF.h"
#include "llvm/Target/TargetMachine.h"
+
using namespace llvm;
void XCoreTargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &TM){
TargetLoweringObjectFileELF::Initialize(Ctx, TM);
+ BSSSection =
+ Ctx.getELFSection(".dp.bss", ELF::SHT_NOBITS,
+ ELF::SHF_ALLOC | ELF::SHF_WRITE |
+ ELF::XCORE_SHF_DP_SECTION,
+ SectionKind::getBSS());
+ BSSSectionLarge =
+ Ctx.getELFSection(".dp.bss.large", ELF::SHT_NOBITS,
+ ELF::SHF_ALLOC | ELF::SHF_WRITE |
+ ELF::XCORE_SHF_DP_SECTION,
+ SectionKind::getBSS());
DataSection =
- Ctx.getELFSection(".dp.data", ELF::SHT_PROGBITS,
+ Ctx.getELFSection(".dp.data", ELF::SHT_PROGBITS,
ELF::SHF_ALLOC | ELF::SHF_WRITE |
ELF::XCORE_SHF_DP_SECTION,
SectionKind::getDataRel());
- BSSSection =
- Ctx.getELFSection(".dp.bss", ELF::SHT_NOBITS,
+ DataSectionLarge =
+ Ctx.getELFSection(".dp.data.large", ELF::SHT_PROGBITS,
ELF::SHF_ALLOC | ELF::SHF_WRITE |
ELF::XCORE_SHF_DP_SECTION,
- SectionKind::getBSS());
-
+ SectionKind::getDataRel());
+ DataRelROSection =
+ Ctx.getELFSection(".dp.rodata", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_WRITE |
+ ELF::XCORE_SHF_DP_SECTION,
+ SectionKind::getReadOnlyWithRel());
+ DataRelROSectionLarge =
+ Ctx.getELFSection(".dp.rodata.large", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_WRITE |
+ ELF::XCORE_SHF_DP_SECTION,
+ SectionKind::getReadOnlyWithRel());
+ ReadOnlySection =
+ Ctx.getELFSection(".cp.rodata", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |
+ ELF::XCORE_SHF_CP_SECTION,
+ SectionKind::getReadOnlyWithRel());
+ ReadOnlySectionLarge =
+ Ctx.getELFSection(".cp.rodata.large", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |
+ ELF::XCORE_SHF_CP_SECTION,
+ SectionKind::getReadOnlyWithRel());
MergeableConst4Section =
Ctx.getELFSection(".cp.rodata.cst4", ELF::SHT_PROGBITS,
ELF::SHF_ALLOC | ELF::SHF_MERGE |
@@ -45,16 +76,104 @@ void XCoreTargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &TM){
ELF::SHF_ALLOC | ELF::SHF_MERGE |
ELF::XCORE_SHF_CP_SECTION,
SectionKind::getMergeableConst16());
-
- // TLS globals are lowered in the backend to arrays indexed by the current
- // thread id. After lowering they require no special handling by the linker
- // and can be placed in the standard data / bss sections.
- TLSDataSection = DataSection;
- TLSBSSSection = BSSSection;
-
- ReadOnlySection =
- Ctx.getELFSection(".cp.rodata", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC |
+ CStringSection =
+ Ctx.getELFSection(".cp.rodata.string", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_MERGE | ELF::SHF_STRINGS |
ELF::XCORE_SHF_CP_SECTION,
SectionKind::getReadOnlyWithRel());
+ // TextSection - see MObjectFileInfo.cpp
+ // StaticCtorSection - see MObjectFileInfo.cpp
+ // StaticDtorSection - see MObjectFileInfo.cpp
+ }
+
+static unsigned getXCoreSectionType(SectionKind K) {
+ if (K.isBSS())
+ return ELF::SHT_NOBITS;
+ return ELF::SHT_PROGBITS;
+}
+
+static unsigned getXCoreSectionFlags(SectionKind K, bool IsCPRel) {
+ unsigned Flags = 0;
+
+ if (!K.isMetadata())
+ Flags |= ELF::SHF_ALLOC;
+
+ if (K.isText())
+ Flags |= ELF::SHF_EXECINSTR;
+ else if (IsCPRel)
+ Flags |= ELF::XCORE_SHF_CP_SECTION;
+ else
+ Flags |= ELF::XCORE_SHF_DP_SECTION;
+
+ if (K.isWriteable())
+ Flags |= ELF::SHF_WRITE;
+
+ if (K.isMergeableCString() || K.isMergeableConst4() ||
+ K.isMergeableConst8() || K.isMergeableConst16())
+ Flags |= ELF::SHF_MERGE;
+
+ if (K.isMergeableCString())
+ Flags |= ELF::SHF_STRINGS;
+
+ return Flags;
+}
+
+const MCSection *
+XCoreTargetObjectFile::getExplicitSectionGlobal(const GlobalValue *GV,
+ SectionKind Kind, Mangler &Mang,
+ const TargetMachine &TM) const {
+ StringRef SectionName = GV->getSection();
+ // Infer section flags from the section name if we can.
+ bool IsCPRel = SectionName.startswith(".cp.");
+ if (IsCPRel && !Kind.isReadOnly())
+ report_fatal_error("Using .cp. section for writeable object.");
+ return getContext().getELFSection(SectionName, getXCoreSectionType(Kind),
+ getXCoreSectionFlags(Kind, IsCPRel), Kind);
+}
+
+const MCSection *XCoreTargetObjectFile::
+SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind, Mangler &Mang,
+ const TargetMachine &TM) const{
+
+ bool UseCPRel = GV->isLocalLinkage(GV->getLinkage());
+
+ if (Kind.isText()) return TextSection;
+ if (UseCPRel) {
+ if (Kind.isMergeable1ByteCString()) return CStringSection;
+ if (Kind.isMergeableConst4()) return MergeableConst4Section;
+ if (Kind.isMergeableConst8()) return MergeableConst8Section;
+ if (Kind.isMergeableConst16()) return MergeableConst16Section;
+ }
+ Type *ObjType = GV->getType()->getPointerElementType();
+ if (TM.getCodeModel() == CodeModel::Small ||
+ !ObjType->isSized() ||
+ TM.getDataLayout()->getTypeAllocSize(ObjType) < CodeModelLargeSize) {
+ if (Kind.isReadOnly()) return UseCPRel? ReadOnlySection
+ : DataRelROSection;
+ if (Kind.isBSS() || Kind.isCommon())return BSSSection;
+ if (Kind.isDataRel()) return DataSection;
+ if (Kind.isReadOnlyWithRel()) return DataRelROSection;
+ } else {
+ if (Kind.isReadOnly()) return UseCPRel? ReadOnlySectionLarge
+ : DataRelROSectionLarge;
+ if (Kind.isBSS() || Kind.isCommon())return BSSSectionLarge;
+ if (Kind.isDataRel()) return DataSectionLarge;
+ if (Kind.isReadOnlyWithRel()) return DataRelROSectionLarge;
+ }
+
+ assert((Kind.isThreadLocal() || Kind.isCommon()) && "Unknown section kind");
+ report_fatal_error("Target does not support TLS or Common sections");
+}
+
+const MCSection *
+XCoreTargetObjectFile::getSectionForConstant(SectionKind Kind,
+ const Constant *C) const {
+ if (Kind.isMergeableConst4()) return MergeableConst4Section;
+ if (Kind.isMergeableConst8()) return MergeableConst8Section;
+ if (Kind.isMergeableConst16()) return MergeableConst16Section;
+ assert((Kind.isReadOnly() || Kind.isReadOnlyWithRel()) &&
+ "Unknown section kind");
+ // We assume the size of the object is never greater than CodeModelLargeSize.
+ // To handle CodeModelLargeSize changes to AsmPrinter would be required.
+ return ReadOnlySection;
}
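SelectSectionForGlobal() splits data between the regular and ".large" section variants on object size: anything sized and at least CodeModelLargeSize (256 bytes, declared in the header below) goes to the .large sections unless the small code model is in use. The decision, reduced to a sketch:

    #include <cstdint>

    // Mirrors the size test in SelectSectionForGlobal(); true selects the
    // .dp.data.large / .dp.bss.large / .cp.rodata.large variants.
    bool useLargeSectionVariant(bool SmallCodeModel, bool Sized, uint64_t AllocSize) {
      const uint64_t CodeModelLargeSize = 256;
      return !SmallCodeModel && Sized && AllocSize >= CodeModelLargeSize;
    }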
diff --git a/contrib/llvm/lib/Target/XCore/XCoreTargetObjectFile.h b/contrib/llvm/lib/Target/XCore/XCoreTargetObjectFile.h
index 27875e7..d389e55 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreTargetObjectFile.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreTargetObjectFile.h
@@ -14,11 +14,28 @@
namespace llvm {
+static const unsigned CodeModelLargeSize = 256;
+
class XCoreTargetObjectFile : public TargetLoweringObjectFileELF {
+ const MCSection *BSSSectionLarge;
+ const MCSection *DataSectionLarge;
+ const MCSection *ReadOnlySectionLarge;
+ const MCSection *DataRelROSectionLarge;
public:
- void Initialize(MCContext &Ctx, const TargetMachine &TM);
+ void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
+
+ const MCSection *
+ getExplicitSectionGlobal(const GlobalValue *GV,
+ SectionKind Kind, Mangler &Mang,
+ const TargetMachine &TM) const override;
+
+ const MCSection *
+ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
+ Mangler &Mang,
+ const TargetMachine &TM) const override;
- // TODO: Classify globals as xcore wishes.
+ const MCSection *getSectionForConstant(SectionKind Kind,
+ const Constant *C) const override;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/XCore/XCoreTargetStreamer.h b/contrib/llvm/lib/Target/XCore/XCoreTargetStreamer.h
new file mode 100644
index 0000000..0a394da
--- /dev/null
+++ b/contrib/llvm/lib/Target/XCore/XCoreTargetStreamer.h
@@ -0,0 +1,27 @@
+//===-- XCoreTargetStreamer.h - XCore Target Streamer ----------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef XCORETARGETSTREAMER_H
+#define XCORETARGETSTREAMER_H
+
+#include "llvm/MC/MCStreamer.h"
+
+namespace llvm {
+class XCoreTargetStreamer : public MCTargetStreamer {
+public:
+ XCoreTargetStreamer(MCStreamer &S);
+ virtual ~XCoreTargetStreamer();
+ virtual void emitCCTopData(StringRef Name) = 0;
+ virtual void emitCCTopFunction(StringRef Name) = 0;
+ virtual void emitCCBottomData(StringRef Name) = 0;
+ virtual void emitCCBottomFunction(StringRef Name) = 0;
+};
+}
+
+#endif
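XCoreTargetStreamer only declares pure-virtual hooks; the concrete asm and null implementations live elsewhere in the backend and are not part of this hunk. A skeleton of what any subclass must provide, purely to show the shape of the interface:

    // Minimal (do-nothing) subclass sketch; real implementations would emit
    // the corresponding assembler directives, or nothing for the null streamer.
    class SkeletonXCoreTargetStreamer : public llvm::XCoreTargetStreamer {
    public:
      explicit SkeletonXCoreTargetStreamer(llvm::MCStreamer &S)
          : XCoreTargetStreamer(S) {}
      void emitCCTopData(llvm::StringRef Name) override {}
      void emitCCTopFunction(llvm::StringRef Name) override {}
      void emitCCBottomData(llvm::StringRef Name) override {}
      void emitCCBottomFunction(llvm::StringRef Name) override {}
    };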
diff --git a/contrib/llvm/lib/Target/XCore/XCoreTargetTransformInfo.cpp b/contrib/llvm/lib/Target/XCore/XCoreTargetTransformInfo.cpp
index cc165f7..80d193d 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreTargetTransformInfo.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreTargetTransformInfo.cpp
@@ -14,16 +14,17 @@
///
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "xcoretti"
#include "XCore.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/CostTable.h"
+#include "llvm/Target/TargetLowering.h"
using namespace llvm;
+#define DEBUG_TYPE "xcoretti"
+
// Declare the pass initialization routine locally as target-specific passes
-// don't havve a target-wide initialization entry point, and so we rely on the
+// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializeXCoreTTIPass(PassRegistry &);
@@ -31,7 +32,7 @@ void initializeXCoreTTIPass(PassRegistry &);
namespace {
-class XCoreTTI : public ImmutablePass, public TargetTransformInfo {
+class XCoreTTI final : public ImmutablePass, public TargetTransformInfo {
public:
XCoreTTI() : ImmutablePass(ID) {
llvm_unreachable("This pass cannot be directly constructed");
@@ -42,27 +43,23 @@ public:
initializeXCoreTTIPass(*PassRegistry::getPassRegistry());
}
- virtual void initializePass() {
+ virtual void initializePass() override {
pushTTIStack(this);
}
- virtual void finalizePass() {
- popTTIStack();
- }
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const override {
TargetTransformInfo::getAnalysisUsage(AU);
}
static char ID;
- virtual void *getAdjustedAnalysisPointer(const void *ID) {
+ virtual void *getAdjustedAnalysisPointer(const void *ID) override {
if (ID == &TargetTransformInfo::ID)
return (TargetTransformInfo*)this;
return this;
}
- unsigned getNumberOfRegisters(bool Vector) const {
+ unsigned getNumberOfRegisters(bool Vector) const override {
if (Vector) {
return 0;
}
diff --git a/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index df08091..f9de54a 100644
--- a/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -29,7 +29,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "argpromotion"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
@@ -37,18 +36,22 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
-#include "llvm/Support/CFG.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <set>
using namespace llvm;
+#define DEBUG_TYPE "argpromotion"
+
STATISTIC(NumArgumentsPromoted , "Number of pointer arguments promoted");
STATISTIC(NumAggregatesPromoted, "Number of aggregate arguments promoted");
STATISTIC(NumByValArgsPromoted , "Number of byval arguments promoted");
@@ -58,29 +61,32 @@ namespace {
/// ArgPromotion - The 'by reference' to 'by value' argument promotion pass.
///
struct ArgPromotion : public CallGraphSCCPass {
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<AliasAnalysis>();
CallGraphSCCPass::getAnalysisUsage(AU);
}
- virtual bool runOnSCC(CallGraphSCC &SCC);
+ bool runOnSCC(CallGraphSCC &SCC) override;
static char ID; // Pass identification, replacement for typeid
explicit ArgPromotion(unsigned maxElements = 3)
- : CallGraphSCCPass(ID), maxElements(maxElements) {
+ : CallGraphSCCPass(ID), DL(nullptr), maxElements(maxElements) {
initializeArgPromotionPass(*PassRegistry::getPassRegistry());
}
/// A vector used to hold the indices of a single GEP instruction
typedef std::vector<uint64_t> IndicesVector;
+ const DataLayout *DL;
private:
CallGraphNode *PromoteArguments(CallGraphNode *CGN);
bool isSafeToPromoteArgument(Argument *Arg, bool isByVal) const;
CallGraphNode *DoPromotion(Function *F,
SmallPtrSet<Argument*, 8> &ArgsToPromote,
SmallPtrSet<Argument*, 8> &ByValArgsToTransform);
+ bool doInitialization(CallGraph &CG) override;
/// The maximum number of elements to expand, or 0 for unlimited.
unsigned maxElements;
+ DenseMap<const Function *, DISubprogram> FunctionDIs;
};
}
@@ -88,7 +94,7 @@ char ArgPromotion::ID = 0;
INITIALIZE_PASS_BEGIN(ArgPromotion, "argpromotion",
"Promote 'by reference' arguments to scalars", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
-INITIALIZE_PASS_DEPENDENCY(CallGraph)
+INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(ArgPromotion, "argpromotion",
"Promote 'by reference' arguments to scalars", false, false)
@@ -99,6 +105,9 @@ Pass *llvm::createArgumentPromotionPass(unsigned maxElements) {
bool ArgPromotion::runOnSCC(CallGraphSCC &SCC) {
bool Changed = false, LocalChange;
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
+
do { // Iterate until we stop promoting from this SCC.
LocalChange = false;
// Attempt to promote arguments from all functions in this SCC.
@@ -123,24 +132,23 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
Function *F = CGN->getFunction();
// Make sure that it is local to this module.
- if (!F || !F->hasLocalLinkage()) return 0;
+ if (!F || !F->hasLocalLinkage()) return nullptr;
// First check: see if there are any pointer arguments! If not, quick exit.
SmallVector<Argument*, 16> PointerArgs;
for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
if (I->getType()->isPointerTy())
PointerArgs.push_back(I);
- if (PointerArgs.empty()) return 0;
+ if (PointerArgs.empty()) return nullptr;
// Second check: make sure that all callers are direct callers. We can't
// transform functions that have indirect callers. Also see if the function
// is self-recursive.
bool isSelfRecursive = false;
- for (Value::use_iterator UI = F->use_begin(), E = F->use_end();
- UI != E; ++UI) {
- CallSite CS(*UI);
+ for (Use &U : F->uses()) {
+ CallSite CS(U.getUser());
// Must be a direct call.
- if (CS.getInstruction() == 0 || !CS.isCallee(UI)) return 0;
+ if (CS.getInstruction() == nullptr || !CS.isCallee(&U)) return nullptr;
if (CS.getInstruction()->getParent()->getParent() == F)
isSelfRecursive = true;
@@ -155,7 +163,8 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType();
// If this is a byval argument, and if the aggregate type is small, just
- // pass the elements, which is always safe.
+ // pass the elements, which is always safe. This does not apply to
+ // inalloca.
if (PtrArg->hasByValAttr()) {
if (StructType *STy = dyn_cast<StructType>(AgTy)) {
if (maxElements > 0 && STy->getNumElements() > maxElements) {
@@ -201,32 +210,32 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
}
// Otherwise, see if we can promote the pointer to its value.
- if (isSafeToPromoteArgument(PtrArg, PtrArg->hasByValAttr()))
+ if (isSafeToPromoteArgument(PtrArg, PtrArg->hasByValOrInAllocaAttr()))
ArgsToPromote.insert(PtrArg);
}
// No promotable pointer arguments.
if (ArgsToPromote.empty() && ByValArgsToTransform.empty())
- return 0;
+ return nullptr;
return DoPromotion(F, ArgsToPromote, ByValArgsToTransform);
}
/// AllCallersPassInValidPointerForArgument - Return true if we can prove that
/// all callees pass in a valid pointer for the specified function argument.
-static bool AllCallersPassInValidPointerForArgument(Argument *Arg) {
+static bool AllCallersPassInValidPointerForArgument(Argument *Arg,
+ const DataLayout *DL) {
Function *Callee = Arg->getParent();
unsigned ArgNo = Arg->getArgNo();
// Look at all call sites of the function. At this pointer we know we only
// have direct callees.
- for (Value::use_iterator UI = Callee->use_begin(), E = Callee->use_end();
- UI != E; ++UI) {
- CallSite CS(*UI);
+ for (User *U : Callee->users()) {
+ CallSite CS(U);
assert(CS && "Should only have direct calls!");
- if (!CS.getArgument(ArgNo)->isDereferenceablePointer())
+ if (!CS.getArgument(ArgNo)->isDereferenceablePointer(DL))
return false;
}
return true;
@@ -301,7 +310,8 @@ static void MarkIndicesSafe(const ArgPromotion::IndicesVector &ToMark,
/// This method limits promotion of aggregates to only promote up to three
/// elements of the aggregate in order to avoid exploding the number of
/// arguments passed in.
-bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
+bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg,
+ bool isByValOrInAlloca) const {
typedef std::set<IndicesVector> GEPIndicesSet;
// Quick exit for unused arguments
@@ -323,6 +333,9 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
//
// This set will contain all sets of indices that are loaded in the entry
// block, and thus are safe to unconditionally load in the caller.
+ //
+ // This optimization is also safe for InAlloca parameters, because it verifies
+ // that the address isn't captured.
GEPIndicesSet SafeToUnconditionallyLoad;
// This set contains all the sets of indices that we are planning to promote.
@@ -330,7 +343,7 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
GEPIndicesSet ToPromote;
// If the pointer is always valid, any load with first index 0 is valid.
- if (isByVal || AllCallersPassInValidPointerForArgument(Arg))
+ if (isByValOrInAlloca || AllCallersPassInValidPointerForArgument(Arg, DL))
SafeToUnconditionallyLoad.insert(IndicesVector(1, 0));
// First, iterate the entry block and mark loads of (geps of) arguments as
@@ -370,17 +383,16 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
// not (GEP+)loads, or any (GEP+)loads that are not safe to promote.
SmallVector<LoadInst*, 16> Loads;
IndicesVector Operands;
- for (Value::use_iterator UI = Arg->use_begin(), E = Arg->use_end();
- UI != E; ++UI) {
- User *U = *UI;
+ for (Use &U : Arg->uses()) {
+ User *UR = U.getUser();
Operands.clear();
- if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
+ if (LoadInst *LI = dyn_cast<LoadInst>(UR)) {
// Don't hack volatile/atomic loads
if (!LI->isSimple()) return false;
Loads.push_back(LI);
// Direct loads are equivalent to a GEP with a zero index and then a load.
Operands.push_back(0);
- } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
+ } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UR)) {
if (GEP->use_empty()) {
// Dead GEP's cause trouble later. Just remove them if we run into
// them.
@@ -389,7 +401,7 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
// TODO: This runs the above loop over and over again for dead GEPs
// Couldn't we just do increment the UI iterator earlier and erase the
// use?
- return isSafeToPromoteArgument(Arg, isByVal);
+ return isSafeToPromoteArgument(Arg, isByValOrInAlloca);
}
// Ensure that all of the indices are constants.
@@ -401,9 +413,8 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
return false; // Not a constant operand GEP!
// Ensure that the only users of the GEP are load instructions.
- for (Value::use_iterator UI = GEP->use_begin(), E = GEP->use_end();
- UI != E; ++UI)
- if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
+ for (User *GEPU : GEP->users())
+ if (LoadInst *LI = dyn_cast<LoadInst>(GEPU)) {
// Don't hack volatile/atomic loads
if (!LI->isSimple()) return false;
Loads.push_back(LI);
@@ -549,16 +560,15 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
// In this table, we will track which indices are loaded from the argument
// (where direct loads are tracked as no indices).
ScalarizeTable &ArgIndices = ScalarizedElements[I];
- for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;
- ++UI) {
- Instruction *User = cast<Instruction>(*UI);
- assert(isa<LoadInst>(User) || isa<GetElementPtrInst>(User));
+ for (User *U : I->users()) {
+ Instruction *UI = cast<Instruction>(U);
+ assert(isa<LoadInst>(UI) || isa<GetElementPtrInst>(UI));
IndicesVector Indices;
- Indices.reserve(User->getNumOperands() - 1);
+ Indices.reserve(UI->getNumOperands() - 1);
// Since loads will only have a single operand, and GEPs only a single
// non-index operand, this will record direct loads without any indices,
// and gep+loads with the GEP indices.
- for (User::op_iterator II = User->op_begin() + 1, IE = User->op_end();
+ for (User::op_iterator II = UI->op_begin() + 1, IE = UI->op_end();
II != IE; ++II)
Indices.push_back(cast<ConstantInt>(*II)->getSExtValue());
// GEPs with a single 0 index can be merged with direct loads
@@ -566,11 +576,11 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
Indices.clear();
ArgIndices.insert(Indices);
LoadInst *OrigLoad;
- if (LoadInst *L = dyn_cast<LoadInst>(User))
+ if (LoadInst *L = dyn_cast<LoadInst>(UI))
OrigLoad = L;
else
// Take any load, we will use it only to update Alias Analysis
- OrigLoad = cast<LoadInst>(User->use_back());
+ OrigLoad = cast<LoadInst>(UI->user_back());
OriginalLoads[std::make_pair(I, Indices)] = OrigLoad;
}
@@ -603,6 +613,10 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
Function *NF = Function::Create(NFTy, F->getLinkage(), F->getName());
NF->copyAttributesFrom(F);
+ // Patch the pointer to LLVM function in debug info descriptor.
+ auto DI = FunctionDIs.find(F);
+ if (DI != FunctionDIs.end())
+ DI->second.replaceFunction(NF);
DEBUG(dbgs() << "ARG PROMOTION: Promoting to:" << *NF << "\n"
<< "From: " << *F);
@@ -621,8 +635,8 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
// Get the callgraph information that we need to update to reflect our
// changes.
- CallGraph &CG = getAnalysis<CallGraph>();
-
+ CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
+
// Get a new callgraph node for NF.
CallGraphNode *NF_CGN = CG.getOrInsertFunction(NF);
@@ -631,7 +645,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
//
SmallVector<Value*, 16> Args;
while (!F->use_empty()) {
- CallSite CS(F->use_back());
+ CallSite CS(F->user_back());
assert(CS.getCalledFunction() == F);
Instruction *Call = CS.getInstruction();
const AttributeSet &CallPAL = CS.getAttributes();
@@ -660,7 +674,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
Type *AgTy = cast<PointerType>(I->getType())->getElementType();
StructType *STy = cast<StructType>(AgTy);
Value *Idxs[2] = {
- ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), 0 };
+ ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), nullptr };
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
Value *Idx = GetElementPtrInst::Create(*AI, Idxs,
@@ -740,6 +754,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
if (cast<CallInst>(Call)->isTailCall())
cast<CallInst>(New)->setTailCall();
}
+ New->setDebugLoc(Call->getDebugLoc());
Args.clear();
AttributesVec.clear();
@@ -788,10 +803,10 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
// Just add all the struct element types.
Type *AgTy = cast<PointerType>(I->getType())->getElementType();
- Value *TheAlloca = new AllocaInst(AgTy, 0, "", InsertPt);
+ Value *TheAlloca = new AllocaInst(AgTy, nullptr, "", InsertPt);
StructType *STy = cast<StructType>(AgTy);
Value *Idxs[2] = {
- ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), 0 };
+ ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), nullptr };
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
@@ -807,6 +822,15 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
I->replaceAllUsesWith(TheAlloca);
TheAlloca->takeName(I);
AA.replaceWithNewValue(I, TheAlloca);
+
+ // If the alloca is used in a call, we must clear the tail flag since
+ // the callee now uses an alloca from the caller.
+ for (User *U : TheAlloca->users()) {
+ CallInst *Call = dyn_cast<CallInst>(U);
+ if (!Call)
+ continue;
+ Call->setTailCall(false);
+ }
continue;
}
@@ -821,7 +845,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
ScalarizeTable &ArgIndices = ScalarizedElements[I];
while (!I->use_empty()) {
- if (LoadInst *LI = dyn_cast<LoadInst>(I->use_back())) {
+ if (LoadInst *LI = dyn_cast<LoadInst>(I->user_back())) {
assert(ArgIndices.begin()->empty() &&
"Load element should sort to front!");
I2->setName(I->getName()+".val");
@@ -831,7 +855,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
DEBUG(dbgs() << "*** Promoted load of argument '" << I->getName()
<< "' in function '" << F->getName() << "'\n");
} else {
- GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->use_back());
+ GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->user_back());
IndicesVector Operands;
Operands.reserve(GEP->getNumIndices());
for (User::op_iterator II = GEP->idx_begin(), IE = GEP->idx_end();
@@ -861,7 +885,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
// All of the uses must be load instructions. Replace them all with
// the argument specified by ArgNo.
while (!GEP->use_empty()) {
- LoadInst *L = cast<LoadInst>(GEP->use_back());
+ LoadInst *L = cast<LoadInst>(GEP->user_back());
L->replaceAllUsesWith(TheArg);
AA.replaceWithNewValue(L, TheArg);
L->eraseFromParent();
@@ -892,3 +916,8 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
return NF_CGN;
}
+
+bool ArgPromotion::doInitialization(CallGraph &CG) {
+ FunctionDIs = makeSubprogramMap(CG.getModule());
+ return CallGraphSCCPass::doInitialization(CG);
+}
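
The ArgumentPromotion hunks above are dominated by one mechanical migration: the old use_begin()/use_end()/use_back() walks become user-oriented iteration. A minimal sketch of the idiom the code moves to, assuming the LLVM 3.5 Value/User API used in this tree (not an excerpt from the pass itself):

#include "llvm/IR/Instructions.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// users() yields User* directly, so no dereference-and-cast of a use_iterator
// is needed; user_back() is the 3.5 spelling of the old use_back().
static void visitUsers(Value *V) {
  for (User *U : V->users())
    if (Instruction *I = dyn_cast<Instruction>(U))
      (void)I; // inspect or rewrite the using instruction here

  if (!V->use_empty()) {
    User *Last = V->user_back(); // most recently added user
    (void)Last;
  }
}
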
diff --git a/contrib/llvm/lib/Transforms/IPO/BarrierNoopPass.cpp b/contrib/llvm/lib/Transforms/IPO/BarrierNoopPass.cpp
index 2e32240..6af1043 100644
--- a/contrib/llvm/lib/Transforms/IPO/BarrierNoopPass.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/BarrierNoopPass.cpp
@@ -36,7 +36,7 @@ public:
initializeBarrierNoopPass(*PassRegistry::getPassRegistry());
}
- bool runOnModule(Module &M) { return false; }
+ bool runOnModule(Module &M) override { return false; }
};
}
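
The BarrierNoop change is representative of the override annotations added throughout these passes. A self-contained illustration of what the keyword buys, in plain C++11 with a stand-in base class rather than the real Pass hierarchy:

#include <iostream>

struct PassLike {
  virtual ~PassLike() {}
  virtual bool runOnModule(int &M) { return false; }
};

struct Good : PassLike {
  // The compiler verifies this really overrides a virtual in the base.
  bool runOnModule(int &M) override { return true; }
};

// struct Bad : PassLike {
//   bool runOnModule(int M) override { return true; } // error: does not override
// };

int main() {
  int M = 0;
  Good G;
  std::cout << G.runOnModule(M) << '\n'; // prints 1
  return 0;
}
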
diff --git a/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp b/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
index d94c0f4..23be081 100644
--- a/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
@@ -17,7 +17,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "constmerge"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
@@ -31,6 +30,8 @@
#include "llvm/Pass.h"
using namespace llvm;
+#define DEBUG_TYPE "constmerge"
+
STATISTIC(NumMerged, "Number of global constants merged");
namespace {
@@ -42,7 +43,7 @@ namespace {
// For this pass, process all of the globals in the module, eliminating
// duplicate constants.
- bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
// Return true iff we can determine the alignment of this global variable.
bool hasKnownAlignment(GlobalVariable *GV) const;
@@ -51,7 +52,7 @@ namespace {
// alignment to a concrete value.
unsigned getAlignment(GlobalVariable *GV) const;
- const DataLayout *TD;
+ const DataLayout *DL;
};
}
@@ -66,7 +67,7 @@ ModulePass *llvm::createConstantMergePass() { return new ConstantMerge(); }
/// Find values that are marked as llvm.used.
static void FindUsedValues(GlobalVariable *LLVMUsed,
SmallPtrSet<const GlobalValue*, 8> &UsedValues) {
- if (LLVMUsed == 0) return;
+ if (!LLVMUsed) return;
ConstantArray *Inits = cast<ConstantArray>(LLVMUsed->getInitializer());
for (unsigned i = 0, e = Inits->getNumOperands(); i != e; ++i) {
@@ -77,8 +78,8 @@ static void FindUsedValues(GlobalVariable *LLVMUsed,
}
// True if A is better than B.
-static bool IsBetterCannonical(const GlobalVariable &A,
- const GlobalVariable &B) {
+static bool IsBetterCanonical(const GlobalVariable &A,
+ const GlobalVariable &B) {
if (!A.hasLocalLinkage() && B.hasLocalLinkage())
return true;
@@ -89,20 +90,21 @@ static bool IsBetterCannonical(const GlobalVariable &A,
}
bool ConstantMerge::hasKnownAlignment(GlobalVariable *GV) const {
- return TD || GV->getAlignment() != 0;
+ return DL || GV->getAlignment() != 0;
}
unsigned ConstantMerge::getAlignment(GlobalVariable *GV) const {
unsigned Align = GV->getAlignment();
if (Align)
return Align;
- if (TD)
- return TD->getPreferredAlignment(GV);
+ if (DL)
+ return DL->getPreferredAlignment(GV);
return 0;
}
bool ConstantMerge::runOnModule(Module &M) {
- TD = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
// Find all the globals that are marked "used". These cannot be merged.
SmallPtrSet<const GlobalValue*, 8> UsedGlobals;
@@ -160,7 +162,7 @@ bool ConstantMerge::runOnModule(Module &M) {
// If this is the first constant we find or if the old one is local,
// replace with the current one. If the current is externally visible
    // it cannot be replaced, but it can be the canonical constant we merge with.
- if (Slot == 0 || IsBetterCannonical(*GV, *Slot))
+ if (!Slot || IsBetterCanonical(*GV, *Slot))
Slot = GV;
}
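
ConstantMerge now reaches DataLayout through DataLayoutPass instead of treating DataLayout itself as an analysis. A minimal pass skeleton showing that lookup, assuming the DataLayoutPass interface as it appears in this merge; ExamplePass is an illustrative name, not part of the tree:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
using namespace llvm;

namespace {
struct ExamplePass : public ModulePass {
  static char ID;
  const DataLayout *DL;
  ExamplePass() : ModulePass(ID), DL(nullptr) {}

  bool runOnModule(Module &M) override {
    // The pass is optional, so fall back to nullptr and guard every use of DL.
    DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
    DL = DLP ? &DLP->getDataLayout() : nullptr;
    return false;
  }
};
char ExamplePass::ID = 0;
}
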
diff --git a/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp b/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
index 911c14e..ac3853d 100644
--- a/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -17,29 +17,31 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "deadargelim"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/DIBuilder.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
+#include "llvm/IR/DIBuilder.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
#include <set>
+#include <tuple>
using namespace llvm;
+#define DEBUG_TYPE "deadargelim"
+
STATISTIC(NumArgumentsEliminated, "Number of unread args removed");
STATISTIC(NumRetValsEliminated , "Number of unused return values removed");
STATISTIC(NumArgumentsReplacedWithUndef,
@@ -62,12 +64,7 @@ namespace {
/// Make RetOrArg comparable, so we can put it into a map.
bool operator<(const RetOrArg &O) const {
- if (F != O.F)
- return F < O.F;
- else if (Idx != O.Idx)
- return Idx < O.Idx;
- else
- return IsArg < O.IsArg;
+ return std::tie(F, Idx, IsArg) < std::tie(O.F, O.Idx, O.IsArg);
}
/// Make RetOrArg comparable, so we can easily iterate the multimap.
@@ -130,8 +127,7 @@ namespace {
// As the code generation for module is finished (and DIBuilder is
// finalized) we assume that subprogram descriptors won't be changed, and
// they are stored in map for short duration anyway.
- typedef DenseMap<Function*, DISubprogram> FunctionDIMap;
- FunctionDIMap FunctionDIs;
+ DenseMap<const Function *, DISubprogram> FunctionDIs;
protected:
// DAH uses this to specify a different ID.
@@ -143,17 +139,16 @@ namespace {
initializeDAEPass(*PassRegistry::getPassRegistry());
}
- bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
virtual bool ShouldHackArguments() const { return false; }
private:
Liveness MarkIfNotLive(RetOrArg Use, UseVector &MaybeLiveUses);
- Liveness SurveyUse(Value::const_use_iterator U, UseVector &MaybeLiveUses,
+ Liveness SurveyUse(const Use *U, UseVector &MaybeLiveUses,
unsigned RetValNum = 0);
Liveness SurveyUses(const Value *V, UseVector &MaybeLiveUses);
- void CollectFunctionDIs(Module &M);
void SurveyFunction(const Function &F);
void MarkValue(const RetOrArg &RA, Liveness L,
const UseVector &MaybeLiveUses);
@@ -178,7 +173,7 @@ namespace {
static char ID;
DAH() : DAE(ID) {}
- virtual bool ShouldHackArguments() const { return true; }
+ bool ShouldHackArguments() const override { return true; }
};
}
@@ -193,35 +188,6 @@ INITIALIZE_PASS(DAH, "deadarghaX0r",
ModulePass *llvm::createDeadArgEliminationPass() { return new DAE(); }
ModulePass *llvm::createDeadArgHackingPass() { return new DAH(); }
-/// CollectFunctionDIs - Map each function in the module to its debug info
-/// descriptor.
-void DAE::CollectFunctionDIs(Module &M) {
- FunctionDIs.clear();
-
- for (Module::named_metadata_iterator I = M.named_metadata_begin(),
- E = M.named_metadata_end(); I != E; ++I) {
- NamedMDNode &NMD = *I;
- for (unsigned MDIndex = 0, MDNum = NMD.getNumOperands();
- MDIndex < MDNum; ++MDIndex) {
- MDNode *Node = NMD.getOperand(MDIndex);
- if (!DIDescriptor(Node).isCompileUnit())
- continue;
- DICompileUnit CU(Node);
- const DIArray &SPs = CU.getSubprograms();
- for (unsigned SPIndex = 0, SPNum = SPs.getNumElements();
- SPIndex < SPNum; ++SPIndex) {
- DISubprogram SP(SPs.getElement(SPIndex));
- assert((!SP || SP.isSubprogram()) &&
- "A MDNode in subprograms of a CU should be null or a DISubprogram.");
- if (!SP)
- continue;
- if (Function *F = SP.getFunction())
- FunctionDIs[F] = SP;
- }
- }
- }
-}
-
/// DeleteDeadVarargs - If this is a function that takes a ... list, and if
/// llvm.vastart is never called, the varargs list is dead for the function.
bool DAE::DeleteDeadVarargs(Function &Fn) {
@@ -265,7 +231,7 @@ bool DAE::DeleteDeadVarargs(Function &Fn) {
// to pass in a smaller number of arguments into the new function.
//
std::vector<Value*> Args;
- for (Value::use_iterator I = Fn.use_begin(), E = Fn.use_end(); I != E; ) {
+ for (Value::user_iterator I = Fn.user_begin(), E = Fn.user_end(); I != E; ) {
CallSite CS(*I++);
if (!CS)
continue;
@@ -330,7 +296,7 @@ bool DAE::DeleteDeadVarargs(Function &Fn) {
}
// Patch the pointer to LLVM function in debug info descriptor.
- FunctionDIMap::iterator DI = FunctionDIs.find(&Fn);
+ auto DI = FunctionDIs.find(&Fn);
if (DI != FunctionDIs.end())
DI->second.replaceFunction(NF);
@@ -378,7 +344,7 @@ bool DAE::RemoveDeadArgumentsFromCallers(Function &Fn)
I != E; ++I) {
Argument *Arg = I;
- if (Arg->use_empty() && !Arg->hasByValAttr())
+ if (Arg->use_empty() && !Arg->hasByValOrInAllocaAttr())
UnusedArgs.push_back(Arg->getArgNo());
}
@@ -387,10 +353,9 @@ bool DAE::RemoveDeadArgumentsFromCallers(Function &Fn)
bool Changed = false;
- for (Function::use_iterator I = Fn.use_begin(), E = Fn.use_end();
- I != E; ++I) {
- CallSite CS(*I);
- if (!CS || !CS.isCallee(I))
+ for (Use &U : Fn.uses()) {
+ CallSite CS(U.getUser());
+ if (!CS || !CS.isCallee(&U))
continue;
// Now go through all unused args and replace them with "undef".
@@ -441,9 +406,9 @@ DAE::Liveness DAE::MarkIfNotLive(RetOrArg Use, UseVector &MaybeLiveUses) {
/// RetValNum is the return value number to use when this use is used in a
/// return instruction. This is used in the recursion, you should always leave
/// it at 0.
-DAE::Liveness DAE::SurveyUse(Value::const_use_iterator U,
+DAE::Liveness DAE::SurveyUse(const Use *U,
UseVector &MaybeLiveUses, unsigned RetValNum) {
- const User *V = *U;
+ const User *V = U->getUser();
if (const ReturnInst *RI = dyn_cast<ReturnInst>(V)) {
// The value is returned from a function. It's only live when the
// function's return value is live. We use RetValNum here, for the case
@@ -454,7 +419,7 @@ DAE::Liveness DAE::SurveyUse(Value::const_use_iterator U,
return MarkIfNotLive(Use, MaybeLiveUses);
}
if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(V)) {
- if (U.getOperandNo() != InsertValueInst::getAggregateOperandIndex()
+ if (U->getOperandNo() != InsertValueInst::getAggregateOperandIndex()
&& IV->hasIndices())
// The use we are examining is inserted into an aggregate. Our liveness
// depends on all uses of that aggregate, but if it is used as a return
@@ -465,9 +430,8 @@ DAE::Liveness DAE::SurveyUse(Value::const_use_iterator U,
// we don't change RetValNum, but do survey all our uses.
Liveness Result = MaybeLive;
- for (Value::const_use_iterator I = IV->use_begin(),
- E = V->use_end(); I != E; ++I) {
- Result = SurveyUse(I, MaybeLiveUses, RetValNum);
+ for (const Use &UU : IV->uses()) {
+ Result = SurveyUse(&UU, MaybeLiveUses, RetValNum);
if (Result == Live)
break;
}
@@ -490,7 +454,7 @@ DAE::Liveness DAE::SurveyUse(Value::const_use_iterator U,
return Live;
assert(CS.getArgument(ArgNo)
- == CS->getOperand(U.getOperandNo())
+ == CS->getOperand(U->getOperandNo())
&& "Argument is not where we expected it");
// Value passed to a normal call. It's only live when the corresponding
@@ -513,9 +477,8 @@ DAE::Liveness DAE::SurveyUses(const Value *V, UseVector &MaybeLiveUses) {
  // Assume it's dead (which will only hold if there are no uses at all).
Liveness Result = MaybeLive;
// Check each use.
- for (Value::const_use_iterator I = V->use_begin(),
- E = V->use_end(); I != E; ++I) {
- Result = SurveyUse(I, MaybeLiveUses);
+ for (const Use &U : V->uses()) {
+ Result = SurveyUse(&U, MaybeLiveUses);
if (Result == Live)
break;
}
@@ -531,6 +494,13 @@ DAE::Liveness DAE::SurveyUses(const Value *V, UseVector &MaybeLiveUses) {
// well as arguments to functions which have their "address taken".
//
void DAE::SurveyFunction(const Function &F) {
+ // Functions with inalloca parameters are expecting args in a particular
+ // register and memory layout.
+ if (F.getAttributes().hasAttrSomewhere(Attribute::InAlloca)) {
+ MarkLive(F);
+ return;
+ }
+
unsigned RetCount = NumRetVals(&F);
// Assume all return values are dead
typedef SmallVector<Liveness, 5> RetVals;
@@ -562,12 +532,11 @@ void DAE::SurveyFunction(const Function &F) {
unsigned NumLiveRetVals = 0;
Type *STy = dyn_cast<StructType>(F.getReturnType());
// Loop all uses of the function.
- for (Value::const_use_iterator I = F.use_begin(), E = F.use_end();
- I != E; ++I) {
+ for (const Use &U : F.uses()) {
// If the function is PASSED IN as an argument, its address has been
// taken.
- ImmutableCallSite CS(*I);
- if (!CS || !CS.isCallee(I)) {
+ ImmutableCallSite CS(U.getUser());
+ if (!CS || !CS.isCallee(&U)) {
MarkLive(F);
return;
}
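
The SurveyFunction and RemoveDeadArgumentsFromCallers hunks switch from iterating users to iterating Use objects, because CallSite::isCallee needs the Use itself to tell a genuine call through F from F merely being passed as data. A short sketch of that scan, using only the API shown in the hunks above:

#include "llvm/IR/CallSite.h"
#include "llvm/IR/Function.h"
using namespace llvm;

static bool addressIsTaken(const Function &F) {
  for (const Use &U : F.uses()) {
    ImmutableCallSite CS(U.getUser());
    // Not a call, or F appears as an argument rather than the callee:
    // its address escapes, so every argument must be treated as live.
    if (!CS || !CS.isCallee(&U))
      return true;
  }
  return false;
}
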
@@ -586,9 +555,8 @@ void DAE::SurveyFunction(const Function &F) {
if (NumLiveRetVals != RetCount) {
if (STy) {
// Check all uses of the return value.
- for (Value::const_use_iterator I = TheCall->use_begin(),
- E = TheCall->use_end(); I != E; ++I) {
- const ExtractValueInst *Ext = dyn_cast<ExtractValueInst>(*I);
+ for (const User *U : TheCall->users()) {
+ const ExtractValueInst *Ext = dyn_cast<ExtractValueInst>(U);
if (Ext && Ext->hasIndices()) {
// This use uses a part of our return value, survey the uses of
// that part and store the results for this index only.
@@ -767,7 +735,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
// Find out the new return value.
Type *RetTy = FTy->getReturnType();
- Type *NRetTy = NULL;
+ Type *NRetTy = nullptr;
unsigned RetCount = NumRetVals(F);
// -1 means unused, other numbers are the new index
@@ -891,7 +859,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
//
std::vector<Value*> Args;
while (!F->use_empty()) {
- CallSite CS(F->use_back());
+ CallSite CS(F->user_back());
Instruction *Call = CS.getInstruction();
AttributesVec.clear();
@@ -1053,7 +1021,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
Value *RetVal;
if (NFTy->getReturnType()->isVoidTy()) {
- RetVal = 0;
+ RetVal = nullptr;
} else {
assert (RetTy->isStructTy());
// The original return value was a struct, insert
@@ -1088,7 +1056,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
}
// Patch the pointer to LLVM function in debug info descriptor.
- FunctionDIMap::iterator DI = FunctionDIs.find(F);
+ auto DI = FunctionDIs.find(F);
if (DI != FunctionDIs.end())
DI->second.replaceFunction(NF);
@@ -1102,7 +1070,7 @@ bool DAE::runOnModule(Module &M) {
bool Changed = false;
// Collect debug info descriptors for functions.
- CollectFunctionDIs(M);
+ FunctionDIs = makeSubprogramMap(M);
// First pass: Do a simple check to see if any functions can have their "..."
// removed. We can do this if they never call va_start. This loop cannot be
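
The RetOrArg comparator earlier in this file is collapsed into a single std::tie comparison. A standalone C++11 sketch of the idiom, with RetOrArgLike as an illustrative stand-in for the real struct:

#include <cassert>
#include <tuple>

struct RetOrArgLike {
  const void *F;
  unsigned Idx;
  bool IsArg;

  // Lexicographic compare over (F, Idx, IsArg): equivalent to the chained
  // if/else version it replaces, but harder to get subtly wrong.
  bool operator<(const RetOrArgLike &O) const {
    return std::tie(F, Idx, IsArg) < std::tie(O.F, O.Idx, O.IsArg);
  }
};

int main() {
  RetOrArgLike A{nullptr, 1, false}, B{nullptr, 1, true};
  assert(A < B && !(B < A));
  return 0;
}
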
diff --git a/contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp b/contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp
index 50fb3e6..40ec9fa 100644
--- a/contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp
@@ -27,11 +27,10 @@ using namespace llvm;
/// the split module remain valid.
static void makeVisible(GlobalValue &GV, bool Delete) {
bool Local = GV.hasLocalLinkage();
- if (Local)
- GV.setVisibility(GlobalValue::HiddenVisibility);
-
if (Local || Delete) {
GV.setLinkage(GlobalValue::ExternalLinkage);
+ if (Local)
+ GV.setVisibility(GlobalValue::HiddenVisibility);
return;
}
@@ -68,7 +67,7 @@ namespace {
explicit GVExtractorPass(std::vector<GlobalValue*>& GVs, bool deleteS = true)
: ModulePass(ID), Named(GVs.begin(), GVs.end()), deleteStuff(deleteS) {}
- bool runOnModule(Module &M) {
+ bool runOnModule(Module &M) override {
// Visit the global inline asm.
if (!deleteStuff)
M.setModuleInlineAsm("");
@@ -95,7 +94,7 @@ namespace {
makeVisible(*I, Delete);
if (Delete)
- I->setInitializer(0);
+ I->setInitializer(nullptr);
}
// Visit the Functions.
@@ -134,7 +133,7 @@ namespace {
} else {
Declaration =
new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage,
- 0, CurI->getName());
+ nullptr, CurI->getName());
}
CurI->replaceAllUsesWith(Declaration);
diff --git a/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index 60e5f06..8174df9 100644
--- a/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -18,7 +18,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "functionattrs"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetVector.h"
@@ -29,12 +28,14 @@
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
-#include "llvm/Support/InstIterator.h"
#include "llvm/Target/TargetLibraryInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "functionattrs"
+
STATISTIC(NumReadNone, "Number of functions marked readnone");
STATISTIC(NumReadOnly, "Number of functions marked readonly");
STATISTIC(NumNoCapture, "Number of arguments marked nocapture");
@@ -46,12 +47,12 @@ STATISTIC(NumAnnotated, "Number of attributes added to library functions");
namespace {
struct FunctionAttrs : public CallGraphSCCPass {
static char ID; // Pass identification, replacement for typeid
- FunctionAttrs() : CallGraphSCCPass(ID), AA(0) {
+ FunctionAttrs() : CallGraphSCCPass(ID), AA(nullptr) {
initializeFunctionAttrsPass(*PassRegistry::getPassRegistry());
}
// runOnSCC - Analyze the SCC, performing the transformation if possible.
- bool runOnSCC(CallGraphSCC &SCC);
+ bool runOnSCC(CallGraphSCC &SCC) override;
// AddReadAttrs - Deduce readonly/readnone attributes for the SCC.
bool AddReadAttrs(const CallGraphSCC &SCC);
@@ -120,7 +121,7 @@ namespace {
// call declarations.
bool annotateLibraryCalls(const CallGraphSCC &SCC);
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<AliasAnalysis>();
AU.addRequired<TargetLibraryInfo>();
@@ -137,7 +138,7 @@ char FunctionAttrs::ID = 0;
INITIALIZE_PASS_BEGIN(FunctionAttrs, "functionattrs",
"Deduce function attributes", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
-INITIALIZE_PASS_DEPENDENCY(CallGraph)
+INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(FunctionAttrs, "functionattrs",
"Deduce function attributes", false, false)
@@ -160,7 +161,7 @@ bool FunctionAttrs::AddReadAttrs(const CallGraphSCC &SCC) {
for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
Function *F = (*I)->getFunction();
- if (F == 0)
+ if (!F)
// External node - may write memory. Just give up.
return false;
@@ -319,7 +320,7 @@ namespace {
ArgumentGraphNode SyntheticRoot;
public:
- ArgumentGraph() { SyntheticRoot.Definition = 0; }
+ ArgumentGraph() { SyntheticRoot.Definition = nullptr; }
typedef SmallVectorImpl<ArgumentGraphNode*>::iterator iterator;
@@ -342,9 +343,9 @@ namespace {
ArgumentUsesTracker(const SmallPtrSet<Function*, 8> &SCCNodes)
: Captured(false), SCCNodes(SCCNodes) {}
- void tooManyUses() { Captured = true; }
+ void tooManyUses() override { Captured = true; }
- bool captured(Use *U) {
+ bool captured(const Use *U) override {
CallSite CS(U->getUser());
if (!CS.getInstruction()) { Captured = true; return true; }
@@ -414,17 +415,19 @@ determinePointerReadAttrs(Argument *A,
SmallSet<Use*, 32> Visited;
int Count = 0;
+ // inalloca arguments are always clobbered by the call.
+ if (A->hasInAllocaAttr())
+ return Attribute::None;
+
bool IsRead = false;
// We don't need to track IsWritten. If A is written to, return immediately.
- for (Value::use_iterator UI = A->use_begin(), UE = A->use_end();
- UI != UE; ++UI) {
+ for (Use &U : A->uses()) {
if (Count++ >= 20)
return Attribute::None;
- Use *U = &UI.getUse();
- Visited.insert(U);
- Worklist.push_back(U);
+ Visited.insert(&U);
+ Worklist.push_back(&U);
}
while (!Worklist.empty()) {
@@ -437,25 +440,38 @@ determinePointerReadAttrs(Argument *A,
case Instruction::GetElementPtr:
case Instruction::PHI:
case Instruction::Select:
+ case Instruction::AddrSpaceCast:
// The original value is not read/written via this if the new value isn't.
- for (Instruction::use_iterator UI = I->use_begin(), UE = I->use_end();
- UI != UE; ++UI) {
- Use *U = &UI.getUse();
- if (Visited.insert(U))
- Worklist.push_back(U);
- }
+ for (Use &UU : I->uses())
+ if (Visited.insert(&UU))
+ Worklist.push_back(&UU);
break;
case Instruction::Call:
case Instruction::Invoke: {
+ bool Captures = true;
+
+ if (I->getType()->isVoidTy())
+ Captures = false;
+
+ auto AddUsersToWorklistIfCapturing = [&] {
+ if (Captures)
+ for (Use &UU : I->uses())
+ if (Visited.insert(&UU))
+ Worklist.push_back(&UU);
+ };
+
CallSite CS(I);
- if (CS.doesNotAccessMemory())
+ if (CS.doesNotAccessMemory()) {
+ AddUsersToWorklistIfCapturing();
continue;
+ }
Function *F = CS.getCalledFunction();
if (!F) {
if (CS.onlyReadsMemory()) {
IsRead = true;
+ AddUsersToWorklistIfCapturing();
continue;
}
return Attribute::None;
@@ -470,6 +486,7 @@ determinePointerReadAttrs(Argument *A,
"More params than args in non-varargs call.");
return Attribute::None;
}
+ Captures &= !CS.doesNotCapture(A - B);
if (SCCNodes.count(AI))
continue;
if (!CS.onlyReadsMemory() && !CS.onlyReadsMemory(A - B))
@@ -478,6 +495,7 @@ determinePointerReadAttrs(Argument *A,
IsRead = true;
}
}
+ AddUsersToWorklistIfCapturing();
break;
}
@@ -521,7 +539,7 @@ bool FunctionAttrs::AddArgumentAttrs(const CallGraphSCC &SCC) {
for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
Function *F = (*I)->getFunction();
- if (F == 0)
+ if (!F)
// External node - only a problem for arguments that we pass to it.
continue;
@@ -599,9 +617,8 @@ bool FunctionAttrs::AddArgumentAttrs(const CallGraphSCC &SCC) {
// made. If the definition doesn't have a 'nocapture' attribute by now, it
// captures.
- for (scc_iterator<ArgumentGraph*> I = scc_begin(&AG), E = scc_end(&AG);
- I != E; ++I) {
- std::vector<ArgumentGraphNode*> &ArgumentSCC = *I;
+ for (scc_iterator<ArgumentGraph*> I = scc_begin(&AG); !I.isAtEnd(); ++I) {
+ const std::vector<ArgumentGraphNode *> &ArgumentSCC = *I;
if (ArgumentSCC.size() == 1) {
if (!ArgumentSCC[0]->Definition) continue; // synthetic root node
@@ -617,8 +634,8 @@ bool FunctionAttrs::AddArgumentAttrs(const CallGraphSCC &SCC) {
}
bool SCCCaptured = false;
- for (std::vector<ArgumentGraphNode*>::iterator I = ArgumentSCC.begin(),
- E = ArgumentSCC.end(); I != E && !SCCCaptured; ++I) {
+ for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
+ I != E && !SCCCaptured; ++I) {
ArgumentGraphNode *Node = *I;
if (Node->Uses.empty()) {
if (!Node->Definition->hasNoCaptureAttr())
@@ -630,13 +647,12 @@ bool FunctionAttrs::AddArgumentAttrs(const CallGraphSCC &SCC) {
SmallPtrSet<Argument*, 8> ArgumentSCCNodes;
// Fill ArgumentSCCNodes with the elements of the ArgumentSCC. Used for
// quickly looking up whether a given Argument is in this ArgumentSCC.
- for (std::vector<ArgumentGraphNode*>::iterator I = ArgumentSCC.begin(),
- E = ArgumentSCC.end(); I != E; ++I) {
+ for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end(); I != E; ++I) {
ArgumentSCCNodes.insert((*I)->Definition);
}
- for (std::vector<ArgumentGraphNode*>::iterator I = ArgumentSCC.begin(),
- E = ArgumentSCC.end(); I != E && !SCCCaptured; ++I) {
+ for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
+ I != E && !SCCCaptured; ++I) {
ArgumentGraphNode *N = *I;
for (SmallVectorImpl<ArgumentGraphNode*>::iterator UI = N->Uses.begin(),
UE = N->Uses.end(); UI != UE; ++UI) {
@@ -723,6 +739,7 @@ bool FunctionAttrs::IsFunctionMallocLike(Function *F,
// Extend the analysis by looking upwards.
case Instruction::BitCast:
case Instruction::GetElementPtr:
+ case Instruction::AddrSpaceCast:
FlowsToReturn.insert(RVI->getOperand(0));
continue;
case Instruction::Select: {
@@ -775,7 +792,7 @@ bool FunctionAttrs::AddNoAliasAttrs(const CallGraphSCC &SCC) {
for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
Function *F = (*I)->getFunction();
- if (F == 0)
+ if (!F)
// External node - skip it;
return false;
@@ -1649,6 +1666,7 @@ bool FunctionAttrs::inferPrototypeAttributes(Function &F) {
setDoesNotThrow(F);
setDoesNotCapture(F, 1);
setDoesNotCapture(F, 2);
+ break;
default:
// Didn't mark any attributes.
return false;
@@ -1667,7 +1685,7 @@ bool FunctionAttrs::annotateLibraryCalls(const CallGraphSCC &SCC) {
for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
Function *F = (*I)->getFunction();
- if (F != 0 && F->isDeclaration())
+ if (F && F->isDeclaration())
MadeChange |= inferPrototypeAttributes(*F);
}
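
In determinePointerReadAttrs above, the propagation of call and invoke users is deferred behind a small lambda so the worklist is only extended when the callee may capture the pointer. A standalone sketch of that worklist-plus-lambda shape, with plain integers standing in for Use pointers and a parity test standing in for the capture query:

#include <initializer_list>
#include <iostream>
#include <set>
#include <vector>

int main() {
  std::vector<int> Worklist = {1, 2, 3};
  std::set<int> Visited(Worklist.begin(), Worklist.end());

  while (!Worklist.empty()) {
    int U = Worklist.back();
    Worklist.pop_back();

    bool Captures = (U % 2) != 0; // stand-in for "the call may capture"

    // Keep the push logic in one place so every early 'continue' in the
    // handler can still propagate to transitive users when needed.
    auto AddUsersIfCapturing = [&] {
      if (!Captures)
        return;
      for (int Next : {U + 10, U + 11})
        if (Next < 50 && Visited.insert(Next).second)
          Worklist.push_back(Next);
    };

    AddUsersIfCapturing();
    std::cout << "visited " << U << "\n";
  }
  return 0;
}
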
diff --git a/contrib/llvm/lib/Transforms/IPO/GlobalDCE.cpp b/contrib/llvm/lib/Transforms/IPO/GlobalDCE.cpp
index 901295d..7e7a4c0 100644
--- a/contrib/llvm/lib/Transforms/IPO/GlobalDCE.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/GlobalDCE.cpp
@@ -15,15 +15,18 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "globaldce"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
+#include "llvm/Transforms/Utils/CtorUtils.h"
#include "llvm/Pass.h"
using namespace llvm;
+#define DEBUG_TYPE "globaldce"
+
STATISTIC(NumAliases , "Number of global aliases removed");
STATISTIC(NumFunctions, "Number of functions removed");
STATISTIC(NumVariables, "Number of global variables removed");
@@ -38,7 +41,7 @@ namespace {
// run - Do the GlobalDCE pass on the specified module, optionally updating
// the specified callgraph to reflect the changes.
//
- bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
private:
SmallPtrSet<GlobalValue*, 32> AliveGlobals;
@@ -53,6 +56,15 @@ namespace {
};
}
+/// Returns true if F contains only a single "ret" instruction.
+static bool isEmptyFunction(Function *F) {
+ BasicBlock &Entry = F->getEntryBlock();
+ if (Entry.size() != 1 || !isa<ReturnInst>(Entry.front()))
+ return false;
+ ReturnInst &RI = cast<ReturnInst>(Entry.front());
+ return RI.getReturnValue() == nullptr;
+}
+
char GlobalDCE::ID = 0;
INITIALIZE_PASS(GlobalDCE, "globaldce",
"Dead Global Elimination", false, false)
@@ -61,14 +73,23 @@ ModulePass *llvm::createGlobalDCEPass() { return new GlobalDCE(); }
bool GlobalDCE::runOnModule(Module &M) {
bool Changed = false;
-
+
+ // Remove empty functions from the global ctors list.
+ Changed |= optimizeGlobalCtorsList(M, isEmptyFunction);
+
+ typedef std::multimap<const Comdat *, GlobalValue *> ComdatGVPairsTy;
+ ComdatGVPairsTy ComdatGVPairs;
+
// Loop over the module, adding globals which are obviously necessary.
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
Changed |= RemoveUnusedGlobalValue(*I);
// Functions with external linkage are needed if they have a body
- if (!I->isDiscardableIfUnused() &&
- !I->isDeclaration() && !I->hasAvailableExternallyLinkage())
- GlobalIsNeeded(I);
+ if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage()) {
+ if (!I->isDiscardableIfUnused())
+ GlobalIsNeeded(I);
+ else if (const Comdat *C = I->getComdat())
+ ComdatGVPairs.insert(std::make_pair(C, I));
+ }
}
for (Module::global_iterator I = M.global_begin(), E = M.global_end();
@@ -76,17 +97,38 @@ bool GlobalDCE::runOnModule(Module &M) {
Changed |= RemoveUnusedGlobalValue(*I);
// Externally visible & appending globals are needed, if they have an
// initializer.
- if (!I->isDiscardableIfUnused() &&
- !I->isDeclaration() && !I->hasAvailableExternallyLinkage())
- GlobalIsNeeded(I);
+ if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage()) {
+ if (!I->isDiscardableIfUnused())
+ GlobalIsNeeded(I);
+ else if (const Comdat *C = I->getComdat())
+ ComdatGVPairs.insert(std::make_pair(C, I));
+ }
}
for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
I != E; ++I) {
Changed |= RemoveUnusedGlobalValue(*I);
// Externally visible aliases are needed.
- if (!I->isDiscardableIfUnused())
+ if (!I->isDiscardableIfUnused()) {
GlobalIsNeeded(I);
+ } else if (const Comdat *C = I->getComdat()) {
+ ComdatGVPairs.insert(std::make_pair(C, I));
+ }
+ }
+
+ for (ComdatGVPairsTy::iterator I = ComdatGVPairs.begin(),
+ E = ComdatGVPairs.end();
+ I != E;) {
+ ComdatGVPairsTy::iterator UB = ComdatGVPairs.upper_bound(I->first);
+ bool CanDiscard = std::all_of(I, UB, [](ComdatGVPairsTy::value_type Pair) {
+ return Pair.second->isDiscardableIfUnused();
+ });
+ if (!CanDiscard) {
+ std::for_each(I, UB, [this](ComdatGVPairsTy::value_type Pair) {
+ GlobalIsNeeded(Pair.second);
+ });
+ }
+ I = UB;
}
// Now that all globals which are needed are in the AliveGlobals set, we loop
@@ -99,7 +141,7 @@ bool GlobalDCE::runOnModule(Module &M) {
I != E; ++I)
if (!AliveGlobals.count(I)) {
DeadGlobalVars.push_back(I); // Keep track of dead globals
- I->setInitializer(0);
+ I->setInitializer(nullptr);
}
// The second pass drops the bodies of functions which are dead...
@@ -117,7 +159,7 @@ bool GlobalDCE::runOnModule(Module &M) {
++I)
if (!AliveGlobals.count(I)) {
DeadAliases.push_back(I);
- I->setAliasee(0);
+ I->setAliasee(nullptr);
}
if (!DeadFunctions.empty()) {
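
GlobalDCE now buckets discardable globals by comdat and keeps an entire bucket alive as soon as one member is non-discardable. A standalone sketch of that multimap / upper_bound / all_of walk, with std::string keys standing in for const Comdat* and a bool standing in for isDiscardableIfUnused():

#include <algorithm>
#include <iostream>
#include <map>
#include <string>
#include <utility>

int main() {
  typedef std::multimap<std::string, bool> ComdatGVPairsTy; // value: discardable?
  ComdatGVPairsTy ComdatGVPairs;
  ComdatGVPairs.insert(std::make_pair("c1", true));
  ComdatGVPairs.insert(std::make_pair("c1", false)); // one member must be kept
  ComdatGVPairs.insert(std::make_pair("c2", true));
  ComdatGVPairs.insert(std::make_pair("c2", true));

  for (ComdatGVPairsTy::iterator I = ComdatGVPairs.begin(),
                                 E = ComdatGVPairs.end();
       I != E;) {
    ComdatGVPairsTy::iterator UB = ComdatGVPairs.upper_bound(I->first);
    // The group is discardable only if every member of the comdat is.
    bool CanDiscard = std::all_of(I, UB, [](ComdatGVPairsTy::value_type P) {
      return P.second;
    });
    if (!CanDiscard)
      std::for_each(I, UB, [](ComdatGVPairsTy::value_type P) {
        std::cout << P.first << ": member kept alive\n"; // GlobalIsNeeded(...)
      });
    I = UB;
  }
  return 0;
}
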
diff --git a/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index 2ea89a1..c1d0d3bc 100644
--- a/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -13,37 +13,41 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "globalopt"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/Transforms/Utils/CtorUtils.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
+#include <deque>
using namespace llvm;
+#define DEBUG_TYPE "globalopt"
+
STATISTIC(NumMarked , "Number of globals marked constant");
STATISTIC(NumUnnamed , "Number of globals marked unnamed_addr");
STATISTIC(NumSRA , "Number of aggregate globals broken into scalars");
@@ -63,7 +67,7 @@ STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");
namespace {
struct GlobalOpt : public ModulePass {
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<TargetLibraryInfo>();
}
static char ID; // Pass identification, replacement for typeid
@@ -71,20 +75,18 @@ namespace {
initializeGlobalOptPass(*PassRegistry::getPassRegistry());
}
- bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
private:
- GlobalVariable *FindGlobalCtors(Module &M);
bool OptimizeFunctions(Module &M);
bool OptimizeGlobalVars(Module &M);
bool OptimizeGlobalAliases(Module &M);
- bool OptimizeGlobalCtorsList(GlobalVariable *&GCL);
bool ProcessGlobal(GlobalVariable *GV,Module::global_iterator &GVI);
bool ProcessInternalGlobal(GlobalVariable *GV,Module::global_iterator &GVI,
const GlobalStatus &GS);
bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);
- DataLayout *TD;
+ const DataLayout *DL;
TargetLibraryInfo *TLI;
};
}
@@ -196,7 +198,7 @@ static bool CleanupPointerRootUsers(GlobalVariable *GV,
SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;
// Constants can't be pointers to dynamically allocated memory.
- for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
+ for (Value::user_iterator UI = GV->user_begin(), E = GV->user_end();
UI != E;) {
User *U = *UI++;
if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
@@ -266,13 +268,14 @@ static bool CleanupPointerRootUsers(GlobalVariable *GV,
/// quick scan over the use list to clean up the easy and obvious cruft. This
/// returns true if it made a change.
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
- DataLayout *TD, TargetLibraryInfo *TLI) {
+ const DataLayout *DL,
+ TargetLibraryInfo *TLI) {
bool Changed = false;
// Note that we need to use a weak value handle for the worklist items. When
// we delete a constant array, we may also be holding pointer to one of its
// elements (or an element of one of its elements if we're dealing with an
// array of arrays) in the worklist.
- SmallVector<WeakVH, 8> WorkList(V->use_begin(), V->use_end());
+ SmallVector<WeakVH, 8> WorkList(V->user_begin(), V->user_end());
while (!WorkList.empty()) {
Value *UV = WorkList.pop_back_val();
if (!UV)
@@ -293,14 +296,15 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
Changed = true;
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
if (CE->getOpcode() == Instruction::GetElementPtr) {
- Constant *SubInit = 0;
+ Constant *SubInit = nullptr;
if (Init)
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
- Changed |= CleanupConstantGlobalUsers(CE, SubInit, TD, TLI);
- } else if (CE->getOpcode() == Instruction::BitCast &&
- CE->getType()->isPointerTy()) {
+ Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, TLI);
+ } else if ((CE->getOpcode() == Instruction::BitCast &&
+ CE->getType()->isPointerTy()) ||
+ CE->getOpcode() == Instruction::AddrSpaceCast) {
// Pointer cast, delete any stores and memsets to the global.
- Changed |= CleanupConstantGlobalUsers(CE, 0, TD, TLI);
+ Changed |= CleanupConstantGlobalUsers(CE, nullptr, DL, TLI);
}
if (CE->use_empty()) {
@@ -311,10 +315,10 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
// Do not transform "gepinst (gep constexpr (GV))" here, because forming
// "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
// and will invalidate our notion of what Init is.
- Constant *SubInit = 0;
+ Constant *SubInit = nullptr;
if (!isa<ConstantExpr>(GEP->getOperand(0))) {
ConstantExpr *CE =
- dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, TD, TLI));
+ dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, DL, TLI));
if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
@@ -324,7 +328,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
SubInit = Constant::getNullValue(GEP->getType()->getElementType());
}
- Changed |= CleanupConstantGlobalUsers(GEP, SubInit, TD, TLI);
+ Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, TLI);
if (GEP->use_empty()) {
GEP->eraseFromParent();
@@ -341,7 +345,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
// us, and if they are all dead, nuke them without remorse.
if (isSafeToDestroyConstant(C)) {
C->destroyConstant();
- CleanupConstantGlobalUsers(V, Init, TD, TLI);
+ CleanupConstantGlobalUsers(V, Init, DL, TLI);
return true;
}
}
@@ -368,15 +372,14 @@ static bool isSafeSROAElementUse(Value *V) {
// Otherwise, it must be a GEP.
GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I);
- if (GEPI == 0) return false;
+ if (!GEPI) return false;
if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) ||
!cast<Constant>(GEPI->getOperand(1))->isNullValue())
return false;
- for (Value::use_iterator I = GEPI->use_begin(), E = GEPI->use_end();
- I != E; ++I)
- if (!isSafeSROAElementUse(*I))
+ for (User *U : GEPI->users())
+ if (!isSafeSROAElementUse(U))
return false;
return true;
}
@@ -442,9 +445,10 @@ static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
}
}
- for (Value::use_iterator I = U->use_begin(), E = U->use_end(); I != E; ++I)
- if (!isSafeSROAElementUse(*I))
+ for (User *UU : U->users())
+ if (!isSafeSROAElementUse(UU))
return false;
+
return true;
}
@@ -452,11 +456,10 @@ static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
/// is safe for us to perform this transformation.
///
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
- for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
- UI != E; ++UI) {
- if (!IsUserOfGlobalSafeForSRA(*UI, GV))
+ for (User *U : GV->users())
+ if (!IsUserOfGlobalSafeForSRA(U, GV))
return false;
- }
+
return true;
}
@@ -466,10 +469,10 @@ static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
/// behavior of the program in a more fine-grained way. We have determined that
/// this transformation is safe already. We return the first global variable we
/// insert so that the caller can reprocess it.
-static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
+static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
// Make sure this global only has simple uses that we can SRA.
if (!GlobalUsersSafeToSRA(GV))
- return 0;
+ return nullptr;
assert(GV->hasLocalLinkage() && !GV->isConstant());
Constant *Init = GV->getInitializer();
@@ -481,11 +484,11 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
// Get the alignment of the global, either explicit or target-specific.
unsigned StartAlignment = GV->getAlignment();
if (StartAlignment == 0)
- StartAlignment = TD.getABITypeAlignment(GV->getType());
+ StartAlignment = DL.getABITypeAlignment(GV->getType());
if (StructType *STy = dyn_cast<StructType>(Ty)) {
NewGlobals.reserve(STy->getNumElements());
- const StructLayout &Layout = *TD.getStructLayout(STy);
+ const StructLayout &Layout = *DL.getStructLayout(STy);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
Constant *In = Init->getAggregateElement(i);
assert(In && "Couldn't get element of initializer?");
@@ -502,7 +505,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
// propagate info to each field.
uint64_t FieldOffset = Layout.getElementOffset(i);
unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
- if (NewAlign > TD.getABITypeAlignment(STy->getElementType(i)))
+ if (NewAlign > DL.getABITypeAlignment(STy->getElementType(i)))
NGV->setAlignment(NewAlign);
}
} else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
@@ -513,11 +516,11 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
NumElements = cast<VectorType>(STy)->getNumElements();
if (NumElements > 16 && GV->hasNUsesOrMore(16))
- return 0; // It's not worth it.
+ return nullptr; // It's not worth it.
NewGlobals.reserve(NumElements);
- uint64_t EltSize = TD.getTypeAllocSize(STy->getElementType());
- unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType());
+ uint64_t EltSize = DL.getTypeAllocSize(STy->getElementType());
+ unsigned EltAlign = DL.getABITypeAlignment(STy->getElementType());
for (unsigned i = 0, e = NumElements; i != e; ++i) {
Constant *In = Init->getAggregateElement(i);
assert(In && "Couldn't get element of initializer?");
@@ -540,7 +543,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
}
if (NewGlobals.empty())
- return 0;
+ return nullptr;
DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV);
@@ -549,7 +552,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
// Loop over all of the uses of the global, replacing the constantexpr geps,
// with smaller constantexpr geps or direct references.
while (!GV->use_empty()) {
- User *GEP = GV->use_back();
+ User *GEP = GV->user_back();
assert(((isa<ConstantExpr>(GEP) &&
cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");
@@ -602,7 +605,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
if (FirstGlobal == i) ++FirstGlobal;
}
- return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : 0;
+ return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : nullptr;
}
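
When SRAGlobal splits a struct global, each new per-field global keeps the original alignment only as far as its byte offset allows. A small sketch of that computation, using the DataLayout, StructLayout, and MinAlign calls visible in the hunk above; the surrounding pass supplies the real global and layout:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

static unsigned fieldAlignment(const DataLayout &DL, StructType *STy,
                               unsigned StartAlignment, unsigned FieldNo) {
  const StructLayout &Layout = *DL.getStructLayout(STy);
  uint64_t FieldOffset = Layout.getElementOffset(FieldNo);
  // A field at offset 8 inside a 16-byte-aligned global is only 8-aligned.
  return (unsigned)MinAlign(StartAlignment, FieldOffset);
}
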
/// AllUsesOfValueWillTrapIfNull - Return true if all users of the specified
@@ -610,10 +613,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
/// phi nodes we've seen to avoid reprocessing them.
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
SmallPtrSet<const PHINode*, 8> &PHIs) {
- for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
- ++UI) {
- const User *U = *UI;
-
+ for (const User *U : V->users())
if (isa<LoadInst>(U)) {
// Will trap.
} else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
@@ -641,13 +641,13 @@ static bool AllUsesOfValueWillTrapIfNull(const Value *V,
if (PHIs.insert(PN) && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
return false;
} else if (isa<ICmpInst>(U) &&
- isa<ConstantPointerNull>(UI->getOperand(1))) {
+ isa<ConstantPointerNull>(U->getOperand(1))) {
// Ignore icmp X, null
} else {
//cerr << "NONTRAPPING USE: " << *U;
return false;
}
- }
+
return true;
}
@@ -655,10 +655,7 @@ static bool AllUsesOfValueWillTrapIfNull(const Value *V,
/// from GV will trap if the loaded value is null. Note that this also permits
/// comparisons of the loaded value against null, as a special case.
static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
- for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
- UI != E; ++UI) {
- const User *U = *UI;
-
+ for (const User *U : GV->users())
if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
SmallPtrSet<const PHINode*, 8> PHIs;
if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
@@ -670,13 +667,12 @@ static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
//cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
return false;
}
- }
return true;
}
static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
bool Changed = false;
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) {
+ for (auto UI = V->user_begin(), E = V->user_end(); UI != E; ) {
Instruction *I = cast<Instruction>(*UI++);
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
LI->setOperand(0, NewV);
@@ -702,7 +698,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
if (PassedAsArg) {
// Being passed as an argument also. Be careful to not invalidate UI!
- UI = V->use_begin();
+ UI = V->user_begin();
}
}
} else if (CastInst *CI = dyn_cast<CastInst>(I)) {
@@ -742,7 +738,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
/// if the loaded value is dynamically null, then we know that they cannot be
/// reachable with a null pointer, so we can optimize away the load.
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
- DataLayout *TD,
+ const DataLayout *DL,
TargetLibraryInfo *TLI) {
bool Changed = false;
@@ -751,7 +747,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
bool AllNonStoreUsesGone = true;
// Replace all uses of loads with uses of uses of the stored value.
- for (Value::use_iterator GUI = GV->use_begin(), E = GV->use_end(); GUI != E;){
+ for (Value::user_iterator GUI = GV->user_begin(), E = GV->user_end(); GUI != E;){
User *GlobalUser = *GUI++;
if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
@@ -791,7 +787,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
Changed |= CleanupPointerRootUsers(GV, TLI);
} else {
Changed = true;
- CleanupConstantGlobalUsers(GV, 0, TD, TLI);
+ CleanupConstantGlobalUsers(GV, nullptr, DL, TLI);
}
if (GV->use_empty()) {
DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n");
@@ -805,11 +801,11 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
/// instructions that are foldable.
-static void ConstantPropUsersOf(Value *V,
- DataLayout *TD, TargetLibraryInfo *TLI) {
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
+static void ConstantPropUsersOf(Value *V, const DataLayout *DL,
+ TargetLibraryInfo *TLI) {
+ for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E; )
if (Instruction *I = dyn_cast<Instruction>(*UI++))
- if (Constant *NewC = ConstantFoldInstruction(I, TD, TLI)) {
+ if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
I->replaceAllUsesWith(NewC);
// Advance UI to the next non-I use to avoid invalidating it!
@@ -829,7 +825,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
CallInst *CI,
Type *AllocTy,
ConstantInt *NElements,
- DataLayout *TD,
+ const DataLayout *DL,
TargetLibraryInfo *TLI) {
DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n');
@@ -853,9 +849,9 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
// If there are bitcast users of the malloc (which is typical, usually we have
// a malloc + bitcast) then replace them with uses of the new global. Update
// other users to use the global as well.
- BitCastInst *TheBC = 0;
+ BitCastInst *TheBC = nullptr;
while (!CI->use_empty()) {
- Instruction *User = cast<Instruction>(CI->use_back());
+ Instruction *User = cast<Instruction>(CI->user_back());
if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
if (BCI->getType() == NewGV->getType()) {
BCI->replaceAllUsesWith(NewGV);
@@ -864,7 +860,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
BCI->setOperand(0, NewGV);
}
} else {
- if (TheBC == 0)
+ if (!TheBC)
TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
User->replaceUsesOfWith(CI, TheBC);
}
@@ -886,7 +882,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
// Loop over all uses of GV, processing them in turn.
while (!GV->use_empty()) {
- if (StoreInst *SI = dyn_cast<StoreInst>(GV->use_back())) {
+ if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) {
// The global is initialized when the store to it occurs.
new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,
SI->getOrdering(), SI->getSynchScope(), SI);
@@ -894,15 +890,15 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
continue;
}
- LoadInst *LI = cast<LoadInst>(GV->use_back());
+ LoadInst *LI = cast<LoadInst>(GV->user_back());
while (!LI->use_empty()) {
- Use &LoadUse = LI->use_begin().getUse();
- if (!isa<ICmpInst>(LoadUse.getUser())) {
+ Use &LoadUse = *LI->use_begin();
+ ICmpInst *ICI = dyn_cast<ICmpInst>(LoadUse.getUser());
+ if (!ICI) {
LoadUse = RepValue;
continue;
}
- ICmpInst *ICI = cast<ICmpInst>(LoadUse.getUser());
// Replace the cmp X, 0 with a use of the bool value.
// Sink the load to where the compare was, if atomic rules allow us to.
Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0,
@@ -936,7 +932,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
// If the initialization boolean was used, insert it, otherwise delete it.
if (!InitBoolUsed) {
while (!InitBool->use_empty()) // Delete initializations
- cast<StoreInst>(InitBool->use_back())->eraseFromParent();
+ cast<StoreInst>(InitBool->user_back())->eraseFromParent();
delete InitBool;
} else
GV->getParent()->getGlobalList().insert(GV, InitBool);
@@ -948,9 +944,9 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
// To further other optimizations, loop over all users of NewGV and try to
// constant prop them. This will promote GEP instructions with constant
// indices into GEP constant-exprs, which will allow global-opt to hack on it.
- ConstantPropUsersOf(NewGV, TD, TLI);
+ ConstantPropUsersOf(NewGV, DL, TLI);
if (RepValue != NewGV)
- ConstantPropUsersOf(RepValue, TD, TLI);
+ ConstantPropUsersOf(RepValue, DL, TLI);
return NewGV;
}
@@ -962,9 +958,8 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
const GlobalVariable *GV,
SmallPtrSet<const PHINode*, 8> &PHIs) {
- for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
- UI != E; ++UI) {
- const Instruction *Inst = cast<Instruction>(*UI);
+ for (const User *U : V->users()) {
+ const Instruction *Inst = cast<Instruction>(U);
if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
continue; // Fine, ignore.
@@ -1011,7 +1006,7 @@ static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
GlobalVariable *GV) {
while (!Alloc->use_empty()) {
- Instruction *U = cast<Instruction>(*Alloc->use_begin());
+ Instruction *U = cast<Instruction>(*Alloc->user_begin());
Instruction *InsertPt = U;
if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
// If this is the store of the allocation into the global, remove it.
@@ -1022,7 +1017,7 @@ static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
} else if (PHINode *PN = dyn_cast<PHINode>(U)) {
// Insert the load in the corresponding predecessor, not right before the
// PHI.
- InsertPt = PN->getIncomingBlock(Alloc->use_begin())->getTerminator();
+ InsertPt = PN->getIncomingBlock(*Alloc->use_begin())->getTerminator();
} else if (isa<BitCastInst>(U)) {
// Must be bitcast between the malloc and store to initialize the global.
ReplaceUsesOfMallocWithGlobal(U, GV);
@@ -1032,7 +1027,7 @@ static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
// If this is a "GEP bitcast" and the user is a store to the global, then
// just process it as a bitcast.
if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
- if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->use_back()))
+ if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->user_back()))
if (SI->getOperand(1) == GV) {
// Must be bitcast GEP between the malloc and store to initialize
// the global.
@@ -1056,19 +1051,18 @@ static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
SmallPtrSet<const PHINode*, 32> &LoadUsingPHIsPerLoad) {
// We permit two users of the load: setcc comparing against the null
// pointer, and a getelementptr of a specific form.
- for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
- ++UI) {
- const Instruction *User = cast<Instruction>(*UI);
+ for (const User *U : V->users()) {
+ const Instruction *UI = cast<Instruction>(U);
// Comparison against null is ok.
- if (const ICmpInst *ICI = dyn_cast<ICmpInst>(User)) {
+ if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UI)) {
if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
return false;
continue;
}
// getelementptr is also ok, but only a simple form.
- if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
+ if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) {
// Must index into the array and into the struct.
if (GEPI->getNumOperands() < 3)
return false;
@@ -1077,7 +1071,7 @@ static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
continue;
}
- if (const PHINode *PN = dyn_cast<PHINode>(User)) {
+ if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
if (!LoadUsingPHIsPerLoad.insert(PN))
// This means some phi nodes are dependent on each other.
// Avoid infinite looping!
@@ -1108,9 +1102,8 @@ static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
Instruction *StoredVal) {
SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
- for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
- UI != E; ++UI)
- if (const LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
+ for (const User *U : GV->users())
+ if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
LoadUsingPHIsPerLoad))
return false;
@@ -1178,10 +1171,13 @@ static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
} else if (PHINode *PN = dyn_cast<PHINode>(V)) {
// PN's type is pointer to struct. Make a new PHI of pointer to struct
// field.
- StructType *ST = cast<StructType>(PN->getType()->getPointerElementType());
+ PointerType *PTy = cast<PointerType>(PN->getType());
+ StructType *ST = cast<StructType>(PTy->getElementType());
+
+ unsigned AS = PTy->getAddressSpace();
PHINode *NewPN =
- PHINode::Create(PointerType::getUnqual(ST->getElementType(FieldNo)),
+ PHINode::Create(PointerType::get(ST->getElementType(FieldNo), AS),
PN->getNumIncomingValues(),
PN->getName()+".f"+Twine(FieldNo), PN);
Result = NewPN;
@@ -1249,7 +1245,7 @@ static void RewriteHeapSROALoadUser(Instruction *LoadUser,
// If this is the first time we've seen this PHI, recursively process all
// users.
- for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); UI != E; ) {
+ for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
Instruction *User = cast<Instruction>(*UI++);
RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
}
@@ -1262,8 +1258,7 @@ static void RewriteHeapSROALoadUser(Instruction *LoadUser,
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
- for (Value::use_iterator UI = Load->use_begin(), E = Load->use_end();
- UI != E; ) {
+ for (auto UI = Load->user_begin(), E = Load->user_end(); UI != E;) {
Instruction *User = cast<Instruction>(*UI++);
RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
}
@@ -1277,7 +1272,7 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
/// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break
/// it up into multiple allocations of arrays of the fields.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
- Value *NElems, DataLayout *TD,
+ Value *NElems, const DataLayout *DL,
const TargetLibraryInfo *TLI) {
DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n');
Type *MAT = getMallocAllocatedType(CI, TLI);
@@ -1294,9 +1289,10 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
std::vector<Value*> FieldGlobals;
std::vector<Value*> FieldMallocs;
+ unsigned AS = GV->getType()->getPointerAddressSpace();
for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){
Type *FieldTy = STy->getElementType(FieldNo);
- PointerType *PFieldTy = PointerType::getUnqual(FieldTy);
+ PointerType *PFieldTy = PointerType::get(FieldTy, AS);
GlobalVariable *NGV =
new GlobalVariable(*GV->getParent(),
@@ -1306,13 +1302,13 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
GV->getThreadLocalMode());
FieldGlobals.push_back(NGV);
- unsigned TypeSize = TD->getTypeAllocSize(FieldTy);
+ unsigned TypeSize = DL->getTypeAllocSize(FieldTy);
if (StructType *ST = dyn_cast<StructType>(FieldTy))
- TypeSize = TD->getStructLayout(ST)->getSizeInBytes();
- Type *IntPtrTy = TD->getIntPtrType(CI->getType());
+ TypeSize = DL->getStructLayout(ST)->getSizeInBytes();
+ Type *IntPtrTy = DL->getIntPtrType(CI->getType());
Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
ConstantInt::get(IntPtrTy, TypeSize),
- NElems, 0,
+ NElems, nullptr,
CI->getName() + ".f" + Twine(FieldNo));
FieldMallocs.push_back(NMI);
new StoreInst(NMI, NGV, CI);
@@ -1394,7 +1390,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
// Okay, the malloc site is completely handled. All of the uses of GV are now
// loads, and all uses of those loads are simple. Rewrite them to use loads
// of the per-field globals instead.
- for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;) {
+ for (auto UI = GV->user_begin(), E = GV->user_end(); UI != E;) {
Instruction *User = cast<Instruction>(*UI++);
if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
@@ -1469,9 +1465,9 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
Type *AllocTy,
AtomicOrdering Ordering,
Module::global_iterator &GVI,
- DataLayout *TD,
+ const DataLayout *DL,
TargetLibraryInfo *TLI) {
- if (!TD)
+ if (!DL)
return false;
// If this is a malloc of an abstract type, don't touch it.
@@ -1501,7 +1497,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// This eliminates dynamic allocation, avoids an indirection accessing the
// data, and exposes the resultant global to further GlobalOpt.
// We cannot optimize the malloc if we cannot determine malloc array size.
- Value *NElems = getMallocArraySize(CI, TD, TLI, true);
+ Value *NElems = getMallocArraySize(CI, DL, TLI, true);
if (!NElems)
return false;
@@ -1509,8 +1505,8 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// Restrict this transformation to work only on small allocations
// (currently 2048 bytes), as we don't want to introduce something
// like a 16M global.
- if (NElements->getZExtValue() * TD->getTypeAllocSize(AllocTy) < 2048) {
- GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, TD, TLI);
+ if (NElements->getZExtValue() * DL->getTypeAllocSize(AllocTy) < 2048) {
+ GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI);
return true;
}
@@ -1539,13 +1535,13 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// If this is a fixed size array, transform the Malloc to be an alloc of
// structs. malloc [100 x struct],1 -> malloc struct, 100
if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
- Type *IntPtrTy = TD->getIntPtrType(CI->getType());
- unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();
+ Type *IntPtrTy = DL->getIntPtrType(CI->getType());
+ unsigned TypeSize = DL->getStructLayout(AllocSTy)->getSizeInBytes();
Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy,
AllocSize, NumElements,
- 0, CI->getName());
+ nullptr, CI->getName());
Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
CI->replaceAllUsesWith(Cast);
CI->eraseFromParent();
@@ -1555,8 +1551,8 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
CI = cast<CallInst>(Malloc);
}
- GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, TLI, true),
- TD, TLI);
+ GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true),
+ DL, TLI);
return true;
}
@@ -1568,7 +1564,8 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
AtomicOrdering Ordering,
Module::global_iterator &GVI,
- DataLayout *TD, TargetLibraryInfo *TLI) {
+ const DataLayout *DL,
+ TargetLibraryInfo *TLI) {
// Ignore no-op GEPs and bitcasts.
StoredOnceVal = StoredOnceVal->stripPointerCasts();
@@ -1583,13 +1580,13 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());
// Optimize away any trapping uses of the loaded value.
- if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, TD, TLI))
+ if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, TLI))
return true;
} else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) {
Type *MallocType = getMallocAllocatedType(CI, TLI);
if (MallocType &&
TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, Ordering, GVI,
- TD, TLI))
+ DL, TLI))
return true;
}
}
@@ -1616,11 +1613,9 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
// Walk the use list of the global seeing if all the uses are load or store.
// If there is anything else, bail out.
- for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I){
- User *U = *I;
+ for (User *U : GV->users())
if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
return false;
- }
DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV);
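A minimal, hypothetical illustration of the shrink-to-bool transformation: when the only values ever stored to a global are its initializer and one other constant, the global can be tracked as a single flag.

// Only two values ever reach State: the initializer 0 and the constant 42.
static int State = 0;

void arm()     { State = 42; }
int  isArmed() { return State == 42; }

// Since no third value is ever stored, GlobalOpt can replace State with a
// boolean and rewrite the store and load sites to use true/false instead.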
@@ -1645,7 +1640,7 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
IsOneZero = InitVal->isNullValue() && CI->isOne();
while (!GV->use_empty()) {
- Instruction *UI = cast<Instruction>(GV->use_back());
+ Instruction *UI = cast<Instruction>(GV->user_back());
if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
// Change the store into a boolean store.
bool StoringOther = SI->getOperand(0) == OtherVal;
@@ -1705,9 +1700,6 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
/// possible. If we make a change, return true.
bool GlobalOpt::ProcessGlobal(GlobalVariable *GV,
Module::global_iterator &GVI) {
- if (!GV->isDiscardableIfUnused())
- return false;
-
// Do more involved optimizations if the global is internal.
GV->removeDeadConstantUsers();
@@ -1746,7 +1738,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
// and this function is main (which we know is not recursive), we replace
// the global with a local alloca in this function.
//
- // NOTE: It doesn't make sense to promote non single-value types since we
+ // NOTE: It doesn't make sense to promote non-single-value types since we
// are just replacing static memory to stack memory.
//
// If the global is in different address space, don't bring it to stack.
@@ -1761,7 +1753,8 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
->getEntryBlock().begin());
Type *ElemTy = GV->getType()->getElementType();
// FIXME: Pass Global's alignment when globals have alignment
- AllocaInst *Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI);
+ AllocaInst *Alloca = new AllocaInst(ElemTy, nullptr,
+ GV->getName(), &FirstI);
if (!isa<UndefValue>(GV->getInitializer()))
new StoreInst(GV->getInitializer(), Alloca, &FirstI);
@@ -1783,7 +1776,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
} else {
// Delete any stores we can find to the global. We may not be able to
// make it completely dead though.
- Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);
+ Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
}
// If the global is dead now, delete it.
@@ -1799,7 +1792,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
GV->setConstant(true);
// Clean up any obviously simplifiable users now.
- CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);
+ CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
// If the global is dead now, just nuke it.
if (GV->use_empty()) {
@@ -1812,11 +1805,13 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
++NumMarked;
return true;
} else if (!GV->getInitializer()->getType()->isSingleValueType()) {
- if (DataLayout *TD = getAnalysisIfAvailable<DataLayout>())
- if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *TD)) {
+ if (DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>()) {
+ const DataLayout &DL = DLP->getDataLayout();
+ if (GlobalVariable *FirstNewGV = SRAGlobal(GV, DL)) {
GVI = FirstNewGV; // Don't skip the newly produced globals!
return true;
}
+ }
} else if (GS.StoredType == GlobalStatus::StoredOnce) {
// If the initial value for the global was an undef value, and if only
// one other value was stored into it, we can just change the
@@ -1828,7 +1823,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
GV->setInitializer(SOVConstant);
// Clean up any obviously simplifiable users now.
- CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);
+ CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
if (GV->use_empty()) {
DEBUG(dbgs() << " *** Substituting initializer allowed us to "
@@ -1845,7 +1840,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
// Try to optimize globals based on the knowledge that only one value
// (besides its initializer) is ever stored to the global.
if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, GVI,
- TD, TLI))
+ DL, TLI))
return true;
// Otherwise, if the global was not a boolean, we can shrink it to be a
@@ -1866,11 +1861,11 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
/// ChangeCalleesToFastCall - Walk all of the direct calls of the specified
/// function, changing them to FastCC.
static void ChangeCalleesToFastCall(Function *F) {
- for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){
- if (isa<BlockAddress>(*UI))
+ for (User *U : F->users()) {
+ if (isa<BlockAddress>(U))
continue;
- CallSite User(cast<Instruction>(*UI));
- User.setCallingConv(CallingConv::Fast);
+ CallSite CS(cast<Instruction>(U));
+ CS.setCallingConv(CallingConv::Fast);
}
}
@@ -1889,21 +1884,31 @@ static AttributeSet StripNest(LLVMContext &C, const AttributeSet &Attrs) {
static void RemoveNestAttribute(Function *F) {
F->setAttributes(StripNest(F->getContext(), F->getAttributes()));
- for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){
- if (isa<BlockAddress>(*UI))
+ for (User *U : F->users()) {
+ if (isa<BlockAddress>(U))
continue;
- CallSite User(cast<Instruction>(*UI));
- User.setAttributes(StripNest(F->getContext(), User.getAttributes()));
+ CallSite CS(cast<Instruction>(U));
+ CS.setAttributes(StripNest(F->getContext(), CS.getAttributes()));
}
}
+/// Return true if this is a calling convention that we'd like to change. The
+/// idea here is that we don't want to mess with the convention if the user
+/// explicitly requested something with performance implications like coldcc,
+/// GHC, or anyregcc.
+static bool isProfitableToMakeFastCC(Function *F) {
+ CallingConv::ID CC = F->getCallingConv();
+ // FIXME: Is it worth transforming x86_stdcallcc and x86_fastcallcc?
+ return CC == CallingConv::C || CC == CallingConv::X86_ThisCall;
+}
+
bool GlobalOpt::OptimizeFunctions(Module &M) {
bool Changed = false;
// Optimize functions.
for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) {
Function *F = FI++;
// Functions without names cannot be referenced outside this module.
- if (!F->hasName() && !F->isDeclaration())
+ if (!F->hasName() && !F->isDeclaration() && !F->hasLocalLinkage())
F->setLinkage(GlobalValue::InternalLinkage);
F->removeDeadConstantUsers();
if (F->isDefTriviallyDead()) {
@@ -1911,11 +1916,11 @@ bool GlobalOpt::OptimizeFunctions(Module &M) {
Changed = true;
++NumFnDeleted;
} else if (F->hasLocalLinkage()) {
- if (F->getCallingConv() == CallingConv::C && !F->isVarArg() &&
+ if (isProfitableToMakeFastCC(F) && !F->isVarArg() &&
!F->hasAddressTaken()) {
- // If this function has C calling conventions, is not a varargs
- // function, and is only called directly, promote it to use the Fast
- // calling convention.
+ // If this function has a calling convention worth changing, is not a
+ // varargs function, and is only called directly, promote it to use the
+ // Fast calling convention.
F->setCallingConv(CallingConv::Fast);
ChangeCalleesToFastCall(F);
++NumFastCallFns;
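A hypothetical example of the kind of function this promotion targets: a file-local helper using the plain C calling convention that is neither varargs nor address-taken can be retagged as fastcc along with all of its direct call sites.

static int helper(int X) { return X * 3; }   // internal linkage, not varargs

int caller(int V) { return helper(V) + 1; }  // only direct calls, no &helper

// Nothing outside this translation unit can observe helper's calling
// convention, so GlobalOpt may switch it (and the call in caller) to fastcc.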
@@ -1937,139 +1942,41 @@ bool GlobalOpt::OptimizeFunctions(Module &M) {
bool GlobalOpt::OptimizeGlobalVars(Module &M) {
bool Changed = false;
+
+ SmallSet<const Comdat *, 8> NotDiscardableComdats;
+ for (const GlobalVariable &GV : M.globals())
+ if (const Comdat *C = GV.getComdat())
+ if (!GV.isDiscardableIfUnused())
+ NotDiscardableComdats.insert(C);
+
for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
GVI != E; ) {
GlobalVariable *GV = GVI++;
// Global variables without names cannot be referenced outside this module.
- if (!GV->hasName() && !GV->isDeclaration())
+ if (!GV->hasName() && !GV->isDeclaration() && !GV->hasLocalLinkage())
GV->setLinkage(GlobalValue::InternalLinkage);
// Simplify the initializer.
if (GV->hasInitializer())
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) {
- Constant *New = ConstantFoldConstantExpression(CE, TD, TLI);
+ Constant *New = ConstantFoldConstantExpression(CE, DL, TLI);
if (New && New != CE)
GV->setInitializer(New);
}
- Changed |= ProcessGlobal(GV, GVI);
- }
- return Changed;
-}
-
-/// FindGlobalCtors - Find the llvm.global_ctors list, verifying that all
-/// initializers have an init priority of 65535.
-GlobalVariable *GlobalOpt::FindGlobalCtors(Module &M) {
- GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
- if (GV == 0) return 0;
-
- // Verify that the initializer is simple enough for us to handle. We are
- // only allowed to optimize the initializer if it is unique.
- if (!GV->hasUniqueInitializer()) return 0;
-
- if (isa<ConstantAggregateZero>(GV->getInitializer()))
- return GV;
- ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
-
- for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) {
- if (isa<ConstantAggregateZero>(*i))
- continue;
- ConstantStruct *CS = cast<ConstantStruct>(*i);
- if (isa<ConstantPointerNull>(CS->getOperand(1)))
- continue;
-
- // Must have a function or null ptr.
- if (!isa<Function>(CS->getOperand(1)))
- return 0;
-
- // Init priority must be standard.
- ConstantInt *CI = cast<ConstantInt>(CS->getOperand(0));
- if (CI->getZExtValue() != 65535)
- return 0;
- }
-
- return GV;
-}
-
-/// ParseGlobalCtors - Given a llvm.global_ctors list that we can understand,
-/// return a list of the functions and null terminator as a vector.
-static std::vector<Function*> ParseGlobalCtors(GlobalVariable *GV) {
- if (GV->getInitializer()->isNullValue())
- return std::vector<Function*>();
- ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
- std::vector<Function*> Result;
- Result.reserve(CA->getNumOperands());
- for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) {
- ConstantStruct *CS = cast<ConstantStruct>(*i);
- Result.push_back(dyn_cast<Function>(CS->getOperand(1)));
- }
- return Result;
-}
-
-/// InstallGlobalCtors - Given a specified llvm.global_ctors list, install the
-/// specified array, returning the new global to use.
-static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
- const std::vector<Function*> &Ctors) {
- // If we made a change, reassemble the initializer list.
- Constant *CSVals[2];
- CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()), 65535);
- CSVals[1] = 0;
-
- StructType *StructTy =
- cast<StructType>(GCL->getType()->getElementType()->getArrayElementType());
-
- // Create the new init list.
- std::vector<Constant*> CAList;
- for (unsigned i = 0, e = Ctors.size(); i != e; ++i) {
- if (Ctors[i]) {
- CSVals[1] = Ctors[i];
- } else {
- Type *FTy = FunctionType::get(Type::getVoidTy(GCL->getContext()),
- false);
- PointerType *PFTy = PointerType::getUnqual(FTy);
- CSVals[1] = Constant::getNullValue(PFTy);
- CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()),
- 0x7fffffff);
+ if (GV->isDiscardableIfUnused()) {
+ if (const Comdat *C = GV->getComdat())
+ if (NotDiscardableComdats.count(C))
+ continue;
+ Changed |= ProcessGlobal(GV, GVI);
}
- CAList.push_back(ConstantStruct::get(StructTy, CSVals));
- }
-
- // Create the array initializer.
- Constant *CA = ConstantArray::get(ArrayType::get(StructTy,
- CAList.size()), CAList);
-
- // If we didn't change the number of elements, don't create a new GV.
- if (CA->getType() == GCL->getInitializer()->getType()) {
- GCL->setInitializer(CA);
- return GCL;
- }
-
- // Create the new global and insert it next to the existing list.
- GlobalVariable *NGV = new GlobalVariable(CA->getType(), GCL->isConstant(),
- GCL->getLinkage(), CA, "",
- GCL->getThreadLocalMode());
- GCL->getParent()->getGlobalList().insert(GCL, NGV);
- NGV->takeName(GCL);
-
- // Nuke the old list, replacing any uses with the new one.
- if (!GCL->use_empty()) {
- Constant *V = NGV;
- if (V->getType() != GCL->getType())
- V = ConstantExpr::getBitCast(V, GCL->getType());
- GCL->replaceAllUsesWith(V);
}
- GCL->eraseFromParent();
-
- if (Ctors.size())
- return NGV;
- else
- return 0;
+ return Changed;
}
-
static inline bool
isSimpleEnoughValueToCommit(Constant *C,
SmallPtrSet<Constant*, 8> &SimpleConstants,
- const DataLayout *TD);
+ const DataLayout *DL);
/// isSimpleEnoughValueToCommit - Return true if the specified constant can be
@@ -2082,11 +1989,14 @@ isSimpleEnoughValueToCommit(Constant *C,
/// time.
static bool isSimpleEnoughValueToCommitHelper(Constant *C,
SmallPtrSet<Constant*, 8> &SimpleConstants,
- const DataLayout *TD) {
- // Simple integer, undef, constant aggregate zero, global addresses, etc are
- // all supported.
- if (C->getNumOperands() == 0 || isa<BlockAddress>(C) ||
- isa<GlobalValue>(C))
+ const DataLayout *DL) {
+ // Simple global addresses are supported, do not allow dllimport or
+ // thread-local globals.
+ if (auto *GV = dyn_cast<GlobalValue>(C))
+ return !GV->hasDLLImportStorageClass() && !GV->isThreadLocal();
+
+ // Simple integer, undef, constant aggregate zero, etc are all supported.
+ if (C->getNumOperands() == 0 || isa<BlockAddress>(C))
return true;
// Aggregate values are safe if all their elements are.
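An illustrative, hypothetical global showing what the new global-address guard rejects: the address of a thread-local (or dllimport) global is not a plain link-time constant, so a store of such an address cannot be committed into a static initializer.

thread_local int Counter = 0;

// The address of Counter differs per thread, so an evaluated store of
// &Counter is not "simple enough" to commit; dllimport globals on Windows
// targets are rejected for the analogous reason.
int *CounterPtr = &Counter;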
@@ -2094,7 +2004,7 @@ static bool isSimpleEnoughValueToCommitHelper(Constant *C,
isa<ConstantVector>(C)) {
for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
Constant *Op = cast<Constant>(C->getOperand(i));
- if (!isSimpleEnoughValueToCommit(Op, SimpleConstants, TD))
+ if (!isSimpleEnoughValueToCommit(Op, SimpleConstants, DL))
return false;
}
return true;
@@ -2107,29 +2017,29 @@ static bool isSimpleEnoughValueToCommitHelper(Constant *C,
switch (CE->getOpcode()) {
case Instruction::BitCast:
// Bitcast is fine if the casted value is fine.
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
case Instruction::IntToPtr:
case Instruction::PtrToInt:
// int <=> ptr is fine if the int type is the same size as the
// pointer type.
- if (!TD || TD->getTypeSizeInBits(CE->getType()) !=
- TD->getTypeSizeInBits(CE->getOperand(0)->getType()))
+ if (!DL || DL->getTypeSizeInBits(CE->getType()) !=
+ DL->getTypeSizeInBits(CE->getOperand(0)->getType()))
return false;
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
// GEP is fine if it is simple + constant offset.
case Instruction::GetElementPtr:
for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
if (!isa<ConstantInt>(CE->getOperand(i)))
return false;
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
case Instruction::Add:
// We allow simple+cst.
if (!isa<ConstantInt>(CE->getOperand(1)))
return false;
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
}
return false;
}
@@ -2137,11 +2047,11 @@ static bool isSimpleEnoughValueToCommitHelper(Constant *C,
static inline bool
isSimpleEnoughValueToCommit(Constant *C,
SmallPtrSet<Constant*, 8> &SimpleConstants,
- const DataLayout *TD) {
+ const DataLayout *DL) {
// If we already checked this constant, we win.
if (!SimpleConstants.insert(C)) return true;
// Check the constant.
- return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, TD);
+ return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, DL);
}
@@ -2157,8 +2067,7 @@ static bool isSimpleEnoughPointerToCommit(Constant *C) {
return false;
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
- // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
- // external globals.
+ // Do not allow weak/*_odr/linkonce linkage or external globals.
return GV->hasUniqueInitializer();
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
@@ -2173,7 +2082,7 @@ static bool isSimpleEnoughPointerToCommit(Constant *C) {
return false;
// The first index must be zero.
- ConstantInt *CI = dyn_cast<ConstantInt>(*llvm::next(CE->op_begin()));
+ ConstantInt *CI = dyn_cast<ConstantInt>(*std::next(CE->op_begin()));
if (!CI || !CI->isZero()) return false;
// The remaining indices must be compile-time known integers within the
@@ -2268,24 +2177,18 @@ namespace {
/// Once an evaluation call fails, the evaluation object should not be reused.
class Evaluator {
public:
- Evaluator(const DataLayout *TD, const TargetLibraryInfo *TLI)
- : TD(TD), TLI(TLI) {
- ValueStack.push_back(new DenseMap<Value*, Constant*>);
+ Evaluator(const DataLayout *DL, const TargetLibraryInfo *TLI)
+ : DL(DL), TLI(TLI) {
+ ValueStack.emplace_back();
}
~Evaluator() {
- DeleteContainerPointers(ValueStack);
- while (!AllocaTmps.empty()) {
- GlobalVariable *Tmp = AllocaTmps.back();
- AllocaTmps.pop_back();
-
+ for (auto &Tmp : AllocaTmps)
// If there are still users of the alloca, the program is doing something
// silly, e.g. storing the address of the alloca somewhere and using it
// later. Since this is undefined, we'll just make it be null.
if (!Tmp->use_empty())
Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType()));
- delete Tmp;
- }
}
/// EvaluateFunction - Evaluate a call to function F, returning true if
@@ -2301,13 +2204,13 @@ public:
Constant *getVal(Value *V) {
if (Constant *CV = dyn_cast<Constant>(V)) return CV;
- Constant *R = ValueStack.back()->lookup(V);
+ Constant *R = ValueStack.back().lookup(V);
assert(R && "Reference to an uncomputed value!");
return R;
}
void setVal(Value *V, Constant *C) {
- ValueStack.back()->operator[](V) = C;
+ ValueStack.back()[V] = C;
}
const DenseMap<Constant*, Constant*> &getMutatedMemory() const {
@@ -2322,9 +2225,9 @@ private:
Constant *ComputeLoadResult(Constant *P);
/// ValueStack - As we compute SSA register values, we store their contents
- /// here. The back of the vector contains the current function and the stack
+ /// here. The back of the deque contains the current function and the stack
/// contains the values in the calling frames.
- SmallVector<DenseMap<Value*, Constant*>*, 4> ValueStack;
+ std::deque<DenseMap<Value*, Constant*>> ValueStack;
/// CallStack - This is used to detect recursion. In pathological situations
/// we could hit exponential behavior, but at least there is nothing
@@ -2339,7 +2242,7 @@ private:
/// AllocaTmps - To 'execute' an alloca, we create a temporary global variable
/// to represent its body. This vector is needed so we can delete the
/// temporary globals when we are done.
- SmallVector<GlobalVariable*, 32> AllocaTmps;
+ SmallVector<std::unique_ptr<GlobalVariable>, 32> AllocaTmps;
/// Invariants - These global variables have been marked invariant by the
/// static constructor.
@@ -2349,7 +2252,7 @@ private:
/// simple enough to live in a static initializer of a global.
SmallPtrSet<Constant*, 8> SimpleConstants;
- const DataLayout *TD;
+ const DataLayout *DL;
const TargetLibraryInfo *TLI;
};
@@ -2368,7 +2271,7 @@ Constant *Evaluator::ComputeLoadResult(Constant *P) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
if (GV->hasDefinitiveInitializer())
return GV->getInitializer();
- return 0;
+ return nullptr;
}
// Handle a constantexpr getelementptr.
@@ -2380,7 +2283,7 @@ Constant *Evaluator::ComputeLoadResult(Constant *P) {
return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE);
}
- return 0; // don't know how to evaluate.
+ return nullptr; // don't know how to evaluate.
}
/// EvaluateBlock - Evaluate all instructions in block BB, returning true if
@@ -2390,7 +2293,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
BasicBlock *&NextBB) {
// This is the main evaluation loop.
while (1) {
- Constant *InstResult = 0;
+ Constant *InstResult = nullptr;
DEBUG(dbgs() << "Evaluating Instruction: " << *CurInst << "\n");
@@ -2402,7 +2305,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Constant *Ptr = getVal(SI->getOperand(1));
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr);
- Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
+ Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
DEBUG(dbgs() << "; To: " << *Ptr << "\n");
}
if (!isSimpleEnoughPointerToCommit(Ptr)) {
@@ -2415,7 +2318,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
// If this might be too difficult for the backend to handle (e.g. the addr
// of one global variable divided by another) then we can't commit it.
- if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, TD)) {
+ if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, DL)) {
DEBUG(dbgs() << "Store value is too complex to evaluate store. " << *Val
<< "\n");
return false;
@@ -2447,7 +2350,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
- Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
+ Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
// If we can't improve the situation by introspecting NewTy,
// we have to give up.
@@ -2511,12 +2414,12 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Constant *Ptr = getVal(LI->getOperand(0));
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
- Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
+ Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
DEBUG(dbgs() << "Found a constant pointer expression, constant "
"folding: " << *Ptr << "\n");
}
InstResult = ComputeLoadResult(Ptr);
- if (InstResult == 0) {
+ if (!InstResult) {
DEBUG(dbgs() << "Failed to compute load result. Can not evaluate load."
"\n");
return false; // Could not evaluate load.
@@ -2529,11 +2432,10 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
return false; // Cannot handle array allocs.
}
Type *Ty = AI->getType()->getElementType();
- AllocaTmps.push_back(new GlobalVariable(Ty, false,
- GlobalValue::InternalLinkage,
- UndefValue::get(Ty),
- AI->getName()));
- InstResult = AllocaTmps.back();
+ AllocaTmps.push_back(
+ make_unique<GlobalVariable>(Ty, false, GlobalValue::InternalLinkage,
+ UndefValue::get(Ty), AI->getName()));
+ InstResult = AllocaTmps.back().get();
DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n");
} else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) {
CallSite CS(CurInst);
@@ -2580,7 +2482,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
// We don't insert an entry into Values, as it doesn't have a
// meaningful return value.
if (!II->use_empty()) {
- DEBUG(dbgs() << "Found unused invariant_start. Cant evaluate.\n");
+ DEBUG(dbgs() << "Found unused invariant_start. Can't evaluate.\n");
return false;
}
ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0));
@@ -2588,9 +2490,9 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Value *Ptr = PtrArg->stripPointerCasts();
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
Type *ElemTy = cast<PointerType>(GV->getType())->getElementType();
- if (TD && !Size->isAllOnesValue() &&
+ if (DL && !Size->isAllOnesValue() &&
Size->getValue().getLimitedValue() >=
- TD->getTypeStoreSize(ElemTy)) {
+ DL->getTypeStoreSize(ElemTy)) {
Invariants.insert(GV);
DEBUG(dbgs() << "Found a global var that is an invariant: " << *GV
<< "\n");
@@ -2635,17 +2537,17 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
return false;
}
- Constant *RetVal = 0;
+ Constant *RetVal = nullptr;
// Execute the call, if successful, use the return value.
- ValueStack.push_back(new DenseMap<Value*, Constant*>);
+ ValueStack.emplace_back();
if (!EvaluateFunction(Callee, RetVal, Formals)) {
DEBUG(dbgs() << "Failed to evaluate function.\n");
return false;
}
- delete ValueStack.pop_back_val();
+ ValueStack.pop_back();
InstResult = RetVal;
- if (InstResult != NULL) {
+ if (InstResult) {
DEBUG(dbgs() << "Successfully evaluated function. Result: " <<
InstResult << "\n\n");
} else {
@@ -2677,7 +2579,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
else
return false; // Cannot determine.
} else if (isa<ReturnInst>(CurInst)) {
- NextBB = 0;
+ NextBB = nullptr;
} else {
// invoke, unwind, resume, unreachable.
DEBUG(dbgs() << "Can not handle terminator.");
@@ -2696,7 +2598,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
if (!CurInst->use_empty()) {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult))
- InstResult = ConstantFoldConstantExpression(CE, TD, TLI);
+ InstResult = ConstantFoldConstantExpression(CE, DL, TLI);
setVal(CurInst, InstResult);
}
@@ -2742,13 +2644,13 @@ bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
BasicBlock::iterator CurInst = CurBB->begin();
while (1) {
- BasicBlock *NextBB = 0; // Initialized to avoid compiler warnings.
+ BasicBlock *NextBB = nullptr; // Initialized to avoid compiler warnings.
DEBUG(dbgs() << "Trying to evaluate BB: " << *CurBB << "\n");
if (!EvaluateBlock(CurInst, NextBB))
return false;
- if (NextBB == 0) {
+ if (!NextBB) {
// Successfully running until there's no next block means that we found
// the return. Fill in the return value and pop the call stack.
ReturnInst *RI = cast<ReturnInst>(CurBB->getTerminator());
@@ -2767,7 +2669,7 @@ bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
// Okay, we have never been in this block before. Check to see if there
// are any PHI nodes. If so, evaluate them with information about where
// we came from.
- PHINode *PN = 0;
+ PHINode *PN = nullptr;
for (CurInst = NextBB->begin();
(PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
setVal(PN, getVal(PN->getIncomingValueForBlock(CurBB)));
@@ -2779,15 +2681,17 @@ bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
/// EvaluateStaticConstructor - Evaluate static constructors in the function, if
/// we can. Return true if we can, false otherwise.
-static bool EvaluateStaticConstructor(Function *F, const DataLayout *TD,
+static bool EvaluateStaticConstructor(Function *F, const DataLayout *DL,
const TargetLibraryInfo *TLI) {
// Call the function.
- Evaluator Eval(TD, TLI);
+ Evaluator Eval(DL, TLI);
Constant *RetValDummy;
bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy,
SmallVector<Constant*, 0>());
if (EvalSuccess) {
+ ++NumCtorsEvaluated;
+
// We succeeded at evaluation: commit the result.
DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
<< F->getName() << "' to " << Eval.getMutatedMemory().size()
@@ -2805,46 +2709,6 @@ static bool EvaluateStaticConstructor(Function *F, const DataLayout *TD,
return EvalSuccess;
}
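A hypothetical C++ constructor of the kind this evaluator can usually fold: the dynamic initializer only writes values computable at compile time into the global, so the whole ctor body can be evaluated and the mutated memory committed as a static initializer.

struct Squares {
  int V[4];
  Squares() {                 // runs as part of a global ctor
    for (int I = 0; I != 4; ++I)
      V[I] = I * I;           // no external calls, no unknown memory
  }
};

static Squares Table;         // may be folded to the constant { 0, 1, 4, 9 }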
-/// OptimizeGlobalCtorsList - Simplify and evaluation global ctors if possible.
-/// Return true if anything changed.
-bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
- std::vector<Function*> Ctors = ParseGlobalCtors(GCL);
- bool MadeChange = false;
- if (Ctors.empty()) return false;
-
- // Loop over global ctors, optimizing them when we can.
- for (unsigned i = 0; i != Ctors.size(); ++i) {
- Function *F = Ctors[i];
- // Found a null terminator in the middle of the list, prune off the rest of
- // the list.
- if (F == 0) {
- if (i != Ctors.size()-1) {
- Ctors.resize(i+1);
- MadeChange = true;
- }
- break;
- }
- DEBUG(dbgs() << "Optimizing Global Constructor: " << *F << "\n");
-
- // We cannot simplify external ctor functions.
- if (F->empty()) continue;
-
- // If we can evaluate the ctor at compile time, do.
- if (EvaluateStaticConstructor(F, TD, TLI)) {
- Ctors.erase(Ctors.begin()+i);
- MadeChange = true;
- --i;
- ++NumCtorsEvaluated;
- continue;
- }
- }
-
- if (!MadeChange) return false;
-
- GCL = InstallGlobalCtors(GCL, Ctors);
- return true;
-}
-
static int compareNames(Constant *const *A, Constant *const *B) {
return (*A)->getName().compare((*B)->getName());
}
@@ -2856,12 +2720,14 @@ static void setUsedInitializer(GlobalVariable &V,
return;
}
- SmallVector<llvm::Constant *, 8> UsedArray;
- PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext());
+ // Type of pointer to the array of pointers.
+ PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext(), 0);
+ SmallVector<llvm::Constant *, 8> UsedArray;
for (SmallPtrSet<GlobalValue *, 8>::iterator I = Init.begin(), E = Init.end();
I != E; ++I) {
- Constant *Cast = llvm::ConstantExpr::getBitCast(*I, Int8PtrTy);
+ Constant *Cast
+ = ConstantExpr::getPointerBitCastOrAddrSpaceCast(*I, Int8PtrTy);
UsedArray.push_back(Cast);
}
// Sort to get deterministic order.
@@ -2992,14 +2858,19 @@ bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
I != E;) {
Module::alias_iterator J = I++;
// Aliases without names cannot be referenced outside this module.
- if (!J->hasName() && !J->isDeclaration())
+ if (!J->hasName() && !J->isDeclaration() && !J->hasLocalLinkage())
J->setLinkage(GlobalValue::InternalLinkage);
// If the aliasee may change at link time, nothing can be done - bail out.
if (J->mayBeOverridden())
continue;
Constant *Aliasee = J->getAliasee();
- GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
+ GlobalValue *Target = dyn_cast<GlobalValue>(Aliasee->stripPointerCasts());
+ // We can't trivially replace the alias with the aliasee if the aliasee is
+ // non-trivial in some way.
+ // TODO: Try to handle non-zero GEPs of local aliasees.
+ if (!Target)
+ continue;
Target->removeDeadConstantUsers();
// Make all users of the alias use the aliasee instead.
@@ -3007,7 +2878,7 @@ bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
if (!hasUsesToReplace(*J, Used, RenameTarget))
continue;
- J->replaceAllUsesWith(Aliasee);
+ J->replaceAllUsesWith(ConstantExpr::getBitCast(Aliasee, J->getType()));
++NumAliasesResolved;
Changed = true;
@@ -3015,7 +2886,8 @@ bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
// Give the aliasee the name, linkage and other attributes of the alias.
Target->takeName(J);
Target->setLinkage(J->getLinkage());
- Target->GlobalValue::copyAttributesFrom(J);
+ Target->setVisibility(J->getVisibility());
+ Target->setDLLStorageClass(J->getDLLStorageClass());
if (Used.usedErase(J))
Used.usedInsert(Target);
@@ -3038,12 +2910,12 @@ bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::cxa_atexit))
- return 0;
+ return nullptr;
Function *Fn = M.getFunction(TLI->getName(LibFunc::cxa_atexit));
if (!Fn)
- return 0;
+ return nullptr;
FunctionType *FTy = Fn->getFunctionType();
@@ -3054,7 +2926,7 @@ static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) {
!FTy->getParamType(0)->isPointerTy() ||
!FTy->getParamType(1)->isPointerTy() ||
!FTy->getParamType(2)->isPointerTy())
- return 0;
+ return nullptr;
return Fn;
}
@@ -3122,8 +2994,8 @@ bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
// and remove them.
bool Changed = false;
- for (Function::use_iterator I = CXAAtExitFn->use_begin(),
- E = CXAAtExitFn->use_end(); I != E;) {
+ for (auto I = CXAAtExitFn->user_begin(), E = CXAAtExitFn->user_end();
+ I != E;) {
// We're only interested in calls. Theoretically, we could handle invoke
// instructions as well, but neither llvm-gcc nor clang generate invokes
// to __cxa_atexit.
@@ -3155,12 +3027,10 @@ bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
bool GlobalOpt::runOnModule(Module &M) {
bool Changed = false;
- TD = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = &getAnalysis<TargetLibraryInfo>();
- // Try to find the llvm.globalctors list.
- GlobalVariable *GlobalCtors = FindGlobalCtors(M);
-
bool LocalChange = true;
while (LocalChange) {
LocalChange = false;
@@ -3169,8 +3039,9 @@ bool GlobalOpt::runOnModule(Module &M) {
LocalChange |= OptimizeFunctions(M);
// Optimize global_ctors list.
- if (GlobalCtors)
- LocalChange |= OptimizeGlobalCtorsList(GlobalCtors);
+ LocalChange |= optimizeGlobalCtorsList(M, [&](Function *F) {
+ return EvaluateStaticConstructor(F, DL, TLI);
+ });
// Optimize non-address-taken globals.
LocalChange |= OptimizeGlobalVars(M);
diff --git a/contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp b/contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp
index 4ac1dfc..af541d1 100644
--- a/contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp
@@ -15,18 +15,19 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "ipconstprop"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CallSite.h"
using namespace llvm;
+#define DEBUG_TYPE "ipconstprop"
+
STATISTIC(NumArgumentsProped, "Number of args turned into constants");
STATISTIC(NumReturnValProped, "Number of return values turned into constants");
@@ -39,7 +40,7 @@ namespace {
initializeIPCPPass(*PassRegistry::getPassRegistry());
}
- bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
private:
bool PropagateConstantsIntoArguments(Function &F);
bool PropagateConstantReturn(Function &F);
@@ -86,18 +87,18 @@ bool IPCP::PropagateConstantsIntoArguments(Function &F) {
ArgumentConstants.resize(F.arg_size());
unsigned NumNonconstant = 0;
- for (Value::use_iterator UI = F.use_begin(), E = F.use_end(); UI != E; ++UI) {
- User *U = *UI;
+ for (Use &U : F.uses()) {
+ User *UR = U.getUser();
// Ignore blockaddress uses.
- if (isa<BlockAddress>(U)) continue;
+ if (isa<BlockAddress>(UR)) continue;
// Used by a non-instruction, or not the callee of a function, do not
// transform.
- if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
+ if (!isa<CallInst>(UR) && !isa<InvokeInst>(UR))
return false;
- CallSite CS(cast<Instruction>(U));
- if (!CS.isCallee(UI))
+ CallSite CS(cast<Instruction>(UR));
+ if (!CS.isCallee(&U))
return false;
// Check out all of the potentially constant arguments. Note that we don't
@@ -112,7 +113,7 @@ bool IPCP::PropagateConstantsIntoArguments(Function &F) {
continue;
Constant *C = dyn_cast<Constant>(*AI);
- if (C && ArgumentConstants[i].first == 0) {
+ if (C && ArgumentConstants[i].first == nullptr) {
ArgumentConstants[i].first = C; // First constant seen.
} else if (C && ArgumentConstants[i].first == C) {
// Still the constant value we think it is.
@@ -135,11 +136,11 @@ bool IPCP::PropagateConstantsIntoArguments(Function &F) {
for (unsigned i = 0, e = ArgumentConstants.size(); i != e; ++i, ++AI) {
// Do we have a constant argument?
if (ArgumentConstants[i].second || AI->use_empty() ||
- (AI->hasByValAttr() && !F.onlyReadsMemory()))
+ AI->hasInAllocaAttr() || (AI->hasByValAttr() && !F.onlyReadsMemory()))
continue;
Value *V = ArgumentConstants[i].first;
- if (V == 0) V = UndefValue::get(AI->getType());
+ if (!V) V = UndefValue::get(AI->getType());
AI->replaceAllUsesWith(V);
++NumArgumentsProped;
MadeChange = true;
@@ -209,8 +210,8 @@ bool IPCP::PropagateConstantReturn(Function &F) {
}
// Different or no known return value? Don't propagate this return
// value.
- RetVals[i] = 0;
- // All values non constant? Stop looking.
+ RetVals[i] = nullptr;
+ // All values non-constant? Stop looking.
if (++NumNonConstant == RetVals.size())
return false;
}
@@ -220,13 +221,13 @@ bool IPCP::PropagateConstantReturn(Function &F) {
// over all users, replacing any uses of the return value with the returned
// constant.
bool MadeChange = false;
- for (Value::use_iterator UI = F.use_begin(), E = F.use_end(); UI != E; ++UI) {
- CallSite CS(*UI);
+ for (Use &U : F.uses()) {
+ CallSite CS(U.getUser());
Instruction* Call = CS.getInstruction();
// Not a call instruction or a call instruction that's not calling F
// directly?
- if (!Call || !CS.isCallee(UI))
+ if (!Call || !CS.isCallee(&U))
continue;
// Call result not used?
@@ -235,7 +236,7 @@ bool IPCP::PropagateConstantReturn(Function &F) {
MadeChange = true;
- if (STy == 0) {
+ if (!STy) {
Value* New = RetVals[0];
if (Argument *A = dyn_cast<Argument>(New))
// Was an argument returned? Then find the corresponding argument in
@@ -244,9 +245,8 @@ bool IPCP::PropagateConstantReturn(Function &F) {
Call->replaceAllUsesWith(New);
continue;
}
-
- for (Value::use_iterator I = Call->use_begin(), E = Call->use_end();
- I != E;) {
+
+ for (auto I = Call->user_begin(), E = Call->user_end(); I != E;) {
Instruction *Ins = cast<Instruction>(*I);
// Increment now, so we can remove the use
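A small, hypothetical example of the pattern interprocedural constant propagation exploits: every direct call site passes the same constant, so the parameter can be replaced by that constant inside the callee (and, symmetrically, a constant return value can be folded at the callers).

static int scale(int Factor, int V) { return Factor * V; }

int a(int V) { return scale(8, V); }
int b(int V) { return scale(8, V); }

// With internal linkage and only these call sites, Factor is always 8, so
// IPCP can substitute 8 for Factor inside scale.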
diff --git a/contrib/llvm/lib/Transforms/IPO/IPO.cpp b/contrib/llvm/lib/Transforms/IPO/IPO.cpp
index 5d563d8..b4d31d8 100644
--- a/contrib/llvm/lib/Transforms/IPO/IPO.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/IPO.cpp
@@ -44,6 +44,7 @@ void llvm::initializeIPO(PassRegistry &Registry) {
initializeStripDebugDeclarePass(Registry);
initializeStripDeadDebugInfoPass(Registry);
initializeStripNonDebugSymbolsPass(Registry);
+ initializeBarrierNoopPass(Registry);
}
void LLVMInitializeIPO(LLVMPassRegistryRef R) {
diff --git a/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp b/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
index 437597e..624cb90 100644
--- a/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
@@ -12,22 +12,23 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "inline"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineCost.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Transforms/IPO/InlinerPass.h"
using namespace llvm;
+#define DEBUG_TYPE "inline"
+
namespace {
/// \brief Inliner pass which only handles "always inline" functions.
@@ -36,24 +37,25 @@ class AlwaysInliner : public Inliner {
public:
// Use extremely low threshold.
- AlwaysInliner() : Inliner(ID, -2000000000, /*InsertLifetime*/ true), ICA(0) {
+ AlwaysInliner() : Inliner(ID, -2000000000, /*InsertLifetime*/ true),
+ ICA(nullptr) {
initializeAlwaysInlinerPass(*PassRegistry::getPassRegistry());
}
AlwaysInliner(bool InsertLifetime)
- : Inliner(ID, -2000000000, InsertLifetime), ICA(0) {
+ : Inliner(ID, -2000000000, InsertLifetime), ICA(nullptr) {
initializeAlwaysInlinerPass(*PassRegistry::getPassRegistry());
}
static char ID; // Pass identification, replacement for typeid
- virtual InlineCost getInlineCost(CallSite CS);
+ InlineCost getInlineCost(CallSite CS) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- virtual bool runOnSCC(CallGraphSCC &SCC);
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool runOnSCC(CallGraphSCC &SCC) override;
using llvm::Pass::doFinalization;
- virtual bool doFinalization(CallGraph &CG) {
+ bool doFinalization(CallGraph &CG) override {
return removeDeadFunctions(CG, /*AlwaysInlineOnly=*/ true);
}
};
@@ -63,7 +65,7 @@ public:
char AlwaysInliner::ID = 0;
INITIALIZE_PASS_BEGIN(AlwaysInliner, "always-inline",
"Inliner for always_inline functions", false, false)
-INITIALIZE_PASS_DEPENDENCY(CallGraph)
+INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_DEPENDENCY(InlineCostAnalysis)
INITIALIZE_PASS_END(AlwaysInliner, "always-inline",
"Inliner for always_inline functions", false, false)
@@ -93,8 +95,7 @@ InlineCost AlwaysInliner::getInlineCost(CallSite CS) {
// that are viable for inlining. FIXME: We shouldn't even get here for
// declarations.
if (Callee && !Callee->isDeclaration() &&
- Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
- Attribute::AlwaysInline) &&
+ CS.hasFnAttr(Attribute::AlwaysInline) &&
ICA->isInlineViable(*Callee))
return InlineCost::getAlways();
diff --git a/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp b/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp
index 57379a3..d189756 100644
--- a/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp
@@ -11,21 +11,22 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "inline"
#include "llvm/Transforms/IPO.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineCost.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Transforms/IPO/InlinerPass.h"
using namespace llvm;
+#define DEBUG_TYPE "inline"
+
namespace {
/// \brief Actual inliner pass implementation.
@@ -37,31 +38,42 @@ class SimpleInliner : public Inliner {
InlineCostAnalysis *ICA;
public:
- SimpleInliner() : Inliner(ID), ICA(0) {
+ SimpleInliner() : Inliner(ID), ICA(nullptr) {
initializeSimpleInlinerPass(*PassRegistry::getPassRegistry());
}
SimpleInliner(int Threshold)
- : Inliner(ID, Threshold, /*InsertLifetime*/ true), ICA(0) {
+ : Inliner(ID, Threshold, /*InsertLifetime*/ true), ICA(nullptr) {
initializeSimpleInlinerPass(*PassRegistry::getPassRegistry());
}
static char ID; // Pass identification, replacement for typeid
- InlineCost getInlineCost(CallSite CS) {
+ InlineCost getInlineCost(CallSite CS) override {
return ICA->getInlineCost(CS, getInlineThreshold(CS));
}
- virtual bool runOnSCC(CallGraphSCC &SCC);
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ bool runOnSCC(CallGraphSCC &SCC) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
};
+static int computeThresholdFromOptLevels(unsigned OptLevel,
+ unsigned SizeOptLevel) {
+ if (OptLevel > 2)
+ return 275;
+ if (SizeOptLevel == 1) // -Os
+ return 75;
+ if (SizeOptLevel == 2) // -Oz
+ return 25;
+ return 225;
+}
+
} // end anonymous namespace
char SimpleInliner::ID = 0;
INITIALIZE_PASS_BEGIN(SimpleInliner, "inline",
"Function Integration/Inlining", false, false)
-INITIALIZE_PASS_DEPENDENCY(CallGraph)
+INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_DEPENDENCY(InlineCostAnalysis)
INITIALIZE_PASS_END(SimpleInliner, "inline",
"Function Integration/Inlining", false, false)
@@ -72,6 +84,12 @@ Pass *llvm::createFunctionInliningPass(int Threshold) {
return new SimpleInliner(Threshold);
}
+Pass *llvm::createFunctionInliningPass(unsigned OptLevel,
+ unsigned SizeOptLevel) {
+ return new SimpleInliner(
+ computeThresholdFromOptLevels(OptLevel, SizeOptLevel));
+}
+
bool SimpleInliner::runOnSCC(CallGraphSCC &SCC) {
ICA = &getAnalysis<InlineCostAnalysis>();
return Inliner::runOnSCC(SCC);
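Given computeThresholdFromOptLevels above, a front end constructing the pass per optimization level effectively requests the following thresholds. This is only a usage sketch; the matching declaration of the new overload in llvm/Transforms/IPO.h is assumed rather than shown in this hunk.

#include "llvm/Transforms/IPO.h"

// 3,0 -> 275 (-O3)   2,1 -> 75 (-Os)   2,2 -> 25 (-Oz)   otherwise 225
llvm::Pass *makeInliner(unsigned OptLevel, unsigned SizeOptLevel) {
  return llvm::createFunctionInliningPass(OptLevel, SizeOptLevel);
}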
diff --git a/contrib/llvm/lib/Transforms/IPO/Inliner.cpp b/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
index d75d6ca..9087ab2 100644
--- a/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
@@ -13,17 +13,17 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "inline"
#include "llvm/Transforms/IPO/InlinerPass.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineCost.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -32,6 +32,8 @@
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
+#define DEBUG_TYPE "inline"
+
STATISTIC(NumInlined, "Number of functions inlined");
STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined");
STATISTIC(NumDeleted, "Number of functions deleted because all callers found");
@@ -50,6 +52,13 @@ static cl::opt<int>
HintThreshold("inlinehint-threshold", cl::Hidden, cl::init(325),
cl::desc("Threshold for inlining functions with inline hint"));
+// We introduce this threshold to help the performance of instrumentation-based
+// PGO before we actually hook up the inliner with analysis passes such as BPI
+// and BFI.
+static cl::opt<int>
+ColdThreshold("inlinecold-threshold", cl::Hidden, cl::init(225),
+ cl::desc("Threshold for inlining functions with cold attribute"));
+
// Threshold to use when optsize is specified (and there is no -inline-limit).
const int OptSizeThreshold = 75;
@@ -117,7 +126,7 @@ static void AdjustCallerSSPLevel(Function *Caller, Function *Callee) {
static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
InlinedArrayAllocasTy &InlinedArrayAllocas,
int InlineHistory, bool InsertLifetime,
- const DataLayout *TD) {
+ const DataLayout *DL) {
Function *Callee = CS.getCalledFunction();
Function *Caller = CS.getCaller();
@@ -176,7 +185,7 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
// canonicalized to be an allocation *of* an array), or allocations whose
// type is not itself an array (because we're afraid of pessimizing SRoA).
ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
- if (ATy == 0 || AI->isArrayAllocation())
+ if (!ATy || AI->isArrayAllocation())
continue;
// Get the list of all available allocas for this array type.
@@ -196,7 +205,7 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
// If we don't have data layout information, and only one alloca is using
// the target default, then we can't safely merge them because we can't
// pick the greater alignment.
- if (!TD && (!Align1 || !Align2) && Align1 != Align2)
+ if (!DL && (!Align1 || !Align2) && Align1 != Align2)
continue;
// The available alloca has to be in the right function, not in some other
@@ -218,8 +227,8 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
if (Align1 != Align2) {
if (!Align1 || !Align2) {
- assert(TD && "DataLayout required to compare default alignments");
- unsigned TypeAlign = TD->getABITypeAlignment(AI->getAllocatedType());
+ assert(DL && "DataLayout required to compare default alignments");
+ unsigned TypeAlign = DL->getABITypeAlignment(AI->getAllocatedType());
Align1 = Align1 ? Align1 : TypeAlign;
Align2 = Align2 ? Align2 : TypeAlign;
@@ -232,7 +241,7 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
AI->eraseFromParent();
MergedAwayAlloca = true;
++NumMergedAllocas;
- IFI.StaticAllocas[AllocaNo] = 0;
+ IFI.StaticAllocas[AllocaNo] = nullptr;
break;
}
@@ -277,9 +286,28 @@ unsigned Inliner::getInlineThreshold(CallSite CS) const {
Attribute::MinSize))
thres = HintThreshold;
+ // Listen to the cold attribute when it would decrease the threshold.
+ bool ColdCallee = Callee && !Callee->isDeclaration() &&
+ Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::Cold);
+  // The command-line argument for InlineLimit overrides the default
+  // ColdThreshold. If we have -inline-threshold but no -inlinecold-threshold,
+  // do not use the default cold threshold even if it is smaller.
+ if ((InlineLimit.getNumOccurrences() == 0 ||
+ ColdThreshold.getNumOccurrences() > 0) && ColdCallee &&
+ ColdThreshold < thres)
+ thres = ColdThreshold;
+
return thres;
}
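A hypothetical cold callee, illustrating when the new ColdThreshold applies: unless the user passed -inline-threshold without also passing -inlinecold-threshold, a callee carrying the cold attribute is compared against the (typically lower) cold threshold.

__attribute__((cold)) static int slowPath(int X) {
  // Marked cold, e.g. an error-handling helper; its inline cost is judged
  // against ColdThreshold (default 225, usually lowered on the command line
  // via -inlinecold-threshold) instead of the regular or hint thresholds.
  return X * X + 7;
}

int dispatch(int X) { return X > 0 ? X : slowPath(X); }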
+static void emitAnalysis(CallSite CS, const Twine &Msg) {
+ Function *Caller = CS.getCaller();
+ LLVMContext &Ctx = Caller->getContext();
+ DebugLoc DLoc = CS.getInstruction()->getDebugLoc();
+ emitOptimizationRemarkAnalysis(Ctx, DEBUG_TYPE, *Caller, DLoc, Msg);
+}
+
/// shouldInline - Return true if the inliner should attempt to inline
/// at the given CallSite.
bool Inliner::shouldInline(CallSite CS) {
@@ -288,12 +316,16 @@ bool Inliner::shouldInline(CallSite CS) {
if (IC.isAlways()) {
DEBUG(dbgs() << " Inlining: cost=always"
<< ", Call: " << *CS.getInstruction() << "\n");
+ emitAnalysis(CS, Twine(CS.getCalledFunction()->getName()) +
+ " should always be inlined (cost=always)");
return true;
}
if (IC.isNever()) {
DEBUG(dbgs() << " NOT Inlining: cost=never"
<< ", Call: " << *CS.getInstruction() << "\n");
+ emitAnalysis(CS, Twine(CS.getCalledFunction()->getName() +
+ " should never be inlined (cost=never)"));
return false;
}
@@ -302,6 +334,10 @@ bool Inliner::shouldInline(CallSite CS) {
DEBUG(dbgs() << " NOT Inlining: cost=" << IC.getCost()
<< ", thres=" << (IC.getCostDelta() + IC.getCost())
<< ", Call: " << *CS.getInstruction() << "\n");
+ emitAnalysis(CS, Twine(CS.getCalledFunction()->getName() +
+ " too costly to inline (cost=") +
+ Twine(IC.getCost()) + ", threshold=" +
+ Twine(IC.getCostDelta() + IC.getCost()) + ")");
return false;
}
@@ -330,9 +366,8 @@ bool Inliner::shouldInline(CallSite CS) {
bool callerWillBeRemoved = Caller->hasLocalLinkage();
// This bool tracks what happens if we DO inline C into B.
bool inliningPreventsSomeOuterInline = false;
- for (Value::use_iterator I = Caller->use_begin(), E =Caller->use_end();
- I != E; ++I) {
- CallSite CS2(*I);
+ for (User *U : Caller->users()) {
+ CallSite CS2(U);
// If this isn't a call to Caller (it could be some other sort
// of reference) skip it. Such references will prevent the caller
@@ -363,13 +398,18 @@ bool Inliner::shouldInline(CallSite CS) {
// one is set very low by getInlineCost, in anticipation that Caller will
// be removed entirely. We did not account for this above unless there
// is only one caller of Caller.
- if (callerWillBeRemoved && Caller->use_begin() != Caller->use_end())
+ if (callerWillBeRemoved && !Caller->use_empty())
TotalSecondaryCost += InlineConstants::LastCallToStaticBonus;
if (inliningPreventsSomeOuterInline && TotalSecondaryCost < IC.getCost()) {
DEBUG(dbgs() << " NOT Inlining: " << *CS.getInstruction() <<
" Cost = " << IC.getCost() <<
", outer Cost = " << TotalSecondaryCost << '\n');
+ emitAnalysis(
+ CS, Twine("Not inlining. Cost of inlining " +
+ CS.getCalledFunction()->getName() +
+ " increases the cost of inlining " +
+ CS.getCaller()->getName() + " in other contexts"));
return false;
}
}
@@ -377,6 +417,10 @@ bool Inliner::shouldInline(CallSite CS) {
DEBUG(dbgs() << " Inlining: cost=" << IC.getCost()
<< ", thres=" << (IC.getCostDelta() + IC.getCost())
<< ", Call: " << *CS.getInstruction() << '\n');
+ emitAnalysis(
+ CS, CS.getCalledFunction()->getName() + Twine(" can be inlined into ") +
+ CS.getCaller()->getName() + " with cost=" + Twine(IC.getCost()) +
+ " (threshold=" + Twine(IC.getCostDelta() + IC.getCost()) + ")");
return true;
}
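The emitAnalysis calls added in this hunk feed LLVM's optimization-remark machinery. As a rough sketch of how they surface to users (the driver flags named here are an assumption about the usual interface, not part of this diff):

static int square(int X) { return X * X; }

int f(int V) { return square(V); }

// With remarks enabled (for example clang's -Rpass=inline and
// -Rpass-missed=inline), the messages built above appear roughly as:
//   remark: square can be inlined into f with cost=... (threshold=...)
//   remark: square inlined into f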
@@ -395,8 +439,9 @@ static bool InlineHistoryIncludes(Function *F, int InlineHistoryID,
}
bool Inliner::runOnSCC(CallGraphSCC &SCC) {
- CallGraph &CG = getAnalysis<CallGraph>();
- const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
+ CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
const TargetLibraryInfo *TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
SmallPtrSet<Function*, 8> SCCFunctions;
@@ -456,7 +501,7 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
InlinedArrayAllocasTy InlinedArrayAllocas;
- InlineFunctionInfo InlineInfo(&CG, TD);
+ InlineFunctionInfo InlineInfo(&CG, DL);
// Now that we have all of the call sites, loop over them and inline them if
// it looks profitable to do so.
@@ -485,7 +530,7 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
++NumCallsDeleted;
} else {
// We can only inline direct calls to non-declarations.
- if (Callee == 0 || Callee->isDeclaration()) continue;
+ if (!Callee || Callee->isDeclaration()) continue;
// If this call site was obtained by inlining another function, verify
// that the include path for the function did not include the callee
@@ -497,18 +542,37 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
InlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory))
continue;
-
+ LLVMContext &CallerCtx = Caller->getContext();
+
+ // Get DebugLoc to report. CS will be invalid after Inliner.
+ DebugLoc DLoc = CS.getInstruction()->getDebugLoc();
+
// If the policy determines that we should inline this function,
// try to do so.
- if (!shouldInline(CS))
+ if (!shouldInline(CS)) {
+ emitOptimizationRemarkMissed(CallerCtx, DEBUG_TYPE, *Caller, DLoc,
+ Twine(Callee->getName() +
+ " will not be inlined into " +
+ Caller->getName()));
continue;
+ }
// Attempt to inline the function.
if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
- InlineHistoryID, InsertLifetime, TD))
+ InlineHistoryID, InsertLifetime, DL)) {
+ emitOptimizationRemarkMissed(CallerCtx, DEBUG_TYPE, *Caller, DLoc,
+ Twine(Callee->getName() +
+ " will not be inlined into " +
+ Caller->getName()));
continue;
+ }
++NumInlined;
-
+
+ // Report the inline decision.
+ emitOptimizationRemark(
+ CallerCtx, DEBUG_TYPE, *Caller, DLoc,
+ Twine(Callee->getName() + " inlined into " + Caller->getName()));
+
// If inlining this function gave us any new call sites, throw them
// onto our worklist to process. They are useful inline candidates.
if (!InlineInfo.InlinedCalls.empty()) {
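A rough sketch (not from this patch) of how the remark helpers used above can be driven from any pass, assuming the LLVM 3.5-era free functions declared in llvm/IR/DiagnosticInfo.h; the pass name "my-inliner" and the helper reportDecision are purely illustrative:

  #include "llvm/IR/DiagnosticInfo.h"
  #include "llvm/IR/Function.h"

  static void reportDecision(llvm::Function &Caller, llvm::Function &Callee,
                             llvm::DebugLoc DLoc, bool Inlined) {
    llvm::LLVMContext &Ctx = Caller.getContext();
    if (Inlined)
      // Typically surfaced via clang's -Rpass=<pass> or opt's -pass-remarks=<pass>.
      llvm::emitOptimizationRemark(Ctx, "my-inliner", Caller, DLoc,
                                   llvm::Twine(Callee.getName()) +
                                       " inlined into " + Caller.getName());
    else
      llvm::emitOptimizationRemarkMissed(Ctx, "my-inliner", Caller, DLoc,
                                         llvm::Twine(Callee.getName()) +
                                             " will not be inlined into " +
                                             Caller.getName());
  }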
diff --git a/contrib/llvm/lib/Transforms/IPO/Internalize.cpp b/contrib/llvm/lib/Transforms/IPO/Internalize.cpp
index 64e2ced..c970a1a 100644
--- a/contrib/llvm/lib/Transforms/IPO/Internalize.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/Internalize.cpp
@@ -19,7 +19,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "internalize"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
@@ -35,6 +34,8 @@
#include <set>
using namespace llvm;
+#define DEBUG_TYPE "internalize"
+
STATISTIC(NumAliases , "Number of aliases internalized");
STATISTIC(NumFunctions, "Number of functions internalized");
STATISTIC(NumGlobals , "Number of global vars internalized");
@@ -59,11 +60,11 @@ namespace {
explicit InternalizePass();
explicit InternalizePass(ArrayRef<const char *> ExportList);
void LoadFile(const char *Filename);
- virtual bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
- AU.addPreserved<CallGraph>();
+ AU.addPreserved<CallGraphWrapperPass>();
}
};
} // end anonymous namespace
@@ -72,8 +73,7 @@ char InternalizePass::ID = 0;
INITIALIZE_PASS(InternalizePass, "internalize",
"Internalize Global Symbols", false, false)
-InternalizePass::InternalizePass()
- : ModulePass(ID) {
+InternalizePass::InternalizePass() : ModulePass(ID) {
initializeInternalizePassPass(*PassRegistry::getPassRegistry());
if (!APIFile.empty()) // If a filename is specified, use it.
LoadFile(APIFile.c_str());
@@ -81,7 +81,7 @@ InternalizePass::InternalizePass()
}
InternalizePass::InternalizePass(ArrayRef<const char *> ExportList)
- : ModulePass(ID){
+ : ModulePass(ID) {
initializeInternalizePassPass(*PassRegistry::getPassRegistry());
for(ArrayRef<const char *>::const_iterator itr = ExportList.begin();
itr != ExportList.end(); itr++) {
@@ -115,6 +115,10 @@ static bool shouldInternalize(const GlobalValue &GV,
if (GV.hasAvailableExternallyLinkage())
return false;
+ // Assume that dllexported symbols are referenced elsewhere
+ if (GV.hasDLLExportStorageClass())
+ return false;
+
// Already has internal linkage
if (GV.hasLocalLinkage())
return false;
@@ -127,8 +131,9 @@ static bool shouldInternalize(const GlobalValue &GV,
}
bool InternalizePass::runOnModule(Module &M) {
- CallGraph *CG = getAnalysisIfAvailable<CallGraph>();
- CallGraphNode *ExternalNode = CG ? CG->getExternalCallingNode() : 0;
+ CallGraphWrapperPass *CGPass = getAnalysisIfAvailable<CallGraphWrapperPass>();
+ CallGraph *CG = CGPass ? &CGPass->getCallGraph() : nullptr;
+ CallGraphNode *ExternalNode = CG ? CG->getExternalCallingNode() : nullptr;
bool Changed = false;
SmallPtrSet<GlobalValue *, 8> Used;
@@ -150,11 +155,11 @@ bool InternalizePass::runOnModule(Module &M) {
}
// Mark all functions not in the api as internal.
- // FIXME: maybe use private linkage?
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
if (!shouldInternalize(*I, ExternalNames))
continue;
+ I->setVisibility(GlobalValue::DefaultVisibility);
I->setLinkage(GlobalValue::InternalLinkage);
if (ExternalNode)
@@ -186,12 +191,12 @@ bool InternalizePass::runOnModule(Module &M) {
// Mark all global variables with initializers that are not in the api as
// internal as well.
- // FIXME: maybe use private linkage?
for (Module::global_iterator I = M.global_begin(), E = M.global_end();
I != E; ++I) {
if (!shouldInternalize(*I, ExternalNames))
continue;
+ I->setVisibility(GlobalValue::DefaultVisibility);
I->setLinkage(GlobalValue::InternalLinkage);
Changed = true;
++NumGlobals;
@@ -204,6 +209,7 @@ bool InternalizePass::runOnModule(Module &M) {
if (!shouldInternalize(*I, ExternalNames))
continue;
+ I->setVisibility(GlobalValue::DefaultVisibility);
I->setLinkage(GlobalValue::InternalLinkage);
Changed = true;
++NumAliases;
@@ -213,9 +219,7 @@ bool InternalizePass::runOnModule(Module &M) {
return Changed;
}
-ModulePass *llvm::createInternalizePass() {
- return new InternalizePass();
-}
+ModulePass *llvm::createInternalizePass() { return new InternalizePass(); }
ModulePass *llvm::createInternalizePass(ArrayRef<const char *> ExportList) {
return new InternalizePass(ExportList);
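As a hedged side note, the new setVisibility(DefaultVisibility) calls in this file matter because LLVM's IR rules require symbols with local linkage to have default visibility. A minimal sketch of the per-symbol internalization step, with the helper name internalizeOne being purely illustrative:

  #include "llvm/IR/GlobalValue.h"

  static void internalizeOne(llvm::GlobalValue &GV) {
    // Local linkage requires default visibility, so reset the visibility first.
    GV.setVisibility(llvm::GlobalValue::DefaultVisibility);
    GV.setLinkage(llvm::GlobalValue::InternalLinkage);
  }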
diff --git a/contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp b/contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp
index 8282a8e..20414aa 100644
--- a/contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp
@@ -14,11 +14,10 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "loop-extract"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
@@ -30,6 +29,8 @@
#include <set>
using namespace llvm;
+#define DEBUG_TYPE "loop-extract"
+
STATISTIC(NumExtracted, "Number of loops extracted");
namespace {
@@ -42,12 +43,12 @@ namespace {
initializeLoopExtractorPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnLoop(Loop *L, LPPassManager &LPM);
+ bool runOnLoop(Loop *L, LPPassManager &LPM) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequiredID(BreakCriticalEdgesID);
AU.addRequiredID(LoopSimplifyID);
- AU.addRequired<DominatorTree>();
+ AU.addRequired<DominatorTreeWrapperPass>();
}
};
}
@@ -57,7 +58,7 @@ INITIALIZE_PASS_BEGIN(LoopExtractor, "loop-extract",
"Extract loops into new functions", false, false)
INITIALIZE_PASS_DEPENDENCY(BreakCriticalEdges)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(LoopExtractor, "loop-extract",
"Extract loops into new functions", false, false)
@@ -79,6 +80,9 @@ INITIALIZE_PASS(SingleLoopExtractor, "loop-extract-single",
Pass *llvm::createLoopExtractorPass() { return new LoopExtractor(); }
bool LoopExtractor::runOnLoop(Loop *L, LPPassManager &LPM) {
+ if (skipOptnoneFunction(L))
+ return false;
+
// Only visit top-level loops.
if (L->getParentLoop())
return false;
@@ -87,7 +91,7 @@ bool LoopExtractor::runOnLoop(Loop *L, LPPassManager &LPM) {
if (!L->isLoopSimplifyForm())
return false;
- DominatorTree &DT = getAnalysis<DominatorTree>();
+ DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
bool Changed = false;
// If there is more than one top-level loop in this function, extract all of
@@ -133,7 +137,7 @@ bool LoopExtractor::runOnLoop(Loop *L, LPPassManager &LPM) {
if (NumLoops == 0) return Changed;
--NumLoops;
CodeExtractor Extractor(DT, *L);
- if (Extractor.extractCodeRegion() != 0) {
+ if (Extractor.extractCodeRegion() != nullptr) {
Changed = true;
// After extraction, the loop is replaced by a function call, so
// we shouldn't try to run any more loop passes on it.
@@ -177,7 +181,7 @@ namespace {
LoadFile(BlockFile.c_str());
}
- bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
};
}
@@ -238,7 +242,7 @@ void BlockExtractorPass::SplitLandingPadPreds(Function *F) {
if (!Split) continue;
SmallVector<BasicBlock*, 2> NewBBs;
- SplitLandingPadPredecessors(LPad, Parent, ".1", ".2", 0, NewBBs);
+ SplitLandingPadPredecessors(LPad, Parent, ".1", ".2", nullptr, NewBBs);
}
}
diff --git a/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp b/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp
index 3861421..2fb0ddb 100644
--- a/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp
@@ -9,13 +9,24 @@
//
// This pass looks for equivalent functions that are mergable and folds them.
//
-// A hash is computed from the function, based on its type and number of
-// basic blocks.
+// An order relation is defined on the set of functions. It is implemented by a
+// function comparison procedure that returns
+// 0 when the functions are equal,
+// -1 when the left function is less than the right function, and
+// 1 in the opposite case. We need a total ordering, so we have to maintain
+// four properties on the set of functions:
+// a <= a (reflexivity),
+// if a <= b and b <= a then a = b (antisymmetry),
+// if a <= b and b <= c then a <= c (transitivity),
+// for all a and b: a <= b or b <= a (totality).
//
-// Once all hashes are computed, we perform an expensive equality comparison
-// on each function pair. This takes n^2/2 comparisons per bucket, so it's
-// important that the hash function be high quality. The equality comparison
-// iterates through each instruction in each basic block.
+// The comparison iterates through each instruction in each basic block.
+// Functions are kept in a binary tree, and for each new function F we perform
+// a lookup in that tree.
+// In practice it works the following way:
+// -- We define a Function* container class with a custom "operator<" (FunctionPtr).
+// -- "FunctionPtr" instances are stored in a std::set, so every
+// std::set::insert operation gives a result in O(log N) time.
//
// When a match is found the functions are folded. If both functions are
// overridable, we move the functionality into a new internal function and
@@ -31,9 +42,6 @@
// the object they belong to. However, as long as it's only used for a lookup
// and call, this is irrelevant, and we'd like to fold such functions.
//
-// * switch from n^2 pair-wise comparisons to an n-way comparison for each
-// bucket.
-//
// * be smarter about bitcasts.
//
// In order to fold functions, we will sometimes add either bitcast instructions
@@ -41,15 +49,45 @@
// analysis since the two functions differ where one has a bitcast and the
// other doesn't. We should learn to look through bitcasts.
//
+// * Compare complex types with pointer types inside.
+// * Compare cross-reference cases.
+// * Compare complex expressions.
+//
+// All three issues above can be described as the ability to prove that
+// fA == fB == fE == fF == fG in the example below:
+//
+// void fA() {
+// fB();
+// }
+// void fB() {
+// fA();
+// }
+//
+// void fE() {
+// fF();
+// }
+// void fF() {
+// fG();
+// }
+// void fG() {
+// fE();
+// }
+//
+// The simplest cross-reference case (fA <--> fB) was handled in previous
+// versions of MergeFunctions, though it showed up in only two function pairs
+// in the test-suite (which contains >50k functions).
+// The ability to detect complex cross-referencing (e.g. A->B->C->D->A)
+// could cover many more cases.
+//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mergefunc"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
@@ -58,103 +96,28 @@
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CallSite.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include <vector>
using namespace llvm;
+#define DEBUG_TYPE "mergefunc"
+
STATISTIC(NumFunctionsMerged, "Number of functions merged");
STATISTIC(NumThunksWritten, "Number of thunks generated");
STATISTIC(NumAliasesWritten, "Number of aliases generated");
STATISTIC(NumDoubleWeak, "Number of new functions created");
-/// Returns the type id for a type to be hashed. We turn pointer types into
-/// integers here because the actual compare logic below considers pointers and
-/// integers of the same size as equal.
-static Type::TypeID getTypeIDForHash(Type *Ty) {
- if (Ty->isPointerTy())
- return Type::IntegerTyID;
- return Ty->getTypeID();
-}
-
-/// Creates a hash-code for the function which is the same for any two
-/// functions that will compare equal, without looking at the instructions
-/// inside the function.
-static unsigned profileFunction(const Function *F) {
- FunctionType *FTy = F->getFunctionType();
-
- FoldingSetNodeID ID;
- ID.AddInteger(F->size());
- ID.AddInteger(F->getCallingConv());
- ID.AddBoolean(F->hasGC());
- ID.AddBoolean(FTy->isVarArg());
- ID.AddInteger(getTypeIDForHash(FTy->getReturnType()));
- for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
- ID.AddInteger(getTypeIDForHash(FTy->getParamType(i)));
- return ID.ComputeHash();
-}
-
-namespace {
-
-/// ComparableFunction - A struct that pairs together functions with a
-/// DataLayout so that we can keep them together as elements in the DenseSet.
-class ComparableFunction {
-public:
- static const ComparableFunction EmptyKey;
- static const ComparableFunction TombstoneKey;
- static DataLayout * const LookupOnly;
-
- ComparableFunction(Function *Func, DataLayout *TD)
- : Func(Func), Hash(profileFunction(Func)), TD(TD) {}
-
- Function *getFunc() const { return Func; }
- unsigned getHash() const { return Hash; }
- DataLayout *getTD() const { return TD; }
-
- // Drops AssertingVH reference to the function. Outside of debug mode, this
- // does nothing.
- void release() {
- assert(Func &&
- "Attempted to release function twice, or release empty/tombstone!");
- Func = NULL;
- }
-
-private:
- explicit ComparableFunction(unsigned Hash)
- : Func(NULL), Hash(Hash), TD(NULL) {}
-
- AssertingVH<Function> Func;
- unsigned Hash;
- DataLayout *TD;
-};
-
-const ComparableFunction ComparableFunction::EmptyKey = ComparableFunction(0);
-const ComparableFunction ComparableFunction::TombstoneKey =
- ComparableFunction(1);
-DataLayout *const ComparableFunction::LookupOnly = (DataLayout*)(-1);
-
-}
-
-namespace llvm {
- template <>
- struct DenseMapInfo<ComparableFunction> {
- static ComparableFunction getEmptyKey() {
- return ComparableFunction::EmptyKey;
- }
- static ComparableFunction getTombstoneKey() {
- return ComparableFunction::TombstoneKey;
- }
- static unsigned getHashValue(const ComparableFunction &CF) {
- return CF.getHash();
- }
- static bool isEqual(const ComparableFunction &LHS,
- const ComparableFunction &RHS);
- };
-}
+static cl::opt<unsigned> NumFunctionsForSanityCheck(
+ "mergefunc-sanity",
+    cl::desc("How many functions in the module may be used for the "
+             "MergeFunctions pass sanity check. "
+             "'0' disables this check. Works only with the '-debug' flag."),
+ cl::init(0), cl::Hidden);
namespace {
@@ -164,75 +127,518 @@ namespace {
/// side of claiming that two functions are different).
class FunctionComparator {
public:
- FunctionComparator(const DataLayout *TD, const Function *F1,
+ FunctionComparator(const DataLayout *DL, const Function *F1,
const Function *F2)
- : F1(F1), F2(F2), TD(TD) {}
+ : FnL(F1), FnR(F2), DL(DL) {}
/// Test whether the two functions have equivalent behaviour.
- bool compare();
+ int compare();
private:
/// Test whether two basic blocks have equivalent behaviour.
- bool compare(const BasicBlock *BB1, const BasicBlock *BB2);
+ int compare(const BasicBlock *BBL, const BasicBlock *BBR);
+
+ /// Constants comparison.
+  /// It is analogous to a lexicographical comparison between hypothetical
+  /// numbers of the following format:
+ /// <bitcastability-trait><raw-bit-contents>
+ ///
+ /// 1. Bitcastability.
+  /// Check whether L's type can be losslessly bitcast to R's type.
+  /// At this stage, if a lossless bitcast is not possible, the method
+  /// returns -1 or 1, thereby also defining which type is greater in
+  /// the context of bitcastability.
+  /// Stage 0: If the types are equal in terms of cmpTypes, we can go straight
+  ///          to the contents comparison.
+  ///          If the types differ, remember the type comparison result and
+  ///          check whether we can still bitcast the types.
+  /// Stage 1: Types that satisfy the isFirstClassType condition are always
+  ///          greater than others.
+  /// Stage 2: A vector is greater than a non-vector.
+  ///          If both types are vectors, the vector with the greater bitwidth
+  ///          is greater.
+  ///          If both types are vectors with the same bitwidth, the types are
+  ///          bitcastable, so we can skip the remaining stages and go to the
+  ///          contents comparison.
+ /// Stage 3: Pointer types are greater than non-pointers. If both types are
+ /// pointers of the same address space - go to contents comparison.
+ /// Different address spaces: pointer with greater address space is
+ /// greater.
+  /// Stage 4: The types are neither vectors nor pointers, and they differ.
+  ///          We don't know how to bitcast them, so we don't, and instead
+  ///          return the type comparison result (which then determines the
+  ///          relationship among constants we don't know how to bitcast).
+ ///
+  /// For clarity, let's see how the set of constants could look
+  /// on a single-dimension axis:
+  ///
+  /// [NFCT], [FCT, "others"], [FCT, pointers], [FCT, vectors]
+  /// Where: NFCT - not a FirstClassType
+  ///        FCT - FirstClassType
+ ///
+ /// 2. Compare raw contents.
+  /// This stage ignores the types and only compares the bits of L and R.
+  /// Returns 0 if L and R have equivalent contents,
+  /// -1 or 1 if the values are different.
+  /// Pretty trivial:
+  /// 2.1. If the contents are numbers, compare the numbers.
+  ///    Ints with a greater bitwidth are greater. Ints with the same bitwidth
+  ///    are compared by their contents.
+  /// 2.2. "And so on". To avoid discrepancies with the comments, it is
+  /// probably best to read the implementation itself.
+  /// 3. Back to the overall picture: let's look at how the ordered set
+  /// of constants will look:
+ /// [NFCT], [FCT, "others"], [FCT, pointers], [FCT, vectors]
+ ///
+  /// Now look at what could be inside [FCT, "others"], for example:
+ /// [FCT, "others"] =
+ /// [
+ /// [double 0.1], [double 1.23],
+ /// [i32 1], [i32 2],
+ /// { double 1.0 }, ; StructTyID, NumElements = 1
+ /// { i32 1 }, ; StructTyID, NumElements = 1
+ /// { double 1, i32 1 }, ; StructTyID, NumElements = 2
+ /// { i32 1, double 1 } ; StructTyID, NumElements = 2
+ /// ]
+ ///
+  /// Let's explain the order. Floating-point numbers are less than integers,
+  /// simply because of the cmpType terms: FloatTyID < IntegerTyID.
+  /// Floats (with the same fltSemantics) are sorted according to their value.
+  /// Then come the integers, which, like the floats,
+  /// are easily sorted among each other.
+  /// The structures are grouped at the tail, again because of their
+  /// TypeID: StructTyID > IntegerTyID > FloatTyID.
+  /// Structures with a greater number of elements are greater. Structures
+  /// whose greater elements come first are greater.
+  /// The same logic applies to vectors, arrays and other complex types.
+ ///
+ /// Bitcastable constants.
+  /// Let's assume that some constant belongs to a group of
+  /// "so-called-equal" values with different types, and at the same time
+  /// belongs to another group of constants with equal types
+  /// and "really" equal values.
+ ///
+ /// Now, prove that this is impossible:
+ ///
+ /// If constant A with type TyA is bitcastable to B with type TyB, then:
+  ///  1. All constants with types equal to TyA are bitcastable to B, since
+  ///     those types must be vectors (if TyA is a vector), pointers
+  ///     (if TyA is a pointer), or else (if TyA is equal to TyB) equal
+  ///     to TyB.
+  ///  2. All constants with types that are not equal to, but bitcastable to,
+  ///     TyA are bitcastable to B as well,
+  ///     once again because we only allow this for vectors and pointers.
+  ///     This statement can be expanded as below:
+  ///  2.1. All vectors with the same bitwidth as vector A have the same
+  ///       bitwidth as vector B, and are thus bitcastable to B as well.
+  ///  2.2. All pointers of the same address space, no matter what they point
+  ///       to, are bitcastable to each other. So if C is a pointer, it can be
+  ///       bitcast to A and to B.
+  ///  So any constant equal or bitcastable to A is equal or bitcastable to B.
+ /// QED.
+ ///
+  /// In other words, for pointers and vectors we ignore the top-level type and
+  /// look at their particular properties (bit-width for vectors, and
+  /// address space for pointers).
+  /// If these properties are equal, we compare their contents.
+ int cmpConstants(const Constant *L, const Constant *R);
/// Assign or look up previously assigned numbers for the two values, and
/// return whether the numbers are equal. Numbers are assigned in the order
/// visited.
- bool enumerate(const Value *V1, const Value *V2);
+ /// Comparison order:
+  /// Stage 0: A value that is the function itself is always greater than
+  ///          others. If the left and right values are references to their
+  ///          respective functions, they are equal.
+  /// Stage 1: Constants are greater than non-constants.
+  ///          If both left and right are constants, then the result of
+  ///          cmpConstants is used as the cmpValues result.
+  /// Stage 2: InlineAsm instances are greater than others. If both left and
+  ///          right are InlineAsm instances, their InlineAsm* pointers are
+  ///          cast to integers and compared as numbers.
+  /// Stage 3: For all other cases we compare the order in which we meet these
+  ///          values in their functions. If the right value was met first
+  ///          during scanning, the left value is greater.
+  ///          In other words, we compare serial numbers; for more details
+  ///          see the comments for sn_mapL and sn_mapR.
+ int cmpValues(const Value *L, const Value *R);
/// Compare two Instructions for equivalence, similar to
/// Instruction::isSameOperationAs but with modifications to the type
/// comparison.
- bool isEquivalentOperation(const Instruction *I1,
- const Instruction *I2) const;
+ /// Stages are listed in "most significant stage first" order:
+  /// At each stage below, we compare some part of the left and right
+  /// operations. If the parts are not equal, that comparison result becomes
+  /// the operation comparison result and the method exits.
+ /// Otherwise we proceed to the next stage.
+ /// Stages:
+ /// 1. Operations opcodes. Compared as numbers.
+ /// 2. Number of operands.
+ /// 3. Operation types. Compared with cmpType method.
+ /// 4. Compare operation subclass optional data as stream of bytes:
+ /// just convert it to integers and call cmpNumbers.
+  /// 5. Compare the operand types with cmpType, in
+  ///    most-significant-operand-first order.
+ /// 6. Last stage. Check operations for some specific attributes.
+ /// For example, for Load it would be:
+ /// 6.1.Load: volatile (as boolean flag)
+ /// 6.2.Load: alignment (as integer numbers)
+ /// 6.3.Load: synch-scope (as integer numbers)
+ /// 6.4.Load: range metadata (as integer numbers)
+  /// At this stage it is better to read the code, since it is no more than
+  /// 10-15 lines per instruction and may change from time to time.
+ int cmpOperation(const Instruction *L, const Instruction *R) const;
/// Compare two GEPs for equivalent pointer arithmetic.
- bool isEquivalentGEP(const GEPOperator *GEP1, const GEPOperator *GEP2);
- bool isEquivalentGEP(const GetElementPtrInst *GEP1,
- const GetElementPtrInst *GEP2) {
- return isEquivalentGEP(cast<GEPOperator>(GEP1), cast<GEPOperator>(GEP2));
+ /// Parts to be compared for each comparison stage,
+ /// most significant stage first:
+ /// 1. Address space. As numbers.
+ /// 2. Constant offset, (if "DataLayout *DL" field is not NULL,
+ /// using GEPOperator::accumulateConstantOffset method).
+ /// 3. Pointer operand type (using cmpType method).
+ /// 4. Number of operands.
+ /// 5. Compare operands, using cmpValues method.
+ int cmpGEP(const GEPOperator *GEPL, const GEPOperator *GEPR);
+ int cmpGEP(const GetElementPtrInst *GEPL, const GetElementPtrInst *GEPR) {
+ return cmpGEP(cast<GEPOperator>(GEPL), cast<GEPOperator>(GEPR));
}
- /// Compare two Types, treating all pointer types as equal.
- bool isEquivalentType(Type *Ty1, Type *Ty2) const;
+  /// cmpType - compares two types and
+  /// defines a total ordering over the set of types.
+ ///
+ /// Return values:
+ /// 0 if types are equal,
+ /// -1 if Left is less than Right,
+ /// +1 if Left is greater than Right.
+ ///
+ /// Description:
+  /// The comparison is broken into stages. As in a lexicographical comparison,
+  /// an earlier stage has higher priority.
+  /// At each stage, keep the total-ordering properties in mind.
+ ///
+  /// 0. Before the comparison we coerce pointer types in address space 0 to
+  /// integers.
+  /// We also don't bother when the left and right types are the same, and
+  /// just return 0 in this case.
+ ///
+  /// 1. If the types are of different kinds (different type IDs),
+  ///    return the result of comparing the type IDs as numbers.
+  /// 2. If the types are vectors or integers, compare the Type* values as numbers.
+  /// 3. The types have the same ID, so check whether they belong to the
+  ///    following group:
+ /// * Void
+ /// * Float
+ /// * Double
+ /// * X86_FP80
+ /// * FP128
+ /// * PPC_FP128
+ /// * Label
+ /// * Metadata
+  ///    If so, return 0: we can treat these types as equal purely because
+  ///    their IDs are the same.
+  /// 4. If Left and Right are pointers, return the result of comparing their
+  ///    address spaces (as numbers). We can treat pointer types of the same
+  ///    address space as equal.
+  /// 5. If the types are complex, both Left and Right are expanded and their
+  ///    element types are checked in the same way. If we get Res != 0 at some
+  ///    stage, return it. Otherwise return 0.
+  /// 6. All other cases hit llvm_unreachable.
+ int cmpType(Type *TyL, Type *TyR) const;
+
+ int cmpNumbers(uint64_t L, uint64_t R) const;
+
+ int cmpAPInt(const APInt &L, const APInt &R) const;
+ int cmpAPFloat(const APFloat &L, const APFloat &R) const;
+ int cmpStrings(StringRef L, StringRef R) const;
+ int cmpAttrs(const AttributeSet L, const AttributeSet R) const;
// The two functions undergoing comparison.
- const Function *F1, *F2;
+ const Function *FnL, *FnR;
+
+ const DataLayout *DL;
+
+ /// Assign serial numbers to values from left function, and values from
+ /// right function.
+ /// Explanation:
+  /// While comparing functions we need to compare the values we meet on the
+  /// left and right sides.
+  /// It is easy to sort things out for external values: it should simply be
+  /// the same value on the left and right.
+  /// But for local values (those introduced inside the function body)
+  /// we have to ensure they were introduced at exactly the same place
+  /// and play the same role.
+  /// So let's assign a serial number to each value when we first meet it.
+  /// Values that were met at the same place will have the same serial numbers.
+  /// A few points about the values assigned to BBs and about alternative
+  /// implementations are worth explaining (see below).
+ ///
+ /// 1. Safety of BB reordering.
+  /// It is safe to change the order of BasicBlocks in a function.
+  /// The relationship with other functions and the serial numbering will not
+  /// change in this case.
+  /// As follows from FunctionComparator::compare(), we do a CFG walk: we start
+  /// from the entry and then take each terminator. So it doesn't matter how
+  /// the BBs are actually ordered in the function. And since cmpValues is
+  /// called during this walk, the numbering depends only on how the BBs are
+  /// located inside the CFG.
+  /// So the answer is yes: we will get the same numbering.
+ ///
+ /// 2. Impossibility to use dominance properties of values.
+  /// If we compare two instruction operands, where the first is a use of local
+  /// variable AL from function FL and the second is a use of local variable AR
+  /// from FR, we could compare their origins and check whether they are
+  /// defined at the same place.
+  /// But we are still unable to compare the operands of PHI nodes, since those
+  /// could be operands from later BBs we haven't scanned yet.
+  /// So it is impossible to use dominance properties in general.
+ DenseMap<const Value*, int> sn_mapL, sn_mapR;
+};
- const DataLayout *TD;
+class FunctionPtr {
+ AssertingVH<Function> F;
+ const DataLayout *DL;
- DenseMap<const Value *, const Value *> id_map;
- DenseSet<const Value *> seen_values;
+public:
+ FunctionPtr(Function *F, const DataLayout *DL) : F(F), DL(DL) {}
+ Function *getFunc() const { return F; }
+ void release() { F = 0; }
+ bool operator<(const FunctionPtr &RHS) const {
+ return (FunctionComparator(DL, F, RHS.getFunc()).compare()) == -1;
+ }
};
+}
+
+int FunctionComparator::cmpNumbers(uint64_t L, uint64_t R) const {
+ if (L < R) return -1;
+ if (L > R) return 1;
+ return 0;
+}
+int FunctionComparator::cmpAPInt(const APInt &L, const APInt &R) const {
+ if (int Res = cmpNumbers(L.getBitWidth(), R.getBitWidth()))
+ return Res;
+ if (L.ugt(R)) return 1;
+ if (R.ugt(L)) return -1;
+ return 0;
}
-// Any two pointers in the same address space are equivalent, intptr_t and
-// pointers are equivalent. Otherwise, standard type equivalence rules apply.
-bool FunctionComparator::isEquivalentType(Type *Ty1, Type *Ty2) const {
+int FunctionComparator::cmpAPFloat(const APFloat &L, const APFloat &R) const {
+ if (int Res = cmpNumbers((uint64_t)&L.getSemantics(),
+ (uint64_t)&R.getSemantics()))
+ return Res;
+ return cmpAPInt(L.bitcastToAPInt(), R.bitcastToAPInt());
+}
- PointerType *PTy1 = dyn_cast<PointerType>(Ty1);
- PointerType *PTy2 = dyn_cast<PointerType>(Ty2);
+int FunctionComparator::cmpStrings(StringRef L, StringRef R) const {
+  // Prevent a heavy comparison by comparing sizes first.
+ if (int Res = cmpNumbers(L.size(), R.size()))
+ return Res;
- if (TD) {
- if (PTy1 && PTy1->getAddressSpace() == 0) Ty1 = TD->getIntPtrType(Ty1);
- if (PTy2 && PTy2->getAddressSpace() == 0) Ty2 = TD->getIntPtrType(Ty2);
+ // Compare strings lexicographically only when it is necessary: only when
+ // strings are equal in size.
+ return L.compare(R);
+}
+
+int FunctionComparator::cmpAttrs(const AttributeSet L,
+ const AttributeSet R) const {
+ if (int Res = cmpNumbers(L.getNumSlots(), R.getNumSlots()))
+ return Res;
+
+ for (unsigned i = 0, e = L.getNumSlots(); i != e; ++i) {
+ AttributeSet::iterator LI = L.begin(i), LE = L.end(i), RI = R.begin(i),
+ RE = R.end(i);
+ for (; LI != LE && RI != RE; ++LI, ++RI) {
+ Attribute LA = *LI;
+ Attribute RA = *RI;
+ if (LA < RA)
+ return -1;
+ if (RA < LA)
+ return 1;
+ }
+ if (LI != LE)
+ return 1;
+ if (RI != RE)
+ return -1;
}
+ return 0;
+}
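The "if (int Res = ...) return Res;" chains used throughout these helpers implement a lexicographic comparison: an earlier field decides the result, and later fields only break ties. A minimal, self-contained illustration of the idiom (not LLVM code):

  #include <cstdint>

  static int cmpNumbers(uint64_t L, uint64_t R) {
    if (L < R) return -1;
    if (L > R) return 1;
    return 0;
  }

  struct Item { uint64_t Kind, Size, Id; };

  // Compare Kind first, then Size, then Id; the first non-zero result wins.
  static int cmpItems(const Item &L, const Item &R) {
    if (int Res = cmpNumbers(L.Kind, R.Kind)) return Res;
    if (int Res = cmpNumbers(L.Size, R.Size)) return Res;
    return cmpNumbers(L.Id, R.Id);
  }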
- if (Ty1 == Ty2)
- return true;
+/// Constants comparison:
+/// 1. Check whether the type of the L constant can be losslessly bitcast to
+///    the R type.
+/// 2. Compare constant contents.
+/// For more details see declaration comments.
+int FunctionComparator::cmpConstants(const Constant *L, const Constant *R) {
+
+ Type *TyL = L->getType();
+ Type *TyR = R->getType();
+
+  // Check whether the types are bitcastable. This part is just a refactored
+  // Type::canLosslesslyBitCastTo method, but instead of returning true/false
+  // we also encode in the result which type is "less" for us.
+ int TypesRes = cmpType(TyL, TyR);
+ if (TypesRes != 0) {
+ // Types are different, but check whether we can bitcast them.
+ if (!TyL->isFirstClassType()) {
+ if (TyR->isFirstClassType())
+ return -1;
+      // Neither TyL nor TyR is a first-class type. Return the result
+      // of comparing the types.
+ return TypesRes;
+ }
+ if (!TyR->isFirstClassType()) {
+ if (TyL->isFirstClassType())
+ return 1;
+ return TypesRes;
+ }
- if (Ty1->getTypeID() != Ty2->getTypeID())
- return false;
+ // Vector -> Vector conversions are always lossless if the two vector types
+ // have the same size, otherwise not.
+ unsigned TyLWidth = 0;
+ unsigned TyRWidth = 0;
+
+ if (const VectorType *VecTyL = dyn_cast<VectorType>(TyL))
+ TyLWidth = VecTyL->getBitWidth();
+ if (const VectorType *VecTyR = dyn_cast<VectorType>(TyR))
+ TyRWidth = VecTyR->getBitWidth();
+
+ if (TyLWidth != TyRWidth)
+ return cmpNumbers(TyLWidth, TyRWidth);
+
+ // Zero bit-width means neither TyL nor TyR are vectors.
+ if (!TyLWidth) {
+ PointerType *PTyL = dyn_cast<PointerType>(TyL);
+ PointerType *PTyR = dyn_cast<PointerType>(TyR);
+ if (PTyL && PTyR) {
+ unsigned AddrSpaceL = PTyL->getAddressSpace();
+ unsigned AddrSpaceR = PTyR->getAddressSpace();
+ if (int Res = cmpNumbers(AddrSpaceL, AddrSpaceR))
+ return Res;
+ }
+ if (PTyL)
+ return 1;
+ if (PTyR)
+ return -1;
+
+ // TyL and TyR aren't vectors, nor pointers. We don't know how to
+ // bitcast them.
+ return TypesRes;
+ }
+ }
+
+ // OK, types are bitcastable, now check constant contents.
+
+ if (L->isNullValue() && R->isNullValue())
+ return TypesRes;
+ if (L->isNullValue() && !R->isNullValue())
+ return 1;
+ if (!L->isNullValue() && R->isNullValue())
+ return -1;
+
+ if (int Res = cmpNumbers(L->getValueID(), R->getValueID()))
+ return Res;
+
+ switch (L->getValueID()) {
+ case Value::UndefValueVal: return TypesRes;
+ case Value::ConstantIntVal: {
+ const APInt &LInt = cast<ConstantInt>(L)->getValue();
+ const APInt &RInt = cast<ConstantInt>(R)->getValue();
+ return cmpAPInt(LInt, RInt);
+ }
+ case Value::ConstantFPVal: {
+ const APFloat &LAPF = cast<ConstantFP>(L)->getValueAPF();
+ const APFloat &RAPF = cast<ConstantFP>(R)->getValueAPF();
+ return cmpAPFloat(LAPF, RAPF);
+ }
+ case Value::ConstantArrayVal: {
+ const ConstantArray *LA = cast<ConstantArray>(L);
+ const ConstantArray *RA = cast<ConstantArray>(R);
+ uint64_t NumElementsL = cast<ArrayType>(TyL)->getNumElements();
+ uint64_t NumElementsR = cast<ArrayType>(TyR)->getNumElements();
+ if (int Res = cmpNumbers(NumElementsL, NumElementsR))
+ return Res;
+ for (uint64_t i = 0; i < NumElementsL; ++i) {
+ if (int Res = cmpConstants(cast<Constant>(LA->getOperand(i)),
+ cast<Constant>(RA->getOperand(i))))
+ return Res;
+ }
+ return 0;
+ }
+ case Value::ConstantStructVal: {
+ const ConstantStruct *LS = cast<ConstantStruct>(L);
+ const ConstantStruct *RS = cast<ConstantStruct>(R);
+ unsigned NumElementsL = cast<StructType>(TyL)->getNumElements();
+ unsigned NumElementsR = cast<StructType>(TyR)->getNumElements();
+ if (int Res = cmpNumbers(NumElementsL, NumElementsR))
+ return Res;
+ for (unsigned i = 0; i != NumElementsL; ++i) {
+ if (int Res = cmpConstants(cast<Constant>(LS->getOperand(i)),
+ cast<Constant>(RS->getOperand(i))))
+ return Res;
+ }
+ return 0;
+ }
+ case Value::ConstantVectorVal: {
+ const ConstantVector *LV = cast<ConstantVector>(L);
+ const ConstantVector *RV = cast<ConstantVector>(R);
+ unsigned NumElementsL = cast<VectorType>(TyL)->getNumElements();
+ unsigned NumElementsR = cast<VectorType>(TyR)->getNumElements();
+ if (int Res = cmpNumbers(NumElementsL, NumElementsR))
+ return Res;
+ for (uint64_t i = 0; i < NumElementsL; ++i) {
+ if (int Res = cmpConstants(cast<Constant>(LV->getOperand(i)),
+ cast<Constant>(RV->getOperand(i))))
+ return Res;
+ }
+ return 0;
+ }
+ case Value::ConstantExprVal: {
+ const ConstantExpr *LE = cast<ConstantExpr>(L);
+ const ConstantExpr *RE = cast<ConstantExpr>(R);
+ unsigned NumOperandsL = LE->getNumOperands();
+ unsigned NumOperandsR = RE->getNumOperands();
+ if (int Res = cmpNumbers(NumOperandsL, NumOperandsR))
+ return Res;
+ for (unsigned i = 0; i < NumOperandsL; ++i) {
+ if (int Res = cmpConstants(cast<Constant>(LE->getOperand(i)),
+ cast<Constant>(RE->getOperand(i))))
+ return Res;
+ }
+ return 0;
+ }
+ case Value::FunctionVal:
+ case Value::GlobalVariableVal:
+ case Value::GlobalAliasVal:
+ default: // Unknown constant, cast L and R pointers to numbers and compare.
+ return cmpNumbers((uint64_t)L, (uint64_t)R);
+ }
+}
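Worked example, following the code above: for L = float 1.0 and R = i32 7, cmpType returns a non-zero TypesRes because the type IDs differ, and since neither type is a vector or a pointer the function falls through to "return TypesRes". FloatTyID precedes IntegerTyID in the Type::TypeID enumeration, so the float constant sorts before the integer constant, matching the ordering described in the declaration comment.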
+
+/// cmpType - compares two types and
+/// defines a total ordering over the set of types.
+/// See method declaration comments for more details.
+int FunctionComparator::cmpType(Type *TyL, Type *TyR) const {
- switch (Ty1->getTypeID()) {
+ PointerType *PTyL = dyn_cast<PointerType>(TyL);
+ PointerType *PTyR = dyn_cast<PointerType>(TyR);
+
+ if (DL) {
+ if (PTyL && PTyL->getAddressSpace() == 0) TyL = DL->getIntPtrType(TyL);
+ if (PTyR && PTyR->getAddressSpace() == 0) TyR = DL->getIntPtrType(TyR);
+ }
+
+ if (TyL == TyR)
+ return 0;
+
+ if (int Res = cmpNumbers(TyL->getTypeID(), TyR->getTypeID()))
+ return Res;
+
+ switch (TyL->getTypeID()) {
default:
llvm_unreachable("Unknown type!");
// Fall through in Release mode.
case Type::IntegerTyID:
case Type::VectorTyID:
- // Ty1 == Ty2 would have returned true earlier.
- return false;
+ // TyL == TyR would have returned true earlier.
+ return cmpNumbers((uint64_t)TyL, (uint64_t)TyR);
case Type::VoidTyID:
case Type::FloatTyID:
@@ -242,51 +648,55 @@ bool FunctionComparator::isEquivalentType(Type *Ty1, Type *Ty2) const {
case Type::PPC_FP128TyID:
case Type::LabelTyID:
case Type::MetadataTyID:
- return true;
+ return 0;
case Type::PointerTyID: {
- assert(PTy1 && PTy2 && "Both types must be pointers here.");
- return PTy1->getAddressSpace() == PTy2->getAddressSpace();
+ assert(PTyL && PTyR && "Both types must be pointers here.");
+ return cmpNumbers(PTyL->getAddressSpace(), PTyR->getAddressSpace());
}
case Type::StructTyID: {
- StructType *STy1 = cast<StructType>(Ty1);
- StructType *STy2 = cast<StructType>(Ty2);
- if (STy1->getNumElements() != STy2->getNumElements())
- return false;
-
- if (STy1->isPacked() != STy2->isPacked())
- return false;
-
- for (unsigned i = 0, e = STy1->getNumElements(); i != e; ++i) {
- if (!isEquivalentType(STy1->getElementType(i), STy2->getElementType(i)))
- return false;
+ StructType *STyL = cast<StructType>(TyL);
+ StructType *STyR = cast<StructType>(TyR);
+ if (STyL->getNumElements() != STyR->getNumElements())
+ return cmpNumbers(STyL->getNumElements(), STyR->getNumElements());
+
+ if (STyL->isPacked() != STyR->isPacked())
+ return cmpNumbers(STyL->isPacked(), STyR->isPacked());
+
+ for (unsigned i = 0, e = STyL->getNumElements(); i != e; ++i) {
+ if (int Res = cmpType(STyL->getElementType(i),
+ STyR->getElementType(i)))
+ return Res;
}
- return true;
+ return 0;
}
case Type::FunctionTyID: {
- FunctionType *FTy1 = cast<FunctionType>(Ty1);
- FunctionType *FTy2 = cast<FunctionType>(Ty2);
- if (FTy1->getNumParams() != FTy2->getNumParams() ||
- FTy1->isVarArg() != FTy2->isVarArg())
- return false;
+ FunctionType *FTyL = cast<FunctionType>(TyL);
+ FunctionType *FTyR = cast<FunctionType>(TyR);
+ if (FTyL->getNumParams() != FTyR->getNumParams())
+ return cmpNumbers(FTyL->getNumParams(), FTyR->getNumParams());
- if (!isEquivalentType(FTy1->getReturnType(), FTy2->getReturnType()))
- return false;
+ if (FTyL->isVarArg() != FTyR->isVarArg())
+ return cmpNumbers(FTyL->isVarArg(), FTyR->isVarArg());
+
+ if (int Res = cmpType(FTyL->getReturnType(), FTyR->getReturnType()))
+ return Res;
- for (unsigned i = 0, e = FTy1->getNumParams(); i != e; ++i) {
- if (!isEquivalentType(FTy1->getParamType(i), FTy2->getParamType(i)))
- return false;
+ for (unsigned i = 0, e = FTyL->getNumParams(); i != e; ++i) {
+ if (int Res = cmpType(FTyL->getParamType(i), FTyR->getParamType(i)))
+ return Res;
}
- return true;
+ return 0;
}
case Type::ArrayTyID: {
- ArrayType *ATy1 = cast<ArrayType>(Ty1);
- ArrayType *ATy2 = cast<ArrayType>(Ty2);
- return ATy1->getNumElements() == ATy2->getNumElements() &&
- isEquivalentType(ATy1->getElementType(), ATy2->getElementType());
+ ArrayType *ATyL = cast<ArrayType>(TyL);
+ ArrayType *ATyR = cast<ArrayType>(TyR);
+ if (ATyL->getNumElements() != ATyR->getNumElements())
+ return cmpNumbers(ATyL->getNumElements(), ATyR->getNumElements());
+ return cmpType(ATyL->getElementType(), ATyR->getElementType());
}
}
}
@@ -294,222 +704,323 @@ bool FunctionComparator::isEquivalentType(Type *Ty1, Type *Ty2) const {
// Determine whether the two operations are the same except that pointer-to-A
// and pointer-to-B are equivalent. This should be kept in sync with
// Instruction::isSameOperationAs.
-bool FunctionComparator::isEquivalentOperation(const Instruction *I1,
- const Instruction *I2) const {
+// Read method declaration comments for more details.
+int FunctionComparator::cmpOperation(const Instruction *L,
+ const Instruction *R) const {
// Differences from Instruction::isSameOperationAs:
// * replace type comparison with calls to isEquivalentType.
// * we test for I->hasSameSubclassOptionalData (nuw/nsw/tail) at the top
// * because of the above, we don't test for the tail bit on calls later on
- if (I1->getOpcode() != I2->getOpcode() ||
- I1->getNumOperands() != I2->getNumOperands() ||
- !isEquivalentType(I1->getType(), I2->getType()) ||
- !I1->hasSameSubclassOptionalData(I2))
- return false;
+ if (int Res = cmpNumbers(L->getOpcode(), R->getOpcode()))
+ return Res;
+
+ if (int Res = cmpNumbers(L->getNumOperands(), R->getNumOperands()))
+ return Res;
+
+ if (int Res = cmpType(L->getType(), R->getType()))
+ return Res;
+
+ if (int Res = cmpNumbers(L->getRawSubclassOptionalData(),
+ R->getRawSubclassOptionalData()))
+ return Res;
// We have two instructions of identical opcode and #operands. Check to see
// if all operands are the same type
- for (unsigned i = 0, e = I1->getNumOperands(); i != e; ++i)
- if (!isEquivalentType(I1->getOperand(i)->getType(),
- I2->getOperand(i)->getType()))
- return false;
+ for (unsigned i = 0, e = L->getNumOperands(); i != e; ++i) {
+ if (int Res =
+ cmpType(L->getOperand(i)->getType(), R->getOperand(i)->getType()))
+ return Res;
+ }
// Check special state that is a part of some instructions.
- if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
- return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
- LI->getAlignment() == cast<LoadInst>(I2)->getAlignment() &&
- LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
- LI->getSynchScope() == cast<LoadInst>(I2)->getSynchScope();
- if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
- return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
- SI->getAlignment() == cast<StoreInst>(I2)->getAlignment() &&
- SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
- SI->getSynchScope() == cast<StoreInst>(I2)->getSynchScope();
- if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
- return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
- if (const CallInst *CI = dyn_cast<CallInst>(I1))
- return CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
- CI->getAttributes() == cast<CallInst>(I2)->getAttributes();
- if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
- return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
- CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes();
- if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
- return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
- if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
- return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
- if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
- return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
- FI->getSynchScope() == cast<FenceInst>(I2)->getSynchScope();
- if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
- return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
- CXI->getOrdering() == cast<AtomicCmpXchgInst>(I2)->getOrdering() &&
- CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I2)->getSynchScope();
- if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
- return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
- RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
- RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
- RMWI->getSynchScope() == cast<AtomicRMWInst>(I2)->getSynchScope();
+ if (const LoadInst *LI = dyn_cast<LoadInst>(L)) {
+ if (int Res = cmpNumbers(LI->isVolatile(), cast<LoadInst>(R)->isVolatile()))
+ return Res;
+ if (int Res =
+ cmpNumbers(LI->getAlignment(), cast<LoadInst>(R)->getAlignment()))
+ return Res;
+ if (int Res =
+ cmpNumbers(LI->getOrdering(), cast<LoadInst>(R)->getOrdering()))
+ return Res;
+ if (int Res =
+ cmpNumbers(LI->getSynchScope(), cast<LoadInst>(R)->getSynchScope()))
+ return Res;
+ return cmpNumbers((uint64_t)LI->getMetadata(LLVMContext::MD_range),
+ (uint64_t)cast<LoadInst>(R)->getMetadata(LLVMContext::MD_range));
+ }
+ if (const StoreInst *SI = dyn_cast<StoreInst>(L)) {
+ if (int Res =
+ cmpNumbers(SI->isVolatile(), cast<StoreInst>(R)->isVolatile()))
+ return Res;
+ if (int Res =
+ cmpNumbers(SI->getAlignment(), cast<StoreInst>(R)->getAlignment()))
+ return Res;
+ if (int Res =
+ cmpNumbers(SI->getOrdering(), cast<StoreInst>(R)->getOrdering()))
+ return Res;
+ return cmpNumbers(SI->getSynchScope(), cast<StoreInst>(R)->getSynchScope());
+ }
+ if (const CmpInst *CI = dyn_cast<CmpInst>(L))
+ return cmpNumbers(CI->getPredicate(), cast<CmpInst>(R)->getPredicate());
+ if (const CallInst *CI = dyn_cast<CallInst>(L)) {
+ if (int Res = cmpNumbers(CI->getCallingConv(),
+ cast<CallInst>(R)->getCallingConv()))
+ return Res;
+ if (int Res =
+ cmpAttrs(CI->getAttributes(), cast<CallInst>(R)->getAttributes()))
+ return Res;
+ return cmpNumbers(
+ (uint64_t)CI->getMetadata(LLVMContext::MD_range),
+ (uint64_t)cast<CallInst>(R)->getMetadata(LLVMContext::MD_range));
+ }
+ if (const InvokeInst *CI = dyn_cast<InvokeInst>(L)) {
+ if (int Res = cmpNumbers(CI->getCallingConv(),
+ cast<InvokeInst>(R)->getCallingConv()))
+ return Res;
+ if (int Res =
+ cmpAttrs(CI->getAttributes(), cast<InvokeInst>(R)->getAttributes()))
+ return Res;
+ return cmpNumbers(
+ (uint64_t)CI->getMetadata(LLVMContext::MD_range),
+ (uint64_t)cast<InvokeInst>(R)->getMetadata(LLVMContext::MD_range));
+ }
+ if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(L)) {
+ ArrayRef<unsigned> LIndices = IVI->getIndices();
+ ArrayRef<unsigned> RIndices = cast<InsertValueInst>(R)->getIndices();
+ if (int Res = cmpNumbers(LIndices.size(), RIndices.size()))
+ return Res;
+ for (size_t i = 0, e = LIndices.size(); i != e; ++i) {
+ if (int Res = cmpNumbers(LIndices[i], RIndices[i]))
+ return Res;
+ }
+ }
+ if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(L)) {
+ ArrayRef<unsigned> LIndices = EVI->getIndices();
+ ArrayRef<unsigned> RIndices = cast<ExtractValueInst>(R)->getIndices();
+ if (int Res = cmpNumbers(LIndices.size(), RIndices.size()))
+ return Res;
+ for (size_t i = 0, e = LIndices.size(); i != e; ++i) {
+ if (int Res = cmpNumbers(LIndices[i], RIndices[i]))
+ return Res;
+ }
+ }
+ if (const FenceInst *FI = dyn_cast<FenceInst>(L)) {
+ if (int Res =
+ cmpNumbers(FI->getOrdering(), cast<FenceInst>(R)->getOrdering()))
+ return Res;
+ return cmpNumbers(FI->getSynchScope(), cast<FenceInst>(R)->getSynchScope());
+ }
- return true;
+ if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(L)) {
+ if (int Res = cmpNumbers(CXI->isVolatile(),
+ cast<AtomicCmpXchgInst>(R)->isVolatile()))
+ return Res;
+ if (int Res = cmpNumbers(CXI->isWeak(),
+ cast<AtomicCmpXchgInst>(R)->isWeak()))
+ return Res;
+ if (int Res = cmpNumbers(CXI->getSuccessOrdering(),
+ cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))
+ return Res;
+ if (int Res = cmpNumbers(CXI->getFailureOrdering(),
+ cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))
+ return Res;
+ return cmpNumbers(CXI->getSynchScope(),
+ cast<AtomicCmpXchgInst>(R)->getSynchScope());
+ }
+ if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(L)) {
+ if (int Res = cmpNumbers(RMWI->getOperation(),
+ cast<AtomicRMWInst>(R)->getOperation()))
+ return Res;
+ if (int Res = cmpNumbers(RMWI->isVolatile(),
+ cast<AtomicRMWInst>(R)->isVolatile()))
+ return Res;
+ if (int Res = cmpNumbers(RMWI->getOrdering(),
+ cast<AtomicRMWInst>(R)->getOrdering()))
+ return Res;
+ return cmpNumbers(RMWI->getSynchScope(),
+ cast<AtomicRMWInst>(R)->getSynchScope());
+ }
+ return 0;
}
// Determine whether two GEP operations perform the same underlying arithmetic.
-bool FunctionComparator::isEquivalentGEP(const GEPOperator *GEP1,
- const GEPOperator *GEP2) {
- unsigned AS = GEP1->getPointerAddressSpace();
- if (AS != GEP2->getPointerAddressSpace())
- return false;
-
- if (TD) {
- // When we have target data, we can reduce the GEP down to the value in bytes
- // added to the address.
- unsigned BitWidth = TD ? TD->getPointerSizeInBits(AS) : 1;
- APInt Offset1(BitWidth, 0), Offset2(BitWidth, 0);
- if (GEP1->accumulateConstantOffset(*TD, Offset1) &&
- GEP2->accumulateConstantOffset(*TD, Offset2)) {
- return Offset1 == Offset2;
- }
+// Read method declaration comments for more details.
+int FunctionComparator::cmpGEP(const GEPOperator *GEPL,
+ const GEPOperator *GEPR) {
+
+ unsigned int ASL = GEPL->getPointerAddressSpace();
+ unsigned int ASR = GEPR->getPointerAddressSpace();
+
+ if (int Res = cmpNumbers(ASL, ASR))
+ return Res;
+
+ // When we have target data, we can reduce the GEP down to the value in bytes
+ // added to the address.
+ if (DL) {
+ unsigned BitWidth = DL->getPointerSizeInBits(ASL);
+ APInt OffsetL(BitWidth, 0), OffsetR(BitWidth, 0);
+ if (GEPL->accumulateConstantOffset(*DL, OffsetL) &&
+ GEPR->accumulateConstantOffset(*DL, OffsetR))
+ return cmpAPInt(OffsetL, OffsetR);
}
- if (GEP1->getPointerOperand()->getType() !=
- GEP2->getPointerOperand()->getType())
- return false;
+ if (int Res = cmpNumbers((uint64_t)GEPL->getPointerOperand()->getType(),
+ (uint64_t)GEPR->getPointerOperand()->getType()))
+ return Res;
- if (GEP1->getNumOperands() != GEP2->getNumOperands())
- return false;
+ if (int Res = cmpNumbers(GEPL->getNumOperands(), GEPR->getNumOperands()))
+ return Res;
- for (unsigned i = 0, e = GEP1->getNumOperands(); i != e; ++i) {
- if (!enumerate(GEP1->getOperand(i), GEP2->getOperand(i)))
- return false;
+ for (unsigned i = 0, e = GEPL->getNumOperands(); i != e; ++i) {
+ if (int Res = cmpValues(GEPL->getOperand(i), GEPR->getOperand(i)))
+ return Res;
}
- return true;
+ return 0;
}
-// Compare two values used by the two functions under pair-wise comparison. If
-// this is the first time the values are seen, they're added to the mapping so
-// that we will detect mismatches on next use.
-bool FunctionComparator::enumerate(const Value *V1, const Value *V2) {
- // Check for function @f1 referring to itself and function @f2 referring to
- // itself, or referring to each other, or both referring to either of them.
- // They're all equivalent if the two functions are otherwise equivalent.
- if (V1 == F1 && V2 == F2)
- return true;
- if (V1 == F2 && V2 == F1)
- return true;
-
- if (const Constant *C1 = dyn_cast<Constant>(V1)) {
- if (V1 == V2) return true;
- const Constant *C2 = dyn_cast<Constant>(V2);
- if (!C2) return false;
- // TODO: constant expressions with GEP or references to F1 or F2.
- if (C1->isNullValue() && C2->isNullValue() &&
- isEquivalentType(C1->getType(), C2->getType()))
- return true;
- // Try bitcasting C2 to C1's type. If the bitcast is legal and returns C1
- // then they must have equal bit patterns.
- return C1->getType()->canLosslesslyBitCastTo(C2->getType()) &&
- C1 == ConstantExpr::getBitCast(const_cast<Constant*>(C2), C1->getType());
- }
-
- if (isa<InlineAsm>(V1) || isa<InlineAsm>(V2))
- return V1 == V2;
-
- // Check that V1 maps to V2. If we find a value that V1 maps to then we simply
- // check whether it's equal to V2. When there is no mapping then we need to
- // ensure that V2 isn't already equivalent to something else. For this
- // purpose, we track the V2 values in a set.
-
- const Value *&map_elem = id_map[V1];
- if (map_elem)
- return map_elem == V2;
- if (!seen_values.insert(V2).second)
- return false;
- map_elem = V2;
- return true;
-}
-
-// Test whether two basic blocks have equivalent behaviour.
-bool FunctionComparator::compare(const BasicBlock *BB1, const BasicBlock *BB2) {
- BasicBlock::const_iterator F1I = BB1->begin(), F1E = BB1->end();
- BasicBlock::const_iterator F2I = BB2->begin(), F2E = BB2->end();
+/// Compare two values used by the two functions under pair-wise comparison. If
+/// this is the first time the values are seen, they're added to the mapping so
+/// that we will detect mismatches on next use.
+/// See comments in declaration for more details.
+int FunctionComparator::cmpValues(const Value *L, const Value *R) {
+ // Catch self-reference case.
+ if (L == FnL) {
+ if (R == FnR)
+ return 0;
+ return -1;
+ }
+ if (R == FnR) {
+ if (L == FnL)
+ return 0;
+ return 1;
+ }
- do {
- if (!enumerate(F1I, F2I))
- return false;
+ const Constant *ConstL = dyn_cast<Constant>(L);
+ const Constant *ConstR = dyn_cast<Constant>(R);
+ if (ConstL && ConstR) {
+ if (L == R)
+ return 0;
+ return cmpConstants(ConstL, ConstR);
+ }
- if (const GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(F1I)) {
- const GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(F2I);
- if (!GEP2)
- return false;
+ if (ConstL)
+ return 1;
+ if (ConstR)
+ return -1;
- if (!enumerate(GEP1->getPointerOperand(), GEP2->getPointerOperand()))
- return false;
+ const InlineAsm *InlineAsmL = dyn_cast<InlineAsm>(L);
+ const InlineAsm *InlineAsmR = dyn_cast<InlineAsm>(R);
- if (!isEquivalentGEP(GEP1, GEP2))
- return false;
- } else {
- if (!isEquivalentOperation(F1I, F2I))
- return false;
+ if (InlineAsmL && InlineAsmR)
+ return cmpNumbers((uint64_t)L, (uint64_t)R);
+ if (InlineAsmL)
+ return 1;
+ if (InlineAsmR)
+ return -1;
- assert(F1I->getNumOperands() == F2I->getNumOperands());
- for (unsigned i = 0, e = F1I->getNumOperands(); i != e; ++i) {
- Value *OpF1 = F1I->getOperand(i);
- Value *OpF2 = F2I->getOperand(i);
+ auto LeftSN = sn_mapL.insert(std::make_pair(L, sn_mapL.size())),
+ RightSN = sn_mapR.insert(std::make_pair(R, sn_mapR.size()));
- if (!enumerate(OpF1, OpF2))
- return false;
+ return cmpNumbers(LeftSN.first->second, RightSN.first->second);
+}
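The serial-numbering trick above can be pictured in isolation with this small, hedged sketch, using std::map in place of LLVM's DenseMap; Numbering and idFor are illustrative names only:

  #include <map>

  struct Numbering {
    std::map<const void *, int> Ids;
    // Assign each value an id in order of first appearance; reuse the id on
    // later visits. Two functions match as long as corresponding values first
    // appear at the same position in the walk.
    int idFor(const void *V) {
      return Ids.insert(std::make_pair(V, (int)Ids.size())).first->second;
    }
  };

  // cmpValues(L, R) then reduces to comparing NumL.idFor(L) with NumR.idFor(R).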
+// Test whether two basic blocks have equivalent behaviour.
+int FunctionComparator::compare(const BasicBlock *BBL, const BasicBlock *BBR) {
+ BasicBlock::const_iterator InstL = BBL->begin(), InstLE = BBL->end();
+ BasicBlock::const_iterator InstR = BBR->begin(), InstRE = BBR->end();
- if (OpF1->getValueID() != OpF2->getValueID() ||
- !isEquivalentType(OpF1->getType(), OpF2->getType()))
- return false;
+ do {
+ if (int Res = cmpValues(InstL, InstR))
+ return Res;
+
+ const GetElementPtrInst *GEPL = dyn_cast<GetElementPtrInst>(InstL);
+ const GetElementPtrInst *GEPR = dyn_cast<GetElementPtrInst>(InstR);
+
+ if (GEPL && !GEPR)
+ return 1;
+ if (GEPR && !GEPL)
+ return -1;
+
+ if (GEPL && GEPR) {
+ if (int Res =
+ cmpValues(GEPL->getPointerOperand(), GEPR->getPointerOperand()))
+ return Res;
+ if (int Res = cmpGEP(GEPL, GEPR))
+ return Res;
+ } else {
+ if (int Res = cmpOperation(InstL, InstR))
+ return Res;
+ assert(InstL->getNumOperands() == InstR->getNumOperands());
+
+ for (unsigned i = 0, e = InstL->getNumOperands(); i != e; ++i) {
+ Value *OpL = InstL->getOperand(i);
+ Value *OpR = InstR->getOperand(i);
+ if (int Res = cmpValues(OpL, OpR))
+ return Res;
+ if (int Res = cmpNumbers(OpL->getValueID(), OpR->getValueID()))
+ return Res;
+ // TODO: Already checked in cmpOperation
+ if (int Res = cmpType(OpL->getType(), OpR->getType()))
+ return Res;
}
}
- ++F1I, ++F2I;
- } while (F1I != F1E && F2I != F2E);
+ ++InstL, ++InstR;
+ } while (InstL != InstLE && InstR != InstRE);
- return F1I == F1E && F2I == F2E;
+ if (InstL != InstLE && InstR == InstRE)
+ return 1;
+ if (InstL == InstLE && InstR != InstRE)
+ return -1;
+ return 0;
}
// Test whether the two functions have equivalent behaviour.
-bool FunctionComparator::compare() {
- // We need to recheck everything, but check the things that weren't included
- // in the hash first.
+int FunctionComparator::compare() {
- if (F1->getAttributes() != F2->getAttributes())
- return false;
+ sn_mapL.clear();
+ sn_mapR.clear();
- if (F1->hasGC() != F2->hasGC())
- return false;
+ if (int Res = cmpAttrs(FnL->getAttributes(), FnR->getAttributes()))
+ return Res;
- if (F1->hasGC() && F1->getGC() != F2->getGC())
- return false;
+ if (int Res = cmpNumbers(FnL->hasGC(), FnR->hasGC()))
+ return Res;
- if (F1->hasSection() != F2->hasSection())
- return false;
+ if (FnL->hasGC()) {
+ if (int Res = cmpNumbers((uint64_t)FnL->getGC(), (uint64_t)FnR->getGC()))
+ return Res;
+ }
- if (F1->hasSection() && F1->getSection() != F2->getSection())
- return false;
+ if (int Res = cmpNumbers(FnL->hasSection(), FnR->hasSection()))
+ return Res;
- if (F1->isVarArg() != F2->isVarArg())
- return false;
+ if (FnL->hasSection()) {
+ if (int Res = cmpStrings(FnL->getSection(), FnR->getSection()))
+ return Res;
+ }
+
+ if (int Res = cmpNumbers(FnL->isVarArg(), FnR->isVarArg()))
+ return Res;
// TODO: if it's internal and only used in direct calls, we could handle this
// case too.
- if (F1->getCallingConv() != F2->getCallingConv())
- return false;
+ if (int Res = cmpNumbers(FnL->getCallingConv(), FnR->getCallingConv()))
+ return Res;
- if (!isEquivalentType(F1->getFunctionType(), F2->getFunctionType()))
- return false;
+ if (int Res = cmpType(FnL->getFunctionType(), FnR->getFunctionType()))
+ return Res;
- assert(F1->arg_size() == F2->arg_size() &&
+ assert(FnL->arg_size() == FnR->arg_size() &&
"Identically typed functions have different numbers of args!");
// Visit the arguments so that they get enumerated in the order they're
// passed in.
- for (Function::const_arg_iterator f1i = F1->arg_begin(),
- f2i = F2->arg_begin(), f1e = F1->arg_end(); f1i != f1e; ++f1i, ++f2i) {
- if (!enumerate(f1i, f2i))
+ for (Function::const_arg_iterator ArgLI = FnL->arg_begin(),
+ ArgRI = FnR->arg_begin(),
+ ArgLE = FnL->arg_end();
+ ArgLI != ArgLE; ++ArgLI, ++ArgRI) {
+ if (cmpValues(ArgLI, ArgRI) != 0)
llvm_unreachable("Arguments repeat!");
}
@@ -517,33 +1028,36 @@ bool FunctionComparator::compare() {
// linked list is immaterial. Our walk starts at the entry block for both
// functions, then takes each block from each terminator in order. As an
// artifact, this also means that unreachable blocks are ignored.
- SmallVector<const BasicBlock *, 8> F1BBs, F2BBs;
+ SmallVector<const BasicBlock *, 8> FnLBBs, FnRBBs;
SmallSet<const BasicBlock *, 128> VisitedBBs; // in terms of F1.
- F1BBs.push_back(&F1->getEntryBlock());
- F2BBs.push_back(&F2->getEntryBlock());
+ FnLBBs.push_back(&FnL->getEntryBlock());
+ FnRBBs.push_back(&FnR->getEntryBlock());
- VisitedBBs.insert(F1BBs[0]);
- while (!F1BBs.empty()) {
- const BasicBlock *F1BB = F1BBs.pop_back_val();
- const BasicBlock *F2BB = F2BBs.pop_back_val();
+ VisitedBBs.insert(FnLBBs[0]);
+ while (!FnLBBs.empty()) {
+ const BasicBlock *BBL = FnLBBs.pop_back_val();
+ const BasicBlock *BBR = FnRBBs.pop_back_val();
- if (!enumerate(F1BB, F2BB) || !compare(F1BB, F2BB))
- return false;
+ if (int Res = cmpValues(BBL, BBR))
+ return Res;
- const TerminatorInst *F1TI = F1BB->getTerminator();
- const TerminatorInst *F2TI = F2BB->getTerminator();
+ if (int Res = compare(BBL, BBR))
+ return Res;
- assert(F1TI->getNumSuccessors() == F2TI->getNumSuccessors());
- for (unsigned i = 0, e = F1TI->getNumSuccessors(); i != e; ++i) {
- if (!VisitedBBs.insert(F1TI->getSuccessor(i)))
+ const TerminatorInst *TermL = BBL->getTerminator();
+ const TerminatorInst *TermR = BBR->getTerminator();
+
+ assert(TermL->getNumSuccessors() == TermR->getNumSuccessors());
+ for (unsigned i = 0, e = TermL->getNumSuccessors(); i != e; ++i) {
+ if (!VisitedBBs.insert(TermL->getSuccessor(i)))
continue;
- F1BBs.push_back(F1TI->getSuccessor(i));
- F2BBs.push_back(F2TI->getSuccessor(i));
+ FnLBBs.push_back(TermL->getSuccessor(i));
+ FnRBBs.push_back(TermR->getSuccessor(i));
}
}
- return true;
+ return 0;
}
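The three-way result is what lets the rewritten pass keep candidates in an ordered container (the FnTree std::set declared below) instead of a hash set: a strict weak ordering over functions can be built directly on top of compare(). A hedged sketch of that glue — the actual FunctionPtr wrapper is defined earlier in this file and does not appear in the hunks shown here:

// Illustrative ordering functor; assumes the LLVM headers already included
// by this file. With it, finding an "equal" function in a std::set costs
// O(log N) full comparisons rather than a hash plus bucket rescans.
struct FunctionOrdering {
  const DataLayout *DL;
  bool operator()(Function *L, Function *R) const {
    return FunctionComparator(DL, L, R).compare() < 0;
  }
};
// Usage sketch: std::set<Function *, FunctionOrdering> Candidates(FunctionOrdering{DL});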
namespace {
@@ -561,24 +1075,28 @@ public:
initializeMergeFunctionsPass(*PassRegistry::getPassRegistry());
}
- bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
private:
- typedef DenseSet<ComparableFunction> FnSetType;
+ typedef std::set<FunctionPtr> FnTreeType;
/// A work queue of functions that may have been modified and should be
/// analyzed again.
std::vector<WeakVH> Deferred;
- /// Insert a ComparableFunction into the FnSet, or merge it away if it's
+ /// Checks the rules of order relation introduced among functions set.
+ /// Returns true, if sanity check has been passed, and false if failed.
+ bool doSanityCheck(std::vector<WeakVH> &Worklist);
+
+ /// Insert a ComparableFunction into the FnTree, or merge it away if it's
/// equal to one that's already present.
- bool insert(ComparableFunction &NewF);
+ bool insert(Function *NewFunction);
- /// Remove a Function from the FnSet and queue it up for a second sweep of
+ /// Remove a Function from the FnTree and queue it up for a second sweep of
/// analysis.
void remove(Function *F);
- /// Find the functions that use this Value and remove them from FnSet and
+ /// Find the functions that use this Value and remove them from FnTree and
/// queue the functions.
void removeUsers(Value *V);
@@ -603,10 +1121,10 @@ private:
/// The set of all distinct functions. Use the insert() and remove() methods
/// to modify it.
- FnSetType FnSet;
+ FnTreeType FnTree;
/// DataLayout for more accurate GEP comparisons. May be NULL.
- DataLayout *TD;
+ const DataLayout *DL;
/// Whether or not the target supports global aliases.
bool HasGlobalAliases;
@@ -621,20 +1139,94 @@ ModulePass *llvm::createMergeFunctionsPass() {
return new MergeFunctions();
}
+bool MergeFunctions::doSanityCheck(std::vector<WeakVH> &Worklist) {
+ if (const unsigned Max = NumFunctionsForSanityCheck) {
+ unsigned TripleNumber = 0;
+ bool Valid = true;
+
+ dbgs() << "MERGEFUNC-SANITY: Started for first " << Max << " functions.\n";
+
+ unsigned i = 0;
+ for (std::vector<WeakVH>::iterator I = Worklist.begin(), E = Worklist.end();
+ I != E && i < Max; ++I, ++i) {
+ unsigned j = i;
+ for (std::vector<WeakVH>::iterator J = I; J != E && j < Max; ++J, ++j) {
+ Function *F1 = cast<Function>(*I);
+ Function *F2 = cast<Function>(*J);
+ int Res1 = FunctionComparator(DL, F1, F2).compare();
+ int Res2 = FunctionComparator(DL, F2, F1).compare();
+
+ // If F1 <= F2, then F2 >= F1, otherwise report failure.
+ if (Res1 != -Res2) {
+ dbgs() << "MERGEFUNC-SANITY: Non-symmetric; triple: " << TripleNumber
+ << "\n";
+ F1->dump();
+ F2->dump();
+ Valid = false;
+ }
+
+ if (Res1 == 0)
+ continue;
+
+ unsigned k = j;
+ for (std::vector<WeakVH>::iterator K = J; K != E && k < Max;
+ ++k, ++K, ++TripleNumber) {
+ if (K == J)
+ continue;
+
+ Function *F3 = cast<Function>(*K);
+ int Res3 = FunctionComparator(DL, F1, F3).compare();
+ int Res4 = FunctionComparator(DL, F2, F3).compare();
+
+ bool Transitive = true;
+
+ if (Res1 != 0 && Res1 == Res4) {
+ // F1 > F2, F2 > F3 => F1 > F3
+ Transitive = Res3 == Res1;
+ } else if (Res3 != 0 && Res3 == -Res4) {
+ // F1 > F3, F3 > F2 => F1 > F2
+ Transitive = Res3 == Res1;
+ } else if (Res4 != 0 && -Res3 == Res4) {
+ // F2 > F3, F3 > F1 => F2 > F1
+ Transitive = Res4 == -Res1;
+ }
+
+ if (!Transitive) {
+ dbgs() << "MERGEFUNC-SANITY: Non-transitive; triple: "
+ << TripleNumber << "\n";
+ dbgs() << "Res1, Res3, Res4: " << Res1 << ", " << Res3 << ", "
+ << Res4 << "\n";
+ F1->dump();
+ F2->dump();
+ F3->dump();
+ Valid = false;
+ }
+ }
+ }
+ }
+
+ dbgs() << "MERGEFUNC-SANITY: " << (Valid ? "Passed." : "Failed.") << "\n";
+ return Valid;
+ }
+ return true;
+}
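doSanityCheck is verifying exactly the two properties a strict ordering needs from compare(): antisymmetry (swapping the operands negates the result) and transitivity across every sampled triple. Stated as plain predicates over the three-way results (illustrative helpers, not part of the patch):

// compare(F1, F2) and compare(F2, F1) must be negations of one another.
static bool isAntisymmetric(int Res12, int Res21) { return Res12 == -Res21; }

// If F1 < F2 and F2 < F3 then F1 < F3, and likewise for '>'.
static bool isTransitive(int Res12, int Res23, int Res13) {
  if (Res12 < 0 && Res23 < 0) return Res13 < 0;
  if (Res12 > 0 && Res23 > 0) return Res13 > 0;
  return true; // mixed or equal results are not constrained by this check
}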
+
bool MergeFunctions::runOnModule(Module &M) {
bool Changed = false;
- TD = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage())
Deferred.push_back(WeakVH(I));
}
- FnSet.resize(Deferred.size());
do {
std::vector<WeakVH> Worklist;
Deferred.swap(Worklist);
+ DEBUG(doSanityCheck(Worklist));
+
DEBUG(dbgs() << "size of module: " << M.size() << '\n');
DEBUG(dbgs() << "size of worklist: " << Worklist.size() << '\n');
@@ -646,8 +1238,7 @@ bool MergeFunctions::runOnModule(Module &M) {
Function *F = cast<Function>(*I);
if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&
!F->mayBeOverridden()) {
- ComparableFunction CF = ComparableFunction(F, TD);
- Changed |= insert(CF);
+ Changed |= insert(F);
}
}
@@ -661,49 +1252,27 @@ bool MergeFunctions::runOnModule(Module &M) {
Function *F = cast<Function>(*I);
if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&
F->mayBeOverridden()) {
- ComparableFunction CF = ComparableFunction(F, TD);
- Changed |= insert(CF);
+ Changed |= insert(F);
}
}
- DEBUG(dbgs() << "size of FnSet: " << FnSet.size() << '\n');
+ DEBUG(dbgs() << "size of FnTree: " << FnTree.size() << '\n');
} while (!Deferred.empty());
- FnSet.clear();
+ FnTree.clear();
return Changed;
}
-bool DenseMapInfo<ComparableFunction>::isEqual(const ComparableFunction &LHS,
- const ComparableFunction &RHS) {
- if (LHS.getFunc() == RHS.getFunc() &&
- LHS.getHash() == RHS.getHash())
- return true;
- if (!LHS.getFunc() || !RHS.getFunc())
- return false;
-
- // One of these is a special "underlying pointer comparison only" object.
- if (LHS.getTD() == ComparableFunction::LookupOnly ||
- RHS.getTD() == ComparableFunction::LookupOnly)
- return false;
-
- assert(LHS.getTD() == RHS.getTD() &&
- "Comparing functions for different targets");
-
- return FunctionComparator(LHS.getTD(), LHS.getFunc(),
- RHS.getFunc()).compare();
-}
-
// Replace direct callers of Old with New.
void MergeFunctions::replaceDirectCallers(Function *Old, Function *New) {
Constant *BitcastNew = ConstantExpr::getBitCast(New, Old->getType());
- for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
- UI != UE;) {
- Value::use_iterator TheIter = UI;
+ for (auto UI = Old->use_begin(), UE = Old->use_end(); UI != UE;) {
+ Use *U = &*UI;
++UI;
- CallSite CS(*TheIter);
- if (CS && CS.isCallee(TheIter)) {
+ CallSite CS(U->getUser());
+ if (CS && CS.isCallee(U)) {
remove(CS.getInstruction()->getParent()->getParent());
- TheIter.getUse().set(BitcastNew);
+ U->set(BitcastNew);
}
}
}
@@ -723,9 +1292,24 @@ void MergeFunctions::writeThunkOrAlias(Function *F, Function *G) {
// Helper for writeThunk,
// Selects proper bitcast operation,
-// but a bit simplier then CastInst::getCastOpcode.
-static Value* createCast(IRBuilder<false> &Builder, Value *V, Type *DestTy) {
+// but a bit simpler than CastInst::getCastOpcode.
+static Value *createCast(IRBuilder<false> &Builder, Value *V, Type *DestTy) {
Type *SrcTy = V->getType();
+ if (SrcTy->isStructTy()) {
+ assert(DestTy->isStructTy());
+ assert(SrcTy->getStructNumElements() == DestTy->getStructNumElements());
+ Value *Result = UndefValue::get(DestTy);
+ for (unsigned int I = 0, E = SrcTy->getStructNumElements(); I < E; ++I) {
+ Value *Element = createCast(
+ Builder, Builder.CreateExtractValue(V, ArrayRef<unsigned int>(I)),
+ DestTy->getStructElementType(I));
+
+ Result =
+ Builder.CreateInsertValue(Result, Element, ArrayRef<unsigned int>(I));
+ }
+ return Result;
+ }
+ assert(!DestTy->isStructTy());
if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
return Builder.CreateIntToPtr(V, DestTy);
else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
@@ -784,9 +1368,9 @@ void MergeFunctions::writeThunk(Function *F, Function *G) {
// Replace G with an alias to F and delete G.
void MergeFunctions::writeAlias(Function *F, Function *G) {
- Constant *BitcastF = ConstantExpr::getBitCast(F, G->getType());
- GlobalAlias *GA = new GlobalAlias(G->getType(), G->getLinkage(), "",
- BitcastF, G->getParent());
+ PointerType *PTy = G->getType();
+ auto *GA = GlobalAlias::create(PTy->getElementType(), PTy->getAddressSpace(),
+ G->getLinkage(), "", F);
F->setAlignment(std::max(F->getAlignment(), G->getAlignment()));
GA->takeName(G);
GA->setVisibility(G->getVisibility());
@@ -833,54 +1417,57 @@ void MergeFunctions::mergeTwoFunctions(Function *F, Function *G) {
++NumFunctionsMerged;
}
-// Insert a ComparableFunction into the FnSet, or merge it away if equal to one
+// Insert a ComparableFunction into the FnTree, or merge it away if equal to one
// that was already inserted.
-bool MergeFunctions::insert(ComparableFunction &NewF) {
- std::pair<FnSetType::iterator, bool> Result = FnSet.insert(NewF);
+bool MergeFunctions::insert(Function *NewFunction) {
+ std::pair<FnTreeType::iterator, bool> Result =
+ FnTree.insert(FunctionPtr(NewFunction, DL));
+
if (Result.second) {
- DEBUG(dbgs() << "Inserting as unique: " << NewF.getFunc()->getName() << '\n');
+ DEBUG(dbgs() << "Inserting as unique: " << NewFunction->getName() << '\n');
return false;
}
- const ComparableFunction &OldF = *Result.first;
+ const FunctionPtr &OldF = *Result.first;
// Don't merge tiny functions, since it can just end up making the function
// larger.
// FIXME: Should still merge them if they are unnamed_addr and produce an
// alias.
- if (NewF.getFunc()->size() == 1) {
- if (NewF.getFunc()->front().size() <= 2) {
- DEBUG(dbgs() << NewF.getFunc()->getName()
- << " is to small to bother merging\n");
+ if (NewFunction->size() == 1) {
+ if (NewFunction->front().size() <= 2) {
+ DEBUG(dbgs() << NewFunction->getName()
+ << " is to small to bother merging\n");
return false;
}
}
// Never thunk a strong function to a weak function.
- assert(!OldF.getFunc()->mayBeOverridden() ||
- NewF.getFunc()->mayBeOverridden());
+ assert(!OldF.getFunc()->mayBeOverridden() || NewFunction->mayBeOverridden());
- DEBUG(dbgs() << " " << OldF.getFunc()->getName() << " == "
- << NewF.getFunc()->getName() << '\n');
+ DEBUG(dbgs() << " " << OldF.getFunc()->getName()
+ << " == " << NewFunction->getName() << '\n');
- Function *DeleteF = NewF.getFunc();
- NewF.release();
+ Function *DeleteF = NewFunction;
mergeTwoFunctions(OldF.getFunc(), DeleteF);
return true;
}
-// Remove a function from FnSet. If it was already in FnSet, add it to Deferred
-// so that we'll look at it in the next round.
+// Remove a function from FnTree. If it was already in FnTree, add
+// it to Deferred so that we'll look at it in the next round.
void MergeFunctions::remove(Function *F) {
// We need to make sure we remove F, not a function "equal" to F per the
// function equality comparator.
- //
- // The special "lookup only" ComparableFunction bypasses the expensive
- // function comparison in favour of a pointer comparison on the underlying
- // Function*'s.
- ComparableFunction CF = ComparableFunction(F, ComparableFunction::LookupOnly);
- if (FnSet.erase(CF)) {
- DEBUG(dbgs() << "Removed " << F->getName() << " from set and deferred it.\n");
+ FnTreeType::iterator found = FnTree.find(FunctionPtr(F, DL));
+ size_t Erased = 0;
+ if (found != FnTree.end() && found->getFunc() == F) {
+ Erased = 1;
+ FnTree.erase(found);
+ }
+
+ if (Erased) {
+ DEBUG(dbgs() << "Removed " << F->getName()
+ << " from set and deferred it.\n");
Deferred.push_back(F);
}
}
@@ -894,17 +1481,14 @@ void MergeFunctions::removeUsers(Value *V) {
Value *V = Worklist.back();
Worklist.pop_back();
- for (Value::use_iterator UI = V->use_begin(), UE = V->use_end();
- UI != UE; ++UI) {
- Use &U = UI.getUse();
- if (Instruction *I = dyn_cast<Instruction>(U.getUser())) {
+ for (User *U : V->users()) {
+ if (Instruction *I = dyn_cast<Instruction>(U)) {
remove(I->getParent()->getParent());
- } else if (isa<GlobalValue>(U.getUser())) {
+ } else if (isa<GlobalValue>(U)) {
// do nothing
- } else if (Constant *C = dyn_cast<Constant>(U.getUser())) {
- for (Value::use_iterator CUI = C->use_begin(), CUE = C->use_end();
- CUI != CUE; ++CUI)
- Worklist.push_back(*CUI);
+ } else if (Constant *C = dyn_cast<Constant>(U)) {
+ for (User *UU : C->users())
+ Worklist.push_back(UU);
}
}
}
diff --git a/contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp b/contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp
index fa518cb..76d6dfa 100644
--- a/contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp
@@ -12,30 +12,31 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "partialinlining"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/CodeExtractor.h"
using namespace llvm;
+#define DEBUG_TYPE "partialinlining"
+
STATISTIC(NumPartialInlined, "Number of functions partially inlined");
namespace {
struct PartialInliner : public ModulePass {
- virtual void getAnalysisUsage(AnalysisUsage &AU) const { }
+ void getAnalysisUsage(AnalysisUsage &AU) const override { }
static char ID; // Pass identification, replacement for typeid
PartialInliner() : ModulePass(ID) {
initializePartialInlinerPass(*PassRegistry::getPassRegistry());
}
-
- bool runOnModule(Module& M);
-
+
+ bool runOnModule(Module& M) override;
+
private:
Function* unswitchFunction(Function* F);
};
@@ -52,10 +53,10 @@ Function* PartialInliner::unswitchFunction(Function* F) {
BasicBlock* entryBlock = F->begin();
BranchInst *BR = dyn_cast<BranchInst>(entryBlock->getTerminator());
if (!BR || BR->isUnconditional())
- return 0;
+ return nullptr;
- BasicBlock* returnBlock = 0;
- BasicBlock* nonReturnBlock = 0;
+ BasicBlock* returnBlock = nullptr;
+ BasicBlock* nonReturnBlock = nullptr;
unsigned returnCount = 0;
for (succ_iterator SI = succ_begin(entryBlock), SE = succ_end(entryBlock);
SI != SE; ++SI)
@@ -66,7 +67,7 @@ Function* PartialInliner::unswitchFunction(Function* F) {
nonReturnBlock = *SI;
if (returnCount != 1)
- return 0;
+ return nullptr;
// Clone the function, so that we can hack away on it.
ValueToValueMapTy VMap;
@@ -119,8 +120,8 @@ Function* PartialInliner::unswitchFunction(Function* F) {
// The CodeExtractor needs a dominator tree.
DominatorTree DT;
- DT.runOnFunction(*duplicateFunction);
-
+ DT.recalculate(*duplicateFunction);
+
// Extract the body of the if.
Function* extractedFunction
= CodeExtractor(toExtract, &DT).extractCodeRegion();
@@ -128,8 +129,8 @@ Function* PartialInliner::unswitchFunction(Function* F) {
InlineFunctionInfo IFI;
// Inline the top-level if test into all callers.
- std::vector<User*> Users(duplicateFunction->use_begin(),
- duplicateFunction->use_end());
+ std::vector<User *> Users(duplicateFunction->user_begin(),
+ duplicateFunction->user_end());
for (std::vector<User*>::iterator UI = Users.begin(), UE = Users.end();
UI != UE; ++UI)
if (CallInst *CI = dyn_cast<CallInst>(*UI))
@@ -162,9 +163,8 @@ bool PartialInliner::runOnModule(Module& M) {
if (currFunc->use_empty()) continue;
bool recursive = false;
- for (Function::use_iterator UI = currFunc->use_begin(),
- UE = currFunc->use_end(); UI != UE; ++UI)
- if (Instruction* I = dyn_cast<Instruction>(*UI))
+ for (User *U : currFunc->users())
+ if (Instruction* I = dyn_cast<Instruction>(U))
if (I->getParent()->getParent() == currFunc) {
recursive = true;
break;
diff --git a/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp b/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
index 24c5018..6d9d8be 100644
--- a/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -17,7 +17,7 @@
#include "llvm-c/Transforms/PassManagerBuilder.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/Passes.h"
-#include "llvm/Analysis/Verifier.h"
+#include "llvm/IR/Verifier.h"
#include "llvm/PassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ManagedStatic.h"
@@ -33,11 +33,6 @@ RunLoopVectorization("vectorize-loops", cl::Hidden,
cl::desc("Run the Loop vectorization passes"));
static cl::opt<bool>
-LateVectorization("late-vectorize", cl::init(true), cl::Hidden,
- cl::desc("Run the vectorization pasess late in the pass "
- "pipeline (after the inliner)"));
-
-static cl::opt<bool>
RunSLPVectorization("vectorize-slp", cl::Hidden,
cl::desc("Run the SLP vectorization passes"));
@@ -58,18 +53,27 @@ static cl::opt<bool>
RunLoopRerolling("reroll-loops", cl::Hidden,
cl::desc("Run the loop rerolling pass"));
+static cl::opt<bool> RunLoadCombine("combine-loads", cl::init(false),
+ cl::Hidden,
+ cl::desc("Run the load combining pass"));
+
+static cl::opt<bool> RunGVN("enable-gvn", cl::init(true),
+ cl::Hidden,
+ cl::desc("Run the global value numbering pass"));
+
PassManagerBuilder::PassManagerBuilder() {
OptLevel = 2;
SizeLevel = 0;
- LibraryInfo = 0;
- Inliner = 0;
+ LibraryInfo = nullptr;
+ Inliner = nullptr;
+ DisableTailCalls = false;
DisableUnitAtATime = false;
DisableUnrollLoops = false;
BBVectorize = RunBBVectorization;
SLPVectorize = RunSLPVectorization;
LoopVectorize = RunLoopVectorization;
- LateVectorize = LateVectorization;
RerollLoops = RunLoopRerolling;
+ LoadCombine = RunLoadCombine;
}
PassManagerBuilder::~PassManagerBuilder() {
@@ -134,7 +138,7 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
if (OptLevel == 0) {
if (Inliner) {
MPM.add(Inliner);
- Inliner = 0;
+ Inliner = nullptr;
}
// FIXME: This is a HACK! The inliner pass above implicitly creates a CGSCC
@@ -156,12 +160,13 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
if (!DisableUnitAtATime) {
addExtensionsToPM(EP_ModuleOptimizerEarly, MPM);
+ MPM.add(createIPSCCPPass()); // IP SCCP
MPM.add(createGlobalOptimizerPass()); // Optimize out global vars
- MPM.add(createIPSCCPPass()); // IP SCCP
MPM.add(createDeadArgEliminationPass()); // Dead argument elimination
MPM.add(createInstructionCombiningPass());// Clean up after IPCP & DAE
+ addExtensionsToPM(EP_Peephole, MPM);
MPM.add(createCFGSimplificationPass()); // Clean up after IPCP & DAE
}
@@ -170,7 +175,7 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
MPM.add(createPruneEHPass()); // Remove dead EH info
if (Inliner) {
MPM.add(Inliner);
- Inliner = 0;
+ Inliner = nullptr;
}
if (!DisableUnitAtATime)
MPM.add(createFunctionAttrsPass()); // Set readonly/readnone attrs
@@ -188,11 +193,14 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
MPM.add(createCorrelatedValuePropagationPass()); // Propagate conditionals
MPM.add(createCFGSimplificationPass()); // Merge & remove BBs
MPM.add(createInstructionCombiningPass()); // Combine silly seq's
+ addExtensionsToPM(EP_Peephole, MPM);
- MPM.add(createTailCallEliminationPass()); // Eliminate tail calls
+ if (!DisableTailCalls)
+ MPM.add(createTailCallEliminationPass()); // Eliminate tail calls
MPM.add(createCFGSimplificationPass()); // Merge & remove BBs
MPM.add(createReassociatePass()); // Reassociate expressions
- MPM.add(createLoopRotatePass()); // Rotate Loop
+ // Rotate Loop - disable header duplication at -Oz
+ MPM.add(createLoopRotatePass(SizeLevel == 2 ? 0 : -1));
MPM.add(createLICMPass()); // Hoist loop invariants
MPM.add(createLoopUnswitchPass(SizeLevel || OptLevel < 3));
MPM.add(createInstructionCombiningPass());
@@ -200,21 +208,22 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
MPM.add(createLoopIdiomPass()); // Recognize idioms like memset.
MPM.add(createLoopDeletionPass()); // Delete dead loops
- if (!LateVectorize && LoopVectorize)
- MPM.add(createLoopVectorizePass(DisableUnrollLoops));
-
if (!DisableUnrollLoops)
- MPM.add(createLoopUnrollPass()); // Unroll small loops
+ MPM.add(createSimpleLoopUnrollPass()); // Unroll small loops
addExtensionsToPM(EP_LoopOptimizerEnd, MPM);
- if (OptLevel > 1)
- MPM.add(createGVNPass()); // Remove redundancies
+ if (OptLevel > 1) {
+ MPM.add(createMergedLoadStoreMotionPass()); // Merge load/stores in diamond
+ if (RunGVN)
+ MPM.add(createGVNPass()); // Remove redundancies
+ }
MPM.add(createMemCpyOptPass()); // Remove memcpy / form memset
MPM.add(createSCCPPass()); // Constant prop with SCCP
// Run instcombine after redundancy elimination to exploit opportunities
// opened up by them.
MPM.add(createInstructionCombiningPass());
+ addExtensionsToPM(EP_Peephole, MPM);
MPM.add(createJumpThreadingPass()); // Thread jumps
MPM.add(createCorrelatedValuePropagationPass());
MPM.add(createDeadStoreEliminationPass()); // Delete dead stores
@@ -229,6 +238,7 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
if (BBVectorize) {
MPM.add(createBBVectorizePass());
MPM.add(createInstructionCombiningPass());
+ addExtensionsToPM(EP_Peephole, MPM);
if (OptLevel > 1 && UseGVNAfterVectorization)
MPM.add(createGVNPass()); // Remove redundancies
else
@@ -239,25 +249,30 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
MPM.add(createLoopUnrollPass());
}
+ if (LoadCombine)
+ MPM.add(createLoadCombinePass());
+
MPM.add(createAggressiveDCEPass()); // Delete dead instructions
MPM.add(createCFGSimplificationPass()); // Merge & remove BBs
MPM.add(createInstructionCombiningPass()); // Clean up after everything.
+ addExtensionsToPM(EP_Peephole, MPM);
+
+ // FIXME: This is a HACK! The inliner pass above implicitly creates a CGSCC
+ // pass manager that we are specifically trying to avoid. To prevent this
+ // we must insert a no-op module pass to reset the pass manager.
+ MPM.add(createBarrierNoopPass());
+ MPM.add(createLoopVectorizePass(DisableUnrollLoops, LoopVectorize));
+ // FIXME: Because of #pragma vectorize enable, the passes below are always
+ // inserted in the pipeline, even when the vectorizer doesn't run (ex. when
+ // on -O1 and no #pragma is found). Would be good to have these two passes
+ // as function calls, so that we can only pass them when the vectorizer
+ // changed the code.
+ MPM.add(createInstructionCombiningPass());
+ addExtensionsToPM(EP_Peephole, MPM);
+ MPM.add(createCFGSimplificationPass());
- // As an experimental mode, run any vectorization passes in a separate
- // pipeline from the CGSCC pass manager that runs iteratively with the
- // inliner.
- if (LateVectorize && LoopVectorize) {
- // FIXME: This is a HACK! The inliner pass above implicitly creates a CGSCC
- // pass manager that we are specifically trying to avoid. To prevent this
- // we must insert a no-op module pass to reset the pass manager.
- MPM.add(createBarrierNoopPass());
-
- // Add the various vectorization passes and relevant cleanup passes for
- // them since we are no longer in the middle of the main scalar pipeline.
- MPM.add(createLoopVectorizePass(DisableUnrollLoops));
- MPM.add(createInstructionCombiningPass());
- MPM.add(createCFGSimplificationPass());
- }
+ if (!DisableUnrollLoops)
+ MPM.add(createLoopUnrollPass()); // Unroll small loops
if (!DisableUnitAtATime) {
// FIXME: We shouldn't bother with this anymore.
@@ -306,6 +321,7 @@ void PassManagerBuilder::populateLTOPassManager(PassManagerBase &PM,
// function pointers. When this happens, we often have to resolve varargs
// calls, etc, so let instcombine do this.
PM.add(createInstructionCombiningPass());
+ addExtensionsToPM(EP_Peephole, PM);
// Inline small functions
if (RunInliner)
@@ -324,6 +340,7 @@ void PassManagerBuilder::populateLTOPassManager(PassManagerBase &PM,
// The IPO passes may leave cruft around. Clean up after them.
PM.add(createInstructionCombiningPass());
+ addExtensionsToPM(EP_Peephole, PM);
PM.add(createJumpThreadingPass());
// Break up allocas
@@ -337,14 +354,27 @@ void PassManagerBuilder::populateLTOPassManager(PassManagerBase &PM,
PM.add(createGlobalsModRefPass()); // IP alias analysis.
PM.add(createLICMPass()); // Hoist loop invariants.
+ PM.add(createMergedLoadStoreMotionPass()); // Merge load/stores in diamonds
PM.add(createGVNPass(DisableGVNLoadPRE)); // Remove redundancies.
PM.add(createMemCpyOptPass()); // Remove dead memcpys.
// Nuke dead stores.
PM.add(createDeadStoreEliminationPass());
+ // More loops are countable; try to optimize them.
+ PM.add(createIndVarSimplifyPass());
+ PM.add(createLoopDeletionPass());
+ PM.add(createLoopVectorizePass(true, true));
+
+ // More scalar chains could be vectorized due to more alias information
+ PM.add(createSLPVectorizerPass()); // Vectorize parallel scalar chains.
+
+ if (LoadCombine)
+ PM.add(createLoadCombinePass());
+
// Cleanup and simplify the code after the scalar optimizations.
PM.add(createInstructionCombiningPass());
+ addExtensionsToPM(EP_Peephole, PM);
PM.add(createJumpThreadingPass());
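For context, the knobs touched in this file (DisableTailCalls, LoadCombine, the single post-inliner vectorizer run, and the EP_Peephole extension point) are all consumed through PassManagerBuilder's public members. A minimal sketch of a client driving them — the member and function names are the ones set up above, but whether a given front end actually exposes them as flags is an assumption:

#include "llvm/PassManager.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
using namespace llvm;

static void addMyPeephole(const PassManagerBuilder &, PassManagerBase &PM) {
  // A front end could hang its own cleanup pass on EP_Peephole here.
}

static void buildO2Pipeline(PassManager &MPM, FunctionPassManager &FPM) {
  PassManagerBuilder PMB;
  PMB.OptLevel = 2;
  PMB.SizeLevel = 0;
  PMB.LoopVectorize = true;      // vectorizer now runs once, after the inliner
  PMB.LoadCombine = false;       // stays off unless -combine-loads is given
  PMB.DisableTailCalls = false;  // keep tail-call elimination in the pipeline
  PMB.addExtension(PassManagerBuilder::EP_Peephole, addMyPeephole);
  PMB.populateFunctionPassManager(FPM);
  PMB.populateModulePassManager(MPM);
}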
diff --git a/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp b/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp
index b160913..b2c4a09 100644
--- a/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp
@@ -14,22 +14,23 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "prune-eh"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
-#include "llvm/Support/CFG.h"
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "prune-eh"
+
STATISTIC(NumRemoved, "Number of invokes removed");
STATISTIC(NumUnreach, "Number of noreturn calls optimized");
@@ -41,7 +42,7 @@ namespace {
}
// runOnSCC - Analyze the SCC, performing the transformation if possible.
- bool runOnSCC(CallGraphSCC &SCC);
+ bool runOnSCC(CallGraphSCC &SCC) override;
bool SimplifyFunction(Function *F);
void DeleteBasicBlock(BasicBlock *BB);
@@ -51,7 +52,7 @@ namespace {
char PruneEH::ID = 0;
INITIALIZE_PASS_BEGIN(PruneEH, "prune-eh",
"Remove unused exception handling info", false, false)
-INITIALIZE_PASS_DEPENDENCY(CallGraph)
+INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(PruneEH, "prune-eh",
"Remove unused exception handling info", false, false)
@@ -60,7 +61,7 @@ Pass *llvm::createPruneEHPass() { return new PruneEH(); }
bool PruneEH::runOnSCC(CallGraphSCC &SCC) {
SmallPtrSet<CallGraphNode *, 8> SCCNodes;
- CallGraph &CG = getAnalysis<CallGraph>();
+ CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
bool MadeChange = false;
// Fill SCCNodes with the elements of the SCC. Used for quickly
@@ -85,7 +86,7 @@ bool PruneEH::runOnSCC(CallGraphSCC &SCC) {
for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end();
(!SCCMightUnwind || !SCCMightReturn) && I != E; ++I) {
Function *F = (*I)->getFunction();
- if (F == 0) {
+ if (!F) {
SCCMightUnwind = true;
SCCMightReturn = true;
} else if (F->isDeclaration() || F->mayBeOverridden()) {
@@ -234,7 +235,7 @@ bool PruneEH::SimplifyFunction(Function *F) {
/// exist in the BB.
void PruneEH::DeleteBasicBlock(BasicBlock *BB) {
assert(pred_begin(BB) == pred_end(BB) && "BB is not dead!");
- CallGraph &CG = getAnalysis<CallGraph>();
+ CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
CallGraphNode *CGN = CG[BB->getParent()];
for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; ) {
diff --git a/contrib/llvm/lib/Transforms/IPO/StripDeadPrototypes.cpp b/contrib/llvm/lib/Transforms/IPO/StripDeadPrototypes.cpp
index f00830a..956991a 100644
--- a/contrib/llvm/lib/Transforms/IPO/StripDeadPrototypes.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/StripDeadPrototypes.cpp
@@ -14,13 +14,14 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "strip-dead-prototypes"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
using namespace llvm;
+#define DEBUG_TYPE "strip-dead-prototypes"
+
STATISTIC(NumDeadPrototypes, "Number of dead prototypes removed");
namespace {
@@ -32,7 +33,7 @@ public:
StripDeadPrototypesPass() : ModulePass(ID) {
initializeStripDeadPrototypesPassPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
};
} // end anonymous namespace
diff --git a/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp b/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp
index c4f5cfc..1abbccc 100644
--- a/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp
@@ -23,8 +23,8 @@
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
@@ -44,9 +44,9 @@ namespace {
initializeStripSymbolsPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
};
@@ -59,9 +59,9 @@ namespace {
initializeStripNonDebugSymbolsPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
};
@@ -74,9 +74,9 @@ namespace {
initializeStripDebugDeclarePass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
};
@@ -89,9 +89,9 @@ namespace {
initializeStripDeadDebugInfoPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
};
@@ -132,11 +132,10 @@ ModulePass *llvm::createStripDeadDebugInfoPass() {
/// OnlyUsedBy - Return true if V is only used by Usr.
static bool OnlyUsedBy(Value *V, Value *Usr) {
- for(Value::use_iterator I = V->use_begin(), E = V->use_end(); I != E; ++I) {
- User *U = *I;
+ for (User *U : V->users())
if (U != Usr)
return false;
- }
+
return true;
}
@@ -147,7 +146,7 @@ static void RemoveDeadConstant(Constant *C) {
if (OnlyUsedBy(C->getOperand(i), C))
Operands.insert(cast<Constant>(C->getOperand(i)));
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) {
- if (!GV->hasLocalLinkage()) return; // Don't delete non static globals.
+ if (!GV->hasLocalLinkage()) return; // Don't delete non-static globals.
GV->eraseFromParent();
}
else if (!isa<Function>(C))
@@ -193,7 +192,7 @@ static void StripTypeNames(Module &M, bool PreserveDbgInfo) {
/// Find values that are marked as llvm.used.
static void findUsedValues(GlobalVariable *LLVMUsed,
SmallPtrSet<const GlobalValue*, 8> &UsedValues) {
- if (LLVMUsed == 0) return;
+ if (!LLVMUsed) return;
UsedValues.insert(LLVMUsed);
ConstantArray *Inits = cast<ConstantArray>(LLVMUsed->getInitializer());
@@ -250,7 +249,7 @@ bool StripDebugDeclare::runOnModule(Module &M) {
if (Declare) {
while (!Declare->use_empty()) {
- CallInst *CI = cast<CallInst>(Declare->use_back());
+ CallInst *CI = cast<CallInst>(Declare->user_back());
Value *Arg1 = CI->getArgOperand(0);
Value *Arg2 = CI->getArgOperand(1);
assert(CI->use_empty() && "llvm.dbg intrinsic should have void result");
@@ -307,10 +306,7 @@ bool StripDeadDebugInfo::runOnModule(Module &M) {
SmallVector<Value *, 64> LiveSubprograms;
DenseSet<const MDNode *> VisitedSet;
- for (DebugInfoFinder::iterator CI = F.compile_unit_begin(),
- CE = F.compile_unit_end(); CI != CE; ++CI) {
- // Create our compile unit.
- DICompileUnit DIC(*CI);
+ for (DICompileUnit DIC : F.compile_units()) {
assert(DIC.Verify() && "DIC must verify as a DICompileUnit.");
// Create our live subprogram list.
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h b/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h
index a5eddc2..ab4dc1c 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h
@@ -11,46 +11,59 @@
#define INSTCOMBINE_INSTCOMBINE_H
#include "InstCombineWorklist.h"
+#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
-#include "llvm/InstVisitor.h"
#include "llvm/Pass.h"
-#include "llvm/Support/TargetFolder.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
+#define DEBUG_TYPE "instcombine"
+
namespace llvm {
- class CallSite;
- class DataLayout;
- class TargetLibraryInfo;
- class DbgDeclareInst;
- class MemIntrinsic;
- class MemSetInst;
+class CallSite;
+class DataLayout;
+class TargetLibraryInfo;
+class DbgDeclareInst;
+class MemIntrinsic;
+class MemSetInst;
/// SelectPatternFlavor - We can match a variety of different patterns for
/// select operations.
enum SelectPatternFlavor {
SPF_UNKNOWN = 0,
- SPF_SMIN, SPF_UMIN,
- SPF_SMAX, SPF_UMAX
- //SPF_ABS - TODO.
+ SPF_SMIN,
+ SPF_UMIN,
+ SPF_SMAX,
+ SPF_UMAX,
+ SPF_ABS,
+ SPF_NABS
};
/// getComplexity: Assign a complexity or rank value to LLVM Values...
/// 0 -> undef, 1 -> Const, 2 -> Other, 3 -> Arg, 3 -> Unary, 4 -> OtherInst
static inline unsigned getComplexity(Value *V) {
if (isa<Instruction>(V)) {
- if (BinaryOperator::isNeg(V) ||
- BinaryOperator::isFNeg(V) ||
+ if (BinaryOperator::isNeg(V) || BinaryOperator::isFNeg(V) ||
BinaryOperator::isNot(V))
return 3;
return 4;
}
- if (isa<Argument>(V)) return 3;
+ if (isa<Argument>(V))
+ return 3;
return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
}
+/// AddOne - Add one to a Constant
+static inline Constant *AddOne(Constant *C) {
+ return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
+}
+/// SubOne - Subtract one from a Constant
+static inline Constant *SubOne(Constant *C) {
+ return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
+}
/// InstCombineIRInserter - This is an IRBuilder insertion helper that works
/// just like the normal insertion helper, but also adds any new instructions
@@ -58,11 +71,12 @@ static inline unsigned getComplexity(Value *V) {
class LLVM_LIBRARY_VISIBILITY InstCombineIRInserter
: public IRBuilderDefaultInserter<true> {
InstCombineWorklist &Worklist;
+
public:
InstCombineIRInserter(InstCombineWorklist &WL) : Worklist(WL) {}
- void InsertHelper(Instruction *I, const Twine &Name,
- BasicBlock *BB, BasicBlock::iterator InsertPt) const {
+ void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
+ BasicBlock::iterator InsertPt) const {
IRBuilderDefaultInserter<true>::InsertHelper(I, Name, BB, InsertPt);
Worklist.Add(I);
}
@@ -70,13 +84,14 @@ public:
/// InstCombiner - The -instcombine pass.
class LLVM_LIBRARY_VISIBILITY InstCombiner
- : public FunctionPass,
- public InstVisitor<InstCombiner, Instruction*> {
- DataLayout *TD;
+ : public FunctionPass,
+ public InstVisitor<InstCombiner, Instruction *> {
+ const DataLayout *DL;
TargetLibraryInfo *TLI;
bool MadeIRChange;
LibCallSimplifier *Simplifier;
bool MinimizeSize;
+
public:
/// Worklist - All of the instructions that need to be simplified.
InstCombineWorklist Worklist;
@@ -87,19 +102,19 @@ public:
BuilderTy *Builder;
static char ID; // Pass identification, replacement for typeid
- InstCombiner() : FunctionPass(ID), TD(0), Builder(0) {
+ InstCombiner() : FunctionPass(ID), DL(nullptr), Builder(nullptr) {
MinimizeSize = false;
initializeInstCombinerPass(*PassRegistry::getPassRegistry());
}
public:
- virtual bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
bool DoOneIteration(Function &F, unsigned ItNum);
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
- DataLayout *getDataLayout() const { return TD; }
+ const DataLayout *getDataLayout() const { return DL; }
TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; }
@@ -116,7 +131,7 @@ public:
Instruction *visitSub(BinaryOperator &I);
Instruction *visitFSub(BinaryOperator &I);
Instruction *visitMul(BinaryOperator &I);
- Value *foldFMulConst(Instruction *FMulOrDiv, ConstantFP *C,
+ Value *foldFMulConst(Instruction *FMulOrDiv, Constant *C,
Instruction *InsertBefore);
Instruction *visitFMul(BinaryOperator &I);
Instruction *visitURem(BinaryOperator &I);
@@ -135,9 +150,9 @@ public:
Instruction *visitAnd(BinaryOperator &I);
Value *FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS);
Value *FoldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS);
- Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op,
- Value *A, Value *B, Value *C);
- Instruction *visitOr (BinaryOperator &I);
+ Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op, Value *A,
+ Value *B, Value *C);
+ Instruction *visitOr(BinaryOperator &I);
Instruction *visitXor(BinaryOperator &I);
Instruction *visitShl(BinaryOperator &I);
Instruction *visitAShr(BinaryOperator &I);
@@ -147,12 +162,11 @@ public:
Constant *RHSC);
Instruction *FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
GlobalVariable *GV, CmpInst &ICI,
- ConstantInt *AndCst = 0);
+ ConstantInt *AndCst = nullptr);
Instruction *visitFCmpInst(FCmpInst &I);
Instruction *visitICmpInst(ICmpInst &I);
Instruction *visitICmpInstWithCastAndCast(ICmpInst &ICI);
- Instruction *visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
- Instruction *LHS,
+ Instruction *visitICmpInstWithInstAndIntCst(ICmpInst &ICI, Instruction *LHS,
ConstantInt *RHS);
Instruction *FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
ConstantInt *DivRHS);
@@ -162,7 +176,7 @@ public:
ICmpInst::Predicate Pred);
Instruction *FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
ICmpInst::Predicate Cond, Instruction &I);
- Instruction *FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
+ Instruction *FoldShiftByConstant(Value *Op0, Constant *Op1,
BinaryOperator &I);
Instruction *commonCastTransforms(CastInst &CI);
Instruction *commonPointerCastTransforms(CastInst &CI);
@@ -179,9 +193,8 @@ public:
Instruction *visitIntToPtr(IntToPtrInst &CI);
Instruction *visitBitCast(BitCastInst &CI);
Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
- Instruction *FoldSelectOpOp(SelectInst &SI, Instruction *TI,
- Instruction *FI);
- Instruction *FoldSelectIntoOp(SelectInst &SI, Value*, Value*);
+ Instruction *FoldSelectOpOp(SelectInst &SI, Instruction *TI, Instruction *FI);
+ Instruction *FoldSelectIntoOp(SelectInst &SI, Value *, Value *);
Instruction *FoldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1,
Value *A, Value *B, Instruction &Outer,
SelectPatternFlavor SPF2, Value *C);
@@ -200,6 +213,7 @@ public:
Instruction *visitStoreInst(StoreInst &SI);
Instruction *visitBranchInst(BranchInst &BI);
Instruction *visitSwitchInst(SwitchInst &SI);
+ Instruction *visitInsertValueInst(InsertValueInst &IV);
Instruction *visitInsertElementInst(InsertElementInst &IE);
Instruction *visitExtractElementInst(ExtractElementInst &EI);
Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
@@ -207,25 +221,25 @@ public:
Instruction *visitLandingPadInst(LandingPadInst &LI);
// visitInstruction - Specify what to return for unhandled instructions...
- Instruction *visitInstruction(Instruction &I) { return 0; }
+ Instruction *visitInstruction(Instruction &I) { return nullptr; }
private:
bool ShouldChangeType(Type *From, Type *To) const;
Value *dyn_castNegVal(Value *V) const;
- Value *dyn_castFNegVal(Value *V, bool NoSignedZero=false) const;
+ Value *dyn_castFNegVal(Value *V, bool NoSignedZero = false) const;
Type *FindElementAtOffset(Type *PtrTy, int64_t Offset,
- SmallVectorImpl<Value*> &NewIndices);
+ SmallVectorImpl<Value *> &NewIndices);
Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI);
/// ShouldOptimizeCast - Return true if the cast from "V to Ty" actually
/// results in any code being generated and is interesting to optimize out. If
/// the cast can be eliminated by some other simple transformation, we prefer
/// to do the simplification first.
- bool ShouldOptimizeCast(Instruction::CastOps opcode,const Value *V,
+ bool ShouldOptimizeCast(Instruction::CastOps opcode, const Value *V,
Type *Ty);
Instruction *visitCallSite(CallSite CS);
- Instruction *tryOptimizeCall(CallInst *CI, const DataLayout *TD);
+ Instruction *tryOptimizeCall(CallInst *CI, const DataLayout *DL);
bool transformConstExprCastCall(CallSite CS);
Instruction *transformCallThroughTrampoline(CallSite CS,
IntrinsicInst *Tramp);
@@ -233,6 +247,7 @@ private:
bool DoXform = true);
Instruction *transformSExtICmp(ICmpInst *ICI, Instruction &CI);
bool WillNotOverflowSignedAdd(Value *LHS, Value *RHS);
+ bool WillNotOverflowUnsignedAdd(Value *LHS, Value *RHS);
Value *EmitGEPOffset(User *GEP);
Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
Value *EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask);
@@ -242,10 +257,10 @@ public:
// in the program. Add the new instruction to the worklist.
//
Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
- assert(New && New->getParent() == 0 &&
+ assert(New && !New->getParent() &&
"New instruction already inserted into a basic block!");
BasicBlock *BB = Old.getParent();
- BB->getInstList().insert(&Old, New); // Insert inst
+ BB->getInstList().insert(&Old, New); // Insert inst
Worklist.Add(New);
return New;
}
@@ -265,7 +280,7 @@ public:
// modified.
//
Instruction *ReplaceInstUsesWith(Instruction &I, Value *V) {
- Worklist.AddUsersToWorkList(I); // Add all modified instrs to worklist.
+ Worklist.AddUsersToWorkList(I); // Add all modified instrs to worklist.
// If we are replacing the instruction with itself, this must be in a
// segment of unreachable code, so just clobber the instruction.
@@ -297,24 +312,23 @@ public:
Worklist.Remove(&I);
I.eraseFromParent();
MadeIRChange = true;
- return 0; // Don't do anything with FI
+ return nullptr; // Don't do anything with FI
}
- void ComputeMaskedBits(Value *V, APInt &KnownZero,
- APInt &KnownOne, unsigned Depth = 0) const {
- return llvm::ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
+ void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
+ unsigned Depth = 0) const {
+ return llvm::computeKnownBits(V, KnownZero, KnownOne, DL, Depth);
}
bool MaskedValueIsZero(Value *V, const APInt &Mask,
unsigned Depth = 0) const {
- return llvm::MaskedValueIsZero(V, Mask, TD, Depth);
+ return llvm::MaskedValueIsZero(V, Mask, DL, Depth);
}
unsigned ComputeNumSignBits(Value *Op, unsigned Depth = 0) const {
- return llvm::ComputeNumSignBits(Op, TD, Depth);
+ return llvm::ComputeNumSignBits(Op, DL, Depth);
}
private:
-
/// SimplifyAssociativeOrCommutative - This performs a few simplifications for
/// operators which are associative or commutative.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I);
@@ -328,12 +342,10 @@ private:
/// SimplifyDemandedUseBits - Attempts to replace V with a simpler value
/// based on the demanded bits.
- Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
- APInt& KnownZero, APInt& KnownOne,
- unsigned Depth);
- bool SimplifyDemandedBits(Use &U, APInt DemandedMask,
- APInt& KnownZero, APInt& KnownOne,
- unsigned Depth=0);
+ Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask, APInt &KnownZero,
+ APInt &KnownOne, unsigned Depth);
+ bool SimplifyDemandedBits(Use &U, APInt DemandedMask, APInt &KnownZero,
+ APInt &KnownOne, unsigned Depth = 0);
/// Helper routine of SimplifyDemandedUseBits. It tries to simplify demanded
/// bit for "r1 = shr x, c1; r2 = shl r1, c2" instruction sequence.
Value *SimplifyShrShlDemandedBits(Instruction *Lsr, Instruction *Sftl,
@@ -346,7 +358,9 @@ private:
bool SimplifyDemandedInstructionBits(Instruction &Inst);
Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
- APInt& UndefElts, unsigned Depth = 0);
+ APInt &UndefElts, unsigned Depth = 0);
+
+ Value *SimplifyVectorOp(BinaryOperator &Inst);
// FoldOpIntoPhi - Given a binary operator, cast instruction, or select
// which has a PHI node as operand #0, see if we can fold the instruction
@@ -363,21 +377,19 @@ private:
Instruction *FoldPHIArgGEPIntoPHI(PHINode &PN);
Instruction *FoldPHIArgLoadIntoPHI(PHINode &PN);
-
Instruction *OptAndOp(Instruction *Op, ConstantInt *OpRHS,
ConstantInt *AndRHS, BinaryOperator &TheAnd);
Value *FoldLogicalPlusAnd(Value *LHS, Value *RHS, ConstantInt *Mask,
bool isSub, Instruction &I);
- Value *InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
- bool isSigned, bool Inside);
+ Value *InsertRangeTest(Value *V, Constant *Lo, Constant *Hi, bool isSigned,
+ bool Inside);
Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
Instruction *MatchBSwap(BinaryOperator &I);
bool SimplifyStoreAtEndOfBlock(StoreInst &SI);
Instruction *SimplifyMemTransfer(MemIntrinsic *MI);
Instruction *SimplifyMemSet(MemSetInst *MI);
-
Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);
/// Descale - Return a value X such that Val = X * Scale, or null if none. If
@@ -385,8 +397,8 @@ private:
Value *Descale(Value *Val, APInt Scale, bool &NoSignedWrap);
};
-
-
} // end namespace llvm.
+#undef DEBUG_TYPE
+
#endif
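The small helpers consolidated into this header (getComplexity, AddOne, SubOne) let the visitor functions canonicalize commutative operands and build C±1 constants of any bit width without repeating boilerplate. A short illustrative use, assuming the header above is included; canonicalizeOperands itself is hypothetical and not part of the header:

#include <utility>

// Put the more "complex" operand on the left so constants and undef sink
// to the right-hand side, the form most InstCombine patterns expect.
static void canonicalizeOperands(Value *&LHS, Value *&RHS) {
  if (getComplexity(LHS) < getComplexity(RHS))
    std::swap(LHS, RHS);
}
// AddOne/SubOne then supply the C+1 / C-1 constants such rewrites need,
// e.g. turning (X s< C) into (X s<= SubOne(C)) regardless of bit width.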
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 534feb8..e80d6a9 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -15,11 +15,13 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/DataLayout.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Support/PatternMatch.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;
+#define DEBUG_TYPE "instcombine"
+
namespace {
/// Class representing coefficient of floating-point addend.
@@ -30,7 +32,7 @@ namespace {
///
class FAddendCoef {
public:
- // The constructor has to initialize a APFloat, which is uncessary for
+ // The constructor has to initialize a APFloat, which is unnecessary for
// most addends which have coefficient either 1 or -1. So, the constructor
// is expensive. In order to avoid the cost of the constructor, we should
// reuse some instances whenever possible. The pre-created instances
@@ -112,12 +114,12 @@ namespace {
///
class FAddend {
public:
- FAddend() { Val = 0; }
+ FAddend() { Val = nullptr; }
Value *getSymVal (void) const { return Val; }
const FAddendCoef &getCoef(void) const { return Coeff; }
- bool isConstant() const { return Val == 0; }
+ bool isConstant() const { return Val == nullptr; }
bool isZero() const { return Coeff.isZero(); }
void set(short Coefficient, Value *V) { Coeff.set(Coefficient), Val = V; }
@@ -154,7 +156,7 @@ namespace {
///
class FAddCombine {
public:
- FAddCombine(InstCombiner::BuilderTy *B) : Builder(B), Instr(0) {}
+ FAddCombine(InstCombiner::BuilderTy *B) : Builder(B), Instr(nullptr) {}
Value *simplify(Instruction *FAdd);
private:
@@ -175,7 +177,7 @@ namespace {
Value *createFDiv(Value *Opnd0, Value *Opnd1);
Value *createFNeg(Value *V);
Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota);
- void createInstPostProc(Instruction *NewInst);
+ void createInstPostProc(Instruction *NewInst, bool NoNumber = false);
InstCombiner::BuilderTy *Builder;
Instruction *Instr;
@@ -348,8 +350,8 @@ Value *FAddendCoef::getValue(Type *Ty) const {
//
unsigned FAddend::drillValueDownOneStep
(Value *Val, FAddend &Addend0, FAddend &Addend1) {
- Instruction *I = 0;
- if (Val == 0 || !(I = dyn_cast<Instruction>(Val)))
+ Instruction *I = nullptr;
+ if (!Val || !(I = dyn_cast<Instruction>(Val)))
return 0;
unsigned Opcode = I->getOpcode();
@@ -359,16 +361,16 @@ unsigned FAddend::drillValueDownOneStep
Value *Opnd0 = I->getOperand(0);
Value *Opnd1 = I->getOperand(1);
if ((C0 = dyn_cast<ConstantFP>(Opnd0)) && C0->isZero())
- Opnd0 = 0;
+ Opnd0 = nullptr;
if ((C1 = dyn_cast<ConstantFP>(Opnd1)) && C1->isZero())
- Opnd1 = 0;
+ Opnd1 = nullptr;
if (Opnd0) {
if (!C0)
Addend0.set(1, Opnd0);
else
- Addend0.set(C0, 0);
+ Addend0.set(C0, nullptr);
}
if (Opnd1) {
@@ -376,7 +378,7 @@ unsigned FAddend::drillValueDownOneStep
if (!C1)
Addend.set(1, Opnd1);
else
- Addend.set(C1, 0);
+ Addend.set(C1, nullptr);
if (Opcode == Instruction::FSub)
Addend.negate();
}
@@ -385,7 +387,7 @@ unsigned FAddend::drillValueDownOneStep
return Opnd0 && Opnd1 ? 2 : 1;
// Both operands are zero. Weird!
- Addend0.set(APFloat(C0->getValueAPF().getSemantics()), 0);
+ Addend0.set(APFloat(C0->getValueAPF().getSemantics()), nullptr);
return 1;
}
@@ -443,13 +445,13 @@ Value *FAddCombine::performFactorization(Instruction *I) {
Instruction *I1 = dyn_cast<Instruction>(I->getOperand(1));
if (!I0 || !I1 || I0->getOpcode() != I1->getOpcode())
- return 0;
+ return nullptr;
bool isMpy = false;
if (I0->getOpcode() == Instruction::FMul)
isMpy = true;
else if (I0->getOpcode() != Instruction::FDiv)
- return 0;
+ return nullptr;
Value *Opnd0_0 = I0->getOperand(0);
Value *Opnd0_1 = I0->getOperand(1);
@@ -461,8 +463,8 @@ Value *FAddCombine::performFactorization(Instruction *I) {
// (x*y) +/- (x*z) x y z
// (y/x) +/- (z/x) x y z
//
- Value *Factor = 0;
- Value *AddSub0 = 0, *AddSub1 = 0;
+ Value *Factor = nullptr;
+ Value *AddSub0 = nullptr, *AddSub1 = nullptr;
if (isMpy) {
if (Opnd0_0 == Opnd1_0 || Opnd0_0 == Opnd1_1)
@@ -481,7 +483,12 @@ Value *FAddCombine::performFactorization(Instruction *I) {
}
if (!Factor)
- return 0;
+ return nullptr;
+
+ FastMathFlags Flags;
+ Flags.setUnsafeAlgebra();
+ if (I0) Flags &= I->getFastMathFlags();
+ if (I1) Flags &= I->getFastMathFlags();
// Create expression "NewAddSub = AddSub0 +/- AddsSub1"
Value *NewAddSub = (I->getOpcode() == Instruction::FAdd) ?
@@ -490,13 +497,21 @@ Value *FAddCombine::performFactorization(Instruction *I) {
if (ConstantFP *CFP = dyn_cast<ConstantFP>(NewAddSub)) {
const APFloat &F = CFP->getValueAPF();
if (!F.isNormal())
- return 0;
- }
+ return nullptr;
+ } else if (Instruction *II = dyn_cast<Instruction>(NewAddSub))
+ II->setFastMathFlags(Flags);
- if (isMpy)
- return createFMul(Factor, NewAddSub);
+ if (isMpy) {
+ Value *RI = createFMul(Factor, NewAddSub);
+ if (Instruction *II = dyn_cast<Instruction>(RI))
+ II->setFastMathFlags(Flags);
+ return RI;
+ }
- return createFDiv(NewAddSub, Factor);
+ Value *RI = createFDiv(NewAddSub, Factor);
+ if (Instruction *II = dyn_cast<Instruction>(RI))
+ II->setFastMathFlags(Flags);
+ return RI;
}
Value *FAddCombine::simplify(Instruction *I) {
@@ -504,7 +519,7 @@ Value *FAddCombine::simplify(Instruction *I) {
// Currently we are not able to handle vector type.
if (I->getType()->isVectorTy())
- return 0;
+ return nullptr;
assert((I->getOpcode() == Instruction::FAdd ||
I->getOpcode() == Instruction::FSub) && "Expect add/sub");
@@ -555,7 +570,7 @@ Value *FAddCombine::simplify(Instruction *I) {
// been optimized into "I = Y - X" in the previous steps.
//
const FAddendCoef &CE = Opnd0.getCoef();
- return CE.isOne() ? Opnd0.getSymVal() : 0;
+ return CE.isOne() ? Opnd0.getSymVal() : nullptr;
}
// step 4: Try to optimize Opnd0 + Opnd1_0 [+ Opnd1_1]
@@ -601,7 +616,7 @@ Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
// constant close to supper-expr(s) will potentially reveal some optimization
// opportunities in super-expr(s).
//
- const FAddend *ConstAdd = 0;
+ const FAddend *ConstAdd = nullptr;
// Simplified addends are placed <SimpVect>.
AddendVect SimpVect;
@@ -634,7 +649,7 @@ Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
if (T && T->getSymVal() == Val) {
// Set null such that next iteration of the outer loop will not process
// this addend again.
- Addends[SameSymIdx] = 0;
+ Addends[SameSymIdx] = nullptr;
SimpVect.push_back(T);
}
}
@@ -648,7 +663,7 @@ Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
// Pop all addends being folded and push the resulting folded addend.
SimpVect.resize(StartIdx);
- if (Val != 0) {
+ if (Val) {
if (!R.isZero()) {
SimpVect.push_back(&R);
}
@@ -685,7 +700,7 @@ Value *FAddCombine::createNaryFAdd
//
unsigned InstrNeeded = calcInstrNumber(Opnds);
if (InstrNeeded > InstrQuota)
- return 0;
+ return nullptr;
initCreateInstNum();
@@ -697,7 +712,7 @@ Value *FAddCombine::createNaryFAdd
// N-ary addition has at most two instructions, and we don't need to worry
// about tree-height when constructing the N-ary addition.
- Value *LastVal = 0;
+ Value *LastVal = nullptr;
bool LastValNeedNeg = false;
// Iterate the addends, creating fadd/fsub using adjacent two addends.
@@ -746,7 +761,10 @@ Value *FAddCombine::createFSub
Value *FAddCombine::createFNeg(Value *V) {
Value *Zero = cast<Value>(ConstantFP::get(V->getType(), 0.0));
- return createFSub(Zero, V);
+ Value *NewV = createFSub(Zero, V);
+ if (Instruction *I = dyn_cast<Instruction>(NewV))
+ createInstPostProc(I, true); // fneg's don't receive instruction numbers.
+ return NewV;
}
Value *FAddCombine::createFAdd
@@ -771,11 +789,13 @@ Value *FAddCombine::createFDiv(Value *Opnd0, Value *Opnd1) {
return V;
}
-void FAddCombine::createInstPostProc(Instruction *NewInstr) {
+void FAddCombine::createInstPostProc(Instruction *NewInstr,
+ bool NoNumber) {
NewInstr->setDebugLoc(Instr->getDebugLoc());
// Keep track of the number of instructions created.
- incCreateInstNum();
+ if (!NoNumber)
+ incCreateInstNum();
// Propagate fast-math flags
NewInstr->setFastMathFlags(Instr->getFastMathFlags());
@@ -845,80 +865,170 @@ Value *FAddCombine::createAddendVal
return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
}
-/// AddOne - Add one to a ConstantInt.
-static Constant *AddOne(Constant *C) {
- return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
-}
-
-/// SubOne - Subtract one from a ConstantInt.
-static Constant *SubOne(ConstantInt *C) {
- return ConstantInt::get(C->getContext(), C->getValue()-1);
+// If one of the operands only has one non-zero bit, and if the other
+// operand has a known-zero bit in a more significant place than it (not
+// including the sign bit) the ripple may go up to and fill the zero, but
+// won't change the sign. For example, (X & ~4) + 1.
+static bool checkRippleForAdd(const APInt &Op0KnownZero,
+ const APInt &Op1KnownZero) {
+ APInt Op1MaybeOne = ~Op1KnownZero;
+  // Make sure that one of the operands has at most one bit set to 1.
+ if (Op1MaybeOne.countPopulation() != 1)
+ return false;
+
+ // Find the most significant known 0 other than the sign bit.
+ int BitWidth = Op0KnownZero.getBitWidth();
+ APInt Op0KnownZeroTemp(Op0KnownZero);
+ Op0KnownZeroTemp.clearBit(BitWidth - 1);
+ int Op0ZeroPosition = BitWidth - Op0KnownZeroTemp.countLeadingZeros() - 1;
+
+ int Op1OnePosition = BitWidth - Op1MaybeOne.countLeadingZeros() - 1;
+ assert(Op1OnePosition >= 0);
+
+ // This also covers the case of no known zero, since in that case
+ // Op0ZeroPosition is -1.
+ return Op0ZeroPosition >= Op1OnePosition;
}
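// A minimal, hypothetical standalone sketch (8-bit values, not LLVM API) of the
// "(X & ~4) + 1" case described above: the carry from the +1 stops at the
// known-zero bit 2, so the sign bit can never flip.
#include <cassert>
#include <cstdint>
int main() {
  for (unsigned x = 0; x < 256; ++x) {
    uint8_t Masked = uint8_t(x) & uint8_t(~4u);   // bit 2 is known zero
    uint8_t Sum = uint8_t(Masked + 1);            // the added 1 has its single set bit below bit 2
    assert((Masked & 0x80) == (Sum & 0x80));      // sign bit is unchanged
  }
  return 0;
}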
-
-// dyn_castFoldableMul - If this value is a multiply that can be folded into
-// other computations (because it has a constant operand), return the
-// non-constant operand of the multiply, and set CST to point to the multiplier.
-// Otherwise, return null.
-//
-static inline Value *dyn_castFoldableMul(Value *V, ConstantInt *&CST) {
- if (!V->hasOneUse() || !V->getType()->isIntegerTy())
- return 0;
-
- Instruction *I = dyn_cast<Instruction>(V);
- if (I == 0) return 0;
-
- if (I->getOpcode() == Instruction::Mul)
- if ((CST = dyn_cast<ConstantInt>(I->getOperand(1))))
- return I->getOperand(0);
- if (I->getOpcode() == Instruction::Shl)
- if ((CST = dyn_cast<ConstantInt>(I->getOperand(1)))) {
- // The multiplier is really 1 << CST.
- uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
- uint32_t CSTVal = CST->getLimitedValue(BitWidth);
- CST = ConstantInt::get(V->getType()->getContext(),
- APInt::getOneBitSet(BitWidth, CSTVal));
- return I->getOperand(0);
- }
- return 0;
-}
-
-
/// WillNotOverflowSignedAdd - Return true if we can prove that:
/// (sext (add LHS, RHS)) === (add (sext LHS), (sext RHS))
/// This basically requires proving that the add in the original type would not
/// overflow to change the sign bit or have a carry out.
+/// TODO: Handle this for Vectors.
bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS) {
// There are different heuristics we can use for this. Here are some simple
// ones.
- // Add has the property that adding any two 2's complement numbers can only
- // have one carry bit which can change a sign. As such, if LHS and RHS each
- // have at least two sign bits, we know that the addition of the two values
- // will sign extend fine.
+ // If LHS and RHS each have at least two sign bits, the addition will look
+ // like
+ //
+ // XX..... +
+ // YY.....
+ //
+ // If the carry into the most significant position is 0, X and Y can't both
+ // be 1 and therefore the carry out of the addition is also 0.
+ //
+ // If the carry into the most significant position is 1, X and Y can't both
+ // be 0 and therefore the carry out of the addition is also 1.
+ //
+ // Since the carry into the most significant position is always equal to
+ // the carry out of the addition, there is no signed overflow.
if (ComputeNumSignBits(LHS) > 1 && ComputeNumSignBits(RHS) > 1)
return true;
+ if (IntegerType *IT = dyn_cast<IntegerType>(LHS->getType())) {
+ int BitWidth = IT->getBitWidth();
+ APInt LHSKnownZero(BitWidth, 0);
+ APInt LHSKnownOne(BitWidth, 0);
+ computeKnownBits(LHS, LHSKnownZero, LHSKnownOne);
+
+ APInt RHSKnownZero(BitWidth, 0);
+ APInt RHSKnownOne(BitWidth, 0);
+ computeKnownBits(RHS, RHSKnownZero, RHSKnownOne);
+
+  // Addition of two 2's complement numbers having opposite signs will never
+ // overflow.
+ if ((LHSKnownOne[BitWidth - 1] && RHSKnownZero[BitWidth - 1]) ||
+ (LHSKnownZero[BitWidth - 1] && RHSKnownOne[BitWidth - 1]))
+ return true;
+
+ // Check if carry bit of addition will not cause overflow.
+ if (checkRippleForAdd(LHSKnownZero, RHSKnownZero))
+ return true;
+ if (checkRippleForAdd(RHSKnownZero, LHSKnownZero))
+ return true;
+ }
+ return false;
+}
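// Hypothetical standalone sketch of the first heuristic on 8-bit values: if
// both operands have at least two sign bits (i.e. lie in [-64, 63]), the sum
// fits in int8_t, so the narrow add and the sign-extended add agree.
#include <cassert>
int main() {
  for (int a = -64; a <= 63; ++a)
    for (int b = -64; b <= 63; ++b) {
      int wide = a + b;                          // add after sign extension
      signed char narrow = (signed char)(a + b); // result of the add in the narrow type
      assert(wide == narrow);                    // no signed overflow possible
    }
  return 0;
}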
- // If one of the operands only has one non-zero bit, and if the other operand
- // has a known-zero bit in a more significant place than it (not including the
- // sign bit) the ripple may go up to and fill the zero, but won't change the
- // sign. For example, (X & ~4) + 1.
-
- // TODO: Implement.
+/// WillNotOverflowUnsignedAdd - Return true if we can prove that:
+/// (zext (add LHS, RHS)) === (add (zext LHS), (zext RHS))
+bool InstCombiner::WillNotOverflowUnsignedAdd(Value *LHS, Value *RHS) {
+ // There are different heuristics we can use for this. Here is a simple one.
+ // If the sign bit of LHS and that of RHS are both zero, no unsigned wrap.
+ bool LHSKnownNonNegative, LHSKnownNegative;
+ bool RHSKnownNonNegative, RHSKnownNegative;
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, 0);
+ ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, 0);
+ if (LHSKnownNonNegative && RHSKnownNonNegative)
+ return true;
return false;
}
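// Hypothetical standalone sketch of the same idea on 8-bit values: if the top
// bit of both operands is known zero (both < 128), the sum is at most 254 and
// cannot wrap, so the zero-extended add and the narrow add agree.
#include <cassert>
int main() {
  for (unsigned a = 0; a < 128; ++a)
    for (unsigned b = 0; b < 128; ++b)
      assert((unsigned char)(a + b) == a + b);   // no unsigned wrap
  return 0;
}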
-Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
- bool Changed = SimplifyAssociativeOrCommutative(I);
+// Checks if any operand is negative and we can convert add to sub.
+// This function checks for the following negative patterns
+//   ADD(XOR(OR(Z, NOT(C)), C), 1) == NEG(AND(Z, C))
+// ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C))
+// XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even
+static Value *checkForNegativeOperand(BinaryOperator &I,
+ InstCombiner::BuilderTy *Builder) {
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
- if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
- I.hasNoUnsignedWrap(), TD))
- return ReplaceInstUsesWith(I, V);
+  // This function creates 2 instructions to replace ADD; we need at least one
+  // of LHS or RHS to have one use to ensure a benefit from the transform.
+ if (!LHS->hasOneUse() && !RHS->hasOneUse())
+ return nullptr;
+
+ Value *X = nullptr, *Y = nullptr, *Z = nullptr;
+ const APInt *C1 = nullptr, *C2 = nullptr;
+
+ // if ONE is on other side, swap
+ if (match(RHS, m_Add(m_Value(X), m_One())))
+ std::swap(LHS, RHS);
+
+ if (match(LHS, m_Add(m_Value(X), m_One()))) {
+ // if XOR on other side, swap
+ if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
+ std::swap(X, RHS);
+
+ if (match(X, m_Xor(m_Value(Y), m_APInt(C1)))) {
+ // X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1))
+ // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1))
+ if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) {
+ Value *NewAnd = Builder->CreateAnd(Z, *C1);
+ return Builder->CreateSub(RHS, NewAnd, "sub");
+ } else if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && (*C1 == *C2)) {
+ // X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1))
+ // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1))
+ Value *NewOr = Builder->CreateOr(Z, ~(*C1));
+ return Builder->CreateSub(RHS, NewOr, "sub");
+ }
+ }
+ }
- // (A*B)+(A*C) -> A*(B+C) etc
+ // Restore LHS and RHS
+ LHS = I.getOperand(0);
+ RHS = I.getOperand(1);
+
+ // if XOR is on other side, swap
+ if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
+ std::swap(LHS, RHS);
+
+ // C2 is ODD
+ // LHS = XOR(Y, C1), Y = AND(Z, C2), C1 == (C2 + 1) => LHS == NEG(OR(Z, ~C2))
+ // ADD(LHS, RHS) == SUB(RHS, OR(Z, ~C2))
+ if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1))))
+ if (C1->countTrailingZeros() == 0)
+ if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) {
+ Value *NewOr = Builder->CreateOr(Z, ~(*C2));
+ return Builder->CreateSub(RHS, NewOr, "sub");
+ }
+ return nullptr;
+}
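// Hypothetical standalone sketch verifying the first pattern above over all
// 8-bit values: ((Z | ~C) ^ C) + 1 == -(Z & C), which is what allows the add
// to be rewritten as a subtraction of AND(Z, C).
#include <cassert>
#include <cstdint>
int main() {
  for (unsigned z = 0; z < 256; ++z)
    for (unsigned c = 0; c < 256; ++c) {
      uint8_t Z = uint8_t(z), C = uint8_t(c);
      uint8_t lhs = uint8_t(uint8_t((Z | uint8_t(~C)) ^ C) + 1);
      uint8_t rhs = uint8_t(-(Z & C));
      assert(lhs == rhs);
    }
  return 0;
}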
+
+Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
+ bool Changed = SimplifyAssociativeOrCommutative(I);
+ Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
+
+ if (Value *V = SimplifyVectorOp(I))
+ return ReplaceInstUsesWith(I, V);
+
+ if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
+ I.hasNoUnsignedWrap(), DL))
+ return ReplaceInstUsesWith(I, V);
+
+ // (A*B)+(A*C) -> A*(B+C) etc
if (Value *V = SimplifyUsingDistributiveLaws(I))
return ReplaceInstUsesWith(I, V);
@@ -938,7 +1048,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
if (ZI->getSrcTy()->isIntegerTy(1))
return SelectInst::Create(ZI->getOperand(0), AddOne(CI), CI);
- Value *XorLHS = 0; ConstantInt *XorRHS = 0;
+ Value *XorLHS = nullptr; ConstantInt *XorRHS = nullptr;
if (match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
uint32_t TySizeBits = I.getType()->getScalarSizeInBits();
const APInt &RHSVal = CI->getValue();
@@ -970,7 +1080,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
IntegerType *IT = cast<IntegerType>(I.getType());
APInt LHSKnownOne(IT->getBitWidth(), 0);
APInt LHSKnownZero(IT->getBitWidth(), 0);
- ComputeMaskedBits(XorLHS, LHSKnownZero, LHSKnownOne);
+ computeKnownBits(XorLHS, LHSKnownZero, LHSKnownOne);
if ((XorRHS->getValue() | LHSKnownZero).isAllOnesValue())
return BinaryOperator::CreateSub(ConstantExpr::getAdd(XorRHS, CI),
XorLHS);
@@ -987,7 +1097,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
if (Instruction *NV = FoldOpIntoPhi(I))
return NV;
- if (I.getType()->isIntegerTy(1))
+ if (I.getType()->getScalarType()->isIntegerTy(1))
return BinaryOperator::CreateXor(LHS, RHS);
// X + X --> X << 1
@@ -1016,31 +1126,18 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
if (Value *V = dyn_castNegVal(RHS))
return BinaryOperator::CreateSub(LHS, V);
-
- ConstantInt *C2;
- if (Value *X = dyn_castFoldableMul(LHS, C2)) {
- if (X == RHS) // X*C + X --> X * (C+1)
- return BinaryOperator::CreateMul(RHS, AddOne(C2));
-
- // X*C1 + X*C2 --> X * (C1+C2)
- ConstantInt *C1;
- if (X == dyn_castFoldableMul(RHS, C1))
- return BinaryOperator::CreateMul(X, ConstantExpr::getAdd(C1, C2));
- }
-
- // X + X*C --> X * (C+1)
- if (dyn_castFoldableMul(RHS, C2) == LHS)
- return BinaryOperator::CreateMul(LHS, AddOne(C2));
+ if (Value *V = checkForNegativeOperand(I, Builder))
+ return ReplaceInstUsesWith(I, V);
// A+B --> A|B iff A and B have no bits set in common.
if (IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
APInt LHSKnownOne(IT->getBitWidth(), 0);
APInt LHSKnownZero(IT->getBitWidth(), 0);
- ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
+ computeKnownBits(LHS, LHSKnownZero, LHSKnownOne);
if (LHSKnownZero != 0) {
APInt RHSKnownOne(IT->getBitWidth(), 0);
APInt RHSKnownZero(IT->getBitWidth(), 0);
- ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);
+ computeKnownBits(RHS, RHSKnownZero, RHSKnownOne);
// No bits in common -> bitwise or.
if ((LHSKnownZero|RHSKnownZero).isAllOnesValue())
@@ -1048,35 +1145,16 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
}
}
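// Hypothetical standalone sketch of the A+B --> A|B fold above on 8-bit
// values: when A and B have no set bits in common, the addition cannot
// produce a carry, so A + B equals A | B.
#include <cassert>
int main() {
  for (unsigned a = 0; a < 256; ++a)
    for (unsigned b = 0; b < 256; ++b)
      if ((a & b) == 0)
        assert((a + b) == (a | b));
  return 0;
}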
- // W*X + Y*Z --> W * (X+Z) iff W == Y
- {
- Value *W, *X, *Y, *Z;
- if (match(LHS, m_Mul(m_Value(W), m_Value(X))) &&
- match(RHS, m_Mul(m_Value(Y), m_Value(Z)))) {
- if (W != Y) {
- if (W == Z) {
- std::swap(Y, Z);
- } else if (Y == X) {
- std::swap(W, X);
- } else if (X == Z) {
- std::swap(Y, Z);
- std::swap(W, X);
- }
- }
-
- if (W == Y) {
- Value *NewAdd = Builder->CreateAdd(X, Z, LHS->getName());
- return BinaryOperator::CreateMul(W, NewAdd);
- }
- }
+ if (Constant *CRHS = dyn_cast<Constant>(RHS)) {
+ Value *X;
+ if (match(LHS, m_Not(m_Value(X)))) // ~X + C --> (C-1) - X
+ return BinaryOperator::CreateSub(SubOne(CRHS), X);
}
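// Hypothetical standalone sketch of the "~X + C --> (C-1) - X" fold over all
// 8-bit values; the identity holds modulo 2^8 for every X and C.
#include <cassert>
#include <cstdint>
int main() {
  for (unsigned x = 0; x < 256; ++x)
    for (unsigned c = 0; c < 256; ++c)
      assert(uint8_t(uint8_t(~x) + c) == uint8_t(uint8_t(c - 1) - x));
  return 0;
}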
if (ConstantInt *CRHS = dyn_cast<ConstantInt>(RHS)) {
- Value *X = 0;
- if (match(LHS, m_Not(m_Value(X)))) // ~X + C --> (C-1) - X
- return BinaryOperator::CreateSub(SubOne(CRHS), X);
-
// (X & FF00) + xx00 -> (X+xx00) & FF00
+ Value *X;
+ ConstantInt *C2;
if (LHS->hasOneUse() &&
match(LHS, m_And(m_Value(X), m_ConstantInt(C2))) &&
CRHS->getValue() == (CRHS->getValue() & C2->getValue())) {
@@ -1164,7 +1242,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
// Check for (x & y) + (x ^ y)
{
- Value *A = 0, *B = 0;
+ Value *A = nullptr, *B = nullptr;
if (match(RHS, m_Xor(m_Value(A), m_Value(B))) &&
(match(LHS, m_And(m_Specific(A), m_Specific(B))) ||
match(LHS, m_And(m_Specific(B), m_Specific(A)))))
@@ -1176,14 +1254,29 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
return BinaryOperator::CreateOr(A, B);
}
- return Changed ? &I : 0;
+ // TODO(jingyue): Consider WillNotOverflowSignedAdd and
+ // WillNotOverflowUnsignedAdd to reduce the number of invocations of
+ // computeKnownBits.
+ if (!I.hasNoSignedWrap() && WillNotOverflowSignedAdd(LHS, RHS)) {
+ Changed = true;
+ I.setHasNoSignedWrap(true);
+ }
+ if (!I.hasNoUnsignedWrap() && WillNotOverflowUnsignedAdd(LHS, RHS)) {
+ Changed = true;
+ I.setHasNoUnsignedWrap(true);
+ }
+
+ return Changed ? &I : nullptr;
}
Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
bool Changed = SimplifyAssociativeOrCommutative(I);
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
- if (Value *V = SimplifyFAddInst(LHS, RHS, I.getFastMathFlags(), TD))
+ if (Value *V = SimplifyVectorOp(I))
+ return ReplaceInstUsesWith(I, V);
+
+ if (Value *V = SimplifyFAddInst(LHS, RHS, I.getFastMathFlags(), DL))
return ReplaceInstUsesWith(I, V);
if (isa<Constant>(RHS)) {
@@ -1198,13 +1291,19 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
// -A + B --> B - A
// -A + -B --> -(A + B)
- if (Value *LHSV = dyn_castFNegVal(LHS))
- return BinaryOperator::CreateFSub(RHS, LHSV);
+ if (Value *LHSV = dyn_castFNegVal(LHS)) {
+ Instruction *RI = BinaryOperator::CreateFSub(RHS, LHSV);
+ RI->copyFastMathFlags(&I);
+ return RI;
+ }
// A + -B --> A - B
if (!isa<Constant>(RHS))
- if (Value *V = dyn_castFNegVal(RHS))
- return BinaryOperator::CreateFSub(LHS, V);
+ if (Value *V = dyn_castFNegVal(RHS)) {
+ Instruction *RI = BinaryOperator::CreateFSub(LHS, V);
+ RI->copyFastMathFlags(&I);
+ return RI;
+ }
// Check for (fadd double (sitofp x), y), see if we can merge this into an
// integer add followed by a promotion.
@@ -1250,7 +1349,7 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
if (match(LHS, m_Select(m_Value(C1), m_Value(A1), m_Value(B1))) &&
match(RHS, m_Select(m_Value(C2), m_Value(A2), m_Value(B2)))) {
if (C1 == C2) {
- Constant *Z1=0, *Z2=0;
+ Constant *Z1=nullptr, *Z2=nullptr;
Value *A, *B, *C=C1;
if (match(A1, m_AnyZero()) && match(B2, m_AnyZero())) {
Z1 = dyn_cast<Constant>(A1); A = A2;
@@ -1274,7 +1373,7 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
return ReplaceInstUsesWith(I, V);
}
- return Changed ? &I : 0;
+ return Changed ? &I : nullptr;
}
@@ -1284,12 +1383,12 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
///
Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
Type *Ty) {
- assert(TD && "Must have target data info for this");
+ assert(DL && "Must have target data info for this");
// If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
// this.
bool Swapped = false;
- GEPOperator *GEP1 = 0, *GEP2 = 0;
+ GEPOperator *GEP1 = nullptr, *GEP2 = nullptr;
// For now we require one side to be the base pointer "A" or a constant
// GEP derived from it.
@@ -1327,9 +1426,9 @@ Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
// Avoid duplicating the arithmetic if GEP2 has non-constant indices and
// multiple users.
- if (GEP1 == 0 ||
- (GEP2 != 0 && !GEP2->hasAllConstantIndices() && !GEP2->hasOneUse()))
- return 0;
+ if (!GEP1 ||
+ (GEP2 && !GEP2->hasAllConstantIndices() && !GEP2->hasOneUse()))
+ return nullptr;
// Emit the offset of the GEP and an intptr_t.
Value *Result = EmitGEPOffset(GEP1);
@@ -1352,8 +1451,11 @@ Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
Instruction *InstCombiner::visitSub(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
+ if (Value *V = SimplifyVectorOp(I))
+ return ReplaceInstUsesWith(I, V);
+
if (Value *V = SimplifySubInst(Op0, Op1, I.hasNoSignedWrap(),
- I.hasNoUnsignedWrap(), TD))
+ I.hasNoUnsignedWrap(), DL))
return ReplaceInstUsesWith(I, V);
// (A*B)-(A*C) -> A*(B-C) etc
@@ -1375,51 +1477,53 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
if (match(Op0, m_AllOnes()))
return BinaryOperator::CreateNot(Op1);
- if (ConstantInt *C = dyn_cast<ConstantInt>(Op0)) {
+ if (Constant *C = dyn_cast<Constant>(Op0)) {
// C - ~X == X + (1+C)
- Value *X = 0;
+ Value *X = nullptr;
if (match(Op1, m_Not(m_Value(X))))
return BinaryOperator::CreateAdd(X, AddOne(C));
- // -(X >>u 31) -> (X >>s 31)
- // -(X >>s 31) -> (X >>u 31)
- if (C->isZero()) {
- Value *X; ConstantInt *CI;
- if (match(Op1, m_LShr(m_Value(X), m_ConstantInt(CI))) &&
- // Verify we are shifting out everything but the sign bit.
- CI->getValue() == I.getType()->getPrimitiveSizeInBits()-1)
- return BinaryOperator::CreateAShr(X, CI);
-
- if (match(Op1, m_AShr(m_Value(X), m_ConstantInt(CI))) &&
- // Verify we are shifting out everything but the sign bit.
- CI->getValue() == I.getType()->getPrimitiveSizeInBits()-1)
- return BinaryOperator::CreateLShr(X, CI);
- }
-
// Try to fold constant sub into select arguments.
if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
if (Instruction *R = FoldOpIntoSelect(I, SI))
return R;
// C-(X+C2) --> (C-C2)-X
- ConstantInt *C2;
- if (match(Op1, m_Add(m_Value(X), m_ConstantInt(C2))))
+ Constant *C2;
+ if (match(Op1, m_Add(m_Value(X), m_Constant(C2))))
return BinaryOperator::CreateSub(ConstantExpr::getSub(C, C2), X);
if (SimplifyDemandedInstructionBits(I))
return &I;
// Fold (sub 0, (zext bool to B)) --> (sext bool to B)
- if (C->isZero() && match(Op1, m_ZExt(m_Value(X))))
- if (X->getType()->isIntegerTy(1))
+ if (C->isNullValue() && match(Op1, m_ZExt(m_Value(X))))
+ if (X->getType()->getScalarType()->isIntegerTy(1))
return CastInst::CreateSExtOrBitCast(X, Op1->getType());
// Fold (sub 0, (sext bool to B)) --> (zext bool to B)
- if (C->isZero() && match(Op1, m_SExt(m_Value(X))))
- if (X->getType()->isIntegerTy(1))
+ if (C->isNullValue() && match(Op1, m_SExt(m_Value(X))))
+ if (X->getType()->getScalarType()->isIntegerTy(1))
return CastInst::CreateZExtOrBitCast(X, Op1->getType());
}
+ if (ConstantInt *C = dyn_cast<ConstantInt>(Op0)) {
+ // -(X >>u 31) -> (X >>s 31)
+ // -(X >>s 31) -> (X >>u 31)
+ if (C->isZero()) {
+ Value *X; ConstantInt *CI;
+ if (match(Op1, m_LShr(m_Value(X), m_ConstantInt(CI))) &&
+ // Verify we are shifting out everything but the sign bit.
+ CI->getValue() == I.getType()->getPrimitiveSizeInBits()-1)
+ return BinaryOperator::CreateAShr(X, CI);
+
+ if (match(Op1, m_AShr(m_Value(X), m_ConstantInt(CI))) &&
+ // Verify we are shifting out everything but the sign bit.
+ CI->getValue() == I.getType()->getPrimitiveSizeInBits()-1)
+ return BinaryOperator::CreateLShr(X, CI);
+ }
+ }
+
{ Value *Y;
// X-(X+Y) == -Y X-(Y+X) == -Y
@@ -1433,9 +1537,9 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
}
if (Op1->hasOneUse()) {
- Value *X = 0, *Y = 0, *Z = 0;
- Constant *C = 0;
- ConstantInt *CI = 0;
+ Value *X = nullptr, *Y = nullptr, *Z = nullptr;
+ Constant *C = nullptr;
+ Constant *CI = nullptr;
// (X - (Y - Z)) --> (X + (Z - Y)).
if (match(Op1, m_Sub(m_Value(Y), m_Value(Z))))
@@ -1449,9 +1553,9 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
return BinaryOperator::CreateAnd(Op0,
Builder->CreateNot(Y, Y->getName() + ".not"));
- // 0 - (X sdiv C) -> (X sdiv -C)
- if (match(Op1, m_SDiv(m_Value(X), m_Constant(C))) &&
- match(Op0, m_Zero()))
+ // 0 - (X sdiv C) -> (X sdiv -C) provided the negation doesn't overflow.
+ if (match(Op1, m_SDiv(m_Value(X), m_Constant(C))) && match(Op0, m_Zero()) &&
+ !C->isMinSignedValue())
return BinaryOperator::CreateSDiv(X, ConstantExpr::getNeg(C));
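// Hypothetical standalone sketch of the fold above: for a divisor C other than
// the minimum signed value (whose negation would overflow), -(X / C) == X / -C
// under C++'s truncating division, which mirrors sdiv.
#include <cassert>
int main() {
  for (int x = -128; x <= 127; ++x)
    for (int c = -127; c <= 127; ++c)   // deliberately skips c == -128
      if (c != 0)
        assert(-(x / c) == x / -c);
  return 0;
}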
// 0 - (X << Y) -> (-X << Y) when X is freely negatable.
@@ -1459,19 +1563,6 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
if (Value *XNeg = dyn_castNegVal(X))
return BinaryOperator::CreateShl(XNeg, Y);
- // X - X*C --> X * (1-C)
- if (match(Op1, m_Mul(m_Specific(Op0), m_ConstantInt(CI)))) {
- Constant *CP1 = ConstantExpr::getSub(ConstantInt::get(I.getType(),1), CI);
- return BinaryOperator::CreateMul(Op0, CP1);
- }
-
- // X - X<<C --> X * (1-(1<<C))
- if (match(Op1, m_Shl(m_Specific(Op0), m_ConstantInt(CI)))) {
- Constant *One = ConstantInt::get(I.getType(), 1);
- C = ConstantExpr::getSub(One, ConstantExpr::getShl(One, CI));
- return BinaryOperator::CreateMul(Op0, C);
- }
-
// X - A*-B -> X + A*B
// X - -A*B -> X + A*B
Value *A, *B;
@@ -1481,26 +1572,16 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
// X - A*CI -> X + A*-CI
// X - CI*A -> X + A*-CI
- if (match(Op1, m_Mul(m_Value(A), m_ConstantInt(CI))) ||
- match(Op1, m_Mul(m_ConstantInt(CI), m_Value(A)))) {
+ if (match(Op1, m_Mul(m_Value(A), m_Constant(CI))) ||
+ match(Op1, m_Mul(m_Constant(CI), m_Value(A)))) {
Value *NewMul = Builder->CreateMul(A, ConstantExpr::getNeg(CI));
return BinaryOperator::CreateAdd(Op0, NewMul);
}
}
- ConstantInt *C1;
- if (Value *X = dyn_castFoldableMul(Op0, C1)) {
- if (X == Op1) // X*C - X --> X * (C-1)
- return BinaryOperator::CreateMul(Op1, SubOne(C1));
-
- ConstantInt *C2; // X*C1 - X*C2 -> X * (C1-C2)
- if (X == dyn_castFoldableMul(Op1, C2))
- return BinaryOperator::CreateMul(X, ConstantExpr::getSub(C1, C2));
- }
-
// Optimize pointer differences into the same array into a size. Consider:
// &A[10] - &A[0]: we should compile this to "10".
- if (TD) {
+ if (DL) {
Value *LHSOp, *RHSOp;
if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
match(Op1, m_PtrToInt(m_Value(RHSOp))))
@@ -1512,15 +1593,18 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
return ReplaceInstUsesWith(I, Res);
- }
+ }
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyFSubInst(Op0, Op1, I.getFastMathFlags(), TD))
+ if (Value *V = SimplifyVectorOp(I))
+ return ReplaceInstUsesWith(I, V);
+
+ if (Value *V = SimplifyFSubInst(Op0, Op1, I.getFastMathFlags(), DL))
return ReplaceInstUsesWith(I, V);
if (isa<Constant>(Op0))
@@ -1556,5 +1640,5 @@ Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
return ReplaceInstUsesWith(I, V);
}
- return 0;
+ return nullptr;
}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 88bb69b..b23a606 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -13,22 +13,14 @@
#include "InstCombine.h"
#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Intrinsics.h"
-#include "llvm/Support/ConstantRange.h"
-#include "llvm/Support/PatternMatch.h"
+#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/CmpInstAnalysis.h"
using namespace llvm;
using namespace PatternMatch;
-
-/// AddOne - Add one to a ConstantInt.
-static Constant *AddOne(ConstantInt *C) {
- return ConstantInt::get(C->getContext(), C->getValue() + 1);
-}
-/// SubOne - Subtract one from a ConstantInt.
-static Constant *SubOne(ConstantInt *C) {
- return ConstantInt::get(C->getContext(), C->getValue()-1);
-}
+#define DEBUG_TYPE "instcombine"
/// isFreeToInvert - Return true if the specified value is free to invert (apply
/// ~ to). This happens in cases where the ~ can be eliminated.
@@ -60,7 +52,7 @@ static inline Value *dyn_castNotVal(Value *V) {
// Constants can be considered to be not'ed values...
if (ConstantInt *C = dyn_cast<ConstantInt>(V))
return ConstantInt::get(C->getType(), ~C->getValue());
- return 0;
+ return nullptr;
}
/// getFCmpCode - Similar to getICmpCode but for FCmpInst. This encodes a fcmp
@@ -133,7 +125,7 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op,
ConstantInt *AndRHS,
BinaryOperator &TheAnd) {
Value *X = Op->getOperand(0);
- Constant *Together = 0;
+ Constant *Together = nullptr;
if (!Op->isShift())
Together = ConstantExpr::getAnd(AndRHS, OpRHS);
@@ -260,7 +252,7 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op,
}
break;
}
- return 0;
+ return nullptr;
}
/// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise
@@ -342,12 +334,12 @@ Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
Instruction &I) {
Instruction *LHSI = dyn_cast<Instruction>(LHS);
if (!LHSI || LHSI->getNumOperands() != 2 ||
- !isa<ConstantInt>(LHSI->getOperand(1))) return 0;
+ !isa<ConstantInt>(LHSI->getOperand(1))) return nullptr;
ConstantInt *N = cast<ConstantInt>(LHSI->getOperand(1));
switch (LHSI->getOpcode()) {
- default: return 0;
+ default: return nullptr;
case Instruction::And:
if (ConstantExpr::getAnd(N, Mask) == Mask) {
// If the AndRHS is a power of two minus one (0+1+), this is simple.
@@ -367,7 +359,7 @@ Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
break;
}
}
- return 0;
+ return nullptr;
case Instruction::Or:
case Instruction::Xor:
// If the AndRHS is a power of two minus one (0+1+), and N&Mask == 0
@@ -375,7 +367,7 @@ Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
Mask->getValue().countPopulation()) == Mask->getValue().getBitWidth()
&& ConstantExpr::getAnd(N, Mask)->isNullValue())
break;
- return 0;
+ return nullptr;
}
if (isSub)
@@ -428,12 +420,12 @@ static unsigned getTypeOfMaskedICmp(Value* A, Value* B, Value* C,
ConstantInt *BCst = dyn_cast<ConstantInt>(B);
ConstantInt *CCst = dyn_cast<ConstantInt>(C);
bool icmp_eq = (SCC == ICmpInst::ICMP_EQ);
- bool icmp_abit = (ACst != 0 && !ACst->isZero() &&
+ bool icmp_abit = (ACst && !ACst->isZero() &&
ACst->getValue().isPowerOf2());
- bool icmp_bbit = (BCst != 0 && !BCst->isZero() &&
+ bool icmp_bbit = (BCst && !BCst->isZero() &&
BCst->getValue().isPowerOf2());
unsigned result = 0;
- if (CCst != 0 && CCst->isZero()) {
+ if (CCst && CCst->isZero()) {
// if C is zero, then both A and B qualify as mask
result |= (icmp_eq ? (FoldMskICmp_Mask_AllZeroes |
FoldMskICmp_Mask_AllZeroes |
@@ -465,7 +457,7 @@ static unsigned getTypeOfMaskedICmp(Value* A, Value* B, Value* C,
FoldMskICmp_AMask_NotMixed)
: (FoldMskICmp_Mask_AllZeroes |
FoldMskICmp_AMask_Mixed));
- } else if (ACst != 0 && CCst != 0 &&
+ } else if (ACst && CCst &&
ConstantExpr::getAnd(ACst, CCst) == CCst) {
result |= (icmp_eq ? FoldMskICmp_AMask_Mixed
: FoldMskICmp_AMask_NotMixed);
@@ -480,7 +472,7 @@ static unsigned getTypeOfMaskedICmp(Value* A, Value* B, Value* C,
FoldMskICmp_BMask_NotMixed)
: (FoldMskICmp_Mask_AllZeroes |
FoldMskICmp_BMask_Mixed));
- } else if (BCst != 0 && CCst != 0 &&
+ } else if (BCst && CCst &&
ConstantExpr::getAnd(BCst, CCst) == CCst) {
result |= (icmp_eq ? FoldMskICmp_BMask_Mixed
: FoldMskICmp_BMask_NotMixed);
@@ -513,31 +505,46 @@ static unsigned conjugateICmpMask(unsigned Mask) {
/// decomposition fails.
static bool decomposeBitTestICmp(const ICmpInst *I, ICmpInst::Predicate &Pred,
Value *&X, Value *&Y, Value *&Z) {
- // X < 0 is equivalent to (X & SignBit) != 0.
- if (I->getPredicate() == ICmpInst::ICMP_SLT)
- if (ConstantInt *C = dyn_cast<ConstantInt>(I->getOperand(1)))
- if (C->isZero()) {
- X = I->getOperand(0);
- Y = ConstantInt::get(I->getContext(),
- APInt::getSignBit(C->getBitWidth()));
- Pred = ICmpInst::ICMP_NE;
- Z = C;
- return true;
- }
+ ConstantInt *C = dyn_cast<ConstantInt>(I->getOperand(1));
+ if (!C)
+ return false;
- // X > -1 is equivalent to (X & SignBit) == 0.
- if (I->getPredicate() == ICmpInst::ICMP_SGT)
- if (ConstantInt *C = dyn_cast<ConstantInt>(I->getOperand(1)))
- if (C->isAllOnesValue()) {
- X = I->getOperand(0);
- Y = ConstantInt::get(I->getContext(),
- APInt::getSignBit(C->getBitWidth()));
- Pred = ICmpInst::ICMP_EQ;
- Z = ConstantInt::getNullValue(C->getType());
- return true;
- }
+ switch (I->getPredicate()) {
+ default:
+ return false;
+ case ICmpInst::ICMP_SLT:
+ // X < 0 is equivalent to (X & SignBit) != 0.
+ if (!C->isZero())
+ return false;
+ Y = ConstantInt::get(I->getContext(), APInt::getSignBit(C->getBitWidth()));
+ Pred = ICmpInst::ICMP_NE;
+ break;
+ case ICmpInst::ICMP_SGT:
+ // X > -1 is equivalent to (X & SignBit) == 0.
+ if (!C->isAllOnesValue())
+ return false;
+ Y = ConstantInt::get(I->getContext(), APInt::getSignBit(C->getBitWidth()));
+ Pred = ICmpInst::ICMP_EQ;
+ break;
+ case ICmpInst::ICMP_ULT:
+ // X <u 2^n is equivalent to (X & ~(2^n-1)) == 0.
+ if (!C->getValue().isPowerOf2())
+ return false;
+ Y = ConstantInt::get(I->getContext(), -C->getValue());
+ Pred = ICmpInst::ICMP_EQ;
+ break;
+ case ICmpInst::ICMP_UGT:
+ // X >u 2^n-1 is equivalent to (X & ~(2^n-1)) != 0.
+ if (!(C->getValue() + 1).isPowerOf2())
+ return false;
+ Y = ConstantInt::get(I->getContext(), ~C->getValue());
+ Pred = ICmpInst::ICMP_NE;
+ break;
+ }
- return false;
+ X = I->getOperand(0);
+ Z = ConstantInt::getNullValue(C->getType());
+ return true;
}
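// Hypothetical standalone sketch of the two new unsigned cases on 8-bit
// values, with C a power of two: X <u C is (X & -C) == 0, and X >u C-1 is
// (X & ~(C-1)) != 0.
#include <cassert>
#include <cstdint>
int main() {
  for (unsigned x = 0; x < 256; ++x)
    for (unsigned n = 0; n < 8; ++n) {
      uint8_t X = uint8_t(x), C = uint8_t(1u << n);
      assert((X < C) == ((X & uint8_t(-C)) == 0));
      assert((X > uint8_t(C - 1)) == ((X & uint8_t(~uint8_t(C - 1))) != 0));
    }
  return 0;
}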
/// foldLogOpOfMaskedICmpsHelper:
@@ -565,12 +572,12 @@ static unsigned foldLogOpOfMaskedICmpsHelper(Value*& A,
Value *L11,*L12,*L21,*L22;
// Check whether the icmp can be decomposed into a bit test.
if (decomposeBitTestICmp(LHS, LHSCC, L11, L12, L2)) {
- L21 = L22 = L1 = 0;
+ L21 = L22 = L1 = nullptr;
} else {
// Look for ANDs in the LHS icmp.
if (!L1->getType()->isIntegerTy()) {
// You can icmp pointers, for example. They really aren't masks.
- L11 = L12 = 0;
+ L11 = L12 = nullptr;
} else if (!match(L1, m_And(m_Value(L11), m_Value(L12)))) {
// Any icmp can be viewed as being trivially masked; if it allows us to
// remove one, it's worth it.
@@ -580,7 +587,7 @@ static unsigned foldLogOpOfMaskedICmpsHelper(Value*& A,
if (!L2->getType()->isIntegerTy()) {
// You can icmp pointers, for example. They really aren't masks.
- L21 = L22 = 0;
+ L21 = L22 = nullptr;
} else if (!match(L2, m_And(m_Value(L21), m_Value(L22)))) {
L21 = L2;
L22 = Constant::getAllOnesValue(L2->getType());
@@ -603,7 +610,7 @@ static unsigned foldLogOpOfMaskedICmpsHelper(Value*& A,
} else {
return 0;
}
- E = R2; R1 = 0; ok = true;
+ E = R2; R1 = nullptr; ok = true;
} else if (R1->getType()->isIntegerTy()) {
if (!match(R1, m_And(m_Value(R11), m_Value(R12)))) {
// As before, model no mask as a trivial mask if it'll let us do an
@@ -660,11 +667,11 @@ static unsigned foldLogOpOfMaskedICmpsHelper(Value*& A,
/// into a single (icmp(A & X) ==/!= Y)
static Value* foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
llvm::InstCombiner::BuilderTy* Builder) {
- Value *A = 0, *B = 0, *C = 0, *D = 0, *E = 0;
+ Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
unsigned mask = foldLogOpOfMaskedICmpsHelper(A, B, C, D, E, LHS, RHS,
LHSCC, RHSCC);
- if (mask == 0) return 0;
+ if (mask == 0) return nullptr;
assert(ICmpInst::isEquality(LHSCC) && ICmpInst::isEquality(RHSCC) &&
"foldLogOpOfMaskedICmpsHelper must return an equality predicate.");
@@ -717,9 +724,9 @@ static Value* foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
// their actual values. This isn't strictly necessary, just a "handle the
// easy cases for now" decision.
ConstantInt *BCst = dyn_cast<ConstantInt>(B);
- if (BCst == 0) return 0;
+ if (!BCst) return nullptr;
ConstantInt *DCst = dyn_cast<ConstantInt>(D);
- if (DCst == 0) return 0;
+ if (!DCst) return nullptr;
if (mask & (FoldMskICmp_Mask_NotAllZeroes | FoldMskICmp_BMask_NotAllOnes)) {
// (icmp ne (A & B), 0) & (icmp ne (A & D), 0) and
@@ -758,11 +765,11 @@ static Value* foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
// (icmp ne (A & B), B) & (icmp eq (A & D), D)
// with B and D, having a single bit set
ConstantInt *CCst = dyn_cast<ConstantInt>(C);
- if (CCst == 0) return 0;
+ if (!CCst) return nullptr;
if (LHSCC != NEWCC)
CCst = dyn_cast<ConstantInt>( ConstantExpr::getXor(BCst, CCst) );
ConstantInt *ECst = dyn_cast<ConstantInt>(E);
- if (ECst == 0) return 0;
+ if (!ECst) return nullptr;
if (RHSCC != NEWCC)
ECst = dyn_cast<ConstantInt>( ConstantExpr::getXor(DCst, ECst) );
ConstantInt* MCst = dyn_cast<ConstantInt>(
@@ -771,13 +778,13 @@ static Value* foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
// if there is a conflict we should actually return a false for the
// whole construct
if (!MCst->isZero())
- return 0;
+ return nullptr;
Value *newOr1 = Builder->CreateOr(B, D);
Value *newOr2 = ConstantExpr::getOr(CCst, ECst);
Value *newAnd = Builder->CreateAnd(A, newOr1);
return Builder->CreateICmp(NEWCC, newAnd, newOr2);
}
- return 0;
+ return nullptr;
}
/// FoldAndOfICmps - Fold (icmp)&(icmp) if possible.
@@ -806,7 +813,7 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
Value *Val = LHS->getOperand(0), *Val2 = RHS->getOperand(0);
ConstantInt *LHSCst = dyn_cast<ConstantInt>(LHS->getOperand(1));
ConstantInt *RHSCst = dyn_cast<ConstantInt>(RHS->getOperand(1));
- if (LHSCst == 0 || RHSCst == 0) return 0;
+ if (!LHSCst || !RHSCst) return nullptr;
if (LHSCst == RHSCst && LHSCC == RHSCC) {
// (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
@@ -830,7 +837,7 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
if (LHSCC == ICmpInst::ICMP_EQ && LHSCC == RHSCC &&
LHS->hasOneUse() && RHS->hasOneUse()) {
Value *V;
- ConstantInt *AndCst, *SmallCst = 0, *BigCst = 0;
+ ConstantInt *AndCst, *SmallCst = nullptr, *BigCst = nullptr;
// (trunc x) == C1 & (and x, CA) == C2
// (and x, CA) == C2 & (trunc x) == C1
@@ -861,14 +868,14 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
// From here on, we only handle:
// (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
- if (Val != Val2) return 0;
+ if (Val != Val2) return nullptr;
// ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
- return 0;
+ return nullptr;
// Make a constant range that's the intersection of the two icmp ranges.
// If the intersection is empty, we know that the result is false.
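// Hypothetical standalone sketch of the empty-intersection case on 8-bit
// values: the ranges of (X >u 10) and (X <u 5) do not overlap, so the AND of
// the two comparisons folds to false.
#include <cassert>
int main() {
  for (unsigned x = 0; x < 256; ++x)
    assert(!((x > 10) && (x < 5)));
  return 0;
}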
@@ -882,7 +889,7 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
// We can't fold (ugt x, C) & (sgt x, C2).
if (!PredicatesFoldable(LHSCC, RHSCC))
- return 0;
+ return nullptr;
// Ensure that the larger constant is on the RHS.
bool ShouldSwap;
@@ -1011,7 +1018,7 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
break;
}
- return 0;
+ return nullptr;
}
/// FoldAndOfFCmps - Optimize (fcmp)&(fcmp). NOTE: Unlike the rest of
@@ -1021,7 +1028,7 @@ Value *InstCombiner::FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
if (LHS->getPredicate() == FCmpInst::FCMP_ORD &&
RHS->getPredicate() == FCmpInst::FCMP_ORD) {
if (LHS->getOperand(0)->getType() != RHS->getOperand(0)->getType())
- return 0;
+ return nullptr;
// (fcmp ord x, c) & (fcmp ord y, c) -> (fcmp ord x, y)
if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
@@ -1038,7 +1045,7 @@ Value *InstCombiner::FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
isa<ConstantAggregateZero>(RHS->getOperand(1)))
return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
- return 0;
+ return nullptr;
}
Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
@@ -1091,7 +1098,7 @@ Value *InstCombiner::FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
}
}
- return 0;
+ return nullptr;
}
@@ -1099,7 +1106,10 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
bool Changed = SimplifyAssociativeOrCommutative(I);
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyAndInst(Op0, Op1, TD))
+ if (Value *V = SimplifyVectorOp(I))
+ return ReplaceInstUsesWith(I, V);
+
+ if (Value *V = SimplifyAndInst(Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
// (A|B)&(A|C) -> A|(B&C) etc
@@ -1193,7 +1203,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
// If this is an integer truncation, and if the source is an 'and' with
// immediate, transform it. This frequently occurs for bitfield accesses.
{
- Value *X = 0; ConstantInt *YC = 0;
+ Value *X = nullptr; ConstantInt *YC = nullptr;
if (match(Op0, m_Trunc(m_And(m_Value(X), m_ConstantInt(YC))))) {
// Change: and (trunc (and X, YC) to T), C2
// into : and (trunc X to T), trunc(YC) & C2
@@ -1226,7 +1236,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
}
{
- Value *A = 0, *B = 0, *C = 0, *D = 0;
+ Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
// (A|B) & ~(A&B) -> A^B
if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
match(Op1, m_Not(m_And(m_Value(C), m_Value(D)))) &&
@@ -1334,7 +1344,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
}
{
- Value *X = 0;
+ Value *X = nullptr;
bool OpsSwapped = false;
// Canonicalize SExt or Not to the LHS
if (match(Op1, m_SExt(m_Value())) ||
@@ -1361,7 +1371,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
std::swap(Op0, Op1);
}
- return Changed ? &I : 0;
+ return Changed ? &I : nullptr;
}
/// CollectBSwapParts - Analyze the specified subexpression and see if it is
@@ -1493,7 +1503,7 @@ Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
if (!ITy || ITy->getBitWidth() % 16 ||
// ByteMask only allows up to 32-byte values.
ITy->getBitWidth() > 32*8)
- return 0; // Can only bswap pairs of bytes. Can't do vectors.
+ return nullptr; // Can only bswap pairs of bytes. Can't do vectors.
/// ByteValues - For each byte of the result, we keep track of which value
/// defines each byte.
@@ -1503,16 +1513,16 @@ Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
// Try to find all the pieces corresponding to the bswap.
uint32_t ByteMask = ~0U >> (32-ByteValues.size());
if (CollectBSwapParts(&I, 0, ByteMask, ByteValues))
- return 0;
+ return nullptr;
// Check to see if all of the bytes come from the same value.
Value *V = ByteValues[0];
- if (V == 0) return 0; // Didn't find a byte? Must be zero.
+ if (!V) return nullptr; // Didn't find a byte? Must be zero.
// Check to make sure that all of the bytes come from the same value.
for (unsigned i = 1, e = ByteValues.size(); i != e; ++i)
if (ByteValues[i] != V)
- return 0;
+ return nullptr;
Module *M = I.getParent()->getParent()->getParent();
Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, ITy);
return CallInst::Create(F, V);
@@ -1524,10 +1534,10 @@ Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
Value *C, Value *D) {
// If A is not a select of -1/0, this cannot match.
- Value *Cond = 0;
+ Value *Cond = nullptr;
if (!match(A, m_SExt(m_Value(Cond))) ||
!Cond->getType()->isIntegerTy(1))
- return 0;
+ return nullptr;
// ((cond?-1:0)&C) | (B&(cond?0:-1)) -> cond ? C : B.
if (match(D, m_Not(m_SExt(m_Specific(Cond)))))
@@ -1540,24 +1550,7 @@ static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
return SelectInst::Create(Cond, C, D);
if (match(B, m_SExt(m_Not(m_Specific(Cond)))))
return SelectInst::Create(Cond, C, D);
- return 0;
-}
-
-/// IsOneHotValue - Returns true for "one-hot" values (values where at most
-/// one bit can be set).
-static bool IsOneHotValue(Value *V) {
- // Match 1<<K.
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V))
- if (BO->getOpcode() == Instruction::Shl) {
- ConstantInt *One = dyn_cast<ConstantInt>(BO->getOperand(0));
- return One && One->isOne();
- }
-
- // Check for power of two integer constants.
- if (ConstantInt *K = dyn_cast<ConstantInt>(V))
- return K->getValue().isPowerOf2();
-
- return false;
+ return nullptr;
}
/// FoldOrOfICmps - Fold (icmp)|(icmp) if possible.
@@ -1578,16 +1571,16 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
LAnd->getOpcode() == Instruction::And &&
RAnd->getOpcode() == Instruction::And) {
- Value *Mask = 0;
- Value *Masked = 0;
+ Value *Mask = nullptr;
+ Value *Masked = nullptr;
if (LAnd->getOperand(0) == RAnd->getOperand(0) &&
- IsOneHotValue(LAnd->getOperand(1)) &&
- IsOneHotValue(RAnd->getOperand(1))) {
+ isKnownToBeAPowerOfTwo(LAnd->getOperand(1)) &&
+ isKnownToBeAPowerOfTwo(RAnd->getOperand(1))) {
Mask = Builder->CreateOr(LAnd->getOperand(1), RAnd->getOperand(1));
Masked = Builder->CreateAnd(LAnd->getOperand(0), Mask);
} else if (LAnd->getOperand(1) == RAnd->getOperand(1) &&
- IsOneHotValue(LAnd->getOperand(0)) &&
- IsOneHotValue(RAnd->getOperand(0))) {
+ isKnownToBeAPowerOfTwo(LAnd->getOperand(0)) &&
+ isKnownToBeAPowerOfTwo(RAnd->getOperand(0))) {
Mask = Builder->CreateOr(LAnd->getOperand(0), RAnd->getOperand(0));
Masked = Builder->CreateAnd(LAnd->getOperand(1), Mask);
}
@@ -1620,7 +1613,7 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
if (LHS->hasOneUse() || RHS->hasOneUse()) {
// (icmp eq B, 0) | (icmp ult A, B) -> (icmp ule A, B-1)
// (icmp eq B, 0) | (icmp ugt B, A) -> (icmp ule A, B-1)
- Value *A = 0, *B = 0;
+ Value *A = nullptr, *B = nullptr;
if (LHSCC == ICmpInst::ICMP_EQ && LHSCst && LHSCst->isZero()) {
B = Val;
if (RHSCC == ICmpInst::ICMP_ULT && Val == RHS->getOperand(1))
@@ -1644,7 +1637,7 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
}
// This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
- if (LHSCst == 0 || RHSCst == 0) return 0;
+ if (!LHSCst || !RHSCst) return nullptr;
if (LHSCst == RHSCst && LHSCC == RHSCC) {
// (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
@@ -1665,18 +1658,18 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
// From here on, we only handle:
// (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
- if (Val != Val2) return 0;
+ if (Val != Val2) return nullptr;
// ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
- return 0;
+ return nullptr;
// We can't fold (ugt x, C) | (sgt x, C2).
if (!PredicatesFoldable(LHSCC, RHSCC))
- return 0;
+ return nullptr;
// Ensure that the larger constant is on the RHS.
bool ShouldSwap;
@@ -1821,7 +1814,7 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
}
break;
}
- return 0;
+ return nullptr;
}
/// FoldOrOfFCmps - Optimize (fcmp)|(fcmp). NOTE: Unlike the rest of
@@ -1849,7 +1842,7 @@ Value *InstCombiner::FoldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
isa<ConstantAggregateZero>(RHS->getOperand(1)))
return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
- return 0;
+ return nullptr;
}
Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
@@ -1881,7 +1874,7 @@ Value *InstCombiner::FoldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
return getFCmpValue(Op0Ordered, Op0Pred|Op1Pred, Op0LHS, Op0RHS, Builder);
}
}
- return 0;
+ return nullptr;
}
/// FoldOrWithConstants - This helper function folds:
@@ -1896,28 +1889,31 @@ Value *InstCombiner::FoldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
Instruction *InstCombiner::FoldOrWithConstants(BinaryOperator &I, Value *Op,
Value *A, Value *B, Value *C) {
ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
- if (!CI1) return 0;
+ if (!CI1) return nullptr;
- Value *V1 = 0;
- ConstantInt *CI2 = 0;
- if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)))) return 0;
+ Value *V1 = nullptr;
+ ConstantInt *CI2 = nullptr;
+ if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)))) return nullptr;
APInt Xor = CI1->getValue() ^ CI2->getValue();
- if (!Xor.isAllOnesValue()) return 0;
+ if (!Xor.isAllOnesValue()) return nullptr;
if (V1 == A || V1 == B) {
Value *NewOp = Builder->CreateAnd((V1 == A) ? B : A, CI1);
return BinaryOperator::CreateOr(NewOp, V1);
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitOr(BinaryOperator &I) {
bool Changed = SimplifyAssociativeOrCommutative(I);
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyOrInst(Op0, Op1, TD))
+ if (Value *V = SimplifyVectorOp(I))
+ return ReplaceInstUsesWith(I, V);
+
+ if (Value *V = SimplifyOrInst(Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
// (A&B)|(A&C) -> A&(B|C) etc
@@ -1930,7 +1926,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
return &I;
if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
- ConstantInt *C1 = 0; Value *X = 0;
+ ConstantInt *C1 = nullptr; Value *X = nullptr;
// (X & C1) | C2 --> (X | C2) & (C1|C2)
// iff (C1 & C2) == 0.
if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))) &&
@@ -1961,8 +1957,8 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
return NV;
}
- Value *A = 0, *B = 0;
- ConstantInt *C1 = 0, *C2 = 0;
+ Value *A = nullptr, *B = nullptr;
+ ConstantInt *C1 = nullptr, *C2 = nullptr;
// (A | B) | C and A | (B | C) -> bswap if possible.
// (A >> B) | (C << D) and (A << B) | (B >> C) -> bswap if possible.
@@ -1993,36 +1989,13 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
}
// (A & C)|(B & D)
- Value *C = 0, *D = 0;
+ Value *C = nullptr, *D = nullptr;
if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
match(Op1, m_And(m_Value(B), m_Value(D)))) {
- Value *V1 = 0, *V2 = 0;
+ Value *V1 = nullptr, *V2 = nullptr;
C1 = dyn_cast<ConstantInt>(C);
C2 = dyn_cast<ConstantInt>(D);
if (C1 && C2) { // (A & C1)|(B & C2)
- // If we have: ((V + N) & C1) | (V & C2)
- // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
- // replace with V+N.
- if (C1->getValue() == ~C2->getValue()) {
- if ((C2->getValue() & (C2->getValue()+1)) == 0 && // C2 == 0+1+
- match(A, m_Add(m_Value(V1), m_Value(V2)))) {
- // Add commutes, try both ways.
- if (V1 == B && MaskedValueIsZero(V2, C2->getValue()))
- return ReplaceInstUsesWith(I, A);
- if (V2 == B && MaskedValueIsZero(V1, C2->getValue()))
- return ReplaceInstUsesWith(I, A);
- }
- // Or commutes, try both ways.
- if ((C1->getValue() & (C1->getValue()+1)) == 0 &&
- match(B, m_Add(m_Value(V1), m_Value(V2)))) {
- // Add commutes, try both ways.
- if (V1 == A && MaskedValueIsZero(V2, C1->getValue()))
- return ReplaceInstUsesWith(I, B);
- if (V2 == A && MaskedValueIsZero(V1, C1->getValue()))
- return ReplaceInstUsesWith(I, B);
- }
- }
-
if ((C1->getValue() & C2->getValue()) == 0) {
// ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2)
// iff (C1&C2) == 0 and (N&~C1) == 0
@@ -2040,7 +2013,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2)
// iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0.
- ConstantInt *C3 = 0, *C4 = 0;
+ ConstantInt *C3 = nullptr, *C4 = nullptr;
if (match(A, m_Or(m_Value(V1), m_ConstantInt(C3))) &&
(C3->getValue() & ~C1->getValue()) == 0 &&
match(B, m_Or(m_Specific(V1), m_ConstantInt(C4))) &&
@@ -2232,7 +2205,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// Since this OR statement hasn't been optimized further yet, we hope
// that this transformation will allow the new ORs to be optimized.
{
- Value *X = 0, *Y = 0;
+ Value *X = nullptr, *Y = nullptr;
if (Op0->hasOneUse() && Op1->hasOneUse() &&
match(Op0, m_Select(m_Value(X), m_Value(A), m_Value(B))) &&
match(Op1, m_Select(m_Value(Y), m_Value(C), m_Value(D))) && X == Y) {
@@ -2242,14 +2215,17 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
}
}
- return Changed ? &I : 0;
+ return Changed ? &I : nullptr;
}
Instruction *InstCombiner::visitXor(BinaryOperator &I) {
bool Changed = SimplifyAssociativeOrCommutative(I);
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyXorInst(Op0, Op1, TD))
+ if (Value *V = SimplifyVectorOp(I))
+ return ReplaceInstUsesWith(I, V);
+
+ if (Value *V = SimplifyXorInst(Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
// (A&B)^(A&C) -> A&(B^C) etc
@@ -2506,5 +2482,5 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
}
}
- return Changed ? &I : 0;
+ return Changed ? &I : nullptr;
}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 0cd7b14..658178d 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -14,14 +14,16 @@
#include "InstCombine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
-#include "llvm/Support/CallSite.h"
-#include "llvm/Support/PatternMatch.h"
+#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace PatternMatch;
+#define DEBUG_TYPE "instcombine"
+
STATISTIC(NumSimplified, "Number of library calls simplified");
/// getPromotedType - Return the specified type promoted as it would be to pass
@@ -56,8 +58,8 @@ static Type *reduceToSingleValueType(Type *T) {
}
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
- unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), TD);
- unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), TD);
+ unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL);
+ unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL);
unsigned MinAlign = std::min(DstAlign, SrcAlign);
unsigned CopyAlign = MI->getAlignment();
@@ -70,17 +72,17 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
// If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
// load/store.
ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
- if (MemOpLength == 0) return 0;
+ if (!MemOpLength) return nullptr;
// Source and destination pointer types are always "i8*" for intrinsic. See
// if the size is something we can handle with a single primitive load/store.
// A single load+store correctly handles overlapping memory in the memmove
// case.
uint64_t Size = MemOpLength->getLimitedValue();
- assert(Size && "0-sized memory transfering should be removed already.");
+ assert(Size && "0-sized memory transferring should be removed already.");
if (Size > 8 || (Size&(Size-1)))
- return 0; // If not 1/2/4/8 bytes, exit.
+ return nullptr; // If not 1/2/4/8 bytes, exit.
// Use an integer load+store unless we can find something better.
unsigned SrcAddrSp =
@@ -99,11 +101,11 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
// dest address will be promotable. See if we can find a better type than the
// integer datatype.
Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
- MDNode *CopyMD = 0;
+ MDNode *CopyMD = nullptr;
if (StrippedDest != MI->getArgOperand(0)) {
Type *SrcETy = cast<PointerType>(StrippedDest->getType())
->getElementType();
- if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
+ if (DL && SrcETy->isSized() && DL->getTypeStoreSize(SrcETy) == Size) {
// The SrcETy might be something like {{{double}}} or [1 x double]. Rip
// down through these levels if so.
SrcETy = reduceToSingleValueType(SrcETy);
@@ -152,7 +154,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
}
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
- unsigned Alignment = getKnownAlignment(MI->getDest(), TD);
+ unsigned Alignment = getKnownAlignment(MI->getDest(), DL);
if (MI->getAlignment() < Alignment) {
MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
Alignment, false));
@@ -163,7 +165,7 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
- return 0;
+ return nullptr;
uint64_t Len = LenC->getLimitedValue();
Alignment = MI->getAlignment();
assert(Len && "0-sized memory setting should be removed already.");
@@ -191,7 +193,7 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
return MI;
}
- return 0;
+ return nullptr;
}
/// visitCallInst - CallInst simplification. This mostly only handles folding
@@ -233,7 +235,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// No other transformations apply to volatile transfers.
if (MI->isVolatile())
- return 0;
+ return nullptr;
// If we have a memmove and the source operation is a constant global,
// then the source and dest pointers can't alias, so we can change this
@@ -274,13 +276,13 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
default: break;
case Intrinsic::objectsize: {
uint64_t Size;
- if (getObjectSize(II->getArgOperand(0), Size, TD, TLI))
+ if (getObjectSize(II->getArgOperand(0), Size, DL, TLI))
return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size));
- return 0;
+ return nullptr;
}
case Intrinsic::bswap: {
Value *IIOperand = II->getArgOperand(0);
- Value *X = 0;
+ Value *X = nullptr;
// bswap(bswap(x)) -> x
if (match(IIOperand, m_BSwap(m_Value(X))))
@@ -320,7 +322,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
uint32_t BitWidth = IT->getBitWidth();
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
- ComputeMaskedBits(II->getArgOperand(0), KnownZero, KnownOne);
+ computeKnownBits(II->getArgOperand(0), KnownZero, KnownOne);
unsigned TrailingZeros = KnownOne.countTrailingZeros();
APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
if ((Mask & KnownZero) == Mask)
@@ -338,7 +340,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
uint32_t BitWidth = IT->getBitWidth();
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
- ComputeMaskedBits(II->getArgOperand(0), KnownZero, KnownOne);
+ computeKnownBits(II->getArgOperand(0), KnownZero, KnownOne);
unsigned LeadingZeros = KnownOne.countLeadingZeros();
APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
if ((Mask & KnownZero) == Mask)
@@ -353,14 +355,14 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
uint32_t BitWidth = IT->getBitWidth();
APInt LHSKnownZero(BitWidth, 0);
APInt LHSKnownOne(BitWidth, 0);
- ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
+ computeKnownBits(LHS, LHSKnownZero, LHSKnownOne);
bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];
if (LHSKnownNegative || LHSKnownPositive) {
APInt RHSKnownZero(BitWidth, 0);
APInt RHSKnownOne(BitWidth, 0);
- ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);
+ computeKnownBits(RHS, RHSKnownZero, RHSKnownOne);
bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
if (LHSKnownNegative && RHSKnownNegative) {
@@ -419,6 +421,21 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
}
}
+
+  // We can strength reduce this signed add into a regular add if we
+ // can prove that it will never overflow.
+ if (II->getIntrinsicID() == Intrinsic::sadd_with_overflow) {
+ Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
+ if (WillNotOverflowSignedAdd(LHS, RHS)) {
+ Value *Add = Builder->CreateNSWAdd(LHS, RHS);
+ Add->takeName(&CI);
+ Constant *V[] = {UndefValue::get(Add->getType()), Builder->getFalse()};
+ StructType *ST = cast<StructType>(II->getType());
+ Constant *Struct = ConstantStruct::get(ST, V);
+ return InsertValueInst::Create(Struct, Add, 0);
+ }
+ }
+
break;
case Intrinsic::usub_with_overflow:
case Intrinsic::ssub_with_overflow:
@@ -447,10 +464,10 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
APInt LHSKnownZero(BitWidth, 0);
APInt LHSKnownOne(BitWidth, 0);
- ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
+ computeKnownBits(LHS, LHSKnownZero, LHSKnownOne);
APInt RHSKnownZero(BitWidth, 0);
APInt RHSKnownOne(BitWidth, 0);
- ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);
+ computeKnownBits(RHS, RHSKnownZero, RHSKnownOne);
// Get the largest possible values for each operand.
APInt LHSMax = ~LHSKnownZero;
@@ -504,7 +521,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ppc_altivec_lvx:
case Intrinsic::ppc_altivec_lvxl:
// Turn PPC lvx -> load if the pointer is known aligned.
- if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
+ if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL) >= 16) {
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType()));
return new LoadInst(Ptr);
@@ -513,7 +530,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ppc_altivec_stvx:
case Intrinsic::ppc_altivec_stvxl:
// Turn stvx -> store if the pointer is known aligned.
- if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, TD) >= 16) {
+ if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL) >= 16) {
Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(0)->getType());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
@@ -524,7 +541,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_sse2_storeu_pd:
case Intrinsic::x86_sse2_storeu_dq:
// Turn X86 storeu -> store if the pointer is known aligned.
- if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
+ if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL) >= 16) {
Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(1)->getType());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
@@ -554,6 +571,79 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
break;
}
+ // Constant fold <A x Bi> << Ci.
+ // FIXME: We don't handle _dq because it's a shift of an i128, but it is
+ // represented in the IR as <2 x i64>. A per-element shift is wrong.
+ case Intrinsic::x86_sse2_psll_d:
+ case Intrinsic::x86_sse2_psll_q:
+ case Intrinsic::x86_sse2_psll_w:
+ case Intrinsic::x86_sse2_pslli_d:
+ case Intrinsic::x86_sse2_pslli_q:
+ case Intrinsic::x86_sse2_pslli_w:
+ case Intrinsic::x86_avx2_psll_d:
+ case Intrinsic::x86_avx2_psll_q:
+ case Intrinsic::x86_avx2_psll_w:
+ case Intrinsic::x86_avx2_pslli_d:
+ case Intrinsic::x86_avx2_pslli_q:
+ case Intrinsic::x86_avx2_pslli_w:
+ case Intrinsic::x86_sse2_psrl_d:
+ case Intrinsic::x86_sse2_psrl_q:
+ case Intrinsic::x86_sse2_psrl_w:
+ case Intrinsic::x86_sse2_psrli_d:
+ case Intrinsic::x86_sse2_psrli_q:
+ case Intrinsic::x86_sse2_psrli_w:
+ case Intrinsic::x86_avx2_psrl_d:
+ case Intrinsic::x86_avx2_psrl_q:
+ case Intrinsic::x86_avx2_psrl_w:
+ case Intrinsic::x86_avx2_psrli_d:
+ case Intrinsic::x86_avx2_psrli_q:
+ case Intrinsic::x86_avx2_psrli_w: {
+ // Simplify if the count is constant: fold to zero if the count is >= BitWidth,
+ // otherwise fold to shl/lshr.
+ auto CDV = dyn_cast<ConstantDataVector>(II->getArgOperand(1));
+ auto CInt = dyn_cast<ConstantInt>(II->getArgOperand(1));
+ if (!CDV && !CInt)
+ break;
+ ConstantInt *Count;
+ if (CDV)
+ Count = cast<ConstantInt>(CDV->getElementAsConstant(0));
+ else
+ Count = CInt;
+
+ auto Vec = II->getArgOperand(0);
+ auto VT = cast<VectorType>(Vec->getType());
+ if (Count->getZExtValue() >
+ VT->getElementType()->getPrimitiveSizeInBits() - 1)
+ return ReplaceInstUsesWith(
+ CI, ConstantAggregateZero::get(Vec->getType()));
+
+ bool isPackedShiftLeft = true;
+ switch (II->getIntrinsicID()) {
+ default : break;
+ case Intrinsic::x86_sse2_psrl_d:
+ case Intrinsic::x86_sse2_psrl_q:
+ case Intrinsic::x86_sse2_psrl_w:
+ case Intrinsic::x86_sse2_psrli_d:
+ case Intrinsic::x86_sse2_psrli_q:
+ case Intrinsic::x86_sse2_psrli_w:
+ case Intrinsic::x86_avx2_psrl_d:
+ case Intrinsic::x86_avx2_psrl_q:
+ case Intrinsic::x86_avx2_psrl_w:
+ case Intrinsic::x86_avx2_psrli_d:
+ case Intrinsic::x86_avx2_psrli_q:
+ case Intrinsic::x86_avx2_psrli_w: isPackedShiftLeft = false; break;
+ }
+
+ unsigned VWidth = VT->getNumElements();
+ // Get a constant vector of the same type as the first operand.
+ auto VTCI = ConstantInt::get(VT->getElementType(), Count->getZExtValue());
+ if (isPackedShiftLeft)
+ return BinaryOperator::CreateShl(Vec,
+ Builder->CreateVectorSplat(VWidth, VTCI));
+
+ return BinaryOperator::CreateLShr(Vec,
+ Builder->CreateVectorSplat(VWidth, VTCI));
+ }
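A per-lane sketch of the shift behavior this fold relies on (assuming the documented SSE2/AVX2 semantics): a count of BitWidth or more clears the lane, while anything smaller is an ordinary shift, which is why the code returns a zero vector in the first case and a splatted shl/lshr otherwise.

#include <cstdint>

// One 32-bit lane of the psll.d / psrl.d family (illustrative only).
uint32_t lane_shl(uint32_t x, uint64_t count) {
  return count >= 32 ? 0u : x << count;
}
uint32_t lane_lshr(uint32_t x, uint64_t count) {
  return count >= 32 ? 0u : x >> count;
}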
case Intrinsic::x86_sse41_pmovsxbw:
case Intrinsic::x86_sse41_pmovsxwd:
@@ -576,8 +666,160 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
break;
}
+ case Intrinsic::x86_sse4a_insertqi: {
+ // insertqi x, y, 64, 0 can just copy y's lower bits and leave the top
+ // ones undef
+ // TODO: eventually we should lower this intrinsic to IR
+ if (auto CIWidth = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
+ if (auto CIStart = dyn_cast<ConstantInt>(II->getArgOperand(3))) {
+ if (CIWidth->equalsInt(64) && CIStart->isZero()) {
+ Value *Vec = II->getArgOperand(1);
+ Value *Undef = UndefValue::get(Vec->getType());
+ const uint32_t Mask[] = { 0, 2 };
+ return ReplaceInstUsesWith(
+ CI,
+ Builder->CreateShuffleVector(
+ Vec, Undef, ConstantDataVector::get(
+ II->getContext(), ArrayRef<uint32_t>(Mask))));
+
+ } else if (auto Source =
+ dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
+ if (Source->hasOneUse() &&
+ Source->getArgOperand(1) == II->getArgOperand(1)) {
+ // If the source of the insert has only one use and it's another
+ // insert (and they're both inserting from the same vector), try to
+ // bundle both together.
+ auto CISourceWidth =
+ dyn_cast<ConstantInt>(Source->getArgOperand(2));
+ auto CISourceStart =
+ dyn_cast<ConstantInt>(Source->getArgOperand(3));
+ if (CISourceStart && CISourceWidth) {
+ unsigned Start = CIStart->getZExtValue();
+ unsigned Width = CIWidth->getZExtValue();
+ unsigned End = Start + Width;
+ unsigned SourceStart = CISourceStart->getZExtValue();
+ unsigned SourceWidth = CISourceWidth->getZExtValue();
+ unsigned SourceEnd = SourceStart + SourceWidth;
+ unsigned NewStart, NewWidth;
+ bool ShouldReplace = false;
+ if (Start <= SourceStart && SourceStart <= End) {
+ NewStart = Start;
+ NewWidth = std::max(End, SourceEnd) - NewStart;
+ ShouldReplace = true;
+ } else if (SourceStart <= Start && Start <= SourceEnd) {
+ NewStart = SourceStart;
+ NewWidth = std::max(SourceEnd, End) - NewStart;
+ ShouldReplace = true;
+ }
+
+ if (ShouldReplace) {
+ Constant *ConstantWidth = ConstantInt::get(
+ II->getArgOperand(2)->getType(), NewWidth, false);
+ Constant *ConstantStart = ConstantInt::get(
+ II->getArgOperand(3)->getType(), NewStart, false);
+ Value *Args[4] = { Source->getArgOperand(0),
+ II->getArgOperand(1), ConstantWidth,
+ ConstantStart };
+ Module *M = CI.getParent()->getParent()->getParent();
+ Value *F =
+ Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
+ return ReplaceInstUsesWith(CI, Builder->CreateCall(F, Args));
+ }
+ }
+ }
+ }
+ }
+ }
+ break;
+ }
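The interval arithmetic used when bundling the two insertqi calls, restated as a standalone helper (a sketch that assumes the overlap test above already succeeded). For example, an insert of (start 0, width 16) over an insert of (start 8, width 16) from the same source combines into (start 0, width 24).

#include <algorithm>
#include <utility>

// Merge [Start, Start+Width) with [SourceStart, SourceStart+SourceWidth),
// assuming the two bit ranges touch or overlap (illustrative helper).
std::pair<unsigned, unsigned> mergeInsertRanges(unsigned Start, unsigned Width,
                                                unsigned SourceStart,
                                                unsigned SourceWidth) {
  unsigned End = Start + Width;
  unsigned SourceEnd = SourceStart + SourceWidth;
  unsigned NewStart = std::min(Start, SourceStart);
  unsigned NewWidth = std::max(End, SourceEnd) - NewStart;
  return { NewStart, NewWidth };   // e.g. (0,16) merged with (8,16) -> (0,24)
}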
+
+ case Intrinsic::x86_sse41_pblendvb:
+ case Intrinsic::x86_sse41_blendvps:
+ case Intrinsic::x86_sse41_blendvpd:
+ case Intrinsic::x86_avx_blendv_ps_256:
+ case Intrinsic::x86_avx_blendv_pd_256:
+ case Intrinsic::x86_avx2_pblendvb: {
+ // Convert blendv* to vector selects if the mask is constant.
+ // This optimization is convoluted because the intrinsic is defined as
+ // getting a vector of floats or doubles for the ps and pd versions.
+ // FIXME: That should be changed.
+ Value *Mask = II->getArgOperand(2);
+ if (auto C = dyn_cast<ConstantDataVector>(Mask)) {
+ auto Tyi1 = Builder->getInt1Ty();
+ auto SelectorType = cast<VectorType>(Mask->getType());
+ auto EltTy = SelectorType->getElementType();
+ unsigned Size = SelectorType->getNumElements();
+ unsigned BitWidth =
+ EltTy->isFloatTy()
+ ? 32
+ : (EltTy->isDoubleTy() ? 64 : EltTy->getIntegerBitWidth());
+ assert((BitWidth == 64 || BitWidth == 32 || BitWidth == 8) &&
+ "Wrong arguments for variable blend intrinsic");
+ SmallVector<Constant *, 32> Selectors;
+ for (unsigned I = 0; I < Size; ++I) {
+ // The intrinsics only read the top bit
+ uint64_t Selector;
+ if (BitWidth == 8)
+ Selector = C->getElementAsInteger(I);
+ else
+ Selector = C->getElementAsAPFloat(I).bitcastToAPInt().getZExtValue();
+ Selectors.push_back(ConstantInt::get(Tyi1, Selector >> (BitWidth - 1)));
+ }
+ auto NewSelector = ConstantVector::get(Selectors);
+ return SelectInst::Create(NewSelector, II->getArgOperand(1),
+ II->getArgOperand(0), "blendv");
+ } else {
+ break;
+ }
+ }
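A scalar sketch of the blendv semantics the conversion depends on (byte form shown; the ps/pd variants read the sign bit of each 32/64-bit element instead): only the top bit of each mask element picks a source, so a constant mask collapses to an IR select over an i1 vector.

#include <cstdint>

// One lane of pblendvb with a constant mask byte (illustrative).
uint8_t blend_lane(uint8_t a, uint8_t b, uint8_t maskByte) {
  bool takeSecond = (maskByte >> 7) & 1;  // only the top bit is read
  return takeSecond ? b : a;              // mirrors select(mask, arg1, arg0)
}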
+
+ case Intrinsic::x86_avx_vpermilvar_ps:
+ case Intrinsic::x86_avx_vpermilvar_ps_256:
+ case Intrinsic::x86_avx_vpermilvar_pd:
+ case Intrinsic::x86_avx_vpermilvar_pd_256: {
+ // Convert vpermil* to shufflevector if the mask is constant.
+ Value *V = II->getArgOperand(1);
+ unsigned Size = cast<VectorType>(V->getType())->getNumElements();
+ assert(Size == 8 || Size == 4 || Size == 2);
+ uint32_t Indexes[8];
+ if (auto C = dyn_cast<ConstantDataVector>(V)) {
+ // The intrinsics only read one or two bits, clear the rest.
+ for (unsigned I = 0; I < Size; ++I) {
+ uint32_t Index = C->getElementAsInteger(I) & 0x3;
+ if (II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd ||
+ II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256)
+ Index >>= 1;
+ Indexes[I] = Index;
+ }
+ } else if (isa<ConstantAggregateZero>(V)) {
+ for (unsigned I = 0; I < Size; ++I)
+ Indexes[I] = 0;
+ } else {
+ break;
+ }
+ // The _256 variants are a bit trickier since the mask bits always index
+ // into the corresponding 128-bit half. In order to convert to a generic
+ // shuffle, we have to make that explicit.
+ if (II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_ps_256 ||
+ II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256) {
+ for (unsigned I = Size / 2; I < Size; ++I)
+ Indexes[I] += Size / 2;
+ }
+ auto NewC =
+ ConstantDataVector::get(V->getContext(), makeArrayRef(Indexes, Size));
+ auto V1 = II->getArgOperand(0);
+ auto V2 = UndefValue::get(V1->getType());
+ auto Shuffle = Builder->CreateShuffleVector(V1, V2, NewC);
+ return ReplaceInstUsesWith(CI, Shuffle);
+ }
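The index normalization above, pulled out as plain arithmetic (a sketch that mirrors the hunk; Size is the vector length): each mask element is reduced to at most two bits, the pd variants select with bit 1, and lanes in the upper half of the 256-bit forms can only address their own half.

#include <cstdint>

// Normalize one vpermilvar mask element into a shufflevector index.
uint32_t normalizePermilIndex(uint64_t maskElt, bool isPD, bool is256,
                              unsigned lane, unsigned Size) {
  uint32_t Index = uint32_t(maskElt) & 0x3;  // the intrinsics read one or two bits
  if (isPD)
    Index >>= 1;                             // pd selects with bit 1, not bit 0
  if (is256 && lane >= Size / 2)
    Index += Size / 2;                       // upper lanes stay in the upper half
  return Index;
}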
+
case Intrinsic::ppc_altivec_vperm:
// Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
+ // Note that ppc_altivec_vperm has a big-endian bias, so when creating
+ // a vector shuffle for little-endian, we must undo the transformation
+ // performed on vec_perm in altivec.h. That is, we must complement
+ // the permutation mask with respect to 31 and reverse the order of
+ // V1 and V2.
if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
assert(Mask->getType()->getVectorNumElements() == 16 &&
"Bad type for intrinsic!");
@@ -586,8 +828,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
bool AllEltsOk = true;
for (unsigned i = 0; i != 16; ++i) {
Constant *Elt = Mask->getAggregateElement(i);
- if (Elt == 0 ||
- !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
+ if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
AllEltsOk = false;
break;
}
@@ -611,10 +852,14 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
unsigned Idx =
cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
Idx &= 31; // Match the hardware behavior.
+ if (DL && DL->isLittleEndian())
+ Idx = 31 - Idx;
- if (ExtractedElts[Idx] == 0) {
+ if (!ExtractedElts[Idx]) {
+ Value *Op0ToUse = (DL && DL->isLittleEndian()) ? Op1 : Op0;
+ Value *Op1ToUse = (DL && DL->isLittleEndian()) ? Op0 : Op1;
ExtractedElts[Idx] =
- Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
+ Builder->CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
Builder->getInt32(Idx&15));
}
@@ -641,7 +886,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::arm_neon_vst2lane:
case Intrinsic::arm_neon_vst3lane:
case Intrinsic::arm_neon_vst4lane: {
- unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), TD);
+ unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), DL);
unsigned AlignArg = II->getNumArgOperands() - 1;
ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
@@ -654,7 +899,9 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
}
case Intrinsic::arm_neon_vmulls:
- case Intrinsic::arm_neon_vmullu: {
+ case Intrinsic::arm_neon_vmullu:
+ case Intrinsic::aarch64_neon_smull:
+ case Intrinsic::aarch64_neon_umull: {
Value *Arg0 = II->getArgOperand(0);
Value *Arg1 = II->getArgOperand(1);
@@ -664,46 +911,46 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
}
// Check for constant LHS & RHS - in this case we just simplify.
- bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu);
+ bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu ||
+ II->getIntrinsicID() == Intrinsic::aarch64_neon_umull);
VectorType *NewVT = cast<VectorType>(II->getType());
- unsigned NewWidth = NewVT->getElementType()->getIntegerBitWidth();
- if (ConstantDataVector *CV0 = dyn_cast<ConstantDataVector>(Arg0)) {
- if (ConstantDataVector *CV1 = dyn_cast<ConstantDataVector>(Arg1)) {
- VectorType* VT = cast<VectorType>(CV0->getType());
- SmallVector<Constant*, 4> NewElems;
- for (unsigned i = 0; i < VT->getNumElements(); ++i) {
- APInt CV0E =
- (cast<ConstantInt>(CV0->getAggregateElement(i)))->getValue();
- CV0E = Zext ? CV0E.zext(NewWidth) : CV0E.sext(NewWidth);
- APInt CV1E =
- (cast<ConstantInt>(CV1->getAggregateElement(i)))->getValue();
- CV1E = Zext ? CV1E.zext(NewWidth) : CV1E.sext(NewWidth);
- NewElems.push_back(
- ConstantInt::get(NewVT->getElementType(), CV0E * CV1E));
- }
- return ReplaceInstUsesWith(CI, ConstantVector::get(NewElems));
+ if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
+ if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
+ CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
+ CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);
+
+ return ReplaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
}
- // Couldn't simplify - cannonicalize constant to the RHS.
+ // Couldn't simplify - canonicalize constant to the RHS.
std::swap(Arg0, Arg1);
}
// Handle mul by one:
- if (ConstantDataVector *CV1 = dyn_cast<ConstantDataVector>(Arg1)) {
+ if (Constant *CV1 = dyn_cast<Constant>(Arg1))
if (ConstantInt *Splat =
- dyn_cast_or_null<ConstantInt>(CV1->getSplatValue())) {
- if (Splat->isOne()) {
- if (Zext)
- return CastInst::CreateZExtOrBitCast(Arg0, II->getType());
- // else
- return CastInst::CreateSExtOrBitCast(Arg0, II->getType());
- }
- }
- }
+ dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
+ if (Splat->isOne())
+ return CastInst::CreateIntegerCast(Arg0, II->getType(),
+ /*isSigned=*/!Zext);
break;
}
+ case Intrinsic::AMDGPU_rcp: {
+ if (const ConstantFP *C = dyn_cast<ConstantFP>(II->getArgOperand(0))) {
+ const APFloat &ArgVal = C->getValueAPF();
+ APFloat Val(ArgVal.getSemantics(), 1.0);
+ APFloat::opStatus Status = Val.divide(ArgVal,
+ APFloat::rmNearestTiesToEven);
+ // Only do this if it was exact and therefore not dependent on the
+ // rounding mode.
+ if (Status == APFloat::opOK)
+ return ReplaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
+ }
+
+ break;
+ }
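A small illustration of the exactness condition (assuming IEEE-754 doubles; this mirrors the opOK check rather than reimplementing APFloat): the reciprocal of a power of two only adjusts the exponent, so it folds, while a divisor such as 3.0 produces an inexact quotient and the call is left alone.

#include <cassert>

void reciprocalOfPowerOfTwoIsExact() {
  double x = 64.0;
  double r = 1.0 / x;                      // exponent-only change, no rounding
  assert(r * x == 1.0 && 1.0 / r == x);    // both identities hold exactly
}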
case Intrinsic::stackrestore: {
// If the save is right next to the restore, remove the restore. This can
// happen when variable allocas are DCE'd.
@@ -762,15 +1009,15 @@ Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
const CastInst * const CI,
- const DataLayout * const TD,
+ const DataLayout * const DL,
const int ix) {
if (!CI->isLosslessCast())
return false;
- // The size of ByVal arguments is derived from the type, so we
+ // The size of ByVal or InAlloca arguments is derived from the type, so we
// can't change to a type with a different size. If the size were
// passed explicitly we could avoid this check.
- if (!CS.isByValArgument(ix))
+ if (!CS.isByValOrInAllocaArgument(ix))
return true;
Type* SrcTy =
@@ -778,7 +1025,7 @@ static bool isSafeToEliminateVarargsCast(const CallSite CS,
Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
if (!SrcTy->isSized() || !DstTy->isSized())
return false;
- if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
+ if (!DL || DL->getTypeAllocSize(SrcTy) != DL->getTypeAllocSize(DstTy))
return false;
return true;
}
@@ -787,15 +1034,15 @@ static bool isSafeToEliminateVarargsCast(const CallSite CS,
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
-Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const DataLayout *TD) {
- if (CI->getCalledFunction() == 0) return 0;
+Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const DataLayout *DL) {
+ if (!CI->getCalledFunction()) return nullptr;
if (Value *With = Simplifier->optimizeCall(CI)) {
++NumSimplified;
return CI->use_empty() ? CI : ReplaceInstUsesWith(*CI, With);
}
- return 0;
+ return nullptr;
}
static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
@@ -803,37 +1050,36 @@ static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
// is good enough in practice and simpler than handling any number of casts.
Value *Underlying = TrampMem->stripPointerCasts();
if (Underlying != TrampMem &&
- (!Underlying->hasOneUse() || *Underlying->use_begin() != TrampMem))
- return 0;
+ (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
+ return nullptr;
if (!isa<AllocaInst>(Underlying))
- return 0;
+ return nullptr;
- IntrinsicInst *InitTrampoline = 0;
- for (Value::use_iterator I = TrampMem->use_begin(), E = TrampMem->use_end();
- I != E; I++) {
- IntrinsicInst *II = dyn_cast<IntrinsicInst>(*I);
+ IntrinsicInst *InitTrampoline = nullptr;
+ for (User *U : TrampMem->users()) {
+ IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
if (!II)
- return 0;
+ return nullptr;
if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
if (InitTrampoline)
// More than one init_trampoline writes to this value. Give up.
- return 0;
+ return nullptr;
InitTrampoline = II;
continue;
}
if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
// Allow any number of calls to adjust.trampoline.
continue;
- return 0;
+ return nullptr;
}
// No call to init.trampoline found.
if (!InitTrampoline)
- return 0;
+ return nullptr;
// Check that the alloca is being used in the expected way.
if (InitTrampoline->getOperand(0) != TrampMem)
- return 0;
+ return nullptr;
return InitTrampoline;
}
@@ -850,9 +1096,9 @@ static IntrinsicInst *FindInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
II->getOperand(0) == TrampMem)
return II;
if (Inst->mayWriteToMemory())
- return 0;
+ return nullptr;
}
- return 0;
+ return nullptr;
}
// Given a call to llvm.adjust.trampoline, find and return the corresponding
@@ -864,7 +1110,7 @@ static IntrinsicInst *FindInitTrampoline(Value *Callee) {
IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
if (!AdjustTramp ||
AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
- return 0;
+ return nullptr;
Value *TrampMem = AdjustTramp->getOperand(0);
@@ -872,7 +1118,7 @@ static IntrinsicInst *FindInitTrampoline(Value *Callee) {
return IT;
if (IntrinsicInst *IT = FindInitTrampolineFromBB(AdjustTramp, TrampMem))
return IT;
- return 0;
+ return nullptr;
}
// visitCallSite - Improvements for call and invoke instructions.
@@ -887,7 +1133,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
// arguments of the call/invoke.
Value *Callee = CS.getCalledValue();
if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
- return 0;
+ return nullptr;
if (Function *CalleeF = dyn_cast<Function>(Callee))
// If the call and callee calling conventions don't match, this call must
@@ -912,7 +1158,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
// change the callee to a null pointer.
cast<InvokeInst>(OldCall)->setCalledFunction(
Constant::getNullValue(CalleeF->getType()));
- return 0;
+ return nullptr;
}
if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
@@ -924,7 +1170,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
if (isa<InvokeInst>(CS.getInstruction())) {
// Can't remove an invoke because we cannot change the CFG.
- return 0;
+ return nullptr;
}
// This instruction is not reachable, just remove it. We insert a store to
@@ -949,7 +1195,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
E = CS.arg_end(); I != E; ++I, ++ix) {
CastInst *CI = dyn_cast<CastInst>(*I);
- if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
+ if (CI && isSafeToEliminateVarargsCast(CS, CI, DL, ix)) {
*I = CI->getOperand(0);
Changed = true;
}
@@ -966,13 +1212,13 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
// this. None of these calls are seen as possibly dead so go ahead and
// delete the instruction now.
if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
- Instruction *I = tryOptimizeCall(CI, TD);
+ Instruction *I = tryOptimizeCall(CI, DL);
// If we changed something return the result, etc. Otherwise let
// the fallthrough check.
if (I) return EraseInstFromFunction(*I);
}
- return Changed ? CS.getInstruction() : 0;
+ return Changed ? CS.getInstruction() : nullptr;
}
// transformConstExprCastCall - If the callee is a constexpr cast of a function,
@@ -981,7 +1227,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
Function *Callee =
dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
- if (Callee == 0)
+ if (!Callee)
return false;
Instruction *Caller = CS.getInstruction();
const AttributeSet &CallerPAL = CS.getAttributes();
@@ -994,11 +1240,12 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
Type *OldRetTy = Caller->getType();
Type *NewRetTy = FT->getReturnType();
- if (NewRetTy->isStructTy())
- return false; // TODO: Handle multiple return values.
-
// Check to see if we are changing the return type...
if (OldRetTy != NewRetTy) {
+
+ if (NewRetTy->isStructTy())
+ return false; // TODO: Handle multiple return values.
+
if (!CastInst::isBitCastable(NewRetTy, OldRetTy)) {
if (Callee->isDeclaration())
return false; // Cannot transform this return value.
@@ -1024,9 +1271,8 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
// the critical edge). Bail out in this case.
if (!Caller->use_empty())
if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
- for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
- UI != E; ++UI)
- if (PHINode *PN = dyn_cast<PHINode>(*UI))
+ for (User *U : II->users())
+ if (PHINode *PN = dyn_cast<PHINode>(U))
if (PN->getParent() == II->getNormalDest() ||
PN->getParent() == II->getUnwindDest())
return false;
@@ -1048,18 +1294,21 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
typeIncompatible(ParamTy, i + 1), i + 1))
return false; // Attribute not compatible with transformed value.
+ if (CS.isInAllocaArgument(i))
+ return false; // Cannot transform to and from inalloca.
+
// If the parameter is passed as a byval argument, then we have to have a
// sized type and the sized type has to have the same size as the old type.
if (ParamTy != ActTy &&
CallerPAL.getParamAttributes(i + 1).hasAttribute(i + 1,
Attribute::ByVal)) {
PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
- if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0)
+ if (!ParamPTy || !ParamPTy->getElementType()->isSized() || !DL)
return false;
Type *CurElTy = ActTy->getPointerElementType();
- if (TD->getTypeAllocSize(CurElTy) !=
- TD->getTypeAllocSize(ParamPTy->getElementType()))
+ if (DL->getTypeAllocSize(CurElTy) !=
+ DL->getTypeAllocSize(ParamPTy->getElementType()))
return false;
}
}
@@ -1223,6 +1472,8 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
if (!Caller->use_empty())
ReplaceInstUsesWith(*Caller, NV);
+ else if (Caller->hasValueHandle())
+ ValueHandleBase::ValueIsRAUWd(Caller, NV);
EraseInstFromFunction(*Caller);
return true;
@@ -1243,7 +1494,7 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
// If the call already has the 'nest' attribute somewhere then give up -
// otherwise 'nest' would occur twice after splicing in the chain.
if (Attrs.hasAttrSomewhere(Attribute::Nest))
- return 0;
+ return nullptr;
assert(Tramp &&
"transformCallThroughTrampoline called with incorrect CallSite.");
@@ -1255,7 +1506,7 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
const AttributeSet &NestAttrs = NestF->getAttributes();
if (!NestAttrs.isEmpty()) {
unsigned NestIdx = 1;
- Type *NestTy = 0;
+ Type *NestTy = nullptr;
AttributeSet NestAttr;
// Look for a parameter marked with the 'nest' attribute.
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 72377dc..b9c3d0f 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -14,11 +14,13 @@
#include "InstCombine.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/DataLayout.h"
-#include "llvm/Support/PatternMatch.h"
+#include "llvm/IR/PatternMatch.h"
#include "llvm/Target/TargetLibraryInfo.h"
using namespace llvm;
using namespace PatternMatch;
+#define DEBUG_TYPE "instcombine"
+
/// DecomposeSimpleLinearExpr - Analyze 'Val', seeing if it is a simple linear
/// expression. If so, decompose it, returning some value X, such that Val is
/// X*Scale+Offset.
@@ -79,7 +81,7 @@ static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
AllocaInst &AI) {
// This requires DataLayout to get the alloca alignment and size information.
- if (!TD) return 0;
+ if (!DL) return nullptr;
PointerType *PTy = cast<PointerType>(CI.getType());
@@ -89,26 +91,26 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
// Get the type really allocated and the type casted to.
Type *AllocElTy = AI.getAllocatedType();
Type *CastElTy = PTy->getElementType();
- if (!AllocElTy->isSized() || !CastElTy->isSized()) return 0;
+ if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;
- unsigned AllocElTyAlign = TD->getABITypeAlignment(AllocElTy);
- unsigned CastElTyAlign = TD->getABITypeAlignment(CastElTy);
- if (CastElTyAlign < AllocElTyAlign) return 0;
+ unsigned AllocElTyAlign = DL->getABITypeAlignment(AllocElTy);
+ unsigned CastElTyAlign = DL->getABITypeAlignment(CastElTy);
+ if (CastElTyAlign < AllocElTyAlign) return nullptr;
// If the allocation has multiple uses, only promote it if we are strictly
// increasing the alignment of the resultant allocation. If we keep it the
// same, we open the door to infinite loops of various kinds.
- if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return 0;
+ if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr;
- uint64_t AllocElTySize = TD->getTypeAllocSize(AllocElTy);
- uint64_t CastElTySize = TD->getTypeAllocSize(CastElTy);
- if (CastElTySize == 0 || AllocElTySize == 0) return 0;
+ uint64_t AllocElTySize = DL->getTypeAllocSize(AllocElTy);
+ uint64_t CastElTySize = DL->getTypeAllocSize(CastElTy);
+ if (CastElTySize == 0 || AllocElTySize == 0) return nullptr;
// If the allocation has multiple uses, only promote it if we're not
// shrinking the amount of memory being allocated.
- uint64_t AllocElTyStoreSize = TD->getTypeStoreSize(AllocElTy);
- uint64_t CastElTyStoreSize = TD->getTypeStoreSize(CastElTy);
- if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return 0;
+ uint64_t AllocElTyStoreSize = DL->getTypeStoreSize(AllocElTy);
+ uint64_t CastElTyStoreSize = DL->getTypeStoreSize(CastElTy);
+ if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr;
// See if we can satisfy the modulus by pulling a scale out of the array
// size argument.
@@ -120,10 +122,10 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
// If we can now satisfy the modulus, by using a non-1 scale, we really can
// do the xform.
if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
- (AllocElTySize*ArrayOffset ) % CastElTySize != 0) return 0;
+ (AllocElTySize*ArrayOffset ) % CastElTySize != 0) return nullptr;
unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
- Value *Amt = 0;
+ Value *Amt = nullptr;
if (Scale == 1) {
Amt = NumElements;
} else {
@@ -141,6 +143,7 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
New->setAlignment(AI.getAlignment());
New->takeName(&AI);
+ New->setUsedWithInAlloca(AI.isUsedWithInAlloca());
// If the allocation has multiple real uses, insert a cast and change all
// things that used it to use the new cast. This will also hack on CI, but it
@@ -161,15 +164,15 @@ Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
bool isSigned) {
if (Constant *C = dyn_cast<Constant>(V)) {
C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
- // If we got a constantexpr back, try to simplify it with TD info.
+ // If we got a constantexpr back, try to simplify it with DL info.
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- C = ConstantFoldConstantExpression(CE, TD, TLI);
+ C = ConstantFoldConstantExpression(CE, DL, TLI);
return C;
}
// Otherwise, it must be an instruction.
Instruction *I = cast<Instruction>(V);
- Instruction *Res = 0;
+ Instruction *Res = nullptr;
unsigned Opc = I->getOpcode();
switch (Opc) {
case Instruction::Add:
@@ -235,7 +238,7 @@ isEliminableCastPair(
const CastInst *CI, ///< The first cast instruction
unsigned opcode, ///< The opcode of the second cast instruction
Type *DstTy, ///< The target type for the second cast instruction
- DataLayout *TD ///< The target data for pointer size
+ const DataLayout *DL ///< The target data for pointer size
) {
Type *SrcTy = CI->getOperand(0)->getType(); // A from above
@@ -244,12 +247,12 @@ isEliminableCastPair(
// Get the opcodes of the two Cast instructions
Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());
Instruction::CastOps secondOp = Instruction::CastOps(opcode);
- Type *SrcIntPtrTy = TD && SrcTy->isPtrOrPtrVectorTy() ?
- TD->getIntPtrType(SrcTy) : 0;
- Type *MidIntPtrTy = TD && MidTy->isPtrOrPtrVectorTy() ?
- TD->getIntPtrType(MidTy) : 0;
- Type *DstIntPtrTy = TD && DstTy->isPtrOrPtrVectorTy() ?
- TD->getIntPtrType(DstTy) : 0;
+ Type *SrcIntPtrTy = DL && SrcTy->isPtrOrPtrVectorTy() ?
+ DL->getIntPtrType(SrcTy) : nullptr;
+ Type *MidIntPtrTy = DL && MidTy->isPtrOrPtrVectorTy() ?
+ DL->getIntPtrType(MidTy) : nullptr;
+ Type *DstIntPtrTy = DL && DstTy->isPtrOrPtrVectorTy() ?
+ DL->getIntPtrType(DstTy) : nullptr;
unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
DstTy, SrcIntPtrTy, MidIntPtrTy,
DstIntPtrTy);
@@ -275,7 +278,7 @@ bool InstCombiner::ShouldOptimizeCast(Instruction::CastOps opc, const Value *V,
// If this is another cast that can be eliminated, we prefer to have it
// eliminated.
if (const CastInst *CI = dyn_cast<CastInst>(V))
- if (isEliminableCastPair(CI, opc, Ty, TD))
+ if (isEliminableCastPair(CI, opc, Ty, DL))
return false;
// If this is a vector sext from a compare, then we don't want to break the
@@ -295,7 +298,7 @@ Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
// eliminate it now.
if (CastInst *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
if (Instruction::CastOps opc =
- isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), TD)) {
+ isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), DL)) {
// The first cast (CSrc) is eliminable so we need to fix up or replace
// the second cast (CI). CSrc will then have a good chance of being dead.
return CastInst::Create(opc, CSrc->getOperand(0), CI.getType());
@@ -318,7 +321,7 @@ Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
return NV;
}
- return 0;
+ return nullptr;
}
/// CanEvaluateTruncated - Return true if we can evaluate the specified
@@ -470,7 +473,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
}
// Transform trunc(lshr (zext A), Cst) to eliminate one type conversion.
- Value *A = 0; ConstantInt *Cst = 0;
+ Value *A = nullptr; ConstantInt *Cst = nullptr;
if (Src->hasOneUse() &&
match(Src, m_LShr(m_ZExt(m_Value(A)), m_ConstantInt(Cst)))) {
// We have three types to worry about here, the type of A, the source of
@@ -502,7 +505,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
ConstantExpr::getTrunc(Cst, CI.getType()));
}
- return 0;
+ return nullptr;
}
/// transformZExtICmp - Transform (zext icmp) to bitwise / integer operations
@@ -550,7 +553,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
// If Op1C some other power of two, convert:
uint32_t BitWidth = Op1C->getType()->getBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- ComputeMaskedBits(ICI->getOperand(0), KnownZero, KnownOne);
+ computeKnownBits(ICI->getOperand(0), KnownZero, KnownOne);
APInt KnownZeroMask(~KnownZero);
if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
@@ -598,8 +601,8 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
APInt KnownZeroLHS(BitWidth, 0), KnownOneLHS(BitWidth, 0);
APInt KnownZeroRHS(BitWidth, 0), KnownOneRHS(BitWidth, 0);
- ComputeMaskedBits(LHS, KnownZeroLHS, KnownOneLHS);
- ComputeMaskedBits(RHS, KnownZeroRHS, KnownOneRHS);
+ computeKnownBits(LHS, KnownZeroLHS, KnownOneLHS);
+ computeKnownBits(RHS, KnownZeroRHS, KnownOneRHS);
if (KnownZeroLHS == KnownZeroRHS && KnownOneLHS == KnownOneRHS) {
APInt KnownBits = KnownZeroLHS | KnownOneLHS;
@@ -627,7 +630,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
}
}
- return 0;
+ return nullptr;
}
/// CanEvaluateZExtd - Determine if the specified value can be computed in the
@@ -757,8 +760,8 @@ static bool CanEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear) {
Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
// If this zero extend is only used by a truncate, let the truncate be
// eliminated before we try to optimize this zext.
- if (CI.hasOneUse() && isa<TruncInst>(CI.use_back()))
- return 0;
+ if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
+ return nullptr;
// If one of the common conversion will work, do it.
if (Instruction *Result = commonCastTransforms(CI))
@@ -858,42 +861,32 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
}
}
- // zext(trunc(t) & C) -> (t & zext(C)).
- if (SrcI && SrcI->getOpcode() == Instruction::And && SrcI->hasOneUse())
- if (ConstantInt *C = dyn_cast<ConstantInt>(SrcI->getOperand(1)))
- if (TruncInst *TI = dyn_cast<TruncInst>(SrcI->getOperand(0))) {
- Value *TI0 = TI->getOperand(0);
- if (TI0->getType() == CI.getType())
- return
- BinaryOperator::CreateAnd(TI0,
- ConstantExpr::getZExt(C, CI.getType()));
- }
-
- // zext((trunc(t) & C) ^ C) -> ((t & zext(C)) ^ zext(C)).
- if (SrcI && SrcI->getOpcode() == Instruction::Xor && SrcI->hasOneUse())
- if (ConstantInt *C = dyn_cast<ConstantInt>(SrcI->getOperand(1)))
- if (BinaryOperator *And = dyn_cast<BinaryOperator>(SrcI->getOperand(0)))
- if (And->getOpcode() == Instruction::And && And->hasOneUse() &&
- And->getOperand(1) == C)
- if (TruncInst *TI = dyn_cast<TruncInst>(And->getOperand(0))) {
- Value *TI0 = TI->getOperand(0);
- if (TI0->getType() == CI.getType()) {
- Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
- Value *NewAnd = Builder->CreateAnd(TI0, ZC);
- return BinaryOperator::CreateXor(NewAnd, ZC);
- }
- }
+ // zext(trunc(X) & C) -> (X & zext(C)).
+ Constant *C;
+ Value *X;
+ if (SrcI &&
+ match(SrcI, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Constant(C)))) &&
+ X->getType() == CI.getType())
+ return BinaryOperator::CreateAnd(X, ConstantExpr::getZExt(C, CI.getType()));
+
+ // zext((trunc(X) & C) ^ C) -> ((X & zext(C)) ^ zext(C)).
+ Value *And;
+ if (SrcI && match(SrcI, m_OneUse(m_Xor(m_Value(And), m_Constant(C)))) &&
+ match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) &&
+ X->getType() == CI.getType()) {
+ Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
+ return BinaryOperator::CreateXor(Builder->CreateAnd(X, ZC), ZC);
+ }
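The identity behind these two rewrites, checked with plain integers (widths here are examples only): masking in the narrow type and zero-extending produces the same bits as masking the wide value with the zero-extended constant, so the trunc can be dropped.

#include <cstdint>

uint64_t narrowMaskThenZext(uint64_t x) {
  uint8_t t = uint8_t(x) & 0x0F;  // trunc to i8, then 'and' with C
  return uint64_t(t);             // zext back to i64
}
uint64_t maskWide(uint64_t x) {
  return x & 0x0F;                // single 'and' with zext(C), as the fold emits
}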
// zext (xor i1 X, true) to i32 --> xor (zext i1 X to i32), 1
- Value *X;
- if (SrcI && SrcI->hasOneUse() && SrcI->getType()->isIntegerTy(1) &&
- match(SrcI, m_Not(m_Value(X))) &&
- (!X->hasOneUse() || !isa<CmpInst>(X))) {
+ if (SrcI && SrcI->hasOneUse() &&
+ SrcI->getType()->getScalarType()->isIntegerTy(1) &&
+ match(SrcI, m_Not(m_Value(X))) && (!X->hasOneUse() || !isa<CmpInst>(X))) {
Value *New = Builder->CreateZExt(X, CI.getType());
return BinaryOperator::CreateXor(New, ConstantInt::get(CI.getType(), 1));
}
- return 0;
+ return nullptr;
}
/// transformSExtICmp - Transform (sext icmp) to bitwise / integer operations
@@ -902,10 +895,10 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
Value *Op0 = ICI->getOperand(0), *Op1 = ICI->getOperand(1);
ICmpInst::Predicate Pred = ICI->getPredicate();
- if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
+ if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
// (x <s 0) ? -1 : 0 -> ashr x, 31 -> all ones if negative
// (x >s -1) ? -1 : 0 -> not (ashr x, 31) -> all ones if positive
- if ((Pred == ICmpInst::ICMP_SLT && Op1C->isZero()) ||
+ if ((Pred == ICmpInst::ICMP_SLT && Op1C->isNullValue()) ||
(Pred == ICmpInst::ICMP_SGT && Op1C->isAllOnesValue())) {
Value *Sh = ConstantInt::get(Op0->getType(),
@@ -918,7 +911,9 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
In = Builder->CreateNot(In, In->getName()+".not");
return ReplaceInstUsesWith(CI, In);
}
+ }
+ if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
// If we know that only one bit of the LHS of the icmp can be set and we
// have an equality comparison with zero or a power of 2, we can transform
// the icmp and sext into bitwise/integer operations.
@@ -926,7 +921,7 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
unsigned BitWidth = Op1C->getType()->getBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- ComputeMaskedBits(Op0, KnownZero, KnownOne);
+ computeKnownBits(Op0, KnownZero, KnownOne);
APInt KnownZeroMask(~KnownZero);
if (KnownZeroMask.isPowerOf2()) {
@@ -975,20 +970,7 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
}
}
- // vector (x <s 0) ? -1 : 0 -> ashr x, 31 -> all ones if signed.
- if (VectorType *VTy = dyn_cast<VectorType>(CI.getType())) {
- if (Pred == ICmpInst::ICMP_SLT && match(Op1, m_Zero()) &&
- Op0->getType() == CI.getType()) {
- Type *EltTy = VTy->getElementType();
-
- // splat the shift constant to a constant vector.
- Constant *VSh = ConstantInt::get(VTy, EltTy->getScalarSizeInBits()-1);
- Value *In = Builder->CreateAShr(Op0, VSh, Op0->getName()+".lobit");
- return ReplaceInstUsesWith(CI, In);
- }
- }
-
- return 0;
+ return nullptr;
}
/// CanEvaluateSExtd - Return true if we can take the specified value
@@ -1059,8 +1041,8 @@ static bool CanEvaluateSExtd(Value *V, Type *Ty) {
Instruction *InstCombiner::visitSExt(SExtInst &CI) {
// If this sign extend is only used by a truncate, let the truncate be
// eliminated before we try to optimize this sext.
- if (CI.hasOneUse() && isa<TruncInst>(CI.use_back()))
- return 0;
+ if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
+ return nullptr;
if (Instruction *I = commonCastTransforms(CI))
return I;
@@ -1128,9 +1110,9 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
// into:
// %a = shl i32 %i, 30
// %d = ashr i32 %a, 30
- Value *A = 0;
+ Value *A = nullptr;
// TODO: Eventually this could be subsumed by EvaluateInDifferentType.
- ConstantInt *BA = 0, *CA = 0;
+ ConstantInt *BA = nullptr, *CA = nullptr;
if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_ConstantInt(BA)),
m_ConstantInt(CA))) &&
BA == CA && A->getType() == CI.getType()) {
@@ -1142,7 +1124,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
return BinaryOperator::CreateAShr(A, ShAmtV);
}
- return 0;
+ return nullptr;
}
@@ -1154,7 +1136,7 @@ static Constant *FitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
(void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
if (!losesInfo)
return ConstantFP::get(CFP->getContext(), F);
- return 0;
+ return nullptr;
}
/// LookThroughFPExtensions - If this is an fp extension instruction, look
@@ -1189,43 +1171,112 @@ static Value *LookThroughFPExtensions(Value *V) {
Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
if (Instruction *I = commonCastTransforms(CI))
return I;
-
- // If we have fptrunc(fadd (fpextend x), (fpextend y)), where x and y are
- // smaller than the destination type, we can eliminate the truncate by doing
- // the add as the smaller type. This applies to fadd/fsub/fmul/fdiv as well
- // as many builtins (sqrt, etc).
+ // If we have fptrunc(OpI (fpextend x), (fpextend y)), we would like to
+ // simplify this expression to avoid one or more of the trunc/extend
+ // operations if we can do so without changing the numerical results.
+ //
+ // The exact manner in which the widths of the operands interact to limit
+ // what we can and cannot do safely varies from operation to operation, and
+ // is explained below in the various case statements.
BinaryOperator *OpI = dyn_cast<BinaryOperator>(CI.getOperand(0));
if (OpI && OpI->hasOneUse()) {
+ Value *LHSOrig = LookThroughFPExtensions(OpI->getOperand(0));
+ Value *RHSOrig = LookThroughFPExtensions(OpI->getOperand(1));
+ unsigned OpWidth = OpI->getType()->getFPMantissaWidth();
+ unsigned LHSWidth = LHSOrig->getType()->getFPMantissaWidth();
+ unsigned RHSWidth = RHSOrig->getType()->getFPMantissaWidth();
+ unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
+ unsigned DstWidth = CI.getType()->getFPMantissaWidth();
switch (OpI->getOpcode()) {
- default: break;
- case Instruction::FAdd:
- case Instruction::FSub:
- case Instruction::FMul:
- case Instruction::FDiv:
- case Instruction::FRem:
- Type *SrcTy = OpI->getType();
- Value *LHSTrunc = LookThroughFPExtensions(OpI->getOperand(0));
- Value *RHSTrunc = LookThroughFPExtensions(OpI->getOperand(1));
- if (LHSTrunc->getType() != SrcTy &&
- RHSTrunc->getType() != SrcTy) {
- unsigned DstSize = CI.getType()->getScalarSizeInBits();
- // If the source types were both smaller than the destination type of
- // the cast, do this xform.
- if (LHSTrunc->getType()->getScalarSizeInBits() <= DstSize &&
- RHSTrunc->getType()->getScalarSizeInBits() <= DstSize) {
- LHSTrunc = Builder->CreateFPExt(LHSTrunc, CI.getType());
- RHSTrunc = Builder->CreateFPExt(RHSTrunc, CI.getType());
- return BinaryOperator::Create(OpI->getOpcode(), LHSTrunc, RHSTrunc);
+ default: break;
+ case Instruction::FAdd:
+ case Instruction::FSub:
+ // For addition and subtraction, the infinitely precise result can
+ // essentially be arbitrarily wide; proving that double rounding
+ // will not occur because the result of OpI is exact (as we will for
+ // FMul, for example) is hopeless. However, we *can* nonetheless
+ // frequently know that double rounding cannot occur (or that it is
+ // innocuous) by taking advantage of the specific structure of
+ // infinitely-precise results that admit double rounding.
+ //
+ // Specifically, if OpWidth >= 2*DstWidth+1 and DstWidth is sufficient
+ // to represent both sources, we can guarantee that the double
+ // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
+ // "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
+ // for proof of this fact).
+ //
+ // Note: Figueroa does not consider the case where DstFormat !=
+ // SrcFormat. It's possible (likely even!) that this analysis
+ // could be tightened for those cases, but they are rare (the main
+ // case of interest here is (float)((double)float + float)).
+ if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
+ if (LHSOrig->getType() != CI.getType())
+ LHSOrig = Builder->CreateFPExt(LHSOrig, CI.getType());
+ if (RHSOrig->getType() != CI.getType())
+ RHSOrig = Builder->CreateFPExt(RHSOrig, CI.getType());
+ Instruction *RI =
+ BinaryOperator::Create(OpI->getOpcode(), LHSOrig, RHSOrig);
+ RI->copyFastMathFlags(OpI);
+ return RI;
}
- }
- break;
+ break;
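Worked numbers for the bound above in the motivating case (float)((double)a + (double)b), assuming IEEE single and double formats: the double intermediate carries a 53-bit significand, the float operands and destination 24 bits, and 53 >= 2*24 + 1, so the fpext/fptrunc pair can be dropped without double rounding.

// Significand widths including the hidden bit: double = 53, single = 24.
static_assert(53 >= 2 * 24 + 1, "double absorbs the float add/sub rounding");
static_assert(24 >= 24, "the destination format represents both sources");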
+ case Instruction::FMul:
+ // For multiplication, the infinitely precise result has at most
+ // LHSWidth + RHSWidth significant bits; if OpWidth is sufficient
+ // that such a value can be exactly represented, then no double
+ // rounding can possibly occur; we can safely perform the operation
+ // in the destination format if it can represent both sources.
+ if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
+ if (LHSOrig->getType() != CI.getType())
+ LHSOrig = Builder->CreateFPExt(LHSOrig, CI.getType());
+ if (RHSOrig->getType() != CI.getType())
+ RHSOrig = Builder->CreateFPExt(RHSOrig, CI.getType());
+ Instruction *RI =
+ BinaryOperator::CreateFMul(LHSOrig, RHSOrig);
+ RI->copyFastMathFlags(OpI);
+ return RI;
+ }
+ break;
+ case Instruction::FDiv:
+ // For division, we again use the bound from Figueroa's
+ // dissertation. I am entirely certain that this bound can be
+ // tightened in the unbalanced operand case by an analysis based on
+ // the diophantine rational approximation bound, but the well-known
+ // condition used here is a good conservative first pass.
+ // TODO: Tighten bound via rigorous analysis of the unbalanced case.
+ if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
+ if (LHSOrig->getType() != CI.getType())
+ LHSOrig = Builder->CreateFPExt(LHSOrig, CI.getType());
+ if (RHSOrig->getType() != CI.getType())
+ RHSOrig = Builder->CreateFPExt(RHSOrig, CI.getType());
+ Instruction *RI =
+ BinaryOperator::CreateFDiv(LHSOrig, RHSOrig);
+ RI->copyFastMathFlags(OpI);
+ return RI;
+ }
+ break;
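The multiplication and division bounds with the same example widths (a sketch, again assuming IEEE single/double): a 53-bit significand holds the full 24+24-bit product exactly, and 53 >= 2*24 satisfies the division bound, so both operations can be performed directly in the destination format.

static_assert(53 >= 24 + 24, "the product of two floats is exact in double");
static_assert(53 >= 2 * 24, "the conservative division bound holds for float");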
+ case Instruction::FRem:
+ // Remainder is straightforward. Remainder is always exact, so the
+ // type of OpI doesn't enter into things at all. We simply evaluate
+ // in whichever source type is larger, then convert to the
+ // destination type.
+ if (LHSWidth < SrcWidth)
+ LHSOrig = Builder->CreateFPExt(LHSOrig, RHSOrig->getType());
+ else if (RHSWidth <= SrcWidth)
+ RHSOrig = Builder->CreateFPExt(RHSOrig, LHSOrig->getType());
+ Value *ExactResult = Builder->CreateFRem(LHSOrig, RHSOrig);
+ if (Instruction *RI = dyn_cast<Instruction>(ExactResult))
+ RI->copyFastMathFlags(OpI);
+ return CastInst::CreateFPCast(ExactResult, CI.getType());
}
// (fptrunc (fneg x)) -> (fneg (fptrunc x))
if (BinaryOperator::isFNeg(OpI)) {
Value *InnerTrunc = Builder->CreateFPTrunc(OpI->getOperand(1),
CI.getType());
- return BinaryOperator::CreateFNeg(InnerTrunc);
+ Instruction *RI = BinaryOperator::CreateFNeg(InnerTrunc);
+ RI->copyFastMathFlags(OpI);
+ return RI;
}
}
@@ -1297,7 +1348,7 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
}
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitFPExt(CastInst &CI) {
@@ -1306,7 +1357,7 @@ Instruction *InstCombiner::visitFPExt(CastInst &CI) {
Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
- if (OpI == 0)
+ if (!OpI)
return commonCastTransforms(FI);
// fptoui(uitofp(X)) --> X
@@ -1326,7 +1377,7 @@ Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) {
Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
- if (OpI == 0)
+ if (!OpI)
return commonCastTransforms(FI);
// fptosi(sitofp(X)) --> X
@@ -1357,11 +1408,11 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
// trunc or zext to the intptr_t type, then inttoptr of it. This allows the
// cast to be exposed to other transforms.
- if (TD) {
+ if (DL) {
unsigned AS = CI.getAddressSpace();
if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
- TD->getPointerSizeInBits(AS)) {
- Type *Ty = TD->getIntPtrType(CI.getContext(), AS);
+ DL->getPointerSizeInBits(AS)) {
+ Type *Ty = DL->getIntPtrType(CI.getContext(), AS);
if (CI.getType()->isVectorTy()) // Handle vectors of pointers.
Ty = VectorType::get(Ty, CI.getType()->getVectorNumElements());
@@ -1373,7 +1424,7 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
if (Instruction *I = commonCastTransforms(CI))
return I;
- return 0;
+ return nullptr;
}
/// @brief Implement the transforms for cast of pointer (bitcast/ptrtoint)
@@ -1383,7 +1434,12 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
// If casting the result of a getelementptr instruction with no offset, turn
// this into a cast of the original pointer!
- if (GEP->hasAllZeroIndices()) {
+ if (GEP->hasAllZeroIndices() &&
+ // If CI is an addrspacecast and GEP changes the pointer type, merging
+ // GEP into CI would undo canonicalizing addrspacecast with different
+ // pointer types, causing infinite loops.
+ (!isa<AddrSpaceCastInst>(CI) ||
+ GEP->getType() == GEP->getPointerOperand()->getType())) {
// Changing the cast operand is usually not a good idea but it is safe
// here because the pointer operand is being replaced with another
// pointer operand so the opcode doesn't need to change.
@@ -1392,7 +1448,7 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
return &CI;
}
- if (!TD)
+ if (!DL)
return commonCastTransforms(CI);
// If the GEP has a single use, and the base pointer is a bitcast, and the
@@ -1400,12 +1456,12 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
// instructions into fewer. This typically happens with unions and other
// non-type-safe code.
unsigned AS = GEP->getPointerAddressSpace();
- unsigned OffsetBits = TD->getPointerSizeInBits(AS);
+ unsigned OffsetBits = DL->getPointerSizeInBits(AS);
APInt Offset(OffsetBits, 0);
BitCastInst *BCI = dyn_cast<BitCastInst>(GEP->getOperand(0));
if (GEP->hasOneUse() &&
BCI &&
- GEP->accumulateConstantOffset(*TD, Offset)) {
+ GEP->accumulateConstantOffset(*DL, Offset)) {
// Get the base pointer input of the bitcast, and the type it points to.
Value *OrigBase = BCI->getOperand(0);
SmallVector<Value*, 8> NewIndices;
@@ -1436,16 +1492,16 @@ Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
// do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast
// to be exposed to other transforms.
- if (!TD)
+ if (!DL)
return commonPointerCastTransforms(CI);
Type *Ty = CI.getType();
unsigned AS = CI.getPointerAddressSpace();
- if (Ty->getScalarSizeInBits() == TD->getPointerSizeInBits(AS))
+ if (Ty->getScalarSizeInBits() == DL->getPointerSizeInBits(AS))
return commonPointerCastTransforms(CI);
- Type *PtrTy = TD->getIntPtrType(CI.getContext(), AS);
+ Type *PtrTy = DL->getIntPtrType(CI.getContext(), AS);
if (Ty->isVectorTy()) // Handle vectors of pointers.
PtrTy = VectorType::get(PtrTy, Ty->getVectorNumElements());
@@ -1472,7 +1528,7 @@ static Instruction *OptimizeVectorResize(Value *InVal, VectorType *DestTy,
// there yet.
if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
DestTy->getElementType()->getPrimitiveSizeInBits())
- return 0;
+ return nullptr;
SrcTy = VectorType::get(DestTy->getElementType(), SrcTy->getNumElements());
InVal = IC.Builder->CreateBitCast(InVal, SrcTy);
@@ -1550,7 +1606,7 @@ static bool CollectInsertionElements(Value *V, unsigned Shift,
ElementIndex = Elements.size() - ElementIndex - 1;
// Fail if multiple elements are inserted into this slot.
- if (Elements[ElementIndex] != 0)
+ if (Elements[ElementIndex])
return false;
Elements[ElementIndex] = V;
@@ -1590,7 +1646,7 @@ static bool CollectInsertionElements(Value *V, unsigned Shift,
if (!V->hasOneUse()) return false;
Instruction *I = dyn_cast<Instruction>(V);
- if (I == 0) return false;
+ if (!I) return false;
switch (I->getOpcode()) {
default: return false; // Unhandled case.
case Instruction::BitCast:
@@ -1611,7 +1667,7 @@ static bool CollectInsertionElements(Value *V, unsigned Shift,
case Instruction::Shl: {
// Must be shifting by a constant that is a multiple of the element size.
ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
- if (CI == 0) return false;
+ if (!CI) return false;
Shift += CI->getZExtValue();
if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false;
return CollectInsertionElements(I->getOperand(0), Shift,
@@ -1639,7 +1695,7 @@ static bool CollectInsertionElements(Value *V, unsigned Shift,
static Value *OptimizeIntegerToVectorInsertions(BitCastInst &CI,
InstCombiner &IC) {
// We need to know the target byte order to perform this optimization.
- if (!IC.getDataLayout()) return 0;
+ if (!IC.getDataLayout()) return nullptr;
VectorType *DestVecTy = cast<VectorType>(CI.getType());
Value *IntInput = CI.getOperand(0);
@@ -1647,14 +1703,14 @@ static Value *OptimizeIntegerToVectorInsertions(BitCastInst &CI,
SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
if (!CollectInsertionElements(IntInput, 0, Elements,
DestVecTy->getElementType(), IC))
- return 0;
+ return nullptr;
// If we succeeded, we know that all of the element are specified by Elements
// or are zero if Elements has a null entry. Recast this as a set of
// insertions.
Value *Result = Constant::getNullValue(CI.getType());
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
- if (Elements[i] == 0) continue; // Unset element.
+ if (!Elements[i]) continue; // Unset element.
Result = IC.Builder->CreateInsertElement(Result, Elements[i],
IC.Builder->getInt32(i));
@@ -1668,14 +1724,14 @@ static Value *OptimizeIntegerToVectorInsertions(BitCastInst &CI,
/// bitcast. The various long double bitcasts can't get in here.
static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){
// We need to know the target byte order to perform this optimization.
- if (!IC.getDataLayout()) return 0;
+ if (!IC.getDataLayout()) return nullptr;
Value *Src = CI.getOperand(0);
Type *DestTy = CI.getType();
// If this is a bitcast from int to float, check to see if the int is an
// extraction from a vector.
- Value *VecInput = 0;
+ Value *VecInput = nullptr;
// bitcast(trunc(bitcast(somevector)))
if (match(Src, m_Trunc(m_BitCast(m_Value(VecInput)))) &&
isa<VectorType>(VecInput->getType())) {
@@ -1699,7 +1755,7 @@ static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){
}
// bitcast(trunc(lshr(bitcast(somevector), cst))
- ConstantInt *ShAmt = 0;
+ ConstantInt *ShAmt = nullptr;
if (match(Src, m_Trunc(m_LShr(m_BitCast(m_Value(VecInput)),
m_ConstantInt(ShAmt)))) &&
isa<VectorType>(VecInput->getType())) {
@@ -1721,7 +1777,7 @@ static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){
return ExtractElementInst::Create(VecInput, IC.Builder->getInt32(Elt));
}
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
@@ -1741,11 +1797,6 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
Type *DstElTy = DstPTy->getElementType();
Type *SrcElTy = SrcPTy->getElementType();
- // If the address spaces don't match, don't eliminate the bitcast, which is
- // required for changing types.
- if (SrcPTy->getAddressSpace() != DstPTy->getAddressSpace())
- return 0;
-
// If we are casting an alloca to a pointer to a type of the same
// size, rewrite the allocation instruction to allocate the "right" type.
// There is no need to modify malloc calls because it is their bitcast that
@@ -1858,5 +1909,24 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
}
Instruction *InstCombiner::visitAddrSpaceCast(AddrSpaceCastInst &CI) {
- return commonCastTransforms(CI);
+ // If the destination pointer element type is not the same as the source's,
+ // first do a bitcast to the destination element type, and then the addrspacecast.
+ // This allows the cast to be exposed to other transforms.
+ Value *Src = CI.getOperand(0);
+ PointerType *SrcTy = cast<PointerType>(Src->getType()->getScalarType());
+ PointerType *DestTy = cast<PointerType>(CI.getType()->getScalarType());
+
+ Type *DestElemTy = DestTy->getElementType();
+ if (SrcTy->getElementType() != DestElemTy) {
+ Type *MidTy = PointerType::get(DestElemTy, SrcTy->getAddressSpace());
+ if (VectorType *VT = dyn_cast<VectorType>(CI.getType())) {
+ // Handle vectors of pointers.
+ MidTy = VectorType::get(MidTy, VT->getNumElements());
+ }
+
+ Value *NewBitCast = Builder->CreateBitCast(Src, MidTy);
+ return new AddrSpaceCastInst(NewBitCast, CI.getType());
+ }
+
+ return commonPointerCastTransforms(CI);
}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 9bb65ef..5e71c5c 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -15,28 +15,21 @@
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/Support/ConstantRange.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Support/PatternMatch.h"
+#include "llvm/IR/PatternMatch.h"
#include "llvm/Target/TargetLibraryInfo.h"
using namespace llvm;
using namespace PatternMatch;
+#define DEBUG_TYPE "instcombine"
+
static ConstantInt *getOne(Constant *C) {
return ConstantInt::get(cast<IntegerType>(C->getType()), 1);
}
-/// AddOne - Add one to a ConstantInt
-static Constant *AddOne(Constant *C) {
- return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
-}
-/// SubOne - Subtract one from a ConstantInt
-static Constant *SubOne(Constant *C) {
- return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
-}
-
static ConstantInt *ExtractElement(Constant *V, Constant *Idx) {
return cast<ConstantInt>(ConstantExpr::getExtractElement(V, Idx));
}
@@ -227,15 +220,15 @@ Instruction *InstCombiner::
FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
CmpInst &ICI, ConstantInt *AndCst) {
// We need TD information to know the pointer size unless this is inbounds.
- if (!GEP->isInBounds() && TD == 0)
- return 0;
+ if (!GEP->isInBounds() && !DL)
+ return nullptr;
Constant *Init = GV->getInitializer();
if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
- return 0;
+ return nullptr;
uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
- if (ArrayElementCount > 1024) return 0; // Don't blow up on huge arrays.
+ if (ArrayElementCount > 1024) return nullptr; // Don't blow up on huge arrays.
// There are many forms of this optimization we can handle, for now, just do
// the simple index into a single-dimensional array.
@@ -245,7 +238,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
!isa<ConstantInt>(GEP->getOperand(1)) ||
!cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
isa<Constant>(GEP->getOperand(2)))
- return 0;
+ return nullptr;
// Check that indices after the variable are constants and in-range for the
// type they index. Collect the indices. This is typically for arrays of
@@ -255,18 +248,18 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
Type *EltTy = Init->getType()->getArrayElementType();
for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
- if (Idx == 0) return 0; // Variable index.
+ if (!Idx) return nullptr; // Variable index.
uint64_t IdxVal = Idx->getZExtValue();
- if ((unsigned)IdxVal != IdxVal) return 0; // Too large array index.
+ if ((unsigned)IdxVal != IdxVal) return nullptr; // Too large array index.
if (StructType *STy = dyn_cast<StructType>(EltTy))
EltTy = STy->getElementType(IdxVal);
else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
- if (IdxVal >= ATy->getNumElements()) return 0;
+ if (IdxVal >= ATy->getNumElements()) return nullptr;
EltTy = ATy->getElementType();
} else {
- return 0; // Unknown type.
+ return nullptr; // Unknown type.
}
LaterIndices.push_back(IdxVal);
@@ -305,7 +298,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
Constant *Elt = Init->getAggregateElement(i);
- if (Elt == 0) return 0;
+ if (!Elt) return nullptr;
// If this is indexing an array of structures, get the structure element.
if (!LaterIndices.empty())
@@ -316,7 +309,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// Find out if the comparison would be true or false for the i'th element.
Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
- CompareRHS, TD, TLI);
+ CompareRHS, DL, TLI);
// If the result is undef for this element, ignore it.
if (isa<UndefValue>(C)) {
// Extend range state machines to cover this element in case there is an
@@ -330,7 +323,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// If we can't compute the result for any of the elements, we have to give
// up evaluating the entire conditional.
- if (!isa<ConstantInt>(C)) return 0;
+ if (!isa<ConstantInt>(C)) return nullptr;
// Otherwise, we know if the comparison is true or false for this element,
// update our state machines.
@@ -384,7 +377,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
FalseRangeEnd == Overdefined)
- return 0;
+ return nullptr;
}
// Now that we've scanned the entire array, emit our new comparison(s). We
@@ -395,7 +388,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// index down like the GEP would do implicitly. We don't have to do this for
// an inbounds GEP because the index can't be out of range.
if (!GEP->isInBounds()) {
- Type *IntPtrTy = TD->getIntPtrType(GEP->getType());
+ Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
Idx = Builder->CreateTrunc(Idx, IntPtrTy);
@@ -476,7 +469,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// of this load, replace it with computation that does:
// ((magic_cst >> i) & 1) != 0
{
- Type *Ty = 0;
+ Type *Ty = nullptr;
// Look for an appropriate type:
// - The type of Idx if the magic fits
@@ -484,12 +477,12 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// - Default to i32
if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
Ty = Idx->getType();
- else if (TD)
- Ty = TD->getSmallestLegalIntType(Init->getContext(), ArrayElementCount);
+ else if (DL)
+ Ty = DL->getSmallestLegalIntType(Init->getContext(), ArrayElementCount);
else if (ArrayElementCount <= 32)
Ty = Type::getInt32Ty(Init->getContext());
- if (Ty != 0) {
+ if (Ty) {
Value *V = Builder->CreateIntCast(Idx, Ty, false);
V = Builder->CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
V = Builder->CreateAnd(ConstantInt::get(Ty, 1), V);
@@ -497,7 +490,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
}
}
- return 0;
+ return nullptr;
}
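A minimal source-level illustration of what FoldCmpLoadFromIndexedGlobal targets (editor's sketch, not part of this diff; names are made up): a compare of a load from a small constant array can be replaced by a bit test against a "magic" bitvector whose i-th bit records the comparison result for element i.

  // C++ sketch of the input pattern. Elements 1 and 2 equal 5, so the
  // comparison can be rewritten roughly into ((6 >> i) & 1) != 0.
  static const int table[4] = {1, 5, 5, 9};

  bool is_five(unsigned i) {
    return table[i] == 5;
  }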
@@ -512,7 +505,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
/// If we can't emit an optimized form for this expression, this returns null.
///
static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
- DataLayout &TD = *IC.getDataLayout();
+ const DataLayout &DL = *IC.getDataLayout();
gep_type_iterator GTI = gep_type_begin(GEP);
// Check to see if this gep only has a single variable index. If so, and if
@@ -529,9 +522,9 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
// Handle a struct index, which adds its field offset to the pointer.
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
- Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
+ Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
} else {
- uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
+ uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
Offset += Size*CI->getSExtValue();
}
} else {
@@ -542,26 +535,26 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
// If there are no variable indices, we must have a constant offset, just
// evaluate it the general way.
- if (i == e) return 0;
+ if (i == e) return nullptr;
Value *VariableIdx = GEP->getOperand(i);
// Determine the scale factor of the variable element. For example, this is
// 4 if the variable index is into an array of i32.
- uint64_t VariableScale = TD.getTypeAllocSize(GTI.getIndexedType());
+ uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());
// Verify that there are no other variable indices. If so, emit the hard way.
for (++i, ++GTI; i != e; ++i, ++GTI) {
ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
- if (!CI) return 0;
+ if (!CI) return nullptr;
// Compute the aggregate offset of constant indices.
if (CI->isZero()) continue;
// Handle a struct index, which adds its field offset to the pointer.
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
- Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
+ Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
} else {
- uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
+ uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
Offset += Size*CI->getSExtValue();
}
}
@@ -571,7 +564,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
// Okay, we know we have a single variable index, which must be a
// pointer/array/vector index. If there is no offset, life is simple, return
// the index.
- Type *IntPtrTy = TD.getIntPtrType(GEP->getOperand(0)->getType());
+ Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());
unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();
if (Offset == 0) {
// Cast to intptrty in case a truncation occurs. If an extension is needed,
@@ -596,7 +589,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
// multiple of the variable scale.
int64_t NewOffs = Offset / (int64_t)VariableScale;
if (Offset != NewOffs*(int64_t)VariableScale)
- return 0;
+ return nullptr;
// Okay, we can do this evaluation. Start by converting the index to intptr.
if (VariableIdx->getType() != IntPtrTy)
@@ -617,14 +610,15 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
// e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
// the maximum signed value for the pointer type.
if (ICmpInst::isSigned(Cond))
- return 0;
+ return nullptr;
- // Look through bitcasts.
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(RHS))
- RHS = BCI->getOperand(0);
+ // Look through bitcasts and addrspacecasts. We do not, however, want to
+ // remove all-zero-index GEPs.
+ if (!isa<GetElementPtrInst>(RHS))
+ RHS = RHS->stripPointerCasts();
Value *PtrBase = GEPLHS->getOperand(0);
- if (TD && PtrBase == RHS && GEPLHS->isInBounds()) {
+ if (DL && PtrBase == RHS && GEPLHS->isInBounds()) {
// ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
// This transformation (ignoring the base and scales) is valid because we
// know pointers can't overflow since the gep is inbounds. See if we can
@@ -632,7 +626,7 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
Value *Offset = EvaluateGEPOffsetExpression(GEPLHS, *this);
// If not, synthesize the offset the hard way.
- if (Offset == 0)
+ if (!Offset)
Offset = EmitGEPOffset(GEPLHS);
return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
Constant::getNullValue(Offset->getType()));
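A hedged source-level illustration of the "(gep Ptr, OFFSET) cmp Ptr ---> OFFSET cmp 0" rewrite above (editor's sketch, not part of this diff; the function name is made up):

  // Comparing an inbounds GEP against its own base collapses to a signed
  // compare of the offset against zero.
  bool past_start(int *base, long i) {
    return base + i > base;   // with DataLayout available: folds to i > 0
  }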
@@ -657,43 +651,44 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
// If we're comparing GEPs with two base pointers that only differ in type
// and both GEPs have only constant indices or just one use, then fold
// the compare with the adjusted indices.
- if (TD && GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
+ if (DL && GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
(GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
(GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
PtrBase->stripPointerCasts() ==
GEPRHS->getOperand(0)->stripPointerCasts()) {
+ Value *LOffset = EmitGEPOffset(GEPLHS);
+ Value *ROffset = EmitGEPOffset(GEPRHS);
+
+ // If we looked through an addrspacecast between differently sized address
+ // spaces, the LHS and RHS offsets are integers of different widths.
+ // Truncate the wider one to the narrower type.
+ Type *LHSIndexTy = LOffset->getType();
+ Type *RHSIndexTy = ROffset->getType();
+ if (LHSIndexTy != RHSIndexTy) {
+ if (LHSIndexTy->getPrimitiveSizeInBits() <
+ RHSIndexTy->getPrimitiveSizeInBits()) {
+ ROffset = Builder->CreateTrunc(ROffset, LHSIndexTy);
+ } else
+ LOffset = Builder->CreateTrunc(LOffset, RHSIndexTy);
+ }
+
Value *Cmp = Builder->CreateICmp(ICmpInst::getSignedPredicate(Cond),
- EmitGEPOffset(GEPLHS),
- EmitGEPOffset(GEPRHS));
+ LOffset, ROffset);
return ReplaceInstUsesWith(I, Cmp);
}
// Otherwise, the base pointers are different and the indices are
// different, bail out.
- return 0;
+ return nullptr;
}
// If one of the GEPs has all zero indices, recurse.
- bool AllZeros = true;
- for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
- if (!isa<Constant>(GEPLHS->getOperand(i)) ||
- !cast<Constant>(GEPLHS->getOperand(i))->isNullValue()) {
- AllZeros = false;
- break;
- }
- if (AllZeros)
+ if (GEPLHS->hasAllZeroIndices())
return FoldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
ICmpInst::getSwappedPredicate(Cond), I);
// If the other GEP has all zero indices, recurse.
- AllZeros = true;
- for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
- if (!isa<Constant>(GEPRHS->getOperand(i)) ||
- !cast<Constant>(GEPRHS->getOperand(i))->isNullValue()) {
- AllZeros = false;
- break;
- }
- if (AllZeros)
+ if (GEPRHS->hasAllZeroIndices())
return FoldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);
bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds();
@@ -728,7 +723,7 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
// Only lower this if the icmp is the only user of the GEP or if we expect
// the result to fold to a constant!
- if (TD &&
+ if (DL &&
GEPsInBounds &&
(isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
(isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
@@ -738,7 +733,7 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
}
}
- return 0;
+ return nullptr;
}
/// FoldICmpAddOpCst - Fold "icmp pred (X+CI), X".
@@ -821,11 +816,11 @@ Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
// if it finds it.
bool DivIsSigned = DivI->getOpcode() == Instruction::SDiv;
if (!ICI.isEquality() && DivIsSigned != ICI.isSigned())
- return 0;
+ return nullptr;
if (DivRHS->isZero())
- return 0; // The ProdOV computation fails on divide by zero.
+ return nullptr; // The ProdOV computation fails on divide by zero.
if (DivIsSigned && DivRHS->isAllOnesValue())
- return 0; // The overflow computation also screws up here
+ return nullptr; // The overflow computation also screws up here
if (DivRHS->isOne()) {
// This eliminates some funny cases with INT_MIN.
ICI.setOperand(0, DivI->getOperand(0)); // X/1 == X.
@@ -859,7 +854,7 @@ Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
// overflow variable is set to 0 if it's corresponding bound variable is valid
// -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
int LoOverflow = 0, HiOverflow = 0;
- Constant *LoBound = 0, *HiBound = 0;
+ Constant *LoBound = nullptr, *HiBound = nullptr;
if (!DivIsSigned) { // udiv
// e.g. X/5 op 3 --> [15, 20)
@@ -899,7 +894,7 @@ Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
HiBound = cast<ConstantInt>(ConstantExpr::getNeg(RangeSize));
if (HiBound == DivRHS) { // -INTMIN = INTMIN
HiOverflow = 1; // [INTMIN+1, overflow)
- HiBound = 0; // e.g. X/INTMIN = 0 --> X > INTMIN
+ HiBound = nullptr; // e.g. X/INTMIN = 0 --> X > INTMIN
}
} else if (CmpRHSV.isStrictlyPositive()) { // (X / neg) op pos
// e.g. X/-5 op 3 --> [-19, -14)
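A hedged source-level illustration of the divide/compare range rewrite referred to above (editor's sketch, not part of this diff; the function name is made up):

  // x/5 == 3 holds exactly for x in [15, 20); InstCombine emits the
  // equivalent range check instead of keeping the division.
  bool quotient_is_three(unsigned x) {
    return x / 5 == 3;   // roughly becomes (x - 15) < 5 (unsigned)
  }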
@@ -973,20 +968,20 @@ Instruction *InstCombiner::FoldICmpShrCst(ICmpInst &ICI, BinaryOperator *Shr,
uint32_t TypeBits = CmpRHSV.getBitWidth();
uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
if (ShAmtVal >= TypeBits || ShAmtVal == 0)
- return 0;
+ return nullptr;
if (!ICI.isEquality()) {
// If we have an unsigned comparison and an ashr, we can't simplify this.
// Similarly for signed comparisons with lshr.
if (ICI.isSigned() != (Shr->getOpcode() == Instruction::AShr))
- return 0;
+ return nullptr;
// Otherwise, all lshr and most exact ashr's are equivalent to a udiv/sdiv
// by a power of 2. Since we already have logic to simplify these,
// transform to div and then simplify the resultant comparison.
if (Shr->getOpcode() == Instruction::AShr &&
(!Shr->isExact() || ShAmtVal == TypeBits - 1))
- return 0;
+ return nullptr;
// Revisit the shift (to delete it).
Worklist.Add(Shr);
@@ -1003,7 +998,7 @@ Instruction *InstCombiner::FoldICmpShrCst(ICmpInst &ICI, BinaryOperator *Shr,
// If the builder folded the binop, just return it.
BinaryOperator *TheDiv = dyn_cast<BinaryOperator>(Tmp);
- if (TheDiv == 0)
+ if (!TheDiv)
return &ICI;
// Otherwise, fold this div/compare.
@@ -1046,7 +1041,7 @@ Instruction *InstCombiner::FoldICmpShrCst(ICmpInst &ICI, BinaryOperator *Shr,
Mask, Shr->getName()+".mask");
return new ICmpInst(ICI.getPredicate(), And, ShiftedCmpRHS);
}
- return 0;
+ return nullptr;
}
@@ -1065,7 +1060,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
unsigned DstBits = LHSI->getType()->getPrimitiveSizeInBits(),
SrcBits = LHSI->getOperand(0)->getType()->getPrimitiveSizeInBits();
APInt KnownZero(SrcBits, 0), KnownOne(SrcBits, 0);
- ComputeMaskedBits(LHSI->getOperand(0), KnownZero, KnownOne);
+ computeKnownBits(LHSI->getOperand(0), KnownZero, KnownOne);
// If all the high bits are known, we can do this xform.
if ((KnownZero|KnownOne).countLeadingOnes() >= SrcBits-DstBits) {
@@ -1078,17 +1073,17 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
}
break;
- case Instruction::Xor: // (icmp pred (xor X, XorCST), CI)
- if (ConstantInt *XorCST = dyn_cast<ConstantInt>(LHSI->getOperand(1))) {
+ case Instruction::Xor: // (icmp pred (xor X, XorCst), CI)
+ if (ConstantInt *XorCst = dyn_cast<ConstantInt>(LHSI->getOperand(1))) {
// If this is a comparison that tests the signbit (X < 0) or (x > -1),
// fold the xor.
if ((ICI.getPredicate() == ICmpInst::ICMP_SLT && RHSV == 0) ||
(ICI.getPredicate() == ICmpInst::ICMP_SGT && RHSV.isAllOnesValue())) {
Value *CompareVal = LHSI->getOperand(0);
- // If the sign bit of the XorCST is not set, there is no change to
+ // If the sign bit of the XorCst is not set, there is no change to
// the operation, just stop using the Xor.
- if (!XorCST->isNegative()) {
+ if (!XorCst->isNegative()) {
ICI.setOperand(0, CompareVal);
Worklist.Add(LHSI);
return &ICI;
@@ -1110,8 +1105,8 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
if (LHSI->hasOneUse()) {
// (icmp u/s (xor A SignBit), C) -> (icmp s/u A, (xor C SignBit))
- if (!ICI.isEquality() && XorCST->getValue().isSignBit()) {
- const APInt &SignBit = XorCST->getValue();
+ if (!ICI.isEquality() && XorCst->getValue().isSignBit()) {
+ const APInt &SignBit = XorCst->getValue();
ICmpInst::Predicate Pred = ICI.isSigned()
? ICI.getUnsignedPredicate()
: ICI.getSignedPredicate();
@@ -1120,8 +1115,8 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
}
// (icmp u/s (xor A ~SignBit), C) -> (icmp s/u (xor C ~SignBit), A)
- if (!ICI.isEquality() && XorCST->isMaxValue(true)) {
- const APInt &NotSignBit = XorCST->getValue();
+ if (!ICI.isEquality() && XorCst->isMaxValue(true)) {
+ const APInt &NotSignBit = XorCst->getValue();
ICmpInst::Predicate Pred = ICI.isSigned()
? ICI.getUnsignedPredicate()
: ICI.getSignedPredicate();
@@ -1134,20 +1129,20 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
// (icmp ugt (xor X, C), ~C) -> (icmp ult X, C)
// iff -C is a power of 2
if (ICI.getPredicate() == ICmpInst::ICMP_UGT &&
- XorCST->getValue() == ~RHSV && (RHSV + 1).isPowerOf2())
- return new ICmpInst(ICmpInst::ICMP_ULT, LHSI->getOperand(0), XorCST);
+ XorCst->getValue() == ~RHSV && (RHSV + 1).isPowerOf2())
+ return new ICmpInst(ICmpInst::ICMP_ULT, LHSI->getOperand(0), XorCst);
// (icmp ult (xor X, C), -C) -> (icmp uge X, C)
// iff -C is a power of 2
if (ICI.getPredicate() == ICmpInst::ICMP_ULT &&
- XorCST->getValue() == -RHSV && RHSV.isPowerOf2())
- return new ICmpInst(ICmpInst::ICMP_UGE, LHSI->getOperand(0), XorCST);
+ XorCst->getValue() == -RHSV && RHSV.isPowerOf2())
+ return new ICmpInst(ICmpInst::ICMP_UGE, LHSI->getOperand(0), XorCst);
}
break;
- case Instruction::And: // (icmp pred (and X, AndCST), RHS)
+ case Instruction::And: // (icmp pred (and X, AndCst), RHS)
if (LHSI->hasOneUse() && isa<ConstantInt>(LHSI->getOperand(1)) &&
LHSI->getOperand(0)->hasOneUse()) {
- ConstantInt *AndCST = cast<ConstantInt>(LHSI->getOperand(1));
+ ConstantInt *AndCst = cast<ConstantInt>(LHSI->getOperand(1));
// If the LHS is an AND of a truncating cast, we can widen the
// and/compare to be the input width without changing the value
@@ -1158,10 +1153,10 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
// Extending a relational comparison when we're checking the sign
// bit would not work.
if (ICI.isEquality() ||
- (!AndCST->isNegative() && RHSV.isNonNegative())) {
+ (!AndCst->isNegative() && RHSV.isNonNegative())) {
Value *NewAnd =
Builder->CreateAnd(Cast->getOperand(0),
- ConstantExpr::getZExt(AndCST, Cast->getSrcTy()));
+ ConstantExpr::getZExt(AndCst, Cast->getSrcTy()));
NewAnd->takeName(LHSI);
return new ICmpInst(ICI.getPredicate(), NewAnd,
ConstantExpr::getZExt(RHS, Cast->getSrcTy()));
@@ -1177,7 +1172,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
if (ICI.isEquality() && RHSV.getActiveBits() <= Ty->getBitWidth()) {
Value *NewAnd =
Builder->CreateAnd(Cast->getOperand(0),
- ConstantExpr::getTrunc(AndCST, Ty));
+ ConstantExpr::getTrunc(AndCst, Ty));
NewAnd->takeName(LHSI);
return new ICmpInst(ICI.getPredicate(), NewAnd,
ConstantExpr::getTrunc(RHS, Ty));
@@ -1190,49 +1185,58 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
// access.
BinaryOperator *Shift = dyn_cast<BinaryOperator>(LHSI->getOperand(0));
if (Shift && !Shift->isShift())
- Shift = 0;
+ Shift = nullptr;
ConstantInt *ShAmt;
- ShAmt = Shift ? dyn_cast<ConstantInt>(Shift->getOperand(1)) : 0;
- Type *Ty = Shift ? Shift->getType() : 0; // Type of the shift.
- Type *AndTy = AndCST->getType(); // Type of the and.
-
- // We can fold this as long as we can't shift unknown bits
- // into the mask. This can happen with signed shift
- // rights, as they sign-extend. With logical shifts,
- // we must still make sure the comparison is not signed
- // because we are effectively changing the
- // position of the sign bit (PR17827).
- // TODO: We can relax these constraints a bit more.
+ ShAmt = Shift ? dyn_cast<ConstantInt>(Shift->getOperand(1)) : nullptr;
+
+ // This seemingly simple opportunity to fold away a shift turns out to
+ // be rather complicated. See PR17827
+ // ( http://llvm.org/bugs/show_bug.cgi?id=17827 ) for details.
if (ShAmt) {
bool CanFold = false;
unsigned ShiftOpcode = Shift->getOpcode();
if (ShiftOpcode == Instruction::AShr) {
- // To test for the bad case of the signed shr, see if any
- // of the bits shifted in could be tested after the mask.
- uint32_t TyBits = Ty->getPrimitiveSizeInBits();
- int ShAmtVal = TyBits - ShAmt->getLimitedValue(TyBits);
-
- uint32_t BitWidth = AndTy->getPrimitiveSizeInBits();
- if ((APInt::getHighBitsSet(BitWidth, BitWidth-ShAmtVal) &
- AndCST->getValue()) == 0)
+ // There may be some constraints that make this possible,
+ // but nothing simple has been discovered yet.
+ CanFold = false;
+ } else if (ShiftOpcode == Instruction::Shl) {
+ // For a left shift, we can fold if the comparison is not signed.
+ // We can also fold a signed comparison if the mask value and
+ // comparison value are not negative. These constraints may not be
+ // obvious, but we can prove that they are correct using an SMT
+ // solver.
+ if (!ICI.isSigned() || (!AndCst->isNegative() && !RHS->isNegative()))
+ CanFold = true;
+ } else if (ShiftOpcode == Instruction::LShr) {
+ // For a logical right shift, we can fold if the comparison is not
+ // signed. We can also fold a signed comparison if the shifted mask
+ // value and the shifted comparison value are not negative.
+ // These constraints may not be obvious, but we can prove that they
+ // are correct using an SMT solver.
+ if (!ICI.isSigned())
CanFold = true;
- } else if (ShiftOpcode == Instruction::Shl ||
- ShiftOpcode == Instruction::LShr) {
- CanFold = !ICI.isSigned();
+ else {
+ ConstantInt *ShiftedAndCst =
+ cast<ConstantInt>(ConstantExpr::getShl(AndCst, ShAmt));
+ ConstantInt *ShiftedRHSCst =
+ cast<ConstantInt>(ConstantExpr::getShl(RHS, ShAmt));
+
+ if (!ShiftedAndCst->isNegative() && !ShiftedRHSCst->isNegative())
+ CanFold = true;
+ }
}
if (CanFold) {
Constant *NewCst;
- if (Shift->getOpcode() == Instruction::Shl)
+ if (ShiftOpcode == Instruction::Shl)
NewCst = ConstantExpr::getLShr(RHS, ShAmt);
else
NewCst = ConstantExpr::getShl(RHS, ShAmt);
// Check to see if we are shifting out any of the bits being
// compared.
- if (ConstantExpr::get(Shift->getOpcode(),
- NewCst, ShAmt) != RHS) {
+ if (ConstantExpr::get(ShiftOpcode, NewCst, ShAmt) != RHS) {
// If we shifted bits out, the fold is not going to work out.
// As a special case, check to see if this means that the
// result is always true or false now.
@@ -1242,12 +1246,12 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
return ReplaceInstUsesWith(ICI, Builder->getTrue());
} else {
ICI.setOperand(1, NewCst);
- Constant *NewAndCST;
- if (Shift->getOpcode() == Instruction::Shl)
- NewAndCST = ConstantExpr::getLShr(AndCST, ShAmt);
+ Constant *NewAndCst;
+ if (ShiftOpcode == Instruction::Shl)
+ NewAndCst = ConstantExpr::getLShr(AndCst, ShAmt);
else
- NewAndCST = ConstantExpr::getShl(AndCST, ShAmt);
- LHSI->setOperand(1, NewAndCST);
+ NewAndCst = ConstantExpr::getShl(AndCst, ShAmt);
+ LHSI->setOperand(1, NewAndCst);
LHSI->setOperand(0, Shift->getOperand(0));
Worklist.Add(Shift); // Shift is dead.
return &ICI;
@@ -1264,10 +1268,10 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
// Compute C << Y.
Value *NS;
if (Shift->getOpcode() == Instruction::LShr) {
- NS = Builder->CreateShl(AndCST, Shift->getOperand(1));
+ NS = Builder->CreateShl(AndCst, Shift->getOperand(1));
} else {
// Insert a logical shift.
- NS = Builder->CreateLShr(AndCST, Shift->getOperand(1));
+ NS = Builder->CreateLShr(AndCst, Shift->getOperand(1));
}
// Compute X & (C << Y).
@@ -1278,12 +1282,12 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
return &ICI;
}
- // Replace ((X & AndCST) > RHSV) with ((X & AndCST) != 0), if any
- // bit set in (X & AndCST) will produce a result greater than RHSV.
+ // Replace ((X & AndCst) > RHSV) with ((X & AndCst) != 0), if any
+ // bit set in (X & AndCst) will produce a result greater than RHSV.
if (ICI.getPredicate() == ICmpInst::ICMP_UGT) {
- unsigned NTZ = AndCST->getValue().countTrailingZeros();
- if ((NTZ < AndCST->getBitWidth()) &&
- APInt::getOneBitSet(AndCST->getBitWidth(), NTZ).ugt(RHSV))
+ unsigned NTZ = AndCst->getValue().countTrailingZeros();
+ if ((NTZ < AndCst->getBitWidth()) &&
+ APInt::getOneBitSet(AndCst->getBitWidth(), NTZ).ugt(RHSV))
return new ICmpInst(ICmpInst::ICMP_NE, LHSI,
Constant::getNullValue(RHS->getType()));
}
@@ -1777,7 +1781,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
}
}
}
- return 0;
+ return nullptr;
}
/// visitICmpInstWithCastAndCast - Handle icmp (cast x to y), (cast/cst).
@@ -1792,9 +1796,9 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
// Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
// integer type is the same size as the pointer type.
- if (TD && LHSCI->getOpcode() == Instruction::PtrToInt &&
- TD->getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth()) {
- Value *RHSOp = 0;
+ if (DL && LHSCI->getOpcode() == Instruction::PtrToInt &&
+ DL->getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth()) {
+ Value *RHSOp = nullptr;
if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) {
RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
} else if (PtrToIntInst *RHSC = dyn_cast<PtrToIntInst>(ICI.getOperand(1))) {
@@ -1812,7 +1816,7 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
// Enforce this.
if (LHSCI->getOpcode() != Instruction::ZExt &&
LHSCI->getOpcode() != Instruction::SExt)
- return 0;
+ return nullptr;
bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt;
bool isSignedCmp = ICI.isSigned();
@@ -1821,12 +1825,12 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
// Not an extension from the same type?
RHSCIOp = CI->getOperand(0);
if (RHSCIOp->getType() != LHSCIOp->getType())
- return 0;
+ return nullptr;
// If the signedness of the two casts doesn't agree (i.e. one is a sext
// and the other is a zext), then we can't handle this.
if (CI->getOpcode() != LHSCI->getOpcode())
- return 0;
+ return nullptr;
// Deal with equality cases early.
if (ICI.isEquality())
@@ -1844,7 +1848,7 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
// If we aren't dealing with a constant on the RHS, exit early
ConstantInt *CI = dyn_cast<ConstantInt>(ICI.getOperand(1));
if (!CI)
- return 0;
+ return nullptr;
// Compute the constant that would happen if we truncated to SrcTy then
// reextended to DestTy.
@@ -1873,7 +1877,7 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
// by SimplifyICmpInst, so only deal with the tricky case.
if (isSignedCmp || !isSignedExt)
- return 0;
+ return nullptr;
// Evaluate the comparison for LT (we invert for GT below). LE and GE cases
// should have been folded away previously and not enter in here.
@@ -1909,12 +1913,12 @@ static Instruction *ProcessUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
// In order to eliminate the add-with-constant, the compare can be its only
// use.
Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
- if (!AddWithCst->hasOneUse()) return 0;
+ if (!AddWithCst->hasOneUse()) return nullptr;
// If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
- if (!CI2->getValue().isPowerOf2()) return 0;
+ if (!CI2->getValue().isPowerOf2()) return nullptr;
unsigned NewWidth = CI2->getValue().countTrailingZeros();
- if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31) return 0;
+ if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31) return nullptr;
// The width of the new add formed is 1 more than the bias.
++NewWidth;
@@ -1922,7 +1926,7 @@ static Instruction *ProcessUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
// Check to see that CI1 is an all-ones value with NewWidth bits.
if (CI1->getBitWidth() == NewWidth ||
CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
- return 0;
+ return nullptr;
// This is only really a signed overflow check if the inputs have been
// sign-extended; check for that condition. For example, if CI2 is 2^31 and
@@ -1930,25 +1934,24 @@ static Instruction *ProcessUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
unsigned NeededSignBits = CI1->getBitWidth() - NewWidth + 1;
if (IC.ComputeNumSignBits(A) < NeededSignBits ||
IC.ComputeNumSignBits(B) < NeededSignBits)
- return 0;
+ return nullptr;
// In order to replace the original add with a narrower
// llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
// and truncates that discard the high bits of the add. Verify that this is
// the case.
Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
- for (Value::use_iterator UI = OrigAdd->use_begin(), E = OrigAdd->use_end();
- UI != E; ++UI) {
- if (*UI == AddWithCst) continue;
+ for (User *U : OrigAdd->users()) {
+ if (U == AddWithCst) continue;
// Only accept truncates for now. We would really like a nice recursive
// predicate like SimplifyDemandedBits, but which goes down the use-def
// chain to see which bits of a value are actually demanded. If the
// original add had another add which was then immediately truncated, we
// could still do the transformation.
- TruncInst *TI = dyn_cast<TruncInst>(*UI);
- if (TI == 0 ||
- TI->getType()->getPrimitiveSizeInBits() > NewWidth) return 0;
+ TruncInst *TI = dyn_cast<TruncInst>(U);
+ if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
+ return nullptr;
}
// If the pattern matches, truncate the inputs to the narrower type and
@@ -1984,11 +1987,11 @@ static Instruction *ProcessUAddIdiom(Instruction &I, Value *OrigAddV,
InstCombiner &IC) {
// Don't bother doing this transformation for pointers, don't do it for
// vectors.
- if (!isa<IntegerType>(OrigAddV->getType())) return 0;
+ if (!isa<IntegerType>(OrigAddV->getType())) return nullptr;
// If the add is a constant expr, then we don't bother transforming it.
Instruction *OrigAdd = dyn_cast<Instruction>(OrigAddV);
- if (OrigAdd == 0) return 0;
+ if (!OrigAdd) return nullptr;
Value *LHS = OrigAdd->getOperand(0), *RHS = OrigAdd->getOperand(1);
@@ -2009,6 +2012,240 @@ static Instruction *ProcessUAddIdiom(Instruction &I, Value *OrigAddV,
return ExtractValueInst::Create(Call, 1, "uadd.overflow");
}
+/// \brief Recognize and process an idiom involving a test for multiplication
+/// overflow.
+///
+/// The caller has matched a pattern of the form:
+/// I = cmp u (mul(zext A, zext B), V)
+/// The function checks if this is a test for overflow and, if so, replaces
+/// the multiplication with a call to the 'umul.with.overflow' intrinsic.
+///
+/// \param I Compare instruction.
+/// \param MulVal Result of the 'mul' instruction. It is one of the arguments
+/// of the compare instruction. Must be of integer type.
+/// \param OtherVal The other argument of the compare instruction.
+/// \returns Instruction which must replace the compare instruction, null if
+/// no replacement is required.
+static Instruction *ProcessUMulZExtIdiom(ICmpInst &I, Value *MulVal,
+ Value *OtherVal, InstCombiner &IC) {
+ // Don't bother doing this transformation for pointers, don't do it for
+ // vectors.
+ if (!isa<IntegerType>(MulVal->getType()))
+ return nullptr;
+
+ assert(I.getOperand(0) == MulVal || I.getOperand(1) == MulVal);
+ assert(I.getOperand(0) == OtherVal || I.getOperand(1) == OtherVal);
+ Instruction *MulInstr = cast<Instruction>(MulVal);
+ assert(MulInstr->getOpcode() == Instruction::Mul);
+
+ Instruction *LHS = cast<Instruction>(MulInstr->getOperand(0)),
+ *RHS = cast<Instruction>(MulInstr->getOperand(1));
+ assert(LHS->getOpcode() == Instruction::ZExt);
+ assert(RHS->getOpcode() == Instruction::ZExt);
+ Value *A = LHS->getOperand(0), *B = RHS->getOperand(0);
+
+ // Calculate type and width of the result produced by mul.with.overflow.
+ Type *TyA = A->getType(), *TyB = B->getType();
+ unsigned WidthA = TyA->getPrimitiveSizeInBits(),
+ WidthB = TyB->getPrimitiveSizeInBits();
+ unsigned MulWidth;
+ Type *MulType;
+ if (WidthB > WidthA) {
+ MulWidth = WidthB;
+ MulType = TyB;
+ } else {
+ MulWidth = WidthA;
+ MulType = TyA;
+ }
+
+ // In order to replace the original mul with a narrower mul.with.overflow,
+ // all uses must ignore the upper bits of the product. The number of used
+ // low bits must not be greater than the width of mul.with.overflow.
+ if (MulVal->hasNUsesOrMore(2))
+ for (User *U : MulVal->users()) {
+ if (U == &I)
+ continue;
+ if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
+ // Check if truncation ignores bits above MulWidth.
+ unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
+ if (TruncWidth > MulWidth)
+ return nullptr;
+ } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
+ // Check if AND ignores bits above MulWidth.
+ if (BO->getOpcode() != Instruction::And)
+ return nullptr;
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
+ const APInt &CVal = CI->getValue();
+ if (CVal.getBitWidth() - CVal.countLeadingZeros() > MulWidth)
+ return nullptr;
+ }
+ } else {
+ // Other uses prohibit this transformation.
+ return nullptr;
+ }
+ }
+
+ // Recognize patterns
+ switch (I.getPredicate()) {
+ case ICmpInst::ICMP_EQ:
+ case ICmpInst::ICMP_NE:
+ // Recognize pattern:
+ // mulval = mul(zext A, zext B)
+ // cmp eq/neq mulval, zext trunc mulval
+ if (ZExtInst *Zext = dyn_cast<ZExtInst>(OtherVal))
+ if (Zext->hasOneUse()) {
+ Value *ZextArg = Zext->getOperand(0);
+ if (TruncInst *Trunc = dyn_cast<TruncInst>(ZextArg))
+ if (Trunc->getType()->getPrimitiveSizeInBits() == MulWidth)
+ break; // Recognized
+ }
+
+ // Recognize pattern:
+ // mulval = mul(zext A, zext B)
+ // cmp eq/neq mulval, and(mulval, mask), where mask selects the low MulWidth bits.
+ ConstantInt *CI;
+ Value *ValToMask;
+ if (match(OtherVal, m_And(m_Value(ValToMask), m_ConstantInt(CI)))) {
+ if (ValToMask != MulVal)
+ return nullptr;
+ const APInt &CVal = CI->getValue() + 1;
+ if (CVal.isPowerOf2()) {
+ unsigned MaskWidth = CVal.logBase2();
+ if (MaskWidth == MulWidth)
+ break; // Recognized
+ }
+ }
+ return nullptr;
+
+ case ICmpInst::ICMP_UGT:
+ // Recognize pattern:
+ // mulval = mul(zext A, zext B)
+ // cmp ugt mulval, max
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
+ APInt MaxVal = APInt::getMaxValue(MulWidth);
+ MaxVal = MaxVal.zext(CI->getBitWidth());
+ if (MaxVal.eq(CI->getValue()))
+ break; // Recognized
+ }
+ return nullptr;
+
+ case ICmpInst::ICMP_UGE:
+ // Recognize pattern:
+ // mulval = mul(zext A, zext B)
+ // cmp uge mulval, max+1
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
+ APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
+ if (MaxVal.eq(CI->getValue()))
+ break; // Recognized
+ }
+ return nullptr;
+
+ case ICmpInst::ICMP_ULE:
+ // Recognize pattern:
+ // mulval = mul(zext A, zext B)
+ // cmp ule mulval, max
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
+ APInt MaxVal = APInt::getMaxValue(MulWidth);
+ MaxVal = MaxVal.zext(CI->getBitWidth());
+ if (MaxVal.eq(CI->getValue()))
+ break; // Recognized
+ }
+ return nullptr;
+
+ case ICmpInst::ICMP_ULT:
+ // Recognize pattern:
+ // mulval = mul(zext A, zext B)
+ // cmp ult mulval, max + 1
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
+ APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
+ if (MaxVal.eq(CI->getValue()))
+ break; // Recognized
+ }
+ return nullptr;
+
+ default:
+ return nullptr;
+ }
+
+ InstCombiner::BuilderTy *Builder = IC.Builder;
+ Builder->SetInsertPoint(MulInstr);
+ Module *M = I.getParent()->getParent()->getParent();
+
+ // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
+ Value *MulA = A, *MulB = B;
+ if (WidthA < MulWidth)
+ MulA = Builder->CreateZExt(A, MulType);
+ if (WidthB < MulWidth)
+ MulB = Builder->CreateZExt(B, MulType);
+ Value *F =
+ Intrinsic::getDeclaration(M, Intrinsic::umul_with_overflow, MulType);
+ CallInst *Call = Builder->CreateCall2(F, MulA, MulB, "umul");
+ IC.Worklist.Add(MulInstr);
+
+ // If there are uses of the mul result other than the comparison, we know
+ // that they are truncations or binary ANDs. Change them to use the result
+ // of mul.with.overflow and adjust the mask/size properly.
+ if (MulVal->hasNUsesOrMore(2)) {
+ Value *Mul = Builder->CreateExtractValue(Call, 0, "umul.value");
+ for (User *U : MulVal->users()) {
+ if (U == &I || U == OtherVal)
+ continue;
+ if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
+ if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
+ IC.ReplaceInstUsesWith(*TI, Mul);
+ else
+ TI->setOperand(0, Mul);
+ } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
+ assert(BO->getOpcode() == Instruction::And);
+ // Replace (mul & mask) --> zext (mul.with.overflow & short_mask)
+ ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
+ APInt ShortMask = CI->getValue().trunc(MulWidth);
+ Value *ShortAnd = Builder->CreateAnd(Mul, ShortMask);
+ Instruction *Zext =
+ cast<Instruction>(Builder->CreateZExt(ShortAnd, BO->getType()));
+ IC.Worklist.Add(Zext);
+ IC.ReplaceInstUsesWith(*BO, Zext);
+ } else {
+ llvm_unreachable("Unexpected Binary operation");
+ }
+ IC.Worklist.Add(cast<Instruction>(U));
+ }
+ }
+ if (isa<Instruction>(OtherVal))
+ IC.Worklist.Add(cast<Instruction>(OtherVal));
+
+ // The original icmp gets replaced with the overflow value, maybe inverted
+ // depending on predicate.
+ bool Inverse = false;
+ switch (I.getPredicate()) {
+ case ICmpInst::ICMP_NE:
+ break;
+ case ICmpInst::ICMP_EQ:
+ Inverse = true;
+ break;
+ case ICmpInst::ICMP_UGT:
+ case ICmpInst::ICMP_UGE:
+ if (I.getOperand(0) == MulVal)
+ break;
+ Inverse = true;
+ break;
+ case ICmpInst::ICMP_ULT:
+ case ICmpInst::ICMP_ULE:
+ if (I.getOperand(1) == MulVal)
+ break;
+ Inverse = true;
+ break;
+ default:
+ llvm_unreachable("Unexpected predicate");
+ }
+ if (Inverse) {
+ Value *Res = Builder->CreateExtractValue(Call, 1);
+ return BinaryOperator::CreateNot(Res);
+ }
+
+ return ExtractValueInst::Create(Call, 1);
+}
+
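An illustrative caller-side view of the idiom ProcessUMulZExtIdiom recognizes (editor's sketch, not part of this diff; the function name is made up). A front end typically lowers the widening multiply below to mul(zext a, zext b), and the range check on the product then matches the ICMP_UGT pattern and can be rewritten to llvm.umul.with.overflow.i32:

  #include <cstdint>

  bool mul32_overflows(std::uint32_t a, std::uint32_t b) {
    std::uint64_t wide = static_cast<std::uint64_t>(a) * b;  // mul(zext a, zext b)
    return wide > UINT32_MAX;                                // cmp ugt mulval, max
  }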
// DemandedBitsLHSMask - When performing a comparison against a constant,
// it is possible that not all the bits in the LHS are demanded. This helper
// method computes the mask that IS demanded.
@@ -2048,7 +2285,7 @@ static APInt DemandedBitsLHSMask(ICmpInst &I,
/// \brief Check if the order of \p Op0 and \p Op1 as operand in an ICmpInst
/// should be swapped.
-/// The descision is based on how many times these two operands are reused
+/// The decision is based on how many times these two operands are reused
/// as subtract operands and their positions in those instructions.
/// The rationale is that several architectures use the same instruction for
/// both subtract and cmp, thus it is better if the order of those operands
@@ -2064,12 +2301,12 @@ static bool swapMayExposeCSEOpportunities(const Value * Op0,
// Each time Op0 is the first operand, count -1: swapping is bad, the
// subtract has already the same layout as the compare.
// Each time Op0 is the second operand, count +1: swapping is good, the
- // subtract has a diffrent layout as the compare.
+ // subtract has a different layout as the compare.
// At the end, if the benefit is greater than 0, Op0 should come second to
// expose more CSE opportunities.
int GlobalSwapBenefits = 0;
- for (Value::const_use_iterator UI = Op0->use_begin(), UIEnd = Op0->use_end(); UI != UIEnd; ++UI) {
- const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(*UI);
+ for (const User *U : Op0->users()) {
+ const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(U);
if (!BinOp || BinOp->getOpcode() != Instruction::Sub)
continue;
// If Op0 is the first argument, this is not beneficial to swap the
@@ -2104,7 +2341,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
Changed = true;
}
- if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, TD))
+ if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
// comparing -val or val with non-zero is the same as just comparing val
@@ -2172,14 +2409,14 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
unsigned BitWidth = 0;
if (Ty->isIntOrIntVectorTy())
BitWidth = Ty->getScalarSizeInBits();
- else if (TD) // Pointers require TD info to get their size.
- BitWidth = TD->getTypeSizeInBits(Ty->getScalarType());
+ else if (DL) // Pointers require DL info to get their size.
+ BitWidth = DL->getTypeSizeInBits(Ty->getScalarType());
bool isSignBit = false;
// See if we are doing a comparison with a constant.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
- Value *A = 0, *B = 0;
+ Value *A = nullptr, *B = nullptr;
// Match the following pattern, which is a common idiom when writing
// overflow-safe integer arithmetic functions. The source performs an
@@ -2292,21 +2529,29 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// bit is set. If the comparison is against zero, then this is a check
// to see if *that* bit is set.
APInt Op0KnownZeroInverted = ~Op0KnownZero;
- if (~Op1KnownZero == 0 && Op0KnownZeroInverted.isPowerOf2()) {
+ if (~Op1KnownZero == 0) {
// If the LHS is an AND with the same constant, look through it.
- Value *LHS = 0;
- ConstantInt *LHSC = 0;
+ Value *LHS = nullptr;
+ ConstantInt *LHSC = nullptr;
if (!match(Op0, m_And(m_Value(LHS), m_ConstantInt(LHSC))) ||
LHSC->getValue() != Op0KnownZeroInverted)
LHS = Op0;
// If the LHS is 1 << x, and we know the result is a power of 2 like 8,
// then turn "((1 << x)&8) == 0" into "x != 3".
- Value *X = 0;
+ // or turn "((1 << x)&7) == 0" into "x > 2".
+ Value *X = nullptr;
if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
- unsigned CmpVal = Op0KnownZeroInverted.countTrailingZeros();
- return new ICmpInst(ICmpInst::ICMP_NE, X,
- ConstantInt::get(X->getType(), CmpVal));
+ APInt ValToCheck = Op0KnownZeroInverted;
+ if (ValToCheck.isPowerOf2()) {
+ unsigned CmpVal = ValToCheck.countTrailingZeros();
+ return new ICmpInst(ICmpInst::ICMP_NE, X,
+ ConstantInt::get(X->getType(), CmpVal));
+ } else if ((++ValToCheck).isPowerOf2()) {
+ unsigned CmpVal = ValToCheck.countTrailingZeros() - 1;
+ return new ICmpInst(ICmpInst::ICMP_UGT, X,
+ ConstantInt::get(X->getType(), CmpVal));
+ }
}
// If the LHS is 8 >>u x, and we know the result is a power of 2 like 1,
@@ -2329,21 +2574,29 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// bit is set. If the comparison is against zero, then this is a check
// to see if *that* bit is set.
APInt Op0KnownZeroInverted = ~Op0KnownZero;
- if (~Op1KnownZero == 0 && Op0KnownZeroInverted.isPowerOf2()) {
+ if (~Op1KnownZero == 0) {
// If the LHS is an AND with the same constant, look through it.
- Value *LHS = 0;
- ConstantInt *LHSC = 0;
+ Value *LHS = nullptr;
+ ConstantInt *LHSC = nullptr;
if (!match(Op0, m_And(m_Value(LHS), m_ConstantInt(LHSC))) ||
LHSC->getValue() != Op0KnownZeroInverted)
LHS = Op0;
// If the LHS is 1 << x, and we know the result is a power of 2 like 8,
// then turn "((1 << x)&8) != 0" into "x == 3".
- Value *X = 0;
+ // or turn "((1 << x)&7) != 0" into "x < 3".
+ Value *X = nullptr;
if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
- unsigned CmpVal = Op0KnownZeroInverted.countTrailingZeros();
- return new ICmpInst(ICmpInst::ICMP_EQ, X,
- ConstantInt::get(X->getType(), CmpVal));
+ APInt ValToCheck = Op0KnownZeroInverted;
+ if (ValToCheck.isPowerOf2()) {
+ unsigned CmpVal = ValToCheck.countTrailingZeros();
+ return new ICmpInst(ICmpInst::ICMP_EQ, X,
+ ConstantInt::get(X->getType(), CmpVal));
+ } else if ((++ValToCheck).isPowerOf2()) {
+ unsigned CmpVal = ValToCheck.countTrailingZeros();
+ return new ICmpInst(ICmpInst::ICMP_ULT, X,
+ ConstantInt::get(X->getType(), CmpVal));
+ }
}
// If the LHS is 8 >>u x, and we know the result is a power of 2 like 1,
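The two hunks above extend the single-bit shift test fold; an illustrative source-level pair (editor's sketch, not part of this diff; function names are made up):

  // Power-of-two mask: the test collapses to an equality on the shift amount.
  bool bit3_clear(unsigned x) {
    return ((1u << x) & 8u) == 0;   // -> x != 3
  }

  // Low-bits mask (2^n - 1): the test becomes an unsigned range check.
  bool low_bits_clear(unsigned x) {
    return ((1u << x) & 7u) == 0;   // -> x > 2 (unsigned)
  }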
@@ -2468,10 +2721,10 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// operands has at least one user besides the compare (the select),
// which would often largely negate the benefit of folding anyway.
if (I.hasOneUse())
- if (SelectInst *SI = dyn_cast<SelectInst>(*I.use_begin()))
+ if (SelectInst *SI = dyn_cast<SelectInst>(*I.user_begin()))
if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
(SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
- return 0;
+ return nullptr;
// See if we are doing a comparison between a constant and an instruction that
// can be folded into the comparison.
@@ -2507,7 +2760,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// If either operand of the select is a constant, we can fold the
// comparison into the select arms, which will cause one to be
// constant folded and the select turned into a bitwise or.
- Value *Op1 = 0, *Op2 = 0;
+ Value *Op1 = nullptr, *Op2 = nullptr;
if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1)))
Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2)))
@@ -2532,8 +2785,8 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
}
case Instruction::IntToPtr:
// icmp pred inttoptr(X), null -> icmp pred X, 0
- if (RHSC->isNullValue() && TD &&
- TD->getIntPtrType(RHSC->getType()) ==
+ if (RHSC->isNullValue() && DL &&
+ DL->getIntPtrType(RHSC->getType()) ==
LHSI->getOperand(0)->getType())
return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
Constant::getNullValue(LHSI->getOperand(0)->getType()));
@@ -2619,7 +2872,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// Analyze the case when either Op0 or Op1 is an add instruction.
// Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
- Value *A = 0, *B = 0, *C = 0, *D = 0;
+ Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
if (BO0 && BO0->getOpcode() == Instruction::Add)
A = BO0->getOperand(0), B = BO0->getOperand(1);
if (BO1 && BO1->getOpcode() == Instruction::Add)
@@ -2714,7 +2967,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// Analyze the case when either Op0 or Op1 is a sub instruction.
// Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null).
- A = 0; B = 0; C = 0; D = 0;
+ A = nullptr; B = nullptr; C = nullptr; D = nullptr;
if (BO0 && BO0->getOpcode() == Instruction::Sub)
A = BO0->getOperand(0), B = BO0->getOperand(1);
if (BO1 && BO1->getOpcode() == Instruction::Sub)
@@ -2740,7 +2993,17 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
BO0->hasOneUse() && BO1->hasOneUse())
return new ICmpInst(Pred, D, B);
- BinaryOperator *SRem = NULL;
+ // icmp (0-X) < cst --> X > -cst
+ if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) {
+ Value *X;
+ if (match(BO0, m_Neg(m_Value(X))))
+ if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1))
+ if (!RHSC->isMinValue(/*isSigned=*/true))
+ return new ICmpInst(I.getSwappedPredicate(), X,
+ ConstantExpr::getNeg(RHSC));
+ }
+
+ BinaryOperator *SRem = nullptr;
// icmp (srem X, Y), Y
if (BO0 && BO0->getOpcode() == Instruction::SRem &&
Op1 == BO0->getOperand(1))
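For the "icmp (0-X) < cst" fold added above, a hedged source-level illustration (editor's sketch, not part of this diff; the function name is made up):

  // Signed overflow is undefined in C++, so the negation is emitted as
  // "sub nsw 0, x"; with no wrap problem the compare is flipped onto x.
  bool neg_below_five(int x) {
    return -x < 5;   // -> x > -5
  }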
@@ -2878,6 +3141,16 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
(Op0 == A || Op0 == B))
if (Instruction *R = ProcessUAddIdiom(I, Op1, *this))
return R;
+
+ // (zext a) * (zext b) --> llvm.umul.with.overflow.
+ if (match(Op0, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
+ if (Instruction *R = ProcessUMulZExtIdiom(I, Op0, Op1, *this))
+ return R;
+ }
+ if (match(Op1, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
+ if (Instruction *R = ProcessUMulZExtIdiom(I, Op1, Op0, *this))
+ return R;
+ }
}
if (I.isEquality()) {
@@ -2919,7 +3192,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// (X&Z) == (Y&Z) -> (X^Y) & Z == 0
if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B)))) &&
match(Op1, m_OneUse(m_And(m_Value(C), m_Value(D))))) {
- Value *X = 0, *Y = 0, *Z = 0;
+ Value *X = nullptr, *Y = nullptr, *Z = nullptr;
if (A == C) {
X = B; Y = D; Z = A;
@@ -3010,7 +3283,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
if (match(Op1, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op0 == X)
return FoldICmpAddOpCst(I, X, Cst, I.getSwappedPredicate());
}
- return Changed ? &I : 0;
+ return Changed ? &I : nullptr;
}
/// FoldFCmp_IntToFP_Cst - Fold fcmp ([us]itofp x, cst) if possible.
@@ -3018,13 +3291,13 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
Instruction *LHSI,
Constant *RHSC) {
- if (!isa<ConstantFP>(RHSC)) return 0;
+ if (!isa<ConstantFP>(RHSC)) return nullptr;
const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
// Get the width of the mantissa. We don't want to hack on conversions that
// might lose information from the integer, e.g. "i64 -> float"
int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
- if (MantissaWidth == -1) return 0; // Unknown.
+ if (MantissaWidth == -1) return nullptr; // Unknown.
// Check to see that the input is converted from an integer type that is small
// enough that it preserves all bits. TODO: check here for "known" sign bits.
@@ -3038,7 +3311,7 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
// If the conversion would lose info, don't hack on this.
if ((int)InputSize > MantissaWidth)
- return 0;
+ return nullptr;
// Otherwise, we can potentially simplify the comparison. We know that it
// will always come through as an integer value and we know the constant is
@@ -3229,7 +3502,7 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1, TD))
+ if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
// Simplify 'fcmp pred X, X'
@@ -3313,31 +3586,6 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
if (Instruction *NV = FoldFCmp_IntToFP_Cst(I, LHSI, RHSC))
return NV;
break;
- case Instruction::Select: {
- // If either operand of the select is a constant, we can fold the
- // comparison into the select arms, which will cause one to be
- // constant folded and the select turned into a bitwise or.
- Value *Op1 = 0, *Op2 = 0;
- if (LHSI->hasOneUse()) {
- if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
- // Fold the known value into the constant operand.
- Op1 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC);
- // Insert a new FCmp of the other select operand.
- Op2 = Builder->CreateFCmp(I.getPredicate(),
- LHSI->getOperand(2), RHSC, I.getName());
- } else if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
- // Fold the known value into the constant operand.
- Op2 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC);
- // Insert a new FCmp of the other select operand.
- Op1 = Builder->CreateFCmp(I.getPredicate(), LHSI->getOperand(1),
- RHSC, I.getName());
- }
- }
-
- if (Op1)
- return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
- break;
- }
case Instruction::FSub: {
// fcmp pred (fneg x), C -> fcmp swap(pred) x, -C
Value *Op;
@@ -3409,5 +3657,5 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
return new FCmpInst(I.getPredicate(), LHSExt->getOperand(0),
RHSExt->getOperand(0));
- return Changed ? &I : 0;
+ return Changed ? &I : nullptr;
}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 4c861b3..e9c25d3 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -20,6 +20,8 @@
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
+#define DEBUG_TYPE "instcombine"
+
STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
@@ -29,10 +31,13 @@ STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
static bool pointsToConstantGlobal(Value *V) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
return GV->isConstant();
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
+
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
if (CE->getOpcode() == Instruction::BitCast ||
+ CE->getOpcode() == Instruction::AddrSpaceCast ||
CE->getOpcode() == Instruction::GetElementPtr)
return pointsToConstantGlobal(CE->getOperand(0));
+ }
return false;
}
@@ -45,95 +50,102 @@ static bool pointsToConstantGlobal(Value *V) {
/// can optimize this.
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
- SmallVectorImpl<Instruction *> &ToDelete,
- bool IsOffset = false) {
+ SmallVectorImpl<Instruction *> &ToDelete) {
// We track lifetime intrinsics as we encounter them. If we decide to go
// ahead and replace the value with the global, this lets the caller quickly
// eliminate the markers.
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
- User *U = cast<Instruction>(*UI);
-
- if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
- // Ignore non-volatile loads, they are always ok.
- if (!LI->isSimple()) return false;
- continue;
- }
-
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
- // If uses of the bitcast are ok, we are ok.
- if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, ToDelete, IsOffset))
- return false;
- continue;
- }
- if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
- // If the GEP has all zero indices, it doesn't offset the pointer. If it
- // doesn't, it does.
- if (!isOnlyCopiedFromConstantGlobal(
- GEP, TheCopy, ToDelete, IsOffset || !GEP->hasAllZeroIndices()))
- return false;
- continue;
- }
-
- if (CallSite CS = U) {
- // If this is the function being called then we treat it like a load and
- // ignore it.
- if (CS.isCallee(UI))
+ SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
+ ValuesToInspect.push_back(std::make_pair(V, false));
+ while (!ValuesToInspect.empty()) {
+ auto ValuePair = ValuesToInspect.pop_back_val();
+ const bool IsOffset = ValuePair.second;
+ for (auto &U : ValuePair.first->uses()) {
+ Instruction *I = cast<Instruction>(U.getUser());
+
+ if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+ // Ignore non-volatile loads; they are always ok.
+ if (!LI->isSimple()) return false;
continue;
+ }
- // If this is a readonly/readnone call site, then we know it is just a
- // load (but one that potentially returns the value itself), so we can
- // ignore it if we know that the value isn't captured.
- unsigned ArgNo = CS.getArgumentNo(UI);
- if (CS.onlyReadsMemory() &&
- (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
+ if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
+ // If uses of the bitcast are ok, we are ok.
+ ValuesToInspect.push_back(std::make_pair(I, IsOffset));
continue;
-
- // If this is being passed as a byval argument, the caller is making a
- // copy, so it is only a read of the alloca.
- if (CS.isByValArgument(ArgNo))
+ }
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
+ // If the GEP has all zero indices, it doesn't offset the pointer;
+ // otherwise it does.
+ ValuesToInspect.push_back(
+ std::make_pair(I, IsOffset || !GEP->hasAllZeroIndices()));
continue;
- }
+ }
- // Lifetime intrinsics can be handled by the caller.
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
- if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
- II->getIntrinsicID() == Intrinsic::lifetime_end) {
- assert(II->use_empty() && "Lifetime markers have no result to use!");
- ToDelete.push_back(II);
- continue;
+ if (CallSite CS = I) {
+ // If this is the function being called then we treat it like a load and
+ // ignore it.
+ if (CS.isCallee(&U))
+ continue;
+
+ // Inalloca arguments are clobbered by the call.
+ unsigned ArgNo = CS.getArgumentNo(&U);
+ if (CS.isInAllocaArgument(ArgNo))
+ return false;
+
+ // If this is a readonly/readnone call site, then we know it is just a
+ // load (but one that potentially returns the value itself), so we can
+ // ignore it if we know that the value isn't captured.
+ if (CS.onlyReadsMemory() &&
+ (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
+ continue;
+
+ // If this is being passed as a byval argument, the caller is making a
+ // copy, so it is only a read of the alloca.
+ if (CS.isByValArgument(ArgNo))
+ continue;
}
- }
- // If this is isn't our memcpy/memmove, reject it as something we can't
- // handle.
- MemTransferInst *MI = dyn_cast<MemTransferInst>(U);
- if (MI == 0)
- return false;
+ // Lifetime intrinsics can be handled by the caller.
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
+ II->getIntrinsicID() == Intrinsic::lifetime_end) {
+ assert(II->use_empty() && "Lifetime markers have no result to use!");
+ ToDelete.push_back(II);
+ continue;
+ }
+ }
- // If the transfer is using the alloca as a source of the transfer, then
- // ignore it since it is a load (unless the transfer is volatile).
- if (UI.getOperandNo() == 1) {
- if (MI->isVolatile()) return false;
- continue;
- }
+      // If this isn't our memcpy/memmove, reject it as something we can't
+      // handle.
+ MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
+ if (!MI)
+ return false;
- // If we already have seen a copy, reject the second one.
- if (TheCopy) return false;
+ // If the transfer is using the alloca as a source of the transfer, then
+ // ignore it since it is a load (unless the transfer is volatile).
+ if (U.getOperandNo() == 1) {
+ if (MI->isVolatile()) return false;
+ continue;
+ }
- // If the pointer has been offset from the start of the alloca, we can't
- // safely handle this.
- if (IsOffset) return false;
+ // If we already have seen a copy, reject the second one.
+ if (TheCopy) return false;
- // If the memintrinsic isn't using the alloca as the dest, reject it.
- if (UI.getOperandNo() != 0) return false;
+ // If the pointer has been offset from the start of the alloca, we can't
+ // safely handle this.
+ if (IsOffset) return false;
- // If the source of the memcpy/move is not a constant global, reject it.
- if (!pointsToConstantGlobal(MI->getSource()))
- return false;
+ // If the memintrinsic isn't using the alloca as the dest, reject it.
+ if (U.getOperandNo() != 0) return false;
+
+ // If the source of the memcpy/move is not a constant global, reject it.
+ if (!pointsToConstantGlobal(MI->getSource()))
+ return false;
- // Otherwise, the transform is safe. Remember the copy instruction.
- TheCopy = MI;
+ // Otherwise, the transform is safe. Remember the copy instruction.
+ TheCopy = MI;
+ }
}
return true;
}
@@ -144,17 +156,17 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
SmallVectorImpl<Instruction *> &ToDelete) {
- MemTransferInst *TheCopy = 0;
+ MemTransferInst *TheCopy = nullptr;
if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
return TheCopy;
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// Ensure that the alloca array size argument has type intptr_t, so that
// any casting is exposed early.
- if (TD) {
- Type *IntPtrTy = TD->getIntPtrType(AI.getType());
+ if (DL) {
+ Type *IntPtrTy = DL->getIntPtrType(AI.getType());
if (AI.getArraySize()->getType() != IntPtrTy) {
Value *V = Builder->CreateIntCast(AI.getArraySize(),
IntPtrTy, false);
@@ -168,7 +180,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
Type *NewTy =
ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
- AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
+ AllocaInst *New = Builder->CreateAlloca(NewTy, nullptr, AI.getName());
New->setAlignment(AI.getAlignment());
// Scan to the end of the allocation instructions, to skip over a block of
@@ -180,8 +192,8 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// Now that I is pointing to the first non-allocation-inst in the block,
// insert our getelementptr instruction...
//
- Type *IdxTy = TD
- ? TD->getIntPtrType(AI.getType())
+ Type *IdxTy = DL
+ ? DL->getIntPtrType(AI.getType())
: Type::getInt64Ty(AI.getContext());
Value *NullIdx = Constant::getNullValue(IdxTy);
Value *Idx[2] = { NullIdx, NullIdx };
@@ -197,15 +209,15 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
}
}
- if (TD && AI.getAllocatedType()->isSized()) {
+ if (DL && AI.getAllocatedType()->isSized()) {
// If the alignment is 0 (unspecified), assign it the preferred alignment.
if (AI.getAlignment() == 0)
- AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));
+ AI.setAlignment(DL->getPrefTypeAlignment(AI.getAllocatedType()));
// Move all alloca's of zero byte objects to the entry block and merge them
// together. Note that we only do this for alloca's, because malloc should
// allocate and return a unique pointer, even for a zero byte allocation.
- if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0) {
+ if (DL->getTypeAllocSize(AI.getAllocatedType()) == 0) {
// For a zero sized alloca there is no point in doing an array allocation.
// This is helpful if the array size is a complicated expression not used
// elsewhere.
@@ -223,7 +235,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// dominance as the array size was forced to a constant earlier already.
AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
- TD->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
+ DL->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
AI.moveBefore(FirstInst);
return &AI;
}
@@ -232,7 +244,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// assign it the preferred alignment.
if (EntryAI->getAlignment() == 0)
EntryAI->setAlignment(
- TD->getPrefTypeAlignment(EntryAI->getAllocatedType()));
+ DL->getPrefTypeAlignment(EntryAI->getAllocatedType()));
// Replace this zero-sized alloca with the one at the start of the entry
// block after ensuring that the address will be aligned enough for both
// types.
@@ -256,7 +268,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
SmallVector<Instruction *, 4> ToDelete;
if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
unsigned SourceAlign = getOrEnforceKnownAlignment(Copy->getSource(),
- AI.getAlignment(), TD);
+ AI.getAlignment(), DL);
if (AI.getAlignment() <= SourceAlign) {
DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
@@ -281,7 +293,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
/// InstCombineLoadCast - Fold 'load (cast P)' -> cast (load P)' when possible.
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
- const DataLayout *TD) {
+ const DataLayout *DL) {
User *CI = cast<User>(LI.getOperand(0));
Value *CastOp = CI->getOperand(0);
@@ -291,7 +303,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
// If the address spaces don't match, don't eliminate the cast.
if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
- return 0;
+ return nullptr;
Type *SrcPTy = SrcTy->getElementType();
@@ -303,8 +315,8 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
if (ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
if (Constant *CSrc = dyn_cast<Constant>(CastOp))
if (ASrcTy->getNumElements() != 0) {
- Type *IdxTy = TD
- ? TD->getIntPtrType(SrcTy)
+ Type *IdxTy = DL
+ ? DL->getIntPtrType(SrcTy)
: Type::getInt64Ty(SrcTy->getContext());
Value *Idx = Constant::getNullValue(IdxTy);
Value *Idxs[2] = { Idx, Idx };
@@ -331,23 +343,30 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
NewLoad->setAlignment(LI.getAlignment());
NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
// Now cast the result of the load.
+ PointerType *OldTy = dyn_cast<PointerType>(NewLoad->getType());
+ PointerType *NewTy = dyn_cast<PointerType>(LI.getType());
+ if (OldTy && NewTy &&
+ OldTy->getAddressSpace() != NewTy->getAddressSpace()) {
+ return new AddrSpaceCastInst(NewLoad, LI.getType());
+ }
+
return new BitCastInst(NewLoad, LI.getType());
}
}
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
Value *Op = LI.getOperand(0);
// Attempt to improve the alignment.
- if (TD) {
+ if (DL) {
unsigned KnownAlign =
- getOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()),TD);
+ getOrEnforceKnownAlignment(Op, DL->getPrefTypeAlignment(LI.getType()),DL);
unsigned LoadAlign = LI.getAlignment();
unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
- TD->getABITypeAlignment(LI.getType());
+ DL->getABITypeAlignment(LI.getType());
if (KnownAlign > EffectiveLoadAlign)
LI.setAlignment(KnownAlign);
@@ -357,12 +376,12 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
// load (cast X) --> cast (load X) iff safe.
if (isa<CastInst>(Op))
- if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
+ if (Instruction *Res = InstCombineLoadCast(*this, LI, DL))
return Res;
// None of the following transforms are legal for volatile/atomic loads.
// FIXME: Some of it is okay for atomic loads; needs refactoring.
- if (!LI.isSimple()) return 0;
+ if (!LI.isSimple()) return nullptr;
// Do really simple store-to-load forwarding and load CSE, to catch cases
// where there are several consecutive memory accesses to the same location,
@@ -401,7 +420,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
// Instcombine load (constantexpr_cast global) -> cast (load global)
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))
if (CE->isCast())
- if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
+ if (Instruction *Res = InstCombineLoadCast(*this, LI, DL))
return Res;
if (Op->hasOneUse()) {
@@ -418,8 +437,8 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
// load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
unsigned Align = LI.getAlignment();
- if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, TD) &&
- isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, TD)) {
+ if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, DL) &&
+ isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, DL)) {
LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
SI->getOperand(1)->getName()+".val");
LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
@@ -444,7 +463,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
}
}
}
- return 0;
+ return nullptr;
}
/// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
@@ -454,14 +473,14 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
User *CI = cast<User>(SI.getOperand(1));
Value *CastOp = CI->getOperand(0);
- Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
+ Type *DestPTy = CI->getType()->getPointerElementType();
PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
- if (SrcTy == 0) return 0;
+ if (!SrcTy) return nullptr;
Type *SrcPTy = SrcTy->getElementType();
if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
- return 0;
+ return nullptr;
/// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
/// to its first element. This allows us to handle things like:
@@ -495,30 +514,40 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
}
if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
- return 0;
+ return nullptr;
+
+ // If the pointers point into different address spaces don't do the
+ // transformation.
+ if (SrcTy->getAddressSpace() != CI->getType()->getPointerAddressSpace())
+ return nullptr;
- // If the pointers point into different address spaces or if they point to
- // values with different sizes, we can't do the transformation.
+ // If the pointers point to values of different sizes don't do the
+ // transformation.
if (!IC.getDataLayout() ||
- SrcTy->getAddressSpace() !=
- cast<PointerType>(CI->getType())->getAddressSpace() ||
IC.getDataLayout()->getTypeSizeInBits(SrcPTy) !=
IC.getDataLayout()->getTypeSizeInBits(DestPTy))
- return 0;
+ return nullptr;
+
+ // If the pointers point to pointers to different address spaces don't do the
+ // transformation. It is not safe to introduce an addrspacecast instruction in
+ // this case since, depending on the target, addrspacecast may not be a no-op
+ // cast.
+ if (SrcPTy->isPointerTy() && DestPTy->isPointerTy() &&
+ SrcPTy->getPointerAddressSpace() != DestPTy->getPointerAddressSpace())
+ return nullptr;
// Okay, we are casting from one integer or pointer type to another of
// the same size. Instead of casting the pointer before
// the store, cast the value to be stored.
Value *NewCast;
- Value *SIOp0 = SI.getOperand(0);
Instruction::CastOps opcode = Instruction::BitCast;
- Type* CastSrcTy = SIOp0->getType();
+ Type* CastSrcTy = DestPTy;
Type* CastDstTy = SrcPTy;
if (CastDstTy->isPointerTy()) {
if (CastSrcTy->isIntegerTy())
opcode = Instruction::IntToPtr;
} else if (CastDstTy->isIntegerTy()) {
- if (SIOp0->getType()->isPointerTy())
+ if (CastSrcTy->isPointerTy())
opcode = Instruction::PtrToInt;
}
@@ -527,6 +556,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
if (!NewGEPIndices.empty())
CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices);
+ Value *SIOp0 = SI.getOperand(0);
NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
SIOp0->getName()+".c");
SI.setOperand(0, NewCast);
@@ -568,13 +598,13 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
Value *Ptr = SI.getOperand(1);
// Attempt to improve the alignment.
- if (TD) {
+ if (DL) {
unsigned KnownAlign =
- getOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()),
- TD);
+ getOrEnforceKnownAlignment(Ptr, DL->getPrefTypeAlignment(Val->getType()),
+ DL);
unsigned StoreAlign = SI.getAlignment();
unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
- TD->getABITypeAlignment(Val->getType());
+ DL->getABITypeAlignment(Val->getType());
if (KnownAlign > EffectiveStoreAlign)
SI.setAlignment(KnownAlign);
@@ -584,7 +614,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
// Don't hack volatile/atomic stores.
// FIXME: Some bits are legal for atomic stores; needs refactoring.
- if (!SI.isSimple()) return 0;
+ if (!SI.isSimple()) return nullptr;
// If the RHS is an alloca with a single use, zapify the store, making the
// alloca dead.
@@ -651,7 +681,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
if (Instruction *U = dyn_cast<Instruction>(Val))
Worklist.Add(U); // Dropped a use.
}
- return 0; // Do not modify these!
+ return nullptr; // Do not modify these!
}
// store undef, Ptr -> noop
@@ -680,9 +710,9 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
if (BI->isUnconditional())
if (SimplifyStoreAtEndOfBlock(SI))
- return 0; // xform done!
+ return nullptr; // xform done!
- return 0;
+ return nullptr;
}
/// SimplifyStoreAtEndOfBlock - Turn things like:
@@ -705,7 +735,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
// the other predecessor.
pred_iterator PI = pred_begin(DestBB);
BasicBlock *P = *PI;
- BasicBlock *OtherBB = 0;
+ BasicBlock *OtherBB = nullptr;
if (P != StoreBB)
OtherBB = P;
@@ -735,7 +765,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
// If the other block ends in an unconditional branch, check for the 'if then
// else' case. there is an instruction before the branch.
- StoreInst *OtherStore = 0;
+ StoreInst *OtherStore = nullptr;
if (OtherBr->isUnconditional()) {
--BBI;
// Skip over debugging info.
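The InstCombineLoadStoreAlloca.cpp hunks above rewrite isOnlyCopiedFromConstantGlobal() from recursion into an explicit worklist, teach it to look through addrspacecast, reject inalloca arguments, and rename the DataLayout pointer from TD to DL. The transform the helper guards replaces a stack buffer that is filled by a single memcpy from a constant global, and otherwise only read, with direct uses of that global. A rough source-level analogue is sketched below; the names are invented for illustration and the real analysis runs on LLVM IR, not on C++ source.

#include <cstring>

// Illustration only: 'kTable' plays the role of the constant global and
// 'Tmp' the alloca. The whole buffer is filled by one memcpy from the global
// at offset zero and every other use is a simple load, so InstCombine may
// drop the copy and read 'kTable' directly.
static const int kTable[4] = {1, 2, 3, 4};

int lookup(unsigned I) {
  int Tmp[4];
  std::memcpy(Tmp, kTable, sizeof(Tmp)); // the MemTransferInst ("TheCopy")
  return Tmp[I & 3];                     // non-volatile load, never a store
}

int main() { return lookup(2) == 3 ? 0 : 1; }
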
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index a759548..6c6e7d8 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -15,10 +15,12 @@
#include "InstCombine.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/Support/PatternMatch.h"
+#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;
+#define DEBUG_TYPE "instcombine"
+
/// simplifyValueKnownNonZero - The specific integer value is used in a context
/// where it is known to be non-zero. If this allows us to simplify the
@@ -27,13 +29,13 @@ static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC) {
// If V has multiple uses, then we would have to do more analysis to determine
// if this is safe. For example, the use could be in dynamically unreached
// code.
- if (!V->hasOneUse()) return 0;
+ if (!V->hasOneUse()) return nullptr;
bool MadeChange = false;
// ((1 << A) >>u B) --> (1 << (A-B))
// Because V cannot be zero, we know that B is less than A.
- Value *A = 0, *B = 0, *PowerOf2 = 0;
+ Value *A = nullptr, *B = nullptr, *PowerOf2 = nullptr;
if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(PowerOf2), m_Value(A))),
m_Value(B))) &&
// The "1" can be any value known to be a power of 2.
@@ -68,7 +70,7 @@ static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC) {
// If V is a phi node, we can call this on each of its operands.
// "select cond, X, 0" can simplify to "X".
- return MadeChange ? V : 0;
+ return MadeChange ? V : nullptr;
}
@@ -107,7 +109,7 @@ static Constant *getLogBase2Vector(ConstantDataVector *CV) {
for (unsigned I = 0, E = CV->getNumElements(); I != E; ++I) {
Constant *Elt = CV->getElementAsConstant(I);
if (!match(Elt, m_APInt(IVal)) || !IVal->isPowerOf2())
- return 0;
+ return nullptr;
Elts.push_back(ConstantInt::get(Elt->getType(), IVal->logBase2()));
}
@@ -118,7 +120,10 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
bool Changed = SimplifyAssociativeOrCommutative(I);
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyMulInst(Op0, Op1, TD))
+ if (Value *V = SimplifyVectorOp(I))
+ return ReplaceInstUsesWith(I, V);
+
+ if (Value *V = SimplifyMulInst(Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
if (Value *V = SimplifyUsingDistributiveLaws(I))
@@ -139,7 +144,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
return BinaryOperator::CreateMul(NewOp, ConstantExpr::getShl(C1, C2));
if (match(&I, m_Mul(m_Value(NewOp), m_Constant(C1)))) {
- Constant *NewCst = 0;
+ Constant *NewCst = nullptr;
if (match(C1, m_APInt(IVal)) && IVal->isPowerOf2())
// Replace X*(2^C) with X << C, where C is either a scalar or a splat.
NewCst = ConstantInt::get(NewOp->getType(), IVal->logBase2());
@@ -158,15 +163,6 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
}
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
- // Canonicalize (X+C1)*CI -> X*CI+C1*CI.
- { Value *X; ConstantInt *C1;
- if (Op0->hasOneUse() &&
- match(Op0, m_Add(m_Value(X), m_ConstantInt(C1)))) {
- Value *Add = Builder->CreateMul(X, CI);
- return BinaryOperator::CreateAdd(Add, Builder->CreateMul(C1, CI));
- }
- }
-
// (Y - X) * (-(2**n)) -> (X - Y) * (2**n), for positive nonzero n
// (Y + const) * (-(2**n)) -> (-constY) * (2**n), for positive nonzero n
// The "* (2**n)" thus becomes a potential shifting opportunity.
@@ -174,10 +170,10 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
const APInt & Val = CI->getValue();
const APInt &PosVal = Val.abs();
if (Val.isNegative() && PosVal.isPowerOf2()) {
- Value *X = 0, *Y = 0;
+ Value *X = nullptr, *Y = nullptr;
if (Op0->hasOneUse()) {
ConstantInt *C1;
- Value *Sub = 0;
+ Value *Sub = nullptr;
if (match(Op0, m_Sub(m_Value(Y), m_Value(X))))
Sub = Builder->CreateSub(X, Y, "suba");
else if (match(Op0, m_Add(m_Value(Y), m_ConstantInt(C1))))
@@ -201,6 +197,19 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
if (isa<PHINode>(Op0))
if (Instruction *NV = FoldOpIntoPhi(I))
return NV;
+
+ // Canonicalize (X+C1)*CI -> X*CI+C1*CI.
+ {
+ Value *X;
+ Constant *C1;
+ if (match(Op0, m_OneUse(m_Add(m_Value(X), m_Constant(C1))))) {
+ Value *Mul = Builder->CreateMul(C1, Op1);
+ // Only go forward with the transform if C1*CI simplifies to a tidier
+ // constant.
+ if (!match(Mul, m_Mul(m_Value(), m_Value())))
+ return BinaryOperator::CreateAdd(Builder->CreateMul(X, Op1), Mul);
+ }
+ }
}
if (Value *Op0v = dyn_castNegVal(Op0)) // -X * -Y = X*Y
@@ -247,7 +256,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
}
/// i1 mul -> i1 and.
- if (I.getType()->isIntegerTy(1))
+ if (I.getType()->getScalarType()->isIntegerTy(1))
return BinaryOperator::CreateAnd(Op0, Op1);
// X*(1 << Y) --> X << Y
@@ -267,7 +276,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
// -2 is "-1 << 1" so it is all bits set except the low one.
APInt Negative2(I.getType()->getPrimitiveSizeInBits(), (uint64_t)-2, true);
- Value *BoolCast = 0, *OtherOp = 0;
+ Value *BoolCast = nullptr, *OtherOp = nullptr;
if (MaskedValueIsZero(Op0, Negative2))
BoolCast = Op0, OtherOp = Op1;
else if (MaskedValueIsZero(Op1, Negative2))
@@ -280,7 +289,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
}
}
- return Changed ? &I : 0;
+ return Changed ? &I : nullptr;
}
//
@@ -313,16 +322,41 @@ static void detectLog2OfHalf(Value *&Op, Value *&Y, IntrinsicInst *&Log2) {
if (I->getOpcode() != Instruction::FMul || !I->hasUnsafeAlgebra())
return;
- ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(0));
- if (CFP && CFP->isExactlyValue(0.5)) {
+ if (match(I->getOperand(0), m_SpecificFP(0.5)))
Y = I->getOperand(1);
- return;
- }
- CFP = dyn_cast<ConstantFP>(I->getOperand(1));
- if (CFP && CFP->isExactlyValue(0.5))
+ else if (match(I->getOperand(1), m_SpecificFP(0.5)))
Y = I->getOperand(0);
}
+static bool isFiniteNonZeroFp(Constant *C) {
+ if (C->getType()->isVectorTy()) {
+ for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E;
+ ++I) {
+ ConstantFP *CFP = dyn_cast<ConstantFP>(C->getAggregateElement(I));
+ if (!CFP || !CFP->getValueAPF().isFiniteNonZero())
+ return false;
+ }
+ return true;
+ }
+
+ return isa<ConstantFP>(C) &&
+ cast<ConstantFP>(C)->getValueAPF().isFiniteNonZero();
+}
+
+static bool isNormalFp(Constant *C) {
+ if (C->getType()->isVectorTy()) {
+ for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E;
+ ++I) {
+ ConstantFP *CFP = dyn_cast<ConstantFP>(C->getAggregateElement(I));
+ if (!CFP || !CFP->getValueAPF().isNormal())
+ return false;
+ }
+ return true;
+ }
+
+ return isa<ConstantFP>(C) && cast<ConstantFP>(C)->getValueAPF().isNormal();
+}
+
/// Helper function of InstCombiner::visitFMul(BinaryOperator(). It returns
/// true iff the given value is FMul or FDiv with one and only one operand
/// being a normal constant (i.e. not Zero/NaN/Infinity).
@@ -332,19 +366,13 @@ static bool isFMulOrFDivWithConstant(Value *V) {
I->getOpcode() != Instruction::FDiv))
return false;
- ConstantFP *C0 = dyn_cast<ConstantFP>(I->getOperand(0));
- ConstantFP *C1 = dyn_cast<ConstantFP>(I->getOperand(1));
+ Constant *C0 = dyn_cast<Constant>(I->getOperand(0));
+ Constant *C1 = dyn_cast<Constant>(I->getOperand(1));
if (C0 && C1)
return false;
- return (C0 && C0->getValueAPF().isFiniteNonZero()) ||
- (C1 && C1->getValueAPF().isFiniteNonZero());
-}
-
-static bool isNormalFp(const ConstantFP *C) {
- const APFloat &Flt = C->getValueAPF();
- return Flt.isNormal();
+ return (C0 && isFiniteNonZeroFp(C0)) || (C1 && isFiniteNonZeroFp(C1));
}
/// foldFMulConst() is a helper routine of InstCombiner::visitFMul().
@@ -354,41 +382,41 @@ static bool isNormalFp(const ConstantFP *C) {
/// resulting expression. Note that this function could return NULL in
/// case the constants cannot be folded into a normal floating-point.
///
-Value *InstCombiner::foldFMulConst(Instruction *FMulOrDiv, ConstantFP *C,
+Value *InstCombiner::foldFMulConst(Instruction *FMulOrDiv, Constant *C,
Instruction *InsertBefore) {
assert(isFMulOrFDivWithConstant(FMulOrDiv) && "V is invalid");
Value *Opnd0 = FMulOrDiv->getOperand(0);
Value *Opnd1 = FMulOrDiv->getOperand(1);
- ConstantFP *C0 = dyn_cast<ConstantFP>(Opnd0);
- ConstantFP *C1 = dyn_cast<ConstantFP>(Opnd1);
+ Constant *C0 = dyn_cast<Constant>(Opnd0);
+ Constant *C1 = dyn_cast<Constant>(Opnd1);
- BinaryOperator *R = 0;
+ BinaryOperator *R = nullptr;
// (X * C0) * C => X * (C0*C)
if (FMulOrDiv->getOpcode() == Instruction::FMul) {
Constant *F = ConstantExpr::getFMul(C1 ? C1 : C0, C);
- if (isNormalFp(cast<ConstantFP>(F)))
+ if (isNormalFp(F))
R = BinaryOperator::CreateFMul(C1 ? Opnd0 : Opnd1, F);
} else {
if (C0) {
// (C0 / X) * C => (C0 * C) / X
if (FMulOrDiv->hasOneUse()) {
// It would otherwise introduce another div.
- ConstantFP *F = cast<ConstantFP>(ConstantExpr::getFMul(C0, C));
+ Constant *F = ConstantExpr::getFMul(C0, C);
if (isNormalFp(F))
R = BinaryOperator::CreateFDiv(F, Opnd1);
}
} else {
// (X / C1) * C => X * (C/C1) if C/C1 is not a denormal
- ConstantFP *F = cast<ConstantFP>(ConstantExpr::getFDiv(C, C1));
+ Constant *F = ConstantExpr::getFDiv(C, C1);
if (isNormalFp(F)) {
R = BinaryOperator::CreateFMul(Opnd0, F);
} else {
// (X / C1) * C => X / (C1/C)
Constant *F = ConstantExpr::getFDiv(C1, C);
- if (isNormalFp(cast<ConstantFP>(F)))
+ if (isNormalFp(F))
R = BinaryOperator::CreateFDiv(Opnd0, F);
}
}
@@ -406,10 +434,13 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
bool Changed = SimplifyAssociativeOrCommutative(I);
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
+ if (Value *V = SimplifyVectorOp(I))
+ return ReplaceInstUsesWith(I, V);
+
if (isa<Constant>(Op0))
std::swap(Op0, Op1);
- if (Value *V = SimplifyFMulInst(Op0, Op1, I.getFastMathFlags(), TD))
+ if (Value *V = SimplifyFMulInst(Op0, Op1, I.getFastMathFlags(), DL))
return ReplaceInstUsesWith(I, V);
bool AllowReassociate = I.hasUnsafeAlgebra();
@@ -425,17 +456,23 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
if (Instruction *NV = FoldOpIntoPhi(I))
return NV;
- ConstantFP *C = dyn_cast<ConstantFP>(Op1);
- if (C && AllowReassociate && C->getValueAPF().isFiniteNonZero()) {
+ // (fmul X, -1.0) --> (fsub -0.0, X)
+ if (match(Op1, m_SpecificFP(-1.0))) {
+ Constant *NegZero = ConstantFP::getNegativeZero(Op1->getType());
+ Instruction *RI = BinaryOperator::CreateFSub(NegZero, Op0);
+ RI->copyFastMathFlags(&I);
+ return RI;
+ }
+
+ Constant *C = cast<Constant>(Op1);
+ if (AllowReassociate && isFiniteNonZeroFp(C)) {
// Let MDC denote an expression in one of these forms:
// X * C, C/X, X/C, where C is a constant.
//
// Try to simplify "MDC * Constant"
- if (isFMulOrFDivWithConstant(Op0)) {
- Value *V = foldFMulConst(cast<Instruction>(Op0), C, &I);
- if (V)
+ if (isFMulOrFDivWithConstant(Op0))
+ if (Value *V = foldFMulConst(cast<Instruction>(Op0), C, &I))
return ReplaceInstUsesWith(I, V);
- }
// (MDC +/- C1) * C => (MDC * C) +/- (C1 * C)
Instruction *FAddSub = dyn_cast<Instruction>(Op0);
@@ -444,8 +481,8 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
FAddSub->getOpcode() == Instruction::FSub)) {
Value *Opnd0 = FAddSub->getOperand(0);
Value *Opnd1 = FAddSub->getOperand(1);
- ConstantFP *C0 = dyn_cast<ConstantFP>(Opnd0);
- ConstantFP *C1 = dyn_cast<ConstantFP>(Opnd1);
+ Constant *C0 = dyn_cast<Constant>(Opnd0);
+ Constant *C1 = dyn_cast<Constant>(Opnd1);
bool Swap = false;
if (C0) {
std::swap(C0, C1);
@@ -453,12 +490,11 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
Swap = true;
}
- if (C1 && C1->getValueAPF().isFiniteNonZero() &&
- isFMulOrFDivWithConstant(Opnd0)) {
+ if (C1 && isFiniteNonZeroFp(C1) && isFMulOrFDivWithConstant(Opnd0)) {
Value *M1 = ConstantExpr::getFMul(C1, C);
- Value *M0 = isNormalFp(cast<ConstantFP>(M1)) ?
+ Value *M0 = isNormalFp(cast<Constant>(M1)) ?
foldFMulConst(cast<Instruction>(Opnd0), C, &I) :
- 0;
+ nullptr;
if (M0 && M1) {
if (Swap && FAddSub->getOpcode() == Instruction::FSub)
std::swap(M0, M1);
@@ -478,8 +514,8 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
// Under unsafe algebra do:
// X * log2(0.5*Y) = X*log2(Y) - X
if (I.hasUnsafeAlgebra()) {
- Value *OpX = NULL;
- Value *OpY = NULL;
+ Value *OpX = nullptr;
+ Value *OpY = nullptr;
IntrinsicInst *Log2;
detectLog2OfHalf(Op0, OpY, Log2);
if (OpY) {
@@ -515,8 +551,11 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
Value *N1 = dyn_castFNegVal(Opnd1, IgnoreZeroSign);
// -X * -Y => X*Y
- if (N1)
- return BinaryOperator::CreateFMul(N0, N1);
+ if (N1) {
+ Value *FMul = Builder->CreateFMul(N0, N1);
+ FMul->takeName(&I);
+ return ReplaceInstUsesWith(I, FMul);
+ }
if (Opnd0->hasOneUse()) {
// -X * Y => -(X*Y) (Promote negation as high as possible)
@@ -539,7 +578,7 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
Value *Opnd0_0, *Opnd0_1;
if (Opnd0->hasOneUse() &&
match(Opnd0, m_FMul(m_Value(Opnd0_0), m_Value(Opnd0_1)))) {
- Value *Y = 0;
+ Value *Y = nullptr;
if (Opnd0_0 == Opnd1 && Opnd0_1 != Opnd1)
Y = Opnd0_1;
else if (Opnd0_1 == Opnd1 && Opnd0_0 != Opnd1)
@@ -564,7 +603,8 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
if (!match(RHS, m_UIToFP(m_Value(C))))
std::swap(LHS, RHS);
- if (match(RHS, m_UIToFP(m_Value(C))) && C->getType()->isIntegerTy(1)) {
+ if (match(RHS, m_UIToFP(m_Value(C))) &&
+ C->getType()->getScalarType()->isIntegerTy(1)) {
B = LHS;
Value *Zero = ConstantFP::getNegativeZero(B->getType());
return SelectInst::Create(C, B, Zero);
@@ -579,7 +619,7 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
std::swap(LHS, RHS);
if (match(RHS, m_FSub(m_FPOne(), m_UIToFP(m_Value(C)))) &&
- C->getType()->isIntegerTy(1)) {
+ C->getType()->getScalarType()->isIntegerTy(1)) {
A = LHS;
Value *Zero = ConstantFP::getNegativeZero(A->getType());
return SelectInst::Create(C, Zero, A);
@@ -592,7 +632,7 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
break;
}
- return Changed ? &I : 0;
+ return Changed ? &I : nullptr;
}
/// SimplifyDivRemOfSelect - Try to fold a divide or remainder of a select
@@ -653,12 +693,12 @@ bool InstCombiner::SimplifyDivRemOfSelect(BinaryOperator &I) {
// If we past the instruction, quit looking for it.
if (&*BBI == SI)
- SI = 0;
+ SI = nullptr;
if (&*BBI == SelectCond)
- SelectCond = 0;
+ SelectCond = nullptr;
// If we ran out of things to eliminate, break out of the loop.
- if (SelectCond == 0 && SI == 0)
+ if (!SelectCond && !SI)
break;
}
@@ -690,7 +730,7 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
if (Instruction::BinaryOps(LHS->getOpcode()) == I.getOpcode())
if (ConstantInt *LHSRHS = dyn_cast<ConstantInt>(LHS->getOperand(1))) {
if (MultiplyOverflows(RHS, LHSRHS,
- I.getOpcode()==Instruction::SDiv))
+ I.getOpcode() == Instruction::SDiv))
return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
return BinaryOperator::Create(I.getOpcode(), LHS->getOperand(0),
ConstantExpr::getMul(RHS, LHSRHS));
@@ -706,12 +746,31 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
}
}
+ if (ConstantInt *One = dyn_cast<ConstantInt>(Op0)) {
+ if (One->isOne() && !I.getType()->isIntegerTy(1)) {
+ bool isSigned = I.getOpcode() == Instruction::SDiv;
+ if (isSigned) {
+ // If Op1 is 0 then it's undefined behaviour, if Op1 is 1 then the
+ // result is one, if Op1 is -1 then the result is minus one, otherwise
+ // it's zero.
+ Value *Inc = Builder->CreateAdd(Op1, One);
+ Value *Cmp = Builder->CreateICmpULT(
+ Inc, ConstantInt::get(I.getType(), 3));
+ return SelectInst::Create(Cmp, Op1, ConstantInt::get(I.getType(), 0));
+ } else {
+ // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
+ // result is one, otherwise it's zero.
+ return new ZExtInst(Builder->CreateICmpEQ(Op1, One), I.getType());
+ }
+ }
+ }
+
// See if we can fold away this div instruction.
if (SimplifyDemandedInstructionBits(I))
return &I;
// (X - (X rem Y)) / Y -> X / Y; usually originates as ((X / Y) * Y) / Y
- Value *X = 0, *Z = 0;
+ Value *X = nullptr, *Z = nullptr;
if (match(Op0, m_Sub(m_Value(X), m_Value(Z)))) { // (X - Z) / Y; Y = Op1
bool isSigned = I.getOpcode() == Instruction::SDiv;
if ((isSigned && match(Z, m_SRem(m_Specific(X), m_Specific(Op1)))) ||
@@ -719,7 +778,7 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
return BinaryOperator::Create(I.getOpcode(), X, Op1);
}
- return 0;
+ return nullptr;
}
/// dyn_castZExtVal - Checks if V is a zext or constant that can
@@ -732,7 +791,7 @@ static Value *dyn_castZExtVal(Value *V, Type *Ty) {
if (C->getValue().getActiveBits() <= cast<IntegerType>(Ty)->getBitWidth())
return ConstantExpr::getTrunc(C, Ty);
}
- return 0;
+ return nullptr;
}
namespace {
@@ -757,7 +816,7 @@ struct UDivFoldAction {
};
UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand)
- : FoldAction(FA), OperandToFold(InputOperand), FoldResult(0) {}
+ : FoldAction(FA), OperandToFold(InputOperand), FoldResult(nullptr) {}
UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand, size_t SLHS)
: FoldAction(FA), OperandToFold(InputOperand), SelectLHSIdx(SLHS) {}
};
@@ -836,7 +895,8 @@ static size_t visitUDivOperand(Value *Op0, Value *Op1, const BinaryOperator &I,
if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
if (size_t LHSIdx = visitUDivOperand(Op0, SI->getOperand(1), I, Actions))
if (visitUDivOperand(Op0, SI->getOperand(2), I, Actions)) {
- Actions.push_back(UDivFoldAction((FoldUDivOperandCb)0, Op1, LHSIdx-1));
+ Actions.push_back(UDivFoldAction((FoldUDivOperandCb)nullptr, Op1,
+ LHSIdx-1));
return Actions.size();
}
@@ -846,7 +906,10 @@ static size_t visitUDivOperand(Value *Op0, Value *Op1, const BinaryOperator &I,
Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyUDivInst(Op0, Op1, TD))
+ if (Value *V = SimplifyVectorOp(I))
+ return ReplaceInstUsesWith(I, V);
+
+ if (Value *V = SimplifyUDivInst(Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
// Handle the integer div common cases
@@ -854,13 +917,11 @@ Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
return Common;
// (x lshr C1) udiv C2 --> x udiv (C2 << C1)
- if (ConstantInt *C2 = dyn_cast<ConstantInt>(Op1)) {
+ if (Constant *C2 = dyn_cast<Constant>(Op1)) {
Value *X;
- ConstantInt *C1;
- if (match(Op0, m_LShr(m_Value(X), m_ConstantInt(C1)))) {
- APInt NC = C2->getValue().shl(C1->getLimitedValue(C1->getBitWidth()-1));
- return BinaryOperator::CreateUDiv(X, Builder->getInt(NC));
- }
+ Constant *C1;
+ if (match(Op0, m_LShr(m_Value(X), m_Constant(C1))))
+ return BinaryOperator::CreateUDiv(X, ConstantExpr::getShl(C2, C1));
}
// (zext A) udiv (zext B) --> zext (A udiv B)
@@ -901,24 +962,27 @@ Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
return Inst;
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifySDivInst(Op0, Op1, TD))
+ if (Value *V = SimplifyVectorOp(I))
+ return ReplaceInstUsesWith(I, V);
+
+ if (Value *V = SimplifySDivInst(Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
// Handle the integer div common cases
if (Instruction *Common = commonIDivTransforms(I))
return Common;
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
- // sdiv X, -1 == -X
- if (RHS->isAllOnesValue())
- return BinaryOperator::CreateNeg(Op0);
+ // sdiv X, -1 == -X
+ if (match(Op1, m_AllOnes()))
+ return BinaryOperator::CreateNeg(Op0);
+ if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
// sdiv X, C --> ashr exact X, log2(C)
if (I.isExact() && RHS->getValue().isNonNegative() &&
RHS->getValue().isPowerOf2()) {
@@ -926,6 +990,12 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
RHS->getValue().exactLogBase2());
return BinaryOperator::CreateExactAShr(Op0, ShAmt, I.getName());
}
+ }
+
+ if (Constant *RHS = dyn_cast<Constant>(Op1)) {
+ // X/INT_MIN -> X == INT_MIN
+ if (RHS->isMinSignedValue())
+ return new ZExtInst(Builder->CreateICmpEQ(Op0, Op1), I.getType());
// -X/C --> X/-C provided the negation doesn't overflow.
if (SubOperator *Sub = dyn_cast<SubOperator>(Op0))
@@ -954,7 +1024,7 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
}
}
- return 0;
+ return nullptr;
}
/// CvtFDivConstToReciprocal tries to convert X/C into X*1/C if C not a special
@@ -965,9 +1035,12 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
/// returned; otherwise, NULL is returned.
///
static Instruction *CvtFDivConstToReciprocal(Value *Dividend,
- ConstantFP *Divisor,
+ Constant *Divisor,
bool AllowReciprocal) {
- const APFloat &FpVal = Divisor->getValueAPF();
+ if (!isa<ConstantFP>(Divisor)) // TODO: handle vectors.
+ return nullptr;
+
+ const APFloat &FpVal = cast<ConstantFP>(Divisor)->getValueAPF();
APFloat Reciprocal(FpVal.getSemantics());
bool Cvt = FpVal.getExactInverse(&Reciprocal);
@@ -978,7 +1051,7 @@ static Instruction *CvtFDivConstToReciprocal(Value *Dividend,
}
if (!Cvt)
- return 0;
+ return nullptr;
ConstantFP *R;
R = ConstantFP::get(Dividend->getType()->getContext(), Reciprocal);
@@ -988,7 +1061,10 @@ static Instruction *CvtFDivConstToReciprocal(Value *Dividend,
Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyFDivInst(Op0, Op1, TD))
+ if (Value *V = SimplifyVectorOp(I))
+ return ReplaceInstUsesWith(I, V);
+
+ if (Value *V = SimplifyFDivInst(Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
if (isa<Constant>(Op0))
@@ -999,32 +1075,29 @@ Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
bool AllowReassociate = I.hasUnsafeAlgebra();
bool AllowReciprocal = I.hasAllowReciprocal();
- if (ConstantFP *Op1C = dyn_cast<ConstantFP>(Op1)) {
+ if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
if (Instruction *R = FoldOpIntoSelect(I, SI))
return R;
if (AllowReassociate) {
- ConstantFP *C1 = 0;
- ConstantFP *C2 = Op1C;
+ Constant *C1 = nullptr;
+ Constant *C2 = Op1C;
Value *X;
- Instruction *Res = 0;
+ Instruction *Res = nullptr;
- if (match(Op0, m_FMul(m_Value(X), m_ConstantFP(C1)))) {
+ if (match(Op0, m_FMul(m_Value(X), m_Constant(C1)))) {
// (X*C1)/C2 => X * (C1/C2)
//
Constant *C = ConstantExpr::getFDiv(C1, C2);
- const APFloat &F = cast<ConstantFP>(C)->getValueAPF();
- if (F.isNormal())
+ if (isNormalFp(C))
Res = BinaryOperator::CreateFMul(X, C);
- } else if (match(Op0, m_FDiv(m_Value(X), m_ConstantFP(C1)))) {
+ } else if (match(Op0, m_FDiv(m_Value(X), m_Constant(C1)))) {
// (X/C1)/C2 => X /(C2*C1) [=> X * 1/(C2*C1) if reciprocal is allowed]
//
Constant *C = ConstantExpr::getFMul(C1, C2);
- const APFloat &F = cast<ConstantFP>(C)->getValueAPF();
- if (F.isNormal()) {
- Res = CvtFDivConstToReciprocal(X, cast<ConstantFP>(C),
- AllowReciprocal);
+ if (isNormalFp(C)) {
+ Res = CvtFDivConstToReciprocal(X, C, AllowReciprocal);
if (!Res)
Res = BinaryOperator::CreateFDiv(X, C);
}
@@ -1037,60 +1110,68 @@ Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
}
// X / C => X * 1/C
- if (Instruction *T = CvtFDivConstToReciprocal(Op0, Op1C, AllowReciprocal))
+ if (Instruction *T = CvtFDivConstToReciprocal(Op0, Op1C, AllowReciprocal)) {
+ T->copyFastMathFlags(&I);
return T;
+ }
- return 0;
+ return nullptr;
}
- if (AllowReassociate && isa<ConstantFP>(Op0)) {
- ConstantFP *C1 = cast<ConstantFP>(Op0), *C2;
- Constant *Fold = 0;
+ if (AllowReassociate && isa<Constant>(Op0)) {
+ Constant *C1 = cast<Constant>(Op0), *C2;
+ Constant *Fold = nullptr;
Value *X;
bool CreateDiv = true;
// C1 / (X*C2) => (C1/C2) / X
- if (match(Op1, m_FMul(m_Value(X), m_ConstantFP(C2))))
+ if (match(Op1, m_FMul(m_Value(X), m_Constant(C2))))
Fold = ConstantExpr::getFDiv(C1, C2);
- else if (match(Op1, m_FDiv(m_Value(X), m_ConstantFP(C2)))) {
+ else if (match(Op1, m_FDiv(m_Value(X), m_Constant(C2)))) {
// C1 / (X/C2) => (C1*C2) / X
Fold = ConstantExpr::getFMul(C1, C2);
- } else if (match(Op1, m_FDiv(m_ConstantFP(C2), m_Value(X)))) {
+ } else if (match(Op1, m_FDiv(m_Constant(C2), m_Value(X)))) {
// C1 / (C2/X) => (C1/C2) * X
Fold = ConstantExpr::getFDiv(C1, C2);
CreateDiv = false;
}
- if (Fold) {
- const APFloat &FoldC = cast<ConstantFP>(Fold)->getValueAPF();
- if (FoldC.isNormal()) {
- Instruction *R = CreateDiv ?
- BinaryOperator::CreateFDiv(Fold, X) :
- BinaryOperator::CreateFMul(X, Fold);
- R->setFastMathFlags(I.getFastMathFlags());
- return R;
- }
+ if (Fold && isNormalFp(Fold)) {
+ Instruction *R = CreateDiv ? BinaryOperator::CreateFDiv(Fold, X)
+ : BinaryOperator::CreateFMul(X, Fold);
+ R->setFastMathFlags(I.getFastMathFlags());
+ return R;
}
- return 0;
+ return nullptr;
}
if (AllowReassociate) {
Value *X, *Y;
- Value *NewInst = 0;
- Instruction *SimpR = 0;
+ Value *NewInst = nullptr;
+ Instruction *SimpR = nullptr;
if (Op0->hasOneUse() && match(Op0, m_FDiv(m_Value(X), m_Value(Y)))) {
// (X/Y) / Z => X / (Y*Z)
//
- if (!isa<ConstantFP>(Y) || !isa<ConstantFP>(Op1)) {
+ if (!isa<Constant>(Y) || !isa<Constant>(Op1)) {
NewInst = Builder->CreateFMul(Y, Op1);
+ if (Instruction *RI = dyn_cast<Instruction>(NewInst)) {
+ FastMathFlags Flags = I.getFastMathFlags();
+ Flags &= cast<Instruction>(Op0)->getFastMathFlags();
+ RI->setFastMathFlags(Flags);
+ }
SimpR = BinaryOperator::CreateFDiv(X, NewInst);
}
} else if (Op1->hasOneUse() && match(Op1, m_FDiv(m_Value(X), m_Value(Y)))) {
// Z / (X/Y) => Z*Y / X
//
- if (!isa<ConstantFP>(Y) || !isa<ConstantFP>(Op0)) {
+ if (!isa<Constant>(Y) || !isa<Constant>(Op0)) {
NewInst = Builder->CreateFMul(Op0, Y);
+ if (Instruction *RI = dyn_cast<Instruction>(NewInst)) {
+ FastMathFlags Flags = I.getFastMathFlags();
+ Flags &= cast<Instruction>(Op1)->getFastMathFlags();
+ RI->setFastMathFlags(Flags);
+ }
SimpR = BinaryOperator::CreateFDiv(NewInst, X);
}
}
@@ -1103,7 +1184,7 @@ Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
}
}
- return 0;
+ return nullptr;
}
/// This function implements the transforms common to both integer remainder
@@ -1123,7 +1204,7 @@ Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I))
return &I;
- if (isa<ConstantInt>(Op1)) {
+ if (isa<Constant>(Op1)) {
if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
if (Instruction *R = FoldOpIntoSelect(I, SI))
@@ -1139,13 +1220,16 @@ Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
}
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitURem(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyURemInst(Op0, Op1, TD))
+ if (Value *V = SimplifyVectorOp(I))
+ return ReplaceInstUsesWith(I, V);
+
+ if (Value *V = SimplifyURemInst(Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
if (Instruction *common = commonIRemTransforms(I))
@@ -1171,13 +1255,16 @@ Instruction *InstCombiner::visitURem(BinaryOperator &I) {
return ReplaceInstUsesWith(I, Ext);
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifySRemInst(Op0, Op1, TD))
+ if (Value *V = SimplifyVectorOp(I))
+ return ReplaceInstUsesWith(I, V);
+
+ if (Value *V = SimplifySRemInst(Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
// Handle the integer rem common cases
@@ -1213,7 +1300,7 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
bool hasMissing = false;
for (unsigned i = 0; i != VWidth; ++i) {
Constant *Elt = C->getAggregateElement(i);
- if (Elt == 0) {
+ if (!Elt) {
hasMissing = true;
break;
}
@@ -1242,18 +1329,21 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
}
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyFRemInst(Op0, Op1, TD))
+ if (Value *V = SimplifyVectorOp(I))
+ return ReplaceInstUsesWith(I, V);
+
+ if (Value *V = SimplifyFRemInst(Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
// Handle cases involving: rem X, (select Cond, Y, Z)
if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I))
return &I;
- return 0;
+ return nullptr;
}
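Among the InstCombineMulDivRem.cpp changes above, commonIDivTransforms() gains a fold for a constant-one dividend: "udiv 1, X" becomes zext(X == 1), and "sdiv 1, X" becomes a select driven by the unsigned test (X + 1) u< 3, since the signed quotient is 1 for X == 1, -1 for X == -1, and 0 for any other non-zero X (X == 0 is undefined behaviour). The snippet below merely restates that arithmetic in plain C++ as a sanity check of the reasoning; it is not code from the patch.

#include <cassert>
#include <cstdint>

// What "sdiv 1, D" folds to: the patch selects D itself when (D + 1) u< 3,
// i.e. when D is 1 or -1, and 0 otherwise.
int32_t OneSDiv(int32_t D) {
  uint32_t Inc = static_cast<uint32_t>(D) + 1u;
  return Inc < 3u ? D : 0;
}

// What "udiv 1, D" folds to: zext(D == 1).
uint32_t OneUDiv(uint32_t D) {
  return D == 1u ? 1u : 0u;
}

int main() {
  assert(OneSDiv(1) == 1 / 1);
  assert(OneSDiv(-1) == 1 / -1);
  assert(OneSDiv(7) == 1 / 7);
  assert(OneSDiv(-5) == 1 / -5);
  assert(OneUDiv(1u) == 1u / 1u);
  assert(OneUDiv(9u) == 1u / 9u);
  return 0;
}
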
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
index 4c6d0c4..46f7b8a 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -18,6 +18,8 @@
#include "llvm/IR/DataLayout.h"
using namespace llvm;
+#define DEBUG_TYPE "instcombine"
+
/// FoldPHIArgBinOpIntoPHI - If we have something like phi [add (a,b), add(a,c)]
/// and if a/b/c and the add's all have a single use, turn this into a phi
/// and a single binop.
@@ -48,12 +50,12 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
// types.
I->getOperand(0)->getType() != LHSType ||
I->getOperand(1)->getType() != RHSType)
- return 0;
+ return nullptr;
// If they are CmpInst instructions, check their predicates
if (CmpInst *CI = dyn_cast<CmpInst>(I))
if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
- return 0;
+ return nullptr;
if (isNUW)
isNUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap();
@@ -63,8 +65,8 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
isExact = cast<PossiblyExactOperator>(I)->isExact();
// Keep track of which operand needs a phi node.
- if (I->getOperand(0) != LHSVal) LHSVal = 0;
- if (I->getOperand(1) != RHSVal) RHSVal = 0;
+ if (I->getOperand(0) != LHSVal) LHSVal = nullptr;
+ if (I->getOperand(1) != RHSVal) RHSVal = nullptr;
}
// If both LHS and RHS would need a PHI, don't do this transformation,
@@ -72,14 +74,14 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
// which leads to higher register pressure. This is especially
// bad when the PHIs are in the header of a loop.
if (!LHSVal && !RHSVal)
- return 0;
+ return nullptr;
// Otherwise, this is safe to transform!
Value *InLHS = FirstInst->getOperand(0);
Value *InRHS = FirstInst->getOperand(1);
- PHINode *NewLHS = 0, *NewRHS = 0;
- if (LHSVal == 0) {
+ PHINode *NewLHS = nullptr, *NewRHS = nullptr;
+ if (!LHSVal) {
NewLHS = PHINode::Create(LHSType, PN.getNumIncomingValues(),
FirstInst->getOperand(0)->getName() + ".pn");
NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
@@ -87,7 +89,7 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
LHSVal = NewLHS;
}
- if (RHSVal == 0) {
+ if (!RHSVal) {
NewRHS = PHINode::Create(RHSType, PN.getNumIncomingValues(),
FirstInst->getOperand(1)->getName() + ".pn");
NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
@@ -148,7 +150,7 @@ Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
GetElementPtrInst *GEP= dyn_cast<GetElementPtrInst>(PN.getIncomingValue(i));
if (!GEP || !GEP->hasOneUse() || GEP->getType() != FirstInst->getType() ||
GEP->getNumOperands() != FirstInst->getNumOperands())
- return 0;
+ return nullptr;
AllInBounds &= GEP->isInBounds();
@@ -170,19 +172,19 @@ Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
// for struct indices, which must always be constant.
if (isa<ConstantInt>(FirstInst->getOperand(op)) ||
isa<ConstantInt>(GEP->getOperand(op)))
- return 0;
+ return nullptr;
if (FirstInst->getOperand(op)->getType() !=GEP->getOperand(op)->getType())
- return 0;
+ return nullptr;
// If we already needed a PHI for an earlier operand, and another operand
// also requires a PHI, we'd be introducing more PHIs than we're
// eliminating, which increases register pressure on entry to the PHI's
// block.
if (NeededPhi)
- return 0;
+ return nullptr;
- FixedOperands[op] = 0; // Needs a PHI.
+ FixedOperands[op] = nullptr; // Needs a PHI.
NeededPhi = true;
}
}
@@ -194,7 +196,7 @@ Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
// load up into the predecessors so that we have a load of a gep of an alloca,
// which can usually all be folded into the load.
if (AllBasePointersAreAllocas)
- return 0;
+ return nullptr;
// Otherwise, this is safe to transform. Insert PHI nodes for each operand
// that is variable.
@@ -255,9 +257,7 @@ static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
// profitable to do this xform.
if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
bool isAddressTaken = false;
- for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
- UI != E; ++UI) {
- User *U = *UI;
+ for (User *U : AI->users()) {
if (isa<LoadInst>(U)) continue;
if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
// If storing TO the alloca, then the address isn't taken.
@@ -290,7 +290,7 @@ Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
// FIXME: This is overconservative; this transform is allowed in some cases
// for atomic operations.
if (FirstLI->isAtomic())
- return 0;
+ return nullptr;
// When processing loads, we need to propagate two bits of information to the
// sunk load: whether it is volatile, and what its alignment is. We currently
@@ -305,20 +305,20 @@ Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
// load and the PHI.
if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
!isSafeAndProfitableToSinkLoad(FirstLI))
- return 0;
+ return nullptr;
// If the PHI is of volatile loads and the load block has multiple
// successors, sinking it would remove a load of the volatile value from
// the path through the other successor.
if (isVolatile &&
FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
- return 0;
+ return nullptr;
// Check to see if all arguments are the same operation.
for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
LoadInst *LI = dyn_cast<LoadInst>(PN.getIncomingValue(i));
if (!LI || !LI->hasOneUse())
- return 0;
+ return nullptr;
// We can't sink the load if the loaded value could be modified between
// the load and the PHI.
@@ -326,12 +326,12 @@ Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
LI->getParent() != PN.getIncomingBlock(i) ||
LI->getPointerAddressSpace() != LoadAddrSpace ||
!isSafeAndProfitableToSinkLoad(LI))
- return 0;
+ return nullptr;
// If some of the loads have an alignment specified but not all of them,
// we can't do the transformation.
if ((LoadAlignment != 0) != (LI->getAlignment() != 0))
- return 0;
+ return nullptr;
LoadAlignment = std::min(LoadAlignment, LI->getAlignment());
@@ -340,7 +340,7 @@ Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
// the path through the other successor.
if (isVolatile &&
LI->getParent()->getTerminator()->getNumSuccessors() != 1)
- return 0;
+ return nullptr;
}
// Okay, they are all the same operation. Create a new PHI node of the
@@ -356,7 +356,7 @@ Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
Value *NewInVal = cast<LoadInst>(PN.getIncomingValue(i))->getOperand(0);
if (NewInVal != InVal)
- InVal = 0;
+ InVal = nullptr;
NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
}
@@ -400,8 +400,8 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
// If all input operands to the phi are the same instruction (e.g. a cast from
// the same type or "+42") we can pull the operation through the PHI, reducing
// code size and simplifying code.
- Constant *ConstantOp = 0;
- Type *CastSrcTy = 0;
+ Constant *ConstantOp = nullptr;
+ Type *CastSrcTy = nullptr;
bool isNUW = false, isNSW = false, isExact = false;
if (isa<CastInst>(FirstInst)) {
@@ -411,13 +411,13 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
// the code by turning an i32 into an i1293.
if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
if (!ShouldChangeType(PN.getType(), CastSrcTy))
- return 0;
+ return nullptr;
}
} else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
// Can fold binop, compare or shift here if the RHS is a constant,
// otherwise call FoldPHIArgBinOpIntoPHI.
ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
- if (ConstantOp == 0)
+ if (!ConstantOp)
return FoldPHIArgBinOpIntoPHI(PN);
if (OverflowingBinaryOperator *BO =
@@ -428,19 +428,19 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
dyn_cast<PossiblyExactOperator>(FirstInst))
isExact = PEO->isExact();
} else {
- return 0; // Cannot fold this operation.
+ return nullptr; // Cannot fold this operation.
}
// Check to see if all arguments are the same operation.
for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
- if (I == 0 || !I->hasOneUse() || !I->isSameOperationAs(FirstInst))
- return 0;
+ if (!I || !I->hasOneUse() || !I->isSameOperationAs(FirstInst))
+ return nullptr;
if (CastSrcTy) {
if (I->getOperand(0)->getType() != CastSrcTy)
- return 0; // Cast operation must match.
+ return nullptr; // Cast operation must match.
} else if (I->getOperand(1) != ConstantOp) {
- return 0;
+ return nullptr;
}
if (isNUW)
@@ -464,7 +464,7 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
Value *NewInVal = cast<Instruction>(PN.getIncomingValue(i))->getOperand(0);
if (NewInVal != InVal)
- InVal = 0;
+ InVal = nullptr;
NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
}
@@ -518,7 +518,7 @@ static bool DeadPHICycle(PHINode *PN,
if (PotentiallyDeadPHIs.size() == 16)
return false;
- if (PHINode *PU = dyn_cast<PHINode>(PN->use_back()))
+ if (PHINode *PU = dyn_cast<PHINode>(PN->user_back()))
return DeadPHICycle(PU, PotentiallyDeadPHIs);
return false;
@@ -589,10 +589,10 @@ namespace llvm {
template<>
struct DenseMapInfo<LoweredPHIRecord> {
static inline LoweredPHIRecord getEmptyKey() {
- return LoweredPHIRecord(0, 0);
+ return LoweredPHIRecord(nullptr, 0);
}
static inline LoweredPHIRecord getTombstoneKey() {
- return LoweredPHIRecord(0, 1);
+ return LoweredPHIRecord(nullptr, 1);
}
static unsigned getHashValue(const LoweredPHIRecord &Val) {
return DenseMapInfo<PHINode*>::getHashValue(Val.PN) ^ (Val.Shift>>3) ^
@@ -639,42 +639,40 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
// bail out.
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
InvokeInst *II = dyn_cast<InvokeInst>(PN->getIncomingValue(i));
- if (II == 0) continue;
+ if (!II) continue;
if (II->getParent() != PN->getIncomingBlock(i))
continue;
// If we have a phi, and if it's directly in the predecessor, then we have
// a critical edge where we need to put the truncate. Since we can't
// split the edge in instcombine, we have to bail out.
- return 0;
+ return nullptr;
}
-
- for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
- UI != E; ++UI) {
- Instruction *User = cast<Instruction>(*UI);
+ for (User *U : PN->users()) {
+ Instruction *UserI = cast<Instruction>(U);
// If the user is a PHI, inspect its uses recursively.
- if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
+ if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
if (PHIsInspected.insert(UserPN))
PHIsToSlice.push_back(UserPN);
continue;
}
// Truncates are always ok.
- if (isa<TruncInst>(User)) {
- PHIUsers.push_back(PHIUsageRecord(PHIId, 0, User));
+ if (isa<TruncInst>(UserI)) {
+ PHIUsers.push_back(PHIUsageRecord(PHIId, 0, UserI));
continue;
}
// Otherwise it must be a lshr which can only be used by one trunc.
- if (User->getOpcode() != Instruction::LShr ||
- !User->hasOneUse() || !isa<TruncInst>(User->use_back()) ||
- !isa<ConstantInt>(User->getOperand(1)))
- return 0;
+ if (UserI->getOpcode() != Instruction::LShr ||
+ !UserI->hasOneUse() || !isa<TruncInst>(UserI->user_back()) ||
+ !isa<ConstantInt>(UserI->getOperand(1)))
+ return nullptr;
- unsigned Shift = cast<ConstantInt>(User->getOperand(1))->getZExtValue();
- PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, User->use_back()));
+ unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
+ PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, UserI->user_back()));
}
}
@@ -709,7 +707,7 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
// If we've already lowered a user like this, reuse the previously lowered
// value.
- if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == 0) {
+ if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == nullptr) {
// Otherwise, Create the new PHI node for this user.
EltPHI = PHINode::Create(Ty, PN->getNumIncomingValues(),
@@ -790,7 +788,7 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
// PHINode simplification
//
Instruction *InstCombiner::visitPHINode(PHINode &PN) {
- if (Value *V = SimplifyInstruction(&PN, TD, TLI))
+ if (Value *V = SimplifyInstruction(&PN, DL, TLI))
return ReplaceInstUsesWith(PN, V);
// If all PHI operands are the same operation, pull them through the PHI,
@@ -809,7 +807,7 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) {
// this PHI only has a single use (a PHI), and if that PHI only has one use (a
// PHI)... break the cycle.
if (PN.hasOneUse()) {
- Instruction *PHIUser = cast<Instruction>(PN.use_back());
+ Instruction *PHIUser = cast<Instruction>(PN.user_back());
if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
PotentiallyDeadPHIs.insert(&PN);
@@ -825,7 +823,7 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) {
// late.
if (PHIUser->hasOneUse() &&
(isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
- PHIUser->use_back() == &PN) {
+ PHIUser->user_back() == &PN) {
return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
}
}
@@ -893,10 +891,10 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) {
// it is only used by trunc or trunc(lshr) operations. If so, we split the
// PHI into the various pieces being extracted. This sort of thing is
// introduced when SROA promotes an aggregate to a single large integer type.
- if (PN.getType()->isIntegerTy() && TD &&
- !TD->isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
+ if (PN.getType()->isIntegerTy() && DL &&
+ !DL->isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
return Res;
- return 0;
+ return nullptr;
}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 283bec2..06c9e29 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -14,28 +14,35 @@
#include "InstCombine.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Support/PatternMatch.h"
+#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;
+#define DEBUG_TYPE "instcombine"
+
/// MatchSelectPattern - Pattern match integer [SU]MIN, [SU]MAX, and ABS idioms,
/// returning the kind and providing the out parameter results if we
/// successfully match.
static SelectPatternFlavor
MatchSelectPattern(Value *V, Value *&LHS, Value *&RHS) {
SelectInst *SI = dyn_cast<SelectInst>(V);
- if (SI == 0) return SPF_UNKNOWN;
+ if (!SI) return SPF_UNKNOWN;
ICmpInst *ICI = dyn_cast<ICmpInst>(SI->getCondition());
- if (ICI == 0) return SPF_UNKNOWN;
+ if (!ICI) return SPF_UNKNOWN;
+
+ ICmpInst::Predicate Pred = ICI->getPredicate();
+ Value *CmpLHS = ICI->getOperand(0);
+ Value *CmpRHS = ICI->getOperand(1);
+ Value *TrueVal = SI->getTrueValue();
+ Value *FalseVal = SI->getFalseValue();
- LHS = ICI->getOperand(0);
- RHS = ICI->getOperand(1);
+ LHS = CmpLHS;
+ RHS = CmpRHS;
// (icmp X, Y) ? X : Y
- if (SI->getTrueValue() == ICI->getOperand(0) &&
- SI->getFalseValue() == ICI->getOperand(1)) {
- switch (ICI->getPredicate()) {
+ if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
+ switch (Pred) {
default: return SPF_UNKNOWN; // Equality.
case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_UGE: return SPF_UMAX;
@@ -49,18 +56,35 @@ MatchSelectPattern(Value *V, Value *&LHS, Value *&RHS) {
}
// (icmp X, Y) ? Y : X
- if (SI->getTrueValue() == ICI->getOperand(1) &&
- SI->getFalseValue() == ICI->getOperand(0)) {
- switch (ICI->getPredicate()) {
- default: return SPF_UNKNOWN; // Equality.
- case ICmpInst::ICMP_UGT:
- case ICmpInst::ICMP_UGE: return SPF_UMIN;
- case ICmpInst::ICMP_SGT:
- case ICmpInst::ICMP_SGE: return SPF_SMIN;
- case ICmpInst::ICMP_ULT:
- case ICmpInst::ICMP_ULE: return SPF_UMAX;
- case ICmpInst::ICMP_SLT:
- case ICmpInst::ICMP_SLE: return SPF_SMAX;
+ if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
+ switch (Pred) {
+ default: return SPF_UNKNOWN; // Equality.
+ case ICmpInst::ICMP_UGT:
+ case ICmpInst::ICMP_UGE: return SPF_UMIN;
+ case ICmpInst::ICMP_SGT:
+ case ICmpInst::ICMP_SGE: return SPF_SMIN;
+ case ICmpInst::ICMP_ULT:
+ case ICmpInst::ICMP_ULE: return SPF_UMAX;
+ case ICmpInst::ICMP_SLT:
+ case ICmpInst::ICMP_SLE: return SPF_SMAX;
+ }
+ }
+
+ if (ConstantInt *C1 = dyn_cast<ConstantInt>(CmpRHS)) {
+ if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
+ (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {
+
+ // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
+ // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
+ if (Pred == ICmpInst::ICMP_SGT && (C1->isZero() || C1->isMinusOne())) {
+ return (CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS;
+ }
+
+ // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
+ // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
+ if (Pred == ICmpInst::ICMP_SLT && (C1->isZero() || C1->isOne())) {
+ return (CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS;
+ }
}
}
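
The comparison forms accepted above agree on every input, which is what makes them interchangeable spellings of ABS and NABS. A small self-contained check (illustrative values only; -X is undefined solely for INT_MIN, which the test values avoid):

    #include <cassert>
    #include <cstdint>

    // All four spellings compute |X| for ordinary inputs.
    static int32_t absSGT0(int32_t X)  { return (X >  0) ? X : -X; }
    static int32_t absSGTm1(int32_t X) { return (X > -1) ? X : -X; }
    static int32_t absSLT0(int32_t X)  { return (X <  0) ? -X : X; }
    static int32_t absSLT1(int32_t X)  { return (X <  1) ? -X : X; }

    int main() {
      for (int32_t X : {-7, -1, 0, 1, 7}) {
        assert(absSGT0(X) == absSGTm1(X));
        assert(absSGT0(X) == absSLT0(X));
        assert(absSGT0(X) == absSLT1(X));
      }
      return 0;
    }
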
@@ -129,15 +153,15 @@ Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI,
if (TI->isCast()) {
Type *FIOpndTy = FI->getOperand(0)->getType();
if (TI->getOperand(0)->getType() != FIOpndTy)
- return 0;
+ return nullptr;
// The select condition may be a vector. We may only change the operand
// type if the vector width remains the same (and matches the condition).
Type *CondTy = SI.getCondition()->getType();
if (CondTy->isVectorTy() && (!FIOpndTy->isVectorTy() ||
CondTy->getVectorNumElements() != FIOpndTy->getVectorNumElements()))
- return 0;
+ return nullptr;
} else {
- return 0; // unknown unary op.
+ return nullptr; // unknown unary op.
}
// Fold this by inserting a select from the input values.
@@ -149,7 +173,7 @@ Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI,
// Only handle binary operators here.
if (!isa<BinaryOperator>(TI))
- return 0;
+ return nullptr;
// Figure out if the operations have any operands in common.
Value *MatchOp, *OtherOpT, *OtherOpF;
@@ -165,7 +189,7 @@ Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI,
OtherOpF = FI->getOperand(0);
MatchIsOpZero = false;
} else if (!TI->isCommutative()) {
- return 0;
+ return nullptr;
} else if (TI->getOperand(0) == FI->getOperand(1)) {
MatchOp = TI->getOperand(0);
OtherOpT = TI->getOperand(1);
@@ -177,7 +201,7 @@ Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI,
OtherOpF = FI->getOperand(1);
MatchIsOpZero = true;
} else {
- return 0;
+ return nullptr;
}
// If we reach here, they do have operations in common.
@@ -282,7 +306,7 @@ Instruction *InstCombiner::FoldSelectIntoOp(SelectInst &SI, Value *TrueVal,
}
}
- return 0;
+ return nullptr;
}
/// SimplifyWithOpReplaced - See if V simplifies when its operand Op is
@@ -296,7 +320,7 @@ static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
Instruction *I = dyn_cast<Instruction>(V);
if (!I)
- return 0;
+ return nullptr;
// If this is a binary operator, try to simplify it with the replaced op.
if (BinaryOperator *B = dyn_cast<BinaryOperator>(I)) {
@@ -347,7 +371,7 @@ static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
}
}
- return 0;
+ return nullptr;
}
/// foldSelectICmpAndOr - We want to turn:
@@ -363,32 +387,59 @@ static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
/// 1. The icmp predicate is inverted
/// 2. The select operands are reversed
/// 3. The magnitude of C2 and C1 are flipped
-static Value *foldSelectICmpAndOr(const SelectInst &SI, Value *TrueVal,
+///
+/// This also tries to turn
+/// --- Single bit tests:
+/// if ((x & C) == 0) x |= C to x |= C
+/// if ((x & C) != 0) x ^= C to x &= ~C
+/// if ((x & C) == 0) x ^= C to x |= C
+/// if ((x & C) != 0) x &= ~C to x &= ~C
+/// if ((x & C) == 0) x &= ~C to nothing
+static Value *foldSelectICmpAndOr(SelectInst &SI, Value *TrueVal,
Value *FalseVal,
InstCombiner::BuilderTy *Builder) {
const ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition());
if (!IC || !IC->isEquality() || !SI.getType()->isIntegerTy())
- return 0;
+ return nullptr;
Value *CmpLHS = IC->getOperand(0);
Value *CmpRHS = IC->getOperand(1);
if (!match(CmpRHS, m_Zero()))
- return 0;
+ return nullptr;
Value *X;
const APInt *C1;
if (!match(CmpLHS, m_And(m_Value(X), m_Power2(C1))))
- return 0;
+ return nullptr;
const APInt *C2;
+ if (match(TrueVal, m_Specific(X))) {
+ // if ((X & C) != 0) X ^= C becomes X &= ~C
+ if (match(FalseVal, m_Xor(m_Specific(X), m_APInt(C2))) && C1 == C2)
+ return Builder->CreateAnd(X, ~(*C1));
+ // if ((X & C) != 0) X &= ~C becomes X &= ~C
+ if (match(FalseVal, m_And(m_Specific(X), m_APInt(C2))) && *C1 == ~(*C2))
+ return FalseVal;
+ } else if (match(FalseVal, m_Specific(X))) {
+ // if ((X & C) == 0) X ^= C becomes X |= C
+ if (match(TrueVal, m_Xor(m_Specific(X), m_APInt(C2))) && C1 == C2)
+ return Builder->CreateOr(X, *C1);
+ // if ((X & C) == 0) X &= ~C becomes nothing
+ if (match(TrueVal, m_And(m_Specific(X), m_APInt(C2))) && *C1 == ~(*C2))
+ return X;
+ // if ((X & C) == 0) X |= C becomes X |= C
+ if (match(TrueVal, m_Or(m_Specific(X), m_APInt(C2))) && C1 == C2)
+ return TrueVal;
+ }
+
bool OrOnTrueVal = false;
bool OrOnFalseVal = match(FalseVal, m_Or(m_Specific(TrueVal), m_Power2(C2)));
if (!OrOnFalseVal)
OrOnTrueVal = match(TrueVal, m_Or(m_Specific(FalseVal), m_Power2(C2)));
if (!OrOnFalseVal && !OrOnTrueVal)
- return 0;
+ return nullptr;
Value *V = CmpLHS;
Value *Y = OrOnFalseVal ? TrueVal : FalseVal;
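
The single-bit folds listed above are plain bitwise identities when C is a power of two; a minimal standalone check (not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t C = 0x10;                       // a single set bit
      for (uint32_t X : {0u, 0x10u, 0x13u, 0xFFu}) {
        // if ((X & C) != 0) X ^= C   ==>   X &= ~C
        assert((((X & C) != 0) ? (X ^ C) : X) == (X & ~C));
        // if ((X & C) == 0) X ^= C   ==>   X |= C
        assert((((X & C) == 0) ? (X ^ C) : X) == (X | C));
        // if ((X & C) == 0) X &= ~C  ==>   no change
        assert((((X & C) == 0) ? (X & ~C) : X) == X);
        // if ((X & C) == 0) X |= C   ==>   X |= C
        assert((((X & C) == 0) ? (X | C) : X) == (X | C));
      }
      return 0;
    }
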
@@ -527,7 +578,7 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
if (IntegerType *Ty = dyn_cast<IntegerType>(CmpLHS->getType())) {
if (TrueVal->getType() == Ty) {
if (ConstantInt *Cmp = dyn_cast<ConstantInt>(CmpRHS)) {
- ConstantInt *C1 = NULL, *C2 = NULL;
+ ConstantInt *C1 = nullptr, *C2 = nullptr;
if (Pred == ICmpInst::ICMP_SGT && Cmp->isAllOnesValue()) {
C1 = dyn_cast<ConstantInt>(TrueVal);
C2 = dyn_cast<ConstantInt>(FalseVal);
@@ -554,18 +605,18 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
// arms of the select. See if substituting this value into the arm and
// simplifying the result yields the same value as the other arm.
if (Pred == ICmpInst::ICMP_EQ) {
- if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, TD, TLI) == TrueVal ||
- SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, TD, TLI) == TrueVal)
+ if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, DL, TLI) == TrueVal ||
+ SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, DL, TLI) == TrueVal)
return ReplaceInstUsesWith(SI, FalseVal);
- if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, TD, TLI) == FalseVal ||
- SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, TD, TLI) == FalseVal)
+ if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, DL, TLI) == FalseVal ||
+ SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, DL, TLI) == FalseVal)
return ReplaceInstUsesWith(SI, FalseVal);
} else if (Pred == ICmpInst::ICMP_NE) {
- if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, TD, TLI) == FalseVal ||
- SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, TD, TLI) == FalseVal)
+ if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, DL, TLI) == FalseVal ||
+ SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, DL, TLI) == FalseVal)
return ReplaceInstUsesWith(SI, TrueVal);
- if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, TD, TLI) == TrueVal ||
- SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, TD, TLI) == TrueVal)
+ if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, DL, TLI) == TrueVal ||
+ SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, DL, TLI) == TrueVal)
return ReplaceInstUsesWith(SI, TrueVal);
}
@@ -586,7 +637,7 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
if (Value *V = foldSelectICmpAndOr(SI, TrueVal, FalseVal, Builder))
return ReplaceInstUsesWith(SI, V);
- return Changed ? &SI : 0;
+ return Changed ? &SI : nullptr;
}
@@ -606,7 +657,7 @@ static bool CanSelectOperandBeMappingIntoPredBlock(const Value *V,
// If the value is a non-instruction value like a constant or argument, it
// can always be mapped.
const Instruction *I = dyn_cast<Instruction>(V);
- if (I == 0) return true;
+ if (!I) return true;
// If V is a PHI node defined in the same block as the condition PHI, we can
// map the arguments.
@@ -649,10 +700,50 @@ Instruction *InstCombiner::FoldSPFofSPF(Instruction *Inner,
return ReplaceInstUsesWith(Outer, C);
}
- // TODO: MIN(MIN(A, 23), 97)
- return 0;
-}
+ if (SPF1 == SPF2) {
+ if (ConstantInt *CB = dyn_cast<ConstantInt>(B)) {
+ if (ConstantInt *CC = dyn_cast<ConstantInt>(C)) {
+ APInt ACB = CB->getValue();
+ APInt ACC = CC->getValue();
+
+ // MIN(MIN(A, 23), 97) -> MIN(A, 23)
+ // MAX(MAX(A, 97), 23) -> MAX(A, 97)
+ if ((SPF1 == SPF_UMIN && ACB.ule(ACC)) ||
+ (SPF1 == SPF_SMIN && ACB.sle(ACC)) ||
+ (SPF1 == SPF_UMAX && ACB.uge(ACC)) ||
+ (SPF1 == SPF_SMAX && ACB.sge(ACC)))
+ return ReplaceInstUsesWith(Outer, Inner);
+
+ // MIN(MIN(A, 97), 23) -> MIN(A, 23)
+ // MAX(MAX(A, 23), 97) -> MAX(A, 97)
+ if ((SPF1 == SPF_UMIN && ACB.ugt(ACC)) ||
+ (SPF1 == SPF_SMIN && ACB.sgt(ACC)) ||
+ (SPF1 == SPF_UMAX && ACB.ult(ACC)) ||
+ (SPF1 == SPF_SMAX && ACB.slt(ACC))) {
+ Outer.replaceUsesOfWith(Inner, A);
+ return &Outer;
+ }
+ }
+ }
+ }
+
+ // ABS(ABS(X)) -> ABS(X)
+ // NABS(NABS(X)) -> NABS(X)
+ if (SPF1 == SPF2 && (SPF1 == SPF_ABS || SPF1 == SPF_NABS)) {
+ return ReplaceInstUsesWith(Outer, Inner);
+ }
+ // ABS(NABS(X)) -> ABS(X)
+ // NABS(ABS(X)) -> NABS(X)
+ if ((SPF1 == SPF_ABS && SPF2 == SPF_NABS) ||
+ (SPF1 == SPF_NABS && SPF2 == SPF_ABS)) {
+ SelectInst *SI = cast<SelectInst>(Inner);
+ Value *NewSI = Builder->CreateSelect(
+ SI->getCondition(), SI->getFalseValue(), SI->getTrueValue());
+ return ReplaceInstUsesWith(Outer, NewSI);
+ }
+ return nullptr;
+}
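
The constant comparisons above implement the usual clamping algebra: the tighter of the two bounds wins. A quick standalone check of the scalar identities (illustrative values only):

    #include <algorithm>
    #include <cassert>

    int main() {
      for (int A : {-5, 10, 23, 50, 97, 200}) {
        // MIN(MIN(A, 23), 97) -> MIN(A, 23)   (inner bound already tighter)
        assert(std::min(std::min(A, 23), 97) == std::min(A, 23));
        // MIN(MIN(A, 97), 23) -> MIN(A, 23)   (outer bound tighter)
        assert(std::min(std::min(A, 97), 23) == std::min(A, 23));
        // MAX(MAX(A, 97), 23) -> MAX(A, 97)
        assert(std::max(std::max(A, 97), 23) == std::max(A, 97));
      }
      return 0;
    }
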
/// foldSelectICmpAnd - If one of the constants is zero (we know they can't
/// both be) and we have an icmp instruction with zero, and we have an 'and'
@@ -663,27 +754,27 @@ static Value *foldSelectICmpAnd(const SelectInst &SI, ConstantInt *TrueVal,
InstCombiner::BuilderTy *Builder) {
const ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition());
if (!IC || !IC->isEquality() || !SI.getType()->isIntegerTy())
- return 0;
+ return nullptr;
if (!match(IC->getOperand(1), m_Zero()))
- return 0;
+ return nullptr;
ConstantInt *AndRHS;
Value *LHS = IC->getOperand(0);
if (!match(LHS, m_And(m_Value(), m_ConstantInt(AndRHS))))
- return 0;
+ return nullptr;
// If both select arms are non-zero see if we have a select of the form
// 'x ? 2^n + C : C'. Then we can offset both arms by C, use the logic
// for 'x ? 2^n : 0' and fix the thing up at the end.
- ConstantInt *Offset = 0;
+ ConstantInt *Offset = nullptr;
if (!TrueVal->isZero() && !FalseVal->isZero()) {
if ((TrueVal->getValue() - FalseVal->getValue()).isPowerOf2())
Offset = FalseVal;
else if ((FalseVal->getValue() - TrueVal->getValue()).isPowerOf2())
Offset = TrueVal;
else
- return 0;
+ return nullptr;
// Adjust TrueVal and FalseVal to the offset.
TrueVal = ConstantInt::get(Builder->getContext(),
@@ -696,7 +787,7 @@ static Value *foldSelectICmpAnd(const SelectInst &SI, ConstantInt *TrueVal,
if (!AndRHS->getValue().isPowerOf2() ||
(!TrueVal->getValue().isPowerOf2() &&
!FalseVal->getValue().isPowerOf2()))
- return 0;
+ return nullptr;
// Determine which shift is needed to transform result of the 'and' into the
// desired result.
@@ -708,7 +799,7 @@ static Value *foldSelectICmpAnd(const SelectInst &SI, ConstantInt *TrueVal,
// or a trunc of the 'and'. The trunc case requires that all of the truncated
// bits are zero, we can figure that out by looking at the 'and' mask.
if (AndZeros >= ValC->getBitWidth())
- return 0;
+ return nullptr;
Value *V = Builder->CreateZExtOrTrunc(LHS, SI.getType());
if (ValZeros > AndZeros)
@@ -734,7 +825,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
Value *TrueVal = SI.getTrueValue();
Value *FalseVal = SI.getFalseValue();
- if (Value *V = SimplifySelectInst(CondVal, TrueVal, FalseVal, TD))
+ if (Value *V = SimplifySelectInst(CondVal, TrueVal, FalseVal, DL))
return ReplaceInstUsesWith(SI, V);
if (SI.getType()->isIntegerTy(1)) {
@@ -866,7 +957,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
if (Instruction *TI = dyn_cast<Instruction>(TrueVal))
if (Instruction *FI = dyn_cast<Instruction>(FalseVal))
if (TI->hasOneUse() && FI->hasOneUse()) {
- Instruction *AddOp = 0, *SubOp = 0;
+ Instruction *AddOp = nullptr, *SubOp = nullptr;
// Turn (select C, (op X, Y), (op X, Z)) -> (op X, (select C, Y, Z))
if (TI->getOpcode() == FI->getOpcode())
@@ -888,7 +979,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
}
if (AddOp) {
- Value *OtherAddOp = 0;
+ Value *OtherAddOp = nullptr;
if (SubOp->getOperand(0) == AddOp->getOperand(0)) {
OtherAddOp = AddOp->getOperand(1);
} else if (SubOp->getOperand(0) == AddOp->getOperand(1)) {
@@ -901,6 +992,11 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
Value *NegVal; // Compute -Z
if (SI.getType()->isFPOrFPVectorTy()) {
NegVal = Builder->CreateFNeg(SubOp->getOperand(1));
+ if (Instruction *NegInst = dyn_cast<Instruction>(NegVal)) {
+ FastMathFlags Flags = AddOp->getFastMathFlags();
+ Flags &= SubOp->getFastMathFlags();
+ NegInst->setFastMathFlags(Flags);
+ }
} else {
NegVal = Builder->CreateNeg(SubOp->getOperand(1));
}
@@ -913,9 +1009,15 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
Builder->CreateSelect(CondVal, NewTrueOp,
NewFalseOp, SI.getName() + ".p");
- if (SI.getType()->isFPOrFPVectorTy())
- return BinaryOperator::CreateFAdd(SubOp->getOperand(0), NewSel);
- else
+ if (SI.getType()->isFPOrFPVectorTy()) {
+ Instruction *RI =
+ BinaryOperator::CreateFAdd(SubOp->getOperand(0), NewSel);
+
+ FastMathFlags Flags = AddOp->getFastMathFlags();
+ Flags &= SubOp->getFastMathFlags();
+ RI->setFastMathFlags(Flags);
+ return RI;
+ } else
return BinaryOperator::CreateAdd(SubOp->getOperand(0), NewSel);
}
}
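
The flag handling above keeps only guarantees common to both original FP operations. Modelling fast-math flags as a plain bitmask (the names below are illustrative, not LLVM's FastMathFlags interface), the intersection looks like this:

    #include <cassert>
    #include <cstdint>

    enum : uint32_t { NoNaNs = 1, NoInfs = 2, NoSignedZeros = 4, AllowReassoc = 8 };

    int main() {
      uint32_t AddFlags = NoNaNs | NoInfs | AllowReassoc;
      uint32_t SubFlags = NoNaNs | NoSignedZeros;
      uint32_t Combined = AddFlags & SubFlags;   // Flags &= other op's flags
      assert(Combined == NoNaNs);                // only the shared guarantee survives
      return 0;
    }
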
@@ -944,7 +1046,6 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
// TODO.
// ABS(-X) -> ABS(X)
- // ABS(ABS(X)) -> ABS(X)
}
// See if we can fold the select into a phi node if the condition is a select.
@@ -958,7 +1059,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
if (SelectInst *TrueSI = dyn_cast<SelectInst>(TrueVal)) {
if (TrueSI->getCondition() == CondVal) {
if (SI.getTrueValue() == TrueSI->getTrueValue())
- return 0;
+ return nullptr;
SI.setOperand(1, TrueSI->getTrueValue());
return &SI;
}
@@ -966,7 +1067,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
if (SelectInst *FalseSI = dyn_cast<SelectInst>(FalseVal)) {
if (FalseSI->getCondition() == CondVal) {
if (SI.getFalseValue() == FalseSI->getFalseValue())
- return 0;
+ return nullptr;
SI.setOperand(2, FalseSI->getFalseValue());
return &SI;
}
@@ -994,5 +1095,5 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
}
}
- return 0;
+ return nullptr;
}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 8cf76e5..2f91c20 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -15,10 +15,12 @@
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/Support/PatternMatch.h"
+#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;
+#define DEBUG_TYPE "instcombine"
+
Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
assert(I.getOperand(1)->getType() == I.getOperand(0)->getType());
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
@@ -33,7 +35,7 @@ Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
if (Instruction *R = FoldOpIntoSelect(I, SI))
return R;
- if (ConstantInt *CUI = dyn_cast<ConstantInt>(Op1))
+ if (Constant *CUI = dyn_cast<Constant>(Op1))
if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
return Res;
@@ -50,7 +52,7 @@ Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
return &I;
}
- return 0;
+ return nullptr;
}
/// CanEvaluateShifted - See if we can compute the specified value, but shifted
@@ -78,7 +80,7 @@ static bool CanEvaluateShifted(Value *V, unsigned NumBits, bool isLeftShift,
// if the needed bits are already zero in the input. This allows us to reuse
// the value which means that we don't care if the shift has multiple uses.
// TODO: Handle opposite shift by exact value.
- ConstantInt *CI = 0;
+ ConstantInt *CI = nullptr;
if ((isLeftShift && match(I, m_LShr(m_Value(), m_ConstantInt(CI)))) ||
(!isLeftShift && match(I, m_Shl(m_Value(), m_ConstantInt(CI))))) {
if (CI->getZExtValue() == NumBits) {
@@ -115,7 +117,7 @@ static bool CanEvaluateShifted(Value *V, unsigned NumBits, bool isLeftShift,
case Instruction::Shl: {
// We can often fold the shift into shifts-by-a-constant.
CI = dyn_cast<ConstantInt>(I->getOperand(1));
- if (CI == 0) return false;
+ if (!CI) return false;
// We can always fold shl(c1)+shl(c2) -> shl(c1+c2).
if (isLeftShift) return true;
@@ -139,7 +141,7 @@ static bool CanEvaluateShifted(Value *V, unsigned NumBits, bool isLeftShift,
case Instruction::LShr: {
// We can often fold the shift into shifts-by-a-constant.
CI = dyn_cast<ConstantInt>(I->getOperand(1));
- if (CI == 0) return false;
+ if (!CI) return false;
// We can always fold lshr(c1)+lshr(c2) -> lshr(c1+c2).
if (!isLeftShift) return true;
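
The shl and lshr cases above lean on same-direction shifts by constants composing additively, valid while the combined amount stays below the bit width; a minimal standalone check:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t X = 0x12345678u;
      unsigned C1 = 3, C2 = 5;                       // C1 + C2 < 32
      assert(((X << C1) << C2) == (X << (C1 + C2))); // shl(shl(x,c1),c2) == shl(x,c1+c2)
      assert(((X >> C1) >> C2) == (X >> (C1 + C2))); // lshr composes the same way
      return 0;
    }
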
@@ -309,37 +311,38 @@ static Value *GetShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
-Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
+Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
BinaryOperator &I) {
bool isLeftShift = I.getOpcode() == Instruction::Shl;
+ ConstantInt *COp1 = nullptr;
+ if (ConstantDataVector *CV = dyn_cast<ConstantDataVector>(Op1))
+ COp1 = dyn_cast_or_null<ConstantInt>(CV->getSplatValue());
+ else if (ConstantVector *CV = dyn_cast<ConstantVector>(Op1))
+ COp1 = dyn_cast_or_null<ConstantInt>(CV->getSplatValue());
+ else
+ COp1 = dyn_cast<ConstantInt>(Op1);
+
+ if (!COp1)
+ return nullptr;
// See if we can propagate this shift into the input, this covers the trivial
// cast of lshr(shl(x,c1),c2) as well as other more complex cases.
if (I.getOpcode() != Instruction::AShr &&
- CanEvaluateShifted(Op0, Op1->getZExtValue(), isLeftShift, *this)) {
+ CanEvaluateShifted(Op0, COp1->getZExtValue(), isLeftShift, *this)) {
DEBUG(dbgs() << "ICE: GetShiftedValue propagating shift through expression"
" to eliminate shift:\n IN: " << *Op0 << "\n SH: " << I <<"\n");
return ReplaceInstUsesWith(I,
- GetShiftedValue(Op0, Op1->getZExtValue(), isLeftShift, *this));
+ GetShiftedValue(Op0, COp1->getZExtValue(), isLeftShift, *this));
}
-
// See if we can simplify any instructions used by the instruction whose sole
// purpose is to compute bits we don't care about.
uint32_t TypeBits = Op0->getType()->getScalarSizeInBits();
- // shl i32 X, 32 = 0 and srl i8 Y, 9 = 0, ... just don't eliminate
- // a signed shift.
- //
- if (Op1->uge(TypeBits)) {
- if (I.getOpcode() != Instruction::AShr)
- return ReplaceInstUsesWith(I, Constant::getNullValue(Op0->getType()));
- // ashr i32 X, 32 --> ashr i32 X, 31
- I.setOperand(1, ConstantInt::get(I.getType(), TypeBits-1));
- return &I;
- }
+ assert(!COp1->uge(TypeBits) &&
+ "Shift over the type width should have been removed already");
// ((X*C1) << C2) == (X * (C1 << C2))
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0))
@@ -367,7 +370,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
if (TrOp && I.isLogicalShift() && TrOp->isShift() &&
isa<ConstantInt>(TrOp->getOperand(1))) {
// Okay, we'll do this xform. Make the shift of shift.
- Constant *ShAmt = ConstantExpr::getZExt(Op1, TrOp->getType());
+ Constant *ShAmt = ConstantExpr::getZExt(COp1, TrOp->getType());
// (shift2 (shift1 & 0x00FF), c2)
Value *NSh = Builder->CreateBinOp(I.getOpcode(), TrOp, ShAmt,I.getName());
@@ -384,10 +387,10 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
// shift. We know that it is a logical shift by a constant, so adjust the
// mask as appropriate.
if (I.getOpcode() == Instruction::Shl)
- MaskV <<= Op1->getZExtValue();
+ MaskV <<= COp1->getZExtValue();
else {
assert(I.getOpcode() == Instruction::LShr && "Unknown logical shift");
- MaskV = MaskV.lshr(Op1->getZExtValue());
+ MaskV = MaskV.lshr(COp1->getZExtValue());
}
// shift1 & 0x00FF
@@ -421,9 +424,13 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
// (X + (Y << C))
Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), YS, V1,
Op0BO->getOperand(1)->getName());
- uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
- return BinaryOperator::CreateAnd(X, ConstantInt::get(I.getContext(),
- APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
+ uint32_t Op1Val = COp1->getLimitedValue(TypeBits);
+
+ APInt Bits = APInt::getHighBitsSet(TypeBits, TypeBits - Op1Val);
+ Constant *Mask = ConstantInt::get(I.getContext(), Bits);
+ if (VectorType *VT = dyn_cast<VectorType>(X->getType()))
+ Mask = ConstantVector::getSplat(VT->getNumElements(), Mask);
+ return BinaryOperator::CreateAnd(X, Mask);
}
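
A scalar check of the fold this mask implements, (Y + (X >> C)) << C -> (X + (Y << C)) & (-1 << C); the high-bits mask below is the 32-bit analogue of APInt::getHighBitsSet(TypeBits, TypeBits - C), and the vector path above merely splats the same mask per lane (values are illustrative):

    #include <cassert>
    #include <cstdint>

    int main() {
      unsigned C = 4;
      uint32_t HighMask = ~0u << C;                  // bits [C, 32) set
      for (uint32_t X : {0x0u, 0x1234ABCDu, 0xFFFFFFFFu})
        for (uint32_t Y : {0x0u, 0x7u, 0x80000001u})
          assert(((Y + (X >> C)) << C) == ((X + (Y << C)) & HighMask));
      return 0;
    }
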
// Turn (Y + ((X >> C) & CC)) << C -> ((X & (CC << C)) + (Y << C))
@@ -453,9 +460,13 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
// (X + (Y << C))
Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), V1, YS,
Op0BO->getOperand(0)->getName());
- uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
- return BinaryOperator::CreateAnd(X, ConstantInt::get(I.getContext(),
- APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
+ uint32_t Op1Val = COp1->getLimitedValue(TypeBits);
+
+ APInt Bits = APInt::getHighBitsSet(TypeBits, TypeBits - Op1Val);
+ Constant *Mask = ConstantInt::get(I.getContext(), Bits);
+ if (VectorType *VT = dyn_cast<VectorType>(X->getType()))
+ Mask = ConstantVector::getSplat(VT->getNumElements(), Mask);
+ return BinaryOperator::CreateAnd(X, Mask);
}
// Turn (((X >> C)&CC) + Y) << C -> (X + (Y << C)) & (CC << C)
@@ -523,7 +534,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
// Find out if this is a shift of a shift by a constant.
BinaryOperator *ShiftOp = dyn_cast<BinaryOperator>(Op0);
if (ShiftOp && !ShiftOp->isShift())
- ShiftOp = 0;
+ ShiftOp = nullptr;
if (ShiftOp && isa<ConstantInt>(ShiftOp->getOperand(1))) {
@@ -541,9 +552,9 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
ConstantInt *ShiftAmt1C = cast<ConstantInt>(ShiftOp->getOperand(1));
uint32_t ShiftAmt1 = ShiftAmt1C->getLimitedValue(TypeBits);
- uint32_t ShiftAmt2 = Op1->getLimitedValue(TypeBits);
+ uint32_t ShiftAmt2 = COp1->getLimitedValue(TypeBits);
assert(ShiftAmt2 != 0 && "Should have been simplified earlier");
- if (ShiftAmt1 == 0) return 0; // Will be simplified in the future.
+ if (ShiftAmt1 == 0) return nullptr; // Will be simplified in the future.
Value *X = ShiftOp->getOperand(0);
IntegerType *Ty = cast<IntegerType>(I.getType());
@@ -671,13 +682,16 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
}
}
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitShl(BinaryOperator &I) {
+ if (Value *V = SimplifyVectorOp(I))
+ return ReplaceInstUsesWith(I, V);
+
if (Value *V = SimplifyShlInst(I.getOperand(0), I.getOperand(1),
I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
- TD))
+ DL))
return ReplaceInstUsesWith(I, V);
if (Instruction *V = commonShiftTransforms(I))
@@ -709,12 +723,15 @@ Instruction *InstCombiner::visitShl(BinaryOperator &I) {
match(I.getOperand(1), m_Constant(C2)))
return BinaryOperator::CreateShl(ConstantExpr::getShl(C1, C2), A);
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
+ if (Value *V = SimplifyVectorOp(I))
+ return ReplaceInstUsesWith(I, V);
+
if (Value *V = SimplifyLShrInst(I.getOperand(0), I.getOperand(1),
- I.isExact(), TD))
+ I.isExact(), DL))
return ReplaceInstUsesWith(I, V);
if (Instruction *R = commonShiftTransforms(I))
@@ -749,12 +766,15 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
}
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
+ if (Value *V = SimplifyVectorOp(I))
+ return ReplaceInstUsesWith(I, V);
+
if (Value *V = SimplifyAShrInst(I.getOperand(0), I.getOperand(1),
- I.isExact(), TD))
+ I.isExact(), DL))
return ReplaceInstUsesWith(I, V);
if (Instruction *R = commonShiftTransforms(I))
@@ -769,11 +789,6 @@ Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
// have a sign-extend idiom.
Value *X;
if (match(Op0, m_Shl(m_Value(X), m_Specific(Op1)))) {
- // If the left shift is just shifting out partial signbits, delete the
- // extension.
- if (cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap())
- return ReplaceInstUsesWith(I, X);
-
// If the input is an extension from the shifted amount value, e.g.
// %x = zext i8 %A to i32
// %y = shl i32 %x, 24
@@ -800,11 +815,5 @@ Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
APInt::getSignBit(I.getType()->getScalarSizeInBits())))
return BinaryOperator::CreateLShr(Op0, Op1);
- // Arithmetic shifting an all-sign-bit value is a no-op.
- unsigned NumSignBits = ComputeNumSignBits(Op0);
- if (NumSignBits == Op0->getType()->getScalarSizeInBits())
- return ReplaceInstUsesWith(I, Op0);
-
- return 0;
+ return nullptr;
}
-
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index c831ddd..1b42d3d 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -12,15 +12,16 @@
//
//===----------------------------------------------------------------------===//
-
#include "InstCombine.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/Support/PatternMatch.h"
+#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;
+#define DEBUG_TYPE "instcombine"
+
/// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer. If so, check to see if there
/// are any bits set in the constant that are not demanded. If so, shrink the
@@ -57,7 +58,7 @@ bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask,
KnownZero, KnownOne, 0);
- if (V == 0) return false;
+ if (!V) return false;
if (V == &Inst) return true;
ReplaceInstUsesWith(Inst, V);
return true;
@@ -71,7 +72,7 @@ bool InstCombiner::SimplifyDemandedBits(Use &U, APInt DemandedMask,
unsigned Depth) {
Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask,
KnownZero, KnownOne, Depth);
- if (NewVal == 0) return false;
+ if (!NewVal) return false;
U = NewVal;
return true;
}
@@ -101,13 +102,13 @@ bool InstCombiner::SimplifyDemandedBits(Use &U, APInt DemandedMask,
Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
APInt &KnownZero, APInt &KnownOne,
unsigned Depth) {
- assert(V != 0 && "Null pointer of Value???");
+ assert(V != nullptr && "Null pointer of Value???");
assert(Depth <= 6 && "Limit Search Depth");
uint32_t BitWidth = DemandedMask.getBitWidth();
Type *VTy = V->getType();
- assert((TD || !VTy->isPointerTy()) &&
+ assert((DL || !VTy->isPointerTy()) &&
"SimplifyDemandedBits needs to know bit widths!");
- assert((!TD || TD->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&
+ assert((!DL || DL->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&
(!VTy->isIntOrIntVectorTy() ||
VTy->getScalarSizeInBits() == BitWidth) &&
KnownZero.getBitWidth() == BitWidth &&
@@ -118,33 +119,33 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// We know all of the bits for a constant!
KnownOne = CI->getValue() & DemandedMask;
KnownZero = ~KnownOne & DemandedMask;
- return 0;
+ return nullptr;
}
if (isa<ConstantPointerNull>(V)) {
// We know all of the bits for a constant!
KnownOne.clearAllBits();
KnownZero = DemandedMask;
- return 0;
+ return nullptr;
}
KnownZero.clearAllBits();
KnownOne.clearAllBits();
if (DemandedMask == 0) { // Not demanding any bits from V.
if (isa<UndefValue>(V))
- return 0;
+ return nullptr;
return UndefValue::get(VTy);
}
if (Depth == 6) // Limit search depth.
- return 0;
+ return nullptr;
APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
Instruction *I = dyn_cast<Instruction>(V);
if (!I) {
- ComputeMaskedBits(V, KnownZero, KnownOne, Depth);
- return 0; // Only analyze instructions.
+ computeKnownBits(V, KnownZero, KnownOne, Depth);
+ return nullptr; // Only analyze instructions.
}
// If there are multiple uses of this value and we aren't at the root, then
@@ -157,8 +158,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// this instruction has a simpler value in that context.
if (I->getOpcode() == Instruction::And) {
// If either the LHS or the RHS are Zero, the result is zero.
- ComputeMaskedBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth+1);
- ComputeMaskedBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth+1);
+ computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth+1);
+ computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth+1);
// If all of the demanded bits are known 1 on one side, return the other.
// These bits cannot contribute to the result of the 'and' in this
@@ -179,8 +180,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// only bits from X or Y are demanded.
// If either the LHS or the RHS are One, the result is One.
- ComputeMaskedBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth+1);
- ComputeMaskedBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth+1);
+ computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth+1);
+ computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth+1);
// If all of the demanded bits are known zero on one side, return the
// other. These bits cannot contribute to the result of the 'or' in this
@@ -204,8 +205,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// We can simplify (X^Y) -> X or Y in the user's context if we know that
// only bits from X or Y are demanded.
- ComputeMaskedBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth+1);
- ComputeMaskedBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth+1);
+ computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth+1);
+ computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth+1);
// If all of the demanded bits are known zero on one side, return the
// other.
@@ -216,8 +217,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
}
// Compute the KnownZero/KnownOne bits to simplify things downstream.
- ComputeMaskedBits(I, KnownZero, KnownOne, Depth);
- return 0;
+ computeKnownBits(I, KnownZero, KnownOne, Depth);
+ return nullptr;
}
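
The known-bits shortcut used above rests on simple masking identities: wherever Y is known zero, an or or xor with Y cannot affect a demanded bit, and an and with Y forces it to zero. A standalone check with illustrative constants:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t Demanded = 0x0000FFFFu;
      uint32_t Y = 0xABCD0000u;          // known zero in every demanded bit
      for (uint32_t X : {0x0u, 0x12345678u, 0xFFFFFFFFu}) {
        assert(((X | Y) & Demanded) == (X & Demanded));  // (X|Y) acts like X
        assert(((X ^ Y) & Demanded) == (X & Demanded));  // (X^Y) acts like X
        assert(((X & Y) & Demanded) == 0u);              // (X&Y) acts like 0
      }
      return 0;
    }
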
// If this is the root being simplified, allow it to have multiple uses,
@@ -229,7 +230,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
switch (I->getOpcode()) {
default:
- ComputeMaskedBits(I, KnownZero, KnownOne, Depth);
+ computeKnownBits(I, KnownZero, KnownOne, Depth);
break;
case Instruction::And:
// If either the LHS or the RHS are Zero, the result is zero.
@@ -409,20 +410,20 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
}
case Instruction::BitCast:
if (!I->getOperand(0)->getType()->isIntOrIntVectorTy())
- return 0; // vector->int or fp->int?
+ return nullptr; // vector->int or fp->int?
if (VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
if (VectorType *SrcVTy =
dyn_cast<VectorType>(I->getOperand(0)->getType())) {
if (DstVTy->getNumElements() != SrcVTy->getNumElements())
// Don't touch a bitcast between vectors of different element counts.
- return 0;
+ return nullptr;
} else
// Don't touch a scalar-to-vector bitcast.
- return 0;
+ return nullptr;
} else if (I->getOperand(0)->getType()->isVectorTy())
// Don't touch a vector-to-scalar bitcast.
- return 0;
+ return nullptr;
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
KnownZero, KnownOne, Depth+1))
@@ -578,9 +579,9 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
return I;
}
- // Otherwise just hand the sub off to ComputeMaskedBits to fill in
+ // Otherwise just hand the sub off to computeKnownBits to fill in
// the known zeros and ones.
- ComputeMaskedBits(V, KnownZero, KnownOne, Depth);
+ computeKnownBits(V, KnownZero, KnownOne, Depth);
// Turn this into a xor if LHS is 2^n-1 and the remaining bits are known
// zero.
@@ -751,7 +752,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// remainder is zero.
if (DemandedMask.isNegative() && KnownZero.isNonNegative()) {
APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
- ComputeMaskedBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth+1);
+ computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth+1);
// If it's known zero, our sign bit is also zero.
if (LHSKnownZero.isNegative())
KnownZero.setBit(KnownZero.getBitWidth() - 1);
@@ -810,10 +811,10 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
}
case Intrinsic::x86_sse42_crc32_64_64:
KnownZero = APInt::getHighBitsSet(64, 32);
- return 0;
+ return nullptr;
}
}
- ComputeMaskedBits(V, KnownZero, KnownOne, Depth);
+ computeKnownBits(V, KnownZero, KnownOne, Depth);
break;
}
@@ -821,7 +822,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// constant.
if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask)
return Constant::getIntegerValue(VTy, KnownOne);
- return 0;
+ return nullptr;
}
/// Helper routine of SimplifyDemandedUseBits. It tries to simplify
@@ -847,13 +848,13 @@ Value *InstCombiner::SimplifyShrShlDemandedBits(Instruction *Shr,
const APInt &ShlOp1 = cast<ConstantInt>(Shl->getOperand(1))->getValue();
const APInt &ShrOp1 = cast<ConstantInt>(Shr->getOperand(1))->getValue();
if (!ShlOp1 || !ShrOp1)
- return 0; // Noop.
+ return nullptr; // Noop.
Value *VarX = Shr->getOperand(0);
Type *Ty = VarX->getType();
unsigned BitWidth = Ty->getIntegerBitWidth();
if (ShlOp1.uge(BitWidth) || ShrOp1.uge(BitWidth))
- return 0; // Undef.
+ return nullptr; // Undef.
unsigned ShlAmt = ShlOp1.getZExtValue();
unsigned ShrAmt = ShrOp1.getZExtValue();
@@ -882,7 +883,7 @@ Value *InstCombiner::SimplifyShrShlDemandedBits(Instruction *Shr,
return VarX;
if (!Shr->hasOneUse())
- return 0;
+ return nullptr;
BinaryOperator *New;
if (ShrAmt < ShlAmt) {
@@ -902,7 +903,7 @@ Value *InstCombiner::SimplifyShrShlDemandedBits(Instruction *Shr,
return InsertNewInstWith(New, *Shl);
}
- return 0;
+ return nullptr;
}
/// SimplifyDemandedVectorElts - The specified value produces a vector with
@@ -923,7 +924,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
if (isa<UndefValue>(V)) {
// If the entire vector is undefined, just return this info.
UndefElts = EltMask;
- return 0;
+ return nullptr;
}
if (DemandedElts == 0) { // If nothing is demanded, provide undef.
@@ -938,7 +939,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
// Check if this is identity. If so, return 0 since we are not simplifying
// anything.
if (DemandedElts.isAllOnesValue())
- return 0;
+ return nullptr;
Type *EltTy = cast<VectorType>(V->getType())->getElementType();
Constant *Undef = UndefValue::get(EltTy);
@@ -952,7 +953,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
}
Constant *Elt = C->getAggregateElement(i);
- if (Elt == 0) return 0;
+ if (!Elt) return nullptr;
if (isa<UndefValue>(Elt)) { // Already undef.
Elts.push_back(Undef);
@@ -964,12 +965,12 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
// If we changed the constant, return it.
Constant *NewCV = ConstantVector::get(Elts);
- return NewCV != C ? NewCV : 0;
+ return NewCV != C ? NewCV : nullptr;
}
// Limit search depth.
if (Depth == 10)
- return 0;
+ return nullptr;
// If multiple users are using the root value, proceed with
// simplification conservatively assuming that all elements
@@ -980,14 +981,14 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
// the main instcombine process.
if (Depth != 0)
// TODO: Just compute the UndefElts information recursively.
- return 0;
+ return nullptr;
// Conservatively assume that all elements are needed.
DemandedElts = EltMask;
}
Instruction *I = dyn_cast<Instruction>(V);
- if (!I) return 0; // Only analyze instructions.
+ if (!I) return nullptr; // Only analyze instructions.
bool MadeChange = false;
APInt UndefElts2(VWidth, 0);
@@ -999,7 +1000,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
// If this is a variable index, we don't know which element it overwrites.
// demand exactly the same input as we produce.
ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
- if (Idx == 0) {
+ if (!Idx) {
// Note that we can't propagate undef elt info, because we don't know
// which elt is getting updated.
TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
@@ -1281,5 +1282,5 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
break;
}
}
- return MadeChange ? I : 0;
+ return MadeChange ? I : nullptr;
}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 30290ee..cb16584 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -13,10 +13,12 @@
//===----------------------------------------------------------------------===//
#include "InstCombine.h"
-#include "llvm/Support/PatternMatch.h"
+#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;
+#define DEBUG_TYPE "instcombine"
+
/// CheapToScalarize - Return true if the value is cheaper to scalarize than it
/// is to leave as a vector operation. isConstant indicates whether we're
/// extracting one known element. If false we're extracting a variable index.
@@ -73,7 +75,7 @@ static Value *FindScalarElement(Value *V, unsigned EltNo) {
if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
// If this is an insert to a variable element, we don't know what it is.
if (!isa<ConstantInt>(III->getOperand(2)))
- return 0;
+ return nullptr;
unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();
// If this is an insert to the element we are looking for, return the
@@ -97,14 +99,14 @@ static Value *FindScalarElement(Value *V, unsigned EltNo) {
}
// Extract a value from a vector add operation with a constant zero.
- Value *Val = 0; Constant *Con = 0;
+ Value *Val = nullptr; Constant *Con = nullptr;
if (match(V, m_Add(m_Value(Val), m_Constant(Con)))) {
if (Con->getAggregateElement(EltNo)->isNullValue())
return FindScalarElement(Val, EltNo);
}
// Otherwise, we don't know.
- return 0;
+ return nullptr;
}
// If we have a PHI node with a vector type that has only 2 uses: feed
@@ -113,12 +115,12 @@ static Value *FindScalarElement(Value *V, unsigned EltNo) {
Instruction *InstCombiner::scalarizePHI(ExtractElementInst &EI, PHINode *PN) {
// Verify that the PHI node has exactly 2 uses. Otherwise return NULL.
if (!PN->hasNUses(2))
- return NULL;
+ return nullptr;
// If so, it's known at this point that one operand is PHI and the other is
// an extractelement node. Find the PHI user that is not the extractelement
// node.
- Value::use_iterator iu = PN->use_begin();
+ auto iu = PN->user_begin();
Instruction *PHIUser = dyn_cast<Instruction>(*iu);
if (PHIUser == cast<Instruction>(&EI))
PHIUser = cast<Instruction>(*(++iu));
@@ -126,9 +128,9 @@ Instruction *InstCombiner::scalarizePHI(ExtractElementInst &EI, PHINode *PN) {
// Verify that this PHI user has one use, which is the PHI itself,
// and that it is a binary operation which is cheap to scalarize.
// otherwise return NULL.
- if (!PHIUser->hasOneUse() || !(PHIUser->use_back() == PN) ||
+ if (!PHIUser->hasOneUse() || !(PHIUser->user_back() == PN) ||
!(isa<BinaryOperator>(PHIUser)) || !CheapToScalarize(PHIUser, true))
- return NULL;
+ return nullptr;
// Create a scalar PHI node that will replace the vector PHI node
// just before the current PHI node.
@@ -142,7 +144,7 @@ Instruction *InstCombiner::scalarizePHI(ExtractElementInst &EI, PHINode *PN) {
// If the operand is the PHI induction variable:
if (PHIInVal == PHIUser) {
// Scalarize the binary operation. Its first operand is the
- // scalar PHI and the second operand is extracted from the other
+ // scalar PHI, and the second operand is extracted from the other
// vector operand.
BinaryOperator *B0 = cast<BinaryOperator>(PHIUser);
unsigned opId = (B0->getOperand(0) == PN) ? 1 : 0;
@@ -318,7 +320,7 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
}
}
}
- return 0;
+ return nullptr;
}
/// CollectSingleShuffleElements - If V is a shuffle of values that ONLY returns
@@ -326,7 +328,7 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
/// Otherwise, return false.
static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
SmallVectorImpl<Constant*> &Mask) {
- assert(V->getType() == LHS->getType() && V->getType() == RHS->getType() &&
+ assert(LHS->getType() == RHS->getType() &&
"Invalid CollectSingleShuffleElements");
unsigned NumElts = V->getType()->getVectorNumElements();
@@ -359,7 +361,7 @@ static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
if (isa<UndefValue>(ScalarOp)) { // inserting undef into vector.
- // Okay, we can handle this if the vector we are insertinting into is
+ // We can handle this if the vector we are inserting into is
// transitively ok.
if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
// If so, update the mask to reflect the inserted undef.
@@ -367,14 +369,14 @@ static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
return true;
}
} else if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)){
- if (isa<ConstantInt>(EI->getOperand(1)) &&
- EI->getOperand(0)->getType() == V->getType()) {
+ if (isa<ConstantInt>(EI->getOperand(1))) {
unsigned ExtractedIdx =
cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
+ unsigned NumLHSElts = LHS->getType()->getVectorNumElements();
// This must be extracting from either LHS or RHS.
if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {
- // Okay, we can handle this if the vector we are insertinting into is
+ // We can handle this if the vector we are inserting into is
// transitively ok.
if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
// If so, update the mask to reflect the inserted value.
@@ -386,7 +388,7 @@ static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
assert(EI->getOperand(0) == RHS);
Mask[InsertedIdx % NumElts] =
ConstantInt::get(Type::getInt32Ty(V->getContext()),
- ExtractedIdx+NumElts);
+ ExtractedIdx + NumLHSElts);
}
return true;
}
@@ -394,29 +396,36 @@ static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
}
}
}
- // TODO: Handle shufflevector here!
return false;
}
-/// CollectShuffleElements - We are building a shuffle of V, using RHS as the
-/// RHS of the shuffle instruction, if it is not null. Return a shuffle mask
-/// that computes V and the LHS value of the shuffle.
-static Value *CollectShuffleElements(Value *V, SmallVectorImpl<Constant*> &Mask,
- Value *&RHS) {
- assert(V->getType()->isVectorTy() &&
- (RHS == 0 || V->getType() == RHS->getType()) &&
- "Invalid shuffle!");
+
+/// We are building a shuffle to create V, which is a sequence of insertelement,
+/// extractelement pairs. If PermittedRHS is set, then we must either use it or
+/// not rely on the second vector source. Return a std::pair containing the
+/// left and right vectors of the proposed shuffle (or 0), and set the Mask
+/// parameter as required.
+///
+/// Note: we intentionally don't try to fold earlier shuffles since they have
+/// often been chosen carefully to be efficiently implementable on the target.
+typedef std::pair<Value *, Value *> ShuffleOps;
+
+static ShuffleOps CollectShuffleElements(Value *V,
+ SmallVectorImpl<Constant *> &Mask,
+ Value *PermittedRHS) {
+ assert(V->getType()->isVectorTy() && "Invalid shuffle!");
unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
if (isa<UndefValue>(V)) {
Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(V->getContext())));
- return V;
+ return std::make_pair(
+ PermittedRHS ? UndefValue::get(PermittedRHS->getType()) : V, nullptr);
}
if (isa<ConstantAggregateZero>(V)) {
Mask.assign(NumElts, ConstantInt::get(Type::getInt32Ty(V->getContext()),0));
- return V;
+ return std::make_pair(V, nullptr);
}
if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
@@ -426,51 +435,94 @@ static Value *CollectShuffleElements(Value *V, SmallVectorImpl<Constant*> &Mask,
Value *IdxOp = IEI->getOperand(2);
if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
- if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
- EI->getOperand(0)->getType() == V->getType()) {
+ if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp)) {
unsigned ExtractedIdx =
cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
// Either the extracted from or inserted into vector must be RHSVec,
// otherwise we'd end up with a shuffle of three inputs.
- if (EI->getOperand(0) == RHS || RHS == 0) {
- RHS = EI->getOperand(0);
- Value *V = CollectShuffleElements(VecOp, Mask, RHS);
+ if (EI->getOperand(0) == PermittedRHS || PermittedRHS == nullptr) {
+ Value *RHS = EI->getOperand(0);
+ ShuffleOps LR = CollectShuffleElements(VecOp, Mask, RHS);
+ assert(LR.second == nullptr || LR.second == RHS);
+
+ if (LR.first->getType() != RHS->getType()) {
+ // We tried our best, but we can't find anything compatible with RHS
+ // further up the chain. Return a trivial shuffle.
+ for (unsigned i = 0; i < NumElts; ++i)
+ Mask[i] = ConstantInt::get(Type::getInt32Ty(V->getContext()), i);
+ return std::make_pair(V, nullptr);
+ }
+
+ unsigned NumLHSElts = RHS->getType()->getVectorNumElements();
Mask[InsertedIdx % NumElts] =
ConstantInt::get(Type::getInt32Ty(V->getContext()),
- NumElts+ExtractedIdx);
- return V;
+ NumLHSElts+ExtractedIdx);
+ return std::make_pair(LR.first, RHS);
}
- if (VecOp == RHS) {
- Value *V = CollectShuffleElements(EI->getOperand(0), Mask, RHS);
- // Update Mask to reflect that `ScalarOp' has been inserted at
- // position `InsertedIdx' within the vector returned by IEI.
- Mask[InsertedIdx % NumElts] = Mask[ExtractedIdx];
-
- // Everything but the extracted element is replaced with the RHS.
- for (unsigned i = 0; i != NumElts; ++i) {
- if (i != InsertedIdx)
- Mask[i] = ConstantInt::get(Type::getInt32Ty(V->getContext()),
- NumElts+i);
- }
- return V;
+ if (VecOp == PermittedRHS) {
+ // We've gone as far as we can: anything on the other side of the
+ // extractelement will already have been converted into a shuffle.
+ unsigned NumLHSElts =
+ EI->getOperand(0)->getType()->getVectorNumElements();
+ for (unsigned i = 0; i != NumElts; ++i)
+ Mask.push_back(ConstantInt::get(
+ Type::getInt32Ty(V->getContext()),
+ i == InsertedIdx ? ExtractedIdx : NumLHSElts + i));
+ return std::make_pair(EI->getOperand(0), PermittedRHS);
}
// If this insertelement is a chain that comes from exactly these two
// vectors, return the vector and the effective shuffle.
- if (CollectSingleShuffleElements(IEI, EI->getOperand(0), RHS, Mask))
- return EI->getOperand(0);
+ if (EI->getOperand(0)->getType() == PermittedRHS->getType() &&
+ CollectSingleShuffleElements(IEI, EI->getOperand(0), PermittedRHS,
+ Mask))
+ return std::make_pair(EI->getOperand(0), PermittedRHS);
}
}
}
- // TODO: Handle shufflevector here!
// Otherwise, can't do anything fancy. Return an identity vector.
for (unsigned i = 0; i != NumElts; ++i)
Mask.push_back(ConstantInt::get(Type::getInt32Ty(V->getContext()), i));
- return V;
+ return std::make_pair(V, nullptr);
+}
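
A small model of the mask bookkeeping above (the helper here is illustrative, not LLVM API): mask lanes below NumLHSElts name LHS elements and the remaining lanes name RHS elements, so inserting element ExtractedIdx of RHS at position InsertedIdx sets Mask[InsertedIdx] = NumLHSElts + ExtractedIdx.

    #include <array>
    #include <cassert>

    template <size_t N>
    static std::array<int, N> shuffle(const std::array<int, N> &LHS,
                                      const std::array<int, N> &RHS,
                                      const std::array<int, N> &Mask) {
      std::array<int, N> Out{};
      for (size_t i = 0; i != N; ++i)
        Out[i] = Mask[i] < (int)N ? LHS[Mask[i]] : RHS[Mask[i] - N];
      return Out;
    }

    int main() {
      std::array<int, 4> LHS{10, 11, 12, 13}, RHS{20, 21, 22, 23};
      // insertelement(LHS, extractelement(RHS, 2), 1) as a shuffle mask:
      std::array<int, 4> Mask{0, 4 + 2, 2, 3};
      std::array<int, 4> Expected{10, 22, 12, 13};
      assert(shuffle(LHS, RHS, Mask) == Expected);
      return 0;
    }
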
+
+/// Try to find redundant insertvalue instructions, like the following ones:
+/// %0 = insertvalue { i8, i32 } undef, i8 %x, 0
+/// %1 = insertvalue { i8, i32 } %0, i8 %y, 0
+/// Here the second instruction inserts values at the same indices, as the
+/// first one, making the first one redundant.
+/// It should be transformed to:
+/// %0 = insertvalue { i8, i32 } undef, i8 %y, 0
+Instruction *InstCombiner::visitInsertValueInst(InsertValueInst &I) {
+ bool IsRedundant = false;
+ ArrayRef<unsigned int> FirstIndices = I.getIndices();
+
+ // If there is a chain of insertvalue instructions (each of them except the
+ // last one has only one use and it's another insertvalue insn from this
+ // chain), check if any of the 'children' uses the same indices as the first
+ // instruction. In this case, the first one is redundant.
+ Value *V = &I;
+ unsigned Depth = 0;
+ while (V->hasOneUse() && Depth < 10) {
+ User *U = V->user_back();
+ auto UserInsInst = dyn_cast<InsertValueInst>(U);
+ if (!UserInsInst || U->getOperand(0) != V)
+ break;
+ if (UserInsInst->getIndices() == FirstIndices) {
+ IsRedundant = true;
+ break;
+ }
+ V = UserInsInst;
+ Depth++;
+ }
+
+ if (IsRedundant)
+ return ReplaceInstUsesWith(I, I.getOperand(0));
+ return nullptr;
}
Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
@@ -485,17 +537,18 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
// If the inserted element was extracted from some other vector, and if the
// indexes are constant, try to turn this into a shufflevector operation.
if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
- if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
- EI->getOperand(0)->getType() == IE.getType()) {
- unsigned NumVectorElts = IE.getType()->getNumElements();
+ if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp)) {
+ unsigned NumInsertVectorElts = IE.getType()->getNumElements();
+ unsigned NumExtractVectorElts =
+ EI->getOperand(0)->getType()->getVectorNumElements();
unsigned ExtractedIdx =
cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
- if (ExtractedIdx >= NumVectorElts) // Out of range extract.
+ if (ExtractedIdx >= NumExtractVectorElts) // Out of range extract.
return ReplaceInstUsesWith(IE, VecOp);
- if (InsertedIdx >= NumVectorElts) // Out of range insert.
+ if (InsertedIdx >= NumInsertVectorElts) // Out of range insert.
return ReplaceInstUsesWith(IE, UndefValue::get(IE.getType()));
// If we are extracting a value from a vector, then inserting it right
@@ -505,13 +558,19 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
// If this insertelement isn't used by some other insertelement, turn it
// (and any insertelements it points to), into one big shuffle.
- if (!IE.hasOneUse() || !isa<InsertElementInst>(IE.use_back())) {
+ if (!IE.hasOneUse() || !isa<InsertElementInst>(IE.user_back())) {
SmallVector<Constant*, 16> Mask;
- Value *RHS = 0;
- Value *LHS = CollectShuffleElements(&IE, Mask, RHS);
- if (RHS == 0) RHS = UndefValue::get(LHS->getType());
- // We now have a shuffle of LHS, RHS, Mask.
- return new ShuffleVectorInst(LHS, RHS, ConstantVector::get(Mask));
+ ShuffleOps LR = CollectShuffleElements(&IE, Mask, nullptr);
+
+ // The proposed shuffle may be trivial, in which case we shouldn't
+ // perform the combine.
+ if (LR.first != &IE && LR.second != &IE) {
+ // We now have a shuffle of LHS, RHS, Mask.
+ if (LR.second == nullptr)
+ LR.second = UndefValue::get(LR.first->getType());
+ return new ShuffleVectorInst(LR.first, LR.second,
+ ConstantVector::get(Mask));
+ }
}
}
}
@@ -525,7 +584,7 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
return &IE;
}
- return 0;
+ return nullptr;
}
/// Return true if we can evaluate the specified expression tree if the vector
@@ -640,6 +699,8 @@ static Value *BuildNew(Instruction *I, ArrayRef<Value*> NewOps) {
if (isa<PossiblyExactOperator>(BO)) {
New->setIsExact(BO->isExact());
}
+ if (isa<FPMathOperator>(BO))
+ New->copyFastMathFlags(I);
return New;
}
case Instruction::ICmp:
@@ -765,9 +826,10 @@ InstCombiner::EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask) {
}
}
+ // If the element is not in Mask, there is no need to handle operand 1 (the
+ // element to be inserted); just evaluate operand 0 according to Mask.
if (!Found)
- return UndefValue::get(
- VectorType::get(V->getType()->getScalarType(), Mask.size()));
+ return EvaluateInDifferentElementOrder(I->getOperand(0), Mask);
Value *V = EvaluateInDifferentElementOrder(I->getOperand(0), Mask);
return InsertElementInst::Create(V, I->getOperand(1),
@@ -777,6 +839,20 @@ InstCombiner::EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask) {
llvm_unreachable("failed to reorder elements of vector instruction!");
}
+static void RecognizeIdentityMask(const SmallVectorImpl<int> &Mask,
+ bool &isLHSID, bool &isRHSID) {
+ isLHSID = isRHSID = true;
+
+ for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
+ if (Mask[i] < 0) continue; // Ignore undef values.
+ // Is this an identity shuffle of the LHS value?
+ isLHSID &= (Mask[i] == (int)i);
+
+ // Is this an identity shuffle of the RHS value?
+ isRHSID &= (Mask[i]-e == i);
+ }
+}
+
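
RecognizeIdentityMask, factored out above, answers one question per operand: for every
non-undef lane, does the mask pick element i of the LHS (mask values 0..e-1) or element i
of the RHS (mask values e..2e-1)? A standalone sketch of the same check over plain
integer masks, with -1 standing in for an undef lane (illustrative names, not the LLVM
API):

#include <cassert>
#include <vector>

// -1 marks an undef lane and constrains nothing, exactly like Mask[i] < 0.
static void recognizeIdentityMask(const std::vector<int> &Mask, bool &IsLHSID,
                                  bool &IsRHSID) {
  IsLHSID = IsRHSID = true;
  const int E = static_cast<int>(Mask.size());
  for (int I = 0; I != E; ++I) {
    if (Mask[I] < 0)
      continue;
    IsLHSID &= (Mask[I] == I);     // lane I taken from LHS element I?
    IsRHSID &= (Mask[I] - E == I); // lane I taken from RHS element I?
  }
}

int main() {
  bool LHS, RHS;
  recognizeIdentityMask({0, 1, -1, 3}, LHS, RHS); // identity of the LHS
  assert(LHS && !RHS);
  recognizeIdentityMask({4, 5, 6, 7}, LHS, RHS);  // identity of the RHS
  assert(!LHS && RHS);
  recognizeIdentityMask({0, 5, 2, 7}, LHS, RHS);  // a genuine blend of both
  assert(!LHS && !RHS);
  return 0;
}
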
Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
Value *LHS = SVI.getOperand(0);
Value *RHS = SVI.getOperand(1);
@@ -840,16 +916,8 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
if (VWidth == LHSWidth) {
// Analyze the shuffle, are the LHS or RHS identity shuffles?
- bool isLHSID = true, isRHSID = true;
-
- for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
- if (Mask[i] < 0) continue; // Ignore undef values.
- // Is this an identity shuffle of the LHS value?
- isLHSID &= (Mask[i] == (int)i);
-
- // Is this an identity shuffle of the RHS value?
- isRHSID &= (Mask[i]-e == i);
- }
+ bool isLHSID, isRHSID;
+ RecognizeIdentityMask(Mask, isLHSID, isRHSID);
// Eliminate identity shuffles.
if (isLHSID) return ReplaceInstUsesWith(SVI, LHS);
@@ -908,16 +976,16 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
ShuffleVectorInst* RHSShuffle = dyn_cast<ShuffleVectorInst>(RHS);
if (LHSShuffle)
if (!isa<UndefValue>(LHSShuffle->getOperand(1)) && !isa<UndefValue>(RHS))
- LHSShuffle = NULL;
+ LHSShuffle = nullptr;
if (RHSShuffle)
if (!isa<UndefValue>(RHSShuffle->getOperand(1)))
- RHSShuffle = NULL;
+ RHSShuffle = nullptr;
if (!LHSShuffle && !RHSShuffle)
- return MadeChange ? &SVI : 0;
+ return MadeChange ? &SVI : nullptr;
- Value* LHSOp0 = NULL;
- Value* LHSOp1 = NULL;
- Value* RHSOp0 = NULL;
+ Value* LHSOp0 = nullptr;
+ Value* LHSOp1 = nullptr;
+ Value* RHSOp0 = nullptr;
unsigned LHSOp0Width = 0;
unsigned RHSOp0Width = 0;
if (LHSShuffle) {
@@ -949,11 +1017,11 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
// case 4
if (LHSOp0 == RHSOp0) {
newLHS = LHSOp0;
- newRHS = NULL;
+ newRHS = nullptr;
}
if (newLHS == LHS && newRHS == RHS)
- return MadeChange ? &SVI : 0;
+ return MadeChange ? &SVI : nullptr;
SmallVector<int, 16> LHSMask;
SmallVector<int, 16> RHSMask;
@@ -1012,8 +1080,8 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
// references from RHSOp0 to LHSOp0, so we don't need to shift the mask.
// If newRHS == newLHS, we want to remap any references from newRHS to
// newLHS so that we can properly identify splats that may occur due to
- // obfuscation accross the two vectors.
- if (eltMask >= 0 && newRHS != NULL && newLHS != newRHS)
+ // obfuscation across the two vectors.
+ if (eltMask >= 0 && newRHS != nullptr && newLHS != newRHS)
eltMask += newLHSWidth;
}
@@ -1039,10 +1107,17 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
Elts.push_back(ConstantInt::get(Int32Ty, newMask[i]));
}
}
- if (newRHS == NULL)
+ if (!newRHS)
newRHS = UndefValue::get(newLHS->getType());
return new ShuffleVectorInst(newLHS, newRHS, ConstantVector::get(Elts));
}
- return MadeChange ? &SVI : 0;
+ // If the result mask is an identity, replace uses of this instruction with
+ // the corresponding argument.
+ bool isLHSID, isRHSID;
+ RecognizeIdentityMask(newMask, isLHSID, isRHSID);
+ if (isLHSID && VWidth == LHSOp0Width) return ReplaceInstUsesWith(SVI, newLHS);
+ if (isRHSID && VWidth == RHSOp0Width) return ReplaceInstUsesWith(SVI, newRHS);
+
+ return MadeChange ? &SVI : nullptr;
}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineWorklist.h b/contrib/llvm/lib/Transforms/InstCombine/InstCombineWorklist.h
index f84db27..1ab7db3 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineWorklist.h
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineWorklist.h
@@ -10,7 +10,6 @@
#ifndef INSTCOMBINE_WORKLIST_H
#define INSTCOMBINE_WORKLIST_H
-#define DEBUG_TYPE "instcombine"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"
@@ -18,6 +17,8 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
+#define DEBUG_TYPE "instcombine"
+
namespace llvm {
/// InstCombineWorklist - This is the worklist management logic for
@@ -68,7 +69,7 @@ public:
if (It == WorklistMap.end()) return; // Not in worklist.
// Don't bother moving everything down, just null out the slot.
- Worklist[It->second] = 0;
+ Worklist[It->second] = nullptr;
WorklistMap.erase(It);
}
@@ -84,9 +85,8 @@ public:
/// now.
///
void AddUsersToWorkList(Instruction &I) {
- for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
- UI != UE; ++UI)
- Add(cast<Instruction>(*UI));
+ for (User *U : I.users())
+ Add(cast<Instruction>(U));
}
@@ -102,4 +102,6 @@ public:
} // end namespace llvm.
+#undef DEBUG_TYPE
+
#endif
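
The worklist header now defines DEBUG_TYPE only after its includes and #undefs it before
the include guard closes, presumably so the macro can neither leak into nor collide with
a DEBUG_TYPE chosen by files that include this header. A self-contained sketch of that
hygiene pattern (names are illustrative; LLVM's actual debug machinery is not used here):

#include <cstdio>

// --- begin "header" region, loosely mirroring the change above ---
#define DEBUG_TYPE "instcombine-worklist" // defined only after the includes

static void debugNote(const char *Msg) {
  std::printf("[%s] %s\n", DEBUG_TYPE, Msg); // macro expands here, at definition
}

#undef DEBUG_TYPE // undefined again before the "header" ends
// --- end "header" region ---

// The "including file" is now free to pick its own DEBUG_TYPE.
#define DEBUG_TYPE "instcombine"

int main() {
  debugNote("worklist updated"); // still tagged with the header's own name
  std::printf("[%s] pass running\n", DEBUG_TYPE);
  return 0;
}
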
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 191a101..d3648e2 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -33,7 +33,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Scalar.h"
#include "InstCombine.h"
#include "llvm-c/Initialization.h"
@@ -43,14 +42,15 @@
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/Support/CFG.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Support/PatternMatch.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
@@ -58,6 +58,8 @@
using namespace llvm;
using namespace llvm::PatternMatch;
+#define DEBUG_TYPE "instcombine"
+
STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
@@ -103,13 +105,13 @@ Value *InstCombiner::EmitGEPOffset(User *GEP) {
bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {
assert(From->isIntegerTy() && To->isIntegerTy());
- // If we don't have TD, we don't know if the source/dest are legal.
- if (!TD) return false;
+ // If we don't have DL, we don't know if the source/dest are legal.
+ if (!DL) return false;
unsigned FromWidth = From->getPrimitiveSizeInBits();
unsigned ToWidth = To->getPrimitiveSizeInBits();
- bool FromLegal = TD->isLegalInteger(FromWidth);
- bool ToLegal = TD->isLegalInteger(ToWidth);
+ bool FromLegal = DL->isLegalInteger(FromWidth);
+ bool ToLegal = DL->isLegalInteger(ToWidth);
// If this is a legal integer from type, and the result would be an illegal
// type, don't do the transformation.
@@ -221,7 +223,7 @@ bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
Value *C = I.getOperand(1);
// Does "B op C" simplify?
- if (Value *V = SimplifyBinOp(Opcode, B, C, TD)) {
+ if (Value *V = SimplifyBinOp(Opcode, B, C, DL)) {
// It simplifies to V. Form "A op V".
I.setOperand(0, A);
I.setOperand(1, V);
@@ -250,7 +252,7 @@ bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
Value *C = Op1->getOperand(1);
// Does "A op B" simplify?
- if (Value *V = SimplifyBinOp(Opcode, A, B, TD)) {
+ if (Value *V = SimplifyBinOp(Opcode, A, B, DL)) {
// It simplifies to V. Form "V op C".
I.setOperand(0, V);
I.setOperand(1, C);
@@ -272,7 +274,7 @@ bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
Value *C = I.getOperand(1);
// Does "C op A" simplify?
- if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
+ if (Value *V = SimplifyBinOp(Opcode, C, A, DL)) {
// It simplifies to V. Form "V op B".
I.setOperand(0, V);
I.setOperand(1, B);
@@ -292,7 +294,7 @@ bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
Value *C = Op1->getOperand(1);
// Does "C op A" simplify?
- if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
+ if (Value *V = SimplifyBinOp(Opcode, C, A, DL)) {
// It simplifies to V. Form "B op V".
I.setOperand(0, B);
I.setOperand(1, V);
@@ -319,6 +321,12 @@ bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
Constant *Folded = ConstantExpr::get(Opcode, C1, C2);
BinaryOperator *New = BinaryOperator::Create(Opcode, A, B);
+ if (isa<FPMathOperator>(New)) {
+ FastMathFlags Flags = I.getFastMathFlags();
+ Flags &= Op0->getFastMathFlags();
+ Flags &= Op1->getFastMathFlags();
+ New->setFastMathFlags(Flags);
+ }
InsertNewInstWith(New, I);
New->takeName(Op1);
I.setOperand(0, New);
@@ -388,6 +396,127 @@ static bool RightDistributesOverLeft(Instruction::BinaryOps LOp,
return false;
}
+/// This function returns identity value for given opcode, which can be used to
+/// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
+static Value *getIdentityValue(Instruction::BinaryOps OpCode, Value *V) {
+ if (isa<Constant>(V))
+ return nullptr;
+
+ if (OpCode == Instruction::Mul)
+ return ConstantInt::get(V->getType(), 1);
+
+ // TODO: We can handle other cases e.g. Instruction::And, Instruction::Or etc.
+
+ return nullptr;
+}
+
+/// This function factors binary ops which can be combined using distributive
+/// laws. It also factors SHL as MUL, e.g. SHL(X, 2) ==> MUL(X, 4).
+static Instruction::BinaryOps
+getBinOpsForFactorization(BinaryOperator *Op, Value *&LHS, Value *&RHS) {
+ if (!Op)
+ return Instruction::BinaryOpsEnd;
+
+ if (Op->getOpcode() == Instruction::Shl) {
+ if (Constant *CST = dyn_cast<Constant>(Op->getOperand(1))) {
+ // The multiplier is really 1 << CST.
+ RHS = ConstantExpr::getShl(ConstantInt::get(Op->getType(), 1), CST);
+ LHS = Op->getOperand(0);
+ return Instruction::Mul;
+ }
+ }
+
+ // TODO: We can add other conversions e.g. shr => div etc.
+
+ LHS = Op->getOperand(0);
+ RHS = Op->getOperand(1);
+ return Op->getOpcode();
+}
+
+/// This tries to simplify binary operations by factorizing out common terms
+/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
+static Value *tryFactorization(InstCombiner::BuilderTy *Builder,
+ const DataLayout *DL, BinaryOperator &I,
+ Instruction::BinaryOps InnerOpcode, Value *A,
+ Value *B, Value *C, Value *D) {
+
+ // If any of A, B, C or D is null, we cannot factor I; return early.
+ // Checking A and C should be enough.
+ if (!A || !C || !B || !D)
+ return nullptr;
+
+ Value *SimplifiedInst = nullptr;
+ Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
+ Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
+
+ // Does "X op' Y" always equal "Y op' X"?
+ bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
+
+ // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
+ if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode))
+ // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
+ // commutative case, "(A op' B) op (C op' A)"?
+ if (A == C || (InnerCommutative && A == D)) {
+ if (A != C)
+ std::swap(C, D);
+ // Consider forming "A op' (B op D)".
+ // If "B op D" simplifies then it can be formed with no cost.
+ Value *V = SimplifyBinOp(TopLevelOpcode, B, D, DL);
+ // If "B op D" doesn't simplify then only go on if both of the existing
+ // operations "A op' B" and "C op' D" will be zapped as no longer used.
+ if (!V && LHS->hasOneUse() && RHS->hasOneUse())
+ V = Builder->CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
+ if (V) {
+ SimplifiedInst = Builder->CreateBinOp(InnerOpcode, A, V);
+ }
+ }
+
+ // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
+ if (!SimplifiedInst && RightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
+ // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
+ // commutative case, "(A op' B) op (B op' D)"?
+ if (B == D || (InnerCommutative && B == C)) {
+ if (B != D)
+ std::swap(C, D);
+ // Consider forming "(A op C) op' B".
+ // If "A op C" simplifies then it can be formed with no cost.
+ Value *V = SimplifyBinOp(TopLevelOpcode, A, C, DL);
+
+ // If "A op C" doesn't simplify then only go on if both of the existing
+ // operations "A op' B" and "C op' D" will be zapped as no longer used.
+ if (!V && LHS->hasOneUse() && RHS->hasOneUse())
+ V = Builder->CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
+ if (V) {
+ SimplifiedInst = Builder->CreateBinOp(InnerOpcode, V, B);
+ }
+ }
+
+ if (SimplifiedInst) {
+ ++NumFactor;
+ SimplifiedInst->takeName(&I);
+
+ // Check if we can add NSW flag to SimplifiedInst. If so, set NSW flag.
+ // TODO: Check for NUW.
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(SimplifiedInst)) {
+ if (isa<OverflowingBinaryOperator>(SimplifiedInst)) {
+ bool HasNSW = false;
+ if (isa<OverflowingBinaryOperator>(&I))
+ HasNSW = I.hasNoSignedWrap();
+
+ if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
+ if (isa<OverflowingBinaryOperator>(Op0))
+ HasNSW &= Op0->hasNoSignedWrap();
+
+ if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS))
+ if (isa<OverflowingBinaryOperator>(Op1))
+ HasNSW &= Op1->hasNoSignedWrap();
+ BO->setHasNoSignedWrap(HasNSW);
+ }
+ }
+ }
+ return SimplifiedInst;
+}
+
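
The factorization helpers above rest on two arithmetic identities: a left shift by a
constant is a multiply by a power of two (getBinOpsForFactorization), and a bare X may be
treated as X * 1 (getIdentityValue) so that a pattern like (X * 2) + X factors into
X * (2 + 1). A quick standalone check of both identities on unsigned integers
(illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t X = 0; X <= 100; ++X) {
    // getBinOpsForFactorization treats shl(X, 2) as mul(X, 1 << 2).
    assert((X << 2) == X * 4u);
    // getIdentityValue supplies the "* 1" so (X * 2) + X becomes X * (2 + 1).
    assert(X * 2u + X * 1u == X * 3u);
  }
  return 0;
}
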
/// SimplifyUsingDistributiveLaws - This tries to simplify binary operations
/// which some other binary operation distributes over either by factorizing
/// out common terms (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this
@@ -397,65 +526,33 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
- Instruction::BinaryOps TopLevelOpcode = I.getOpcode(); // op
// Factorization.
- if (Op0 && Op1 && Op0->getOpcode() == Op1->getOpcode()) {
- // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
- // a common term.
- Value *A = Op0->getOperand(0), *B = Op0->getOperand(1);
- Value *C = Op1->getOperand(0), *D = Op1->getOperand(1);
- Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
+ Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
+ Instruction::BinaryOps LHSOpcode = getBinOpsForFactorization(Op0, A, B);
+ Instruction::BinaryOps RHSOpcode = getBinOpsForFactorization(Op1, C, D);
+
+ // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
+ // a common term.
+ if (LHSOpcode == RHSOpcode) {
+ if (Value *V = tryFactorization(Builder, DL, I, LHSOpcode, A, B, C, D))
+ return V;
+ }
- // Does "X op' Y" always equal "Y op' X"?
- bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
-
- // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
- if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode))
- // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
- // commutative case, "(A op' B) op (C op' A)"?
- if (A == C || (InnerCommutative && A == D)) {
- if (A != C)
- std::swap(C, D);
- // Consider forming "A op' (B op D)".
- // If "B op D" simplifies then it can be formed with no cost.
- Value *V = SimplifyBinOp(TopLevelOpcode, B, D, TD);
- // If "B op D" doesn't simplify then only go on if both of the existing
- // operations "A op' B" and "C op' D" will be zapped as no longer used.
- if (!V && Op0->hasOneUse() && Op1->hasOneUse())
- V = Builder->CreateBinOp(TopLevelOpcode, B, D, Op1->getName());
- if (V) {
- ++NumFactor;
- V = Builder->CreateBinOp(InnerOpcode, A, V);
- V->takeName(&I);
- return V;
- }
- }
+ // The instruction has the form "(A op' B) op (C)". Try to factorize common
+ // term.
+ if (Value *V = tryFactorization(Builder, DL, I, LHSOpcode, A, B, RHS,
+ getIdentityValue(LHSOpcode, RHS)))
+ return V;
- // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
- if (RightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
- // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
- // commutative case, "(A op' B) op (B op' D)"?
- if (B == D || (InnerCommutative && B == C)) {
- if (B != D)
- std::swap(C, D);
- // Consider forming "(A op C) op' B".
- // If "A op C" simplifies then it can be formed with no cost.
- Value *V = SimplifyBinOp(TopLevelOpcode, A, C, TD);
- // If "A op C" doesn't simplify then only go on if both of the existing
- // operations "A op' B" and "C op' D" will be zapped as no longer used.
- if (!V && Op0->hasOneUse() && Op1->hasOneUse())
- V = Builder->CreateBinOp(TopLevelOpcode, A, C, Op0->getName());
- if (V) {
- ++NumFactor;
- V = Builder->CreateBinOp(InnerOpcode, V, B);
- V->takeName(&I);
- return V;
- }
- }
- }
+ // The instruction has the form "(B) op (C op' D)". Try to factorize common
+ // term.
+ if (Value *V = tryFactorization(Builder, DL, I, RHSOpcode, LHS,
+ getIdentityValue(RHSOpcode, LHS), C, D))
+ return V;
// Expansion.
+ Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
// The instruction has the form "(A op' B) op C". See if expanding it out
// to "(A op C) op' (B op C)" results in simplifications.
@@ -463,8 +560,8 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
// Do "A op C" and "B op C" both simplify?
- if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, TD))
- if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, TD)) {
+ if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, DL))
+ if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, DL)) {
// They do! Return "L op' R".
++NumExpand;
// If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
@@ -472,7 +569,7 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
(Instruction::isCommutative(InnerOpcode) && L == B && R == A))
return Op0;
// Otherwise return "L op' R" if it simplifies.
- if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
+ if (Value *V = SimplifyBinOp(InnerOpcode, L, R, DL))
return V;
// Otherwise, create a new instruction.
C = Builder->CreateBinOp(InnerOpcode, L, R);
@@ -488,8 +585,8 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
// Do "A op B" and "A op C" both simplify?
- if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, TD))
- if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, TD)) {
+ if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, DL))
+ if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, DL)) {
// They do! Return "L op' R".
++NumExpand;
// If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
@@ -497,7 +594,7 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
(Instruction::isCommutative(InnerOpcode) && L == C && R == B))
return Op1;
// Otherwise return "L op' R" if it simplifies.
- if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
+ if (Value *V = SimplifyBinOp(InnerOpcode, L, R, DL))
return V;
// Otherwise, create a new instruction.
A = Builder->CreateBinOp(InnerOpcode, L, R);
@@ -506,7 +603,7 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
}
}
- return 0;
+ return nullptr;
}
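
The expansion half of SimplifyUsingDistributiveLaws goes the other way: it rewrites
"(A op' B) op C" into "(A op C) op' (B op C)" (and the mirrored form) only when both new
operands simplify, so no extra instructions are created. The rewrite is only sound when
op distributes over op'; below is a standalone check of two classic integer distributive
laws of the kind this code relies on, multiplication over addition and AND over OR
(illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t Vals[] = {0u, 1u, 7u, 13u, 0xFFFFFFFFu};
  for (uint32_t A : Vals)
    for (uint32_t B : Vals)
      for (uint32_t C : Vals) {
        // mul distributes over add (also modulo 2^32): (A + B) * C == A*C + B*C
        assert((A + B) * C == A * C + B * C);
        // and distributes over or: (A | B) & C == (A & C) | (B & C)
        assert(((A | B) & C) == ((A & C) | (B & C)));
      }
  return 0;
}
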
// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
@@ -524,7 +621,7 @@ Value *InstCombiner::dyn_castNegVal(Value *V) const {
if (C->getType()->getElementType()->isIntegerTy())
return ConstantExpr::getNeg(C);
- return 0;
+ return nullptr;
}
// dyn_castFNegVal - Given a 'fsub' instruction, return the RHS of the
@@ -543,7 +640,7 @@ Value *InstCombiner::dyn_castFNegVal(Value *V, bool IgnoreZeroSign) const {
if (C->getType()->getElementType()->isFloatingPointTy())
return ConstantExpr::getFNeg(C);
- return 0;
+ return nullptr;
}
static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
@@ -566,9 +663,14 @@ static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
if (!ConstIsRHS)
std::swap(Op0, Op1);
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
- return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I)) {
+ Value *RI = IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
SO->getName()+".op");
+ Instruction *FPInst = dyn_cast<Instruction>(RI);
+ if (FPInst && isa<FPMathOperator>(FPInst))
+ FPInst->copyFastMathFlags(BO);
+ return RI;
+ }
if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
SO->getName()+".cmp");
@@ -584,13 +686,13 @@ static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
// not have a second operand.
Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
// Don't modify shared select instructions
- if (!SI->hasOneUse()) return 0;
+ if (!SI->hasOneUse()) return nullptr;
Value *TV = SI->getOperand(1);
Value *FV = SI->getOperand(2);
if (isa<Constant>(TV) || isa<Constant>(FV)) {
// Bool selects with constant operands can be folded to logical ops.
- if (SI->getType()->isIntegerTy(1)) return 0;
+ if (SI->getType()->isIntegerTy(1)) return nullptr;
// If it's a bitcast involving vectors, make sure it has the same number of
// elements on both sides.
@@ -599,10 +701,10 @@ Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());
// Verify that either both or neither are vectors.
- if ((SrcTy == NULL) != (DestTy == NULL)) return 0;
+ if ((SrcTy == nullptr) != (DestTy == nullptr)) return nullptr;
// If vectors, verify that they have the same number of elements.
if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())
- return 0;
+ return nullptr;
}
Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
@@ -611,7 +713,7 @@ Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
return SelectInst::Create(SI->getCondition(),
SelectTrueVal, SelectFalseVal);
}
- return 0;
+ return nullptr;
}
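
FoldOpIntoSelect above pushes an operation into both arms of a single-use select (it is
only attempted when an arm is constant, so at least one of the new arms folds away). At
the value level the rewrite relies on the identity (c ? a : b) op K == c ? (a op K) :
(b op K); a standalone check with unsigned addition standing in for the operation
(illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t AVals[] = {0u, 5u, 100u};
  const uint32_t BVals[] = {0u, 9u, 42u};
  const uint32_t K = 7; // the constant operand of the folded operation
  for (int Cond = 0; Cond <= 1; ++Cond)
    for (uint32_t A : AVals)
      for (uint32_t B : BVals) {
        uint32_t AfterSelect = (Cond ? A : B) + K;    // op applied after the select
        uint32_t IntoArms = Cond ? (A + K) : (B + K); // op folded into each arm
        assert(AfterSelect == IntoArms);
      }
  return 0;
}
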
@@ -623,18 +725,17 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
PHINode *PN = cast<PHINode>(I.getOperand(0));
unsigned NumPHIValues = PN->getNumIncomingValues();
if (NumPHIValues == 0)
- return 0;
+ return nullptr;
// We normally only transform phis with a single use. However, if a PHI has
// multiple uses and they are all the same operation, we can fold *all* of the
// uses into the PHI.
if (!PN->hasOneUse()) {
// Walk the use list for the instruction, comparing them to I.
- for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
- UI != E; ++UI) {
- Instruction *User = cast<Instruction>(*UI);
- if (User != &I && !I.isIdenticalTo(User))
- return 0;
+ for (User *U : PN->users()) {
+ Instruction *UI = cast<Instruction>(U);
+ if (UI != &I && !I.isIdenticalTo(UI))
+ return nullptr;
}
// Otherwise, we can replace *all* users with the new PHI we form.
}
@@ -644,14 +745,14 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
// remember the BB it is in. If there is more than one or if *it* is a PHI,
// bail out. We don't do arbitrary constant expressions here because moving
// their computation can be expensive without a cost model.
- BasicBlock *NonConstBB = 0;
+ BasicBlock *NonConstBB = nullptr;
for (unsigned i = 0; i != NumPHIValues; ++i) {
Value *InVal = PN->getIncomingValue(i);
if (isa<Constant>(InVal) && !isa<ConstantExpr>(InVal))
continue;
- if (isa<PHINode>(InVal)) return 0; // Itself a phi.
- if (NonConstBB) return 0; // More than one non-const value.
+ if (isa<PHINode>(InVal)) return nullptr; // Itself a phi.
+ if (NonConstBB) return nullptr; // More than one non-const value.
NonConstBB = PN->getIncomingBlock(i);
@@ -659,22 +760,22 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
// insert a computation after it without breaking the edge.
if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
if (II->getParent() == NonConstBB)
- return 0;
+ return nullptr;
// If the incoming non-constant value is in I's block, we will remove one
// instruction, but insert another equivalent one, leading to infinite
// instcombine.
if (NonConstBB == I.getParent())
- return 0;
+ return nullptr;
}
// If there is exactly one non-constant value, we can insert a copy of the
// operation in that block. However, if this is a critical edge, we would be
// inserting the computation on some other paths (e.g. inside a loop). Only
// do this if the pred block is unconditionally branching into the phi block.
- if (NonConstBB != 0) {
+ if (NonConstBB != nullptr) {
BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
- if (!BI || !BI->isUnconditional()) return 0;
+ if (!BI || !BI->isUnconditional()) return nullptr;
}
// Okay, we can do the transformation: create the new PHI node.
@@ -698,7 +799,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
BasicBlock *ThisBB = PN->getIncomingBlock(i);
Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
- Value *InV = 0;
+ Value *InV = nullptr;
// Beware of ConstantExpr: it may eventually evaluate to getNullValue,
// even if currently isNullValue gives false.
Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i));
@@ -712,7 +813,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
} else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) {
Constant *C = cast<Constant>(I.getOperand(1));
for (unsigned i = 0; i != NumPHIValues; ++i) {
- Value *InV = 0;
+ Value *InV = nullptr;
if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
else if (isa<ICmpInst>(CI))
@@ -726,7 +827,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
} else if (I.getNumOperands() == 2) {
Constant *C = cast<Constant>(I.getOperand(1));
for (unsigned i = 0; i != NumPHIValues; ++i) {
- Value *InV = 0;
+ Value *InV = nullptr;
if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
InV = ConstantExpr::get(I.getOpcode(), InC, C);
else
@@ -748,8 +849,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
}
}
- for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
- UI != E; ) {
+ for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
Instruction *User = cast<Instruction>(*UI++);
if (User == &I) continue;
ReplaceInstUsesWith(*User, NewPN);
@@ -766,19 +866,19 @@ Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
SmallVectorImpl<Value*> &NewIndices) {
assert(PtrTy->isPtrOrPtrVectorTy());
- if (!TD)
- return 0;
+ if (!DL)
+ return nullptr;
Type *Ty = PtrTy->getPointerElementType();
if (!Ty->isSized())
- return 0;
+ return nullptr;
// Start with the index over the outer type. Note that the type size
// might be zero (even if the offset isn't zero) if the indexed type
// is something like [0 x {int, int}]
- Type *IntPtrTy = TD->getIntPtrType(PtrTy);
+ Type *IntPtrTy = DL->getIntPtrType(PtrTy);
int64_t FirstIdx = 0;
- if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
+ if (int64_t TySize = DL->getTypeAllocSize(Ty)) {
FirstIdx = Offset/TySize;
Offset -= FirstIdx*TySize;
@@ -796,11 +896,11 @@ Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
// Index into the types. If we fail, set OrigBase to null.
while (Offset) {
// Indexing into tail padding between struct/array elements.
- if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
- return 0;
+ if (uint64_t(Offset*8) >= DL->getTypeSizeInBits(Ty))
+ return nullptr;
if (StructType *STy = dyn_cast<StructType>(Ty)) {
- const StructLayout *SL = TD->getStructLayout(STy);
+ const StructLayout *SL = DL->getStructLayout(STy);
assert(Offset < (int64_t)SL->getSizeInBytes() &&
"Offset must stay within the indexed type");
@@ -811,14 +911,14 @@ Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
Offset -= SL->getElementOffset(Elt);
Ty = STy->getElementType(Elt);
} else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
- uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
+ uint64_t EltSize = DL->getTypeAllocSize(AT->getElementType());
assert(EltSize && "Cannot index into a zero-sized array");
NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
Offset %= EltSize;
Ty = AT->getElementType();
} else {
// Otherwise, we can't index into the middle of this atomic type, bail.
- return 0;
+ return nullptr;
}
}
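
FindElementAtOffset, updated above to use DL, repeatedly splits a byte offset into an
element index plus a remaining offset inside that element (FirstIdx = Offset / TySize,
then Offset -= FirstIdx * TySize, and likewise Offset / EltSize for array elements; the
struct case goes through StructLayout instead). A standalone sketch of that single
splitting step for non-negative offsets (illustrative only):

#include <cassert>
#include <cstdint>

// One splitting step: a byte offset into an array whose elements are EltSize
// bytes wide becomes an element index plus an offset inside that element.
static void splitOffset(int64_t Offset, int64_t EltSize, int64_t &Idx,
                        int64_t &Rem) {
  Idx = Offset / EltSize;
  Rem = Offset - Idx * EltSize;
}

int main() {
  int64_t Idx, Rem;
  splitOffset(20, 8, Idx, Rem); // offset 20 into an array of 8-byte elements
  assert(Idx == 2 && Rem == 4); // element 2, then 4 bytes into that element
  splitOffset(24, 8, Idx, Rem);
  assert(Idx == 3 && Rem == 0); // lands exactly on the start of element 3
  return 0;
}
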
@@ -850,7 +950,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
// If Scale is zero then it does not divide Val.
if (Scale.isMinValue())
- return 0;
+ return nullptr;
// Look through chains of multiplications, searching for a constant that is
// divisible by Scale. For example, descaling X*(Y*(Z*4)) by a factor of 4
@@ -893,7 +993,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
APInt::sdivrem(CI->getValue(), Scale, Quotient, Remainder);
if (!Remainder.isMinValue())
// Not divisible by Scale.
- return 0;
+ return nullptr;
// Replace with the quotient in the parent.
Op = ConstantInt::get(CI->getType(), Quotient);
NoSignedWrap = true;
@@ -906,7 +1006,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
// Multiplication.
NoSignedWrap = BO->hasNoSignedWrap();
if (RequireNoSignedWrap && !NoSignedWrap)
- return 0;
+ return nullptr;
// There are three cases for multiplication: multiplication by exactly
// the scale, multiplication by a constant different to the scale, and
@@ -925,7 +1025,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
// Otherwise drill down into the constant.
if (!Op->hasOneUse())
- return 0;
+ return nullptr;
Parent = std::make_pair(BO, 1);
continue;
@@ -934,7 +1034,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
// Multiplication by something else. Drill down into the left-hand side
// since that's where the reassociate pass puts the good stuff.
if (!Op->hasOneUse())
- return 0;
+ return nullptr;
Parent = std::make_pair(BO, 0);
continue;
@@ -945,7 +1045,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
// Multiplication by a power of 2.
NoSignedWrap = BO->hasNoSignedWrap();
if (RequireNoSignedWrap && !NoSignedWrap)
- return 0;
+ return nullptr;
Value *LHS = BO->getOperand(0);
int32_t Amt = cast<ConstantInt>(BO->getOperand(1))->
@@ -959,7 +1059,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
break;
}
if (Amt < logScale || !Op->hasOneUse())
- return 0;
+ return nullptr;
// Multiplication by more than the scale. Reduce the multiplying amount
// by the scale in the parent.
@@ -970,7 +1070,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
}
if (!Op->hasOneUse())
- return 0;
+ return nullptr;
if (CastInst *Cast = dyn_cast<CastInst>(Op)) {
if (Cast->getOpcode() == Instruction::SExt) {
@@ -984,7 +1084,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
// Scale and the multiplication Y * SmallScale should not overflow.
if (SmallScale.sext(Scale.getBitWidth()) != Scale)
// SmallScale does not sign-extend to Scale.
- return 0;
+ return nullptr;
assert(SmallScale.exactLogBase2() == logScale);
// Require that Y * SmallScale must not overflow.
RequireNoSignedWrap = true;
@@ -1003,7 +1103,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
// trunc (Y * sext Scale) does not, so nsw flags need to be cleared
// from this point up in the expression (see later).
if (RequireNoSignedWrap)
- return 0;
+ return nullptr;
// Drill down through the cast.
unsigned LargeSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
@@ -1017,7 +1117,13 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
}
// Unsupported expression, bail out.
- return 0;
+ return nullptr;
+ }
+
+ // If Op is zero then Val = Op * Scale.
+ if (match(Op, m_Zero())) {
+ NoSignedWrap = true;
+ return Op;
}
// We know that we can successfully descale, so from here on we can safely
@@ -1069,23 +1175,125 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
// Move up one level in the expression.
assert(Ancestor->hasOneUse() && "Drilled down when more than one use!");
- Ancestor = Ancestor->use_back();
+ Ancestor = Ancestor->user_back();
} while (1);
}
+/// \brief Creates a binary-operation node with the same attributes as the
+/// specified one, but with the given operands.
+static Value *CreateBinOpAsGiven(BinaryOperator &Inst, Value *LHS, Value *RHS,
+ InstCombiner::BuilderTy *B) {
+ Value *BORes = B->CreateBinOp(Inst.getOpcode(), LHS, RHS);
+ if (BinaryOperator *NewBO = dyn_cast<BinaryOperator>(BORes)) {
+ if (isa<OverflowingBinaryOperator>(NewBO)) {
+ NewBO->setHasNoSignedWrap(Inst.hasNoSignedWrap());
+ NewBO->setHasNoUnsignedWrap(Inst.hasNoUnsignedWrap());
+ }
+ if (isa<PossiblyExactOperator>(NewBO))
+ NewBO->setIsExact(Inst.isExact());
+ }
+ return BORes;
+}
+
+/// \brief Performs vector-specific transformations of a binary operation.
+/// \param Inst Binary operator to transform.
+/// \return Pointer to node that must replace the original binary operator, or
+/// null pointer if no transformation was made.
+Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) {
+ if (!Inst.getType()->isVectorTy()) return nullptr;
+
+ // It may not be safe to reorder shuffles and things like div, urem, etc.
+ // because we may trap when executing those ops on unknown vector elements.
+ // See PR20059.
+ if (!isSafeToSpeculativelyExecute(&Inst, DL)) return nullptr;
+
+ unsigned VWidth = cast<VectorType>(Inst.getType())->getNumElements();
+ Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
+ assert(cast<VectorType>(LHS->getType())->getNumElements() == VWidth);
+ assert(cast<VectorType>(RHS->getType())->getNumElements() == VWidth);
+
+ // If both arguments of the binary operation are shuffles that use the same
+ // mask and shuffle within a single vector, it is worthwhile to move the
+ // shuffle after the binary operation:
+ // Op(shuffle(v1, m), shuffle(v2, m)) -> shuffle(Op(v1, v2), m)
+ if (isa<ShuffleVectorInst>(LHS) && isa<ShuffleVectorInst>(RHS)) {
+ ShuffleVectorInst *LShuf = cast<ShuffleVectorInst>(LHS);
+ ShuffleVectorInst *RShuf = cast<ShuffleVectorInst>(RHS);
+ if (isa<UndefValue>(LShuf->getOperand(1)) &&
+ isa<UndefValue>(RShuf->getOperand(1)) &&
+ LShuf->getOperand(0)->getType() == RShuf->getOperand(0)->getType() &&
+ LShuf->getMask() == RShuf->getMask()) {
+ Value *NewBO = CreateBinOpAsGiven(Inst, LShuf->getOperand(0),
+ RShuf->getOperand(0), Builder);
+ Value *Res = Builder->CreateShuffleVector(NewBO,
+ UndefValue::get(NewBO->getType()), LShuf->getMask());
+ return Res;
+ }
+ }
+
+ // If one argument is a shuffle within one vector, the other is a constant,
+ // try moving the shuffle after the binary operation.
+ ShuffleVectorInst *Shuffle = nullptr;
+ Constant *C1 = nullptr;
+ if (isa<ShuffleVectorInst>(LHS)) Shuffle = cast<ShuffleVectorInst>(LHS);
+ if (isa<ShuffleVectorInst>(RHS)) Shuffle = cast<ShuffleVectorInst>(RHS);
+ if (isa<Constant>(LHS)) C1 = cast<Constant>(LHS);
+ if (isa<Constant>(RHS)) C1 = cast<Constant>(RHS);
+ if (Shuffle && C1 &&
+ (isa<ConstantVector>(C1) || isa<ConstantDataVector>(C1)) &&
+ isa<UndefValue>(Shuffle->getOperand(1)) &&
+ Shuffle->getType() == Shuffle->getOperand(0)->getType()) {
+ SmallVector<int, 16> ShMask = Shuffle->getShuffleMask();
+ // Find a constant C2 with the property:
+ // shuffle(C2, ShMask) = C1
+ // If no such constant exists (example: ShMask=<0,0> and C1=<1,2>), the
+ // reorder is not possible.
+ SmallVector<Constant*, 16> C2M(VWidth,
+ UndefValue::get(C1->getType()->getScalarType()));
+ bool MayChange = true;
+ for (unsigned I = 0; I < VWidth; ++I) {
+ if (ShMask[I] >= 0) {
+ assert(ShMask[I] < (int)VWidth);
+ if (!isa<UndefValue>(C2M[ShMask[I]])) {
+ MayChange = false;
+ break;
+ }
+ C2M[ShMask[I]] = C1->getAggregateElement(I);
+ }
+ }
+ if (MayChange) {
+ Constant *C2 = ConstantVector::get(C2M);
+ Value *NewLHS, *NewRHS;
+ if (isa<Constant>(LHS)) {
+ NewLHS = C2;
+ NewRHS = Shuffle->getOperand(0);
+ } else {
+ NewLHS = Shuffle->getOperand(0);
+ NewRHS = C2;
+ }
+ Value *NewBO = CreateBinOpAsGiven(Inst, NewLHS, NewRHS, Builder);
+ Value *Res = Builder->CreateShuffleVector(NewBO,
+ UndefValue::get(Inst.getType()), Shuffle->getMask());
+ return Res;
+ }
+ }
+
+ return nullptr;
+}
+
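
SimplifyVectorOp above moves a shuffle past an elementwise binary operation: when both
operands are single-input shuffles with the same mask, Op(shuffle(v1, m), shuffle(v2, m))
becomes shuffle(Op(v1, v2), m). A standalone check of that equivalence on fixed-width
arrays, with addition standing in for the binary operation (illustrative only, not the
LLVM API):

#include <array>
#include <cassert>
#include <cstdint>

using Vec4 = std::array<uint32_t, 4>;

static Vec4 shuffle(const Vec4 &V, const std::array<int, 4> &Mask) {
  Vec4 R{};
  for (int I = 0; I != 4; ++I)
    R[I] = V[Mask[I]]; // single-input shuffle: every lane comes from V
  return R;
}

static Vec4 add(const Vec4 &A, const Vec4 &B) {
  Vec4 R{};
  for (int I = 0; I != 4; ++I)
    R[I] = A[I] + B[I];
  return R;
}

int main() {
  const Vec4 V1 = {1, 2, 3, 4};
  const Vec4 V2 = {10, 20, 30, 40};
  const std::array<int, 4> Mask = {3, 3, 0, 1};
  // add(shuffle(v1, m), shuffle(v2, m)) == shuffle(add(v1, v2), m)
  assert(add(shuffle(V1, Mask), shuffle(V2, Mask)) ==
         shuffle(add(V1, V2), Mask));
  return 0;
}
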
Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
- if (Value *V = SimplifyGEPInst(Ops, TD))
+ if (Value *V = SimplifyGEPInst(Ops, DL))
return ReplaceInstUsesWith(GEP, V);
Value *PtrOp = GEP.getOperand(0);
// Eliminate unneeded casts for indices, and replace indices which displace
// by multiples of a zero size type with zero.
- if (TD) {
+ if (DL) {
bool MadeChange = false;
- Type *IntPtrTy = TD->getIntPtrType(GEP.getPointerOperandType());
+ Type *IntPtrTy = DL->getIntPtrType(GEP.getPointerOperandType());
gep_type_iterator GTI = gep_type_begin(GEP);
for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
@@ -1097,7 +1305,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// If the element type has zero size then any index over it is equivalent
// to an index of zero, so replace it with zero if it is not zero already.
if (SeqTy->getElementType()->isSized() &&
- TD->getTypeAllocSize(SeqTy->getElementType()) == 0)
+ DL->getTypeAllocSize(SeqTy->getElementType()) == 0)
if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
*I = Constant::getNullValue(IntPtrTy);
MadeChange = true;
@@ -1115,13 +1323,98 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
if (MadeChange) return &GEP;
}
+ // Check to see if the inputs to the PHI node are getelementptr instructions.
+ if (PHINode *PN = dyn_cast<PHINode>(PtrOp)) {
+ GetElementPtrInst *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
+ if (!Op1)
+ return nullptr;
+
+ signed DI = -1;
+
+ for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
+ GetElementPtrInst *Op2 = dyn_cast<GetElementPtrInst>(*I);
+ if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands())
+ return nullptr;
+
+ // Keep track of the type as we walk the GEP.
+ Type *CurTy = Op1->getOperand(0)->getType()->getScalarType();
+
+ for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
+ if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
+ return nullptr;
+
+ if (Op1->getOperand(J) != Op2->getOperand(J)) {
+ if (DI == -1) {
+ // We have not yet seen any differences in the GEPs feeding the PHI,
+ // so we record this one if it is allowed to be a variable.
+
+ // The first two arguments can vary for any GEP, the rest have to be
+ // static for struct slots
+ if (J > 1 && CurTy->isStructTy())
+ return nullptr;
+
+ DI = J;
+ } else {
+ // The GEP is different by more than one input. While this could be
+ // extended to support GEPs that vary by more than one variable it
+ // doesn't make sense since it greatly increases the complexity and
+ // would result in an R+R+R addressing mode which no backend
+ // directly supports and would need to be broken into several
+ // simpler instructions anyway.
+ return nullptr;
+ }
+ }
+
+ // Sink down a layer of the type for the next iteration.
+ if (J > 0) {
+ if (CompositeType *CT = dyn_cast<CompositeType>(CurTy)) {
+ CurTy = CT->getTypeAtIndex(Op1->getOperand(J));
+ } else {
+ CurTy = nullptr;
+ }
+ }
+ }
+ }
+
+ GetElementPtrInst *NewGEP = cast<GetElementPtrInst>(Op1->clone());
+
+ if (DI == -1) {
+ // All the GEPs feeding the PHI are identical. Clone one down into our
+ // BB so that it can be merged with the current GEP.
+ GEP.getParent()->getInstList().insert(GEP.getParent()->getFirstNonPHI(),
+ NewGEP);
+ } else {
+ // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
+ // into the current block so it can be merged, and create a new PHI to
+ // set that index.
+ Instruction *InsertPt = Builder->GetInsertPoint();
+ Builder->SetInsertPoint(PN);
+ PHINode *NewPN = Builder->CreatePHI(Op1->getOperand(DI)->getType(),
+ PN->getNumOperands());
+ Builder->SetInsertPoint(InsertPt);
+
+ for (auto &I : PN->operands())
+ NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
+ PN->getIncomingBlock(I));
+
+ NewGEP->setOperand(DI, NewPN);
+ GEP.getParent()->getInstList().insert(GEP.getParent()->getFirstNonPHI(),
+ NewGEP);
+ NewGEP->setOperand(DI, NewPN);
+ }
+
+ GEP.setOperand(0, NewGEP);
+ PtrOp = NewGEP;
+ }
+
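
The new GEP-over-PHI combine above only fires when every getelementptr feeding the PHI
has the same operands except, at most, at one position (tracked in DI), which then
becomes a new PHI of its own. A standalone sketch of that "differ in at most one operand"
bookkeeping (illustrative; the real code additionally requires matching operand types and
constant struct indices):

#include <cassert>
#include <cstddef>
#include <vector>

// Returns true if all operand lists are identical except, at most, at one
// position; that position is reported in DiffIndex (-1 means fully identical).
static bool differAtMostAtOneIndex(const std::vector<std::vector<int>> &Ops,
                                   int &DiffIndex) {
  DiffIndex = -1;
  const std::vector<int> &First = Ops.front();
  for (std::size_t I = 1; I != Ops.size(); ++I) {
    if (Ops[I].size() != First.size())
      return false;
    for (std::size_t J = 0; J != First.size(); ++J) {
      if (Ops[I][J] == First[J])
        continue;
      if (DiffIndex != -1 && DiffIndex != static_cast<int>(J))
        return false; // a second varying position: give up, like DI above
      DiffIndex = static_cast<int>(J);
    }
  }
  return true;
}

int main() {
  int DI;
  // Three "GEPs" that only disagree on operand 2: mergeable, with DI == 2.
  assert(differAtMostAtOneIndex({{0, 1, 4}, {0, 1, 7}, {0, 1, 9}}, DI) && DI == 2);
  // Disagreement in two different positions: not mergeable.
  assert(!differAtMostAtOneIndex({{0, 1, 4}, {0, 2, 7}}, DI));
  return 0;
}
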
// Combine Indices - If the source pointer to this getelementptr instruction
// is a getelementptr instruction, combine the indices of the two
// getelementptr instructions into a single instruction.
//
if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
- return 0;
+ return nullptr;
// Note that if our source is a gep chain itself then we wait for that
// chain to be resolved before we perform this transformation. This
@@ -1129,7 +1422,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
if (GEPOperator *SrcGEP =
dyn_cast<GEPOperator>(Src->getOperand(0)))
if (SrcGEP->getNumOperands() == 2 && shouldMergeGEPs(*Src, *SrcGEP))
- return 0; // Wait until our source is folded to completion.
+ return nullptr; // Wait until our source is folded to completion.
SmallVector<Value*, 8> Indices;
@@ -1157,7 +1450,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// intptr_t). Just avoid transforming this until the input has been
// normalized.
if (SO1->getType() != GO1->getType())
- return 0;
+ return nullptr;
Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
}
@@ -1188,12 +1481,12 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// Canonicalize (gep i8* X, -(ptrtoint Y)) to (sub (ptrtoint X), (ptrtoint Y))
// The GEP pattern is emitted by the SCEV expander for certain kinds of
// pointer arithmetic.
- if (TD && GEP.getNumIndices() == 1 &&
+ if (DL && GEP.getNumIndices() == 1 &&
match(GEP.getOperand(1), m_Neg(m_PtrToInt(m_Value())))) {
unsigned AS = GEP.getPointerAddressSpace();
if (GEP.getType() == Builder->getInt8PtrTy(AS) &&
GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
- TD->getPointerSizeInBits(AS)) {
+ DL->getPointerSizeInBits(AS)) {
Operator *Index = cast<Operator>(GEP.getOperand(1));
Value *PtrToInt = Builder->CreatePtrToInt(PtrOp, Index->getType());
Value *NewSub = Builder->CreateSub(PtrToInt, Index->getOperand(1));
@@ -1207,11 +1500,9 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// We do not handle pointer-vector geps here.
if (!StrippedPtrTy)
- return 0;
-
- if (StrippedPtr != PtrOp &&
- StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {
+ return nullptr;
+ if (StrippedPtr != PtrOp) {
bool HasZeroPointerIndex = false;
if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
HasZeroPointerIndex = C->isZero();
@@ -1234,7 +1525,15 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
GetElementPtrInst *Res =
GetElementPtrInst::Create(StrippedPtr, Idx, GEP.getName());
Res->setIsInBounds(GEP.isInBounds());
- return Res;
+ if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace())
+ return Res;
+ // Insert Res, and create an addrspacecast.
+ // e.g.,
+ // GEP (addrspacecast i8 addrspace(1)* X to [0 x i8]*), i32 0, ...
+ // ->
+ // %0 = GEP i8 addrspace(1)* X, ...
+ // addrspacecast i8 addrspace(1)* %0 to i8*
+ return new AddrSpaceCastInst(Builder->Insert(Res), GEP.getType());
}
if (ArrayType *XATy =
@@ -1246,8 +1545,24 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// to an array of the same type as the destination pointer
// array. Because the array type is never stepped over (there
// is a leading zero) we can fold the cast into this GEP.
- GEP.setOperand(0, StrippedPtr);
- return &GEP;
+ if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace()) {
+ GEP.setOperand(0, StrippedPtr);
+ return &GEP;
+ }
+ // Cannot replace the base pointer directly because StrippedPtr's
+ // address space is different. Instead, create a new GEP followed by
+ // an addrspacecast.
+ // e.g.,
+ // GEP (addrspacecast [10 x i8] addrspace(1)* X to [0 x i8]*),
+ // i32 0, ...
+ // ->
+ // %0 = GEP [10 x i8] addrspace(1)* X, ...
+ // addrspacecast i8 addrspace(1)* %0 to i8*
+ SmallVector<Value*, 8> Idx(GEP.idx_begin(), GEP.idx_end());
+ Value *NewGEP = GEP.isInBounds() ?
+ Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
+ Builder->CreateGEP(StrippedPtr, Idx, GEP.getName());
+ return new AddrSpaceCastInst(NewGEP, GEP.getType());
}
}
}
@@ -1257,27 +1572,29 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
Type *SrcElTy = StrippedPtrTy->getElementType();
Type *ResElTy = PtrOp->getType()->getPointerElementType();
- if (TD && SrcElTy->isArrayTy() &&
- TD->getTypeAllocSize(SrcElTy->getArrayElementType()) ==
- TD->getTypeAllocSize(ResElTy)) {
- Type *IdxType = TD->getIntPtrType(GEP.getType());
+ if (DL && SrcElTy->isArrayTy() &&
+ DL->getTypeAllocSize(SrcElTy->getArrayElementType()) ==
+ DL->getTypeAllocSize(ResElTy)) {
+ Type *IdxType = DL->getIntPtrType(GEP.getType());
Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) };
Value *NewGEP = GEP.isInBounds() ?
Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
Builder->CreateGEP(StrippedPtr, Idx, GEP.getName());
+
// V and GEP are both pointer types --> BitCast
- return new BitCastInst(NewGEP, GEP.getType());
+ return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
+ GEP.getType());
}
// Transform things like:
// %V = mul i64 %N, 4
// %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V
// into: %t1 = getelementptr i32* %arr, i32 %N; bitcast
- if (TD && ResElTy->isSized() && SrcElTy->isSized()) {
+ if (DL && ResElTy->isSized() && SrcElTy->isSized()) {
// Check that changing the type amounts to dividing the index by a scale
// factor.
- uint64_t ResSize = TD->getTypeAllocSize(ResElTy);
- uint64_t SrcSize = TD->getTypeAllocSize(SrcElTy);
+ uint64_t ResSize = DL->getTypeAllocSize(ResElTy);
+ uint64_t SrcSize = DL->getTypeAllocSize(SrcElTy);
if (ResSize && SrcSize % ResSize == 0) {
Value *Idx = GEP.getOperand(1);
unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
@@ -1285,7 +1602,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// Earlier transforms ensure that the index has type IntPtrType, which
// considerably simplifies the logic by eliminating implicit casts.
- assert(Idx->getType() == TD->getIntPtrType(GEP.getType()) &&
+ assert(Idx->getType() == DL->getIntPtrType(GEP.getType()) &&
"Index not cast to pointer width?");
bool NSW;
@@ -1296,8 +1613,10 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
Value *NewGEP = GEP.isInBounds() && NSW ?
Builder->CreateInBoundsGEP(StrippedPtr, NewIdx, GEP.getName()) :
Builder->CreateGEP(StrippedPtr, NewIdx, GEP.getName());
+
// The NewGEP must be pointer typed, so must the old one -> BitCast
- return new BitCastInst(NewGEP, GEP.getType());
+ return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
+ GEP.getType());
}
}
}
@@ -1306,13 +1625,13 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
// (where tmp = 8*tmp2) into:
// getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
- if (TD && ResElTy->isSized() && SrcElTy->isSized() &&
+ if (DL && ResElTy->isSized() && SrcElTy->isSized() &&
SrcElTy->isArrayTy()) {
// Check that changing to the array element type amounts to dividing the
// index by a scale factor.
- uint64_t ResSize = TD->getTypeAllocSize(ResElTy);
+ uint64_t ResSize = DL->getTypeAllocSize(ResElTy);
uint64_t ArrayEltSize
- = TD->getTypeAllocSize(SrcElTy->getArrayElementType());
+ = DL->getTypeAllocSize(SrcElTy->getArrayElementType());
if (ResSize && ArrayEltSize % ResSize == 0) {
Value *Idx = GEP.getOperand(1);
unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
@@ -1320,7 +1639,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// Earlier transforms ensure that the index has type IntPtrType, which
// considerably simplifies the logic by eliminating implicit casts.
- assert(Idx->getType() == TD->getIntPtrType(GEP.getType()) &&
+ assert(Idx->getType() == DL->getIntPtrType(GEP.getType()) &&
"Index not cast to pointer width?");
bool NSW;
@@ -1329,7 +1648,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// If the multiplication NewIdx * Scale may overflow then the new
// GEP may not be "inbounds".
Value *Off[2] = {
- Constant::getNullValue(TD->getIntPtrType(GEP.getType())),
+ Constant::getNullValue(DL->getIntPtrType(GEP.getType())),
NewIdx
};
@@ -1337,15 +1656,16 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
Builder->CreateInBoundsGEP(StrippedPtr, Off, GEP.getName()) :
Builder->CreateGEP(StrippedPtr, Off, GEP.getName());
// The NewGEP must be pointer typed, so must the old one -> BitCast
- return new BitCastInst(NewGEP, GEP.getType());
+ return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
+ GEP.getType());
}
}
}
}
}
- if (!TD)
- return 0;
+ if (!DL)
+ return nullptr;
/// See if we can simplify:
/// X = bitcast A* to B*
@@ -1355,10 +1675,10 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
Value *Operand = BCI->getOperand(0);
PointerType *OpType = cast<PointerType>(Operand->getType());
- unsigned OffsetBits = TD->getPointerTypeSizeInBits(OpType);
+ unsigned OffsetBits = DL->getPointerTypeSizeInBits(OpType);
APInt Offset(OffsetBits, 0);
if (!isa<BitCastInst>(Operand) &&
- GEP.accumulateConstantOffset(*TD, Offset) &&
+ GEP.accumulateConstantOffset(*DL, Offset) &&
StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {
// If this GEP instruction doesn't move the pointer, just replace the GEP
@@ -1397,7 +1717,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
}
}
- return 0;
+ return nullptr;
}
static bool
@@ -1408,9 +1728,8 @@ isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakVH> &Users,
do {
Instruction *PI = Worklist.pop_back_val();
- for (Value::use_iterator UI = PI->use_begin(), UE = PI->use_end(); UI != UE;
- ++UI) {
- Instruction *I = cast<Instruction>(*UI);
+ for (User *U : PI->users()) {
+ Instruction *I = cast<Instruction>(U);
switch (I->getOpcode()) {
default:
// Give up the moment we see something we can't handle.
@@ -1513,7 +1832,7 @@ Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
}
return EraseInstFromFunction(MI);
}
- return 0;
+ return nullptr;
}
/// \brief Move the call to free before a NULL test.
@@ -1542,30 +1861,30 @@ tryToMoveFreeBeforeNullTest(CallInst &FI) {
// would duplicate the call to free in each predecessor and it may
// not be profitable even for code size.
if (!PredBB)
- return 0;
+ return nullptr;
// Validate constraint #2: Does this block contain only the call to
// free and an unconditional branch?
// FIXME: We could check if we can speculate everything in the
// predecessor block
if (FreeInstrBB->size() != 2)
- return 0;
+ return nullptr;
BasicBlock *SuccBB;
if (!match(FreeInstrBB->getTerminator(), m_UnconditionalBr(SuccBB)))
- return 0;
+ return nullptr;
// Validate the rest of constraint #1 by matching on the pred branch.
TerminatorInst *TI = PredBB->getTerminator();
BasicBlock *TrueBB, *FalseBB;
ICmpInst::Predicate Pred;
if (!match(TI, m_Br(m_ICmp(Pred, m_Specific(Op), m_Zero()), TrueBB, FalseBB)))
- return 0;
+ return nullptr;
if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
- return 0;
+ return nullptr;
// Validate constraint #3: Ensure the null case just falls through.
if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
- return 0;
+ return nullptr;
assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
"Broken CFG: missing edge from predecessor to successor");
@@ -1600,14 +1919,14 @@ Instruction *InstCombiner::visitFree(CallInst &FI) {
if (Instruction *I = tryToMoveFreeBeforeNullTest(FI))
return I;
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
// Change br (not X), label True, label False to: br X, label False, True
- Value *X = 0;
+ Value *X = nullptr;
BasicBlock *TrueDest;
BasicBlock *FalseDest;
if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
@@ -1618,7 +1937,7 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
return &BI;
}
- // Cannonicalize fcmp_one -> fcmp_oeq
+ // Canonicalize fcmp_one -> fcmp_oeq
FCmpInst::Predicate FPred; Value *Y;
if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
TrueDest, FalseDest)) &&
@@ -1634,7 +1953,7 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
return &BI;
}
- // Cannonicalize icmp_ne -> icmp_eq
+ // Canonicalize icmp_ne -> icmp_eq
ICmpInst::Predicate IPred;
if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
TrueDest, FalseDest)) &&
@@ -1650,7 +1969,7 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
return &BI;
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
@@ -1674,7 +1993,7 @@ Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
return &SI;
}
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
@@ -1691,7 +2010,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
// first index
return ExtractValueInst::Create(C2, EV.getIndices().slice(1));
}
- return 0; // Can't handle other constants
+ return nullptr; // Can't handle other constants
}
if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
@@ -1824,7 +2143,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
// and if again single-use then via load (gep (gep)) to load (gep).
// However, double extracts from e.g. function arguments or return values
// aren't handled yet.
- return 0;
+ return nullptr;
}
enum Personality_Type {
@@ -1880,7 +2199,7 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
// Simplify the list of clauses, eg by removing repeated catch clauses
// (these are often created by inlining).
bool MakeNewInstruction = false; // If true, recreate using the following:
- SmallVector<Value *, 16> NewClauses; // - Clauses for the new instruction;
+ SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup.
SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
@@ -1888,8 +2207,8 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
bool isLastClause = i + 1 == e;
if (LI.isCatch(i)) {
// A catch clause.
- Value *CatchClause = LI.getClause(i);
- Constant *TypeInfo = cast<Constant>(CatchClause->stripPointerCasts());
+ Constant *CatchClause = LI.getClause(i);
+ Constant *TypeInfo = CatchClause->stripPointerCasts();
// If we already saw this clause, there is no point in having a second
// copy of it.
@@ -1918,7 +2237,7 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
// equal (for example if one represents a C++ class, and the other some
// class derived from it).
assert(LI.isFilter(i) && "Unsupported landingpad clause!");
- Value *FilterClause = LI.getClause(i);
+ Constant *FilterClause = LI.getClause(i);
ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
unsigned NumTypeInfos = FilterType->getNumElements();
@@ -1962,8 +2281,8 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
// catch-alls. If so, the filter can be discarded.
bool SawCatchAll = false;
for (unsigned j = 0; j != NumTypeInfos; ++j) {
- Value *Elt = Filter->getOperand(j);
- Constant *TypeInfo = cast<Constant>(Elt->stripPointerCasts());
+ Constant *Elt = Filter->getOperand(j);
+ Constant *TypeInfo = Elt->stripPointerCasts();
if (isCatchAll(Personality, TypeInfo)) {
// This element is a catch-all. Bail out, noting this fact.
SawCatchAll = true;
@@ -2068,7 +2387,7 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
continue;
// If Filter is a subset of LFilter, i.e. every element of Filter is also
// an element of LFilter, then discard LFilter.
- SmallVectorImpl<Value *>::iterator J = NewClauses.begin() + j;
+ SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
// If Filter is empty then it is a subset of LFilter.
if (!FElts) {
// Discard LFilter.
@@ -2163,7 +2482,7 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
return &LI;
}
- return 0;
+ return nullptr;
}
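As a standalone model of the pruning rules stated in the comments above (keep only the first copy of each catch typeinfo, drop a filter that contains a catch-all, drop a filter that is a superset of one already kept), with typeinfos reduced to plain integers and 0 standing for a catch-all; this is an illustration only, not the landingpad code itself:

// Standalone model of the clause-pruning rules; typeinfos are just ints here.
#include <algorithm>
#include <cstdio>
#include <set>
#include <vector>

int main() {
  std::set<int> AlreadyCaught;
  std::vector<int> Catches = {1, 2, 1};                    // duplicate catch of typeinfo 1
  std::vector<std::vector<int>> Filters = {{3}, {3, 4}, {0, 5}};

  // Rule 1: keep only the first occurrence of each catch typeinfo.
  std::vector<int> NewCatches;
  for (int TI : Catches)
    if (AlreadyCaught.insert(TI).second)
      NewCatches.push_back(TI);

  // Rule 2: a filter containing a catch-all can be discarded.
  // Rule 3: if a kept filter is a subset of this one, this one is redundant.
  std::vector<std::vector<int>> NewFilters;
  for (const auto &F : Filters) {
    if (std::find(F.begin(), F.end(), 0) != F.end())
      continue;                                            // rule 2
    bool Redundant = false;
    for (const auto &Kept : NewFilters)                    // ranges kept sorted
      Redundant |= std::includes(F.begin(), F.end(), Kept.begin(), Kept.end());
    if (!Redundant)
      NewFilters.push_back(F);                             // rule 3
  }

  std::printf("catches kept: %zu, filters kept: %zu\n",
              NewCatches.size(), NewFilters.size());       // 2 and 1
}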
@@ -2214,7 +2533,7 @@ static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
static bool AddReachableCodeToWorklist(BasicBlock *BB,
SmallPtrSet<BasicBlock*, 64> &Visited,
InstCombiner &IC,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI) {
bool MadeIRChange = false;
SmallVector<BasicBlock*, 256> Worklist;
@@ -2242,7 +2561,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
// ConstantProp instruction if trivially constant.
if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
- if (Constant *C = ConstantFoldInstruction(Inst, TD, TLI)) {
+ if (Constant *C = ConstantFoldInstruction(Inst, DL, TLI)) {
DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: "
<< *Inst << '\n');
Inst->replaceAllUsesWith(C);
@@ -2251,16 +2570,16 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
continue;
}
- if (TD) {
+ if (DL) {
// See if we can constant fold its operands.
for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
i != e; ++i) {
ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
- if (CE == 0) continue;
+ if (CE == nullptr) continue;
Constant*& FoldRes = FoldedConstants[CE];
if (!FoldRes)
- FoldRes = ConstantFoldConstantExpression(CE, TD, TLI);
+ FoldRes = ConstantFoldConstantExpression(CE, DL, TLI);
if (!FoldRes)
FoldRes = CE;
@@ -2327,7 +2646,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
// the reachable instructions. Ignore blocks that are not reachable. Keep
// track of which blocks we visit.
SmallPtrSet<BasicBlock*, 64> Visited;
- MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD,
+ MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, DL,
TLI);
// Do a quick scan over the function. If we find any blocks that are
@@ -2360,7 +2679,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
while (!Worklist.isEmpty()) {
Instruction *I = Worklist.RemoveOne();
- if (I == 0) continue; // skip null values.
+ if (I == nullptr) continue; // skip null values.
// Check to see if we can DCE the instruction.
if (isInstructionTriviallyDead(I, TLI)) {
@@ -2373,7 +2692,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
// Instruction isn't dead, see if we can constant propagate it.
if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
- if (Constant *C = ConstantFoldInstruction(I, TD, TLI)) {
+ if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
// Add operands to the worklist.
@@ -2387,12 +2706,12 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
// See if we can trivially sink this instruction to a successor basic block.
if (I->hasOneUse()) {
BasicBlock *BB = I->getParent();
- Instruction *UserInst = cast<Instruction>(I->use_back());
+ Instruction *UserInst = cast<Instruction>(*I->user_begin());
BasicBlock *UserParent;
// Get the block the use occurs in.
if (PHINode *PN = dyn_cast<PHINode>(UserInst))
- UserParent = PN->getIncomingBlock(I->use_begin().getUse());
+ UserParent = PN->getIncomingBlock(*I->use_begin());
else
UserParent = UserInst->getParent();
@@ -2408,9 +2727,18 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
// If the user is one of our immediate successors, and if that successor
// only has us as a predecessor (we'd have to split the critical edge
// otherwise), we can keep going.
- if (UserIsSuccessor && UserParent->getSinglePredecessor())
+ if (UserIsSuccessor && UserParent->getSinglePredecessor()) {
// Okay, the CFG is simple enough, try to sink this instruction.
- MadeIRChange |= TryToSinkInstruction(I, UserParent);
+ if (TryToSinkInstruction(I, UserParent)) {
+ MadeIRChange = true;
+ // We'll add uses of the sunk instruction below, but since sinking
+ // can expose opportunities for its *operands*, add them to the
+ // worklist
+ for (Use &U : I->operands())
+ if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
+ Worklist.Add(OpI);
+ }
+ }
}
}
@@ -2482,23 +2810,27 @@ namespace {
class InstCombinerLibCallSimplifier : public LibCallSimplifier {
InstCombiner *IC;
public:
- InstCombinerLibCallSimplifier(const DataLayout *TD,
+ InstCombinerLibCallSimplifier(const DataLayout *DL,
const TargetLibraryInfo *TLI,
InstCombiner *IC)
- : LibCallSimplifier(TD, TLI, UnsafeFPShrink) {
+ : LibCallSimplifier(DL, TLI, UnsafeFPShrink) {
this->IC = IC;
}
/// replaceAllUsesWith - override so that instruction replacement
/// can be defined in terms of the instruction combiner framework.
- virtual void replaceAllUsesWith(Instruction *I, Value *With) const {
+ void replaceAllUsesWith(Instruction *I, Value *With) const override {
IC->ReplaceInstUsesWith(*I, With);
}
};
}
bool InstCombiner::runOnFunction(Function &F) {
- TD = getAnalysisIfAvailable<DataLayout>();
+ if (skipOptnoneFunction(F))
+ return false;
+
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = &getAnalysis<TargetLibraryInfo>();
// Minimizing size?
MinimizeSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
@@ -2507,11 +2839,11 @@ bool InstCombiner::runOnFunction(Function &F) {
/// Builder - This is an IRBuilder that automatically inserts new
/// instructions into the worklist when they are created.
IRBuilder<true, TargetFolder, InstCombineIRInserter>
- TheBuilder(F.getContext(), TargetFolder(TD),
+ TheBuilder(F.getContext(), TargetFolder(DL),
InstCombineIRInserter(Worklist));
Builder = &TheBuilder;
- InstCombinerLibCallSimplifier TheSimplifier(TD, TLI, this);
+ InstCombinerLibCallSimplifier TheSimplifier(DL, TLI, this);
Simplifier = &TheSimplifier;
bool EverMadeChange = false;
@@ -2525,7 +2857,7 @@ bool InstCombiner::runOnFunction(Function &F) {
while (DoOneIteration(F, Iteration++))
EverMadeChange = true;
- Builder = 0;
+ Builder = nullptr;
return EverMadeChange;
}
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index d731ec5..124ffe2 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -13,52 +13,55 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asan"
-
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
-#include "llvm/DIBuilder.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
-#include "llvm/InstVisitor.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/system_error.h"
+#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
-#include "llvm/Transforms/Utils/SpecialCaseList.h"
#include <algorithm>
#include <string>
+#include <system_error>
using namespace llvm;
+#define DEBUG_TYPE "asan"
+
static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
+static const uint64_t kIOSShadowOffset32 = 1ULL << 30;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
-static const uint64_t kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
+static const uint64_t kSmallX86_64ShadowOffset = 0x7FFF8000; // < 2G.
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa8000;
+static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
+static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const size_t kMinStackMallocSize = 1 << 6; // 64B
static const size_t kMaxStackMallocSize = 1 << 16; // 64K
@@ -67,7 +70,7 @@ static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
static const char *const kAsanModuleCtorName = "asan.module_ctor";
static const char *const kAsanModuleDtorName = "asan.module_dtor";
-static const int kAsanCtorAndCtorPriority = 1;
+static const int kAsanCtorAndDtorPriority = 1;
static const char *const kAsanReportErrorTemplate = "__asan_report_";
static const char *const kAsanReportLoadN = "__asan_report_load_n";
static const char *const kAsanReportStoreN = "__asan_report_store_n";
@@ -76,11 +79,12 @@ static const char *const kAsanUnregisterGlobalsName =
"__asan_unregister_globals";
static const char *const kAsanPoisonGlobalsName = "__asan_before_dynamic_init";
static const char *const kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init";
-static const char *const kAsanInitName = "__asan_init_v3";
+static const char *const kAsanInitName = "__asan_init_v4";
+static const char *const kAsanCovModuleInitName = "__sanitizer_cov_module_init";
static const char *const kAsanCovName = "__sanitizer_cov";
+static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp";
+static const char *const kAsanPtrSub = "__sanitizer_ptr_sub";
static const char *const kAsanHandleNoReturnName = "__asan_handle_no_return";
-static const char *const kAsanMappingOffsetName = "__asan_mapping_offset";
-static const char *const kAsanMappingScaleName = "__asan_mapping_scale";
static const int kMaxAsanStackMallocSizeClass = 10;
static const char *const kAsanStackMallocNameTemplate = "__asan_stack_malloc_";
static const char *const kAsanStackFreeNameTemplate = "__asan_stack_free_";
@@ -93,11 +97,6 @@ static const char *const kAsanUnpoisonStackMemoryName =
static const char *const kAsanOptionDetectUAR =
"__asan_option_detect_stack_use_after_return";
-// These constants must match the definitions in the run-time library.
-static const int kAsanStackLeftRedzoneMagic = 0xf1;
-static const int kAsanStackMidRedzoneMagic = 0xf2;
-static const int kAsanStackRightRedzoneMagic = 0xf3;
-static const int kAsanStackPartialRedzoneMagic = 0xf4;
#ifndef NDEBUG
static const int kAsanStackAfterReturnMagic = 0xf5;
#endif
@@ -129,23 +128,36 @@ static cl::opt<int> ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb",
// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack",
cl::desc("Handle stack memory"), cl::Hidden, cl::init(true));
-// This flag may need to be replaced with -f[no]asan-use-after-return.
static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
- cl::desc("Check return-after-free"), cl::Hidden, cl::init(false));
+ cl::desc("Check return-after-free"), cl::Hidden, cl::init(true));
// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
cl::desc("Handle global objects"), cl::Hidden, cl::init(true));
-static cl::opt<bool> ClCoverage("asan-coverage",
- cl::desc("ASan coverage"), cl::Hidden, cl::init(false));
+static cl::opt<int> ClCoverage("asan-coverage",
+ cl::desc("ASan coverage. 0: none, 1: entry block, 2: all blocks"),
+ cl::Hidden, cl::init(false));
+static cl::opt<int> ClCoverageBlockThreshold("asan-coverage-block-threshold",
+ cl::desc("Add coverage instrumentation only to the entry block if there "
+ "are more than this number of blocks."),
+ cl::Hidden, cl::init(1500));
static cl::opt<bool> ClInitializers("asan-initialization-order",
- cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(false));
-static cl::opt<bool> ClMemIntrin("asan-memintrin",
- cl::desc("Handle memset/memcpy/memmove"), cl::Hidden, cl::init(true));
-static cl::opt<bool> ClRealignStack("asan-realign-stack",
- cl::desc("Realign stack to 32"), cl::Hidden, cl::init(true));
-static cl::opt<std::string> ClBlacklistFile("asan-blacklist",
- cl::desc("File containing the list of objects to ignore "
- "during instrumentation"), cl::Hidden);
+ cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true));
+static cl::opt<bool> ClInvalidPointerPairs("asan-detect-invalid-pointer-pair",
+ cl::desc("Instrument <, <=, >, >=, - with pointer operands"),
+ cl::Hidden, cl::init(false));
+static cl::opt<unsigned> ClRealignStack("asan-realign-stack",
+ cl::desc("Realign stack to the value of this flag (power of two)"),
+ cl::Hidden, cl::init(32));
+static cl::opt<int> ClInstrumentationWithCallsThreshold(
+ "asan-instrumentation-with-call-threshold",
+ cl::desc("If the function being instrumented contains more than "
+ "this number of memory accesses, use callbacks instead of "
+ "inline checks (-1 means never use callbacks)."),
+ cl::Hidden, cl::init(7000));
+static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
+ "asan-memory-access-callback-prefix",
+ cl::desc("Prefix for memory access callbacks"), cl::Hidden,
+ cl::init("__asan_"));
// This is an experimental feature that will allow to choose between
// instrumented and non-instrumented code at link-time.
@@ -165,11 +177,6 @@ static cl::opt<bool> ClKeepUninstrumented("asan-keep-uninstrumented-functions",
// Shadow = (Mem >> scale) + (1 << offset_log)
static cl::opt<int> ClMappingScale("asan-mapping-scale",
cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0));
-static cl::opt<int> ClMappingOffsetLog("asan-mapping-offset-log",
- cl::desc("offset of asan shadow mapping"), cl::Hidden, cl::init(-1));
-static cl::opt<bool> ClShort64BitOffset("asan-short-64bit-mapping-offset",
- cl::desc("Use short immediate constant as the mapping offset for 64bit"),
- cl::Hidden, cl::init(true));
// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.
@@ -205,29 +212,86 @@ STATISTIC(NumOptimizedAccessesToGlobalVar,
"Number of optimized accesses to global vars");
namespace {
-/// A set of dynamically initialized globals extracted from metadata.
-class SetOfDynamicallyInitializedGlobals {
+/// Frontend-provided metadata for global variables.
+class GlobalsMetadata {
public:
- void Init(Module& M) {
- // Clang generates metadata identifying all dynamically initialized globals.
- NamedMDNode *DynamicGlobals =
- M.getNamedMetadata("llvm.asan.dynamically_initialized_globals");
- if (!DynamicGlobals)
+ struct Entry {
+ Entry()
+ : SourceLoc(nullptr), Name(nullptr), IsDynInit(false),
+ IsBlacklisted(false) {}
+ GlobalVariable *SourceLoc;
+ GlobalVariable *Name;
+ bool IsDynInit;
+ bool IsBlacklisted;
+ };
+
+ GlobalsMetadata() : inited_(false) {}
+
+ void init(Module& M) {
+ assert(!inited_);
+ inited_ = true;
+ NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals");
+ if (!Globals)
return;
- for (int i = 0, n = DynamicGlobals->getNumOperands(); i < n; ++i) {
- MDNode *MDN = DynamicGlobals->getOperand(i);
- assert(MDN->getNumOperands() == 1);
- Value *VG = MDN->getOperand(0);
- // The optimizer may optimize away a global entirely, in which case we
- // cannot instrument access to it.
- if (!VG)
+ for (auto MDN : Globals->operands()) {
+ // Metadata node contains the global and the fields of "Entry".
+ assert(MDN->getNumOperands() == 5);
+ Value *V = MDN->getOperand(0);
+ // The optimizer may optimize away a global entirely.
+ if (!V)
continue;
- DynInitGlobals.insert(cast<GlobalVariable>(VG));
+ GlobalVariable *GV = cast<GlobalVariable>(V);
+ // We can already have an entry for GV if it was merged with another
+ // global.
+ Entry &E = Entries[GV];
+ if (Value *Loc = MDN->getOperand(1)) {
+ GlobalVariable *GVLoc = cast<GlobalVariable>(Loc);
+ E.SourceLoc = GVLoc;
+ addSourceLocationGlobal(GVLoc);
+ }
+ if (Value *Name = MDN->getOperand(2)) {
+ GlobalVariable *GVName = cast<GlobalVariable>(Name);
+ E.Name = GVName;
+ InstrumentationGlobals.insert(GVName);
+ }
+ ConstantInt *IsDynInit = cast<ConstantInt>(MDN->getOperand(3));
+ E.IsDynInit |= IsDynInit->isOne();
+ ConstantInt *IsBlacklisted = cast<ConstantInt>(MDN->getOperand(4));
+ E.IsBlacklisted |= IsBlacklisted->isOne();
}
}
- bool Contains(GlobalVariable *G) { return DynInitGlobals.count(G) != 0; }
+
+ /// Returns metadata entry for a given global.
+ Entry get(GlobalVariable *G) const {
+ auto Pos = Entries.find(G);
+ return (Pos != Entries.end()) ? Pos->second : Entry();
+ }
+
+ /// Check if the global was generated by the instrumentation
+ /// (we don't want to instrument it again in this case).
+ bool isInstrumentationGlobal(GlobalVariable *G) const {
+ return InstrumentationGlobals.count(G);
+ }
+
private:
- SmallSet<GlobalValue*, 32> DynInitGlobals;
+ bool inited_;
+ DenseMap<GlobalVariable*, Entry> Entries;
+ // Globals generated by the frontend instrumentation.
+ DenseSet<GlobalVariable*> InstrumentationGlobals;
+
+ void addSourceLocationGlobal(GlobalVariable *SourceLocGV) {
+ // Source location global is a struct with layout:
+ // {
+ // filename,
+ // i32 line_number,
+ // i32 column_number,
+ // }
+ InstrumentationGlobals.insert(SourceLocGV);
+ ConstantStruct *Contents =
+ cast<ConstantStruct>(SourceLocGV->getInitializer());
+ GlobalVariable *FilenameGV = cast<GlobalVariable>(Contents->getOperand(0));
+ InstrumentationGlobals.insert(FilenameGV);
+ }
};
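Each llvm.asan.globals node carries a global plus the fields of Entry; when two records describe the same (merged) global, their boolean fields are OR-ed into one entry. A tiny standalone model of that merge, keyed by name instead of by GlobalVariable*, purely for illustration:

// Illustration of the flag-merging behaviour only; the real class keys the
// map by GlobalVariable* and reads the fields out of metadata operands.
#include <cstdio>
#include <map>
#include <string>

struct Entry {
  bool IsDynInit = false;
  bool IsBlacklisted = false;
};

int main() {
  std::map<std::string, Entry> Entries;

  // Two metadata records ended up describing the same merged global.
  struct Record { std::string Global; bool IsDynInit, IsBlacklisted; };
  Record Records[] = {{"g", true, false}, {"g", false, true}};

  for (const Record &R : Records) {
    Entry &E = Entries[R.Global];   // creates the entry on first sight
    E.IsDynInit |= R.IsDynInit;
    E.IsBlacklisted |= R.IsBlacklisted;
  }

  std::printf("g: dyn_init=%d blacklisted=%d\n",
              Entries["g"].IsDynInit, Entries["g"].IsBlacklisted);  // 1 1
}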
/// This struct defines the shadow mapping using the rule:
@@ -238,11 +302,12 @@ struct ShadowMapping {
bool OrShadowOffset;
};
-static ShadowMapping getShadowMapping(const Module &M, int LongSize,
- bool ZeroBaseShadow) {
+static ShadowMapping getShadowMapping(const Module &M, int LongSize) {
llvm::Triple TargetTriple(M.getTargetTriple());
bool IsAndroid = TargetTriple.getEnvironment() == llvm::Triple::Android;
- bool IsMacOSX = TargetTriple.getOS() == llvm::Triple::MacOSX;
+ bool IsIOS = TargetTriple.getOS() == llvm::Triple::IOS;
+ bool IsFreeBSD = TargetTriple.getOS() == llvm::Triple::FreeBSD;
+ bool IsLinux = TargetTriple.getOS() == llvm::Triple::Linux;
bool IsPPC64 = TargetTriple.getArch() == llvm::Triple::ppc64 ||
TargetTriple.getArch() == llvm::Triple::ppc64le;
bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64;
@@ -251,22 +316,26 @@ static ShadowMapping getShadowMapping(const Module &M, int LongSize,
ShadowMapping Mapping;
- // OR-ing shadow offset if more efficient (at least on x86),
- // but on ppc64 we have to use add since the shadow offset is not neccesary
- // 1/8-th of the address space.
- Mapping.OrShadowOffset = !IsPPC64 && !ClShort64BitOffset;
-
- Mapping.Offset = (IsAndroid || ZeroBaseShadow) ? 0 :
- (LongSize == 32 ?
- (IsMIPS32 ? kMIPS32_ShadowOffset32 : kDefaultShadowOffset32) :
- IsPPC64 ? kPPC64_ShadowOffset64 : kDefaultShadowOffset64);
- if (!ZeroBaseShadow && ClShort64BitOffset && IsX86_64 && !IsMacOSX) {
- assert(LongSize == 64);
- Mapping.Offset = kDefaultShort64bitShadowOffset;
- }
- if (!ZeroBaseShadow && ClMappingOffsetLog >= 0) {
- // Zero offset log is the special case.
- Mapping.Offset = (ClMappingOffsetLog == 0) ? 0 : 1ULL << ClMappingOffsetLog;
+ if (LongSize == 32) {
+ if (IsAndroid)
+ Mapping.Offset = 0;
+ else if (IsMIPS32)
+ Mapping.Offset = kMIPS32_ShadowOffset32;
+ else if (IsFreeBSD)
+ Mapping.Offset = kFreeBSD_ShadowOffset32;
+ else if (IsIOS)
+ Mapping.Offset = kIOSShadowOffset32;
+ else
+ Mapping.Offset = kDefaultShadowOffset32;
+ } else { // LongSize == 64
+ if (IsPPC64)
+ Mapping.Offset = kPPC64_ShadowOffset64;
+ else if (IsFreeBSD)
+ Mapping.Offset = kFreeBSD_ShadowOffset64;
+ else if (IsLinux && IsX86_64)
+ Mapping.Offset = kSmallX86_64ShadowOffset;
+ else
+ Mapping.Offset = kDefaultShadowOffset64;
}
Mapping.Scale = kDefaultShadowScale;
@@ -274,6 +343,11 @@ static ShadowMapping getShadowMapping(const Module &M, int LongSize,
Mapping.Scale = ClMappingScale;
}
+ // OR-ing the shadow offset is more efficient (at least on x86) if the offset
+ // is a power of two, but on ppc64 we have to use add since the shadow
+ // offset is not necessarily 1/8-th of the address space.
+ Mapping.OrShadowOffset = !IsPPC64 && !(Mapping.Offset & (Mapping.Offset - 1));
+
return Mapping;
}
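A minimal standalone sketch of the shadow arithmetic this mapping feeds, assuming the defaults visible above (scale 3 and the small x86-64 Linux offset 0x7FFF8000); the OR-versus-ADD choice mirrors the power-of-two test stored in Mapping.OrShadowOffset:

// Standalone illustration only, not part of the patch.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t Scale  = 3;           // kDefaultShadowScale
  const uint64_t Offset = 0x7FFF8000;  // kSmallX86_64ShadowOffset (Linux/x86-64)

  // OR may replace ADD only when the offset is a power of two (and not on
  // ppc64); 0x7FFF8000 is not, so this configuration falls back to ADD.
  bool OrShadowOffset = (Offset & (Offset - 1)) == 0;

  uint64_t Addr = 0x602000000010ULL;   // hypothetical application address
  uint64_t Shadow = OrShadowOffset ? ((Addr >> Scale) | Offset)
                                   : ((Addr >> Scale) + Offset);
  std::printf("shadow(0x%llx) = 0x%llx via %s\n",
              (unsigned long long)Addr, (unsigned long long)Shadow,
              OrShadowOffset ? "or" : "add");
}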
@@ -285,58 +359,37 @@ static size_t RedzoneSizeForScale(int MappingScale) {
/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer : public FunctionPass {
- AddressSanitizer(bool CheckInitOrder = true,
- bool CheckUseAfterReturn = false,
- bool CheckLifetime = false,
- StringRef BlacklistFile = StringRef(),
- bool ZeroBaseShadow = false)
- : FunctionPass(ID),
- CheckInitOrder(CheckInitOrder || ClInitializers),
- CheckUseAfterReturn(CheckUseAfterReturn || ClUseAfterReturn),
- CheckLifetime(CheckLifetime || ClCheckLifetime),
- BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
- : BlacklistFile),
- ZeroBaseShadow(ZeroBaseShadow) {}
- virtual const char *getPassName() const {
+ AddressSanitizer() : FunctionPass(ID) {}
+ const char *getPassName() const override {
return "AddressSanitizerFunctionPass";
}
- void instrumentMop(Instruction *I);
+ void instrumentMop(Instruction *I, bool UseCalls);
+ void instrumentPointerComparisonOrSubtraction(Instruction *I);
void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
Value *Addr, uint32_t TypeSize, bool IsWrite,
- Value *SizeArgument);
+ Value *SizeArgument, bool UseCalls);
Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
Value *ShadowValue, uint32_t TypeSize);
Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
bool IsWrite, size_t AccessSizeIndex,
Value *SizeArgument);
- bool instrumentMemIntrinsic(MemIntrinsic *MI);
- void instrumentMemIntrinsicParam(Instruction *OrigIns, Value *Addr,
- Value *Size,
- Instruction *InsertBefore, bool IsWrite);
+ void instrumentMemIntrinsic(MemIntrinsic *MI);
Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
- bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
bool maybeInsertAsanInitAtFunctionEntry(Function &F);
- void emitShadowMapping(Module &M, IRBuilder<> &IRB) const;
- virtual bool doInitialization(Module &M);
+ bool doInitialization(Module &M) override;
static char ID; // Pass identification, replacement for typeid
private:
void initializeCallbacks(Module &M);
- bool ShouldInstrumentGlobal(GlobalVariable *G);
bool LooksLikeCodeInBug11395(Instruction *I);
- void FindDynamicInitializers(Module &M);
bool GlobalIsLinkerInitialized(GlobalVariable *G);
- bool InjectCoverage(Function &F);
-
- bool CheckInitOrder;
- bool CheckUseAfterReturn;
- bool CheckLifetime;
- SmallString<64> BlacklistFile;
- bool ZeroBaseShadow;
+ bool InjectCoverage(Function &F, const ArrayRef<BasicBlock*> AllBlocks);
+ void InjectCoverageAtBlock(Function &F, BasicBlock &BB);
LLVMContext *C;
- DataLayout *TD;
+ const DataLayout *DL;
int LongSize;
Type *IntptrTy;
ShadowMapping Mapping;
@@ -344,56 +397,50 @@ struct AddressSanitizer : public FunctionPass {
Function *AsanInitFunction;
Function *AsanHandleNoReturnFunc;
Function *AsanCovFunction;
- OwningPtr<SpecialCaseList> BL;
+ Function *AsanPtrCmpFunction, *AsanPtrSubFunction;
// This array is indexed by AccessIsWrite and log2(AccessSize).
Function *AsanErrorCallback[2][kNumberOfAccessSizes];
+ Function *AsanMemoryAccessCallback[2][kNumberOfAccessSizes];
// This array is indexed by AccessIsWrite.
- Function *AsanErrorCallbackSized[2];
+ Function *AsanErrorCallbackSized[2],
+ *AsanMemoryAccessCallbackSized[2];
+ Function *AsanMemmove, *AsanMemcpy, *AsanMemset;
InlineAsm *EmptyAsm;
- SetOfDynamicallyInitializedGlobals DynamicallyInitializedGlobals;
+ GlobalsMetadata GlobalsMD;
friend struct FunctionStackPoisoner;
};
class AddressSanitizerModule : public ModulePass {
public:
- AddressSanitizerModule(bool CheckInitOrder = true,
- StringRef BlacklistFile = StringRef(),
- bool ZeroBaseShadow = false)
- : ModulePass(ID),
- CheckInitOrder(CheckInitOrder || ClInitializers),
- BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
- : BlacklistFile),
- ZeroBaseShadow(ZeroBaseShadow) {}
- bool runOnModule(Module &M);
+ AddressSanitizerModule() : ModulePass(ID) {}
+ bool runOnModule(Module &M) override;
static char ID; // Pass identification, replacement for typeid
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "AddressSanitizerModule";
}
private:
void initializeCallbacks(Module &M);
+ bool InstrumentGlobals(IRBuilder<> &IRB, Module &M);
bool ShouldInstrumentGlobal(GlobalVariable *G);
+ void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName);
void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
- size_t RedzoneSize() const {
+ size_t MinRedzoneSizeForGlobal() const {
return RedzoneSizeForScale(Mapping.Scale);
}
- bool CheckInitOrder;
- SmallString<64> BlacklistFile;
- bool ZeroBaseShadow;
-
- OwningPtr<SpecialCaseList> BL;
- SetOfDynamicallyInitializedGlobals DynamicallyInitializedGlobals;
+ GlobalsMetadata GlobalsMD;
Type *IntptrTy;
LLVMContext *C;
- DataLayout *TD;
+ const DataLayout *DL;
ShadowMapping Mapping;
Function *AsanPoisonGlobals;
Function *AsanUnpoisonGlobals;
Function *AsanRegisterGlobals;
Function *AsanUnregisterGlobals;
+ Function *AsanCovModuleInit;
};
// Stack poisoning does not play well with exception handling.
@@ -416,7 +463,6 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
SmallVector<AllocaInst*, 16> AllocaVec;
SmallVector<Instruction*, 8> RetVec;
- uint64_t TotalStackSize;
unsigned StackAlignment;
Function *AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
@@ -440,16 +486,14 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
: F(F), ASan(ASan), DIB(*F.getParent()), C(ASan.C),
IntptrTy(ASan.IntptrTy), IntptrPtrTy(PointerType::get(IntptrTy, 0)),
Mapping(ASan.Mapping),
- TotalStackSize(0), StackAlignment(1 << Mapping.Scale) {}
+ StackAlignment(1 << Mapping.Scale) {}
bool runOnFunction() {
if (!ClStack) return false;
// Collect alloca, ret, lifetime instructions etc.
- for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
- DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
- BasicBlock *BB = *DI;
+ for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
visit(*BB);
- }
+
if (AllocaVec.empty()) return false;
initializeCallbacks(*F.getParent());
@@ -479,14 +523,12 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
StackAlignment = std::max(StackAlignment, AI.getAlignment());
AllocaVec.push_back(&AI);
- uint64_t AlignedSize = getAlignedAllocaSize(&AI);
- TotalStackSize += AlignedSize;
}
/// \brief Collect lifetime intrinsic calls to check for use-after-scope
/// errors.
void visitIntrinsicInst(IntrinsicInst &II) {
- if (!ASan.CheckLifetime) return;
+ if (!ClCheckLifetime) return;
Intrinsic::ID ID = II.getIntrinsicID();
if (ID != Intrinsic::lifetime_start &&
ID != Intrinsic::lifetime_end)
@@ -514,31 +556,20 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
// Check if we want (and can) handle this alloca.
bool isInterestingAlloca(AllocaInst &AI) const {
- return (!AI.isArrayAllocation() &&
- AI.isStaticAlloca() &&
- AI.getAlignment() <= RedzoneSize() &&
- AI.getAllocatedType()->isSized());
+ return (!AI.isArrayAllocation() && AI.isStaticAlloca() &&
+ AI.getAllocatedType()->isSized() &&
+ // alloca() may be called with 0 size, ignore it.
+ getAllocaSizeInBytes(&AI) > 0);
}
- size_t RedzoneSize() const {
- return RedzoneSizeForScale(Mapping.Scale);
- }
uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {
Type *Ty = AI->getAllocatedType();
- uint64_t SizeInBytes = ASan.TD->getTypeAllocSize(Ty);
+ uint64_t SizeInBytes = ASan.DL->getTypeAllocSize(Ty);
return SizeInBytes;
}
- uint64_t getAlignedSize(uint64_t SizeInBytes) const {
- size_t RZ = RedzoneSize();
- return ((SizeInBytes + RZ - 1) / RZ) * RZ;
- }
- uint64_t getAlignedAllocaSize(AllocaInst *AI) const {
- uint64_t SizeInBytes = getAllocaSizeInBytes(AI);
- return getAlignedSize(SizeInBytes);
- }
/// Finds alloca where the value comes from.
AllocaInst *findAllocaForValue(Value *V);
- void poisonRedZones(const ArrayRef<AllocaInst*> &AllocaVec, IRBuilder<> &IRB,
+ void poisonRedZones(const ArrayRef<uint8_t> ShadowBytes, IRBuilder<> &IRB,
Value *ShadowBase, bool DoPoison);
void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);
@@ -552,21 +583,16 @@ char AddressSanitizer::ID = 0;
INITIALIZE_PASS(AddressSanitizer, "asan",
"AddressSanitizer: detects use-after-free and out-of-bounds bugs.",
false, false)
-FunctionPass *llvm::createAddressSanitizerFunctionPass(
- bool CheckInitOrder, bool CheckUseAfterReturn, bool CheckLifetime,
- StringRef BlacklistFile, bool ZeroBaseShadow) {
- return new AddressSanitizer(CheckInitOrder, CheckUseAfterReturn,
- CheckLifetime, BlacklistFile, ZeroBaseShadow);
+FunctionPass *llvm::createAddressSanitizerFunctionPass() {
+ return new AddressSanitizer();
}
char AddressSanitizerModule::ID = 0;
INITIALIZE_PASS(AddressSanitizerModule, "asan-module",
"AddressSanitizer: detects use-after-free and out-of-bounds bugs."
"ModulePass", false, false)
-ModulePass *llvm::createAddressSanitizerModulePass(
- bool CheckInitOrder, StringRef BlacklistFile, bool ZeroBaseShadow) {
- return new AddressSanitizerModule(CheckInitOrder, BlacklistFile,
- ZeroBaseShadow);
+ModulePass *llvm::createAddressSanitizerModulePass() {
+ return new AddressSanitizerModule();
}
static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
@@ -576,12 +602,16 @@ static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
}
// \brief Create a constant for Str so that we can pass it to the run-time lib.
-static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str) {
+static GlobalVariable *createPrivateGlobalForString(
+ Module &M, StringRef Str, bool AllowMerging) {
Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
- GlobalVariable *GV = new GlobalVariable(M, StrConst->getType(), true,
- GlobalValue::InternalLinkage, StrConst,
- kAsanGenPrefix);
- GV->setUnnamedAddr(true); // Ok to merge these.
+ // We use private linkage for module-local strings. If they can be merged
+ // with another one, we set the unnamed_addr attribute.
+ GlobalVariable *GV =
+ new GlobalVariable(M, StrConst->getType(), true,
+ GlobalValue::PrivateLinkage, StrConst, kAsanGenPrefix);
+ if (AllowMerging)
+ GV->setUnnamedAddr(true);
GV->setAlignment(1); // Strings may not be merged w/o setting align 1.
return GV;
}
@@ -602,90 +632,111 @@ Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
return IRB.CreateAdd(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
}
-void AddressSanitizer::instrumentMemIntrinsicParam(
- Instruction *OrigIns,
- Value *Addr, Value *Size, Instruction *InsertBefore, bool IsWrite) {
- IRBuilder<> IRB(InsertBefore);
- if (Size->getType() != IntptrTy)
- Size = IRB.CreateIntCast(Size, IntptrTy, false);
- // Check the first byte.
- instrumentAddress(OrigIns, InsertBefore, Addr, 8, IsWrite, Size);
- // Check the last byte.
- IRB.SetInsertPoint(InsertBefore);
- Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
- Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
- Value *AddrLast = IRB.CreateAdd(AddrLong, SizeMinusOne);
- instrumentAddress(OrigIns, InsertBefore, AddrLast, 8, IsWrite, Size);
-}
-
// Instrument memset/memmove/memcpy
-bool AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
- Value *Dst = MI->getDest();
- MemTransferInst *MemTran = dyn_cast<MemTransferInst>(MI);
- Value *Src = MemTran ? MemTran->getSource() : 0;
- Value *Length = MI->getLength();
-
- Constant *ConstLength = dyn_cast<Constant>(Length);
- Instruction *InsertBefore = MI;
- if (ConstLength) {
- if (ConstLength->isNullValue()) return false;
- } else {
- // The size is not a constant so it could be zero -- check at run-time.
- IRBuilder<> IRB(InsertBefore);
-
- Value *Cmp = IRB.CreateICmpNE(Length,
- Constant::getNullValue(Length->getType()));
- InsertBefore = SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false);
+void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
+ IRBuilder<> IRB(MI);
+ if (isa<MemTransferInst>(MI)) {
+ IRB.CreateCall3(
+ isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
+ IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
+ IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
+ IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false));
+ } else if (isa<MemSetInst>(MI)) {
+ IRB.CreateCall3(
+ AsanMemset,
+ IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
+ IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
+ IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false));
}
-
- instrumentMemIntrinsicParam(MI, Dst, Length, InsertBefore, true);
- if (Src)
- instrumentMemIntrinsicParam(MI, Src, Length, InsertBefore, false);
- return true;
+ MI->eraseFromParent();
}
// If I is an interesting memory access, return the PointerOperand
-// and set IsWrite. Otherwise return NULL.
-static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite) {
+// and set IsWrite/Alignment. Otherwise return NULL.
+static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
+ unsigned *Alignment) {
+ // Skip memory accesses inserted by another instrumentation.
+ if (I->getMetadata("nosanitize"))
+ return nullptr;
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
- if (!ClInstrumentReads) return NULL;
+ if (!ClInstrumentReads) return nullptr;
*IsWrite = false;
+ *Alignment = LI->getAlignment();
return LI->getPointerOperand();
}
if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
- if (!ClInstrumentWrites) return NULL;
+ if (!ClInstrumentWrites) return nullptr;
*IsWrite = true;
+ *Alignment = SI->getAlignment();
return SI->getPointerOperand();
}
if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
- if (!ClInstrumentAtomics) return NULL;
+ if (!ClInstrumentAtomics) return nullptr;
*IsWrite = true;
+ *Alignment = 0;
return RMW->getPointerOperand();
}
if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
- if (!ClInstrumentAtomics) return NULL;
+ if (!ClInstrumentAtomics) return nullptr;
*IsWrite = true;
+ *Alignment = 0;
return XCHG->getPointerOperand();
}
- return NULL;
+ return nullptr;
+}
+
+static bool isPointerOperand(Value *V) {
+ return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
+}
+
+// This is a rough heuristic; it may cause both false positives and
+// false negatives. The proper implementation requires cooperation with
+// the frontend.
+static bool isInterestingPointerComparisonOrSubtraction(Instruction *I) {
+ if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
+ if (!Cmp->isRelational())
+ return false;
+ } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
+ if (BO->getOpcode() != Instruction::Sub)
+ return false;
+ } else {
+ return false;
+ }
+ if (!isPointerOperand(I->getOperand(0)) ||
+ !isPointerOperand(I->getOperand(1)))
+ return false;
+ return true;
}
bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
// If a global variable does not have dynamic initialization we don't
// have to instrument it. However, if a global does not have initializer
// at all, we assume it has dynamic initializer (in other TU).
- return G->hasInitializer() && !DynamicallyInitializedGlobals.Contains(G);
+ return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit;
}
-void AddressSanitizer::instrumentMop(Instruction *I) {
+void
+AddressSanitizer::instrumentPointerComparisonOrSubtraction(Instruction *I) {
+ IRBuilder<> IRB(I);
+ Function *F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
+ Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
+ for (int i = 0; i < 2; i++) {
+ if (Param[i]->getType()->isPointerTy())
+ Param[i] = IRB.CreatePointerCast(Param[i], IntptrTy);
+ }
+ IRB.CreateCall2(F, Param[0], Param[1]);
+}
+
+void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) {
bool IsWrite = false;
- Value *Addr = isInterestingMemoryAccess(I, &IsWrite);
+ unsigned Alignment = 0;
+ Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &Alignment);
assert(Addr);
if (ClOpt && ClOptGlobals) {
if (GlobalVariable *G = dyn_cast<GlobalVariable>(Addr)) {
// If initialization order checking is disabled, a simple access to a
// dynamically initialized global is always valid.
- if (!CheckInitOrder || GlobalIsLinkerInitialized(G)) {
+ if (!ClInitializers || GlobalIsLinkerInitialized(G)) {
NumOptimizedAccessesToGlobalVar++;
return;
}
@@ -705,7 +756,7 @@ void AddressSanitizer::instrumentMop(Instruction *I) {
Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
assert(OrigTy->isSized());
- uint32_t TypeSize = TD->getTypeStoreSizeInBits(OrigTy);
+ uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
assert((TypeSize % 8) == 0);
@@ -714,22 +765,29 @@ void AddressSanitizer::instrumentMop(Instruction *I) {
else
NumInstrumentedReads++;
- // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check.
- if (TypeSize == 8 || TypeSize == 16 ||
- TypeSize == 32 || TypeSize == 64 || TypeSize == 128)
- return instrumentAddress(I, I, Addr, TypeSize, IsWrite, 0);
- // Instrument unusual size (but still multiple of 8).
+ unsigned Granularity = 1 << Mapping.Scale;
+ // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
+ // if the data is properly aligned.
+ if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
+ TypeSize == 128) &&
+ (Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8))
+ return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls);
+ // Instrument unusual size or unusual alignment.
// We can not do it with a single check, so we do 1-byte check for the first
// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
// to report the actual access size.
IRBuilder<> IRB(I);
- Value *LastByte = IRB.CreateIntToPtr(
- IRB.CreateAdd(IRB.CreatePointerCast(Addr, IntptrTy),
- ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
- OrigPtrTy);
Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
- instrumentAddress(I, I, Addr, 8, IsWrite, Size);
- instrumentAddress(I, I, LastByte, 8, IsWrite, Size);
+ Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
+ if (UseCalls) {
+ IRB.CreateCall2(AsanMemoryAccessCallbackSized[IsWrite], AddrLong, Size);
+ } else {
+ Value *LastByte = IRB.CreateIntToPtr(
+ IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
+ OrigPtrTy);
+ instrumentAddress(I, I, Addr, 8, IsWrite, Size, false);
+ instrumentAddress(I, I, LastByte, 8, IsWrite, Size, false);
+ }
}
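A minimal standalone sketch of the decision made above: one shadow check suffices for 1/2/4/8/16-byte accesses that are adequately aligned; anything else is checked at its first and last byte (or through the *_n callbacks when UseCalls is set). Granularity stands for 1 << Mapping.Scale:

// Standalone illustration of the alignment/size test in instrumentMop.
#include <cstdint>
#include <cstdio>

static bool singleCheck(uint32_t TypeSizeBits, unsigned Alignment,
                        unsigned Granularity) {
  bool PowerOfTwoSize = TypeSizeBits == 8 || TypeSizeBits == 16 ||
                        TypeSizeBits == 32 || TypeSizeBits == 64 ||
                        TypeSizeBits == 128;
  bool AlignedEnough = Alignment >= Granularity || Alignment == 0 ||
                       Alignment >= TypeSizeBits / 8;
  return PowerOfTwoSize && AlignedEnough;
}

int main() {
  const unsigned Granularity = 1u << 3;  // default shadow scale of 3 -> 8 bytes
  std::printf("%d\n", singleCheck(64, 8, Granularity));  // 1: aligned i64, one check
  std::printf("%d\n", singleCheck(64, 4, Granularity));  // 0: under-aligned, split check
  std::printf("%d\n", singleCheck(24, 0, Granularity));  // 0: unusual size, split check
}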
// Validate the result of Module::getOrInsertFunction called for an interface
@@ -777,11 +835,18 @@ Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
}
void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
- Instruction *InsertBefore,
- Value *Addr, uint32_t TypeSize,
- bool IsWrite, Value *SizeArgument) {
+ Instruction *InsertBefore, Value *Addr,
+ uint32_t TypeSize, bool IsWrite,
+ Value *SizeArgument, bool UseCalls) {
IRBuilder<> IRB(InsertBefore);
Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
+ size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
+
+ if (UseCalls) {
+ IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][AccessSizeIndex],
+ AddrLong);
+ return;
+ }
Type *ShadowTy = IntegerType::get(
*C, std::max(8U, TypeSize >> Mapping.Scale));
@@ -792,13 +857,12 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
- size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
size_t Granularity = 1 << Mapping.Scale;
- TerminatorInst *CrashTerm = 0;
+ TerminatorInst *CrashTerm = nullptr;
if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
TerminatorInst *CheckTerm =
- SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false);
+ SplitBlockAndInsertIfThen(Cmp, InsertBefore, false);
assert(dyn_cast<BranchInst>(CheckTerm)->isUnconditional());
BasicBlock *NextBB = CheckTerm->getSuccessor(0);
IRB.SetInsertPoint(CheckTerm);
@@ -809,7 +873,7 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
ReplaceInstWithInst(CheckTerm, NewTerm);
} else {
- CrashTerm = SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), true);
+ CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, true);
}
Instruction *Crash = generateCrashCode(
@@ -817,27 +881,36 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
Crash->setDebugLoc(OrigIns->getDebugLoc());
}
-void AddressSanitizerModule::createInitializerPoisonCalls(
- Module &M, GlobalValue *ModuleName) {
- // We do all of our poisoning and unpoisoning within _GLOBAL__I_a.
- Function *GlobalInit = M.getFunction("_GLOBAL__I_a");
- // If that function is not present, this TU contains no globals, or they have
- // all been optimized away
- if (!GlobalInit)
- return;
-
+void AddressSanitizerModule::poisonOneInitializer(Function &GlobalInit,
+ GlobalValue *ModuleName) {
// Set up the arguments to our poison/unpoison functions.
- IRBuilder<> IRB(GlobalInit->begin()->getFirstInsertionPt());
+ IRBuilder<> IRB(GlobalInit.begin()->getFirstInsertionPt());
// Add a call to poison all external globals before the given function starts.
Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);
// Add calls to unpoison all globals before each return instruction.
- for (Function::iterator I = GlobalInit->begin(), E = GlobalInit->end();
- I != E; ++I) {
- if (ReturnInst *RI = dyn_cast<ReturnInst>(I->getTerminator())) {
+ for (auto &BB : GlobalInit.getBasicBlockList())
+ if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
CallInst::Create(AsanUnpoisonGlobals, "", RI);
+}
+
+void AddressSanitizerModule::createInitializerPoisonCalls(
+ Module &M, GlobalValue *ModuleName) {
+ GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
+
+ ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
+ for (Use &OP : CA->operands()) {
+ if (isa<ConstantAggregateZero>(OP))
+ continue;
+ ConstantStruct *CS = cast<ConstantStruct>(OP);
+
+ // Must have a function or null ptr.
+ // (CS->getOperand(0) is the init priority.)
+ if (Function* F = dyn_cast<Function>(CS->getOperand(1))) {
+ if (F->getName() != kAsanModuleCtorName)
+ poisonOneInitializer(*F, ModuleName);
}
}
}
@@ -846,23 +919,27 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
Type *Ty = cast<PointerType>(G->getType())->getElementType();
DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
- if (BL->isIn(*G)) return false;
+ if (GlobalsMD.get(G).IsBlacklisted) return false;
+ if (GlobalsMD.isInstrumentationGlobal(G)) return false;
if (!Ty->isSized()) return false;
if (!G->hasInitializer()) return false;
if (GlobalWasGeneratedByAsan(G)) return false; // Our own global.
// Touch only those globals that will not be defined in other modules.
- // Don't handle ODR type linkages since other modules may be built w/o asan.
+ // Don't handle ODR linkage types and COMDATs since other modules may be built
+ // without ASan.
if (G->getLinkage() != GlobalVariable::ExternalLinkage &&
G->getLinkage() != GlobalVariable::PrivateLinkage &&
G->getLinkage() != GlobalVariable::InternalLinkage)
return false;
+ if (G->hasComdat())
+ return false;
// Two problems with thread-locals:
// - The address of the main thread's copy can't be computed at link-time.
// - Need to poison all copies, not just the main thread's one.
if (G->isThreadLocal())
return false;
- // For now, just ignore this Alloca if the alignment is large.
- if (G->getAlignment() > RedzoneSize()) return false;
+ // For now, just ignore this Global if the alignment is large.
+ if (G->getAlignment() > MinRedzoneSizeForGlobal()) return false;
// Ignore all the globals with the names starting with "\01L_OBJC_".
// Many of those are put into the .cstring section. The linker compresses
@@ -870,7 +947,7 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
// our redzones get broken.
if ((G->getName().find("\01L_OBJC_") == 0) ||
(G->getName().find("\01l_OBJC_") == 0)) {
- DEBUG(dbgs() << "Ignoring \\01L_OBJC_* global: " << *G);
+ DEBUG(dbgs() << "Ignoring \\01L_OBJC_* global: " << *G << "\n");
return false;
}
@@ -879,9 +956,9 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
// Ignore the globals from the __OBJC section. The ObjC runtime assumes
// those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
// them.
- if ((Section.find("__OBJC,") == 0) ||
- (Section.find("__DATA, __objc_") == 0)) {
- DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G);
+ if (Section.startswith("__OBJC,") ||
+ Section.startswith("__DATA, __objc_")) {
+ DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
return false;
}
// See http://code.google.com/p/address-sanitizer/issues/detail?id=32
@@ -892,10 +969,28 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
// is placed into __DATA,__cfstring
// Therefore there's no point in placing redzones into __DATA,__cfstring.
// Moreover, it causes the linker to crash on OS X 10.7
- if (Section.find("__DATA,__cfstring") == 0) {
- DEBUG(dbgs() << "Ignoring CFString: " << *G);
+ if (Section.startswith("__DATA,__cfstring")) {
+ DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
+ return false;
+ }
+ // The linker merges the contents of cstring_literals and removes the
+ // trailing zeroes.
+ if (Section.startswith("__TEXT,__cstring,cstring_literals")) {
+ DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
+ return false;
+ }
+
+ // Callbacks put into the CRT initializer/terminator sections
+ // should not be instrumented.
+ // See https://code.google.com/p/address-sanitizer/issues/detail?id=305
+ // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
+ if (Section.startswith(".CRT")) {
+ DEBUG(dbgs() << "Ignoring a global initializer callback: " << *G << "\n");
return false;
}
+
+ // Globals from llvm.metadata aren't emitted, do not instrument them.
+ if (Section == "llvm.metadata") return false;
}
return true;
@@ -919,31 +1014,23 @@ void AddressSanitizerModule::initializeCallbacks(Module &M) {
kAsanUnregisterGlobalsName,
IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage);
+ AsanCovModuleInit = checkInterfaceFunction(M.getOrInsertFunction(
+ kAsanCovModuleInitName,
+ IRB.getVoidTy(), IntptrTy, NULL));
+ AsanCovModuleInit->setLinkage(Function::ExternalLinkage);
}
// This function replaces all global variables with new variables that have
// trailing redzones. It also creates a function that poisons
// redzones and inserts this function into llvm.global_ctors.
-bool AddressSanitizerModule::runOnModule(Module &M) {
- if (!ClGlobals) return false;
- TD = getAnalysisIfAvailable<DataLayout>();
- if (!TD)
- return false;
- BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
- if (BL->isIn(M)) return false;
- C = &(M.getContext());
- int LongSize = TD->getPointerSizeInBits();
- IntptrTy = Type::getIntNTy(*C, LongSize);
- Mapping = getShadowMapping(M, LongSize, ZeroBaseShadow);
- initializeCallbacks(M);
- DynamicallyInitializedGlobals.Init(M);
+bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
+ GlobalsMD.init(M);
SmallVector<GlobalVariable *, 16> GlobalsToChange;
- for (Module::GlobalListType::iterator G = M.global_begin(),
- E = M.global_end(); G != E; ++G) {
- if (ShouldInstrumentGlobal(G))
- GlobalsToChange.push_back(G);
+ for (auto &G : M.globals()) {
+ if (ShouldInstrumentGlobal(&G))
+ GlobalsToChange.push_back(&G);
}
size_t n = GlobalsToChange.size();
@@ -956,31 +1043,35 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
// const char *name;
// const char *module_name;
// size_t has_dynamic_init;
+ // void *source_location;
// We initialize an array of such structures and pass it to a run-time call.
- StructType *GlobalStructTy = StructType::get(IntptrTy, IntptrTy,
- IntptrTy, IntptrTy,
- IntptrTy, IntptrTy, NULL);
+ StructType *GlobalStructTy =
+ StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
+ IntptrTy, IntptrTy, NULL);
SmallVector<Constant *, 16> Initializers(n);
- Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
- assert(CtorFunc);
- IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());
-
bool HasDynamicallyInitializedGlobals = false;
- GlobalVariable *ModuleName = createPrivateGlobalForString(
- M, M.getModuleIdentifier());
// We shouldn't merge same module names, as this string serves as unique
// module ID in runtime.
- ModuleName->setUnnamedAddr(false);
+ GlobalVariable *ModuleName = createPrivateGlobalForString(
+ M, M.getModuleIdentifier(), /*AllowMerging*/false);
for (size_t i = 0; i < n; i++) {
static const uint64_t kMaxGlobalRedzone = 1 << 18;
GlobalVariable *G = GlobalsToChange[i];
+
+ auto MD = GlobalsMD.get(G);
+ // Create string holding the global name unless it was provided by
+ // the metadata.
+ GlobalVariable *Name =
+ MD.Name ? MD.Name : createPrivateGlobalForString(M, G->getName(),
+ /*AllowMerging*/ true);
+
PointerType *PtrTy = cast<PointerType>(G->getType());
Type *Ty = PtrTy->getElementType();
- uint64_t SizeInBytes = TD->getTypeAllocSize(Ty);
- uint64_t MinRZ = RedzoneSize();
+ uint64_t SizeInBytes = DL->getTypeAllocSize(Ty);
+ uint64_t MinRZ = MinRedzoneSizeForGlobal();
// MinRZ <= RZ <= kMaxGlobalRedzone
// and trying to make RZ to be ~ 1/4 of SizeInBytes.
uint64_t RZ = std::max(MinRZ,
@@ -992,19 +1083,12 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ);
assert(((RightRedzoneSize + SizeInBytes) % MinRZ) == 0);
Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
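The redzone picked above is clamped between MinRZ and kMaxGlobalRedzone (the exact heuristic is cut off by this hunk); the visible step then pads the right redzone so that global plus redzone is a multiple of MinRZ. A standalone sketch of just that padding, with MinRZ assumed to be 32 bytes:

// Only the alignment-padding step shown above; MinRZ = 32 and the guarding
// 'if' are assumptions for illustration.
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t MinRZ = 32;           // assumed minimum redzone
  uint64_t SizeInBytes = 10;           // hypothetical 10-byte global
  uint64_t RightRedzoneSize = MinRZ;   // start from a redzone of at least MinRZ

  if (SizeInBytes % MinRZ)
    RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ);
  assert((RightRedzoneSize + SizeInBytes) % MinRZ == 0);

  std::printf("global %llu B -> right redzone %llu B, padded total %llu B\n",
              (unsigned long long)SizeInBytes,
              (unsigned long long)RightRedzoneSize,
              (unsigned long long)(SizeInBytes + RightRedzoneSize));  // 10, 54, 64
}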
- // Determine whether this global should be poisoned in initialization.
- bool GlobalHasDynamicInitializer =
- DynamicallyInitializedGlobals.Contains(G);
- // Don't check initialization order if this global is blacklisted.
- GlobalHasDynamicInitializer &= !BL->isIn(*G, "init");
StructType *NewTy = StructType::get(Ty, RightRedZoneTy, NULL);
Constant *NewInitializer = ConstantStruct::get(
NewTy, G->getInitializer(),
Constant::getNullValue(RightRedZoneTy), NULL);
- GlobalVariable *Name = createPrivateGlobalForString(M, G->getName());
-
// Create a new global variable with enough space for a redzone.
GlobalValue::LinkageTypes Linkage = G->getLinkage();
if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
@@ -1025,17 +1109,17 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
G->eraseFromParent();
Initializers[i] = ConstantStruct::get(
- GlobalStructTy,
- ConstantExpr::getPointerCast(NewGlobal, IntptrTy),
+ GlobalStructTy, ConstantExpr::getPointerCast(NewGlobal, IntptrTy),
ConstantInt::get(IntptrTy, SizeInBytes),
ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
ConstantExpr::getPointerCast(Name, IntptrTy),
ConstantExpr::getPointerCast(ModuleName, IntptrTy),
- ConstantInt::get(IntptrTy, GlobalHasDynamicInitializer),
+ ConstantInt::get(IntptrTy, MD.IsDynInit),
+ MD.SourceLoc ? ConstantExpr::getPointerCast(MD.SourceLoc, IntptrTy)
+ : ConstantInt::get(IntptrTy, 0),
NULL);
- // Populate the first and last globals declared in this TU.
- if (CheckInitOrder && GlobalHasDynamicInitializer)
+ if (ClInitializers && MD.IsDynInit)
HasDynamicallyInitializedGlobals = true;
DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
@@ -1047,7 +1131,7 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
ConstantArray::get(ArrayOfGlobalStructTy, Initializers), "");
// Create calls for poisoning before initializers run and unpoisoning after.
- if (CheckInitOrder && HasDynamicallyInitializedGlobals)
+ if (HasDynamicallyInitializedGlobals)
createInitializerPoisonCalls(M, ModuleName);
IRB.CreateCall2(AsanRegisterGlobals,
IRB.CreatePointerCast(AllGlobals, IntptrTy),
@@ -1063,12 +1147,42 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
IRB_Dtor.CreateCall2(AsanUnregisterGlobals,
IRB.CreatePointerCast(AllGlobals, IntptrTy),
ConstantInt::get(IntptrTy, n));
- appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndCtorPriority);
+ appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndDtorPriority);
DEBUG(dbgs() << M);
return true;
}
+bool AddressSanitizerModule::runOnModule(Module &M) {
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ if (!DLP)
+ return false;
+ DL = &DLP->getDataLayout();
+ C = &(M.getContext());
+ int LongSize = DL->getPointerSizeInBits();
+ IntptrTy = Type::getIntNTy(*C, LongSize);
+ Mapping = getShadowMapping(M, LongSize);
+ initializeCallbacks(M);
+
+ bool Changed = false;
+
+ Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
+ assert(CtorFunc);
+ IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());
+
+ if (ClCoverage > 0) {
+ Function *CovFunc = M.getFunction(kAsanCovName);
+ int nCov = CovFunc ? CovFunc->getNumUses() : 0;
+ IRB.CreateCall(AsanCovModuleInit, ConstantInt::get(IntptrTy, nCov));
+ Changed = true;
+ }
+
+ if (ClGlobals)
+ Changed |= InstrumentGlobals(IRB, M);
+
+ return Changed;
+}
+
void AddressSanitizer::initializeCallbacks(Module &M) {
IRBuilder<> IRB(*C);
// Create __asan_report* callbacks.
@@ -1076,12 +1190,16 @@ void AddressSanitizer::initializeCallbacks(Module &M) {
for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
AccessSizeIndex++) {
// IsWrite and TypeSize are encoded in the function name.
- std::string FunctionName = std::string(kAsanReportErrorTemplate) +
+ std::string Suffix =
(AccessIsWrite ? "store" : "load") + itostr(1 << AccessSizeIndex);
- // If we are merging crash callbacks, they have two parameters.
AsanErrorCallback[AccessIsWrite][AccessSizeIndex] =
- checkInterfaceFunction(M.getOrInsertFunction(
- FunctionName, IRB.getVoidTy(), IntptrTy, NULL));
+ checkInterfaceFunction(
+ M.getOrInsertFunction(kAsanReportErrorTemplate + Suffix,
+ IRB.getVoidTy(), IntptrTy, NULL));
+ AsanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
+ checkInterfaceFunction(
+ M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + Suffix,
+ IRB.getVoidTy(), IntptrTy, NULL));
}
}
AsanErrorCallbackSized[0] = checkInterfaceFunction(M.getOrInsertFunction(
@@ -1089,45 +1207,49 @@ void AddressSanitizer::initializeCallbacks(Module &M) {
AsanErrorCallbackSized[1] = checkInterfaceFunction(M.getOrInsertFunction(
kAsanReportStoreN, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
- AsanHandleNoReturnFunc = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanHandleNoReturnName, IRB.getVoidTy(), NULL));
+ AsanMemoryAccessCallbackSized[0] = checkInterfaceFunction(
+ M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "loadN",
+ IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+ AsanMemoryAccessCallbackSized[1] = checkInterfaceFunction(
+ M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "storeN",
+ IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+
+ AsanMemmove = checkInterfaceFunction(M.getOrInsertFunction(
+ ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
+ IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, NULL));
+ AsanMemcpy = checkInterfaceFunction(M.getOrInsertFunction(
+ ClMemoryAccessCallbackPrefix + "memcpy", IRB.getInt8PtrTy(),
+ IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, NULL));
+ AsanMemset = checkInterfaceFunction(M.getOrInsertFunction(
+ ClMemoryAccessCallbackPrefix + "memset", IRB.getInt8PtrTy(),
+ IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy, NULL));
+
+ AsanHandleNoReturnFunc = checkInterfaceFunction(
+ M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy(), NULL));
AsanCovFunction = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanCovName, IRB.getVoidTy(), IntptrTy, NULL));
+ kAsanCovName, IRB.getVoidTy(), NULL));
+ AsanPtrCmpFunction = checkInterfaceFunction(M.getOrInsertFunction(
+ kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+ AsanPtrSubFunction = checkInterfaceFunction(M.getOrInsertFunction(
+ kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
// We insert an empty inline asm after __asan_report* to avoid callback merge.
EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
StringRef(""), StringRef(""),
/*hasSideEffects=*/true);
}
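// [Editor's note -- illustrative sketch, not part of this patch.] The hunk
// above derives every per-size callback name from a prefix plus the suffix
// built in the loop ("load"/"store" followed by the access size in bytes).
// A standalone equivalent of that name construction; the prefix is left as a
// caller-supplied placeholder because its concrete value is defined elsewhere
// in this file:
#include <string>

std::string accessCallbackName(const std::string &Prefix, bool IsWrite,
                               unsigned AccessSizeIndex) {
  // e.g. Prefix + "store4" when IsWrite and AccessSizeIndex == 2.
  return Prefix + (IsWrite ? "store" : "load") +
         std::to_string(1u << AccessSizeIndex);
}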
-void AddressSanitizer::emitShadowMapping(Module &M, IRBuilder<> &IRB) const {
- // Tell the values of mapping offset and scale to the run-time.
- GlobalValue *asan_mapping_offset =
- new GlobalVariable(M, IntptrTy, true, GlobalValue::LinkOnceODRLinkage,
- ConstantInt::get(IntptrTy, Mapping.Offset),
- kAsanMappingOffsetName);
- // Read the global, otherwise it may be optimized away.
- IRB.CreateLoad(asan_mapping_offset, true);
-
- GlobalValue *asan_mapping_scale =
- new GlobalVariable(M, IntptrTy, true, GlobalValue::LinkOnceODRLinkage,
- ConstantInt::get(IntptrTy, Mapping.Scale),
- kAsanMappingScaleName);
- // Read the global, otherwise it may be optimized away.
- IRB.CreateLoad(asan_mapping_scale, true);
-}
-
// virtual
bool AddressSanitizer::doInitialization(Module &M) {
// Initialize the private fields. No one has accessed them before.
- TD = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ if (!DLP)
+ report_fatal_error("data layout missing");
+ DL = &DLP->getDataLayout();
- if (!TD)
- return false;
- BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
- DynamicallyInitializedGlobals.Init(M);
+ GlobalsMD.init(M);
C = &(M.getContext());
- LongSize = TD->getPointerSizeInBits();
+ LongSize = DL->getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
AsanCtorFunction = Function::Create(
@@ -1141,10 +1263,9 @@ bool AddressSanitizer::doInitialization(Module &M) {
AsanInitFunction->setLinkage(Function::ExternalLinkage);
IRB.CreateCall(AsanInitFunction);
- Mapping = getShadowMapping(M, LongSize, ZeroBaseShadow);
- emitShadowMapping(M, IRB);
+ Mapping = getShadowMapping(M, LongSize);
- appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndCtorPriority);
+ appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority);
return true;
}
@@ -1164,9 +1285,44 @@ bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
return false;
}
+void AddressSanitizer::InjectCoverageAtBlock(Function &F, BasicBlock &BB) {
+ BasicBlock::iterator IP = BB.getFirstInsertionPt(), BE = BB.end();
+ // Skip static allocas at the top of the entry block so they don't become
+ // dynamic when we split the block. If we use our optimized stack layout,
+ // there will be only one alloca and it will come first.
+ for (; IP != BE; ++IP) {
+ AllocaInst *AI = dyn_cast<AllocaInst>(IP);
+ if (!AI || !AI->isStaticAlloca())
+ break;
+ }
+
+ DebugLoc EntryLoc = IP->getDebugLoc().getFnDebugLoc(*C);
+ IRBuilder<> IRB(IP);
+ IRB.SetCurrentDebugLocation(EntryLoc);
+ Type *Int8Ty = IRB.getInt8Ty();
+ GlobalVariable *Guard = new GlobalVariable(
+ *F.getParent(), Int8Ty, false, GlobalValue::PrivateLinkage,
+ Constant::getNullValue(Int8Ty), "__asan_gen_cov_" + F.getName());
+ LoadInst *Load = IRB.CreateLoad(Guard);
+ Load->setAtomic(Monotonic);
+ Load->setAlignment(1);
+ Value *Cmp = IRB.CreateICmpEQ(Constant::getNullValue(Int8Ty), Load);
+ Instruction *Ins = SplitBlockAndInsertIfThen(
+ Cmp, IP, false, MDBuilder(*C).createBranchWeights(1, 100000));
+ IRB.SetInsertPoint(Ins);
+ IRB.SetCurrentDebugLocation(EntryLoc);
+ // We no longer pass &F to __sanitizer_cov here; the run-time can recover
+ // the call site PC via GET_CALLER_PC.
+ IRB.CreateCall(AsanCovFunction);
+ StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int8Ty, 1), Guard);
+ Store->setAtomic(Monotonic);
+ Store->setAlignment(1);
+}
+
// Poor man's coverage that works with ASan.
// We create a Guard boolean variable with the same linkage
-// as the function and inject this code into the entry block:
+// as the function and inject this code into the entry block (-asan-coverage=1)
+// or all blocks (-asan-coverage=2):
// if (!*Guard) {
// __sanitizer_cov();
// *Guard = 1;
@@ -1175,38 +1331,29 @@ bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
// in __sanitizer_cov (it's fine to call it more than once).
//
// This coverage implementation provides very limited data:
-// it only tells if a given function was ever executed.
-// No counters, no per-basic-block or per-edge data.
+// it only tells if a given function (block) was ever executed.
+// No counters, no per-edge data.
// But for many use cases this is what we need and the added slowdown
// is negligible. This simple implementation will probably be obsoleted
// by the upcoming Clang-based coverage implementation.
// By having it here and now we hope to
// a) get the functionality to users earlier and
// b) collect usage statistics to help improve Clang coverage design.
-bool AddressSanitizer::InjectCoverage(Function &F) {
+bool AddressSanitizer::InjectCoverage(Function &F,
+ const ArrayRef<BasicBlock *> AllBlocks) {
if (!ClCoverage) return false;
- IRBuilder<> IRB(F.getEntryBlock().getFirstInsertionPt());
- Type *Int8Ty = IRB.getInt8Ty();
- GlobalVariable *Guard = new GlobalVariable(
- *F.getParent(), Int8Ty, false, GlobalValue::PrivateLinkage,
- Constant::getNullValue(Int8Ty), "__asan_gen_cov_" + F.getName());
- LoadInst *Load = IRB.CreateLoad(Guard);
- Load->setAtomic(Monotonic);
- Load->setAlignment(1);
- Value *Cmp = IRB.CreateICmpEQ(Constant::getNullValue(Int8Ty), Load);
- Instruction *Ins = SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false);
- IRB.SetInsertPoint(Ins);
- // We pass &F to __sanitizer_cov. We could avoid this and rely on
- // GET_CALLER_PC, but having the PC of the first instruction is just nice.
- IRB.CreateCall(AsanCovFunction, IRB.CreatePointerCast(&F, IntptrTy));
- StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int8Ty, 1), Guard);
- Store->setAtomic(Monotonic);
- Store->setAlignment(1);
+
+ if (ClCoverage == 1 ||
+ (unsigned)ClCoverageBlockThreshold < AllBlocks.size()) {
+ InjectCoverageAtBlock(F, F.getEntryBlock());
+ } else {
+ for (auto BB : AllBlocks)
+ InjectCoverageAtBlock(F, *BB);
+ }
return true;
}
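// [Editor's note -- illustrative sketch, not part of this patch.] At the
// source level, the guard logic injected by InjectCoverageAtBlock above is
// roughly equivalent to the following; the guard variable and function name
// below are hypothetical stand-ins for what the pass creates per block.
extern "C" void __sanitizer_cov();     // provided by the sanitizer run-time

static unsigned char cov_guard_entry;  // one byte-sized guard per block

void example_instrumented_block() {
  if (!cov_guard_entry) {              // first time this block runs?
    __sanitizer_cov();                 // report it once
    cov_guard_entry = 1;
  }
  // ... original block body follows ...
}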
bool AddressSanitizer::runOnFunction(Function &F) {
- if (BL->isIn(F)) return false;
if (&F == AsanCtorFunction) return false;
if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
@@ -1226,28 +1373,35 @@ bool AddressSanitizer::runOnFunction(Function &F) {
SmallSet<Value*, 16> TempsToInstrument;
SmallVector<Instruction*, 16> ToInstrument;
SmallVector<Instruction*, 8> NoReturnCalls;
+ SmallVector<BasicBlock*, 16> AllBlocks;
+ SmallVector<Instruction*, 16> PointerComparisonsOrSubtracts;
int NumAllocas = 0;
bool IsWrite;
+ unsigned Alignment;
// Fill the set of memory operations to instrument.
- for (Function::iterator FI = F.begin(), FE = F.end();
- FI != FE; ++FI) {
+ for (auto &BB : F) {
+ AllBlocks.push_back(&BB);
TempsToInstrument.clear();
int NumInsnsPerBB = 0;
- for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
- BI != BE; ++BI) {
- if (LooksLikeCodeInBug11395(BI)) return false;
- if (Value *Addr = isInterestingMemoryAccess(BI, &IsWrite)) {
+ for (auto &Inst : BB) {
+ if (LooksLikeCodeInBug11395(&Inst)) return false;
+ if (Value *Addr =
+ isInterestingMemoryAccess(&Inst, &IsWrite, &Alignment)) {
if (ClOpt && ClOptSameTemp) {
if (!TempsToInstrument.insert(Addr))
continue; // We've seen this temp in the current BB.
}
- } else if (isa<MemIntrinsic>(BI) && ClMemIntrin) {
+ } else if (ClInvalidPointerPairs &&
+ isInterestingPointerComparisonOrSubtraction(&Inst)) {
+ PointerComparisonsOrSubtracts.push_back(&Inst);
+ continue;
+ } else if (isa<MemIntrinsic>(Inst)) {
// ok, take it.
} else {
- if (isa<AllocaInst>(BI))
+ if (isa<AllocaInst>(Inst))
NumAllocas++;
- CallSite CS(BI);
+ CallSite CS(&Inst);
if (CS) {
// A call inside BB.
TempsToInstrument.clear();
@@ -1256,14 +1410,14 @@ bool AddressSanitizer::runOnFunction(Function &F) {
}
continue;
}
- ToInstrument.push_back(BI);
+ ToInstrument.push_back(&Inst);
NumInsnsPerBB++;
if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB)
break;
}
}
- Function *UninstrumentedDuplicate = 0;
+ Function *UninstrumentedDuplicate = nullptr;
bool LikelyToInstrument =
!NoReturnCalls.empty() || !ToInstrument.empty() || (NumAllocas > 0);
if (ClKeepUninstrumented && LikelyToInstrument) {
@@ -1274,14 +1428,18 @@ bool AddressSanitizer::runOnFunction(Function &F) {
F.getParent()->getFunctionList().push_back(UninstrumentedDuplicate);
}
+ bool UseCalls = false;
+ if (ClInstrumentationWithCallsThreshold >= 0 &&
+ ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold)
+ UseCalls = true;
+
// Instrument.
int NumInstrumented = 0;
- for (size_t i = 0, n = ToInstrument.size(); i != n; i++) {
- Instruction *Inst = ToInstrument[i];
+ for (auto Inst : ToInstrument) {
if (ClDebugMin < 0 || ClDebugMax < 0 ||
(NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
- if (isInterestingMemoryAccess(Inst, &IsWrite))
- instrumentMop(Inst);
+ if (isInterestingMemoryAccess(Inst, &IsWrite, &Alignment))
+ instrumentMop(Inst, UseCalls);
else
instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
}
@@ -1293,15 +1451,19 @@ bool AddressSanitizer::runOnFunction(Function &F) {
// We must unpoison the stack before every NoReturn call (throw, _exit, etc).
// See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37
- for (size_t i = 0, n = NoReturnCalls.size(); i != n; i++) {
- Instruction *CI = NoReturnCalls[i];
+ for (auto CI : NoReturnCalls) {
IRBuilder<> IRB(CI);
IRB.CreateCall(AsanHandleNoReturnFunc);
}
+ for (auto Inst : PointerComparisonsOrSubtracts) {
+ instrumentPointerComparisonOrSubtraction(Inst);
+ NumInstrumented++;
+ }
+
bool res = NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty();
- if (InjectCoverage(F))
+ if (InjectCoverage(F, AllBlocks))
res = true;
DEBUG(dbgs() << "ASAN done instrumenting: " << res << " " << F << "\n");
@@ -1323,32 +1485,6 @@ bool AddressSanitizer::runOnFunction(Function &F) {
return res;
}
-static uint64_t ValueForPoison(uint64_t PoisonByte, size_t ShadowRedzoneSize) {
- if (ShadowRedzoneSize == 1) return PoisonByte;
- if (ShadowRedzoneSize == 2) return (PoisonByte << 8) + PoisonByte;
- if (ShadowRedzoneSize == 4)
- return (PoisonByte << 24) + (PoisonByte << 16) +
- (PoisonByte << 8) + (PoisonByte);
- llvm_unreachable("ShadowRedzoneSize is either 1, 2 or 4");
-}
-
-static void PoisonShadowPartialRightRedzone(uint8_t *Shadow,
- size_t Size,
- size_t RZSize,
- size_t ShadowGranularity,
- uint8_t Magic) {
- for (size_t i = 0; i < RZSize;
- i+= ShadowGranularity, Shadow++) {
- if (i + ShadowGranularity <= Size) {
- *Shadow = 0; // fully addressable
- } else if (i >= Size) {
- *Shadow = Magic; // unaddressable
- } else {
- *Shadow = Size - i; // first Size-i bytes are addressable
- }
- }
-}
-
// Workaround for bug 11395: we don't want to instrument stack in functions
// with large assembly blobs (32-bit only), otherwise reg alloc may crash.
// FIXME: remove once the bug 11395 is fixed.
@@ -1378,65 +1514,31 @@ void FunctionStackPoisoner::initializeCallbacks(Module &M) {
kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
}
-void FunctionStackPoisoner::poisonRedZones(
- const ArrayRef<AllocaInst*> &AllocaVec, IRBuilder<> &IRB, Value *ShadowBase,
- bool DoPoison) {
- size_t ShadowRZSize = RedzoneSize() >> Mapping.Scale;
- assert(ShadowRZSize >= 1 && ShadowRZSize <= 4);
- Type *RZTy = Type::getIntNTy(*C, ShadowRZSize * 8);
- Type *RZPtrTy = PointerType::get(RZTy, 0);
-
- Value *PoisonLeft = ConstantInt::get(RZTy,
- ValueForPoison(DoPoison ? kAsanStackLeftRedzoneMagic : 0LL, ShadowRZSize));
- Value *PoisonMid = ConstantInt::get(RZTy,
- ValueForPoison(DoPoison ? kAsanStackMidRedzoneMagic : 0LL, ShadowRZSize));
- Value *PoisonRight = ConstantInt::get(RZTy,
- ValueForPoison(DoPoison ? kAsanStackRightRedzoneMagic : 0LL, ShadowRZSize));
-
- // poison the first red zone.
- IRB.CreateStore(PoisonLeft, IRB.CreateIntToPtr(ShadowBase, RZPtrTy));
-
- // poison all other red zones.
- uint64_t Pos = RedzoneSize();
- for (size_t i = 0, n = AllocaVec.size(); i < n; i++) {
- AllocaInst *AI = AllocaVec[i];
- uint64_t SizeInBytes = getAllocaSizeInBytes(AI);
- uint64_t AlignedSize = getAlignedAllocaSize(AI);
- assert(AlignedSize - SizeInBytes < RedzoneSize());
- Value *Ptr = NULL;
-
- Pos += AlignedSize;
-
- assert(ShadowBase->getType() == IntptrTy);
- if (SizeInBytes < AlignedSize) {
- // Poison the partial redzone at right
- Ptr = IRB.CreateAdd(
- ShadowBase, ConstantInt::get(IntptrTy,
- (Pos >> Mapping.Scale) - ShadowRZSize));
- size_t AddressableBytes = RedzoneSize() - (AlignedSize - SizeInBytes);
- uint32_t Poison = 0;
- if (DoPoison) {
- PoisonShadowPartialRightRedzone((uint8_t*)&Poison, AddressableBytes,
- RedzoneSize(),
- 1ULL << Mapping.Scale,
- kAsanStackPartialRedzoneMagic);
- Poison =
- ASan.TD->isLittleEndian()
- ? support::endian::byte_swap<uint32_t, support::little>(Poison)
- : support::endian::byte_swap<uint32_t, support::big>(Poison);
+void
+FunctionStackPoisoner::poisonRedZones(const ArrayRef<uint8_t> ShadowBytes,
+ IRBuilder<> &IRB, Value *ShadowBase,
+ bool DoPoison) {
+ size_t n = ShadowBytes.size();
+ size_t i = 0;
+ // We need to (un)poison n bytes of stack shadow. Poison as many as we can
+ // using 64-bit stores (if we are on 64-bit arch), then poison the rest
+ // with 32-bit stores, then with 16-bit stores, then with 8-bit stores.
+ for (size_t LargeStoreSizeInBytes = ASan.LongSize / 8;
+ LargeStoreSizeInBytes != 0; LargeStoreSizeInBytes /= 2) {
+ for (; i + LargeStoreSizeInBytes - 1 < n; i += LargeStoreSizeInBytes) {
+ uint64_t Val = 0;
+ for (size_t j = 0; j < LargeStoreSizeInBytes; j++) {
+ if (ASan.DL->isLittleEndian())
+ Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
+ else
+ Val = (Val << 8) | ShadowBytes[i + j];
}
- Value *PartialPoison = ConstantInt::get(RZTy, Poison);
- IRB.CreateStore(PartialPoison, IRB.CreateIntToPtr(Ptr, RZPtrTy));
+ if (!Val) continue;
+ Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
+ Type *StoreTy = Type::getIntNTy(*C, LargeStoreSizeInBytes * 8);
+ Value *Poison = ConstantInt::get(StoreTy, DoPoison ? Val : 0);
+ IRB.CreateStore(Poison, IRB.CreateIntToPtr(Ptr, StoreTy->getPointerTo()));
}
-
- // Poison the full redzone at right.
- Ptr = IRB.CreateAdd(ShadowBase,
- ConstantInt::get(IntptrTy, Pos >> Mapping.Scale));
- bool LastAlloca = (i == AllocaVec.size() - 1);
- Value *Poison = LastAlloca ? PoisonRight : PoisonMid;
- IRB.CreateStore(Poison, IRB.CreateIntToPtr(Ptr, RZPtrTy));
-
- Pos += RedzoneSize();
}
}
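// [Editor's note -- standalone sketch, not part of this patch.] The loop above
// splits the shadow bytes into the widest stores that still fit; e.g. 11
// shadow bytes on a 64-bit target become one 8-byte, one 2-byte and one 1-byte
// store. A host-side model of that splitting (the pass additionally skips
// all-zero values and writes zeros instead when unpoisoning):
#include <cstdint>
#include <cstdio>
#include <vector>

void modelShadowStores(const std::vector<uint8_t> &ShadowBytes,
                       size_t PtrSizeInBytes, bool LittleEndian) {
  size_t n = ShadowBytes.size(), i = 0;
  for (size_t Sz = PtrSizeInBytes; Sz != 0; Sz /= 2) {
    for (; i + Sz - 1 < n; i += Sz) {
      uint64_t Val = 0;
      for (size_t j = 0; j < Sz; j++)
        Val = LittleEndian ? Val | ((uint64_t)ShadowBytes[i + j] << (8 * j))
                           : (Val << 8) | ShadowBytes[i + j];
      std::printf("i%zu store of 0x%llx at shadow offset %zu\n", Sz * 8,
                  (unsigned long long)Val, i);
    }
  }
}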
@@ -1467,25 +1569,47 @@ void FunctionStackPoisoner::SetShadowToStackAfterReturnInlined(
}
}
-void FunctionStackPoisoner::poisonStack() {
- uint64_t LocalStackSize = TotalStackSize +
- (AllocaVec.size() + 1) * RedzoneSize();
+static DebugLoc getFunctionEntryDebugLocation(Function &F) {
+ for (const auto &Inst : F.getEntryBlock())
+ if (!isa<AllocaInst>(Inst))
+ return Inst.getDebugLoc();
+ return DebugLoc();
+}
- bool DoStackMalloc = ASan.CheckUseAfterReturn
- && LocalStackSize <= kMaxStackMallocSize;
+void FunctionStackPoisoner::poisonStack() {
int StackMallocIdx = -1;
+ DebugLoc EntryDebugLocation = getFunctionEntryDebugLocation(F);
assert(AllocaVec.size() > 0);
Instruction *InsBefore = AllocaVec[0];
IRBuilder<> IRB(InsBefore);
-
+ IRB.SetCurrentDebugLocation(EntryDebugLocation);
+
+ SmallVector<ASanStackVariableDescription, 16> SVD;
+ SVD.reserve(AllocaVec.size());
+ for (AllocaInst *AI : AllocaVec) {
+ ASanStackVariableDescription D = { AI->getName().data(),
+ getAllocaSizeInBytes(AI),
+ AI->getAlignment(), AI, 0};
+ SVD.push_back(D);
+ }
+ // Minimal header size (left redzone) is 4 pointers,
+ // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
+ size_t MinHeaderSize = ASan.LongSize / 2;
+ ASanStackFrameLayout L;
+ ComputeASanStackFrameLayout(SVD, 1UL << Mapping.Scale, MinHeaderSize, &L);
+ DEBUG(dbgs() << L.DescriptionString << " --- " << L.FrameSize << "\n");
+ uint64_t LocalStackSize = L.FrameSize;
+ bool DoStackMalloc =
+ ClUseAfterReturn && LocalStackSize <= kMaxStackMallocSize;
Type *ByteArrayTy = ArrayType::get(IRB.getInt8Ty(), LocalStackSize);
AllocaInst *MyAlloca =
new AllocaInst(ByteArrayTy, "MyAlloca", InsBefore);
- if (ClRealignStack && StackAlignment < RedzoneSize())
- StackAlignment = RedzoneSize();
- MyAlloca->setAlignment(StackAlignment);
+ MyAlloca->setDebugLoc(EntryDebugLocation);
+ assert((ClRealignStack & (ClRealignStack - 1)) == 0);
+ size_t FrameAlignment = std::max(L.FrameAlignment, (size_t)ClRealignStack);
+ MyAlloca->setAlignment(FrameAlignment);
assert(MyAlloca->isStaticAlloca());
Value *OrigStackBase = IRB.CreatePointerCast(MyAlloca, IntptrTy);
Value *LocalStackBase = OrigStackBase;
@@ -1500,30 +1624,25 @@ void FunctionStackPoisoner::poisonStack() {
kAsanOptionDetectUAR, IRB.getInt32Ty());
Value *Cmp = IRB.CreateICmpNE(IRB.CreateLoad(OptionDetectUAR),
Constant::getNullValue(IRB.getInt32Ty()));
- Instruction *Term =
- SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false);
+ Instruction *Term = SplitBlockAndInsertIfThen(Cmp, InsBefore, false);
BasicBlock *CmpBlock = cast<Instruction>(Cmp)->getParent();
IRBuilder<> IRBIf(Term);
+ IRBIf.SetCurrentDebugLocation(EntryDebugLocation);
LocalStackBase = IRBIf.CreateCall2(
AsanStackMallocFunc[StackMallocIdx],
ConstantInt::get(IntptrTy, LocalStackSize), OrigStackBase);
BasicBlock *SetBlock = cast<Instruction>(LocalStackBase)->getParent();
IRB.SetInsertPoint(InsBefore);
+ IRB.SetCurrentDebugLocation(EntryDebugLocation);
PHINode *Phi = IRB.CreatePHI(IntptrTy, 2);
Phi->addIncoming(OrigStackBase, CmpBlock);
Phi->addIncoming(LocalStackBase, SetBlock);
LocalStackBase = Phi;
}
- // This string will be parsed by the run-time (DescribeAddressIfStack).
- SmallString<2048> StackDescriptionStorage;
- raw_svector_ostream StackDescription(StackDescriptionStorage);
- StackDescription << AllocaVec.size() << " ";
-
// Insert poison calls for lifetime intrinsics for alloca.
bool HavePoisonedAllocas = false;
- for (size_t i = 0, n = AllocaPoisonCallVec.size(); i < n; i++) {
- const AllocaPoisonCall &APC = AllocaPoisonCallVec[i];
+ for (const auto &APC : AllocaPoisonCallVec) {
assert(APC.InsBefore);
assert(APC.AI);
IRBuilder<> IRB(APC.InsBefore);
@@ -1531,24 +1650,15 @@ void FunctionStackPoisoner::poisonStack() {
HavePoisonedAllocas |= APC.DoPoison;
}
- uint64_t Pos = RedzoneSize();
// Replace Alloca instructions with base+offset.
- for (size_t i = 0, n = AllocaVec.size(); i < n; i++) {
- AllocaInst *AI = AllocaVec[i];
- uint64_t SizeInBytes = getAllocaSizeInBytes(AI);
- StringRef Name = AI->getName();
- StackDescription << Pos << " " << SizeInBytes << " "
- << Name.size() << " " << Name << " ";
- uint64_t AlignedSize = getAlignedAllocaSize(AI);
- assert((AlignedSize % RedzoneSize()) == 0);
+ for (const auto &Desc : SVD) {
+ AllocaInst *AI = Desc.AI;
Value *NewAllocaPtr = IRB.CreateIntToPtr(
- IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Pos)),
- AI->getType());
+ IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
+ AI->getType());
replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB);
AI->replaceAllUsesWith(NewAllocaPtr);
- Pos += AlignedSize + RedzoneSize();
}
- assert(Pos == LocalStackSize);
// The left-most redzone has enough space for at least 4 pointers.
// Write the Magic value to redzone[0].
@@ -1560,7 +1670,8 @@ void FunctionStackPoisoner::poisonStack() {
IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, ASan.LongSize/8)),
IntptrPtrTy);
GlobalVariable *StackDescriptionGlobal =
- createPrivateGlobalForString(*F.getParent(), StackDescription.str());
+ createPrivateGlobalForString(*F.getParent(), L.DescriptionString,
+ /*AllowMerging*/true);
Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal,
IntptrTy);
IRB.CreateStore(Description, BasePlus1);
@@ -1573,30 +1684,32 @@ void FunctionStackPoisoner::poisonStack() {
// Poison the stack redzones at the entry.
Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
- poisonRedZones(AllocaVec, IRB, ShadowBase, true);
+ poisonRedZones(L.ShadowBytes, IRB, ShadowBase, true);
- // Unpoison the stack before all ret instructions.
- for (size_t i = 0, n = RetVec.size(); i < n; i++) {
- Instruction *Ret = RetVec[i];
+ // (Un)poison the stack before all ret instructions.
+ for (auto Ret : RetVec) {
IRBuilder<> IRBRet(Ret);
// Mark the current frame as retired.
IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
BasePlus0);
- // Unpoison the stack.
- poisonRedZones(AllocaVec, IRBRet, ShadowBase, false);
if (DoStackMalloc) {
assert(StackMallocIdx >= 0);
- // In use-after-return mode, mark the whole stack frame unaddressable.
+ // if LocalStackBase != OrigStackBase:
+ // // In use-after-return mode, poison the whole stack frame.
+ // if StackMallocIdx <= 4
+ // // For small sizes inline the whole thing:
+ // memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
+ // **SavedFlagPtr(LocalStackBase) = 0
+ // else
+ // __asan_stack_free_N(LocalStackBase, OrigStackBase)
+ // else
+ // <This is not a fake stack; unpoison the redzones>
+ Value *Cmp = IRBRet.CreateICmpNE(LocalStackBase, OrigStackBase);
+ TerminatorInst *ThenTerm, *ElseTerm;
+ SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);
+
+ IRBuilder<> IRBPoison(ThenTerm);
if (StackMallocIdx <= 4) {
- // For small sizes inline the whole thing:
- // if LocalStackBase != OrigStackBase:
- // memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
- // **SavedFlagPtr(LocalStackBase) = 0
- // FIXME: if LocalStackBase != OrigStackBase don't call poisonRedZones.
- Value *Cmp = IRBRet.CreateICmpNE(LocalStackBase, OrigStackBase);
- TerminatorInst *PoisonTerm =
- SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false);
- IRBuilder<> IRBPoison(PoisonTerm);
int ClassSize = kMinStackMallocSize << StackMallocIdx;
SetShadowToStackAfterReturnInlined(IRBPoison, ShadowBase,
ClassSize >> Mapping.Scale);
@@ -1610,21 +1723,26 @@ void FunctionStackPoisoner::poisonStack() {
IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
} else {
// For larger frames call __asan_stack_free_*.
- IRBRet.CreateCall3(AsanStackFreeFunc[StackMallocIdx], LocalStackBase,
- ConstantInt::get(IntptrTy, LocalStackSize),
- OrigStackBase);
+ IRBPoison.CreateCall3(AsanStackFreeFunc[StackMallocIdx], LocalStackBase,
+ ConstantInt::get(IntptrTy, LocalStackSize),
+ OrigStackBase);
}
+
+ IRBuilder<> IRBElse(ElseTerm);
+ poisonRedZones(L.ShadowBytes, IRBElse, ShadowBase, false);
} else if (HavePoisonedAllocas) {
// If we poisoned some allocas in llvm.lifetime analysis,
// unpoison the whole stack frame now.
assert(LocalStackBase == OrigStackBase);
poisonAlloca(LocalStackBase, LocalStackSize, IRBRet, false);
+ } else {
+ poisonRedZones(L.ShadowBytes, IRBRet, ShadowBase, false);
}
}
// We are done. Remove the old unused alloca instructions.
- for (size_t i = 0, n = AllocaVec.size(); i < n; i++)
- AllocaVec[i]->eraseFromParent();
+ for (auto AI : AllocaVec)
+ AI->eraseFromParent();
}
void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
@@ -1649,7 +1767,7 @@ void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
if (AllocaInst *AI = dyn_cast<AllocaInst>(V))
// We're interested only in allocas we can handle.
- return isInterestingAlloca(*AI) ? AI : 0;
+ return isInterestingAlloca(*AI) ? AI : nullptr;
// See if we've already calculated (or started to calculate) alloca for a
// given value.
AllocaForValueMapTy::iterator I = AllocaForValue.find(V);
@@ -1657,8 +1775,8 @@ AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
return I->second;
// Store 0 while we're calculating alloca for value V to avoid
// infinite recursion if the value references itself.
- AllocaForValue[V] = 0;
- AllocaInst *Res = 0;
+ AllocaForValue[V] = nullptr;
+ AllocaInst *Res = nullptr;
if (CastInst *CI = dyn_cast<CastInst>(V))
Res = findAllocaForValue(CI->getOperand(0));
else if (PHINode *PN = dyn_cast<PHINode>(V)) {
@@ -1668,12 +1786,12 @@ AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
if (IncValue == PN) continue;
AllocaInst *IncValueAI = findAllocaForValue(IncValue);
// AI for incoming values should exist and should all be equal.
- if (IncValueAI == 0 || (Res != 0 && IncValueAI != Res))
- return 0;
+ if (IncValueAI == nullptr || (Res != nullptr && IncValueAI != Res))
+ return nullptr;
Res = IncValueAI;
}
}
- if (Res != 0)
+ if (Res)
AllocaForValue[V] = Res;
return Res;
}
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp b/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
index 7a9f0f6..9a5cea8 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
@@ -12,22 +12,23 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "bounds-checking"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/TargetFolder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/InstIterator.h"
-#include "llvm/Support/TargetFolder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "bounds-checking"
+
static cl::opt<bool> SingleTrapBB("bounds-checking-single-trap",
cl::desc("Use one trap block per function"));
@@ -45,15 +46,15 @@ namespace {
initializeBoundsCheckingPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<DataLayout>();
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<DataLayoutPass>();
AU.addRequired<TargetLibraryInfo>();
}
private:
- const DataLayout *TD;
+ const DataLayout *DL;
const TargetLibraryInfo *TLI;
ObjectSizeOffsetEvaluator *ObjSizeEval;
BuilderTy *Builder;
@@ -61,9 +62,7 @@ namespace {
BasicBlock *TrapBB;
BasicBlock *getTrapBB();
- void emitBranchToTrap(Value *Cmp = 0);
- bool computeAllocSize(Value *Ptr, APInt &Offset, Value* &OffsetValue,
- APInt &Size, Value* &SizeValue);
+ void emitBranchToTrap(Value *Cmp = nullptr);
bool instrument(Value *Ptr, Value *Val);
};
}
@@ -105,7 +104,7 @@ void BoundsChecking::emitBranchToTrap(Value *Cmp) {
if (!C->getZExtValue())
return;
else
- Cmp = 0; // unconditional branch
+ Cmp = nullptr; // unconditional branch
}
++ChecksAdded;
@@ -127,7 +126,7 @@ void BoundsChecking::emitBranchToTrap(Value *Cmp) {
/// size of memory block that is touched.
/// Returns true if any change was made to the IR, false otherwise.
bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
- uint64_t NeededSize = TD->getTypeStoreSize(InstVal->getType());
+ uint64_t NeededSize = DL->getTypeStoreSize(InstVal->getType());
DEBUG(dbgs() << "Instrument " << *Ptr << " for " << Twine(NeededSize)
<< " bytes\n");
@@ -142,7 +141,7 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
Value *Offset = SizeOffset.second;
ConstantInt *SizeCI = dyn_cast<ConstantInt>(Size);
- Type *IntTy = TD->getIntPtrType(Ptr->getType());
+ Type *IntTy = DL->getIntPtrType(Ptr->getType());
Value *NeededSizeVal = ConstantInt::get(IntTy, NeededSize);
// three checks are required to ensure safety:
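// [Editor's note -- sketch, not part of this patch; the comment above is cut
// off by the hunk boundary.] With Size and Offset as computed above, the
// safety conditions boil down, in effect, to the following predicate (the IR
// uses unsigned comparisons); a failing check branches to the trap block
// emitted by emitBranchToTrap():
#include <cstdint>

bool accessInBounds(int64_t Offset, uint64_t Size, uint64_t NeededSize) {
  return Offset >= 0 && Size >= (uint64_t)Offset &&
         Size - (uint64_t)Offset >= NeededSize;
}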
@@ -166,13 +165,13 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
}
bool BoundsChecking::runOnFunction(Function &F) {
- TD = &getAnalysis<DataLayout>();
+ DL = &getAnalysis<DataLayoutPass>().getDataLayout();
TLI = &getAnalysis<TargetLibraryInfo>();
- TrapBB = 0;
- BuilderTy TheBuilder(F.getContext(), TargetFolder(TD));
+ TrapBB = nullptr;
+ BuilderTy TheBuilder(F.getContext(), TargetFolder(DL));
Builder = &TheBuilder;
- ObjectSizeOffsetEvaluator TheObjSizeEval(TD, TLI, F.getContext(),
+ ObjectSizeOffsetEvaluator TheObjSizeEval(DL, TLI, F.getContext(),
/*RoundToAlign=*/true);
ObjSizeEval = &TheObjSizeEval;
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 9b9e725..35057cd 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -50,19 +50,23 @@
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
-#include "llvm/InstVisitor.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/SpecialCaseList.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Transforms/Utils/SpecialCaseList.h"
+#include <algorithm>
#include <iterator>
+#include <set>
+#include <utility>
using namespace llvm;
@@ -96,6 +100,22 @@ static cl::opt<bool> ClArgsABI(
cl::desc("Use the argument ABI rather than the TLS ABI"),
cl::Hidden);
+// Controls whether the pass includes or ignores the labels of pointers in load
+// instructions.
+static cl::opt<bool> ClCombinePointerLabelsOnLoad(
+ "dfsan-combine-pointer-labels-on-load",
+ cl::desc("Combine the label of the pointer with the label of the data when "
+ "loading from memory."),
+ cl::Hidden, cl::init(true));
+
+// Controls whether the pass includes or ignores the labels of pointers in
+// store instructions.
+static cl::opt<bool> ClCombinePointerLabelsOnStore(
+ "dfsan-combine-pointer-labels-on-store",
+ cl::desc("Combine the label of the pointer with the label of the data when "
+ "storing in memory."),
+ cl::Hidden, cl::init(false));
+
static cl::opt<bool> ClDebugNonzeroLabels(
"dfsan-debug-nonzero-labels",
cl::desc("Insert calls to __dfsan_nonzero_label on observing a parameter, "
@@ -104,6 +124,51 @@ static cl::opt<bool> ClDebugNonzeroLabels(
namespace {
+StringRef GetGlobalTypeString(const GlobalValue &G) {
+ // Types of GlobalVariables are always pointer types.
+ Type *GType = G.getType()->getElementType();
+ // For now we support blacklisting struct types only.
+ if (StructType *SGType = dyn_cast<StructType>(GType)) {
+ if (!SGType->isLiteral())
+ return SGType->getName();
+ }
+ return "<unknown type>";
+}
+
+class DFSanABIList {
+ std::unique_ptr<SpecialCaseList> SCL;
+
+ public:
+ DFSanABIList(SpecialCaseList *SCL) : SCL(SCL) {}
+
+ /// Returns whether either this function or its source file is listed in the
+ /// given category.
+ bool isIn(const Function &F, const StringRef Category) const {
+ return isIn(*F.getParent(), Category) ||
+ SCL->inSection("fun", F.getName(), Category);
+ }
+
+ /// Returns whether this global alias is listed in the given category.
+ ///
+ /// If GA aliases a function, the alias's name is matched as a function name
+ /// would be. Similarly, aliases of globals are matched like globals.
+ bool isIn(const GlobalAlias &GA, const StringRef Category) const {
+ if (isIn(*GA.getParent(), Category))
+ return true;
+
+ if (isa<FunctionType>(GA.getType()->getElementType()))
+ return SCL->inSection("fun", GA.getName(), Category);
+
+ return SCL->inSection("global", GA.getName(), Category) ||
+ SCL->inSection("type", GetGlobalTypeString(GA), Category);
+ }
+
+ /// Returns whether this module is listed in the given category.
+ bool isIn(const Module &M, const StringRef Category) const {
+ return SCL->inSection("src", M.getModuleIdentifier(), Category);
+ }
+};
+
class DataFlowSanitizer : public ModulePass {
friend struct DFSanFunction;
friend class DFSanVisitor;
@@ -148,7 +213,7 @@ class DataFlowSanitizer : public ModulePass {
WK_Custom
};
- DataLayout *DL;
+ const DataLayout *DL;
Module *Mod;
LLVMContext *Ctx;
IntegerType *ShadowTy;
@@ -174,12 +239,11 @@ class DataFlowSanitizer : public ModulePass {
Constant *DFSanSetLabelFn;
Constant *DFSanNonzeroLabelFn;
MDNode *ColdCallWeights;
- OwningPtr<SpecialCaseList> ABIList;
+ DFSanABIList ABIList;
DenseMap<Value *, Function *> UnwrappedFnMap;
AttributeSet ReadOnlyNoneAttrs;
Value *getShadowAddress(Value *Addr, Instruction *Pos);
- Value *combineShadows(Value *V1, Value *V2, Instruction *Pos);
bool isInstrumented(const Function *F);
bool isInstrumented(const GlobalAlias *GA);
FunctionType *getArgsFunctionType(FunctionType *T);
@@ -195,15 +259,17 @@ class DataFlowSanitizer : public ModulePass {
public:
DataFlowSanitizer(StringRef ABIListFile = StringRef(),
- void *(*getArgTLS)() = 0, void *(*getRetValTLS)() = 0);
+ void *(*getArgTLS)() = nullptr,
+ void *(*getRetValTLS)() = nullptr);
static char ID;
- bool doInitialization(Module &M);
- bool runOnModule(Module &M);
+ bool doInitialization(Module &M) override;
+ bool runOnModule(Module &M) override;
};
struct DFSanFunction {
DataFlowSanitizer &DFS;
Function *F;
+ DominatorTree DT;
DataFlowSanitizer::InstrumentedABI IA;
bool IsNativeABI;
Value *ArgTLSPtr;
@@ -215,15 +281,26 @@ struct DFSanFunction {
DenseSet<Instruction *> SkipInsts;
DenseSet<Value *> NonZeroChecks;
+ struct CachedCombinedShadow {
+ BasicBlock *Block;
+ Value *Shadow;
+ };
+ DenseMap<std::pair<Value *, Value *>, CachedCombinedShadow>
+ CachedCombinedShadows;
+ DenseMap<Value *, std::set<Value *>> ShadowElements;
+
DFSanFunction(DataFlowSanitizer &DFS, Function *F, bool IsNativeABI)
: DFS(DFS), F(F), IA(DFS.getInstrumentedABI()),
- IsNativeABI(IsNativeABI), ArgTLSPtr(0), RetvalTLSPtr(0),
- LabelReturnAlloca(0) {}
+ IsNativeABI(IsNativeABI), ArgTLSPtr(nullptr), RetvalTLSPtr(nullptr),
+ LabelReturnAlloca(nullptr) {
+ DT.recalculate(*F);
+ }
Value *getArgTLSPtr();
Value *getArgTLS(unsigned Index, Instruction *Pos);
Value *getRetvalTLS();
Value *getShadow(Value *V);
void setShadow(Instruction *I, Value *Shadow);
+ Value *combineShadows(Value *V1, Value *V2, Instruction *Pos);
Value *combineOperandShadows(Instruction *Inst);
Value *loadShadow(Value *ShadowAddr, uint64_t Size, uint64_t Align,
Instruction *Pos);
@@ -287,7 +364,7 @@ FunctionType *DataFlowSanitizer::getArgsFunctionType(FunctionType *T) {
ArgTypes.push_back(ShadowPtrTy);
Type *RetType = T->getReturnType();
if (!RetType->isVoidTy())
- RetType = StructType::get(RetType, ShadowTy, (Type *)0);
+ RetType = StructType::get(RetType, ShadowTy, (Type *)nullptr);
return FunctionType::get(RetType, ArgTypes, T->isVarArg());
}
@@ -327,9 +404,10 @@ FunctionType *DataFlowSanitizer::getCustomFunctionType(FunctionType *T) {
}
bool DataFlowSanitizer::doInitialization(Module &M) {
- DL = getAnalysisIfAvailable<DataLayout>();
- if (!DL)
- return false;
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ if (!DLP)
+ report_fatal_error("data layout missing");
+ DL = &DLP->getDataLayout();
Mod = &M;
Ctx = &M.getContext();
@@ -356,18 +434,20 @@ bool DataFlowSanitizer::doInitialization(Module &M) {
if (GetArgTLSPtr) {
Type *ArgTLSTy = ArrayType::get(ShadowTy, 64);
- ArgTLS = 0;
+ ArgTLS = nullptr;
GetArgTLS = ConstantExpr::getIntToPtr(
ConstantInt::get(IntptrTy, uintptr_t(GetArgTLSPtr)),
PointerType::getUnqual(
- FunctionType::get(PointerType::getUnqual(ArgTLSTy), (Type *)0)));
+ FunctionType::get(PointerType::getUnqual(ArgTLSTy),
+ (Type *)nullptr)));
}
if (GetRetvalTLSPtr) {
- RetvalTLS = 0;
+ RetvalTLS = nullptr;
GetRetvalTLS = ConstantExpr::getIntToPtr(
ConstantInt::get(IntptrTy, uintptr_t(GetRetvalTLSPtr)),
PointerType::getUnqual(
- FunctionType::get(PointerType::getUnqual(ShadowTy), (Type *)0)));
+ FunctionType::get(PointerType::getUnqual(ShadowTy),
+ (Type *)nullptr)));
}
ColdCallWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000);
@@ -375,11 +455,11 @@ bool DataFlowSanitizer::doInitialization(Module &M) {
}
bool DataFlowSanitizer::isInstrumented(const Function *F) {
- return !ABIList->isIn(*F, "uninstrumented");
+ return !ABIList.isIn(*F, "uninstrumented");
}
bool DataFlowSanitizer::isInstrumented(const GlobalAlias *GA) {
- return !ABIList->isIn(*GA, "uninstrumented");
+ return !ABIList.isIn(*GA, "uninstrumented");
}
DataFlowSanitizer::InstrumentedABI DataFlowSanitizer::getInstrumentedABI() {
@@ -387,11 +467,11 @@ DataFlowSanitizer::InstrumentedABI DataFlowSanitizer::getInstrumentedABI() {
}
DataFlowSanitizer::WrapperKind DataFlowSanitizer::getWrapperKind(Function *F) {
- if (ABIList->isIn(*F, "functional"))
+ if (ABIList.isIn(*F, "functional"))
return WK_Functional;
- if (ABIList->isIn(*F, "discard"))
+ if (ABIList.isIn(*F, "discard"))
return WK_Discard;
- if (ABIList->isIn(*F, "custom"))
+ if (ABIList.isIn(*F, "custom"))
return WK_Custom;
return WK_Warning;
@@ -480,7 +560,7 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
if (!DL)
return false;
- if (ABIList->isIn(M, "skip"))
+ if (ABIList.isIn(M, "skip"))
return false;
if (!GetArgTLSPtr) {
@@ -505,6 +585,7 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
DFSanUnionLoadFn =
Mod->getOrInsertFunction("__dfsan_union_load", DFSanUnionLoadFnTy);
if (Function *F = dyn_cast<Function>(DFSanUnionLoadFn)) {
+ F->addAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly);
F->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
}
DFSanUnimplementedFn =
@@ -536,8 +617,7 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
++i;
// Don't stop on weak. We assume people aren't playing games with the
// instrumentedness of overridden weak aliases.
- if (Function *F = dyn_cast<Function>(
- GA->resolveAliasedGlobal(/*stopOnWeak=*/false))) {
+ if (auto F = dyn_cast<Function>(GA->getBaseObject())) {
bool GAInst = isInstrumented(GA), FInst = isInstrumented(F);
if (GAInst && FInst) {
addGlobalNamePrefix(GA);
@@ -547,7 +627,7 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
// below will take care of instrumenting it.
Function *NewF =
buildWrapperFunction(F, "", GA->getLinkage(), F->getFunctionType());
- GA->replaceAllUsesWith(NewF);
+ GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA->getType()));
NewF->takeName(GA);
GA->eraseFromParent();
FnsToInstrument.push_back(NewF);
@@ -589,10 +669,10 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
}
NewF->getBasicBlockList().splice(NewF->begin(), F.getBasicBlockList());
- for (Function::use_iterator ui = F.use_begin(), ue = F.use_end();
- ui != ue;) {
- BlockAddress *BA = dyn_cast<BlockAddress>(ui.getUse().getUser());
- ++ui;
+ for (Function::user_iterator UI = F.user_begin(), UE = F.user_end();
+ UI != UE;) {
+ BlockAddress *BA = dyn_cast<BlockAddress>(*UI);
+ ++UI;
if (BA) {
BA->replaceAllUsesWith(
BlockAddress::get(NewF, BA->getBasicBlock()));
@@ -612,7 +692,7 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
// function... yet.
} else if (FT->isVarArg()) {
UnwrappedFnMap[&F] = &F;
- *i = 0;
+ *i = nullptr;
} else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) {
// Build a wrapper function for F. The wrapper simply calls F, and is
// added to FnsToInstrument so that any instrumentation according to its
@@ -663,9 +743,8 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
// DFSanVisitor may create new basic blocks, which confuses df_iterator.
// Build a copy of the list before iterating over it.
- llvm::SmallVector<BasicBlock *, 4> BBList;
- std::copy(df_begin(&(*i)->getEntryBlock()), df_end(&(*i)->getEntryBlock()),
- std::back_inserter(BBList));
+ llvm::SmallVector<BasicBlock *, 4> BBList(
+ depth_first(&(*i)->getEntryBlock()));
for (llvm::SmallVector<BasicBlock *, 4>::iterator i = BBList.begin(),
e = BBList.end();
@@ -718,10 +797,9 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos))
Pos = Pos->getNextNode();
IRBuilder<> IRB(Pos);
- Instruction *NeInst = cast<Instruction>(
- IRB.CreateICmpNE(*i, DFSF.DFS.ZeroShadow));
+ Value *Ne = IRB.CreateICmpNE(*i, DFSF.DFS.ZeroShadow);
BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
- NeInst, /*Unreachable=*/ false, ColdCallWeights));
+ Ne, Pos, /*Unreachable=*/false, ColdCallWeights));
IRBuilder<> ThenIRB(BI);
ThenIRB.CreateCall(DFSF.DFS.DFSanNonzeroLabelFn);
}
@@ -810,37 +888,72 @@ Value *DataFlowSanitizer::getShadowAddress(Value *Addr, Instruction *Pos) {
// Generates IR to compute the union of the two given shadows, inserting it
// before Pos. Returns the computed union Value.
-Value *DataFlowSanitizer::combineShadows(Value *V1, Value *V2,
- Instruction *Pos) {
- if (V1 == ZeroShadow)
+Value *DFSanFunction::combineShadows(Value *V1, Value *V2, Instruction *Pos) {
+ if (V1 == DFS.ZeroShadow)
return V2;
- if (V2 == ZeroShadow)
+ if (V2 == DFS.ZeroShadow)
return V1;
if (V1 == V2)
return V1;
+
+ auto V1Elems = ShadowElements.find(V1);
+ auto V2Elems = ShadowElements.find(V2);
+ if (V1Elems != ShadowElements.end() && V2Elems != ShadowElements.end()) {
+ if (std::includes(V1Elems->second.begin(), V1Elems->second.end(),
+ V2Elems->second.begin(), V2Elems->second.end())) {
+ return V1;
+ } else if (std::includes(V2Elems->second.begin(), V2Elems->second.end(),
+ V1Elems->second.begin(), V1Elems->second.end())) {
+ return V2;
+ }
+ } else if (V1Elems != ShadowElements.end()) {
+ if (V1Elems->second.count(V2))
+ return V1;
+ } else if (V2Elems != ShadowElements.end()) {
+ if (V2Elems->second.count(V1))
+ return V2;
+ }
+
+ auto Key = std::make_pair(V1, V2);
+ if (V1 > V2)
+ std::swap(Key.first, Key.second);
+ CachedCombinedShadow &CCS = CachedCombinedShadows[Key];
+ if (CCS.Block && DT.dominates(CCS.Block, Pos->getParent()))
+ return CCS.Shadow;
+
IRBuilder<> IRB(Pos);
BasicBlock *Head = Pos->getParent();
Value *Ne = IRB.CreateICmpNE(V1, V2);
- Instruction *NeInst = dyn_cast<Instruction>(Ne);
- if (NeInst) {
- BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
- NeInst, /*Unreachable=*/ false, ColdCallWeights));
- IRBuilder<> ThenIRB(BI);
- CallInst *Call = ThenIRB.CreateCall2(DFSanUnionFn, V1, V2);
- Call->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
- Call->addAttribute(1, Attribute::ZExt);
- Call->addAttribute(2, Attribute::ZExt);
-
- BasicBlock *Tail = BI->getSuccessor(0);
- PHINode *Phi = PHINode::Create(ShadowTy, 2, "", Tail->begin());
- Phi->addIncoming(Call, Call->getParent());
- Phi->addIncoming(V1, Head);
- Pos = Phi;
- return Phi;
+ BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
+ Ne, Pos, /*Unreachable=*/false, DFS.ColdCallWeights, &DT));
+ IRBuilder<> ThenIRB(BI);
+ CallInst *Call = ThenIRB.CreateCall2(DFS.DFSanUnionFn, V1, V2);
+ Call->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
+ Call->addAttribute(1, Attribute::ZExt);
+ Call->addAttribute(2, Attribute::ZExt);
+
+ BasicBlock *Tail = BI->getSuccessor(0);
+ PHINode *Phi = PHINode::Create(DFS.ShadowTy, 2, "", Tail->begin());
+ Phi->addIncoming(Call, Call->getParent());
+ Phi->addIncoming(V1, Head);
+
+ CCS.Block = Tail;
+ CCS.Shadow = Phi;
+
+ std::set<Value *> UnionElems;
+ if (V1Elems != ShadowElements.end()) {
+ UnionElems = V1Elems->second;
+ } else {
+ UnionElems.insert(V1);
+ }
+ if (V2Elems != ShadowElements.end()) {
+ UnionElems.insert(V2Elems->second.begin(), V2Elems->second.end());
} else {
- assert(0 && "todo");
- return 0;
+ UnionElems.insert(V2);
}
+ ShadowElements[Phi] = std::move(UnionElems);
+
+ return Phi;
}
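// [Editor's note -- standalone sketch, not part of this patch.] The
// ShadowElements bookkeeping above implements a small set algebra: a union
// shadow remembers which primitive labels it already covers, so re-combining
// it with any subset of them can return the existing value instead of
// emitting another __dfsan_union call. A host-side model of that shortcut:
#include <algorithm>
#include <set>

using Label = int;  // stand-in for the Value* shadows used by the pass

// Mirrors the std::includes() tests in combineShadows(): true if every
// element of B is already covered by A (both sets are sorted, as required).
bool covers(const std::set<Label> &A, const std::set<Label> &B) {
  return std::includes(A.begin(), A.end(), B.begin(), B.end());
}
// e.g. covers({1, 2, 3}, {2, 3}) is true, so that union would be a no-op.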
// A convenience function which folds the shadows of each of the operands
@@ -852,7 +965,7 @@ Value *DFSanFunction::combineOperandShadows(Instruction *Inst) {
Value *Shadow = getShadow(Inst->getOperand(0));
for (unsigned i = 1, n = Inst->getNumOperands(); i != n; ++i) {
- Shadow = DFS.combineShadows(Shadow, getShadow(Inst->getOperand(i)), Inst);
+ Shadow = combineShadows(Shadow, getShadow(Inst->getOperand(i)), Inst);
}
return Shadow;
}
@@ -905,9 +1018,8 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
IRBuilder<> IRB(Pos);
Value *ShadowAddr1 =
IRB.CreateGEP(ShadowAddr, ConstantInt::get(DFS.IntptrTy, 1));
- return DFS.combineShadows(IRB.CreateAlignedLoad(ShadowAddr, ShadowAlign),
- IRB.CreateAlignedLoad(ShadowAddr1, ShadowAlign),
- Pos);
+ return combineShadows(IRB.CreateAlignedLoad(ShadowAddr, ShadowAlign),
+ IRB.CreateAlignedLoad(ShadowAddr1, ShadowAlign), Pos);
}
}
if (Size % (64 / DFS.ShadowWidth) == 0) {
@@ -934,16 +1046,27 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
BasicBlock *Head = Pos->getParent();
BasicBlock *Tail = Head->splitBasicBlock(Pos);
+
+ if (DomTreeNode *OldNode = DT.getNode(Head)) {
+ std::vector<DomTreeNode *> Children(OldNode->begin(), OldNode->end());
+
+ DomTreeNode *NewNode = DT.addNewBlock(Tail, Head);
+ for (auto Child : Children)
+ DT.changeImmediateDominator(Child, NewNode);
+ }
+
// In the following code LastBr will refer to the previous basic block's
// conditional branch instruction, whose true successor is fixed up to point
// to the next block during the loop below or to the tail after the final
// iteration.
BranchInst *LastBr = BranchInst::Create(FallbackBB, FallbackBB, ShadowsEq);
ReplaceInstWithInst(Head->getTerminator(), LastBr);
+ DT.addNewBlock(FallbackBB, Head);
for (uint64_t Ofs = 64 / DFS.ShadowWidth; Ofs != Size;
Ofs += 64 / DFS.ShadowWidth) {
BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F);
+ DT.addNewBlock(NextBB, LastBr->getParent());
IRBuilder<> NextIRB(NextBB);
WideAddr = NextIRB.CreateGEP(WideAddr, ConstantInt::get(DFS.IntptrTy, 1));
Value *NextWideShadow = NextIRB.CreateAlignedLoad(WideAddr, ShadowAlign);
@@ -978,14 +1101,15 @@ void DFSanVisitor::visitLoadInst(LoadInst &LI) {
Align = 1;
}
IRBuilder<> IRB(&LI);
- Value *LoadedShadow =
- DFSF.loadShadow(LI.getPointerOperand(), Size, Align, &LI);
- Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand());
- Value *CombinedShadow = DFSF.DFS.combineShadows(LoadedShadow, PtrShadow, &LI);
- if (CombinedShadow != DFSF.DFS.ZeroShadow)
- DFSF.NonZeroChecks.insert(CombinedShadow);
-
- DFSF.setShadow(&LI, CombinedShadow);
+ Value *Shadow = DFSF.loadShadow(LI.getPointerOperand(), Size, Align, &LI);
+ if (ClCombinePointerLabelsOnLoad) {
+ Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand());
+ Shadow = DFSF.combineShadows(Shadow, PtrShadow, &LI);
+ }
+ if (Shadow != DFSF.DFS.ZeroShadow)
+ DFSF.NonZeroChecks.insert(Shadow);
+
+ DFSF.setShadow(&LI, Shadow);
}
void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, uint64_t Align,
@@ -1050,8 +1174,13 @@ void DFSanVisitor::visitStoreInst(StoreInst &SI) {
} else {
Align = 1;
}
- DFSF.storeShadow(SI.getPointerOperand(), Size, Align,
- DFSF.getShadow(SI.getValueOperand()), &SI);
+
+ Value* Shadow = DFSF.getShadow(SI.getValueOperand());
+ if (ClCombinePointerLabelsOnStore) {
+ Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand());
+ Shadow = DFSF.combineShadows(Shadow, PtrShadow, &SI);
+ }
+ DFSF.storeShadow(SI.getPointerOperand(), Size, Align, Shadow, &SI);
}
void DFSanVisitor::visitBinaryOperator(BinaryOperator &BO) {
@@ -1088,12 +1217,11 @@ void DFSanVisitor::visitInsertValueInst(InsertValueInst &I) {
void DFSanVisitor::visitAllocaInst(AllocaInst &I) {
bool AllLoadsStores = true;
- for (Instruction::use_iterator i = I.use_begin(), e = I.use_end(); i != e;
- ++i) {
- if (isa<LoadInst>(*i))
+ for (User *U : I.users()) {
+ if (isa<LoadInst>(U))
continue;
- if (StoreInst *SI = dyn_cast<StoreInst>(*i)) {
+ if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
if (SI->getPointerOperand() == &I)
continue;
}
@@ -1115,9 +1243,9 @@ void DFSanVisitor::visitSelectInst(SelectInst &I) {
if (isa<VectorType>(I.getCondition()->getType())) {
DFSF.setShadow(
- &I, DFSF.DFS.combineShadows(
- CondShadow,
- DFSF.DFS.combineShadows(TrueShadow, FalseShadow, &I), &I));
+ &I,
+ DFSF.combineShadows(
+ CondShadow, DFSF.combineShadows(TrueShadow, FalseShadow, &I), &I));
} else {
Value *ShadowSel;
if (TrueShadow == FalseShadow) {
@@ -1126,7 +1254,7 @@ void DFSanVisitor::visitSelectInst(SelectInst &I) {
ShadowSel =
SelectInst::Create(I.getCondition(), TrueShadow, FalseShadow, "", &I);
}
- DFSF.setShadow(&I, DFSF.DFS.combineShadows(CondShadow, ShadowSel, &I));
+ DFSF.setShadow(&I, DFSF.combineShadows(CondShadow, ShadowSel, &I));
}
}
@@ -1299,7 +1427,7 @@ void DFSanVisitor::visitCallSite(CallSite CS) {
}
}
- Instruction *Next = 0;
+ Instruction *Next = nullptr;
if (!CS.getType()->isVoidTy()) {
if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
if (II->getNormalDest()->getSinglePredecessor()) {
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/DebugIR.cpp b/contrib/llvm/lib/Transforms/Instrumentation/DebugIR.cpp
index f50a044..f2f1738 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/DebugIR.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/DebugIR.cpp
@@ -16,27 +16,23 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "debug-ir"
-
-#include "llvm/ADT/ValueMap.h"
-#include "llvm/Assembly/AssemblyAnnotationWriter.h"
-#include "llvm/DebugInfo.h"
-#include "llvm/DIBuilder.h"
-#include "llvm/InstVisitor.h"
+#include "llvm/IR/ValueMap.h"
+#include "DebugIR.h"
+#include "llvm/IR/AssemblyAnnotationWriter.h"
+#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
-#include "llvm/Transforms/Instrumentation.h"
-#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/ToolOutputFile.h"
-#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/Path.h"
-
-#include "DebugIR.h"
-
+#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/Transforms/Utils/Cloning.h"
#include <string>
#define STR_HELPER(x) #x
@@ -44,6 +40,8 @@
using namespace llvm;
+#define DEBUG_TYPE "debug-ir"
+
namespace {
/// Builds a map of Value* to line numbers on which the Value appears in a
@@ -69,11 +67,12 @@ public:
// This function is called after an Instruction, GlobalValue, or GlobalAlias
// is printed.
- void printInfoComment(const Value &V, formatted_raw_ostream &Out) {
+ void printInfoComment(const Value &V, formatted_raw_ostream &Out) override {
addEntry(&V, Out);
}
- void emitFunctionAnnot(const Function *F, formatted_raw_ostream &Out) {
+ void emitFunctionAnnot(const Function *F,
+ formatted_raw_ostream &Out) override {
addEntry(F, Out);
}
@@ -119,7 +118,7 @@ public:
void visitInstruction(Instruction &I) {
if (I.getMetadata(LLVMContext::MD_dbg))
- I.setMetadata(LLVMContext::MD_dbg, 0);
+ I.setMetadata(LLVMContext::MD_dbg, nullptr);
}
void run(Module *M) {
@@ -169,11 +168,11 @@ class DIUpdater : public InstVisitor<DIUpdater> {
public:
DIUpdater(Module &M, StringRef Filename = StringRef(),
- StringRef Directory = StringRef(), const Module *DisplayM = 0,
- const ValueToValueMapTy *VMap = 0)
+ StringRef Directory = StringRef(), const Module *DisplayM = nullptr,
+ const ValueToValueMapTy *VMap = nullptr)
: Builder(M), Layout(&M), LineTable(DisplayM ? DisplayM : &M), VMap(VMap),
- Finder(), Filename(Filename), Directory(Directory), FileNode(0),
- LexicalBlockFileNode(0), CUNode(0) {
+ Finder(), Filename(Filename), Directory(Directory), FileNode(nullptr),
+ LexicalBlockFileNode(nullptr), CUNode(nullptr) {
Finder.processModule(M);
visit(&M);
}
@@ -184,8 +183,8 @@ public:
if (Finder.compile_unit_count() > 1)
report_fatal_error("DebugIR pass supports only a signle compile unit per "
"Module.");
- createCompileUnit(
- Finder.compile_unit_count() == 1 ? *Finder.compile_unit_begin() : 0);
+ createCompileUnit(Finder.compile_unit_count() == 1 ?
+ (MDNode*)*Finder.compile_units().begin() : nullptr);
}
void visitFunction(Function &F) {
@@ -233,7 +232,7 @@ public:
/// If a ValueToValueMap is provided, use it to get the real instruction as
/// the line table was generated on a clone of the module on which we are
/// operating.
- Value *RealInst = 0;
+ Value *RealInst = nullptr;
if (VMap)
RealInst = VMap->lookup(&I);
@@ -257,7 +256,7 @@ public:
NewLoc = DebugLoc::get(Line, Col, Loc.getScope(RealInst->getContext()),
Loc.getInlinedAt(RealInst->getContext()));
else if (MDNode *scope = findScope(&I))
- NewLoc = DebugLoc::get(Line, Col, scope, 0);
+ NewLoc = DebugLoc::get(Line, Col, scope, nullptr);
else {
DEBUG(dbgs() << "WARNING: no valid scope for instruction " << &I
<< ". no DebugLoc will be present."
@@ -326,19 +325,16 @@ private:
<< " subprogram nodes"
<< "\n");
- for (DebugInfoFinder::iterator i = Finder.subprogram_begin(),
- e = Finder.subprogram_end();
- i != e; ++i) {
- DISubprogram S(*i);
+ for (DISubprogram S : Finder.subprograms()) {
if (S.getFunction() == F) {
- DEBUG(dbgs() << "Found DISubprogram " << *i << " for function "
+ DEBUG(dbgs() << "Found DISubprogram " << S << " for function "
<< S.getFunction() << "\n");
- return *i;
+ return S;
}
}
DEBUG(dbgs() << "unable to find DISubprogram node for function "
<< F->getName().str() << "\n");
- return 0;
+ return nullptr;
}
/// Sets Line to the line number on which V appears and returns true. If a
@@ -358,7 +354,10 @@ private:
std::string getTypeName(Type *T) {
std::string TypeName;
raw_string_ostream TypeStream(TypeName);
- T->print(TypeStream);
+ if (T)
+ T->print(TypeStream);
+ else
+ TypeStream << "Printing <null> Type";
TypeStream.flush();
return TypeName;
}
@@ -370,7 +369,7 @@ private:
TypeNodeIter i = TypeDescriptors.find(T);
if (i != TypeDescriptors.end())
return i->second;
- return 0;
+ return nullptr;
}
/// Returns a DebugInfo type from an LLVM type T.
@@ -379,12 +378,12 @@ private:
if (N)
return DIDerivedType(N);
else if (T->isVoidTy())
- return DIDerivedType(0);
+ return DIDerivedType(nullptr);
else if (T->isStructTy()) {
N = Builder.createStructType(
DIScope(LexicalBlockFileNode), T->getStructName(), DIFile(FileNode),
0, Layout.getTypeSizeInBits(T), Layout.getABITypeAlignment(T), 0,
- DIType(0), DIArray(0)); // filled in later
+ DIType(nullptr), DIArray(nullptr)); // filled in later
// N is added to the map (early) so that element search below can find it,
// so as to avoid infinite recursion for structs that contain pointers to
@@ -504,7 +503,7 @@ bool DebugIR::updateExtension(StringRef NewExtension) {
return true;
}
-void DebugIR::generateFilename(OwningPtr<int> &fd) {
+void DebugIR::generateFilename(std::unique_ptr<int> &fd) {
SmallVector<char, 16> PathVec;
fd.reset(new int);
sys::fs::createTemporaryFile("debug-ir", "ll", *fd, PathVec);
@@ -525,12 +524,12 @@ std::string DebugIR::getPath() {
}
void DebugIR::writeDebugBitcode(const Module *M, int *fd) {
- OwningPtr<raw_fd_ostream> Out;
+ std::unique_ptr<raw_fd_ostream> Out;
std::string error;
if (!fd) {
std::string Path = getPath();
- Out.reset(new raw_fd_ostream(Path.c_str(), error));
+ Out.reset(new raw_fd_ostream(Path.c_str(), error, sys::fs::F_Text));
DEBUG(dbgs() << "WRITING debug bitcode from Module " << M << " to file "
<< Path << "\n");
} else {
@@ -539,16 +538,16 @@ void DebugIR::writeDebugBitcode(const Module *M, int *fd) {
Out.reset(new raw_fd_ostream(*fd, true));
}
- M->print(*Out, 0);
+ M->print(*Out, nullptr);
Out->close();
}
-void DebugIR::createDebugInfo(Module &M, OwningPtr<Module> &DisplayM) {
+void DebugIR::createDebugInfo(Module &M, std::unique_ptr<Module> &DisplayM) {
if (M.getFunctionList().size() == 0)
// no functions -- no debug info needed
return;
- OwningPtr<ValueToValueMapTy> VMap;
+ std::unique_ptr<ValueToValueMapTy> VMap;
if (WriteSourceToDisk && (HideDebugIntrinsics || HideDebugMetadata)) {
VMap.reset(new ValueToValueMapTy);
@@ -567,7 +566,7 @@ void DebugIR::createDebugInfo(Module &M, OwningPtr<Module> &DisplayM) {
bool DebugIR::isMissingPath() { return Filename.empty() || Directory.empty(); }
bool DebugIR::runOnModule(Module &M) {
- OwningPtr<int> fd;
+ std::unique_ptr<int> fd;
if (isMissingPath() && !getSourceInfo(M)) {
if (!WriteSourceToDisk)
@@ -586,7 +585,7 @@ bool DebugIR::runOnModule(Module &M) {
// file name from the DICompileUnit descriptor.
DebugMetadataRemover::process(M, !ParsedPath);
- OwningPtr<Module> DisplayM;
+ std::unique_ptr<Module> DisplayM;
createDebugInfo(M, DisplayM);
if (WriteSourceToDisk) {
Module *OutputM = DisplayM.get() ? DisplayM.get() : &M;
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/DebugIR.h b/contrib/llvm/lib/Transforms/Instrumentation/DebugIR.h
index 13774cf..02831ed 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/DebugIR.h
+++ b/contrib/llvm/lib/Transforms/Instrumentation/DebugIR.h
@@ -16,7 +16,6 @@
#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_DEBUGIR_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_DEBUGIR_H
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/Pass.h"
namespace llvm {
@@ -43,7 +42,7 @@ class DebugIR : public llvm::ModulePass {
public:
static char ID;
- const char *getPassName() const { return "DebugIR"; }
+ const char *getPassName() const override { return "DebugIR"; }
/// Generate a file on disk to be displayed in a debugger. If Filename and
/// Directory are empty, a temporary path will be generated.
@@ -62,7 +61,7 @@ public:
/// Run pass on M and set Path to the source file path in the output module.
bool runOnModule(llvm::Module &M, std::string &Path);
- bool runOnModule(llvm::Module &M);
+ bool runOnModule(llvm::Module &M) override;
private:
@@ -79,11 +78,11 @@ private:
bool updateExtension(llvm::StringRef NewExtension);
/// Generate a temporary filename and open an fd
- void generateFilename(llvm::OwningPtr<int> &fd);
+ void generateFilename(std::unique_ptr<int> &fd);
/// Creates DWARF CU/Subroutine metadata
void createDebugInfo(llvm::Module &M,
- llvm::OwningPtr<llvm::Module> &DisplayM);
+ std::unique_ptr<llvm::Module> &DisplayM);
/// Returns true if either Directory or Filename is missing, false otherwise.
bool isMissingPath();
@@ -91,7 +90,7 @@ private:
/// Write M to disk, optionally passing in an fd to an open file which is
/// closed by this function after writing. If no fd is specified, a new file
/// is opened, written, and closed.
- void writeDebugBitcode(const llvm::Module *M, int *fd = 0);
+ void writeDebugBitcode(const llvm::Module *M, int *fd = nullptr);
};
} // llvm namespace
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index 206bffb..cfeb62e 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -14,33 +14,36 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "insert-gcov-profiling"
-
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/UniqueVector.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/DebugLoc.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/InstIterator.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
+#include <memory>
#include <string>
#include <utility>
using namespace llvm;
+#define DEBUG_TYPE "insert-gcov-profiling"
+
static cl::opt<std::string>
DefaultGCOVVersion("default-gcov-version", cl::init("402*"), cl::Hidden,
cl::ValueRequired);
@@ -62,20 +65,25 @@ GCOVOptions GCOVOptions::getDefault() {
}
namespace {
+ class GCOVFunction;
+
class GCOVProfiler : public ModulePass {
public:
static char ID;
GCOVProfiler() : ModulePass(ID), Options(GCOVOptions::getDefault()) {
- ReversedVersion[0] = Options.Version[3];
- ReversedVersion[1] = Options.Version[2];
- ReversedVersion[2] = Options.Version[1];
- ReversedVersion[3] = Options.Version[0];
- ReversedVersion[4] = '\0';
- initializeGCOVProfilerPass(*PassRegistry::getPassRegistry());
+ init();
}
GCOVProfiler(const GCOVOptions &Options) : ModulePass(ID), Options(Options){
assert((Options.EmitNotes || Options.EmitData) &&
"GCOVProfiler asked to do nothing?");
+ init();
+ }
+ const char *getPassName() const override {
+ return "GCOV Profiler";
+ }
+
+ private:
+ void init() {
ReversedVersion[0] = Options.Version[3];
ReversedVersion[1] = Options.Version[2];
ReversedVersion[2] = Options.Version[1];
@@ -83,12 +91,7 @@ namespace {
ReversedVersion[4] = '\0';
initializeGCOVProfilerPass(*PassRegistry::getPassRegistry());
}
- virtual const char *getPassName() const {
- return "GCOV Profiler";
- }
-
- private:
- bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
// Create the .gcno files for the Module based on DebugInfo.
void emitProfileNotes();
@@ -130,10 +133,13 @@ namespace {
GCOVOptions Options;
// Reversed, NUL-terminated copy of Options.Version.
- char ReversedVersion[5];
+ char ReversedVersion[5];
+ // Checksum, produced by hash of EdgeDestinations
+ SmallVector<uint32_t, 4> FileChecksums;
Module *M;
LLVMContext *Ctx;
+ SmallVector<std::unique_ptr<GCOVFunction>, 16> Funcs;
};
}
@@ -145,7 +151,7 @@ ModulePass *llvm::createGCOVProfilerPass(const GCOVOptions &Options) {
return new GCOVProfiler(Options);
}
-static std::string getFunctionName(DISubprogram SP) {
+static StringRef getFunctionName(DISubprogram SP) {
if (!SP.getLinkageName().empty())
return SP.getLinkageName();
return SP.getName();
@@ -205,6 +211,7 @@ namespace {
class GCOVLines : public GCOVRecord {
public:
void addLine(uint32_t Line) {
+ assert(Line != 0 && "Line zero is not a valid real line number.");
Lines.push_back(Line);
}
@@ -220,7 +227,7 @@ namespace {
write(Lines[i]);
}
- GCOVLines(StringRef F, raw_ostream *os)
+ GCOVLines(StringRef F, raw_ostream *os)
: Filename(F) {
this->os = os;
}
@@ -231,14 +238,6 @@ namespace {
};
- // Sorting function for deterministic behaviour in GCOVBlock::writeOut.
- struct StringKeySort {
- bool operator()(StringMapEntry<GCOVLines *> *LHS,
- StringMapEntry<GCOVLines *> *RHS) const {
- return LHS->getKey() < RHS->getKey();
- }
- };
-
// Represent a basic block in GCOV. Each block has a unique number in the
// function, number of lines belonging to each block, and a set of edges to
// other blocks.
@@ -269,11 +268,14 @@ namespace {
write(Len);
write(Number);
- StringKeySort Sorter;
- std::sort(SortedLinesByFile.begin(), SortedLinesByFile.end(), Sorter);
+ std::sort(SortedLinesByFile.begin(), SortedLinesByFile.end(),
+ [](StringMapEntry<GCOVLines *> *LHS,
+ StringMapEntry<GCOVLines *> *RHS) {
+ return LHS->getKey() < RHS->getKey();
+ });
for (SmallVectorImpl<StringMapEntry<GCOVLines *> *>::iterator
I = SortedLinesByFile.begin(), E = SortedLinesByFile.end();
- I != E; ++I)
+ I != E; ++I)
(*I)->getValue()->writeOut();
write(0);
write(0);
@@ -302,30 +304,23 @@ namespace {
class GCOVFunction : public GCOVRecord {
public:
GCOVFunction(DISubprogram SP, raw_ostream *os, uint32_t Ident,
- bool UseCfgChecksum) {
+ bool UseCfgChecksum) :
+ SP(SP), Ident(Ident), UseCfgChecksum(UseCfgChecksum), CfgChecksum(0) {
this->os = os;
Function *F = SP.getFunction();
- DEBUG(dbgs() << "Function: " << F->getName() << "\n");
+ DEBUG(dbgs() << "Function: " << getFunctionName(SP) << "\n");
uint32_t i = 0;
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
Blocks[BB] = new GCOVBlock(i++, os);
}
ReturnBlock = new GCOVBlock(i++, os);
- writeBytes(FunctionTag, 4);
- uint32_t BlockLen = 1 + 1 + 1 + lengthOfGCOVString(getFunctionName(SP)) +
- 1 + lengthOfGCOVString(SP.getFilename()) + 1;
- if (UseCfgChecksum)
- ++BlockLen;
- write(BlockLen);
- write(Ident);
- write(0); // lineno checksum
- if (UseCfgChecksum)
- write(0); // cfg checksum
- writeGCOVString(getFunctionName(SP));
- writeGCOVString(SP.getFilename());
- write(SP.getLineNumber());
+ std::string FunctionNameAndLine;
+ raw_string_ostream FNLOS(FunctionNameAndLine);
+ FNLOS << getFunctionName(SP) << SP.getLineNumber();
+ FNLOS.flush();
+ FuncChecksum = hash_value(FunctionNameAndLine);
}
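The per-function checksum built in the constructor above hashes the textual concatenation of the function name (the linkage name when available) and its starting line number, with no separator. A minimal standalone sketch of that key, with std::hash standing in for llvm::hash_value and the helper name funcChecksum used only for illustration:

    #include <cstdint>
    #include <functional>
    #include <string>
    // e.g. a function "foo" defined at line 42 hashes the string "foo42".
    static uint32_t funcChecksum(const std::string &Name, unsigned Line) {
      std::string Key = Name + std::to_string(Line);
      return uint32_t(std::hash<std::string>()(Key)); // stand-in for llvm::hash_value
    }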
~GCOVFunction() {
@@ -341,7 +336,41 @@ namespace {
return *ReturnBlock;
}
+ std::string getEdgeDestinations() {
+ std::string EdgeDestinations;
+ raw_string_ostream EDOS(EdgeDestinations);
+ Function *F = Blocks.begin()->first->getParent();
+ for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) {
+ GCOVBlock &Block = *Blocks[I];
+ for (int i = 0, e = Block.OutEdges.size(); i != e; ++i)
+ EDOS << Block.OutEdges[i]->Number;
+ }
+ return EdgeDestinations;
+ }
+
+ uint32_t getFuncChecksum() {
+ return FuncChecksum;
+ }
+
+ void setCfgChecksum(uint32_t Checksum) {
+ CfgChecksum = Checksum;
+ }
+
void writeOut() {
+ writeBytes(FunctionTag, 4);
+ uint32_t BlockLen = 1 + 1 + 1 + lengthOfGCOVString(getFunctionName(SP)) +
+ 1 + lengthOfGCOVString(SP.getFilename()) + 1;
+ if (UseCfgChecksum)
+ ++BlockLen;
+ write(BlockLen);
+ write(Ident);
+ write(FuncChecksum);
+ if (UseCfgChecksum)
+ write(CfgChecksum);
+ writeGCOVString(getFunctionName(SP));
+ writeGCOVString(SP.getFilename());
+ write(SP.getLineNumber());
+
// Emit count of blocks.
writeBytes(BlockTag, 4);
write(Blocks.size() + 1);
@@ -375,6 +404,11 @@ namespace {
}
private:
+ DISubprogram SP;
+ uint32_t Ident;
+ uint32_t FuncChecksum;
+ bool UseCfgChecksum;
+ uint32_t CfgChecksum;
DenseMap<BasicBlock *, GCOVBlock *> Blocks;
GCOVBlock *ReturnBlock;
};
@@ -414,6 +448,28 @@ bool GCOVProfiler::runOnModule(Module &M) {
return false;
}
+static bool functionHasLines(Function *F) {
+ // Check whether this function actually has any source lines. Not only
+ // do these waste space, they also can crash gcov.
+ for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
+ for (BasicBlock::iterator I = BB->begin(), IE = BB->end();
+ I != IE; ++I) {
+ // Debug intrinsic locations correspond to the location of the
+ // declaration, not necessarily any statements or expressions.
+ if (isa<DbgInfoIntrinsic>(I)) continue;
+
+ const DebugLoc &Loc = I->getDebugLoc();
+ if (Loc.isUnknown()) continue;
+
+ // Artificial lines such as calls to the global constructors.
+ if (Loc.getLine() == 0) continue;
+
+ return true;
+ }
+ }
+ return false;
+}
+
void GCOVProfiler::emitProfileNotes() {
NamedMDNode *CU_Nodes = M->getNamedMetadata("llvm.dbg.cu");
if (!CU_Nodes) return;
@@ -426,10 +482,8 @@ void GCOVProfiler::emitProfileNotes() {
DICompileUnit CU(CU_Nodes->getOperand(i));
std::string ErrorInfo;
raw_fd_ostream out(mangleName(CU, "gcno").c_str(), ErrorInfo,
- sys::fs::F_Binary);
- out.write("oncg", 4);
- out.write(ReversedVersion, 4);
- out.write("MVLL", 4);
+ sys::fs::F_None);
+ std::string EdgeDestinations;
DIArray SPs = CU.getSubprograms();
for (unsigned i = 0, e = SPs.getNumElements(); i != e; ++i) {
@@ -441,7 +495,19 @@ void GCOVProfiler::emitProfileNotes() {
Function *F = SP.getFunction();
if (!F) continue;
- GCOVFunction Func(SP, &out, i, Options.UseCfgChecksum);
+ if (!functionHasLines(F)) continue;
+
+ // gcov expects every function to start with an entry block that has a
+ // single successor, so split the entry block to make sure of that.
+ BasicBlock &EntryBlock = F->getEntryBlock();
+ BasicBlock::iterator It = EntryBlock.begin();
+ while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
+ ++It;
+ EntryBlock.splitBasicBlock(It);
+
+ Funcs.push_back(
+ make_unique<GCOVFunction>(SP, &out, i, Options.UseCfgChecksum));
+ GCOVFunction &Func = *Funcs.back();
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
GCOVBlock &Block = Func.getBlock(BB);
@@ -457,8 +523,16 @@ void GCOVProfiler::emitProfileNotes() {
uint32_t Line = 0;
for (BasicBlock::iterator I = BB->begin(), IE = BB->end();
I != IE; ++I) {
+ // Debug intrinsic locations correspond to the location of the
+ // declaration, not necessarily any statements or expressions.
+ if (isa<DbgInfoIntrinsic>(I)) continue;
+
const DebugLoc &Loc = I->getDebugLoc();
if (Loc.isUnknown()) continue;
+
+ // Artificial lines such as calls to the global constructors.
+ if (Loc.getLine() == 0) continue;
+
if (Line == Loc.getLine()) continue;
Line = Loc.getLine();
if (SP != getDISubprogram(Loc.getScope(*Ctx))) continue;
@@ -467,8 +541,19 @@ void GCOVProfiler::emitProfileNotes() {
Lines.addLine(Loc.getLine());
}
}
- Func.writeOut();
+ EdgeDestinations += Func.getEdgeDestinations();
}
+
+ FileChecksums.push_back(hash_value(EdgeDestinations));
+ out.write("oncg", 4);
+ out.write(ReversedVersion, 4);
+ out.write(reinterpret_cast<char*>(&FileChecksums.back()), 4);
+
+ for (auto &Func : Funcs) {
+ Func->setCfgChecksum(FileChecksums.back());
+ Func->writeOut();
+ }
+
out.write("\0\0\0\0\0\0\0\0", 8); // EOF
out.close();
}
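To summarize the new write order (an illustrative sketch, not part of the patch): the .gcno header is now emitted only after every function in the compile unit has been walked, because its third word is a hash of the concatenated edge destinations; the same value is reused as the cfg checksum of every function record and is later handed to llvm_gcda_start_file.

    // Sketch of the .gcno stream produced by emitProfileNotes() above,
    // assuming the default version string "402*" (reversed to "*204"):
    //   "oncg"                4 bytes  magic
    //   "*204"                4 bytes  ReversedVersion
    //   FileChecksums.back()  4 bytes  hash of all edge destinations in the CU
    //   per-function records: Ident, FuncChecksum, optional CfgChecksum,
    //                         name, filename, line, blocks, edges, line tables
    //   "\0" x 8                       EOF marker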
@@ -478,7 +563,7 @@ bool GCOVProfiler::emitProfileArcs() {
NamedMDNode *CU_Nodes = M->getNamedMetadata("llvm.dbg.cu");
if (!CU_Nodes) return false;
- bool Result = false;
+ bool Result = false;
bool InsertIndCounterIncrCode = false;
for (unsigned i = 0, e = CU_Nodes->getNumOperands(); i != e; ++i) {
DICompileUnit CU(CU_Nodes->getOperand(i));
@@ -492,6 +577,7 @@ bool GCOVProfiler::emitProfileArcs() {
continue;
Function *F = SP.getFunction();
if (!F) continue;
+ if (!functionHasLines(F)) continue;
if (!Result) Result = true;
unsigned Edges = 0;
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
@@ -501,7 +587,7 @@ bool GCOVProfiler::emitProfileArcs() {
else
Edges += TI->getNumSuccessors();
}
-
+
ArrayType *CounterTy =
ArrayType::get(Type::getInt64Ty(*Ctx), Edges);
GlobalVariable *Counters =
@@ -510,10 +596,10 @@ bool GCOVProfiler::emitProfileArcs() {
Constant::getNullValue(CounterTy),
"__llvm_gcov_ctr");
CountersBySP.push_back(std::make_pair(Counters, (MDNode*)SP));
-
+
UniqueVector<BasicBlock *> ComplexEdgePreds;
UniqueVector<BasicBlock *> ComplexEdgeSuccs;
-
+
unsigned Edge = 0;
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
TerminatorInst *TI = BB->getTerminator();
@@ -547,13 +633,13 @@ bool GCOVProfiler::emitProfileArcs() {
Edge += Successors;
}
}
-
+
if (!ComplexEdgePreds.empty()) {
GlobalVariable *EdgeTable =
buildEdgeLookupTable(F, Counters,
ComplexEdgePreds, ComplexEdgeSuccs);
GlobalVariable *EdgeState = getEdgeStateValue();
-
+
for (int i = 0, e = ComplexEdgePreds.size(); i != e; ++i) {
IRBuilder<> Builder(ComplexEdgePreds[i + 1]->getFirstInsertionPt());
Builder.CreateStore(Builder.getInt32(i), EdgeState);
@@ -630,7 +716,7 @@ GlobalVariable *GCOVProfiler::buildEdgeLookupTable(
Type *Int64PtrTy = Type::getInt64PtrTy(*Ctx);
ArrayType *EdgeTableTy = ArrayType::get(Int64PtrTy, TableSize);
- OwningArrayPtr<Constant *> EdgeTable(new Constant*[TableSize]);
+ std::unique_ptr<Constant * []> EdgeTable(new Constant *[TableSize]);
Constant *NullValue = Constant::getNullValue(Int64PtrTy);
for (size_t i = 0; i != TableSize; ++i)
EdgeTable[i] = NullValue;
@@ -666,6 +752,7 @@ Constant *GCOVProfiler::getStartFileFunc() {
Type *Args[] = {
Type::getInt8PtrTy(*Ctx), // const char *orig_filename
Type::getInt8PtrTy(*Ctx), // const char version[4]
+ Type::getInt32Ty(*Ctx), // uint32_t checksum
};
FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), Args, false);
return M->getOrInsertFunction("llvm_gcda_start_file", FTy);
@@ -683,10 +770,12 @@ Constant *GCOVProfiler::getIncrementIndirectCounterFunc() {
}
Constant *GCOVProfiler::getEmitFunctionFunc() {
- Type *Args[3] = {
+ Type *Args[] = {
Type::getInt32Ty(*Ctx), // uint32_t ident
Type::getInt8PtrTy(*Ctx), // const char *function_name
+ Type::getInt32Ty(*Ctx), // uint32_t func_checksum
Type::getInt8Ty(*Ctx), // uint8_t use_extra_checksum
+ Type::getInt32Ty(*Ctx), // uint32_t cfg_checksum
};
FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), Args, false);
return M->getOrInsertFunction("llvm_gcda_emit_function", FTy);
@@ -760,17 +849,22 @@ Function *GCOVProfiler::insertCounterWriteout(
for (unsigned i = 0, e = CU_Nodes->getNumOperands(); i != e; ++i) {
DICompileUnit CU(CU_Nodes->getOperand(i));
std::string FilenameGcda = mangleName(CU, "gcda");
- Builder.CreateCall2(StartFile,
+ uint32_t CfgChecksum = FileChecksums.empty() ? 0 : FileChecksums[i];
+ Builder.CreateCall3(StartFile,
Builder.CreateGlobalStringPtr(FilenameGcda),
- Builder.CreateGlobalStringPtr(ReversedVersion));
+ Builder.CreateGlobalStringPtr(ReversedVersion),
+ Builder.getInt32(CfgChecksum));
for (unsigned j = 0, e = CountersBySP.size(); j != e; ++j) {
DISubprogram SP(CountersBySP[j].second);
- Builder.CreateCall3(
+ uint32_t FuncChecksum = Funcs.empty() ? 0 : Funcs[j]->getFuncChecksum();
+ Builder.CreateCall5(
EmitFunction, Builder.getInt32(j),
Options.FunctionNamesInData ?
Builder.CreateGlobalStringPtr(getFunctionName(SP)) :
Constant::getNullValue(Builder.getInt8PtrTy()),
- Builder.getInt8(Options.UseCfgChecksum));
+ Builder.getInt32(FuncChecksum),
+ Builder.getInt8(Options.UseCfgChecksum),
+ Builder.getInt32(CfgChecksum));
GlobalVariable *GV = CountersBySP[j].first;
unsigned Arcs =
@@ -818,7 +912,7 @@ void GCOVProfiler::insertIndirectCounterIncrement() {
// uint64_t *counter = counters[pred];
// if (!counter) return;
Value *ZExtPred = Builder.CreateZExt(Pred, Builder.getInt64Ty());
- Arg = llvm::next(Fn->arg_begin());
+ Arg = std::next(Fn->arg_begin());
Arg->setName("counters");
Value *GEP = Builder.CreateGEP(Arg, ZExtPred);
Value *Counter = Builder.CreateLoad(GEP, "counter");
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp b/contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp
index b1bea38..ac1dd43 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp
@@ -13,8 +13,8 @@
//===----------------------------------------------------------------------===//
#include "llvm/InitializePasses.h"
-#include "llvm/PassRegistry.h"
#include "llvm-c/Initialization.h"
+#include "llvm/PassRegistry.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index d547adc..57e308c 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -10,8 +10,6 @@
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
///
-/// Status: early prototype.
-///
/// The algorithm of the tool is similar to Memcheck
/// (http://goo.gl/QKbem). We associate a few shadow bits with every
/// byte of the application memory, poison the shadow of the malloc-ed
@@ -93,24 +91,23 @@
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "msan"
-
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
-#include "llvm/ADT/ValueMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
-#include "llvm/InstVisitor.h"
+#include "llvm/IR/ValueMap.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
@@ -118,10 +115,11 @@
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
-#include "llvm/Transforms/Utils/SpecialCaseList.h"
using namespace llvm;
+#define DEBUG_TYPE "msan"
+
static const uint64_t kShadowMask32 = 1ULL << 31;
static const uint64_t kShadowMask64 = 1ULL << 46;
static const uint64_t kOriginOffset32 = 1ULL << 30;
@@ -129,13 +127,16 @@ static const uint64_t kOriginOffset64 = 1ULL << 45;
static const unsigned kMinOriginAlignment = 4;
static const unsigned kShadowTLSAlignment = 8;
+// Access sizes are powers of two: 1, 2, 4, 8.
+// Access sizes are powers of two: 1, 2, 4, 8.
+static const size_t kNumberOfAccessSizes = 4;
+
/// \brief Track origins of uninitialized values.
///
/// Adds a section to MemorySanitizer report that points to the allocation
/// (stack or heap) the uninitialized bits came from originally.
-static cl::opt<bool> ClTrackOrigins("msan-track-origins",
+static cl::opt<int> ClTrackOrigins("msan-track-origins",
cl::desc("Track origins (allocation sites) of poisoned memory"),
- cl::Hidden, cl::init(false));
+ cl::Hidden, cl::init(0));
static cl::opt<bool> ClKeepGoing("msan-keep-going",
cl::desc("keep going after reporting a UMR"),
cl::Hidden, cl::init(false));
@@ -160,10 +161,6 @@ static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
cl::desc("exact handling of relational integer ICmp"),
cl::Hidden, cl::init(false));
-static cl::opt<bool> ClStoreCleanOrigin("msan-store-clean-origin",
- cl::desc("store origin for clean (fully initialized) values"),
- cl::Hidden, cl::init(false));
-
// This flag controls whether we check the shadow of the address
// operand of load or store. Such bugs are very rare, since load from
// a garbage address typically results in SEGV, but still happen
@@ -178,9 +175,13 @@ static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
cl::desc("print out instructions with default strict semantics"),
cl::Hidden, cl::init(false));
-static cl::opt<std::string> ClBlacklistFile("msan-blacklist",
- cl::desc("File containing the list of functions where MemorySanitizer "
- "should not report bugs"), cl::Hidden);
+static cl::opt<int> ClInstrumentationWithCallThreshold(
+ "msan-instrumentation-with-call-threshold",
+ cl::desc(
+ "If the function being instrumented requires more than "
+ "this number of checks and origin stores, use callbacks instead of "
+ "inline checks (-1 means never use callbacks)."),
+ cl::Hidden, cl::init(3500));
// Experimental. Wraps all indirect calls in the instrumented code with
// a call to the given function. This is needed to assist the dynamic
@@ -203,26 +204,24 @@ namespace {
/// uninitialized reads.
class MemorySanitizer : public FunctionPass {
public:
- MemorySanitizer(bool TrackOrigins = false,
- StringRef BlacklistFile = StringRef())
+ MemorySanitizer(int TrackOrigins = 0)
: FunctionPass(ID),
- TrackOrigins(TrackOrigins || ClTrackOrigins),
- TD(0),
- WarningFn(0),
- BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile : BlacklistFile),
+ TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
+ DL(nullptr),
+ WarningFn(nullptr),
WrapIndirectCalls(!ClWrapIndirectCalls.empty()) {}
- const char *getPassName() const { return "MemorySanitizer"; }
- bool runOnFunction(Function &F);
- bool doInitialization(Module &M);
+ const char *getPassName() const override { return "MemorySanitizer"; }
+ bool runOnFunction(Function &F) override;
+ bool doInitialization(Module &M) override;
static char ID; // Pass identification, replacement for typeid.
private:
void initializeCallbacks(Module &M);
/// \brief Track origins (allocation points) of uninitialized values.
- bool TrackOrigins;
+ int TrackOrigins;
- DataLayout *TD;
+ const DataLayout *DL;
LLVMContext *C;
Type *IntptrTy;
Type *OriginTy;
@@ -249,13 +248,18 @@ class MemorySanitizer : public FunctionPass {
/// \brief The run-time callback to print a warning.
Value *WarningFn;
- /// \brief Run-time helper that copies origin info for a memory range.
- Value *MsanCopyOriginFn;
+ // These arrays are indexed by log2(AccessSize).
+ Value *MaybeWarningFn[kNumberOfAccessSizes];
+ Value *MaybeStoreOriginFn[kNumberOfAccessSizes];
+
/// \brief Run-time helper that generates a new origin value for a stack
/// allocation.
Value *MsanSetAllocaOrigin4Fn;
/// \brief Run-time helper that poisons stack on function entry.
Value *MsanPoisonStackFn;
+ /// \brief Run-time helper that records a store (or any event) of an
+ /// uninitialized value and returns an updated origin id encoding this info.
+ Value *MsanChainOriginFn;
/// \brief MSan runtime replacements for memmove, memcpy and memset.
Value *MemmoveFn, *MemcpyFn, *MemsetFn;
@@ -269,10 +273,6 @@ class MemorySanitizer : public FunctionPass {
MDNode *ColdCallWeights;
/// \brief Branch weights for origin store.
MDNode *OriginStoreWeights;
- /// \brief Path to blacklist file.
- SmallString<64> BlacklistFile;
- /// \brief The blacklist.
- OwningPtr<SpecialCaseList> BL;
/// \brief An empty volatile inline asm that prevents callback merge.
InlineAsm *EmptyAsm;
@@ -292,9 +292,8 @@ INITIALIZE_PASS(MemorySanitizer, "msan",
"MemorySanitizer: detects uninitialized reads.",
false, false)
-FunctionPass *llvm::createMemorySanitizerPass(bool TrackOrigins,
- StringRef BlacklistFile) {
- return new MemorySanitizer(TrackOrigins, BlacklistFile);
+FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins) {
+ return new MemorySanitizer(TrackOrigins);
}
/// \brief Create a non-const global initialized with the given string.
@@ -324,14 +323,27 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
: "__msan_warning_noreturn";
WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), NULL);
- MsanCopyOriginFn = M.getOrInsertFunction(
- "__msan_copy_origin", IRB.getVoidTy(), IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IntptrTy, NULL);
+ for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
+ AccessSizeIndex++) {
+ unsigned AccessSize = 1 << AccessSizeIndex;
+ std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
+ MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
+ FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
+ IRB.getInt32Ty(), NULL);
+
+ FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
+ MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
+ FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
+ IRB.getInt8PtrTy(), IRB.getInt32Ty(), NULL);
+ }
+
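The loop above declares eight per-size runtime hooks. A sketch of the C prototypes they are expected to resolve to; the implementations live in compiler-rt, so the exact signatures are an assumption here:

    #include <cstdint>
    extern "C" {
    void __msan_maybe_warning_1(uint8_t  shadow, uint32_t origin);
    void __msan_maybe_warning_2(uint16_t shadow, uint32_t origin);
    void __msan_maybe_warning_4(uint32_t shadow, uint32_t origin);
    void __msan_maybe_warning_8(uint64_t shadow, uint32_t origin);
    void __msan_maybe_store_origin_1(uint8_t  shadow, void *addr, uint32_t origin);
    void __msan_maybe_store_origin_2(uint16_t shadow, void *addr, uint32_t origin);
    void __msan_maybe_store_origin_4(uint32_t shadow, void *addr, uint32_t origin);
    void __msan_maybe_store_origin_8(uint64_t shadow, void *addr, uint32_t origin);
    }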
MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
"__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
IRB.getInt8PtrTy(), IntptrTy, NULL);
MsanPoisonStackFn = M.getOrInsertFunction(
"__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, NULL);
+ MsanChainOriginFn = M.getOrInsertFunction(
+ "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty(), NULL);
MemmoveFn = M.getOrInsertFunction(
"__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
IRB.getInt8PtrTy(), IntptrTy, NULL);
@@ -345,31 +357,32 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
// Create globals.
RetvalTLS = new GlobalVariable(
M, ArrayType::get(IRB.getInt64Ty(), 8), false,
- GlobalVariable::ExternalLinkage, 0, "__msan_retval_tls", 0,
+ GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_tls", nullptr,
GlobalVariable::InitialExecTLSModel);
RetvalOriginTLS = new GlobalVariable(
- M, OriginTy, false, GlobalVariable::ExternalLinkage, 0,
- "__msan_retval_origin_tls", 0, GlobalVariable::InitialExecTLSModel);
+ M, OriginTy, false, GlobalVariable::ExternalLinkage, nullptr,
+ "__msan_retval_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);
ParamTLS = new GlobalVariable(
M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
- GlobalVariable::ExternalLinkage, 0, "__msan_param_tls", 0,
+ GlobalVariable::ExternalLinkage, nullptr, "__msan_param_tls", nullptr,
GlobalVariable::InitialExecTLSModel);
ParamOriginTLS = new GlobalVariable(
M, ArrayType::get(OriginTy, 1000), false, GlobalVariable::ExternalLinkage,
- 0, "__msan_param_origin_tls", 0, GlobalVariable::InitialExecTLSModel);
+ nullptr, "__msan_param_origin_tls", nullptr,
+ GlobalVariable::InitialExecTLSModel);
VAArgTLS = new GlobalVariable(
M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
- GlobalVariable::ExternalLinkage, 0, "__msan_va_arg_tls", 0,
+ GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_tls", nullptr,
GlobalVariable::InitialExecTLSModel);
VAArgOverflowSizeTLS = new GlobalVariable(
- M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, 0,
- "__msan_va_arg_overflow_size_tls", 0,
+ M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
+ "__msan_va_arg_overflow_size_tls", nullptr,
GlobalVariable::InitialExecTLSModel);
OriginTLS = new GlobalVariable(
- M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, 0,
- "__msan_origin_tls", 0, GlobalVariable::InitialExecTLSModel);
+ M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
+ "__msan_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);
// We insert an empty inline asm after __msan_report* to avoid callback merge.
EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
@@ -383,14 +396,14 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
ClWrapIndirectCalls, AnyFunctionPtrTy, AnyFunctionPtrTy, NULL);
}
- if (ClWrapIndirectCallsFast) {
+ if (WrapIndirectCalls && ClWrapIndirectCallsFast) {
MsandrModuleStart = new GlobalVariable(
M, IRB.getInt32Ty(), false, GlobalValue::ExternalLinkage,
- 0, "__executable_start");
+ nullptr, "__executable_start");
MsandrModuleStart->setVisibility(GlobalVariable::HiddenVisibility);
MsandrModuleEnd = new GlobalVariable(
M, IRB.getInt32Ty(), false, GlobalValue::ExternalLinkage,
- 0, "_end");
+ nullptr, "_end");
MsandrModuleEnd->setVisibility(GlobalVariable::HiddenVisibility);
}
}
@@ -399,12 +412,13 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
///
/// inserts a call to __msan_init to the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
- TD = getAnalysisIfAvailable<DataLayout>();
- if (!TD)
- return false;
- BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ if (!DLP)
+ report_fatal_error("data layout missing");
+ DL = &DLP->getDataLayout();
+
C = &(M.getContext());
- unsigned PtrSize = TD->getPointerSizeInBits(/* AddressSpace */0);
+ unsigned PtrSize = DL->getPointerSizeInBits(/* AddressSpace */0);
switch (PtrSize) {
case 64:
ShadowMask = kShadowMask64;
@@ -420,7 +434,7 @@ bool MemorySanitizer::doInitialization(Module &M) {
}
IRBuilder<> IRB(*C);
- IntptrTy = IRB.getIntPtrTy(TD);
+ IntptrTy = IRB.getIntPtrTy(DL);
OriginTy = IRB.getInt32Ty();
ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
@@ -476,6 +490,11 @@ VarArgHelper*
CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
MemorySanitizerVisitor &Visitor);
+unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
+ if (TypeSize <= 8) return 0;
+ return Log2_32_Ceil(TypeSize / 8);
+}
+
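TypeSizeToSizeIndex maps an access size in bits to an index into those per-size callback arrays (kNumberOfAccessSizes = 4, so indices 0 through 3). A self-contained restatement with the expected values; sizeIndex is a hypothetical name used only for this sketch:

    #include <cassert>
    static unsigned sizeIndex(unsigned TypeSizeInBits) {
      if (TypeSizeInBits <= 8) return 0;
      unsigned Bytes = TypeSizeInBits / 8;     // integer division, as above
      unsigned Index = 0;
      while ((1u << Index) < Bytes) ++Index;   // ceil(log2(Bytes))
      return Index;
    }
    int main() {
      assert(sizeIndex(8) == 0 && sizeIndex(16) == 1);  // 1- and 2-byte accesses
      assert(sizeIndex(32) == 2 && sizeIndex(64) == 3); // 4- and 8-byte accesses
    }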
/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
@@ -487,12 +506,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
MemorySanitizer &MS;
SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
ValueMap<Value*, Value*> ShadowMap, OriginMap;
- OwningPtr<VarArgHelper> VAHelper;
+ std::unique_ptr<VarArgHelper> VAHelper;
// The following flags disable parts of MSan instrumentation based on
// blacklist contents and command-line options.
bool InsertChecks;
- bool LoadShadow;
+ bool PropagateShadow;
bool PoisonStack;
bool PoisonUndef;
bool CheckReturnValue;
@@ -503,7 +522,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Instruction *OrigIns;
ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
: Shadow(S), Origin(O), OrigIns(I) { }
- ShadowOriginAndInsertPoint() : Shadow(0), Origin(0), OrigIns(0) { }
};
SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
SmallVector<Instruction*, 16> StoreList;
@@ -511,11 +529,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
: F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
- bool SanitizeFunction = !MS.BL->isIn(F) && F.getAttributes().hasAttribute(
- AttributeSet::FunctionIndex,
- Attribute::SanitizeMemory);
+ bool SanitizeFunction = F.getAttributes().hasAttribute(
+ AttributeSet::FunctionIndex, Attribute::SanitizeMemory);
InsertChecks = SanitizeFunction;
- LoadShadow = SanitizeFunction;
+ PropagateShadow = SanitizeFunction;
PoisonStack = SanitizeFunction && ClPoisonStack;
PoisonUndef = SanitizeFunction && ClPoisonUndef;
// FIXME: Consider using SpecialCaseList to specify a list of functions that
@@ -527,89 +544,119 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
<< F.getName() << "'\n");
}
- void materializeStores() {
- for (size_t i = 0, n = StoreList.size(); i < n; i++) {
- StoreInst& I = *dyn_cast<StoreInst>(StoreList[i]);
+ Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
+ if (MS.TrackOrigins <= 1) return V;
+ return IRB.CreateCall(MS.MsanChainOriginFn, V);
+ }
- IRBuilder<> IRB(&I);
- Value *Val = I.getValueOperand();
- Value *Addr = I.getPointerOperand();
- Value *Shadow = I.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
+ void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
+ unsigned Alignment, bool AsCall) {
+ if (isa<StructType>(Shadow->getType())) {
+ IRB.CreateAlignedStore(updateOrigin(Origin, IRB), getOriginPtr(Addr, IRB),
+ Alignment);
+ } else {
+ Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
+ // TODO(eugenis): handle non-zero constant shadow by inserting an
+ // unconditional check (can not simply fail compilation as this could
+ // be in the dead code).
+ if (isa<Constant>(ConvertedShadow)) return;
+ unsigned TypeSizeInBits =
+ MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
+ unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
+ if (AsCall && SizeIndex < kNumberOfAccessSizes) {
+ Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
+ Value *ConvertedShadow2 = IRB.CreateZExt(
+ ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
+ IRB.CreateCall3(Fn, ConvertedShadow2,
+ IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
+ Origin);
+ } else {
+ Value *Cmp = IRB.CreateICmpNE(
+ ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
+ Instruction *CheckTerm = SplitBlockAndInsertIfThen(
+ Cmp, IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
+ IRBuilder<> IRBNew(CheckTerm);
+ IRBNew.CreateAlignedStore(updateOrigin(Origin, IRBNew),
+ getOriginPtr(Addr, IRBNew), Alignment);
+ }
+ }
+ }
+
+ void materializeStores(bool InstrumentWithCalls) {
+ for (auto Inst : StoreList) {
+ StoreInst &SI = *dyn_cast<StoreInst>(Inst);
+
+ IRBuilder<> IRB(&SI);
+ Value *Val = SI.getValueOperand();
+ Value *Addr = SI.getPointerOperand();
+ Value *Shadow = SI.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
StoreInst *NewSI =
- IRB.CreateAlignedStore(Shadow, ShadowPtr, I.getAlignment());
+ IRB.CreateAlignedStore(Shadow, ShadowPtr, SI.getAlignment());
DEBUG(dbgs() << " STORE: " << *NewSI << "\n");
(void)NewSI;
- if (ClCheckAccessAddress)
- insertShadowCheck(Addr, &I);
+ if (ClCheckAccessAddress) insertShadowCheck(Addr, &SI);
- if (I.isAtomic())
- I.setOrdering(addReleaseOrdering(I.getOrdering()));
+ if (SI.isAtomic()) SI.setOrdering(addReleaseOrdering(SI.getOrdering()));
if (MS.TrackOrigins) {
- unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
- if (ClStoreCleanOrigin || isa<StructType>(Shadow->getType())) {
- IRB.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRB),
- Alignment);
- } else {
- Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
-
- // TODO(eugenis): handle non-zero constant shadow by inserting an
- // unconditional check (can not simply fail compilation as this could
- // be in the dead code).
- if (isa<Constant>(ConvertedShadow))
- continue;
-
- Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
- getCleanShadow(ConvertedShadow), "_mscmp");
- Instruction *CheckTerm =
- SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false,
- MS.OriginStoreWeights);
- IRBuilder<> IRBNew(CheckTerm);
- IRBNew.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRBNew),
- Alignment);
- }
+ unsigned Alignment = std::max(kMinOriginAlignment, SI.getAlignment());
+ storeOrigin(IRB, Addr, Shadow, getOrigin(Val), Alignment,
+ InstrumentWithCalls);
}
}
}
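A note on the origin plumbing above: TrackOrigins is now an integer level rather than a flag. updateOrigin() leaves the origin untouched at level 1 and only calls the __msan_chain_origin hook at level 2, where (as assumed here) each store of a poisoned value records a fresh origin id that links back to the previous one; storeOrigin() then either stores that id inline behind a shadow comparison or hands it to the per-size __msan_maybe_store_origin_N callback when call-based instrumentation is selected.

    #include <cstdint>
    // Assumed runtime contract for the hook used by updateOrigin(): returns a
    // new origin id that chains the current store back to prev_origin.
    extern "C" uint32_t __msan_chain_origin(uint32_t prev_origin);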
- void materializeChecks() {
- for (size_t i = 0, n = InstrumentationList.size(); i < n; i++) {
- Value *Shadow = InstrumentationList[i].Shadow;
- Instruction *OrigIns = InstrumentationList[i].OrigIns;
- IRBuilder<> IRB(OrigIns);
- DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n");
- Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
- DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n");
- // See the comment in materializeStores().
- if (isa<Constant>(ConvertedShadow))
- continue;
+ void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
+ bool AsCall) {
+ IRBuilder<> IRB(OrigIns);
+ DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n");
+ Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
+ DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n");
+ // See the comment in materializeStores().
+ if (isa<Constant>(ConvertedShadow)) return;
+ unsigned TypeSizeInBits =
+ MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
+ unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
+ if (AsCall && SizeIndex < kNumberOfAccessSizes) {
+ Value *Fn = MS.MaybeWarningFn[SizeIndex];
+ Value *ConvertedShadow2 =
+ IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
+ IRB.CreateCall2(Fn, ConvertedShadow2, MS.TrackOrigins && Origin
+ ? Origin
+ : (Value *)IRB.getInt32(0));
+ } else {
Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
getCleanShadow(ConvertedShadow), "_mscmp");
- Instruction *CheckTerm =
- SplitBlockAndInsertIfThen(cast<Instruction>(Cmp),
- /* Unreachable */ !ClKeepGoing,
- MS.ColdCallWeights);
+ Instruction *CheckTerm = SplitBlockAndInsertIfThen(
+ Cmp, OrigIns,
+ /* Unreachable */ !ClKeepGoing, MS.ColdCallWeights);
IRB.SetInsertPoint(CheckTerm);
if (MS.TrackOrigins) {
- Value *Origin = InstrumentationList[i].Origin;
- IRB.CreateStore(Origin ? (Value*)Origin : (Value*)IRB.getInt32(0),
+ IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
MS.OriginTLS);
}
- CallInst *Call = IRB.CreateCall(MS.WarningFn);
- Call->setDebugLoc(OrigIns->getDebugLoc());
+ IRB.CreateCall(MS.WarningFn);
IRB.CreateCall(MS.EmptyAsm);
DEBUG(dbgs() << " CHECK: " << *Cmp << "\n");
}
+ }
+
+ void materializeChecks(bool InstrumentWithCalls) {
+ for (const auto &ShadowData : InstrumentationList) {
+ Instruction *OrigIns = ShadowData.OrigIns;
+ Value *Shadow = ShadowData.Shadow;
+ Value *Origin = ShadowData.Origin;
+ materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
+ }
DEBUG(dbgs() << "DONE:\n" << F);
}
void materializeIndirectCalls() {
- for (size_t i = 0, n = IndirectCallList.size(); i < n; i++) {
- CallSite CS = IndirectCallList[i];
+ for (auto &CS : IndirectCallList) {
Instruction *I = CS.getInstruction();
BasicBlock *B = I->getParent();
IRBuilder<> IRB(I);
@@ -629,7 +676,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRB.CreatePHI(Fn0->getType(), 2, "msandr.indirect_target");
Instruction *CheckTerm = SplitBlockAndInsertIfThen(
- cast<Instruction>(NotInThisModule),
+ NotInThisModule, NewFnPhi,
/* Unreachable */ false, MS.ColdCallWeights);
IRB.SetInsertPoint(CheckTerm);
@@ -652,7 +699,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// \brief Add MemorySanitizer instrumentation to a function.
bool runOnFunction() {
MS.initializeCallbacks(*F.getParent());
- if (!MS.TD) return false;
+ if (!MS.DL) return false;
// In the presence of unreachable blocks, we may see Phi nodes with
// incoming nodes from such blocks. Since InstVisitor skips unreachable
@@ -663,33 +710,33 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// Iterate all BBs in depth-first order and create shadow instructions
// for all instructions (where applicable).
// For PHI nodes we create dummy shadow PHIs which will be finalized later.
- for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
- DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
- BasicBlock *BB = *DI;
+ for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
visit(*BB);
- }
+
// Finalize PHI nodes.
- for (size_t i = 0, n = ShadowPHINodes.size(); i < n; i++) {
- PHINode *PN = ShadowPHINodes[i];
+ for (PHINode *PN : ShadowPHINodes) {
PHINode *PNS = cast<PHINode>(getShadow(PN));
- PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : 0;
+ PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
size_t NumValues = PN->getNumIncomingValues();
for (size_t v = 0; v < NumValues; v++) {
PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
- if (PNO)
- PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
+ if (PNO) PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
}
}
VAHelper->finalizeInstrumentation();
+ bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
+ InstrumentationList.size() + StoreList.size() >
+ (unsigned)ClInstrumentationWithCallThreshold;
+
// Delayed instrumentation of StoreInst.
// This may add new checks to be inserted later.
- materializeStores();
+ materializeStores(InstrumentWithCalls);
// Insert shadow value checks.
- materializeChecks();
+ materializeChecks(InstrumentWithCalls);
// Wrap indirect calls.
materializeIndirectCalls();
@@ -705,14 +752,14 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// \brief Compute the shadow type that corresponds to a given Type.
Type *getShadowTy(Type *OrigTy) {
if (!OrigTy->isSized()) {
- return 0;
+ return nullptr;
}
// For integer type, shadow is the same as the original type.
// This may return weird-sized types like i1.
if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
return IT;
if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
- uint32_t EltSize = MS.TD->getTypeSizeInBits(VT->getElementType());
+ uint32_t EltSize = MS.DL->getTypeSizeInBits(VT->getElementType());
return VectorType::get(IntegerType::get(*MS.C, EltSize),
VT->getNumElements());
}
@@ -724,7 +771,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
return Res;
}
- uint32_t TypeSize = MS.TD->getTypeSizeInBits(OrigTy);
+ uint32_t TypeSize = MS.DL->getTypeSizeInBits(OrigTy);
return IntegerType::get(*MS.C, TypeSize);
}
@@ -785,7 +832,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// \brief Compute the origin address for a given function argument.
Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
int ArgOffset) {
- if (!MS.TrackOrigins) return 0;
+ if (!MS.TrackOrigins) return nullptr;
Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
@@ -808,7 +855,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// \brief Set SV to be the shadow value for V.
void setShadow(Value *V, Value *SV) {
assert(!ShadowMap.count(V) && "Values may only have one shadow");
- ShadowMap[V] = SV;
+ ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
}
/// \brief Set Origin to be the origin value for V.
@@ -826,7 +873,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Constant *getCleanShadow(Value *V) {
Type *ShadowTy = getShadowTy(V);
if (!ShadowTy)
- return 0;
+ return nullptr;
return Constant::getNullValue(ShadowTy);
}
@@ -846,7 +893,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Constant *getPoisonedShadow(Value *V) {
Type *ShadowTy = getShadowTy(V);
if (!ShadowTy)
- return 0;
+ return nullptr;
return getPoisonedShadow(ShadowTy);
}
@@ -860,6 +907,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// This function either returns the value set earlier with setShadow,
/// or extracts if from ParamTLS (for function arguments).
Value *getShadow(Value *V) {
+ if (!PropagateShadow) return getCleanShadow(V);
if (Instruction *I = dyn_cast<Instruction>(V)) {
// For instructions the shadow is already stored in the map.
Value *Shadow = ShadowMap[V];
@@ -884,25 +932,24 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Function *F = A->getParent();
IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
unsigned ArgOffset = 0;
- for (Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end();
- AI != AE; ++AI) {
- if (!AI->getType()->isSized()) {
+ for (auto &FArg : F->args()) {
+ if (!FArg.getType()->isSized()) {
DEBUG(dbgs() << "Arg is not sized\n");
continue;
}
- unsigned Size = AI->hasByValAttr()
- ? MS.TD->getTypeAllocSize(AI->getType()->getPointerElementType())
- : MS.TD->getTypeAllocSize(AI->getType());
- if (A == AI) {
- Value *Base = getShadowPtrForArgument(AI, EntryIRB, ArgOffset);
- if (AI->hasByValAttr()) {
+ unsigned Size = FArg.hasByValAttr()
+ ? MS.DL->getTypeAllocSize(FArg.getType()->getPointerElementType())
+ : MS.DL->getTypeAllocSize(FArg.getType());
+ if (A == &FArg) {
+ Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
+ if (FArg.hasByValAttr()) {
// ByVal pointer itself has clean shadow. We copy the actual
// argument shadow to the underlying memory.
// Figure out maximal valid memcpy alignment.
- unsigned ArgAlign = AI->getParamAlignment();
+ unsigned ArgAlign = FArg.getParamAlignment();
if (ArgAlign == 0) {
Type *EltType = A->getType()->getPointerElementType();
- ArgAlign = MS.TD->getABITypeAlignment(EltType);
+ ArgAlign = MS.DL->getABITypeAlignment(EltType);
}
unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
Value *Cpy = EntryIRB.CreateMemCpy(
@@ -914,10 +961,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
} else {
*ShadowPtr = EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
}
- DEBUG(dbgs() << " ARG: " << *AI << " ==> " <<
+ DEBUG(dbgs() << " ARG: " << FArg << " ==> " <<
**ShadowPtr << "\n");
if (MS.TrackOrigins) {
- Value* OriginPtr = getOriginPtrForArgument(AI, EntryIRB, ArgOffset);
+ Value *OriginPtr =
+ getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
}
}
@@ -937,7 +985,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// \brief Get the origin for a value.
Value *getOrigin(Value *V) {
- if (!MS.TrackOrigins) return 0;
+ if (!MS.TrackOrigins) return nullptr;
if (isa<Instruction>(V) || isa<Argument>(V)) {
Value *Origin = OriginMap[V];
if (!Origin) {
@@ -1027,7 +1075,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRBuilder<> IRB(I.getNextNode());
Type *ShadowTy = getShadowTy(&I);
Value *Addr = I.getPointerOperand();
- if (LoadShadow) {
+ if (PropagateShadow) {
Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
setShadow(&I,
IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
@@ -1042,7 +1090,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
I.setOrdering(addAcquireOrdering(I.getOrdering()));
if (MS.TrackOrigins) {
- if (LoadShadow) {
+ if (PropagateShadow) {
unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
setOrigin(&I,
IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB), Alignment));
@@ -1088,7 +1136,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
handleCASOrRMW(I);
- I.setOrdering(addReleaseOrdering(I.getOrdering()));
+ I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
}
// Vector manipulation.
@@ -1235,7 +1283,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
public:
Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
- Shadow(0), Origin(0), IRB(IRB), MSV(MSV) {}
+ Shadow(nullptr), Origin(nullptr), IRB(IRB), MSV(MSV) {}
/// \brief Add a pair of shadow and origin values to the mix.
Combiner &Add(Value *OpShadow, Value *OpOrigin) {
@@ -1254,10 +1302,14 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (!Origin) {
Origin = OpOrigin;
} else {
- Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
- Value *Cond = IRB.CreateICmpNE(FlatShadow,
- MSV->getCleanShadow(FlatShadow));
- Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
+ Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
+ // No point in adding something that might result in 0 origin value.
+ if (!ConstOrigin || !ConstOrigin->isNullValue()) {
+ Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
+ Value *Cond =
+ IRB.CreateICmpNE(FlatShadow, MSV->getCleanShadow(FlatShadow));
+ Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
+ }
}
}
return *this;
@@ -1266,7 +1318,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// \brief Add an application value to the mix.
Combiner &Add(Value *V) {
Value *OpShadow = MSV->getShadow(V);
- Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : 0;
+ Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
return Add(OpShadow, OpOrigin);
}
@@ -1325,6 +1377,17 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// TODO: handle struct types.
}
+ /// \brief Cast an application value to the type of its own shadow.
+ Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
+ Type *ShadowTy = getShadowTy(V);
+ if (V->getType() == ShadowTy)
+ return V;
+ if (V->getType()->isPtrOrPtrVectorTy())
+ return IRB.CreatePtrToInt(V, ShadowTy);
+ else
+ return IRB.CreateBitCast(V, ShadowTy);
+ }
+
/// \brief Propagate shadow for arbitrary operation.
void handleShadowOr(Instruction &I) {
IRBuilder<> IRB(&I);
@@ -1334,13 +1397,61 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
SC.Done(&I);
}
+ // \brief Handle multiplication by constant.
+ //
+ // Handle a special case of multiplication by constant that may have one or
+  // more zeros in the lower bits. This makes the corresponding number of lower bits
+ // of the result zero as well. We model it by shifting the other operand
+ // shadow left by the required number of bits. Effectively, we transform
+ // (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as (Sx << B).
+ // We use multiplication by 2**N instead of shift to cover the case of
+ // multiplication by 0, which may occur in some elements of a vector operand.
+ void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
+ Value *OtherArg) {
+ Constant *ShadowMul;
+ Type *Ty = ConstArg->getType();
+ if (Ty->isVectorTy()) {
+ unsigned NumElements = Ty->getVectorNumElements();
+ Type *EltTy = Ty->getSequentialElementType();
+ SmallVector<Constant *, 16> Elements;
+ for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
+ ConstantInt *Elt =
+ dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx));
+ APInt V = Elt->getValue();
+ APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
+ Elements.push_back(ConstantInt::get(EltTy, V2));
+ }
+ ShadowMul = ConstantVector::get(Elements);
+ } else {
+ ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg);
+ APInt V = Elt->getValue();
+ APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
+ ShadowMul = ConstantInt::get(Elt->getType(), V2);
+ }
+
+ IRBuilder<> IRB(&I);
+ setShadow(&I,
+ IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
+ setOrigin(&I, getOrigin(OtherArg));
+ }
+
+ void visitMul(BinaryOperator &I) {
+ Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
+ Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
+ if (constOp0 && !constOp1)
+ handleMulByConstant(I, constOp0, I.getOperand(1));
+ else if (constOp1 && !constOp0)
+ handleMulByConstant(I, constOp1, I.getOperand(0));
+ else
+ handleShadowOr(I);
+ }
+
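A worked example of the constant-multiply rule above: for I = X * 12 the constant has two trailing zero bits (12 = 3 * 2**2), so the low two bits of the product are always zero and always initialized; the shadow of I therefore becomes getShadow(X) * 4. Using multiplication by 2**B rather than a shift also makes the multiply-by-zero case fall out naturally, since the multiplier is then 0 and the result shadow is clean. A scalar sketch; shadowMultiplier is a hypothetical helper name:

    #include <cstdint>
    static uint64_t shadowMultiplier(uint64_t C, unsigned BitWidth) {
      unsigned TZ = 0;                                   // countTrailingZeros(C)
      while (TZ < BitWidth && ((C >> TZ) & 1) == 0) ++TZ;
      return TZ >= BitWidth ? 0 : (uint64_t(1) << TZ);   // 2**TZ: 12 -> 4, 0 -> 0
    }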
void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
void visitSub(BinaryOperator &I) { handleShadowOr(I); }
void visitXor(BinaryOperator &I) { handleShadowOr(I); }
- void visitMul(BinaryOperator &I) { handleShadowOr(I); }
void handleDiv(Instruction &I) {
IRBuilder<> IRB(&I);
@@ -1470,7 +1581,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void handleSignedRelationalComparison(ICmpInst &I) {
Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
- Value* op = NULL;
+ Value* op = nullptr;
CmpInst::Predicate pre = I.getPredicate();
if (constOp0 && constOp0->isNullValue() &&
(pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) {
@@ -1646,7 +1757,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *Addr = I.getArgOperand(0);
Type *ShadowTy = getShadowTy(&I);
- if (LoadShadow) {
+ if (PropagateShadow) {
Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
// We don't know the pointer alignment (could be unaligned SSE load!).
// Have to assume to worst case.
@@ -1659,7 +1770,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
insertShadowCheck(Addr, &I);
if (MS.TrackOrigins) {
- if (LoadShadow)
+ if (PropagateShadow)
setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB)));
else
setOrigin(&I, getCleanOrigin());
@@ -1779,7 +1890,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
break;
case 1:
ConvertOp = I.getArgOperand(0);
- CopyOp = NULL;
+ CopyOp = nullptr;
break;
default:
llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
@@ -1793,7 +1904,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// FIXME: consider propagating shadow of ConvertOp, at least in the case of
// int->any conversion.
Value *ConvertShadow = getShadow(ConvertOp);
- Value *AggShadow = 0;
+ Value *AggShadow = nullptr;
if (ConvertOp->getType()->isVectorTy()) {
AggShadow = IRB.CreateExtractElement(
ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
@@ -1827,6 +1938,162 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
}
+ // Given a scalar or vector, extract lower 64 bits (or less), and return all
+ // zeroes if it is zero, and all ones otherwise.
+ Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
+ if (S->getType()->isVectorTy())
+ S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
+ assert(S->getType()->getPrimitiveSizeInBits() <= 64);
+ Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
+ return CreateShadowCast(IRB, S2, T, /* Signed */ true);
+ }
+
+ Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
+ Type *T = S->getType();
+ assert(T->isVectorTy());
+ Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
+ return IRB.CreateSExt(S2, T);
+ }
+
+  // \brief Instrument vector shift intrinsic.
+ //
+ // This function instruments intrinsics like int_x86_avx2_psll_w.
+ // Intrinsic shifts %In by %ShiftSize bits.
+ // %ShiftSize may be a vector. In that case the lower 64 bits determine shift
+ // size, and the rest is ignored. Behavior is defined even if shift size is
+ // greater than register (or field) width.
+ void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
+ assert(I.getNumArgOperands() == 2);
+ IRBuilder<> IRB(&I);
+ // If any of the S2 bits are poisoned, the whole thing is poisoned.
+ // Otherwise perform the same shift on S1.
+ Value *S1 = getShadow(&I, 0);
+ Value *S2 = getShadow(&I, 1);
+ Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
+ : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
+ Value *V1 = I.getOperand(0);
+ Value *V2 = I.getOperand(1);
+ Value *Shift = IRB.CreateCall2(I.getCalledValue(),
+ IRB.CreateBitCast(S1, V1->getType()), V2);
+ Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
+ setShadow(&I, IRB.CreateOr(Shift, S2Conv));
+ setOriginForNaryOp(I);
+ }
+
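In other words, the vector-shift rule above applies the very same (possibly runtime) shift amount to the value shadow and then ORs in an all-ones mask if any bit of the shift-amount shadow is set, because a poisoned shift count poisons every bit of the result. A scalar model for a single 16-bit lane; shiftShadow and the fixed lane width are assumptions of this sketch:

    #include <cstdint>
    static uint16_t shiftShadow(uint16_t S1, unsigned ShiftAmt, uint64_t S2Lower64) {
      uint16_t S2Conv = S2Lower64 ? 0xFFFF : 0;  // Lower64ShadowExtend: any poison -> all ones
      uint16_t Shifted = ShiftAmt >= 16 ? 0 : uint16_t(S1 << ShiftAmt); // same shift on the value shadow
      return Shifted | S2Conv;
    }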
+ // \brief Get an X86_MMX-sized vector type.
+ Type *getMMXVectorTy(unsigned EltSizeInBits) {
+ const unsigned X86_MMXSizeInBits = 64;
+ return VectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
+ X86_MMXSizeInBits / EltSizeInBits);
+ }
+
+ // \brief Returns a signed counterpart for an (un)signed-saturate-and-pack
+ // intrinsic.
+ Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
+ switch (id) {
+ case llvm::Intrinsic::x86_sse2_packsswb_128:
+ case llvm::Intrinsic::x86_sse2_packuswb_128:
+ return llvm::Intrinsic::x86_sse2_packsswb_128;
+
+ case llvm::Intrinsic::x86_sse2_packssdw_128:
+ case llvm::Intrinsic::x86_sse41_packusdw:
+ return llvm::Intrinsic::x86_sse2_packssdw_128;
+
+ case llvm::Intrinsic::x86_avx2_packsswb:
+ case llvm::Intrinsic::x86_avx2_packuswb:
+ return llvm::Intrinsic::x86_avx2_packsswb;
+
+ case llvm::Intrinsic::x86_avx2_packssdw:
+ case llvm::Intrinsic::x86_avx2_packusdw:
+ return llvm::Intrinsic::x86_avx2_packssdw;
+
+ case llvm::Intrinsic::x86_mmx_packsswb:
+ case llvm::Intrinsic::x86_mmx_packuswb:
+ return llvm::Intrinsic::x86_mmx_packsswb;
+
+ case llvm::Intrinsic::x86_mmx_packssdw:
+ return llvm::Intrinsic::x86_mmx_packssdw;
+ default:
+ llvm_unreachable("unexpected intrinsic id");
+ }
+ }
+
+  // \brief Instrument vector pack intrinsic.
+ //
+ // This function instruments intrinsics like x86_mmx_packsswb, which pack
+ // elements of 2 input vectors into half as many bits with saturation.
+ // Shadow is propagated with the signed variant of the same intrinsic applied
+ // to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
+ // EltSizeInBits is used only for x86mmx arguments.
+ void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
+ assert(I.getNumArgOperands() == 2);
+ bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
+ IRBuilder<> IRB(&I);
+ Value *S1 = getShadow(&I, 0);
+ Value *S2 = getShadow(&I, 1);
+ assert(isX86_MMX || S1->getType()->isVectorTy());
+
+ // SExt and ICmpNE below must apply to individual elements of input vectors.
+ // In case of x86mmx arguments, cast them to appropriate vector types and
+ // back.
+ Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();
+ if (isX86_MMX) {
+ S1 = IRB.CreateBitCast(S1, T);
+ S2 = IRB.CreateBitCast(S2, T);
+ }
+ Value *S1_ext = IRB.CreateSExt(
+ IRB.CreateICmpNE(S1, llvm::Constant::getNullValue(T)), T);
+ Value *S2_ext = IRB.CreateSExt(
+ IRB.CreateICmpNE(S2, llvm::Constant::getNullValue(T)), T);
+ if (isX86_MMX) {
+ Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
+ S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
+ S2_ext = IRB.CreateBitCast(S2_ext, X86_MMXTy);
+ }
+
+ Function *ShadowFn = Intrinsic::getDeclaration(
+ F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
+
+ Value *S = IRB.CreateCall2(ShadowFn, S1_ext, S2_ext, "_msprop_vector_pack");
+ if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
+ setShadow(&I, S);
+ setOriginForNaryOp(I);
+ }
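The reason the shadow goes through the signed pack of sext(S != 0): a lane with any poisoned bit becomes the all-ones value -1, which signed saturation keeps as all ones in the narrow lane, while a clean lane stays zero. A scalar sketch of one packed lane, with illustrative helper names:

#include <algorithm>
#include <cassert>
#include <cstdint>

// packsswb-style narrowing: clamp a 16-bit lane into 8 bits with signed
// saturation.
int8_t packSS(int16_t V) {
  return static_cast<int8_t>(std::min<int16_t>(127, std::max<int16_t>(-128, V)));
}

// Shadow of one packed lane: sign-extend (S != 0) to the wide lane, then run
// the same signed pack on it (scalar model, not the pass itself).
int8_t packShadowLane(uint16_t S) {
  int16_t Mask = (S != 0) ? int16_t(-1) : int16_t(0);
  return packSS(Mask);
}

int main() {
  assert(packShadowLane(0x0000) == 0);                          // clean stays clean
  assert(static_cast<uint8_t>(packShadowLane(0x0100)) == 0xFF); // any poison -> all ones
  return 0;
}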
+
+ // \brief Instrument sum-of-absolute-differences intrinsic.
+ void handleVectorSadIntrinsic(IntrinsicInst &I) {
+ const unsigned SignificantBitsPerResultElement = 16;
+ bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
+ Type *ResTy = isX86_MMX ? IntegerType::get(*MS.C, 64) : I.getType();
+ unsigned ZeroBitsPerResultElement =
+ ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;
+
+ IRBuilder<> IRB(&I);
+ Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
+ S = IRB.CreateBitCast(S, ResTy);
+ S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
+ ResTy);
+ S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
+ S = IRB.CreateBitCast(S, getShadowTy(&I));
+ setShadow(&I, S);
+ setOriginForNaryOp(I);
+ }
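Each 64-bit result element of a psad intrinsic carries real data only in its low 16 bits, so the handler ORs the input shadows, turns any poison into all ones, and shifts the poison down into just those significant bits. A minimal scalar model, not the pass itself:

#include <cassert>
#include <cstdint>

// Scalar model of handleVectorSadIntrinsic's shadow math for one 64-bit result
// element: only the low 16 bits carry data, so only those bits may be poisoned.
uint64_t sadShadow(uint64_t Sa, uint64_t Sb) {
  const unsigned SignificantBits = 16;
  const unsigned ZeroBits = 64 - SignificantBits;
  uint64_t S = Sa | Sb;                   // poison from either input
  uint64_t All = (S != 0) ? ~0ULL : 0ULL; // sext(S != 0)
  return All >> ZeroBits;                 // poison only in the low 16 bits
}

int main() {
  assert(sadShadow(0, 0) == 0);
  assert(sadShadow(0x4, 0) == 0xFFFF);
  return 0;
}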
+
+ // \brief Instrument multiply-add intrinsic.
+ void handleVectorPmaddIntrinsic(IntrinsicInst &I,
+ unsigned EltSizeInBits = 0) {
+ bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
+ Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.getType();
+ IRBuilder<> IRB(&I);
+ Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
+ S = IRB.CreateBitCast(S, ResTy);
+ S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
+ ResTy);
+ S = IRB.CreateBitCast(S, getShadowTy(&I));
+ setShadow(&I, S);
+ setOriginForNaryOp(I);
+ }
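The multiply-add handler ORs the two input shadows, reinterprets them at twice the element width, and marks a result element fully poisoned if anything in its pair of source lanes was poisoned. A rough per-element sketch with made-up helper names:

#include <cassert>
#include <cstdint>

// Scalar model of handleVectorPmaddIntrinsic for one 32-bit result element
// built from two adjacent 16-bit input lanes.
uint32_t pmaddShadowElement(uint16_t Sa0, uint16_t Sa1,
                            uint16_t Sb0, uint16_t Sb1) {
  // Or the input shadows, then view the two 16-bit lanes as one 32-bit lane.
  uint32_t S = (uint32_t(Sa1 | Sb1) << 16) | uint32_t(Sa0 | Sb0);
  // sext(S != 0): any poisoned input bit poisons the whole result element.
  return (S != 0) ? ~0u : 0u;
}

int main() {
  assert(pmaddShadowElement(0, 0, 0, 0) == 0);
  assert(pmaddShadowElement(0, 0x8, 0, 0) == ~0u);
  return 0;
}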
+
void visitIntrinsicInst(IntrinsicInst &I) {
switch (I.getIntrinsicID()) {
case llvm::Intrinsic::bswap:
@@ -1866,6 +2133,124 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
case llvm::Intrinsic::x86_sse_cvttps2pi:
handleVectorConvertIntrinsic(I, 2);
break;
+ case llvm::Intrinsic::x86_avx512_psll_dq:
+ case llvm::Intrinsic::x86_avx512_psrl_dq:
+ case llvm::Intrinsic::x86_avx2_psll_w:
+ case llvm::Intrinsic::x86_avx2_psll_d:
+ case llvm::Intrinsic::x86_avx2_psll_q:
+ case llvm::Intrinsic::x86_avx2_pslli_w:
+ case llvm::Intrinsic::x86_avx2_pslli_d:
+ case llvm::Intrinsic::x86_avx2_pslli_q:
+ case llvm::Intrinsic::x86_avx2_psll_dq:
+ case llvm::Intrinsic::x86_avx2_psrl_w:
+ case llvm::Intrinsic::x86_avx2_psrl_d:
+ case llvm::Intrinsic::x86_avx2_psrl_q:
+ case llvm::Intrinsic::x86_avx2_psra_w:
+ case llvm::Intrinsic::x86_avx2_psra_d:
+ case llvm::Intrinsic::x86_avx2_psrli_w:
+ case llvm::Intrinsic::x86_avx2_psrli_d:
+ case llvm::Intrinsic::x86_avx2_psrli_q:
+ case llvm::Intrinsic::x86_avx2_psrai_w:
+ case llvm::Intrinsic::x86_avx2_psrai_d:
+ case llvm::Intrinsic::x86_avx2_psrl_dq:
+ case llvm::Intrinsic::x86_sse2_psll_w:
+ case llvm::Intrinsic::x86_sse2_psll_d:
+ case llvm::Intrinsic::x86_sse2_psll_q:
+ case llvm::Intrinsic::x86_sse2_pslli_w:
+ case llvm::Intrinsic::x86_sse2_pslli_d:
+ case llvm::Intrinsic::x86_sse2_pslli_q:
+ case llvm::Intrinsic::x86_sse2_psll_dq:
+ case llvm::Intrinsic::x86_sse2_psrl_w:
+ case llvm::Intrinsic::x86_sse2_psrl_d:
+ case llvm::Intrinsic::x86_sse2_psrl_q:
+ case llvm::Intrinsic::x86_sse2_psra_w:
+ case llvm::Intrinsic::x86_sse2_psra_d:
+ case llvm::Intrinsic::x86_sse2_psrli_w:
+ case llvm::Intrinsic::x86_sse2_psrli_d:
+ case llvm::Intrinsic::x86_sse2_psrli_q:
+ case llvm::Intrinsic::x86_sse2_psrai_w:
+ case llvm::Intrinsic::x86_sse2_psrai_d:
+ case llvm::Intrinsic::x86_sse2_psrl_dq:
+ case llvm::Intrinsic::x86_mmx_psll_w:
+ case llvm::Intrinsic::x86_mmx_psll_d:
+ case llvm::Intrinsic::x86_mmx_psll_q:
+ case llvm::Intrinsic::x86_mmx_pslli_w:
+ case llvm::Intrinsic::x86_mmx_pslli_d:
+ case llvm::Intrinsic::x86_mmx_pslli_q:
+ case llvm::Intrinsic::x86_mmx_psrl_w:
+ case llvm::Intrinsic::x86_mmx_psrl_d:
+ case llvm::Intrinsic::x86_mmx_psrl_q:
+ case llvm::Intrinsic::x86_mmx_psra_w:
+ case llvm::Intrinsic::x86_mmx_psra_d:
+ case llvm::Intrinsic::x86_mmx_psrli_w:
+ case llvm::Intrinsic::x86_mmx_psrli_d:
+ case llvm::Intrinsic::x86_mmx_psrli_q:
+ case llvm::Intrinsic::x86_mmx_psrai_w:
+ case llvm::Intrinsic::x86_mmx_psrai_d:
+ handleVectorShiftIntrinsic(I, /* Variable */ false);
+ break;
+ case llvm::Intrinsic::x86_avx2_psllv_d:
+ case llvm::Intrinsic::x86_avx2_psllv_d_256:
+ case llvm::Intrinsic::x86_avx2_psllv_q:
+ case llvm::Intrinsic::x86_avx2_psllv_q_256:
+ case llvm::Intrinsic::x86_avx2_psrlv_d:
+ case llvm::Intrinsic::x86_avx2_psrlv_d_256:
+ case llvm::Intrinsic::x86_avx2_psrlv_q:
+ case llvm::Intrinsic::x86_avx2_psrlv_q_256:
+ case llvm::Intrinsic::x86_avx2_psrav_d:
+ case llvm::Intrinsic::x86_avx2_psrav_d_256:
+ handleVectorShiftIntrinsic(I, /* Variable */ true);
+ break;
+
+ // Byte shifts are not implemented.
+ // case llvm::Intrinsic::x86_avx512_psll_dq_bs:
+ // case llvm::Intrinsic::x86_avx512_psrl_dq_bs:
+ // case llvm::Intrinsic::x86_avx2_psll_dq_bs:
+ // case llvm::Intrinsic::x86_avx2_psrl_dq_bs:
+ // case llvm::Intrinsic::x86_sse2_psll_dq_bs:
+ // case llvm::Intrinsic::x86_sse2_psrl_dq_bs:
+
+ case llvm::Intrinsic::x86_sse2_packsswb_128:
+ case llvm::Intrinsic::x86_sse2_packssdw_128:
+ case llvm::Intrinsic::x86_sse2_packuswb_128:
+ case llvm::Intrinsic::x86_sse41_packusdw:
+ case llvm::Intrinsic::x86_avx2_packsswb:
+ case llvm::Intrinsic::x86_avx2_packssdw:
+ case llvm::Intrinsic::x86_avx2_packuswb:
+ case llvm::Intrinsic::x86_avx2_packusdw:
+ handleVectorPackIntrinsic(I);
+ break;
+
+ case llvm::Intrinsic::x86_mmx_packsswb:
+ case llvm::Intrinsic::x86_mmx_packuswb:
+ handleVectorPackIntrinsic(I, 16);
+ break;
+
+ case llvm::Intrinsic::x86_mmx_packssdw:
+ handleVectorPackIntrinsic(I, 32);
+ break;
+
+ case llvm::Intrinsic::x86_mmx_psad_bw:
+ case llvm::Intrinsic::x86_sse2_psad_bw:
+ case llvm::Intrinsic::x86_avx2_psad_bw:
+ handleVectorSadIntrinsic(I);
+ break;
+
+ case llvm::Intrinsic::x86_sse2_pmadd_wd:
+ case llvm::Intrinsic::x86_avx2_pmadd_wd:
+ case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw_128:
+ case llvm::Intrinsic::x86_avx2_pmadd_ub_sw:
+ handleVectorPmaddIntrinsic(I);
+ break;
+
+ case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw:
+ handleVectorPmaddIntrinsic(I, 8);
+ break;
+
+ case llvm::Intrinsic::x86_mmx_pmadd_wd:
+ handleVectorPmaddIntrinsic(I, 16);
+ break;
+
default:
if (!handleUnknownIntrinsic(I))
visitInstruction(I);
@@ -1887,12 +2272,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
return;
}
- // Allow only tail calls with the same types, otherwise
- // we may have a false positive: shadow for a non-void RetVal
- // will get propagated to a void RetVal.
- if (Call->isTailCall() && Call->getType() != Call->getParent()->getType())
- Call->setTailCall(false);
-
assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");
// We are going to insert code that relies on the fact that the callee
@@ -1926,7 +2305,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
continue;
}
unsigned Size = 0;
- Value *Store = 0;
+ Value *Store = nullptr;
// Compute the Shadow for arg even if it is ByVal, because
// in that case getShadow() will copy the actual arg shadow to
// __msan_param_tls.
@@ -1934,24 +2313,27 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
DEBUG(dbgs() << " Arg#" << i << ": " << *A <<
" Shadow: " << *ArgShadow << "\n");
+ bool ArgIsInitialized = false;
if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
assert(A->getType()->isPointerTy() &&
"ByVal argument is not a pointer!");
- Size = MS.TD->getTypeAllocSize(A->getType()->getPointerElementType());
+ Size = MS.DL->getTypeAllocSize(A->getType()->getPointerElementType());
unsigned Alignment = CS.getParamAlignment(i + 1);
Store = IRB.CreateMemCpy(ArgShadowBase,
getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
Size, Alignment);
} else {
- Size = MS.TD->getTypeAllocSize(A->getType());
+ Size = MS.DL->getTypeAllocSize(A->getType());
Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
kShadowTLSAlignment);
+ Constant *Cst = dyn_cast<Constant>(ArgShadow);
+ if (Cst && Cst->isNullValue()) ArgIsInitialized = true;
}
- if (MS.TrackOrigins)
+ if (MS.TrackOrigins && !ArgIsInitialized)
IRB.CreateStore(getOrigin(A),
getOriginPtrForArgument(A, IRB, ArgOffset));
(void)Store;
- assert(Size != 0 && Store != 0);
+ assert(Size != 0 && Store != nullptr);
DEBUG(dbgs() << " Param:" << *Store << "\n");
ArgOffset += DataLayout::RoundUpAlignment(Size, 8);
}
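Argument shadow for a call is laid out in __msan_param_tls back to back, each slot rounded up to 8 bytes, and with the ArgIsInitialized change the origin store is skipped whenever an argument's shadow folds to a zero constant. A small model of that bookkeeping, with illustrative types and names:

#include <cstdint>
#include <utility>
#include <vector>

struct CallArg {
  uint64_t Size;          // getTypeAllocSize of the argument
  bool ShadowIsZeroConst; // shadow folded to a null constant
};

// Model of the per-call shadow layout: returns each argument's shadow offset
// and whether an origin store would be emitted for it (origin tracking on).
std::vector<std::pair<uint64_t, bool>>
layoutParamTls(const std::vector<CallArg> &Args) {
  std::vector<std::pair<uint64_t, bool>> Result;
  uint64_t ArgOffset = 0;
  for (const CallArg &A : Args) {
    bool NeedsOriginStore = !A.ShadowIsZeroConst; // ArgIsInitialized skips it
    Result.push_back({ArgOffset, NeedsOriginStore});
    ArgOffset += (A.Size + 7) & ~uint64_t(7);     // RoundUpAlignment(Size, 8)
  }
  return Result;
}

int main() {
  auto L = layoutParamTls({{4, true}, {16, false}});
  // The 4-byte argument still takes an 8-byte slot; its origin store is skipped.
  return (L[1].first == 8 && !L[0].second && L[1].second) ? 0 : 1;
}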
@@ -1966,10 +2348,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// Now, get the shadow for the RetVal.
if (!I.getType()->isSized()) return;
IRBuilder<> IRBBefore(&I);
- // Untill we have full dynamic coverage, make sure the retval shadow is 0.
+ // Until we have full dynamic coverage, make sure the retval shadow is 0.
Value *Base = getShadowPtrForRetval(&I, IRBBefore);
IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
- Instruction *NextInsn = 0;
+ Instruction *NextInsn = nullptr;
if (CS.isCall()) {
NextInsn = I.getNextNode();
} else {
@@ -2015,6 +2397,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void visitPHINode(PHINode &I) {
IRBuilder<> IRB(&I);
+ if (!PropagateShadow) {
+ setShadow(&I, getCleanShadow(&I));
+ return;
+ }
+
ShadowPHINodes.push_back(&I);
setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
"_msphi_s"));
@@ -2026,7 +2413,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void visitAllocaInst(AllocaInst &I) {
setShadow(&I, getCleanShadow(&I));
IRBuilder<> IRB(I.getNextNode());
- uint64_t Size = MS.TD->getTypeAllocSize(I.getAllocatedType());
+ uint64_t Size = MS.DL->getTypeAllocSize(I.getAllocatedType());
if (PoisonStack && ClPoisonStackWithCall) {
IRB.CreateCall2(MS.MsanPoisonStackFn,
IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
@@ -2062,33 +2449,51 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void visitSelectInst(SelectInst& I) {
IRBuilder<> IRB(&I);
// a = select b, c, d
- Value *S = IRB.CreateSelect(I.getCondition(), getShadow(I.getTrueValue()),
- getShadow(I.getFalseValue()));
+ Value *B = I.getCondition();
+ Value *C = I.getTrueValue();
+ Value *D = I.getFalseValue();
+ Value *Sb = getShadow(B);
+ Value *Sc = getShadow(C);
+ Value *Sd = getShadow(D);
+
+ // Result shadow if condition shadow is 0.
+ Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
+ Value *Sa1;
if (I.getType()->isAggregateType()) {
// To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
// an extra "select". This results in much more compact IR.
// Sa = select Sb, poisoned, (select b, Sc, Sd)
- S = IRB.CreateSelect(getShadow(I.getCondition()),
- getPoisonedShadow(getShadowTy(I.getType())), S,
- "_msprop_select_agg");
+ Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
} else {
- // Sa = (sext Sb) | (select b, Sc, Sd)
- S = IRB.CreateOr(S, CreateShadowCast(IRB, getShadow(I.getCondition()),
- S->getType(), true),
- "_msprop_select");
+ // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
+ // If Sb is set (the condition is poisoned), look for bits in c and d that
+ // are equal and both unpoisoned.
+ // If Sb is clear (the condition is unpoisoned), simply pick one of Sc and Sd.
+
+ // Cast arguments to shadow-compatible type.
+ C = CreateAppToShadowCast(IRB, C);
+ D = CreateAppToShadowCast(IRB, D);
+
+ // Result shadow if condition shadow is 1.
+ Sa1 = IRB.CreateOr(IRB.CreateXor(C, D), IRB.CreateOr(Sc, Sd));
}
- setShadow(&I, S);
+ Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
+ setShadow(&I, Sa);
if (MS.TrackOrigins) {
// Origins are always i32, so any vector conditions must be flattened.
// FIXME: consider tracking vector origins for app vectors?
- Value *Cond = I.getCondition();
- if (Cond->getType()->isVectorTy()) {
- Value *ConvertedShadow = convertToShadowTyNoVec(Cond, IRB);
- Cond = IRB.CreateICmpNE(ConvertedShadow,
- getCleanShadow(ConvertedShadow), "_mso_select");
+ if (B->getType()->isVectorTy()) {
+ Type *FlatTy = getShadowTyNoVec(B->getType());
+ B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy),
+ ConstantInt::getNullValue(FlatTy));
+ Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy),
+ ConstantInt::getNullValue(FlatTy));
}
- setOrigin(&I, IRB.CreateSelect(Cond,
- getOrigin(I.getTrueValue()), getOrigin(I.getFalseValue())));
+ // a = select b, c, d
+ // Oa = Sb ? Ob : (b ? Oc : Od)
+ setOrigin(&I, IRB.CreateSelect(
+ Sb, getOrigin(I.getCondition()),
+ IRB.CreateSelect(B, getOrigin(C), getOrigin(D))));
}
}
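The new non-aggregate rule, Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ], means: with a clean condition the result inherits the selected operand's shadow, and with a poisoned condition a result bit is clean only where c and d agree and both are unpoisoned there. A bit-level sketch on 32-bit values, standalone and illustrative only:

#include <cassert>
#include <cstdint>

// Bit-level model of the select shadow rule in visitSelectInst.
// b/Sb: condition and its shadow; c/Sc and d/Sd: the two operands and shadows.
uint32_t selectShadow(bool b, bool Sb, uint32_t c, uint32_t Sc,
                      uint32_t d, uint32_t Sd) {
  uint32_t Sa0 = b ? Sc : Sd;       // condition shadow clean: pick one side
  uint32_t Sa1 = (c ^ d) | Sc | Sd; // condition poisoned: bits must agree and be clean
  return Sb ? Sa1 : Sa0;
}

int main() {
  // Clean condition: shadow of the selected operand propagates.
  assert(selectShadow(true, false, 0xF0, 0x0F, 0x00, 0x00) == 0x0F);
  // Poisoned condition, but both values agree and are clean: result is clean.
  assert(selectShadow(true, true, 0xAA, 0, 0xAA, 0) == 0);
  // Poisoned condition and differing values: the differing bit is poisoned.
  assert(selectShadow(false, true, 0xA0, 0, 0xA1, 0) == 0x01);
  return 0;
}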
@@ -2171,7 +2576,8 @@ struct VarArgAMD64Helper : public VarArgHelper {
VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
MemorySanitizerVisitor &MSV)
- : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(0), VAArgOverflowSize(0) { }
+ : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
+ VAArgOverflowSize(nullptr) {}
enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
@@ -2195,34 +2601,47 @@ struct VarArgAMD64Helper : public VarArgHelper {
// would have been to associate each live instance of va_list with a copy of
// MSanParamTLS, and extract shadow on va_arg() call in the argument list
// order.
- void visitCallSite(CallSite &CS, IRBuilder<> &IRB) {
+ void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
unsigned GpOffset = 0;
unsigned FpOffset = AMD64GpEndOffset;
unsigned OverflowOffset = AMD64FpEndOffset;
for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
ArgIt != End; ++ArgIt) {
Value *A = *ArgIt;
- ArgKind AK = classifyArgument(A);
- if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
- AK = AK_Memory;
- if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
- AK = AK_Memory;
- Value *Base;
- switch (AK) {
- case AK_GeneralPurpose:
- Base = getShadowPtrForVAArgument(A, IRB, GpOffset);
- GpOffset += 8;
- break;
- case AK_FloatingPoint:
- Base = getShadowPtrForVAArgument(A, IRB, FpOffset);
- FpOffset += 16;
- break;
- case AK_Memory:
- uint64_t ArgSize = MS.TD->getTypeAllocSize(A->getType());
- Base = getShadowPtrForVAArgument(A, IRB, OverflowOffset);
+ unsigned ArgNo = CS.getArgumentNo(ArgIt);
+ bool IsByVal = CS.paramHasAttr(ArgNo + 1, Attribute::ByVal);
+ if (IsByVal) {
+ // ByVal arguments always go to the overflow area.
+ assert(A->getType()->isPointerTy());
+ Type *RealTy = A->getType()->getPointerElementType();
+ uint64_t ArgSize = MS.DL->getTypeAllocSize(RealTy);
+ Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
+ IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
+ ArgSize, kShadowTLSAlignment);
+ } else {
+ ArgKind AK = classifyArgument(A);
+ if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
+ AK = AK_Memory;
+ if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
+ AK = AK_Memory;
+ Value *Base;
+ switch (AK) {
+ case AK_GeneralPurpose:
+ Base = getShadowPtrForVAArgument(A->getType(), IRB, GpOffset);
+ GpOffset += 8;
+ break;
+ case AK_FloatingPoint:
+ Base = getShadowPtrForVAArgument(A->getType(), IRB, FpOffset);
+ FpOffset += 16;
+ break;
+ case AK_Memory:
+ uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType());
+ Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
+ OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
+ }
+ IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
}
- IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
}
Constant *OverflowSize =
ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
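The va_arg shadow area mirrors the AMD64 calling convention: general-purpose arguments take 8-byte slots until the GP save area is full, floating-point arguments take 16-byte slots until the FP area is full, and everything else, including every ByVal argument after this change, goes to the overflow area rounded up to 8 bytes. A simplified offset model; the 48 and 176 end offsets are assumed from the ABI and are defined elsewhere in the file, not in this hunk:

#include <cstdint>
#include <vector>

// Simplified model of VarArgAMD64Helper's shadow layout.
enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory, AK_ByVal };

struct VAArg { ArgKind Kind; uint64_t Size; };

std::vector<uint64_t> layoutVAArgShadow(const std::vector<VAArg> &Args) {
  const uint64_t GpEnd = 48, FpEnd = 176; // assumed register save area bounds
  uint64_t GpOffset = 0, FpOffset = GpEnd, OverflowOffset = FpEnd;
  std::vector<uint64_t> Offsets;
  for (const VAArg &A : Args) {
    ArgKind AK = A.Kind;
    // ByVal arguments always land in the overflow area, as do arguments that
    // no longer fit in their register class.
    if (AK == AK_ByVal ||
        (AK == AK_GeneralPurpose && GpOffset >= GpEnd) ||
        (AK == AK_FloatingPoint && FpOffset >= FpEnd))
      AK = AK_Memory;
    switch (AK) {
    case AK_GeneralPurpose:
      Offsets.push_back(GpOffset); GpOffset += 8; break;
    case AK_FloatingPoint:
      Offsets.push_back(FpOffset); FpOffset += 16; break;
    default: // AK_Memory
      Offsets.push_back(OverflowOffset);
      OverflowOffset += (A.Size + 7) & ~uint64_t(7); // RoundUpAlignment(Size, 8)
      break;
    }
  }
  return Offsets;
}

int main() {
  auto Offs = layoutVAArgShadow({{AK_GeneralPurpose, 8}, {AK_ByVal, 24},
                                 {AK_FloatingPoint, 8}});
  // GP arg at 0, ByVal pushed to the overflow area at 176, FP arg at 48.
  return (Offs[0] == 0 && Offs[1] == 176 && Offs[2] == 48) ? 0 : 1;
}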
@@ -2230,15 +2649,15 @@ struct VarArgAMD64Helper : public VarArgHelper {
}
/// \brief Compute the shadow address for a given va_arg.
- Value *getShadowPtrForVAArgument(Value *A, IRBuilder<> &IRB,
+ Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
int ArgOffset) {
Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(A), 0),
+ return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
"_msarg");
}
- void visitVAStartInst(VAStartInst &I) {
+ void visitVAStartInst(VAStartInst &I) override {
IRBuilder<> IRB(&I);
VAStartInstrumentationList.push_back(&I);
Value *VAListTag = I.getArgOperand(0);
@@ -2250,7 +2669,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
/* size */24, /* alignment */8, false);
}
- void visitVACopyInst(VACopyInst &I) {
+ void visitVACopyInst(VACopyInst &I) override {
IRBuilder<> IRB(&I);
Value *VAListTag = I.getArgOperand(0);
Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
@@ -2261,7 +2680,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
/* size */24, /* alignment */8, false);
}
- void finalizeInstrumentation() {
+ void finalizeInstrumentation() override {
assert(!VAArgOverflowSize && !VAArgTLSCopy &&
"finalizeInstrumentation called twice");
if (!VAStartInstrumentationList.empty()) {
@@ -2313,13 +2732,13 @@ struct VarArgNoOpHelper : public VarArgHelper {
VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
MemorySanitizerVisitor &MSV) {}
- void visitCallSite(CallSite &CS, IRBuilder<> &IRB) {}
+ void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {}
- void visitVAStartInst(VAStartInst &I) {}
+ void visitVAStartInst(VAStartInst &I) override {}
- void visitVACopyInst(VACopyInst &I) {}
+ void visitVACopyInst(VACopyInst &I) override {}
- void finalizeInstrumentation() {}
+ void finalizeInstrumentation() override {}
};
VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 89fb746..89386a6 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -19,8 +19,6 @@
// The rest is handled by the run-time library.
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "tsan"
-
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
@@ -42,12 +40,11 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
-#include "llvm/Transforms/Utils/SpecialCaseList.h"
using namespace llvm;
-static cl::opt<std::string> ClBlacklistFile("tsan-blacklist",
- cl::desc("Blacklist file"), cl::Hidden);
+#define DEBUG_TYPE "tsan"
+
static cl::opt<bool> ClInstrumentMemoryAccesses(
"tsan-instrument-memory-accesses", cl::init(true),
cl::desc("Instrument memory accesses"), cl::Hidden);
@@ -76,14 +73,10 @@ namespace {
/// ThreadSanitizer: instrument the code in module to find races.
struct ThreadSanitizer : public FunctionPass {
- ThreadSanitizer(StringRef BlacklistFile = StringRef())
- : FunctionPass(ID),
- TD(0),
- BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
- : BlacklistFile) { }
- const char *getPassName() const;
- bool runOnFunction(Function &F);
- bool doInitialization(Module &M);
+ ThreadSanitizer() : FunctionPass(ID), DL(nullptr) {}
+ const char *getPassName() const override;
+ bool runOnFunction(Function &F) override;
+ bool doInitialization(Module &M) override;
static char ID; // Pass identification, replacement for typeid.
private:
@@ -96,10 +89,8 @@ struct ThreadSanitizer : public FunctionPass {
bool addrPointsToConstantData(Value *Addr);
int getMemoryAccessFuncIndex(Value *Addr);
- DataLayout *TD;
+ const DataLayout *DL;
Type *IntptrTy;
- SmallString<64> BlacklistFile;
- OwningPtr<SpecialCaseList> BL;
IntegerType *OrdTy;
// Callbacks to run-time library are computed in doInitialization.
Function *TsanFuncEntry;
@@ -129,8 +120,8 @@ const char *ThreadSanitizer::getPassName() const {
return "ThreadSanitizer";
}
-FunctionPass *llvm::createThreadSanitizerPass(StringRef BlacklistFile) {
- return new ThreadSanitizer(BlacklistFile);
+FunctionPass *llvm::createThreadSanitizerPass() {
+ return new ThreadSanitizer();
}
static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
@@ -174,8 +165,8 @@ void ThreadSanitizer::initializeCallbacks(Module &M) {
for (int op = AtomicRMWInst::FIRST_BINOP;
op <= AtomicRMWInst::LAST_BINOP; ++op) {
- TsanAtomicRMW[op][i] = NULL;
- const char *NamePart = NULL;
+ TsanAtomicRMW[op][i] = nullptr;
+ const char *NamePart = nullptr;
if (op == AtomicRMWInst::Xchg)
NamePart = "_exchange";
else if (op == AtomicRMWInst::Add)
@@ -224,14 +215,14 @@ void ThreadSanitizer::initializeCallbacks(Module &M) {
}
bool ThreadSanitizer::doInitialization(Module &M) {
- TD = getAnalysisIfAvailable<DataLayout>();
- if (!TD)
- return false;
- BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ if (!DLP)
+ report_fatal_error("data layout missing");
+ DL = &DLP->getDataLayout();
// Always insert a call to __tsan_init into the module's CTORs.
IRBuilder<> IRB(M.getContext());
- IntptrTy = IRB.getIntPtrTy(TD);
+ IntptrTy = IRB.getIntPtrTy(DL);
Value *TsanInit = M.getOrInsertFunction("__tsan_init",
IRB.getVoidTy(), NULL);
appendToGlobalCtors(M, cast<Function>(TsanInit), 0);
@@ -320,8 +311,7 @@ static bool isAtomic(Instruction *I) {
}
bool ThreadSanitizer::runOnFunction(Function &F) {
- if (!TD) return false;
- if (BL->isIn(F)) return false;
+ if (!DL) return false;
initializeCallbacks(*F.getParent());
SmallVector<Instruction*, 8> RetVec;
SmallVector<Instruction*, 8> AllLoadsAndStores;
@@ -330,22 +320,20 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
SmallVector<Instruction*, 8> MemIntrinCalls;
bool Res = false;
bool HasCalls = false;
+ bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);
// Traverse all instructions, collect loads/stores/returns, check for calls.
- for (Function::iterator FI = F.begin(), FE = F.end();
- FI != FE; ++FI) {
- BasicBlock &BB = *FI;
- for (BasicBlock::iterator BI = BB.begin(), BE = BB.end();
- BI != BE; ++BI) {
- if (isAtomic(BI))
- AtomicAccesses.push_back(BI);
- else if (isa<LoadInst>(BI) || isa<StoreInst>(BI))
- LocalLoadsAndStores.push_back(BI);
- else if (isa<ReturnInst>(BI))
- RetVec.push_back(BI);
- else if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
- if (isa<MemIntrinsic>(BI))
- MemIntrinCalls.push_back(BI);
+ for (auto &BB : F) {
+ for (auto &Inst : BB) {
+ if (isAtomic(&Inst))
+ AtomicAccesses.push_back(&Inst);
+ else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
+ LocalLoadsAndStores.push_back(&Inst);
+ else if (isa<ReturnInst>(Inst))
+ RetVec.push_back(&Inst);
+ else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
+ if (isa<MemIntrinsic>(Inst))
+ MemIntrinCalls.push_back(&Inst);
HasCalls = true;
chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
}
@@ -357,21 +345,22 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
// FIXME: many of these accesses do not need to be checked for races
// (e.g. variables that do not escape, etc).
- // Instrument memory accesses.
- if (ClInstrumentMemoryAccesses && F.hasFnAttribute(Attribute::SanitizeThread))
- for (size_t i = 0, n = AllLoadsAndStores.size(); i < n; ++i) {
- Res |= instrumentLoadOrStore(AllLoadsAndStores[i]);
+ // Instrument memory accesses only if we want to report bugs in the function.
+ if (ClInstrumentMemoryAccesses && SanitizeFunction)
+ for (auto Inst : AllLoadsAndStores) {
+ Res |= instrumentLoadOrStore(Inst);
}
- // Instrument atomic memory accesses.
+ // Instrument atomic memory accesses in any case (they can be used to
+ // implement synchronization).
if (ClInstrumentAtomics)
- for (size_t i = 0, n = AtomicAccesses.size(); i < n; ++i) {
- Res |= instrumentAtomic(AtomicAccesses[i]);
+ for (auto Inst : AtomicAccesses) {
+ Res |= instrumentAtomic(Inst);
}
- if (ClInstrumentMemIntrinsics)
- for (size_t i = 0, n = MemIntrinCalls.size(); i < n; ++i) {
- Res |= instrumentMemIntrinsic(MemIntrinCalls[i]);
+ if (ClInstrumentMemIntrinsics && SanitizeFunction)
+ for (auto Inst : MemIntrinCalls) {
+ Res |= instrumentMemIntrinsic(Inst);
}
// Instrument function entry/exit points if there were instrumented accesses.
@@ -381,8 +370,8 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
IRB.getInt32(0));
IRB.CreateCall(TsanFuncEntry, ReturnAddress);
- for (size_t i = 0, n = RetVec.size(); i < n; ++i) {
- IRBuilder<> IRBRet(RetVec[i]);
+ for (auto RetInst : RetVec) {
+ IRBuilder<> IRBRet(RetInst);
IRBRet.CreateCall(TsanFuncExit);
}
Res = true;
@@ -402,8 +391,13 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) {
if (IsWrite && isVtableAccess(I)) {
DEBUG(dbgs() << " VPTR : " << *I << "\n");
Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
- // StoredValue does not necessary have a pointer type.
- if (isa<IntegerType>(StoredValue->getType()))
+ // StoredValue may be a vector type if we are storing several vptrs at once.
+ // In this case, just take the first element of the vector since this is
+ // enough to find vptr races.
+ if (isa<VectorType>(StoredValue->getType()))
+ StoredValue = IRB.CreateExtractElement(
+ StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
+ if (StoredValue->getType()->isIntegerTy())
StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
// Call TsanVptrUpdate.
IRB.CreateCall2(TsanVptrUpdate,
@@ -440,21 +434,6 @@ static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
return IRB->getInt32(v);
}
-static ConstantInt *createFailOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
- uint32_t v = 0;
- switch (ord) {
- case NotAtomic: assert(false);
- case Unordered: // Fall-through.
- case Monotonic: v = 0; break;
- // case Consume: v = 1; break; // Not specified yet.
- case Acquire: v = 2; break;
- case Release: v = 0; break;
- case AcquireRelease: v = 2; break;
- case SequentiallyConsistent: v = 5; break;
- }
- return IRB->getInt32(v);
-}
-
// If a memset intrinsic gets inlined by the code gen, we will miss races on it.
// So, we either need to ensure the intrinsic is not inlined, or instrument it.
// We do not instrument memset/memmove/memcpy intrinsics (too complicated),
@@ -482,7 +461,7 @@ bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
}
// Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
-// standards. For background see C++11 standard. A slightly older, publically
+// standards. For background see C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
@@ -527,7 +506,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
if (Idx < 0)
return false;
Function *F = TsanAtomicRMW[RMWI->getOperation()][Idx];
- if (F == NULL)
+ if (!F)
return false;
const size_t ByteSize = 1 << Idx;
const size_t BitSize = ByteSize * 8;
@@ -550,10 +529,16 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
IRB.CreateIntCast(CASI->getCompareOperand(), Ty, false),
IRB.CreateIntCast(CASI->getNewValOperand(), Ty, false),
- createOrdering(&IRB, CASI->getOrdering()),
- createFailOrdering(&IRB, CASI->getOrdering())};
- CallInst *C = CallInst::Create(TsanAtomicCAS[Idx], ArrayRef<Value*>(Args));
- ReplaceInstWithInst(I, C);
+ createOrdering(&IRB, CASI->getSuccessOrdering()),
+ createOrdering(&IRB, CASI->getFailureOrdering())};
+ CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
+ Value *Success = IRB.CreateICmpEQ(C, CASI->getCompareOperand());
+
+ Value *Res = IRB.CreateInsertValue(UndefValue::get(CASI->getType()), C, 0);
+ Res = IRB.CreateInsertValue(Res, Success, 1);
+
+ I->replaceAllUsesWith(Res);
+ I->eraseFromParent();
} else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
Function *F = FI->getSynchScope() == SingleThread ?
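Since the runtime's value-returning compare-exchange hands back only the loaded value, the rewritten instrumentation rebuilds the { value, success } pair that the new-style cmpxchg instruction produces, computing success by comparing against the expected operand. A C++ sketch of that shape with a stand-in for the runtime call (not the real __tsan_* signature):

#include <cstdint>
#include <utility>

// Stand-in for the TSan runtime's value-returning compare-exchange; the real
// callback and its ordering arguments are set up in initializeCallbacks().
uint32_t tsanCasVal(uint32_t *Addr, uint32_t Expected, uint32_t Desired) {
  uint32_t Old = *Addr;
  if (Old == Expected)
    *Addr = Desired;
  return Old;
}

// Shape of what the pass now emits around a cmpxchg: call the runtime, then
// rebuild the { loaded value, success flag } pair the instruction produces.
std::pair<uint32_t, bool> instrumentedCas(uint32_t *Addr, uint32_t Expected,
                                          uint32_t Desired) {
  uint32_t Old = tsanCasVal(Addr, Expected, Desired);
  bool Success = (Old == Expected); // IRB.CreateICmpEQ(C, CompareOperand)
  return {Old, Success};
}

int main() {
  uint32_t X = 5;
  std::pair<uint32_t, bool> R = instrumentedCas(&X, 5, 7);
  return (R.second && X == 7) ? 0 : 1;
}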
@@ -568,7 +553,7 @@ int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr) {
Type *OrigPtrTy = Addr->getType();
Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
assert(OrigTy->isSized());
- uint32_t TypeSize = TD->getTypeStoreSizeInBits(OrigTy);
+ uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
if (TypeSize != 8 && TypeSize != 16 &&
TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
NumAccessesWithBadSize++;
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h b/contrib/llvm/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h
index 4eac39d..4098428 100644
--- a/contrib/llvm/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h
@@ -43,34 +43,34 @@ public:
EPT_RetainAutoreleaseRV
};
- ARCRuntimeEntryPoints() : TheModule(0),
- AutoreleaseRV(0),
- Release(0),
- Retain(0),
- RetainBlock(0),
- Autorelease(0),
- StoreStrong(0),
- RetainRV(0),
- RetainAutorelease(0),
- RetainAutoreleaseRV(0) { }
+ ARCRuntimeEntryPoints() : TheModule(nullptr),
+ AutoreleaseRV(nullptr),
+ Release(nullptr),
+ Retain(nullptr),
+ RetainBlock(nullptr),
+ Autorelease(nullptr),
+ StoreStrong(nullptr),
+ RetainRV(nullptr),
+ RetainAutorelease(nullptr),
+ RetainAutoreleaseRV(nullptr) { }
~ARCRuntimeEntryPoints() { }
void Initialize(Module *M) {
TheModule = M;
- AutoreleaseRV = 0;
- Release = 0;
- Retain = 0;
- RetainBlock = 0;
- Autorelease = 0;
- StoreStrong = 0;
- RetainRV = 0;
- RetainAutorelease = 0;
- RetainAutoreleaseRV = 0;
+ AutoreleaseRV = nullptr;
+ Release = nullptr;
+ Retain = nullptr;
+ RetainBlock = nullptr;
+ Autorelease = nullptr;
+ StoreStrong = nullptr;
+ RetainRV = nullptr;
+ RetainAutorelease = nullptr;
+ RetainAutoreleaseRV = nullptr;
}
Constant *get(const EntryPointType entry) {
- assert(TheModule != 0 && "Not initialized.");
+ assert(TheModule != nullptr && "Not initialized.");
switch (entry) {
case EPT_AutoreleaseRV:
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp b/contrib/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
index 8f917ae..08c8842 100644
--- a/contrib/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
+++ b/contrib/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
@@ -20,15 +20,16 @@
///
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "objc-arc-dependency"
#include "ObjCARC.h"
#include "DependencyAnalysis.h"
#include "ProvenanceAnalysis.h"
-#include "llvm/Support/CFG.h"
+#include "llvm/IR/CFG.h"
using namespace llvm;
using namespace llvm::objcarc;
+#define DEBUG_TYPE "objc-arc-dependency"
+
/// Test whether the given instruction can result in a reference count
/// modification (positive or negative) for the pointer's object.
bool
@@ -223,7 +224,7 @@ llvm::objcarc::FindDependencies(DependenceKind Flavor,
pred_iterator PI(LocalStartBB), PE(LocalStartBB, false);
if (PI == PE)
// If we've reached the function entry, produce a null dependence.
- DependingInsts.insert(0);
+ DependingInsts.insert(nullptr);
else
// Add the predecessors to the worklist.
do {
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARC.h b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARC.h
index 8044494..f71cf2b 100644
--- a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARC.h
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARC.h
@@ -27,10 +27,10 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CallSite.h"
-#include "llvm/Support/InstIterator.h"
#include "llvm/Transforms/ObjCARC.h"
#include "llvm/Transforms/Utils/Local.h"
@@ -308,6 +308,7 @@ static inline bool IsPotentialRetainableObjPtr(const Value *Op) {
// Special arguments can not be a valid retainable object pointer.
if (const Argument *Arg = dyn_cast<Argument>(Op))
if (Arg->hasByValAttr() ||
+ Arg->hasInAllocaAttr() ||
Arg->hasNestAttr() ||
Arg->hasStructRetAttr())
return false;
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
index 00d9864..1a25391 100644
--- a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
@@ -24,7 +24,6 @@
///
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "objc-arc-ap-elim"
#include "ObjCARC.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Constants.h"
@@ -34,11 +33,13 @@
using namespace llvm;
using namespace llvm::objcarc;
+#define DEBUG_TYPE "objc-arc-ap-elim"
+
namespace {
/// \brief Autorelease pool elimination.
class ObjCARCAPElim : public ModulePass {
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- virtual bool runOnModule(Module &M);
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool runOnModule(Module &M) override;
static bool MayAutorelease(ImmutableCallSite CS, unsigned Depth = 0);
static bool OptimizeBB(BasicBlock *BB);
@@ -93,7 +94,7 @@ bool ObjCARCAPElim::MayAutorelease(ImmutableCallSite CS, unsigned Depth) {
bool ObjCARCAPElim::OptimizeBB(BasicBlock *BB) {
bool Changed = false;
- Instruction *Push = 0;
+ Instruction *Push = nullptr;
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
Instruction *Inst = I++;
switch (GetBasicInstructionClass(Inst)) {
@@ -112,11 +113,11 @@ bool ObjCARCAPElim::OptimizeBB(BasicBlock *BB) {
Inst->eraseFromParent();
Push->eraseFromParent();
}
- Push = 0;
+ Push = nullptr;
break;
case IC_CallOrUser:
if (MayAutorelease(ImmutableCallSite(Inst)))
- Push = 0;
+ Push = nullptr;
break;
default:
break;
@@ -154,8 +155,8 @@ bool ObjCARCAPElim::runOnModule(Module &M) {
for (User::op_iterator OI = Init->op_begin(), OE = Init->op_end();
OI != OE; ++OI) {
Value *Op = *OI;
- // llvm.global_ctors is an array of pairs where the second members
- // are constructor functions.
+ // llvm.global_ctors is an array of three-field structs where the second
+ // members are constructor functions.
Function *F = dyn_cast<Function>(cast<ConstantStruct>(Op)->getOperand(1));
// If the user used a constructor function with the wrong signature and
// it got bitcasted or whatever, look the other way.
@@ -165,7 +166,7 @@ bool ObjCARCAPElim::runOnModule(Module &M) {
if (F->isDeclaration())
continue;
// Only look at functions with one basic block.
- if (llvm::next(F->begin()) != F->end())
+ if (std::next(F->begin()) != F->end())
continue;
// Ok, a single-block constructor function definition. Try to optimize it.
Changed |= OptimizeBB(F->begin());
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.cpp b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.cpp
index d18667b..2c09e70 100644
--- a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.cpp
@@ -20,7 +20,6 @@
///
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "objc-arc-aa"
#include "ObjCARC.h"
#include "ObjCARCAliasAnalysis.h"
#include "llvm/IR/Instruction.h"
@@ -28,6 +27,8 @@
#include "llvm/PassAnalysisSupport.h"
#include "llvm/PassSupport.h"
+#define DEBUG_TYPE "objc-arc-aa"
+
namespace llvm {
class Function;
class Value;
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.h b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.h
index 41ccfe2..97b565b 100644
--- a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.h
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.h
@@ -44,28 +44,28 @@ namespace objcarc {
}
private:
- virtual void initializePass() {
+ void initializePass() override {
InitializeAliasAnalysis(this);
}
/// This method is used when a pass implements an analysis interface through
/// multiple inheritance. If needed, it should override this to adjust the
/// this pointer as needed for the specified pass info.
- virtual void *getAdjustedAnalysisPointer(const void *PI) {
+ void *getAdjustedAnalysisPointer(const void *PI) override {
if (PI == &AliasAnalysis::ID)
return static_cast<AliasAnalysis *>(this);
return this;
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- virtual AliasResult alias(const Location &LocA, const Location &LocB);
- virtual bool pointsToConstantMemory(const Location &Loc, bool OrLocal);
- virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS);
- virtual ModRefBehavior getModRefBehavior(const Function *F);
- virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc);
- virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
- ImmutableCallSite CS2);
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ AliasResult alias(const Location &LocA, const Location &LocB) override;
+ bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override;
+ ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override;
+ ModRefBehavior getModRefBehavior(const Function *F) override;
+ ModRefResult getModRefInfo(ImmutableCallSite CS,
+ const Location &Loc) override;
+ ModRefResult getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) override;
};
} // namespace objcarc
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
index 9d80037..f48d53d 100644
--- a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
@@ -26,13 +26,12 @@
// TODO: ObjCARCContract could insert PHI nodes when uses aren't
// dominated by single calls.
-#define DEBUG_TYPE "objc-arc-contract"
#include "ObjCARC.h"
#include "ARCRuntimeEntryPoints.h"
#include "DependencyAnalysis.h"
#include "ProvenanceAnalysis.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
@@ -40,6 +39,8 @@
using namespace llvm;
using namespace llvm::objcarc;
+#define DEBUG_TYPE "objc-arc-contract"
+
STATISTIC(NumPeeps, "Number of calls peephole-optimized");
STATISTIC(NumStoreStrongs, "Number objc_storeStrong calls formed");
@@ -79,9 +80,9 @@ namespace {
void ContractRelease(Instruction *Release,
inst_iterator &Iter);
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- virtual bool doInitialization(Module &M);
- virtual bool runOnFunction(Function &F);
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool doInitialization(Module &M) override;
+ bool runOnFunction(Function &F) override;
public:
static char ID;
@@ -95,7 +96,7 @@ char ObjCARCContract::ID = 0;
INITIALIZE_PASS_BEGIN(ObjCARCContract,
"objc-arc-contract", "ObjC ARC contraction", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(ObjCARCContract,
"objc-arc-contract", "ObjC ARC contraction", false, false)
@@ -105,7 +106,7 @@ Pass *llvm::createObjCARCContractPass() {
void ObjCARCContract::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AliasAnalysis>();
- AU.addRequired<DominatorTree>();
+ AU.addRequired<DominatorTreeWrapperPass>();
AU.setPreservesCFG();
}
@@ -157,7 +158,7 @@ ObjCARCContract::ContractAutorelease(Function &F, Instruction *Autorelease,
// Check that there are no instructions between the retain and the autorelease
// (such as an autorelease_pop) which may change the count.
- CallInst *Retain = 0;
+ CallInst *Retain = nullptr;
if (Class == IC_AutoreleaseRV)
FindDependencies(RetainAutoreleaseRVDep, Arg,
Autorelease->getParent(), Autorelease,
@@ -218,7 +219,7 @@ void ObjCARCContract::ContractRelease(Instruction *Release,
BasicBlock::iterator I = Load, End = BB->end();
++I;
AliasAnalysis::Location Loc = AA->getLocation(Load);
- StoreInst *Store = 0;
+ StoreInst *Store = nullptr;
bool SawRelease = false;
for (; !Store || !SawRelease; ++I) {
if (I == End)
@@ -300,7 +301,7 @@ bool ObjCARCContract::doInitialization(Module &M) {
EP.Initialize(&M);
// Initialize RetainRVMarker.
- RetainRVMarker = 0;
+ RetainRVMarker = nullptr;
if (NamedMDNode *NMD =
M.getNamedMetadata("clang.arc.retainAutoreleasedReturnValueMarker"))
if (NMD->getNumOperands() == 1) {
@@ -323,7 +324,7 @@ bool ObjCARCContract::runOnFunction(Function &F) {
Changed = false;
AA = &getAnalysis<AliasAnalysis>();
- DT = &getAnalysis<DominatorTree>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
PA.setAA(&getAnalysis<AliasAnalysis>());
@@ -440,17 +441,17 @@ bool ObjCARCContract::runOnFunction(Function &F) {
// Don't use GetObjCArg because we don't want to look through bitcasts
// and such; to do the replacement, the argument must have type i8*.
- const Value *Arg = cast<CallInst>(Inst)->getArgOperand(0);
+ Value *Arg = cast<CallInst>(Inst)->getArgOperand(0);
for (;;) {
// If we're compiling bugpointed code, don't get in trouble.
if (!isa<Instruction>(Arg) && !isa<Argument>(Arg))
break;
// Look through the uses of the pointer.
- for (Value::const_use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
+ for (Value::use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
UI != UE; ) {
- Use &U = UI.getUse();
- unsigned OperandNo = UI.getOperandNo();
- ++UI; // Increment UI now, because we may unlink its element.
+ // Increment UI now, because we may unlink its element.
+ Use &U = *UI++;
+ unsigned OperandNo = U.getOperandNo();
// If the call's return value dominates a use of the call's argument
// value, rewrite the use to use the return value. We check for
@@ -475,9 +476,9 @@ bool ObjCARCContract::runOnFunction(Function &F) {
for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i)
if (PHI->getIncomingBlock(i) == BB) {
// Keep the UI iterator valid.
- if (&PHI->getOperandUse(
- PHINode::getOperandNumForIncomingValue(i)) ==
- &UI.getUse())
+ if (UI != UE &&
+ &PHI->getOperandUse(
+ PHINode::getOperandNumForIncomingValue(i)) == &*UI)
++UI;
PHI->setIncomingValue(i, Replacement);
}
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp
index 39bf8f3..bf9fcbb 100644
--- a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp
@@ -23,11 +23,10 @@
///
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "objc-arc-expand"
-
#include "ObjCARC.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Value.h"
@@ -37,9 +36,10 @@
#include "llvm/PassSupport.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/InstIterator.h"
#include "llvm/Support/raw_ostream.h"
+#define DEBUG_TYPE "objc-arc-expand"
+
namespace llvm {
class Module;
}
@@ -50,9 +50,9 @@ using namespace llvm::objcarc;
namespace {
/// \brief Early ARC transformations.
class ObjCARCExpand : public FunctionPass {
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- virtual bool doInitialization(Module &M);
- virtual bool runOnFunction(Function &F);
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool doInitialization(Module &M) override;
+ bool runOnFunction(Function &F) override;
/// A flag indicating whether this optimization pass should run.
bool Run;
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
index 2976df6..dd4dd50 100644
--- a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
@@ -24,7 +24,6 @@
///
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "objc-arc-opts"
#include "ObjCARC.h"
#include "ARCRuntimeEntryPoints.h"
#include "DependencyAnalysis.h"
@@ -35,15 +34,17 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace llvm::objcarc;
+#define DEBUG_TYPE "objc-arc-opts"
+
/// \defgroup MiscUtils Miscellaneous utilities that are not ARC specific.
/// @{
@@ -156,24 +157,21 @@ static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
return FindSingleUseIdentifiedObject(
cast<CallInst>(Arg)->getArgOperand(0));
if (!IsObjCIdentifiedObject(Arg))
- return 0;
+ return nullptr;
return Arg;
}
// If we found an identifiable object that has multiple uses, but all of those
// uses are trivial, we can still consider this to be a single-use value.
if (IsObjCIdentifiedObject(Arg)) {
- for (Value::const_use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
- UI != UE; ++UI) {
- const User *U = *UI;
+ for (const User *U : Arg->users())
if (!U->use_empty() || StripPointerCastsAndObjCCalls(U) != Arg)
- return 0;
- }
+ return nullptr;
return Arg;
}
- return 0;
+ return nullptr;
}
/// This is a wrapper around getUnderlyingObjCPtr along the lines of
@@ -376,13 +374,13 @@ namespace {
bool CFGHazardAfflicted;
RRInfo() :
- KnownSafe(false), IsTailCallRelease(false), ReleaseMetadata(0),
+ KnownSafe(false), IsTailCallRelease(false), ReleaseMetadata(nullptr),
CFGHazardAfflicted(false) {}
void clear();
/// Conservatively merge the two RRInfo. Returns true if a partial merge has
- /// occured, false otherwise.
+ /// occurred, false otherwise.
bool Merge(const RRInfo &Other);
};
@@ -391,7 +389,7 @@ namespace {
void RRInfo::clear() {
KnownSafe = false;
IsTailCallRelease = false;
- ReleaseMetadata = 0;
+ ReleaseMetadata = nullptr;
Calls.clear();
ReverseInsertPts.clear();
CFGHazardAfflicted = false;
@@ -400,7 +398,7 @@ void RRInfo::clear() {
bool RRInfo::Merge(const RRInfo &Other) {
// Conservatively merge the ReleaseMetadata information.
if (ReleaseMetadata != Other.ReleaseMetadata)
- ReleaseMetadata = 0;
+ ReleaseMetadata = nullptr;
// Conservatively merge the boolean state.
KnownSafe &= Other.KnownSafe;
@@ -459,7 +457,7 @@ namespace {
}
bool IsTrackingImpreciseReleases() const {
- return RRI.ReleaseMetadata != 0;
+ return RRI.ReleaseMetadata != nullptr;
}
const MDNode *GetReleaseMetadata() const {
@@ -538,8 +536,7 @@ namespace {
void
PtrState::Merge(const PtrState &Other, bool TopDown) {
- Seq = MergeSeqs(static_cast<Sequence>(Seq), static_cast<Sequence>(Other.Seq),
- TopDown);
+ Seq = MergeSeqs(GetSeq(), Other.GetSeq(), TopDown);
KnownPositiveRefCount &= Other.KnownPositiveRefCount;
// If we're not in a sequence (anymore), drop all associated state.
@@ -660,7 +657,7 @@ namespace {
/// which pass through this block. This is only valid after both the
/// top-down and bottom-up traversals are complete.
///
- /// Returns true if overflow occured. Returns false if overflow did not
+ /// Returns true if overflow occurred. Returns false if overflow did not
/// occur.
bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
if (TopDownPathCount == OverflowOccurredValue ||
@@ -668,7 +665,7 @@ namespace {
return true;
unsigned long long Product =
(unsigned long long)TopDownPathCount*BottomUpPathCount;
- // Overflow occured if any of the upper bits of Product are set or if all
+ // Overflow occurred if any of the upper bits of Product are set or if all
// the lower bits of Product are all set.
return (Product >> 32) ||
((PathCount = Product) == OverflowOccurredValue);
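The path-count merge multiplies the two 32-bit counts into 64 bits and reports overflow either when the product no longer fits in 32 bits or when it lands exactly on the reserved sentinel. A standalone model of the check; the sentinel value is assumed here, as it is defined elsewhere in the file:

#include <cassert>

// Model of BBState::GetAllPathCountWithOverflow. The sentinel is assumed to be
// the all-ones 32-bit value, matching the comment about all lower bits being set.
const unsigned OverflowOccurredValue = 0xffffffff;

bool getAllPathCountWithOverflow(unsigned TopDown, unsigned BottomUp,
                                 unsigned &PathCount) {
  if (TopDown == OverflowOccurredValue || BottomUp == OverflowOccurredValue)
    return true;
  unsigned long long Product = (unsigned long long)TopDown * BottomUp;
  // Overflow if the product needs more than 32 bits, or if it collides with
  // the sentinel value itself.
  return (Product >> 32) ||
         ((PathCount = (unsigned)Product) == OverflowOccurredValue);
}

int main() {
  unsigned PC = 0;
  assert(!getAllPathCountWithOverflow(3, 5, PC) && PC == 15);
  assert(getAllPathCountWithOverflow(1u << 20, 1u << 20, PC)); // 2^40 overflows
  return 0;
}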
@@ -712,7 +709,7 @@ void BBState::MergePred(const BBState &Other) {
// In order to be consistent, we clear the top down pointers when this addition
// makes TopDownPathCount become OverflowOccurredValue, even though "true" overflow
- // has not occured.
+ // has not occurred.
if (TopDownPathCount == OverflowOccurredValue) {
clearTopDownPointers();
return;
@@ -756,7 +753,7 @@ void BBState::MergeSucc(const BBState &Other) {
// In order to be consistent, we clear the bottom up pointers when this addition
// makes BottomUpPathCount become OverflowOccurredValue, even though "true" overflow
- // has not occured.
+ // has not occurred.
if (BottomUpPathCount == OverflowOccurredValue) {
clearBottomUpPointers();
return;
@@ -822,7 +819,7 @@ ARCAnnotationTargetIdentifier("objc-arc-annotation-target-identifier",
/// arc annotation processor tool. If the function is an
static MDString *AppendMDNodeToSourcePtr(unsigned NodeId,
Value *Ptr) {
- MDString *Hash = 0;
+ MDString *Hash = nullptr;
// If pointer is a result of an instruction and it does not have a source
// MDNode attached, attach a new MDNode onto it. If pointer is a result of
@@ -884,7 +881,7 @@ static void AppendMDNodeToInstForPtr(unsigned NodeId,
MDString *PtrSourceMDNodeID,
Sequence OldSeq,
Sequence NewSeq) {
- MDNode *Node = 0;
+ MDNode *Node = nullptr;
Value *tmp[3] = {PtrSourceMDNodeID,
SequenceToMDString(Inst->getContext(),
OldSeq),
@@ -920,7 +917,7 @@ static void GenerateARCBBEntranceAnnotation(const char *Name, BasicBlock *BB,
Value *PtrName;
StringRef Tmp = Ptr->getName();
- if (0 == (PtrName = M->getGlobalVariable(Tmp, true))) {
+ if (nullptr == (PtrName = M->getGlobalVariable(Tmp, true))) {
Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
Tmp + "_STR");
PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
@@ -929,7 +926,7 @@ static void GenerateARCBBEntranceAnnotation(const char *Name, BasicBlock *BB,
Value *S;
std::string SeqStr = SequenceToString(Seq);
- if (0 == (S = M->getGlobalVariable(SeqStr, true))) {
+ if (nullptr == (S = M->getGlobalVariable(SeqStr, true))) {
Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
SeqStr + "_STR");
S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
@@ -959,11 +956,11 @@ static void GenerateARCBBTerminatorAnnotation(const char *Name, BasicBlock *BB,
/*isVarArg=*/false);
Constant *Callee = M->getOrInsertFunction(Name, FTy);
- IRBuilder<> Builder(BB, llvm::prior(BB->end()));
+ IRBuilder<> Builder(BB, std::prev(BB->end()));
Value *PtrName;
StringRef Tmp = Ptr->getName();
- if (0 == (PtrName = M->getGlobalVariable(Tmp, true))) {
+ if (nullptr == (PtrName = M->getGlobalVariable(Tmp, true))) {
Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
Tmp + "_STR");
PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
@@ -972,7 +969,7 @@ static void GenerateARCBBTerminatorAnnotation(const char *Name, BasicBlock *BB,
Value *S;
std::string SeqStr = SequenceToString(Seq);
- if (0 == (S = M->getGlobalVariable(SeqStr, true))) {
+ if (nullptr == (S = M->getGlobalVariable(SeqStr, true))) {
Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
SeqStr + "_STR");
S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
@@ -1006,7 +1003,7 @@ static void GenerateARCAnnotation(unsigned InstMDId,
// llvm-arc-annotation-processor tool to cross reference where the source
// pointer is in the LLVM IR since the LLVM IR parser does not submit such
// information via debug info for backends to use (since why would anyone
- // need such a thing from LLVM IR besides in non standard cases
+ // need such a thing from LLVM IR besides in non-standard cases
// [i.e. this]).
MDString *SourcePtrMDNode =
AppendMDNodeToSourcePtr(PtrMDId, Ptr);
@@ -1164,10 +1161,10 @@ namespace {
void GatherStatistics(Function &F, bool AfterOptimization = false);
#endif
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- virtual bool doInitialization(Module &M);
- virtual bool runOnFunction(Function &F);
- virtual void releaseMemory();
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool doInitialization(Module &M) override;
+ bool runOnFunction(Function &F) override;
+ void releaseMemory() override;
public:
static char ID;
@@ -1267,13 +1264,11 @@ ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
Users.push_back(Ptr);
do {
Ptr = Users.pop_back_val();
- for (Value::const_use_iterator UI = Ptr->use_begin(), UE = Ptr->use_end();
- UI != UE; ++UI) {
- const User *I = *UI;
- if (isa<ReturnInst>(I) || GetBasicInstructionClass(I) == IC_RetainRV)
+ for (const User *U : Ptr->users()) {
+ if (isa<ReturnInst>(U) || GetBasicInstructionClass(U) == IC_RetainRV)
return;
- if (isa<BitCastInst>(I))
- Users.push_back(I);
+ if (isa<BitCastInst>(U))
+ Users.push_back(U);
}
} while (!Users.empty());
@@ -1724,7 +1719,7 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
BBState &MyStates) {
bool NestingDetected = false;
InstructionClass Class = GetInstructionClass(Inst);
- const Value *Arg = 0;
+ const Value *Arg = nullptr;
DEBUG(dbgs() << "Class: " << Class << "\n");
@@ -1809,13 +1804,13 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
// pointer has multiple owners implying that we must be more conservative.
//
// This comes up in the context of a pointer being ``KnownSafe''. In the
- // presense of a block being initialized, the frontend will emit the
+ // presence of a block being initialized, the frontend will emit the
// objc_retain on the original pointer and the release on the pointer loaded
// from the alloca. The optimizer will through the provenance analysis
// realize that the two are related, but since we only require KnownSafe in
// one direction, will match the inner retain on the original pointer with
// the guard release on the original pointer. This is fixed by ensuring that
- // in the presense of allocas we only unconditionally remove pointers if
+ // in the presence of allocas we only unconditionally remove pointers if
// both our retain and our release are KnownSafe.
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand())) {
@@ -1875,7 +1870,7 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
if (isa<InvokeInst>(Inst))
S.InsertReverseInsertPt(BB->getFirstInsertionPt());
else
- S.InsertReverseInsertPt(llvm::next(BasicBlock::iterator(Inst)));
+ S.InsertReverseInsertPt(std::next(BasicBlock::iterator(Inst)));
S.SetSeq(S_Use);
ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
} else if (Seq == S_Release && IsUser(Class)) {
@@ -1889,7 +1884,7 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
if (isa<InvokeInst>(Inst))
S.InsertReverseInsertPt(BB->getFirstInsertionPt());
else
- S.InsertReverseInsertPt(llvm::next(BasicBlock::iterator(Inst)));
+ S.InsertReverseInsertPt(std::next(BasicBlock::iterator(Inst)));
}
break;
case S_Stop:
@@ -1946,7 +1941,7 @@ ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
// Visit all the instructions, bottom-up.
for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
- Instruction *Inst = llvm::prior(I);
+ Instruction *Inst = std::prev(I);
// Invoke instructions are visited as part of their successors (below).
if (isa<InvokeInst>(Inst))
@@ -1980,7 +1975,7 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
BBState &MyStates) {
bool NestingDetected = false;
InstructionClass Class = GetInstructionClass(Inst);
- const Value *Arg = 0;
+ const Value *Arg = nullptr;
switch (Class) {
case IC_RetainBlock:
@@ -2032,7 +2027,7 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
switch (OldSeq) {
case S_Retain:
case S_CanRelease:
- if (OldSeq == S_Retain || ReleaseMetadata != 0)
+ if (OldSeq == S_Retain || ReleaseMetadata != nullptr)
S.ClearReverseInsertPts();
// FALL THROUGH
case S_Use:
@@ -2438,7 +2433,7 @@ ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState>
} else {
if (ReleasesToMove.ReleaseMetadata !=
NewRetainReleaseRRI.ReleaseMetadata)
- ReleasesToMove.ReleaseMetadata = 0;
+ ReleasesToMove.ReleaseMetadata = nullptr;
if (ReleasesToMove.IsTailCallRelease !=
NewRetainReleaseRRI.IsTailCallRelease)
ReleasesToMove.IsTailCallRelease = false;
@@ -2692,12 +2687,12 @@ void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
// within the same block. Theoretically, we could do memdep-style non-local
// analysis too, but that would want caching. A better approach would be to
// use the technique that EarlyCSE uses.
- inst_iterator Current = llvm::prior(I);
+ inst_iterator Current = std::prev(I);
BasicBlock *CurrentBB = Current.getBasicBlockIterator();
for (BasicBlock::iterator B = CurrentBB->begin(),
J = Current.getInstructionIterator();
J != B; --J) {
- Instruction *EarlierInst = &*llvm::prior(J);
+ Instruction *EarlierInst = &*std::prev(J);
InstructionClass EarlierClass = GetInstructionClass(EarlierInst);
switch (EarlierClass) {
case IC_LoadWeak:
@@ -2788,9 +2783,8 @@ void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
CallInst *Call = cast<CallInst>(Inst);
Value *Arg = Call->getArgOperand(0);
if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
- for (Value::use_iterator UI = Alloca->use_begin(),
- UE = Alloca->use_end(); UI != UE; ++UI) {
- const Instruction *UserInst = cast<Instruction>(*UI);
+ for (User *U : Alloca->users()) {
+ const Instruction *UserInst = cast<Instruction>(U);
switch (GetBasicInstructionClass(UserInst)) {
case IC_InitWeak:
case IC_StoreWeak:
@@ -2801,8 +2795,7 @@ void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
}
}
Changed = true;
- for (Value::use_iterator UI = Alloca->use_begin(),
- UE = Alloca->use_end(); UI != UE; ) {
+ for (auto UI = Alloca->user_begin(), UE = Alloca->user_end(); UI != UE;) {
CallInst *UserInst = cast<CallInst>(*UI++);
switch (GetBasicInstructionClass(UserInst)) {
case IC_InitWeak:
@@ -2892,7 +2885,7 @@ FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
FindDependencies(CanChangeRetainCount, Arg,
BB, Autorelease, DepInsts, Visited, PA);
if (DepInsts.size() != 1)
- return 0;
+ return nullptr;
CallInst *Retain =
dyn_cast_or_null<CallInst>(*DepInsts.begin());
@@ -2901,7 +2894,7 @@ FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
if (!Retain ||
!IsRetain(GetBasicInstructionClass(Retain)) ||
GetObjCArg(Retain) != Arg) {
- return 0;
+ return nullptr;
}
return Retain;
@@ -2919,17 +2912,17 @@ FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
FindDependencies(NeedsPositiveRetainCount, Arg,
BB, Ret, DepInsts, V, PA);
if (DepInsts.size() != 1)
- return 0;
+ return nullptr;
CallInst *Autorelease =
dyn_cast_or_null<CallInst>(*DepInsts.begin());
if (!Autorelease)
- return 0;
+ return nullptr;
InstructionClass AutoreleaseClass = GetBasicInstructionClass(Autorelease);
if (!IsAutorelease(AutoreleaseClass))
- return 0;
+ return nullptr;
if (GetObjCArg(Autorelease) != Arg)
- return 0;
+ return nullptr;
return Autorelease;
}
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.cpp b/contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.cpp
index ae3c628..22be6fd 100644
--- a/contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.cpp
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.cpp
@@ -79,11 +79,10 @@ static bool IsStoredObjCPointer(const Value *P) {
Visited.insert(P);
do {
P = Worklist.pop_back_val();
- for (Value::const_use_iterator UI = P->use_begin(), UE = P->use_end();
- UI != UE; ++UI) {
- const User *Ur = *UI;
+ for (const Use &U : P->uses()) {
+ const User *Ur = U.getUser();
if (isa<StoreInst>(Ur)) {
- if (UI.getOperandNo() == 0)
+ if (U.getOperandNo() == 0)
// The pointer is stored.
return true;
// The pointer is stored through.
diff --git a/contrib/llvm/lib/Transforms/Scalar/ADCE.cpp b/contrib/llvm/lib/Transforms/Scalar/ADCE.cpp
index a3eb07a9..1a3a4aa 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ADCE.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ADCE.cpp
@@ -14,20 +14,21 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "adce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CFG.h"
-#include "llvm/Support/InstIterator.h"
using namespace llvm;
+#define DEBUG_TYPE "adce"
+
STATISTIC(NumRemoved, "Number of instructions removed");
namespace {
@@ -37,9 +38,9 @@ namespace {
initializeADCEPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnFunction(Function& F);
+ bool runOnFunction(Function& F) override;
- virtual void getAnalysisUsage(AnalysisUsage& AU) const {
+ void getAnalysisUsage(AnalysisUsage& AU) const override {
AU.setPreservesCFG();
}
@@ -50,6 +51,9 @@ char ADCE::ID = 0;
INITIALIZE_PASS(ADCE, "adce", "Aggressive Dead Code Elimination", false, false)
bool ADCE::runOnFunction(Function& F) {
+ if (skipOptnoneFunction(F))
+ return false;
+
SmallPtrSet<Instruction*, 128> alive;
SmallVector<Instruction*, 128> worklist;
diff --git a/contrib/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp b/contrib/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
new file mode 100644
index 0000000..763d02b
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
@@ -0,0 +1,602 @@
+//===- ConstantHoisting.cpp - Prepare code for expensive constants --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass identifies expensive constants to hoist and coalesces them to
+// better prepare the code for SelectionDAG-based code generation. This works
+// around the limitations of the basic-block-at-a-time approach.
+//
+// First it scans all instructions for integer constants and calculates their
+// cost. If the constant can be folded into the instruction (the cost is
+// TCC_Free) or the cost is just a simple operation (TCC_Basic), then we don't
+// consider it expensive and leave it alone. This is the default behavior and
+// the default implementation of getIntImmCost will always return TCC_Free.
+//
+// If the cost is more than TCC_BASIC, then the integer constant can't be folded
+// into the instruction and it might be beneficial to hoist the constant.
+// Similar constants are coalesced to reduce register pressure and
+// materialization code.
+//
+// When a constant is hoisted, it is also hidden behind a bitcast to force it to
+// be live-out of the basic block. Otherwise the constant would be just
+// duplicated and each basic block would have its own copy in the SelectionDAG.
+// The SelectionDAG recognizes such constants as opaque and doesn't perform
+// certain transformations on them, which would create a new expensive constant.
+//
+// This optimization is only applied to integer constants in instructions and
+// simple (this means not nested) constant cast expressions. For example:
+// %0 = load i64* inttoptr (i64 big_constant to i64*)
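+//
+// As an illustrative sketch (not taken from a real test case), two users of
+// the same expensive constant in one function:
+//
+//   %a = add i64 %x, big_constant
+//   %b = and i64 %y, big_constant
+//
+// would be rewritten so both users share a single hoisted, opaque
+// materialization of the constant:
+//
+//   %const = bitcast i64 big_constant to i64
+//   %a = add i64 %x, %const
+//   %b = and i64 %y, %const
+//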
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Debug.h"
+#include <tuple>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "consthoist"
+
+STATISTIC(NumConstantsHoisted, "Number of constants hoisted");
+STATISTIC(NumConstantsRebased, "Number of constants rebased");
+
+namespace {
+struct ConstantUser;
+struct RebasedConstantInfo;
+
+typedef SmallVector<ConstantUser, 8> ConstantUseListType;
+typedef SmallVector<RebasedConstantInfo, 4> RebasedConstantListType;
+
+/// \brief Keeps track of the user of a constant and the operand index where the
+/// constant is used.
+struct ConstantUser {
+ Instruction *Inst;
+ unsigned OpndIdx;
+
+ ConstantUser(Instruction *Inst, unsigned Idx) : Inst(Inst), OpndIdx(Idx) { }
+};
+
+/// \brief Keeps track of a constant candidate and its uses.
+struct ConstantCandidate {
+ ConstantUseListType Uses;
+ ConstantInt *ConstInt;
+ unsigned CumulativeCost;
+
+ ConstantCandidate(ConstantInt *ConstInt)
+ : ConstInt(ConstInt), CumulativeCost(0) { }
+
+ /// \brief Add the user to the use list and update the cost.
+ void addUser(Instruction *Inst, unsigned Idx, unsigned Cost) {
+ CumulativeCost += Cost;
+ Uses.push_back(ConstantUser(Inst, Idx));
+ }
+};
+
+/// \brief This represents a constant that has been rebased with respect to a
+/// base constant. The difference to the base constant is recorded in Offset.
+struct RebasedConstantInfo {
+ ConstantUseListType Uses;
+ Constant *Offset;
+
+ RebasedConstantInfo(ConstantUseListType &&Uses, Constant *Offset)
+ : Uses(Uses), Offset(Offset) { }
+};
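+
+// As a hypothetical example, with a base constant of 0x4000 and another
+// candidate 0x4004 of the same type, the second candidate would be recorded
+// here with an Offset of 4 and later rematerialized as 'base + 4'.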
+
+/// \brief A base constant and all its rebased constants.
+struct ConstantInfo {
+ ConstantInt *BaseConstant;
+ RebasedConstantListType RebasedConstants;
+};
+
+/// \brief The constant hoisting pass.
+class ConstantHoisting : public FunctionPass {
+ typedef DenseMap<ConstantInt *, unsigned> ConstCandMapType;
+ typedef std::vector<ConstantCandidate> ConstCandVecType;
+
+ const TargetTransformInfo *TTI;
+ DominatorTree *DT;
+ BasicBlock *Entry;
+
+ /// Keeps track of constant candidates found in the function.
+ ConstCandVecType ConstCandVec;
+
+ /// Keep track of cast instructions we already cloned.
+ SmallDenseMap<Instruction *, Instruction *> ClonedCastMap;
+
+ /// These are the final constants we decided to hoist.
+ SmallVector<ConstantInfo, 8> ConstantVec;
+public:
+ static char ID; // Pass identification, replacement for typeid
+ ConstantHoisting() : FunctionPass(ID), TTI(nullptr), DT(nullptr),
+ Entry(nullptr) {
+ initializeConstantHoistingPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &Fn) override;
+
+ const char *getPassName() const override { return "Constant Hoisting"; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addRequired<TargetTransformInfo>();
+ }
+
+private:
+ /// \brief Initialize the pass.
+ void setup(Function &Fn) {
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ TTI = &getAnalysis<TargetTransformInfo>();
+ Entry = &Fn.getEntryBlock();
+ }
+
+ /// \brief Cleanup.
+ void cleanup() {
+ ConstantVec.clear();
+ ClonedCastMap.clear();
+ ConstCandVec.clear();
+
+ TTI = nullptr;
+ DT = nullptr;
+ Entry = nullptr;
+ }
+
+ Instruction *findMatInsertPt(Instruction *Inst, unsigned Idx = ~0U) const;
+ Instruction *findConstantInsertionPoint(const ConstantInfo &ConstInfo) const;
+ void collectConstantCandidates(ConstCandMapType &ConstCandMap,
+ Instruction *Inst, unsigned Idx,
+ ConstantInt *ConstInt);
+ void collectConstantCandidates(ConstCandMapType &ConstCandMap,
+ Instruction *Inst);
+ void collectConstantCandidates(Function &Fn);
+ void findAndMakeBaseConstant(ConstCandVecType::iterator S,
+ ConstCandVecType::iterator E);
+ void findBaseConstants();
+ void emitBaseConstants(Instruction *Base, Constant *Offset,
+ const ConstantUser &ConstUser);
+ bool emitBaseConstants();
+ void deleteDeadCastInst() const;
+ bool optimizeConstants(Function &Fn);
+};
+}
+
+char ConstantHoisting::ID = 0;
+INITIALIZE_PASS_BEGIN(ConstantHoisting, "consthoist", "Constant Hoisting",
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
+INITIALIZE_PASS_END(ConstantHoisting, "consthoist", "Constant Hoisting",
+ false, false)
+
+FunctionPass *llvm::createConstantHoistingPass() {
+ return new ConstantHoisting();
+}
+
+/// \brief Perform the constant hoisting optimization for the given function.
+bool ConstantHoisting::runOnFunction(Function &Fn) {
+ DEBUG(dbgs() << "********** Begin Constant Hoisting **********\n");
+ DEBUG(dbgs() << "********** Function: " << Fn.getName() << '\n');
+
+ setup(Fn);
+
+ bool MadeChange = optimizeConstants(Fn);
+
+ if (MadeChange) {
+ DEBUG(dbgs() << "********** Function after Constant Hoisting: "
+ << Fn.getName() << '\n');
+ DEBUG(dbgs() << Fn);
+ }
+ DEBUG(dbgs() << "********** End Constant Hoisting **********\n");
+
+ cleanup();
+
+ return MadeChange;
+}
+
+
+/// \brief Find the constant materialization insertion point.
+Instruction *ConstantHoisting::findMatInsertPt(Instruction *Inst,
+ unsigned Idx) const {
+ // If the operand is a cast instruction, then we have to materialize the
+ // constant before the cast instruction.
+ if (Idx != ~0U) {
+ Value *Opnd = Inst->getOperand(Idx);
+ if (auto CastInst = dyn_cast<Instruction>(Opnd))
+ if (CastInst->isCast())
+ return CastInst;
+ }
+
+ // The simple and common case. This also includes constant expressions.
+ if (!isa<PHINode>(Inst) && !isa<LandingPadInst>(Inst))
+ return Inst;
+
+ // We can't insert directly before a phi node or landing pad. Insert before
+ // the terminator of the incoming or dominating block.
+ assert(Entry != Inst->getParent() && "PHI or landing pad in entry block!");
+ if (Idx != ~0U && isa<PHINode>(Inst))
+ return cast<PHINode>(Inst)->getIncomingBlock(Idx)->getTerminator();
+
+ BasicBlock *IDom = DT->getNode(Inst->getParent())->getIDom()->getBlock();
+ return IDom->getTerminator();
+}
+
+/// \brief Find an insertion point that dominates all uses.
+Instruction *ConstantHoisting::
+findConstantInsertionPoint(const ConstantInfo &ConstInfo) const {
+ assert(!ConstInfo.RebasedConstants.empty() && "Invalid constant info entry.");
+ // Collect all basic blocks.
+ SmallPtrSet<BasicBlock *, 8> BBs;
+ for (auto const &RCI : ConstInfo.RebasedConstants)
+ for (auto const &U : RCI.Uses)
+ BBs.insert(findMatInsertPt(U.Inst, U.OpndIdx)->getParent());
+
+ if (BBs.count(Entry))
+ return &Entry->front();
+
+ while (BBs.size() >= 2) {
+ BasicBlock *BB, *BB1, *BB2;
+ BB1 = *BBs.begin();
+ BB2 = *std::next(BBs.begin());
+ BB = DT->findNearestCommonDominator(BB1, BB2);
+ if (BB == Entry)
+ return &Entry->front();
+ BBs.erase(BB1);
+ BBs.erase(BB2);
+ BBs.insert(BB);
+ }
+ assert((BBs.size() == 1) && "Expected only one element.");
+ Instruction &FirstInst = (*BBs.begin())->front();
+ return findMatInsertPt(&FirstInst);
+}
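+
+// For example (hypothetical CFG): with uses in blocks {BB2, BB3, BB5}, the
+// loop above repeatedly replaces two blocks by their nearest common dominator
+// (BB2 and BB3 first, then that result together with BB5) until a single
+// dominating block remains, or the entry block is reached and returned early.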
+
+
+/// \brief Record constant integer ConstInt for instruction Inst at operand
+/// index Idx.
+///
+/// The operand at index Idx is not necessarily the constant integer itself. It
+/// could also be a cast instruction or a constant expression that uses the
+/// constant integer.
+void ConstantHoisting::collectConstantCandidates(ConstCandMapType &ConstCandMap,
+ Instruction *Inst,
+ unsigned Idx,
+ ConstantInt *ConstInt) {
+ unsigned Cost;
+ // Ask the target about the cost of materializing the constant for the given
+ // instruction and operand index.
+ if (auto IntrInst = dyn_cast<IntrinsicInst>(Inst))
+ Cost = TTI->getIntImmCost(IntrInst->getIntrinsicID(), Idx,
+ ConstInt->getValue(), ConstInt->getType());
+ else
+ Cost = TTI->getIntImmCost(Inst->getOpcode(), Idx, ConstInt->getValue(),
+ ConstInt->getType());
+
+ // Ignore cheap integer constants.
+ if (Cost > TargetTransformInfo::TCC_Basic) {
+ ConstCandMapType::iterator Itr;
+ bool Inserted;
+ std::tie(Itr, Inserted) = ConstCandMap.insert(std::make_pair(ConstInt, 0));
+ if (Inserted) {
+ ConstCandVec.push_back(ConstantCandidate(ConstInt));
+ Itr->second = ConstCandVec.size() - 1;
+ }
+ ConstCandVec[Itr->second].addUser(Inst, Idx, Cost);
+ DEBUG(if (isa<ConstantInt>(Inst->getOperand(Idx)))
+ dbgs() << "Collect constant " << *ConstInt << " from " << *Inst
+ << " with cost " << Cost << '\n';
+ else
+ dbgs() << "Collect constant " << *ConstInt << " indirectly from "
+ << *Inst << " via " << *Inst->getOperand(Idx) << " with cost "
+ << Cost << '\n';
+ );
+ }
+}
+
+/// \brief Scan the instruction for expensive integer constants and record them
+/// in the constant candidate vector.
+void ConstantHoisting::collectConstantCandidates(ConstCandMapType &ConstCandMap,
+ Instruction *Inst) {
+ // Skip all cast instructions. They are visited indirectly later on.
+ if (Inst->isCast())
+ return;
+
+ // Can't handle inline asm. Skip it.
+ if (auto Call = dyn_cast<CallInst>(Inst))
+ if (isa<InlineAsm>(Call->getCalledValue()))
+ return;
+
+ // Scan all operands.
+ for (unsigned Idx = 0, E = Inst->getNumOperands(); Idx != E; ++Idx) {
+ Value *Opnd = Inst->getOperand(Idx);
+
+ // Visit constant integers.
+ if (auto ConstInt = dyn_cast<ConstantInt>(Opnd)) {
+ collectConstantCandidates(ConstCandMap, Inst, Idx, ConstInt);
+ continue;
+ }
+
+ // Visit cast instructions that have constant integers.
+ if (auto CastInst = dyn_cast<Instruction>(Opnd)) {
+ // Only visit cast instructions, which have been skipped. All other
+ // instructions should have already been visited.
+ if (!CastInst->isCast())
+ continue;
+
+ if (auto *ConstInt = dyn_cast<ConstantInt>(CastInst->getOperand(0))) {
+ // Pretend the constant is directly used by the instruction and ignore
+ // the cast instruction.
+ collectConstantCandidates(ConstCandMap, Inst, Idx, ConstInt);
+ continue;
+ }
+ }
+
+ // Visit constant expressions that have constant integers.
+ if (auto ConstExpr = dyn_cast<ConstantExpr>(Opnd)) {
+ // Only visit constant cast expressions.
+ if (!ConstExpr->isCast())
+ continue;
+
+ if (auto ConstInt = dyn_cast<ConstantInt>(ConstExpr->getOperand(0))) {
+ // Pretend the constant is directly used by the instruction and ignore
+ // the constant expression.
+ collectConstantCandidates(ConstCandMap, Inst, Idx, ConstInt);
+ continue;
+ }
+ }
+ } // end of for all operands
+}
+
+/// \brief Collect all integer constants in the function that cannot be folded
+/// into an instruction itself.
+void ConstantHoisting::collectConstantCandidates(Function &Fn) {
+ ConstCandMapType ConstCandMap;
+ for (Function::iterator BB : Fn)
+ for (BasicBlock::iterator Inst : *BB)
+ collectConstantCandidates(ConstCandMap, Inst);
+}
+
+/// \brief Find the base constant within the given range and rebase all other
+/// constants with respect to the base constant.
+void ConstantHoisting::findAndMakeBaseConstant(ConstCandVecType::iterator S,
+ ConstCandVecType::iterator E) {
+ auto MaxCostItr = S;
+ unsigned NumUses = 0;
+ // Use the constant that has the maximum cost as base constant.
+ for (auto ConstCand = S; ConstCand != E; ++ConstCand) {
+ NumUses += ConstCand->Uses.size();
+ if (ConstCand->CumulativeCost > MaxCostItr->CumulativeCost)
+ MaxCostItr = ConstCand;
+ }
+
+ // Don't hoist constants that have only one use.
+ if (NumUses <= 1)
+ return;
+
+ ConstantInfo ConstInfo;
+ ConstInfo.BaseConstant = MaxCostItr->ConstInt;
+ Type *Ty = ConstInfo.BaseConstant->getType();
+
+ // Rebase the constants with respect to the base constant.
+ for (auto ConstCand = S; ConstCand != E; ++ConstCand) {
+ APInt Diff = ConstCand->ConstInt->getValue() -
+ ConstInfo.BaseConstant->getValue();
+ Constant *Offset = Diff == 0 ? nullptr : ConstantInt::get(Ty, Diff);
+ ConstInfo.RebasedConstants.push_back(
+ RebasedConstantInfo(std::move(ConstCand->Uses), Offset));
+ }
+ ConstantVec.push_back(ConstInfo);
+}
+
+/// \brief Finds and combines constant candidates that can be easily
+/// rematerialized with an add from a common base constant.
+void ConstantHoisting::findBaseConstants() {
+ // Sort the constants by value and type. This invalidates the mapping!
+ std::sort(ConstCandVec.begin(), ConstCandVec.end(),
+ [](const ConstantCandidate &LHS, const ConstantCandidate &RHS) {
+ if (LHS.ConstInt->getType() != RHS.ConstInt->getType())
+ return LHS.ConstInt->getType()->getBitWidth() <
+ RHS.ConstInt->getType()->getBitWidth();
+ return LHS.ConstInt->getValue().ult(RHS.ConstInt->getValue());
+ });
+
+ // Simple linear scan through the sorted constant candidate vector for viable
+ // merge candidates.
+ auto MinValItr = ConstCandVec.begin();
+ for (auto CC = std::next(ConstCandVec.begin()), E = ConstCandVec.end();
+ CC != E; ++CC) {
+ if (MinValItr->ConstInt->getType() == CC->ConstInt->getType()) {
+ // Check if the constant is in range of an add with immediate.
+ APInt Diff = CC->ConstInt->getValue() - MinValItr->ConstInt->getValue();
+ if ((Diff.getBitWidth() <= 64) &&
+ TTI->isLegalAddImmediate(Diff.getSExtValue()))
+ continue;
+ }
+ // We now either have a different constant type or the constant is no longer
+ // in range of an add with immediate.
+ findAndMakeBaseConstant(MinValItr, CC);
+ // Start a new base constant search.
+ MinValItr = CC;
+ }
+ // Finalize the last base constant search.
+ findAndMakeBaseConstant(MinValItr, ConstCandVec.end());
+}
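+
+// Illustrative sketch (values chosen arbitrarily, not from a test case): after
+// sorting, i64 candidates 0x8000, 0x8008 and 0x8010 fall into one group on a
+// target where 8 and 16 are legal add immediates; whichever of them has the
+// highest cumulative cost becomes the base, and the others are rebased as the
+// base plus or minus a small offset.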
+
+/// \brief Updates the operand at Idx in instruction Inst with the result of
+/// instruction Mat. If the instruction is a PHI node then special
+/// handling for duplicate values from the same incoming basic block is
+/// required.
+/// \return The update will always succeed, but the return value indicates if
+/// Mat was used for the update or not.
+static bool updateOperand(Instruction *Inst, unsigned Idx, Instruction *Mat) {
+ if (auto PHI = dyn_cast<PHINode>(Inst)) {
+ // Check if any previous operand of the PHI node has the same incoming basic
+ // block. This is a very odd case that happens when the incoming basic block
+ // has a switch statement. In this case use the same value as the previous
+ // operand(s), otherwise we will fail verification due to different values.
+ // The values are actually the same, but the variable names are different
+ // and the verifier doesn't like that.
+ BasicBlock *IncomingBB = PHI->getIncomingBlock(Idx);
+ for (unsigned i = 0; i < Idx; ++i) {
+ if (PHI->getIncomingBlock(i) == IncomingBB) {
+ Value *IncomingVal = PHI->getIncomingValue(i);
+ Inst->setOperand(Idx, IncomingVal);
+ return false;
+ }
+ }
+ }
+
+ Inst->setOperand(Idx, Mat);
+ return true;
+}
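+
+// Hypothetical example: if a PHI node has two incoming edges from the same
+// block (say, two case labels of a switch branching to the same successor),
+// only the first of those operands is rewritten to use Mat; the later
+// duplicate is set to the first operand's value and the function returns
+// false so the caller can clean up the unused materialization.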
+
+/// \brief Emit materialization code for all rebased constants and update their
+/// users.
+void ConstantHoisting::emitBaseConstants(Instruction *Base, Constant *Offset,
+ const ConstantUser &ConstUser) {
+ Instruction *Mat = Base;
+ if (Offset) {
+ Instruction *InsertionPt = findMatInsertPt(ConstUser.Inst,
+ ConstUser.OpndIdx);
+ Mat = BinaryOperator::Create(Instruction::Add, Base, Offset,
+ "const_mat", InsertionPt);
+
+ DEBUG(dbgs() << "Materialize constant (" << *Base->getOperand(0)
+ << " + " << *Offset << ") in BB "
+ << Mat->getParent()->getName() << '\n' << *Mat << '\n');
+ Mat->setDebugLoc(ConstUser.Inst->getDebugLoc());
+ }
+ Value *Opnd = ConstUser.Inst->getOperand(ConstUser.OpndIdx);
+
+ // Visit constant integer.
+ if (isa<ConstantInt>(Opnd)) {
+ DEBUG(dbgs() << "Update: " << *ConstUser.Inst << '\n');
+ if (!updateOperand(ConstUser.Inst, ConstUser.OpndIdx, Mat) && Offset)
+ Mat->eraseFromParent();
+ DEBUG(dbgs() << "To : " << *ConstUser.Inst << '\n');
+ return;
+ }
+
+ // Visit cast instruction.
+ if (auto CastInst = dyn_cast<Instruction>(Opnd)) {
+ assert(CastInst->isCast() && "Expected a cast instruction!");
+ // Check if we already have visited this cast instruction before to avoid
+ // unnecessary cloning.
+ Instruction *&ClonedCastInst = ClonedCastMap[CastInst];
+ if (!ClonedCastInst) {
+ ClonedCastInst = CastInst->clone();
+ ClonedCastInst->setOperand(0, Mat);
+ ClonedCastInst->insertAfter(CastInst);
+ // Use the same debug location as the original cast instruction.
+ ClonedCastInst->setDebugLoc(CastInst->getDebugLoc());
+ DEBUG(dbgs() << "Clone instruction: " << *CastInst << '\n'
+ << "To : " << *ClonedCastInst << '\n');
+ }
+
+ DEBUG(dbgs() << "Update: " << *ConstUser.Inst << '\n');
+ updateOperand(ConstUser.Inst, ConstUser.OpndIdx, ClonedCastInst);
+ DEBUG(dbgs() << "To : " << *ConstUser.Inst << '\n');
+ return;
+ }
+
+ // Visit constant expression.
+ if (auto ConstExpr = dyn_cast<ConstantExpr>(Opnd)) {
+ Instruction *ConstExprInst = ConstExpr->getAsInstruction();
+ ConstExprInst->setOperand(0, Mat);
+ ConstExprInst->insertBefore(findMatInsertPt(ConstUser.Inst,
+ ConstUser.OpndIdx));
+
+ // Use the same debug location as the instruction we are about to update.
+ ConstExprInst->setDebugLoc(ConstUser.Inst->getDebugLoc());
+
+ DEBUG(dbgs() << "Create instruction: " << *ConstExprInst << '\n'
+ << "From : " << *ConstExpr << '\n');
+ DEBUG(dbgs() << "Update: " << *ConstUser.Inst << '\n');
+ if (!updateOperand(ConstUser.Inst, ConstUser.OpndIdx, ConstExprInst)) {
+ ConstExprInst->eraseFromParent();
+ if (Offset)
+ Mat->eraseFromParent();
+ }
+ DEBUG(dbgs() << "To : " << *ConstUser.Inst << '\n');
+ return;
+ }
+}
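+
+// For instance (hypothetical IR), a use of the constant through a cast
+// expression such as
+//   %0 = load i64* inttoptr (i64 big_constant to i64*)
+// is rewritten by turning the constant expression into a real instruction
+// that consumes the hoisted value:
+//   %const = bitcast i64 big_constant to i64
+//   %ptr = inttoptr i64 %const to i64*
+//   %0 = load i64* %ptr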
+
+/// \brief Hoist and hide the base constant behind a bitcast and emit
+/// materialization code for derived constants.
+bool ConstantHoisting::emitBaseConstants() {
+ bool MadeChange = false;
+ for (auto const &ConstInfo : ConstantVec) {
+ // Hoist and hide the base constant behind a bitcast.
+ Instruction *IP = findConstantInsertionPoint(ConstInfo);
+ IntegerType *Ty = ConstInfo.BaseConstant->getType();
+ Instruction *Base =
+ new BitCastInst(ConstInfo.BaseConstant, Ty, "const", IP);
+ DEBUG(dbgs() << "Hoist constant (" << *ConstInfo.BaseConstant << ") to BB "
+ << IP->getParent()->getName() << '\n' << *Base << '\n');
+ NumConstantsHoisted++;
+
+ // Emit materialization code for all rebased constants.
+ for (auto const &RCI : ConstInfo.RebasedConstants) {
+ NumConstantsRebased++;
+ for (auto const &U : RCI.Uses)
+ emitBaseConstants(Base, RCI.Offset, U);
+ }
+
+ // Use the same debug location as the last user of the constant.
+ assert(!Base->use_empty() && "The use list is empty!?");
+ assert(isa<Instruction>(Base->user_back()) &&
+ "All uses should be instructions.");
+ Base->setDebugLoc(cast<Instruction>(Base->user_back())->getDebugLoc());
+
+ // Correct for base constant, which we counted above too.
+ NumConstantsRebased--;
+ MadeChange = true;
+ }
+ return MadeChange;
+}
+
+/// \brief Check all cast instructions we made a copy of and remove them if they
+/// have no more users.
+void ConstantHoisting::deleteDeadCastInst() const {
+ for (auto const &I : ClonedCastMap)
+ if (I.first->use_empty())
+ I.first->eraseFromParent();
+}
+
+/// \brief Optimize expensive integer constants in the given function.
+bool ConstantHoisting::optimizeConstants(Function &Fn) {
+ // Collect all constant candidates.
+ collectConstantCandidates(Fn);
+
+ // There are no constant candidates to worry about.
+ if (ConstCandVec.empty())
+ return false;
+
+ // Combine constants that can be easily materialized with an add from a common
+ // base constant.
+ findBaseConstants();
+
+ // There are no constants to emit.
+ if (ConstantVec.empty())
+ return false;
+
+ // Finally hoist the base constant and emit materialization code for dependent
+ // constants.
+ bool MadeChange = emitBaseConstants();
+
+ // Cleanup dead instructions.
+ deleteDeadCastInst();
+
+ return MadeChange;
+}
diff --git a/contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp b/contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp
index d5a96ec..dd51ce1 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp
@@ -18,19 +18,20 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "constprop"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Pass.h"
-#include "llvm/Support/InstIterator.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include <set>
using namespace llvm;
+#define DEBUG_TYPE "constprop"
+
STATISTIC(NumInstKilled, "Number of instructions killed");
namespace {
@@ -40,9 +41,9 @@ namespace {
initializeConstantPropagationPass(*PassRegistry::getPassRegistry());
}
- bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<TargetLibraryInfo>();
}
@@ -67,7 +68,8 @@ bool ConstantPropagation::runOnFunction(Function &F) {
WorkList.insert(&*i);
}
bool Changed = false;
- DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
while (!WorkList.empty()) {
@@ -75,12 +77,11 @@ bool ConstantPropagation::runOnFunction(Function &F) {
WorkList.erase(WorkList.begin()); // Get an element from the worklist...
if (!I->use_empty()) // Don't muck with dead instructions...
- if (Constant *C = ConstantFoldInstruction(I, TD, TLI)) {
+ if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
// Add all of the users of this instruction to the worklist, they might
// be constant propagatable now...
- for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
- UI != UE; ++UI)
- WorkList.insert(cast<Instruction>(*UI));
+ for (User *U : I->users())
+ WorkList.insert(cast<Instruction>(U));
// Replace all of the uses of a variable with uses of the constant.
I->replaceAllUsesWith(C);
diff --git a/contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index 995782e..0829462 100644
--- a/contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -11,21 +11,22 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "correlated-value-propagation"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
+#define DEBUG_TYPE "correlated-value-propagation"
+
STATISTIC(NumPhis, "Number of phis propagated");
STATISTIC(NumSelects, "Number of selects propagated");
STATISTIC(NumMemAccess, "Number of memory access targets propagated");
@@ -48,9 +49,9 @@ namespace {
initializeCorrelatedValuePropagationPass(*PassRegistry::getPassRegistry());
}
- bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<LazyValueInfo>();
}
};
@@ -138,7 +139,7 @@ bool CorrelatedValuePropagation::processPHI(PHINode *P) {
}
bool CorrelatedValuePropagation::processMemAccess(Instruction *I) {
- Value *Pointer = 0;
+ Value *Pointer = nullptr;
if (LoadInst *L = dyn_cast<LoadInst>(I))
Pointer = L->getPointerOperand();
else
@@ -281,6 +282,9 @@ bool CorrelatedValuePropagation::processSwitch(SwitchInst *SI) {
}
bool CorrelatedValuePropagation::runOnFunction(Function &F) {
+ if (skipOptnoneFunction(F))
+ return false;
+
LVI = &getAnalysis<LazyValueInfo>();
bool FnChanged = false;
diff --git a/contrib/llvm/lib/Transforms/Scalar/DCE.cpp b/contrib/llvm/lib/Transforms/Scalar/DCE.cpp
index e8a090a..99fac75 100644
--- a/contrib/llvm/lib/Transforms/Scalar/DCE.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/DCE.cpp
@@ -16,16 +16,17 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "dce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Pass.h"
-#include "llvm/Support/InstIterator.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
+#define DEBUG_TYPE "dce"
+
STATISTIC(DIEEliminated, "Number of insts removed by DIE pass");
STATISTIC(DCEEliminated, "Number of insts removed");
@@ -38,7 +39,9 @@ namespace {
DeadInstElimination() : BasicBlockPass(ID) {
initializeDeadInstEliminationPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnBasicBlock(BasicBlock &BB) {
+ bool runOnBasicBlock(BasicBlock &BB) override {
+ if (skipOptnoneFunction(BB))
+ return false;
TargetLibraryInfo *TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
bool Changed = false;
for (BasicBlock::iterator DI = BB.begin(); DI != BB.end(); ) {
@@ -52,7 +55,7 @@ namespace {
return Changed;
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
}
};
@@ -77,9 +80,9 @@ namespace {
initializeDCEPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
}
};
@@ -89,6 +92,9 @@ char DCE::ID = 0;
INITIALIZE_PASS(DCE, "dce", "Dead Code Elimination", false, false)
bool DCE::runOnFunction(Function &F) {
+ if (skipOptnoneFunction(F))
+ return false;
+
TargetLibraryInfo *TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
// Start out with all of the instructions in the worklist...
diff --git a/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 57432c7..3af8ee7 100644
--- a/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -15,19 +15,18 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "dse"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
@@ -38,6 +37,8 @@
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
+#define DEBUG_TYPE "dse"
+
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther , "Number of other instrs removed");
@@ -49,14 +50,17 @@ namespace {
const TargetLibraryInfo *TLI;
static char ID; // Pass identification, replacement for typeid
- DSE() : FunctionPass(ID), AA(0), MD(0), DT(0) {
+ DSE() : FunctionPass(ID), AA(nullptr), MD(nullptr), DT(nullptr) {
initializeDSEPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnFunction(Function &F) {
+ bool runOnFunction(Function &F) override {
+ if (skipOptnoneFunction(F))
+ return false;
+
AA = &getAnalysis<AliasAnalysis>();
MD = &getAnalysis<MemoryDependenceAnalysis>();
- DT = &getAnalysis<DominatorTree>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
TLI = AA->getTargetLibraryInfo();
bool Changed = false;
@@ -66,7 +70,7 @@ namespace {
if (DT->isReachableFromEntry(I))
Changed |= runOnBasicBlock(*I);
- AA = 0; MD = 0; DT = 0;
+ AA = nullptr; MD = nullptr; DT = nullptr;
return Changed;
}
@@ -76,13 +80,13 @@ namespace {
void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
SmallSetVector<Value*, 16> &DeadStackObjects);
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
- AU.addRequired<DominatorTree>();
+ AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<AliasAnalysis>();
AU.addRequired<MemoryDependenceAnalysis>();
AU.addPreserved<AliasAnalysis>();
- AU.addPreserved<DominatorTree>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
AU.addPreserved<MemoryDependenceAnalysis>();
}
};
@@ -90,7 +94,7 @@ namespace {
char DSE::ID = 0;
INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(DSE, "dse", "Dead Store Elimination", false, false)
@@ -108,9 +112,9 @@ FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
/// If ValueSet is non-null, remove any deleted instructions from it as well.
///
static void DeleteDeadInstruction(Instruction *I,
- MemoryDependenceAnalysis &MD,
- const TargetLibraryInfo *TLI,
- SmallSetVector<Value*, 16> *ValueSet = 0) {
+ MemoryDependenceAnalysis &MD,
+ const TargetLibraryInfo *TLI,
+ SmallSetVector<Value*, 16> *ValueSet = nullptr) {
SmallVector<Instruction*, 32> NowDeadInsts;
NowDeadInsts.push_back(I);
@@ -128,7 +132,7 @@ static void DeleteDeadInstruction(Instruction *I,
for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
Value *Op = DeadInst->getOperand(op);
- DeadInst->setOperand(op, 0);
+ DeadInst->setOperand(op, nullptr);
// If this operand just became dead, add it to the NowDeadInsts list.
if (!Op->use_empty()) continue;
@@ -190,6 +194,7 @@ static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo *TLI) {
/// describe the memory operations for this instruction.
static AliasAnalysis::Location
getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
+ const DataLayout *DL = AA.getDataLayout();
if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
return AA.getLocation(SI);
@@ -199,13 +204,13 @@ getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
// If we don't have target data around, an unknown size in Location means
// that we should use the size of the pointee type. This isn't valid for
// memset/memcpy, which writes more than an i8.
- if (Loc.Size == AliasAnalysis::UnknownSize && AA.getDataLayout() == 0)
+ if (Loc.Size == AliasAnalysis::UnknownSize && DL == nullptr)
return AliasAnalysis::Location();
return Loc;
}
IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
- if (II == 0) return AliasAnalysis::Location();
+ if (!II) return AliasAnalysis::Location();
switch (II->getIntrinsicID()) {
default: return AliasAnalysis::Location(); // Unhandled intrinsic.
@@ -213,7 +218,7 @@ getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
// If we don't have target data around, an unknown size in Location means
// that we should use the size of the pointee type. This isn't valid for
// init.trampoline, which writes more than an i8.
- if (AA.getDataLayout() == 0) return AliasAnalysis::Location();
+ if (!DL) return AliasAnalysis::Location();
// FIXME: We don't know the size of the trampoline, so we can't really
// handle it here.
@@ -341,6 +346,7 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
AliasAnalysis &AA,
int64_t &EarlierOff,
int64_t &LaterOff) {
+ const DataLayout *DL = AA.getDataLayout();
const Value *P1 = Earlier.Ptr->stripPointerCasts();
const Value *P2 = Later.Ptr->stripPointerCasts();
@@ -354,8 +360,7 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
// If we have no DataLayout information around, then the size of the store
// is inferrable from the pointee type. If they are the same type, then
// we know that the store is safe.
- if (AA.getDataLayout() == 0 &&
- Later.Ptr->getType() == Earlier.Ptr->getType())
+ if (DL == nullptr && Later.Ptr->getType() == Earlier.Ptr->getType())
return OverwriteComplete;
return OverwriteUnknown;
@@ -369,17 +374,14 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
// Otherwise, we have to have size information, and the later store has to be
// larger than the earlier one.
if (Later.Size == AliasAnalysis::UnknownSize ||
- Earlier.Size == AliasAnalysis::UnknownSize ||
- AA.getDataLayout() == 0)
+ Earlier.Size == AliasAnalysis::UnknownSize || DL == nullptr)
return OverwriteUnknown;
// Check to see if the later store is to the entire object (either a global,
- // an alloca, or a byval argument). If so, then it clearly overwrites any
- // other store to the same object.
- const DataLayout *TD = AA.getDataLayout();
-
- const Value *UO1 = GetUnderlyingObject(P1, TD),
- *UO2 = GetUnderlyingObject(P2, TD);
+ // an alloca, or a byval/inalloca argument). If so, then it clearly
+ // overwrites any other store to the same object.
+ const Value *UO1 = GetUnderlyingObject(P1, DL),
+ *UO2 = GetUnderlyingObject(P2, DL);
// If we can't resolve the same pointers to the same object, then we can't
// analyze them at all.
@@ -397,8 +399,8 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
// pointers are equal, then we can reason about the two stores.
EarlierOff = 0;
LaterOff = 0;
- const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, TD);
- const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, TD);
+ const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
+ const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);
// If the base pointers still differ, we have two completely different stores.
if (BP1 != BP2)
@@ -460,7 +462,7 @@ static bool isPossibleSelfRead(Instruction *Inst,
// Self reads can only happen for instructions that read memory. Get the
// location read.
AliasAnalysis::Location InstReadLoc = getLocForRead(Inst, AA);
- if (InstReadLoc.Ptr == 0) return false; // Not a reading instruction.
+ if (!InstReadLoc.Ptr) return false; // Not a reading instruction.
// If the read and written loc obviously don't alias, it isn't a read.
if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;
@@ -527,7 +529,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
DeleteDeadInstruction(SI, *MD, TLI);
- if (NextInst == 0) // Next instruction deleted.
+ if (!NextInst) // Next instruction deleted.
BBI = BB.begin();
else if (BBI != BB.begin()) // Revisit this instruction if possible.
--BBI;
@@ -542,7 +544,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);
// If we didn't get a useful location, fail.
- if (Loc.Ptr == 0)
+ if (!Loc.Ptr)
continue;
while (InstDep.isDef() || InstDep.isClobber()) {
@@ -556,7 +558,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
Instruction *DepWrite = InstDep.getInst();
AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA);
// If we didn't get a useful location, or if it isn't a size, bail out.
- if (DepLoc.Ptr == 0)
+ if (!DepLoc.Ptr)
break;
// If we find a write that is a) removable (i.e., non-volatile), b) is
@@ -679,7 +681,7 @@ bool DSE::HandleFree(CallInst *F) {
if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
break;
- Instruction *Next = llvm::next(BasicBlock::iterator(Dependency));
+ Instruction *Next = std::next(BasicBlock::iterator(Dependency));
// DCE instructions only used to calculate that store
DeleteDeadInstruction(Dependency, *MD, TLI);
@@ -701,22 +703,6 @@ bool DSE::HandleFree(CallInst *F) {
return MadeChange;
}
-namespace {
- struct CouldRef {
- typedef Value *argument_type;
- const CallSite CS;
- AliasAnalysis *AA;
-
- bool operator()(Value *I) {
- // See if the call site touches the value.
- AliasAnalysis::ModRefResult A =
- AA->getModRefInfo(CS, I, getPointerSize(I, *AA));
-
- return A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref;
- }
- };
-}
-
/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block. Ex:
/// %A = alloca i32
@@ -742,11 +728,11 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
DeadStackObjects.insert(I);
}
- // Treat byval arguments the same, stores to them are dead at the end of the
- // function.
+ // Treat byval or inalloca arguments the same, stores to them are dead at the
+ // end of the function.
for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
AE = BB.getParent()->arg_end(); AI != AE; ++AI)
- if (AI->hasByValAttr())
+ if (AI->hasByValOrInAllocaAttr())
DeadStackObjects.insert(AI);
// Scan the basic block backwards
@@ -776,7 +762,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
E = Pointers.end(); I != E; ++I) {
dbgs() << **I;
- if (llvm::next(I) != E)
+ if (std::next(I) != E)
dbgs() << ", ";
}
dbgs() << '\n');
@@ -818,8 +804,13 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
// If the call might load from any of our allocas, then any store above
// the call is live.
- CouldRef Pred = { CS, AA };
- DeadStackObjects.remove_if(Pred);
+ DeadStackObjects.remove_if([&](Value *I) {
+ // See if the call site touches the value.
+ AliasAnalysis::ModRefResult A =
+ AA->getModRefInfo(CS, I, getPointerSize(I, *AA));
+
+ return A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref;
+ });
// If all of the allocas were clobbered by the call then we're not going
// to find anything else to process.
@@ -862,20 +853,6 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
return MadeChange;
}
-namespace {
- struct CouldAlias {
- typedef Value *argument_type;
- const AliasAnalysis::Location &LoadedLoc;
- AliasAnalysis *AA;
-
- bool operator()(Value *I) {
- // See if the loaded location could alias the stack location.
- AliasAnalysis::Location StackLoc(I, getPointerSize(I, *AA));
- return !AA->isNoAlias(StackLoc, LoadedLoc);
- }
- };
-}
-
/// RemoveAccessedObjects - Check to see if the specified location may alias any
/// of the stack objects in the DeadStackObjects set. If so, they become live
/// because the location is being loaded.
@@ -895,6 +872,9 @@ void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
}
// Remove objects that could alias LoadedLoc.
- CouldAlias Pred = { LoadedLoc, AA };
- DeadStackObjects.remove_if(Pred);
+ DeadStackObjects.remove_if([&](Value *I) {
+ // See if the loaded location could alias the stack location.
+ AliasAnalysis::Location StackLoc(I, getPointerSize(I, *AA));
+ return !AA->isNoAlias(StackLoc, LoadedLoc);
+ });
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index 5266894..735f5c1 100644
--- a/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -12,23 +12,24 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "early-cse"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
-#include <deque>
+#include <vector>
using namespace llvm;
+#define DEBUG_TYPE "early-cse"
+
STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
STATISTIC(NumCSE, "Number of instructions CSE'd");
STATISTIC(NumCSELoad, "Number of load instructions CSE'd");
@@ -207,7 +208,7 @@ namespace {
return false;
CallInst *CI = dyn_cast<CallInst>(Inst);
- if (CI == 0 || !CI->onlyReadsMemory())
+ if (!CI || !CI->onlyReadsMemory())
return false;
return true;
}
@@ -262,7 +263,7 @@ namespace {
/// cases.
class EarlyCSE : public FunctionPass {
public:
- const DataLayout *TD;
+ const DataLayout *DL;
const TargetLibraryInfo *TLI;
DominatorTree *DT;
typedef RecyclingAllocator<BumpPtrAllocator,
@@ -303,7 +304,7 @@ public:
initializeEarlyCSEPass(*PassRegistry::getPassRegistry());
}
- bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
private:
@@ -376,8 +377,8 @@ private:
bool processNode(DomTreeNode *Node);
// This transformation requires dominator postdominator info
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<DominatorTree>();
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<TargetLibraryInfo>();
AU.setPreservesCFG();
}
@@ -392,7 +393,7 @@ FunctionPass *llvm::createEarlyCSEPass() {
}
INITIALIZE_PASS_BEGIN(EarlyCSE, "early-cse", "Early CSE", false, false)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(EarlyCSE, "early-cse", "Early CSE", false, false)
@@ -405,14 +406,14 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// have invalidated the live-out memory values of our parent value. For now,
// just be conservative and invalidate memory if this block has multiple
// predecessors.
- if (BB->getSinglePredecessor() == 0)
+ if (!BB->getSinglePredecessor())
++CurrentGeneration;
/// LastStore - Keep track of the last non-volatile store that we saw... for
/// as long as there is no instruction that reads memory. If we see a store
/// to the same location, we delete the dead store. This zaps trivial dead
/// stores which can occur in bitfield code among other things.
- StoreInst *LastStore = 0;
+ StoreInst *LastStore = nullptr;
bool Changed = false;
@@ -432,7 +433,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// If the instruction can be simplified (e.g. X+0 = X) then replace it with
// its simpler value.
- if (Value *V = SimplifyInstruction(Inst, TD, TLI, DT)) {
+ if (Value *V = SimplifyInstruction(Inst, DL, TLI, DT)) {
DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V << '\n');
Inst->replaceAllUsesWith(V);
Inst->eraseFromParent();
@@ -462,7 +463,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
// Ignore volatile loads.
if (!LI->isSimple()) {
- LastStore = 0;
+ LastStore = nullptr;
continue;
}
@@ -470,7 +471,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// generation, replace this instruction.
std::pair<Value*, unsigned> InVal =
AvailableLoads->lookup(Inst->getOperand(0));
- if (InVal.first != 0 && InVal.second == CurrentGeneration) {
+ if (InVal.first != nullptr && InVal.second == CurrentGeneration) {
DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst << " to: "
<< *InVal.first << '\n');
if (!Inst->use_empty()) Inst->replaceAllUsesWith(InVal.first);
@@ -483,20 +484,20 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// Otherwise, remember that we have this instruction.
AvailableLoads->insert(Inst->getOperand(0),
std::pair<Value*, unsigned>(Inst, CurrentGeneration));
- LastStore = 0;
+ LastStore = nullptr;
continue;
}
// If this instruction may read from memory, forget LastStore.
if (Inst->mayReadFromMemory())
- LastStore = 0;
+ LastStore = nullptr;
// If this is a read-only call, process it.
if (CallValue::canHandle(Inst)) {
// If we have an available version of this call, and if it is the right
// generation, replace this instruction.
std::pair<Value*, unsigned> InVal = AvailableCalls->lookup(Inst);
- if (InVal.first != 0 && InVal.second == CurrentGeneration) {
+ if (InVal.first != nullptr && InVal.second == CurrentGeneration) {
DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst << " to: "
<< *InVal.first << '\n');
if (!Inst->use_empty()) Inst->replaceAllUsesWith(InVal.first);
@@ -528,7 +529,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
LastStore->eraseFromParent();
Changed = true;
++NumDSE;
- LastStore = 0;
+ LastStore = nullptr;
continue;
}
@@ -552,11 +553,15 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
bool EarlyCSE::runOnFunction(Function &F) {
- std::deque<StackNode *> nodesToProcess;
+ if (skipOptnoneFunction(F))
+ return false;
+
+ std::vector<StackNode *> nodesToProcess;
- TD = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = &getAnalysis<TargetLibraryInfo>();
- DT = &getAnalysis<DominatorTree>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
// Tables that the pass uses when walking the domtree.
ScopedHTType AVTable;
@@ -570,7 +575,7 @@ bool EarlyCSE::runOnFunction(Function &F) {
bool Changed = false;
// Process the root node.
- nodesToProcess.push_front(
+ nodesToProcess.push_back(
new StackNode(AvailableValues, AvailableLoads, AvailableCalls,
CurrentGeneration, DT->getRootNode(),
DT->getRootNode()->begin(),
@@ -583,7 +588,7 @@ bool EarlyCSE::runOnFunction(Function &F) {
while (!nodesToProcess.empty()) {
// Grab the first item off the stack. Set the current generation, remove
// the node from the stack, and process it.
- StackNode *NodeToProcess = nodesToProcess.front();
+ StackNode *NodeToProcess = nodesToProcess.back();
// Initialize class members.
CurrentGeneration = NodeToProcess->currentGeneration();
@@ -597,7 +602,7 @@ bool EarlyCSE::runOnFunction(Function &F) {
} else if (NodeToProcess->childIter() != NodeToProcess->end()) {
// Push the next child onto the stack.
DomTreeNode *child = NodeToProcess->nextChild();
- nodesToProcess.push_front(
+ nodesToProcess.push_back(
new StackNode(AvailableValues,
AvailableLoads,
AvailableCalls,
@@ -607,7 +612,7 @@ bool EarlyCSE::runOnFunction(Function &F) {
// It has been processed, and there are no more children to process,
// so delete it and pop it off the stack.
delete NodeToProcess;
- nodesToProcess.pop_front();
+ nodesToProcess.pop_back();
}
} // while (!nodes...)
diff --git a/contrib/llvm/lib/Transforms/Scalar/FlattenCFGPass.cpp b/contrib/llvm/lib/Transforms/Scalar/FlattenCFGPass.cpp
index e7de07f..0430c18 100644
--- a/contrib/llvm/lib/Transforms/Scalar/FlattenCFGPass.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/FlattenCFGPass.cpp
@@ -11,14 +11,15 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "flattencfg"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/CFG.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
+#define DEBUG_TYPE "flattencfg"
+
namespace {
struct FlattenCFGPass : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
@@ -26,9 +27,9 @@ public:
FlattenCFGPass() : FunctionPass(ID) {
initializeFlattenCFGPassPass(*PassRegistry::getPassRegistry());
}
- bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<AliasAnalysis>();
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/GVN.cpp b/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
index 6af269d..106eba0 100644
--- a/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -15,35 +15,34 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
-#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
+#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/PatternMatch.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
@@ -51,6 +50,8 @@
using namespace llvm;
using namespace PatternMatch;
+#define DEBUG_TYPE "gvn"
+
STATISTIC(NumGVNInstr, "Number of instructions deleted");
STATISTIC(NumGVNLoad, "Number of loads deleted");
STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
@@ -214,13 +215,13 @@ Expression ValueTable::create_cmp_expression(unsigned Opcode,
}
Expression ValueTable::create_extractvalue_expression(ExtractValueInst *EI) {
- assert(EI != 0 && "Not an ExtractValueInst?");
+ assert(EI && "Not an ExtractValueInst?");
Expression e;
e.type = EI->getType();
e.opcode = 0;
IntrinsicInst *I = dyn_cast<IntrinsicInst>(EI->getAggregateOperand());
- if (I != 0 && EI->getNumIndices() == 1 && *EI->idx_begin() == 0 ) {
+ if (I != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0 ) {
// EI might be an extract from one of our recognised intrinsics. If it
// is we'll synthesize a semantically equivalent expression instead on
// an extract value expression.
@@ -328,7 +329,7 @@ uint32_t ValueTable::lookup_or_add_call(CallInst *C) {
const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
MD->getNonLocalCallDependency(CallSite(C));
// FIXME: Move the checking logic to MemDep!
- CallInst* cdep = 0;
+ CallInst* cdep = nullptr;
// Check to see if we have a single dominating call instruction that is
// identical to C.
@@ -339,8 +340,8 @@ uint32_t ValueTable::lookup_or_add_call(CallInst *C) {
// We don't handle non-definitions. If we already have a call, reject
// instruction dependencies.
- if (!I->getResult().isDef() || cdep != 0) {
- cdep = 0;
+ if (!I->getResult().isDef() || cdep != nullptr) {
+ cdep = nullptr;
break;
}
@@ -351,7 +352,7 @@ uint32_t ValueTable::lookup_or_add_call(CallInst *C) {
continue;
}
- cdep = 0;
+ cdep = nullptr;
break;
}
@@ -552,7 +553,7 @@ namespace {
static AvailableValueInBlock getUndef(BasicBlock *BB) {
AvailableValueInBlock Res;
Res.BB = BB;
- Res.Val.setPointer(0);
+ Res.Val.setPointer(nullptr);
Res.Val.setInt(UndefVal);
Res.Offset = 0;
return Res;
@@ -587,7 +588,7 @@ namespace {
bool NoLoads;
MemoryDependenceAnalysis *MD;
DominatorTree *DT;
- const DataLayout *TD;
+ const DataLayout *DL;
const TargetLibraryInfo *TLI;
SetVector<BasicBlock *> DeadBlocks;
@@ -612,11 +613,11 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
explicit GVN(bool noloads = false)
- : FunctionPass(ID), NoLoads(noloads), MD(0) {
+ : FunctionPass(ID), NoLoads(noloads), MD(nullptr) {
initializeGVNPass(*PassRegistry::getPassRegistry());
}
- bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
/// markInstructionForDeletion - This removes the specified instruction from
/// our various maps and marks it for deletion.
@@ -625,7 +626,7 @@ namespace {
InstrsToErase.push_back(I);
}
- const DataLayout *getDataLayout() const { return TD; }
+ const DataLayout *getDataLayout() const { return DL; }
DominatorTree &getDominatorTree() const { return *DT; }
AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
MemoryDependenceAnalysis &getMemDep() const { return *MD; }
@@ -650,7 +651,7 @@ namespace {
/// removeFromLeaderTable - Scan the list of values corresponding to a given
/// value number, and remove the given instruction if encountered.
void removeFromLeaderTable(uint32_t N, Instruction *I, BasicBlock *BB) {
- LeaderTableEntry* Prev = 0;
+ LeaderTableEntry* Prev = nullptr;
LeaderTableEntry* Curr = &LeaderTable[N];
while (Curr->Val != I || Curr->BB != BB) {
@@ -662,8 +663,8 @@ namespace {
Prev->Next = Curr->Next;
} else {
if (!Curr->Next) {
- Curr->Val = 0;
- Curr->BB = 0;
+ Curr->Val = nullptr;
+ Curr->BB = nullptr;
} else {
LeaderTableEntry* Next = Curr->Next;
Curr->Val = Next->Val;
@@ -677,14 +678,14 @@ namespace {
SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
// This transformation requires dominator postdominator info
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<DominatorTree>();
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<TargetLibraryInfo>();
if (!NoLoads)
AU.addRequired<MemoryDependenceAnalysis>();
AU.addRequired<AliasAnalysis>();
- AU.addPreserved<DominatorTree>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
AU.addPreserved<AliasAnalysis>();
}
@@ -727,7 +728,7 @@ FunctionPass *llvm::createGVNPass(bool NoLoads) {
INITIALIZE_PASS_BEGIN(GVN, "gvn", "Global Value Numbering", false, false)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(GVN, "gvn", "Global Value Numbering", false, false)
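The two hunks above show the LLVM 3.5 split of the dominator analysis: the result object is still DominatorTree (now declared in llvm/IR/Dominators.h), but the pass that owns it is DominatorTreeWrapperPass, so both the analysis-usage declarations and the pass-dependency macros change. A minimal sketch of how a client pass requests and fetches the tree after the change (hypothetical pass, not GVN itself):

    #include "llvm/IR/Dominators.h"
    #include "llvm/Pass.h"
    using namespace llvm;

    namespace {
    struct UsesDomTree : public FunctionPass {
      static char ID;
      UsesDomTree() : FunctionPass(ID) {}

      void getAnalysisUsage(AnalysisUsage &AU) const override {
        AU.addRequired<DominatorTreeWrapperPass>();   // was AU.addRequired<DominatorTree>()
        AU.addPreserved<DominatorTreeWrapperPass>();
      }

      bool runOnFunction(Function &F) override {
        // The wrapper pass hands out the actual DominatorTree object.
        DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
        (void)DT;   // real clients would query DT.dominates(...), etc.
        return false;
      }
    };
    }
    char UsesDomTree::ID = 0;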
@@ -818,8 +819,7 @@ SpeculationFailure:
// Mark as unavailable.
EntryVal = 0;
- for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
- BBWorklist.push_back(*I);
+ BBWorklist.append(succ_begin(Entry), succ_end(Entry));
} while (!BBWorklist.empty());
return false;
@@ -830,7 +830,7 @@ SpeculationFailure:
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
Type *LoadTy,
- const DataLayout &TD) {
+ const DataLayout &DL) {
// If the loaded or stored value is an first class array or struct, don't try
// to transform them. We need to be able to bitcast to integer.
if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
@@ -839,8 +839,8 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
return false;
// The store has to be at least as big as the load.
- if (TD.getTypeSizeInBits(StoredVal->getType()) <
- TD.getTypeSizeInBits(LoadTy))
+ if (DL.getTypeSizeInBits(StoredVal->getType()) <
+ DL.getTypeSizeInBits(LoadTy))
return false;
return true;
@@ -855,15 +855,15 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
Type *LoadedTy,
Instruction *InsertPt,
- const DataLayout &TD) {
- if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
- return 0;
+ const DataLayout &DL) {
+ if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, DL))
+ return nullptr;
// If this is already the right type, just return it.
Type *StoredValTy = StoredVal->getType();
- uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
- uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);
+ uint64_t StoreSize = DL.getTypeSizeInBits(StoredValTy);
+ uint64_t LoadSize = DL.getTypeSizeInBits(LoadedTy);
// If the store and reload are the same size, we can always reuse it.
if (StoreSize == LoadSize) {
@@ -874,13 +874,13 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
// Convert source pointers to integers, which can be bitcast.
if (StoredValTy->getScalarType()->isPointerTy()) {
- StoredValTy = TD.getIntPtrType(StoredValTy);
+ StoredValTy = DL.getIntPtrType(StoredValTy);
StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
}
Type *TypeToCastTo = LoadedTy;
if (TypeToCastTo->getScalarType()->isPointerTy())
- TypeToCastTo = TD.getIntPtrType(TypeToCastTo);
+ TypeToCastTo = DL.getIntPtrType(TypeToCastTo);
if (StoredValTy != TypeToCastTo)
StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);
@@ -899,7 +899,7 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
// Convert source pointers to integers, which can be manipulated.
if (StoredValTy->getScalarType()->isPointerTy()) {
- StoredValTy = TD.getIntPtrType(StoredValTy);
+ StoredValTy = DL.getIntPtrType(StoredValTy);
StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
}
@@ -911,7 +911,7 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
// If this is a big-endian system, we need to shift the value down to the low
// bits so that a truncate will work.
- if (TD.isBigEndian()) {
+ if (DL.isBigEndian()) {
Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
}
@@ -942,15 +942,15 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
Value *WritePtr,
uint64_t WriteSizeInBits,
- const DataLayout &TD) {
+ const DataLayout &DL) {
// If the loaded or stored value is a first class array or struct, don't try
// to transform them. We need to be able to bitcast to integer.
if (LoadTy->isStructTy() || LoadTy->isArrayTy())
return -1;
int64_t StoreOffset = 0, LoadOffset = 0;
- Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr,StoreOffset,&TD);
- Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, &TD);
+ Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr,StoreOffset,&DL);
+ Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, &DL);
if (StoreBase != LoadBase)
return -1;
@@ -972,7 +972,7 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
// If the load and store don't overlap at all, the store doesn't provide
// anything to the load. In this case, they really don't alias at all, AA
// must have gotten confused.
- uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);
+ uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy);
if ((WriteSizeInBits & 7) | (LoadSize & 7))
return -1;
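The check "(WriteSizeInBits & 7) | (LoadSize & 7)" above rejects the clobber unless both sizes are whole bytes; the overlap and offset math in this function is done in byte units, so sub-byte accesses cannot be handled exactly. A small worked sketch of the predicate:

    #include <cstdint>

    // True when either size has a sub-byte component, i.e. when byte-based
    // overlap reasoning would not be exact. (x & 7) equals x % 8 here.
    static bool hasSubByteSize(uint64_t WriteSizeInBits, uint64_t LoadSizeInBits) {
      return ((WriteSizeInBits & 7) | (LoadSizeInBits & 7)) != 0;
    }

    // hasSubByteSize(32, 8) == false  // 4-byte store, 1-byte load: fine
    // hasSubByteSize(33, 8) == true   // a 33-bit store is not a whole number of bytes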
@@ -1015,61 +1015,61 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
StoreInst *DepSI,
- const DataLayout &TD) {
+ const DataLayout &DL) {
// Cannot handle reading from store of first-class aggregate yet.
if (DepSI->getValueOperand()->getType()->isStructTy() ||
DepSI->getValueOperand()->getType()->isArrayTy())
return -1;
Value *StorePtr = DepSI->getPointerOperand();
- uint64_t StoreSize =TD.getTypeSizeInBits(DepSI->getValueOperand()->getType());
+ uint64_t StoreSize =DL.getTypeSizeInBits(DepSI->getValueOperand()->getType());
return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
- StorePtr, StoreSize, TD);
+ StorePtr, StoreSize, DL);
}
/// AnalyzeLoadFromClobberingLoad - This function is called when we have a
/// memdep query of a load that ends up being clobbered by another load. See if
/// the other load can feed into the second load.
static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
- LoadInst *DepLI, const DataLayout &TD){
+ LoadInst *DepLI, const DataLayout &DL){
// Cannot handle reading from store of first-class aggregate yet.
if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy())
return -1;
Value *DepPtr = DepLI->getPointerOperand();
- uint64_t DepSize = TD.getTypeSizeInBits(DepLI->getType());
- int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, TD);
+ uint64_t DepSize = DL.getTypeSizeInBits(DepLI->getType());
+ int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, DL);
if (R != -1) return R;
// If we have a load/load clobber an DepLI can be widened to cover this load,
// then we should widen it!
int64_t LoadOffs = 0;
const Value *LoadBase =
- GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, &TD);
- unsigned LoadSize = TD.getTypeStoreSize(LoadTy);
+ GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, &DL);
+ unsigned LoadSize = DL.getTypeStoreSize(LoadTy);
unsigned Size = MemoryDependenceAnalysis::
- getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI, TD);
+ getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI, DL);
if (Size == 0) return -1;
- return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, TD);
+ return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, DL);
}
static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
MemIntrinsic *MI,
- const DataLayout &TD) {
+ const DataLayout &DL) {
// If the mem operation is a non-constant size, we can't handle it.
ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
- if (SizeCst == 0) return -1;
+ if (!SizeCst) return -1;
uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;
// If this is memset, we just need to see if the offset is valid in the size
// of the memset..
if (MI->getIntrinsicID() == Intrinsic::memset)
return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
- MemSizeInBits, TD);
+ MemSizeInBits, DL);
// If we have a memcpy/memmove, the only case we can handle is if this is a
// copy from constant memory. In that case, we can read directly from the
@@ -1077,14 +1077,14 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
MemTransferInst *MTI = cast<MemTransferInst>(MI);
Constant *Src = dyn_cast<Constant>(MTI->getSource());
- if (Src == 0) return -1;
+ if (!Src) return -1;
- GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, &TD));
- if (GV == 0 || !GV->isConstant()) return -1;
+ GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, &DL));
+ if (!GV || !GV->isConstant()) return -1;
// See if the access is within the bounds of the transfer.
int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
- MI->getDest(), MemSizeInBits, TD);
+ MI->getDest(), MemSizeInBits, DL);
if (Offset == -1)
return Offset;
@@ -1097,7 +1097,7 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
- if (ConstantFoldLoadFromConstPtr(Src, &TD))
+ if (ConstantFoldLoadFromConstPtr(Src, &DL))
return Offset;
return -1;
}
@@ -1110,11 +1110,11 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
/// before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
Type *LoadTy,
- Instruction *InsertPt, const DataLayout &TD){
+ Instruction *InsertPt, const DataLayout &DL){
LLVMContext &Ctx = SrcVal->getType()->getContext();
- uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
- uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;
+ uint64_t StoreSize = (DL.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
+ uint64_t LoadSize = (DL.getTypeSizeInBits(LoadTy) + 7) / 8;
IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
@@ -1122,13 +1122,13 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
// to an integer type to start with.
if (SrcVal->getType()->getScalarType()->isPointerTy())
SrcVal = Builder.CreatePtrToInt(SrcVal,
- TD.getIntPtrType(SrcVal->getType()));
+ DL.getIntPtrType(SrcVal->getType()));
if (!SrcVal->getType()->isIntegerTy())
SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8));
// Shift the bits to the least significant depending on endianness.
unsigned ShiftAmt;
- if (TD.isLittleEndian())
+ if (DL.isLittleEndian())
ShiftAmt = Offset*8;
else
ShiftAmt = (StoreSize-LoadSize-Offset)*8;
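The endianness-dependent shift above lines up the bytes of the old store that the smaller, possibly offset load actually reads; StoreSize and LoadSize are byte counts at this point. A worked sketch of the two cases with hypothetical numbers:

    #include <cstdint>

    // Bits to shift the stored integer right by, so that the low LoadSize
    // bytes of the result are exactly the bytes the load covers.
    static uint64_t shiftAmountBits(bool LittleEndian, uint64_t StoreSize,
                                    uint64_t LoadSize, uint64_t Offset) {
      return LittleEndian ? Offset * 8
                          : (StoreSize - LoadSize - Offset) * 8;
    }

    // Example: an 8-byte store feeding a 2-byte load at byte offset 2.
    //   little-endian: bytes 2..3 sit at bits 16..31, so shift right by 16
    //   big-endian:    bytes 2..3 sit at bits 32..47 (counted from the low
    //                  end), so shift right by 32
    // shiftAmountBits(true, 8, 2, 2) == 16; shiftAmountBits(false, 8, 2, 2) == 32.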
@@ -1139,7 +1139,7 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
if (LoadSize != StoreSize)
SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8));
- return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
+ return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, DL);
}
/// GetLoadValueForLoad - This function is called when we have a
@@ -1150,11 +1150,11 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
Type *LoadTy, Instruction *InsertPt,
GVN &gvn) {
- const DataLayout &TD = *gvn.getDataLayout();
+ const DataLayout &DL = *gvn.getDataLayout();
// If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to
// widen SrcVal out to a larger load.
- unsigned SrcValSize = TD.getTypeStoreSize(SrcVal->getType());
- unsigned LoadSize = TD.getTypeStoreSize(LoadTy);
+ unsigned SrcValSize = DL.getTypeStoreSize(SrcVal->getType());
+ unsigned LoadSize = DL.getTypeStoreSize(LoadTy);
if (Offset+LoadSize > SrcValSize) {
assert(SrcVal->isSimple() && "Cannot widen volatile/atomic load!");
assert(SrcVal->getType()->isIntegerTy() && "Can't widen non-integer load");
@@ -1186,7 +1186,7 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
// Replace uses of the original load with the wider load. On a big endian
// system, we need to shift down to get the relevant bits.
Value *RV = NewLoad;
- if (TD.isBigEndian())
+ if (DL.isBigEndian())
RV = Builder.CreateLShr(RV,
NewLoadSize*8-SrcVal->getType()->getPrimitiveSizeInBits());
RV = Builder.CreateTrunc(RV, SrcVal->getType());
@@ -1201,7 +1201,7 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
SrcVal = NewLoad;
}
- return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, TD);
+ return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, DL);
}
@@ -1209,9 +1209,9 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
Type *LoadTy, Instruction *InsertPt,
- const DataLayout &TD){
+ const DataLayout &DL){
LLVMContext &Ctx = LoadTy->getContext();
- uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;
+ uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy)/8;
IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
@@ -1242,7 +1242,7 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
++NumBytesSet;
}
- return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
+ return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, DL);
}
// Otherwise, this is a memcpy/memmove from a constant global.
@@ -1258,7 +1258,7 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
- return ConstantFoldLoadFromConstPtr(Src, &TD);
+ return ConstantFoldLoadFromConstPtr(Src, &DL);
}
@@ -1324,10 +1324,10 @@ Value *AvailableValueInBlock::MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) c
if (isSimpleValue()) {
Res = getSimpleValue();
if (Res->getType() != LoadTy) {
- const DataLayout *TD = gvn.getDataLayout();
- assert(TD && "Need target data to handle type mismatch case");
+ const DataLayout *DL = gvn.getDataLayout();
+ assert(DL && "Need target data to handle type mismatch case");
Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
- *TD);
+ *DL);
DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " "
<< *getSimpleValue() << '\n'
@@ -1346,10 +1346,10 @@ Value *AvailableValueInBlock::MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) c
<< *Res << '\n' << "\n\n\n");
}
} else if (isMemIntrinValue()) {
- const DataLayout *TD = gvn.getDataLayout();
- assert(TD && "Need target data to handle type mismatch case");
+ const DataLayout *DL = gvn.getDataLayout();
+ assert(DL && "Need target data to handle type mismatch case");
Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
- LoadTy, BB->getTerminator(), *TD);
+ LoadTy, BB->getTerminator(), *DL);
DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
<< " " << *getMemIntrinValue() << '\n'
<< *Res << '\n' << "\n\n\n");
@@ -1402,9 +1402,9 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
// read by the load, we can extract the bits we need for the load from the
// stored value.
if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
- if (TD && Address) {
+ if (DL && Address) {
int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
- DepSI, *TD);
+ DepSI, *DL);
if (Offset != -1) {
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
DepSI->getValueOperand(),
@@ -1421,10 +1421,9 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) {
// If this is a clobber and L is the first instruction in its block, then
// we have the first instruction in the entry block.
- if (DepLI != LI && Address && TD) {
- int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(),
- LI->getPointerOperand(),
- DepLI, *TD);
+ if (DepLI != LI && Address && DL) {
+ int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(), Address,
+ DepLI, *DL);
if (Offset != -1) {
ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB,DepLI,
@@ -1437,9 +1436,9 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
// If the clobbering value is a memset/memcpy/memmove, see if we can
// forward a value on from it.
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
- if (TD && Address) {
+ if (DL && Address) {
int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
- DepMI, *TD);
+ DepMI, *DL);
if (Offset != -1) {
ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
Offset));
@@ -1465,14 +1464,21 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
continue;
}
+ // Loading from calloc (which zero initializes memory) -> zero
+ if (isCallocLikeFn(DepInst, TLI)) {
+ ValuesPerBlock.push_back(AvailableValueInBlock::get(
+ DepBB, Constant::getNullValue(LI->getType())));
+ continue;
+ }
+
if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
// Reject loads and stores that are to the same address but are of
// different types if we have to.
if (S->getValueOperand()->getType() != LI->getType()) {
// If the stored value is larger or equal to the loaded value, we can
// reuse it.
- if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
- LI->getType(), *TD)) {
+ if (!DL || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
+ LI->getType(), *DL)) {
UnavailableBlocks.push_back(DepBB);
continue;
}
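The new isCallocLikeFn case above (and a matching one later in processLoad) lets GVN treat a load whose dependency is a calloc-like allocation as the constant zero, since such allocators return zero-initialized memory. A small C++ illustration of the source pattern this targets, with hypothetical names:

    #include <cstdlib>

    int firstElement(size_t n) {
      int *p = static_cast<int *>(calloc(n, sizeof(int)));
      if (!p)
        return -1;
      // Nothing stores to p[0] between the allocation and this load, so GVN
      // can now replace the load with the constant 0 and delete it.
      int first = p[0];
      free(p);
      return first;
    }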
@@ -1488,7 +1494,7 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
if (LD->getType() != LI->getType()) {
// If the stored value is larger or equal to the loaded value, we can
// reuse it.
- if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
+ if (!DL || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*DL)) {
UnavailableBlocks.push_back(DepBB);
continue;
}
@@ -1541,7 +1547,7 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
// Check to see how many predecessors have the loaded value fully
// available.
- DenseMap<BasicBlock*, Value*> PredLoads;
+ MapVector<BasicBlock *, Value *> PredLoads;
DenseMap<BasicBlock*, char> FullyAvailableBlocks;
for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
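PredLoads switches from DenseMap to MapVector above because load PRE later iterates over it to insert new loads; MapVector preserves insertion order, so the emitted IR no longer depends on DenseMap's hashing and bucket layout. A minimal sketch of the container's behaviour, with hypothetical keys rather than the PRE code itself:

    #include "llvm/ADT/MapVector.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    static void demo() {
      MapVector<int, const char *> M;
      M[3] = "first inserted";
      M[1] = "second inserted";
      M[2] = "third inserted";
      // Iteration visits entries in insertion order (3, 1, 2), unlike a
      // DenseMap, whose iteration order is effectively arbitrary.
      for (auto &Entry : M)
        outs() << Entry.first << " -> " << Entry.second << "\n";
    }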
@@ -1555,7 +1561,6 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks, 0)) {
continue;
}
- PredLoads[Pred] = 0;
if (Pred->getTerminator()->getNumSuccessors() != 1) {
if (isa<IndirectBrInst>(Pred->getTerminator())) {
@@ -1572,11 +1577,14 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
}
CriticalEdgePred.push_back(Pred);
+ } else {
+ // Only add the predecessors that will not be split for now.
+ PredLoads[Pred] = nullptr;
}
}
// Decide whether PRE is profitable for this load.
- unsigned NumUnavailablePreds = PredLoads.size();
+ unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size();
assert(NumUnavailablePreds != 0 &&
"Fully available value should already be eliminated!");
@@ -1588,12 +1596,10 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
return false;
// Split critical edges, and update the unavailable predecessors accordingly.
- for (SmallVectorImpl<BasicBlock *>::iterator I = CriticalEdgePred.begin(),
- E = CriticalEdgePred.end(); I != E; I++) {
- BasicBlock *OrigPred = *I;
+ for (BasicBlock *OrigPred : CriticalEdgePred) {
BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
- PredLoads.erase(OrigPred);
- PredLoads[NewPred] = 0;
+ assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
+ PredLoads[NewPred] = nullptr;
DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
<< LoadBB->getName() << '\n');
}
@@ -1601,9 +1607,8 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
// Check if the load can safely be moved to all the unavailable predecessors.
bool CanDoPRE = true;
SmallVector<Instruction*, 8> NewInsts;
- for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
- E = PredLoads.end(); I != E; ++I) {
- BasicBlock *UnavailablePred = I->first;
+ for (auto &PredLoad : PredLoads) {
+ BasicBlock *UnavailablePred = PredLoad.first;
// Do PHI translation to get its value in the predecessor if necessary. The
// returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
@@ -1611,21 +1616,21 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
// If all preds have a single successor, then we know it is safe to insert
// the load on the pred (?!?), so we can insert code to materialize the
// pointer if it is not available.
- PHITransAddr Address(LI->getPointerOperand(), TD);
- Value *LoadPtr = 0;
+ PHITransAddr Address(LI->getPointerOperand(), DL);
+ Value *LoadPtr = nullptr;
LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
*DT, NewInsts);
// If we couldn't find or insert a computation of this phi translated value,
// we fail PRE.
- if (LoadPtr == 0) {
+ if (!LoadPtr) {
DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
<< *LI->getPointerOperand() << "\n");
CanDoPRE = false;
break;
}
- I->second = LoadPtr;
+ PredLoad.second = LoadPtr;
}
if (!CanDoPRE) {
@@ -1634,8 +1639,8 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
if (MD) MD->removeInstruction(I);
I->eraseFromParent();
}
- // HINT:Don't revert the edge-splitting as following transformation may
- // also need to split these critial edges.
+ // HINT: Don't revert the edge-splitting as following transformation may
+ // also need to split these critical edges.
return !CriticalEdgePred.empty();
}
@@ -1656,10 +1661,9 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
VN.lookup_or_add(NewInsts[i]);
}
- for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
- E = PredLoads.end(); I != E; ++I) {
- BasicBlock *UnavailablePred = I->first;
- Value *LoadPtr = I->second;
+ for (const auto &PredLoad : PredLoads) {
+ BasicBlock *UnavailablePred = PredLoad.first;
+ Value *LoadPtr = PredLoad.second;
Instruction *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
LI->getAlignment(),
@@ -1712,7 +1716,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
!Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
DEBUG(
dbgs() << "GVN: non-local load ";
- WriteAsOperand(dbgs(), LI);
+ LI->printAsOperand(dbgs());
dbgs() << " has unknown dependencies\n";
);
return false;
@@ -1778,7 +1782,7 @@ static void patchReplacementInstruction(Instruction *I, Value *Repl) {
MDNode *ReplMD = Metadata[i].second;
switch(Kind) {
default:
- ReplInst->setMetadata(Kind, NULL); // Remove unknown metadata
+ ReplInst->setMetadata(Kind, nullptr); // Remove unknown metadata
break;
case LLVMContext::MD_dbg:
llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
@@ -1789,11 +1793,15 @@ static void patchReplacementInstruction(Instruction *I, Value *Repl) {
ReplInst->setMetadata(Kind, MDNode::getMostGenericRange(IMD, ReplMD));
break;
case LLVMContext::MD_prof:
- llvm_unreachable("MD_prof in a non terminator instruction");
+ llvm_unreachable("MD_prof in a non-terminator instruction");
break;
case LLVMContext::MD_fpmath:
ReplInst->setMetadata(Kind, MDNode::getMostGenericFPMath(IMD, ReplMD));
break;
+ case LLVMContext::MD_invariant_load:
+ // Only set the !invariant.load if it is present in both instructions.
+ ReplInst->setMetadata(Kind, IMD);
+ break;
}
}
}
@@ -1823,7 +1831,7 @@ bool GVN::processLoad(LoadInst *L) {
// If we have a clobber and target data is around, see if this is a clobber
// that we can fix up through code synthesis.
- if (Dep.isClobber() && TD) {
+ if (Dep.isClobber() && DL) {
// Check to see if we have something like this:
// store i32 123, i32* %P
// %A = bitcast i32* %P to i8*
@@ -1834,14 +1842,14 @@ bool GVN::processLoad(LoadInst *L) {
// a common base + constant offset, and if the previous store (or memset)
// completely covers this load. This sort of thing can happen in bitfield
// access code.
- Value *AvailVal = 0;
+ Value *AvailVal = nullptr;
if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst())) {
int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
L->getPointerOperand(),
- DepSI, *TD);
+ DepSI, *DL);
if (Offset != -1)
AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset,
- L->getType(), L, *TD);
+ L->getType(), L, *DL);
}
// Check to see if we have something like this:
@@ -1856,7 +1864,7 @@ bool GVN::processLoad(LoadInst *L) {
int Offset = AnalyzeLoadFromClobberingLoad(L->getType(),
L->getPointerOperand(),
- DepLI, *TD);
+ DepLI, *DL);
if (Offset != -1)
AvailVal = GetLoadValueForLoad(DepLI, Offset, L->getType(), L, *this);
}
@@ -1866,9 +1874,9 @@ bool GVN::processLoad(LoadInst *L) {
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
L->getPointerOperand(),
- DepMI, *TD);
+ DepMI, *DL);
if (Offset != -1)
- AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, *TD);
+ AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, *DL);
}
if (AvailVal) {
@@ -1890,7 +1898,7 @@ bool GVN::processLoad(LoadInst *L) {
DEBUG(
// fast print dep, using operator<< on instruction is too slow.
dbgs() << "GVN: load ";
- WriteAsOperand(dbgs(), L);
+ L->printAsOperand(dbgs());
Instruction *I = Dep.getInst();
dbgs() << " is clobbered by " << *I << '\n';
);
@@ -1905,7 +1913,7 @@ bool GVN::processLoad(LoadInst *L) {
DEBUG(
// fast print dep, using operator<< on instruction is too slow.
dbgs() << "GVN: load ";
- WriteAsOperand(dbgs(), L);
+ L->printAsOperand(dbgs());
dbgs() << " has unknown dependence\n";
);
return false;
@@ -1919,10 +1927,10 @@ bool GVN::processLoad(LoadInst *L) {
// actually have the same type. See if we know how to reuse the stored
// value (depending on its type).
if (StoredVal->getType() != L->getType()) {
- if (TD) {
+ if (DL) {
StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
- L, *TD);
- if (StoredVal == 0)
+ L, *DL);
+ if (!StoredVal)
return false;
DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
@@ -1948,10 +1956,10 @@ bool GVN::processLoad(LoadInst *L) {
// the same type. See if we know how to reuse the previously loaded value
// (depending on its type).
if (DepLI->getType() != L->getType()) {
- if (TD) {
+ if (DL) {
AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
- L, *TD);
- if (AvailableVal == 0)
+ L, *DL);
+ if (!AvailableVal)
return false;
DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
@@ -1991,6 +1999,15 @@ bool GVN::processLoad(LoadInst *L) {
}
}
+ // If this load follows a calloc (which zero initializes memory),
+ // then the loaded value is zero
+ if (isCallocLikeFn(DepInst, TLI)) {
+ L->replaceAllUsesWith(Constant::getNullValue(L->getType()));
+ markInstructionForDeletion(L);
+ ++NumGVNLoad;
+ return true;
+ }
+
return false;
}
@@ -2001,9 +2018,9 @@ bool GVN::processLoad(LoadInst *L) {
// a few comparisons of DFS numbers.
Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) {
LeaderTableEntry Vals = LeaderTable[num];
- if (!Vals.Val) return 0;
+ if (!Vals.Val) return nullptr;
- Value *Val = 0;
+ Value *Val = nullptr;
if (DT->dominates(Vals.BB, BB)) {
Val = Vals.Val;
if (isa<Constant>(Val)) return Val;
@@ -2030,7 +2047,7 @@ unsigned GVN::replaceAllDominatedUsesWith(Value *From, Value *To,
unsigned Count = 0;
for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
UI != UE; ) {
- Use &U = (UI++).getUse();
+ Use &U = *UI++;
if (DT->dominates(Root, U)) {
U.set(To);
@@ -2054,7 +2071,7 @@ static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
const BasicBlock *Src = E.getStart();
assert((!Pred || Pred == Src) && "No edge between these basic blocks!");
(void)Src;
- return Pred != 0;
+ return Pred != nullptr;
}
/// propagateEquality - The given values are known to be equal in every block
@@ -2202,7 +2219,7 @@ bool GVN::processInstruction(Instruction *I) {
// to value numbering it. Value numbering often exposes redundancies, for
// example if it determines that %y is equal to %x then the instruction
// "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
- if (Value *V = SimplifyInstruction(I, TD, TLI, DT)) {
+ if (Value *V = SimplifyInstruction(I, DL, TLI, DT)) {
I->replaceAllUsesWith(V);
if (MD && V->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(V);
@@ -2298,7 +2315,7 @@ bool GVN::processInstruction(Instruction *I) {
// Perform fast-path value-number based elimination of values inherited from
// dominators.
Value *repl = findLeader(I->getParent(), Num);
- if (repl == 0) {
+ if (!repl) {
// Failure, just remember this instance for future use.
addToLeaderTable(Num, I, I->getParent());
return false;
@@ -2314,10 +2331,14 @@ bool GVN::processInstruction(Instruction *I) {
/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function& F) {
+ if (skipOptnoneFunction(F))
+ return false;
+
if (!NoLoads)
MD = &getAnalysis<MemoryDependenceAnalysis>();
- DT = &getAnalysis<DominatorTree>();
- TD = getAnalysisIfAvailable<DataLayout>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = &getAnalysis<TargetLibraryInfo>();
VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
VN.setMemDep(MD);
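Two 3.5-era idioms appear in the runOnFunction change above: skipOptnoneFunction(F) makes the pass a no-op on functions marked optnone, and DataLayout is now reached through the optional DataLayoutPass, so DL may legitimately stay null. A condensed sketch of the pattern, in a hypothetical pass body:

    #include "llvm/IR/DataLayout.h"
    #include "llvm/Pass.h"
    using namespace llvm;

    namespace {
    struct NeedsDataLayout : public FunctionPass {
      static char ID;
      const DataLayout *DL = nullptr;
      NeedsDataLayout() : FunctionPass(ID) {}

      bool runOnFunction(Function &F) override {
        if (skipOptnoneFunction(F))   // honour the optnone attribute
          return false;
        // DataLayout is optional: only use it when the module provides one.
        DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
        DL = DLP ? &DLP->getDataLayout() : nullptr;
        return false;
      }
    };
    }
    char NeedsDataLayout::ID = 0;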
@@ -2419,10 +2440,7 @@ bool GVN::processBlock(BasicBlock *BB) {
bool GVN::performPRE(Function &F) {
bool Changed = false;
SmallVector<std::pair<Value*, BasicBlock*>, 8> predMap;
- for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
- DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
- BasicBlock *CurrentBlock = *DI;
-
+ for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) {
// Nothing to PRE in the entry block.
if (CurrentBlock == &F.getEntryBlock()) continue;
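The explicit df_iterator loop above becomes a range-based for over depth_first(...); the adaptor from llvm/ADT/DepthFirstIterator.h wraps the same iterators, so the visit order is unchanged and only the boilerplate goes away. A small sketch of the same idiom in a hypothetical helper:

    #include "llvm/ADT/DepthFirstIterator.h"
    #include "llvm/IR/CFG.h"          // GraphTraits for BasicBlock
    #include "llvm/IR/Function.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    // Walk the CFG depth-first from the entry block, skipping unreachable
    // blocks, exactly as the old df_begin()/df_end() loop did.
    static void printReachableBlocks(Function &F) {
      for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
        outs() << BB->getName() << "\n";
    }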
@@ -2462,7 +2480,7 @@ bool GVN::performPRE(Function &F) {
// more complicated to get right.
unsigned NumWith = 0;
unsigned NumWithout = 0;
- BasicBlock *PREPred = 0;
+ BasicBlock *PREPred = nullptr;
predMap.clear();
for (pred_iterator PI = pred_begin(CurrentBlock),
@@ -2480,8 +2498,8 @@ bool GVN::performPRE(Function &F) {
}
Value* predV = findLeader(P, ValNo);
- if (predV == 0) {
- predMap.push_back(std::make_pair(static_cast<Value *>(0), P));
+ if (!predV) {
+ predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
PREPred = P;
++NumWithout;
} else if (predV == CurInst) {
@@ -2635,9 +2653,8 @@ bool GVN::iterateOnFunction(Function &F) {
//
std::vector<BasicBlock *> BBVect;
BBVect.reserve(256);
- for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
- DE = df_end(DT->getRootNode()); DI != DE; ++DI)
- BBVect.push_back(DI->getBlock());
+ for (DomTreeNode *x : depth_first(DT->getRootNode()))
+ BBVect.push_back(x->getBlock());
for (std::vector<BasicBlock *>::iterator I = BBVect.begin(), E = BBVect.end();
I != E; I++)
diff --git a/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index 235aaaa..e83a5c42 100644
--- a/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -24,23 +24,22 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "indvars"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -50,6 +49,8 @@
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
using namespace llvm;
+#define DEBUG_TYPE "indvars"
+
STATISTIC(NumWidened , "Number of indvars widened");
STATISTIC(NumReplaced , "Number of exit values replaced");
STATISTIC(NumLFTR , "Number of loop exit tests replaced");
@@ -63,12 +64,15 @@ static cl::opt<bool> VerifyIndvars(
"verify-indvars", cl::Hidden,
cl::desc("Verify the ScalarEvolution result after running indvars"));
+static cl::opt<bool> ReduceLiveIVs("liv-reduce", cl::Hidden,
+ cl::desc("Reduce live induction variables."));
+
namespace {
class IndVarSimplify : public LoopPass {
LoopInfo *LI;
ScalarEvolution *SE;
DominatorTree *DT;
- DataLayout *TD;
+ const DataLayout *DL;
TargetLibraryInfo *TLI;
SmallVector<WeakVH, 16> DeadInsts;
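The new ReduceLiveIVs flag above follows the usual cl::opt pattern: a hidden boolean command-line option, off by default, read like a plain bool wherever the pass wants to gate the behaviour. A minimal sketch with a hypothetical flag name:

    #include "llvm/Support/CommandLine.h"
    using namespace llvm;

    // -my-experimental-thing is hidden from -help, defaults to false, and is
    // usable anywhere in the file as an ordinary boolean.
    static cl::opt<bool> MyExperimentalThing(
        "my-experimental-thing", cl::Hidden, cl::init(false),
        cl::desc("Enable the experimental thing."));

    static bool shouldDoThing() { return MyExperimentalThing; }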
@@ -76,15 +80,15 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
- IndVarSimplify() : LoopPass(ID), LI(0), SE(0), DT(0), TD(0),
- Changed(false) {
+ IndVarSimplify() : LoopPass(ID), LI(nullptr), SE(nullptr), DT(nullptr),
+ DL(nullptr), Changed(false) {
initializeIndVarSimplifyPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnLoop(Loop *L, LPPassManager &LPM);
+ bool runOnLoop(Loop *L, LPPassManager &LPM) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<DominatorTree>();
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<LoopInfo>();
AU.addRequired<ScalarEvolution>();
AU.addRequiredID(LoopSimplifyID);
@@ -96,7 +100,7 @@ namespace {
}
private:
- virtual void releaseMemory() {
+ void releaseMemory() override {
DeadInsts.clear();
}
@@ -119,7 +123,7 @@ namespace {
char IndVarSimplify::ID = 0;
INITIALIZE_PASS_BEGIN(IndVarSimplify, "indvars",
"Induction Variable Simplification", false, false)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
@@ -193,7 +197,7 @@ static Instruction *getInsertPointForUses(Instruction *User, Value *Def,
if (!PHI)
return User;
- Instruction *InsertPt = 0;
+ Instruction *InsertPt = nullptr;
for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i) {
if (PHI->getIncomingValue(i) != Def)
continue;
@@ -254,34 +258,34 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) {
// an add or increment value can not be represented by an integer.
BinaryOperator *Incr =
dyn_cast<BinaryOperator>(PN->getIncomingValue(BackEdge));
- if (Incr == 0 || Incr->getOpcode() != Instruction::FAdd) return;
+ if (Incr == nullptr || Incr->getOpcode() != Instruction::FAdd) return;
// If this is not an add of the PHI with a constantfp, or if the constant fp
// is not an integer, bail out.
ConstantFP *IncValueVal = dyn_cast<ConstantFP>(Incr->getOperand(1));
int64_t IncValue;
- if (IncValueVal == 0 || Incr->getOperand(0) != PN ||
+ if (IncValueVal == nullptr || Incr->getOperand(0) != PN ||
!ConvertToSInt(IncValueVal->getValueAPF(), IncValue))
return;
// Check Incr uses. One user is PN and the other user is an exit condition
// used by the conditional terminator.
- Value::use_iterator IncrUse = Incr->use_begin();
+ Value::user_iterator IncrUse = Incr->user_begin();
Instruction *U1 = cast<Instruction>(*IncrUse++);
- if (IncrUse == Incr->use_end()) return;
+ if (IncrUse == Incr->user_end()) return;
Instruction *U2 = cast<Instruction>(*IncrUse++);
- if (IncrUse != Incr->use_end()) return;
+ if (IncrUse != Incr->user_end()) return;
// Find exit condition, which is an fcmp. If it doesn't exist, or if it isn't
// only used by a branch, we can't transform it.
FCmpInst *Compare = dyn_cast<FCmpInst>(U1);
if (!Compare)
Compare = dyn_cast<FCmpInst>(U2);
- if (Compare == 0 || !Compare->hasOneUse() ||
- !isa<BranchInst>(Compare->use_back()))
+ if (!Compare || !Compare->hasOneUse() ||
+ !isa<BranchInst>(Compare->user_back()))
return;
- BranchInst *TheBr = cast<BranchInst>(Compare->use_back());
+ BranchInst *TheBr = cast<BranchInst>(Compare->user_back());
// We need to verify that the branch actually controls the iteration count
// of the loop. If not, the new IV can overflow and no one will notice.
@@ -298,7 +302,7 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) {
// transform it.
ConstantFP *ExitValueVal = dyn_cast<ConstantFP>(Compare->getOperand(1));
int64_t ExitValue;
- if (ExitValueVal == 0 ||
+ if (ExitValueVal == nullptr ||
!ConvertToSInt(ExitValueVal->getValueAPF(), ExitValue))
return;
@@ -494,6 +498,21 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) {
unsigned NumPreds = PN->getNumIncomingValues();
+ // We would like to be able to RAUW single-incoming value PHI nodes. We
+ // have to be certain this is safe even when this is an LCSSA PHI node.
+ // While the computed exit value is no longer varying in *this* loop, the
+ // exit block may be an exit block for an outer containing loop as well,
+ // the exit value may be varying in the outer loop, and thus it may still
+ // require an LCSSA PHI node. The safe case is when this is
+ // single-predecessor PHI node (LCSSA) and the exit block containing it is
+ // part of the enclosing loop, or this is the outer most loop of the nest.
+ // In either case the exit value could (at most) be varying in the same
+ // loop body as the phi node itself. Thus if it is in turn used outside of
+ // an enclosing loop it will only be via a separate LCSSA node.
+ bool LCSSASafePhiForRAUW =
+ NumPreds == 1 &&
+ (!L->getParentLoop() || L->getParentLoop() == LI->getLoopFor(ExitBB));
+
// Iterate over all of the PHI nodes.
BasicBlock::iterator BBI = ExitBB->begin();
while ((PN = dyn_cast<PHINode>(BBI++))) {
@@ -545,8 +564,8 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) {
unsigned NumHardInternalUses = 0;
unsigned NumSoftExternalUses = 0;
unsigned NumUses = 0;
- for (Value::use_iterator IB=Inst->use_begin(), IE=Inst->use_end();
- IB!=IE && NumUses<=6 ; ++IB) {
+ for (auto IB = Inst->user_begin(), IE = Inst->user_end();
+ IB != IE && NumUses <= 6; ++IB) {
Instruction *UseInstr = cast<Instruction>(*IB);
unsigned Opc = UseInstr->getOpcode();
NumUses++;
@@ -558,9 +577,9 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) {
// Do not count the Phi as a use. LCSSA may have inserted
// plenty of trivial ones.
NumUses--;
- for (Value::use_iterator PB=UseInstr->use_begin(),
- PE=UseInstr->use_end();
- PB!=PE && NumUses<=6 ; ++PB, ++NumUses) {
+ for (auto PB = UseInstr->user_begin(),
+ PE = UseInstr->user_end();
+ PB != PE && NumUses <= 6; ++PB, ++NumUses) {
unsigned PhiOpc = cast<Instruction>(*PB)->getOpcode();
if (PhiOpc != Instruction::Call && PhiOpc != Instruction::Ret)
NumSoftExternalUses++;
@@ -594,17 +613,18 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) {
if (isInstructionTriviallyDead(Inst, TLI))
DeadInsts.push_back(Inst);
- if (NumPreds == 1) {
- // Completely replace a single-pred PHI. This is safe, because the
- // NewVal won't be variant in the loop, so we don't need an LCSSA phi
- // node anymore.
+ // If we determined that this PHI is safe to replace even if an LCSSA
+ // PHI, do so.
+ if (LCSSASafePhiForRAUW) {
PN->replaceAllUsesWith(ExitVal);
PN->eraseFromParent();
}
}
- if (NumPreds != 1) {
- // Clone the PHI and delete the original one. This lets IVUsers and
- // any other maps purge the original user from their records.
+
+ // If we were unable to completely replace the PHI node, clone the PHI
+ // and delete the original one. This lets IVUsers and any other maps
+ // purge the original user from their records.
+ if (!LCSSASafePhiForRAUW) {
PHINode *NewPN = cast<PHINode>(PN->clone());
NewPN->takeName(PN);
NewPN->insertBefore(PN);
@@ -632,36 +652,23 @@ namespace {
Type *WidestNativeType; // Widest integer type created [sz]ext
bool IsSigned; // Was an sext user seen before a zext?
- WideIVInfo() : NarrowIV(0), WidestNativeType(0), IsSigned(false) {}
- };
-
- class WideIVVisitor : public IVVisitor {
- ScalarEvolution *SE;
- const DataLayout *TD;
-
- public:
- WideIVInfo WI;
-
- WideIVVisitor(PHINode *NarrowIV, ScalarEvolution *SCEV,
- const DataLayout *TData) :
- SE(SCEV), TD(TData) { WI.NarrowIV = NarrowIV; }
-
- // Implement the interface used by simplifyUsersOfIV.
- virtual void visitCast(CastInst *Cast);
+ WideIVInfo() : NarrowIV(nullptr), WidestNativeType(nullptr),
+ IsSigned(false) {}
};
}
/// visitCast - Update information about the induction variable that is
/// extended by this sign or zero extend operation. This is used to determine
/// the final width of the IV before actually widening it.
-void WideIVVisitor::visitCast(CastInst *Cast) {
+static void visitIVCast(CastInst *Cast, WideIVInfo &WI, ScalarEvolution *SE,
+ const DataLayout *DL) {
bool IsSigned = Cast->getOpcode() == Instruction::SExt;
if (!IsSigned && Cast->getOpcode() != Instruction::ZExt)
return;
Type *Ty = Cast->getType();
uint64_t Width = SE->getTypeSizeInBits(Ty);
- if (TD && !TD->isLegalInteger(Width))
+ if (DL && !DL->isLegalInteger(Width))
return;
if (!WI.WidestNativeType) {
@@ -688,7 +695,7 @@ struct NarrowIVDefUse {
Instruction *NarrowUse;
Instruction *WideDef;
- NarrowIVDefUse(): NarrowDef(0), NarrowUse(0), WideDef(0) {}
+ NarrowIVDefUse(): NarrowDef(nullptr), NarrowUse(nullptr), WideDef(nullptr) {}
NarrowIVDefUse(Instruction *ND, Instruction *NU, Instruction *WD):
NarrowDef(ND), NarrowUse(NU), WideDef(WD) {}
@@ -731,9 +738,9 @@ public:
L(LI->getLoopFor(OrigPhi->getParent())),
SE(SEv),
DT(DTree),
- WidePhi(0),
- WideInc(0),
- WideIncExpr(0),
+ WidePhi(nullptr),
+ WideInc(nullptr),
+ WideIncExpr(nullptr),
DeadInsts(DI) {
assert(L->getHeader() == OrigPhi->getParent() && "Phi must be an IV");
}
@@ -788,7 +795,7 @@ Instruction *WidenIV::CloneIVUser(NarrowIVDefUse DU) {
unsigned Opcode = DU.NarrowUse->getOpcode();
switch (Opcode) {
default:
- return 0;
+ return nullptr;
case Instruction::Add:
case Instruction::Mul:
case Instruction::UDiv:
@@ -833,14 +840,14 @@ Instruction *WidenIV::CloneIVUser(NarrowIVDefUse DU) {
const SCEVAddRecExpr* WidenIV::GetExtendedOperandRecurrence(NarrowIVDefUse DU) {
// Handle the common case of add<nsw/nuw>
if (DU.NarrowUse->getOpcode() != Instruction::Add)
- return 0;
+ return nullptr;
// One operand (NarrowDef) has already been extended to WideDef. Now determine
// if extending the other will lead to a recurrence.
unsigned ExtendOperIdx = DU.NarrowUse->getOperand(0) == DU.NarrowDef ? 1 : 0;
assert(DU.NarrowUse->getOperand(1-ExtendOperIdx) == DU.NarrowDef && "bad DU");
- const SCEV *ExtendOperExpr = 0;
+ const SCEV *ExtendOperExpr = nullptr;
const OverflowingBinaryOperator *OBO =
cast<OverflowingBinaryOperator>(DU.NarrowUse);
if (IsSigned && OBO->hasNoSignedWrap())
@@ -850,7 +857,7 @@ const SCEVAddRecExpr* WidenIV::GetExtendedOperandRecurrence(NarrowIVDefUse DU) {
ExtendOperExpr = SE->getZeroExtendExpr(
SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
else
- return 0;
+ return nullptr;
// When creating this AddExpr, don't apply the current operations NSW or NUW
// flags. This instruction may be guarded by control flow that the no-wrap
@@ -861,7 +868,7 @@ const SCEVAddRecExpr* WidenIV::GetExtendedOperandRecurrence(NarrowIVDefUse DU) {
SE->getAddExpr(SE->getSCEV(DU.WideDef), ExtendOperExpr));
if (!AddRec || AddRec->getLoop() != L)
- return 0;
+ return nullptr;
return AddRec;
}
@@ -872,14 +879,14 @@ const SCEVAddRecExpr* WidenIV::GetExtendedOperandRecurrence(NarrowIVDefUse DU) {
/// recurrence. Otherwise return NULL.
const SCEVAddRecExpr *WidenIV::GetWideRecurrence(Instruction *NarrowUse) {
if (!SE->isSCEVable(NarrowUse->getType()))
- return 0;
+ return nullptr;
const SCEV *NarrowExpr = SE->getSCEV(NarrowUse);
if (SE->getTypeSizeInBits(NarrowExpr->getType())
>= SE->getTypeSizeInBits(WideType)) {
// NarrowUse implicitly widens its operand. e.g. a gep with a narrow
// index. So don't follow this use.
- return 0;
+ return nullptr;
}
const SCEV *WideExpr = IsSigned ?
@@ -887,19 +894,47 @@ const SCEVAddRecExpr *WidenIV::GetWideRecurrence(Instruction *NarrowUse) {
SE->getZeroExtendExpr(NarrowExpr, WideType);
const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(WideExpr);
if (!AddRec || AddRec->getLoop() != L)
- return 0;
+ return nullptr;
return AddRec;
}
+/// This IV user cannot be widen. Replace this use of the original narrow IV
+/// with a truncation of the new wide IV to isolate and eliminate the narrow IV.
+static void truncateIVUse(NarrowIVDefUse DU, DominatorTree *DT) {
+ DEBUG(dbgs() << "INDVARS: Truncate IV " << *DU.WideDef
+ << " for user " << *DU.NarrowUse << "\n");
+ IRBuilder<> Builder(getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT));
+ Value *Trunc = Builder.CreateTrunc(DU.WideDef, DU.NarrowDef->getType());
+ DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, Trunc);
+}
+
/// WidenIVUse - Determine whether an individual user of the narrow IV can be
/// widened. If so, return the wide clone of the user.
Instruction *WidenIV::WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {
// Stop traversing the def-use chain at inner-loop phis or post-loop phis.
- if (isa<PHINode>(DU.NarrowUse) &&
- LI->getLoopFor(DU.NarrowUse->getParent()) != L)
- return 0;
-
+ if (PHINode *UsePhi = dyn_cast<PHINode>(DU.NarrowUse)) {
+ if (LI->getLoopFor(UsePhi->getParent()) != L) {
+ // For LCSSA phis, sink the truncate outside the loop.
+ // After SimplifyCFG most loop exit targets have a single predecessor.
+ // Otherwise fall back to a truncate within the loop.
+ if (UsePhi->getNumOperands() != 1)
+ truncateIVUse(DU, DT);
+ else {
+ PHINode *WidePhi =
+ PHINode::Create(DU.WideDef->getType(), 1, UsePhi->getName() + ".wide",
+ UsePhi);
+ WidePhi->addIncoming(DU.WideDef, UsePhi->getIncomingBlock(0));
+ IRBuilder<> Builder(WidePhi->getParent()->getFirstInsertionPt());
+ Value *Trunc = Builder.CreateTrunc(WidePhi, DU.NarrowDef->getType());
+ UsePhi->replaceAllUsesWith(Trunc);
+ DeadInsts.push_back(UsePhi);
+ DEBUG(dbgs() << "INDVARS: Widen lcssa phi " << *UsePhi
+ << " to " << *WidePhi << "\n");
+ }
+ return nullptr;
+ }
+ }
// Our raison d'etre! Eliminate sign and zero extension.
if (IsSigned ? isa<SExtInst>(DU.NarrowUse) : isa<ZExtInst>(DU.NarrowUse)) {
Value *NewDef = DU.WideDef;
@@ -935,7 +970,7 @@ Instruction *WidenIV::WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {
// push the uses of WideDef here.
// No further widening is needed. The deceased [sz]ext had done it for us.
- return 0;
+ return nullptr;
}
// Does this user itself evaluate to a recurrence after widening?
@@ -947,10 +982,8 @@ Instruction *WidenIV::WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {
// This user does not evaluate to a recurence after widening, so don't
// follow it. Instead insert a Trunc to kill off the original use,
// eventually isolating the original narrow IV so it can be removed.
- IRBuilder<> Builder(getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT));
- Value *Trunc = Builder.CreateTrunc(DU.WideDef, DU.NarrowDef->getType());
- DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, Trunc);
- return 0;
+ truncateIVUse(DU, DT);
+ return nullptr;
}
// Assume block terminators cannot evaluate to a recurrence. We can't to
// insert a Trunc after a terminator if there happens to be a critical edge.
@@ -959,14 +992,14 @@ Instruction *WidenIV::WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {
// Reuse the IV increment that SCEVExpander created as long as it dominates
// NarrowUse.
- Instruction *WideUse = 0;
+ Instruction *WideUse = nullptr;
if (WideAddRec == WideIncExpr
&& Rewriter.hoistIVInc(WideInc, DU.NarrowUse))
WideUse = WideInc;
else {
WideUse = CloneIVUser(DU);
if (!WideUse)
- return 0;
+ return nullptr;
}
// Evaluation of WideAddRec ensured that the narrow expression could be
// extended outside the loop without overflow. This suggests that the wide use
@@ -977,7 +1010,7 @@ Instruction *WidenIV::WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {
DEBUG(dbgs() << "Wide use expression mismatch: " << *WideUse
<< ": " << *SE->getSCEV(WideUse) << " != " << *WideAddRec << "\n");
DeadInsts.push_back(WideUse);
- return 0;
+ return nullptr;
}
// Returning WideUse pushes it on the worklist.
@@ -987,15 +1020,14 @@ Instruction *WidenIV::WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {
/// pushNarrowIVUsers - Add eligible users of NarrowDef to NarrowIVUsers.
///
void WidenIV::pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef) {
- for (Value::use_iterator UI = NarrowDef->use_begin(),
- UE = NarrowDef->use_end(); UI != UE; ++UI) {
- Instruction *NarrowUse = cast<Instruction>(*UI);
+ for (User *U : NarrowDef->users()) {
+ Instruction *NarrowUser = cast<Instruction>(U);
// Handle data flow merges and bizarre phi cycles.
- if (!Widened.insert(NarrowUse))
+ if (!Widened.insert(NarrowUser))
continue;
- NarrowIVUsers.push_back(NarrowIVDefUse(NarrowDef, NarrowUse, WideDef));
+ NarrowIVUsers.push_back(NarrowIVDefUse(NarrowDef, NarrowUser, WideDef));
}
}
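The loop above is one of many in this merge that replace manual use_iterator walks with the new range accessors: V->users() yields the using instructions, while V->uses() yields the Use edges themselves, which is handy when the operand number matters (as in the SinkUnusedInvariants hunk further down). A hedged sketch of both forms:

    #include "llvm/IR/Instructions.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    static void inspectUses(Value *V) {
      // users(): iterate over the users of V.
      for (User *U : V->users())
        if (Instruction *I = dyn_cast<Instruction>(U))
          outs() << "used by: " << *I << "\n";

      // uses(): iterate over the Use edges, which also record which operand
      // slot of the user refers back to V.
      for (Use &U : V->uses())
        outs() << "operand #" << U.getOperandNo() << " of " << *U.getUser() << "\n";
    }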
@@ -1013,7 +1045,7 @@ PHINode *WidenIV::CreateWideIV(SCEVExpander &Rewriter) {
// Is this phi an induction variable?
const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(OrigPhi));
if (!AddRec)
- return NULL;
+ return nullptr;
// Widen the induction variable expression.
const SCEV *WideIVExpr = IsSigned ?
@@ -1026,7 +1058,7 @@ PHINode *WidenIV::CreateWideIV(SCEVExpander &Rewriter) {
// Can the IV be extended outside the loop without overflow?
AddRec = dyn_cast<SCEVAddRecExpr>(WideIVExpr);
if (!AddRec || AddRec->getLoop() != L)
- return NULL;
+ return nullptr;
// An AddRec must have loop-invariant operands. Since this AddRec is
// materialized by a loop header phi, the expression cannot have any post-loop
@@ -1080,9 +1112,36 @@ PHINode *WidenIV::CreateWideIV(SCEVExpander &Rewriter) {
}
//===----------------------------------------------------------------------===//
+// Live IV Reduction - Minimize IVs live across the loop.
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
// Simplification of IV users based on SCEV evaluation.
//===----------------------------------------------------------------------===//
+namespace {
+ class IndVarSimplifyVisitor : public IVVisitor {
+ ScalarEvolution *SE;
+ const DataLayout *DL;
+ PHINode *IVPhi;
+
+ public:
+ WideIVInfo WI;
+
+ IndVarSimplifyVisitor(PHINode *IV, ScalarEvolution *SCEV,
+ const DataLayout *DL, const DominatorTree *DTree):
+ SE(SCEV), DL(DL), IVPhi(IV) {
+ DT = DTree;
+ WI.NarrowIV = IVPhi;
+ if (ReduceLiveIVs)
+ setSplitOverflowIntrinsics();
+ }
+
+ // Implement the interface used by simplifyUsersOfIV.
+ void visitCast(CastInst *Cast) override { visitIVCast(Cast, WI, SE, DL); }
+ };
+}
/// SimplifyAndExtend - Iteratively perform simplification on a worklist of IV
/// users. Each successive simplification may push more users which may
@@ -1114,12 +1173,12 @@ void IndVarSimplify::SimplifyAndExtend(Loop *L,
PHINode *CurrIV = LoopPhis.pop_back_val();
// Information about sign/zero extensions of CurrIV.
- WideIVVisitor WIV(CurrIV, SE, TD);
+ IndVarSimplifyVisitor Visitor(CurrIV, SE, DL, DT);
- Changed |= simplifyUsersOfIV(CurrIV, SE, &LPM, DeadInsts, &WIV);
+ Changed |= simplifyUsersOfIV(CurrIV, SE, &LPM, DeadInsts, &Visitor);
- if (WIV.WI.WidestNativeType) {
- WideIVs.push_back(WIV.WI);
+ if (Visitor.WI.WidestNativeType) {
+ WideIVs.push_back(Visitor.WI);
}
} while(!LoopPhis.empty());
@@ -1225,7 +1284,7 @@ static bool canExpandBackedgeTakenCount(Loop *L, ScalarEvolution *SE) {
static PHINode *getLoopPhiForCounter(Value *IncV, Loop *L, DominatorTree *DT) {
Instruction *IncI = dyn_cast<Instruction>(IncV);
if (!IncI)
- return 0;
+ return nullptr;
switch (IncI->getOpcode()) {
case Instruction::Add:
@@ -1236,17 +1295,17 @@ static PHINode *getLoopPhiForCounter(Value *IncV, Loop *L, DominatorTree *DT) {
if (IncI->getNumOperands() == 2)
break;
default:
- return 0;
+ return nullptr;
}
PHINode *Phi = dyn_cast<PHINode>(IncI->getOperand(0));
if (Phi && Phi->getParent() == L->getHeader()) {
if (isLoopInvariant(IncI->getOperand(1), L, DT))
return Phi;
- return 0;
+ return nullptr;
}
if (IncI->getOpcode() == Instruction::GetElementPtr)
- return 0;
+ return nullptr;
// Allow add/sub to be commuted.
Phi = dyn_cast<PHINode>(IncI->getOperand(1));
@@ -1254,7 +1313,7 @@ static PHINode *getLoopPhiForCounter(Value *IncV, Loop *L, DominatorTree *DT) {
if (isLoopInvariant(IncI->getOperand(0), L, DT))
return Phi;
}
- return 0;
+ return nullptr;
}
/// Return the compare guarding the loop latch, or NULL for unrecognized tests.
@@ -1264,7 +1323,7 @@ static ICmpInst *getLoopTest(Loop *L) {
BasicBlock *LatchBlock = L->getLoopLatch();
// Don't bother with LFTR if the loop is not properly simplified.
if (!LatchBlock)
- return 0;
+ return nullptr;
BranchInst *BI = dyn_cast<BranchInst>(L->getExitingBlock()->getTerminator());
assert(BI && "expected exit branch");
@@ -1359,15 +1418,11 @@ static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) {
int LatchIdx = Phi->getBasicBlockIndex(LatchBlock);
Value *IncV = Phi->getIncomingValue(LatchIdx);
- for (Value::use_iterator UI = Phi->use_begin(), UE = Phi->use_end();
- UI != UE; ++UI) {
- if (*UI != Cond && *UI != IncV) return false;
- }
+ for (User *U : Phi->users())
+ if (U != Cond && U != IncV) return false;
- for (Value::use_iterator UI = IncV->use_begin(), UE = IncV->use_end();
- UI != UE; ++UI) {
- if (*UI != Cond && *UI != Phi) return false;
- }
+ for (User *U : IncV->users())
+ if (U != Cond && U != Phi) return false;
return true;
}
@@ -1386,15 +1441,15 @@ static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) {
/// could at least handle constant BECounts.
static PHINode *
FindLoopCounter(Loop *L, const SCEV *BECount,
- ScalarEvolution *SE, DominatorTree *DT, const DataLayout *TD) {
+ ScalarEvolution *SE, DominatorTree *DT, const DataLayout *DL) {
uint64_t BCWidth = SE->getTypeSizeInBits(BECount->getType());
Value *Cond =
cast<BranchInst>(L->getExitingBlock()->getTerminator())->getCondition();
// Loop over all of the PHI nodes, looking for a simple counter.
- PHINode *BestPhi = 0;
- const SCEV *BestInit = 0;
+ PHINode *BestPhi = nullptr;
+ const SCEV *BestInit = nullptr;
BasicBlock *LatchBlock = L->getLoopLatch();
assert(LatchBlock && "needsLFTR should guarantee a loop latch");
@@ -1415,7 +1470,7 @@ FindLoopCounter(Loop *L, const SCEV *BECount,
// AR may be wider than BECount. With eq/ne tests overflow is immaterial.
// AR may not be a narrower type, or we may never exit.
uint64_t PhiWidth = SE->getTypeSizeInBits(AR->getType());
- if (PhiWidth < BCWidth || (TD && !TD->isLegalInteger(PhiWidth)))
+ if (PhiWidth < BCWidth || (DL && !DL->isLegalInteger(PhiWidth)))
continue;
const SCEV *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE));
@@ -1518,7 +1573,7 @@ static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L,
// IVInit integer and IVCount pointer would only occur if a canonical IV
// were generated on top of case #2, which is not expected.
- const SCEV *IVLimit = 0;
+ const SCEV *IVLimit = nullptr;
// For unit stride, IVCount = Start + BECount with 2's complement overflow.
// For non-zero Start, compute IVCount here.
if (AR->getStart()->isZero())
@@ -1697,13 +1752,12 @@ void IndVarSimplify::SinkUnusedInvariants(Loop *L) {
// Determine if there is a use in or before the loop (direct or
// otherwise).
bool UsedInLoop = false;
- for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
- UI != UE; ++UI) {
- User *U = *UI;
- BasicBlock *UseBB = cast<Instruction>(U)->getParent();
- if (PHINode *P = dyn_cast<PHINode>(U)) {
+ for (Use &U : I->uses()) {
+ Instruction *User = cast<Instruction>(U.getUser());
+ BasicBlock *UseBB = User->getParent();
+ if (PHINode *P = dyn_cast<PHINode>(User)) {
unsigned i =
- PHINode::getIncomingValueNumForOperand(UI.getOperandNo());
+ PHINode::getIncomingValueNumForOperand(U.getOperandNo());
UseBB = P->getIncomingBlock(i);
}
if (UseBB == Preheader || L->contains(UseBB)) {
@@ -1743,6 +1797,9 @@ void IndVarSimplify::SinkUnusedInvariants(Loop *L) {
//===----------------------------------------------------------------------===//
bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
+ if (skipOptnoneFunction(L))
+ return false;
+
// If LoopSimplify form is not available, stay out of trouble. Some notes:
// - LSR currently only supports LoopSimplify-form loops. Indvars'
// canonicalization can be a pessimization without LSR to "clean up"
@@ -1756,8 +1813,9 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
LI = &getAnalysis<LoopInfo>();
SE = &getAnalysis<ScalarEvolution>();
- DT = &getAnalysis<DominatorTree>();
- TD = getAnalysisIfAvailable<DataLayout>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
DeadInsts.clear();
@@ -1799,13 +1857,13 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
// If we have a trip count expression, rewrite the loop's exit condition
// using it. We can currently only handle loops with a single exit.
if (canExpandBackedgeTakenCount(L, SE) && needsLFTR(L, DT)) {
- PHINode *IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT, TD);
+ PHINode *IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT, DL);
if (IndVar) {
// Check preconditions for proper SCEVExpander operation. SCEV does not
// express SCEVExpander's dependencies, such as LoopSimplify. Instead any
// pass that uses the SCEVExpander must do it. This does not work well for
- // loop passes because SCEVExpander makes assumptions about all loops, while
- // LoopPassManager only forces the current loop to be simplified.
+ // loop passes because SCEVExpander makes assumptions about all loops,
+ // while LoopPassManager only forces the current loop to be simplified.
//
// FIXME: SCEV expansion has no way to bail out, so the caller must
// explicitly check any assumptions made by SCEV. Brittle.
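The IndVarSimplify hunks above (and matching hunks in the passes that follow) replace the old direct DataLayout analysis lookup with the LLVM 3.5 DataLayoutPass wrapper. Below is a minimal sketch of that idiom in isolation, assuming an LLVM 3.5-era tree for the headers; the pass name is hypothetical and exists only to show where the lookup lives.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"

using namespace llvm;

namespace {
// Hypothetical pass used only to illustrate the DataLayout lookup idiom.
struct ExampleDLUser : public FunctionPass {
  static char ID;
  const DataLayout *DL;

  ExampleDLUser() : FunctionPass(ID), DL(nullptr) {}

  bool runOnFunction(Function &) override {
    // DataLayout is optional in 3.5: fetch the wrapper pass if present and
    // keep a possibly-null pointer; every later use must tolerate nullptr.
    DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
    DL = DLP ? &DLP->getDataLayout() : nullptr;
    return false;
  }
};
char ExampleDLUser::ID = 0;
}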
diff --git a/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index b3ec2fc..21f80385 100644
--- a/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "jump-threading"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
@@ -27,10 +26,10 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
@@ -38,6 +37,8 @@
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;
+#define DEBUG_TYPE "jump-threading"
+
STATISTIC(NumThreads, "Number of jumps threaded");
STATISTIC(NumFolds, "Number of terminators folded");
STATISTIC(NumDupes, "Number of branch blocks duplicated to eliminate phi");
@@ -76,7 +77,7 @@ namespace {
/// revectored to the false side of the second if.
///
class JumpThreading : public FunctionPass {
- DataLayout *TD;
+ const DataLayout *DL;
TargetLibraryInfo *TLI;
LazyValueInfo *LVI;
#ifdef NDEBUG
@@ -105,9 +106,9 @@ namespace {
initializeJumpThreadingPass(*PassRegistry::getPassRegistry());
}
- bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<LazyValueInfo>();
AU.addPreserved<LazyValueInfo>();
AU.addRequired<TargetLibraryInfo>();
@@ -148,11 +149,24 @@ FunctionPass *llvm::createJumpThreadingPass() { return new JumpThreading(); }
/// runOnFunction - Top level algorithm.
///
bool JumpThreading::runOnFunction(Function &F) {
+ if (skipOptnoneFunction(F))
+ return false;
+
DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
- TD = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = &getAnalysis<TargetLibraryInfo>();
LVI = &getAnalysis<LazyValueInfo>();
+ // Remove unreachable blocks from the function, as they may result in an
+ // infinite loop. We thread an edge when we find something profitable, and
+ // jump threading a branch can create other opportunities. If these
+ // opportunities form a cycle, i.e. if any jump threading is undoing previous
+ // threading in the path, then we will loop forever. We take care of this
+ // issue by not jump threading for back edges. This works for normal cases
+ // but not for unreachable blocks, as they may have a cycle with no back edge.
+ removeUnreachableBlocks(F);
+
FindLoopHeaders(F);
bool Changed, EverChanged = false;
@@ -251,7 +265,7 @@ static unsigned getJumpThreadDuplicationCost(const BasicBlock *BB,
// as having cost of 2 total, and if they are a vector intrinsic, we model
// them as having cost 1.
if (const CallInst *CI = dyn_cast<CallInst>(I)) {
- if (CI->hasFnAttr(Attribute::NoDuplicate))
+ if (CI->cannotDuplicate())
// Blocks with NoDuplicate are modelled as having infinite cost, so they
// are never duplicated.
return ~0U;
@@ -304,7 +318,7 @@ void JumpThreading::FindLoopHeaders(Function &F) {
/// Returns null if Val is null or not an appropriate constant.
static Constant *getKnownConstant(Value *Val, ConstantPreference Preference) {
if (!Val)
- return 0;
+ return nullptr;
// Undef is "known" enough.
if (UndefValue *U = dyn_cast<UndefValue>(Val))
@@ -348,7 +362,7 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB, PredValueInfo &Result,
// If V is a non-instruction value, or an instruction in a different block,
// then it can't be derived from a PHI.
Instruction *I = dyn_cast<Instruction>(V);
- if (I == 0 || I->getParent() != BB) {
+ if (!I || I->getParent() != BB) {
// Okay, if this is a live-in value, see if it has a known value at the end
// of any of our predecessors.
@@ -490,8 +504,8 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB, PredValueInfo &Result,
Value *LHS = PN->getIncomingValue(i);
Value *RHS = Cmp->getOperand(1)->DoPHITranslation(BB, PredBB);
- Value *Res = SimplifyCmpInst(Cmp->getPredicate(), LHS, RHS, TD);
- if (Res == 0) {
+ Value *Res = SimplifyCmpInst(Cmp->getPredicate(), LHS, RHS, DL);
+ if (!Res) {
if (!isa<Constant>(RHS))
continue;
@@ -577,7 +591,7 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB, PredValueInfo &Result,
// Either operand will do, so be sure to pick the one that's a known
// constant.
// FIXME: Do this more cleverly if both values are known constants?
- KnownCond = (TrueVal != 0);
+ KnownCond = (TrueVal != nullptr);
}
// See if the select has a known constant value for this predecessor.
@@ -655,14 +669,9 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
if (LoopHeaders.erase(SinglePred))
LoopHeaders.insert(BB);
- // Remember if SinglePred was the entry block of the function. If so, we
- // will need to move BB back to the entry position.
- bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
LVI->eraseBlock(SinglePred);
MergeBasicBlockIntoOnlyPred(BB);
- if (isEntry && BB != &BB->getParent()->getEntryBlock())
- BB->moveBefore(&BB->getParent()->getEntryBlock());
return true;
}
}
@@ -692,7 +701,7 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
// Run constant folding to see if we can reduce the condition to a simple
// constant.
if (Instruction *I = dyn_cast<Instruction>(Condition)) {
- Value *SimpleVal = ConstantFoldInstruction(I, TD, TLI);
+ Value *SimpleVal = ConstantFoldInstruction(I, DL, TLI);
if (SimpleVal) {
I->replaceAllUsesWith(SimpleVal);
I->eraseFromParent();
@@ -733,7 +742,7 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
Instruction *CondInst = dyn_cast<Instruction>(Condition);
// All the rest of our checks depend on the condition being an instruction.
- if (CondInst == 0) {
+ if (!CondInst) {
// FIXME: Unify this with code below.
if (ProcessThreadableEdges(Condition, BB, Preference))
return true;
@@ -886,7 +895,7 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
SmallPtrSet<BasicBlock*, 8> PredsScanned;
typedef SmallVector<std::pair<BasicBlock*, Value*>, 8> AvailablePredsTy;
AvailablePredsTy AvailablePreds;
- BasicBlock *OneUnavailablePred = 0;
+ BasicBlock *OneUnavailablePred = nullptr;
// If we got here, the loaded value is transparent through to the start of the
// block. Check to see if it is available in any of the predecessor blocks.
@@ -900,16 +909,16 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
// Scan the predecessor to see if the value is available in the pred.
BBIt = PredBB->end();
- MDNode *ThisTBAATag = 0;
+ MDNode *ThisTBAATag = nullptr;
Value *PredAvailable = FindAvailableLoadedValue(LoadedPtr, PredBB, BBIt, 6,
- 0, &ThisTBAATag);
+ nullptr, &ThisTBAATag);
if (!PredAvailable) {
OneUnavailablePred = PredBB;
continue;
}
// If tbaa tags disagree or are not present, forget about them.
- if (TBAATag != ThisTBAATag) TBAATag = 0;
+ if (TBAATag != ThisTBAATag) TBAATag = nullptr;
// If so, this load is partially redundant. Remember this info so that we
// can create a PHI node.
@@ -925,7 +934,7 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
// predecessor, we want to insert a merge block for those common predecessors.
// This ensures that we only have to insert one reload, thus not increasing
// code size.
- BasicBlock *UnavailablePred = 0;
+ BasicBlock *UnavailablePred = nullptr;
// If there is exactly one predecessor where the value is unavailable, the
// already computed 'OneUnavailablePred' block is it. If it ends in an
@@ -992,7 +1001,7 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
BasicBlock *P = *PI;
AvailablePredsTy::iterator I =
std::lower_bound(AvailablePreds.begin(), AvailablePreds.end(),
- std::make_pair(P, (Value*)0));
+ std::make_pair(P, (Value*)nullptr));
assert(I != AvailablePreds.end() && I->first == P &&
"Didn't find entry for predecessor!");
@@ -1099,7 +1108,7 @@ bool JumpThreading::ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
SmallPtrSet<BasicBlock*, 16> SeenPreds;
SmallVector<std::pair<BasicBlock*, BasicBlock*>, 16> PredToDestList;
- BasicBlock *OnlyDest = 0;
+ BasicBlock *OnlyDest = nullptr;
BasicBlock *MultipleDestSentinel = (BasicBlock*)(intptr_t)~0ULL;
for (unsigned i = 0, e = PredValues.size(); i != e; ++i) {
@@ -1116,7 +1125,7 @@ bool JumpThreading::ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
BasicBlock *DestBB;
if (isa<UndefValue>(Val))
- DestBB = 0;
+ DestBB = nullptr;
else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()))
DestBB = BI->getSuccessor(cast<ConstantInt>(Val)->isZero());
else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
@@ -1167,7 +1176,7 @@ bool JumpThreading::ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
// If the threadable edges are branching on an undefined value, we get to pick
// the destination that these predecessors should get to.
- if (MostPopularDest == 0)
+ if (!MostPopularDest)
MostPopularDest = BB->getTerminator()->
getSuccessor(GetBestDestForJumpOnUndef(BB));
@@ -1269,7 +1278,7 @@ bool JumpThreading::ProcessBranchOnXOR(BinaryOperator *BO) {
}
// Determine which value to split on, true, false, or undef if neither.
- ConstantInt *SplitVal = 0;
+ ConstantInt *SplitVal = nullptr;
if (NumTrue > NumFalse)
SplitVal = ConstantInt::getTrue(BB->getContext());
else if (NumTrue != 0 || NumFalse != 0)
@@ -1290,7 +1299,7 @@ bool JumpThreading::ProcessBranchOnXOR(BinaryOperator *BO) {
// help us. However, we can just replace the LHS or RHS with the constant.
if (BlocksToFoldInto.size() ==
cast<PHINode>(BB->front()).getNumIncomingValues()) {
- if (SplitVal == 0) {
+ if (!SplitVal) {
// If all preds provide undef, just nuke the xor, because it is undef too.
BO->replaceAllUsesWith(UndefValue::get(BO->getType()));
BO->eraseFromParent();
@@ -1431,16 +1440,15 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
for (BasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
// Scan all uses of this instruction to see if it is used outside of its
// block, and if so, record them in UsesToRename.
- for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;
- ++UI) {
- Instruction *User = cast<Instruction>(*UI);
+ for (Use &U : I->uses()) {
+ Instruction *User = cast<Instruction>(U.getUser());
if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
- if (UserPN->getIncomingBlock(UI) == BB)
+ if (UserPN->getIncomingBlock(U) == BB)
continue;
} else if (User->getParent() == BB)
continue;
- UsesToRename.push_back(&UI.getUse());
+ UsesToRename.push_back(&U);
}
// If there are no uses outside the block, we're done with this instruction.
@@ -1475,7 +1483,7 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
// At this point, the IR is fully up to date and consistent. Do a quick scan
// over the new instructions and zap any that are constants or dead. This
// frequently happens because of phi translation.
- SimplifyInstructionsInBlock(NewBB, TD, TLI);
+ SimplifyInstructionsInBlock(NewBB, DL, TLI);
// Threaded an edge!
++NumThreads;
@@ -1528,7 +1536,7 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
// can just clone the bits from BB into the end of the new PredBB.
BranchInst *OldPredBranch = dyn_cast<BranchInst>(PredBB->getTerminator());
- if (OldPredBranch == 0 || !OldPredBranch->isUnconditional()) {
+ if (!OldPredBranch || !OldPredBranch->isUnconditional()) {
PredBB = SplitEdge(PredBB, BB, this);
OldPredBranch = cast<BranchInst>(PredBB->getTerminator());
}
@@ -1557,7 +1565,7 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
// If this instruction can be simplified after the operands are updated,
// just use the simplified value instead. This frequently happens due to
// phi translation.
- if (Value *IV = SimplifyInstruction(New, TD)) {
+ if (Value *IV = SimplifyInstruction(New, DL)) {
delete New;
ValueMapping[BI] = IV;
} else {
@@ -1585,16 +1593,15 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
for (BasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
// Scan all uses of this instruction to see if it is used outside of its
// block, and if so, record them in UsesToRename.
- for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;
- ++UI) {
- Instruction *User = cast<Instruction>(*UI);
+ for (Use &U : I->uses()) {
+ Instruction *User = cast<Instruction>(U.getUser());
if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
- if (UserPN->getIncomingBlock(UI) == BB)
+ if (UserPN->getIncomingBlock(U) == BB)
continue;
} else if (User->getParent() == BB)
continue;
- UsesToRename.push_back(&UI.getUse());
+ UsesToRename.push_back(&U);
}
// If there are no uses outside the block, we're done with this instruction.
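A pattern that recurs throughout this merge (here in ThreadEdge and DuplicateCondBranchOnPHIIntoPred, earlier in IndVarSimplify) is the switch from explicit Value::use_iterator loops to range-based iteration over uses()/users(). A minimal sketch of the new idiom follows, assuming LLVM 3.5 headers; the helper name is invented, and it deliberately omits the PHI-node incoming-block special case the real code handles.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Collect every use of I whose user lives outside I's basic block, keeping a
// handle to the Use itself so it can later be rewritten (e.g. by SSAUpdater).
static void collectUsesOutsideBlock(Instruction &I,
                                    SmallVectorImpl<Use *> &UsesToRename) {
  for (Use &U : I.uses()) {
    Instruction *UserInst = cast<Instruction>(U.getUser());
    if (UserInst->getParent() != I.getParent())
      UsesToRename.push_back(&U);
  }
}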
diff --git a/contrib/llvm/lib/Transforms/Scalar/LICM.cpp b/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
index f94cd2a..abcceb2 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -30,33 +30,37 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "licm"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
-#include "llvm/Support/CFG.h"
+#include "llvm/IR/PredIteratorCache.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "licm"
+
STATISTIC(NumSunk , "Number of instructions sunk out of loop");
STATISTIC(NumHoisted , "Number of instructions hoisted out of loop");
STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
@@ -74,26 +78,28 @@ namespace {
initializeLICMPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnLoop(Loop *L, LPPassManager &LPM);
+ bool runOnLoop(Loop *L, LPPassManager &LPM) override;
/// This transformation requires natural loop information & requires that
/// loop preheaders be inserted into the CFG...
///
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
- AU.addRequired<DominatorTree>();
+ AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<LoopInfo>();
AU.addRequiredID(LoopSimplifyID);
+ AU.addPreservedID(LoopSimplifyID);
+ AU.addRequiredID(LCSSAID);
+ AU.addPreservedID(LCSSAID);
AU.addRequired<AliasAnalysis>();
AU.addPreserved<AliasAnalysis>();
- AU.addPreserved("scalar-evolution");
- AU.addPreservedID(LoopSimplifyID);
+ AU.addPreserved<ScalarEvolution>();
AU.addRequired<TargetLibraryInfo>();
}
using llvm::Pass::doFinalization;
- bool doFinalization() {
+ bool doFinalization() override {
assert(LoopToAliasSetMap.empty() && "Didn't free loop alias sets");
return false;
}
@@ -103,7 +109,7 @@ namespace {
LoopInfo *LI; // Current LoopInfo
DominatorTree *DT; // Dominator Tree for the current Loop.
- DataLayout *TD; // DataLayout for constant folding.
+ const DataLayout *DL; // DataLayout for constant folding.
TargetLibraryInfo *TLI; // TargetLibraryInfo for constant folding.
// State that is updated as we process loops.
@@ -117,11 +123,12 @@ namespace {
DenseMap<Loop*, AliasSetTracker*> LoopToAliasSetMap;
/// cloneBasicBlockAnalysis - Simple Analysis hook. Clone alias set info.
- void cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To, Loop *L);
+ void cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To,
+ Loop *L) override;
/// deleteAnalysisValue - Simple Analysis hook. Delete value V from alias
/// set.
- void deleteAnalysisValue(Value *V, Loop *L);
+ void deleteAnalysisValue(Value *V, Loop *L) override;
/// SinkRegion - Walk the specified region of the CFG (defined by all blocks
/// dominated by the specified block, and that are in the current loop) in
@@ -183,15 +190,26 @@ namespace {
void PromoteAliasSet(AliasSet &AS,
SmallVectorImpl<BasicBlock*> &ExitBlocks,
- SmallVectorImpl<Instruction*> &InsertPts);
+ SmallVectorImpl<Instruction*> &InsertPts,
+ PredIteratorCache &PIC);
+
+ /// \brief Create a copy of the instruction in the exit block and patch up
+ /// SSA.
+ /// PN is a user of I in ExitBlock that can be used to get the number and
+ /// list of predecessors fast.
+ Instruction *CloneInstructionInExitBlock(Instruction &I,
+ BasicBlock &ExitBlock,
+ PHINode &PN);
};
}
char LICM::ID = 0;
INITIALIZE_PASS_BEGIN(LICM, "licm", "Loop Invariant Code Motion", false, false)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
+INITIALIZE_PASS_DEPENDENCY(LCSSA)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LICM, "licm", "Loop Invariant Code Motion", false, false)
@@ -203,16 +221,22 @@ Pass *llvm::createLICMPass() { return new LICM(); }
/// times on one loop.
///
bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
+ if (skipOptnoneFunction(L))
+ return false;
+
Changed = false;
// Get our Loop and Alias Analysis information...
LI = &getAnalysis<LoopInfo>();
AA = &getAnalysis<AliasAnalysis>();
- DT = &getAnalysis<DominatorTree>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- TD = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = &getAnalysis<TargetLibraryInfo>();
+ assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
+
CurAST = new AliasSetTracker(*AA);
// Collect Alias info from subloops.
for (Loop::iterator LoopItr = L->begin(), LoopItrE = L->end();
@@ -272,19 +296,36 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
// Now that all loop invariants have been removed from the loop, promote any
// memory references to scalars that we can.
- if (!DisablePromotion && Preheader && L->hasDedicatedExits()) {
+ if (!DisablePromotion && (Preheader || L->hasDedicatedExits())) {
SmallVector<BasicBlock *, 8> ExitBlocks;
SmallVector<Instruction *, 8> InsertPts;
+ PredIteratorCache PIC;
// Loop over all of the alias sets in the tracker object.
for (AliasSetTracker::iterator I = CurAST->begin(), E = CurAST->end();
I != E; ++I)
- PromoteAliasSet(*I, ExitBlocks, InsertPts);
+ PromoteAliasSet(*I, ExitBlocks, InsertPts, PIC);
+
+ // Once we have promoted values across the loop body, we have to recursively
+ // re-form LCSSA, as any nested loop may now have values defined within the
+ // loop that are used in the outer loop.
+ // FIXME: This is really heavy handed. It would be a bit better to use an
+ // SSAUpdater strategy during promotion that was LCSSA aware and reformed
+ // it as it went.
+ if (Changed)
+ formLCSSARecursively(*L, *DT, getAnalysisIfAvailable<ScalarEvolution>());
}
+ // Check that neither this loop nor its parent has had LCSSA broken. LICM is
+ // specifically moving instructions across the loop boundary and so it is
+ // especially in need of sanity checking here.
+ assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
+ assert((!L->getParentLoop() || L->getParentLoop()->isLCSSAForm(*DT)) &&
+ "Parent loop not left in LCSSA form after LICM!");
+
// Clear out loops state information for the next iteration
- CurLoop = 0;
- Preheader = 0;
+ CurLoop = nullptr;
+ Preheader = nullptr;
// If this loop is nested inside of another one, save the alias information
// for when we process the outer loop.
@@ -302,7 +343,7 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
/// iteration.
///
void LICM::SinkRegion(DomTreeNode *N) {
- assert(N != 0 && "Null dominator tree node?");
+ assert(N != nullptr && "Null dominator tree node?");
BasicBlock *BB = N->getBlock();
// If this subregion is not in the top level loop at all, exit.
@@ -349,7 +390,7 @@ void LICM::SinkRegion(DomTreeNode *N) {
/// before uses, allowing us to hoist a loop body in one pass without iteration.
///
void LICM::HoistRegion(DomTreeNode *N) {
- assert(N != 0 && "Null dominator tree node?");
+ assert(N != nullptr && "Null dominator tree node?");
BasicBlock *BB = N->getBlock();
// If this subregion is not in the top level loop at all, exit.
@@ -364,7 +405,7 @@ void LICM::HoistRegion(DomTreeNode *N) {
// Try constant folding this instruction. If all the operands are
// constants, it is technically hoistable, but it would be better to just
// fold it.
- if (Constant *C = ConstantFoldInstruction(&I, TD, TLI)) {
+ if (Constant *C = ConstantFoldInstruction(&I, DL, TLI)) {
DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C << '\n');
CurAST->copyValue(&I, C);
CurAST->deleteValue(&I);
@@ -450,26 +491,82 @@ bool LICM::canSinkOrHoistInst(Instruction &I) {
return isSafeToExecuteUnconditionally(I);
}
+/// \brief Returns true if a PHINode is trivially replaceable with an
+/// Instruction.
+///
+/// This is true when all incoming values are that instruction. This pattern
+/// occurs most often with LCSSA PHI nodes.
+static bool isTriviallyReplacablePHI(PHINode &PN, Instruction &I) {
+ for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
+ if (PN.getIncomingValue(i) != &I)
+ return false;
+
+ return true;
+}
+
/// isNotUsedInLoop - Return true if the only users of this instruction are
/// outside of the loop. If this is true, we can sink the instruction to the
/// exit blocks of the loop.
///
bool LICM::isNotUsedInLoop(Instruction &I) {
- for (Value::use_iterator UI = I.use_begin(), E = I.use_end(); UI != E; ++UI) {
- Instruction *User = cast<Instruction>(*UI);
- if (PHINode *PN = dyn_cast<PHINode>(User)) {
- // PHI node uses occur in predecessor blocks!
+ for (User *U : I.users()) {
+ Instruction *UI = cast<Instruction>(U);
+ if (PHINode *PN = dyn_cast<PHINode>(UI)) {
+ // A PHI node where all of the incoming values are this instruction is
+ // special -- it can just be RAUW'ed with the instruction and thus
+ // doesn't require a use in the predecessor. This is a particularly
+ // important special case because it is the pattern found in LCSSA form.
+ if (isTriviallyReplacablePHI(*PN, I)) {
+ if (CurLoop->contains(PN))
+ return false;
+ else
+ continue;
+ }
+
+ // Otherwise, a PHI node use occurs in the predecessor block associated
+ // with the incoming value. Check for such a use being inside the loop.
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
if (PN->getIncomingValue(i) == &I)
if (CurLoop->contains(PN->getIncomingBlock(i)))
return false;
- } else if (CurLoop->contains(User)) {
- return false;
+
+ continue;
}
+
+ if (CurLoop->contains(UI))
+ return false;
}
return true;
}
+Instruction *LICM::CloneInstructionInExitBlock(Instruction &I,
+ BasicBlock &ExitBlock,
+ PHINode &PN) {
+ Instruction *New = I.clone();
+ ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
+ if (!I.getName().empty()) New->setName(I.getName() + ".le");
+
+ // Build LCSSA PHI nodes for any in-loop operands. Note that this is
+ // particularly cheap because we can rip off the PHI node that we're
+ // replacing for the number and blocks of the predecessors.
+ // OPT: If this shows up in a profile, we can instead finish sinking all
+ // invariant instructions, and then walk their operands to re-establish
+ // LCSSA. That will eliminate creating PHI nodes just to nuke them when
+ // sinking bottom-up.
+ for (User::op_iterator OI = New->op_begin(), OE = New->op_end(); OI != OE;
+ ++OI)
+ if (Instruction *OInst = dyn_cast<Instruction>(*OI))
+ if (Loop *OLoop = LI->getLoopFor(OInst->getParent()))
+ if (!OLoop->contains(&PN)) {
+ PHINode *OpPN =
+ PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
+ OInst->getName() + ".lcssa", ExitBlock.begin());
+ for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
+ OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
+ *OI = OpPN;
+ }
+ return New;
+}
/// sink - When an instruction is found to only be used outside of the loop,
/// this function moves it to the exit blocks and patches up SSA form as needed.
@@ -479,119 +576,45 @@ bool LICM::isNotUsedInLoop(Instruction &I) {
void LICM::sink(Instruction &I) {
DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
- SmallVector<BasicBlock*, 8> ExitBlocks;
- CurLoop->getUniqueExitBlocks(ExitBlocks);
-
if (isa<LoadInst>(I)) ++NumMovedLoads;
else if (isa<CallInst>(I)) ++NumMovedCalls;
++NumSunk;
Changed = true;
- // The case where there is only a single exit node of this loop is common
- // enough that we handle it as a special (more efficient) case. It is more
- // efficient to handle because there are no PHI nodes that need to be placed.
- if (ExitBlocks.size() == 1) {
- if (!DT->dominates(I.getParent(), ExitBlocks[0])) {
- // Instruction is not used, just delete it.
- CurAST->deleteValue(&I);
- // If I has users in unreachable blocks, eliminate.
- // If I is not void type then replaceAllUsesWith undef.
- // This allows ValueHandlers and custom metadata to adjust itself.
- if (!I.use_empty())
- I.replaceAllUsesWith(UndefValue::get(I.getType()));
- I.eraseFromParent();
- } else {
- // Move the instruction to the start of the exit block, after any PHI
- // nodes in it.
- I.moveBefore(ExitBlocks[0]->getFirstInsertionPt());
-
- // This instruction is no longer in the AST for the current loop, because
- // we just sunk it out of the loop. If we just sunk it into an outer
- // loop, we will rediscover the operation when we process it.
- CurAST->deleteValue(&I);
- }
- return;
- }
-
- if (ExitBlocks.empty()) {
- // The instruction is actually dead if there ARE NO exit blocks.
- CurAST->deleteValue(&I);
- // If I has users in unreachable blocks, eliminate.
- // If I is not void type then replaceAllUsesWith undef.
- // This allows ValueHandlers and custom metadata to adjust itself.
- if (!I.use_empty())
- I.replaceAllUsesWith(UndefValue::get(I.getType()));
- I.eraseFromParent();
- return;
- }
-
- // Otherwise, if we have multiple exits, use the SSAUpdater to do all of the
- // hard work of inserting PHI nodes as necessary.
- SmallVector<PHINode*, 8> NewPHIs;
- SSAUpdater SSA(&NewPHIs);
-
- if (!I.use_empty())
- SSA.Initialize(I.getType(), I.getName());
-
- // Insert a copy of the instruction in each exit block of the loop that is
- // dominated by the instruction. Each exit block is known to only be in the
- // ExitBlocks list once.
- BasicBlock *InstOrigBB = I.getParent();
- unsigned NumInserted = 0;
+#ifndef NDEBUG
+ SmallVector<BasicBlock *, 32> ExitBlocks;
+ CurLoop->getUniqueExitBlocks(ExitBlocks);
+ SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(), ExitBlocks.end());
+#endif
- for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
- BasicBlock *ExitBlock = ExitBlocks[i];
+ // Clones of this instruction. Don't create more than one per exit block!
+ SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;
- if (!DT->dominates(InstOrigBB, ExitBlock))
- continue;
+ // If this instruction is only used outside of the loop, then all users are
+ // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
+ // the instruction.
+ while (!I.use_empty()) {
+ // The user must be a PHI node.
+ PHINode *PN = cast<PHINode>(I.user_back());
- // Insert the code after the last PHI node.
- BasicBlock::iterator InsertPt = ExitBlock->getFirstInsertionPt();
+ BasicBlock *ExitBlock = PN->getParent();
+ assert(ExitBlockSet.count(ExitBlock) &&
+ "The LCSSA PHI is not in an exit block!");
- // If this is the first exit block processed, just move the original
- // instruction, otherwise clone the original instruction and insert
- // the copy.
Instruction *New;
- if (NumInserted++ == 0) {
- I.moveBefore(InsertPt);
- New = &I;
- } else {
- New = I.clone();
- if (!I.getName().empty())
- New->setName(I.getName()+".le");
- ExitBlock->getInstList().insert(InsertPt, New);
- }
-
- // Now that we have inserted the instruction, inform SSAUpdater.
- if (!I.use_empty())
- SSA.AddAvailableValue(ExitBlock, New);
- }
-
- // If the instruction doesn't dominate any exit blocks, it must be dead.
- if (NumInserted == 0) {
- CurAST->deleteValue(&I);
- if (!I.use_empty())
- I.replaceAllUsesWith(UndefValue::get(I.getType()));
- I.eraseFromParent();
- return;
- }
-
- // Next, rewrite uses of the instruction, inserting PHI nodes as needed.
- for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE; ) {
- // Grab the use before incrementing the iterator.
- Use &U = UI.getUse();
- // Increment the iterator before removing the use from the list.
- ++UI;
- SSA.RewriteUseAfterInsertions(U);
+ auto It = SunkCopies.find(ExitBlock);
+ if (It != SunkCopies.end())
+ New = It->second;
+ else
+ New = SunkCopies[ExitBlock] =
+ CloneInstructionInExitBlock(I, *ExitBlock, *PN);
+
+ PN->replaceAllUsesWith(New);
+ PN->eraseFromParent();
}
- // Update CurAST for NewPHIs if I had pointer type.
- if (I.getType()->isPointerTy())
- for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
- CurAST->copyValue(&I, NewPHIs[i]);
-
- // Finally, remove the instruction from CurAST. It is no longer in the loop.
CurAST->deleteValue(&I);
+ I.eraseFromParent();
}
/// hoist - When an instruction is found to only use loop invariant operands
@@ -616,7 +639,7 @@ void LICM::hoist(Instruction &I) {
///
bool LICM::isSafeToExecuteUnconditionally(Instruction &Inst) {
// If it is not a trapping instruction, it is always safe to hoist.
- if (isSafeToSpeculativelyExecute(&Inst))
+ if (isSafeToSpeculativelyExecute(&Inst, DL))
return true;
return isGuaranteedToExecute(Inst);
@@ -662,24 +685,42 @@ namespace {
SmallPtrSet<Value*, 4> &PointerMustAliases;
SmallVectorImpl<BasicBlock*> &LoopExitBlocks;
SmallVectorImpl<Instruction*> &LoopInsertPts;
+ PredIteratorCache &PredCache;
AliasSetTracker &AST;
+ LoopInfo &LI;
DebugLoc DL;
int Alignment;
MDNode *TBAATag;
+
+ Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ if (Loop *L = LI.getLoopFor(I->getParent()))
+ if (!L->contains(BB)) {
+ // We need to create an LCSSA PHI node for the incoming value and
+ // store that.
+ PHINode *PN = PHINode::Create(
+ I->getType(), PredCache.GetNumPreds(BB),
+ I->getName() + ".lcssa", BB->begin());
+ for (BasicBlock **PI = PredCache.GetPreds(BB); *PI; ++PI)
+ PN->addIncoming(I, *PI);
+ return PN;
+ }
+ return V;
+ }
+
public:
- LoopPromoter(Value *SP,
- const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S,
- SmallPtrSet<Value*, 4> &PMA,
- SmallVectorImpl<BasicBlock*> &LEB,
- SmallVectorImpl<Instruction*> &LIP,
- AliasSetTracker &ast, DebugLoc dl, int alignment,
+ LoopPromoter(Value *SP, const SmallVectorImpl<Instruction *> &Insts,
+ SSAUpdater &S, SmallPtrSet<Value *, 4> &PMA,
+ SmallVectorImpl<BasicBlock *> &LEB,
+ SmallVectorImpl<Instruction *> &LIP, PredIteratorCache &PIC,
+ AliasSetTracker &ast, LoopInfo &li, DebugLoc dl, int alignment,
MDNode *TBAATag)
- : LoadAndStorePromoter(Insts, S), SomePtr(SP),
- PointerMustAliases(PMA), LoopExitBlocks(LEB), LoopInsertPts(LIP),
- AST(ast), DL(dl), Alignment(alignment), TBAATag(TBAATag) {}
+ : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA),
+ LoopExitBlocks(LEB), LoopInsertPts(LIP), PredCache(PIC), AST(ast),
+ LI(li), DL(dl), Alignment(alignment), TBAATag(TBAATag) {}
- virtual bool isInstInList(Instruction *I,
- const SmallVectorImpl<Instruction*> &) const {
+ bool isInstInList(Instruction *I,
+ const SmallVectorImpl<Instruction*> &) const override {
Value *Ptr;
if (LoadInst *LI = dyn_cast<LoadInst>(I))
Ptr = LI->getOperand(0);
@@ -688,7 +729,7 @@ namespace {
return PointerMustAliases.count(Ptr);
}
- virtual void doExtraRewritesBeforeFinalDeletion() const {
+ void doExtraRewritesBeforeFinalDeletion() const override {
// Insert stores in the loop exit blocks. Each exit block gets a
// store of the live-out values that feed them. Since we've already told
// the SSA updater about the defs in the loop and the preheader
@@ -696,19 +737,21 @@ namespace {
for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
BasicBlock *ExitBlock = LoopExitBlocks[i];
Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
+ LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
+ Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
Instruction *InsertPos = LoopInsertPts[i];
- StoreInst *NewSI = new StoreInst(LiveInValue, SomePtr, InsertPos);
+ StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
NewSI->setAlignment(Alignment);
NewSI->setDebugLoc(DL);
if (TBAATag) NewSI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
}
}
- virtual void replaceLoadWithValue(LoadInst *LI, Value *V) const {
+ void replaceLoadWithValue(LoadInst *LI, Value *V) const override {
// Update alias analysis.
AST.copyValue(LI, V);
}
- virtual void instructionDeleted(Instruction *I) const {
+ void instructionDeleted(Instruction *I) const override {
AST.deleteValue(I);
}
};
@@ -721,7 +764,8 @@ namespace {
///
void LICM::PromoteAliasSet(AliasSet &AS,
SmallVectorImpl<BasicBlock*> &ExitBlocks,
- SmallVectorImpl<Instruction*> &InsertPts) {
+ SmallVectorImpl<Instruction*> &InsertPts,
+ PredIteratorCache &PIC) {
// We can promote this alias set if it has a store, if it is a "Must" alias
// set, if the pointer is loop invariant, and if we are not eliminating any
// volatile loads or stores.
@@ -754,7 +798,7 @@ void LICM::PromoteAliasSet(AliasSet &AS,
// We start with an alignment of one and try to find instructions that allow
// us to prove better alignment.
unsigned Alignment = 1;
- MDNode *TBAATag = 0;
+ MDNode *TBAATag = nullptr;
// Check that all of the pointers in the alias set have the same type. We
// cannot (yet) promote a memory location that is loaded and stored in
@@ -769,23 +813,22 @@ void LICM::PromoteAliasSet(AliasSet &AS,
if (SomePtr->getType() != ASIV->getType())
return;
- for (Value::use_iterator UI = ASIV->use_begin(), UE = ASIV->use_end();
- UI != UE; ++UI) {
+ for (User *U : ASIV->users()) {
// Ignore instructions that are outside the loop.
- Instruction *Use = dyn_cast<Instruction>(*UI);
- if (!Use || !CurLoop->contains(Use))
+ Instruction *UI = dyn_cast<Instruction>(U);
+ if (!UI || !CurLoop->contains(UI))
continue;
// If there is a non-load/store instruction in the loop, we can't promote
// it.
- if (LoadInst *load = dyn_cast<LoadInst>(Use)) {
+ if (LoadInst *load = dyn_cast<LoadInst>(UI)) {
assert(!load->isVolatile() && "AST broken");
if (!load->isSimple())
return;
- } else if (StoreInst *store = dyn_cast<StoreInst>(Use)) {
+ } else if (StoreInst *store = dyn_cast<StoreInst>(UI)) {
// Stores *of* the pointer are not interesting, only stores *to* the
// pointer.
- if (Use->getOperand(1) != ASIV)
+ if (UI->getOperand(1) != ASIV)
continue;
assert(!store->isVolatile() && "AST broken");
if (!store->isSimple())
@@ -801,13 +844,13 @@ void LICM::PromoteAliasSet(AliasSet &AS,
// Larger is better, with the exception of 0 being the best alignment.
unsigned InstAlignment = store->getAlignment();
if ((InstAlignment > Alignment || InstAlignment == 0) && Alignment != 0)
- if (isGuaranteedToExecute(*Use)) {
+ if (isGuaranteedToExecute(*UI)) {
GuaranteedToExecute = true;
Alignment = InstAlignment;
}
if (!GuaranteedToExecute)
- GuaranteedToExecute = isGuaranteedToExecute(*Use);
+ GuaranteedToExecute = isGuaranteedToExecute(*UI);
} else
return; // Not a load or store.
@@ -815,13 +858,13 @@ void LICM::PromoteAliasSet(AliasSet &AS,
// Merge the TBAA tags.
if (LoopUses.empty()) {
// On the first load/store, just take its TBAA tag.
- TBAATag = Use->getMetadata(LLVMContext::MD_tbaa);
+ TBAATag = UI->getMetadata(LLVMContext::MD_tbaa);
} else if (TBAATag) {
TBAATag = MDNode::getMostGenericTBAA(TBAATag,
- Use->getMetadata(LLVMContext::MD_tbaa));
+ UI->getMetadata(LLVMContext::MD_tbaa));
}
-
- LoopUses.push_back(Use);
+
+ LoopUses.push_back(UI);
}
}
@@ -853,7 +896,7 @@ void LICM::PromoteAliasSet(AliasSet &AS,
SmallVector<PHINode*, 16> NewPHIs;
SSAUpdater SSA(&NewPHIs);
LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
- InsertPts, *CurAST, DL, Alignment, TBAATag);
+ InsertPts, PIC, *CurAST, *LI, DL, Alignment, TBAATag);
// Set up the preheader to have a definition of the value. It is the live-out
// value from the preheader that uses in the loop will use.
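The rewritten LICM::sink() above leans on LCSSA form: once a loop is in LCSSA, every use of a loop-defined value outside the loop is funneled through a PHI node in an exit block, so sinking reduces to cloning the instruction once per exit block and replacing those PHIs. The sketch below states the property being relied on, assuming LLVM 3.5 headers; the helper name is invented and the check is a simplification that ignores the trivially-replaceable-PHI refinement in isNotUsedInLoop.

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// In LCSSA form, an instruction defined in L that is only used outside L is
// used only by PHI nodes sitting in exit blocks of L. When this holds,
// sinking the instruction amounts to rewriting those PHIs.
static bool onlyUsedByExitPhis(Instruction &I, Loop &L) {
  for (User *U : I.users()) {
    PHINode *PN = dyn_cast<PHINode>(U);
    if (!PN || L.contains(PN->getParent()))
      return false; // a non-PHI user, or a user still inside the loop
  }
  return true;
}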
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoadCombine.cpp b/contrib/llvm/lib/Transforms/Scalar/LoadCombine.cpp
new file mode 100644
index 0000000..846aa70
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Scalar/LoadCombine.cpp
@@ -0,0 +1,268 @@
+//===- LoadCombine.cpp - Combine Adjacent Loads ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This transformation combines adjacent loads.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Scalar.h"
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/TargetFolder.h"
+#include "llvm/Pass.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "load-combine"
+
+STATISTIC(NumLoadsAnalyzed, "Number of loads analyzed for combining");
+STATISTIC(NumLoadsCombined, "Number of loads combined");
+
+namespace {
+struct PointerOffsetPair {
+ Value *Pointer;
+ uint64_t Offset;
+};
+
+struct LoadPOPPair {
+ LoadPOPPair(LoadInst *L, PointerOffsetPair P, unsigned O)
+ : Load(L), POP(P), InsertOrder(O) {}
+ LoadPOPPair() {}
+ LoadInst *Load;
+ PointerOffsetPair POP;
+ /// \brief The new load needs to be created before the first load in IR order.
+ unsigned InsertOrder;
+};
+
+class LoadCombine : public BasicBlockPass {
+ LLVMContext *C;
+ const DataLayout *DL;
+
+public:
+ LoadCombine()
+ : BasicBlockPass(ID),
+ C(nullptr), DL(nullptr) {
+ initializeSROAPass(*PassRegistry::getPassRegistry());
+ }
+ bool doInitialization(Function &) override;
+ bool runOnBasicBlock(BasicBlock &BB) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ const char *getPassName() const override { return "LoadCombine"; }
+ static char ID;
+
+ typedef IRBuilder<true, TargetFolder> BuilderTy;
+
+private:
+ BuilderTy *Builder;
+
+ PointerOffsetPair getPointerOffsetPair(LoadInst &);
+ bool combineLoads(DenseMap<const Value *, SmallVector<LoadPOPPair, 8>> &);
+ bool aggregateLoads(SmallVectorImpl<LoadPOPPair> &);
+ bool combineLoads(SmallVectorImpl<LoadPOPPair> &);
+};
+}
+
+bool LoadCombine::doInitialization(Function &F) {
+ DEBUG(dbgs() << "LoadCombine function: " << F.getName() << "\n");
+ C = &F.getContext();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ if (!DLP) {
+ DEBUG(dbgs() << " Skipping LoadCombine -- no target data!\n");
+ return false;
+ }
+ DL = &DLP->getDataLayout();
+ return true;
+}
+
+PointerOffsetPair LoadCombine::getPointerOffsetPair(LoadInst &LI) {
+ PointerOffsetPair POP;
+ POP.Pointer = LI.getPointerOperand();
+ POP.Offset = 0;
+ while (isa<BitCastInst>(POP.Pointer) || isa<GetElementPtrInst>(POP.Pointer)) {
+ if (auto *GEP = dyn_cast<GetElementPtrInst>(POP.Pointer)) {
+ unsigned BitWidth = DL->getPointerTypeSizeInBits(GEP->getType());
+ APInt Offset(BitWidth, 0);
+ if (GEP->accumulateConstantOffset(*DL, Offset))
+ POP.Offset += Offset.getZExtValue();
+ else
+ // Can't handle GEPs with variable indices.
+ return POP;
+ POP.Pointer = GEP->getPointerOperand();
+ } else if (auto *BC = dyn_cast<BitCastInst>(POP.Pointer))
+ POP.Pointer = BC->getOperand(0);
+ }
+ return POP;
+}
+
+bool LoadCombine::combineLoads(
+ DenseMap<const Value *, SmallVector<LoadPOPPair, 8>> &LoadMap) {
+ bool Combined = false;
+ for (auto &Loads : LoadMap) {
+ if (Loads.second.size() < 2)
+ continue;
+ std::sort(Loads.second.begin(), Loads.second.end(),
+ [](const LoadPOPPair &A, const LoadPOPPair &B) {
+ return A.POP.Offset < B.POP.Offset;
+ });
+ if (aggregateLoads(Loads.second))
+ Combined = true;
+ }
+ return Combined;
+}
+
+/// \brief Try to aggregate loads from a sorted list of loads to be combined.
+///
+/// It is guaranteed that no writes occur between any of the loads. All loads
+/// have the same base pointer. There are at least two loads.
+bool LoadCombine::aggregateLoads(SmallVectorImpl<LoadPOPPair> &Loads) {
+ assert(Loads.size() >= 2 && "Insufficient loads!");
+ LoadInst *BaseLoad = nullptr;
+ SmallVector<LoadPOPPair, 8> AggregateLoads;
+ bool Combined = false;
+ uint64_t PrevOffset = -1ull;
+ uint64_t PrevSize = 0;
+ for (auto &L : Loads) {
+ if (PrevOffset == -1ull) {
+ BaseLoad = L.Load;
+ PrevOffset = L.POP.Offset;
+ PrevSize = DL->getTypeStoreSize(L.Load->getType());
+ AggregateLoads.push_back(L);
+ continue;
+ }
+ if (L.Load->getAlignment() > BaseLoad->getAlignment())
+ continue;
+ if (L.POP.Offset > PrevOffset + PrevSize) {
+ // No other load will be combinable
+ if (combineLoads(AggregateLoads))
+ Combined = true;
+ AggregateLoads.clear();
+ PrevOffset = -1;
+ continue;
+ }
+ if (L.POP.Offset != PrevOffset + PrevSize)
+ // This load starts before the previous load ends, i.e. the loads overlap.
+ // FIXME: We may want to handle this case.
+ continue;
+ PrevOffset = L.POP.Offset;
+ PrevSize = DL->getTypeStoreSize(L.Load->getType());
+ AggregateLoads.push_back(L);
+ }
+ if (combineLoads(AggregateLoads))
+ Combined = true;
+ return Combined;
+}
+
+/// \brief Given a list of combinable loads, combine the maximum number of them.
+bool LoadCombine::combineLoads(SmallVectorImpl<LoadPOPPair> &Loads) {
+ // Remove loads from the end while the size is not a power of 2.
+ unsigned TotalSize = 0;
+ for (const auto &L : Loads)
+ TotalSize += L.Load->getType()->getPrimitiveSizeInBits();
+ while (TotalSize != 0 && !isPowerOf2_32(TotalSize))
+ TotalSize -= Loads.pop_back_val().Load->getType()->getPrimitiveSizeInBits();
+ if (Loads.size() < 2)
+ return false;
+
+ DEBUG({
+ dbgs() << "***** Combining Loads ******\n";
+ for (const auto &L : Loads) {
+ dbgs() << L.POP.Offset << ": " << *L.Load << "\n";
+ }
+ });
+
+ // Find first load. This is where we put the new load.
+ LoadPOPPair FirstLP;
+ FirstLP.InsertOrder = -1u;
+ for (const auto &L : Loads)
+ if (L.InsertOrder < FirstLP.InsertOrder)
+ FirstLP = L;
+
+ unsigned AddressSpace =
+ FirstLP.POP.Pointer->getType()->getPointerAddressSpace();
+
+ Builder->SetInsertPoint(FirstLP.Load);
+ Value *Ptr = Builder->CreateConstGEP1_64(
+ Builder->CreatePointerCast(Loads[0].POP.Pointer,
+ Builder->getInt8PtrTy(AddressSpace)),
+ Loads[0].POP.Offset);
+ LoadInst *NewLoad = new LoadInst(
+ Builder->CreatePointerCast(
+ Ptr, PointerType::get(IntegerType::get(Ptr->getContext(), TotalSize),
+ Ptr->getType()->getPointerAddressSpace())),
+ Twine(Loads[0].Load->getName()) + ".combined", false,
+ Loads[0].Load->getAlignment(), FirstLP.Load);
+
+ for (const auto &L : Loads) {
+ Builder->SetInsertPoint(L.Load);
+ Value *V = Builder->CreateExtractInteger(
+ *DL, NewLoad, cast<IntegerType>(L.Load->getType()),
+ L.POP.Offset - Loads[0].POP.Offset, "combine.extract");
+ L.Load->replaceAllUsesWith(V);
+ }
+
+ NumLoadsCombined = NumLoadsCombined + Loads.size();
+ return true;
+}
+
+bool LoadCombine::runOnBasicBlock(BasicBlock &BB) {
+ if (skipOptnoneFunction(BB) || !DL)
+ return false;
+
+ IRBuilder<true, TargetFolder>
+ TheBuilder(BB.getContext(), TargetFolder(DL));
+ Builder = &TheBuilder;
+
+ DenseMap<const Value *, SmallVector<LoadPOPPair, 8>> LoadMap;
+
+ bool Combined = false;
+ unsigned Index = 0;
+ for (auto &I : BB) {
+ if (I.mayWriteToMemory() || I.mayThrow()) {
+ if (combineLoads(LoadMap))
+ Combined = true;
+ LoadMap.clear();
+ continue;
+ }
+ LoadInst *LI = dyn_cast<LoadInst>(&I);
+ if (!LI)
+ continue;
+ ++NumLoadsAnalyzed;
+ if (!LI->isSimple() || !LI->getType()->isIntegerTy())
+ continue;
+ auto POP = getPointerOffsetPair(*LI);
+ if (!POP.Pointer)
+ continue;
+ LoadMap[POP.Pointer].push_back(LoadPOPPair(LI, POP, Index++));
+ }
+ if (combineLoads(LoadMap))
+ Combined = true;
+ return Combined;
+}
+
+void LoadCombine::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+}
+
+char LoadCombine::ID = 0;
+
+BasicBlockPass *llvm::createLoadCombinePass() {
+ return new LoadCombine();
+}
+
+INITIALIZE_PASS(LoadCombine, "load-combine", "Combine Adjacent Loads", false,
+ false)
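For readers unfamiliar with the new pass, the effect of LoadCombine can be pictured at the source level: adjacent narrow loads from a common base pointer are replaced by one wide load plus integer extracts, mirroring the CreateConstGEP1_64/CreateExtractInteger calls above. The following is a rough C++ analogy, not taken from the patch: the struct and function names are invented, and the shift-based extraction assumes a little-endian target (the IR-level pass consults DataLayout instead).

#include <cstdint>
#include <cstring>

struct Packet { uint16_t lo; uint16_t hi; }; // two adjacent 16-bit fields

// Before combining: two separate 16-bit loads at offsets 0 and 2.
uint32_t sumFields(const Packet &P) {
  return uint32_t(P.lo) + uint32_t(P.hi);
}

// After combining: one 32-bit load covering both fields, with each original
// value extracted from the wide value.
uint32_t sumFieldsCombined(const Packet &P) {
  uint32_t Wide;
  std::memcpy(&Wide, &P, sizeof(Wide)); // single 32-bit load
  uint16_t Lo = uint16_t(Wide);         // field at offset 0
  uint16_t Hi = uint16_t(Wide >> 16);   // field at offset 2 (little-endian)
  return uint32_t(Lo) + uint32_t(Hi);
}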
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
index 9e39d2e..5ab686a 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
@@ -14,15 +14,16 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "loop-delete"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/IR/Dominators.h"
using namespace llvm;
+#define DEBUG_TYPE "loop-delete"
+
STATISTIC(NumDeleted, "Number of loops deleted");
namespace {
@@ -34,17 +35,17 @@ namespace {
}
// Possibly eliminate loop L if it is dead.
- bool runOnLoop(Loop *L, LPPassManager &LPM);
+ bool runOnLoop(Loop *L, LPPassManager &LPM) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<DominatorTree>();
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<LoopInfo>();
AU.addRequired<ScalarEvolution>();
AU.addRequiredID(LoopSimplifyID);
AU.addRequiredID(LCSSAID);
AU.addPreserved<ScalarEvolution>();
- AU.addPreserved<DominatorTree>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
AU.addPreserved<LoopInfo>();
AU.addPreservedID(LoopSimplifyID);
AU.addPreservedID(LCSSAID);
@@ -61,7 +62,7 @@ namespace {
char LoopDeletion::ID = 0;
INITIALIZE_PASS_BEGIN(LoopDeletion, "loop-deletion",
"Delete dead loops", false, false)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
@@ -130,6 +131,9 @@ bool LoopDeletion::isLoopDead(Loop *L,
/// NOTE: This entire process relies pretty heavily on LoopSimplify and LCSSA
/// in order to make various safety checks work.
bool LoopDeletion::runOnLoop(Loop *L, LPPassManager &LPM) {
+ if (skipOptnoneFunction(L))
+ return false;
+
// We can only remove the loop if there is a preheader that we can
// branch from after removing it.
BasicBlock *preheader = L->getLoopPreheader();
@@ -202,7 +206,7 @@ bool LoopDeletion::runOnLoop(Loop *L, LPPassManager &LPM) {
// Update the dominator tree and remove the instructions and blocks that will
// be deleted from the reference counting scheme.
- DominatorTree &DT = getAnalysis<DominatorTree>();
+ DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SmallVector<DomTreeNode*, 8> ChildNodes;
for (Loop::block_iterator LI = L->block_begin(), LE = L->block_end();
LI != LE; ++LI) {
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 952b76b..a12f5a7 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -41,7 +41,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "loop-idiom"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
@@ -51,6 +50,7 @@
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
@@ -60,6 +60,8 @@
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
+#define DEBUG_TYPE "loop-idiom"
+
STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");
@@ -78,9 +80,6 @@ namespace {
return dyn_cast<BranchInst>(BB->getTerminator());
}
- /// Return the condition of the branch terminating the given basic block.
- static Value *getBrCondtion(BasicBlock *);
-
/// Derive the precondition block (i.e the block that guards the loop
/// preheader) from the given preheader.
static BasicBlock *getPrecondBb(BasicBlock *PreHead);
@@ -108,22 +107,22 @@ namespace {
bool preliminaryScreen();
/// Check if the given conditional branch is based on the comparison
- /// beween a variable and zero, and if the variable is non-zero, the
- /// control yeilds to the loop entry. If the branch matches the behavior,
+ /// between a variable and zero, and if the variable is non-zero, the
+ /// control yields to the loop entry. If the branch matches the behavior,
/// the variable involved in the comparison is returned. This function will
/// be called to see if the precondition and postcondition of the loop
/// are in desirable form.
- Value *matchCondition (BranchInst *Br, BasicBlock *NonZeroTarget) const;
+ Value *matchCondition(BranchInst *Br, BasicBlock *NonZeroTarget) const;
/// Return true iff the idiom is detected in the loop, and 1) \p CntInst
- /// is set to the instruction counting the pupulation bit. 2) \p CntPhi
+ /// is set to the instruction counting the population bit. 2) \p CntPhi
/// is set to the corresponding phi node. 3) \p Var is set to the value
/// whose population bits are being counted.
bool detectIdiom
(Instruction *&CntInst, PHINode *&CntPhi, Value *&Var) const;
/// Insert ctpop intrinsic function and some obviously dead instructions.
- void transform (Instruction *CntInst, PHINode *CntPhi, Value *Var);
+ void transform(Instruction *CntInst, PHINode *CntPhi, Value *Var);
/// Create llvm.ctpop.* intrinsic function.
CallInst *createPopcntIntrinsic(IRBuilderTy &IRB, Value *Val, DebugLoc DL);
@@ -131,7 +130,7 @@ namespace {
class LoopIdiomRecognize : public LoopPass {
Loop *CurLoop;
- const DataLayout *TD;
+ const DataLayout *DL;
DominatorTree *DT;
ScalarEvolution *SE;
TargetLibraryInfo *TLI;
@@ -140,10 +139,10 @@ namespace {
static char ID;
explicit LoopIdiomRecognize() : LoopPass(ID) {
initializeLoopIdiomRecognizePass(*PassRegistry::getPassRegistry());
- TD = 0; DT = 0; SE = 0; TLI = 0; TTI = 0;
+ DL = nullptr; DT = nullptr; SE = nullptr; TLI = nullptr; TTI = nullptr;
}
- bool runOnLoop(Loop *L, LPPassManager &LPM);
+ bool runOnLoop(Loop *L, LPPassManager &LPM) override;
bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
SmallVectorImpl<BasicBlock*> &ExitBlocks);
@@ -163,7 +162,7 @@ namespace {
/// This transformation requires natural loop information & requires that
/// loop preheaders be inserted into the CFG.
///
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<LoopInfo>();
AU.addPreserved<LoopInfo>();
AU.addRequiredID(LoopSimplifyID);
@@ -174,18 +173,23 @@ namespace {
AU.addPreserved<AliasAnalysis>();
AU.addRequired<ScalarEvolution>();
AU.addPreserved<ScalarEvolution>();
- AU.addPreserved<DominatorTree>();
- AU.addRequired<DominatorTree>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
+ AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<TargetLibraryInfo>();
AU.addRequired<TargetTransformInfo>();
}
const DataLayout *getDataLayout() {
- return TD ? TD : TD=getAnalysisIfAvailable<DataLayout>();
+ if (DL)
+ return DL;
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
+ return DL;
}
DominatorTree *getDominatorTree() {
- return DT ? DT : (DT=&getAnalysis<DominatorTree>());
+ return DT ? DT
+ : (DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree());
}
ScalarEvolution *getScalarEvolution() {
@@ -212,7 +216,7 @@ char LoopIdiomRecognize::ID = 0;
INITIALIZE_PASS_BEGIN(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
@@ -244,7 +248,7 @@ static void deleteDeadInstruction(Instruction *I, ScalarEvolution &SE,
for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
Value *Op = DeadInst->getOperand(op);
- DeadInst->setOperand(op, 0);
+ DeadInst->setOperand(op, nullptr);
// If this operand just became dead, add it to the NowDeadInsts list.
if (!Op->use_empty()) continue;
@@ -286,17 +290,12 @@ bool LIRUtil::isAlmostEmpty(BasicBlock *BB) {
return false;
}
-Value *LIRUtil::getBrCondtion(BasicBlock *BB) {
- BranchInst *Br = getBranch(BB);
- return Br ? Br->getCondition() : 0;
-}
-
BasicBlock *LIRUtil::getPrecondBb(BasicBlock *PreHead) {
if (BasicBlock *BB = PreHead->getSinglePredecessor()) {
BranchInst *Br = getBranch(BB);
- return Br && Br->isConditional() ? BB : 0;
+ return Br && Br->isConditional() ? BB : nullptr;
}
- return 0;
+ return nullptr;
}
//===----------------------------------------------------------------------===//
@@ -306,7 +305,7 @@ BasicBlock *LIRUtil::getPrecondBb(BasicBlock *PreHead) {
//===----------------------------------------------------------------------===//
NclPopcountRecognize::NclPopcountRecognize(LoopIdiomRecognize &TheLIR):
- LIR(TheLIR), CurLoop(TheLIR.getLoop()), PreCondBB(0) {
+ LIR(TheLIR), CurLoop(TheLIR.getLoop()), PreCondBB(nullptr) {
}
bool NclPopcountRecognize::preliminaryScreen() {
@@ -343,25 +342,25 @@ bool NclPopcountRecognize::preliminaryScreen() {
return true;
}
-Value *NclPopcountRecognize::matchCondition (BranchInst *Br,
- BasicBlock *LoopEntry) const {
+Value *NclPopcountRecognize::matchCondition(BranchInst *Br,
+ BasicBlock *LoopEntry) const {
if (!Br || !Br->isConditional())
- return 0;
+ return nullptr;
ICmpInst *Cond = dyn_cast<ICmpInst>(Br->getCondition());
if (!Cond)
- return 0;
+ return nullptr;
ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1));
if (!CmpZero || !CmpZero->isZero())
- return 0;
+ return nullptr;
ICmpInst::Predicate Pred = Cond->getPredicate();
if ((Pred == ICmpInst::ICMP_NE && Br->getSuccessor(0) == LoopEntry) ||
(Pred == ICmpInst::ICMP_EQ && Br->getSuccessor(1) == LoopEntry))
return Cond->getOperand(0);
- return 0;
+ return nullptr;
}
bool NclPopcountRecognize::detectIdiom(Instruction *&CntInst,
@@ -392,9 +391,9 @@ bool NclPopcountRecognize::detectIdiom(Instruction *&CntInst,
Value *VarX1, *VarX0;
PHINode *PhiX, *CountPhi;
- DefX2 = CountInst = 0;
- VarX1 = VarX0 = 0;
- PhiX = CountPhi = 0;
+ DefX2 = CountInst = nullptr;
+ VarX1 = VarX0 = nullptr;
+ PhiX = CountPhi = nullptr;
LoopEntry = *(CurLoop->block_begin());
// step 1: Check if the loop-back branch is in desirable form.
@@ -441,7 +440,7 @@ bool NclPopcountRecognize::detectIdiom(Instruction *&CntInst,
    // step 4: Find the instruction which counts the population: cnt2 = cnt1 + 1
{
- CountInst = NULL;
+ CountInst = nullptr;
for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI(),
IterE = LoopEntry->end(); Iter != IterE; Iter++) {
Instruction *Inst = Iter;
@@ -458,9 +457,8 @@ bool NclPopcountRecognize::detectIdiom(Instruction *&CntInst,
    // Check if the result of the instruction is live outside of the loop.
bool LiveOutLoop = false;
- for (Value::use_iterator I = Inst->use_begin(), E = Inst->use_end();
- I != E; I++) {
- if ((cast<Instruction>(*I))->getParent() != LoopEntry) {
+ for (User *U : Inst->users()) {
+ if ((cast<Instruction>(U))->getParent() != LoopEntry) {
LiveOutLoop = true; break;
}
}
@@ -519,7 +517,7 @@ void NclPopcountRecognize::transform(Instruction *CntInst,
// TripCnt is exactly the number of iterations the loop has
TripCnt = NewCount;
- // If the popoulation counter's initial value is not zero, insert Add Inst.
+ // If the population counter's initial value is not zero, insert Add Inst.
Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
if (!InitConst || !InitConst->isZero()) {
@@ -596,11 +594,9 @@ void NclPopcountRecognize::transform(Instruction *CntInst,
// __builtin_ctpop().
{
SmallVector<Value *, 4> CntUses;
- for (Value::use_iterator I = CntInst->use_begin(), E = CntInst->use_end();
- I != E; I++) {
- if (cast<Instruction>(*I)->getParent() != Body)
- CntUses.push_back(*I);
- }
+ for (User *U : CntInst->users())
+ if (cast<Instruction>(U)->getParent() != Body)
+ CntUses.push_back(U);
for (unsigned Idx = 0; Idx < CntUses.size(); Idx++) {
(cast<Instruction>(CntUses[Idx]))->replaceUsesOfWith(CntInst, NewCount);
}
@@ -705,6 +701,9 @@ bool LoopIdiomRecognize::runOnNoncountableLoop() {
}
bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) {
+ if (skipOptnoneFunction(L))
+ return false;
+
CurLoop = L;
// If the loop could not be converted to canonical form, it must have an
@@ -746,7 +745,7 @@ bool LoopIdiomRecognize::runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
// If processing the store invalidated our iterator, start over from the
// top of the block.
- if (InstPtr == 0)
+ if (!InstPtr)
I = BB->begin();
continue;
}
@@ -759,7 +758,7 @@ bool LoopIdiomRecognize::runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
// If processing the memset invalidated our iterator, start over from the
// top of the block.
- if (InstPtr == 0)
+ if (!InstPtr)
I = BB->begin();
continue;
}
@@ -777,7 +776,7 @@ bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) {
Value *StorePtr = SI->getPointerOperand();
// Reject stores that are so large that they overflow an unsigned.
- uint64_t SizeInBits = TD->getTypeSizeInBits(StoredVal->getType());
+ uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
return false;
@@ -786,7 +785,7 @@ bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) {
// random store we can't handle.
const SCEVAddRecExpr *StoreEv =
dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
- if (StoreEv == 0 || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
+ if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
return false;
// Check to see if the stride matches the size of the store. If so, then we
@@ -794,7 +793,7 @@ bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) {
unsigned StoreSize = (unsigned)SizeInBits >> 3;
const SCEVConstant *Stride = dyn_cast<SCEVConstant>(StoreEv->getOperand(1));
- if (Stride == 0 || StoreSize != Stride->getValue()->getValue()) {
+ if (!Stride || StoreSize != Stride->getValue()->getValue()) {
// TODO: Could also handle negative stride here someday, that will require
// the validity check in mayLoopAccessLocation to be updated though.
// Enable this to print exact negative strides.
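For context, a minimal example of the strided-store loop shape that processLoopStore accepts (plain C++, an illustrative assumption rather than code from this file): the stored value is loop-invariant, the pointer is an affine recurrence in the loop, and the stride equals the store size, so the whole loop collapses to one memset.

#include <cstddef>
#include <cstring>

void zeroFill(unsigned char *A, std::size_t N) {
  for (std::size_t I = 0; I != N; ++I)
    A[I] = 0;                 // unit-stride store of a byte-splattable value
}

// Conceptual result of the transform:
void zeroFillIdiom(unsigned char *A, std::size_t N) {
  if (N)
    std::memset(A, 0, N);
}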
@@ -843,7 +842,7 @@ processLoopMemSet(MemSetInst *MSI, const SCEV *BECount) {
// loop, which indicates a strided store. If we have something else, it's a
// random store we can't handle.
const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
- if (Ev == 0 || Ev->getLoop() != CurLoop || !Ev->isAffine())
+ if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine())
return false;
// Reject memsets that are so large that they overflow an unsigned.
@@ -857,7 +856,7 @@ processLoopMemSet(MemSetInst *MSI, const SCEV *BECount) {
// TODO: Could also handle negative stride here someday, that will require the
// validity check in mayLoopAccessLocation to be updated though.
- if (Stride == 0 || MSI->getLength() != Stride->getValue())
+ if (!Stride || MSI->getLength() != Stride->getValue())
return false;
return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
@@ -905,28 +904,28 @@ static bool mayLoopAccessLocation(Value *Ptr,AliasAnalysis::ModRefResult Access,
///
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
/// just replicate their input array and then pass on to memset_pattern16.
-static Constant *getMemSetPatternValue(Value *V, const DataLayout &TD) {
+static Constant *getMemSetPatternValue(Value *V, const DataLayout &DL) {
// If the value isn't a constant, we can't promote it to being in a constant
// array. We could theoretically do a store to an alloca or something, but
// that doesn't seem worthwhile.
Constant *C = dyn_cast<Constant>(V);
- if (C == 0) return 0;
+ if (!C) return nullptr;
// Only handle simple values that are a power of two bytes in size.
- uint64_t Size = TD.getTypeSizeInBits(V->getType());
+ uint64_t Size = DL.getTypeSizeInBits(V->getType());
if (Size == 0 || (Size & 7) || (Size & (Size-1)))
- return 0;
+ return nullptr;
// Don't care enough about darwin/ppc to implement this.
- if (TD.isBigEndian())
- return 0;
+ if (DL.isBigEndian())
+ return nullptr;
// Convert to size in bytes.
Size /= 8;
// TODO: If CI is larger than 16-bytes, we can try slicing it in half to see
// if the top and bottom are the same (e.g. for vectors and large integers).
- if (Size > 16) return 0;
+ if (Size > 16) return nullptr;
// If the constant is exactly 16 bytes, just use it.
if (Size == 16) return C;
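getMemSetPatternValue only has to replicate small power-of-two-sized constants up to the 16 bytes that memset_pattern16 consumes. A stand-alone sketch of that replication using plain byte buffers instead of Constants (an assumption for illustration, not the pass's code):

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>

std::array<std::uint8_t, 16> buildPattern16(const void *Val, std::size_t Size) {
  // Size is expected to be 1, 2, 4, 8, or 16 bytes on a little-endian host.
  std::array<std::uint8_t, 16> Pattern{};
  for (std::size_t Off = 0; Off != 16; Off += Size)
    std::memcpy(Pattern.data() + Off, Val, Size);  // repeat the value to fill 16 bytes
  return Pattern;
}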
@@ -951,7 +950,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
// are stored. A store of i32 0x01020304 can never be turned into a memset,
// but it can be turned into memset_pattern if the target supports it.
Value *SplatValue = isBytewiseValue(StoredVal);
- Constant *PatternValue = 0;
+ Constant *PatternValue = nullptr;
unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
@@ -962,13 +961,13 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
// promote the memset.
CurLoop->isLoopInvariant(SplatValue)) {
// Keep and use SplatValue.
- PatternValue = 0;
+ PatternValue = nullptr;
} else if (DestAS == 0 &&
TLI->has(LibFunc::memset_pattern16) &&
- (PatternValue = getMemSetPatternValue(StoredVal, *TD))) {
+ (PatternValue = getMemSetPatternValue(StoredVal, *DL))) {
// Don't create memset_pattern16s with address spaces.
// It looks like we can use PatternValue!
- SplatValue = 0;
+ SplatValue = nullptr;
} else {
// Otherwise, this isn't an idiom we can transform. For example, we can't
// do anything with a 3-byte store.
@@ -1006,7 +1005,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
// The # stored bytes is (BECount+1)*Size. Expand the trip count out to
// pointer size if it isn't already.
- Type *IntPtr = Builder.getIntPtrTy(TD, DestAS);
+ Type *IntPtr = Builder.getIntPtrTy(DL, DestAS);
BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
@@ -1035,7 +1034,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
Int8PtrTy,
Int8PtrTy,
IntPtr,
- (void*)0);
+ (void*)nullptr);
// Otherwise we should form a memset_pattern16. PatternValue is known to be
  // a constant array of 16 bytes. Plop the value into a mergeable global.
@@ -1120,7 +1119,7 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
// The # stored bytes is (BECount+1)*Size. Expand the trip count out to
// pointer size if it isn't already.
- Type *IntPtrTy = Builder.getIntPtrTy(TD, SI->getPointerAddressSpace());
+ Type *IntPtrTy = Builder.getIntPtrTy(DL, SI->getPointerAddressSpace());
BECount = SE->getTruncateOrZeroExtend(BECount, IntPtrTy);
const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtrTy, 1),
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
index a23860a..ab1a939 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
@@ -11,21 +11,22 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "loop-instsimplify"
#include "llvm/Transforms/Scalar.h"
-#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/Analysis/Dominators.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
+#define DEBUG_TYPE "loop-instsimplify"
+
STATISTIC(NumSimplified, "Number of redundant instructions simplified");
namespace {
@@ -36,9 +37,9 @@ namespace {
initializeLoopInstSimplifyPass(*PassRegistry::getPassRegistry());
}
- bool runOnLoop(Loop*, LPPassManager&);
+ bool runOnLoop(Loop*, LPPassManager&) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<LoopInfo>();
AU.addRequiredID(LoopSimplifyID);
@@ -54,7 +55,7 @@ char LoopInstSimplify::ID = 0;
INITIALIZE_PASS_BEGIN(LoopInstSimplify, "loop-instsimplify",
"Simplify instructions in loops", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_END(LoopInstSimplify, "loop-instsimplify",
@@ -65,9 +66,15 @@ Pass *llvm::createLoopInstSimplifyPass() {
}
bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
- DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>();
+ if (skipOptnoneFunction(L))
+ return false;
+
+ DominatorTreeWrapperPass *DTWP =
+ getAnalysisIfAvailable<DominatorTreeWrapperPass>();
+ DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
LoopInfo *LI = &getAnalysis<LoopInfo>();
- const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
SmallVector<BasicBlock*, 8> ExitBlocks;
@@ -109,19 +116,26 @@ bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
// Don't bother simplifying unused instructions.
if (!I->use_empty()) {
- Value *V = SimplifyInstruction(I, TD, TLI, DT);
+ Value *V = SimplifyInstruction(I, DL, TLI, DT);
if (V && LI->replacementPreservesLCSSAForm(I, V)) {
// Mark all uses for resimplification next time round the loop.
- for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
- UI != UE; ++UI)
- Next->insert(cast<Instruction>(*UI));
+ for (User *U : I->users())
+ Next->insert(cast<Instruction>(U));
I->replaceAllUsesWith(V);
LocalChanged = true;
++NumSimplified;
}
}
- LocalChanged |= RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
+ bool res = RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
+ if (res) {
+      // RecursivelyDeleteTriviallyDeadInstructions can remove
+      // more than one instruction, so simply incrementing the
+      // iterator does not work. When instructions get deleted,
+      // re-iterate instead.
+ BI = BB->begin(); BE = BB->end();
+ LocalChanged |= res;
+ }
if (IsSubloopHeader && !isa<PHINode>(I))
break;
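The comment added above explains the fix: RecursivelyDeleteTriviallyDeadInstructions may erase instructions other than the one in hand, so a cached iterator can dangle. A minimal stand-alone analogy in plain C++ (the container and helper are hypothetical, not LLVM API):

#include <iterator>
#include <list>

// Hypothetical stand-in for RecursivelyDeleteTriviallyDeadInstructions: when
// the current element is "dead" (<= 0 here), erase every dead element and say so.
static bool removeIfDead(std::list<int> &L, std::list<int>::iterator I) {
  if (*I > 0)
    return false;
  for (auto J = L.begin(); J != L.end();)
    J = (*J <= 0) ? L.erase(J) : std::next(J);
  return true;
}

void prune(std::list<int> &L) {
  for (auto I = L.begin(), E = L.end(); I != E;) {
    if (removeIfDead(L, I)) {
      I = L.begin();   // any saved iterator may now dangle; restart the scan,
      E = L.end();     // mirroring the "BI = BB->begin(); BE = BB->end();" fix
      continue;
    }
    ++I;
  }
}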
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
index 643bc78..b6fbb16 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
@@ -11,11 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "loop-reroll"
#include "llvm/Transforms/Scalar.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/LoopPass.h"
@@ -24,6 +23,7 @@
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -35,6 +35,8 @@
using namespace llvm;
+#define DEBUG_TYPE "loop-reroll"
+
STATISTIC(NumRerolledLoops, "Number of rerolled loops");
static cl::opt<unsigned>
@@ -124,14 +126,14 @@ namespace {
initializeLoopRerollPass(*PassRegistry::getPassRegistry());
}
- bool runOnLoop(Loop *L, LPPassManager &LPM);
+ bool runOnLoop(Loop *L, LPPassManager &LPM) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<AliasAnalysis>();
AU.addRequired<LoopInfo>();
AU.addPreserved<LoopInfo>();
- AU.addRequired<DominatorTree>();
- AU.addPreserved<DominatorTree>();
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
AU.addRequired<ScalarEvolution>();
AU.addRequired<TargetLibraryInfo>();
}
@@ -140,7 +142,7 @@ protected:
AliasAnalysis *AA;
LoopInfo *LI;
ScalarEvolution *SE;
- DataLayout *DL;
+ const DataLayout *DL;
TargetLibraryInfo *TLI;
DominatorTree *DT;
@@ -189,12 +191,12 @@ protected:
iterator begin() {
assert(Valid && "Using invalid reduction");
- return llvm::next(Instructions.begin());
+ return std::next(Instructions.begin());
}
const_iterator begin() const {
assert(Valid && "Using invalid reduction");
- return llvm::next(Instructions.begin());
+ return std::next(Instructions.begin());
}
iterator end() { return Instructions.end(); }
@@ -340,7 +342,7 @@ char LoopReroll::ID = 0;
INITIALIZE_PASS_BEGIN(LoopReroll, "loop-reroll", "Reroll loops", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(LoopReroll, "loop-reroll", "Reroll loops", false, false)
@@ -353,12 +355,9 @@ Pass *llvm::createLoopRerollPass() {
// This operates like Instruction::isUsedOutsideOfBlock, but considers PHIs in
// non-loop blocks to be outside the loop.
static bool hasUsesOutsideLoop(Instruction *I, Loop *L) {
- for (Value::use_iterator UI = I->use_begin(),
- UIE = I->use_end(); UI != UIE; ++UI) {
- Instruction *User = cast<Instruction>(*UI);
- if (!L->contains(User))
+ for (User *U : I->users())
+ if (!L->contains(cast<Instruction>(U)))
return true;
- }
return false;
}
@@ -408,7 +407,7 @@ void LoopReroll::SimpleLoopReduction::add(Loop *L) {
Instruction *C = Instructions.front();
do {
- C = cast<Instruction>(*C->use_begin());
+ C = cast<Instruction>(*C->user_begin());
if (C->hasOneUse()) {
if (!C->isBinaryOp())
return;
@@ -423,17 +422,15 @@ void LoopReroll::SimpleLoopReduction::add(Loop *L) {
if (Instructions.size() < 2 ||
!C->isSameOperationAs(Instructions.back()) ||
- C->use_begin() == C->use_end())
+ C->use_empty())
return;
// C is now the (potential) last instruction in the reduction chain.
- for (Value::use_iterator UI = C->use_begin(), UIE = C->use_end();
- UI != UIE; ++UI) {
+ for (User *U : C->users())
// The only in-loop user can be the initial PHI.
- if (L->contains(cast<Instruction>(*UI)))
- if (cast<Instruction>(*UI ) != Instructions.front())
+ if (L->contains(cast<Instruction>(U)))
+ if (cast<Instruction>(U) != Instructions.front())
return;
- }
Instructions.push_back(C);
Valid = true;
@@ -483,12 +480,11 @@ void LoopReroll::collectInLoopUserSet(Loop *L,
continue;
if (!Final.count(I))
- for (Value::use_iterator UI = I->use_begin(),
- UIE = I->use_end(); UI != UIE; ++UI) {
- Instruction *User = cast<Instruction>(*UI);
+ for (Use &U : I->uses()) {
+ Instruction *User = cast<Instruction>(U.getUser());
if (PHINode *PN = dyn_cast<PHINode>(User)) {
// Ignore "wrap-around" uses to PHIs of this loop's header.
- if (PN->getIncomingBlock(UI) == L->getHeader())
+ if (PN->getIncomingBlock(U) == L->getHeader())
continue;
}
@@ -559,8 +555,8 @@ bool LoopReroll::findScaleFromMul(Instruction *RealIV, uint64_t &Scale,
if (RealIV->getNumUses() != 2)
return false;
const SCEVAddRecExpr *RealIVSCEV = cast<SCEVAddRecExpr>(SE->getSCEV(RealIV));
- Instruction *User1 = cast<Instruction>(*RealIV->use_begin()),
- *User2 = cast<Instruction>(*llvm::next(RealIV->use_begin()));
+ Instruction *User1 = cast<Instruction>(*RealIV->user_begin()),
+ *User2 = cast<Instruction>(*std::next(RealIV->user_begin()));
if (!SE->isSCEVable(User1->getType()) || !SE->isSCEVable(User2->getType()))
return false;
const SCEVAddRecExpr *User1SCEV =
@@ -616,26 +612,25 @@ bool LoopReroll::collectAllRoots(Loop *L, uint64_t Inc, uint64_t Scale,
SmallVector<SmallInstructionVector, 32> &Roots,
SmallInstructionSet &AllRoots,
SmallInstructionVector &LoopIncs) {
- for (Value::use_iterator UI = IV->use_begin(),
- UIE = IV->use_end(); UI != UIE; ++UI) {
- Instruction *User = cast<Instruction>(*UI);
- if (!SE->isSCEVable(User->getType()))
+ for (User *U : IV->users()) {
+ Instruction *UI = cast<Instruction>(U);
+ if (!SE->isSCEVable(UI->getType()))
continue;
- if (User->getType() != IV->getType())
+ if (UI->getType() != IV->getType())
continue;
- if (!L->contains(User))
+ if (!L->contains(UI))
continue;
- if (hasUsesOutsideLoop(User, L))
+ if (hasUsesOutsideLoop(UI, L))
continue;
if (const SCEVConstant *Diff = dyn_cast<SCEVConstant>(SE->getMinusSCEV(
- SE->getSCEV(User), SE->getSCEV(IV)))) {
+ SE->getSCEV(UI), SE->getSCEV(IV)))) {
uint64_t Idx = Diff->getValue()->getValue().getZExtValue();
if (Idx > 0 && Idx < Scale) {
- Roots[Idx-1].push_back(User);
- AllRoots.insert(User);
+ Roots[Idx-1].push_back(UI);
+ AllRoots.insert(UI);
} else if (Idx == Scale && Inc > 1) {
- LoopIncs.push_back(User);
+ LoopIncs.push_back(UI);
}
}
}
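collectAllRoots gathers the induction-variable users at offsets 1 through Scale-1, i.e. the extra copies of the body in a manually unrolled loop. A rough sketch of the input and output shapes (plain C++, an illustrative assumption only):

// Before rerolling: body repeated with Scale == 3, roots at offsets I+1 and I+2.
void scaleBy2Unrolled(int *A, int N) {    // N assumed to be a multiple of 3
  for (int I = 0; I < N; I += 3) {
    A[I]     *= 2;
    A[I + 1] *= 2;
    A[I + 2] *= 2;
  }
}

// After rerolling: a single copy of the body with a unit-stride induction
// variable; the trip count grows by the scale factor.
void scaleBy2Rerolled(int *A, int N) {
  for (int I = 0; I < N; ++I)
    A[I] *= 2;
}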
@@ -719,10 +714,8 @@ void LoopReroll::ReductionTracker::replaceSelected() {
// Replace users with the new end-of-chain value.
SmallInstructionVector Users;
- for (Value::use_iterator UI =
- PossibleReds[i].getReducedValue()->use_begin(),
- UIE = PossibleReds[i].getReducedValue()->use_end(); UI != UIE; ++UI)
- Users.push_back(cast<Instruction>(*UI));
+ for (User *U : PossibleReds[i].getReducedValue()->users())
+ Users.push_back(cast<Instruction>(U));
for (SmallInstructionVector::iterator J = Users.begin(),
JE = Users.end(); J != JE; ++J)
@@ -931,8 +924,10 @@ bool LoopReroll::reroll(Instruction *IV, Loop *L, BasicBlock *Header,
// them, and this matching fails. As an exception, we allow the alias
// set tracker to handle regular (simple) load/store dependencies.
if (FutureSideEffects &&
- ((!isSimpleLoadStore(J1) && !isSafeToSpeculativelyExecute(J1)) ||
- (!isSimpleLoadStore(J2) && !isSafeToSpeculativelyExecute(J2)))) {
+ ((!isSimpleLoadStore(J1) &&
+ !isSafeToSpeculativelyExecute(J1, DL)) ||
+ (!isSimpleLoadStore(J2) &&
+ !isSafeToSpeculativelyExecute(J2, DL)))) {
DEBUG(dbgs() << "LRR: iteration root match failed at " << *J1 <<
" vs. " << *J2 <<
" (side effects prevent reordering)\n");
@@ -953,7 +948,7 @@ bool LoopReroll::reroll(Instruction *IV, Loop *L, BasicBlock *Header,
bool InReduction = Reductions.isPairInSame(J1, J2);
if (!(InReduction && J1->isAssociative())) {
- bool Swapped = false, SomeOpMatched = false;;
+ bool Swapped = false, SomeOpMatched = false;
for (unsigned j = 0; j < J1->getNumOperands() && !MatchFailed; ++j) {
Value *Op2 = J2->getOperand(j);
@@ -1133,12 +1128,16 @@ bool LoopReroll::reroll(Instruction *IV, Loop *L, BasicBlock *Header,
}
bool LoopReroll::runOnLoop(Loop *L, LPPassManager &LPM) {
+ if (skipOptnoneFunction(L))
+ return false;
+
AA = &getAnalysis<AliasAnalysis>();
LI = &getAnalysis<LoopInfo>();
SE = &getAnalysis<ScalarEvolution>();
TLI = &getAnalysis<TargetLibraryInfo>();
- DL = getAnalysisIfAvailable<DataLayout>();
- DT = &getAnalysis<DominatorTree>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
BasicBlock *Header = L->getHeader();
DEBUG(dbgs() << "LRR: F[" << Header->getParent()->getName() <<
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
index 14c5655..2ce5831 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "loop-rotate"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CodeMetrics.h"
@@ -20,9 +19,11 @@
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/Support/CFG.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
@@ -30,7 +31,11 @@
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;
-#define MAX_HEADER_SIZE 16
+#define DEBUG_TYPE "loop-rotate"
+
+static cl::opt<unsigned>
+DefaultRotationThreshold("rotation-max-header-size", cl::init(16), cl::Hidden,
+ cl::desc("The default maximum header size for automatic loop rotation"));
STATISTIC(NumRotated, "Number of loops rotated");
namespace {
@@ -38,13 +43,17 @@ namespace {
class LoopRotate : public LoopPass {
public:
static char ID; // Pass ID, replacement for typeid
- LoopRotate() : LoopPass(ID) {
+ LoopRotate(int SpecifiedMaxHeaderSize = -1) : LoopPass(ID) {
initializeLoopRotatePass(*PassRegistry::getPassRegistry());
+ if (SpecifiedMaxHeaderSize == -1)
+ MaxHeaderSize = DefaultRotationThreshold;
+ else
+ MaxHeaderSize = unsigned(SpecifiedMaxHeaderSize);
}
// LCSSA form makes instruction renaming easier.
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addPreserved<DominatorTree>();
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addPreserved<DominatorTreeWrapperPass>();
AU.addRequired<LoopInfo>();
AU.addPreserved<LoopInfo>();
AU.addRequiredID(LoopSimplifyID);
@@ -55,11 +64,12 @@ namespace {
AU.addRequired<TargetTransformInfo>();
}
- bool runOnLoop(Loop *L, LPPassManager &LPM);
+ bool runOnLoop(Loop *L, LPPassManager &LPM) override;
bool simplifyLoopLatch(Loop *L);
bool rotateLoop(Loop *L, bool SimplifiedLatch);
private:
+ unsigned MaxHeaderSize;
LoopInfo *LI;
const TargetTransformInfo *TTI;
};
@@ -73,11 +83,19 @@ INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_END(LoopRotate, "loop-rotate", "Rotate Loops", false, false)
-Pass *llvm::createLoopRotatePass() { return new LoopRotate(); }
+Pass *llvm::createLoopRotatePass(int MaxHeaderSize) {
+ return new LoopRotate(MaxHeaderSize);
+}
/// Rotate Loop L as many times as possible. Return true if
/// the loop is rotated at least once.
bool LoopRotate::runOnLoop(Loop *L, LPPassManager &LPM) {
+ if (skipOptnoneFunction(L))
+ return false;
+
+ // Save the loop metadata.
+ MDNode *LoopMD = L->getLoopID();
+
LI = &getAnalysis<LoopInfo>();
TTI = &getAnalysis<TargetTransformInfo>();
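With MAX_HEADER_SIZE replaced by the -rotation-max-header-size option and a constructor parameter, the duplication threshold can be set either on the command line or when the pass is created. A brief sketch of programmatic use with the legacy pass manager (assuming the usual 3.5-era headers; not part of this patch):

#include "llvm/PassManager.h"
#include "llvm/Transforms/Scalar.h"

void addRotation(llvm::PassManager &PM) {
  // Explicit cap of 32 instructions for the duplicated header.
  PM.add(llvm::createLoopRotatePass(32));
  // Passing -1 keeps the default taken from -rotation-max-header-size (16).
  PM.add(llvm::createLoopRotatePass(-1));
}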
@@ -92,6 +110,12 @@ bool LoopRotate::runOnLoop(Loop *L, LPPassManager &LPM) {
MadeChange = true;
SimplifiedLatch = false;
}
+
+ // Restore the loop metadata.
+ // NB! We presume LoopRotation DOESN'T ADD its own metadata.
+ if ((MadeChange || SimplifiedLatch) && LoopMD)
+ L->setLoopID(LoopMD);
+
return MadeChange;
}
@@ -130,7 +154,7 @@ static void RewriteUsesOfClonedInstructions(BasicBlock *OrigHeader,
for (Value::use_iterator UI = OrigHeaderVal->use_begin(),
UE = OrigHeaderVal->use_end(); UI != UE; ) {
// Grab the use before incrementing the iterator.
- Use &U = UI.getUse();
+ Use &U = *UI;
// Increment the iterator before removing the use from the list.
++UI;
@@ -251,8 +275,9 @@ bool LoopRotate::simplifyLoopLatch(Loop *L) {
// Nuke the Latch block.
assert(Latch->empty() && "unable to evacuate Latch");
LI->removeBlock(Latch);
- if (DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>())
- DT->eraseNode(Latch);
+ if (DominatorTreeWrapperPass *DTWP =
+ getAnalysisIfAvailable<DominatorTreeWrapperPass>())
+ DTWP->getDomTree().eraseNode(Latch);
Latch->eraseFromParent();
return true;
}
@@ -276,7 +301,7 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
BasicBlock *OrigLatch = L->getLoopLatch();
BranchInst *BI = dyn_cast<BranchInst>(OrigHeader->getTerminator());
- if (BI == 0 || BI->isUnconditional())
+ if (!BI || BI->isUnconditional())
return false;
// If the loop header is not one of the loop exiting blocks then
@@ -287,7 +312,7 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
// If the loop latch already contains a branch that leaves the loop then the
// loop is already rotated.
- if (OrigLatch == 0)
+ if (!OrigLatch)
return false;
// Rotate if either the loop latch does *not* exit the loop, or if the loop
@@ -301,11 +326,11 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
CodeMetrics Metrics;
Metrics.analyzeBasicBlock(OrigHeader, *TTI);
if (Metrics.notDuplicatable) {
- DEBUG(dbgs() << "LoopRotation: NOT rotating - contains non duplicatable"
+ DEBUG(dbgs() << "LoopRotation: NOT rotating - contains non-duplicatable"
<< " instructions: "; L->dump());
return false;
}
- if (Metrics.NumInsts > MAX_HEADER_SIZE)
+ if (Metrics.NumInsts > MaxHeaderSize)
return false;
}
@@ -314,7 +339,7 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
// If the loop could not be converted to canonical form, it must have an
// indirectbr in it, just give up.
- if (OrigPreheader == 0)
+ if (!OrigPreheader)
return false;
// Anything ScalarEvolution may know about this loop or the PHI nodes
@@ -433,23 +458,25 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
// The conditional branch can't be folded, handle the general case.
// Update DominatorTree to reflect the CFG change we just made. Then split
// edges as necessary to preserve LoopSimplify form.
- if (DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>()) {
+ if (DominatorTreeWrapperPass *DTWP =
+ getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
+ DominatorTree &DT = DTWP->getDomTree();
// Everything that was dominated by the old loop header is now dominated
// by the original loop preheader. Conceptually the header was merged
// into the preheader, even though we reuse the actual block as a new
// loop latch.
- DomTreeNode *OrigHeaderNode = DT->getNode(OrigHeader);
+ DomTreeNode *OrigHeaderNode = DT.getNode(OrigHeader);
SmallVector<DomTreeNode *, 8> HeaderChildren(OrigHeaderNode->begin(),
OrigHeaderNode->end());
- DomTreeNode *OrigPreheaderNode = DT->getNode(OrigPreheader);
+ DomTreeNode *OrigPreheaderNode = DT.getNode(OrigPreheader);
for (unsigned I = 0, E = HeaderChildren.size(); I != E; ++I)
- DT->changeImmediateDominator(HeaderChildren[I], OrigPreheaderNode);
+ DT.changeImmediateDominator(HeaderChildren[I], OrigPreheaderNode);
- assert(DT->getNode(Exit)->getIDom() == OrigPreheaderNode);
- assert(DT->getNode(NewHeader)->getIDom() == OrigPreheaderNode);
+ assert(DT.getNode(Exit)->getIDom() == OrigPreheaderNode);
+ assert(DT.getNode(NewHeader)->getIDom() == OrigPreheaderNode);
// Update OrigHeader to be dominated by the new header block.
- DT->changeImmediateDominator(OrigHeader, OrigLatch);
+ DT.changeImmediateDominator(OrigHeader, OrigLatch);
}
// Right now OrigPreHeader has two successors, NewHeader and ExitBlock, and
@@ -459,9 +486,24 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
NewPH->setName(NewHeader->getName() + ".lr.ph");
// Preserve canonical loop form, which means that 'Exit' should have only
- // one predecessor.
- BasicBlock *ExitSplit = SplitCriticalEdge(L->getLoopLatch(), Exit, this);
- ExitSplit->moveBefore(Exit);
+ // one predecessor. Note that Exit could be an exit block for multiple
+ // nested loops, causing both of the edges to now be critical and need to
+ // be split.
+ SmallVector<BasicBlock *, 4> ExitPreds(pred_begin(Exit), pred_end(Exit));
+ bool SplitLatchEdge = false;
+ for (SmallVectorImpl<BasicBlock *>::iterator PI = ExitPreds.begin(),
+ PE = ExitPreds.end();
+ PI != PE; ++PI) {
+ // We only need to split loop exit edges.
+ Loop *PredLoop = LI->getLoopFor(*PI);
+ if (!PredLoop || PredLoop->contains(Exit))
+ continue;
+ SplitLatchEdge |= L->getLoopLatch() == *PI;
+ BasicBlock *ExitSplit = SplitCriticalEdge(*PI, Exit, this);
+ ExitSplit->moveBefore(Exit);
+ }
+ assert(SplitLatchEdge &&
+ "Despite splitting all preds, failed to split latch exit?");
} else {
// We can fold the conditional branch in the preheader, this makes things
// simpler. The first step is to remove the extra edge to the Exit block.
@@ -471,15 +513,17 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
PHBI->eraseFromParent();
// With our CFG finalized, update DomTree if it is available.
- if (DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>()) {
+ if (DominatorTreeWrapperPass *DTWP =
+ getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
+ DominatorTree &DT = DTWP->getDomTree();
// Update OrigHeader to be dominated by the new header block.
- DT->changeImmediateDominator(NewHeader, OrigPreheader);
- DT->changeImmediateDominator(OrigHeader, OrigLatch);
+ DT.changeImmediateDominator(NewHeader, OrigPreheader);
+ DT.changeImmediateDominator(OrigHeader, OrigLatch);
// Brute force incremental dominator tree update. Call
// findNearestCommonDominator on all CFG predecessors of each child of the
// original header.
- DomTreeNode *OrigHeaderNode = DT->getNode(OrigHeader);
+ DomTreeNode *OrigHeaderNode = DT.getNode(OrigHeader);
SmallVector<DomTreeNode *, 8> HeaderChildren(OrigHeaderNode->begin(),
OrigHeaderNode->end());
bool Changed;
@@ -492,11 +536,11 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
pred_iterator PI = pred_begin(BB);
BasicBlock *NearestDom = *PI;
for (pred_iterator PE = pred_end(BB); PI != PE; ++PI)
- NearestDom = DT->findNearestCommonDominator(NearestDom, *PI);
+ NearestDom = DT.findNearestCommonDominator(NearestDom, *PI);
// Remember if this changes the DomTree.
if (Node->getIDom()->getBlock() != NearestDom) {
- DT->changeImmediateDominator(BB, NearestDom);
+ DT.changeImmediateDominator(BB, NearestDom);
Changed = true;
}
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 6133962..914b56a 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -53,31 +53,32 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/TargetTransformInfo.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "loop-reduce"
+
/// MaxIVUsers is an arbitrary threshold that provides an early opportunity for
/// bail out. This threshold is far beyond the number of users that LSR can
/// conceivably solve, so it should not affect generated code, but catches the
@@ -237,7 +238,15 @@ struct Formula {
int64_t Scale;
/// BaseRegs - The list of "base" registers for this use. When this is
- /// non-empty,
+ /// non-empty. The canonical representation of a formula is
+ /// 1. BaseRegs.size > 1 implies ScaledReg != NULL and
+ /// 2. ScaledReg != NULL implies Scale != 1 || !BaseRegs.empty().
+ /// #1 enforces that the scaled register is always used when at least two
+ /// registers are needed by the formula: e.g., reg1 + reg2 is reg1 + 1 * reg2.
+ /// #2 enforces that 1 * reg is reg.
+  /// This invariant can be temporarily broken while building a formula.
+ /// However, every formula inserted into the LSRInstance must be in canonical
+ /// form.
SmallVector<const SCEV *, 4> BaseRegs;
/// ScaledReg - The 'scaled' register for this use. This should be non-null
@@ -250,12 +259,18 @@ struct Formula {
int64_t UnfoldedOffset;
Formula()
- : BaseGV(0), BaseOffset(0), HasBaseReg(false), Scale(0), ScaledReg(0),
- UnfoldedOffset(0) {}
+ : BaseGV(nullptr), BaseOffset(0), HasBaseReg(false), Scale(0),
+ ScaledReg(nullptr), UnfoldedOffset(0) {}
void InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);
- unsigned getNumRegs() const;
+ bool isCanonical() const;
+
+ void Canonicalize();
+
+ bool Unscale();
+
+ size_t getNumRegs() const;
Type *getType() const;
void DeleteBaseReg(const SCEV *&S);
@@ -345,12 +360,58 @@ void Formula::InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
BaseRegs.push_back(Sum);
HasBaseReg = true;
}
+ Canonicalize();
+}
+
+/// \brief Check whether or not this formula satisfies the canonical
+/// representation.
+/// \see Formula::BaseRegs.
+bool Formula::isCanonical() const {
+ if (ScaledReg)
+ return Scale != 1 || !BaseRegs.empty();
+ return BaseRegs.size() <= 1;
+}
+
+/// \brief Helper method to morph a formula into its canonical representation.
+/// \see Formula::BaseRegs.
+/// Every formula having more than one base register, must use the ScaledReg
+/// field. Otherwise, we would have to do special cases everywhere in LSR
+/// to treat reg1 + reg2 + ... the same way as reg1 + 1*reg2 + ...
+/// On the other hand, 1*reg should be canonicalized into reg.
+void Formula::Canonicalize() {
+ if (isCanonical())
+ return;
+ // So far we did not need this case. This is easy to implement but it is
+  // useless to maintain dead code. Besides, it could hurt compile time.
+ assert(!BaseRegs.empty() && "1*reg => reg, should not be needed.");
+ // Keep the invariant sum in BaseRegs and one of the variant sum in ScaledReg.
+ ScaledReg = BaseRegs.back();
+ BaseRegs.pop_back();
+ Scale = 1;
+ size_t BaseRegsSize = BaseRegs.size();
+ size_t Try = 0;
+ // If ScaledReg is an invariant, try to find a variant expression.
+ while (Try < BaseRegsSize && !isa<SCEVAddRecExpr>(ScaledReg))
+ std::swap(ScaledReg, BaseRegs[Try++]);
+}
+
+/// \brief Get rid of the scale in the formula.
+/// In other words, this method morphs reg1 + 1*reg2 into reg1 + reg2.
+/// \return true if it was possible to get rid of the scale, false otherwise.
+/// \note After this operation the formula may not be in the canonical form.
+bool Formula::Unscale() {
+ if (Scale != 1)
+ return false;
+ Scale = 0;
+ BaseRegs.push_back(ScaledReg);
+ ScaledReg = nullptr;
+ return true;
}
/// getNumRegs - Return the total number of register operands used by this
/// formula. This does not include register uses implied by non-constant
/// addrec strides.
-unsigned Formula::getNumRegs() const {
+size_t Formula::getNumRegs() const {
return !!ScaledReg + BaseRegs.size();
}
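The canonical-form rules introduced above (two or more base registers imply a ScaledReg, and 1*reg collapses to reg) can be mirrored on a toy structure to make the invariant concrete. This is an illustrative model with plain pointers, an assumption rather than LSR's actual types:

#include <cstdint>
#include <vector>

struct ToyFormula {
  std::vector<const void *> BaseRegs;
  const void *ScaledReg = nullptr;
  std::int64_t Scale = 0;

  // Mirrors Formula::isCanonical(): a lone 1*reg is not canonical, and two or
  // more base registers must be expressed as base + 1*scaled.
  bool isCanonical() const {
    if (ScaledReg)
      return Scale != 1 || !BaseRegs.empty();
    return BaseRegs.size() <= 1;
  }

  // reg1 + reg2  ==>  reg1 + 1*reg2 (Canonicalize); Unscale is the reverse.
  void canonicalize() {
    if (isCanonical())
      return;
    ScaledReg = BaseRegs.back();
    BaseRegs.pop_back();
    Scale = 1;
  }
};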
@@ -360,7 +421,7 @@ Type *Formula::getType() const {
return !BaseRegs.empty() ? BaseRegs.front()->getType() :
ScaledReg ? ScaledReg->getType() :
BaseGV ? BaseGV->getType() :
- 0;
+ nullptr;
}
/// DeleteBaseReg - Delete the given base reg from the BaseRegs list.
@@ -394,7 +455,7 @@ void Formula::print(raw_ostream &OS) const {
bool First = true;
if (BaseGV) {
if (!First) OS << " + "; else First = false;
- WriteAsOperand(OS, BaseGV, /*PrintType=*/false);
+ BaseGV->printAsOperand(OS, /*PrintType=*/false);
}
if (BaseOffset != 0) {
if (!First) OS << " + "; else First = false;
@@ -422,7 +483,7 @@ void Formula::print(raw_ostream &OS) const {
OS << ')';
}
if (UnfoldedOffset != 0) {
- if (!First) OS << " + "; else First = false;
+ if (!First) OS << " + ";
OS << "imm(" << UnfoldedOffset << ')';
}
}
@@ -487,11 +548,11 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
// Check for a division of a constant by a constant.
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
if (!RC)
- return 0;
+ return nullptr;
const APInt &LA = C->getValue()->getValue();
const APInt &RA = RC->getValue()->getValue();
if (LA.srem(RA) != 0)
- return 0;
+ return nullptr;
return SE.getConstant(LA.sdiv(RA));
}
@@ -500,16 +561,16 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
if (IgnoreSignificantBits || isAddRecSExtable(AR, SE)) {
const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
IgnoreSignificantBits);
- if (!Step) return 0;
+ if (!Step) return nullptr;
const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
IgnoreSignificantBits);
- if (!Start) return 0;
+ if (!Start) return nullptr;
// FlagNW is independent of the start value, step direction, and is
// preserved with smaller magnitude steps.
// FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap);
}
- return 0;
+ return nullptr;
}
// Distribute the sdiv over add operands, if the add doesn't overflow.
@@ -520,12 +581,12 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
I != E; ++I) {
const SCEV *Op = getExactSDiv(*I, RHS, SE,
IgnoreSignificantBits);
- if (!Op) return 0;
+ if (!Op) return nullptr;
Ops.push_back(Op);
}
return SE.getAddExpr(Ops);
}
- return 0;
+ return nullptr;
}
// Check for a multiply operand that we can pull RHS out of.
@@ -544,13 +605,13 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
}
Ops.push_back(S);
}
- return Found ? SE.getMulExpr(Ops) : 0;
+ return Found ? SE.getMulExpr(Ops) : nullptr;
}
- return 0;
+ return nullptr;
}
// Otherwise we don't know.
- return 0;
+ return nullptr;
}
/// ExtractImmediate - If S involves the addition of a constant integer value,
@@ -604,7 +665,7 @@ static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
SCEV::FlagAnyWrap);
return Result;
}
- return 0;
+ return nullptr;
}
/// isAddressUse - Returns true if the specified instruction is using the
@@ -723,13 +784,12 @@ static bool isHighCostExpansion(const SCEV *S,
// multiplication already generates this expression.
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Mul->getOperand(1))) {
Value *UVal = U->getValue();
- for (Value::use_iterator UI = UVal->use_begin(), UE = UVal->use_end();
- UI != UE; ++UI) {
+ for (User *UR : UVal->users()) {
// If U is a constant, it may be used by a ConstantExpr.
- Instruction *User = dyn_cast<Instruction>(*UI);
- if (User && User->getOpcode() == Instruction::Mul
- && SE.isSCEVable(User->getType())) {
- return SE.getSCEV(User) == Mul;
+ Instruction *UI = dyn_cast<Instruction>(UR);
+ if (UI && UI->getOpcode() == Instruction::Mul &&
+ SE.isSCEVable(UI->getType())) {
+ return SE.getSCEV(UI) == Mul;
}
}
}
@@ -756,12 +816,12 @@ DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
Value *V = DeadInsts.pop_back_val();
Instruction *I = dyn_cast_or_null<Instruction>(V);
- if (I == 0 || !isInstructionTriviallyDead(I))
+ if (!I || !isInstructionTriviallyDead(I))
continue;
for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
if (Instruction *U = dyn_cast<Instruction>(*OI)) {
- *OI = 0;
+ *OI = nullptr;
if (U->use_empty())
DeadInsts.push_back(U);
}
@@ -776,9 +836,18 @@ DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
namespace {
class LSRUse;
}
-// Check if it is legal to fold 2 base registers.
-static bool isLegal2RegAMUse(const TargetTransformInfo &TTI, const LSRUse &LU,
- const Formula &F);
+
+/// \brief Check if the addressing mode defined by \p F is completely
+/// folded in \p LU at isel time.
+/// This includes address-mode folding and special icmp tricks.
+/// This function returns true if \p LU can accommodate what \p F
+/// defines and up to 1 base + 1 scaled + offset.
+/// In other words, if \p F has several base registers, this function may
+/// still return true. Therefore, users still need to account for
+/// additional base registers and/or unfolded offsets to derive an
+/// accurate cost model.
+static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
+ const LSRUse &LU, const Formula &F);
// Get the cost of the scaling factor used in F for LU.
static unsigned getScalingFactorCost(const TargetTransformInfo &TTI,
const LSRUse &LU, const Formula &F);
@@ -804,7 +873,7 @@ public:
bool operator<(const Cost &Other) const;
- void Loose();
+ void Lose();
#ifndef NDEBUG
// Once any of the metrics loses, they must all remain losers.
@@ -829,7 +898,7 @@ public:
const SmallVectorImpl<int64_t> &Offsets,
ScalarEvolution &SE, DominatorTree &DT,
const LSRUse &LU,
- SmallPtrSet<const SCEV *, 16> *LoserRegs = 0);
+ SmallPtrSet<const SCEV *, 16> *LoserRegs = nullptr);
void print(raw_ostream &OS) const;
void dump() const;
@@ -864,7 +933,7 @@ void Cost::RateRegister(const SCEV *Reg,
return;
// Otherwise, do not consider this formula at all.
- Loose();
+ Lose();
return;
}
AddRecCost += 1; /// TODO: This should be a function of the stride.
@@ -903,7 +972,7 @@ void Cost::RatePrimaryRegister(const SCEV *Reg,
ScalarEvolution &SE, DominatorTree &DT,
SmallPtrSet<const SCEV *, 16> *LoserRegs) {
if (LoserRegs && LoserRegs->count(Reg)) {
- Loose();
+ Lose();
return;
}
if (Regs.insert(Reg)) {
@@ -922,10 +991,11 @@ void Cost::RateFormula(const TargetTransformInfo &TTI,
ScalarEvolution &SE, DominatorTree &DT,
const LSRUse &LU,
SmallPtrSet<const SCEV *, 16> *LoserRegs) {
+ assert(F.isCanonical() && "Cost is accurate only for canonical formula");
// Tally up the registers.
if (const SCEV *ScaledReg = F.ScaledReg) {
if (VisitedRegs.count(ScaledReg)) {
- Loose();
+ Lose();
return;
}
RatePrimaryRegister(ScaledReg, Regs, L, SE, DT, LoserRegs);
@@ -936,7 +1006,7 @@ void Cost::RateFormula(const TargetTransformInfo &TTI,
E = F.BaseRegs.end(); I != E; ++I) {
const SCEV *BaseReg = *I;
if (VisitedRegs.count(BaseReg)) {
- Loose();
+ Lose();
return;
}
RatePrimaryRegister(BaseReg, Regs, L, SE, DT, LoserRegs);
@@ -945,11 +1015,13 @@ void Cost::RateFormula(const TargetTransformInfo &TTI,
}
// Determine how many (unfolded) adds we'll need inside the loop.
- size_t NumBaseParts = F.BaseRegs.size() + (F.UnfoldedOffset != 0);
+ size_t NumBaseParts = F.getNumRegs();
if (NumBaseParts > 1)
// Do not count the base and a possible second register if the target
// allows to fold 2 registers.
- NumBaseAdds += NumBaseParts - (1 + isLegal2RegAMUse(TTI, LU, F));
+ NumBaseAdds +=
+ NumBaseParts - (1 + (F.Scale && isAMCompletelyFolded(TTI, LU, F)));
+ NumBaseAdds += (F.UnfoldedOffset != 0);
// Accumulate non-free scaling amounts.
ScaleCost += getScalingFactorCost(TTI, LU, F);
@@ -967,8 +1039,8 @@ void Cost::RateFormula(const TargetTransformInfo &TTI,
assert(isValid() && "invalid cost");
}
-/// Loose - Set this cost to a losing value.
-void Cost::Loose() {
+/// Lose - Set this cost to a losing value.
+void Cost::Lose() {
NumRegs = ~0u;
AddRecCost = ~0u;
NumIVMuls = ~0u;
@@ -980,21 +1052,11 @@ void Cost::Loose() {
/// operator< - Choose the lower cost.
bool Cost::operator<(const Cost &Other) const {
- if (NumRegs != Other.NumRegs)
- return NumRegs < Other.NumRegs;
- if (AddRecCost != Other.AddRecCost)
- return AddRecCost < Other.AddRecCost;
- if (NumIVMuls != Other.NumIVMuls)
- return NumIVMuls < Other.NumIVMuls;
- if (NumBaseAdds != Other.NumBaseAdds)
- return NumBaseAdds < Other.NumBaseAdds;
- if (ScaleCost != Other.ScaleCost)
- return ScaleCost < Other.ScaleCost;
- if (ImmCost != Other.ImmCost)
- return ImmCost < Other.ImmCost;
- if (SetupCost != Other.SetupCost)
- return SetupCost < Other.SetupCost;
- return false;
+ return std::tie(NumRegs, AddRecCost, NumIVMuls, NumBaseAdds, ScaleCost,
+ ImmCost, SetupCost) <
+ std::tie(Other.NumRegs, Other.AddRecCost, Other.NumIVMuls,
+ Other.NumBaseAdds, Other.ScaleCost, Other.ImmCost,
+ Other.SetupCost);
}
void Cost::print(raw_ostream &OS) const {
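The rewritten operator< relies on std::tie building tuples of references that compare lexicographically, which is equivalent to the removed cascade of if statements. A minimal stand-alone illustration (plain C++11, not the Cost type itself):

#include <tuple>

struct Metrics { unsigned Regs, Muls, Adds; };

bool operator<(const Metrics &L, const Metrics &R) {
  // Compare Regs first, then Muls, then Adds, exactly like a hand-written chain.
  return std::tie(L.Regs, L.Muls, L.Adds) <
         std::tie(R.Regs, R.Muls, R.Adds);
}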
@@ -1058,7 +1120,8 @@ struct LSRFixup {
}
LSRFixup::LSRFixup()
- : UserInst(0), OperandValToReplace(0), LUIdx(~size_t(0)), Offset(0) {}
+ : UserInst(nullptr), OperandValToReplace(nullptr), LUIdx(~size_t(0)),
+ Offset(0) {}
/// isUseFullyOutsideLoop - Test whether this fixup always uses its
/// value outside of the given loop.
@@ -1080,19 +1143,19 @@ void LSRFixup::print(raw_ostream &OS) const {
// Store is common and interesting enough to be worth special-casing.
if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) {
OS << "store ";
- WriteAsOperand(OS, Store->getOperand(0), /*PrintType=*/false);
+ Store->getOperand(0)->printAsOperand(OS, /*PrintType=*/false);
} else if (UserInst->getType()->isVoidTy())
OS << UserInst->getOpcodeName();
else
- WriteAsOperand(OS, UserInst, /*PrintType=*/false);
+ UserInst->printAsOperand(OS, /*PrintType=*/false);
OS << ", OperandValToReplace=";
- WriteAsOperand(OS, OperandValToReplace, /*PrintType=*/false);
+ OperandValToReplace->printAsOperand(OS, /*PrintType=*/false);
for (PostIncLoopSet::const_iterator I = PostIncLoops.begin(),
E = PostIncLoops.end(); I != E; ++I) {
OS << ", PostIncLoop=";
- WriteAsOperand(OS, (*I)->getHeader(), /*PrintType=*/false);
+ (*I)->getHeader()->printAsOperand(OS, /*PrintType=*/false);
}
if (LUIdx != ~size_t(0))
@@ -1126,11 +1189,7 @@ struct UniquifierDenseMapInfo {
}
static unsigned getHashValue(const SmallVector<const SCEV *, 4> &V) {
- unsigned Result = 0;
- for (SmallVectorImpl<const SCEV *>::const_iterator I = V.begin(),
- E = V.end(); I != E; ++I)
- Result ^= DenseMapInfo<const SCEV *>::getHashValue(*I);
- return Result;
+ return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
}
static bool isEqual(const SmallVector<const SCEV *, 4> &LHS,
@@ -1158,6 +1217,8 @@ public:
// TODO: Add a generic icmp too?
};
+ typedef PointerIntPair<const SCEV *, 2, KindType> SCEVUseKindPair;
+
KindType Kind;
Type *AccessTy;
@@ -1196,7 +1257,7 @@ public:
MaxOffset(INT64_MIN),
AllFixupsOutsideLoop(true),
RigidFormula(false),
- WidestFixupType(0) {}
+ WidestFixupType(nullptr) {}
bool HasFormulaWithSameRegs(const Formula &F) const;
bool InsertFormula(const Formula &F);
@@ -1221,7 +1282,10 @@ bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
+/// The formula must be in canonical form.
bool LSRUse::InsertFormula(const Formula &F) {
+ assert(F.isCanonical() && "Invalid canonical representation");
+
if (!Formulae.empty() && RigidFormula)
return false;
@@ -1247,6 +1311,8 @@ bool LSRUse::InsertFormula(const Formula &F) {
// Record registers now being used by this use.
Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
+ if (F.ScaledReg)
+ Regs.insert(F.ScaledReg);
return true;
}
@@ -1295,7 +1361,7 @@ void LSRUse::print(raw_ostream &OS) const {
for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
E = Offsets.end(); I != E; ++I) {
OS << *I;
- if (llvm::next(I) != E)
+ if (std::next(I) != E)
OS << ',';
}
OS << '}';
@@ -1313,12 +1379,10 @@ void LSRUse::dump() const {
}
#endif
-/// isLegalUse - Test whether the use described by AM is "legal", meaning it can
-/// be completely folded into the user instruction at isel time. This includes
-/// address-mode folding and special icmp tricks.
-static bool isLegalUse(const TargetTransformInfo &TTI, LSRUse::KindType Kind,
- Type *AccessTy, GlobalValue *BaseGV, int64_t BaseOffset,
- bool HasBaseReg, int64_t Scale) {
+static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
+ LSRUse::KindType Kind, Type *AccessTy,
+ GlobalValue *BaseGV, int64_t BaseOffset,
+ bool HasBaseReg, int64_t Scale) {
switch (Kind) {
case LSRUse::Address:
return TTI.isLegalAddressingMode(AccessTy, BaseGV, BaseOffset, HasBaseReg, Scale);
@@ -1369,10 +1433,11 @@ static bool isLegalUse(const TargetTransformInfo &TTI, LSRUse::KindType Kind,
llvm_unreachable("Invalid LSRUse Kind!");
}
-static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
- int64_t MaxOffset, LSRUse::KindType Kind, Type *AccessTy,
- GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg,
- int64_t Scale) {
+static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
+ int64_t MinOffset, int64_t MaxOffset,
+ LSRUse::KindType Kind, Type *AccessTy,
+ GlobalValue *BaseGV, int64_t BaseOffset,
+ bool HasBaseReg, int64_t Scale) {
// Check for overflow.
if (((int64_t)((uint64_t)BaseOffset + MinOffset) > BaseOffset) !=
(MinOffset > 0))
@@ -1383,9 +1448,41 @@ static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
return false;
MaxOffset = (uint64_t)BaseOffset + MaxOffset;
- return isLegalUse(TTI, Kind, AccessTy, BaseGV, MinOffset, HasBaseReg,
- Scale) &&
- isLegalUse(TTI, Kind, AccessTy, BaseGV, MaxOffset, HasBaseReg, Scale);
+ return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MinOffset,
+ HasBaseReg, Scale) &&
+ isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MaxOffset,
+ HasBaseReg, Scale);
+}
+
+static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
+ int64_t MinOffset, int64_t MaxOffset,
+ LSRUse::KindType Kind, Type *AccessTy,
+ const Formula &F) {
+ // For the purpose of isAMCompletelyFolded either having a canonical formula
+ // or a scale not equal to zero is correct.
+  // Problems may arise from non-canonical formulae having a scale == 0.
+  // Strictly speaking it would be best to just rely on canonical formulae.
+  // However, when we generate the scaled formulae, we first check that the
+  // scaling factor is profitable before computing the actual ScaledReg for
+  // compile time's sake.
+ assert((F.isCanonical() || F.Scale != 0));
+ return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy,
+ F.BaseGV, F.BaseOffset, F.HasBaseReg, F.Scale);
+}
+
+/// isLegalUse - Test whether we know how to expand the current formula.
+static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
+ int64_t MaxOffset, LSRUse::KindType Kind, Type *AccessTy,
+ GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg,
+ int64_t Scale) {
+ // We know how to expand completely foldable formulae.
+ return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV,
+ BaseOffset, HasBaseReg, Scale) ||
+ // Or formulae that use a base register produced by a sum of base
+ // registers.
+ (Scale == 1 &&
+ isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy,
+ BaseGV, BaseOffset, true, 0));
}
static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
@@ -1395,36 +1492,23 @@ static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
F.BaseOffset, F.HasBaseReg, F.Scale);
}
-static bool isLegal2RegAMUse(const TargetTransformInfo &TTI, const LSRUse &LU,
- const Formula &F) {
- // If F is used as an Addressing Mode, it may fold one Base plus one
- // scaled register. If the scaled register is nil, do as if another
- // element of the base regs is a 1-scaled register.
- // This is possible if BaseRegs has at least 2 registers.
-
- // If this is not an address calculation, this is not an addressing mode
- // use.
- if (LU.Kind != LSRUse::Address)
- return false;
-
- // F is already scaled.
- if (F.Scale != 0)
- return false;
-
- // We need to keep one register for the base and one to scale.
- if (F.BaseRegs.size() < 2)
- return false;
-
- return isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
- F.BaseGV, F.BaseOffset, F.HasBaseReg, 1);
- }
+static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
+ const LSRUse &LU, const Formula &F) {
+ return isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind,
+ LU.AccessTy, F.BaseGV, F.BaseOffset, F.HasBaseReg,
+ F.Scale);
+}
static unsigned getScalingFactorCost(const TargetTransformInfo &TTI,
const LSRUse &LU, const Formula &F) {
if (!F.Scale)
return 0;
- assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind,
- LU.AccessTy, F) && "Illegal formula in use.");
+
+ // If the use is not completely folded in that instruction, we will have to
+ // pay an extra cost only for scale != 1.
+ if (!isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind,
+ LU.AccessTy, F))
+ return F.Scale != 1;
switch (LU.Kind) {
case LSRUse::Address: {
@@ -1443,12 +1527,10 @@ static unsigned getScalingFactorCost(const TargetTransformInfo &TTI,
return std::max(ScaleCostMinOffset, ScaleCostMaxOffset);
}
case LSRUse::ICmpZero:
- // ICmpZero BaseReg + -1*ScaleReg => ICmp BaseReg, ScaleReg.
- // Therefore, return 0 in case F.Scale == -1.
- return F.Scale != -1;
-
case LSRUse::Basic:
case LSRUse::Special:
+ // The use is completely folded, i.e., everything is folded into the
+ // instruction.
return 0;
}
@@ -1473,7 +1555,8 @@ static bool isAlwaysFoldable(const TargetTransformInfo &TTI,
HasBaseReg = true;
}
- return isLegalUse(TTI, Kind, AccessTy, BaseGV, BaseOffset, HasBaseReg, Scale);
+ return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, BaseOffset,
+ HasBaseReg, Scale);
}
static bool isAlwaysFoldable(const TargetTransformInfo &TTI,
@@ -1498,36 +1581,12 @@ static bool isAlwaysFoldable(const TargetTransformInfo &TTI,
// base and a scale.
int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1;
- return isLegalUse(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV,
- BaseOffset, HasBaseReg, Scale);
+ return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV,
+ BaseOffset, HasBaseReg, Scale);
}
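The -1 scale chosen here for ICmpZero uses relies on the identity that a compare against zero absorbs a subtracted register. A small self-contained check of that identity, with plain integers standing in for the expanded register values:

#include <cassert>
#include <cstdint>

// For an ICmpZero use, "BaseReg + (-1)*ScaledReg == 0" is the same predicate
// as "BaseReg == ScaledReg", which is why a scale of -1 is treated as free.
static bool icmpZeroFormula(int64_t BaseReg, int64_t ScaledReg) {
  return BaseReg + (-1) * ScaledReg == 0;
}

static bool foldedCompare(int64_t BaseReg, int64_t ScaledReg) {
  return BaseReg == ScaledReg; // what the backend actually emits
}

int main() {
  for (int64_t B = -3; B <= 3; ++B)
    for (int64_t S = -3; S <= 3; ++S)
      assert(icmpZeroFormula(B, S) == foldedCompare(B, S));
}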
namespace {
-/// UseMapDenseMapInfo - A DenseMapInfo implementation for holding
-/// DenseMaps and DenseSets of pairs of const SCEV* and LSRUse::Kind.
-struct UseMapDenseMapInfo {
- static std::pair<const SCEV *, LSRUse::KindType> getEmptyKey() {
- return std::make_pair(reinterpret_cast<const SCEV *>(-1), LSRUse::Basic);
- }
-
- static std::pair<const SCEV *, LSRUse::KindType> getTombstoneKey() {
- return std::make_pair(reinterpret_cast<const SCEV *>(-2), LSRUse::Basic);
- }
-
- static unsigned
- getHashValue(const std::pair<const SCEV *, LSRUse::KindType> &V) {
- unsigned Result = DenseMapInfo<const SCEV *>::getHashValue(V.first);
- Result ^= DenseMapInfo<unsigned>::getHashValue(unsigned(V.second));
- return Result;
- }
-
- static bool isEqual(const std::pair<const SCEV *, LSRUse::KindType> &LHS,
- const std::pair<const SCEV *, LSRUse::KindType> &RHS) {
- return LHS == RHS;
- }
-};
-
/// IVInc - An individual increment in a Chain of IV increments.
/// Relate an IV user to an expression that computes the IV it uses from the IV
/// used by the previous link in the Chain.
@@ -1552,7 +1611,7 @@ struct IVChain {
SmallVector<IVInc,1> Incs;
const SCEV *ExprBase;
- IVChain() : ExprBase(0) {}
+ IVChain() : ExprBase(nullptr) {}
IVChain(const IVInc &Head, const SCEV *Base)
: Incs(1, Head), ExprBase(Base) {}
@@ -1562,7 +1621,7 @@ struct IVChain {
// begin - return the first increment in the chain.
const_iterator begin() const {
assert(!Incs.empty());
- return llvm::next(Incs.begin());
+ return std::next(Incs.begin());
}
const_iterator end() const {
return Incs.end();
@@ -1656,9 +1715,7 @@ class LSRInstance {
}
// Support for sharing of LSRUses between LSRFixups.
- typedef DenseMap<std::pair<const SCEV *, LSRUse::KindType>,
- size_t,
- UseMapDenseMapInfo> UseMapTy;
+ typedef DenseMap<LSRUse::SCEVUseKindPair, size_t> UseMapTy;
UseMapTy UseMap;
bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
@@ -1681,8 +1738,19 @@ class LSRInstance {
void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
unsigned Depth = 0);
+
+ void GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
+ const Formula &Base, unsigned Depth,
+ size_t Idx, bool IsScaledReg = false);
void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
+ void GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx,
+ const Formula &Base, size_t Idx,
+ bool IsScaledReg = false);
void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
+ void GenerateConstantOffsetsImpl(LSRUse &LU, unsigned LUIdx,
+ const Formula &Base,
+ const SmallVectorImpl<int64_t> &Worklist,
+ size_t Idx, bool IsScaledReg = false);
void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
@@ -1760,7 +1828,7 @@ void LSRInstance::OptimizeShadowIV() {
IVUsers::const_iterator CandidateUI = UI;
++UI;
Instruction *ShadowUse = CandidateUI->getUser();
- Type *DestTy = 0;
+ Type *DestTy = nullptr;
bool IsSigned = false;
/* If shadow use is a int->float cast then insert a second IV
@@ -1822,7 +1890,7 @@ void LSRInstance::OptimizeShadowIV() {
continue;
/* Initialize new IV, double d = 0.0 in above example. */
- ConstantInt *C = 0;
+ ConstantInt *C = nullptr;
if (Incr->getOperand(0) == PH)
C = dyn_cast<ConstantInt>(Incr->getOperand(1));
else if (Incr->getOperand(1) == PH)
@@ -1944,7 +2012,7 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
// for ICMP_ULE here because the comparison would be with zero, which
// isn't interesting.
CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
- const SCEVNAryExpr *Max = 0;
+ const SCEVNAryExpr *Max = nullptr;
if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) {
Pred = ICmpInst::ICMP_SLE;
Max = S;
@@ -1987,7 +2055,7 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
// Check the right operand of the select, and remember it, as it will
// be used in the new comparison instruction.
- Value *NewRHS = 0;
+ Value *NewRHS = nullptr;
if (ICmpInst::isTrueWhenEqual(Pred)) {
// Look for n+1, and grab n.
if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1)))
@@ -2057,7 +2125,7 @@ LSRInstance::OptimizeLoopTermCond() {
continue;
// Search IVUsesByStride to find Cond's IVUse if there is one.
- IVStrideUse *CondUse = 0;
+ IVStrideUse *CondUse = nullptr;
ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
if (!FindIVUserForCond(Cond, CondUse))
continue;
@@ -2110,12 +2178,12 @@ LSRInstance::OptimizeLoopTermCond() {
// Check for possible scaled-address reuse.
Type *AccessTy = getAccessType(UI->getUser());
int64_t Scale = C->getSExtValue();
- if (TTI.isLegalAddressingMode(AccessTy, /*BaseGV=*/ 0,
+ if (TTI.isLegalAddressingMode(AccessTy, /*BaseGV=*/ nullptr,
/*BaseOffset=*/ 0,
/*HasBaseReg=*/ false, Scale))
goto decline_post_inc;
Scale = -Scale;
- if (TTI.isLegalAddressingMode(AccessTy, /*BaseGV=*/ 0,
+ if (TTI.isLegalAddressingMode(AccessTy, /*BaseGV=*/ nullptr,
/*BaseOffset=*/ 0,
/*HasBaseReg=*/ false, Scale))
goto decline_post_inc;
@@ -2185,23 +2253,25 @@ LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
// the uses will have all its uses outside the loop, for example.
if (LU.Kind != Kind)
return false;
+
+ // Check for a mismatched access type, and fall back conservatively as needed.
+ // TODO: Be less conservative when the type is similar and can use the same
+ // addressing modes.
+ if (Kind == LSRUse::Address && AccessTy != LU.AccessTy)
+ NewAccessTy = Type::getVoidTy(AccessTy->getContext());
+
// Conservatively assume HasBaseReg is true for now.
if (NewOffset < LU.MinOffset) {
- if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ 0,
+ if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr,
LU.MaxOffset - NewOffset, HasBaseReg))
return false;
NewMinOffset = NewOffset;
} else if (NewOffset > LU.MaxOffset) {
- if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ 0,
+ if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr,
NewOffset - LU.MinOffset, HasBaseReg))
return false;
NewMaxOffset = NewOffset;
}
- // Check for a mismatched access type, and fall back conservatively as needed.
- // TODO: Be less conservative when the type is similar and can use the same
- // addressing modes.
- if (Kind == LSRUse::Address && AccessTy != LU.AccessTy)
- NewAccessTy = Type::getVoidTy(AccessTy->getContext());
// Update the use.
LU.MinOffset = NewMinOffset;
@@ -2222,14 +2292,14 @@ LSRInstance::getUse(const SCEV *&Expr,
int64_t Offset = ExtractImmediate(Expr, SE);
// Basic uses can't accept any offset, for example.
- if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ 0,
+ if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ nullptr,
Offset, /*HasBaseReg=*/ true)) {
Expr = Copy;
Offset = 0;
}
std::pair<UseMapTy::iterator, bool> P =
- UseMap.insert(std::make_pair(std::make_pair(Expr, Kind), 0));
+ UseMap.insert(std::make_pair(LSRUse::SCEVUseKindPair(Expr, Kind), 0));
if (!P.second) {
// A use already existed with this base.
size_t LUIdx = P.first->second;
@@ -2306,7 +2376,7 @@ LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
}
// Nothing looked good.
- return 0;
+ return nullptr;
}
void LSRInstance::CollectInterestingTypesAndFactors() {
@@ -2338,7 +2408,7 @@ void LSRInstance::CollectInterestingTypesAndFactors() {
for (SmallSetVector<const SCEV *, 4>::const_iterator
I = Strides.begin(), E = Strides.end(); I != E; ++I)
for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
- llvm::next(I); NewStrideIter != E; ++NewStrideIter) {
+ std::next(I); NewStrideIter != E; ++NewStrideIter) {
const SCEV *OldStride = *I;
const SCEV *NewStride = *NewStrideIter;
@@ -2424,7 +2494,7 @@ static const SCEV *getExprBase(const SCEV *S) {
  default: // including scUnknown.
return S;
case scConstant:
- return 0;
+ return nullptr;
case scTruncate:
return getExprBase(cast<SCEVTruncateExpr>(S)->getOperand());
case scZeroExtend:
@@ -2515,7 +2585,7 @@ isProfitableChain(IVChain &Chain, SmallPtrSet<Instruction*, 4> &Users,
&& SE.getSCEV(Chain.tailUserInst()) == Chain.Incs[0].IncExpr) {
--cost;
}
- const SCEV *LastIncExpr = 0;
+ const SCEV *LastIncExpr = nullptr;
unsigned NumConstIncrements = 0;
unsigned NumVarIncrements = 0;
unsigned NumReusedIncrements = 0;
@@ -2574,7 +2644,7 @@ void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
// Visit all existing chains. Check if its IVOper can be computed as a
// profitable loop invariant increment from the last link in the Chain.
unsigned ChainIdx = 0, NChains = IVChainVec.size();
- const SCEV *LastIncExpr = 0;
+ const SCEV *LastIncExpr = nullptr;
for (; ChainIdx < NChains; ++ChainIdx) {
IVChain &Chain = IVChainVec[ChainIdx];
@@ -2646,9 +2716,8 @@ void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
  // they will eventually be used by the current chain, or can be computed
// from one of the chain increments. To be more precise we could
// transitively follow its user and only add leaf IV users to the set.
- for (Value::use_iterator UseIter = IVOper->use_begin(),
- UseEnd = IVOper->use_end(); UseIter != UseEnd; ++UseIter) {
- Instruction *OtherUse = dyn_cast<Instruction>(*UseIter);
+ for (User *U : IVOper->users()) {
+ Instruction *OtherUse = dyn_cast<Instruction>(U);
if (!OtherUse)
continue;
// Uses in the chain will no longer be uses if the chain is formed.
@@ -2738,7 +2807,7 @@ void LSRInstance::CollectChains() {
Instruction *IVOpInst = cast<Instruction>(*IVOpIter);
if (UniqueOperands.insert(IVOpInst))
ChainInstruction(I, IVOpInst, ChainUsersVec);
- IVOpIter = findIVOperand(llvm::next(IVOpIter), IVOpEnd, L, SE);
+ IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);
}
} // Continue walking down the instructions.
} // Continue walking down the domtree.
@@ -2795,7 +2864,7 @@ static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
int64_t IncOffset = IncConst->getValue()->getSExtValue();
if (!isAlwaysFoldable(TTI, LSRUse::Address,
- getAccessType(UserInst), /*BaseGV=*/ 0,
+ getAccessType(UserInst), /*BaseGV=*/ nullptr,
                        IncOffset, /*HasBaseReg=*/ false))
return false;
@@ -2813,7 +2882,7 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
// findIVOperand returns IVOpEnd if it can no longer find a valid IV user.
User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(),
IVOpEnd, L, SE);
- Value *IVSrc = 0;
+ Value *IVSrc = nullptr;
while (IVOpIter != IVOpEnd) {
IVSrc = getWideOperand(*IVOpIter);
@@ -2829,7 +2898,7 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
|| SE.getSCEV(IVSrc) == Head.IncExpr) {
break;
}
- IVOpIter = findIVOperand(llvm::next(IVOpIter), IVOpEnd, L, SE);
+ IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);
}
if (IVOpIter == IVOpEnd) {
// Gracefully give up on this chain.
@@ -2840,7 +2909,7 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n");
Type *IVTy = IVSrc->getType();
Type *IntTy = SE.getEffectiveSCEVType(IVTy);
- const SCEV *LeftOverExpr = 0;
+ const SCEV *LeftOverExpr = nullptr;
for (IVChain::const_iterator IncI = Chain.begin(),
IncE = Chain.end(); IncI != IncE; ++IncI) {
@@ -2871,7 +2940,7 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
TTI)) {
assert(IVTy == IVOper->getType() && "inconsistent IV increment type");
IVSrc = IVOper;
- LeftOverExpr = 0;
+ LeftOverExpr = nullptr;
}
}
Type *OperTy = IncI->IVOperand->getType();
@@ -2926,7 +2995,7 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
LF.PostIncLoops = UI->getPostIncLoops();
LSRUse::KindType Kind = LSRUse::Basic;
- Type *AccessTy = 0;
+ Type *AccessTy = nullptr;
if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) {
Kind = LSRUse::Address;
AccessTy = getAccessType(LF.UserInst);
@@ -2957,7 +3026,7 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
if (SE.isLoopInvariant(N, L) && isSafeToExpand(N, SE)) {
// S is normalized, so normalize N before folding it into S
// to keep the result normalized.
- N = TransformForPostIncUse(Normalize, N, CI, 0,
+ N = TransformForPostIncUse(Normalize, N, CI, nullptr,
LF.PostIncLoops, SE, DT);
Kind = LSRUse::ICmpZero;
S = SE.getMinusSCEV(N, S);
@@ -3032,6 +3101,9 @@ void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) {
/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) {
+ // Do not insert formula that we will not be able to expand.
+ assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F) &&
+ "Formula is illegal");
if (!LU.InsertFormula(F))
return false;
@@ -3059,18 +3131,17 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
Worklist.push_back(D->getLHS());
Worklist.push_back(D->getRHS());
- } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
- if (!Inserted.insert(U)) continue;
- const Value *V = U->getValue();
+ } else if (const SCEVUnknown *US = dyn_cast<SCEVUnknown>(S)) {
+ if (!Inserted.insert(US)) continue;
+ const Value *V = US->getValue();
if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
// Look for instructions defined outside the loop.
if (L->contains(Inst)) continue;
} else if (isa<UndefValue>(V))
// Undef doesn't have a live range, so it doesn't matter.
continue;
- for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
- UI != UE; ++UI) {
- const Instruction *UserInst = dyn_cast<Instruction>(*UI);
+ for (const Use &U : V->uses()) {
+ const Instruction *UserInst = dyn_cast<Instruction>(U.getUser());
// Ignore non-instructions.
if (!UserInst)
continue;
@@ -3082,7 +3153,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
const BasicBlock *UseBB = !isa<PHINode>(UserInst) ?
UserInst->getParent() :
cast<PHINode>(UserInst)->getIncomingBlock(
- PHINode::getIncomingValueNumForOperand(UI.getOperandNo()));
+ PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
if (!DT.dominates(L->getHeader(), UseBB))
continue;
// Ignore uses which are part of other SCEV expressions, to avoid
@@ -3092,7 +3163,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
// If the user is a no-op, look through to its uses.
if (!isa<SCEVUnknown>(UserS))
continue;
- if (UserS == U) {
+ if (UserS == US) {
Worklist.push_back(
SE.getUnknown(const_cast<Instruction *>(UserInst)));
continue;
@@ -3100,7 +3171,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
}
// Ignore icmp instructions which are already being analyzed.
if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) {
- unsigned OtherIdx = !UI.getOperandNo();
+ unsigned OtherIdx = !U.getOperandNo();
Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx));
if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L))
continue;
@@ -3108,8 +3179,8 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
LSRFixup &LF = getNewFixup();
LF.UserInst = const_cast<Instruction *>(UserInst);
- LF.OperandValToReplace = UI.getUse();
- std::pair<size_t, int64_t> P = getUse(S, LSRUse::Basic, 0);
+ LF.OperandValToReplace = U;
+ std::pair<size_t, int64_t> P = getUse(S, LSRUse::Basic, nullptr);
LF.LUIdx = P.first;
LF.Offset = P.second;
LSRUse &LU = Uses[LF.LUIdx];
@@ -3118,7 +3189,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
SE.getTypeSizeInBits(LU.WidestFixupType) <
SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
LU.WidestFixupType = LF.OperandValToReplace->getType();
- InsertSupplementalFormula(U, LU, LF.LUIdx);
+ InsertSupplementalFormula(US, LU, LF.LUIdx);
CountRegisters(LU.Formulae.back(), Uses.size() - 1);
break;
}
@@ -3148,7 +3219,7 @@ static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C,
if (Remainder)
Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
}
- return 0;
+ return nullptr;
} else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
// Split a non-zero base out of an addrec.
if (AR->getStart()->isZero())
@@ -3160,7 +3231,7 @@ static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C,
// does not pertain to this loop.
if (Remainder && (AR->getLoop() == L || !isa<SCEVAddRecExpr>(Remainder))) {
Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
- Remainder = 0;
+ Remainder = nullptr;
}
if (Remainder != AR->getStart()) {
if (!Remainder)
@@ -3182,90 +3253,110 @@ static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C,
CollectSubexprs(Mul->getOperand(1), C, Ops, L, SE, Depth+1);
if (Remainder)
Ops.push_back(SE.getMulExpr(C, Remainder));
- return 0;
+ return nullptr;
}
}
return S;
}
-/// GenerateReassociations - Split out subexpressions from adds and the bases of
-/// addrecs.
-void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
- Formula Base,
- unsigned Depth) {
- // Arbitrarily cap recursion to protect compile time.
- if (Depth >= 3) return;
+/// \brief Helper function for LSRInstance::GenerateReassociations.
+void LSRInstance::GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
+ const Formula &Base,
+ unsigned Depth, size_t Idx,
+ bool IsScaledReg) {
+ const SCEV *BaseReg = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
+ SmallVector<const SCEV *, 8> AddOps;
+ const SCEV *Remainder = CollectSubexprs(BaseReg, nullptr, AddOps, L, SE);
+ if (Remainder)
+ AddOps.push_back(Remainder);
+
+ if (AddOps.size() == 1)
+ return;
- for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
- const SCEV *BaseReg = Base.BaseRegs[i];
+ for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
+ JE = AddOps.end();
+ J != JE; ++J) {
- SmallVector<const SCEV *, 8> AddOps;
- const SCEV *Remainder = CollectSubexprs(BaseReg, 0, AddOps, L, SE);
- if (Remainder)
- AddOps.push_back(Remainder);
+ // Loop-variant "unknown" values are uninteresting; we won't be able to
+ // do anything meaningful with them.
+ if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L))
+ continue;
- if (AddOps.size() == 1) continue;
+ // Don't pull a constant into a register if the constant could be folded
+ // into an immediate field.
+ if (isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind,
+ LU.AccessTy, *J, Base.getNumRegs() > 1))
+ continue;
- for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
- JE = AddOps.end(); J != JE; ++J) {
+ // Collect all operands except *J.
+ SmallVector<const SCEV *, 8> InnerAddOps(
+ ((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J);
+ InnerAddOps.append(std::next(J),
+ ((const SmallVector<const SCEV *, 8> &)AddOps).end());
+
+ // Don't leave just a constant behind in a register if the constant could
+ // be folded into an immediate field.
+ if (InnerAddOps.size() == 1 &&
+ isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind,
+ LU.AccessTy, InnerAddOps[0], Base.getNumRegs() > 1))
+ continue;
- // Loop-variant "unknown" values are uninteresting; we won't be able to
- // do anything meaningful with them.
- if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L))
- continue;
+ const SCEV *InnerSum = SE.getAddExpr(InnerAddOps);
+ if (InnerSum->isZero())
+ continue;
+ Formula F = Base;
- // Don't pull a constant into a register if the constant could be folded
- // into an immediate field.
- if (isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind,
- LU.AccessTy, *J, Base.getNumRegs() > 1))
- continue;
+ // Add the remaining pieces of the add back into the new formula.
+ const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum);
+ if (InnerSumSC && SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 &&
+ TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
+ InnerSumSC->getValue()->getZExtValue())) {
+ F.UnfoldedOffset =
+ (uint64_t)F.UnfoldedOffset + InnerSumSC->getValue()->getZExtValue();
+ if (IsScaledReg)
+ F.ScaledReg = nullptr;
+ else
+ F.BaseRegs.erase(F.BaseRegs.begin() + Idx);
+ } else if (IsScaledReg)
+ F.ScaledReg = InnerSum;
+ else
+ F.BaseRegs[Idx] = InnerSum;
+
+ // Add J as its own register, or an unfolded immediate.
+ const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J);
+ if (SC && SE.getTypeSizeInBits(SC->getType()) <= 64 &&
+ TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
+ SC->getValue()->getZExtValue()))
+ F.UnfoldedOffset =
+ (uint64_t)F.UnfoldedOffset + SC->getValue()->getZExtValue();
+ else
+ F.BaseRegs.push_back(*J);
+      // We may have changed the number of registers in the base regs; adjust
+      // the formula accordingly.
+ F.Canonicalize();
- // Collect all operands except *J.
- SmallVector<const SCEV *, 8> InnerAddOps
- (((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J);
- InnerAddOps.append
- (llvm::next(J), ((const SmallVector<const SCEV *, 8> &)AddOps).end());
-
- // Don't leave just a constant behind in a register if the constant could
- // be folded into an immediate field.
- if (InnerAddOps.size() == 1 &&
- isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind,
- LU.AccessTy, InnerAddOps[0], Base.getNumRegs() > 1))
- continue;
+ if (InsertFormula(LU, LUIdx, F))
+ // If that formula hadn't been seen before, recurse to find more like
+ // it.
+ GenerateReassociations(LU, LUIdx, LU.Formulae.back(), Depth + 1);
+ }
+}
- const SCEV *InnerSum = SE.getAddExpr(InnerAddOps);
- if (InnerSum->isZero())
- continue;
- Formula F = Base;
+/// GenerateReassociations - Split out subexpressions from adds and the bases of
+/// addrecs.
+void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
+ Formula Base, unsigned Depth) {
+ assert(Base.isCanonical() && "Input must be in the canonical form");
+ // Arbitrarily cap recursion to protect compile time.
+ if (Depth >= 3)
+ return;
- // Add the remaining pieces of the add back into the new formula.
- const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum);
- if (InnerSumSC &&
- SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 &&
- TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
- InnerSumSC->getValue()->getZExtValue())) {
- F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset +
- InnerSumSC->getValue()->getZExtValue();
- F.BaseRegs.erase(F.BaseRegs.begin() + i);
- } else
- F.BaseRegs[i] = InnerSum;
-
- // Add J as its own register, or an unfolded immediate.
- const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J);
- if (SC && SE.getTypeSizeInBits(SC->getType()) <= 64 &&
- TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
- SC->getValue()->getZExtValue()))
- F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset +
- SC->getValue()->getZExtValue();
- else
- F.BaseRegs.push_back(*J);
+ for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
+ GenerateReassociationsImpl(LU, LUIdx, Base, Depth, i);
- if (InsertFormula(LU, LUIdx, F))
- // If that formula hadn't been seen before, recurse to find more like
- // it.
- GenerateReassociations(LU, LUIdx, LU.Formulae.back(), Depth+1);
- }
- }
+ if (Base.Scale == 1)
+ GenerateReassociationsImpl(LU, LUIdx, Base, Depth,
+ /* Idx */ -1, /* IsScaledReg */ true);
}
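GenerateReassociations and the helpers around it now assume a canonical Formula: a non-zero Scale implies a ScaledReg, and a 1-scaled register is interchangeable with one more base register. A simplified sketch of that invariant with a toy MiniFormula type; it is not the LLVM class, and the register names are placeholders:

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

// Simplified stand-in for LSR's Formula, only to illustrate the canonical
// form: if Scale is non-zero there must be a ScaledReg, and a plain sum such
// as "reg1 + reg2" may be canonicalized as "reg1 + 1*reg2".
struct MiniFormula {
  std::vector<std::string> BaseRegs;
  std::string ScaledReg; // empty means "no scaled register"
  int64_t Scale = 0;

  bool isCanonical() const {
    return ScaledReg.empty() ? Scale == 0 : Scale != 0;
  }
  void canonicalize() {
    if (isCanonical() || BaseRegs.empty())
      return;
    // Promote the last base register into the scaled slot with Scale == 1.
    ScaledReg = BaseRegs.back();
    BaseRegs.pop_back();
    Scale = 1;
  }
  bool unscale() {
    if (Scale != 1)
      return false; // only a 1-scaled register can be demoted
    BaseRegs.push_back(ScaledReg);
    ScaledReg.clear();
    Scale = 0;
    return true;
  }
};

int main() {
  MiniFormula F;
  F.BaseRegs = {"%i", "%n"};
  F.Scale = 1;      // non-canonical: a scale without a scaled register
  F.canonicalize(); // %i + 1*%n
  assert(F.isCanonical() && F.ScaledReg == "%n" && F.BaseRegs.size() == 1);
  F.unscale();      // back to %i + %n
  assert(F.Scale == 0 && F.BaseRegs.size() == 2);
}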
/// GenerateCombinations - Generate a formula consisting of all of the
@@ -3273,8 +3364,12 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
Formula Base) {
// This method is only interesting on a plurality of registers.
- if (Base.BaseRegs.size() <= 1) return;
+ if (Base.BaseRegs.size() + (Base.Scale == 1) <= 1)
+ return;
+ // Flatten the representation, i.e., reg1 + 1*reg2 => reg1 + reg2, before
+ // processing the formula.
+ Base.Unscale();
Formula F = Base;
F.BaseRegs.clear();
SmallVector<const SCEV *, 4> Ops;
@@ -3294,29 +3389,87 @@ void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
// rather than proceed with zero in a register.
if (!Sum->isZero()) {
F.BaseRegs.push_back(Sum);
+ F.Canonicalize();
(void)InsertFormula(LU, LUIdx, F);
}
}
}
+/// \brief Helper function for LSRInstance::GenerateSymbolicOffsets.
+void LSRInstance::GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx,
+ const Formula &Base, size_t Idx,
+ bool IsScaledReg) {
+ const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
+ GlobalValue *GV = ExtractSymbol(G, SE);
+ if (G->isZero() || !GV)
+ return;
+ Formula F = Base;
+ F.BaseGV = GV;
+ if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
+ return;
+ if (IsScaledReg)
+ F.ScaledReg = G;
+ else
+ F.BaseRegs[Idx] = G;
+ (void)InsertFormula(LU, LUIdx, F);
+}
+
/// GenerateSymbolicOffsets - Generate reuse formulae using symbolic offsets.
void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
Formula Base) {
// We can't add a symbolic offset if the address already contains one.
if (Base.BaseGV) return;
- for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
- const SCEV *G = Base.BaseRegs[i];
- GlobalValue *GV = ExtractSymbol(G, SE);
- if (G->isZero() || !GV)
- continue;
+ for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
+ GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, i);
+ if (Base.Scale == 1)
+ GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, /* Idx */ -1,
+ /* IsScaledReg */ true);
+}
+
+/// \brief Helper function for LSRInstance::GenerateConstantOffsets.
+void LSRInstance::GenerateConstantOffsetsImpl(
+ LSRUse &LU, unsigned LUIdx, const Formula &Base,
+ const SmallVectorImpl<int64_t> &Worklist, size_t Idx, bool IsScaledReg) {
+ const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
+ for (SmallVectorImpl<int64_t>::const_iterator I = Worklist.begin(),
+ E = Worklist.end();
+ I != E; ++I) {
Formula F = Base;
- F.BaseGV = GV;
- if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
- continue;
- F.BaseRegs[i] = G;
- (void)InsertFormula(LU, LUIdx, F);
+ F.BaseOffset = (uint64_t)Base.BaseOffset - *I;
+ if (isLegalUse(TTI, LU.MinOffset - *I, LU.MaxOffset - *I, LU.Kind,
+ LU.AccessTy, F)) {
+ // Add the offset to the base register.
+ const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), *I), G);
+ // If it cancelled out, drop the base register, otherwise update it.
+ if (NewG->isZero()) {
+ if (IsScaledReg) {
+ F.Scale = 0;
+ F.ScaledReg = nullptr;
+ } else
+ F.DeleteBaseReg(F.BaseRegs[Idx]);
+ F.Canonicalize();
+ } else if (IsScaledReg)
+ F.ScaledReg = NewG;
+ else
+ F.BaseRegs[Idx] = NewG;
+
+ (void)InsertFormula(LU, LUIdx, F);
+ }
}
+
+ int64_t Imm = ExtractImmediate(G, SE);
+ if (G->isZero() || Imm == 0)
+ return;
+ Formula F = Base;
+ F.BaseOffset = (uint64_t)F.BaseOffset + Imm;
+ if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
+ return;
+ if (IsScaledReg)
+ F.ScaledReg = G;
+ else
+ F.BaseRegs[Idx] = G;
+ (void)InsertFormula(LU, LUIdx, F);
}
/// GenerateConstantOffsets - Generate reuse formulae using symbolic offsets.
@@ -3329,38 +3482,11 @@ void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
if (LU.MaxOffset != LU.MinOffset)
Worklist.push_back(LU.MaxOffset);
- for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
- const SCEV *G = Base.BaseRegs[i];
-
- for (SmallVectorImpl<int64_t>::const_iterator I = Worklist.begin(),
- E = Worklist.end(); I != E; ++I) {
- Formula F = Base;
- F.BaseOffset = (uint64_t)Base.BaseOffset - *I;
- if (isLegalUse(TTI, LU.MinOffset - *I, LU.MaxOffset - *I, LU.Kind,
- LU.AccessTy, F)) {
- // Add the offset to the base register.
- const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), *I), G);
- // If it cancelled out, drop the base register, otherwise update it.
- if (NewG->isZero()) {
- std::swap(F.BaseRegs[i], F.BaseRegs.back());
- F.BaseRegs.pop_back();
- } else
- F.BaseRegs[i] = NewG;
-
- (void)InsertFormula(LU, LUIdx, F);
- }
- }
-
- int64_t Imm = ExtractImmediate(G, SE);
- if (G->isZero() || Imm == 0)
- continue;
- Formula F = Base;
- F.BaseOffset = (uint64_t)F.BaseOffset + Imm;
- if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
- continue;
- F.BaseRegs[i] = G;
- (void)InsertFormula(LU, LUIdx, F);
- }
+ for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
+ GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, i);
+ if (Base.Scale == 1)
+ GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, /* Idx */ -1,
+ /* IsScaledReg */ true);
}
/// GenerateICmpZeroScales - For ICmpZero, check to see if we can scale up
@@ -3460,7 +3586,11 @@ void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
if (!IntTy) return;
// If this Formula already has a scaled register, we can't add another one.
- if (Base.Scale != 0) return;
+ // Try to unscale the formula to generate a better scale.
+ if (Base.Scale != 0 && !Base.Unscale())
+ return;
+
+  assert(Base.Scale == 0 && "Unscale did not do its job!");
// Check each interesting stride.
for (SmallSetVector<int64_t, 8>::const_iterator
@@ -3501,6 +3631,11 @@ void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
Formula F = Base;
F.ScaledReg = Quotient;
F.DeleteBaseReg(F.BaseRegs[i]);
+ // The canonical representation of 1*reg is reg, which is already in
+      // Base. In that case, do not try to insert the formula; it will be
+      // rejected anyway.
+ if (F.Scale == 1 && F.BaseRegs.empty())
+ continue;
(void)InsertFormula(LU, LUIdx, F);
}
}
@@ -3626,8 +3761,9 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
// Conservatively examine offsets between this orig reg a few selected
// other orig regs.
ImmMapTy::const_iterator OtherImms[] = {
- Imms.begin(), prior(Imms.end()),
- Imms.lower_bound((Imms.begin()->first + prior(Imms.end())->first) / 2)
+ Imms.begin(), std::prev(Imms.end()),
+ Imms.lower_bound((Imms.begin()->first + std::prev(Imms.end())->first) /
+ 2)
};
for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) {
ImmMapTy::const_iterator M = OtherImms[i];
@@ -3664,7 +3800,12 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
// TODO: Use a more targeted data structure.
for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
- const Formula &F = LU.Formulae[L];
+ Formula F = LU.Formulae[L];
+ // FIXME: The code for the scaled and unscaled registers looks
+ // very similar but slightly different. Investigate if they
+ // could be merged. That way, we would not have to unscale the
+ // Formula.
+ F.Unscale();
// Use the immediate in the scaled register.
if (F.ScaledReg == OrigReg) {
int64_t Offset = (uint64_t)F.BaseOffset + Imm * (uint64_t)F.Scale;
@@ -3690,6 +3831,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
continue;
// OK, looks good.
+ NewF.Canonicalize();
(void)InsertFormula(LU, LUIdx, NewF);
} else {
// Use the immediate in a base register.
@@ -3723,6 +3865,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
goto skip_formula;
// Ok, looks good.
+ NewF.Canonicalize();
(void)InsertFormula(LU, LUIdx, NewF);
break;
skip_formula:;
@@ -3976,7 +4119,7 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
E = LU.Formulae.end(); I != E; ++I) {
const Formula &F = *I;
- if (F.BaseOffset == 0 || F.Scale != 0)
+ if (F.BaseOffset == 0 || (F.Scale != 0 && F.Scale != 1))
continue;
LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU);
@@ -4073,7 +4216,7 @@ void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
// Pick the register which is used by the most LSRUses, which is likely
// to be a good reuse register candidate.
- const SCEV *Best = 0;
+ const SCEV *Best = nullptr;
unsigned BestNum = 0;
for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
I != E; ++I) {
@@ -4170,19 +4313,22 @@ void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
E = LU.Formulae.end(); I != E; ++I) {
const Formula &F = *I;
- // Ignore formulae which do not use any of the required registers.
- bool SatisfiedReqReg = true;
+ // Ignore formulae which may not be ideal in terms of register reuse of
+ // ReqRegs. The formula should use all required registers before
+ // introducing new ones.
+ int NumReqRegsToFind = std::min(F.getNumRegs(), ReqRegs.size());
for (SmallSetVector<const SCEV *, 4>::const_iterator J = ReqRegs.begin(),
JE = ReqRegs.end(); J != JE; ++J) {
const SCEV *Reg = *J;
- if ((!F.ScaledReg || F.ScaledReg != Reg) &&
- std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) ==
+ if ((F.ScaledReg && F.ScaledReg == Reg) ||
+ std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) !=
F.BaseRegs.end()) {
- SatisfiedReqReg = false;
- break;
+ --NumReqRegsToFind;
+ if (NumReqRegsToFind == 0)
+ break;
}
}
- if (!SatisfiedReqReg) {
+ if (NumReqRegsToFind != 0) {
// If none of the formulae satisfied the required registers, then we could
// clear ReqRegs and try again. Currently, we simply give up in this case.
continue;
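The new filter keeps a formula only if it reuses as many of the required registers as it plausibly can, instead of discarding it on the first miss. A small standalone sketch of that counting rule; the string register names are stand-ins for SCEV pointers:

#include <algorithm>
#include <iostream>
#include <set>
#include <string>
#include <vector>

// A formula passes only if it reuses min(#formula regs, #required regs) of
// the already-required registers.
static bool reusesEnough(const std::vector<std::string> &FormulaRegs,
                         const std::set<std::string> &ReqRegs) {
  size_t NumReqRegsToFind = std::min(FormulaRegs.size(), ReqRegs.size());
  for (const std::string &Reg : ReqRegs) {
    if (std::find(FormulaRegs.begin(), FormulaRegs.end(), Reg) !=
        FormulaRegs.end()) {
      if (--NumReqRegsToFind == 0)
        break;
    }
  }
  return NumReqRegsToFind == 0;
}

int main() {
  std::set<std::string> Req = {"%iv", "%n"};
  std::cout << reusesEnough({"%iv", "%n"}, Req) << "\n"; // 1: reuses both
  std::cout << reusesEnough({"%iv", "%m"}, Req) << "\n"; // 0: introduces %m instead
  std::cout << reusesEnough({"%iv"}, Req) << "\n";       // 1: reuses all it can
}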
@@ -4222,7 +4368,7 @@ void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
SmallVector<const Formula *, 8> Workspace;
Cost SolutionCost;
- SolutionCost.Loose();
+ SolutionCost.Lose();
Cost CurCost;
SmallPtrSet<const SCEV *, 16> CurRegs;
DenseSet<const SCEV *> VisitedRegs;
@@ -4280,7 +4426,7 @@ LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
}
bool AllDominate = true;
- Instruction *BetterPos = 0;
+ Instruction *BetterPos = nullptr;
Instruction *Tentative = IDom->getTerminator();
for (SmallVectorImpl<Instruction *>::const_iterator I = Inputs.begin(),
E = Inputs.end(); I != E; ++I) {
@@ -4293,7 +4439,7 @@ LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
// instead of at the end, so that it can be used for other expansions.
if (IDom == Inst->getParent() &&
(!BetterPos || !DT.dominates(Inst, BetterPos)))
- BetterPos = llvm::next(BasicBlock::iterator(Inst));
+ BetterPos = std::next(BasicBlock::iterator(Inst));
}
if (!AllDominate)
break;
@@ -4419,11 +4565,11 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
LF.UserInst, LF.OperandValToReplace,
Loops, SE, DT);
- Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, 0, IP)));
+ Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, nullptr, IP)));
}
// Expand the ScaledReg portion.
- Value *ICmpScaledV = 0;
+ Value *ICmpScaledV = nullptr;
if (F.Scale != 0) {
const SCEV *ScaledS = F.ScaledReg;
@@ -4434,25 +4580,34 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
Loops, SE, DT);
if (LU.Kind == LSRUse::ICmpZero) {
- // An interesting way of "folding" with an icmp is to use a negated
- // scale, which we'll implement by inserting it into the other operand
- // of the icmp.
- assert(F.Scale == -1 &&
- "The only scale supported by ICmpZero uses is -1!");
- ICmpScaledV = Rewriter.expandCodeFor(ScaledS, 0, IP);
+ // Expand ScaleReg as if it was part of the base regs.
+ if (F.Scale == 1)
+ Ops.push_back(
+ SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr, IP)));
+ else {
+ // An interesting way of "folding" with an icmp is to use a negated
+ // scale, which we'll implement by inserting it into the other operand
+ // of the icmp.
+ assert(F.Scale == -1 &&
+ "The only scale supported by ICmpZero uses is -1!");
+ ICmpScaledV = Rewriter.expandCodeFor(ScaledS, nullptr, IP);
+ }
} else {
// Otherwise just expand the scaled register and an explicit scale,
// which is expected to be matched as part of the address.
// Flush the operand list to suppress SCEVExpander hoisting address modes.
- if (!Ops.empty() && LU.Kind == LSRUse::Address) {
+ // Unless the addressing mode will not be folded.
+ if (!Ops.empty() && LU.Kind == LSRUse::Address &&
+ isAMCompletelyFolded(TTI, LU, F)) {
Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
Ops.clear();
Ops.push_back(SE.getUnknown(FullV));
}
- ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, 0, IP));
- ScaledS = SE.getMulExpr(ScaledS,
- SE.getConstant(ScaledS->getType(), F.Scale));
+ ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr, IP));
+ if (F.Scale != 1)
+ ScaledS =
+ SE.getMulExpr(ScaledS, SE.getConstant(ScaledS->getType(), F.Scale));
Ops.push_back(ScaledS);
}
}
@@ -4530,7 +4685,9 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
}
CI->setOperand(1, ICmpScaledV);
} else {
- assert(F.Scale == 0 &&
+ // A scale of 1 means that the scale has been expanded as part of the
+ // base regs.
+ assert((F.Scale == 0 || F.Scale == 1) &&
"ICmp does not support folding a global value and "
"a scale at the same time!");
Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy),
@@ -4571,7 +4728,7 @@ void LSRInstance::RewriteForPHI(PHINode *PN,
Loop *PNLoop = LI.getLoopFor(Parent);
if (!PNLoop || Parent != PNLoop->getHeader()) {
// Split the critical edge.
- BasicBlock *NewBB = 0;
+ BasicBlock *NewBB = nullptr;
if (!Parent->isLandingPad()) {
NewBB = SplitCriticalEdge(BB, Parent, P,
/*MergeIdenticalEdges=*/true,
@@ -4600,7 +4757,7 @@ void LSRInstance::RewriteForPHI(PHINode *PN,
}
std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
- Inserted.insert(std::make_pair(BB, static_cast<Value *>(0)));
+ Inserted.insert(std::make_pair(BB, static_cast<Value *>(nullptr)));
if (!Pair.second)
PN->setIncomingValue(i, Pair.first->second);
else {
@@ -4707,9 +4864,10 @@ LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
LSRInstance::LSRInstance(Loop *L, Pass *P)
: IU(P->getAnalysis<IVUsers>()), SE(P->getAnalysis<ScalarEvolution>()),
- DT(P->getAnalysis<DominatorTree>()), LI(P->getAnalysis<LoopInfo>()),
+ DT(P->getAnalysis<DominatorTreeWrapperPass>().getDomTree()),
+ LI(P->getAnalysis<LoopInfo>()),
TTI(P->getAnalysis<TargetTransformInfo>()), L(L), Changed(false),
- IVIncInsertPos(0) {
+ IVIncInsertPos(nullptr) {
// If LoopSimplify form is not available, stay out of trouble.
if (!L->isLoopSimplifyForm())
return;
@@ -4746,7 +4904,7 @@ LSRInstance::LSRInstance(Loop *L, Pass *P)
#endif // DEBUG
DEBUG(dbgs() << "\nLSR on loop ";
- WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false);
+ L->getHeader()->printAsOperand(dbgs(), /*PrintType=*/false);
dbgs() << ":\n");
// First, perform some low-level loop optimizations.
@@ -4876,8 +5034,8 @@ public:
LoopStrengthReduce();
private:
- bool runOnLoop(Loop *L, LPPassManager &LPM);
- void getAnalysisUsage(AnalysisUsage &AU) const;
+ bool runOnLoop(Loop *L, LPPassManager &LPM) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
};
}
@@ -4886,7 +5044,7 @@ char LoopStrengthReduce::ID = 0;
INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce",
"Loop Strength Reduction", false, false)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(IVUsers)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
@@ -4911,8 +5069,8 @@ void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<LoopInfo>();
AU.addPreserved<LoopInfo>();
AU.addRequiredID(LoopSimplifyID);
- AU.addRequired<DominatorTree>();
- AU.addPreserved<DominatorTree>();
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
AU.addRequired<ScalarEvolution>();
AU.addPreserved<ScalarEvolution>();
// Requiring LoopSimplify a second time here prevents IVUsers from running
@@ -4924,6 +5082,9 @@ void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
}
bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
+ if (skipOptnoneFunction(L))
+ return false;
+
bool Changed = false;
// Run the main LSR transformation.
@@ -4937,10 +5098,9 @@ bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
#ifndef NDEBUG
Rewriter.setDebugType(DEBUG_TYPE);
#endif
- unsigned numFolded =
- Rewriter.replaceCongruentIVs(L, &getAnalysis<DominatorTree>(),
- DeadInsts,
- &getAnalysis<TargetTransformInfo>());
+ unsigned numFolded = Rewriter.replaceCongruentIVs(
+ L, &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), DeadInsts,
+ &getAnalysis<TargetTransformInfo>());
if (numFolded) {
Changed = true;
DeleteTriviallyDeadInstructions(DeadInsts);
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
index 08ac38d..935f289 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -12,14 +12,16 @@
// counts of loops easily.
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "loop-unroll"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Metadata.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -28,13 +30,16 @@
using namespace llvm;
+#define DEBUG_TYPE "loop-unroll"
+
static cl::opt<unsigned>
UnrollThreshold("unroll-threshold", cl::init(150), cl::Hidden,
cl::desc("The cut-off point for automatic loop unrolling"));
static cl::opt<unsigned>
UnrollCount("unroll-count", cl::init(0), cl::Hidden,
- cl::desc("Use this unroll count for all loops, for testing purposes"));
+ cl::desc("Use this unroll count for all loops including those with "
+ "unroll_count pragma values, for testing purposes"));
static cl::opt<bool>
UnrollAllowPartial("unroll-allow-partial", cl::init(false), cl::Hidden,
@@ -45,6 +50,11 @@ static cl::opt<bool>
UnrollRuntime("unroll-runtime", cl::ZeroOrMore, cl::init(false), cl::Hidden,
cl::desc("Unroll loops with run-time trip counts"));
+static cl::opt<unsigned>
+PragmaUnrollThreshold("pragma-unroll-threshold", cl::init(16 * 1024), cl::Hidden,
+ cl::desc("Unrolled size limit for loops with an unroll(enable) or "
+ "unroll_count pragma."));
+
namespace {
class LoopUnroll : public LoopPass {
public:
@@ -86,12 +96,12 @@ namespace {
bool UserAllowPartial; // CurrentAllowPartial is user-specified.
bool UserRuntime; // CurrentRuntime is user-specified.
- bool runOnLoop(Loop *L, LPPassManager &LPM);
+ bool runOnLoop(Loop *L, LPPassManager &LPM) override;
/// This transformation requires natural loop information & requires that
/// loop preheaders be inserted into the CFG...
///
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<LoopInfo>();
AU.addPreserved<LoopInfo>();
AU.addRequiredID(LoopSimplifyID);
@@ -105,7 +115,67 @@ namespace {
// If loop unroll does not preserve dom info then LCSSA pass on next
// loop will receive invalid dom info.
// For now, recreate dom info, if loop is unrolled.
- AU.addPreserved<DominatorTree>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
+ }
+
+ // Fill in the UnrollingPreferences parameter with values from the
+  // TargetTransformInfo.
+ void getUnrollingPreferences(Loop *L, const TargetTransformInfo &TTI,
+ TargetTransformInfo::UnrollingPreferences &UP) {
+ UP.Threshold = CurrentThreshold;
+ UP.OptSizeThreshold = OptSizeUnrollThreshold;
+ UP.PartialThreshold = CurrentThreshold;
+ UP.PartialOptSizeThreshold = OptSizeUnrollThreshold;
+ UP.Count = CurrentCount;
+ UP.MaxCount = UINT_MAX;
+ UP.Partial = CurrentAllowPartial;
+ UP.Runtime = CurrentRuntime;
+ TTI.getUnrollingPreferences(L, UP);
+ }
+
+ // Select and return an unroll count based on parameters from
+ // user, unroll preferences, unroll pragmas, or a heuristic.
+  // SetExplicitly is set to true if the unroll count is set by
+ // the user or a pragma rather than selected heuristically.
+ unsigned
+ selectUnrollCount(const Loop *L, unsigned TripCount, bool HasEnablePragma,
+ unsigned PragmaCount,
+ const TargetTransformInfo::UnrollingPreferences &UP,
+ bool &SetExplicitly);
+
+
+ // Select threshold values used to limit unrolling based on a
+ // total unrolled size. Parameters Threshold and PartialThreshold
+ // are set to the maximum unrolled size for fully and partially
+ // unrolled loops respectively.
+ void selectThresholds(const Loop *L, bool HasPragma,
+ const TargetTransformInfo::UnrollingPreferences &UP,
+ unsigned &Threshold, unsigned &PartialThreshold) {
+ // Determine the current unrolling threshold. While this is
+ // normally set from UnrollThreshold, it is overridden to a
+ // smaller value if the current function is marked as
+ // optimize-for-size, and the unroll threshold was not user
+ // specified.
+ Threshold = UserThreshold ? CurrentThreshold : UP.Threshold;
+ PartialThreshold = UserThreshold ? CurrentThreshold : UP.PartialThreshold;
+ if (!UserThreshold &&
+ L->getHeader()->getParent()->getAttributes().
+ hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::OptimizeForSize)) {
+ Threshold = UP.OptSizeThreshold;
+ PartialThreshold = UP.PartialOptSizeThreshold;
+ }
+ if (HasPragma) {
+ // If the loop has an unrolling pragma, we want to be more
+ // aggressive with unrolling limits. Set thresholds to at
+      // least the PragmaUnrollThreshold value, which is larger than the
+ // default limits.
+ if (Threshold != NoThreshold)
+ Threshold = std::max<unsigned>(Threshold, PragmaUnrollThreshold);
+ if (PartialThreshold != NoThreshold)
+ PartialThreshold =
+ std::max<unsigned>(PartialThreshold, PragmaUnrollThreshold);
+ }
}
};
}
@@ -124,6 +194,10 @@ Pass *llvm::createLoopUnrollPass(int Threshold, int Count, int AllowPartial,
return new LoopUnroll(Threshold, Count, AllowPartial, Runtime);
}
+Pass *llvm::createSimpleLoopUnrollPass() {
+ return llvm::createLoopUnrollPass(-1, -1, 0, 0);
+}
+
/// ApproximateLoopSize - Approximate the size of the loop.
static unsigned ApproximateLoopSize(const Loop *L, unsigned &NumCalls,
bool &NotDuplicatable,
@@ -145,7 +219,144 @@ static unsigned ApproximateLoopSize(const Loop *L, unsigned &NumCalls,
return LoopSize;
}
+// Returns the value associated with the given metadata node name (for
+// example, "llvm.loop.unroll.count"). If no such named metadata node
+// exists, then nullptr is returned.
+static const ConstantInt *GetUnrollMetadataValue(const Loop *L,
+ StringRef Name) {
+ MDNode *LoopID = L->getLoopID();
+ if (!LoopID) return nullptr;
+
+ // First operand should refer to the loop id itself.
+ assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
+ assert(LoopID->getOperand(0) == LoopID && "invalid loop id");
+
+ for (unsigned i = 1, e = LoopID->getNumOperands(); i < e; ++i) {
+ const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
+ if (!MD) continue;
+
+ const MDString *S = dyn_cast<MDString>(MD->getOperand(0));
+ if (!S) continue;
+
+ if (Name.equals(S->getString())) {
+ assert(MD->getNumOperands() == 2 &&
+ "Unroll hint metadata should have two operands.");
+ return cast<ConstantInt>(MD->getOperand(1));
+ }
+ }
+ return nullptr;
+}
+
+// Returns true if the loop has an unroll(enable) pragma.
+static bool HasUnrollEnablePragma(const Loop *L) {
+ const ConstantInt *EnableValue =
+ GetUnrollMetadataValue(L, "llvm.loop.unroll.enable");
+ return (EnableValue && EnableValue->getZExtValue());
+}
+
+// Returns true if the loop has an unroll(disable) pragma.
+static bool HasUnrollDisablePragma(const Loop *L) {
+ const ConstantInt *EnableValue =
+ GetUnrollMetadataValue(L, "llvm.loop.unroll.enable");
+ return (EnableValue && !EnableValue->getZExtValue());
+}
+
+// If the loop has an unroll_count pragma, return the (necessarily
+// positive) value from the pragma. Otherwise return 0.
+static unsigned UnrollCountPragmaValue(const Loop *L) {
+ const ConstantInt *CountValue =
+ GetUnrollMetadataValue(L, "llvm.loop.unroll.count");
+ if (CountValue) {
+ unsigned Count = CountValue->getZExtValue();
+ assert(Count >= 1 && "Unroll count must be positive.");
+ return Count;
+ }
+ return 0;
+}
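These helpers walk the loop's !llvm.loop metadata, whose unroll operands pair a name string with a constant value. A toy sketch of the lookup over a simplified representation; the UnrollMD type and the IR shown in the comment are illustrative, not the real MDNode API:

#include <cstdint>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Toy model of the loop metadata shape the helpers above walk.  In IR it
// looks roughly like this (operand layout is illustrative):
//   !0 = metadata !{metadata !0, metadata !1, metadata !2}
//   !1 = metadata !{metadata !"llvm.loop.unroll.enable", i1 1}
//   !2 = metadata !{metadata !"llvm.loop.unroll.count", i32 4}
using UnrollMD = std::vector<std::pair<std::string, uint64_t>>;

// Returns the value for Name, or -1 if the operand is absent (the real helper
// returns a ConstantInt* or nullptr instead).
static int64_t getUnrollMetadataValue(const UnrollMD &LoopID,
                                      const std::string &Name) {
  for (const auto &Op : LoopID)
    if (Op.first == Name)
      return static_cast<int64_t>(Op.second);
  return -1;
}

int main() {
  UnrollMD LoopID = {{"llvm.loop.unroll.enable", 1},
                     {"llvm.loop.unroll.count", 4}};
  std::cout << getUnrollMetadataValue(LoopID, "llvm.loop.unroll.count") << "\n";   // 4
  std::cout << getUnrollMetadataValue(LoopID, "llvm.loop.unroll.runtime") << "\n"; // -1
}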
+
+// Remove existing unroll metadata and add unroll disable metadata to
+// indicate the loop has already been unrolled. This prevents a loop
+// from being unrolled more than is directed by a pragma if the loop
+// unrolling pass is run more than once (which it generally is).
+static void SetLoopAlreadyUnrolled(Loop *L) {
+ MDNode *LoopID = L->getLoopID();
+ if (!LoopID) return;
+
+ // First remove any existing loop unrolling metadata.
+ SmallVector<Value *, 4> Vals;
+ // Reserve first location for self reference to the LoopID metadata node.
+ Vals.push_back(nullptr);
+ for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
+ bool IsUnrollMetadata = false;
+ MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
+ if (MD) {
+ const MDString *S = dyn_cast<MDString>(MD->getOperand(0));
+ IsUnrollMetadata = S && S->getString().startswith("llvm.loop.unroll.");
+ }
+ if (!IsUnrollMetadata) Vals.push_back(LoopID->getOperand(i));
+ }
+
+ // Add unroll(disable) metadata to disable future unrolling.
+ LLVMContext &Context = L->getHeader()->getContext();
+ SmallVector<Value *, 2> DisableOperands;
+ DisableOperands.push_back(MDString::get(Context, "llvm.loop.unroll.enable"));
+ DisableOperands.push_back(ConstantInt::get(Type::getInt1Ty(Context), 0));
+ MDNode *DisableNode = MDNode::get(Context, DisableOperands);
+ Vals.push_back(DisableNode);
+
+ MDNode *NewLoopID = MDNode::get(Context, Vals);
+ // Set operand 0 to refer to the loop id itself.
+ NewLoopID->replaceOperandWith(0, NewLoopID);
+ L->setLoopID(NewLoopID);
+ LoopID->replaceAllUsesWith(NewLoopID);
+}
+
+unsigned LoopUnroll::selectUnrollCount(
+ const Loop *L, unsigned TripCount, bool HasEnablePragma,
+ unsigned PragmaCount, const TargetTransformInfo::UnrollingPreferences &UP,
+ bool &SetExplicitly) {
+ SetExplicitly = true;
+
+ // User-specified count (either as a command-line option or
+ // constructor parameter) has highest precedence.
+ unsigned Count = UserCount ? CurrentCount : 0;
+
+ // If there is no user-specified count, unroll pragmas have the next
+  // highest precedence.
+ if (Count == 0) {
+ if (PragmaCount) {
+ Count = PragmaCount;
+ } else if (HasEnablePragma) {
+ // unroll(enable) pragma without an unroll_count pragma
+      // indicates to unroll the loop fully.
+ Count = TripCount;
+ }
+ }
+
+ if (Count == 0)
+ Count = UP.Count;
+
+ if (Count == 0) {
+ SetExplicitly = false;
+ if (TripCount == 0)
+ // Runtime trip count.
+ Count = UnrollRuntimeCount;
+ else
+ // Conservative heuristic: if we know the trip count, see if we can
+ // completely unroll (subject to the threshold, checked below); otherwise
+        // try to find the greatest modulo of the trip count which is still
+        // under the threshold value.
+ Count = TripCount;
+ }
+ if (TripCount && Count > TripCount)
+ return TripCount;
+ return Count;
+}
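selectUnrollCount applies a strict precedence: user count, then pragma count, then a bare unroll(enable) pragma (full unroll), then the target's preference, then the heuristic. A simplified mirror of that ordering with plain integers; the runtime-trip-count default is omitted here for brevity:

#include <iostream>

// Simplified mirror of the precedence in selectUnrollCount.  Plain unsigneds
// stand in for the pass state.
static unsigned pickCount(unsigned UserCount, unsigned PragmaCount,
                          bool HasEnablePragma, unsigned TargetCount,
                          unsigned TripCount, bool &SetExplicitly) {
  SetExplicitly = true;
  unsigned Count = UserCount;                   // -unroll-count wins
  if (Count == 0)
    Count = PragmaCount ? PragmaCount           // then unroll_count pragma
                        : (HasEnablePragma ? TripCount : 0); // then full unroll
  if (Count == 0)
    Count = TargetCount;                        // then the target's preference
  if (Count == 0) {
    SetExplicitly = false;
    Count = TripCount;                          // finally the heuristic
  }
  return (TripCount && Count > TripCount) ? TripCount : Count;
}

int main() {
  bool Explicit;
  std::cout << pickCount(0, 8, false, 0, 100, Explicit) << "\n"; // 8: pragma count
  std::cout << pickCount(4, 8, false, 0, 100, Explicit) << "\n"; // 4: user count wins
  std::cout << pickCount(0, 0, true, 0, 100, Explicit) << "\n";  // 100: full unroll
}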
+
bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
+ if (skipOptnoneFunction(L))
+ return false;
+
LoopInfo *LI = &getAnalysis<LoopInfo>();
ScalarEvolution *SE = &getAnalysis<ScalarEvolution>();
const TargetTransformInfo &TTI = getAnalysis<TargetTransformInfo>();
@@ -153,26 +364,16 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
BasicBlock *Header = L->getHeader();
DEBUG(dbgs() << "Loop Unroll: F[" << Header->getParent()->getName()
<< "] Loop %" << Header->getName() << "\n");
- (void)Header;
+
+ if (HasUnrollDisablePragma(L)) {
+ return false;
+ }
+ bool HasEnablePragma = HasUnrollEnablePragma(L);
+ unsigned PragmaCount = UnrollCountPragmaValue(L);
+ bool HasPragma = HasEnablePragma || PragmaCount > 0;
TargetTransformInfo::UnrollingPreferences UP;
- UP.Threshold = CurrentThreshold;
- UP.OptSizeThreshold = OptSizeUnrollThreshold;
- UP.Count = CurrentCount;
- UP.Partial = CurrentAllowPartial;
- UP.Runtime = CurrentRuntime;
- TTI.getUnrollingPreferences(L, UP);
-
- // Determine the current unrolling threshold. While this is normally set
- // from UnrollThreshold, it is overridden to a smaller value if the current
- // function is marked as optimize-for-size, and the unroll threshold was
- // not user specified.
- unsigned Threshold = UserThreshold ? CurrentThreshold : UP.Threshold;
- if (!UserThreshold &&
- Header->getParent()->getAttributes().
- hasAttribute(AttributeSet::FunctionIndex,
- Attribute::OptimizeForSize))
- Threshold = UP.OptSizeThreshold;
+ getUnrollingPreferences(L, TTI, UP);
// Find trip count and trip multiple if count is not available
unsigned TripCount = 0;
@@ -186,74 +387,121 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
TripMultiple = SE->getSmallConstantTripMultiple(L, LatchBlock);
}
- bool Runtime = UserRuntime ? CurrentRuntime : UP.Runtime;
+ // Select an initial unroll count. This may be reduced later based
+ // on size thresholds.
+ bool CountSetExplicitly;
+ unsigned Count = selectUnrollCount(L, TripCount, HasEnablePragma, PragmaCount,
+ UP, CountSetExplicitly);
+
+ unsigned NumInlineCandidates;
+ bool notDuplicatable;
+ unsigned LoopSize =
+ ApproximateLoopSize(L, NumInlineCandidates, notDuplicatable, TTI);
+ DEBUG(dbgs() << " Loop Size = " << LoopSize << "\n");
+ uint64_t UnrolledSize = (uint64_t)LoopSize * Count;
+ if (notDuplicatable) {
+ DEBUG(dbgs() << " Not unrolling loop which contains non-duplicatable"
+ << " instructions.\n");
+ return false;
+ }
+ if (NumInlineCandidates != 0) {
+ DEBUG(dbgs() << " Not unrolling loop with inlinable calls.\n");
+ return false;
+ }
- // Use a default unroll-count if the user doesn't specify a value
- // and the trip count is a run-time value. The default is different
- // for run-time or compile-time trip count loops.
- unsigned Count = UserCount ? CurrentCount : UP.Count;
- if (Runtime && Count == 0 && TripCount == 0)
- Count = UnrollRuntimeCount;
+ unsigned Threshold, PartialThreshold;
+ selectThresholds(L, HasPragma, UP, Threshold, PartialThreshold);
- if (Count == 0) {
- // Conservative heuristic: if we know the trip count, see if we can
- // completely unroll (subject to the threshold, checked below); otherwise
- // try to find greatest modulo of the trip count which is still under
- // threshold value.
- if (TripCount == 0)
- return false;
- Count = TripCount;
+  // Given Count, TripCount, and the thresholds, determine the type of
+  // unrolling to be performed.
+ enum { Full = 0, Partial = 1, Runtime = 2 };
+ int Unrolling;
+ if (TripCount && Count == TripCount) {
+ if (Threshold != NoThreshold && UnrolledSize > Threshold) {
+ DEBUG(dbgs() << " Too large to fully unroll with count: " << Count
+ << " because size: " << UnrolledSize << ">" << Threshold
+ << "\n");
+ Unrolling = Partial;
+ } else {
+ Unrolling = Full;
+ }
+ } else if (TripCount && Count < TripCount) {
+ Unrolling = Partial;
+ } else {
+ Unrolling = Runtime;
}
- // Enforce the threshold.
- if (Threshold != NoThreshold) {
- unsigned NumInlineCandidates;
- bool notDuplicatable;
- unsigned LoopSize = ApproximateLoopSize(L, NumInlineCandidates,
- notDuplicatable, TTI);
- DEBUG(dbgs() << " Loop Size = " << LoopSize << "\n");
- if (notDuplicatable) {
- DEBUG(dbgs() << " Not unrolling loop which contains non duplicatable"
- << " instructions.\n");
+ // Reduce count based on the type of unrolling and the threshold values.
+ unsigned OriginalCount = Count;
+ bool AllowRuntime = UserRuntime ? CurrentRuntime : UP.Runtime;
+ if (Unrolling == Partial) {
+ bool AllowPartial = UserAllowPartial ? CurrentAllowPartial : UP.Partial;
+ if (!AllowPartial && !CountSetExplicitly) {
+ DEBUG(dbgs() << " will not try to unroll partially because "
+ << "-unroll-allow-partial not given\n");
return false;
}
- if (NumInlineCandidates != 0) {
- DEBUG(dbgs() << " Not unrolling loop with inlinable calls.\n");
+ if (PartialThreshold != NoThreshold && UnrolledSize > PartialThreshold) {
+ // Reduce unroll count to be modulo of TripCount for partial unrolling.
+ Count = PartialThreshold / LoopSize;
+ while (Count != 0 && TripCount % Count != 0)
+ Count--;
+ }
+ } else if (Unrolling == Runtime) {
+ if (!AllowRuntime && !CountSetExplicitly) {
+ DEBUG(dbgs() << " will not try to unroll loop with runtime trip count "
+ << "-unroll-runtime not given\n");
return false;
}
- uint64_t Size = (uint64_t)LoopSize*Count;
- if (TripCount != 1 && Size > Threshold) {
- DEBUG(dbgs() << " Too large to fully unroll with count: " << Count
- << " because size: " << Size << ">" << Threshold << "\n");
- bool AllowPartial = UserAllowPartial ? CurrentAllowPartial : UP.Partial;
- if (!AllowPartial && !(Runtime && TripCount == 0)) {
- DEBUG(dbgs() << " will not try to unroll partially because "
- << "-unroll-allow-partial not given\n");
- return false;
- }
- if (TripCount) {
- // Reduce unroll count to be modulo of TripCount for partial unrolling
- Count = Threshold / LoopSize;
- while (Count != 0 && TripCount%Count != 0)
- Count--;
- }
- else if (Runtime) {
- // Reduce unroll count to be a lower power-of-two value
- while (Count != 0 && Size > Threshold) {
- Count >>= 1;
- Size = LoopSize*Count;
- }
- }
- if (Count < 2) {
- DEBUG(dbgs() << " could not unroll partially\n");
- return false;
+ // Reduce unroll count to be the largest power-of-two factor of
+ // the original count which satisfies the threshold limit.
+ while (Count != 0 && UnrolledSize > PartialThreshold) {
+ Count >>= 1;
+ UnrolledSize = LoopSize * Count;
+ }
+ if (Count > UP.MaxCount)
+ Count = UP.MaxCount;
+ DEBUG(dbgs() << " partially unrolling with count: " << Count << "\n");
+ }
+
+ if (HasPragma) {
+ // Mark loop as unrolled to prevent unrolling beyond that
+ // requested by the pragma.
+ SetLoopAlreadyUnrolled(L);
+
+ // Emit optimization remarks if we are unable to unroll the loop
+ // as directed by a pragma.
+ DebugLoc LoopLoc = L->getStartLoc();
+ Function *F = Header->getParent();
+ LLVMContext &Ctx = F->getContext();
+ if (HasEnablePragma && PragmaCount == 0) {
+ if (TripCount && Count != TripCount) {
+ emitOptimizationRemarkMissed(
+ Ctx, DEBUG_TYPE, *F, LoopLoc,
+ "Unable to fully unroll loop as directed by unroll(enable) pragma "
+ "because unrolled size is too large.");
+ } else if (!TripCount) {
+ emitOptimizationRemarkMissed(
+ Ctx, DEBUG_TYPE, *F, LoopLoc,
+ "Unable to fully unroll loop as directed by unroll(enable) pragma "
+ "because loop has a runtime trip count.");
}
- DEBUG(dbgs() << " partially unrolling with count: " << Count << "\n");
+ } else if (PragmaCount > 0 && Count != OriginalCount) {
+ emitOptimizationRemarkMissed(
+ Ctx, DEBUG_TYPE, *F, LoopLoc,
+ "Unable to unroll loop the number of times directed by "
+ "unroll_count pragma because unrolled size is too large.");
}
}
+ if (Unrolling != Full && Count < 2) {
+ // Partial unrolling by 1 is a nop. For full unrolling, a factor
+ // of 1 makes sense because loop control can be eliminated.
+ return false;
+ }
+
// Unroll the loop.
- if (!UnrollLoop(L, Count, TripCount, Runtime, TripMultiple, LI, &LPM))
+ if (!UnrollLoop(L, Count, TripCount, AllowRuntime, TripMultiple, LI, this, &LPM))
return false;
return true;
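For orientation, the hunk above splits the old single-threshold logic into an explicit Full/Partial/Runtime classification followed by count reduction. The snippet below is a minimal standalone sketch in plain C++ of the two reduction strategies, using hypothetical names and parameters; it is not the pass's own helper code.

#include <cstdint>

// Illustrative only: mirrors the count-reduction logic in the hunk above.
// Partial unrolling lowers Count to a divisor of TripCount that fits the
// threshold; runtime unrolling halves Count down to a power of two.
unsigned reduceUnrollCount(unsigned Count, unsigned TripCount, unsigned LoopSize,
                           unsigned PartialThreshold, bool PartialUnroll) {
  uint64_t UnrolledSize = (uint64_t)LoopSize * Count;
  if (PartialUnroll) {
    // Pick the largest count that divides TripCount and fits the threshold.
    if (UnrolledSize > PartialThreshold && LoopSize != 0) {
      Count = PartialThreshold / LoopSize;
      while (Count != 0 && TripCount % Count != 0)
        --Count;
    }
  } else {
    // Runtime trip count: halve the count until the unrolled size fits.
    while (Count != 0 && UnrolledSize > PartialThreshold) {
      Count >>= 1;
      UnrolledSize = (uint64_t)LoopSize * Count;
    }
  }
  return Count;
}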
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
index c4ebfd5..977c53a 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -26,13 +26,11 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "loop-unswitch"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CodeMetrics.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
@@ -40,6 +38,7 @@
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/CommandLine.h"
@@ -53,6 +52,8 @@
#include <set>
using namespace llvm;
+#define DEBUG_TYPE "loop-unswitch"
+
STATISTIC(NumBranches, "Number of branches unswitched");
STATISTIC(NumSwitches, "Number of switches unswitched");
STATISTIC(NumSelects , "Number of selects unswitched");
@@ -96,7 +97,7 @@ namespace {
public:
LUAnalysisCache() :
- CurLoopInstructions(0), CurrentLoopProperties(0),
+ CurLoopInstructions(nullptr), CurrentLoopProperties(nullptr),
MaxSize(Threshold)
{}
@@ -151,44 +152,35 @@ namespace {
static char ID; // Pass ID, replacement for typeid
explicit LoopUnswitch(bool Os = false) :
LoopPass(ID), OptimizeForSize(Os), redoLoop(false),
- currentLoop(0), DT(0), loopHeader(0),
- loopPreheader(0) {
+ currentLoop(nullptr), DT(nullptr), loopHeader(nullptr),
+ loopPreheader(nullptr) {
initializeLoopUnswitchPass(*PassRegistry::getPassRegistry());
}
- bool runOnLoop(Loop *L, LPPassManager &LPM);
+ bool runOnLoop(Loop *L, LPPassManager &LPM) override;
bool processCurrentLoop();
/// This transformation requires natural loop information & requires that
/// loop preheaders be inserted into the CFG.
///
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequiredID(LoopSimplifyID);
AU.addPreservedID(LoopSimplifyID);
AU.addRequired<LoopInfo>();
AU.addPreserved<LoopInfo>();
AU.addRequiredID(LCSSAID);
AU.addPreservedID(LCSSAID);
- AU.addPreserved<DominatorTree>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
AU.addPreserved<ScalarEvolution>();
AU.addRequired<TargetTransformInfo>();
}
private:
- virtual void releaseMemory() {
+ void releaseMemory() override {
BranchesInfo.forgetLoop(currentLoop);
}
- /// RemoveLoopFromWorklist - If the specified loop is on the loop worklist,
- /// remove it.
- void RemoveLoopFromWorklist(Loop *L) {
- std::vector<Loop*>::iterator I = std::find(LoopProcessWorklist.begin(),
- LoopProcessWorklist.end(), L);
- if (I != LoopProcessWorklist.end())
- LoopProcessWorklist.erase(I);
- }
-
void initLoopData() {
loopHeader = currentLoop->getHeader();
loopPreheader = currentLoop->getLoopPreheader();
@@ -212,9 +204,8 @@ namespace {
Instruction *InsertPt);
void SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L);
- void RemoveLoopFromHierarchy(Loop *L);
- bool IsTrivialUnswitchCondition(Value *Cond, Constant **Val = 0,
- BasicBlock **LoopExit = 0);
+ bool IsTrivialUnswitchCondition(Value *Cond, Constant **Val = nullptr,
+ BasicBlock **LoopExit = nullptr);
};
}
@@ -225,7 +216,7 @@ bool LUAnalysisCache::countLoop(const Loop *L, const TargetTransformInfo &TTI) {
LoopPropsMapIt PropsIt;
bool Inserted;
- llvm::tie(PropsIt, Inserted) =
+ std::tie(PropsIt, Inserted) =
LoopsProperties.insert(std::make_pair(L, LoopProperties()));
LoopProperties &Props = PropsIt->second;
@@ -283,8 +274,8 @@ void LUAnalysisCache::forgetLoop(const Loop *L) {
LoopsProperties.erase(LIt);
}
- CurrentLoopProperties = 0;
- CurLoopInstructions = 0;
+ CurrentLoopProperties = nullptr;
+ CurLoopInstructions = nullptr;
}
// Mark case value as unswitched.
@@ -355,10 +346,10 @@ static Value *FindLIVLoopCondition(Value *Cond, Loop *L, bool &Changed) {
// We can never unswitch on vector conditions.
if (Cond->getType()->isVectorTy())
- return 0;
+ return nullptr;
// Constants should be folded, not unswitched on!
- if (isa<Constant>(Cond)) return 0;
+ if (isa<Constant>(Cond)) return nullptr;
// TODO: Handle: br (VARIANT|INVARIANT).
@@ -378,13 +369,18 @@ static Value *FindLIVLoopCondition(Value *Cond, Loop *L, bool &Changed) {
return RHS;
}
- return 0;
+ return nullptr;
}
bool LoopUnswitch::runOnLoop(Loop *L, LPPassManager &LPM_Ref) {
+ if (skipOptnoneFunction(L))
+ return false;
+
LI = &getAnalysis<LoopInfo>();
LPM = &LPM_Ref;
- DT = getAnalysisIfAvailable<DominatorTree>();
+ DominatorTreeWrapperPass *DTWP =
+ getAnalysisIfAvailable<DominatorTreeWrapperPass>();
+ DT = DTWP ? &DTWP->getDomTree() : nullptr;
currentLoop = L;
Function *F = currentLoop->getHeader()->getParent();
bool Changed = false;
@@ -397,7 +393,7 @@ bool LoopUnswitch::runOnLoop(Loop *L, LPPassManager &LPM_Ref) {
if (Changed) {
// FIXME: Reconstruct dom info, because it is not preserved properly.
if (DT)
- DT->runOnFunction(*F);
+ DT->recalculate(*F);
}
return Changed;
}
@@ -456,7 +452,7 @@ bool LoopUnswitch::processCurrentLoop() {
// Find a value to unswitch on:
// FIXME: this should choose the most expensive case!
// FIXME: scan for a case with a non-critical edge?
- Constant *UnswitchVal = 0;
+ Constant *UnswitchVal = nullptr;
// Do not process same value again and again.
// At this point we have some cases already unswitched and
@@ -513,7 +509,7 @@ static bool isTrivialLoopExitBlockHelper(Loop *L, BasicBlock *BB,
if (!L->contains(BB)) {
// Otherwise, this is a loop exit; this is fine so long as this is the
// first exit.
- if (ExitBB != 0) return false;
+ if (ExitBB) return false;
ExitBB = BB;
return true;
}
@@ -540,10 +536,10 @@ static bool isTrivialLoopExitBlockHelper(Loop *L, BasicBlock *BB,
static BasicBlock *isTrivialLoopExitBlock(Loop *L, BasicBlock *BB) {
std::set<BasicBlock*> Visited;
Visited.insert(L->getHeader()); // Branches to header make infinite loops.
- BasicBlock *ExitBB = 0;
+ BasicBlock *ExitBB = nullptr;
if (isTrivialLoopExitBlockHelper(L, BB, ExitBB, Visited))
return ExitBB;
- return 0;
+ return nullptr;
}
/// IsTrivialUnswitchCondition - Check to see if this unswitch condition is
@@ -564,7 +560,7 @@ bool LoopUnswitch::IsTrivialUnswitchCondition(Value *Cond, Constant **Val,
TerminatorInst *HeaderTerm = Header->getTerminator();
LLVMContext &Context = Header->getContext();
- BasicBlock *LoopExitBB = 0;
+ BasicBlock *LoopExitBB = nullptr;
if (BranchInst *BI = dyn_cast<BranchInst>(HeaderTerm)) {
// If the header block doesn't end with a conditional branch on Cond, we
// can't handle it.
@@ -634,8 +630,8 @@ bool LoopUnswitch::IsTrivialUnswitchCondition(Value *Cond, Constant **Val,
/// unswitch the loop, reprocess the pieces, then return true.
bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val) {
Function *F = loopHeader->getParent();
- Constant *CondVal = 0;
- BasicBlock *ExitBlock = 0;
+ Constant *CondVal = nullptr;
+ BasicBlock *ExitBlock = nullptr;
if (IsTrivialUnswitchCondition(LoopCond, &CondVal, &ExitBlock)) {
// If the condition is trivial, always unswitch. There is no code growth
@@ -934,9 +930,8 @@ static void ReplaceUsesOfWith(Instruction *I, Value *V,
Worklist.push_back(Use);
// Add users to the worklist which may be simplified now.
- for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
- UI != E; ++UI)
- Worklist.push_back(cast<Instruction>(*UI));
+ for (User *U : I->users())
+ Worklist.push_back(cast<Instruction>(U));
LPM->deleteSimpleAnalysisValue(I, L);
RemoveFromWorklist(I, Worklist);
I->replaceAllUsesWith(V);
@@ -944,17 +939,6 @@ static void ReplaceUsesOfWith(Instruction *I, Value *V,
++NumSimplify;
}
-/// RemoveLoopFromHierarchy - We have discovered that the specified loop has
-/// become unwrapped, either because the backedge was deleted, or because the
-/// edge into the header was removed. If the edge into the header from the
-/// latch block was removed, the loop is unwrapped but subloops are still alive,
-/// so they just reparent loops. If the loops are actually dead, they will be
-/// removed later.
-void LoopUnswitch::RemoveLoopFromHierarchy(Loop *L) {
- LPM->deleteLoopFromQueue(L);
- RemoveLoopFromWorklist(L);
-}
-
// RewriteLoopBodyWithConditionConstant - We know either that the value LIC has
// the value specified by Val in the specified loop, or we know it does NOT have
// that value. Rewrite any uses of LIC or of properties correlated to it.
@@ -986,12 +970,11 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
Replacement = ConstantInt::get(Type::getInt1Ty(Val->getContext()),
!cast<ConstantInt>(Val)->getZExtValue());
- for (Value::use_iterator UI = LIC->use_begin(), E = LIC->use_end();
- UI != E; ++UI) {
- Instruction *U = dyn_cast<Instruction>(*UI);
- if (!U || !L->contains(U))
+ for (User *U : LIC->users()) {
+ Instruction *UI = dyn_cast<Instruction>(U);
+ if (!UI || !L->contains(UI))
continue;
- Worklist.push_back(U);
+ Worklist.push_back(UI);
}
for (std::vector<Instruction*>::iterator UI = Worklist.begin(),
@@ -1005,20 +988,19 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
// Otherwise, we don't know the precise value of LIC, but we do know that it
// is certainly NOT "Val". As such, simplify any uses in the loop that we
// can. This case occurs when we unswitch switch statements.
- for (Value::use_iterator UI = LIC->use_begin(), E = LIC->use_end();
- UI != E; ++UI) {
- Instruction *U = dyn_cast<Instruction>(*UI);
- if (!U || !L->contains(U))
+ for (User *U : LIC->users()) {
+ Instruction *UI = dyn_cast<Instruction>(U);
+ if (!UI || !L->contains(UI))
continue;
- Worklist.push_back(U);
+ Worklist.push_back(UI);
// TODO: We could do other simplifications, for example, turning
// 'icmp eq LIC, Val' -> false.
// If we know that LIC is not Val, use this info to simplify code.
- SwitchInst *SI = dyn_cast<SwitchInst>(U);
- if (SI == 0 || !isa<ConstantInt>(Val)) continue;
+ SwitchInst *SI = dyn_cast<SwitchInst>(UI);
+ if (!SI || !isa<ConstantInt>(Val)) continue;
SwitchInst::CaseIt DeadCase = SI->findCaseValue(cast<ConstantInt>(Val));
// Default case is live for multiple values.
diff --git a/contrib/llvm/lib/Transforms/Scalar/LowerAtomic.cpp b/contrib/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
index 8ced494..3314e1e 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "loweratomic"
#include "llvm/Transforms/Scalar.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
@@ -20,6 +19,8 @@
#include "llvm/Pass.h"
using namespace llvm;
+#define DEBUG_TYPE "loweratomic"
+
static bool LowerAtomicCmpXchgInst(AtomicCmpXchgInst *CXI) {
IRBuilder<> Builder(CXI->getParent(), CXI);
Value *Ptr = CXI->getPointerOperand();
@@ -31,7 +32,10 @@ static bool LowerAtomicCmpXchgInst(AtomicCmpXchgInst *CXI) {
Value *Res = Builder.CreateSelect(Equal, Val, Orig);
Builder.CreateStore(Res, Ptr);
- CXI->replaceAllUsesWith(Orig);
+ Res = Builder.CreateInsertValue(UndefValue::get(CXI->getType()), Orig, 0);
+ Res = Builder.CreateInsertValue(Res, Equal, 1);
+
+ CXI->replaceAllUsesWith(Res);
CXI->eraseFromParent();
return true;
}
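The LowerAtomic change above tracks the LLVM 3.5 IR change in which cmpxchg yields a {original value, success flag} pair instead of a single value, so the lowered sequence now rebuilds that pair with two insertvalue instructions. Below is a hedged plain-C++ analogue of what the lowered sequence computes; the names are illustrative and these are not LLVM API calls.

#include <utility>

// Sketch of the single-threaded semantics the lowered IR implements
// for a cmpxchg on an int.
std::pair<int, bool> lowerCmpXchg(int *Ptr, int Cmp, int Val) {
  int Orig = *Ptr;             // CreateLoad(Ptr)
  bool Equal = (Orig == Cmp);  // CreateICmpEQ(Orig, Cmp)
  *Ptr = Equal ? Val : Orig;   // CreateSelect + CreateStore
  return {Orig, Equal};        // the two CreateInsertValue calls above
}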
@@ -42,7 +46,7 @@ static bool LowerAtomicRMWInst(AtomicRMWInst *RMWI) {
Value *Val = RMWI->getValOperand();
LoadInst *Orig = Builder.CreateLoad(Ptr);
- Value *Res = NULL;
+ Value *Res = nullptr;
switch (RMWI->getOperation()) {
default: llvm_unreachable("Unexpected RMW operation");
@@ -111,7 +115,9 @@ namespace {
LowerAtomic() : BasicBlockPass(ID) {
initializeLowerAtomicPass(*PassRegistry::getPassRegistry());
}
- bool runOnBasicBlock(BasicBlock &BB) {
+ bool runOnBasicBlock(BasicBlock &BB) override {
+ if (skipOptnoneFunction(BB))
+ return false;
bool Changed = false;
for (BasicBlock::iterator DI = BB.begin(), DE = BB.end(); DI != DE; ) {
Instruction *Inst = DI++;
diff --git a/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 9912d3d..7c184a4 100644
--- a/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -12,27 +12,28 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include <list>
using namespace llvm;
+#define DEBUG_TYPE "memcpyopt"
+
STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
@@ -49,7 +50,7 @@ static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
int64_t Offset = 0;
for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
- if (OpC == 0)
+ if (!OpC)
return VariableIdxFound = true;
if (OpC->isZero()) continue; // No offset.
@@ -75,6 +76,13 @@ static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
const DataLayout &TD) {
Ptr1 = Ptr1->stripPointerCasts();
Ptr2 = Ptr2->stripPointerCasts();
+
+ // Handle the trivial case first.
+ if (Ptr1 == Ptr2) {
+ Offset = 0;
+ return true;
+ }
+
GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
@@ -82,12 +90,12 @@ static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
// If one pointer is a GEP and the other isn't, then see if the GEP is a
// constant offset from the base, as in "P" and "gep P, 1".
- if (GEP1 && GEP2 == 0 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
+ if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, TD);
return !VariableIdxFound;
}
- if (GEP2 && GEP1 == 0 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
+ if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, TD);
return !VariableIdxFound;
}
@@ -195,9 +203,9 @@ class MemsetRanges {
/// because each element is relatively large and expensive to copy.
std::list<MemsetRange> Ranges;
typedef std::list<MemsetRange>::iterator range_iterator;
- const DataLayout &TD;
+ const DataLayout &DL;
public:
- MemsetRanges(const DataLayout &td) : TD(td) {}
+ MemsetRanges(const DataLayout &DL) : DL(DL) {}
typedef std::list<MemsetRange>::const_iterator const_iterator;
const_iterator begin() const { return Ranges.begin(); }
@@ -212,7 +220,7 @@ public:
}
void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
- int64_t StoreSize = TD.getTypeStoreSize(SI->getOperand(0)->getType());
+ int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());
addRange(OffsetFromFirst, StoreSize,
SI->getPointerOperand(), SI->getAlignment(), SI);
@@ -305,23 +313,23 @@ namespace {
class MemCpyOpt : public FunctionPass {
MemoryDependenceAnalysis *MD;
TargetLibraryInfo *TLI;
- const DataLayout *TD;
+ const DataLayout *DL;
public:
static char ID; // Pass identification, replacement for typeid
MemCpyOpt() : FunctionPass(ID) {
initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
- MD = 0;
- TLI = 0;
- TD = 0;
+ MD = nullptr;
+ TLI = nullptr;
+ DL = nullptr;
}
- bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
private:
// This transformation requires dominator and postdominator info
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
- AU.addRequired<DominatorTree>();
+ AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<MemoryDependenceAnalysis>();
AU.addRequired<AliasAnalysis>();
AU.addRequired<TargetLibraryInfo>();
@@ -353,7 +361,7 @@ FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }
INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
false, false)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
@@ -366,13 +374,13 @@ INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
/// attempts to merge them together into a memcpy/memset.
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
Value *StartPtr, Value *ByteVal) {
- if (TD == 0) return 0;
+ if (!DL) return nullptr;
// Okay, so we now have a single store whose value can be splatted. Scan to find
// all subsequent stores of the same value at offsets from the same pointer.
// Join these together into ranges, so we can decide whether contiguous blocks
// are stored.
- MemsetRanges Ranges(*TD);
+ MemsetRanges Ranges(*DL);
BasicBlock::iterator BI = StartInst;
for (++BI; !isa<TerminatorInst>(BI); ++BI) {
@@ -396,7 +404,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
// Check to see if this store is to a constant offset from the start ptr.
int64_t Offset;
if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(),
- Offset, *TD))
+ Offset, *DL))
break;
Ranges.addStore(Offset, NextStore);
@@ -409,7 +417,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
// Check to see if this store is to a constant offset from the start ptr.
int64_t Offset;
- if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *TD))
+ if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *DL))
break;
Ranges.addMemSet(Offset, MSI);
@@ -419,7 +427,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
// If we have no ranges, then we just had a single store with nothing that
// could be merged in. This is a very common case of course.
if (Ranges.empty())
- return 0;
+ return nullptr;
// If we had at least one store that could be merged in, add the starting
// store as well. We try to avoid this unless there is at least something
@@ -433,7 +441,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
// Now that we have full information about ranges, loop over the ranges and
// emit memset's for anything big enough to be worthwhile.
- Instruction *AMemSet = 0;
+ Instruction *AMemSet = nullptr;
for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
I != E; ++I) {
const MemsetRange &Range = *I;
@@ -441,7 +449,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
if (Range.TheStores.size() == 1) continue;
// If it is profitable to lower this range to memset, do so now.
- if (!Range.isProfitableToUseMemset(*TD))
+ if (!Range.isProfitableToUseMemset(*DL))
continue;
// Otherwise, we do want to transform this! Create a new memset.
@@ -453,7 +461,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
if (Alignment == 0) {
Type *EltType =
cast<PointerType>(StartPtr->getType())->getElementType();
- Alignment = TD->getABITypeAlignment(EltType);
+ Alignment = DL->getABITypeAlignment(EltType);
}
AMemSet =
@@ -484,7 +492,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
if (!SI->isSimple()) return false;
- if (TD == 0) return false;
+ if (!DL) return false;
// Detect cases where we're performing call slot forwarding, but
// happen to be using a load-store pair to implement it, rather than
@@ -493,7 +501,7 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
if (LI->isSimple() && LI->hasOneUse() &&
LI->getParent() == SI->getParent()) {
MemDepResult ldep = MD->getDependency(LI);
- CallInst *C = 0;
+ CallInst *C = nullptr;
if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
C = dyn_cast<CallInst>(ldep.getInst());
@@ -505,7 +513,7 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
for (BasicBlock::iterator I = --BasicBlock::iterator(SI),
E = C; I != E; --I) {
if (AA.getModRefInfo(&*I, StoreLoc) != AliasAnalysis::NoModRef) {
- C = 0;
+ C = nullptr;
break;
}
}
@@ -514,15 +522,15 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
if (C) {
unsigned storeAlign = SI->getAlignment();
if (!storeAlign)
- storeAlign = TD->getABITypeAlignment(SI->getOperand(0)->getType());
+ storeAlign = DL->getABITypeAlignment(SI->getOperand(0)->getType());
unsigned loadAlign = LI->getAlignment();
if (!loadAlign)
- loadAlign = TD->getABITypeAlignment(LI->getType());
+ loadAlign = DL->getABITypeAlignment(LI->getType());
bool changed = performCallSlotOptzn(LI,
SI->getPointerOperand()->stripPointerCasts(),
LI->getPointerOperand()->stripPointerCasts(),
- TD->getTypeStoreSize(SI->getOperand(0)->getType()),
+ DL->getTypeStoreSize(SI->getOperand(0)->getType()),
std::min(storeAlign, loadAlign), C);
if (changed) {
MD->removeInstruction(SI);
@@ -596,13 +604,13 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
return false;
// Check that all of src is copied to dest.
- if (TD == 0) return false;
+ if (!DL) return false;
ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
if (!srcArraySize)
return false;
- uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
+ uint64_t srcSize = DL->getTypeAllocSize(srcAlloca->getAllocatedType()) *
srcArraySize->getZExtValue();
if (cpyLen < srcSize)
@@ -617,7 +625,7 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
if (!destArraySize)
return false;
- uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
+ uint64_t destSize = DL->getTypeAllocSize(A->getAllocatedType()) *
destArraySize->getZExtValue();
if (destSize < srcSize)
@@ -636,7 +644,7 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
return false;
}
- uint64_t destSize = TD->getTypeAllocSize(StructTy);
+ uint64_t destSize = DL->getTypeAllocSize(StructTy);
if (destSize < srcSize)
return false;
} else {
@@ -646,7 +654,7 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
// Check that dest points to memory that is at least as aligned as src.
unsigned srcAlign = srcAlloca->getAlignment();
if (!srcAlign)
- srcAlign = TD->getABITypeAlignment(srcAlloca->getAllocatedType());
+ srcAlign = DL->getABITypeAlignment(srcAlloca->getAllocatedType());
bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
// If dest is not aligned enough and we can't increase its alignment then
// bail out.
@@ -657,30 +665,34 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
// guarantees that it holds only undefined values when passed in (so the final
// memcpy can be dropped), that it is not read or written between the call and
// the memcpy, and that writing beyond the end of it is undefined.
- SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
- srcAlloca->use_end());
+ SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
+ srcAlloca->user_end());
while (!srcUseList.empty()) {
- User *UI = srcUseList.pop_back_val();
+ User *U = srcUseList.pop_back_val();
- if (isa<BitCastInst>(UI)) {
- for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
- I != E; ++I)
- srcUseList.push_back(*I);
- } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
+ if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
+ for (User *UU : U->users())
+ srcUseList.push_back(UU);
+ } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
if (G->hasAllZeroIndices())
- for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
- I != E; ++I)
- srcUseList.push_back(*I);
+ for (User *UU : U->users())
+ srcUseList.push_back(UU);
else
return false;
- } else if (UI != C && UI != cpy) {
+ } else if (U != C && U != cpy) {
return false;
}
}
+ // Check that src isn't captured by the called function since the
+ // transformation can cause aliasing issues in that case.
+ for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
+ if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i))
+ return false;
+
// Since we're changing the parameter to the callsite, we need to make sure
// that what would be the new parameter dominates the callsite.
- DominatorTree &DT = getAnalysis<DominatorTree>();
+ DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
if (!DT.dominates(cpyDestInst, C))
return false;
@@ -816,9 +828,8 @@ bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
- // We can only optimize statically-sized memcpy's that are non-volatile.
- ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
- if (CopySize == 0 || M->isVolatile()) return false;
+ // We can only optimize non-volatile memcpy's.
+ if (M->isVolatile()) return false;
// If the source and destination of the memcpy are the same, then zap it.
if (M->getSource() == M->getDest()) {
@@ -832,7 +843,7 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
if (GV->isConstant() && GV->hasDefinitiveInitializer())
if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
IRBuilder<> Builder(M);
- Builder.CreateMemSet(M->getRawDest(), ByteVal, CopySize,
+ Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
M->getAlignment(), false);
MD->removeInstruction(M);
M->eraseFromParent();
@@ -840,9 +851,16 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
return true;
}
- // The are two possible optimizations we can do for memcpy:
+ // The optimizations after this point require the memcpy size.
+ ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
+ if (!CopySize) return false;
+
+  // There are three possible optimizations we can do for memcpy:
// a) memcpy-memcpy xform which exposes redundance for DSE.
// b) call-memcpy xform for return slot optimization.
+ // c) memcpy from freshly alloca'd space or space that has just started its
+ // lifetime copies undefined data, and we can therefore eliminate the
+ // memcpy in favor of the data that was already at the destination.
MemDepResult DepInfo = MD->getDependency(M);
if (DepInfo.isClobber()) {
if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
@@ -862,6 +880,25 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
if (SrcDepInfo.isClobber()) {
if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());
+ } else if (SrcDepInfo.isDef()) {
+ Instruction *I = SrcDepInfo.getInst();
+ bool hasUndefContents = false;
+
+ if (isa<AllocaInst>(I)) {
+ hasUndefContents = true;
+ } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ if (II->getIntrinsicID() == Intrinsic::lifetime_start)
+ if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
+ if (LTSize->getZExtValue() >= CopySize->getZExtValue())
+ hasUndefContents = true;
+ }
+
+ if (hasUndefContents) {
+ MD->removeInstruction(M);
+ M->eraseFromParent();
+ ++NumMemCpyInstr;
+ return true;
+ }
}
return false;
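The new SrcDepInfo.isDef() branch above adds case (c): when the memcpy source is a fresh alloca, or memory whose lifetime.start covers the copied size, the bytes being copied are undefined and the memcpy itself can be deleted. A rough source-level analogue follows; it is purely illustrative and not taken from the patch or its tests.

#include <cstring>

// The source buffer is never written, so its contents are indeterminate and
// the copy below is a candidate for removal under case (c) above.
void copyFromUninitialized(char *Dst) {
  char Src[64];                      // freshly "alloca'd", contents undefined
  std::memcpy(Dst, Src, sizeof Src);
}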
@@ -899,12 +936,12 @@ bool MemCpyOpt::processMemMove(MemMoveInst *M) {
/// processByValArgument - This is called on every byval argument in call sites.
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
- if (TD == 0) return false;
+ if (!DL) return false;
// Find out what feeds this byval argument.
Value *ByValArg = CS.getArgument(ArgNo);
Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
- uint64_t ByValSize = TD->getTypeAllocSize(ByValTy);
+ uint64_t ByValSize = DL->getTypeAllocSize(ByValTy);
MemDepResult DepInfo =
MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
true, CS.getInstruction(),
@@ -916,13 +953,13 @@ bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
// a memcpy, see if we can byval from the source of the memcpy instead of the
// result.
MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
- if (MDep == 0 || MDep->isVolatile() ||
+ if (!MDep || MDep->isVolatile() ||
ByValArg->stripPointerCasts() != MDep->getDest())
return false;
// The length of the memcpy must be larger than or equal to the size of the byval.
ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
- if (C1 == 0 || C1->getValue().getZExtValue() < ByValSize)
+ if (!C1 || C1->getValue().getZExtValue() < ByValSize)
return false;
// Get the alignment of the byval. If the call doesn't specify the alignment,
@@ -933,7 +970,7 @@ bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
// If it is greater than the memcpy, then we check to see if we can force the
// source of the memcpy to the alignment we need. If we fail, we bail out.
if (MDep->getAlignment() < ByValAlign &&
- getOrEnforceKnownAlignment(MDep->getSource(),ByValAlign, TD) < ByValAlign)
+ getOrEnforceKnownAlignment(MDep->getSource(),ByValAlign, DL) < ByValAlign)
return false;
// Verify that the copied-from memory doesn't change in between the memcpy and
@@ -1007,9 +1044,13 @@ bool MemCpyOpt::iterateOnFunction(Function &F) {
// function.
//
bool MemCpyOpt::runOnFunction(Function &F) {
+ if (skipOptnoneFunction(F))
+ return false;
+
bool MadeChange = false;
MD = &getAnalysis<MemoryDependenceAnalysis>();
- TD = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = &getAnalysis<TargetLibraryInfo>();
// If we don't have at least memset and memcpy, there is little point of doing
@@ -1024,6 +1065,6 @@ bool MemCpyOpt::runOnFunction(Function &F) {
MadeChange = true;
}
- MD = 0;
+ MD = nullptr;
return MadeChange;
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp b/contrib/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
new file mode 100644
index 0000000..a7e8024
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
@@ -0,0 +1,632 @@
+//===- MergedLoadStoreMotion.cpp - merge and hoist/sink load/stores -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//! \file
+//! \brief This pass performs merges of loads and stores on both sides of a
+// diamond (hammock). It hoists the loads and sinks the stores.
+//
+// The algorithm iteratively hoists two loads to the same address out of a
+// diamond (hammock) and merges them into a single load in the header. Similarly,
+// it sinks and merges two stores to the tail block (footer). The algorithm
+// iterates over the instructions of one side of the diamond and attempts to
+// find a matching load/store on the other side. It hoists / sinks when it
+// thinks it safe to do so. This optimization helps with eg. hiding load
+// latencies, triggering if-conversion, and reducing static code size.
+//
+//===----------------------------------------------------------------------===//
+//
+//
+// Example:
+// Diamond shaped code before merge:
+//
+// header:
+// br %cond, label %if.then, label %if.else
+// / \
+// / \
+// / \
+// if.then: if.else:
+// %lt = load %addr_l %le = load %addr_l
+// <use %lt> <use %le>
+// <...> <...>
+// store %st, %addr_s store %se, %addr_s
+// br label %if.end br label %if.end
+// \ /
+// \ /
+// \ /
+// if.end ("footer"):
+// <...>
+//
+// Diamond shaped code after merge:
+//
+// header:
+// %l = load %addr_l
+// br %cond, label %if.then, label %if.else
+// / \
+// / \
+// / \
+// if.then: if.else:
+// <use %l> <use %l>
+// <...> <...>
+// br label %if.end br label %if.end
+// \ /
+// \ /
+// \ /
+// if.end ("footer"):
+// %s.sink = phi [%st, if.then], [%se, if.else]
+// <...>
+// store %s.sink, %addr_s
+// <...>
+//
+//
+//===----------------------- TODO -----------------------------------------===//
+//
+// 1) Generalize to regions other than diamonds
+// 2) Be more aggressive merging memory operations
+// Note that both changes require register pressure control
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/CFG.h"
+#include "llvm/Analysis/Loads.h"
+#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/MemoryDependenceAnalysis.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/SSAUpdater.h"
+#include <vector>
+using namespace llvm;
+
+#define DEBUG_TYPE "mldst-motion"
+
+//===----------------------------------------------------------------------===//
+// MergedLoadStoreMotion Pass
+//===----------------------------------------------------------------------===//
+static cl::opt<bool>
+EnableMLSM("mlsm", cl::desc("Enable motion of merged load and store"),
+ cl::init(true));
+
+namespace {
+class MergedLoadStoreMotion : public FunctionPass {
+ AliasAnalysis *AA;
+ MemoryDependenceAnalysis *MD;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ explicit MergedLoadStoreMotion(void)
+ : FunctionPass(ID), MD(nullptr), MagicCompileTimeControl(250) {
+ initializeMergedLoadStoreMotionPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F) override;
+
+private:
+  // This transformation requires memory dependence and alias analysis info
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<TargetLibraryInfo>();
+ AU.addRequired<MemoryDependenceAnalysis>();
+ AU.addRequired<AliasAnalysis>();
+ AU.addPreserved<AliasAnalysis>();
+ }
+
+ // Helper routines
+
+ ///
+ /// \brief Remove instruction from parent and update memory dependence
+ /// analysis.
+ ///
+ void removeInstruction(Instruction *Inst);
+ BasicBlock *getDiamondTail(BasicBlock *BB);
+ bool isDiamondHead(BasicBlock *BB);
+ // Routines for hoisting loads
+ bool isLoadHoistBarrier(Instruction *Inst);
+ LoadInst *canHoistFromBlock(BasicBlock *BB, LoadInst *LI);
+ void hoistInstruction(BasicBlock *BB, Instruction *HoistCand,
+ Instruction *ElseInst);
+ bool isSafeToHoist(Instruction *I) const;
+ bool hoistLoad(BasicBlock *BB, LoadInst *HoistCand, LoadInst *ElseInst);
+ bool mergeLoads(BasicBlock *BB);
+ // Routines for sinking stores
+ StoreInst *canSinkFromBlock(BasicBlock *BB, StoreInst *SI);
+ PHINode *getPHIOperand(BasicBlock *BB, StoreInst *S0, StoreInst *S1);
+ bool isStoreSinkBarrier(Instruction *Inst);
+ bool sinkStore(BasicBlock *BB, StoreInst *SinkCand, StoreInst *ElseInst);
+ bool mergeStores(BasicBlock *BB);
+ // The mergeLoad/Store algorithms could have Size0 * Size1 complexity,
+ // where Size0 and Size1 are the #instructions on the two sides of
+ // the diamond. The constant chosen here is arbitrary. Compiler Time
+ // Control is enforced by the check Size0 * Size1 < MagicCompileTimeControl.
+ const int MagicCompileTimeControl;
+};
+
+char MergedLoadStoreMotion::ID = 0;
+}
+
+///
+/// \brief createMergedLoadStoreMotionPass - The public interface to this file.
+///
+FunctionPass *llvm::createMergedLoadStoreMotionPass() {
+ return new MergedLoadStoreMotion();
+}
+
+INITIALIZE_PASS_BEGIN(MergedLoadStoreMotion, "mldst-motion",
+ "MergedLoadStoreMotion", false, false)
+INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(MergedLoadStoreMotion, "mldst-motion",
+ "MergedLoadStoreMotion", false, false)
+
+///
+/// \brief Remove instruction from parent and update memory dependence analysis.
+///
+void MergedLoadStoreMotion::removeInstruction(Instruction *Inst) {
+ // Notify the memory dependence analysis.
+ if (MD) {
+ MD->removeInstruction(Inst);
+ if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
+ MD->invalidateCachedPointerInfo(LI->getPointerOperand());
+ if (Inst->getType()->getScalarType()->isPointerTy()) {
+ MD->invalidateCachedPointerInfo(Inst);
+ }
+ }
+ Inst->eraseFromParent();
+}
+
+///
+/// \brief Return tail block of a diamond.
+///
+BasicBlock *MergedLoadStoreMotion::getDiamondTail(BasicBlock *BB) {
+ assert(isDiamondHead(BB) && "Basic block is not head of a diamond");
+ BranchInst *BI = (BranchInst *)(BB->getTerminator());
+ BasicBlock *Succ0 = BI->getSuccessor(0);
+ BasicBlock *Tail = Succ0->getTerminator()->getSuccessor(0);
+ return Tail;
+}
+
+///
+/// \brief True when BB is the head of a diamond (hammock)
+///
+bool MergedLoadStoreMotion::isDiamondHead(BasicBlock *BB) {
+ if (!BB)
+ return false;
+ if (!isa<BranchInst>(BB->getTerminator()))
+ return false;
+ if (BB->getTerminator()->getNumSuccessors() != 2)
+ return false;
+
+ BranchInst *BI = (BranchInst *)(BB->getTerminator());
+ BasicBlock *Succ0 = BI->getSuccessor(0);
+ BasicBlock *Succ1 = BI->getSuccessor(1);
+
+ if (!Succ0->getSinglePredecessor() ||
+ Succ0->getTerminator()->getNumSuccessors() != 1)
+ return false;
+ if (!Succ1->getSinglePredecessor() ||
+ Succ1->getTerminator()->getNumSuccessors() != 1)
+ return false;
+
+ BasicBlock *Tail = Succ0->getTerminator()->getSuccessor(0);
+ // Ignore triangles.
+ if (Succ1->getTerminator()->getSuccessor(0) != Tail)
+ return false;
+ return true;
+}
+
+///
+/// \brief True when instruction is a hoist barrier for a load
+///
+/// Whenever an instruction could possibly modify the value
+/// being loaded or prevent the load from happening,
+/// it is considered a hoist barrier.
+///
+bool MergedLoadStoreMotion::isLoadHoistBarrier(Instruction *Inst) {
+ // FIXME: A call with no side effects should not be a barrier.
+ // Aren't all such calls covered by mayHaveSideEffects() below?
+ // Then this check can be removed.
+ if (isa<CallInst>(Inst))
+ return true;
+ if (isa<TerminatorInst>(Inst))
+ return true;
+ // FIXME: Conservatively let a store instruction block the load.
+ // Use alias analysis instead.
+ if (isa<StoreInst>(Inst))
+ return true;
+ // Note: mayHaveSideEffects covers all instructions that could
+ // trigger a change to state. Eg. in-flight stores have to be executed
+ // before ordered loads or fences, calls could invoke functions that store
+ // data to memory etc.
+ if (Inst->mayHaveSideEffects()) {
+ return true;
+ }
+ DEBUG(dbgs() << "No Hoist Barrier\n");
+ return false;
+}
+
+///
+/// \brief Decide if a load can be hoisted
+///
+/// When there is a load in \p BB to the same address as \p LI
+/// and it can be hoisted from \p BB, return that load.
+/// Otherwise return Null.
+///
+LoadInst *MergedLoadStoreMotion::canHoistFromBlock(BasicBlock *BB,
+ LoadInst *LI) {
+ LoadInst *I = nullptr;
+ assert(isa<LoadInst>(LI));
+ if (LI->isUsedOutsideOfBlock(LI->getParent()))
+ return nullptr;
+
+ for (BasicBlock::iterator BBI = BB->begin(), BBE = BB->end(); BBI != BBE;
+ ++BBI) {
+ Instruction *Inst = BBI;
+
+    // Only merge and hoist loads when their result is used only in BB
+ if (isLoadHoistBarrier(Inst))
+ break;
+ if (!isa<LoadInst>(Inst))
+ continue;
+ if (Inst->isUsedOutsideOfBlock(Inst->getParent()))
+ continue;
+
+ AliasAnalysis::Location LocLI = AA->getLocation(LI);
+ AliasAnalysis::Location LocInst = AA->getLocation((LoadInst *)Inst);
+ if (AA->isMustAlias(LocLI, LocInst) && LI->getType() == Inst->getType()) {
+ I = (LoadInst *)Inst;
+ break;
+ }
+ }
+ return I;
+}
+
+///
+/// \brief Merge two equivalent instructions \p HoistCand and \p ElseInst into
+/// \p BB
+///
+/// BB is the head of a diamond
+///
+void MergedLoadStoreMotion::hoistInstruction(BasicBlock *BB,
+ Instruction *HoistCand,
+ Instruction *ElseInst) {
+ DEBUG(dbgs() << " Hoist Instruction into BB \n"; BB->dump();
+ dbgs() << "Instruction Left\n"; HoistCand->dump(); dbgs() << "\n";
+ dbgs() << "Instruction Right\n"; ElseInst->dump(); dbgs() << "\n");
+ // Hoist the instruction.
+ assert(HoistCand->getParent() != BB);
+
+ // Intersect optional metadata.
+ HoistCand->intersectOptionalDataWith(ElseInst);
+ HoistCand->dropUnknownMetadata();
+
+ // Prepend point for instruction insert
+ Instruction *HoistPt = BB->getTerminator();
+
+ // Merged instruction
+ Instruction *HoistedInst = HoistCand->clone();
+
+ // Notify AA of the new value.
+ if (isa<LoadInst>(HoistCand))
+ AA->copyValue(HoistCand, HoistedInst);
+
+ // Hoist instruction.
+ HoistedInst->insertBefore(HoistPt);
+
+ HoistCand->replaceAllUsesWith(HoistedInst);
+ removeInstruction(HoistCand);
+ // Replace the else block instruction.
+ ElseInst->replaceAllUsesWith(HoistedInst);
+ removeInstruction(ElseInst);
+}
+
+///
+/// \brief Return true if no operand of \p I is defined in I's parent block
+///
+bool MergedLoadStoreMotion::isSafeToHoist(Instruction *I) const {
+ BasicBlock *Parent = I->getParent();
+ for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
+ Instruction *Instr = dyn_cast<Instruction>(I->getOperand(i));
+ if (Instr && Instr->getParent() == Parent)
+ return false;
+ }
+ return true;
+}
+
+///
+/// \brief Merge two equivalent loads and GEPs and hoist into diamond head
+///
+bool MergedLoadStoreMotion::hoistLoad(BasicBlock *BB, LoadInst *L0,
+ LoadInst *L1) {
+ // Only one definition?
+ Instruction *A0 = dyn_cast<Instruction>(L0->getPointerOperand());
+ Instruction *A1 = dyn_cast<Instruction>(L1->getPointerOperand());
+ if (A0 && A1 && A0->isIdenticalTo(A1) && isSafeToHoist(A0) &&
+ A0->hasOneUse() && (A0->getParent() == L0->getParent()) &&
+ A1->hasOneUse() && (A1->getParent() == L1->getParent()) &&
+ isa<GetElementPtrInst>(A0)) {
+ DEBUG(dbgs() << "Hoist Instruction into BB \n"; BB->dump();
+ dbgs() << "Instruction Left\n"; L0->dump(); dbgs() << "\n";
+ dbgs() << "Instruction Right\n"; L1->dump(); dbgs() << "\n");
+ hoistInstruction(BB, A0, A1);
+ hoistInstruction(BB, L0, L1);
+ return true;
+ } else
+ return false;
+}
+
+///
+/// \brief Try to hoist two loads to the same address into the diamond header
+///
+/// Starting from a diamond head block, iterate over the instructions in one
+/// successor block and try to match a load in the second successor.
+///
+bool MergedLoadStoreMotion::mergeLoads(BasicBlock *BB) {
+ bool MergedLoads = false;
+ assert(isDiamondHead(BB));
+ BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
+ BasicBlock *Succ0 = BI->getSuccessor(0);
+ BasicBlock *Succ1 = BI->getSuccessor(1);
+ // #Instructions in Succ1 for Compile Time Control
+ int Size1 = Succ1->size();
+ int NLoads = 0;
+ for (BasicBlock::iterator BBI = Succ0->begin(), BBE = Succ0->end();
+ BBI != BBE;) {
+
+ Instruction *I = BBI;
+ ++BBI;
+ if (isLoadHoistBarrier(I))
+ break;
+
+    // Only move simple (non-atomic, non-volatile) loads.
+ if (!isa<LoadInst>(I))
+ continue;
+
+ LoadInst *L0 = (LoadInst *)I;
+ if (!L0->isSimple())
+ continue;
+
+ ++NLoads;
+ if (NLoads * Size1 >= MagicCompileTimeControl)
+ break;
+ if (LoadInst *L1 = canHoistFromBlock(Succ1, L0)) {
+ bool Res = hoistLoad(BB, L0, L1);
+ MergedLoads |= Res;
+ // Don't attempt to hoist above loads that had not been hoisted.
+ if (!Res)
+ break;
+ }
+ }
+ return MergedLoads;
+}
+
+///
+/// \brief True when instruction is a sink barrier for a store
+///
+bool MergedLoadStoreMotion::isStoreSinkBarrier(Instruction *Inst) {
+ // FIXME: Conservatively let a load instruction block the store.
+ // Use alias analysis instead.
+ if (isa<LoadInst>(Inst))
+ return true;
+ if (isa<CallInst>(Inst))
+ return true;
+ if (isa<TerminatorInst>(Inst) && !isa<BranchInst>(Inst))
+ return true;
+ // Note: mayHaveSideEffects covers all instructions that could
+ // trigger a change to state. Eg. in-flight stores have to be executed
+ // before ordered loads or fences, calls could invoke functions that store
+ // data to memory etc.
+ if (!isa<StoreInst>(Inst) && Inst->mayHaveSideEffects()) {
+ return true;
+ }
+ DEBUG(dbgs() << "No Sink Barrier\n");
+ return false;
+}
+
+///
+/// \brief Check if \p BB contains a store to the same address as \p SI
+///
+/// \return The store in \p BB when it is safe to sink. Otherwise return Null.
+///
+StoreInst *MergedLoadStoreMotion::canSinkFromBlock(BasicBlock *BB,
+ StoreInst *SI) {
+  StoreInst *I = nullptr;
+ DEBUG(dbgs() << "can Sink? : "; SI->dump(); dbgs() << "\n");
+ for (BasicBlock::reverse_iterator RBI = BB->rbegin(), RBE = BB->rend();
+ RBI != RBE; ++RBI) {
+ Instruction *Inst = &*RBI;
+
+    // Don't look past a store sink barrier.
+ if (isStoreSinkBarrier(Inst))
+ break;
+ if (isa<StoreInst>(Inst)) {
+ AliasAnalysis::Location LocSI = AA->getLocation(SI);
+ AliasAnalysis::Location LocInst = AA->getLocation((StoreInst *)Inst);
+ if (AA->isMustAlias(LocSI, LocInst)) {
+ I = (StoreInst *)Inst;
+ break;
+ }
+ }
+ }
+ return I;
+}
+
+///
+/// \brief Create a PHI node in BB for the operands of S0 and S1
+///
+PHINode *MergedLoadStoreMotion::getPHIOperand(BasicBlock *BB, StoreInst *S0,
+ StoreInst *S1) {
+ // Create a phi if the values mismatch.
+  PHINode *NewPN = nullptr;
+ Value *Opd1 = S0->getValueOperand();
+ Value *Opd2 = S1->getValueOperand();
+ if (Opd1 != Opd2) {
+ NewPN = PHINode::Create(Opd1->getType(), 2, Opd2->getName() + ".sink",
+ BB->begin());
+ NewPN->addIncoming(Opd1, S0->getParent());
+ NewPN->addIncoming(Opd2, S1->getParent());
+ if (NewPN->getType()->getScalarType()->isPointerTy()) {
+ // Notify AA of the new value.
+ AA->copyValue(Opd1, NewPN);
+ AA->copyValue(Opd2, NewPN);
+ // AA needs to be informed when a PHI-use of the pointer value is added
+ for (unsigned I = 0, E = NewPN->getNumIncomingValues(); I != E; ++I) {
+ unsigned J = PHINode::getOperandNumForIncomingValue(I);
+ AA->addEscapingUse(NewPN->getOperandUse(J));
+ }
+ if (MD)
+ MD->invalidateCachedPointerInfo(NewPN);
+ }
+ }
+ return NewPN;
+}
+
+///
+/// \brief Merge two stores to the same address and sink them into \p BB
+///
+/// Also sinks the GEP instruction computing the store address
+///
+bool MergedLoadStoreMotion::sinkStore(BasicBlock *BB, StoreInst *S0,
+ StoreInst *S1) {
+ // Only one definition?
+ Instruction *A0 = dyn_cast<Instruction>(S0->getPointerOperand());
+ Instruction *A1 = dyn_cast<Instruction>(S1->getPointerOperand());
+ if (A0 && A1 && A0->isIdenticalTo(A1) && A0->hasOneUse() &&
+ (A0->getParent() == S0->getParent()) && A1->hasOneUse() &&
+ (A1->getParent() == S1->getParent()) && isa<GetElementPtrInst>(A0)) {
+ DEBUG(dbgs() << "Sink Instruction into BB \n"; BB->dump();
+ dbgs() << "Instruction Left\n"; S0->dump(); dbgs() << "\n";
+ dbgs() << "Instruction Right\n"; S1->dump(); dbgs() << "\n");
+    // Sink the instruction.
+ BasicBlock::iterator InsertPt = BB->getFirstInsertionPt();
+ // Intersect optional metadata.
+ S0->intersectOptionalDataWith(S1);
+ S0->dropUnknownMetadata();
+
+ // Create the new store to be inserted at the join point.
+ StoreInst *SNew = (StoreInst *)(S0->clone());
+ Instruction *ANew = A0->clone();
+ AA->copyValue(S0, SNew);
+ SNew->insertBefore(InsertPt);
+ ANew->insertBefore(SNew);
+
+ assert(S0->getParent() == A0->getParent());
+ assert(S1->getParent() == A1->getParent());
+
+ PHINode *NewPN = getPHIOperand(BB, S0, S1);
+ // New PHI operand? Use it.
+ if (NewPN)
+ SNew->setOperand(0, NewPN);
+ removeInstruction(S0);
+ removeInstruction(S1);
+ A0->replaceAllUsesWith(ANew);
+ removeInstruction(A0);
+ A1->replaceAllUsesWith(ANew);
+ removeInstruction(A1);
+ return true;
+ }
+ return false;
+}
+
+///
+/// \brief True when two stores are equivalent and can sink into the footer
+///
+/// Starting from a diamond tail block, iterate over the instructions in one
+/// predecessor block and try to match a store in the second predecessor.
+///
+bool MergedLoadStoreMotion::mergeStores(BasicBlock *T) {
+
+ bool MergedStores = false;
+ assert(T && "Footer of a diamond cannot be empty");
+
+ pred_iterator PI = pred_begin(T), E = pred_end(T);
+ assert(PI != E);
+ BasicBlock *Pred0 = *PI;
+ ++PI;
+ BasicBlock *Pred1 = *PI;
+ ++PI;
+ // tail block of a diamond/hammock?
+ if (Pred0 == Pred1)
+ return false; // No.
+ if (PI != E)
+ return false; // No. More than 2 predecessors.
+
+  // #Instructions in Pred1 for compile time control
+ int Size1 = Pred1->size();
+ int NStores = 0;
+
+ for (BasicBlock::reverse_iterator RBI = Pred0->rbegin(), RBE = Pred0->rend();
+ RBI != RBE;) {
+
+ Instruction *I = &*RBI;
+ ++RBI;
+ if (isStoreSinkBarrier(I))
+ break;
+    // Only sink simple (non-atomic, non-volatile) stores.
+ if (!isa<StoreInst>(I))
+ continue;
+ StoreInst *S0 = (StoreInst *)I;
+ if (!S0->isSimple())
+ continue;
+
+ ++NStores;
+ if (NStores * Size1 >= MagicCompileTimeControl)
+ break;
+ if (StoreInst *S1 = canSinkFromBlock(Pred1, S0)) {
+ bool Res = sinkStore(T, S0, S1);
+ MergedStores |= Res;
+      // Don't attempt to sink below stores that had to stick around.
+      // But after removal of a store and some of its feeding
+      // instructions, search again from the beginning since the iterator
+      // is likely stale at this point.
+ if (!Res)
+ break;
+ else {
+ RBI = Pred0->rbegin();
+ RBE = Pred0->rend();
+ DEBUG(dbgs() << "Search again\n"; Instruction *I = &*RBI; I->dump());
+ }
+ }
+ }
+ return MergedStores;
+}
+///
+/// \brief Run the transformation for each function
+///
+bool MergedLoadStoreMotion::runOnFunction(Function &F) {
+ MD = &getAnalysis<MemoryDependenceAnalysis>();
+ AA = &getAnalysis<AliasAnalysis>();
+
+ bool Changed = false;
+ if (!EnableMLSM)
+ return false;
+ DEBUG(dbgs() << "Instruction Merger\n");
+
+  // Walk the basic blocks looking for diamond heads whose loads can be
+  // hoisted and whose stores can be sunk.
+ for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE;) {
+ BasicBlock *BB = FI++;
+
+ // Hoist equivalent loads and sink stores
+ // outside diamonds when possible
+ // Run outside core GVN
+ if (isDiamondHead(BB)) {
+ Changed |= mergeLoads(BB);
+ Changed |= mergeStores(getDiamondTail(BB));
+ }
+ }
+ return Changed;
+}
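To round out the new file, here is a hedged source-level example (not part of the patch) of the diamond shape the pass targets; after mldst-motion the two stores are sunk into the join block as a single store of a phi, matching the footer diagram in the file header.

// Illustrative C++ input only; the pass itself matches the corresponding LLVM IR.
void diamond(bool Cond, int *P, int A, int B) {
  if (Cond)
    *P = A;   // store on the then side
  else
    *P = B;   // store on the else side
  // After the pass: a single store of phi(A, B) in the join block.
}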
diff --git a/contrib/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp b/contrib/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
index 15cee44..7cce89e 100644
--- a/contrib/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
@@ -13,7 +13,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "partially-inline-libcalls"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
@@ -25,6 +24,8 @@
using namespace llvm;
+#define DEBUG_TYPE "partially-inline-libcalls"
+
namespace {
class PartiallyInlineLibCalls : public FunctionPass {
public:
@@ -35,8 +36,8 @@ namespace {
initializePartiallyInlineLibCallsPass(*PassRegistry::getPassRegistry());
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- virtual bool runOnFunction(Function &F);
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool runOnFunction(Function &F) override;
private:
/// Optimize calls to sqrt.
diff --git a/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp b/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
index 328a9c5..ea2cf7c 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
@@ -20,29 +20,29 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "reassociate"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Assembly/Writer.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "reassociate"
+
STATISTIC(NumChanged, "Number of insts reassociated");
STATISTIC(NumAnnihil, "Number of expr tree annihilated");
STATISTIC(NumFactor , "Number of multiplies factored");
@@ -67,7 +67,7 @@ static void PrintOps(Instruction *I, const SmallVectorImpl<ValueEntry> &Ops) {
<< *Ops[0].Op->getType() << '\t';
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
dbgs() << "[ ";
- WriteAsOperand(dbgs(), Ops[i].Op, false, M);
+ Ops[i].Op->printAsOperand(dbgs(), false, M);
dbgs() << ", #" << Ops[i].Rank << "] ";
}
}
@@ -123,14 +123,14 @@ namespace {
public:
XorOpnd(Value *V);
- bool isInvalid() const { return SymbolicPart == 0; }
+ bool isInvalid() const { return SymbolicPart == nullptr; }
bool isOrExpr() const { return isOr; }
Value *getValue() const { return OrigVal; }
Value *getSymbolicPart() const { return SymbolicPart; }
unsigned getSymbolicRank() const { return SymbolicRank; }
const APInt &getConstPart() const { return ConstPart; }
- void Invalidate() { SymbolicPart = OrigVal = 0; }
+ void Invalidate() { SymbolicPart = OrigVal = nullptr; }
void setSymbolicRank(unsigned R) { SymbolicRank = R; }
// Sort the XorOpnd-Pointer in ascending order of symbolic-value-rank.
@@ -168,9 +168,9 @@ namespace {
initializeReassociatePass(*PassRegistry::getPassRegistry());
}
- bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
}
private:
@@ -237,7 +237,7 @@ static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode) {
if (V->hasOneUse() && isa<Instruction>(V) &&
cast<Instruction>(V)->getOpcode() == Opcode)
return cast<BinaryOperator>(V);
- return 0;
+ return nullptr;
}
static bool isUnmovableInstruction(Instruction *I) {
@@ -285,7 +285,7 @@ void Reassociate::BuildRankMap(Function &F) {
unsigned Reassociate::getRank(Value *V) {
Instruction *I = dyn_cast<Instruction>(V);
- if (I == 0) {
+ if (!I) {
if (isa<Argument>(V)) return ValueRankMap[V]; // Function argument.
return 0; // Otherwise it's a global or constant, rank 0.
}
@@ -706,7 +706,7 @@ void Reassociate::RewriteExprTree(BinaryOperator *I,
// ExpressionChanged - Non-null if the rewritten expression differs from the
// original in some non-trivial way, requiring the clearing of optional flags.
// Flags are cleared from the operator in ExpressionChanged up to I inclusive.
- BinaryOperator *ExpressionChanged = 0;
+ BinaryOperator *ExpressionChanged = nullptr;
for (unsigned i = 0; ; ++i) {
// The last operation (which comes earliest in the IR) is special as both
// operands will come from Ops, rather than just one with the other being
@@ -821,7 +821,7 @@ void Reassociate::RewriteExprTree(BinaryOperator *I,
if (ExpressionChanged == I)
break;
ExpressionChanged->moveBefore(I);
- ExpressionChanged = cast<BinaryOperator>(*ExpressionChanged->use_begin());
+ ExpressionChanged = cast<BinaryOperator>(*ExpressionChanged->user_begin());
} while (1);
// Throw away any left over nodes from the original expression.
@@ -863,8 +863,7 @@ static Value *NegateValue(Value *V, Instruction *BI) {
// Okay, we need to materialize a negated version of V with an instruction.
// Scan the use lists of V to see if we have one already.
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
- User *U = *UI;
+ for (User *U : V->users()) {
if (!BinaryOperator::isNeg(U)) continue;
// We found one! Now we have to make sure that the definition dominates
@@ -914,8 +913,8 @@ static bool ShouldBreakUpSubtract(Instruction *Sub) {
isReassociableOp(Sub->getOperand(1), Instruction::Sub))
return true;
if (Sub->hasOneUse() &&
- (isReassociableOp(Sub->use_back(), Instruction::Add) ||
- isReassociableOp(Sub->use_back(), Instruction::Sub)))
+ (isReassociableOp(Sub->user_back(), Instruction::Add) ||
+ isReassociableOp(Sub->user_back(), Instruction::Sub)))
return true;
return false;
@@ -997,7 +996,7 @@ static Value *EmitAddTreeOfValues(Instruction *I,
/// remove Factor from the tree and return the new tree.
Value *Reassociate::RemoveFactorFromExpression(Value *V, Value *Factor) {
BinaryOperator *BO = isReassociableOp(V, Instruction::Mul);
- if (!BO) return 0;
+ if (!BO) return nullptr;
SmallVector<RepeatedValue, 8> Tree;
MadeChange |= LinearizeExprTree(BO, Tree);
@@ -1031,7 +1030,7 @@ Value *Reassociate::RemoveFactorFromExpression(Value *V, Value *Factor) {
if (!FoundFactor) {
// Make sure to restore the operands to the expression tree.
RewriteExprTree(BO, Factors);
- return 0;
+ return nullptr;
}
BasicBlock::iterator InsertPt = BO; ++InsertPt;
@@ -1116,7 +1115,7 @@ static Value *OptimizeAndOrXor(unsigned Opcode,
++NumAnnihil;
}
}
- return 0;
+ return nullptr;
}
/// Helper function of CombineXorOpnd(). It creates a bitwise-and
@@ -1137,7 +1136,7 @@ static Value *createAndInstr(Instruction *InsertBefore, Value *Opnd,
}
return Opnd;
}
- return 0;
+ return nullptr;
}
// Helper function of OptimizeXor(). It tries to simplify "Opnd1 ^ ConstOpnd"
@@ -1263,7 +1262,7 @@ Value *Reassociate::OptimizeXor(Instruction *I,
return V;
if (Ops.size() == 1)
- return 0;
+ return nullptr;
SmallVector<XorOpnd, 8> Opnds;
SmallVector<XorOpnd*, 8> OpndPtrs;
@@ -1293,10 +1292,10 @@ Value *Reassociate::OptimizeXor(Instruction *I,
// the same symbolic value cluster together. For instance, the input operand
// sequence ("x | 123", "y & 456", "x & 789") will be sorted into:
// ("x | 123", "x & 789", "y & 456").
- std::sort(OpndPtrs.begin(), OpndPtrs.end(), XorOpnd::PtrSortFunctor());
+ std::stable_sort(OpndPtrs.begin(), OpndPtrs.end(), XorOpnd::PtrSortFunctor());
// Step 3: Combine adjacent operands
- XorOpnd *PrevOpnd = 0;
+ XorOpnd *PrevOpnd = nullptr;
bool Changed = false;
for (unsigned i = 0, e = Opnds.size(); i < e; i++) {
XorOpnd *CurrOpnd = OpndPtrs[i];
@@ -1330,7 +1329,7 @@ Value *Reassociate::OptimizeXor(Instruction *I,
PrevOpnd = CurrOpnd;
} else {
CurrOpnd->Invalidate();
- PrevOpnd = 0;
+ PrevOpnd = nullptr;
}
Changed = true;
}
@@ -1360,7 +1359,7 @@ Value *Reassociate::OptimizeXor(Instruction *I,
}
}
- return 0;
+ return nullptr;
}
/// OptimizeAdd - Optimize a series of operands to an 'add' instruction. This
@@ -1369,11 +1368,10 @@ Value *Reassociate::OptimizeXor(Instruction *I,
Value *Reassociate::OptimizeAdd(Instruction *I,
SmallVectorImpl<ValueEntry> &Ops) {
// Scan the operand lists looking for X and -X pairs. If we find any, we
- // can simplify the expression. X+-X == 0. While we're at it, scan for any
+ // can simplify expressions like X+-X == 0 and X+~X == -1. While we're at it,
+ // scan for any
// duplicates. We want to canonicalize Y+Y+Y+Z -> 3*Y+Z.
- //
- // TODO: We could handle "X + ~X" -> "-1" if we wanted, since "-X = ~X+1".
- //
+
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
Value *TheOp = Ops[i].Op;
// Check to see if we've seen this operand before. If so, we factor all
@@ -1413,19 +1411,28 @@ Value *Reassociate::OptimizeAdd(Instruction *I,
continue;
}
- // Check for X and -X in the operand list.
- if (!BinaryOperator::isNeg(TheOp))
+ // Check for X and -X or X and ~X in the operand list.
+ if (!BinaryOperator::isNeg(TheOp) && !BinaryOperator::isNot(TheOp))
continue;
- Value *X = BinaryOperator::getNegArgument(TheOp);
+ Value *X = nullptr;
+ if (BinaryOperator::isNeg(TheOp))
+ X = BinaryOperator::getNegArgument(TheOp);
+ else if (BinaryOperator::isNot(TheOp))
+ X = BinaryOperator::getNotArgument(TheOp);
+
unsigned FoundX = FindInOperandList(Ops, i, X);
if (FoundX == i)
continue;
// Remove X and -X from the operand list.
- if (Ops.size() == 2)
+ if (Ops.size() == 2 && BinaryOperator::isNeg(TheOp))
return Constant::getNullValue(X->getType());
+ // Remove X and ~X from the operand list.
+ if (Ops.size() == 2 && BinaryOperator::isNot(TheOp))
+ return Constant::getAllOnesValue(X->getType());
+
Ops.erase(Ops.begin()+i);
if (i < FoundX)
--FoundX;
@@ -1435,6 +1442,13 @@ Value *Reassociate::OptimizeAdd(Instruction *I,
++NumAnnihil;
--i; // Revisit element.
e -= 2; // Removed two elements.
+
+ // If the removed pair was X and ~X, append -1 to the operand list.
+ if (BinaryOperator::isNot(TheOp)) {
+ Value *V = Constant::getAllOnesValue(X->getType());
+ Ops.insert(Ops.end(), ValueEntry(getRank(V), V));
+ e += 1;
+ }
}
// Scan the operand list, checking to see if there are any common factors
@@ -1447,7 +1461,7 @@ Value *Reassociate::OptimizeAdd(Instruction *I,
// Keep track of each multiply we see, to avoid triggering on (X*4)+(X*4)
// where they are actually the same multiply.
unsigned MaxOcc = 0;
- Value *MaxOccVal = 0;
+ Value *MaxOccVal = nullptr;
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
BinaryOperator *BOp = isReassociableOp(Ops[i].Op, Instruction::Mul);
if (!BOp)
@@ -1545,20 +1559,7 @@ Value *Reassociate::OptimizeAdd(Instruction *I,
Ops.insert(Ops.begin(), ValueEntry(getRank(V2), V2));
}
- return 0;
-}
-
-namespace {
- /// \brief Predicate tests whether a ValueEntry's op is in a map.
- struct IsValueInMap {
- const DenseMap<Value *, unsigned> &Map;
-
- IsValueInMap(const DenseMap<Value *, unsigned> &Map) : Map(Map) {}
-
- bool operator()(const ValueEntry &Entry) {
- return Map.find(Entry.Op) != Map.end();
- }
- };
+ return nullptr;
}
/// \brief Build up a vector of value/power pairs factoring a product.
@@ -1619,7 +1620,7 @@ bool Reassociate::collectMultiplyFactors(SmallVectorImpl<ValueEntry> &Ops,
// below our minimum of '4'.
assert(FactorPowerSum >= 4);
- std::sort(Factors.begin(), Factors.end(), Factor::PowerDescendingSorter());
+ std::stable_sort(Factors.begin(), Factors.end(), Factor::PowerDescendingSorter());
return true;
}
@@ -1703,14 +1704,14 @@ Value *Reassociate::OptimizeMul(BinaryOperator *I,
// We can only optimize the multiplies when there is a chain of more than
// three, such that a balanced tree might require fewer total multiplies.
if (Ops.size() < 4)
- return 0;
+ return nullptr;
// Try to turn linear trees of multiplies without other uses of the
// intermediate stages into minimal multiply DAGs with perfect sub-expression
// re-use.
SmallVector<Factor, 4> Factors;
if (!collectMultiplyFactors(Ops, Factors))
- return 0; // All distinct factors, so nothing left for us to do.
+ return nullptr; // All distinct factors, so nothing left for us to do.
IRBuilder<> Builder(I);
Value *V = buildMinimalMultiplyDAG(Builder, Factors);
@@ -1719,14 +1720,14 @@ Value *Reassociate::OptimizeMul(BinaryOperator *I,
ValueEntry NewEntry = ValueEntry(getRank(V), V);
Ops.insert(std::lower_bound(Ops.begin(), Ops.end(), NewEntry), NewEntry);
- return 0;
+ return nullptr;
}
Value *Reassociate::OptimizeExpression(BinaryOperator *I,
SmallVectorImpl<ValueEntry> &Ops) {
// Now that we have the linearized expression tree, try to optimize it.
// Start by folding any constants that we found.
- Constant *Cst = 0;
+ Constant *Cst = nullptr;
unsigned Opcode = I->getOpcode();
while (!Ops.empty() && isa<Constant>(Ops.back().Op)) {
Constant *C = cast<Constant>(Ops.pop_back_val().Op);
@@ -1776,7 +1777,7 @@ Value *Reassociate::OptimizeExpression(BinaryOperator *I,
if (Ops.size() != NumOps)
return OptimizeExpression(I, Ops);
- return 0;
+ return nullptr;
}
/// EraseInst - Zap the given instruction, adding interesting operands to the
@@ -1795,9 +1796,9 @@ void Reassociate::EraseInst(Instruction *I) {
// If this is a node in an expression tree, climb to the expression root
// and add that since that's where optimization actually happens.
unsigned Opcode = Op->getOpcode();
- while (Op->hasOneUse() && Op->use_back()->getOpcode() == Opcode &&
+ while (Op->hasOneUse() && Op->user_back()->getOpcode() == Opcode &&
Visited.insert(Op))
- Op = Op->use_back();
+ Op = Op->user_back();
RedoInsts.insert(Op);
}
}
@@ -1815,8 +1816,8 @@ void Reassociate::OptimizeInst(Instruction *I) {
// is used by a reassociable multiply or add, turn into a multiply.
if (isReassociableOp(I->getOperand(0), Instruction::Mul) ||
(I->hasOneUse() &&
- (isReassociableOp(I->use_back(), Instruction::Mul) ||
- isReassociableOp(I->use_back(), Instruction::Add)))) {
+ (isReassociableOp(I->user_back(), Instruction::Mul) ||
+ isReassociableOp(I->user_back(), Instruction::Add)))) {
Instruction *NI = ConvertShiftToMul(I);
RedoInsts.insert(I);
MadeChange = true;
@@ -1869,7 +1870,7 @@ void Reassociate::OptimizeInst(Instruction *I) {
// and if this is not an inner node of a multiply tree.
if (isReassociableOp(I->getOperand(1), Instruction::Mul) &&
(!I->hasOneUse() ||
- !isReassociableOp(I->use_back(), Instruction::Mul))) {
+ !isReassociableOp(I->user_back(), Instruction::Mul))) {
Instruction *NI = LowerNegateToMultiply(I);
RedoInsts.insert(I);
MadeChange = true;
@@ -1885,13 +1886,13 @@ void Reassociate::OptimizeInst(Instruction *I) {
// If this is an interior node of a reassociable tree, ignore it until we
// get to the root of the tree, to avoid N^2 analysis.
unsigned Opcode = BO->getOpcode();
- if (BO->hasOneUse() && BO->use_back()->getOpcode() == Opcode)
+ if (BO->hasOneUse() && BO->user_back()->getOpcode() == Opcode)
return;
// If this is an add tree that is used by a sub instruction, ignore it
// until we process the subtract.
if (BO->hasOneUse() && BO->getOpcode() == Instruction::Add &&
- cast<Instruction>(BO->use_back())->getOpcode() == Instruction::Sub)
+ cast<Instruction>(BO->user_back())->getOpcode() == Instruction::Sub)
return;
ReassociateExpression(BO);
@@ -1943,7 +1944,7 @@ void Reassociate::ReassociateExpression(BinaryOperator *I) {
// In this case we reassociate to put the negation on the outside so that we
// can fold the negation into the add: (-X)*Y + Z -> Z-X*Y
if (I->getOpcode() == Instruction::Mul && I->hasOneUse() &&
- cast<Instruction>(I->use_back())->getOpcode() == Instruction::Add &&
+ cast<Instruction>(I->user_back())->getOpcode() == Instruction::Add &&
isa<ConstantInt>(Ops.back().Op) &&
cast<ConstantInt>(Ops.back().Op)->isAllOnesValue()) {
ValueEntry Tmp = Ops.pop_back_val();
@@ -1972,6 +1973,9 @@ void Reassociate::ReassociateExpression(BinaryOperator *I) {
}
bool Reassociate::runOnFunction(Function &F) {
+ if (skipOptnoneFunction(F))
+ return false;
+
// Calculate the rank map for F
BuildRankMap(F);
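The new X + ~X handling in OptimizeAdd above rests on the two's-complement identity -X == ~X + 1, so X + ~X == -1 at any width. A small standalone check of both folded pairs (illustration only, not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  for (int32_t X : {0, 1, -1, 7, -1234}) {
    assert(X + -X == 0);  // the pair OptimizeAdd already folded to 0
    assert(X + ~X == -1); // the new pair: ~X == -X - 1, so the sum is -1
  }
  return 0;
}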
diff --git a/contrib/llvm/lib/Transforms/Scalar/Reg2Mem.cpp b/contrib/llvm/lib/Transforms/Scalar/Reg2Mem.cpp
index 07f540a..b6023e2 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Reg2Mem.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Reg2Mem.cpp
@@ -16,20 +16,21 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "reg2mem"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Transforms/Utils/Local.h"
#include <list>
using namespace llvm;
+#define DEBUG_TYPE "reg2mem"
+
STATISTIC(NumRegsDemoted, "Number of registers demoted");
STATISTIC(NumPhisDemoted, "Number of phi-nodes demoted");
@@ -40,23 +41,22 @@ namespace {
initializeRegToMemPass(*PassRegistry::getPassRegistry());
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequiredID(BreakCriticalEdgesID);
AU.addPreservedID(BreakCriticalEdgesID);
}
- bool valueEscapes(const Instruction *Inst) const {
- const BasicBlock *BB = Inst->getParent();
- for (Value::const_use_iterator UI = Inst->use_begin(),E = Inst->use_end();
- UI != E; ++UI) {
- const Instruction *I = cast<Instruction>(*UI);
- if (I->getParent() != BB || isa<PHINode>(I))
+ bool valueEscapes(const Instruction *Inst) const {
+ const BasicBlock *BB = Inst->getParent();
+ for (const User *U : Inst->users()) {
+ const Instruction *UI = cast<Instruction>(U);
+ if (UI->getParent() != BB || isa<PHINode>(UI))
return true;
}
return false;
}
- virtual bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
};
}
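The valueEscapes() rewrite above shows a pattern applied throughout this merge: explicit Value::use_iterator loops become range-based loops over users(). A sketch of the same shape, with a hypothetical helper name:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Count the users of Inst that live outside its defining block.
static unsigned countEscapingUsers(const Instruction *Inst) {
  const BasicBlock *BB = Inst->getParent();
  unsigned N = 0;
  for (const User *U : Inst->users())            // LLVM 3.5: users() range
    if (cast<Instruction>(U)->getParent() != BB) // was use_begin()/use_end()
      ++N;
  return N;
}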
diff --git a/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp b/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp
index 4364720..90c3520 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp
@@ -17,7 +17,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "sccp"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
@@ -26,13 +25,13 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instructions.h"
-#include "llvm/InstVisitor.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -42,6 +41,8 @@
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "sccp"
+
STATISTIC(NumInstRemoved, "Number of instructions removed");
STATISTIC(NumDeadBlocks , "Number of basic blocks unreachable");
@@ -81,7 +82,7 @@ class LatticeVal {
}
public:
- LatticeVal() : Val(0, undefined) {}
+ LatticeVal() : Val(nullptr, undefined) {}
bool isUndefined() const { return getLatticeValue() == undefined; }
bool isConstant() const {
@@ -133,7 +134,7 @@ public:
ConstantInt *getConstantInt() const {
if (isConstant())
return dyn_cast<ConstantInt>(getConstant());
- return 0;
+ return nullptr;
}
void markForcedConstant(Constant *V) {
@@ -153,7 +154,7 @@ namespace {
/// Constant Propagation.
///
class SCCPSolver : public InstVisitor<SCCPSolver> {
- const DataLayout *TD;
+ const DataLayout *DL;
const TargetLibraryInfo *TLI;
SmallPtrSet<BasicBlock*, 8> BBExecutable; // The BBs that are executable.
DenseMap<Value*, LatticeVal> ValueState; // The state each value is in.
@@ -205,8 +206,8 @@ class SCCPSolver : public InstVisitor<SCCPSolver> {
typedef std::pair<BasicBlock*, BasicBlock*> Edge;
DenseSet<Edge> KnownFeasibleEdges;
public:
- SCCPSolver(const DataLayout *td, const TargetLibraryInfo *tli)
- : TD(td), TLI(tli) {}
+ SCCPSolver(const DataLayout *DL, const TargetLibraryInfo *tli)
+ : DL(DL), TLI(tli) {}
/// MarkBlockExecutable - This method can be used by clients to mark all of
/// the blocks that are known to be intrinsically live in the processed unit.
@@ -403,7 +404,7 @@ private:
if (Constant *C = dyn_cast<Constant>(V)) {
Constant *Elt = C->getAggregateElement(i);
- if (Elt == 0)
+ if (!Elt)
LV.markOverdefined(); // Unknown sort of constant.
else if (isa<UndefValue>(Elt))
; // Undef values remain undefined.
@@ -491,10 +492,11 @@ private:
}
void visitCallSite (CallSite CS);
void visitResumeInst (TerminatorInst &I) { /*returns void*/ }
- void visitUnwindInst (TerminatorInst &I) { /*returns void*/ }
void visitUnreachableInst(TerminatorInst &I) { /*returns void*/ }
void visitFenceInst (FenceInst &I) { /*returns void*/ }
- void visitAtomicCmpXchgInst (AtomicCmpXchgInst &I) { markOverdefined(&I); }
+ void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
+ markAnythingOverdefined(&I);
+ }
void visitAtomicRMWInst (AtomicRMWInst &I) { markOverdefined(&I); }
void visitAllocaInst (Instruction &I) { markOverdefined(&I); }
void visitVAArgInst (Instruction &I) { markAnythingOverdefined(&I); }
@@ -523,7 +525,7 @@ void SCCPSolver::getFeasibleSuccessors(TerminatorInst &TI,
LatticeVal BCValue = getValueState(BI->getCondition());
ConstantInt *CI = BCValue.getConstantInt();
- if (CI == 0) {
+ if (!CI) {
// Overdefined condition variables, and branches on unfoldable constant
// conditions, mean the branch could go either way.
if (!BCValue.isUndefined())
@@ -550,7 +552,7 @@ void SCCPSolver::getFeasibleSuccessors(TerminatorInst &TI,
LatticeVal SCValue = getValueState(SI->getCondition());
ConstantInt *CI = SCValue.getConstantInt();
- if (CI == 0) { // Overdefined or undefined condition?
+ if (!CI) { // Overdefined or undefined condition?
// All destinations are executable!
if (!SCValue.isUndefined())
Succs.assign(TI.getNumSuccessors(), true);
@@ -595,7 +597,7 @@ bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) {
// Overdefined condition variables mean the branch could go either way,
// undef conditions mean that neither edge is feasible yet.
ConstantInt *CI = BCValue.getConstantInt();
- if (CI == 0)
+ if (!CI)
return !BCValue.isUndefined();
// Constant condition variables mean the branch can only go a single way.
@@ -613,7 +615,7 @@ bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) {
LatticeVal SCValue = getValueState(SI->getCondition());
ConstantInt *CI = SCValue.getConstantInt();
- if (CI == 0)
+ if (!CI)
return !SCValue.isUndefined();
return SI->findCaseValue(CI).getCaseSuccessor() == To;
@@ -627,7 +629,7 @@ bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) {
#ifndef NDEBUG
dbgs() << "Unknown terminator instruction: " << *TI << '\n';
#endif
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
// visit Implementations - Something changed in this instruction, either an
@@ -668,7 +670,7 @@ void SCCPSolver::visitPHINode(PHINode &PN) {
// constant. If they are constant and don't agree, the PHI is overdefined.
// If there are no executable operands, the PHI remains undefined.
//
- Constant *OperandVal = 0;
+ Constant *OperandVal = nullptr;
for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
LatticeVal IV = getValueState(PN.getIncomingValue(i));
if (IV.isUndefined()) continue; // Doesn't influence PHI node.
@@ -679,7 +681,7 @@ void SCCPSolver::visitPHINode(PHINode &PN) {
if (IV.isOverdefined()) // PHI node becomes overdefined!
return markOverdefined(&PN);
- if (OperandVal == 0) { // Grab the first value.
+ if (!OperandVal) { // Grab the first value.
OperandVal = IV.getConstant();
continue;
}
@@ -775,7 +777,7 @@ void SCCPSolver::visitExtractValueInst(ExtractValueInst &EVI) {
void SCCPSolver::visitInsertValueInst(InsertValueInst &IVI) {
StructType *STy = dyn_cast<StructType>(IVI.getType());
- if (STy == 0)
+ if (!STy)
return markOverdefined(&IVI);
// If this has more than one index, we can't handle it, drive all results to
@@ -863,7 +865,7 @@ void SCCPSolver::visitBinaryOperator(Instruction &I) {
// If this is an AND or OR with 0 or -1, it doesn't matter that the other
// operand is overdefined.
if (I.getOpcode() == Instruction::And || I.getOpcode() == Instruction::Or) {
- LatticeVal *NonOverdefVal = 0;
+ LatticeVal *NonOverdefVal = nullptr;
if (!V1State.isOverdefined())
NonOverdefVal = &V1State;
else if (!V2State.isOverdefined())
@@ -1067,7 +1069,7 @@ void SCCPSolver::visitLoadInst(LoadInst &I) {
}
// Transform load from a constant into a constant if possible.
- if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, TD))
+ if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, DL))
return markConstant(IV, &I, C);
// Otherwise we cannot say for certain what value this load will produce.
@@ -1082,7 +1084,7 @@ void SCCPSolver::visitCallSite(CallSite CS) {
// The common case is that we aren't tracking the callee, either because we
// are not doing interprocedural analysis or the callee is indirect, or is
// external. Handle these cases first.
- if (F == 0 || F->isDeclaration()) {
+ if (!F || F->isDeclaration()) {
CallOverdefined:
// Void return and not tracking callee, just bail.
if (I->getType()->isVoidTy()) return;
@@ -1181,10 +1183,9 @@ void SCCPSolver::Solve() {
// since all of its users will have already been marked as overdefined
// Update all of the users of this instruction's value.
//
- for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
- UI != E; ++UI)
- if (Instruction *I = dyn_cast<Instruction>(*UI))
- OperandChangedState(I);
+ for (User *U : I->users())
+ if (Instruction *UI = dyn_cast<Instruction>(U))
+ OperandChangedState(UI);
}
// Process the instruction work list.
@@ -1201,10 +1202,9 @@ void SCCPSolver::Solve() {
// Update all of the users of this instruction's value.
//
if (I->getType()->isStructTy() || !getValueState(I).isOverdefined())
- for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
- UI != E; ++UI)
- if (Instruction *I = dyn_cast<Instruction>(*UI))
- OperandChangedState(I);
+ for (User *U : I->users())
+ if (Instruction *UI = dyn_cast<Instruction>(U))
+ OperandChangedState(UI);
}
// Process the basic block work list.
@@ -1499,7 +1499,7 @@ namespace {
/// Sparse Conditional Constant Propagator.
///
struct SCCP : public FunctionPass {
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<TargetLibraryInfo>();
}
static char ID; // Pass identification, replacement for typeid
@@ -1510,7 +1510,7 @@ namespace {
// runOnFunction - Run the Sparse Conditional Constant Propagation
// algorithm, and return true if the function was modified.
//
- bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
};
} // end anonymous namespace
@@ -1553,10 +1553,14 @@ static void DeleteInstructionInBlock(BasicBlock *BB) {
// and return true if the function was modified.
//
bool SCCP::runOnFunction(Function &F) {
+ if (skipOptnoneFunction(F))
+ return false;
+
DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n");
- const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
+ const DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
- SCCPSolver Solver(TD, TLI);
+ SCCPSolver Solver(DL, TLI);
// Mark the first block of the function as being executable.
Solver.MarkBlockExecutable(F.begin());
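The early bail-out added above (also added to Reassociate::runOnFunction earlier in this patch) uses the FunctionPass helper skipOptnoneFunction(), so functions carrying the 'optnone' attribute are left untouched. A minimal, hypothetical pass skeleton showing the pattern:

#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
using namespace llvm;

namespace {
struct DemoPass : public FunctionPass {
  static char ID;
  DemoPass() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override {
    if (skipOptnoneFunction(F)) // honor the 'optnone' attribute
      return false;
    return false; // the real transformation work would go here
  }
};
}
char DemoPass::ID = 0;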
@@ -1628,14 +1632,14 @@ namespace {
/// Constant Propagation.
///
struct IPSCCP : public ModulePass {
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<TargetLibraryInfo>();
}
static char ID;
IPSCCP() : ModulePass(ID) {
initializeIPSCCPPass(*PassRegistry::getPassRegistry());
}
- bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
};
} // end anonymous namespace
@@ -1658,21 +1662,20 @@ static bool AddressIsTaken(const GlobalValue *GV) {
// Delete any dead constantexpr klingons.
GV->removeDeadConstantUsers();
- for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
- UI != E; ++UI) {
- const User *U = *UI;
- if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
+ for (const Use &U : GV->uses()) {
+ const User *UR = U.getUser();
+ if (const StoreInst *SI = dyn_cast<StoreInst>(UR)) {
if (SI->getOperand(0) == GV || SI->isVolatile())
return true; // Storing addr of GV.
- } else if (isa<InvokeInst>(U) || isa<CallInst>(U)) {
+ } else if (isa<InvokeInst>(UR) || isa<CallInst>(UR)) {
// Make sure we are calling the function, not passing the address.
- ImmutableCallSite CS(cast<Instruction>(U));
- if (!CS.isCallee(UI))
+ ImmutableCallSite CS(cast<Instruction>(UR));
+ if (!CS.isCallee(&U))
return true;
- } else if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
+ } else if (const LoadInst *LI = dyn_cast<LoadInst>(UR)) {
if (LI->isVolatile())
return true;
- } else if (isa<BlockAddress>(U)) {
+ } else if (isa<BlockAddress>(UR)) {
// blockaddress doesn't take the address of the function, it takes addr
// of label.
} else {
@@ -1683,9 +1686,10 @@ static bool AddressIsTaken(const GlobalValue *GV) {
}
bool IPSCCP::runOnModule(Module &M) {
- const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
- SCCPSolver Solver(TD, TLI);
+ SCCPSolver Solver(DL, TLI);
// AddressTakenFunctions - This set keeps track of the address-taken functions
// that are in the input. As IPSCCP runs through and simplifies code,
@@ -1834,8 +1838,9 @@ bool IPSCCP::runOnModule(Module &M) {
for (unsigned i = 0, e = BlocksToErase.size(); i != e; ++i) {
// If there are any PHI nodes in this successor, drop entries for BB now.
BasicBlock *DeadBB = BlocksToErase[i];
- for (Value::use_iterator UI = DeadBB->use_begin(), UE = DeadBB->use_end();
- UI != UE; ) {
+ for (Value::user_iterator UI = DeadBB->user_begin(),
+ UE = DeadBB->user_end();
+ UI != UE;) {
// Grab the user and then increment the iterator early, as the user
// will be deleted. Step past all adjacent uses from the same user.
Instruction *I = dyn_cast<Instruction>(*UI);
@@ -1925,7 +1930,7 @@ bool IPSCCP::runOnModule(Module &M) {
"Overdefined values should have been taken out of the map!");
DEBUG(dbgs() << "Found that GV '" << GV->getName() << "' is constant!\n");
while (!GV->use_empty()) {
- StoreInst *SI = cast<StoreInst>(GV->use_back());
+ StoreInst *SI = cast<StoreInst>(GV->user_back());
SI->eraseFromParent();
}
M.getGlobalList().erase(GV);
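The DataLayout changes in SCCP and IPSCCP above follow another merge-wide pattern: DataLayout is no longer a pass itself, so code asks for DataLayoutPass and must tolerate its absence. A hypothetical module-pass sketch of the lookup:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
using namespace llvm;

namespace {
struct DemoDL : public ModulePass {
  static char ID;
  DemoDL() : ModulePass(ID) {}

  bool runOnModule(Module &M) override {
    DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
    const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
    (void)DL; // downstream code must handle a null DataLayout
    return false;
  }
};
}
char DemoDL::ID = 0;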
diff --git a/contrib/llvm/lib/Transforms/Scalar/SROA.cpp b/contrib/llvm/lib/Transforms/Scalar/SROA.cpp
index 9f3fc83..8c7f253 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -23,40 +23,48 @@
///
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "sroa"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/DIBuilder.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
-#include "llvm/InstVisitor.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/TimeValue.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
+
+#if __cplusplus >= 201103L && !defined(NDEBUG)
+// We only use this for a debug check in C++11
+#include <random>
+#endif
+
using namespace llvm;
+#define DEBUG_TYPE "sroa"
+
STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
@@ -73,6 +81,16 @@ STATISTIC(NumVectorized, "Number of vectorized aggregates");
static cl::opt<bool>
ForceSSAUpdater("force-ssa-updater", cl::init(false), cl::Hidden);
+/// Hidden option to enable randomly shuffling the slices to help uncover
+/// instability in their order.
+static cl::opt<bool> SROARandomShuffleSlices("sroa-random-shuffle-slices",
+ cl::init(false), cl::Hidden);
+
+/// Hidden option to experiment with completely strict handling of inbounds
+/// GEPs.
+static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds",
+ cl::init(false), cl::Hidden);
+
namespace {
/// \brief A custom IRBuilder inserter which prefixes all names if they are
/// preserved.
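The two hidden SROA flags added above use the standard cl::opt machinery; a minimal sketch with a hypothetical option name (the cl::desc text is also made up):

#include "llvm/Support/CommandLine.h"
using namespace llvm;

// Off by default and hidden from -help, like -sroa-random-shuffle-slices.
static cl::opt<bool>
    DemoShuffle("demo-shuffle-slices", cl::init(false), cl::Hidden,
                cl::desc("Hypothetical debug knob"));

static bool shouldShuffle() { return DemoShuffle; }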
@@ -142,8 +160,8 @@ public:
Use *getUse() const { return UseAndIsSplittable.getPointer(); }
- bool isDead() const { return getUse() == 0; }
- void kill() { UseAndIsSplittable.setPointer(0); }
+ bool isDead() const { return getUse() == nullptr; }
+ void kill() { UseAndIsSplittable.setPointer(nullptr); }
/// \brief Support for ordering ranges.
///
@@ -244,8 +262,8 @@ public:
void printUse(raw_ostream &OS, const_iterator I,
StringRef Indent = " ") const;
void print(raw_ostream &OS) const;
- void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump(const_iterator I) const;
- void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump() const;
+ void dump(const_iterator I) const;
+ void dump() const;
#endif
private:
@@ -303,7 +321,7 @@ static Value *foldSelectInst(SelectInst &SI) {
if (SI.getOperand(1) == SI.getOperand(2))
return SI.getOperand(1);
- return 0;
+ return nullptr;
}
/// \brief Builder for the alloca slices.
@@ -339,7 +357,7 @@ private:
bool IsSplittable = false) {
// Completely skip uses which have a zero size or start either before or
// past the end of the allocation.
- if (Size == 0 || Offset.isNegative() || Offset.uge(AllocSize)) {
+ if (Size == 0 || Offset.uge(AllocSize)) {
DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
<< " which has zero size or starts outside of the "
<< AllocSize << " byte alloca:\n"
@@ -380,6 +398,43 @@ private:
if (GEPI.use_empty())
return markAsDead(GEPI);
+ if (SROAStrictInbounds && GEPI.isInBounds()) {
+ // FIXME: This is a manually un-factored variant of the basic code inside
+ // of GEPs with checking of the inbounds invariant specified in the
+ // langref in a very strict sense. If we ever want to enable
+ // SROAStrictInbounds, this code should be factored cleanly into
+ // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds
+ // by writing out the code here where we have the underlying allocation
+ // size readily available.
+ APInt GEPOffset = Offset;
+ for (gep_type_iterator GTI = gep_type_begin(GEPI),
+ GTE = gep_type_end(GEPI);
+ GTI != GTE; ++GTI) {
+ ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
+ if (!OpC)
+ break;
+
+ // Handle a struct index, which adds its field offset to the pointer.
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ unsigned ElementIdx = OpC->getZExtValue();
+ const StructLayout *SL = DL.getStructLayout(STy);
+ GEPOffset +=
+ APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
+ } else {
+ // For array or vector indices, scale the index by the size of the type.
+ APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
+ GEPOffset += Index * APInt(Offset.getBitWidth(),
+ DL.getTypeAllocSize(GTI.getIndexedType()));
+ }
+
+ // If this index has computed an intermediate pointer which is not
+ // inbounds, then the result of the GEP is a poison value and we can
+ // delete it and all uses.
+ if (GEPOffset.ugt(AllocSize))
+ return markAsDead(GEPI);
+ }
+ }
+
return Base::visitGetElementPtrInst(GEPI);
}
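To make the strict-inbounds offset walk above concrete, here is a hypothetical computation for a GEP over %S = type { i32, [4 x i32] } laid out with 4-byte fields in a 20-byte alloca; none of these numbers come from the patch:

#include <cassert>
#include <cstdint>

int main() {
  // Walk for: getelementptr inbounds %S* %p, i64 0, i32 1, i64 3
  const uint64_t AllocSize = 20;
  uint64_t GEPOffset = 0;
  GEPOffset += 0 * AllocSize; // leading index, scaled by the size of %S
  GEPOffset += 4;             // struct index 1: field offset of the array
  GEPOffset += 3 * 4;         // array index 3, scaled by sizeof(i32)
  assert(GEPOffset == 16 && GEPOffset <= AllocSize);
  // Under -sroa-strict-inbounds, an intermediate offset beyond AllocSize
  // would make the GEP poison, so SROA could mark it (and its uses) dead.
  return 0;
}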
@@ -426,8 +481,7 @@ private:
// risk of overflow.
// FIXME: We should instead consider the pointer to have escaped if this
// function is being instrumented for addressing bugs or race conditions.
- if (Offset.isNegative() || Size > AllocSize ||
- Offset.ugt(AllocSize - Size)) {
+ if (Size > AllocSize || Offset.ugt(AllocSize - Size)) {
DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" << Offset
<< " which extends past the end of the " << AllocSize
<< " byte alloca:\n"
@@ -446,7 +500,7 @@ private:
assert(II.getRawDest() == *U && "Pointer use is not the destination?");
ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
if ((Length && Length->getValue() == 0) ||
- (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
+ (IsOffsetKnown && Offset.uge(AllocSize)))
// Zero-length mem transfer intrinsics can be ignored entirely.
return markAsDead(II);
@@ -461,14 +515,30 @@ private:
void visitMemTransferInst(MemTransferInst &II) {
ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
- if ((Length && Length->getValue() == 0) ||
- (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
+ if (Length && Length->getValue() == 0)
// Zero-length mem transfer intrinsics can be ignored entirely.
return markAsDead(II);
+ // Because we can visit these intrinsics twice, also check to see if the
+ // first time marked this instruction as dead. If so, skip it.
+ if (VisitedDeadInsts.count(&II))
+ return;
+
if (!IsOffsetKnown)
return PI.setAborted(&II);
+ // This side of the transfer is completely out-of-bounds, and so we can
+ // nuke the entire transfer. However, we also need to nuke the other side
+ // if already added to our partitions.
+ // FIXME: Yet another place we really should bypass this when
+ // instrumenting for ASan.
+ if (Offset.uge(AllocSize)) {
+ SmallDenseMap<Instruction *, unsigned>::iterator MTPI = MemTransferSliceMap.find(&II);
+ if (MTPI != MemTransferSliceMap.end())
+ S.Slices[MTPI->second].kill();
+ return markAsDead(II);
+ }
+
uint64_t RawOffset = Offset.getLimitedValue();
uint64_t Size = Length ? Length->getLimitedValue()
: AllocSize - RawOffset;
@@ -487,7 +557,7 @@ private:
// they both point to the same alloca.
bool Inserted;
SmallDenseMap<Instruction *, unsigned>::iterator MTPI;
- llvm::tie(MTPI, Inserted) =
+ std::tie(MTPI, Inserted) =
MemTransferSliceMap.insert(std::make_pair(&II, S.Slices.size()));
unsigned PrevIdx = MTPI->second;
if (!Inserted) {
@@ -546,7 +616,7 @@ private:
Size = 0;
do {
Instruction *I, *UsedI;
- llvm::tie(UsedI, I) = Uses.pop_back_val();
+ std::tie(UsedI, I) = Uses.pop_back_val();
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
Size = std::max(Size, DL.getTypeStoreSize(LI->getType()));
@@ -568,13 +638,12 @@ private:
return I;
}
- for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE;
- ++UI)
- if (Visited.insert(cast<Instruction>(*UI)))
- Uses.push_back(std::make_pair(I, cast<Instruction>(*UI)));
+ for (User *U : I->users())
+ if (Visited.insert(cast<Instruction>(U)))
+ Uses.push_back(std::make_pair(I, cast<Instruction>(U)));
} while (!Uses.empty());
- return 0;
+ return nullptr;
}
void visitPHINode(PHINode &PN) {
@@ -597,8 +666,7 @@ private:
// themselves which should be replaced with undef.
// FIXME: This should instead be escaped in the event we're instrumenting
// for address sanitization.
- if ((Offset.isNegative() && (-Offset).uge(PHISize)) ||
- (!Offset.isNegative() && Offset.uge(AllocSize))) {
+ if (Offset.uge(AllocSize)) {
S.DeadOperands.push_back(U);
return;
}
@@ -638,8 +706,7 @@ private:
// themselves which should be replaced with undef.
// FIXME: This should instead be escaped in the event we're instrumenting
// for address sanitization.
- if ((Offset.isNegative() && Offset.uge(SelectSize)) ||
- (!Offset.isNegative() && Offset.uge(AllocSize))) {
+ if (Offset.uge(AllocSize)) {
S.DeadOperands.push_back(U);
return;
}
@@ -658,7 +725,7 @@ AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
AI(AI),
#endif
- PointerEscapingInstr(0) {
+ PointerEscapingInstr(nullptr) {
SliceBuilder PB(DL, AI, *this);
SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
if (PtrI.isEscaped() || PtrI.isAborted()) {
@@ -674,6 +741,13 @@ AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
std::mem_fun_ref(&Slice::isDead)),
Slices.end());
+#if __cplusplus >= 201103L && !defined(NDEBUG)
+ if (SROARandomShuffleSlices) {
+ std::mt19937 MT(static_cast<unsigned>(sys::TimeValue::now().msec()));
+ std::shuffle(Slices.begin(), Slices.end(), MT);
+ }
+#endif
+
// Sort the uses. This arranges for the offsets to be in ascending order,
// and the sizes to be in descending order.
std::sort(Slices.begin(), Slices.end());
@@ -712,8 +786,10 @@ void AllocaSlices::print(raw_ostream &OS) const {
print(OS, I);
}
-void AllocaSlices::dump(const_iterator I) const { print(dbgs(), I); }
-void AllocaSlices::dump() const { print(dbgs()); }
+LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const {
+ print(dbgs(), I);
+}
+LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); }
#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -741,12 +817,10 @@ public:
// Retain the debug information attached to the alloca for use when
// rewriting loads and stores.
if (MDNode *DebugNode = MDNode::getIfExists(AI.getContext(), &AI)) {
- for (Value::use_iterator UI = DebugNode->use_begin(),
- UE = DebugNode->use_end();
- UI != UE; ++UI)
- if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
+ for (User *U : DebugNode->users())
+ if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
DDIs.push_back(DDI);
- else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(*UI))
+ else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
DVIs.push_back(DVI);
}
@@ -760,8 +834,8 @@ public:
DVIs.pop_back_val()->eraseFromParent();
}
- virtual bool isInstInList(Instruction *I,
- const SmallVectorImpl<Instruction*> &Insts) const {
+ bool isInstInList(Instruction *I,
+ const SmallVectorImpl<Instruction*> &Insts) const override {
Value *Ptr;
if (LoadInst *LI = dyn_cast<LoadInst>(I))
Ptr = LI->getOperand(0);
@@ -788,7 +862,7 @@ public:
return false;
}
- virtual void updateDebugInfo(Instruction *Inst) const {
+ void updateDebugInfo(Instruction *Inst) const override {
for (SmallVectorImpl<DbgDeclareInst *>::const_iterator I = DDIs.begin(),
E = DDIs.end(); I != E; ++I) {
DbgDeclareInst *DDI = *I;
@@ -800,7 +874,7 @@ public:
for (SmallVectorImpl<DbgValueInst *>::const_iterator I = DVIs.begin(),
E = DVIs.end(); I != E; ++I) {
DbgValueInst *DVI = *I;
- Value *Arg = 0;
+ Value *Arg = nullptr;
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
// If an argument is zero extended then use argument directly. The ZExt
// may be zapped by an optimization pass in future.
@@ -896,13 +970,13 @@ class SROA : public FunctionPass {
public:
SROA(bool RequiresDomTree = true)
: FunctionPass(ID), RequiresDomTree(RequiresDomTree),
- C(0), DL(0), DT(0) {
+ C(nullptr), DL(nullptr), DT(nullptr) {
initializeSROAPass(*PassRegistry::getPassRegistry());
}
- bool runOnFunction(Function &F);
- void getAnalysisUsage(AnalysisUsage &AU) const;
+ bool runOnFunction(Function &F) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
- const char *getPassName() const { return "SROA"; }
+ const char *getPassName() const override { return "SROA"; }
static char ID;
private:
@@ -915,6 +989,7 @@ private:
ArrayRef<AllocaSlices::iterator> SplitUses);
bool splitAlloca(AllocaInst &AI, AllocaSlices &S);
bool runOnAlloca(AllocaInst &AI);
+ void clobberUse(Use &U);
void deleteDeadInstructions(SmallPtrSet<AllocaInst *, 4> &DeletedAllocas);
bool promoteAllocas(Function &F);
};
@@ -928,7 +1003,7 @@ FunctionPass *llvm::createSROAPass(bool RequiresDomTree) {
INITIALIZE_PASS_BEGIN(SROA, "sroa", "Scalar Replacement Of Aggregates",
false, false)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates",
false, false)
@@ -937,8 +1012,12 @@ INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates",
static Type *findCommonType(AllocaSlices::const_iterator B,
AllocaSlices::const_iterator E,
uint64_t EndOffset) {
- Type *Ty = 0;
- bool IgnoreNonIntegralTypes = false;
+ Type *Ty = nullptr;
+ bool TyIsCommon = true;
+ IntegerType *ITy = nullptr;
+
+ // Note that we need to look at *every* alloca slice's Use to ensure we
+ // always get consistent results regardless of the order of slices.
for (AllocaSlices::const_iterator I = B; I != E; ++I) {
Use *U = I->getUse();
if (isa<IntrinsicInst>(*U->getUser()))
@@ -946,42 +1025,37 @@ static Type *findCommonType(AllocaSlices::const_iterator B,
if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
continue;
- Type *UserTy = 0;
+ Type *UserTy = nullptr;
if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
UserTy = LI->getType();
} else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
UserTy = SI->getValueOperand()->getType();
- } else {
- IgnoreNonIntegralTypes = true; // Give up on anything but an iN type.
- continue;
}
- if (IntegerType *ITy = dyn_cast<IntegerType>(UserTy)) {
+ if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) {
// If the type is larger than the partition, skip it. We only encounter
// this for split integer operations where we want to use the type of the
// entity causing the split. Also skip if the type is not a byte width
// multiple.
- if (ITy->getBitWidth() % 8 != 0 ||
- ITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
+ if (UserITy->getBitWidth() % 8 != 0 ||
+ UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
continue;
- // If we have found an integer type use covering the alloca, use that
- // regardless of the other types, as integers are often used for
- // a "bucket of bits" type.
- //
- // NB: This *must* be the only return from inside the loop so that the
- // order of slices doesn't impact the computed type.
- return ITy;
- } else if (IgnoreNonIntegralTypes) {
- continue;
+ // Track the largest bitwidth integer type used in this way in case there
+ // is no common type.
+ if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth())
+ ITy = UserITy;
}
- if (Ty && Ty != UserTy)
- IgnoreNonIntegralTypes = true; // Give up on anything but an iN type.
-
- Ty = UserTy;
+ // To avoid depending on the order of slices, Ty and TyIsCommon must not
+ // depend on types skipped above.
+ if (!UserTy || (Ty && Ty != UserTy))
+ TyIsCommon = false; // Give up on anything but an iN type.
+ else
+ Ty = UserTy;
}
- return Ty;
+
+ return TyIsCommon ? Ty : ITy;
}
/// PHI instructions that use an alloca and are subsequently loaded can be
@@ -1003,7 +1077,7 @@ static Type *findCommonType(AllocaSlices::const_iterator B,
/// FIXME: This should be hoisted into a generic utility, likely in
/// Transforms/Util/Local.h
static bool isSafePHIToSpeculate(PHINode &PN,
- const DataLayout *DL = 0) {
+ const DataLayout *DL = nullptr) {
// For now, we can only do this promotion if the load is in the same block
// as the PHI, and if there are no stores between the phi and load.
// TODO: Allow recursive phi users.
@@ -1011,10 +1085,9 @@ static bool isSafePHIToSpeculate(PHINode &PN,
BasicBlock *BB = PN.getParent();
unsigned MaxAlign = 0;
bool HaveLoad = false;
- for (Value::use_iterator UI = PN.use_begin(), UE = PN.use_end(); UI != UE;
- ++UI) {
- LoadInst *LI = dyn_cast<LoadInst>(*UI);
- if (LI == 0 || !LI->isSimple())
+ for (User *U : PN.users()) {
+ LoadInst *LI = dyn_cast<LoadInst>(U);
+ if (!LI || !LI->isSimple())
return false;
// For now we only allow loads in the same block as the PHI. This is
@@ -1057,7 +1130,7 @@ static bool isSafePHIToSpeculate(PHINode &PN,
// If this pointer is always safe to load, or if we can prove that there
// is already a load in the block, then we can move the load to the pred
// block.
- if (InVal->isDereferenceablePointer() ||
+ if (InVal->isDereferenceablePointer(DL) ||
isSafeToLoadUnconditionally(InVal, TI, MaxAlign, DL))
continue;
@@ -1077,13 +1150,13 @@ static void speculatePHINodeLoads(PHINode &PN) {
// Get the TBAA tag and alignment to use from one of the loads. It doesn't
// matter which one we get and if any differ.
- LoadInst *SomeLoad = cast<LoadInst>(*PN.use_begin());
+ LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
unsigned Align = SomeLoad->getAlignment();
// Rewrite all loads of the PN to use the new PHI.
while (!PN.use_empty()) {
- LoadInst *LI = cast<LoadInst>(*PN.use_begin());
+ LoadInst *LI = cast<LoadInst>(PN.user_back());
LI->replaceAllUsesWith(NewPN);
LI->eraseFromParent();
}
@@ -1121,16 +1194,16 @@ static void speculatePHINodeLoads(PHINode &PN) {
///
/// We can do this to a select if its only uses are loads and if the operand
/// to the select can be loaded unconditionally.
-static bool isSafeSelectToSpeculate(SelectInst &SI, const DataLayout *DL = 0) {
+static bool isSafeSelectToSpeculate(SelectInst &SI,
+ const DataLayout *DL = nullptr) {
Value *TValue = SI.getTrueValue();
Value *FValue = SI.getFalseValue();
- bool TDerefable = TValue->isDereferenceablePointer();
- bool FDerefable = FValue->isDereferenceablePointer();
+ bool TDerefable = TValue->isDereferenceablePointer(DL);
+ bool FDerefable = FValue->isDereferenceablePointer(DL);
- for (Value::use_iterator UI = SI.use_begin(), UE = SI.use_end(); UI != UE;
- ++UI) {
- LoadInst *LI = dyn_cast<LoadInst>(*UI);
- if (LI == 0 || !LI->isSimple())
+ for (User *U : SI.users()) {
+ LoadInst *LI = dyn_cast<LoadInst>(U);
+ if (!LI || !LI->isSimple())
return false;
// Both operands to the select need to be dereferencable, either
@@ -1155,7 +1228,7 @@ static void speculateSelectInstLoads(SelectInst &SI) {
Value *FV = SI.getFalseValue();
// Replace the loads of the select with a select of two loads.
while (!SI.use_empty()) {
- LoadInst *LI = cast<LoadInst>(*SI.use_begin());
+ LoadInst *LI = cast<LoadInst>(SI.user_back());
assert(LI->isSimple() && "We only speculate simple loads");
IRB.SetInsertPoint(LI);
@@ -1188,7 +1261,7 @@ static void speculateSelectInstLoads(SelectInst &SI) {
/// This will return the BasePtr if that is valid, or build a new GEP
/// instruction using the IRBuilder if GEP-ing is needed.
static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
- SmallVectorImpl<Value *> &Indices) {
+ SmallVectorImpl<Value *> &Indices, Twine NamePrefix) {
if (Indices.empty())
return BasePtr;
@@ -1197,7 +1270,7 @@ static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
return BasePtr;
- return IRB.CreateInBoundsGEP(BasePtr, Indices, "idx");
+ return IRB.CreateInBoundsGEP(BasePtr, Indices, NamePrefix + "sroa_idx");
}
/// \brief Get a natural GEP off of the BasePtr walking through Ty toward
@@ -1211,9 +1284,13 @@ static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
/// indicated by Indices to have the correct offset.
static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
Value *BasePtr, Type *Ty, Type *TargetTy,
- SmallVectorImpl<Value *> &Indices) {
+ SmallVectorImpl<Value *> &Indices,
+ Twine NamePrefix) {
if (Ty == TargetTy)
- return buildGEP(IRB, BasePtr, Indices);
+ return buildGEP(IRB, BasePtr, Indices, NamePrefix);
+
+ // Pointer size to use for the indices.
+ unsigned PtrSize = DL.getPointerTypeSizeInBits(BasePtr->getType());
// See if we can descend into a struct and locate a field with the correct
// type.
@@ -1222,11 +1299,13 @@ static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
do {
if (ElementTy->isPointerTy())
break;
- if (SequentialType *SeqTy = dyn_cast<SequentialType>(ElementTy)) {
- ElementTy = SeqTy->getElementType();
- // Note that we use the default address space as this index is over an
- // array or a vector, not a pointer.
- Indices.push_back(IRB.getInt(APInt(DL.getPointerSizeInBits(0), 0)));
+
+ if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) {
+ ElementTy = ArrayTy->getElementType();
+ Indices.push_back(IRB.getIntN(PtrSize, 0));
+ } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) {
+ ElementTy = VectorTy->getElementType();
+ Indices.push_back(IRB.getInt32(0));
} else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
if (STy->element_begin() == STy->element_end())
break; // Nothing left to descend into.
@@ -1240,7 +1319,7 @@ static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
if (ElementTy != TargetTy)
Indices.erase(Indices.end() - NumLayers, Indices.end());
- return buildGEP(IRB, BasePtr, Indices);
+ return buildGEP(IRB, BasePtr, Indices, NamePrefix);
}
/// \brief Recursively compute indices for a natural GEP.
@@ -1250,29 +1329,32 @@ static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
Value *Ptr, Type *Ty, APInt &Offset,
Type *TargetTy,
- SmallVectorImpl<Value *> &Indices) {
+ SmallVectorImpl<Value *> &Indices,
+ Twine NamePrefix) {
if (Offset == 0)
- return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices);
+ return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices, NamePrefix);
// We can't recurse through pointer types.
if (Ty->isPointerTy())
- return 0;
+ return nullptr;
// We try to analyze GEPs over vectors here, but note that these GEPs are
// extremely poorly defined currently. The long-term goal is to remove GEPing
// over a vector from the IR completely.
if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType());
- if (ElementSizeInBits % 8)
- return 0; // GEPs over non-multiple of 8 size vector elements are invalid.
+ if (ElementSizeInBits % 8 != 0) {
+ // GEPs over non-multiple of 8 size vector elements are invalid.
+ return nullptr;
+ }
APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
APInt NumSkippedElements = Offset.sdiv(ElementSize);
if (NumSkippedElements.ugt(VecTy->getNumElements()))
- return 0;
+ return nullptr;
Offset -= NumSkippedElements * ElementSize;
Indices.push_back(IRB.getInt(NumSkippedElements));
return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(),
- Offset, TargetTy, Indices);
+ Offset, TargetTy, Indices, NamePrefix);
}
if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
@@ -1280,31 +1362,31 @@ static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
APInt NumSkippedElements = Offset.sdiv(ElementSize);
if (NumSkippedElements.ugt(ArrTy->getNumElements()))
- return 0;
+ return nullptr;
Offset -= NumSkippedElements * ElementSize;
Indices.push_back(IRB.getInt(NumSkippedElements));
return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
- Indices);
+ Indices, NamePrefix);
}
StructType *STy = dyn_cast<StructType>(Ty);
if (!STy)
- return 0;
+ return nullptr;
const StructLayout *SL = DL.getStructLayout(STy);
uint64_t StructOffset = Offset.getZExtValue();
if (StructOffset >= SL->getSizeInBytes())
- return 0;
+ return nullptr;
unsigned Index = SL->getElementContainingOffset(StructOffset);
Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
Type *ElementTy = STy->getElementType(Index);
if (Offset.uge(DL.getTypeAllocSize(ElementTy)))
- return 0; // The offset points into alignment padding.
+ return nullptr; // The offset points into alignment padding.
Indices.push_back(IRB.getInt32(Index));
return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
- Indices);
+ Indices, NamePrefix);
}
/// \brief Get a natural GEP from a base pointer to a particular offset and
@@ -1319,26 +1401,27 @@ static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
/// If no natural GEP can be constructed, this function returns null.
static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
Value *Ptr, APInt Offset, Type *TargetTy,
- SmallVectorImpl<Value *> &Indices) {
+ SmallVectorImpl<Value *> &Indices,
+ Twine NamePrefix) {
PointerType *Ty = cast<PointerType>(Ptr->getType());
// Don't consider any GEPs through an i8* as natural unless the TargetTy is
// an i8.
- if (Ty == IRB.getInt8PtrTy() && TargetTy->isIntegerTy(8))
- return 0;
+ if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8))
+ return nullptr;
Type *ElementTy = Ty->getElementType();
if (!ElementTy->isSized())
- return 0; // We can't GEP through an unsized element.
+ return nullptr; // We can't GEP through an unsized element.
APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
if (ElementSize == 0)
- return 0; // Zero-length arrays can't help us build a natural GEP.
+ return nullptr; // Zero-length arrays can't help us build a natural GEP.
APInt NumSkippedElements = Offset.sdiv(ElementSize);
Offset -= NumSkippedElements * ElementSize;
Indices.push_back(IRB.getInt(NumSkippedElements));
return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
- Indices);
+ Indices, NamePrefix);
}
/// \brief Compute an adjusted pointer from Ptr by Offset bytes where the
@@ -1356,8 +1439,9 @@ static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
/// properties. The algorithm tries to fold as many constant indices into
/// a single GEP as possible, thus making each GEP more independent of the
/// surrounding code.
-static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL,
- Value *Ptr, APInt Offset, Type *PointerTy) {
+static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
+ APInt Offset, Type *PointerTy,
+ Twine NamePrefix) {
// Even though we don't look through PHI nodes, we could be called on an
// instruction in an unreachable block, which may be on a cycle.
SmallPtrSet<Value *, 4> Visited;
@@ -1367,11 +1451,11 @@ static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL,
// We may end up computing an offset pointer that has the wrong type. If we
// never are able to compute one directly that has the correct type, we'll
// fall back to it, so keep it around here.
- Value *OffsetPtr = 0;
+ Value *OffsetPtr = nullptr;
// Remember any i8 pointer we come across to re-use if we need to do a raw
// byte offset.
- Value *Int8Ptr = 0;
+ Value *Int8Ptr = nullptr;
APInt Int8PtrOffset(Offset.getBitWidth(), 0);
Type *TargetTy = PointerTy->getPointerElementType();
@@ -1391,7 +1475,7 @@ static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL,
// See if we can perform a natural GEP here.
Indices.clear();
if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
- Indices)) {
+ Indices, NamePrefix)) {
if (P->getType() == PointerTy) {
// Zap any offset pointer that we ended up computing in previous rounds.
if (OffsetPtr && OffsetPtr->use_empty())
@@ -1425,20 +1509,21 @@ static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL,
if (!OffsetPtr) {
if (!Int8Ptr) {
- Int8Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy(),
- "raw_cast");
+ Int8Ptr = IRB.CreateBitCast(
+ Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()),
+ NamePrefix + "sroa_raw_cast");
Int8PtrOffset = Offset;
}
OffsetPtr = Int8PtrOffset == 0 ? Int8Ptr :
IRB.CreateInBoundsGEP(Int8Ptr, IRB.getInt(Int8PtrOffset),
- "raw_idx");
+ NamePrefix + "sroa_raw_idx");
}
Ptr = OffsetPtr;
// On the off chance we were targeting i8*, guard the bitcast here.
if (Ptr->getType() != PointerTy)
- Ptr = IRB.CreateBitCast(Ptr, PointerTy, "cast");
+ Ptr = IRB.CreateBitCast(Ptr, PointerTy, NamePrefix + "sroa_cast");
return Ptr;
}
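
getAdjustedPtr and the getNaturalGEP* helpers above fold a constant byte offset into GEP indices by peeling off array elements and struct fields until nothing (or only padding) remains, falling back to a raw i8* GEP when that fails. A minimal standalone model of that decomposition, using hand-written sizes instead of DataLayout; the element layout below is an invented example, not taken from the patch:

    #include <cstdint>
    #include <cstdio>

    // Model of [N x { i32, i64 }] with a 16-byte element: field offsets 0 and 8.
    struct Field { uint64_t Offset, Size; };
    static const Field Fields[] = {{0, 4}, {8, 8}};
    static const uint64_t ElementSize = 16;

    // Fills Idx/FieldIdx and returns true when the offset lands exactly on a
    // field boundary; returns false for padding or mid-field offsets, which is
    // roughly where the real helpers give up and return nullptr.
    static bool decompose(uint64_t Offset, uint64_t &Idx, unsigned &FieldIdx) {
      Idx = Offset / ElementSize;                       // array index
      Offset %= ElementSize;
      FieldIdx = (Offset >= Fields[1].Offset) ? 1 : 0;  // "element containing offset"
      return Offset == Fields[FieldIdx].Offset;
    }

    int main() {
      for (uint64_t Off : {0, 8, 24, 12}) {
        uint64_t Idx; unsigned F;
        if (decompose(Off, Idx, F))
          std::printf("offset %llu -> element %llu, field %u\n",
                      (unsigned long long)Off, (unsigned long long)Idx, F);
        else
          std::printf("offset %llu -> no natural GEP\n", (unsigned long long)Off);
      }
      return 0;
    }
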
@@ -1931,16 +2016,22 @@ class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
// integer type will be stored here for easy access during rewriting.
IntegerType *IntTy;
- // The offset of the slice currently being rewritten.
+ // The original offset of the slice currently being rewritten relative to
+ // the original alloca.
uint64_t BeginOffset, EndOffset;
+ // The new offsets of the slice currently being rewritten relative to the
+ // original alloca.
+ uint64_t NewBeginOffset, NewEndOffset;
+
+ uint64_t SliceSize;
bool IsSplittable;
bool IsSplit;
Use *OldUse;
Instruction *OldPtr;
- // Output members carrying state about the result of visiting and rewriting
- // the slice of the alloca.
- bool IsUsedByRewrittenSpeculatableInstructions;
+ // Track post-rewrite users which are PHI nodes and Selects.
+ SmallPtrSetImpl<PHINode *> &PHIUsers;
+ SmallPtrSetImpl<SelectInst *> &SelectUsers;
// Utility IR builder, whose name prefix is setup for each visited use, and
// the insertion point is set to point to the user.
@@ -1949,22 +2040,25 @@ class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
public:
AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &S, SROA &Pass,
AllocaInst &OldAI, AllocaInst &NewAI,
- uint64_t NewBeginOffset, uint64_t NewEndOffset,
- bool IsVectorPromotable = false,
- bool IsIntegerPromotable = false)
+ uint64_t NewAllocaBeginOffset,
+ uint64_t NewAllocaEndOffset, bool IsVectorPromotable,
+ bool IsIntegerPromotable,
+ SmallPtrSetImpl<PHINode *> &PHIUsers,
+ SmallPtrSetImpl<SelectInst *> &SelectUsers)
: DL(DL), S(S), Pass(Pass), OldAI(OldAI), NewAI(NewAI),
- NewAllocaBeginOffset(NewBeginOffset), NewAllocaEndOffset(NewEndOffset),
+ NewAllocaBeginOffset(NewAllocaBeginOffset),
+ NewAllocaEndOffset(NewAllocaEndOffset),
NewAllocaTy(NewAI.getAllocatedType()),
- VecTy(IsVectorPromotable ? cast<VectorType>(NewAllocaTy) : 0),
- ElementTy(VecTy ? VecTy->getElementType() : 0),
+ VecTy(IsVectorPromotable ? cast<VectorType>(NewAllocaTy) : nullptr),
+ ElementTy(VecTy ? VecTy->getElementType() : nullptr),
ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0),
IntTy(IsIntegerPromotable
? Type::getIntNTy(
NewAI.getContext(),
DL.getTypeSizeInBits(NewAI.getAllocatedType()))
- : 0),
+ : nullptr),
BeginOffset(), EndOffset(), IsSplittable(), IsSplit(), OldUse(),
- OldPtr(), IsUsedByRewrittenSpeculatableInstructions(false),
+ OldPtr(), PHIUsers(PHIUsers), SelectUsers(SelectUsers),
IRB(NewAI.getContext(), ConstantFolder()) {
if (VecTy) {
assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 &&
@@ -1983,6 +2077,14 @@ public:
IsSplit =
BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset;
+ // Compute the intersecting offset range.
+ assert(BeginOffset < NewAllocaEndOffset);
+ assert(EndOffset > NewAllocaBeginOffset);
+ NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
+ NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
+
+ SliceSize = NewEndOffset - NewBeginOffset;
+
OldUse = I->getUse();
OldPtr = cast<Instruction>(OldUse->get());
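
The rewriter now clamps each slice to the current partition once, up front: the rewritten range is the intersection of the slice's [BeginOffset, EndOffset) with the new alloca's [NewAllocaBeginOffset, NewAllocaEndOffset), and SliceSize is the width of that intersection. A small numeric sketch of that arithmetic, with invented offsets:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    int main() {
      // A slice covering bytes [4, 20) of the original alloca, being rewritten
      // against a new partition carved out at [8, 16):
      uint64_t BeginOffset = 4, EndOffset = 20;
      uint64_t NewAllocaBeginOffset = 8, NewAllocaEndOffset = 16;
      uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); // 8
      uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);       // 16
      uint64_t SliceSize = NewEndOffset - NewBeginOffset;                    // 8
      std::cout << "[" << NewBeginOffset << ", " << NewEndOffset << ") size "
                << SliceSize << "\n";
      return 0;
    }
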
@@ -1997,20 +2099,6 @@ public:
return CanSROA;
}
- /// \brief Query whether this slice is used by speculatable instructions after
- /// rewriting.
- ///
- /// These instructions (PHIs and Selects currently) require the alloca slice
- /// to run back through the rewriter. Thus, they are promotable, but not on
- /// this iteration. This is distinct from a slice which is unpromotable for
- /// some other reason, in which case we don't even want to perform the
- /// speculation. This can be querried at any time and reflects whether (at
- /// that point) a visit call has rewritten a speculatable instruction on the
- /// current slice.
- bool isUsedByRewrittenSpeculatableInstructions() const {
- return IsUsedByRewrittenSpeculatableInstructions;
- }
-
private:
// Make sure the other visit overloads are visible.
using Base::visit;
@@ -2021,30 +2109,53 @@ private:
llvm_unreachable("No rewrite rule for this instruction!");
}
- Value *getAdjustedAllocaPtr(IRBuilderTy &IRB, uint64_t Offset,
- Type *PointerTy) {
- assert(Offset >= NewAllocaBeginOffset);
- return getAdjustedPtr(IRB, DL, &NewAI, APInt(DL.getPointerSizeInBits(),
- Offset - NewAllocaBeginOffset),
- PointerTy);
+ Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) {
+ // Note that the offset computation can use BeginOffset or NewBeginOffset
+ // interchangeably for unsplit slices.
+ assert(IsSplit || BeginOffset == NewBeginOffset);
+ uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
+
+#ifndef NDEBUG
+ StringRef OldName = OldPtr->getName();
+ // Skip through the last '.sroa.' component of the name.
+ size_t LastSROAPrefix = OldName.rfind(".sroa.");
+ if (LastSROAPrefix != StringRef::npos) {
+ OldName = OldName.substr(LastSROAPrefix + strlen(".sroa."));
+ // Look for an SROA slice index.
+ size_t IndexEnd = OldName.find_first_not_of("0123456789");
+ if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') {
+ // Strip the index and look for the offset.
+ OldName = OldName.substr(IndexEnd + 1);
+ size_t OffsetEnd = OldName.find_first_not_of("0123456789");
+ if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.')
+ // Strip the offset.
+ OldName = OldName.substr(OffsetEnd + 1);
+ }
+ }
+ // Strip any SROA suffixes as well.
+ OldName = OldName.substr(0, OldName.find(".sroa_"));
+#endif
+
+ return getAdjustedPtr(IRB, DL, &NewAI,
+ APInt(DL.getPointerSizeInBits(), Offset), PointerTy,
+#ifndef NDEBUG
+ Twine(OldName) + "."
+#else
+ Twine()
+#endif
+ );
}
- /// \brief Compute suitable alignment to access an offset into the new alloca.
- unsigned getOffsetAlign(uint64_t Offset) {
+ /// \brief Compute suitable alignment to access this slice of the *new* alloca.
+ ///
+ /// You can optionally pass a type to this routine and if that type's ABI
+ /// alignment is itself suitable, this will return zero.
+ unsigned getSliceAlign(Type *Ty = nullptr) {
unsigned NewAIAlign = NewAI.getAlignment();
if (!NewAIAlign)
NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType());
- return MinAlign(NewAIAlign, Offset);
- }
-
- /// \brief Compute suitable alignment to access a type at an offset of the
- /// new alloca.
- ///
- /// \returns zero if the type's ABI alignment is a suitable alignment,
- /// otherwise returns the maximal suitable alignment.
- unsigned getOffsetTypeAlign(Type *Ty, uint64_t Offset) {
- unsigned Align = getOffsetAlign(Offset);
- return Align == DL.getABITypeAlignment(Ty) ? 0 : Align;
+ unsigned Align = MinAlign(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset);
+ return (Ty && Align == DL.getABITypeAlignment(Ty)) ? 0 : Align;
}
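
getSliceAlign combines the alloca's alignment with the slice's starting offset through MinAlign, which keeps only the largest power of two dividing both, and the optional type argument lets callers drop the alignment entirely when it already matches the ABI alignment. A tiny worked sketch of that arithmetic; the helper below mirrors llvm::MinAlign and the numbers are invented:

    #include <cstdint>
    #include <iostream>

    // Largest power of two dividing both A and B, as in llvm::MinAlign.
    static uint64_t MinAlign(uint64_t A, uint64_t B) {
      return (A | B) & (1 + ~(A | B));
    }

    int main() {
      uint64_t AllocaAlign = 16;                      // new alloca is 16-byte aligned
      std::cout << MinAlign(AllocaAlign, 0) << "\n";  // offset 0  -> keeps 16
      std::cout << MinAlign(AllocaAlign, 4) << "\n";  // offset 4  -> drops to 4
      std::cout << MinAlign(AllocaAlign, 8) << "\n";  // offset 8  -> drops to 8
      std::cout << MinAlign(AllocaAlign, 24) << "\n"; // offset 24 -> drops to 8
      return 0;
    }
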
unsigned getIndex(uint64_t Offset) {
@@ -2062,8 +2173,7 @@ private:
Pass.DeadInsts.insert(I);
}
- Value *rewriteVectorizedLoadInst(uint64_t NewBeginOffset,
- uint64_t NewEndOffset) {
+ Value *rewriteVectorizedLoadInst() {
unsigned BeginIndex = getIndex(NewBeginOffset);
unsigned EndIndex = getIndex(NewEndOffset);
assert(EndIndex > BeginIndex && "Empty vector!");
@@ -2073,8 +2183,7 @@ private:
return extractVector(IRB, V, BeginIndex, EndIndex, "vec");
}
- Value *rewriteIntegerLoad(LoadInst &LI, uint64_t NewBeginOffset,
- uint64_t NewEndOffset) {
+ Value *rewriteIntegerLoad(LoadInst &LI) {
assert(IntTy && "We cannot insert an integer to the alloca");
assert(!LI.isVolatile());
Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
@@ -2093,32 +2202,23 @@ private:
Value *OldOp = LI.getOperand(0);
assert(OldOp == OldPtr);
- // Compute the intersecting offset range.
- assert(BeginOffset < NewAllocaEndOffset);
- assert(EndOffset > NewAllocaBeginOffset);
- uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
- uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
-
- uint64_t Size = NewEndOffset - NewBeginOffset;
-
- Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), Size * 8)
+ Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), SliceSize * 8)
: LI.getType();
bool IsPtrAdjusted = false;
Value *V;
if (VecTy) {
- V = rewriteVectorizedLoadInst(NewBeginOffset, NewEndOffset);
+ V = rewriteVectorizedLoadInst();
} else if (IntTy && LI.getType()->isIntegerTy()) {
- V = rewriteIntegerLoad(LI, NewBeginOffset, NewEndOffset);
+ V = rewriteIntegerLoad(LI);
} else if (NewBeginOffset == NewAllocaBeginOffset &&
canConvertValue(DL, NewAllocaTy, LI.getType())) {
V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
- LI.isVolatile(), "load");
+ LI.isVolatile(), LI.getName());
} else {
Type *LTy = TargetTy->getPointerTo();
- V = IRB.CreateAlignedLoad(
- getAdjustedAllocaPtr(IRB, NewBeginOffset, LTy),
- getOffsetTypeAlign(TargetTy, NewBeginOffset - NewAllocaBeginOffset),
- LI.isVolatile(), "load");
+ V = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy),
+ getSliceAlign(TargetTy), LI.isVolatile(),
+ LI.getName());
IsPtrAdjusted = true;
}
V = convertValue(DL, IRB, V, TargetTy);
@@ -2127,13 +2227,13 @@ private:
assert(!LI.isVolatile());
assert(LI.getType()->isIntegerTy() &&
"Only integer type loads and stores are split");
- assert(Size < DL.getTypeStoreSize(LI.getType()) &&
+ assert(SliceSize < DL.getTypeStoreSize(LI.getType()) &&
"Split load isn't smaller than original load");
assert(LI.getType()->getIntegerBitWidth() ==
DL.getTypeStoreSizeInBits(LI.getType()) &&
"Non-byte-multiple bit width");
// Move the insertion point just past the load so that we can refer to it.
- IRB.SetInsertPoint(llvm::next(BasicBlock::iterator(&LI)));
+ IRB.SetInsertPoint(std::next(BasicBlock::iterator(&LI)));
// Create a placeholder value with the same type as LI to use as the
// basis for the new value. This allows us to replace the uses of LI with
// the computed value, and then replace the placeholder with LI, leaving
@@ -2155,9 +2255,7 @@ private:
return !LI.isVolatile() && !IsPtrAdjusted;
}
- bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp,
- uint64_t NewBeginOffset,
- uint64_t NewEndOffset) {
+ bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp) {
if (V->getType() != VecTy) {
unsigned BeginIndex = getIndex(NewBeginOffset);
unsigned EndIndex = getIndex(NewEndOffset);
@@ -2183,8 +2281,7 @@ private:
return true;
}
- bool rewriteIntegerStore(Value *V, StoreInst &SI,
- uint64_t NewBeginOffset, uint64_t NewEndOffset) {
+ bool rewriteIntegerStore(Value *V, StoreInst &SI) {
assert(IntTy && "We cannot extract an integer from the alloca");
assert(!SI.isVolatile());
if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
@@ -2217,30 +2314,22 @@ private:
if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets()))
Pass.PostPromotionWorklist.insert(AI);
- // Compute the intersecting offset range.
- assert(BeginOffset < NewAllocaEndOffset);
- assert(EndOffset > NewAllocaBeginOffset);
- uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
- uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
-
- uint64_t Size = NewEndOffset - NewBeginOffset;
- if (Size < DL.getTypeStoreSize(V->getType())) {
+ if (SliceSize < DL.getTypeStoreSize(V->getType())) {
assert(!SI.isVolatile());
assert(V->getType()->isIntegerTy() &&
"Only integer type loads and stores are split");
assert(V->getType()->getIntegerBitWidth() ==
DL.getTypeStoreSizeInBits(V->getType()) &&
"Non-byte-multiple bit width");
- IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), Size * 8);
+ IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8);
V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset,
"extract");
}
if (VecTy)
- return rewriteVectorizedStoreInst(V, SI, OldOp, NewBeginOffset,
- NewEndOffset);
+ return rewriteVectorizedStoreInst(V, SI, OldOp);
if (IntTy && V->getType()->isIntegerTy())
- return rewriteIntegerStore(V, SI, NewBeginOffset, NewEndOffset);
+ return rewriteIntegerStore(V, SI);
StoreInst *NewSI;
if (NewBeginOffset == NewAllocaBeginOffset &&
@@ -2250,12 +2339,9 @@ private:
NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
SI.isVolatile());
} else {
- Value *NewPtr = getAdjustedAllocaPtr(IRB, NewBeginOffset,
- V->getType()->getPointerTo());
- NewSI = IRB.CreateAlignedStore(
- V, NewPtr, getOffsetTypeAlign(
- V->getType(), NewBeginOffset - NewAllocaBeginOffset),
- SI.isVolatile());
+ Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo());
+ NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()),
+ SI.isVolatile());
}
(void)NewSI;
Pass.DeadInsts.insert(&SI);
@@ -2307,11 +2393,10 @@ private:
// pointer to the new alloca.
if (!isa<Constant>(II.getLength())) {
assert(!IsSplit);
- assert(BeginOffset >= NewAllocaBeginOffset);
- II.setDest(
- getAdjustedAllocaPtr(IRB, BeginOffset, II.getRawDest()->getType()));
+ assert(NewBeginOffset == BeginOffset);
+ II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType()));
Type *CstTy = II.getAlignmentCst()->getType();
- II.setAlignment(ConstantInt::get(CstTy, getOffsetAlign(BeginOffset)));
+ II.setAlignment(ConstantInt::get(CstTy, getSliceAlign()));
deleteIfTriviallyDead(OldPtr);
return false;
@@ -2323,13 +2408,6 @@ private:
Type *AllocaTy = NewAI.getAllocatedType();
Type *ScalarTy = AllocaTy->getScalarType();
- // Compute the intersecting offset range.
- assert(BeginOffset < NewAllocaEndOffset);
- assert(EndOffset > NewAllocaBeginOffset);
- uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
- uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
- uint64_t SliceOffset = NewBeginOffset - NewAllocaBeginOffset;
-
// If this doesn't map cleanly onto the alloca type, and that type isn't
// a single value type, just emit a memset.
if (!VecTy && !IntTy &&
@@ -2341,8 +2419,8 @@ private:
Type *SizeTy = II.getLength()->getType();
Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
CallInst *New = IRB.CreateMemSet(
- getAdjustedAllocaPtr(IRB, NewBeginOffset, II.getRawDest()->getType()),
- II.getValue(), Size, getOffsetAlign(SliceOffset), II.isVolatile());
+ getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size,
+ getSliceAlign(), II.isVolatile());
(void)New;
DEBUG(dbgs() << " to: " << *New << "\n");
return false;
@@ -2419,25 +2497,11 @@ private:
DEBUG(dbgs() << " original: " << II << "\n");
- // Compute the intersecting offset range.
- assert(BeginOffset < NewAllocaEndOffset);
- assert(EndOffset > NewAllocaBeginOffset);
- uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
- uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
-
- assert(II.getRawSource() == OldPtr || II.getRawDest() == OldPtr);
- bool IsDest = II.getRawDest() == OldPtr;
+ bool IsDest = &II.getRawDestUse() == OldUse;
+ assert((IsDest && II.getRawDest() == OldPtr) ||
+ (!IsDest && II.getRawSource() == OldPtr));
- // Compute the relative offset within the transfer.
- unsigned IntPtrWidth = DL.getPointerSizeInBits();
- APInt RelOffset(IntPtrWidth, NewBeginOffset - BeginOffset);
-
- unsigned Align = II.getAlignment();
- uint64_t SliceOffset = NewBeginOffset - NewAllocaBeginOffset;
- if (Align > 1)
- Align =
- MinAlign(RelOffset.zextOrTrunc(64).getZExtValue(),
- MinAlign(II.getAlignment(), getOffsetAlign(SliceOffset)));
+ unsigned SliceAlign = getSliceAlign();
// For unsplit intrinsics, we simply modify the source and destination
// pointers in place. This isn't just an optimization, it is a matter of
@@ -2447,19 +2511,20 @@ private:
// memcpy, and so simply updating the pointers is the necessary for us to
// update both source and dest of a single call.
if (!IsSplittable) {
- Value *OldOp = IsDest ? II.getRawDest() : II.getRawSource();
+ Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
if (IsDest)
- II.setDest(
- getAdjustedAllocaPtr(IRB, BeginOffset, II.getRawDest()->getType()));
+ II.setDest(AdjustedPtr);
else
- II.setSource(getAdjustedAllocaPtr(IRB, BeginOffset,
- II.getRawSource()->getType()));
+ II.setSource(AdjustedPtr);
- Type *CstTy = II.getAlignmentCst()->getType();
- II.setAlignment(ConstantInt::get(CstTy, Align));
+ if (II.getAlignment() > SliceAlign) {
+ Type *CstTy = II.getAlignmentCst()->getType();
+ II.setAlignment(
+ ConstantInt::get(CstTy, MinAlign(II.getAlignment(), SliceAlign)));
+ }
DEBUG(dbgs() << " to: " << II << "\n");
- deleteIfTriviallyDead(OldOp);
+ deleteIfTriviallyDead(OldPtr);
return false;
}
// For split transfer intrinsics we have an incredibly useful assurance:
@@ -2495,37 +2560,39 @@ private:
// alloca that should be re-examined after rewriting this instruction.
Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest();
if (AllocaInst *AI
- = dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets()))
+ = dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) {
+ assert(AI != &OldAI && AI != &NewAI &&
+ "Splittable transfers cannot reach the same alloca on both ends.");
Pass.Worklist.insert(AI);
+ }
- if (EmitMemCpy) {
- Type *OtherPtrTy = IsDest ? II.getRawSource()->getType()
- : II.getRawDest()->getType();
+ Type *OtherPtrTy = OtherPtr->getType();
+ unsigned OtherAS = OtherPtrTy->getPointerAddressSpace();
+ // Compute the relative offset for the other pointer within the transfer.
+ unsigned IntPtrWidth = DL.getPointerSizeInBits(OtherAS);
+ APInt OtherOffset(IntPtrWidth, NewBeginOffset - BeginOffset);
+ unsigned OtherAlign = MinAlign(II.getAlignment() ? II.getAlignment() : 1,
+ OtherOffset.zextOrTrunc(64).getZExtValue());
+
+ if (EmitMemCpy) {
// Compute the other pointer, folding as much as possible to produce
// a single, simple GEP in most cases.
- OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, RelOffset, OtherPtrTy);
+ OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
+ OtherPtr->getName() + ".");
- Value *OurPtr = getAdjustedAllocaPtr(
- IRB, NewBeginOffset,
- IsDest ? II.getRawDest()->getType() : II.getRawSource()->getType());
+ Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
Type *SizeTy = II.getLength()->getType();
Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
- CallInst *New = IRB.CreateMemCpy(IsDest ? OurPtr : OtherPtr,
- IsDest ? OtherPtr : OurPtr,
- Size, Align, II.isVolatile());
+ CallInst *New = IRB.CreateMemCpy(
+ IsDest ? OurPtr : OtherPtr, IsDest ? OtherPtr : OurPtr, Size,
+ MinAlign(SliceAlign, OtherAlign), II.isVolatile());
(void)New;
DEBUG(dbgs() << " to: " << *New << "\n");
return false;
}
- // Note that we clamp the alignment to 1 here as a 0 alignment for a memcpy
- // is equivalent to 1, but that isn't true if we end up rewriting this as
- // a load or store.
- if (!Align)
- Align = 1;
-
bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset &&
NewEndOffset == NewAllocaEndOffset;
uint64_t Size = NewEndOffset - NewBeginOffset;
@@ -2533,24 +2600,32 @@ private:
unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0;
unsigned NumElements = EndIndex - BeginIndex;
IntegerType *SubIntTy
- = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : 0;
+ = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : nullptr;
- Type *OtherPtrTy = NewAI.getType();
+ // Reset the other pointer type to match the register type we're going to
+ // use, but using the address space of the original other pointer.
if (VecTy && !IsWholeAlloca) {
if (NumElements == 1)
OtherPtrTy = VecTy->getElementType();
else
OtherPtrTy = VectorType::get(VecTy->getElementType(), NumElements);
- OtherPtrTy = OtherPtrTy->getPointerTo();
+ OtherPtrTy = OtherPtrTy->getPointerTo(OtherAS);
} else if (IntTy && !IsWholeAlloca) {
- OtherPtrTy = SubIntTy->getPointerTo();
+ OtherPtrTy = SubIntTy->getPointerTo(OtherAS);
+ } else {
+ OtherPtrTy = NewAllocaTy->getPointerTo(OtherAS);
}
- Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, RelOffset, OtherPtrTy);
+ Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
+ OtherPtr->getName() + ".");
+ unsigned SrcAlign = OtherAlign;
Value *DstPtr = &NewAI;
- if (!IsDest)
+ unsigned DstAlign = SliceAlign;
+ if (!IsDest) {
std::swap(SrcPtr, DstPtr);
+ std::swap(SrcAlign, DstAlign);
+ }
Value *Src;
if (VecTy && !IsWholeAlloca && !IsDest) {
@@ -2564,7 +2639,7 @@ private:
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
} else {
- Src = IRB.CreateAlignedLoad(SrcPtr, Align, II.isVolatile(),
+ Src = IRB.CreateAlignedLoad(SrcPtr, SrcAlign, II.isVolatile(),
"copyload");
}
@@ -2582,7 +2657,7 @@ private:
}
StoreInst *Store = cast<StoreInst>(
- IRB.CreateAlignedStore(Src, DstPtr, Align, II.isVolatile()));
+ IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile()));
(void)Store;
DEBUG(dbgs() << " to: " << *Store << "\n");
return !II.isVolatile();
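
For split transfers that do not need to remain memcpys, the code above replaces the intrinsic with an aligned load from the adjusted source pointer and an aligned store to the destination; once the new alloca is later promoted, the copy typically disappears altogether. Roughly, at the source level (types and sizes invented, not part of the patch):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Before rewriting: a whole-slice, constant-length memcpy into a local.
    uint64_t before(const uint64_t *Src) {
      uint64_t Tmp;                          // stands in for the alloca slice
      std::memcpy(&Tmp, Src, sizeof(Tmp));   // the transfer intrinsic
      return Tmp;
    }

    // Net effect after the load/store rewrite plus later promotion of the alloca.
    uint64_t after(const uint64_t *Src) {
      return *Src;
    }

    int main() {
      uint64_t V = 42;
      std::printf("%llu %llu\n", (unsigned long long)before(&V),
                  (unsigned long long)after(&V));
      return 0;
    }
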
@@ -2594,20 +2669,13 @@ private:
DEBUG(dbgs() << " original: " << II << "\n");
assert(II.getArgOperand(1) == OldPtr);
- // Compute the intersecting offset range.
- assert(BeginOffset < NewAllocaEndOffset);
- assert(EndOffset > NewAllocaBeginOffset);
- uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
- uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
-
// Record this instruction for deletion.
Pass.DeadInsts.insert(&II);
ConstantInt *Size
= ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
NewEndOffset - NewBeginOffset);
- Value *Ptr =
- getAdjustedAllocaPtr(IRB, NewBeginOffset, II.getArgOperand(1)->getType());
+ Value *Ptr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
Value *New;
if (II.getIntrinsicID() == Intrinsic::lifetime_start)
New = IRB.CreateLifetimeStart(Ptr, Size);
@@ -2628,28 +2696,22 @@ private:
// as local as possible to the PHI. To do that, we re-use the location of
// the old pointer, which necessarily must be in the right position to
// dominate the PHI.
- IRBuilderTy PtrBuilder(OldPtr);
- PtrBuilder.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) +
- ".");
+ IRBuilderTy PtrBuilder(IRB);
+ PtrBuilder.SetInsertPoint(OldPtr);
+ PtrBuilder.SetCurrentDebugLocation(OldPtr->getDebugLoc());
- Value *NewPtr =
- getAdjustedAllocaPtr(PtrBuilder, BeginOffset, OldPtr->getType());
+ Value *NewPtr = getNewAllocaSlicePtr(PtrBuilder, OldPtr->getType());
// Replace the operands which were using the old pointer.
std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr);
DEBUG(dbgs() << " to: " << PN << "\n");
deleteIfTriviallyDead(OldPtr);
- // Check whether we can speculate this PHI node, and if so remember that
- // fact and queue it up for another iteration after the speculation
- // occurs.
- if (isSafePHIToSpeculate(PN, &DL)) {
- Pass.SpeculatablePHIs.insert(&PN);
- IsUsedByRewrittenSpeculatableInstructions = true;
- return true;
- }
-
- return false; // PHIs can't be promoted on their own.
+ // PHIs can't be promoted on their own, but often can be speculated. We
+ // check the speculation outside of the rewriter so that we see the
+ // fully-rewritten alloca.
+ PHIUsers.insert(&PN);
+ return true;
}
bool visitSelectInst(SelectInst &SI) {
@@ -2659,7 +2721,7 @@ private:
assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable");
assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable");
- Value *NewPtr = getAdjustedAllocaPtr(IRB, BeginOffset, OldPtr->getType());
+ Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
// Replace the operands which were using the old pointer.
if (SI.getOperand(1) == OldPtr)
SI.setOperand(1, NewPtr);
@@ -2669,16 +2731,11 @@ private:
DEBUG(dbgs() << " to: " << SI << "\n");
deleteIfTriviallyDead(OldPtr);
- // Check whether we can speculate this select instruction, and if so
- // remember that fact and queue it up for another iteration after the
- // speculation occurs.
- if (isSafeSelectToSpeculate(SI, &DL)) {
- Pass.SpeculatableSelects.insert(&SI);
- IsUsedByRewrittenSpeculatableInstructions = true;
- return true;
- }
-
- return false; // Selects can't be promoted on their own.
+ // Selects can't be promoted on their own, but often can be speculated. We
+ // check the speculation outside of the rewriter so that we see the
+ // fully-rewritten alloca.
+ SelectUsers.insert(&SI);
+ return true;
}
};
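
visitPHINode and visitSelectInst now only record the PHI and Select users; the safety checks run afterwards against the fully rewritten alloca, and successful speculation then pulls the memory access over the PHI or Select. The source-level shape of the pattern being handled looks roughly like this invented example:

    #include <cstdio>

    // A select of promotable alloca pointers: the load through P blocks
    // promotion until it is speculated into each arm, i.e. rewritten as
    // "C ? A : B".
    int select_demo(bool C) {
      int A = 1, B = 2;       // two stack slots SROA wants to promote
      int *P = C ? &A : &B;   // select of alloca pointers
      return *P;              // after speculation: return C ? A : B;
    }

    int main() {
      std::printf("%d %d\n", select_demo(true), select_demo(false));
      return 0;
    }
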
@@ -2726,10 +2783,9 @@ private:
/// Enqueue all the users of the given instruction for further processing.
/// This uses a set to de-duplicate users.
void enqueueUsers(Instruction &I) {
- for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
- ++UI)
- if (Visited.insert(*UI))
- Queue.push_back(&UI.getUse());
+ for (Use &U : I.uses())
+ if (Visited.insert(U.getUser()))
+ Queue.push_back(&U);
}
// Conservative default is to not rewrite anything.
@@ -2942,22 +2998,22 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty,
return stripAggregateTypeWrapping(DL, Ty);
if (Offset > DL.getTypeAllocSize(Ty) ||
(DL.getTypeAllocSize(Ty) - Offset) < Size)
- return 0;
+ return nullptr;
if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
// We can't partition pointers...
if (SeqTy->isPointerTy())
- return 0;
+ return nullptr;
Type *ElementTy = SeqTy->getElementType();
uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
uint64_t NumSkippedElements = Offset / ElementSize;
if (ArrayType *ArrTy = dyn_cast<ArrayType>(SeqTy)) {
if (NumSkippedElements >= ArrTy->getNumElements())
- return 0;
+ return nullptr;
} else if (VectorType *VecTy = dyn_cast<VectorType>(SeqTy)) {
if (NumSkippedElements >= VecTy->getNumElements())
- return 0;
+ return nullptr;
}
Offset -= NumSkippedElements * ElementSize;
@@ -2965,7 +3021,7 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty,
if (Offset > 0 || Size < ElementSize) {
// Bail if the partition ends in a different array element.
if ((Offset + Size) > ElementSize)
- return 0;
+ return nullptr;
// Recurse through the element type trying to peel off offset bytes.
return getTypePartition(DL, ElementTy, Offset, Size);
}
@@ -2976,20 +3032,20 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty,
assert(Size > ElementSize);
uint64_t NumElements = Size / ElementSize;
if (NumElements * ElementSize != Size)
- return 0;
+ return nullptr;
return ArrayType::get(ElementTy, NumElements);
}
StructType *STy = dyn_cast<StructType>(Ty);
if (!STy)
- return 0;
+ return nullptr;
const StructLayout *SL = DL.getStructLayout(STy);
if (Offset >= SL->getSizeInBytes())
- return 0;
+ return nullptr;
uint64_t EndOffset = Offset + Size;
if (EndOffset > SL->getSizeInBytes())
- return 0;
+ return nullptr;
unsigned Index = SL->getElementContainingOffset(Offset);
Offset -= SL->getElementOffset(Index);
@@ -2997,12 +3053,12 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty,
Type *ElementTy = STy->getElementType(Index);
uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
if (Offset >= ElementSize)
- return 0; // The offset points into alignment padding.
+ return nullptr; // The offset points into alignment padding.
// See if any partition must be contained by the element.
if (Offset > 0 || Size < ElementSize) {
if ((Offset + Size) > ElementSize)
- return 0;
+ return nullptr;
return getTypePartition(DL, ElementTy, Offset, Size);
}
assert(Offset == 0);
@@ -3015,14 +3071,14 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty,
if (EndOffset < SL->getSizeInBytes()) {
unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
if (Index == EndIndex)
- return 0; // Within a single element and its padding.
+ return nullptr; // Within a single element and its padding.
// Don't try to form "natural" types if the elements don't line up with the
// expected size.
// FIXME: We could potentially recurse down through the last element in the
// sub-struct to find a natural end point.
if (SL->getElementOffset(EndIndex) != EndOffset)
- return 0;
+ return nullptr;
assert(Index < EndIndex);
EE = STy->element_begin() + EndIndex;
@@ -3033,7 +3089,7 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty,
STy->isPacked());
const StructLayout *SubSL = DL.getStructLayout(SubTy);
if (Size != SubSL->getSizeInBytes())
- return 0; // The sub-struct doesn't have quite the size needed.
+ return nullptr; // The sub-struct doesn't have quite the size needed.
return SubTy;
}
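
getTypePartition succeeds only when the requested byte range [Offset, Offset+Size) lines up with whole elements of the type (possibly forming a sub-struct), and returns nullptr for padding or partially covered elements. A small worked illustration against an invented struct, assuming a typical x86-64 layout:

    #include <cstddef>
    #include <cstdio>

    struct S { int a; double b; int c; };  // offsets 0, 8, 16; sizeof(S) == 24

    int main() {
      // [8, 16) -> the double b: a clean "natural" partition.
      // [0, 8)  -> int a plus 4 bytes of padding: the candidate sub-struct {int}
      //            is only 4 bytes, so the real code would return nullptr.
      // [4, 8)  -> starts inside the padding after a: also nullptr.
      std::printf("a@%zu b@%zu c@%zu size=%zu\n",
                  offsetof(S, a), offsetof(S, b), offsetof(S, c), sizeof(S));
      return 0;
    }
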
@@ -3058,7 +3114,7 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S,
// Try to compute a friendly type for this partition of the alloca. This
// won't always succeed, in which case we fall back to a legal integer type
// or an i8 array of an appropriate size.
- Type *SliceTy = 0;
+ Type *SliceTy = nullptr;
if (Type *CommonUseTy = findCommonType(B, E, EndOffset))
if (DL->getTypeAllocSize(CommonUseTy) >= SliceSize)
SliceTy = CommonUseTy;
@@ -3105,7 +3161,7 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S,
// the alloca's alignment unconstrained.
if (Alignment <= DL->getABITypeAlignment(SliceTy))
Alignment = 0;
- NewAI = new AllocaInst(SliceTy, 0, Alignment,
+ NewAI = new AllocaInst(SliceTy, nullptr, Alignment,
AI.getName() + ".sroa." + Twine(B - S.begin()), &AI);
++NumNewAllocas;
}
@@ -3114,17 +3170,17 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S,
<< "[" << BeginOffset << "," << EndOffset << ") to: " << *NewAI
<< "\n");
- // Track the high watermark on several worklists that are only relevant for
+ // Track the high watermark on the worklist as it is only relevant for
// promoted allocas. We will reset it to this point if the alloca is not in
// fact scheduled for promotion.
unsigned PPWOldSize = PostPromotionWorklist.size();
- unsigned SPOldSize = SpeculatablePHIs.size();
- unsigned SSOldSize = SpeculatableSelects.size();
unsigned NumUses = 0;
+ SmallPtrSet<PHINode *, 8> PHIUsers;
+ SmallPtrSet<SelectInst *, 8> SelectUsers;
AllocaSliceRewriter Rewriter(*DL, S, *this, AI, *NewAI, BeginOffset,
EndOffset, IsVectorPromotable,
- IsIntegerPromotable);
+ IsIntegerPromotable, PHIUsers, SelectUsers);
bool Promotable = true;
for (ArrayRef<AllocaSlices::iterator>::const_iterator SUI = SplitUses.begin(),
SUE = SplitUses.end();
@@ -3145,50 +3201,60 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S,
MaxUsesPerAllocaPartition =
std::max<unsigned>(NumUses, MaxUsesPerAllocaPartition);
- if (Promotable && !Rewriter.isUsedByRewrittenSpeculatableInstructions()) {
- DEBUG(dbgs() << " and queuing for promotion\n");
- PromotableAllocas.push_back(NewAI);
- } else if (NewAI != &AI ||
- (Promotable &&
- Rewriter.isUsedByRewrittenSpeculatableInstructions())) {
+ // Now that we've processed all the slices in the new partition, check if any
+ // PHIs or Selects would block promotion.
+ for (SmallPtrSetImpl<PHINode *>::iterator I = PHIUsers.begin(),
+ E = PHIUsers.end();
+ I != E; ++I)
+ if (!isSafePHIToSpeculate(**I, DL)) {
+ Promotable = false;
+ PHIUsers.clear();
+ SelectUsers.clear();
+ break;
+ }
+ for (SmallPtrSetImpl<SelectInst *>::iterator I = SelectUsers.begin(),
+ E = SelectUsers.end();
+ I != E; ++I)
+ if (!isSafeSelectToSpeculate(**I, DL)) {
+ Promotable = false;
+ PHIUsers.clear();
+ SelectUsers.clear();
+ break;
+ }
+
+ if (Promotable) {
+ if (PHIUsers.empty() && SelectUsers.empty()) {
+ // Promote the alloca.
+ PromotableAllocas.push_back(NewAI);
+ } else {
+ // If we have either PHIs or Selects to speculate, add them to those
+ // worklists and re-queue the new alloca so that we promote it on the
+ // next iteration.
+ for (SmallPtrSetImpl<PHINode *>::iterator I = PHIUsers.begin(),
+ E = PHIUsers.end();
+ I != E; ++I)
+ SpeculatablePHIs.insert(*I);
+ for (SmallPtrSetImpl<SelectInst *>::iterator I = SelectUsers.begin(),
+ E = SelectUsers.end();
+ I != E; ++I)
+ SpeculatableSelects.insert(*I);
+ Worklist.insert(NewAI);
+ }
+ } else {
// If we can't promote the alloca, iterate on it to check for new
// refinements exposed by splitting the current alloca. Don't iterate on an
// alloca which didn't actually change and didn't get promoted.
- //
- // Alternatively, if we could promote the alloca but have speculatable
- // instructions then we will speculate them after finishing our processing
- // of the original alloca. Mark the new one for re-visiting in the next
- // iteration so the speculated operations can be rewritten.
- //
- // FIXME: We should actually track whether the rewriter changed anything.
- Worklist.insert(NewAI);
- }
-
- // Drop any post-promotion work items if promotion didn't happen.
- if (!Promotable) {
+ if (NewAI != &AI)
+ Worklist.insert(NewAI);
+
+ // Drop any post-promotion work items if promotion didn't happen.
while (PostPromotionWorklist.size() > PPWOldSize)
PostPromotionWorklist.pop_back();
- while (SpeculatablePHIs.size() > SPOldSize)
- SpeculatablePHIs.pop_back();
- while (SpeculatableSelects.size() > SSOldSize)
- SpeculatableSelects.pop_back();
}
return true;
}
-namespace {
-struct IsSliceEndLessOrEqualTo {
- uint64_t UpperBound;
-
- IsSliceEndLessOrEqualTo(uint64_t UpperBound) : UpperBound(UpperBound) {}
-
- bool operator()(const AllocaSlices::iterator &I) {
- return I->endOffset() <= UpperBound;
- }
-};
-}
-
static void
removeFinishedSplitUses(SmallVectorImpl<AllocaSlices::iterator> &SplitUses,
uint64_t &MaxSplitUseEndOffset, uint64_t Offset) {
@@ -3200,7 +3266,9 @@ removeFinishedSplitUses(SmallVectorImpl<AllocaSlices::iterator> &SplitUses,
size_t SplitUsesOldSize = SplitUses.size();
SplitUses.erase(std::remove_if(SplitUses.begin(), SplitUses.end(),
- IsSliceEndLessOrEqualTo(Offset)),
+ [Offset](const AllocaSlices::iterator &I) {
+ return I->endOffset() <= Offset;
+ }),
SplitUses.end());
if (SplitUsesOldSize == SplitUses.size())
return;
@@ -3227,7 +3295,7 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &S) {
uint64_t BeginOffset = S.begin()->beginOffset();
- for (AllocaSlices::iterator SI = S.begin(), SJ = llvm::next(SI), SE = S.end();
+ for (AllocaSlices::iterator SI = S.begin(), SJ = std::next(SI), SE = S.end();
SI != SE; SI = SJ) {
uint64_t MaxEndOffset = SI->endOffset();
@@ -3326,6 +3394,21 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &S) {
return Changed;
}
+/// \brief Clobber a use with undef, deleting the used value if it becomes dead.
+void SROA::clobberUse(Use &U) {
+ Value *OldV = U;
+ // Replace the use with an undef value.
+ U = UndefValue::get(OldV->getType());
+
+ // Check for this making an instruction dead. We have to garbage collect
+ // all the dead instructions to ensure the uses of any alloca end up being
+ // minimal.
+ if (Instruction *OldI = dyn_cast<Instruction>(OldV))
+ if (isInstructionTriviallyDead(OldI)) {
+ DeadInsts.insert(OldI);
+ }
+}
+
/// \brief Analyze an alloca for SROA.
///
/// This analyzes the alloca to ensure we can reason about it, builds
@@ -3363,21 +3446,22 @@ bool SROA::runOnAlloca(AllocaInst &AI) {
for (AllocaSlices::dead_user_iterator DI = S.dead_user_begin(),
DE = S.dead_user_end();
DI != DE; ++DI) {
- Changed = true;
+ // Free up everything used by this instruction.
+ for (Use &DeadOp : (*DI)->operands())
+ clobberUse(DeadOp);
+
+ // Now replace the uses of this instruction.
(*DI)->replaceAllUsesWith(UndefValue::get((*DI)->getType()));
+
+ // And mark it for deletion.
DeadInsts.insert(*DI);
+ Changed = true;
}
for (AllocaSlices::dead_op_iterator DO = S.dead_op_begin(),
DE = S.dead_op_end();
DO != DE; ++DO) {
- Value *OldV = **DO;
- // Clobber the use with an undef value.
- **DO = UndefValue::get(OldV->getType());
- if (Instruction *OldI = dyn_cast<Instruction>(OldV))
- if (isInstructionTriviallyDead(OldI)) {
- Changed = true;
- DeadInsts.insert(OldI);
- }
+ clobberUse(**DO);
+ Changed = true;
}
// No slices to split. Leave the dead alloca for a later pass to clean up.
@@ -3413,10 +3497,10 @@ void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst*, 4> &DeletedAllocas) {
I->replaceAllUsesWith(UndefValue::get(I->getType()));
- for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
- if (Instruction *U = dyn_cast<Instruction>(*OI)) {
+ for (Use &Operand : I->operands())
+ if (Instruction *U = dyn_cast<Instruction>(Operand)) {
// Zero out the operand and see if it becomes trivially dead.
- *OI = 0;
+ Operand = nullptr;
if (isInstructionTriviallyDead(U))
DeadInsts.insert(U);
}
@@ -3432,10 +3516,9 @@ void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst*, 4> &DeletedAllocas) {
static void enqueueUsersInWorklist(Instruction &I,
SmallVectorImpl<Instruction *> &Worklist,
SmallPtrSet<Instruction *, 8> &Visited) {
- for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
- ++UI)
- if (Visited.insert(cast<Instruction>(*UI)))
- Worklist.push_back(cast<Instruction>(*UI));
+ for (User *U : I.users())
+ if (Visited.insert(cast<Instruction>(U)))
+ Worklist.push_back(cast<Instruction>(U));
}
/// \brief Promote the allocas, using the best available technique.
@@ -3521,32 +3604,24 @@ bool SROA::promoteAllocas(Function &F) {
return true;
}
-namespace {
- /// \brief A predicate to test whether an alloca belongs to a set.
- class IsAllocaInSet {
- typedef SmallPtrSet<AllocaInst *, 4> SetType;
- const SetType &Set;
-
- public:
- typedef AllocaInst *argument_type;
-
- IsAllocaInSet(const SetType &Set) : Set(Set) {}
- bool operator()(AllocaInst *AI) const { return Set.count(AI); }
- };
-}
-
bool SROA::runOnFunction(Function &F) {
+ if (skipOptnoneFunction(F))
+ return false;
+
DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
C = &F.getContext();
- DL = getAnalysisIfAvailable<DataLayout>();
- if (!DL) {
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ if (!DLP) {
DEBUG(dbgs() << " Skipping SROA -- no target data!\n");
return false;
}
- DT = getAnalysisIfAvailable<DominatorTree>();
+ DL = &DLP->getDataLayout();
+ DominatorTreeWrapperPass *DTWP =
+ getAnalysisIfAvailable<DominatorTreeWrapperPass>();
+ DT = DTWP ? &DTWP->getDomTree() : nullptr;
BasicBlock &EntryBB = F.getEntryBlock();
- for (BasicBlock::iterator I = EntryBB.begin(), E = llvm::prior(EntryBB.end());
+ for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
I != E; ++I)
if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
Worklist.insert(AI);
@@ -3564,11 +3639,14 @@ bool SROA::runOnFunction(Function &F) {
// Remove the deleted allocas from various lists so that we don't try to
// continue processing them.
if (!DeletedAllocas.empty()) {
- Worklist.remove_if(IsAllocaInSet(DeletedAllocas));
- PostPromotionWorklist.remove_if(IsAllocaInSet(DeletedAllocas));
+ auto IsInSet = [&](AllocaInst *AI) {
+ return DeletedAllocas.count(AI);
+ };
+ Worklist.remove_if(IsInSet);
+ PostPromotionWorklist.remove_if(IsInSet);
PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(),
PromotableAllocas.end(),
- IsAllocaInSet(DeletedAllocas)),
+ IsInSet),
PromotableAllocas.end());
DeletedAllocas.clear();
}
@@ -3585,6 +3663,6 @@ bool SROA::runOnFunction(Function &F) {
void SROA::getAnalysisUsage(AnalysisUsage &AU) const {
if (RequiresDomTree)
- AU.addRequired<DominatorTree>();
+ AU.addRequired<DominatorTreeWrapperPass>();
AU.setPreservesCFG();
}
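
runOnFunction now filters the worklists with an in-place capturing lambda instead of the removed IsAllocaInSet functor; the surrounding erase/remove_if idiom is unchanged. A standalone sketch of that idiom with invented container contents:

    #include <algorithm>
    #include <cstdio>
    #include <set>
    #include <vector>

    int main() {
      std::vector<int> Worklist = {1, 2, 3, 4, 5};
      std::set<int> Deleted = {2, 4};
      // Capturing lambda fed straight into erase/remove_if, replacing a
      // one-off function object.
      auto IsInSet = [&](int V) { return Deleted.count(V) != 0; };
      Worklist.erase(std::remove_if(Worklist.begin(), Worklist.end(), IsInSet),
                     Worklist.end());
      for (int V : Worklist)
        std::printf("%d ", V);   // prints: 1 3 5
      std::printf("\n");
      return 0;
    }
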
diff --git a/contrib/llvm/lib/Transforms/Scalar/SampleProfile.cpp b/contrib/llvm/lib/Transforms/Scalar/SampleProfile.cpp
index 9bcd702..73c97ff 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SampleProfile.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SampleProfile.cpp
@@ -22,38 +22,198 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "sample-profile"
-
+#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/DebugInfo/DIContext.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/PostDominators.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
-#include "llvm/IR/Metadata.h"
#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/InstIterator.h"
+#include "llvm/Support/LineIterator.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Transforms/Scalar.h"
+#include <cctype>
using namespace llvm;
+#define DEBUG_TYPE "sample-profile"
+
// Command line option to specify the file to read samples from. This is
// mainly used for debugging.
static cl::opt<std::string> SampleProfileFile(
"sample-profile-file", cl::init(""), cl::value_desc("filename"),
cl::desc("Profile file loaded by -sample-profile"), cl::Hidden);
+static cl::opt<unsigned> SampleProfileMaxPropagateIterations(
+ "sample-profile-max-propagate-iterations", cl::init(100),
+ cl::desc("Maximum number of iterations to go through when propagating "
+ "sample block/edge weights through the CFG."));
+
+namespace {
+/// \brief Represents the relative location of an instruction.
+///
+/// Instruction locations are specified by the line offset from the
+/// beginning of the function (marked by the line where the function
+/// header is) and the discriminator value within that line.
+///
+/// The discriminator value is useful to distinguish instructions
+/// that are on the same line but belong to different basic blocks
+/// (e.g., the two post-increment instructions in "if (p) x++; else y++;").
+struct InstructionLocation {
+ InstructionLocation(int L, unsigned D) : LineOffset(L), Discriminator(D) {}
+ int LineOffset;
+ unsigned Discriminator;
+};
+}
+
+namespace llvm {
+template <> struct DenseMapInfo<InstructionLocation> {
+ typedef DenseMapInfo<int> OffsetInfo;
+ typedef DenseMapInfo<unsigned> DiscriminatorInfo;
+ static inline InstructionLocation getEmptyKey() {
+ return InstructionLocation(OffsetInfo::getEmptyKey(),
+ DiscriminatorInfo::getEmptyKey());
+ }
+ static inline InstructionLocation getTombstoneKey() {
+ return InstructionLocation(OffsetInfo::getTombstoneKey(),
+ DiscriminatorInfo::getTombstoneKey());
+ }
+ static inline unsigned getHashValue(InstructionLocation Val) {
+ return DenseMapInfo<std::pair<int, unsigned>>::getHashValue(
+ std::pair<int, unsigned>(Val.LineOffset, Val.Discriminator));
+ }
+ static inline bool isEqual(InstructionLocation LHS, InstructionLocation RHS) {
+ return LHS.LineOffset == RHS.LineOffset &&
+ LHS.Discriminator == RHS.Discriminator;
+ }
+};
+}
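
The DenseMapInfo specialization above is what lets InstructionLocation serve as a DenseMap key: samples are bucketed by (line offset, discriminator), so two instructions on the same source line, such as the two increments in "if (p) x++; else y++;", keep separate counts. A rough standard-library analogue, with invented sample counts:

    #include <cstdio>
    #include <map>
    #include <utility>

    int main() {
      // Analogue of BodySampleMap: (line offset, discriminator) -> sample count.
      std::map<std::pair<int, unsigned>, unsigned> BodySamples;
      BodySamples[{7, 1}] += 100;  // "x++" arm of the branch on line offset 7
      BodySamples[{7, 2}] += 3;    // "y++" arm, same line, different discriminator
      for (const auto &S : BodySamples)
        std::printf("offset %d.%u: %u samples\n",
                    S.first.first, S.first.second, S.second);
      return 0;
    }
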
namespace {
+typedef DenseMap<InstructionLocation, unsigned> BodySampleMap;
+typedef DenseMap<BasicBlock *, unsigned> BlockWeightMap;
+typedef DenseMap<BasicBlock *, BasicBlock *> EquivalenceClassMap;
+typedef std::pair<BasicBlock *, BasicBlock *> Edge;
+typedef DenseMap<Edge, unsigned> EdgeWeightMap;
+typedef DenseMap<BasicBlock *, SmallVector<BasicBlock *, 8>> BlockEdgeMap;
+
+/// \brief Representation of the runtime profile for a function.
+///
+/// This data structure contains the runtime profile for a given
+/// function. It contains the total number of samples collected
+/// in the function and a map of samples collected in every statement.
+class SampleFunctionProfile {
+public:
+ SampleFunctionProfile()
+ : TotalSamples(0), TotalHeadSamples(0), HeaderLineno(0), DT(nullptr),
+ PDT(nullptr), LI(nullptr), Ctx(nullptr) {}
+
+ unsigned getFunctionLoc(Function &F);
+ bool emitAnnotations(Function &F, DominatorTree *DomTree,
+ PostDominatorTree *PostDomTree, LoopInfo *Loops);
+ unsigned getInstWeight(Instruction &I);
+ unsigned getBlockWeight(BasicBlock *B);
+ void addTotalSamples(unsigned Num) { TotalSamples += Num; }
+ void addHeadSamples(unsigned Num) { TotalHeadSamples += Num; }
+ void addBodySamples(int LineOffset, unsigned Discriminator, unsigned Num) {
+ assert(LineOffset >= 0);
+ BodySamples[InstructionLocation(LineOffset, Discriminator)] += Num;
+ }
+ void print(raw_ostream &OS);
+ void printEdgeWeight(raw_ostream &OS, Edge E);
+ void printBlockWeight(raw_ostream &OS, BasicBlock *BB);
+ void printBlockEquivalence(raw_ostream &OS, BasicBlock *BB);
+ bool computeBlockWeights(Function &F);
+ void findEquivalenceClasses(Function &F);
+ void findEquivalencesFor(BasicBlock *BB1,
+ SmallVector<BasicBlock *, 8> Descendants,
+ DominatorTreeBase<BasicBlock> *DomTree);
+ void propagateWeights(Function &F);
+ unsigned visitEdge(Edge E, unsigned *NumUnknownEdges, Edge *UnknownEdge);
+ void buildEdges(Function &F);
+ bool propagateThroughEdges(Function &F);
+ bool empty() { return BodySamples.empty(); }
+
+protected:
+ /// \brief Total number of samples collected inside this function.
+ ///
+ /// Samples are cumulative, they include all the samples collected
+ /// inside this function and all its inlined callees.
+ unsigned TotalSamples;
+
+ /// \brief Total number of samples collected at the head of the function.
+ /// FIXME: Use head samples to estimate a cold/hot attribute for the function.
+ unsigned TotalHeadSamples;
+
+ /// \brief Line number for the function header. Used to compute relative
+ /// line numbers from the absolute line LOCs found in instruction locations.
+ /// The relative line numbers are needed to address the samples from the
+ /// profile file.
+ unsigned HeaderLineno;
+
+ /// \brief Map line offsets to collected samples.
+ ///
+ /// Each entry in this map contains the number of samples
+ /// collected at the corresponding line offset. All line locations
+ /// are an offset from the start of the function.
+ BodySampleMap BodySamples;
+
+ /// \brief Map basic blocks to their computed weights.
+ ///
+ /// The weight of a basic block is defined to be the maximum
+ /// of all the instruction weights in that block.
+ BlockWeightMap BlockWeights;
+
+ /// \brief Map edges to their computed weights.
+ ///
+ /// Edge weights are computed by propagating basic block weights in
+ /// SampleProfile::propagateWeights.
+ EdgeWeightMap EdgeWeights;
+
+ /// \brief Set of visited blocks during propagation.
+ SmallPtrSet<BasicBlock *, 128> VisitedBlocks;
+
+ /// \brief Set of visited edges during propagation.
+ SmallSet<Edge, 128> VisitedEdges;
+
+ /// \brief Equivalence classes for block weights.
+ ///
+ /// Two blocks BB1 and BB2 are in the same equivalence class if they
+ /// dominate and post-dominate each other, and they are in the same loop
+ /// nest. When this happens, the two blocks are guaranteed to execute
+ /// the same number of times.
+ EquivalenceClassMap EquivalenceClass;
+
+ /// \brief Dominance, post-dominance and loop information.
+ DominatorTree *DT;
+ PostDominatorTree *PDT;
+ LoopInfo *LI;
+
+ /// \brief Predecessors for each basic block in the CFG.
+ BlockEdgeMap Predecessors;
+
+ /// \brief Successors for each basic block in the CFG.
+ BlockEdgeMap Successors;
+
+ /// \brief LLVM context holding the debug data we need.
+ LLVMContext *Ctx;
+};
+
/// \brief Sample-based profile reader.
///
/// Each profile contains sample counts for all the functions
@@ -77,61 +237,33 @@ namespace {
/// 2. The samples collected at each line in F. To provide some
/// protection against source code shuffling, line numbers should
/// be relative to the start of the function.
-class SampleProfile {
+class SampleModuleProfile {
public:
- SampleProfile(StringRef F) : Profiles(0), Filename(F) {}
+ SampleModuleProfile(const Module &M, StringRef F)
+ : Profiles(0), Filename(F), M(M) {}
void dump();
- void loadText();
+ bool loadText();
void loadNative() { llvm_unreachable("not implemented"); }
- bool emitAnnotations(Function &F);
void printFunctionProfile(raw_ostream &OS, StringRef FName);
void dumpFunctionProfile(StringRef FName);
+ SampleFunctionProfile &getProfile(const Function &F) {
+ return Profiles[F.getName()];
+ }
-protected:
- typedef DenseMap<uint32_t, uint32_t> BodySampleMap;
- typedef DenseMap<BasicBlock *, uint32_t> BlockWeightMap;
-
- /// \brief Representation of the runtime profile for a function.
- ///
- /// This data structure contains the runtime profile for a given
- /// function. It contains the total number of samples collected
- /// in the function and a map of samples collected in every statement.
- struct FunctionProfile {
- /// \brief Total number of samples collected inside this function.
- ///
- /// Samples are cumulative, they include all the samples collected
- /// inside this function and all its inlined callees.
- unsigned TotalSamples;
-
- // \brief Total number of samples collected at the head of the function.
- unsigned TotalHeadSamples;
-
- /// \brief Map line offsets to collected samples.
- ///
- /// Each entry in this map contains the number of samples
- /// collected at the corresponding line offset. All line locations
- /// are an offset from the start of the function.
- BodySampleMap BodySamples;
-
- /// \brief Map basic blocks to their computed weights.
- ///
- /// The weight of a basic block is defined to be the maximum
- /// of all the instruction weights in that block.
- BlockWeightMap BlockWeights;
- };
-
- uint32_t getInstWeight(Instruction &I, unsigned FirstLineno,
- BodySampleMap &BodySamples);
- uint32_t computeBlockWeight(BasicBlock *B, unsigned FirstLineno,
- BodySampleMap &BodySamples);
+ /// \brief Report a parse error message.
+ void reportParseError(int64_t LineNumber, Twine Msg) const {
+ DiagnosticInfoSampleProfile Diag(Filename.data(), LineNumber, Msg);
+ M.getContext().diagnose(Diag);
+ }
+protected:
/// \brief Map every function to its associated profile.
///
/// The profile of every function executed at runtime is collected
- /// in the structure FunctionProfile. This maps function objects
+ /// in the structure SampleFunctionProfile. This maps function objects
/// to their corresponding profiles.
- StringMap<FunctionProfile> Profiles;
+ StringMap<SampleFunctionProfile> Profiles;
/// \brief Path name to the file holding the profile data.
///
@@ -140,63 +272,10 @@ protected:
/// version of the profile format to be used in constructing test
/// cases and debugging.
StringRef Filename;
-};
-/// \brief Loader class for text-based profiles.
-///
-/// This class defines a simple interface to read text files containing
-/// profiles. It keeps track of line number information and location of
-/// the file pointer. Users of this class are responsible for actually
-/// parsing the lines returned by the readLine function.
-///
-/// TODO - This does not really belong here. It is a generic text file
-/// reader. It should be moved to the Support library and made more general.
-class ExternalProfileTextLoader {
-public:
- ExternalProfileTextLoader(StringRef F) : Filename(F) {
- error_code EC;
- EC = MemoryBuffer::getFile(Filename, Buffer);
- if (EC)
- report_fatal_error("Could not open profile file " + Filename + ": " +
- EC.message());
- FP = Buffer->getBufferStart();
- Lineno = 0;
- }
-
- /// \brief Read a line from the mapped file.
- StringRef readLine() {
- size_t Length = 0;
- const char *start = FP;
- while (FP != Buffer->getBufferEnd() && *FP != '\n') {
- Length++;
- FP++;
- }
- if (FP != Buffer->getBufferEnd())
- FP++;
- Lineno++;
- return StringRef(start, Length);
- }
-
- /// \brief Return true, if we've reached EOF.
- bool atEOF() const { return FP == Buffer->getBufferEnd(); }
-
- /// \brief Report a parse error message and stop compilation.
- void reportParseError(Twine Msg) const {
- report_fatal_error(Filename + ":" + Twine(Lineno) + ": " + Msg + "\n");
- }
-
-private:
- /// \brief Memory buffer holding the text file.
- OwningPtr<MemoryBuffer> Buffer;
-
- /// \brief Current position into the memory buffer.
- const char *FP;
-
- /// \brief Current line number.
- int64_t Lineno;
-
- /// \brief Path name where to the profile file.
- StringRef Filename;
+ /// \brief Module being compiled. Used mainly to access the current
+ /// LLVM context for diagnostics.
+ const Module &M;
};
/// \brief Sample profile pass.
@@ -210,148 +289,242 @@ public:
static char ID;
SampleProfileLoader(StringRef Name = SampleProfileFile)
- : FunctionPass(ID), Profiler(0), Filename(Name) {
+ : FunctionPass(ID), Profiler(), Filename(Name), ProfileIsValid(false) {
initializeSampleProfileLoaderPass(*PassRegistry::getPassRegistry());
}
- virtual bool doInitialization(Module &M);
+ bool doInitialization(Module &M) override;
void dump() { Profiler->dump(); }
- virtual const char *getPassName() const { return "Sample profile pass"; }
+ const char *getPassName() const override { return "Sample profile pass"; }
- virtual bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
+ AU.addRequired<LoopInfo>();
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addRequired<PostDominatorTree>();
}
protected:
/// \brief Profile reader object.
- OwningPtr<SampleProfile> Profiler;
+ std::unique_ptr<SampleModuleProfile> Profiler;
/// \brief Name of the profile file to load.
StringRef Filename;
+
+ /// \brief Flag indicating whether the profile input loaded successfully.
+ bool ProfileIsValid;
};
}
-/// \brief Print the function profile for \p FName on stream \p OS.
+/// \brief Print this function profile on stream \p OS.
///
/// \param OS Stream to emit the output to.
-/// \param FName Name of the function to print.
-void SampleProfile::printFunctionProfile(raw_ostream &OS, StringRef FName) {
- FunctionProfile FProfile = Profiles[FName];
- OS << "Function: " << FName << ", " << FProfile.TotalSamples << ", "
- << FProfile.TotalHeadSamples << ", " << FProfile.BodySamples.size()
+void SampleFunctionProfile::print(raw_ostream &OS) {
+ OS << TotalSamples << ", " << TotalHeadSamples << ", " << BodySamples.size()
<< " sampled lines\n";
- for (BodySampleMap::const_iterator SI = FProfile.BodySamples.begin(),
- SE = FProfile.BodySamples.end();
+ for (BodySampleMap::const_iterator SI = BodySamples.begin(),
+ SE = BodySamples.end();
SI != SE; ++SI)
- OS << "\tline offset: " << SI->first
+ OS << "\tline offset: " << SI->first.LineOffset
+ << ", discriminator: " << SI->first.Discriminator
<< ", number of samples: " << SI->second << "\n";
OS << "\n";
}
+/// \brief Print the weight of edge \p E on stream \p OS.
+///
+/// \param OS Stream to emit the output to.
+/// \param E Edge to print.
+void SampleFunctionProfile::printEdgeWeight(raw_ostream &OS, Edge E) {
+ OS << "weight[" << E.first->getName() << "->" << E.second->getName()
+ << "]: " << EdgeWeights[E] << "\n";
+}
+
+/// \brief Print the equivalence class of block \p BB on stream \p OS.
+///
+/// \param OS Stream to emit the output to.
+/// \param BB Block to print.
+void SampleFunctionProfile::printBlockEquivalence(raw_ostream &OS,
+ BasicBlock *BB) {
+ BasicBlock *Equiv = EquivalenceClass[BB];
+ OS << "equivalence[" << BB->getName()
+ << "]: " << ((Equiv) ? EquivalenceClass[BB]->getName() : "NONE") << "\n";
+}
+
+/// \brief Print the weight of block \p BB on stream \p OS.
+///
+/// \param OS Stream to emit the output to.
+/// \param BB Block to print.
+void SampleFunctionProfile::printBlockWeight(raw_ostream &OS, BasicBlock *BB) {
+ OS << "weight[" << BB->getName() << "]: " << BlockWeights[BB] << "\n";
+}
+
+/// \brief Print the function profile for \p FName on stream \p OS.
+///
+/// \param OS Stream to emit the output to.
+/// \param FName Name of the function to print.
+void SampleModuleProfile::printFunctionProfile(raw_ostream &OS,
+ StringRef FName) {
+ OS << "Function: " << FName << ":\n";
+ Profiles[FName].print(OS);
+}
+
/// \brief Dump the function profile for \p FName.
///
/// \param FName Name of the function to print.
-void SampleProfile::dumpFunctionProfile(StringRef FName) {
+void SampleModuleProfile::dumpFunctionProfile(StringRef FName) {
printFunctionProfile(dbgs(), FName);
}
/// \brief Dump all the function profiles found.
-void SampleProfile::dump() {
- for (StringMap<FunctionProfile>::const_iterator I = Profiles.begin(),
- E = Profiles.end();
+void SampleModuleProfile::dump() {
+ for (StringMap<SampleFunctionProfile>::const_iterator I = Profiles.begin(),
+ E = Profiles.end();
I != E; ++I)
dumpFunctionProfile(I->getKey());
}
/// \brief Load samples from a text file.
///
-/// The file is divided in two segments:
-///
-/// Symbol table (represented with the string "symbol table")
-/// Number of symbols in the table
-/// symbol 1
-/// symbol 2
-/// ...
-/// symbol N
+/// The file contains a list of samples for every function executed at
+/// runtime. Each function profile has the following format:
///
-/// Function body profiles
-/// function1:total_samples:total_head_samples:number_of_locations
-/// location_offset_1: number_of_samples
-/// location_offset_2: number_of_samples
+/// function1:total_samples:total_head_samples
+/// offset1[.discriminator]: number_of_samples [fn1:num fn2:num ... ]
+/// offset2[.discriminator]: number_of_samples [fn3:num fn4:num ... ]
/// ...
-/// location_offset_N: number_of_samples
+/// offsetN[.discriminator]: number_of_samples [fn5:num fn6:num ... ]
///
/// Function names must be mangled in order for the profile loader to
-/// match them in the current translation unit.
+/// match them in the current translation unit. The two numbers in the
+/// function header specify how many total samples were accumulated in
+/// the function (first number), and the total number of samples accumulated
+/// at the prologue of the function (second number). This head sample
+/// count indicates how frequently the function is invoked.
+///
+/// Each sampled line may contain several items. Some are optional
+/// (marked below):
+///
+/// a- Source line offset. This number represents the line number
+/// in the function where the sample was collected. The line number
+/// is always relative to the line where the symbol of the function
+/// is defined. So, if the function has its header at line 280,
+/// the offset 13 is at line 293 in the file.
+///
+/// b- [OPTIONAL] Discriminator. This is used if the sampled program
+/// was compiled with DWARF discriminator support
+/// (http://wiki.dwarfstd.org/index.php?title=Path_Discriminators)
+///
+/// c- Number of samples. This is the number of samples collected by
+/// the profiler at this source location.
+///
+/// d- [OPTIONAL] Potential call targets and samples. If present, this
+/// line contains a call instruction. This models both direct and
+/// indirect calls. Each called target is listed together with the
+/// number of samples. For example,
+///
+/// 130: 7 foo:3 bar:2 baz:7
+///
+/// The above means that at relative line offset 130 there is a
+/// call instruction that calls one of foo(), bar() and baz(), with
+/// baz() being the most frequent call target.
+///
+/// FIXME: This is currently unhandled, but it has a lot of
+/// potential for aiding the inliner.
+///
///
/// Since this is a flat profile, a function that shows up more than
/// once gets all its samples aggregated across all its instances.
-/// TODO - flat profiles are too imprecise to provide good optimization
-/// opportunities. Convert them to context-sensitive profile.
+///
+/// FIXME: flat profiles are too imprecise to provide good optimization
+/// opportunities. Convert them to context-sensitive profile.
///
/// This textual representation is useful to generate unit tests and
/// for debugging purposes, but it should not be used to generate
/// profiles for large programs, as the representation is extremely
/// inefficient.
-void SampleProfile::loadText() {
- ExternalProfileTextLoader Loader(Filename);
-
- // Read the symbol table.
- StringRef Line = Loader.readLine();
- if (Line != "symbol table")
- Loader.reportParseError("Expected 'symbol table', found " + Line);
- int NumSymbols;
- Line = Loader.readLine();
- if (Line.getAsInteger(10, NumSymbols))
- Loader.reportParseError("Expected a number, found " + Line);
- for (int I = 0; I < NumSymbols; I++) {
- StringRef FName = Loader.readLine();
- FunctionProfile &FProfile = Profiles[FName];
- FProfile.BodySamples.clear();
- FProfile.TotalSamples = 0;
- FProfile.TotalHeadSamples = 0;
+///
+/// \returns true if the file was loaded successfully, false otherwise.
+bool SampleModuleProfile::loadText() {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr =
+ MemoryBuffer::getFile(Filename);
+ if (std::error_code EC = BufferOrErr.getError()) {
+ std::string Msg(EC.message());
+ M.getContext().diagnose(DiagnosticInfoSampleProfile(Filename.data(), Msg));
+ return false;
}
+ std::unique_ptr<MemoryBuffer> Buffer = std::move(BufferOrErr.get());
+ line_iterator LineIt(*Buffer, '#');
// Read the profile of each function. Since each function may be
// mentioned more than once, and we are collecting flat profiles,
// accumulate samples as we parse them.
- Regex HeadRE("^([^:]+):([0-9]+):([0-9]+):([0-9]+)$");
- Regex LineSample("^([0-9]+): ([0-9]+)$");
- while (!Loader.atEOF()) {
- SmallVector<StringRef, 4> Matches;
- Line = Loader.readLine();
- if (!HeadRE.match(Line, &Matches))
- Loader.reportParseError("Expected 'mangled_name:NUM:NUM:NUM', found " +
- Line);
- assert(Matches.size() == 5);
+ Regex HeadRE("^([^0-9].*):([0-9]+):([0-9]+)$");
+ Regex LineSample("^([0-9]+)\\.?([0-9]+)?: ([0-9]+)(.*)$");
+ while (!LineIt.is_at_eof()) {
+ // Read the header of each function.
+ //
+ // Note that for function identifiers we are actually expecting
+ // mangled names, but we may not always get them. This happens when
+ // the compiler decides not to emit the function (e.g., it was inlined
+ // and removed). In this case, the binary will not have the linkage
+ // name for the function, so the profiler will emit the function's
+ // unmangled name, which may contain characters like ':' and '>' in its
+ // name (member functions, templates, etc).
+ //
+ // The only requirement we place on the identifier, then, is that it
+ // should not begin with a number.
+ SmallVector<StringRef, 3> Matches;
+ if (!HeadRE.match(*LineIt, &Matches)) {
+ reportParseError(LineIt.line_number(),
+ "Expected 'mangled_name:NUM:NUM', found " + *LineIt);
+ return false;
+ }
+ assert(Matches.size() == 4);
StringRef FName = Matches[1];
- unsigned NumSamples, NumHeadSamples, NumSampledLines;
+ unsigned NumSamples, NumHeadSamples;
Matches[2].getAsInteger(10, NumSamples);
Matches[3].getAsInteger(10, NumHeadSamples);
- Matches[4].getAsInteger(10, NumSampledLines);
- FunctionProfile &FProfile = Profiles[FName];
- FProfile.TotalSamples += NumSamples;
- FProfile.TotalHeadSamples += NumHeadSamples;
- BodySampleMap &SampleMap = FProfile.BodySamples;
- unsigned I;
- for (I = 0; I < NumSampledLines && !Loader.atEOF(); I++) {
- Line = Loader.readLine();
- if (!LineSample.match(Line, &Matches))
- Loader.reportParseError("Expected 'NUM: NUM', found " + Line);
- assert(Matches.size() == 3);
- unsigned LineOffset, NumSamples;
+ Profiles[FName] = SampleFunctionProfile();
+ SampleFunctionProfile &FProfile = Profiles[FName];
+ FProfile.addTotalSamples(NumSamples);
+ FProfile.addHeadSamples(NumHeadSamples);
+ ++LineIt;
+
+ // Now read the body. The body of the function ends when we reach
+ // EOF or when we see the start of the next function.
+ while (!LineIt.is_at_eof() && isdigit((*LineIt)[0])) {
+ if (!LineSample.match(*LineIt, &Matches)) {
+ reportParseError(
+ LineIt.line_number(),
+ "Expected 'NUM[.NUM]: NUM[ mangled_name:NUM]*', found " + *LineIt);
+ return false;
+ }
+ assert(Matches.size() == 5);
+ unsigned LineOffset, NumSamples, Discriminator = 0;
Matches[1].getAsInteger(10, LineOffset);
- Matches[2].getAsInteger(10, NumSamples);
- SampleMap[LineOffset] += NumSamples;
- }
+ if (Matches[2] != "")
+ Matches[2].getAsInteger(10, Discriminator);
+ Matches[3].getAsInteger(10, NumSamples);
- if (I < NumSampledLines)
- Loader.reportParseError("Unexpected end of file");
+ // FIXME: Handle called targets (in Matches[4]).
+
+ // When dealing with instruction weights, we use the value
+ // zero to indicate the absence of a sample. If we read an
+ // actual zero from the profile file, return it as 1 to
+ // avoid the confusion later on.
+ if (NumSamples == 0)
+ NumSamples = 1;
+ FProfile.addBodySamples(LineOffset, Discriminator, NumSamples);
+ ++LineIt;
+ }
}
+
+ return true;
}
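For illustration only, with hypothetical function names, offsets and counts, a file accepted by the parser above could look like the following. The second body line of main carries discriminator 1, and the call-target list on its last body line is matched by the trailing group of LineSample but currently ignored:

    main:18400:200
    1: 200
    4.1: 5000
    7: 0 _Z3foov:3 _Z3barv:4
    _Z3foov:1200:100
    0: 100
    2: 1100

Per the remapping in the loop above, the explicit zero sample on offset 7 would be stored as 1.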
/// \brief Get the weight for an instruction.
@@ -359,46 +532,49 @@ void SampleProfile::loadText() {
/// The "weight" of an instruction \p Inst is the number of samples
/// collected on that instruction at runtime. To retrieve it, we
/// need to compute the line number of \p Inst relative to the start of its
-/// function. We use \p FirstLineno to compute the offset. We then
-/// look up the samples collected for \p Inst using \p BodySamples.
+/// function. We use HeaderLineno to compute the offset. We then
+/// look up the samples collected for \p Inst using BodySamples.
///
/// \param Inst Instruction to query.
-/// \param FirstLineno Line number of the first instruction in the function.
-/// \param BodySamples Map of relative source line locations to samples.
///
/// \returns The profiled weight of I.
-uint32_t SampleProfile::getInstWeight(Instruction &Inst, unsigned FirstLineno,
- BodySampleMap &BodySamples) {
- unsigned LOffset = Inst.getDebugLoc().getLine() - FirstLineno + 1;
- return BodySamples.lookup(LOffset);
+unsigned SampleFunctionProfile::getInstWeight(Instruction &Inst) {
+ DebugLoc DLoc = Inst.getDebugLoc();
+ unsigned Lineno = DLoc.getLine();
+ if (Lineno < HeaderLineno)
+ return 0;
+
+ DILocation DIL(DLoc.getAsMDNode(*Ctx));
+ int LOffset = Lineno - HeaderLineno;
+ unsigned Discriminator = DIL.getDiscriminator();
+ unsigned Weight =
+ BodySamples.lookup(InstructionLocation(LOffset, Discriminator));
+ DEBUG(dbgs() << " " << Lineno << "." << Discriminator << ":" << Inst
+ << " (line offset: " << LOffset << "." << Discriminator
+ << " - weight: " << Weight << ")\n");
+ return Weight;
}
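As a worked example with made-up numbers: if HeaderLineno is 280 and an instruction's debug location reports line 293 with DWARF discriminator 2, the lookup key is InstructionLocation(293 - 280, 2), i.e. offset 13 with discriminator 2, and the returned weight is whatever BodySamples holds for that key (0 if the profile has no entry). An instruction whose debug line is below 280 gets weight 0 from the early return above.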
/// \brief Compute the weight of a basic block.
///
/// The weight of basic block \p B is the maximum weight of all the
-/// instructions in B.
+/// instructions in B. The weight of \p B is computed and cached in
+/// the BlockWeights map.
///
/// \param B The basic block to query.
-/// \param FirstLineno The line number for the first line in the
-/// function holding B.
-/// \param BodySamples The map containing all the samples collected in that
-/// function.
///
/// \returns The computed weight of B.
-uint32_t SampleProfile::computeBlockWeight(BasicBlock *B, unsigned FirstLineno,
- BodySampleMap &BodySamples) {
+unsigned SampleFunctionProfile::getBlockWeight(BasicBlock *B) {
// If we've computed B's weight before, return it.
- Function *F = B->getParent();
- FunctionProfile &FProfile = Profiles[F->getName()];
std::pair<BlockWeightMap::iterator, bool> Entry =
- FProfile.BlockWeights.insert(std::make_pair(B, 0));
+ BlockWeights.insert(std::make_pair(B, 0));
if (!Entry.second)
return Entry.first->second;
// Otherwise, compute and cache B's weight.
- uint32_t Weight = 0;
+ unsigned Weight = 0;
for (BasicBlock::iterator I = B->begin(), E = B->end(); I != E; ++I) {
- uint32_t InstWeight = getInstWeight(*I, FirstLineno, BodySamples);
+ unsigned InstWeight = getInstWeight(*I);
if (InstWeight > Weight)
Weight = InstWeight;
}
@@ -406,31 +582,344 @@ uint32_t SampleProfile::computeBlockWeight(BasicBlock *B, unsigned FirstLineno,
return Weight;
}
-/// \brief Generate branch weight metadata for all branches in \p F.
+/// \brief Compute and store the weights of every basic block.
+///
+/// This populates the BlockWeights map by computing
+/// the weights of every basic block in the CFG.
+///
+/// \param F The function to query.
+bool SampleFunctionProfile::computeBlockWeights(Function &F) {
+ bool Changed = false;
+ DEBUG(dbgs() << "Block weights\n");
+ for (Function::iterator B = F.begin(), E = F.end(); B != E; ++B) {
+ unsigned Weight = getBlockWeight(B);
+ Changed |= (Weight > 0);
+ DEBUG(printBlockWeight(dbgs(), B));
+ }
+
+ return Changed;
+}
+
+/// \brief Find equivalence classes for the given block.
///
-/// For every branch instruction B in \p F, we compute the weight of the
-/// target block for each of the edges out of B. This is the weight
-/// that we associate with that branch.
+/// This finds all the blocks that are guaranteed to execute the same
+/// number of times as \p BB1. To do this, it traverses all the
+/// descendants of \p BB1 in the dominator or post-dominator tree.
///
-/// TODO - This weight assignment will most likely be wrong if the
-/// target branch has more than two predecessors. This needs to be done
-/// using some form of flow propagation.
+/// A block BB2 will be in the same equivalence class as \p BB1 if
+/// the following holds:
///
-/// Once all the branch weights are computed, we emit the MD_prof
-/// metadata on B using the computed values.
+/// 1- \p BB1 is a descendant of BB2 in the opposite tree. So, if BB2
+/// is a descendant of \p BB1 in the dominator tree, then BB2 should
+/// dominate BB1 in the post-dominator tree.
+///
+/// 2- Both BB2 and \p BB1 must be in the same loop.
+///
+/// For every block BB2 that meets those two requirements, we set BB2's
+/// equivalence class to \p BB1.
+///
+/// \param BB1 Block to check.
+/// \param Descendants Descendants of \p BB1 in either the dom or pdom tree.
+/// \param DomTree Opposite dominator tree. If \p Descendants is filled
+/// with blocks from \p BB1's dominator tree, then
+/// this is the post-dominator tree, and vice versa.
+void SampleFunctionProfile::findEquivalencesFor(
+ BasicBlock *BB1, SmallVector<BasicBlock *, 8> Descendants,
+ DominatorTreeBase<BasicBlock> *DomTree) {
+ for (SmallVectorImpl<BasicBlock *>::iterator I = Descendants.begin(),
+ E = Descendants.end();
+ I != E; ++I) {
+ BasicBlock *BB2 = *I;
+ bool IsDomParent = DomTree->dominates(BB2, BB1);
+ bool IsInSameLoop = LI->getLoopFor(BB1) == LI->getLoopFor(BB2);
+ if (BB1 != BB2 && VisitedBlocks.insert(BB2) && IsDomParent &&
+ IsInSameLoop) {
+ EquivalenceClass[BB2] = BB1;
+
+ // If BB2 is heavier than BB1, make BB2 have the same weight
+ // as BB1.
+ //
+ // Note that we don't worry about the opposite situation here
+ // (when BB2 is lighter than BB1). We will deal with this
+ // during the propagation phase. Right now, we just want to
+ // make sure that BB1 has the largest weight of all the
+ // members of its equivalence set.
+ unsigned &BB1Weight = BlockWeights[BB1];
+ unsigned &BB2Weight = BlockWeights[BB2];
+ BB1Weight = std::max(BB1Weight, BB2Weight);
+ }
+ }
+}
+
+/// \brief Find equivalence classes.
+///
+/// Since samples may be missing from blocks, we can fill in the gaps by setting
+/// the weights of all the blocks in the same equivalence class to the same
+/// weight. To compute the concept of equivalence, we use dominance and loop
+/// information. Two blocks B1 and B2 are in the same equivalence class if B1
+/// dominates B2, B2 post-dominates B1 and both are in the same loop.
///
/// \param F The function to query.
-bool SampleProfile::emitAnnotations(Function &F) {
+void SampleFunctionProfile::findEquivalenceClasses(Function &F) {
+ SmallVector<BasicBlock *, 8> DominatedBBs;
+ DEBUG(dbgs() << "\nBlock equivalence classes\n");
+ // Find equivalence sets based on dominance and post-dominance information.
+ for (Function::iterator B = F.begin(), E = F.end(); B != E; ++B) {
+ BasicBlock *BB1 = B;
+
+ // Compute BB1's equivalence class once.
+ if (EquivalenceClass.count(BB1)) {
+ DEBUG(printBlockEquivalence(dbgs(), BB1));
+ continue;
+ }
+
+ // By default, blocks are in their own equivalence class.
+ EquivalenceClass[BB1] = BB1;
+
+ // Traverse all the blocks dominated by BB1. We are looking for
+ // every basic block BB2 such that:
+ //
+ // 1- BB1 dominates BB2.
+ // 2- BB2 post-dominates BB1.
+ // 3- BB1 and BB2 are in the same loop nest.
+ //
+ // If all those conditions hold, it means that BB2 is executed
+ // as many times as BB1, so they are placed in the same equivalence
+ // class by making BB2's equivalence class be BB1.
+ DominatedBBs.clear();
+ DT->getDescendants(BB1, DominatedBBs);
+ findEquivalencesFor(BB1, DominatedBBs, PDT->DT);
+
+ // Repeat the same logic for all the blocks post-dominated by BB1.
+ // We are looking for every basic block BB2 such that:
+ //
+ // 1- BB1 post-dominates BB2.
+ // 2- BB2 dominates BB1.
+ // 3- BB1 and BB2 are in the same loop nest.
+ //
+ // If all those conditions hold, BB2's equivalence class is BB1.
+ DominatedBBs.clear();
+ PDT->getDescendants(BB1, DominatedBBs);
+ findEquivalencesFor(BB1, DominatedBBs, DT);
+
+ DEBUG(printBlockEquivalence(dbgs(), BB1));
+ }
+
+ // Assign weights to equivalence classes.
+ //
+ // All the basic blocks in the same equivalence class will execute
+ // the same number of times. Since we know that the head block in
+ // each equivalence class has the largest weight, assign that weight
+ // to all the blocks in that equivalence class.
+ DEBUG(dbgs() << "\nAssign the same weight to all blocks in the same class\n");
+ for (Function::iterator B = F.begin(), E = F.end(); B != E; ++B) {
+ BasicBlock *BB = B;
+ BasicBlock *EquivBB = EquivalenceClass[BB];
+ if (BB != EquivBB)
+ BlockWeights[BB] = BlockWeights[EquivBB];
+ DEBUG(printBlockWeight(dbgs(), BB));
+ }
+}
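As a small, hypothetical example, consider a loop-free diamond in which block A branches to B and C, and both B and C jump to D. A dominates D and D post-dominates A, so A and D fall into the same class (with A as its head) and both end up with max(weight(A), weight(D)); B and C stay in singleton classes, since neither post-dominates A nor dominates D.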
+
+/// \brief Visit the given edge to decide if it has a valid weight.
+///
+/// If \p E has not been visited before, we copy to \p UnknownEdge
+/// and increment the count of unknown edges.
+///
+/// \param E Edge to visit.
+/// \param NumUnknownEdges Current number of unknown edges.
+/// \param UnknownEdge Set if E has not been visited before.
+///
+/// \returns E's weight, if known. Otherwise, return 0.
+unsigned SampleFunctionProfile::visitEdge(Edge E, unsigned *NumUnknownEdges,
+ Edge *UnknownEdge) {
+ if (!VisitedEdges.count(E)) {
+ (*NumUnknownEdges)++;
+ *UnknownEdge = E;
+ return 0;
+ }
+
+ return EdgeWeights[E];
+}
+
+/// \brief Propagate weights through incoming/outgoing edges.
+///
+/// If the weight of a basic block is known, and there is only one edge
+/// with an unknown weight, we can calculate the weight of that edge.
+///
+/// Similarly, if all the edges have a known count, we can calculate the
+/// count of the basic block, if needed.
+///
+/// \param F Function to process.
+///
+/// \returns True if new weights were assigned to edges or blocks.
+bool SampleFunctionProfile::propagateThroughEdges(Function &F) {
bool Changed = false;
- FunctionProfile &FProfile = Profiles[F.getName()];
- unsigned FirstLineno = inst_begin(F)->getDebugLoc().getLine();
- MDBuilder MDB(F.getContext());
+ DEBUG(dbgs() << "\nPropagation through edges\n");
+ for (Function::iterator BI = F.begin(), EI = F.end(); BI != EI; ++BI) {
+ BasicBlock *BB = BI;
+
+ // Visit all the predecessor and successor edges to determine
+ // which ones have a weight assigned already. Note that it doesn't
+ // matter that we only keep track of a single unknown edge. The
+ // only case we are interested in handling is when only a single
+ // edge is unknown (see setEdgeOrBlockWeight).
+ for (unsigned i = 0; i < 2; i++) {
+ unsigned TotalWeight = 0;
+ unsigned NumUnknownEdges = 0;
+ Edge UnknownEdge, SelfReferentialEdge;
+
+ if (i == 0) {
+ // First, visit all predecessor edges.
+ for (size_t I = 0; I < Predecessors[BB].size(); I++) {
+ Edge E = std::make_pair(Predecessors[BB][I], BB);
+ TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
+ if (E.first == E.second)
+ SelfReferentialEdge = E;
+ }
+ } else {
+ // On the second round, visit all successor edges.
+ for (size_t I = 0; I < Successors[BB].size(); I++) {
+ Edge E = std::make_pair(BB, Successors[BB][I]);
+ TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
+ }
+ }
+
+ // After visiting all the edges, there are three cases that we
+ // can handle immediately:
+ //
+ // - All the edge weights are known (i.e., NumUnknownEdges == 0).
+ // In this case, we simply check that the sum of all the edges
+ // is the same as BB's weight. If not, we change BB's weight
+ // to match. Additionally, if BB had not been visited before,
+ // we mark it visited.
+ //
+ // - Only one edge is unknown and BB has already been visited.
+ // In this case, we can compute the weight of the edge by
+      //     subtracting the sum of the known edge weights from the
+      //     block weight. If the known edges weigh more than BB, then
+      //     the weight of the remaining unknown edge is set to zero.
+ //
+ // - There exists a self-referential edge and the weight of BB is
+      //   known. Such an edge's weight is derived from BB's weight.
+ // We add up all the other known edges and set the weight on
+ // the self-referential edge as we did in the previous case.
+ //
+ // In any other case, we must continue iterating. Eventually,
+ // all edges will get a weight, or iteration will stop when
+ // it reaches SampleProfileMaxPropagateIterations.
+ if (NumUnknownEdges <= 1) {
+ unsigned &BBWeight = BlockWeights[BB];
+ if (NumUnknownEdges == 0) {
+ // If we already know the weight of all edges, the weight of the
+ // basic block can be computed. It should be no larger than the sum
+ // of all edge weights.
+ if (TotalWeight > BBWeight) {
+ BBWeight = TotalWeight;
+ Changed = true;
+ DEBUG(dbgs() << "All edge weights for " << BB->getName()
+ << " known. Set weight for block: ";
+ printBlockWeight(dbgs(), BB););
+ }
+ if (VisitedBlocks.insert(BB))
+ Changed = true;
+ } else if (NumUnknownEdges == 1 && VisitedBlocks.count(BB)) {
+ // If there is a single unknown edge and the block has been
+ // visited, then we can compute E's weight.
+ if (BBWeight >= TotalWeight)
+ EdgeWeights[UnknownEdge] = BBWeight - TotalWeight;
+ else
+ EdgeWeights[UnknownEdge] = 0;
+ VisitedEdges.insert(UnknownEdge);
+ Changed = true;
+ DEBUG(dbgs() << "Set weight for edge: ";
+ printEdgeWeight(dbgs(), UnknownEdge));
+ }
+ } else if (SelfReferentialEdge.first && VisitedBlocks.count(BB)) {
+ unsigned &BBWeight = BlockWeights[BB];
+ // We have a self-referential edge and the weight of BB is known.
+ if (BBWeight >= TotalWeight)
+ EdgeWeights[SelfReferentialEdge] = BBWeight - TotalWeight;
+ else
+ EdgeWeights[SelfReferentialEdge] = 0;
+ VisitedEdges.insert(SelfReferentialEdge);
+ Changed = true;
+ DEBUG(dbgs() << "Set self-referential edge weight to: ";
+ printEdgeWeight(dbgs(), SelfReferentialEdge));
+ }
+ }
+ }
+
+ return Changed;
+}
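A quick numeric illustration with made-up weights: if BB has weight 100, has already been visited, and has two successor edges, one visited with weight 60 and one unknown, the unknown edge receives 100 - 60 = 40 and is marked visited. Had the known edges summed to 120 instead, the unknown edge would be clamped to 0, since the weights are unsigned and cannot go negative.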
+
+/// \brief Build in/out edge lists for each basic block in the CFG.
+///
+/// We are interested in unique edges. If a block B1 has multiple
+/// edges to another block B2, we only add a single B1->B2 edge.
+void SampleFunctionProfile::buildEdges(Function &F) {
+ for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
+ BasicBlock *B1 = I;
+
+ // Add predecessors for B1.
+ SmallPtrSet<BasicBlock *, 16> Visited;
+ if (!Predecessors[B1].empty())
+ llvm_unreachable("Found a stale predecessors list in a basic block.");
+ for (pred_iterator PI = pred_begin(B1), PE = pred_end(B1); PI != PE; ++PI) {
+ BasicBlock *B2 = *PI;
+ if (Visited.insert(B2))
+ Predecessors[B1].push_back(B2);
+ }
+
+ // Add successors for B1.
+ Visited.clear();
+ if (!Successors[B1].empty())
+ llvm_unreachable("Found a stale successors list in a basic block.");
+ for (succ_iterator SI = succ_begin(B1), SE = succ_end(B1); SI != SE; ++SI) {
+ BasicBlock *B2 = *SI;
+ if (Visited.insert(B2))
+ Successors[B1].push_back(B2);
+ }
+ }
+}
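For example, if a switch terminator in a hypothetical block B1 has three case labels that all jump to B2, only one B1->B2 entry is recorded in Successors[B1] (and a single B1 entry in Predecessors[B2]), so the propagation step reasons about one edge weight rather than three duplicates.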
- // Clear the block weights cache.
- FProfile.BlockWeights.clear();
+/// \brief Propagate weights into edges
+///
+/// The following rules are applied to every block B in the CFG:
+///
+/// - If B has a single predecessor/successor, then the weight
+/// of that edge is the weight of the block.
+///
+/// - If all incoming or outgoing edges are known except one, and the
+/// weight of the block is already known, the weight of the unknown
+/// edge will be the weight of the block minus the sum of all the known
+/// edges. If the sum of all the known edges is larger than B's weight,
+/// we set the unknown edge weight to zero.
+///
+/// - If there is a self-referential edge, and the weight of the block is
+/// known, the weight for that edge is set to the weight of the block
+/// minus the weight of the other incoming edges to that block (if
+/// known).
+void SampleFunctionProfile::propagateWeights(Function &F) {
+ bool Changed = true;
+ unsigned i = 0;
+
+ // Before propagation starts, build, for each block, a list of
+ // unique predecessors and successors. This is necessary to handle
+ // identical edges in multiway branches. Since we visit all blocks and all
+ // edges of the CFG, it is cleaner to build these lists once at the start
+ // of the pass.
+ buildEdges(F);
+
+ // Propagate until we converge or we go past the iteration limit.
+ while (Changed && i++ < SampleProfileMaxPropagateIterations) {
+ Changed = propagateThroughEdges(F);
+ }
- // When we find a branch instruction: For each edge E out of the branch,
- // the weight of E is the weight of the target block.
+ // Generate MD_prof metadata for every branch instruction using the
+ // edge weights computed during propagation.
+ DEBUG(dbgs() << "\nPropagation complete. Setting branch weights\n");
+ MDBuilder MDB(F.getContext());
for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
BasicBlock *B = I;
TerminatorInst *TI = B->getTerminator();
@@ -439,34 +928,155 @@ bool SampleProfile::emitAnnotations(Function &F) {
if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI))
continue;
- SmallVector<uint32_t, 4> Weights;
- unsigned NSuccs = TI->getNumSuccessors();
- for (unsigned I = 0; I < NSuccs; ++I) {
+ DEBUG(dbgs() << "\nGetting weights for branch at line "
+ << TI->getDebugLoc().getLine() << ".\n");
+ SmallVector<unsigned, 4> Weights;
+ bool AllWeightsZero = true;
+ for (unsigned I = 0; I < TI->getNumSuccessors(); ++I) {
BasicBlock *Succ = TI->getSuccessor(I);
- uint32_t Weight =
- computeBlockWeight(Succ, FirstLineno, FProfile.BodySamples);
+ Edge E = std::make_pair(B, Succ);
+ unsigned Weight = EdgeWeights[E];
+ DEBUG(dbgs() << "\t"; printEdgeWeight(dbgs(), E));
Weights.push_back(Weight);
+ if (Weight != 0)
+ AllWeightsZero = false;
}
- TI->setMetadata(llvm::LLVMContext::MD_prof,
- MDB.createBranchWeights(Weights));
- Changed = true;
+ // Only set weights if there is at least one non-zero weight.
+ // In any other case, let the analyzer set weights.
+ if (!AllWeightsZero) {
+ DEBUG(dbgs() << "SUCCESS. Found non-zero weights.\n");
+ TI->setMetadata(llvm::LLVMContext::MD_prof,
+ MDB.createBranchWeights(Weights));
+ } else {
+ DEBUG(dbgs() << "SKIPPED. All branch weights are zero.\n");
+ }
}
+}
- return Changed;
+/// \brief Get the line number for the function header.
+///
+/// This looks up function \p F in the current compilation unit and
+/// retrieves the line number where the function is defined. This is
+/// line 0 for all the samples read from the profile file. Every line
+/// number is relative to this line.
+///
+/// \param F Function object to query.
+///
+/// \returns the line number where \p F is defined. If it returns 0,
+/// it means that there is no debug information available for \p F.
+unsigned SampleFunctionProfile::getFunctionLoc(Function &F) {
+ NamedMDNode *CUNodes = F.getParent()->getNamedMetadata("llvm.dbg.cu");
+ if (CUNodes) {
+ for (unsigned I = 0, E1 = CUNodes->getNumOperands(); I != E1; ++I) {
+ DICompileUnit CU(CUNodes->getOperand(I));
+ DIArray Subprograms = CU.getSubprograms();
+ for (unsigned J = 0, E2 = Subprograms.getNumElements(); J != E2; ++J) {
+ DISubprogram Subprogram(Subprograms.getElement(J));
+ if (Subprogram.describes(&F))
+ return Subprogram.getLineNumber();
+ }
+ }
+ }
+
+ F.getContext().diagnose(DiagnosticInfoSampleProfile(
+ "No debug information found in function " + F.getName()));
+ return 0;
}
-char SampleProfileLoader::ID = 0;
-INITIALIZE_PASS(SampleProfileLoader, "sample-profile", "Sample Profile loader",
- false, false)
+/// \brief Generate branch weight metadata for all branches in \p F.
+///
+/// Branch weights are computed out of instruction samples using a
+/// propagation heuristic. Propagation proceeds in 3 phases:
+///
+/// 1- Assignment of block weights. All the basic blocks in the function
+///    are initially assigned the same weight as their most frequently
+/// executed instruction.
+///
+/// 2- Creation of equivalence classes. Since samples may be missing from
+/// blocks, we can fill in the gaps by setting the weights of all the
+/// blocks in the same equivalence class to the same weight. To compute
+/// the concept of equivalence, we use dominance and loop information.
+/// Two blocks B1 and B2 are in the same equivalence class if B1
+/// dominates B2, B2 post-dominates B1 and both are in the same loop.
+///
+/// 3- Propagation of block weights into edges. This uses a simple
+/// propagation heuristic. The following rules are applied to every
+/// block B in the CFG:
+///
+/// - If B has a single predecessor/successor, then the weight
+/// of that edge is the weight of the block.
+///
+/// - If all the edges are known except one, and the weight of the
+/// block is already known, the weight of the unknown edge will
+/// be the weight of the block minus the sum of all the known
+/// edges. If the sum of all the known edges is larger than B's weight,
+/// we set the unknown edge weight to zero.
+///
+/// - If there is a self-referential edge, and the weight of the block is
+/// known, the weight for that edge is set to the weight of the block
+/// minus the weight of the other incoming edges to that block (if
+/// known).
+///
+/// Since this propagation is not guaranteed to finalize for every CFG, we
+/// only allow it to proceed for a limited number of iterations (controlled
+/// by -sample-profile-max-propagate-iterations).
+///
+/// FIXME: Try to replace this propagation heuristic with a scheme
+/// that is guaranteed to finalize. A work-list approach similar to
+/// the standard value propagation algorithm used by SSA-CCP might
+/// work here.
+///
+/// Once all the branch weights are computed, we emit the MD_prof
+/// metadata on B using the computed values for each of its branches.
+///
+/// \param F The function to query.
+///
+/// \returns true if \p F was modified. Returns false, otherwise.
+bool SampleFunctionProfile::emitAnnotations(Function &F, DominatorTree *DomTree,
+ PostDominatorTree *PostDomTree,
+ LoopInfo *Loops) {
+ bool Changed = false;
-bool SampleProfileLoader::runOnFunction(Function &F) {
- return Profiler->emitAnnotations(F);
+ // Initialize invariants used during computation and propagation.
+ HeaderLineno = getFunctionLoc(F);
+ if (HeaderLineno == 0)
+ return false;
+
+ DEBUG(dbgs() << "Line number for the first instruction in " << F.getName()
+ << ": " << HeaderLineno << "\n");
+ DT = DomTree;
+ PDT = PostDomTree;
+ LI = Loops;
+ Ctx = &F.getParent()->getContext();
+
+ // Compute basic block weights.
+ Changed |= computeBlockWeights(F);
+
+ if (Changed) {
+ // Find equivalence classes.
+ findEquivalenceClasses(F);
+
+ // Propagate weights to all edges.
+ propagateWeights(F);
+ }
+
+ return Changed;
}
+char SampleProfileLoader::ID = 0;
+INITIALIZE_PASS_BEGIN(SampleProfileLoader, "sample-profile",
+ "Sample Profile loader", false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(PostDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(AddDiscriminators)
+INITIALIZE_PASS_END(SampleProfileLoader, "sample-profile",
+ "Sample Profile loader", false, false)
+
bool SampleProfileLoader::doInitialization(Module &M) {
- Profiler.reset(new SampleProfile(Filename));
- Profiler->loadText();
+ Profiler.reset(new SampleModuleProfile(M, Filename));
+ ProfileIsValid = Profiler->loadText();
return true;
}
@@ -477,3 +1087,15 @@ FunctionPass *llvm::createSampleProfileLoaderPass() {
FunctionPass *llvm::createSampleProfileLoaderPass(StringRef Name) {
return new SampleProfileLoader(Name);
}
+
+bool SampleProfileLoader::runOnFunction(Function &F) {
+ if (!ProfileIsValid)
+ return false;
+ DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ PostDominatorTree *PDT = &getAnalysis<PostDominatorTree>();
+ LoopInfo *LI = &getAnalysis<LoopInfo>();
+ SampleFunctionProfile &FunctionProfile = Profiler->getProfile(F);
+ if (!FunctionProfile.empty())
+ return FunctionProfile.emitAnnotations(F, DT, PDT, LI);
+ return false;
+}
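Assuming the -sample-profile-file option registered earlier in this file keeps its usual spelling, and given IR that carries debug line information (otherwise getFunctionLoc diagnoses the problem and the pass does nothing), the loader would typically be driven from opt roughly as follows:

    opt -sample-profile -sample-profile-file=prof.txt -S input.ll -o annotated.ll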
diff --git a/contrib/llvm/lib/Transforms/Scalar/Scalar.cpp b/contrib/llvm/lib/Transforms/Scalar/Scalar.cpp
index 857597e..de724d4 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Scalar.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Scalar.cpp
@@ -17,8 +17,8 @@
#include "llvm-c/Initialization.h"
#include "llvm-c/Transforms/Scalar.h"
#include "llvm/Analysis/Passes.h"
-#include "llvm/Analysis/Verifier.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/PassManager.h"
@@ -29,11 +29,12 @@ using namespace llvm;
void llvm::initializeScalarOpts(PassRegistry &Registry) {
initializeADCEPass(Registry);
initializeSampleProfileLoaderPass(Registry);
- initializeCodeGenPreparePass(Registry);
+ initializeConstantHoistingPass(Registry);
initializeConstantPropagationPass(Registry);
initializeCorrelatedValuePropagationPass(Registry);
initializeDCEPass(Registry);
initializeDeadInstEliminationPass(Registry);
+ initializeScalarizerPass(Registry);
initializeDSEPass(Registry);
initializeGVNPass(Registry);
initializeEarlyCSEPass(Registry);
@@ -51,6 +52,7 @@ void llvm::initializeScalarOpts(PassRegistry &Registry) {
initializeLowerAtomicPass(Registry);
initializeLowerExpectIntrinsicPass(Registry);
initializeMemCpyOptPass(Registry);
+ initializeMergedLoadStoreMotionPass(Registry);
initializePartiallyInlineLibCallsPass(Registry);
initializeReassociatePass(Registry);
initializeRegToMemPass(Registry);
@@ -63,6 +65,8 @@ void llvm::initializeScalarOpts(PassRegistry &Registry) {
initializeStructurizeCFGPass(Registry);
initializeSinkingPass(Registry);
initializeTailCallElimPass(Registry);
+ initializeSeparateConstOffsetFromGEPPass(Registry);
+ initializeLoadCombinePass(Registry);
}
void LLVMInitializeScalarOpts(LLVMPassRegistryRef R) {
@@ -81,10 +85,18 @@ void LLVMAddDeadStoreEliminationPass(LLVMPassManagerRef PM) {
unwrap(PM)->add(createDeadStoreEliminationPass());
}
+void LLVMAddScalarizerPass(LLVMPassManagerRef PM) {
+ unwrap(PM)->add(createScalarizerPass());
+}
+
void LLVMAddGVNPass(LLVMPassManagerRef PM) {
unwrap(PM)->add(createGVNPass());
}
+void LLVMAddMergedLoadStoreMotionPass(LLVMPassManagerRef PM) {
+ unwrap(PM)->add(createMergedLoadStoreMotionPass());
+}
+
void LLVMAddIndVarSimplifyPass(LLVMPassManagerRef PM) {
unwrap(PM)->add(createIndVarSimplifyPass());
}
@@ -176,6 +188,7 @@ void LLVMAddDemoteMemoryToRegisterPass(LLVMPassManagerRef PM) {
void LLVMAddVerifierPass(LLVMPassManagerRef PM) {
unwrap(PM)->add(createVerifierPass());
+ // FIXME: should this also add createDebugInfoVerifierPass()?
}
void LLVMAddCorrelatedValuePropagationPass(LLVMPassManagerRef PM) {
diff --git a/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 57b290e..e2a24a7 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -19,20 +19,21 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/DIBuilder.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
@@ -41,10 +42,8 @@
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
@@ -52,6 +51,8 @@
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;
+#define DEBUG_TYPE "scalarrepl"
+
STATISTIC(NumReplaced, "Number of allocas broken up");
STATISTIC(NumPromoted, "Number of allocas promoted");
STATISTIC(NumAdjusted, "Number of scalar allocas adjusted to allow promotion");
@@ -80,14 +81,14 @@ namespace {
ScalarLoadThreshold = SLT;
}
- bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
bool performScalarRepl(Function &F);
bool performPromotion(Function &F);
private:
bool HasDomTree;
- DataLayout *TD;
+ const DataLayout *DL;
/// DeadInsts - Keep track of instructions we have made dead, so that
/// we can remove them after we are done working.
@@ -195,8 +196,8 @@ namespace {
// getAnalysisUsage - This pass does not require any passes, but we know it
// will not alter the CFG, so say so.
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<DominatorTree>();
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<DominatorTreeWrapperPass>();
AU.setPreservesCFG();
}
};
@@ -212,7 +213,7 @@ namespace {
// getAnalysisUsage - This pass does not require any passes, but we know it
// will not alter the CFG, so say so.
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
}
};
@@ -224,7 +225,7 @@ char SROA_SSAUp::ID = 0;
INITIALIZE_PASS_BEGIN(SROA_DT, "scalarrepl",
"Scalar Replacement of Aggregates (DT)", false, false)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(SROA_DT, "scalarrepl",
"Scalar Replacement of Aggregates (DT)", false, false)
@@ -258,7 +259,7 @@ namespace {
class ConvertToScalarInfo {
/// AllocaSize - The size of the alloca being considered in bytes.
unsigned AllocaSize;
- const DataLayout &TD;
+ const DataLayout &DL;
unsigned ScalarLoadThreshold;
/// IsNotTrivial - This is set to true if there is some access to the object
@@ -301,10 +302,10 @@ class ConvertToScalarInfo {
bool HadDynamicAccess;
public:
- explicit ConvertToScalarInfo(unsigned Size, const DataLayout &td,
+ explicit ConvertToScalarInfo(unsigned Size, const DataLayout &DL,
unsigned SLT)
- : AllocaSize(Size), TD(td), ScalarLoadThreshold(SLT), IsNotTrivial(false),
- ScalarKind(Unknown), VectorTy(0), HadNonMemTransferAccess(false),
+ : AllocaSize(Size), DL(DL), ScalarLoadThreshold(SLT), IsNotTrivial(false),
+ ScalarKind(Unknown), VectorTy(nullptr), HadNonMemTransferAccess(false),
HadDynamicAccess(false) { }
AllocaInst *TryConvert(AllocaInst *AI);
@@ -332,8 +333,8 @@ private:
AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) {
// If we can't convert this scalar, or if mem2reg can trivially do it, bail
// out.
- if (!CanConvertToScalar(AI, 0, 0) || !IsNotTrivial)
- return 0;
+ if (!CanConvertToScalar(AI, 0, nullptr) || !IsNotTrivial)
+ return nullptr;
// If an alloca has only memset / memcpy uses, it may still have an Unknown
// ScalarKind. Treat it as an Integer below.
@@ -361,23 +362,24 @@ AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) {
// Do not convert to scalar integer if the alloca size exceeds the
// scalar load threshold.
if (BitWidth > ScalarLoadThreshold)
- return 0;
+ return nullptr;
if ((ScalarKind == ImplicitVector || ScalarKind == Integer) &&
- !HadNonMemTransferAccess && !TD.fitsInLegalInteger(BitWidth))
- return 0;
+ !HadNonMemTransferAccess && !DL.fitsInLegalInteger(BitWidth))
+ return nullptr;
// Dynamic accesses on integers aren't yet supported. They need us to shift
// by a dynamic amount which could be difficult to work out as we might not
// know whether to use a left or right shift.
if (ScalarKind == Integer && HadDynamicAccess)
- return 0;
+ return nullptr;
DEBUG(dbgs() << "CONVERT TO SCALAR INTEGER: " << *AI << "\n");
// Create and insert the integer alloca.
NewTy = IntegerType::get(AI->getContext(), BitWidth);
}
- AllocaInst *NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin());
- ConvertUsesToScalar(AI, NewAI, 0, 0);
+ AllocaInst *NewAI = new AllocaInst(NewTy, nullptr, "",
+ AI->getParent()->begin());
+ ConvertUsesToScalar(AI, NewAI, 0, nullptr);
return NewAI;
}
@@ -466,10 +468,10 @@ bool ConvertToScalarInfo::MergeInVectorType(VectorType *VInTy,
/// SawVec flag.
bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
Value* NonConstantIdx) {
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
- Instruction *User = cast<Instruction>(*UI);
+ for (User *U : V->users()) {
+ Instruction *UI = cast<Instruction>(U);
- if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
+ if (LoadInst *LI = dyn_cast<LoadInst>(UI)) {
// Don't break volatile loads.
if (!LI->isSimple())
return false;
@@ -481,7 +483,7 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
continue;
}
- if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
+ if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
// Storing the pointer, not into the value?
if (SI->getOperand(0) == V || !SI->isSimple()) return false;
// Don't touch MMX operations.
@@ -492,7 +494,7 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
continue;
}
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
+ if (BitCastInst *BCI = dyn_cast<BitCastInst>(UI)) {
if (!onlyUsedByLifetimeMarkers(BCI))
IsNotTrivial = true; // Can't be mem2reg'd.
if (!CanConvertToScalar(BCI, Offset, NonConstantIdx))
@@ -500,7 +502,7 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
continue;
}
- if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UI)) {
// If this is a GEP with a variable indices, we can't handle it.
PointerType* PtrTy = dyn_cast<PointerType>(GEP->getPointerOperandType());
if (!PtrTy)
@@ -508,7 +510,7 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
// Compute the offset that this GEP adds to the pointer.
SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
- Value *GEPNonConstantIdx = 0;
+ Value *GEPNonConstantIdx = nullptr;
if (!GEP->hasAllConstantIndices()) {
if (!isa<VectorType>(PtrTy->getElementType()))
return false;
@@ -520,7 +522,7 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
HadDynamicAccess = true;
} else
GEPNonConstantIdx = NonConstantIdx;
- uint64_t GEPOffset = TD.getIndexedOffset(PtrTy,
+ uint64_t GEPOffset = DL.getIndexedOffset(PtrTy,
Indices);
// See if all uses can be converted.
if (!CanConvertToScalar(GEP, Offset+GEPOffset, GEPNonConstantIdx))
@@ -532,7 +534,7 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
// If this is a constant sized memset of a constant value (e.g. 0) we can
// handle it.
- if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
+ if (MemSetInst *MSI = dyn_cast<MemSetInst>(UI)) {
// Store to dynamic index.
if (NonConstantIdx)
return false;
@@ -559,12 +561,12 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
// If this is a memcpy or memmove into or out of the whole allocation, we
// can handle it like a load or store of the scalar type.
- if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
+ if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(UI)) {
// Store to dynamic index.
if (NonConstantIdx)
return false;
ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength());
- if (Len == 0 || Len->getZExtValue() != AllocaSize || Offset != 0)
+ if (!Len || Len->getZExtValue() != AllocaSize || Offset != 0)
return false;
IsNotTrivial = true; // Can't be mem2reg'd.
@@ -572,7 +574,7 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
}
// If this is a lifetime intrinsic, we can handle it.
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) {
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(UI)) {
if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
II->getIntrinsicID() == Intrinsic::lifetime_end) {
continue;
@@ -597,7 +599,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
uint64_t Offset,
Value* NonConstantIdx) {
while (!Ptr->use_empty()) {
- Instruction *User = cast<Instruction>(Ptr->use_back());
+ Instruction *User = cast<Instruction>(Ptr->user_back());
if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
ConvertUsesToScalar(CI, NewAI, Offset, NonConstantIdx);
@@ -608,14 +610,14 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
// Compute the offset that this GEP adds to the pointer.
SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
- Value* GEPNonConstantIdx = 0;
+ Value* GEPNonConstantIdx = nullptr;
if (!GEP->hasAllConstantIndices()) {
assert(!NonConstantIdx &&
"Dynamic GEP reading from dynamic GEP unsupported");
GEPNonConstantIdx = Indices.pop_back_val();
} else
GEPNonConstantIdx = NonConstantIdx;
- uint64_t GEPOffset = TD.getIndexedOffset(GEP->getPointerOperandType(),
+ uint64_t GEPOffset = DL.getIndexedOffset(GEP->getPointerOperandType(),
Indices);
ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8, GEPNonConstantIdx);
GEP->eraseFromParent();
@@ -671,7 +673,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
Value *New = ConvertScalar_InsertValue(
ConstantInt::get(User->getContext(), APVal),
- Old, Offset, 0, Builder);
+ Old, Offset, nullptr, Builder);
Builder.CreateStore(New, NewAI);
// If the load we just inserted is now dead, then the memset overwrote
@@ -692,9 +694,9 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
// If the source and destination are both to the same alloca, then this is
// a noop copy-to-self, just delete it. Otherwise, emit a load and store
// as appropriate.
- AllocaInst *OrigAI = cast<AllocaInst>(GetUnderlyingObject(Ptr, &TD, 0));
+ AllocaInst *OrigAI = cast<AllocaInst>(GetUnderlyingObject(Ptr, &DL, 0));
- if (GetUnderlyingObject(MTI->getSource(), &TD, 0) != OrigAI) {
+ if (GetUnderlyingObject(MTI->getSource(), &DL, 0) != OrigAI) {
// Dest must be OrigAI, change this to be a load from the original
// pointer (bitcasted), then a store to our new alloca.
assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
@@ -710,7 +712,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
SrcVal->setAlignment(MTI->getAlignment());
Builder.CreateStore(SrcVal, NewAI);
- } else if (GetUnderlyingObject(MTI->getDest(), &TD, 0) != OrigAI) {
+ } else if (GetUnderlyingObject(MTI->getDest(), &DL, 0) != OrigAI) {
// Src must be OrigAI, change this to be a load from NewAI then a store
// through the original dest pointer (bitcasted).
assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
@@ -770,15 +772,15 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
// If the result alloca is a vector type, this is either an element
// access or a bitcast to another vector type of the same size.
if (VectorType *VTy = dyn_cast<VectorType>(FromType)) {
- unsigned FromTypeSize = TD.getTypeAllocSize(FromType);
- unsigned ToTypeSize = TD.getTypeAllocSize(ToType);
+ unsigned FromTypeSize = DL.getTypeAllocSize(FromType);
+ unsigned ToTypeSize = DL.getTypeAllocSize(ToType);
if (FromTypeSize == ToTypeSize)
return Builder.CreateBitCast(FromVal, ToType);
// Otherwise it must be an element access.
unsigned Elt = 0;
if (Offset) {
- unsigned EltSize = TD.getTypeAllocSizeInBits(VTy->getElementType());
+ unsigned EltSize = DL.getTypeAllocSizeInBits(VTy->getElementType());
Elt = Offset/EltSize;
assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
}
@@ -804,12 +806,12 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
if (StructType *ST = dyn_cast<StructType>(ToType)) {
assert(!NonConstantIdx &&
"Dynamic indexing into struct types not supported");
- const StructLayout &Layout = *TD.getStructLayout(ST);
+ const StructLayout &Layout = *DL.getStructLayout(ST);
Value *Res = UndefValue::get(ST);
for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
Offset+Layout.getElementOffsetInBits(i),
- 0, Builder);
+ nullptr, Builder);
Res = Builder.CreateInsertValue(Res, Elt, i);
}
return Res;
@@ -818,11 +820,12 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
if (ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
assert(!NonConstantIdx &&
"Dynamic indexing into array types not supported");
- uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType());
+ uint64_t EltSize = DL.getTypeAllocSizeInBits(AT->getElementType());
Value *Res = UndefValue::get(AT);
for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
- Offset+i*EltSize, 0, Builder);
+ Offset+i*EltSize, nullptr,
+ Builder);
Res = Builder.CreateInsertValue(Res, Elt, i);
}
return Res;
@@ -834,12 +837,12 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
// If this is a big-endian system and the load is narrower than the
// full alloca type, we need to do a shift to get the right bits.
int ShAmt = 0;
- if (TD.isBigEndian()) {
+ if (DL.isBigEndian()) {
// On big-endian machines, the lowest bit is stored at the bit offset
// from the pointer given by getTypeStoreSizeInBits. This matters for
// integers with a bitwidth that is not a multiple of 8.
- ShAmt = TD.getTypeStoreSizeInBits(NTy) -
- TD.getTypeStoreSizeInBits(ToType) - Offset;
+ ShAmt = DL.getTypeStoreSizeInBits(NTy) -
+ DL.getTypeStoreSizeInBits(ToType) - Offset;
} else {
ShAmt = Offset;
}
@@ -855,7 +858,7 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
ConstantInt::get(FromVal->getType(), -ShAmt));
// Finally, unconditionally truncate the integer to the right width.
- unsigned LIBitWidth = TD.getTypeSizeInBits(ToType);
+ unsigned LIBitWidth = DL.getTypeSizeInBits(ToType);
if (LIBitWidth < NTy->getBitWidth())
FromVal =
Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(),
@@ -902,8 +905,8 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
LLVMContext &Context = Old->getContext();
if (VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
- uint64_t VecSize = TD.getTypeAllocSizeInBits(VTy);
- uint64_t ValSize = TD.getTypeAllocSizeInBits(SV->getType());
+ uint64_t VecSize = DL.getTypeAllocSizeInBits(VTy);
+ uint64_t ValSize = DL.getTypeAllocSizeInBits(SV->getType());
// Changing the whole vector with memset or with an access of a different
// vector type?
@@ -914,7 +917,7 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
Type *EltTy = VTy->getElementType();
if (SV->getType() != EltTy)
SV = Builder.CreateBitCast(SV, EltTy);
- uint64_t EltSize = TD.getTypeAllocSizeInBits(EltTy);
+ uint64_t EltSize = DL.getTypeAllocSizeInBits(EltTy);
unsigned Elt = Offset/EltSize;
Value *Idx;
if (NonConstantIdx) {
@@ -933,12 +936,12 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
if (StructType *ST = dyn_cast<StructType>(SV->getType())) {
assert(!NonConstantIdx &&
"Dynamic indexing into struct types not supported");
- const StructLayout &Layout = *TD.getStructLayout(ST);
+ const StructLayout &Layout = *DL.getStructLayout(ST);
for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
Value *Elt = Builder.CreateExtractValue(SV, i);
Old = ConvertScalar_InsertValue(Elt, Old,
Offset+Layout.getElementOffsetInBits(i),
- 0, Builder);
+ nullptr, Builder);
}
return Old;
}
@@ -946,24 +949,25 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
if (ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
assert(!NonConstantIdx &&
"Dynamic indexing into array types not supported");
- uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType());
+ uint64_t EltSize = DL.getTypeAllocSizeInBits(AT->getElementType());
for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
Value *Elt = Builder.CreateExtractValue(SV, i);
- Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, 0, Builder);
+ Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, nullptr,
+ Builder);
}
return Old;
}
// If SV is a float, convert it to the appropriate integer type.
// If it is a pointer, do the same.
- unsigned SrcWidth = TD.getTypeSizeInBits(SV->getType());
- unsigned DestWidth = TD.getTypeSizeInBits(AllocaType);
- unsigned SrcStoreWidth = TD.getTypeStoreSizeInBits(SV->getType());
- unsigned DestStoreWidth = TD.getTypeStoreSizeInBits(AllocaType);
+ unsigned SrcWidth = DL.getTypeSizeInBits(SV->getType());
+ unsigned DestWidth = DL.getTypeSizeInBits(AllocaType);
+ unsigned SrcStoreWidth = DL.getTypeStoreSizeInBits(SV->getType());
+ unsigned DestStoreWidth = DL.getTypeStoreSizeInBits(AllocaType);
if (SV->getType()->isFloatingPointTy() || SV->getType()->isVectorTy())
SV = Builder.CreateBitCast(SV, IntegerType::get(SV->getContext(),SrcWidth));
else if (SV->getType()->isPointerTy())
- SV = Builder.CreatePtrToInt(SV, TD.getIntPtrType(SV->getType()));
+ SV = Builder.CreatePtrToInt(SV, DL.getIntPtrType(SV->getType()));
// Zero extend or truncate the value if needed.
if (SV->getType() != AllocaType) {
@@ -982,7 +986,7 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
// If this is a big-endian system and the store is narrower than the
// full alloca type, we need to do a shift to get the right bits.
int ShAmt = 0;
- if (TD.isBigEndian()) {
+ if (DL.isBigEndian()) {
// On big-endian machines, the lowest bit is stored at the bit offset
// from the pointer given by getTypeStoreSizeInBits. This matters for
// integers with a bitwidth that is not a multiple of 8.
@@ -1020,7 +1024,11 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
bool SROA::runOnFunction(Function &F) {
- TD = getAnalysisIfAvailable<DataLayout>();
+ if (skipOptnoneFunction(F))
+ return false;
+
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
bool Changed = performPromotion(F);
@@ -1028,7 +1036,7 @@ bool SROA::runOnFunction(Function &F) {
// theoretically needs to. It should be refactored in order to support
// target-independent IR. Until this is done, just skip the actual
// scalar-replacement portion of this pass.
- if (!TD) return Changed;
+ if (!DL) return Changed;
while (1) {
bool LocalChange = performScalarRepl(F);
@@ -1050,17 +1058,16 @@ class AllocaPromoter : public LoadAndStorePromoter {
public:
AllocaPromoter(const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S,
DIBuilder *DB)
- : LoadAndStorePromoter(Insts, S), AI(0), DIB(DB) {}
+ : LoadAndStorePromoter(Insts, S), AI(nullptr), DIB(DB) {}
void run(AllocaInst *AI, const SmallVectorImpl<Instruction*> &Insts) {
// Remember which alloca we're promoting (for isInstInList).
this->AI = AI;
if (MDNode *DebugNode = MDNode::getIfExists(AI->getContext(), AI)) {
- for (Value::use_iterator UI = DebugNode->use_begin(),
- E = DebugNode->use_end(); UI != E; ++UI)
- if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
+ for (User *U : DebugNode->users())
+ if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
DDIs.push_back(DDI);
- else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(*UI))
+ else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
DVIs.push_back(DVI);
}
@@ -1078,14 +1085,14 @@ public:
}
}
- virtual bool isInstInList(Instruction *I,
- const SmallVectorImpl<Instruction*> &Insts) const {
+ bool isInstInList(Instruction *I,
+ const SmallVectorImpl<Instruction*> &Insts) const override {
if (LoadInst *LI = dyn_cast<LoadInst>(I))
return LI->getOperand(0) == AI;
return cast<StoreInst>(I)->getPointerOperand() == AI;
}
- virtual void updateDebugInfo(Instruction *Inst) const {
+ void updateDebugInfo(Instruction *Inst) const override {
for (SmallVectorImpl<DbgDeclareInst *>::const_iterator I = DDIs.begin(),
E = DDIs.end(); I != E; ++I) {
DbgDeclareInst *DDI = *I;
@@ -1097,7 +1104,7 @@ public:
for (SmallVectorImpl<DbgValueInst *>::const_iterator I = DVIs.begin(),
E = DVIs.end(); I != E; ++I) {
DbgValueInst *DVI = *I;
- Value *Arg = NULL;
+ Value *Arg = nullptr;
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
// If an argument is zero extended then use argument directly. The ZExt
// may be zapped by an optimization pass in future.
@@ -1134,22 +1141,21 @@ public:
///
/// We can do this to a select if its only uses are loads and if the operand to
/// the select can be loaded unconditionally.
-static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *TD) {
- bool TDerefable = SI->getTrueValue()->isDereferenceablePointer();
- bool FDerefable = SI->getFalseValue()->isDereferenceablePointer();
+static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *DL) {
+ bool TDerefable = SI->getTrueValue()->isDereferenceablePointer(DL);
+ bool FDerefable = SI->getFalseValue()->isDereferenceablePointer(DL);
- for (Value::use_iterator UI = SI->use_begin(), UE = SI->use_end();
- UI != UE; ++UI) {
- LoadInst *LI = dyn_cast<LoadInst>(*UI);
- if (LI == 0 || !LI->isSimple()) return false;
+ for (User *U : SI->users()) {
+ LoadInst *LI = dyn_cast<LoadInst>(U);
+ if (!LI || !LI->isSimple()) return false;
    // Both operands to the select need to be dereferenceable, either absolutely
// (e.g. allocas) or at this point because we can see other accesses to it.
if (!TDerefable && !isSafeToLoadUnconditionally(SI->getTrueValue(), LI,
- LI->getAlignment(), TD))
+ LI->getAlignment(), DL))
return false;
if (!FDerefable && !isSafeToLoadUnconditionally(SI->getFalseValue(), LI,
- LI->getAlignment(), TD))
+ LI->getAlignment(), DL))
return false;
}
@@ -1172,17 +1178,16 @@ static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *TD) {
///
/// We can do this to a select if its only uses are loads and if the operand to
/// the select can be loaded unconditionally.
-static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *TD) {
+static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *DL) {
// For now, we can only do this promotion if the load is in the same block as
// the PHI, and if there are no stores between the phi and load.
// TODO: Allow recursive phi users.
// TODO: Allow stores.
BasicBlock *BB = PN->getParent();
unsigned MaxAlign = 0;
- for (Value::use_iterator UI = PN->use_begin(), UE = PN->use_end();
- UI != UE; ++UI) {
- LoadInst *LI = dyn_cast<LoadInst>(*UI);
- if (LI == 0 || !LI->isSimple()) return false;
+ for (User *U : PN->users()) {
+ LoadInst *LI = dyn_cast<LoadInst>(U);
+ if (!LI || !LI->isSimple()) return false;
// For now we only allow loads in the same block as the PHI. This is a
// common case that happens when instcombine merges two loads through a PHI.
@@ -1221,8 +1226,8 @@ static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *TD) {
// If this pointer is always safe to load, or if we can prove that there is
// already a load in the block, then we can move the load to the pred block.
- if (InVal->isDereferenceablePointer() ||
- isSafeToLoadUnconditionally(InVal, Pred->getTerminator(), MaxAlign, TD))
+ if (InVal->isDereferenceablePointer(DL) ||
+ isSafeToLoadUnconditionally(InVal, Pred->getTerminator(), MaxAlign, DL))
continue;
return false;
@@ -1236,13 +1241,10 @@ static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *TD) {
/// direct (non-volatile) loads and stores to it. If the alloca is close but
/// not quite there, this will transform the code to allow promotion. As such,
/// it is a non-pure predicate.
-static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *TD) {
+static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *DL) {
SetVector<Instruction*, SmallVector<Instruction*, 4>,
SmallPtrSet<Instruction*, 4> > InstsToRewrite;
-
- for (Value::use_iterator UI = AI->use_begin(), UE = AI->use_end();
- UI != UE; ++UI) {
- User *U = *UI;
+ for (User *U : AI->users()) {
if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
if (!LI->isSimple())
return false;
@@ -1265,12 +1267,12 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *TD) {
// This is very rare and we just scrambled the use list of AI, start
// over completely.
- return tryToMakeAllocaBePromotable(AI, TD);
+ return tryToMakeAllocaBePromotable(AI, DL);
}
// If it is safe to turn "load (select c, AI, ptr)" into a select of two
// loads, then we can transform this by rewriting the select.
- if (!isSafeSelectToSpeculate(SI, TD))
+ if (!isSafeSelectToSpeculate(SI, DL))
return false;
InstsToRewrite.insert(SI);
@@ -1285,7 +1287,7 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *TD) {
// If it is safe to turn "load (phi [AI, ptr, ...])" into a PHI of loads
// in the pred blocks, then we can transform this by rewriting the PHI.
- if (!isSafePHIToSpeculate(PN, TD))
+ if (!isSafePHIToSpeculate(PN, DL))
return false;
InstsToRewrite.insert(PN);
@@ -1312,12 +1314,9 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *TD) {
for (unsigned i = 0, e = InstsToRewrite.size(); i != e; ++i) {
if (BitCastInst *BCI = dyn_cast<BitCastInst>(InstsToRewrite[i])) {
// This could only be a bitcast used by nothing but lifetime intrinsics.
- for (BitCastInst::use_iterator I = BCI->use_begin(), E = BCI->use_end();
- I != E;) {
- Use &U = I.getUse();
- ++I;
- cast<Instruction>(U.getUser())->eraseFromParent();
- }
+ for (BitCastInst::user_iterator I = BCI->user_begin(), E = BCI->user_end();
+ I != E;)
+ cast<Instruction>(*I++)->eraseFromParent();
BCI->eraseFromParent();
continue;
}
@@ -1326,7 +1325,7 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *TD) {
// Selects in InstsToRewrite only have load uses. Rewrite each as two
// loads with a new select.
while (!SI->use_empty()) {
- LoadInst *LI = cast<LoadInst>(SI->use_back());
+ LoadInst *LI = cast<LoadInst>(SI->user_back());
IRBuilder<> Builder(LI);
LoadInst *TrueLoad =
@@ -1367,13 +1366,13 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *TD) {
// Get the TBAA tag and alignment to use from one of the loads. It doesn't
// matter which one we get and if any differ, it doesn't matter.
- LoadInst *SomeLoad = cast<LoadInst>(PN->use_back());
+ LoadInst *SomeLoad = cast<LoadInst>(PN->user_back());
MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
unsigned Align = SomeLoad->getAlignment();
// Rewrite all loads of the PN to use the new PHI.
while (!PN->use_empty()) {
- LoadInst *LI = cast<LoadInst>(PN->use_back());
+ LoadInst *LI = cast<LoadInst>(PN->user_back());
LI->replaceAllUsesWith(NewPN);
LI->eraseFromParent();
}
@@ -1385,7 +1384,7 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *TD) {
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
BasicBlock *Pred = PN->getIncomingBlock(i);
LoadInst *&Load = InsertedLoads[Pred];
- if (Load == 0) {
+ if (!Load) {
Load = new LoadInst(PN->getIncomingValue(i),
PN->getName() + "." + Pred->getName(),
Pred->getTerminator());
@@ -1405,9 +1404,9 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *TD) {
bool SROA::performPromotion(Function &F) {
std::vector<AllocaInst*> Allocas;
- DominatorTree *DT = 0;
+ DominatorTree *DT = nullptr;
if (HasDomTree)
- DT = &getAnalysis<DominatorTree>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
BasicBlock &BB = F.getEntryBlock(); // Get the entry node for the function
DIBuilder DIB(*F.getParent());
@@ -1420,7 +1419,7 @@ bool SROA::performPromotion(Function &F) {
// the entry node
for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) // Is it an alloca?
- if (tryToMakeAllocaBePromotable(AI, TD))
+ if (tryToMakeAllocaBePromotable(AI, DL))
Allocas.push_back(AI);
if (Allocas.empty()) break;
@@ -1433,9 +1432,8 @@ bool SROA::performPromotion(Function &F) {
AllocaInst *AI = Allocas[i];
// Build list of instructions to promote.
- for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
- UI != E; ++UI)
- Insts.push_back(cast<Instruction>(*UI));
+ for (User *U : AI->users())
+ Insts.push_back(cast<Instruction>(U));
AllocaPromoter(Insts, SSA, &DIB).run(AI, Insts);
Insts.clear();
}
@@ -1496,7 +1494,7 @@ bool SROA::performScalarRepl(Function &F) {
// transform the allocation instruction if it is an array allocation
// (allocations OF arrays are ok though), and an allocation of a scalar
// value cannot be decomposed at all.
- uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
+ uint64_t AllocaSize = DL->getTypeAllocSize(AI->getAllocatedType());
// Do not promote [0 x %struct].
if (AllocaSize == 0) continue;
@@ -1520,7 +1518,7 @@ bool SROA::performScalarRepl(Function &F) {
// that we can't just check based on the type: the alloca may be of an i32
// but that has pointer arithmetic to set byte 3 of it or something.
if (AllocaInst *NewAI = ConvertToScalarInfo(
- (unsigned)AllocaSize, *TD, ScalarLoadThreshold).TryConvert(AI)) {
+ (unsigned)AllocaSize, *DL, ScalarLoadThreshold).TryConvert(AI)) {
NewAI->takeName(AI);
AI->eraseFromParent();
++NumConverted;
@@ -1543,7 +1541,7 @@ void SROA::DoScalarReplacement(AllocaInst *AI,
if (StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
ElementAllocas.reserve(ST->getNumContainedTypes());
for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
- AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
+ AllocaInst *NA = new AllocaInst(ST->getContainedType(i), nullptr,
AI->getAlignment(),
AI->getName() + "." + Twine(i), AI);
ElementAllocas.push_back(NA);
@@ -1554,7 +1552,7 @@ void SROA::DoScalarReplacement(AllocaInst *AI,
ElementAllocas.reserve(AT->getNumElements());
Type *ElTy = AT->getElementType();
for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
- AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
+ AllocaInst *NA = new AllocaInst(ElTy, nullptr, AI->getAlignment(),
AI->getName() + "." + Twine(i), AI);
ElementAllocas.push_back(NA);
WorkList.push_back(NA); // Add to worklist for recursive processing
@@ -1583,7 +1581,7 @@ void SROA::DeleteDeadInstructions() {
// Zero out the operand and see if it becomes trivially dead.
// (But, don't add allocas to the dead instruction list -- they are
// already on the worklist and will be deleted separately.)
- *OI = 0;
+ *OI = nullptr;
if (isInstructionTriviallyDead(U) && !isa<AllocaInst>(U))
DeadInsts.push_back(U);
}
@@ -1598,8 +1596,8 @@ void SROA::DeleteDeadInstructions() {
/// referenced by this instruction.
void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
AllocaInfo &Info) {
- for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) {
- Instruction *User = cast<Instruction>(*UI);
+ for (Use &U : I->uses()) {
+ Instruction *User = cast<Instruction>(U.getUser());
if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
isSafeForScalarRepl(BC, Offset, Info);
@@ -1610,19 +1608,17 @@ void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
isSafeForScalarRepl(GEPI, GEPOffset, Info);
} else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
- if (Length == 0)
- return MarkUnsafe(Info, User);
- if (Length->isNegative())
+ if (!Length || Length->isNegative())
return MarkUnsafe(Info, User);
- isSafeMemAccess(Offset, Length->getZExtValue(), 0,
- UI.getOperandNo() == 0, Info, MI,
+ isSafeMemAccess(Offset, Length->getZExtValue(), nullptr,
+ U.getOperandNo() == 0, Info, MI,
true /*AllowWholeAccess*/);
} else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
if (!LI->isSimple())
return MarkUnsafe(Info, User);
Type *LIType = LI->getType();
- isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType),
+ isSafeMemAccess(Offset, DL->getTypeAllocSize(LIType),
LIType, false, Info, LI, true /*AllowWholeAccess*/);
Info.hasALoadOrStore = true;
@@ -1632,7 +1628,7 @@ void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
return MarkUnsafe(Info, User);
Type *SIType = SI->getOperand(0)->getType();
- isSafeMemAccess(Offset, TD->getTypeAllocSize(SIType),
+ isSafeMemAccess(Offset, DL->getTypeAllocSize(SIType),
SIType, true, Info, SI, true /*AllowWholeAccess*/);
Info.hasALoadOrStore = true;
} else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) {
@@ -1665,39 +1661,39 @@ void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
if (!Info.CheckedPHIs.insert(PN))
return;
- for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) {
- Instruction *User = cast<Instruction>(*UI);
+ for (User *U : I->users()) {
+ Instruction *UI = cast<Instruction>(U);
- if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
+ if (BitCastInst *BC = dyn_cast<BitCastInst>(UI)) {
isSafePHISelectUseForScalarRepl(BC, Offset, Info);
- } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
+ } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) {
// Only allow "bitcast" GEPs for simplicity. We could generalize this,
// but would have to prove that we're staying inside of an element being
// promoted.
if (!GEPI->hasAllZeroIndices())
- return MarkUnsafe(Info, User);
+ return MarkUnsafe(Info, UI);
isSafePHISelectUseForScalarRepl(GEPI, Offset, Info);
- } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
+ } else if (LoadInst *LI = dyn_cast<LoadInst>(UI)) {
if (!LI->isSimple())
- return MarkUnsafe(Info, User);
+ return MarkUnsafe(Info, UI);
Type *LIType = LI->getType();
- isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType),
+ isSafeMemAccess(Offset, DL->getTypeAllocSize(LIType),
LIType, false, Info, LI, false /*AllowWholeAccess*/);
Info.hasALoadOrStore = true;
- } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
// Store is ok if storing INTO the pointer, not storing the pointer
if (!SI->isSimple() || SI->getOperand(0) == I)
- return MarkUnsafe(Info, User);
+ return MarkUnsafe(Info, UI);
Type *SIType = SI->getOperand(0)->getType();
- isSafeMemAccess(Offset, TD->getTypeAllocSize(SIType),
+ isSafeMemAccess(Offset, DL->getTypeAllocSize(SIType),
SIType, true, Info, SI, false /*AllowWholeAccess*/);
Info.hasALoadOrStore = true;
- } else if (isa<PHINode>(User) || isa<SelectInst>(User)) {
- isSafePHISelectUseForScalarRepl(User, Offset, Info);
+ } else if (isa<PHINode>(UI) || isa<SelectInst>(UI)) {
+ isSafePHISelectUseForScalarRepl(UI, Offset, Info);
} else {
- return MarkUnsafe(Info, User);
+ return MarkUnsafe(Info, UI);
}
if (Info.isUnsafe) return;
}
@@ -1731,12 +1727,12 @@ void SROA::isSafeGEP(GetElementPtrInst *GEPI,
// Compute the offset due to this GEP and check if the alloca has a
// component element at that offset.
SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end());
- // If this GEP is non constant then the last operand must have been a
+ // If this GEP is non-constant then the last operand must have been a
// dynamic index into a vector. Pop this now as it has no impact on the
// constant part of the offset.
if (NonConstant)
Indices.pop_back();
- Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
+ Offset += DL->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
if (!TypeHasComponent(Info.AI->getAllocatedType(), Offset,
NonConstantIdxSize))
MarkUnsafe(Info, GEPI);
@@ -1750,12 +1746,12 @@ static bool isHomogeneousAggregate(Type *T, unsigned &NumElts,
Type *&EltTy) {
if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
NumElts = AT->getNumElements();
- EltTy = (NumElts == 0 ? 0 : AT->getElementType());
+ EltTy = (NumElts == 0 ? nullptr : AT->getElementType());
return true;
}
if (StructType *ST = dyn_cast<StructType>(T)) {
NumElts = ST->getNumContainedTypes();
- EltTy = (NumElts == 0 ? 0 : ST->getContainedType(0));
+ EltTy = (NumElts == 0 ? nullptr : ST->getContainedType(0));
for (unsigned n = 1; n < NumElts; ++n) {
if (ST->getContainedType(n) != EltTy)
return false;
@@ -1795,7 +1791,7 @@ void SROA::isSafeMemAccess(uint64_t Offset, uint64_t MemSize,
bool AllowWholeAccess) {
// Check if this is a load/store of the entire alloca.
if (Offset == 0 && AllowWholeAccess &&
- MemSize == TD->getTypeAllocSize(Info.AI->getAllocatedType())) {
+ MemSize == DL->getTypeAllocSize(Info.AI->getAllocatedType())) {
// This can be safe for MemIntrinsics (where MemOpType is 0) and integer
// loads/stores (which are essentially the same as the MemIntrinsics with
// regard to copying padding between elements). But, if an alloca is
@@ -1832,20 +1828,20 @@ bool SROA::TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size) {
Type *EltTy;
uint64_t EltSize;
if (StructType *ST = dyn_cast<StructType>(T)) {
- const StructLayout *Layout = TD->getStructLayout(ST);
+ const StructLayout *Layout = DL->getStructLayout(ST);
unsigned EltIdx = Layout->getElementContainingOffset(Offset);
EltTy = ST->getContainedType(EltIdx);
- EltSize = TD->getTypeAllocSize(EltTy);
+ EltSize = DL->getTypeAllocSize(EltTy);
Offset -= Layout->getElementOffset(EltIdx);
} else if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
EltTy = AT->getElementType();
- EltSize = TD->getTypeAllocSize(EltTy);
+ EltSize = DL->getTypeAllocSize(EltTy);
if (Offset >= AT->getNumElements() * EltSize)
return false;
Offset %= EltSize;
} else if (VectorType *VT = dyn_cast<VectorType>(T)) {
EltTy = VT->getElementType();
- EltSize = TD->getTypeAllocSize(EltTy);
+ EltSize = DL->getTypeAllocSize(EltTy);
if (Offset >= VT->getNumElements() * EltSize)
return false;
Offset %= EltSize;
@@ -1867,8 +1863,8 @@ bool SROA::TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size) {
void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
SmallVectorImpl<AllocaInst *> &NewElts) {
for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E;) {
- Use &TheUse = UI.getUse();
- Instruction *User = cast<Instruction>(*UI++);
+ Use &TheUse = *UI++;
+ Instruction *User = cast<Instruction>(TheUse.getUser());
if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
RewriteBitCast(BC, AI, Offset, NewElts);
@@ -1884,7 +1880,7 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
uint64_t MemSize = Length->getZExtValue();
if (Offset == 0 &&
- MemSize == TD->getTypeAllocSize(AI->getAllocatedType()))
+ MemSize == DL->getTypeAllocSize(AI->getAllocatedType()))
RewriteMemIntrinUserOfAlloca(MI, I, AI, NewElts);
// Otherwise the intrinsic can only touch a single element and the
// address operand will be updated, so nothing else needs to be done.
@@ -1920,8 +1916,8 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
LI->replaceAllUsesWith(Insert);
DeadInsts.push_back(LI);
} else if (LIType->isIntegerTy() &&
- TD->getTypeAllocSize(LIType) ==
- TD->getTypeAllocSize(AI->getAllocatedType())) {
+ DL->getTypeAllocSize(LIType) ==
+ DL->getTypeAllocSize(AI->getAllocatedType())) {
// If this is a load of the entire alloca to an integer, rewrite it.
RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
}
@@ -1947,8 +1943,8 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
}
DeadInsts.push_back(SI);
} else if (SIType->isIntegerTy() &&
- TD->getTypeAllocSize(SIType) ==
- TD->getTypeAllocSize(AI->getAllocatedType())) {
+ DL->getTypeAllocSize(SIType) ==
+ DL->getTypeAllocSize(AI->getAllocatedType())) {
// If this is a store of the entire alloca from an integer, rewrite it.
RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
}
@@ -2010,7 +2006,7 @@ uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset,
Type *&IdxTy) {
uint64_t Idx = 0;
if (StructType *ST = dyn_cast<StructType>(T)) {
- const StructLayout *Layout = TD->getStructLayout(ST);
+ const StructLayout *Layout = DL->getStructLayout(ST);
Idx = Layout->getElementContainingOffset(Offset);
T = ST->getContainedType(Idx);
Offset -= Layout->getElementOffset(Idx);
@@ -2018,7 +2014,7 @@ uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset,
return Idx;
} else if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
T = AT->getElementType();
- uint64_t EltSize = TD->getTypeAllocSize(T);
+ uint64_t EltSize = DL->getTypeAllocSize(T);
Idx = Offset / EltSize;
Offset -= Idx * EltSize;
IdxTy = Type::getInt64Ty(T->getContext());
@@ -2026,7 +2022,7 @@ uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset,
}
VectorType *VT = cast<VectorType>(T);
T = VT->getElementType();
- uint64_t EltSize = TD->getTypeAllocSize(T);
+ uint64_t EltSize = DL->getTypeAllocSize(T);
Idx = Offset / EltSize;
Offset -= Idx * EltSize;
IdxTy = Type::getInt64Ty(T->getContext());
@@ -2044,10 +2040,10 @@ void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
// In this case, it must be the last GEP operand which is dynamic so keep that
// aside until we've found the constant GEP offset then add it back in at the
// end.
- Value* NonConstantIdx = 0;
+ Value* NonConstantIdx = nullptr;
if (!GEPI->hasAllConstantIndices())
NonConstantIdx = Indices.pop_back_val();
- Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
+ Offset += DL->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
RewriteForScalarRepl(GEPI, AI, Offset, NewElts);
@@ -2114,11 +2110,12 @@ void SROA::RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
if (NewOffset) {
// Splice the first element and index 'NewOffset' bytes in. SROA will
// split the alloca again later.
- Value *V = Builder.CreateBitCast(NewElts[Idx], Builder.getInt8PtrTy());
+ unsigned AS = AI->getType()->getAddressSpace();
+ Value *V = Builder.CreateBitCast(NewElts[Idx], Builder.getInt8PtrTy(AS));
V = Builder.CreateGEP(V, Builder.getInt64(NewOffset));
IdxTy = NewElts[Idx]->getAllocatedType();
- uint64_t EltSize = TD->getTypeAllocSize(IdxTy) - NewOffset;
+ uint64_t EltSize = DL->getTypeAllocSize(IdxTy) - NewOffset;
if (EltSize > Size) {
EltSize = Size;
Size = 0;
@@ -2134,7 +2131,7 @@ void SROA::RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
for (; Idx != NewElts.size() && Size; ++Idx) {
IdxTy = NewElts[Idx]->getAllocatedType();
- uint64_t EltSize = TD->getTypeAllocSize(IdxTy);
+ uint64_t EltSize = DL->getTypeAllocSize(IdxTy);
if (EltSize > Size) {
EltSize = Size;
Size = 0;
@@ -2161,7 +2158,7 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// appropriate type. The "Other" pointer is the pointer that goes to memory
// that doesn't have anything to do with the alloca that we are promoting. For
// memset, this Value* stays null.
- Value *OtherPtr = 0;
+ Value *OtherPtr = nullptr;
unsigned MemAlignment = MI->getAlignment();
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcpy
if (Inst == MTI->getRawDest())
@@ -2213,7 +2210,7 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
// If this is a memcpy/memmove, emit a GEP of the other element address.
- Value *OtherElt = 0;
+ Value *OtherElt = nullptr;
unsigned OtherEltAlign = MemAlignment;
if (OtherPtr) {
@@ -2226,10 +2223,10 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
Type *OtherTy = OtherPtrTy->getElementType();
if (StructType *ST = dyn_cast<StructType>(OtherTy)) {
- EltOffset = TD->getStructLayout(ST)->getElementOffset(i);
+ EltOffset = DL->getStructLayout(ST)->getElementOffset(i);
} else {
Type *EltTy = cast<SequentialType>(OtherTy)->getElementType();
- EltOffset = TD->getTypeAllocSize(EltTy)*i;
+ EltOffset = DL->getTypeAllocSize(EltTy)*i;
}
// The alignment of the other pointer is the guaranteed alignment of the
@@ -2270,7 +2267,7 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
Type *ValTy = EltTy->getScalarType();
// Construct an integer with the right value.
- unsigned EltSize = TD->getTypeSizeInBits(ValTy);
+ unsigned EltSize = DL->getTypeSizeInBits(ValTy);
APInt OneVal(EltSize, CI->getZExtValue());
APInt TotalVal(OneVal);
// Set each byte.
@@ -2300,7 +2297,7 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// this element.
}
- unsigned EltSize = TD->getTypeAllocSize(EltTy);
+ unsigned EltSize = DL->getTypeAllocSize(EltTy);
if (!EltSize)
continue;
@@ -2334,12 +2331,12 @@ SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
// and store the element value to the individual alloca.
Value *SrcVal = SI->getOperand(0);
Type *AllocaEltTy = AI->getAllocatedType();
- uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);
+ uint64_t AllocaSizeBits = DL->getTypeAllocSizeInBits(AllocaEltTy);
IRBuilder<> Builder(SI);
// Handle tail padding by extending the operand
- if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
+ if (DL->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
SrcVal = Builder.CreateZExt(SrcVal,
IntegerType::get(SI->getContext(), AllocaSizeBits));
@@ -2349,15 +2346,15 @@ SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
// There are two forms here: AI could be an array or struct. Both cases
// have different ways to compute the element offset.
if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
- const StructLayout *Layout = TD->getStructLayout(EltSTy);
+ const StructLayout *Layout = DL->getStructLayout(EltSTy);
for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
// Get the number of bits to shift SrcVal to get the value.
Type *FieldTy = EltSTy->getElementType(i);
uint64_t Shift = Layout->getElementOffsetInBits(i);
- if (TD->isBigEndian())
- Shift = AllocaSizeBits-Shift-TD->getTypeAllocSizeInBits(FieldTy);
+ if (DL->isBigEndian())
+ Shift = AllocaSizeBits-Shift-DL->getTypeAllocSizeInBits(FieldTy);
Value *EltVal = SrcVal;
if (Shift) {
@@ -2366,7 +2363,7 @@ SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
}
// Truncate down to an integer of the right size.
- uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);
+ uint64_t FieldSizeBits = DL->getTypeSizeInBits(FieldTy);
// Ignore zero sized fields like {}, they obviously contain no data.
if (FieldSizeBits == 0) continue;
@@ -2391,12 +2388,12 @@ SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
} else {
ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
Type *ArrayEltTy = ATy->getElementType();
- uint64_t ElementOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
- uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);
+ uint64_t ElementOffset = DL->getTypeAllocSizeInBits(ArrayEltTy);
+ uint64_t ElementSizeBits = DL->getTypeSizeInBits(ArrayEltTy);
uint64_t Shift;
- if (TD->isBigEndian())
+ if (DL->isBigEndian())
Shift = AllocaSizeBits-ElementOffset;
else
Shift = 0;
@@ -2430,7 +2427,7 @@ SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
}
new StoreInst(EltVal, DestField, SI);
- if (TD->isBigEndian())
+ if (DL->isBigEndian())
Shift -= ElementOffset;
else
Shift += ElementOffset;
@@ -2448,20 +2445,20 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
// Extract each element out of the NewElts according to its structure offset
// and form the result value.
Type *AllocaEltTy = AI->getAllocatedType();
- uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);
+ uint64_t AllocaSizeBits = DL->getTypeAllocSizeInBits(AllocaEltTy);
DEBUG(dbgs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << '\n' << *LI
<< '\n');
// There are two forms here: AI could be an array or struct. Both cases
// have different ways to compute the element offset.
- const StructLayout *Layout = 0;
+ const StructLayout *Layout = nullptr;
uint64_t ArrayEltBitOffset = 0;
if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
- Layout = TD->getStructLayout(EltSTy);
+ Layout = DL->getStructLayout(EltSTy);
} else {
Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
- ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
+ ArrayEltBitOffset = DL->getTypeAllocSizeInBits(ArrayEltTy);
}
Value *ResultVal =
@@ -2473,7 +2470,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
Value *SrcField = NewElts[i];
Type *FieldTy =
cast<PointerType>(SrcField->getType())->getElementType();
- uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);
+ uint64_t FieldSizeBits = DL->getTypeSizeInBits(FieldTy);
// Ignore zero sized fields like {}, they obviously contain no data.
if (FieldSizeBits == 0) continue;
@@ -2504,7 +2501,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
else // Array case.
Shift = i*ArrayEltBitOffset;
- if (TD->isBigEndian())
+ if (DL->isBigEndian())
Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();
if (Shift) {
@@ -2521,7 +2518,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
}
// Handle tail padding by truncating the result
- if (TD->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)
+ if (DL->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)
ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI);
LI->replaceAllUsesWith(ResultVal);
@@ -2531,15 +2528,15 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
/// HasPadding - Return true if the specified type has any structure or
/// alignment padding in between the elements that would be split apart
/// by SROA; return false otherwise.
-static bool HasPadding(Type *Ty, const DataLayout &TD) {
+static bool HasPadding(Type *Ty, const DataLayout &DL) {
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Ty = ATy->getElementType();
- return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
+ return DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty);
}
// SROA currently handles only Arrays and Structs.
StructType *STy = cast<StructType>(Ty);
- const StructLayout *SL = TD.getStructLayout(STy);
+ const StructLayout *SL = DL.getStructLayout(STy);
unsigned PrevFieldBitOffset = 0;
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
unsigned FieldBitOffset = SL->getElementOffsetInBits(i);
@@ -2548,7 +2545,7 @@ static bool HasPadding(Type *Ty, const DataLayout &TD) {
// previous one.
if (i) {
unsigned PrevFieldEnd =
- PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
+ PrevFieldBitOffset+DL.getTypeSizeInBits(STy->getElementType(i-1));
if (PrevFieldEnd < FieldBitOffset)
return true;
}
@@ -2557,7 +2554,7 @@ static bool HasPadding(Type *Ty, const DataLayout &TD) {
// Check for tail padding.
if (unsigned EltCount = STy->getNumElements()) {
unsigned PrevFieldEnd = PrevFieldBitOffset +
- TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
+ DL.getTypeSizeInBits(STy->getElementType(EltCount-1));
if (PrevFieldEnd < SL->getSizeInBits())
return true;
}
@@ -2584,7 +2581,7 @@ bool SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
// types, but may actually be used. In these cases, we refuse to promote the
// struct.
if (Info.isMemCpySrc && Info.isMemCpyDst &&
- HasPadding(AI->getAllocatedType(), *TD))
+ HasPadding(AI->getAllocatedType(), *DL))
return false;
// If the alloca never has an access to just *part* of it, but is accessed
diff --git a/contrib/llvm/lib/Transforms/Scalar/Scalarizer.cpp b/contrib/llvm/lib/Transforms/Scalar/Scalarizer.cpp
new file mode 100644
index 0000000..7a73f11
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Scalar/Scalarizer.cpp
@@ -0,0 +1,663 @@
+//===--- Scalarizer.cpp - Scalarize vector operations ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass converts vector operations into scalar operations, in order
+// to expose optimization opportunities on the individual scalar operations.
+// It is mainly intended for targets that do not have vector units, but it
+// may also be useful for revectorizing code to different vector widths.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "scalarizer"
+
+namespace {
+// Used to store the scattered form of a vector.
+typedef SmallVector<Value *, 8> ValueVector;
+
+// Used to map a vector Value to its scattered form. We use std::map
+// because we want iterators to persist across insertion and because the
+// values are relatively large.
+typedef std::map<Value *, ValueVector> ScatterMap;
+
+// Lists Instructions that have been replaced with scalar implementations,
+// along with a pointer to their scattered forms.
+typedef SmallVector<std::pair<Instruction *, ValueVector *>, 16> GatherList;
+
+// Provides a very limited vector-like interface for lazily accessing one
+// component of a scattered vector or vector pointer.
+class Scatterer {
+public:
+ Scatterer() {}
+
+ // Scatter V into Size components. If new instructions are needed,
+ // insert them before BBI in BB. If Cache is nonnull, use it to cache
+ // the results.
+ Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
+ ValueVector *cachePtr = nullptr);
+
+ // Return component I, creating a new Value for it if necessary.
+ Value *operator[](unsigned I);
+
+ // Return the number of components.
+ unsigned size() const { return Size; }
+
+private:
+ BasicBlock *BB;
+ BasicBlock::iterator BBI;
+ Value *V;
+ ValueVector *CachePtr;
+ PointerType *PtrTy;
+ ValueVector Tmp;
+ unsigned Size;
+};
+
+// FCmpSplitter(FCI)(Builder, X, Y, Name) uses Builder to create an FCmp
+// called Name that compares X and Y in the same way as FCI.
+struct FCmpSplitter {
+ FCmpSplitter(FCmpInst &fci) : FCI(fci) {}
+ Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
+ const Twine &Name) const {
+ return Builder.CreateFCmp(FCI.getPredicate(), Op0, Op1, Name);
+ }
+ FCmpInst &FCI;
+};
+
+// ICmpSplitter(ICI)(Builder, X, Y, Name) uses Builder to create an ICmp
+// called Name that compares X and Y in the same way as ICI.
+struct ICmpSplitter {
+ ICmpSplitter(ICmpInst &ici) : ICI(ici) {}
+ Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
+ const Twine &Name) const {
+ return Builder.CreateICmp(ICI.getPredicate(), Op0, Op1, Name);
+ }
+ ICmpInst &ICI;
+};
+
+// BinarySplitter(BO)(Builder, X, Y, Name) uses Builder to create
+// a binary operator like BO called Name with operands X and Y.
+struct BinarySplitter {
+ BinarySplitter(BinaryOperator &bo) : BO(bo) {}
+ Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
+ const Twine &Name) const {
+ return Builder.CreateBinOp(BO.getOpcode(), Op0, Op1, Name);
+ }
+ BinaryOperator &BO;
+};
+
+// Information about a load or store that we're scalarizing.
+struct VectorLayout {
+ VectorLayout() : VecTy(nullptr), ElemTy(nullptr), VecAlign(0), ElemSize(0) {}
+
+ // Return the alignment of element I.
+ uint64_t getElemAlign(unsigned I) {
+ return MinAlign(VecAlign, I * ElemSize);
+ }
+
+ // The type of the vector.
+ VectorType *VecTy;
+
+ // The type of each element.
+ Type *ElemTy;
+
+ // The alignment of the vector.
+ uint64_t VecAlign;
+
+ // The size of each element.
+ uint64_t ElemSize;
+};
+
+class Scalarizer : public FunctionPass,
+ public InstVisitor<Scalarizer, bool> {
+public:
+ static char ID;
+
+ Scalarizer() :
+ FunctionPass(ID) {
+ initializeScalarizerPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool doInitialization(Module &M) override;
+ bool runOnFunction(Function &F) override;
+
+ // InstVisitor methods. They return true if the instruction was scalarized,
+ // false if nothing changed.
+ bool visitInstruction(Instruction &) { return false; }
+ bool visitSelectInst(SelectInst &SI);
+ bool visitICmpInst(ICmpInst &);
+ bool visitFCmpInst(FCmpInst &);
+ bool visitBinaryOperator(BinaryOperator &);
+ bool visitGetElementPtrInst(GetElementPtrInst &);
+ bool visitCastInst(CastInst &);
+ bool visitBitCastInst(BitCastInst &);
+ bool visitShuffleVectorInst(ShuffleVectorInst &);
+ bool visitPHINode(PHINode &);
+ bool visitLoadInst(LoadInst &);
+ bool visitStoreInst(StoreInst &);
+
+private:
+ Scatterer scatter(Instruction *, Value *);
+ void gather(Instruction *, const ValueVector &);
+ bool canTransferMetadata(unsigned Kind);
+ void transferMetadata(Instruction *, const ValueVector &);
+ bool getVectorLayout(Type *, unsigned, VectorLayout &);
+ bool finish();
+
+ template<typename T> bool splitBinary(Instruction &, const T &);
+
+ ScatterMap Scattered;
+ GatherList Gathered;
+ unsigned ParallelLoopAccessMDKind;
+ const DataLayout *DL;
+};
+
+char Scalarizer::ID = 0;
+} // end anonymous namespace
+
+// This is disabled by default because having separate loads and stores makes
+// it more likely that the -combiner-alias-analysis limits will be reached.
+static cl::opt<bool> ScalarizeLoadStore
+ ("scalarize-load-store", cl::Hidden, cl::init(false),
+ cl::desc("Allow the scalarizer pass to scalarize loads and store"));
+
+INITIALIZE_PASS(Scalarizer, "scalarizer", "Scalarize vector operations",
+ false, false)
+
+Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
+ ValueVector *cachePtr)
+ : BB(bb), BBI(bbi), V(v), CachePtr(cachePtr) {
+ Type *Ty = V->getType();
+ PtrTy = dyn_cast<PointerType>(Ty);
+ if (PtrTy)
+ Ty = PtrTy->getElementType();
+ Size = Ty->getVectorNumElements();
+ if (!CachePtr)
+ Tmp.resize(Size, nullptr);
+ else if (CachePtr->empty())
+ CachePtr->resize(Size, nullptr);
+ else
+ assert(Size == CachePtr->size() && "Inconsistent vector sizes");
+}
+
+// Return component I, creating a new Value for it if necessary.
+Value *Scatterer::operator[](unsigned I) {
+ ValueVector &CV = (CachePtr ? *CachePtr : Tmp);
+ // Try to reuse a previous value.
+ if (CV[I])
+ return CV[I];
+ IRBuilder<> Builder(BB, BBI);
+ if (PtrTy) {
+ if (!CV[0]) {
+ Type *Ty =
+ PointerType::get(PtrTy->getElementType()->getVectorElementType(),
+ PtrTy->getAddressSpace());
+ CV[0] = Builder.CreateBitCast(V, Ty, V->getName() + ".i0");
+ }
+ if (I != 0)
+ CV[I] = Builder.CreateConstGEP1_32(CV[0], I,
+ V->getName() + ".i" + Twine(I));
+ } else {
+ // Search through a chain of InsertElementInsts looking for element I.
+ // Record other elements in the cache. The new V is still suitable
+ // for all uncached indices.
+ for (;;) {
+ InsertElementInst *Insert = dyn_cast<InsertElementInst>(V);
+ if (!Insert)
+ break;
+ ConstantInt *Idx = dyn_cast<ConstantInt>(Insert->getOperand(2));
+ if (!Idx)
+ break;
+ unsigned J = Idx->getZExtValue();
+ CV[J] = Insert->getOperand(1);
+ V = Insert->getOperand(0);
+ if (I == J)
+ return CV[J];
+ }
+ CV[I] = Builder.CreateExtractElement(V, Builder.getInt32(I),
+ V->getName() + ".i" + Twine(I));
+ }
+ return CV[I];
+}
+
+bool Scalarizer::doInitialization(Module &M) {
+ ParallelLoopAccessMDKind =
+ M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
+ return false;
+}
+
+bool Scalarizer::runOnFunction(Function &F) {
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
+ for (Function::iterator BBI = F.begin(), BBE = F.end(); BBI != BBE; ++BBI) {
+ BasicBlock *BB = BBI;
+ for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
+ Instruction *I = II;
+ bool Done = visit(I);
+ ++II;
+ if (Done && I->getType()->isVoidTy())
+ I->eraseFromParent();
+ }
+ }
+ return finish();
+}
+
+// Return a scattered form of V that can be accessed by Point. V must be a
+// vector or a pointer to a vector.
+Scatterer Scalarizer::scatter(Instruction *Point, Value *V) {
+ if (Argument *VArg = dyn_cast<Argument>(V)) {
+ // Put the scattered form of arguments in the entry block,
+ // so that it can be used everywhere.
+ Function *F = VArg->getParent();
+ BasicBlock *BB = &F->getEntryBlock();
+ return Scatterer(BB, BB->begin(), V, &Scattered[V]);
+ }
+ if (Instruction *VOp = dyn_cast<Instruction>(V)) {
+ // Put the scattered form of an instruction directly after the
+ // instruction.
+ BasicBlock *BB = VOp->getParent();
+ return Scatterer(BB, std::next(BasicBlock::iterator(VOp)),
+ V, &Scattered[V]);
+ }
+  // In the fallback case, just put the scattered form before Point and
+ // keep the result local to Point.
+ return Scatterer(Point->getParent(), Point, V);
+}
+
+// Replace Op with the gathered form of the components in CV. Defer the
+// deletion of Op and creation of the gathered form to the end of the pass,
+// so that we can avoid creating the gathered form if all uses of Op are
+// replaced with uses of CV.
+void Scalarizer::gather(Instruction *Op, const ValueVector &CV) {
+ // Since we're not deleting Op yet, stub out its operands, so that it
+ // doesn't make anything live unnecessarily.
+ for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I)
+ Op->setOperand(I, UndefValue::get(Op->getOperand(I)->getType()));
+
+ transferMetadata(Op, CV);
+
+ // If we already have a scattered form of Op (created from ExtractElements
+ // of Op itself), replace them with the new form.
+ ValueVector &SV = Scattered[Op];
+ if (!SV.empty()) {
+ for (unsigned I = 0, E = SV.size(); I != E; ++I) {
+ Instruction *Old = cast<Instruction>(SV[I]);
+ CV[I]->takeName(Old);
+ Old->replaceAllUsesWith(CV[I]);
+ Old->eraseFromParent();
+ }
+ }
+ SV = CV;
+ Gathered.push_back(GatherList::value_type(Op, &SV));
+}
+
+// Return true if it is safe to transfer the given metadata tag from
+// vector to scalar instructions.
+bool Scalarizer::canTransferMetadata(unsigned Tag) {
+ return (Tag == LLVMContext::MD_tbaa
+ || Tag == LLVMContext::MD_fpmath
+ || Tag == LLVMContext::MD_tbaa_struct
+ || Tag == LLVMContext::MD_invariant_load
+ || Tag == ParallelLoopAccessMDKind);
+}
+
+// Transfer metadata from Op to the instructions in CV if it is known
+// to be safe to do so.
+void Scalarizer::transferMetadata(Instruction *Op, const ValueVector &CV) {
+ SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
+ Op->getAllMetadataOtherThanDebugLoc(MDs);
+ for (unsigned I = 0, E = CV.size(); I != E; ++I) {
+ if (Instruction *New = dyn_cast<Instruction>(CV[I])) {
+ for (SmallVectorImpl<std::pair<unsigned, MDNode *> >::iterator
+ MI = MDs.begin(), ME = MDs.end(); MI != ME; ++MI)
+ if (canTransferMetadata(MI->first))
+ New->setMetadata(MI->first, MI->second);
+ New->setDebugLoc(Op->getDebugLoc());
+ }
+ }
+}
+
+// Try to fill in Layout from Ty, returning true on success. Alignment is
+// the alignment of the vector, or 0 if the ABI default should be used.
+bool Scalarizer::getVectorLayout(Type *Ty, unsigned Alignment,
+ VectorLayout &Layout) {
+ if (!DL)
+ return false;
+
+ // Make sure we're dealing with a vector.
+ Layout.VecTy = dyn_cast<VectorType>(Ty);
+ if (!Layout.VecTy)
+ return false;
+
+ // Check that we're dealing with full-byte elements.
+ Layout.ElemTy = Layout.VecTy->getElementType();
+ if (DL->getTypeSizeInBits(Layout.ElemTy) !=
+ DL->getTypeStoreSizeInBits(Layout.ElemTy))
+ return false;
+
+ if (Alignment)
+ Layout.VecAlign = Alignment;
+ else
+ Layout.VecAlign = DL->getABITypeAlignment(Layout.VecTy);
+ Layout.ElemSize = DL->getTypeStoreSize(Layout.ElemTy);
+ return true;
+}
+
+// Scalarize two-operand instruction I, using Split(Builder, X, Y, Name)
+// to create an instruction like I with operands X and Y and name Name.
+template<typename Splitter>
+bool Scalarizer::splitBinary(Instruction &I, const Splitter &Split) {
+ VectorType *VT = dyn_cast<VectorType>(I.getType());
+ if (!VT)
+ return false;
+
+ unsigned NumElems = VT->getNumElements();
+ IRBuilder<> Builder(I.getParent(), &I);
+ Scatterer Op0 = scatter(&I, I.getOperand(0));
+ Scatterer Op1 = scatter(&I, I.getOperand(1));
+ assert(Op0.size() == NumElems && "Mismatched binary operation");
+ assert(Op1.size() == NumElems && "Mismatched binary operation");
+ ValueVector Res;
+ Res.resize(NumElems);
+ for (unsigned Elem = 0; Elem < NumElems; ++Elem)
+ Res[Elem] = Split(Builder, Op0[Elem], Op1[Elem],
+ I.getName() + ".i" + Twine(Elem));
+ gather(&I, Res);
+ return true;
+}
+
+bool Scalarizer::visitSelectInst(SelectInst &SI) {
+ VectorType *VT = dyn_cast<VectorType>(SI.getType());
+ if (!VT)
+ return false;
+
+ unsigned NumElems = VT->getNumElements();
+ IRBuilder<> Builder(SI.getParent(), &SI);
+ Scatterer Op1 = scatter(&SI, SI.getOperand(1));
+ Scatterer Op2 = scatter(&SI, SI.getOperand(2));
+ assert(Op1.size() == NumElems && "Mismatched select");
+ assert(Op2.size() == NumElems && "Mismatched select");
+ ValueVector Res;
+ Res.resize(NumElems);
+
+ if (SI.getOperand(0)->getType()->isVectorTy()) {
+ Scatterer Op0 = scatter(&SI, SI.getOperand(0));
+ assert(Op0.size() == NumElems && "Mismatched select");
+ for (unsigned I = 0; I < NumElems; ++I)
+ Res[I] = Builder.CreateSelect(Op0[I], Op1[I], Op2[I],
+ SI.getName() + ".i" + Twine(I));
+ } else {
+ Value *Op0 = SI.getOperand(0);
+ for (unsigned I = 0; I < NumElems; ++I)
+ Res[I] = Builder.CreateSelect(Op0, Op1[I], Op2[I],
+ SI.getName() + ".i" + Twine(I));
+ }
+ gather(&SI, Res);
+ return true;
+}
+
+bool Scalarizer::visitICmpInst(ICmpInst &ICI) {
+ return splitBinary(ICI, ICmpSplitter(ICI));
+}
+
+bool Scalarizer::visitFCmpInst(FCmpInst &FCI) {
+ return splitBinary(FCI, FCmpSplitter(FCI));
+}
+
+bool Scalarizer::visitBinaryOperator(BinaryOperator &BO) {
+ return splitBinary(BO, BinarySplitter(BO));
+}
+
+bool Scalarizer::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
+ VectorType *VT = dyn_cast<VectorType>(GEPI.getType());
+ if (!VT)
+ return false;
+
+ IRBuilder<> Builder(GEPI.getParent(), &GEPI);
+ unsigned NumElems = VT->getNumElements();
+ unsigned NumIndices = GEPI.getNumIndices();
+
+ Scatterer Base = scatter(&GEPI, GEPI.getOperand(0));
+
+ SmallVector<Scatterer, 8> Ops;
+ Ops.resize(NumIndices);
+ for (unsigned I = 0; I < NumIndices; ++I)
+ Ops[I] = scatter(&GEPI, GEPI.getOperand(I + 1));
+
+ ValueVector Res;
+ Res.resize(NumElems);
+ for (unsigned I = 0; I < NumElems; ++I) {
+ SmallVector<Value *, 8> Indices;
+ Indices.resize(NumIndices);
+ for (unsigned J = 0; J < NumIndices; ++J)
+ Indices[J] = Ops[J][I];
+ Res[I] = Builder.CreateGEP(Base[I], Indices,
+ GEPI.getName() + ".i" + Twine(I));
+ if (GEPI.isInBounds())
+ if (GetElementPtrInst *NewGEPI = dyn_cast<GetElementPtrInst>(Res[I]))
+ NewGEPI->setIsInBounds();
+ }
+ gather(&GEPI, Res);
+ return true;
+}
+
+bool Scalarizer::visitCastInst(CastInst &CI) {
+ VectorType *VT = dyn_cast<VectorType>(CI.getDestTy());
+ if (!VT)
+ return false;
+
+ unsigned NumElems = VT->getNumElements();
+ IRBuilder<> Builder(CI.getParent(), &CI);
+ Scatterer Op0 = scatter(&CI, CI.getOperand(0));
+ assert(Op0.size() == NumElems && "Mismatched cast");
+ ValueVector Res;
+ Res.resize(NumElems);
+ for (unsigned I = 0; I < NumElems; ++I)
+ Res[I] = Builder.CreateCast(CI.getOpcode(), Op0[I], VT->getElementType(),
+ CI.getName() + ".i" + Twine(I));
+ gather(&CI, Res);
+ return true;
+}
+
+bool Scalarizer::visitBitCastInst(BitCastInst &BCI) {
+ VectorType *DstVT = dyn_cast<VectorType>(BCI.getDestTy());
+ VectorType *SrcVT = dyn_cast<VectorType>(BCI.getSrcTy());
+ if (!DstVT || !SrcVT)
+ return false;
+
+ unsigned DstNumElems = DstVT->getNumElements();
+ unsigned SrcNumElems = SrcVT->getNumElements();
+ IRBuilder<> Builder(BCI.getParent(), &BCI);
+ Scatterer Op0 = scatter(&BCI, BCI.getOperand(0));
+ ValueVector Res;
+ Res.resize(DstNumElems);
+
+ if (DstNumElems == SrcNumElems) {
+ for (unsigned I = 0; I < DstNumElems; ++I)
+ Res[I] = Builder.CreateBitCast(Op0[I], DstVT->getElementType(),
+ BCI.getName() + ".i" + Twine(I));
+ } else if (DstNumElems > SrcNumElems) {
+ // <M x t1> -> <N*M x t2>. Convert each t1 to <N x t2> and copy the
+ // individual elements to the destination.
+ unsigned FanOut = DstNumElems / SrcNumElems;
+ Type *MidTy = VectorType::get(DstVT->getElementType(), FanOut);
+ unsigned ResI = 0;
+ for (unsigned Op0I = 0; Op0I < SrcNumElems; ++Op0I) {
+ Value *V = Op0[Op0I];
+ Instruction *VI;
+ // Look through any existing bitcasts before converting to <N x t2>.
+ // In the best case, the resulting conversion might be a no-op.
+ while ((VI = dyn_cast<Instruction>(V)) &&
+ VI->getOpcode() == Instruction::BitCast)
+ V = VI->getOperand(0);
+ V = Builder.CreateBitCast(V, MidTy, V->getName() + ".cast");
+ Scatterer Mid = scatter(&BCI, V);
+ for (unsigned MidI = 0; MidI < FanOut; ++MidI)
+ Res[ResI++] = Mid[MidI];
+ }
+ } else {
+ // <N*M x t1> -> <M x t2>. Convert each group of <N x t1> into a t2.
+ unsigned FanIn = SrcNumElems / DstNumElems;
+ Type *MidTy = VectorType::get(SrcVT->getElementType(), FanIn);
+ unsigned Op0I = 0;
+ for (unsigned ResI = 0; ResI < DstNumElems; ++ResI) {
+ Value *V = UndefValue::get(MidTy);
+ for (unsigned MidI = 0; MidI < FanIn; ++MidI)
+ V = Builder.CreateInsertElement(V, Op0[Op0I++], Builder.getInt32(MidI),
+ BCI.getName() + ".i" + Twine(ResI)
+ + ".upto" + Twine(MidI));
+ Res[ResI] = Builder.CreateBitCast(V, DstVT->getElementType(),
+ BCI.getName() + ".i" + Twine(ResI));
+ }
+ }
+ gather(&BCI, Res);
+ return true;
+}
+
+bool Scalarizer::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
+ VectorType *VT = dyn_cast<VectorType>(SVI.getType());
+ if (!VT)
+ return false;
+
+ unsigned NumElems = VT->getNumElements();
+ Scatterer Op0 = scatter(&SVI, SVI.getOperand(0));
+ Scatterer Op1 = scatter(&SVI, SVI.getOperand(1));
+ ValueVector Res;
+ Res.resize(NumElems);
+
+ for (unsigned I = 0; I < NumElems; ++I) {
+ int Selector = SVI.getMaskValue(I);
+ if (Selector < 0)
+ Res[I] = UndefValue::get(VT->getElementType());
+ else if (unsigned(Selector) < Op0.size())
+ Res[I] = Op0[Selector];
+ else
+ Res[I] = Op1[Selector - Op0.size()];
+ }
+ gather(&SVI, Res);
+ return true;
+}
+
+bool Scalarizer::visitPHINode(PHINode &PHI) {
+ VectorType *VT = dyn_cast<VectorType>(PHI.getType());
+ if (!VT)
+ return false;
+
+ unsigned NumElems = VT->getNumElements();
+ IRBuilder<> Builder(PHI.getParent(), &PHI);
+ ValueVector Res;
+ Res.resize(NumElems);
+
+ unsigned NumOps = PHI.getNumOperands();
+ for (unsigned I = 0; I < NumElems; ++I)
+ Res[I] = Builder.CreatePHI(VT->getElementType(), NumOps,
+ PHI.getName() + ".i" + Twine(I));
+
+ for (unsigned I = 0; I < NumOps; ++I) {
+ Scatterer Op = scatter(&PHI, PHI.getIncomingValue(I));
+ BasicBlock *IncomingBlock = PHI.getIncomingBlock(I);
+ for (unsigned J = 0; J < NumElems; ++J)
+ cast<PHINode>(Res[J])->addIncoming(Op[J], IncomingBlock);
+ }
+ gather(&PHI, Res);
+ return true;
+}
+
+bool Scalarizer::visitLoadInst(LoadInst &LI) {
+ if (!ScalarizeLoadStore)
+ return false;
+ if (!LI.isSimple())
+ return false;
+
+ VectorLayout Layout;
+ if (!getVectorLayout(LI.getType(), LI.getAlignment(), Layout))
+ return false;
+
+ unsigned NumElems = Layout.VecTy->getNumElements();
+ IRBuilder<> Builder(LI.getParent(), &LI);
+ Scatterer Ptr = scatter(&LI, LI.getPointerOperand());
+ ValueVector Res;
+ Res.resize(NumElems);
+
+ for (unsigned I = 0; I < NumElems; ++I)
+ Res[I] = Builder.CreateAlignedLoad(Ptr[I], Layout.getElemAlign(I),
+ LI.getName() + ".i" + Twine(I));
+ gather(&LI, Res);
+ return true;
+}
+
+bool Scalarizer::visitStoreInst(StoreInst &SI) {
+ if (!ScalarizeLoadStore)
+ return false;
+ if (!SI.isSimple())
+ return false;
+
+ VectorLayout Layout;
+ Value *FullValue = SI.getValueOperand();
+ if (!getVectorLayout(FullValue->getType(), SI.getAlignment(), Layout))
+ return false;
+
+ unsigned NumElems = Layout.VecTy->getNumElements();
+ IRBuilder<> Builder(SI.getParent(), &SI);
+ Scatterer Ptr = scatter(&SI, SI.getPointerOperand());
+ Scatterer Val = scatter(&SI, FullValue);
+
+ ValueVector Stores;
+ Stores.resize(NumElems);
+ for (unsigned I = 0; I < NumElems; ++I) {
+ unsigned Align = Layout.getElemAlign(I);
+ Stores[I] = Builder.CreateAlignedStore(Val[I], Ptr[I], Align);
+ }
+ transferMetadata(&SI, Stores);
+ return true;
+}
+
+// Delete the instructions that we scalarized. If a full vector result
+// is still needed, recreate it using InsertElements.
+bool Scalarizer::finish() {
+ if (Gathered.empty())
+ return false;
+ for (GatherList::iterator GMI = Gathered.begin(), GME = Gathered.end();
+ GMI != GME; ++GMI) {
+ Instruction *Op = GMI->first;
+ ValueVector &CV = *GMI->second;
+ if (!Op->use_empty()) {
+ // The value is still needed, so recreate it using a series of
+ // InsertElements.
+ Type *Ty = Op->getType();
+ Value *Res = UndefValue::get(Ty);
+ BasicBlock *BB = Op->getParent();
+ unsigned Count = Ty->getVectorNumElements();
+ IRBuilder<> Builder(BB, Op);
+ if (isa<PHINode>(Op))
+ Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
+ for (unsigned I = 0; I < Count; ++I)
+ Res = Builder.CreateInsertElement(Res, CV[I], Builder.getInt32(I),
+ Op->getName() + ".upto" + Twine(I));
+ Res->takeName(Op);
+ Op->replaceAllUsesWith(Res);
+ }
+ Op->eraseFromParent();
+ }
+ Gathered.clear();
+ Scattered.clear();
+ return true;
+}
+
+FunctionPass *llvm::createScalarizerPass() {
+ return new Scalarizer();
+}
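
For orientation, the following is a minimal sketch of how this new pass can be scheduled from client code, assuming the legacy pass manager of this LLVM release and the createScalarizerPass() factory defined just above; the header paths and driver function are assumptions for illustration, not something shown in this commit.

// Hypothetical driver sketch: run the Scalarizer over a whole module via the
// legacy pass manager. createScalarizerPass() is the factory defined above.
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Scalar.h"

void scalarizeModule(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  // Splits vector arithmetic, compares, selects, PHIs, shuffles and casts
  // into per-element scalar operations.
  PM.add(llvm::createScalarizerPass());
  PM.run(M);
  // Note: vector loads and stores are left intact unless the cl::opt
  // -scalarize-load-store defined in Scalarizer.cpp is enabled.
}
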
diff --git a/contrib/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/contrib/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
new file mode 100644
index 0000000..6557ce4
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -0,0 +1,776 @@
+//===-- SeparateConstOffsetFromGEP.cpp - ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Loop unrolling may create many similar GEPs for array accesses.
+// e.g., a 2-level loop
+//
+// float a[32][32]; // global variable
+//
+// for (int i = 0; i < 2; ++i) {
+// for (int j = 0; j < 2; ++j) {
+// ...
+// ... = a[x + i][y + j];
+// ...
+// }
+// }
+//
+// will probably be unrolled to:
+//
+// gep %a, 0, %x, %y; load
+// gep %a, 0, %x, %y + 1; load
+// gep %a, 0, %x + 1, %y; load
+// gep %a, 0, %x + 1, %y + 1; load
+//
+// LLVM's GVN does not use partial redundancy elimination yet, and is thus
+// unable to reuse (gep %a, 0, %x, %y). As a result, this missed optimization incurs
+// a significant slowdown on targets with limited addressing modes. For instance,
+// because the PTX target does not support the reg+reg addressing mode, the
+// NVPTX backend emits PTX code that literally computes the pointer address of
+// each GEP, wasting tons of registers. It emits the following PTX for the
+// first load and similar PTX for other loads.
+//
+// mov.u32 %r1, %x;
+// mov.u32 %r2, %y;
+// mul.wide.u32 %rl2, %r1, 128;
+// mov.u64 %rl3, a;
+// add.s64 %rl4, %rl3, %rl2;
+// mul.wide.u32 %rl5, %r2, 4;
+// add.s64 %rl6, %rl4, %rl5;
+// ld.global.f32 %f1, [%rl6];
+//
+// To reduce the register pressure, the optimization implemented in this file
+// merges the common part of a group of GEPs, so we can compute each pointer
+// address by adding a simple offset to the common part, saving many registers.
+//
+// It works by splitting each GEP into a variadic base and a constant offset.
+// The variadic base can be computed once and reused by multiple GEPs, and the
+// constant offsets can be nicely folded into the reg+immediate addressing mode
+// (supported by most targets) without using any extra register.
+//
+// For instance, we transform the four GEPs and four loads in the above example
+// into:
+//
+// base = gep a, 0, x, y
+// load base
+// load base + 1 * sizeof(float)
+// load base + 32 * sizeof(float)
+// load base + 33 * sizeof(float)
+//
+// Given the transformed IR, a backend that supports the reg+immediate
+// addressing mode can easily fold the pointer arithmetic into the loads. For
+// example, the NVPTX backend can easily fold the pointer arithmetic into the
+// ld.global.f32 instructions, and the resultant PTX uses far fewer registers.
+//
+// mov.u32 %r1, %tid.x;
+// mov.u32 %r2, %tid.y;
+// mul.wide.u32 %rl2, %r1, 128;
+// mov.u64 %rl3, a;
+// add.s64 %rl4, %rl3, %rl2;
+// mul.wide.u32 %rl5, %r2, 4;
+// add.s64 %rl6, %rl4, %rl5;
+// ld.global.f32 %f1, [%rl6]; // so far the same as unoptimized PTX
+// ld.global.f32 %f2, [%rl6+4]; // much better
+// ld.global.f32 %f3, [%rl6+128]; // much better
+// ld.global.f32 %f4, [%rl6+132]; // much better
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Scalar.h"
+
+using namespace llvm;
+
+static cl::opt<bool> DisableSeparateConstOffsetFromGEP(
+ "disable-separate-const-offset-from-gep", cl::init(false),
+ cl::desc("Do not separate the constant offset from a GEP instruction"),
+ cl::Hidden);
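+
+// Usage sketch (illustrative; assumes an opt binary that links in this pass
+// and an input module named test.ll):
+//
+//   opt -separate-const-offset-from-gep -S test.ll
+//
+// Passing -disable-separate-const-offset-from-gep in addition turns the pass
+// into a no-op, which is the escape hatch provided by the flag above.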
+
+namespace {
+
+/// \brief A helper class for separating a constant offset from a GEP index.
+///
+/// In real programs, a GEP index may be more complicated than a simple addition
+/// of something and a constant integer, which could be trivially split. For
+/// example, to split ((a << 3) | 5) + b, we need to search deeper for the
+/// constant offset, so that we can separate the index into (a << 3) + b and 5.
+///
+/// Therefore, this class looks into the expression that computes a given GEP
+/// index, and tries to find a constant integer that can be hoisted to the
+/// outermost level of the expression as an addition. Not every constant in an
+/// expression can be hoisted this way; e.g., we cannot transform (b * (a + 5))
+/// to (b * a + 5), nor (3 * (a + 5)) to (3 * a + 5). In the latter case,
+/// however, -instcombine has probably already optimized (3 * (a + 5)) to
+/// (3 * a + 15), which this class can then split.
+class ConstantOffsetExtractor {
+ public:
+  /// Extracts a constant offset from the given GEP index. It outputs the
+  /// numeric value of the extracted constant offset (0 if none is found), and
+  /// a new index representing the remainder (equal to the original index
+  /// minus the constant offset).
+ /// \p Idx The given GEP index
+ /// \p NewIdx The new index to replace (output)
+ /// \p DL The datalayout of the module
+ /// \p GEP The given GEP
+ static int64_t Extract(Value *Idx, Value *&NewIdx, const DataLayout *DL,
+ GetElementPtrInst *GEP);
+ /// Looks for a constant offset without extracting it. The meaning of the
+ /// arguments and the return value are the same as Extract.
+ static int64_t Find(Value *Idx, const DataLayout *DL, GetElementPtrInst *GEP);
+
+ private:
+ ConstantOffsetExtractor(const DataLayout *Layout, Instruction *InsertionPt)
+ : DL(Layout), IP(InsertionPt) {}
+  /// Searches the expression that computes V for a non-zero constant C s.t.
+  /// V can be reassociated into the form V' + C. If the search succeeds,
+  /// returns C and updates UserChain as a def-use chain from C to V;
+  /// otherwise, UserChain is left empty.
+ ///
+ /// \p V The given expression
+ /// \p SignExtended Whether V will be sign-extended in the computation of the
+ /// GEP index
+ /// \p ZeroExtended Whether V will be zero-extended in the computation of the
+ /// GEP index
+ /// \p NonNegative Whether V is guaranteed to be non-negative. For example,
+ /// an index of an inbounds GEP is guaranteed to be
+  ///                non-negative. Leveraging this, we can better split
+ /// inbounds GEPs.
+ APInt find(Value *V, bool SignExtended, bool ZeroExtended, bool NonNegative);
+ /// A helper function to look into both operands of a binary operator.
+ APInt findInEitherOperand(BinaryOperator *BO, bool SignExtended,
+ bool ZeroExtended);
+ /// After finding the constant offset C from the GEP index I, we build a new
+ /// index I' s.t. I' + C = I. This function builds and returns the new
+ /// index I' according to UserChain produced by function "find".
+ ///
+ /// The building conceptually takes two steps:
+ /// 1) iteratively distribute s/zext towards the leaves of the expression tree
+ /// that computes I
+ /// 2) reassociate the expression tree to the form I' + C.
+ ///
+ /// For example, to extract the 5 from sext(a + (b + 5)), we first distribute
+ /// sext to a, b and 5 so that we have
+ /// sext(a) + (sext(b) + 5).
+ /// Then, we reassociate it to
+ /// (sext(a) + sext(b)) + 5.
+ /// Given this form, we know I' is sext(a) + sext(b).
+ Value *rebuildWithoutConstOffset();
+ /// After the first step of rebuilding the GEP index without the constant
+ /// offset, distribute s/zext to the operands of all operators in UserChain.
+  /// e.g., zext(sext(a + (b + 5))) (assuming no overflow) =>
+ /// zext(sext(a)) + (zext(sext(b)) + zext(sext(5))).
+ ///
+ /// The function also updates UserChain to point to new subexpressions after
+ /// distributing s/zext. e.g., the old UserChain of the above example is
+ /// 5 -> b + 5 -> a + (b + 5) -> sext(...) -> zext(sext(...)),
+ /// and the new UserChain is
+ /// zext(sext(5)) -> zext(sext(b)) + zext(sext(5)) ->
+  ///       zext(sext(a)) + (zext(sext(b)) + zext(sext(5))).
+ ///
+ /// \p ChainIndex The index to UserChain. ChainIndex is initially
+ /// UserChain.size() - 1, and is decremented during
+ /// the recursion.
+ Value *distributeExtsAndCloneChain(unsigned ChainIndex);
+ /// Reassociates the GEP index to the form I' + C and returns I'.
+ Value *removeConstOffset(unsigned ChainIndex);
+ /// A helper function to apply ExtInsts, a list of s/zext, to value V.
+ /// e.g., if ExtInsts = [sext i32 to i64, zext i16 to i32], this function
+ /// returns "sext i32 (zext i16 V to i32) to i64".
+ Value *applyExts(Value *V);
+
+  /// Returns true if LHS and RHS have no bits in common, i.e., LHS & RHS == 0.
+ bool NoCommonBits(Value *LHS, Value *RHS) const;
+ /// Computes which bits are known to be one or zero.
+ /// \p KnownOne Mask of all bits that are known to be one.
+ /// \p KnownZero Mask of all bits that are known to be zero.
+ void ComputeKnownBits(Value *V, APInt &KnownOne, APInt &KnownZero) const;
+ /// A helper function that returns whether we can trace into the operands
+ /// of binary operator BO for a constant offset.
+ ///
+ /// \p SignExtended Whether BO is surrounded by sext
+ /// \p ZeroExtended Whether BO is surrounded by zext
+ /// \p NonNegative Whether BO is known to be non-negative, e.g., an in-bound
+ /// array index.
+ bool CanTraceInto(bool SignExtended, bool ZeroExtended, BinaryOperator *BO,
+ bool NonNegative);
+
+ /// The path from the constant offset to the old GEP index. e.g., if the GEP
+ /// index is "a * b + (c + 5)". After running function find, UserChain[0] will
+ /// be the constant 5, UserChain[1] will be the subexpression "c + 5", and
+ /// UserChain[2] will be the entire expression "a * b + (c + 5)".
+ ///
+ /// This path helps to rebuild the new GEP index.
+ SmallVector<User *, 8> UserChain;
+ /// A data structure used in rebuildWithoutConstOffset. Contains all
+ /// sext/zext instructions along UserChain.
+ SmallVector<CastInst *, 16> ExtInsts;
+ /// The data layout of the module. Used in ComputeKnownBits.
+ const DataLayout *DL;
+  /// Insertion position of cloned instructions.
+  Instruction *IP;
+};
+
+/// \brief A pass that tries to split every GEP in the function into a variadic
+/// base and a constant offset. It is a FunctionPass because searching for the
+/// constant offset may inspect other basic blocks.
+class SeparateConstOffsetFromGEP : public FunctionPass {
+ public:
+ static char ID;
+ SeparateConstOffsetFromGEP() : FunctionPass(ID) {
+ initializeSeparateConstOffsetFromGEPPass(*PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<DataLayoutPass>();
+ AU.addRequired<TargetTransformInfo>();
+ }
+
+ bool doInitialization(Module &M) override {
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ if (DLP == nullptr)
+ report_fatal_error("data layout missing");
+ DL = &DLP->getDataLayout();
+ return false;
+ }
+
+ bool runOnFunction(Function &F) override;
+
+ private:
+ /// Tries to split the given GEP into a variadic base and a constant offset,
+ /// and returns true if the splitting succeeds.
+ bool splitGEP(GetElementPtrInst *GEP);
+ /// Finds the constant offset within each index, and accumulates them. This
+ /// function only inspects the GEP without changing it. The output
+ /// NeedsExtraction indicates whether we can extract a non-zero constant
+ /// offset from any index.
+ int64_t accumulateByteOffset(GetElementPtrInst *GEP, bool &NeedsExtraction);
+ /// Canonicalize array indices to pointer-size integers. This helps to
+ /// simplify the logic of splitting a GEP. For example, if a + b is a
+ /// pointer-size integer, we have
+ /// gep base, a + b = gep (gep base, a), b
+ /// However, this equality may not hold if the size of a + b is smaller than
+ /// the pointer size, because LLVM conceptually sign-extends GEP indices to
+ /// pointer size before computing the address
+ /// (http://llvm.org/docs/LangRef.html#id181).
+ ///
+  /// Clang and instcombine very likely perform this canonicalization already,
+  /// so in most cases this function leaves the program unchanged.
+ ///
+ /// Returns true if the module changes.
+ ///
+ /// Verified in @i32_add in split-gep.ll
+ bool canonicalizeArrayIndicesToPointerSize(GetElementPtrInst *GEP);
+
+ const DataLayout *DL;
+};
+} // anonymous namespace
+
+char SeparateConstOffsetFromGEP::ID = 0;
+INITIALIZE_PASS_BEGIN(
+ SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
+ "Split GEPs to a variadic base and a constant offset for better CSE", false,
+ false)
+INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
+INITIALIZE_PASS_DEPENDENCY(DataLayoutPass)
+INITIALIZE_PASS_END(
+ SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
+ "Split GEPs to a variadic base and a constant offset for better CSE", false,
+ false)
+
+FunctionPass *llvm::createSeparateConstOffsetFromGEPPass() {
+ return new SeparateConstOffsetFromGEP();
+}
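+
+// A backend that benefits from this transformation can schedule it through the
+// factory above. A minimal sketch (the pass-manager variable is illustrative):
+//
+//   PM.add(createSeparateConstOffsetFromGEPPass());
+//
+// The pass is intended to run after the standard optimizations, so that
+// instcombine has already folded the easy constant-offset patterns (see the
+// comment in findInEitherOperand below).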
+
+bool ConstantOffsetExtractor::CanTraceInto(bool SignExtended,
+ bool ZeroExtended,
+ BinaryOperator *BO,
+ bool NonNegative) {
+ // We only consider ADD, SUB and OR, because a non-zero constant found in
+ // expressions composed of these operations can be easily hoisted as a
+ // constant offset by reassociation.
+ if (BO->getOpcode() != Instruction::Add &&
+ BO->getOpcode() != Instruction::Sub &&
+ BO->getOpcode() != Instruction::Or) {
+ return false;
+ }
+
+ Value *LHS = BO->getOperand(0), *RHS = BO->getOperand(1);
+ // Do not trace into "or" unless it is equivalent to "add". If LHS and RHS
+ // don't have common bits, (LHS | RHS) is equivalent to (LHS + RHS).
+ if (BO->getOpcode() == Instruction::Or && !NoCommonBits(LHS, RHS))
+ return false;
+
+ // In addition, tracing into BO requires that its surrounding s/zext (if
+ // any) is distributable to both operands.
+ //
+ // Suppose BO = A op B.
+ // SignExtended | ZeroExtended | Distributable?
+ // --------------+--------------+----------------------------------
+ // 0 | 0 | true because no s/zext exists
+ // 0 | 1 | zext(BO) == zext(A) op zext(B)
+ // 1 | 0 | sext(BO) == sext(A) op sext(B)
+ // 1 | 1 | zext(sext(BO)) ==
+ // | | zext(sext(A)) op zext(sext(B))
+ if (BO->getOpcode() == Instruction::Add && !ZeroExtended && NonNegative) {
+ // If a + b >= 0 and (a >= 0 or b >= 0), then
+ // sext(a + b) = sext(a) + sext(b)
+ // even if the addition is not marked nsw.
+ //
+    // Leveraging this invariant, we can trace into an sext'ed inbounds GEP
+ // index if the constant offset is non-negative.
+ //
+ // Verified in @sext_add in split-gep.ll.
+ if (ConstantInt *ConstLHS = dyn_cast<ConstantInt>(LHS)) {
+ if (!ConstLHS->isNegative())
+ return true;
+ }
+ if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(RHS)) {
+ if (!ConstRHS->isNegative())
+ return true;
+ }
+ }
+
+ // sext (add/sub nsw A, B) == add/sub nsw (sext A), (sext B)
+ // zext (add/sub nuw A, B) == add/sub nuw (zext A), (zext B)
+ if (BO->getOpcode() == Instruction::Add ||
+ BO->getOpcode() == Instruction::Sub) {
+ if (SignExtended && !BO->hasNoSignedWrap())
+ return false;
+ if (ZeroExtended && !BO->hasNoUnsignedWrap())
+ return false;
+ }
+
+ return true;
+}
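+
+// A small counterexample motivating the nuw/nsw checks above (illustrative,
+// not taken from a test case): with i8 operands a = 255 and b = 1, a + b
+// wraps to 0, so zext i8 (a + b) to i16 yields 0, whereas zext(a) + zext(b)
+// yields 256. Distributing the zext is therefore only sound when the add is
+// marked nuw.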
+
+APInt ConstantOffsetExtractor::findInEitherOperand(BinaryOperator *BO,
+ bool SignExtended,
+ bool ZeroExtended) {
+ // BO being non-negative does not shed light on whether its operands are
+ // non-negative. Clear the NonNegative flag here.
+ APInt ConstantOffset = find(BO->getOperand(0), SignExtended, ZeroExtended,
+ /* NonNegative */ false);
+ // If we found a constant offset in the left operand, stop and return that.
+ // This shortcut might cause us to miss opportunities of combining the
+ // constant offsets in both operands, e.g., (a + 4) + (b + 5) => (a + b) + 9.
+ // However, such cases are probably already handled by -instcombine,
+ // given this pass runs after the standard optimizations.
+ if (ConstantOffset != 0) return ConstantOffset;
+ ConstantOffset = find(BO->getOperand(1), SignExtended, ZeroExtended,
+ /* NonNegative */ false);
+  // If BO is a sub operator, negate the constant offset found in the right
+  // operand.
+ if (BO->getOpcode() == Instruction::Sub)
+ ConstantOffset = -ConstantOffset;
+ return ConstantOffset;
+}
+
+APInt ConstantOffsetExtractor::find(Value *V, bool SignExtended,
+ bool ZeroExtended, bool NonNegative) {
+ // TODO(jingyue): We could trace into integer/pointer casts, such as
+ // inttoptr, ptrtoint, bitcast, and addrspacecast. We choose to handle only
+ // integers because it gives good enough results for our benchmarks.
+ unsigned BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
+
+ // We cannot do much with Values that are not a User, such as an Argument.
+ User *U = dyn_cast<User>(V);
+ if (U == nullptr) return APInt(BitWidth, 0);
+
+ APInt ConstantOffset(BitWidth, 0);
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ // Hooray, we found it!
+ ConstantOffset = CI->getValue();
+ } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V)) {
+ // Trace into subexpressions for more hoisting opportunities.
+ if (CanTraceInto(SignExtended, ZeroExtended, BO, NonNegative)) {
+ ConstantOffset = findInEitherOperand(BO, SignExtended, ZeroExtended);
+ }
+ } else if (isa<SExtInst>(V)) {
+ ConstantOffset = find(U->getOperand(0), /* SignExtended */ true,
+ ZeroExtended, NonNegative).sext(BitWidth);
+ } else if (isa<ZExtInst>(V)) {
+ // As an optimization, we can clear the SignExtended flag because
+ // sext(zext(a)) = zext(a). Verified in @sext_zext in split-gep.ll.
+ //
+ // Clear the NonNegative flag, because zext(a) >= 0 does not imply a >= 0.
+ ConstantOffset =
+ find(U->getOperand(0), /* SignExtended */ false,
+ /* ZeroExtended */ true, /* NonNegative */ false).zext(BitWidth);
+ }
+
+ // If we found a non-zero constant offset, add it to the path for
+ // rebuildWithoutConstOffset. Zero is a valid constant offset, but doesn't
+ // help this optimization.
+ if (ConstantOffset != 0)
+ UserChain.push_back(U);
+ return ConstantOffset;
+}
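+
+// Illustrative trace (mirroring the UserChain example in the class comment):
+// for the index "a * b + (c + 5)", find recurses into the right operand of
+// the outer add, then into "c + 5", and finds the ConstantInt 5. On the way
+// back up it pushes 5, then "c + 5", then "a * b + (c + 5)" onto UserChain
+// and returns the APInt value 5.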
+
+Value *ConstantOffsetExtractor::applyExts(Value *V) {
+ Value *Current = V;
+  // ExtInsts is built in use-def order. Therefore, we apply them to V in
+  // reverse order.
+ for (auto I = ExtInsts.rbegin(), E = ExtInsts.rend(); I != E; ++I) {
+ if (Constant *C = dyn_cast<Constant>(Current)) {
+ // If Current is a constant, apply s/zext using ConstantExpr::getCast.
+ // ConstantExpr::getCast emits a ConstantInt if C is a ConstantInt.
+ Current = ConstantExpr::getCast((*I)->getOpcode(), C, (*I)->getType());
+ } else {
+ Instruction *Ext = (*I)->clone();
+ Ext->setOperand(0, Current);
+ Ext->insertBefore(IP);
+ Current = Ext;
+ }
+ }
+ return Current;
+}
+
+Value *ConstantOffsetExtractor::rebuildWithoutConstOffset() {
+ distributeExtsAndCloneChain(UserChain.size() - 1);
+ // Remove all nullptrs (used to be s/zext) from UserChain.
+ unsigned NewSize = 0;
+ for (auto I = UserChain.begin(), E = UserChain.end(); I != E; ++I) {
+ if (*I != nullptr) {
+ UserChain[NewSize] = *I;
+ NewSize++;
+ }
+ }
+ UserChain.resize(NewSize);
+ return removeConstOffset(UserChain.size() - 1);
+}
+
+Value *
+ConstantOffsetExtractor::distributeExtsAndCloneChain(unsigned ChainIndex) {
+ User *U = UserChain[ChainIndex];
+ if (ChainIndex == 0) {
+ assert(isa<ConstantInt>(U));
+ // If U is a ConstantInt, applyExts will return a ConstantInt as well.
+ return UserChain[ChainIndex] = cast<ConstantInt>(applyExts(U));
+ }
+
+ if (CastInst *Cast = dyn_cast<CastInst>(U)) {
+ assert((isa<SExtInst>(Cast) || isa<ZExtInst>(Cast)) &&
+ "We only traced into two types of CastInst: sext and zext");
+ ExtInsts.push_back(Cast);
+ UserChain[ChainIndex] = nullptr;
+ return distributeExtsAndCloneChain(ChainIndex - 1);
+ }
+
+  // Function find only traces into BinaryOperator and CastInst.
+ BinaryOperator *BO = cast<BinaryOperator>(U);
+ // OpNo = which operand of BO is UserChain[ChainIndex - 1]
+ unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
+ Value *TheOther = applyExts(BO->getOperand(1 - OpNo));
+ Value *NextInChain = distributeExtsAndCloneChain(ChainIndex - 1);
+
+ BinaryOperator *NewBO = nullptr;
+ if (OpNo == 0) {
+ NewBO = BinaryOperator::Create(BO->getOpcode(), NextInChain, TheOther,
+ BO->getName(), IP);
+ } else {
+ NewBO = BinaryOperator::Create(BO->getOpcode(), TheOther, NextInChain,
+ BO->getName(), IP);
+ }
+ return UserChain[ChainIndex] = NewBO;
+}
+
+Value *ConstantOffsetExtractor::removeConstOffset(unsigned ChainIndex) {
+ if (ChainIndex == 0) {
+ assert(isa<ConstantInt>(UserChain[ChainIndex]));
+ return ConstantInt::getNullValue(UserChain[ChainIndex]->getType());
+ }
+
+ BinaryOperator *BO = cast<BinaryOperator>(UserChain[ChainIndex]);
+ unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
+ assert(BO->getOperand(OpNo) == UserChain[ChainIndex - 1]);
+ Value *NextInChain = removeConstOffset(ChainIndex - 1);
+ Value *TheOther = BO->getOperand(1 - OpNo);
+
+ // If NextInChain is 0 and not the LHS of a sub, we can simplify the
+ // sub-expression to be just TheOther.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(NextInChain)) {
+ if (CI->isZero() && !(BO->getOpcode() == Instruction::Sub && OpNo == 0))
+ return TheOther;
+ }
+
+ if (BO->getOpcode() == Instruction::Or) {
+    // Rebuild "or" as "add", because "or" may be invalid for the new
+    // expression.
+ //
+ // For instance, given
+ // a | (b + 5) where a and b + 5 have no common bits,
+ // we can extract 5 as the constant offset.
+ //
+ // However, reusing the "or" in the new index would give us
+ // (a | b) + 5
+ // which does not equal a | (b + 5).
+ //
+ // Replacing the "or" with "add" is fine, because
+ // a | (b + 5) = a + (b + 5) = (a + b) + 5
+ return BinaryOperator::CreateAdd(BO->getOperand(0), BO->getOperand(1),
+ BO->getName(), IP);
+ }
+
+ // We can reuse BO in this case, because the new expression shares the same
+ // instruction type and BO is used at most once.
+ assert(BO->getNumUses() <= 1 &&
+ "distributeExtsAndCloneChain clones each BinaryOperator in "
+ "UserChain, so no one should be used more than "
+ "once");
+ BO->setOperand(OpNo, NextInChain);
+ BO->setHasNoSignedWrap(false);
+ BO->setHasNoUnsignedWrap(false);
+ // Make sure it appears after all instructions we've inserted so far.
+ BO->moveBefore(IP);
+ return BO;
+}
+
+int64_t ConstantOffsetExtractor::Extract(Value *Idx, Value *&NewIdx,
+ const DataLayout *DL,
+ GetElementPtrInst *GEP) {
+ ConstantOffsetExtractor Extractor(DL, GEP);
+ // Find a non-zero constant offset first.
+ APInt ConstantOffset =
+ Extractor.find(Idx, /* SignExtended */ false, /* ZeroExtended */ false,
+ GEP->isInBounds());
+ if (ConstantOffset != 0) {
+ // Separates the constant offset from the GEP index.
+ NewIdx = Extractor.rebuildWithoutConstOffset();
+ }
+ return ConstantOffset.getSExtValue();
+}
+
+int64_t ConstantOffsetExtractor::Find(Value *Idx, const DataLayout *DL,
+ GetElementPtrInst *GEP) {
+  // If Idx is an index of an inbounds GEP, Idx is guaranteed to be non-negative.
+ return ConstantOffsetExtractor(DL, GEP)
+ .find(Idx, /* SignExtended */ false, /* ZeroExtended */ false,
+ GEP->isInBounds())
+ .getSExtValue();
+}
+
+void ConstantOffsetExtractor::ComputeKnownBits(Value *V, APInt &KnownOne,
+ APInt &KnownZero) const {
+ IntegerType *IT = cast<IntegerType>(V->getType());
+ KnownOne = APInt(IT->getBitWidth(), 0);
+ KnownZero = APInt(IT->getBitWidth(), 0);
+ llvm::computeKnownBits(V, KnownZero, KnownOne, DL, 0);
+}
+
+bool ConstantOffsetExtractor::NoCommonBits(Value *LHS, Value *RHS) const {
+ assert(LHS->getType() == RHS->getType() &&
+ "LHS and RHS should have the same type");
+ APInt LHSKnownOne, LHSKnownZero, RHSKnownOne, RHSKnownZero;
+ ComputeKnownBits(LHS, LHSKnownOne, LHSKnownZero);
+ ComputeKnownBits(RHS, RHSKnownOne, RHSKnownZero);
+ return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
+}
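+
+// Example (matching the ((a << 3) | 5) case from the class comment): for
+// LHS = a << 3 the low three bits are known zero, and for RHS = 5 every bit
+// except bits 0 and 2 is known zero, so LHSKnownZero | RHSKnownZero is all
+// ones and the "or" may be treated as an "add".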
+
+bool SeparateConstOffsetFromGEP::canonicalizeArrayIndicesToPointerSize(
+ GetElementPtrInst *GEP) {
+ bool Changed = false;
+ Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
+ gep_type_iterator GTI = gep_type_begin(*GEP);
+ for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end();
+ I != E; ++I, ++GTI) {
+ // Skip struct member indices which must be i32.
+ if (isa<SequentialType>(*GTI)) {
+ if ((*I)->getType() != IntPtrTy) {
+ *I = CastInst::CreateIntegerCast(*I, IntPtrTy, true, "idxprom", GEP);
+ Changed = true;
+ }
+ }
+ }
+ return Changed;
+}
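+
+// For illustration only, on a target with 64-bit pointers the canonicalization
+// above conceptually rewrites
+//
+//   %p = getelementptr float* %a, i32 %i
+//
+// into
+//
+//   %idxprom = sext i32 %i to i64
+//   %p = getelementptr float* %a, i64 %idxprom
+//
+// so that later splitting can treat every array index as pointer-sized.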
+
+int64_t
+SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
+ bool &NeedsExtraction) {
+ NeedsExtraction = false;
+ int64_t AccumulativeByteOffset = 0;
+ gep_type_iterator GTI = gep_type_begin(*GEP);
+ for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
+ if (isa<SequentialType>(*GTI)) {
+ // Tries to extract a constant offset from this GEP index.
+ int64_t ConstantOffset =
+ ConstantOffsetExtractor::Find(GEP->getOperand(I), DL, GEP);
+ if (ConstantOffset != 0) {
+ NeedsExtraction = true;
+ // A GEP may have multiple indices. We accumulate the extracted
+ // constant offset to a byte offset, and later offset the remainder of
+ // the original GEP with this byte offset.
+ AccumulativeByteOffset +=
+ ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType());
+ }
+ }
+ }
+ return AccumulativeByteOffset;
+}
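+
+// Worked example (numbers assume the float a[32][32] case from the file
+// header): for gep [32 x [32 x float]]* @a, i64 0, i64 (%x + 1), i64 (%y + 1),
+// Find extracts 1 from each of the last two indices, so
+//
+//   AccumulativeByteOffset = 1 * sizeof([32 x float]) + 1 * sizeof(float)
+//                          = 1 * 128 + 1 * 4 = 132 bytes,
+//
+// which matches the +132 immediate in the optimized PTX above.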
+
+bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
+ // Skip vector GEPs.
+ if (GEP->getType()->isVectorTy())
+ return false;
+
+ // The backend can already nicely handle the case where all indices are
+ // constant.
+ if (GEP->hasAllConstantIndices())
+ return false;
+
+ bool Changed = canonicalizeArrayIndicesToPointerSize(GEP);
+
+ bool NeedsExtraction;
+ int64_t AccumulativeByteOffset = accumulateByteOffset(GEP, NeedsExtraction);
+
+ if (!NeedsExtraction)
+ return Changed;
+  // Before really splitting the GEP, check whether the backend supports the
+  // addressing mode we are about to produce. If not, the splitting probably
+  // won't be beneficial.
+ TargetTransformInfo &TTI = getAnalysis<TargetTransformInfo>();
+ if (!TTI.isLegalAddressingMode(GEP->getType()->getElementType(),
+ /*BaseGV=*/nullptr, AccumulativeByteOffset,
+ /*HasBaseReg=*/true, /*Scale=*/0)) {
+ return Changed;
+ }
+
+ // Remove the constant offset in each GEP index. The resultant GEP computes
+ // the variadic base.
+ gep_type_iterator GTI = gep_type_begin(*GEP);
+ for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
+ if (isa<SequentialType>(*GTI)) {
+ Value *NewIdx = nullptr;
+ // Tries to extract a constant offset from this GEP index.
+ int64_t ConstantOffset =
+ ConstantOffsetExtractor::Extract(GEP->getOperand(I), NewIdx, DL, GEP);
+ if (ConstantOffset != 0) {
+ assert(NewIdx != nullptr &&
+ "ConstantOffset != 0 implies NewIdx is set");
+ GEP->setOperand(I, NewIdx);
+ }
+ }
+ }
+  // Clear the inbounds attribute because the new index may be out of bounds.
+ // e.g.,
+ //
+ // b = add i64 a, 5
+ // addr = gep inbounds float* p, i64 b
+ //
+ // is transformed to:
+ //
+ // addr2 = gep float* p, i64 a
+ // addr = gep float* addr2, i64 5
+ //
+  // If a is -4, although the old index b is in bounds, the new index a is
+  // out of bounds. http://llvm.org/docs/LangRef.html#id181 says "if the
+  // inbounds keyword is not present, the offsets are added to the base
+  // address with silently-wrapping two's complement arithmetic".
+  // Therefore, the final code is still semantically equivalent.
+ //
+ // TODO(jingyue): do some range analysis to keep as many inbounds as
+ // possible. GEPs with inbounds are more friendly to alias analysis.
+ GEP->setIsInBounds(false);
+
+ // Offsets the base with the accumulative byte offset.
+ //
+ // %gep ; the base
+ // ... %gep ...
+ //
+ // => add the offset
+ //
+ // %gep2 ; clone of %gep
+ // %new.gep = gep %gep2, <offset / sizeof(*%gep)>
+ // %gep ; will be removed
+ // ... %gep ...
+ //
+ // => replace all uses of %gep with %new.gep and remove %gep
+ //
+ // %gep2 ; clone of %gep
+ // %new.gep = gep %gep2, <offset / sizeof(*%gep)>
+ // ... %new.gep ...
+ //
+ // If AccumulativeByteOffset is not a multiple of sizeof(*%gep), we emit an
+ // uglygep (http://llvm.org/docs/GetElementPtr.html#what-s-an-uglygep):
+ // bitcast %gep2 to i8*, add the offset, and bitcast the result back to the
+ // type of %gep.
+ //
+ // %gep2 ; clone of %gep
+ // %0 = bitcast %gep2 to i8*
+ // %uglygep = gep %0, <offset>
+ // %new.gep = bitcast %uglygep to <type of %gep>
+ // ... %new.gep ...
+ Instruction *NewGEP = GEP->clone();
+ NewGEP->insertBefore(GEP);
+
+ uint64_t ElementTypeSizeOfGEP =
+ DL->getTypeAllocSize(GEP->getType()->getElementType());
+ Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
+ if (AccumulativeByteOffset % ElementTypeSizeOfGEP == 0) {
+    // Very likely. As long as %gep is naturally aligned, the byte offset we
+ // extracted should be a multiple of sizeof(*%gep).
+    // Per the C standard, dividing a signed value by an unsigned one is done
+    // as unsigned division. Therefore, we cast ElementTypeSizeOfGEP to signed
+    // so the division below is a signed division.
+ int64_t Index =
+ AccumulativeByteOffset / static_cast<int64_t>(ElementTypeSizeOfGEP);
+ NewGEP = GetElementPtrInst::Create(
+ NewGEP, ConstantInt::get(IntPtrTy, Index, true), GEP->getName(), GEP);
+ } else {
+ // Unlikely but possible. For example,
+ // #pragma pack(1)
+ // struct S {
+ // int a[3];
+ // int64 b[8];
+ // };
+ // #pragma pack()
+ //
+ // Suppose the gep before extraction is &s[i + 1].b[j + 3]. After
+ // extraction, it becomes &s[i].b[j] and AccumulativeByteOffset is
+ // sizeof(S) + 3 * sizeof(int64) = 100, which is not a multiple of
+ // sizeof(int64).
+ //
+ // Emit an uglygep in this case.
+ Type *I8PtrTy = Type::getInt8PtrTy(GEP->getContext(),
+ GEP->getPointerAddressSpace());
+ NewGEP = new BitCastInst(NewGEP, I8PtrTy, "", GEP);
+ NewGEP = GetElementPtrInst::Create(
+ NewGEP, ConstantInt::get(IntPtrTy, AccumulativeByteOffset, true),
+ "uglygep", GEP);
+ if (GEP->getType() != I8PtrTy)
+ NewGEP = new BitCastInst(NewGEP, GEP->getType(), GEP->getName(), GEP);
+ }
+
+ GEP->replaceAllUsesWith(NewGEP);
+ GEP->eraseFromParent();
+
+ return true;
+}
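+
+// Putting it together for the running example (a sketch, not compiler output;
+// value names are illustrative):
+//
+//   %p = gep [32 x [32 x float]]* @a, i64 0, i64 %xp1, i64 %yp1
+//        ; where %xp1 = %x + 1 and %yp1 = %y + 1
+//
+// becomes, conceptually,
+//
+//   %base = gep [32 x [32 x float]]* @a, i64 0, i64 %x, i64 %y
+//   %p    = gep float* %base, i64 33        ; 33 * sizeof(float) == 132 bytes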
+
+bool SeparateConstOffsetFromGEP::runOnFunction(Function &F) {
+ if (DisableSeparateConstOffsetFromGEP)
+ return false;
+
+ bool Changed = false;
+ for (Function::iterator B = F.begin(), BE = F.end(); B != BE; ++B) {
+ for (BasicBlock::iterator I = B->begin(), IE = B->end(); I != IE; ) {
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I++)) {
+ Changed |= splitGEP(GEP);
+ }
+      // No need to split GEP ConstantExprs because all their indices are
+      // constant already.
+ }
+ }
+ return Changed;
+}
diff --git a/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
index 8371f6d..5d5606b 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -21,23 +21,24 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "simplifycfg"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
+#define DEBUG_TYPE "simplifycfg"
+
STATISTIC(NumSimpl, "Number of blocks simplified");
namespace {
@@ -46,9 +47,9 @@ struct CFGSimplifyPass : public FunctionPass {
CFGSimplifyPass() : FunctionPass(ID) {
initializeCFGSimplifyPassPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<TargetTransformInfo>();
}
};
@@ -71,7 +72,7 @@ FunctionPass *llvm::createCFGSimplificationPass() {
static bool mergeEmptyReturnBlocks(Function &F) {
bool Changed = false;
- BasicBlock *RetBlock = 0;
+ BasicBlock *RetBlock = nullptr;
// Scan all the blocks in the function, looking for empty return blocks.
for (Function::iterator BBI = F.begin(), E = F.end(); BBI != E; ) {
@@ -79,7 +80,7 @@ static bool mergeEmptyReturnBlocks(Function &F) {
// Only look at return blocks.
ReturnInst *Ret = dyn_cast<ReturnInst>(BB.getTerminator());
- if (Ret == 0) continue;
+ if (!Ret) continue;
// Only look at the block if it is empty or the only other thing in it is a
// single PHI node that is the operand to the return.
@@ -98,7 +99,7 @@ static bool mergeEmptyReturnBlocks(Function &F) {
}
// If this is the first returning block, remember it and keep going.
- if (RetBlock == 0) {
+ if (!RetBlock) {
RetBlock = &BB;
continue;
}
@@ -119,7 +120,7 @@ static bool mergeEmptyReturnBlocks(Function &F) {
// If the canonical return block has no PHI node, create one now.
PHINode *RetBlockPHI = dyn_cast<PHINode>(RetBlock->begin());
- if (RetBlockPHI == 0) {
+ if (!RetBlockPHI) {
Value *InVal = cast<ReturnInst>(RetBlock->getTerminator())->getOperand(0);
pred_iterator PB = pred_begin(RetBlock), PE = pred_end(RetBlock);
RetBlockPHI = PHINode::Create(Ret->getOperand(0)->getType(),
@@ -145,7 +146,7 @@ static bool mergeEmptyReturnBlocks(Function &F) {
/// iterativelySimplifyCFG - Call SimplifyCFG on all the blocks in the function,
/// iterating until no more changes are made.
static bool iterativelySimplifyCFG(Function &F, const TargetTransformInfo &TTI,
- const DataLayout *TD) {
+ const DataLayout *DL) {
bool Changed = false;
bool LocalChange = true;
while (LocalChange) {
@@ -154,7 +155,7 @@ static bool iterativelySimplifyCFG(Function &F, const TargetTransformInfo &TTI,
// Loop over all of the basic blocks and remove them if they are unneeded...
//
for (Function::iterator BBIt = F.begin(); BBIt != F.end(); ) {
- if (SimplifyCFG(BBIt++, TTI, TD)) {
+ if (SimplifyCFG(BBIt++, TTI, DL)) {
LocalChange = true;
++NumSimpl;
}
@@ -168,11 +169,15 @@ static bool iterativelySimplifyCFG(Function &F, const TargetTransformInfo &TTI,
// simplify the CFG.
//
bool CFGSimplifyPass::runOnFunction(Function &F) {
+ if (skipOptnoneFunction(F))
+ return false;
+
const TargetTransformInfo &TTI = getAnalysis<TargetTransformInfo>();
- const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
bool EverChanged = removeUnreachableBlocks(F);
EverChanged |= mergeEmptyReturnBlocks(F);
- EverChanged |= iterativelySimplifyCFG(F, TTI, TD);
+ EverChanged |= iterativelySimplifyCFG(F, TTI, DL);
// If neither pass changed anything, we're done.
if (!EverChanged) return false;
@@ -186,7 +191,7 @@ bool CFGSimplifyPass::runOnFunction(Function &F) {
return true;
do {
- EverChanged = iterativelySimplifyCFG(F, TTI, TD);
+ EverChanged = iterativelySimplifyCFG(F, TTI, DL);
EverChanged |= removeUnreachableBlocks(F);
} while (EverChanged);
diff --git a/contrib/llvm/lib/Transforms/Scalar/Sink.cpp b/contrib/llvm/lib/Transforms/Scalar/Sink.cpp
index d4595bb..7348c45c 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Sink.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Sink.cpp
@@ -12,20 +12,21 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "sink"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Assembly/Writer.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "sink"
+
STATISTIC(NumSunk, "Number of instructions sunk");
STATISTIC(NumSinkIter, "Number of sinking iterations");
@@ -34,6 +35,7 @@ namespace {
DominatorTree *DT;
LoopInfo *LI;
AliasAnalysis *AA;
+ const DataLayout *DL;
public:
static char ID; // Pass identification
@@ -41,15 +43,15 @@ namespace {
initializeSinkingPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
FunctionPass::getAnalysisUsage(AU);
AU.addRequired<AliasAnalysis>();
- AU.addRequired<DominatorTree>();
+ AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<LoopInfo>();
- AU.addPreserved<DominatorTree>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
AU.addPreserved<LoopInfo>();
}
private:
@@ -63,7 +65,7 @@ namespace {
char Sinking::ID = 0;
INITIALIZE_PASS_BEGIN(Sinking, "sink", "Code sinking", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(Sinking, "sink", "Code sinking", false, false)
@@ -77,15 +79,14 @@ bool Sinking::AllUsesDominatedByBlock(Instruction *Inst,
// This may leave a referencing dbg_value in the original block, before
// the definition of the vreg. Dwarf generator handles this although the
// user might not get the right info at runtime.
- for (Value::use_iterator I = Inst->use_begin(),
- E = Inst->use_end(); I != E; ++I) {
+ for (Use &U : Inst->uses()) {
// Determine the block of the use.
- Instruction *UseInst = cast<Instruction>(*I);
+ Instruction *UseInst = cast<Instruction>(U.getUser());
BasicBlock *UseBlock = UseInst->getParent();
if (PHINode *PN = dyn_cast<PHINode>(UseInst)) {
// PHI nodes use the operand in the predecessor block, not the block with
// the PHI.
- unsigned Num = PHINode::getIncomingValueNumForOperand(I.getOperandNo());
+ unsigned Num = PHINode::getIncomingValueNumForOperand(U.getOperandNo());
UseBlock = PN->getIncomingBlock(Num);
}
// Check that it dominates.
@@ -96,9 +97,11 @@ bool Sinking::AllUsesDominatedByBlock(Instruction *Inst,
}
bool Sinking::runOnFunction(Function &F) {
- DT = &getAnalysis<DominatorTree>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
LI = &getAnalysis<LoopInfo>();
AA = &getAnalysis<AliasAnalysis>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
bool MadeChange, EverMadeChange = false;
@@ -194,7 +197,7 @@ bool Sinking::IsAcceptableTarget(Instruction *Inst,
if (SuccToSinkTo->getUniquePredecessor() != Inst->getParent()) {
// We cannot sink a load across a critical edge - there may be stores in
// other code paths.
- if (!isSafeToSpeculativelyExecute(Inst))
+ if (!isSafeToSpeculativelyExecute(Inst, DL))
return false;
// We don't want to sink across a critical edge if we don't dominate the
@@ -205,7 +208,7 @@ bool Sinking::IsAcceptableTarget(Instruction *Inst,
// Don't sink instructions into a loop.
Loop *succ = LI->getLoopFor(SuccToSinkTo);
Loop *cur = LI->getLoopFor(Inst->getParent());
- if (succ != 0 && succ != cur)
+ if (succ != nullptr && succ != cur)
return false;
}
@@ -218,6 +221,13 @@ bool Sinking::IsAcceptableTarget(Instruction *Inst,
/// instruction out of its current block into a successor.
bool Sinking::SinkInstruction(Instruction *Inst,
SmallPtrSet<Instruction *, 8> &Stores) {
+
+ // Don't sink static alloca instructions. CodeGen assumes allocas outside the
+ // entry block are dynamically sized stack objects.
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
+ if (AI->isStaticAlloca())
+ return false;
+
// Check if it's safe to move the instruction.
if (!isSafeToMove(Inst, AA, Stores))
return false;
@@ -232,14 +242,14 @@ bool Sinking::SinkInstruction(Instruction *Inst,
// SuccToSinkTo - This is the successor to sink this instruction to, once we
// decide.
- BasicBlock *SuccToSinkTo = 0;
+ BasicBlock *SuccToSinkTo = nullptr;
// Instructions can only be sunk if all their uses are in blocks
// dominated by one of the successors.
// Look at all the postdominators and see if we can sink it in one.
DomTreeNode *DTN = DT->getNode(Inst->getParent());
for (DomTreeNode::iterator I = DTN->begin(), E = DTN->end();
- I != E && SuccToSinkTo == 0; ++I) {
+ I != E && SuccToSinkTo == nullptr; ++I) {
BasicBlock *Candidate = (*I)->getBlock();
if ((*I)->getIDom()->getBlock() == Inst->getParent() &&
IsAcceptableTarget(Inst, Candidate))
@@ -249,19 +259,19 @@ bool Sinking::SinkInstruction(Instruction *Inst,
// If no suitable postdominator was found, look at all the successors and
// decide which one we should sink to, if any.
for (succ_iterator I = succ_begin(Inst->getParent()),
- E = succ_end(Inst->getParent()); I != E && SuccToSinkTo == 0; ++I) {
+ E = succ_end(Inst->getParent()); I != E && !SuccToSinkTo; ++I) {
if (IsAcceptableTarget(Inst, *I))
SuccToSinkTo = *I;
}
// If we couldn't find a block to sink to, ignore this instruction.
- if (SuccToSinkTo == 0)
+ if (!SuccToSinkTo)
return false;
DEBUG(dbgs() << "Sink" << *Inst << " (";
- WriteAsOperand(dbgs(), Inst->getParent(), false);
+ Inst->getParent()->printAsOperand(dbgs(), false);
dbgs() << " -> ";
- WriteAsOperand(dbgs(), SuccToSinkTo, false);
+ SuccToSinkTo->printAsOperand(dbgs(), false);
dbgs() << ")\n");
// Move the instruction.
diff --git a/contrib/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp b/contrib/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
index 5045ff8f..b9673ed 100644
--- a/contrib/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
@@ -7,7 +7,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "structurizecfg"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SCCIterator.h"
@@ -15,12 +14,14 @@
#include "llvm/Analysis/RegionIterator.h"
#include "llvm/Analysis/RegionPass.h"
#include "llvm/IR/Module.h"
-#include "llvm/Support/PatternMatch.h"
+#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;
using namespace llvm::PatternMatch;
+#define DEBUG_TYPE "structurizecfg"
+
namespace {
// Definition of the complex types used in this pass.
@@ -64,14 +65,14 @@ public:
/// \brief Start a new query
NearestCommonDominator(DominatorTree *DomTree) {
DT = DomTree;
- Result = 0;
+ Result = nullptr;
}
/// \brief Add BB to the resulting dominator
void addBlock(BasicBlock *BB, bool Remember = true) {
DomTreeNode *Node = DT->getNode(BB);
- if (Result == 0) {
+ if (!Result) {
unsigned Numbering = 0;
for (;Node;Node = Node->getIDom())
IndexMap[Node] = ++Numbering;
@@ -235,18 +236,18 @@ public:
}
using Pass::doInitialization;
- virtual bool doInitialization(Region *R, RGPassManager &RGM);
+ bool doInitialization(Region *R, RGPassManager &RGM) override;
- virtual bool runOnRegion(Region *R, RGPassManager &RGM);
+ bool runOnRegion(Region *R, RGPassManager &RGM) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Structurize control flow";
}
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequiredID(LowerSwitchID);
- AU.addRequired<DominatorTree>();
- AU.addPreserved<DominatorTree>();
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
RegionPass::getAnalysisUsage(AU);
}
};
@@ -258,8 +259,8 @@ char StructurizeCFG::ID = 0;
INITIALIZE_PASS_BEGIN(StructurizeCFG, "structurizecfg", "Structurize the CFG",
false, false)
INITIALIZE_PASS_DEPENDENCY(LowerSwitch)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
-INITIALIZE_PASS_DEPENDENCY(RegionInfo)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(RegionInfoPass)
INITIALIZE_PASS_END(StructurizeCFG, "structurizecfg", "Structurize the CFG",
false, false)
@@ -277,10 +278,9 @@ bool StructurizeCFG::doInitialization(Region *R, RGPassManager &RGM) {
/// \brief Build up the general order of nodes
void StructurizeCFG::orderNodes() {
- scc_iterator<Region *> I = scc_begin(ParentRegion),
- E = scc_end(ParentRegion);
- for (Order.clear(); I != E; ++I) {
- std::vector<RegionNode *> &Nodes = *I;
+ scc_iterator<Region *> I = scc_begin(ParentRegion);
+ for (Order.clear(); !I.isAtEnd(); ++I) {
+ const std::vector<RegionNode *> &Nodes = *I;
Order.append(Nodes.begin(), Nodes.end());
}
}
@@ -326,16 +326,10 @@ Value *StructurizeCFG::invert(Value *Condition) {
if (Instruction *Inst = dyn_cast<Instruction>(Condition)) {
// Third: Check all the users for an invert
BasicBlock *Parent = Inst->getParent();
- for (Value::use_iterator I = Condition->use_begin(),
- E = Condition->use_end(); I != E; ++I) {
-
- Instruction *User = dyn_cast<Instruction>(*I);
- if (!User || User->getParent() != Parent)
- continue;
-
- if (match(*I, m_Not(m_Specific(Condition))))
- return *I;
- }
+ for (User *U : Condition->users())
+ if (Instruction *I = dyn_cast<Instruction>(U))
+ if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
+ return I;
// Last option: Create a new instruction
return BinaryOperator::CreateNot(Condition, "", Parent->getTerminator());
@@ -412,11 +406,11 @@ void StructurizeCFG::gatherPredicates(RegionNode *N) {
} else {
// It's an exit from a sub region
- while(R->getParent() != ParentRegion)
+ while (R->getParent() != ParentRegion)
R = R->getParent();
// Edge from inside a subregion to its entry, ignore it
- if (R == N)
+ if (*R == *N)
continue;
BasicBlock *Entry = R->getEntry();
@@ -460,10 +454,7 @@ void StructurizeCFG::insertConditions(bool Loops) {
Value *Default = Loops ? BoolTrue : BoolFalse;
SSAUpdater PhiInserter;
- for (BranchVector::iterator I = Conds.begin(),
- E = Conds.end(); I != E; ++I) {
-
- BranchInst *Term = *I;
+ for (BranchInst *Term : Conds) {
assert(Term->isConditional());
BasicBlock *Parent = Term->getParent();
@@ -479,7 +470,7 @@ void StructurizeCFG::insertConditions(bool Loops) {
NearestCommonDominator Dominator(DT);
Dominator.addBlock(Parent, false);
- Value *ParentValue = 0;
+ Value *ParentValue = nullptr;
for (BBPredicates::iterator PI = Preds.begin(), PE = Preds.end();
PI != PE; ++PI) {
@@ -598,7 +589,7 @@ void StructurizeCFG::changeExit(RegionNode *Node, BasicBlock *NewExit,
if (Node->isSubRegion()) {
Region *SubRegion = Node->getNodeAs<Region>();
BasicBlock *OldExit = SubRegion->getExit();
- BasicBlock *Dominator = 0;
+ BasicBlock *Dominator = nullptr;
// Find all the edges from the sub region to the exit
for (pred_iterator I = pred_begin(OldExit), E = pred_end(OldExit);
@@ -685,7 +676,8 @@ BasicBlock *StructurizeCFG::needPostfix(BasicBlock *Flow,
/// \brief Set the previous node
void StructurizeCFG::setPrevNode(BasicBlock *BB) {
- PrevNode = ParentRegion->contains(BB) ? ParentRegion->getBBNode(BB) : 0;
+ PrevNode = ParentRegion->contains(BB) ? ParentRegion->getBBNode(BB)
+ : nullptr;
}
/// \brief Does BB dominate all the predicates of Node ?
@@ -706,7 +698,7 @@ bool StructurizeCFG::isPredictableTrue(RegionNode *Node) {
bool Dominated = false;
// Regionentry is always true
- if (PrevNode == 0)
+ if (!PrevNode)
return true;
for (BBPredicates::iterator I = Preds.begin(), E = Preds.end();
@@ -813,11 +805,11 @@ void StructurizeCFG::createFlow() {
Conditions.clear();
LoopConds.clear();
- PrevNode = 0;
+ PrevNode = nullptr;
Visited.clear();
while (!Order.empty()) {
- handleLoops(EntryDominatesExit, 0);
+ handleLoops(EntryDominatesExit, nullptr);
}
if (PrevNode)
@@ -830,25 +822,19 @@ void StructurizeCFG::createFlow() {
/// no longer dominate all their uses. Not sure if this is really nessasary
void StructurizeCFG::rebuildSSA() {
SSAUpdater Updater;
- for (Region::block_iterator I = ParentRegion->block_begin(),
- E = ParentRegion->block_end();
- I != E; ++I) {
-
- BasicBlock *BB = *I;
+ for (const auto &BB : ParentRegion->blocks())
for (BasicBlock::iterator II = BB->begin(), IE = BB->end();
II != IE; ++II) {
bool Initialized = false;
- for (Use *I = &II->use_begin().getUse(), *Next; I; I = Next) {
-
- Next = I->getNext();
-
- Instruction *User = cast<Instruction>(I->getUser());
+ for (auto I = II->use_begin(), E = II->use_end(); I != E;) {
+ Use &U = *I++;
+ Instruction *User = cast<Instruction>(U.getUser());
if (User->getParent() == BB) {
continue;
} else if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
- if (UserPN->getIncomingBlock(*I) == BB)
+ if (UserPN->getIncomingBlock(U) == BB)
continue;
}
@@ -862,10 +848,9 @@ void StructurizeCFG::rebuildSSA() {
Updater.AddAvailableValue(BB, II);
Initialized = true;
}
- Updater.RewriteUseAfterInsertions(*I);
+ Updater.RewriteUseAfterInsertions(U);
}
}
- }
}
/// \brief Run the transformation for each region found
@@ -876,7 +861,7 @@ bool StructurizeCFG::runOnRegion(Region *R, RGPassManager &RGM) {
Func = R->getEntry()->getParent();
ParentRegion = R;
- DT = &getAnalysis<DominatorTree>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
orderNodes();
collectInfos();
diff --git a/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 9fb8ddc..b758025 100644
--- a/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -50,32 +50,35 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "tailcallelim"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CaptureTracking.h"
+#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CFG.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
+#define DEBUG_TYPE "tailcallelim"
+
STATISTIC(NumEliminated, "Number of tail calls removed");
STATISTIC(NumRetDuped, "Number of return duplicated");
STATISTIC(NumAccumAdded, "Number of accumulators introduced");
@@ -89,11 +92,14 @@ namespace {
initializeTailCallElimPass(*PassRegistry::getPassRegistry());
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
- virtual bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
private:
+ bool runTRE(Function &F);
+ bool markTails(Function &F, bool &AllCallsAreTailCalls);
+
CallInst *FindTRECandidate(Instruction *I,
bool CannotTailCallElimCallsMarkedTail);
bool EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
@@ -131,52 +137,253 @@ void TailCallElim::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetTransformInfo>();
}
-/// CanTRE - Scan the specified basic block for alloca instructions.
-/// If it contains any that are variable-sized or not in the entry block,
-/// returns false.
-static bool CanTRE(AllocaInst *AI) {
- // Because of PR962, we don't TRE allocas outside the entry block.
-
- // If this alloca is in the body of the function, or if it is a variable
- // sized allocation, we cannot tail call eliminate calls marked 'tail'
- // with this mechanism.
- BasicBlock *BB = AI->getParent();
- return BB == &BB->getParent()->getEntryBlock() &&
- isa<ConstantInt>(AI->getArraySize());
+/// \brief Scan the specified function for alloca instructions.
+/// If it contains any dynamic allocas, returns false.
+static bool CanTRE(Function &F) {
+ // Because of PR962, we don't TRE dynamic allocas.
+ for (auto &BB : F) {
+ for (auto &I : BB) {
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
+ if (!AI->isStaticAlloca())
+ return false;
+ }
+ }
+ }
+
+ return true;
}
-namespace {
-struct AllocaCaptureTracker : public CaptureTracker {
- AllocaCaptureTracker() : Captured(false) {}
+bool TailCallElim::runOnFunction(Function &F) {
+ if (skipOptnoneFunction(F))
+ return false;
- void tooManyUses() LLVM_OVERRIDE { Captured = true; }
+ bool AllCallsAreTailCalls = false;
+ bool Modified = markTails(F, AllCallsAreTailCalls);
+ if (AllCallsAreTailCalls)
+ Modified |= runTRE(F);
+ return Modified;
+}
- bool shouldExplore(Use *U) LLVM_OVERRIDE {
- Value *V = U->getUser();
- if (isa<CallInst>(V) || isa<InvokeInst>(V))
- UsesAlloca.insert(V);
- return true;
+namespace {
+struct AllocaDerivedValueTracker {
+  // Start at a root value and walk its def-use chain to mark calls that use the
+ // value or a derived value in AllocaUsers, and places where it may escape in
+ // EscapePoints.
+ void walk(Value *Root) {
+ SmallVector<Use *, 32> Worklist;
+ SmallPtrSet<Use *, 32> Visited;
+
+ auto AddUsesToWorklist = [&](Value *V) {
+ for (auto &U : V->uses()) {
+ if (!Visited.insert(&U))
+ continue;
+ Worklist.push_back(&U);
+ }
+ };
+
+ AddUsesToWorklist(Root);
+
+ while (!Worklist.empty()) {
+ Use *U = Worklist.pop_back_val();
+ Instruction *I = cast<Instruction>(U->getUser());
+
+ switch (I->getOpcode()) {
+ case Instruction::Call:
+ case Instruction::Invoke: {
+ CallSite CS(I);
+ bool IsNocapture = !CS.isCallee(U) &&
+ CS.doesNotCapture(CS.getArgumentNo(U));
+ callUsesLocalStack(CS, IsNocapture);
+ if (IsNocapture) {
+ // If the alloca-derived argument is passed in as nocapture, then it
+ // can't propagate to the call's return. That would be capturing.
+ continue;
+ }
+ break;
+ }
+ case Instruction::Load: {
+ // The result of a load is not alloca-derived (unless an alloca has
+ // otherwise escaped, but this is a local analysis).
+ continue;
+ }
+ case Instruction::Store: {
+ if (U->getOperandNo() == 0)
+ EscapePoints.insert(I);
+ continue; // Stores have no users to analyze.
+ }
+ case Instruction::BitCast:
+ case Instruction::GetElementPtr:
+ case Instruction::PHI:
+ case Instruction::Select:
+ case Instruction::AddrSpaceCast:
+ break;
+ default:
+ EscapePoints.insert(I);
+ break;
+ }
+
+ AddUsesToWorklist(I);
+ }
}
- bool captured(Use *U) LLVM_OVERRIDE {
- if (isa<ReturnInst>(U->getUser()))
- return false;
- Captured = true;
- return true;
+ void callUsesLocalStack(CallSite CS, bool IsNocapture) {
+ // Add it to the list of alloca users.
+ AllocaUsers.insert(CS.getInstruction());
+
+ // If it's nocapture then it can't capture this alloca.
+ if (IsNocapture)
+ return;
+
+ // If it can write to memory, it can leak the alloca value.
+ if (!CS.onlyReadsMemory())
+ EscapePoints.insert(CS.getInstruction());
}
- bool Captured;
- SmallPtrSet<const Value *, 16> UsesAlloca;
+ SmallPtrSet<Instruction *, 32> AllocaUsers;
+ SmallPtrSet<Instruction *, 32> EscapePoints;
};
-} // end anonymous namespace
+}
-bool TailCallElim::runOnFunction(Function &F) {
+bool TailCallElim::markTails(Function &F, bool &AllCallsAreTailCalls) {
+ if (F.callsFunctionThatReturnsTwice())
+ return false;
+ AllCallsAreTailCalls = true;
+
+ // The local stack holds all alloca instructions and all byval arguments.
+ AllocaDerivedValueTracker Tracker;
+ for (Argument &Arg : F.args()) {
+ if (Arg.hasByValAttr())
+ Tracker.walk(&Arg);
+ }
+ for (auto &BB : F) {
+ for (auto &I : BB)
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
+ Tracker.walk(AI);
+ }
+
+ bool Modified = false;
+
+ // Track whether a block is reachable after an alloca has escaped. Blocks that
+ // contain the escaping instruction will be marked as being visited without an
+ // escaped alloca, since that is how the block began.
+ enum VisitType {
+ UNVISITED,
+ UNESCAPED,
+ ESCAPED
+ };
+ DenseMap<BasicBlock *, VisitType> Visited;
+
+ // We propagate the fact that an alloca has escaped from block to successor.
+ // Visit the blocks that are propagating the escapedness first. To do this, we
+ // maintain two worklists.
+ SmallVector<BasicBlock *, 32> WorklistUnescaped, WorklistEscaped;
+
+ // We may enter a block and visit it thinking that no alloca has escaped yet,
+ // then see an escape point and go back around a loop edge and come back to
+ // the same block twice. Because of this, we defer setting tail on calls when
+ // we first encounter them in a block. Every entry in this list does not
+ // statically use an alloca via use-def chain analysis, but may find an alloca
+ // through other means if the block turns out to be reachable after an escape
+ // point.
+ SmallVector<CallInst *, 32> DeferredTails;
+
+ BasicBlock *BB = &F.getEntryBlock();
+ VisitType Escaped = UNESCAPED;
+ do {
+ for (auto &I : *BB) {
+ if (Tracker.EscapePoints.count(&I))
+ Escaped = ESCAPED;
+
+ CallInst *CI = dyn_cast<CallInst>(&I);
+ if (!CI || CI->isTailCall())
+ continue;
+
+ if (CI->doesNotAccessMemory()) {
+ // A call to a readnone function whose arguments are all things computed
+ // outside this function can be marked tail. Even if you stored the
+ // alloca address into a global, a readnone function can't load the
+ // global anyhow.
+ //
+ // Note that this runs whether we know an alloca has escaped or not. If
+ // it has, then we can't trust Tracker.AllocaUsers to be accurate.
+ bool SafeToTail = true;
+ for (auto &Arg : CI->arg_operands()) {
+ if (isa<Constant>(Arg.getUser()))
+ continue;
+ if (Argument *A = dyn_cast<Argument>(Arg.getUser()))
+ if (!A->hasByValAttr())
+ continue;
+ SafeToTail = false;
+ break;
+ }
+ if (SafeToTail) {
+ emitOptimizationRemark(
+ F.getContext(), "tailcallelim", F, CI->getDebugLoc(),
+ "marked this readnone call a tail call candidate");
+ CI->setTailCall();
+ Modified = true;
+ continue;
+ }
+ }
+
+ if (Escaped == UNESCAPED && !Tracker.AllocaUsers.count(CI)) {
+ DeferredTails.push_back(CI);
+ } else {
+ AllCallsAreTailCalls = false;
+ }
+ }
+
+ for (auto *SuccBB : make_range(succ_begin(BB), succ_end(BB))) {
+ auto &State = Visited[SuccBB];
+ if (State < Escaped) {
+ State = Escaped;
+ if (State == ESCAPED)
+ WorklistEscaped.push_back(SuccBB);
+ else
+ WorklistUnescaped.push_back(SuccBB);
+ }
+ }
+
+ if (!WorklistEscaped.empty()) {
+ BB = WorklistEscaped.pop_back_val();
+ Escaped = ESCAPED;
+ } else {
+ BB = nullptr;
+ while (!WorklistUnescaped.empty()) {
+ auto *NextBB = WorklistUnescaped.pop_back_val();
+ if (Visited[NextBB] == UNESCAPED) {
+ BB = NextBB;
+ Escaped = UNESCAPED;
+ break;
+ }
+ }
+ }
+ } while (BB);
+
+ for (CallInst *CI : DeferredTails) {
+ if (Visited[CI->getParent()] != ESCAPED) {
+ // If the escape point was part way through the block, calls after the
+ // escape point wouldn't have been put into DeferredTails.
+ emitOptimizationRemark(F.getContext(), "tailcallelim", F,
+ CI->getDebugLoc(),
+ "marked this call a tail call candidate");
+ CI->setTailCall();
+ Modified = true;
+ } else {
+ AllCallsAreTailCalls = false;
+ }
+ }
+
+ return Modified;
+}
+
+bool TailCallElim::runTRE(Function &F) {
// If this function is a varargs function, we won't be able to PHI the args
// right, so don't even try to convert it...
if (F.getFunctionType()->isVarArg()) return false;
TTI = &getAnalysis<TargetTransformInfo>();
- BasicBlock *OldEntry = 0;
+ BasicBlock *OldEntry = nullptr;
bool TailCallsAreMarkedTail = false;
SmallVector<PHINode*, 8> ArgumentPHIs;
bool MadeChange = false;
@@ -185,39 +392,23 @@ bool TailCallElim::runOnFunction(Function &F) {
// marked with the 'tail' attribute, because doing so would cause the stack
// size to increase (real TRE would deallocate variable sized allocas, TRE
// doesn't).
- bool CanTRETailMarkedCall = true;
-
- // Find calls that can be marked tail.
- AllocaCaptureTracker ACT;
- for (Function::iterator BB = F.begin(), EE = F.end(); BB != EE; ++BB) {
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
- if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
- CanTRETailMarkedCall &= CanTRE(AI);
- PointerMayBeCaptured(AI, &ACT);
- // If any allocas are captured, exit.
- if (ACT.Captured)
- return false;
- }
- }
- }
+ bool CanTRETailMarkedCall = CanTRE(F);
- // Second pass, change any tail recursive calls to loops.
+ // Change any tail recursive calls to loops.
//
// FIXME: The code generator produces really bad code when an 'escaping
// alloca' is changed from being a static alloca to being a dynamic alloca.
// Until this is resolved, disable this transformation if that would ever
// happen. This bug is PR962.
- if (ACT.UsesAlloca.empty()) {
- for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
- if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB->getTerminator())) {
- bool Change = ProcessReturningBlock(Ret, OldEntry, TailCallsAreMarkedTail,
- ArgumentPHIs, !CanTRETailMarkedCall);
- if (!Change && BB->getFirstNonPHIOrDbg() == Ret)
- Change = FoldReturnAndProcessPred(BB, Ret, OldEntry,
- TailCallsAreMarkedTail, ArgumentPHIs,
- !CanTRETailMarkedCall);
- MadeChange |= Change;
- }
+ for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
+ if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB->getTerminator())) {
+ bool Change = ProcessReturningBlock(Ret, OldEntry, TailCallsAreMarkedTail,
+ ArgumentPHIs, !CanTRETailMarkedCall);
+ if (!Change && BB->getFirstNonPHIOrDbg() == Ret)
+ Change = FoldReturnAndProcessPred(BB, Ret, OldEntry,
+ TailCallsAreMarkedTail, ArgumentPHIs,
+ !CanTRETailMarkedCall);
+ MadeChange |= Change;
}
}
@@ -226,34 +417,13 @@ bool TailCallElim::runOnFunction(Function &F) {
// with themselves. Check to see if we did and clean up our mess if so. This
// occurs when a function passes an argument straight through to its tail
// call.
- if (!ArgumentPHIs.empty()) {
- for (unsigned i = 0, e = ArgumentPHIs.size(); i != e; ++i) {
- PHINode *PN = ArgumentPHIs[i];
-
- // If the PHI Node is a dynamic constant, replace it with the value it is.
- if (Value *PNV = SimplifyInstruction(PN)) {
- PN->replaceAllUsesWith(PNV);
- PN->eraseFromParent();
- }
- }
- }
+ for (unsigned i = 0, e = ArgumentPHIs.size(); i != e; ++i) {
+ PHINode *PN = ArgumentPHIs[i];
- // At this point, we know that the function does not have any captured
- // allocas. If additionally the function does not call setjmp, mark all calls
- // in the function that do not access stack memory with the tail keyword. This
- // implies ensuring that there does not exist any path from a call that takes
- // in an alloca but does not capture it and the call which we wish to mark
- // with "tail".
- if (!F.callsFunctionThatReturnsTwice()) {
- for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
- if (CallInst *CI = dyn_cast<CallInst>(I)) {
- if (!ACT.UsesAlloca.count(CI)) {
- CI->setTailCall();
- MadeChange = true;
- }
- }
- }
+ // If the PHI Node is a dynamic constant, replace it with the value it is.
+ if (Value *PNV = SimplifyInstruction(PN)) {
+ PN->replaceAllUsesWith(PNV);
+ PN->eraseFromParent();
}
}
@@ -340,11 +510,11 @@ static bool isDynamicConstant(Value *V, CallInst *CI, ReturnInst *RI) {
//
static Value *getCommonReturnValue(ReturnInst *IgnoreRI, CallInst *CI) {
Function *F = CI->getParent()->getParent();
- Value *ReturnedValue = 0;
+ Value *ReturnedValue = nullptr;
for (Function::iterator BBI = F->begin(), E = F->end(); BBI != E; ++BBI) {
ReturnInst *RI = dyn_cast<ReturnInst>(BBI->getTerminator());
- if (RI == 0 || RI == IgnoreRI) continue;
+ if (RI == nullptr || RI == IgnoreRI) continue;
// We can only perform this transformation if the value returned is
// evaluatable at the start of the initial invocation of the function,
@@ -352,10 +522,10 @@ static Value *getCommonReturnValue(ReturnInst *IgnoreRI, CallInst *CI) {
//
Value *RetOp = RI->getOperand(0);
if (!isDynamicConstant(RetOp, CI, RI))
- return 0;
+ return nullptr;
if (ReturnedValue && RetOp != ReturnedValue)
- return 0; // Cannot transform if differing values are returned.
+ return nullptr; // Cannot transform if differing values are returned.
ReturnedValue = RetOp;
}
return ReturnedValue;
@@ -367,23 +537,23 @@ static Value *getCommonReturnValue(ReturnInst *IgnoreRI, CallInst *CI) {
///
Value *TailCallElim::CanTransformAccumulatorRecursion(Instruction *I,
CallInst *CI) {
- if (!I->isAssociative() || !I->isCommutative()) return 0;
+ if (!I->isAssociative() || !I->isCommutative()) return nullptr;
assert(I->getNumOperands() == 2 &&
"Associative/commutative operations should have 2 args!");
// Exactly one operand should be the result of the call instruction.
if ((I->getOperand(0) == CI && I->getOperand(1) == CI) ||
(I->getOperand(0) != CI && I->getOperand(1) != CI))
- return 0;
+ return nullptr;
// The only user of this instruction we allow is a single return instruction.
- if (!I->hasOneUse() || !isa<ReturnInst>(I->use_back()))
- return 0;
+ if (!I->hasOneUse() || !isa<ReturnInst>(I->user_back()))
+ return nullptr;
// Ok, now we have to check all of the other return instructions in this
// function. If they return non-constants or differing values, then we cannot
// transform the function safely.
- return getCommonReturnValue(cast<ReturnInst>(I->use_back()), CI);
+ return getCommonReturnValue(cast<ReturnInst>(I->user_back()), CI);
}
static Instruction *FirstNonDbg(BasicBlock::iterator I) {
@@ -399,11 +569,11 @@ TailCallElim::FindTRECandidate(Instruction *TI,
Function *F = BB->getParent();
if (&BB->front() == TI) // Make sure there is something before the terminator.
- return 0;
+ return nullptr;
// Scan backwards from the return, checking to see if there is a tail call in
// this block. If so, set CI to it.
- CallInst *CI = 0;
+ CallInst *CI = nullptr;
BasicBlock::iterator BBI = TI;
while (true) {
CI = dyn_cast<CallInst>(BBI);
@@ -411,14 +581,14 @@ TailCallElim::FindTRECandidate(Instruction *TI,
break;
if (BBI == BB->begin())
- return 0; // Didn't find a potential tail call.
+ return nullptr; // Didn't find a potential tail call.
--BBI;
}
// If this call is marked as a tail call, and if there are dynamic allocas in
// the function, we cannot perform this optimization.
if (CI->isTailCall() && CannotTailCallElimCallsMarkedTail)
- return 0;
+ return nullptr;
// As a special case, detect code like this:
// double fabs(double f) { return __builtin_fabs(f); } // a 'fabs' call
@@ -426,7 +596,7 @@ TailCallElim::FindTRECandidate(Instruction *TI,
// lower the call to fabs into inline code.
if (BB == &F->getEntryBlock() &&
FirstNonDbg(BB->front()) == CI &&
- FirstNonDbg(llvm::next(BB->begin())) == TI &&
+ FirstNonDbg(std::next(BB->begin())) == TI &&
CI->getCalledFunction() &&
!TTI->isLoweredToCall(CI->getCalledFunction())) {
// A single-block function with just a call and a return. Check that
@@ -438,7 +608,7 @@ TailCallElim::FindTRECandidate(Instruction *TI,
for (; I != E && FI != FE; ++I, ++FI)
if (*I != &*FI) break;
if (I == E && FI == FE)
- return 0;
+ return nullptr;
}
return CI;
@@ -459,8 +629,8 @@ bool TailCallElim::EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
// which is different to the constant returned by other return instructions
// (which is recorded in AccumulatorRecursionEliminationInitVal). This is a
// special case of accumulator recursion, the operation being "return C".
- Value *AccumulatorRecursionEliminationInitVal = 0;
- Instruction *AccumulatorRecursionInstr = 0;
+ Value *AccumulatorRecursionEliminationInitVal = nullptr;
+ Instruction *AccumulatorRecursionInstr = nullptr;
// Ok, we found a potential tail call. We can currently only transform the
// tail call if all of the instructions between the call and the return are
@@ -490,8 +660,8 @@ bool TailCallElim::EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
// accumulator recursion variable eliminated.
if (Ret->getNumOperands() == 1 && Ret->getReturnValue() != CI &&
!isa<UndefValue>(Ret->getReturnValue()) &&
- AccumulatorRecursionEliminationInitVal == 0 &&
- !getCommonReturnValue(0, CI)) {
+ AccumulatorRecursionEliminationInitVal == nullptr &&
+ !getCommonReturnValue(nullptr, CI)) {
// One case remains that we are able to handle: the current return
// instruction returns a constant, and all other return instructions
// return a different constant.
@@ -507,9 +677,12 @@ bool TailCallElim::EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
BasicBlock *BB = Ret->getParent();
Function *F = BB->getParent();
+ emitOptimizationRemark(F->getContext(), "tailcallelim", *F, CI->getDebugLoc(),
+ "transforming tail recursion to loop");
+
// OK! We can transform this tail call. If this is the first one found,
// create the new entry block, allowing us to branch back to the old entry.
- if (OldEntry == 0) {
+ if (!OldEntry) {
OldEntry = &F->getEntryBlock();
BasicBlock *NewEntry = BasicBlock::Create(F->getContext(), "", F, OldEntry);
NewEntry->takeName(OldEntry);
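For orientation, a source-level sketch of the two recursion shapes the rewritten pass handles (hypothetical C++ example, not taken from this change): a call in tail position that EliminateRecursiveTailCall turns into a branch back to the entry block, and an accumulator variant that CanTransformAccumulatorRecursion handles by introducing a running accumulator.

    // Plain tail recursion: the recursive call's value is returned unchanged,
    // so the call can become a jump back to the rewritten entry block.
    long count_down(long n) {
      if (n == 0) return 0;
      return count_down(n - 1);
    }

    // Accumulator recursion: the call result feeds exactly one associative,
    // commutative operation (the multiply) whose only user is the return, so
    // the pass can rewrite it as a loop that keeps a running product.
    long factorial(long n) {
      if (n <= 1) return 1;
      return n * factorial(n - 1);
    }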
diff --git a/contrib/llvm/lib/Transforms/Utils/ASanStackFrameLayout.cpp b/contrib/llvm/lib/Transforms/Utils/ASanStackFrameLayout.cpp
new file mode 100644
index 0000000..cce016a
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Utils/ASanStackFrameLayout.cpp
@@ -0,0 +1,114 @@
+//===-- ASanStackFrameLayout.cpp - helper for AddressSanitizer ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Definition of ComputeASanStackFrameLayout (see ASanStackFrameLayout.h).
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+
+namespace llvm {
+
+// We sort the stack variables by alignment (largest first) to minimize
+// unnecessarily large gaps due to alignment.
+// It is tempting to also sort variables by size so that larger variables
+// have larger redzones at both ends. But reordering will make report analysis
+// harder, especially when temporary unnamed variables are present.
+// So, until we can provide more information (type, line number, etc.)
+// for the stack variables, we avoid reordering them too much.
+static inline bool CompareVars(const ASanStackVariableDescription &a,
+ const ASanStackVariableDescription &b) {
+ return a.Alignment > b.Alignment;
+}
+
+// We also force minimal alignment for all vars to kMinAlignment so that vars
+// with e.g. alignment 1 and alignment 16 do not get reordered by CompareVars.
+static const size_t kMinAlignment = 16;
+
+static size_t RoundUpTo(size_t X, size_t RoundTo) {
+ assert((RoundTo & (RoundTo - 1)) == 0);
+ return (X + RoundTo - 1) & ~(RoundTo - 1);
+}
+
+// The larger the variable Size, the larger the redzone.
+// The resulting frame size is a multiple of Alignment.
+static size_t VarAndRedzoneSize(size_t Size, size_t Alignment) {
+ size_t Res = 0;
+ if (Size <= 4) Res = 16;
+ else if (Size <= 16) Res = 32;
+ else if (Size <= 128) Res = Size + 32;
+ else if (Size <= 512) Res = Size + 64;
+ else if (Size <= 4096) Res = Size + 128;
+ else Res = Size + 256;
+ return RoundUpTo(Res, Alignment);
+}
+
+void
+ComputeASanStackFrameLayout(SmallVectorImpl<ASanStackVariableDescription> &Vars,
+ size_t Granularity, size_t MinHeaderSize,
+ ASanStackFrameLayout *Layout) {
+ assert(Granularity >= 8 && Granularity <= 64 &&
+ (Granularity & (Granularity - 1)) == 0);
+ assert(MinHeaderSize >= 16 && (MinHeaderSize & (MinHeaderSize - 1)) == 0 &&
+ MinHeaderSize >= Granularity);
+ size_t NumVars = Vars.size();
+ assert(NumVars > 0);
+ for (size_t i = 0; i < NumVars; i++)
+ Vars[i].Alignment = std::max(Vars[i].Alignment, kMinAlignment);
+
+ std::stable_sort(Vars.begin(), Vars.end(), CompareVars);
+ SmallString<2048> StackDescriptionStorage;
+ raw_svector_ostream StackDescription(StackDescriptionStorage);
+ StackDescription << NumVars;
+ Layout->FrameAlignment = std::max(Granularity, Vars[0].Alignment);
+ SmallVector<uint8_t, 64> &SB(Layout->ShadowBytes);
+ SB.clear();
+ size_t Offset = std::max(std::max(MinHeaderSize, Granularity),
+ Vars[0].Alignment);
+ assert((Offset % Granularity) == 0);
+ SB.insert(SB.end(), Offset / Granularity, kAsanStackLeftRedzoneMagic);
+ for (size_t i = 0; i < NumVars; i++) {
+ bool IsLast = i == NumVars - 1;
+ size_t Alignment = std::max(Granularity, Vars[i].Alignment);
+ (void)Alignment; // Used only in asserts.
+ size_t Size = Vars[i].Size;
+ const char *Name = Vars[i].Name;
+ assert((Alignment & (Alignment - 1)) == 0);
+ assert(Layout->FrameAlignment >= Alignment);
+ assert((Offset % Alignment) == 0);
+ assert(Size > 0);
+ StackDescription << " " << Offset << " " << Size << " " << strlen(Name)
+ << " " << Name;
+ size_t NextAlignment = IsLast ? Granularity
+ : std::max(Granularity, Vars[i + 1].Alignment);
+ size_t SizeWithRedzone = VarAndRedzoneSize(Vars[i].Size, NextAlignment);
+ SB.insert(SB.end(), Size / Granularity, 0);
+ if (Size % Granularity)
+ SB.insert(SB.end(), Size % Granularity);
+ SB.insert(SB.end(), (SizeWithRedzone - Size) / Granularity,
+ IsLast ? kAsanStackRightRedzoneMagic
+ : kAsanStackMidRedzoneMagic);
+ Vars[i].Offset = Offset;
+ Offset += SizeWithRedzone;
+ }
+ if (Offset % MinHeaderSize) {
+ size_t ExtraRedzone = MinHeaderSize - (Offset % MinHeaderSize);
+ SB.insert(SB.end(), ExtraRedzone / Granularity,
+ kAsanStackRightRedzoneMagic);
+ Offset += ExtraRedzone;
+ }
+ Layout->DescriptionString = StackDescription.str();
+ Layout->FrameSize = Offset;
+ assert((Layout->FrameSize % MinHeaderSize) == 0);
+ assert(Layout->FrameSize / Granularity == Layout->ShadowBytes.size());
+}
+
+} // llvm namespace
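To make the redzone sizing concrete, here is a minimal standalone sketch (plain C++, no LLVM dependencies) that mirrors RoundUpTo and VarAndRedzoneSize above; the values in the comments are what that logic produces for two sample sizes.

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Mirrors the helpers above, for illustration only.
    static std::size_t roundUpTo(std::size_t X, std::size_t RoundTo) {
      assert((RoundTo & (RoundTo - 1)) == 0);
      return (X + RoundTo - 1) & ~(RoundTo - 1);
    }

    static std::size_t varAndRedzoneSize(std::size_t Size, std::size_t Alignment) {
      std::size_t Res = 0;
      if (Size <= 4) Res = 16;
      else if (Size <= 16) Res = 32;
      else if (Size <= 128) Res = Size + 32;
      else if (Size <= 512) Res = Size + 64;
      else if (Size <= 4096) Res = Size + 128;
      else Res = Size + 256;
      return roundUpTo(Res, Alignment);
    }

    int main() {
      // A 4-byte variable becomes a 16-byte slot; a 100-byte variable becomes
      // 100 + 32 = 132 bytes, rounded up to 144 for 16-byte alignment.
      std::printf("%zu %zu\n", varAndRedzoneSize(4, 16), varAndRedzoneSize(100, 16));
      return 0;
    }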
diff --git a/contrib/llvm/lib/Transforms/Utils/AddDiscriminators.cpp b/contrib/llvm/lib/Transforms/Utils/AddDiscriminators.cpp
new file mode 100644
index 0000000..196ac79
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Utils/AddDiscriminators.cpp
@@ -0,0 +1,222 @@
+//===- AddDiscriminators.cpp - Insert DWARF path discriminators -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file adds DWARF discriminators to the IR. Path discriminators are
+// used to decide what CFG path was taken inside sub-graphs whose instructions
+// share the same line and column number information.
+//
+// The main user of this is the sample profiler. Instruction samples are
+// mapped to line number information. Since a single line may be spread
+// out over several basic blocks, discriminators add more precise location
+// for the samples.
+//
+// For example,
+//
+// 1 #define ASSERT(P)
+// 2 if (!(P))
+// 3 abort()
+// ...
+// 100 while (true) {
+// 101 ASSERT (sum < 0);
+// 102 ...
+// 130 }
+//
+// when converted to IR, this snippet looks something like:
+//
+// while.body: ; preds = %entry, %if.end
+// %0 = load i32* %sum, align 4, !dbg !15
+// %cmp = icmp slt i32 %0, 0, !dbg !15
+// br i1 %cmp, label %if.end, label %if.then, !dbg !15
+//
+// if.then: ; preds = %while.body
+// call void @abort(), !dbg !15
+// br label %if.end, !dbg !15
+//
+// Notice that all the instructions in blocks 'while.body' and 'if.then'
+// have exactly the same debug information. When this program is sampled
+// at runtime, the profiler will assume that all these instructions are
+// equally frequent. This, in turn, will make it consider the edge
+// while.body->if.then to be frequently taken (which is incorrect).
+//
+// By adding a discriminator value to the instructions in block 'if.then',
+// we can distinguish instructions at line 101 with discriminator 0 from
+// the instructions at line 101 with discriminator 1.
+//
+// For more details about DWARF discriminators, please visit
+// http://wiki.dwarfstd.org/index.php?title=Path_Discriminators
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DIBuilder.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "add-discriminators"
+
+namespace {
+ struct AddDiscriminators : public FunctionPass {
+ static char ID; // Pass identification, replacement for typeid
+ AddDiscriminators() : FunctionPass(ID) {
+ initializeAddDiscriminatorsPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F) override;
+ };
+}
+
+char AddDiscriminators::ID = 0;
+INITIALIZE_PASS_BEGIN(AddDiscriminators, "add-discriminators",
+ "Add DWARF path discriminators", false, false)
+INITIALIZE_PASS_END(AddDiscriminators, "add-discriminators",
+ "Add DWARF path discriminators", false, false)
+
+// Command line option to disable discriminator generation even in the
+// presence of debug information. This is only needed when debugging
+// debug info generation issues.
+static cl::opt<bool>
+NoDiscriminators("no-discriminators", cl::init(false),
+ cl::desc("Disable generation of discriminator information."));
+
+FunctionPass *llvm::createAddDiscriminatorsPass() {
+ return new AddDiscriminators();
+}
+
+static bool hasDebugInfo(const Function &F) {
+ NamedMDNode *CUNodes = F.getParent()->getNamedMetadata("llvm.dbg.cu");
+ return CUNodes != nullptr;
+}
+
+/// \brief Assign DWARF discriminators.
+///
+/// To assign discriminators, we examine the boundaries of every
+/// basic block and its successors. Suppose there is a basic block B1
+/// with successor B2. The last instruction I1 in B1 and the first
+/// instruction I2 in B2 are located at the same file and line number.
+/// This situation is illustrated in the following code snippet:
+///
+/// if (i < 10) x = i;
+///
+/// entry:
+/// br i1 %cmp, label %if.then, label %if.end, !dbg !10
+/// if.then:
+/// %1 = load i32* %i.addr, align 4, !dbg !10
+/// store i32 %1, i32* %x, align 4, !dbg !10
+/// br label %if.end, !dbg !10
+/// if.end:
+/// ret void, !dbg !12
+///
+/// Notice how the branch instruction in block 'entry' and all the
+/// instructions in block 'if.then' have the exact same debug location
+/// information (!dbg !10).
+///
+/// To distinguish instructions in block 'entry' from instructions in
+/// block 'if.then', we generate a new lexical block for all the
+/// instructions in block 'if.then' that share the same file and line
+/// location with the last instruction of block 'entry'.
+///
+/// This new lexical block will have the same location information as
+/// the previous one, but with a new DWARF discriminator value.
+///
+/// One of the main uses of this discriminator value is in runtime
+/// sample profilers. It allows the profiler to distinguish instructions
+/// at location !dbg !10 that execute on different basic blocks. This is
+/// important because while the predicate 'if (x < 10)' may have been
+/// executed millions of times, the assignment 'x = i' may have only
+/// executed a handful of times (meaning that the entry->if.then edge is
+/// seldom taken).
+///
+/// If we did not have discriminator information, the profiler would
+/// assign the same weight to both blocks 'entry' and 'if.then', which
+/// in turn will make it conclude that the entry->if.then edge is very
+/// hot.
+///
+/// To decide where to create new discriminator values, this function
+/// traverses the CFG and examines instructions at basic block boundaries.
+/// If the last instruction I1 of a block B1 is at the same file and line
+/// location as instruction I2 of successor B2, then it creates a new
+/// lexical block for I2 and all the instructions in B2 that share the same
+/// file and line location as I2. This new lexical block will have a
+/// different discriminator number than I1.
+bool AddDiscriminators::runOnFunction(Function &F) {
+ // If the function has debug information, but the user has disabled
+ // discriminators, do nothing.
+ // Similarly, if the function has no debug info, do nothing.
+ // Finally, if this module is built with dwarf versions earlier than 4,
+ // do nothing (discriminator support is a DWARF 4 feature).
+ if (NoDiscriminators ||
+ !hasDebugInfo(F) ||
+ F.getParent()->getDwarfVersion() < 4)
+ return false;
+
+ bool Changed = false;
+ Module *M = F.getParent();
+ LLVMContext &Ctx = M->getContext();
+ DIBuilder Builder(*M);
+
+ // Traverse all the blocks looking for instructions in different
+ // blocks that are at the same file:line location.
+ for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
+ BasicBlock *B = I;
+ TerminatorInst *Last = B->getTerminator();
+ DebugLoc LastLoc = Last->getDebugLoc();
+ if (LastLoc.isUnknown()) continue;
+ DILocation LastDIL(LastLoc.getAsMDNode(Ctx));
+
+ for (unsigned I = 0; I < Last->getNumSuccessors(); ++I) {
+ BasicBlock *Succ = Last->getSuccessor(I);
+ Instruction *First = Succ->getFirstNonPHIOrDbgOrLifetime();
+ DebugLoc FirstLoc = First->getDebugLoc();
+ if (FirstLoc.isUnknown()) continue;
+ DILocation FirstDIL(FirstLoc.getAsMDNode(Ctx));
+
+ // If the first instruction (First) of Succ is at the same file
+ // location as B's last instruction (Last), add a new
+ // discriminator for First's location and all the instructions
+ // in Succ that share the same location with First.
+ if (FirstDIL.atSameLineAs(LastDIL)) {
+ // Create a new lexical scope and compute a new discriminator
+ // number for it.
+ StringRef Filename = FirstDIL.getFilename();
+ unsigned LineNumber = FirstDIL.getLineNumber();
+ unsigned ColumnNumber = FirstDIL.getColumnNumber();
+ DIScope Scope = FirstDIL.getScope();
+ DIFile File = Builder.createFile(Filename, Scope.getDirectory());
+ unsigned Discriminator = FirstDIL.computeNewDiscriminator(Ctx);
+ DILexicalBlock NewScope = Builder.createLexicalBlock(
+ Scope, File, LineNumber, ColumnNumber, Discriminator);
+ DILocation NewDIL = FirstDIL.copyWithNewScope(Ctx, NewScope);
+ DebugLoc newDebugLoc = DebugLoc::getFromDILocation(NewDIL);
+
+ // Attach this new debug location to First and every
+ // instruction following First that shares the same location.
+ for (BasicBlock::iterator I1(*First), E1 = Succ->end(); I1 != E1;
+ ++I1) {
+ if (I1->getDebugLoc() != FirstLoc) break;
+ I1->setDebugLoc(newDebugLoc);
+ DEBUG(dbgs() << NewDIL.getFilename() << ":" << NewDIL.getLineNumber()
+ << ":" << NewDIL.getColumnNumber() << ":"
+ << NewDIL.getDiscriminator() << *I1 << "\n");
+ }
+ DEBUG(dbgs() << "\n");
+ Changed = true;
+ }
+ }
+ }
+ return Changed;
+}
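As a sketch of how a downstream consumer benefits (hypothetical code, not part of LLVM): a sample profiler can aggregate counts by (line, discriminator) rather than by line alone, so the while.body and if.then samples from the example in the file header no longer share a single counter.

    #include <cstdint>
    #include <map>
    #include <utility>

    // Hypothetical sample aggregation keyed by (line, discriminator).
    struct LineSamples {
      std::map<std::pair<std::uint32_t, std::uint32_t>, std::uint64_t> Counts;
      void addSample(std::uint32_t Line, std::uint32_t Discriminator, std::uint64_t N) {
        Counts[{Line, Discriminator}] += N;
      }
    };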
diff --git a/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp b/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
index 12de9ee..602e8ba 100644
--- a/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -15,17 +15,17 @@
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFG.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
@@ -68,8 +68,8 @@ void llvm::DeleteDeadBlock(BasicBlock *BB) {
void llvm::FoldSingleEntryPHINodes(BasicBlock *BB, Pass *P) {
if (!isa<PHINode>(BB->begin())) return;
- AliasAnalysis *AA = 0;
- MemoryDependenceAnalysis *MemDep = 0;
+ AliasAnalysis *AA = nullptr;
+ MemoryDependenceAnalysis *MemDep = nullptr;
if (P) {
AA = P->getAnalysisIfAvailable<AliasAnalysis>();
MemDep = P->getAnalysisIfAvailable<MemoryDependenceAnalysis>();
@@ -130,7 +130,7 @@ bool llvm::MergeBlockIntoPredecessor(BasicBlock *BB, Pass *P) {
BasicBlock *OnlySucc = BB;
for (; SI != SE; ++SI)
if (*SI != OnlySucc) {
- OnlySucc = 0; // There are multiple distinct successors!
+ OnlySucc = nullptr; // There are multiple distinct successors!
break;
}
@@ -167,15 +167,17 @@ bool llvm::MergeBlockIntoPredecessor(BasicBlock *BB, Pass *P) {
// Finally, erase the old block and update dominator info.
if (P) {
- if (DominatorTree *DT = P->getAnalysisIfAvailable<DominatorTree>()) {
- if (DomTreeNode *DTN = DT->getNode(BB)) {
- DomTreeNode *PredDTN = DT->getNode(PredBB);
+ if (DominatorTreeWrapperPass *DTWP =
+ P->getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
+ DominatorTree &DT = DTWP->getDomTree();
+ if (DomTreeNode *DTN = DT.getNode(BB)) {
+ DomTreeNode *PredDTN = DT.getNode(PredBB);
SmallVector<DomTreeNode*, 8> Children(DTN->begin(), DTN->end());
for (SmallVectorImpl<DomTreeNode *>::iterator DI = Children.begin(),
DE = Children.end(); DI != DE; ++DI)
- DT->changeImmediateDominator(*DI, PredDTN);
+ DT.changeImmediateDominator(*DI, PredDTN);
- DT->eraseNode(BB);
+ DT.eraseNode(BB);
}
if (LoopInfo *LI = P->getAnalysisIfAvailable<LoopInfo>())
@@ -215,7 +217,7 @@ void llvm::ReplaceInstWithValue(BasicBlock::InstListType &BIL,
///
void llvm::ReplaceInstWithInst(BasicBlock::InstListType &BIL,
BasicBlock::iterator &BI, Instruction *I) {
- assert(I->getParent() == 0 &&
+ assert(I->getParent() == nullptr &&
"ReplaceInstWithInst: Instruction already inserted into basic block!");
// Insert the new instruction into the basic block...
@@ -252,7 +254,7 @@ BasicBlock *llvm::SplitEdge(BasicBlock *BB, BasicBlock *Succ, Pass *P) {
// If the successor only has a single pred, split the top of the successor
// block.
assert(SP == BB && "CFG broken");
- SP = NULL;
+ SP = nullptr;
return SplitBlock(Succ, Succ->begin(), P);
}
@@ -280,18 +282,20 @@ BasicBlock *llvm::SplitBlock(BasicBlock *Old, Instruction *SplitPt, Pass *P) {
if (Loop *L = LI->getLoopFor(Old))
L->addBasicBlockToLoop(New, LI->getBase());
- if (DominatorTree *DT = P->getAnalysisIfAvailable<DominatorTree>()) {
+ if (DominatorTreeWrapperPass *DTWP =
+ P->getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
+ DominatorTree &DT = DTWP->getDomTree();
// Old dominates New. New node dominates all other nodes dominated by Old.
- if (DomTreeNode *OldNode = DT->getNode(Old)) {
+ if (DomTreeNode *OldNode = DT.getNode(Old)) {
std::vector<DomTreeNode *> Children;
for (DomTreeNode::iterator I = OldNode->begin(), E = OldNode->end();
I != E; ++I)
Children.push_back(*I);
- DomTreeNode *NewNode = DT->addNewBlock(New,Old);
+ DomTreeNode *NewNode = DT.addNewBlock(New, Old);
for (std::vector<DomTreeNode *>::iterator I = Children.begin(),
E = Children.end(); I != E; ++I)
- DT->changeImmediateDominator(*I, NewNode);
+ DT.changeImmediateDominator(*I, NewNode);
}
}
@@ -306,7 +310,7 @@ static void UpdateAnalysisInformation(BasicBlock *OldBB, BasicBlock *NewBB,
if (!P) return;
LoopInfo *LI = P->getAnalysisIfAvailable<LoopInfo>();
- Loop *L = LI ? LI->getLoopFor(OldBB) : 0;
+ Loop *L = LI ? LI->getLoopFor(OldBB) : nullptr;
// If we need to preserve loop analyses, collect some information about how
// this split will affect loops.
@@ -336,9 +340,9 @@ static void UpdateAnalysisInformation(BasicBlock *OldBB, BasicBlock *NewBB,
}
// Update dominator tree if available.
- DominatorTree *DT = P->getAnalysisIfAvailable<DominatorTree>();
- if (DT)
- DT->splitBlock(NewBB);
+ if (DominatorTreeWrapperPass *DTWP =
+ P->getAnalysisIfAvailable<DominatorTreeWrapperPass>())
+ DTWP->getDomTree().splitBlock(NewBB);
if (!L) return;
@@ -347,7 +351,7 @@ static void UpdateAnalysisInformation(BasicBlock *OldBB, BasicBlock *NewBB,
// loop). To find this, examine each of the predecessors and determine which
// loops enclose them, and select the most-nested loop which contains the
// loop containing the block being split.
- Loop *InnermostPredLoop = 0;
+ Loop *InnermostPredLoop = nullptr;
for (ArrayRef<BasicBlock*>::iterator
i = Preds.begin(), e = Preds.end(); i != e; ++i) {
BasicBlock *Pred = *i;
@@ -380,51 +384,68 @@ static void UpdatePHINodes(BasicBlock *OrigBB, BasicBlock *NewBB,
ArrayRef<BasicBlock*> Preds, BranchInst *BI,
Pass *P, bool HasLoopExit) {
// Otherwise, create a new PHI node in NewBB for each PHI node in OrigBB.
- AliasAnalysis *AA = P ? P->getAnalysisIfAvailable<AliasAnalysis>() : 0;
+ AliasAnalysis *AA = P ? P->getAnalysisIfAvailable<AliasAnalysis>() : nullptr;
+ SmallPtrSet<BasicBlock *, 16> PredSet(Preds.begin(), Preds.end());
for (BasicBlock::iterator I = OrigBB->begin(); isa<PHINode>(I); ) {
PHINode *PN = cast<PHINode>(I++);
// Check to see if all of the values coming in are the same. If so, we
// don't need to create a new PHI node, unless it's needed for LCSSA.
- Value *InVal = 0;
+ Value *InVal = nullptr;
if (!HasLoopExit) {
InVal = PN->getIncomingValueForBlock(Preds[0]);
- for (unsigned i = 1, e = Preds.size(); i != e; ++i)
- if (InVal != PN->getIncomingValueForBlock(Preds[i])) {
- InVal = 0;
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
+ if (!PredSet.count(PN->getIncomingBlock(i)))
+ continue;
+ if (!InVal)
+ InVal = PN->getIncomingValue(i);
+ else if (InVal != PN->getIncomingValue(i)) {
+ InVal = nullptr;
break;
}
+ }
}
if (InVal) {
// If all incoming values for the new PHI would be the same, just don't
// make a new PHI. Instead, just remove the incoming values from the old
// PHI.
- for (unsigned i = 0, e = Preds.size(); i != e; ++i) {
- // Explicitly check the BB index here to handle duplicates in Preds.
- int Idx = PN->getBasicBlockIndex(Preds[i]);
- if (Idx >= 0)
- PN->removeIncomingValue(Idx, false);
- }
- } else {
- // If the values coming into the block are not the same, we need a PHI.
- // Create the new PHI node, insert it into NewBB at the end of the block
- PHINode *NewPHI =
- PHINode::Create(PN->getType(), Preds.size(), PN->getName() + ".ph", BI);
- if (AA) AA->copyValue(PN, NewPHI);
- // Move all of the PHI values for 'Preds' to the new PHI.
- for (unsigned i = 0, e = Preds.size(); i != e; ++i) {
- Value *V = PN->removeIncomingValue(Preds[i], false);
- NewPHI->addIncoming(V, Preds[i]);
- }
+ // NOTE! This loop walks backwards for a reason! First off, this minimizes
+ // the cost of removal if we end up removing a large number of values, and
+ // second off, this ensures that the indices for the incoming values
+ // aren't invalidated when we remove one.
+ for (int64_t i = PN->getNumIncomingValues() - 1; i >= 0; --i)
+ if (PredSet.count(PN->getIncomingBlock(i)))
+ PN->removeIncomingValue(i, false);
+
+ // Add an incoming value to the PHI node in the loop for the preheader
+ // edge.
+ PN->addIncoming(InVal, NewBB);
+ continue;
+ }
- InVal = NewPHI;
+ // If the values coming into the block are not the same, we need a new
+ // PHI.
+ // Create the new PHI node, insert it into NewBB at the end of the block
+ PHINode *NewPHI =
+ PHINode::Create(PN->getType(), Preds.size(), PN->getName() + ".ph", BI);
+ if (AA)
+ AA->copyValue(PN, NewPHI);
+
+ // NOTE! This loop walks backwards for a reason! First off, this minimizes
+ // the cost of removal if we end up removing a large number of values, and
+ // second off, this ensures that the indices for the incoming values aren't
+ // invalidated when we remove one.
+ for (int64_t i = PN->getNumIncomingValues() - 1; i >= 0; --i) {
+ BasicBlock *IncomingBB = PN->getIncomingBlock(i);
+ if (PredSet.count(IncomingBB)) {
+ Value *V = PN->removeIncomingValue(i, false);
+ NewPHI->addIncoming(V, IncomingBB);
+ }
}
- // Add an incoming value to the PHI node in the loop for the preheader
- // edge.
- PN->addIncoming(InVal, NewBB);
+ PN->addIncoming(NewPHI, NewBB);
}
}
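The "walks backwards" notes above describe a general index-invalidation pattern; a minimal illustration with a plain vector (assumed example, not LLVM API):

    #include <vector>

    // Erasing by index while walking forward shifts the remaining elements and
    // invalidates the indices not yet visited; walking backwards avoids that.
    static void eraseMatching(std::vector<int> &V, int Bad) {
      for (long long i = static_cast<long long>(V.size()) - 1; i >= 0; --i)
        if (V[i] == Bad)
          V.erase(V.begin() + i);
    }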
@@ -538,7 +559,7 @@ void llvm::SplitLandingPadPredecessors(BasicBlock *OrigBB,
e = pred_end(OrigBB);
}
- BasicBlock *NewBB2 = 0;
+ BasicBlock *NewBB2 = nullptr;
if (!NewBB2Preds.empty()) {
// Create another basic block for the rest of OrigBB's predecessors.
NewBB2 = BasicBlock::Create(OrigBB->getContext(),
@@ -603,7 +624,7 @@ ReturnInst *llvm::FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
for (User::op_iterator i = NewRet->op_begin(), e = NewRet->op_end();
i != e; ++i) {
Value *V = *i;
- Instruction *NewBC = 0;
+ Instruction *NewBC = nullptr;
if (BitCastInst *BCI = dyn_cast<BitCastInst>(V)) {
// Return value might be bitcasted. Clone and insert it before the
// return instruction.
@@ -630,28 +651,30 @@ ReturnInst *llvm::FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
}
/// SplitBlockAndInsertIfThen - Split the containing block at the
-/// specified instruction - everything before and including Cmp stays
-/// in the old basic block, and everything after Cmp is moved to a
+/// specified instruction - everything before and including SplitBefore stays
+/// in the old basic block, and everything after SplitBefore is moved to a
/// new block. The two blocks are connected by a conditional branch
/// (with value of Cmp being the condition).
/// Before:
/// Head
-/// Cmp
+/// SplitBefore
/// Tail
/// After:
/// Head
-/// Cmp
-/// if (Cmp)
+/// if (Cond)
/// ThenBlock
+/// SplitBefore
/// Tail
///
/// If Unreachable is true, then ThenBlock ends with
/// UnreachableInst, otherwise it branches to Tail.
/// Returns the NewBasicBlock's terminator.
-TerminatorInst *llvm::SplitBlockAndInsertIfThen(Instruction *Cmp,
- bool Unreachable, MDNode *BranchWeights) {
- Instruction *SplitBefore = Cmp->getNextNode();
+TerminatorInst *llvm::SplitBlockAndInsertIfThen(Value *Cond,
+ Instruction *SplitBefore,
+ bool Unreachable,
+ MDNode *BranchWeights,
+ DominatorTree *DT) {
BasicBlock *Head = SplitBefore->getParent();
BasicBlock *Tail = Head->splitBasicBlock(SplitBefore);
TerminatorInst *HeadOldTerm = Head->getTerminator();
@@ -662,13 +685,65 @@ TerminatorInst *llvm::SplitBlockAndInsertIfThen(Instruction *Cmp,
CheckTerm = new UnreachableInst(C, ThenBlock);
else
CheckTerm = BranchInst::Create(Tail, ThenBlock);
+ CheckTerm->setDebugLoc(SplitBefore->getDebugLoc());
BranchInst *HeadNewTerm =
- BranchInst::Create(/*ifTrue*/ThenBlock, /*ifFalse*/Tail, Cmp);
+ BranchInst::Create(/*ifTrue*/ThenBlock, /*ifFalse*/Tail, Cond);
+ HeadNewTerm->setDebugLoc(SplitBefore->getDebugLoc());
HeadNewTerm->setMetadata(LLVMContext::MD_prof, BranchWeights);
ReplaceInstWithInst(HeadOldTerm, HeadNewTerm);
+
+ if (DT) {
+ if (DomTreeNode *OldNode = DT->getNode(Head)) {
+ std::vector<DomTreeNode *> Children(OldNode->begin(), OldNode->end());
+
+ DomTreeNode *NewNode = DT->addNewBlock(Tail, Head);
+ for (auto Child : Children)
+ DT->changeImmediateDominator(Child, NewNode);
+
+ // Head dominates ThenBlock.
+ DT->addNewBlock(ThenBlock, Head);
+ }
+ }
+
return CheckTerm;
}
+/// SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen,
+/// but also creates the ElseBlock.
+/// Before:
+/// Head
+/// SplitBefore
+/// Tail
+/// After:
+/// Head
+/// if (Cond)
+/// ThenBlock
+/// else
+/// ElseBlock
+/// SplitBefore
+/// Tail
+void llvm::SplitBlockAndInsertIfThenElse(Value *Cond, Instruction *SplitBefore,
+ TerminatorInst **ThenTerm,
+ TerminatorInst **ElseTerm,
+ MDNode *BranchWeights) {
+ BasicBlock *Head = SplitBefore->getParent();
+ BasicBlock *Tail = Head->splitBasicBlock(SplitBefore);
+ TerminatorInst *HeadOldTerm = Head->getTerminator();
+ LLVMContext &C = Head->getContext();
+ BasicBlock *ThenBlock = BasicBlock::Create(C, "", Head->getParent(), Tail);
+ BasicBlock *ElseBlock = BasicBlock::Create(C, "", Head->getParent(), Tail);
+ *ThenTerm = BranchInst::Create(Tail, ThenBlock);
+ (*ThenTerm)->setDebugLoc(SplitBefore->getDebugLoc());
+ *ElseTerm = BranchInst::Create(Tail, ElseBlock);
+ (*ElseTerm)->setDebugLoc(SplitBefore->getDebugLoc());
+ BranchInst *HeadNewTerm =
+ BranchInst::Create(/*ifTrue*/ThenBlock, /*ifFalse*/ElseBlock, Cond);
+ HeadNewTerm->setDebugLoc(SplitBefore->getDebugLoc());
+ HeadNewTerm->setMetadata(LLVMContext::MD_prof, BranchWeights);
+ ReplaceInstWithInst(HeadOldTerm, HeadNewTerm);
+}
+
+
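A minimal usage sketch for the new helper (assuming a caller that already has an instruction SplitPt and an i1 value Cond in the same function; branch weights omitted):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/Transforms/Utils/BasicBlockUtils.h"
    using namespace llvm;

    // Split the block holding SplitPt and create both arms; each arm falls
    // through to the block that now begins at SplitPt.
    static void instrumentWithIfElse(Instruction *SplitPt, Value *Cond) {
      TerminatorInst *ThenTerm = nullptr, *ElseTerm = nullptr;
      SplitBlockAndInsertIfThenElse(Cond, SplitPt, &ThenTerm, &ElseTerm, nullptr);
      IRBuilder<> ThenB(ThenTerm); // emit 'then'-side instrumentation here
      IRBuilder<> ElseB(ElseTerm); // emit 'else'-side instrumentation here
      (void)ThenB;
      (void)ElseB;
    }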
/// GetIfCondition - Given a basic block (BB) with two predecessors,
/// check to see if the merge at this block is due
/// to an "if condition". If so, return the boolean condition that determines
@@ -681,32 +756,32 @@ TerminatorInst *llvm::SplitBlockAndInsertIfThen(Instruction *Cmp,
Value *llvm::GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
BasicBlock *&IfFalse) {
PHINode *SomePHI = dyn_cast<PHINode>(BB->begin());
- BasicBlock *Pred1 = NULL;
- BasicBlock *Pred2 = NULL;
+ BasicBlock *Pred1 = nullptr;
+ BasicBlock *Pred2 = nullptr;
if (SomePHI) {
if (SomePHI->getNumIncomingValues() != 2)
- return NULL;
+ return nullptr;
Pred1 = SomePHI->getIncomingBlock(0);
Pred2 = SomePHI->getIncomingBlock(1);
} else {
pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
if (PI == PE) // No predecessor
- return NULL;
+ return nullptr;
Pred1 = *PI++;
if (PI == PE) // Only one predecessor
- return NULL;
+ return nullptr;
Pred2 = *PI++;
if (PI != PE) // More than two predecessors
- return NULL;
+ return nullptr;
}
// We can only handle branches. Other control flow will be lowered to
// branches if possible anyway.
BranchInst *Pred1Br = dyn_cast<BranchInst>(Pred1->getTerminator());
BranchInst *Pred2Br = dyn_cast<BranchInst>(Pred2->getTerminator());
- if (Pred1Br == 0 || Pred2Br == 0)
- return 0;
+ if (!Pred1Br || !Pred2Br)
+ return nullptr;
// Eliminate code duplication by ensuring that Pred1Br is conditional if
// either are.
@@ -716,7 +791,7 @@ Value *llvm::GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
// required anyway, we stand no chance of eliminating it, so the xform is
// probably not profitable.
if (Pred1Br->isConditional())
- return 0;
+ return nullptr;
std::swap(Pred1, Pred2);
std::swap(Pred1Br, Pred2Br);
@@ -726,8 +801,8 @@ Value *llvm::GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
// The only thing we have to watch out for here is to make sure that Pred2
// doesn't have incoming edges from other blocks. If it does, the condition
// doesn't dominate BB.
- if (Pred2->getSinglePredecessor() == 0)
- return 0;
+ if (!Pred2->getSinglePredecessor())
+ return nullptr;
// If we found a conditional branch predecessor, make sure that it branches
// to BB and Pred2Br. If it doesn't, this isn't an "if statement".
@@ -742,7 +817,7 @@ Value *llvm::GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
} else {
// We know that one arm of the conditional goes to BB, so the other must
// go somewhere unrelated, and this must not be an "if statement".
- return 0;
+ return nullptr;
}
return Pred1Br->getCondition();
@@ -752,12 +827,12 @@ Value *llvm::GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
// BB. Don't panic! If both blocks only have a single (identical)
// predecessor, and THAT is a conditional branch, then we're all ok!
BasicBlock *CommonPred = Pred1->getSinglePredecessor();
- if (CommonPred == 0 || CommonPred != Pred2->getSinglePredecessor())
- return 0;
+ if (CommonPred == nullptr || CommonPred != Pred2->getSinglePredecessor())
+ return nullptr;
// Otherwise, if this is a conditional branch, then we can use it!
BranchInst *BI = dyn_cast<BranchInst>(CommonPred->getTerminator());
- if (BI == 0) return 0;
+ if (!BI) return nullptr;
assert(BI->isConditional() && "Two successors but not conditional?");
if (BI->getSuccessor(0) == Pred1) {
diff --git a/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp b/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
index 0e7f7f7..80bd516 100644
--- a/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
@@ -15,21 +15,22 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "break-crit-edges"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;
+#define DEBUG_TYPE "break-crit-edges"
+
STATISTIC(NumBroken, "Number of blocks inserted");
namespace {
@@ -39,10 +40,10 @@ namespace {
initializeBreakCriticalEdgesPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addPreserved<DominatorTree>();
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addPreserved<DominatorTreeWrapperPass>();
AU.addPreserved<LoopInfo>();
// No loop canonicalization guarantees are broken by this pass.
@@ -141,7 +142,7 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
Pass *P, bool MergeIdenticalEdges,
bool DontDeleteUselessPhis,
bool SplitLandingPads) {
- if (!isCriticalEdge(TI, SuccNum, MergeIdenticalEdges)) return 0;
+ if (!isCriticalEdge(TI, SuccNum, MergeIdenticalEdges)) return nullptr;
assert(!isa<IndirectBrInst>(TI) &&
"Cannot split critical edge from IndirectBrInst");
@@ -151,7 +152,7 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
// Splitting the critical edge to a landing pad block is non-trivial. Don't do
// it in this generic function.
- if (DestBB->isLandingPad()) return 0;
+ if (DestBB->isLandingPad()) return nullptr;
// Create a new basic block, linking it into the CFG.
BasicBlock *NewBB = BasicBlock::Create(TI->getContext(),
@@ -207,13 +208,15 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
// If we don't have a pass object, we can't update anything...
- if (P == 0) return NewBB;
+ if (!P) return NewBB;
- DominatorTree *DT = P->getAnalysisIfAvailable<DominatorTree>();
+ DominatorTreeWrapperPass *DTWP =
+ P->getAnalysisIfAvailable<DominatorTreeWrapperPass>();
+ DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
LoopInfo *LI = P->getAnalysisIfAvailable<LoopInfo>();
// If we have nothing to update, just return.
- if (DT == 0 && LI == 0)
+ if (!DT && !LI)
return NewBB;
// Now update analysis information. Since the only predecessor of NewBB is
@@ -249,7 +252,7 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
//
if (TINode) { // Don't break unreachable code!
DomTreeNode *NewBBNode = DT->addNewBlock(NewBB, TIBB);
- DomTreeNode *DestBBNode = 0;
+ DomTreeNode *DestBBNode = nullptr;
// If NewBBDominatesDestBB hasn't been computed yet, do so with DT.
if (!OtherPreds.empty()) {
@@ -297,9 +300,8 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
P->addBasicBlockToLoop(NewBB, LI->getBase());
}
}
- // If TIBB is in a loop and DestBB is outside of that loop, split the
- // other exit blocks of the loop that also have predecessors outside
- // the loop, to maintain a LoopSimplify guarantee.
+ // If TIBB is in a loop and DestBB is outside of that loop, we may need
+ // to update LoopSimplify form and LCSSA form.
if (!TIL->contains(DestBB) &&
P->mustPreserveAnalysisID(LoopSimplifyID)) {
assert(!TIL->contains(NewBB) &&
@@ -309,50 +311,35 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
if (P->mustPreserveAnalysisID(LCSSAID))
createPHIsForSplitLoopExit(TIBB, NewBB, DestBB);
- // For each unique exit block...
- // FIXME: This code is functionally equivalent to the corresponding
- // loop in LoopSimplify.
- SmallVector<BasicBlock *, 4> ExitBlocks;
- TIL->getExitBlocks(ExitBlocks);
- for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
- // Collect all the preds that are inside the loop, and note
- // whether there are any preds outside the loop.
- SmallVector<BasicBlock *, 4> Preds;
- bool HasPredOutsideOfLoop = false;
- BasicBlock *Exit = ExitBlocks[i];
- for (pred_iterator I = pred_begin(Exit), E = pred_end(Exit);
- I != E; ++I) {
- BasicBlock *P = *I;
- if (TIL->contains(P)) {
- if (isa<IndirectBrInst>(P->getTerminator())) {
- Preds.clear();
- break;
- }
- Preds.push_back(P);
- } else {
- HasPredOutsideOfLoop = true;
- }
- }
- // If there are any preds not in the loop, we'll need to split
- // the edges. The Preds.empty() check is needed because a block
- // may appear multiple times in the list. We can't use
- // getUniqueExitBlocks above because that depends on LoopSimplify
- // form, which we're in the process of restoring!
- if (!Preds.empty() && HasPredOutsideOfLoop) {
- if (!Exit->isLandingPad()) {
- BasicBlock *NewExitBB =
- SplitBlockPredecessors(Exit, Preds, "split", P);
- if (P->mustPreserveAnalysisID(LCSSAID))
- createPHIsForSplitLoopExit(Preds, NewExitBB, Exit);
- } else if (SplitLandingPads) {
- SmallVector<BasicBlock*, 8> NewBBs;
- SplitLandingPadPredecessors(Exit, Preds,
- ".split1", ".split2",
- P, NewBBs);
- if (P->mustPreserveAnalysisID(LCSSAID))
- createPHIsForSplitLoopExit(Preds, NewBBs[0], Exit);
- }
+ // The only way that we can break LoopSimplify form by splitting a critical
+ // edge is if after the split there exists some edge from TIL to DestBB
+ // *and* the only edge into DestBB from outside of TIL is that of
+ // NewBB. If the first isn't true, then LoopSimplify still holds, NewBB
+ // is the new exit block and it has no non-loop predecessors. If the
+ // second isn't true, then DestBB was not in LoopSimplify form prior to
+ // the split as it had a non-loop predecessor. In both of these cases,
+ // the predecessor must be directly in TIL, not in a subloop, or again
+ // LoopSimplify doesn't hold.
+ SmallVector<BasicBlock *, 4> LoopPreds;
+ for (pred_iterator I = pred_begin(DestBB), E = pred_end(DestBB); I != E;
+ ++I) {
+ BasicBlock *P = *I;
+ if (P == NewBB)
+ continue; // The new block is known.
+ if (LI->getLoopFor(P) != TIL) {
+ // No need to re-simplify, it wasn't to start with.
+ LoopPreds.clear();
+ break;
}
+ LoopPreds.push_back(P);
+ }
+ if (!LoopPreds.empty()) {
+ assert(!DestBB->isLandingPad() &&
+ "We don't split edges to landing pads!");
+ BasicBlock *NewExitBB =
+ SplitBlockPredecessors(DestBB, LoopPreds, "split", P);
+ if (P->mustPreserveAnalysisID(LCSSAID))
+ createPHIsForSplitLoopExit(LoopPreds, NewExitBB, DestBB);
}
}
// LCSSA form was updated above for the case where LoopSimplify is
diff --git a/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp b/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
index 6d13217..be00b695 100644
--- a/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
@@ -27,7 +27,8 @@ using namespace llvm;
/// CastToCStr - Return V if it is an i8*, otherwise cast it to i8*.
Value *llvm::CastToCStr(Value *V, IRBuilder<> &B) {
- return B.CreateBitCast(V, B.getInt8PtrTy(), "cstr");
+ unsigned AS = V->getType()->getPointerAddressSpace();
+ return B.CreateBitCast(V, B.getInt8PtrTy(AS), "cstr");
}
/// EmitStrLen - Emit a call to the strlen function to the builder, for the
@@ -35,7 +36,7 @@ Value *llvm::CastToCStr(Value *V, IRBuilder<> &B) {
Value *llvm::EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::strlen))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS[2];
@@ -64,7 +65,7 @@ Value *llvm::EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout *TD,
Value *llvm::EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::strnlen))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS[2];
@@ -94,7 +95,7 @@ Value *llvm::EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
Value *llvm::EmitStrChr(Value *Ptr, char C, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::strchr))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
Attribute::AttrKind AVs[2] = { Attribute::ReadOnly, Attribute::NoUnwind };
@@ -120,7 +121,7 @@ Value *llvm::EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len,
IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::strncmp))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS[3];
@@ -153,7 +154,7 @@ Value *llvm::EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI,
StringRef Name) {
if (!TLI->has(LibFunc::strcpy))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS[2];
@@ -177,7 +178,7 @@ Value *llvm::EmitStrNCpy(Value *Dst, Value *Src, Value *Len,
IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI, StringRef Name) {
if (!TLI->has(LibFunc::strncpy))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS[2];
@@ -204,7 +205,7 @@ Value *llvm::EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::memcpy_chk))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS;
@@ -232,7 +233,7 @@ Value *llvm::EmitMemChr(Value *Ptr, Value *Val,
Value *Len, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::memchr))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS;
@@ -260,7 +261,7 @@ Value *llvm::EmitMemCmp(Value *Ptr1, Value *Ptr2,
Value *Len, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::memcmp))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS[3];
@@ -286,6 +287,21 @@ Value *llvm::EmitMemCmp(Value *Ptr1, Value *Ptr2,
return CI;
}
+/// Append a suffix to the function name according to the type of 'Op'.
+static void AppendTypeSuffix(Value *Op, StringRef &Name, SmallString<20> &NameBuffer) {
+ if (!Op->getType()->isDoubleTy()) {
+ NameBuffer += Name;
+
+ if (Op->getType()->isFloatTy())
+ NameBuffer += 'f';
+ else
+ NameBuffer += 'l';
+
+ Name = NameBuffer;
+ }
+ return;
+}
+
/// EmitUnaryFloatFnCall - Emit a call to the unary function named 'Name' (e.g.
/// 'floor'). This function is known to take a single argument of type matching
/// 'Op' and return one value with the same type. If 'Op' is a long double, 'l' is
@@ -293,15 +309,7 @@ Value *llvm::EmitMemCmp(Value *Ptr1, Value *Ptr2,
Value *llvm::EmitUnaryFloatFnCall(Value *Op, StringRef Name, IRBuilder<> &B,
const AttributeSet &Attrs) {
SmallString<20> NameBuffer;
- if (!Op->getType()->isDoubleTy()) {
- // If we need to add a suffix, copy into NameBuffer.
- NameBuffer += Name;
- if (Op->getType()->isFloatTy())
- NameBuffer += 'f'; // floorf
- else
- NameBuffer += 'l'; // floorl
- Name = NameBuffer;
- }
+ AppendTypeSuffix(Op, Name, NameBuffer);
Module *M = B.GetInsertBlock()->getParent()->getParent();
Value *Callee = M->getOrInsertFunction(Name, Op->getType(),
@@ -314,12 +322,33 @@ Value *llvm::EmitUnaryFloatFnCall(Value *Op, StringRef Name, IRBuilder<> &B,
return CI;
}
+/// EmitBinaryFloatFnCall - Emit a call to the binary function named 'Name'
+/// (e.g. 'fmin'). This function is known to take operands of type matching
+/// 'Op1' and 'Op2' and return one value with the same type. If 'Op1/Op2' is a
+/// long double, 'l' is added as the suffix of the name; if 'Op1/Op2' is a
+/// float, an 'f' suffix is added.
+Value *llvm::EmitBinaryFloatFnCall(Value *Op1, Value *Op2, StringRef Name,
+ IRBuilder<> &B, const AttributeSet &Attrs) {
+ SmallString<20> NameBuffer;
+ AppendTypeSuffix(Op1, Name, NameBuffer);
+
+ Module *M = B.GetInsertBlock()->getParent()->getParent();
+ Value *Callee = M->getOrInsertFunction(Name, Op1->getType(),
+ Op1->getType(), Op2->getType(), NULL);
+ CallInst *CI = B.CreateCall2(Callee, Op1, Op2, Name);
+ CI->setAttributes(Attrs);
+ if (const Function *F = dyn_cast<Function>(Callee->stripPointerCasts()))
+ CI->setCallingConv(F->getCallingConv());
+
+ return CI;
+}
+
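A usage sketch for the new binary helper (Op1, Op2, and Builder assumed to exist in the caller, and the matching declaration assumed in BuildLibCalls.h): the suffix appended by AppendTypeSuffix follows the operand type, so one call site resolves to fmin, fminf, or fminl.

    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/Transforms/Utils/BuildLibCalls.h"
    using namespace llvm;

    // With float operands this emits a call to "fminf"; with double operands
    // "fmin", and with long double operands "fminl".
    static Value *emitFMin(Value *Op1, Value *Op2, IRBuilder<> &Builder) {
      return EmitBinaryFloatFnCall(Op1, Op2, "fmin", Builder, AttributeSet());
    }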
/// EmitPutChar - Emit a call to the putchar function. This assumes that Char
/// is an integer.
Value *llvm::EmitPutChar(Value *Char, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::putchar))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
Value *PutChar = M->getOrInsertFunction("putchar", B.getInt32Ty(),
@@ -341,7 +370,7 @@ Value *llvm::EmitPutChar(Value *Char, IRBuilder<> &B, const DataLayout *TD,
Value *llvm::EmitPutS(Value *Str, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::puts))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS[2];
@@ -365,7 +394,7 @@ Value *llvm::EmitPutS(Value *Str, IRBuilder<> &B, const DataLayout *TD,
Value *llvm::EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::fputc))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS[2];
@@ -398,7 +427,7 @@ Value *llvm::EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
Value *llvm::EmitFPutS(Value *Str, Value *File, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::fputs))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS[3];
@@ -431,7 +460,7 @@ Value *llvm::EmitFWrite(Value *Ptr, Value *Size, Value *File,
IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::fwrite))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS[3];
diff --git a/contrib/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp b/contrib/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
index 1f517d0..f2d5e07 100644
--- a/contrib/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
@@ -15,7 +15,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "bypass-slow-division"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/Function.h"
@@ -24,6 +23,8 @@
using namespace llvm;
+#define DEBUG_TYPE "bypass-slow-division"
+
namespace {
struct DivOpInfo {
bool SignedOp;
@@ -53,11 +54,11 @@ namespace llvm {
}
static DivOpInfo getEmptyKey() {
- return DivOpInfo(false, 0, 0);
+ return DivOpInfo(false, nullptr, nullptr);
}
static DivOpInfo getTombstoneKey() {
- return DivOpInfo(true, 0, 0);
+ return DivOpInfo(true, nullptr, nullptr);
}
static unsigned getHashValue(const DivOpInfo &Val) {
diff --git a/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp b/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
index d105f5e..5c8f20d 100644
--- a/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
@@ -17,8 +17,9 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
@@ -26,7 +27,7 @@
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
-#include "llvm/Support/CFG.h"
+#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
@@ -88,26 +89,28 @@ void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
assert(VMap.count(I) && "No mapping from source argument specified!");
#endif
+ // Copy all attributes other than those stored in the AttributeSet. We need
+ // to remap the parameter indices of the AttributeSet.
+ AttributeSet NewAttrs = NewFunc->getAttributes();
+ NewFunc->copyAttributesFrom(OldFunc);
+ NewFunc->setAttributes(NewAttrs);
+
AttributeSet OldAttrs = OldFunc->getAttributes();
// Clone any argument attributes that are present in the VMap.
- for (Function::const_arg_iterator I = OldFunc->arg_begin(),
- E = OldFunc->arg_end();
- I != E; ++I)
- if (Argument *Anew = dyn_cast<Argument>(VMap[I])) {
+ for (const Argument &OldArg : OldFunc->args())
+ if (Argument *NewArg = dyn_cast<Argument>(VMap[&OldArg])) {
AttributeSet attrs =
- OldAttrs.getParamAttributes(I->getArgNo() + 1);
+ OldAttrs.getParamAttributes(OldArg.getArgNo() + 1);
if (attrs.getNumSlots() > 0)
- Anew->addAttr(attrs);
+ NewArg->addAttr(attrs);
}
- NewFunc->setAttributes(NewFunc->getAttributes()
- .addAttributes(NewFunc->getContext(),
- AttributeSet::ReturnIndex,
- OldAttrs.getRetAttributes()));
- NewFunc->setAttributes(NewFunc->getAttributes()
- .addAttributes(NewFunc->getContext(),
- AttributeSet::FunctionIndex,
- OldAttrs.getFnAttributes()));
+ NewFunc->setAttributes(
+ NewFunc->getAttributes()
+ .addAttributes(NewFunc->getContext(), AttributeSet::ReturnIndex,
+ OldAttrs.getRetAttributes())
+ .addAttributes(NewFunc->getContext(), AttributeSet::FunctionIndex,
+ OldAttrs.getFnAttributes()));
// Loop over all of the basic blocks in the function, cloning them as
// appropriate. Note that we save BE this way in order to handle cloning of
@@ -151,6 +154,54 @@ void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
TypeMapper, Materializer);
}
+// Find the MDNode which corresponds to the DISubprogram data that described F.
+static MDNode* FindSubprogram(const Function *F, DebugInfoFinder &Finder) {
+ for (DISubprogram Subprogram : Finder.subprograms()) {
+ if (Subprogram.describes(F)) return Subprogram;
+ }
+ return nullptr;
+}
+
+// Add an operand to an existing MDNode. The new operand will be added at the
+// back of the operand list.
+static void AddOperand(MDNode *Node, Value *Operand) {
+ SmallVector<Value*, 16> Operands;
+ for (unsigned i = 0; i < Node->getNumOperands(); i++) {
+ Operands.push_back(Node->getOperand(i));
+ }
+ Operands.push_back(Operand);
+ MDNode *NewNode = MDNode::get(Node->getContext(), Operands);
+ Node->replaceAllUsesWith(NewNode);
+}
+
+// Clone the module-level debug info associated with OldFunc. The cloned data
+// will point to NewFunc instead.
+static void CloneDebugInfoMetadata(Function *NewFunc, const Function *OldFunc,
+ ValueToValueMapTy &VMap) {
+ DebugInfoFinder Finder;
+ Finder.processModule(*OldFunc->getParent());
+
+ const MDNode *OldSubprogramMDNode = FindSubprogram(OldFunc, Finder);
+ if (!OldSubprogramMDNode) return;
+
+ // Ensure that OldFunc appears in the map.
+ // (if it's already there it must point to NewFunc anyway)
+ VMap[OldFunc] = NewFunc;
+ DISubprogram NewSubprogram(MapValue(OldSubprogramMDNode, VMap));
+
+ for (DICompileUnit CU : Finder.compile_units()) {
+ DIArray Subprograms(CU.getSubprograms());
+
+ // If the compile unit's function list contains the old function, it should
+ // also contain the new one.
+ for (unsigned i = 0; i < Subprograms.getNumElements(); i++) {
+ if ((MDNode*)Subprograms.getElement(i) == OldSubprogramMDNode) {
+ AddOperand(Subprograms, NewSubprogram);
+ }
+ }
+ }
+}
+
/// CloneFunction - Return a copy of the specified function, but without
/// embedding the function into another module. Also, any references specified
/// in the VMap are changed to refer to their mapped value instead of the
@@ -188,6 +239,9 @@ Function *llvm::CloneFunction(const Function *F, ValueToValueMapTy &VMap,
VMap[I] = DestI++; // Add mapping to VMap
}
+ if (ModuleLevelChanges)
+ CloneDebugInfoMetadata(NewF, F, VMap);
+
SmallVector<ReturnInst*, 8> Returns; // Ignore returns cloned.
CloneFunctionInto(NewF, F, VMap, ModuleLevelChanges, Returns, "", CodeInfo);
return NewF;
@@ -205,17 +259,17 @@ namespace {
bool ModuleLevelChanges;
const char *NameSuffix;
ClonedCodeInfo *CodeInfo;
- const DataLayout *TD;
+ const DataLayout *DL;
public:
PruningFunctionCloner(Function *newFunc, const Function *oldFunc,
ValueToValueMapTy &valueMap,
bool moduleLevelChanges,
const char *nameSuffix,
ClonedCodeInfo *codeInfo,
- const DataLayout *td)
+ const DataLayout *DL)
: NewFunc(newFunc), OldFunc(oldFunc),
VMap(valueMap), ModuleLevelChanges(moduleLevelChanges),
- NameSuffix(nameSuffix), CodeInfo(codeInfo), TD(td) {
+ NameSuffix(nameSuffix), CodeInfo(codeInfo), DL(DL) {
}
/// CloneBlock - The specified block is found to be reachable, clone it and
@@ -272,7 +326,7 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
// If we can simplify this instruction to some other value, simply add
// a mapping to that value rather than inserting a new instruction into
// the basic block.
- if (Value *V = SimplifyInstruction(NewInst, TD)) {
+ if (Value *V = SimplifyInstruction(NewInst, DL)) {
// On the off-chance that this simplifies to an instruction in the old
// function, map it back into the new function.
if (Value *MappedV = VMap.lookup(V))
@@ -305,7 +359,7 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
// If the condition was a known constant in the callee...
ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
// Or is a known constant in the caller...
- if (Cond == 0) {
+ if (!Cond) {
Value *V = VMap[BI->getCondition()];
Cond = dyn_cast_or_null<ConstantInt>(V);
}
@@ -321,7 +375,7 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
} else if (const SwitchInst *SI = dyn_cast<SwitchInst>(OldTI)) {
// If switching on a value known constant in the caller.
ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition());
- if (Cond == 0) { // Or known constant after constant prop in the callee...
+ if (!Cond) { // Or known constant after constant prop in the callee...
Value *V = VMap[SI->getCondition()];
Cond = dyn_cast_or_null<ConstantInt>(V);
}
@@ -368,7 +422,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix,
ClonedCodeInfo *CodeInfo,
- const DataLayout *TD,
+ const DataLayout *DL,
Instruction *TheCall) {
assert(NameSuffix && "NameSuffix cannot be null!");
@@ -379,7 +433,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
#endif
PruningFunctionCloner PFC(NewFunc, OldFunc, VMap, ModuleLevelChanges,
- NameSuffix, CodeInfo, TD);
+ NameSuffix, CodeInfo, DL);
// Clone the entry block, and anything recursively reachable from it.
std::vector<const BasicBlock*> CloneWorklist;
@@ -400,7 +454,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
BI != BE; ++BI) {
Value *V = VMap[BI];
BasicBlock *NewBB = cast_or_null<BasicBlock>(V);
- if (NewBB == 0) continue; // Dead block.
+ if (!NewBB) continue; // Dead block.
// Add the new block to the new function.
NewFunc->getBasicBlockList().push_back(NewBB);
@@ -509,7 +563,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
// node).
for (unsigned Idx = 0, Size = PHIToResolve.size(); Idx != Size; ++Idx)
if (PHINode *PN = dyn_cast<PHINode>(VMap[PHIToResolve[Idx]]))
- recursivelySimplifyInstruction(PN, TD);
+ recursivelySimplifyInstruction(PN, DL);
// Now that the inlined function body has been fully constructed, go through
// and zap unconditional fall-through branches. This happen all the time when
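With this change, passing ModuleLevelChanges=true to CloneFunction also clones the module-level debug info, so the copy receives its own DISubprogram entry in the compile unit's subprogram list. A minimal driver sketch, assuming F is a Function* in a module built with debug info; as the header comment notes, inserting the copy into the module is the caller's job:

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include "llvm/IR/Module.h"

static llvm::Function *cloneWithDebugInfo(llvm::Function *F) {
  llvm::ValueToValueMapTy VMap;
  // ModuleLevelChanges=true triggers the CloneDebugInfoMetadata path above.
  llvm::Function *Copy =
      llvm::CloneFunction(F, VMap, /*ModuleLevelChanges=*/true);
  // CloneFunction returns a free-standing function; attach it ourselves.
  F->getParent()->getFunctionList().push_back(Copy);
  Copy->setName(F->getName() + ".clone");
  return Copy;
}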
diff --git a/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp b/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp
index 64df089..3f75b3e 100644
--- a/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp
@@ -47,8 +47,8 @@ Module *llvm::CloneModule(const Module *M, ValueToValueMapTy &VMap) {
GlobalVariable *GV = new GlobalVariable(*New,
I->getType()->getElementType(),
I->isConstant(), I->getLinkage(),
- (Constant*) 0, I->getName(),
- (GlobalVariable*) 0,
+ (Constant*) nullptr, I->getName(),
+ (GlobalVariable*) nullptr,
I->getThreadLocalMode(),
I->getType()->getAddressSpace());
GV->copyAttributesFrom(I);
@@ -67,8 +67,10 @@ Module *llvm::CloneModule(const Module *M, ValueToValueMapTy &VMap) {
// Loop over the aliases in the module
for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end();
I != E; ++I) {
- GlobalAlias *GA = new GlobalAlias(I->getType(), I->getLinkage(),
- I->getName(), NULL, New);
+ auto *PTy = cast<PointerType>(I->getType());
+ auto *GA =
+ GlobalAlias::create(PTy->getElementType(), PTy->getAddressSpace(),
+ I->getLinkage(), I->getName(), New);
GA->copyAttributesFrom(I);
VMap[I] = GA;
}
@@ -106,7 +108,7 @@ Module *llvm::CloneModule(const Module *M, ValueToValueMapTy &VMap) {
I != E; ++I) {
GlobalAlias *GA = cast<GlobalAlias>(VMap[I]);
if (const Constant *C = I->getAliasee())
- GA->setAliasee(MapValue(C, VMap));
+ GA->setAliasee(cast<GlobalObject>(MapValue(C, VMap)));
}
// And named metadata....
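For reference, a minimal round-trip through the updated API, assuming M is an existing llvm::Module; the VMap that CloneModule fills in maps every original global (including the aliases created above) to its clone:

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include "llvm/IR/Module.h"

static llvm::Module *cloneAndRemap(llvm::Module &M) {
  llvm::ValueToValueMapTy VMap;
  llvm::Module *Copy = llvm::CloneModule(&M, VMap);
  // Example lookup: find the clone of a global from the original module.
  if (llvm::GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors")) {
    (void)VMap[GV];               // the corresponding global in Copy
  }
  return Copy;
}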
diff --git a/contrib/llvm/lib/Transforms/Utils/CmpInstAnalysis.cpp b/contrib/llvm/lib/Transforms/Utils/CmpInstAnalysis.cpp
index 8fa412a..3b15a0a 100644
--- a/contrib/llvm/lib/Transforms/Utils/CmpInstAnalysis.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CmpInstAnalysis.cpp
@@ -84,7 +84,7 @@ Value *llvm::getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
case 7: // True.
return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
}
- return NULL;
+ return nullptr;
}
/// PredicatesFoldable - Return true if both predicates match sign or if at
diff --git a/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp
index 6f008644..e70a7d6 100644
--- a/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp
@@ -14,20 +14,20 @@
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/CodeExtractor.h"
-#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/RegionIterator.h"
-#include "llvm/Analysis/Verifier.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -38,6 +38,8 @@
#include <set>
using namespace llvm;
+#define DEBUG_TYPE "code-extractor"
+
// Provide a command-line option to aggregate function arguments into a struct
// for functions produced by the code extractor. This is useful when converting
// extracted functions to pthread-based code, as only one argument (void*) can
@@ -86,7 +88,7 @@ static SetVector<BasicBlock *> buildExtractionBlockSet(IteratorT BBBegin,
}
#ifndef NDEBUG
- for (SetVector<BasicBlock *>::iterator I = llvm::next(Result.begin()),
+ for (SetVector<BasicBlock *>::iterator I = std::next(Result.begin()),
E = Result.end();
I != E; ++I)
for (pred_iterator PI = pred_begin(*I), PE = pred_end(*I);
@@ -118,7 +120,7 @@ buildExtractionBlockSet(const RegionNode &RN) {
}
CodeExtractor::CodeExtractor(BasicBlock *BB, bool AggregateArgs)
- : DT(0), AggregateArgs(AggregateArgs||AggregateArgsOpt),
+ : DT(nullptr), AggregateArgs(AggregateArgs||AggregateArgsOpt),
Blocks(buildExtractionBlockSet(BB)), NumExitBlocks(~0U) {}
CodeExtractor::CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT,
@@ -171,9 +173,8 @@ void CodeExtractor::findInputsOutputs(ValueSet &Inputs,
if (definedInCaller(Blocks, *OI))
Inputs.insert(*OI);
- for (Value::use_iterator UI = II->use_begin(), UE = II->use_end();
- UI != UE; ++UI)
- if (!definedInRegion(Blocks, *UI)) {
+ for (User *U : II->users())
+ if (!definedInRegion(Blocks, U)) {
Outputs.insert(II);
break;
}
@@ -369,7 +370,7 @@ Function *CodeExtractor::constructFunction(const ValueSet &inputs,
} else
RewriteVal = AI++;
- std::vector<User*> Users(inputs[i]->use_begin(), inputs[i]->use_end());
+ std::vector<User*> Users(inputs[i]->user_begin(), inputs[i]->user_end());
for (std::vector<User*>::iterator use = Users.begin(), useE = Users.end();
use != useE; ++use)
if (Instruction* inst = dyn_cast<Instruction>(*use))
@@ -389,7 +390,7 @@ Function *CodeExtractor::constructFunction(const ValueSet &inputs,
// Rewrite branches to basic blocks outside of the loop to new dummy blocks
// within the new function. This must be done before we lose track of which
// blocks were originally in the code region.
- std::vector<User*> Users(header->use_begin(), header->use_end());
+ std::vector<User*> Users(header->user_begin(), header->user_end());
for (unsigned i = 0, e = Users.size(); i != e; ++i)
// The BasicBlock which contains the branch is not in the region
// modify the branch target to a new block
@@ -405,14 +406,13 @@ Function *CodeExtractor::constructFunction(const ValueSet &inputs,
/// that uses the value within the basic block, and return the predecessor
/// block associated with that use, or return 0 if none is found.
static BasicBlock* FindPhiPredForUseInBlock(Value* Used, BasicBlock* BB) {
- for (Value::use_iterator UI = Used->use_begin(),
- UE = Used->use_end(); UI != UE; ++UI) {
- PHINode *P = dyn_cast<PHINode>(*UI);
+ for (Use &U : Used->uses()) {
+ PHINode *P = dyn_cast<PHINode>(U.getUser());
if (P && P->getParent() == BB)
- return P->getIncomingBlock(UI);
+ return P->getIncomingBlock(U);
}
-
- return 0;
+
+ return nullptr;
}
/// emitCallAndSwitchStatement - This method sets up the caller side by adding
@@ -440,14 +440,14 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
StructValues.push_back(*i);
} else {
AllocaInst *alloca =
- new AllocaInst((*i)->getType(), 0, (*i)->getName()+".loc",
+ new AllocaInst((*i)->getType(), nullptr, (*i)->getName()+".loc",
codeReplacer->getParent()->begin()->begin());
ReloadOutputs.push_back(alloca);
params.push_back(alloca);
}
}
- AllocaInst *Struct = 0;
+ AllocaInst *Struct = nullptr;
if (AggregateArgs && (inputs.size() + outputs.size() > 0)) {
std::vector<Type*> ArgTypes;
for (ValueSet::iterator v = StructValues.begin(),
@@ -457,7 +457,7 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
// Allocate a struct at the beginning of this function
Type *StructArgTy = StructType::get(newFunction->getContext(), ArgTypes);
Struct =
- new AllocaInst(StructArgTy, 0, "structArg",
+ new AllocaInst(StructArgTy, nullptr, "structArg",
codeReplacer->getParent()->begin()->begin());
params.push_back(Struct);
@@ -486,7 +486,7 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
// Reload the outputs passed in by reference
for (unsigned i = 0, e = outputs.size(); i != e; ++i) {
- Value *Output = 0;
+ Value *Output = nullptr;
if (AggregateArgs) {
Value *Idx[2];
Idx[0] = Constant::getNullValue(Type::getInt32Ty(Context));
@@ -502,7 +502,7 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
LoadInst *load = new LoadInst(Output, outputs[i]->getName()+".reload");
Reloads.push_back(load);
codeReplacer->getInstList().push_back(load);
- std::vector<User*> Users(outputs[i]->use_begin(), outputs[i]->use_end());
+ std::vector<User*> Users(outputs[i]->user_begin(), outputs[i]->user_end());
for (unsigned u = 0, e = Users.size(); u != e; ++u) {
Instruction *inst = cast<Instruction>(Users[u]);
if (!Blocks.count(inst->getParent()))
@@ -539,7 +539,7 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
newFunction);
unsigned SuccNum = switchVal++;
- Value *brVal = 0;
+ Value *brVal = nullptr;
switch (NumExitBlocks) {
case 0:
case 1: break; // No value needed.
@@ -635,7 +635,7 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
// Check if the function should return a value
if (OldFnRetTy->isVoidTy()) {
- ReturnInst::Create(Context, 0, TheSwitch); // Return void
+ ReturnInst::Create(Context, nullptr, TheSwitch); // Return void
} else if (OldFnRetTy == TheSwitch->getCondition()->getType()) {
// return what we have
ReturnInst::Create(Context, TheSwitch->getCondition(), TheSwitch);
@@ -687,7 +687,7 @@ void CodeExtractor::moveCodeToFunction(Function *newFunction) {
Function *CodeExtractor::extractCodeRegion() {
if (!isEligible())
- return 0;
+ return nullptr;
ValueSet inputs, outputs;
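A sketch of how the extractor is typically driven, assuming BB is a block that forms a legal single-entry region to outline; the wrapper name is illustrative only:

#include "llvm/Transforms/Utils/CodeExtractor.h"

static llvm::Function *outlineBlock(llvm::BasicBlock *BB) {
  // AggregateArgs=false passes inputs/outputs as scalar arguments; true packs
  // them into a single struct, as the cl::opt described above explains.
  llvm::CodeExtractor CE(BB, /*AggregateArgs=*/false);
  if (!CE.isEligible())
    return nullptr;
  // Replaces the region in the caller with a call to the new function.
  return CE.extractCodeRegion();
}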
diff --git a/contrib/llvm/lib/Transforms/Utils/CtorUtils.cpp b/contrib/llvm/lib/Transforms/Utils/CtorUtils.cpp
new file mode 100644
index 0000000..a359424
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Utils/CtorUtils.cpp
@@ -0,0 +1,183 @@
+//===- CtorUtils.cpp - Helpers for working with global_ctors ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functions that are used to process llvm.global_ctors.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Utils/CtorUtils.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "ctor_utils"
+
+namespace llvm {
+
+namespace {
+/// Given a specified llvm.global_ctors list, install the
+/// specified array.
+void installGlobalCtors(GlobalVariable *GCL,
+ const std::vector<Function *> &Ctors) {
+ // If we made a change, reassemble the initializer list.
+ Constant *CSVals[3];
+
+ StructType *StructTy =
+ cast<StructType>(GCL->getType()->getElementType()->getArrayElementType());
+
+ // Create the new init list.
+ std::vector<Constant *> CAList;
+ for (Function *F : Ctors) {
+ Type *Int32Ty = Type::getInt32Ty(GCL->getContext());
+ if (F) {
+ CSVals[0] = ConstantInt::get(Int32Ty, 65535);
+ CSVals[1] = F;
+ } else {
+ CSVals[0] = ConstantInt::get(Int32Ty, 0x7fffffff);
+ CSVals[1] = Constant::getNullValue(StructTy->getElementType(1));
+ }
+ // FIXME: Only allow the 3-field form in LLVM 4.0.
+ size_t NumElts = StructTy->getNumElements();
+ if (NumElts > 2)
+ CSVals[2] = Constant::getNullValue(StructTy->getElementType(2));
+ CAList.push_back(
+ ConstantStruct::get(StructTy, makeArrayRef(CSVals, NumElts)));
+ }
+
+ // Create the array initializer.
+ Constant *CA =
+ ConstantArray::get(ArrayType::get(StructTy, CAList.size()), CAList);
+
+ // If we didn't change the number of elements, don't create a new GV.
+ if (CA->getType() == GCL->getInitializer()->getType()) {
+ GCL->setInitializer(CA);
+ return;
+ }
+
+ // Create the new global and insert it next to the existing list.
+ GlobalVariable *NGV =
+ new GlobalVariable(CA->getType(), GCL->isConstant(), GCL->getLinkage(),
+ CA, "", GCL->getThreadLocalMode());
+ GCL->getParent()->getGlobalList().insert(GCL, NGV);
+ NGV->takeName(GCL);
+
+ // Nuke the old list, replacing any uses with the new one.
+ if (!GCL->use_empty()) {
+ Constant *V = NGV;
+ if (V->getType() != GCL->getType())
+ V = ConstantExpr::getBitCast(V, GCL->getType());
+ GCL->replaceAllUsesWith(V);
+ }
+ GCL->eraseFromParent();
+}
+
+/// Given a llvm.global_ctors list that we can understand,
+/// return a list of the functions and null terminator as a vector.
+std::vector<Function*> parseGlobalCtors(GlobalVariable *GV) {
+ if (GV->getInitializer()->isNullValue())
+ return std::vector<Function *>();
+ ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
+ std::vector<Function *> Result;
+ Result.reserve(CA->getNumOperands());
+ for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) {
+ ConstantStruct *CS = cast<ConstantStruct>(*i);
+ Result.push_back(dyn_cast<Function>(CS->getOperand(1)));
+ }
+ return Result;
+}
+
+/// Find the llvm.global_ctors list, verifying that all initializers have an
+/// init priority of 65535.
+GlobalVariable *findGlobalCtors(Module &M) {
+ GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
+ if (!GV)
+ return nullptr;
+
+ // Verify that the initializer is simple enough for us to handle. We are
+ // only allowed to optimize the initializer if it is unique.
+ if (!GV->hasUniqueInitializer())
+ return nullptr;
+
+ if (isa<ConstantAggregateZero>(GV->getInitializer()))
+ return GV;
+ ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
+
+ for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) {
+ if (isa<ConstantAggregateZero>(*i))
+ continue;
+ ConstantStruct *CS = cast<ConstantStruct>(*i);
+ if (isa<ConstantPointerNull>(CS->getOperand(1)))
+ continue;
+
+ // Must have a function or null ptr.
+ if (!isa<Function>(CS->getOperand(1)))
+ return nullptr;
+
+ // Init priority must be standard.
+ ConstantInt *CI = cast<ConstantInt>(CS->getOperand(0));
+ if (CI->getZExtValue() != 65535)
+ return nullptr;
+ }
+
+ return GV;
+}
+} // namespace
+
+/// Call "ShouldRemove" for every entry in M's global_ctor list and remove the
+/// entries for which it returns true. Return true if anything changed.
+bool optimizeGlobalCtorsList(Module &M,
+ function_ref<bool(Function *)> ShouldRemove) {
+ GlobalVariable *GlobalCtors = findGlobalCtors(M);
+ if (!GlobalCtors)
+ return false;
+
+ std::vector<Function *> Ctors = parseGlobalCtors(GlobalCtors);
+ if (Ctors.empty())
+ return false;
+
+ bool MadeChange = false;
+
+ // Loop over global ctors, optimizing them when we can.
+ for (unsigned i = 0; i != Ctors.size(); ++i) {
+ Function *F = Ctors[i];
+ // Found a null terminator in the middle of the list, prune off the rest of
+ // the list.
+ if (!F) {
+ if (i != Ctors.size() - 1) {
+ Ctors.resize(i + 1);
+ MadeChange = true;
+ }
+ break;
+ }
+ DEBUG(dbgs() << "Optimizing Global Constructor: " << *F << "\n");
+
+ // We cannot simplify external ctor functions.
+ if (F->empty())
+ continue;
+
+ // If we can evaluate the ctor at compile time, do.
+ if (ShouldRemove(F)) {
+ Ctors.erase(Ctors.begin() + i);
+ MadeChange = true;
+ --i;
+ continue;
+ }
+ }
+
+ if (!MadeChange)
+ return false;
+
+ installGlobalCtors(GlobalCtors, Ctors);
+ return true;
+}
+
+} // End llvm namespace
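A hypothetical caller for the new helper; the predicate and its emptiness test are illustrative, but the entry point and its function_ref signature are the ones defined above:

#include "llvm/Transforms/Utils/CtorUtils.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

static bool dropTrivialCtors(llvm::Module &M) {
  // Ask optimizeGlobalCtorsList to delete ctors whose body is a bare "ret void".
  return llvm::optimizeGlobalCtorsList(M, [](llvm::Function *F) {
    if (F->getBasicBlockList().size() != 1)
      return false;
    llvm::BasicBlock &Entry = F->getEntryBlock();
    return Entry.size() == 1 &&
           llvm::isa<llvm::ReturnInst>(Entry.getTerminator());
  });
}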
diff --git a/contrib/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp b/contrib/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp
index 0723b35..9972b22 100644
--- a/contrib/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp
@@ -8,12 +8,12 @@
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
+#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
/// DemoteRegToStack - This function takes a virtual register computed by an
@@ -25,23 +25,23 @@ AllocaInst *llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
Instruction *AllocaPoint) {
if (I.use_empty()) {
I.eraseFromParent();
- return 0;
+ return nullptr;
}
// Create a stack slot to hold the value.
AllocaInst *Slot;
if (AllocaPoint) {
- Slot = new AllocaInst(I.getType(), 0,
+ Slot = new AllocaInst(I.getType(), nullptr,
I.getName()+".reg2mem", AllocaPoint);
} else {
Function *F = I.getParent()->getParent();
- Slot = new AllocaInst(I.getType(), 0, I.getName()+".reg2mem",
+ Slot = new AllocaInst(I.getType(), nullptr, I.getName()+".reg2mem",
F->getEntryBlock().begin());
}
// Change all of the users of the instruction to read from the stack slot.
while (!I.use_empty()) {
- Instruction *U = cast<Instruction>(I.use_back());
+ Instruction *U = cast<Instruction>(I.user_back());
if (PHINode *PN = dyn_cast<PHINode>(U)) {
// If this is a PHI node, we can't insert a load of the value before the
// use. Instead insert the load in the predecessor block corresponding
@@ -56,7 +56,7 @@ AllocaInst *llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
if (PN->getIncomingValue(i) == &I) {
Value *&V = Loads[PN->getIncomingBlock(i)];
- if (V == 0) {
+ if (!V) {
// Insert the load into the predecessor block
V = new LoadInst(Slot, I.getName()+".reload", VolatileLoads,
PN->getIncomingBlock(i)->getTerminator());
@@ -110,17 +110,17 @@ AllocaInst *llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
AllocaInst *llvm::DemotePHIToStack(PHINode *P, Instruction *AllocaPoint) {
if (P->use_empty()) {
P->eraseFromParent();
- return 0;
+ return nullptr;
}
// Create a stack slot to hold the value.
AllocaInst *Slot;
if (AllocaPoint) {
- Slot = new AllocaInst(P->getType(), 0,
+ Slot = new AllocaInst(P->getType(), nullptr,
P->getName()+".reg2mem", AllocaPoint);
} else {
Function *F = P->getParent()->getParent();
- Slot = new AllocaInst(P->getType(), 0, P->getName()+".reg2mem",
+ Slot = new AllocaInst(P->getType(), nullptr, P->getName()+".reg2mem",
F->getEntryBlock().begin());
}
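Both entry points are typically used from reg2mem-style passes; a minimal sketch, with the surrounding pass machinery assumed to exist in the caller:

#include "llvm/Transforms/Utils/Local.h"
#include "llvm/IR/Instructions.h"

static void demote(llvm::Instruction &I) {
  if (llvm::PHINode *PN = llvm::dyn_cast<llvm::PHINode>(&I)) {
    // PHIs get their own helper so the store lands after the PHI cluster.
    llvm::DemotePHIToStack(PN, /*AllocaPoint=*/nullptr);
    return;
  }
  // Every use of I is rewritten into a reload from the returned stack slot.
  llvm::DemoteRegToStack(I, /*VolatileLoads=*/false, /*AllocaPoint=*/nullptr);
}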
diff --git a/contrib/llvm/lib/Transforms/Utils/FlattenCFG.cpp b/contrib/llvm/lib/Transforms/Utils/FlattenCFG.cpp
index 1da226b..51ead40 100644
--- a/contrib/llvm/lib/Transforms/Utils/FlattenCFG.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/FlattenCFG.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "flattencfg"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
@@ -22,16 +21,19 @@
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;
+#define DEBUG_TYPE "flattencfg"
+
namespace {
class FlattenCFGOpt {
AliasAnalysis *AA;
/// \brief Use parallel-and or parallel-or to generate conditions for
/// conditional branches.
- bool FlattenParallelAndOr(BasicBlock *BB, IRBuilder<> &Builder, Pass *P = 0);
+ bool FlattenParallelAndOr(BasicBlock *BB, IRBuilder<> &Builder,
+ Pass *P = nullptr);
/// \brief If \param BB is the merge block of an if-region, attempt to merge
/// the if-region with an adjacent if-region upstream if two if-regions
/// contain identical instructions.
- bool MergeIfRegion(BasicBlock *BB, IRBuilder<> &Builder, Pass *P = 0);
+ bool MergeIfRegion(BasicBlock *BB, IRBuilder<> &Builder, Pass *P = nullptr);
/// \brief Compare a pair of blocks: \p Block1 and \p Block2, which
/// are from two if-regions whose entry blocks are \p Head1 and \p
/// Head2. \returns true if \p Block1 and \p Block2 contain identical
@@ -126,9 +128,9 @@ bool FlattenCFGOpt::FlattenParallelAndOr(BasicBlock *BB, IRBuilder<> &Builder,
if (PHI)
return false; // For simplicity, avoid cases containing PHI nodes.
- BasicBlock *LastCondBlock = NULL;
- BasicBlock *FirstCondBlock = NULL;
- BasicBlock *UnCondBlock = NULL;
+ BasicBlock *LastCondBlock = nullptr;
+ BasicBlock *FirstCondBlock = nullptr;
+ BasicBlock *UnCondBlock = nullptr;
int Idx = -1;
// Check predecessors of \param BB.
@@ -240,7 +242,7 @@ bool FlattenCFGOpt::FlattenParallelAndOr(BasicBlock *BB, IRBuilder<> &Builder,
BranchInst *BI = dyn_cast<BranchInst>(CurrBlock->getTerminator());
CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition());
CmpInst::Predicate Predicate = CI->getPredicate();
- // Cannonicalize icmp_ne -> icmp_eq, fcmp_one -> fcmp_oeq
+ // Canonicalize icmp_ne -> icmp_eq, fcmp_one -> fcmp_oeq
if ((Predicate == CmpInst::ICMP_NE) || (Predicate == CmpInst::FCMP_ONE)) {
CI->setPredicate(ICmpInst::getInversePredicate(Predicate));
BI->swapSuccessors();
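A hedged usage sketch, assuming the freestanding llvm::FlattenCFG helper declared in Local.h that wraps FlattenCFGOpt; the AliasAnalysis pointer is optional, and because a successful flatten can erase neighbouring blocks, re-scanning the function after a change is the caller's responsibility:

#include "llvm/Transforms/Utils/Local.h"

static bool flattenOnce(llvm::BasicBlock *BB, llvm::AliasAnalysis *AA) {
  // Returns true if BB's predecessors were merged into a parallel and/or chain
  // or an adjacent if-region was folded into this one.
  return llvm::FlattenCFG(BB, AA);
}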
diff --git a/contrib/llvm/lib/Transforms/Utils/GlobalStatus.cpp b/contrib/llvm/lib/Transforms/Utils/GlobalStatus.cpp
index 5f0a563..12057e4 100644
--- a/contrib/llvm/lib/Transforms/Utils/GlobalStatus.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/GlobalStatus.cpp
@@ -9,9 +9,9 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
using namespace llvm;
@@ -35,9 +35,8 @@ bool llvm::isSafeToDestroyConstant(const Constant *C) {
if (isa<GlobalValue>(C))
return false;
- for (Value::const_use_iterator UI = C->use_begin(), E = C->use_end(); UI != E;
- ++UI)
- if (const Constant *CU = dyn_cast<Constant>(*UI)) {
+ for (const User *U : C->users())
+ if (const Constant *CU = dyn_cast<Constant>(U)) {
if (!isSafeToDestroyConstant(CU))
return false;
} else
@@ -47,10 +46,9 @@ bool llvm::isSafeToDestroyConstant(const Constant *C) {
static bool analyzeGlobalAux(const Value *V, GlobalStatus &GS,
SmallPtrSet<const PHINode *, 16> &PhiUsers) {
- for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
- ++UI) {
- const User *U = *UI;
- if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
+ for (const Use &U : V->uses()) {
+ const User *UR = U.getUser();
+ if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(UR)) {
GS.HasNonInstructionUser = true;
// If the result of the constantexpr isn't pointer type, then we won't
@@ -60,10 +58,10 @@ static bool analyzeGlobalAux(const Value *V, GlobalStatus &GS,
if (analyzeGlobalAux(CE, GS, PhiUsers))
return true;
- } else if (const Instruction *I = dyn_cast<Instruction>(U)) {
+ } else if (const Instruction *I = dyn_cast<Instruction>(UR)) {
if (!GS.HasMultipleAccessingFunctions) {
const Function *F = I->getParent()->getParent();
- if (GS.AccessingFunction == 0)
+ if (!GS.AccessingFunction)
GS.AccessingFunction = F;
else if (GS.AccessingFunction != F)
GS.HasMultipleAccessingFunctions = true;
@@ -150,13 +148,13 @@ static bool analyzeGlobalAux(const Value *V, GlobalStatus &GS,
return true;
GS.StoredType = GlobalStatus::Stored;
} else if (ImmutableCallSite C = I) {
- if (!C.isCallee(UI))
+ if (!C.isCallee(&U))
return true;
GS.IsLoaded = true;
} else {
return true; // Any other non-load instruction might take address!
}
- } else if (const Constant *C = dyn_cast<Constant>(U)) {
+ } else if (const Constant *C = dyn_cast<Constant>(UR)) {
GS.HasNonInstructionUser = true;
// We might have a dead and dangling constant hanging off of here.
if (!isSafeToDestroyConstant(C))
@@ -178,6 +176,6 @@ bool GlobalStatus::analyzeGlobal(const Value *V, GlobalStatus &GS) {
GlobalStatus::GlobalStatus()
: IsCompared(false), IsLoaded(false), StoredType(NotStored),
- StoredOnceValue(0), AccessingFunction(0),
+ StoredOnceValue(nullptr), AccessingFunction(nullptr),
HasMultipleAccessingFunctions(false), HasNonInstructionUser(false),
Ordering(NotAtomic) {}
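A sketch of how the analysis is consumed, assuming GV is a GlobalVariable* supplied by the caller; the specific predicate is illustrative:

#include "llvm/Transforms/Utils/GlobalStatus.h"
#include "llvm/IR/GlobalVariable.h"

static bool isAddressNeverWritten(const llvm::GlobalVariable *GV) {
  llvm::GlobalStatus GS;
  // analyzeGlobal returns true when it has to give up (address escapes, etc.).
  if (llvm::GlobalStatus::analyzeGlobal(GV, GS))
    return false;
  return GS.StoredType == llvm::GlobalStatus::NotStored && !GS.IsCompared;
}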
diff --git a/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp b/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
index d021bce..f0a9f2b 100644
--- a/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -17,17 +17,18 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
@@ -51,8 +52,8 @@ namespace {
public:
InvokeInliningInfo(InvokeInst *II)
- : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(0),
- CallerLPad(0), InnerEHValuesPHI(0) {
+ : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
+ CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
// If there are PHI nodes in the unwind destination block, we need to keep
// track of which values came into them from the invoke before removing
// the edge from this block.
@@ -144,7 +145,6 @@ BasicBlock *InvokeInliningInfo::getInnerResumeDest() {
void InvokeInliningInfo::forwardResume(ResumeInst *RI,
SmallPtrSet<LandingPadInst*, 16> &InlinedLPads) {
BasicBlock *Dest = getInnerResumeDest();
- LandingPadInst *OuterLPad = getLandingPadInst();
BasicBlock *Src = RI->getParent();
BranchInst::Create(Dest, Src);
@@ -155,16 +155,6 @@ void InvokeInliningInfo::forwardResume(ResumeInst *RI,
InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
RI->eraseFromParent();
-
- // Append the clauses from the outer landing pad instruction into the inlined
- // landing pad instructions.
- for (SmallPtrSet<LandingPadInst*, 16>::iterator I = InlinedLPads.begin(),
- E = InlinedLPads.end(); I != E; ++I) {
- LandingPadInst *InlinedLPad = *I;
- for (unsigned OuterIdx = 0, OuterNum = OuterLPad->getNumClauses();
- OuterIdx != OuterNum; ++OuterIdx)
- InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
- }
}
/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
@@ -172,22 +162,11 @@ void InvokeInliningInfo::forwardResume(ResumeInst *RI,
/// invokes. This function analyze BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
-///
-/// Returns true to indicate that the next block should be skipped.
-static bool HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
+static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
InvokeInliningInfo &Invoke) {
- LandingPadInst *LPI = Invoke.getLandingPadInst();
-
for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
Instruction *I = BBI++;
- if (LandingPadInst *L = dyn_cast<LandingPadInst>(I)) {
- unsigned NumClauses = LPI->getNumClauses();
- L->reserveClauses(NumClauses);
- for (unsigned i = 0; i != NumClauses; ++i)
- L->addClause(LPI->getClause(i));
- }
-
// We only need to check for function calls: inlined invoke
// instructions require no special handling.
CallInst *CI = dyn_cast<CallInst>(I);
@@ -210,6 +189,7 @@ static bool HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
Invoke.getOuterResumeDest(),
InvokeArgs, CI->getName(), BB);
+ II->setDebugLoc(CI->getDebugLoc());
II->setCallingConv(CI->getCallingConv());
II->setAttributes(CI->getAttributes());
@@ -223,10 +203,8 @@ static bool HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
// Update any PHI nodes in the exceptional block to indicate that there is
// now a new entry in them.
Invoke.addIncomingPHIValuesFor(BB);
- return false;
+ return;
}
-
- return false;
}
/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
@@ -252,13 +230,23 @@ static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
InlinedLPads.insert(II->getLandingPadInst());
+ // Append the clauses from the outer landing pad instruction into the inlined
+ // landing pad instructions.
+ LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
+ for (SmallPtrSet<LandingPadInst*, 16>::iterator I = InlinedLPads.begin(),
+ E = InlinedLPads.end(); I != E; ++I) {
+ LandingPadInst *InlinedLPad = *I;
+ unsigned OuterNum = OuterLPad->getNumClauses();
+ InlinedLPad->reserveClauses(OuterNum);
+ for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
+ InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
+ if (OuterLPad->isCleanup())
+ InlinedLPad->setCleanup(true);
+ }
+
for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB){
if (InlinedCodeInfo.ContainsCalls)
- if (HandleCallsInBlockInlinedThroughInvoke(BB, Invoke)) {
- // Honor a request to skip the next block.
- ++BB;
- continue;
- }
+ HandleCallsInBlockInlinedThroughInvoke(BB, Invoke);
// Forward any resumes that are remaining here.
if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
@@ -303,13 +291,13 @@ static void UpdateCallGraphAfterInlining(CallSite CS,
ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
// Only copy the edge if the call was inlined!
- if (VMI == VMap.end() || VMI->second == 0)
+ if (VMI == VMap.end() || VMI->second == nullptr)
continue;
// If the call was inlined, but then constant folded, there is no edge to
// add. Check for this case.
Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
- if (NewCall == 0) continue;
+ if (!NewCall) continue;
// Remember that this call site got inlined for the client of
// InlineFunction.
@@ -320,7 +308,7 @@ static void UpdateCallGraphAfterInlining(CallSite CS,
// happens, set the callee of the new call site to a more precise
// destination. This can also happen if the call graph node of the caller
// was just unnecessarily imprecise.
- if (I->second->getFunction() == 0)
+ if (!I->second->getFunction())
if (Function *F = CallSite(NewCall).getCalledFunction()) {
// Indirect call site resolved to direct call.
CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);
@@ -336,13 +324,44 @@ static void UpdateCallGraphAfterInlining(CallSite CS,
CallerNode->removeCallEdgeFor(CS);
}
+static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
+ BasicBlock *InsertBlock,
+ InlineFunctionInfo &IFI) {
+ LLVMContext &Context = Src->getContext();
+ Type *VoidPtrTy = Type::getInt8PtrTy(Context);
+ Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
+ Type *Tys[3] = { VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context) };
+ Function *MemCpyFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy, Tys);
+ IRBuilder<> builder(InsertBlock->begin());
+ Value *DstCast = builder.CreateBitCast(Dst, VoidPtrTy, "tmp");
+ Value *SrcCast = builder.CreateBitCast(Src, VoidPtrTy, "tmp");
+
+ Value *Size;
+ if (IFI.DL == nullptr)
+ Size = ConstantExpr::getSizeOf(AggTy);
+ else
+ Size = ConstantInt::get(Type::getInt64Ty(Context),
+ IFI.DL->getTypeStoreSize(AggTy));
+
+ // Always generate a memcpy of alignment 1 here because we don't know
+ // the alignment of the src pointer. Other optimizations can infer
+ // better alignment.
+ Value *CallArgs[] = {
+ DstCast, SrcCast, Size,
+ ConstantInt::get(Type::getInt32Ty(Context), 1),
+ ConstantInt::getFalse(Context) // isVolatile
+ };
+ builder.CreateCall(MemCpyFn, CallArgs);
+}
+
/// HandleByValArgument - When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
const Function *CalledFunc,
InlineFunctionInfo &IFI,
unsigned ByValAlignment) {
- Type *AggTy = cast<PointerType>(Arg->getType())->getElementType();
+ PointerType *ArgTy = cast<PointerType>(Arg->getType());
+ Type *AggTy = ArgTy->getElementType();
// If the called function is readonly, then it could not mutate the caller's
// copy of the byval'd memory. In this case, it is safe to elide the copy and
@@ -357,21 +376,17 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
// If the pointer is already known to be sufficiently aligned, or if we can
// round it up to a larger alignment, then we don't need a temporary.
if (getOrEnforceKnownAlignment(Arg, ByValAlignment,
- IFI.TD) >= ByValAlignment)
+ IFI.DL) >= ByValAlignment)
return Arg;
// Otherwise, we have to make a memcpy to get a safe alignment. This is bad
// for code quality, but rarely happens and is required for correctness.
}
-
- LLVMContext &Context = Arg->getContext();
- Type *VoidPtrTy = Type::getInt8PtrTy(Context);
-
// Create the alloca. If we have DataLayout, use nice alignment.
unsigned Align = 1;
- if (IFI.TD)
- Align = IFI.TD->getPrefTypeAlignment(AggTy);
+ if (IFI.DL)
+ Align = IFI.DL->getPrefTypeAlignment(AggTy);
// If the byval had an alignment specified, we *must* use at least that
// alignment, as it is required by the byval argument (and uses of the
@@ -380,32 +395,9 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
Function *Caller = TheCall->getParent()->getParent();
- Value *NewAlloca = new AllocaInst(AggTy, 0, Align, Arg->getName(),
+ Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
&*Caller->begin()->begin());
- // Emit a memcpy.
- Type *Tys[3] = {VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context)};
- Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
- Intrinsic::memcpy,
- Tys);
- Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
- Value *SrcCast = new BitCastInst(Arg, VoidPtrTy, "tmp", TheCall);
-
- Value *Size;
- if (IFI.TD == 0)
- Size = ConstantExpr::getSizeOf(AggTy);
- else
- Size = ConstantInt::get(Type::getInt64Ty(Context),
- IFI.TD->getTypeStoreSize(AggTy));
-
- // Always generate a memcpy of alignment 1 here because we don't know
- // the alignment of the src pointer. Other optimizations can infer
- // better alignment.
- Value *CallArgs[] = {
- DestCast, SrcCast, Size,
- ConstantInt::get(Type::getInt32Ty(Context), 1),
- ConstantInt::getFalse(Context) // isVolatile
- };
- IRBuilder<>(TheCall).CreateCall(MemCpyFn, CallArgs);
+ IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
// Uses of the argument in the function should use our new alloca
// instead.
@@ -415,9 +407,8 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
// isUsedByLifetimeMarker - Check whether this Value is used by a lifetime
// intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
- for (Value::use_iterator UI = V->use_begin(), UE = V->use_end(); UI != UE;
- ++UI) {
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*UI)) {
+ for (User *U : V->users()) {
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
switch (II->getIntrinsicID()) {
default: break;
case Intrinsic::lifetime_start:
@@ -432,16 +423,17 @@ static bool isUsedByLifetimeMarker(Value *V) {
// hasLifetimeMarkers - Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
- Type *Int8PtrTy = Type::getInt8PtrTy(AI->getType()->getContext());
- if (AI->getType() == Int8PtrTy)
+ Type *Ty = AI->getType();
+ Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
+ Ty->getPointerAddressSpace());
+ if (Ty == Int8PtrTy)
return isUsedByLifetimeMarker(AI);
// Do a scan to find all the casts to i8*.
- for (Value::use_iterator I = AI->use_begin(), E = AI->use_end(); I != E;
- ++I) {
- if (I->getType() != Int8PtrTy) continue;
- if (I->stripPointerCasts() != AI) continue;
- if (isUsedByLifetimeMarker(*I))
+ for (User *U : AI->users()) {
+ if (U->getType() != Int8PtrTy) continue;
+ if (U->stripPointerCasts() != AI) continue;
+ if (isUsedByLifetimeMarker(U))
return true;
}
return false;
@@ -475,7 +467,13 @@ static void fixupLineNumbers(Function *Fn, Function::iterator FI,
for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
BI != BE; ++BI) {
DebugLoc DL = BI->getDebugLoc();
- if (!DL.isUnknown()) {
+ if (DL.isUnknown()) {
+ // If the inlined instruction has no line number, make it look as if it
+ // originates from the call location. This is important for
+ // ((__always_inline__, __nodebug__)) functions which must use caller
+ // location for all instructions in their function body.
+ BI->setDebugLoc(TheCallDL);
+ } else {
BI->setDebugLoc(updateInlinedAtInfo(DL, TheCallDL, BI->getContext()));
if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(BI)) {
LLVMContext &Ctx = BI->getContext();
@@ -488,6 +486,33 @@ static void fixupLineNumbers(Function *Fn, Function::iterator FI,
}
}
+/// Returns a musttail call instruction if one immediately precedes the given
+/// return instruction with an optional bitcast instruction between them.
+static CallInst *getPrecedingMustTailCall(ReturnInst *RI) {
+ Instruction *Prev = RI->getPrevNode();
+ if (!Prev)
+ return nullptr;
+
+ if (Value *RV = RI->getReturnValue()) {
+ if (RV != Prev)
+ return nullptr;
+
+ // Look through the optional bitcast.
+ if (auto *BI = dyn_cast<BitCastInst>(Prev)) {
+ RV = BI->getOperand(0);
+ Prev = BI->getPrevNode();
+ if (!Prev || RV != Prev)
+ return nullptr;
+ }
+ }
+
+ if (auto *CI = dyn_cast<CallInst>(Prev)) {
+ if (CI->isMustTailCall())
+ return CI;
+ }
+ return nullptr;
+}
+
/// InlineFunction - This function inlines the called function into the basic
/// block of the caller. This returns false if it is not possible to inline
/// this call. The program is still in a well defined state if this occurs
@@ -507,15 +532,10 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
IFI.reset();
const Function *CalledFunc = CS.getCalledFunction();
- if (CalledFunc == 0 || // Can't inline external function or indirect
+ if (!CalledFunc || // Can't inline external function or indirect
CalledFunc->isDeclaration() || // call, or call to a vararg function!
CalledFunc->getFunctionType()->isVarArg()) return false;
- // If the call to the callee is not a tail call, we must clear the 'tail'
- // flags on any calls that we inline.
- bool MustClearTailCallFlags =
- !(isa<CallInst>(TheCall) && cast<CallInst>(TheCall)->isTailCall());
-
// If the call to the callee cannot throw, set the 'nounwind' flag on any
// calls that we inline.
bool MarkNoUnwind = CS.doesNotThrow();
@@ -535,7 +555,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
}
// Get the personality function from the callee if it contains a landing pad.
- Value *CalleePersonality = 0;
+ Value *CalleePersonality = nullptr;
for (Function::const_iterator I = CalledFunc->begin(), E = CalledFunc->end();
I != E; ++I)
if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
@@ -578,6 +598,8 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
{ // Scope to destroy VMap after cloning.
ValueToValueMapTy VMap;
+ // Keep a list of pair (dst, src) to emit byval initializations.
+ SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
assert(CalledFunc->arg_size() == CS.arg_size() &&
"No varargs calls can be inlined!");
@@ -597,11 +619,8 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
if (CS.isByValArgument(ArgNo)) {
ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
CalledFunc->getParamAlignment(ArgNo+1));
-
- // Calls that we inline may use the new alloca, so we need to clear
- // their 'tail' flags if HandleByValArgument introduced a new alloca and
- // the callee has calls.
- MustClearTailCallFlags |= ActualArg != *AI;
+ if (ActualArg != *AI)
+ ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
}
VMap[I] = ActualArg;
@@ -613,11 +632,16 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
// happy with whatever the cloner can do.
CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
/*ModuleLevelChanges=*/false, Returns, ".i",
- &InlinedFunctionInfo, IFI.TD, TheCall);
+ &InlinedFunctionInfo, IFI.DL, TheCall);
// Remember the first block that is newly cloned over.
FirstNewBlock = LastBlock; ++FirstNewBlock;
+ // Inject byval arguments initialization.
+ for (std::pair<Value*, Value*> &Init : ByValInit)
+ HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
+ FirstNewBlock, IFI);
+
// Update the callgraph if requested.
if (IFI.CG)
UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
@@ -635,7 +659,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
for (BasicBlock::iterator I = FirstNewBlock->begin(),
E = FirstNewBlock->end(); I != E; ) {
AllocaInst *AI = dyn_cast<AllocaInst>(I++);
- if (AI == 0) continue;
+ if (!AI) continue;
// If the alloca is now dead, remove it. This often occurs due to code
// specialization.
@@ -667,6 +691,45 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
}
}
+ bool InlinedMustTailCalls = false;
+ if (InlinedFunctionInfo.ContainsCalls) {
+ CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
+ if (CallInst *CI = dyn_cast<CallInst>(TheCall))
+ CallSiteTailKind = CI->getTailCallKind();
+
+ for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
+ ++BB) {
+ for (Instruction &I : *BB) {
+ CallInst *CI = dyn_cast<CallInst>(&I);
+ if (!CI)
+ continue;
+
+ // We need to reduce the strength of any inlined tail calls. For
+ // musttail, we have to avoid introducing potential unbounded stack
+ // growth. For example, if functions 'f' and 'g' are mutually recursive
+ // with musttail, we can inline 'g' into 'f' so long as we preserve
+ // musttail on the cloned call to 'f'. If either the inlined call site
+ // or the cloned call site is *not* musttail, the program already has
+ // one frame of stack growth, so it's safe to remove musttail. Here is
+ // a table of example transformations:
+ //
+ // f -> musttail g -> musttail f ==> f -> musttail f
+ // f -> musttail g -> tail f ==> f -> tail f
+ // f -> g -> musttail f ==> f -> f
+ // f -> g -> tail f ==> f -> f
+ CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
+ ChildTCK = std::min(CallSiteTailKind, ChildTCK);
+ CI->setTailCallKind(ChildTCK);
+ InlinedMustTailCalls |= CI->isMustTailCall();
+
+ // Calls inlined through a 'nounwind' call site should be marked
+ // 'nounwind'.
+ if (MarkNoUnwind)
+ CI->setDoesNotThrow();
+ }
+ }
+ }
+
// Leave lifetime markers for the static alloca's, scoping them to the
// function we just inlined.
if (InsertLifetime && !IFI.StaticAllocas.empty()) {
@@ -680,12 +743,12 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
continue;
// Try to determine the size of the allocation.
- ConstantInt *AllocaSize = 0;
+ ConstantInt *AllocaSize = nullptr;
if (ConstantInt *AIArraySize =
dyn_cast<ConstantInt>(AI->getArraySize())) {
- if (IFI.TD) {
+ if (IFI.DL) {
Type *AllocaType = AI->getAllocatedType();
- uint64_t AllocaTypeSize = IFI.TD->getTypeAllocSize(AllocaType);
+ uint64_t AllocaTypeSize = IFI.DL->getTypeAllocSize(AllocaType);
uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
// Check that array size doesn't saturate uint64_t and doesn't
@@ -699,9 +762,12 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
}
builder.CreateLifetimeStart(AI, AllocaSize);
- for (unsigned ri = 0, re = Returns.size(); ri != re; ++ri) {
- IRBuilder<> builder(Returns[ri]);
- builder.CreateLifetimeEnd(AI, AllocaSize);
+ for (ReturnInst *RI : Returns) {
+ // Don't insert llvm.lifetime.end calls between a musttail call and a
+ // return. The return kills all local allocas.
+ if (InlinedMustTailCalls && getPrecedingMustTailCall(RI))
+ continue;
+ IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
}
}
}
@@ -720,33 +786,56 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
// Insert a call to llvm.stackrestore before any return instructions in the
// inlined function.
- for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
- IRBuilder<>(Returns[i]).CreateCall(StackRestore, SavedPtr);
+ for (ReturnInst *RI : Returns) {
+ // Don't insert llvm.stackrestore calls between a musttail call and a
+ // return. The return will restore the stack pointer.
+ if (InlinedMustTailCalls && getPrecedingMustTailCall(RI))
+ continue;
+ IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
}
}
- // If we are inlining tail call instruction through a call site that isn't
- // marked 'tail', we must remove the tail marker for any calls in the inlined
- // code. Also, calls inlined through a 'nounwind' call site should be marked
- // 'nounwind'.
- if (InlinedFunctionInfo.ContainsCalls &&
- (MustClearTailCallFlags || MarkNoUnwind)) {
- for (Function::iterator BB = FirstNewBlock, E = Caller->end();
- BB != E; ++BB)
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
- if (CallInst *CI = dyn_cast<CallInst>(I)) {
- if (MustClearTailCallFlags)
- CI->setTailCall(false);
- if (MarkNoUnwind)
- CI->setDoesNotThrow();
- }
- }
-
// If we are inlining for an invoke instruction, we must make sure to rewrite
// any call instructions into invoke instructions.
if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);
+ // Handle any inlined musttail call sites. In order for a new call site to be
+ // musttail, the source of the clone and the inlined call site must have been
+ // musttail. Therefore it's safe to return without merging control into the
+ // phi below.
+ if (InlinedMustTailCalls) {
+ // Check if we need to bitcast the result of any musttail calls.
+ Type *NewRetTy = Caller->getReturnType();
+ bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;
+
+ // Handle the returns preceded by musttail calls separately.
+ SmallVector<ReturnInst *, 8> NormalReturns;
+ for (ReturnInst *RI : Returns) {
+ CallInst *ReturnedMustTail = getPrecedingMustTailCall(RI);
+ if (!ReturnedMustTail) {
+ NormalReturns.push_back(RI);
+ continue;
+ }
+ if (!NeedBitCast)
+ continue;
+
+ // Delete the old return and any preceding bitcast.
+ BasicBlock *CurBB = RI->getParent();
+ auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
+ RI->eraseFromParent();
+ if (OldCast)
+ OldCast->eraseFromParent();
+
+ // Insert a new bitcast and return with the right type.
+ IRBuilder<> Builder(CurBB);
+ Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
+ }
+
+ // Leave behind the normal returns so we can merge control flow.
+ std::swap(Returns, NormalReturns);
+ }
+
// If we cloned in _exactly one_ basic block, and if that block ends in a
// return instruction, we splice the body of the inlined callee directly into
// the calling basic block.
@@ -790,7 +879,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
// "starter" and "ender" blocks. How we accomplish this depends on whether
// this is an invoke instruction or a call instruction.
BasicBlock *AfterCallBB;
- BranchInst *CreatedBranchToNormalDest = NULL;
+ BranchInst *CreatedBranchToNormalDest = nullptr;
if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
// Add an unconditional branch to make this look like the CallInst case...
@@ -829,7 +918,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
// any users of the original call/invoke instruction.
Type *RTy = CalledFunc->getReturnType();
- PHINode *PHI = 0;
+ PHINode *PHI = nullptr;
if (Returns.size() > 1) {
// The PHI node should go at the front of the new basic block to merge all
// possible incoming values.
@@ -902,6 +991,11 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
// Since we are now done with the Call/Invoke, we can delete it.
TheCall->eraseFromParent();
+ // If we inlined any musttail calls and the original return is now
+ // unreachable, delete it. It can only contain a bitcast and ret.
+ if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
+ AfterCallBB->eraseFromParent();
+
// We should always be able to fold the entry block of the function into the
// single predecessor of the block...
assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
@@ -922,7 +1016,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
// the entries are the same or undef). If so, remove the PHI so it doesn't
// block other optimizations.
if (PHI) {
- if (Value *V = SimplifyInstruction(PHI, IFI.TD)) {
+ if (Value *V = SimplifyInstruction(PHI, IFI.DL)) {
PHI->replaceAllUsesWith(V);
PHI->eraseFromParent();
}
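A sketch of driving the inliner utility directly, assuming CS is a CallSite whose callee is a definition and DL is the module's DataLayout, both supplied by the caller; after the rename above, the data-layout member is IFI.DL rather than IFI.TD:

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/IR/CallSite.h"

static bool inlineOneSite(llvm::CallSite CS, const llvm::DataLayout *DL) {
  llvm::InlineFunctionInfo IFI(/*CG=*/nullptr, DL);
  if (!llvm::InlineFunction(CS, IFI, /*InsertLifetime=*/true))
    return false;
  // IFI.StaticAllocas now lists the allocas the inliner introduced, e.g. the
  // byval copies created by HandleByValArgument, ready for later promotion.
  return true;
}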
diff --git a/contrib/llvm/lib/Transforms/Utils/InstructionNamer.cpp b/contrib/llvm/lib/Transforms/Utils/InstructionNamer.cpp
index a020bc7..da890a2 100644
--- a/contrib/llvm/lib/Transforms/Utils/InstructionNamer.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/InstructionNamer.cpp
@@ -27,11 +27,11 @@ namespace {
initializeInstNamerPass(*PassRegistry::getPassRegistry());
}
- void getAnalysisUsage(AnalysisUsage &Info) const {
+ void getAnalysisUsage(AnalysisUsage &Info) const override {
Info.setPreservesAll();
}
- bool runOnFunction(Function &F) {
+ bool runOnFunction(Function &F) override {
for (Function::arg_iterator AI = F.arg_begin(), AE = F.arg_end();
AI != AE; ++AI)
if (!AI->hasName() && !AI->getType()->isVoidTy())
diff --git a/contrib/llvm/lib/Transforms/Utils/IntegerDivision.cpp b/contrib/llvm/lib/Transforms/Utils/IntegerDivision.cpp
index 3cb8ded..9f91eeb 100644
--- a/contrib/llvm/lib/Transforms/Utils/IntegerDivision.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/IntegerDivision.cpp
@@ -7,22 +7,24 @@
//
//===----------------------------------------------------------------------===//
//
-// This file contains an implementation of 32bit scalar integer division for
-// targets that don't have native support. It's largely derived from
-// compiler-rt's implementation of __udivsi3, but hand-tuned to reduce the
-// amount of control flow
+// This file contains an implementation of 32bit and 64bit scalar integer
+// division for targets that don't have native support. It's largely derived
+// from compiler-rt's implementations of __udivsi3 and __udivmoddi4,
+// but hand-tuned for targets that prefer less control flow.
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "integer-division"
#include "llvm/Transforms/Utils/IntegerDivision.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
+#include <utility>
using namespace llvm;
+#define DEBUG_TYPE "integer-division"
+
/// Generate code to compute the remainder of two signed integers. Returns the
/// remainder, which will have the sign of the dividend. Builder's insert point
/// should be pointing where the caller wants code generated, e.g. at the srem
@@ -31,7 +33,18 @@ using namespace llvm;
/// be expanded if the user wishes
static Value *generateSignedRemainderCode(Value *Dividend, Value *Divisor,
IRBuilder<> &Builder) {
- ConstantInt *ThirtyOne = Builder.getInt32(31);
+ unsigned BitWidth = Dividend->getType()->getIntegerBitWidth();
+ ConstantInt *Shift;
+
+ if (BitWidth == 64) {
+ Shift = Builder.getInt64(63);
+ } else {
+ assert(BitWidth == 32 && "Unexpected bit width");
+ Shift = Builder.getInt32(31);
+ }
+
+  // The following instructions are generated for both i32 (shift 31) and
+  // i64 (shift 63).
// ; %dividend_sgn = ashr i32 %dividend, 31
// ; %divisor_sgn = ashr i32 %divisor, 31
@@ -42,8 +55,8 @@ static Value *generateSignedRemainderCode(Value *Dividend, Value *Divisor,
// ; %urem = urem i32 %dividend, %divisor
// ; %xored = xor i32 %urem, %dividend_sgn
// ; %srem = sub i32 %xored, %dividend_sgn
- Value *DividendSign = Builder.CreateAShr(Dividend, ThirtyOne);
- Value *DivisorSign = Builder.CreateAShr(Divisor, ThirtyOne);
+ Value *DividendSign = Builder.CreateAShr(Dividend, Shift);
+ Value *DivisorSign = Builder.CreateAShr(Divisor, Shift);
Value *DvdXor = Builder.CreateXor(Dividend, DividendSign);
Value *DvsXor = Builder.CreateXor(Divisor, DivisorSign);
Value *UDividend = Builder.CreateSub(DvdXor, DividendSign);
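The comments above spell out the sign-mask trick the helper emits. A plain scalar model of the 32-bit case (an illustrative sketch, not the IR itself, assuming arithmetic right shift as the emitted ashr provides):

    #include <cstdint>
    // Scalar model of the emitted srem expansion (32-bit case).
    int32_t srem_expand(int32_t a, int32_t b) {
      int32_t sa = a >> 31;                                       // 0 or -1: dividend sign mask
      int32_t sb = b >> 31;                                       // divisor sign mask
      uint32_t ua = ((uint32_t)a ^ (uint32_t)sa) - (uint32_t)sa;  // |a|
      uint32_t ub = ((uint32_t)b ^ (uint32_t)sb) - (uint32_t)sb;  // |b|
      uint32_t ur = ua % ub;                                      // the urem, expanded separately
      return (int32_t)((ur ^ (uint32_t)sa) - (uint32_t)sa);       // remainder takes the dividend's sign
    }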
@@ -68,6 +81,8 @@ static Value *generatedUnsignedRemainderCode(Value *Dividend, Value *Divisor,
IRBuilder<> &Builder) {
// Remainder = Dividend - Quotient*Divisor
+  // The following instructions are generated for both i32 and i64.
+
// ; %quotient = udiv i32 %dividend, %divisor
// ; %product = mul i32 %divisor, %quotient
// ; %remainder = sub i32 %dividend, %product
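As the comment notes, the unsigned helper simply recomputes the remainder from the quotient; in scalar form (a sketch):

    #include <cstdint>
    uint32_t urem_expand(uint32_t a, uint32_t b) {
      uint32_t q = a / b;   // the udiv that the expansion below will itself lower
      return a - q * b;     // remainder = dividend - quotient * divisor
    }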
@@ -88,9 +103,20 @@ static Value *generatedUnsignedRemainderCode(Value *Dividend, Value *Divisor,
/// present, i.e. not folded), ready to be expanded if the user wishes.
static Value *generateSignedDivisionCode(Value *Dividend, Value *Divisor,
IRBuilder<> &Builder) {
- // Implementation taken from compiler-rt's __divsi3
+ // Implementation taken from compiler-rt's __divsi3 and __divdi3
+
+ unsigned BitWidth = Dividend->getType()->getIntegerBitWidth();
+ ConstantInt *Shift;
+
+ if (BitWidth == 64) {
+ Shift = Builder.getInt64(63);
+ } else {
+ assert(BitWidth == 32 && "Unexpected bit width");
+ Shift = Builder.getInt32(31);
+ }
- ConstantInt *ThirtyOne = Builder.getInt32(31);
+  // The following instructions are generated for both i32 (shift 31) and
+  // i64 (shift 63).
// ; %tmp = ashr i32 %dividend, 31
// ; %tmp1 = ashr i32 %divisor, 31
@@ -102,8 +128,8 @@ static Value *generateSignedDivisionCode(Value *Dividend, Value *Divisor,
// ; %q_mag = udiv i32 %u_dvnd, %u_dvsr
// ; %tmp4 = xor i32 %q_mag, %q_sgn
// ; %q = sub i32 %tmp4, %q_sgn
- Value *Tmp = Builder.CreateAShr(Dividend, ThirtyOne);
- Value *Tmp1 = Builder.CreateAShr(Divisor, ThirtyOne);
+ Value *Tmp = Builder.CreateAShr(Dividend, Shift);
+ Value *Tmp1 = Builder.CreateAShr(Divisor, Shift);
Value *Tmp2 = Builder.CreateXor(Tmp, Dividend);
Value *U_Dvnd = Builder.CreateSub(Tmp2, Tmp);
Value *Tmp3 = Builder.CreateXor(Tmp1, Divisor);
@@ -119,9 +145,9 @@ static Value *generateSignedDivisionCode(Value *Dividend, Value *Divisor,
return Q;
}
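As with the remainder helper, the signed case strips the signs, divides the unsigned magnitudes, and restores the quotient's sign. A scalar sketch of the 32-bit case (not the IR, arithmetic right shift assumed as for the emitted ashr):

    #include <cstdint>
    int32_t sdiv_expand(int32_t a, int32_t b) {
      int32_t sa = a >> 31, sb = b >> 31;                         // sign masks (0 or -1)
      uint32_t ua = ((uint32_t)a ^ (uint32_t)sa) - (uint32_t)sa;  // |a|
      uint32_t ub = ((uint32_t)b ^ (uint32_t)sb) - (uint32_t)sb;  // |b|
      uint32_t qsign = (uint32_t)(sa ^ sb);                       // quotient negative iff signs differ
      uint32_t uq = ua / ub;                                      // the udiv lowered by the routine described next
      return (int32_t)((uq ^ qsign) - qsign);                     // apply the quotient's sign
    }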
-/// Generates code to divide two unsigned scalar 32-bit integers. Returns the
-/// quotient, rounded towards 0. Builder's insert point should be pointing where
-/// the caller wants code generated, e.g. at the udiv instruction.
+/// Generates code to divide two unsigned scalar 32-bit or 64-bit integers.
+/// Returns the quotient, rounded towards 0. Builder's insert point should
+/// point where the caller wants code generated, e.g. at the udiv instruction.
static Value *generateUnsignedDivisionCode(Value *Dividend, Value *Divisor,
IRBuilder<> &Builder) {
// The basic algorithm can be found in the compiler-rt project's
@@ -129,18 +155,33 @@ static Value *generateUnsignedDivisionCode(Value *Dividend, Value *Divisor,
// that's been hand-tuned to lessen the amount of control flow involved.
// Some helper values
- IntegerType *I32Ty = Builder.getInt32Ty();
+ IntegerType *DivTy = cast<IntegerType>(Dividend->getType());
+ unsigned BitWidth = DivTy->getBitWidth();
+
+ ConstantInt *Zero;
+ ConstantInt *One;
+ ConstantInt *NegOne;
+ ConstantInt *MSB;
+
+ if (BitWidth == 64) {
+ Zero = Builder.getInt64(0);
+ One = Builder.getInt64(1);
+ NegOne = ConstantInt::getSigned(DivTy, -1);
+ MSB = Builder.getInt64(63);
+ } else {
+ assert(BitWidth == 32 && "Unexpected bit width");
+ Zero = Builder.getInt32(0);
+ One = Builder.getInt32(1);
+ NegOne = ConstantInt::getSigned(DivTy, -1);
+ MSB = Builder.getInt32(31);
+ }
- ConstantInt *Zero = Builder.getInt32(0);
- ConstantInt *One = Builder.getInt32(1);
- ConstantInt *ThirtyOne = Builder.getInt32(31);
- ConstantInt *NegOne = ConstantInt::getSigned(I32Ty, -1);
- ConstantInt *True = Builder.getTrue();
+ ConstantInt *True = Builder.getTrue();
BasicBlock *IBB = Builder.GetInsertBlock();
Function *F = IBB->getParent();
- Function *CTLZi32 = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
- I32Ty);
+ Function *CTLZ = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
+ DivTy);
// Our CFG is going to look like:
// +---------------------+
@@ -190,6 +231,8 @@ static Value *generateUnsignedDivisionCode(Value *Dividend, Value *Divisor,
// We'll be overwriting the terminator to insert our extra blocks
SpecialCases->getTerminator()->eraseFromParent();
+  // The same instructions are generated for both i32 (msb 31) and i64 (msb 63).
+
// First off, check for special cases: dividend or divisor is zero, divisor
// is greater than dividend, and divisor is 1.
// ; special-cases:
@@ -209,12 +252,12 @@ static Value *generateUnsignedDivisionCode(Value *Dividend, Value *Divisor,
Value *Ret0_1 = Builder.CreateICmpEQ(Divisor, Zero);
Value *Ret0_2 = Builder.CreateICmpEQ(Dividend, Zero);
Value *Ret0_3 = Builder.CreateOr(Ret0_1, Ret0_2);
- Value *Tmp0 = Builder.CreateCall2(CTLZi32, Divisor, True);
- Value *Tmp1 = Builder.CreateCall2(CTLZi32, Dividend, True);
+ Value *Tmp0 = Builder.CreateCall2(CTLZ, Divisor, True);
+ Value *Tmp1 = Builder.CreateCall2(CTLZ, Dividend, True);
Value *SR = Builder.CreateSub(Tmp0, Tmp1);
- Value *Ret0_4 = Builder.CreateICmpUGT(SR, ThirtyOne);
+ Value *Ret0_4 = Builder.CreateICmpUGT(SR, MSB);
Value *Ret0 = Builder.CreateOr(Ret0_3, Ret0_4);
- Value *RetDividend = Builder.CreateICmpEQ(SR, ThirtyOne);
+ Value *RetDividend = Builder.CreateICmpEQ(SR, MSB);
Value *RetVal = Builder.CreateSelect(Ret0, Zero, Dividend);
Value *EarlyRet = Builder.CreateOr(Ret0, RetDividend);
Builder.CreateCondBr(EarlyRet, End, BB1);
@@ -227,7 +270,7 @@ static Value *generateUnsignedDivisionCode(Value *Dividend, Value *Divisor,
// ; br i1 %skipLoop, label %loop-exit, label %preheader
Builder.SetInsertPoint(BB1);
Value *SR_1 = Builder.CreateAdd(SR, One);
- Value *Tmp2 = Builder.CreateSub(ThirtyOne, SR);
+ Value *Tmp2 = Builder.CreateSub(MSB, SR);
Value *Q = Builder.CreateShl(Dividend, Tmp2);
Value *SkipLoop = Builder.CreateICmpEQ(SR_1, Zero);
Builder.CreateCondBr(SkipLoop, LoopExit, Preheader);
@@ -260,17 +303,17 @@ static Value *generateUnsignedDivisionCode(Value *Dividend, Value *Divisor,
// ; %tmp12 = icmp eq i32 %sr_2, 0
// ; br i1 %tmp12, label %loop-exit, label %do-while
Builder.SetInsertPoint(DoWhile);
- PHINode *Carry_1 = Builder.CreatePHI(I32Ty, 2);
- PHINode *SR_3 = Builder.CreatePHI(I32Ty, 2);
- PHINode *R_1 = Builder.CreatePHI(I32Ty, 2);
- PHINode *Q_2 = Builder.CreatePHI(I32Ty, 2);
+ PHINode *Carry_1 = Builder.CreatePHI(DivTy, 2);
+ PHINode *SR_3 = Builder.CreatePHI(DivTy, 2);
+ PHINode *R_1 = Builder.CreatePHI(DivTy, 2);
+ PHINode *Q_2 = Builder.CreatePHI(DivTy, 2);
Value *Tmp5 = Builder.CreateShl(R_1, One);
- Value *Tmp6 = Builder.CreateLShr(Q_2, ThirtyOne);
+ Value *Tmp6 = Builder.CreateLShr(Q_2, MSB);
Value *Tmp7 = Builder.CreateOr(Tmp5, Tmp6);
Value *Tmp8 = Builder.CreateShl(Q_2, One);
Value *Q_1 = Builder.CreateOr(Carry_1, Tmp8);
Value *Tmp9 = Builder.CreateSub(Tmp4, Tmp7);
- Value *Tmp10 = Builder.CreateAShr(Tmp9, 31);
+ Value *Tmp10 = Builder.CreateAShr(Tmp9, MSB);
Value *Carry = Builder.CreateAnd(Tmp10, One);
Value *Tmp11 = Builder.CreateAnd(Tmp10, Divisor);
Value *R = Builder.CreateSub(Tmp7, Tmp11);
@@ -285,8 +328,8 @@ static Value *generateUnsignedDivisionCode(Value *Dividend, Value *Divisor,
// ; %q_4 = or i32 %carry_2, %tmp13
// ; br label %end
Builder.SetInsertPoint(LoopExit);
- PHINode *Carry_2 = Builder.CreatePHI(I32Ty, 2);
- PHINode *Q_3 = Builder.CreatePHI(I32Ty, 2);
+ PHINode *Carry_2 = Builder.CreatePHI(DivTy, 2);
+ PHINode *Q_3 = Builder.CreatePHI(DivTy, 2);
Value *Tmp13 = Builder.CreateShl(Q_3, One);
Value *Q_4 = Builder.CreateOr(Carry_2, Tmp13);
Builder.CreateBr(End);
@@ -295,7 +338,7 @@ static Value *generateUnsignedDivisionCode(Value *Dividend, Value *Divisor,
// ; %q_5 = phi i32 [ %q_4, %loop-exit ], [ %retVal, %special-cases ]
// ; ret i32 %q_5
Builder.SetInsertPoint(End, End->begin());
- PHINode *Q_5 = Builder.CreatePHI(I32Ty, 2);
+ PHINode *Q_5 = Builder.CreatePHI(DivTy, 2);
// Populate the Phis, since all values have now been created. Our Phis were:
// ; %carry_1 = phi i32 [ 0, %preheader ], [ %carry, %do-while ]
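The CFG built above is a hand-tuned form of the classic shift-subtract (restoring) division loop, with the ctlz-based checks in the special-cases block peeling off the trivial inputs. A plain scalar sketch of the underlying algorithm (32-bit case, not the exact IR):

    #include <cstdint>
    uint32_t udiv_expand(uint32_t n, uint32_t d) {
      if (d == 0) return 0;                        // the expansion also yields 0 here
      uint32_t q = 0, r = 0;
      for (int i = 31; i >= 0; --i) {
        r = (r << 1) | ((n >> i) & 1);             // bring in the next dividend bit
        if (r >= d) { r -= d; q |= 1u << i; }      // subtract when the divisor fits
      }
      return q;
    }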
@@ -326,9 +369,8 @@ static Value *generateUnsignedDivisionCode(Value *Dividend, Value *Divisor,
/// Generate code to calculate the remainder of two integers, replacing Rem with
/// the generated code. This currently generates code using the udiv expansion,
/// but future work includes generating more specialized code, e.g. when more
-/// information about the operands are known. Currently only implements 32bit
-/// scalar division (due to udiv's limitation), but future work is removing this
-/// limitation.
+/// information about the operands is known. Implements both 32bit and 64bit
+/// scalar division.
///
/// @brief Replace Rem with generated code.
bool llvm::expandRemainder(BinaryOperator *Rem) {
@@ -338,6 +380,15 @@ bool llvm::expandRemainder(BinaryOperator *Rem) {
IRBuilder<> Builder(Rem);
+ Type *RemTy = Rem->getType();
+ if (RemTy->isVectorTy())
+ llvm_unreachable("Div over vectors not supported");
+
+ unsigned RemTyBitWidth = RemTy->getIntegerBitWidth();
+
+ if (RemTyBitWidth != 32 && RemTyBitWidth != 64)
+ llvm_unreachable("Div of bitwidth other than 32 or 64 not supported");
+
// First prepare the sign if it's a signed remainder
if (Rem->getOpcode() == Instruction::SRem) {
Value *Remainder = generateSignedRemainderCode(Rem->getOperand(0),
@@ -376,9 +427,8 @@ bool llvm::expandRemainder(BinaryOperator *Rem) {
/// Generate code to divide two integers, replacing Div with the generated
/// code. This currently generates code similarly to compiler-rt's
/// implementations, but future work includes generating more specialized code
-/// when more information about the operands are known. Currently only
-/// implements 32bit scalar division, but future work is removing this
-/// limitation.
+/// when more information about the operands is known. Implements both
+/// 32bit and 64bit scalar division.
///
/// @brief Replace Div with generated code.
bool llvm::expandDivision(BinaryOperator *Div) {
@@ -388,9 +438,15 @@ bool llvm::expandDivision(BinaryOperator *Div) {
IRBuilder<> Builder(Div);
- if (Div->getType()->isVectorTy())
+ Type *DivTy = Div->getType();
+ if (DivTy->isVectorTy())
llvm_unreachable("Div over vectors not supported");
+ unsigned DivTyBitWidth = DivTy->getIntegerBitWidth();
+
+ if (DivTyBitWidth != 32 && DivTyBitWidth != 64)
+ llvm_unreachable("Div of bitwidth other than 32 or 64 not supported");
+
// First prepare the sign if it's a signed division
if (Div->getOpcode() == Instruction::SDiv) {
// Lower the code to unsigned division, and reset Div to point to the udiv.
@@ -443,7 +499,7 @@ bool llvm::expandRemainderUpTo32Bits(BinaryOperator *Rem) {
if (RemTyBitWidth == 32)
return expandRemainder(Rem);
- // If bitwidth smaller than 32 extend inputs, truncate output and proceed
+ // If bitwidth smaller than 32 extend inputs, extend output and proceed
// with 32 bit division.
IRBuilder<> Builder(Rem);
@@ -471,6 +527,55 @@ bool llvm::expandRemainderUpTo32Bits(BinaryOperator *Rem) {
return expandRemainder(cast<BinaryOperator>(ExtRem));
}
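With the new bit-width checks, expandRemainder and expandDivision accept i32 and i64 operands directly. A hypothetical caller, say an IR-level lowering for a target without a hardware divider, might look like this (a sketch only; maybeExpandRem is an invented name, and the usual LLVM 3.5 headers plus `using namespace llvm;` are assumed):

    static bool maybeExpandRem(Instruction *I) {
      BinaryOperator *Rem = dyn_cast<BinaryOperator>(I);
      if (!Rem)
        return false;
      if (Rem->getOpcode() != Instruction::SRem &&
          Rem->getOpcode() != Instruction::URem)
        return false;
      if (!Rem->getType()->isIntegerTy(32) && !Rem->getType()->isIntegerTy(64))
        return false;
      return expandRemainder(Rem);  // rewrites the rem in place via the udiv expansion
    }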
+/// Generate code to compute the remainder of two integers of bitwidth up to
+/// 64 bits. Uses the above routines and extends the inputs/truncates the
+/// outputs to operate in 64 bits.
+///
+/// @brief Replace Rem with emulation code.
+bool llvm::expandRemainderUpTo64Bits(BinaryOperator *Rem) {
+ assert((Rem->getOpcode() == Instruction::SRem ||
+ Rem->getOpcode() == Instruction::URem) &&
+ "Trying to expand remainder from a non-remainder function");
+
+ Type *RemTy = Rem->getType();
+ if (RemTy->isVectorTy())
+ llvm_unreachable("Div over vectors not supported");
+
+ unsigned RemTyBitWidth = RemTy->getIntegerBitWidth();
+
+ if (RemTyBitWidth > 64)
+ llvm_unreachable("Div of bitwidth greater than 64 not supported");
+
+ if (RemTyBitWidth == 64)
+ return expandRemainder(Rem);
+
+ // If bitwidth smaller than 64 extend inputs, extend output and proceed
+ // with 64 bit division.
+ IRBuilder<> Builder(Rem);
+
+ Value *ExtDividend;
+ Value *ExtDivisor;
+ Value *ExtRem;
+ Value *Trunc;
+ Type *Int64Ty = Builder.getInt64Ty();
+
+ if (Rem->getOpcode() == Instruction::SRem) {
+ ExtDividend = Builder.CreateSExt(Rem->getOperand(0), Int64Ty);
+ ExtDivisor = Builder.CreateSExt(Rem->getOperand(1), Int64Ty);
+ ExtRem = Builder.CreateSRem(ExtDividend, ExtDivisor);
+ } else {
+ ExtDividend = Builder.CreateZExt(Rem->getOperand(0), Int64Ty);
+ ExtDivisor = Builder.CreateZExt(Rem->getOperand(1), Int64Ty);
+ ExtRem = Builder.CreateURem(ExtDividend, ExtDivisor);
+ }
+ Trunc = Builder.CreateTrunc(ExtRem, RemTy);
+
+ Rem->replaceAllUsesWith(Trunc);
+ Rem->dropAllReferences();
+ Rem->eraseFromParent();
+
+ return expandRemainder(cast<BinaryOperator>(ExtRem));
+}
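At the source level, what the new routine arranges for a narrow type is just widen, take the remainder, and truncate. For example, for a signed 16-bit remainder (a sketch of the computation, not of the generated IR):

    #include <cstdint>
    int16_t srem16_via_i64(int16_t a, int16_t b) {
      int64_t ea = a, eb = b;        // sign-extend both operands (zero-extend for urem)
      return (int16_t)(ea % eb);     // 64-bit srem, truncated back to the original width
    }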
/// Generate code to divide two integers of bitwidth up to 32 bits. Uses the
/// above routines and extends the inputs/truncates the outputs to operate
@@ -495,7 +600,7 @@ bool llvm::expandDivisionUpTo32Bits(BinaryOperator *Div) {
if (DivTyBitWidth == 32)
return expandDivision(Div);
- // If bitwidth smaller than 32 extend inputs, truncate output and proceed
+ // If bitwidth smaller than 32 extend inputs, extend output and proceed
// with 32 bit division.
IRBuilder<> Builder(Div);
@@ -522,3 +627,53 @@ bool llvm::expandDivisionUpTo32Bits(BinaryOperator *Div) {
return expandDivision(cast<BinaryOperator>(ExtDiv));
}
+
+/// Generate code to divide two integers of bitwidth up to 64 bits. Uses the
+/// above routines and extends the inputs/truncates the outputs to operate
+/// in 64 bits.
+///
+/// @brief Replace Div with emulation code.
+bool llvm::expandDivisionUpTo64Bits(BinaryOperator *Div) {
+ assert((Div->getOpcode() == Instruction::SDiv ||
+ Div->getOpcode() == Instruction::UDiv) &&
+ "Trying to expand division from a non-division function");
+
+ Type *DivTy = Div->getType();
+ if (DivTy->isVectorTy())
+ llvm_unreachable("Div over vectors not supported");
+
+ unsigned DivTyBitWidth = DivTy->getIntegerBitWidth();
+
+ if (DivTyBitWidth > 64)
+ llvm_unreachable("Div of bitwidth greater than 64 not supported");
+
+ if (DivTyBitWidth == 64)
+ return expandDivision(Div);
+
+ // If bitwidth smaller than 64 extend inputs, extend output and proceed
+ // with 64 bit division.
+ IRBuilder<> Builder(Div);
+
+ Value *ExtDividend;
+ Value *ExtDivisor;
+ Value *ExtDiv;
+ Value *Trunc;
+ Type *Int64Ty = Builder.getInt64Ty();
+
+ if (Div->getOpcode() == Instruction::SDiv) {
+ ExtDividend = Builder.CreateSExt(Div->getOperand(0), Int64Ty);
+ ExtDivisor = Builder.CreateSExt(Div->getOperand(1), Int64Ty);
+ ExtDiv = Builder.CreateSDiv(ExtDividend, ExtDivisor);
+ } else {
+ ExtDividend = Builder.CreateZExt(Div->getOperand(0), Int64Ty);
+ ExtDivisor = Builder.CreateZExt(Div->getOperand(1), Int64Ty);
+ ExtDiv = Builder.CreateUDiv(ExtDividend, ExtDivisor);
+ }
+ Trunc = Builder.CreateTrunc(ExtDiv, DivTy);
+
+ Div->replaceAllUsesWith(Trunc);
+ Div->dropAllReferences();
+ Div->eraseFromParent();
+
+ return expandDivision(cast<BinaryOperator>(ExtDiv));
+}
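A hypothetical driver using the new entry point would collect the divisions first and expand them afterwards, because the expansion erases the original instruction while it is being visited (a sketch; expandAllDivisions is an invented name and the usual LLVM 3.5 headers plus `using namespace llvm;` are assumed):

    static void expandAllDivisions(Function &F) {
      SmallVector<BinaryOperator *, 8> Worklist;
      for (BasicBlock &BB : F)
        for (Instruction &I : BB)
          if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
            if ((BO->getOpcode() == Instruction::UDiv ||
                 BO->getOpcode() == Instruction::SDiv) &&
                BO->getType()->isIntegerTy() &&
                BO->getType()->getIntegerBitWidth() <= 64)
              Worklist.push_back(BO);          // collect first: expansion erases the div
      for (unsigned i = 0, e = Worklist.size(); i != e; ++i)
        expandDivisionUpTo64Bits(Worklist[i]); // widens to 64 bits if needed, then expands
    }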
diff --git a/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp b/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp
index 97e7e5d..51a3d9c 100644
--- a/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp
@@ -27,222 +27,108 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "lcssa"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/PredIteratorCache.h"
#include "llvm/Pass.h"
-#include "llvm/Support/PredIteratorCache.h"
+#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;
-STATISTIC(NumLCSSA, "Number of live out of a loop variables");
-
-namespace {
- struct LCSSA : public LoopPass {
- static char ID; // Pass identification, replacement for typeid
- LCSSA() : LoopPass(ID) {
- initializeLCSSAPass(*PassRegistry::getPassRegistry());
- }
-
- // Cached analysis information for the current function.
- DominatorTree *DT;
- LoopInfo *LI;
- ScalarEvolution *SE;
- PredIteratorCache PredCache;
- Loop *L;
-
- virtual bool runOnLoop(Loop *L, LPPassManager &LPM);
-
- /// This transformation requires natural loop information & requires that
- /// loop preheaders be inserted into the CFG. It maintains both of these,
- /// as well as the CFG. It also requires dominator information.
- ///
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesCFG();
-
- AU.addRequired<DominatorTree>();
- AU.addRequired<LoopInfo>();
- AU.addPreservedID(LoopSimplifyID);
- AU.addPreserved<AliasAnalysis>();
- AU.addPreserved<ScalarEvolution>();
- }
- private:
- bool ProcessInstruction(Instruction *Inst,
- const SmallVectorImpl<BasicBlock*> &ExitBlocks);
-
- /// verifyAnalysis() - Verify loop nest.
- virtual void verifyAnalysis() const {
- // Check the special guarantees that LCSSA makes.
- assert(L->isLCSSAForm(*DT) && "LCSSA form not preserved!");
- }
- };
-}
-
-char LCSSA::ID = 0;
-INITIALIZE_PASS_BEGIN(LCSSA, "lcssa", "Loop-Closed SSA Form Pass", false, false)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
-INITIALIZE_PASS_DEPENDENCY(LoopInfo)
-INITIALIZE_PASS_END(LCSSA, "lcssa", "Loop-Closed SSA Form Pass", false, false)
-
-Pass *llvm::createLCSSAPass() { return new LCSSA(); }
-char &llvm::LCSSAID = LCSSA::ID;
+#define DEBUG_TYPE "lcssa"
+STATISTIC(NumLCSSA, "Number of live out of a loop variables");
-/// BlockDominatesAnExit - Return true if the specified block dominates at least
-/// one of the blocks in the specified list.
-static bool BlockDominatesAnExit(BasicBlock *BB,
- const SmallVectorImpl<BasicBlock*> &ExitBlocks,
- DominatorTree *DT) {
- DomTreeNode *DomNode = DT->getNode(BB);
+/// Return true if the specified block is in the list.
+static bool isExitBlock(BasicBlock *BB,
+ const SmallVectorImpl<BasicBlock *> &ExitBlocks) {
for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
- if (DT->dominates(DomNode, DT->getNode(ExitBlocks[i])))
+ if (ExitBlocks[i] == BB)
return true;
-
return false;
}
+/// Given an instruction in the loop, check to see if it has any uses that are
+/// outside the current loop. If so, insert LCSSA PHI nodes and rewrite the
+/// uses.
+static bool processInstruction(Loop &L, Instruction &Inst, DominatorTree &DT,
+ const SmallVectorImpl<BasicBlock *> &ExitBlocks,
+ PredIteratorCache &PredCache) {
+ SmallVector<Use *, 16> UsesToRewrite;
-/// runOnFunction - Process all loops in the function, inner-most out.
-bool LCSSA::runOnLoop(Loop *TheLoop, LPPassManager &LPM) {
- L = TheLoop;
-
- DT = &getAnalysis<DominatorTree>();
- LI = &getAnalysis<LoopInfo>();
- SE = getAnalysisIfAvailable<ScalarEvolution>();
-
- // Get the set of exiting blocks.
- SmallVector<BasicBlock*, 8> ExitBlocks;
- L->getExitBlocks(ExitBlocks);
-
- if (ExitBlocks.empty())
- return false;
-
- // Look at all the instructions in the loop, checking to see if they have uses
- // outside the loop. If so, rewrite those uses.
- bool MadeChange = false;
-
- for (Loop::block_iterator BBI = L->block_begin(), E = L->block_end();
- BBI != E; ++BBI) {
- BasicBlock *BB = *BBI;
-
- // For large loops, avoid use-scanning by using dominance information: In
- // particular, if a block does not dominate any of the loop exits, then none
- // of the values defined in the block could be used outside the loop.
- if (!BlockDominatesAnExit(BB, ExitBlocks, DT))
- continue;
-
- for (BasicBlock::iterator I = BB->begin(), E = BB->end();
- I != E; ++I) {
- // Reject two common cases fast: instructions with no uses (like stores)
- // and instructions with one use that is in the same block as this.
- if (I->use_empty() ||
- (I->hasOneUse() && I->use_back()->getParent() == BB &&
- !isa<PHINode>(I->use_back())))
- continue;
-
- MadeChange |= ProcessInstruction(I, ExitBlocks);
- }
- }
-
- // If we modified the code, remove any caches about the loop from SCEV to
- // avoid dangling entries.
- // FIXME: This is a big hammer, can we clear the cache more selectively?
- if (SE && MadeChange)
- SE->forgetLoop(L);
-
- assert(L->isLCSSAForm(*DT));
- PredCache.clear();
-
- return MadeChange;
-}
+ BasicBlock *InstBB = Inst.getParent();
-/// isExitBlock - Return true if the specified block is in the list.
-static bool isExitBlock(BasicBlock *BB,
- const SmallVectorImpl<BasicBlock*> &ExitBlocks) {
- for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
- if (ExitBlocks[i] == BB)
- return true;
- return false;
-}
+ for (Use &U : Inst.uses()) {
+ Instruction *User = cast<Instruction>(U.getUser());
+ BasicBlock *UserBB = User->getParent();
+ if (PHINode *PN = dyn_cast<PHINode>(User))
+ UserBB = PN->getIncomingBlock(U);
-/// ProcessInstruction - Given an instruction in the loop, check to see if it
-/// has any uses that are outside the current loop. If so, insert LCSSA PHI
-/// nodes and rewrite the uses.
-bool LCSSA::ProcessInstruction(Instruction *Inst,
- const SmallVectorImpl<BasicBlock*> &ExitBlocks) {
- SmallVector<Use*, 16> UsesToRewrite;
-
- BasicBlock *InstBB = Inst->getParent();
-
- for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
- UI != E; ++UI) {
- User *U = *UI;
- BasicBlock *UserBB = cast<Instruction>(U)->getParent();
- if (PHINode *PN = dyn_cast<PHINode>(U))
- UserBB = PN->getIncomingBlock(UI);
-
- if (InstBB != UserBB && !L->contains(UserBB))
- UsesToRewrite.push_back(&UI.getUse());
+ if (InstBB != UserBB && !L.contains(UserBB))
+ UsesToRewrite.push_back(&U);
}
// If there are no uses outside the loop, exit with no change.
- if (UsesToRewrite.empty()) return false;
-
+ if (UsesToRewrite.empty())
+ return false;
+
++NumLCSSA; // We are applying the transformation
// Invoke instructions are special in that their result value is not available
- // along their unwind edge. The code below tests to see whether DomBB dominates
+ // along their unwind edge. The code below tests to see whether DomBB
+ // dominates
// the value, so adjust DomBB to the normal destination block, which is
// effectively where the value is first usable.
- BasicBlock *DomBB = Inst->getParent();
- if (InvokeInst *Inv = dyn_cast<InvokeInst>(Inst))
+ BasicBlock *DomBB = Inst.getParent();
+ if (InvokeInst *Inv = dyn_cast<InvokeInst>(&Inst))
DomBB = Inv->getNormalDest();
- DomTreeNode *DomNode = DT->getNode(DomBB);
+ DomTreeNode *DomNode = DT.getNode(DomBB);
- SmallVector<PHINode*, 16> AddedPHIs;
+ SmallVector<PHINode *, 16> AddedPHIs;
SSAUpdater SSAUpdate;
- SSAUpdate.Initialize(Inst->getType(), Inst->getName());
-
+ SSAUpdate.Initialize(Inst.getType(), Inst.getName());
+
// Insert the LCSSA phi's into all of the exit blocks dominated by the
// value, and add them to the Phi's map.
- for (SmallVectorImpl<BasicBlock*>::const_iterator BBI = ExitBlocks.begin(),
- BBE = ExitBlocks.end(); BBI != BBE; ++BBI) {
+ for (SmallVectorImpl<BasicBlock *>::const_iterator BBI = ExitBlocks.begin(),
+ BBE = ExitBlocks.end();
+ BBI != BBE; ++BBI) {
BasicBlock *ExitBB = *BBI;
- if (!DT->dominates(DomNode, DT->getNode(ExitBB))) continue;
-
+ if (!DT.dominates(DomNode, DT.getNode(ExitBB)))
+ continue;
+
// If we already inserted something for this BB, don't reprocess it.
- if (SSAUpdate.HasValueForBlock(ExitBB)) continue;
-
- PHINode *PN = PHINode::Create(Inst->getType(),
- PredCache.GetNumPreds(ExitBB),
- Inst->getName()+".lcssa",
- ExitBB->begin());
+ if (SSAUpdate.HasValueForBlock(ExitBB))
+ continue;
+
+ PHINode *PN = PHINode::Create(Inst.getType(), PredCache.GetNumPreds(ExitBB),
+ Inst.getName() + ".lcssa", ExitBB->begin());
// Add inputs from inside the loop for this PHI.
for (BasicBlock **PI = PredCache.GetPreds(ExitBB); *PI; ++PI) {
- PN->addIncoming(Inst, *PI);
+ PN->addIncoming(&Inst, *PI);
// If the exit block has a predecessor not within the loop, arrange for
// the incoming value use corresponding to that predecessor to be
// rewritten in terms of a different LCSSA PHI.
- if (!L->contains(*PI))
+ if (!L.contains(*PI))
UsesToRewrite.push_back(
- &PN->getOperandUse(
- PN->getOperandNumForIncomingValue(PN->getNumIncomingValues()-1)));
+ &PN->getOperandUse(PN->getOperandNumForIncomingValue(
+ PN->getNumIncomingValues() - 1)));
}
AddedPHIs.push_back(PN);
-
+
// Remember that this phi makes the value alive in this block.
SSAUpdate.AddAvailableValue(ExitBB, PN);
}
@@ -259,15 +145,14 @@ bool LCSSA::ProcessInstruction(Instruction *Inst,
if (PHINode *PN = dyn_cast<PHINode>(User))
UserBB = PN->getIncomingBlock(*UsesToRewrite[i]);
- if (isa<PHINode>(UserBB->begin()) &&
- isExitBlock(UserBB, ExitBlocks)) {
+ if (isa<PHINode>(UserBB->begin()) && isExitBlock(UserBB, ExitBlocks)) {
// Tell the VHs that the uses changed. This updates SCEV's caches.
if (UsesToRewrite[i]->get()->hasValueHandle())
ValueHandleBase::ValueIsRAUWd(*UsesToRewrite[i], UserBB->begin());
UsesToRewrite[i]->set(UserBB->begin());
continue;
}
-
+
// Otherwise, do full PHI insertion.
SSAUpdate.RewriteUse(*UsesToRewrite[i]);
}
@@ -277,7 +162,152 @@ bool LCSSA::ProcessInstruction(Instruction *Inst,
if (AddedPHIs[i]->use_empty())
AddedPHIs[i]->eraseFromParent();
}
-
+
return true;
}
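At the source level, the rewrite performed by processInstruction amounts to giving the loop-defined value a dedicated copy at the loop exit and making every later use read that copy (a plain C++ analogy, not IR; the function name is made up):

    #include <vector>
    int loop_sum(const std::vector<int> &xs) {
      int v = 0;
      for (int x : xs)
        v = v + x;            // 'v' is (re)defined inside the loop
      int v_lcssa = v;        // plays the role of the single-entry PHI in the exit block
      return v_lcssa;         // every use outside the loop goes through the copy
    }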
+/// Return true if the specified block dominates at least
+/// one of the blocks in the specified list.
+static bool
+blockDominatesAnExit(BasicBlock *BB,
+ DominatorTree &DT,
+ const SmallVectorImpl<BasicBlock *> &ExitBlocks) {
+ DomTreeNode *DomNode = DT.getNode(BB);
+ for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
+ if (DT.dominates(DomNode, DT.getNode(ExitBlocks[i])))
+ return true;
+
+ return false;
+}
+
+bool llvm::formLCSSA(Loop &L, DominatorTree &DT, ScalarEvolution *SE) {
+ bool Changed = false;
+
+ // Get the set of exiting blocks.
+ SmallVector<BasicBlock *, 8> ExitBlocks;
+ L.getExitBlocks(ExitBlocks);
+
+ if (ExitBlocks.empty())
+ return false;
+
+ PredIteratorCache PredCache;
+
+ // Look at all the instructions in the loop, checking to see if they have uses
+ // outside the loop. If so, rewrite those uses.
+ for (Loop::block_iterator BBI = L.block_begin(), BBE = L.block_end();
+ BBI != BBE; ++BBI) {
+ BasicBlock *BB = *BBI;
+
+ // For large loops, avoid use-scanning by using dominance information: In
+ // particular, if a block does not dominate any of the loop exits, then none
+ // of the values defined in the block could be used outside the loop.
+ if (!blockDominatesAnExit(BB, DT, ExitBlocks))
+ continue;
+
+ for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
+ // Reject two common cases fast: instructions with no uses (like stores)
+ // and instructions with one use that is in the same block as this.
+ if (I->use_empty() ||
+ (I->hasOneUse() && I->user_back()->getParent() == BB &&
+ !isa<PHINode>(I->user_back())))
+ continue;
+
+ Changed |= processInstruction(L, *I, DT, ExitBlocks, PredCache);
+ }
+ }
+
+ // If we modified the code, remove any caches about the loop from SCEV to
+ // avoid dangling entries.
+ // FIXME: This is a big hammer, can we clear the cache more selectively?
+ if (SE && Changed)
+ SE->forgetLoop(&L);
+
+ assert(L.isLCSSAForm(DT));
+
+ return Changed;
+}
+
+/// Process a loop nest depth first.
+bool llvm::formLCSSARecursively(Loop &L, DominatorTree &DT,
+ ScalarEvolution *SE) {
+ bool Changed = false;
+
+ // Recurse depth-first through inner loops.
+ for (Loop::iterator LI = L.begin(), LE = L.end(); LI != LE; ++LI)
+ Changed |= formLCSSARecursively(**LI, DT, SE);
+
+ Changed |= formLCSSA(L, DT, SE);
+ return Changed;
+}
+
+namespace {
+struct LCSSA : public FunctionPass {
+ static char ID; // Pass identification, replacement for typeid
+ LCSSA() : FunctionPass(ID) {
+ initializeLCSSAPass(*PassRegistry::getPassRegistry());
+ }
+
+ // Cached analysis information for the current function.
+ DominatorTree *DT;
+ LoopInfo *LI;
+ ScalarEvolution *SE;
+
+ bool runOnFunction(Function &F) override;
+
+ /// This transformation requires natural loop information & requires that
+ /// loop preheaders be inserted into the CFG. It maintains both of these,
+ /// as well as the CFG. It also requires dominator information.
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addRequired<LoopInfo>();
+ AU.addPreservedID(LoopSimplifyID);
+ AU.addPreserved<AliasAnalysis>();
+ AU.addPreserved<ScalarEvolution>();
+ }
+
+private:
+ void verifyAnalysis() const override;
+};
+}
+
+char LCSSA::ID = 0;
+INITIALIZE_PASS_BEGIN(LCSSA, "lcssa", "Loop-Closed SSA Form Pass", false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_END(LCSSA, "lcssa", "Loop-Closed SSA Form Pass", false, false)
+
+Pass *llvm::createLCSSAPass() { return new LCSSA(); }
+char &llvm::LCSSAID = LCSSA::ID;
+
+
+/// Process all loops in the function, inner-most out.
+bool LCSSA::runOnFunction(Function &F) {
+ bool Changed = false;
+ LI = &getAnalysis<LoopInfo>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ SE = getAnalysisIfAvailable<ScalarEvolution>();
+
+ // Simplify each loop nest in the function.
+ for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
+ Changed |= formLCSSARecursively(**I, *DT, SE);
+
+ return Changed;
+}
+
+static void verifyLoop(Loop &L, DominatorTree &DT) {
+ // Recurse depth-first through inner loops.
+ for (Loop::iterator LI = L.begin(), LE = L.end(); LI != LE; ++LI)
+ verifyLoop(**LI, DT);
+
+ // Check the special guarantees that LCSSA makes.
+ //assert(L.isLCSSAForm(DT) && "LCSSA form not preserved!");
+}
+
+void LCSSA::verifyAnalysis() const {
+ // Verify each loop nest in the function, assuming LI still points at that
+ // function's loop info.
+ for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
+ verifyLoop(**I, *DT);
+}
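The pass is now a thin wrapper around formLCSSARecursively, so other transforms can call the utilities directly instead of scheduling the pass. A hypothetical caller (a sketch; restoreLCSSA is an invented name, and LoopUtils.h plus <cassert> are assumed):

    static bool restoreLCSSA(Loop &L, DominatorTree &DT, ScalarEvolution *SE) {
      bool Changed = formLCSSARecursively(L, DT, SE);  // inner loops first, then L itself
      assert(L.isLCSSAForm(DT) && "LCSSA form not restored");
      return Changed;
    }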
diff --git a/contrib/llvm/lib/Transforms/Utils/Local.cpp b/contrib/llvm/lib/Transforms/Utils/Local.cpp
index 2768041..a5e443f 100644
--- a/contrib/llvm/lib/Transforms/Utils/Local.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/Local.cpp
@@ -17,15 +17,17 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/DIBuilder.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
@@ -35,14 +37,14 @@
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
-#include "llvm/Support/CFG.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "local"
+
STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
//===----------------------------------------------------------------------===//
@@ -127,8 +129,10 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
// dest. If so, eliminate it as an explicit compare.
if (i.getCaseSuccessor() == DefaultDest) {
MDNode* MD = SI->getMetadata(LLVMContext::MD_prof);
- // MD should have 2 + NumCases operands.
- if (MD && MD->getNumOperands() == 2 + SI->getNumCases()) {
+ unsigned NCases = SI->getNumCases();
+ // Fold the case metadata into the default if there will be any branches
+ // left, unless the metadata doesn't match the switch.
+ if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) {
// Collect branch weights into a vector.
SmallVector<uint32_t, 8> Weights;
for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
@@ -157,7 +161,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
// Otherwise, check to see if the switch only branches to one destination.
// We do this by reseting "TheOnlyDest" to null when we find two non-equal
// destinations.
- if (i.getCaseSuccessor() != TheOnlyDest) TheOnlyDest = 0;
+ if (i.getCaseSuccessor() != TheOnlyDest) TheOnlyDest = nullptr;
}
if (CI && !TheOnlyDest) {
@@ -178,7 +182,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
// Found case matching a constant operand?
BasicBlock *Succ = SI->getSuccessor(i);
if (Succ == TheOnlyDest)
- TheOnlyDest = 0; // Don't modify the first branch to TheOnlyDest
+ TheOnlyDest = nullptr; // Don't modify the first branch to TheOnlyDest
else
Succ->removePredecessor(BB);
}
@@ -231,7 +235,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
if (IBI->getDestination(i) == TheOnlyDest)
- TheOnlyDest = 0;
+ TheOnlyDest = nullptr;
else
IBI->getDestination(i)->removePredecessor(IBI->getParent());
}
@@ -329,7 +333,7 @@ llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V,
// dead as we go.
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
Value *OpV = I->getOperand(i);
- I->setOperand(i, 0);
+ I->setOperand(i, nullptr);
if (!OpV->use_empty()) continue;
@@ -352,8 +356,8 @@ llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V,
/// true when there are no uses or multiple uses that all refer to the same
/// value.
static bool areAllUsesEqual(Instruction *I) {
- Value::use_iterator UI = I->use_begin();
- Value::use_iterator UE = I->use_end();
+ Value::user_iterator UI = I->user_begin();
+ Value::user_iterator UE = I->user_end();
if (UI == UE)
return true;
@@ -374,7 +378,7 @@ bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
const TargetLibraryInfo *TLI) {
SmallPtrSet<Instruction*, 4> Visited;
for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
- I = cast<Instruction>(*I->use_begin())) {
+ I = cast<Instruction>(*I->user_begin())) {
if (I->use_empty())
return RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
@@ -505,12 +509,18 @@ void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB, Pass *P) {
PredBB->getTerminator()->eraseFromParent();
DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());
+ // If the PredBB is the entry block of the function, move DestBB up to
+ // become the entry block after we erase PredBB.
+ if (PredBB == &DestBB->getParent()->getEntryBlock())
+ DestBB->moveAfter(PredBB);
+
if (P) {
- DominatorTree *DT = P->getAnalysisIfAvailable<DominatorTree>();
- if (DT) {
- BasicBlock *PredBBIDom = DT->getNode(PredBB)->getIDom()->getBlock();
- DT->changeImmediateDominator(DestBB, PredBBIDom);
- DT->eraseNode(PredBB);
+ if (DominatorTreeWrapperPass *DTWP =
+ P->getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
+ DominatorTree &DT = DTWP->getDomTree();
+ BasicBlock *PredBBIDom = DT.getNode(PredBB)->getIDom()->getBlock();
+ DT.changeImmediateDominator(DestBB, PredBBIDom);
+ DT.eraseNode(PredBB);
}
}
// Nuke BB.
@@ -749,10 +759,9 @@ bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB) {
if (!Succ->getSinglePredecessor()) {
BasicBlock::iterator BBI = BB->begin();
while (isa<PHINode>(*BBI)) {
- for (Value::use_iterator UI = BBI->use_begin(), E = BBI->use_end();
- UI != E; ++UI) {
- if (PHINode* PN = dyn_cast<PHINode>(*UI)) {
- if (PN->getIncomingBlock(UI) != BB)
+ for (Use &U : BBI->uses()) {
+ if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
+ if (PN->getIncomingBlock(U) != BB)
return false;
} else {
return false;
@@ -892,24 +901,26 @@ static unsigned enforceKnownAlignment(Value *V, unsigned Align,
return PrefAlign;
}
- if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ if (auto *GO = dyn_cast<GlobalObject>(V)) {
// If there is a large requested alignment and we can, bump up the alignment
// of the global.
- if (GV->isDeclaration()) return Align;
+ if (GO->isDeclaration())
+ return Align;
// If the memory we set aside for the global may not be the memory used by
// the final program then it is impossible for us to reliably enforce the
// preferred alignment.
- if (GV->isWeakForLinker()) return Align;
+ if (GO->isWeakForLinker())
+ return Align;
- if (GV->getAlignment() >= PrefAlign)
- return GV->getAlignment();
+ if (GO->getAlignment() >= PrefAlign)
+ return GO->getAlignment();
// We can only increase the alignment of the global if it has no alignment
// specified or if it is not assigned a section. If it is assigned a
// section, the global could be densely packed with other objects in the
// section, increasing the alignment could cause padding issues.
- if (!GV->hasSection() || GV->getAlignment() == 0)
- GV->setAlignment(PrefAlign);
- return GV->getAlignment();
+ if (!GO->hasSection() || GO->getAlignment() == 0)
+ GO->setAlignment(PrefAlign);
+ return GO->getAlignment();
}
return Align;
@@ -926,7 +937,7 @@ unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
unsigned BitWidth = DL ? DL->getPointerTypeSizeInBits(V->getType()) : 64;
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- ComputeMaskedBits(V, KnownZero, KnownOne, DL);
+ computeKnownBits(V, KnownZero, KnownOne, DL);
unsigned TrailZ = KnownZero.countTrailingOnes();
// Avoid trouble with ridiculously large TrailZ values, such as
@@ -979,10 +990,10 @@ bool llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
if (LdStHasDebugValue(DIVar, SI))
return true;
- Instruction *DbgVal = NULL;
+ Instruction *DbgVal = nullptr;
// If an argument is zero extended then use argument directly. The ZExt
// may be zapped by an optimization pass in future.
- Argument *ExtendedArg = NULL;
+ Argument *ExtendedArg = nullptr;
if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
ExtendedArg = dyn_cast<Argument>(ZExt->getOperand(0));
if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
@@ -991,14 +1002,7 @@ bool llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
DbgVal = Builder.insertDbgValueIntrinsic(ExtendedArg, 0, DIVar, SI);
else
DbgVal = Builder.insertDbgValueIntrinsic(SI->getOperand(0), 0, DIVar, SI);
-
- // Propagate any debug metadata from the store onto the dbg.value.
- DebugLoc SIDL = SI->getDebugLoc();
- if (!SIDL.isUnknown())
- DbgVal->setDebugLoc(SIDL);
- // Otherwise propagate debug metadata from dbg.declare.
- else
- DbgVal->setDebugLoc(DDI->getDebugLoc());
+ DbgVal->setDebugLoc(DDI->getDebugLoc());
return true;
}
@@ -1018,51 +1022,54 @@ bool llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
Instruction *DbgVal =
Builder.insertDbgValueIntrinsic(LI->getOperand(0), 0,
DIVar, LI);
-
- // Propagate any debug metadata from the store onto the dbg.value.
- DebugLoc LIDL = LI->getDebugLoc();
- if (!LIDL.isUnknown())
- DbgVal->setDebugLoc(LIDL);
- // Otherwise propagate debug metadata from dbg.declare.
- else
- DbgVal->setDebugLoc(DDI->getDebugLoc());
+ DbgVal->setDebugLoc(DDI->getDebugLoc());
return true;
}
+/// Determine whether this alloca is either a VLA or an array.
+static bool isArray(AllocaInst *AI) {
+ return AI->isArrayAllocation() ||
+ AI->getType()->getElementType()->isArrayTy();
+}
+
/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
/// of llvm.dbg.value intrinsics.
bool llvm::LowerDbgDeclare(Function &F) {
DIBuilder DIB(*F.getParent());
SmallVector<DbgDeclareInst *, 4> Dbgs;
- for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI)
- for (BasicBlock::iterator BI = FI->begin(), BE = FI->end(); BI != BE; ++BI) {
- if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(BI))
+ for (auto &FI : F)
+ for (BasicBlock::iterator BI : FI)
+ if (auto DDI = dyn_cast<DbgDeclareInst>(BI))
Dbgs.push_back(DDI);
- }
+
if (Dbgs.empty())
return false;
- for (SmallVectorImpl<DbgDeclareInst *>::iterator I = Dbgs.begin(),
- E = Dbgs.end(); I != E; ++I) {
- DbgDeclareInst *DDI = *I;
+ for (auto &I : Dbgs) {
+ DbgDeclareInst *DDI = I;
AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
// If this is an alloca for a scalar variable, insert a dbg.value
// at each load and store to the alloca and erase the dbg.declare.
- if (AI && !AI->isArrayAllocation()) {
-
- // We only remove the dbg.declare intrinsic if all uses are
- // converted to dbg.value intrinsics.
- bool RemoveDDI = true;
- for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
- UI != E; ++UI)
- if (StoreInst *SI = dyn_cast<StoreInst>(*UI))
+ // The dbg.values allow tracking a variable even if it is not
+ // stored on the stack, while the dbg.declare can only describe
+ // the stack slot (and at a lexical-scope granularity). Later
+ // passes will attempt to elide the stack slot.
+ if (AI && !isArray(AI)) {
+ for (User *U : AI->users())
+ if (StoreInst *SI = dyn_cast<StoreInst>(U))
ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
- else if (LoadInst *LI = dyn_cast<LoadInst>(*UI))
+ else if (LoadInst *LI = dyn_cast<LoadInst>(U))
ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
- else
- RemoveDDI = false;
- if (RemoveDDI)
- DDI->eraseFromParent();
+ else if (CallInst *CI = dyn_cast<CallInst>(U)) {
+ // This is a call by-value or some other instruction that
+ // takes a pointer to the variable. Insert a *value*
+ // intrinsic that describes the alloca.
+ auto DbgVal =
+ DIB.insertDbgValueIntrinsic(AI, 0,
+ DIVariable(DDI->getVariable()), CI);
+ DbgVal->setDebugLoc(DDI->getDebugLoc());
+ }
+ DDI->eraseFromParent();
}
}
return true;
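The new CallInst branch covers the case where the variable's address escapes into a call: the alloca cannot be rewritten away, but a dbg.value describing it is still emitted at the call site. A source-level example of the situation (a sketch of typical input; takes_ptr and demo are made-up names):

    // With -g, clang emits an alloca plus llvm.dbg.declare for 'v'. Because &v
    // escapes into the call, LowerDbgDeclare can no longer drop the slot, but it
    // now inserts a dbg.value for the alloca before the call.
    void takes_ptr(int *p);
    void demo() {
      int v = 42;
      takes_ptr(&v);
    }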
@@ -1072,12 +1079,11 @@ bool llvm::LowerDbgDeclare(Function &F) {
/// alloca 'V', if any.
DbgDeclareInst *llvm::FindAllocaDbgDeclare(Value *V) {
if (MDNode *DebugNode = MDNode::getIfExists(V->getContext(), V))
- for (Value::use_iterator UI = DebugNode->use_begin(),
- E = DebugNode->use_end(); UI != E; ++UI)
- if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
+ for (User *U : DebugNode->users())
+ if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
return DDI;
- return 0;
+ return nullptr;
}
bool llvm::replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
diff --git a/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp b/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp
index 6d5f16c..ef42291 100644
--- a/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp
@@ -37,331 +37,72 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "loop-simplify"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/DependenceAnalysis.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
using namespace llvm;
+#define DEBUG_TYPE "loop-simplify"
+
STATISTIC(NumInserted, "Number of pre-header or exit blocks inserted");
STATISTIC(NumNested , "Number of nested loops split out");
-namespace {
- struct LoopSimplify : public LoopPass {
- static char ID; // Pass identification, replacement for typeid
- LoopSimplify() : LoopPass(ID) {
- initializeLoopSimplifyPass(*PassRegistry::getPassRegistry());
- }
-
- // AA - If we have an alias analysis object to update, this is it, otherwise
- // this is null.
- AliasAnalysis *AA;
- LoopInfo *LI;
- DominatorTree *DT;
- ScalarEvolution *SE;
- Loop *L;
- virtual bool runOnLoop(Loop *L, LPPassManager &LPM);
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- // We need loop information to identify the loops...
- AU.addRequired<DominatorTree>();
- AU.addPreserved<DominatorTree>();
-
- AU.addRequired<LoopInfo>();
- AU.addPreserved<LoopInfo>();
-
- AU.addPreserved<AliasAnalysis>();
- AU.addPreserved<ScalarEvolution>();
- AU.addPreserved<DependenceAnalysis>();
- AU.addPreservedID(BreakCriticalEdgesID); // No critical edges added.
- }
-
- /// verifyAnalysis() - Verify LoopSimplifyForm's guarantees.
- void verifyAnalysis() const;
-
- private:
- bool ProcessLoop(Loop *L, LPPassManager &LPM);
- BasicBlock *RewriteLoopExitBlock(Loop *L, BasicBlock *Exit);
- Loop *SeparateNestedLoop(Loop *L, LPPassManager &LPM,
- BasicBlock *Preheader);
- BasicBlock *InsertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader);
- };
-}
-
-static void PlaceSplitBlockCarefully(BasicBlock *NewBB,
- SmallVectorImpl<BasicBlock*> &SplitPreds,
- Loop *L);
-
-char LoopSimplify::ID = 0;
-INITIALIZE_PASS_BEGIN(LoopSimplify, "loop-simplify",
- "Canonicalize natural loops", true, false)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
-INITIALIZE_PASS_DEPENDENCY(LoopInfo)
-INITIALIZE_PASS_END(LoopSimplify, "loop-simplify",
- "Canonicalize natural loops", true, false)
-
-// Publicly exposed interface to pass...
-char &llvm::LoopSimplifyID = LoopSimplify::ID;
-Pass *llvm::createLoopSimplifyPass() { return new LoopSimplify(); }
-
-/// runOnLoop - Run down all loops in the CFG (recursively, but we could do
-/// it in any convenient order) inserting preheaders...
-///
-bool LoopSimplify::runOnLoop(Loop *l, LPPassManager &LPM) {
- L = l;
- bool Changed = false;
- LI = &getAnalysis<LoopInfo>();
- AA = getAnalysisIfAvailable<AliasAnalysis>();
- DT = &getAnalysis<DominatorTree>();
- SE = getAnalysisIfAvailable<ScalarEvolution>();
-
- Changed |= ProcessLoop(L, LPM);
-
- return Changed;
-}
-
-/// ProcessLoop - Walk the loop structure in depth first order, ensuring that
-/// all loops have preheaders.
-///
-bool LoopSimplify::ProcessLoop(Loop *L, LPPassManager &LPM) {
- bool Changed = false;
-ReprocessLoop:
-
- // Check to see that no blocks (other than the header) in this loop have
- // predecessors that are not in the loop. This is not valid for natural
- // loops, but can occur if the blocks are unreachable. Since they are
- // unreachable we can just shamelessly delete those CFG edges!
- for (Loop::block_iterator BB = L->block_begin(), E = L->block_end();
- BB != E; ++BB) {
- if (*BB == L->getHeader()) continue;
-
- SmallPtrSet<BasicBlock*, 4> BadPreds;
- for (pred_iterator PI = pred_begin(*BB),
- PE = pred_end(*BB); PI != PE; ++PI) {
- BasicBlock *P = *PI;
- if (!L->contains(P))
- BadPreds.insert(P);
- }
-
- // Delete each unique out-of-loop (and thus dead) predecessor.
- for (SmallPtrSet<BasicBlock*, 4>::iterator I = BadPreds.begin(),
- E = BadPreds.end(); I != E; ++I) {
-
- DEBUG(dbgs() << "LoopSimplify: Deleting edge from dead predecessor "
- << (*I)->getName() << "\n");
-
- // Inform each successor of each dead pred.
- for (succ_iterator SI = succ_begin(*I), SE = succ_end(*I); SI != SE; ++SI)
- (*SI)->removePredecessor(*I);
- // Zap the dead pred's terminator and replace it with unreachable.
- TerminatorInst *TI = (*I)->getTerminator();
- TI->replaceAllUsesWith(UndefValue::get(TI->getType()));
- (*I)->getTerminator()->eraseFromParent();
- new UnreachableInst((*I)->getContext(), *I);
- Changed = true;
- }
- }
-
- // If there are exiting blocks with branches on undef, resolve the undef in
- // the direction which will exit the loop. This will help simplify loop
- // trip count computations.
- SmallVector<BasicBlock*, 8> ExitingBlocks;
- L->getExitingBlocks(ExitingBlocks);
- for (SmallVectorImpl<BasicBlock *>::iterator I = ExitingBlocks.begin(),
- E = ExitingBlocks.end(); I != E; ++I)
- if (BranchInst *BI = dyn_cast<BranchInst>((*I)->getTerminator()))
- if (BI->isConditional()) {
- if (UndefValue *Cond = dyn_cast<UndefValue>(BI->getCondition())) {
-
- DEBUG(dbgs() << "LoopSimplify: Resolving \"br i1 undef\" to exit in "
- << (*I)->getName() << "\n");
-
- BI->setCondition(ConstantInt::get(Cond->getType(),
- !L->contains(BI->getSuccessor(0))));
-
- // This may make the loop analyzable, force SCEV recomputation.
- if (SE)
- SE->forgetLoop(L);
-
- Changed = true;
- }
- }
-
- // Does the loop already have a preheader? If so, don't insert one.
- BasicBlock *Preheader = L->getLoopPreheader();
- if (!Preheader) {
- Preheader = InsertPreheaderForLoop(L, this);
- if (Preheader) {
- ++NumInserted;
- Changed = true;
- }
- }
-
- // Next, check to make sure that all exit nodes of the loop only have
- // predecessors that are inside of the loop. This check guarantees that the
- // loop preheader/header will dominate the exit blocks. If the exit block has
- // predecessors from outside of the loop, split the edge now.
- SmallVector<BasicBlock*, 8> ExitBlocks;
- L->getExitBlocks(ExitBlocks);
-
- SmallSetVector<BasicBlock *, 8> ExitBlockSet(ExitBlocks.begin(),
- ExitBlocks.end());
- for (SmallSetVector<BasicBlock *, 8>::iterator I = ExitBlockSet.begin(),
- E = ExitBlockSet.end(); I != E; ++I) {
- BasicBlock *ExitBlock = *I;
- for (pred_iterator PI = pred_begin(ExitBlock), PE = pred_end(ExitBlock);
- PI != PE; ++PI)
- // Must be exactly this loop: no subloops, parent loops, or non-loop preds
- // allowed.
- if (!L->contains(*PI)) {
- if (RewriteLoopExitBlock(L, ExitBlock)) {
- ++NumInserted;
- Changed = true;
- }
- break;
- }
- }
-
- // If the header has more than two predecessors at this point (from the
- // preheader and from multiple backedges), we must adjust the loop.
- BasicBlock *LoopLatch = L->getLoopLatch();
- if (!LoopLatch) {
- // If this is really a nested loop, rip it out into a child loop. Don't do
- // this for loops with a giant number of backedges, just factor them into a
- // common backedge instead.
- if (L->getNumBackEdges() < 8) {
- if (SeparateNestedLoop(L, LPM, Preheader)) {
- ++NumNested;
- // This is a big restructuring change, reprocess the whole loop.
- Changed = true;
- // GCC doesn't tail recursion eliminate this.
- goto ReprocessLoop;
- }
- }
-
- // If we either couldn't, or didn't want to, identify nesting of the loops,
- // insert a new block that all backedges target, then make it jump to the
- // loop header.
- LoopLatch = InsertUniqueBackedgeBlock(L, Preheader);
- if (LoopLatch) {
- ++NumInserted;
- Changed = true;
- }
+// If the new block isn't already placed right after one of the 'outside'
+// blocks, move it there. This prevents the preheader from being placed inside
+// the loop body, e.g. when the loop hasn't been rotated.
+static void placeSplitBlockCarefully(BasicBlock *NewBB,
+ SmallVectorImpl<BasicBlock *> &SplitPreds,
+ Loop *L) {
+ // Check to see if NewBB is already well placed.
+ Function::iterator BBI = NewBB; --BBI;
+ for (unsigned i = 0, e = SplitPreds.size(); i != e; ++i) {
+ if (&*BBI == SplitPreds[i])
+ return;
}
- // Scan over the PHI nodes in the loop header. Since they now have only two
- // incoming values (the loop is canonicalized), we may have simplified the PHI
- // down to 'X = phi [X, Y]', which should be replaced with 'Y'.
- PHINode *PN;
- for (BasicBlock::iterator I = L->getHeader()->begin();
- (PN = dyn_cast<PHINode>(I++)); )
- if (Value *V = SimplifyInstruction(PN, 0, 0, DT)) {
- if (AA) AA->deleteValue(PN);
- if (SE) SE->forgetValue(PN);
- PN->replaceAllUsesWith(V);
- PN->eraseFromParent();
- }
-
- // If this loop has multiple exits and the exits all go to the same
- // block, attempt to merge the exits. This helps several passes, such
- // as LoopRotation, which do not support loops with multiple exits.
- // SimplifyCFG also does this (and this code uses the same utility
- // function), however this code is loop-aware, where SimplifyCFG is
- // not. That gives it the advantage of being able to hoist
- // loop-invariant instructions out of the way to open up more
- // opportunities, and the disadvantage of having the responsibility
- // to preserve dominator information.
- bool UniqueExit = true;
- if (!ExitBlocks.empty())
- for (unsigned i = 1, e = ExitBlocks.size(); i != e; ++i)
- if (ExitBlocks[i] != ExitBlocks[0]) {
- UniqueExit = false;
- break;
- }
- if (UniqueExit) {
- for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
- BasicBlock *ExitingBlock = ExitingBlocks[i];
- if (!ExitingBlock->getSinglePredecessor()) continue;
- BranchInst *BI = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
- if (!BI || !BI->isConditional()) continue;
- CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition());
- if (!CI || CI->getParent() != ExitingBlock) continue;
-
- // Attempt to hoist out all instructions except for the
- // comparison and the branch.
- bool AllInvariant = true;
- for (BasicBlock::iterator I = ExitingBlock->begin(); &*I != BI; ) {
- Instruction *Inst = I++;
- // Skip debug info intrinsics.
- if (isa<DbgInfoIntrinsic>(Inst))
- continue;
- if (Inst == CI)
- continue;
- if (!L->makeLoopInvariant(Inst, Changed,
- Preheader ? Preheader->getTerminator() : 0)) {
- AllInvariant = false;
- break;
- }
- }
- if (!AllInvariant) continue;
-
- // The block has now been cleared of all instructions except for
- // a comparison and a conditional branch. SimplifyCFG may be able
- // to fold it now.
- if (!FoldBranchToCommonDest(BI)) continue;
-
- // Success. The block is now dead, so remove it from the loop,
- // update the dominator tree and delete it.
- DEBUG(dbgs() << "LoopSimplify: Eliminating exiting block "
- << ExitingBlock->getName() << "\n");
-
- // If any reachable control flow within this loop has changed, notify
- // ScalarEvolution. Currently assume the parent loop doesn't change
- // (spliting edges doesn't count). If blocks, CFG edges, or other values
- // in the parent loop change, then we need call to forgetLoop() for the
- // parent instead.
- if (SE)
- SE->forgetLoop(L);
-
- assert(pred_begin(ExitingBlock) == pred_end(ExitingBlock));
- Changed = true;
- LI->removeBlock(ExitingBlock);
-
- DomTreeNode *Node = DT->getNode(ExitingBlock);
- const std::vector<DomTreeNodeBase<BasicBlock> *> &Children =
- Node->getChildren();
- while (!Children.empty()) {
- DomTreeNode *Child = Children.front();
- DT->changeImmediateDominator(Child, Node->getIDom());
- }
- DT->eraseNode(ExitingBlock);
+ // If it isn't already after an outside block, move it after one. This is
+ // always good as it makes the uncond branch from the outside block into a
+ // fall-through.
- BI->getSuccessor(0)->removePredecessor(ExitingBlock);
- BI->getSuccessor(1)->removePredecessor(ExitingBlock);
- ExitingBlock->eraseFromParent();
+ // Figure out *which* outside block to put this after. Prefer an outside
+ // block that neighbors a BB actually in the loop.
+ BasicBlock *FoundBB = nullptr;
+ for (unsigned i = 0, e = SplitPreds.size(); i != e; ++i) {
+ Function::iterator BBI = SplitPreds[i];
+ if (++BBI != NewBB->getParent()->end() &&
+ L->contains(BBI)) {
+ FoundBB = SplitPreds[i];
+ break;
}
}
- return Changed;
+ // If our heuristic for a *good* bb to place this after doesn't find
+ // anything, just pick something. It's likely better than leaving it within
+ // the loop.
+ if (!FoundBB)
+ FoundBB = SplitPreds[0];
+ NewBB->moveAfter(FoundBB);
}
/// InsertPreheaderForLoop - Once we discover that a loop doesn't have a
@@ -380,7 +121,7 @@ BasicBlock *llvm::InsertPreheaderForLoop(Loop *L, Pass *PP) {
// If the loop is branched to from an indirect branch, we won't
// be able to fully transform the loop, because it prohibits
// edge splitting.
- if (isa<IndirectBrInst>(P->getTerminator())) return 0;
+ if (isa<IndirectBrInst>(P->getTerminator())) return nullptr;
// Keep track of it.
OutsideBlocks.push_back(P);
@@ -406,38 +147,39 @@ BasicBlock *llvm::InsertPreheaderForLoop(Loop *L, Pass *PP) {
// Make sure that NewBB is put someplace intelligent, which doesn't mess up
// code layout too horribly.
- PlaceSplitBlockCarefully(PreheaderBB, OutsideBlocks, L);
+ placeSplitBlockCarefully(PreheaderBB, OutsideBlocks, L);
return PreheaderBB;
}
-/// RewriteLoopExitBlock - Ensure that the loop preheader dominates all exit
-/// blocks. This method is used to split exit blocks that have predecessors
-/// outside of the loop.
-BasicBlock *LoopSimplify::RewriteLoopExitBlock(Loop *L, BasicBlock *Exit) {
+/// \brief Ensure that the loop preheader dominates all exit blocks.
+///
+/// This method is used to split exit blocks that have predecessors outside of
+/// the loop.
+static BasicBlock *rewriteLoopExitBlock(Loop *L, BasicBlock *Exit, Pass *PP) {
SmallVector<BasicBlock*, 8> LoopBlocks;
for (pred_iterator I = pred_begin(Exit), E = pred_end(Exit); I != E; ++I) {
BasicBlock *P = *I;
if (L->contains(P)) {
// Don't do this if the loop is exited via an indirect branch.
- if (isa<IndirectBrInst>(P->getTerminator())) return 0;
+ if (isa<IndirectBrInst>(P->getTerminator())) return nullptr;
LoopBlocks.push_back(P);
}
}
assert(!LoopBlocks.empty() && "No edges coming in from outside the loop?");
- BasicBlock *NewExitBB = 0;
+ BasicBlock *NewExitBB = nullptr;
if (Exit->isLandingPad()) {
SmallVector<BasicBlock*, 2> NewBBs;
SplitLandingPadPredecessors(Exit, ArrayRef<BasicBlock*>(&LoopBlocks[0],
LoopBlocks.size()),
".loopexit", ".nonloopexit",
- this, NewBBs);
+ PP, NewBBs);
NewExitBB = NewBBs[0];
} else {
- NewExitBB = SplitBlockPredecessors(Exit, LoopBlocks, ".loopexit", this);
+ NewExitBB = SplitBlockPredecessors(Exit, LoopBlocks, ".loopexit", PP);
}
DEBUG(dbgs() << "LoopSimplify: Creating dedicated exit block "
@@ -445,33 +187,33 @@ BasicBlock *LoopSimplify::RewriteLoopExitBlock(Loop *L, BasicBlock *Exit) {
return NewExitBB;
}
-/// AddBlockAndPredsToSet - Add the specified block, and all of its
-/// predecessors, to the specified set, if it's not already in there. Stop
-/// predecessor traversal when we reach StopBlock.
-static void AddBlockAndPredsToSet(BasicBlock *InputBB, BasicBlock *StopBlock,
+/// Add the specified block, and all of its predecessors, to the specified set,
+/// if it's not already in there. Stop predecessor traversal when we reach
+/// StopBlock.
+static void addBlockAndPredsToSet(BasicBlock *InputBB, BasicBlock *StopBlock,
std::set<BasicBlock*> &Blocks) {
- std::vector<BasicBlock *> WorkList;
- WorkList.push_back(InputBB);
+ SmallVector<BasicBlock *, 8> Worklist;
+ Worklist.push_back(InputBB);
do {
- BasicBlock *BB = WorkList.back(); WorkList.pop_back();
+ BasicBlock *BB = Worklist.pop_back_val();
if (Blocks.insert(BB).second && BB != StopBlock)
// If BB is not already processed and it is not a stop block then
// insert its predecessor in the work list
for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
BasicBlock *WBB = *I;
- WorkList.push_back(WBB);
+ Worklist.push_back(WBB);
}
- } while(!WorkList.empty());
+ } while (!Worklist.empty());
}
-/// FindPHIToPartitionLoops - The first part of loop-nestification is to find a
-/// PHI node that tells us how to partition the loops.
-static PHINode *FindPHIToPartitionLoops(Loop *L, DominatorTree *DT,
- AliasAnalysis *AA, LoopInfo *LI) {
+/// \brief The first part of loop-nestification is to find a PHI node that tells
+/// us how to partition the loops.
+static PHINode *findPHIToPartitionLoops(Loop *L, AliasAnalysis *AA,
+ DominatorTree *DT) {
for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ) {
PHINode *PN = cast<PHINode>(I);
++I;
- if (Value *V = SimplifyInstruction(PN, 0, 0, DT)) {
+ if (Value *V = SimplifyInstruction(PN, nullptr, nullptr, DT)) {
// This is a degenerate PHI already, don't modify it!
PN->replaceAllUsesWith(V);
if (AA) AA->deleteValue(PN);
@@ -486,49 +228,13 @@ static PHINode *FindPHIToPartitionLoops(Loop *L, DominatorTree *DT,
// We found something tasty to remove.
return PN;
}
- return 0;
+ return nullptr;
}
-// PlaceSplitBlockCarefully - If the block isn't already, move the new block to
-// right after some 'outside block' block. This prevents the preheader from
-// being placed inside the loop body, e.g. when the loop hasn't been rotated.
-void PlaceSplitBlockCarefully(BasicBlock *NewBB,
- SmallVectorImpl<BasicBlock*> &SplitPreds,
- Loop *L) {
- // Check to see if NewBB is already well placed.
- Function::iterator BBI = NewBB; --BBI;
- for (unsigned i = 0, e = SplitPreds.size(); i != e; ++i) {
- if (&*BBI == SplitPreds[i])
- return;
- }
-
- // If it isn't already after an outside block, move it after one. This is
- // always good as it makes the uncond branch from the outside block into a
- // fall-through.
-
- // Figure out *which* outside block to put this after. Prefer an outside
- // block that neighbors a BB actually in the loop.
- BasicBlock *FoundBB = 0;
- for (unsigned i = 0, e = SplitPreds.size(); i != e; ++i) {
- Function::iterator BBI = SplitPreds[i];
- if (++BBI != NewBB->getParent()->end() &&
- L->contains(BBI)) {
- FoundBB = SplitPreds[i];
- break;
- }
- }
-
- // If our heuristic for a *good* bb to place this after doesn't find
- // anything, just pick something. It's likely better than leaving it within
- // the loop.
- if (!FoundBB)
- FoundBB = SplitPreds[0];
- NewBB->moveAfter(FoundBB);
-}
-
-
-/// SeparateNestedLoop - If this loop has multiple backedges, try to pull one of
-/// them out into a nested loop. This is important for code that looks like
+/// \brief If this loop has multiple backedges, try to pull one of them out into
+/// a nested loop.
+///
+/// This is important for code that looks like
/// this:
///
/// Loop:
@@ -544,18 +250,19 @@ void PlaceSplitBlockCarefully(BasicBlock *NewBB,
/// If we are able to separate out a loop, return the new outer loop that was
/// created.
///
-Loop *LoopSimplify::SeparateNestedLoop(Loop *L, LPPassManager &LPM,
- BasicBlock *Preheader) {
+static Loop *separateNestedLoop(Loop *L, BasicBlock *Preheader,
+ AliasAnalysis *AA, DominatorTree *DT,
+ LoopInfo *LI, ScalarEvolution *SE, Pass *PP) {
// Don't try to separate loops without a preheader.
if (!Preheader)
- return 0;
+ return nullptr;
// The header is not a landing pad; preheader insertion should ensure this.
assert(!L->getHeader()->isLandingPad() &&
"Can't insert backedge to landing pad");
- PHINode *PN = FindPHIToPartitionLoops(L, DT, AA, LI);
- if (PN == 0) return 0; // No known way to partition.
+ PHINode *PN = findPHIToPartitionLoops(L, AA, DT);
+ if (!PN) return nullptr; // No known way to partition.
// Pull out all predecessors that have varying values in the loop. This
// handles the case when a PHI node has multiple instances of itself as
@@ -566,7 +273,7 @@ Loop *LoopSimplify::SeparateNestedLoop(Loop *L, LPPassManager &LPM,
!L->contains(PN->getIncomingBlock(i))) {
// We can't split indirectbr edges.
if (isa<IndirectBrInst>(PN->getIncomingBlock(i)->getTerminator()))
- return 0;
+ return nullptr;
OuterLoopPreds.push_back(PN->getIncomingBlock(i));
}
}
@@ -580,11 +287,11 @@ Loop *LoopSimplify::SeparateNestedLoop(Loop *L, LPPassManager &LPM,
BasicBlock *Header = L->getHeader();
BasicBlock *NewBB =
- SplitBlockPredecessors(Header, OuterLoopPreds, ".outer", this);
+ SplitBlockPredecessors(Header, OuterLoopPreds, ".outer", PP);
// Make sure that NewBB is put someplace intelligent, which doesn't mess up
// code layout too horribly.
- PlaceSplitBlockCarefully(NewBB, OuterLoopPreds, L);
+ placeSplitBlockCarefully(NewBB, OuterLoopPreds, L);
// Create the new outer loop.
Loop *NewOuter = new Loop();
@@ -598,9 +305,6 @@ Loop *LoopSimplify::SeparateNestedLoop(Loop *L, LPPassManager &LPM,
// L is now a subloop of our outer loop.
NewOuter->addChildLoop(L);
- // Add the new loop to the pass manager queue.
- LPM.insertLoopIntoQueue(NewOuter);
-
for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
I != E; ++I)
NewOuter->addBlockEntry(*I);
@@ -615,7 +319,7 @@ Loop *LoopSimplify::SeparateNestedLoop(Loop *L, LPPassManager &LPM,
for (pred_iterator PI=pred_begin(Header), E = pred_end(Header); PI!=E; ++PI) {
BasicBlock *P = *PI;
if (DT->dominates(Header, P))
- AddBlockAndPredsToSet(P, Header, BlocksInL);
+ addBlockAndPredsToSet(P, Header, BlocksInL);
}
// Scan all of the loop children of L, moving them to OuterLoop if they are
@@ -643,15 +347,15 @@ Loop *LoopSimplify::SeparateNestedLoop(Loop *L, LPPassManager &LPM,
return NewOuter;
}
-
-
-/// InsertUniqueBackedgeBlock - This method is called when the specified loop
-/// has more than one backedge in it. If this occurs, revector all of these
-/// backedges to target a new basic block and have that block branch to the loop
-/// header. This ensures that loops have exactly one backedge.
+/// \brief This method is called when the specified loop has more than one
+/// backedge in it.
///
-BasicBlock *
-LoopSimplify::InsertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader) {
+/// If this occurs, revector all of these backedges to target a new basic block
+/// and have that block branch to the loop header. This ensures that loops
+/// have exactly one backedge.
+static BasicBlock *insertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader,
+ AliasAnalysis *AA,
+ DominatorTree *DT, LoopInfo *LI) {
assert(L->getNumBackEdges() > 1 && "Must have > 1 backedge!");
// Get information about the loop
@@ -660,7 +364,7 @@ LoopSimplify::InsertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader) {
// Unique backedge insertion currently depends on having a preheader.
if (!Preheader)
- return 0;
+ return nullptr;
// The header is not a landing pad; preheader insertion should ensure this.
assert(!Header->isLandingPad() && "Can't insert backedge to landing pad");
@@ -672,7 +376,7 @@ LoopSimplify::InsertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader) {
// Indirectbr edges cannot be split, so we must fail if we find one.
if (isa<IndirectBrInst>(P->getTerminator()))
- return 0;
+ return nullptr;
if (P != Preheader) BackedgeBlocks.push_back(P);
}
@@ -701,7 +405,7 @@ LoopSimplify::InsertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader) {
// preheader over to the new PHI node.
unsigned PreheaderIdx = ~0U;
bool HasUniqueIncomingValue = true;
- Value *UniqueValue = 0;
+ Value *UniqueValue = nullptr;
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
BasicBlock *IBB = PN->getIncomingBlock(i);
Value *IV = PN->getIncomingValue(i);
@@ -710,7 +414,7 @@ LoopSimplify::InsertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader) {
} else {
NewPN->addIncoming(IV, IBB);
if (HasUniqueIncomingValue) {
- if (UniqueValue == 0)
+ if (!UniqueValue)
UniqueValue = IV;
else if (UniqueValue != IV)
HasUniqueIncomingValue = false;
@@ -762,7 +466,350 @@ LoopSimplify::InsertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader) {
return BEBlock;
}
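
// A minimal sketch, in plain C++ rather than the LLVM API, of the
// canonicalization performed by insertUniqueBackedgeBlock above: every
// in-loop predecessor of the header is re-targeted at one new block, which
// then branches to the header, so the loop ends up with exactly one latch.
// The MiniCFG type and block names are illustrative assumptions only, and
// the PHI rewiring in the header is omitted.
#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct MiniCFG {
  // Successor lists keyed by block name.
  std::map<std::string, std::vector<std::string>> Succs;
};

static void mergeBackedges(MiniCFG &CFG, const std::string &Header,
                           const std::string &Preheader) {
  // Collect every predecessor of the header other than the preheader.
  std::vector<std::string> Backedges;
  for (auto &KV : CFG.Succs)
    if (KV.first != Preheader)
      for (const std::string &S : KV.second)
        if (S == Header)
          Backedges.push_back(KV.first);
  if (Backedges.size() < 2)
    return; // Already has a unique backedge; nothing to do.

  // Create the new backedge block and retarget each old backedge to it.
  const std::string BEBlock = Header + ".backedge";
  for (const std::string &B : Backedges)
    for (std::string &S : CFG.Succs[B])
      if (S == Header)
        S = BEBlock;
  CFG.Succs[BEBlock] = {Header};
}

int main() {
  MiniCFG CFG;
  CFG.Succs["entry"] = {"header"};   // preheader edge
  CFG.Succs["header"] = {"body1", "body2"};
  CFG.Succs["body1"] = {"header"};   // backedge #1
  CFG.Succs["body2"] = {"header"};   // backedge #2
  mergeBackedges(CFG, "header", "entry");
  for (const std::string &S : CFG.Succs["body1"])
    std::printf("body1 -> %s\n", S.c_str()); // body1 -> header.backedge
}
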
-void LoopSimplify::verifyAnalysis() const {
+/// \brief Simplify one loop and queue further loops for simplification.
+///
+/// FIXME: Currently this accepts both lots of analyses that it uses and a raw
+/// Pass pointer. The Pass pointer is used by numerous utilities to update
+/// specific analyses. Rather than a pass it would be much cleaner and more
+/// explicit if they accepted the analysis directly and then updated it.
+static bool simplifyOneLoop(Loop *L, SmallVectorImpl<Loop *> &Worklist,
+ AliasAnalysis *AA, DominatorTree *DT, LoopInfo *LI,
+ ScalarEvolution *SE, Pass *PP,
+ const DataLayout *DL) {
+ bool Changed = false;
+ReprocessLoop:
+
+ // Check to see that no blocks (other than the header) in this loop have
+ // predecessors that are not in the loop. This is not valid for natural
+ // loops, but can occur if the blocks are unreachable. Since they are
+ // unreachable we can just shamelessly delete those CFG edges!
+ for (Loop::block_iterator BB = L->block_begin(), E = L->block_end();
+ BB != E; ++BB) {
+ if (*BB == L->getHeader()) continue;
+
+ SmallPtrSet<BasicBlock*, 4> BadPreds;
+ for (pred_iterator PI = pred_begin(*BB),
+ PE = pred_end(*BB); PI != PE; ++PI) {
+ BasicBlock *P = *PI;
+ if (!L->contains(P))
+ BadPreds.insert(P);
+ }
+
+ // Delete each unique out-of-loop (and thus dead) predecessor.
+ for (SmallPtrSet<BasicBlock*, 4>::iterator I = BadPreds.begin(),
+ E = BadPreds.end(); I != E; ++I) {
+
+ DEBUG(dbgs() << "LoopSimplify: Deleting edge from dead predecessor "
+ << (*I)->getName() << "\n");
+
+ // Inform each successor of each dead pred.
+ for (succ_iterator SI = succ_begin(*I), SE = succ_end(*I); SI != SE; ++SI)
+ (*SI)->removePredecessor(*I);
+ // Zap the dead pred's terminator and replace it with unreachable.
+ TerminatorInst *TI = (*I)->getTerminator();
+ TI->replaceAllUsesWith(UndefValue::get(TI->getType()));
+ (*I)->getTerminator()->eraseFromParent();
+ new UnreachableInst((*I)->getContext(), *I);
+ Changed = true;
+ }
+ }
+
+ // If there are exiting blocks with branches on undef, resolve the undef in
+ // the direction which will exit the loop. This will help simplify loop
+ // trip count computations.
+ SmallVector<BasicBlock*, 8> ExitingBlocks;
+ L->getExitingBlocks(ExitingBlocks);
+ for (SmallVectorImpl<BasicBlock *>::iterator I = ExitingBlocks.begin(),
+ E = ExitingBlocks.end(); I != E; ++I)
+ if (BranchInst *BI = dyn_cast<BranchInst>((*I)->getTerminator()))
+ if (BI->isConditional()) {
+ if (UndefValue *Cond = dyn_cast<UndefValue>(BI->getCondition())) {
+
+ DEBUG(dbgs() << "LoopSimplify: Resolving \"br i1 undef\" to exit in "
+ << (*I)->getName() << "\n");
+
+ BI->setCondition(ConstantInt::get(Cond->getType(),
+ !L->contains(BI->getSuccessor(0))));
+
+ // This may make the loop analyzable, force SCEV recomputation.
+ if (SE)
+ SE->forgetLoop(L);
+
+ Changed = true;
+ }
+ }
+
+ // Does the loop already have a preheader? If so, don't insert one.
+ BasicBlock *Preheader = L->getLoopPreheader();
+ if (!Preheader) {
+ Preheader = InsertPreheaderForLoop(L, PP);
+ if (Preheader) {
+ ++NumInserted;
+ Changed = true;
+ }
+ }
+
+ // Next, check to make sure that all exit nodes of the loop only have
+ // predecessors that are inside of the loop. This check guarantees that the
+ // loop preheader/header will dominate the exit blocks. If the exit block has
+ // predecessors from outside of the loop, split the edge now.
+ SmallVector<BasicBlock*, 8> ExitBlocks;
+ L->getExitBlocks(ExitBlocks);
+
+ SmallSetVector<BasicBlock *, 8> ExitBlockSet(ExitBlocks.begin(),
+ ExitBlocks.end());
+ for (SmallSetVector<BasicBlock *, 8>::iterator I = ExitBlockSet.begin(),
+ E = ExitBlockSet.end(); I != E; ++I) {
+ BasicBlock *ExitBlock = *I;
+ for (pred_iterator PI = pred_begin(ExitBlock), PE = pred_end(ExitBlock);
+ PI != PE; ++PI)
+ // Must be exactly this loop: no subloops, parent loops, or non-loop preds
+ // allowed.
+ if (!L->contains(*PI)) {
+ if (rewriteLoopExitBlock(L, ExitBlock, PP)) {
+ ++NumInserted;
+ Changed = true;
+ }
+ break;
+ }
+ }
+
+ // If the header has more than two predecessors at this point (from the
+ // preheader and from multiple backedges), we must adjust the loop.
+ BasicBlock *LoopLatch = L->getLoopLatch();
+ if (!LoopLatch) {
+ // If this is really a nested loop, rip it out into a child loop. Don't do
+ // this for loops with a giant number of backedges, just factor them into a
+ // common backedge instead.
+ if (L->getNumBackEdges() < 8) {
+ if (Loop *OuterL = separateNestedLoop(L, Preheader, AA, DT, LI, SE, PP)) {
+ ++NumNested;
+ // Enqueue the outer loop as it should be processed next in our
+ // depth-first nest walk.
+ Worklist.push_back(OuterL);
+
+ // This is a big restructuring change, reprocess the whole loop.
+ Changed = true;
+        // GCC doesn't eliminate this tail recursion.
+ // FIXME: It isn't clear we can't rely on LLVM to TRE this.
+ goto ReprocessLoop;
+ }
+ }
+
+ // If we either couldn't, or didn't want to, identify nesting of the loops,
+ // insert a new block that all backedges target, then make it jump to the
+ // loop header.
+ LoopLatch = insertUniqueBackedgeBlock(L, Preheader, AA, DT, LI);
+ if (LoopLatch) {
+ ++NumInserted;
+ Changed = true;
+ }
+ }
+
+ // Scan over the PHI nodes in the loop header. Since they now have only two
+ // incoming values (the loop is canonicalized), we may have simplified the PHI
+ // down to 'X = phi [X, Y]', which should be replaced with 'Y'.
+ PHINode *PN;
+ for (BasicBlock::iterator I = L->getHeader()->begin();
+ (PN = dyn_cast<PHINode>(I++)); )
+ if (Value *V = SimplifyInstruction(PN, nullptr, nullptr, DT)) {
+ if (AA) AA->deleteValue(PN);
+ if (SE) SE->forgetValue(PN);
+ PN->replaceAllUsesWith(V);
+ PN->eraseFromParent();
+ }
+
+ // If this loop has multiple exits and the exits all go to the same
+ // block, attempt to merge the exits. This helps several passes, such
+ // as LoopRotation, which do not support loops with multiple exits.
+ // SimplifyCFG also does this (and this code uses the same utility
+ // function), however this code is loop-aware, where SimplifyCFG is
+ // not. That gives it the advantage of being able to hoist
+ // loop-invariant instructions out of the way to open up more
+ // opportunities, and the disadvantage of having the responsibility
+ // to preserve dominator information.
+ bool UniqueExit = true;
+ if (!ExitBlocks.empty())
+ for (unsigned i = 1, e = ExitBlocks.size(); i != e; ++i)
+ if (ExitBlocks[i] != ExitBlocks[0]) {
+ UniqueExit = false;
+ break;
+ }
+ if (UniqueExit) {
+ for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
+ BasicBlock *ExitingBlock = ExitingBlocks[i];
+ if (!ExitingBlock->getSinglePredecessor()) continue;
+ BranchInst *BI = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
+ if (!BI || !BI->isConditional()) continue;
+ CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition());
+ if (!CI || CI->getParent() != ExitingBlock) continue;
+
+ // Attempt to hoist out all instructions except for the
+ // comparison and the branch.
+ bool AllInvariant = true;
+ bool AnyInvariant = false;
+ for (BasicBlock::iterator I = ExitingBlock->begin(); &*I != BI; ) {
+ Instruction *Inst = I++;
+ // Skip debug info intrinsics.
+ if (isa<DbgInfoIntrinsic>(Inst))
+ continue;
+ if (Inst == CI)
+ continue;
+ if (!L->makeLoopInvariant(Inst, AnyInvariant,
+ Preheader ? Preheader->getTerminator()
+ : nullptr)) {
+ AllInvariant = false;
+ break;
+ }
+ }
+ if (AnyInvariant) {
+ Changed = true;
+ // The loop disposition of all SCEV expressions that depend on any
+ // hoisted values have also changed.
+ if (SE)
+ SE->forgetLoopDispositions(L);
+ }
+ if (!AllInvariant) continue;
+
+ // The block has now been cleared of all instructions except for
+ // a comparison and a conditional branch. SimplifyCFG may be able
+ // to fold it now.
+ if (!FoldBranchToCommonDest(BI, DL)) continue;
+
+ // Success. The block is now dead, so remove it from the loop,
+ // update the dominator tree and delete it.
+ DEBUG(dbgs() << "LoopSimplify: Eliminating exiting block "
+ << ExitingBlock->getName() << "\n");
+
+ // Notify ScalarEvolution before deleting this block. Currently assume the
+    // parent loop doesn't change (splitting edges doesn't count). If blocks,
+    // CFG edges, or other values in the parent loop change, then we need to
+    // call forgetLoop() for the parent instead.
+ if (SE)
+ SE->forgetLoop(L);
+
+ assert(pred_begin(ExitingBlock) == pred_end(ExitingBlock));
+ Changed = true;
+ LI->removeBlock(ExitingBlock);
+
+ DomTreeNode *Node = DT->getNode(ExitingBlock);
+ const std::vector<DomTreeNodeBase<BasicBlock> *> &Children =
+ Node->getChildren();
+ while (!Children.empty()) {
+ DomTreeNode *Child = Children.front();
+ DT->changeImmediateDominator(Child, Node->getIDom());
+ }
+ DT->eraseNode(ExitingBlock);
+
+ BI->getSuccessor(0)->removePredecessor(ExitingBlock);
+ BI->getSuccessor(1)->removePredecessor(ExitingBlock);
+ ExitingBlock->eraseFromParent();
+ }
+ }
+
+ return Changed;
+}
+
+bool llvm::simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, Pass *PP,
+ AliasAnalysis *AA, ScalarEvolution *SE,
+ const DataLayout *DL) {
+ bool Changed = false;
+
+ // Worklist maintains our depth-first queue of loops in this nest to process.
+ SmallVector<Loop *, 4> Worklist;
+ Worklist.push_back(L);
+
+ // Walk the worklist from front to back, pushing newly found sub loops onto
+ // the back. This will let us process loops from back to front in depth-first
+ // order. We can use this simple process because loops form a tree.
+ for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
+ Loop *L2 = Worklist[Idx];
+ for (Loop::iterator I = L2->begin(), E = L2->end(); I != E; ++I)
+ Worklist.push_back(*I);
+ }
+
+ while (!Worklist.empty())
+ Changed |= simplifyOneLoop(Worklist.pop_back_val(), Worklist, AA, DT, LI,
+ SE, PP, DL);
+
+ return Changed;
+}
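
// A small, self-contained sketch of the worklist scheme used by simplifyLoop
// above: phase one appends every loop of the nest in breadth-first order,
// phase two pops from the back so sub-loops are simplified before their
// parents. Plain C++, not the LLVM API; LoopNode and the names below are
// illustrative assumptions.
#include <cstdio>
#include <string>
#include <vector>

struct LoopNode {
  std::string Name;
  std::vector<LoopNode *> SubLoops;
};

static void processNest(LoopNode *Root) {
  std::vector<LoopNode *> Worklist;
  Worklist.push_back(Root);
  // Walk the worklist from front to back, pushing newly found sub-loops onto
  // the back; this is safe because loops form a tree, so nothing is revisited.
  for (size_t Idx = 0; Idx != Worklist.size(); ++Idx)
    for (LoopNode *Sub : Worklist[Idx]->SubLoops)
      Worklist.push_back(Sub);
  // Processing back to front now guarantees children run before parents.
  while (!Worklist.empty()) {
    LoopNode *L = Worklist.back();
    Worklist.pop_back();
    std::printf("simplify %s\n", L->Name.c_str());
  }
}

int main() {
  LoopNode Inner{"inner", {}};
  LoopNode Outer{"outer", {&Inner}};
  processNest(&Outer); // prints "simplify inner" before "simplify outer"
}
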
+
+namespace {
+ struct LoopSimplify : public FunctionPass {
+ static char ID; // Pass identification, replacement for typeid
+ LoopSimplify() : FunctionPass(ID) {
+ initializeLoopSimplifyPass(*PassRegistry::getPassRegistry());
+ }
+
+ // AA - If we have an alias analysis object to update, this is it, otherwise
+ // this is null.
+ AliasAnalysis *AA;
+ DominatorTree *DT;
+ LoopInfo *LI;
+ ScalarEvolution *SE;
+ const DataLayout *DL;
+
+ bool runOnFunction(Function &F) override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ // We need loop information to identify the loops...
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
+
+ AU.addRequired<LoopInfo>();
+ AU.addPreserved<LoopInfo>();
+
+ AU.addPreserved<AliasAnalysis>();
+ AU.addPreserved<ScalarEvolution>();
+ AU.addPreserved<DependenceAnalysis>();
+ AU.addPreservedID(BreakCriticalEdgesID); // No critical edges added.
+ }
+
+ /// verifyAnalysis() - Verify LoopSimplifyForm's guarantees.
+ void verifyAnalysis() const override;
+ };
+}
+
+char LoopSimplify::ID = 0;
+INITIALIZE_PASS_BEGIN(LoopSimplify, "loop-simplify",
+ "Canonicalize natural loops", true, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_END(LoopSimplify, "loop-simplify",
+ "Canonicalize natural loops", true, false)
+
+// Publicly exposed interface to pass...
+char &llvm::LoopSimplifyID = LoopSimplify::ID;
+Pass *llvm::createLoopSimplifyPass() { return new LoopSimplify(); }
+
+/// runOnFunction - Run down all loops in the CFG (recursively, but we could do
+/// it in any convenient order) inserting preheaders...
+///
+bool LoopSimplify::runOnFunction(Function &F) {
+ bool Changed = false;
+ AA = getAnalysisIfAvailable<AliasAnalysis>();
+ LI = &getAnalysis<LoopInfo>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ SE = getAnalysisIfAvailable<ScalarEvolution>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
+
+ // Simplify each loop nest in the function.
+ for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
+ Changed |= simplifyLoop(*I, DT, LI, this, AA, SE, DL);
+
+ return Changed;
+}
+
+// FIXME: Restore this code when we re-enable verification in verifyAnalysis
+// below.
+#if 0
+static void verifyLoop(Loop *L) {
+ // Verify subloops.
+ for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
+ verifyLoop(*I);
+
// It used to be possible to just assert L->isLoopSimplifyForm(), however
// with the introduction of indirectbr, there are now cases where it's
// not possible to transform a loop as necessary. We can at least check
@@ -799,3 +846,15 @@ void LoopSimplify::verifyAnalysis() const {
(void)HasIndBrExiting;
}
}
+#endif
+
+void LoopSimplify::verifyAnalysis() const {
+ // FIXME: This routine is being called mid-way through the loop pass manager
+ // as loop passes destroy this analysis. That's actually fine, but we have no
+ // way of expressing that here. Once all of the passes that destroy this are
+ // hoisted out of the loop pass manager we can add back verification here.
+#if 0
+ for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
+ verifyLoop(*I);
+#endif
+}
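
// A minimal sketch of the "br i1 undef" resolution rule used earlier in this
// file's simplifyOneLoop: pick the constant condition so that the taken
// successor is the one outside the loop, which keeps the trip count
// analyzable. Plain C++ with illustrative names, not the LLVM API.
#include <set>
#include <string>

// Returns the boolean to substitute for the undef condition of
// "br i1 %cond, label %Succ0, label %Succ1".
static bool resolveUndefCondition(const std::set<std::string> &LoopBlocks,
                                  const std::string &Succ0) {
  // If successor 0 lies outside the loop, branching true exits immediately;
  // otherwise branch false so that successor 1 (the exit) is taken instead.
  return LoopBlocks.count(Succ0) == 0;
}

int main() {
  std::set<std::string> Loop{"header", "latch"};
  return resolveUndefCondition(Loop, "exit") ? 0 : 1; // exits: returns 0
}
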
diff --git a/contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp b/contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp
index 162807d..ab1c25a 100644
--- a/contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp
@@ -16,22 +16,29 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "loop-unroll"
#include "llvm/Transforms/Utils/UnrollLoop.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
using namespace llvm;
+#define DEBUG_TYPE "loop-unroll"
+
// TODO: Should these be here or in LoopUnroll?
STATISTIC(NumCompletelyUnrolled, "Number of loops completely unrolled");
STATISTIC(NumUnrolled, "Number of loops unrolled (completely or otherwise)");
@@ -58,18 +65,23 @@ static inline void RemapInstruction(Instruction *I,
/// FoldBlockIntoPredecessor - Folds a basic block into its predecessor if it
/// only has one predecessor, and that predecessor only has one successor.
-/// The LoopInfo Analysis that is passed will be kept consistent.
-/// Returns the new combined block.
-static BasicBlock *FoldBlockIntoPredecessor(BasicBlock *BB, LoopInfo* LI,
- LPPassManager *LPM) {
+/// The LoopInfo Analysis that is passed will be kept consistent. If folding is
+/// successful, references to the containing loop must be removed from
+/// ScalarEvolution by calling ScalarEvolution::forgetLoop because SE may have
+/// references to the eliminated BB. The argument ForgottenLoops contains a set
+/// of loops that have already been forgotten to prevent redundant, expensive
+/// calls to ScalarEvolution::forgetLoop. Returns the new combined block.
+static BasicBlock *
+FoldBlockIntoPredecessor(BasicBlock *BB, LoopInfo* LI, LPPassManager *LPM,
+ SmallPtrSetImpl<Loop *> &ForgottenLoops) {
// Merge basic blocks into their predecessor if there is only one distinct
// pred, and if there is only one distinct successor of the predecessor, and
// if there are no PHI nodes.
BasicBlock *OnlyPred = BB->getSinglePredecessor();
- if (!OnlyPred) return 0;
+ if (!OnlyPred) return nullptr;
if (OnlyPred->getTerminator()->getNumSuccessors() != 1)
- return 0;
+ return nullptr;
DEBUG(dbgs() << "Merging: " << *BB << "into: " << *OnlyPred);
@@ -98,8 +110,10 @@ static BasicBlock *FoldBlockIntoPredecessor(BasicBlock *BB, LoopInfo* LI,
// ScalarEvolution holds references to loop exit blocks.
if (LPM) {
if (ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>()) {
- if (Loop *L = LI->getLoopFor(BB))
- SE->forgetLoop(L);
+ if (Loop *L = LI->getLoopFor(BB)) {
+ if (ForgottenLoops.insert(L))
+ SE->forgetLoop(L);
+ }
}
}
LI->removeBlock(BB);
@@ -137,10 +151,10 @@ static BasicBlock *FoldBlockIntoPredecessor(BasicBlock *BB, LoopInfo* LI,
/// removed from the LoopPassManager as well. LPM can also be NULL.
///
/// This utility preserves LoopInfo. If DominatorTree or ScalarEvolution are
-/// available it must also preserve those analyses.
+/// available from the Pass it must also preserve those analyses.
bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
bool AllowRuntime, unsigned TripMultiple,
- LoopInfo *LI, LPPassManager *LPM) {
+ LoopInfo *LI, Pass *PP, LPPassManager *LPM) {
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader) {
DEBUG(dbgs() << " Can't unroll; loop preheader-insertion failed.\n");
@@ -208,8 +222,8 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
// Notify ScalarEvolution that the loop will be substantially changed,
// if not outright eliminated.
- if (LPM) {
- ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>();
+ if (PP) {
+ ScalarEvolution *SE = PP->getAnalysisIfAvailable<ScalarEvolution>();
if (SE)
SE->forgetLoop(L);
}
@@ -225,18 +239,35 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
(unsigned)GreatestCommonDivisor64(Count, TripMultiple);
}
+ // Report the unrolling decision.
+ DebugLoc LoopLoc = L->getStartLoc();
+ Function *F = Header->getParent();
+ LLVMContext &Ctx = F->getContext();
+
if (CompletelyUnroll) {
DEBUG(dbgs() << "COMPLETELY UNROLLING loop %" << Header->getName()
<< " with trip count " << TripCount << "!\n");
+ emitOptimizationRemark(Ctx, DEBUG_TYPE, *F, LoopLoc,
+ Twine("completely unrolled loop with ") +
+ Twine(TripCount) + " iterations");
} else {
+ auto EmitDiag = [&](const Twine &T) {
+ emitOptimizationRemark(Ctx, DEBUG_TYPE, *F, LoopLoc,
+ "unrolled loop by a factor of " + Twine(Count) +
+ T);
+ };
+
DEBUG(dbgs() << "UNROLLING loop %" << Header->getName()
<< " by " << Count);
if (TripMultiple == 0 || BreakoutTrip != TripMultiple) {
DEBUG(dbgs() << " with a breakout at trip " << BreakoutTrip);
+ EmitDiag(" with a breakout at trip " + Twine(BreakoutTrip));
} else if (TripMultiple != 1) {
DEBUG(dbgs() << " with " << TripMultiple << " trips per branch");
+ EmitDiag(" with " + Twine(TripMultiple) + " trips per branch");
} else if (RuntimeTripCount) {
DEBUG(dbgs() << " with run-time trip count");
+ EmitDiag(" with run-time trip count");
}
DEBUG(dbgs() << "!\n");
}
@@ -400,23 +431,29 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
}
// Merge adjacent basic blocks, if possible.
+ SmallPtrSet<Loop *, 4> ForgottenLoops;
for (unsigned i = 0, e = Latches.size(); i != e; ++i) {
BranchInst *Term = cast<BranchInst>(Latches[i]->getTerminator());
if (Term->isUnconditional()) {
BasicBlock *Dest = Term->getSuccessor(0);
- if (BasicBlock *Fold = FoldBlockIntoPredecessor(Dest, LI, LPM))
+ if (BasicBlock *Fold = FoldBlockIntoPredecessor(Dest, LI, LPM,
+ ForgottenLoops))
std::replace(Latches.begin(), Latches.end(), Dest, Fold);
}
}
- if (LPM) {
+ DominatorTree *DT = nullptr;
+ if (PP) {
// FIXME: Reconstruct dom info, because it is not preserved properly.
// Incrementally updating domtree after loop unrolling would be easy.
- if (DominatorTree *DT = LPM->getAnalysisIfAvailable<DominatorTree>())
- DT->runOnFunction(*L->getHeader()->getParent());
+ if (DominatorTreeWrapperPass *DTWP =
+ PP->getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
+ DT = &DTWP->getDomTree();
+ DT->recalculate(*L->getHeader()->getParent());
+ }
// Simplify any new induction variables in the partially unrolled loop.
- ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>();
+ ScalarEvolution *SE = PP->getAnalysisIfAvailable<ScalarEvolution>();
if (SE && !CompletelyUnroll) {
SmallVector<WeakVH, 16> DeadInsts;
simplifyLoopIVs(L, SE, LPM, DeadInsts);
@@ -449,9 +486,36 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
NumCompletelyUnrolled += CompletelyUnroll;
++NumUnrolled;
+
+ Loop *OuterL = L->getParentLoop();
// Remove the loop from the LoopPassManager if it's completely removed.
- if (CompletelyUnroll && LPM != NULL)
+ if (CompletelyUnroll && LPM != nullptr)
LPM->deleteLoopFromQueue(L);
+ // If we have a pass and a DominatorTree we should re-simplify impacted loops
+ // to ensure subsequent analyses can rely on this form. We want to simplify
+ // at least one layer outside of the loop that was unrolled so that any
+ // changes to the parent loop exposed by the unrolling are considered.
+ if (PP && DT) {
+ if (!OuterL && !CompletelyUnroll)
+ OuterL = L;
+ if (OuterL) {
+ DataLayoutPass *DLP = PP->getAnalysisIfAvailable<DataLayoutPass>();
+ const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
+ ScalarEvolution *SE = PP->getAnalysisIfAvailable<ScalarEvolution>();
+ simplifyLoop(OuterL, DT, LI, PP, /*AliasAnalysis*/ nullptr, SE, DL);
+
+ // LCSSA must be performed on the outermost affected loop. The unrolled
+ // loop's last loop latch is guaranteed to be in the outermost loop after
+ // deleteLoopFromQueue updates LoopInfo.
+ Loop *LatchLoop = LI->getLoopFor(Latches.back());
+ if (!OuterL->contains(LatchLoop))
+ while (OuterL->getParentLoop() != LatchLoop)
+ OuterL = OuterL->getParentLoop();
+
+ formLCSSARecursively(*OuterL, *DT, SE);
+ }
+ }
+
return true;
}
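
// A minimal sketch, under assumed names, of the ForgottenLoops guard added to
// FoldBlockIntoPredecessor above: the expensive invalidation (forgetLoop) is
// issued at most once per loop while blocks are merged. Plain C++, not the
// LLVM API; LoopHandle and ForgetOnce are hypothetical.
#include <cstdio>
#include <functional>
#include <unordered_set>

struct LoopHandle { int Id; };

class ForgetOnce {
  std::unordered_set<int> Forgotten;
  std::function<void(const LoopHandle &)> DoForget;

public:
  explicit ForgetOnce(std::function<void(const LoopHandle &)> F)
      : DoForget(std::move(F)) {}

  // Returns true if this call actually performed the invalidation.
  bool forget(const LoopHandle &L) {
    if (!Forgotten.insert(L.Id).second)
      return false; // Already forgotten; skip the expensive call.
    DoForget(L);
    return true;
  }
};

int main() {
  ForgetOnce FO([](const LoopHandle &L) { std::printf("forget %d\n", L.Id); });
  LoopHandle L{1};
  FO.forget(L); // prints "forget 1"
  FO.forget(L); // no output; the loop was already invalidated
}
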
diff --git a/contrib/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp b/contrib/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
index d801d5f..a96c46a 100644
--- a/contrib/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
@@ -21,7 +21,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "loop-unroll"
#include "llvm/Transforms/Utils/UnrollLoop.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LoopIterator.h"
@@ -37,6 +36,8 @@
using namespace llvm;
+#define DEBUG_TYPE "loop-unroll"
+
STATISTIC(NumRuntimeUnrolled,
"Number of loops unrolled with run-time trip counts");
@@ -58,7 +59,7 @@ static void ConnectProlog(Loop *L, Value *TripCount, unsigned Count,
BasicBlock *OrigPH, BasicBlock *NewPH,
ValueToValueMapTy &LVMap, Pass *P) {
BasicBlock *Latch = L->getLoopLatch();
- assert(Latch != 0 && "Loop must have a latch");
+ assert(Latch && "Loop must have a latch");
// Create a PHI node for each outgoing value from the original loop
// (which means it is an outgoing value from the prolog code too).
@@ -110,7 +111,7 @@ static void ConnectProlog(Loop *L, Value *TripCount, unsigned Count,
new ICmpInst(InsertPt, ICmpInst::ICMP_ULT, TripCount,
ConstantInt::get(TripCount->getType(), Count));
BasicBlock *Exit = L->getUniqueExitBlock();
- assert(Exit != 0 && "Loop must have a single exit block only");
+ assert(Exit && "Loop must have a single exit block only");
// Split the exit to maintain loop canonicalization guarantees
SmallVector<BasicBlock*, 4> Preds(pred_begin(Exit), pred_end(Exit));
if (!Exit->isLandingPad()) {
@@ -232,7 +233,7 @@ bool llvm::UnrollRuntimeLoopProlog(Loop *L, unsigned Count, LoopInfo *LI,
// Make sure the loop is in canonical form, and there is a single
// exit block only.
- if (!L->isLoopSimplifyForm() || L->getUniqueExitBlock() == 0)
+ if (!L->isLoopSimplifyForm() || !L->getUniqueExitBlock())
return false;
// Use Scalar Evolution to compute the trip count. This allows more
@@ -240,7 +241,7 @@ bool llvm::UnrollRuntimeLoopProlog(Loop *L, unsigned Count, LoopInfo *LI,
if (!LPM)
return false;
ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>();
- if (SE == 0)
+ if (!SE)
return false;
// Only unroll loops with a computable trip count and the trip count needs
@@ -279,17 +280,17 @@ bool llvm::UnrollRuntimeLoopProlog(Loop *L, unsigned Count, LoopInfo *LI,
SCEVExpander Expander(*SE, "loop-unroll");
Value *TripCount = Expander.expandCodeFor(TripCountSC, TripCountSC->getType(),
PreHeaderBR);
- Type *CountTy = TripCount->getType();
- BinaryOperator *ModVal =
- BinaryOperator::CreateURem(TripCount,
- ConstantInt::get(CountTy, Count),
- "xtraiter");
- ModVal->insertBefore(PreHeaderBR);
-
- // Check if for no extra iterations, then jump to unrolled loop
- Value *BranchVal = new ICmpInst(PreHeaderBR,
- ICmpInst::ICMP_NE, ModVal,
- ConstantInt::get(CountTy, 0), "lcmp");
+
+ IRBuilder<> B(PreHeaderBR);
+ Value *ModVal = B.CreateAnd(TripCount, Count - 1, "xtraiter");
+
+  // If there are no extra iterations, jump to the unrolled loop. We have to
+ // check that the trip count computation didn't overflow when adding one to
+ // the backedge taken count.
+ Value *LCmp = B.CreateIsNotNull(ModVal, "lcmp.mod");
+ Value *OverflowCheck = B.CreateIsNull(TripCount, "lcmp.overflow");
+ Value *BranchVal = B.CreateOr(OverflowCheck, LCmp, "lcmp.or");
+
// Branch to either the extra iterations or the unrolled loop
// We will fix up the true branch label when adding loop body copies
BranchInst::Create(PEnd, PEnd, BranchVal, PreHeaderBR);
@@ -301,7 +302,7 @@ bool llvm::UnrollRuntimeLoopProlog(Loop *L, unsigned Count, LoopInfo *LI,
ValueToValueMapTy LVMap;
Function *F = Header->getParent();
// These variables are used to update the CFG links in each iteration
- BasicBlock *CompareBB = 0;
+ BasicBlock *CompareBB = nullptr;
BasicBlock *LastLoopBB = PH;
// Get an ordered list of blocks in the loop to help with the ordering of the
// cloned blocks in the prolog code
@@ -343,6 +344,7 @@ bool llvm::UnrollRuntimeLoopProlog(Loop *L, unsigned Count, LoopInfo *LI,
}
// The comparison w/ the extra iteration value and branch
+ Type *CountTy = TripCount->getType();
Value *BranchVal = new ICmpInst(*NewBB, ICmpInst::ICMP_EQ, ModVal,
ConstantInt::get(CountTy, leftOverIters),
"un.tmp");
diff --git a/contrib/llvm/lib/Transforms/Utils/LowerExpectIntrinsic.cpp b/contrib/llvm/lib/Transforms/Utils/LowerExpectIntrinsic.cpp
index e017f50..ff89e74 100644
--- a/contrib/llvm/lib/Transforms/Utils/LowerExpectIntrinsic.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LowerExpectIntrinsic.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "lower-expect-intrinsic"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/BasicBlock.h"
@@ -29,6 +28,8 @@
using namespace llvm;
+#define DEBUG_TYPE "lower-expect-intrinsic"
+
STATISTIC(IfHandled, "Number of 'expect' intrinsic instructions handled");
static cl::opt<uint32_t>
@@ -52,7 +53,7 @@ namespace {
initializeLowerExpectIntrinsicPass(*PassRegistry::getPassRegistry());
}
- bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
};
}
@@ -94,15 +95,25 @@ bool LowerExpectIntrinsic::HandleIfExpect(BranchInst *BI) {
return false;
// Handle non-optimized IR code like:
- // %expval = call i64 @llvm.expect.i64.i64(i64 %conv1, i64 1)
+ // %expval = call i64 @llvm.expect.i64(i64 %conv1, i64 1)
// %tobool = icmp ne i64 %expval, 0
// br i1 %tobool, label %if.then, label %if.end
+ //
+ // Or the following simpler case:
+ // %expval = call i1 @llvm.expect.i1(i1 %cmp, i1 1)
+ // br i1 %expval, label %if.then, label %if.end
+
+ CallInst *CI;
ICmpInst *CmpI = dyn_cast<ICmpInst>(BI->getCondition());
- if (!CmpI || CmpI->getPredicate() != CmpInst::ICMP_NE)
- return false;
+ if (!CmpI) {
+ CI = dyn_cast<CallInst>(BI->getCondition());
+ } else {
+ if (CmpI->getPredicate() != CmpInst::ICMP_NE)
+ return false;
+ CI = dyn_cast<CallInst>(CmpI->getOperand(0));
+ }
- CallInst *CI = dyn_cast<CallInst>(CmpI->getOperand(0));
if (!CI)
return false;
@@ -127,7 +138,10 @@ bool LowerExpectIntrinsic::HandleIfExpect(BranchInst *BI) {
BI->setMetadata(LLVMContext::MD_prof, Node);
- CmpI->setOperand(0, ArgValue);
+ if (CmpI)
+ CmpI->setOperand(0, ArgValue);
+ else
+ BI->setCondition(ArgValue);
return true;
}
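
// A small source-level example of where llvm.expect typically comes from:
// __builtin_expect, whose first argument is the value and second the expected
// value. Depending on how the front end and earlier passes shape the IR, the
// branch condition reaching this pass is either an "icmp ne" on the expect's
// result or the boolean expect result itself; the change above handles both.
// slow_path is a hypothetical helper used only for illustration.
#include <cstdio>

static long slow_path(long x) { return x * 2; }

static long mostly_small(long x) {
  if (__builtin_expect(x > 100, 0)) // hint: this branch is unlikely
    return slow_path(x);
  return x;
}

int main() { std::printf("%ld\n", mostly_small(7)); }
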
diff --git a/contrib/llvm/lib/Transforms/Utils/LowerInvoke.cpp b/contrib/llvm/lib/Transforms/Utils/LowerInvoke.cpp
index 9799a30..66d57b0 100644
--- a/contrib/llvm/lib/Transforms/Utils/LowerInvoke.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LowerInvoke.cpp
@@ -1,4 +1,4 @@
-//===- LowerInvoke.cpp - Eliminate Invoke & Unwind instructions -----------===//
+//===- LowerInvoke.cpp - Eliminate Invoke instructions --------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -8,94 +8,34 @@
//===----------------------------------------------------------------------===//
//
// This transformation is designed for use by code generators which do not yet
-// support stack unwinding. This pass supports two models of exception handling
-// lowering, the 'cheap' support and the 'expensive' support.
-//
-// 'Cheap' exception handling support gives the program the ability to execute
-// any program which does not "throw an exception", by turning 'invoke'
-// instructions into calls and by turning 'unwind' instructions into calls to
-// abort(). If the program does dynamically use the unwind instruction, the
-// program will print a message then abort.
-//
-// 'Expensive' exception handling support gives the full exception handling
-// support to the program at the cost of making the 'invoke' instruction
-// really expensive. It basically inserts setjmp/longjmp calls to emulate the
-// exception handling as necessary.
-//
-// Because the 'expensive' support slows down programs a lot, and EH is only
-// used for a subset of the programs, it must be specifically enabled by an
-// option.
-//
-// Note that after this pass runs the CFG is not entirely accurate (exceptional
-// control flow edges are not correct anymore) so only very simple things should
-// be done after the lowerinvoke pass has run (like generation of native code).
-// This should not be used as a general purpose "my LLVM-to-LLVM pass doesn't
-// support the invoke instruction yet" lowering pass.
+// support stack unwinding. This pass converts 'invoke' instructions to 'call'
+// instructions, so that any exception-handling 'landingpad' blocks become dead
+// code (which can be removed by running the '-simplifycfg' pass afterwards).
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "lowerinvoke"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
-#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/Local.h"
-#include <csetjmp>
-#include <set>
using namespace llvm;
-STATISTIC(NumInvokes, "Number of invokes replaced");
-STATISTIC(NumSpilled, "Number of registers live across unwind edges");
+#define DEBUG_TYPE "lowerinvoke"
-static cl::opt<bool> ExpensiveEHSupport("enable-correct-eh-support",
- cl::desc("Make the -lowerinvoke pass insert expensive, but correct, EH code"));
+STATISTIC(NumInvokes, "Number of invokes replaced");
namespace {
class LowerInvoke : public FunctionPass {
- const TargetMachine *TM;
-
- // Used for both models.
- Constant *AbortFn;
-
- // Used for expensive EH support.
- StructType *JBLinkTy;
- GlobalVariable *JBListHead;
- Constant *SetJmpFn, *LongJmpFn, *StackSaveFn, *StackRestoreFn;
- bool useExpensiveEHSupport;
-
public:
static char ID; // Pass identification, replacement for typeid
- explicit LowerInvoke(const TargetMachine *TM = 0,
- bool useExpensiveEHSupport = ExpensiveEHSupport)
- : FunctionPass(ID), TM(TM),
- useExpensiveEHSupport(useExpensiveEHSupport) {
+ explicit LowerInvoke() : FunctionPass(ID) {
initializeLowerInvokePass(*PassRegistry::getPassRegistry());
}
- bool doInitialization(Module &M);
- bool runOnFunction(Function &F);
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- // This is a cluster of orthogonal Transforms
- AU.addPreserved("mem2reg");
- AU.addPreservedID(LowerSwitchID);
- }
-
- private:
- bool insertCheapEHSupport(Function &F);
- void splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*>&Invokes);
- void rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
- AllocaInst *InvokeNum, AllocaInst *StackPtr,
- SwitchInst *CatchSwitch);
- bool insertExpensiveEHSupport(Function &F);
+ bool runOnFunction(Function &F) override;
};
}
@@ -107,65 +47,11 @@ INITIALIZE_PASS(LowerInvoke, "lowerinvoke",
char &llvm::LowerInvokePassID = LowerInvoke::ID;
// Public Interface To the LowerInvoke pass.
-FunctionPass *llvm::createLowerInvokePass(const TargetMachine *TM,
- bool useExpensiveEHSupport) {
- return new LowerInvoke(TM, useExpensiveEHSupport || ExpensiveEHSupport);
+FunctionPass *llvm::createLowerInvokePass() {
+ return new LowerInvoke();
}
-// doInitialization - Make sure that there is a prototype for abort in the
-// current module.
-bool LowerInvoke::doInitialization(Module &M) {
- Type *VoidPtrTy = Type::getInt8PtrTy(M.getContext());
- if (useExpensiveEHSupport) {
- // Insert a type for the linked list of jump buffers.
- const TargetLowering *TLI = TM ? TM->getTargetLowering() : 0;
- unsigned JBSize = TLI ? TLI->getJumpBufSize() : 0;
- JBSize = JBSize ? JBSize : 200;
- Type *JmpBufTy = ArrayType::get(VoidPtrTy, JBSize);
-
- JBLinkTy = StructType::create(M.getContext(), "llvm.sjljeh.jmpbufty");
- Type *Elts[] = { JmpBufTy, PointerType::getUnqual(JBLinkTy) };
- JBLinkTy->setBody(Elts);
-
- Type *PtrJBList = PointerType::getUnqual(JBLinkTy);
-
- // Now that we've done that, insert the jmpbuf list head global, unless it
- // already exists.
- if (!(JBListHead = M.getGlobalVariable("llvm.sjljeh.jblist", PtrJBList))) {
- JBListHead = new GlobalVariable(M, PtrJBList, false,
- GlobalValue::LinkOnceAnyLinkage,
- Constant::getNullValue(PtrJBList),
- "llvm.sjljeh.jblist");
- }
-
-// VisualStudio defines setjmp as _setjmp
-#if defined(_MSC_VER) && defined(setjmp) && \
- !defined(setjmp_undefined_for_msvc)
-# pragma push_macro("setjmp")
-# undef setjmp
-# define setjmp_undefined_for_msvc
-#endif
-
- SetJmpFn = Intrinsic::getDeclaration(&M, Intrinsic::setjmp);
-
-#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
- // let's return it to _setjmp state
-# pragma pop_macro("setjmp")
-# undef setjmp_undefined_for_msvc
-#endif
-
- LongJmpFn = Intrinsic::getDeclaration(&M, Intrinsic::longjmp);
- StackSaveFn = Intrinsic::getDeclaration(&M, Intrinsic::stacksave);
- StackRestoreFn = Intrinsic::getDeclaration(&M, Intrinsic::stackrestore);
- }
-
- // We need the 'write' and 'abort' functions for both models.
- AbortFn = M.getOrInsertFunction("abort", Type::getVoidTy(M.getContext()),
- (Type *)0);
- return true;
-}
-
-bool LowerInvoke::insertCheapEHSupport(Function &F) {
+bool LowerInvoke::runOnFunction(Function &F) {
bool Changed = false;
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
@@ -192,388 +78,3 @@ bool LowerInvoke::insertCheapEHSupport(Function &F) {
}
return Changed;
}
-
-/// rewriteExpensiveInvoke - Insert code and hack the function to replace the
-/// specified invoke instruction with a call.
-void LowerInvoke::rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
- AllocaInst *InvokeNum,
- AllocaInst *StackPtr,
- SwitchInst *CatchSwitch) {
- ConstantInt *InvokeNoC = ConstantInt::get(Type::getInt32Ty(II->getContext()),
- InvokeNo);
-
- // If the unwind edge has phi nodes, split the edge.
- if (isa<PHINode>(II->getUnwindDest()->begin())) {
- SplitCriticalEdge(II, 1, this);
-
- // If there are any phi nodes left, they must have a single predecessor.
- while (PHINode *PN = dyn_cast<PHINode>(II->getUnwindDest()->begin())) {
- PN->replaceAllUsesWith(PN->getIncomingValue(0));
- PN->eraseFromParent();
- }
- }
-
- // Insert a store of the invoke num before the invoke and store zero into the
- // location afterward.
- new StoreInst(InvokeNoC, InvokeNum, true, II); // volatile
-
- // Insert a store of the stack ptr before the invoke, so we can restore it
- // later in the exception case.
- CallInst* StackSaveRet = CallInst::Create(StackSaveFn, "ssret", II);
- new StoreInst(StackSaveRet, StackPtr, true, II); // volatile
-
- BasicBlock::iterator NI = II->getNormalDest()->getFirstInsertionPt();
- // nonvolatile.
- new StoreInst(Constant::getNullValue(Type::getInt32Ty(II->getContext())),
- InvokeNum, false, NI);
-
- Instruction* StackPtrLoad =
- new LoadInst(StackPtr, "stackptr.restore", true,
- II->getUnwindDest()->getFirstInsertionPt());
- CallInst::Create(StackRestoreFn, StackPtrLoad, "")->insertAfter(StackPtrLoad);
-
- // Add a switch case to our unwind block.
- CatchSwitch->addCase(InvokeNoC, II->getUnwindDest());
-
- // Insert a normal call instruction.
- SmallVector<Value*,16> CallArgs(II->op_begin(), II->op_end() - 3);
- CallInst *NewCall = CallInst::Create(II->getCalledValue(),
- CallArgs, "", II);
- NewCall->takeName(II);
- NewCall->setCallingConv(II->getCallingConv());
- NewCall->setAttributes(II->getAttributes());
- NewCall->setDebugLoc(II->getDebugLoc());
- II->replaceAllUsesWith(NewCall);
-
- // Replace the invoke with an uncond branch.
- BranchInst::Create(II->getNormalDest(), NewCall->getParent());
- II->eraseFromParent();
-}
-
-/// MarkBlocksLiveIn - Insert BB and all of its predescessors into LiveBBs until
-/// we reach blocks we've already seen.
-static void MarkBlocksLiveIn(BasicBlock *BB, std::set<BasicBlock*> &LiveBBs) {
- if (!LiveBBs.insert(BB).second) return; // already been here.
-
- for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
- MarkBlocksLiveIn(*PI, LiveBBs);
-}
-
-// First thing we need to do is scan the whole function for values that are
-// live across unwind edges. Each value that is live across an unwind edge
-// we spill into a stack location, guaranteeing that there is nothing live
-// across the unwind edge. This process also splits all critical edges
-// coming out of invoke's.
-void LowerInvoke::
-splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*> &Invokes) {
- // First step, split all critical edges from invoke instructions.
- for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
- InvokeInst *II = Invokes[i];
- SplitCriticalEdge(II, 0, this);
- SplitCriticalEdge(II, 1, this);
- assert(!isa<PHINode>(II->getNormalDest()) &&
- !isa<PHINode>(II->getUnwindDest()) &&
- "critical edge splitting left single entry phi nodes?");
- }
-
- Function *F = Invokes.back()->getParent()->getParent();
-
- // To avoid having to handle incoming arguments specially, we lower each arg
- // to a copy instruction in the entry block. This ensures that the argument
- // value itself cannot be live across the entry block.
- BasicBlock::iterator AfterAllocaInsertPt = F->begin()->begin();
- while (isa<AllocaInst>(AfterAllocaInsertPt) &&
- isa<ConstantInt>(cast<AllocaInst>(AfterAllocaInsertPt)->getArraySize()))
- ++AfterAllocaInsertPt;
- for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
- AI != E; ++AI) {
- Type *Ty = AI->getType();
- // Aggregate types can't be cast, but are legal argument types, so we have
- // to handle them differently. We use an extract/insert pair as a
- // lightweight method to achieve the same goal.
- if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
- Instruction *EI = ExtractValueInst::Create(AI, 0, "",AfterAllocaInsertPt);
- Instruction *NI = InsertValueInst::Create(AI, EI, 0);
- NI->insertAfter(EI);
- AI->replaceAllUsesWith(NI);
- // Set the operand of the instructions back to the AllocaInst.
- EI->setOperand(0, AI);
- NI->setOperand(0, AI);
- } else {
- // This is always a no-op cast because we're casting AI to AI->getType()
- // so src and destination types are identical. BitCast is the only
- // possibility.
- CastInst *NC = new BitCastInst(
- AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
- AI->replaceAllUsesWith(NC);
- // Set the operand of the cast instruction back to the AllocaInst.
- // Normally it's forbidden to replace a CastInst's operand because it
- // could cause the opcode to reflect an illegal conversion. However,
- // we're replacing it here with the same value it was constructed with.
- // We do this because the above replaceAllUsesWith() clobbered the
- // operand, but we want this one to remain.
- NC->setOperand(0, AI);
- }
- }
-
- // Finally, scan the code looking for instructions with bad live ranges.
- for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
- for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
- // Ignore obvious cases we don't have to handle. In particular, most
- // instructions either have no uses or only have a single use inside the
- // current block. Ignore them quickly.
- Instruction *Inst = II;
- if (Inst->use_empty()) continue;
- if (Inst->hasOneUse() &&
- cast<Instruction>(Inst->use_back())->getParent() == BB &&
- !isa<PHINode>(Inst->use_back())) continue;
-
- // If this is an alloca in the entry block, it's not a real register
- // value.
- if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
- if (isa<ConstantInt>(AI->getArraySize()) && BB == F->begin())
- continue;
-
- // Avoid iterator invalidation by copying users to a temporary vector.
- SmallVector<Instruction*,16> Users;
- for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
- UI != E; ++UI) {
- Instruction *User = cast<Instruction>(*UI);
- if (User->getParent() != BB || isa<PHINode>(User))
- Users.push_back(User);
- }
-
- // Scan all of the uses and see if the live range is live across an unwind
- // edge. If we find a use live across an invoke edge, create an alloca
- // and spill the value.
-
- // Find all of the blocks that this value is live in.
- std::set<BasicBlock*> LiveBBs;
- LiveBBs.insert(Inst->getParent());
- while (!Users.empty()) {
- Instruction *U = Users.back();
- Users.pop_back();
-
- if (!isa<PHINode>(U)) {
- MarkBlocksLiveIn(U->getParent(), LiveBBs);
- } else {
- // Uses for a PHI node occur in their predecessor block.
- PHINode *PN = cast<PHINode>(U);
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
- if (PN->getIncomingValue(i) == Inst)
- MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
- }
- }
-
- // Now that we know all of the blocks that this thing is live in, see if
- // it includes any of the unwind locations.
- bool NeedsSpill = false;
- for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
- BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
- if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
- NeedsSpill = true;
- }
- }
-
- // If we decided we need a spill, do it.
- if (NeedsSpill) {
- ++NumSpilled;
- DemoteRegToStack(*Inst, true);
- }
- }
-}
-
-bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
- SmallVector<ReturnInst*,16> Returns;
- SmallVector<InvokeInst*,16> Invokes;
- UnreachableInst* UnreachablePlaceholder = 0;
-
- for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
- if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
- // Remember all return instructions in case we insert an invoke into this
- // function.
- Returns.push_back(RI);
- } else if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
- Invokes.push_back(II);
- }
-
- if (Invokes.empty()) return false;
-
- NumInvokes += Invokes.size();
-
- // TODO: This is not an optimal way to do this. In particular, this always
- // inserts setjmp calls into the entries of functions with invoke instructions
- // even though there are possibly paths through the function that do not
- // execute any invokes. In particular, for functions with early exits, e.g.
- // the 'addMove' method in hexxagon, it would be nice to not have to do the
- // setjmp stuff on the early exit path. This requires a bit of dataflow, but
- // would not be too hard to do.
-
- // If we have an invoke instruction, insert a setjmp that dominates all
- // invokes. After the setjmp, use a cond branch that goes to the original
- // code path on zero, and to a designated 'catch' block of nonzero.
- Value *OldJmpBufPtr = 0;
- if (!Invokes.empty()) {
- // First thing we need to do is scan the whole function for values that are
- // live across unwind edges. Each value that is live across an unwind edge
- // we spill into a stack location, guaranteeing that there is nothing live
- // across the unwind edge. This process also splits all critical edges
- // coming out of invoke's.
- splitLiveRangesLiveAcrossInvokes(Invokes);
-
- BasicBlock *EntryBB = F.begin();
-
- // Create an alloca for the incoming jump buffer ptr and the new jump buffer
- // that needs to be restored on all exits from the function. This is an
- // alloca because the value needs to be live across invokes.
- const TargetLowering *TLI = TM ? TM->getTargetLowering() : 0;
- unsigned Align = TLI ? TLI->getJumpBufAlignment() : 0;
- AllocaInst *JmpBuf =
- new AllocaInst(JBLinkTy, 0, Align,
- "jblink", F.begin()->begin());
-
- Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
- ConstantInt::get(Type::getInt32Ty(F.getContext()), 1) };
- OldJmpBufPtr = GetElementPtrInst::Create(JmpBuf, Idx, "OldBuf",
- EntryBB->getTerminator());
-
- // Copy the JBListHead to the alloca.
- Value *OldBuf = new LoadInst(JBListHead, "oldjmpbufptr", true,
- EntryBB->getTerminator());
- new StoreInst(OldBuf, OldJmpBufPtr, true, EntryBB->getTerminator());
-
- // Add the new jumpbuf to the list.
- new StoreInst(JmpBuf, JBListHead, true, EntryBB->getTerminator());
-
- // Create the catch block. The catch block is basically a big switch
- // statement that goes to all of the invoke catch blocks.
- BasicBlock *CatchBB =
- BasicBlock::Create(F.getContext(), "setjmp.catch", &F);
-
- // Create an alloca which keeps track of the stack pointer before every
- // invoke, this allows us to properly restore the stack pointer after
- // long jumping.
- AllocaInst *StackPtr = new AllocaInst(Type::getInt8PtrTy(F.getContext()), 0,
- "stackptr", EntryBB->begin());
-
- // Create an alloca which keeps track of which invoke is currently
- // executing. For normal calls it contains zero.
- AllocaInst *InvokeNum = new AllocaInst(Type::getInt32Ty(F.getContext()), 0,
- "invokenum",EntryBB->begin());
- new StoreInst(ConstantInt::get(Type::getInt32Ty(F.getContext()), 0),
- InvokeNum, true, EntryBB->getTerminator());
-
- // Insert a load in the Catch block, and a switch on its value. By default,
- // we go to a block that just does an unwind (which is the correct action
- // for a standard call). We insert an unreachable instruction here and
- // modify the block to jump to the correct unwinding pad later.
- BasicBlock *UnwindBB = BasicBlock::Create(F.getContext(), "unwindbb", &F);
- UnreachablePlaceholder = new UnreachableInst(F.getContext(), UnwindBB);
-
- Value *CatchLoad = new LoadInst(InvokeNum, "invoke.num", true, CatchBB);
- SwitchInst *CatchSwitch =
- SwitchInst::Create(CatchLoad, UnwindBB, Invokes.size(), CatchBB);
-
- // Now that things are set up, insert the setjmp call itself.
-
- // Split the entry block to insert the conditional branch for the setjmp.
- BasicBlock *ContBlock = EntryBB->splitBasicBlock(EntryBB->getTerminator(),
- "setjmp.cont");
-
- Idx[1] = ConstantInt::get(Type::getInt32Ty(F.getContext()), 0);
- Value *JmpBufPtr = GetElementPtrInst::Create(JmpBuf, Idx, "TheJmpBuf",
- EntryBB->getTerminator());
- JmpBufPtr = new BitCastInst(JmpBufPtr,
- Type::getInt8PtrTy(F.getContext()),
- "tmp", EntryBB->getTerminator());
- Value *SJRet = CallInst::Create(SetJmpFn, JmpBufPtr, "sjret",
- EntryBB->getTerminator());
-
- // Compare the return value to zero.
- Value *IsNormal = new ICmpInst(EntryBB->getTerminator(),
- ICmpInst::ICMP_EQ, SJRet,
- Constant::getNullValue(SJRet->getType()),
- "notunwind");
- // Nuke the uncond branch.
- EntryBB->getTerminator()->eraseFromParent();
-
- // Put in a new condbranch in its place.
- BranchInst::Create(ContBlock, CatchBB, IsNormal, EntryBB);
-
- // At this point, we are all set up, rewrite each invoke instruction.
- for (unsigned i = 0, e = Invokes.size(); i != e; ++i)
- rewriteExpensiveInvoke(Invokes[i], i+1, InvokeNum, StackPtr, CatchSwitch);
- }
-
- // We know that there is at least one unwind.
-
- // Create three new blocks, the block to load the jmpbuf ptr and compare
- // against null, the block to do the longjmp, and the error block for if it
- // is null. Add them at the end of the function because they are not hot.
- BasicBlock *UnwindHandler = BasicBlock::Create(F.getContext(),
- "dounwind", &F);
- BasicBlock *UnwindBlock = BasicBlock::Create(F.getContext(), "unwind", &F);
- BasicBlock *TermBlock = BasicBlock::Create(F.getContext(), "unwinderror", &F);
-
- // If this function contains an invoke, restore the old jumpbuf ptr.
- Value *BufPtr;
- if (OldJmpBufPtr) {
- // Before the return, insert a copy from the saved value to the new value.
- BufPtr = new LoadInst(OldJmpBufPtr, "oldjmpbufptr", UnwindHandler);
- new StoreInst(BufPtr, JBListHead, UnwindHandler);
- } else {
- BufPtr = new LoadInst(JBListHead, "ehlist", UnwindHandler);
- }
-
- // Load the JBList, if it's null, then there was no catch!
- Value *NotNull = new ICmpInst(*UnwindHandler, ICmpInst::ICMP_NE, BufPtr,
- Constant::getNullValue(BufPtr->getType()),
- "notnull");
- BranchInst::Create(UnwindBlock, TermBlock, NotNull, UnwindHandler);
-
- // Create the block to do the longjmp.
- // Get a pointer to the jmpbuf and longjmp.
- Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
- ConstantInt::get(Type::getInt32Ty(F.getContext()), 0) };
- Idx[0] = GetElementPtrInst::Create(BufPtr, Idx, "JmpBuf", UnwindBlock);
- Idx[0] = new BitCastInst(Idx[0],
- Type::getInt8PtrTy(F.getContext()),
- "tmp", UnwindBlock);
- Idx[1] = ConstantInt::get(Type::getInt32Ty(F.getContext()), 1);
- CallInst::Create(LongJmpFn, Idx, "", UnwindBlock);
- new UnreachableInst(F.getContext(), UnwindBlock);
-
- // Set up the term block ("throw without a catch").
- new UnreachableInst(F.getContext(), TermBlock);
-
- // Insert a call to abort()
- CallInst::Create(AbortFn, "",
- TermBlock->getTerminator())->setTailCall();
-
- // Replace the inserted unreachable with a branch to the unwind handler.
- if (UnreachablePlaceholder) {
- BranchInst::Create(UnwindHandler, UnreachablePlaceholder);
- UnreachablePlaceholder->eraseFromParent();
- }
-
- // Finally, for any returns from this function, if this function contains an
- // invoke, restore the old jmpbuf pointer to its input value.
- if (OldJmpBufPtr) {
- for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
- ReturnInst *R = Returns[i];
-
- // Before the return, insert a copy from the saved value to the new value.
- Value *OldBuf = new LoadInst(OldJmpBufPtr, "oldjmpbufptr", true, R);
- new StoreInst(OldBuf, JBListHead, true, R);
- }
- }
-
- return true;
-}
-
-bool LowerInvoke::runOnFunction(Function &F) {
- if (useExpensiveEHSupport)
- return insertExpensiveEHSupport(F);
- else
- return insertCheapEHSupport(F);
-}
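
For orientation, the removed insertExpensiveEHSupport lowered invokes with a thread-global list of jump buffers: push a jmp_buf on entry, setjmp before any invoke can run, branch on the setjmp result, and let the unwinder longjmp back to the dispatch block. A rough C++ sketch of that control-flow shape, illustrative only (JmpBufNode, mayThrow and caller are made-up names; JBListHead mirrors the pass's global of the same name, and the pass emitted the equivalent LLVM IR rather than C++):

#include <csetjmp>

struct JmpBufNode { JmpBufNode *Prev; std::jmp_buf Buf; };
static JmpBufNode *JBListHead = nullptr;   // analogue of the pass's JBListHead global

static void mayThrow() {}                  // stands in for the invoked callee

void caller() {
  JmpBufNode Node;
  Node.Prev = JBListHead;                  // save the old list head ("OldBuf")
  JBListHead = &Node;                      // add the new jump buffer to the list
  if (setjmp(Node.Buf) == 0) {
    mayThrow();                            // normal path ("setjmp.cont"): invokes become plain calls
  } else {
    // "setjmp.catch": the pass switches on a stored invoke number here to pick
    // the right landing pad after a longjmp from the unwind handler.
  }
  JBListHead = Node.Prev;                  // restored on every exit, as before each ReturnInst
}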
diff --git a/contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp b/contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp
index 2d2a8a5..d6e5bb6 100644
--- a/contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp
@@ -14,11 +14,13 @@
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/CFG.h"
#include "llvm/Pass.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
@@ -27,6 +29,8 @@
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE "lower-switch"
+
namespace {
/// LowerSwitch Pass - Replace all SwitchInst instructions with chained branch
/// instructions.
@@ -37,9 +41,9 @@ namespace {
initializeLowerSwitchPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnFunction(Function &F);
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ bool runOnFunction(Function &F) override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
// This is a cluster of orthogonal Transforms
AU.addPreserved<UnifyFunctionExitNodes>();
AU.addPreserved("mem2reg");
@@ -51,20 +55,23 @@ namespace {
Constant* High;
BasicBlock* BB;
- CaseRange(Constant *low = 0, Constant *high = 0, BasicBlock *bb = 0) :
+ CaseRange(Constant *low = nullptr, Constant *high = nullptr,
+ BasicBlock *bb = nullptr) :
Low(low), High(high), BB(bb) { }
};
- typedef std::vector<CaseRange> CaseVector;
+ typedef std::vector<CaseRange> CaseVector;
typedef std::vector<CaseRange>::iterator CaseItr;
private:
void processSwitchInst(SwitchInst *SI);
- BasicBlock* switchConvert(CaseItr Begin, CaseItr End, Value* Val,
- BasicBlock* OrigBlock, BasicBlock* Default);
- BasicBlock* newLeafBlock(CaseRange& Leaf, Value* Val,
- BasicBlock* OrigBlock, BasicBlock* Default);
- unsigned Clusterify(CaseVector& Cases, SwitchInst *SI);
+ BasicBlock *switchConvert(CaseItr Begin, CaseItr End,
+ ConstantInt *LowerBound, ConstantInt *UpperBound,
+ Value *Val, BasicBlock *Predecessor,
+ BasicBlock *OrigBlock, BasicBlock *Default);
+ BasicBlock *newLeafBlock(CaseRange &Leaf, Value *Val, BasicBlock *OrigBlock,
+ BasicBlock *Default);
+ unsigned Clusterify(CaseVector &Cases, SwitchInst *SI);
};
/// The comparison function for sorting the switch case values in the vector.
@@ -124,17 +131,45 @@ static raw_ostream& operator<<(raw_ostream &O,
return O << "]";
}
+static void fixPhis(BasicBlock *Succ,
+ BasicBlock *OrigBlock,
+ BasicBlock *NewNode) {
+ for (BasicBlock::iterator I = Succ->begin(),
+ E = Succ->getFirstNonPHI();
+ I != E; ++I) {
+ PHINode *PN = cast<PHINode>(I);
+
+ for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
+ if (PN->getIncomingBlock(I) == OrigBlock)
+ PN->setIncomingBlock(I, NewNode);
+ }
+ }
+}
+
// switchConvert - Convert the switch statement into a binary lookup of
// the case values. The function recursively builds this tree.
-//
-BasicBlock* LowerSwitch::switchConvert(CaseItr Begin, CaseItr End,
- Value* Val, BasicBlock* OrigBlock,
- BasicBlock* Default)
-{
+// LowerBound and UpperBound are used to keep track of the bounds for Val
+// that have already been checked by a block emitted by one of the previous
+// calls to switchConvert in the call stack.
+BasicBlock *LowerSwitch::switchConvert(CaseItr Begin, CaseItr End,
+ ConstantInt *LowerBound,
+ ConstantInt *UpperBound, Value *Val,
+ BasicBlock *Predecessor,
+ BasicBlock *OrigBlock,
+ BasicBlock *Default) {
unsigned Size = End - Begin;
- if (Size == 1)
+ if (Size == 1) {
+ // Check if the Case Range is perfectly squeezed in between
+ // already checked Upper and Lower bounds. If it is then we can avoid
+ // emitting the code that checks if the value actually falls in the range
+ // because the bounds already tell us so.
+ if (Begin->Low == LowerBound && Begin->High == UpperBound) {
+ fixPhis(Begin->BB, OrigBlock, Predecessor);
+ return Begin->BB;
+ }
return newLeafBlock(*Begin, Val, OrigBlock, Default);
+ }
unsigned Mid = Size / 2;
std::vector<CaseRange> LHS(Begin, Begin + Mid);
@@ -142,26 +177,65 @@ BasicBlock* LowerSwitch::switchConvert(CaseItr Begin, CaseItr End,
std::vector<CaseRange> RHS(Begin + Mid, End);
DEBUG(dbgs() << "RHS: " << RHS << "\n");
- CaseRange& Pivot = *(Begin + Mid);
- DEBUG(dbgs() << "Pivot ==> "
- << cast<ConstantInt>(Pivot.Low)->getValue() << " -"
- << cast<ConstantInt>(Pivot.High)->getValue() << "\n");
+ CaseRange &Pivot = *(Begin + Mid);
+ DEBUG(dbgs() << "Pivot ==> "
+ << cast<ConstantInt>(Pivot.Low)->getValue()
+ << " -" << cast<ConstantInt>(Pivot.High)->getValue() << "\n");
+
+ // NewLowerBound here should never be the integer minimal value.
+ // This is because it is computed from a case range that is never
+ // the smallest, so there is always a case range with a smaller value.
+ ConstantInt *NewLowerBound = cast<ConstantInt>(Pivot.Low);
+ ConstantInt *NewUpperBound;
+
+ // If we don't have a Default block then it means that we can never
+ // have a value outside of a case range, so set the UpperBound to the highest
+ // value in the LHS part of the case ranges.
+ if (Default != nullptr) {
+ // Because NewLowerBound is never the smallest representable integer
+ // it is safe here to subtract one.
+ NewUpperBound = ConstantInt::get(NewLowerBound->getContext(),
+ NewLowerBound->getValue() - 1);
+ } else {
+ CaseItr LastLHS = LHS.begin() + LHS.size() - 1;
+ NewUpperBound = cast<ConstantInt>(LastLHS->High);
+ }
- BasicBlock* LBranch = switchConvert(LHS.begin(), LHS.end(), Val,
- OrigBlock, Default);
- BasicBlock* RBranch = switchConvert(RHS.begin(), RHS.end(), Val,
- OrigBlock, Default);
+ DEBUG(dbgs() << "LHS Bounds ==> ";
+ if (LowerBound) {
+ dbgs() << cast<ConstantInt>(LowerBound)->getSExtValue();
+ } else {
+ dbgs() << "NONE";
+ }
+ dbgs() << " - " << NewUpperBound->getSExtValue() << "\n";
+ dbgs() << "RHS Bounds ==> ";
+ dbgs() << NewLowerBound->getSExtValue() << " - ";
+ if (UpperBound) {
+ dbgs() << cast<ConstantInt>(UpperBound)->getSExtValue() << "\n";
+ } else {
+ dbgs() << "NONE\n";
+ });
// Create a new node that checks if the value is < pivot. Go to the
// left branch if it is and right branch if not.
Function* F = OrigBlock->getParent();
BasicBlock* NewNode = BasicBlock::Create(Val->getContext(), "NodeBlock");
- Function::iterator FI = OrigBlock;
- F->getBasicBlockList().insert(++FI, NewNode);
ICmpInst* Comp = new ICmpInst(ICmpInst::ICMP_SLT,
Val, Pivot.Low, "Pivot");
+
+ BasicBlock *LBranch = switchConvert(LHS.begin(), LHS.end(), LowerBound,
+ NewUpperBound, Val, NewNode, OrigBlock,
+ Default);
+ BasicBlock *RBranch = switchConvert(RHS.begin(), RHS.end(), NewLowerBound,
+ UpperBound, Val, NewNode, OrigBlock,
+ Default);
+
+ Function::iterator FI = OrigBlock;
+ F->getBasicBlockList().insert(++FI, NewNode);
NewNode->getInstList().push_back(Comp);
+
BranchInst::Create(LBranch, RBranch, Comp, NewNode);
return NewNode;
}
@@ -182,7 +256,7 @@ BasicBlock* LowerSwitch::newLeafBlock(CaseRange& Leaf, Value* Val,
F->getBasicBlockList().insert(++FI, NewLeaf);
// Emit comparison
- ICmpInst* Comp = NULL;
+ ICmpInst* Comp = nullptr;
if (Leaf.Low == Leaf.High) {
// Make the seteq instruction...
Comp = new ICmpInst(*NewLeaf, ICmpInst::ICMP_EQ, Val,
@@ -245,7 +319,8 @@ unsigned LowerSwitch::Clusterify(CaseVector& Cases, SwitchInst *SI) {
// Merge case into clusters
if (Cases.size()>=2)
- for (CaseItr I=Cases.begin(), J=llvm::next(Cases.begin()); J!=Cases.end(); ) {
+ for (CaseItr I = Cases.begin(), J = std::next(Cases.begin());
+ J != Cases.end();) {
int64_t nextValue = cast<ConstantInt>(J->Low)->getSExtValue();
int64_t currentValue = cast<ConstantInt>(I->High)->getSExtValue();
BasicBlock* nextBB = J->BB;
@@ -287,13 +362,19 @@ void LowerSwitch::processSwitchInst(SwitchInst *SI) {
return;
}
+ const bool DefaultIsUnreachable =
+ Default->size() == 1 && isa<UnreachableInst>(Default->getTerminator());
// Create a new, empty default block so that the new hierarchy of
// if-then statements go to this and the PHI nodes are happy.
- BasicBlock* NewDefault = BasicBlock::Create(SI->getContext(), "NewDefault");
- F->getBasicBlockList().insert(Default, NewDefault);
-
- BranchInst::Create(Default, NewDefault);
-
+ // If the default block is unreachable we avoid creating one
+ // because it will never be a valid target.
+ BasicBlock *NewDefault = nullptr;
+ if (!DefaultIsUnreachable) {
+ NewDefault = BasicBlock::Create(SI->getContext(), "NewDefault");
+ F->getBasicBlockList().insert(Default, NewDefault);
+
+ BranchInst::Create(Default, NewDefault);
+ }
// If there is an entry in any PHI nodes for the default edge, make sure
// to update them as well.
for (BasicBlock::iterator I = Default->begin(); isa<PHINode>(I); ++I) {
@@ -312,12 +393,31 @@ void LowerSwitch::processSwitchInst(SwitchInst *SI) {
DEBUG(dbgs() << "Cases: " << Cases << "\n");
(void)numCmps;
- BasicBlock* SwitchBlock = switchConvert(Cases.begin(), Cases.end(), Val,
- OrigBlock, NewDefault);
+ ConstantInt *UpperBound = nullptr;
+ ConstantInt *LowerBound = nullptr;
+
+ // Optimize the condition where Default is an unreachable block. In this case
+ // we can make the bounds tightly fitted around the case value ranges,
+ // because we know that the value passed to the switch should always be
+ // exactly one of the case values.
+ if (DefaultIsUnreachable) {
+ CaseItr LastCase = Cases.begin() + Cases.size() - 1;
+ UpperBound = cast<ConstantInt>(LastCase->High);
+ LowerBound = cast<ConstantInt>(Cases.begin()->Low);
+ }
+ BasicBlock *SwitchBlock =
+ switchConvert(Cases.begin(), Cases.end(), LowerBound, UpperBound, Val,
+ OrigBlock, OrigBlock, NewDefault);
// Branch to our shiny new if-then stuff...
BranchInst::Create(SwitchBlock, OrigBlock);
// We are now done with the switch instruction, delete it.
CurBlock->getInstList().erase(SI);
+
+ pred_iterator PI = pred_begin(Default), E = pred_end(Default);
+ // If the Default block has no more predecessors just remove it
+ if (PI == E) {
+ DeleteDeadBlock(Default);
+ }
}
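
As a reference point, the tree switchConvert builds can be pictured as nested comparisons. A hand-written sketch of what the pass produces for a small switch, illustrative only (the case values, their clustering by Clusterify, and the handleLow/handleHigh/handleDefault stand-ins are all assumptions):

static void handleLow(int) {}       // cases 1 and 2, merged into one CaseRange
static void handleHigh(int) {}      // cases 4 and 5
static void handleDefault(int) {}   // default target

void lowered(int X) {
  if (X < 4) {                      // NodeBlock: signed compare against Pivot.Low
    if (X >= 1 && X <= 2)           // LeafBlock range check for [1,2]
      handleLow(X);
    else
      handleDefault(X);
  } else {
    if (X >= 4 && X <= 5)           // LeafBlock range check for [4,5]
      handleHigh(X);
    else
      handleDefault(X);
  }
}

The changes above thread LowerBound/UpperBound through the recursion so that, when the default block is unreachable, the bounds are pinned to the outermost case values and a leaf whose range exactly matches the already-checked bounds branches straight to its case block, dropping the leaf range check shown in the sketch.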
diff --git a/contrib/llvm/lib/Transforms/Utils/Mem2Reg.cpp b/contrib/llvm/lib/Transforms/Utils/Mem2Reg.cpp
index 61b3965..189caa7 100644
--- a/contrib/llvm/lib/Transforms/Utils/Mem2Reg.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/Mem2Reg.cpp
@@ -12,16 +12,17 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mem2reg"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/UnifyFunctionExitNodes.h"
using namespace llvm;
+#define DEBUG_TYPE "mem2reg"
+
STATISTIC(NumPromoted, "Number of alloca's promoted");
namespace {
@@ -34,10 +35,10 @@ namespace {
// runOnFunction - To run this pass, first we calculate the alloca
// instructions that are safe for promotion, then we promote each one.
//
- virtual bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<DominatorTree>();
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<DominatorTreeWrapperPass>();
AU.setPreservesCFG();
// This is a cluster of orthogonal Transforms
AU.addPreserved<UnifyFunctionExitNodes>();
@@ -50,7 +51,7 @@ namespace {
char PromotePass::ID = 0;
INITIALIZE_PASS_BEGIN(PromotePass, "mem2reg", "Promote Memory to Register",
false, false)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(PromotePass, "mem2reg", "Promote Memory to Register",
false, false)
@@ -61,7 +62,7 @@ bool PromotePass::runOnFunction(Function &F) {
bool Changed = false;
- DominatorTree &DT = getAnalysis<DominatorTree>();
+ DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
while (1) {
Allocas.clear();
diff --git a/contrib/llvm/lib/Transforms/Utils/MetaRenamer.cpp b/contrib/llvm/lib/Transforms/Utils/MetaRenamer.cpp
index c3704531..395a46b 100644
--- a/contrib/llvm/lib/Transforms/Utils/MetaRenamer.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/MetaRenamer.cpp
@@ -48,11 +48,11 @@ namespace {
initializeMetaRenamerPass(*PassRegistry::getPassRegistry());
}
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
- bool runOnModule(Module &M) {
+ bool runOnModule(Module &M) override {
static const char *const metaNames[] = {
// See http://en.wikipedia.org/wiki/Metasyntactic_variable
"foo", "bar", "baz", "quux", "barney", "snork", "zot", "blam", "hoge",
diff --git a/contrib/llvm/lib/Transforms/Utils/ModuleUtils.cpp b/contrib/llvm/lib/Transforms/Utils/ModuleUtils.cpp
index ff6e6f9..d9dbbca 100644
--- a/contrib/llvm/lib/Transforms/Utils/ModuleUtils.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/ModuleUtils.cpp
@@ -24,16 +24,16 @@ static void appendToGlobalArray(const char *Array,
Module &M, Function *F, int Priority) {
IRBuilder<> IRB(M.getContext());
FunctionType *FnTy = FunctionType::get(IRB.getVoidTy(), false);
- StructType *Ty = StructType::get(
- IRB.getInt32Ty(), PointerType::getUnqual(FnTy), NULL);
-
- Constant *RuntimeCtorInit = ConstantStruct::get(
- Ty, IRB.getInt32(Priority), F, NULL);
// Get the current set of static global constructors and add the new ctor
// to the list.
SmallVector<Constant *, 16> CurrentCtors;
- if (GlobalVariable * GVCtor = M.getNamedGlobal(Array)) {
+ StructType *EltTy;
+ if (GlobalVariable *GVCtor = M.getNamedGlobal(Array)) {
+ // If there is a global_ctors array, use the existing struct type, which can
+ // have 2 or 3 fields.
+ ArrayType *ATy = cast<ArrayType>(GVCtor->getType()->getElementType());
+ EltTy = cast<StructType>(ATy->getElementType());
if (Constant *Init = GVCtor->getInitializer()) {
unsigned n = Init->getNumOperands();
CurrentCtors.reserve(n + 1);
@@ -41,13 +41,26 @@ static void appendToGlobalArray(const char *Array,
CurrentCtors.push_back(cast<Constant>(Init->getOperand(i)));
}
GVCtor->eraseFromParent();
+ } else {
+ // Use a simple two-field struct if there isn't one already.
+ EltTy = StructType::get(IRB.getInt32Ty(), PointerType::getUnqual(FnTy),
+ nullptr);
}
+ // Build a 2 or 3 field global_ctor entry. We don't take a comdat key.
+ Constant *CSVals[3];
+ CSVals[0] = IRB.getInt32(Priority);
+ CSVals[1] = F;
+ // FIXME: Drop support for the two element form in LLVM 4.0.
+ if (EltTy->getNumElements() >= 3)
+ CSVals[2] = llvm::Constant::getNullValue(IRB.getInt8PtrTy());
+ Constant *RuntimeCtorInit =
+ ConstantStruct::get(EltTy, makeArrayRef(CSVals, EltTy->getNumElements()));
+
CurrentCtors.push_back(RuntimeCtorInit);
// Create a new initializer.
- ArrayType *AT = ArrayType::get(RuntimeCtorInit->getType(),
- CurrentCtors.size());
+ ArrayType *AT = ArrayType::get(EltTy, CurrentCtors.size());
Constant *NewInit = ConstantArray::get(AT, CurrentCtors);
// Create the new global variable and replace all uses of
diff --git a/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index 8f6eee3..06d73fe 100644
--- a/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -25,7 +25,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mem2reg"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
@@ -34,23 +33,25 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasSetTracker.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/DIBuilder.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DIBuilder.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <queue>
using namespace llvm;
+#define DEBUG_TYPE "mem2reg"
+
STATISTIC(NumLocalPromoted, "Number of alloca's promoted within one block");
STATISTIC(NumSingleStore, "Number of alloca's promoted with a single store");
STATISTIC(NumDeadAlloca, "Number of dead alloca's removed");
@@ -59,11 +60,10 @@ STATISTIC(NumPHIInsert, "Number of PHI nodes inserted");
bool llvm::isAllocaPromotable(const AllocaInst *AI) {
// FIXME: If the memory unit is of pointer or integer type, we can permit
// assignments to subsections of the memory unit.
+ unsigned AS = AI->getType()->getAddressSpace();
// Only allow direct and non-volatile loads and stores...
- for (Value::const_use_iterator UI = AI->use_begin(), UE = AI->use_end();
- UI != UE; ++UI) { // Loop over all of the uses of the alloca
- const User *U = *UI;
+ for (const User *U : AI->users()) {
if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
// Note that atomic loads can be transformed; atomic semantics do
// not have any meaning for a local alloca.
@@ -81,12 +81,12 @@ bool llvm::isAllocaPromotable(const AllocaInst *AI) {
II->getIntrinsicID() != Intrinsic::lifetime_end)
return false;
} else if (const BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
- if (BCI->getType() != Type::getInt8PtrTy(U->getContext()))
+ if (BCI->getType() != Type::getInt8PtrTy(U->getContext(), AS))
return false;
if (!onlyUsedByLifetimeMarkers(BCI))
return false;
} else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
- if (GEPI->getType() != Type::getInt8PtrTy(U->getContext()))
+ if (GEPI->getType() != Type::getInt8PtrTy(U->getContext(), AS))
return false;
if (!GEPI->hasAllZeroIndices())
return false;
@@ -116,11 +116,11 @@ struct AllocaInfo {
void clear() {
DefiningBlocks.clear();
UsingBlocks.clear();
- OnlyStore = 0;
- OnlyBlock = 0;
+ OnlyStore = nullptr;
+ OnlyBlock = nullptr;
OnlyUsedInOneBlock = true;
- AllocaPointerVal = 0;
- DbgDeclare = 0;
+ AllocaPointerVal = nullptr;
+ DbgDeclare = nullptr;
}
/// Scan the uses of the specified alloca, filling in the AllocaInfo used
@@ -131,8 +131,7 @@ struct AllocaInfo {
// As we scan the uses of the alloca instruction, keep track of stores,
// and decide whether all of the loads and stores to the alloca are within
// the same basic block.
- for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
- UI != E;) {
+ for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
Instruction *User = cast<Instruction>(*UI++);
if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
@@ -149,7 +148,7 @@ struct AllocaInfo {
}
if (OnlyUsedInOneBlock) {
- if (OnlyBlock == 0)
+ if (!OnlyBlock)
OnlyBlock = User->getParent();
else if (OnlyBlock != User->getParent())
OnlyUsedInOneBlock = false;
@@ -165,7 +164,7 @@ class RenamePassData {
public:
typedef std::vector<Value *> ValVector;
- RenamePassData() : BB(NULL), Pred(NULL), Values() {}
+ RenamePassData() : BB(nullptr), Pred(nullptr), Values() {}
RenamePassData(BasicBlock *B, BasicBlock *P, const ValVector &V)
: BB(B), Pred(P), Values(V) {}
BasicBlock *BB;
@@ -317,8 +316,7 @@ static void removeLifetimeIntrinsicUsers(AllocaInst *AI) {
// Knowing that this alloca is promotable, we know that it's safe to kill all
// instructions except for load and store.
- for (Value::use_iterator UI = AI->use_begin(), UE = AI->use_end();
- UI != UE;) {
+ for (auto UI = AI->user_begin(), UE = AI->user_end(); UI != UE;) {
Instruction *I = cast<Instruction>(*UI);
++UI;
if (isa<LoadInst>(I) || isa<StoreInst>(I))
@@ -328,10 +326,9 @@ static void removeLifetimeIntrinsicUsers(AllocaInst *AI) {
// The only users of this bitcast/GEP instruction are lifetime intrinsics.
// Follow the use/def chain to erase them now instead of leaving it for
// dead code elimination later.
- for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
- UI != UE;) {
- Instruction *Inst = cast<Instruction>(*UI);
- ++UI;
+ for (auto UUI = I->user_begin(), UUE = I->user_end(); UUI != UUE;) {
+ Instruction *Inst = cast<Instruction>(*UUI);
+ ++UUI;
Inst->eraseFromParent();
}
}
@@ -359,7 +356,7 @@ static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
// Clear out UsingBlocks. We will reconstruct it here if needed.
Info.UsingBlocks.clear();
- for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E;) {
+ for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
Instruction *UserInst = cast<Instruction>(*UI++);
if (!isa<LoadInst>(UserInst)) {
assert(UserInst == OnlyStore && "Should only have load/stores");
@@ -456,9 +453,8 @@ static void promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
typedef SmallVector<std::pair<unsigned, StoreInst *>, 64> StoresByIndexTy;
StoresByIndexTy StoresByIndex;
- for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E;
- ++UI)
- if (StoreInst *SI = dyn_cast<StoreInst>(*UI))
+ for (User *U : AI->users())
+ if (StoreInst *SI = dyn_cast<StoreInst>(U))
StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));
// Sort the stores by their index, making it efficient to do a lookup with a
@@ -467,7 +463,7 @@ static void promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
// Walk all of the loads from this alloca, replacing them with the nearest
// store above them, if any.
- for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E;) {
+ for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
LoadInst *LI = dyn_cast<LoadInst>(*UI++);
if (!LI)
continue;
@@ -477,7 +473,8 @@ static void promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
// Find the nearest store that has a lower index than this load.
StoresByIndexTy::iterator I =
std::lower_bound(StoresByIndex.begin(), StoresByIndex.end(),
- std::make_pair(LoadIdx, static_cast<StoreInst *>(0)),
+ std::make_pair(LoadIdx,
+ static_cast<StoreInst *>(nullptr)),
less_first());
if (I == StoresByIndex.begin())
@@ -485,7 +482,7 @@ static void promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
LI->replaceAllUsesWith(UndefValue::get(LI->getType()));
else
// Otherwise, there was a store before this load, the load takes its value.
- LI->replaceAllUsesWith(llvm::prior(I)->second->getOperand(0));
+ LI->replaceAllUsesWith(std::prev(I)->second->getOperand(0));
if (AST && LI->getType()->isPointerTy())
AST->deleteValue(LI);
@@ -495,7 +492,7 @@ static void promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
// Remove the (now dead) stores and alloca.
while (!AI->use_empty()) {
- StoreInst *SI = cast<StoreInst>(AI->use_back());
+ StoreInst *SI = cast<StoreInst>(AI->user_back());
// Record debuginfo for the store before removing it.
if (DbgDeclareInst *DDI = Info.DbgDeclare) {
DIBuilder DIB(*AI->getParent()->getParent()->getParent());
@@ -638,7 +635,7 @@ void PromoteMem2Reg::run() {
// and inserting the phi nodes we marked as necessary
//
std::vector<RenamePassData> RenamePassWorkList;
- RenamePassWorkList.push_back(RenamePassData(F.begin(), 0, Values));
+ RenamePassWorkList.push_back(RenamePassData(F.begin(), nullptr, Values));
do {
RenamePassData RPD;
RPD.swap(RenamePassWorkList.back());
@@ -679,8 +676,8 @@ void PromoteMem2Reg::run() {
// Iterating over NewPhiNodes is deterministic, so it is safe to try to
// simplify and RAUW them as we go. If it was not, we could add uses to
- // the values we replace with in a non deterministic order, thus creating
- // non deterministic def->use chains.
+ // the values we replace with in a non-deterministic order, thus creating
+ // non-deterministic def->use chains.
for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
I = NewPhiNodes.begin(),
E = NewPhiNodes.end();
@@ -688,7 +685,7 @@ void PromoteMem2Reg::run() {
PHINode *PN = I->second;
// If this PHI node merges one value and/or undefs, get the value.
- if (Value *V = SimplifyInstruction(PN, 0, 0, &DT)) {
+ if (Value *V = SimplifyInstruction(PN, nullptr, nullptr, &DT)) {
if (AST && PN->getType()->isPointerTy())
AST->deleteValue(PN);
PN->replaceAllUsesWith(V);
@@ -996,7 +993,7 @@ NextIteration:
// Get the next phi node.
++PNI;
APN = dyn_cast<PHINode>(PNI);
- if (APN == 0)
+ if (!APN)
break;
// Verify that it is missing entries. If not, it is not being inserted
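
One of the substantive patterns touched above is in promoteSingleBlockAlloca: stores are collected as (instruction index, StoreInst*) pairs, sorted by index, and each load is resolved with std::lower_bound against its own index. A standalone sketch of that lookup, with plain ints standing in for StoreInst* (nearestStoreBefore and the sample data are illustrative only):

#include <algorithm>
#include <cassert>
#include <utility>
#include <vector>

// Value of the closest store whose index precedes LoadIdx, or -1 ("undef")
// when the load happens before any store to the alloca.
static int nearestStoreBefore(const std::vector<std::pair<unsigned, int>> &StoresByIndex,
                              unsigned LoadIdx) {
  auto I = std::lower_bound(StoresByIndex.begin(), StoresByIndex.end(),
                            std::make_pair(LoadIdx, 0),
                            [](const std::pair<unsigned, int> &A,
                               const std::pair<unsigned, int> &B) {
                              return A.first < B.first;   // plays the role of less_first()
                            });
  if (I == StoresByIndex.begin())
    return -1;                       // the pass substitutes UndefValue here
  return std::prev(I)->second;       // otherwise the load takes the prior store's value
}

int main() {
  std::vector<std::pair<unsigned, int>> Stores = {{2, 10}, {5, 20}, {9, 30}};
  assert(nearestStoreBefore(Stores, 1) == -1);
  assert(nearestStoreBefore(Stores, 7) == 20);
  return 0;
}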
diff --git a/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp b/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp
index 30adbfa..3fcb789 100644
--- a/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp
@@ -11,17 +11,14 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "ssaupdater"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/Support/AlignOf.h"
-#include "llvm/Support/Allocator.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
@@ -30,20 +27,22 @@
using namespace llvm;
+#define DEBUG_TYPE "ssaupdater"
+
typedef DenseMap<BasicBlock*, Value*> AvailableValsTy;
static AvailableValsTy &getAvailableVals(void *AV) {
return *static_cast<AvailableValsTy*>(AV);
}
SSAUpdater::SSAUpdater(SmallVectorImpl<PHINode*> *NewPHI)
- : AV(0), ProtoType(0), ProtoName(), InsertedPHIs(NewPHI) {}
+ : AV(nullptr), ProtoType(nullptr), ProtoName(), InsertedPHIs(NewPHI) {}
SSAUpdater::~SSAUpdater() {
delete static_cast<AvailableValsTy*>(AV);
}
void SSAUpdater::Initialize(Type *Ty, StringRef Name) {
- if (AV == 0)
+ if (!AV)
AV = new AvailableValsTy();
else
getAvailableVals(AV).clear();
@@ -56,7 +55,7 @@ bool SSAUpdater::HasValueForBlock(BasicBlock *BB) const {
}
void SSAUpdater::AddAvailableValue(BasicBlock *BB, Value *V) {
- assert(ProtoType != 0 && "Need to initialize SSAUpdater");
+ assert(ProtoType && "Need to initialize SSAUpdater");
assert(ProtoType == V->getType() &&
"All rewritten values must have the same type");
getAvailableVals(AV)[BB] = V;
@@ -92,7 +91,7 @@ Value *SSAUpdater::GetValueInMiddleOfBlock(BasicBlock *BB) {
// Otherwise, we have the hard case. Get the live-in values for each
// predecessor.
SmallVector<std::pair<BasicBlock*, Value*>, 8> PredValues;
- Value *SingularValue = 0;
+ Value *SingularValue = nullptr;
// We can get our predecessor info by walking the pred_iterator list, but it
// is relatively slow. If we already have PHI nodes in this block, walk one
@@ -107,7 +106,7 @@ Value *SSAUpdater::GetValueInMiddleOfBlock(BasicBlock *BB) {
if (i == 0)
SingularValue = PredVal;
else if (PredVal != SingularValue)
- SingularValue = 0;
+ SingularValue = nullptr;
}
} else {
bool isFirstPred = true;
@@ -121,7 +120,7 @@ Value *SSAUpdater::GetValueInMiddleOfBlock(BasicBlock *BB) {
SingularValue = PredVal;
isFirstPred = false;
} else if (PredVal != SingularValue)
- SingularValue = 0;
+ SingularValue = nullptr;
}
}
@@ -130,7 +129,7 @@ Value *SSAUpdater::GetValueInMiddleOfBlock(BasicBlock *BB) {
return UndefValue::get(ProtoType);
// Otherwise, if all the merged values are the same, just use it.
- if (SingularValue != 0)
+ if (SingularValue)
return SingularValue;
// Otherwise, we do need a PHI: check to see if we already have one available
@@ -293,7 +292,7 @@ public:
PHINode *PHI = ValueIsPHI(Val, Updater);
if (PHI && PHI->getNumIncomingValues() == 0)
return PHI;
- return 0;
+ return nullptr;
}
/// GetPHIValue - For the specified PHI instruction, return the value
@@ -403,7 +402,7 @@ run(const SmallVectorImpl<Instruction*> &Insts) const {
// the order of these instructions in the block. If the first use in the
// block is a load, then it uses the live in value. The last store defines
// the live out value. We handle this by doing a linear scan of the block.
- Value *StoredValue = 0;
+ Value *StoredValue = nullptr;
for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
if (LoadInst *L = dyn_cast<LoadInst>(II)) {
// If this is a load from an unrelated pointer, ignore it.
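
GetValueInMiddleOfBlock, touched above, avoids creating a PHI when every predecessor supplies the same value; the diff only swaps 0 for nullptr, but the scan it modernizes is simple to spell out. A stripped-down version over an arbitrary list of incoming values (int* replaces Value*, and singularValue is a made-up helper):

#include <cstddef>
#include <vector>

// Returns the one value shared by every predecessor, or nullptr if the
// incoming values disagree (in which case SSAUpdater must insert a PHI).
static int *singularValue(const std::vector<int *> &Incoming) {
  int *Singular = nullptr;
  for (std::size_t i = 0, e = Incoming.size(); i != e; ++i) {
    if (i == 0)
      Singular = Incoming[i];        // first predecessor seeds the candidate
    else if (Incoming[i] != Singular)
      return nullptr;                // mismatch: a real merge point, PHI needed
  }
  return Singular;                   // nullptr only if Incoming was empty
}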
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index ff50b12..24bb63b 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "simplifycfg"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
@@ -23,6 +22,8 @@
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
@@ -34,14 +35,12 @@
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
+#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/NoFolder.h"
-#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <algorithm>
@@ -50,6 +49,8 @@
using namespace llvm;
using namespace PatternMatch;
+#define DEBUG_TYPE "simplifycfg"
+
static cl::opt<unsigned>
PHINodeFoldingThreshold("phi-node-folding-threshold", cl::Hidden, cl::init(1),
cl::desc("Control the amount of phi node folding to perform (default = 1)"));
@@ -62,12 +63,13 @@ static cl::opt<bool>
SinkCommon("simplifycfg-sink-common", cl::Hidden, cl::init(true),
cl::desc("Sink common instructions down to the end block"));
-static cl::opt<bool>
-HoistCondStores("simplifycfg-hoist-cond-stores", cl::Hidden, cl::init(true),
- cl::desc("Hoist conditional stores if an unconditional store preceeds"));
+static cl::opt<bool> HoistCondStores(
+ "simplifycfg-hoist-cond-stores", cl::Hidden, cl::init(true),
+ cl::desc("Hoist conditional stores if an unconditional store precedes"));
STATISTIC(NumBitMaps, "Number of switch instructions turned into bitmaps");
STATISTIC(NumLookupTables, "Number of switch instructions turned into lookup tables");
+STATISTIC(NumLookupTablesHoles, "Number of switch instructions turned into lookup tables (holes checked)");
STATISTIC(NumSinkCommons, "Number of common instructions sunk down to the end block");
STATISTIC(NumSpeculations, "Number of speculative executed instructions");
@@ -90,7 +92,7 @@ namespace {
class SimplifyCFGOpt {
const TargetTransformInfo &TTI;
- const DataLayout *const TD;
+ const DataLayout *const DL;
Value *isValueEqualityComparison(TerminatorInst *TI);
BasicBlock *GetValueEqualityComparisonCases(TerminatorInst *TI,
std::vector<ValueEqualityComparisonCase> &Cases);
@@ -109,8 +111,8 @@ class SimplifyCFGOpt {
bool SimplifyCondBranch(BranchInst *BI, IRBuilder <>&Builder);
public:
- SimplifyCFGOpt(const TargetTransformInfo &TTI, const DataLayout *TD)
- : TTI(TTI), TD(TD) {}
+ SimplifyCFGOpt(const TargetTransformInfo &TTI, const DataLayout *DL)
+ : TTI(TTI), DL(DL) {}
bool run(BasicBlock *BB);
};
}
@@ -199,8 +201,8 @@ static void AddPredecessorToBlock(BasicBlock *Succ, BasicBlock *NewPred,
/// ComputeSpeculationCost - Compute an abstract "cost" of speculating the
/// given instruction, which is assumed to be safe to speculate. 1 means
/// cheap, 2 means less cheap, and UINT_MAX means prohibitively expensive.
-static unsigned ComputeSpeculationCost(const User *I) {
- assert(isSafeToSpeculativelyExecute(I) &&
+static unsigned ComputeSpeculationCost(const User *I, const DataLayout *DL) {
+ assert(isSafeToSpeculativelyExecute(I, DL) &&
"Instruction is not safe to speculatively execute!");
switch (Operator::getOpcode(I)) {
default:
@@ -211,6 +213,7 @@ static unsigned ComputeSpeculationCost(const User *I) {
if (!cast<GEPOperator>(I)->hasAllConstantIndices())
return UINT_MAX;
return 1;
+ case Instruction::ExtractValue:
case Instruction::Load:
case Instruction::Add:
case Instruction::Sub:
@@ -224,6 +227,9 @@ static unsigned ComputeSpeculationCost(const User *I) {
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SExt:
+ case Instruction::BitCast:
+ case Instruction::ExtractElement:
+ case Instruction::InsertElement:
return 1; // These are all cheap.
case Instruction::Call:
@@ -251,7 +257,8 @@ static unsigned ComputeSpeculationCost(const User *I) {
/// CostRemaining, false is returned and CostRemaining is undefined.
static bool DominatesMergePoint(Value *V, BasicBlock *BB,
SmallPtrSet<Instruction*, 4> *AggressiveInsts,
- unsigned &CostRemaining) {
+ unsigned &CostRemaining,
+ const DataLayout *DL) {
Instruction *I = dyn_cast<Instruction>(V);
if (!I) {
// Non-instructions all dominate instructions, but not all constantexprs
@@ -271,12 +278,12 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB,
// branch to BB, then it must be in the 'conditional' part of the "if
// statement". If not, it definitely dominates the region.
BranchInst *BI = dyn_cast<BranchInst>(PBB->getTerminator());
- if (BI == 0 || BI->isConditional() || BI->getSuccessor(0) != BB)
+ if (!BI || BI->isConditional() || BI->getSuccessor(0) != BB)
return true;
// If we aren't allowing aggressive promotion anymore, then don't consider
// instructions in the 'if region'.
- if (AggressiveInsts == 0) return false;
+ if (!AggressiveInsts) return false;
// If we have seen this instruction before, don't count it again.
if (AggressiveInsts->count(I)) return true;
@@ -284,10 +291,10 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB,
// Okay, it looks like the instruction IS in the "condition". Check to
// see if it's a cheap instruction to unconditionally compute, and if it
// only uses stuff defined outside of the condition. If so, hoist it out.
- if (!isSafeToSpeculativelyExecute(I))
+ if (!isSafeToSpeculativelyExecute(I, DL))
return false;
- unsigned Cost = ComputeSpeculationCost(I);
+ unsigned Cost = ComputeSpeculationCost(I, DL);
if (Cost > CostRemaining)
return false;
@@ -297,7 +304,7 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB,
// Okay, we can only really hoist these out if their operands do
// not take us over the cost threshold.
for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i)
- if (!DominatesMergePoint(*i, BB, AggressiveInsts, CostRemaining))
+ if (!DominatesMergePoint(*i, BB, AggressiveInsts, CostRemaining, DL))
return false;
// Okay, it's safe to do this! Remember this instruction.
AggressiveInsts->insert(I);
@@ -306,15 +313,15 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB,
/// GetConstantInt - Extract ConstantInt from value, looking through IntToPtr
/// and PointerNullValue. Return NULL if value is not a constant int.
-static ConstantInt *GetConstantInt(Value *V, const DataLayout *TD) {
+static ConstantInt *GetConstantInt(Value *V, const DataLayout *DL) {
// Normal constant int.
ConstantInt *CI = dyn_cast<ConstantInt>(V);
- if (CI || !TD || !isa<Constant>(V) || !V->getType()->isPointerTy())
+ if (CI || !DL || !isa<Constant>(V) || !V->getType()->isPointerTy())
return CI;
// This is some kind of pointer constant. Turn it into a pointer-sized
// ConstantInt if possible.
- IntegerType *PtrTy = cast<IntegerType>(TD->getIntPtrType(V->getType()));
+ IntegerType *PtrTy = cast<IntegerType>(DL->getIntPtrType(V->getType()));
// Null pointer means 0, see SelectionDAGBuilder::getValue(const Value*).
if (isa<ConstantPointerNull>(V))
@@ -331,7 +338,7 @@ static ConstantInt *GetConstantInt(Value *V, const DataLayout *TD) {
return cast<ConstantInt>
(ConstantExpr::getIntegerCast(CI, PtrTy, /*isSigned=*/false));
}
- return 0;
+ return nullptr;
}
/// GatherConstantCompares - Given a potentially 'or'd or 'and'd together
@@ -340,13 +347,13 @@ static ConstantInt *GetConstantInt(Value *V, const DataLayout *TD) {
/// Values vector.
static Value *
GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra,
- const DataLayout *TD, bool isEQ, unsigned &UsedICmps) {
+ const DataLayout *DL, bool isEQ, unsigned &UsedICmps) {
Instruction *I = dyn_cast<Instruction>(V);
- if (I == 0) return 0;
+ if (!I) return nullptr;
// If this is an icmp against a constant, handle this as one of the cases.
if (ICmpInst *ICI = dyn_cast<ICmpInst>(I)) {
- if (ConstantInt *C = GetConstantInt(I->getOperand(1), TD)) {
+ if (ConstantInt *C = GetConstantInt(I->getOperand(1), DL)) {
Value *RHSVal;
ConstantInt *RHSC;
@@ -389,27 +396,27 @@ GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra,
// If there are a ton of values, we don't want to make a ginormous switch.
if (Span.getSetSize().ugt(8) || Span.isEmptySet())
- return 0;
+ return nullptr;
for (APInt Tmp = Span.getLower(); Tmp != Span.getUpper(); ++Tmp)
Vals.push_back(ConstantInt::get(V->getContext(), Tmp));
UsedICmps++;
return hasAdd ? RHSVal : I->getOperand(0);
}
- return 0;
+ return nullptr;
}
// Otherwise, we can only handle an | or &, depending on isEQ.
if (I->getOpcode() != (isEQ ? Instruction::Or : Instruction::And))
- return 0;
+ return nullptr;
unsigned NumValsBeforeLHS = Vals.size();
unsigned UsedICmpsBeforeLHS = UsedICmps;
- if (Value *LHS = GatherConstantCompares(I->getOperand(0), Vals, Extra, TD,
+ if (Value *LHS = GatherConstantCompares(I->getOperand(0), Vals, Extra, DL,
isEQ, UsedICmps)) {
unsigned NumVals = Vals.size();
unsigned UsedICmpsBeforeRHS = UsedICmps;
- if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, TD,
+ if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, DL,
isEQ, UsedICmps)) {
if (LHS == RHS)
return LHS;
@@ -419,33 +426,33 @@ GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra,
// The RHS of the or/and can't be folded in and we haven't used "Extra" yet,
// set it and return success.
- if (Extra == 0 || Extra == I->getOperand(1)) {
+ if (Extra == nullptr || Extra == I->getOperand(1)) {
Extra = I->getOperand(1);
return LHS;
}
Vals.resize(NumValsBeforeLHS);
UsedICmps = UsedICmpsBeforeLHS;
- return 0;
+ return nullptr;
}
// If the LHS can't be folded in, but Extra is available and RHS can, try to
// use LHS as Extra.
- if (Extra == 0 || Extra == I->getOperand(0)) {
+ if (Extra == nullptr || Extra == I->getOperand(0)) {
Value *OldExtra = Extra;
Extra = I->getOperand(0);
- if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, TD,
+ if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, DL,
isEQ, UsedICmps))
return RHS;
assert(Vals.size() == NumValsBeforeLHS);
Extra = OldExtra;
}
- return 0;
+ return nullptr;
}
static void EraseTerminatorInstAndDCECond(TerminatorInst *TI) {
- Instruction *Cond = 0;
+ Instruction *Cond = nullptr;
if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
Cond = dyn_cast<Instruction>(SI->getCondition());
} else if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
@@ -462,7 +469,7 @@ static void EraseTerminatorInstAndDCECond(TerminatorInst *TI) {
/// isValueEqualityComparison - Return true if the specified terminator checks
/// to see if a value is equal to constant integer value.
Value *SimplifyCFGOpt::isValueEqualityComparison(TerminatorInst *TI) {
- Value *CV = 0;
+ Value *CV = nullptr;
if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
// Do not permit merging of large switch instructions into their
// predecessors unless there is only one predecessor.
@@ -472,14 +479,14 @@ Value *SimplifyCFGOpt::isValueEqualityComparison(TerminatorInst *TI) {
} else if (BranchInst *BI = dyn_cast<BranchInst>(TI))
if (BI->isConditional() && BI->getCondition()->hasOneUse())
if (ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition()))
- if (ICI->isEquality() && GetConstantInt(ICI->getOperand(1), TD))
+ if (ICI->isEquality() && GetConstantInt(ICI->getOperand(1), DL))
CV = ICI->getOperand(0);
// Unwrap any lossless ptrtoint cast.
- if (TD && CV) {
+ if (DL && CV) {
if (PtrToIntInst *PTII = dyn_cast<PtrToIntInst>(CV)) {
Value *Ptr = PTII->getPointerOperand();
- if (PTII->getType() == TD->getIntPtrType(Ptr->getType()))
+ if (PTII->getType() == DL->getIntPtrType(Ptr->getType()))
CV = Ptr;
}
}
@@ -504,7 +511,7 @@ GetValueEqualityComparisonCases(TerminatorInst *TI,
ICmpInst *ICI = cast<ICmpInst>(BI->getCondition());
BasicBlock *Succ = BI->getSuccessor(ICI->getPredicate() == ICmpInst::ICMP_NE);
Cases.push_back(ValueEqualityComparisonCase(GetConstantInt(ICI->getOperand(1),
- TD),
+ DL),
Succ));
return BI->getSuccessor(ICI->getPredicate() == ICmpInst::ICMP_EQ);
}
@@ -652,11 +659,11 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
// Otherwise, TI's block must correspond to some matched value. Find out
// which value (or set of values) this is.
- ConstantInt *TIV = 0;
+ ConstantInt *TIV = nullptr;
BasicBlock *TIBB = TI->getParent();
for (unsigned i = 0, e = PredCases.size(); i != e; ++i)
if (PredCases[i].Dest == TIBB) {
- if (TIV != 0)
+ if (TIV)
return false; // Cannot handle multiple values coming to this block.
TIV = PredCases[i].Value;
}
@@ -664,7 +671,7 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
// Okay, we found the one constant that our value can be if we get into TI's
// BB. Find out which successor will unconditionally be branched to.
- BasicBlock *TheRealDest = 0;
+ BasicBlock *TheRealDest = nullptr;
for (unsigned i = 0, e = ThisCases.size(); i != e; ++i)
if (ThisCases[i].Value == TIV) {
TheRealDest = ThisCases[i].Dest;
@@ -672,7 +679,7 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
}
// If not handled by any explicit cases, it is handled by the default case.
- if (TheRealDest == 0) TheRealDest = ThisDef;
+ if (!TheRealDest) TheRealDest = ThisDef;
// Remove PHI node entries for dead edges.
BasicBlock *CheckEdge = TheRealDest;
@@ -680,7 +687,7 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
if (*SI != CheckEdge)
(*SI)->removePredecessor(TIBB);
else
- CheckEdge = 0;
+ CheckEdge = nullptr;
// Insert the new branch.
Instruction *NI = Builder.CreateBr(TheRealDest);
@@ -732,8 +739,7 @@ static void GetBranchWeights(TerminatorInst *TI,
MDNode* MD = TI->getMetadata(LLVMContext::MD_prof);
assert(MD);
for (unsigned i = 1, e = MD->getNumOperands(); i < e; ++i) {
- ConstantInt* CI = dyn_cast<ConstantInt>(MD->getOperand(i));
- assert(CI);
+ ConstantInt *CI = cast<ConstantInt>(MD->getOperand(i));
Weights.push_back(CI->getValue().getZExtValue());
}
@@ -748,21 +754,14 @@ static void GetBranchWeights(TerminatorInst *TI,
}
}
-/// Sees if any of the weights are too big for a uint32_t, and halves all the
-/// weights if any are.
+/// Keep halving the weights until all can fit in uint32_t.
static void FitWeights(MutableArrayRef<uint64_t> Weights) {
- bool Halve = false;
- for (unsigned i = 0; i < Weights.size(); ++i)
- if (Weights[i] > UINT_MAX) {
- Halve = true;
- break;
- }
-
- if (! Halve)
- return;
-
- for (unsigned i = 0; i < Weights.size(); ++i)
- Weights[i] /= 2;
+ uint64_t Max = *std::max_element(Weights.begin(), Weights.end());
+ if (Max > UINT_MAX) {
+ unsigned Offset = 32 - countLeadingZeros(Max);
+ for (uint64_t &I : Weights)
+ I >>= Offset;
+ }
}
/// FoldValueComparisonIntoPredecessors - The specified terminator is a value
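
The FitWeights rewrite above replaces the old single halving with one right shift sized so the largest weight drops below 2^32. A worked standalone version (fitWeights is a local name, and __builtin_clzll stands in for the countLeadingZeros call on a 64-bit value):

#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <vector>

static void fitWeights(std::vector<uint64_t> &Weights) {
  uint64_t Max = *std::max_element(Weights.begin(), Weights.end());
  if (Max > UINT_MAX) {
    // 32 - clz(Max) equals bitwidth(Max) - 32, so the shifted Max ends up
    // exactly 32 bits wide and every other weight fits as well.
    unsigned Offset = 32 - __builtin_clzll(Max);
    for (uint64_t &W : Weights)
      W >>= Offset;
  }
}

int main() {
  std::vector<uint64_t> W = {1ull << 40, 1ull << 20, 3};
  fitWeights(W);                  // shifts everything right by 9
  assert(W[0] == (1ull << 31));   // largest weight now occupies exactly 32 bits
  assert(W[1] == (1ull << 11));
  assert(W[2] == 0);              // tiny weights can shift down to zero, as in the pass
  return 0;
}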
@@ -929,8 +928,8 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
Builder.SetInsertPoint(PTI);
// Convert pointer to int before we switch.
if (CV->getType()->isPointerTy()) {
- assert(TD && "Cannot switch on pointer without DataLayout");
- CV = Builder.CreatePtrToInt(CV, TD->getIntPtrType(CV->getType()),
+ assert(DL && "Cannot switch on pointer without DataLayout");
+ CV = Builder.CreatePtrToInt(CV, DL->getIntPtrType(CV->getType()),
"magicptr");
}
@@ -957,10 +956,10 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
// Okay, last check. If BB is still a successor of PSI, then we must
// have an infinite loop case. If so, add an infinitely looping block
// to handle the case to preserve the behavior of the code.
- BasicBlock *InfLoopBlock = 0;
+ BasicBlock *InfLoopBlock = nullptr;
for (unsigned i = 0, e = NewSI->getNumSuccessors(); i != e; ++i)
if (NewSI->getSuccessor(i) == BB) {
- if (InfLoopBlock == 0) {
+ if (!InfLoopBlock) {
// Insert it at the end of the function, because it's either code,
// or it won't matter if it's hot. :)
InfLoopBlock = BasicBlock::Create(BB->getContext(),
@@ -999,7 +998,7 @@ static bool isSafeToHoistInvoke(BasicBlock *BB1, BasicBlock *BB2,
/// HoistThenElseCodeToIf - Given a conditional branch that goes to BB1 and
/// BB2, hoist any common code in the two blocks up into the branch block. The
/// caller of this function guarantees that BI's block dominates BB1 and BB2.
-static bool HoistThenElseCodeToIf(BranchInst *BI) {
+static bool HoistThenElseCodeToIf(BranchInst *BI, const DataLayout *DL) {
// This does very trivial matching, with limited scanning, to find identical
// instructions in the two blocks. In particular, we don't want to get into
// O(M*N) situations here where M and N are the sizes of BB1 and BB2. As
@@ -1073,9 +1072,9 @@ HoistTerminator:
if (BB1V == BB2V)
continue;
- if (isa<ConstantExpr>(BB1V) && !isSafeToSpeculativelyExecute(BB1V))
+ if (isa<ConstantExpr>(BB1V) && !isSafeToSpeculativelyExecute(BB1V, DL))
return Changed;
- if (isa<ConstantExpr>(BB2V) && !isSafeToSpeculativelyExecute(BB2V))
+ if (isa<ConstantExpr>(BB2V) && !isSafeToSpeculativelyExecute(BB2V, DL))
return Changed;
}
}
@@ -1106,7 +1105,7 @@ HoistTerminator:
// These values do not agree. Insert a select instruction before NT
// that determines the right value.
SelectInst *&SI = InsertedSelects[std::make_pair(BB1V, BB2V)];
- if (SI == 0)
+ if (!SI)
SI = cast<SelectInst>
(Builder.CreateSelect(BI->getCondition(), BB1V, BB2V,
BB1V->getName()+"."+BB2V->getName()));
@@ -1151,7 +1150,7 @@ static bool SinkThenElseCodeToEnd(BranchInst *BI1) {
// Gather the PHI nodes in BBEnd.
std::map<Value*, std::pair<Value*, PHINode*> > MapValueFromBB1ToBB2;
- Instruction *FirstNonPhiInBBEnd = 0;
+ Instruction *FirstNonPhiInBBEnd = nullptr;
for (BasicBlock::iterator I = BBEnd->begin(), E = BBEnd->end();
I != E; ++I) {
if (PHINode *PN = dyn_cast<PHINode>(I)) {
@@ -1229,7 +1228,7 @@ static bool SinkThenElseCodeToEnd(BranchInst *BI1) {
// The operands should be either the same or they need to be generated
// with a PHI node after sinking. We only handle the case where there is
// a single pair of different operands.
- Value *DifferentOp1 = 0, *DifferentOp2 = 0;
+ Value *DifferentOp1 = nullptr, *DifferentOp2 = nullptr;
unsigned Op1Idx = 0;
for (unsigned I = 0, E = I1->getNumOperands(); I != E; ++I) {
if (I1->getOperand(I) == I2->getOperand(I))
@@ -1325,11 +1324,11 @@ static Value *isSafeToSpeculateStore(Instruction *I, BasicBlock *BrBB,
BasicBlock *StoreBB, BasicBlock *EndBB) {
StoreInst *StoreToHoist = dyn_cast<StoreInst>(I);
if (!StoreToHoist)
- return 0;
+ return nullptr;
// Volatile or atomic.
if (!StoreToHoist->isSimple())
- return 0;
+ return nullptr;
Value *StorePtr = StoreToHoist->getPointerOperand();
@@ -1341,7 +1340,7 @@ static Value *isSafeToSpeculateStore(Instruction *I, BasicBlock *BrBB,
// Could be calling an instruction that effects memory like free().
if (CurI->mayHaveSideEffects() && !isa<StoreInst>(CurI))
- return 0;
+ return nullptr;
StoreInst *SI = dyn_cast<StoreInst>(CurI);
// Found the previous store make sure it stores to the same location.
@@ -1349,10 +1348,10 @@ static Value *isSafeToSpeculateStore(Instruction *I, BasicBlock *BrBB,
// Found the previous store, return its value operand.
return SI->getValueOperand();
else if (SI)
- return 0; // Unknown store.
+ return nullptr; // Unknown store.
}
- return 0;
+ return nullptr;
}
/// \brief Speculate a conditional basic block flattening the CFG.
@@ -1392,7 +1391,8 @@ static Value *isSafeToSpeculateStore(Instruction *I, BasicBlock *BrBB,
/// \endcode
///
/// \returns true if the conditional block is removed.
-static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB) {
+static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB,
+ const DataLayout *DL) {
// Be conservative for now. FP select instruction can often be expensive.
Value *BrCond = BI->getCondition();
if (isa<FCmpInst>(BrCond))
@@ -1418,10 +1418,10 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB) {
SmallDenseMap<Instruction *, unsigned, 4> SinkCandidateUseCounts;
unsigned SpeculationCost = 0;
- Value *SpeculatedStoreValue = 0;
- StoreInst *SpeculatedStore = 0;
+ Value *SpeculatedStoreValue = nullptr;
+ StoreInst *SpeculatedStore = nullptr;
for (BasicBlock::iterator BBI = ThenBB->begin(),
- BBE = llvm::prior(ThenBB->end());
+ BBE = std::prev(ThenBB->end());
BBI != BBE; ++BBI) {
Instruction *I = BBI;
// Skip debug info.
@@ -1435,13 +1435,13 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB) {
return false;
// Don't hoist the instruction if it's unsafe or expensive.
- if (!isSafeToSpeculativelyExecute(I) &&
+ if (!isSafeToSpeculativelyExecute(I, DL) &&
!(HoistCondStores &&
(SpeculatedStoreValue = isSafeToSpeculateStore(I, BB, ThenBB,
EndBB))))
return false;
if (!SpeculatedStoreValue &&
- ComputeSpeculationCost(I) > PHINodeFoldingThreshold)
+ ComputeSpeculationCost(I, DL) > PHINodeFoldingThreshold)
return false;
// Store the store speculation candidate.
@@ -1492,11 +1492,11 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB) {
if (!OrigCE && !ThenCE)
continue; // Known safe and cheap.
- if ((ThenCE && !isSafeToSpeculativelyExecute(ThenCE)) ||
- (OrigCE && !isSafeToSpeculativelyExecute(OrigCE)))
+ if ((ThenCE && !isSafeToSpeculativelyExecute(ThenCE, DL)) ||
+ (OrigCE && !isSafeToSpeculativelyExecute(OrigCE, DL)))
return false;
- unsigned OrigCost = OrigCE ? ComputeSpeculationCost(OrigCE) : 0;
- unsigned ThenCost = ThenCE ? ComputeSpeculationCost(ThenCE) : 0;
+ unsigned OrigCost = OrigCE ? ComputeSpeculationCost(OrigCE, DL) : 0;
+ unsigned ThenCost = ThenCE ? ComputeSpeculationCost(ThenCE, DL) : 0;
if (OrigCost + ThenCost > 2 * PHINodeFoldingThreshold)
return false;
@@ -1531,7 +1531,7 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB) {
// Hoist the instructions.
BB->getInstList().splice(BI, ThenBB->getInstList(), ThenBB->begin(),
- llvm::prior(ThenBB->end()));
+ std::prev(ThenBB->end()));
// Insert selects and rewrite the PHI operands.
IRBuilder<true, NoFolder> Builder(BI);
@@ -1589,10 +1589,9 @@ static bool BlockIsSimpleEnoughToThreadThrough(BasicBlock *BB) {
// We can only support instructions that do not define values that are
// live outside of the current basic block.
- for (Value::use_iterator UI = BBI->use_begin(), E = BBI->use_end();
- UI != E; ++UI) {
- Instruction *U = cast<Instruction>(*UI);
- if (U->getParent() != BB || isa<PHINode>(U)) return false;
+ for (User *U : BBI->users()) {
+ Instruction *UI = cast<Instruction>(U);
+ if (UI->getParent() != BB || isa<PHINode>(UI)) return false;
}
// Looks ok, continue checking.
@@ -1605,7 +1604,7 @@ static bool BlockIsSimpleEnoughToThreadThrough(BasicBlock *BB) {
/// that is defined in the same block as the branch and if any PHI entries are
/// constants, thread edges corresponding to that entry to be branches to their
/// ultimate destination.
-static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *TD) {
+static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *DL) {
BasicBlock *BB = BI->getParent();
PHINode *PN = dyn_cast<PHINode>(BI->getCondition());
// NOTE: we currently cannot transform this case if the PHI node is used
@@ -1628,7 +1627,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *TD) {
// constants.
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
ConstantInt *CB = dyn_cast<ConstantInt>(PN->getIncomingValue(i));
- if (CB == 0 || !CB->getType()->isIntegerTy(1)) continue;
+ if (!CB || !CB->getType()->isIntegerTy(1)) continue;
// Okay, we now know that all edges from PredBB should be revectored to
// branch to RealDest.
@@ -1674,7 +1673,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *TD) {
}
// Check for trivial simplification.
- if (Value *V = SimplifyInstruction(N, TD)) {
+ if (Value *V = SimplifyInstruction(N, DL)) {
TranslateMap[BBI] = V;
delete N; // Instruction folded away, don't need actual inst
} else {
@@ -1695,7 +1694,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *TD) {
}
// Recurse, simplifying any other constants.
- return FoldCondBranchOnPHI(BI, TD) | true;
+ return FoldCondBranchOnPHI(BI, DL) | true;
}
return false;
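A source-level caricature of the threading FoldCondBranchOnPHI performs (illustrative names only): when an edge contributes a constant to the PHI that feeds the branch, that predecessor's edge can jump straight to the destination the constant selects.

// Illustrative only: the 'flag' path contributes the constant 'true' to the
// condition PHI, so after threading it branches directly to the taken side.
int before(bool flag, int a) {
  bool cond;
  if (flag)
    cond = true;            // constant PHI entry
  else
    cond = (a > 0);
  return cond ? a : -a;
}
// After threading, the 'flag == true' edge reaches 'return a' without
// re-testing 'cond'.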
@@ -1703,7 +1702,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *TD) {
/// FoldTwoEntryPHINode - Given a BB that starts with the specified two-entry
/// PHI node, see if we can eliminate it.
-static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *TD) {
+static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *DL) {
// Ok, this is a two entry PHI node. Check to see if this is a simple "if
// statement", which has a very simple dominance structure. Basically, we
// are trying to find the condition that is being branched on, which
@@ -1737,23 +1736,23 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *TD) {
for (BasicBlock::iterator II = BB->begin(); isa<PHINode>(II);) {
PHINode *PN = cast<PHINode>(II++);
- if (Value *V = SimplifyInstruction(PN, TD)) {
+ if (Value *V = SimplifyInstruction(PN, DL)) {
PN->replaceAllUsesWith(V);
PN->eraseFromParent();
continue;
}
if (!DominatesMergePoint(PN->getIncomingValue(0), BB, &AggressiveInsts,
- MaxCostVal0) ||
+ MaxCostVal0, DL) ||
!DominatesMergePoint(PN->getIncomingValue(1), BB, &AggressiveInsts,
- MaxCostVal1))
+ MaxCostVal1, DL))
return false;
}
// If we folded the first phi, PN dangles at this point. Refresh it. If
// we ran out of PHIs then we simplified them all.
PN = dyn_cast<PHINode>(BB->begin());
- if (PN == 0) return true;
+ if (!PN) return true;
// Don't fold i1 branches on PHIs which contain binary operators. These can
// often be turned into switches and other things.
@@ -1767,11 +1766,11 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *TD) {
// instructions in the predecessor blocks can be promoted as well. If
// not, we won't be able to get rid of the control flow, so it's not
// worth promoting to select instructions.
- BasicBlock *DomBlock = 0;
+ BasicBlock *DomBlock = nullptr;
BasicBlock *IfBlock1 = PN->getIncomingBlock(0);
BasicBlock *IfBlock2 = PN->getIncomingBlock(1);
if (cast<BranchInst>(IfBlock1->getTerminator())->isConditional()) {
- IfBlock1 = 0;
+ IfBlock1 = nullptr;
} else {
DomBlock = *pred_begin(IfBlock1);
for (BasicBlock::iterator I = IfBlock1->begin();!isa<TerminatorInst>(I);++I)
@@ -1784,7 +1783,7 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *TD) {
}
if (cast<BranchInst>(IfBlock2->getTerminator())->isConditional()) {
- IfBlock2 = 0;
+ IfBlock2 = nullptr;
} else {
DomBlock = *pred_begin(IfBlock2);
for (BasicBlock::iterator I = IfBlock2->begin();!isa<TerminatorInst>(I);++I)
@@ -1964,10 +1963,10 @@ static bool checkCSEInPredecessor(Instruction *Inst, BasicBlock *PB) {
/// FoldBranchToCommonDest - If this basic block is simple enough, and if a
/// predecessor branches to us and one of our successors, fold the block into
/// the predecessor and use logical operations to pick the right destination.
-bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
+bool llvm::FoldBranchToCommonDest(BranchInst *BI, const DataLayout *DL) {
BasicBlock *BB = BI->getParent();
- Instruction *Cond = 0;
+ Instruction *Cond = nullptr;
if (BI->isConditional())
Cond = dyn_cast<Instruction>(BI->getCondition());
else {
@@ -1993,12 +1992,12 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
}
}
- if (Cond == 0)
+ if (!Cond)
return false;
}
- if (Cond == 0 || (!isa<CmpInst>(Cond) && !isa<BinaryOperator>(Cond)) ||
- Cond->getParent() != BB || !Cond->hasOneUse())
+ if (!Cond || (!isa<CmpInst>(Cond) && !isa<BinaryOperator>(Cond)) ||
+ Cond->getParent() != BB || !Cond->hasOneUse())
return false;
// Only allow this if the condition is a simple instruction that can be
@@ -2013,10 +2012,10 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// that feeds the branch. We later ensure that any values that _it_ uses
// were also live in the predecessor, so that we don't unnecessarily create
// register pressure or inhibit out-of-order execution.
- Instruction *BonusInst = 0;
+ Instruction *BonusInst = nullptr;
if (&*FrontIt != Cond &&
- FrontIt->hasOneUse() && *FrontIt->use_begin() == Cond &&
- isSafeToSpeculativelyExecute(FrontIt)) {
+ FrontIt->hasOneUse() && FrontIt->user_back() == Cond &&
+ isSafeToSpeculativelyExecute(FrontIt, DL)) {
BonusInst = &*FrontIt;
++FrontIt;
@@ -2031,7 +2030,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// Make sure the instruction after the condition is the cond branch.
BasicBlock::iterator CondIt = Cond; ++CondIt;
- // Ingore dbg intrinsics.
+ // Ignore dbg intrinsics.
while (isa<DbgInfoIntrinsic>(CondIt)) ++CondIt;
if (&*CondIt != BI)
@@ -2048,7 +2047,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// Finally, don't infinitely unroll conditional loops.
BasicBlock *TrueDest = BI->getSuccessor(0);
- BasicBlock *FalseDest = (BI->isConditional()) ? BI->getSuccessor(1) : 0;
+ BasicBlock *FalseDest = (BI->isConditional()) ? BI->getSuccessor(1) : nullptr;
if (TrueDest == BB || FalseDest == BB)
return false;
@@ -2060,7 +2059,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// the common successor, verify that the same value flows in from both
// blocks.
SmallVector<PHINode*, 4> PHIs;
- if (PBI == 0 || PBI->isUnconditional() ||
+ if (!PBI || PBI->isUnconditional() ||
(BI->isConditional() &&
!SafeToMergeTerminators(BI, PBI)) ||
(!BI->isConditional() &&
@@ -2094,7 +2093,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// instructions that are used by the terminator's condition because it
// exposes more merging opportunities.
bool UsedByBranch = (BonusInst && BonusInst->hasOneUse() &&
- *BonusInst->use_begin() == Cond);
+ BonusInst->user_back() == Cond);
if (BonusInst && !UsedByBranch) {
// Collect the values used by the bonus inst
@@ -2150,9 +2149,17 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
}
// If we have a bonus inst, clone it into the predecessor block.
- Instruction *NewBonus = 0;
+ Instruction *NewBonus = nullptr;
if (BonusInst) {
NewBonus = BonusInst->clone();
+
+ // If we moved a load, we cannot any longer claim any knowledge about
+ // its potential value. The previous information might have been valid
+ // only given the branch precondition.
+ // For an analogous reason, we must also drop all the metadata whose
+ // semantics we don't understand.
+ NewBonus->dropUnknownMetadata(LLVMContext::MD_dbg);
+
PredBlock->getInstList().insert(PBI, NewBonus);
NewBonus->takeName(BonusInst);
BonusInst->setName(BonusInst->getName()+".old");
@@ -2218,14 +2225,14 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
MDBuilder(BI->getContext()).
createBranchWeights(MDWeights));
} else
- PBI->setMetadata(LLVMContext::MD_prof, NULL);
+ PBI->setMetadata(LLVMContext::MD_prof, nullptr);
} else {
// Update PHI nodes in the common successors.
for (unsigned i = 0, e = PHIs.size(); i != e; ++i) {
ConstantInt *PBI_C = cast<ConstantInt>(
PHIs[i]->getIncomingValueForBlock(PBI->getParent()));
assert(PBI_C->getType()->isIntegerTy(1));
- Instruction *MergedCond = 0;
+ Instruction *MergedCond = nullptr;
if (PBI->getSuccessor(0) == TrueDest) {
// Create (PBI_Cond and PBI_C) or (!PBI_Cond and BI_Value)
// PBI_C is true: PBI_Cond or (!PBI_Cond and BI_Value)
@@ -2338,7 +2345,7 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
}
// If this is a conditional branch in an empty block, and if any
- // predecessors is a conditional branch to one of our destinations,
+ // predecessors are a conditional branch to one of our destinations,
// fold the conditions into logical ops and one cond br.
BasicBlock::iterator BBI = BB->begin();
// Ignore dbg intrinsics.
@@ -2373,16 +2380,33 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
// Do not perform this transformation if it would require
// insertion of a large number of select instructions. For targets
// without predication/cmovs, this is a big pessimization.
- BasicBlock *CommonDest = PBI->getSuccessor(PBIOp);
+ // Also do not perform this transformation if any phi node in the common
+ // destination block can trap when reached by BB or PBB (PR17073). In that
+ // case, it would be unsafe to hoist the operation into a select instruction.
+
+ BasicBlock *CommonDest = PBI->getSuccessor(PBIOp);
unsigned NumPhis = 0;
for (BasicBlock::iterator II = CommonDest->begin();
- isa<PHINode>(II); ++II, ++NumPhis)
+ isa<PHINode>(II); ++II, ++NumPhis) {
if (NumPhis > 2) // Disable this xform.
return false;
+ PHINode *PN = cast<PHINode>(II);
+ Value *BIV = PN->getIncomingValueForBlock(BB);
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(BIV))
+ if (CE->canTrap())
+ return false;
+
+ unsigned PBBIdx = PN->getBasicBlockIndex(PBI->getParent());
+ Value *PBIV = PN->getIncomingValue(PBBIdx);
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(PBIV))
+ if (CE->canTrap())
+ return false;
+ }
+
// Finally, if everything is ok, fold the branches to logical ops.
- BasicBlock *OtherDest = BI->getSuccessor(BIOp ^ 1);
+ BasicBlock *OtherDest = BI->getSuccessor(BIOp ^ 1);
DEBUG(dbgs() << "FOLDING BRs:" << *PBI->getParent()
<< "AND: " << *BI->getParent());
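A source-level picture of the hazard the new PR17073 check guards against (illustrative, using a runtime division to stand in for a trapping constant expression): folding the branches evaluates the incoming value as a select operand even on paths that previously never reached it.

// Illustrative only: 'd' may be zero whenever 'c' is true, so the division
// never traps in the branched form.
int before(bool c, int n, int d) {
  if (c)
    return 0;
  return n / d;
}
// A select-based form, "c ? 0 : n / d", would evaluate n / d unconditionally
// and can trap, which is why trapping PHI inputs disable the fold.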
@@ -2498,16 +2522,16 @@ static bool SimplifyTerminatorOnSelect(TerminatorInst *OldTerm, Value *Cond,
// If TrueBB and FalseBB are equal, only try to preserve one copy of that
// successor.
BasicBlock *KeepEdge1 = TrueBB;
- BasicBlock *KeepEdge2 = TrueBB != FalseBB ? FalseBB : 0;
+ BasicBlock *KeepEdge2 = TrueBB != FalseBB ? FalseBB : nullptr;
// Then remove the rest.
for (unsigned I = 0, E = OldTerm->getNumSuccessors(); I != E; ++I) {
BasicBlock *Succ = OldTerm->getSuccessor(I);
// Make sure only to keep exactly one copy of each edge.
if (Succ == KeepEdge1)
- KeepEdge1 = 0;
+ KeepEdge1 = nullptr;
else if (Succ == KeepEdge2)
- KeepEdge2 = 0;
+ KeepEdge2 = nullptr;
else
Succ->removePredecessor(OldTerm->getParent());
}
@@ -2516,7 +2540,7 @@ static bool SimplifyTerminatorOnSelect(TerminatorInst *OldTerm, Value *Cond,
Builder.SetCurrentDebugLocation(OldTerm->getDebugLoc());
// Insert an appropriate new terminator.
- if ((KeepEdge1 == 0) && (KeepEdge2 == 0)) {
+ if (!KeepEdge1 && !KeepEdge2) {
if (TrueBB == FalseBB)
// We were only looking for one successor, and it was present.
// Create an unconditional branch to it.
@@ -2538,7 +2562,7 @@ static bool SimplifyTerminatorOnSelect(TerminatorInst *OldTerm, Value *Cond,
// One of the selected values was a successor, but the other wasn't.
// Insert an unconditional branch to the one that was found;
// the edge to the one that wasn't must be unreachable.
- if (KeepEdge1 == 0)
+ if (!KeepEdge1)
// Only TrueBB was found.
Builder.CreateBr(TrueBB);
else
@@ -2625,7 +2649,7 @@ static bool SimplifyIndirectBrOnSelect(IndirectBrInst *IBI, SelectInst *SI) {
/// the PHI, merging the third icmp into the switch.
static bool TryToSimplifyUncondBranchWithICmpInIt(
ICmpInst *ICI, IRBuilder<> &Builder, const TargetTransformInfo &TTI,
- const DataLayout *TD) {
+ const DataLayout *DL) {
BasicBlock *BB = ICI->getParent();
// If the block has any PHIs in it or the icmp has multiple uses, it is too
@@ -2639,7 +2663,7 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(
// 'V' and this block is the default case for the switch. In this case we can
// fold the compared value into the switch to simplify things.
BasicBlock *Pred = BB->getSinglePredecessor();
- if (Pred == 0 || !isa<SwitchInst>(Pred->getTerminator())) return false;
+ if (!Pred || !isa<SwitchInst>(Pred->getTerminator())) return false;
SwitchInst *SI = cast<SwitchInst>(Pred->getTerminator());
if (SI->getCondition() != V)
@@ -2653,12 +2677,12 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(
assert(VVal && "Should have a unique destination value");
ICI->setOperand(0, VVal);
- if (Value *V = SimplifyInstruction(ICI, TD)) {
+ if (Value *V = SimplifyInstruction(ICI, DL)) {
ICI->replaceAllUsesWith(V);
ICI->eraseFromParent();
}
// BB is now empty, so it is likely to simplify away.
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
}
// Ok, the block is reachable from the default dest. If the constant we're
@@ -2674,14 +2698,14 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(
ICI->replaceAllUsesWith(V);
ICI->eraseFromParent();
// BB is now empty, so it is likely to simplify away.
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
}
// The use of the icmp has to be in the 'end' block, by the only PHI node in
// the block.
BasicBlock *SuccBlock = BB->getTerminator()->getSuccessor(0);
- PHINode *PHIUse = dyn_cast<PHINode>(ICI->use_back());
- if (PHIUse == 0 || PHIUse != &SuccBlock->front() ||
+ PHINode *PHIUse = dyn_cast<PHINode>(ICI->user_back());
+ if (PHIUse == nullptr || PHIUse != &SuccBlock->front() ||
isa<PHINode>(++BasicBlock::iterator(PHIUse)))
return false;
@@ -2730,32 +2754,32 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(
/// SimplifyBranchOnICmpChain - The specified branch is a conditional branch.
/// Check to see if it is branching on an or/and chain of icmp instructions, and
/// fold it into a switch instruction if so.
-static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *TD,
+static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *DL,
IRBuilder<> &Builder) {
Instruction *Cond = dyn_cast<Instruction>(BI->getCondition());
- if (Cond == 0) return false;
+ if (!Cond) return false;
// Change br (X == 0 | X == 1), T, F into a switch instruction.
// If this is a bunch of seteq's or'd together, or if it's a bunch of
// 'setne's and'ed together, collect them.
- Value *CompVal = 0;
+ Value *CompVal = nullptr;
std::vector<ConstantInt*> Values;
bool TrueWhenEqual = true;
- Value *ExtraCase = 0;
+ Value *ExtraCase = nullptr;
unsigned UsedICmps = 0;
if (Cond->getOpcode() == Instruction::Or) {
- CompVal = GatherConstantCompares(Cond, Values, ExtraCase, TD, true,
+ CompVal = GatherConstantCompares(Cond, Values, ExtraCase, DL, true,
UsedICmps);
} else if (Cond->getOpcode() == Instruction::And) {
- CompVal = GatherConstantCompares(Cond, Values, ExtraCase, TD, false,
+ CompVal = GatherConstantCompares(Cond, Values, ExtraCase, DL, false,
UsedICmps);
TrueWhenEqual = false;
}
// If we didn't have a multiply compared value, fail.
- if (CompVal == 0) return false;
+ if (!CompVal) return false;
// Avoid turning single icmps into a switch.
if (UsedICmps <= 1)
@@ -2811,9 +2835,9 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *TD,
Builder.SetInsertPoint(BI);
// Convert pointer to int before we switch.
if (CompVal->getType()->isPointerTy()) {
- assert(TD && "Cannot switch on pointer without DataLayout");
+ assert(DL && "Cannot switch on pointer without DataLayout");
CompVal = Builder.CreatePtrToInt(CompVal,
- TD->getIntPtrType(CompVal->getType()),
+ DL->getIntPtrType(CompVal->getType()),
"magicptr");
}
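For reference, the kind of source shape SimplifyBranchOnICmpChain targets (a made-up example): a branch on an or/and chain of equality compares against constants becomes a switch on the common compared value.

// Illustrative input: an or-chain of equality tests against constants on one value.
bool isSpecial(int x) {
  return x == 0 || x == 1 || x == 4;
}
// Conceptually rewritten as:
//   switch (x) { case 0: case 1: case 4: return true; default: return false; }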
@@ -3050,7 +3074,7 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) {
// Find the most popular block.
unsigned MaxPop = 0;
unsigned MaxIndex = 0;
- BasicBlock *MaxBlock = 0;
+ BasicBlock *MaxBlock = nullptr;
for (std::map<BasicBlock*, std::pair<unsigned, unsigned> >::iterator
I = Popularity.begin(), E = Popularity.end(); I != E; ++I) {
if (I->second.first > MaxPop ||
@@ -3188,7 +3212,7 @@ static bool EliminateDeadSwitchCases(SwitchInst *SI) {
Value *Cond = SI->getCondition();
unsigned Bits = Cond->getType()->getIntegerBitWidth();
APInt KnownZero(Bits, 0), KnownOne(Bits, 0);
- ComputeMaskedBits(Cond, KnownZero, KnownOne);
+ computeKnownBits(Cond, KnownZero, KnownOne);
// Gather dead cases.
SmallVector<ConstantInt*, 8> DeadCases;
@@ -3222,7 +3246,7 @@ static bool EliminateDeadSwitchCases(SwitchInst *SI) {
Case.getCaseSuccessor()->removePredecessor(SI->getParent());
SI->removeCase(Case);
}
- if (HasWeight) {
+ if (HasWeight && Weights.size() >= 2) {
SmallVector<uint32_t, 8> MDWeights(Weights.begin(), Weights.end());
SI->setMetadata(LLVMContext::MD_prof,
MDBuilder(SI->getParent()->getContext()).
@@ -3241,13 +3265,13 @@ static PHINode *FindPHIForConditionForwarding(ConstantInt *CaseValue,
BasicBlock *BB,
int *PhiIndex) {
if (BB->getFirstNonPHIOrDbg() != BB->getTerminator())
- return NULL; // BB must be empty to be a candidate for simplification.
+ return nullptr; // BB must be empty to be a candidate for simplification.
if (!BB->getSinglePredecessor())
- return NULL; // BB must be dominated by the switch.
+ return nullptr; // BB must be dominated by the switch.
BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
if (!Branch || !Branch->isUnconditional())
- return NULL; // Terminator must be unconditional branch.
+ return nullptr; // Terminator must be unconditional branch.
BasicBlock *Succ = Branch->getSuccessor(0);
@@ -3263,7 +3287,7 @@ static PHINode *FindPHIForConditionForwarding(ConstantInt *CaseValue,
return PHI;
}
- return NULL;
+ return nullptr;
}
/// ForwardSwitchConditionToPHI - Try to forward the condition of a switch
@@ -3306,6 +3330,11 @@ static bool ForwardSwitchConditionToPHI(SwitchInst *SI) {
/// ValidLookupTableConstant - Return true if the backend will be able to handle
/// initializing an array of constants like C.
static bool ValidLookupTableConstant(Constant *C) {
+ if (C->isThreadDependent())
+ return false;
+ if (C->isDLLImportDependent())
+ return false;
+
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
return CE->isGEPWithNoNotionalOverIndexing();
@@ -3336,12 +3365,12 @@ ConstantFold(Instruction *I,
if (SelectInst *Select = dyn_cast<SelectInst>(I)) {
Constant *A = LookupConstant(Select->getCondition(), ConstantPool);
if (!A)
- return 0;
+ return nullptr;
if (A->isAllOnesValue())
return LookupConstant(Select->getTrueValue(), ConstantPool);
if (A->isNullValue())
return LookupConstant(Select->getFalseValue(), ConstantPool);
- return 0;
+ return nullptr;
}
SmallVector<Constant *, 4> COps;
@@ -3349,7 +3378,7 @@ ConstantFold(Instruction *I,
if (Constant *A = LookupConstant(I->getOperand(N), ConstantPool))
COps.push_back(A);
else
- return 0;
+ return nullptr;
}
if (CmpInst *Cmp = dyn_cast<CmpInst>(I))
@@ -3428,7 +3457,7 @@ GetCaseResults(SwitchInst *SI,
Res.push_back(std::make_pair(PHI, ConstVal));
}
- return true;
+ return Res.size() > 0;
}
namespace {
@@ -3444,7 +3473,7 @@ namespace {
ConstantInt *Offset,
const SmallVectorImpl<std::pair<ConstantInt*, Constant*> >& Values,
Constant *DefaultValue,
- const DataLayout *TD);
+ const DataLayout *DL);
/// BuildLookup - Build instructions with Builder to retrieve the value at
/// the position given by Index in the lookup table.
@@ -3452,7 +3481,7 @@ namespace {
/// WouldFitInRegister - Return true if a table with TableSize elements of
/// type ElementType would fit in a target-legal register.
- static bool WouldFitInRegister(const DataLayout *TD,
+ static bool WouldFitInRegister(const DataLayout *DL,
uint64_t TableSize,
const Type *ElementType);
@@ -3491,38 +3520,44 @@ SwitchLookupTable::SwitchLookupTable(Module &M,
ConstantInt *Offset,
const SmallVectorImpl<std::pair<ConstantInt*, Constant*> >& Values,
Constant *DefaultValue,
- const DataLayout *TD)
- : SingleValue(0), BitMap(0), BitMapElementTy(0), Array(0) {
+ const DataLayout *DL)
+ : SingleValue(nullptr), BitMap(nullptr), BitMapElementTy(nullptr),
+ Array(nullptr) {
assert(Values.size() && "Can't build lookup table without values!");
assert(TableSize >= Values.size() && "Can't fit values in table!");
// If all values in the table are equal, this is that value.
SingleValue = Values.begin()->second;
+ Type *ValueType = Values.begin()->second->getType();
+
// Build up the table contents.
SmallVector<Constant*, 64> TableContents(TableSize);
for (size_t I = 0, E = Values.size(); I != E; ++I) {
ConstantInt *CaseVal = Values[I].first;
Constant *CaseRes = Values[I].second;
- assert(CaseRes->getType() == DefaultValue->getType());
+ assert(CaseRes->getType() == ValueType);
uint64_t Idx = (CaseVal->getValue() - Offset->getValue())
.getLimitedValue();
TableContents[Idx] = CaseRes;
if (CaseRes != SingleValue)
- SingleValue = 0;
+ SingleValue = nullptr;
}
// Fill in any holes in the table with the default result.
if (Values.size() < TableSize) {
+ assert(DefaultValue &&
+ "Need a default value to fill the lookup table holes.");
+ assert(DefaultValue->getType() == ValueType);
for (uint64_t I = 0; I < TableSize; ++I) {
if (!TableContents[I])
TableContents[I] = DefaultValue;
}
if (DefaultValue != SingleValue)
- SingleValue = 0;
+ SingleValue = nullptr;
}
// If each element in the table contains the same value, we only need to store
@@ -3533,8 +3568,8 @@ SwitchLookupTable::SwitchLookupTable(Module &M,
}
// If the type is integer and the table fits in a register, build a bitmap.
- if (WouldFitInRegister(TD, TableSize, DefaultValue->getType())) {
- IntegerType *IT = cast<IntegerType>(DefaultValue->getType());
+ if (WouldFitInRegister(DL, TableSize, ValueType)) {
+ IntegerType *IT = cast<IntegerType>(ValueType);
APInt TableInt(TableSize * IT->getBitWidth(), 0);
for (uint64_t I = TableSize; I > 0; --I) {
TableInt <<= IT->getBitWidth();
@@ -3552,7 +3587,7 @@ SwitchLookupTable::SwitchLookupTable(Module &M,
}
// Store the table in an array.
- ArrayType *ArrayTy = ArrayType::get(DefaultValue->getType(), TableSize);
+ ArrayType *ArrayTy = ArrayType::get(ValueType, TableSize);
Constant *Initializer = ConstantArray::get(ArrayTy, TableContents);
Array = new GlobalVariable(M, ArrayTy, /*constant=*/ true,
@@ -3589,6 +3624,16 @@ Value *SwitchLookupTable::BuildLookup(Value *Index, IRBuilder<> &Builder) {
"switch.masked");
}
case ArrayKind: {
+ // Make sure the table index will not overflow when treated as signed.
+ IntegerType *IT = cast<IntegerType>(Index->getType());
+ uint64_t TableSize = Array->getInitializer()->getType()
+ ->getArrayNumElements();
+ if (TableSize > (1ULL << (IT->getBitWidth() - 1)))
+ Index = Builder.CreateZExt(Index,
+ IntegerType::get(IT->getContext(),
+ IT->getBitWidth() + 1),
+ "switch.tableidx.zext");
+
Value *GEPIndices[] = { Builder.getInt32(0), Index };
Value *GEP = Builder.CreateInBoundsGEP(Array, GEPIndices,
"switch.gep");
@@ -3598,10 +3643,10 @@ Value *SwitchLookupTable::BuildLookup(Value *Index, IRBuilder<> &Builder) {
llvm_unreachable("Unknown lookup table kind!");
}
-bool SwitchLookupTable::WouldFitInRegister(const DataLayout *TD,
+bool SwitchLookupTable::WouldFitInRegister(const DataLayout *DL,
uint64_t TableSize,
const Type *ElementType) {
- if (!TD)
+ if (!DL)
return false;
const IntegerType *IT = dyn_cast<IntegerType>(ElementType);
if (!IT)
@@ -3612,7 +3657,7 @@ bool SwitchLookupTable::WouldFitInRegister(const DataLayout *TD,
// Avoid overflow, fitsInLegalInteger uses unsigned int for the width.
if (TableSize >= UINT_MAX/IT->getBitWidth())
return false;
- return TD->fitsInLegalInteger(TableSize * IT->getBitWidth());
+ return DL->fitsInLegalInteger(TableSize * IT->getBitWidth());
}
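Roughly, the fit test packs the whole table into one integer of TableSize * ElementBits bits and asks whether that width is legal for the target. A standalone sketch with an explicit legal width (hypothetical; the real code queries DataLayout::fitsInLegalInteger):

#include <climits>
#include <cstdint>

// Hypothetical stand-in for DL->fitsInLegalInteger(): LegalBits is the widest
// legal integer width (e.g. 64 on most targets).
bool wouldFitInRegister(unsigned LegalBits, uint64_t TableSize, unsigned ElementBits) {
  if (ElementBits == 0 || TableSize >= UINT_MAX / ElementBits)
    return false;                            // avoid overflowing the product
  return TableSize * ElementBits <= LegalBits;
}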
/// ShouldBuildLookupTable - Determine whether a lookup table should be built
@@ -3621,7 +3666,7 @@ bool SwitchLookupTable::WouldFitInRegister(const DataLayout *TD,
static bool ShouldBuildLookupTable(SwitchInst *SI,
uint64_t TableSize,
const TargetTransformInfo &TTI,
- const DataLayout *TD,
+ const DataLayout *DL,
const SmallDenseMap<PHINode*, Type*>& ResultTypes) {
if (SI->getNumCases() > TableSize || TableSize >= UINT64_MAX / 10)
return false; // TableSize overflowed, or mul below might overflow.
@@ -3637,7 +3682,7 @@ static bool ShouldBuildLookupTable(SwitchInst *SI,
// Saturate this flag to false.
AllTablesFitInRegister = AllTablesFitInRegister &&
- SwitchLookupTable::WouldFitInRegister(TD, TableSize, Ty);
+ SwitchLookupTable::WouldFitInRegister(DL, TableSize, Ty);
// If both flags saturate, we're done. NOTE: This *only* works with
// saturating flags, and all flags have to saturate first due to the
@@ -3666,7 +3711,7 @@ static bool ShouldBuildLookupTable(SwitchInst *SI,
static bool SwitchToLookupTable(SwitchInst *SI,
IRBuilder<> &Builder,
const TargetTransformInfo &TTI,
- const DataLayout* TD) {
+ const DataLayout* DL) {
assert(SI->getNumCases() > 1 && "Degenerate switch?");
// Only build lookup table when we have a target that supports it.
@@ -3680,11 +3725,9 @@ static bool SwitchToLookupTable(SwitchInst *SI,
// GEP needs a runtime relocation in PIC code. We should just build one big
// string and lookup indices into that.
- // Ignore the switch if the number of cases is too small.
- // This is similar to the check when building jump tables in
- // SelectionDAGBuilder::handleJTSwitchCase.
- // FIXME: Determine the best cut-off.
- if (SI->getNumCases() < 4)
+ // Ignore switches with less than three cases. Lookup tables will not make them
+ // faster, so we don't analyze them.
+ if (SI->getNumCases() < 3)
return false;
// Figure out the corresponding result for each case value and phi node in the
@@ -3694,7 +3737,7 @@ static bool SwitchToLookupTable(SwitchInst *SI,
ConstantInt *MinCaseVal = CI.getCaseValue();
ConstantInt *MaxCaseVal = CI.getCaseValue();
- BasicBlock *CommonDest = 0;
+ BasicBlock *CommonDest = nullptr;
typedef SmallVector<std::pair<ConstantInt*, Constant*>, 4> ResultListTy;
SmallDenseMap<PHINode*, ResultListTy> ResultLists;
SmallDenseMap<PHINode*, Constant*> DefaultResults;
@@ -3712,7 +3755,7 @@ static bool SwitchToLookupTable(SwitchInst *SI,
typedef SmallVector<std::pair<PHINode*, Constant*>, 4> ResultsTy;
ResultsTy Results;
if (!GetCaseResults(SI, CaseVal, CI.getCaseSuccessor(), &CommonDest,
- Results, TD))
+ Results, DL))
return false;
// Append the result from this case to the list for each phi.
@@ -3723,21 +3766,41 @@ static bool SwitchToLookupTable(SwitchInst *SI,
}
}
- // Get the resulting values for the default case.
+ // Keep track of the result types.
+ for (size_t I = 0, E = PHIs.size(); I != E; ++I) {
+ PHINode *PHI = PHIs[I];
+ ResultTypes[PHI] = ResultLists[PHI][0].second->getType();
+ }
+
+ uint64_t NumResults = ResultLists[PHIs[0]].size();
+ APInt RangeSpread = MaxCaseVal->getValue() - MinCaseVal->getValue();
+ uint64_t TableSize = RangeSpread.getLimitedValue() + 1;
+ bool TableHasHoles = (NumResults < TableSize);
+
+ // If the table has holes, we need a constant result for the default case
+ // or a bitmask that fits in a register.
SmallVector<std::pair<PHINode*, Constant*>, 4> DefaultResultsList;
- if (!GetCaseResults(SI, 0, SI->getDefaultDest(), &CommonDest,
- DefaultResultsList, TD))
- return false;
+ bool HasDefaultResults = false;
+ if (TableHasHoles) {
+ HasDefaultResults = GetCaseResults(SI, nullptr, SI->getDefaultDest(),
+ &CommonDest, DefaultResultsList, DL);
+ }
+ bool NeedMask = (TableHasHoles && !HasDefaultResults);
+ if (NeedMask) {
+ // As an extra penalty for the validity test we require more cases.
+ if (SI->getNumCases() < 4) // FIXME: Find best threshold value (benchmark).
+ return false;
+ if (!(DL && DL->fitsInLegalInteger(TableSize)))
+ return false;
+ }
+
for (size_t I = 0, E = DefaultResultsList.size(); I != E; ++I) {
PHINode *PHI = DefaultResultsList[I].first;
Constant *Result = DefaultResultsList[I].second;
DefaultResults[PHI] = Result;
- ResultTypes[PHI] = Result->getType();
}
- APInt RangeSpread = MaxCaseVal->getValue() - MinCaseVal->getValue();
- uint64_t TableSize = RangeSpread.getLimitedValue() + 1;
- if (!ShouldBuildLookupTable(SI, TableSize, TTI, TD, ResultTypes))
+ if (!ShouldBuildLookupTable(SI, TableSize, TTI, DL, ResultTypes))
return false;
// Create the BB that does the lookups.
@@ -3755,7 +3818,7 @@ static bool SwitchToLookupTable(SwitchInst *SI,
// Compute the maximum table size representable by the integer type we are
// switching upon.
unsigned CaseSize = MinCaseVal->getType()->getPrimitiveSizeInBits();
- uint64_t MaxTableSize = CaseSize > 63? UINT64_MAX : 1ULL << CaseSize;
+ uint64_t MaxTableSize = CaseSize > 63 ? UINT64_MAX : 1ULL << CaseSize;
assert(MaxTableSize >= TableSize &&
"It is impossible for a switch to have more entries than the max "
"representable value of its input integer type's size.");
@@ -3770,25 +3833,67 @@ static bool SwitchToLookupTable(SwitchInst *SI,
SI->getDefaultDest()->removePredecessor(SI->getParent());
} else {
Value *Cmp = Builder.CreateICmpULT(TableIndex, ConstantInt::get(
- MinCaseVal->getType(), TableSize));
+ MinCaseVal->getType(), TableSize));
Builder.CreateCondBr(Cmp, LookupBB, SI->getDefaultDest());
}
// Populate the BB that does the lookups.
Builder.SetInsertPoint(LookupBB);
+
+ if (NeedMask) {
+ // Before doing the lookup we do the hole check.
+ // The LookupBB is therefore re-purposed to do the hole check
+ // and we create a new LookupBB.
+ BasicBlock *MaskBB = LookupBB;
+ MaskBB->setName("switch.hole_check");
+ LookupBB = BasicBlock::Create(Mod.getContext(),
+ "switch.lookup",
+ CommonDest->getParent(),
+ CommonDest);
+
+ // Build bitmask; fill in a 1 bit for every case.
+ APInt MaskInt(TableSize, 0);
+ APInt One(TableSize, 1);
+ const ResultListTy &ResultList = ResultLists[PHIs[0]];
+ for (size_t I = 0, E = ResultList.size(); I != E; ++I) {
+ uint64_t Idx = (ResultList[I].first->getValue() -
+ MinCaseVal->getValue()).getLimitedValue();
+ MaskInt |= One << Idx;
+ }
+ ConstantInt *TableMask = ConstantInt::get(Mod.getContext(), MaskInt);
+
+ // Get the TableIndex'th bit of the bitmask.
+ // If this bit is 0 (meaning hole) jump to the default destination,
+ // else continue with table lookup.
+ IntegerType *MapTy = TableMask->getType();
+ Value *MaskIndex = Builder.CreateZExtOrTrunc(TableIndex, MapTy,
+ "switch.maskindex");
+ Value *Shifted = Builder.CreateLShr(TableMask, MaskIndex,
+ "switch.shifted");
+ Value *LoBit = Builder.CreateTrunc(Shifted,
+ Type::getInt1Ty(Mod.getContext()),
+ "switch.lobit");
+ Builder.CreateCondBr(LoBit, LookupBB, SI->getDefaultDest());
+
+ Builder.SetInsertPoint(LookupBB);
+ AddPredecessorToBlock(SI->getDefaultDest(), MaskBB, SI->getParent());
+ }
+
bool ReturnedEarly = false;
for (size_t I = 0, E = PHIs.size(); I != E; ++I) {
PHINode *PHI = PHIs[I];
+ // If using a bitmask, use any value to fill the lookup table holes.
+ Constant *DV = NeedMask ? ResultLists[PHI][0].second : DefaultResults[PHI];
SwitchLookupTable Table(Mod, TableSize, MinCaseVal, ResultLists[PHI],
- DefaultResults[PHI], TD);
+ DV, DL);
Value *Result = Table.BuildLookup(TableIndex, Builder);
// If the result is used to return immediately from the function, we want to
// do that right here.
- if (PHI->hasOneUse() && isa<ReturnInst>(*PHI->use_begin()) &&
- *PHI->use_begin() == CommonDest->getFirstNonPHIOrDbg()) {
+ if (PHI->hasOneUse() && isa<ReturnInst>(*PHI->user_begin()) &&
+ PHI->user_back() == CommonDest->getFirstNonPHIOrDbg()) {
Builder.CreateRet(Result);
ReturnedEarly = true;
break;
@@ -3811,6 +3916,8 @@ static bool SwitchToLookupTable(SwitchInst *SI,
SI->eraseFromParent();
++NumLookupTables;
+ if (NeedMask)
+ ++NumLookupTablesHoles;
return true;
}
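A compact sketch of the hole check the NeedMask path emits (hypothetical helper; assumes TableSize fits in 64 bits, mirroring the fitsInLegalInteger requirement above): one bit per covered case, tested at the TableIndex'th position before doing the lookup.

#include <cstdint>
#include <vector>

// Hypothetical helper: CaseIdxs holds (CaseVal - MinCaseVal) for every real case.
// Returns true if TableIndex hits a real entry; false means "hole", i.e. branch
// to the default destination instead of loading from the table.
bool hitsTableEntry(const std::vector<uint64_t> &CaseIdxs, uint64_t TableIndex) {
  uint64_t Mask = 0;
  for (uint64_t Idx : CaseIdxs)
    Mask |= uint64_t{1} << Idx;         // fill in a 1 bit for every case
  return (Mask >> TableIndex) & 1;      // the TableIndex'th bit of the bitmask
}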
@@ -3822,12 +3929,12 @@ bool SimplifyCFGOpt::SimplifySwitch(SwitchInst *SI, IRBuilder<> &Builder) {
// see if that predecessor totally determines the outcome of this switch.
if (BasicBlock *OnlyPred = BB->getSinglePredecessor())
if (SimplifyEqualityComparisonWithOnlyPredecessor(SI, OnlyPred, Builder))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
Value *Cond = SI->getCondition();
if (SelectInst *Select = dyn_cast<SelectInst>(Cond))
if (SimplifySwitchOnSelect(SI, Select))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
// If the block only contains the switch, see if we can fold the block
// away into any preds.
@@ -3837,22 +3944,22 @@ bool SimplifyCFGOpt::SimplifySwitch(SwitchInst *SI, IRBuilder<> &Builder) {
++BBI;
if (SI == &*BBI)
if (FoldValueComparisonIntoPredecessors(SI, Builder))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
}
// Try to transform the switch into an icmp and a branch.
if (TurnSwitchRangeIntoICmp(SI, Builder))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
// Remove unreachable cases.
if (EliminateDeadSwitchCases(SI))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
if (ForwardSwitchConditionToPHI(SI))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
- if (SwitchToLookupTable(SI, Builder, TTI, TD))
- return SimplifyCFG(BB, TTI, TD) | true;
+ if (SwitchToLookupTable(SI, Builder, TTI, DL))
+ return SimplifyCFG(BB, TTI, DL) | true;
return false;
}
@@ -3889,7 +3996,7 @@ bool SimplifyCFGOpt::SimplifyIndirectBr(IndirectBrInst *IBI) {
if (SelectInst *SI = dyn_cast<SelectInst>(IBI->getAddress())) {
if (SimplifyIndirectBrOnSelect(IBI, SI))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
}
return Changed;
}
@@ -3913,7 +4020,7 @@ bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder){
for (++I; isa<DbgInfoIntrinsic>(I); ++I)
;
if (I->isTerminator() &&
- TryToSimplifyUncondBranchWithICmpInIt(ICI, Builder, TTI, TD))
+ TryToSimplifyUncondBranchWithICmpInIt(ICI, Builder, TTI, DL))
return true;
}
@@ -3921,8 +4028,8 @@ bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder){
// branches to us and our successor, fold the comparison into the
// predecessor and use logical operations to update the incoming value
// for PHI nodes in common successor.
- if (FoldBranchToCommonDest(BI))
- return SimplifyCFG(BB, TTI, TD) | true;
+ if (FoldBranchToCommonDest(BI, DL))
+ return SimplifyCFG(BB, TTI, DL) | true;
return false;
}
@@ -3937,7 +4044,7 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
// switch.
if (BasicBlock *OnlyPred = BB->getSinglePredecessor())
if (SimplifyEqualityComparisonWithOnlyPredecessor(BI, OnlyPred, Builder))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
// This block must be empty, except for the setcond inst, if it exists.
// Ignore dbg intrinsics.
@@ -3947,67 +4054,67 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
++I;
if (&*I == BI) {
if (FoldValueComparisonIntoPredecessors(BI, Builder))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
} else if (&*I == cast<Instruction>(BI->getCondition())){
++I;
// Ignore dbg intrinsics.
while (isa<DbgInfoIntrinsic>(I))
++I;
if (&*I == BI && FoldValueComparisonIntoPredecessors(BI, Builder))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
}
}
// Try to turn "br (X == 0 | X == 1), T, F" into a switch instruction.
- if (SimplifyBranchOnICmpChain(BI, TD, Builder))
+ if (SimplifyBranchOnICmpChain(BI, DL, Builder))
return true;
// If this basic block is ONLY a compare and a branch, and if a predecessor
// branches to us and one of our successors, fold the comparison into the
// predecessor and use logical operations to pick the right destination.
- if (FoldBranchToCommonDest(BI))
- return SimplifyCFG(BB, TTI, TD) | true;
+ if (FoldBranchToCommonDest(BI, DL))
+ return SimplifyCFG(BB, TTI, DL) | true;
// We have a conditional branch to two blocks that are only reachable
// from BI. We know that the condbr dominates the two blocks, so see if
// there is any identical code in the "then" and "else" blocks. If so, we
// can hoist it up to the branching block.
- if (BI->getSuccessor(0)->getSinglePredecessor() != 0) {
- if (BI->getSuccessor(1)->getSinglePredecessor() != 0) {
- if (HoistThenElseCodeToIf(BI))
- return SimplifyCFG(BB, TTI, TD) | true;
+ if (BI->getSuccessor(0)->getSinglePredecessor()) {
+ if (BI->getSuccessor(1)->getSinglePredecessor()) {
+ if (HoistThenElseCodeToIf(BI, DL))
+ return SimplifyCFG(BB, TTI, DL) | true;
} else {
// If Successor #1 has multiple preds, we may be able to conditionally
- // execute Successor #0 if it branches to successor #1.
+ // execute Successor #0 if it branches to Successor #1.
TerminatorInst *Succ0TI = BI->getSuccessor(0)->getTerminator();
if (Succ0TI->getNumSuccessors() == 1 &&
Succ0TI->getSuccessor(0) == BI->getSuccessor(1))
- if (SpeculativelyExecuteBB(BI, BI->getSuccessor(0)))
- return SimplifyCFG(BB, TTI, TD) | true;
+ if (SpeculativelyExecuteBB(BI, BI->getSuccessor(0), DL))
+ return SimplifyCFG(BB, TTI, DL) | true;
}
- } else if (BI->getSuccessor(1)->getSinglePredecessor() != 0) {
+ } else if (BI->getSuccessor(1)->getSinglePredecessor()) {
// If Successor #0 has multiple preds, we may be able to conditionally
- // execute Successor #1 if it branches to successor #0.
+ // execute Successor #1 if it branches to Successor #0.
TerminatorInst *Succ1TI = BI->getSuccessor(1)->getTerminator();
if (Succ1TI->getNumSuccessors() == 1 &&
Succ1TI->getSuccessor(0) == BI->getSuccessor(0))
- if (SpeculativelyExecuteBB(BI, BI->getSuccessor(1)))
- return SimplifyCFG(BB, TTI, TD) | true;
+ if (SpeculativelyExecuteBB(BI, BI->getSuccessor(1), DL))
+ return SimplifyCFG(BB, TTI, DL) | true;
}
// If this is a branch on a phi node in the current block, thread control
// through this block if any PHI node entries are constants.
if (PHINode *PN = dyn_cast<PHINode>(BI->getCondition()))
if (PN->getParent() == BI->getParent())
- if (FoldCondBranchOnPHI(BI, TD))
- return SimplifyCFG(BB, TTI, TD) | true;
+ if (FoldCondBranchOnPHI(BI, DL))
+ return SimplifyCFG(BB, TTI, DL) | true;
// Scan predecessor blocks for conditional branches.
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
if (BranchInst *PBI = dyn_cast<BranchInst>((*PI)->getTerminator()))
if (PBI != BI && PBI->isConditional())
if (SimplifyCondBranchToCondBranch(PBI, BI))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
return false;
}
@@ -4023,7 +4130,7 @@ static bool passingValueIsAlwaysUndefined(Value *V, Instruction *I) {
if (C->isNullValue()) {
// Only look at the first use, avoid hurting compile time with long uselists
- User *Use = *I->use_begin();
+ User *Use = *I->user_begin();
// Now make sure that there are no instructions in between that can alter
// control flow (eg. calls)
@@ -4119,7 +4226,7 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
// eliminate it, do so now.
if (PHINode *PN = dyn_cast<PHINode>(BB->begin()))
if (PN->getNumIncomingValues() == 2)
- Changed |= FoldTwoEntryPHINode(PN, TD);
+ Changed |= FoldTwoEntryPHINode(PN, DL);
Builder.SetInsertPoint(BB->getTerminator());
if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {
@@ -4151,6 +4258,6 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
/// of the CFG. It returns true if a modification was made.
///
bool llvm::SimplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
- const DataLayout *TD) {
- return SimplifyCFGOpt(TTI, TD).run(BB);
+ const DataLayout *DL) {
+ return SimplifyCFGOpt(TTI, DL).run(BB);
}
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
index bf3442a..b284e6f 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
@@ -13,9 +13,8 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "indvars"
-
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/IVUsers.h"
@@ -23,13 +22,18 @@
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "indvars"
+
STATISTIC(NumElimIdentity, "Number of IV identities eliminated");
STATISTIC(NumElimOperand, "Number of IV operands folded into a use");
STATISTIC(NumElimRem , "Number of IV remainder operations eliminated");
@@ -44,7 +48,7 @@ namespace {
Loop *L;
LoopInfo *LI;
ScalarEvolution *SE;
- const DataLayout *TD; // May be NULL
+ const DataLayout *DL; // May be NULL
SmallVectorImpl<WeakVH> &DeadInsts;
@@ -52,13 +56,14 @@ namespace {
public:
SimplifyIndvar(Loop *Loop, ScalarEvolution *SE, LPPassManager *LPM,
- SmallVectorImpl<WeakVH> &Dead, IVUsers *IVU = NULL) :
+ SmallVectorImpl<WeakVH> &Dead, IVUsers *IVU = nullptr) :
L(Loop),
LI(LPM->getAnalysisIfAvailable<LoopInfo>()),
SE(SE),
- TD(LPM->getAnalysisIfAvailable<DataLayout>()),
DeadInsts(Dead),
Changed(false) {
+ DataLayoutPass *DLP = LPM->getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
assert(LI && "IV simplification requires LoopInfo");
}
@@ -67,7 +72,7 @@ namespace {
/// Iteratively perform simplification on a worklist of users of the
/// specified induction variable. This is the top-level driver that applies
/// all simplicitions to users of an IV.
- void simplifyUsers(PHINode *CurrIV, IVVisitor *V = NULL);
+ void simplifyUsers(PHINode *CurrIV, IVVisitor *V = nullptr);
Value *foldIVUser(Instruction *UseInst, Instruction *IVOperand);
@@ -75,6 +80,9 @@ namespace {
void eliminateIVComparison(ICmpInst *ICmp, Value *IVOperand);
void eliminateIVRemainder(BinaryOperator *Rem, Value *IVOperand,
bool IsSigned);
+
+ Instruction *splitOverflowIntrinsic(Instruction *IVUser,
+ const DominatorTree *DT);
};
}
@@ -87,25 +95,25 @@ namespace {
/// be folded (in case more folding opportunities have been exposed).
/// Otherwise return null.
Value *SimplifyIndvar::foldIVUser(Instruction *UseInst, Instruction *IVOperand) {
- Value *IVSrc = 0;
+ Value *IVSrc = nullptr;
unsigned OperIdx = 0;
- const SCEV *FoldedExpr = 0;
+ const SCEV *FoldedExpr = nullptr;
switch (UseInst->getOpcode()) {
default:
- return 0;
+ return nullptr;
case Instruction::UDiv:
case Instruction::LShr:
// We're only interested in the case where we know something about
// the numerator and have a constant denominator.
if (IVOperand != UseInst->getOperand(OperIdx) ||
!isa<ConstantInt>(UseInst->getOperand(1)))
- return 0;
+ return nullptr;
// Attempt to fold a binary operator with constant operand.
// e.g. ((I + 1) >> 2) => I >> 2
if (!isa<BinaryOperator>(IVOperand)
|| !isa<ConstantInt>(IVOperand->getOperand(1)))
- return 0;
+ return nullptr;
IVSrc = IVOperand->getOperand(0);
// IVSrc must be the (SCEVable) IV, since the other operand is const.
@@ -116,7 +124,7 @@ Value *SimplifyIndvar::foldIVUser(Instruction *UseInst, Instruction *IVOperand)
// Get a constant for the divisor. See createSCEV.
uint32_t BitWidth = cast<IntegerType>(UseInst->getType())->getBitWidth();
if (D->getValue().uge(BitWidth))
- return 0;
+ return nullptr;
D = ConstantInt::get(UseInst->getContext(),
APInt::getOneBitSet(BitWidth, D->getZExtValue()));
@@ -125,11 +133,11 @@ Value *SimplifyIndvar::foldIVUser(Instruction *UseInst, Instruction *IVOperand)
}
// We have something that might fold it's operand. Compare SCEVs.
if (!SE->isSCEVable(UseInst->getType()))
- return 0;
+ return nullptr;
// Bypass the operand if SCEV can prove it has no effect.
if (SE->getSCEV(UseInst) != FoldedExpr)
- return 0;
+ return nullptr;
DEBUG(dbgs() << "INDVARS: Eliminated IV operand: " << *IVOperand
<< " -> " << *UseInst << '\n');
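For intuition, the comment's ((I + 1) >> 2) => I >> 2 example only fires when SCEV proves both expressions equal. A tiny arithmetic illustration of one such case (the assumption that the IV is a multiple of 4 at this use is invented for the example):

// Assuming I % 4 == 0 here, adding 1 never carries into bit 2, so
// (I + 1) >> 2 == I >> 2 and the IV operand can be bypassed.
unsigned useIV(unsigned I) {
  return (I + 1) >> 2;
}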
@@ -263,6 +271,69 @@ bool SimplifyIndvar::eliminateIVUser(Instruction *UseInst,
return true;
}
+/// \brief Split sadd.with.overflow into add + sadd.with.overflow to allow
+/// analysis and optimization.
+///
+/// \return A new value representing the non-overflowing add if possible,
+/// otherwise return the original value.
+Instruction *SimplifyIndvar::splitOverflowIntrinsic(Instruction *IVUser,
+ const DominatorTree *DT) {
+ IntrinsicInst *II = dyn_cast<IntrinsicInst>(IVUser);
+ if (!II || II->getIntrinsicID() != Intrinsic::sadd_with_overflow)
+ return IVUser;
+
+ // Find a branch guarded by the overflow check.
+ BranchInst *Branch = nullptr;
+ Instruction *AddVal = nullptr;
+ for (User *U : II->users()) {
+ if (ExtractValueInst *ExtractInst = dyn_cast<ExtractValueInst>(U)) {
+ if (ExtractInst->getNumIndices() != 1)
+ continue;
+ if (ExtractInst->getIndices()[0] == 0)
+ AddVal = ExtractInst;
+ else if (ExtractInst->getIndices()[0] == 1 && ExtractInst->hasOneUse())
+ Branch = dyn_cast<BranchInst>(ExtractInst->user_back());
+ }
+ }
+ if (!AddVal || !Branch)
+ return IVUser;
+
+ BasicBlock *ContinueBB = Branch->getSuccessor(1);
+ if (std::next(pred_begin(ContinueBB)) != pred_end(ContinueBB))
+ return IVUser;
+
+ // Check if all users of the add are provably NSW.
+ bool AllNSW = true;
+ for (Use &U : AddVal->uses()) {
+ if (Instruction *UseInst = dyn_cast<Instruction>(U.getUser())) {
+ BasicBlock *UseBB = UseInst->getParent();
+ if (PHINode *PHI = dyn_cast<PHINode>(UseInst))
+ UseBB = PHI->getIncomingBlock(U);
+ if (!DT->dominates(ContinueBB, UseBB)) {
+ AllNSW = false;
+ break;
+ }
+ }
+ }
+ if (!AllNSW)
+ return IVUser;
+
+ // Go for it...
+ IRBuilder<> Builder(IVUser);
+ Instruction *AddInst = dyn_cast<Instruction>(
+ Builder.CreateNSWAdd(II->getOperand(0), II->getOperand(1)));
+
+ // The caller expects the new add to have the same form as the intrinsic. The
+ // IV operand position must be the same.
+ assert((AddInst->getOpcode() == Instruction::Add &&
+ AddInst->getOperand(0) == II->getOperand(0)) &&
+ "Bad add instruction created from overflow intrinsic.");
+
+ AddVal->replaceAllUsesWith(AddInst);
+ DeadInsts.push_back(AddVal);
+ return AddInst;
+}
+
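At the source level, the guarded pattern being split looks roughly like the following (GCC/Clang builtin used purely for illustration; names invented): on the non-overflowing path the sum behaves like a plain nsw add, which is what the newly created add exposes to SCEV.

#include <cstdint>

// Illustrative analogue of the sadd.with.overflow + branch pattern.
bool guardedAdd(int64_t A, int64_t B, int64_t &Out) {
  int64_t Sum;
  if (__builtin_add_overflow(A, B, &Sum))   // the overflow bit guards a branch
    return false;                           // overflow path
  Out = Sum;                                // here the add provably did not wrap
  return true;
}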
/// pushIVUsers - Add all uses of Def to the current IV's worklist.
///
static void pushIVUsers(
@@ -270,16 +341,15 @@ static void pushIVUsers(
SmallPtrSet<Instruction*,16> &Simplified,
SmallVectorImpl< std::pair<Instruction*,Instruction*> > &SimpleIVUsers) {
- for (Value::use_iterator UI = Def->use_begin(), E = Def->use_end();
- UI != E; ++UI) {
- Instruction *User = cast<Instruction>(*UI);
+ for (User *U : Def->users()) {
+ Instruction *UI = cast<Instruction>(U);
// Avoid infinite or exponential worklist processing.
// Also ensure unique worklist users.
// If Def is a LoopPhi, it may not be in the Simplified set, so check for
// self edges first.
- if (User != Def && Simplified.insert(User))
- SimpleIVUsers.push_back(std::make_pair(User, Def));
+ if (UI != Def && Simplified.insert(UI))
+ SimpleIVUsers.push_back(std::make_pair(UI, Def));
}
}
@@ -334,8 +404,16 @@ void SimplifyIndvar::simplifyUsers(PHINode *CurrIV, IVVisitor *V) {
while (!SimpleIVUsers.empty()) {
std::pair<Instruction*, Instruction*> UseOper =
SimpleIVUsers.pop_back_val();
+ Instruction *UseInst = UseOper.first;
+
// Bypass back edges to avoid extra work.
- if (UseOper.first == CurrIV) continue;
+ if (UseInst == CurrIV) continue;
+
+ if (V && V->shouldSplitOverflowInstrinsics()) {
+ UseInst = splitOverflowIntrinsic(UseInst, V->getDomTree());
+ if (!UseInst)
+ continue;
+ }
Instruction *IVOperand = UseOper.second;
for (unsigned N = 0; IVOperand; ++N) {
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp
index f9687e4d..33b3637 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp
@@ -14,14 +14,13 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "instsimplify"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/Pass.h"
@@ -29,6 +28,8 @@
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
+#define DEBUG_TYPE "instsimplify"
+
STATISTIC(NumSimplified, "Number of redundant instructions removed");
namespace {
@@ -38,23 +39,27 @@ namespace {
initializeInstSimplifierPass(*PassRegistry::getPassRegistry());
}
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<TargetLibraryInfo>();
}
/// runOnFunction - Remove instructions that simplify.
- bool runOnFunction(Function &F) {
- const DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>();
- const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
+ bool runOnFunction(Function &F) override {
+ const DominatorTreeWrapperPass *DTWP =
+ getAnalysisIfAvailable<DominatorTreeWrapperPass>();
+ const DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
SmallPtrSet<const Instruction*, 8> S1, S2, *ToSimplify = &S1, *Next = &S2;
bool Changed = false;
do {
- for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
- DE = df_end(&F.getEntryBlock()); DI != DE; ++DI)
- for (BasicBlock::iterator BI = DI->begin(), BE = DI->end(); BI != BE;) {
+ for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
+ // Here be subtlety: the iterator must be incremented before the loop
+ // body (not sure why), so a range-for loop won't work here.
+ for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
Instruction *I = BI++;
// The first time through the loop ToSimplify is empty and we try to
// simplify all instructions. On later iterations ToSimplify is not
@@ -63,16 +68,23 @@ namespace {
continue;
// Don't waste time simplifying unused instructions.
if (!I->use_empty())
- if (Value *V = SimplifyInstruction(I, TD, TLI, DT)) {
+ if (Value *V = SimplifyInstruction(I, DL, TLI, DT)) {
// Mark all uses for resimplification next time round the loop.
- for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
- UI != UE; ++UI)
- Next->insert(cast<Instruction>(*UI));
+ for (User *U : I->users())
+ Next->insert(cast<Instruction>(U));
I->replaceAllUsesWith(V);
++NumSimplified;
Changed = true;
}
- Changed |= RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
+ bool res = RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
+ if (res) {
+ // RecursivelyDeleteTriviallyDeadInstruction can remove
+ // more than one instruction, so simply incrementing the
+ // iterator does not work. When instructions get deleted
+ // re-iterate instead.
+ BI = BB->begin(); BE = BB->end();
+ Changed |= res;
+ }
}
// Place the list of instructions to simplify on the next loop iteration
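The restart-on-delete idiom introduced above, in isolation (plain C++ containers rather than LLVM's instruction list, purely for illustration): when a deletion can remove more than the element at hand, re-seating the iterators at the start is the simple safe choice.

#include <list>

void pruneNegatives(std::list<int> &Work) {
  for (auto It = Work.begin(), End = Work.end(); It != End;) {
    auto Cur = It++;
    if (*Cur < 0) {
      Work.erase(Cur);        // in the real pass this can cascade and delete more
      It = Work.begin();      // so re-iterate instead of trusting the saved iterator
      End = Work.end();
    }
  }
}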
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index 15b3e66..3b61bb5 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -20,6 +20,7 @@
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
@@ -43,7 +44,7 @@ namespace {
class LibCallOptimization {
protected:
Function *Caller;
- const DataLayout *TD;
+ const DataLayout *DL;
const TargetLibraryInfo *TLI;
const LibCallSimplifier *LCS;
LLVMContext* Context;
@@ -63,11 +64,11 @@ public:
/// change the calling convention.
virtual bool ignoreCallingConv() { return false; }
- Value *optimizeCall(CallInst *CI, const DataLayout *TD,
+ Value *optimizeCall(CallInst *CI, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const LibCallSimplifier *LCS, IRBuilder<> &B) {
Caller = CI->getParent()->getParent();
- this->TD = TD;
+ this->DL = DL;
this->TLI = TLI;
this->LCS = LCS;
if (CI->getCalledFunction())
@@ -75,7 +76,7 @@ public:
// We never change the calling convention.
if (!ignoreCallingConv() && CI->getCallingConv() != llvm::CallingConv::C)
- return NULL;
+ return nullptr;
return callOptimizer(CI->getCalledFunction(), CI, B);
}
@@ -88,9 +89,8 @@ public:
/// isOnlyUsedInZeroEqualityComparison - Return true if it only matters that the
/// value is equal or not-equal to zero.
static bool isOnlyUsedInZeroEqualityComparison(Value *V) {
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
- UI != E; ++UI) {
- if (ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
+ for (User *U : V->users()) {
+ if (ICmpInst *IC = dyn_cast<ICmpInst>(U))
if (IC->isEquality())
if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
if (C->isNullValue())
@@ -104,9 +104,8 @@ static bool isOnlyUsedInZeroEqualityComparison(Value *V) {
/// isOnlyUsedInEqualityComparison - Return true if it is only used in equality
/// comparisons with With.
static bool isOnlyUsedInEqualityComparison(Value *V, Value *With) {
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
- UI != E; ++UI) {
- if (ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
+ for (User *U : V->users()) {
+ if (ICmpInst *IC = dyn_cast<ICmpInst>(U))
if (IC->isEquality() && IC->getOperand(1) == With)
continue;
// Unknown instruction.
@@ -152,7 +151,8 @@ protected:
struct InstFortifiedLibCallOptimization : public FortifiedLibCallOptimization {
CallInst *CI;
- bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
+ bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp,
+ bool isString) const override {
if (CI->getArgOperand(SizeCIOp) == CI->getArgOperand(SizeArgOp))
return true;
if (ConstantInt *SizeCI =
@@ -175,7 +175,8 @@ struct InstFortifiedLibCallOptimization : public FortifiedLibCallOptimization {
};
struct MemCpyChkOpt : public InstFortifiedLibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
this->CI = CI;
FunctionType *FT = Callee->getFunctionType();
LLVMContext &Context = CI->getParent()->getContext();
@@ -184,21 +185,22 @@ struct MemCpyChkOpt : public InstFortifiedLibCallOptimization {
if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(Context) ||
- FT->getParamType(3) != TD->getIntPtrType(Context))
- return 0;
+ FT->getParamType(2) != DL->getIntPtrType(Context) ||
+ FT->getParamType(3) != DL->getIntPtrType(Context))
+ return nullptr;
if (isFoldable(3, 2, false)) {
B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
CI->getArgOperand(2), 1);
return CI->getArgOperand(0);
}
- return 0;
+ return nullptr;
}
};
struct MemMoveChkOpt : public InstFortifiedLibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
this->CI = CI;
FunctionType *FT = Callee->getFunctionType();
LLVMContext &Context = CI->getParent()->getContext();
@@ -207,21 +209,22 @@ struct MemMoveChkOpt : public InstFortifiedLibCallOptimization {
if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(Context) ||
- FT->getParamType(3) != TD->getIntPtrType(Context))
- return 0;
+ FT->getParamType(2) != DL->getIntPtrType(Context) ||
+ FT->getParamType(3) != DL->getIntPtrType(Context))
+ return nullptr;
if (isFoldable(3, 2, false)) {
B.CreateMemMove(CI->getArgOperand(0), CI->getArgOperand(1),
CI->getArgOperand(2), 1);
return CI->getArgOperand(0);
}
- return 0;
+ return nullptr;
}
};
struct MemSetChkOpt : public InstFortifiedLibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
this->CI = CI;
FunctionType *FT = Callee->getFunctionType();
LLVMContext &Context = CI->getParent()->getContext();
@@ -230,9 +233,9 @@ struct MemSetChkOpt : public InstFortifiedLibCallOptimization {
if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isIntegerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(Context) ||
- FT->getParamType(3) != TD->getIntPtrType(Context))
- return 0;
+ FT->getParamType(2) != DL->getIntPtrType(Context) ||
+ FT->getParamType(3) != DL->getIntPtrType(Context))
+ return nullptr;
if (isFoldable(3, 2, false)) {
Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(),
@@ -240,12 +243,13 @@ struct MemSetChkOpt : public InstFortifiedLibCallOptimization {
B.CreateMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), 1);
return CI->getArgOperand(0);
}
- return 0;
+ return nullptr;
}
};
struct StrCpyChkOpt : public InstFortifiedLibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
this->CI = CI;
StringRef Name = Callee->getName();
FunctionType *FT = Callee->getFunctionType();
@@ -256,8 +260,8 @@ struct StrCpyChkOpt : public InstFortifiedLibCallOptimization {
FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
- FT->getParamType(2) != TD->getIntPtrType(Context))
- return 0;
+ FT->getParamType(2) != DL->getIntPtrType(Context))
+ return nullptr;
Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
if (Dst == Src) // __strcpy_chk(x,x) -> x
@@ -269,28 +273,29 @@ struct StrCpyChkOpt : public InstFortifiedLibCallOptimization {
// TODO: It might be nice to get a maximum length out of the possible
// string lengths for varying strings.
if (isFoldable(2, 1, true)) {
- Value *Ret = EmitStrCpy(Dst, Src, B, TD, TLI, Name.substr(2, 6));
+ Value *Ret = EmitStrCpy(Dst, Src, B, DL, TLI, Name.substr(2, 6));
return Ret;
} else {
// Maybe we can still fold __strcpy_chk to __memcpy_chk.
uint64_t Len = GetStringLength(Src);
- if (Len == 0) return 0;
+ if (Len == 0) return nullptr;
// This optimization requires DataLayout.
- if (!TD) return 0;
+ if (!DL) return nullptr;
Value *Ret =
EmitMemCpyChk(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(Context), Len),
- CI->getArgOperand(2), B, TD, TLI);
+ ConstantInt::get(DL->getIntPtrType(Context), Len),
+ CI->getArgOperand(2), B, DL, TLI);
return Ret;
}
- return 0;
+ return nullptr;
}
};
struct StpCpyChkOpt : public InstFortifiedLibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
this->CI = CI;
StringRef Name = Callee->getName();
FunctionType *FT = Callee->getFunctionType();
@@ -301,13 +306,13 @@ struct StpCpyChkOpt : public InstFortifiedLibCallOptimization {
FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
- FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)))
- return 0;
+ FT->getParamType(2) != DL->getIntPtrType(FT->getParamType(0)))
+ return nullptr;
Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x)
- Value *StrLen = EmitStrLen(Src, B, TD, TLI);
- return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : 0;
+ Value *StrLen = EmitStrLen(Src, B, DL, TLI);
+ return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : nullptr;
}
// If a) we don't have any length information, or b) we know this will
@@ -316,31 +321,32 @@ struct StpCpyChkOpt : public InstFortifiedLibCallOptimization {
// TODO: It might be nice to get a maximum length out of the possible
// string lengths for varying strings.
if (isFoldable(2, 1, true)) {
- Value *Ret = EmitStrCpy(Dst, Src, B, TD, TLI, Name.substr(2, 6));
+ Value *Ret = EmitStrCpy(Dst, Src, B, DL, TLI, Name.substr(2, 6));
return Ret;
} else {
// Maybe we can still fold __stpcpy_chk to __memcpy_chk.
uint64_t Len = GetStringLength(Src);
- if (Len == 0) return 0;
+ if (Len == 0) return nullptr;
// This optimization requires DataLayout.
- if (!TD) return 0;
+ if (!DL) return nullptr;
Type *PT = FT->getParamType(0);
- Value *LenV = ConstantInt::get(TD->getIntPtrType(PT), Len);
+ Value *LenV = ConstantInt::get(DL->getIntPtrType(PT), Len);
Value *DstEnd = B.CreateGEP(Dst,
- ConstantInt::get(TD->getIntPtrType(PT),
+ ConstantInt::get(DL->getIntPtrType(PT),
Len - 1));
- if (!EmitMemCpyChk(Dst, Src, LenV, CI->getArgOperand(2), B, TD, TLI))
- return 0;
+ if (!EmitMemCpyChk(Dst, Src, LenV, CI->getArgOperand(2), B, DL, TLI))
+ return nullptr;
return DstEnd;
}
- return 0;
+ return nullptr;
}
};
struct StrNCpyChkOpt : public InstFortifiedLibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
this->CI = CI;
StringRef Name = Callee->getName();
FunctionType *FT = Callee->getFunctionType();
@@ -351,16 +357,16 @@ struct StrNCpyChkOpt : public InstFortifiedLibCallOptimization {
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
!FT->getParamType(2)->isIntegerTy() ||
- FT->getParamType(3) != TD->getIntPtrType(Context))
- return 0;
+ FT->getParamType(3) != DL->getIntPtrType(Context))
+ return nullptr;
if (isFoldable(3, 2, false)) {
Value *Ret = EmitStrNCpy(CI->getArgOperand(0), CI->getArgOperand(1),
- CI->getArgOperand(2), B, TD, TLI,
+ CI->getArgOperand(2), B, DL, TLI,
Name.substr(2, 7));
return Ret;
}
- return 0;
+ return nullptr;
}
};
@@ -369,14 +375,15 @@ struct StrNCpyChkOpt : public InstFortifiedLibCallOptimization {
//===----------------------------------------------------------------------===//
struct StrCatOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
// Verify the "strcat" function prototype.
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
FT->getReturnType() != B.getInt8PtrTy() ||
FT->getParamType(0) != FT->getReturnType() ||
FT->getParamType(1) != FT->getReturnType())
- return 0;
+ return nullptr;
// Extract some information from the instruction
Value *Dst = CI->getArgOperand(0);
@@ -384,7 +391,7 @@ struct StrCatOpt : public LibCallOptimization {
// See if we can get the length of the input string.
uint64_t Len = GetStringLength(Src);
- if (Len == 0) return 0;
+ if (Len == 0) return nullptr;
--Len; // Unbias length.
// Handle the simple, do-nothing case: strcat(x, "") -> x
@@ -392,7 +399,7 @@ struct StrCatOpt : public LibCallOptimization {
return Dst;
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return nullptr;
return emitStrLenMemCpy(Src, Dst, Len, B);
}
@@ -401,9 +408,9 @@ struct StrCatOpt : public LibCallOptimization {
IRBuilder<> &B) {
// We need to find the end of the destination string. That's where the
// memory is to be moved to. We just generate a call to strlen.
- Value *DstLen = EmitStrLen(Dst, B, TD, TLI);
+ Value *DstLen = EmitStrLen(Dst, B, DL, TLI);
if (!DstLen)
- return 0;
+ return nullptr;
// Now that we have the destination's length, we must index into the
// destination's pointer to get the actual memcpy destination (end of
@@ -413,13 +420,14 @@ struct StrCatOpt : public LibCallOptimization {
// We have enough information to now generate the memcpy call to do the
// concatenation for us. Make a memcpy to copy the nul byte with align = 1.
B.CreateMemCpy(CpyDst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len + 1), 1);
+ ConstantInt::get(DL->getIntPtrType(*Context), Len + 1), 1);
return Dst;
}
};
struct StrNCatOpt : public StrCatOpt {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
// Verify the "strncat" function prototype.
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 ||
@@ -427,7 +435,7 @@ struct StrNCatOpt : public StrCatOpt {
FT->getParamType(0) != FT->getReturnType() ||
FT->getParamType(1) != FT->getReturnType() ||
!FT->getParamType(2)->isIntegerTy())
- return 0;
+ return nullptr;
// Extract some information from the instruction
Value *Dst = CI->getArgOperand(0);
@@ -438,11 +446,11 @@ struct StrNCatOpt : public StrCatOpt {
if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getArgOperand(2)))
Len = LengthArg->getZExtValue();
else
- return 0;
+ return nullptr;
// See if we can get the length of the input string.
uint64_t SrcLen = GetStringLength(Src);
- if (SrcLen == 0) return 0;
+ if (SrcLen == 0) return nullptr;
--SrcLen; // Unbias length.
// Handle the simple, do-nothing cases:
@@ -451,10 +459,10 @@ struct StrNCatOpt : public StrCatOpt {
if (SrcLen == 0 || Len == 0) return Dst;
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return nullptr;
// We don't optimize this case
- if (Len < SrcLen) return 0;
+ if (Len < SrcLen) return nullptr;
// strncat(x, s, c) -> strcat(x, s)
// s is constant so the strcat can be optimized further
@@ -463,38 +471,42 @@ struct StrNCatOpt : public StrCatOpt {
};
struct StrChrOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
// Verify the "strchr" function prototype.
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
FT->getReturnType() != B.getInt8PtrTy() ||
FT->getParamType(0) != FT->getReturnType() ||
!FT->getParamType(1)->isIntegerTy(32))
- return 0;
+ return nullptr;
Value *SrcStr = CI->getArgOperand(0);
// If the second operand is non-constant, see if we can compute the length
// of the input string and turn this into memchr.
ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
- if (CharC == 0) {
+ if (!CharC) {
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return nullptr;
uint64_t Len = GetStringLength(SrcStr);
if (Len == 0 || !FT->getParamType(1)->isIntegerTy(32)) // memchr needs i32.
- return 0;
+ return nullptr;
return EmitMemChr(SrcStr, CI->getArgOperand(1), // include nul.
- ConstantInt::get(TD->getIntPtrType(*Context), Len),
- B, TD, TLI);
+ ConstantInt::get(DL->getIntPtrType(*Context), Len),
+ B, DL, TLI);
}
// Otherwise, the character is a constant; see if the first argument is
// a string literal. If so, we can constant fold.
StringRef Str;
- if (!getConstantStringInfo(SrcStr, Str))
- return 0;
+ if (!getConstantStringInfo(SrcStr, Str)) {
+ if (DL && CharC->isZero()) // strchr(p, 0) -> p + strlen(p)
+ return B.CreateGEP(SrcStr, EmitStrLen(SrcStr, B, DL, TLI), "strchr");
+ return nullptr;
+ }
// Compute the offset; make sure to handle the case when we're searching for
// zero (a weird way to spell strlen).
@@ -509,28 +521,29 @@ struct StrChrOpt : public LibCallOptimization {
};
struct StrRChrOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
// Verify the "strrchr" function prototype.
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
FT->getReturnType() != B.getInt8PtrTy() ||
FT->getParamType(0) != FT->getReturnType() ||
!FT->getParamType(1)->isIntegerTy(32))
- return 0;
+ return nullptr;
Value *SrcStr = CI->getArgOperand(0);
ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
// Cannot fold anything if we're not looking for a constant.
if (!CharC)
- return 0;
+ return nullptr;
StringRef Str;
if (!getConstantStringInfo(SrcStr, Str)) {
// strrchr(s, 0) -> strchr(s, 0)
- if (TD && CharC->isZero())
- return EmitStrChr(SrcStr, '\0', B, TD, TLI);
- return 0;
+ if (DL && CharC->isZero())
+ return EmitStrChr(SrcStr, '\0', B, DL, TLI);
+ return nullptr;
}
// Compute the offset.
@@ -545,14 +558,15 @@ struct StrRChrOpt : public LibCallOptimization {
};
struct StrCmpOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
// Verify the "strcmp" function prototype.
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
!FT->getReturnType()->isIntegerTy(32) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != B.getInt8PtrTy())
- return 0;
+ return nullptr;
Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1);
if (Str1P == Str2P) // strcmp(x,x) -> 0
@@ -578,19 +592,20 @@ struct StrCmpOpt : public LibCallOptimization {
uint64_t Len2 = GetStringLength(Str2P);
if (Len1 && Len2) {
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return nullptr;
return EmitMemCmp(Str1P, Str2P,
- ConstantInt::get(TD->getIntPtrType(*Context),
- std::min(Len1, Len2)), B, TD, TLI);
+ ConstantInt::get(DL->getIntPtrType(*Context),
+ std::min(Len1, Len2)), B, DL, TLI);
}
- return 0;
+ return nullptr;
}
};
struct StrNCmpOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
// Verify the "strncmp" function prototype.
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 ||
@@ -598,7 +613,7 @@ struct StrNCmpOpt : public LibCallOptimization {
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != B.getInt8PtrTy() ||
!FT->getParamType(2)->isIntegerTy())
- return 0;
+ return nullptr;
Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1);
if (Str1P == Str2P) // strncmp(x,x,n) -> 0
@@ -609,13 +624,13 @@ struct StrNCmpOpt : public LibCallOptimization {
if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getArgOperand(2)))
Length = LengthArg->getZExtValue();
else
- return 0;
+ return nullptr;
if (Length == 0) // strncmp(x,y,0) -> 0
return ConstantInt::get(CI->getType(), 0);
- if (TD && Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1)
- return EmitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, TD, TLI);
+ if (DL && Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1)
+ return EmitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, DL, TLI);
StringRef Str1, Str2;
bool HasStr1 = getConstantStringInfo(Str1P, Str1);
@@ -635,66 +650,68 @@ struct StrNCmpOpt : public LibCallOptimization {
if (HasStr2 && Str2.empty()) // strncmp(x, "", n) -> *x
return B.CreateZExt(B.CreateLoad(Str1P, "strcmpload"), CI->getType());
- return 0;
+ return nullptr;
}
};
struct StrCpyOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
// Verify the "strcpy" function prototype.
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != B.getInt8PtrTy())
- return 0;
+ return nullptr;
Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
if (Dst == Src) // strcpy(x,x) -> x
return Src;
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return nullptr;
// See if we can get the length of the input string.
uint64_t Len = GetStringLength(Src);
- if (Len == 0) return 0;
+ if (Len == 0) return nullptr;
// We have enough information to now generate the memcpy call to do the
// copy for us. Make a memcpy to copy the nul byte with align = 1.
B.CreateMemCpy(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len), 1);
+ ConstantInt::get(DL->getIntPtrType(*Context), Len), 1);
return Dst;
}
};
struct StpCpyOpt: public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
// Verify the "stpcpy" function prototype.
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != B.getInt8PtrTy())
- return 0;
+ return nullptr;
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return nullptr;
Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x)
- Value *StrLen = EmitStrLen(Src, B, TD, TLI);
- return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : 0;
+ Value *StrLen = EmitStrLen(Src, B, DL, TLI);
+ return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : nullptr;
}
// See if we can get the length of the input string.
uint64_t Len = GetStringLength(Src);
- if (Len == 0) return 0;
+ if (Len == 0) return nullptr;
Type *PT = FT->getParamType(0);
- Value *LenV = ConstantInt::get(TD->getIntPtrType(PT), Len);
+ Value *LenV = ConstantInt::get(DL->getIntPtrType(PT), Len);
Value *DstEnd = B.CreateGEP(Dst,
- ConstantInt::get(TD->getIntPtrType(PT),
+ ConstantInt::get(DL->getIntPtrType(PT),
Len - 1));
// We have enough information to now generate the memcpy call to do the
@@ -705,13 +722,14 @@ struct StpCpyOpt: public LibCallOptimization {
};
struct StrNCpyOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != B.getInt8PtrTy() ||
!FT->getParamType(2)->isIntegerTy())
- return 0;
+ return nullptr;
Value *Dst = CI->getArgOperand(0);
Value *Src = CI->getArgOperand(1);
@@ -719,7 +737,7 @@ struct StrNCpyOpt : public LibCallOptimization {
// See if we can get the length of the input string.
uint64_t SrcLen = GetStringLength(Src);
- if (SrcLen == 0) return 0;
+ if (SrcLen == 0) return nullptr;
--SrcLen;
if (SrcLen == 0) {
@@ -732,33 +750,34 @@ struct StrNCpyOpt : public LibCallOptimization {
if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(LenOp))
Len = LengthArg->getZExtValue();
else
- return 0;
+ return nullptr;
if (Len == 0) return Dst; // strncpy(x, y, 0) -> x
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return nullptr;
// Let strncpy handle the zero padding
- if (Len > SrcLen+1) return 0;
+ if (Len > SrcLen+1) return nullptr;
Type *PT = FT->getParamType(0);
// strncpy(x, s, c) -> memcpy(x, s, c, 1) [s and c are constant]
B.CreateMemCpy(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(PT), Len), 1);
+ ConstantInt::get(DL->getIntPtrType(PT), Len), 1);
return Dst;
}
};
struct StrLenOpt : public LibCallOptimization {
- virtual bool ignoreCallingConv() { return true; }
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ bool ignoreCallingConv() override { return true; }
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 1 ||
FT->getParamType(0) != B.getInt8PtrTy() ||
!FT->getReturnType()->isIntegerTy())
- return 0;
+ return nullptr;
Value *Src = CI->getArgOperand(0);
@@ -766,22 +785,38 @@ struct StrLenOpt : public LibCallOptimization {
if (uint64_t Len = GetStringLength(Src))
return ConstantInt::get(CI->getType(), Len-1);
+ // strlen(x?"foo":"bars") --> x ? 3 : 4
+ if (SelectInst *SI = dyn_cast<SelectInst>(Src)) {
+ uint64_t LenTrue = GetStringLength(SI->getTrueValue());
+ uint64_t LenFalse = GetStringLength(SI->getFalseValue());
+ if (LenTrue && LenFalse) {
+ emitOptimizationRemark(*Context, "simplify-libcalls", *Caller,
+ SI->getDebugLoc(),
+ "folded strlen(select) to select of constants");
+ return B.CreateSelect(SI->getCondition(),
+ ConstantInt::get(CI->getType(), LenTrue-1),
+ ConstantInt::get(CI->getType(), LenFalse-1));
+ }
+ }
+
// strlen(x) != 0 --> *x != 0
// strlen(x) == 0 --> *x == 0
if (isOnlyUsedInZeroEqualityComparison(CI))
return B.CreateZExt(B.CreateLoad(Src, "strlenfirst"), CI->getType());
- return 0;
+
+ return nullptr;
}
};
struct StrPBrkOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
FT->getParamType(0) != B.getInt8PtrTy() ||
FT->getParamType(1) != FT->getParamType(0) ||
FT->getReturnType() != FT->getParamType(0))
- return 0;
+ return nullptr;
StringRef S1, S2;
bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
@@ -802,20 +837,21 @@ struct StrPBrkOpt : public LibCallOptimization {
}
// strpbrk(s, "a") -> strchr(s, 'a')
- if (TD && HasS2 && S2.size() == 1)
- return EmitStrChr(CI->getArgOperand(0), S2[0], B, TD, TLI);
+ if (DL && HasS2 && S2.size() == 1)
+ return EmitStrChr(CI->getArgOperand(0), S2[0], B, DL, TLI);
- return 0;
+ return nullptr;
}
};
struct StrToOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
FunctionType *FT = Callee->getFunctionType();
if ((FT->getNumParams() != 2 && FT->getNumParams() != 3) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy())
- return 0;
+ return nullptr;
Value *EndPtr = CI->getArgOperand(1);
if (isa<ConstantPointerNull>(EndPtr)) {
@@ -824,18 +860,19 @@ struct StrToOpt : public LibCallOptimization {
CI->addAttribute(1, Attribute::NoCapture);
}
- return 0;
+ return nullptr;
}
};
struct StrSpnOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
FT->getParamType(0) != B.getInt8PtrTy() ||
FT->getParamType(1) != FT->getParamType(0) ||
!FT->getReturnType()->isIntegerTy())
- return 0;
+ return nullptr;
StringRef S1, S2;
bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
@@ -853,18 +890,19 @@ struct StrSpnOpt : public LibCallOptimization {
return ConstantInt::get(CI->getType(), Pos);
}
- return 0;
+ return nullptr;
}
};
struct StrCSpnOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
FT->getParamType(0) != B.getInt8PtrTy() ||
FT->getParamType(1) != FT->getParamType(0) ||
!FT->getReturnType()->isIntegerTy())
- return 0;
+ return nullptr;
StringRef S1, S2;
bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
@@ -882,37 +920,37 @@ struct StrCSpnOpt : public LibCallOptimization {
}
// strcspn(s, "") -> strlen(s)
- if (TD && HasS2 && S2.empty())
- return EmitStrLen(CI->getArgOperand(0), B, TD, TLI);
+ if (DL && HasS2 && S2.empty())
+ return EmitStrLen(CI->getArgOperand(0), B, DL, TLI);
- return 0;
+ return nullptr;
}
};
struct StrStrOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
!FT->getReturnType()->isPointerTy())
- return 0;
+ return nullptr;
// fold strstr(x, x) -> x.
if (CI->getArgOperand(0) == CI->getArgOperand(1))
return B.CreateBitCast(CI->getArgOperand(0), CI->getType());
// fold strstr(a, b) == a -> strncmp(a, b, strlen(b)) == 0
- if (TD && isOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) {
- Value *StrLen = EmitStrLen(CI->getArgOperand(1), B, TD, TLI);
+ if (DL && isOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) {
+ Value *StrLen = EmitStrLen(CI->getArgOperand(1), B, DL, TLI);
if (!StrLen)
- return 0;
+ return nullptr;
Value *StrNCmp = EmitStrNCmp(CI->getArgOperand(0), CI->getArgOperand(1),
- StrLen, B, TD, TLI);
+ StrLen, B, DL, TLI);
if (!StrNCmp)
- return 0;
- for (Value::use_iterator UI = CI->use_begin(), UE = CI->use_end();
- UI != UE; ) {
+ return nullptr;
+ for (auto UI = CI->user_begin(), UE = CI->user_end(); UI != UE;) {
ICmpInst *Old = cast<ICmpInst>(*UI++);
Value *Cmp = B.CreateICmp(Old->getPredicate(), StrNCmp,
ConstantInt::getNullValue(StrNCmp->getType()),
@@ -946,20 +984,21 @@ struct StrStrOpt : public LibCallOptimization {
// fold strstr(x, "y") -> strchr(x, 'y').
if (HasStr2 && ToFindStr.size() == 1) {
- Value *StrChr= EmitStrChr(CI->getArgOperand(0), ToFindStr[0], B, TD, TLI);
- return StrChr ? B.CreateBitCast(StrChr, CI->getType()) : 0;
+ Value *StrChr= EmitStrChr(CI->getArgOperand(0), ToFindStr[0], B, DL, TLI);
+ return StrChr ? B.CreateBitCast(StrChr, CI->getType()) : nullptr;
}
- return 0;
+ return nullptr;
}
};
struct MemCmpOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || !FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
!FT->getReturnType()->isIntegerTy(32))
- return 0;
+ return nullptr;
Value *LHS = CI->getArgOperand(0), *RHS = CI->getArgOperand(1);
@@ -968,7 +1007,7 @@ struct MemCmpOpt : public LibCallOptimization {
// Make sure we have a constant length.
ConstantInt *LenC = dyn_cast<ConstantInt>(CI->getArgOperand(2));
- if (!LenC) return 0;
+ if (!LenC) return nullptr;
uint64_t Len = LenC->getZExtValue();
if (Len == 0) // memcmp(s1,s2,0) -> 0
@@ -989,7 +1028,7 @@ struct MemCmpOpt : public LibCallOptimization {
getConstantStringInfo(RHS, RHSStr)) {
// Make sure we're not reading out-of-bounds memory.
if (Len > LHSStr.size() || Len > RHSStr.size())
- return 0;
+ return nullptr;
// Fold the memcmp and normalize the result. This way we get consistent
// results across multiple platforms.
uint64_t Ret = 0;
@@ -1001,21 +1040,22 @@ struct MemCmpOpt : public LibCallOptimization {
return ConstantInt::get(CI->getType(), Ret);
}
- return 0;
+ return nullptr;
}
};
struct MemCpyOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return nullptr;
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(*Context))
- return 0;
+ FT->getParamType(2) != DL->getIntPtrType(*Context))
+ return nullptr;
// memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1)
B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
@@ -1025,16 +1065,17 @@ struct MemCpyOpt : public LibCallOptimization {
};
struct MemMoveOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return nullptr;
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(*Context))
- return 0;
+ FT->getParamType(2) != DL->getIntPtrType(*Context))
+ return nullptr;
// memmove(x, y, n) -> llvm.memmove(x, y, n, 1)
B.CreateMemMove(CI->getArgOperand(0), CI->getArgOperand(1),
@@ -1044,16 +1085,17 @@ struct MemMoveOpt : public LibCallOptimization {
};
struct MemSetOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return nullptr;
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isIntegerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)))
- return 0;
+ FT->getParamType(2) != DL->getIntPtrType(FT->getParamType(0)))
+ return nullptr;
// memset(p, v, n) -> llvm.memset(p, v, n, 1)
Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(), false);
@@ -1072,26 +1114,26 @@ struct MemSetOpt : public LibCallOptimization {
struct UnaryDoubleFPOpt : public LibCallOptimization {
bool CheckRetType;
UnaryDoubleFPOpt(bool CheckReturnType): CheckRetType(CheckReturnType) {}
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 1 || !FT->getReturnType()->isDoubleTy() ||
!FT->getParamType(0)->isDoubleTy())
- return 0;
+ return nullptr;
if (CheckRetType) {
// Check if all the uses of functions like 'sin' are converted to float.
- for (Value::use_iterator UseI = CI->use_begin(); UseI != CI->use_end();
- ++UseI) {
- FPTruncInst *Cast = dyn_cast<FPTruncInst>(*UseI);
- if (Cast == 0 || !Cast->getType()->isFloatTy())
- return 0;
+ for (User *U : CI->users()) {
+ FPTruncInst *Cast = dyn_cast<FPTruncInst>(U);
+ if (!Cast || !Cast->getType()->isFloatTy())
+ return nullptr;
}
}
// If this is something like 'floor((double)floatval)', convert to floorf.
FPExtInst *Cast = dyn_cast<FPExtInst>(CI->getArgOperand(0));
- if (Cast == 0 || !Cast->getOperand(0)->getType()->isFloatTy())
- return 0;
+ if (!Cast || !Cast->getOperand(0)->getType()->isFloatTy())
+ return nullptr;
// floor((double)floatval) -> (double)floorf(floatval)
Value *V = Cast->getOperand(0);
@@ -1100,6 +1142,49 @@ struct UnaryDoubleFPOpt : public LibCallOptimization {
}
};
+// Double -> Float Shrinking Optimizations for Binary Functions like 'fmin/fmax'
+struct BinaryDoubleFPOpt : public LibCallOptimization {
+ bool CheckRetType;
+ BinaryDoubleFPOpt(bool CheckReturnType): CheckRetType(CheckReturnType) {}
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
+ FunctionType *FT = Callee->getFunctionType();
+ // Just make sure this has 2 arguments of the same FP type, which match the
+ // result type.
+ if (FT->getNumParams() != 2 || FT->getReturnType() != FT->getParamType(0) ||
+ FT->getParamType(0) != FT->getParamType(1) ||
+ !FT->getParamType(0)->isFloatingPointTy())
+ return nullptr;
+
+ if (CheckRetType) {
+ // Check if all the uses of functions like 'fmin/fmax' are converted to
+ // float.
+ for (User *U : CI->users()) {
+ FPTruncInst *Cast = dyn_cast<FPTruncInst>(U);
+ if (!Cast || !Cast->getType()->isFloatTy())
+ return nullptr;
+ }
+ }
+
+ // If this is something like 'fmin((double)floatval1, (double)floatval2)',
+ // we convert it to fminf.
+ FPExtInst *Cast1 = dyn_cast<FPExtInst>(CI->getArgOperand(0));
+ FPExtInst *Cast2 = dyn_cast<FPExtInst>(CI->getArgOperand(1));
+ if (!Cast1 || !Cast1->getOperand(0)->getType()->isFloatTy() ||
+ !Cast2 || !Cast2->getOperand(0)->getType()->isFloatTy())
+ return nullptr;
+
+ // fmin((double)floatval1, (double)floatval2)
+ // -> (double)fmin(floatval1, floatval2)
+ Value *V = nullptr;
+ Value *V1 = Cast1->getOperand(0);
+ Value *V2 = Cast2->getOperand(0);
+ V = EmitBinaryFloatFnCall(V1, V2, Callee->getName(), B,
+ Callee->getAttributes());
+ return B.CreateFPExt(V, B.getDoubleTy());
+ }
+};
+
struct UnsafeFPLibCallOptimization : public LibCallOptimization {
bool UnsafeFPShrink;
UnsafeFPLibCallOptimization(bool UnsafeFPShrink) {
@@ -1109,8 +1194,9 @@ struct UnsafeFPLibCallOptimization : public LibCallOptimization {
struct CosOpt : public UnsafeFPLibCallOptimization {
CosOpt(bool UnsafeFPShrink) : UnsafeFPLibCallOptimization(UnsafeFPShrink) {}
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- Value *Ret = NULL;
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
+ Value *Ret = nullptr;
if (UnsafeFPShrink && Callee->getName() == "cos" &&
TLI->has(LibFunc::cosf)) {
UnaryDoubleFPOpt UnsafeUnaryDoubleFP(true);
@@ -1136,8 +1222,9 @@ struct CosOpt : public UnsafeFPLibCallOptimization {
struct PowOpt : public UnsafeFPLibCallOptimization {
PowOpt(bool UnsafeFPShrink) : UnsafeFPLibCallOptimization(UnsafeFPShrink) {}
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- Value *Ret = NULL;
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
+ Value *Ret = nullptr;
if (UnsafeFPShrink && Callee->getName() == "pow" &&
TLI->has(LibFunc::powf)) {
UnaryDoubleFPOpt UnsafeUnaryDoubleFP(true);
@@ -1162,10 +1249,16 @@ struct PowOpt : public UnsafeFPLibCallOptimization {
hasUnaryFloatFn(TLI, Op1->getType(), LibFunc::exp2, LibFunc::exp2f,
LibFunc::exp2l))
return EmitUnaryFloatFnCall(Op2, "exp2", B, Callee->getAttributes());
+ // pow(10.0, x) -> exp10(x)
+ if (Op1C->isExactlyValue(10.0) &&
+ hasUnaryFloatFn(TLI, Op1->getType(), LibFunc::exp10, LibFunc::exp10f,
+ LibFunc::exp10l))
+ return EmitUnaryFloatFnCall(Op2, TLI->getName(LibFunc::exp10), B,
+ Callee->getAttributes());
}
ConstantFP *Op2C = dyn_cast<ConstantFP>(Op2);
- if (Op2C == 0) return Ret;
+ if (!Op2C) return Ret;
if (Op2C->getValueAPF().isZero()) // pow(x, 0.0) -> 1.0
return ConstantFP::get(CI->getType(), 1.0);
@@ -1198,14 +1291,15 @@ struct PowOpt : public UnsafeFPLibCallOptimization {
if (Op2C->isExactlyValue(-1.0)) // pow(x, -1.0) -> 1.0/x
return B.CreateFDiv(ConstantFP::get(CI->getType(), 1.0),
Op1, "powrecip");
- return 0;
+ return nullptr;
}
};
struct Exp2Opt : public UnsafeFPLibCallOptimization {
Exp2Opt(bool UnsafeFPShrink) : UnsafeFPLibCallOptimization(UnsafeFPShrink) {}
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- Value *Ret = NULL;
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
+ Value *Ret = nullptr;
if (UnsafeFPShrink && Callee->getName() == "exp2" &&
TLI->has(LibFunc::exp2f)) {
UnaryDoubleFPOpt UnsafeUnaryDoubleFP(true);
@@ -1222,37 +1316,37 @@ struct Exp2Opt : public UnsafeFPLibCallOptimization {
Value *Op = CI->getArgOperand(0);
// Turn exp2(sitofp(x)) -> ldexp(1.0, sext(x)) if sizeof(x) <= 32
// Turn exp2(uitofp(x)) -> ldexp(1.0, zext(x)) if sizeof(x) < 32
- Value *LdExpArg = 0;
- if (SIToFPInst *OpC = dyn_cast<SIToFPInst>(Op)) {
- if (OpC->getOperand(0)->getType()->getPrimitiveSizeInBits() <= 32)
- LdExpArg = B.CreateSExt(OpC->getOperand(0), B.getInt32Ty());
- } else if (UIToFPInst *OpC = dyn_cast<UIToFPInst>(Op)) {
- if (OpC->getOperand(0)->getType()->getPrimitiveSizeInBits() < 32)
- LdExpArg = B.CreateZExt(OpC->getOperand(0), B.getInt32Ty());
- }
+ LibFunc::Func LdExp = LibFunc::ldexpl;
+ if (Op->getType()->isFloatTy())
+ LdExp = LibFunc::ldexpf;
+ else if (Op->getType()->isDoubleTy())
+ LdExp = LibFunc::ldexp;
+
+ if (TLI->has(LdExp)) {
+ Value *LdExpArg = nullptr;
+ if (SIToFPInst *OpC = dyn_cast<SIToFPInst>(Op)) {
+ if (OpC->getOperand(0)->getType()->getPrimitiveSizeInBits() <= 32)
+ LdExpArg = B.CreateSExt(OpC->getOperand(0), B.getInt32Ty());
+ } else if (UIToFPInst *OpC = dyn_cast<UIToFPInst>(Op)) {
+ if (OpC->getOperand(0)->getType()->getPrimitiveSizeInBits() < 32)
+ LdExpArg = B.CreateZExt(OpC->getOperand(0), B.getInt32Ty());
+ }
- if (LdExpArg) {
- const char *Name;
- if (Op->getType()->isFloatTy())
- Name = "ldexpf";
- else if (Op->getType()->isDoubleTy())
- Name = "ldexp";
- else
- Name = "ldexpl";
-
- Constant *One = ConstantFP::get(*Context, APFloat(1.0f));
- if (!Op->getType()->isFloatTy())
- One = ConstantExpr::getFPExtend(One, Op->getType());
-
- Module *M = Caller->getParent();
- Value *Callee = M->getOrInsertFunction(Name, Op->getType(),
- Op->getType(),
- B.getInt32Ty(), NULL);
- CallInst *CI = B.CreateCall2(Callee, One, LdExpArg);
- if (const Function *F = dyn_cast<Function>(Callee->stripPointerCasts()))
- CI->setCallingConv(F->getCallingConv());
+ if (LdExpArg) {
+ Constant *One = ConstantFP::get(*Context, APFloat(1.0f));
+ if (!Op->getType()->isFloatTy())
+ One = ConstantExpr::getFPExtend(One, Op->getType());
- return CI;
+ Module *M = Caller->getParent();
+ Value *Callee =
+ M->getOrInsertFunction(TLI->getName(LdExp), Op->getType(),
+ Op->getType(), B.getInt32Ty(), NULL);
+ CallInst *CI = B.CreateCall2(Callee, One, LdExpArg);
+ if (const Function *F = dyn_cast<Function>(Callee->stripPointerCasts()))
+ CI->setCallingConv(F->getCallingConv());
+
+ return CI;
+ }
}
return Ret;
}
@@ -1261,11 +1355,12 @@ struct Exp2Opt : public UnsafeFPLibCallOptimization {
struct SinCosPiOpt : public LibCallOptimization {
SinCosPiOpt() {}
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
// Make sure the prototype is as expected; otherwise the rest of the
// function is probably invalid and likely to abort.
if (!isTrigLibCall(CI))
- return 0;
+ return nullptr;
Value *Arg = CI->getArgOperand(0);
SmallVector<CallInst *, 1> SinCalls;
@@ -1277,14 +1372,13 @@ struct SinCosPiOpt : public LibCallOptimization {
// Look for all compatible sinpi, cospi and sincospi calls with the same
// argument. If there are enough (in some sense) we can make the
// substitution.
- for (Value::use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
- UI != UE; ++UI)
- classifyArgUse(*UI, CI->getParent(), IsFloat, SinCalls, CosCalls,
+ for (User *U : Arg->users())
+ classifyArgUse(U, CI->getParent(), IsFloat, SinCalls, CosCalls,
SinCosCalls);
// It's only worthwhile if both sinpi and cospi are actually used.
if (SinCosCalls.empty() && (SinCalls.empty() || CosCalls.empty()))
- return 0;
+ return nullptr;
Value *Sin, *Cos, *SinCos;
insertSinCosCall(B, CI->getCalledFunction(), Arg, IsFloat, Sin, Cos,
@@ -1294,7 +1388,7 @@ struct SinCosPiOpt : public LibCallOptimization {
replaceTrigInsts(CosCalls, Cos);
replaceTrigInsts(SinCosCalls, SinCos);
- return 0;
+ return nullptr;
}
bool isTrigLibCall(CallInst *CI) {
@@ -1334,7 +1428,7 @@ struct SinCosPiOpt : public LibCallOptimization {
SinCalls.push_back(CI);
else if (Func == LibFunc::cospif)
CosCalls.push_back(CI);
- else if (Func == LibFunc::sincospi_stretf)
+ else if (Func == LibFunc::sincospif_stret)
SinCosCalls.push_back(CI);
} else {
if (Func == LibFunc::sinpi)
@@ -1363,7 +1457,7 @@ struct SinCosPiOpt : public LibCallOptimization {
Triple T(OrigCallee->getParent()->getTargetTriple());
if (UseFloat) {
- Name = "__sincospi_stretf";
+ Name = "__sincospif_stret";
assert(T.getArch() != Triple::x86 && "x86 messy and unsupported for now");
// x86_64 can't use {float, float} since that would be returned in both
@@ -1412,14 +1506,15 @@ struct SinCosPiOpt : public LibCallOptimization {
//===----------------------------------------------------------------------===//
struct FFSOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
FunctionType *FT = Callee->getFunctionType();
// Just make sure this has one integer argument and an i32 result.
if (FT->getNumParams() != 1 ||
!FT->getReturnType()->isIntegerTy(32) ||
!FT->getParamType(0)->isIntegerTy())
- return 0;
+ return nullptr;
Value *Op = CI->getArgOperand(0);
@@ -1445,13 +1540,14 @@ struct FFSOpt : public LibCallOptimization {
};
struct AbsOpt : public LibCallOptimization {
- virtual bool ignoreCallingConv() { return true; }
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ bool ignoreCallingConv() override { return true; }
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
FunctionType *FT = Callee->getFunctionType();
// We require integer(integer) where the types agree.
if (FT->getNumParams() != 1 || !FT->getReturnType()->isIntegerTy() ||
FT->getParamType(0) != FT->getReturnType())
- return 0;
+ return nullptr;
// abs(x) -> x >s -1 ? x : -x
Value *Op = CI->getArgOperand(0);
@@ -1463,12 +1559,13 @@ struct AbsOpt : public LibCallOptimization {
};
struct IsDigitOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
FunctionType *FT = Callee->getFunctionType();
// We require integer(i32)
if (FT->getNumParams() != 1 || !FT->getReturnType()->isIntegerTy() ||
!FT->getParamType(0)->isIntegerTy(32))
- return 0;
+ return nullptr;
// isdigit(c) -> (c-'0') <u 10
Value *Op = CI->getArgOperand(0);
@@ -1479,12 +1576,13 @@ struct IsDigitOpt : public LibCallOptimization {
};
struct IsAsciiOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
FunctionType *FT = Callee->getFunctionType();
// We require integer(i32)
if (FT->getNumParams() != 1 || !FT->getReturnType()->isIntegerTy() ||
!FT->getParamType(0)->isIntegerTy(32))
- return 0;
+ return nullptr;
// isascii(c) -> c <u 128
Value *Op = CI->getArgOperand(0);
@@ -1494,12 +1592,13 @@ struct IsAsciiOpt : public LibCallOptimization {
};
struct ToAsciiOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
FunctionType *FT = Callee->getFunctionType();
// We require i32(i32)
if (FT->getNumParams() != 1 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isIntegerTy(32))
- return 0;
+ return nullptr;
// toascii(c) -> c & 0x7f
return B.CreateAnd(CI->getArgOperand(0),
@@ -1514,7 +1613,8 @@ struct ToAsciiOpt : public LibCallOptimization {
struct ErrorReportingOpt : public LibCallOptimization {
ErrorReportingOpt(int S = -1) : StreamArg(S) {}
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &) override {
// Error reporting calls should be cold, so mark them as such.
// This applies even to non-builtin calls: it is only a hint and applies to
// functions that the frontend might not understand as builtins.
@@ -1528,7 +1628,7 @@ struct ErrorReportingOpt : public LibCallOptimization {
CI->addAttribute(AttributeSet::FunctionIndex, Attribute::Cold);
}
- return 0;
+ return nullptr;
}
protected:
@@ -1565,7 +1665,7 @@ struct PrintFOpt : public LibCallOptimization {
// Check for a fixed format string.
StringRef FormatStr;
if (!getConstantStringInfo(CI->getArgOperand(0), FormatStr))
- return 0;
+ return nullptr;
// Empty format string -> noop.
if (FormatStr.empty()) // Tolerate printf's declared void.
@@ -1576,11 +1676,11 @@ struct PrintFOpt : public LibCallOptimization {
// is used; in general the printf return value is not compatible with either
// putchar() or puts().
if (!CI->use_empty())
- return 0;
+ return nullptr;
// printf("x") -> putchar('x'), even for '%'.
if (FormatStr.size() == 1) {
- Value *Res = EmitPutChar(B.getInt32(FormatStr[0]), B, TD, TLI);
+ Value *Res = EmitPutChar(B.getInt32(FormatStr[0]), B, DL, TLI);
if (CI->use_empty() || !Res) return Res;
return B.CreateIntCast(Res, CI->getType(), true);
}
@@ -1592,7 +1692,7 @@ struct PrintFOpt : public LibCallOptimization {
// pass to be run after this pass, to merge duplicate strings.
FormatStr = FormatStr.drop_back();
Value *GV = B.CreateGlobalString(FormatStr, "str");
- Value *NewCI = EmitPutS(GV, B, TD, TLI);
+ Value *NewCI = EmitPutS(GV, B, DL, TLI);
return (CI->use_empty() || !NewCI) ?
NewCI :
ConstantInt::get(CI->getType(), FormatStr.size()+1);
@@ -1602,7 +1702,7 @@ struct PrintFOpt : public LibCallOptimization {
// printf("%c", chr) --> putchar(chr)
if (FormatStr == "%c" && CI->getNumArgOperands() > 1 &&
CI->getArgOperand(1)->getType()->isIntegerTy()) {
- Value *Res = EmitPutChar(CI->getArgOperand(1), B, TD, TLI);
+ Value *Res = EmitPutChar(CI->getArgOperand(1), B, DL, TLI);
if (CI->use_empty() || !Res) return Res;
return B.CreateIntCast(Res, CI->getType(), true);
@@ -1611,18 +1711,19 @@ struct PrintFOpt : public LibCallOptimization {
// printf("%s\n", str) --> puts(str)
if (FormatStr == "%s\n" && CI->getNumArgOperands() > 1 &&
CI->getArgOperand(1)->getType()->isPointerTy()) {
- return EmitPutS(CI->getArgOperand(1), B, TD, TLI);
+ return EmitPutS(CI->getArgOperand(1), B, DL, TLI);
}
- return 0;
+ return nullptr;
}
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
// Require one fixed pointer argument and an integer/void result.
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() < 1 || !FT->getParamType(0)->isPointerTy() ||
!(FT->getReturnType()->isIntegerTy() ||
FT->getReturnType()->isVoidTy()))
- return 0;
+ return nullptr;
if (Value *V = optimizeFixedFormatString(Callee, CI, B)) {
return V;
@@ -1639,7 +1740,7 @@ struct PrintFOpt : public LibCallOptimization {
B.Insert(New);
return New;
}
- return 0;
+ return nullptr;
}
};
@@ -1649,7 +1750,7 @@ struct SPrintFOpt : public LibCallOptimization {
// Check for a fixed format string.
StringRef FormatStr;
if (!getConstantStringInfo(CI->getArgOperand(1), FormatStr))
- return 0;
+ return nullptr;
// If we just have a format string (nothing else crazy) transform it.
if (CI->getNumArgOperands() == 2) {
@@ -1657,14 +1758,14 @@ struct SPrintFOpt : public LibCallOptimization {
// %% -> % in the future if we cared.
for (unsigned i = 0, e = FormatStr.size(); i != e; ++i)
if (FormatStr[i] == '%')
- return 0; // we found a format specifier, bail out.
+ return nullptr; // we found a format specifier, bail out.
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return nullptr;
// sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1)
B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
- ConstantInt::get(TD->getIntPtrType(*Context), // Copy the
+ ConstantInt::get(DL->getIntPtrType(*Context), // Copy the
FormatStr.size() + 1), 1); // nul byte.
return ConstantInt::get(CI->getType(), FormatStr.size());
}
@@ -1673,12 +1774,12 @@ struct SPrintFOpt : public LibCallOptimization {
// and have an extra operand.
if (FormatStr.size() != 2 || FormatStr[0] != '%' ||
CI->getNumArgOperands() < 3)
- return 0;
+ return nullptr;
// Decode the second character of the format string.
if (FormatStr[1] == 'c') {
// sprintf(dst, "%c", chr) --> *(i8*)dst = chr; *((i8*)dst+1) = 0
- if (!CI->getArgOperand(2)->getType()->isIntegerTy()) return 0;
+ if (!CI->getArgOperand(2)->getType()->isIntegerTy()) return nullptr;
Value *V = B.CreateTrunc(CI->getArgOperand(2), B.getInt8Ty(), "char");
Value *Ptr = CastToCStr(CI->getArgOperand(0), B);
B.CreateStore(V, Ptr);
@@ -1690,14 +1791,14 @@ struct SPrintFOpt : public LibCallOptimization {
if (FormatStr[1] == 's') {
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return nullptr;
// sprintf(dest, "%s", str) -> llvm.memcpy(dest, str, strlen(str)+1, 1)
- if (!CI->getArgOperand(2)->getType()->isPointerTy()) return 0;
+ if (!CI->getArgOperand(2)->getType()->isPointerTy()) return nullptr;
- Value *Len = EmitStrLen(CI->getArgOperand(2), B, TD, TLI);
+ Value *Len = EmitStrLen(CI->getArgOperand(2), B, DL, TLI);
if (!Len)
- return 0;
+ return nullptr;
Value *IncLen = B.CreateAdd(Len,
ConstantInt::get(Len->getType(), 1),
"leninc");
@@ -1706,16 +1807,17 @@ struct SPrintFOpt : public LibCallOptimization {
// The sprintf result is the unincremented number of bytes in the string.
return B.CreateIntCast(Len, CI->getType(), false);
}
- return 0;
+ return nullptr;
}
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
// Require two fixed pointer arguments and an integer result.
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 || !FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
!FT->getReturnType()->isIntegerTy())
- return 0;
+ return nullptr;
if (Value *V = OptimizeFixedFormatString(Callee, CI, B)) {
return V;
@@ -1732,7 +1834,7 @@ struct SPrintFOpt : public LibCallOptimization {
B.Insert(New);
return New;
}
- return 0;
+ return nullptr;
}
};
@@ -1745,58 +1847,59 @@ struct FPrintFOpt : public LibCallOptimization {
// All the optimizations depend on the format string.
StringRef FormatStr;
if (!getConstantStringInfo(CI->getArgOperand(1), FormatStr))
- return 0;
+ return nullptr;
// Do not do any of the following transformations if the fprintf return
// value is used; in general the fprintf return value is not compatible
// with fwrite(), fputc() or fputs().
if (!CI->use_empty())
- return 0;
+ return nullptr;
// fprintf(F, "foo") --> fwrite("foo", 3, 1, F)
if (CI->getNumArgOperands() == 2) {
for (unsigned i = 0, e = FormatStr.size(); i != e; ++i)
if (FormatStr[i] == '%') // Could handle %% -> % if we cared.
- return 0; // We found a format specifier.
+ return nullptr; // We found a format specifier.
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return nullptr;
return EmitFWrite(CI->getArgOperand(1),
- ConstantInt::get(TD->getIntPtrType(*Context),
+ ConstantInt::get(DL->getIntPtrType(*Context),
FormatStr.size()),
- CI->getArgOperand(0), B, TD, TLI);
+ CI->getArgOperand(0), B, DL, TLI);
}
// The remaining optimizations require the format string to be "%s" or "%c"
// and have an extra operand.
if (FormatStr.size() != 2 || FormatStr[0] != '%' ||
CI->getNumArgOperands() < 3)
- return 0;
+ return nullptr;
// Decode the second character of the format string.
if (FormatStr[1] == 'c') {
// fprintf(F, "%c", chr) --> fputc(chr, F)
- if (!CI->getArgOperand(2)->getType()->isIntegerTy()) return 0;
- return EmitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B, TD, TLI);
+ if (!CI->getArgOperand(2)->getType()->isIntegerTy()) return nullptr;
+ return EmitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B, DL, TLI);
}
if (FormatStr[1] == 's') {
// fprintf(F, "%s", str) --> fputs(str, F)
if (!CI->getArgOperand(2)->getType()->isPointerTy())
- return 0;
- return EmitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, TD, TLI);
+ return nullptr;
+ return EmitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, DL, TLI);
}
- return 0;
+ return nullptr;
}
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
// Require two fixed parameters as pointers and an integer result.
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 || !FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
!FT->getReturnType()->isIntegerTy())
- return 0;
+ return nullptr;
if (Value *V = optimizeFixedFormatString(Callee, CI, B)) {
return V;
@@ -1813,12 +1916,13 @@ struct FPrintFOpt : public LibCallOptimization {
B.Insert(New);
return New;
}
- return 0;
+ return nullptr;
}
};
struct FWriteOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
ErrorReportingOpt ER(/* StreamArg = */ 3);
(void) ER.callOptimizer(Callee, CI, B);
@@ -1829,12 +1933,12 @@ struct FWriteOpt : public LibCallOptimization {
!FT->getParamType(2)->isIntegerTy() ||
!FT->getParamType(3)->isPointerTy() ||
!FT->getReturnType()->isIntegerTy())
- return 0;
+ return nullptr;
// Get the element size and count.
ConstantInt *SizeC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
ConstantInt *CountC = dyn_cast<ConstantInt>(CI->getArgOperand(2));
- if (!SizeC || !CountC) return 0;
+ if (!SizeC || !CountC) return nullptr;
uint64_t Bytes = SizeC->getZExtValue()*CountC->getZExtValue();
// If this is writing zero records, remove the call (it's a noop).
@@ -1845,61 +1949,63 @@ struct FWriteOpt : public LibCallOptimization {
// This optimisation is only valid if the return value is unused.
if (Bytes == 1 && CI->use_empty()) { // fwrite(S,1,1,F) -> fputc(S[0],F)
Value *Char = B.CreateLoad(CastToCStr(CI->getArgOperand(0), B), "char");
- Value *NewCI = EmitFPutC(Char, CI->getArgOperand(3), B, TD, TLI);
- return NewCI ? ConstantInt::get(CI->getType(), 1) : 0;
+ Value *NewCI = EmitFPutC(Char, CI->getArgOperand(3), B, DL, TLI);
+ return NewCI ? ConstantInt::get(CI->getType(), 1) : nullptr;
}
- return 0;
+ return nullptr;
}
};
struct FPutsOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
ErrorReportingOpt ER(/* StreamArg = */ 1);
(void) ER.callOptimizer(Callee, CI, B);
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return nullptr;
// Require two pointers. Also, we can't optimize if the return value is used.
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 || !FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
!CI->use_empty())
- return 0;
+ return nullptr;
// fputs(s,F) --> fwrite(s,1,strlen(s),F)
uint64_t Len = GetStringLength(CI->getArgOperand(0));
- if (!Len) return 0;
+ if (!Len) return nullptr;
// Known to have no uses (see above).
return EmitFWrite(CI->getArgOperand(0),
- ConstantInt::get(TD->getIntPtrType(*Context), Len-1),
- CI->getArgOperand(1), B, TD, TLI);
+ ConstantInt::get(DL->getIntPtrType(*Context), Len-1),
+ CI->getArgOperand(1), B, DL, TLI);
}
};
struct PutsOpt : public LibCallOptimization {
- virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ Value *callOptimizer(Function *Callee, CallInst *CI,
+ IRBuilder<> &B) override {
// Require one fixed pointer argument and an integer/void result.
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() < 1 || !FT->getParamType(0)->isPointerTy() ||
!(FT->getReturnType()->isIntegerTy() ||
FT->getReturnType()->isVoidTy()))
- return 0;
+ return nullptr;
// Check for a constant string.
StringRef Str;
if (!getConstantStringInfo(CI->getArgOperand(0), Str))
- return 0;
+ return nullptr;
if (Str.empty() && CI->use_empty()) {
// puts("") -> putchar('\n')
- Value *Res = EmitPutChar(B.getInt32('\n'), B, TD, TLI);
+ Value *Res = EmitPutChar(B.getInt32('\n'), B, DL, TLI);
if (CI->use_empty() || !Res) return Res;
return B.CreateIntCast(Res, CI->getType(), true);
}
- return 0;
+ return nullptr;
}
};
@@ -1908,7 +2014,7 @@ struct PutsOpt : public LibCallOptimization {
namespace llvm {
class LibCallSimplifierImpl {
- const DataLayout *TD;
+ const DataLayout *DL;
const TargetLibraryInfo *TLI;
const LibCallSimplifier *LCS;
bool UnsafeFPShrink;
@@ -1918,11 +2024,11 @@ class LibCallSimplifierImpl {
PowOpt Pow;
Exp2Opt Exp2;
public:
- LibCallSimplifierImpl(const DataLayout *TD, const TargetLibraryInfo *TLI,
+ LibCallSimplifierImpl(const DataLayout *DL, const TargetLibraryInfo *TLI,
const LibCallSimplifier *LCS,
bool UnsafeFPShrink = false)
: Cos(UnsafeFPShrink), Pow(UnsafeFPShrink), Exp2(UnsafeFPShrink) {
- this->TD = TD;
+ this->DL = DL;
this->TLI = TLI;
this->LCS = LCS;
this->UnsafeFPShrink = UnsafeFPShrink;
@@ -1975,6 +2081,7 @@ static MemSetOpt MemSet;
// Math library call optimizations.
static UnaryDoubleFPOpt UnaryDoubleFP(false);
+static BinaryDoubleFPOpt BinaryDoubleFP(false);
static UnaryDoubleFPOpt UnsafeUnaryDoubleFP(true);
static SinCosPiOpt SinCosPi;
@@ -2009,7 +2116,7 @@ LibCallOptimization *LibCallSimplifierImpl::lookupOptimization(CallInst *CI) {
case Intrinsic::exp2:
return &Exp2;
default:
- return 0;
+ return nullptr;
}
}
@@ -2119,7 +2226,7 @@ LibCallOptimization *LibCallSimplifierImpl::lookupOptimization(CallInst *CI) {
case LibFunc::trunc:
if (hasFloatVersion(FuncName))
return &UnaryDoubleFP;
- return 0;
+ return nullptr;
case LibFunc::acos:
case LibFunc::acosh:
case LibFunc::asin:
@@ -2143,11 +2250,16 @@ LibCallOptimization *LibCallSimplifierImpl::lookupOptimization(CallInst *CI) {
case LibFunc::tanh:
if (UnsafeFPShrink && hasFloatVersion(FuncName))
return &UnsafeUnaryDoubleFP;
- return 0;
+ return nullptr;
+ case LibFunc::fmin:
+ case LibFunc::fmax:
+ if (hasFloatVersion(FuncName))
+ return &BinaryDoubleFP;
+ return nullptr;
case LibFunc::memcpy_chk:
return &MemCpyChk;
default:
- return 0;
+ return nullptr;
}
}
@@ -2167,7 +2279,7 @@ LibCallOptimization *LibCallSimplifierImpl::lookupOptimization(CallInst *CI) {
return &StrNCpyChk;
}
- return 0;
+ return nullptr;
}
@@ -2175,15 +2287,15 @@ Value *LibCallSimplifierImpl::optimizeCall(CallInst *CI) {
LibCallOptimization *LCO = lookupOptimization(CI);
if (LCO) {
IRBuilder<> Builder(CI);
- return LCO->optimizeCall(CI, TD, TLI, LCS, Builder);
+ return LCO->optimizeCall(CI, DL, TLI, LCS, Builder);
}
- return 0;
+ return nullptr;
}
-LibCallSimplifier::LibCallSimplifier(const DataLayout *TD,
+LibCallSimplifier::LibCallSimplifier(const DataLayout *DL,
const TargetLibraryInfo *TLI,
bool UnsafeFPShrink) {
- Impl = new LibCallSimplifierImpl(TD, TLI, this, UnsafeFPShrink);
+ Impl = new LibCallSimplifierImpl(DL, TLI, this, UnsafeFPShrink);
}
LibCallSimplifier::~LibCallSimplifier() {
@@ -2191,7 +2303,7 @@ LibCallSimplifier::~LibCallSimplifier() {
}
Value *LibCallSimplifier::optimizeCall(CallInst *CI) {
- if (CI->isNoBuiltin()) return 0;
+ if (CI->isNoBuiltin()) return nullptr;
return Impl->optimizeCall(CI);
}
@@ -2242,8 +2354,6 @@ void LibCallSimplifier::replaceAllUsesWith(Instruction *I, Value *With) const {
// * sqrt(Nroot(x)) -> pow(x,1/(2*N))
// * sqrt(pow(x,y)) -> pow(|x|,y*0.5)
//
-// strchr:
-// * strchr(p, 0) -> strlen(p)
// tan, tanf, tanl:
// * tan(atan(x)) -> x
//
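As a source-level illustration of the fputs/puts rewrites modernized above (illustrative C++ only, not part of the patch; the function names before/after are invented):

    // Rough sketch of what FPutsOpt / PutsOpt do when the call result is
    // unused and the string length is known at compile time.
    #include <cstdio>

    void before(FILE *f) {
      fputs("hello", f);   // constant string, length known, result unused
      puts("");            // empty string, result unused
    }

    void after(FILE *f) {
      fwrite("hello", 1, 5, f); // fputs(s,F) -> fwrite(s, 1, strlen(s), F),
                                // with strlen(s) folded to the known length
      putchar('\n');            // puts("") -> putchar('\n')
    }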
diff --git a/contrib/llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp b/contrib/llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp
index 560f581..0c2fc0a 100644
--- a/contrib/llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp
@@ -59,7 +59,7 @@ bool UnifyFunctionExitNodes::runOnFunction(Function &F) {
// Then unreachable blocks.
if (UnreachableBlocks.empty()) {
- UnreachableBlock = 0;
+ UnreachableBlock = nullptr;
} else if (UnreachableBlocks.size() == 1) {
UnreachableBlock = UnreachableBlocks.front();
} else {
@@ -77,7 +77,7 @@ bool UnifyFunctionExitNodes::runOnFunction(Function &F) {
// Now handle return blocks.
if (ReturningBlocks.empty()) {
- ReturnBlock = 0;
+ ReturnBlock = nullptr;
return false; // No blocks return
} else if (ReturningBlocks.size() == 1) {
ReturnBlock = ReturningBlocks.front(); // Already has a single return block
@@ -91,9 +91,9 @@ bool UnifyFunctionExitNodes::runOnFunction(Function &F) {
BasicBlock *NewRetBlock = BasicBlock::Create(F.getContext(),
"UnifiedReturnBlock", &F);
- PHINode *PN = 0;
+ PHINode *PN = nullptr;
if (F.getReturnType()->isVoidTy()) {
- ReturnInst::Create(F.getContext(), NULL, NewRetBlock);
+ ReturnInst::Create(F.getContext(), nullptr, NewRetBlock);
} else {
// If the function doesn't return void... add a PHI node to the block...
PN = PHINode::Create(F.getReturnType(), ReturningBlocks.size(),
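For context on the hunk above, a rough C++ picture (illustrative only; the function names are invented) of what UnifyFunctionExitNodes does: every return is funneled through one exit block, and the returned values are merged by the PHI node created here.

    int before(int x) {
      if (x > 0)
        return x * 2;      // two separate return points
      return -x;
    }

    int after(int x) {
      int ret;             // corresponds to the PHI node in "UnifiedReturnBlock"
      if (x > 0)
        ret = x * 2;
      else
        ret = -x;
      return ret;          // the single unified return
    }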
diff --git a/contrib/llvm/lib/Transforms/Utils/Utils.cpp b/contrib/llvm/lib/Transforms/Utils/Utils.cpp
index c3df215..ed4f45c 100644
--- a/contrib/llvm/lib/Transforms/Utils/Utils.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/Utils.cpp
@@ -13,14 +13,15 @@
//===----------------------------------------------------------------------===//
#include "llvm/InitializePasses.h"
-#include "llvm/PassRegistry.h"
#include "llvm-c/Initialization.h"
+#include "llvm/PassRegistry.h"
using namespace llvm;
/// initializeTransformUtils - Initialize all passes in the TransformUtils
/// library.
void llvm::initializeTransformUtils(PassRegistry &Registry) {
+ initializeAddDiscriminatorsPass(Registry);
initializeBreakCriticalEdgesPass(Registry);
initializeInstNamerPass(Registry);
initializeLCSSAPass(Registry);
diff --git a/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp b/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
index 457fc80..0f20e6d 100644
--- a/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
@@ -71,12 +71,12 @@ Value *llvm::MapValue(const Value *V, ValueToValueMapTy &VM, RemapFlags Flags,
// Check all operands to see if any need to be remapped.
for (unsigned i = 0, e = MD->getNumOperands(); i != e; ++i) {
Value *OP = MD->getOperand(i);
- if (OP == 0) continue;
+ if (!OP) continue;
Value *Mapped_OP = MapValue(OP, VM, Flags, TypeMapper, Materializer);
// Use identity map if Mapped_Op is null and we can ignore missing
// entries.
if (Mapped_OP == OP ||
- (Mapped_OP == 0 && (Flags & RF_IgnoreMissingEntries)))
+ (Mapped_OP == nullptr && (Flags & RF_IgnoreMissingEntries)))
continue;
// Ok, at least one operand needs remapping.
@@ -84,13 +84,13 @@ Value *llvm::MapValue(const Value *V, ValueToValueMapTy &VM, RemapFlags Flags,
Elts.reserve(MD->getNumOperands());
for (i = 0; i != e; ++i) {
Value *Op = MD->getOperand(i);
- if (Op == 0)
- Elts.push_back(0);
+ if (!Op)
+ Elts.push_back(nullptr);
else {
Value *Mapped_Op = MapValue(Op, VM, Flags, TypeMapper, Materializer);
// Use identity map if Mapped_Op is null and we can ignore missing
// entries.
- if (Mapped_Op == 0 && (Flags & RF_IgnoreMissingEntries))
+ if (Mapped_Op == nullptr && (Flags & RF_IgnoreMissingEntries))
Mapped_Op = Op;
Elts.push_back(Mapped_Op);
}
@@ -112,8 +112,8 @@ Value *llvm::MapValue(const Value *V, ValueToValueMapTy &VM, RemapFlags Flags,
// Okay, this either must be a constant (which may or may not be mappable) or
// is something that is not in the mapping table.
Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V));
- if (C == 0)
- return 0;
+ if (!C)
+ return nullptr;
if (BlockAddress *BA = dyn_cast<BlockAddress>(C)) {
Function *F =
@@ -126,7 +126,7 @@ Value *llvm::MapValue(const Value *V, ValueToValueMapTy &VM, RemapFlags Flags,
// Otherwise, we have some other constant to remap. Start by checking to see
// if all operands have an identity remapping.
unsigned OpNo = 0, NumOperands = C->getNumOperands();
- Value *Mapped = 0;
+ Value *Mapped = nullptr;
for (; OpNo != NumOperands; ++OpNo) {
Value *Op = C->getOperand(OpNo);
Mapped = MapValue(Op, VM, Flags, TypeMapper, Materializer);
@@ -187,7 +187,7 @@ void llvm::RemapInstruction(Instruction *I, ValueToValueMapTy &VMap,
for (User::op_iterator op = I->op_begin(), E = I->op_end(); op != E; ++op) {
Value *V = MapValue(*op, VMap, Flags, TypeMapper, Materializer);
// If we aren't ignoring missing entries, assert that something happened.
- if (V != 0)
+ if (V)
*op = V;
else
assert((Flags & RF_IgnoreMissingEntries) &&
@@ -199,7 +199,7 @@ void llvm::RemapInstruction(Instruction *I, ValueToValueMapTy &VMap,
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
Value *V = MapValue(PN->getIncomingBlock(i), VMap, Flags);
// If we aren't ignoring missing entries, assert that something happened.
- if (V != 0)
+ if (V)
PN->setIncomingBlock(i, cast<BasicBlock>(V));
else
assert((Flags & RF_IgnoreMissingEntries) &&
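A minimal sketch of how the remapping entry points touched above are typically driven, assuming LLVM 3.5-era headers; cloneAndRemap is an invented helper for the illustration, not an API added by this patch.

    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/Function.h"
    #include "llvm/Transforms/Utils/Cloning.h"
    #include "llvm/Transforms/Utils/ValueMapper.h"
    using namespace llvm;

    // Clone a block, then rewrite each cloned instruction so its operands
    // point at the clones recorded in VMap; operands with no mapping are
    // left untouched because of RF_IgnoreMissingEntries.
    static BasicBlock *cloneAndRemap(BasicBlock *BB, Function *NewF) {
      ValueToValueMapTy VMap;                             // old value -> new value
      BasicBlock *Clone = CloneBasicBlock(BB, VMap, ".clone", NewF);
      for (BasicBlock::iterator I = Clone->begin(), E = Clone->end(); I != E; ++I)
        RemapInstruction(&*I, VMap, RF_IgnoreMissingEntries);
      return Clone;
    }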
diff --git a/contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp b/contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp
index c5e1dcb..28ec83b 100644
--- a/contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp
+++ b/contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp
@@ -15,7 +15,6 @@
//===----------------------------------------------------------------------===//
#define BBV_NAME "bb-vectorize"
-#define DEBUG_TYPE BBV_NAME
#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
@@ -26,7 +25,6 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
@@ -34,6 +32,7 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
@@ -41,15 +40,17 @@
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
using namespace llvm;
+#define DEBUG_TYPE BBV_NAME
+
static cl::opt<bool>
IgnoreTargetInfo("bb-vectorize-ignore-target-info", cl::init(false),
cl::Hidden, cl::desc("Ignore target information"));
@@ -122,6 +123,10 @@ NoMath("bb-vectorize-no-math", cl::init(false), cl::Hidden,
cl::desc("Don't try to vectorize floating-point math intrinsics"));
static cl::opt<bool>
+ NoBitManipulation("bb-vectorize-no-bitmanip", cl::init(false), cl::Hidden,
+ cl::desc("Don't try to vectorize BitManipulation intrinsics"));
+
+static cl::opt<bool>
NoFMA("bb-vectorize-no-fma", cl::init(false), cl::Hidden,
cl::desc("Don't try to vectorize the fused-multiply-add intrinsic"));
@@ -199,10 +204,11 @@ namespace {
BBVectorize(Pass *P, const VectorizeConfig &C)
: BasicBlockPass(ID), Config(C) {
AA = &P->getAnalysis<AliasAnalysis>();
- DT = &P->getAnalysis<DominatorTree>();
+ DT = &P->getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &P->getAnalysis<ScalarEvolution>();
- TD = P->getAnalysisIfAvailable<DataLayout>();
- TTI = IgnoreTargetInfo ? 0 : &P->getAnalysis<TargetTransformInfo>();
+ DataLayoutPass *DLP = P->getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
+ TTI = IgnoreTargetInfo ? nullptr : &P->getAnalysis<TargetTransformInfo>();
}
typedef std::pair<Value *, Value *> ValuePair;
@@ -214,7 +220,7 @@ namespace {
AliasAnalysis *AA;
DominatorTree *DT;
ScalarEvolution *SE;
- DataLayout *TD;
+ const DataLayout *DL;
const TargetTransformInfo *TTI;
// FIXME: const correct?
@@ -278,7 +284,7 @@ namespace {
bool trackUsesOfI(DenseSet<Value *> &Users,
AliasSetTracker &WriteSet, Instruction *I,
Instruction *J, bool UpdateUsers = true,
- DenseSet<ValuePair> *LoadMoveSetPairs = 0);
+ DenseSet<ValuePair> *LoadMoveSetPairs = nullptr);
void computePairsConnectedTo(
DenseMap<Value *, std::vector<Value *> > &CandidatePairs,
@@ -291,8 +297,8 @@ namespace {
bool pairsConflict(ValuePair P, ValuePair Q,
DenseSet<ValuePair> &PairableInstUsers,
DenseMap<ValuePair, std::vector<ValuePair> >
- *PairableInstUserMap = 0,
- DenseSet<VPPair> *PairableInstUserPairSet = 0);
+ *PairableInstUserMap = nullptr,
+ DenseSet<VPPair> *PairableInstUserPairSet = nullptr);
bool pairWillFormCycle(ValuePair P,
DenseMap<ValuePair, std::vector<ValuePair> > &PairableInstUsers,
@@ -388,6 +394,8 @@ namespace {
void combineMetadata(Instruction *K, const Instruction *J);
bool vectorizeBB(BasicBlock &BB) {
+ if (skipOptnoneFunction(BB))
+ return false;
if (!DT->isReachableFromEntry(&BB)) {
DEBUG(dbgs() << "BBV: skipping unreachable " << BB.getName() <<
" in " << BB.getParent()->getName() << "\n");
@@ -428,24 +436,27 @@ namespace {
return changed;
}
- virtual bool runOnBasicBlock(BasicBlock &BB) {
+ bool runOnBasicBlock(BasicBlock &BB) override {
+ // OptimizeNone check deferred to vectorizeBB().
+
AA = &getAnalysis<AliasAnalysis>();
- DT = &getAnalysis<DominatorTree>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &getAnalysis<ScalarEvolution>();
- TD = getAnalysisIfAvailable<DataLayout>();
- TTI = IgnoreTargetInfo ? 0 : &getAnalysis<TargetTransformInfo>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
+ TTI = IgnoreTargetInfo ? nullptr : &getAnalysis<TargetTransformInfo>();
return vectorizeBB(BB);
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
BasicBlockPass::getAnalysisUsage(AU);
AU.addRequired<AliasAnalysis>();
- AU.addRequired<DominatorTree>();
+ AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<ScalarEvolution>();
AU.addRequired<TargetTransformInfo>();
AU.addPreserved<AliasAnalysis>();
- AU.addPreserved<DominatorTree>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
AU.addPreserved<ScalarEvolution>();
AU.setPreservesCFG();
}
@@ -528,7 +539,11 @@ namespace {
// Returns the cost of the provided instruction using TTI.
// This does not handle loads and stores.
- unsigned getInstrCost(unsigned Opcode, Type *T1, Type *T2) {
+ unsigned getInstrCost(unsigned Opcode, Type *T1, Type *T2,
+ TargetTransformInfo::OperandValueKind Op1VK =
+ TargetTransformInfo::OK_AnyValue,
+ TargetTransformInfo::OperandValueKind Op2VK =
+ TargetTransformInfo::OK_AnyValue) {
switch (Opcode) {
default: break;
case Instruction::GetElementPtr:
@@ -558,7 +573,7 @@ namespace {
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
- return TTI->getArithmeticInstrCost(Opcode, T1);
+ return TTI->getArithmeticInstrCost(Opcode, T1, Op1VK, Op2VK);
case Instruction::Select:
case Instruction::ICmp:
case Instruction::FCmp:
@@ -626,11 +641,11 @@ namespace {
int64_t Offset = IntOff->getSExtValue();
Type *VTy = IPtr->getType()->getPointerElementType();
- int64_t VTyTSS = (int64_t) TD->getTypeStoreSize(VTy);
+ int64_t VTyTSS = (int64_t) DL->getTypeStoreSize(VTy);
Type *VTy2 = JPtr->getType()->getPointerElementType();
if (VTy != VTy2 && Offset < 0) {
- int64_t VTy2TSS = (int64_t) TD->getTypeStoreSize(VTy2);
+ int64_t VTy2TSS = (int64_t) DL->getTypeStoreSize(VTy2);
OffsetInElmts = Offset/VTy2TSS;
return (abs64(Offset) % VTy2TSS) == 0;
}
@@ -664,7 +679,20 @@ namespace {
case Intrinsic::exp:
case Intrinsic::exp2:
case Intrinsic::pow:
+ case Intrinsic::round:
+ case Intrinsic::copysign:
+ case Intrinsic::ceil:
+ case Intrinsic::nearbyint:
+ case Intrinsic::rint:
+ case Intrinsic::trunc:
+ case Intrinsic::floor:
+ case Intrinsic::fabs:
return Config.VectorizeMath;
+ case Intrinsic::bswap:
+ case Intrinsic::ctpop:
+ case Intrinsic::ctlz:
+ case Intrinsic::cttz:
+ return Config.VectorizeBitManipulations;
case Intrinsic::fma:
case Intrinsic::fmuladd:
return Config.VectorizeFMA;
@@ -813,7 +841,7 @@ namespace {
// It is important to cleanup here so that future iterations of this
// function have less work to do.
- (void) SimplifyInstructionsInBlock(&BB, TD, AA->getTargetLibraryInfo());
+ (void) SimplifyInstructionsInBlock(&BB, DL, AA->getTargetLibraryInfo());
return true;
}
@@ -868,7 +896,7 @@ namespace {
}
// We can't vectorize memory operations without target data
- if (TD == 0 && IsSimpleLoadStore)
+ if (!DL && IsSimpleLoadStore)
return false;
Type *T1, *T2;
@@ -905,7 +933,7 @@ namespace {
if (T2->isX86_FP80Ty() || T2->isPPC_FP128Ty() || T2->isX86_MMXTy())
return false;
- if ((!Config.VectorizePointers || TD == 0) &&
+ if ((!Config.VectorizePointers || !DL) &&
(T1->getScalarType()->isPointerTy() ||
T2->getScalarType()->isPointerTy()))
return false;
@@ -969,7 +997,7 @@ namespace {
// with the lower offset has an alignment suitable for the
// vector type.
- unsigned VecAlignment = TD->getPrefTypeAlignment(VType);
+ unsigned VecAlignment = DL->getPrefTypeAlignment(VType);
if (BottomAlignment < VecAlignment)
return false;
}
@@ -1009,13 +1037,49 @@ namespace {
unsigned JCost = getInstrCost(J->getOpcode(), JT1, JT2);
Type *VT1 = getVecTypeForPair(IT1, JT1),
*VT2 = getVecTypeForPair(IT2, JT2);
+ TargetTransformInfo::OperandValueKind Op1VK =
+ TargetTransformInfo::OK_AnyValue;
+ TargetTransformInfo::OperandValueKind Op2VK =
+ TargetTransformInfo::OK_AnyValue;
+
+ // On some targets (for example, X86) the cost of a vector shift may vary
+ // depending on whether the second operand is a uniform or a
+ // non-uniform constant.
+ switch (I->getOpcode()) {
+ default : break;
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+
+ // If both I and J are scalar shifts by constant, then the
+ // merged vector shift count would be either a constant splat value
+ // or a non-uniform vector of constants.
+ if (ConstantInt *CII = dyn_cast<ConstantInt>(I->getOperand(1))) {
+ if (ConstantInt *CIJ = dyn_cast<ConstantInt>(J->getOperand(1)))
+ Op2VK = CII == CIJ ? TargetTransformInfo::OK_UniformConstantValue :
+ TargetTransformInfo::OK_NonUniformConstantValue;
+ } else {
+ // Check for a splat of a constant or for a non-uniform vector
+ // of constants.
+ Value *IOp = I->getOperand(1);
+ Value *JOp = J->getOperand(1);
+ if ((isa<ConstantVector>(IOp) || isa<ConstantDataVector>(IOp)) &&
+ (isa<ConstantVector>(JOp) || isa<ConstantDataVector>(JOp))) {
+ Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
+ Constant *SplatValue = cast<Constant>(IOp)->getSplatValue();
+ if (SplatValue != nullptr &&
+ SplatValue == cast<Constant>(JOp)->getSplatValue())
+ Op2VK = TargetTransformInfo::OK_UniformConstantValue;
+ }
+ }
+ }
// Note that this procedure is incorrect for insert and extract element
// instructions (because combining these often results in a shuffle),
// but this cost is ignored (because insert and extract element
// instructions are assigned a zero depth factor and are not really
// fused in general).
- unsigned VCost = getInstrCost(I->getOpcode(), VT1, VT2);
+ unsigned VCost = getInstrCost(I->getOpcode(), VT1, VT2, Op1VK, Op2VK);
if (VCost > ICost + JCost)
return false;
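A small illustrative C++ source pattern (not part of the patch) for the operand-kind logic added above: pairing two scalar shifts by the same constant yields a splat shift amount, while different constants yield a non-uniform constant vector, and TTI may price the two differently.

    void splat_shift(int &a, int &b) {
      a <<= 3;             // pairing these two shifts gives the vector shift
      b <<= 3;             // amount <3, 3>: OK_UniformConstantValue
    }

    void nonuniform_shift(int &a, int &b) {
      a <<= 3;             // pairing these gives the constant but non-uniform
      b <<= 5;             // amount <3, 5>: OK_NonUniformConstantValue
    }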
@@ -1033,13 +1097,14 @@ namespace {
CostSavings = ICost + JCost - VCost;
}
- // The powi intrinsic is special because only the first argument is
- // vectorized, the second arguments must be equal.
+ // The powi, ctlz and cttz intrinsics are special because only the first
+ // argument is vectorized; the second arguments must be equal.
CallInst *CI = dyn_cast<CallInst>(I);
Function *FI;
if (CI && (FI = CI->getCalledFunction())) {
Intrinsic::ID IID = (Intrinsic::ID) FI->getIntrinsicID();
- if (IID == Intrinsic::powi) {
+ if (IID == Intrinsic::powi || IID == Intrinsic::ctlz ||
+ IID == Intrinsic::cttz) {
Value *A1I = CI->getArgOperand(1),
*A1J = cast<CallInst>(J)->getArgOperand(1);
const SCEV *A1ISCEV = SE->getSCEV(A1I),
@@ -1063,7 +1128,8 @@ namespace {
assert(CI->getNumArgOperands() == CJ->getNumArgOperands() &&
"Intrinsic argument counts differ");
for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
- if (IID == Intrinsic::powi && i == 1)
+ if ((IID == Intrinsic::powi || IID == Intrinsic::ctlz ||
+ IID == Intrinsic::cttz) && i == 1)
Tys.push_back(CI->getArgOperand(i)->getType());
else
Tys.push_back(getVecTypeForPair(CI->getArgOperand(i)->getType(),
@@ -1185,7 +1251,7 @@ namespace {
if (I->mayWriteToMemory()) WriteSet.add(I);
bool JAfterStart = IAfterStart;
- BasicBlock::iterator J = llvm::next(I);
+ BasicBlock::iterator J = std::next(I);
for (unsigned ss = 0; J != E && ss <= Config.SearchLimit; ++J, ++ss) {
if (J == Start) JAfterStart = true;
@@ -1230,7 +1296,7 @@ namespace {
// The next call to this function must start after the last instruction
// selected during this invocation.
if (JAfterStart) {
- Start = llvm::next(J);
+ Start = std::next(J);
IAfterStart = JAfterStart = false;
}
@@ -1272,13 +1338,15 @@ namespace {
// For each possible pairing for this variable, look at the uses of
// the first value...
- for (Value::use_iterator I = P.first->use_begin(),
- E = P.first->use_end(); I != E; ++I) {
- if (isa<LoadInst>(*I)) {
+ for (Value::user_iterator I = P.first->user_begin(),
+ E = P.first->user_end();
+ I != E; ++I) {
+ User *UI = *I;
+ if (isa<LoadInst>(UI)) {
// A pair cannot be connected to a load because the load only takes one
// operand (the address) and it is a scalar even after vectorization.
continue;
- } else if ((SI = dyn_cast<StoreInst>(*I)) &&
+ } else if ((SI = dyn_cast<StoreInst>(UI)) &&
P.first == SI->getPointerOperand()) {
// Similarly, a pair cannot be connected to a store through its
// pointer operand.
@@ -1287,22 +1355,21 @@ namespace {
// For each use of the first variable, look for uses of the second
// variable...
- for (Value::use_iterator J = P.second->use_begin(),
- E2 = P.second->use_end(); J != E2; ++J) {
- if ((SJ = dyn_cast<StoreInst>(*J)) &&
+ for (User *UJ : P.second->users()) {
+ if ((SJ = dyn_cast<StoreInst>(UJ)) &&
P.second == SJ->getPointerOperand())
continue;
// Look for <I, J>:
- if (CandidatePairsSet.count(ValuePair(*I, *J))) {
- VPPair VP(P, ValuePair(*I, *J));
+ if (CandidatePairsSet.count(ValuePair(UI, UJ))) {
+ VPPair VP(P, ValuePair(UI, UJ));
ConnectedPairs[VP.first].push_back(VP.second);
PairConnectionTypes.insert(VPPairWithType(VP, PairConnectionDirect));
}
// Look for <J, I>:
- if (CandidatePairsSet.count(ValuePair(*J, *I))) {
- VPPair VP(P, ValuePair(*J, *I));
+ if (CandidatePairsSet.count(ValuePair(UJ, UI))) {
+ VPPair VP(P, ValuePair(UJ, UI));
ConnectedPairs[VP.first].push_back(VP.second);
PairConnectionTypes.insert(VPPairWithType(VP, PairConnectionSwap));
}
@@ -1311,13 +1378,14 @@ namespace {
if (Config.SplatBreaksChain) continue;
// Look for cases where just the first value in the pair is used by
// both members of another pair (splatting).
- for (Value::use_iterator J = P.first->use_begin(); J != E; ++J) {
- if ((SJ = dyn_cast<StoreInst>(*J)) &&
+ for (Value::user_iterator J = P.first->user_begin(); J != E; ++J) {
+ User *UJ = *J;
+ if ((SJ = dyn_cast<StoreInst>(UJ)) &&
P.first == SJ->getPointerOperand())
continue;
- if (CandidatePairsSet.count(ValuePair(*I, *J))) {
- VPPair VP(P, ValuePair(*I, *J));
+ if (CandidatePairsSet.count(ValuePair(UI, UJ))) {
+ VPPair VP(P, ValuePair(UI, UJ));
ConnectedPairs[VP.first].push_back(VP.second);
PairConnectionTypes.insert(VPPairWithType(VP, PairConnectionSplat));
}
@@ -1327,21 +1395,24 @@ namespace {
if (Config.SplatBreaksChain) return;
// Look for cases where just the second value in the pair is used by
// both members of another pair (splatting).
- for (Value::use_iterator I = P.second->use_begin(),
- E = P.second->use_end(); I != E; ++I) {
- if (isa<LoadInst>(*I))
+ for (Value::user_iterator I = P.second->user_begin(),
+ E = P.second->user_end();
+ I != E; ++I) {
+ User *UI = *I;
+ if (isa<LoadInst>(UI))
continue;
- else if ((SI = dyn_cast<StoreInst>(*I)) &&
+ else if ((SI = dyn_cast<StoreInst>(UI)) &&
P.second == SI->getPointerOperand())
continue;
- for (Value::use_iterator J = P.second->use_begin(); J != E; ++J) {
- if ((SJ = dyn_cast<StoreInst>(*J)) &&
+ for (Value::user_iterator J = P.second->user_begin(); J != E; ++J) {
+ User *UJ = *J;
+ if ((SJ = dyn_cast<StoreInst>(UJ)) &&
P.second == SJ->getPointerOperand())
continue;
- if (CandidatePairsSet.count(ValuePair(*I, *J))) {
- VPPair VP(P, ValuePair(*I, *J));
+ if (CandidatePairsSet.count(ValuePair(UI, UJ))) {
+ VPPair VP(P, ValuePair(UI, UJ));
ConnectedPairs[VP.first].push_back(VP.second);
PairConnectionTypes.insert(VPPairWithType(VP, PairConnectionSplat));
}
@@ -1407,7 +1478,7 @@ namespace {
AliasSetTracker WriteSet(*AA);
if (I->mayWriteToMemory()) WriteSet.add(I);
- for (BasicBlock::iterator J = llvm::next(I); J != E; ++J) {
+ for (BasicBlock::iterator J = std::next(I); J != E; ++J) {
(void) trackUsesOfI(Users, WriteSet, I, J);
if (J == EL)
@@ -1614,8 +1685,9 @@ namespace {
C2->first.second == C->first.first ||
C2->first.second == C->first.second ||
pairsConflict(C2->first, C->first, PairableInstUsers,
- UseCycleCheck ? &PairableInstUserMap : 0,
- UseCycleCheck ? &PairableInstUserPairSet : 0)) {
+ UseCycleCheck ? &PairableInstUserMap : nullptr,
+ UseCycleCheck ? &PairableInstUserPairSet
+ : nullptr)) {
if (C2->second >= C->second) {
CanAdd = false;
break;
@@ -1635,8 +1707,9 @@ namespace {
T->second == C->first.first ||
T->second == C->first.second ||
pairsConflict(*T, C->first, PairableInstUsers,
- UseCycleCheck ? &PairableInstUserMap : 0,
- UseCycleCheck ? &PairableInstUserPairSet : 0)) {
+ UseCycleCheck ? &PairableInstUserMap : nullptr,
+ UseCycleCheck ? &PairableInstUserPairSet
+ : nullptr)) {
CanAdd = false;
break;
}
@@ -1653,8 +1726,9 @@ namespace {
C2->first.second == C->first.first ||
C2->first.second == C->first.second ||
pairsConflict(C2->first, C->first, PairableInstUsers,
- UseCycleCheck ? &PairableInstUserMap : 0,
- UseCycleCheck ? &PairableInstUserPairSet : 0)) {
+ UseCycleCheck ? &PairableInstUserMap : nullptr,
+ UseCycleCheck ? &PairableInstUserPairSet
+ : nullptr)) {
CanAdd = false;
break;
}
@@ -1669,8 +1743,9 @@ namespace {
ChosenPairs.begin(), E2 = ChosenPairs.end();
C2 != E2; ++C2) {
if (pairsConflict(*C2, C->first, PairableInstUsers,
- UseCycleCheck ? &PairableInstUserMap : 0,
- UseCycleCheck ? &PairableInstUserPairSet : 0)) {
+ UseCycleCheck ? &PairableInstUserMap : nullptr,
+ UseCycleCheck ? &PairableInstUserPairSet
+ : nullptr)) {
CanAdd = false;
break;
}
@@ -1751,8 +1826,8 @@ namespace {
for (DenseMap<Value *, Value *>::iterator C = ChosenPairs.begin(),
E = ChosenPairs.end(); C != E; ++C) {
if (pairsConflict(*C, IJ, PairableInstUsers,
- UseCycleCheck ? &PairableInstUserMap : 0,
- UseCycleCheck ? &PairableInstUserPairSet : 0)) {
+ UseCycleCheck ? &PairableInstUserMap : nullptr,
+ UseCycleCheck ? &PairableInstUserPairSet : nullptr)) {
DoesConflict = true;
break;
}
@@ -1901,16 +1976,15 @@ namespace {
Type *VTy = getVecTypeForPair(Ty1, Ty2);
bool NeedsExtraction = false;
- for (Value::use_iterator I = S->first->use_begin(),
- IE = S->first->use_end(); I != IE; ++I) {
- if (ShuffleVectorInst *SI = dyn_cast<ShuffleVectorInst>(*I)) {
+ for (User *U : S->first->users()) {
+ if (ShuffleVectorInst *SI = dyn_cast<ShuffleVectorInst>(U)) {
// Shuffle can be folded if it has no other input
if (isa<UndefValue>(SI->getOperand(1)))
continue;
}
- if (isa<ExtractElementInst>(*I))
+ if (isa<ExtractElementInst>(U))
continue;
- if (PrunedDAGInstrs.count(*I))
+ if (PrunedDAGInstrs.count(U))
continue;
NeedsExtraction = true;
break;
@@ -1933,16 +2007,15 @@ namespace {
}
NeedsExtraction = false;
- for (Value::use_iterator I = S->second->use_begin(),
- IE = S->second->use_end(); I != IE; ++I) {
- if (ShuffleVectorInst *SI = dyn_cast<ShuffleVectorInst>(*I)) {
+ for (User *U : S->second->users()) {
+ if (ShuffleVectorInst *SI = dyn_cast<ShuffleVectorInst>(U)) {
// Shuffle can be folded if it has no other input
if (isa<UndefValue>(SI->getOperand(1)))
continue;
}
- if (isa<ExtractElementInst>(*I))
+ if (isa<ExtractElementInst>(U))
continue;
- if (PrunedDAGInstrs.count(*I))
+ if (PrunedDAGInstrs.count(U))
continue;
NeedsExtraction = true;
break;
@@ -2324,7 +2397,7 @@ namespace {
} while ((LIENext =
dyn_cast<InsertElementInst>(LIENext->getOperand(0))));
- LIENext = 0;
+ LIENext = nullptr;
Value *LIEPrev = UndefValue::get(ArgTypeH);
for (unsigned i = 0; i < numElemL; ++i) {
if (isa<UndefValue>(VectElemts[i])) continue;
@@ -2392,14 +2465,14 @@ namespace {
if ((LEE || LSV) && (HEE || HSV) && !IsSizeChangeShuffle) {
// We can have at most two unique vector inputs.
bool CanUseInputs = true;
- Value *I1, *I2 = 0;
+ Value *I1, *I2 = nullptr;
if (LEE) {
I1 = LEE->getOperand(0);
} else {
I1 = LSV->getOperand(0);
I2 = LSV->getOperand(1);
if (I2 == I1 || isa<UndefValue>(I2))
- I2 = 0;
+ I2 = nullptr;
}
if (HEE) {
@@ -2715,10 +2788,11 @@ namespace {
ReplacedOperands[o] = Intrinsic::getDeclaration(M, IID, VArgType);
continue;
- } else if (IID == Intrinsic::powi && o == 1) {
- // The second argument of powi is a single integer and we've already
- // checked that both arguments are equal. As a result, we just keep
- // I's second argument.
+ } else if ((IID == Intrinsic::powi || IID == Intrinsic::ctlz ||
+ IID == Intrinsic::cttz) && o == 1) {
+ // The second argument of powi/ctlz/cttz is a single integer/constant
+ // and we've already checked that both arguments are equal.
+ // As a result, we just keep I's second argument.
ReplacedOperands[o] = I->getOperand(o);
continue;
}
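An illustrative example of the powi/ctlz/cttz restriction described above (illustrative C++ only; __builtin_powi is the usual compiler builtin for llvm.powi and is assumed here, not introduced by the patch):

    double powi_pair(double x, double y) {
      double a = __builtin_powi(x, 3);   // same exponent in both calls, so the
      double b = __builtin_powi(y, 3);   // pair can be fused; the exponent 3
      return a + b;                      // stays a single scalar second operand
    }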
@@ -2795,7 +2869,7 @@ namespace {
DenseSet<ValuePair> &LoadMoveSetPairs,
Instruction *I, Instruction *J) {
// Skip to the first instruction past I.
- BasicBlock::iterator L = llvm::next(BasicBlock::iterator(I));
+ BasicBlock::iterator L = std::next(BasicBlock::iterator(I));
DenseSet<Value *> Users;
AliasSetTracker WriteSet(*AA);
@@ -2817,7 +2891,7 @@ namespace {
Instruction *&InsertionPt,
Instruction *I, Instruction *J) {
// Skip to the first instruction past I.
- BasicBlock::iterator L = llvm::next(BasicBlock::iterator(I));
+ BasicBlock::iterator L = std::next(BasicBlock::iterator(I));
DenseSet<Value *> Users;
AliasSetTracker WriteSet(*AA);
@@ -2848,7 +2922,7 @@ namespace {
DenseSet<ValuePair> &LoadMoveSetPairs,
Instruction *I) {
// Skip to the first instruction past I.
- BasicBlock::iterator L = llvm::next(BasicBlock::iterator(I));
+ BasicBlock::iterator L = std::next(BasicBlock::iterator(I));
DenseSet<Value *> Users;
AliasSetTracker WriteSet(*AA);
@@ -2903,7 +2977,7 @@ namespace {
switch (Kind) {
default:
- K->setMetadata(Kind, 0); // Remove unknown metadata
+ K->setMetadata(Kind, nullptr); // Remove unknown metadata
break;
case LLVMContext::MD_tbaa:
K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
@@ -3074,7 +3148,7 @@ namespace {
// Instruction insertion point:
Instruction *InsertionPt = K;
- Instruction *K1 = 0, *K2 = 0;
+ Instruction *K1 = nullptr, *K2 = nullptr;
replaceOutputsOfPair(Context, L, H, K, InsertionPt, K1, K2);
// The use dag of the first original instruction must be moved to after
@@ -3119,7 +3193,7 @@ namespace {
}
// Before removing I, set the iterator to the next instruction.
- PI = llvm::next(BasicBlock::iterator(I));
+ PI = std::next(BasicBlock::iterator(I));
if (cast<Instruction>(PI) == J)
++PI;
@@ -3141,7 +3215,7 @@ static const char bb_vectorize_name[] = "Basic-Block Vectorization";
INITIALIZE_PASS_BEGIN(BBVectorize, BBV_NAME, bb_vectorize_name, false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_END(BBVectorize, BBV_NAME, bb_vectorize_name, false, false)
@@ -3164,6 +3238,7 @@ VectorizeConfig::VectorizeConfig() {
VectorizePointers = !::NoPointers;
VectorizeCasts = !::NoCasts;
VectorizeMath = !::NoMath;
+ VectorizeBitManipulations = !::NoBitManipulation;
VectorizeFMA = !::NoFMA;
VectorizeSelect = !::NoSelect;
VectorizeCmp = !::NoCmp;
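For reference, the kind of straight-line code the new VectorizeBitManipulations option (disabled with the -bb-vectorize-no-bitmanip flag added above) is aimed at; illustrative C++ only, and __builtin_popcount is the usual wrapper for llvm.ctpop:

    void popcounts(const unsigned *in, unsigned *out) {
      // Two adjacent, independent ctpop calls: candidates for pairing.
      out[0] = __builtin_popcount(in[0]);
      out[1] = __builtin_popcount(in[1]);
    }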
diff --git a/contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index f9f6b18..531d349 100644
--- a/contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -42,9 +42,6 @@
//
//===----------------------------------------------------------------------===//
-#define LV_NAME "loop-vectorize"
-#define DEBUG_TYPE LV_NAME
-
#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/EquivalenceClasses.h"
@@ -54,9 +51,11 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/AliasSetTracker.h"
+#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
@@ -65,34 +64,45 @@
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Analysis/Verifier.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
+#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/ValueHandle.h"
-#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/VectorUtils.h"
#include <algorithm>
#include <map>
+#include <tuple>
using namespace llvm;
using namespace llvm::PatternMatch;
+#define LV_NAME "loop-vectorize"
+#define DEBUG_TYPE LV_NAME
+
+STATISTIC(LoopsVectorized, "Number of loops vectorized");
+STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
+
static cl::opt<unsigned>
VectorizationFactor("force-vector-width", cl::init(0), cl::Hidden,
cl::desc("Sets the SIMD width. Zero is autoselect."));
@@ -114,6 +124,21 @@ TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16),
"trip count that is smaller than this "
"value."));
+/// This enables versioning on the strides of symbolically striding memory
+/// accesses in code like the following.
+/// for (i = 0; i < N; ++i)
+/// A[i * Stride1] += B[i * Stride2] ...
+///
+/// Will be roughly translated to
+/// if (Stride1 == 1 && Stride2 == 1) {
+/// for (i = 0; i < N; i+=4)
+/// A[i:i+3] += ...
+/// } else
+/// ...
+static cl::opt<bool> EnableMemAccessVersioning(
+ "enable-mem-access-versioning", cl::init(true), cl::Hidden,
+ cl::desc("Enable symblic stride memory access versioning"));
+
/// We don't unroll loops with a known constant trip count below this number.
static const unsigned TinyTripCountUnrollThreshold = 128;
@@ -124,11 +149,60 @@ static const unsigned RuntimeMemoryCheckThreshold = 8;
/// Maximum simd width.
static const unsigned MaxVectorWidth = 64;
+static cl::opt<unsigned> ForceTargetNumScalarRegs(
+ "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
+ cl::desc("A flag that overrides the target's number of scalar registers."));
+
+static cl::opt<unsigned> ForceTargetNumVectorRegs(
+ "force-target-num-vector-regs", cl::init(0), cl::Hidden,
+ cl::desc("A flag that overrides the target's number of vector registers."));
+
/// Maximum vectorization unroll count.
static const unsigned MaxUnrollFactor = 16;
-/// The cost of a loop that is considered 'small' by the unroller.
-static const unsigned SmallLoopCost = 20;
+static cl::opt<unsigned> ForceTargetMaxScalarUnrollFactor(
+ "force-target-max-scalar-unroll", cl::init(0), cl::Hidden,
+ cl::desc("A flag that overrides the target's max unroll factor for scalar "
+ "loops."));
+
+static cl::opt<unsigned> ForceTargetMaxVectorUnrollFactor(
+ "force-target-max-vector-unroll", cl::init(0), cl::Hidden,
+ cl::desc("A flag that overrides the target's max unroll factor for "
+ "vectorized loops."));
+
+static cl::opt<unsigned> ForceTargetInstructionCost(
+ "force-target-instruction-cost", cl::init(0), cl::Hidden,
+ cl::desc("A flag that overrides the target's expected cost for "
+ "an instruction to a single constant value. Mostly "
+ "useful for getting consistent testing."));
+
+static cl::opt<unsigned> SmallLoopCost(
+ "small-loop-cost", cl::init(20), cl::Hidden,
+ cl::desc("The cost of a loop that is considered 'small' by the unroller."));
+
+static cl::opt<bool> LoopVectorizeWithBlockFrequency(
+ "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
+ cl::desc("Enable the use of the block frequency analysis to access PGO "
+ "heuristics minimizing code growth in cold regions and being more "
+ "aggressive in hot regions."));
+
+// Runtime unroll loops for load/store throughput.
+static cl::opt<bool> EnableLoadStoreRuntimeUnroll(
+ "enable-loadstore-runtime-unroll", cl::init(true), cl::Hidden,
+ cl::desc("Enable runtime unrolling until load/store ports are saturated"));
+
+/// The number of stores in a loop that are allowed to need predication.
+static cl::opt<unsigned> NumberOfStoresToPredicate(
+ "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
+ cl::desc("Max number of stores to be predicated behind an if."));
+
+static cl::opt<bool> EnableIndVarRegisterHeur(
+ "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
+ cl::desc("Count the induction variable only once when unrolling"));
+
+static cl::opt<bool> EnableCondStoresVectorization(
+ "enable-cond-stores-vec", cl::init(false), cl::Hidden,
+ cl::desc("Enable if predication of stores during vectorization."));
namespace {
@@ -136,6 +210,29 @@ namespace {
class LoopVectorizationLegality;
class LoopVectorizationCostModel;
+/// Optimization analysis message produced during vectorization. Messages inform
+/// the user why vectorization did not occur.
+class Report {
+ std::string Message;
+ raw_string_ostream Out;
+ Instruction *Instr;
+
+public:
+ Report(Instruction *I = nullptr) : Out(Message), Instr(I) {
+ Out << "loop not vectorized: ";
+ }
+
+ template <typename A> Report &operator<<(const A &Value) {
+ Out << Value;
+ return *this;
+ }
+
+ Instruction *getInstr() { return Instr; }
+
+ std::string &str() { return Out.str(); }
+ operator Twine() { return Out.str(); }
+};
+
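The Report class above feeds the new optimization-remark diagnostics; as a rough illustration (the source and the driver flags are assumptions based on same-era Clang remark support, not part of this patch), a loop like the following would be rejected and explained through emitAnalysis(), typically surfaced with -Rpass-analysis=loop-vectorize and -Rpass-missed=loop-vectorize:

    float prefix_sums(float *a, int n) {
      float s = 0.0f;
      for (int i = 1; i < n; ++i) {
        a[i] = a[i - 1] + 1.0f;   // loop-carried dependence: "loop not vectorized"
        s += a[i];
      }
      return s;
    }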
/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
@@ -153,20 +250,22 @@ class LoopVectorizationCostModel;
class InnerLoopVectorizer {
public:
InnerLoopVectorizer(Loop *OrigLoop, ScalarEvolution *SE, LoopInfo *LI,
- DominatorTree *DT, DataLayout *DL,
+ DominatorTree *DT, const DataLayout *DL,
const TargetLibraryInfo *TLI, unsigned VecWidth,
unsigned UnrollFactor)
: OrigLoop(OrigLoop), SE(SE), LI(LI), DT(DT), DL(DL), TLI(TLI),
- VF(VecWidth), UF(UnrollFactor), Builder(SE->getContext()), Induction(0),
- OldInduction(0), WidenMap(UnrollFactor) {}
+ VF(VecWidth), UF(UnrollFactor), Builder(SE->getContext()),
+ Induction(nullptr), OldInduction(nullptr), WidenMap(UnrollFactor),
+ Legal(nullptr) {}
// Perform the actual loop widening (vectorization).
- void vectorize(LoopVectorizationLegality *Legal) {
+ void vectorize(LoopVectorizationLegality *L) {
+ Legal = L;
// Create a new empty loop. Unlink the old loop and connect the new one.
- createEmptyLoop(Legal);
+ createEmptyLoop();
// Widen each instruction in the old loop to a new one in the new loop.
// Use the Legality module to find the induction and reduction variables.
- vectorizeLoop(Legal);
+ vectorizeLoop();
// Register the new loop and update the analysis passes.
updateAnalysis();
}
@@ -186,14 +285,23 @@ protected:
typedef DenseMap<std::pair<BasicBlock*, BasicBlock*>,
VectorParts> EdgeMaskCache;
- /// Add code that checks at runtime if the accessed arrays overlap.
- /// Returns the comparator value or NULL if no check is needed.
- Instruction *addRuntimeCheck(LoopVectorizationLegality *Legal,
- Instruction *Loc);
+ /// \brief Add code that checks at runtime if the accessed arrays overlap.
+ ///
+ /// Returns a pair of instructions where the first element is the first
+ /// instruction generated (possibly one of a sequence of instructions) and the
+ /// second element is the final comparator value, or NULL if no check is needed.
+ std::pair<Instruction *, Instruction *> addRuntimeCheck(Instruction *Loc);
+
+ /// \brief Add checks for strides that were assumed to be 1.
+ ///
+ /// Returns the first and the last check instruction generated, as the
+ /// pair (first, last).
+ std::pair<Instruction *, Instruction *> addStrideCheck(Instruction *Loc);
+
/// Create an empty loop, based on the loop ranges of the old loop.
- void createEmptyLoop(LoopVectorizationLegality *Legal);
+ void createEmptyLoop();
/// Copy and widen the instructions from the old loop.
- virtual void vectorizeLoop(LoopVectorizationLegality *Legal);
+ virtual void vectorizeLoop();
/// \brief The Loop exit block may have single value PHI nodes where the
/// incoming value is 'Undef'. While vectorizing we only handled real values
@@ -210,14 +318,12 @@ protected:
VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);
/// A helper function to vectorize a single BB within the innermost loop.
- void vectorizeBlockInLoop(LoopVectorizationLegality *Legal, BasicBlock *BB,
- PhiVector *PV);
+ void vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV);
/// Vectorize a single PHINode in a block. This method handles the induction
/// variable canonicalization. It supports both VF = 1 for unrolled loops and
/// arbitrary length vectors.
void widenPHIInstruction(Instruction *PN, VectorParts &Entry,
- LoopVectorizationLegality *Legal,
unsigned UF, unsigned VF, PhiVector *PV);
/// Insert the new loop to the loop hierarchy and pass manager
@@ -225,12 +331,14 @@ protected:
void updateAnalysis();
/// This instruction is un-vectorizable. Implement it as a sequence
- /// of scalars.
- virtual void scalarizeInstruction(Instruction *Instr);
+ /// of scalars. If \p IfPredicateStore is true we need to 'hide' each
+ /// scalarized instruction behind an if block predicated on the control
+ /// dependence of the instruction.
+ virtual void scalarizeInstruction(Instruction *Instr,
+ bool IfPredicateStore=false);
/// Vectorize Load and Store instructions,
- virtual void vectorizeMemoryInstruction(Instruction *Instr,
- LoopVectorizationLegality *Legal);
+ virtual void vectorizeMemoryInstruction(Instruction *Instr);
/// Create a broadcast instruction. This method generates a broadcast
/// instruction (shuffle) for loop invariant values and for the induction
@@ -302,8 +410,10 @@ protected:
LoopInfo *LI;
/// Dominator Tree.
DominatorTree *DT;
+ /// Alias Analysis.
+ AliasAnalysis *AA;
/// Data Layout.
- DataLayout *DL;
+ const DataLayout *DL;
/// Target Library Info.
const TargetLibraryInfo *TLI;
@@ -330,7 +440,7 @@ protected:
///The ExitBlock of the scalar loop.
BasicBlock *LoopExitBlock;
///The vector loop body.
- BasicBlock *LoopVectorBody;
+ SmallVector<BasicBlock *, 4> LoopVectorBody;
///The scalar loop body.
BasicBlock *LoopScalarBody;
/// A list of all bypass blocks. The first block is the entry of the loop.
@@ -345,22 +455,24 @@ protected:
/// Maps scalars to widened vectors.
ValueMap WidenMap;
EdgeMaskCache MaskCache;
+
+ LoopVectorizationLegality *Legal;
};
class InnerLoopUnroller : public InnerLoopVectorizer {
public:
InnerLoopUnroller(Loop *OrigLoop, ScalarEvolution *SE, LoopInfo *LI,
- DominatorTree *DT, DataLayout *DL,
+ DominatorTree *DT, const DataLayout *DL,
const TargetLibraryInfo *TLI, unsigned UnrollFactor) :
InnerLoopVectorizer(OrigLoop, SE, LI, DT, DL, TLI, 1, UnrollFactor) { }
private:
- virtual void scalarizeInstruction(Instruction *Instr);
- virtual void vectorizeMemoryInstruction(Instruction *Instr,
- LoopVectorizationLegality *Legal);
- virtual Value *getBroadcastInstrs(Value *V);
- virtual Value *getConsecutiveVector(Value* Val, int StartIdx, bool Negate);
- virtual Value *reverseVector(Value *Vec);
+ void scalarizeInstruction(Instruction *Instr,
+ bool IfPredicateStore = false) override;
+ void vectorizeMemoryInstruction(Instruction *Instr) override;
+ Value *getBroadcastInstrs(Value *V) override;
+ Value *getConsecutiveVector(Value* Val, int StartIdx, bool Negate) override;
+ Value *reverseVector(Value *Vec) override;
};
/// \brief Look for a meaningful debug location on the instruction or it's
@@ -391,6 +503,52 @@ static void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
B.SetCurrentDebugLocation(DebugLoc());
}
+#ifndef NDEBUG
+/// \return string containing a file name and a line # for the given loop.
+static std::string getDebugLocString(const Loop *L) {
+ std::string Result;
+ if (L) {
+ raw_string_ostream OS(Result);
+ const DebugLoc LoopDbgLoc = L->getStartLoc();
+ if (!LoopDbgLoc.isUnknown())
+ LoopDbgLoc.print(L->getHeader()->getContext(), OS);
+ else
+ // Just print the module name.
+ OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
+ OS.flush();
+ }
+ return Result;
+}
+#endif
+
+/// \brief Propagate known metadata from one instruction to another.
+static void propagateMetadata(Instruction *To, const Instruction *From) {
+ SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
+ From->getAllMetadataOtherThanDebugLoc(Metadata);
+
+ for (auto M : Metadata) {
+ unsigned Kind = M.first;
+
+ // These are safe to transfer (this is safe for TBAA, even when we
+ // if-convert, because should that metadata have had a control dependency
+ // on the condition, and thus actually aliased with some other
+ // non-speculated memory access when the condition was false, this would be
+ // caught by the runtime overlap checks).
+ if (Kind != LLVMContext::MD_tbaa &&
+ Kind != LLVMContext::MD_fpmath)
+ continue;
+
+ To->setMetadata(Kind, M.second);
+ }
+}
+
+/// \brief Propagate known metadata from one instruction to a vector of others.
+static void propagateMetadata(SmallVectorImpl<Value *> &To, const Instruction *From) {
+ for (Value *V : To)
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ propagateMetadata(I, From);
+}
+
/// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
/// to what vectorization factor.
/// This class does not look at the profitability of vectorization, only the
@@ -406,11 +564,17 @@ static void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
/// induction variable and the different reduction variables.
class LoopVectorizationLegality {
public:
- LoopVectorizationLegality(Loop *L, ScalarEvolution *SE, DataLayout *DL,
- DominatorTree *DT, TargetLibraryInfo *TLI)
- : TheLoop(L), SE(SE), DL(DL), DT(DT), TLI(TLI),
- Induction(0), WidestIndTy(0), HasFunNoNaNAttr(false),
- MaxSafeDepDistBytes(-1U) {}
+ unsigned NumLoads;
+ unsigned NumStores;
+ unsigned NumPredStores;
+
+ LoopVectorizationLegality(Loop *L, ScalarEvolution *SE, const DataLayout *DL,
+ DominatorTree *DT, TargetLibraryInfo *TLI,
+ AliasAnalysis *AA, Function *F)
+ : NumLoads(0), NumStores(0), NumPredStores(0), TheLoop(L), SE(SE), DL(DL),
+ DT(DT), TLI(TLI), AA(AA), TheFunction(F), Induction(nullptr),
+ WidestIndTy(nullptr), HasFunNoNaNAttr(false), MaxSafeDepDistBytes(-1U) {
+ }
/// This enum represents the kinds of reductions that we support.
enum ReductionKind {
@@ -448,7 +612,7 @@ public:
/// This struct holds information about reduction variables.
struct ReductionDescriptor {
- ReductionDescriptor() : StartValue(0), LoopExitInstr(0),
+ ReductionDescriptor() : StartValue(nullptr), LoopExitInstr(nullptr),
Kind(RK_NoReduction), MinMaxKind(MRK_Invalid) {}
ReductionDescriptor(Value *Start, Instruction *Exit, ReductionKind K,
@@ -496,11 +660,12 @@ public:
Ends.clear();
IsWritePtr.clear();
DependencySetId.clear();
+ AliasSetId.clear();
}
/// Insert a pointer and calculate the start and end SCEVs.
void insert(ScalarEvolution *SE, Loop *Lp, Value *Ptr, bool WritePtr,
- unsigned DepSetId);
+ unsigned DepSetId, unsigned ASId, ValueToValueMap &Strides);
/// This flag indicates if we need to add the runtime check.
bool Need;
@@ -515,12 +680,14 @@ public:
/// Holds the id of the set of pointers that could be dependent because of a
/// shared underlying object.
SmallVector<unsigned, 2> DependencySetId;
+ /// Holds the id of the disjoint alias set to which this pointer belongs.
+ SmallVector<unsigned, 2> AliasSetId;
};
/// A struct for saving information about induction variables.
struct InductionInfo {
InductionInfo(Value *Start, InductionKind K) : StartValue(Start), IK(K) {}
- InductionInfo() : StartValue(0), IK(IK_NoInduction) {}
+ InductionInfo() : StartValue(nullptr), IK(IK_NoInduction) {}
/// Start value.
TrackingVH<Value> StartValue;
/// Induction kind.
@@ -564,7 +731,7 @@ public:
/// pointer itself is an induction variable.
/// This check allows us to vectorize A[idx] into a wide load/store.
/// Returns:
- /// 0 - Stride is unknown or non consecutive.
+ /// 0 - Stride is unknown or non-consecutive.
/// 1 - Address is consecutive.
/// -1 - Address is consecutive, and decreasing.
int isConsecutivePtr(Value *Ptr);
@@ -584,6 +751,13 @@ public:
unsigned getMaxSafeDepDistBytes() { return MaxSafeDepDistBytes; }
+ bool hasStride(Value *V) { return StrideSet.count(V); }
+ bool mustCheckStrides() { return !StrideSet.empty(); }
+ SmallPtrSet<Value *, 8>::iterator strides_begin() {
+ return StrideSet.begin();
+ }
+ SmallPtrSet<Value *, 8>::iterator strides_end() { return StrideSet.end(); }
+
private:
/// Check if a single basic block loop is vectorizable.
/// At this point we know that this is a loop with a constant trip count
@@ -626,16 +800,36 @@ private:
/// if the PHI is not an induction variable.
InductionKind isInductionVariable(PHINode *Phi);
+ /// \brief Collect memory access with loop invariant strides.
+ ///
+ /// Looks for accesses like "a[i * StrideA]" where "StrideA" is loop
+ /// invariant.
+ void collectStridedAcccess(Value *LoadOrStoreInst);
+
+ /// Report an analysis message to assist the user in diagnosing loops that are
+ /// not vectorized.
+ void emitAnalysis(Report &Message) {
+ DebugLoc DL = TheLoop->getStartLoc();
+ if (Instruction *I = Message.getInstr())
+ DL = I->getDebugLoc();
+ emitOptimizationRemarkAnalysis(TheFunction->getContext(), DEBUG_TYPE,
+ *TheFunction, DL, Message.str());
+ }
+
/// The loop that we evaluate.
Loop *TheLoop;
/// Scev analysis.
ScalarEvolution *SE;
/// DataLayout analysis.
- DataLayout *DL;
+ const DataLayout *DL;
/// Dominators.
DominatorTree *DT;
/// Target Library Info.
TargetLibraryInfo *TLI;
+ /// Alias analysis.
+ AliasAnalysis *AA;
+ /// Parent function
+ Function *TheFunction;
// --- vectorization state --- //
@@ -664,6 +858,9 @@ private:
bool HasFunNoNaNAttr;
unsigned MaxSafeDepDistBytes;
+
+ ValueToValueMap Strides;
+ SmallPtrSet<Value *, 8> StrideSet;
};
/// LoopVectorizationCostModel - estimates the expected speedups due to
@@ -678,7 +875,7 @@ public:
LoopVectorizationCostModel(Loop *L, ScalarEvolution *SE, LoopInfo *LI,
LoopVectorizationLegality *Legal,
const TargetTransformInfo &TTI,
- DataLayout *DL, const TargetLibraryInfo *TLI)
+ const DataLayout *DL, const TargetLibraryInfo *TLI)
: TheLoop(L), SE(SE), LI(LI), Legal(Legal), TTI(TTI), DL(DL), TLI(TLI) {}
/// Information about vectorization costs
@@ -691,7 +888,8 @@ public:
/// then this vectorization factor will be selected if vectorization is
/// possible.
VectorizationFactor selectVectorizationFactor(bool OptForSize,
- unsigned UserVF);
+ unsigned UserVF,
+ bool ForceVectorization);
/// \return The size (in bits) of the widest type in the code that
/// needs to be vectorized. We ignore values that remain scalar such as
@@ -751,39 +949,39 @@ private:
/// Vector target information.
const TargetTransformInfo &TTI;
/// Target data layout information.
- DataLayout *DL;
+ const DataLayout *DL;
/// Target Library Info.
const TargetLibraryInfo *TLI;
};
/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
-struct LoopVectorizeHints {
- /// Vectorization width.
- unsigned Width;
- /// Vectorization unroll factor.
- unsigned Unroll;
+class LoopVectorizeHints {
+public:
+ enum ForceKind {
+ FK_Undefined = -1, ///< Not selected.
+ FK_Disabled = 0, ///< Forcing disabled.
+ FK_Enabled = 1, ///< Forcing enabled.
+ };
LoopVectorizeHints(const Loop *L, bool DisableUnrolling)
- : Width(VectorizationFactor)
- , Unroll(DisableUnrolling ? 1 : VectorizationUnroll)
- , LoopID(L->getLoopID()) {
+ : Width(VectorizationFactor),
+ Unroll(DisableUnrolling),
+ Force(FK_Undefined),
+ LoopID(L->getLoopID()) {
getHints(L);
- // The command line options override any loop metadata except for when
- // width == 1 which is used to indicate the loop is already vectorized.
- if (VectorizationFactor.getNumOccurrences() > 0 && Width != 1)
- Width = VectorizationFactor;
+ // force-vector-unroll overrides DisableUnrolling.
if (VectorizationUnroll.getNumOccurrences() > 0)
Unroll = VectorizationUnroll;
- DEBUG(if (DisableUnrolling && Unroll == 1)
- dbgs() << "LV: Unrolling disabled by the pass manager\n");
+ DEBUG(if (DisableUnrolling && Unroll == 1) dbgs()
+ << "LV: Unrolling disabled by the pass manager\n");
}
- /// Return the loop vectorizer metadata prefix.
- static StringRef Prefix() { return "llvm.vectorizer."; }
+ /// Return the loop metadata prefix.
+ static StringRef Prefix() { return "llvm.loop."; }
- MDNode *createHint(LLVMContext &Context, StringRef Name, unsigned V) {
+ MDNode *createHint(LLVMContext &Context, StringRef Name, unsigned V) const {
SmallVector<Value*, 2> Vals;
Vals.push_back(MDString::get(Context, Name));
Vals.push_back(ConstantInt::get(Type::getInt32Ty(Context), V));
@@ -803,8 +1001,10 @@ struct LoopVectorizeHints {
for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i)
Vals.push_back(LoopID->getOperand(i));
- Vals.push_back(createHint(Context, Twine(Prefix(), "width").str(), Width));
- Vals.push_back(createHint(Context, Twine(Prefix(), "unroll").str(), 1));
+ Vals.push_back(
+ createHint(Context, Twine(Prefix(), "vectorize.width").str(), Width));
+ Vals.push_back(
+ createHint(Context, Twine(Prefix(), "interleave.count").str(), 1));
MDNode *NewLoopID = MDNode::get(Context, Vals);
// Set operand 0 to refer to the loop id itself.
@@ -817,9 +1017,35 @@ struct LoopVectorizeHints {
LoopID = NewLoopID;
}
-private:
- MDNode *LoopID;
+ std::string emitRemark() const {
+ Report R;
+ R << "vectorization ";
+ switch (Force) {
+ case LoopVectorizeHints::FK_Disabled:
+ R << "is explicitly disabled";
+ break;
+ case LoopVectorizeHints::FK_Enabled:
+ R << "is explicitly enabled";
+ if (Width != 0 && Unroll != 0)
+ R << " with width " << Width << " and interleave count " << Unroll;
+ else if (Width != 0)
+ R << " with width " << Width;
+ else if (Unroll != 0)
+ R << " with interleave count " << Unroll;
+ break;
+ case LoopVectorizeHints::FK_Undefined:
+ R << "was not specified";
+ break;
+ }
+ return R.str();
+ }
+ unsigned getWidth() const { return Width; }
+ unsigned getUnroll() const { return Unroll; }
+ enum ForceKind getForce() const { return Force; }
+ MDNode *getLoopID() const { return LoopID; }
+
+private:
/// Find hints specified in the loop metadata.
void getHints(const Loop *L) {
if (!LoopID)
@@ -830,7 +1056,7 @@ private:
assert(LoopID->getOperand(0) == LoopID && "invalid loop id");
for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
- const MDString *S = 0;
+ const MDString *S = nullptr;
SmallVector<Value*, 4> Args;
// The expected hint is either a MDString or a MDNode with the first
@@ -849,7 +1075,7 @@ private:
if (!S)
continue;
- // Check if the hint starts with the vectorizer prefix.
+ // Check if the hint starts with the loop metadata prefix.
StringRef Hint = S->getString();
if (!Hint.startswith(Prefix()))
continue;
@@ -867,12 +1093,18 @@ private:
if (!C) return;
unsigned Val = C->getZExtValue();
- if (Hint == "width") {
+ if (Hint == "vectorize.width") {
if (isPowerOf2_32(Val) && Val <= MaxVectorWidth)
Width = Val;
else
DEBUG(dbgs() << "LV: ignoring invalid width hint metadata\n");
- } else if (Hint == "unroll") {
+ } else if (Hint == "vectorize.enable") {
+ if (C->getBitWidth() == 1)
+ Force = Val == 1 ? LoopVectorizeHints::FK_Enabled
+ : LoopVectorizeHints::FK_Disabled;
+ else
+ DEBUG(dbgs() << "LV: ignoring invalid enable hint metadata\n");
+ } else if (Hint == "interleave.count") {
if (isPowerOf2_32(Val) && Val <= MaxUnrollFactor)
Unroll = Val;
else
@@ -881,62 +1113,192 @@ private:
DEBUG(dbgs() << "LV: ignoring unknown hint " << Hint << '\n');
}
}
+
+ /// Vectorization width.
+ unsigned Width;
+ /// Vectorization unroll factor.
+ unsigned Unroll;
+ /// Whether vectorization is explicitly forced on or off, or left unspecified.
+ enum ForceKind Force;
+
+ MDNode *LoopID;
};
+static void emitMissedWarning(Function *F, Loop *L,
+ const LoopVectorizeHints &LH) {
+ emitOptimizationRemarkMissed(F->getContext(), DEBUG_TYPE, *F,
+ L->getStartLoc(), LH.emitRemark());
+
+ if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
+ if (LH.getWidth() != 1)
+ emitLoopVectorizeWarning(
+ F->getContext(), *F, L->getStartLoc(),
+ "failed explicitly specified loop vectorization");
+ else if (LH.getUnroll() != 1)
+ emitLoopInterleaveWarning(
+ F->getContext(), *F, L->getStartLoc(),
+ "failed explicitly specified loop interleaving");
+ }
+}
+
+static void addInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) {
+ if (L.empty())
+ return V.push_back(&L);
+
+ for (Loop *InnerL : L)
+ addInnerLoop(*InnerL, V);
+}
+
/// The LoopVectorize Pass.
-struct LoopVectorize : public LoopPass {
+struct LoopVectorize : public FunctionPass {
/// Pass identification, replacement for typeid
static char ID;
- explicit LoopVectorize(bool NoUnrolling = false)
- : LoopPass(ID), DisableUnrolling(NoUnrolling) {
+ explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true)
+ : FunctionPass(ID),
+ DisableUnrolling(NoUnrolling),
+ AlwaysVectorize(AlwaysVectorize) {
initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
}
ScalarEvolution *SE;
- DataLayout *DL;
+ const DataLayout *DL;
LoopInfo *LI;
TargetTransformInfo *TTI;
DominatorTree *DT;
+ BlockFrequencyInfo *BFI;
TargetLibraryInfo *TLI;
+ AliasAnalysis *AA;
bool DisableUnrolling;
+ bool AlwaysVectorize;
- virtual bool runOnLoop(Loop *L, LPPassManager &LPM) {
- // We only vectorize innermost loops.
- if (!L->empty())
- return false;
+ BlockFrequency ColdEntryFreq;
+ bool runOnFunction(Function &F) override {
SE = &getAnalysis<ScalarEvolution>();
- DL = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
LI = &getAnalysis<LoopInfo>();
TTI = &getAnalysis<TargetTransformInfo>();
- DT = &getAnalysis<DominatorTree>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ BFI = &getAnalysis<BlockFrequencyInfo>();
TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
+ AA = &getAnalysis<AliasAnalysis>();
+
+ // Compute some weights outside of the loop over the loops. Compute this
+ // using a BranchProbability to re-use its scaling math.
+ const BranchProbability ColdProb(1, 5); // 20%
+ ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb;
// If the target claims to have no vector registers don't attempt
// vectorization.
if (!TTI->getNumberOfRegisters(true))
return false;
- if (DL == NULL) {
- DEBUG(dbgs() << "LV: Not vectorizing because of missing data layout\n");
+ if (!DL) {
+ DEBUG(dbgs() << "\nLV: Not vectorizing " << F.getName()
+ << ": Missing data layout\n");
return false;
}
- DEBUG(dbgs() << "LV: Checking a loop in \"" <<
- L->getHeader()->getParent()->getName() << "\"\n");
+ // Build up a worklist of inner-loops to vectorize. This is necessary as
+ // the act of vectorizing or partially unrolling a loop creates new loops
+ // and can invalidate iterators across the loops.
+ SmallVector<Loop *, 8> Worklist;
+
+ for (Loop *L : *LI)
+ addInnerLoop(*L, Worklist);
+
+ LoopsAnalyzed += Worklist.size();
+
+ // Now walk the identified inner loops.
+ bool Changed = false;
+ while (!Worklist.empty())
+ Changed |= processLoop(Worklist.pop_back_val());
+
+ // Process each loop nest in the function.
+ return Changed;
+ }
+
+ bool processLoop(Loop *L) {
+ assert(L->empty() && "Only process inner loops.");
+
+#ifndef NDEBUG
+ const std::string DebugLocStr = getDebugLocString(L);
+#endif /* NDEBUG */
+
+ DEBUG(dbgs() << "\nLV: Checking a loop in \""
+ << L->getHeader()->getParent()->getName() << "\" from "
+ << DebugLocStr << "\n");
LoopVectorizeHints Hints(L, DisableUnrolling);
- if (Hints.Width == 1 && Hints.Unroll == 1) {
- DEBUG(dbgs() << "LV: Not vectorizing.\n");
+ DEBUG(dbgs() << "LV: Loop hints:"
+ << " force="
+ << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
+ ? "disabled"
+ : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
+ ? "enabled"
+ : "?")) << " width=" << Hints.getWidth()
+ << " unroll=" << Hints.getUnroll() << "\n");
+
+    // The function containing the loop.
+ Function *F = L->getHeader()->getParent();
+
+ // Looking at the diagnostic output is the only way to determine if a loop
+ // was vectorized (other than looking at the IR or machine code), so it
+ // is important to generate an optimization remark for each loop. Most of
+ // these messages are generated by emitOptimizationRemarkAnalysis. Remarks
+ // generated by emitOptimizationRemark and emitOptimizationRemarkMissed are
+  // less verbose, reporting vectorized loops and unvectorized loops that may
+ // benefit from vectorization, respectively.
+
+ if (Hints.getForce() == LoopVectorizeHints::FK_Disabled) {
+ DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
+ emitOptimizationRemarkAnalysis(F->getContext(), DEBUG_TYPE, *F,
+ L->getStartLoc(), Hints.emitRemark());
return false;
}
+ if (!AlwaysVectorize && Hints.getForce() != LoopVectorizeHints::FK_Enabled) {
+ DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
+ emitOptimizationRemarkAnalysis(F->getContext(), DEBUG_TYPE, *F,
+ L->getStartLoc(), Hints.emitRemark());
+ return false;
+ }
+
+ if (Hints.getWidth() == 1 && Hints.getUnroll() == 1) {
+ DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
+ emitOptimizationRemarkAnalysis(
+ F->getContext(), DEBUG_TYPE, *F, L->getStartLoc(),
+ "loop not vectorized: vector width and interleave count are "
+ "explicitly set to 1");
+ return false;
+ }
+
+ // Check the loop for a trip count threshold:
+ // do not vectorize loops with a tiny trip count.
+ BasicBlock *Latch = L->getLoopLatch();
+ const unsigned TC = SE->getSmallConstantTripCount(L, Latch);
+ if (TC > 0u && TC < TinyTripCountVectorThreshold) {
+ DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
+ << "This loop is not worth vectorizing.");
+ if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
+ DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
+ else {
+ DEBUG(dbgs() << "\n");
+ emitOptimizationRemarkAnalysis(
+ F->getContext(), DEBUG_TYPE, *F, L->getStartLoc(),
+ "vectorization is not beneficial and is not explicitly forced");
+ return false;
+ }
+ }
+
// Check if it is legal to vectorize the loop.
- LoopVectorizationLegality LVL(L, SE, DL, DT, TLI);
+ LoopVectorizationLegality LVL(L, SE, DL, DT, TLI, AA, F);
if (!LVL.canVectorize()) {
- DEBUG(dbgs() << "LV: Not vectorizing.\n");
+ DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
+ emitMissedWarning(F, L, Hints);
return false;
}
@@ -945,41 +1307,81 @@ struct LoopVectorize : public LoopPass {
// Check the function attributes to find out if this function should be
// optimized for size.
- Function *F = L->getHeader()->getParent();
- Attribute::AttrKind SzAttr = Attribute::OptimizeForSize;
- Attribute::AttrKind FlAttr = Attribute::NoImplicitFloat;
- unsigned FnIndex = AttributeSet::FunctionIndex;
- bool OptForSize = F->getAttributes().hasAttribute(FnIndex, SzAttr);
- bool NoFloat = F->getAttributes().hasAttribute(FnIndex, FlAttr);
+ bool OptForSize = Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
+ F->hasFnAttribute(Attribute::OptimizeForSize);
+
+ // Compute the weighted frequency of this loop being executed and see if it
+ // is less than 20% of the function entry baseline frequency. Note that we
+    // always have a canonical loop here because we think we *can* vectorize.
+ // FIXME: This is hidden behind a flag due to pervasive problems with
+ // exactly what block frequency models.
+ if (LoopVectorizeWithBlockFrequency) {
+ BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader());
+ if (Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
+ LoopEntryFreq < ColdEntryFreq)
+ OptForSize = true;
+ }
- if (NoFloat) {
+    // Check the function attributes to see if implicit floats are allowed.
+    // FIXME: This check does not seem like it can be correct -- what if the loop is
+ // an integer loop and the vector instructions selected are purely integer
+ // vector instructions?
+ if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
"attribute is used.\n");
+ emitOptimizationRemarkAnalysis(
+ F->getContext(), DEBUG_TYPE, *F, L->getStartLoc(),
+ "loop not vectorized due to NoImplicitFloat attribute");
+ emitMissedWarning(F, L, Hints);
return false;
}
// Select the optimal vectorization factor.
- LoopVectorizationCostModel::VectorizationFactor VF;
- VF = CM.selectVectorizationFactor(OptForSize, Hints.Width);
+ const LoopVectorizationCostModel::VectorizationFactor VF =
+ CM.selectVectorizationFactor(OptForSize, Hints.getWidth(),
+ Hints.getForce() ==
+ LoopVectorizeHints::FK_Enabled);
+
// Select the unroll factor.
- unsigned UF = CM.selectUnrollFactor(OptForSize, Hints.Unroll, VF.Width,
- VF.Cost);
+ const unsigned UF =
+ CM.selectUnrollFactor(OptForSize, Hints.getUnroll(), VF.Width, VF.Cost);
- DEBUG(dbgs() << "LV: Found a vectorizable loop ("<< VF.Width << ") in "<<
- F->getParent()->getModuleIdentifier() << '\n');
+ DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
+ << DebugLocStr << '\n');
DEBUG(dbgs() << "LV: Unroll Factor is " << UF << '\n');
if (VF.Width == 1) {
- DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
- if (UF == 1)
+ DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial\n");
+
+ if (UF == 1) {
+ emitOptimizationRemarkAnalysis(
+ F->getContext(), DEBUG_TYPE, *F, L->getStartLoc(),
+ "not beneficial to vectorize and user disabled interleaving");
return false;
+ }
+ DEBUG(dbgs() << "LV: Trying to at least unroll the loops.\n");
+
+ // Report the unrolling decision.
+ emitOptimizationRemark(F->getContext(), DEBUG_TYPE, *F, L->getStartLoc(),
+ Twine("unrolled with interleaving factor " +
+ Twine(UF) +
+ " (vectorization not beneficial)"));
+
// We decided not to vectorize, but we may want to unroll.
+
InnerLoopUnroller Unroller(L, SE, LI, DT, DL, TLI, UF);
Unroller.vectorize(&LVL);
} else {
// If we decided that it is *legal* to vectorize the loop then do it.
InnerLoopVectorizer LB(L, SE, LI, DT, DL, TLI, VF.Width, UF);
LB.vectorize(&LVL);
+ ++LoopsVectorized;
+
+ // Report the vectorization decision.
+ emitOptimizationRemark(
+ F->getContext(), DEBUG_TYPE, *F, L->getStartLoc(),
+ Twine("vectorized loop (vectorization factor: ") + Twine(VF.Width) +
+ ", unrolling interleave factor: " + Twine(UF) + ")");
}
// Mark the loop as already vectorized to avoid vectorizing again.
@@ -989,16 +1391,18 @@ struct LoopVectorize : public LoopPass {
return true;
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- LoopPass::getAnalysisUsage(AU);
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequiredID(LoopSimplifyID);
AU.addRequiredID(LCSSAID);
- AU.addRequired<DominatorTree>();
+ AU.addRequired<BlockFrequencyInfo>();
+ AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<LoopInfo>();
AU.addRequired<ScalarEvolution>();
AU.addRequired<TargetTransformInfo>();
+ AU.addRequired<AliasAnalysis>();
AU.addPreserved<LoopInfo>();
- AU.addPreserved<DominatorTree>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
+ AU.addPreserved<AliasAnalysis>();
}
};
@@ -1010,12 +1414,53 @@ struct LoopVectorize : public LoopPass {
// LoopVectorizationCostModel.
//===----------------------------------------------------------------------===//
-void
-LoopVectorizationLegality::RuntimePointerCheck::insert(ScalarEvolution *SE,
- Loop *Lp, Value *Ptr,
- bool WritePtr,
- unsigned DepSetId) {
- const SCEV *Sc = SE->getSCEV(Ptr);
+static Value *stripIntegerCast(Value *V) {
+ if (CastInst *CI = dyn_cast<CastInst>(V))
+ if (CI->getOperand(0)->getType()->isIntegerTy())
+ return CI->getOperand(0);
+ return V;
+}
+
+///\brief Replaces the symbolic stride in a pointer SCEV expression by one.
+///
+/// If \p OrigPtr is not null, use it to look up the stride value instead of
+/// \p Ptr.
+static const SCEV *replaceSymbolicStrideSCEV(ScalarEvolution *SE,
+ ValueToValueMap &PtrToStride,
+ Value *Ptr, Value *OrigPtr = nullptr) {
+
+ const SCEV *OrigSCEV = SE->getSCEV(Ptr);
+
+ // If there is an entry in the map return the SCEV of the pointer with the
+ // symbolic stride replaced by one.
+ ValueToValueMap::iterator SI = PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
+ if (SI != PtrToStride.end()) {
+ Value *StrideVal = SI->second;
+
+ // Strip casts.
+ StrideVal = stripIntegerCast(StrideVal);
+
+ // Replace symbolic stride by one.
+ Value *One = ConstantInt::get(StrideVal->getType(), 1);
+ ValueToValueMap RewriteMap;
+ RewriteMap[StrideVal] = One;
+
+ const SCEV *ByOne =
+ SCEVParameterRewriter::rewrite(OrigSCEV, *SE, RewriteMap, true);
+ DEBUG(dbgs() << "LV: Replacing SCEV: " << *OrigSCEV << " by: " << *ByOne
+ << "\n");
+ return ByOne;
+ }
+
+ // Otherwise, just return the SCEV of the original pointer.
+ return SE->getSCEV(Ptr);
+}
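The symbolic-stride machinery lets the vectorizer speculate that a runtime stride is one: replaceSymbolicStrideSCEV rewrites the pointer SCEV under that assumption, and addStrideCheck (further below) emits the guard that falls back to the scalar loop otherwise. A hypothetical source-level sketch of such a loop (gatherScale() is an illustration, not code from this patch):

    // B is addressed with a runtime stride. Assuming Stride == 1 makes the
    // access consecutive and therefore widenable; the generated "stride.chk"
    // compare branches to the scalar loop whenever that assumption fails.
    void gatherScale(float *A, const float *B, int Stride, int N) {
      for (int I = 0; I < N; ++I)
        A[I] = 2.0f * B[I * Stride];   // consecutive only if Stride == 1
    }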
+
+void LoopVectorizationLegality::RuntimePointerCheck::insert(
+ ScalarEvolution *SE, Loop *Lp, Value *Ptr, bool WritePtr, unsigned DepSetId,
+ unsigned ASId, ValueToValueMap &Strides) {
+ // Get the stride replaced scev.
+ const SCEV *Sc = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
assert(AR && "Invalid addrec expression");
const SCEV *Ex = SE->getBackedgeTakenCount(Lp);
@@ -1025,12 +1470,15 @@ LoopVectorizationLegality::RuntimePointerCheck::insert(ScalarEvolution *SE,
Ends.push_back(ScEnd);
IsWritePtr.push_back(WritePtr);
DependencySetId.push_back(DepSetId);
+ AliasSetId.push_back(ASId);
}
Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
// We need to place the broadcast of invariant variables outside the loop.
Instruction *Instr = dyn_cast<Instruction>(V);
- bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody);
+ bool NewInstr =
+ (Instr && std::find(LoopVectorBody.begin(), LoopVectorBody.end(),
+ Instr->getParent()) != LoopVectorBody.end());
bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr;
// Place the code for broadcasting invariant variables in the new preheader.
@@ -1070,7 +1518,7 @@ Value *InnerLoopVectorizer::getConsecutiveVector(Value* Val, int StartIdx,
/// \brief Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
-static unsigned getGEPInductionOperand(DataLayout *DL,
+static unsigned getGEPInductionOperand(const DataLayout *DL,
const GetElementPtrInst *Gep) {
unsigned LastOperand = Gep->getNumOperands() - 1;
unsigned GEPAllocSize = DL->getTypeAllocSize(
@@ -1093,7 +1541,7 @@ static unsigned getGEPInductionOperand(DataLayout *DL,
}
int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
- assert(Ptr->getType()->isPointerTy() && "Unexpected non ptr");
+ assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr");
// Make sure that the pointer does not point to structs.
if (Ptr->getType()->getPointerElementType()->isAggregateType())
return 0;
@@ -1147,7 +1595,27 @@ int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
// We can emit wide load/stores only if the last non-zero index is the
// induction variable.
- const SCEV *Last = SE->getSCEV(Gep->getOperand(InductionOperand));
+ const SCEV *Last = nullptr;
+ if (!Strides.count(Gep))
+ Last = SE->getSCEV(Gep->getOperand(InductionOperand));
+ else {
+ // Because of the multiplication by a stride we can have a s/zext cast.
+ // We are going to replace this stride by 1 so the cast is safe to ignore.
+ //
+ // %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ // %0 = trunc i64 %indvars.iv to i32
+ // %mul = mul i32 %0, %Stride1
+ // %idxprom = zext i32 %mul to i64 << Safe cast.
+ // %arrayidx = getelementptr inbounds i32* %B, i64 %idxprom
+ //
+ Last = replaceSymbolicStrideSCEV(SE, Strides,
+ Gep->getOperand(InductionOperand), Gep);
+ if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(Last))
+ Last =
+ (C->getSCEVType() == scSignExtend || C->getSCEVType() == scZeroExtend)
+ ? C->getOperand()
+ : Last;
+ }
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Last)) {
const SCEV *Step = AR->getStepRecurrence(*SE);
@@ -1171,6 +1639,10 @@ InnerLoopVectorizer::getVectorValue(Value *V) {
assert(V != Induction && "The new induction variable should not be used.");
assert(!V->getType()->isVectorTy() && "Can't widen a vector");
+ // If we have a stride that is replaced by one, do it here.
+ if (Legal->hasStride(V))
+ V = ConstantInt::get(V->getType(), 1);
+
// If we have this scalar in the map, return it.
if (WidenMap.has(V))
return WidenMap.get(V);
@@ -1192,9 +1664,7 @@ Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
"reverse");
}
-
-void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
- LoopVectorizationLegality *Legal) {
+void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
// Attempt to issue a wide load.
LoadInst *LI = dyn_cast<LoadInst>(Instr);
StoreInst *SI = dyn_cast<StoreInst>(Instr);
@@ -1213,10 +1683,13 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
unsigned ScalarAllocatedSize = DL->getTypeAllocSize(ScalarDataTy);
unsigned VectorElementSize = DL->getTypeStoreSize(DataTy)/VF;
+ if (SI && Legal->blockNeedsPredication(SI->getParent()))
+ return scalarizeInstruction(Instr, true);
+
if (ScalarAllocatedSize != VectorElementSize)
return scalarizeInstruction(Instr);
- // If the pointer is loop invariant or if it is non consecutive,
+ // If the pointer is loop invariant or if it is non-consecutive,
// scalarize the load.
int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
bool Reverse = ConsecutiveStride < 0;
@@ -1304,7 +1777,9 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
Value *VecPtr = Builder.CreateBitCast(PartPtr,
DataTy->getPointerTo(AddressSpace));
- Builder.CreateStore(StoredVal[Part], VecPtr)->setAlignment(Alignment);
+ StoreInst *NewSI =
+ Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment);
+ propagateMetadata(NewSI, SI);
}
return;
}
@@ -1325,13 +1800,13 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
Value *VecPtr = Builder.CreateBitCast(PartPtr,
DataTy->getPointerTo(AddressSpace));
- Value *LI = Builder.CreateLoad(VecPtr, "wide.load");
- cast<LoadInst>(LI)->setAlignment(Alignment);
- Entry[Part] = Reverse ? reverseVector(LI) : LI;
+ LoadInst *NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
+ propagateMetadata(NewLI, LI);
+ Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI;
}
}
-void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr) {
+void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, bool IfPredicateStore) {
assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
// Holds vector parameters or scalars, in case of uniform vals.
SmallVector<VectorParts, 4> Params;
@@ -1371,15 +1846,43 @@ void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr) {
// Does this instruction return a value ?
bool IsVoidRetTy = Instr->getType()->isVoidTy();
- Value *UndefVec = IsVoidRetTy ? 0 :
+ Value *UndefVec = IsVoidRetTy ? nullptr :
UndefValue::get(VectorType::get(Instr->getType(), VF));
// Create a new entry in the WidenMap and initialize it to Undef or Null.
VectorParts &VecResults = WidenMap.splat(Instr, UndefVec);
+ Instruction *InsertPt = Builder.GetInsertPoint();
+ BasicBlock *IfBlock = Builder.GetInsertBlock();
+ BasicBlock *CondBlock = nullptr;
+
+ VectorParts Cond;
+ Loop *VectorLp = nullptr;
+ if (IfPredicateStore) {
+ assert(Instr->getParent()->getSinglePredecessor() &&
+ "Only support single predecessor blocks");
+ Cond = createEdgeMask(Instr->getParent()->getSinglePredecessor(),
+ Instr->getParent());
+ VectorLp = LI->getLoopFor(IfBlock);
+ assert(VectorLp && "Must have a loop for this block");
+ }
+
// For each vector unroll 'part':
for (unsigned Part = 0; Part < UF; ++Part) {
// For each scalar that we create:
for (unsigned Width = 0; Width < VF; ++Width) {
+
+ // Start if-block.
+ Value *Cmp = nullptr;
+ if (IfPredicateStore) {
+ Cmp = Builder.CreateExtractElement(Cond[Part], Builder.getInt32(Width));
+ Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cmp, ConstantInt::get(Cmp->getType(), 1));
+ CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.store");
+ LoopVectorBody.push_back(CondBlock);
+ VectorLp->addBasicBlockToLoop(CondBlock, LI->getBase());
+ // Update Builder with newly created basic block.
+ Builder.SetInsertPoint(InsertPt);
+ }
+
Instruction *Cloned = Instr->clone();
if (!IsVoidRetTy)
Cloned->setName(Instr->getName() + ".cloned");
@@ -1400,18 +1903,75 @@ void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr) {
if (!IsVoidRetTy)
VecResults[Part] = Builder.CreateInsertElement(VecResults[Part], Cloned,
Builder.getInt32(Width));
+ // End if-block.
+ if (IfPredicateStore) {
+ BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else");
+ LoopVectorBody.push_back(NewIfBlock);
+ VectorLp->addBasicBlockToLoop(NewIfBlock, LI->getBase());
+ Builder.SetInsertPoint(InsertPt);
+ Instruction *OldBr = IfBlock->getTerminator();
+ BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
+ OldBr->eraseFromParent();
+ IfBlock = NewIfBlock;
+ }
}
}
}
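scalarizeInstruction with IfPredicateStore set is reached from vectorizeMemoryInstruction for stores in blocks that need predication. A minimal sketch of the source pattern that takes this path (clampNegatives() is a hypothetical example):

    // A store guarded by a loop-varying condition: each scalarized lane is
    // wrapped in its own "cond.store"/"else" block pair as built above, and
    // the extracted mask element decides whether that lane's store executes.
    void clampNegatives(int *A, int N) {
      for (int I = 0; I < N; ++I)
        if (A[I] < 0)   // the condition becomes the per-lane predicate
          A[I] = 0;     // predicated store
    }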
-Instruction *
-InnerLoopVectorizer::addRuntimeCheck(LoopVectorizationLegality *Legal,
- Instruction *Loc) {
+static Instruction *getFirstInst(Instruction *FirstInst, Value *V,
+ Instruction *Loc) {
+ if (FirstInst)
+ return FirstInst;
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ return I->getParent() == Loc->getParent() ? I : nullptr;
+ return nullptr;
+}
+
+std::pair<Instruction *, Instruction *>
+InnerLoopVectorizer::addStrideCheck(Instruction *Loc) {
+ Instruction *tnullptr = nullptr;
+ if (!Legal->mustCheckStrides())
+ return std::pair<Instruction *, Instruction *>(tnullptr, tnullptr);
+
+ IRBuilder<> ChkBuilder(Loc);
+
+ // Emit checks.
+ Value *Check = nullptr;
+ Instruction *FirstInst = nullptr;
+ for (SmallPtrSet<Value *, 8>::iterator SI = Legal->strides_begin(),
+ SE = Legal->strides_end();
+ SI != SE; ++SI) {
+ Value *Ptr = stripIntegerCast(*SI);
+ Value *C = ChkBuilder.CreateICmpNE(Ptr, ConstantInt::get(Ptr->getType(), 1),
+ "stride.chk");
+ // Store the first instruction we create.
+ FirstInst = getFirstInst(FirstInst, C, Loc);
+ if (Check)
+ Check = ChkBuilder.CreateOr(Check, C);
+ else
+ Check = C;
+ }
+
+ // We have to do this trickery because the IRBuilder might fold the check to a
+  // constant expression, in which case there is no Instruction anchored in
+  // the block.
+ LLVMContext &Ctx = Loc->getContext();
+ Instruction *TheCheck =
+ BinaryOperator::CreateAnd(Check, ConstantInt::getTrue(Ctx));
+ ChkBuilder.Insert(TheCheck, "stride.not.one");
+ FirstInst = getFirstInst(FirstInst, TheCheck, Loc);
+
+ return std::make_pair(FirstInst, TheCheck);
+}
+
+std::pair<Instruction *, Instruction *>
+InnerLoopVectorizer::addRuntimeCheck(Instruction *Loc) {
LoopVectorizationLegality::RuntimePointerCheck *PtrRtCheck =
Legal->getRuntimePointerCheck();
+ Instruction *tnullptr = nullptr;
if (!PtrRtCheck->Need)
- return NULL;
+ return std::pair<Instruction *, Instruction *>(tnullptr, tnullptr);
unsigned NumPointers = PtrRtCheck->Pointers.size();
SmallVector<TrackingVH<Value> , 2> Starts;
@@ -1419,6 +1979,7 @@ InnerLoopVectorizer::addRuntimeCheck(LoopVectorizationLegality *Legal,
LLVMContext &Ctx = Loc->getContext();
SCEVExpander Exp(*SE, "induction");
+ Instruction *FirstInst = nullptr;
for (unsigned i = 0; i < NumPointers; ++i) {
Value *Ptr = PtrRtCheck->Pointers[i];
@@ -1445,7 +2006,7 @@ InnerLoopVectorizer::addRuntimeCheck(LoopVectorizationLegality *Legal,
IRBuilder<> ChkBuilder(Loc);
// Our instructions might fold to a constant.
- Value *MemoryRuntimeCheck = 0;
+ Value *MemoryRuntimeCheck = nullptr;
for (unsigned i = 0; i < NumPointers; ++i) {
for (unsigned j = i+1; j < NumPointers; ++j) {
// No need to check if two readonly pointers intersect.
@@ -1455,6 +2016,9 @@ InnerLoopVectorizer::addRuntimeCheck(LoopVectorizationLegality *Legal,
// Only need to check pointers between two different dependency sets.
if (PtrRtCheck->DependencySetId[i] == PtrRtCheck->DependencySetId[j])
continue;
+ // Only need to check pointers in the same alias set.
+ if (PtrRtCheck->AliasSetId[i] != PtrRtCheck->AliasSetId[j])
+ continue;
unsigned AS0 = Starts[i]->getType()->getPointerAddressSpace();
unsigned AS1 = Starts[j]->getType()->getPointerAddressSpace();
@@ -1472,11 +2036,16 @@ InnerLoopVectorizer::addRuntimeCheck(LoopVectorizationLegality *Legal,
Value *End1 = ChkBuilder.CreateBitCast(Ends[j], PtrArithTy0, "bc");
Value *Cmp0 = ChkBuilder.CreateICmpULE(Start0, End1, "bound0");
+ FirstInst = getFirstInst(FirstInst, Cmp0, Loc);
Value *Cmp1 = ChkBuilder.CreateICmpULE(Start1, End0, "bound1");
+ FirstInst = getFirstInst(FirstInst, Cmp1, Loc);
Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
- if (MemoryRuntimeCheck)
+ FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
+ if (MemoryRuntimeCheck) {
IsConflict = ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict,
"conflict.rdx");
+ FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
+ }
MemoryRuntimeCheck = IsConflict;
}
}
@@ -1487,30 +2056,33 @@ InnerLoopVectorizer::addRuntimeCheck(LoopVectorizationLegality *Legal,
Instruction *Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck,
ConstantInt::getTrue(Ctx));
ChkBuilder.Insert(Check, "memcheck.conflict");
- return Check;
+ FirstInst = getFirstInst(FirstInst, Check, Loc);
+ return std::make_pair(FirstInst, Check);
}
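addRuntimeCheck now also skips pointer pairs from different alias sets and returns both the check and the first instruction it created, so the caller can split a fresh block exactly at the start of the check sequence. A hypothetical loop that needs these memory checks (saxpy() is an illustration, not from this patch):

    // X and Y cannot be disambiguated at compile time, so start/end ranges for
    // both pointers are compared pairwise ("bound0"/"bound1"), combined into
    // "found.conflict", and the vector loop is bypassed when any pair overlaps.
    void saxpy(float *Y, const float *X, float A, int N) {
      for (int I = 0; I < N; ++I)
        Y[I] = A * X[I] + Y[I];   // may alias as far as the vectorizer can prove
    }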
-void
-InnerLoopVectorizer::createEmptyLoop(LoopVectorizationLegality *Legal) {
+void InnerLoopVectorizer::createEmptyLoop() {
/*
In this function we generate a new loop. The new loop will contain
the vectorized instructions while the old loop will continue to run the
scalar remainder.
- [ ] <-- vector loop bypass (may consist of multiple blocks).
- / |
- / v
- | [ ] <-- vector pre header.
- | |
- | v
- | [ ] \
- | [ ]_| <-- vector loop.
- | |
- \ v
- >[ ] <--- middle-block.
- / |
- / v
- | [ ] <--- new preheader.
+ [ ] <-- Back-edge taken count overflow check.
+ / |
+ / v
+ | [ ] <-- vector loop bypass (may consist of multiple blocks).
+ | / |
+ | / v
+ || [ ] <-- vector pre header.
+ || |
+ || v
+ || [ ] \
+ || [ ]_| <-- vector loop.
+ || |
+ | \ v
+ | >[ ] <--- middle-block.
+ | / |
+ | / v
+ -|- >[ ] <--- new preheader.
| |
| v
| [ ] \
@@ -1524,6 +2096,7 @@ InnerLoopVectorizer::createEmptyLoop(LoopVectorizationLegality *Legal) {
BasicBlock *OldBasicBlock = OrigLoop->getHeader();
BasicBlock *BypassBlock = OrigLoop->getLoopPreheader();
BasicBlock *ExitBlock = OrigLoop->getExitBlock();
+ assert(BypassBlock && "Invalid loop structure");
assert(ExitBlock && "Must have an exit block");
// Some loops have a single integer induction variable, while other loops
@@ -1546,18 +2119,30 @@ InnerLoopVectorizer::createEmptyLoop(LoopVectorizationLegality *Legal) {
IdxTy->getPrimitiveSizeInBits())
ExitCount = SE->getTruncateOrNoop(ExitCount, IdxTy);
- ExitCount = SE->getNoopOrZeroExtend(ExitCount, IdxTy);
+ const SCEV *BackedgeTakeCount = SE->getNoopOrZeroExtend(ExitCount, IdxTy);
// Get the total trip count from the count by adding 1.
- ExitCount = SE->getAddExpr(ExitCount,
- SE->getConstant(ExitCount->getType(), 1));
+ ExitCount = SE->getAddExpr(BackedgeTakeCount,
+ SE->getConstant(BackedgeTakeCount->getType(), 1));
// Expand the trip count and place the new instructions in the preheader.
// Notice that the pre-header does not change, only the loop body.
SCEVExpander Exp(*SE, "induction");
- // Count holds the overall loop count (N).
- Value *Count = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
- BypassBlock->getTerminator());
+ // We need to test whether the backedge-taken count is uint##_max. Adding one
+ // to it will cause overflow and an incorrect loop trip count in the vector
+ // body. In case of overflow we want to directly jump to the scalar remainder
+ // loop.
+ Value *BackedgeCount =
+ Exp.expandCodeFor(BackedgeTakeCount, BackedgeTakeCount->getType(),
+ BypassBlock->getTerminator());
+ if (BackedgeCount->getType()->isPointerTy())
+ BackedgeCount = CastInst::CreatePointerCast(BackedgeCount, IdxTy,
+ "backedge.ptrcnt.to.int",
+ BypassBlock->getTerminator());
+ Instruction *CheckBCOverflow =
+ CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, BackedgeCount,
+ Constant::getAllOnesValue(BackedgeCount->getType()),
+ "backedge.overflow", BypassBlock->getTerminator());
// The loop index does not have to start at Zero. Find the original start
// value from the induction PHI node. If we don't have an induction variable
@@ -1568,7 +2153,18 @@ InnerLoopVectorizer::createEmptyLoop(LoopVectorizationLegality *Legal) {
IdxTy):
ConstantInt::get(IdxTy, 0);
- assert(BypassBlock && "Invalid loop structure");
+ // We need an instruction to anchor the overflow check on. StartIdx needs to
+ // be defined before the overflow check branch. Because the scalar preheader
+  // is going to merge the start index, the overflow branch block needs to
+ // contain a definition of the start index.
+ Instruction *OverflowCheckAnchor = BinaryOperator::CreateAdd(
+ StartIdx, ConstantInt::get(IdxTy, 0), "overflow.check.anchor",
+ BypassBlock->getTerminator());
+
+ // Count holds the overall loop count (N).
+ Value *Count = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
+ BypassBlock->getTerminator());
+
LoopBypassBlocks.push_back(BypassBlock);
// Split the single block loop into the two loop structure described above.
@@ -1637,27 +2233,69 @@ InnerLoopVectorizer::createEmptyLoop(LoopVectorizationLegality *Legal) {
// Now, compare the new count to zero. If it is zero skip the vector loop and
// jump to the scalar loop.
- Value *Cmp = BypassBuilder.CreateICmpEQ(IdxEndRoundDown, StartIdx,
- "cmp.zero");
+ Value *Cmp =
+ BypassBuilder.CreateICmpEQ(IdxEndRoundDown, StartIdx, "cmp.zero");
BasicBlock *LastBypassBlock = BypassBlock;
+  // Generate code to check that the loop's trip count, which we computed by
+  // adding one to the backedge-taken count, will not overflow.
+ {
+ auto PastOverflowCheck =
+ std::next(BasicBlock::iterator(OverflowCheckAnchor));
+ BasicBlock *CheckBlock =
+ LastBypassBlock->splitBasicBlock(PastOverflowCheck, "overflow.checked");
+ if (ParentLoop)
+ ParentLoop->addBasicBlockToLoop(CheckBlock, LI->getBase());
+ LoopBypassBlocks.push_back(CheckBlock);
+ Instruction *OldTerm = LastBypassBlock->getTerminator();
+ BranchInst::Create(ScalarPH, CheckBlock, CheckBCOverflow, OldTerm);
+ OldTerm->eraseFromParent();
+ LastBypassBlock = CheckBlock;
+ }
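The reason for this new bypass block: the vector trip count is the backedge-taken count plus one, and that addition can wrap. A small worked sketch of the failure mode the check guards against (the narrow type and helper name are illustrative only):

    #include <cstdint>
    // If the backedge-taken count already equals the maximum value of its type,
    // adding one to form the trip count wraps to zero, which is why the
    // "backedge.overflow" compare must route execution to the scalar remainder.
    bool tripCountWouldOverflow(uint8_t BackedgeTakenCount) {
      return BackedgeTakenCount == UINT8_MAX;   // (BTC + 1) would wrap to 0
    }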
+
+ // Generate the code to check that the strides we assumed to be one are really
+ // one. We want the new basic block to start at the first instruction in a
+ // sequence of instructions that form a check.
+ Instruction *StrideCheck;
+ Instruction *FirstCheckInst;
+ std::tie(FirstCheckInst, StrideCheck) =
+ addStrideCheck(LastBypassBlock->getTerminator());
+ if (StrideCheck) {
+ // Create a new block containing the stride check.
+ BasicBlock *CheckBlock =
+ LastBypassBlock->splitBasicBlock(FirstCheckInst, "vector.stridecheck");
+ if (ParentLoop)
+ ParentLoop->addBasicBlockToLoop(CheckBlock, LI->getBase());
+ LoopBypassBlocks.push_back(CheckBlock);
+
+ // Replace the branch into the memory check block with a conditional branch
+ // for the "few elements case".
+ Instruction *OldTerm = LastBypassBlock->getTerminator();
+ BranchInst::Create(MiddleBlock, CheckBlock, Cmp, OldTerm);
+ OldTerm->eraseFromParent();
+
+ Cmp = StrideCheck;
+ LastBypassBlock = CheckBlock;
+ }
+
  // Generate the code that checks at runtime whether arrays overlap. We put the
// checks into a separate block to make the more common case of few elements
// faster.
- Instruction *MemRuntimeCheck = addRuntimeCheck(Legal,
- BypassBlock->getTerminator());
+ Instruction *MemRuntimeCheck;
+ std::tie(FirstCheckInst, MemRuntimeCheck) =
+ addRuntimeCheck(LastBypassBlock->getTerminator());
if (MemRuntimeCheck) {
// Create a new block containing the memory check.
- BasicBlock *CheckBlock = BypassBlock->splitBasicBlock(MemRuntimeCheck,
- "vector.memcheck");
+ BasicBlock *CheckBlock =
+ LastBypassBlock->splitBasicBlock(MemRuntimeCheck, "vector.memcheck");
if (ParentLoop)
ParentLoop->addBasicBlockToLoop(CheckBlock, LI->getBase());
LoopBypassBlocks.push_back(CheckBlock);
// Replace the branch into the memory check block with a conditional branch
// for the "few elements case".
- Instruction *OldTerm = BypassBlock->getTerminator();
+ Instruction *OldTerm = LastBypassBlock->getTerminator();
BranchInst::Create(MiddleBlock, CheckBlock, Cmp, OldTerm);
OldTerm->eraseFromParent();
@@ -1678,7 +2316,7 @@ InnerLoopVectorizer::createEmptyLoop(LoopVectorizationLegality *Legal) {
// start value.
// This variable saves the new starting index for the scalar loop.
- PHINode *ResumeIndex = 0;
+ PHINode *ResumeIndex = nullptr;
LoopVectorizationLegality::InductionList::iterator I, E;
LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
// Set builder to point to last bypass block.
@@ -1694,9 +2332,22 @@ InnerLoopVectorizer::createEmptyLoop(LoopVectorizationLegality *Legal) {
// truncated version for the scalar loop.
PHINode *TruncResumeVal = (OrigPhi == OldInduction) ?
PHINode::Create(OrigPhi->getType(), 2, "trunc.resume.val",
- MiddleBlock->getTerminator()) : 0;
+ MiddleBlock->getTerminator()) : nullptr;
+
+ // Create phi nodes to merge from the backedge-taken check block.
+ PHINode *BCResumeVal = PHINode::Create(ResumeValTy, 3, "bc.resume.val",
+ ScalarPH->getTerminator());
+ BCResumeVal->addIncoming(ResumeVal, MiddleBlock);
+
+ PHINode *BCTruncResumeVal = nullptr;
+ if (OrigPhi == OldInduction) {
+ BCTruncResumeVal =
+ PHINode::Create(OrigPhi->getType(), 2, "bc.trunc.resume.val",
+ ScalarPH->getTerminator());
+ BCTruncResumeVal->addIncoming(TruncResumeVal, MiddleBlock);
+ }
- Value *EndValue = 0;
+ Value *EndValue = nullptr;
switch (II.IK) {
case LoopVectorizationLegality::IK_NoInduction:
llvm_unreachable("Unknown induction");
@@ -1712,10 +2363,12 @@ InnerLoopVectorizer::createEmptyLoop(LoopVectorizationLegality *Legal) {
BypassBuilder.CreateTrunc(IdxEndRoundDown, OrigPhi->getType());
// The new PHI merges the original incoming value, in case of a bypass,
// or the value at the end of the vectorized loop.
- for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
+ for (unsigned I = 1, E = LoopBypassBlocks.size(); I != E; ++I)
TruncResumeVal->addIncoming(II.StartValue, LoopBypassBlocks[I]);
TruncResumeVal->addIncoming(EndValue, VecBody);
+ BCTruncResumeVal->addIncoming(II.StartValue, LoopBypassBlocks[0]);
+
// We know what the end value is.
EndValue = IdxEndRoundDown;
// We also know which PHI node holds it.
@@ -1761,7 +2414,7 @@ InnerLoopVectorizer::createEmptyLoop(LoopVectorizationLegality *Legal) {
// The new PHI merges the original incoming value, in case of a bypass,
// or the value at the end of the vectorized loop.
- for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) {
+ for (unsigned I = 1, E = LoopBypassBlocks.size(); I != E; ++I) {
if (OrigPhi == OldInduction)
ResumeVal->addIncoming(StartIdx, LoopBypassBlocks[I]);
else
@@ -1771,11 +2424,16 @@ InnerLoopVectorizer::createEmptyLoop(LoopVectorizationLegality *Legal) {
// Fix the scalar body counter (PHI node).
unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);
- // The old inductions phi node in the scalar body needs the truncated value.
- if (OrigPhi == OldInduction)
- OrigPhi->setIncomingValue(BlockIdx, TruncResumeVal);
- else
- OrigPhi->setIncomingValue(BlockIdx, ResumeVal);
+
+ // The old induction's phi node in the scalar body needs the truncated
+ // value.
+ if (OrigPhi == OldInduction) {
+ BCResumeVal->addIncoming(StartIdx, LoopBypassBlocks[0]);
+ OrigPhi->setIncomingValue(BlockIdx, BCTruncResumeVal);
+ } else {
+ BCResumeVal->addIncoming(II.StartValue, LoopBypassBlocks[0]);
+ OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
+ }
}
// If we are generating a new induction variable then we also need to
@@ -1786,7 +2444,7 @@ InnerLoopVectorizer::createEmptyLoop(LoopVectorizationLegality *Legal) {
assert(!ResumeIndex && "Unexpected resume value found");
ResumeIndex = PHINode::Create(IdxTy, 2, "new.indc.resume.val",
MiddleBlock->getTerminator());
- for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
+ for (unsigned I = 1, E = LoopBypassBlocks.size(); I != E; ++I)
ResumeIndex->addIncoming(StartIdx, LoopBypassBlocks[I]);
ResumeIndex->addIncoming(IdxEndRoundDown, VecBody);
}
@@ -1825,7 +2483,7 @@ InnerLoopVectorizer::createEmptyLoop(LoopVectorizationLegality *Legal) {
LoopScalarPreHeader = ScalarPH;
LoopMiddleBlock = MiddleBlock;
LoopExitBlock = ExitBlock;
- LoopVectorBody = VecBody;
+ LoopVectorBody.push_back(VecBody);
LoopScalarBody = OldBasicBlock;
LoopVectorizeHints Hints(Lp, true);
@@ -1859,148 +2517,6 @@ LoopVectorizationLegality::getReductionIdentity(ReductionKind K, Type *Tp) {
}
}
-static Intrinsic::ID checkUnaryFloatSignature(const CallInst &I,
- Intrinsic::ID ValidIntrinsicID) {
- if (I.getNumArgOperands() != 1 ||
- !I.getArgOperand(0)->getType()->isFloatingPointTy() ||
- I.getType() != I.getArgOperand(0)->getType() ||
- !I.onlyReadsMemory())
- return Intrinsic::not_intrinsic;
-
- return ValidIntrinsicID;
-}
-
-static Intrinsic::ID checkBinaryFloatSignature(const CallInst &I,
- Intrinsic::ID ValidIntrinsicID) {
- if (I.getNumArgOperands() != 2 ||
- !I.getArgOperand(0)->getType()->isFloatingPointTy() ||
- !I.getArgOperand(1)->getType()->isFloatingPointTy() ||
- I.getType() != I.getArgOperand(0)->getType() ||
- I.getType() != I.getArgOperand(1)->getType() ||
- !I.onlyReadsMemory())
- return Intrinsic::not_intrinsic;
-
- return ValidIntrinsicID;
-}
-
-
-static Intrinsic::ID
-getIntrinsicIDForCall(CallInst *CI, const TargetLibraryInfo *TLI) {
- // If we have an intrinsic call, check if it is trivially vectorizable.
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI)) {
- switch (II->getIntrinsicID()) {
- case Intrinsic::sqrt:
- case Intrinsic::sin:
- case Intrinsic::cos:
- case Intrinsic::exp:
- case Intrinsic::exp2:
- case Intrinsic::log:
- case Intrinsic::log10:
- case Intrinsic::log2:
- case Intrinsic::fabs:
- case Intrinsic::copysign:
- case Intrinsic::floor:
- case Intrinsic::ceil:
- case Intrinsic::trunc:
- case Intrinsic::rint:
- case Intrinsic::nearbyint:
- case Intrinsic::round:
- case Intrinsic::pow:
- case Intrinsic::fma:
- case Intrinsic::fmuladd:
- case Intrinsic::lifetime_start:
- case Intrinsic::lifetime_end:
- return II->getIntrinsicID();
- default:
- return Intrinsic::not_intrinsic;
- }
- }
-
- if (!TLI)
- return Intrinsic::not_intrinsic;
-
- LibFunc::Func Func;
- Function *F = CI->getCalledFunction();
- // We're going to make assumptions on the semantics of the functions, check
- // that the target knows that it's available in this environment and it does
- // not have local linkage.
- if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(F->getName(), Func))
- return Intrinsic::not_intrinsic;
-
- // Otherwise check if we have a call to a function that can be turned into a
- // vector intrinsic.
- switch (Func) {
- default:
- break;
- case LibFunc::sin:
- case LibFunc::sinf:
- case LibFunc::sinl:
- return checkUnaryFloatSignature(*CI, Intrinsic::sin);
- case LibFunc::cos:
- case LibFunc::cosf:
- case LibFunc::cosl:
- return checkUnaryFloatSignature(*CI, Intrinsic::cos);
- case LibFunc::exp:
- case LibFunc::expf:
- case LibFunc::expl:
- return checkUnaryFloatSignature(*CI, Intrinsic::exp);
- case LibFunc::exp2:
- case LibFunc::exp2f:
- case LibFunc::exp2l:
- return checkUnaryFloatSignature(*CI, Intrinsic::exp2);
- case LibFunc::log:
- case LibFunc::logf:
- case LibFunc::logl:
- return checkUnaryFloatSignature(*CI, Intrinsic::log);
- case LibFunc::log10:
- case LibFunc::log10f:
- case LibFunc::log10l:
- return checkUnaryFloatSignature(*CI, Intrinsic::log10);
- case LibFunc::log2:
- case LibFunc::log2f:
- case LibFunc::log2l:
- return checkUnaryFloatSignature(*CI, Intrinsic::log2);
- case LibFunc::fabs:
- case LibFunc::fabsf:
- case LibFunc::fabsl:
- return checkUnaryFloatSignature(*CI, Intrinsic::fabs);
- case LibFunc::copysign:
- case LibFunc::copysignf:
- case LibFunc::copysignl:
- return checkBinaryFloatSignature(*CI, Intrinsic::copysign);
- case LibFunc::floor:
- case LibFunc::floorf:
- case LibFunc::floorl:
- return checkUnaryFloatSignature(*CI, Intrinsic::floor);
- case LibFunc::ceil:
- case LibFunc::ceilf:
- case LibFunc::ceill:
- return checkUnaryFloatSignature(*CI, Intrinsic::ceil);
- case LibFunc::trunc:
- case LibFunc::truncf:
- case LibFunc::truncl:
- return checkUnaryFloatSignature(*CI, Intrinsic::trunc);
- case LibFunc::rint:
- case LibFunc::rintf:
- case LibFunc::rintl:
- return checkUnaryFloatSignature(*CI, Intrinsic::rint);
- case LibFunc::nearbyint:
- case LibFunc::nearbyintf:
- case LibFunc::nearbyintl:
- return checkUnaryFloatSignature(*CI, Intrinsic::nearbyint);
- case LibFunc::round:
- case LibFunc::roundf:
- case LibFunc::roundl:
- return checkUnaryFloatSignature(*CI, Intrinsic::round);
- case LibFunc::pow:
- case LibFunc::powf:
- case LibFunc::powl:
- return checkBinaryFloatSignature(*CI, Intrinsic::pow);
- }
-
- return Intrinsic::not_intrinsic;
-}
-
/// This function translates the reduction kind to an LLVM binary operator.
static unsigned
getReductionBinOp(LoopVectorizationLegality::ReductionKind Kind) {
@@ -2093,30 +2609,56 @@ struct CSEDenseMapInfo {
};
}
+/// \brief Check whether this block is a predicated block.
+/// Due to if predication of stores we might create a sequence of "if(pred) a[i]
+/// = ...; " blocks. We start with one vectorized basic block. For every
+/// conditional block we split this vectorized block. Therefore, every second
+/// block will be a predicated one.
+static bool isPredicatedBlock(unsigned BlockNum) {
+ return BlockNum % 2;
+}
+
///\brief Perform cse of induction variable instructions.
-static void cse(BasicBlock *BB) {
+static void cse(SmallVector<BasicBlock *, 4> &BBs) {
// Perform simple cse.
SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
- Instruction *In = I++;
+ for (unsigned i = 0, e = BBs.size(); i != e; ++i) {
+ BasicBlock *BB = BBs[i];
+ for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
+ Instruction *In = I++;
- if (!CSEDenseMapInfo::canHandle(In))
- continue;
+ if (!CSEDenseMapInfo::canHandle(In))
+ continue;
- // Check if we can replace this instruction with any of the
- // visited instructions.
- if (Instruction *V = CSEMap.lookup(In)) {
- In->replaceAllUsesWith(V);
- In->eraseFromParent();
- continue;
+ // Check if we can replace this instruction with any of the
+ // visited instructions.
+ if (Instruction *V = CSEMap.lookup(In)) {
+ In->replaceAllUsesWith(V);
+ In->eraseFromParent();
+ continue;
+ }
+ // Ignore instructions in conditional blocks. We create "if (pred) a[i] =
+ // ...;" blocks for predicated stores. Every second block is a predicated
+ // block.
+ if (isPredicatedBlock(i))
+ continue;
+
+ CSEMap[In] = In;
}
+ }
+}
- CSEMap[In] = In;
+/// \brief Adds a 'fast' flag to floating point operations.
+static Value *addFastMathFlag(Value *V) {
+ if (isa<FPMathOperator>(V)){
+ FastMathFlags Flags;
+ Flags.setUnsafeAlgebra();
+ cast<Instruction>(V)->setFastMathFlags(Flags);
}
+ return V;
}
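addFastMathFlag exists because vectorizing a floating-point reduction reassociates the operations, so the partial-sum combines built below carry the 'fast' flag. A hypothetical source loop with such a reduction (vectorizable only under relaxed FP semantics, e.g. something like -ffast-math):

    // Summing floats is a reduction: vectorization forms per-lane partial sums
    // and combines them afterwards, changing the order of the additions, so the
    // combining adds emitted in vectorizeLoop() are flagged 'fast'.
    float sum(const float *A, int N) {
      float S = 0.0f;
      for (int I = 0; I < N; ++I)
        S += A[I];
      return S;
    }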
-void
-InnerLoopVectorizer::vectorizeLoop(LoopVectorizationLegality *Legal) {
+void InnerLoopVectorizer::vectorizeLoop() {
//===------------------------------------------------===//
//
// Notice: any optimization or new instruction that go
@@ -2144,7 +2686,7 @@ InnerLoopVectorizer::vectorizeLoop(LoopVectorizationLegality *Legal) {
// Vectorize all of the blocks in the original loop.
for (LoopBlocksDFS::RPOIterator bb = DFS.beginRPO(),
be = DFS.endRPO(); bb != be; ++bb)
- vectorizeBlockInLoop(Legal, *bb, &RdxPHIsToFix);
+ vectorizeBlockInLoop(*bb, &RdxPHIsToFix);
// At this point every instruction in the original loop is widened to
// a vector form. We are almost done. Now, we need to fix the PHI nodes
@@ -2169,10 +2711,10 @@ InnerLoopVectorizer::vectorizeLoop(LoopVectorizationLegality *Legal) {
setDebugLocFromInst(Builder, RdxDesc.StartValue);
// We need to generate a reduction vector from the incoming scalar.
- // To do so, we need to generate the 'identity' vector and overide
+ // To do so, we need to generate the 'identity' vector and override
// one of the elements with the incoming scalar reduction. We need
// to do it in the vector-loop preheader.
- Builder.SetInsertPoint(LoopBypassBlocks.front()->getTerminator());
+ Builder.SetInsertPoint(LoopBypassBlocks[1]->getTerminator());
// This is the vector-clone of the value that leaves the loop.
VectorParts &VectorExit = getVectorValue(RdxDesc.LoopExitInstr);
@@ -2228,7 +2770,8 @@ InnerLoopVectorizer::vectorizeLoop(LoopVectorizationLegality *Legal) {
// first unroll part.
Value *StartVal = (part == 0) ? VectorStart : Identity;
cast<PHINode>(VecRdxPhi[part])->addIncoming(StartVal, VecPreheader);
- cast<PHINode>(VecRdxPhi[part])->addIncoming(Val[part], LoopVectorBody);
+ cast<PHINode>(VecRdxPhi[part])->addIncoming(Val[part],
+ LoopVectorBody.back());
}
// Before each round, move the insertion point right between
@@ -2245,9 +2788,10 @@ InnerLoopVectorizer::vectorizeLoop(LoopVectorizationLegality *Legal) {
VectorParts &RdxExitVal = getVectorValue(RdxDesc.LoopExitInstr);
PHINode *NewPhi = Builder.CreatePHI(VecTy, 2, "rdx.vec.exit.phi");
Value *StartVal = (part == 0) ? VectorStart : Identity;
- for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
+ for (unsigned I = 1, E = LoopBypassBlocks.size(); I != E; ++I)
NewPhi->addIncoming(StartVal, LoopBypassBlocks[I]);
- NewPhi->addIncoming(RdxExitVal[part], LoopVectorBody);
+ NewPhi->addIncoming(RdxExitVal[part],
+ LoopVectorBody.back());
RdxParts.push_back(NewPhi);
}
@@ -2257,9 +2801,10 @@ InnerLoopVectorizer::vectorizeLoop(LoopVectorizationLegality *Legal) {
setDebugLocFromInst(Builder, ReducedPartRdx);
for (unsigned part = 1; part < UF; ++part) {
if (Op != Instruction::ICmp && Op != Instruction::FCmp)
- ReducedPartRdx = Builder.CreateBinOp((Instruction::BinaryOps)Op,
- RdxParts[part], ReducedPartRdx,
- "bin.rdx");
+ // Floating point operations had to be 'fast' to enable the reduction.
+ ReducedPartRdx = addFastMathFlag(
+ Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxParts[part],
+ ReducedPartRdx, "bin.rdx"));
else
ReducedPartRdx = createMinMaxOp(Builder, RdxDesc.MinMaxKind,
ReducedPartRdx, RdxParts[part]);
@@ -2272,7 +2817,7 @@ InnerLoopVectorizer::vectorizeLoop(LoopVectorizationLegality *Legal) {
assert(isPowerOf2_32(VF) &&
"Reduction emission only supported for pow2 vectors!");
Value *TmpVec = ReducedPartRdx;
- SmallVector<Constant*, 32> ShuffleMask(VF, 0);
+ SmallVector<Constant*, 32> ShuffleMask(VF, nullptr);
for (unsigned i = VF; i != 1; i >>= 1) {
// Move the upper half of the vector to the lower half.
for (unsigned j = 0; j != i/2; ++j)
@@ -2289,8 +2834,9 @@ InnerLoopVectorizer::vectorizeLoop(LoopVectorizationLegality *Legal) {
"rdx.shuf");
if (Op != Instruction::ICmp && Op != Instruction::FCmp)
- TmpVec = Builder.CreateBinOp((Instruction::BinaryOps)Op, TmpVec, Shuf,
- "bin.rdx");
+ // Floating point operations had to be 'fast' to enable the reduction.
+ TmpVec = addFastMathFlag(Builder.CreateBinOp(
+ (Instruction::BinaryOps)Op, TmpVec, Shuf, "bin.rdx"));
else
TmpVec = createMinMaxOp(Builder, RdxDesc.MinMaxKind, TmpVec, Shuf);
}
@@ -2300,6 +2846,13 @@ InnerLoopVectorizer::vectorizeLoop(LoopVectorizationLegality *Legal) {
Builder.getInt32(0));
}
+ // Create a phi node that merges control-flow from the backedge-taken check
+ // block and the middle block.
+ PHINode *BCBlockPhi = PHINode::Create(RdxPhi->getType(), 2, "bc.merge.rdx",
+ LoopScalarPreHeader->getTerminator());
+ BCBlockPhi->addIncoming(RdxDesc.StartValue, LoopBypassBlocks[0]);
+ BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
+
// Now, we need to fix the users of the reduction variable
// inside and outside of the scalar remainder loop.
// We know that the loop is in LCSSA form. We need to update the
@@ -2329,7 +2882,7 @@ InnerLoopVectorizer::vectorizeLoop(LoopVectorizationLegality *Legal) {
assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
// Pick the other block.
int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
- (RdxPhi)->setIncomingValue(SelfEdgeBlockIdx, ReducedPartRdx);
+ (RdxPhi)->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
(RdxPhi)->setIncomingValue(IncomingEdgeBlockIdx, RdxDesc.LoopExitInstr);
}// end of for each redux variable.
@@ -2411,7 +2964,6 @@ InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) {
void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
InnerLoopVectorizer::VectorParts &Entry,
- LoopVectorizationLegality *Legal,
unsigned UF, unsigned VF, PhiVector *PV) {
PHINode* P = cast<PHINode>(PN);
// Handle reduction variables:
@@ -2421,7 +2973,7 @@ void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
Type *VecTy = (VF == 1) ? PN->getType() :
VectorType::get(PN->getType(), VF);
Entry[part] = PHINode::Create(VecTy, 2, "vec.phi",
- LoopVectorBody-> getFirstInsertionPt());
+ LoopVectorBody.back()-> getFirstInsertionPt());
}
PV->push_back(P);
return;
@@ -2430,7 +2982,7 @@ void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
setDebugLocFromInst(Builder, P);
// Check for PHI nodes that are lowered to vector selects.
if (P->getParent() != OrigLoop->getHeader()) {
- // We know that all PHIs in non header blocks are converted into
+ // We know that all PHIs in non-header blocks are converted into
// selects, so we don't have to worry about the insertion order and we
// can just use the builder.
// At this point we generate the predication tree. There may be
@@ -2573,9 +3125,7 @@ void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
}
}
-void
-InnerLoopVectorizer::vectorizeBlockInLoop(LoopVectorizationLegality *Legal,
- BasicBlock *BB, PhiVector *PV) {
+void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
// For each instruction in the old loop.
for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
VectorParts &Entry = WidenMap.get(it);
@@ -2586,7 +3136,7 @@ InnerLoopVectorizer::vectorizeBlockInLoop(LoopVectorizationLegality *Legal,
continue;
case Instruction::PHI:{
// Vectorize PHINodes.
- widenPHIInstruction(it, Entry, Legal, UF, VF, PV);
+ widenPHIInstruction(it, Entry, UF, VF, PV);
continue;
}// End of PHI.
@@ -2627,8 +3177,14 @@ InnerLoopVectorizer::vectorizeBlockInLoop(LoopVectorizationLegality *Legal,
if (VecOp && isa<PossiblyExactOperator>(VecOp))
VecOp->setIsExact(BinOp->isExact());
+ // Copy the fast-math flags.
+ if (VecOp && isa<FPMathOperator>(V))
+ VecOp->setFastMathFlags(it->getFastMathFlags());
+
Entry[Part] = V;
}
+
+ propagateMetadata(Entry, it);
break;
}
case Instruction::Select: {
@@ -2656,6 +3212,8 @@ InnerLoopVectorizer::vectorizeBlockInLoop(LoopVectorizationLegality *Legal,
Op0[Part],
Op1[Part]);
}
+
+ propagateMetadata(Entry, it);
break;
}
@@ -2668,19 +3226,21 @@ InnerLoopVectorizer::vectorizeBlockInLoop(LoopVectorizationLegality *Legal,
VectorParts &A = getVectorValue(it->getOperand(0));
VectorParts &B = getVectorValue(it->getOperand(1));
for (unsigned Part = 0; Part < UF; ++Part) {
- Value *C = 0;
+ Value *C = nullptr;
if (FCmp)
C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]);
else
C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]);
Entry[Part] = C;
}
+
+ propagateMetadata(Entry, it);
break;
}
case Instruction::Store:
case Instruction::Load:
- vectorizeMemoryInstruction(it, Legal);
+ vectorizeMemoryInstruction(it);
break;
case Instruction::ZExt:
case Instruction::SExt:
@@ -2707,6 +3267,7 @@ InnerLoopVectorizer::vectorizeBlockInLoop(LoopVectorizationLegality *Legal,
Value *Broadcasted = getBroadcastInstrs(ScalarCast);
for (unsigned Part = 0; Part < UF; ++Part)
Entry[Part] = getConsecutiveVector(Broadcasted, VF * Part, false);
+ propagateMetadata(Entry, it);
break;
}
/// Vectorize casts.
@@ -2716,6 +3277,7 @@ InnerLoopVectorizer::vectorizeBlockInLoop(LoopVectorizationLegality *Legal,
VectorParts &A = getVectorValue(it->getOperand(0));
for (unsigned Part = 0; Part < UF; ++Part)
Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy);
+ propagateMetadata(Entry, it);
break;
}
@@ -2735,9 +3297,14 @@ InnerLoopVectorizer::vectorizeBlockInLoop(LoopVectorizationLegality *Legal,
scalarizeInstruction(it);
break;
default:
+ bool HasScalarOpd = hasVectorInstrinsicScalarOpd(ID, 1);
for (unsigned Part = 0; Part < UF; ++Part) {
SmallVector<Value *, 4> Args;
for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
+ if (HasScalarOpd && i == 1) {
+ Args.push_back(CI->getArgOperand(i));
+ continue;
+ }
VectorParts &Arg = getVectorValue(CI->getArgOperand(i));
Args.push_back(Arg[Part]);
}
@@ -2748,6 +3315,8 @@ InnerLoopVectorizer::vectorizeBlockInLoop(LoopVectorizationLegality *Legal,
Function *F = Intrinsic::getDeclaration(M, ID, Tys);
Entry[Part] = Builder.CreateCall(F, Args);
}
+
+ propagateMetadata(Entry, it);
break;
}
break;
@@ -2772,13 +3341,25 @@ void InnerLoopVectorizer::updateAnalysis() {
for (unsigned I = 1, E = LoopBypassBlocks.size(); I != E; ++I)
DT->addNewBlock(LoopBypassBlocks[I], LoopBypassBlocks[I-1]);
DT->addNewBlock(LoopVectorPreHeader, LoopBypassBlocks.back());
- DT->addNewBlock(LoopVectorBody, LoopVectorPreHeader);
- DT->addNewBlock(LoopMiddleBlock, LoopBypassBlocks.front());
- DT->addNewBlock(LoopScalarPreHeader, LoopMiddleBlock);
+
+ // Due to if predication of stores we might create a sequence of "if(pred)
+ // a[i] = ...; " blocks.
+ for (unsigned i = 0, e = LoopVectorBody.size(); i != e; ++i) {
+ if (i == 0)
+ DT->addNewBlock(LoopVectorBody[0], LoopVectorPreHeader);
+ else if (isPredicatedBlock(i)) {
+ DT->addNewBlock(LoopVectorBody[i], LoopVectorBody[i-1]);
+ } else {
+ DT->addNewBlock(LoopVectorBody[i], LoopVectorBody[i-2]);
+ }
+ }
+
+ DT->addNewBlock(LoopMiddleBlock, LoopBypassBlocks[1]);
+ DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
- DEBUG(DT->verifyAnalysis());
+ DEBUG(DT->verifyDomTree());
}
/// \brief Check whether it is safe to if-convert this phi node.
@@ -2799,8 +3380,10 @@ static bool canIfConvertPHINodes(BasicBlock *BB) {
}
bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
- if (!EnableIfConversion)
+ if (!EnableIfConversion) {
+ emitAnalysis(Report() << "if-conversion is disabled");
return false;
+ }
assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");
@@ -2830,16 +3413,24 @@ bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
BasicBlock *BB = *BI;
// We don't support switch statements inside loops.
- if (!isa<BranchInst>(BB->getTerminator()))
+ if (!isa<BranchInst>(BB->getTerminator())) {
+ emitAnalysis(Report(BB->getTerminator())
+ << "loop contains a switch statement");
return false;
+ }
// We must be able to predicate all blocks that need to be predicated.
if (blockNeedsPredication(BB)) {
- if (!blockCanBePredicated(BB, SafePointes))
+ if (!blockCanBePredicated(BB, SafePointes)) {
+ emitAnalysis(Report(BB->getTerminator())
+ << "control flow cannot be substituted for a select");
return false;
- } else if (BB != Header && !canIfConvertPHINodes(BB))
+ }
+ } else if (BB != Header && !canIfConvertPHINodes(BB)) {
+ emitAnalysis(Report(BB->getTerminator())
+ << "control flow cannot be substituted for a select");
return false;
-
+ }
}
// We can if-convert this loop.
@@ -2849,26 +3440,37 @@ bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
bool LoopVectorizationLegality::canVectorize() {
// We must have a loop in canonical form. Loops with indirectbr in them cannot
// be canonicalized.
- if (!TheLoop->getLoopPreheader())
+ if (!TheLoop->getLoopPreheader()) {
+ emitAnalysis(
+ Report() << "loop control flow is not understood by vectorizer");
return false;
+ }
// We can only vectorize innermost loops.
- if (TheLoop->getSubLoopsVector().size())
+ if (TheLoop->getSubLoopsVector().size()) {
+ emitAnalysis(Report() << "loop is not the innermost loop");
return false;
+ }
// We must have a single backedge.
- if (TheLoop->getNumBackEdges() != 1)
+ if (TheLoop->getNumBackEdges() != 1) {
+ emitAnalysis(
+ Report() << "loop control flow is not understood by vectorizer");
return false;
+ }
// We must have a single exiting block.
- if (!TheLoop->getExitingBlock())
+ if (!TheLoop->getExitingBlock()) {
+ emitAnalysis(
+ Report() << "loop control flow is not understood by vectorizer");
return false;
+ }
// We need to have a loop header.
DEBUG(dbgs() << "LV: Found a loop: " <<
TheLoop->getHeader()->getName() << '\n');
- // Check if we can if-convert non single-bb loops.
+ // Check if we can if-convert non-single-bb loops.
unsigned NumBlocks = TheLoop->getNumBlocks();
if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
@@ -2878,19 +3480,11 @@ bool LoopVectorizationLegality::canVectorize() {
// ScalarEvolution needs to be able to find the exit count.
const SCEV *ExitCount = SE->getBackedgeTakenCount(TheLoop);
if (ExitCount == SE->getCouldNotCompute()) {
+ emitAnalysis(Report() << "could not determine number of loop iterations");
DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n");
return false;
}
- // Do not loop-vectorize loops with a tiny trip count.
- BasicBlock *Latch = TheLoop->getLoopLatch();
- unsigned TC = SE->getSmallConstantTripCount(TheLoop, Latch);
- if (TC > 0u && TC < TinyTripCountVectorThreshold) {
- DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " <<
- "This loop is not worth vectorizing.\n");
- return false;
- }
-
// Check if we can vectorize the instructions and CFG in this loop.
if (!canVectorizeInstrs()) {
DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
@@ -2916,7 +3510,7 @@ bool LoopVectorizationLegality::canVectorize() {
return true;
}
-static Type *convertPointerToIntegerType(DataLayout &DL, Type *Ty) {
+static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
if (Ty->isPointerTy())
return DL.getIntPtrType(Ty);
@@ -2928,7 +3522,7 @@ static Type *convertPointerToIntegerType(DataLayout &DL, Type *Ty) {
return Ty;
}
-static Type* getWiderType(DataLayout &DL, Type *Ty0, Type *Ty1) {
+static Type* getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
Ty0 = convertPointerToIntegerType(DL, Ty0);
Ty1 = convertPointerToIntegerType(DL, Ty1);
if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
@@ -2944,12 +3538,11 @@ static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
// instructions must not have external users.
if (!Reductions.count(Inst))
    // Check that all of the users of the instruction are inside the loop.
- for (Value::use_iterator I = Inst->use_begin(), E = Inst->use_end();
- I != E; ++I) {
- Instruction *U = cast<Instruction>(*I);
+ for (User *U : Inst->users()) {
+ Instruction *UI = cast<Instruction>(U);
// This user may be a reduction exit value.
- if (!TheLoop->contains(U)) {
- DEBUG(dbgs() << "LV: Found an outside user for : " << *U << '\n');
+ if (!TheLoop->contains(UI)) {
+ DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
return true;
}
}
@@ -2981,6 +3574,8 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
if (!PhiTy->isIntegerTy() &&
!PhiTy->isFloatingPointTy() &&
!PhiTy->isPointerTy()) {
+ emitAnalysis(Report(it)
+ << "loop control flow is not understood by vectorizer");
        DEBUG(dbgs() << "LV: Found a non-int, non-pointer PHI.\n");
return false;
}
@@ -2991,13 +3586,17 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
if (*bb != Header) {
// Check that this instruction has no outside users or is an
// identified reduction value with an outside user.
- if(!hasOutsideLoopUser(TheLoop, it, AllowedExit))
+ if (!hasOutsideLoopUser(TheLoop, it, AllowedExit))
continue;
+ emitAnalysis(Report(it) << "value that could not be identified as "
+ "reduction is used outside the loop");
return false;
}
      // We only allow if-converted PHIs with exactly two incoming values.
if (Phi->getNumIncomingValues() != 2) {
+ emitAnalysis(Report(it)
+ << "control flow not understood by vectorizer");
DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
return false;
}
@@ -3028,8 +3627,11 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
// Until we explicitly handle the case of an induction variable with
// an outside loop user we have to give up vectorizing this loop.
- if (hasOutsideLoopUser(TheLoop, it, AllowedExit))
+ if (hasOutsideLoopUser(TheLoop, it, AllowedExit)) {
+ emitAnalysis(Report(it) << "use of induction value outside of the "
+ "loop is not handled by vectorizer");
return false;
+ }
continue;
}
@@ -3072,6 +3674,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
continue;
}
+ emitAnalysis(Report(it) << "unvectorizable operation");
DEBUG(dbgs() << "LV: Found an unidentified PHI."<< *Phi <<"\n");
return false;
}// end of PHI handling
@@ -3080,14 +3683,29 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
// calls and we do handle certain intrinsic and libm functions.
CallInst *CI = dyn_cast<CallInst>(it);
if (CI && !getIntrinsicIDForCall(CI, TLI) && !isa<DbgInfoIntrinsic>(CI)) {
+ emitAnalysis(Report(it) << "call instruction cannot be vectorized");
DEBUG(dbgs() << "LV: Found a call site.\n");
return false;
}
+      // Intrinsics such as powi, cttz and ctlz are legal to vectorize if the
+      // second argument is the same (i.e. loop invariant).
+ if (CI &&
+ hasVectorInstrinsicScalarOpd(getIntrinsicIDForCall(CI, TLI), 1)) {
+ if (!SE->isLoopInvariant(SE->getSCEV(CI->getOperand(1)), TheLoop)) {
+ emitAnalysis(Report(it)
+ << "intrinsic instruction cannot be vectorized");
+ DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
+ return false;
+ }
+ }
+
// Check that the instruction return type is vectorizable.
// Also, we can't vectorize extractelement instructions.
if ((!VectorType::isValidElementType(it->getType()) &&
!it->getType()->isVoidTy()) || isa<ExtractElementInst>(it)) {
+ emitAnalysis(Report(it)
+ << "instruction return type cannot be vectorized");
DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
return false;
}
@@ -3095,14 +3713,24 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
// Check that the stored type is vectorizable.
if (StoreInst *ST = dyn_cast<StoreInst>(it)) {
Type *T = ST->getValueOperand()->getType();
- if (!VectorType::isValidElementType(T))
+ if (!VectorType::isValidElementType(T)) {
+ emitAnalysis(Report(ST) << "store instruction cannot be vectorized");
return false;
+ }
+ if (EnableMemAccessVersioning)
+ collectStridedAcccess(ST);
}
+ if (EnableMemAccessVersioning)
+ if (LoadInst *LI = dyn_cast<LoadInst>(it))
+ collectStridedAcccess(LI);
+
// Reduction instructions are allowed to have exit users.
// All other instructions must not have external users.
- if (hasOutsideLoopUser(TheLoop, it, AllowedExit))
+ if (hasOutsideLoopUser(TheLoop, it, AllowedExit)) {
+ emitAnalysis(Report(it) << "value cannot be used outside the loop");
return false;
+ }
} // next instr.
@@ -3110,13 +3738,148 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
if (!Induction) {
DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
- if (Inductions.empty())
+ if (Inductions.empty()) {
+ emitAnalysis(Report()
+ << "loop induction variable could not be identified");
return false;
+ }
}
return true;
}
+///\brief Remove GEPs whose indices, except the last one, are loop invariant,
+/// and return the induction operand of the gep pointer.
+static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE,
+ const DataLayout *DL, Loop *Lp) {
+ GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
+ if (!GEP)
+ return Ptr;
+
+ unsigned InductionOperand = getGEPInductionOperand(DL, GEP);
+
+ // Check that all of the gep indices are uniform except for our induction
+ // operand.
+ for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
+ if (i != InductionOperand &&
+ !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
+ return Ptr;
+ return GEP->getOperand(InductionOperand);
+}
+
+///\brief Look for a cast use of the passed value.
+static Value *getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
+ Value *UniqueCast = nullptr;
+ for (User *U : Ptr->users()) {
+ CastInst *CI = dyn_cast<CastInst>(U);
+ if (CI && CI->getType() == Ty) {
+ if (!UniqueCast)
+ UniqueCast = CI;
+ else
+ return nullptr;
+ }
+ }
+ return UniqueCast;
+}
+
+///\brief Get the stride of a pointer access in a loop.
+/// Looks for symbolic strides "a[i*stride]". Returns the symbolic stride as a
+/// pointer to the Value, or null otherwise.
+static Value *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE,
+ const DataLayout *DL, Loop *Lp) {
+ const PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
+ if (!PtrTy || PtrTy->isAggregateType())
+ return nullptr;
+
+  // Try to remove a gep instruction to make the pointer (actually the index
+  // at this point) easier to analyze. If OrigPtr is equal to Ptr we are
+  // analyzing the pointer, otherwise we are analyzing the index.
+ Value *OrigPtr = Ptr;
+
+ // The size of the pointer access.
+ int64_t PtrAccessSize = 1;
+
+ Ptr = stripGetElementPtr(Ptr, SE, DL, Lp);
+ const SCEV *V = SE->getSCEV(Ptr);
+
+ if (Ptr != OrigPtr)
+ // Strip off casts.
+ while (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V))
+ V = C->getOperand();
+
+ const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
+ if (!S)
+ return nullptr;
+
+ V = S->getStepRecurrence(*SE);
+ if (!V)
+ return nullptr;
+
+ // Strip off the size of access multiplication if we are still analyzing the
+ // pointer.
+ if (OrigPtr == Ptr) {
+ DL->getTypeAllocSize(PtrTy->getElementType());
+ if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
+ if (M->getOperand(0)->getSCEVType() != scConstant)
+ return nullptr;
+
+ const APInt &APStepVal =
+ cast<SCEVConstant>(M->getOperand(0))->getValue()->getValue();
+
+ // Huge step value - give up.
+ if (APStepVal.getBitWidth() > 64)
+ return nullptr;
+
+ int64_t StepVal = APStepVal.getSExtValue();
+ if (PtrAccessSize != StepVal)
+ return nullptr;
+ V = M->getOperand(1);
+ }
+ }
+
+ // Strip off casts.
+ Type *StripedOffRecurrenceCast = nullptr;
+ if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V)) {
+ StripedOffRecurrenceCast = C->getType();
+ V = C->getOperand();
+ }
+
+ // Look for the loop invariant symbolic value.
+ const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
+ if (!U)
+ return nullptr;
+
+ Value *Stride = U->getValue();
+ if (!Lp->isLoopInvariant(Stride))
+ return nullptr;
+
+ // If we have stripped off the recurrence cast we have to make sure that we
+ // return the value that is used in this loop so that we can replace it later.
+ if (StripedOffRecurrenceCast)
+ Stride = getUniqueCastUse(Stride, Lp, StripedOffRecurrenceCast);
+
+ return Stride;
+}
+
+void LoopVectorizationLegality::collectStridedAcccess(Value *MemAccess) {
+ Value *Ptr = nullptr;
+ if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess))
+ Ptr = LI->getPointerOperand();
+ else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess))
+ Ptr = SI->getPointerOperand();
+ else
+ return;
+
+ Value *Stride = getStrideFromPointer(Ptr, SE, DL, TheLoop);
+ if (!Stride)
+ return;
+
+ DEBUG(dbgs() << "LV: Found a strided access that we can version");
+ DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n");
+ Strides[Ptr] = Stride;
+ StrideSet.insert(Stride);
+}
+
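As an illustration (not from the patch itself), the symbolic-stride machinery above targets loops of the following shape, where the stride is a loop-invariant variable rather than a compile-time constant; collectStridedAcccess records the pointer-to-stride mapping so the loop can later be versioned on a stride == 1 guard. The function name is hypothetical.

    // a[i * stride] and b[i * stride] have a symbolic, loop-invariant stride.
    // Under EnableMemAccessVersioning the vectorizer keeps Strides[Ptr] = stride
    // and can emit:  if (stride == 1) { vector loop } else { scalar loop }.
    void scale_rows(float *a, const float *b, int n, int stride) {
      for (int i = 0; i < n; ++i)
        a[i * stride] = 2.0f * b[i * stride];
    }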
void LoopVectorizationLegality::collectLoopUniforms() {
// We now know that the loop is vectorizable!
// Collect variables that will remain uniform after vectorization.
@@ -3126,6 +3889,16 @@ void LoopVectorizationLegality::collectLoopUniforms() {
// Start with the conditional branch and walk up the block.
Worklist.push_back(Latch->getTerminator()->getOperand(0));
+ // Also add all consecutive pointer values; these values will be uniform
+ // after vectorization (and subsequent cleanup) and, until revectorization is
+ // supported, all dependencies must also be uniform.
+ for (Loop::block_iterator B = TheLoop->block_begin(),
+ BE = TheLoop->block_end(); B != BE; ++B)
+ for (BasicBlock::iterator I = (*B)->begin(), IE = (*B)->end();
+ I != IE; ++I)
+ if (I->getType()->isPointerTy() && isConsecutivePtr(I))
+ Worklist.insert(Worklist.end(), I->op_begin(), I->op_end());
+
while (Worklist.size()) {
Instruction *I = dyn_cast<Instruction>(Worklist.back());
Worklist.pop_back();
@@ -3158,19 +3931,22 @@ public:
/// \brief Set of potential dependent memory accesses.
typedef EquivalenceClasses<MemAccessInfo> DepCandidates;
- AccessAnalysis(DataLayout *Dl, DepCandidates &DA) :
- DL(Dl), DepCands(DA), AreAllWritesIdentified(true),
- AreAllReadsIdentified(true), IsRTCheckNeeded(false) {}
+ AccessAnalysis(const DataLayout *Dl, AliasAnalysis *AA, DepCandidates &DA) :
+ DL(Dl), AST(*AA), DepCands(DA), IsRTCheckNeeded(false) {}
/// \brief Register a load and whether it is only read from.
- void addLoad(Value *Ptr, bool IsReadOnly) {
+ void addLoad(AliasAnalysis::Location &Loc, bool IsReadOnly) {
+ Value *Ptr = const_cast<Value*>(Loc.Ptr);
+ AST.add(Ptr, AliasAnalysis::UnknownSize, Loc.TBAATag);
Accesses.insert(MemAccessInfo(Ptr, false));
if (IsReadOnly)
ReadOnlyPtr.insert(Ptr);
}
/// \brief Register a store.
- void addStore(Value *Ptr) {
+ void addStore(AliasAnalysis::Location &Loc) {
+ Value *Ptr = const_cast<Value*>(Loc.Ptr);
+ AST.add(Ptr, AliasAnalysis::UnknownSize, Loc.TBAATag);
Accesses.insert(MemAccessInfo(Ptr, true));
}
@@ -3178,15 +3954,13 @@ public:
/// non-intersection.
bool canCheckPtrAtRT(LoopVectorizationLegality::RuntimePointerCheck &RtCheck,
unsigned &NumComparisons, ScalarEvolution *SE,
- Loop *TheLoop, bool ShouldCheckStride = false);
+ Loop *TheLoop, ValueToValueMap &Strides,
+ bool ShouldCheckStride = false);
/// \brief Goes over all memory accesses, checks whether a RT check is needed
/// and builds sets of dependent accesses.
void buildDependenceSets() {
- // Process read-write pointers first.
- processMemAccesses(false);
- // Next, process read pointers.
- processMemAccesses(true);
+ processMemAccesses();
}
bool isRTCheckNeeded() { return IsRTCheckNeeded; }
@@ -3198,48 +3972,40 @@ public:
private:
typedef SetVector<MemAccessInfo> PtrAccessSet;
- typedef DenseMap<Value*, MemAccessInfo> UnderlyingObjToAccessMap;
- /// \brief Go over all memory access or only the deferred ones if
- /// \p UseDeferred is true and check whether runtime pointer checks are needed
- /// and build sets of dependency check candidates.
- void processMemAccesses(bool UseDeferred);
+  /// \brief Go over all memory accesses and check whether runtime pointer
+  /// checks are needed and build sets of dependency check candidates.
+ void processMemAccesses();
/// Set of all accesses.
PtrAccessSet Accesses;
- /// Set of access to check after all writes have been processed.
- PtrAccessSet DeferredAccesses;
-
- /// Map of pointers to last access encountered.
- UnderlyingObjToAccessMap ObjToLastAccess;
-
/// Set of accesses that need a further dependence check.
MemAccessInfoSet CheckDeps;
/// Set of pointers that are read only.
SmallPtrSet<Value*, 16> ReadOnlyPtr;
- /// Set of underlying objects already written to.
- SmallPtrSet<Value*, 16> WriteObjects;
+ const DataLayout *DL;
- DataLayout *DL;
+  /// An alias set tracker to partition the access set by underlying object and
+  /// intrinsic property (such as TBAA metadata).
+ AliasSetTracker AST;
/// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need a
/// dependence check.
DepCandidates &DepCands;
- bool AreAllWritesIdentified;
- bool AreAllReadsIdentified;
bool IsRTCheckNeeded;
};
} // end anonymous namespace
/// \brief Check whether a pointer can participate in a runtime bounds check.
-static bool hasComputableBounds(ScalarEvolution *SE, Value *Ptr) {
- const SCEV *PtrScev = SE->getSCEV(Ptr);
+static bool hasComputableBounds(ScalarEvolution *SE, ValueToValueMap &Strides,
+ Value *Ptr) {
+ const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
if (!AR)
return false;
@@ -3249,70 +4015,76 @@ static bool hasComputableBounds(ScalarEvolution *SE, Value *Ptr) {
/// \brief Check the stride of the pointer and ensure that it does not wrap in
/// the address space.
-static int isStridedPtr(ScalarEvolution *SE, DataLayout *DL, Value *Ptr,
- const Loop *Lp);
+static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
+ const Loop *Lp, ValueToValueMap &StridesMap);
bool AccessAnalysis::canCheckPtrAtRT(
- LoopVectorizationLegality::RuntimePointerCheck &RtCheck,
- unsigned &NumComparisons, ScalarEvolution *SE,
- Loop *TheLoop, bool ShouldCheckStride) {
+ LoopVectorizationLegality::RuntimePointerCheck &RtCheck,
+ unsigned &NumComparisons, ScalarEvolution *SE, Loop *TheLoop,
+ ValueToValueMap &StridesMap, bool ShouldCheckStride) {
// Find pointers with computable bounds. We are going to use this information
// to place a runtime bound check.
- unsigned NumReadPtrChecks = 0;
- unsigned NumWritePtrChecks = 0;
bool CanDoRT = true;
bool IsDepCheckNeeded = isDependencyCheckNeeded();
- // We assign consecutive id to access from different dependence sets.
- // Accesses within the same set don't need a runtime check.
- unsigned RunningDepId = 1;
- DenseMap<Value *, unsigned> DepSetId;
-
- for (PtrAccessSet::iterator AI = Accesses.begin(), AE = Accesses.end();
- AI != AE; ++AI) {
- const MemAccessInfo &Access = *AI;
- Value *Ptr = Access.getPointer();
- bool IsWrite = Access.getInt();
-
- // Just add write checks if we have both.
- if (!IsWrite && Accesses.count(MemAccessInfo(Ptr, true)))
- continue;
+ NumComparisons = 0;
- if (IsWrite)
- ++NumWritePtrChecks;
- else
- ++NumReadPtrChecks;
-
- if (hasComputableBounds(SE, Ptr) &&
- // When we run after a failing dependency check we have to make sure we
- // don't have wrapping pointers.
- (!ShouldCheckStride || isStridedPtr(SE, DL, Ptr, TheLoop) == 1)) {
- // The id of the dependence set.
- unsigned DepId;
-
- if (IsDepCheckNeeded) {
- Value *Leader = DepCands.getLeaderValue(Access).getPointer();
- unsigned &LeaderId = DepSetId[Leader];
- if (!LeaderId)
- LeaderId = RunningDepId++;
- DepId = LeaderId;
- } else
- // Each access has its own dependence set.
- DepId = RunningDepId++;
-
- RtCheck.insert(SE, TheLoop, Ptr, IsWrite, DepId);
-
- DEBUG(dbgs() << "LV: Found a runtime check ptr:" << *Ptr << '\n');
- } else {
- CanDoRT = false;
+  // We assign a consecutive id to accesses from different alias sets.
+  // Accesses between different groups don't need to be checked.
+ unsigned ASId = 1;
+ for (auto &AS : AST) {
+ unsigned NumReadPtrChecks = 0;
+ unsigned NumWritePtrChecks = 0;
+
+    // We assign consecutive ids to accesses from different dependence sets.
+    // Accesses within the same set don't need a runtime check.
+ unsigned RunningDepId = 1;
+ DenseMap<Value *, unsigned> DepSetId;
+
+ for (auto A : AS) {
+ Value *Ptr = A.getValue();
+ bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
+ MemAccessInfo Access(Ptr, IsWrite);
+
+ if (IsWrite)
+ ++NumWritePtrChecks;
+ else
+ ++NumReadPtrChecks;
+
+ if (hasComputableBounds(SE, StridesMap, Ptr) &&
+ // When we run after a failing dependency check we have to make sure we
+ // don't have wrapping pointers.
+ (!ShouldCheckStride ||
+ isStridedPtr(SE, DL, Ptr, TheLoop, StridesMap) == 1)) {
+ // The id of the dependence set.
+ unsigned DepId;
+
+ if (IsDepCheckNeeded) {
+ Value *Leader = DepCands.getLeaderValue(Access).getPointer();
+ unsigned &LeaderId = DepSetId[Leader];
+ if (!LeaderId)
+ LeaderId = RunningDepId++;
+ DepId = LeaderId;
+ } else
+ // Each access has its own dependence set.
+ DepId = RunningDepId++;
+
+ RtCheck.insert(SE, TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap);
+
+ DEBUG(dbgs() << "LV: Found a runtime check ptr:" << *Ptr << '\n');
+ } else {
+ CanDoRT = false;
+ }
}
- }
- if (IsDepCheckNeeded && CanDoRT && RunningDepId == 2)
- NumComparisons = 0; // Only one dependence set.
- else {
- NumComparisons = (NumWritePtrChecks * (NumReadPtrChecks +
- NumWritePtrChecks - 1));
+ if (IsDepCheckNeeded && CanDoRT && RunningDepId == 2)
+ NumComparisons += 0; // Only one dependence set.
+ else {
+ NumComparisons += (NumWritePtrChecks * (NumReadPtrChecks +
+ NumWritePtrChecks - 1));
+ }
+
+ ++ASId;
}
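A standalone sketch of how the comparison count above accumulates per alias set; it uses plain C++ in place of the LLVM types, and the helper name is made up. Roughly, each write pointer has to be checked against every other pointer in its set, giving writes * (reads + writes - 1) comparisons, while a set that collapses into a single dependence class contributes none.

    #include <cstdio>

    // Mirrors the per-alias-set accumulation in canCheckPtrAtRT().
    static unsigned runtimeComparisons(unsigned NumWrites, unsigned NumReads,
                                       bool SingleDepSet) {
      if (SingleDepSet)
        return 0;
      return NumWrites * (NumReads + NumWrites - 1);
    }

    int main() {
      unsigned Total = 0;
      Total += runtimeComparisons(1, 2, false); // one write vs. two reads -> 2
      Total += runtimeComparisons(2, 0, false); // two writes vs. each other -> 2
      Total += runtimeComparisons(3, 3, true);  // single dependence set    -> 0
      std::printf("NumComparisons = %u\n", Total); // prints 4
      return 0;
    }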
// If the pointers that we would use for the bounds comparison have different
@@ -3326,6 +4098,9 @@ bool AccessAnalysis::canCheckPtrAtRT(
// Only need to check pointers between two different dependency sets.
if (RtCheck.DependencySetId[i] == RtCheck.DependencySetId[j])
continue;
+ // Only need to check pointers in the same alias set.
+ if (RtCheck.AliasSetId[i] != RtCheck.AliasSetId[j])
+ continue;
Value *PtrI = RtCheck.Pointers[i];
Value *PtrJ = RtCheck.Pointers[j];
@@ -3343,90 +4118,99 @@ bool AccessAnalysis::canCheckPtrAtRT(
return CanDoRT;
}
-static bool isFunctionScopeIdentifiedObject(Value *Ptr) {
- return isNoAliasArgument(Ptr) || isNoAliasCall(Ptr) || isa<AllocaInst>(Ptr);
-}
-
-void AccessAnalysis::processMemAccesses(bool UseDeferred) {
+void AccessAnalysis::processMemAccesses() {
// We process the set twice: first we process read-write pointers, last we
// process read-only pointers. This allows us to skip dependence tests for
// read-only pointers.
- PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;
- for (PtrAccessSet::iterator AI = S.begin(), AE = S.end(); AI != AE; ++AI) {
- const MemAccessInfo &Access = *AI;
- Value *Ptr = Access.getPointer();
- bool IsWrite = Access.getInt();
-
- DepCands.insert(Access);
-
- // Memorize read-only pointers for later processing and skip them in the
- // first round (they need to be checked after we have seen all write
- // pointers). Note: we also mark pointer that are not consecutive as
- // "read-only" pointers (so that we check "a[b[i]] +="). Hence, we need the
- // second check for "!IsWrite".
- bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
- if (!UseDeferred && IsReadOnlyPtr) {
- DeferredAccesses.insert(Access);
- continue;
- }
+ DEBUG(dbgs() << "LV: Processing memory accesses...\n");
+ DEBUG(dbgs() << " AST: "; AST.dump());
+ DEBUG(dbgs() << "LV: Accesses:\n");
+ DEBUG({
+ for (auto A : Accesses)
+ dbgs() << "\t" << *A.getPointer() << " (" <<
+ (A.getInt() ? "write" : (ReadOnlyPtr.count(A.getPointer()) ?
+ "read-only" : "read")) << ")\n";
+ });
+
+ // The AliasSetTracker has nicely partitioned our pointers by metadata
+ // compatibility and potential for underlying-object overlap. As a result, we
+ // only need to check for potential pointer dependencies within each alias
+ // set.
+ for (auto &AS : AST) {
+    // Note that both the alias-set tracker and the alias sets themselves use
+ // linked lists internally and so the iteration order here is deterministic
+ // (matching the original instruction order within each set).
+
+ bool SetHasWrite = false;
+
+ // Map of pointers to last access encountered.
+ typedef DenseMap<Value*, MemAccessInfo> UnderlyingObjToAccessMap;
+ UnderlyingObjToAccessMap ObjToLastAccess;
+
+    // Set of accesses to check after all writes have been processed.
+ PtrAccessSet DeferredAccesses;
+
+ // Iterate over each alias set twice, once to process read/write pointers,
+ // and then to process read-only pointers.
+ for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
+ bool UseDeferred = SetIteration > 0;
+ PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;
+
+ for (auto A : AS) {
+ Value *Ptr = A.getValue();
+ bool IsWrite = S.count(MemAccessInfo(Ptr, true));
+
+ // If we're using the deferred access set, then it contains only reads.
+ bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
+ if (UseDeferred && !IsReadOnlyPtr)
+ continue;
+ // Otherwise, the pointer must be in the PtrAccessSet, either as a read
+ // or a write.
+ assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
+ S.count(MemAccessInfo(Ptr, false))) &&
+ "Alias-set pointer not in the access set?");
+
+ MemAccessInfo Access(Ptr, IsWrite);
+ DepCands.insert(Access);
+
+ // Memorize read-only pointers for later processing and skip them in the
+ // first round (they need to be checked after we have seen all write
+        // pointers). Note: we also mark pointers that are not consecutive as
+ // "read-only" pointers (so that we check "a[b[i]] +="). Hence, we need
+ // the second check for "!IsWrite".
+ if (!UseDeferred && IsReadOnlyPtr) {
+ DeferredAccesses.insert(Access);
+ continue;
+ }
- bool NeedDepCheck = false;
- // Check whether there is the possiblity of dependency because of underlying
- // objects being the same.
- typedef SmallVector<Value*, 16> ValueVector;
- ValueVector TempObjects;
- GetUnderlyingObjects(Ptr, TempObjects, DL);
- for (ValueVector::iterator UI = TempObjects.begin(), UE = TempObjects.end();
- UI != UE; ++UI) {
- Value *UnderlyingObj = *UI;
-
- // If this is a write then it needs to be an identified object. If this a
- // read and all writes (so far) are identified function scope objects we
- // don't need an identified underlying object but only an Argument (the
- // next write is going to invalidate this assumption if it is
- // unidentified).
- // This is a micro-optimization for the case where all writes are
- // identified and we have one argument pointer.
- // Otherwise, we do need a runtime check.
- if ((IsWrite && !isFunctionScopeIdentifiedObject(UnderlyingObj)) ||
- (!IsWrite && (!AreAllWritesIdentified ||
- !isa<Argument>(UnderlyingObj)) &&
- !isIdentifiedObject(UnderlyingObj))) {
- DEBUG(dbgs() << "LV: Found an unidentified " <<
- (IsWrite ? "write" : "read" ) << " ptr: " << *UnderlyingObj <<
- "\n");
- IsRTCheckNeeded = (IsRTCheckNeeded ||
- !isIdentifiedObject(UnderlyingObj) ||
- !AreAllReadsIdentified);
+ // If this is a write - check other reads and writes for conflicts. If
+        // this is a read, only check other writes for conflicts (but only if
+ // there is no other write to the ptr - this is an optimization to
+ // catch "a[i] = a[i] + " without having to do a dependence check).
+ if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
+ CheckDeps.insert(Access);
+ IsRTCheckNeeded = true;
+ }
if (IsWrite)
- AreAllWritesIdentified = false;
- if (!IsWrite)
- AreAllReadsIdentified = false;
+ SetHasWrite = true;
+
+ // Create sets of pointers connected by a shared alias set and
+ // underlying object.
+ typedef SmallVector<Value*, 16> ValueVector;
+ ValueVector TempObjects;
+ GetUnderlyingObjects(Ptr, TempObjects, DL);
+ for (Value *UnderlyingObj : TempObjects) {
+ UnderlyingObjToAccessMap::iterator Prev =
+ ObjToLastAccess.find(UnderlyingObj);
+ if (Prev != ObjToLastAccess.end())
+ DepCands.unionSets(Access, Prev->second);
+
+ ObjToLastAccess[UnderlyingObj] = Access;
+ }
}
-
- // If this is a write - check other reads and writes for conflicts. If
- // this is a read only check other writes for conflicts (but only if there
- // is no other write to the ptr - this is an optimization to catch "a[i] =
- // a[i] + " without having to do a dependence check).
- if ((IsWrite || IsReadOnlyPtr) && WriteObjects.count(UnderlyingObj))
- NeedDepCheck = true;
-
- if (IsWrite)
- WriteObjects.insert(UnderlyingObj);
-
- // Create sets of pointers connected by shared underlying objects.
- UnderlyingObjToAccessMap::iterator Prev =
- ObjToLastAccess.find(UnderlyingObj);
- if (Prev != ObjToLastAccess.end())
- DepCands.unionSets(Access, Prev->second);
-
- ObjToLastAccess[UnderlyingObj] = Access;
}
-
- if (NeedDepCheck)
- CheckDeps.insert(Access);
}
}
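A simplified, self-contained model of the two-pass walk above, using std containers instead of AliasSetTracker/EquivalenceClasses and omitting the underlying-object grouping: read/write pointers are handled first, read-only pointers are deferred, and a dependence check is only queued once the alias set has already seen a write.

    #include <cstdio>
    #include <set>
    #include <string>
    #include <vector>

    struct Access { std::string Ptr; bool IsWrite; };

    // Toy version of the per-alias-set loop in processMemAccesses().
    static std::set<std::string>
    accessesNeedingDepCheck(const std::vector<Access> &AS,
                            const std::set<std::string> &ReadOnly) {
      std::set<std::string> CheckDeps;
      std::vector<Access> Deferred;
      bool SetHasWrite = false;
      for (int Pass = 0; Pass < 2; ++Pass) {
        const std::vector<Access> &S = Pass ? Deferred : AS;
        for (const Access &A : S) {
          bool IsReadOnly = ReadOnly.count(A.Ptr) && !A.IsWrite;
          if (Pass == 0 && IsReadOnly) {
            Deferred.push_back(A); // re-examined after all writes are known
            continue;
          }
          if ((A.IsWrite || IsReadOnly) && SetHasWrite)
            CheckDeps.insert(A.Ptr);
          if (A.IsWrite)
            SetHasWrite = true;
        }
      }
      return CheckDeps;
    }

    int main() {
      std::vector<Access> AS = {{"a", true}, {"b", false}, {"c", true}};
      for (const std::string &P : accessesNeedingDepCheck(AS, {"b"}))
        std::printf("needs dependence check: %s\n", P.c_str()); // b and c
      return 0;
    }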
@@ -3468,7 +4252,7 @@ public:
typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;
- MemoryDepChecker(ScalarEvolution *Se, DataLayout *Dl, const Loop *L)
+ MemoryDepChecker(ScalarEvolution *Se, const DataLayout *Dl, const Loop *L)
: SE(Se), DL(Dl), InnermostLoop(L), AccessIdx(0),
ShouldRetryWithRuntimeCheck(false) {}
@@ -3494,7 +4278,7 @@ public:
///
/// Only checks sets with elements in \p CheckDeps.
bool areDepsSafe(AccessAnalysis::DepCandidates &AccessSets,
- MemAccessInfoSet &CheckDeps);
+ MemAccessInfoSet &CheckDeps, ValueToValueMap &Strides);
/// \brief The maximum number of bytes of a vector register we can vectorize
/// the accesses safely with.
@@ -3506,7 +4290,7 @@ public:
private:
ScalarEvolution *SE;
- DataLayout *DL;
+ const DataLayout *DL;
const Loop *InnermostLoop;
/// \brief Maps access locations (ptr, read/write) to program order.
@@ -3521,7 +4305,7 @@ private:
// We can access this many bytes in parallel safely.
unsigned MaxSafeDepDistBytes;
- /// \brief If we see a non constant dependence distance we can still try to
+ /// \brief If we see a non-constant dependence distance we can still try to
/// vectorize this loop with runtime checks.
bool ShouldRetryWithRuntimeCheck;
@@ -3538,7 +4322,8 @@ private:
/// distance is smaller than any other distance encountered so far).
/// Otherwise, this function returns true signaling a possible dependence.
bool isDependent(const MemAccessInfo &A, unsigned AIdx,
- const MemAccessInfo &B, unsigned BIdx);
+ const MemAccessInfo &B, unsigned BIdx,
+ ValueToValueMap &Strides);
/// \brief Check whether the data dependence could prevent store-load
/// forwarding.
@@ -3554,10 +4339,10 @@ static bool isInBoundsGep(Value *Ptr) {
}
/// \brief Check whether the access through \p Ptr has a constant stride.
-static int isStridedPtr(ScalarEvolution *SE, DataLayout *DL, Value *Ptr,
- const Loop *Lp) {
+static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
+ const Loop *Lp, ValueToValueMap &StridesMap) {
const Type *Ty = Ptr->getType();
- assert(Ty->isPointerTy() && "Unexpected non ptr");
+ assert(Ty->isPointerTy() && "Unexpected non-ptr");
// Make sure that the pointer does not point to aggregate types.
const PointerType *PtrTy = cast<PointerType>(Ty);
@@ -3567,7 +4352,8 @@ static int isStridedPtr(ScalarEvolution *SE, DataLayout *DL, Value *Ptr,
return 0;
}
- const SCEV *PtrScev = SE->getSCEV(Ptr);
+ const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, StridesMap, Ptr);
+
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
if (!AR) {
DEBUG(dbgs() << "LV: Bad stride - Not an AddRecExpr pointer "
@@ -3671,7 +4457,8 @@ bool MemoryDepChecker::couldPreventStoreLoadForward(unsigned Distance,
}
bool MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
- const MemAccessInfo &B, unsigned BIdx) {
+ const MemAccessInfo &B, unsigned BIdx,
+ ValueToValueMap &Strides) {
assert (AIdx < BIdx && "Must pass arguments in program order");
Value *APtr = A.getPointer();
@@ -3683,11 +4470,16 @@ bool MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
if (!AIsWrite && !BIsWrite)
return false;
- const SCEV *AScev = SE->getSCEV(APtr);
- const SCEV *BScev = SE->getSCEV(BPtr);
+ // We cannot check pointers in different address spaces.
+ if (APtr->getType()->getPointerAddressSpace() !=
+ BPtr->getType()->getPointerAddressSpace())
+ return true;
+
+ const SCEV *AScev = replaceSymbolicStrideSCEV(SE, Strides, APtr);
+ const SCEV *BScev = replaceSymbolicStrideSCEV(SE, Strides, BPtr);
- int StrideAPtr = isStridedPtr(SE, DL, APtr, InnermostLoop);
- int StrideBPtr = isStridedPtr(SE, DL, BPtr, InnermostLoop);
+ int StrideAPtr = isStridedPtr(SE, DL, APtr, InnermostLoop, Strides);
+ int StrideBPtr = isStridedPtr(SE, DL, BPtr, InnermostLoop, Strides);
const SCEV *Src = AScev;
const SCEV *Sink = BScev;
@@ -3721,7 +4513,7 @@ bool MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
if (!C) {
- DEBUG(dbgs() << "LV: Dependence because of non constant distance\n");
+ DEBUG(dbgs() << "LV: Dependence because of non-constant distance\n");
ShouldRetryWithRuntimeCheck = true;
return true;
}
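For illustration (not part of the patch), a loop whose dependence distance is symbolic, which is the case this non-constant-distance path handles by setting ShouldRetryWithRuntimeCheck; the vectorizer then retries with runtime overlap checks instead of giving up outright. The function name is invented.

    // The distance between a[i] and a[i + k] is k, unknown at compile time,
    // so the SCEV difference is not a SCEVConstant; independence can only be
    // established by a runtime check on the two address ranges.
    void shift_add(int *a, int n, int k) {
      for (int i = 0; i < n; ++i)
        a[i + k] = a[i] + 1;
    }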
@@ -3792,9 +4584,9 @@ bool MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
return false;
}
-bool
-MemoryDepChecker::areDepsSafe(AccessAnalysis::DepCandidates &AccessSets,
- MemAccessInfoSet &CheckDeps) {
+bool MemoryDepChecker::areDepsSafe(AccessAnalysis::DepCandidates &AccessSets,
+ MemAccessInfoSet &CheckDeps,
+ ValueToValueMap &Strides) {
MaxSafeDepDistBytes = -1U;
while (!CheckDeps.empty()) {
@@ -3811,16 +4603,16 @@ MemoryDepChecker::areDepsSafe(AccessAnalysis::DepCandidates &AccessSets,
// Check every access pair.
while (AI != AE) {
CheckDeps.erase(*AI);
- EquivalenceClasses<MemAccessInfo>::member_iterator OI = llvm::next(AI);
+ EquivalenceClasses<MemAccessInfo>::member_iterator OI = std::next(AI);
while (OI != AE) {
// Check every accessing instruction pair in program order.
for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
for (std::vector<unsigned>::iterator I2 = Accesses[*OI].begin(),
I2E = Accesses[*OI].end(); I2 != I2E; ++I2) {
- if (*I1 < *I2 && isDependent(*AI, *I1, *OI, *I2))
+ if (*I1 < *I2 && isDependent(*AI, *I1, *OI, *I2, Strides))
return false;
- if (*I2 < *I1 && isDependent(*OI, *I2, *AI, *I1))
+ if (*I2 < *I1 && isDependent(*OI, *I2, *AI, *I1, Strides))
return false;
}
++OI;
@@ -3870,11 +4662,13 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
continue;
LoadInst *Ld = dyn_cast<LoadInst>(it);
- if (!Ld) return false;
- if (!Ld->isSimple() && !IsAnnotatedParallel) {
+ if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) {
+ emitAnalysis(Report(Ld)
+ << "read with atomic ordering or volatile read");
DEBUG(dbgs() << "LV: Found a non-simple load.\n");
return false;
}
+ NumLoads++;
Loads.push_back(Ld);
DepChecker.addAccess(Ld);
continue;
@@ -3883,11 +4677,17 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
// Save 'store' instructions. Abort if other instructions write to memory.
if (it->mayWriteToMemory()) {
StoreInst *St = dyn_cast<StoreInst>(it);
- if (!St) return false;
+ if (!St) {
+ emitAnalysis(Report(it) << "instruction cannot be vectorized");
+ return false;
+ }
if (!St->isSimple() && !IsAnnotatedParallel) {
+ emitAnalysis(Report(St)
+ << "write with atomic ordering or volatile write");
DEBUG(dbgs() << "LV: Found a non-simple store.\n");
return false;
}
+ NumStores++;
Stores.push_back(St);
DepChecker.addAccess(St);
}
@@ -3905,7 +4705,7 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
}
AccessAnalysis::DepCandidates DependentAccesses;
- AccessAnalysis Accesses(DL, DependentAccesses);
+ AccessAnalysis Accesses(DL, AA, DependentAccesses);
// Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
// multiple times on the same object. If the ptr is accessed twice, once
@@ -3920,6 +4720,9 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
Value* Ptr = ST->getPointerOperand();
if (isUniform(Ptr)) {
+ emitAnalysis(
+ Report(ST)
+ << "write to a loop invariant address could not be vectorized");
DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
return false;
}
@@ -3928,7 +4731,15 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
// list. At this phase it is only a 'write' list.
if (Seen.insert(Ptr)) {
++NumReadWrites;
- Accesses.addStore(Ptr);
+
+ AliasAnalysis::Location Loc = AA->getLocation(ST);
+ // The TBAA metadata could have a control dependency on the predication
+ // condition, so we cannot rely on it when determining whether or not we
+ // need runtime pointer checks.
+ if (blockNeedsPredication(ST->getParent()))
+ Loc.TBAATag = nullptr;
+
+ Accesses.addStore(Loc);
}
}
@@ -3951,11 +4762,19 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
// read a few words, modify, and write a few words, and some of the
// words may be written to the same address.
bool IsReadOnlyPtr = false;
- if (Seen.insert(Ptr) || !isStridedPtr(SE, DL, Ptr, TheLoop)) {
+ if (Seen.insert(Ptr) || !isStridedPtr(SE, DL, Ptr, TheLoop, Strides)) {
++NumReads;
IsReadOnlyPtr = true;
}
- Accesses.addLoad(Ptr, IsReadOnlyPtr);
+
+ AliasAnalysis::Location Loc = AA->getLocation(LD);
+ // The TBAA metadata could have a control dependency on the predication
+ // condition, so we cannot rely on it when determining whether or not we
+ // need runtime pointer checks.
+ if (blockNeedsPredication(LD->getParent()))
+ Loc.TBAATag = nullptr;
+
+ Accesses.addLoad(Loc, IsReadOnlyPtr);
}
// If we write (or read-write) to a single destination and there are no
@@ -3975,8 +4794,8 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
unsigned NumComparisons = 0;
bool CanDoRT = false;
if (NeedRTCheck)
- CanDoRT = Accesses.canCheckPtrAtRT(PtrRtCheck, NumComparisons, SE, TheLoop);
-
+ CanDoRT = Accesses.canCheckPtrAtRT(PtrRtCheck, NumComparisons, SE, TheLoop,
+ Strides);
DEBUG(dbgs() << "LV: We need to do " << NumComparisons <<
" pointer comparisons.\n");
@@ -3998,6 +4817,7 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
}
if (NeedRTCheck && !CanDoRT) {
+ emitAnalysis(Report() << "cannot identify array bounds");
DEBUG(dbgs() << "LV: We can't vectorize because we can't find " <<
"the array bounds.\n");
PtrRtCheck.reset();
@@ -4009,8 +4829,8 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
bool CanVecMem = true;
if (Accesses.isDependencyCheckNeeded()) {
DEBUG(dbgs() << "LV: Checking memory dependencies\n");
- CanVecMem = DepChecker.areDepsSafe(DependentAccesses,
- Accesses.getDependenciesToCheck());
+ CanVecMem = DepChecker.areDepsSafe(
+ DependentAccesses, Accesses.getDependenciesToCheck(), Strides);
MaxSafeDepDistBytes = DepChecker.getMaxSafeDepDistBytes();
if (!CanVecMem && DepChecker.shouldRetryWithRuntimeCheck()) {
@@ -4024,10 +4844,18 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
PtrRtCheck.Need = true;
CanDoRT = Accesses.canCheckPtrAtRT(PtrRtCheck, NumComparisons, SE,
- TheLoop, true);
+ TheLoop, Strides, true);
// Check that we did not collect too many pointers or found an unsizeable
// pointer.
if (!CanDoRT || NumComparisons > RuntimeMemoryCheckThreshold) {
+ if (!CanDoRT && NumComparisons > 0)
+ emitAnalysis(Report()
+ << "cannot check memory dependencies at runtime");
+ else
+ emitAnalysis(Report()
+ << NumComparisons << " exceeds limit of "
+ << RuntimeMemoryCheckThreshold
+ << " dependent memory operations checked at runtime");
DEBUG(dbgs() << "LV: Can't vectorize with memory checks\n");
PtrRtCheck.reset();
return false;
@@ -4037,6 +4865,9 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
}
}
+ if (!CanVecMem)
+ emitAnalysis(Report() << "unsafe dependent memory operations in loop");
+
DEBUG(dbgs() << "LV: We" << (NeedRTCheck ? "" : " don't") <<
" need a runtime memory check.\n");
@@ -4080,7 +4911,7 @@ bool LoopVectorizationLegality::AddReductionVar(PHINode *Phi,
// We only allow for a single reduction value to be used outside the loop.
  // This includes users of the reduction variables, which form a cycle that
  // ends in the phi node.
- Instruction *ExitInstruction = 0;
+ Instruction *ExitInstruction = nullptr;
// Indicates that we found a reduction operation in our scan.
bool FoundReduxOp = false;
@@ -4094,7 +4925,7 @@ bool LoopVectorizationLegality::AddReductionVar(PHINode *Phi,
// the number of instruction we saw from the recognized min/max pattern,
// to make sure we only see exactly the two instructions.
unsigned NumCmpSelectPatternInst = 0;
- ReductionInstDesc ReduxDesc(false, 0);
+ ReductionInstDesc ReduxDesc(false, nullptr);
SmallPtrSet<Instruction *, 8> VisitedInsts;
SmallVector<Instruction *, 8> Worklist;
@@ -4162,23 +4993,22 @@ bool LoopVectorizationLegality::AddReductionVar(PHINode *Phi,
// Check whether we found a reduction operator.
FoundReduxOp |= !IsAPhi;
- // Process users of current instruction. Push non PHI nodes after PHI nodes
+ // Process users of current instruction. Push non-PHI nodes after PHI nodes
// onto the stack. This way we are going to have seen all inputs to PHI
// nodes once we get to them.
SmallVector<Instruction *, 8> NonPHIs;
SmallVector<Instruction *, 8> PHIs;
- for (Value::use_iterator UI = Cur->use_begin(), E = Cur->use_end(); UI != E;
- ++UI) {
- Instruction *Usr = cast<Instruction>(*UI);
+ for (User *U : Cur->users()) {
+ Instruction *UI = cast<Instruction>(U);
// Check if we found the exit user.
- BasicBlock *Parent = Usr->getParent();
+ BasicBlock *Parent = UI->getParent();
if (!TheLoop->contains(Parent)) {
// Exit if you find multiple outside users or if the header phi node is
// being used. In this case the user uses the value of the previous
        // iteration, in which case we would lose "VF-1" iterations of the
// reduction operation if we vectorize.
- if (ExitInstruction != 0 || Cur == Phi)
+ if (ExitInstruction != nullptr || Cur == Phi)
return false;
// The instruction used by an outside user must be the last instruction
@@ -4194,21 +5024,21 @@ bool LoopVectorizationLegality::AddReductionVar(PHINode *Phi,
// Process instructions only once (termination). Each reduction cycle
// value must only be used once, except by phi nodes and min/max
// reductions which are represented as a cmp followed by a select.
- ReductionInstDesc IgnoredVal(false, 0);
- if (VisitedInsts.insert(Usr)) {
- if (isa<PHINode>(Usr))
- PHIs.push_back(Usr);
+ ReductionInstDesc IgnoredVal(false, nullptr);
+ if (VisitedInsts.insert(UI)) {
+ if (isa<PHINode>(UI))
+ PHIs.push_back(UI);
else
- NonPHIs.push_back(Usr);
- } else if (!isa<PHINode>(Usr) &&
- ((!isa<FCmpInst>(Usr) &&
- !isa<ICmpInst>(Usr) &&
- !isa<SelectInst>(Usr)) ||
- !isMinMaxSelectCmpPattern(Usr, IgnoredVal).IsReduction))
+ NonPHIs.push_back(UI);
+ } else if (!isa<PHINode>(UI) &&
+ ((!isa<FCmpInst>(UI) &&
+ !isa<ICmpInst>(UI) &&
+ !isa<SelectInst>(UI)) ||
+ !isMinMaxSelectCmpPattern(UI, IgnoredVal).IsReduction))
return false;
// Remember that we completed the cycle.
- if (Usr == Phi)
+ if (UI == Phi)
FoundStartPHI = true;
}
Worklist.append(PHIs.begin(), PHIs.end());
@@ -4248,13 +5078,13 @@ LoopVectorizationLegality::isMinMaxSelectCmpPattern(Instruction *I,
assert((isa<ICmpInst>(I) || isa<FCmpInst>(I) || isa<SelectInst>(I)) &&
"Expect a select instruction");
- Instruction *Cmp = 0;
- SelectInst *Select = 0;
+ Instruction *Cmp = nullptr;
+ SelectInst *Select = nullptr;
// We must handle the select(cmp()) as a single instruction. Advance to the
// select.
if ((Cmp = dyn_cast<ICmpInst>(I)) || (Cmp = dyn_cast<FCmpInst>(I))) {
- if (!Cmp->hasOneUse() || !(Select = dyn_cast<SelectInst>(*I->use_begin())))
+ if (!Cmp->hasOneUse() || !(Select = dyn_cast<SelectInst>(*I->user_begin())))
return ReductionInstDesc(false, I);
return ReductionInstDesc(Select, Prev.MinMaxKind);
}
@@ -4399,7 +5229,16 @@ bool LoopVectorizationLegality::blockCanBePredicated(BasicBlock *BB,
}
// We don't predicate stores at the moment.
- if (it->mayWriteToMemory() || it->mayThrow())
+ if (it->mayWriteToMemory()) {
+ StoreInst *SI = dyn_cast<StoreInst>(it);
+ // We only support predication of stores in basic blocks with one
+ // predecessor.
+ if (!SI || ++NumPredStores > NumberOfStoresToPredicate ||
+ !SafePtrs.count(SI->getPointerOperand()) ||
+ !SI->getParent()->getSinglePredecessor())
+ return false;
+ }
+ if (it->mayThrow())
return false;
// Check that we don't have a constant expression that can trap as operand.
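A hedged illustration, not taken from the patch: with this change a guarded store can be predicated as long as its block has a single predecessor, the pointer is in SafePtrs, and at most NumberOfStoresToPredicate such stores appear. A loop of roughly the following shape plausibly qualifies, since a[i] is also read unconditionally, so its address is known to be safe to access; the function name is made up.

    // Previously any store in a block needing predication blocked vectorization;
    // the conditional store below can now be if-converted and vectorized.
    void saturate(int *a, int n, int limit) {
      for (int i = 0; i < n; ++i) {
        if (a[i] > limit)   // store executes only for some iterations
          a[i] = limit;
      }
    }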
@@ -4426,7 +5265,8 @@ bool LoopVectorizationLegality::blockCanBePredicated(BasicBlock *BB,
LoopVectorizationCostModel::VectorizationFactor
LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize,
- unsigned UserVF) {
+ unsigned UserVF,
+ bool ForceVectorization) {
// Width 1 means no vectorize
VectorizationFactor Factor = { 1U, 0U };
if (OptForSize && Legal->getRuntimePointerCheck()->Need) {
@@ -4434,6 +5274,11 @@ LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize,
return Factor;
}
+ if (!EnableCondStoresVectorization && Legal->NumPredStores) {
+ DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
+ return Factor;
+ }
+
// Find the trip count.
unsigned TC = SE->getSmallConstantTripCount(TheLoop, TheLoop->getLoopLatch());
DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
@@ -4491,8 +5336,18 @@ LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize,
}
float Cost = expectedCost(1);
+#ifndef NDEBUG
+ const float ScalarCost = Cost;
+#endif /* NDEBUG */
unsigned Width = 1;
- DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)Cost << ".\n");
+ DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
+
+ // Ignore scalar width, because the user explicitly wants vectorization.
+ if (ForceVectorization && VF > 1) {
+ Width = 2;
+ Cost = expectedCost(Width) / (float)Width;
+ }
+
for (unsigned i=2; i <= VF; i*=2) {
// Notice that the vector loop needs to be executed less times, so
// we need to divide the cost of the vector loops by the width of
@@ -4506,7 +5361,10 @@ LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize,
}
}
- DEBUG(dbgs() << "LV: Selecting VF = : "<< Width << ".\n");
+ DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
+ << "LV: Vectorization seems to be not beneficial, "
+ << "but was forced by a user.\n");
+ DEBUG(dbgs() << "LV: Selecting VF: "<< Width << ".\n");
Factor.Width = Width;
Factor.Cost = Width * Cost;
return Factor;
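A self-contained sketch of the selection logic above, with a hypothetical per-VF cost table standing in for expectedCost(); note how ForceVectorization seeds the search at width 2 so the scalar loop can never win outright.

    #include <cstdio>
    #include <vector>

    // CostAtVF[k] is an assumed cost of one loop body vectorized by 2^k
    // (index 0 = scalar); the amortized cost is cost / VF.
    static unsigned selectWidth(const std::vector<float> &CostAtVF,
                                bool ForceVectorization) {
      float Cost = CostAtVF[0];
      unsigned Width = 1;
      if (ForceVectorization && CostAtVF.size() > 1) {
        Width = 2;                      // ignore the scalar cost entirely
        Cost = CostAtVF[1] / 2.0f;
      }
      unsigned VF = 2;
      for (size_t k = 1; k < CostAtVF.size(); ++k, VF *= 2) {
        float VectorCost = CostAtVF[k] / (float)VF;
        if (VectorCost < Cost) {
          Cost = VectorCost;
          Width = VF;
        }
      }
      return Width;
    }

    int main() {
      // Scalar costs 10, VF=2 costs 14 (7 per scalar iteration), VF=4 costs 36 (9).
      std::printf("chosen VF = %u\n", selectWidth({10, 14, 36}, false)); // 2
      return 0;
    }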
@@ -4589,9 +5447,17 @@ LoopVectorizationCostModel::selectUnrollFactor(bool OptForSize,
if (TC > 1 && TC < TinyTripCountUnrollThreshold)
return 1;
- unsigned TargetVectorRegisters = TTI.getNumberOfRegisters(true);
- DEBUG(dbgs() << "LV: The target has " << TargetVectorRegisters <<
- " vector registers\n");
+ unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
+ DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters <<
+ " registers\n");
+
+ if (VF == 1) {
+ if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
+ TargetNumRegisters = ForceTargetNumScalarRegs;
+ } else {
+ if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
+ TargetNumRegisters = ForceTargetNumVectorRegs;
+ }
LoopVectorizationCostModel::RegisterUsage R = calculateRegisterUsage();
// We divide by these constants so assume that we have at least one
@@ -4604,12 +5470,29 @@ LoopVectorizationCostModel::selectUnrollFactor(bool OptForSize,
// registers. These registers are used by all of the unrolled instances.
// Next, divide the remaining registers by the number of registers that is
// required by the loop, in order to estimate how many parallel instances
- // fit without causing spills.
- unsigned UF = (TargetVectorRegisters - R.LoopInvariantRegs) / R.MaxLocalUsers;
+ // fit without causing spills. All of this is rounded down if necessary to be
+ // a power of two. We want power of two unroll factors to simplify any
+ // addressing operations or alignment considerations.
+ unsigned UF = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
+ R.MaxLocalUsers);
+
+ // Don't count the induction variable as unrolled.
+ if (EnableIndVarRegisterHeur)
+ UF = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
+ std::max(1U, (R.MaxLocalUsers - 1)));
// Clamp the unroll factor ranges to reasonable factors.
unsigned MaxUnrollSize = TTI.getMaximumUnrollFactor();
+ // Check if the user has overridden the unroll max.
+ if (VF == 1) {
+ if (ForceTargetMaxScalarUnrollFactor.getNumOccurrences() > 0)
+ MaxUnrollSize = ForceTargetMaxScalarUnrollFactor;
+ } else {
+ if (ForceTargetMaxVectorUnrollFactor.getNumOccurrences() > 0)
+ MaxUnrollSize = ForceTargetMaxVectorUnrollFactor;
+ }
+
// If we did not calculate the cost for VF (because the user selected the VF)
// then we calculate the cost of VF here.
if (LoopCost == 0)
@@ -4622,32 +5505,40 @@ LoopVectorizationCostModel::selectUnrollFactor(bool OptForSize,
else if (UF < 1)
UF = 1;
- bool HasReductions = Legal->getReductionVars()->size();
-
- // Decide if we want to unroll if we decided that it is legal to vectorize
- // but not profitable.
- if (VF == 1) {
- if (TheLoop->getNumBlocks() > 1 || !HasReductions ||
- LoopCost > SmallLoopCost)
- return 1;
-
- return UF;
- }
-
- if (HasReductions) {
+ // Unroll if we vectorized this loop and there is a reduction that could
+ // benefit from unrolling.
+ if (VF > 1 && Legal->getReductionVars()->size()) {
DEBUG(dbgs() << "LV: Unrolling because of reductions.\n");
return UF;
}
- // We want to unroll tiny loops in order to reduce the loop overhead.
- // We assume that the cost overhead is 1 and we use the cost model
- // to estimate the cost of the loop and unroll until the cost of the
- // loop overhead is about 5% of the cost of the loop.
+ // Note that if we've already vectorized the loop we will have done the
+ // runtime check and so unrolling won't require further checks.
+ bool UnrollingRequiresRuntimePointerCheck =
+ (VF == 1 && Legal->getRuntimePointerCheck()->Need);
+
+ // We want to unroll small loops in order to reduce the loop overhead and
+ // potentially expose ILP opportunities.
DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
- if (LoopCost < SmallLoopCost) {
+ if (!UnrollingRequiresRuntimePointerCheck &&
+ LoopCost < SmallLoopCost) {
+ // We assume that the cost overhead is 1 and we use the cost model
+ // to estimate the cost of the loop and unroll until the cost of the
+ // loop overhead is about 5% of the cost of the loop.
+ unsigned SmallUF = std::min(UF, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
+
+ // Unroll until store/load ports (estimated by max unroll factor) are
+ // saturated.
+ unsigned StoresUF = UF / (Legal->NumStores ? Legal->NumStores : 1);
+ unsigned LoadsUF = UF / (Legal->NumLoads ? Legal->NumLoads : 1);
+
+ if (EnableLoadStoreRuntimeUnroll && std::max(StoresUF, LoadsUF) > SmallUF) {
+ DEBUG(dbgs() << "LV: Unrolling to saturate store or load ports.\n");
+ return std::max(StoresUF, LoadsUF);
+ }
+
DEBUG(dbgs() << "LV: Unrolling to reduce branch cost.\n");
- unsigned NewUF = SmallLoopCost / (LoopCost + 1);
- return std::min(NewUF, UF);
+ return SmallUF;
}
DEBUG(dbgs() << "LV: Not Unrolling.\n");
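A standalone sketch, with made-up numbers, of the register-pressure part of the unroll-factor computation above (the induction-variable and load/store-port heuristics are left out): subtract the loop-invariant registers, divide by the peak number of simultaneously live values, round down to a power of two, and clamp to the target maximum.

    #include <algorithm>
    #include <cstdio>

    // Rounds down to a power of two, like llvm::PowerOf2Floor().
    static unsigned powerOf2Floor(unsigned N) {
      unsigned P = 1;
      while (P * 2 <= N)
        P *= 2;
      return N ? P : 0;
    }

    // Hypothetical mirror of the UF computation in selectUnrollFactor().
    static unsigned computeUF(unsigned TargetNumRegisters,
                              unsigned LoopInvariantRegs,
                              unsigned MaxLocalUsers, unsigned MaxUnrollSize) {
      unsigned UF =
          powerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
      UF = std::min(UF, MaxUnrollSize);
      return std::max(UF, 1u);
    }

    int main() {
      // 16 registers, 3 loop-invariant values, 5 live values at the peak:
      // (16 - 3) / 5 = 2, already a power of two, below the target max of 8.
      std::printf("UF = %u\n", computeUF(16, 3, 5, 8)); // prints UF = 2
      return 0;
    }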
@@ -4783,6 +5674,11 @@ unsigned LoopVectorizationCostModel::expectedCost(unsigned VF) {
continue;
unsigned C = getInstructionCost(it, VF);
+
+ // Check if we should override the cost.
+ if (ForceTargetInstructionCost.getNumOccurrences() > 0)
+ C = ForceTargetInstructionCost;
+
BlockCost += C;
DEBUG(dbgs() << "LV: Found an estimated cost of " << C << " for VF " <<
VF << " For instruction: " << *it << '\n');
@@ -4853,6 +5749,12 @@ static bool isLikelyComplexAddressComputation(Value *Ptr,
return StepVal > MaxMergeDistance;
}
+static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
+ if (Legal->hasStride(I->getOperand(0)) || Legal->hasStride(I->getOperand(1)))
+ return true;
+ return false;
+}
+
unsigned
LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
// If we know that this instruction will remain uniform, check the cost of
@@ -4895,15 +5797,25 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
case Instruction::And:
case Instruction::Or:
case Instruction::Xor: {
+ // Since we will replace the stride by 1 the multiplication should go away.
+ if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
+ return 0;
// Certain instructions can be cheaper to vectorize if they have a constant
// second vector operand. One example of this are shifts on x86.
TargetTransformInfo::OperandValueKind Op1VK =
TargetTransformInfo::OK_AnyValue;
TargetTransformInfo::OperandValueKind Op2VK =
TargetTransformInfo::OK_AnyValue;
+ Value *Op2 = I->getOperand(1);
- if (isa<ConstantInt>(I->getOperand(1)))
+ // Check for a splat of a constant or for a non uniform vector of constants.
+ if (isa<ConstantInt>(Op2))
Op2VK = TargetTransformInfo::OK_UniformConstantValue;
+ else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) {
+ Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
+ if (cast<Constant>(Op2)->getSplatValue() != nullptr)
+ Op2VK = TargetTransformInfo::OK_UniformConstantValue;
+ }
return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, Op2VK);
}
@@ -5047,7 +5959,9 @@ char LoopVectorize::ID = 0;
static const char lv_name[] = "Loop Vectorization";
INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfo)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
@@ -5055,8 +5969,8 @@ INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
namespace llvm {
- Pass *createLoopVectorizePass(bool NoUnrolling) {
- return new LoopVectorize(NoUnrolling);
+ Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
+ return new LoopVectorize(NoUnrolling, AlwaysVectorize);
}
}
@@ -5073,7 +5987,8 @@ bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
}
-void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr) {
+void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
+ bool IfPredicateStore) {
assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
// Holds vector parameters or scalars, in case of uniform vals.
SmallVector<VectorParts, 4> Params;
@@ -5113,15 +6028,45 @@ void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr) {
// Does this instruction return a value ?
bool IsVoidRetTy = Instr->getType()->isVoidTy();
- Value *UndefVec = IsVoidRetTy ? 0 :
+ Value *UndefVec = IsVoidRetTy ? nullptr :
UndefValue::get(Instr->getType());
// Create a new entry in the WidenMap and initialize it to Undef or Null.
VectorParts &VecResults = WidenMap.splat(Instr, UndefVec);
+ Instruction *InsertPt = Builder.GetInsertPoint();
+ BasicBlock *IfBlock = Builder.GetInsertBlock();
+ BasicBlock *CondBlock = nullptr;
+
+ VectorParts Cond;
+ Loop *VectorLp = nullptr;
+ if (IfPredicateStore) {
+ assert(Instr->getParent()->getSinglePredecessor() &&
+ "Only support single predecessor blocks");
+ Cond = createEdgeMask(Instr->getParent()->getSinglePredecessor(),
+ Instr->getParent());
+ VectorLp = LI->getLoopFor(IfBlock);
+ assert(VectorLp && "Must have a loop for this block");
+ }
+
// For each vector unroll 'part':
for (unsigned Part = 0; Part < UF; ++Part) {
// For each scalar that we create:
+ // Start an "if (pred) a[i] = ..." block.
+ Value *Cmp = nullptr;
+ if (IfPredicateStore) {
+ if (Cond[Part]->getType()->isVectorTy())
+ Cond[Part] =
+ Builder.CreateExtractElement(Cond[Part], Builder.getInt32(0));
+ Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cond[Part],
+ ConstantInt::get(Cond[Part]->getType(), 1));
+ CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.store");
+ LoopVectorBody.push_back(CondBlock);
+ VectorLp->addBasicBlockToLoop(CondBlock, LI->getBase());
+ // Update Builder with newly created basic block.
+ Builder.SetInsertPoint(InsertPt);
+ }
+
Instruction *Cloned = Instr->clone();
if (!IsVoidRetTy)
Cloned->setName(Instr->getName() + ".cloned");
@@ -5138,13 +6083,26 @@ void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr) {
// so that future users will be able to use it.
if (!IsVoidRetTy)
VecResults[Part] = Cloned;
+
+ // End if-block.
+ if (IfPredicateStore) {
+ BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else");
+ LoopVectorBody.push_back(NewIfBlock);
+ VectorLp->addBasicBlockToLoop(NewIfBlock, LI->getBase());
+ Builder.SetInsertPoint(InsertPt);
+ Instruction *OldBr = IfBlock->getTerminator();
+ BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
+ OldBr->eraseFromParent();
+ IfBlock = NewIfBlock;
+ }
}
}
-void
-InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr,
- LoopVectorizationLegality*) {
- return scalarizeInstruction(Instr);
+void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) {
+ StoreInst *SI = dyn_cast<StoreInst>(Instr);
+ bool IfPredicateStore = (SI && Legal->blockNeedsPredication(SI->getParent()));
+
+ return scalarizeInstruction(Instr, IfPredicateStore);
}
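For illustration only: after this change, unrolling a loop with a conditional store no longer bails out; each cloned scalar copy is wrapped in its own cond.store/else block pair. In source terms, the unroll-by-2 result has roughly the following shape (remainder handling omitted, names invented).

    // Original loop:  for (i) if (c[i]) a[i] = v[i];
    void copy_if_unrolled2(int *a, const int *v, const unsigned char *c, int n) {
      for (int i = 0; i + 1 < n; i += 2) {
        if (c[i])         // "cond.store" block for part 0
          a[i] = v[i];
        if (c[i + 1])     // "cond.store" block for part 1
          a[i + 1] = v[i + 1];
      }
    }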
Value *InnerLoopUnroller::reverseVector(Value *Vec) {
@@ -5163,4 +6121,3 @@ Value *InnerLoopUnroller::getConsecutiveVector(Value* Val, int StartIdx,
Constant *C = ConstantInt::get(ITy, StartIdx, Negate);
return Builder.CreateAdd(Val, C, "induction");
}
-
diff --git a/contrib/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/contrib/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index c72b51f..53a43d9 100644
--- a/contrib/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/contrib/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -15,36 +15,39 @@
// "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//
-#define SV_NAME "slp-vectorizer"
-#define DEBUG_TYPE "SLP"
-
#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Analysis/Verifier.h"
-#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
+#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/VectorUtils.h"
#include <algorithm>
#include <map>
using namespace llvm;
+#define SV_NAME "slp-vectorizer"
+#define DEBUG_TYPE "SLP"
+
static cl::opt<int>
SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
cl::desc("Only vectorize if you gain more than this "
@@ -71,8 +74,6 @@ struct BlockNumbering {
BlockNumbering(BasicBlock *Bb) : BB(Bb), Valid(false) {}
- BlockNumbering() : BB(0), Valid(false) {}
-
void numberInstructions() {
unsigned Loc = 0;
InstrIdx.clear();
@@ -119,15 +120,15 @@ private:
static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
Instruction *I0 = dyn_cast<Instruction>(VL[0]);
if (!I0)
- return 0;
+ return nullptr;
BasicBlock *BB = I0->getParent();
for (int i = 1, e = VL.size(); i < e; i++) {
Instruction *I = dyn_cast<Instruction>(VL[i]);
if (!I)
- return 0;
+ return nullptr;
if (BB != I->getParent())
- return 0;
+ return nullptr;
}
return BB;
}
@@ -148,6 +149,48 @@ static bool isSplat(ArrayRef<Value *> VL) {
return true;
}
+///\returns Opcode that can be clubbed with \p Op to create an alternate
+/// sequence which can later be merged as a ShuffleVector instruction.
+static unsigned getAltOpcode(unsigned Op) {
+ switch (Op) {
+ case Instruction::FAdd:
+ return Instruction::FSub;
+ case Instruction::FSub:
+ return Instruction::FAdd;
+ case Instruction::Add:
+ return Instruction::Sub;
+ case Instruction::Sub:
+ return Instruction::Add;
+ default:
+ return 0;
+ }
+}
+
+///\returns bool representing if Opcode \p Op can be part
+/// of an alternate sequence which can later be merged as
+/// a ShuffleVector instruction.
+static bool canCombineAsAltInst(unsigned Op) {
+ if (Op == Instruction::FAdd || Op == Instruction::FSub ||
+ Op == Instruction::Sub || Op == Instruction::Add)
+ return true;
+ return false;
+}
+
+/// \returns ShuffleVector instruction if instructions in \p VL have
+/// alternate fadd,fsub / fsub,fadd/add,sub/sub,add sequence.
+/// (i.e. e.g. opcodes of fadd,fsub,fadd,fsub...)
+static unsigned isAltInst(ArrayRef<Value *> VL) {
+ Instruction *I0 = dyn_cast<Instruction>(VL[0]);
+ unsigned Opcode = I0->getOpcode();
+ unsigned AltOpcode = getAltOpcode(Opcode);
+ for (int i = 1, e = VL.size(); i < e; i++) {
+ Instruction *I = dyn_cast<Instruction>(VL[i]);
+ if (!I || I->getOpcode() != ((i & 1) ? AltOpcode : Opcode))
+ return 0;
+ }
+ return Instruction::ShuffleVector;
+}
+
/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, or zero.
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
@@ -157,8 +200,11 @@ static unsigned getSameOpcode(ArrayRef<Value *> VL) {
unsigned Opcode = I0->getOpcode();
for (int i = 1, e = VL.size(); i < e; i++) {
Instruction *I = dyn_cast<Instruction>(VL[i]);
- if (!I || Opcode != I->getOpcode())
+ if (!I || Opcode != I->getOpcode()) {
+ if (canCombineAsAltInst(Opcode) && i == 1)
+ return isAltInst(VL);
return 0;
+ }
}
return Opcode;
}
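
[Editor's illustration, not part of the commit: the alternate-opcode support added above accepts bundles whose opcodes alternate, e.g. fadd, fsub, fadd, fsub, so the two interleaved operations can later be merged with a single shufflevector. A minimal standalone sketch of the same alternation test, using local enum values as stand-ins for LLVM's Instruction opcodes:]

// Standalone sketch of the alternating-opcode test used by the SLP
// vectorizer above. Opcode names here are stand-ins, not LLVM's enums.
#include <cassert>
#include <vector>

enum Opcode { Add, Sub, FAdd, FSub, Other };

// Analogue of getAltOpcode(): the opcode that may interleave with Op.
static Opcode altOpcode(Opcode Op) {
  switch (Op) {
  case Add:  return Sub;
  case Sub:  return Add;
  case FAdd: return FSub;
  case FSub: return FAdd;
  default:   return Other;
  }
}

// Analogue of isAltInst(): true if opcodes alternate Op, Alt, Op, Alt, ...
static bool isAlternatingBundle(const std::vector<Opcode> &VL) {
  if (VL.empty())
    return false;
  Opcode Op = VL[0], Alt = altOpcode(Op);
  for (size_t i = 1; i < VL.size(); ++i)
    if (VL[i] != ((i & 1) ? Alt : Op))
      return false;
  return true;
}

int main() {
  assert(isAlternatingBundle({FAdd, FSub, FAdd, FSub}));  // vectorizable bundle
  assert(!isAlternatingBundle({FAdd, FAdd, FSub, FSub})); // not alternating
  return 0;
}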
@@ -179,7 +225,7 @@ static Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL) {
switch (Kind) {
default:
- MD = 0; // Remove unknown metadata
+ MD = nullptr; // Remove unknown metadata
break;
case LLVMContext::MD_tbaa:
MD = MDNode::getMostGenericTBAA(MD, IMD);
@@ -200,7 +246,7 @@ static Type* getSameType(ArrayRef<Value *> VL) {
Type *Ty = VL[0]->getType();
for (int i = 1, e = VL.size(); i < e; i++)
if (VL[i]->getType() != Ty)
- return 0;
+ return nullptr;
return Ty;
}
@@ -343,18 +389,11 @@ public:
typedef SmallPtrSet<Value *, 16> ValueSet;
typedef SmallVector<StoreInst *, 8> StoreList;
- BoUpSLP(Function *Func, ScalarEvolution *Se, DataLayout *Dl,
- TargetTransformInfo *Tti, AliasAnalysis *Aa, LoopInfo *Li,
- DominatorTree *Dt) :
- F(Func), SE(Se), DL(Dl), TTI(Tti), AA(Aa), LI(Li), DT(Dt),
- Builder(Se->getContext()) {
- // Setup the block numbering utility for all of the blocks in the
- // function.
- for (Function::iterator it = F->begin(), e = F->end(); it != e; ++it) {
- BasicBlock *BB = it;
- BlocksNumbers[BB] = BlockNumbering(BB);
- }
- }
+ BoUpSLP(Function *Func, ScalarEvolution *Se, const DataLayout *Dl,
+ TargetTransformInfo *Tti, TargetLibraryInfo *TLi, AliasAnalysis *Aa,
+ LoopInfo *Li, DominatorTree *Dt)
+ : F(Func), SE(Se), DL(Dl), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt),
+ Builder(Se->getContext()) {}
/// \brief Vectorize the tree that starts with the elements in \p VL.
/// Returns the vectorized root.
@@ -364,13 +403,13 @@ public:
/// A negative number means that this is profitable.
int getTreeCost();
- /// Construct a vectorizable tree that starts at \p Roots and is possibly
- /// used by a reduction of \p RdxOps.
- void buildTree(ArrayRef<Value *> Roots, ValueSet *RdxOps = 0);
+ /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
+ /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
+ void buildTree(ArrayRef<Value *> Roots,
+ ArrayRef<Value *> UserIgnoreLst = None);
/// Clear the internal data structures that are created by 'buildTree'.
void deleteTree() {
- RdxOps = 0;
VectorizableTree.clear();
ScalarToTreeEntry.clear();
MustGather.clear();
@@ -383,6 +422,7 @@ public:
/// \brief Perform LICM and CSE on the newly generated gather sequences.
void optimizeGatherSequence();
+
private:
struct TreeEntry;
@@ -442,10 +482,10 @@ private:
/// \returns whether the VectorizableTree is fully vectoriable and will
/// be beneficial even the tree height is tiny.
- bool isFullyVectorizableTinyTree();
+ bool isFullyVectorizableTinyTree();
struct TreeEntry {
- TreeEntry() : Scalars(), VectorizedValue(0), LastScalarIndex(0),
+ TreeEntry() : Scalars(), VectorizedValue(nullptr), LastScalarIndex(0),
NeedToGather(0) {}
/// \returns true if the scalars in VL are equal to this entry.
@@ -521,19 +561,27 @@ private:
/// Holds all of the instructions that we gathered.
SetVector<Instruction *> GatherSeq;
/// A list of blocks that we are going to CSE.
- SmallSet<BasicBlock *, 8> CSEBlocks;
+ SetVector<BasicBlock *> CSEBlocks;
/// Numbers instructions in different blocks.
DenseMap<BasicBlock *, BlockNumbering> BlocksNumbers;
- /// Reduction operators.
- ValueSet *RdxOps;
+ /// \brief Get the corresponding instruction numbering list for a given
+ /// BasicBlock. The list is allocated lazily.
+ BlockNumbering &getBlockNumbering(BasicBlock *BB) {
+ auto I = BlocksNumbers.insert(std::make_pair(BB, BlockNumbering(BB)));
+ return I.first->second;
+ }
+
+ /// List of users to ignore during scheduling and that don't need extracting.
+ ArrayRef<Value *> UserIgnoreList;
// Analysis and block reference.
Function *F;
ScalarEvolution *SE;
- DataLayout *DL;
+ const DataLayout *DL;
TargetTransformInfo *TTI;
+ TargetLibraryInfo *TLI;
AliasAnalysis *AA;
LoopInfo *LI;
DominatorTree *DT;
@@ -541,9 +589,10 @@ private:
IRBuilder<> Builder;
};
-void BoUpSLP::buildTree(ArrayRef<Value *> Roots, ValueSet *Rdx) {
+void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
+ ArrayRef<Value *> UserIgnoreLst) {
deleteTree();
- RdxOps = Rdx;
+ UserIgnoreList = UserIgnoreLst;
if (!getSameType(Roots))
return;
buildTree_rec(Roots, 0);
@@ -560,29 +609,29 @@ void BoUpSLP::buildTree(ArrayRef<Value *> Roots, ValueSet *Rdx) {
if (Entry->NeedToGather)
continue;
- for (Value::use_iterator User = Scalar->use_begin(),
- UE = Scalar->use_end(); User != UE; ++User) {
- DEBUG(dbgs() << "SLP: Checking user:" << **User << ".\n");
+ for (User *U : Scalar->users()) {
+ DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
// Skip in-tree scalars that become vectors.
- if (ScalarToTreeEntry.count(*User)) {
+ if (ScalarToTreeEntry.count(U)) {
DEBUG(dbgs() << "SLP: \tInternal user will be removed:" <<
- **User << ".\n");
- int Idx = ScalarToTreeEntry[*User]; (void) Idx;
+ *U << ".\n");
+ int Idx = ScalarToTreeEntry[U]; (void) Idx;
assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
continue;
}
- Instruction *UserInst = dyn_cast<Instruction>(*User);
+ Instruction *UserInst = dyn_cast<Instruction>(U);
if (!UserInst)
continue;
- // Ignore uses that are part of the reduction.
- if (Rdx && std::find(Rdx->begin(), Rdx->end(), UserInst) != Rdx->end())
+ // Ignore users in the user ignore list.
+ if (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), UserInst) !=
+ UserIgnoreList.end())
continue;
- DEBUG(dbgs() << "SLP: Need to extract:" << **User << " from lane " <<
+ DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " <<
Lane << " from " << *Scalar << ".\n");
- ExternalUses.push_back(ExternalUser(Scalar, *User, Lane));
+ ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
}
}
}
@@ -591,6 +640,7 @@ void BoUpSLP::buildTree(ArrayRef<Value *> Roots, ValueSet *Rdx) {
void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
bool SameTy = getSameType(VL); (void)SameTy;
+ bool isAltShuffle = false;
assert(SameTy && "Invalid types!");
if (Depth == RecursionMaxDepth) {
@@ -612,10 +662,19 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
newTreeEntry(VL, false);
return;
}
+ unsigned Opcode = getSameOpcode(VL);
+
+ // Check that this shuffle vector refers to the alternate
+ // sequence of opcodes.
+ if (Opcode == Instruction::ShuffleVector) {
+ Instruction *I0 = dyn_cast<Instruction>(VL[0]);
+ unsigned Op = I0->getOpcode();
+ if (Op != Instruction::ShuffleVector)
+ isAltShuffle = true;
+ }
// If all of the operands are identical or constant we have a simple solution.
- if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) ||
- !getSameOpcode(VL)) {
+ if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) || !Opcode) {
DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
newTreeEntry(VL, false);
return;
@@ -669,57 +728,57 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
for (unsigned i = 0, e = VL.size(); i != e; ++i) {
Instruction *Scalar = cast<Instruction>(VL[i]);
DEBUG(dbgs() << "SLP: Checking users of " << *Scalar << ". \n");
- for (Value::use_iterator U = Scalar->use_begin(), UE = Scalar->use_end();
- U != UE; ++U) {
- DEBUG(dbgs() << "SLP: \tUser " << **U << ". \n");
- Instruction *User = dyn_cast<Instruction>(*U);
- if (!User) {
+ for (User *U : Scalar->users()) {
+ DEBUG(dbgs() << "SLP: \tUser " << *U << ". \n");
+ Instruction *UI = dyn_cast<Instruction>(U);
+ if (!UI) {
DEBUG(dbgs() << "SLP: Gathering due unknown user. \n");
newTreeEntry(VL, false);
return;
}
// We don't care if the user is in a different basic block.
- BasicBlock *UserBlock = User->getParent();
+ BasicBlock *UserBlock = UI->getParent();
if (UserBlock != BB) {
DEBUG(dbgs() << "SLP: User from a different basic block "
- << *User << ". \n");
+ << *UI << ". \n");
continue;
}
// If this is a PHINode within this basic block then we can place the
// extract wherever we want.
- if (isa<PHINode>(*User)) {
- DEBUG(dbgs() << "SLP: \tWe can schedule PHIs:" << *User << ". \n");
+ if (isa<PHINode>(*UI)) {
+ DEBUG(dbgs() << "SLP: \tWe can schedule PHIs:" << *UI << ". \n");
continue;
}
// Check if this is a safe in-tree user.
- if (ScalarToTreeEntry.count(User)) {
- int Idx = ScalarToTreeEntry[User];
+ if (ScalarToTreeEntry.count(UI)) {
+ int Idx = ScalarToTreeEntry[UI];
int VecLocation = VectorizableTree[Idx].LastScalarIndex;
if (VecLocation <= MyLastIndex) {
DEBUG(dbgs() << "SLP: Gathering due to unschedulable vector. \n");
newTreeEntry(VL, false);
return;
}
- DEBUG(dbgs() << "SLP: In-tree user (" << *User << ") at #" <<
+ DEBUG(dbgs() << "SLP: In-tree user (" << *UI << ") at #" <<
VecLocation << " vector value (" << *Scalar << ") at #"
<< MyLastIndex << ".\n");
continue;
}
- // This user is part of the reduction.
- if (RdxOps && RdxOps->count(User))
+ // Ignore users in the user ignore list.
+ if (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), UI) !=
+ UserIgnoreList.end())
continue;
// Make sure that we can schedule this unknown user.
- BlockNumbering &BN = BlocksNumbers[BB];
- int UserIndex = BN.getIndex(User);
+ BlockNumbering &BN = getBlockNumbering(BB);
+ int UserIndex = BN.getIndex(UI);
if (UserIndex < MyLastIndex) {
DEBUG(dbgs() << "SLP: Can't schedule extractelement for "
- << *User << ". \n");
+ << *UI << ". \n");
newTreeEntry(VL, false);
return;
}
@@ -738,11 +797,10 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
// Check that instructions in this bundle don't reference other instructions.
// The runtime of this check is O(N * N-1 * uses(N)) and a typical N is 4.
for (unsigned i = 0, e = VL.size(); i < e; ++i) {
- for (Value::use_iterator U = VL[i]->use_begin(), UE = VL[i]->use_end();
- U != UE; ++U) {
+ for (User *U : VL[i]->users()) {
for (unsigned j = 0; j < e; ++j) {
- if (i != j && *U == VL[j]) {
- DEBUG(dbgs() << "SLP: Intra-bundle dependencies!" << **U << ". \n");
+ if (i != j && U == VL[j]) {
+ DEBUG(dbgs() << "SLP: Intra-bundle dependencies!" << *U << ". \n");
newTreeEntry(VL, false);
return;
}
@@ -752,8 +810,6 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
- unsigned Opcode = getSameOpcode(VL);
-
// Check if it is safe to sink the loads or the stores.
if (Opcode == Instruction::Load || Opcode == Instruction::Store) {
Instruction *Last = getLastInstruction(VL);
@@ -778,7 +834,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
// Check for terminator values (e.g. invoke).
for (unsigned j = 0; j < VL.size(); ++j)
for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
- TerminatorInst *Term = dyn_cast<TerminatorInst>(cast<PHINode>(VL[j])->getIncomingValue(i));
+ TerminatorInst *Term = dyn_cast<TerminatorInst>(
+ cast<PHINode>(VL[j])->getIncomingValueForBlock(PH->getIncomingBlock(i)));
if (Term) {
DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
newTreeEntry(VL, false);
@@ -793,7 +850,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
ValueList Operands;
// Prepare the operand vector.
for (unsigned j = 0; j < VL.size(); ++j)
- Operands.push_back(cast<PHINode>(VL[j])->getIncomingValue(i));
+ Operands.push_back(cast<PHINode>(VL[j])->getIncomingValueForBlock(
+ PH->getIncomingBlock(i)));
buildTree_rec(Operands, Depth + 1);
}
@@ -910,8 +968,20 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
ValueList Left, Right;
reorderInputsAccordingToOpcode(VL, Left, Right);
- buildTree_rec(Left, Depth + 1);
- buildTree_rec(Right, Depth + 1);
+ BasicBlock *LeftBB = getSameBlock(Left);
+ BasicBlock *RightBB = getSameBlock(Right);
+ // If we have common uses on separate paths in the tree make sure we
+ // process the one with greater common depth first.
+ // We can use block numbering to determine the subtree traversal as
+      // earlier user has to come in between the common use and the later user.
+ if (LeftBB && RightBB && LeftBB == RightBB &&
+ getLastIndex(Right) > getLastIndex(Left)) {
+ buildTree_rec(Right, Depth + 1);
+ buildTree_rec(Left, Depth + 1);
+ } else {
+ buildTree_rec(Left, Depth + 1);
+ buildTree_rec(Right, Depth + 1);
+ }
return;
}
@@ -925,12 +995,57 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
}
return;
}
+ case Instruction::GetElementPtr: {
+ // We don't combine GEPs with complicated (nested) indexing.
+ for (unsigned j = 0; j < VL.size(); ++j) {
+ if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
+ DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
+ newTreeEntry(VL, false);
+ return;
+ }
+ }
+
+ // We can't combine several GEPs into one vector if they operate on
+ // different types.
+ Type *Ty0 = cast<Instruction>(VL0)->getOperand(0)->getType();
+ for (unsigned j = 0; j < VL.size(); ++j) {
+ Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
+ if (Ty0 != CurTy) {
+ DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
+ newTreeEntry(VL, false);
+ return;
+ }
+ }
+
+ // We don't combine GEPs with non-constant indexes.
+ for (unsigned j = 0; j < VL.size(); ++j) {
+ auto Op = cast<Instruction>(VL[j])->getOperand(1);
+ if (!isa<ConstantInt>(Op)) {
+ DEBUG(
+ dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
+ newTreeEntry(VL, false);
+ return;
+ }
+ }
+
+ newTreeEntry(VL, true);
+ DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
+ for (unsigned i = 0, e = 2; i < e; ++i) {
+ ValueList Operands;
+ // Prepare the operand vector.
+ for (unsigned j = 0; j < VL.size(); ++j)
+ Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));
+
+ buildTree_rec(Operands, Depth + 1);
+ }
+ return;
+ }
case Instruction::Store: {
// Check if the stores are consecutive or of we need to swizzle them.
for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
if (!isConsecutiveAccess(VL[i], VL[i + 1])) {
newTreeEntry(VL, false);
- DEBUG(dbgs() << "SLP: Non consecutive store.\n");
+ DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
return;
}
@@ -946,6 +1061,76 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
buildTree_rec(Operands, Depth + 1);
return;
}
+ case Instruction::Call: {
+ // Check if the calls are all to the same vectorizable intrinsic.
+ CallInst *CI = cast<CallInst>(VL[0]);
+ // Check if this is an Intrinsic call or something that can be
+ // represented by an intrinsic call
+ Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
+ if (!isTriviallyVectorizable(ID)) {
+ newTreeEntry(VL, false);
+ DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
+ return;
+ }
+ Function *Int = CI->getCalledFunction();
+ Value *A1I = nullptr;
+ if (hasVectorInstrinsicScalarOpd(ID, 1))
+ A1I = CI->getArgOperand(1);
+ for (unsigned i = 1, e = VL.size(); i != e; ++i) {
+ CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
+ if (!CI2 || CI2->getCalledFunction() != Int ||
+ getIntrinsicIDForCall(CI2, TLI) != ID) {
+ newTreeEntry(VL, false);
+ DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
+ << "\n");
+ return;
+ }
+ // ctlz,cttz and powi are special intrinsics whose second argument
+ // should be same in order for them to be vectorized.
+ if (hasVectorInstrinsicScalarOpd(ID, 1)) {
+ Value *A1J = CI2->getArgOperand(1);
+ if (A1I != A1J) {
+ newTreeEntry(VL, false);
+ DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
+ << " argument "<< A1I<<"!=" << A1J
+ << "\n");
+ return;
+ }
+ }
+ }
+
+ newTreeEntry(VL, true);
+ for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
+ ValueList Operands;
+ // Prepare the operand vector.
+ for (unsigned j = 0; j < VL.size(); ++j) {
+ CallInst *CI2 = dyn_cast<CallInst>(VL[j]);
+ Operands.push_back(CI2->getArgOperand(i));
+ }
+ buildTree_rec(Operands, Depth + 1);
+ }
+ return;
+ }
+ case Instruction::ShuffleVector: {
+ // If this is not an alternate sequence of opcode like add-sub
+ // then do not vectorize this instruction.
+ if (!isAltShuffle) {
+ newTreeEntry(VL, false);
+ DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
+ return;
+ }
+ newTreeEntry(VL, true);
+ DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
+ for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
+ ValueList Operands;
+ // Prepare the operand vector.
+ for (unsigned j = 0; j < VL.size(); ++j)
+ Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));
+
+ buildTree_rec(Operands, Depth + 1);
+ }
+ return;
+ }
default:
newTreeEntry(VL, false);
DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
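
[Editor's illustration, not part of the diff: the Call case added earlier in this function only vectorizes bundles of identical, trivially vectorizable intrinsic calls, and for intrinsics such as ctlz, cttz and powi it additionally requires the scalar second argument to match in every lane. A hedged sketch of that bundle check, with Call, IntrinsicId and hasScalarSecondArg as simplified stand-ins for the LLVM types and helpers:]

// Sketch of the call-bundle legality check described above; the types and
// helpers are toy stand-ins, not LLVM API.
#include <cassert>
#include <vector>

enum IntrinsicId { NotIntrinsic, Sqrt, Powi, Ctlz };

struct Call {
  IntrinsicId Id;
  int SecondArg; // only meaningful for Powi/Ctlz in this sketch
};

static bool hasScalarSecondArg(IntrinsicId Id) {
  return Id == Powi || Id == Ctlz;
}

// All lanes must call the same vectorizable intrinsic; if the intrinsic
// keeps its second operand scalar, that operand must match across lanes.
static bool isVectorizableCallBundle(const std::vector<Call> &VL) {
  if (VL.empty() || VL[0].Id == NotIntrinsic)
    return false;
  for (size_t i = 1; i < VL.size(); ++i) {
    if (VL[i].Id != VL[0].Id)
      return false;
    if (hasScalarSecondArg(VL[0].Id) && VL[i].SecondArg != VL[0].SecondArg)
      return false;
  }
  return true;
}

int main() {
  assert(isVectorizableCallBundle({{Sqrt, 0}, {Sqrt, 0}}));
  assert(!isVectorizableCallBundle({{Powi, 2}, {Powi, 3}})); // scalar arg differs
  return 0;
}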
@@ -969,18 +1154,25 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
}
return getGatherCost(E->Scalars);
}
-
- assert(getSameOpcode(VL) && getSameType(VL) && getSameBlock(VL) &&
- "Invalid VL");
+ unsigned Opcode = getSameOpcode(VL);
+ assert(Opcode && getSameType(VL) && getSameBlock(VL) && "Invalid VL");
Instruction *VL0 = cast<Instruction>(VL[0]);
- unsigned Opcode = VL0->getOpcode();
switch (Opcode) {
case Instruction::PHI: {
return 0;
}
case Instruction::ExtractElement: {
- if (CanReuseExtract(VL))
- return 0;
+ if (CanReuseExtract(VL)) {
+ int DeadCost = 0;
+ for (unsigned i = 0, e = VL.size(); i < e; ++i) {
+ ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
+ if (E->hasOneUse())
+ // Take credit for instruction that will become dead.
+ DeadCost +=
+ TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
+ }
+ return -DeadCost;
+ }
return getGatherCost(VecTy);
}
case Instruction::ZExt:
@@ -1043,12 +1235,26 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
TargetTransformInfo::OperandValueKind Op2VK =
TargetTransformInfo::OK_UniformConstantValue;
- // Check whether all second operands are constant.
- for (unsigned i = 0; i < VL.size(); ++i)
- if (!isa<ConstantInt>(cast<Instruction>(VL[i])->getOperand(1))) {
+ // If all operands are exactly the same ConstantInt then set the
+ // operand kind to OK_UniformConstantValue.
+ // If instead not all operands are constants, then set the operand kind
+ // to OK_AnyValue. If all operands are constants but not the same,
+ // then set the operand kind to OK_NonUniformConstantValue.
+ ConstantInt *CInt = nullptr;
+ for (unsigned i = 0; i < VL.size(); ++i) {
+ const Instruction *I = cast<Instruction>(VL[i]);
+ if (!isa<ConstantInt>(I->getOperand(1))) {
Op2VK = TargetTransformInfo::OK_AnyValue;
break;
}
+ if (i == 0) {
+ CInt = cast<ConstantInt>(I->getOperand(1));
+ continue;
+ }
+ if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
+ CInt != cast<ConstantInt>(I->getOperand(1)))
+ Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
+ }
ScalarCost =
VecTy->getNumElements() *
@@ -1057,6 +1263,20 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
}
return VecCost - ScalarCost;
}
+ case Instruction::GetElementPtr: {
+ TargetTransformInfo::OperandValueKind Op1VK =
+ TargetTransformInfo::OK_AnyValue;
+ TargetTransformInfo::OperandValueKind Op2VK =
+ TargetTransformInfo::OK_UniformConstantValue;
+
+ int ScalarCost =
+ VecTy->getNumElements() *
+ TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
+ int VecCost =
+ TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);
+
+ return VecCost - ScalarCost;
+ }
case Instruction::Load: {
// Cost of wide load - cost of scalar loads.
int ScalarLdCost = VecTy->getNumElements() *
@@ -1071,6 +1291,55 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0);
return VecStCost - ScalarStCost;
}
+ case Instruction::Call: {
+ CallInst *CI = cast<CallInst>(VL0);
+ Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
+
+ // Calculate the cost of the scalar and vector calls.
+ SmallVector<Type*, 4> ScalarTys, VecTys;
+ for (unsigned op = 0, opc = CI->getNumArgOperands(); op!= opc; ++op) {
+ ScalarTys.push_back(CI->getArgOperand(op)->getType());
+ VecTys.push_back(VectorType::get(CI->getArgOperand(op)->getType(),
+ VecTy->getNumElements()));
+ }
+
+ int ScalarCallCost = VecTy->getNumElements() *
+ TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys);
+
+ int VecCallCost = TTI->getIntrinsicInstrCost(ID, VecTy, VecTys);
+
+ DEBUG(dbgs() << "SLP: Call cost "<< VecCallCost - ScalarCallCost
+ << " (" << VecCallCost << "-" << ScalarCallCost << ")"
+ << " for " << *CI << "\n");
+
+ return VecCallCost - ScalarCallCost;
+ }
+ case Instruction::ShuffleVector: {
+ TargetTransformInfo::OperandValueKind Op1VK =
+ TargetTransformInfo::OK_AnyValue;
+ TargetTransformInfo::OperandValueKind Op2VK =
+ TargetTransformInfo::OK_AnyValue;
+ int ScalarCost = 0;
+ int VecCost = 0;
+ for (unsigned i = 0; i < VL.size(); ++i) {
+ Instruction *I = cast<Instruction>(VL[i]);
+ if (!I)
+ break;
+ ScalarCost +=
+ TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK);
+ }
+ // VecCost is equal to sum of the cost of creating 2 vectors
+ // and the cost of creating shuffle.
+ Instruction *I0 = cast<Instruction>(VL[0]);
+ VecCost =
+ TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK);
+ Instruction *I1 = cast<Instruction>(VL[1]);
+ VecCost +=
+ TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK);
+ VecCost +=
+ TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0);
+ return VecCost - ScalarCost;
+ }
default:
llvm_unreachable("Unknown instruction");
}
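
[Editor's illustration, not part of the commit: the second-operand classification added to the binary-operator cost above distinguishes three cases. If every lane's second operand is the same ConstantInt the kind stays OK_UniformConstantValue; if the operands are constants but differ it becomes OK_NonUniformConstantValue; if any lane is non-constant it falls back to OK_AnyValue. A small sketch of the same decision, with each lane's second operand modelled as an (isConstant, value) pair:]

// Sketch of the operand-kind classification used for the binary-op cost
// above. The enumerators mirror TargetTransformInfo's names but are local
// stand-ins; a lane is (isConstant, constantValue).
#include <cassert>
#include <utility>
#include <vector>

enum OperandKind { AnyValue, UniformConstant, NonUniformConstant };

static OperandKind classifySecondOperands(
    const std::vector<std::pair<bool, long>> &Lanes) {
  OperandKind Kind = UniformConstant;
  long First = 0;
  for (size_t i = 0; i < Lanes.size(); ++i) {
    if (!Lanes[i].first)
      return AnyValue;              // any non-constant lane: give up
    if (i == 0) {
      First = Lanes[i].second;      // remember the first constant
      continue;
    }
    if (Kind == UniformConstant && Lanes[i].second != First)
      Kind = NonUniformConstant;    // all constants, but not all equal
  }
  return Kind;
}

int main() {
  assert(classifySecondOperands({{true, 3}, {true, 3}}) == UniformConstant);
  assert(classifySecondOperands({{true, 1}, {true, 2}}) == NonUniformConstant);
  assert(classifySecondOperands({{true, 1}, {false, 0}}) == AnyValue);
  return 0;
}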
@@ -1084,11 +1353,15 @@ bool BoUpSLP::isFullyVectorizableTinyTree() {
if (VectorizableTree.size() != 2)
return false;
+ // Handle splat stores.
+ if (!VectorizableTree[0].NeedToGather && isSplat(VectorizableTree[1].Scalars))
+ return true;
+
// Gathering cost would be too much for tiny trees.
- if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
- return false;
+ if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
+ return false;
- return true;
+ return true;
}
int BoUpSLP::getTreeCost() {
@@ -1113,16 +1386,19 @@ int BoUpSLP::getTreeCost() {
Cost += C;
}
+ SmallSet<Value *, 16> ExtractCostCalculated;
int ExtractCost = 0;
for (UserList::iterator I = ExternalUses.begin(), E = ExternalUses.end();
I != E; ++I) {
+ // We only add extract cost once for the same scalar.
+ if (!ExtractCostCalculated.insert(I->Scalar))
+ continue;
VectorType *VecTy = VectorType::get(I->Scalar->getType(), BundleWidth);
ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
I->Lane);
}
-
DEBUG(dbgs() << "SLP: Total Cost " << Cost + ExtractCost<< ".\n");
return Cost + ExtractCost;
}
@@ -1157,7 +1433,7 @@ Value *BoUpSLP::getPointerOperand(Value *I) {
return LI->getPointerOperand();
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return SI->getPointerOperand();
- return 0;
+ return nullptr;
}
unsigned BoUpSLP::getAddressSpaceOperand(Value *I) {
@@ -1231,13 +1507,13 @@ Value *BoUpSLP::getSinkBarrier(Instruction *Src, Instruction *Dst) {
if (!A.Ptr || !B.Ptr || AA->alias(A, B))
return I;
}
- return 0;
+ return nullptr;
}
int BoUpSLP::getLastIndex(ArrayRef<Value *> VL) {
BasicBlock *BB = cast<Instruction>(VL[0])->getParent();
- assert(BB == getSameBlock(VL) && BlocksNumbers.count(BB) && "Invalid block");
- BlockNumbering &BN = BlocksNumbers[BB];
+ assert(BB == getSameBlock(VL) && "Invalid block");
+ BlockNumbering &BN = getBlockNumbering(BB);
int MaxIdx = BN.getIndex(BB->getFirstNonPHI());
for (unsigned i = 0, e = VL.size(); i < e; ++i)
@@ -1247,8 +1523,8 @@ int BoUpSLP::getLastIndex(ArrayRef<Value *> VL) {
Instruction *BoUpSLP::getLastInstruction(ArrayRef<Value *> VL) {
BasicBlock *BB = cast<Instruction>(VL[0])->getParent();
- assert(BB == getSameBlock(VL) && BlocksNumbers.count(BB) && "Invalid block");
- BlockNumbering &BN = BlocksNumbers[BB];
+ assert(BB == getSameBlock(VL) && "Invalid block");
+ BlockNumbering &BN = getBlockNumbering(BB);
int MaxIdx = BN.getIndex(cast<Instruction>(VL[0]));
for (unsigned i = 1, e = VL.size(); i < e; ++i)
@@ -1307,7 +1583,7 @@ Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const {
if (En->isSame(VL) && En->VectorizedValue)
return En->VectorizedValue;
}
- return 0;
+ return nullptr;
}
Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
@@ -1344,9 +1620,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
setInsertPointAfterBundle(E->Scalars);
return Gather(E->Scalars, VecTy);
}
-
- unsigned Opcode = VL0->getOpcode();
- assert(Opcode == getSameOpcode(E->Scalars) && "Invalid opcode");
+ unsigned Opcode = getSameOpcode(E->Scalars);
switch (Opcode) {
case Instruction::PHI: {
@@ -1528,6 +1802,8 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
VecTy->getPointerTo(AS));
unsigned Alignment = LI->getAlignment();
LI = Builder.CreateLoad(VecPtr);
+ if (!Alignment)
+ Alignment = DL->getABITypeAlignment(LI->getPointerOperand()->getType());
LI->setAlignment(Alignment);
E->VectorizedValue = LI;
return propagateMetadata(LI, E->Scalars);
@@ -1547,14 +1823,123 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
VecTy->getPointerTo(AS));
StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
+ if (!Alignment)
+ Alignment = DL->getABITypeAlignment(SI->getPointerOperand()->getType());
S->setAlignment(Alignment);
E->VectorizedValue = S;
return propagateMetadata(S, E->Scalars);
}
+ case Instruction::GetElementPtr: {
+ setInsertPointAfterBundle(E->Scalars);
+
+ ValueList Op0VL;
+ for (int i = 0, e = E->Scalars.size(); i < e; ++i)
+ Op0VL.push_back(cast<GetElementPtrInst>(E->Scalars[i])->getOperand(0));
+
+ Value *Op0 = vectorizeTree(Op0VL);
+
+ std::vector<Value *> OpVecs;
+ for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
+ ++j) {
+ ValueList OpVL;
+ for (int i = 0, e = E->Scalars.size(); i < e; ++i)
+ OpVL.push_back(cast<GetElementPtrInst>(E->Scalars[i])->getOperand(j));
+
+ Value *OpVec = vectorizeTree(OpVL);
+ OpVecs.push_back(OpVec);
+ }
+
+ Value *V = Builder.CreateGEP(Op0, OpVecs);
+ E->VectorizedValue = V;
+
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ return propagateMetadata(I, E->Scalars);
+
+ return V;
+ }
+ case Instruction::Call: {
+ CallInst *CI = cast<CallInst>(VL0);
+ setInsertPointAfterBundle(E->Scalars);
+ Function *FI;
+ Intrinsic::ID IID = Intrinsic::not_intrinsic;
+ if (CI && (FI = CI->getCalledFunction())) {
+ IID = (Intrinsic::ID) FI->getIntrinsicID();
+ }
+ std::vector<Value *> OpVecs;
+ for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
+ ValueList OpVL;
+ // ctlz,cttz and powi are special intrinsics whose second argument is
+ // a scalar. This argument should not be vectorized.
+ if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) {
+ CallInst *CEI = cast<CallInst>(E->Scalars[0]);
+ OpVecs.push_back(CEI->getArgOperand(j));
+ continue;
+ }
+ for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
+ CallInst *CEI = cast<CallInst>(E->Scalars[i]);
+ OpVL.push_back(CEI->getArgOperand(j));
+ }
+
+ Value *OpVec = vectorizeTree(OpVL);
+ DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
+ OpVecs.push_back(OpVec);
+ }
+
+ Module *M = F->getParent();
+ Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
+ Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) };
+ Function *CF = Intrinsic::getDeclaration(M, ID, Tys);
+ Value *V = Builder.CreateCall(CF, OpVecs);
+ E->VectorizedValue = V;
+ return V;
+ }
+ case Instruction::ShuffleVector: {
+ ValueList LHSVL, RHSVL;
+ for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
+ LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
+ RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
+ }
+ setInsertPointAfterBundle(E->Scalars);
+
+ Value *LHS = vectorizeTree(LHSVL);
+ Value *RHS = vectorizeTree(RHSVL);
+
+ if (Value *V = alreadyVectorized(E->Scalars))
+ return V;
+
+ // Create a vector of LHS op1 RHS
+ BinaryOperator *BinOp0 = cast<BinaryOperator>(VL0);
+ Value *V0 = Builder.CreateBinOp(BinOp0->getOpcode(), LHS, RHS);
+
+ // Create a vector of LHS op2 RHS
+ Instruction *VL1 = cast<Instruction>(E->Scalars[1]);
+ BinaryOperator *BinOp1 = cast<BinaryOperator>(VL1);
+ Value *V1 = Builder.CreateBinOp(BinOp1->getOpcode(), LHS, RHS);
+
+ // Create appropriate shuffle to take alternative operations from
+ // the vector.
+ std::vector<Constant *> Mask(E->Scalars.size());
+ unsigned e = E->Scalars.size();
+ for (unsigned i = 0; i < e; ++i) {
+ if (i & 1)
+ Mask[i] = Builder.getInt32(e + i);
+ else
+ Mask[i] = Builder.getInt32(i);
+ }
+
+ Value *ShuffleMask = ConstantVector::get(Mask);
+
+ Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
+ E->VectorizedValue = V;
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ return propagateMetadata(I, E->Scalars);
+
+ return V;
+ }
default:
llvm_unreachable("unknown inst");
}
- return 0;
+ return nullptr;
}
Value *BoUpSLP::vectorizeTree() {
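
[Editor's illustration, not part of the diff: after emitting the two vector binary operations V0 and V1, the ShuffleVector case above builds a mask that takes even lanes from V0 and odd lanes from V1, i.e. <0, e+1, 2, e+3, ...> for a bundle of width e. A minimal sketch of that mask construction:]

// Sketch of the alternating shuffle mask built in the ShuffleVector case
// above: lane i comes from V0 when i is even and from V1 (offset by the
// vector width) when i is odd.
#include <cassert>
#include <vector>

static std::vector<int> alternateMask(unsigned Width) {
  std::vector<int> Mask(Width);
  for (unsigned i = 0; i < Width; ++i)
    Mask[i] = (i & 1) ? int(Width + i) : int(i);
  return Mask;
}

int main() {
  // For a 4-wide add/sub bundle the mask is <0, 5, 2, 7>.
  assert(alternateMask(4) == (std::vector<int>{0, 5, 2, 7}));
  return 0;
}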
@@ -1571,8 +1956,8 @@ Value *BoUpSLP::vectorizeTree() {
// Skip users that we already RAUW. This happens when one instruction
// has multiple uses of the same value.
- if (std::find(Scalar->use_begin(), Scalar->use_end(), User) ==
- Scalar->use_end())
+ if (std::find(Scalar->user_begin(), Scalar->user_end(), User) ==
+ Scalar->user_end())
continue;
assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar");
@@ -1586,12 +1971,7 @@ Value *BoUpSLP::vectorizeTree() {
Value *Lane = Builder.getInt32(it->Lane);
// Generate extracts for out-of-tree users.
// Find the insertion point for the extractelement lane.
- if (PHINode *PN = dyn_cast<PHINode>(Vec)) {
- Builder.SetInsertPoint(PN->getParent()->getFirstInsertionPt());
- Value *Ex = Builder.CreateExtractElement(Vec, Lane);
- CSEBlocks.insert(PN->getParent());
- User->replaceUsesOfWith(Scalar, Ex);
- } else if (isa<Instruction>(Vec)){
+ if (isa<Instruction>(Vec)){
if (PHINode *PH = dyn_cast<PHINode>(User)) {
for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
if (PH->getIncomingValue(i) == Scalar) {
@@ -1624,7 +2004,6 @@ Value *BoUpSLP::vectorizeTree() {
// For each lane:
for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
Value *Scalar = Entry->Scalars[Lane];
-
// No need to handle users of gathered values.
if (Entry->NeedToGather)
continue;
@@ -1633,15 +2012,17 @@ Value *BoUpSLP::vectorizeTree() {
Type *Ty = Scalar->getType();
if (!Ty->isVoidTy()) {
- for (Value::use_iterator User = Scalar->use_begin(),
- UE = Scalar->use_end(); User != UE; ++User) {
- DEBUG(dbgs() << "SLP: \tvalidating user:" << **User << ".\n");
-
- assert((ScalarToTreeEntry.count(*User) ||
- // It is legal to replace the reduction users by undef.
- (RdxOps && RdxOps->count(*User))) &&
+#ifndef NDEBUG
+ for (User *U : Scalar->users()) {
+ DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
+
+ assert((ScalarToTreeEntry.count(U) ||
+ // It is legal to replace users in the ignorelist by undef.
+ (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), U) !=
+ UserIgnoreList.end())) &&
"Replacing out-of-tree value with undef");
}
+#endif
Value *Undef = UndefValue::get(Ty);
Scalar->replaceAllUsesWith(Undef);
}
@@ -1650,24 +2031,14 @@ Value *BoUpSLP::vectorizeTree() {
}
}
- for (Function::iterator it = F->begin(), e = F->end(); it != e; ++it) {
- BlocksNumbers[it].forget();
- }
+ for (auto &BN : BlocksNumbers)
+ BN.second.forget();
+
Builder.ClearInsertionPoint();
return VectorizableTree[0].VectorizedValue;
}
-class DTCmp {
- const DominatorTree *DT;
-
-public:
- DTCmp(const DominatorTree *DT) : DT(DT) {}
- bool operator()(const BasicBlock *A, const BasicBlock *B) const {
- return DT->properlyDominates(A, B);
- }
-};
-
void BoUpSLP::optimizeGatherSequence() {
DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
<< " gather sequences instructions.\n");
@@ -1703,21 +2074,30 @@ void BoUpSLP::optimizeGatherSequence() {
Insert->moveBefore(PreHeader->getTerminator());
}
+ // Make a list of all reachable blocks in our CSE queue.
+ SmallVector<const DomTreeNode *, 8> CSEWorkList;
+ CSEWorkList.reserve(CSEBlocks.size());
+ for (BasicBlock *BB : CSEBlocks)
+ if (DomTreeNode *N = DT->getNode(BB)) {
+ assert(DT->isReachableFromEntry(N));
+ CSEWorkList.push_back(N);
+ }
+
// Sort blocks by domination. This ensures we visit a block after all blocks
// dominating it are visited.
- SmallVector<BasicBlock *, 8> CSEWorkList(CSEBlocks.begin(), CSEBlocks.end());
- std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(), DTCmp(DT));
+ std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(),
+ [this](const DomTreeNode *A, const DomTreeNode *B) {
+ return DT->properlyDominates(A, B);
+ });
// Perform O(N^2) search over the gather sequences and merge identical
// instructions. TODO: We can further optimize this scan if we split the
// instructions into different buckets based on the insert lane.
SmallVector<Instruction *, 16> Visited;
- for (SmallVectorImpl<BasicBlock *>::iterator I = CSEWorkList.begin(),
- E = CSEWorkList.end();
- I != E; ++I) {
- assert((I == CSEWorkList.begin() || !DT->dominates(*I, *llvm::prior(I))) &&
+ for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
+ assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
"Worklist not sorted properly!");
- BasicBlock *BB = *I;
+ BasicBlock *BB = (*I)->getBlock();
// For all instructions in blocks containing gather sequences:
for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
Instruction *In = it++;
@@ -1733,7 +2113,7 @@ void BoUpSLP::optimizeGatherSequence() {
DT->dominates((*v)->getParent(), In->getParent())) {
In->replaceAllUsesWith(*v);
In->eraseFromParent();
- In = 0;
+ In = nullptr;
break;
}
}
@@ -1760,19 +2140,25 @@ struct SLPVectorizer : public FunctionPass {
}
ScalarEvolution *SE;
- DataLayout *DL;
+ const DataLayout *DL;
TargetTransformInfo *TTI;
+ TargetLibraryInfo *TLI;
AliasAnalysis *AA;
LoopInfo *LI;
DominatorTree *DT;
- virtual bool runOnFunction(Function &F) {
+ bool runOnFunction(Function &F) override {
+ if (skipOptnoneFunction(F))
+ return false;
+
SE = &getAnalysis<ScalarEvolution>();
- DL = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TTI = &getAnalysis<TargetTransformInfo>();
+ TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
AA = &getAnalysis<AliasAnalysis>();
LI = &getAnalysis<LoopInfo>();
- DT = &getAnalysis<DominatorTree>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
StoreRefs.clear();
bool Changed = false;
@@ -1793,15 +2179,14 @@ struct SLPVectorizer : public FunctionPass {
DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
- // Use the bollom up slp vectorizer to construct chains that start with
- // he store instructions.
- BoUpSLP R(&F, SE, DL, TTI, AA, LI, DT);
+ // Use the bottom up slp vectorizer to construct chains that start with
+ // store instructions.
+ BoUpSLP R(&F, SE, DL, TTI, TLI, AA, LI, DT);
// Scan the blocks in the function in post order.
for (po_iterator<BasicBlock*> it = po_begin(&F.getEntryBlock()),
e = po_end(&F.getEntryBlock()); it != e; ++it) {
BasicBlock *BB = *it;
-
// Vectorize trees that end at stores.
if (unsigned count = collectStores(BB, R)) {
(void)count;
@@ -1821,15 +2206,15 @@ struct SLPVectorizer : public FunctionPass {
return Changed;
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
FunctionPass::getAnalysisUsage(AU);
AU.addRequired<ScalarEvolution>();
AU.addRequired<AliasAnalysis>();
AU.addRequired<TargetTransformInfo>();
AU.addRequired<LoopInfo>();
- AU.addRequired<DominatorTree>();
+ AU.addRequired<DominatorTreeWrapperPass>();
AU.addPreserved<LoopInfo>();
- AU.addPreserved<DominatorTree>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
AU.setPreservesCFG();
}
@@ -1845,8 +2230,11 @@ private:
bool tryToVectorizePair(Value *A, Value *B, BoUpSLP &R);
/// \brief Try to vectorize a list of operands.
+ /// \@param BuildVector A list of users to ignore for the purpose of
+ /// scheduling and that don't need extracting.
/// \returns true if a value was vectorized.
- bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R);
+ bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
+ ArrayRef<Value *> BuildVector = None);
/// \brief Try to vectorize a chain that may start at the operands of \V;
bool tryToVectorize(BinaryOperator *V, BoUpSLP &R);
@@ -1867,7 +2255,7 @@ private:
StoreListMap StoreRefs;
};
-/// \brief Check that the Values in the slice in VL array are still existant in
+/// \brief Check that the Values in the slice in VL array are still existent in
/// the WeakVH array.
/// Vectorization of part of the VL array may cause later values in the VL array
/// to become invalid. We track when this has happened in the WeakVH array.
@@ -1894,7 +2282,7 @@ bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
if (!isPowerOf2_32(Sz) || VF < 2)
return false;
- // Keep track of values that were delete by vectorizing in the loop below.
+ // Keep track of values that were deleted by vectorizing in the loop below.
SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end());
bool Changed = false;
@@ -2000,7 +2388,7 @@ unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) {
// Check that the pointer points to scalars.
Type *Ty = SI->getValueOperand()->getType();
if (Ty->isAggregateType() || Ty->isVectorTy())
- return 0;
+ continue;
// Find the base pointer.
Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), DL);
@@ -2019,7 +2407,8 @@ bool SLPVectorizer::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
return tryToVectorizeList(VL, R);
}
-bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R) {
+bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
+ ArrayRef<Value *> BuildVector) {
if (VL.size() < 2)
return false;
@@ -2047,7 +2436,7 @@ bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R) {
bool Changed = false;
- // Keep track of values that were delete by vectorizing in the loop below.
+ // Keep track of values that were deleted by vectorizing in the loop below.
SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end());
for (unsigned i = 0, e = VL.size(); i < e; ++i) {
@@ -2069,13 +2458,38 @@ bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R) {
<< "\n");
ArrayRef<Value *> Ops = VL.slice(i, OpsWidth);
- R.buildTree(Ops);
+ ArrayRef<Value *> BuildVectorSlice;
+ if (!BuildVector.empty())
+ BuildVectorSlice = BuildVector.slice(i, OpsWidth);
+
+ R.buildTree(Ops, BuildVectorSlice);
int Cost = R.getTreeCost();
if (Cost < -SLPCostThreshold) {
- DEBUG(dbgs() << "SLP: Vectorizing pair at cost:" << Cost << ".\n");
- R.vectorizeTree();
-
+ DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
+ Value *VectorizedRoot = R.vectorizeTree();
+
+ // Reconstruct the build vector by extracting the vectorized root. This
+ // way we handle the case where some elements of the vector are undefined.
+      // (return (insertelt <4 x i32> (insertelt undef (opd0) 0) (opd1) 2))
+ if (!BuildVectorSlice.empty()) {
+ // The insert point is the last build vector instruction. The vectorized
+ // root will precede it. This guarantees that we get an instruction. The
+ // vectorized tree could have been constant folded.
+ Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back());
+ unsigned VecIdx = 0;
+ for (auto &V : BuildVectorSlice) {
+ IRBuilder<true, NoFolder> Builder(
+ ++BasicBlock::iterator(InsertAfter));
+ InsertElementInst *IE = cast<InsertElementInst>(V);
+ Instruction *Extract = cast<Instruction>(Builder.CreateExtractElement(
+ VectorizedRoot, Builder.getInt32(VecIdx++)));
+ IE->setOperand(1, Extract);
+ IE->removeFromParent();
+ IE->insertAfter(Extract);
+ InsertAfter = IE;
+ }
+ }
// Move to the next bundle.
i += VF - 1;
Changed = true;
@@ -2184,7 +2598,7 @@ static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
/// *p =
///
class HorizontalReduction {
- SmallPtrSet<Value *, 16> ReductionOps;
+ SmallVector<Value *, 16> ReductionOps;
SmallVector<Value *, 32> ReducedVals;
BinaryOperator *ReductionRoot;
@@ -2202,12 +2616,12 @@ class HorizontalReduction {
public:
HorizontalReduction()
- : ReductionRoot(0), ReductionPHI(0), ReductionOpcode(0),
+ : ReductionRoot(nullptr), ReductionPHI(nullptr), ReductionOpcode(0),
ReducedValueOpcode(0), ReduxWidth(0), IsPairwiseReduction(false) {}
/// \brief Try to find a reduction tree.
bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B,
- DataLayout *DL) {
+ const DataLayout *DL) {
assert((!Phi ||
std::find(Phi->op_begin(), Phi->op_end(), B) != Phi->op_end()) &&
"Thi phi needs to use the binary operator");
@@ -2217,10 +2631,10 @@ public:
// In such a case start looking for a tree rooted in the first '+'.
if (Phi) {
if (B->getOperand(0) == Phi) {
- Phi = 0;
+ Phi = nullptr;
B = dyn_cast<BinaryOperator>(B->getOperand(1));
} else if (B->getOperand(1) == Phi) {
- Phi = 0;
+ Phi = nullptr;
B = dyn_cast<BinaryOperator>(B->getOperand(0));
}
}
@@ -2278,7 +2692,7 @@ public:
// We need to be able to reassociate the adds.
if (!TreeN->isAssociative())
return false;
- ReductionOps.insert(TreeN);
+ ReductionOps.push_back(TreeN);
}
// Retract.
Stack.pop_back();
@@ -2306,7 +2720,7 @@ public:
if (NumReducedVals < ReduxWidth)
return false;
- Value *VectorizedTree = 0;
+ Value *VectorizedTree = nullptr;
IRBuilder<> Builder(ReductionRoot);
FastMathFlags Unsafe;
Unsafe.setUnsafeAlgebra();
@@ -2315,7 +2729,7 @@ public:
for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) {
ArrayRef<Value *> ValsToReduce(&ReducedVals[i], ReduxWidth);
- V.buildTree(ValsToReduce, &ReductionOps);
+ V.buildTree(ValsToReduce, ReductionOps);
// Estimate cost.
int Cost = V.getTreeCost() + getReductionCost(TTI, ReducedVals[i]);
@@ -2349,13 +2763,13 @@ public:
}
// Update users.
if (ReductionPHI) {
- assert(ReductionRoot != NULL && "Need a reduction operation");
+ assert(ReductionRoot && "Need a reduction operation");
ReductionRoot->setOperand(0, VectorizedTree);
ReductionRoot->setOperand(1, ReductionPHI);
} else
ReductionRoot->replaceAllUsesWith(VectorizedTree);
}
- return VectorizedTree != 0;
+ return VectorizedTree != nullptr;
}
private:
@@ -2434,18 +2848,21 @@ private:
///
/// Returns true if it matches
///
-static bool findBuildVector(InsertElementInst *IE,
- SmallVectorImpl<Value *> &Ops) {
- if (!isa<UndefValue>(IE->getOperand(0)))
+static bool findBuildVector(InsertElementInst *FirstInsertElem,
+ SmallVectorImpl<Value *> &BuildVector,
+ SmallVectorImpl<Value *> &BuildVectorOpds) {
+ if (!isa<UndefValue>(FirstInsertElem->getOperand(0)))
return false;
+ InsertElementInst *IE = FirstInsertElem;
while (true) {
- Ops.push_back(IE->getOperand(1));
+ BuildVector.push_back(IE);
+ BuildVectorOpds.push_back(IE->getOperand(1));
if (IE->use_empty())
return false;
- InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->use_back());
+ InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back());
if (!NextUse)
return true;
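
[Editor's illustration, not from the change: findBuildVector walks a chain of insertelement instructions that starts from undef, recording both the insertelements themselves (BuildVector) and the scalars they insert (BuildVectorOpds). A hedged, self-contained sketch of that chain walk over a toy node type, omitting the "operand 0 must be undef" entry check:]

// Sketch of the build-vector chain walk done by findBuildVector above.
// Node is a toy stand-in for InsertElementInst: it carries the inserted
// scalar and at most one insertelement user continuing the chain.
#include <cassert>
#include <vector>

struct Node {
  int Scalar;       // operand(1): the value being inserted
  Node *NextInsert; // single insertelement user, or nullptr at chain end
};

// Collect the chain nodes and their scalar operands.
static bool collectBuildVector(Node *First, std::vector<Node *> &Chain,
                               std::vector<int> &Opds) {
  for (Node *N = First; N; N = N->NextInsert) {
    Chain.push_back(N);
    Opds.push_back(N->Scalar);
    if (!N->NextInsert)
      return true; // last insert has no further insertelement user
  }
  return false;
}

int main() {
  Node C{30, nullptr}, B{20, &C}, A{10, &B};
  std::vector<Node *> Chain;
  std::vector<int> Opds;
  assert(collectBuildVector(&A, Chain, Opds));
  assert((Opds == std::vector<int>{10, 20, 30}));
  return 0;
}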
@@ -2512,7 +2929,7 @@ bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
break;
}
- // Start over at the next instruction of a differnt type (or the end).
+ // Start over at the next instruction of a different type (or the end).
IncIt = SameTypeIt;
}
}
@@ -2535,7 +2952,8 @@ bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
Value *Rdx =
(P->getIncomingBlock(0) == BB
? (P->getIncomingValue(0))
- : (P->getIncomingBlock(1) == BB ? P->getIncomingValue(1) : 0));
+ : (P->getIncomingBlock(1) == BB ? P->getIncomingValue(1)
+ : nullptr));
// Check if this is a Binary Operator.
BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx);
if (!BI)
@@ -2574,7 +2992,7 @@ bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
if (BinaryOperator *BinOp =
dyn_cast<BinaryOperator>(SI->getValueOperand())) {
HorizontalReduction HorRdx;
- if (((HorRdx.matchAssociativeReduction(0, BinOp, DL) &&
+ if (((HorRdx.matchAssociativeReduction(nullptr, BinOp, DL) &&
HorRdx.tryToReduce(R, TTI)) ||
tryToVectorize(BinOp, R))) {
Changed = true;
@@ -2610,12 +3028,16 @@ bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
}
// Try to vectorize trees that start at insertelement instructions.
- if (InsertElementInst *IE = dyn_cast<InsertElementInst>(it)) {
- SmallVector<Value *, 8> Ops;
- if (!findBuildVector(IE, Ops))
+ if (InsertElementInst *FirstInsertElem = dyn_cast<InsertElementInst>(it)) {
+ SmallVector<Value *, 16> BuildVector;
+ SmallVector<Value *, 16> BuildVectorOpds;
+ if (!findBuildVector(FirstInsertElem, BuildVector, BuildVectorOpds))
continue;
- if (tryToVectorizeList(Ops, R)) {
+ // Vectorize starting with the build vector operands ignoring the
+ // BuildVector instructions for the purpose of scheduling and user
+ // extraction.
+ if (tryToVectorizeList(BuildVectorOpds, R, BuildVector)) {
Changed = true;
it = BB->begin();
e = BB->end();
diff --git a/contrib/llvm/lib/Transforms/Vectorize/Vectorize.cpp b/contrib/llvm/lib/Transforms/Vectorize/Vectorize.cpp
index a927fe1..d459bcf 100644
--- a/contrib/llvm/lib/Transforms/Vectorize/Vectorize.cpp
+++ b/contrib/llvm/lib/Transforms/Vectorize/Vectorize.cpp
@@ -17,7 +17,7 @@
#include "llvm-c/Initialization.h"
#include "llvm-c/Transforms/Vectorize.h"
#include "llvm/Analysis/Passes.h"
-#include "llvm/Analysis/Verifier.h"
+#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/PassManager.h"